From a66f0feff7de38a18a421451e54005843dc7e0c0 Mon Sep 17 00:00:00 2001
From: svcmobrel-release
Date: Mon, 25 Aug 2025 10:24:25 -0700
Subject: [PATCH] Updating prebuilts and/or headers

d13779dbbab1c776db15f462cd46b29f2c0f8c7c - Makefile 7d577fdb9594ae572ff38fdda682a4796ab832ca - COPYING 5728867ce2e96b63b29367be6aa1c0e47bcafc8f - SECURITY.md 6b73bf6a534ddc0f64e8ba88739381c3b7fb4b5c - nv-compiler.sh 05e911b99b109a721d2045f025b21189e2718e60 - README.md ec5f1eb408e0b650158e0310fb1ddd8e9b323a6f - CONTRIBUTING.md af3ee56442f16029cb9b13537477c384226b22fc - CODE_OF_CONDUCT.md 07bd07999f296d935386a8edf719d0e296f63227 - kernel-open/Kbuild 45b68e3eacda04dcadce48a8238574302a71a3ca - kernel-open/Makefile 99f4563141af1278f13cb23a6e6c24d21d583d7b - kernel-open/conftest.sh 0b1508742a1c5a04b6c3a4be1b48b506f4180848 - kernel-open/dkms.conf 19a5da412ce1557b721b8550a4a80196f6162ba6 - kernel-open/common/inc/os_dsi_panel_props.h 4750735d6f3b334499c81d499a06a654a052713d - kernel-open/common/inc/nv-caps.h 92de3baafe321dd0dcf8665aae4614d5ac670718 - kernel-open/common/inc/rs_access.h 60ef64c0f15526ae2d786e5cec07f28570f0663b - kernel-open/common/inc/conftest.h 880e45b68b19fdb91ac94991f0e6d7fc3b406b1f - kernel-open/common/inc/nv-pci-types.h 6d2f660ef0942edf664874f260266ec81cd0ff08 - kernel-open/common/inc/nvtypes.h c45b2faf17ca2a205c56daa11e3cb9d864be2238 - kernel-open/common/inc/nv-modeset-interface.h 5bc7a748c7d3dfa6559ca4f9fe6199e17098ec8f - kernel-open/common/inc/nv-lock.h b249abc0a7d0c9889008e98cb2f8515a9d310b85 - kernel-open/common/inc/nvgputypes.h e4a4f57abb8769d204468b2f5000c81f5ea7c92f - kernel-open/common/inc/nv-procfs.h 8b19b93e958aca626899f035334a4c96f8776eb6 - kernel-open/common/inc/nv.h ede1f77acb43e28391bceac058e00a7a8d799b0d - kernel-open/common/inc/nvmisc.h ae374d3e438f8d3b60df8c4602618c58564b73f9 - kernel-open/common/inc/rm-gpu-ops.h 3f7b20e27e6576ee1f2f0557d269697a0b8af7ec - kernel-open/common/inc/nv-firmware-registry.h 5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - kernel-open/common/inc/dce_rm_client_ipc.h 3e8075872e2efa843b74b884ef5098468edc4f18 - kernel-open/common/inc/nvimpshared.h befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - kernel-open/common/inc/nv_stdarg.h 0e70d16576584082ee4c7f3ff9944f3bd107b1c1 - kernel-open/common/inc/cpuopsys.h d7ab0ee225361daacd280ff98848851933a10a98 - kernel-open/common/inc/nv-list-helpers.h b02c378ac0521c380fc2403f0520949f785b1db6 - kernel-open/common/inc/nv-dmabuf.h a3d1e51c0f4217f1dc4cb0c48aa0eafd054d4e5e - kernel-open/common/inc/nv-procfs-utils.h 81592e5c17bebad04cd11d73672c859baa070329 - kernel-open/common/inc/nv-chardev-numbers.h 61cf8f3fd32142dc402f6802b5d4c9af6c875c35 - kernel-open/common/inc/nv-firmware.h d5253e7e4abd3ad8d72375260aa80037adcd8973 - kernel-open/common/inc/nv_dpy_id.h 61a9589e4a8ec122e5a6c2258658d493ee747897 - kernel-open/common/inc/nv-platform.h b986bc6591ba17a74ad81ec4c93347564c6d5165 - kernel-open/common/inc/nvkms-format.h 4f487eccd762f3ca645a685d5c333ff569e7987c - kernel-open/common/inc/nv-kthread-q-os.h 4015c4557ea0790a2bdf5695832c89e31d75aee9 - kernel-open/common/inc/nvlimits.h 143051f69a53db0e7c5d2f846a9c14d666e264b4 - kernel-open/common/inc/nv-kref.h 56f432032bef4683c2801f46bec5065923475fb1 - kernel-open/common/inc/nv-kthread-q.h b4c5d759f035b540648117b1bff6b1701476a398 - kernel-open/common/inc/nvCpuUuid.h 67a9707c568e167bae4404c7785ed614babb7b82 - kernel-open/common/inc/nv-linux.h 7c7888550b12eeb98128ea9ac771b897327f538e - kernel-open/common/inc/nv-hypervisor.h f9cb3701681994ff6f32833892d900b0da2b89f6 - kernel-open/common/inc/nv-pgprot.h
b8700a911ac85770bf25d70b9692308af63966bd - kernel-open/common/inc/nvstatuscodes.h 3a5f4f105672921b857fec7f2b577d9d525afe37 - kernel-open/common/inc/nv-timer.h 5cd0b3f9c7f544e9064efc9b5ba4f297e5494315 - kernel-open/common/inc/nv-time.h 7a78f354e0b68f03d6ab566d5b755e299456f361 - kernel-open/common/inc/os_gpio.h 154abd192eb950fecffcca470ee80b27f224fd79 - kernel-open/common/inc/nv-proto.h 2eb11e523a3ecba2dcd68f3146e1e666a44256ae - kernel-open/common/inc/nv-ioctl.h 1328058925b64e97588d670fe70466b31af7c7c1 - kernel-open/common/inc/nv-mm.h 25d89847c11449b329941a26f04aec955cfaf150 - kernel-open/common/inc/nv-pci.h 95bf694a98ba78d5a19e66463b8adda631e6ce4c - kernel-open/common/inc/nvstatus.h d74a8d4a9ae3d36e92b39bc7c74b27df44626b1c - kernel-open/common/inc/nv_mig_types.h b3258444b6a2c2399f5f00c7cac5b470c41caeaa - kernel-open/common/inc/nv-hash.h 4c856c1324060dcb5a9e72e5e82c7a60f6324733 - kernel-open/common/inc/nvkms-kapi.h 44cb5bc2bc87a5c3447bcb61f2ce5aef08c07fa7 - kernel-open/common/inc/nv_uvm_interface.h 1e7eec6561b04d2d21c3515987aaa116e9401c1f - kernel-open/common/inc/nv-kernel-interface-api.h c54c62de441828282db9a4f5b35c2fa5c97d94f1 - kernel-open/common/inc/nvkms-api-types.h ade7410c1c0572dbed49b4b0d97b87245ca59115 - kernel-open/common/inc/os-interface.h 2ffd0138e1b3425ade16b962c3ff02a82cde2e64 - kernel-open/common/inc/nv-ioctl-numa.h 995d8447f8539bd736cc09d62983ae8ebc7e3436 - kernel-open/common/inc/nv_common_utils.h c75bfc368c6ce3fc2c1a0c5062834e90d822b365 - kernel-open/common/inc/nv-memdbg.h dfd7b82a7f2939d4c1869840059705c6b71bffe3 - kernel-open/common/inc/nv-msi.h 3b12d770f8592b94a8c7774c372e80ad08c5774c - kernel-open/common/inc/nvi2c.h 894ef9e230604572bbceabdfd5f241059d54aa10 - kernel-open/common/inc/nv_speculation_barrier.h 107d1ecb8a128044260915ea259b1e64de3defea - kernel-open/common/inc/nv-ioctl-numbers.h 19cfcbf5a3021aa9aaa0ceacbb6711e7f7a6e09e - kernel-open/common/inc/nv_uvm_user_types.h cfcd2ef5eaec92f8e4647fff02a3b7e16473cbff - kernel-open/common/inc/nv_uvm_types.h b642fb649ce2ba17f37c8aa73f61b38f99a74986 - kernel-open/common/inc/nv-retpoline.h 3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - kernel-open/common/inc/nv-gpu-info.h cda75171ca7d8bf920aab6d56ef9aadec16fd15d - kernel-open/common/inc/os/nv_memory_type.h 70b67003fda6bdb8a01fa1e41c3b0e25136a856c - kernel-open/common/inc/os/nv_memory_area.h 11b09260232a88aa1f73f109fdfab491a7b73576 - kernel-open/nvidia/nv-nano-timer.c dcf4427b83cce7737f2b784d410291bf7a9612dc - kernel-open/nvidia/nv-reg.h 0b8ff957fb14f20ba86f61e556d1ab15bf5acd74 - kernel-open/nvidia/nv-imp.c 6b09b5ef8a37f78c8e82074b06b40ef593c81807 - kernel-open/nvidia/libspdm_rsa.c b8d361216db85fe897cbced2a9600507b7708c61 - kernel-open/nvidia/libspdm_hkdf_sha.c 66e2bfc490fb77e0b72a8192b719d3dc74d25d59 - kernel-open/nvidia/nv-pat.c 26a30f2d26c2a97a6e2ee457d97d32f48b0bf25b - kernel-open/nvidia/nv-vm.c b8a770cea0629c57d8b0b3d7414d7b0f043ee8cf - kernel-open/nvidia/libspdm_ecc.c 4c183eb39251cd78d90868ec6f75ebc7a37e6644 - kernel-open/nvidia/os-usermap.c 8c30b6230439edcbec62636cc93be512bca8637f - kernel-open/nvidia/nv-usermap.c 7af675f85642229b7e7de05dcadd622550fe7ad7 - kernel-open/nvidia/nv-vtophys.c d11ab03a617b29efcf00f85e24ebce60f91cf82c - kernel-open/nvidia/nv-backlight.c ef8fd76c55625aeaa71c9b789c4cf519ef6116b2 - kernel-open/nvidia/libspdm_hkdf.c 1590794925ebd9cbc14aae8c47e0cc205a3f4b52 - kernel-open/nvidia/nv-rsync.h 934a686ba8d7b77cce2d928cb3b04f611d9f9187 - kernel-open/nvidia/libspdm_aead.c f16e6a33b5004566333fb8b99504a0fb95d51226 - kernel-open/nvidia/nv-gpio.c 
8ed2c3b93eeaa52342d944e794180fd5d386688a - kernel-open/nvidia/libspdm_rsa_ext.c 2e5d18118835c19c5ca7edee9bceeae613b9d7f9 - kernel-open/nvidia/nv-procfs.c 3e820e66f556be10c0d9728d4187e43c30658736 - kernel-open/nvidia/nv.c 65fe797fb5d4af2db67544ddb79d49ab1b7ca859 - kernel-open/nvidia/nv-dsi-parse-panel-props.c e3efae4ed920545062a2d06064df8be1a2a42135 - kernel-open/nvidia/nv-caps-imex.h 8c64e75aaaa9ac6f17aae7ed62db23eb2e5b9953 - kernel-open/nvidia/nv_uvm_interface.c 4563589496a93a2720e25807ca1be2565f03554c - kernel-open/nvidia/nv-bpmp.c aea97021d9aa023a357f009fcddc710f710ceb5e - kernel-open/nvidia/libspdm_x509.c f29e5bc1c7bd2c670780cdbb7275900a69f4d205 - kernel-open/nvidia/internal_crypt_lib.h 13dc24fb41516c777328d4db64fa39a9e2c40191 - kernel-open/nvidia/nv-modeset-interface.c 6ae527b69eebb44224b05e8cb3546757532d8a16 - kernel-open/nvidia/nv-dma.c fe204e3820d206b5b0c34a51084f39b97310305a - kernel-open/nvidia/nv-ipc-soc.c 60d6ff5becc0ddbcf4b489b9d88c1dec8ccc67be - kernel-open/nvidia/nv-platform-pm.c c1f7c81018a414b7a657431b115a1b86d3ebe3e7 - kernel-open/nvidia/os-mlock.c c762aa186dc72ed0b9183492f9bd187c301d33d3 - kernel-open/nvidia/nv-kthread-q.c 70bece14e12b9ffc92816ee8159a4ce596579d78 - kernel-open/nvidia/os-pci.c a677049bb56fa5ebe22fe43b0c4a12acd58a6677 - kernel-open/nvidia/nv-p2p.c e4d12f027cb5f74124da71bbbc23bcb33651834a - kernel-open/nvidia/nv-pci-table.c 415b8f457c01417f32c998ae310b5a42dd5805cb - kernel-open/nvidia/nv-pci.c 6dfc57ac42bed97c6ff81d82e493f05b369e0b84 - kernel-open/nvidia/nvspdm_cryptlib_extensions.h bba706cfbc04b3a880b5e661066f92e765fad663 - kernel-open/nvidia/nv-caps-imex.c ed3c83f62e4ccc4b53d886eedd4b47518a361393 - kernel-open/nvidia/nv-dmabuf.c 66b7fad4d73a23153298ce777afb14d2c8be42c1 - kernel-open/nvidia/libspdm_internal_crypt_lib.c 6d4fbea733fdcd92fc6a8a5884e8bb359f9e8abd - kernel-open/nvidia/rmp2pdefines.h b71bf4426322ab59e78e2a1500509a5f4b2b71ab - kernel-open/nvidia/nv-pat.h 9a5a58bd6eb71a4c32e334a1a4e3326a17143cce - kernel-open/nvidia/os-interface.c 1a91f5e6d517856303da448bea80d167b238e41c - kernel-open/nvidia/nv-i2c.c 7d409e3f0255d17457bffbf318e2f9ea160680a5 - kernel-open/nvidia/nv-pci-table.h c50865d3070a0c3476ce24ff1ab4cc4e3f9ea4be - kernel-open/nvidia/detect-self-hosted.h 7ae9a57b9e99fd2a3534798e52e57f7784738a53 - kernel-open/nvidia/nv-report-err.c 3b27e4eaa97bd6fa71f1a075b50af69b1ec16454 - kernel-open/nvidia/libspdm_ec.c dd9e367cba9e0672c998ec6d570be38084a365ab - kernel-open/nvidia/libspdm_rand.c d8b8077adb7fd70eb9528d421bdef98c4378b57a - kernel-open/nvidia/nv-msi.c 1cabb1e7fa825216c09f9d2f103657b0ac2dc85a - kernel-open/nvidia/nv-platform.c dd819a875c584bc469082fcf519779ea00b1d952 - kernel-open/nvidia/libspdm_aead_aes_gcm.c 74958745f83b14c04aaa60248bf5c86ceef6b5cb - kernel-open/nvidia/nv-acpi.c 4d19a1756af848d25fd2fd8cc691dcbcf0afb776 - kernel-open/nvidia/os-registry.c 80f9ac558a57c60cbf70f3ecaf73c71e60c98885 - kernel-open/nvidia/nv-rsync.c 7f5d251db1db4a179a67efea0178fbfda94f95d0 - kernel-open/nvidia/nv_gpu_ops.h 642c3a7d10b263ab9a63073f83ad843566927b58 - kernel-open/nvidia/libspdm_hmac_sha.c 7d53c2d27580d1b2cc56246d9972f3f310a3cd34 - kernel-open/nvidia/nv-clk.c 0f28ebcdb723e836c923e40642429838fa9e86dc - kernel-open/nvidia/nvidia-sources.Kbuild 99540efd2dfa6907b84e628e12370eefb0222850 - kernel-open/nvidia/nv-mmap.c 11ac7a3a3b4def7fa31a289f5f8461ad90eca06b - kernel-open/nvidia/nv-tracepoint.h a14b9115cff1e5e7491737083588a5646c8c227b - kernel-open/nvidia/nv-report-err.h 011f975d4f94f7b734efa23d3c8075321eaaf0e8 - kernel-open/nvidia/nv-memdbg.c 
1ba353673c266cb47ebcd07707e8ce125353e751 - kernel-open/nvidia/nvidia.Kbuild ac976b92e83f19125d6b3f7e95d9523e430b9b09 - kernel-open/nvidia/nv-p2p.h 9b036018501d9b8543aabe7ec35dbe33023bb3e0 - kernel-open/nvidia/nv-host1x.c 11778961efc78ef488be5387fa3de0c1b761c0d9 - kernel-open/nvidia/libspdm_sha.c 02b1936dd9a9e30141245209d79b8304b7f12eb9 - kernel-open/nvidia/nv-cray.c 2d61ad39b2356c9cfd8d57c1842e80a20272e37f - kernel-open/nvidia/nv-caps.c fc199c04b321db79ab5446574d9b994f8bfe6c24 - kernel-open/nvidia/libspdm_shash.c fa178a7209f56008e67b553a2c5ad1b2dd383aac - kernel-open/nvidia/hal/library/cryptlib/cryptlib_rng.h 34de62da6f880ba8022299c77eddbb11d7fc68d2 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_hash.h 8af43a3f0e4201aa6ff0099221a371fb1801e818 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_rsa.h cf94004b7b5729982806f7d6ef7cc6db53e3de56 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_aead.h 9a6e164ec60c2feb1eb8782e3028afbffe420927 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_mac.h 4991dfa8852edbdd1ffbd2d44f7b6ac4e1c8c752 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_ec.h 7694b027d74d65561ce6cd15a8c0822e4b32b73a - kernel-open/nvidia/hal/library/cryptlib/cryptlib_sm2.h 8b84a0cc1127f39652362007e048ea568c9cf80b - kernel-open/nvidia/hal/library/cryptlib/cryptlib_ecd.h 2d7b566655ba8a05fae4ea4f6c806b75d7ebb5f3 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_cert.h 0dcb1fd3982e6307b07c917cb453cddbcd1d2f43 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_dh.h 7ff12b437215b77c920a845943e4101dcde289c4 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_hkdf.h 16dd525c52448a32cc8da75d6a644d8a35efbfee - kernel-open/nvidia/library/spdm_lib_config.h 53a9acf65cad6bc4869a15d8086990365c987456 - kernel-open/nvidia/library/cryptlib.h cfbaebb1091f7b1a8d2e3c54c2301ac45ade6c40 - kernel-open/nvidia/internal/libspdm_lib_config.h 2ea094687fbee1e116cd0362cbeba7592439e0b6 - kernel-open/nvidia-drm/nvidia-drm-crtc.h bed7b5053d09473188061b0d7f6a3a65b64f72e0 - kernel-open/nvidia-drm/nvidia-drm-linux.c 0f8e4535cf97fadea23c9848483355583f492131 - kernel-open/nvidia-drm/nvidia-drm-utils.c 35034b6f174cd6a14b7d94a07f777794570959b4 - kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h 072e1d6a260e348dada181162949eee190321ed8 - kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c e86dac2985f4e61f4e2676b3290e47cdcb951c46 - kernel-open/nvidia-drm/nvidia-drm-modeset.c f00a605cac7ffc7f309e3952c5d4cea7cbfc0b7e - kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h 99642b76e9a84b5a1d2e2f4a8c7fb7bcd77a44fd - kernel-open/nvidia-drm/nvidia-drm.h 763833186eabf1a0501434426c18161febf624d4 - kernel-open/nvidia-drm/nvidia-drm-fb.h 4bada3ff7bfee8b7e222fc4cafb2ac97c67d7898 - kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h 99a2e922a448b4d76318ec151378c8bbf5971595 - kernel-open/nvidia-drm/nvidia-drm-helper.c ae6efc1bbec8a5e948b7244f4801f0b4b398f203 - kernel-open/nvidia-drm/nvidia-drm.c 94c28482252c983fd97532634ffafea0bf77337a - kernel-open/nvidia-drm/nvidia-drm-ioctl.h a4f77f8ce94f63f3ca2a970c1935d8da48ab5ccc - kernel-open/nvidia-drm/nvidia-drm-format.c b78e4f40234f908e722f172485e4466d80b7b501 - kernel-open/nvidia-drm/nvidia-drm-drv.h 4154c5562cebd2747bd15fb302c19cb0cefe1c9c - kernel-open/nvidia-drm/nvidia-drm-connector.h c762aa186dc72ed0b9183492f9bd187c301d33d3 - kernel-open/nvidia-drm/nv-kthread-q.c e4d12f027cb5f74124da71bbbc23bcb33651834a - kernel-open/nvidia-drm/nv-pci-table.c 47110750cf788e7d9ddb5db85be3658ac660a109 - kernel-open/nvidia-drm/nvidia-drm-fence.h 73a1acab50e65c468cb71b65238a051bc306ae70 - 
kernel-open/nvidia-drm/nvidia-drm-encoder.h aa388c0d44060b8586967240927306006531cdb7 - kernel-open/nvidia-drm/nvidia-drm-helper.h d0b4f4383a7d29be40dd22e36faa96dae12d2364 - kernel-open/nvidia-drm/nvidia-drm-os-interface.h 63a2fec1f2c425e084bdc07ff05bda62ed6b6ff1 - kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c a46422076a6a3e439349fbda4fc46e4add29b8e5 - kernel-open/nvidia-drm/nvidia-drm-drv.c 19031f2eaaaeb0fa1da61681fa6048c3e303848b - kernel-open/nvidia-drm/nvidia-drm-gem.c 71ea2d5b02bf8fb3e8cf6b7c84686e2edbc244d0 - kernel-open/nvidia-drm/nvidia-drm-encoder.c 7d409e3f0255d17457bffbf318e2f9ea160680a5 - kernel-open/nvidia-drm/nv-pci-table.h 9f57b8724205e03ca66b32fe710cd36b82932528 - kernel-open/nvidia-drm/nvidia-drm-conftest.h 6e9838b169beffe149ba12625acb496504d36d50 - kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c d2525a36b7aec71982df80a89b861f220312103d - kernel-open/nvidia-drm/nvidia-dma-resv-helper.h a505f0aa98ebcf438307f6bacf9bf5a5be189839 - kernel-open/nvidia-drm/nvidia-drm-connector.c d5518597469dc874ee7e264b9400db51af2fcd44 - kernel-open/nvidia-drm/nvidia-drm-format.h 437d87e7e4bd34ae3c67b27c2faaa394575acf70 - kernel-open/nvidia-drm/nvidia-drm-priv.h 88b2035ddbba8c7f455209e61256b4e7b09c11dd - kernel-open/nvidia-drm/nvidia-drm-fence.c eff6a0b72274c8824b7a79e9aee261da3a6fb4f1 - kernel-open/nvidia-drm/nvidia-drm-gem.h 6528efa1f8061678b8543c5c0be8761cab860858 - kernel-open/nvidia-drm/nvidia-drm-modeset.h 46a41b0b3470190abcdc57a739238a9cd773812b - kernel-open/nvidia-drm/nvidia-drm.Kbuild 995d8447f8539bd736cc09d62983ae8ebc7e3436 - kernel-open/nvidia-drm/nv_common_utils.h 40b5613d1fbbe6b74bff67a5d07974ad321f75f0 - kernel-open/nvidia-drm/nvidia-drm-utils.h d924c494620760887546f428f87387d8ed5b99a6 - kernel-open/nvidia-drm/nvidia-drm-fb.c 5eb8385042f3efa5c2e14d168cdb40b211467552 - kernel-open/nvidia-drm/nvidia-drm-crtc.c 62a9b9b30fd7417d9ab085b2bfc731aadd9826f9 - kernel-open/nvidia-drm/nvidia-drm-os-interface.c ca86fee8bd52e6c84e376199c5f3890078bc2031 - kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h 04ea084a5c5d496cc43103d1997053246a2fa94c - kernel-open/nvidia-modeset/nvidia-modeset-linux.c b2a5ddfd8dcb3000b9d102bd55b5b560730e81d5 - kernel-open/nvidia-modeset/nvkms.h c762aa186dc72ed0b9183492f9bd187c301d33d3 - kernel-open/nvidia-modeset/nv-kthread-q.c da6fd16e29300170aba8a652ea6296241f66243b - kernel-open/nvidia-modeset/nvidia-modeset.Kbuild 2ea1436104463c5e3d177e8574c3b4298976d37e - kernel-open/nvidia-modeset/nvkms-ioctl.h 13d4f9648118dd25b790be0d8d72ebaa12cc8d0e - src/common/sdk/nvidia/inc/rs_access.h 579be4859587206460d8729804aab19180fb69bb - src/common/sdk/nvidia/inc/nvtypes.h 993f17e3094243623f793ae16bd84b5fa3f335ec - src/common/sdk/nvidia/inc/g_finn_rm_api.h a54d77d45f9b0c5ae3fa8b59d2117145260800b6 - src/common/sdk/nvidia/inc/cc_drv.h b249abc0a7d0c9889008e98cb2f8515a9d310b85 - src/common/sdk/nvidia/inc/nvgputypes.h 78a4b6b19a38de41527ef8b290754deca5906817 - src/common/sdk/nvidia/inc/nvcd.h ede1f77acb43e28391bceac058e00a7a8d799b0d - src/common/sdk/nvidia/inc/nvmisc.h 46966ed7fc8d85931b49b12683c42666181f33f6 - src/common/sdk/nvidia/inc/nvimpshared.h befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - src/common/sdk/nvidia/inc/nv_stdarg.h f5a682339a89d2b119b43e5b9263dd67346ed3bc - src/common/sdk/nvidia/inc/cpuopsys.h cf1de27d5bcbd0adbe3c3b64466193b7d9094c71 - src/common/sdk/nvidia/inc/nverror.h 4015c4557ea0790a2bdf5695832c89e31d75aee9 - src/common/sdk/nvidia/inc/nvlimits.h 7c7888550b12eeb98128ea9ac771b897327f538e - src/common/sdk/nvidia/inc/nv-hypervisor.h 
b8700a911ac85770bf25d70b9692308af63966bd - src/common/sdk/nvidia/inc/nvstatuscodes.h 95bf694a98ba78d5a19e66463b8adda631e6ce4c - src/common/sdk/nvidia/inc/nvstatus.h a506a41b8dcf657fb39a740ffc1dfd83835d6c89 - src/common/sdk/nvidia/inc/nvcfg_sdk.h 1e7eec6561b04d2d21c3515987aaa116e9401c1f - src/common/sdk/nvidia/inc/nv-kernel-interface-api.h af0bc90b3ad4767de53b8ff91e246fdab0146e8b - src/common/sdk/nvidia/inc/nvsecurityinfo.h 5cec5038e1f4a395a08b765c8361a9560f3312b7 - src/common/sdk/nvidia/inc/nvdisptypes.h c8b96af9d498f87cb9acde064648f9e84d789055 - src/common/sdk/nvidia/inc/nv_vgpu_types.h 3b12d770f8592b94a8c7774c372e80ad08c5774c - src/common/sdk/nvidia/inc/nvi2c.h bbf6c09ef9bb10ab63d337bf011872f9073c3e5b - src/common/sdk/nvidia/inc/nvos.h 9bca638f5832d831880f090c583fac6fc8cf6ee6 - src/common/sdk/nvidia/inc/dpringbuffertypes.h 7de14a0c3cc8460a9c41e1ee32fda5409c5b9988 - src/common/sdk/nvidia/inc/mmu_fmt_types.h 774318ced0fdcb199e99cf0fee9688259dd01a51 - src/common/sdk/nvidia/inc/nvfixedtypes.h ed51b6e2d454af3da36f9c5f4a8a7958d2c5f156 - src/common/sdk/nvidia/inc/alloc/alloc_channel.h ffe618524466cbbff64de55d88fd987e198bb8c9 - src/common/sdk/nvidia/inc/class/cl9271.h cef74c734fc7d2f32ff74095c59212d9e1d4cafc - src/common/sdk/nvidia/inc/class/cl84a0.h 9f8a45cb986e3ad2bd4a8900469fe5f8b0c9463a - src/common/sdk/nvidia/inc/class/cl9870.h a6bb32861fa3f93ccb16490f0f2751a1ef333eed - src/common/sdk/nvidia/inc/class/cl0101.h e6818f1728a66a70080e87dac15a6f92dd875b4e - src/common/sdk/nvidia/inc/class/cl927d.h 522682a17bacd5c1d6081c0020d094ee3d5c4a30 - src/common/sdk/nvidia/inc/class/clcb97.h 89d4eeb421fc2be3b9717e333e9ff67bfffa24e8 - src/common/sdk/nvidia/inc/class/cl2080.h f558fddfdc088b86a1b479542b8e782e42a5bdce - src/common/sdk/nvidia/inc/class/clc37a.h d301edef2d1dd42382670e5a6ceef0d8caf67d28 - src/common/sdk/nvidia/inc/class/cl90cd.h 1dfae8f11f8e92908f59a1c9493e84ce40d53b90 - src/common/sdk/nvidia/inc/class/cl0070.h 95d99f0805c8451f0f221483b3618e4dbd1e1dd8 - src/common/sdk/nvidia/inc/class/cl90f1.h 99a34eee22f584d5dfb49c3018a8cb9a7b1035ed - src/common/sdk/nvidia/inc/class/cl5070_notification.h c4f090f0dae5bdebf28c514c1b5a9bd8606aa56c - src/common/sdk/nvidia/inc/class/cl9097.h 4b77798281f3754a80961308d44a70b1a717283b - src/common/sdk/nvidia/inc/class/clc46f.h bd2a88f8dbc64add00ad366aa3e76d116cb090b3 - src/common/sdk/nvidia/inc/class/cl0073.h e587a693bc1cee68983a7039ddbf16a3d3461d64 - src/common/sdk/nvidia/inc/class/cl9471.h ddbffcce44afa7c07924fd64a608f7f3fe608ccc - src/common/sdk/nvidia/inc/class/cl0071.h 74c75472658eea77d031bf3979dd7fe695b4293f - src/common/sdk/nvidia/inc/class/cl0092_callback.h fd16daebcd23a680b988dde4ae99625434dcb8fa - src/common/sdk/nvidia/inc/class/cl0000.h c2d8bb02052e80cd0d11695e734f5e05ab7faeb5 - src/common/sdk/nvidia/inc/class/cl907dswspare.h 5ca1d01dab6b9e814160ddce868d00aa9a1ead58 - src/common/sdk/nvidia/inc/class/clc873.h 7c7406d40a09372dcae2aaf3fcad225c3dd2cf3f - src/common/sdk/nvidia/inc/class/cl9010_callback.h 2240664ad950c9c2e64b6f4d18e05349bc91443c - src/common/sdk/nvidia/inc/class/clc573.h 593384ce8938ceeec46c782d6869eda3c7b8c274 - src/common/sdk/nvidia/inc/class/cl900e.h 101da471fe4e167815425793491e43193e407d9a - src/common/sdk/nvidia/inc/class/clc397.h dec74b9cf8062f1a0a8bbeca58b4f98722fd94b0 - src/common/sdk/nvidia/inc/class/cl0076.h 46f74fc51a7ec532330e966cad032782e80808b8 - src/common/sdk/nvidia/inc/class/clcc7b.h 053e3c0de24348d3f7e7fe9cbd1743f46be7a978 - src/common/sdk/nvidia/inc/class/cl0004.h 71e34a03bcfa70edfbec4dbdeade82a932057938 - 
src/common/sdk/nvidia/inc/class/clc637.h 447fe99b23c5dbe3d2a7601e8228a1a1831c6705 - src/common/sdk/nvidia/inc/class/clcc70.h 89ed6dd37fca994e18e03a5410d865b88e1ff776 - src/common/sdk/nvidia/inc/class/clc87e.h 03d873c3a0e0376440f23171640d9c517f7a34e9 - src/common/sdk/nvidia/inc/class/cl902d.h 78259dc2a70da76ef222ac2dc460fe3caa32457a - src/common/sdk/nvidia/inc/class/clc37e.h b7a5b31a8c3606aa98ba823e37e21520b55ba95c - src/common/sdk/nvidia/inc/class/cl402c.h 5ee1adc8d952212b37211c6f4f677ba672f5117c - src/common/sdk/nvidia/inc/class/clcc71.h bd12f7cdc3a01668b9c486dc6456f9263dd459ea - src/common/sdk/nvidia/inc/class/clc57b.h 4b2f2194a1655cc6ae707866f130bbe357d0c21f - src/common/sdk/nvidia/inc/class/clb097tex.h 5409e5af182ac18ef8d13380bdfe7cf2e83d37d7 - src/common/sdk/nvidia/inc/class/clc37b.h aeb4cbab8d1d0fbd0a5747fa36d6f56c00234b2d - src/common/sdk/nvidia/inc/class/clc097tex.h 36fd6906e2688dad2e7ab648be7e070b9eb6f11d - src/common/sdk/nvidia/inc/class/clc971.h 513c505274565fa25c5a80f88a7d361ffbcb08c3 - src/common/sdk/nvidia/inc/class/cl0005.h 53e6252cd85a60698c49a721f4e41da1cb14e5bd - src/common/sdk/nvidia/inc/class/clc97dswspare.h 645adeb829dbcf315bf67ff8387e7a5d982d7b6e - src/common/sdk/nvidia/inc/class/cl00de.h 0f91db32d9e346b4d9f3762c9e59a8f8e5fd0903 - src/common/sdk/nvidia/inc/class/clcc7d.h a24c2a943c7ceceb8d015f5cd02148f8c4e7c23d - src/common/sdk/nvidia/inc/class/clb097.h 691bb932ea3f60d2b9ad3e4d7fa53ab1a2a5e6c5 - src/common/sdk/nvidia/inc/class/clc870.h 758e2fb8b5d89079f03be09d74964e9246cb180c - src/common/sdk/nvidia/inc/class/clc797.h f4af32374be4d05a2e55c97053a4f0d1f4b85154 - src/common/sdk/nvidia/inc/class/cl0000_notification.h 1e578eb23dacca047e0b342cce3024b3134f8de9 - src/common/sdk/nvidia/inc/class/clc7b5.h 941a031920c0b3bb16473a6a3d4ba8c52c1259d7 - src/common/sdk/nvidia/inc/class/cl917e.h b23cdfb66f40c6d9a903f602b8ff4526063b5a2d - src/common/sdk/nvidia/inc/class/clc097.h 0de3548dde4e076cbd0446330b2d5ae4862c1501 - src/common/sdk/nvidia/inc/class/clc973.h ddb996ff90b80c0f58729b9ac89fa6d2d3950e49 - src/common/sdk/nvidia/inc/class/cla16f.h cb610aaae807d182b4a2ee46b9b43ebfa4a49a08 - src/common/sdk/nvidia/inc/class/clc57e.h 9e1d2f90d77e23f1d2163a8f8d8d747058e21947 - src/common/sdk/nvidia/inc/class/cl9010.h 7a14243de2b228f086810f968a1712627f1333fd - src/common/sdk/nvidia/inc/class/clc36f.h 7c8e1f1055f9522cfb2935ea0aae612ef172c26e - src/common/sdk/nvidia/inc/class/clc370_notification.h 64ad2ab88e2006bcdace06e7109981496c39f265 - src/common/sdk/nvidia/inc/class/clc87d.h 36c6162356ac39346c8900b1e0074e4b614d4b5a - src/common/sdk/nvidia/inc/class/clc370.h 5df0ce4eb733554e963eb3c7938396f58f2dd4d5 - src/common/sdk/nvidia/inc/class/cl2081.h a4d82d12346918edd0a7564a5c6cbfe849532b7f - src/common/sdk/nvidia/inc/class/clca70.h 159b78a13e43a2afe6c17714a6f8619675480346 - src/common/sdk/nvidia/inc/class/clc86f.h 6ddba2e93c046ae04f48685c73f8f2d9fe74a398 - src/common/sdk/nvidia/inc/class/clc67a.h 83c6378ef27c8b640895a123801d27e6c4fd3754 - src/common/sdk/nvidia/inc/class/clc671.h 7f75433a769a020d9f36996c855c8ce6ab39dd83 - src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h 31ac68401e642baf44effb681d42374f42cf86b1 - src/common/sdk/nvidia/inc/class/cl00c3.h 95ca0b08eed54d1c6dd76fdf9cf4715007df1b20 - src/common/sdk/nvidia/inc/class/cl0020.h 20d5608c2d6e55efd6d1756a00739f7a05d3a2b3 - src/common/sdk/nvidia/inc/class/clc361.h 9797f4758d534181eeaa6bc88d576de43ba56045 - src/common/sdk/nvidia/inc/class/clc574.h a39d75d3e479aebaf3849415e156c3cfe427298a - src/common/sdk/nvidia/inc/class/clc771.h 
eac86d7180236683b86f980f89ec7ebfe6c85791 - src/common/sdk/nvidia/inc/class/cl957d.h f7a2fea4725d59e95294c397ede001504b777b0d - src/common/sdk/nvidia/inc/class/clc697.h f3f33f70ec85c983acec8862ccaabf5b186de2bb - src/common/sdk/nvidia/inc/class/cl9270.h 8b94512c9746c6976c4efeee0291bf44bb5e0152 - src/common/sdk/nvidia/inc/class/clcc73.h 60d0c7923699599a5a4732decfbcb89e1d77b69e - src/common/sdk/nvidia/inc/class/cl9770.h e0c9a155f829c158c02c21b49c083168f8b00cbe - src/common/sdk/nvidia/inc/class/clc37dswspare.h 499bc681107a2b7ad7af3d2211b582b8fb9d9761 - src/common/sdk/nvidia/inc/class/clcc7a.h e1bfd0c78f397e7c924c9521f87da8286bebe3f1 - src/common/sdk/nvidia/inc/class/cl84a0_deprecated.h 2f291dc867e71f625c59f72787b9fb391a16d0e6 - src/common/sdk/nvidia/inc/class/clc638.h 8d2dcc086f892dd58270c9e53e747513ed4b2f93 - src/common/sdk/nvidia/inc/class/clb06f.h 3d262347ab41547d9ccc28a892d24c83c6b1158e - src/common/sdk/nvidia/inc/class/cla06f.h bae36cac0a8d83003ded2305409192995d264d04 - src/common/sdk/nvidia/inc/class/cl0001.h ba8f5899df4287b8440bcb9c8e09e10db73ebf12 - src/common/sdk/nvidia/inc/class/clc97a.h 7bfcd7cf1735b2a54839e8a734e2227060ebf570 - src/common/sdk/nvidia/inc/class/clc197.h e231c552afb3a78da7341ee49bf36940f1f65202 - src/common/sdk/nvidia/inc/class/clc77d.h 821396a58944ba4620f43cf6ee833b7a04d67193 - src/common/sdk/nvidia/inc/class/clc970.h 1f1879fcddf3c3f1f6c44df0e51822ad1bfa1aae - src/common/sdk/nvidia/inc/class/cl9171.h a23967cf3b15eefe0cc37fef5d03dfc716770d85 - src/common/sdk/nvidia/inc/class/clc372sw.h 02ff42b6686954e4571b8a318575372239db623b - src/common/sdk/nvidia/inc/class/cl30f1_notification.h 4be055f206ef1049e8a5b824f9f4830eba0e224c - src/common/sdk/nvidia/inc/class/cla26f.h ef173136a93cdd2e02ec82d7db05dc223b93c0e1 - src/common/sdk/nvidia/inc/class/clc770.h a3e011723b5863277a453bfcfb59ce967cee0673 - src/common/sdk/nvidia/inc/class/clc670.h f33b9fdad6ceb534530fecfd16b40a71f5f5cfdc - src/common/sdk/nvidia/inc/class/clc56f.h 02906b5ba8aab0736a38fd1f6d7b4f6026a5185b - src/common/sdk/nvidia/inc/class/clc57esw.h aa6387d7ce55a88789c5731e89dedde57115131c - src/common/sdk/nvidia/inc/class/clc97b.h 86ab048c67a075349622c597fa9c4f2a9a3d8635 - src/common/sdk/nvidia/inc/class/cl9571.h 9b2d08d7a37beea802642f807d40413c7f9a8212 - src/common/sdk/nvidia/inc/class/clc37d.h bd9f406625e6c0cce816a5ddfb9078723e7f7fb5 - src/common/sdk/nvidia/inc/class/clb0b5sw.h ab27db8414f1400a3f4d9011e83ac49628b4fe91 - src/common/sdk/nvidia/inc/class/cl987d.h 2614a83d383b540f23ef721ec49af1dfde629098 - src/common/sdk/nvidia/inc/class/cl0080.h 9db39be032023bff165cd9d36bee2466617015a5 - src/common/sdk/nvidia/inc/class/cl0002.h 094bec72bfa8c618edc139bc353b20433f1c1da2 - src/common/sdk/nvidia/inc/class/cl2080_notification.h e72a7871d872b2eb823cc67c0a7d4cafb3d0ca18 - src/common/sdk/nvidia/inc/class/cl90ec.h 0ad3b3e00dc83a0487bd96abd5fe467213aa51ad - src/common/sdk/nvidia/inc/class/clc597.h 869e41c3ba08d704fcf00541075986de43d6b090 - src/common/sdk/nvidia/inc/class/cl917b.h b685769b5f3fed613227498866d06cc3c1caca28 - src/common/sdk/nvidia/inc/class/cl2082.h 4c0d054bd0d9935d8d2cedba3f5e910d6b6f8ed3 - src/common/sdk/nvidia/inc/class/clc997.h 1697a9ed528d633a1e78c0071868d7dff899af26 - src/common/sdk/nvidia/inc/class/clc57a.h 8e85d29d4006dbd3a913fcc088be5e8c87bbdabb - src/common/sdk/nvidia/inc/class/cl0100.h 15d1f928a9b3f36065e377e29367577ae92ab065 - src/common/sdk/nvidia/inc/class/cl0080_notification.h e3bd2cacd357e411bc1b6b7d7660ffa97c3a7ee3 - src/common/sdk/nvidia/inc/class/clb197.h 16f9950a48c4e670b939a89724b547c5be9938bf - 
src/common/sdk/nvidia/inc/class/clc570.h 060722ac6a529a379375bb399785cbf2380db4fd - src/common/sdk/nvidia/inc/class/clc373.h bd910ff84b9920af83e706a8ab37c68157a372c8 - src/common/sdk/nvidia/inc/class/clc97e.h b71d1f698a3e3c4ac9db1f5824db983cf136981a - src/common/sdk/nvidia/inc/class/cl9170.h 2a031d85b85c4b1e5b278f6010ca8f33b2192de1 - src/common/sdk/nvidia/inc/class/cl90e7.h 9ceb4ec8538818c8b1dcc7ffe885584b8e0f435e - src/common/sdk/nvidia/inc/class/cla097.h a9503a5558b08071f35b11df9a917310947c378b - src/common/sdk/nvidia/inc/class/cl00da.h d8000ab8ef59e64d17b4089c43953ca69b7f605f - src/common/sdk/nvidia/inc/class/clc67e.h 6400b9ad3460dafe00424e3c1b1b7a05ab865a63 - src/common/sdk/nvidia/inc/class/cl50a0.h 7032fd79731907df00a2fe0bbf6c0f4ce87f021d - src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h b11e7b13106fd6656d1b8268ffc15700fba58628 - src/common/sdk/nvidia/inc/class/clc371.h ff47d8a4b4bdb3b9cd04ddb7666005ac7fcf2231 - src/common/sdk/nvidia/inc/class/cl003e.h 0285aed652c6aedd392092cdf2c7b28fde13a263 - src/common/sdk/nvidia/inc/class/cl00fc.h 81b4e4432da8412c119e795662819cfe7558711f - src/common/sdk/nvidia/inc/class/cl917a.h 38265d86eb7c771d2d3fc5102d53e6a170a7f560 - src/common/sdk/nvidia/inc/class/cl0041.h 848c89981de73d681615266e4e983b74c2ef418f - src/common/sdk/nvidia/inc/class/cla06fsubch.h 2d76476dba432ffc1292d2d5dd2a84ff3a359568 - src/common/sdk/nvidia/inc/class/cl0092.h b46b2cfcf72fc2f9722bd42cea8daaeeda861471 - src/common/sdk/nvidia/inc/class/clc871.h 022e8405220e482f83629dd482efee81cc49f665 - src/common/sdk/nvidia/inc/class/clc77f.h fe7484d17bc643ad61faabee5419ddc81cf9bfd6 - src/common/sdk/nvidia/inc/class/cl9570.h bb79bbd1b0a37283802bc59f184abe0f9ced08a5 - src/common/sdk/nvidia/inc/class/cl0040.h 6249715d9876f5825ad62f563bf070e93710a2ad - src/common/sdk/nvidia/inc/class/clc67d.h b1133e9abe15cf7b22c04d9627afa2027e781b81 - src/common/sdk/nvidia/inc/class/cl917c.h 7ef21c4f4fd4032c8f25f8fb33669e692a26e700 - src/common/sdk/nvidia/inc/class/clcb97tex.h 73b706e4916f4c70302387c88c8e14e7b2c1f4e6 - src/common/sdk/nvidia/inc/class/clc67b.h c40fd87fa6293d483b5bf510e2e331143ded9fa4 - src/common/sdk/nvidia/inc/class/cl9470.h 20894d974d1f8f993c290463f1c97c71fd2e40b1 - src/common/sdk/nvidia/inc/class/cl30f1.h 9f7f04825f3f218cc0c4610938935e2f0a73e13b - src/common/sdk/nvidia/inc/class/clc97d.h 04ab1761d913030cb7485149ecd365f2f9c0f7da - src/common/sdk/nvidia/inc/class/cl0005_notification.h da8d312d2fdc6012e354df4fa71ed62ae4aac369 - src/common/sdk/nvidia/inc/class/cl927c.h 158c98c8721d558ab64a025e6fdd04ce7a16ba9e - src/common/sdk/nvidia/inc/class/cl947d.h 5416c871e8d50a4e76cbad446030dbedbe1644fd - src/common/sdk/nvidia/inc/class/cl00f2.h 0b35244321b1f2f6647f8389f6fa7254c34790e2 - src/common/sdk/nvidia/inc/class/cl90cdtrace.h 39161706917567f434a6fff736b22f3358923e68 - src/common/sdk/nvidia/inc/class/clc06f.h bc3674f2384cb3695ce5f035ed16e9c39bba4d1b - src/common/sdk/nvidia/inc/class/cl00fe.h dd4f75c438d19c27e52f25b36fc8ded1ce02133c - src/common/sdk/nvidia/inc/class/cl917cswspare.h 435a34753d445eb9711c7132d70bd26df2b8bdab - src/common/sdk/nvidia/inc/class/cl917d.h b31019107ada7b0fb8247c09d93b95a630821fa8 - src/common/sdk/nvidia/inc/class/clcc7e.h 31939808cd46382b1c63bc1e0bd4af953302773f - src/common/sdk/nvidia/inc/class/cl977d.h 83427e3172c64c3b9ef393205ccc3b961ec65190 - src/common/sdk/nvidia/inc/class/cl5070.h db8dd50ad3e64fe0472d82c0940908d5da5e0321 - src/common/sdk/nvidia/inc/class/cla0b5.h 28867d69a6ceac83da53a11a5e1ef87d9476f0be - src/common/sdk/nvidia/inc/class/clc57d.h 
8b07d7aca050be883fdc0d6f4b19eac0b0b6c796 - src/common/sdk/nvidia/inc/class/clc673.h c116d91177c6cbfb8c25e7f35bb49a8d5a51816c - src/common/sdk/nvidia/inc/class/cl008f.h 4fc2133935b8e560c9a1048bc0b1f1c2f0a4464c - src/common/sdk/nvidia/inc/class/cl00c1.h 5a6098f821e8faa19345313477726431f9271cde - src/common/sdk/nvidia/inc/class/clc661.h 6db83e33cb3432f34d4b55c3de222eaf793a90f0 - src/common/sdk/nvidia/inc/class/cl00b1.h 5b573deb4d68ccb67d9cccc11b28203c5db3d2f7 - src/common/sdk/nvidia/inc/ctrl/ctrl0002.h 88947927d79e15df8cbf77a59ac883a29e970413 - src/common/sdk/nvidia/inc/ctrl/ctrlc638.h 625af1df5c9453bd35a9e873ee5c77e73d5fd195 - src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h ade4a731f59c7cd16b4a60d318a19147b9918bb9 - src/common/sdk/nvidia/inc/ctrl/ctrl0004.h 90843f8173a341deb7f1466cd69a17114c6b9e4f - src/common/sdk/nvidia/inc/ctrl/ctrl90f1.h a305225ceda0a39c76ed61b819a1f4165f5644f5 - src/common/sdk/nvidia/inc/ctrl/ctrl00fe.h be3c9e2de8b8d33fe04389b224fa6ad95ecd089b - src/common/sdk/nvidia/inc/ctrl/ctrla06f.h c3e3213f548f93592f7d3dfd76e63a2102d800ec - src/common/sdk/nvidia/inc/ctrl/ctrl0076.h d7415e78725899f9d10fa2d5f03f3d62cef42f26 - src/common/sdk/nvidia/inc/ctrl/ctrlc36f.h 9e343f73f46238075cef766cad499533559dfa28 - src/common/sdk/nvidia/inc/ctrl/ctrl00da.h f7601ce8c7c2d7a1143bff5280e3e5d9b5c4c147 - src/common/sdk/nvidia/inc/ctrl/ctrl906f.h 97ac039e796faca6c9f78e16020fe96225b33492 - src/common/sdk/nvidia/inc/ctrl/ctrlc637.h fe7ce28fe76174a6de68236b44ea565ba2ea687b - src/common/sdk/nvidia/inc/ctrl/ctrl00de.h 3ba6904c69aa7710c4561d5643b18fc41e141d4e - src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h b178067ba5f93e7fafb4c2ee0f5032acf9bc55d7 - src/common/sdk/nvidia/inc/ctrl/ctrla081.h 58a5d3a55b2d9b29d4f1b1e7b5d4d02ae6885e30 - src/common/sdk/nvidia/inc/ctrl/ctrl003e.h 16a24249210637987d17af6069ae5168404743ee - src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h 58f8e48d5851cc10e3c5fd3655d7948b9f327ca0 - src/common/sdk/nvidia/inc/ctrl/ctrl2080.h b86c4d68c5758f9813f00cc562110c72ef602da7 - src/common/sdk/nvidia/inc/ctrl/ctrl90e7.h c042a366bc755def9e4132e2768c1675871dbe65 - src/common/sdk/nvidia/inc/ctrl/ctrl0041.h c8b2e0e64bb3cf3c562dee5fa7913035f82d8247 - src/common/sdk/nvidia/inc/ctrl/ctrl402c.h 352825959d98fe9b47a474cfdd154d380c80d24e - src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h 9d908bb15aecc9d8094e1b6c13301efba6032079 - src/common/sdk/nvidia/inc/ctrl/ctrl0080.h 3fcf5dbb82508d88a040981a7ab21eac1466bb2b - src/common/sdk/nvidia/inc/ctrl/ctrl0073.h bfee287b190fd698735c5660592741ba5c25a8ea - src/common/sdk/nvidia/inc/ctrl/ctrl0020.h 2e65ccd2704919780a152c69f53400a0dc5e6e41 - src/common/sdk/nvidia/inc/ctrl/ctrlb06f.h 4fb7753f3502303314d9e8f853ee3b752f7e9317 - src/common/sdk/nvidia/inc/ctrl/ctrl0100.h 8764e07e9d348163db4eb41b0c3cf32c76172c0d - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h 5782a19aeaf9695c13940cf4532e41523a8460e3 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h f21c15122509a8843e676a2bd5e799c58cd96379 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h 326b61039197db58d8369256f6d7dc9764aea421 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h e7452921bdbd036ca3a37c60c49829c05e95c2d5 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h 5f3b68d39f14137d33f239408a6a13543f4ac966 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h d08ef822e97ee56984618d52ed3ed55ee395eadb - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h 8fcc64b22b0f6cde40d5ecd23e5e2444277a5999 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h 70d65d4f923ec0efd8931433ae50930d12f78a07 - 
src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h a33a1c1173962183793d84276e46c61d27ca867e - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h 1b594c39d1439c3d1ecc24c4325b2ea8c2724548 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h 0146d2b3ecec8760e76dacd8ce6bb75c343c6cac - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h 11abea0cdf485863196de56169451980ee6c016a - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h 4f0ccb0667bd3e3070e40f3f83bede7849bc78e4 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h 08dda80bac8d3418ad08e291012cf315dc9e5805 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h 28b06c8f8152dce2b2e684a4ba84acd25a8b8c26 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h add9e3867e3dbd2c11bed36604680af4aaa0f164 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h 2ffb93d092df65570b074ad97f0bb436a1c66dff - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h 79fd7ed84cb238ea90ea3691f40ea7140034d3dc - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h 2ea79d79223b06633fb7f541ebbe5a300ba3885f - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h 44c9aa512eb0b9b92cace9e674299f2a9227c37c - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h a3328cf6633f9b04258eff05ce30e66cc6930310 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h a427892e601a4ca4f88cc5778ff78895324f3728 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h 92ff82d1045933baa79958a9f6efd451b0123e95 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h 7ef9e10955708592e92e127eb3fb372adff44818 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h 3c1bd0db339456c335acd50a75ace42cb8bbe6f8 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h be10e3f4a9dd2f2ab35305ee0af628ef339b25a7 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h db66195c8e7252c5f424953275cbb7be90a17ba8 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h c74ac448c3382d92e662804b56e73edd748e2678 - src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83debase.h 7318f74523bb6a015e561dba1a06b47a278d856d - src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h 702d9cb471a344a25911449cc580f69f7155ab1c - src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h 3f747a4fc98291329e0245a971248cf2c28a1b60 - src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h 19c7eff334c591c803dcd93fc0818798c281df48 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbase.h c7dcbc0ae7454df6523c6deb5f07a70dc2fdbc15 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgpu.h 882b13d54585a6fc5534d12b9cdcec29c8cde337 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fucodecoverage.h 76fb63a6782ff1236303fdd7bf2698f42965a266 - src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7base.h 00d2655f569187190bd117bdf37fe4ddd5e92320 - src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7bbx.h 8064c31eb1e447561c415f9835aecac97d5f3517 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h 713aa1291aef3f79304ad35c5143a7576f242f63 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h bb7955387f6a286927e7922019676ca0aba713e6 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h 35367f08b96510a5312653b5197d6bb34c0a3d00 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h a0cf9dfb520e3320cd9c154c01cd2f1a7bbbd864 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h c2066c407f81538047c435fffca2705c28107663 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h d727b328e995a7d969ec036f2d5b52264568a7bf - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h 
52f251090780737f14eb993150f3ae73be303921 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h 77eb4fab61225663a3f49b868c983d5d532ca184 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h 6ca26c7149455e43f32e8b83b74f4a34a24a2d29 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h 134d43961ea1d42fc36d75685fdd7944f92b0b53 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h 022feef64678b2f71ab70dc67d5d604054990957 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h 2a00952f0f3988c5425fec957a19d926ae75ba28 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h 79b38bbe679d397b48b78266aa5f50459fe5b5bc - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h 514d012dbfd9e056b7f729bccb213fa9193d433e - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370or.h 6ef99465758f71f420ac17765380cc37dbcac68a - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h 5f70c2eb6a144bc4d7ca8be63fa46391909e8201 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h f4ed3ccff4720114d1aaed82484ed70cf07626db - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h ba3b73356bf0d1409ecfd963b623c50ec83f1813 - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06finternal.h bb0a5ff091ef854b19e7da0043b7b7b10232c3de - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fbase.h 1f25c9f215991f34fee94dafac5fad0e7460db1c - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h ddeb0df51d5f662948f9098a5d85b40c8ab6504b - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h e3fb93f0ff3469ec76cecdc6f0bf1c296551a2b1 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h a138379dd76c468072f1862b8fc6ae79ee876b4e - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h ee99443c1bd3441df474566622486b04c4502ac0 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h 44e1b06211eee31e42e81879f5220f26ddec70ae - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h ff789d585a7f001b8bd32e07a268c635d39b17ab - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h 03f54e22b39ad5cf682eada7147c6c155f16b385 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h e8d883de767aa995a374d8da56b5c9da8787cb1d - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h 8fdb493bda6119025c1d00f289a6394e7dcd1b53 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h cfa32c37f373eeef53aedc3f4dffff1634c122e8 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h 41a0a14e04527fa2c349d2895bb41affd154c999 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h ecd312fabb249a25655e151cee3615c5ab61ffa7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h c30b5995d353e68623b32fea398f461351e3b8f1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h aa0f685b94bdae99a58aa1a45735b0593a2e6f5a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h aa86ffd04a55436ecacbedb1626f6187bbddedf7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h 3423a69bba50e1405b5a7d631bfff1f6f0a1673f - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h 1990d0c4fa84c6d078282d4d7d0624ccb0325ce7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h 146263409e5304f661da349b56761ab7403144bd - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h 8b622186edb156e980d02bd59a71c01923d1aa23 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h 70dc706ea4ee7b143a716aae9e4f8c0bcef6c249 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h 0a156fc54f45386fabd06ef5ec11ba3a816fbfb7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h c157e185d3c64ee9476ddc75881bfc5a5b8b997f - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h 
785d96360f86bc53eb428fd3f4fbeda395400c8a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h b8e8c5ccab01d7997d1fd5579a690cb3279a8ab3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h b2eecbca32d87b939858bf0b22f93c06b49b3a04 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h 24a891a02e1a882769d4da3454e4dfcf42b1ea6c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h 6969b092708d57f88b0f0fdbb3464c786f90710c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h 013bd8d50841ea314f5ea2bd507470f2c3aff831 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h d63388ff48ca055c82bcd6148506eacd0e26b4dc - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vgpumgrinternal.h 96f72ec608cd198be995f3acd9c04afe7c7e6dc8 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h 359c6b06f2712a527d1ef08465179c14a8b4a751 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h 4c2af959d06536294d62b2366a6ba61ca744bd50 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h d15e8e86ca66b3a69a774e322dfdd349b9f978b9 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spdm.h 898fa08818b657c27b456d952e7a4e09d8d197ee - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h 9933e90ad92eb7df2f64dcc30dcd680d5f7c530d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h 18d1a44b7113c1707bbf5c65fb1be790304c0bed - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h 0cd5e883dfafb74ce2ec9bccca6e688a27e6cfa9 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h 07f82ae90cde3c6e2e6c5af135c40e01660c39a3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h c8f1c115d78bab309c0a887324b0dabfb8f9ea2d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h ecceb8f7382c8f55c6ccd0330e14ccbc49fcd09c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h 2577a1d505a3d682e223fbcbc6d4c7d13162749d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h d3969094e68f9d584ba9c6fb5457801caff6ccc1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmu.h 74f1abf45a2a0f60c82e4825b9abfa6c57cab648 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h 115f683e5926ae130de87e4cea805ef6915ed728 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h d4ba227a522423503e5044c774dbcca692c48247 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h 97bb79e74b25134fa02a60d310b3e81170df6fd6 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h baeb07c8bdadf835db754452f63d40956bc6a199 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h 338c7de5d574fe91cda1372c5221e754d4c4b717 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h 4e4a4f9e94f2d7748064949f4b16845829670bf6 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h 5ac6c9a299256935259eaf94323ae58995a97ad7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h e4441458a7914414a2092f36a9f93389ed65154a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h b55e4cf81b6112868eb6f6cd9c1a3b32f8fcda49 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h 302f79771fcdba3122cf61affb53e0a3a3a27e6d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h 5c7b955ef5e6f6ca9c0944e8a2b2c4a1ae760e04 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h 93a9fa93eb3d1099991e4682b6228124220ca293 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h 7f1af5b788616bab285a73bab5098fb6d134b159 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h 51dbd71f1cd5a66dd7a5b0fbb753713d27ff937c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h cf1757ff453132fb64be0dec6c50eb935db29784 - 
src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink_common.h 59254e4bdc475b70cfd0b445ef496f27c20faab0 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h 119432bbce99e91484a2bac79ca5257a36a7f98b - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h 7f15697ca8645f77352f88c2a84713f348e98a24 - src/common/unix/nvidia-3d/include/nvidia-3d-vertex-arrays.h 220ac9628fe5afa0191b8c20304402baf0f70353 - src/common/unix/nvidia-3d/include/nvidia-3d-fermi.h 23478354284aa1be69bc70fa4157aa408177829c - src/common/unix/nvidia-3d/include/nvidia-3d-volta.h 75859a11c0fae125a0619c47ead964416ac8d6ed - src/common/unix/nvidia-3d/include/nvidia-3d-pascal.h e621c127011311e8f97c8784d8539751a820bf47 - src/common/unix/nvidia-3d/include/nvidia-3d-maxwell.h 07fc2cd8495309f1218b9ddee4a4809b6dcb65a3 - src/common/unix/nvidia-3d/include/nvidia-3d-types-priv.h 1276b525f23b582e029c2ddc9ed0115f8e9dafb4 - src/common/unix/nvidia-3d/include/nvidia-3d-hopper.h 5030b264e17b70df0c99bc9da4350bdb48f2f60a - src/common/unix/nvidia-3d/include/nvidia-3d-kepler.h 146b4f305bfe710622a878fe3e9afd4f834124b8 - src/common/unix/nvidia-3d/include/nvidia-3d-turing.h 61f0a408812c04a59fb8f12713ce34d2ed544fe3 - src/common/unix/nvidia-3d/include/nvidia-3d-surface.h e7a4acaef431a49ca7efd6bf72b6e8b57fafbab0 - src/common/unix/nvidia-3d/include/nv_xz_mem_hooks.h 40a9c57cca5b2f8acfe3ead472dcf0adc9423050 - src/common/unix/nvidia-3d/src/nvidia-3d-vertex-arrays.c af1a4d99bd19b72de120ba2046f35b95650985b1 - src/common/unix/nvidia-3d/src/nvidia-3d-volta.c f78f737f1dfb52cf248543cced017a8fbad7b270 - src/common/unix/nvidia-3d/src/nvidia-3d-surface.c 4ea7a2a6811239760a1b56833fb07dbf8a99a10e - src/common/unix/nvidia-3d/src/nvidia-3d-hopper.c e43e6ce6b9781d44b68868703fdbb779fc95f5d4 - src/common/unix/nvidia-3d/src/nvidia-3d-kepler.c 09fa5fbae25e08c819277566d7281f17305863f8 - src/common/unix/nvidia-3d/src/nvidia-3d-turing.c e0ef9ab77cfdf207c800a9c067739add28632047 - src/common/unix/nvidia-3d/src/nvidia-3d-pascal.c 57f19f6aa7b896794aafacd978b2469d976f6f78 - src/common/unix/nvidia-3d/src/nvidia-3d-maxwell.c 08c29625af227debb72dd703630a754ac4fbeee0 - src/common/unix/nvidia-3d/src/nvidia-3d-core.c 7ca41841cc54bd597f5c10cc346b8f574b1c2acf - src/common/unix/nvidia-3d/src/nvidia-3d-fermi.c d0331b7ebba0537af50bdf5815d9c048cbeb3388 - src/common/unix/nvidia-3d/src/nvidia-3d-init.c 569a662ce5f79dc450f44eeb7a0ff36580ba27fe - src/common/unix/nvidia-3d/interface/nvidia-3d-types.h a06524af04de90562b08b6b26783232cf7ff01d4 - src/common/unix/nvidia-3d/interface/nvidia-3d-utils.h 3e97ecc773087c0c7f370faf0a9ff838793c9bd6 - src/common/unix/nvidia-3d/interface/nvidia-3d-color-targets.h 2d91e6f3ad425d3ca95de79ecb929b22cac57f52 - src/common/unix/nvidia-3d/interface/nvidia-3d-shaders.h fd454a2318e970e6b1cb4a4b7b5633e4cb2e8b45 - src/common/unix/nvidia-3d/interface/nvidia-3d.h 34daeec12bbf45f0f85406afc56414da45afc2e6 - src/common/unix/nvidia-3d/interface/nvidia-3d-shader-constants.h 727210acfe72963aa6dddf1bcee91dc122897113 - src/common/unix/nvidia-3d/interface/nvidia-3d-constant-buffers.h 069b576dc1f03143999512cd03fc48fe18ed6706 - src/common/unix/nvidia-3d/interface/nvidia-3d-imports.h 2476f128437c0520204e13a4ddd2239ff3f40c21 - src/common/unix/common/inc/nv-float.h 881cbcc7ed39ea9198279136205dbe40142be35e - src/common/unix/common/inc/nv_assert.h cb7c13757ca480e10b4ef3e3851d82ad5ccca3f1 - src/common/unix/common/inc/nv_mode_timings.h d5253e7e4abd3ad8d72375260aa80037adcd8973 - src/common/unix/common/inc/nv_dpy_id.h 3e64a8fe60bb1266a769be8a5c0716e10c816b38 - 
src/common/unix/common/inc/nv_amodel_enum.h 995d8447f8539bd736cc09d62983ae8ebc7e3436 - src/common/unix/common/inc/nv_common_utils.h edded9ca3d455444372fe6c497b2d61bd0cc3f96 - src/common/unix/common/utils/nv_memory_tracker.c 7bccb5a3dea9208f0fbd86d36efc369f215d5c3c - src/common/unix/common/utils/unix_rm_handle.c 26f2a36442266c5d2664d509ecfd31094a83e152 - src/common/unix/common/utils/nv_vasprintf.c e903bbbecf4fb3085aaccca0628f0a0e4aba3e58 - src/common/unix/common/utils/nv_mode_timings_utils.c 667b361db93e35d12d979c47e4d7a68be9aa93b6 - src/common/unix/common/utils/interface/nv_mode_timings_utils.h 07c675d22c4f0f4be6647b65b6487e2d6927c347 - src/common/unix/common/utils/interface/nv_memory_tracker.h 8d9c4d69394b23d689a4aa6727eb3da1d383765a - src/common/unix/common/utils/interface/unix_rm_handle.h 9e008270f277e243f9167ab50401602378a2a6e8 - src/common/unix/common/utils/interface/nv_vasprintf.h 673bbd33569f55a900b5388a77d19edd3822ecf3 - src/common/unix/xzminidec/src/xz_dec_stream.c 9c67bdcbea04fbe1a5b2746549e502cdc368b54e - src/common/unix/xzminidec/src/xz_config.h f2cfbcf1e2cb1d7545b5de609a4e7672bf8ae976 - src/common/unix/xzminidec/src/xz_dec_bcj.c 93af3bcdf863afa9655107c86f49aefdf9c05d90 - src/common/unix/xzminidec/src/xz_lzma2.h fba46fe8f4a160d71a708578a85ab6731e4e024f - src/common/unix/xzminidec/src/xz_crc32.c 0ce26be0fb63a7ae52e2bb15a1770c80b9a5ac84 - src/common/unix/xzminidec/src/xz_stream.h 8365ec8d875fad74507d49228ad8959c66bbc360 - src/common/unix/xzminidec/src/xz_dec_lzma2.c 2ade48b4c53fc3bebf1587bc0a1a08b26cd5981d - src/common/unix/xzminidec/src/xz_private.h c2a87873eeff2a8010bb8a2cb8d1df28a20a0097 - src/common/unix/xzminidec/interface/xz.h 4498dc65d71b2b8635b365550e5e521da14c8e6b - src/common/unix/nvidia-push/include/nvidia-push-priv.h 4847b168b4f5e78dbb92cfec80734789a9131b87 - src/common/unix/nvidia-push/include/nvidia-push-priv-imports.h 616dd99d8dda5dbe35032a5fc558ff48f7cc1620 - src/common/unix/nvidia-push/src/nvidia-push-init.c 0916485ec1ff275771d88a725dcbf586663dbc33 - src/common/unix/nvidia-push/src/nvidia-push.c 548f9e591d2c851b157575e1b83e25eb47bc61e6 - src/common/unix/nvidia-push/interface/nvidia-push-methods.h 5f5013bdbda9582252db2e92a105a57f24ca7d96 - src/common/unix/nvidia-push/interface/nvidia-push-init.h f3576444d1dbcc4e9379bee6151ef8c7a382e276 - src/common/unix/nvidia-push/interface/nvidia-push-utils.h 918c4f2e2edd0a52c7085f758286dacd21b5b4c5 - src/common/unix/nvidia-push/interface/nvidia-push-types.h b54add7dea08ff736ac27ee259f6ccb389c01f09 - src/common/unix/nvidia-headsurface/nvidia-headsurface-types.h 5d014581148b38eede1d31a1f48e388cf6eb7a45 - src/common/unix/nvidia-headsurface/nvidia-headsurface-constants.h e1fbb040ea9d3c773ed07deb9ef5d63c8c8cab7a - src/common/inc/nvSha1.h 8f0d91e1a8f0d3474fb91dc3e6234e55d2c79fcc - src/common/inc/rmosxfac.h bcad75550591ede46152403e40413f87e85b0a80 - src/common/inc/nvlog_defs.h ebccc5c2af2863509e957fe98b01d9a14d8b0367 - src/common/inc/nv_list.h 0e970acfcadddd89fae91c812647fecb80c98d52 - src/common/inc/pex.h 73e2133709eb920a92fcebf7aaab958020493183 - src/common/inc/nvctassert.h 6fa5359ffe91b624548c226b6139f241771a9289 - src/common/inc/jt.h 489ce9f046d9c2ff95a1284ab5e04b5843b874ae - src/common/inc/nvVer.h 7ab322addb3e1ba880cee07dc0d26d882db097b0 - src/common/inc/nvCpuIntrinsics.h d9c0905f374db0b9cc164ce42eab457d1ba28c53 - src/common/inc/nvop.h d70c17a0693c8b5dbf7c83f693eec352ce22917c - src/common/inc/nv_smg.h b4c5d759f035b540648117b1bff6b1701476a398 - src/common/inc/nvCpuUuid.h 4282574b39d1bcaf394b63aca8769bb52462b89b - 
src/common/inc/nvBinSegment.h 8c41b32c479f0de04df38798c56fd180514736fc - src/common/inc/nvBldVer.h 62e510fa46465f69e9c55fabf1c8124bee3091c4 - src/common/inc/nvHdmiFrlCommon.h 82aadec9509f41eab58727c3498dc24a30a0128e - src/common/inc/nvrmcontext.h d74a8d4a9ae3d36e92b39bc7c74b27df44626b1c - src/common/inc/nv_mig_types.h a346380cebac17412b4efc0aef2fad27c33b8fb5 - src/common/inc/nvlog_inc2.h e670ffdd499c13e5025aceae5541426ab2ab0925 - src/common/inc/gps.h 963aebc9ec7bcb9c445eee419f72289b21680cdd - src/common/inc/hdmi_spec.h 987027bed503d8ce5ad01706aae4a16ee37f3e2d - src/common/inc/nvSemaphoreCommon.h 5257e84f2048b01258c78cec70987f158f6b0c44 - src/common/inc/nvlog_inc.h 4a88a536b71995db70e3a83a48d47072693ec69d - src/common/inc/nv_speculation_barrier.h 2408132586b69e580ff909f7f66451aa2882abff - src/common/inc/nvPNPVendorIds.h 4f7ca8fb43d6885cf60869ed241476032f20f5f3 - src/common/inc/nvUnixVersion.h 23edf9cce2608c494dad045b9466b8f3a18bab56 - src/common/inc/displayport/dpcd20.h ecc26f6fae35818791733c1a56ea1b556bba7f4f - src/common/inc/displayport/displayport2x.h aad6f14dacdb166a8d884cae6c5f382d98e5c46c - src/common/inc/displayport/dpcd14.h 27572a26d0a0a32f38606323ea6da65096bac039 - src/common/inc/displayport/displayport.h 8f7c9c19a76eca84fc2556841042c2f1c3d07a1a - src/common/inc/displayport/dpcd.h 4ee8a4d2a0fe12d348ac4c1a1e0a22bd272e146d - src/common/inc/swref/common_def_nvlink.h e182f9538fea08b5d25f3e74083a7a12a7d49809 - src/common/inc/swref/published/nv_ref.h 641e9803749cbeeca1149c43fe2da5e6edf25137 - src/common/inc/swref/published/nv_arch.h 059493ce7d5390b7e859a19d1a24752df8126ace - src/common/inc/swref/published/turing/tu102/kind_macros.h 86a59440492fd6f869aef3509f0e64a492b4550d - src/common/inc/swref/published/turing/tu102/dev_mmu.h 38589617aab40efdd86b401a18d1e28b5d3b9f8e - src/common/inc/swref/published/disp/v05_02/dev_disp.h 1ea0c3d6ea0c79c01accc7b25d15b421ab49a55d - src/common/inc/swref/published/disp/v04_02/dev_disp.h c01e4a95ede641ff5a9e6918b39db4d2099c91cb - src/common/inc/swref/published/disp/v05_01/dev_disp.h 04345c77f8c7a8b4825f0cb7fc96ca7c876af51c - src/common/inc/swref/published/disp/v04_01/dev_disp.h 1604a3fa3e3142118c82a1dc621cdac81806195a - src/common/inc/swref/published/disp/v03_00/dev_disp.h c4f12d6055573a19f9211fdddd3778575e2a17fd - src/common/inc/swref/published/disp/v02_04/dev_disp.h 64c123c90018c5ee122b02b02cbccfcd5ec32cab - src/common/inc/swref/published/t23x/t234/dev_fuse.h b5ce995e9e5afcd73d39642e31998e087ea133e8 - src/common/shared/nvstatus/nvstatus.c 08816a33e698308c76f3a026c29d0dcb41c5ee20 - src/common/shared/inc/compat.h 9231ac111286772170925e8f6cf92bde5914abb8 - src/common/shared/inc/nvdevid.h 750ecc85242882a9e428d5a5cf1a64f418d59c5f - src/common/displayport/inc/dp_object.h a6ff1a7aee138f6771c5b0bbedb593a2641e1114 - src/common/displayport/inc/dp_messages.h 80380945c76c58648756446435d615f74630f2da - src/common/displayport/inc/dp_timeout.h cdb1e7797c250b0a7c0449e2df5ce71e42b83432 - src/common/displayport/inc/dp_merger.h 070b4f6216f19feebb6a67cbb9c3eb22dc60cf74 - src/common/displayport/inc/dp_buffer.h 02b65d96a7a345eaa87042faf6dd94052235009c - src/common/displayport/inc/dp_messageheader.h 78595e6262d5ab0e6232392dc0852feaf83c7585 - src/common/displayport/inc/dp_auxbus.h e27519c72e533a69f7433638a1d292fb9df8772e - src/common/displayport/inc/dp_crc.h b2db6b37515f7c979e18686694546b9fa5145459 - src/common/displayport/inc/dp_hostimp.h 29ee5f4ef6670f06e96c07b36c11e3bad8bee6aa - src/common/displayport/inc/dp_address.h 575f4f97189ad6b4944bdd4127cdbee79d8c688d - 
src/common/displayport/inc/dp_groupimpl.h cf09c061fa898cd84edd34a9457726abc501b03c - src/common/displayport/inc/dp_configcaps.h afa1135330de2ce8f1a6d20e99b54f507b5adbbd - src/common/displayport/inc/dp_evoadapter.h 01f1dd58ed5bb12503fa45be7a6657cde0a857e2 - src/common/displayport/inc/dp_guid.h cca426d571c6b01f7953180e2e550e55c629f0f4 - src/common/displayport/inc/dp_auxretry.h a086546bf92d7e5e9adf66dcac012b3dc81c2597 - src/common/displayport/inc/dp_internal.h f6e1b0850f5ed0f23f263d4104523d9290bb8669 - src/common/displayport/inc/dp_vrr.h 2f134665b274bb223c3f74e0ec5c6a0392fa6387 - src/common/displayport/inc/dp_discovery.h 07d22f84e6a386dad251761278a828dab64b6dd5 - src/common/displayport/inc/dp_bitstream.h f09aae8321de23e0a48072d0e082aecb84a3ebbe - src/common/displayport/inc/dp_mainlink.h cae50568f7bef4a2a69c4d718a5297b9ae15da3f - src/common/displayport/inc/dp_deviceimpl.h eb9cdbb0a907926b1afd2a551ec19830f06ae205 - src/common/displayport/inc/dp_splitter.h 5bd3706ceea585df76a75dda7f9581b91ee8f998 - src/common/displayport/inc/dp_tracing.h 4a098c4d09dedc33b86748d5fe9a30d097675e9f - src/common/displayport/inc/dp_list.h 6c87ce702f215b21c1ab0064a2a85b3eda96ecec - src/common/displayport/inc/dp_edid.h be558902391fb6cb5085652b560391b54befca4b - src/common/displayport/inc/dp_printf.h 379d3933c90eaf9c35a0bad2bd6af960a321465f - src/common/displayport/inc/dp_wardatabase.h 2016714a04d46ac8412ef55d2156d86ba4d594eb - src/common/displayport/inc/dp_auxdefs.h e2075486b392d6b231f2f133922ac096ca4bc095 - src/common/displayport/inc/dp_ringbuffer.h 09c80a469f1e7e0edd6381578d66fd0e789bc0db - src/common/displayport/inc/dp_regkeydatabase.h 7622cb576c2ebbfe65c0f6132d8561ab1815f668 - src/common/displayport/inc/dp_qse.h dd420c9e7c271d8bea047d431667524105473e95 - src/common/displayport/inc/dp_linkconfig.h e02e5621eaea52a2266a86dcd587f4714680caf4 - src/common/displayport/inc/dp_linkedlist.h 430f42522a1e60f2420aa2e4e471aa20945d0253 - src/common/displayport/inc/dp_timer.h 0f71b80d0a0d53fc6581ef341a4e637a467a3795 - src/common/displayport/inc/dp_connectorimpl.h c8c55dfc7b085b421b01bd9dc7b74abe6f9a0932 - src/common/displayport/inc/dp_connector.h 78ef30b2caf2cf4ff441b5613a796b93ae8973bd - src/common/displayport/inc/dp_messagecodings.h 1363fca23628f312c4b6b0c868b8a43f4a8a5a24 - src/common/displayport/inc/dp_watermark.h d2b00a849a81f6c6092e3b2c4e7ed20fcee62b39 - src/common/displayport/inc/dptestutil/dp_testmessage.h 70b155b0da07a92ede884a9cec715f67e6b5c3e8 - src/common/displayport/src/dp_list.cpp 107b170d4496a754f22819e66794bcdc51256b7c - src/common/displayport/src/dp_sst_edid.cpp fea946e5320e7de8e9229bca8d4a6a14b9e8db59 - src/common/displayport/src/dp_crc.cpp 2caf1cd4a99e55126883dbdd9f6b74883c71e171 - src/common/displayport/src/dp_messagecodings.cpp ef3fefa8dd819d4086c054919b769ca18d058469 - src/common/displayport/src/dp_wardatabase.cpp c49e37f3e225e60a74c71a2b571e542e12fd9bc9 - src/common/displayport/src/dp_watermark.cpp e874ffeaeb6deec57605bf91eaa2af116a9762bd - src/common/displayport/src/dp_bitstream.cpp d699ce22e5e2d641caa2fbacca3095d7dd7b3ffe - src/common/displayport/src/dp_evoadapter.cpp 5f2fb1683cff15175e3ef2276b721863886adc79 - src/common/displayport/src/dp_vrr.cpp 0717b87aafecbe2216e0f0b53ee088a980ef7ad4 - src/common/displayport/src/dp_auxretry.cpp 0670fb5302b1bd3fc65daa848f23e4086619b5e6 - src/common/displayport/src/dp_discovery.cpp 5c12759c27407e8df4c8f1f7bc6ec1595b6b1a63 - src/common/displayport/src/dp_messages.cpp 93ba2409667997fdbcb7af1a8f24ec4a0e15b62c - src/common/displayport/src/dp_timer.cpp 
ffdd039884b1400eaf4d6d7cc81d0faba5282014 - src/common/displayport/src/dp_deviceimpl.cpp
c625716e5516a290ac501563e2a73eef9b4f7dd6 - src/common/displayport/src/dp_edid.cpp
af1672e8abb92d8d574d9605285753a8580c5d10 - src/common/displayport/src/dp_groupimpl.cpp
2cda981a5e36285ba4173573d074f8761e74f186 - src/common/displayport/src/dp_qse.cpp
5c7adbdfe295f7e1a1d4899a62bf95b456f84412 - src/common/displayport/src/dp_messageheader.cpp
d3c4c54f96cc02d37fab45521685426e5c38fb4d - src/common/displayport/src/dp_mst_edid.cpp
f56f92e32710b0342805b785d34ba1a9f2a54ed3 - src/common/displayport/src/dp_guid.cpp
eb7e47407bd04e871f891038cc08736d066ffaa9 - src/common/displayport/src/dp_connectorimpl.cpp
a62b774b7c45882b5854b91b600987c343c24966 - src/common/displayport/src/dp_linkconfig.cpp
0a8818da34b5321763c1f60cb8b6ea5e1a2837f1 - src/common/displayport/src/dp_splitter.cpp
24c0787ce5ec691c6b8edb351000265f47e0156a - src/common/displayport/src/dp_buffer.cpp
422a5d3426d5e1cc2346d9d5f86ccde66062ffdc - src/common/displayport/src/dp_merger.cpp
41589d1d5bfa4316d5d066a7201226baed5332db - src/common/displayport/src/dp_configcaps.cpp
a0b68fce10eb0b95518cfd291e2d282872225295 - src/common/displayport/src/dptestutil/dp_testmessage.cpp
f0a73cd173382d8abd4b0c70da8b32e144740bb5 - src/common/modeset/timing/nvt_dmt.c
15d7c508b621c877887962b2c27cdb6c7d1144a0 - src/common/modeset/timing/nvt_util.c
1341b987df8336c882e31d22d2141cadfb67272d - src/common/modeset/timing/nvtiming.h
f8faf3eabd24a1239e1d4faebdc40c0ffa713ff9 - src/common/modeset/timing/nvt_edid.c
c95a1c7914b0d1cba366f2a29e08eb93e0ad033d - src/common/modeset/timing/nvt_edidext_displayid.c
3d3a0889baed7a15c2adce54ba56c1dc783faffd - src/common/modeset/timing/dpsdp.h
ff92b05f8648cb4bc31c0f64707065bb56ff3eb3 - src/common/modeset/timing/nvt_dsc_pps.c
f75b1d98895bdccda0db2d8dd8feba53b88180c5 - src/common/modeset/timing/displayid.h
1997adbf2f6f5be7eb6c7a88e6660391a85d891b - src/common/modeset/timing/nvt_gtf.c
2737ed1d1eccd163f9cd12b1944f96a03c526b31 - src/common/modeset/timing/nvtiming_pvt.h
58b68f1272b069bb7819cbe86fd9e19d8acd0571 - src/common/modeset/timing/edid.h
6d221aad371436ba304448ba2cf04f89148a09bb - src/common/modeset/timing/nvt_edidext_displayid20.c
48761f63bc2794dfbde10492cc53137458cfcd0e - src/common/modeset/timing/nvt_dsc_pps.h
08ef97092899a3dc80251f61cedc73a851d70baa - src/common/modeset/timing/nvt_edidext_861.c
d7cb716eeae50ecfe44fb3c4c4476de598ab78d7 - src/common/modeset/timing/nvt_tv.c
080c1de64d099ecb1aeb9b0b2f176f7be2d609b5 - src/common/modeset/timing/displayid20.h
1c2e163802849848e9ae1586d38c4cd82494217f - src/common/modeset/timing/nvt_ovt.c
54aa88075d9ceb9c6ef99d9c15cb32751a33f8d0 - src/common/modeset/timing/nvt_cvt.c
e13cbe77f864afcddaccff7aeb1923cd02f1482f - src/common/modeset/timing/nvt_displayid20.c
f8911888bdd441666c03fe27381d7730b7dd9131 - src/common/modeset/hdmipacket/nvhdmipkt_internal.h
12118b508a757fd0a162d1e740d93685a67363ea - src/common/modeset/hdmipacket/nvhdmipkt.c
5b541b9ab6fe9333815a760d4043fef725b1c848 - src/common/modeset/hdmipacket/nvhdmipkt_C971.c
83d94f0a5eb7318d00d96115b0139f9f99052ddc - src/common/modeset/hdmipacket/nvhdmipkt_CC71.c
b390bf4f74d690068ff24dce90b79b227769ac2f - src/common/modeset/hdmipacket/nvhdmipkt_C671.c
206727972ab3a5f8a2cde0e153d63aef929b6c01 - src/common/modeset/hdmipacket/nvhdmipkt_0073.c
a71968671ce6b64e235de6902bebc2a06da7ae04 - src/common/modeset/hdmipacket/nvhdmipkt_9171.c
54a1b5e5aaf0848a72befc896ed12f1de433ad4f - src/common/modeset/hdmipacket/nvhdmipkt_9471.c
57dbf547549c6fe24eb51cc54185b321c263108f - src/common/modeset/hdmipacket/nvhdmipkt.h
9be7b7be94a35d1d9a04f269ff560dbbb7860a2a - src/common/modeset/hdmipacket/nvhdmipkt_9571.c
559406ebdbd7f810f1ecbeb3e78b6518834b90fe - src/common/modeset/hdmipacket/nvhdmipkt_class.h
e1df3885cd76f5159801c1f66f20b18537eaecf3 - src/common/modeset/hdmipacket/nvhdmipkt_C871.c
5e12a290fc91202e4ba9e823b6d8457594ed72d3 - src/common/modeset/hdmipacket/nvhdmi_frlInterface.h
67db549636b67a32d646fb7fc6c8db2f13689ecc - src/common/modeset/hdmipacket/nvhdmipkt_9271.c
e6d500269128cbd93790fe68fbcad5ba45c2ba7d - src/common/modeset/hdmipacket/nvhdmipkt_C371.c
764d216e9941d0dcf41e89b2a0ddd8acf55902c8 - src/common/modeset/hdmipacket/nvhdmipkt_common.h
b882497ae393bf66a728dae395b64ac53602a1a5 - src/common/softfloat/nvidia/nv-softfloat.h
be9407a273620c0ba619b53ed72d59d52620c3e4 - src/common/softfloat/nvidia/platform.h
f6d98979ab2d1e2b0d664333104130af6abbcad5 - src/common/softfloat/source/f64_to_i64_r_minMag.c
21a6232d93734b01692689258a3fdfbbf4ff089d - src/common/softfloat/source/s_roundToUI32.c
29321080baa7eab86947ac825561fdcff54a0e43 - src/common/softfloat/source/i32_to_f32.c
dafa667ee5dd52c97fc0c3b7144f6b619406c225 - src/common/softfloat/source/s_mulAddF64.c
108eec2abf1cddb397ce9f652465c2e52f7c143b - src/common/softfloat/source/f64_roundToInt.c
513a7d1c3053fc119efcd8ae1bcc9652edc45315 - src/common/softfloat/source/f32_lt.c
d19ff7dfece53875f2d6c6f7dd9e7772f7b0b7ec - src/common/softfloat/source/f32_to_i64_r_minMag.c
2db07bbb8242bc55a24ef483af6d648db0660de0 - src/common/softfloat/source/f32_add.c
c951c9dffa123e4f77ed235eca49ef9b67f9f3d2 - src/common/softfloat/source/s_subMagsF64.c
5c1026617c588bcf5f1e59230bd5bb900600b9ac - src/common/softfloat/source/f64_mul.c
5c4ee32cc78efc718aaa60ec31d0b00b1bee3c2c - src/common/softfloat/source/f64_to_ui64_r_minMag.c
6fa7493285fe2f7fdc0ac056a6367e90327905c2 - src/common/softfloat/source/f32_sub.c
da3b3f94a817909a3dc93ca5fa7675805c7979e0 - src/common/softfloat/source/f64_isSignalingNaN.c
d701741d8d6a92bb890e53deda1b795f5787f465 - src/common/softfloat/source/f64_le.c
baa7af4eea226140c26ffe6ab02a863d07f729fb - src/common/softfloat/source/f64_eq_signaling.c
2e5c29d842a8ebc5fbf987068dc9394cee609cc7 - src/common/softfloat/source/f32_to_ui64.c
054b23a974fc8d0bab232be433c4e516e6c1250a - src/common/softfloat/source/f64_lt_quiet.c
dde685423af544e5359efdb51b4bf9457c67fa3b - src/common/softfloat/source/f32_sqrt.c
fb062ecbe62a1f5878fd47f0c61490f2bde279dd - src/common/softfloat/source/s_roundToI32.c
8e58f0258218475616ff4e6317516d40ad475626 - src/common/softfloat/source/f32_lt_quiet.c
ab19c6b50c40b8089cb915226d4553d1aa902b0e - src/common/softfloat/source/f64_to_i32_r_minMag.c
86fdc2472526375539216461732d1db6a9f85b55 - src/common/softfloat/source/s_roundPackToF32.c
9266c83f3e50093cc45d7be6ab993a0e72af1685 - src/common/softfloat/source/s_roundPackToF64.c
2e0fec421f4defd293cf55c5f3af7d91f4b7d2cc - src/common/softfloat/source/ui64_to_f32.c
68843a93e1f46195243ef1164f611b759cf19d17 - src/common/softfloat/source/f32_le_quiet.c
00ab2120f71117161d4f6daaa9b90a3036a99841 - src/common/softfloat/source/f32_to_ui32.c
d0f8f08c225b60d88b6358d344404ba9df3038ec - src/common/softfloat/source/s_normSubnormalF32Sig.c
0108fe6f0d394ad72083aff9bb58507f97a0b669 - src/common/softfloat/source/ui32_to_f64.c
7bc81f5bc894118c08bfd52b59e010bc068ed762 - src/common/softfloat/source/ui32_to_f32.c
0adfa7e174cdb488bb22b06642e14e7fc6f49c67 - src/common/softfloat/source/s_roundToI64.c
c3ce12c227d25bc0de48fbcf914fc208e2448741 - src/common/softfloat/source/f64_sub.c
b9fd15957f7ae5effeccb5d8adaa7434b43f44e1 - src/common/softfloat/source/s_roundToUI64.c
29396b7c23941024a59d5ea06698d2fbc7e1a6ca - src/common/softfloat/source/f64_to_i64.c
ae25eea499b3ea5bdd96c905fd0542da11083048 - src/common/softfloat/source/s_normRoundPackToF64.c
b22876b0695f58ee56143c9f461f1dde32fefbf3 - src/common/softfloat/source/f64_to_ui64.c
b8c5ccc1e511637d8b2ba2657de4937b80c01c07 - src/common/softfloat/source/f32_le.c
0126e0fceb1fa7912f4d5b8c3a6ebb4a048eb98a - src/common/softfloat/source/f16_to_f32.c
1ff879eca2a273293b5cd6048419b2d2d8063b93 - src/common/softfloat/source/f64_mulAdd.c
0e9694d551848d88531f5461a9b3b91611652e9a - src/common/softfloat/source/f64_to_ui32_r_minMag.c
5a5e0d9f1ee7e8c0d1d4f9fbcf6eba330a5f1792 - src/common/softfloat/source/f32_isSignalingNaN.c
bc992c88f3de09e3a82447cf06dbde7c6604f7f8 - src/common/softfloat/source/f64_to_f32.c
1a86a6948bf6768bd23a19f1f05d40968c1d2b15 - src/common/softfloat/source/f64_rem.c
50daf9186bc5d0180d1453c957164b136d5ffc89 - src/common/softfloat/source/f64_eq.c
09cb0cdb90eb23b53cd9c1a76ba26021084710d1 - src/common/softfloat/source/s_addMagsF32.c
9f4d355d85fbe998e243fe4c7bbf8ad23062b6e2 - src/common/softfloat/source/i64_to_f64.c
fd40a71c7ebf9d632a384fadf9487cfef4f3ea98 - src/common/softfloat/source/s_shiftRightJam128.c
aaf6ccb77a1a89fa055a0fb63513297b35e2e54b - src/common/softfloat/source/f64_le_quiet.c
38bd00e9c4d2f1354c611404cca6209a6c417669 - src/common/softfloat/source/s_countLeadingZeros64.c
d9a86343e6cc75714f65f690082dd4b0ba724be9 - src/common/softfloat/source/s_roundPackToF16.c
0bf499c0e3a54186fa32b38b310cc9d98ccdcfe3 - src/common/softfloat/source/f32_eq.c
d4b26dc407a891e9ff5324853f1845a99c5d5cd2 - src/common/softfloat/source/f32_to_i32.c
296c40b0589536cb9af3231ad3dcd7f2baaa6887 - src/common/softfloat/source/f64_lt.c
0d8e42636a3409a647291fdb388001c2b11bba07 - src/common/softfloat/source/f32_to_f16.c
9a60700ce25578100d83d529e49f08f71cf35e17 - src/common/softfloat/source/s_normSubnormalF16Sig.c
ec1a797b11f6e846928a4a49a8756f288bda1dfa - src/common/softfloat/source/i32_to_f64.c
729e790328168c64d65a1355e990274c249bbb3a - src/common/softfloat/source/f32_to_i32_r_minMag.c
9a5b93459ace2da23964da98617d6b18006fab86 - src/common/softfloat/source/s_countLeadingZeros8.c
84b0a01ba2a667eb28b166d45bd91352ead83e69 - src/common/softfloat/source/i64_to_f32.c
4b37be398b3e73ae59245f03b2ba2394fc902b4d - src/common/softfloat/source/s_normSubnormalF64Sig.c
6f83fa864007e8227ae09bb36a7fdc18832d4445 - src/common/softfloat/source/f32_mul.c
daeb408588738b3eb4c8b092d7f92ac597cf1fc6 - src/common/softfloat/source/f32_rem.c
a94c8c2bd74633027e52e96f41d24714d8081eb4 - src/common/softfloat/source/s_approxRecipSqrt_1Ks.c
69dc4cc63b2a9873a6eb636ee7cb704cbd502001 - src/common/softfloat/source/f64_to_ui32.c
50b3147f8413f0595a4c3d6e6eeab84c1ffecada - src/common/softfloat/source/s_normRoundPackToF32.c
bbc70102b30f152a560eb98e7a1a4b11b9ede85e - src/common/softfloat/source/f64_sqrt.c
760fd7c257a1f915b61a1089b2acb143c18a082e - src/common/softfloat/source/s_addMagsF64.c
ebb4f674b6213fec29761fc4e05c1e3ddeda6d17 - src/common/softfloat/source/f32_mulAdd.c
4445b1fbbd507144f038fd939311ff95bc2cf5f1 - src/common/softfloat/source/ui64_to_f64.c
871cb1a4037d7b4e73cb20ad18390736eea7ae36 - src/common/softfloat/source/f32_to_ui64_r_minMag.c
ce37cdce572a3b02d42120e81c4969b39d1a67b6 - src/common/softfloat/source/f64_to_i32.c
c29536f617d71fe30accac44b2f1df61c98a97dc - src/common/softfloat/source/f64_div.c
54cbeb5872a86e822bda852ec15d3dcdad4511ce - src/common/softfloat/source/f64_add.c
e7890082ce426d88b4ec93893da32e306478c0d1 - src/common/softfloat/source/s_approxRecipSqrt32_1.c
824383b03952c611154bea0a862da2b9e2a43827 - src/common/softfloat/source/s_subMagsF32.c
00c612847b3bd227a006a4a2697df85866b80315 - src/common/softfloat/source/s_mulAddF32.c
7c8e5ab3f9bf6b2764ce5fffe80b2674be566a12 - src/common/softfloat/source/softfloat_state.c
e4930e155580a0f5aa7f3694a6205bc9aebfe7aa - src/common/softfloat/source/f32_to_f64.c
1484fc96d7731695bda674e99947280a86990997 - src/common/softfloat/source/f32_to_i64.c
2960704c290f29aae36b8fe006884d5c4abcabb4 - src/common/softfloat/source/f32_div.c
23b76c1d0be64e27a6f7e2ea7b8919f1a45a8e7c - src/common/softfloat/source/f32_to_ui32_r_minMag.c
fe06512577e642b09196d46430d038d027491e9f - src/common/softfloat/source/f32_eq_signaling.c
5e6f9e120a17cc73297a35e4d57e4b9cbce01780 - src/common/softfloat/source/s_mul64To128.c
e0ad81cfb5d2c0e74dc4ece9518ca15ffc77beaf - src/common/softfloat/source/f32_roundToInt.c
d8b0c55a49c4fa0b040541db6d5ff634d7d103e7 - src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
a6d5c83f6a0542b33ac9c23ac65ef69002cfff9d - src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
8efb3f7cd3217b5cd25896b4bad058c72fe5b89a - src/common/softfloat/source/8086-SSE/specialize.h
3d0dbc0a672d039a6346e1c21ddf87ffc9181978 - src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
d152bc457b655725185bdff42b36bb96d6e6715e - src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c
1dd1b424087d9c872684df0c1b4063b077992d5f - src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
252c816378fddab616b1f2a61e9fedd549224483 - src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
21a11759ed2afd746a47c4d78b67640c2d052165 - src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c
98a850359fe08a7e39212f89ce96014ba80910da - src/common/softfloat/source/8086-SSE/s_f16UIToCommonNaN.c
0cbae7a5abc336331d460cbd3640d2cda02af434 - src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
4cd1d6cfca3936a39aab9bc0eb622f5c7c848be1 - src/common/softfloat/source/include/softfloat_types.h
1ded4df85ff5fa904fa54c27d681265425be1658 - src/common/softfloat/source/include/primitiveTypes.h
5f589a4d48cc59a0e5762303df9ea4a06ca398da - src/common/softfloat/source/include/softfloat.h
9d8a025889f3ec0e1cca7c4b52308158e1f39226 - src/common/softfloat/source/include/primitives.h
f118cad66d3c8ee17a52cec97cd3dc7e7a1cf2bc - src/common/softfloat/source/include/internals.h
14045fa6330dc6ed20d35eac5b4c5909631bca90 - src/common/src/nv_smg.c
abccf0a8732b881d904d937287ced46edcde45ac - src/nvidia/Makefile
c5f16fdf43ca3d2845d120c219d1da11257072b0 - src/nvidia/nv-kernel.ld
dcf4427b83cce7737f2b784d410291bf7a9612dc - src/nvidia/arch/nvalloc/unix/include/nv-reg.h
4750735d6f3b334499c81d499a06a654a052713d - src/nvidia/arch/nvalloc/unix/include/nv-caps.h
3c61881e9730a8a1686e422358cdfff59616b670 - src/nvidia/arch/nvalloc/unix/include/nv_escape.h
7fc52a43b242a8a921c2707589fa07c8c44da11c - src/nvidia/arch/nvalloc/unix/include/nv.h
81592e5c17bebad04cd11d73672c859baa070329 - src/nvidia/arch/nvalloc/unix/include/nv-chardev-numbers.h
e69045379ed58dc0110d16d17eb39a6f600f0d1d - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-lockless-diag.h
d1b1a1bc1fa30c1a966e95447f7831a06340d2d0 - src/nvidia/arch/nvalloc/unix/include/nv-priv.h
7e0175a8006f06b1d5f5be078d851a4f01648b96 - src/nvidia/arch/nvalloc/unix/include/nv-nb-regs.h
2eb11e523a3ecba2dcd68f3146e1e666a44256ae - src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h
5f004c33f130e6c5cd275f9c85d46185e4e9b757 - src/nvidia/arch/nvalloc/unix/include/os_custom.h
499e72dad20bcc283ee307471f8539b315211da4 - src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h
824ffbe85c591c7423855bee7bf3193473ef2b70 - src/nvidia/arch/nvalloc/unix/include/osapi.h
669bd0c054b00a74e8996c18063fa9bbf5cd7690 - src/nvidia/arch/nvalloc/unix/include/os-interface.h
2ffd0138e1b3425ade16b962c3ff02a82cde2e64 - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numa.h
b3ecb82f142a50bdc37eafaeb86d67f10fbcf73f - src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h
af45762b6eeae912cc2602acf7dc31d30775ade7 - src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h
107d1ecb8a128044260915ea259b1e64de3defea - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h
3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h
98a5a3bd7b94e69f4e7d2c3a1769583c17ef5b57 - src/nvidia/arch/nvalloc/unix/src/os.c
a659a503a6fcffdcacd2b76ae6b1f156b4b9216c - src/nvidia/arch/nvalloc/unix/src/osmemdesc.c
b5ae9b8d551a3e5489605c13686fb6cce4579598 - src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c
a17aae37486b325442e447489b64add3694ab8b0 - src/nvidia/arch/nvalloc/unix/src/osunix.c
b5b409625fde1b640e4e93276e35248f0fccfa4c - src/nvidia/arch/nvalloc/unix/src/gcc_helper.c
07f9c0995f1fbbba9eb819321996b57c1d2b86cd - src/nvidia/arch/nvalloc/unix/src/exports-stubs.c
d8815125dbf79831b8fe55367bba60e7115243cc - src/nvidia/arch/nvalloc/unix/src/osinit.c
ef270b45ff3d72db9b319408c8bb060303e589f5 - src/nvidia/arch/nvalloc/unix/src/osapi.c
a7383deea9dcab093323d8dde1ede73f85f93343 - src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c
b1a6d0a1ca4307b8e8d9cf136c94ef7c9efbae4c - src/nvidia/arch/nvalloc/unix/src/registry.c
915ee6dbffff92a86d68ac38549b25aa1e146872 - src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c
ffea38efca6a43af9bc61bb6cb8c2b14c3d6fc20 - src/nvidia/arch/nvalloc/unix/src/escape.c
d1089d8ee0ffcdbf73a42d7c4edb90769aa79d8c - src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h
8530e3d1db60647a9132e10c2119a75295f18060 - src/nvidia/arch/nvalloc/common/inc/nv-firmware.h
1cd024cc06bba6f7c3663ca2d03fe25bd77761d3 - src/nvidia/generated/g_gpu_mgmt_api_nvoc.c
0be1c1ff5f200a9aa68cdf3d03bc4780e757a1ea - src/nvidia/generated/g_traceable_nvoc.h
998d18bc2f6e2cdd00cf383000b66be8e8778baa - src/nvidia/generated/g_nv_debug_dump_nvoc.h
4491368ac52cfda834bdd24df3b6f156c32ec3a9 - src/nvidia/generated/g_client_nvoc.c
4eb2331b2f9f8d8c01d62ad771702e9b42f22b65 - src/nvidia/generated/g_lock_stress_nvoc.h
6b5bf7b2f5dd000bfa2949e14642dd582ba4a378 - src/nvidia/generated/g_event_buffer_nvoc.h
cd5f4b0bc23710e5b6277ff214a62c4993e95581 - src/nvidia/generated/g_code_coverage_mgr_nvoc.c
b9903d23010ea9d63117c27d5fe0cfba09849fa4 - src/nvidia/generated/g_context_dma_nvoc.c
4b7aaad308f2f25b07d932fc0fe0c3327db522a9 - src/nvidia/generated/g_objtmr_nvoc.h
7bd355d08dc6f2509db22ed56f1c05ab97f5f620 - src/nvidia/generated/g_allclasses.h
4eea9bd7952613f08af07508e2e9c1c0344940e7 - src/nvidia/generated/g_gpu_mgr_nvoc.h
c5cad88aa7de5a04a3b6f9836f355347448d6a7b - src/nvidia/generated/g_rmconfig_util.h
db1d1e047d00780efbe4c1c1ae6e4fecd3ab49e8 - src/nvidia/generated/g_os_desc_mem_nvoc.h
1ec59322d0874153252a387dcb50bf6d7328d56e - src/nvidia/generated/g_system_mem_nvoc.c
21e57b9c63e847eeb5a29c218db2c5c37db83298 - src/nvidia/generated/g_gpu_nvoc.c
4613f3d42dbc899b278fca71c3aaae79159d7dbe - src/nvidia/generated/g_gpu_user_shared_data_nvoc.c
b55573cb02ff8129aa4f5aa050ac53d1f4fcfdb2 - src/nvidia/generated/g_rs_resource_nvoc.h
16c8d551a3a908ec194d39c88c5603cea436c9b7 - src/nvidia/generated/g_binary_api_nvoc.c
a232e1da560db2322a921a9f0dc260ad703af2b4 - src/nvidia/generated/g_mem_nvoc.h
c503ca5954b8f6ebdba96904a1616a55ce08a2d3 - src/nvidia/generated/g_device_nvoc.c
e7cc58e9f8173583bd253fa73df56324e48aa5ad - src/nvidia/generated/g_io_vaspace_nvoc.h
b93ab0b9e39ca3c5b397cbdba58e4d9894d4130f - src/nvidia/generated/g_rpc-structures.h
afda2b8579ed309e23be0ad1a835ee84fcbe535f - src/nvidia/generated/g_client_nvoc.h
e97edab623386f7d1534b4f053a66fc8659167f6 - src/nvidia/generated/g_event_nvoc.h
f4b2bffbdbb2b0b398e8dfe3420e46b2bf27839c - src/nvidia/generated/g_hal_nvoc.h
4626f4a1a4eadc3695d79454db25bd0153d1165d - src/nvidia/generated/g_resource_fwd_decls_nvoc.h
30035e0fb1ae8b816fc42b78a17eb30462640ce4 - src/nvidia/generated/g_kernel_head_nvoc.h
52ae6273ddf101e9715aed99991506cad8e96859 - src/nvidia/generated/g_disp_inst_mem_nvoc.c
abc769851bd523ee08cf829bf3864cf5475066ec - src/nvidia/generated/g_subdevice_nvoc.h
255c404719b18c2a3aec2a47948c0fbcf4affd4b - src/nvidia/generated/rmconfig.h
c7fda8cbe109ad2736694ce9ec0e2ab93d0e3f2c - src/nvidia/generated/g_mem_list_nvoc.h
f9bdef39159a8475626a0edcbc3a53505a0ff80a - src/nvidia/generated/g_os_hal.h
dc7bbba203ee5ff91b6f14eb3abfad8c15854e1d - src/nvidia/generated/g_mem_desc_nvoc.h
1702c9d021149c0f5c73ebeda7bea29e246af31d - src/nvidia/generated/g_nv_name_released.h
2e0c45e4186d44774286a71daf797c980c2ddf7a - src/nvidia/generated/g_objtmr_nvoc.c
9b78bc02a8fe0ec297167bb4bdb7f8255b94198b - src/nvidia/generated/g_disp_capabilities_nvoc.h
967d8c0d7d5c1271e82f30af992f48322695d367 - src/nvidia/generated/g_eng_state_nvoc.h
831cdf0767703c00918e70ef3933716b201781f1 - src/nvidia/generated/g_syncpoint_mem_nvoc.c
ce74dbd8f88f50af0b3ea3b3034395cd98eb08e8 - src/nvidia/generated/g_gpu_access_nvoc.c
08ad957117efefe2e04448bce1cad2dec0e984af - src/nvidia/generated/g_odb.h
033a6d6bac0829783afe8a582fa6c4f329be7f04 - src/nvidia/generated/g_hypervisor_nvoc.h
c1471919f6c19e1b576b7c636ba5ae7ab9d58177 - src/nvidia/generated/g_gpu_db_nvoc.c
f68b7e209e268d14b0b98686d1766683139b9b5f - src/nvidia/generated/g_system_nvoc.c
cdcab5a0094b9e9664f7a0e62ec31783617de5ab - src/nvidia/generated/g_code_coverage_mgr_nvoc.h
5e614b6db957a0ae77502ca6d5966bca506f8020 - src/nvidia/generated/g_gpu_group_nvoc.h
eb15207a28b8eed41182de6311ec48f5e321729f - src/nvidia/generated/g_gpu_user_shared_data_nvoc.h
ef9def144aaf1b2b292c9815c68a6007eff56dda - src/nvidia/generated/g_rs_server_nvoc.c
eb07ee114f8cfc039978cdb7501c3ea03c879864 - src/nvidia/generated/g_generic_engine_nvoc.c
d2f3d17e05337992bc031c823186583d62c10235 - src/nvidia/generated/g_chips2halspec_nvoc.h
ad94c2430328b91392db363158fa2279b794cc54 - src/nvidia/generated/g_gpu_resource_nvoc.h
c77048521f9c9890f14108c2c5457d78a85fe69d - src/nvidia/generated/g_gpu_access_nvoc.h
38a98487eec65d8807e47f99b013619c1537e983 - src/nvidia/generated/g_dce_client_nvoc.c
d09bde39b1f12490ea0a696d6915d521c9f13953 - src/nvidia/generated/g_rpc-message-header.h
9becba61ba5ff7580b353abfb87cbe0f37817195 - src/nvidia/generated/g_binary_api_nvoc.h
50f70075eac2515b189e2d07a06b13cfa826945f - src/nvidia/generated/g_rs_client_nvoc.h
f8b984c6bc09554753cfe6692dde2eb3171abc57 - src/nvidia/generated/g_disp_channel_nvoc.h
4931b316fc042705a5f094c8c23b0038f980b404 - src/nvidia/generated/g_generic_engine_nvoc.h
2a28557874bd51f567ef42c75fd4e3b09d8ad44d - src/nvidia/generated/g_gpu_arch_nvoc.c
a17058fe665949f1e3861fe092e29b229cefbe62 - src/nvidia/generated/g_mem_mgr_nvoc.h
7aa02b964507a8269d35dc56170955025b98bd1a - src/nvidia/generated/g_gpu_arch_nvoc.h
0b9296f7797325b80ff0900f19a3763b564eb26b - src/nvidia/generated/g_context_dma_nvoc.h
4210ff36876e84e0adf1e9d4afb6654c7e6e5060 - src/nvidia/generated/g_resserv_nvoc.h
3613b4ec9b285a4e29edefa833704789c887c189 - src/nvidia/generated/g_tmr_nvoc.c
517b6b986a3749c9a6dd0f22bbef6569cdb48d97 - src/nvidia/generated/g_rs_client_nvoc.c
7670f19682bcd6224c999a8f80e770368e735632 - src/nvidia/generated/g_lock_stress_nvoc.c
b348b1b465cb359ca3cf10f5e121714ffb95b582 - src/nvidia/generated/g_standard_mem_nvoc.c
54fa23e7cf0f07d625c25d5c08dad9cd1714f851 - src/nvidia/generated/g_standard_mem_nvoc.h
7e528d775caa7ff2bf4159c94fc2c2e4d3aadffc - src/nvidia/generated/g_chips2halspec_nvoc.c
40aa2c65168c893c725c983b2219ceff03d05608 - src/nvidia/generated/g_gpu_halspec_nvoc.h
17c4ce5e67bf8bc8f48a4e2b1b7752d4597703ad - src/nvidia/generated/g_kernel_head_nvoc.c
3ad8329c7f7d63633b7abf2cdd502e4257fa1726 - src/nvidia/generated/g_event_nvoc.c
7aba35752cd4c6447f844cd9432d7dc1bc77b33d - src/nvidia/generated/g_disp_capabilities_nvoc.c
fa3a5418a5d6bd7fb2b375ed7f7b64293fdf5f86 - src/nvidia/generated/g_ioaccess_nvoc.h
3c3961ddf6422294c3322e3b0a3c97ee94bfd010 - src/nvidia/generated/g_gpu_mgr_nvoc.c
b73b22368abf741cc0a5108b6c9585a81de28b57 - src/nvidia/generated/g_hal.h
6e219df1367ce7dc8f5f4a1f2209a7808a927871 - src/nvidia/generated/g_hal_mgr_nvoc.c
279538daf54163a7a53aab1330fba2c00fc3f234 - src/nvidia/generated/g_rmconfig_util.c
49e84272bbce137683232275b4f13a19c644c650 - src/nvidia/generated/g_prereq_tracker_nvoc.h
57eb0772bc280690eade3f5d54f786e252c75099 - src/nvidia/generated/g_object_nvoc.c
113297c44e702cd6535e007c1c5b2dd5e6f809dc - src/nvidia/generated/g_ioaccess_nvoc.c
216040d1883e8c4f1e8b47d9f6b279ec111d094d - src/nvidia/generated/g_hal_mgr_nvoc.h
113b10cf6cef2608ff4a288e2944d56da64f355d - src/nvidia/generated/g_gpu_group_nvoc.c
86bb88ccdfa34510d4acf21684e5b8bd32d820b2 - src/nvidia/generated/g_disp_sf_user_nvoc.h
5c0ed2e135f53ca09fbfb542bea88b304a2e1208 - src/nvidia/generated/g_event_buffer_nvoc.c
979082b8c018eee55d880265f7bfd294360816c6 - src/nvidia/generated/g_hda_codec_api_nvoc.c
f917323efc9429fcea8643eb9a8d5ee46b1b50a5 - src/nvidia/generated/g_eng_state_nvoc.c
437329a9c6e35e4b02945ec035448e704521280e - src/nvidia/generated/g_hda_codec_api_nvoc.h
fba7a2891fe10e837f5897034b8176a7307fbb12 - src/nvidia/generated/g_lock_test_nvoc.h
05269b7e73347b580f11decf0e1b9f467d0cb60c - src/nvidia/generated/g_dce_client_nvoc.h
e175ab2ef1fd5b64c9f0d665a26b2ed6f864b106 - src/nvidia/generated/g_vaspace_nvoc.h
cc7ec616b034ec01da1c5176b6c62759c3f31a06 - src/nvidia/generated/g_subdevice_nvoc.c
93f9738c0e8aa715592306ddf023adf6b548dcc4 - src/nvidia/generated/g_nvh_state.h
1745f3002758556d1b6d11a24d088ef87ba18bd5 - src/nvidia/generated/g_virt_mem_mgr_nvoc.c
8c9f26e959fa9a6a3c4a5cb8875458cc4a9bfe9e - src/nvidia/generated/g_os_nvoc.c
3b0e038829647cfe0d8807579db33416a420d1d2 - src/nvidia/generated/g_chips2halspec.h
a1fad555b8ad36437992afdd6e3e08d236167ac7 - src/nvidia/generated/g_journal_nvoc.h
d210a82e3dda39239201cfc1c2fcb2e971915c1e - src/nvidia/generated/g_device_nvoc.h
836f88914b046eadad9435786e1b474ee6690f5f - src/nvidia/generated/g_gpu_nvoc.h
ea0d27b0f05818e2e44be7d04b31f8843e1d05b7 - src/nvidia/generated/g_io_vaspace_nvoc.c
10529db24fb0501aa7f2aae25e0a87247ab5405c - src/nvidia/generated/g_resource_nvoc.h
5d47bed309c731bfee4144f61093192e7efcaa55 - src/nvidia/generated/g_disp_channel_nvoc.c
8771d8f2cf58f5e1d91ece01c1962677cebc5e4b - src/nvidia/generated/g_rmconfig_private.h
951c1c8969a621344d4d2a3ec61b1ad51b39ea79 - src/nvidia/generated/g_client_resource_nvoc.c
629b6daac6c9215dc982973b6adcf84314d34d57 - src/nvidia/generated/g_gpu_halspec_nvoc.c
29d5ccf874298c8156314a6eb23c209f2920b779 - src/nvidia/generated/g_gpu_resource_nvoc.c
fc26ab853e7c981c271ced30dfd78d95cd9bcdfd - src/nvidia/generated/g_gpu_db_nvoc.h
aa76beb8b33254fae884434b688093f9c7f12c87 - src/nvidia/generated/g_hal_private.h
86739259b5059c9b9ea3061bd8d1846385cb95f4 - src/nvidia/generated/g_sdk-structures.h
41bc858f6aca964a8977ad96911ecf1e8b46385d - src/nvidia/generated/g_hal_archimpl.h
f87916eae53dbea2f6bdbe80a0e53ecc2071d9fd - src/nvidia/generated/g_lock_test_nvoc.c
6b8597803d509372152e3915f15139186294add5 - src/nvidia/generated/g_gpu_class_list.c
2101385d1332db9a2902370a6b3c6117ca8b2737 - src/nvidia/generated/g_kern_disp_nvoc.h
d71ff42bc0fc0faf1999a6cbe88c4492a47e200e - src/nvidia/generated/g_os_nvoc.h
e58abb783f7561d0af925c2fca392c5165fcb199 - src/nvidia/generated/g_kern_disp_nvoc.c
d6a34926ab710156c9c4b2d9f12a44e6dafd43d1 - src/nvidia/generated/g_tmr_nvoc.h
c4c67b0e0284656b32c7b4547e22d521c442124a - src/nvidia/generated/g_disp_objs_nvoc.h
8e49b4d77641c98c6101dbc88a79290ceca6271a - src/nvidia/generated/g_rs_server_nvoc.h
af206c390549eff5d690ad07f3e58cd417f07f5f - src/nvidia/generated/g_hal_register.h
be659882e731b6a2019639265af46239c5c96ebf - src/nvidia/generated/g_hal_nvoc.c
db76e8669776fbfa901c60d9b9908af2fabc4703 - src/nvidia/generated/g_virt_mem_mgr_nvoc.h
797bd0197236fb0afc2c7e052487db803ac5baf0 - src/nvidia/generated/g_rs_resource_nvoc.c
884bed29fb4735ae0b4504fc874702acd29ee541 - src/nvidia/generated/g_mem_mgr_nvoc.c
3168beb42f15591a50339692d502e04977615a7b - src/nvidia/generated/g_prereq_tracker_nvoc.c
8e0071daaf5471a0fb3856705ec993704eaed4b5 - src/nvidia/generated/g_disp_inst_mem_nvoc.h
fb464cf839a1e76ac2a27346c7cd46ca921f1f56 - src/nvidia/generated/g_traceable_nvoc.c
8588d6f88ab5e8682952063fe0e2c840b334c622 - src/nvidia/generated/g_eng_desc_nvoc.h
de99523103dd7df0934cbe7aa21179ec7f241817 - src/nvidia/generated/g_os_desc_mem_nvoc.c
aa43dd8bdbdc71dc64d65e948221c7d5235588e7 - src/nvidia/generated/g_disp_objs_nvoc.c
9b6cc3a5e9e35139e9245cbe753fe9a552a488c0 - src/nvidia/generated/g_syncpoint_mem_nvoc.h
ae311b0968df9e9c9c2cec89e3060c472fc70a4c - src/nvidia/generated/g_mem_nvoc.c
dc7a782be9a0096701771cb9b2dc020c2f814e6d - src/nvidia/generated/g_system_nvoc.h
93a47004dd1c7529c6ee5f8abdf8b49c336fb681 - src/nvidia/generated/g_disp_sf_user_nvoc.c
3b5dfad8fccd7251cc177c7ea1b90265b4b6c901 - src/nvidia/generated/g_gpu_mgmt_api_nvoc.h
b53ec15a1aaf102d42b79881cd1b270afeb7205c - src/nvidia/generated/g_system_mem_nvoc.h
67b2d3ea81ebe7be679bcafc688ced0d64f16edf - src/nvidia/generated/g_object_nvoc.h
b1be7145e70d8811fbdbe07c0e99f32ad0e38429 - src/nvidia/generated/g_client_resource_nvoc.h
0d5b87b117d39b173a2a21a5cd71572bc2b26697 - src/nvidia/generated/g_resource_nvoc.c
51df7972f9932c2a5d800d4e2b3e4828e5aa2038 - src/nvidia/generated/g_vaspace_nvoc.c
0820fa0a975b2474ce0fdf64508cbd7758f60e5c - src/nvidia/generated/g_ref_count_nvoc.h
fff3ebc8527b34f8c463daad4d20ee5e33321344 - src/nvidia/inc/lib/ref_count.h
ec26741397ebd68078e8b5e34da3b3c889681b70 - src/nvidia/inc/lib/base_utils.h
f8d9eb5f6a6883de962b63b4b7de35c01b20182f - src/nvidia/inc/lib/protobuf/prb.h
601edb7333b87349d791d430f1cac84fb6fbb919 - src/nvidia/inc/lib/zlib/inflate.h
671c628ff9d4e8075f953766adcab9bfc54bd67c - src/nvidia/inc/libraries/poolalloc.h
1e8730e4abd210e3c648ef999ccc2b1f1839b94c - src/nvidia/inc/libraries/field_desc.h
8dd7f2d9956278ed036bbc288bff4dde86a9b509 - src/nvidia/inc/libraries/eventbufferproducer.h
1b28bd0ee2e560ca2854a73a3ee5fb1cf713d013 - src/nvidia/inc/libraries/nvoc/utility.h
d3cd73c0c97a291e76e28a6e3834d666e6452172 - src/nvidia/inc/libraries/nvoc/prelude.h
79b556739f0648cec938f281794663433fc5e048 - src/nvidia/inc/libraries/nvoc/runtime.h
91c67f272f0ada6f386e9f4a78fbde70aa5c883d - src/nvidia/inc/libraries/nvoc/object.h
c0f66cf7b2fb6ca24b5d4badede9dcac0e3b8311 - src/nvidia/inc/libraries/nvoc/rtti.h
a3db778e81f7188a700e008e4c5f5b1320ab811e - src/nvidia/inc/libraries/mmu/gmmu_fmt.h
1daea206ab581fa3554ff1811e1253a7d0053ac0 - src/nvidia/inc/libraries/mmu/mmu_fmt.h
56b8bae7756ed36d0831f76f95033f74eaab01db - src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h
b8e52b576e6668e4de7ea65a31e12c2bb491a591 - src/nvidia/inc/libraries/mapping_reuse/mapping_reuse.h
e772583f7fbf994fcf923d527d42372a716b4c57 - src/nvidia/inc/libraries/ioaccess/ioaccess.h
26853c886d848fb88e14da3aceab23f90589c05d - src/nvidia/inc/libraries/utils/nvprintf_level.h
c314121149d3b28e58a62e2ccf81bf6904d1e4bc - src/nvidia/inc/libraries/utils/nvmacro.h
72dcc09b77608263573bd34adf09393328eddf86 - src/nvidia/inc/libraries/utils/nvrange.h
b598ccd2721892b6915d4be432f1fc332477b666 - src/nvidia/inc/libraries/utils/nvbitvector.h
9aa5870d052a45c2489a6ea1a4f2e30fbc52d6be - src/nvidia/inc/libraries/utils/nv_enum.h
4849eb6c567e3ba952c22e702461c1a84ec88c6a - src/nvidia/inc/libraries/utils/nvprintf.h
1b265cb4fcc628862e4b27ae63a897871987eb76 - src/nvidia/inc/libraries/utils/nvassert.h
39113db75fdab5a42f9d8653ed1c90018b8b1df4 - src/nvidia/inc/libraries/containers/map.h
11ce1423312f4c34df19672e45678d0531cc299d - src/nvidia/inc/libraries/containers/ringbuf.h
5f116730f8b7a46e9875850e9b6ffb2a908ad6c2 - src/nvidia/inc/libraries/containers/btree.h
fc211c8276ebcee194080140b5f3c30fba3dfe49 - src/nvidia/inc/libraries/containers/queue.h
661b551f4795f076d7d4c4dab8a2ae2f52b0af06 - src/nvidia/inc/libraries/containers/list.h
47c69b04f95664e742f1a0a02711eeb1fb71000b - src/nvidia/inc/libraries/containers/eheap_old.h
5da20ecad3ff8405dea782792c6397d21ba76f7c - src/nvidia/inc/libraries/containers/vector.h
bcfc41a04576a4244c9dc3fe2a32c8e582f16c3e - src/nvidia/inc/libraries/containers/type_safety.h
5cabf8b70c3bb188022db16f6ff96bcae7d7fe21 - src/nvidia/inc/libraries/containers/multimap.h
4e26106c9c758c9e48418451ac01cf591ed74a31 - src/nvidia/inc/libraries/nvlog/nvlog_printf.h
41843197a5c11abc93df89b8f10a5f815e7fe6af - src/nvidia/inc/libraries/nvlog/nvlog.h
13aedc8ccf6acdd71be71b2219f79cd1af411273 - src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h
7c9c9456aaacbeffa11a9af54fe2250095ebbb00 - src/nvidia/inc/libraries/tls/tls.h
87a130551593551380ac3e408f8044cc0423c01a - src/nvidia/inc/libraries/nvport/nvport.h
2487ffc1eb1e50b27ba07e0581da543d80bdaa72 - src/nvidia/inc/libraries/nvport/safe.h
4bf45849bc1c6b89d7a79d761cce84a1d5026eac - src/nvidia/inc/libraries/nvport/debug.h
147d47ef4bd860394d1d8ae82c68d97887e2898b - src/nvidia/inc/libraries/nvport/core.h
6d698ca4fc5e48c525f214a57e1de0cc4aa9e36b - src/nvidia/inc/libraries/nvport/thread.h
6065fa9a525d80f9b61acb19e476066823df0700 - src/nvidia/inc/libraries/nvport/sync.h
a1d93b6ec8ff01a3c2651e772a826ee11a7781d7 - src/nvidia/inc/libraries/nvport/util.h
fb5a011275328b7c1edc55abc62e604462b37673 - src/nvidia/inc/libraries/nvport/atomic.h
16a35b2b6fd6eb855acd64d72480b285795f54b2 - src/nvidia/inc/libraries/nvport/memory.h
f31ed19d0588861b8c2b1489dd4e70d430110db5 - src/nvidia/inc/libraries/nvport/crypto.h
96c7c30c9f6503675f0903a16207a0ac06a6963d - src/nvidia/inc/libraries/nvport/cpu.h
53d843988669f61528cd45099ced749defa4cf7e - src/nvidia/inc/libraries/nvport/string.h
d1863efe7b8a63f1c5a7f47856b95ad31fd1a561 - src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h
9596b274389ea56acff6ca81db8201f41f2dd39d - src/nvidia/inc/libraries/nvport/inline/atomic_clang.h
a8c9b83169aceb5f97d9f7a411db449496dc18f6 - src/nvidia/inc/libraries/nvport/inline/util_generic.h
bbece45965ffbc85fbd383a8a7c30890c6074b21 - src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h
a7cb79bf7ac48e0f5642ecfd2e430bb85587dddf - src/nvidia/inc/libraries/nvport/inline/memory_tracking.h
1d6a239ed6c8dab1397f056a81ff456141ec7f9c - src/nvidia/inc/libraries/nvport/inline/util_valist.h
f267235fd8690e1b1d7485d3a815841607683671 - src/nvidia/inc/libraries/nvport/inline/safe_generic.h
645734ed505a4d977490e54b26cdf49657e20506 - src/nvidia/inc/libraries/nvport/inline/sync_tracking.h
a902e0f4265bd3dbd251afefa8ceb0389464d886 - src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h
2dec1c73507f66736674d203cc4a00813ccb11bc - src/nvidia/inc/libraries/resserv/rs_domain.h
fa5a5d8fa07cae6b8ef9d9135dc5d7e7624533d2 - src/nvidia/inc/libraries/resserv/resserv.h
972165721958839bc1d510fda9409d35ff89ec21 - src/nvidia/inc/libraries/resserv/rs_server.h
883bf7295d707014278e035f670d151275975d18 - src/nvidia/inc/libraries/resserv/rs_resource.h
2ad85ddca7cd230cea917e249871277ef1e59db1 - src/nvidia/inc/libraries/resserv/rs_client.h
cd033fe116a41285a979e629a2ee7b11ec99369f - src/nvidia/inc/libraries/resserv/rs_access_rights.h
df174d6b4f718ef699ca6f38c16aaeffa111ad3c - src/nvidia/inc/libraries/resserv/rs_access_map.h
5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - src/nvidia/inc/os/dce_rm_client_ipc.h
4aa45a3755ef172aa35279e87dd5cd83cab1bc2e - src/nvidia/inc/kernel/vgpu/rpc_hal_stubs.h
f2fd94a00e5debf1dc7f7ad4c00d417552fb0554 - src/nvidia/inc/kernel/vgpu/rpc.h
37598b6c25aac1a07cbc2bc5c76ebecdbca56eb6 - src/nvidia/inc/kernel/vgpu/rm_plugin_shared_code.h
fea4bbeb739723d3b80b5b3d8943e746e58fae07 - src/nvidia/inc/kernel/vgpu/dev_vgpu.h
f64d3723d0c475558bed799da8d2c5ec32a7d3a8 - src/nvidia/inc/kernel/vgpu/vgpuapi.h
8bf8282ce6112a2afb2e7f64d138d6ce90cf37c0 - src/nvidia/inc/kernel/vgpu/rpc_global_enums.h
69360faa428e157580fac445bcf601f44f7646c0 - src/nvidia/inc/kernel/vgpu/rpc_headers.h
b9af629ab29b527f7830b78f52b55b8535b8dbfd - src/nvidia/inc/kernel/vgpu/vgpu_util.h
e33b5b8c324c23d28e91324a87b47a24823dc5f5 - src/nvidia/inc/kernel/vgpu/rpc_vgpu.h
af9d17b204fdddc6f97280fdafd5a414ee8274dc - src/nvidia/inc/kernel/diagnostics/code_coverage_mgr.h
c6efd51b8b8447829a0867cd7fb7a5a5a2fb1e3d - src/nvidia/inc/kernel/diagnostics/traceable.h
fd780f85cb1cd0fd3914fa31d1bd4933437b791d - src/nvidia/inc/kernel/diagnostics/tracer.h
7e75b5d99376fba058b31996d49449f8fe62d3f0 - src/nvidia/inc/kernel/diagnostics/profiler.h
7615ac3a83d0ad23b2160ff8ad90bec9eb1f3c6c - src/nvidia/inc/kernel/diagnostics/journal.h
b259f23312abe56d34a8f0da36ef549ef60ba5b0 - src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h
7f3f19ed69089ba05f5cac44982547718dbf4662 - src/nvidia/inc/kernel/diagnostics/xid_context.h
3a28bf1692efb34d2161907c3781401951cc2d4f - src/nvidia/inc/kernel/diagnostics/journal_structs.h
8ef620afdf720259cead00d20fae73d31e59c2f7 - src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h
701375e96d771b4105f5fe4949ed4a542be4f3d7 - src/nvidia/inc/kernel/os/os_stub.h
408c0340350b813c3cba17fd36171075e156df72 - src/nvidia/inc/kernel/os/os.h
c8496199cd808ed4c79d8e149961e721ad96714e - src/nvidia/inc/kernel/os/capability.h
cda75171ca7d8bf920aab6d56ef9aadec16fd15d - src/nvidia/inc/kernel/os/nv_memory_type.h
70b67003fda6bdb8a01fa1e41c3b0e25136a856c - src/nvidia/inc/kernel/os/nv_memory_area.h
497492340cea19a93b62da69ca2000b811c8f5d6 - src/nvidia/inc/kernel/rmapi/event_buffer.h
499c3d0d76276ee9441d57948ea97877c48b1daa - src/nvidia/inc/kernel/rmapi/rmapi.h
b4bae9ea958b4d014908459e08c93319784c47dd - src/nvidia/inc/kernel/rmapi/event.h
0500c41247fdecd66f25428d279c6dab72bab13e - src/nvidia/inc/kernel/rmapi/binary_api.h
61e3704cd51161c9804cb168d5ce4553b7311973 - src/nvidia/inc/kernel/rmapi/resource.h
2baec15f4c68a9c59dd107a0db288e39914e6737 - src/nvidia/inc/kernel/rmapi/client.h
ac9288d75555180c1d5dd6dd7e0e11fb57a967f2 - src/nvidia/inc/kernel/rmapi/exports.h
835f193521f216d29c678a6018cd9791914b6c01 - src/nvidia/inc/kernel/rmapi/lock_stress.h
b9ff9b201bf2df8651f0c408158aa617638868f6 - src/nvidia/inc/kernel/rmapi/rmapi_specific.h
20adc296ffe79f27d5c24c70716c972a2e0c9a5d - src/nvidia/inc/kernel/rmapi/control.h
deed1715907c1dab8e3304bd4f63b688b72104b7 - src/nvidia/inc/kernel/rmapi/mapping_list.h
4453fe6463e3155063f2bdbf36f44697606a80a5 - src/nvidia/inc/kernel/rmapi/client_resource.h
6cc2de07b21fb21cef1b5b87fb2f1c935782262c - src/nvidia/inc/kernel/rmapi/rs_utils.h
35a65c31c6dcc2824011245ff6e2d5a30f95525c - src/nvidia/inc/kernel/rmapi/rmapi_utils.h
a92dbf2870fe0df245ea8967f2f6a68f5075ecaf - src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h
23e243f9abcb2a4f2d10d141303cd55677b04436 - src/nvidia/inc/kernel/rmapi/rmapi_cache_handlers.h
2724476b61b1790f1b7c293cc86e8a268125e11c - src/nvidia/inc/kernel/rmapi/param_copy.h
15f788614e08d805e963653460858cf013fe0178 - src/nvidia/inc/kernel/rmapi/lock_test.h
2b23f2dbd8f3f63a17a1b63ebb40a2fd7fd8801a - src/nvidia/inc/kernel/rmapi/alloc_size.h
893ec596aab365c2ff393bf2b96aea57f37d01f8 - src/nvidia/inc/kernel/platform/nvpcf.h
5e9928552086947b10092792db4a8c4c57a84adf - src/nvidia/inc/kernel/platform/acpi_common.h
e762205698aff945603324331b443bb2f20cf778 - src/nvidia/inc/kernel/platform/sli/sli.h
15754215ec49815f547dd999b2262a34670dde0b - src/nvidia/inc/kernel/core/locks.h
bdc4ab675c6f6c4bd77c3aaf08aa5c865b186802 - src/nvidia/inc/kernel/core/hal.h
ad378b09a277fba0efd3291d167e1d21071bdf1b - src/nvidia/inc/kernel/core/printf.h
a054be86a4476ba7b9a97052dfcfa4155e059cb9 - src/nvidia/inc/kernel/core/info_block.h
bffae4da6a1f9b7dc7c879587fd674b49b46dac1 - src/nvidia/inc/kernel/core/core.h
37f267155ddfc3db38f110dbb0397f0463d055ff - src/nvidia/inc/kernel/core/strict.h
b00302aec7e4f4e3b89a2f699f8b1f18fc17b1ba - src/nvidia/inc/kernel/core/hal_mgr.h
2d741243a6ae800052ddd478cc6aa7ad0b18f112 - src/nvidia/inc/kernel/core/prelude.h
ebc7c06d9e94218af4cf6b0c03e83650e391e5bc - src/nvidia/inc/kernel/core/thread_state.h
b5859c7862fb3eeb266f7213845885789801194a - src/nvidia/inc/kernel/core/system.h
07f45cd5fab5814e21b9e84425564b43776118fd - src/nvidia/inc/kernel/gpu/gpu_resource_desc.h
7010ff346c27b6453c091f5577672b8b1821808d - src/nvidia/inc/kernel/gpu/gpu_access.h
10ba0b9d4c67c8027b391073dab8dc4388f32fd7 - src/nvidia/inc/kernel/gpu/nvbitmask.h
59f72837997cb0c8ffc491d9a61c61e61b9dca94 - src/nvidia/inc/kernel/gpu/gpu_shared_data_map.h
bca121fb72d54afd714654f1a50eb7192da3135f - src/nvidia/inc/kernel/gpu/gpu_uuid.h
3f0f23a15201105779f3d25dc7628b42990c4b7e - src/nvidia/inc/kernel/gpu/gpu_timeout.h
1ac9c8bf155d1f25f790032b2b6306223199d9ff - src/nvidia/inc/kernel/gpu/gpu_arch.h
f17b704f2489ffedcc057d4a6da77c42ece42923 - src/nvidia/inc/kernel/gpu/gpu_resource.h
28d0d82b58ef13662e8896d3bbc42d340836294e - src/nvidia/inc/kernel/gpu/gpu_user_shared_data.h
e33e4d1537839e41898ff0fab8949e90ee1aed46 - src/nvidia/inc/kernel/gpu/gpu_device_mapping.h
426c6ab6cecc3b1ba540b01309d1603301a86db1 - src/nvidia/inc/kernel/gpu/eng_desc.h
5f5677bee452c64a1b890c3eb65e81fda66ddbaa - src/nvidia/inc/kernel/gpu/error_cont.h
d624e0c45cc8ad24e8c0b2fb5281c0c8a1c7a6d3 - src/nvidia/inc/kernel/gpu/gpu_engine_type.h
c33ab6494c9423c327707fce2bcb771328984a3c - src/nvidia/inc/kernel/gpu/gpu_halspec.h
145b1bc37e6c36b466ea33dd0579d22b530d8dd3 - src/nvidia/inc/kernel/gpu/kern_gpu_power.h
c771936af1de030194894db1312d847038ddb0cb - src/nvidia/inc/kernel/gpu/gpu_child_list.h
0e8353854e837f0ef0fbf0d5ff5d7a25aa1eef7c - src/nvidia/inc/kernel/gpu/eng_state.h
76b24227c65570898c19e16bf35b2cad143f3d05 - src/nvidia/inc/kernel/gpu/gpu.h
0a0c9a8f27feec3e90e15ce9879532ec77450de5 - src/nvidia/inc/kernel/gpu/gpu_acpi_data.h
9ed922ffed4454a10c5e2d8b3123ed653ec653e4 - src/nvidia/inc/kernel/gpu/gpu_ecc.h
f2947fefcaf0611cd80c2c88ce3fdea70953c1ed - src/nvidia/inc/kernel/gpu/gpu_child_class_defs.h
efc50bb2ff6ccf1b7715fd413ca680034920758e - src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h
24d01769b39a6dd62574a95fad64443b05872151 - src/nvidia/inc/kernel/gpu/subdevice/subdevice.h
576216219d27aa887beeccefc22bcead4d1234d7 - src/nvidia/inc/kernel/gpu/disp/kern_disp.h
277a2719f8c063037c6a9ed55ade2b1cb17f48ae - src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h
51a209575d3e3fe8feb7269ece7df0846e18ca2a - src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h
d0899f0e55e6675e267d4c72577be52e39b66121 - src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h
be7da8d1106ee14ff808d86abffb86794299b2df - src/nvidia/inc/kernel/gpu/disp/disp_objs.h
74bc902cd00b17da3a1dfa7fd3ebc058de439b76 - src/nvidia/inc/kernel/gpu/disp/disp_channel.h
b39826404d84e0850aa3385691d8dde6e30d70d4 - src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h
24397d051c941427e54cefc1062d8cd977a8725e - src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h
9a33a37c6cea9bad513aa14c942c689f28f7c0d8 - src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h
5179f01acf7e9e251552dc17c0dcd84f7d341d82 - src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h
22fc153d91a3917ac8e3f2aa94f0d52bfb11f7c2 - src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_commands_responses.h
173e9ecd2224a5259c79f2491302ba4415e82f70 - src/nvidia/inc/kernel/gpu/hfrp/kernel_hfrp.h
3118f2e9b47cfac98a92d195ce67ea63e50bf3ab - src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_common.h
1feab39692ea8796ac7675f4780dfd51e6e16326 - src/nvidia/inc/kernel/gpu/timer/objtmr.h
0cff83f4fdcc8d025cd68e0a12faaeead09fa03b - src/nvidia/inc/kernel/gpu/timer/tmr.h
71dd4fccd3b601508230a2b8b720aaf531a160ff - src/nvidia/inc/kernel/gpu/gsp/gsp_trace_rats_macro.h
e1979c71f3d5ffc92bf2306f9360b70bca0edf1f - src/nvidia/inc/kernel/gpu/gsp/message_queue.h
23d38dc3e66affac9342a839f5ba0d79a40f63ba - src/nvidia/inc/kernel/gpu/gsp/kernel_gsp_trace_rats.h
bb9b8ec9840109b15c174da02e7ac85c1e2c0c70 - src/nvidia/inc/kernel/gpu/rpc/objrpc.h
1cc21ad9136024f7437ef745db6652343588c50a - src/nvidia/inc/kernel/gpu/rpc/objrpcstructurecopy.h
7b7cf3b6459711065d1b849bf5acaea10b6400ca - src/nvidia/inc/kernel/gpu/intr/intr_common.h
1e3bebe46b7f2f542eedace554a4156b3afb51f1 - src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h
97d0a067e89251672f191788abe81cf26dcb335f - src/nvidia/inc/kernel/gpu/device/device.h
889ba18a43cc2b5c5e970a90ddcb770ce873b785 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h
6756126ddd616d6393037bebf371fceacaf3a9f1 - src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h
e4c67260b5cb693d695ad3d8aa96aaed45688322 - src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h
20416f7239833dcaa743bbf988702610e9251289 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h
407cad27681bde8235305464150e275a4a93b5d5 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h
5be45f3abdbb65a8eea959d98499ea8ff9a79de9 - src/nvidia/inc/kernel/gpu/mem_mgr/rm_page_size.h
76de30ac7b722cc5d59fc834d6b9c795ec14d7a5 - src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h
ce4e0f7177f46f4fc507a68b635e5395a3f7dde6 - src/nvidia/inc/kernel/gpu/dce_client/dce_client.h
2c48d7335bdb0b7ea88b78216c0aeab2e11e00c1 - src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h
5b151d0d97b83c9fb76b76c476947f9e15e774ad - src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h
e188d9f2d042ffe029b96d8fbb16c79a0fc0fb01 - src/nvidia/inc/kernel/gpu_mgr/gpu_db.h
ea32018e3464bb1ac792e39227badf482fa2dc67 - src/nvidia/inc/kernel/gpu_mgr/gpu_group.h
02d6a37ef1bb057604cb98a905fa02429f200c96 - src/nvidia/inc/kernel/mem_mgr/mem.h
a5f49a031db4171228a27482d091283e84632ace - src/nvidia/inc/kernel/mem_mgr/system_mem.h
d15991bc770c5ab41fe746995294c5213efa056b - src/nvidia/inc/kernel/mem_mgr/io_vaspace.h
5ae08b2077506cbc41e40e1b3672e615ce9d910f - src/nvidia/inc/kernel/mem_mgr/vaspace.h
0ce5d6370c086d2944b2e8d31ff72a510d98dc8f - src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h
4c386104eaead66c66df11258c3f1182b46e96ee - src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h
1a08e83fd6f0a072d6887c60c529e29211bcd007 - src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h
2d4afabd63699feec3aea5e89601db009fc51a08 - src/nvidia/inc/kernel/mem_mgr/standard_mem.h
24928c8b4e8b238f1921a1699f3af59bcff994ed - src/nvidia/src/lib/base_utils.c
a6134d6f5f3e3b0b4c274eb3b2d0a146644c842b - src/nvidia/src/lib/zlib/inflate.c
2e57601af217d0d8c4986abb593e8864e53e7e0b - src/nvidia/src/libraries/nvoc/src/runtime.c
9ea8bf51c44e500c9963a12a1e2a71ebffe6c4e8 - src/nvidia/src/libraries/nvbitvector/nvbitvector.c
0e7a9b9c697f260438ca5fda8527b0f4edc2de13 - src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c
e5ead344020dfc973ee7c7383e0f687a29642683 - src/nvidia/src/libraries/mapping_reuse/mapping_reuse.c
3c885d2c0e6cfb3f8585bddcba128b02e0196167 - src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c
ee7ea17829dfbbf9e6cd8d6c6fb2ada086b5d36e - src/nvidia/src/libraries/ioaccess/ioaccess.c
ca2ba7f19b705e39dbb8890a84ce84d34fbd8aa4 - src/nvidia/src/libraries/utils/nvassert.c
864bd314450490b687a652335a44fb407835152c - src/nvidia/src/libraries/containers/ringbuf.c
eb919a9e8711830c1c3f7fe71273e0a39862292e - src/nvidia/src/libraries/containers/vector.c
53aa343682f721f57058c7a17b1e872ca6fe7cea - src/nvidia/src/libraries/containers/map.c
7f58f03ec069ad5f5c64fedf4a484cc93473bd04 - src/nvidia/src/libraries/containers/queue.c
23c328fc27ad0317efe6ccd2da71cfd9db9da236 - src/nvidia/src/libraries/containers/multimap.c
ae669a466f1fecf67746a9fafc8c1119294c93d7 - src/nvidia/src/libraries/containers/list.c
9c80df385a47834da4f92dc11053ca40a37a7fe7 - src/nvidia/src/libraries/containers/btree/btree.c
a0e23ad69d805a7de439f0fbf79241c6466efdc2 - src/nvidia/src/libraries/containers/eheap/eheap_old.c
cccb1fedee02a240692688090e00ac1e289dec9e - src/nvidia/src/libraries/tls/tls.c
a045a19d750d48387640ab659bb30f724c34b8c8 - src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c
d047abe66dd8a459c15224cc056fc6f2176b0c6a - src/nvidia/src/libraries/nvport/util/util_gcc_clang.c
f0c486c1ad0f7d9516b13a02d52b4d857d8865b1 - src/nvidia/src/libraries/nvport/util/util_compiler_switch.c
9b69fbf3efea6ba58f9ba7cb0189c9264c994657 - src/nvidia/src/libraries/nvport/sync/sync_common.h
eb8b5fcab51c47f58a37958ddb38ff90991bcbbe - src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c
b2ae1406c94779f575d3e2233a7ab248ac10e74f - src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h
e2fec1a305dfec07456faec8ea5e75f601d76b5e - src/nvidia/src/libraries/nvport/memory/memory_tracking.c
c5a16e5bb7d304ffe5e83d7b27226cbecdbc7ce1 - src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c
db01179ad5e6333844bd3e31b62d0dc262c98875 - src/nvidia/src/libraries/nvport/memory/memory_generic.h
2c00bd224d17c0cc5469b5140f3be3d23b494922 - src/nvidia/src/libraries/nvport/string/string_generic.c
b387005657f81538fab5962d4aabbc5dc681aa1b - src/nvidia/src/libraries/nvport/core/core.c
702c73446bba35f88249cfe609ac0ca39dbd80ff - src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c
9ca28a5af5663dec54b4cd35f48a8a3d8e52e25f - src/nvidia/src/libraries/nvport/cpu/cpu_common.c
a305654bafc883ad28a134a04e83bbd409e0fc06 - src/nvidia/src/libraries/nvport/cpu/cpu_common.h
099c17e5931d5d881d8248ec68041fa0bbc2a9bc - src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c
1f2e9d09e658474b36d0b0ecd9380d0d2bcc86b2 - src/nvidia/src/libraries/resserv/src/rs_domain.c
f9cb28c60e7063ddb5b2a2af4a053a477c95c74b - src/nvidia/src/libraries/resserv/src/rs_server.c
dac54d97b38ad722198ec918668f175dc5122e4e - src/nvidia/src/libraries/resserv/src/rs_access_map.c
ede517ff5f53666a23ad2edec7e9fcd85c6ef7d1 - src/nvidia/src/libraries/resserv/src/rs_client.c
26d872a8495e38065af34aed9a60ab9a08898d40 - src/nvidia/src/libraries/resserv/src/rs_resource.c
408e1e5430e5e507e7e59adc292175150e50b825 - src/nvidia/src/libraries/resserv/src/rs_access_rights.c
304e2fb9bbf6d37358779d4e321f33ac76efcd39 - src/nvidia/src/kernel/diagnostics/nvlog.c
b3a29311cc22e2dae686f8ed2df6bc828aa826cf - src/nvidia/src/kernel/diagnostics/profiler.c
439543a41a36b0959b5f4c099f4adaa379b9f912 - src/nvidia/src/kernel/diagnostics/code_coverage_mgr.c
c1e5733847085bede6eb128eff3bad14549a31db - src/nvidia/src/kernel/diagnostics/nvlog_printf.c
d10c5031c3bc00ae1243729c39496df38d2c9ae3 - src/nvidia/src/kernel/os/os_init.c
2255d1ae2d942c3fed9a4b0a41020d0e49cb8648 - src/nvidia/src/kernel/os/os_timer.c
b887b661ffbe6c223c60f544b1fab32690cd8c75 - src/nvidia/src/kernel/os/os_sanity.c
f228bc86fd9149675cb554d6f596d81fdd4c3770 - src/nvidia/src/kernel/os/os_stubs.c
8800bf3ec679a1c3d36b89992b3f2f95365ec834 - src/nvidia/src/kernel/rmapi/entry_points.c
348c34e13f006f1320536876cb7393d8232e61de - src/nvidia/src/kernel/rmapi/rpc_common.c
8f033323f3ae264a79f779abb163442deb17e88a - src/nvidia/src/kernel/rmapi/rmapi.c
bc7c0b5bd06a1c58714b782d85f740632c6e152f - src/nvidia/src/kernel/rmapi/rmapi_cache_handlers.c
ac6a5b3adf15eac4a7bd9ae24981f6f5fc727097 - src/nvidia/src/kernel/rmapi/deprecated_context.h
b1e57ee17d6641412a4065317be3b81e5db94824 - src/nvidia/src/kernel/rmapi/event_notification.c
a965c5f028c1d47d7da0dd03dabbf8aebc817523 - src/nvidia/src/kernel/rmapi/rs_utils.c
a2ad052692006f70e97fd3d186f19c7ddfe80c4c - src/nvidia/src/kernel/rmapi/deprecated_context.c
7a0a8914b407f836627d8262de2de6cab2dd691d - src/nvidia/src/kernel/rmapi/rmapi_specific.c
d915b65380b59e557e5043f839c42d4105caa111 - src/nvidia/src/kernel/rmapi/rmapi_utils.c
2c5b12d5eb17c313138262cd1e42eb940a4d9ed8 - src/nvidia/src/kernel/rmapi/client.c
ab24efdee819d113fe72ec12c0e359c514151336 - src/nvidia/src/kernel/rmapi/resource_desc_flags.h
1745523e56fc0ff5a45d4b2473e13f0cc6f2afb1 - src/nvidia/src/kernel/rmapi/event_buffer.c
f70b6d7e8f21bf26d9c8171d62cbdf934fe3a30e - src/nvidia/src/kernel/rmapi/rmapi_stubs.c
09fc97bd7daa74a0b2e55fc5632b2f25464412dc - src/nvidia/src/kernel/rmapi/client_resource.c
c21223701bd7afd09e706616105f3f5f365afa5d - src/nvidia/src/kernel/rmapi/rmapi_finn.c
433c6091b3b986151e27ea952cef1dc83ff3095c - src/nvidia/src/kernel/rmapi/lock_test.c
682977753c878ccee6279e539cf11bee2b548752 - src/nvidia/src/kernel/rmapi/resource_desc.c
6dc3f6642c450043cc9b361037f4cb2091e7cb58 - src/nvidia/src/kernel/rmapi/sharing.c
00a6ef509ed8484d038c54b47642bc1a00125077 - src/nvidia/src/kernel/rmapi/lock_stress.c
3b53d6b8ef183702327b4bc3a96aa06f67475ddc - src/nvidia/src/kernel/rmapi/param_copy.c
1c9b26108c6b7f27c5f4fe84e10d83cfb32c9b5b - src/nvidia/src/kernel/rmapi/resource_list.h
3b9809740d88ab4b5b9c9d1adbd3ec304f6f6c7e - src/nvidia/src/kernel/rmapi/resource.c
41c397e2cc8c8b1c9c734c435d2d4c17cf709e63 - src/nvidia/src/kernel/rmapi/mapping_cpu.c
58ed3486109a54829f1afdf214c15529eaed678b - src/nvidia/src/kernel/rmapi/mapping.c
0172aa3770ca55bbfbd5e66f48f4e4820a4d5576 - src/nvidia/src/kernel/rmapi/event.c
e26021985ccfa2fb94c96310d9700df405817889 - src/nvidia/src/kernel/rmapi/control.c
6ee3cc915f68b5b70274eec219b7fd6799479459 - src/nvidia/src/kernel/rmapi/rmapi_cache.c
7a4abc27bdbcbb758545783f4182f200587ae3bd - src/nvidia/src/kernel/rmapi/binary_api.c
f821719c449e0300a3c27ebeaa3f4d6791ddaf60 - src/nvidia/src/kernel/rmapi/alloc_free.c
b7561ece996380512992736f947ddea0ba7f075e - src/nvidia/src/kernel/rmapi/resource_desc.h
72a6ae5bcae8eb4197047aaa5c1780b689544c87 - src/nvidia/src/kernel/rmapi/entry_points.h
4fbbb955e617d7b014e201a5263915939c87f884 - src/nvidia/src/kernel/rmapi/resource_list_required_includes.h
a16bffcad38862470b4424fa9a1b0d4013304600 - src/nvidia/src/kernel/core/hal_mgr.c
4d3f32dbc4cbe3d4d1301079eaf21005f74dea90 - src/nvidia/src/kernel/core/locks_common.c
e7195ca43692b6fbf6a3533437650c596cee88db - src/nvidia/src/kernel/core/locks_minimal.c
ee0bf4f81d33e9a7b6bbb2be27bb3973c8cb5b18 - src/nvidia/src/kernel/core/system.c
905a0f08067503374c757ed34d1ea87379ab4a71 - src/nvidia/src/kernel/core/thread_state.c
afa03f17393b28b9fc791bf09c4d35833447808d - src/nvidia/src/kernel/core/hal/hal.c
d3922085d63a7edf02b582fe0b6e3acba6124c25 - src/nvidia/src/kernel/core/hal/hals_all.c
8eac3ea49f9a53063f7106211e5236372d87bdaf - src/nvidia/src/kernel/core/hal/info_block.c
1f258d22d361a8902c27a4329e553a73b3fbe6e9 - src/nvidia/src/kernel/gpu/device.c
f520afc43afd9e40f779d2bdf3acc48ff7419625 - src/nvidia/src/kernel/gpu/eng_state.c
7ed54a614b756e32a61366d2009db26d1ef5fcc4 - src/nvidia/src/kernel/gpu/gpu_arch.c
1b2a50c873087a28cc4edd4a65945bcafc84bcf0 - src/nvidia/src/kernel/gpu/gpu_uuid.c
5bbac8b7323fe7f048e54b2ebc3ebe4f30655181 - src/nvidia/src/kernel/gpu/gpu.c
c7f5b73c217a181f5ff28886bf691ec7d528cb86 - src/nvidia/src/kernel/gpu/gpu_resource.c
2408846a2a5c24a102df13919f384c6675f56f29 - src/nvidia/src/kernel/gpu/device_ctrl.c
2b40a86a112c7643a69b094194c2ee1dd294f16a - src/nvidia/src/kernel/gpu/gpu_gspclient.c
261a5b014b3869c3ce5e830cf8b9529fa0b8a09d - src/nvidia/src/kernel/gpu/gpu_resource_desc.c
4e1be780ac696a61f056933e5550040a2d42c6bd - src/nvidia/src/kernel/gpu/gpu_device_mapping.c
57941830e179d534a7329608658c82fd91ff4a57 - src/nvidia/src/kernel/gpu/gpu_timeout.c
89a6229720a7d5276d73ad51a210ce6f60cedb08 - src/nvidia/src/kernel/gpu/gpu_user_shared_data.c
bc508781e640dbf756d9c9e43e75227d05b413c7 - src/nvidia/src/kernel/gpu/device_share.c
84c2c6a59313d36aa70c8a01cfedf1d1e7a3d931 - src/nvidia/src/kernel/gpu/gpu_access.c
d0d744c416a52404a52c35ede015629990934003 - src/nvidia/src/kernel/gpu/gpu_engine_type.c
12c1f9494317c34b1b9bfcc58bf7bee81b08c98e - src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c
ea626b20043182e3b374cb05d02c75b482fcd3a3 - src/nvidia/src/kernel/gpu/gpu_rmapi.c
099da8d641fb4481f9a4c625588dd4aa4ce20bcd - src/nvidia/src/kernel/gpu/subdevice/subdevice.c
6fab19f1f68bdb8d2b969efc6f030e2066bc6b5e - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c
b4e503b320119fecdb22dfda1268ce31e1a7ecd7 - src/nvidia/src/kernel/gpu/subdevice/generic_engine.c
9afe5cedd5e7d535ee56f4f5b3cc549f154d8be2 - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c
796d1368584a9318a39ed313dcb86bbcca40ad83 - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c
4c363a34fe12b9bb0d428c3d90974d7085d0366f - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_internal_kernel.c
fcf79cf10019193a9e57f8d19b5a37bac6120365 - src/nvidia/src/kernel/gpu/arch/t25x/kern_gpu_t256d.c
095d4a87b067038bd2d80a1c4b2d9407810b0e66 - src/nvidia/src/kernel/gpu/arch/t26x/kern_gpu_t264d.c
c20ed8bd9fda88b036c6ff677b7c25ebd171434f - src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_arch_t234d.c
b09af17437a01e63e960414a4534074da240dc59 - src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c
ceb516c8064e1df2d18897f98f5c8ea58e907973 - src/nvidia/src/kernel/gpu/disp/disp_capabilities.c
c67baeb5df33080d99f322786759fc3f5436301d - src/nvidia/src/kernel/gpu/disp/disp_channel.c
8fafebf746bfcde2c53435be386a8a0846973b0c - src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c
6437dd659a38c62cd81fb59f229bd94e59f37e71 - src/nvidia/src/kernel/gpu/disp/disp_sf_user.c
0fbfb9dd91147f04bea1060788efc1121078c159 - src/nvidia/src/kernel/gpu/disp/kern_disp.c
5aa67b54fcd16f648d7a72b9c2c4ff3fb6d3a5be - src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c
56027ec220553e1febe42f37fd70757cbb034dcb - src/nvidia/src/kernel/gpu/disp/disp_objs.c
b95080033ecc8736a0cdf9476cec7563c4a2af0f - src/nvidia/src/kernel/gpu/disp/vblank_callback/vblank.c
caba45a10f43e7817f491e7856ef30dd49782f6e - src/nvidia/src/kernel/gpu/disp/head/kernel_head.c
f59763139d9993ae545ded8057706cc4d65afc0c - src/nvidia/src/kernel/gpu/disp/head/arch/v04/kernel_head_0401.c
eb00ffa5a892558d39db15f473e2c308acfd86d9 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0404.c
2b19caf7def14190c99dc4e41983b4a3e3334f22 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0401.c
6d99d644a8294d08b0fdebf183306bbdadf819e3 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c
57fec208154cd0d25838a688f6457598baf2de7a - src/nvidia/src/kernel/gpu/disp/arch/v02/kern_disp_0204.c
64aa574198449e9556328d1c08f08b3bde5bfad0 - src/nvidia/src/kernel/gpu/disp/arch/v05/kern_disp_0501.c
d911e6ae9f7b96e6f441208d38701a8d833e7455 - src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c
ae5ef73d6e74026e0b847977c41b92cbf0f30a62 - src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c
4cfab589176c432463859f148ad32c7dac2c83d3 - src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c
60e8d1fa9cd375be783c4575baa2e99ac2b22a88 - src/nvidia/src/kernel/gpu/timer/timer.c
f6e518524581b772f8fdbc80418a2018570940ca - src/nvidia/src/kernel/gpu/timer/timer_ostimer.c
1f4d15f959df38f4f6ea48c7b10fc859c6e04b12 - src/nvidia/src/kernel/gpu/audio/hda_codec_api.c
10a8bfd47ce609763c07a0d61be2f71f9f91889e - src/nvidia/src/kernel/gpu/mem_mgr/mem_ctrl.c
bfc82499a8b9b8ce10411f6c391b0e575dc7c0d6 - src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c
a62f423d6cf69e96b0523a233ec00353d63ee8bd - src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c
92611eb4f3bed31064a9efbb54a1ece7ffcfc2af - src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c
4a95b73f744807d96510b0ad7181eae5b12839ce - src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c
ce09583697a98a2d0e8466dd45764f15945f55c2 - src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c
cebb9eee63e23bb934881b3313e422b50fb38abb - src/nvidia/src/kernel/gpu/dce_client/dce_client.c
d5d8ff429d3bda7103bafcb2dca94678efc8ddd8 - src/nvidia/src/kernel/gpu_mgr/gpu_group.c
2b49d8a3413a1731bc4fb0bab3f32ff272a71a8c - src/nvidia/src/kernel/gpu_mgr/gpu_db.c
37d1e3dd86e6409b8e461f90386e013194c9e4d1 - src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c
fe618e428d9a172a0fd9412f5a20df64d7270418 - src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c
593bbc5b93b620019144fadf1281a180ec050012 - src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c
54c1d1a44474a7027c5290551e60f13678226301 - src/nvidia/src/kernel/mem_mgr/standard_mem.c
44069d6ebbd94a11267e6cc0179ab167f91faec4 - src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c
5a5e689cf264134ae8c4300d986c209c04167743 - src/nvidia/src/kernel/mem_mgr/vaspace.c
5b9048e62581a3fbb0227d1a46c4ee8d8397bf5b - src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h
630200d06b6588d7fa8c5b1ea16146e8281163d7 - src/nvidia/src/kernel/mem_mgr/io_vaspace.c
04876ed2dedf0ac3228ec6261a0f3f79609e44a5 - src/nvidia/src/kernel/mem_mgr/system_mem.c
873de51b330501a86ec7656fcf3f615034c49f8e - src/nvidia/src/kernel/mem_mgr/os_desc_mem.c
ed8376f04af08af8da7d47c6340ff38a8910de87 - src/nvidia/src/kernel/mem_mgr/mem.c
08762b3172f6309f1aeab895761193fa19cb176f - src/nvidia/interface/nv_sriov_defines.h
024b112ea410ee1b1badb585b03fdbabb64ade34 - src/nvidia/interface/nvrm_registry.h
3f7b20e27e6576ee1f2f0557d269697a0b8af7ec - src/nvidia/interface/nv-firmware-registry.h
d02ee5bb3f19dffd8b5c30dc852cea243bcdf399 - src/nvidia/interface/acpidsmguids.h
60c7cafce7bd5240e8409e3c5b71214262347efc - src/nvidia/interface/acpigenfuncs.h
bff92c9767308a13df1d0858d5f9c82af155679a - src/nvidia/interface/nvacpitypes.h
7790849d0d261e84d04ab5a481bb57309de6409a - src/nvidia/interface/deprecated/rmapi_deprecated_utils.c
82f65de514ef7e2204cfb618d398cf3af8c12778 - src/nvidia/interface/deprecated/rmapi_deprecated.h
49e299b7257e179b701747e061b6b0214d5565f0 - src/nvidia/interface/rmapi/src/g_finn_rm_api.c
7b8431767b7c4b3861582ddab27a079568bf0660 - src/nvidia-modeset/Makefile
7e1249c1d187aec5891eabe5bacae2189d33dc55 - src/nvidia-modeset/lib/nvkms-sync.c
c3ab6005d7083e90145cac66addf815c4f93d9a0 - src/nvidia-modeset/lib/nvkms-format.c
f69ac0ec080036b8abc7f1ae7b857989f5c9df4a - src/nvidia-modeset/include/nvkms-headsurface-3d.h
b8854261256a801af52d1201081afa9c17486a96 - src/nvidia-modeset/include/nvkms-3dvision.h
3212e81bcde5a5dcec5dbba4155a41ca52dd2304 - src/nvidia-modeset/include/nvkms-prealloc.h
24aaf3a4cb16be7a5aaa8317090142743e3dd797 - src/nvidia-modeset/include/nvkms-flip-workarea.h
be6cff078fcf66221762a4af1515e01d294dd2f6 - src/nvidia-modeset/include/nvkms-push.h
4361f10ff446c401c3f52bf36aed52ca24706d49 - src/nvidia-modeset/include/nvkms-vrr.h
08aa0dd2f18a8cf74539ea8b25ef3f3646567a0c - src/nvidia-modeset/include/nvkms-evo1.h
9bfb2d12ecdaecaba7eaaffa3040ab142d37f892 - src/nvidia-modeset/include/nvkms-prealloc-types.h
0bd9cf097cfa373f0bed7be8fe5299e2ea4bf669 - src/nvidia-modeset/include/g_nvkms-evo-states.h
708e037052ea0b3d6309fa44a205282b7a69a331 - src/nvidia-modeset/include/nvkms-difr.h
412d8028a548e67e9ef85cb7d3f88385e70c56f9 - src/nvidia-modeset/include/nvkms-console-restore.h
52b6d1a1a6793d232571e6366709436b018ae3b7 - src/nvidia-modeset/include/nvkms-dpy.h 81fcc817dfb8ae1f98b63d2c1acacc303fedb554 - src/nvidia-modeset/include/nvkms-dpy-override.h 0f251b41b076bb80eeebf7d54e6fd6c764404c28 - src/nvidia-modeset/include/nvkms-evo-states.h 70d9251f331bbf28f5c5bbdf939ebad94db9362d - src/nvidia-modeset/include/nvkms-softfloat.h 6e3681d5caa36312804c91630eaaf510eda897d2 - src/nvidia-modeset/include/nvkms-dma.h eb5248c4b0b51e7aecd2de87e496253b3b235c70 - src/nvidia-modeset/include/nvkms-utils-flip.h 377dd4a29b2ea5937a9b8fc3fba0c9e4ef92992e - src/nvidia-modeset/include/nvkms-cursor.h e1225d674a0e6e58110750868c45a4655110a4d8 - src/nvidia-modeset/include/nvkms-headsurface-swapgroup.h 9e3d50761d3a27c1db3085ff82b7d194ff47bf34 - src/nvidia-modeset/include/nvkms-rm.h fd9fa6da0fc28b00be524b0bed25a68c56278363 - src/nvidia-modeset/include/nvkms-modeset.h be6e0e97c1e7ffc0daa2f14ef7b05b9f9c11dc16 - src/nvidia-modeset/include/nvkms-attributes.h e30d9c286263051d14a1862f0c630295a78abde7 - src/nvidia-modeset/include/nvkms-headsurface-priv.h 3fd0822b8b44d13685ecde9d02300e6cfbb123db - src/nvidia-modeset/include/nvkms-hdmi.h 6b21a68e254becdd2641bc456f194f54c23abe51 - src/nvidia-modeset/include/nvkms-framelock.h 53122264a19ea00ef26e6accde3a3a7570e46b15 - src/nvidia-modeset/include/nvkms-vblank-sem-control.h 1b21352fd9d0b1c5708cb8512acf20ba2e13955d - src/nvidia-modeset/include/nvkms-headsurface.h 59d20eff40e4e488eb3ab7c97b5e171142dcdbcf - src/nvidia-modeset/include/nvkms-modeset-workarea.h 933f9b359a1c3807771e2719c6dd80d71beff3c8 - src/nvidia-modeset/include/nvkms-utils.h f5f3b11c78a8b0eef40c09e1751615a47f516edb - src/nvidia-modeset/include/nvkms-hal.h 03f3fd4c2fb7db83441805a5c350b121bd3117b4 - src/nvidia-modeset/include/nvkms-setlut-workarea.h 31acf6af2a4c82e3429efa77d110cb346c11905f - src/nvidia-modeset/include/nvkms-lut.h e4bae9a0df729119071902f7ad59704c97adee0e - src/nvidia-modeset/include/nvkms-private.h fbe2cbfd32b40d8188c6b25716fb360720ab5760 - src/nvidia-modeset/include/nvkms-evo.h 04f2e01c7f798a615319accc2dd713f617a81172 - src/nvidia-modeset/include/nvkms-headsurface-config.h 4a94381bd8c24b09193577d3f05d6d61f178e1cf - src/nvidia-modeset/include/nvkms-ctxdma.h b4d53599736b03ee1bc149abe7b602336f40295c - src/nvidia-modeset/include/nvkms-flip.h 46fc0e138ba7be5fa3ea0ada3ee0a78656950c80 - src/nvidia-modeset/include/nvkms-modeset-types.h 260b6ef87c755e55a803adad4ce49f2d57315f9a - src/nvidia-modeset/include/nvkms-event.h 35fa1444c57f7adbbddddc612237f3ad38cdd78f - src/nvidia-modeset/include/nvkms-rmapi.h 8782df838ea3d2617e9842c89389f51137b19a73 - src/nvidia-modeset/include/nvkms-headsurface-matrix.h 881d7e4187ff9c7e9d02672aedafc1605f3055ec - src/nvidia-modeset/include/nvkms-modepool.h 60c01e29aa91aa80bf3750a1b11fe61a6cdfde58 - src/nvidia-modeset/include/nvkms-types.h cc3dc4021b76782434efd2aa81d3ffdd1f3b1f0a - src/nvidia-modeset/include/nvkms-headsurface-ioctl.h 3dc2113c55970fa70b7afb4fd30f2f1e777ebc12 - src/nvidia-modeset/include/nvkms-surface.h aa43ad7f970331c56378b7797f66b0a77d8e99dd - src/nvidia-modeset/include/nvkms-evo3.h 8c7e0e15c1038fe518e98d8f86fafb250b10a1d2 - src/nvidia-modeset/include/nvkms-stereo.h 9deeeae9081fd828a14f3b0df5fbf17a81161786 - src/nvidia-modeset/include/nvkms-hw-flip.h 6460f8427fdb375d659975c7f6eaadaca0ed2b2c - src/nvidia-modeset/include/dp/nvdp-device.h 1912d523f567c4fc36075942cf8acaf5d5478232 - src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h a233bdcd5daa0582acf2cd5b0f339ad54d09bf13 - src/nvidia-modeset/include/dp/nvdp-timer.h 
2b91423ff88ca398324088d4f910e81f6944123a - src/nvidia-modeset/include/dp/nvdp-connector.h aa8aa13c6fc48ff5ef621f243e94dcc01a46dea3 - src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h c0de6efe1d5c57da324118f108ea0570a6923036 - src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h b01351ece15ce0d54a19ad0d7ffa056963d72488 - src/nvidia-modeset/kapi/src/nvkms-kapi.c a4d52bb238ce94f3427f25bd169e58d5d5f4abd1 - src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c ce42ceac4c4cf9d249d66ab57ae2f435cd9623fc - src/nvidia-modeset/kapi/src/nvkms-kapi-sync.c 80c2c9a2a05beb0202239db8b0dd7080ff21c194 - src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h 4c856c1324060dcb5a9e72e5e82c7a60f6324733 - src/nvidia-modeset/kapi/interface/nvkms-kapi.h 11af2aeea97398b58f628fe4685b5dfcfda5791b - src/nvidia-modeset/src/nvkms-modeset.c 016fd1b111731c6d323425d52bfe1a04d8bcade7 - src/nvidia-modeset/src/nvkms-headsurface-swapgroup.c 37a6d00e8721a9c4134810f8be3e7168f8cbb226 - src/nvidia-modeset/src/nvkms-evo.c 4758c601621603597bd2387c4f08b3fdc17e375d - src/nvidia-modeset/src/nvkms-hw-flip.c 5e3188c2d9b580ff69e45842f841f5c92c0c6edb - src/nvidia-modeset/src/nvkms-headsurface-ioctl.c e1a3c31638416a0132c5301fe5dd4b1c93f14376 - src/nvidia-modeset/src/nvkms-cursor3.c d48ff2da5fac6f8cd0522a25b947b5b8c01812ba - src/nvidia-modeset/src/nvkms-rm.c 30ad7839985dea46e6b6d43499210a3056da51ad - src/nvidia-modeset/src/nvkms-utils-flip.c 2c24667a18374ae967917df219f3775d9a79ae04 - src/nvidia-modeset/src/nvkms-headsurface-3d.c fb8b4aa1e36f23e1927be3dbd351ab0357aeb735 - src/nvidia-modeset/src/nvkms-evo3.c 9ce404d122bbdcd5f626f2c2b7ff08a9bfcf4045 - src/nvidia-modeset/src/nvkms-flip.c e5c96eb6b9884daf4a8d0d467b009008a45065b9 - src/nvidia-modeset/src/g_nvkms-evo-states.c 094c2169412cb577a6e9db9420da084264119284 - src/nvidia-modeset/src/nvkms-hal.c 1e0bf57319954911ddd2fe87b0cd05e257f1439e - src/nvidia-modeset/src/nvkms-surface.c bd2e4a6102432d4ac1faf92b5d3db29e9e3cfafc - src/nvidia-modeset/src/nvkms-utils.c 6d41c9f84cc9ce2d16812e94a3fba055b3fc7308 - src/nvidia-modeset/src/nvkms-conf.c 05bfe67d8cb956a666804b8f27e507bbd35e2c2d - src/nvidia-modeset/src/nvkms-difr.c 9a8746ee4a4e772b8ac13f06dc0de8a250fdb4c7 - src/nvidia-modeset/src/nvkms-ctxdma.c 382141f251ce64e2d33add3b89225c373da9ea7d - src/nvidia-modeset/src/nvkms-hdmi.c 2e1644a912e7a27ec04288e000c3fa5439eecb60 - src/nvidia-modeset/src/nvkms-headsurface-matrix.c 127a3f77febf09d56b6fe3534bc62ff0ffa535d8 - src/nvidia-modeset/src/nvkms-dpy.c e0756f45732035b1000a03bd8a995a46041904ae - src/nvidia-modeset/src/nvkms-vblank-sem-control.c e4044bb85de59d662d0d579771c076cbe9b10bbb - src/nvidia-modeset/src/nvkms.c 12cbc57714f458b5673115bb5c4d380509d05277 - src/nvidia-modeset/src/nvkms-cursor.c 5c93bc35d8f93330dd7a1f7808e39c6001ee83e8 - src/nvidia-modeset/src/nvkms-headsurface-config.c ed78249de63139ec2629bde58b616cef649281f1 - src/nvidia-modeset/src/nvkms-evo2.c c51c4f2e3ac11bf86d4549ce5e9d9010199e37dd - src/nvidia-modeset/src/nvkms-prealloc.c 9d38d5147d06a293a272087d78d0b96b6003f11e - src/nvidia-modeset/src/nvkms-attributes.c 65b02b48caff2a9100b8c5614f91d42fb20da9c0 - src/nvidia-modeset/src/nvkms-dpy-override.c a62b617aa5c89056c19a5f3c91402df8cfcc1103 - src/nvidia-modeset/src/nvkms-push.c 9fea40b7b55d6ebf3f73b5d469751c873ffbe7c0 - src/nvidia-modeset/src/nvkms-dma.c da726d20eea99a96af4c10aace88f419e8ee2a34 - src/nvidia-modeset/src/nvkms-event.c a1c7c3c1191762c0a1038674dee0075d532ccd2d - src/nvidia-modeset/src/nvkms-headsurface.c 2fabe1c14116a2b07f24d01710394ee84a6e3914 - 
src/nvidia-modeset/src/nvkms-3dvision.c 89b58b1e67ff7ed43c889fe7d85329d7f4762b91 - src/nvidia-modeset/src/nvkms-hw-states.c c799d52bdc792efc377fb5cd307b0eb445c44d6a - src/nvidia-modeset/src/nvkms-cursor2.c dd6c86b5557b02dd15a8ea0f10bde9770d90874e - src/nvidia-modeset/src/nvkms-evo4.c be49ea18102a44914e0d7686c51430df18336383 - src/nvidia-modeset/src/nvkms-framelock.c 6bdb90474b5d31c53104f7b29b447b3f798aaa0e - src/nvidia-modeset/src/nvkms-vrr.c 05ca4acdfeb9b99eccc7e222846fc688473322ae - src/nvidia-modeset/src/nvkms-rmapi-dgpu.c f754a27436fd1e1fa103de6110224c21ad7ea9f4 - src/nvidia-modeset/src/nvkms-pow.c e8c6d2eedfba19f8f06dd57f629588615cf1a2e9 - src/nvidia-modeset/src/nvkms-evo1.c d15f314bea66574e0ffc72966b86bae8366412f5 - src/nvidia-modeset/src/nvkms-console-restore.c 0699860902369359e5ff1a0ef46b87e955d4bb7a - src/nvidia-modeset/src/nvkms-modepool.c 403e6dbff0a607c2aecf3204c56633bd7b612ae2 - src/nvidia-modeset/src/nvkms-stereo.c fd6ecacc4f273c88960148c070dd17d93f49909b - src/nvidia-modeset/src/nvkms-lut.c 771fee54d1123871e380db6f3227b4946b6be647 - src/nvidia-modeset/src/dp/nvdp-timer.cpp 6b985fc50b5040ce1a81418bed73a60edb5d3289 - src/nvidia-modeset/src/dp/nvdp-timer.hpp dcf9f99e79a13b109a8665597f0fc7c00ec37957 - src/nvidia-modeset/src/dp/nvdp-connector.cpp e0e50fc1c526ecf0fe2f60689a25adda1257e2b3 - src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp 16081091156a813977dfdd0718d55ea4a66a0686 - src/nvidia-modeset/src/dp/nvdp-device.cpp 6e17f81da1b94414c1cbf18c3ea92f25352d8bf5 - src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp 81065db63fda6468fdf56d853781fca8af610798 - src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp e1f003a64cec57f299e65567d29e69951a62f44a - src/nvidia-modeset/src/dp/nvdp-host.cpp ca07b8e8f507de47694ac7b3b1719b0931da02c6 - src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp 2b49249a135293d01e82ef11ee520596c9825875 - src/nvidia-modeset/src/shaders/g_pascal_shaders 09cb78322cc8465d42a4be6a1c3682566c66462d - src/nvidia-modeset/src/shaders/g_maxwell_shaders a62c80e00077041d38d84e06c5834dca527e8a55 - src/nvidia-modeset/src/shaders/g_volta_shader_info.h 21cf709a8717d43c4abc6b66c8faad141592b7ce - src/nvidia-modeset/src/shaders/g_nvidia-headsurface-shader-info.h fec9074463a5505e300f9feb77b60ec77b781bb7 - src/nvidia-modeset/src/shaders/g_turing_shader_info.h cad54ab33c1132ba7453f54e9a02d34504e4fd5c - src/nvidia-modeset/src/shaders/g_pascal_shader_info.h f3bdeb7d46fdc9c31940ea799ce4a0d328fe1844 - src/nvidia-modeset/src/shaders/g_ampere_shaders 0ba4739302e0938b5599afb7e7ad281b21e25cec - src/nvidia-modeset/src/shaders/g_maxwell_shader_info.h 1c02043d31faf4f79c4a54dd5a622e87ee276be8 - src/nvidia-modeset/src/shaders/g_volta_shaders f540d144503d00941a1b32fb1a3d13061065b24e - src/nvidia-modeset/src/shaders/g_hopper_shader_info.h 74824b796722071bc3d90e4dacfed245dcda28cd - src/nvidia-modeset/src/shaders/g_turing_shaders ce728856b76bfa428b199fd3b97e0cbc24ef54cd - src/nvidia-modeset/src/shaders/g_hopper_shaders 02bb8bc0f5d228d4a9a383d797daffd8936c4ad7 - src/nvidia-modeset/src/shaders/g_ampere_shader_info.h 9f35175e44247d4facb26a60614d40fcdb74416f - src/nvidia-modeset/src/shaders/g_shader_names.h ca86fee8bd52e6c84e376199c5f3890078bc2031 - src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h b2a5ddfd8dcb3000b9d102bd55b5b560730e81d5 - src/nvidia-modeset/os-interface/include/nvkms.h 51b367a6e289cc8957388745988315024f97506e - src/nvidia-modeset/interface/nvkms-api.h b986bc6591ba17a74ad81ec4c93347564c6d5165 - src/nvidia-modeset/interface/nvkms-format.h 
2ea1436104463c5e3d177e8574c3b4298976d37e - src/nvidia-modeset/interface/nvkms-ioctl.h 3bf4a2d1fec120ef5313c8bf119bc22fb3cf0cc5 - src/nvidia-modeset/interface/nvkms-modetimings.h c54c62de441828282db9a4f5b35c2fa5c97d94f1 - src/nvidia-modeset/interface/nvkms-api-types.h 8e3e74d2b3f45381e7b0012d930cf451cbd1728f - src/nvidia-modeset/interface/nvkms-sync.h

Change-Id: If5ef3d3202eab829a730f4711eb572cfbfea8273
---
 CODE_OF_CONDUCT.md | 141 + CONTRIBUTING.md | 21 + COPYING | 369 + Makefile | 86 + README.md | 987 ++ SECURITY.md | 16 + commitFile.txt | 1510 +++ kernel-open/Kbuild | 306 + kernel-open/Makefile | 184 + kernel-open/common/inc/conftest.h | 34 + kernel-open/common/inc/cpuopsys.h | 437 + kernel-open/common/inc/dce_rm_client_ipc.h | 35 + kernel-open/common/inc/nv-caps.h | 94 + kernel-open/common/inc/nv-chardev-numbers.h | 43 + kernel-open/common/inc/nv-dmabuf.h | 31 + kernel-open/common/inc/nv-firmware-registry.h | 83 + kernel-open/common/inc/nv-firmware.h | 148 + kernel-open/common/inc/nv-gpu-info.h | 44 + kernel-open/common/inc/nv-hash.h | 88 + kernel-open/common/inc/nv-hypervisor.h | 93 + kernel-open/common/inc/nv-ioctl-numa.h | 81 + kernel-open/common/inc/nv-ioctl-numbers.h | 44 + kernel-open/common/inc/nv-ioctl.h | 156 + .../common/inc/nv-kernel-interface-api.h | 41 + kernel-open/common/inc/nv-kref.h | 61 + kernel-open/common/inc/nv-kthread-q-os.h | 64 + kernel-open/common/inc/nv-kthread-q.h | 205 + kernel-open/common/inc/nv-linux.h | 1875 +++ kernel-open/common/inc/nv-list-helpers.h | 76 + kernel-open/common/inc/nv-lock.h | 60 + kernel-open/common/inc/nv-memdbg.h | 49 + kernel-open/common/inc/nv-mm.h | 210 + kernel-open/common/inc/nv-modeset-interface.h | 122 + kernel-open/common/inc/nv-msi.h | 101 + kernel-open/common/inc/nv-pci-types.h | 36 + kernel-open/common/inc/nv-pci.h | 41 + kernel-open/common/inc/nv-pgprot.h | 105 + kernel-open/common/inc/nv-platform.h | 48 + kernel-open/common/inc/nv-procfs-utils.h | 177 + kernel-open/common/inc/nv-procfs.h | 28 + kernel-open/common/inc/nv-proto.h | 95 + kernel-open/common/inc/nv-retpoline.h | 82 + kernel-open/common/inc/nv-time.h | 191 + kernel-open/common/inc/nv-timer.h | 62 + kernel-open/common/inc/nv.h | 1297 ++ kernel-open/common/inc/nvCpuUuid.h | 44 + kernel-open/common/inc/nv_common_utils.h | 120 + kernel-open/common/inc/nv_dpy_id.h | 370 + kernel-open/common/inc/nv_mig_types.h | 46 + .../common/inc/nv_speculation_barrier.h | 227 + kernel-open/common/inc/nv_stdarg.h | 39 + kernel-open/common/inc/nv_uvm_interface.h | 1869 +++ kernel-open/common/inc/nv_uvm_types.h | 1114 ++ kernel-open/common/inc/nv_uvm_user_types.h | 166 + kernel-open/common/inc/nvgputypes.h | 177 + kernel-open/common/inc/nvi2c.h | 37 + kernel-open/common/inc/nvimpshared.h | 96 + kernel-open/common/inc/nvkms-api-types.h | 788 ++ kernel-open/common/inc/nvkms-format.h | 126 + kernel-open/common/inc/nvkms-kapi.h | 1624 +++ kernel-open/common/inc/nvlimits.h | 58 + kernel-open/common/inc/nvmisc.h | 1000 ++ kernel-open/common/inc/nvstatus.h | 123 + kernel-open/common/inc/nvstatuscodes.h | 180 + kernel-open/common/inc/nvtypes.h | 671 ++ kernel-open/common/inc/os-interface.h | 279 + kernel-open/common/inc/os/nv_memory_area.h | 104 + kernel-open/common/inc/os/nv_memory_type.h | 41 + kernel-open/common/inc/os_dsi_panel_props.h | 387 + kernel-open/common/inc/os_gpio.h | 34 + kernel-open/common/inc/rm-gpu-ops.h | 119 + kernel-open/common/inc/rs_access.h | 276 + kernel-open/conftest.sh | 5513 +++++++++ kernel-open/count-lines.mk | 25 + kernel-open/dkms.conf | 12 +
kernel-open/header-presence-tests.mk | 45 + kernel-open/nvidia-drm/nv-kthread-q.c | 329 + kernel-open/nvidia-drm/nv-pci-table.c | 90 + kernel-open/nvidia-drm/nv-pci-table.h | 32 + kernel-open/nvidia-drm/nv_common_utils.h | 120 + .../nvidia-drm/nvidia-dma-resv-helper.h | 136 + kernel-open/nvidia-drm/nvidia-drm-conftest.h | 204 + kernel-open/nvidia-drm/nvidia-drm-connector.c | 640 + kernel-open/nvidia-drm/nvidia-drm-connector.h | 104 + kernel-open/nvidia-drm/nvidia-drm-crtc.c | 3131 +++++ kernel-open/nvidia-drm/nvidia-drm-crtc.h | 354 + kernel-open/nvidia-drm/nvidia-drm-drv.c | 2236 ++++ kernel-open/nvidia-drm/nvidia-drm-drv.h | 44 + kernel-open/nvidia-drm/nvidia-drm-encoder.c | 337 + kernel-open/nvidia-drm/nvidia-drm-encoder.h | 64 + kernel-open/nvidia-drm/nvidia-drm-fb.c | 309 + kernel-open/nvidia-drm/nvidia-drm-fb.h | 60 + kernel-open/nvidia-drm/nvidia-drm-fence.c | 1829 +++ kernel-open/nvidia-drm/nvidia-drm-fence.h | 60 + kernel-open/nvidia-drm/nvidia-drm-format.c | 209 + kernel-open/nvidia-drm/nvidia-drm-format.h | 45 + .../nvidia-drm/nvidia-drm-gem-dma-buf.c | 256 + .../nvidia-drm/nvidia-drm-gem-dma-buf.h | 75 + .../nvidia-drm/nvidia-drm-gem-nvkms-memory.c | 643 + .../nvidia-drm/nvidia-drm-gem-nvkms-memory.h | 118 + .../nvidia-drm/nvidia-drm-gem-user-memory.c | 235 + .../nvidia-drm/nvidia-drm-gem-user-memory.h | 71 + kernel-open/nvidia-drm/nvidia-drm-gem.c | 399 + kernel-open/nvidia-drm/nvidia-drm-gem.h | 187 + kernel-open/nvidia-drm/nvidia-drm-helper.c | 181 + kernel-open/nvidia-drm/nvidia-drm-helper.h | 476 + kernel-open/nvidia-drm/nvidia-drm-ioctl.h | 399 + kernel-open/nvidia-drm/nvidia-drm-linux.c | 66 + kernel-open/nvidia-drm/nvidia-drm-modeset.c | 865 ++ kernel-open/nvidia-drm/nvidia-drm-modeset.h | 53 + .../nvidia-drm/nvidia-drm-os-interface.c | 257 + .../nvidia-drm/nvidia-drm-os-interface.h | 112 + kernel-open/nvidia-drm/nvidia-drm-priv.h | 211 + kernel-open/nvidia-drm/nvidia-drm-sources.mk | 111 + kernel-open/nvidia-drm/nvidia-drm-utils.c | 228 + kernel-open/nvidia-drm/nvidia-drm-utils.h | 54 + kernel-open/nvidia-drm/nvidia-drm.Kbuild | 33 + kernel-open/nvidia-drm/nvidia-drm.c | 61 + kernel-open/nvidia-drm/nvidia-drm.h | 31 + kernel-open/nvidia-modeset/nv-kthread-q.c | 329 + .../nvidia-modeset/nvidia-modeset-linux.c | 2198 ++++ .../nvidia-modeset-os-interface.h | 387 + .../nvidia-modeset/nvidia-modeset.Kbuild | 103 + kernel-open/nvidia-modeset/nvkms-ioctl.h | 73 + kernel-open/nvidia-modeset/nvkms.h | 127 + kernel-open/nvidia/detect-self-hosted.h | 47 + .../hal/library/cryptlib/cryptlib_aead.h | 211 + .../hal/library/cryptlib/cryptlib_cert.h | 416 + .../nvidia/hal/library/cryptlib/cryptlib_dh.h | 98 + .../nvidia/hal/library/cryptlib/cryptlib_ec.h | 246 + .../hal/library/cryptlib/cryptlib_ecd.h | 173 + .../hal/library/cryptlib/cryptlib_hash.h | 772 ++ .../hal/library/cryptlib/cryptlib_hkdf.h | 266 + .../hal/library/cryptlib/cryptlib_mac.h | 833 ++ .../hal/library/cryptlib/cryptlib_rng.h | 30 + .../hal/library/cryptlib/cryptlib_rsa.h | 274 + .../hal/library/cryptlib/cryptlib_sm2.h | 217 + .../nvidia/internal/libspdm_lib_config.h | 91 + kernel-open/nvidia/internal_crypt_lib.h | 170 + kernel-open/nvidia/library/cryptlib.h | 109 + kernel-open/nvidia/library/spdm_lib_config.h | 445 + kernel-open/nvidia/libspdm_aead.c | 477 + kernel-open/nvidia/libspdm_aead_aes_gcm.c | 117 + kernel-open/nvidia/libspdm_ec.c | 172 + kernel-open/nvidia/libspdm_ecc.c | 410 + kernel-open/nvidia/libspdm_hkdf.c | 158 + kernel-open/nvidia/libspdm_hkdf_sha.c | 111 + kernel-open/nvidia/libspdm_hmac_sha.c | 282 
+ .../nvidia/libspdm_internal_crypt_lib.c | 42 + kernel-open/nvidia/libspdm_rand.c | 37 + kernel-open/nvidia/libspdm_rsa.c | 613 + kernel-open/nvidia/libspdm_rsa_ext.c | 85 + kernel-open/nvidia/libspdm_sha.c | 264 + kernel-open/nvidia/libspdm_shash.c | 181 + kernel-open/nvidia/libspdm_x509.c | 682 ++ kernel-open/nvidia/nv-acpi.c | 1546 +++ kernel-open/nvidia/nv-backlight.c | 81 + kernel-open/nvidia/nv-bpmp.c | 108 + kernel-open/nvidia/nv-caps-imex.c | 240 + kernel-open/nvidia/nv-caps-imex.h | 34 + kernel-open/nvidia/nv-caps.c | 878 ++ kernel-open/nvidia/nv-clk.c | 932 ++ kernel-open/nvidia/nv-cray.c | 217 + kernel-open/nvidia/nv-dma.c | 1003 ++ kernel-open/nvidia/nv-dmabuf.c | 1866 +++ kernel-open/nvidia/nv-dsi-parse-panel-props.c | 1014 ++ kernel-open/nvidia/nv-gpio.c | 263 + kernel-open/nvidia/nv-host1x.c | 83 + kernel-open/nvidia/nv-i2c.c | 558 + kernel-open/nvidia/nv-imp.c | 350 + kernel-open/nvidia/nv-ipc-soc.c | 143 + kernel-open/nvidia/nv-kthread-q.c | 329 + kernel-open/nvidia/nv-memdbg.c | 259 + kernel-open/nvidia/nv-mmap.c | 860 ++ kernel-open/nvidia/nv-modeset-interface.c | 146 + kernel-open/nvidia/nv-msi.c | 167 + kernel-open/nvidia/nv-nano-timer.c | 215 + kernel-open/nvidia/nv-p2p.c | 1027 ++ kernel-open/nvidia/nv-p2p.h | 478 + kernel-open/nvidia/nv-pat.c | 405 + kernel-open/nvidia/nv-pat.h | 59 + kernel-open/nvidia/nv-pci-table.c | 90 + kernel-open/nvidia/nv-pci-table.h | 32 + kernel-open/nvidia/nv-pci.c | 2608 ++++ kernel-open/nvidia/nv-platform-pm.c | 130 + kernel-open/nvidia/nv-platform.c | 1700 +++ kernel-open/nvidia/nv-procfs.c | 1465 +++ kernel-open/nvidia/nv-reg.h | 1093 ++ kernel-open/nvidia/nv-report-err.c | 80 + kernel-open/nvidia/nv-report-err.h | 66 + kernel-open/nvidia/nv-rsync.c | 161 + kernel-open/nvidia/nv-rsync.h | 55 + kernel-open/nvidia/nv-tracepoint.h | 66 + kernel-open/nvidia/nv-usermap.c | 155 + kernel-open/nvidia/nv-vm.c | 1104 ++ kernel-open/nvidia/nv-vtophys.c | 39 + kernel-open/nvidia/nv.c | 6285 ++++++++++ kernel-open/nvidia/nv_gpu_ops.h | 348 + kernel-open/nvidia/nv_uvm_interface.c | 1723 +++ kernel-open/nvidia/nvidia-sources.Kbuild | 57 + kernel-open/nvidia/nvidia.Kbuild | 246 + .../nvidia/nvspdm_cryptlib_extensions.h | 46 + kernel-open/nvidia/os-interface.c | 2767 +++++ kernel-open/nvidia/os-mlock.c | 331 + kernel-open/nvidia/os-pci.c | 225 + kernel-open/nvidia/os-registry.c | 356 + kernel-open/nvidia/os-usermap.c | 59 + kernel-open/nvidia/rmp2pdefines.h | 31 + nv-compiler.sh | 82 + push_info.txt | 1 + src/common/displayport/inc/dp_address.h | 288 + src/common/displayport/inc/dp_auxbus.h | 80 + src/common/displayport/inc/dp_auxdefs.h | 96 + src/common/displayport/inc/dp_auxretry.h | 181 + src/common/displayport/inc/dp_bitstream.h | 98 + src/common/displayport/inc/dp_buffer.h | 97 + src/common/displayport/inc/dp_configcaps.h | 1455 +++ src/common/displayport/inc/dp_connector.h | 783 ++ src/common/displayport/inc/dp_connectorimpl.h | 811 ++ src/common/displayport/inc/dp_crc.h | 41 + src/common/displayport/inc/dp_deviceimpl.h | 556 + src/common/displayport/inc/dp_discovery.h | 329 + src/common/displayport/inc/dp_edid.h | 308 + src/common/displayport/inc/dp_evoadapter.h | 446 + src/common/displayport/inc/dp_groupimpl.h | 147 + src/common/displayport/inc/dp_guid.h | 120 + src/common/displayport/inc/dp_hostimp.h | 57 + src/common/displayport/inc/dp_internal.h | 129 + src/common/displayport/inc/dp_linkconfig.h | 559 + src/common/displayport/inc/dp_linkedlist.h | 143 + src/common/displayport/inc/dp_list.h | 84 + src/common/displayport/inc/dp_mainlink.h | 
276 + src/common/displayport/inc/dp_merger.h | 148 + .../displayport/inc/dp_messagecodings.h | 663 + src/common/displayport/inc/dp_messageheader.h | 94 + src/common/displayport/inc/dp_messages.h | 376 + src/common/displayport/inc/dp_object.h | 132 + src/common/displayport/inc/dp_printf.h | 52 + src/common/displayport/inc/dp_qse.h | 109 + .../displayport/inc/dp_regkeydatabase.h | 134 + src/common/displayport/inc/dp_ringbuffer.h | 33 + src/common/displayport/inc/dp_splitter.h | 159 + src/common/displayport/inc/dp_timeout.h | 74 + src/common/displayport/inc/dp_timer.h | 103 + src/common/displayport/inc/dp_tracing.h | 128 + src/common/displayport/inc/dp_vrr.h | 95 + src/common/displayport/inc/dp_wardatabase.h | 75 + src/common/displayport/inc/dp_watermark.h | 141 + .../inc/dptestutil/dp_testmessage.h | 190 + src/common/displayport/src/dp_auxretry.cpp | 316 + src/common/displayport/src/dp_bitstream.cpp | 204 + src/common/displayport/src/dp_buffer.cpp | 272 + src/common/displayport/src/dp_configcaps.cpp | 3043 +++++ .../displayport/src/dp_connectorimpl.cpp | 8735 ++++++++++++++ src/common/displayport/src/dp_crc.cpp | 93 + src/common/displayport/src/dp_deviceimpl.cpp | 3496 ++++++ src/common/displayport/src/dp_discovery.cpp | 939 ++ src/common/displayport/src/dp_edid.cpp | 648 + src/common/displayport/src/dp_evoadapter.cpp | 2186 ++++ src/common/displayport/src/dp_groupimpl.cpp | 745 ++ src/common/displayport/src/dp_guid.cpp | 81 + src/common/displayport/src/dp_linkconfig.cpp | 157 + src/common/displayport/src/dp_list.cpp | 159 + src/common/displayport/src/dp_merger.cpp | 311 + .../displayport/src/dp_messagecodings.cpp | 749 ++ .../displayport/src/dp_messageheader.cpp | 86 + src/common/displayport/src/dp_messages.cpp | 626 + src/common/displayport/src/dp_mst_edid.cpp | 189 + src/common/displayport/src/dp_qse.cpp | 293 + src/common/displayport/src/dp_splitter.cpp | 315 + src/common/displayport/src/dp_sst_edid.cpp | 343 + src/common/displayport/src/dp_timer.cpp | 200 + src/common/displayport/src/dp_vrr.cpp | 248 + src/common/displayport/src/dp_wardatabase.cpp | 650 + src/common/displayport/src/dp_watermark.cpp | 879 ++ .../src/dptestutil/dp_testmessage.cpp | 168 + src/common/inc/displayport/displayport.h | 771 ++ src/common/inc/displayport/displayport2x.h | 185 + src/common/inc/displayport/dpcd.h | 1528 +++ src/common/inc/displayport/dpcd14.h | 748 ++ src/common/inc/displayport/dpcd20.h | 198 + src/common/inc/gps.h | 54 + src/common/inc/hdmi_spec.h | 86 + src/common/inc/jt.h | 115 + src/common/inc/nvBinSegment.h | 36 + src/common/inc/nvBldVer.h | 73 + src/common/inc/nvCpuIntrinsics.h | 438 + src/common/inc/nvCpuUuid.h | 44 + src/common/inc/nvHdmiFrlCommon.h | 134 + src/common/inc/nvPNPVendorIds.h | 572 + src/common/inc/nvSemaphoreCommon.h | 217 + src/common/inc/nvSha1.h | 390 + src/common/inc/nvUnixVersion.h | 16 + src/common/inc/nvVer.h | 18 + src/common/inc/nv_list.h | 558 + src/common/inc/nv_mig_types.h | 46 + src/common/inc/nv_smg.h | 100 + src/common/inc/nv_speculation_barrier.h | 219 + src/common/inc/nvctassert.h | 189 + src/common/inc/nvlog_defs.h | 564 + src/common/inc/nvlog_inc.h | 39 + src/common/inc/nvlog_inc2.h | 46 + src/common/inc/nvop.h | 122 + src/common/inc/nvrmcontext.h | 73 + src/common/inc/pex.h | 71 + src/common/inc/rmosxfac.h | 43 + src/common/inc/swref/common_def_nvlink.h | 44 + .../swref/published/disp/v02_04/dev_disp.h | 58 + .../swref/published/disp/v03_00/dev_disp.h | 281 + .../swref/published/disp/v04_01/dev_disp.h | 59 + .../swref/published/disp/v04_02/dev_disp.h | 27 + 
.../swref/published/disp/v05_01/dev_disp.h | 65 + .../swref/published/disp/v05_02/dev_disp.h | 33 + src/common/inc/swref/published/nv_arch.h | 126 + src/common/inc/swref/published/nv_ref.h | 215 + .../inc/swref/published/t23x/t234/dev_fuse.h | 31 + .../swref/published/turing/tu102/dev_mmu.h | 119 + .../published/turing/tu102/kind_macros.h | 36 + .../modeset/hdmipacket/nvhdmi_frlInterface.h | 268 + src/common/modeset/hdmipacket/nvhdmipkt.c | 706 ++ src/common/modeset/hdmipacket/nvhdmipkt.h | 426 + .../modeset/hdmipacket/nvhdmipkt_0073.c | 422 + .../modeset/hdmipacket/nvhdmipkt_9171.c | 834 ++ .../modeset/hdmipacket/nvhdmipkt_9271.c | 71 + .../modeset/hdmipacket/nvhdmipkt_9471.c | 71 + .../modeset/hdmipacket/nvhdmipkt_9571.c | 71 + .../modeset/hdmipacket/nvhdmipkt_C371.c | 71 + .../modeset/hdmipacket/nvhdmipkt_C671.c | 1415 +++ .../modeset/hdmipacket/nvhdmipkt_C871.c | 641 + .../modeset/hdmipacket/nvhdmipkt_C971.c | 204 + .../modeset/hdmipacket/nvhdmipkt_CC71.c | 46 + .../modeset/hdmipacket/nvhdmipkt_class.h | 197 + .../modeset/hdmipacket/nvhdmipkt_common.h | 132 + .../modeset/hdmipacket/nvhdmipkt_internal.h | 77 + src/common/modeset/timing/displayid.h | 776 ++ src/common/modeset/timing/displayid20.h | 797 ++ src/common/modeset/timing/dpsdp.h | 376 + src/common/modeset/timing/edid.h | 352 + src/common/modeset/timing/nvt_cvt.c | 675 ++ src/common/modeset/timing/nvt_displayid20.c | 2012 ++++ src/common/modeset/timing/nvt_dmt.c | 345 + src/common/modeset/timing/nvt_dsc_pps.c | 2759 +++++ src/common/modeset/timing/nvt_dsc_pps.h | 353 + src/common/modeset/timing/nvt_edid.c | 3214 +++++ src/common/modeset/timing/nvt_edidext_861.c | 4015 +++++++ .../modeset/timing/nvt_edidext_displayid.c | 1386 +++ .../modeset/timing/nvt_edidext_displayid20.c | 436 + src/common/modeset/timing/nvt_gtf.c | 138 + src/common/modeset/timing/nvt_ovt.c | 295 + src/common/modeset/timing/nvt_tv.c | 192 + src/common/modeset/timing/nvt_util.c | 472 + src/common/modeset/timing/nvtiming.h | 5943 +++++++++ src/common/modeset/timing/nvtiming_pvt.h | 170 + .../sdk/nvidia/inc/alloc/alloc_channel.h | 345 + src/common/sdk/nvidia/inc/cc_drv.h | 101 + src/common/sdk/nvidia/inc/class/cl0000.h | 53 + .../nvidia/inc/class/cl0000_notification.h | 60 + src/common/sdk/nvidia/inc/class/cl0001.h | 37 + src/common/sdk/nvidia/inc/class/cl0002.h | 51 + src/common/sdk/nvidia/inc/class/cl0004.h | 50 + src/common/sdk/nvidia/inc/class/cl0005.h | 58 + .../nvidia/inc/class/cl0005_notification.h | 51 + src/common/sdk/nvidia/inc/class/cl0020.h | 31 + src/common/sdk/nvidia/inc/class/cl003e.h | 51 + src/common/sdk/nvidia/inc/class/cl0040.h | 55 + src/common/sdk/nvidia/inc/class/cl0041.h | 45 + src/common/sdk/nvidia/inc/class/cl0070.h | 73 + src/common/sdk/nvidia/inc/class/cl0071.h | 38 + src/common/sdk/nvidia/inc/class/cl0073.h | 56 + src/common/sdk/nvidia/inc/class/cl0076.h | 38 + src/common/sdk/nvidia/inc/class/cl0080.h | 64 + .../nvidia/inc/class/cl0080_notification.h | 45 + src/common/sdk/nvidia/inc/class/cl008f.h | 32 + src/common/sdk/nvidia/inc/class/cl0092.h | 72 + .../sdk/nvidia/inc/class/cl0092_callback.h | 28 + src/common/sdk/nvidia/inc/class/cl00b1.h | 28 + src/common/sdk/nvidia/inc/class/cl00c1.h | 65 + src/common/sdk/nvidia/inc/class/cl00c3.h | 43 + src/common/sdk/nvidia/inc/class/cl00da.h | 85 + src/common/sdk/nvidia/inc/class/cl00de.h | 436 + src/common/sdk/nvidia/inc/class/cl00f2.h | 38 + src/common/sdk/nvidia/inc/class/cl00fc.h | 39 + src/common/sdk/nvidia/inc/class/cl00fe.h | 53 + src/common/sdk/nvidia/inc/class/cl0100.h | 51 + 
src/common/sdk/nvidia/inc/class/cl0101.h | 39 + src/common/sdk/nvidia/inc/class/cl2080.h | 46 + .../nvidia/inc/class/cl2080_notification.h | 613 + src/common/sdk/nvidia/inc/class/cl2081.h | 40 + src/common/sdk/nvidia/inc/class/cl2082.h | 40 + src/common/sdk/nvidia/inc/class/cl30f1.h | 56 + .../nvidia/inc/class/cl30f1_notification.h | 74 + src/common/sdk/nvidia/inc/class/cl402c.h | 47 + src/common/sdk/nvidia/inc/class/cl5070.h | 44 + .../nvidia/inc/class/cl5070_notification.h | 45 + src/common/sdk/nvidia/inc/class/cl50a0.h | 38 + src/common/sdk/nvidia/inc/class/cl84a0.h | 110 + .../sdk/nvidia/inc/class/cl84a0_deprecated.h | 90 + src/common/sdk/nvidia/inc/class/cl900e.h | 39 + src/common/sdk/nvidia/inc/class/cl9010.h | 45 + .../sdk/nvidia/inc/class/cl9010_callback.h | 27 + src/common/sdk/nvidia/inc/class/cl902d.h | 1092 ++ .../sdk/nvidia/inc/class/cl907dswspare.h | 37 + src/common/sdk/nvidia/inc/class/cl9097.h | 3815 ++++++ src/common/sdk/nvidia/inc/class/cl90cd.h | 237 + src/common/sdk/nvidia/inc/class/cl90cdtrace.h | 66 + src/common/sdk/nvidia/inc/class/cl90e7.h | 38 + src/common/sdk/nvidia/inc/class/cl90ec.h | 46 + src/common/sdk/nvidia/inc/class/cl90f1.h | 39 + src/common/sdk/nvidia/inc/class/cl9170.h | 41 + src/common/sdk/nvidia/inc/class/cl9171.h | 317 + src/common/sdk/nvidia/inc/class/cl917a.h | 57 + src/common/sdk/nvidia/inc/class/cl917b.h | 61 + src/common/sdk/nvidia/inc/class/cl917c.h | 298 + .../sdk/nvidia/inc/class/cl917cswspare.h | 37 + src/common/sdk/nvidia/inc/class/cl917d.h | 1551 +++ .../sdk/nvidia/inc/class/cl917dcrcnotif.h | 45 + src/common/sdk/nvidia/inc/class/cl917e.h | 265 + src/common/sdk/nvidia/inc/class/cl9270.h | 41 + src/common/sdk/nvidia/inc/class/cl9271.h | 317 + src/common/sdk/nvidia/inc/class/cl927c.h | 299 + src/common/sdk/nvidia/inc/class/cl927d.h | 1556 +++ src/common/sdk/nvidia/inc/class/cl9470.h | 41 + src/common/sdk/nvidia/inc/class/cl9471.h | 317 + src/common/sdk/nvidia/inc/class/cl947d.h | 1606 +++ src/common/sdk/nvidia/inc/class/cl9570.h | 41 + src/common/sdk/nvidia/inc/class/cl9571.h | 317 + src/common/sdk/nvidia/inc/class/cl957d.h | 1602 +++ src/common/sdk/nvidia/inc/class/cl9770.h | 41 + src/common/sdk/nvidia/inc/class/cl977d.h | 1587 +++ src/common/sdk/nvidia/inc/class/cl9870.h | 41 + src/common/sdk/nvidia/inc/class/cl987d.h | 1590 +++ src/common/sdk/nvidia/inc/class/cla06f.h | 240 + src/common/sdk/nvidia/inc/class/cla06fsubch.h | 33 + src/common/sdk/nvidia/inc/class/cla097.h | 3817 ++++++ src/common/sdk/nvidia/inc/class/cla0b5.h | 262 + src/common/sdk/nvidia/inc/class/cla16f.h | 254 + src/common/sdk/nvidia/inc/class/cla26f.h | 254 + src/common/sdk/nvidia/inc/class/clb06f.h | 260 + src/common/sdk/nvidia/inc/class/clb097.h | 3966 ++++++ src/common/sdk/nvidia/inc/class/clb097tex.h | 2050 ++++ src/common/sdk/nvidia/inc/class/clb0b5sw.h | 54 + src/common/sdk/nvidia/inc/class/clb197.h | 4160 +++++++ src/common/sdk/nvidia/inc/class/clc06f.h | 312 + src/common/sdk/nvidia/inc/class/clc097.h | 4191 +++++++ src/common/sdk/nvidia/inc/class/clc097tex.h | 1353 +++ src/common/sdk/nvidia/inc/class/clc197.h | 4242 +++++++ src/common/sdk/nvidia/inc/class/clc361.h | 35 + src/common/sdk/nvidia/inc/class/clc36f.h | 366 + src/common/sdk/nvidia/inc/class/clc370.h | 43 + .../nvidia/inc/class/clc370_notification.h | 44 + src/common/sdk/nvidia/inc/class/clc371.h | 109 + src/common/sdk/nvidia/inc/class/clc372sw.h | 36 + src/common/sdk/nvidia/inc/class/clc373.h | 350 + src/common/sdk/nvidia/inc/class/clc37a.h | 214 + src/common/sdk/nvidia/inc/class/clc37b.h | 68 + 
src/common/sdk/nvidia/inc/class/clc37d.h | 953 ++ .../sdk/nvidia/inc/class/clc37dcrcnotif.h | 50 + .../sdk/nvidia/inc/class/clc37dswspare.h | 36 + src/common/sdk/nvidia/inc/class/clc37e.h | 498 + src/common/sdk/nvidia/inc/class/clc397.h | 4219 +++++++ src/common/sdk/nvidia/inc/class/clc46f.h | 365 + src/common/sdk/nvidia/inc/class/clc56f.h | 367 + src/common/sdk/nvidia/inc/class/clc570.h | 41 + src/common/sdk/nvidia/inc/class/clc573.h | 606 + src/common/sdk/nvidia/inc/class/clc574.h | 41 + src/common/sdk/nvidia/inc/class/clc57a.h | 213 + src/common/sdk/nvidia/inc/class/clc57b.h | 67 + src/common/sdk/nvidia/inc/class/clc57d.h | 1277 ++ src/common/sdk/nvidia/inc/class/clc57e.h | 657 + src/common/sdk/nvidia/inc/class/clc57esw.h | 45 + src/common/sdk/nvidia/inc/class/clc597.h | 4352 +++++++ src/common/sdk/nvidia/inc/class/clc637.h | 62 + src/common/sdk/nvidia/inc/class/clc638.h | 50 + src/common/sdk/nvidia/inc/class/clc661.h | 27 + src/common/sdk/nvidia/inc/class/clc670.h | 41 + src/common/sdk/nvidia/inc/class/clc671.h | 124 + src/common/sdk/nvidia/inc/class/clc673.h | 505 + src/common/sdk/nvidia/inc/class/clc67a.h | 213 + src/common/sdk/nvidia/inc/class/clc67b.h | 67 + src/common/sdk/nvidia/inc/class/clc67d.h | 1337 +++ src/common/sdk/nvidia/inc/class/clc67e.h | 698 ++ src/common/sdk/nvidia/inc/class/clc697.h | 4352 +++++++ src/common/sdk/nvidia/inc/class/clc770.h | 35 + src/common/sdk/nvidia/inc/class/clc771.h | 257 + src/common/sdk/nvidia/inc/class/clc77d.h | 1372 +++ src/common/sdk/nvidia/inc/class/clc77f.h | 34 + src/common/sdk/nvidia/inc/class/clc797.h | 4481 +++++++ src/common/sdk/nvidia/inc/class/clc7b5.h | 304 + src/common/sdk/nvidia/inc/class/clc86f.h | 191 + src/common/sdk/nvidia/inc/class/clc870.h | 41 + src/common/sdk/nvidia/inc/class/clc871.h | 380 + src/common/sdk/nvidia/inc/class/clc873.h | 399 + src/common/sdk/nvidia/inc/class/clc87d.h | 1336 +++ src/common/sdk/nvidia/inc/class/clc87e.h | 697 ++ src/common/sdk/nvidia/inc/class/clc970.h | 41 + src/common/sdk/nvidia/inc/class/clc971.h | 329 + src/common/sdk/nvidia/inc/class/clc973.h | 379 + src/common/sdk/nvidia/inc/class/clc97a.h | 168 + src/common/sdk/nvidia/inc/class/clc97b.h | 64 + src/common/sdk/nvidia/inc/class/clc97d.h | 1377 +++ .../sdk/nvidia/inc/class/clc97dswspare.h | 41 + src/common/sdk/nvidia/inc/class/clc97e.h | 740 ++ src/common/sdk/nvidia/inc/class/clc997.h | 4481 +++++++ src/common/sdk/nvidia/inc/class/clca70.h | 41 + src/common/sdk/nvidia/inc/class/clcb97.h | 4480 +++++++ src/common/sdk/nvidia/inc/class/clcb97tex.h | 2437 ++++ src/common/sdk/nvidia/inc/class/clcc70.h | 41 + src/common/sdk/nvidia/inc/class/clcc71.h | 186 + src/common/sdk/nvidia/inc/class/clcc73.h | 915 ++ src/common/sdk/nvidia/inc/class/clcc7a.h | 216 + src/common/sdk/nvidia/inc/class/clcc7b.h | 70 + src/common/sdk/nvidia/inc/class/clcc7d.h | 2007 ++++ src/common/sdk/nvidia/inc/class/clcc7e.h | 906 ++ src/common/sdk/nvidia/inc/cpuopsys.h | 422 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000base.h | 68 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000client.h | 184 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h | 324 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000event.h | 145 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h | 1129 ++ .../inc/ctrl/ctrl0000/ctrl0000gpuacct.h | 255 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h | 101 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h | 637 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h | 98 + .../inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h | 112 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000system.h | 3113 +++++ 
.../nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h | 439 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h | 224 + src/common/sdk/nvidia/inc/ctrl/ctrl0002.h | 178 + src/common/sdk/nvidia/inc/ctrl/ctrl0004.h | 93 + src/common/sdk/nvidia/inc/ctrl/ctrl0020.h | 80 + src/common/sdk/nvidia/inc/ctrl/ctrl003e.h | 136 + src/common/sdk/nvidia/inc/ctrl/ctrl0041.h | 494 + src/common/sdk/nvidia/inc/ctrl/ctrl0073.h | 46 + .../nvidia/inc/ctrl/ctrl0073/ctrl0073base.h | 61 + .../nvidia/inc/ctrl/ctrl0073/ctrl0073common.h | 119 + .../nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h | 1665 +++ .../sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h | 3045 +++++ .../nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h | 32 + .../nvidia/inc/ctrl/ctrl0073/ctrl0073event.h | 112 + .../inc/ctrl/ctrl0073/ctrl0073internal.h | 59 + .../nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h | 70 + .../inc/ctrl/ctrl0073/ctrl0073specific.h | 3140 +++++ .../nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h | 201 + .../nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h | 32 + .../nvidia/inc/ctrl/ctrl0073/ctrl0073system.h | 2571 ++++ src/common/sdk/nvidia/inc/ctrl/ctrl0076.h | 41 + src/common/sdk/nvidia/inc/ctrl/ctrl0080.h | 49 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080base.h | 73 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h | 148 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h | 112 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h | 32 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h | 890 ++ .../sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h | 264 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h | 423 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h | 656 + .../sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h | 292 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080host.h | 87 + .../inc/ctrl/ctrl0080/ctrl0080internal.h | 151 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h | 90 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h | 75 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h | 63 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h | 95 + src/common/sdk/nvidia/inc/ctrl/ctrl00da.h | 258 + src/common/sdk/nvidia/inc/ctrl/ctrl00de.h | 59 + src/common/sdk/nvidia/inc/ctrl/ctrl00fe.h | 107 + src/common/sdk/nvidia/inc/ctrl/ctrl0100.h | 237 + src/common/sdk/nvidia/inc/ctrl/ctrl2080.h | 90 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h | 32 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080base.h | 114 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h | 484 + .../inc/ctrl/ctrl2080/ctrl2080boardobj.h | 1557 +++ .../ctrl2080/ctrl2080boardobjgrpclasses.h | 34 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h | 1700 +++ .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h | 511 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h | 32 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h | 45 + .../inc/ctrl/ctrl2080/ctrl2080clkavfs.h | 36 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h | 180 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h | 127 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h | 247 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080event.h | 373 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h | 33 + .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h | 2896 +++++ .../nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h | 1166 ++ .../nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h | 208 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h | 452 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h | 34 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h | 74 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h | 4739 ++++++++ .../nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h | 96 + .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h | 1986 +++ .../nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h | 299 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h | 193 + 
.../nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h | 73 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h | 372 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h | 31 + .../inc/ctrl/ctrl2080/ctrl2080internal.h | 5338 +++++++++ .../nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h | 100 + .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h | 341 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h | 385 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h | 3820 ++++++ .../inc/ctrl/ctrl2080/ctrl2080nvlink_common.h | 42 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h | 923 ++ .../inc/ctrl/ctrl2080/ctrl2080perf_cf.h | 33 + .../ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h | 32 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h | 67 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080pmu.h | 40 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h | 34 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080power.h | 123 + .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h | 384 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080spdm.h | 248 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h | 32 + .../inc/ctrl/ctrl2080/ctrl2080thermal.h | 417 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h | 260 + .../inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h | 32 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h | 190 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h | 42 + .../ctrl/ctrl2080/ctrl2080vgpumgrinternal.h | 522 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h | 38 + .../nvidia/inc/ctrl/ctrl208f/ctrl208fbase.h | 73 + .../nvidia/inc/ctrl/ctrl208f/ctrl208fgpu.h | 169 + .../inc/ctrl/ctrl208f/ctrl208fucodecoverage.h | 145 + src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h | 1560 +++ src/common/sdk/nvidia/inc/ctrl/ctrl402c.h | 971 ++ .../nvidia/inc/ctrl/ctrl5070/ctrl5070base.h | 66 + .../nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h | 1179 ++ .../nvidia/inc/ctrl/ctrl5070/ctrl5070common.h | 79 + .../inc/ctrl/ctrl5070/ctrl5070impoverrides.h | 33 + .../sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h | 564 + .../sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h | 437 + .../nvidia/inc/ctrl/ctrl5070/ctrl5070system.h | 81 + .../nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h | 32 + .../nvidia/inc/ctrl/ctrl83de/ctrl83debase.h | 55 + .../nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h | 1234 ++ src/common/sdk/nvidia/inc/ctrl/ctrl906f.h | 222 + src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h | 174 + src/common/sdk/nvidia/inc/ctrl/ctrl90e7.h | 34 + .../nvidia/inc/ctrl/ctrl90e7/ctrl90e7base.h | 57 + .../nvidia/inc/ctrl/ctrl90e7/ctrl90e7bbx.h | 92 + src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h | 124 + src/common/sdk/nvidia/inc/ctrl/ctrl90f1.h | 407 + src/common/sdk/nvidia/inc/ctrl/ctrla06f.h | 36 + .../nvidia/inc/ctrl/ctrla06f/ctrla06fbase.h | 59 + .../nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h | 261 + .../inc/ctrl/ctrla06f/ctrla06finternal.h | 64 + src/common/sdk/nvidia/inc/ctrl/ctrla081.h | 1076 ++ src/common/sdk/nvidia/inc/ctrl/ctrlb06f.h | 365 + src/common/sdk/nvidia/inc/ctrl/ctrlc36f.h | 159 + .../nvidia/inc/ctrl/ctrlc370/ctrlc370base.h | 66 + .../nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h | 303 + .../nvidia/inc/ctrl/ctrlc370/ctrlc370event.h | 56 + .../sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370or.h | 41 + .../sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h | 123 + .../nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h | 133 + .../nvidia/inc/ctrl/ctrlc372/ctrlc372base.h | 61 + .../nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h | 934 ++ src/common/sdk/nvidia/inc/ctrl/ctrlc637.h | 407 + src/common/sdk/nvidia/inc/ctrl/ctrlc638.h | 91 + src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h | 74 + src/common/sdk/nvidia/inc/dpringbuffertypes.h | 59 + src/common/sdk/nvidia/inc/g_finn_rm_api.h | 717 ++ 
src/common/sdk/nvidia/inc/mmu_fmt_types.h | 151 + src/common/sdk/nvidia/inc/nv-hypervisor.h | 93 + .../sdk/nvidia/inc/nv-kernel-interface-api.h | 41 + src/common/sdk/nvidia/inc/nv_stdarg.h | 39 + src/common/sdk/nvidia/inc/nv_vgpu_types.h | 61 + src/common/sdk/nvidia/inc/nvcd.h | 159 + src/common/sdk/nvidia/inc/nvcfg_sdk.h | 29 + src/common/sdk/nvidia/inc/nvdisptypes.h | 92 + src/common/sdk/nvidia/inc/nverror.h | 297 + src/common/sdk/nvidia/inc/nvfixedtypes.h | 408 + src/common/sdk/nvidia/inc/nvgputypes.h | 177 + src/common/sdk/nvidia/inc/nvi2c.h | 37 + src/common/sdk/nvidia/inc/nvimpshared.h | 93 + src/common/sdk/nvidia/inc/nvlimits.h | 58 + src/common/sdk/nvidia/inc/nvmisc.h | 1000 ++ src/common/sdk/nvidia/inc/nvos.h | 2955 +++++ src/common/sdk/nvidia/inc/nvsecurityinfo.h | 71 + src/common/sdk/nvidia/inc/nvstatus.h | 123 + src/common/sdk/nvidia/inc/nvstatuscodes.h | 180 + src/common/sdk/nvidia/inc/nvtypes.h | 636 + src/common/sdk/nvidia/inc/rs_access.h | 273 + src/common/shared/inc/compat.h | 50 + src/common/shared/inc/nvdevid.h | 709 ++ src/common/shared/nvstatus/nvstatus.c | 83 + src/common/softfloat/COPYING.txt | 37 + src/common/softfloat/nvidia/nv-softfloat.h | 163 + src/common/softfloat/nvidia/platform.h | 56 + .../source/8086-SSE/s_commonNaNToF16UI.c | 51 + .../source/8086-SSE/s_commonNaNToF32UI.c | 51 + .../source/8086-SSE/s_commonNaNToF64UI.c | 53 + .../source/8086-SSE/s_f16UIToCommonNaN.c | 59 + .../source/8086-SSE/s_f32UIToCommonNaN.c | 59 + .../source/8086-SSE/s_f64UIToCommonNaN.c | 59 + .../source/8086-SSE/s_propagateNaNF32UI.c | 63 + .../source/8086-SSE/s_propagateNaNF64UI.c | 63 + .../source/8086-SSE/softfloat_raiseFlags.c | 52 + .../softfloat/source/8086-SSE/specialize.h | 216 + src/common/softfloat/source/f16_to_f32.c | 93 + src/common/softfloat/source/f32_add.c | 61 + src/common/softfloat/source/f32_div.c | 176 + src/common/softfloat/source/f32_eq.c | 66 + .../softfloat/source/f32_eq_signaling.c | 61 + .../softfloat/source/f32_isSignalingNaN.c | 51 + src/common/softfloat/source/f32_le.c | 66 + src/common/softfloat/source/f32_le_quiet.c | 71 + src/common/softfloat/source/f32_lt.c | 66 + src/common/softfloat/source/f32_lt_quiet.c | 71 + src/common/softfloat/source/f32_mul.c | 137 + src/common/softfloat/source/f32_mulAdd.c | 60 + src/common/softfloat/source/f32_rem.c | 168 + src/common/softfloat/source/f32_roundToInt.c | 113 + src/common/softfloat/source/f32_sqrt.c | 121 + src/common/softfloat/source/f32_sub.c | 61 + src/common/softfloat/source/f32_to_f16.c | 88 + src/common/softfloat/source/f32_to_f64.c | 93 + src/common/softfloat/source/f32_to_i32.c | 84 + .../softfloat/source/f32_to_i32_r_minMag.c | 89 + src/common/softfloat/source/f32_to_i64.c | 84 + .../softfloat/source/f32_to_i64_r_minMag.c | 94 + src/common/softfloat/source/f32_to_ui32.c | 84 + .../softfloat/source/f32_to_ui32_r_minMag.c | 88 + src/common/softfloat/source/f32_to_ui64.c | 84 + .../softfloat/source/f32_to_ui64_r_minMag.c | 90 + src/common/softfloat/source/f64_add.c | 65 + src/common/softfloat/source/f64_div.c | 172 + src/common/softfloat/source/f64_eq.c | 66 + .../softfloat/source/f64_eq_signaling.c | 61 + .../softfloat/source/f64_isSignalingNaN.c | 51 + src/common/softfloat/source/f64_le.c | 67 + src/common/softfloat/source/f64_le_quiet.c | 72 + src/common/softfloat/source/f64_lt.c | 67 + src/common/softfloat/source/f64_lt_quiet.c | 72 + src/common/softfloat/source/f64_mul.c | 139 + src/common/softfloat/source/f64_mulAdd.c | 60 + src/common/softfloat/source/f64_rem.c | 185 + 
src/common/softfloat/source/f64_roundToInt.c | 113 + src/common/softfloat/source/f64_sqrt.c | 133 + src/common/softfloat/source/f64_sub.c | 65 + src/common/softfloat/source/f64_to_f32.c | 88 + src/common/softfloat/source/f64_to_i32.c | 82 + .../softfloat/source/f64_to_i32_r_minMag.c | 96 + src/common/softfloat/source/f64_to_i64.c | 84 + .../softfloat/source/f64_to_i64_r_minMag.c | 100 + src/common/softfloat/source/f64_to_ui32.c | 82 + .../softfloat/source/f64_to_ui32_r_minMag.c | 88 + src/common/softfloat/source/f64_to_ui64.c | 84 + .../softfloat/source/f64_to_ui64_r_minMag.c | 93 + src/common/softfloat/source/i32_to_f32.c | 58 + src/common/softfloat/source/i32_to_f64.c | 65 + src/common/softfloat/source/i64_to_f32.c | 70 + src/common/softfloat/source/i64_to_f64.c | 58 + .../softfloat/source/include/internals.h | 147 + .../softfloat/source/include/primitiveTypes.h | 83 + .../softfloat/source/include/primitives.h | 297 + .../softfloat/source/include/softfloat.h | 172 + .../source/include/softfloat_types.h | 81 + src/common/softfloat/source/s_addMagsF32.c | 126 + src/common/softfloat/source/s_addMagsF64.c | 128 + .../softfloat/source/s_approxRecipSqrt32_1.c | 74 + .../softfloat/source/s_approxRecipSqrt_1Ks.c | 49 + .../softfloat/source/s_countLeadingZeros64.c | 73 + .../softfloat/source/s_countLeadingZeros8.c | 59 + src/common/softfloat/source/s_mul64To128.c | 67 + src/common/softfloat/source/s_mulAddF32.c | 224 + src/common/softfloat/source/s_mulAddF64.c | 243 + .../softfloat/source/s_normRoundPackToF32.c | 58 + .../softfloat/source/s_normRoundPackToF64.c | 58 + .../softfloat/source/s_normSubnormalF16Sig.c | 52 + .../softfloat/source/s_normSubnormalF32Sig.c | 52 + .../softfloat/source/s_normSubnormalF64Sig.c | 52 + .../softfloat/source/s_roundPackToF16.c | 113 + .../softfloat/source/s_roundPackToF32.c | 113 + .../softfloat/source/s_roundPackToF64.c | 117 + src/common/softfloat/source/s_roundToI32.c | 84 + src/common/softfloat/source/s_roundToI64.c | 89 + src/common/softfloat/source/s_roundToUI32.c | 80 + src/common/softfloat/source/s_roundToUI64.c | 85 + .../softfloat/source/s_shiftRightJam128.c | 70 + src/common/softfloat/source/s_subMagsF32.c | 143 + src/common/softfloat/source/s_subMagsF64.c | 141 + src/common/softfloat/source/softfloat_state.c | 49 + src/common/softfloat/source/ui32_to_f32.c | 57 + src/common/softfloat/source/ui32_to_f64.c | 59 + src/common/softfloat/source/ui64_to_f32.c | 64 + src/common/softfloat/source/ui64_to_f64.c | 59 + src/common/src/nv_smg.c | 677 ++ src/common/unix/common/inc/nv-float.h | 40 + src/common/unix/common/inc/nv_amodel_enum.h | 40 + src/common/unix/common/inc/nv_assert.h | 82 + src/common/unix/common/inc/nv_common_utils.h | 120 + src/common/unix/common/inc/nv_dpy_id.h | 370 + src/common/unix/common/inc/nv_mode_timings.h | 163 + .../utils/interface/nv_memory_tracker.h | 62 + .../utils/interface/nv_mode_timings_utils.h | 135 + .../common/utils/interface/nv_vasprintf.h | 65 + .../common/utils/interface/unix_rm_handle.h | 122 + .../unix/common/utils/nv_memory_tracker.c | 230 + .../unix/common/utils/nv_mode_timings_utils.c | 159 + src/common/unix/common/utils/nv_vasprintf.c | 74 + src/common/unix/common/utils/unix_rm_handle.c | 385 + .../unix/nvidia-3d/include/nv_xz_mem_hooks.h | 44 + .../unix/nvidia-3d/include/nvidia-3d-fermi.h | 41 + .../unix/nvidia-3d/include/nvidia-3d-hopper.h | 35 + .../unix/nvidia-3d/include/nvidia-3d-kepler.h | 45 + .../nvidia-3d/include/nvidia-3d-maxwell.h | 35 + .../unix/nvidia-3d/include/nvidia-3d-pascal.h | 35 + 
.../nvidia-3d/include/nvidia-3d-surface.h | 33 + .../unix/nvidia-3d/include/nvidia-3d-turing.h | 35 + .../nvidia-3d/include/nvidia-3d-types-priv.h | 48 + .../include/nvidia-3d-vertex-arrays.h | 32 + .../unix/nvidia-3d/include/nvidia-3d-volta.h | 35 + .../interface/nvidia-3d-color-targets.h | 93 + .../interface/nvidia-3d-constant-buffers.h | 196 + .../nvidia-3d/interface/nvidia-3d-imports.h | 41 + .../interface/nvidia-3d-shader-constants.h | 53 + .../nvidia-3d/interface/nvidia-3d-shaders.h | 69 + .../nvidia-3d/interface/nvidia-3d-types.h | 477 + .../nvidia-3d/interface/nvidia-3d-utils.h | 104 + .../unix/nvidia-3d/interface/nvidia-3d.h | 296 + .../unix/nvidia-3d/src/nvidia-3d-core.c | 162 + .../unix/nvidia-3d/src/nvidia-3d-fermi.c | 557 + .../unix/nvidia-3d/src/nvidia-3d-hopper.c | 384 + .../unix/nvidia-3d/src/nvidia-3d-init.c | 493 + .../unix/nvidia-3d/src/nvidia-3d-kepler.c | 150 + .../unix/nvidia-3d/src/nvidia-3d-maxwell.c | 435 + .../unix/nvidia-3d/src/nvidia-3d-pascal.c | 431 + .../unix/nvidia-3d/src/nvidia-3d-surface.c | 296 + .../unix/nvidia-3d/src/nvidia-3d-turing.c | 56 + .../nvidia-3d/src/nvidia-3d-vertex-arrays.c | 531 + .../unix/nvidia-3d/src/nvidia-3d-volta.c | 41 + .../nvidia-headsurface-constants.h | 44 + .../nvidia-headsurface-types.h | 67 + .../include/nvidia-push-priv-imports.h | 203 + .../nvidia-push/include/nvidia-push-priv.h | 122 + .../nvidia-push/interface/nvidia-push-init.h | 267 + .../interface/nvidia-push-methods.h | 259 + .../nvidia-push/interface/nvidia-push-types.h | 295 + .../nvidia-push/interface/nvidia-push-utils.h | 180 + .../unix/nvidia-push/src/nvidia-push-init.c | 1551 +++ src/common/unix/nvidia-push/src/nvidia-push.c | 1161 ++ src/common/unix/xzminidec/interface/xz.h | 285 + src/common/unix/xzminidec/src/xz_config.h | 113 + src/common/unix/xzminidec/src/xz_crc32.c | 59 + src/common/unix/xzminidec/src/xz_dec_bcj.c | 574 + src/common/unix/xzminidec/src/xz_dec_lzma2.c | 1173 ++ src/common/unix/xzminidec/src/xz_dec_stream.c | 829 ++ src/common/unix/xzminidec/src/xz_lzma2.h | 204 + src/common/unix/xzminidec/src/xz_private.h | 156 + src/common/unix/xzminidec/src/xz_stream.h | 62 + src/nvidia-modeset/Makefile | 225 + .../include/dp/nvdp-connector-event-sink.h | 44 + .../include/dp/nvdp-connector.h | 98 + src/nvidia-modeset/include/dp/nvdp-device.h | 47 + src/nvidia-modeset/include/dp/nvdp-timer.h | 41 + .../include/g_nvkms-evo-states.h | 46 + src/nvidia-modeset/include/nvkms-3dvision.h | 39 + src/nvidia-modeset/include/nvkms-attributes.h | 51 + .../include/nvkms-console-restore.h | 31 + src/nvidia-modeset/include/nvkms-ctxdma.h | 42 + src/nvidia-modeset/include/nvkms-cursor.h | 53 + src/nvidia-modeset/include/nvkms-difr.h | 45 + src/nvidia-modeset/include/nvkms-dma.h | 286 + .../include/nvkms-dpy-override.h | 63 + src/nvidia-modeset/include/nvkms-dpy.h | 98 + src/nvidia-modeset/include/nvkms-event.h | 32 + src/nvidia-modeset/include/nvkms-evo-states.h | 107 + src/nvidia-modeset/include/nvkms-evo.h | 423 + src/nvidia-modeset/include/nvkms-evo1.h | 78 + src/nvidia-modeset/include/nvkms-evo3.h | 333 + .../include/nvkms-flip-workarea.h | 73 + src/nvidia-modeset/include/nvkms-flip.h | 110 + src/nvidia-modeset/include/nvkms-framelock.h | 79 + src/nvidia-modeset/include/nvkms-hal.h | 31 + src/nvidia-modeset/include/nvkms-hdmi.h | 94 + .../include/nvkms-headsurface-3d.h | 70 + .../include/nvkms-headsurface-config.h | 238 + .../include/nvkms-headsurface-ioctl.h | 50 + .../include/nvkms-headsurface-matrix.h | 33 + .../include/nvkms-headsurface-priv.h | 531 + 
.../include/nvkms-headsurface-swapgroup.h | 89 + .../include/nvkms-headsurface.h | 134 + src/nvidia-modeset/include/nvkms-hw-flip.h | 128 + src/nvidia-modeset/include/nvkms-lut.h | 63 + src/nvidia-modeset/include/nvkms-modepool.h | 65 + .../include/nvkms-modeset-types.h | 88 + .../include/nvkms-modeset-workarea.h | 64 + src/nvidia-modeset/include/nvkms-modeset.h | 89 + .../include/nvkms-prealloc-types.h | 55 + src/nvidia-modeset/include/nvkms-prealloc.h | 44 + src/nvidia-modeset/include/nvkms-private.h | 80 + src/nvidia-modeset/include/nvkms-push.h | 31 + src/nvidia-modeset/include/nvkms-rm.h | 164 + src/nvidia-modeset/include/nvkms-rmapi.h | 119 + .../include/nvkms-setlut-workarea.h | 36 + src/nvidia-modeset/include/nvkms-softfloat.h | 90 + src/nvidia-modeset/include/nvkms-stereo.h | 43 + src/nvidia-modeset/include/nvkms-surface.h | 111 + src/nvidia-modeset/include/nvkms-types.h | 3283 +++++ src/nvidia-modeset/include/nvkms-utils-flip.h | 73 + src/nvidia-modeset/include/nvkms-utils.h | 288 + .../include/nvkms-vblank-sem-control.h | 44 + src/nvidia-modeset/include/nvkms-vrr.h | 80 + .../interface/nvkms-api-types.h | 788 ++ src/nvidia-modeset/interface/nvkms-api.h | 4435 +++++++ src/nvidia-modeset/interface/nvkms-format.h | 126 + src/nvidia-modeset/interface/nvkms-ioctl.h | 73 + .../interface/nvkms-modetimings.h | 61 + src/nvidia-modeset/interface/nvkms-sync.h | 100 + .../kapi/include/nvkms-kapi-internal.h | 268 + .../kapi/include/nvkms-kapi-notifiers.h | 99 + .../kapi/interface/nvkms-kapi-private.h | 67 + .../kapi/interface/nvkms-kapi.h | 1624 +++ .../kapi/src/nvkms-kapi-notifiers.c | 349 + src/nvidia-modeset/kapi/src/nvkms-kapi-sync.c | 571 + src/nvidia-modeset/kapi/src/nvkms-kapi.c | 4113 +++++++ src/nvidia-modeset/lib/nvkms-format.c | 133 + src/nvidia-modeset/lib/nvkms-sync.c | 402 + .../include/nvidia-modeset-os-interface.h | 387 + .../os-interface/include/nvkms.h | 127 + .../src/dp/nvdp-connector-event-sink.cpp | 653 + .../src/dp/nvdp-connector-event-sink.hpp | 103 + src/nvidia-modeset/src/dp/nvdp-connector.cpp | 1234 ++ src/nvidia-modeset/src/dp/nvdp-device.cpp | 169 + .../src/dp/nvdp-evo-interface.cpp | 168 + .../src/dp/nvdp-evo-interface.hpp | 61 + src/nvidia-modeset/src/dp/nvdp-host.cpp | 77 + src/nvidia-modeset/src/dp/nvdp-timer.cpp | 141 + src/nvidia-modeset/src/dp/nvdp-timer.hpp | 93 + src/nvidia-modeset/src/g_nvkms-evo-states.c | 2826 +++++ src/nvidia-modeset/src/nvkms-3dvision.c | 54 + src/nvidia-modeset/src/nvkms-attributes.c | 1552 +++ src/nvidia-modeset/src/nvkms-conf.c | 599 + .../src/nvkms-console-restore.c | 983 ++ src/nvidia-modeset/src/nvkms-ctxdma.c | 221 + src/nvidia-modeset/src/nvkms-cursor.c | 457 + src/nvidia-modeset/src/nvkms-cursor2.c | 50 + src/nvidia-modeset/src/nvkms-cursor3.c | 125 + src/nvidia-modeset/src/nvkms-difr.c | 867 ++ src/nvidia-modeset/src/nvkms-dma.c | 506 + src/nvidia-modeset/src/nvkms-dpy-override.c | 264 + src/nvidia-modeset/src/nvkms-dpy.c | 3531 ++++++ src/nvidia-modeset/src/nvkms-event.c | 207 + src/nvidia-modeset/src/nvkms-evo.c | 10006 ++++++++++++++++ src/nvidia-modeset/src/nvkms-evo1.c | 809 ++ src/nvidia-modeset/src/nvkms-evo2.c | 4200 +++++++ src/nvidia-modeset/src/nvkms-evo3.c | 8596 +++++++++++++ src/nvidia-modeset/src/nvkms-evo4.c | 1913 +++ src/nvidia-modeset/src/nvkms-flip.c | 1281 ++ src/nvidia-modeset/src/nvkms-framelock.c | 2396 ++++ src/nvidia-modeset/src/nvkms-hal.c | 212 + src/nvidia-modeset/src/nvkms-hdmi.c | 2453 ++++ src/nvidia-modeset/src/nvkms-headsurface-3d.c | 2062 ++++ .../src/nvkms-headsurface-config.c | 2693 
+++++ .../src/nvkms-headsurface-ioctl.c | 725 ++ .../src/nvkms-headsurface-matrix.c | 661 + .../src/nvkms-headsurface-swapgroup.c | 949 ++ src/nvidia-modeset/src/nvkms-headsurface.c | 2975 +++++ src/nvidia-modeset/src/nvkms-hw-flip.c | 3359 ++++++ src/nvidia-modeset/src/nvkms-hw-states.c | 1139 ++ src/nvidia-modeset/src/nvkms-lut.c | 458 + src/nvidia-modeset/src/nvkms-modepool.c | 2091 ++++ src/nvidia-modeset/src/nvkms-modeset.c | 4292 +++++++ src/nvidia-modeset/src/nvkms-pow.c | 468 + src/nvidia-modeset/src/nvkms-prealloc.c | 164 + src/nvidia-modeset/src/nvkms-push.c | 309 + src/nvidia-modeset/src/nvkms-rm.c | 5816 +++++++++ src/nvidia-modeset/src/nvkms-rmapi-dgpu.c | 284 + src/nvidia-modeset/src/nvkms-stereo.c | 62 + src/nvidia-modeset/src/nvkms-surface.c | 1384 +++ src/nvidia-modeset/src/nvkms-utils-flip.c | 399 + src/nvidia-modeset/src/nvkms-utils.c | 803 ++ .../src/nvkms-vblank-sem-control.c | 337 + src/nvidia-modeset/src/nvkms-vrr.c | 1112 ++ src/nvidia-modeset/src/nvkms.c | 6901 +++++++++++ .../src/shaders/g_ampere_shader_info.h | 328 + .../src/shaders/g_ampere_shaders | Bin 0 -> 244480 bytes .../src/shaders/g_hopper_shader_info.h | 328 + .../src/shaders/g_hopper_shaders | Bin 0 -> 244224 bytes .../src/shaders/g_maxwell_shader_info.h | 428 + .../src/shaders/g_maxwell_shaders | Bin 0 -> 171776 bytes .../g_nvidia-headsurface-shader-info.h | 341 + .../src/shaders/g_pascal_shader_info.h | 428 + .../src/shaders/g_pascal_shaders | Bin 0 -> 171776 bytes .../src/shaders/g_shader_names.h | 46 + .../src/shaders/g_turing_shader_info.h | 328 + .../src/shaders/g_turing_shaders | Bin 0 -> 247296 bytes .../src/shaders/g_volta_shader_info.h | 328 + .../src/shaders/g_volta_shaders | Bin 0 -> 250240 bytes src/nvidia-modeset/srcs.mk | 225 + src/nvidia/Makefile | 217 + .../arch/nvalloc/common/inc/nv-firmware.h | 139 + .../arch/nvalloc/common/inc/nvrangetypes.h | 162 + .../arch/nvalloc/unix/include/nv-caps.h | 94 + .../nvalloc/unix/include/nv-chardev-numbers.h | 43 + .../arch/nvalloc/unix/include/nv-gpu-info.h | 44 + .../unix/include/nv-ioctl-lockless-diag.h | 43 + .../arch/nvalloc/unix/include/nv-ioctl-numa.h | 81 + .../nvalloc/unix/include/nv-ioctl-numbers.h | 44 + .../arch/nvalloc/unix/include/nv-ioctl.h | 156 + .../unix/include/nv-kernel-rmapi-ops.h | 61 + .../arch/nvalloc/unix/include/nv-nb-regs.h | 65 + .../arch/nvalloc/unix/include/nv-priv.h | 364 + src/nvidia/arch/nvalloc/unix/include/nv-reg.h | 1093 ++ .../include/nv-unix-nvos-params-wrappers.h | 49 + src/nvidia/arch/nvalloc/unix/include/nv.h | 1293 ++ .../arch/nvalloc/unix/include/nv_escape.h | 55 + .../arch/nvalloc/unix/include/os-interface.h | 275 + .../arch/nvalloc/unix/include/os_custom.h | 60 + src/nvidia/arch/nvalloc/unix/include/osapi.h | 189 + .../nvalloc/unix/include/rmobjexportimport.h | 40 + src/nvidia/arch/nvalloc/unix/src/escape.c | 869 ++ .../arch/nvalloc/unix/src/exports-stubs.c | 316 + src/nvidia/arch/nvalloc/unix/src/gcc_helper.c | 35 + .../nvalloc/unix/src/os-hypervisor-stubs.c | 192 + src/nvidia/arch/nvalloc/unix/src/os.c | 5391 +++++++++ src/nvidia/arch/nvalloc/unix/src/osapi.c | 4818 ++++++++ src/nvidia/arch/nvalloc/unix/src/osinit.c | 2043 ++++ src/nvidia/arch/nvalloc/unix/src/osmemdesc.c | 1218 ++ src/nvidia/arch/nvalloc/unix/src/osunix.c | 39 + .../nvalloc/unix/src/power-management-tegra.c | 161 + src/nvidia/arch/nvalloc/unix/src/registry.c | 524 + .../arch/nvalloc/unix/src/rmobjexportimport.c | 604 + src/nvidia/exports_link_command.txt | 108 + src/nvidia/generated/g_allclasses.h | 280 + 
src/nvidia/generated/g_binary_api_nvoc.c | 1077 ++
src/nvidia/generated/g_binary_api_nvoc.h | 589 +
src/nvidia/generated/g_chips2halspec.h | 3 +
src/nvidia/generated/g_chips2halspec_nvoc.c | 86 +
src/nvidia/generated/g_chips2halspec_nvoc.h | 165 +
src/nvidia/generated/g_client_nvoc.c | 511 +
src/nvidia/generated/g_client_nvoc.h | 430 +
src/nvidia/generated/g_client_resource_nvoc.c | 1627 +++
src/nvidia/generated/g_client_resource_nvoc.h | 857 ++
.../generated/g_code_coverage_mgr_nvoc.c | 204 +
.../generated/g_code_coverage_mgr_nvoc.h | 226 +
src/nvidia/generated/g_context_dma_nvoc.c | 582 +
src/nvidia/generated/g_context_dma_nvoc.h | 469 +
src/nvidia/generated/g_dce_client_nvoc.c | 325 +
src/nvidia/generated/g_dce_client_nvoc.h | 327 +
src/nvidia/generated/g_device_nvoc.c | 826 ++
src/nvidia/generated/g_device_nvoc.h | 594 +
.../generated/g_disp_capabilities_nvoc.c | 518 +
.../generated/g_disp_capabilities_nvoc.h | 333 +
src/nvidia/generated/g_disp_channel_nvoc.c | 1853 +++
src/nvidia/generated/g_disp_channel_nvoc.h | 1059 ++
src/nvidia/generated/g_disp_inst_mem_nvoc.c | 222 +
src/nvidia/generated/g_disp_inst_mem_nvoc.h | 403 +
src/nvidia/generated/g_disp_objs_nvoc.c | 5578 +++++++++
src/nvidia/generated/g_disp_objs_nvoc.h | 2846 +++++
src/nvidia/generated/g_disp_sf_user_nvoc.c | 518 +
src/nvidia/generated/g_disp_sf_user_nvoc.h | 333 +
src/nvidia/generated/g_eng_desc_nvoc.h | 1792 +++
src/nvidia/generated/g_eng_state_nvoc.c | 214 +
src/nvidia/generated/g_eng_state_nvoc.h | 413 +
src/nvidia/generated/g_event_buffer_nvoc.c | 526 +
src/nvidia/generated/g_event_buffer_nvoc.h | 386 +
src/nvidia/generated/g_event_nvoc.c | 920 ++
src/nvidia/generated/g_event_nvoc.h | 752 ++
src/nvidia/generated/g_generic_engine_nvoc.c | 519 +
src/nvidia/generated/g_generic_engine_nvoc.h | 330 +
src/nvidia/generated/g_gpu_access_nvoc.c | 568 +
src/nvidia/generated/g_gpu_access_nvoc.h | 764 +
src/nvidia/generated/g_gpu_arch_nvoc.c | 243 +
src/nvidia/generated/g_gpu_arch_nvoc.h | 206 +
src/nvidia/generated/g_gpu_class_list.c | 137 +
src/nvidia/generated/g_gpu_db_nvoc.c | 204 +
src/nvidia/generated/g_gpu_db_nvoc.h | 205 +
src/nvidia/generated/g_gpu_group_nvoc.c | 198 +
src/nvidia/generated/g_gpu_group_nvoc.h | 352 +
src/nvidia/generated/g_gpu_halspec_nvoc.c | 238 +
src/nvidia/generated/g_gpu_halspec_nvoc.h | 195 +
src/nvidia/generated/g_gpu_mgmt_api_nvoc.c | 466 +
src/nvidia/generated/g_gpu_mgmt_api_nvoc.h | 313 +
src/nvidia/generated/g_gpu_mgr_nvoc.c | 204 +
src/nvidia/generated/g_gpu_mgr_nvoc.h | 623 +
src/nvidia/generated/g_gpu_nvoc.c | 698 ++
src/nvidia/generated/g_gpu_nvoc.h | 5062 ++++++++
src/nvidia/generated/g_gpu_resource_nvoc.c | 441 +
src/nvidia/generated/g_gpu_resource_nvoc.h | 428 +
.../generated/g_gpu_user_shared_data_nvoc.c | 558 +
.../generated/g_gpu_user_shared_data_nvoc.h | 354 +
src/nvidia/generated/g_hal.h | 161 +
src/nvidia/generated/g_hal_archimpl.h | 113 +
src/nvidia/generated/g_hal_mgr_nvoc.c | 204 +
src/nvidia/generated/g_hal_mgr_nvoc.h | 175 +
src/nvidia/generated/g_hal_nvoc.c | 198 +
src/nvidia/generated/g_hal_nvoc.h | 185 +
src/nvidia/generated/g_hal_private.h | 104 +
src/nvidia/generated/g_hal_register.h | 89 +
src/nvidia/generated/g_hda_codec_api_nvoc.c | 515 +
src/nvidia/generated/g_hda_codec_api_nvoc.h | 321 +
src/nvidia/generated/g_hypervisor_nvoc.h | 231 +
src/nvidia/generated/g_io_vaspace_nvoc.c | 285 +
src/nvidia/generated/g_io_vaspace_nvoc.h | 347 +
src/nvidia/generated/g_ioaccess_nvoc.c | 137 +
src/nvidia/generated/g_ioaccess_nvoc.h | 288 +
src/nvidia/generated/g_journal_nvoc.h | 58 +
src/nvidia/generated/g_kern_disp_nvoc.c | 420 +
src/nvidia/generated/g_kern_disp_nvoc.h | 1261 ++
src/nvidia/generated/g_kernel_head_nvoc.c | 277 +
src/nvidia/generated/g_kernel_head_nvoc.h | 581 +
src/nvidia/generated/g_lock_stress_nvoc.c | 727 ++
src/nvidia/generated/g_lock_stress_nvoc.h | 423 +
src/nvidia/generated/g_lock_test_nvoc.c | 519 +
src/nvidia/generated/g_lock_test_nvoc.h | 327 +
src/nvidia/generated/g_mem_desc_nvoc.h | 1593 +++
src/nvidia/generated/g_mem_list_nvoc.h | 330 +
src/nvidia/generated/g_mem_mgr_nvoc.c | 481 +
src/nvidia/generated/g_mem_mgr_nvoc.h | 2930 +++++
src/nvidia/generated/g_mem_nvoc.c | 534 +
src/nvidia/generated/g_mem_nvoc.h | 594 +
src/nvidia/generated/g_nv_debug_dump_nvoc.h | 441 +
src/nvidia/generated/g_nv_name_released.h | 6710 +++++++++++
src/nvidia/generated/g_nvh_state.h | 28 +
src/nvidia/generated/g_object_nvoc.c | 181 +
src/nvidia/generated/g_object_nvoc.h | 235 +
src/nvidia/generated/g_objtmr_nvoc.c | 389 +
src/nvidia/generated/g_objtmr_nvoc.h | 1271 ++
src/nvidia/generated/g_odb.h | 60 +
src/nvidia/generated/g_os_desc_mem_nvoc.c | 527 +
src/nvidia/generated/g_os_desc_mem_nvoc.h | 332 +
src/nvidia/generated/g_os_hal.h | 10 +
src/nvidia/generated/g_os_nvoc.c | 199 +
src/nvidia/generated/g_os_nvoc.h | 1456 +++
src/nvidia/generated/g_prereq_tracker_nvoc.c | 205 +
src/nvidia/generated/g_prereq_tracker_nvoc.h | 305 +
src/nvidia/generated/g_ref_count_nvoc.h | 237 +
.../generated/g_resource_fwd_decls_nvoc.h | 616 +
src/nvidia/generated/g_resource_nvoc.c | 489 +
src/nvidia/generated/g_resource_nvoc.h | 480 +
src/nvidia/generated/g_resserv_nvoc.h | 457 +
src/nvidia/generated/g_rmconfig_private.h | 849 ++
src/nvidia/generated/g_rmconfig_util.c | 106 +
src/nvidia/generated/g_rmconfig_util.h | 38 +
src/nvidia/generated/g_rpc-message-header.h | 68 +
src/nvidia/generated/g_rpc-structures.h | 222 +
src/nvidia/generated/g_rs_client_nvoc.c | 564 +
src/nvidia/generated/g_rs_client_nvoc.h | 795 +
src/nvidia/generated/g_rs_resource_nvoc.c | 225 +
src/nvidia/generated/g_rs_resource_nvoc.h | 979 ++
src/nvidia/generated/g_rs_server_nvoc.c | 402 +
src/nvidia/generated/g_rs_server_nvoc.h | 1425 +++
src/nvidia/generated/g_sdk-structures.h | 62 +
src/nvidia/generated/g_standard_mem_nvoc.c | 526 +
src/nvidia/generated/g_standard_mem_nvoc.h | 384 +
src/nvidia/generated/g_subdevice_nvoc.c | 2759 +++++
src/nvidia/generated/g_subdevice_nvoc.h | 1497 +++
src/nvidia/generated/g_syncpoint_mem_nvoc.c | 526 +
src/nvidia/generated/g_syncpoint_mem_nvoc.h | 329 +
src/nvidia/generated/g_system_mem_nvoc.c | 652 +
src/nvidia/generated/g_system_mem_nvoc.h | 367 +
src/nvidia/generated/g_system_nvoc.c | 238 +
src/nvidia/generated/g_system_nvoc.h | 725 +
src/nvidia/generated/g_tmr_nvoc.c | 623 +
src/nvidia/generated/g_tmr_nvoc.h | 432 +
src/nvidia/generated/g_traceable_nvoc.c | 119 +
src/nvidia/generated/g_traceable_nvoc.h | 122 +
src/nvidia/generated/g_vaspace_nvoc.c | 151 +
src/nvidia/generated/g_vaspace_nvoc.h | 456 +
src/nvidia/generated/g_virt_mem_mgr_nvoc.c | 198 +
src/nvidia/generated/g_virt_mem_mgr_nvoc.h | 178 +
src/nvidia/generated/rmconfig.h | 810 ++
src/nvidia/inc/kernel/core/core.h | 50 +
src/nvidia/inc/kernel/core/hal.h | 3 +
src/nvidia/inc/kernel/core/hal_mgr.h | 3 +
src/nvidia/inc/kernel/core/info_block.h | 47 +
src/nvidia/inc/kernel/core/locks.h | 195 +
src/nvidia/inc/kernel/core/prelude.h | 116 +
src/nvidia/inc/kernel/core/printf.h | 263 +
src/nvidia/inc/kernel/core/strict.h | 99 +
src/nvidia/inc/kernel/core/system.h | 3 +
src/nvidia/inc/kernel/core/thread_state.h | 227 +
.../kernel/diagnostics/code_coverage_mgr.h | 3 +
src/nvidia/inc/kernel/diagnostics/journal.h | 3 +
.../inc/kernel/diagnostics/journal_structs.h | 53 +
.../inc/kernel/diagnostics/nv_debug_dump.h | 3 +
src/nvidia/inc/kernel/diagnostics/profiler.h | 119 +
src/nvidia/inc/kernel/diagnostics/traceable.h | 3 +
src/nvidia/inc/kernel/diagnostics/tracer.h | 188 +
.../inc/kernel/diagnostics/xid_context.h | 64 +
.../inc/kernel/gpu/audio/hda_codec_api.h | 3 +
.../inc/kernel/gpu/dce_client/dce_client.h | 3 +
src/nvidia/inc/kernel/gpu/device/device.h | 3 +
.../inc/kernel/gpu/disp/disp_capabilities.h | 3 +
src/nvidia/inc/kernel/gpu/disp/disp_channel.h | 3 +
src/nvidia/inc/kernel/gpu/disp/disp_objs.h | 3 +
src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h | 3 +
.../inc/kernel/gpu/disp/head/kernel_head.h | 3 +
.../kernel/gpu/disp/inst_mem/disp_inst_mem.h | 3 +
src/nvidia/inc/kernel/gpu/disp/kern_disp.h | 3 +
.../inc/kernel/gpu/disp/kern_disp_max.h | 35 +
.../inc/kernel/gpu/disp/kern_disp_type.h | 68 +
.../kernel/gpu/disp/vblank_callback/vblank.h | 111 +
src/nvidia/inc/kernel/gpu/eng_desc.h | 3 +
src/nvidia/inc/kernel/gpu/eng_state.h | 3 +
src/nvidia/inc/kernel/gpu/error_cont.h | 189 +
src/nvidia/inc/kernel/gpu/gpu.h | 3 +
src/nvidia/inc/kernel/gpu/gpu_access.h | 3 +
src/nvidia/inc/kernel/gpu/gpu_acpi_data.h | 112 +
src/nvidia/inc/kernel/gpu/gpu_arch.h | 3 +
.../inc/kernel/gpu/gpu_child_class_defs.h | 47 +
src/nvidia/inc/kernel/gpu/gpu_child_list.h | 313 +
.../inc/kernel/gpu/gpu_device_mapping.h | 68 +
src/nvidia/inc/kernel/gpu/gpu_ecc.h | 60 +
src/nvidia/inc/kernel/gpu/gpu_engine_type.h | 181 +
src/nvidia/inc/kernel/gpu/gpu_halspec.h | 3 +
src/nvidia/inc/kernel/gpu/gpu_resource.h | 3 +
src/nvidia/inc/kernel/gpu/gpu_resource_desc.h | 37 +
.../inc/kernel/gpu/gpu_shared_data_map.h | 85 +
src/nvidia/inc/kernel/gpu/gpu_timeout.h | 155 +
.../inc/kernel/gpu/gpu_user_shared_data.h | 3 +
src/nvidia/inc/kernel/gpu/gpu_uuid.h | 54 +
.../inc/kernel/gpu/gsp/gsp_trace_rats_macro.h | 40 +
.../kernel/gpu/gsp/kernel_gsp_trace_rats.h | 64 +
src/nvidia/inc/kernel/gpu/gsp/message_queue.h | 34 +
.../gpu/hfrp/kern_hfrp_commands_responses.h | 73 +
.../inc/kernel/gpu/hfrp/kern_hfrp_common.h | 130 +
src/nvidia/inc/kernel/gpu/hfrp/kernel_hfrp.h | 118 +
src/nvidia/inc/kernel/gpu/intr/intr_common.h | 49 +
src/nvidia/inc/kernel/gpu/kern_gpu_power.h | 73 +
.../inc/kernel/gpu/mem_mgr/context_dma.h | 3 +
src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h | 161 +
src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h | 3 +
src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h | 3 +
src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h | 46 +
.../inc/kernel/gpu/mem_mgr/rm_page_size.h | 87 +
.../gpu/mem_mgr/virt_mem_allocator_common.h | 108 +
src/nvidia/inc/kernel/gpu/nvbitmask.h | 39 +
src/nvidia/inc/kernel/gpu/rpc/objrpc.h | 131 +
.../inc/kernel/gpu/rpc/objrpcstructurecopy.h | 42 +
.../inc/kernel/gpu/subdevice/generic_engine.h | 3 +
.../inc/kernel/gpu/subdevice/subdevice.h | 3 +
src/nvidia/inc/kernel/gpu/timer/objtmr.h | 3 +
src/nvidia/inc/kernel/gpu/timer/tmr.h | 3 +
src/nvidia/inc/kernel/gpu_mgr/gpu_db.h | 3 +
src/nvidia/inc/kernel/gpu_mgr/gpu_group.h | 3 +
src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h | 3 +
src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h | 3 +
src/nvidia/inc/kernel/mem_mgr/io_vaspace.h | 3 +
src/nvidia/inc/kernel/mem_mgr/mem.h | 3 +
src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h | 3 +
src/nvidia/inc/kernel/mem_mgr/standard_mem.h | 3 +
src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h | 3 +
src/nvidia/inc/kernel/mem_mgr/system_mem.h | 3 +
src/nvidia/inc/kernel/mem_mgr/vaspace.h | 3 +
src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h | 3 +
src/nvidia/inc/kernel/os/capability.h | 46 +
src/nvidia/inc/kernel/os/nv_memory_area.h | 104 +
src/nvidia/inc/kernel/os/nv_memory_type.h | 41 +
src/nvidia/inc/kernel/os/os.h | 3 +
src/nvidia/inc/kernel/os/os_stub.h | 42 +
src/nvidia/inc/kernel/platform/acpi_common.h | 113 +
src/nvidia/inc/kernel/platform/nvpcf.h | 617 +
src/nvidia/inc/kernel/platform/sli/sli.h | 66 +
src/nvidia/inc/kernel/rmapi/alloc_size.h | 38 +
src/nvidia/inc/kernel/rmapi/binary_api.h | 3 +
src/nvidia/inc/kernel/rmapi/client.h | 3 +
src/nvidia/inc/kernel/rmapi/client_resource.h | 3 +
src/nvidia/inc/kernel/rmapi/control.h | 369 +
src/nvidia/inc/kernel/rmapi/event.h | 3 +
src/nvidia/inc/kernel/rmapi/event_buffer.h | 3 +
src/nvidia/inc/kernel/rmapi/exports.h | 127 +
src/nvidia/inc/kernel/rmapi/lock_stress.h | 108 +
src/nvidia/inc/kernel/rmapi/lock_test.h | 48 +
src/nvidia/inc/kernel/rmapi/mapping_list.h | 167 +
src/nvidia/inc/kernel/rmapi/param_copy.h | 99 +
src/nvidia/inc/kernel/rmapi/resource.h | 3 +
.../inc/kernel/rmapi/resource_fwd_decls.h | 3 +
src/nvidia/inc/kernel/rmapi/rmapi.h | 465 +
.../inc/kernel/rmapi/rmapi_cache_handlers.h | 80 +
src/nvidia/inc/kernel/rmapi/rmapi_specific.h | 63 +
src/nvidia/inc/kernel/rmapi/rmapi_utils.h | 67 +
src/nvidia/inc/kernel/rmapi/rs_utils.h | 188 +
src/nvidia/inc/kernel/vgpu/dev_vgpu.h | 349 +
.../inc/kernel/vgpu/rm_plugin_shared_code.h | 8581 +++++++++++++
src/nvidia/inc/kernel/vgpu/rpc.h | 555 +
src/nvidia/inc/kernel/vgpu/rpc_global_enums.h | 292 +
src/nvidia/inc/kernel/vgpu/rpc_hal_stubs.h | 76 +
src/nvidia/inc/kernel/vgpu/rpc_headers.h | 259 +
src/nvidia/inc/kernel/vgpu/rpc_vgpu.h | 73 +
src/nvidia/inc/kernel/vgpu/vgpu_util.h | 53 +
src/nvidia/inc/kernel/vgpu/vgpuapi.h | 71 +
.../virtualization/hypervisor/hypervisor.h | 3 +
src/nvidia/inc/lib/base_utils.h | 79 +
src/nvidia/inc/lib/protobuf/prb.h | 299 +
src/nvidia/inc/lib/ref_count.h | 3 +
src/nvidia/inc/lib/zlib/inflate.h | 134 +
src/nvidia/inc/libraries/containers/btree.h | 68 +
.../inc/libraries/containers/eheap_old.h | 113 +
src/nvidia/inc/libraries/containers/list.h | 337 +
src/nvidia/inc/libraries/containers/map.h | 303 +
.../inc/libraries/containers/multimap.h | 301 +
src/nvidia/inc/libraries/containers/queue.h | 146 +
src/nvidia/inc/libraries/containers/ringbuf.h | 169 +
.../inc/libraries/containers/type_safety.h | 283 +
src/nvidia/inc/libraries/containers/vector.h | 203 +
.../inc/libraries/eventbufferproducer.h | 177 +
src/nvidia/inc/libraries/field_desc.h | 450 +
src/nvidia/inc/libraries/ioaccess/ioaccess.h | 149 +
.../libraries/mapping_reuse/mapping_reuse.h | 118 +
src/nvidia/inc/libraries/mmu/gmmu_fmt.h | 704 ++
src/nvidia/inc/libraries/mmu/mmu_fmt.h | 237 +
.../nvlog/internal/nvlog_printf_internal.h | 154 +
src/nvidia/inc/libraries/nvlog/nvlog.h | 400 +
src/nvidia/inc/libraries/nvlog/nvlog_printf.h | 92 +
src/nvidia/inc/libraries/nvoc/object.h | 131 +
src/nvidia/inc/libraries/nvoc/prelude.h | 266 +
src/nvidia/inc/libraries/nvoc/rtti.h | 80 +
src/nvidia/inc/libraries/nvoc/runtime.h | 118 +
src/nvidia/inc/libraries/nvoc/utility.h | 28 +
src/nvidia/inc/libraries/nvport/atomic.h | 425 +
src/nvidia/inc/libraries/nvport/core.h | 64 +
src/nvidia/inc/libraries/nvport/cpu.h | 657 +
src/nvidia/inc/libraries/nvport/crypto.h | 346 +
src/nvidia/inc/libraries/nvport/debug.h | 318 +
.../libraries/nvport/inline/atomic_clang.h | 478 +
.../inc/libraries/nvport/inline/atomic_gcc.h | 493 +
.../nvport/inline/debug_unix_kernel_os.h | 76 +
.../libraries/nvport/inline/memory_tracking.h | 370 +
.../libraries/nvport/inline/safe_generic.h | 311 +
.../libraries/nvport/inline/sync_tracking.h | 211 +
.../libraries/nvport/inline/util_gcc_clang.h | 188 +
.../libraries/nvport/inline/util_generic.h | 267 +
.../inc/libraries/nvport/inline/util_valist.h | 30 +
src/nvidia/inc/libraries/nvport/memory.h | 1068 ++
src/nvidia/inc/libraries/nvport/nvport.h | 262 +
src/nvidia/inc/libraries/nvport/safe.h | 621 +
src/nvidia/inc/libraries/nvport/string.h | 179 +
src/nvidia/inc/libraries/nvport/sync.h | 829 ++
src/nvidia/inc/libraries/nvport/thread.h | 318 +
src/nvidia/inc/libraries/nvport/util.h | 254 +
src/nvidia/inc/libraries/poolalloc.h | 290 +
.../libraries/prereq_tracker/prereq_tracker.h | 3 +
src/nvidia/inc/libraries/resserv/resserv.h | 398 +
.../inc/libraries/resserv/rs_access_map.h | 234 +
.../inc/libraries/resserv/rs_access_rights.h | 167 +
src/nvidia/inc/libraries/resserv/rs_client.h | 557 +
src/nvidia/inc/libraries/resserv/rs_domain.h | 80 +
.../inc/libraries/resserv/rs_resource.h | 871 ++
src/nvidia/inc/libraries/resserv/rs_server.h | 1199 ++
src/nvidia/inc/libraries/tls/tls.h | 347 +
src/nvidia/inc/libraries/utils/nv_enum.h | 684 ++
src/nvidia/inc/libraries/utils/nvassert.h | 992 ++
src/nvidia/inc/libraries/utils/nvbitvector.h | 605 +
src/nvidia/inc/libraries/utils/nvmacro.h | 251 +
src/nvidia/inc/libraries/utils/nvprintf.h | 393 +
.../inc/libraries/utils/nvprintf_level.h | 64 +
src/nvidia/inc/libraries/utils/nvrange.h | 378 +
src/nvidia/inc/os/dce_rm_client_ipc.h | 35 +
src/nvidia/interface/acpidsmguids.h | 90 +
src/nvidia/interface/acpigenfuncs.h | 83 +
.../interface/deprecated/rmapi_deprecated.h | 121 +
.../deprecated/rmapi_deprecated_utils.c | 468 +
src/nvidia/interface/nv-firmware-registry.h | 83 +
src/nvidia/interface/nv_sriov_defines.h | 101 +
src/nvidia/interface/nvacpitypes.h | 47 +
src/nvidia/interface/nvrm_registry.h | 2967 +++++
.../interface/rmapi/src/g_finn_rm_api.c | 9042 ++++++++++++++
src/nvidia/nv-kernel.ld | 35 +
src/nvidia/src/kernel/core/hal/hal.c | 68 +
src/nvidia/src/kernel/core/hal/hals_all.c | 59 +
src/nvidia/src/kernel/core/hal/info_block.c | 171 +
src/nvidia/src/kernel/core/hal_mgr.c | 229 +
src/nvidia/src/kernel/core/locks_common.c | 289 +
src/nvidia/src/kernel/core/locks_minimal.c | 328 +
src/nvidia/src/kernel/core/system.c | 760 ++
src/nvidia/src/kernel/core/thread_state.c | 1287 ++
.../kernel/diagnostics/code_coverage_mgr.c | 121 +
src/nvidia/src/kernel/diagnostics/nvlog.c | 830 ++
.../src/kernel/diagnostics/nvlog_printf.c | 1322 ++
src/nvidia/src/kernel/diagnostics/profiler.c | 227 +
.../gpu/arch/t23x/kern_gpu_arch_t234d.c | 36 +
.../src/kernel/gpu/arch/t23x/kern_gpu_t234d.c | 207 +
.../src/kernel/gpu/arch/t25x/kern_gpu_t256d.c | 89 +
.../src/kernel/gpu/arch/t26x/kern_gpu_t264d.c | 47 +
.../src/kernel/gpu/audio/hda_codec_api.c | 34 +
.../src/kernel/gpu/dce_client/dce_client.c | 117 +
.../kernel/gpu/dce_client/dce_client_rpc.c | 704 ++
src/nvidia/src/kernel/gpu/device.c | 607 +
src/nvidia/src/kernel/gpu/device_ctrl.c | 381 +
src/nvidia/src/kernel/gpu/device_share.c | 318 +
.../kernel/gpu/disp/arch/v02/kern_disp_0204.c | 219 +
.../kernel/gpu/disp/arch/v03/kern_disp_0300.c | 681 ++
.../kernel/gpu/disp/arch/v04/kern_disp_0401.c | 91 +
.../kernel/gpu/disp/arch/v04/kern_disp_0402.c | 148 +
.../kernel/gpu/disp/arch/v04/kern_disp_0404.c | 55 +
.../kernel/gpu/disp/arch/v05/kern_disp_0501.c | 711 ++
.../src/kernel/gpu/disp/disp_capabilities.c | 77 +
src/nvidia/src/kernel/gpu/disp/disp_channel.c | 863 ++
.../gpu/disp/disp_common_kern_ctrl_minimal.c | 564 +
.../gpu/disp/disp_object_kern_ctrl_minimal.c | 110 +
src/nvidia/src/kernel/gpu/disp/disp_objs.c | 701 ++
src/nvidia/src/kernel/gpu/disp/disp_sf_user.c | 80 +
.../gpu/disp/head/arch/v04/kernel_head_0401.c | 98 +
.../src/kernel/gpu/disp/head/kernel_head.c | 445 +
.../inst_mem/arch/v03/disp_inst_mem_0300.c | 360 +
.../kernel/gpu/disp/inst_mem/disp_inst_mem.c | 959 ++
src/nvidia/src/kernel/gpu/disp/kern_disp.c | 1755 +++
.../kernel/gpu/disp/vblank_callback/vblank.c | 679 ++
src/nvidia/src/kernel/gpu/eng_state.c | 428 +
src/nvidia/src/kernel/gpu/gpu.c | 4957 ++++++++
src/nvidia/src/kernel/gpu/gpu_access.c | 1873 +++
src/nvidia/src/kernel/gpu/gpu_arch.c | 40 +
.../src/kernel/gpu/gpu_device_mapping.c | 329 +
src/nvidia/src/kernel/gpu/gpu_engine_type.c | 370 +
src/nvidia/src/kernel/gpu/gpu_gspclient.c | 95 +
src/nvidia/src/kernel/gpu/gpu_resource.c | 415 +
src/nvidia/src/kernel/gpu/gpu_resource_desc.c | 585 +
src/nvidia/src/kernel/gpu/gpu_rmapi.c | 700 ++
src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c | 68 +
src/nvidia/src/kernel/gpu/gpu_timeout.c | 601 +
.../src/kernel/gpu/gpu_user_shared_data.c | 565 +
src/nvidia/src/kernel/gpu/gpu_uuid.c | 296 +
.../mem_mgr/arch/turing/mem_mgr_tu102_base.c | 89 +
.../src/kernel/gpu/mem_mgr/context_dma.c | 656 +
src/nvidia/src/kernel/gpu/mem_mgr/mem_ctrl.c | 310 +
src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c | 4307 +++++++
src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c | 1560 +++
.../src/kernel/gpu/subdevice/generic_engine.c | 160 +
.../src/kernel/gpu/subdevice/subdevice.c | 370 +
.../subdevice/subdevice_ctrl_event_kernel.c | 299 +
.../gpu/subdevice/subdevice_ctrl_gpu_kernel.c | 1617 +++
.../subdevice_ctrl_internal_kernel.c | 60 +
.../subdevice/subdevice_ctrl_timer_kernel.c | 464 +
src/nvidia/src/kernel/gpu/timer/timer.c | 1836 +++
.../src/kernel/gpu/timer/timer_ostimer.c | 329 +
src/nvidia/src/kernel/gpu_mgr/gpu_db.c | 461 +
src/nvidia/src/kernel/gpu_mgr/gpu_group.c | 329 +
src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c | 65 +
src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c | 3499 ++++++
src/nvidia/src/kernel/mem_mgr/io_vaspace.c | 561 +
src/nvidia/src/kernel/mem_mgr/mem.c | 1272 ++
.../src/kernel/mem_mgr/mem_mgr_internal.h | 38 +
src/nvidia/src/kernel/mem_mgr/os_desc_mem.c | 258 +
src/nvidia/src/kernel/mem_mgr/standard_mem.c | 235 +
src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c | 114 +
src/nvidia/src/kernel/mem_mgr/system_mem.c | 761 +
src/nvidia/src/kernel/mem_mgr/vaspace.c | 233 +
src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c | 188 +
src/nvidia/src/kernel/os/os_init.c | 408 +
src/nvidia/src/kernel/os/os_sanity.c | 55 +
src/nvidia/src/kernel/os/os_stubs.c | 726 +
src/nvidia/src/kernel/os/os_timer.c | 454 +
src/nvidia/src/kernel/rmapi/alloc_free.c | 1668 +++
src/nvidia/src/kernel/rmapi/binary_api.c | 162 +
src/nvidia/src/kernel/rmapi/client.c | 948 ++
src/nvidia/src/kernel/rmapi/client_resource.c | 1791 +++
src/nvidia/src/kernel/rmapi/control.c | 1149 ++
.../src/kernel/rmapi/deprecated_context.c | 205 +
.../src/kernel/rmapi/deprecated_context.h | 42 +
src/nvidia/src/kernel/rmapi/entry_points.c | 583 +
src/nvidia/src/kernel/rmapi/entry_points.h | 396 +
src/nvidia/src/kernel/rmapi/event.c | 753 ++
src/nvidia/src/kernel/rmapi/event_buffer.c | 731 ++
.../src/kernel/rmapi/event_notification.c | 1128 ++
src/nvidia/src/kernel/rmapi/lock_stress.c | 598 +
src/nvidia/src/kernel/rmapi/lock_test.c | 103 +
src/nvidia/src/kernel/rmapi/mapping.c | 535 +
src/nvidia/src/kernel/rmapi/mapping_cpu.c | 986 ++
src/nvidia/src/kernel/rmapi/param_copy.c | 348 +
src/nvidia/src/kernel/rmapi/resource.c | 311 +
src/nvidia/src/kernel/rmapi/resource_desc.c | 218 +
src/nvidia/src/kernel/rmapi/resource_desc.h | 63 +
.../src/kernel/rmapi/resource_desc_flags.h | 94 +
src/nvidia/src/kernel/rmapi/resource_list.h | 488 +
.../rmapi/resource_list_required_includes.h | 48 +
src/nvidia/src/kernel/rmapi/rmapi.c | 1045 ++
src/nvidia/src/kernel/rmapi/rmapi_cache.c | 1784 +++
.../src/kernel/rmapi/rmapi_cache_handlers.c | 200 +
src/nvidia/src/kernel/rmapi/rmapi_finn.c | 609 +
src/nvidia/src/kernel/rmapi/rmapi_specific.c | 157 +
src/nvidia/src/kernel/rmapi/rmapi_stubs.c | 181 +
src/nvidia/src/kernel/rmapi/rmapi_utils.c | 243 +
src/nvidia/src/kernel/rmapi/rpc_common.c | 123 +
src/nvidia/src/kernel/rmapi/rs_utils.c | 382 +
src/nvidia/src/kernel/rmapi/sharing.c | 430 +
src/nvidia/src/lib/base_utils.c | 385 +
src/nvidia/src/lib/zlib/inflate.c | 1157 ++
.../src/libraries/containers/btree/btree.c | 841 ++
.../libraries/containers/eheap/eheap_old.c | 1367 +++
src/nvidia/src/libraries/containers/list.c | 438 +
src/nvidia/src/libraries/containers/map.c | 913 ++
.../src/libraries/containers/multimap.c | 394 +
src/nvidia/src/libraries/containers/queue.c | 303 +
src/nvidia/src/libraries/containers/ringbuf.c | 233 +
src/nvidia/src/libraries/containers/vector.c | 478 +
.../eventbuffer/eventbufferproducer.c | 304 +
src/nvidia/src/libraries/ioaccess/ioaccess.c | 93 +
.../libraries/mapping_reuse/mapping_reuse.c | 324 +
.../src/libraries/nvbitvector/nvbitvector.c | 990 ++
src/nvidia/src/libraries/nvoc/src/runtime.c | 370 +
src/nvidia/src/libraries/nvport/core/core.c | 98 +
.../src/libraries/nvport/cpu/cpu_common.c | 61 +
.../src/libraries/nvport/cpu/cpu_common.h | 54 +
.../nvport/crypto/crypto_random_xorshift.c | 190 +
.../libraries/nvport/memory/memory_generic.h | 195 +
.../libraries/nvport/memory/memory_tracking.c | 1945 +++
.../nvport/memory/memory_unix_kernel_os.c | 206 +
.../libraries/nvport/string/string_generic.c | 379 +
.../nvport/sync/inc/sync_unix_kernel_os_def.h | 60 +
.../src/libraries/nvport/sync/sync_common.h | 158 +
.../nvport/sync/sync_unix_kernel_os.c | 339 +
.../nvport/thread/thread_unix_kernel_os.c | 60 +
.../nvport/util/util_compiler_switch.c | 38 +
.../libraries/nvport/util/util_gcc_clang.c | 80 +
.../nvport/util/util_unix_kernel_os.c | 44 +
.../libraries/prereq_tracker/prereq_tracker.c | 349 +
.../src/libraries/resserv/src/rs_access_map.c | 717 ++
.../libraries/resserv/src/rs_access_rights.c | 124 +
.../src/libraries/resserv/src/rs_client.c | 1845 +++
.../src/libraries/resserv/src/rs_domain.c | 52 +
.../src/libraries/resserv/src/rs_resource.c | 750 +
.../src/libraries/resserv/src/rs_server.c | 5008 ++++++++
src/nvidia/src/libraries/tls/tls.c | 673 ++
src/nvidia/src/libraries/utils/nvassert.c | 341 +
src/nvidia/srcs.mk | 220 +
utils.mk | 618 +
version.mk | 11 +
1519 files changed, 722732 insertions(+)
create mode 100644 CODE_OF_CONDUCT.md
create mode 100644 CONTRIBUTING.md
create mode 100644 COPYING
create mode 100644 Makefile
create mode 100644 README.md
create mode 100644 SECURITY.md
create mode 100644 commitFile.txt
create mode 100644 kernel-open/Kbuild
create mode 100644 kernel-open/Makefile
create mode 100644 kernel-open/common/inc/conftest.h
create mode 100644 kernel-open/common/inc/cpuopsys.h
create mode 100644 kernel-open/common/inc/dce_rm_client_ipc.h
create mode 100644 kernel-open/common/inc/nv-caps.h
create mode 100644 kernel-open/common/inc/nv-chardev-numbers.h
create mode 100644 kernel-open/common/inc/nv-dmabuf.h
create mode 100644 kernel-open/common/inc/nv-firmware-registry.h
create mode 100644 kernel-open/common/inc/nv-firmware.h
create mode 100644 kernel-open/common/inc/nv-gpu-info.h
create mode 100644 kernel-open/common/inc/nv-hash.h
create mode 100644 kernel-open/common/inc/nv-hypervisor.h
create mode 100644 kernel-open/common/inc/nv-ioctl-numa.h
create mode 100644 kernel-open/common/inc/nv-ioctl-numbers.h
create mode 100644 kernel-open/common/inc/nv-ioctl.h
create mode 100644 kernel-open/common/inc/nv-kernel-interface-api.h
create mode 100644 kernel-open/common/inc/nv-kref.h
create mode 100644 kernel-open/common/inc/nv-kthread-q-os.h
create mode 100644 kernel-open/common/inc/nv-kthread-q.h
create mode 100644 kernel-open/common/inc/nv-linux.h
create mode 100644 kernel-open/common/inc/nv-list-helpers.h
create mode 100644 kernel-open/common/inc/nv-lock.h
create mode 100644 kernel-open/common/inc/nv-memdbg.h
create mode 100644 kernel-open/common/inc/nv-mm.h
create mode 100644 kernel-open/common/inc/nv-modeset-interface.h
create mode 100644 kernel-open/common/inc/nv-msi.h
create mode 100644 kernel-open/common/inc/nv-pci-types.h
create mode 100644 kernel-open/common/inc/nv-pci.h
create mode 100644 kernel-open/common/inc/nv-pgprot.h
create mode 100644 kernel-open/common/inc/nv-platform.h
create mode 100644 kernel-open/common/inc/nv-procfs-utils.h
create mode 100644 kernel-open/common/inc/nv-procfs.h
create mode 100644 kernel-open/common/inc/nv-proto.h
create mode 100644 kernel-open/common/inc/nv-retpoline.h
create mode 100644 kernel-open/common/inc/nv-time.h
create mode 100644 kernel-open/common/inc/nv-timer.h
create mode 100644 kernel-open/common/inc/nv.h
create mode 100644 kernel-open/common/inc/nvCpuUuid.h
create mode 100644 kernel-open/common/inc/nv_common_utils.h
create mode 100644 kernel-open/common/inc/nv_dpy_id.h
create mode 100644 kernel-open/common/inc/nv_mig_types.h
create mode 100644 kernel-open/common/inc/nv_speculation_barrier.h
create mode 100644 kernel-open/common/inc/nv_stdarg.h
create mode 100644 kernel-open/common/inc/nv_uvm_interface.h
create mode 100644 kernel-open/common/inc/nv_uvm_types.h
create mode 100644 kernel-open/common/inc/nv_uvm_user_types.h
create mode 100644 kernel-open/common/inc/nvgputypes.h
create mode 100644 kernel-open/common/inc/nvi2c.h
create mode 100644 kernel-open/common/inc/nvimpshared.h
create mode 100644 kernel-open/common/inc/nvkms-api-types.h
create mode 100644 kernel-open/common/inc/nvkms-format.h
create mode 100644 kernel-open/common/inc/nvkms-kapi.h
create mode 100644 kernel-open/common/inc/nvlimits.h
create mode 100644 kernel-open/common/inc/nvmisc.h
create mode 100644 kernel-open/common/inc/nvstatus.h
create mode 100644 kernel-open/common/inc/nvstatuscodes.h
create mode 100644 kernel-open/common/inc/nvtypes.h
create mode 100644 kernel-open/common/inc/os-interface.h
create mode 100644 kernel-open/common/inc/os/nv_memory_area.h
create mode 100644 kernel-open/common/inc/os/nv_memory_type.h
create mode 100644 kernel-open/common/inc/os_dsi_panel_props.h
create mode 100644 kernel-open/common/inc/os_gpio.h
create mode 100644 kernel-open/common/inc/rm-gpu-ops.h
create mode 100644 kernel-open/common/inc/rs_access.h
create mode 100755 kernel-open/conftest.sh
create mode 100644 kernel-open/count-lines.mk
create mode 100644 kernel-open/dkms.conf
create mode 100644 kernel-open/header-presence-tests.mk
create mode 100644 kernel-open/nvidia-drm/nv-kthread-q.c
create mode 100644 kernel-open/nvidia-drm/nv-pci-table.c
create mode 100644 kernel-open/nvidia-drm/nv-pci-table.h
create mode 100644 kernel-open/nvidia-drm/nv_common_utils.h
create mode 100644 kernel-open/nvidia-drm/nvidia-dma-resv-helper.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-conftest.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-connector.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-connector.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-crtc.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-crtc.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-drv.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-drv.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-encoder.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-encoder.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-fb.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-fb.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-fence.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-fence.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-format.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-format.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-gem.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-gem.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-helper.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-helper.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-ioctl.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-linux.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-modeset.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-modeset.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-os-interface.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-os-interface.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-priv.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-sources.mk
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-utils.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm-utils.h
create mode 100644 kernel-open/nvidia-drm/nvidia-drm.Kbuild
create mode 100644 kernel-open/nvidia-drm/nvidia-drm.c
create mode 100644 kernel-open/nvidia-drm/nvidia-drm.h
create mode 100644 kernel-open/nvidia-modeset/nv-kthread-q.c
create mode 100644 kernel-open/nvidia-modeset/nvidia-modeset-linux.c
create mode 100644 kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h
create mode 100644 kernel-open/nvidia-modeset/nvidia-modeset.Kbuild
create mode 100644 kernel-open/nvidia-modeset/nvkms-ioctl.h
create mode 100644 kernel-open/nvidia-modeset/nvkms.h
create mode 100644 kernel-open/nvidia/detect-self-hosted.h
create mode 100644 kernel-open/nvidia/hal/library/cryptlib/cryptlib_aead.h
create mode 100644 kernel-open/nvidia/hal/library/cryptlib/cryptlib_cert.h
create mode 100644 kernel-open/nvidia/hal/library/cryptlib/cryptlib_dh.h
create mode 100644 kernel-open/nvidia/hal/library/cryptlib/cryptlib_ec.h
create mode 100644 kernel-open/nvidia/hal/library/cryptlib/cryptlib_ecd.h
create mode 100644 kernel-open/nvidia/hal/library/cryptlib/cryptlib_hash.h
create mode 100644 kernel-open/nvidia/hal/library/cryptlib/cryptlib_hkdf.h
create mode 100644 kernel-open/nvidia/hal/library/cryptlib/cryptlib_mac.h
create mode 100644 kernel-open/nvidia/hal/library/cryptlib/cryptlib_rng.h
create mode 100644 kernel-open/nvidia/hal/library/cryptlib/cryptlib_rsa.h
create mode 100644 kernel-open/nvidia/hal/library/cryptlib/cryptlib_sm2.h
create mode 100644 kernel-open/nvidia/internal/libspdm_lib_config.h
create mode 100644 kernel-open/nvidia/internal_crypt_lib.h
create mode 100644 kernel-open/nvidia/library/cryptlib.h
create mode 100644 kernel-open/nvidia/library/spdm_lib_config.h
create mode 100644 kernel-open/nvidia/libspdm_aead.c
create mode 100644 kernel-open/nvidia/libspdm_aead_aes_gcm.c
create mode 100644 kernel-open/nvidia/libspdm_ec.c
create mode 100644 kernel-open/nvidia/libspdm_ecc.c
create mode 100644 kernel-open/nvidia/libspdm_hkdf.c
create mode 100644 kernel-open/nvidia/libspdm_hkdf_sha.c
create mode 100644 kernel-open/nvidia/libspdm_hmac_sha.c
create mode 100644 kernel-open/nvidia/libspdm_internal_crypt_lib.c
create mode 100644 kernel-open/nvidia/libspdm_rand.c
create mode 100644 kernel-open/nvidia/libspdm_rsa.c
create mode 100644 kernel-open/nvidia/libspdm_rsa_ext.c
create mode 100644 kernel-open/nvidia/libspdm_sha.c
create mode 100644 kernel-open/nvidia/libspdm_shash.c
create mode 100644 kernel-open/nvidia/libspdm_x509.c
create mode 100644 kernel-open/nvidia/nv-acpi.c
create mode 100644 kernel-open/nvidia/nv-backlight.c
create mode 100644 kernel-open/nvidia/nv-bpmp.c
create mode 100644 kernel-open/nvidia/nv-caps-imex.c
create mode 100644 kernel-open/nvidia/nv-caps-imex.h
create mode 100644 kernel-open/nvidia/nv-caps.c
create mode 100644 kernel-open/nvidia/nv-clk.c
create mode 100644 kernel-open/nvidia/nv-cray.c
create mode 100644 kernel-open/nvidia/nv-dma.c
create mode 100644 kernel-open/nvidia/nv-dmabuf.c
create mode 100644 kernel-open/nvidia/nv-dsi-parse-panel-props.c
create mode 100644 kernel-open/nvidia/nv-gpio.c
create mode 100644 kernel-open/nvidia/nv-host1x.c
create mode 100644 kernel-open/nvidia/nv-i2c.c
create mode 100644 kernel-open/nvidia/nv-imp.c
create mode 100644 kernel-open/nvidia/nv-ipc-soc.c
create mode 100644 kernel-open/nvidia/nv-kthread-q.c
create mode 100644 kernel-open/nvidia/nv-memdbg.c
create mode 100644 kernel-open/nvidia/nv-mmap.c
create mode 100644 kernel-open/nvidia/nv-modeset-interface.c
create mode 100644 kernel-open/nvidia/nv-msi.c
create mode 100644 kernel-open/nvidia/nv-nano-timer.c
create mode 100644 kernel-open/nvidia/nv-p2p.c
create mode 100644 kernel-open/nvidia/nv-p2p.h
create mode 100644 kernel-open/nvidia/nv-pat.c
create mode 100644 kernel-open/nvidia/nv-pat.h
create mode 100644 kernel-open/nvidia/nv-pci-table.c
create mode 100644 kernel-open/nvidia/nv-pci-table.h
create mode 100644 kernel-open/nvidia/nv-pci.c
create mode 100644 kernel-open/nvidia/nv-platform-pm.c
create mode 100644 kernel-open/nvidia/nv-platform.c
create mode 100644 kernel-open/nvidia/nv-procfs.c
create mode 100644 kernel-open/nvidia/nv-reg.h
create mode 100644 kernel-open/nvidia/nv-report-err.c
create mode 100644 kernel-open/nvidia/nv-report-err.h
create mode 100644 kernel-open/nvidia/nv-rsync.c
create mode 100644 kernel-open/nvidia/nv-rsync.h
create mode 100644 kernel-open/nvidia/nv-tracepoint.h
create mode 100644 kernel-open/nvidia/nv-usermap.c
create mode 100644 kernel-open/nvidia/nv-vm.c
create mode 100644 kernel-open/nvidia/nv-vtophys.c
create mode 100644 kernel-open/nvidia/nv.c
create mode 100644 kernel-open/nvidia/nv_gpu_ops.h
create mode 100644 kernel-open/nvidia/nv_uvm_interface.c
create mode 100644 kernel-open/nvidia/nvidia-sources.Kbuild
create mode 100644 kernel-open/nvidia/nvidia.Kbuild
create mode 100644 kernel-open/nvidia/nvspdm_cryptlib_extensions.h
create mode 100644 kernel-open/nvidia/os-interface.c
create mode 100644 kernel-open/nvidia/os-mlock.c
create mode 100644 kernel-open/nvidia/os-pci.c
create mode 100644 kernel-open/nvidia/os-registry.c
create mode 100644 kernel-open/nvidia/os-usermap.c
create mode 100644 kernel-open/nvidia/rmp2pdefines.h
create mode 100755 nv-compiler.sh
create mode 100644 push_info.txt
create mode 100644 src/common/displayport/inc/dp_address.h
create mode 100644 src/common/displayport/inc/dp_auxbus.h
create mode 100644 src/common/displayport/inc/dp_auxdefs.h
create mode 100644 src/common/displayport/inc/dp_auxretry.h
create mode 100644 src/common/displayport/inc/dp_bitstream.h
create mode 100644 src/common/displayport/inc/dp_buffer.h
create mode 100644 src/common/displayport/inc/dp_configcaps.h
create mode 100644 src/common/displayport/inc/dp_connector.h
create mode 100644 src/common/displayport/inc/dp_connectorimpl.h
create mode 100644 src/common/displayport/inc/dp_crc.h
create mode 100644 src/common/displayport/inc/dp_deviceimpl.h
create mode 100644 src/common/displayport/inc/dp_discovery.h
create mode 100644 src/common/displayport/inc/dp_edid.h
create mode 100644 src/common/displayport/inc/dp_evoadapter.h
create mode 100644 src/common/displayport/inc/dp_groupimpl.h
create mode 100644 src/common/displayport/inc/dp_guid.h
create mode 100644 src/common/displayport/inc/dp_hostimp.h
create mode 100644 src/common/displayport/inc/dp_internal.h
create mode 100644 src/common/displayport/inc/dp_linkconfig.h
create mode 100644 src/common/displayport/inc/dp_linkedlist.h
create mode 100644 src/common/displayport/inc/dp_list.h
create mode 100644 src/common/displayport/inc/dp_mainlink.h
create mode 100644 src/common/displayport/inc/dp_merger.h
create mode 100644 src/common/displayport/inc/dp_messagecodings.h
create mode 100644 src/common/displayport/inc/dp_messageheader.h
create mode 100644 src/common/displayport/inc/dp_messages.h
create mode 100644 src/common/displayport/inc/dp_object.h
create mode 100644 src/common/displayport/inc/dp_printf.h
create mode 100644 src/common/displayport/inc/dp_qse.h
create mode 100644 src/common/displayport/inc/dp_regkeydatabase.h
create mode 100644 src/common/displayport/inc/dp_ringbuffer.h
create mode 100644 src/common/displayport/inc/dp_splitter.h
create mode 100644 src/common/displayport/inc/dp_timeout.h
create mode 100644 src/common/displayport/inc/dp_timer.h
create mode 100644 src/common/displayport/inc/dp_tracing.h
create mode 100644 src/common/displayport/inc/dp_vrr.h
create mode 100644 src/common/displayport/inc/dp_wardatabase.h
create mode 100644 src/common/displayport/inc/dp_watermark.h
create mode 100644 src/common/displayport/inc/dptestutil/dp_testmessage.h
create mode 100644 src/common/displayport/src/dp_auxretry.cpp
create mode 100644 src/common/displayport/src/dp_bitstream.cpp
create mode 100644 src/common/displayport/src/dp_buffer.cpp
create mode 100644 src/common/displayport/src/dp_configcaps.cpp
create mode 100644 src/common/displayport/src/dp_connectorimpl.cpp
create mode 100644 src/common/displayport/src/dp_crc.cpp
create mode 100644 src/common/displayport/src/dp_deviceimpl.cpp
create mode 100644 src/common/displayport/src/dp_discovery.cpp
create mode 100644 src/common/displayport/src/dp_edid.cpp
create mode 100644 src/common/displayport/src/dp_evoadapter.cpp
create mode 100644 src/common/displayport/src/dp_groupimpl.cpp
create mode 100644 src/common/displayport/src/dp_guid.cpp
create mode 100644 src/common/displayport/src/dp_linkconfig.cpp
create mode 100644 src/common/displayport/src/dp_list.cpp
create mode 100644 src/common/displayport/src/dp_merger.cpp
create mode 100644 src/common/displayport/src/dp_messagecodings.cpp
create mode 100644 src/common/displayport/src/dp_messageheader.cpp
create mode 100644 src/common/displayport/src/dp_messages.cpp
create mode 100644 src/common/displayport/src/dp_mst_edid.cpp
create mode 100644 src/common/displayport/src/dp_qse.cpp
create mode 100644 src/common/displayport/src/dp_splitter.cpp
create mode 100644 src/common/displayport/src/dp_sst_edid.cpp
create mode 100644 src/common/displayport/src/dp_timer.cpp
create mode 100644 src/common/displayport/src/dp_vrr.cpp
create mode 100644 src/common/displayport/src/dp_wardatabase.cpp
create mode 100644 src/common/displayport/src/dp_watermark.cpp
create mode 100644 src/common/displayport/src/dptestutil/dp_testmessage.cpp
create mode 100644 src/common/inc/displayport/displayport.h
create mode 100644 src/common/inc/displayport/displayport2x.h
create mode 100644 src/common/inc/displayport/dpcd.h
create mode 100644 src/common/inc/displayport/dpcd14.h
create mode 100644 src/common/inc/displayport/dpcd20.h
create mode 100644 src/common/inc/gps.h
create mode 100644 src/common/inc/hdmi_spec.h
create mode 100644 src/common/inc/jt.h
create mode 100644 src/common/inc/nvBinSegment.h
create mode 100644 src/common/inc/nvBldVer.h
create mode 100644 src/common/inc/nvCpuIntrinsics.h
create mode 100644 src/common/inc/nvCpuUuid.h
create mode 100644 src/common/inc/nvHdmiFrlCommon.h
create mode 100644 src/common/inc/nvPNPVendorIds.h
create mode 100644 src/common/inc/nvSemaphoreCommon.h
create mode 100644 src/common/inc/nvSha1.h
create mode 100644 src/common/inc/nvUnixVersion.h
create mode 100644 src/common/inc/nvVer.h
create mode 100644 src/common/inc/nv_list.h
create mode 100644 src/common/inc/nv_mig_types.h
create mode 100644 src/common/inc/nv_smg.h
create mode 100644 src/common/inc/nv_speculation_barrier.h
create mode 100644 src/common/inc/nvctassert.h
create mode 100644 src/common/inc/nvlog_defs.h
create mode 100644 src/common/inc/nvlog_inc.h
create mode 100644 src/common/inc/nvlog_inc2.h
create mode 100644 src/common/inc/nvop.h
create mode 100644 src/common/inc/nvrmcontext.h
create mode 100644 src/common/inc/pex.h
create mode 100644 src/common/inc/rmosxfac.h
create mode 100644 src/common/inc/swref/common_def_nvlink.h
create mode 100644 src/common/inc/swref/published/disp/v02_04/dev_disp.h
create mode 100644 src/common/inc/swref/published/disp/v03_00/dev_disp.h
create mode 100644 src/common/inc/swref/published/disp/v04_01/dev_disp.h
create mode 100644 src/common/inc/swref/published/disp/v04_02/dev_disp.h
create mode 100644 src/common/inc/swref/published/disp/v05_01/dev_disp.h
create mode 100644 src/common/inc/swref/published/disp/v05_02/dev_disp.h
create mode 100644 src/common/inc/swref/published/nv_arch.h
create mode 100644 src/common/inc/swref/published/nv_ref.h
create mode 100644 src/common/inc/swref/published/t23x/t234/dev_fuse.h
create mode 100644 src/common/inc/swref/published/turing/tu102/dev_mmu.h
create mode 100644 src/common/inc/swref/published/turing/tu102/kind_macros.h
create mode 100644 src/common/modeset/hdmipacket/nvhdmi_frlInterface.h
create mode 100644 src/common/modeset/hdmipacket/nvhdmipkt.c
create mode 100644 src/common/modeset/hdmipacket/nvhdmipkt.h
create mode 100644 src/common/modeset/hdmipacket/nvhdmipkt_0073.c
create mode 100644 src/common/modeset/hdmipacket/nvhdmipkt_9171.c
create mode 100644 src/common/modeset/hdmipacket/nvhdmipkt_9271.c
create mode 100644 src/common/modeset/hdmipacket/nvhdmipkt_9471.c
create mode 100644 src/common/modeset/hdmipacket/nvhdmipkt_9571.c
create mode 100644 src/common/modeset/hdmipacket/nvhdmipkt_C371.c
create mode 100644 src/common/modeset/hdmipacket/nvhdmipkt_C671.c
create mode 100644 src/common/modeset/hdmipacket/nvhdmipkt_C871.c
create mode 100644 src/common/modeset/hdmipacket/nvhdmipkt_C971.c
create mode 100644 src/common/modeset/hdmipacket/nvhdmipkt_CC71.c
create mode 100644 src/common/modeset/hdmipacket/nvhdmipkt_class.h
create mode 100644 src/common/modeset/hdmipacket/nvhdmipkt_common.h
create mode 100644 src/common/modeset/hdmipacket/nvhdmipkt_internal.h
create mode 100644 src/common/modeset/timing/displayid.h
create mode 100644 src/common/modeset/timing/displayid20.h
create mode 100644 src/common/modeset/timing/dpsdp.h
create mode 100644 src/common/modeset/timing/edid.h
create mode 100644 src/common/modeset/timing/nvt_cvt.c
create mode 100644 src/common/modeset/timing/nvt_displayid20.c
create mode 100644 src/common/modeset/timing/nvt_dmt.c
create mode 100644 src/common/modeset/timing/nvt_dsc_pps.c
create mode 100644 src/common/modeset/timing/nvt_dsc_pps.h
create mode 100644 src/common/modeset/timing/nvt_edid.c
create mode 100644 src/common/modeset/timing/nvt_edidext_861.c
create mode 100644 src/common/modeset/timing/nvt_edidext_displayid.c
create mode 100644 src/common/modeset/timing/nvt_edidext_displayid20.c
create mode 100644 src/common/modeset/timing/nvt_gtf.c
create mode 100644 src/common/modeset/timing/nvt_ovt.c
create mode 100644 src/common/modeset/timing/nvt_tv.c
create mode 100644 src/common/modeset/timing/nvt_util.c
create mode 100644 src/common/modeset/timing/nvtiming.h
create mode 100644 src/common/modeset/timing/nvtiming_pvt.h
create mode 100644 src/common/sdk/nvidia/inc/alloc/alloc_channel.h
create mode 100644 src/common/sdk/nvidia/inc/cc_drv.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0000.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0000_notification.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0001.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0002.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0004.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0005.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0005_notification.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0020.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl003e.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0040.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0041.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0070.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0071.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0073.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0076.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0080.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0080_notification.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl008f.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0092.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0092_callback.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl00b1.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl00c1.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl00c3.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl00da.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl00de.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl00f2.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl00fc.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl00fe.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0100.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl0101.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl2080.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl2080_notification.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl2081.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl2082.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl30f1.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl30f1_notification.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl402c.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl5070.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl5070_notification.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl50a0.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl84a0.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl84a0_deprecated.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl900e.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl9010.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl9010_callback.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl902d.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl907dswspare.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl9097.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl90cd.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl90cdtrace.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl90e7.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl90ec.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl90f1.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl9170.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl9171.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl917a.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl917b.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl917c.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl917cswspare.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl917d.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl917e.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl9270.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl9271.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl927c.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl927d.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl9470.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl9471.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl947d.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl9570.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl9571.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl957d.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl9770.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl977d.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl9870.h
create mode 100644 src/common/sdk/nvidia/inc/class/cl987d.h
create mode 100644 src/common/sdk/nvidia/inc/class/cla06f.h
create mode 100644 src/common/sdk/nvidia/inc/class/cla06fsubch.h
create mode 100644 src/common/sdk/nvidia/inc/class/cla097.h
create mode 100644 src/common/sdk/nvidia/inc/class/cla0b5.h
create mode 100644 src/common/sdk/nvidia/inc/class/cla16f.h
create mode 100644 src/common/sdk/nvidia/inc/class/cla26f.h
create mode 100644 src/common/sdk/nvidia/inc/class/clb06f.h
create mode 100644 src/common/sdk/nvidia/inc/class/clb097.h
create mode 100644 src/common/sdk/nvidia/inc/class/clb097tex.h
create mode 100644 src/common/sdk/nvidia/inc/class/clb0b5sw.h
create mode 100644 src/common/sdk/nvidia/inc/class/clb197.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc06f.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc097.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc097tex.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc197.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc361.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc36f.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc370.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc370_notification.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc371.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc372sw.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc373.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc37a.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc37b.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc37d.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc37dswspare.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc37e.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc397.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc46f.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc56f.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc570.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc573.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc574.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc57a.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc57b.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc57d.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc57e.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc57esw.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc597.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc637.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc638.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc661.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc670.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc671.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc673.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc67a.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc67b.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc67d.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc67e.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc697.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc770.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc771.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc77d.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc77f.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc797.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc7b5.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc86f.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc870.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc871.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc873.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc87d.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc87e.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc970.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc971.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc973.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc97a.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc97b.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc97d.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc97dswspare.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc97e.h
create mode 100644 src/common/sdk/nvidia/inc/class/clc997.h
create mode 100644 src/common/sdk/nvidia/inc/class/clca70.h
create mode 100644 src/common/sdk/nvidia/inc/class/clcb97.h
create mode 100644 src/common/sdk/nvidia/inc/class/clcb97tex.h
create mode 100644 src/common/sdk/nvidia/inc/class/clcc70.h
create mode 100644 src/common/sdk/nvidia/inc/class/clcc71.h
create mode 100644 src/common/sdk/nvidia/inc/class/clcc73.h
create mode 100644 src/common/sdk/nvidia/inc/class/clcc7a.h
create mode 100644 src/common/sdk/nvidia/inc/class/clcc7b.h
create mode 100644 src/common/sdk/nvidia/inc/class/clcc7d.h
create mode 100644 src/common/sdk/nvidia/inc/class/clcc7e.h
create mode 100644 src/common/sdk/nvidia/inc/cpuopsys.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0002.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0004.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0020.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl003e.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0073.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0076.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl00da.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl00de.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl00fe.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl0100.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h
create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h
src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink_common.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmu.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spdm.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vgpumgrinternal.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbase.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgpu.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fucodecoverage.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl402c.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83debase.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl906f.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl90e7.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7base.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7bbx.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrl90f1.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrla06f.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fbase.h create mode 100644 
src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06finternal.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrla081.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrlb06f.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrlc36f.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370or.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrlc637.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrlc638.h create mode 100644 src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h create mode 100644 src/common/sdk/nvidia/inc/dpringbuffertypes.h create mode 100644 src/common/sdk/nvidia/inc/g_finn_rm_api.h create mode 100644 src/common/sdk/nvidia/inc/mmu_fmt_types.h create mode 100644 src/common/sdk/nvidia/inc/nv-hypervisor.h create mode 100644 src/common/sdk/nvidia/inc/nv-kernel-interface-api.h create mode 100644 src/common/sdk/nvidia/inc/nv_stdarg.h create mode 100644 src/common/sdk/nvidia/inc/nv_vgpu_types.h create mode 100644 src/common/sdk/nvidia/inc/nvcd.h create mode 100644 src/common/sdk/nvidia/inc/nvcfg_sdk.h create mode 100644 src/common/sdk/nvidia/inc/nvdisptypes.h create mode 100644 src/common/sdk/nvidia/inc/nverror.h create mode 100644 src/common/sdk/nvidia/inc/nvfixedtypes.h create mode 100644 src/common/sdk/nvidia/inc/nvgputypes.h create mode 100644 src/common/sdk/nvidia/inc/nvi2c.h create mode 100644 src/common/sdk/nvidia/inc/nvimpshared.h create mode 100644 src/common/sdk/nvidia/inc/nvlimits.h create mode 100644 src/common/sdk/nvidia/inc/nvmisc.h create mode 100644 src/common/sdk/nvidia/inc/nvos.h create mode 100644 src/common/sdk/nvidia/inc/nvsecurityinfo.h create mode 100644 src/common/sdk/nvidia/inc/nvstatus.h create mode 100644 src/common/sdk/nvidia/inc/nvstatuscodes.h create mode 100644 src/common/sdk/nvidia/inc/nvtypes.h create mode 100644 src/common/sdk/nvidia/inc/rs_access.h create mode 100644 src/common/shared/inc/compat.h create mode 100644 src/common/shared/inc/nvdevid.h create mode 100644 src/common/shared/nvstatus/nvstatus.c create mode 100644 src/common/softfloat/COPYING.txt create mode 100644 src/common/softfloat/nvidia/nv-softfloat.h create mode 100644 src/common/softfloat/nvidia/platform.h create mode 100644 src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c create mode 100644 src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c create mode 100644 src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c create mode 100644 src/common/softfloat/source/8086-SSE/s_f16UIToCommonNaN.c create mode 100644 src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c create mode 100644 src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c create mode 100644 src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c create mode 100644 src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c create mode 100644 src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c create mode 100644 src/common/softfloat/source/8086-SSE/specialize.h create mode 100644 
src/common/softfloat/source/f16_to_f32.c create mode 100644 src/common/softfloat/source/f32_add.c create mode 100644 src/common/softfloat/source/f32_div.c create mode 100644 src/common/softfloat/source/f32_eq.c create mode 100644 src/common/softfloat/source/f32_eq_signaling.c create mode 100644 src/common/softfloat/source/f32_isSignalingNaN.c create mode 100644 src/common/softfloat/source/f32_le.c create mode 100644 src/common/softfloat/source/f32_le_quiet.c create mode 100644 src/common/softfloat/source/f32_lt.c create mode 100644 src/common/softfloat/source/f32_lt_quiet.c create mode 100644 src/common/softfloat/source/f32_mul.c create mode 100644 src/common/softfloat/source/f32_mulAdd.c create mode 100644 src/common/softfloat/source/f32_rem.c create mode 100644 src/common/softfloat/source/f32_roundToInt.c create mode 100644 src/common/softfloat/source/f32_sqrt.c create mode 100644 src/common/softfloat/source/f32_sub.c create mode 100644 src/common/softfloat/source/f32_to_f16.c create mode 100644 src/common/softfloat/source/f32_to_f64.c create mode 100644 src/common/softfloat/source/f32_to_i32.c create mode 100644 src/common/softfloat/source/f32_to_i32_r_minMag.c create mode 100644 src/common/softfloat/source/f32_to_i64.c create mode 100644 src/common/softfloat/source/f32_to_i64_r_minMag.c create mode 100644 src/common/softfloat/source/f32_to_ui32.c create mode 100644 src/common/softfloat/source/f32_to_ui32_r_minMag.c create mode 100644 src/common/softfloat/source/f32_to_ui64.c create mode 100644 src/common/softfloat/source/f32_to_ui64_r_minMag.c create mode 100644 src/common/softfloat/source/f64_add.c create mode 100644 src/common/softfloat/source/f64_div.c create mode 100644 src/common/softfloat/source/f64_eq.c create mode 100644 src/common/softfloat/source/f64_eq_signaling.c create mode 100644 src/common/softfloat/source/f64_isSignalingNaN.c create mode 100644 src/common/softfloat/source/f64_le.c create mode 100644 src/common/softfloat/source/f64_le_quiet.c create mode 100644 src/common/softfloat/source/f64_lt.c create mode 100644 src/common/softfloat/source/f64_lt_quiet.c create mode 100644 src/common/softfloat/source/f64_mul.c create mode 100644 src/common/softfloat/source/f64_mulAdd.c create mode 100644 src/common/softfloat/source/f64_rem.c create mode 100644 src/common/softfloat/source/f64_roundToInt.c create mode 100644 src/common/softfloat/source/f64_sqrt.c create mode 100644 src/common/softfloat/source/f64_sub.c create mode 100644 src/common/softfloat/source/f64_to_f32.c create mode 100644 src/common/softfloat/source/f64_to_i32.c create mode 100644 src/common/softfloat/source/f64_to_i32_r_minMag.c create mode 100644 src/common/softfloat/source/f64_to_i64.c create mode 100644 src/common/softfloat/source/f64_to_i64_r_minMag.c create mode 100644 src/common/softfloat/source/f64_to_ui32.c create mode 100644 src/common/softfloat/source/f64_to_ui32_r_minMag.c create mode 100644 src/common/softfloat/source/f64_to_ui64.c create mode 100644 src/common/softfloat/source/f64_to_ui64_r_minMag.c create mode 100644 src/common/softfloat/source/i32_to_f32.c create mode 100644 src/common/softfloat/source/i32_to_f64.c create mode 100644 src/common/softfloat/source/i64_to_f32.c create mode 100644 src/common/softfloat/source/i64_to_f64.c create mode 100644 src/common/softfloat/source/include/internals.h create mode 100644 src/common/softfloat/source/include/primitiveTypes.h create mode 100644 src/common/softfloat/source/include/primitives.h create mode 100644 
src/common/softfloat/source/include/softfloat.h create mode 100644 src/common/softfloat/source/include/softfloat_types.h create mode 100644 src/common/softfloat/source/s_addMagsF32.c create mode 100644 src/common/softfloat/source/s_addMagsF64.c create mode 100644 src/common/softfloat/source/s_approxRecipSqrt32_1.c create mode 100644 src/common/softfloat/source/s_approxRecipSqrt_1Ks.c create mode 100644 src/common/softfloat/source/s_countLeadingZeros64.c create mode 100644 src/common/softfloat/source/s_countLeadingZeros8.c create mode 100644 src/common/softfloat/source/s_mul64To128.c create mode 100644 src/common/softfloat/source/s_mulAddF32.c create mode 100644 src/common/softfloat/source/s_mulAddF64.c create mode 100644 src/common/softfloat/source/s_normRoundPackToF32.c create mode 100644 src/common/softfloat/source/s_normRoundPackToF64.c create mode 100644 src/common/softfloat/source/s_normSubnormalF16Sig.c create mode 100644 src/common/softfloat/source/s_normSubnormalF32Sig.c create mode 100644 src/common/softfloat/source/s_normSubnormalF64Sig.c create mode 100644 src/common/softfloat/source/s_roundPackToF16.c create mode 100644 src/common/softfloat/source/s_roundPackToF32.c create mode 100644 src/common/softfloat/source/s_roundPackToF64.c create mode 100644 src/common/softfloat/source/s_roundToI32.c create mode 100644 src/common/softfloat/source/s_roundToI64.c create mode 100644 src/common/softfloat/source/s_roundToUI32.c create mode 100644 src/common/softfloat/source/s_roundToUI64.c create mode 100644 src/common/softfloat/source/s_shiftRightJam128.c create mode 100644 src/common/softfloat/source/s_subMagsF32.c create mode 100644 src/common/softfloat/source/s_subMagsF64.c create mode 100644 src/common/softfloat/source/softfloat_state.c create mode 100644 src/common/softfloat/source/ui32_to_f32.c create mode 100644 src/common/softfloat/source/ui32_to_f64.c create mode 100644 src/common/softfloat/source/ui64_to_f32.c create mode 100644 src/common/softfloat/source/ui64_to_f64.c create mode 100644 src/common/src/nv_smg.c create mode 100644 src/common/unix/common/inc/nv-float.h create mode 100644 src/common/unix/common/inc/nv_amodel_enum.h create mode 100644 src/common/unix/common/inc/nv_assert.h create mode 100644 src/common/unix/common/inc/nv_common_utils.h create mode 100644 src/common/unix/common/inc/nv_dpy_id.h create mode 100644 src/common/unix/common/inc/nv_mode_timings.h create mode 100644 src/common/unix/common/utils/interface/nv_memory_tracker.h create mode 100644 src/common/unix/common/utils/interface/nv_mode_timings_utils.h create mode 100644 src/common/unix/common/utils/interface/nv_vasprintf.h create mode 100644 src/common/unix/common/utils/interface/unix_rm_handle.h create mode 100644 src/common/unix/common/utils/nv_memory_tracker.c create mode 100644 src/common/unix/common/utils/nv_mode_timings_utils.c create mode 100644 src/common/unix/common/utils/nv_vasprintf.c create mode 100644 src/common/unix/common/utils/unix_rm_handle.c create mode 100644 src/common/unix/nvidia-3d/include/nv_xz_mem_hooks.h create mode 100644 src/common/unix/nvidia-3d/include/nvidia-3d-fermi.h create mode 100644 src/common/unix/nvidia-3d/include/nvidia-3d-hopper.h create mode 100644 src/common/unix/nvidia-3d/include/nvidia-3d-kepler.h create mode 100644 src/common/unix/nvidia-3d/include/nvidia-3d-maxwell.h create mode 100644 src/common/unix/nvidia-3d/include/nvidia-3d-pascal.h create mode 100644 src/common/unix/nvidia-3d/include/nvidia-3d-surface.h create mode 100644 
src/common/unix/nvidia-3d/include/nvidia-3d-turing.h create mode 100644 src/common/unix/nvidia-3d/include/nvidia-3d-types-priv.h create mode 100644 src/common/unix/nvidia-3d/include/nvidia-3d-vertex-arrays.h create mode 100644 src/common/unix/nvidia-3d/include/nvidia-3d-volta.h create mode 100644 src/common/unix/nvidia-3d/interface/nvidia-3d-color-targets.h create mode 100644 src/common/unix/nvidia-3d/interface/nvidia-3d-constant-buffers.h create mode 100644 src/common/unix/nvidia-3d/interface/nvidia-3d-imports.h create mode 100644 src/common/unix/nvidia-3d/interface/nvidia-3d-shader-constants.h create mode 100644 src/common/unix/nvidia-3d/interface/nvidia-3d-shaders.h create mode 100644 src/common/unix/nvidia-3d/interface/nvidia-3d-types.h create mode 100644 src/common/unix/nvidia-3d/interface/nvidia-3d-utils.h create mode 100644 src/common/unix/nvidia-3d/interface/nvidia-3d.h create mode 100644 src/common/unix/nvidia-3d/src/nvidia-3d-core.c create mode 100644 src/common/unix/nvidia-3d/src/nvidia-3d-fermi.c create mode 100644 src/common/unix/nvidia-3d/src/nvidia-3d-hopper.c create mode 100644 src/common/unix/nvidia-3d/src/nvidia-3d-init.c create mode 100644 src/common/unix/nvidia-3d/src/nvidia-3d-kepler.c create mode 100644 src/common/unix/nvidia-3d/src/nvidia-3d-maxwell.c create mode 100644 src/common/unix/nvidia-3d/src/nvidia-3d-pascal.c create mode 100644 src/common/unix/nvidia-3d/src/nvidia-3d-surface.c create mode 100644 src/common/unix/nvidia-3d/src/nvidia-3d-turing.c create mode 100644 src/common/unix/nvidia-3d/src/nvidia-3d-vertex-arrays.c create mode 100644 src/common/unix/nvidia-3d/src/nvidia-3d-volta.c create mode 100644 src/common/unix/nvidia-headsurface/nvidia-headsurface-constants.h create mode 100644 src/common/unix/nvidia-headsurface/nvidia-headsurface-types.h create mode 100644 src/common/unix/nvidia-push/include/nvidia-push-priv-imports.h create mode 100644 src/common/unix/nvidia-push/include/nvidia-push-priv.h create mode 100644 src/common/unix/nvidia-push/interface/nvidia-push-init.h create mode 100644 src/common/unix/nvidia-push/interface/nvidia-push-methods.h create mode 100644 src/common/unix/nvidia-push/interface/nvidia-push-types.h create mode 100644 src/common/unix/nvidia-push/interface/nvidia-push-utils.h create mode 100644 src/common/unix/nvidia-push/src/nvidia-push-init.c create mode 100644 src/common/unix/nvidia-push/src/nvidia-push.c create mode 100644 src/common/unix/xzminidec/interface/xz.h create mode 100644 src/common/unix/xzminidec/src/xz_config.h create mode 100644 src/common/unix/xzminidec/src/xz_crc32.c create mode 100644 src/common/unix/xzminidec/src/xz_dec_bcj.c create mode 100644 src/common/unix/xzminidec/src/xz_dec_lzma2.c create mode 100644 src/common/unix/xzminidec/src/xz_dec_stream.c create mode 100644 src/common/unix/xzminidec/src/xz_lzma2.h create mode 100644 src/common/unix/xzminidec/src/xz_private.h create mode 100644 src/common/unix/xzminidec/src/xz_stream.h create mode 100644 src/nvidia-modeset/Makefile create mode 100644 src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h create mode 100644 src/nvidia-modeset/include/dp/nvdp-connector.h create mode 100644 src/nvidia-modeset/include/dp/nvdp-device.h create mode 100644 src/nvidia-modeset/include/dp/nvdp-timer.h create mode 100644 src/nvidia-modeset/include/g_nvkms-evo-states.h create mode 100644 src/nvidia-modeset/include/nvkms-3dvision.h create mode 100644 src/nvidia-modeset/include/nvkms-attributes.h create mode 100644 src/nvidia-modeset/include/nvkms-console-restore.h create mode 
100644 src/nvidia-modeset/include/nvkms-ctxdma.h create mode 100644 src/nvidia-modeset/include/nvkms-cursor.h create mode 100644 src/nvidia-modeset/include/nvkms-difr.h create mode 100644 src/nvidia-modeset/include/nvkms-dma.h create mode 100644 src/nvidia-modeset/include/nvkms-dpy-override.h create mode 100644 src/nvidia-modeset/include/nvkms-dpy.h create mode 100644 src/nvidia-modeset/include/nvkms-event.h create mode 100644 src/nvidia-modeset/include/nvkms-evo-states.h create mode 100644 src/nvidia-modeset/include/nvkms-evo.h create mode 100644 src/nvidia-modeset/include/nvkms-evo1.h create mode 100644 src/nvidia-modeset/include/nvkms-evo3.h create mode 100644 src/nvidia-modeset/include/nvkms-flip-workarea.h create mode 100644 src/nvidia-modeset/include/nvkms-flip.h create mode 100644 src/nvidia-modeset/include/nvkms-framelock.h create mode 100644 src/nvidia-modeset/include/nvkms-hal.h create mode 100644 src/nvidia-modeset/include/nvkms-hdmi.h create mode 100644 src/nvidia-modeset/include/nvkms-headsurface-3d.h create mode 100644 src/nvidia-modeset/include/nvkms-headsurface-config.h create mode 100644 src/nvidia-modeset/include/nvkms-headsurface-ioctl.h create mode 100644 src/nvidia-modeset/include/nvkms-headsurface-matrix.h create mode 100644 src/nvidia-modeset/include/nvkms-headsurface-priv.h create mode 100644 src/nvidia-modeset/include/nvkms-headsurface-swapgroup.h create mode 100644 src/nvidia-modeset/include/nvkms-headsurface.h create mode 100644 src/nvidia-modeset/include/nvkms-hw-flip.h create mode 100644 src/nvidia-modeset/include/nvkms-lut.h create mode 100644 src/nvidia-modeset/include/nvkms-modepool.h create mode 100644 src/nvidia-modeset/include/nvkms-modeset-types.h create mode 100644 src/nvidia-modeset/include/nvkms-modeset-workarea.h create mode 100644 src/nvidia-modeset/include/nvkms-modeset.h create mode 100644 src/nvidia-modeset/include/nvkms-prealloc-types.h create mode 100644 src/nvidia-modeset/include/nvkms-prealloc.h create mode 100644 src/nvidia-modeset/include/nvkms-private.h create mode 100644 src/nvidia-modeset/include/nvkms-push.h create mode 100644 src/nvidia-modeset/include/nvkms-rm.h create mode 100644 src/nvidia-modeset/include/nvkms-rmapi.h create mode 100644 src/nvidia-modeset/include/nvkms-setlut-workarea.h create mode 100644 src/nvidia-modeset/include/nvkms-softfloat.h create mode 100644 src/nvidia-modeset/include/nvkms-stereo.h create mode 100644 src/nvidia-modeset/include/nvkms-surface.h create mode 100644 src/nvidia-modeset/include/nvkms-types.h create mode 100644 src/nvidia-modeset/include/nvkms-utils-flip.h create mode 100644 src/nvidia-modeset/include/nvkms-utils.h create mode 100644 src/nvidia-modeset/include/nvkms-vblank-sem-control.h create mode 100644 src/nvidia-modeset/include/nvkms-vrr.h create mode 100644 src/nvidia-modeset/interface/nvkms-api-types.h create mode 100644 src/nvidia-modeset/interface/nvkms-api.h create mode 100644 src/nvidia-modeset/interface/nvkms-format.h create mode 100644 src/nvidia-modeset/interface/nvkms-ioctl.h create mode 100644 src/nvidia-modeset/interface/nvkms-modetimings.h create mode 100644 src/nvidia-modeset/interface/nvkms-sync.h create mode 100644 src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h create mode 100644 src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h create mode 100644 src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h create mode 100644 src/nvidia-modeset/kapi/interface/nvkms-kapi.h create mode 100644 src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c create mode 100644 
src/nvidia-modeset/kapi/src/nvkms-kapi-sync.c create mode 100644 src/nvidia-modeset/kapi/src/nvkms-kapi.c create mode 100644 src/nvidia-modeset/lib/nvkms-format.c create mode 100644 src/nvidia-modeset/lib/nvkms-sync.c create mode 100644 src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h create mode 100644 src/nvidia-modeset/os-interface/include/nvkms.h create mode 100644 src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp create mode 100644 src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp create mode 100644 src/nvidia-modeset/src/dp/nvdp-connector.cpp create mode 100644 src/nvidia-modeset/src/dp/nvdp-device.cpp create mode 100644 src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp create mode 100644 src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp create mode 100644 src/nvidia-modeset/src/dp/nvdp-host.cpp create mode 100644 src/nvidia-modeset/src/dp/nvdp-timer.cpp create mode 100644 src/nvidia-modeset/src/dp/nvdp-timer.hpp create mode 100644 src/nvidia-modeset/src/g_nvkms-evo-states.c create mode 100644 src/nvidia-modeset/src/nvkms-3dvision.c create mode 100644 src/nvidia-modeset/src/nvkms-attributes.c create mode 100644 src/nvidia-modeset/src/nvkms-conf.c create mode 100644 src/nvidia-modeset/src/nvkms-console-restore.c create mode 100644 src/nvidia-modeset/src/nvkms-ctxdma.c create mode 100644 src/nvidia-modeset/src/nvkms-cursor.c create mode 100644 src/nvidia-modeset/src/nvkms-cursor2.c create mode 100644 src/nvidia-modeset/src/nvkms-cursor3.c create mode 100644 src/nvidia-modeset/src/nvkms-difr.c create mode 100644 src/nvidia-modeset/src/nvkms-dma.c create mode 100644 src/nvidia-modeset/src/nvkms-dpy-override.c create mode 100644 src/nvidia-modeset/src/nvkms-dpy.c create mode 100644 src/nvidia-modeset/src/nvkms-event.c create mode 100644 src/nvidia-modeset/src/nvkms-evo.c create mode 100644 src/nvidia-modeset/src/nvkms-evo1.c create mode 100644 src/nvidia-modeset/src/nvkms-evo2.c create mode 100644 src/nvidia-modeset/src/nvkms-evo3.c create mode 100644 src/nvidia-modeset/src/nvkms-evo4.c create mode 100644 src/nvidia-modeset/src/nvkms-flip.c create mode 100644 src/nvidia-modeset/src/nvkms-framelock.c create mode 100644 src/nvidia-modeset/src/nvkms-hal.c create mode 100644 src/nvidia-modeset/src/nvkms-hdmi.c create mode 100644 src/nvidia-modeset/src/nvkms-headsurface-3d.c create mode 100644 src/nvidia-modeset/src/nvkms-headsurface-config.c create mode 100644 src/nvidia-modeset/src/nvkms-headsurface-ioctl.c create mode 100644 src/nvidia-modeset/src/nvkms-headsurface-matrix.c create mode 100644 src/nvidia-modeset/src/nvkms-headsurface-swapgroup.c create mode 100644 src/nvidia-modeset/src/nvkms-headsurface.c create mode 100644 src/nvidia-modeset/src/nvkms-hw-flip.c create mode 100644 src/nvidia-modeset/src/nvkms-hw-states.c create mode 100644 src/nvidia-modeset/src/nvkms-lut.c create mode 100644 src/nvidia-modeset/src/nvkms-modepool.c create mode 100644 src/nvidia-modeset/src/nvkms-modeset.c create mode 100644 src/nvidia-modeset/src/nvkms-pow.c create mode 100644 src/nvidia-modeset/src/nvkms-prealloc.c create mode 100644 src/nvidia-modeset/src/nvkms-push.c create mode 100644 src/nvidia-modeset/src/nvkms-rm.c create mode 100644 src/nvidia-modeset/src/nvkms-rmapi-dgpu.c create mode 100644 src/nvidia-modeset/src/nvkms-stereo.c create mode 100644 src/nvidia-modeset/src/nvkms-surface.c create mode 100644 src/nvidia-modeset/src/nvkms-utils-flip.c create mode 100644 src/nvidia-modeset/src/nvkms-utils.c create mode 100644 src/nvidia-modeset/src/nvkms-vblank-sem-control.c 
create mode 100644 src/nvidia-modeset/src/nvkms-vrr.c create mode 100644 src/nvidia-modeset/src/nvkms.c create mode 100644 src/nvidia-modeset/src/shaders/g_ampere_shader_info.h create mode 100644 src/nvidia-modeset/src/shaders/g_ampere_shaders create mode 100644 src/nvidia-modeset/src/shaders/g_hopper_shader_info.h create mode 100644 src/nvidia-modeset/src/shaders/g_hopper_shaders create mode 100644 src/nvidia-modeset/src/shaders/g_maxwell_shader_info.h create mode 100644 src/nvidia-modeset/src/shaders/g_maxwell_shaders create mode 100644 src/nvidia-modeset/src/shaders/g_nvidia-headsurface-shader-info.h create mode 100644 src/nvidia-modeset/src/shaders/g_pascal_shader_info.h create mode 100644 src/nvidia-modeset/src/shaders/g_pascal_shaders create mode 100644 src/nvidia-modeset/src/shaders/g_shader_names.h create mode 100644 src/nvidia-modeset/src/shaders/g_turing_shader_info.h create mode 100644 src/nvidia-modeset/src/shaders/g_turing_shaders create mode 100644 src/nvidia-modeset/src/shaders/g_volta_shader_info.h create mode 100644 src/nvidia-modeset/src/shaders/g_volta_shaders create mode 100644 src/nvidia-modeset/srcs.mk create mode 100644 src/nvidia/Makefile create mode 100644 src/nvidia/arch/nvalloc/common/inc/nv-firmware.h create mode 100644 src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/nv-caps.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/nv-chardev-numbers.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/nv-ioctl-lockless-diag.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numa.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/nv-nb-regs.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/nv-priv.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/nv-reg.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/nv.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/nv_escape.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/os-interface.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/os_custom.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/osapi.h create mode 100644 src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h create mode 100644 src/nvidia/arch/nvalloc/unix/src/escape.c create mode 100644 src/nvidia/arch/nvalloc/unix/src/exports-stubs.c create mode 100644 src/nvidia/arch/nvalloc/unix/src/gcc_helper.c create mode 100644 src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c create mode 100644 src/nvidia/arch/nvalloc/unix/src/os.c create mode 100644 src/nvidia/arch/nvalloc/unix/src/osapi.c create mode 100644 src/nvidia/arch/nvalloc/unix/src/osinit.c create mode 100644 src/nvidia/arch/nvalloc/unix/src/osmemdesc.c create mode 100644 src/nvidia/arch/nvalloc/unix/src/osunix.c create mode 100644 src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c create mode 100644 src/nvidia/arch/nvalloc/unix/src/registry.c create mode 100644 src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c create mode 100644 src/nvidia/exports_link_command.txt create mode 100644 src/nvidia/generated/g_allclasses.h create mode 100644 src/nvidia/generated/g_binary_api_nvoc.c 
create mode 100644 src/nvidia/generated/g_binary_api_nvoc.h create mode 100644 src/nvidia/generated/g_chips2halspec.h create mode 100644 src/nvidia/generated/g_chips2halspec_nvoc.c create mode 100644 src/nvidia/generated/g_chips2halspec_nvoc.h create mode 100644 src/nvidia/generated/g_client_nvoc.c create mode 100644 src/nvidia/generated/g_client_nvoc.h create mode 100644 src/nvidia/generated/g_client_resource_nvoc.c create mode 100644 src/nvidia/generated/g_client_resource_nvoc.h create mode 100644 src/nvidia/generated/g_code_coverage_mgr_nvoc.c create mode 100644 src/nvidia/generated/g_code_coverage_mgr_nvoc.h create mode 100644 src/nvidia/generated/g_context_dma_nvoc.c create mode 100644 src/nvidia/generated/g_context_dma_nvoc.h create mode 100644 src/nvidia/generated/g_dce_client_nvoc.c create mode 100644 src/nvidia/generated/g_dce_client_nvoc.h create mode 100644 src/nvidia/generated/g_device_nvoc.c create mode 100644 src/nvidia/generated/g_device_nvoc.h create mode 100644 src/nvidia/generated/g_disp_capabilities_nvoc.c create mode 100644 src/nvidia/generated/g_disp_capabilities_nvoc.h create mode 100644 src/nvidia/generated/g_disp_channel_nvoc.c create mode 100644 src/nvidia/generated/g_disp_channel_nvoc.h create mode 100644 src/nvidia/generated/g_disp_inst_mem_nvoc.c create mode 100644 src/nvidia/generated/g_disp_inst_mem_nvoc.h create mode 100644 src/nvidia/generated/g_disp_objs_nvoc.c create mode 100644 src/nvidia/generated/g_disp_objs_nvoc.h create mode 100644 src/nvidia/generated/g_disp_sf_user_nvoc.c create mode 100644 src/nvidia/generated/g_disp_sf_user_nvoc.h create mode 100644 src/nvidia/generated/g_eng_desc_nvoc.h create mode 100644 src/nvidia/generated/g_eng_state_nvoc.c create mode 100644 src/nvidia/generated/g_eng_state_nvoc.h create mode 100644 src/nvidia/generated/g_event_buffer_nvoc.c create mode 100644 src/nvidia/generated/g_event_buffer_nvoc.h create mode 100644 src/nvidia/generated/g_event_nvoc.c create mode 100644 src/nvidia/generated/g_event_nvoc.h create mode 100644 src/nvidia/generated/g_generic_engine_nvoc.c create mode 100644 src/nvidia/generated/g_generic_engine_nvoc.h create mode 100644 src/nvidia/generated/g_gpu_access_nvoc.c create mode 100644 src/nvidia/generated/g_gpu_access_nvoc.h create mode 100644 src/nvidia/generated/g_gpu_arch_nvoc.c create mode 100644 src/nvidia/generated/g_gpu_arch_nvoc.h create mode 100644 src/nvidia/generated/g_gpu_class_list.c create mode 100644 src/nvidia/generated/g_gpu_db_nvoc.c create mode 100644 src/nvidia/generated/g_gpu_db_nvoc.h create mode 100644 src/nvidia/generated/g_gpu_group_nvoc.c create mode 100644 src/nvidia/generated/g_gpu_group_nvoc.h create mode 100644 src/nvidia/generated/g_gpu_halspec_nvoc.c create mode 100644 src/nvidia/generated/g_gpu_halspec_nvoc.h create mode 100644 src/nvidia/generated/g_gpu_mgmt_api_nvoc.c create mode 100644 src/nvidia/generated/g_gpu_mgmt_api_nvoc.h create mode 100644 src/nvidia/generated/g_gpu_mgr_nvoc.c create mode 100644 src/nvidia/generated/g_gpu_mgr_nvoc.h create mode 100644 src/nvidia/generated/g_gpu_nvoc.c create mode 100644 src/nvidia/generated/g_gpu_nvoc.h create mode 100644 src/nvidia/generated/g_gpu_resource_nvoc.c create mode 100644 src/nvidia/generated/g_gpu_resource_nvoc.h create mode 100644 src/nvidia/generated/g_gpu_user_shared_data_nvoc.c create mode 100644 src/nvidia/generated/g_gpu_user_shared_data_nvoc.h create mode 100644 src/nvidia/generated/g_hal.h create mode 100644 src/nvidia/generated/g_hal_archimpl.h create mode 100644 src/nvidia/generated/g_hal_mgr_nvoc.c 
create mode 100644 src/nvidia/generated/g_hal_mgr_nvoc.h create mode 100644 src/nvidia/generated/g_hal_nvoc.c create mode 100644 src/nvidia/generated/g_hal_nvoc.h create mode 100644 src/nvidia/generated/g_hal_private.h create mode 100644 src/nvidia/generated/g_hal_register.h create mode 100644 src/nvidia/generated/g_hda_codec_api_nvoc.c create mode 100644 src/nvidia/generated/g_hda_codec_api_nvoc.h create mode 100644 src/nvidia/generated/g_hypervisor_nvoc.h create mode 100644 src/nvidia/generated/g_io_vaspace_nvoc.c create mode 100644 src/nvidia/generated/g_io_vaspace_nvoc.h create mode 100644 src/nvidia/generated/g_ioaccess_nvoc.c create mode 100644 src/nvidia/generated/g_ioaccess_nvoc.h create mode 100644 src/nvidia/generated/g_journal_nvoc.h create mode 100644 src/nvidia/generated/g_kern_disp_nvoc.c create mode 100644 src/nvidia/generated/g_kern_disp_nvoc.h create mode 100644 src/nvidia/generated/g_kernel_head_nvoc.c create mode 100644 src/nvidia/generated/g_kernel_head_nvoc.h create mode 100644 src/nvidia/generated/g_lock_stress_nvoc.c create mode 100644 src/nvidia/generated/g_lock_stress_nvoc.h create mode 100644 src/nvidia/generated/g_lock_test_nvoc.c create mode 100644 src/nvidia/generated/g_lock_test_nvoc.h create mode 100644 src/nvidia/generated/g_mem_desc_nvoc.h create mode 100644 src/nvidia/generated/g_mem_list_nvoc.h create mode 100644 src/nvidia/generated/g_mem_mgr_nvoc.c create mode 100644 src/nvidia/generated/g_mem_mgr_nvoc.h create mode 100644 src/nvidia/generated/g_mem_nvoc.c create mode 100644 src/nvidia/generated/g_mem_nvoc.h create mode 100644 src/nvidia/generated/g_nv_debug_dump_nvoc.h create mode 100644 src/nvidia/generated/g_nv_name_released.h create mode 100644 src/nvidia/generated/g_nvh_state.h create mode 100644 src/nvidia/generated/g_object_nvoc.c create mode 100644 src/nvidia/generated/g_object_nvoc.h create mode 100644 src/nvidia/generated/g_objtmr_nvoc.c create mode 100644 src/nvidia/generated/g_objtmr_nvoc.h create mode 100644 src/nvidia/generated/g_odb.h create mode 100644 src/nvidia/generated/g_os_desc_mem_nvoc.c create mode 100644 src/nvidia/generated/g_os_desc_mem_nvoc.h create mode 100644 src/nvidia/generated/g_os_hal.h create mode 100644 src/nvidia/generated/g_os_nvoc.c create mode 100644 src/nvidia/generated/g_os_nvoc.h create mode 100644 src/nvidia/generated/g_prereq_tracker_nvoc.c create mode 100644 src/nvidia/generated/g_prereq_tracker_nvoc.h create mode 100644 src/nvidia/generated/g_ref_count_nvoc.h create mode 100644 src/nvidia/generated/g_resource_fwd_decls_nvoc.h create mode 100644 src/nvidia/generated/g_resource_nvoc.c create mode 100644 src/nvidia/generated/g_resource_nvoc.h create mode 100644 src/nvidia/generated/g_resserv_nvoc.h create mode 100644 src/nvidia/generated/g_rmconfig_private.h create mode 100644 src/nvidia/generated/g_rmconfig_util.c create mode 100644 src/nvidia/generated/g_rmconfig_util.h create mode 100644 src/nvidia/generated/g_rpc-message-header.h create mode 100644 src/nvidia/generated/g_rpc-structures.h create mode 100644 src/nvidia/generated/g_rs_client_nvoc.c create mode 100644 src/nvidia/generated/g_rs_client_nvoc.h create mode 100644 src/nvidia/generated/g_rs_resource_nvoc.c create mode 100644 src/nvidia/generated/g_rs_resource_nvoc.h create mode 100644 src/nvidia/generated/g_rs_server_nvoc.c create mode 100644 src/nvidia/generated/g_rs_server_nvoc.h create mode 100644 src/nvidia/generated/g_sdk-structures.h create mode 100644 src/nvidia/generated/g_standard_mem_nvoc.c create mode 100644 
src/nvidia/generated/g_standard_mem_nvoc.h create mode 100644 src/nvidia/generated/g_subdevice_nvoc.c create mode 100644 src/nvidia/generated/g_subdevice_nvoc.h create mode 100644 src/nvidia/generated/g_syncpoint_mem_nvoc.c create mode 100644 src/nvidia/generated/g_syncpoint_mem_nvoc.h create mode 100644 src/nvidia/generated/g_system_mem_nvoc.c create mode 100644 src/nvidia/generated/g_system_mem_nvoc.h create mode 100644 src/nvidia/generated/g_system_nvoc.c create mode 100644 src/nvidia/generated/g_system_nvoc.h create mode 100644 src/nvidia/generated/g_tmr_nvoc.c create mode 100644 src/nvidia/generated/g_tmr_nvoc.h create mode 100644 src/nvidia/generated/g_traceable_nvoc.c create mode 100644 src/nvidia/generated/g_traceable_nvoc.h create mode 100644 src/nvidia/generated/g_vaspace_nvoc.c create mode 100644 src/nvidia/generated/g_vaspace_nvoc.h create mode 100644 src/nvidia/generated/g_virt_mem_mgr_nvoc.c create mode 100644 src/nvidia/generated/g_virt_mem_mgr_nvoc.h create mode 100644 src/nvidia/generated/rmconfig.h create mode 100644 src/nvidia/inc/kernel/core/core.h create mode 100644 src/nvidia/inc/kernel/core/hal.h create mode 100644 src/nvidia/inc/kernel/core/hal_mgr.h create mode 100644 src/nvidia/inc/kernel/core/info_block.h create mode 100644 src/nvidia/inc/kernel/core/locks.h create mode 100644 src/nvidia/inc/kernel/core/prelude.h create mode 100644 src/nvidia/inc/kernel/core/printf.h create mode 100644 src/nvidia/inc/kernel/core/strict.h create mode 100644 src/nvidia/inc/kernel/core/system.h create mode 100644 src/nvidia/inc/kernel/core/thread_state.h create mode 100644 src/nvidia/inc/kernel/diagnostics/code_coverage_mgr.h create mode 100644 src/nvidia/inc/kernel/diagnostics/journal.h create mode 100644 src/nvidia/inc/kernel/diagnostics/journal_structs.h create mode 100644 src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h create mode 100644 src/nvidia/inc/kernel/diagnostics/profiler.h create mode 100644 src/nvidia/inc/kernel/diagnostics/traceable.h create mode 100644 src/nvidia/inc/kernel/diagnostics/tracer.h create mode 100644 src/nvidia/inc/kernel/diagnostics/xid_context.h create mode 100644 src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h create mode 100644 src/nvidia/inc/kernel/gpu/dce_client/dce_client.h create mode 100644 src/nvidia/inc/kernel/gpu/device/device.h create mode 100644 src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h create mode 100644 src/nvidia/inc/kernel/gpu/disp/disp_channel.h create mode 100644 src/nvidia/inc/kernel/gpu/disp/disp_objs.h create mode 100644 src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h create mode 100644 src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h create mode 100644 src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h create mode 100644 src/nvidia/inc/kernel/gpu/disp/kern_disp.h create mode 100644 src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h create mode 100644 src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h create mode 100644 src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h create mode 100644 src/nvidia/inc/kernel/gpu/eng_desc.h create mode 100644 src/nvidia/inc/kernel/gpu/eng_state.h create mode 100644 src/nvidia/inc/kernel/gpu/error_cont.h create mode 100644 src/nvidia/inc/kernel/gpu/gpu.h create mode 100644 src/nvidia/inc/kernel/gpu/gpu_access.h create mode 100644 src/nvidia/inc/kernel/gpu/gpu_acpi_data.h create mode 100644 src/nvidia/inc/kernel/gpu/gpu_arch.h create mode 100644 src/nvidia/inc/kernel/gpu/gpu_child_class_defs.h create mode 100644 src/nvidia/inc/kernel/gpu/gpu_child_list.h create mode 100644 
src/nvidia/inc/kernel/gpu/gpu_device_mapping.h create mode 100644 src/nvidia/inc/kernel/gpu/gpu_ecc.h create mode 100644 src/nvidia/inc/kernel/gpu/gpu_engine_type.h create mode 100644 src/nvidia/inc/kernel/gpu/gpu_halspec.h create mode 100644 src/nvidia/inc/kernel/gpu/gpu_resource.h create mode 100644 src/nvidia/inc/kernel/gpu/gpu_resource_desc.h create mode 100644 src/nvidia/inc/kernel/gpu/gpu_shared_data_map.h create mode 100644 src/nvidia/inc/kernel/gpu/gpu_timeout.h create mode 100644 src/nvidia/inc/kernel/gpu/gpu_user_shared_data.h create mode 100644 src/nvidia/inc/kernel/gpu/gpu_uuid.h create mode 100644 src/nvidia/inc/kernel/gpu/gsp/gsp_trace_rats_macro.h create mode 100644 src/nvidia/inc/kernel/gpu/gsp/kernel_gsp_trace_rats.h create mode 100644 src/nvidia/inc/kernel/gpu/gsp/message_queue.h create mode 100644 src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_commands_responses.h create mode 100644 src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_common.h create mode 100644 src/nvidia/inc/kernel/gpu/hfrp/kernel_hfrp.h create mode 100644 src/nvidia/inc/kernel/gpu/intr/intr_common.h create mode 100644 src/nvidia/inc/kernel/gpu/kern_gpu_power.h create mode 100644 src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h create mode 100644 src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h create mode 100644 src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h create mode 100644 src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h create mode 100644 src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h create mode 100644 src/nvidia/inc/kernel/gpu/mem_mgr/rm_page_size.h create mode 100644 src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h create mode 100644 src/nvidia/inc/kernel/gpu/nvbitmask.h create mode 100644 src/nvidia/inc/kernel/gpu/rpc/objrpc.h create mode 100644 src/nvidia/inc/kernel/gpu/rpc/objrpcstructurecopy.h create mode 100644 src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h create mode 100644 src/nvidia/inc/kernel/gpu/subdevice/subdevice.h create mode 100644 src/nvidia/inc/kernel/gpu/timer/objtmr.h create mode 100644 src/nvidia/inc/kernel/gpu/timer/tmr.h create mode 100644 src/nvidia/inc/kernel/gpu_mgr/gpu_db.h create mode 100644 src/nvidia/inc/kernel/gpu_mgr/gpu_group.h create mode 100644 src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h create mode 100644 src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h create mode 100644 src/nvidia/inc/kernel/mem_mgr/io_vaspace.h create mode 100644 src/nvidia/inc/kernel/mem_mgr/mem.h create mode 100644 src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h create mode 100644 src/nvidia/inc/kernel/mem_mgr/standard_mem.h create mode 100644 src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h create mode 100644 src/nvidia/inc/kernel/mem_mgr/system_mem.h create mode 100644 src/nvidia/inc/kernel/mem_mgr/vaspace.h create mode 100644 src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h create mode 100644 src/nvidia/inc/kernel/os/capability.h create mode 100644 src/nvidia/inc/kernel/os/nv_memory_area.h create mode 100644 src/nvidia/inc/kernel/os/nv_memory_type.h create mode 100644 src/nvidia/inc/kernel/os/os.h create mode 100644 src/nvidia/inc/kernel/os/os_stub.h create mode 100644 src/nvidia/inc/kernel/platform/acpi_common.h create mode 100644 src/nvidia/inc/kernel/platform/nvpcf.h create mode 100644 src/nvidia/inc/kernel/platform/sli/sli.h create mode 100644 src/nvidia/inc/kernel/rmapi/alloc_size.h create mode 100644 src/nvidia/inc/kernel/rmapi/binary_api.h create mode 100644 src/nvidia/inc/kernel/rmapi/client.h create mode 100644 src/nvidia/inc/kernel/rmapi/client_resource.h create mode 100644 
src/nvidia/inc/kernel/rmapi/control.h create mode 100644 src/nvidia/inc/kernel/rmapi/event.h create mode 100644 src/nvidia/inc/kernel/rmapi/event_buffer.h create mode 100644 src/nvidia/inc/kernel/rmapi/exports.h create mode 100644 src/nvidia/inc/kernel/rmapi/lock_stress.h create mode 100644 src/nvidia/inc/kernel/rmapi/lock_test.h create mode 100644 src/nvidia/inc/kernel/rmapi/mapping_list.h create mode 100644 src/nvidia/inc/kernel/rmapi/param_copy.h create mode 100644 src/nvidia/inc/kernel/rmapi/resource.h create mode 100644 src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h create mode 100644 src/nvidia/inc/kernel/rmapi/rmapi.h create mode 100644 src/nvidia/inc/kernel/rmapi/rmapi_cache_handlers.h create mode 100644 src/nvidia/inc/kernel/rmapi/rmapi_specific.h create mode 100644 src/nvidia/inc/kernel/rmapi/rmapi_utils.h create mode 100644 src/nvidia/inc/kernel/rmapi/rs_utils.h create mode 100644 src/nvidia/inc/kernel/vgpu/dev_vgpu.h create mode 100644 src/nvidia/inc/kernel/vgpu/rm_plugin_shared_code.h create mode 100644 src/nvidia/inc/kernel/vgpu/rpc.h create mode 100644 src/nvidia/inc/kernel/vgpu/rpc_global_enums.h create mode 100644 src/nvidia/inc/kernel/vgpu/rpc_hal_stubs.h create mode 100644 src/nvidia/inc/kernel/vgpu/rpc_headers.h create mode 100644 src/nvidia/inc/kernel/vgpu/rpc_vgpu.h create mode 100644 src/nvidia/inc/kernel/vgpu/vgpu_util.h create mode 100644 src/nvidia/inc/kernel/vgpu/vgpuapi.h create mode 100644 src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h create mode 100644 src/nvidia/inc/lib/base_utils.h create mode 100644 src/nvidia/inc/lib/protobuf/prb.h create mode 100644 src/nvidia/inc/lib/ref_count.h create mode 100644 src/nvidia/inc/lib/zlib/inflate.h create mode 100644 src/nvidia/inc/libraries/containers/btree.h create mode 100644 src/nvidia/inc/libraries/containers/eheap_old.h create mode 100644 src/nvidia/inc/libraries/containers/list.h create mode 100644 src/nvidia/inc/libraries/containers/map.h create mode 100644 src/nvidia/inc/libraries/containers/multimap.h create mode 100644 src/nvidia/inc/libraries/containers/queue.h create mode 100644 src/nvidia/inc/libraries/containers/ringbuf.h create mode 100644 src/nvidia/inc/libraries/containers/type_safety.h create mode 100644 src/nvidia/inc/libraries/containers/vector.h create mode 100644 src/nvidia/inc/libraries/eventbufferproducer.h create mode 100644 src/nvidia/inc/libraries/field_desc.h create mode 100644 src/nvidia/inc/libraries/ioaccess/ioaccess.h create mode 100644 src/nvidia/inc/libraries/mapping_reuse/mapping_reuse.h create mode 100644 src/nvidia/inc/libraries/mmu/gmmu_fmt.h create mode 100644 src/nvidia/inc/libraries/mmu/mmu_fmt.h create mode 100644 src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h create mode 100644 src/nvidia/inc/libraries/nvlog/nvlog.h create mode 100644 src/nvidia/inc/libraries/nvlog/nvlog_printf.h create mode 100644 src/nvidia/inc/libraries/nvoc/object.h create mode 100644 src/nvidia/inc/libraries/nvoc/prelude.h create mode 100644 src/nvidia/inc/libraries/nvoc/rtti.h create mode 100644 src/nvidia/inc/libraries/nvoc/runtime.h create mode 100644 src/nvidia/inc/libraries/nvoc/utility.h create mode 100644 src/nvidia/inc/libraries/nvport/atomic.h create mode 100644 src/nvidia/inc/libraries/nvport/core.h create mode 100644 src/nvidia/inc/libraries/nvport/cpu.h create mode 100644 src/nvidia/inc/libraries/nvport/crypto.h create mode 100644 src/nvidia/inc/libraries/nvport/debug.h create mode 100644 src/nvidia/inc/libraries/nvport/inline/atomic_clang.h create mode 100644 
src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h create mode 100644 src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h create mode 100644 src/nvidia/inc/libraries/nvport/inline/memory_tracking.h create mode 100644 src/nvidia/inc/libraries/nvport/inline/safe_generic.h create mode 100644 src/nvidia/inc/libraries/nvport/inline/sync_tracking.h create mode 100644 src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h create mode 100644 src/nvidia/inc/libraries/nvport/inline/util_generic.h create mode 100644 src/nvidia/inc/libraries/nvport/inline/util_valist.h create mode 100644 src/nvidia/inc/libraries/nvport/memory.h create mode 100644 src/nvidia/inc/libraries/nvport/nvport.h create mode 100644 src/nvidia/inc/libraries/nvport/safe.h create mode 100644 src/nvidia/inc/libraries/nvport/string.h create mode 100644 src/nvidia/inc/libraries/nvport/sync.h create mode 100644 src/nvidia/inc/libraries/nvport/thread.h create mode 100644 src/nvidia/inc/libraries/nvport/util.h create mode 100644 src/nvidia/inc/libraries/poolalloc.h create mode 100644 src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h create mode 100644 src/nvidia/inc/libraries/resserv/resserv.h create mode 100644 src/nvidia/inc/libraries/resserv/rs_access_map.h create mode 100644 src/nvidia/inc/libraries/resserv/rs_access_rights.h create mode 100644 src/nvidia/inc/libraries/resserv/rs_client.h create mode 100644 src/nvidia/inc/libraries/resserv/rs_domain.h create mode 100644 src/nvidia/inc/libraries/resserv/rs_resource.h create mode 100644 src/nvidia/inc/libraries/resserv/rs_server.h create mode 100644 src/nvidia/inc/libraries/tls/tls.h create mode 100644 src/nvidia/inc/libraries/utils/nv_enum.h create mode 100644 src/nvidia/inc/libraries/utils/nvassert.h create mode 100644 src/nvidia/inc/libraries/utils/nvbitvector.h create mode 100644 src/nvidia/inc/libraries/utils/nvmacro.h create mode 100644 src/nvidia/inc/libraries/utils/nvprintf.h create mode 100644 src/nvidia/inc/libraries/utils/nvprintf_level.h create mode 100644 src/nvidia/inc/libraries/utils/nvrange.h create mode 100644 src/nvidia/inc/os/dce_rm_client_ipc.h create mode 100644 src/nvidia/interface/acpidsmguids.h create mode 100644 src/nvidia/interface/acpigenfuncs.h create mode 100644 src/nvidia/interface/deprecated/rmapi_deprecated.h create mode 100644 src/nvidia/interface/deprecated/rmapi_deprecated_utils.c create mode 100644 src/nvidia/interface/nv-firmware-registry.h create mode 100644 src/nvidia/interface/nv_sriov_defines.h create mode 100644 src/nvidia/interface/nvacpitypes.h create mode 100644 src/nvidia/interface/nvrm_registry.h create mode 100644 src/nvidia/interface/rmapi/src/g_finn_rm_api.c create mode 100644 src/nvidia/nv-kernel.ld create mode 100644 src/nvidia/src/kernel/core/hal/hal.c create mode 100644 src/nvidia/src/kernel/core/hal/hals_all.c create mode 100644 src/nvidia/src/kernel/core/hal/info_block.c create mode 100644 src/nvidia/src/kernel/core/hal_mgr.c create mode 100644 src/nvidia/src/kernel/core/locks_common.c create mode 100644 src/nvidia/src/kernel/core/locks_minimal.c create mode 100644 src/nvidia/src/kernel/core/system.c create mode 100644 src/nvidia/src/kernel/core/thread_state.c create mode 100644 src/nvidia/src/kernel/diagnostics/code_coverage_mgr.c create mode 100644 src/nvidia/src/kernel/diagnostics/nvlog.c create mode 100644 src/nvidia/src/kernel/diagnostics/nvlog_printf.c create mode 100644 src/nvidia/src/kernel/diagnostics/profiler.c create mode 100644 src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_arch_t234d.c create mode 
100644 src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c create mode 100644 src/nvidia/src/kernel/gpu/arch/t25x/kern_gpu_t256d.c create mode 100644 src/nvidia/src/kernel/gpu/arch/t26x/kern_gpu_t264d.c create mode 100644 src/nvidia/src/kernel/gpu/audio/hda_codec_api.c create mode 100644 src/nvidia/src/kernel/gpu/dce_client/dce_client.c create mode 100644 src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c create mode 100644 src/nvidia/src/kernel/gpu/device.c create mode 100644 src/nvidia/src/kernel/gpu/device_ctrl.c create mode 100644 src/nvidia/src/kernel/gpu/device_share.c create mode 100644 src/nvidia/src/kernel/gpu/disp/arch/v02/kern_disp_0204.c create mode 100644 src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c create mode 100644 src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0401.c create mode 100644 src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c create mode 100644 src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0404.c create mode 100644 src/nvidia/src/kernel/gpu/disp/arch/v05/kern_disp_0501.c create mode 100644 src/nvidia/src/kernel/gpu/disp/disp_capabilities.c create mode 100644 src/nvidia/src/kernel/gpu/disp/disp_channel.c create mode 100644 src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c create mode 100644 src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c create mode 100644 src/nvidia/src/kernel/gpu/disp/disp_objs.c create mode 100644 src/nvidia/src/kernel/gpu/disp/disp_sf_user.c create mode 100644 src/nvidia/src/kernel/gpu/disp/head/arch/v04/kernel_head_0401.c create mode 100644 src/nvidia/src/kernel/gpu/disp/head/kernel_head.c create mode 100644 src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c create mode 100644 src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c create mode 100644 src/nvidia/src/kernel/gpu/disp/kern_disp.c create mode 100644 src/nvidia/src/kernel/gpu/disp/vblank_callback/vblank.c create mode 100644 src/nvidia/src/kernel/gpu/eng_state.c create mode 100644 src/nvidia/src/kernel/gpu/gpu.c create mode 100644 src/nvidia/src/kernel/gpu/gpu_access.c create mode 100644 src/nvidia/src/kernel/gpu/gpu_arch.c create mode 100644 src/nvidia/src/kernel/gpu/gpu_device_mapping.c create mode 100644 src/nvidia/src/kernel/gpu/gpu_engine_type.c create mode 100644 src/nvidia/src/kernel/gpu/gpu_gspclient.c create mode 100644 src/nvidia/src/kernel/gpu/gpu_resource.c create mode 100644 src/nvidia/src/kernel/gpu/gpu_resource_desc.c create mode 100644 src/nvidia/src/kernel/gpu/gpu_rmapi.c create mode 100644 src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c create mode 100644 src/nvidia/src/kernel/gpu/gpu_timeout.c create mode 100644 src/nvidia/src/kernel/gpu/gpu_user_shared_data.c create mode 100644 src/nvidia/src/kernel/gpu/gpu_uuid.c create mode 100644 src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c create mode 100644 src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c create mode 100644 src/nvidia/src/kernel/gpu/mem_mgr/mem_ctrl.c create mode 100644 src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c create mode 100644 src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c create mode 100644 src/nvidia/src/kernel/gpu/subdevice/generic_engine.c create mode 100644 src/nvidia/src/kernel/gpu/subdevice/subdevice.c create mode 100644 src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c create mode 100644 src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c create mode 100644 src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_internal_kernel.c create mode 100644 
src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c create mode 100644 src/nvidia/src/kernel/gpu/timer/timer.c create mode 100644 src/nvidia/src/kernel/gpu/timer/timer_ostimer.c create mode 100644 src/nvidia/src/kernel/gpu_mgr/gpu_db.c create mode 100644 src/nvidia/src/kernel/gpu_mgr/gpu_group.c create mode 100644 src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c create mode 100644 src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c create mode 100644 src/nvidia/src/kernel/mem_mgr/io_vaspace.c create mode 100644 src/nvidia/src/kernel/mem_mgr/mem.c create mode 100644 src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h create mode 100644 src/nvidia/src/kernel/mem_mgr/os_desc_mem.c create mode 100644 src/nvidia/src/kernel/mem_mgr/standard_mem.c create mode 100644 src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c create mode 100644 src/nvidia/src/kernel/mem_mgr/system_mem.c create mode 100644 src/nvidia/src/kernel/mem_mgr/vaspace.c create mode 100644 src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c create mode 100644 src/nvidia/src/kernel/os/os_init.c create mode 100644 src/nvidia/src/kernel/os/os_sanity.c create mode 100644 src/nvidia/src/kernel/os/os_stubs.c create mode 100644 src/nvidia/src/kernel/os/os_timer.c create mode 100644 src/nvidia/src/kernel/rmapi/alloc_free.c create mode 100644 src/nvidia/src/kernel/rmapi/binary_api.c create mode 100644 src/nvidia/src/kernel/rmapi/client.c create mode 100644 src/nvidia/src/kernel/rmapi/client_resource.c create mode 100644 src/nvidia/src/kernel/rmapi/control.c create mode 100644 src/nvidia/src/kernel/rmapi/deprecated_context.c create mode 100644 src/nvidia/src/kernel/rmapi/deprecated_context.h create mode 100644 src/nvidia/src/kernel/rmapi/entry_points.c create mode 100644 src/nvidia/src/kernel/rmapi/entry_points.h create mode 100644 src/nvidia/src/kernel/rmapi/event.c create mode 100644 src/nvidia/src/kernel/rmapi/event_buffer.c create mode 100644 src/nvidia/src/kernel/rmapi/event_notification.c create mode 100644 src/nvidia/src/kernel/rmapi/lock_stress.c create mode 100644 src/nvidia/src/kernel/rmapi/lock_test.c create mode 100644 src/nvidia/src/kernel/rmapi/mapping.c create mode 100644 src/nvidia/src/kernel/rmapi/mapping_cpu.c create mode 100644 src/nvidia/src/kernel/rmapi/param_copy.c create mode 100644 src/nvidia/src/kernel/rmapi/resource.c create mode 100644 src/nvidia/src/kernel/rmapi/resource_desc.c create mode 100644 src/nvidia/src/kernel/rmapi/resource_desc.h create mode 100644 src/nvidia/src/kernel/rmapi/resource_desc_flags.h create mode 100644 src/nvidia/src/kernel/rmapi/resource_list.h create mode 100644 src/nvidia/src/kernel/rmapi/resource_list_required_includes.h create mode 100644 src/nvidia/src/kernel/rmapi/rmapi.c create mode 100644 src/nvidia/src/kernel/rmapi/rmapi_cache.c create mode 100644 src/nvidia/src/kernel/rmapi/rmapi_cache_handlers.c create mode 100644 src/nvidia/src/kernel/rmapi/rmapi_finn.c create mode 100644 src/nvidia/src/kernel/rmapi/rmapi_specific.c create mode 100644 src/nvidia/src/kernel/rmapi/rmapi_stubs.c create mode 100644 src/nvidia/src/kernel/rmapi/rmapi_utils.c create mode 100644 src/nvidia/src/kernel/rmapi/rpc_common.c create mode 100644 src/nvidia/src/kernel/rmapi/rs_utils.c create mode 100644 src/nvidia/src/kernel/rmapi/sharing.c create mode 100644 src/nvidia/src/lib/base_utils.c create mode 100644 src/nvidia/src/lib/zlib/inflate.c create mode 100644 src/nvidia/src/libraries/containers/btree/btree.c create mode 100644 src/nvidia/src/libraries/containers/eheap/eheap_old.c create mode 100644 
src/nvidia/src/libraries/containers/list.c create mode 100644 src/nvidia/src/libraries/containers/map.c create mode 100644 src/nvidia/src/libraries/containers/multimap.c create mode 100644 src/nvidia/src/libraries/containers/queue.c create mode 100644 src/nvidia/src/libraries/containers/ringbuf.c create mode 100644 src/nvidia/src/libraries/containers/vector.c create mode 100644 src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c create mode 100644 src/nvidia/src/libraries/ioaccess/ioaccess.c create mode 100644 src/nvidia/src/libraries/mapping_reuse/mapping_reuse.c create mode 100644 src/nvidia/src/libraries/nvbitvector/nvbitvector.c create mode 100644 src/nvidia/src/libraries/nvoc/src/runtime.c create mode 100644 src/nvidia/src/libraries/nvport/core/core.c create mode 100644 src/nvidia/src/libraries/nvport/cpu/cpu_common.c create mode 100644 src/nvidia/src/libraries/nvport/cpu/cpu_common.h create mode 100644 src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c create mode 100644 src/nvidia/src/libraries/nvport/memory/memory_generic.h create mode 100644 src/nvidia/src/libraries/nvport/memory/memory_tracking.c create mode 100644 src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c create mode 100644 src/nvidia/src/libraries/nvport/string/string_generic.c create mode 100644 src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h create mode 100644 src/nvidia/src/libraries/nvport/sync/sync_common.h create mode 100644 src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c create mode 100644 src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c create mode 100644 src/nvidia/src/libraries/nvport/util/util_compiler_switch.c create mode 100644 src/nvidia/src/libraries/nvport/util/util_gcc_clang.c create mode 100644 src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c create mode 100644 src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c create mode 100644 src/nvidia/src/libraries/resserv/src/rs_access_map.c create mode 100644 src/nvidia/src/libraries/resserv/src/rs_access_rights.c create mode 100644 src/nvidia/src/libraries/resserv/src/rs_client.c create mode 100644 src/nvidia/src/libraries/resserv/src/rs_domain.c create mode 100644 src/nvidia/src/libraries/resserv/src/rs_resource.c create mode 100644 src/nvidia/src/libraries/resserv/src/rs_server.c create mode 100644 src/nvidia/src/libraries/tls/tls.c create mode 100644 src/nvidia/src/libraries/utils/nvassert.c create mode 100644 src/nvidia/srcs.mk create mode 100644 utils.mk create mode 100644 version.mk
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..643ae6d
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,141 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual
+identity and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+ +## Our Standards + +Examples of behavior that contribute to a positive environment for our +community include: + +* Using welcoming and inclusive language +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, or to ban temporarily or permanently any +contributor for other behaviors that they deem inappropriate, threatening, +offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when +an individual is representing the project or its community. Examples of representing +our community include using an official e-mail address, posting via an official +social media account, or acting as an appointed representative at an online or +offline event. Representation of a project may be further defined and clarified +by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders and moderators responsible for enforcement at +GitHub_Conduct@nvidia.com. +All complaints will be reviewed and investigated and will result in a response +that is deemed necessary and appropriate to the circumstances. Leaders and moderators +are obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Moderators who do not follow or enforce the Code of Conduct in good faith +may face temporary or permanent repercussions as determined by other members of the +community’s leadership. + +## Enforcement Guidelines + +Community leaders and moderators will follow these Community Impact Guidelines +in determining the consequences for any action they deem in violation of this +Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community moderators, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. 
+ +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating an egregious single violation, or a pattern of +violation of community standards, including sustained inappropriate behavior, +harassment of an individual, or aggression toward or disparagement of classes of +individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..17a4454 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,21 @@ +Thank you for all the enthusiasm around open-gpu-kernel-modules. + +## Non-functional (cosmetic) changes + +While we appreciate your enthusiasm, we have decided not to accept non-functional changes such as +non-code typo fixes, comment and language adjustments, whitespace changes, and similar. + +Changes going into this codebase incur significant overhead. As such, we want to focus our resources +on executable code improvements for now. + +If you have questions, or are unsure about the nature of your desired change, please ask us on the +[Discussion boards](https://github.com/NVIDIA/open-gpu-kernel-modules/discussions)! + +## Code style + +We currently do not publish a code style guide, as we have many different components coming together. +Please read the existing code in the repository, especially the code surrounding your proposed change, +to get a feel for what you should aim for. + +Don't worry too much about it! We are happy to guide you through any necessary style changes through +code review of your PR.
diff --git a/COPYING b/COPYING new file mode 100644 index 0000000..84a3c32 --- /dev/null +++ b/COPYING @@ -0,0 +1,369 @@ + +Except where noted otherwise, the individual files within this package are +licensed as MIT: + + Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +However, when linked together to form a Linux kernel module, the resulting Linux +kernel module is dual licensed as MIT/GPLv2. + + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. 
+ + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. 
+ + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + <signature of Ty Coon>, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. + diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..449c7f5 --- /dev/null +++ b/Makefile @@ -0,0 +1,86 @@ +########################################################################### +# This is the top level makefile for the NVIDIA Linux kernel module source +# package.
+# +# To build: run `make modules` +# To install the built kernel modules: run (as root) `make modules_install` +########################################################################### + +########################################################################### +# variables +########################################################################### + +nv_kernel_o = src/nvidia/$(OUTPUTDIR)/nv-kernel.o +nv_kernel_o_binary = kernel-open/nvidia/nv-kernel.o_binary + +nv_modeset_kernel_o = src/nvidia-modeset/$(OUTPUTDIR)/nv-modeset-kernel.o +nv_modeset_kernel_o_binary = kernel-open/nvidia-modeset/nv-modeset-kernel.o_binary + +########################################################################### +# rules +########################################################################### + +include utils.mk + +.PHONY: all +all: modules + +########################################################################### +# nv-kernel.o is the OS agnostic portion of nvidia.ko +########################################################################### + +.PHONY: $(nv_kernel_o) +$(nv_kernel_o): + $(MAKE) -C src/nvidia + +$(nv_kernel_o_binary): $(nv_kernel_o) + cd $(dir $@) && ln -sf ../../$^ $(notdir $@) + + +########################################################################### +# nv-modeset-kernel.o is the OS agnostic portion of nvidia-modeset.ko +########################################################################### + +.PHONY: $(nv_modeset_kernel_o) +$(nv_modeset_kernel_o): + $(MAKE) -C src/nvidia-modeset + +$(nv_modeset_kernel_o_binary): $(nv_modeset_kernel_o) + cd $(dir $@) && ln -sf ../../$^ $(notdir $@) + + +########################################################################### +# After the OS agnostic portions are built, descend into kernel-open/ and build +# the kernel modules with kbuild. +########################################################################### + +.PHONY: modules +modules: $(nv_kernel_o_binary) $(nv_modeset_kernel_o_binary) + $(MAKE) -C kernel-open modules + +########################################################################### +# Install the built kernel modules using kbuild. +########################################################################### + +.PHONY: modules_install +modules_install: + $(MAKE) -C kernel-open modules_install + +########################################################################### +# clean +########################################################################### + +.PHONY: clean +clean: nvidia.clean nvidia-modeset.clean kernel-open.clean + +.PHONY: nvidia.clean +nvidia.clean: + $(MAKE) -C src/nvidia clean + +.PHONY: nvidia-modeset.clean +nvidia-modeset.clean: + $(MAKE) -C src/nvidia-modeset clean + +.PHONY: kernel-open.clean +kernel-open.clean: + $(MAKE) -C kernel-open clean diff --git a/README.md b/README.md new file mode 100644 index 0000000..429093a --- /dev/null +++ b/README.md @@ -0,0 +1,987 @@ +# NVIDIA Linux Open GPU Kernel Module Source + +This is the source release of the NVIDIA Linux open GPU kernel modules, +version 580.00. + + +## How to Build + +To build: + + make modules -j$(nproc) + +To install, first uninstall any existing NVIDIA kernel modules. Then, +as root: + + make modules_install -j$(nproc) + +Note that the kernel modules built here must be used with GSP +firmware and user-space NVIDIA GPU driver components from a corresponding +580.00 driver release. This can be achieved by installing +the NVIDIA GPU driver from the .run file using the `--no-kernel-modules` +option.
E.g., + + sh ./NVIDIA-Linux-[...].run --no-kernel-modules + + +## Supported Target CPU Architectures + +Currently, the kernel modules can be built for x86_64 or aarch64. +If cross-compiling, set these variables on the make command line: + + TARGET_ARCH=aarch64|x86_64 + CC + LD + AR + CXX + OBJCOPY + +E.g., + + # compile on x86_64 for aarch64 + make modules -j$(nproc) \ + TARGET_ARCH=aarch64 \ + CC=aarch64-linux-gnu-gcc \ + LD=aarch64-linux-gnu-ld \ + AR=aarch64-linux-gnu-ar \ + CXX=aarch64-linux-gnu-g++ \ + OBJCOPY=aarch64-linux-gnu-objcopy + + +## Other Build Knobs + +NV_VERBOSE - Set this to "1" to print each complete command executed; + otherwise, a succinct "CC" line is printed. + +DEBUG - Set this to "1" to build the kernel modules as debug. By default, the + build compiles without debugging information. This also enables + various debug log messages in the kernel modules. + +These variables can be set on the make command line. E.g., + + make modules -j$(nproc) NV_VERBOSE=1 + + +## Supported Toolchains + +Any reasonably modern version of GCC or Clang can be used to build the +kernel modules. Note that the kernel interface layers of the kernel +modules must be built with the toolchain that was used to build the +kernel. + + +## Supported Linux Kernel Versions + +The NVIDIA open kernel modules support the same range of Linux kernel +versions that are supported with the proprietary NVIDIA kernel modules. +This is currently Linux kernel 4.15 or newer. + + +## How to Contribute + +Contributions can be made by creating a pull request on +https://github.com/NVIDIA/open-gpu-kernel-modules. +We'll respond via GitHub. + +Note that when submitting a pull request, you will be prompted to accept +a Contributor License Agreement. + +This code base is shared with NVIDIA's proprietary drivers, and various +processing is performed on the shared code to produce the source code that is +published here. This has several implications for the foreseeable future: + +* The GitHub repository will function mostly as a snapshot of each driver + release. + +* We do not expect to be able to provide revision history for individual + changes that were made to NVIDIA's shared code base. There will likely + only be one git commit per driver release. + +* We may not be able to reflect individual contributions as separate + git commits in the GitHub repository. + +* Because the code undergoes various processing prior to publishing here, + contributions made here require manual merging to be applied to the shared + code base. Therefore, large refactoring changes made here may be difficult to + merge and accept back into the shared code base. If you have large + refactoring to suggest, please contact us in advance, so we can coordinate. + + +## How to Report Issues + +Problems specific to the Open GPU Kernel Modules can be reported in the +Issues section of the https://github.com/NVIDIA/open-gpu-kernel-modules +repository. + +Further, any of the existing bug reporting venues can be used to communicate +problems to NVIDIA, such as our forum: + +https://forums.developer.nvidia.com/c/gpu-graphics/linux/148 + +or linux-bugs@nvidia.com. + +Please see the separate [SECURITY.md](SECURITY.md) document if you +believe you have discovered a security vulnerability in this software.
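+
+To recap the build, install, and load flow described in the sections
+above, a typical session might look like the following sketch
+(illustrative only; it assumes the matching user-space driver components
+and GSP firmware from the same release are installed, and that the
+install and load steps are run as root):
+
+    # build the kernel modules against the running kernel
+    make modules -j$(nproc)
+
+    # install them, refresh module dependencies, and load the stack;
+    # loading nvidia-drm pulls in nvidia-modeset and nvidia via
+    # module dependencies
+    make modules_install
+    depmod
+    modprobe nvidia-drm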
+ + +## Kernel Interface and OS-Agnostic Components of Kernel Modules + +Most of NVIDIA's kernel modules are split into two components: + +* An "OS-agnostic" component: this is the component of each kernel module + that is independent of the operating system. + +* A "kernel interface layer": this is the component of each kernel module + that is specific to the Linux kernel version and configuration. + +When packaged in the NVIDIA .run installation package, the OS-agnostic +component is provided as a binary: it is large and time-consuming to +compile, so pre-built versions are provided so that the user does +not have to compile it during every driver installation. For the +nvidia.ko kernel module, this component is named "nv-kernel.o_binary". +For the nvidia-modeset.ko kernel module, this component is named +"nv-modeset-kernel.o_binary". Neither nvidia-drm.ko nor nvidia-uvm.ko +have OS-agnostic components. + +The kernel interface layer component for each kernel module must be built +for the target kernel. + + +## Directory Structure Layout + +- `kernel-open/` The kernel interface layer +- `kernel-open/nvidia/` The kernel interface layer for nvidia.ko +- `kernel-open/nvidia-drm/` The kernel interface layer for nvidia-drm.ko +- `kernel-open/nvidia-modeset/` The kernel interface layer for nvidia-modeset.ko +- `kernel-open/nvidia-uvm/` The kernel interface layer for nvidia-uvm.ko + +- `src/` The OS-agnostic code +- `src/nvidia/` The OS-agnostic code for nvidia.ko +- `src/nvidia-modeset/` The OS-agnostic code for nvidia-modeset.ko +- `src/common/` Utility code used by one or more of nvidia.ko and nvidia-modeset.ko +- `nouveau/` Tools for integration with the Nouveau device driver + + +## Nouveau device driver integration + +The Python script in the 'nouveau' directory is used to extract some of the +firmware binary images (and related data) encoded in the source code and +store them as distinct files. These files are used by the Nouveau device +driver to load and communicate with the GSP firmware. + +The layout of the binary files is described in nouveau_firmware_layout.ods, +which is an OpenDocument Spreadsheet file, compatible with most spreadsheet +software applications. + + +## Compatible GPUs + +The NVIDIA open kernel modules can be used on any Turing or later GPU (see the +table below). + +For details on feature support and limitations, see the NVIDIA GPU driver +end user README here: + +https://us.download.nvidia.com/XFree86/Linux-x86_64/580.00/README/kernel_open.html + +For vGPU support, please refer to the README.vgpu packaged in the vGPU Host +Package for more details. + +In the table below, if three IDs are listed, the first is the PCI Device +ID, the second is the PCI Subsystem Vendor ID, and the third is the PCI +Subsystem Device ID.
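+
+If you are unsure which IDs your GPU reports, the `lspci` utility from
+pciutils can display them. For example (illustrative output; `10de` is
+the NVIDIA PCI vendor ID, and the second value in the bracketed pair is
+the PCI Device ID to look up in the table):
+
+    $ lspci -nn -d 10de:
+    01:00.0 VGA compatible controller [0300]: NVIDIA Corporation TU104 [GeForce RTX 2080 SUPER] [10de:1e81] (rev a1)
+
+Running `lspci -vnn` additionally prints a "Subsystem:" line containing
+the PCI Subsystem Vendor and Device IDs.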
+ +| Product Name | PCI ID | +| ------------------------------------------------------- | -------------- | +| NVIDIA TITAN RTX | 1E02 | +| NVIDIA GeForce RTX 2080 Ti | 1E04 | +| NVIDIA GeForce RTX 2080 Ti | 1E07 | +| NVIDIA CMP 50HX | 1E09 | +| Quadro RTX 6000 | 1E30 | +| Quadro RTX 8000 | 1E30 1028 129E | +| Quadro RTX 8000 | 1E30 103C 129E | +| Quadro RTX 8000 | 1E30 10DE 129E | +| Quadro RTX 6000 | 1E36 | +| Quadro RTX 8000 | 1E78 10DE 13D8 | +| Quadro RTX 6000 | 1E78 10DE 13D9 | +| NVIDIA GeForce RTX 2080 SUPER | 1E81 | +| NVIDIA GeForce RTX 2080 | 1E82 | +| NVIDIA GeForce RTX 2070 SUPER | 1E84 | +| NVIDIA GeForce RTX 2080 | 1E87 | +| NVIDIA GeForce RTX 2060 | 1E89 | +| NVIDIA GeForce RTX 2080 | 1E90 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1025 1375 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1028 08A1 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1028 08A2 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1028 08EA | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1028 08EB | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1028 08EC | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1028 08ED | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1028 08EE | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1028 08EF | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1028 093B | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1028 093C | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 103C 8572 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 103C 8573 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 103C 8602 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 103C 8606 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 103C 86C6 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 103C 86C7 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 103C 87A6 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 103C 87A7 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1043 131F | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1043 137F | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1043 141F | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1043 1751 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1458 1660 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1458 1661 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1458 1662 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1458 75A6 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1458 75A7 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1458 86A6 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1458 86A7 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1462 1274 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1462 1277 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 152D 1220 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1558 95E1 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1558 97E1 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1A58 2002 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1A58 2005 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1A58 2007 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1A58 3000 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1A58 3001 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1E90 1D05 1069 | +| NVIDIA GeForce RTX 2070 Super | 1E91 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 103C 8607 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 103C 8736 | +| NVIDIA GeForce RTX 2070 Super 
with Max-Q Design | 1E91 103C 8738 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 103C 8772 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 103C 878A | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 103C 878B | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1043 1E61 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1458 1511 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1458 75B3 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1458 75B4 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1458 76B2 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1458 76B3 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1458 78A2 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1458 78A3 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1458 86B2 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1458 86B3 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1462 12AE | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1462 12B0 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1462 12C6 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 17AA 22C3 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 17AA 22C5 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1A58 2009 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1A58 200A | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 1A58 3002 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1E91 8086 3012 | +| NVIDIA GeForce RTX 2080 Super | 1E93 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1025 1401 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1025 149C | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1028 09D2 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 103C 8607 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 103C 86C7 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 103C 8736 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 103C 8738 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 103C 8772 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 103C 87A6 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 103C 87A7 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1458 75B1 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1458 75B2 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1458 76B0 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1458 76B1 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1458 78A0 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1458 78A1 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1458 86B0 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1458 86B1 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1462 12AE | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1462 12B0 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1462 12B4 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1462 12C6 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1558 50D3 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1558 70D1 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 17AA 22C3 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 17AA 22C5 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1A58 2009 | +| NVIDIA GeForce RTX 2080 Super with Max-Q 
Design | 1E93 1A58 200A | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1A58 3002 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1E93 1D05 1089 | +| Quadro RTX 5000 | 1EB0 | +| Quadro RTX 4000 | 1EB1 | +| Quadro RTX 5000 | 1EB5 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 1025 1375 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 1025 1401 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 1025 149C | +| Quadro RTX 5000 with Max-Q Design | 1EB5 1028 09C3 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 103C 8736 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 103C 8738 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 103C 8772 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 103C 8780 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 103C 8782 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 103C 8783 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 103C 8785 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 1043 1DD1 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 1462 1274 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 1462 12B0 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 1462 12C6 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 17AA 22B8 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 17AA 22BA | +| Quadro RTX 5000 with Max-Q Design | 1EB5 1A58 2005 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 1A58 2007 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 1A58 2008 | +| Quadro RTX 5000 with Max-Q Design | 1EB5 1A58 200A | +| Quadro RTX 4000 | 1EB6 | +| Quadro RTX 4000 with Max-Q Design | 1EB6 1028 09C3 | +| Quadro RTX 4000 with Max-Q Design | 1EB6 103C 8736 | +| Quadro RTX 4000 with Max-Q Design | 1EB6 103C 8738 | +| Quadro RTX 4000 with Max-Q Design | 1EB6 103C 8772 | +| Quadro RTX 4000 with Max-Q Design | 1EB6 103C 8780 | +| Quadro RTX 4000 with Max-Q Design | 1EB6 103C 8782 | +| Quadro RTX 4000 with Max-Q Design | 1EB6 103C 8783 | +| Quadro RTX 4000 with Max-Q Design | 1EB6 103C 8785 | +| Quadro RTX 4000 with Max-Q Design | 1EB6 1462 1274 | +| Quadro RTX 4000 with Max-Q Design | 1EB6 1462 1277 | +| Quadro RTX 4000 with Max-Q Design | 1EB6 1462 12B0 | +| Quadro RTX 4000 with Max-Q Design | 1EB6 1462 12C6 | +| Quadro RTX 4000 with Max-Q Design | 1EB6 17AA 22B8 | +| Quadro RTX 4000 with Max-Q Design | 1EB6 17AA 22BA | +| Tesla T4 | 1EB8 10DE 12A2 | +| NVIDIA GeForce RTX 2070 SUPER | 1EC2 | +| NVIDIA GeForce RTX 2070 SUPER | 1EC7 | +| NVIDIA GeForce RTX 2080 | 1ED0 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1ED0 1025 132D | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1ED0 1028 08ED | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1ED0 1028 08EE | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1ED0 1028 08EF | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1ED0 103C 8572 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1ED0 103C 8573 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1ED0 103C 8600 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1ED0 103C 8605 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1ED0 1043 138F | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1ED0 1043 15C1 | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1ED0 17AA 3FEE | +| NVIDIA GeForce RTX 2080 with Max-Q Design | 1ED0 17AA 3FFE | +| NVIDIA GeForce RTX 2070 Super | 1ED1 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1ED1 1025 1432 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1ED1 103C 8746 | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1ED1 103C 878A | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1ED1 1043 165F | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1ED1 144D C192 | +| 
NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1ED1 17AA 3FCE | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1ED1 17AA 3FCF | +| NVIDIA GeForce RTX 2070 Super with Max-Q Design | 1ED1 17AA 3FD0 | +| NVIDIA GeForce RTX 2080 Super | 1ED3 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1ED3 1025 1432 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1ED3 1028 09D1 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1ED3 103C 8746 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1ED3 103C 878A | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1ED3 1043 1D61 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1ED3 1043 1E51 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1ED3 1043 1F01 | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1ED3 17AA 3FCE | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1ED3 17AA 3FCF | +| NVIDIA GeForce RTX 2080 Super with Max-Q Design | 1ED3 17AA 3FD0 | +| Quadro RTX 5000 | 1EF5 | +| NVIDIA GeForce RTX 2070 | 1F02 | +| NVIDIA GeForce RTX 2060 | 1F03 | +| NVIDIA GeForce RTX 2060 SUPER | 1F06 | +| NVIDIA GeForce RTX 2070 | 1F07 | +| NVIDIA GeForce RTX 2060 | 1F08 | +| NVIDIA GeForce GTX 1650 | 1F0A | +| NVIDIA CMP 40HX | 1F0B | +| NVIDIA GeForce RTX 2070 | 1F10 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1025 132D | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1025 1342 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1028 08A1 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1028 08A2 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1028 08EA | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1028 08EB | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1028 08EC | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1028 08ED | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1028 08EE | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1028 08EF | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1028 093B | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1028 093C | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 103C 8572 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 103C 8573 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 103C 8602 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 103C 8606 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1043 132F | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1043 136F | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1043 1881 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1043 1E6E | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1458 1658 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1458 1663 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1458 1664 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1458 75A4 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1458 75A5 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1458 86A4 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1458 86A5 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1462 1274 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1462 1277 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1558 95E1 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1558 97E1 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1A58 2002 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1A58 2005 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1A58 2007 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1A58 3000 | +| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 
1A58 3001 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1D05 105E |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1D05 1070 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 1D05 2087 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F10 8086 2087 |
+| NVIDIA GeForce RTX 2060 | 1F11 |
+| NVIDIA GeForce RTX 2060 | 1F12 |
+| NVIDIA GeForce RTX 2060 with Max-Q Design | 1F12 1028 098F |
+| NVIDIA GeForce RTX 2060 with Max-Q Design | 1F12 103C 8741 |
+| NVIDIA GeForce RTX 2060 with Max-Q Design | 1F12 103C 8744 |
+| NVIDIA GeForce RTX 2060 with Max-Q Design | 1F12 103C 878E |
+| NVIDIA GeForce RTX 2060 with Max-Q Design | 1F12 103C 880E |
+| NVIDIA GeForce RTX 2060 with Max-Q Design | 1F12 1043 1E11 |
+| NVIDIA GeForce RTX 2060 with Max-Q Design | 1F12 1043 1F11 |
+| NVIDIA GeForce RTX 2060 with Max-Q Design | 1F12 1462 12D9 |
+| NVIDIA GeForce RTX 2060 with Max-Q Design | 1F12 17AA 3801 |
+| NVIDIA GeForce RTX 2060 with Max-Q Design | 1F12 17AA 3802 |
+| NVIDIA GeForce RTX 2060 with Max-Q Design | 1F12 17AA 3803 |
+| NVIDIA GeForce RTX 2070 | 1F14 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1025 1401 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1025 1432 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1025 1442 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1025 1446 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1025 147D |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1028 09E2 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1028 09F3 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 103C 8607 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 103C 86C6 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 103C 86C7 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 103C 8736 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 103C 8738 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 103C 8746 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 103C 8772 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 103C 878A |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 103C 878B |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 103C 87A6 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 103C 87A7 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1043 174F |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1458 1512 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1458 75B5 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1458 75B6 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1458 76B4 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1458 76B5 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1458 78A4 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1458 78A5 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1458 86B4 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1458 86B5 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1462 12AE |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1462 12B0 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1462 12C6 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1558 50D3 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1558 70D1 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1A58 200C |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1A58 2011 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F14 1A58 3002 |
+| NVIDIA GeForce RTX 2060 | 1F15 |
+| Quadro RTX 3000 | 1F36 |
+| Quadro RTX 3000 with Max-Q Design | 1F36 1028 0990 |
+| Quadro RTX 3000 with Max-Q Design | 1F36 103C 8736 |
+| Quadro RTX 3000 with Max-Q Design | 1F36 103C 8738 |
+| Quadro RTX 3000 with Max-Q Design | 1F36 103C 8772 |
+| Quadro RTX 3000 with Max-Q Design | 1F36 1043 13CF |
+| Quadro RTX 3000 with Max-Q Design | 1F36 1414 0032 |
+| NVIDIA GeForce RTX 2060 SUPER | 1F42 |
+| NVIDIA GeForce RTX 2060 SUPER | 1F47 |
+| NVIDIA GeForce RTX 2070 | 1F50 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F50 1028 08ED |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F50 1028 08EE |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F50 1028 08EF |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F50 103C 8572 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F50 103C 8573 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F50 103C 8574 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F50 103C 8600 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F50 103C 8605 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F50 17AA 3FEE |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F50 17AA 3FFE |
+| NVIDIA GeForce RTX 2060 | 1F51 |
+| NVIDIA GeForce RTX 2070 | 1F54 |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F54 103C 878A |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F54 17AA 3FCE |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F54 17AA 3FCF |
+| NVIDIA GeForce RTX 2070 with Max-Q Design | 1F54 17AA 3FD0 |
+| NVIDIA GeForce RTX 2060 | 1F55 |
+| Quadro RTX 3000 | 1F76 |
+| Matrox D-Series D2450 | 1F76 102B 2800 |
+| Matrox D-Series D2480 | 1F76 102B 2900 |
+| NVIDIA GeForce GTX 1650 | 1F82 |
+| NVIDIA GeForce GTX 1630 | 1F83 |
+| NVIDIA GeForce GTX 1650 | 1F91 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 103C 863E |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 103C 86E7 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 103C 86E8 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 1043 12CF |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 1043 156F |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 1414 0032 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 144D C822 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 1462 127E |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 1462 1281 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 1462 1284 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 1462 1285 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 1462 129C |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 17AA 229F |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 17AA 3802 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 17AA 3806 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 17AA 3F1A |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F91 1A58 1001 |
+| NVIDIA GeForce GTX 1650 Ti | 1F95 |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 1025 1479 |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 1025 147A |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 1025 147B |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 1025 147C |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 103C 86E7 |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 103C 86E8 |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 103C 8815 |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 1043 1DFF |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 1043 1E1F |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 144D C838 |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 1462 12BD |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 1462 12C5 |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 1462 12D2 |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 17AA 22C0 |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 17AA 22C1 |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 17AA 3837 |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 17AA 3F95 |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 1A58 1003 |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 1A58 1006 |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 1A58 1007 |
+| NVIDIA GeForce GTX 1650 Ti with Max-Q Design | 1F95 1E83 3E30 |
+| NVIDIA GeForce GTX 1650 | 1F96 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F96 1462 1297 |
+| NVIDIA GeForce MX450 | 1F97 |
+| NVIDIA GeForce MX450 | 1F98 |
+| NVIDIA GeForce GTX 1650 | 1F99 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F99 1025 1479 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F99 1025 147A |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F99 1025 147B |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F99 1025 147C |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F99 103C 8815 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F99 1043 13B2 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F99 1043 1402 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F99 1043 1902 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F99 1462 12BD |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F99 1462 12C5 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F99 1462 12D2 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F99 17AA 22DA |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F99 17AA 3F93 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F99 1E83 3E30 |
+| NVIDIA GeForce MX450 | 1F9C |
+| NVIDIA GeForce GTX 1650 | 1F9D |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F9D 1043 128D |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F9D 1043 130D |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F9D 1043 149C |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F9D 1043 185C |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F9D 1043 189C |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F9D 1462 12F4 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F9D 1462 1302 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F9D 1462 131B |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F9D 1462 1326 |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F9D 1462 132A |
+| NVIDIA GeForce GTX 1650 with Max-Q Design | 1F9D 1462 132E |
+| NVIDIA GeForce MX550 | 1F9F |
+| NVIDIA GeForce MX550 | 1FA0 |
+| NVIDIA T1000 | 1FB0 1028 12DB |
+| NVIDIA T1000 | 1FB0 103C 12DB |
+| NVIDIA T1000 | 1FB0 103C 8A80 |
+| NVIDIA T1000 | 1FB0 10DE 12DB |
+| NVIDIA DGX Display | 1FB0 10DE 1485 |
+| NVIDIA T1000 | 1FB0 17AA 12DB |
+| NVIDIA T600 | 1FB1 1028 1488 |
+| NVIDIA T600 | 1FB1 103C 1488 |
+| NVIDIA T600 | 1FB1 103C 8A80 |
+| NVIDIA T600 | 1FB1 10DE 1488 |
+| NVIDIA T600 | 1FB1 17AA 1488 |
+| NVIDIA T400 | 1FB2 1028 1489 |
+| NVIDIA T400 | 1FB2 103C 1489 |
+| NVIDIA T400 | 1FB2 103C 8A80 |
+| NVIDIA T400 | 1FB2 10DE 1489 |
+| NVIDIA T400 | 1FB2 17AA 1489 |
+| NVIDIA T600 Laptop GPU | 1FB6 |
+| NVIDIA T550 Laptop GPU | 1FB7 |
+| Quadro T2000 | 1FB8 |
+| Quadro T2000 with Max-Q Design | 1FB8 1028 097E |
+| Quadro T2000 with Max-Q Design | 1FB8 103C 8736 |
+| Quadro T2000 with Max-Q Design | 1FB8 103C 8738 |
+| Quadro T2000 with Max-Q Design | 1FB8 103C 8772 |
+| Quadro T2000 with Max-Q Design | 1FB8 103C 8780 |
+| Quadro T2000 with Max-Q Design | 1FB8 103C 8782 |
+| Quadro T2000 with Max-Q Design | 1FB8 103C 8783 |
+| Quadro T2000 with Max-Q Design | 1FB8 103C 8785 |
+| Quadro T2000 with Max-Q Design | 1FB8 103C 87F0 |
+| Quadro T2000 with Max-Q Design | 1FB8 1462 1281 |
+| Quadro T2000 with Max-Q Design | 1FB8 1462 12BD |
+| Quadro T2000 with Max-Q Design | 1FB8 17AA 22C0 |
+| Quadro T2000 with Max-Q Design | 1FB8 17AA 22C1 |
+| Quadro T1000 | 1FB9 |
+| Quadro T1000 with Max-Q Design | 1FB9 1025 1479 |
+| Quadro T1000 with Max-Q Design | 1FB9 1025 147A |
+| Quadro T1000 with Max-Q Design | 1FB9 1025 147B |
+| Quadro T1000 with Max-Q Design | 1FB9 1025 147C |
+| Quadro T1000 with Max-Q Design | 1FB9 103C 8736 |
+| Quadro T1000 with Max-Q Design | 1FB9 103C 8738 |
+| Quadro T1000 with Max-Q Design | 1FB9 103C 8772 |
+| Quadro T1000 with Max-Q Design | 1FB9 103C 8780 |
+| Quadro T1000 with Max-Q Design | 1FB9 103C 8782 |
+| Quadro T1000 with Max-Q Design | 1FB9 103C 8783 |
+| Quadro T1000 with Max-Q Design | 1FB9 103C 8785 |
+| Quadro T1000 with Max-Q Design | 1FB9 103C 87F0 |
+| Quadro T1000 with Max-Q Design | 1FB9 1462 12BD |
+| Quadro T1000 with Max-Q Design | 1FB9 17AA 22C0 |
+| Quadro T1000 with Max-Q Design | 1FB9 17AA 22C1 |
+| NVIDIA T600 Laptop GPU | 1FBA |
+| NVIDIA T500 | 1FBB |
+| NVIDIA T1200 Laptop GPU | 1FBC |
+| NVIDIA GeForce GTX 1650 | 1FDD |
+| NVIDIA T1000 8GB | 1FF0 1028 1612 |
+| NVIDIA T1000 8GB | 1FF0 103C 1612 |
+| NVIDIA T1000 8GB | 1FF0 103C 8A80 |
+| NVIDIA T1000 8GB | 1FF0 10DE 1612 |
+| NVIDIA T1000 8GB | 1FF0 17AA 1612 |
+| NVIDIA T400 4GB | 1FF2 1028 1613 |
+| NVIDIA T400 4GB | 1FF2 103C 1613 |
+| NVIDIA T400E | 1FF2 103C 18FF |
+| NVIDIA T400 4GB | 1FF2 103C 8A80 |
+| NVIDIA T400 4GB | 1FF2 10DE 1613 |
+| NVIDIA T400E | 1FF2 10DE 18FF |
+| NVIDIA T400 4GB | 1FF2 17AA 1613 |
+| NVIDIA T400E | 1FF2 17AA 18FF |
+| Quadro T1000 | 1FF9 |
+| NVIDIA A100-SXM4-40GB | 20B0 |
+| NVIDIA A100-PG509-200 | 20B0 10DE 1450 |
+| NVIDIA A100-SXM4-80GB | 20B2 10DE 1463 |
+| NVIDIA A100-SXM4-80GB | 20B2 10DE 147F |
+| NVIDIA A100-SXM4-80GB | 20B2 10DE 1622 |
+| NVIDIA A100-SXM4-80GB | 20B2 10DE 1623 |
+| NVIDIA PG509-210 | 20B2 10DE 1625 |
+| NVIDIA A100-SXM-64GB | 20B3 10DE 14A7 |
+| NVIDIA A100-SXM-64GB | 20B3 10DE 14A8 |
+| NVIDIA A100 80GB PCIe | 20B5 10DE 1533 |
+| NVIDIA A100 80GB PCIe | 20B5 10DE 1642 |
+| NVIDIA PG506-232 | 20B6 10DE 1492 |
+| NVIDIA A30 | 20B7 10DE 1532 |
+| NVIDIA A30 | 20B7 10DE 1804 |
+| NVIDIA A30 | 20B7 10DE 1852 |
+| NVIDIA A800-SXM4-40GB | 20BD 10DE 17F4 |
+| NVIDIA A100-PCIE-40GB | 20F1 10DE 145F |
+| NVIDIA A800-SXM4-80GB | 20F3 10DE 179B |
+| NVIDIA A800-SXM4-80GB | 20F3 10DE 179C |
+| NVIDIA A800-SXM4-80GB | 20F3 10DE 179D |
+| NVIDIA A800-SXM4-80GB | 20F3 10DE 179E |
+| NVIDIA A800-SXM4-80GB | 20F3 10DE 179F |
+| NVIDIA A800-SXM4-80GB | 20F3 10DE 17A0 |
+| NVIDIA A800-SXM4-80GB | 20F3 10DE 17A1 |
+| NVIDIA A800-SXM4-80GB | 20F3 10DE 17A2 |
+| NVIDIA A800 80GB PCIe | 20F5 10DE 1799 |
+| NVIDIA A800 80GB PCIe LC | 20F5 10DE 179A |
+| NVIDIA A800 40GB Active | 20F6 1028 180A |
+| NVIDIA A800 40GB Active | 20F6 103C 180A |
+| NVIDIA A800 40GB Active | 20F6 10DE 180A |
+| NVIDIA A800 40GB Active | 20F6 17AA 180A |
+| NVIDIA AX800 | 20FD 10DE 17F8 |
+| NVIDIA GeForce GTX 1660 Ti | 2182 |
+| NVIDIA GeForce GTX 1660 | 2184 |
+| NVIDIA GeForce GTX 1650 SUPER | 2187 |
+| NVIDIA GeForce GTX 1650 | 2188 |
+| NVIDIA CMP 30HX | 2189 |
+| NVIDIA GeForce GTX 1660 Ti | 2191 |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 1028 0949 |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 103C 85FB |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 103C 85FE |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 103C 86D6 |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 103C 8741 |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 103C 8744 |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 103C 878D |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 103C 87AF |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 103C 87B3 |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 1043 171F |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 1043 17EF |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 1043 18D1 |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 1414 0032 |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 1462 128A |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 1462 128B |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 1462 12C6 |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 1462 12CB |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 1462 12CC |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 1462 12D9 |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 17AA 380C |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 17AA 381D |
+| NVIDIA GeForce GTX 1660 Ti with Max-Q Design | 2191 17AA 381E |
+| NVIDIA GeForce GTX 1650 Ti | 2192 |
+| NVIDIA GeForce GTX 1660 SUPER | 21C4 |
+| NVIDIA GeForce GTX 1660 Ti | 21D1 |
+| NVIDIA GeForce RTX 3090 Ti | 2203 |
+| NVIDIA GeForce RTX 3090 | 2204 |
+| NVIDIA GeForce RTX 3080 | 2206 |
+| NVIDIA GeForce RTX 3070 Ti | 2207 |
+| NVIDIA GeForce RTX 3080 Ti | 2208 |
+| NVIDIA GeForce RTX 3080 | 220A |
+| NVIDIA CMP 90HX | 220D |
+| NVIDIA GeForce RTX 3080 | 2216 |
+| NVIDIA RTX A6000 | 2230 1028 1459 |
+| NVIDIA RTX A6000 | 2230 103C 1459 |
+| NVIDIA RTX A6000 | 2230 10DE 1459 |
+| NVIDIA RTX A6000 | 2230 17AA 1459 |
+| NVIDIA RTX A5000 | 2231 1028 147E |
+| NVIDIA RTX A5000 | 2231 103C 147E |
+| NVIDIA RTX A5000 | 2231 10DE 147E |
+| NVIDIA RTX A5000 | 2231 17AA 147E |
+| NVIDIA RTX A4500 | 2232 1028 163C |
+| NVIDIA RTX A4500 | 2232 103C 163C |
+| NVIDIA RTX A4500 | 2232 10DE 163C |
+| NVIDIA RTX A4500 | 2232 17AA 163C |
+| NVIDIA RTX A5500 | 2233 1028 165A |
+| NVIDIA RTX A5500 | 2233 103C 165A |
+| NVIDIA RTX A5500 | 2233 10DE 165A |
+| NVIDIA RTX A5500 | 2233 17AA 165A |
+| NVIDIA A40 | 2235 10DE 145A |
+| NVIDIA A10 | 2236 10DE 1482 |
+| NVIDIA A10G | 2237 10DE 152F |
+| NVIDIA A10M | 2238 10DE 1677 |
+| NVIDIA H100 NVL | 2321 10DE 1839 |
+| NVIDIA H800 PCIe | 2322 10DE 17A4 |
+| NVIDIA H800 | 2324 10DE 17A6 |
+| NVIDIA H800 | 2324 10DE 17A8 |
+| NVIDIA H20 | 2329 10DE 198B |
+| NVIDIA H20 | 2329 10DE 198C |
+| NVIDIA H20-3e | 232C 10DE 2063 |
+| NVIDIA H100 80GB HBM3 | 2330 10DE 16C0 |
+| NVIDIA H100 80GB HBM3 | 2330 10DE 16C1 |
+| NVIDIA H100 PCIe | 2331 10DE 1626 |
+| NVIDIA H200 | 2335 10DE 18BE |
+| NVIDIA H200 | 2335 10DE 18BF |
+| NVIDIA H100 | 2339 10DE 17FC |
+| NVIDIA H800 NVL | 233A 10DE 183A |
+| NVIDIA H200 NVL | 233B 10DE 1996 |
+| NVIDIA GH200 120GB | 2342 10DE 16EB |
+| NVIDIA GH200 120GB | 2342 10DE 1805 |
+| NVIDIA GH200 480GB | 2342 10DE 1809 |
+| NVIDIA GH200 144G HBM3e | 2348 10DE 18D2 |
+| NVIDIA GeForce RTX 3060 Ti | 2414 |
+| NVIDIA GeForce RTX 3080 Ti Laptop GPU | 2420 |
+| NVIDIA RTX A5500 Laptop GPU | 2438 |
+| NVIDIA GeForce RTX 3080 Ti Laptop GPU | 2460 |
+| NVIDIA GeForce RTX 3070 Ti | 2482 |
+| NVIDIA GeForce RTX 3070 | 2484 |
+| NVIDIA GeForce RTX 3060 Ti | 2486 |
+| NVIDIA GeForce RTX 3060 | 2487 |
+| NVIDIA GeForce RTX 3070 | 2488 |
+| NVIDIA GeForce RTX 3060 Ti | 2489 |
+| NVIDIA CMP 70HX | 248A |
+| NVIDIA GeForce RTX 3080 Laptop GPU | 249C |
+| NVIDIA GeForce RTX 3060 Laptop GPU | 249C 1D05 1194 |
+| NVIDIA GeForce RTX 3070 Laptop GPU | 249D |
+| NVIDIA GeForce RTX 3070 Ti Laptop GPU | 24A0 |
+| NVIDIA GeForce RTX 3060 Laptop GPU | 24A0 1D05 1192 |
+| NVIDIA RTX A4000 | 24B0 1028 14AD |
+| NVIDIA RTX A4000 | 24B0 103C 14AD |
+| NVIDIA RTX A4000 | 24B0 10DE 14AD |
+| NVIDIA RTX A4000 | 24B0 17AA 14AD |
+| NVIDIA RTX A4000H | 24B1 10DE 1658 |
+| NVIDIA RTX A5000 Laptop GPU | 24B6 |
+| NVIDIA RTX A4000 Laptop GPU | 24B7 |
+| NVIDIA RTX A3000 Laptop GPU | 24B8 |
+| NVIDIA RTX A3000 12GB Laptop GPU | 24B9 |
+| NVIDIA RTX A4500 Laptop GPU | 24BA |
+| NVIDIA RTX A3000 12GB Laptop GPU | 24BB |
+| NVIDIA GeForce RTX 3060 | 24C7 |
+| NVIDIA GeForce RTX 3060 Ti | 24C9 |
+| NVIDIA GeForce RTX 3080 Laptop GPU | 24DC |
+| NVIDIA GeForce RTX 3070 Laptop GPU | 24DD |
+| NVIDIA GeForce RTX 3070 Ti Laptop GPU | 24E0 |
+| NVIDIA RTX A4500 Embedded GPU | 24FA |
+| NVIDIA GeForce RTX 3060 | 2503 |
+| NVIDIA GeForce RTX 3060 | 2504 |
+| NVIDIA GeForce RTX 3050 | 2507 |
+| NVIDIA GeForce RTX 3050 OEM | 2508 |
+| NVIDIA GeForce RTX 3060 Laptop GPU | 2520 |
+| NVIDIA GeForce RTX 3060 Laptop GPU | 2521 |
+| NVIDIA GeForce RTX 3050 Ti Laptop GPU | 2523 |
+| NVIDIA RTX A2000 | 2531 1028 151D |
+| NVIDIA RTX A2000 | 2531 103C 151D |
+| NVIDIA RTX A2000 | 2531 10DE 151D |
+| NVIDIA RTX A2000 | 2531 17AA 151D |
+| NVIDIA GeForce RTX 3060 | 2544 |
+| NVIDIA GeForce RTX 3060 Laptop GPU | 2560 |
+| NVIDIA GeForce RTX 3050 Ti Laptop GPU | 2563 |
+| NVIDIA RTX A2000 12GB | 2571 1028 1611 |
+| NVIDIA RTX A2000 12GB | 2571 103C 1611 |
+| NVIDIA RTX A2000 12GB | 2571 10DE 1611 |
+| NVIDIA RTX A2000 12GB | 2571 17AA 1611 |
+| NVIDIA GeForce RTX 3050 | 2582 |
+| NVIDIA GeForce RTX 3050 | 2584 |
+| NVIDIA GeForce RTX 3050 Ti Laptop GPU | 25A0 |
+| NVIDIA GeForce RTX 3050Ti Laptop GPU | 25A0 103C 8928 |
+| NVIDIA GeForce RTX 3050Ti Laptop GPU | 25A0 103C 89F9 |
+| NVIDIA GeForce RTX 3060 Laptop GPU | 25A0 1D05 1196 |
+| NVIDIA GeForce RTX 3050 Laptop GPU | 25A2 |
+| NVIDIA GeForce RTX 3050 Ti Laptop GPU | 25A2 1028 0BAF |
+| NVIDIA GeForce RTX 3060 Laptop GPU | 25A2 1D05 1195 |
+| NVIDIA GeForce RTX 3050 Laptop GPU | 25A5 |
+| NVIDIA GeForce MX570 | 25A6 |
+| NVIDIA GeForce RTX 2050 | 25A7 |
+| NVIDIA GeForce RTX 2050 | 25A9 |
+| NVIDIA GeForce MX570 A | 25AA |
+| NVIDIA GeForce RTX 3050 4GB Laptop GPU | 25AB |
+| NVIDIA GeForce RTX 3050 6GB Laptop GPU | 25AC |
+| NVIDIA GeForce RTX 2050 | 25AD |
+| NVIDIA RTX A1000 | 25B0 1028 1878 |
+| NVIDIA RTX A1000 | 25B0 103C 1878 |
+| NVIDIA RTX A1000 | 25B0 103C 8D96 |
+| NVIDIA RTX A1000 | 25B0 10DE 1878 |
+| NVIDIA RTX A1000 | 25B0 17AA 1878 |
+| NVIDIA RTX A400 | 25B2 1028 1879 |
+| NVIDIA RTX A400 | 25B2 103C 1879 |
+| NVIDIA RTX A400 | 25B2 103C 8D95 |
+| NVIDIA RTX A400 | 25B2 10DE 1879 |
+| NVIDIA RTX A400 | 25B2 17AA 1879 |
+| NVIDIA A16 | 25B6 10DE 14A9 |
+| NVIDIA A2 | 25B6 10DE 157E |
+| NVIDIA RTX A2000 Laptop GPU | 25B8 |
+| NVIDIA RTX A1000 Laptop GPU | 25B9 |
+| NVIDIA RTX A2000 8GB Laptop GPU | 25BA |
+| NVIDIA RTX A500 Laptop GPU | 25BB |
+| NVIDIA RTX A1000 6GB Laptop GPU | 25BC |
+| NVIDIA RTX A500 Laptop GPU | 25BD |
+| NVIDIA GeForce RTX 3050 Ti Laptop GPU | 25E0 |
+| NVIDIA GeForce RTX 3050 Laptop GPU | 25E2 |
+| NVIDIA GeForce RTX 3050 Laptop GPU | 25E5 |
+| NVIDIA GeForce RTX 3050 6GB Laptop GPU | 25EC |
+| NVIDIA GeForce RTX 2050 | 25ED |
+| NVIDIA RTX A1000 Embedded GPU | 25F9 |
+| NVIDIA RTX A2000 Embedded GPU | 25FA |
+| NVIDIA RTX A500 Embedded GPU | 25FB |
+| NVIDIA GeForce RTX 4090 | 2684 |
+| NVIDIA GeForce RTX 4090 D | 2685 |
+| NVIDIA GeForce RTX 4070 Ti SUPER | 2689 |
+| NVIDIA RTX 6000 Ada Generation | 26B1 1028 16A1 |
+| NVIDIA RTX 6000 Ada Generation | 26B1 103C 16A1 |
+| NVIDIA RTX 6000 Ada Generation | 26B1 10DE 16A1 |
+| NVIDIA RTX 6000 Ada Generation | 26B1 17AA 16A1 |
+| NVIDIA RTX 5000 Ada Generation | 26B2 1028 17FA |
+| NVIDIA RTX 5000 Ada Generation | 26B2 103C 17FA |
+| NVIDIA RTX 5000 Ada Generation | 26B2 10DE 17FA |
+| NVIDIA RTX 5000 Ada Generation | 26B2 17AA 17FA |
+| NVIDIA RTX 5880 Ada Generation | 26B3 1028 1934 |
+| NVIDIA RTX 5880 Ada Generation | 26B3 103C 1934 |
+| NVIDIA RTX 5880 Ada Generation | 26B3 10DE 1934 |
+| NVIDIA RTX 5880 Ada Generation | 26B3 17AA 1934 |
+| NVIDIA L40 | 26B5 10DE 169D |
+| NVIDIA L40 | 26B5 10DE 17DA |
+| NVIDIA L40S | 26B9 10DE 1851 |
+| NVIDIA L40S | 26B9 10DE 18CF |
+| NVIDIA L20 | 26BA 10DE 1957 |
+| NVIDIA L20 | 26BA 10DE 1990 |
+| NVIDIA GeForce RTX 4080 SUPER | 2702 |
+| NVIDIA GeForce RTX 4080 | 2704 |
+| NVIDIA GeForce RTX 4070 Ti SUPER | 2705 |
+| NVIDIA GeForce RTX 4070 | 2709 |
+| NVIDIA GeForce RTX 4090 Laptop GPU | 2717 |
+| NVIDIA RTX 5000 Ada Generation Laptop GPU | 2730 |
+| NVIDIA GeForce RTX 4090 Laptop GPU | 2757 |
+| NVIDIA RTX 5000 Ada Generation Embedded GPU | 2770 |
+| NVIDIA GeForce RTX 4070 Ti | 2782 |
+| NVIDIA GeForce RTX 4070 SUPER | 2783 |
+| NVIDIA GeForce RTX 4070 | 2786 |
+| NVIDIA GeForce RTX 4060 Ti | 2788 |
+| NVIDIA GeForce RTX 4080 Laptop GPU | 27A0 |
+| NVIDIA RTX 4000 SFF Ada Generation | 27B0 1028 16FA |
+| NVIDIA RTX 4000 SFF Ada Generation | 27B0 103C 16FA |
+| NVIDIA RTX 4000 SFF Ada Generation | 27B0 10DE 16FA |
+| NVIDIA RTX 4000 SFF Ada Generation | 27B0 17AA 16FA |
+| NVIDIA RTX 4500 Ada Generation | 27B1 1028 180C |
+| NVIDIA RTX 4500 Ada Generation | 27B1 103C 180C |
+| NVIDIA RTX 4500 Ada Generation | 27B1 10DE 180C |
+| NVIDIA RTX 4500 Ada Generation | 27B1 17AA 180C |
+| NVIDIA RTX 4000 Ada Generation | 27B2 1028 181B |
+| NVIDIA RTX 4000 Ada Generation | 27B2 103C 181B |
+| NVIDIA RTX 4000 Ada Generation | 27B2 10DE 181B |
+| NVIDIA RTX 4000 Ada Generation | 27B2 17AA 181B |
+| NVIDIA L2 | 27B6 10DE 1933 |
+| NVIDIA L4 | 27B8 10DE 16CA |
+| NVIDIA L4 | 27B8 10DE 16EE |
+| NVIDIA RTX 4000 Ada Generation Laptop GPU | 27BA |
+| NVIDIA RTX 3500 Ada Generation Laptop GPU | 27BB |
+| NVIDIA GeForce RTX 4080 Laptop GPU | 27E0 |
+| NVIDIA RTX 3500 Ada Generation Embedded GPU | 27FB |
+| NVIDIA GeForce RTX 4060 Ti | 2803 |
+| NVIDIA GeForce RTX 4060 Ti | 2805 |
+| NVIDIA GeForce RTX 4060 | 2808 |
+| NVIDIA GeForce RTX 4070 Laptop GPU | 2820 |
+| NVIDIA GeForce RTX 3050 A Laptop GPU | 2822 |
+| NVIDIA RTX 3000 Ada Generation Laptop GPU | 2838 |
+| NVIDIA GeForce RTX 4070 Laptop GPU | 2860 |
+| NVIDIA GeForce RTX 4060 | 2882 |
+| NVIDIA GeForce RTX 4060 Laptop GPU | 28A0 |
+| NVIDIA GeForce RTX 4050 Laptop GPU | 28A1 |
+| NVIDIA GeForce RTX 3050 A Laptop GPU | 28A3 |
+| NVIDIA RTX 2000 Ada Generation | 28B0 1028 1870 |
+| NVIDIA RTX 2000 Ada Generation | 28B0 103C 1870 |
+| NVIDIA RTX 2000E Ada Generation | 28B0 103C 1871 |
+| NVIDIA RTX 2000 Ada Generation | 28B0 10DE 1870 |
+| NVIDIA RTX 2000E Ada Generation | 28B0 10DE 1871 |
+| NVIDIA RTX 2000 Ada Generation | 28B0 17AA 1870 |
+| NVIDIA RTX 2000E Ada Generation | 28B0 17AA 1871 |
+| NVIDIA RTX 2000 Ada Generation Laptop GPU | 28B8 |
+| NVIDIA RTX 1000 Ada Generation Laptop GPU | 28B9 |
+| NVIDIA RTX 500 Ada Generation Laptop GPU | 28BA |
+| NVIDIA RTX 500 Ada Generation Laptop GPU | 28BB |
+| NVIDIA GeForce RTX 4060 Laptop GPU | 28E0 |
+| NVIDIA GeForce RTX 4050 Laptop GPU | 28E1 |
+| NVIDIA GeForce RTX 3050 A Laptop GPU | 28E3 |
+| NVIDIA RTX 2000 Ada Generation Embedded GPU | 28F8 |
+| NVIDIA B200 | 2901 10DE 1999 |
+| NVIDIA B200 | 2901 10DE 199B |
+| NVIDIA B200 | 2901 10DE 20DA |
+| NVIDIA GB200 | 2941 10DE 2046 |
+| NVIDIA GB200 | 2941 10DE 20CA |
+| NVIDIA GB200 | 2941 10DE 20D5 |
+| NVIDIA GB200 | 2941 10DE 21C9 |
+| NVIDIA GB200 | 2941 10DE 21CA |
+| NVIDIA GeForce RTX 5090 | 2B85 |
+| NVIDIA GeForce RTX 5090 D | 2B87 |
+| NVIDIA RTX PRO 6000 Blackwell Workstation Edition | 2BB1 1028 204B |
+| NVIDIA RTX PRO 6000 Blackwell Workstation Edition | 2BB1 103C 204B |
+| NVIDIA RTX PRO 6000 Blackwell Workstation Edition | 2BB1 10DE 204B |
+| NVIDIA RTX PRO 6000 Blackwell Workstation Edition | 2BB1 17AA 204B |
+| NVIDIA RTX PRO 5000 Blackwell | 2BB3 1028 204D |
+| NVIDIA RTX PRO 5000 Blackwell | 2BB3 103C 204D |
+| NVIDIA RTX PRO 5000 Blackwell | 2BB3 10DE 204D |
+| NVIDIA RTX PRO 5000 Blackwell | 2BB3 17AA 204D |
+| NVIDIA RTX PRO 6000 Blackwell Max-Q Workstation Edition | 2BB4 1028 204C |
+| NVIDIA RTX PRO 6000 Blackwell Max-Q Workstation Edition | 2BB4 103C 204C |
+| NVIDIA RTX PRO 6000 Blackwell Max-Q Workstation Edition | 2BB4 10DE 204C |
+| NVIDIA RTX PRO 6000 Blackwell Max-Q Workstation Edition | 2BB4 17AA 204C |
+| NVIDIA RTX PRO 6000 Blackwell Server Edition | 2BB5 10DE 204E |
+| NVIDIA GeForce RTX 5080 | 2C02 |
+| NVIDIA GeForce RTX 5070 Ti | 2C05 |
+| NVIDIA GeForce RTX 5090 Laptop GPU | 2C18 |
+| NVIDIA GeForce RTX 5080 Laptop GPU | 2C19 |
+| NVIDIA RTX PRO 5000 Blackwell Generation Laptop GPU | 2C38 |
+| NVIDIA RTX PRO 4000 Blackwell Generation Laptop GPU | 2C39 |
+| NVIDIA GeForce RTX 5090 Laptop GPU | 2C58 |
+| NVIDIA GeForce RTX 5080 Laptop GPU | 2C59 |
+| NVIDIA GeForce RTX 5060 Ti | 2D04 |
+| NVIDIA GeForce RTX 5060 | 2D05 |
+| NVIDIA GeForce RTX 5070 Laptop GPU | 2D18 |
+| NVIDIA GeForce RTX 5060 Laptop GPU | 2D19 |
+| NVIDIA RTX PRO 2000 Blackwell Generation Laptop GPU | 2D39 |
+| NVIDIA GeForce RTX 5070 Laptop GPU | 2D58 |
+| NVIDIA GeForce RTX 5060 Laptop GPU | 2D59 |
+| NVIDIA RTX PRO 1000 Blackwell Generation Laptop GPU | 2DB8 |
+| NVIDIA RTX PRO 500 Blackwell Generation Laptop GPU | 2DB9 |
+| NVIDIA GeForce RTX 5070 | 2F04 |
+| NVIDIA GeForce RTX 5070 Ti Laptop GPU | 2F18 |
+| NVIDIA RTX PRO 3000 Blackwell Generation Laptop GPU | 2F38 |
+| NVIDIA GeForce RTX 5070 Ti Laptop GPU | 2F58 |
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..16ecec5
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,16 @@
+# Report a Security Vulnerability
+
+To report a potential security vulnerability in any NVIDIA product, please use either:
+* This web form: [Security Vulnerability Submission Form](https://www.nvidia.com/object/submit-security-vulnerability.html), or
+* Send email to: [NVIDIA PSIRT](mailto:psirt@nvidia.com)
+
+**OEM Partners should contact their NVIDIA Customer Program Manager**
+
+If reporting a potential vulnerability via email, please encrypt it using NVIDIA’s public PGP key ([see PGP Key page](https://www.nvidia.com/en-us/security/pgp-key/)) and include the following information:
+* Product/Driver name and version/branch that contains the vulnerability
+* Type of vulnerability (code execution, denial of service, buffer overflow, etc.)
+* Instructions to reproduce the vulnerability
+* Proof-of-concept or exploit code
+* Potential impact of the vulnerability, including how an attacker could exploit the vulnerability
+
+See https://www.nvidia.com/en-us/security/ for past NVIDIA Security Bulletins and Notices.
diff --git a/commitFile.txt b/commitFile.txt
new file mode 100644
index 0000000..f47ec0c
--- /dev/null
+++ b/commitFile.txt
@@ -0,0 +1,1510 @@
+Updating prebuilts and/or headers
+
+d13779dbbab1c776db15f462cd46b29f2c0f8c7c - Makefile
+7d577fdb9594ae572ff38fdda682a4796ab832ca - COPYING
+5728867ce2e96b63b29367be6aa1c0e47bcafc8f - SECURITY.md
+6b73bf6a534ddc0f64e8ba88739381c3b7fb4b5c - nv-compiler.sh
+05e911b99b109a721d2045f025b21189e2718e60 - README.md
+ec5f1eb408e0b650158e0310fb1ddd8e9b323a6f - CONTRIBUTING.md
+af3ee56442f16029cb9b13537477c384226b22fc - CODE_OF_CONDUCT.md
+07bd07999f296d935386a8edf719d0e296f63227 - kernel-open/Kbuild
+45b68e3eacda04dcadce48a8238574302a71a3ca - kernel-open/Makefile
+99f4563141af1278f13cb23a6e6c24d21d583d7b - kernel-open/conftest.sh
+0b1508742a1c5a04b6c3a4be1b48b506f4180848 - kernel-open/dkms.conf
+19a5da412ce1557b721b8550a4a80196f6162ba6 - kernel-open/common/inc/os_dsi_panel_props.h
+4750735d6f3b334499c81d499a06a654a052713d - kernel-open/common/inc/nv-caps.h
+92de3baafe321dd0dcf8665aae4614d5ac670718 - kernel-open/common/inc/rs_access.h
+60ef64c0f15526ae2d786e5cec07f28570f0663b - kernel-open/common/inc/conftest.h
+880e45b68b19fdb91ac94991f0e6d7fc3b406b1f - kernel-open/common/inc/nv-pci-types.h
+6d2f660ef0942edf664874f260266ec81cd0ff08 - kernel-open/common/inc/nvtypes.h
+c45b2faf17ca2a205c56daa11e3cb9d864be2238 - kernel-open/common/inc/nv-modeset-interface.h
+5bc7a748c7d3dfa6559ca4f9fe6199e17098ec8f - kernel-open/common/inc/nv-lock.h
+b249abc0a7d0c9889008e98cb2f8515a9d310b85 - kernel-open/common/inc/nvgputypes.h
+e4a4f57abb8769d204468b2f5000c81f5ea7c92f - kernel-open/common/inc/nv-procfs.h
+8b19b93e958aca626899f035334a4c96f8776eb6 - kernel-open/common/inc/nv.h
+ede1f77acb43e28391bceac058e00a7a8d799b0d - kernel-open/common/inc/nvmisc.h
+ae374d3e438f8d3b60df8c4602618c58564b73f9 - kernel-open/common/inc/rm-gpu-ops.h
+3f7b20e27e6576ee1f2f0557d269697a0b8af7ec - kernel-open/common/inc/nv-firmware-registry.h
+5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - kernel-open/common/inc/dce_rm_client_ipc.h
+3e8075872e2efa843b74b884ef5098468edc4f18 - kernel-open/common/inc/nvimpshared.h
+befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - kernel-open/common/inc/nv_stdarg.h
+0e70d16576584082ee4c7f3ff9944f3bd107b1c1 - kernel-open/common/inc/cpuopsys.h
+d7ab0ee225361daacd280ff98848851933a10a98 - kernel-open/common/inc/nv-list-helpers.h
+b02c378ac0521c380fc2403f0520949f785b1db6 - kernel-open/common/inc/nv-dmabuf.h
+a3d1e51c0f4217f1dc4cb0c48aa0eafd054d4e5e - kernel-open/common/inc/nv-procfs-utils.h
+81592e5c17bebad04cd11d73672c859baa070329 - kernel-open/common/inc/nv-chardev-numbers.h
+61cf8f3fd32142dc402f6802b5d4c9af6c875c35 - kernel-open/common/inc/nv-firmware.h
+d5253e7e4abd3ad8d72375260aa80037adcd8973 - kernel-open/common/inc/nv_dpy_id.h
+61a9589e4a8ec122e5a6c2258658d493ee747897 - kernel-open/common/inc/nv-platform.h
+b986bc6591ba17a74ad81ec4c93347564c6d5165 - kernel-open/common/inc/nvkms-format.h
+4f487eccd762f3ca645a685d5c333ff569e7987c - kernel-open/common/inc/nv-kthread-q-os.h
+4015c4557ea0790a2bdf5695832c89e31d75aee9 - kernel-open/common/inc/nvlimits.h
+143051f69a53db0e7c5d2f846a9c14d666e264b4 - kernel-open/common/inc/nv-kref.h
+56f432032bef4683c2801f46bec5065923475fb1 - kernel-open/common/inc/nv-kthread-q.h
+b4c5d759f035b540648117b1bff6b1701476a398 - kernel-open/common/inc/nvCpuUuid.h
+67a9707c568e167bae4404c7785ed614babb7b82 - kernel-open/common/inc/nv-linux.h
+7c7888550b12eeb98128ea9ac771b897327f538e - kernel-open/common/inc/nv-hypervisor.h
+f9cb3701681994ff6f32833892d900b0da2b89f6 - kernel-open/common/inc/nv-pgprot.h
+b8700a911ac85770bf25d70b9692308af63966bd - kernel-open/common/inc/nvstatuscodes.h
+3a5f4f105672921b857fec7f2b577d9d525afe37 - kernel-open/common/inc/nv-timer.h
+5cd0b3f9c7f544e9064efc9b5ba4f297e5494315 - kernel-open/common/inc/nv-time.h
+7a78f354e0b68f03d6ab566d5b755e299456f361 - kernel-open/common/inc/os_gpio.h
+154abd192eb950fecffcca470ee80b27f224fd79 - kernel-open/common/inc/nv-proto.h
+2eb11e523a3ecba2dcd68f3146e1e666a44256ae - kernel-open/common/inc/nv-ioctl.h
+1328058925b64e97588d670fe70466b31af7c7c1 - kernel-open/common/inc/nv-mm.h
+25d89847c11449b329941a26f04aec955cfaf150 - kernel-open/common/inc/nv-pci.h
+95bf694a98ba78d5a19e66463b8adda631e6ce4c - kernel-open/common/inc/nvstatus.h
+d74a8d4a9ae3d36e92b39bc7c74b27df44626b1c - kernel-open/common/inc/nv_mig_types.h
+b3258444b6a2c2399f5f00c7cac5b470c41caeaa - kernel-open/common/inc/nv-hash.h
+4c856c1324060dcb5a9e72e5e82c7a60f6324733 - kernel-open/common/inc/nvkms-kapi.h
+44cb5bc2bc87a5c3447bcb61f2ce5aef08c07fa7 - kernel-open/common/inc/nv_uvm_interface.h
+1e7eec6561b04d2d21c3515987aaa116e9401c1f - kernel-open/common/inc/nv-kernel-interface-api.h
+c54c62de441828282db9a4f5b35c2fa5c97d94f1 - kernel-open/common/inc/nvkms-api-types.h
+ade7410c1c0572dbed49b4b0d97b87245ca59115 - kernel-open/common/inc/os-interface.h
+2ffd0138e1b3425ade16b962c3ff02a82cde2e64 - kernel-open/common/inc/nv-ioctl-numa.h
+995d8447f8539bd736cc09d62983ae8ebc7e3436 - kernel-open/common/inc/nv_common_utils.h
+c75bfc368c6ce3fc2c1a0c5062834e90d822b365 - kernel-open/common/inc/nv-memdbg.h
+dfd7b82a7f2939d4c1869840059705c6b71bffe3 - kernel-open/common/inc/nv-msi.h
+3b12d770f8592b94a8c7774c372e80ad08c5774c - kernel-open/common/inc/nvi2c.h
+894ef9e230604572bbceabdfd5f241059d54aa10 - kernel-open/common/inc/nv_speculation_barrier.h
+107d1ecb8a128044260915ea259b1e64de3defea - kernel-open/common/inc/nv-ioctl-numbers.h
+19cfcbf5a3021aa9aaa0ceacbb6711e7f7a6e09e - kernel-open/common/inc/nv_uvm_user_types.h
+cfcd2ef5eaec92f8e4647fff02a3b7e16473cbff - kernel-open/common/inc/nv_uvm_types.h
+b642fb649ce2ba17f37c8aa73f61b38f99a74986 - kernel-open/common/inc/nv-retpoline.h
+3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - kernel-open/common/inc/nv-gpu-info.h
+cda75171ca7d8bf920aab6d56ef9aadec16fd15d - kernel-open/common/inc/os/nv_memory_type.h
+70b67003fda6bdb8a01fa1e41c3b0e25136a856c - kernel-open/common/inc/os/nv_memory_area.h
+11b09260232a88aa1f73f109fdfab491a7b73576 - kernel-open/nvidia/nv-nano-timer.c
+dcf4427b83cce7737f2b784d410291bf7a9612dc - kernel-open/nvidia/nv-reg.h
+0b8ff957fb14f20ba86f61e556d1ab15bf5acd74 - kernel-open/nvidia/nv-imp.c
+6b09b5ef8a37f78c8e82074b06b40ef593c81807 - kernel-open/nvidia/libspdm_rsa.c
+b8d361216db85fe897cbced2a9600507b7708c61 - kernel-open/nvidia/libspdm_hkdf_sha.c
+66e2bfc490fb77e0b72a8192b719d3dc74d25d59 - kernel-open/nvidia/nv-pat.c
+26a30f2d26c2a97a6e2ee457d97d32f48b0bf25b - kernel-open/nvidia/nv-vm.c
+b8a770cea0629c57d8b0b3d7414d7b0f043ee8cf - kernel-open/nvidia/libspdm_ecc.c
+4c183eb39251cd78d90868ec6f75ebc7a37e6644 - kernel-open/nvidia/os-usermap.c
+8c30b6230439edcbec62636cc93be512bca8637f - kernel-open/nvidia/nv-usermap.c
+7af675f85642229b7e7de05dcadd622550fe7ad7 - kernel-open/nvidia/nv-vtophys.c
+d11ab03a617b29efcf00f85e24ebce60f91cf82c - kernel-open/nvidia/nv-backlight.c
+ef8fd76c55625aeaa71c9b789c4cf519ef6116b2 - kernel-open/nvidia/libspdm_hkdf.c
+1590794925ebd9cbc14aae8c47e0cc205a3f4b52 - kernel-open/nvidia/nv-rsync.h
+934a686ba8d7b77cce2d928cb3b04f611d9f9187 - kernel-open/nvidia/libspdm_aead.c
+f16e6a33b5004566333fb8b99504a0fb95d51226 - kernel-open/nvidia/nv-gpio.c
+8ed2c3b93eeaa52342d944e794180fd5d386688a - kernel-open/nvidia/libspdm_rsa_ext.c
+2e5d18118835c19c5ca7edee9bceeae613b9d7f9 - kernel-open/nvidia/nv-procfs.c
+3e820e66f556be10c0d9728d4187e43c30658736 - kernel-open/nvidia/nv.c
+65fe797fb5d4af2db67544ddb79d49ab1b7ca859 - kernel-open/nvidia/nv-dsi-parse-panel-props.c
+e3efae4ed920545062a2d06064df8be1a2a42135 - kernel-open/nvidia/nv-caps-imex.h
+8c64e75aaaa9ac6f17aae7ed62db23eb2e5b9953 - kernel-open/nvidia/nv_uvm_interface.c
+4563589496a93a2720e25807ca1be2565f03554c - kernel-open/nvidia/nv-bpmp.c
+aea97021d9aa023a357f009fcddc710f710ceb5e - kernel-open/nvidia/libspdm_x509.c
+f29e5bc1c7bd2c670780cdbb7275900a69f4d205 - kernel-open/nvidia/internal_crypt_lib.h
+13dc24fb41516c777328d4db64fa39a9e2c40191 - kernel-open/nvidia/nv-modeset-interface.c
+6ae527b69eebb44224b05e8cb3546757532d8a16 - kernel-open/nvidia/nv-dma.c
+fe204e3820d206b5b0c34a51084f39b97310305a - kernel-open/nvidia/nv-ipc-soc.c
+60d6ff5becc0ddbcf4b489b9d88c1dec8ccc67be - kernel-open/nvidia/nv-platform-pm.c
+c1f7c81018a414b7a657431b115a1b86d3ebe3e7 - kernel-open/nvidia/os-mlock.c
+c762aa186dc72ed0b9183492f9bd187c301d33d3 - kernel-open/nvidia/nv-kthread-q.c
+70bece14e12b9ffc92816ee8159a4ce596579d78 - kernel-open/nvidia/os-pci.c
+a677049bb56fa5ebe22fe43b0c4a12acd58a6677 - kernel-open/nvidia/nv-p2p.c
+e4d12f027cb5f74124da71bbbc23bcb33651834a - kernel-open/nvidia/nv-pci-table.c
+415b8f457c01417f32c998ae310b5a42dd5805cb - kernel-open/nvidia/nv-pci.c
+6dfc57ac42bed97c6ff81d82e493f05b369e0b84 - kernel-open/nvidia/nvspdm_cryptlib_extensions.h
+bba706cfbc04b3a880b5e661066f92e765fad663 - kernel-open/nvidia/nv-caps-imex.c
+ed3c83f62e4ccc4b53d886eedd4b47518a361393 - kernel-open/nvidia/nv-dmabuf.c
+66b7fad4d73a23153298ce777afb14d2c8be42c1 - kernel-open/nvidia/libspdm_internal_crypt_lib.c
+6d4fbea733fdcd92fc6a8a5884e8bb359f9e8abd - kernel-open/nvidia/rmp2pdefines.h
+b71bf4426322ab59e78e2a1500509a5f4b2b71ab - kernel-open/nvidia/nv-pat.h
+9a5a58bd6eb71a4c32e334a1a4e3326a17143cce - kernel-open/nvidia/os-interface.c
+1a91f5e6d517856303da448bea80d167b238e41c - kernel-open/nvidia/nv-i2c.c
+7d409e3f0255d17457bffbf318e2f9ea160680a5 - kernel-open/nvidia/nv-pci-table.h
+c50865d3070a0c3476ce24ff1ab4cc4e3f9ea4be - kernel-open/nvidia/detect-self-hosted.h
+7ae9a57b9e99fd2a3534798e52e57f7784738a53 - kernel-open/nvidia/nv-report-err.c
+3b27e4eaa97bd6fa71f1a075b50af69b1ec16454 - kernel-open/nvidia/libspdm_ec.c
+dd9e367cba9e0672c998ec6d570be38084a365ab - kernel-open/nvidia/libspdm_rand.c
+d8b8077adb7fd70eb9528d421bdef98c4378b57a - kernel-open/nvidia/nv-msi.c
+1cabb1e7fa825216c09f9d2f103657b0ac2dc85a - kernel-open/nvidia/nv-platform.c
+dd819a875c584bc469082fcf519779ea00b1d952 - kernel-open/nvidia/libspdm_aead_aes_gcm.c
+74958745f83b14c04aaa60248bf5c86ceef6b5cb - kernel-open/nvidia/nv-acpi.c
+4d19a1756af848d25fd2fd8cc691dcbcf0afb776 - kernel-open/nvidia/os-registry.c
+80f9ac558a57c60cbf70f3ecaf73c71e60c98885 - kernel-open/nvidia/nv-rsync.c
+7f5d251db1db4a179a67efea0178fbfda94f95d0 - kernel-open/nvidia/nv_gpu_ops.h
+642c3a7d10b263ab9a63073f83ad843566927b58 - kernel-open/nvidia/libspdm_hmac_sha.c
+7d53c2d27580d1b2cc56246d9972f3f310a3cd34 - kernel-open/nvidia/nv-clk.c
+0f28ebcdb723e836c923e40642429838fa9e86dc - kernel-open/nvidia/nvidia-sources.Kbuild
+99540efd2dfa6907b84e628e12370eefb0222850 - kernel-open/nvidia/nv-mmap.c
+11ac7a3a3b4def7fa31a289f5f8461ad90eca06b - kernel-open/nvidia/nv-tracepoint.h
+a14b9115cff1e5e7491737083588a5646c8c227b - kernel-open/nvidia/nv-report-err.h
+011f975d4f94f7b734efa23d3c8075321eaaf0e8 - kernel-open/nvidia/nv-memdbg.c
+1ba353673c266cb47ebcd07707e8ce125353e751 - kernel-open/nvidia/nvidia.Kbuild
+ac976b92e83f19125d6b3f7e95d9523e430b9b09 - kernel-open/nvidia/nv-p2p.h
+9b036018501d9b8543aabe7ec35dbe33023bb3e0 - kernel-open/nvidia/nv-host1x.c
+11778961efc78ef488be5387fa3de0c1b761c0d9 - kernel-open/nvidia/libspdm_sha.c
+02b1936dd9a9e30141245209d79b8304b7f12eb9 - kernel-open/nvidia/nv-cray.c
+2d61ad39b2356c9cfd8d57c1842e80a20272e37f - kernel-open/nvidia/nv-caps.c
+fc199c04b321db79ab5446574d9b994f8bfe6c24 - kernel-open/nvidia/libspdm_shash.c
+fa178a7209f56008e67b553a2c5ad1b2dd383aac - kernel-open/nvidia/hal/library/cryptlib/cryptlib_rng.h
+34de62da6f880ba8022299c77eddbb11d7fc68d2 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_hash.h
+8af43a3f0e4201aa6ff0099221a371fb1801e818 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_rsa.h
+cf94004b7b5729982806f7d6ef7cc6db53e3de56 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_aead.h
+9a6e164ec60c2feb1eb8782e3028afbffe420927 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_mac.h
+4991dfa8852edbdd1ffbd2d44f7b6ac4e1c8c752 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_ec.h
+7694b027d74d65561ce6cd15a8c0822e4b32b73a - kernel-open/nvidia/hal/library/cryptlib/cryptlib_sm2.h
+8b84a0cc1127f39652362007e048ea568c9cf80b - kernel-open/nvidia/hal/library/cryptlib/cryptlib_ecd.h
+2d7b566655ba8a05fae4ea4f6c806b75d7ebb5f3 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_cert.h
+0dcb1fd3982e6307b07c917cb453cddbcd1d2f43 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_dh.h
+7ff12b437215b77c920a845943e4101dcde289c4 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_hkdf.h
+16dd525c52448a32cc8da75d6a644d8a35efbfee - kernel-open/nvidia/library/spdm_lib_config.h
+53a9acf65cad6bc4869a15d8086990365c987456 - kernel-open/nvidia/library/cryptlib.h
+cfbaebb1091f7b1a8d2e3c54c2301ac45ade6c40 - kernel-open/nvidia/internal/libspdm_lib_config.h
+2ea094687fbee1e116cd0362cbeba7592439e0b6 - kernel-open/nvidia-drm/nvidia-drm-crtc.h
+bed7b5053d09473188061b0d7f6a3a65b64f72e0 - kernel-open/nvidia-drm/nvidia-drm-linux.c
+0f8e4535cf97fadea23c9848483355583f492131 - kernel-open/nvidia-drm/nvidia-drm-utils.c
+35034b6f174cd6a14b7d94a07f777794570959b4 - kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h
+072e1d6a260e348dada181162949eee190321ed8 - kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c
+e86dac2985f4e61f4e2676b3290e47cdcb951c46 - kernel-open/nvidia-drm/nvidia-drm-modeset.c
+f00a605cac7ffc7f309e3952c5d4cea7cbfc0b7e - kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h
+99642b76e9a84b5a1d2e2f4a8c7fb7bcd77a44fd - kernel-open/nvidia-drm/nvidia-drm.h
+763833186eabf1a0501434426c18161febf624d4 - kernel-open/nvidia-drm/nvidia-drm-fb.h
+4bada3ff7bfee8b7e222fc4cafb2ac97c67d7898 - kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h
+99a2e922a448b4d76318ec151378c8bbf5971595 - kernel-open/nvidia-drm/nvidia-drm-helper.c
+ae6efc1bbec8a5e948b7244f4801f0b4b398f203 - kernel-open/nvidia-drm/nvidia-drm.c
+94c28482252c983fd97532634ffafea0bf77337a - kernel-open/nvidia-drm/nvidia-drm-ioctl.h
+a4f77f8ce94f63f3ca2a970c1935d8da48ab5ccc - kernel-open/nvidia-drm/nvidia-drm-format.c
+b78e4f40234f908e722f172485e4466d80b7b501 - kernel-open/nvidia-drm/nvidia-drm-drv.h
+4154c5562cebd2747bd15fb302c19cb0cefe1c9c - kernel-open/nvidia-drm/nvidia-drm-connector.h
+c762aa186dc72ed0b9183492f9bd187c301d33d3 - kernel-open/nvidia-drm/nv-kthread-q.c
+e4d12f027cb5f74124da71bbbc23bcb33651834a - kernel-open/nvidia-drm/nv-pci-table.c
+47110750cf788e7d9ddb5db85be3658ac660a109 - kernel-open/nvidia-drm/nvidia-drm-fence.h
+73a1acab50e65c468cb71b65238a051bc306ae70 - kernel-open/nvidia-drm/nvidia-drm-encoder.h
+aa388c0d44060b8586967240927306006531cdb7 - kernel-open/nvidia-drm/nvidia-drm-helper.h
+d0b4f4383a7d29be40dd22e36faa96dae12d2364 - kernel-open/nvidia-drm/nvidia-drm-os-interface.h
+63a2fec1f2c425e084bdc07ff05bda62ed6b6ff1 - kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c
+a46422076a6a3e439349fbda4fc46e4add29b8e5 - kernel-open/nvidia-drm/nvidia-drm-drv.c
+19031f2eaaaeb0fa1da61681fa6048c3e303848b - kernel-open/nvidia-drm/nvidia-drm-gem.c
+71ea2d5b02bf8fb3e8cf6b7c84686e2edbc244d0 - kernel-open/nvidia-drm/nvidia-drm-encoder.c
+7d409e3f0255d17457bffbf318e2f9ea160680a5 - kernel-open/nvidia-drm/nv-pci-table.h
+9f57b8724205e03ca66b32fe710cd36b82932528 - kernel-open/nvidia-drm/nvidia-drm-conftest.h
+6e9838b169beffe149ba12625acb496504d36d50 - kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c
+d2525a36b7aec71982df80a89b861f220312103d - kernel-open/nvidia-drm/nvidia-dma-resv-helper.h
+a505f0aa98ebcf438307f6bacf9bf5a5be189839 - kernel-open/nvidia-drm/nvidia-drm-connector.c
+d5518597469dc874ee7e264b9400db51af2fcd44 - kernel-open/nvidia-drm/nvidia-drm-format.h
+437d87e7e4bd34ae3c67b27c2faaa394575acf70 - kernel-open/nvidia-drm/nvidia-drm-priv.h
+88b2035ddbba8c7f455209e61256b4e7b09c11dd - kernel-open/nvidia-drm/nvidia-drm-fence.c
+eff6a0b72274c8824b7a79e9aee261da3a6fb4f1 - kernel-open/nvidia-drm/nvidia-drm-gem.h
+6528efa1f8061678b8543c5c0be8761cab860858 - kernel-open/nvidia-drm/nvidia-drm-modeset.h
+46a41b0b3470190abcdc57a739238a9cd773812b - kernel-open/nvidia-drm/nvidia-drm.Kbuild
+995d8447f8539bd736cc09d62983ae8ebc7e3436 - kernel-open/nvidia-drm/nv_common_utils.h
+40b5613d1fbbe6b74bff67a5d07974ad321f75f0 - kernel-open/nvidia-drm/nvidia-drm-utils.h
+d924c494620760887546f428f87387d8ed5b99a6 - kernel-open/nvidia-drm/nvidia-drm-fb.c
+5eb8385042f3efa5c2e14d168cdb40b211467552 - kernel-open/nvidia-drm/nvidia-drm-crtc.c
+62a9b9b30fd7417d9ab085b2bfc731aadd9826f9 - kernel-open/nvidia-drm/nvidia-drm-os-interface.c
+ca86fee8bd52e6c84e376199c5f3890078bc2031 - kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h
+04ea084a5c5d496cc43103d1997053246a2fa94c - kernel-open/nvidia-modeset/nvidia-modeset-linux.c
+b2a5ddfd8dcb3000b9d102bd55b5b560730e81d5 - kernel-open/nvidia-modeset/nvkms.h
+c762aa186dc72ed0b9183492f9bd187c301d33d3 - kernel-open/nvidia-modeset/nv-kthread-q.c
+da6fd16e29300170aba8a652ea6296241f66243b - kernel-open/nvidia-modeset/nvidia-modeset.Kbuild
+2ea1436104463c5e3d177e8574c3b4298976d37e - kernel-open/nvidia-modeset/nvkms-ioctl.h
+13d4f9648118dd25b790be0d8d72ebaa12cc8d0e - src/common/sdk/nvidia/inc/rs_access.h
+579be4859587206460d8729804aab19180fb69bb - src/common/sdk/nvidia/inc/nvtypes.h
+993f17e3094243623f793ae16bd84b5fa3f335ec - src/common/sdk/nvidia/inc/g_finn_rm_api.h
+a54d77d45f9b0c5ae3fa8b59d2117145260800b6 - src/common/sdk/nvidia/inc/cc_drv.h
+b249abc0a7d0c9889008e98cb2f8515a9d310b85 - src/common/sdk/nvidia/inc/nvgputypes.h
+78a4b6b19a38de41527ef8b290754deca5906817 - src/common/sdk/nvidia/inc/nvcd.h
+ede1f77acb43e28391bceac058e00a7a8d799b0d - src/common/sdk/nvidia/inc/nvmisc.h
+46966ed7fc8d85931b49b12683c42666181f33f6 - src/common/sdk/nvidia/inc/nvimpshared.h
+befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - src/common/sdk/nvidia/inc/nv_stdarg.h
+f5a682339a89d2b119b43e5b9263dd67346ed3bc - src/common/sdk/nvidia/inc/cpuopsys.h
+cf1de27d5bcbd0adbe3c3b64466193b7d9094c71 - src/common/sdk/nvidia/inc/nverror.h
+4015c4557ea0790a2bdf5695832c89e31d75aee9 - src/common/sdk/nvidia/inc/nvlimits.h
+7c7888550b12eeb98128ea9ac771b897327f538e - src/common/sdk/nvidia/inc/nv-hypervisor.h
+b8700a911ac85770bf25d70b9692308af63966bd - src/common/sdk/nvidia/inc/nvstatuscodes.h
+95bf694a98ba78d5a19e66463b8adda631e6ce4c - src/common/sdk/nvidia/inc/nvstatus.h
+a506a41b8dcf657fb39a740ffc1dfd83835d6c89 - src/common/sdk/nvidia/inc/nvcfg_sdk.h
+1e7eec6561b04d2d21c3515987aaa116e9401c1f - src/common/sdk/nvidia/inc/nv-kernel-interface-api.h
+af0bc90b3ad4767de53b8ff91e246fdab0146e8b - src/common/sdk/nvidia/inc/nvsecurityinfo.h
+5cec5038e1f4a395a08b765c8361a9560f3312b7 - src/common/sdk/nvidia/inc/nvdisptypes.h
+c8b96af9d498f87cb9acde064648f9e84d789055 - src/common/sdk/nvidia/inc/nv_vgpu_types.h
+3b12d770f8592b94a8c7774c372e80ad08c5774c - src/common/sdk/nvidia/inc/nvi2c.h
+bbf6c09ef9bb10ab63d337bf011872f9073c3e5b - src/common/sdk/nvidia/inc/nvos.h
+9bca638f5832d831880f090c583fac6fc8cf6ee6 - src/common/sdk/nvidia/inc/dpringbuffertypes.h
+7de14a0c3cc8460a9c41e1ee32fda5409c5b9988 - src/common/sdk/nvidia/inc/mmu_fmt_types.h
+774318ced0fdcb199e99cf0fee9688259dd01a51 - src/common/sdk/nvidia/inc/nvfixedtypes.h
+ed51b6e2d454af3da36f9c5f4a8a7958d2c5f156 - src/common/sdk/nvidia/inc/alloc/alloc_channel.h
+ffe618524466cbbff64de55d88fd987e198bb8c9 - src/common/sdk/nvidia/inc/class/cl9271.h
+cef74c734fc7d2f32ff74095c59212d9e1d4cafc - src/common/sdk/nvidia/inc/class/cl84a0.h
+9f8a45cb986e3ad2bd4a8900469fe5f8b0c9463a - src/common/sdk/nvidia/inc/class/cl9870.h
+a6bb32861fa3f93ccb16490f0f2751a1ef333eed - src/common/sdk/nvidia/inc/class/cl0101.h
+e6818f1728a66a70080e87dac15a6f92dd875b4e - src/common/sdk/nvidia/inc/class/cl927d.h
+522682a17bacd5c1d6081c0020d094ee3d5c4a30 - src/common/sdk/nvidia/inc/class/clcb97.h
+89d4eeb421fc2be3b9717e333e9ff67bfffa24e8 - src/common/sdk/nvidia/inc/class/cl2080.h
+f558fddfdc088b86a1b479542b8e782e42a5bdce - src/common/sdk/nvidia/inc/class/clc37a.h
+d301edef2d1dd42382670e5a6ceef0d8caf67d28 - src/common/sdk/nvidia/inc/class/cl90cd.h
+1dfae8f11f8e92908f59a1c9493e84ce40d53b90 - src/common/sdk/nvidia/inc/class/cl0070.h
+95d99f0805c8451f0f221483b3618e4dbd1e1dd8 - src/common/sdk/nvidia/inc/class/cl90f1.h
+99a34eee22f584d5dfb49c3018a8cb9a7b1035ed - src/common/sdk/nvidia/inc/class/cl5070_notification.h
+c4f090f0dae5bdebf28c514c1b5a9bd8606aa56c - src/common/sdk/nvidia/inc/class/cl9097.h
+4b77798281f3754a80961308d44a70b1a717283b - src/common/sdk/nvidia/inc/class/clc46f.h
+bd2a88f8dbc64add00ad366aa3e76d116cb090b3 - src/common/sdk/nvidia/inc/class/cl0073.h
+e587a693bc1cee68983a7039ddbf16a3d3461d64 - src/common/sdk/nvidia/inc/class/cl9471.h
+ddbffcce44afa7c07924fd64a608f7f3fe608ccc - src/common/sdk/nvidia/inc/class/cl0071.h
+74c75472658eea77d031bf3979dd7fe695b4293f - src/common/sdk/nvidia/inc/class/cl0092_callback.h
+fd16daebcd23a680b988dde4ae99625434dcb8fa - src/common/sdk/nvidia/inc/class/cl0000.h
+c2d8bb02052e80cd0d11695e734f5e05ab7faeb5 - src/common/sdk/nvidia/inc/class/cl907dswspare.h
+5ca1d01dab6b9e814160ddce868d00aa9a1ead58 - src/common/sdk/nvidia/inc/class/clc873.h
+7c7406d40a09372dcae2aaf3fcad225c3dd2cf3f - src/common/sdk/nvidia/inc/class/cl9010_callback.h
+2240664ad950c9c2e64b6f4d18e05349bc91443c - src/common/sdk/nvidia/inc/class/clc573.h
+593384ce8938ceeec46c782d6869eda3c7b8c274 - src/common/sdk/nvidia/inc/class/cl900e.h
+101da471fe4e167815425793491e43193e407d9a - src/common/sdk/nvidia/inc/class/clc397.h
+dec74b9cf8062f1a0a8bbeca58b4f98722fd94b0 - src/common/sdk/nvidia/inc/class/cl0076.h
+46f74fc51a7ec532330e966cad032782e80808b8 - src/common/sdk/nvidia/inc/class/clcc7b.h
+053e3c0de24348d3f7e7fe9cbd1743f46be7a978 - src/common/sdk/nvidia/inc/class/cl0004.h
+71e34a03bcfa70edfbec4dbdeade82a932057938 - src/common/sdk/nvidia/inc/class/clc637.h
+447fe99b23c5dbe3d2a7601e8228a1a1831c6705 - src/common/sdk/nvidia/inc/class/clcc70.h
+89ed6dd37fca994e18e03a5410d865b88e1ff776 - src/common/sdk/nvidia/inc/class/clc87e.h
+03d873c3a0e0376440f23171640d9c517f7a34e9 - src/common/sdk/nvidia/inc/class/cl902d.h
+78259dc2a70da76ef222ac2dc460fe3caa32457a - src/common/sdk/nvidia/inc/class/clc37e.h
+b7a5b31a8c3606aa98ba823e37e21520b55ba95c - src/common/sdk/nvidia/inc/class/cl402c.h
+5ee1adc8d952212b37211c6f4f677ba672f5117c - src/common/sdk/nvidia/inc/class/clcc71.h
+bd12f7cdc3a01668b9c486dc6456f9263dd459ea - src/common/sdk/nvidia/inc/class/clc57b.h
+4b2f2194a1655cc6ae707866f130bbe357d0c21f - src/common/sdk/nvidia/inc/class/clb097tex.h
+5409e5af182ac18ef8d13380bdfe7cf2e83d37d7 - src/common/sdk/nvidia/inc/class/clc37b.h
+aeb4cbab8d1d0fbd0a5747fa36d6f56c00234b2d - src/common/sdk/nvidia/inc/class/clc097tex.h
+36fd6906e2688dad2e7ab648be7e070b9eb6f11d - src/common/sdk/nvidia/inc/class/clc971.h
+513c505274565fa25c5a80f88a7d361ffbcb08c3 - src/common/sdk/nvidia/inc/class/cl0005.h
+53e6252cd85a60698c49a721f4e41da1cb14e5bd - src/common/sdk/nvidia/inc/class/clc97dswspare.h
+645adeb829dbcf315bf67ff8387e7a5d982d7b6e - src/common/sdk/nvidia/inc/class/cl00de.h
+0f91db32d9e346b4d9f3762c9e59a8f8e5fd0903 - src/common/sdk/nvidia/inc/class/clcc7d.h
+a24c2a943c7ceceb8d015f5cd02148f8c4e7c23d - src/common/sdk/nvidia/inc/class/clb097.h
+691bb932ea3f60d2b9ad3e4d7fa53ab1a2a5e6c5 - src/common/sdk/nvidia/inc/class/clc870.h
+758e2fb8b5d89079f03be09d74964e9246cb180c - src/common/sdk/nvidia/inc/class/clc797.h
+f4af32374be4d05a2e55c97053a4f0d1f4b85154 - src/common/sdk/nvidia/inc/class/cl0000_notification.h
+1e578eb23dacca047e0b342cce3024b3134f8de9 - src/common/sdk/nvidia/inc/class/clc7b5.h
+941a031920c0b3bb16473a6a3d4ba8c52c1259d7 - src/common/sdk/nvidia/inc/class/cl917e.h
+b23cdfb66f40c6d9a903f602b8ff4526063b5a2d - src/common/sdk/nvidia/inc/class/clc097.h
+0de3548dde4e076cbd0446330b2d5ae4862c1501 - src/common/sdk/nvidia/inc/class/clc973.h
+ddb996ff90b80c0f58729b9ac89fa6d2d3950e49 - src/common/sdk/nvidia/inc/class/cla16f.h
+cb610aaae807d182b4a2ee46b9b43ebfa4a49a08 - src/common/sdk/nvidia/inc/class/clc57e.h
+9e1d2f90d77e23f1d2163a8f8d8d747058e21947 - src/common/sdk/nvidia/inc/class/cl9010.h
+7a14243de2b228f086810f968a1712627f1333fd - src/common/sdk/nvidia/inc/class/clc36f.h
+7c8e1f1055f9522cfb2935ea0aae612ef172c26e - src/common/sdk/nvidia/inc/class/clc370_notification.h
+64ad2ab88e2006bcdace06e7109981496c39f265 - src/common/sdk/nvidia/inc/class/clc87d.h
+36c6162356ac39346c8900b1e0074e4b614d4b5a - src/common/sdk/nvidia/inc/class/clc370.h
+5df0ce4eb733554e963eb3c7938396f58f2dd4d5 - src/common/sdk/nvidia/inc/class/cl2081.h
+a4d82d12346918edd0a7564a5c6cbfe849532b7f - src/common/sdk/nvidia/inc/class/clca70.h
+159b78a13e43a2afe6c17714a6f8619675480346 - src/common/sdk/nvidia/inc/class/clc86f.h
+6ddba2e93c046ae04f48685c73f8f2d9fe74a398 - src/common/sdk/nvidia/inc/class/clc67a.h
+83c6378ef27c8b640895a123801d27e6c4fd3754 - src/common/sdk/nvidia/inc/class/clc671.h
+7f75433a769a020d9f36996c855c8ce6ab39dd83 - src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h
+31ac68401e642baf44effb681d42374f42cf86b1 - src/common/sdk/nvidia/inc/class/cl00c3.h
+95ca0b08eed54d1c6dd76fdf9cf4715007df1b20 - src/common/sdk/nvidia/inc/class/cl0020.h
+20d5608c2d6e55efd6d1756a00739f7a05d3a2b3 - src/common/sdk/nvidia/inc/class/clc361.h
+9797f4758d534181eeaa6bc88d576de43ba56045 - src/common/sdk/nvidia/inc/class/clc574.h
+a39d75d3e479aebaf3849415e156c3cfe427298a - src/common/sdk/nvidia/inc/class/clc771.h
+eac86d7180236683b86f980f89ec7ebfe6c85791 - src/common/sdk/nvidia/inc/class/cl957d.h
+f7a2fea4725d59e95294c397ede001504b777b0d - src/common/sdk/nvidia/inc/class/clc697.h
+f3f33f70ec85c983acec8862ccaabf5b186de2bb - src/common/sdk/nvidia/inc/class/cl9270.h
+8b94512c9746c6976c4efeee0291bf44bb5e0152 - src/common/sdk/nvidia/inc/class/clcc73.h
+60d0c7923699599a5a4732decfbcb89e1d77b69e - src/common/sdk/nvidia/inc/class/cl9770.h
+e0c9a155f829c158c02c21b49c083168f8b00cbe - src/common/sdk/nvidia/inc/class/clc37dswspare.h
+499bc681107a2b7ad7af3d2211b582b8fb9d9761 - src/common/sdk/nvidia/inc/class/clcc7a.h
+e1bfd0c78f397e7c924c9521f87da8286bebe3f1 - src/common/sdk/nvidia/inc/class/cl84a0_deprecated.h
+2f291dc867e71f625c59f72787b9fb391a16d0e6 - src/common/sdk/nvidia/inc/class/clc638.h
+8d2dcc086f892dd58270c9e53e747513ed4b2f93 - src/common/sdk/nvidia/inc/class/clb06f.h
+3d262347ab41547d9ccc28a892d24c83c6b1158e - src/common/sdk/nvidia/inc/class/cla06f.h
+bae36cac0a8d83003ded2305409192995d264d04 - src/common/sdk/nvidia/inc/class/cl0001.h
+ba8f5899df4287b8440bcb9c8e09e10db73ebf12 - src/common/sdk/nvidia/inc/class/clc97a.h
+7bfcd7cf1735b2a54839e8a734e2227060ebf570 - src/common/sdk/nvidia/inc/class/clc197.h
+e231c552afb3a78da7341ee49bf36940f1f65202 - src/common/sdk/nvidia/inc/class/clc77d.h
+821396a58944ba4620f43cf6ee833b7a04d67193 - src/common/sdk/nvidia/inc/class/clc970.h
+1f1879fcddf3c3f1f6c44df0e51822ad1bfa1aae - src/common/sdk/nvidia/inc/class/cl9171.h
+a23967cf3b15eefe0cc37fef5d03dfc716770d85 - src/common/sdk/nvidia/inc/class/clc372sw.h
+02ff42b6686954e4571b8a318575372239db623b - src/common/sdk/nvidia/inc/class/cl30f1_notification.h
+4be055f206ef1049e8a5b824f9f4830eba0e224c - src/common/sdk/nvidia/inc/class/cla26f.h
+ef173136a93cdd2e02ec82d7db05dc223b93c0e1 - src/common/sdk/nvidia/inc/class/clc770.h
+a3e011723b5863277a453bfcfb59ce967cee0673 - src/common/sdk/nvidia/inc/class/clc670.h
+f33b9fdad6ceb534530fecfd16b40a71f5f5cfdc - src/common/sdk/nvidia/inc/class/clc56f.h
+02906b5ba8aab0736a38fd1f6d7b4f6026a5185b - src/common/sdk/nvidia/inc/class/clc57esw.h
+aa6387d7ce55a88789c5731e89dedde57115131c - src/common/sdk/nvidia/inc/class/clc97b.h
+86ab048c67a075349622c597fa9c4f2a9a3d8635 - src/common/sdk/nvidia/inc/class/cl9571.h
+9b2d08d7a37beea802642f807d40413c7f9a8212 - src/common/sdk/nvidia/inc/class/clc37d.h
+bd9f406625e6c0cce816a5ddfb9078723e7f7fb5 - src/common/sdk/nvidia/inc/class/clb0b5sw.h
+ab27db8414f1400a3f4d9011e83ac49628b4fe91 - src/common/sdk/nvidia/inc/class/cl987d.h
+2614a83d383b540f23ef721ec49af1dfde629098 - src/common/sdk/nvidia/inc/class/cl0080.h
+9db39be032023bff165cd9d36bee2466617015a5 - src/common/sdk/nvidia/inc/class/cl0002.h
+094bec72bfa8c618edc139bc353b20433f1c1da2 - src/common/sdk/nvidia/inc/class/cl2080_notification.h
+e72a7871d872b2eb823cc67c0a7d4cafb3d0ca18 - src/common/sdk/nvidia/inc/class/cl90ec.h
+0ad3b3e00dc83a0487bd96abd5fe467213aa51ad - src/common/sdk/nvidia/inc/class/clc597.h
+869e41c3ba08d704fcf00541075986de43d6b090 - src/common/sdk/nvidia/inc/class/cl917b.h
+b685769b5f3fed613227498866d06cc3c1caca28 - src/common/sdk/nvidia/inc/class/cl2082.h
+4c0d054bd0d9935d8d2cedba3f5e910d6b6f8ed3 - src/common/sdk/nvidia/inc/class/clc997.h
+1697a9ed528d633a1e78c0071868d7dff899af26 - src/common/sdk/nvidia/inc/class/clc57a.h
+8e85d29d4006dbd3a913fcc088be5e8c87bbdabb - src/common/sdk/nvidia/inc/class/cl0100.h
+15d1f928a9b3f36065e377e29367577ae92ab065 - src/common/sdk/nvidia/inc/class/cl0080_notification.h
+e3bd2cacd357e411bc1b6b7d7660ffa97c3a7ee3 - src/common/sdk/nvidia/inc/class/clb197.h
+16f9950a48c4e670b939a89724b547c5be9938bf - src/common/sdk/nvidia/inc/class/clc570.h
+060722ac6a529a379375bb399785cbf2380db4fd - src/common/sdk/nvidia/inc/class/clc373.h
+bd910ff84b9920af83e706a8ab37c68157a372c8 - src/common/sdk/nvidia/inc/class/clc97e.h
+b71d1f698a3e3c4ac9db1f5824db983cf136981a - src/common/sdk/nvidia/inc/class/cl9170.h
+2a031d85b85c4b1e5b278f6010ca8f33b2192de1 - src/common/sdk/nvidia/inc/class/cl90e7.h
+9ceb4ec8538818c8b1dcc7ffe885584b8e0f435e - src/common/sdk/nvidia/inc/class/cla097.h
+a9503a5558b08071f35b11df9a917310947c378b - src/common/sdk/nvidia/inc/class/cl00da.h
+d8000ab8ef59e64d17b4089c43953ca69b7f605f - src/common/sdk/nvidia/inc/class/clc67e.h
+6400b9ad3460dafe00424e3c1b1b7a05ab865a63 - src/common/sdk/nvidia/inc/class/cl50a0.h
+7032fd79731907df00a2fe0bbf6c0f4ce87f021d - src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h
+b11e7b13106fd6656d1b8268ffc15700fba58628 - src/common/sdk/nvidia/inc/class/clc371.h
+ff47d8a4b4bdb3b9cd04ddb7666005ac7fcf2231 - src/common/sdk/nvidia/inc/class/cl003e.h
+0285aed652c6aedd392092cdf2c7b28fde13a263 - src/common/sdk/nvidia/inc/class/cl00fc.h
+81b4e4432da8412c119e795662819cfe7558711f - src/common/sdk/nvidia/inc/class/cl917a.h
+38265d86eb7c771d2d3fc5102d53e6a170a7f560 - src/common/sdk/nvidia/inc/class/cl0041.h
+848c89981de73d681615266e4e983b74c2ef418f - src/common/sdk/nvidia/inc/class/cla06fsubch.h
+2d76476dba432ffc1292d2d5dd2a84ff3a359568 - src/common/sdk/nvidia/inc/class/cl0092.h
+b46b2cfcf72fc2f9722bd42cea8daaeeda861471 - src/common/sdk/nvidia/inc/class/clc871.h
+022e8405220e482f83629dd482efee81cc49f665 - src/common/sdk/nvidia/inc/class/clc77f.h
+fe7484d17bc643ad61faabee5419ddc81cf9bfd6 - src/common/sdk/nvidia/inc/class/cl9570.h
+bb79bbd1b0a37283802bc59f184abe0f9ced08a5 - src/common/sdk/nvidia/inc/class/cl0040.h
+6249715d9876f5825ad62f563bf070e93710a2ad - src/common/sdk/nvidia/inc/class/clc67d.h
+b1133e9abe15cf7b22c04d9627afa2027e781b81 - src/common/sdk/nvidia/inc/class/cl917c.h
+7ef21c4f4fd4032c8f25f8fb33669e692a26e700 - src/common/sdk/nvidia/inc/class/clcb97tex.h
+73b706e4916f4c70302387c88c8e14e7b2c1f4e6 - src/common/sdk/nvidia/inc/class/clc67b.h
+c40fd87fa6293d483b5bf510e2e331143ded9fa4 - src/common/sdk/nvidia/inc/class/cl9470.h
+20894d974d1f8f993c290463f1c97c71fd2e40b1 - src/common/sdk/nvidia/inc/class/cl30f1.h
+9f7f04825f3f218cc0c4610938935e2f0a73e13b - src/common/sdk/nvidia/inc/class/clc97d.h
+04ab1761d913030cb7485149ecd365f2f9c0f7da - src/common/sdk/nvidia/inc/class/cl0005_notification.h
+da8d312d2fdc6012e354df4fa71ed62ae4aac369 - src/common/sdk/nvidia/inc/class/cl927c.h
+158c98c8721d558ab64a025e6fdd04ce7a16ba9e - src/common/sdk/nvidia/inc/class/cl947d.h
+5416c871e8d50a4e76cbad446030dbedbe1644fd - src/common/sdk/nvidia/inc/class/cl00f2.h
+0b35244321b1f2f6647f8389f6fa7254c34790e2 - src/common/sdk/nvidia/inc/class/cl90cdtrace.h
+39161706917567f434a6fff736b22f3358923e68 - src/common/sdk/nvidia/inc/class/clc06f.h
+bc3674f2384cb3695ce5f035ed16e9c39bba4d1b - src/common/sdk/nvidia/inc/class/cl00fe.h
+dd4f75c438d19c27e52f25b36fc8ded1ce02133c - src/common/sdk/nvidia/inc/class/cl917cswspare.h
+435a34753d445eb9711c7132d70bd26df2b8bdab - src/common/sdk/nvidia/inc/class/cl917d.h
+b31019107ada7b0fb8247c09d93b95a630821fa8 - src/common/sdk/nvidia/inc/class/clcc7e.h
+31939808cd46382b1c63bc1e0bd4af953302773f - src/common/sdk/nvidia/inc/class/cl977d.h
+83427e3172c64c3b9ef393205ccc3b961ec65190 - src/common/sdk/nvidia/inc/class/cl5070.h
+db8dd50ad3e64fe0472d82c0940908d5da5e0321 - src/common/sdk/nvidia/inc/class/cla0b5.h
+28867d69a6ceac83da53a11a5e1ef87d9476f0be - src/common/sdk/nvidia/inc/class/clc57d.h
+8b07d7aca050be883fdc0d6f4b19eac0b0b6c796 - src/common/sdk/nvidia/inc/class/clc673.h
+c116d91177c6cbfb8c25e7f35bb49a8d5a51816c - src/common/sdk/nvidia/inc/class/cl008f.h
+4fc2133935b8e560c9a1048bc0b1f1c2f0a4464c - src/common/sdk/nvidia/inc/class/cl00c1.h
+5a6098f821e8faa19345313477726431f9271cde - src/common/sdk/nvidia/inc/class/clc661.h
+6db83e33cb3432f34d4b55c3de222eaf793a90f0 - src/common/sdk/nvidia/inc/class/cl00b1.h
+5b573deb4d68ccb67d9cccc11b28203c5db3d2f7 - src/common/sdk/nvidia/inc/ctrl/ctrl0002.h
+88947927d79e15df8cbf77a59ac883a29e970413 - src/common/sdk/nvidia/inc/ctrl/ctrlc638.h
+625af1df5c9453bd35a9e873ee5c77e73d5fd195 - src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h
+ade4a731f59c7cd16b4a60d318a19147b9918bb9 - src/common/sdk/nvidia/inc/ctrl/ctrl0004.h
+90843f8173a341deb7f1466cd69a17114c6b9e4f - src/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
+a305225ceda0a39c76ed61b819a1f4165f5644f5 - src/common/sdk/nvidia/inc/ctrl/ctrl00fe.h
+be3c9e2de8b8d33fe04389b224fa6ad95ecd089b - src/common/sdk/nvidia/inc/ctrl/ctrla06f.h
+c3e3213f548f93592f7d3dfd76e63a2102d800ec - src/common/sdk/nvidia/inc/ctrl/ctrl0076.h
+d7415e78725899f9d10fa2d5f03f3d62cef42f26 - src/common/sdk/nvidia/inc/ctrl/ctrlc36f.h
+9e343f73f46238075cef766cad499533559dfa28 - src/common/sdk/nvidia/inc/ctrl/ctrl00da.h
+f7601ce8c7c2d7a1143bff5280e3e5d9b5c4c147 - src/common/sdk/nvidia/inc/ctrl/ctrl906f.h
+97ac039e796faca6c9f78e16020fe96225b33492 - src/common/sdk/nvidia/inc/ctrl/ctrlc637.h
+fe7ce28fe76174a6de68236b44ea565ba2ea687b - src/common/sdk/nvidia/inc/ctrl/ctrl00de.h
+3ba6904c69aa7710c4561d5643b18fc41e141d4e - src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h
+b178067ba5f93e7fafb4c2ee0f5032acf9bc55d7 - src/common/sdk/nvidia/inc/ctrl/ctrla081.h
+58a5d3a55b2d9b29d4f1b1e7b5d4d02ae6885e30 - src/common/sdk/nvidia/inc/ctrl/ctrl003e.h
+16a24249210637987d17af6069ae5168404743ee - src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h
+58f8e48d5851cc10e3c5fd3655d7948b9f327ca0 - src/common/sdk/nvidia/inc/ctrl/ctrl2080.h
+b86c4d68c5758f9813f00cc562110c72ef602da7 - src/common/sdk/nvidia/inc/ctrl/ctrl90e7.h
+c042a366bc755def9e4132e2768c1675871dbe65 - src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
+c8b2e0e64bb3cf3c562dee5fa7913035f82d8247 - src/common/sdk/nvidia/inc/ctrl/ctrl402c.h
+352825959d98fe9b47a474cfdd154d380c80d24e - src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h
+9d908bb15aecc9d8094e1b6c13301efba6032079 - src/common/sdk/nvidia/inc/ctrl/ctrl0080.h
+3fcf5dbb82508d88a040981a7ab21eac1466bb2b - src/common/sdk/nvidia/inc/ctrl/ctrl0073.h
+bfee287b190fd698735c5660592741ba5c25a8ea - src/common/sdk/nvidia/inc/ctrl/ctrl0020.h
+2e65ccd2704919780a152c69f53400a0dc5e6e41 - src/common/sdk/nvidia/inc/ctrl/ctrlb06f.h
+4fb7753f3502303314d9e8f853ee3b752f7e9317 - src/common/sdk/nvidia/inc/ctrl/ctrl0100.h
+8764e07e9d348163db4eb41b0c3cf32c76172c0d - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h
+5782a19aeaf9695c13940cf4532e41523a8460e3 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h
+f21c15122509a8843e676a2bd5e799c58cd96379 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h
+326b61039197db58d8369256f6d7dc9764aea421 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h
+e7452921bdbd036ca3a37c60c49829c05e95c2d5 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h
+5f3b68d39f14137d33f239408a6a13543f4ac966 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h
+d08ef822e97ee56984618d52ed3ed55ee395eadb - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h
+8fcc64b22b0f6cde40d5ecd23e5e2444277a5999 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h
+70d65d4f923ec0efd8931433ae50930d12f78a07 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h
+a33a1c1173962183793d84276e46c61d27ca867e - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h
+1b594c39d1439c3d1ecc24c4325b2ea8c2724548 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h
+0146d2b3ecec8760e76dacd8ce6bb75c343c6cac - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h
+11abea0cdf485863196de56169451980ee6c016a - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h
+4f0ccb0667bd3e3070e40f3f83bede7849bc78e4 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h
+08dda80bac8d3418ad08e291012cf315dc9e5805 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h
+28b06c8f8152dce2b2e684a4ba84acd25a8b8c26 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h
+add9e3867e3dbd2c11bed36604680af4aaa0f164 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h
+2ffb93d092df65570b074ad97f0bb436a1c66dff - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
+79fd7ed84cb238ea90ea3691f40ea7140034d3dc - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h
+2ea79d79223b06633fb7f541ebbe5a300ba3885f - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h
+44c9aa512eb0b9b92cace9e674299f2a9227c37c - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h
+a3328cf6633f9b04258eff05ce30e66cc6930310 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h
+a427892e601a4ca4f88cc5778ff78895324f3728 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h
+92ff82d1045933baa79958a9f6efd451b0123e95 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h
+7ef9e10955708592e92e127eb3fb372adff44818 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h
+3c1bd0db339456c335acd50a75ace42cb8bbe6f8 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
+be10e3f4a9dd2f2ab35305ee0af628ef339b25a7 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
+db66195c8e7252c5f424953275cbb7be90a17ba8 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h
+c74ac448c3382d92e662804b56e73edd748e2678 - src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83debase.h
+7318f74523bb6a015e561dba1a06b47a278d856d - src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h
+702d9cb471a344a25911449cc580f69f7155ab1c - src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h
+3f747a4fc98291329e0245a971248cf2c28a1b60 - src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h
+19c7eff334c591c803dcd93fc0818798c281df48 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbase.h
+c7dcbc0ae7454df6523c6deb5f07a70dc2fdbc15 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgpu.h
+882b13d54585a6fc5534d12b9cdcec29c8cde337 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fucodecoverage.h
+76fb63a6782ff1236303fdd7bf2698f42965a266 - src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7base.h
+00d2655f569187190bd117bdf37fe4ddd5e92320 - src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7bbx.h
+8064c31eb1e447561c415f9835aecac97d5f3517 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h +713aa1291aef3f79304ad35c5143a7576f242f63 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h +bb7955387f6a286927e7922019676ca0aba713e6 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h +35367f08b96510a5312653b5197d6bb34c0a3d00 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h +a0cf9dfb520e3320cd9c154c01cd2f1a7bbbd864 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h +c2066c407f81538047c435fffca2705c28107663 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h +d727b328e995a7d969ec036f2d5b52264568a7bf - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h +52f251090780737f14eb993150f3ae73be303921 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h +77eb4fab61225663a3f49b868c983d5d532ca184 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h +6ca26c7149455e43f32e8b83b74f4a34a24a2d29 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h +134d43961ea1d42fc36d75685fdd7944f92b0b53 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h +022feef64678b2f71ab70dc67d5d604054990957 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h +2a00952f0f3988c5425fec957a19d926ae75ba28 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h +79b38bbe679d397b48b78266aa5f50459fe5b5bc - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h +514d012dbfd9e056b7f729bccb213fa9193d433e - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370or.h +6ef99465758f71f420ac17765380cc37dbcac68a - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h +5f70c2eb6a144bc4d7ca8be63fa46391909e8201 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h +f4ed3ccff4720114d1aaed82484ed70cf07626db - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h +ba3b73356bf0d1409ecfd963b623c50ec83f1813 - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06finternal.h +bb0a5ff091ef854b19e7da0043b7b7b10232c3de - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fbase.h +1f25c9f215991f34fee94dafac5fad0e7460db1c - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h +ddeb0df51d5f662948f9098a5d85b40c8ab6504b - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h +e3fb93f0ff3469ec76cecdc6f0bf1c296551a2b1 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h +a138379dd76c468072f1862b8fc6ae79ee876b4e - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h +ee99443c1bd3441df474566622486b04c4502ac0 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h +44e1b06211eee31e42e81879f5220f26ddec70ae - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h +ff789d585a7f001b8bd32e07a268c635d39b17ab - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h +03f54e22b39ad5cf682eada7147c6c155f16b385 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h +e8d883de767aa995a374d8da56b5c9da8787cb1d - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h +8fdb493bda6119025c1d00f289a6394e7dcd1b53 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h +cfa32c37f373eeef53aedc3f4dffff1634c122e8 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h +41a0a14e04527fa2c349d2895bb41affd154c999 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h +ecd312fabb249a25655e151cee3615c5ab61ffa7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h +c30b5995d353e68623b32fea398f461351e3b8f1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h +aa0f685b94bdae99a58aa1a45735b0593a2e6f5a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h +aa86ffd04a55436ecacbedb1626f6187bbddedf7 - 
src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h +3423a69bba50e1405b5a7d631bfff1f6f0a1673f - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h +1990d0c4fa84c6d078282d4d7d0624ccb0325ce7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h +146263409e5304f661da349b56761ab7403144bd - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h +8b622186edb156e980d02bd59a71c01923d1aa23 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h +70dc706ea4ee7b143a716aae9e4f8c0bcef6c249 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h +0a156fc54f45386fabd06ef5ec11ba3a816fbfb7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h +c157e185d3c64ee9476ddc75881bfc5a5b8b997f - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h +785d96360f86bc53eb428fd3f4fbeda395400c8a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h +b8e8c5ccab01d7997d1fd5579a690cb3279a8ab3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h +b2eecbca32d87b939858bf0b22f93c06b49b3a04 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h +24a891a02e1a882769d4da3454e4dfcf42b1ea6c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h +6969b092708d57f88b0f0fdbb3464c786f90710c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h +013bd8d50841ea314f5ea2bd507470f2c3aff831 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h +d63388ff48ca055c82bcd6148506eacd0e26b4dc - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vgpumgrinternal.h +96f72ec608cd198be995f3acd9c04afe7c7e6dc8 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h +359c6b06f2712a527d1ef08465179c14a8b4a751 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h +4c2af959d06536294d62b2366a6ba61ca744bd50 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h +d15e8e86ca66b3a69a774e322dfdd349b9f978b9 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spdm.h +898fa08818b657c27b456d952e7a4e09d8d197ee - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h +9933e90ad92eb7df2f64dcc30dcd680d5f7c530d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h +18d1a44b7113c1707bbf5c65fb1be790304c0bed - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h +0cd5e883dfafb74ce2ec9bccca6e688a27e6cfa9 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h +07f82ae90cde3c6e2e6c5af135c40e01660c39a3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h +c8f1c115d78bab309c0a887324b0dabfb8f9ea2d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h +ecceb8f7382c8f55c6ccd0330e14ccbc49fcd09c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h +2577a1d505a3d682e223fbcbc6d4c7d13162749d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h +d3969094e68f9d584ba9c6fb5457801caff6ccc1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmu.h +74f1abf45a2a0f60c82e4825b9abfa6c57cab648 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h +115f683e5926ae130de87e4cea805ef6915ed728 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h +d4ba227a522423503e5044c774dbcca692c48247 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h +97bb79e74b25134fa02a60d310b3e81170df6fd6 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h +baeb07c8bdadf835db754452f63d40956bc6a199 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h +338c7de5d574fe91cda1372c5221e754d4c4b717 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h +4e4a4f9e94f2d7748064949f4b16845829670bf6 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h +5ac6c9a299256935259eaf94323ae58995a97ad7 - 
src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h +e4441458a7914414a2092f36a9f93389ed65154a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h +b55e4cf81b6112868eb6f6cd9c1a3b32f8fcda49 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h +302f79771fcdba3122cf61affb53e0a3a3a27e6d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h +5c7b955ef5e6f6ca9c0944e8a2b2c4a1ae760e04 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h +93a9fa93eb3d1099991e4682b6228124220ca293 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h +7f1af5b788616bab285a73bab5098fb6d134b159 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h +51dbd71f1cd5a66dd7a5b0fbb753713d27ff937c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h +cf1757ff453132fb64be0dec6c50eb935db29784 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink_common.h +59254e4bdc475b70cfd0b445ef496f27c20faab0 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h +119432bbce99e91484a2bac79ca5257a36a7f98b - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h +7f15697ca8645f77352f88c2a84713f348e98a24 - src/common/unix/nvidia-3d/include/nvidia-3d-vertex-arrays.h +220ac9628fe5afa0191b8c20304402baf0f70353 - src/common/unix/nvidia-3d/include/nvidia-3d-fermi.h +23478354284aa1be69bc70fa4157aa408177829c - src/common/unix/nvidia-3d/include/nvidia-3d-volta.h +75859a11c0fae125a0619c47ead964416ac8d6ed - src/common/unix/nvidia-3d/include/nvidia-3d-pascal.h +e621c127011311e8f97c8784d8539751a820bf47 - src/common/unix/nvidia-3d/include/nvidia-3d-maxwell.h +07fc2cd8495309f1218b9ddee4a4809b6dcb65a3 - src/common/unix/nvidia-3d/include/nvidia-3d-types-priv.h +1276b525f23b582e029c2ddc9ed0115f8e9dafb4 - src/common/unix/nvidia-3d/include/nvidia-3d-hopper.h +5030b264e17b70df0c99bc9da4350bdb48f2f60a - src/common/unix/nvidia-3d/include/nvidia-3d-kepler.h +146b4f305bfe710622a878fe3e9afd4f834124b8 - src/common/unix/nvidia-3d/include/nvidia-3d-turing.h +61f0a408812c04a59fb8f12713ce34d2ed544fe3 - src/common/unix/nvidia-3d/include/nvidia-3d-surface.h +e7a4acaef431a49ca7efd6bf72b6e8b57fafbab0 - src/common/unix/nvidia-3d/include/nv_xz_mem_hooks.h +40a9c57cca5b2f8acfe3ead472dcf0adc9423050 - src/common/unix/nvidia-3d/src/nvidia-3d-vertex-arrays.c +af1a4d99bd19b72de120ba2046f35b95650985b1 - src/common/unix/nvidia-3d/src/nvidia-3d-volta.c +f78f737f1dfb52cf248543cced017a8fbad7b270 - src/common/unix/nvidia-3d/src/nvidia-3d-surface.c +4ea7a2a6811239760a1b56833fb07dbf8a99a10e - src/common/unix/nvidia-3d/src/nvidia-3d-hopper.c +e43e6ce6b9781d44b68868703fdbb779fc95f5d4 - src/common/unix/nvidia-3d/src/nvidia-3d-kepler.c +09fa5fbae25e08c819277566d7281f17305863f8 - src/common/unix/nvidia-3d/src/nvidia-3d-turing.c +e0ef9ab77cfdf207c800a9c067739add28632047 - src/common/unix/nvidia-3d/src/nvidia-3d-pascal.c +57f19f6aa7b896794aafacd978b2469d976f6f78 - src/common/unix/nvidia-3d/src/nvidia-3d-maxwell.c +08c29625af227debb72dd703630a754ac4fbeee0 - src/common/unix/nvidia-3d/src/nvidia-3d-core.c +7ca41841cc54bd597f5c10cc346b8f574b1c2acf - src/common/unix/nvidia-3d/src/nvidia-3d-fermi.c +d0331b7ebba0537af50bdf5815d9c048cbeb3388 - src/common/unix/nvidia-3d/src/nvidia-3d-init.c +569a662ce5f79dc450f44eeb7a0ff36580ba27fe - src/common/unix/nvidia-3d/interface/nvidia-3d-types.h +a06524af04de90562b08b6b26783232cf7ff01d4 - src/common/unix/nvidia-3d/interface/nvidia-3d-utils.h +3e97ecc773087c0c7f370faf0a9ff838793c9bd6 - src/common/unix/nvidia-3d/interface/nvidia-3d-color-targets.h +2d91e6f3ad425d3ca95de79ecb929b22cac57f52 - 
src/common/unix/nvidia-3d/interface/nvidia-3d-shaders.h +fd454a2318e970e6b1cb4a4b7b5633e4cb2e8b45 - src/common/unix/nvidia-3d/interface/nvidia-3d.h +34daeec12bbf45f0f85406afc56414da45afc2e6 - src/common/unix/nvidia-3d/interface/nvidia-3d-shader-constants.h +727210acfe72963aa6dddf1bcee91dc122897113 - src/common/unix/nvidia-3d/interface/nvidia-3d-constant-buffers.h +069b576dc1f03143999512cd03fc48fe18ed6706 - src/common/unix/nvidia-3d/interface/nvidia-3d-imports.h +2476f128437c0520204e13a4ddd2239ff3f40c21 - src/common/unix/common/inc/nv-float.h +881cbcc7ed39ea9198279136205dbe40142be35e - src/common/unix/common/inc/nv_assert.h +cb7c13757ca480e10b4ef3e3851d82ad5ccca3f1 - src/common/unix/common/inc/nv_mode_timings.h +d5253e7e4abd3ad8d72375260aa80037adcd8973 - src/common/unix/common/inc/nv_dpy_id.h +3e64a8fe60bb1266a769be8a5c0716e10c816b38 - src/common/unix/common/inc/nv_amodel_enum.h +995d8447f8539bd736cc09d62983ae8ebc7e3436 - src/common/unix/common/inc/nv_common_utils.h +edded9ca3d455444372fe6c497b2d61bd0cc3f96 - src/common/unix/common/utils/nv_memory_tracker.c +7bccb5a3dea9208f0fbd86d36efc369f215d5c3c - src/common/unix/common/utils/unix_rm_handle.c +26f2a36442266c5d2664d509ecfd31094a83e152 - src/common/unix/common/utils/nv_vasprintf.c +e903bbbecf4fb3085aaccca0628f0a0e4aba3e58 - src/common/unix/common/utils/nv_mode_timings_utils.c +667b361db93e35d12d979c47e4d7a68be9aa93b6 - src/common/unix/common/utils/interface/nv_mode_timings_utils.h +07c675d22c4f0f4be6647b65b6487e2d6927c347 - src/common/unix/common/utils/interface/nv_memory_tracker.h +8d9c4d69394b23d689a4aa6727eb3da1d383765a - src/common/unix/common/utils/interface/unix_rm_handle.h +9e008270f277e243f9167ab50401602378a2a6e8 - src/common/unix/common/utils/interface/nv_vasprintf.h +673bbd33569f55a900b5388a77d19edd3822ecf3 - src/common/unix/xzminidec/src/xz_dec_stream.c +9c67bdcbea04fbe1a5b2746549e502cdc368b54e - src/common/unix/xzminidec/src/xz_config.h +f2cfbcf1e2cb1d7545b5de609a4e7672bf8ae976 - src/common/unix/xzminidec/src/xz_dec_bcj.c +93af3bcdf863afa9655107c86f49aefdf9c05d90 - src/common/unix/xzminidec/src/xz_lzma2.h +fba46fe8f4a160d71a708578a85ab6731e4e024f - src/common/unix/xzminidec/src/xz_crc32.c +0ce26be0fb63a7ae52e2bb15a1770c80b9a5ac84 - src/common/unix/xzminidec/src/xz_stream.h +8365ec8d875fad74507d49228ad8959c66bbc360 - src/common/unix/xzminidec/src/xz_dec_lzma2.c +2ade48b4c53fc3bebf1587bc0a1a08b26cd5981d - src/common/unix/xzminidec/src/xz_private.h +c2a87873eeff2a8010bb8a2cb8d1df28a20a0097 - src/common/unix/xzminidec/interface/xz.h +4498dc65d71b2b8635b365550e5e521da14c8e6b - src/common/unix/nvidia-push/include/nvidia-push-priv.h +4847b168b4f5e78dbb92cfec80734789a9131b87 - src/common/unix/nvidia-push/include/nvidia-push-priv-imports.h +616dd99d8dda5dbe35032a5fc558ff48f7cc1620 - src/common/unix/nvidia-push/src/nvidia-push-init.c +0916485ec1ff275771d88a725dcbf586663dbc33 - src/common/unix/nvidia-push/src/nvidia-push.c +548f9e591d2c851b157575e1b83e25eb47bc61e6 - src/common/unix/nvidia-push/interface/nvidia-push-methods.h +5f5013bdbda9582252db2e92a105a57f24ca7d96 - src/common/unix/nvidia-push/interface/nvidia-push-init.h +f3576444d1dbcc4e9379bee6151ef8c7a382e276 - src/common/unix/nvidia-push/interface/nvidia-push-utils.h +918c4f2e2edd0a52c7085f758286dacd21b5b4c5 - src/common/unix/nvidia-push/interface/nvidia-push-types.h +b54add7dea08ff736ac27ee259f6ccb389c01f09 - src/common/unix/nvidia-headsurface/nvidia-headsurface-types.h +5d014581148b38eede1d31a1f48e388cf6eb7a45 - src/common/unix/nvidia-headsurface/nvidia-headsurface-constants.h 
+e1fbb040ea9d3c773ed07deb9ef5d63c8c8cab7a - src/common/inc/nvSha1.h +8f0d91e1a8f0d3474fb91dc3e6234e55d2c79fcc - src/common/inc/rmosxfac.h +bcad75550591ede46152403e40413f87e85b0a80 - src/common/inc/nvlog_defs.h +ebccc5c2af2863509e957fe98b01d9a14d8b0367 - src/common/inc/nv_list.h +0e970acfcadddd89fae91c812647fecb80c98d52 - src/common/inc/pex.h +73e2133709eb920a92fcebf7aaab958020493183 - src/common/inc/nvctassert.h +6fa5359ffe91b624548c226b6139f241771a9289 - src/common/inc/jt.h +489ce9f046d9c2ff95a1284ab5e04b5843b874ae - src/common/inc/nvVer.h +7ab322addb3e1ba880cee07dc0d26d882db097b0 - src/common/inc/nvCpuIntrinsics.h +d9c0905f374db0b9cc164ce42eab457d1ba28c53 - src/common/inc/nvop.h +d70c17a0693c8b5dbf7c83f693eec352ce22917c - src/common/inc/nv_smg.h +b4c5d759f035b540648117b1bff6b1701476a398 - src/common/inc/nvCpuUuid.h +4282574b39d1bcaf394b63aca8769bb52462b89b - src/common/inc/nvBinSegment.h +8c41b32c479f0de04df38798c56fd180514736fc - src/common/inc/nvBldVer.h +62e510fa46465f69e9c55fabf1c8124bee3091c4 - src/common/inc/nvHdmiFrlCommon.h +82aadec9509f41eab58727c3498dc24a30a0128e - src/common/inc/nvrmcontext.h +d74a8d4a9ae3d36e92b39bc7c74b27df44626b1c - src/common/inc/nv_mig_types.h +a346380cebac17412b4efc0aef2fad27c33b8fb5 - src/common/inc/nvlog_inc2.h +e670ffdd499c13e5025aceae5541426ab2ab0925 - src/common/inc/gps.h +963aebc9ec7bcb9c445eee419f72289b21680cdd - src/common/inc/hdmi_spec.h +987027bed503d8ce5ad01706aae4a16ee37f3e2d - src/common/inc/nvSemaphoreCommon.h +5257e84f2048b01258c78cec70987f158f6b0c44 - src/common/inc/nvlog_inc.h +4a88a536b71995db70e3a83a48d47072693ec69d - src/common/inc/nv_speculation_barrier.h +2408132586b69e580ff909f7f66451aa2882abff - src/common/inc/nvPNPVendorIds.h +4f7ca8fb43d6885cf60869ed241476032f20f5f3 - src/common/inc/nvUnixVersion.h +23edf9cce2608c494dad045b9466b8f3a18bab56 - src/common/inc/displayport/dpcd20.h +ecc26f6fae35818791733c1a56ea1b556bba7f4f - src/common/inc/displayport/displayport2x.h +aad6f14dacdb166a8d884cae6c5f382d98e5c46c - src/common/inc/displayport/dpcd14.h +27572a26d0a0a32f38606323ea6da65096bac039 - src/common/inc/displayport/displayport.h +8f7c9c19a76eca84fc2556841042c2f1c3d07a1a - src/common/inc/displayport/dpcd.h +4ee8a4d2a0fe12d348ac4c1a1e0a22bd272e146d - src/common/inc/swref/common_def_nvlink.h +e182f9538fea08b5d25f3e74083a7a12a7d49809 - src/common/inc/swref/published/nv_ref.h +641e9803749cbeeca1149c43fe2da5e6edf25137 - src/common/inc/swref/published/nv_arch.h +059493ce7d5390b7e859a19d1a24752df8126ace - src/common/inc/swref/published/turing/tu102/kind_macros.h +86a59440492fd6f869aef3509f0e64a492b4550d - src/common/inc/swref/published/turing/tu102/dev_mmu.h +38589617aab40efdd86b401a18d1e28b5d3b9f8e - src/common/inc/swref/published/disp/v05_02/dev_disp.h +1ea0c3d6ea0c79c01accc7b25d15b421ab49a55d - src/common/inc/swref/published/disp/v04_02/dev_disp.h +c01e4a95ede641ff5a9e6918b39db4d2099c91cb - src/common/inc/swref/published/disp/v05_01/dev_disp.h +04345c77f8c7a8b4825f0cb7fc96ca7c876af51c - src/common/inc/swref/published/disp/v04_01/dev_disp.h +1604a3fa3e3142118c82a1dc621cdac81806195a - src/common/inc/swref/published/disp/v03_00/dev_disp.h +c4f12d6055573a19f9211fdddd3778575e2a17fd - src/common/inc/swref/published/disp/v02_04/dev_disp.h +64c123c90018c5ee122b02b02cbccfcd5ec32cab - src/common/inc/swref/published/t23x/t234/dev_fuse.h +b5ce995e9e5afcd73d39642e31998e087ea133e8 - src/common/shared/nvstatus/nvstatus.c +08816a33e698308c76f3a026c29d0dcb41c5ee20 - src/common/shared/inc/compat.h +9231ac111286772170925e8f6cf92bde5914abb8 - 
src/common/shared/inc/nvdevid.h +750ecc85242882a9e428d5a5cf1a64f418d59c5f - src/common/displayport/inc/dp_object.h +a6ff1a7aee138f6771c5b0bbedb593a2641e1114 - src/common/displayport/inc/dp_messages.h +80380945c76c58648756446435d615f74630f2da - src/common/displayport/inc/dp_timeout.h +cdb1e7797c250b0a7c0449e2df5ce71e42b83432 - src/common/displayport/inc/dp_merger.h +070b4f6216f19feebb6a67cbb9c3eb22dc60cf74 - src/common/displayport/inc/dp_buffer.h +02b65d96a7a345eaa87042faf6dd94052235009c - src/common/displayport/inc/dp_messageheader.h +78595e6262d5ab0e6232392dc0852feaf83c7585 - src/common/displayport/inc/dp_auxbus.h +e27519c72e533a69f7433638a1d292fb9df8772e - src/common/displayport/inc/dp_crc.h +b2db6b37515f7c979e18686694546b9fa5145459 - src/common/displayport/inc/dp_hostimp.h +29ee5f4ef6670f06e96c07b36c11e3bad8bee6aa - src/common/displayport/inc/dp_address.h +575f4f97189ad6b4944bdd4127cdbee79d8c688d - src/common/displayport/inc/dp_groupimpl.h +cf09c061fa898cd84edd34a9457726abc501b03c - src/common/displayport/inc/dp_configcaps.h +afa1135330de2ce8f1a6d20e99b54f507b5adbbd - src/common/displayport/inc/dp_evoadapter.h +01f1dd58ed5bb12503fa45be7a6657cde0a857e2 - src/common/displayport/inc/dp_guid.h +cca426d571c6b01f7953180e2e550e55c629f0f4 - src/common/displayport/inc/dp_auxretry.h +a086546bf92d7e5e9adf66dcac012b3dc81c2597 - src/common/displayport/inc/dp_internal.h +f6e1b0850f5ed0f23f263d4104523d9290bb8669 - src/common/displayport/inc/dp_vrr.h +2f134665b274bb223c3f74e0ec5c6a0392fa6387 - src/common/displayport/inc/dp_discovery.h +07d22f84e6a386dad251761278a828dab64b6dd5 - src/common/displayport/inc/dp_bitstream.h +f09aae8321de23e0a48072d0e082aecb84a3ebbe - src/common/displayport/inc/dp_mainlink.h +cae50568f7bef4a2a69c4d718a5297b9ae15da3f - src/common/displayport/inc/dp_deviceimpl.h +eb9cdbb0a907926b1afd2a551ec19830f06ae205 - src/common/displayport/inc/dp_splitter.h +5bd3706ceea585df76a75dda7f9581b91ee8f998 - src/common/displayport/inc/dp_tracing.h +4a098c4d09dedc33b86748d5fe9a30d097675e9f - src/common/displayport/inc/dp_list.h +6c87ce702f215b21c1ab0064a2a85b3eda96ecec - src/common/displayport/inc/dp_edid.h +be558902391fb6cb5085652b560391b54befca4b - src/common/displayport/inc/dp_printf.h +379d3933c90eaf9c35a0bad2bd6af960a321465f - src/common/displayport/inc/dp_wardatabase.h +2016714a04d46ac8412ef55d2156d86ba4d594eb - src/common/displayport/inc/dp_auxdefs.h +e2075486b392d6b231f2f133922ac096ca4bc095 - src/common/displayport/inc/dp_ringbuffer.h +09c80a469f1e7e0edd6381578d66fd0e789bc0db - src/common/displayport/inc/dp_regkeydatabase.h +7622cb576c2ebbfe65c0f6132d8561ab1815f668 - src/common/displayport/inc/dp_qse.h +dd420c9e7c271d8bea047d431667524105473e95 - src/common/displayport/inc/dp_linkconfig.h +e02e5621eaea52a2266a86dcd587f4714680caf4 - src/common/displayport/inc/dp_linkedlist.h +430f42522a1e60f2420aa2e4e471aa20945d0253 - src/common/displayport/inc/dp_timer.h +0f71b80d0a0d53fc6581ef341a4e637a467a3795 - src/common/displayport/inc/dp_connectorimpl.h +c8c55dfc7b085b421b01bd9dc7b74abe6f9a0932 - src/common/displayport/inc/dp_connector.h +78ef30b2caf2cf4ff441b5613a796b93ae8973bd - src/common/displayport/inc/dp_messagecodings.h +1363fca23628f312c4b6b0c868b8a43f4a8a5a24 - src/common/displayport/inc/dp_watermark.h +d2b00a849a81f6c6092e3b2c4e7ed20fcee62b39 - src/common/displayport/inc/dptestutil/dp_testmessage.h +70b155b0da07a92ede884a9cec715f67e6b5c3e8 - src/common/displayport/src/dp_list.cpp +107b170d4496a754f22819e66794bcdc51256b7c - src/common/displayport/src/dp_sst_edid.cpp 
+fea946e5320e7de8e9229bca8d4a6a14b9e8db59 - src/common/displayport/src/dp_crc.cpp +2caf1cd4a99e55126883dbdd9f6b74883c71e171 - src/common/displayport/src/dp_messagecodings.cpp +ef3fefa8dd819d4086c054919b769ca18d058469 - src/common/displayport/src/dp_wardatabase.cpp +c49e37f3e225e60a74c71a2b571e542e12fd9bc9 - src/common/displayport/src/dp_watermark.cpp +e874ffeaeb6deec57605bf91eaa2af116a9762bd - src/common/displayport/src/dp_bitstream.cpp +d699ce22e5e2d641caa2fbacca3095d7dd7b3ffe - src/common/displayport/src/dp_evoadapter.cpp +5f2fb1683cff15175e3ef2276b721863886adc79 - src/common/displayport/src/dp_vrr.cpp +0717b87aafecbe2216e0f0b53ee088a980ef7ad4 - src/common/displayport/src/dp_auxretry.cpp +0670fb5302b1bd3fc65daa848f23e4086619b5e6 - src/common/displayport/src/dp_discovery.cpp +5c12759c27407e8df4c8f1f7bc6ec1595b6b1a63 - src/common/displayport/src/dp_messages.cpp +93ba2409667997fdbcb7af1a8f24ec4a0e15b62c - src/common/displayport/src/dp_timer.cpp +ffdd039884b1400eaf4d6d7cc81d0faba5282014 - src/common/displayport/src/dp_deviceimpl.cpp +c625716e5516a290ac501563e2a73eef9b4f7dd6 - src/common/displayport/src/dp_edid.cpp +af1672e8abb92d8d574d9605285753a8580c5d10 - src/common/displayport/src/dp_groupimpl.cpp +2cda981a5e36285ba4173573d074f8761e74f186 - src/common/displayport/src/dp_qse.cpp +5c7adbdfe295f7e1a1d4899a62bf95b456f84412 - src/common/displayport/src/dp_messageheader.cpp +d3c4c54f96cc02d37fab45521685426e5c38fb4d - src/common/displayport/src/dp_mst_edid.cpp +f56f92e32710b0342805b785d34ba1a9f2a54ed3 - src/common/displayport/src/dp_guid.cpp +eb7e47407bd04e871f891038cc08736d066ffaa9 - src/common/displayport/src/dp_connectorimpl.cpp +a62b774b7c45882b5854b91b600987c343c24966 - src/common/displayport/src/dp_linkconfig.cpp +0a8818da34b5321763c1f60cb8b6ea5e1a2837f1 - src/common/displayport/src/dp_splitter.cpp +24c0787ce5ec691c6b8edb351000265f47e0156a - src/common/displayport/src/dp_buffer.cpp +422a5d3426d5e1cc2346d9d5f86ccde66062ffdc - src/common/displayport/src/dp_merger.cpp +41589d1d5bfa4316d5d066a7201226baed5332db - src/common/displayport/src/dp_configcaps.cpp +a0b68fce10eb0b95518cfd291e2d282872225295 - src/common/displayport/src/dptestutil/dp_testmessage.cpp +f0a73cd173382d8abd4b0c70da8b32e144740bb5 - src/common/modeset/timing/nvt_dmt.c +15d7c508b621c877887962b2c27cdb6c7d1144a0 - src/common/modeset/timing/nvt_util.c +1341b987df8336c882e31d22d2141cadfb67272d - src/common/modeset/timing/nvtiming.h +f8faf3eabd24a1239e1d4faebdc40c0ffa713ff9 - src/common/modeset/timing/nvt_edid.c +c95a1c7914b0d1cba366f2a29e08eb93e0ad033d - src/common/modeset/timing/nvt_edidext_displayid.c +3d3a0889baed7a15c2adce54ba56c1dc783faffd - src/common/modeset/timing/dpsdp.h +ff92b05f8648cb4bc31c0f64707065bb56ff3eb3 - src/common/modeset/timing/nvt_dsc_pps.c +f75b1d98895bdccda0db2d8dd8feba53b88180c5 - src/common/modeset/timing/displayid.h +1997adbf2f6f5be7eb6c7a88e6660391a85d891b - src/common/modeset/timing/nvt_gtf.c +2737ed1d1eccd163f9cd12b1944f96a03c526b31 - src/common/modeset/timing/nvtiming_pvt.h +58b68f1272b069bb7819cbe86fd9e19d8acd0571 - src/common/modeset/timing/edid.h +6d221aad371436ba304448ba2cf04f89148a09bb - src/common/modeset/timing/nvt_edidext_displayid20.c +48761f63bc2794dfbde10492cc53137458cfcd0e - src/common/modeset/timing/nvt_dsc_pps.h +08ef97092899a3dc80251f61cedc73a851d70baa - src/common/modeset/timing/nvt_edidext_861.c +d7cb716eeae50ecfe44fb3c4c4476de598ab78d7 - src/common/modeset/timing/nvt_tv.c +080c1de64d099ecb1aeb9b0b2f176f7be2d609b5 - src/common/modeset/timing/displayid20.h 
+1c2e163802849848e9ae1586d38c4cd82494217f - src/common/modeset/timing/nvt_ovt.c +54aa88075d9ceb9c6ef99d9c15cb32751a33f8d0 - src/common/modeset/timing/nvt_cvt.c +e13cbe77f864afcddaccff7aeb1923cd02f1482f - src/common/modeset/timing/nvt_displayid20.c +f8911888bdd441666c03fe27381d7730b7dd9131 - src/common/modeset/hdmipacket/nvhdmipkt_internal.h +12118b508a757fd0a162d1e740d93685a67363ea - src/common/modeset/hdmipacket/nvhdmipkt.c +5b541b9ab6fe9333815a760d4043fef725b1c848 - src/common/modeset/hdmipacket/nvhdmipkt_C971.c +83d94f0a5eb7318d00d96115b0139f9f99052ddc - src/common/modeset/hdmipacket/nvhdmipkt_CC71.c +b390bf4f74d690068ff24dce90b79b227769ac2f - src/common/modeset/hdmipacket/nvhdmipkt_C671.c +206727972ab3a5f8a2cde0e153d63aef929b6c01 - src/common/modeset/hdmipacket/nvhdmipkt_0073.c +a71968671ce6b64e235de6902bebc2a06da7ae04 - src/common/modeset/hdmipacket/nvhdmipkt_9171.c +54a1b5e5aaf0848a72befc896ed12f1de433ad4f - src/common/modeset/hdmipacket/nvhdmipkt_9471.c +57dbf547549c6fe24eb51cc54185b321c263108f - src/common/modeset/hdmipacket/nvhdmipkt.h +9be7b7be94a35d1d9a04f269ff560dbbb7860a2a - src/common/modeset/hdmipacket/nvhdmipkt_9571.c +559406ebdbd7f810f1ecbeb3e78b6518834b90fe - src/common/modeset/hdmipacket/nvhdmipkt_class.h +e1df3885cd76f5159801c1f66f20b18537eaecf3 - src/common/modeset/hdmipacket/nvhdmipkt_C871.c +5e12a290fc91202e4ba9e823b6d8457594ed72d3 - src/common/modeset/hdmipacket/nvhdmi_frlInterface.h +67db549636b67a32d646fb7fc6c8db2f13689ecc - src/common/modeset/hdmipacket/nvhdmipkt_9271.c +e6d500269128cbd93790fe68fbcad5ba45c2ba7d - src/common/modeset/hdmipacket/nvhdmipkt_C371.c +764d216e9941d0dcf41e89b2a0ddd8acf55902c8 - src/common/modeset/hdmipacket/nvhdmipkt_common.h +b882497ae393bf66a728dae395b64ac53602a1a5 - src/common/softfloat/nvidia/nv-softfloat.h +be9407a273620c0ba619b53ed72d59d52620c3e4 - src/common/softfloat/nvidia/platform.h +f6d98979ab2d1e2b0d664333104130af6abbcad5 - src/common/softfloat/source/f64_to_i64_r_minMag.c +21a6232d93734b01692689258a3fdfbbf4ff089d - src/common/softfloat/source/s_roundToUI32.c +29321080baa7eab86947ac825561fdcff54a0e43 - src/common/softfloat/source/i32_to_f32.c +dafa667ee5dd52c97fc0c3b7144f6b619406c225 - src/common/softfloat/source/s_mulAddF64.c +108eec2abf1cddb397ce9f652465c2e52f7c143b - src/common/softfloat/source/f64_roundToInt.c +513a7d1c3053fc119efcd8ae1bcc9652edc45315 - src/common/softfloat/source/f32_lt.c +d19ff7dfece53875f2d6c6f7dd9e7772f7b0b7ec - src/common/softfloat/source/f32_to_i64_r_minMag.c +2db07bbb8242bc55a24ef483af6d648db0660de0 - src/common/softfloat/source/f32_add.c +c951c9dffa123e4f77ed235eca49ef9b67f9f3d2 - src/common/softfloat/source/s_subMagsF64.c +5c1026617c588bcf5f1e59230bd5bb900600b9ac - src/common/softfloat/source/f64_mul.c +5c4ee32cc78efc718aaa60ec31d0b00b1bee3c2c - src/common/softfloat/source/f64_to_ui64_r_minMag.c +6fa7493285fe2f7fdc0ac056a6367e90327905c2 - src/common/softfloat/source/f32_sub.c +da3b3f94a817909a3dc93ca5fa7675805c7979e0 - src/common/softfloat/source/f64_isSignalingNaN.c +d701741d8d6a92bb890e53deda1b795f5787f465 - src/common/softfloat/source/f64_le.c +baa7af4eea226140c26ffe6ab02a863d07f729fb - src/common/softfloat/source/f64_eq_signaling.c +2e5c29d842a8ebc5fbf987068dc9394cee609cc7 - src/common/softfloat/source/f32_to_ui64.c +054b23a974fc8d0bab232be433c4e516e6c1250a - src/common/softfloat/source/f64_lt_quiet.c +dde685423af544e5359efdb51b4bf9457c67fa3b - src/common/softfloat/source/f32_sqrt.c +fb062ecbe62a1f5878fd47f0c61490f2bde279dd - src/common/softfloat/source/s_roundToI32.c 
+8e58f0258218475616ff4e6317516d40ad475626 - src/common/softfloat/source/f32_lt_quiet.c +ab19c6b50c40b8089cb915226d4553d1aa902b0e - src/common/softfloat/source/f64_to_i32_r_minMag.c +86fdc2472526375539216461732d1db6a9f85b55 - src/common/softfloat/source/s_roundPackToF32.c +9266c83f3e50093cc45d7be6ab993a0e72af1685 - src/common/softfloat/source/s_roundPackToF64.c +2e0fec421f4defd293cf55c5f3af7d91f4b7d2cc - src/common/softfloat/source/ui64_to_f32.c +68843a93e1f46195243ef1164f611b759cf19d17 - src/common/softfloat/source/f32_le_quiet.c +00ab2120f71117161d4f6daaa9b90a3036a99841 - src/common/softfloat/source/f32_to_ui32.c +d0f8f08c225b60d88b6358d344404ba9df3038ec - src/common/softfloat/source/s_normSubnormalF32Sig.c +0108fe6f0d394ad72083aff9bb58507f97a0b669 - src/common/softfloat/source/ui32_to_f64.c +7bc81f5bc894118c08bfd52b59e010bc068ed762 - src/common/softfloat/source/ui32_to_f32.c +0adfa7e174cdb488bb22b06642e14e7fc6f49c67 - src/common/softfloat/source/s_roundToI64.c +c3ce12c227d25bc0de48fbcf914fc208e2448741 - src/common/softfloat/source/f64_sub.c +b9fd15957f7ae5effeccb5d8adaa7434b43f44e1 - src/common/softfloat/source/s_roundToUI64.c +29396b7c23941024a59d5ea06698d2fbc7e1a6ca - src/common/softfloat/source/f64_to_i64.c +ae25eea499b3ea5bdd96c905fd0542da11083048 - src/common/softfloat/source/s_normRoundPackToF64.c +b22876b0695f58ee56143c9f461f1dde32fefbf3 - src/common/softfloat/source/f64_to_ui64.c +b8c5ccc1e511637d8b2ba2657de4937b80c01c07 - src/common/softfloat/source/f32_le.c +0126e0fceb1fa7912f4d5b8c3a6ebb4a048eb98a - src/common/softfloat/source/f16_to_f32.c +1ff879eca2a273293b5cd6048419b2d2d8063b93 - src/common/softfloat/source/f64_mulAdd.c +0e9694d551848d88531f5461a9b3b91611652e9a - src/common/softfloat/source/f64_to_ui32_r_minMag.c +5a5e0d9f1ee7e8c0d1d4f9fbcf6eba330a5f1792 - src/common/softfloat/source/f32_isSignalingNaN.c +bc992c88f3de09e3a82447cf06dbde7c6604f7f8 - src/common/softfloat/source/f64_to_f32.c +1a86a6948bf6768bd23a19f1f05d40968c1d2b15 - src/common/softfloat/source/f64_rem.c +50daf9186bc5d0180d1453c957164b136d5ffc89 - src/common/softfloat/source/f64_eq.c +09cb0cdb90eb23b53cd9c1a76ba26021084710d1 - src/common/softfloat/source/s_addMagsF32.c +9f4d355d85fbe998e243fe4c7bbf8ad23062b6e2 - src/common/softfloat/source/i64_to_f64.c +fd40a71c7ebf9d632a384fadf9487cfef4f3ea98 - src/common/softfloat/source/s_shiftRightJam128.c +aaf6ccb77a1a89fa055a0fb63513297b35e2e54b - src/common/softfloat/source/f64_le_quiet.c +38bd00e9c4d2f1354c611404cca6209a6c417669 - src/common/softfloat/source/s_countLeadingZeros64.c +d9a86343e6cc75714f65f690082dd4b0ba724be9 - src/common/softfloat/source/s_roundPackToF16.c +0bf499c0e3a54186fa32b38b310cc9d98ccdcfe3 - src/common/softfloat/source/f32_eq.c +d4b26dc407a891e9ff5324853f1845a99c5d5cd2 - src/common/softfloat/source/f32_to_i32.c +296c40b0589536cb9af3231ad3dcd7f2baaa6887 - src/common/softfloat/source/f64_lt.c +0d8e42636a3409a647291fdb388001c2b11bba07 - src/common/softfloat/source/f32_to_f16.c +9a60700ce25578100d83d529e49f08f71cf35e17 - src/common/softfloat/source/s_normSubnormalF16Sig.c +ec1a797b11f6e846928a4a49a8756f288bda1dfa - src/common/softfloat/source/i32_to_f64.c +729e790328168c64d65a1355e990274c249bbb3a - src/common/softfloat/source/f32_to_i32_r_minMag.c +9a5b93459ace2da23964da98617d6b18006fab86 - src/common/softfloat/source/s_countLeadingZeros8.c +84b0a01ba2a667eb28b166d45bd91352ead83e69 - src/common/softfloat/source/i64_to_f32.c +4b37be398b3e73ae59245f03b2ba2394fc902b4d - src/common/softfloat/source/s_normSubnormalF64Sig.c 
+6f83fa864007e8227ae09bb36a7fdc18832d4445 - src/common/softfloat/source/f32_mul.c +daeb408588738b3eb4c8b092d7f92ac597cf1fc6 - src/common/softfloat/source/f32_rem.c +a94c8c2bd74633027e52e96f41d24714d8081eb4 - src/common/softfloat/source/s_approxRecipSqrt_1Ks.c +69dc4cc63b2a9873a6eb636ee7cb704cbd502001 - src/common/softfloat/source/f64_to_ui32.c +50b3147f8413f0595a4c3d6e6eeab84c1ffecada - src/common/softfloat/source/s_normRoundPackToF32.c +bbc70102b30f152a560eb98e7a1a4b11b9ede85e - src/common/softfloat/source/f64_sqrt.c +760fd7c257a1f915b61a1089b2acb143c18a082e - src/common/softfloat/source/s_addMagsF64.c +ebb4f674b6213fec29761fc4e05c1e3ddeda6d17 - src/common/softfloat/source/f32_mulAdd.c +4445b1fbbd507144f038fd939311ff95bc2cf5f1 - src/common/softfloat/source/ui64_to_f64.c +871cb1a4037d7b4e73cb20ad18390736eea7ae36 - src/common/softfloat/source/f32_to_ui64_r_minMag.c +ce37cdce572a3b02d42120e81c4969b39d1a67b6 - src/common/softfloat/source/f64_to_i32.c +c29536f617d71fe30accac44b2f1df61c98a97dc - src/common/softfloat/source/f64_div.c +54cbeb5872a86e822bda852ec15d3dcdad4511ce - src/common/softfloat/source/f64_add.c +e7890082ce426d88b4ec93893da32e306478c0d1 - src/common/softfloat/source/s_approxRecipSqrt32_1.c +824383b03952c611154bea0a862da2b9e2a43827 - src/common/softfloat/source/s_subMagsF32.c +00c612847b3bd227a006a4a2697df85866b80315 - src/common/softfloat/source/s_mulAddF32.c +7c8e5ab3f9bf6b2764ce5fffe80b2674be566a12 - src/common/softfloat/source/softfloat_state.c +e4930e155580a0f5aa7f3694a6205bc9aebfe7aa - src/common/softfloat/source/f32_to_f64.c +1484fc96d7731695bda674e99947280a86990997 - src/common/softfloat/source/f32_to_i64.c +2960704c290f29aae36b8fe006884d5c4abcabb4 - src/common/softfloat/source/f32_div.c +23b76c1d0be64e27a6f7e2ea7b8919f1a45a8e7c - src/common/softfloat/source/f32_to_ui32_r_minMag.c +fe06512577e642b09196d46430d038d027491e9f - src/common/softfloat/source/f32_eq_signaling.c +5e6f9e120a17cc73297a35e4d57e4b9cbce01780 - src/common/softfloat/source/s_mul64To128.c +e0ad81cfb5d2c0e74dc4ece9518ca15ffc77beaf - src/common/softfloat/source/f32_roundToInt.c +d8b0c55a49c4fa0b040541db6d5ff634d7d103e7 - src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c +a6d5c83f6a0542b33ac9c23ac65ef69002cfff9d - src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c +8efb3f7cd3217b5cd25896b4bad058c72fe5b89a - src/common/softfloat/source/8086-SSE/specialize.h +3d0dbc0a672d039a6346e1c21ddf87ffc9181978 - src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c +d152bc457b655725185bdff42b36bb96d6e6715e - src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c +1dd1b424087d9c872684df0c1b4063b077992d5f - src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c +252c816378fddab616b1f2a61e9fedd549224483 - src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c +21a11759ed2afd746a47c4d78b67640c2d052165 - src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c +98a850359fe08a7e39212f89ce96014ba80910da - src/common/softfloat/source/8086-SSE/s_f16UIToCommonNaN.c +0cbae7a5abc336331d460cbd3640d2cda02af434 - src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c +4cd1d6cfca3936a39aab9bc0eb622f5c7c848be1 - src/common/softfloat/source/include/softfloat_types.h +1ded4df85ff5fa904fa54c27d681265425be1658 - src/common/softfloat/source/include/primitiveTypes.h +5f589a4d48cc59a0e5762303df9ea4a06ca398da - src/common/softfloat/source/include/softfloat.h +9d8a025889f3ec0e1cca7c4b52308158e1f39226 - src/common/softfloat/source/include/primitives.h +f118cad66d3c8ee17a52cec97cd3dc7e7a1cf2bc - 
src/common/softfloat/source/include/internals.h +14045fa6330dc6ed20d35eac5b4c5909631bca90 - src/common/src/nv_smg.c +abccf0a8732b881d904d937287ced46edcde45ac - src/nvidia/Makefile +c5f16fdf43ca3d2845d120c219d1da11257072b0 - src/nvidia/nv-kernel.ld +dcf4427b83cce7737f2b784d410291bf7a9612dc - src/nvidia/arch/nvalloc/unix/include/nv-reg.h +4750735d6f3b334499c81d499a06a654a052713d - src/nvidia/arch/nvalloc/unix/include/nv-caps.h +3c61881e9730a8a1686e422358cdfff59616b670 - src/nvidia/arch/nvalloc/unix/include/nv_escape.h +7fc52a43b242a8a921c2707589fa07c8c44da11c - src/nvidia/arch/nvalloc/unix/include/nv.h +81592e5c17bebad04cd11d73672c859baa070329 - src/nvidia/arch/nvalloc/unix/include/nv-chardev-numbers.h +e69045379ed58dc0110d16d17eb39a6f600f0d1d - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-lockless-diag.h +d1b1a1bc1fa30c1a966e95447f7831a06340d2d0 - src/nvidia/arch/nvalloc/unix/include/nv-priv.h +7e0175a8006f06b1d5f5be078d851a4f01648b96 - src/nvidia/arch/nvalloc/unix/include/nv-nb-regs.h +2eb11e523a3ecba2dcd68f3146e1e666a44256ae - src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h +5f004c33f130e6c5cd275f9c85d46185e4e9b757 - src/nvidia/arch/nvalloc/unix/include/os_custom.h +499e72dad20bcc283ee307471f8539b315211da4 - src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h +824ffbe85c591c7423855bee7bf3193473ef2b70 - src/nvidia/arch/nvalloc/unix/include/osapi.h +669bd0c054b00a74e8996c18063fa9bbf5cd7690 - src/nvidia/arch/nvalloc/unix/include/os-interface.h +2ffd0138e1b3425ade16b962c3ff02a82cde2e64 - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numa.h +b3ecb82f142a50bdc37eafaeb86d67f10fbcf73f - src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h +af45762b6eeae912cc2602acf7dc31d30775ade7 - src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h +107d1ecb8a128044260915ea259b1e64de3defea - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h +3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h +98a5a3bd7b94e69f4e7d2c3a1769583c17ef5b57 - src/nvidia/arch/nvalloc/unix/src/os.c +a659a503a6fcffdcacd2b76ae6b1f156b4b9216c - src/nvidia/arch/nvalloc/unix/src/osmemdesc.c +b5ae9b8d551a3e5489605c13686fb6cce4579598 - src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c +a17aae37486b325442e447489b64add3694ab8b0 - src/nvidia/arch/nvalloc/unix/src/osunix.c +b5b409625fde1b640e4e93276e35248f0fccfa4c - src/nvidia/arch/nvalloc/unix/src/gcc_helper.c +07f9c0995f1fbbba9eb819321996b57c1d2b86cd - src/nvidia/arch/nvalloc/unix/src/exports-stubs.c +d8815125dbf79831b8fe55367bba60e7115243cc - src/nvidia/arch/nvalloc/unix/src/osinit.c +ef270b45ff3d72db9b319408c8bb060303e589f5 - src/nvidia/arch/nvalloc/unix/src/osapi.c +a7383deea9dcab093323d8dde1ede73f85f93343 - src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c +b1a6d0a1ca4307b8e8d9cf136c94ef7c9efbae4c - src/nvidia/arch/nvalloc/unix/src/registry.c +915ee6dbffff92a86d68ac38549b25aa1e146872 - src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c +ffea38efca6a43af9bc61bb6cb8c2b14c3d6fc20 - src/nvidia/arch/nvalloc/unix/src/escape.c +d1089d8ee0ffcdbf73a42d7c4edb90769aa79d8c - src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h +8530e3d1db60647a9132e10c2119a75295f18060 - src/nvidia/arch/nvalloc/common/inc/nv-firmware.h +1cd024cc06bba6f7c3663ca2d03fe25bd77761d3 - src/nvidia/generated/g_gpu_mgmt_api_nvoc.c +0be1c1ff5f200a9aa68cdf3d03bc4780e757a1ea - src/nvidia/generated/g_traceable_nvoc.h +998d18bc2f6e2cdd00cf383000b66be8e8778baa - src/nvidia/generated/g_nv_debug_dump_nvoc.h +4491368ac52cfda834bdd24df3b6f156c32ec3a9 - 
src/nvidia/generated/g_client_nvoc.c +4eb2331b2f9f8d8c01d62ad771702e9b42f22b65 - src/nvidia/generated/g_lock_stress_nvoc.h +6b5bf7b2f5dd000bfa2949e14642dd582ba4a378 - src/nvidia/generated/g_event_buffer_nvoc.h +cd5f4b0bc23710e5b6277ff214a62c4993e95581 - src/nvidia/generated/g_code_coverage_mgr_nvoc.c +b9903d23010ea9d63117c27d5fe0cfba09849fa4 - src/nvidia/generated/g_context_dma_nvoc.c +4b7aaad308f2f25b07d932fc0fe0c3327db522a9 - src/nvidia/generated/g_objtmr_nvoc.h +7bd355d08dc6f2509db22ed56f1c05ab97f5f620 - src/nvidia/generated/g_allclasses.h +4eea9bd7952613f08af07508e2e9c1c0344940e7 - src/nvidia/generated/g_gpu_mgr_nvoc.h +c5cad88aa7de5a04a3b6f9836f355347448d6a7b - src/nvidia/generated/g_rmconfig_util.h +db1d1e047d00780efbe4c1c1ae6e4fecd3ab49e8 - src/nvidia/generated/g_os_desc_mem_nvoc.h +1ec59322d0874153252a387dcb50bf6d7328d56e - src/nvidia/generated/g_system_mem_nvoc.c +21e57b9c63e847eeb5a29c218db2c5c37db83298 - src/nvidia/generated/g_gpu_nvoc.c +4613f3d42dbc899b278fca71c3aaae79159d7dbe - src/nvidia/generated/g_gpu_user_shared_data_nvoc.c +b55573cb02ff8129aa4f5aa050ac53d1f4fcfdb2 - src/nvidia/generated/g_rs_resource_nvoc.h +16c8d551a3a908ec194d39c88c5603cea436c9b7 - src/nvidia/generated/g_binary_api_nvoc.c +a232e1da560db2322a921a9f0dc260ad703af2b4 - src/nvidia/generated/g_mem_nvoc.h +c503ca5954b8f6ebdba96904a1616a55ce08a2d3 - src/nvidia/generated/g_device_nvoc.c +e7cc58e9f8173583bd253fa73df56324e48aa5ad - src/nvidia/generated/g_io_vaspace_nvoc.h +b93ab0b9e39ca3c5b397cbdba58e4d9894d4130f - src/nvidia/generated/g_rpc-structures.h +afda2b8579ed309e23be0ad1a835ee84fcbe535f - src/nvidia/generated/g_client_nvoc.h +e97edab623386f7d1534b4f053a66fc8659167f6 - src/nvidia/generated/g_event_nvoc.h +f4b2bffbdbb2b0b398e8dfe3420e46b2bf27839c - src/nvidia/generated/g_hal_nvoc.h +4626f4a1a4eadc3695d79454db25bd0153d1165d - src/nvidia/generated/g_resource_fwd_decls_nvoc.h +30035e0fb1ae8b816fc42b78a17eb30462640ce4 - src/nvidia/generated/g_kernel_head_nvoc.h +52ae6273ddf101e9715aed99991506cad8e96859 - src/nvidia/generated/g_disp_inst_mem_nvoc.c +abc769851bd523ee08cf829bf3864cf5475066ec - src/nvidia/generated/g_subdevice_nvoc.h +255c404719b18c2a3aec2a47948c0fbcf4affd4b - src/nvidia/generated/rmconfig.h +c7fda8cbe109ad2736694ce9ec0e2ab93d0e3f2c - src/nvidia/generated/g_mem_list_nvoc.h +f9bdef39159a8475626a0edcbc3a53505a0ff80a - src/nvidia/generated/g_os_hal.h +dc7bbba203ee5ff91b6f14eb3abfad8c15854e1d - src/nvidia/generated/g_mem_desc_nvoc.h +1702c9d021149c0f5c73ebeda7bea29e246af31d - src/nvidia/generated/g_nv_name_released.h +2e0c45e4186d44774286a71daf797c980c2ddf7a - src/nvidia/generated/g_objtmr_nvoc.c +9b78bc02a8fe0ec297167bb4bdb7f8255b94198b - src/nvidia/generated/g_disp_capabilities_nvoc.h +967d8c0d7d5c1271e82f30af992f48322695d367 - src/nvidia/generated/g_eng_state_nvoc.h +831cdf0767703c00918e70ef3933716b201781f1 - src/nvidia/generated/g_syncpoint_mem_nvoc.c +ce74dbd8f88f50af0b3ea3b3034395cd98eb08e8 - src/nvidia/generated/g_gpu_access_nvoc.c +08ad957117efefe2e04448bce1cad2dec0e984af - src/nvidia/generated/g_odb.h +033a6d6bac0829783afe8a582fa6c4f329be7f04 - src/nvidia/generated/g_hypervisor_nvoc.h +c1471919f6c19e1b576b7c636ba5ae7ab9d58177 - src/nvidia/generated/g_gpu_db_nvoc.c +f68b7e209e268d14b0b98686d1766683139b9b5f - src/nvidia/generated/g_system_nvoc.c +cdcab5a0094b9e9664f7a0e62ec31783617de5ab - src/nvidia/generated/g_code_coverage_mgr_nvoc.h +5e614b6db957a0ae77502ca6d5966bca506f8020 - src/nvidia/generated/g_gpu_group_nvoc.h +eb15207a28b8eed41182de6311ec48f5e321729f - 
src/nvidia/generated/g_gpu_user_shared_data_nvoc.h +ef9def144aaf1b2b292c9815c68a6007eff56dda - src/nvidia/generated/g_rs_server_nvoc.c +eb07ee114f8cfc039978cdb7501c3ea03c879864 - src/nvidia/generated/g_generic_engine_nvoc.c +d2f3d17e05337992bc031c823186583d62c10235 - src/nvidia/generated/g_chips2halspec_nvoc.h +ad94c2430328b91392db363158fa2279b794cc54 - src/nvidia/generated/g_gpu_resource_nvoc.h +c77048521f9c9890f14108c2c5457d78a85fe69d - src/nvidia/generated/g_gpu_access_nvoc.h +38a98487eec65d8807e47f99b013619c1537e983 - src/nvidia/generated/g_dce_client_nvoc.c +d09bde39b1f12490ea0a696d6915d521c9f13953 - src/nvidia/generated/g_rpc-message-header.h +9becba61ba5ff7580b353abfb87cbe0f37817195 - src/nvidia/generated/g_binary_api_nvoc.h +50f70075eac2515b189e2d07a06b13cfa826945f - src/nvidia/generated/g_rs_client_nvoc.h +f8b984c6bc09554753cfe6692dde2eb3171abc57 - src/nvidia/generated/g_disp_channel_nvoc.h +4931b316fc042705a5f094c8c23b0038f980b404 - src/nvidia/generated/g_generic_engine_nvoc.h +2a28557874bd51f567ef42c75fd4e3b09d8ad44d - src/nvidia/generated/g_gpu_arch_nvoc.c +a17058fe665949f1e3861fe092e29b229cefbe62 - src/nvidia/generated/g_mem_mgr_nvoc.h +7aa02b964507a8269d35dc56170955025b98bd1a - src/nvidia/generated/g_gpu_arch_nvoc.h +0b9296f7797325b80ff0900f19a3763b564eb26b - src/nvidia/generated/g_context_dma_nvoc.h +4210ff36876e84e0adf1e9d4afb6654c7e6e5060 - src/nvidia/generated/g_resserv_nvoc.h +3613b4ec9b285a4e29edefa833704789c887c189 - src/nvidia/generated/g_tmr_nvoc.c +517b6b986a3749c9a6dd0f22bbef6569cdb48d97 - src/nvidia/generated/g_rs_client_nvoc.c +7670f19682bcd6224c999a8f80e770368e735632 - src/nvidia/generated/g_lock_stress_nvoc.c +b348b1b465cb359ca3cf10f5e121714ffb95b582 - src/nvidia/generated/g_standard_mem_nvoc.c +54fa23e7cf0f07d625c25d5c08dad9cd1714f851 - src/nvidia/generated/g_standard_mem_nvoc.h +7e528d775caa7ff2bf4159c94fc2c2e4d3aadffc - src/nvidia/generated/g_chips2halspec_nvoc.c +40aa2c65168c893c725c983b2219ceff03d05608 - src/nvidia/generated/g_gpu_halspec_nvoc.h +17c4ce5e67bf8bc8f48a4e2b1b7752d4597703ad - src/nvidia/generated/g_kernel_head_nvoc.c +3ad8329c7f7d63633b7abf2cdd502e4257fa1726 - src/nvidia/generated/g_event_nvoc.c +7aba35752cd4c6447f844cd9432d7dc1bc77b33d - src/nvidia/generated/g_disp_capabilities_nvoc.c +fa3a5418a5d6bd7fb2b375ed7f7b64293fdf5f86 - src/nvidia/generated/g_ioaccess_nvoc.h +3c3961ddf6422294c3322e3b0a3c97ee94bfd010 - src/nvidia/generated/g_gpu_mgr_nvoc.c +b73b22368abf741cc0a5108b6c9585a81de28b57 - src/nvidia/generated/g_hal.h +6e219df1367ce7dc8f5f4a1f2209a7808a927871 - src/nvidia/generated/g_hal_mgr_nvoc.c +279538daf54163a7a53aab1330fba2c00fc3f234 - src/nvidia/generated/g_rmconfig_util.c +49e84272bbce137683232275b4f13a19c644c650 - src/nvidia/generated/g_prereq_tracker_nvoc.h +57eb0772bc280690eade3f5d54f786e252c75099 - src/nvidia/generated/g_object_nvoc.c +113297c44e702cd6535e007c1c5b2dd5e6f809dc - src/nvidia/generated/g_ioaccess_nvoc.c +216040d1883e8c4f1e8b47d9f6b279ec111d094d - src/nvidia/generated/g_hal_mgr_nvoc.h +113b10cf6cef2608ff4a288e2944d56da64f355d - src/nvidia/generated/g_gpu_group_nvoc.c +86bb88ccdfa34510d4acf21684e5b8bd32d820b2 - src/nvidia/generated/g_disp_sf_user_nvoc.h +5c0ed2e135f53ca09fbfb542bea88b304a2e1208 - src/nvidia/generated/g_event_buffer_nvoc.c +979082b8c018eee55d880265f7bfd294360816c6 - src/nvidia/generated/g_hda_codec_api_nvoc.c +f917323efc9429fcea8643eb9a8d5ee46b1b50a5 - src/nvidia/generated/g_eng_state_nvoc.c +437329a9c6e35e4b02945ec035448e704521280e - src/nvidia/generated/g_hda_codec_api_nvoc.h 
+fba7a2891fe10e837f5897034b8176a7307fbb12 - src/nvidia/generated/g_lock_test_nvoc.h +05269b7e73347b580f11decf0e1b9f467d0cb60c - src/nvidia/generated/g_dce_client_nvoc.h +e175ab2ef1fd5b64c9f0d665a26b2ed6f864b106 - src/nvidia/generated/g_vaspace_nvoc.h +cc7ec616b034ec01da1c5176b6c62759c3f31a06 - src/nvidia/generated/g_subdevice_nvoc.c +93f9738c0e8aa715592306ddf023adf6b548dcc4 - src/nvidia/generated/g_nvh_state.h +1745f3002758556d1b6d11a24d088ef87ba18bd5 - src/nvidia/generated/g_virt_mem_mgr_nvoc.c +8c9f26e959fa9a6a3c4a5cb8875458cc4a9bfe9e - src/nvidia/generated/g_os_nvoc.c +3b0e038829647cfe0d8807579db33416a420d1d2 - src/nvidia/generated/g_chips2halspec.h +a1fad555b8ad36437992afdd6e3e08d236167ac7 - src/nvidia/generated/g_journal_nvoc.h +d210a82e3dda39239201cfc1c2fcb2e971915c1e - src/nvidia/generated/g_device_nvoc.h +836f88914b046eadad9435786e1b474ee6690f5f - src/nvidia/generated/g_gpu_nvoc.h +ea0d27b0f05818e2e44be7d04b31f8843e1d05b7 - src/nvidia/generated/g_io_vaspace_nvoc.c +10529db24fb0501aa7f2aae25e0a87247ab5405c - src/nvidia/generated/g_resource_nvoc.h +5d47bed309c731bfee4144f61093192e7efcaa55 - src/nvidia/generated/g_disp_channel_nvoc.c +8771d8f2cf58f5e1d91ece01c1962677cebc5e4b - src/nvidia/generated/g_rmconfig_private.h +951c1c8969a621344d4d2a3ec61b1ad51b39ea79 - src/nvidia/generated/g_client_resource_nvoc.c +629b6daac6c9215dc982973b6adcf84314d34d57 - src/nvidia/generated/g_gpu_halspec_nvoc.c +29d5ccf874298c8156314a6eb23c209f2920b779 - src/nvidia/generated/g_gpu_resource_nvoc.c +fc26ab853e7c981c271ced30dfd78d95cd9bcdfd - src/nvidia/generated/g_gpu_db_nvoc.h +aa76beb8b33254fae884434b688093f9c7f12c87 - src/nvidia/generated/g_hal_private.h +86739259b5059c9b9ea3061bd8d1846385cb95f4 - src/nvidia/generated/g_sdk-structures.h +41bc858f6aca964a8977ad96911ecf1e8b46385d - src/nvidia/generated/g_hal_archimpl.h +f87916eae53dbea2f6bdbe80a0e53ecc2071d9fd - src/nvidia/generated/g_lock_test_nvoc.c +6b8597803d509372152e3915f15139186294add5 - src/nvidia/generated/g_gpu_class_list.c +2101385d1332db9a2902370a6b3c6117ca8b2737 - src/nvidia/generated/g_kern_disp_nvoc.h +d71ff42bc0fc0faf1999a6cbe88c4492a47e200e - src/nvidia/generated/g_os_nvoc.h +e58abb783f7561d0af925c2fca392c5165fcb199 - src/nvidia/generated/g_kern_disp_nvoc.c +d6a34926ab710156c9c4b2d9f12a44e6dafd43d1 - src/nvidia/generated/g_tmr_nvoc.h +c4c67b0e0284656b32c7b4547e22d521c442124a - src/nvidia/generated/g_disp_objs_nvoc.h +8e49b4d77641c98c6101dbc88a79290ceca6271a - src/nvidia/generated/g_rs_server_nvoc.h +af206c390549eff5d690ad07f3e58cd417f07f5f - src/nvidia/generated/g_hal_register.h +be659882e731b6a2019639265af46239c5c96ebf - src/nvidia/generated/g_hal_nvoc.c +db76e8669776fbfa901c60d9b9908af2fabc4703 - src/nvidia/generated/g_virt_mem_mgr_nvoc.h +797bd0197236fb0afc2c7e052487db803ac5baf0 - src/nvidia/generated/g_rs_resource_nvoc.c +884bed29fb4735ae0b4504fc874702acd29ee541 - src/nvidia/generated/g_mem_mgr_nvoc.c +3168beb42f15591a50339692d502e04977615a7b - src/nvidia/generated/g_prereq_tracker_nvoc.c +8e0071daaf5471a0fb3856705ec993704eaed4b5 - src/nvidia/generated/g_disp_inst_mem_nvoc.h +fb464cf839a1e76ac2a27346c7cd46ca921f1f56 - src/nvidia/generated/g_traceable_nvoc.c +8588d6f88ab5e8682952063fe0e2c840b334c622 - src/nvidia/generated/g_eng_desc_nvoc.h +de99523103dd7df0934cbe7aa21179ec7f241817 - src/nvidia/generated/g_os_desc_mem_nvoc.c +aa43dd8bdbdc71dc64d65e948221c7d5235588e7 - src/nvidia/generated/g_disp_objs_nvoc.c +9b6cc3a5e9e35139e9245cbe753fe9a552a488c0 - src/nvidia/generated/g_syncpoint_mem_nvoc.h +ae311b0968df9e9c9c2cec89e3060c472fc70a4c - 
src/nvidia/generated/g_mem_nvoc.c +dc7a782be9a0096701771cb9b2dc020c2f814e6d - src/nvidia/generated/g_system_nvoc.h +93a47004dd1c7529c6ee5f8abdf8b49c336fb681 - src/nvidia/generated/g_disp_sf_user_nvoc.c +3b5dfad8fccd7251cc177c7ea1b90265b4b6c901 - src/nvidia/generated/g_gpu_mgmt_api_nvoc.h +b53ec15a1aaf102d42b79881cd1b270afeb7205c - src/nvidia/generated/g_system_mem_nvoc.h +67b2d3ea81ebe7be679bcafc688ced0d64f16edf - src/nvidia/generated/g_object_nvoc.h +b1be7145e70d8811fbdbe07c0e99f32ad0e38429 - src/nvidia/generated/g_client_resource_nvoc.h +0d5b87b117d39b173a2a21a5cd71572bc2b26697 - src/nvidia/generated/g_resource_nvoc.c +51df7972f9932c2a5d800d4e2b3e4828e5aa2038 - src/nvidia/generated/g_vaspace_nvoc.c +0820fa0a975b2474ce0fdf64508cbd7758f60e5c - src/nvidia/generated/g_ref_count_nvoc.h +fff3ebc8527b34f8c463daad4d20ee5e33321344 - src/nvidia/inc/lib/ref_count.h +ec26741397ebd68078e8b5e34da3b3c889681b70 - src/nvidia/inc/lib/base_utils.h +f8d9eb5f6a6883de962b63b4b7de35c01b20182f - src/nvidia/inc/lib/protobuf/prb.h +601edb7333b87349d791d430f1cac84fb6fbb919 - src/nvidia/inc/lib/zlib/inflate.h +671c628ff9d4e8075f953766adcab9bfc54bd67c - src/nvidia/inc/libraries/poolalloc.h +1e8730e4abd210e3c648ef999ccc2b1f1839b94c - src/nvidia/inc/libraries/field_desc.h +8dd7f2d9956278ed036bbc288bff4dde86a9b509 - src/nvidia/inc/libraries/eventbufferproducer.h +1b28bd0ee2e560ca2854a73a3ee5fb1cf713d013 - src/nvidia/inc/libraries/nvoc/utility.h +d3cd73c0c97a291e76e28a6e3834d666e6452172 - src/nvidia/inc/libraries/nvoc/prelude.h +79b556739f0648cec938f281794663433fc5e048 - src/nvidia/inc/libraries/nvoc/runtime.h +91c67f272f0ada6f386e9f4a78fbde70aa5c883d - src/nvidia/inc/libraries/nvoc/object.h +c0f66cf7b2fb6ca24b5d4badede9dcac0e3b8311 - src/nvidia/inc/libraries/nvoc/rtti.h +a3db778e81f7188a700e008e4c5f5b1320ab811e - src/nvidia/inc/libraries/mmu/gmmu_fmt.h +1daea206ab581fa3554ff1811e1253a7d0053ac0 - src/nvidia/inc/libraries/mmu/mmu_fmt.h +56b8bae7756ed36d0831f76f95033f74eaab01db - src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h +b8e52b576e6668e4de7ea65a31e12c2bb491a591 - src/nvidia/inc/libraries/mapping_reuse/mapping_reuse.h +e772583f7fbf994fcf923d527d42372a716b4c57 - src/nvidia/inc/libraries/ioaccess/ioaccess.h +26853c886d848fb88e14da3aceab23f90589c05d - src/nvidia/inc/libraries/utils/nvprintf_level.h +c314121149d3b28e58a62e2ccf81bf6904d1e4bc - src/nvidia/inc/libraries/utils/nvmacro.h +72dcc09b77608263573bd34adf09393328eddf86 - src/nvidia/inc/libraries/utils/nvrange.h +b598ccd2721892b6915d4be432f1fc332477b666 - src/nvidia/inc/libraries/utils/nvbitvector.h +9aa5870d052a45c2489a6ea1a4f2e30fbc52d6be - src/nvidia/inc/libraries/utils/nv_enum.h +4849eb6c567e3ba952c22e702461c1a84ec88c6a - src/nvidia/inc/libraries/utils/nvprintf.h +1b265cb4fcc628862e4b27ae63a897871987eb76 - src/nvidia/inc/libraries/utils/nvassert.h +39113db75fdab5a42f9d8653ed1c90018b8b1df4 - src/nvidia/inc/libraries/containers/map.h +11ce1423312f4c34df19672e45678d0531cc299d - src/nvidia/inc/libraries/containers/ringbuf.h +5f116730f8b7a46e9875850e9b6ffb2a908ad6c2 - src/nvidia/inc/libraries/containers/btree.h +fc211c8276ebcee194080140b5f3c30fba3dfe49 - src/nvidia/inc/libraries/containers/queue.h +661b551f4795f076d7d4c4dab8a2ae2f52b0af06 - src/nvidia/inc/libraries/containers/list.h +47c69b04f95664e742f1a0a02711eeb1fb71000b - src/nvidia/inc/libraries/containers/eheap_old.h +5da20ecad3ff8405dea782792c6397d21ba76f7c - src/nvidia/inc/libraries/containers/vector.h +bcfc41a04576a4244c9dc3fe2a32c8e582f16c3e - src/nvidia/inc/libraries/containers/type_safety.h 
+5cabf8b70c3bb188022db16f6ff96bcae7d7fe21 - src/nvidia/inc/libraries/containers/multimap.h +4e26106c9c758c9e48418451ac01cf591ed74a31 - src/nvidia/inc/libraries/nvlog/nvlog_printf.h +41843197a5c11abc93df89b8f10a5f815e7fe6af - src/nvidia/inc/libraries/nvlog/nvlog.h +13aedc8ccf6acdd71be71b2219f79cd1af411273 - src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h +7c9c9456aaacbeffa11a9af54fe2250095ebbb00 - src/nvidia/inc/libraries/tls/tls.h +87a130551593551380ac3e408f8044cc0423c01a - src/nvidia/inc/libraries/nvport/nvport.h +2487ffc1eb1e50b27ba07e0581da543d80bdaa72 - src/nvidia/inc/libraries/nvport/safe.h +4bf45849bc1c6b89d7a79d761cce84a1d5026eac - src/nvidia/inc/libraries/nvport/debug.h +147d47ef4bd860394d1d8ae82c68d97887e2898b - src/nvidia/inc/libraries/nvport/core.h +6d698ca4fc5e48c525f214a57e1de0cc4aa9e36b - src/nvidia/inc/libraries/nvport/thread.h +6065fa9a525d80f9b61acb19e476066823df0700 - src/nvidia/inc/libraries/nvport/sync.h +a1d93b6ec8ff01a3c2651e772a826ee11a7781d7 - src/nvidia/inc/libraries/nvport/util.h +fb5a011275328b7c1edc55abc62e604462b37673 - src/nvidia/inc/libraries/nvport/atomic.h +16a35b2b6fd6eb855acd64d72480b285795f54b2 - src/nvidia/inc/libraries/nvport/memory.h +f31ed19d0588861b8c2b1489dd4e70d430110db5 - src/nvidia/inc/libraries/nvport/crypto.h +96c7c30c9f6503675f0903a16207a0ac06a6963d - src/nvidia/inc/libraries/nvport/cpu.h +53d843988669f61528cd45099ced749defa4cf7e - src/nvidia/inc/libraries/nvport/string.h +d1863efe7b8a63f1c5a7f47856b95ad31fd1a561 - src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h +9596b274389ea56acff6ca81db8201f41f2dd39d - src/nvidia/inc/libraries/nvport/inline/atomic_clang.h +a8c9b83169aceb5f97d9f7a411db449496dc18f6 - src/nvidia/inc/libraries/nvport/inline/util_generic.h +bbece45965ffbc85fbd383a8a7c30890c6074b21 - src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h +a7cb79bf7ac48e0f5642ecfd2e430bb85587dddf - src/nvidia/inc/libraries/nvport/inline/memory_tracking.h +1d6a239ed6c8dab1397f056a81ff456141ec7f9c - src/nvidia/inc/libraries/nvport/inline/util_valist.h +f267235fd8690e1b1d7485d3a815841607683671 - src/nvidia/inc/libraries/nvport/inline/safe_generic.h +645734ed505a4d977490e54b26cdf49657e20506 - src/nvidia/inc/libraries/nvport/inline/sync_tracking.h +a902e0f4265bd3dbd251afefa8ceb0389464d886 - src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h +2dec1c73507f66736674d203cc4a00813ccb11bc - src/nvidia/inc/libraries/resserv/rs_domain.h +fa5a5d8fa07cae6b8ef9d9135dc5d7e7624533d2 - src/nvidia/inc/libraries/resserv/resserv.h +972165721958839bc1d510fda9409d35ff89ec21 - src/nvidia/inc/libraries/resserv/rs_server.h +883bf7295d707014278e035f670d151275975d18 - src/nvidia/inc/libraries/resserv/rs_resource.h +2ad85ddca7cd230cea917e249871277ef1e59db1 - src/nvidia/inc/libraries/resserv/rs_client.h +cd033fe116a41285a979e629a2ee7b11ec99369f - src/nvidia/inc/libraries/resserv/rs_access_rights.h +df174d6b4f718ef699ca6f38c16aaeffa111ad3c - src/nvidia/inc/libraries/resserv/rs_access_map.h +5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - src/nvidia/inc/os/dce_rm_client_ipc.h +4aa45a3755ef172aa35279e87dd5cd83cab1bc2e - src/nvidia/inc/kernel/vgpu/rpc_hal_stubs.h +f2fd94a00e5debf1dc7f7ad4c00d417552fb0554 - src/nvidia/inc/kernel/vgpu/rpc.h +37598b6c25aac1a07cbc2bc5c76ebecdbca56eb6 - src/nvidia/inc/kernel/vgpu/rm_plugin_shared_code.h +fea4bbeb739723d3b80b5b3d8943e746e58fae07 - src/nvidia/inc/kernel/vgpu/dev_vgpu.h +f64d3723d0c475558bed799da8d2c5ec32a7d3a8 - src/nvidia/inc/kernel/vgpu/vgpuapi.h +8bf8282ce6112a2afb2e7f64d138d6ce90cf37c0 - 
src/nvidia/inc/kernel/vgpu/rpc_global_enums.h +69360faa428e157580fac445bcf601f44f7646c0 - src/nvidia/inc/kernel/vgpu/rpc_headers.h +b9af629ab29b527f7830b78f52b55b8535b8dbfd - src/nvidia/inc/kernel/vgpu/vgpu_util.h +e33b5b8c324c23d28e91324a87b47a24823dc5f5 - src/nvidia/inc/kernel/vgpu/rpc_vgpu.h +af9d17b204fdddc6f97280fdafd5a414ee8274dc - src/nvidia/inc/kernel/diagnostics/code_coverage_mgr.h +c6efd51b8b8447829a0867cd7fb7a5a5a2fb1e3d - src/nvidia/inc/kernel/diagnostics/traceable.h +fd780f85cb1cd0fd3914fa31d1bd4933437b791d - src/nvidia/inc/kernel/diagnostics/tracer.h +7e75b5d99376fba058b31996d49449f8fe62d3f0 - src/nvidia/inc/kernel/diagnostics/profiler.h +7615ac3a83d0ad23b2160ff8ad90bec9eb1f3c6c - src/nvidia/inc/kernel/diagnostics/journal.h +b259f23312abe56d34a8f0da36ef549ef60ba5b0 - src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h +7f3f19ed69089ba05f5cac44982547718dbf4662 - src/nvidia/inc/kernel/diagnostics/xid_context.h +3a28bf1692efb34d2161907c3781401951cc2d4f - src/nvidia/inc/kernel/diagnostics/journal_structs.h +8ef620afdf720259cead00d20fae73d31e59c2f7 - src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h +701375e96d771b4105f5fe4949ed4a542be4f3d7 - src/nvidia/inc/kernel/os/os_stub.h +408c0340350b813c3cba17fd36171075e156df72 - src/nvidia/inc/kernel/os/os.h +c8496199cd808ed4c79d8e149961e721ad96714e - src/nvidia/inc/kernel/os/capability.h +cda75171ca7d8bf920aab6d56ef9aadec16fd15d - src/nvidia/inc/kernel/os/nv_memory_type.h +70b67003fda6bdb8a01fa1e41c3b0e25136a856c - src/nvidia/inc/kernel/os/nv_memory_area.h +497492340cea19a93b62da69ca2000b811c8f5d6 - src/nvidia/inc/kernel/rmapi/event_buffer.h +499c3d0d76276ee9441d57948ea97877c48b1daa - src/nvidia/inc/kernel/rmapi/rmapi.h +b4bae9ea958b4d014908459e08c93319784c47dd - src/nvidia/inc/kernel/rmapi/event.h +0500c41247fdecd66f25428d279c6dab72bab13e - src/nvidia/inc/kernel/rmapi/binary_api.h +61e3704cd51161c9804cb168d5ce4553b7311973 - src/nvidia/inc/kernel/rmapi/resource.h +2baec15f4c68a9c59dd107a0db288e39914e6737 - src/nvidia/inc/kernel/rmapi/client.h +ac9288d75555180c1d5dd6dd7e0e11fb57a967f2 - src/nvidia/inc/kernel/rmapi/exports.h +835f193521f216d29c678a6018cd9791914b6c01 - src/nvidia/inc/kernel/rmapi/lock_stress.h +b9ff9b201bf2df8651f0c408158aa617638868f6 - src/nvidia/inc/kernel/rmapi/rmapi_specific.h +20adc296ffe79f27d5c24c70716c972a2e0c9a5d - src/nvidia/inc/kernel/rmapi/control.h +deed1715907c1dab8e3304bd4f63b688b72104b7 - src/nvidia/inc/kernel/rmapi/mapping_list.h +4453fe6463e3155063f2bdbf36f44697606a80a5 - src/nvidia/inc/kernel/rmapi/client_resource.h +6cc2de07b21fb21cef1b5b87fb2f1c935782262c - src/nvidia/inc/kernel/rmapi/rs_utils.h +35a65c31c6dcc2824011245ff6e2d5a30f95525c - src/nvidia/inc/kernel/rmapi/rmapi_utils.h +a92dbf2870fe0df245ea8967f2f6a68f5075ecaf - src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h +23e243f9abcb2a4f2d10d141303cd55677b04436 - src/nvidia/inc/kernel/rmapi/rmapi_cache_handlers.h +2724476b61b1790f1b7c293cc86e8a268125e11c - src/nvidia/inc/kernel/rmapi/param_copy.h +15f788614e08d805e963653460858cf013fe0178 - src/nvidia/inc/kernel/rmapi/lock_test.h +2b23f2dbd8f3f63a17a1b63ebb40a2fd7fd8801a - src/nvidia/inc/kernel/rmapi/alloc_size.h +893ec596aab365c2ff393bf2b96aea57f37d01f8 - src/nvidia/inc/kernel/platform/nvpcf.h +5e9928552086947b10092792db4a8c4c57a84adf - src/nvidia/inc/kernel/platform/acpi_common.h +e762205698aff945603324331b443bb2f20cf778 - src/nvidia/inc/kernel/platform/sli/sli.h +15754215ec49815f547dd999b2262a34670dde0b - src/nvidia/inc/kernel/core/locks.h +bdc4ab675c6f6c4bd77c3aaf08aa5c865b186802 - 
src/nvidia/inc/kernel/core/hal.h +ad378b09a277fba0efd3291d167e1d21071bdf1b - src/nvidia/inc/kernel/core/printf.h +a054be86a4476ba7b9a97052dfcfa4155e059cb9 - src/nvidia/inc/kernel/core/info_block.h +bffae4da6a1f9b7dc7c879587fd674b49b46dac1 - src/nvidia/inc/kernel/core/core.h +37f267155ddfc3db38f110dbb0397f0463d055ff - src/nvidia/inc/kernel/core/strict.h +b00302aec7e4f4e3b89a2f699f8b1f18fc17b1ba - src/nvidia/inc/kernel/core/hal_mgr.h +2d741243a6ae800052ddd478cc6aa7ad0b18f112 - src/nvidia/inc/kernel/core/prelude.h +ebc7c06d9e94218af4cf6b0c03e83650e391e5bc - src/nvidia/inc/kernel/core/thread_state.h +b5859c7862fb3eeb266f7213845885789801194a - src/nvidia/inc/kernel/core/system.h +07f45cd5fab5814e21b9e84425564b43776118fd - src/nvidia/inc/kernel/gpu/gpu_resource_desc.h +7010ff346c27b6453c091f5577672b8b1821808d - src/nvidia/inc/kernel/gpu/gpu_access.h +10ba0b9d4c67c8027b391073dab8dc4388f32fd7 - src/nvidia/inc/kernel/gpu/nvbitmask.h +59f72837997cb0c8ffc491d9a61c61e61b9dca94 - src/nvidia/inc/kernel/gpu/gpu_shared_data_map.h +bca121fb72d54afd714654f1a50eb7192da3135f - src/nvidia/inc/kernel/gpu/gpu_uuid.h +3f0f23a15201105779f3d25dc7628b42990c4b7e - src/nvidia/inc/kernel/gpu/gpu_timeout.h +1ac9c8bf155d1f25f790032b2b6306223199d9ff - src/nvidia/inc/kernel/gpu/gpu_arch.h +f17b704f2489ffedcc057d4a6da77c42ece42923 - src/nvidia/inc/kernel/gpu/gpu_resource.h +28d0d82b58ef13662e8896d3bbc42d340836294e - src/nvidia/inc/kernel/gpu/gpu_user_shared_data.h +e33e4d1537839e41898ff0fab8949e90ee1aed46 - src/nvidia/inc/kernel/gpu/gpu_device_mapping.h +426c6ab6cecc3b1ba540b01309d1603301a86db1 - src/nvidia/inc/kernel/gpu/eng_desc.h +5f5677bee452c64a1b890c3eb65e81fda66ddbaa - src/nvidia/inc/kernel/gpu/error_cont.h +d624e0c45cc8ad24e8c0b2fb5281c0c8a1c7a6d3 - src/nvidia/inc/kernel/gpu/gpu_engine_type.h +c33ab6494c9423c327707fce2bcb771328984a3c - src/nvidia/inc/kernel/gpu/gpu_halspec.h +145b1bc37e6c36b466ea33dd0579d22b530d8dd3 - src/nvidia/inc/kernel/gpu/kern_gpu_power.h +c771936af1de030194894db1312d847038ddb0cb - src/nvidia/inc/kernel/gpu/gpu_child_list.h +0e8353854e837f0ef0fbf0d5ff5d7a25aa1eef7c - src/nvidia/inc/kernel/gpu/eng_state.h +76b24227c65570898c19e16bf35b2cad143f3d05 - src/nvidia/inc/kernel/gpu/gpu.h +0a0c9a8f27feec3e90e15ce9879532ec77450de5 - src/nvidia/inc/kernel/gpu/gpu_acpi_data.h +9ed922ffed4454a10c5e2d8b3123ed653ec653e4 - src/nvidia/inc/kernel/gpu/gpu_ecc.h +f2947fefcaf0611cd80c2c88ce3fdea70953c1ed - src/nvidia/inc/kernel/gpu/gpu_child_class_defs.h +efc50bb2ff6ccf1b7715fd413ca680034920758e - src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h +24d01769b39a6dd62574a95fad64443b05872151 - src/nvidia/inc/kernel/gpu/subdevice/subdevice.h +576216219d27aa887beeccefc22bcead4d1234d7 - src/nvidia/inc/kernel/gpu/disp/kern_disp.h +277a2719f8c063037c6a9ed55ade2b1cb17f48ae - src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h +51a209575d3e3fe8feb7269ece7df0846e18ca2a - src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h +d0899f0e55e6675e267d4c72577be52e39b66121 - src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h +be7da8d1106ee14ff808d86abffb86794299b2df - src/nvidia/inc/kernel/gpu/disp/disp_objs.h +74bc902cd00b17da3a1dfa7fd3ebc058de439b76 - src/nvidia/inc/kernel/gpu/disp/disp_channel.h +b39826404d84e0850aa3385691d8dde6e30d70d4 - src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h +24397d051c941427e54cefc1062d8cd977a8725e - src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h +9a33a37c6cea9bad513aa14c942c689f28f7c0d8 - src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h +5179f01acf7e9e251552dc17c0dcd84f7d341d82 - 
src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h +22fc153d91a3917ac8e3f2aa94f0d52bfb11f7c2 - src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_commands_responses.h +173e9ecd2224a5259c79f2491302ba4415e82f70 - src/nvidia/inc/kernel/gpu/hfrp/kernel_hfrp.h +3118f2e9b47cfac98a92d195ce67ea63e50bf3ab - src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_common.h +1feab39692ea8796ac7675f4780dfd51e6e16326 - src/nvidia/inc/kernel/gpu/timer/objtmr.h +0cff83f4fdcc8d025cd68e0a12faaeead09fa03b - src/nvidia/inc/kernel/gpu/timer/tmr.h +71dd4fccd3b601508230a2b8b720aaf531a160ff - src/nvidia/inc/kernel/gpu/gsp/gsp_trace_rats_macro.h +e1979c71f3d5ffc92bf2306f9360b70bca0edf1f - src/nvidia/inc/kernel/gpu/gsp/message_queue.h +23d38dc3e66affac9342a839f5ba0d79a40f63ba - src/nvidia/inc/kernel/gpu/gsp/kernel_gsp_trace_rats.h +bb9b8ec9840109b15c174da02e7ac85c1e2c0c70 - src/nvidia/inc/kernel/gpu/rpc/objrpc.h +1cc21ad9136024f7437ef745db6652343588c50a - src/nvidia/inc/kernel/gpu/rpc/objrpcstructurecopy.h +7b7cf3b6459711065d1b849bf5acaea10b6400ca - src/nvidia/inc/kernel/gpu/intr/intr_common.h +1e3bebe46b7f2f542eedace554a4156b3afb51f1 - src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h +97d0a067e89251672f191788abe81cf26dcb335f - src/nvidia/inc/kernel/gpu/device/device.h +889ba18a43cc2b5c5e970a90ddcb770ce873b785 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h +6756126ddd616d6393037bebf371fceacaf3a9f1 - src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h +e4c67260b5cb693d695ad3d8aa96aaed45688322 - src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h +20416f7239833dcaa743bbf988702610e9251289 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h +407cad27681bde8235305464150e275a4a93b5d5 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h +5be45f3abdbb65a8eea959d98499ea8ff9a79de9 - src/nvidia/inc/kernel/gpu/mem_mgr/rm_page_size.h +76de30ac7b722cc5d59fc834d6b9c795ec14d7a5 - src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h +ce4e0f7177f46f4fc507a68b635e5395a3f7dde6 - src/nvidia/inc/kernel/gpu/dce_client/dce_client.h +2c48d7335bdb0b7ea88b78216c0aeab2e11e00c1 - src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h +5b151d0d97b83c9fb76b76c476947f9e15e774ad - src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h +e188d9f2d042ffe029b96d8fbb16c79a0fc0fb01 - src/nvidia/inc/kernel/gpu_mgr/gpu_db.h +ea32018e3464bb1ac792e39227badf482fa2dc67 - src/nvidia/inc/kernel/gpu_mgr/gpu_group.h +02d6a37ef1bb057604cb98a905fa02429f200c96 - src/nvidia/inc/kernel/mem_mgr/mem.h +a5f49a031db4171228a27482d091283e84632ace - src/nvidia/inc/kernel/mem_mgr/system_mem.h +d15991bc770c5ab41fe746995294c5213efa056b - src/nvidia/inc/kernel/mem_mgr/io_vaspace.h +5ae08b2077506cbc41e40e1b3672e615ce9d910f - src/nvidia/inc/kernel/mem_mgr/vaspace.h +0ce5d6370c086d2944b2e8d31ff72a510d98dc8f - src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h +4c386104eaead66c66df11258c3f1182b46e96ee - src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h +1a08e83fd6f0a072d6887c60c529e29211bcd007 - src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h +2d4afabd63699feec3aea5e89601db009fc51a08 - src/nvidia/inc/kernel/mem_mgr/standard_mem.h +24928c8b4e8b238f1921a1699f3af59bcff994ed - src/nvidia/src/lib/base_utils.c +a6134d6f5f3e3b0b4c274eb3b2d0a146644c842b - src/nvidia/src/lib/zlib/inflate.c +2e57601af217d0d8c4986abb593e8864e53e7e0b - src/nvidia/src/libraries/nvoc/src/runtime.c +9ea8bf51c44e500c9963a12a1e2a71ebffe6c4e8 - src/nvidia/src/libraries/nvbitvector/nvbitvector.c +0e7a9b9c697f260438ca5fda8527b0f4edc2de13 - src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c +e5ead344020dfc973ee7c7383e0f687a29642683 - 
src/nvidia/src/libraries/mapping_reuse/mapping_reuse.c +3c885d2c0e6cfb3f8585bddcba128b02e0196167 - src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c +ee7ea17829dfbbf9e6cd8d6c6fb2ada086b5d36e - src/nvidia/src/libraries/ioaccess/ioaccess.c +ca2ba7f19b705e39dbb8890a84ce84d34fbd8aa4 - src/nvidia/src/libraries/utils/nvassert.c +864bd314450490b687a652335a44fb407835152c - src/nvidia/src/libraries/containers/ringbuf.c +eb919a9e8711830c1c3f7fe71273e0a39862292e - src/nvidia/src/libraries/containers/vector.c +53aa343682f721f57058c7a17b1e872ca6fe7cea - src/nvidia/src/libraries/containers/map.c +7f58f03ec069ad5f5c64fedf4a484cc93473bd04 - src/nvidia/src/libraries/containers/queue.c +23c328fc27ad0317efe6ccd2da71cfd9db9da236 - src/nvidia/src/libraries/containers/multimap.c +ae669a466f1fecf67746a9fafc8c1119294c93d7 - src/nvidia/src/libraries/containers/list.c +9c80df385a47834da4f92dc11053ca40a37a7fe7 - src/nvidia/src/libraries/containers/btree/btree.c +a0e23ad69d805a7de439f0fbf79241c6466efdc2 - src/nvidia/src/libraries/containers/eheap/eheap_old.c +cccb1fedee02a240692688090e00ac1e289dec9e - src/nvidia/src/libraries/tls/tls.c +a045a19d750d48387640ab659bb30f724c34b8c8 - src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c +d047abe66dd8a459c15224cc056fc6f2176b0c6a - src/nvidia/src/libraries/nvport/util/util_gcc_clang.c +f0c486c1ad0f7d9516b13a02d52b4d857d8865b1 - src/nvidia/src/libraries/nvport/util/util_compiler_switch.c +9b69fbf3efea6ba58f9ba7cb0189c9264c994657 - src/nvidia/src/libraries/nvport/sync/sync_common.h +eb8b5fcab51c47f58a37958ddb38ff90991bcbbe - src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c +b2ae1406c94779f575d3e2233a7ab248ac10e74f - src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h +e2fec1a305dfec07456faec8ea5e75f601d76b5e - src/nvidia/src/libraries/nvport/memory/memory_tracking.c +c5a16e5bb7d304ffe5e83d7b27226cbecdbc7ce1 - src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c +db01179ad5e6333844bd3e31b62d0dc262c98875 - src/nvidia/src/libraries/nvport/memory/memory_generic.h +2c00bd224d17c0cc5469b5140f3be3d23b494922 - src/nvidia/src/libraries/nvport/string/string_generic.c +b387005657f81538fab5962d4aabbc5dc681aa1b - src/nvidia/src/libraries/nvport/core/core.c +702c73446bba35f88249cfe609ac0ca39dbd80ff - src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c +9ca28a5af5663dec54b4cd35f48a8a3d8e52e25f - src/nvidia/src/libraries/nvport/cpu/cpu_common.c +a305654bafc883ad28a134a04e83bbd409e0fc06 - src/nvidia/src/libraries/nvport/cpu/cpu_common.h +099c17e5931d5d881d8248ec68041fa0bbc2a9bc - src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c +1f2e9d09e658474b36d0b0ecd9380d0d2bcc86b2 - src/nvidia/src/libraries/resserv/src/rs_domain.c +f9cb28c60e7063ddb5b2a2af4a053a477c95c74b - src/nvidia/src/libraries/resserv/src/rs_server.c +dac54d97b38ad722198ec918668f175dc5122e4e - src/nvidia/src/libraries/resserv/src/rs_access_map.c +ede517ff5f53666a23ad2edec7e9fcd85c6ef7d1 - src/nvidia/src/libraries/resserv/src/rs_client.c +26d872a8495e38065af34aed9a60ab9a08898d40 - src/nvidia/src/libraries/resserv/src/rs_resource.c +408e1e5430e5e507e7e59adc292175150e50b825 - src/nvidia/src/libraries/resserv/src/rs_access_rights.c +304e2fb9bbf6d37358779d4e321f33ac76efcd39 - src/nvidia/src/kernel/diagnostics/nvlog.c +b3a29311cc22e2dae686f8ed2df6bc828aa826cf - src/nvidia/src/kernel/diagnostics/profiler.c +439543a41a36b0959b5f4c099f4adaa379b9f912 - src/nvidia/src/kernel/diagnostics/code_coverage_mgr.c +c1e5733847085bede6eb128eff3bad14549a31db - 
src/nvidia/src/kernel/diagnostics/nvlog_printf.c +d10c5031c3bc00ae1243729c39496df38d2c9ae3 - src/nvidia/src/kernel/os/os_init.c +2255d1ae2d942c3fed9a4b0a41020d0e49cb8648 - src/nvidia/src/kernel/os/os_timer.c +b887b661ffbe6c223c60f544b1fab32690cd8c75 - src/nvidia/src/kernel/os/os_sanity.c +f228bc86fd9149675cb554d6f596d81fdd4c3770 - src/nvidia/src/kernel/os/os_stubs.c +8800bf3ec679a1c3d36b89992b3f2f95365ec834 - src/nvidia/src/kernel/rmapi/entry_points.c +348c34e13f006f1320536876cb7393d8232e61de - src/nvidia/src/kernel/rmapi/rpc_common.c +8f033323f3ae264a79f779abb163442deb17e88a - src/nvidia/src/kernel/rmapi/rmapi.c +bc7c0b5bd06a1c58714b782d85f740632c6e152f - src/nvidia/src/kernel/rmapi/rmapi_cache_handlers.c +ac6a5b3adf15eac4a7bd9ae24981f6f5fc727097 - src/nvidia/src/kernel/rmapi/deprecated_context.h +b1e57ee17d6641412a4065317be3b81e5db94824 - src/nvidia/src/kernel/rmapi/event_notification.c +a965c5f028c1d47d7da0dd03dabbf8aebc817523 - src/nvidia/src/kernel/rmapi/rs_utils.c +a2ad052692006f70e97fd3d186f19c7ddfe80c4c - src/nvidia/src/kernel/rmapi/deprecated_context.c +7a0a8914b407f836627d8262de2de6cab2dd691d - src/nvidia/src/kernel/rmapi/rmapi_specific.c +d915b65380b59e557e5043f839c42d4105caa111 - src/nvidia/src/kernel/rmapi/rmapi_utils.c +2c5b12d5eb17c313138262cd1e42eb940a4d9ed8 - src/nvidia/src/kernel/rmapi/client.c +ab24efdee819d113fe72ec12c0e359c514151336 - src/nvidia/src/kernel/rmapi/resource_desc_flags.h +1745523e56fc0ff5a45d4b2473e13f0cc6f2afb1 - src/nvidia/src/kernel/rmapi/event_buffer.c +f70b6d7e8f21bf26d9c8171d62cbdf934fe3a30e - src/nvidia/src/kernel/rmapi/rmapi_stubs.c +09fc97bd7daa74a0b2e55fc5632b2f25464412dc - src/nvidia/src/kernel/rmapi/client_resource.c +c21223701bd7afd09e706616105f3f5f365afa5d - src/nvidia/src/kernel/rmapi/rmapi_finn.c +433c6091b3b986151e27ea952cef1dc83ff3095c - src/nvidia/src/kernel/rmapi/lock_test.c +682977753c878ccee6279e539cf11bee2b548752 - src/nvidia/src/kernel/rmapi/resource_desc.c +6dc3f6642c450043cc9b361037f4cb2091e7cb58 - src/nvidia/src/kernel/rmapi/sharing.c +00a6ef509ed8484d038c54b47642bc1a00125077 - src/nvidia/src/kernel/rmapi/lock_stress.c +3b53d6b8ef183702327b4bc3a96aa06f67475ddc - src/nvidia/src/kernel/rmapi/param_copy.c +1c9b26108c6b7f27c5f4fe84e10d83cfb32c9b5b - src/nvidia/src/kernel/rmapi/resource_list.h +3b9809740d88ab4b5b9c9d1adbd3ec304f6f6c7e - src/nvidia/src/kernel/rmapi/resource.c +41c397e2cc8c8b1c9c734c435d2d4c17cf709e63 - src/nvidia/src/kernel/rmapi/mapping_cpu.c +58ed3486109a54829f1afdf214c15529eaed678b - src/nvidia/src/kernel/rmapi/mapping.c +0172aa3770ca55bbfbd5e66f48f4e4820a4d5576 - src/nvidia/src/kernel/rmapi/event.c +e26021985ccfa2fb94c96310d9700df405817889 - src/nvidia/src/kernel/rmapi/control.c +6ee3cc915f68b5b70274eec219b7fd6799479459 - src/nvidia/src/kernel/rmapi/rmapi_cache.c +7a4abc27bdbcbb758545783f4182f200587ae3bd - src/nvidia/src/kernel/rmapi/binary_api.c +f821719c449e0300a3c27ebeaa3f4d6791ddaf60 - src/nvidia/src/kernel/rmapi/alloc_free.c +b7561ece996380512992736f947ddea0ba7f075e - src/nvidia/src/kernel/rmapi/resource_desc.h +72a6ae5bcae8eb4197047aaa5c1780b689544c87 - src/nvidia/src/kernel/rmapi/entry_points.h +4fbbb955e617d7b014e201a5263915939c87f884 - src/nvidia/src/kernel/rmapi/resource_list_required_includes.h +a16bffcad38862470b4424fa9a1b0d4013304600 - src/nvidia/src/kernel/core/hal_mgr.c +4d3f32dbc4cbe3d4d1301079eaf21005f74dea90 - src/nvidia/src/kernel/core/locks_common.c +e7195ca43692b6fbf6a3533437650c596cee88db - src/nvidia/src/kernel/core/locks_minimal.c +ee0bf4f81d33e9a7b6bbb2be27bb3973c8cb5b18 - 
src/nvidia/src/kernel/core/system.c +905a0f08067503374c757ed34d1ea87379ab4a71 - src/nvidia/src/kernel/core/thread_state.c +afa03f17393b28b9fc791bf09c4d35833447808d - src/nvidia/src/kernel/core/hal/hal.c +d3922085d63a7edf02b582fe0b6e3acba6124c25 - src/nvidia/src/kernel/core/hal/hals_all.c +8eac3ea49f9a53063f7106211e5236372d87bdaf - src/nvidia/src/kernel/core/hal/info_block.c +1f258d22d361a8902c27a4329e553a73b3fbe6e9 - src/nvidia/src/kernel/gpu/device.c +f520afc43afd9e40f779d2bdf3acc48ff7419625 - src/nvidia/src/kernel/gpu/eng_state.c +7ed54a614b756e32a61366d2009db26d1ef5fcc4 - src/nvidia/src/kernel/gpu/gpu_arch.c +1b2a50c873087a28cc4edd4a65945bcafc84bcf0 - src/nvidia/src/kernel/gpu/gpu_uuid.c +5bbac8b7323fe7f048e54b2ebc3ebe4f30655181 - src/nvidia/src/kernel/gpu/gpu.c +c7f5b73c217a181f5ff28886bf691ec7d528cb86 - src/nvidia/src/kernel/gpu/gpu_resource.c +2408846a2a5c24a102df13919f384c6675f56f29 - src/nvidia/src/kernel/gpu/device_ctrl.c +2b40a86a112c7643a69b094194c2ee1dd294f16a - src/nvidia/src/kernel/gpu/gpu_gspclient.c +261a5b014b3869c3ce5e830cf8b9529fa0b8a09d - src/nvidia/src/kernel/gpu/gpu_resource_desc.c +4e1be780ac696a61f056933e5550040a2d42c6bd - src/nvidia/src/kernel/gpu/gpu_device_mapping.c +57941830e179d534a7329608658c82fd91ff4a57 - src/nvidia/src/kernel/gpu/gpu_timeout.c +89a6229720a7d5276d73ad51a210ce6f60cedb08 - src/nvidia/src/kernel/gpu/gpu_user_shared_data.c +bc508781e640dbf756d9c9e43e75227d05b413c7 - src/nvidia/src/kernel/gpu/device_share.c +84c2c6a59313d36aa70c8a01cfedf1d1e7a3d931 - src/nvidia/src/kernel/gpu/gpu_access.c +d0d744c416a52404a52c35ede015629990934003 - src/nvidia/src/kernel/gpu/gpu_engine_type.c +12c1f9494317c34b1b9bfcc58bf7bee81b08c98e - src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c +ea626b20043182e3b374cb05d02c75b482fcd3a3 - src/nvidia/src/kernel/gpu/gpu_rmapi.c +099da8d641fb4481f9a4c625588dd4aa4ce20bcd - src/nvidia/src/kernel/gpu/subdevice/subdevice.c +6fab19f1f68bdb8d2b969efc6f030e2066bc6b5e - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c +b4e503b320119fecdb22dfda1268ce31e1a7ecd7 - src/nvidia/src/kernel/gpu/subdevice/generic_engine.c +9afe5cedd5e7d535ee56f4f5b3cc549f154d8be2 - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c +796d1368584a9318a39ed313dcb86bbcca40ad83 - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c +4c363a34fe12b9bb0d428c3d90974d7085d0366f - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_internal_kernel.c +fcf79cf10019193a9e57f8d19b5a37bac6120365 - src/nvidia/src/kernel/gpu/arch/t25x/kern_gpu_t256d.c +095d4a87b067038bd2d80a1c4b2d9407810b0e66 - src/nvidia/src/kernel/gpu/arch/t26x/kern_gpu_t264d.c +c20ed8bd9fda88b036c6ff677b7c25ebd171434f - src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_arch_t234d.c +b09af17437a01e63e960414a4534074da240dc59 - src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c +ceb516c8064e1df2d18897f98f5c8ea58e907973 - src/nvidia/src/kernel/gpu/disp/disp_capabilities.c +c67baeb5df33080d99f322786759fc3f5436301d - src/nvidia/src/kernel/gpu/disp/disp_channel.c +8fafebf746bfcde2c53435be386a8a0846973b0c - src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c +6437dd659a38c62cd81fb59f229bd94e59f37e71 - src/nvidia/src/kernel/gpu/disp/disp_sf_user.c +0fbfb9dd91147f04bea1060788efc1121078c159 - src/nvidia/src/kernel/gpu/disp/kern_disp.c +5aa67b54fcd16f648d7a72b9c2c4ff3fb6d3a5be - src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c +56027ec220553e1febe42f37fd70757cbb034dcb - src/nvidia/src/kernel/gpu/disp/disp_objs.c +b95080033ecc8736a0cdf9476cec7563c4a2af0f - 
src/nvidia/src/kernel/gpu/disp/vblank_callback/vblank.c +caba45a10f43e7817f491e7856ef30dd49782f6e - src/nvidia/src/kernel/gpu/disp/head/kernel_head.c +f59763139d9993ae545ded8057706cc4d65afc0c - src/nvidia/src/kernel/gpu/disp/head/arch/v04/kernel_head_0401.c +eb00ffa5a892558d39db15f473e2c308acfd86d9 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0404.c +2b19caf7def14190c99dc4e41983b4a3e3334f22 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0401.c +6d99d644a8294d08b0fdebf183306bbdadf819e3 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c +57fec208154cd0d25838a688f6457598baf2de7a - src/nvidia/src/kernel/gpu/disp/arch/v02/kern_disp_0204.c +64aa574198449e9556328d1c08f08b3bde5bfad0 - src/nvidia/src/kernel/gpu/disp/arch/v05/kern_disp_0501.c +d911e6ae9f7b96e6f441208d38701a8d833e7455 - src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c +ae5ef73d6e74026e0b847977c41b92cbf0f30a62 - src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c +4cfab589176c432463859f148ad32c7dac2c83d3 - src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c +60e8d1fa9cd375be783c4575baa2e99ac2b22a88 - src/nvidia/src/kernel/gpu/timer/timer.c +f6e518524581b772f8fdbc80418a2018570940ca - src/nvidia/src/kernel/gpu/timer/timer_ostimer.c +1f4d15f959df38f4f6ea48c7b10fc859c6e04b12 - src/nvidia/src/kernel/gpu/audio/hda_codec_api.c +10a8bfd47ce609763c07a0d61be2f71f9f91889e - src/nvidia/src/kernel/gpu/mem_mgr/mem_ctrl.c +bfc82499a8b9b8ce10411f6c391b0e575dc7c0d6 - src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c +a62f423d6cf69e96b0523a233ec00353d63ee8bd - src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c +92611eb4f3bed31064a9efbb54a1ece7ffcfc2af - src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c +4a95b73f744807d96510b0ad7181eae5b12839ce - src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c +ce09583697a98a2d0e8466dd45764f15945f55c2 - src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c +cebb9eee63e23bb934881b3313e422b50fb38abb - src/nvidia/src/kernel/gpu/dce_client/dce_client.c +d5d8ff429d3bda7103bafcb2dca94678efc8ddd8 - src/nvidia/src/kernel/gpu_mgr/gpu_group.c +2b49d8a3413a1731bc4fb0bab3f32ff272a71a8c - src/nvidia/src/kernel/gpu_mgr/gpu_db.c +37d1e3dd86e6409b8e461f90386e013194c9e4d1 - src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c +fe618e428d9a172a0fd9412f5a20df64d7270418 - src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c +593bbc5b93b620019144fadf1281a180ec050012 - src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c +54c1d1a44474a7027c5290551e60f13678226301 - src/nvidia/src/kernel/mem_mgr/standard_mem.c +44069d6ebbd94a11267e6cc0179ab167f91faec4 - src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c +5a5e689cf264134ae8c4300d986c209c04167743 - src/nvidia/src/kernel/mem_mgr/vaspace.c +5b9048e62581a3fbb0227d1a46c4ee8d8397bf5b - src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h +630200d06b6588d7fa8c5b1ea16146e8281163d7 - src/nvidia/src/kernel/mem_mgr/io_vaspace.c +04876ed2dedf0ac3228ec6261a0f3f79609e44a5 - src/nvidia/src/kernel/mem_mgr/system_mem.c +873de51b330501a86ec7656fcf3f615034c49f8e - src/nvidia/src/kernel/mem_mgr/os_desc_mem.c +ed8376f04af08af8da7d47c6340ff38a8910de87 - src/nvidia/src/kernel/mem_mgr/mem.c +08762b3172f6309f1aeab895761193fa19cb176f - src/nvidia/interface/nv_sriov_defines.h +024b112ea410ee1b1badb585b03fdbabb64ade34 - src/nvidia/interface/nvrm_registry.h +3f7b20e27e6576ee1f2f0557d269697a0b8af7ec - src/nvidia/interface/nv-firmware-registry.h +d02ee5bb3f19dffd8b5c30dc852cea243bcdf399 - src/nvidia/interface/acpidsmguids.h +60c7cafce7bd5240e8409e3c5b71214262347efc - src/nvidia/interface/acpigenfuncs.h 
+bff92c9767308a13df1d0858d5f9c82af155679a - src/nvidia/interface/nvacpitypes.h +7790849d0d261e84d04ab5a481bb57309de6409a - src/nvidia/interface/deprecated/rmapi_deprecated_utils.c +82f65de514ef7e2204cfb618d398cf3af8c12778 - src/nvidia/interface/deprecated/rmapi_deprecated.h +49e299b7257e179b701747e061b6b0214d5565f0 - src/nvidia/interface/rmapi/src/g_finn_rm_api.c +7b8431767b7c4b3861582ddab27a079568bf0660 - src/nvidia-modeset/Makefile +7e1249c1d187aec5891eabe5bacae2189d33dc55 - src/nvidia-modeset/lib/nvkms-sync.c +c3ab6005d7083e90145cac66addf815c4f93d9a0 - src/nvidia-modeset/lib/nvkms-format.c +f69ac0ec080036b8abc7f1ae7b857989f5c9df4a - src/nvidia-modeset/include/nvkms-headsurface-3d.h +b8854261256a801af52d1201081afa9c17486a96 - src/nvidia-modeset/include/nvkms-3dvision.h +3212e81bcde5a5dcec5dbba4155a41ca52dd2304 - src/nvidia-modeset/include/nvkms-prealloc.h +24aaf3a4cb16be7a5aaa8317090142743e3dd797 - src/nvidia-modeset/include/nvkms-flip-workarea.h +be6cff078fcf66221762a4af1515e01d294dd2f6 - src/nvidia-modeset/include/nvkms-push.h +4361f10ff446c401c3f52bf36aed52ca24706d49 - src/nvidia-modeset/include/nvkms-vrr.h +08aa0dd2f18a8cf74539ea8b25ef3f3646567a0c - src/nvidia-modeset/include/nvkms-evo1.h +9bfb2d12ecdaecaba7eaaffa3040ab142d37f892 - src/nvidia-modeset/include/nvkms-prealloc-types.h +0bd9cf097cfa373f0bed7be8fe5299e2ea4bf669 - src/nvidia-modeset/include/g_nvkms-evo-states.h +708e037052ea0b3d6309fa44a205282b7a69a331 - src/nvidia-modeset/include/nvkms-difr.h +412d8028a548e67e9ef85cb7d3f88385e70c56f9 - src/nvidia-modeset/include/nvkms-console-restore.h +52b6d1a1a6793d232571e6366709436b018ae3b7 - src/nvidia-modeset/include/nvkms-dpy.h +81fcc817dfb8ae1f98b63d2c1acacc303fedb554 - src/nvidia-modeset/include/nvkms-dpy-override.h +0f251b41b076bb80eeebf7d54e6fd6c764404c28 - src/nvidia-modeset/include/nvkms-evo-states.h +70d9251f331bbf28f5c5bbdf939ebad94db9362d - src/nvidia-modeset/include/nvkms-softfloat.h +6e3681d5caa36312804c91630eaaf510eda897d2 - src/nvidia-modeset/include/nvkms-dma.h +eb5248c4b0b51e7aecd2de87e496253b3b235c70 - src/nvidia-modeset/include/nvkms-utils-flip.h +377dd4a29b2ea5937a9b8fc3fba0c9e4ef92992e - src/nvidia-modeset/include/nvkms-cursor.h +e1225d674a0e6e58110750868c45a4655110a4d8 - src/nvidia-modeset/include/nvkms-headsurface-swapgroup.h +9e3d50761d3a27c1db3085ff82b7d194ff47bf34 - src/nvidia-modeset/include/nvkms-rm.h +fd9fa6da0fc28b00be524b0bed25a68c56278363 - src/nvidia-modeset/include/nvkms-modeset.h +be6e0e97c1e7ffc0daa2f14ef7b05b9f9c11dc16 - src/nvidia-modeset/include/nvkms-attributes.h +e30d9c286263051d14a1862f0c630295a78abde7 - src/nvidia-modeset/include/nvkms-headsurface-priv.h +3fd0822b8b44d13685ecde9d02300e6cfbb123db - src/nvidia-modeset/include/nvkms-hdmi.h +6b21a68e254becdd2641bc456f194f54c23abe51 - src/nvidia-modeset/include/nvkms-framelock.h +53122264a19ea00ef26e6accde3a3a7570e46b15 - src/nvidia-modeset/include/nvkms-vblank-sem-control.h +1b21352fd9d0b1c5708cb8512acf20ba2e13955d - src/nvidia-modeset/include/nvkms-headsurface.h +59d20eff40e4e488eb3ab7c97b5e171142dcdbcf - src/nvidia-modeset/include/nvkms-modeset-workarea.h +933f9b359a1c3807771e2719c6dd80d71beff3c8 - src/nvidia-modeset/include/nvkms-utils.h +f5f3b11c78a8b0eef40c09e1751615a47f516edb - src/nvidia-modeset/include/nvkms-hal.h +03f3fd4c2fb7db83441805a5c350b121bd3117b4 - src/nvidia-modeset/include/nvkms-setlut-workarea.h +31acf6af2a4c82e3429efa77d110cb346c11905f - src/nvidia-modeset/include/nvkms-lut.h +e4bae9a0df729119071902f7ad59704c97adee0e - src/nvidia-modeset/include/nvkms-private.h 
+fbe2cbfd32b40d8188c6b25716fb360720ab5760 - src/nvidia-modeset/include/nvkms-evo.h +04f2e01c7f798a615319accc2dd713f617a81172 - src/nvidia-modeset/include/nvkms-headsurface-config.h +4a94381bd8c24b09193577d3f05d6d61f178e1cf - src/nvidia-modeset/include/nvkms-ctxdma.h +b4d53599736b03ee1bc149abe7b602336f40295c - src/nvidia-modeset/include/nvkms-flip.h +46fc0e138ba7be5fa3ea0ada3ee0a78656950c80 - src/nvidia-modeset/include/nvkms-modeset-types.h +260b6ef87c755e55a803adad4ce49f2d57315f9a - src/nvidia-modeset/include/nvkms-event.h +35fa1444c57f7adbbddddc612237f3ad38cdd78f - src/nvidia-modeset/include/nvkms-rmapi.h +8782df838ea3d2617e9842c89389f51137b19a73 - src/nvidia-modeset/include/nvkms-headsurface-matrix.h +881d7e4187ff9c7e9d02672aedafc1605f3055ec - src/nvidia-modeset/include/nvkms-modepool.h +60c01e29aa91aa80bf3750a1b11fe61a6cdfde58 - src/nvidia-modeset/include/nvkms-types.h +cc3dc4021b76782434efd2aa81d3ffdd1f3b1f0a - src/nvidia-modeset/include/nvkms-headsurface-ioctl.h +3dc2113c55970fa70b7afb4fd30f2f1e777ebc12 - src/nvidia-modeset/include/nvkms-surface.h +aa43ad7f970331c56378b7797f66b0a77d8e99dd - src/nvidia-modeset/include/nvkms-evo3.h +8c7e0e15c1038fe518e98d8f86fafb250b10a1d2 - src/nvidia-modeset/include/nvkms-stereo.h +9deeeae9081fd828a14f3b0df5fbf17a81161786 - src/nvidia-modeset/include/nvkms-hw-flip.h +6460f8427fdb375d659975c7f6eaadaca0ed2b2c - src/nvidia-modeset/include/dp/nvdp-device.h +1912d523f567c4fc36075942cf8acaf5d5478232 - src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h +a233bdcd5daa0582acf2cd5b0f339ad54d09bf13 - src/nvidia-modeset/include/dp/nvdp-timer.h +2b91423ff88ca398324088d4f910e81f6944123a - src/nvidia-modeset/include/dp/nvdp-connector.h +aa8aa13c6fc48ff5ef621f243e94dcc01a46dea3 - src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h +c0de6efe1d5c57da324118f108ea0570a6923036 - src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h +b01351ece15ce0d54a19ad0d7ffa056963d72488 - src/nvidia-modeset/kapi/src/nvkms-kapi.c +a4d52bb238ce94f3427f25bd169e58d5d5f4abd1 - src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c +ce42ceac4c4cf9d249d66ab57ae2f435cd9623fc - src/nvidia-modeset/kapi/src/nvkms-kapi-sync.c +80c2c9a2a05beb0202239db8b0dd7080ff21c194 - src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h +4c856c1324060dcb5a9e72e5e82c7a60f6324733 - src/nvidia-modeset/kapi/interface/nvkms-kapi.h +11af2aeea97398b58f628fe4685b5dfcfda5791b - src/nvidia-modeset/src/nvkms-modeset.c +016fd1b111731c6d323425d52bfe1a04d8bcade7 - src/nvidia-modeset/src/nvkms-headsurface-swapgroup.c +37a6d00e8721a9c4134810f8be3e7168f8cbb226 - src/nvidia-modeset/src/nvkms-evo.c +4758c601621603597bd2387c4f08b3fdc17e375d - src/nvidia-modeset/src/nvkms-hw-flip.c +5e3188c2d9b580ff69e45842f841f5c92c0c6edb - src/nvidia-modeset/src/nvkms-headsurface-ioctl.c +e1a3c31638416a0132c5301fe5dd4b1c93f14376 - src/nvidia-modeset/src/nvkms-cursor3.c +d48ff2da5fac6f8cd0522a25b947b5b8c01812ba - src/nvidia-modeset/src/nvkms-rm.c +30ad7839985dea46e6b6d43499210a3056da51ad - src/nvidia-modeset/src/nvkms-utils-flip.c +2c24667a18374ae967917df219f3775d9a79ae04 - src/nvidia-modeset/src/nvkms-headsurface-3d.c +fb8b4aa1e36f23e1927be3dbd351ab0357aeb735 - src/nvidia-modeset/src/nvkms-evo3.c +9ce404d122bbdcd5f626f2c2b7ff08a9bfcf4045 - src/nvidia-modeset/src/nvkms-flip.c +e5c96eb6b9884daf4a8d0d467b009008a45065b9 - src/nvidia-modeset/src/g_nvkms-evo-states.c +094c2169412cb577a6e9db9420da084264119284 - src/nvidia-modeset/src/nvkms-hal.c +1e0bf57319954911ddd2fe87b0cd05e257f1439e - src/nvidia-modeset/src/nvkms-surface.c 
+bd2e4a6102432d4ac1faf92b5d3db29e9e3cfafc - src/nvidia-modeset/src/nvkms-utils.c +6d41c9f84cc9ce2d16812e94a3fba055b3fc7308 - src/nvidia-modeset/src/nvkms-conf.c +05bfe67d8cb956a666804b8f27e507bbd35e2c2d - src/nvidia-modeset/src/nvkms-difr.c +9a8746ee4a4e772b8ac13f06dc0de8a250fdb4c7 - src/nvidia-modeset/src/nvkms-ctxdma.c +382141f251ce64e2d33add3b89225c373da9ea7d - src/nvidia-modeset/src/nvkms-hdmi.c +2e1644a912e7a27ec04288e000c3fa5439eecb60 - src/nvidia-modeset/src/nvkms-headsurface-matrix.c +127a3f77febf09d56b6fe3534bc62ff0ffa535d8 - src/nvidia-modeset/src/nvkms-dpy.c +e0756f45732035b1000a03bd8a995a46041904ae - src/nvidia-modeset/src/nvkms-vblank-sem-control.c +e4044bb85de59d662d0d579771c076cbe9b10bbb - src/nvidia-modeset/src/nvkms.c +12cbc57714f458b5673115bb5c4d380509d05277 - src/nvidia-modeset/src/nvkms-cursor.c +5c93bc35d8f93330dd7a1f7808e39c6001ee83e8 - src/nvidia-modeset/src/nvkms-headsurface-config.c +ed78249de63139ec2629bde58b616cef649281f1 - src/nvidia-modeset/src/nvkms-evo2.c +c51c4f2e3ac11bf86d4549ce5e9d9010199e37dd - src/nvidia-modeset/src/nvkms-prealloc.c +9d38d5147d06a293a272087d78d0b96b6003f11e - src/nvidia-modeset/src/nvkms-attributes.c +65b02b48caff2a9100b8c5614f91d42fb20da9c0 - src/nvidia-modeset/src/nvkms-dpy-override.c +a62b617aa5c89056c19a5f3c91402df8cfcc1103 - src/nvidia-modeset/src/nvkms-push.c +9fea40b7b55d6ebf3f73b5d469751c873ffbe7c0 - src/nvidia-modeset/src/nvkms-dma.c +da726d20eea99a96af4c10aace88f419e8ee2a34 - src/nvidia-modeset/src/nvkms-event.c +a1c7c3c1191762c0a1038674dee0075d532ccd2d - src/nvidia-modeset/src/nvkms-headsurface.c +2fabe1c14116a2b07f24d01710394ee84a6e3914 - src/nvidia-modeset/src/nvkms-3dvision.c +89b58b1e67ff7ed43c889fe7d85329d7f4762b91 - src/nvidia-modeset/src/nvkms-hw-states.c +c799d52bdc792efc377fb5cd307b0eb445c44d6a - src/nvidia-modeset/src/nvkms-cursor2.c +dd6c86b5557b02dd15a8ea0f10bde9770d90874e - src/nvidia-modeset/src/nvkms-evo4.c +be49ea18102a44914e0d7686c51430df18336383 - src/nvidia-modeset/src/nvkms-framelock.c +6bdb90474b5d31c53104f7b29b447b3f798aaa0e - src/nvidia-modeset/src/nvkms-vrr.c +05ca4acdfeb9b99eccc7e222846fc688473322ae - src/nvidia-modeset/src/nvkms-rmapi-dgpu.c +f754a27436fd1e1fa103de6110224c21ad7ea9f4 - src/nvidia-modeset/src/nvkms-pow.c +e8c6d2eedfba19f8f06dd57f629588615cf1a2e9 - src/nvidia-modeset/src/nvkms-evo1.c +d15f314bea66574e0ffc72966b86bae8366412f5 - src/nvidia-modeset/src/nvkms-console-restore.c +0699860902369359e5ff1a0ef46b87e955d4bb7a - src/nvidia-modeset/src/nvkms-modepool.c +403e6dbff0a607c2aecf3204c56633bd7b612ae2 - src/nvidia-modeset/src/nvkms-stereo.c +fd6ecacc4f273c88960148c070dd17d93f49909b - src/nvidia-modeset/src/nvkms-lut.c +771fee54d1123871e380db6f3227b4946b6be647 - src/nvidia-modeset/src/dp/nvdp-timer.cpp +6b985fc50b5040ce1a81418bed73a60edb5d3289 - src/nvidia-modeset/src/dp/nvdp-timer.hpp +dcf9f99e79a13b109a8665597f0fc7c00ec37957 - src/nvidia-modeset/src/dp/nvdp-connector.cpp +e0e50fc1c526ecf0fe2f60689a25adda1257e2b3 - src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp +16081091156a813977dfdd0718d55ea4a66a0686 - src/nvidia-modeset/src/dp/nvdp-device.cpp +6e17f81da1b94414c1cbf18c3ea92f25352d8bf5 - src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp +81065db63fda6468fdf56d853781fca8af610798 - src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp +e1f003a64cec57f299e65567d29e69951a62f44a - src/nvidia-modeset/src/dp/nvdp-host.cpp +ca07b8e8f507de47694ac7b3b1719b0931da02c6 - src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp +2b49249a135293d01e82ef11ee520596c9825875 - 
src/nvidia-modeset/src/shaders/g_pascal_shaders
+09cb78322cc8465d42a4be6a1c3682566c66462d - src/nvidia-modeset/src/shaders/g_maxwell_shaders
+a62c80e00077041d38d84e06c5834dca527e8a55 - src/nvidia-modeset/src/shaders/g_volta_shader_info.h
+21cf709a8717d43c4abc6b66c8faad141592b7ce - src/nvidia-modeset/src/shaders/g_nvidia-headsurface-shader-info.h
+fec9074463a5505e300f9feb77b60ec77b781bb7 - src/nvidia-modeset/src/shaders/g_turing_shader_info.h
+cad54ab33c1132ba7453f54e9a02d34504e4fd5c - src/nvidia-modeset/src/shaders/g_pascal_shader_info.h
+f3bdeb7d46fdc9c31940ea799ce4a0d328fe1844 - src/nvidia-modeset/src/shaders/g_ampere_shaders
+0ba4739302e0938b5599afb7e7ad281b21e25cec - src/nvidia-modeset/src/shaders/g_maxwell_shader_info.h
+1c02043d31faf4f79c4a54dd5a622e87ee276be8 - src/nvidia-modeset/src/shaders/g_volta_shaders
+f540d144503d00941a1b32fb1a3d13061065b24e - src/nvidia-modeset/src/shaders/g_hopper_shader_info.h
+74824b796722071bc3d90e4dacfed245dcda28cd - src/nvidia-modeset/src/shaders/g_turing_shaders
+ce728856b76bfa428b199fd3b97e0cbc24ef54cd - src/nvidia-modeset/src/shaders/g_hopper_shaders
+02bb8bc0f5d228d4a9a383d797daffd8936c4ad7 - src/nvidia-modeset/src/shaders/g_ampere_shader_info.h
+9f35175e44247d4facb26a60614d40fcdb74416f - src/nvidia-modeset/src/shaders/g_shader_names.h
+ca86fee8bd52e6c84e376199c5f3890078bc2031 - src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h
+b2a5ddfd8dcb3000b9d102bd55b5b560730e81d5 - src/nvidia-modeset/os-interface/include/nvkms.h
+51b367a6e289cc8957388745988315024f97506e - src/nvidia-modeset/interface/nvkms-api.h
+b986bc6591ba17a74ad81ec4c93347564c6d5165 - src/nvidia-modeset/interface/nvkms-format.h
+2ea1436104463c5e3d177e8574c3b4298976d37e - src/nvidia-modeset/interface/nvkms-ioctl.h
+3bf4a2d1fec120ef5313c8bf119bc22fb3cf0cc5 - src/nvidia-modeset/interface/nvkms-modetimings.h
+c54c62de441828282db9a4f5b35c2fa5c97d94f1 - src/nvidia-modeset/interface/nvkms-api-types.h
+8e3e74d2b3f45381e7b0012d930cf451cbd1728f - src/nvidia-modeset/interface/nvkms-sync.h
diff --git a/kernel-open/Kbuild b/kernel-open/Kbuild
new file mode 100644
index 0000000..1b07b82
--- /dev/null
+++ b/kernel-open/Kbuild
@@ -0,0 +1,306 @@
+###########################################################################
+# Kbuild file for NVIDIA Linux GPU driver kernel modules
+###########################################################################
+
+#
+# The parent makefile is expected to define:
+#
+# NV_KERNEL_SOURCES : The root of the kernel source tree.
+# NV_KERNEL_OUTPUT  : The kernel's output tree.
+# NV_KERNEL_MODULES : A whitespace-separated list of modules to build.
+# ARCH              : The target CPU architecture: x86_64|arm64
+#
+# Kbuild provides the variables:
+#
+# $(src) : The directory containing this Kbuild file.
+# $(obj) : The directory where the output from this build is written.
+#
+
+NV_BUILD_TYPE ?= release
+
+#
+# Utility macro ASSIGN_PER_OBJ_CFLAGS: to control CFLAGS on a
+# per-object basis, Kbuild honors the 'CFLAGS_$(object)' variable.
+# E.g., "CFLAGS_nv.o" for CFLAGS that are specific to nv.o. Use this
+# macro to assign 'CFLAGS_$(object)' variables for multiple object
+# files.
+#
+# $(1): The object files.
+# $(2): The CFLAGS to add for those object files.
+#
+# With kernel git commit 54b8ae66ae1a3454a7645d159a482c31cd89ab33, the
+# handling of object-specific CFLAGS, CFLAGS_$(object), has changed. Prior to
+# this commit, the CFLAGS_$(object) variable was required to be defined with
+# only the object name (CFLAGS_<somefile>.o). With the aforementioned git
+# commit, it is now required to give Kbuild relative paths along with the
+# object name (CFLAGS_<path-to-somefile>/<somefile>.o). As a result,
+# CFLAGS_$(object) is set twice, once with a relative path to the object
+# files and once with just the object files.
+#
+ASSIGN_PER_OBJ_CFLAGS = \
+ $(foreach _cflags_variable, \
+  $(notdir $(1)) $(1), \
+  $(eval $(addprefix CFLAGS_,$(_cflags_variable)) += $(2)))
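# [Editor's note] A minimal sketch of how a per-module *.Kbuild file might
# call this macro; the object list and flag below are hypothetical, not taken
# from this patch:
#
#   NVIDIA_EXAMPLE_OBJECTS = nvidia/nv.o nvidia/nv-pci.o
#   $(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_EXAMPLE_OBJECTS), -DNV_EXAMPLE_FLAG)
#
# After expansion, both the bare names (CFLAGS_nv.o, CFLAGS_nv-pci.o) and the
# path-qualified names (CFLAGS_nvidia/nv.o, CFLAGS_nvidia/nv-pci.o) receive
# -DNV_EXAMPLE_FLAG, covering kernels on either side of commit 54b8ae66.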
+
+
+#
+# Include the specifics of the individual NVIDIA kernel modules.
+#
+# Each of these should:
+# - Append to 'obj-m', to indicate the kernel module that should be built.
+# - Define the object files that should get built to produce the kernel module.
+# - Tie into conftest (see the description below).
+#
+
+NV_UNDEF_BEHAVIOR_SANITIZER ?=
+ifeq ($(NV_UNDEF_BEHAVIOR_SANITIZER),1)
+ UBSAN_SANITIZE := y
+endif
+
+#
+# Command to create a symbolic link, explicitly resolving the symlink target
+# to an absolute path to abstract away the difference between Linux < 6.13,
+# where the CWD is the Linux kernel source tree for Kbuild extmod builds, and
+# Linux >= 6.13, where the CWD is the external module source tree.
+#
+# This is used to create the nv*-kernel.o -> nv*-kernel.o_binary symlinks for
+# kernel modules which use precompiled binary object files.
+#
+
+quiet_cmd_symlink = SYMLINK $@
+ cmd_symlink = ln -sf $(abspath $<) $@
+
+
+$(foreach _module, $(NV_KERNEL_MODULES), \
+ $(eval include $(src)/$(_module)/$(_module).Kbuild))
+
+
+ccflags-y += -I$(src)/common/inc
+ccflags-y += -I$(src)
+ccflags-y += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
+ccflags-y += -D__KERNEL__ -DMODULE -DNVRM
+ccflags-y += -DNV_VERSION_STRING=\"580.00\"
+
+# Include and link Tegra out-of-tree modules.
+ifneq ($(wildcard /usr/src/nvidia/nvidia-oot),)
+ SYSSRCNVOOT ?= /usr/src/nvidia/nvidia-oot
+endif
+
+ifneq ($(SYSSRCHOST1X),)
+ ccflags-y += -I$(SYSSRCHOST1X)
+endif
+
+ifneq ($(SYSSRCNVOOT),)
+ ccflags-y += -I$(SYSSRCNVOOT)/include
+ KBUILD_EXTRA_SYMBOLS = $(SYSSRCNVOOT)/Module.symvers
+endif
+
+# Some Android kernels prohibit driver use of filesystem functions like
+# filp_open() and kernel_read(). Disable the NV_FILESYSTEM_ACCESS_AVAILABLE
+# functionality that uses those functions when building for Android.
+
+PLATFORM_IS_ANDROID ?= 0
+
+ifeq ($(PLATFORM_IS_ANDROID),1)
+ ccflags-y += -DNV_FILESYSTEM_ACCESS_AVAILABLE=0
+else
+ ccflags-y += -DNV_FILESYSTEM_ACCESS_AVAILABLE=1
+endif
+
+ccflags-y += -Wno-unused-function
+
+ifneq ($(NV_BUILD_TYPE),debug)
+ ccflags-y += -Wuninitialized
+endif
+
+ccflags-y += -fno-strict-aliasing
+
+ifeq ($(ARCH),arm64)
+ ccflags-y += -mstrict-align
+endif
+
+ifeq ($(NV_BUILD_TYPE),debug)
+ ccflags-y += -g
+endif
+
+ccflags-y += -ffreestanding
+
+ifeq ($(ARCH),arm64)
+ ccflags-y += -mgeneral-regs-only -march=armv8-a
+ ccflags-y += $(call cc-option,-mno-outline-atomics,)
+endif
+
+ifeq ($(ARCH),x86_64)
+ ccflags-y += -mno-red-zone -mcmodel=kernel
+endif
+
+ccflags-y +=
+ccflags-y += $(call cc-option,-Werror=undef,)
+ccflags-y += -DNV_SPECTRE_V2=$(NV_SPECTRE_V2)
+ccflags-y += -DNV_KERNEL_INTERFACE_LAYER
+
+#
+# Detect SGI UV systems and apply system-specific optimizations.
+#
+
+ifneq ($(wildcard /proc/sgi_uv),)
+ ccflags-y += -DNV_CONFIG_X86_UV
+endif
+
+ifdef VGX_FORCE_VFIO_PCI_CORE
+ ccflags-y += -DNV_VGPU_FORCE_VFIO_PCI_CORE
+endif
+
+WARNINGS_AS_ERRORS ?=
+ifeq ($(WARNINGS_AS_ERRORS),1)
+ ccflags-y += -Werror
+else
+ ccflags-y += -Wno-error
+endif
+
+#
+# The conftest.sh script tests various aspects of the target kernel.
+# The per-module Kbuild files included above should: +# +# - Append to the NV_CONFTEST_*_COMPILE_TESTS variables to indicate +# which conftests they require. +# - Append to the NV_OBJECTS_DEPEND_ON_CONFTEST variable any object files +# that depend on conftest. +# +# The conftest machinery below will run the requested tests and +# generate the appropriate header files. +# + +CC ?= cc +LD ?= ld + +NV_CONFTEST_SCRIPT := $(src)/conftest.sh +NV_CONFTEST_HEADER := $(obj)/conftest/headers.h + +NV_CONFTEST_CMD := /bin/sh $(NV_CONFTEST_SCRIPT) \ + "$(CC)" $(ARCH) $(NV_KERNEL_SOURCES) $(NV_KERNEL_OUTPUT) + +NV_CFLAGS_FROM_CONFTEST := $(shell $(NV_CONFTEST_CMD) build_cflags) + +NV_CONFTEST_CFLAGS = $(NV_CFLAGS_FROM_CONFTEST) $(ccflags-y) -fno-pie +NV_CONFTEST_CFLAGS += $(filter -std=%,$(KBUILD_CFLAGS)) +NV_CONFTEST_CFLAGS += $(call cc-disable-warning,pointer-sign) +NV_CONFTEST_CFLAGS += $(call cc-option,-fshort-wchar,) +NV_CONFTEST_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types,) +NV_CONFTEST_CFLAGS += -Wno-error + +NV_CONFTEST_COMPILE_TEST_HEADERS := $(obj)/conftest/macros.h +NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/functions.h +NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/symbols.h +NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/types.h +NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/generic.h + +NV_CONFTEST_HEADERS := $(obj)/conftest/patches.h +NV_CONFTEST_HEADERS += $(obj)/conftest/headers.h +NV_CONFTEST_HEADERS += $(NV_CONFTEST_COMPILE_TEST_HEADERS) + + +# +# Generate a header file for a single conftest compile test. Each compile test +# header depends on conftest.sh, as well as the generated conftest/headers.h +# file, which is included in the compile test preamble. +# + +$(obj)/conftest/compile-tests/%.h: $(NV_CONFTEST_SCRIPT) $(NV_CONFTEST_HEADER) + @mkdir -p $(obj)/conftest/compile-tests + @echo " CONFTEST: $(notdir $*)" + @$(NV_CONFTEST_CMD) compile_tests '$(NV_CONFTEST_CFLAGS)' \ + $(notdir $*) > $@ + +# +# Concatenate a conftest/*.h header from its constituent compile test headers +# +# $(1): The name of the concatenated header +# $(2): The list of compile tests that make up the header +# + +define NV_GENERATE_COMPILE_TEST_HEADER + $(obj)/conftest/$(1).h: $(addprefix $(obj)/conftest/compile-tests/,$(addsuffix .h,$(2))) + @mkdir -p $(obj)/conftest + @# concatenate /dev/null to prevent cat from hanging when $$^ is empty + @cat $$^ /dev/null > $$@ +endef + +# +# Generate the conftest compile test headers from the lists of compile tests +# provided by the module-specific Kbuild files. +# + +NV_CONFTEST_FUNCTION_COMPILE_TESTS ?= +NV_CONFTEST_GENERIC_COMPILE_TESTS ?= +NV_CONFTEST_MACRO_COMPILE_TESTS ?= +NV_CONFTEST_SYMBOL_COMPILE_TESTS ?= +NV_CONFTEST_TYPE_COMPILE_TESTS ?= + +$(eval $(call NV_GENERATE_COMPILE_TEST_HEADER,functions,$(NV_CONFTEST_FUNCTION_COMPILE_TESTS))) +$(eval $(call NV_GENERATE_COMPILE_TEST_HEADER,generic,$(NV_CONFTEST_GENERIC_COMPILE_TESTS))) +$(eval $(call NV_GENERATE_COMPILE_TEST_HEADER,macros,$(NV_CONFTEST_MACRO_COMPILE_TESTS))) +$(eval $(call NV_GENERATE_COMPILE_TEST_HEADER,symbols,$(NV_CONFTEST_SYMBOL_COMPILE_TESTS))) +$(eval $(call NV_GENERATE_COMPILE_TEST_HEADER,types,$(NV_CONFTEST_TYPE_COMPILE_TESTS))) + +$(obj)/conftest/patches.h: $(NV_CONFTEST_SCRIPT) + @mkdir -p $(obj)/conftest + @$(NV_CONFTEST_CMD) patch_check > $@ + +include $(src)/header-presence-tests.mk + +# Filename to store the define for the header in $(1); this is only consumed by +# the rule below that concatenates all of these together. 
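# [Editor's note] For illustration: each generated .part file is expected to
# hold a single presence define for one kernel header, emitted by
# conftest.sh's test_kernel_header step. The macro name below is an assumed
# example of the naming pattern, e.g. for linux/dma-buf.h:
#
#   #define NV_LINUX_DMA_BUF_H_PRESENT
#
# Concatenating the .part files yields conftest/headers.h, which the compile
# tests above include in their preamble.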
+NV_HEADER_PRESENCE_PART = $(addprefix $(obj)/conftest/header_presence/,$(addsuffix .part,$(1))) + +# Define a rule to check the header $(1). +define NV_HEADER_PRESENCE_CHECK + $$(call NV_HEADER_PRESENCE_PART,$(1)): $$(NV_CONFTEST_SCRIPT) $(obj)/conftest/uts_release + @mkdir -p $$(dir $$@) + @$$(NV_CONFTEST_CMD) test_kernel_header '$$(NV_CONFTEST_CFLAGS)' '$(1)' > $$@ +endef + +# Evaluate the rule above for each header in the list. +$(foreach header,$(NV_HEADER_PRESENCE_TESTS),$(eval $(call NV_HEADER_PRESENCE_CHECK,$(header)))) + +# Concatenate all of the parts into headers.h. +$(obj)/conftest/headers.h: $(call NV_HEADER_PRESENCE_PART,$(NV_HEADER_PRESENCE_TESTS)) + @cat $^ > $@ + +clean-dirs := $(obj)/conftest + + +# For any object files that depend on conftest, declare the dependency here. +$(addprefix $(obj)/,$(NV_OBJECTS_DEPEND_ON_CONFTEST)): | $(NV_CONFTEST_HEADERS) + +# Sanity checks of the build environment and target system/kernel + +BUILD_SANITY_CHECKS = \ + cc_sanity_check \ + cc_version_check \ + dom0_sanity_check \ + xen_sanity_check \ + preempt_rt_sanity_check \ + vgpu_kvm_sanity_check \ + module_symvers_sanity_check + +.PHONY: $(BUILD_SANITY_CHECKS) + +$(BUILD_SANITY_CHECKS): + @$(NV_CONFTEST_CMD) $@ full_output + +# Perform all sanity checks before generating the conftest headers + +$(NV_CONFTEST_HEADERS): | $(BUILD_SANITY_CHECKS) + +# Make the conftest headers depend on the kernel version string + +$(obj)/conftest/uts_release: NV_GENERATE_UTS_RELEASE + @mkdir -p $(dir $@) + @NV_UTS_RELEASE="// Kernel version: `$(NV_CONFTEST_CMD) compile_tests '$(NV_CONFTEST_CFLAGS)' uts_release`"; \ + if ! [ -f "$@" ] || [ "$$NV_UTS_RELEASE" != "`cat $@`" ]; \ + then echo "$$NV_UTS_RELEASE" > $@; fi + +.PHONY: NV_GENERATE_UTS_RELEASE + +$(NV_CONFTEST_HEADERS): $(obj)/conftest/uts_release diff --git a/kernel-open/Makefile b/kernel-open/Makefile new file mode 100644 index 0000000..1b98d33 --- /dev/null +++ b/kernel-open/Makefile @@ -0,0 +1,184 @@ +# +# This Makefile was automatically generated; do not edit. +# + +########################################################################### +# Makefile for NVIDIA Linux GPU driver kernel modules +########################################################################### + +# This makefile is read twice: when a user or nvidia-installer invokes +# 'make', this file is read. It then invokes the Linux kernel's +# Kbuild. Modern versions of Kbuild will then read the Kbuild file in +# this directory. However, old versions of Kbuild will instead read +# this Makefile. For backwards compatibility, when read by Kbuild +# (recognized by KERNELRELEASE not being empty), do nothing but +# include the Kbuild file in this directory. + +ifneq ($(KERNELRELEASE),) + include $(src)/Kbuild +else + + # Determine the location of the Linux kernel source tree, and of the + # kernel's output tree. Use this to invoke Kbuild, and pass the paths + # to the source and output trees to NVIDIA's Kbuild file via + # NV_KERNEL_{SOURCES,OUTPUT}. 
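  # [Editor's note] Typical invocations of this Makefile, shown for
  # illustration only (the kernel paths are placeholders):
  #
  #   make modules                           # build against the running kernel
  #   make SYSSRC=/usr/src/linux-headers-6.8.0 modules
  #   make SYSSRC=/usr/src/linux-6.8 SYSOUT=/var/build/linux-6.8 modules
  #
  # SYSSRC and SYSOUT feed the KERNEL_SOURCES/KERNEL_OUTPUT logic below.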
+
+  ifdef SYSSRC
+    KERNEL_SOURCES := $(SYSSRC)
+  else
+    KERNEL_UNAME ?= $(shell uname -r)
+    KERNEL_MODLIB := /lib/modules/$(KERNEL_UNAME)
+    KERNEL_SOURCES := $(shell ((test -d $(KERNEL_MODLIB)/source && echo $(KERNEL_MODLIB)/source) || (test -d $(KERNEL_MODLIB)/build/source && echo $(KERNEL_MODLIB)/build/source)) || echo $(KERNEL_MODLIB)/build)
+  endif
+
+  KERNEL_OUTPUT := $(KERNEL_SOURCES)
+  KBUILD_PARAMS :=
+
+  ifdef SYSOUT
+    ifneq ($(SYSOUT), $(KERNEL_SOURCES))
+      KERNEL_OUTPUT := $(SYSOUT)
+      KBUILD_PARAMS := KBUILD_OUTPUT=$(KERNEL_OUTPUT)
+    endif
+  else
+    KERNEL_UNAME ?= $(shell uname -r)
+    KERNEL_MODLIB := /lib/modules/$(KERNEL_UNAME)
+    # $(filter pattern...,text) - Returns all whitespace-separated words in text that
+    # do match any of the pattern words, removing any words that do not match.
+    # Set the KERNEL_OUTPUT only if either $(KERNEL_MODLIB)/source or
+    # $(KERNEL_MODLIB)/build/source path matches the KERNEL_SOURCES.
+    ifneq ($(filter $(KERNEL_SOURCES),$(KERNEL_MODLIB)/source $(KERNEL_MODLIB)/build/source),)
+      KERNEL_OUTPUT := $(KERNEL_MODLIB)/build
+      KBUILD_PARAMS := KBUILD_OUTPUT=$(KERNEL_OUTPUT)
+    endif
+  endif
+
+  # If CC hasn't been set explicitly, check the value of CONFIG_CC_VERSION_TEXT.
+  # Look for the compiler specified there, and use it by default, if found.
+  ifeq ($(origin CC),default)
+    cc_version_text=$(firstword $(shell . $(KERNEL_OUTPUT)/.config; \
+      echo "$$CONFIG_CC_VERSION_TEXT"))
+
+    ifneq ($(cc_version_text),)
+      ifeq ($(shell command -v $(cc_version_text)),)
+        $(warning WARNING: Unable to locate the compiler $(cc_version_text) \
+          from CONFIG_CC_VERSION_TEXT in the kernel configuration.)
+      else
+        CC=$(cc_version_text)
+      endif
+    endif
+  endif
+
+  CC ?= cc
+  LD ?= ld
+  OBJDUMP ?= objdump
+  AWK ?= awk
+  # Bake the following awk program into a string. The program is needed to add C++
+  # to the languages excluded from BTF generation.
+  #
+  # Also, unconditionally return success (0) from the awk program, rather than
+  # propagating pahole's return status (with 'exit system(pahole_cmd)'), to
+  # work around a DW_TAG_rvalue_reference_type error in
+  # kernel/nvidia-modeset.ko.
+  #
+  # BEGIN {
+  #   pahole_cmd = "pahole"
+  #   for (i = 1; i < ARGC; i++) {
+  #     if (ARGV[i] ~ /--lang_exclude=/) {
+  #       pahole_cmd = pahole_cmd sprintf(" %s,c++", ARGV[i])
+  #     } else {
+  #       pahole_cmd = pahole_cmd sprintf(" %s", ARGV[i])
+  #     }
+  #   }
+  #   system(pahole_cmd)
+  # }
+  PAHOLE_AWK_PROGRAM = BEGIN { pahole_cmd = \"pahole\"; for (i = 1; i < ARGC; i++) { if (ARGV[i] ~ /--lang_exclude=/) { pahole_cmd = pahole_cmd sprintf(\" %s,c++\", ARGV[i]); } else { pahole_cmd = pahole_cmd sprintf(\" %s\", ARGV[i]); } } system(pahole_cmd); }
+  # If scripts/pahole-flags.sh is not present in the kernel tree, add PAHOLE and
+  # PAHOLE_AWK_PROGRAM assignments to PAHOLE_VARIABLES; otherwise assign the
+  # empty string to PAHOLE_VARIABLES.
+  PAHOLE_VARIABLES=$(if $(wildcard $(KERNEL_SOURCES)/scripts/pahole-flags.sh),,"PAHOLE=$(AWK) '$(PAHOLE_AWK_PROGRAM)'")
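  # [Editor's note] When scripts/pahole-flags.sh is absent, the override that
  # reaches the kernel's make is, schematically (abbreviated):
  #
  #   PAHOLE=awk 'BEGIN { pahole_cmd = "pahole"; ... }'
  #
  # so BTF generation runs pahole through the awk wrapper above. For cross
  # builds, ARCH and CC can be passed explicitly instead of relying on
  # 'uname -m' below; the toolchain name here is illustrative:
  #
  #   make SYSSRC=/path/to/kernel ARCH=arm64 CC=aarch64-linux-gnu-gcc modules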
+
+  ifndef ARCH
+    ARCH := $(shell uname -m | sed -e 's/i.86/i386/' \
+      -e 's/aarch64/arm64/' \
+      -e 's/riscv64/riscv/' \
+    )
+  endif
+
+  KERNEL_ARCH = $(ARCH)
+
+  ifneq ($(filter $(ARCH),i386 x86_64),)
+    KERNEL_ARCH = x86
+  else
+    ifeq ($(filter $(ARCH),arm64 riscv),)
+      $(error Unsupported architecture $(ARCH))
+    endif
+  endif
+
+  NV_KERNEL_MODULES ?= $(wildcard nvidia nvidia-modeset nvidia-drm)
+  NV_KERNEL_MODULES := $(filter-out $(NV_EXCLUDE_KERNEL_MODULES), \
+    $(NV_KERNEL_MODULES))
+  INSTALL_MOD_DIR ?= kernel/drivers/video
+
+  NV_VERBOSE ?=
+  SPECTRE_V2_RETPOLINE ?= 0
+
+  ifeq ($(NV_VERBOSE),1)
+    KBUILD_PARAMS += V=1
+  endif
+  KBUILD_PARAMS += -C $(KERNEL_SOURCES) M=$(CURDIR)
+  KBUILD_PARAMS += ARCH=$(ARCH)
+  KBUILD_PARAMS += NV_KERNEL_SOURCES=$(KERNEL_SOURCES)
+  KBUILD_PARAMS += NV_KERNEL_OUTPUT=$(KERNEL_OUTPUT)
+  KBUILD_PARAMS += NV_KERNEL_MODULES="$(NV_KERNEL_MODULES)"
+  KBUILD_PARAMS += INSTALL_MOD_DIR="$(INSTALL_MOD_DIR)"
+  KBUILD_PARAMS += NV_SPECTRE_V2=$(SPECTRE_V2_RETPOLINE)
+
+  .PHONY: modules module clean clean_conftest modules_install
+  modules clean modules_install:
+	@$(MAKE) "LD=$(LD)" "CC=$(CC)" "OBJDUMP=$(OBJDUMP)" \
+	  $(PAHOLE_VARIABLES) $(KBUILD_PARAMS) $@
+	@if [ "$@" = "modules" ]; then \
+	  for module in $(NV_KERNEL_MODULES); do \
+	    if [ -x split-object-file.sh ]; then \
+	      ./split-object-file.sh $$module.ko; \
+	    fi; \
+	  done; \
+	fi
+
+  # Compatibility target for scripts that may be directly calling the
+  # "module" target from the old build system.
+
+  module: modules
+
+  # Check if any of the kernel module linker scripts exist. If they do, pass
+  # them as linker options (via variable NV_MODULE_LD_SCRIPTS) while building
+  # the kernel interface object files. These scripts do some processing on the
+  # module symbols on which the Linux kernel's module resolution is dependent
+  # and hence must be used whenever present.
+
+  LD_SCRIPT ?= $(KERNEL_SOURCES)/scripts/module-common.lds \
+    $(KERNEL_SOURCES)/arch/$(KERNEL_ARCH)/kernel/module.lds \
+    $(KERNEL_OUTPUT)/arch/$(KERNEL_ARCH)/module.lds \
+    $(KERNEL_OUTPUT)/scripts/module.lds
+  NV_MODULE_COMMON_SCRIPTS := $(foreach s, $(wildcard $(LD_SCRIPT)), -T $(s))
+
+  # Use $* to match the stem % in the kernel interface file %-linux.o. Replace
+  # "nv" with "nvidia" in $* as appropriate: e.g. nv-modeset-linux.o links
+  # nvidia-modeset.mod.o and nvidia-modeset/nv-modeset-interface.o. The kernel
+  # interface file must have the .mod.o object linked into it: otherwise, the
+  # kernel module produced by linking the interface against its corresponding
+  # core object file will not be loadable. The .mod.o file is built as part of
+  # the MODPOST process (stage 2), so the rule to build the kernel interface
+  # cannot be defined in the *Kbuild files, which are only used during stage 1.
+
+  %-linux.o: modules
+	$(LD) $(NV_MODULE_COMMON_SCRIPTS) -r -o $@ \
+	  $(subst nv,nvidia,$*).mod.o $(subst nv,nvidia,$*)/$*-interface.o
+
+  # Kbuild's "clean" rule won't clean up the conftest headers on its own, and
+  # clean-dirs doesn't appear to work as advertised.
+  clean_conftest:
+	$(RM) -r conftest
+  clean: clean_conftest
+
+endif # KERNELRELEASE
diff --git a/kernel-open/common/inc/conftest.h b/kernel-open/common/inc/conftest.h
new file mode 100644
index 0000000..dd05144
--- /dev/null
+++ b/kernel-open/common/inc/conftest.h
@@ -0,0 +1,34 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES.
All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _CONFTEST_H +#define _CONFTEST_H + +#include "conftest/headers.h" +#include "conftest/functions.h" +#include "conftest/generic.h" +#include "conftest/macros.h" +#include "conftest/symbols.h" +#include "conftest/types.h" + +#endif diff --git a/kernel-open/common/inc/cpuopsys.h b/kernel-open/common/inc/cpuopsys.h new file mode 100644 index 0000000..9743041 --- /dev/null +++ b/kernel-open/common/inc/cpuopsys.h @@ -0,0 +1,437 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! \brief + * Define compile time symbols for CPU type and operating system type. + * This file should only contain preprocessor commands so that + * there are no dependencies on other files. + * + * cpuopsys.h + * + * Copyright (c) 2001, Nvidia Corporation. All rights reserved. + */ + +/*! + * Uniform names are defined for compile time options to distinguish + * CPU types and Operating systems. + * Distinctions between CPU and OpSys should be orthogonal. + * + * These uniform names have initially been defined by keying off the + * makefile/build names defined for builds in the OpenGL group. + * Getting the uniform names defined for other builds may require + * different qualifications. 
+ * + * The file is placed here to allow for the possibility of all driver + * components using the same naming convention for conditional compilation. + */ + +#ifndef CPUOPSYS_H +#define CPUOPSYS_H + +/*****************************************************************************/ +/* Define all OS/CPU-Chip related symbols */ + +/* ***** WINDOWS variations */ +#if defined(_WIN32) || defined(_WIN16) +# define NV_WINDOWS + +# if defined(_WIN32_WINNT) +# define NV_WINDOWS_NT +# elif defined(_WIN32_WCE) +# define NV_WINDOWS_CE +# elif !defined(NV_MODS) +# define NV_WINDOWS_9X +# endif +#endif /* _WIN32 || defined(_WIN16) */ + +/* ***** Unix variations */ +#if defined(__linux__) && !defined(NV_LINUX) && !defined(NV_VMWARE) +# define NV_LINUX +#endif /* defined(__linux__) */ + +#if defined(__VMWARE__) && !defined(NV_VMWARE) +# define NV_VMWARE +#endif /* defined(__VMWARE__) */ + +/* SunOS + gcc */ +#if defined(__sun__) && defined(__svr4__) && !defined(NV_SUNOS) +# define NV_SUNOS +#endif /* defined(__sun__) && defined(__svr4__) */ + +/* SunOS + Sun Compiler (named SunPro, Studio or Forte) */ +#if defined(__SUNPRO_C) || defined(__SUNPRO_CC) +# define NV_SUNPRO_C +# define NV_SUNOS +#endif /* defined(_SUNPRO_C) || defined(__SUNPRO_CC) */ + +#if defined(__FreeBSD__) && !defined(NV_BSD) +# define NV_BSD +#endif /* defined(__FreeBSD__) */ + +/* XXXar don't define NV_UNIX on MacOSX or vxworks or QNX */ +#if (defined(__unix__) || defined(__unix) || defined(__INTEGRITY) ) && !defined(nvmacosx) && !defined(vxworks) && !defined(NV_UNIX) && !defined(__QNX__) && !defined(__QNXNTO__)/* XXX until removed from Makefiles */ +# define NV_UNIX +#endif /* defined(__unix__) */ + +#if (defined(__QNX__) || defined(__QNXNTO__)) && !defined(NV_QNX) +# define NV_QNX +#endif + +#if (defined(__ANDROID__) || defined(ANDROID)) && !defined(NV_ANDROID) +# define NV_ANDROID +#endif + +#if defined(DceCore) && !defined(NV_DCECORE) +# define NV_DCECORE +#endif + +/* ***** Apple variations */ +#if defined(macintosh) || defined(__APPLE__) +# define NV_MACINTOSH +# if defined(__MACH__) +# define NV_MACINTOSH_OSX +# else +# define NV_MACINTOSH_OS9 +# endif +# if defined(__LP64__) +# define NV_MACINTOSH_64 +# endif +#endif /* defined(macintosh) */ + +/* ***** VxWorks */ +/* Tornado 2.21 is gcc 2.96 and #defines __vxworks. */ +/* Tornado 2.02 is gcc 2.7.2 and doesn't define any OS symbol, so we rely on */ +/* the build system #defining vxworks. 
*/ +#if defined(__vxworks) || defined(vxworks) +# define NV_VXWORKS +#endif + +/* ***** Integrity OS */ +#if defined(__INTEGRITY) +# if !defined(NV_INTEGRITY) +# define NV_INTEGRITY +# endif +#endif + +/* ***** Processor type variations */ +/* Note: The prefix NV_CPU_* is taken by Nvcm.h */ + +#if ((defined(_M_IX86) || defined(__i386__) || defined(__i386)) && !defined(NVCPU_X86)) /* XXX until removed from Makefiles */ +/* _M_IX86 for windows, __i386__ for Linux (or any x86 using gcc) */ +/* __i386 for Studio compiler on Solaris x86 */ +# define NVCPU_X86 /* any IA32 machine (not x86-64) */ +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(_WIN32) && defined(_M_IA64) +# define NVCPU_IA64_WINDOWS /* any IA64 for Windows opsys */ +#endif +#if defined(NV_LINUX) && defined(__ia64__) +# define NVCPU_IA64_LINUX /* any IA64 for Linux opsys */ +#endif +#if defined(NVCPU_IA64_WINDOWS) || defined(NVCPU_IA64_LINUX) || defined(IA64) +# define NVCPU_IA64 /* any IA64 for any opsys */ +#endif + +#if (defined(NV_MACINTOSH) && !(defined(__i386__) || defined(__x86_64__))) || defined(__PPC__) || defined(__ppc) +# if defined(__powerpc64__) && defined(__LITTLE_ENDIAN__) +# ifndef NVCPU_PPC64LE +# define NVCPU_PPC64LE /* PPC 64-bit little endian */ +# endif +# else +# ifndef NVCPU_PPC +# define NVCPU_PPC /* any non-PPC64LE PowerPC architecture */ +# endif +# ifndef NV_BIG_ENDIAN +# define NV_BIG_ENDIAN +# endif +# endif +# define NVCPU_FAMILY_PPC +#endif + +#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) +# define NVCPU_X86_64 /* any x86-64 for any opsys */ +#endif + +#if defined(NVCPU_X86) || defined(NVCPU_X86_64) +# define NVCPU_FAMILY_X86 +#endif + +#if defined(__riscv) && (__riscv_xlen==64) +# define NVCPU_RISCV64 +# if defined(__nvriscv) +# define NVCPU_NVRISCV64 +# endif +#endif + +#if defined(__arm__) || defined(_M_ARM) +/* + * 32-bit instruction set on, e.g., ARMv7 or AArch32 execution state + * on ARMv8 + */ +# define NVCPU_ARM +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(__aarch64__) || defined(__ARM64__) || defined(_M_ARM64) +# define NVCPU_AARCH64 /* 64-bit A64 instruction set on ARMv8 */ +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(NVCPU_ARM) || defined(NVCPU_AARCH64) +# define NVCPU_FAMILY_ARM +#endif + +#if defined(__SH4__) +# ifndef NVCPU_SH4 +# define NVCPU_SH4 /* Renesas (formerly Hitachi) SH4 */ +# endif +# if defined NV_WINDOWS_CE +# define NVCPU_MIN_PAGE_SHIFT 12 +# endif +#endif + +/* For Xtensa processors */ +#if defined(__XTENSA__) +# define NVCPU_XTENSA +# if defined(__XTENSA_EB__) +# define NV_BIG_ENDIAN +# endif +#endif + + +/* + * Other flavors of CPU type should be determined at run-time. + * For example, an x86 architecture with/without SSE. + * If it can compile, then there's no need for a compile time option. + * For some current GCC limitations, these may be fixed by using the Intel + * compiler for certain files in a Linux build. + */ + +/* The minimum page size can be determined from the minimum page shift */ +#if defined(NVCPU_MIN_PAGE_SHIFT) +#define NVCPU_MIN_PAGE_SIZE (1 << NVCPU_MIN_PAGE_SHIFT) +#endif + +#if defined(NVCPU_IA64) || defined(NVCPU_X86_64) || \ + defined(NV_MACINTOSH_64) || defined(NVCPU_AARCH64) || \ + defined(NVCPU_PPC64LE) || defined(NVCPU_RISCV64) +# define NV_64_BITS /* all architectures where pointers are 64 bits */ +#else +/* we assume 32 bits. I don't see a need for NV_16_BITS. 
*/ +#endif + +/* For verification-only features not intended to be included in normal drivers */ +#if defined(ENABLE_VERIF_FEATURES) +#define NV_VERIF_FEATURES +#endif + +/* + * New, safer family of #define's -- these ones use 0 vs. 1 rather than + * defined/!defined. This is advantageous because if you make a typo, + * say misspelled ENDIAN: + * + * #if NVCPU_IS_BIG_ENDAIN + * + * ...some compilers can give you a warning telling you that you screwed up. + * The compiler can also give you a warning if you forget to #include + * "cpuopsys.h" in your code before the point where you try to use these + * conditionals. + * + * Also, the names have been prefixed in more cases with "CPU" or "OS" for + * increased clarity. You can tell the names apart from the old ones because + * they all use "_IS_" in the name. + * + * Finally, these can be used in "if" statements and not just in #if's. For + * example: + * + * if (NVCPU_IS_BIG_ENDIAN) x = Swap32(x); + * + * Maybe some day in the far-off future these can replace the old #define's. + */ + +#if defined(NV_MODS) +#define NV_IS_MODS 1 +#else +#define NV_IS_MODS 0 +#endif + +#if defined(NV_WINDOWS) +#define NVOS_IS_WINDOWS 1 +#else +#define NVOS_IS_WINDOWS 0 +#endif +#if defined(NV_WINDOWS_CE) +#define NVOS_IS_WINDOWS_CE 1 +#else +#define NVOS_IS_WINDOWS_CE 0 +#endif +#if defined(NV_LINUX) +#define NVOS_IS_LINUX 1 +#else +#define NVOS_IS_LINUX 0 +#endif +#if defined(NV_UNIX) +#define NVOS_IS_UNIX 1 +#else +#define NVOS_IS_UNIX 0 +#endif +#if defined(NV_BSD) +#define NVOS_IS_FREEBSD 1 +#else +#define NVOS_IS_FREEBSD 0 +#endif +#if defined(NV_SUNOS) +#define NVOS_IS_SOLARIS 1 +#else +#define NVOS_IS_SOLARIS 0 +#endif +#if defined(NV_VMWARE) +#define NVOS_IS_VMWARE 1 +#else +#define NVOS_IS_VMWARE 0 +#endif +#if defined(NV_QNX) +#define NVOS_IS_QNX 1 +#else +#define NVOS_IS_QNX 0 +#endif +#if defined(NV_ANDROID) +#define NVOS_IS_ANDROID 1 +#else +#define NVOS_IS_ANDROID 0 +#endif +#if defined(NV_MACINTOSH) +#define NVOS_IS_MACINTOSH 1 +#else +#define NVOS_IS_MACINTOSH 0 +#endif +#if defined(NV_VXWORKS) +#define NVOS_IS_VXWORKS 1 +#else +#define NVOS_IS_VXWORKS 0 +#endif +#if defined(NV_LIBOS) +#define NVOS_IS_LIBOS 1 +#else +#define NVOS_IS_LIBOS 0 +#endif +#if defined(NV_INTEGRITY) +#define NVOS_IS_INTEGRITY 1 +#else +#define NVOS_IS_INTEGRITY 0 +#endif + +#if defined(NVCPU_X86) +#define NVCPU_IS_X86 1 +#else +#define NVCPU_IS_X86 0 +#endif +#if defined(NVCPU_RISCV64) +#define NVCPU_IS_RISCV64 1 +#else +#define NVCPU_IS_RISCV64 0 +#endif +#if defined(NVCPU_NVRISCV64) +#define NVCPU_IS_NVRISCV64 1 +#else +#define NVCPU_IS_NVRISCV64 0 +#endif +#if defined(NVCPU_IA64) +#define NVCPU_IS_IA64 1 +#else +#define NVCPU_IS_IA64 0 +#endif +#if defined(NVCPU_X86_64) +#define NVCPU_IS_X86_64 1 +#else +#define NVCPU_IS_X86_64 0 +#endif +#if defined(NVCPU_FAMILY_X86) +#define NVCPU_IS_FAMILY_X86 1 +#else +#define NVCPU_IS_FAMILY_X86 0 +#endif +#if defined(NVCPU_PPC) +#define NVCPU_IS_PPC 1 +#else +#define NVCPU_IS_PPC 0 +#endif +#if defined(NVCPU_PPC64LE) +#define NVCPU_IS_PPC64LE 1 +#else +#define NVCPU_IS_PPC64LE 0 +#endif +#if defined(NVCPU_FAMILY_PPC) +#define NVCPU_IS_FAMILY_PPC 1 +#else +#define NVCPU_IS_FAMILY_PPC 0 +#endif +#if defined(NVCPU_ARM) +#define NVCPU_IS_ARM 1 +#else +#define NVCPU_IS_ARM 0 +#endif +#if defined(NVCPU_AARCH64) +#define NVCPU_IS_AARCH64 1 +#else +#define NVCPU_IS_AARCH64 0 +#endif +#if defined(NVCPU_FAMILY_ARM) +#define NVCPU_IS_FAMILY_ARM 1 +#else +#define NVCPU_IS_FAMILY_ARM 0 +#endif +#if defined(NVCPU_SH4) +#define NVCPU_IS_SH4 1 
+#else +#define NVCPU_IS_SH4 0 +#endif +#if defined(NVCPU_XTENSA) +#define NVCPU_IS_XTENSA 1 +#else +#define NVCPU_IS_XTENSA 0 +#endif +#if defined(NV_BIG_ENDIAN) +#define NVCPU_IS_BIG_ENDIAN 1 +#else +#define NVCPU_IS_BIG_ENDIAN 0 +#endif +#if defined(NV_64_BITS) +#define NVCPU_IS_64_BITS 1 +#else +#define NVCPU_IS_64_BITS 0 +#endif +#if defined(NVCPU_FAMILY_ARM) +#define NVCPU_IS_PCIE_CACHE_COHERENT 0 +#else +#define NVCPU_IS_PCIE_CACHE_COHERENT 1 +#endif +#if defined(NV_DCECORE) +#define NVOS_IS_DCECORE 1 +#else +#define NVOS_IS_DCECORE 0 +#endif +/*****************************************************************************/ + +#endif /* CPUOPSYS_H */ diff --git a/kernel-open/common/inc/dce_rm_client_ipc.h b/kernel-open/common/inc/dce_rm_client_ipc.h new file mode 100644 index 0000000..9b1b5d0 --- /dev/null +++ b/kernel-open/common/inc/dce_rm_client_ipc.h @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _OS_DCE_CLIENT_IPC_H_ +#define _OS_DCE_CLIENT_IPC_H_ + +// RM IPC Client Types + +#define DCE_CLIENT_RM_IPC_TYPE_SYNC 0x0 +#define DCE_CLIENT_RM_IPC_TYPE_EVENT 0x1 +#define DCE_CLIENT_RM_IPC_TYPE_MAX 0x2 + +void dceclientHandleAsyncRpcCallback(NvU32 handle, NvU32 interfaceType, + NvU32 msgLength, void *data, + void *usrCtx); +#endif diff --git a/kernel-open/common/inc/nv-caps.h b/kernel-open/common/inc/nv-caps.h new file mode 100644 index 0000000..35bbf7c --- /dev/null +++ b/kernel-open/common/inc/nv-caps.h @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
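An aside on the NVCPU_IS_* / NVOS_IS_* family defined in cpuopsys.h above: because each macro is always defined as 0 or 1, it works both in preprocessor conditionals and in ordinary C code, where the dead branch still gets compile-time checking before being optimized away. A minimal sketch (the byte-swap helper is hypothetical):

#include "cpuopsys.h"

/* Hypothetical helper, standing in for a real Swap32(). */
static unsigned int my_swap32(unsigned int x)
{
    return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
           ((x << 8) & 0x00ff0000u) | (x << 24);
}

static unsigned int to_little_endian(unsigned int x)
{
    if (NVCPU_IS_BIG_ENDIAN)   /* usable in a plain 'if' */
        x = my_swap32(x);
    return x;
}

#if NVOS_IS_LINUX && NVCPU_IS_64_BITS /* and equally usable in #if */
/* Linux, 64-bit pointer builds only. */
#endif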
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_CAPS_H_ +#define _NV_CAPS_H_ + +#include "nv-kernel-interface-api.h" + +/* + * Opaque OS-specific struct; on Linux, this has member + * 'struct proc_dir_entry'. + */ +typedef struct nv_cap nv_cap_t; + +/* + * Creates directory named "capabilities" under the provided path. + * + * @param[in] path Absolute path + * + * Returns a valid nv_cap_t upon success. Otherwise, returns NULL. + */ +nv_cap_t* NV_API_CALL nv_cap_init(const char *path); + +/* + * Creates capability directory entry + * + * @param[in] parent_cap Parent capability directory + * @param[in] name Capability directory's name + * @param[in] mode Capability directory's access mode + * + * Returns a valid nv_cap_t upon success. Otherwise, returns NULL. + */ +nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap, const char *name, int mode); + +/* + * Creates capability file entry + * + * @param[in] parent_cap Parent capability directory + * @param[in] name Capability file's name + * @param[in] mode Capability file's access mode + * + * Returns a valid nv_cap_t upon success. Otherwise, returns NULL. + */ +nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap, const char *name, int mode); + +/* + * Destroys capability entry + * + * @param[in] cap Capability entry + */ +void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap); + +/* + * Validates and duplicates the provided file descriptor + * + * @param[in] cap Capability entry + * @param[in] fd File descriptor to be validated + * + * Returns duplicate fd upon success. Otherwise, returns -1. + */ +int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd); + +/* + * Closes file descriptor + * + * This function should be used to close duplicate file descriptors + * returned by nv_cap_validate_and_dup_fd. + * + * @param[in] fd File descriptor to be closed + * + */ +void NV_API_CALL nv_cap_close_fd(int fd); + +#endif /* _NV_CAPS_H_ */ diff --git a/kernel-open/common/inc/nv-chardev-numbers.h b/kernel-open/common/inc/nv-chardev-numbers.h new file mode 100644 index 0000000..54ca547 --- /dev/null +++ b/kernel-open/common/inc/nv-chardev-numbers.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
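To make the nv-caps API above concrete, a hypothetical sketch of a component that builds a capability entry and validates a client-supplied file descriptor (path and names invented for illustration):

#include "nv-caps.h"

static nv_cap_t *root_cap;  /* hypothetical globals */
static nv_cap_t *mig_cap;

static int caps_setup_example(void)
{
    /* Creates ".../capabilities" under the given path. */
    root_cap = nv_cap_init("/driver/nvidia");
    if (root_cap == NULL)
        return -1;

    mig_cap = nv_cap_create_file_entry(root_cap, "mig-config", 0640);
    return (mig_cap != NULL) ? 0 : -1;
}

static int caps_check_fd_example(int client_fd)
{
    /* On success this returns a dup'd fd, which we must close again. */
    int fd = nv_cap_validate_and_dup_fd(mig_cap, client_fd);

    if (fd < 0)
        return -1;
    nv_cap_close_fd(fd);
    return 0;
}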
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _NV_CHARDEV_NUMBERS_H_ +#define _NV_CHARDEV_NUMBERS_H_ + +// NVIDIA's reserved major character device number (Linux). +#define NV_MAJOR_DEVICE_NUMBER 195 + +// Minor numbers 0 to 247 reserved for regular devices +#define NV_MINOR_DEVICE_NUMBER_REGULAR_MAX 247 + +// Minor numbers 248 to 253 currently unused + +// Minor number 254 reserved for the modeset device (provided by NVKMS) +#define NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE 254 + +// Minor number 255 reserved for the control device +#define NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE 255 + +#endif // _NV_CHARDEV_NUMBERS_H_ + diff --git a/kernel-open/common/inc/nv-dmabuf.h b/kernel-open/common/inc/nv-dmabuf.h new file mode 100644 index 0000000..ab794df --- /dev/null +++ b/kernel-open/common/inc/nv-dmabuf.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_DMABUF_H_ +#define _NV_DMABUF_H_ + +#include "nv-linux.h" + +NV_STATUS nv_dma_buf_export(nv_state_t *, nv_ioctl_export_to_dma_buf_fd_t *); + +#endif // _NV_DMABUF_H_ diff --git a/kernel-open/common/inc/nv-firmware-registry.h b/kernel-open/common/inc/nv-firmware-registry.h new file mode 100644 index 0000000..ab8b405 --- /dev/null +++ b/kernel-open/common/inc/nv-firmware-registry.h @@ -0,0 +1,83 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
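As a usage note on the reserved numbers above: on Linux the control node is conventionally /dev/nvidiactl, i.e. character major 195, minor 255. A small userspace sketch that assembles that device number:

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysmacros.h>  /* makedev(), major(), minor() */

/* Values from nv-chardev-numbers.h above. */
#define NV_MAJOR_DEVICE_NUMBER                195
#define NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE 255

int main(void)
{
    dev_t ctl = makedev(NV_MAJOR_DEVICE_NUMBER,
                        NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE);

    /* Equivalent to: mknod /dev/nvidiactl c 195 255 */
    printf("nvidiactl = %u:%u\n", major(ctl), minor(ctl));
    return 0;
}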
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// +// This file holds GPU firmware related registry key definitions that are +// shared between Windows and Unix +// + +#ifndef NV_FIRMWARE_REGISTRY_H +#define NV_FIRMWARE_REGISTRY_H + +// +// Registry key that when enabled, will enable use of GPU firmware. +// +// Possible mode values: +// 0 - Do not enable GPU firmware +// 1 - Enable GPU firmware +// 2 - (Default) Use the default enablement policy for GPU firmware +// +// Setting this to anything other than 2 will alter driver firmware- +// enablement policies, possibly disabling GPU firmware where it would +// have otherwise been enabled by default. +// +// Policy bits: +// +// POLICY_ALLOW_FALLBACK: +// As the normal behavior is to fail GPU initialization if this registry +// entry is set in such a way that results in an invalid configuration, if +// instead the user would like the driver to automatically try to fallback +// to initializing the failing GPU with firmware disabled, then this bit can +// be set (ex: 0x11 means try to enable GPU firmware but fall back if needed). +// Note that this can result in a mixed mode configuration (ex: GPU0 has +// firmware enabled, but GPU1 does not). +// +#define NV_REG_STR_ENABLE_GPU_FIRMWARE "EnableGpuFirmware" + +#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK 0x0000000F +#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DISABLED 0x00000000 +#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED 0x00000001 +#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT 0x00000002 + +#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0 +#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010 + +#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE 0x00000012 + +// +// Registry key that when enabled, will send GPU firmware logs +// to the system log, when possible. 
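To illustrate the mode/policy encoding described above, a short sketch that decodes a value such as 0x11 (mode: enabled, policy: allow fallback) using the masks from this header; it assumes the NV_REG_* macros above are in scope:

#include <stdbool.h>

static bool gpu_firmware_mode_enabled(unsigned int reg)
{
    return (reg & NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK) ==
           NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED;
}

static bool gpu_firmware_fallback_allowed(unsigned int reg)
{
    return (reg & NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK) != 0;
}

/* gpu_firmware_mode_enabled(0x11) and
 * gpu_firmware_fallback_allowed(0x11) are both true. */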
+// +// Possible values: +// 0 - Do not send GPU firmware logs to the system log +// 1 - Enable sending of GPU firmware logs to the system log +// 2 - (Default) Enable sending of GPU firmware logs to the system log for +// the debug kernel driver build only +// +#define NV_REG_STR_ENABLE_GPU_FIRMWARE_LOGS "EnableGpuFirmwareLogs" + +#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_DISABLE 0x00000000 +#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE 0x00000001 +#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG 0x00000002 + +#endif // NV_FIRMWARE_REGISTRY_H diff --git a/kernel-open/common/inc/nv-firmware.h b/kernel-open/common/inc/nv-firmware.h new file mode 100644 index 0000000..4636243 --- /dev/null +++ b/kernel-open/common/inc/nv-firmware.h @@ -0,0 +1,148 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NV_FIRMWARE_H +#define NV_FIRMWARE_H + + + +#include +#include + +typedef enum +{ + NV_FIRMWARE_TYPE_GSP, + NV_FIRMWARE_TYPE_GSP_LOG, +#if defined(NV_VMWARE) + NV_FIRMWARE_TYPE_BINDATA +#endif +} nv_firmware_type_t; + +typedef enum +{ + NV_FIRMWARE_CHIP_FAMILY_NULL = 0, + NV_FIRMWARE_CHIP_FAMILY_TU10X = 1, + NV_FIRMWARE_CHIP_FAMILY_TU11X = 2, + NV_FIRMWARE_CHIP_FAMILY_GA100 = 3, + NV_FIRMWARE_CHIP_FAMILY_GA10X = 4, + NV_FIRMWARE_CHIP_FAMILY_AD10X = 5, + NV_FIRMWARE_CHIP_FAMILY_GH100 = 6, + NV_FIRMWARE_CHIP_FAMILY_GB10X = 8, + NV_FIRMWARE_CHIP_FAMILY_GB10Y = 11, + NV_FIRMWARE_CHIP_FAMILY_END, +} nv_firmware_chip_family_t; + +static inline const char *nv_firmware_chip_family_to_string( + nv_firmware_chip_family_t fw_chip_family +) +{ + switch (fw_chip_family) { + case NV_FIRMWARE_CHIP_FAMILY_GB10X: return "gb10x"; + case NV_FIRMWARE_CHIP_FAMILY_GB10Y: return "gb10y"; + case NV_FIRMWARE_CHIP_FAMILY_GH100: return "gh100"; + case NV_FIRMWARE_CHIP_FAMILY_AD10X: return "ad10x"; + case NV_FIRMWARE_CHIP_FAMILY_GA10X: return "ga10x"; + case NV_FIRMWARE_CHIP_FAMILY_GA100: return "ga100"; + case NV_FIRMWARE_CHIP_FAMILY_TU11X: return "tu11x"; + case NV_FIRMWARE_CHIP_FAMILY_TU10X: return "tu10x"; + + case NV_FIRMWARE_CHIP_FAMILY_END: // fall through + case NV_FIRMWARE_CHIP_FAMILY_NULL: + return ""; + } + return ""; +} + +// The includer may optionally define +// NV_FIRMWARE_FOR_NAME(name) +// to return a platform-defined string for a given gsp_* or gsp_log_* name.
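As a concrete illustration, an includer might map names onto firmware file paths like this (the path scheme here is purely hypothetical):

/* Hypothetical adapter-side definition, made before including this header. */
#define NV_FIRMWARE_FOR_NAME(name)  "nvidia/latest/" name ".bin"
#include "nv-firmware.h"

static const char *pick_gsp_image_example(void)
{
    /* AD10X maps onto the ga10x GSP image, so this returns
     * "nvidia/latest/gsp_ga10x.bin". */
    return nv_firmware_for_chip_family(NV_FIRMWARE_TYPE_GSP,
                                       NV_FIRMWARE_CHIP_FAMILY_AD10X);
}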
+// +// The function nv_firmware_for_chip_family will then be available. +#if defined(NV_FIRMWARE_FOR_NAME) +static inline const char *nv_firmware_for_chip_family( + nv_firmware_type_t fw_type, + nv_firmware_chip_family_t fw_chip_family +) +{ + if (fw_type == NV_FIRMWARE_TYPE_GSP) + { + switch (fw_chip_family) + { + case NV_FIRMWARE_CHIP_FAMILY_GB10X: // fall through + case NV_FIRMWARE_CHIP_FAMILY_GB10Y: // fall through + case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through + case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through + case NV_FIRMWARE_CHIP_FAMILY_GA10X: + return NV_FIRMWARE_FOR_NAME("gsp_ga10x"); + + case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through + case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through + case NV_FIRMWARE_CHIP_FAMILY_TU10X: + return NV_FIRMWARE_FOR_NAME("gsp_tu10x"); + + case NV_FIRMWARE_CHIP_FAMILY_END: // fall through + case NV_FIRMWARE_CHIP_FAMILY_NULL: + return ""; + } + } + else if (fw_type == NV_FIRMWARE_TYPE_GSP_LOG) + { + switch (fw_chip_family) + { + case NV_FIRMWARE_CHIP_FAMILY_GB10X: // fall through + case NV_FIRMWARE_CHIP_FAMILY_GB10Y: // fall through + case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through + case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through + case NV_FIRMWARE_CHIP_FAMILY_GA10X: + return NV_FIRMWARE_FOR_NAME("gsp_log_ga10x"); + + case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through + case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through + case NV_FIRMWARE_CHIP_FAMILY_TU10X: + return NV_FIRMWARE_FOR_NAME("gsp_log_tu10x"); + + case NV_FIRMWARE_CHIP_FAMILY_END: // fall through + case NV_FIRMWARE_CHIP_FAMILY_NULL: + return ""; + } + } +#if defined(NV_VMWARE) + else if (fw_type == NV_FIRMWARE_TYPE_BINDATA) + { + return NV_FIRMWARE_FOR_NAME("bindata_image"); + } +#endif + return ""; +} +#endif // defined(NV_FIRMWARE_FOR_NAME) + +// The includer may optionally define +// NV_FIRMWARE_DECLARE_GSP(name) +// which will then be invoked (at the top-level) for each +// gsp_* (but not gsp_log_*) +#if defined(NV_FIRMWARE_DECLARE_GSP) +NV_FIRMWARE_DECLARE_GSP("gsp_ga10x") +NV_FIRMWARE_DECLARE_GSP("gsp_tu10x") +#endif // defined(NV_FIRMWARE_DECLARE_GSP) + +#endif // NV_FIRMWARE_H diff --git a/kernel-open/common/inc/nv-gpu-info.h b/kernel-open/common/inc/nv-gpu-info.h new file mode 100644 index 0000000..a8c0c0a --- /dev/null +++ b/kernel-open/common/inc/nv-gpu-info.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_GPU_INFO_H_ +#define _NV_GPU_INFO_H_ + +typedef struct { + NvU32 gpu_id; + + struct { + NvU32 domain; + NvU8 bus, slot, function; + } pci_info; + + /* + * opaque OS-specific pointer; on Linux, this is a pointer to the + * 'struct device' for the GPU. + */ + void *os_device_ptr; +} nv_gpu_info_t; + +#define NV_MAX_GPUS 32 + +#endif /* _NV_GPU_INFO_H_ */ diff --git a/kernel-open/common/inc/nv-hash.h b/kernel-open/common/inc/nv-hash.h new file mode 100644 index 0000000..b75a79c --- /dev/null +++ b/kernel-open/common/inc/nv-hash.h @@ -0,0 +1,88 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-22 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __NV_HASH_H__ +#define __NV_HASH_H__ + +#include "conftest.h" +#include "nv-list-helpers.h" +#include +#include +#include + +#include /* full_name_hash() */ + +#define nv_string_hash(_str) full_name_hash(NULL, _str, strlen(_str)) + +/** + * This naive hashtable was introduced by commit d9b482c8ba19 (v3.7, 2012-10-31). + * To support older kernels import necessary functionality from + * . + */ + +#define NV_HASH_SIZE(name) (ARRAY_SIZE(name)) +#define NV_HASH_BITS(name) ilog2(NV_HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define NV_HASH_MIN(val, bits) \ + (sizeof(val) <= 4 ? 
hash_32(val, bits) : hash_long(val, bits)) + +#define NV_DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +static inline void _nv_hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + { + INIT_HLIST_HEAD(&ht[i]); + } +} + +/** + * nv_hash_init - initialize a hash table + * @hashtable: hashtable to be initialized + */ +#define nv_hash_init(hashtable) _nv_hash_init(hashtable, NV_HASH_SIZE(hashtable)) + +/** + * nv_hash_add - add an object to a hashtable + * @hashtable: hashtable to add to + * @node: the &struct hlist_node of the object to be added + * @key: the key of the object to be added + */ +#define nv_hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[NV_HASH_MIN(key, NV_HASH_BITS(hashtable))]) + +/** + * nv_hash_for_each_possible - iterate over all possible objects hashing to the + * same bucket + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + */ +#define nv_hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[NV_HASH_MIN(key, NV_HASH_BITS(name))], member) + +#endif // __NV_HASH_H__ diff --git a/kernel-open/common/inc/nv-hypervisor.h b/kernel-open/common/inc/nv-hypervisor.h new file mode 100644 index 0000000..a2b2649 --- /dev/null +++ b/kernel-open/common/inc/nv-hypervisor.h @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_HYPERVISOR_H_ +#define _NV_HYPERVISOR_H_ + +#include + +// Enums for supported hypervisor types. 
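Returning to the nv-hash.h wrappers just defined: a hypothetical usage sketch with an eight-bucket table keyed by GPU id (the entry struct and names are invented):

#include "nv-hash.h"

struct gpu_entry                      /* hypothetical */
{
    NvU32 gpu_id;
    struct hlist_node node;
};

static NV_DECLARE_HASHTABLE(gpu_table, 3);   /* 1 << 3 = 8 buckets */

static struct gpu_entry *gpu_table_example(struct gpu_entry *entry)
{
    struct gpu_entry *cur;

    nv_hash_init(gpu_table);
    nv_hash_add(gpu_table, &entry->node, entry->gpu_id);

    /* Only walks the bucket that entry->gpu_id hashes into. */
    nv_hash_for_each_possible(gpu_table, cur, node, entry->gpu_id)
    {
        if (cur->gpu_id == entry->gpu_id)
            return cur;
    }
    return NULL;
}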
+// New hypervisor type should be added before OS_HYPERVISOR_UNKNOWN +typedef enum _HYPERVISOR_TYPE +{ + OS_HYPERVISOR_XEN = 0, + OS_HYPERVISOR_VMWARE, + OS_HYPERVISOR_HYPERV, + OS_HYPERVISOR_KVM, + OS_HYPERVISOR_UNKNOWN +} HYPERVISOR_TYPE; + +#define CMD_VFIO_WAKE_REMOVE_GPU 1 +#define CMD_VGPU_VFIO_PRESENT 2 +#define CMD_VFIO_PCI_CORE_PRESENT 3 + +#define MAX_VF_COUNT_PER_GPU 64 + +typedef enum _VGPU_TYPE_INFO +{ + VGPU_TYPE_NAME = 0, + VGPU_TYPE_DESCRIPTION, + VGPU_TYPE_INSTANCES, +} VGPU_TYPE_INFO; + +typedef struct +{ + void *nv; + NvU32 domain; + NvU32 bus; + NvU32 device; + NvU32 return_status; +} vgpu_vfio_info; + +typedef struct +{ + NvU32 domain; + NvU8 bus; + NvU8 slot; + NvU8 function; + NvBool isNvidiaAttached; + NvBool isMdevAttached; +} vgpu_vf_pci_info; + +typedef enum VGPU_CMD_PROCESS_VF_INFO_E +{ + NV_VGPU_SAVE_VF_INFO = 0, + NV_VGPU_REMOVE_VF_PCI_INFO = 1, + NV_VGPU_REMOVE_VF_MDEV_INFO = 2, + NV_VGPU_GET_VF_INFO = 3 +} VGPU_CMD_PROCESS_VF_INFO; + +typedef enum VGPU_DEVICE_STATE_E +{ + NV_VGPU_DEV_UNUSED = 0, + NV_VGPU_DEV_OPENED = 1, + NV_VGPU_DEV_IN_USE = 2 +} VGPU_DEVICE_STATE; + +/* + * Function prototypes + */ + +HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void); + +#endif // _NV_HYPERVISOR_H_ diff --git a/kernel-open/common/inc/nv-ioctl-numa.h b/kernel-open/common/inc/nv-ioctl-numa.h new file mode 100644 index 0000000..0af5267 --- /dev/null +++ b/kernel-open/common/inc/nv-ioctl-numa.h @@ -0,0 +1,81 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
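Before moving on, a quick sketch of how the HYPERVISOR_TYPE enum above is typically consumed (hypothetical helper):

#include "nv-hypervisor.h"

static const char *hypervisor_name_example(void)
{
    switch (nv_get_hypervisor_type())
    {
        case OS_HYPERVISOR_XEN:     return "Xen";
        case OS_HYPERVISOR_VMWARE:  return "VMware";
        case OS_HYPERVISOR_HYPERV:  return "Hyper-V";
        case OS_HYPERVISOR_KVM:     return "KVM";
        case OS_HYPERVISOR_UNKNOWN:
        default:                    return "unknown or bare metal";
    }
}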
+ */ + + +#ifndef NV_IOCTL_NUMA_H +#define NV_IOCTL_NUMA_H + +#include + +#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX) +#include +#elif defined (NV_KERNEL_INTERFACE_LAYER) && defined(NV_BSD) +#include +#else + +#include + +#if !defined(__aligned) +#define __aligned(n) __attribute__((aligned(n))) +#endif + +#endif + +#define NV_ESC_NUMA_INFO (NV_IOCTL_BASE + 15) +#define NV_ESC_SET_NUMA_STATUS (NV_IOCTL_BASE + 16) + +#define NV_IOCTL_NUMA_INFO_MAX_OFFLINE_ADDRESSES 64 +typedef struct offline_addresses +{ + uint64_t addresses[NV_IOCTL_NUMA_INFO_MAX_OFFLINE_ADDRESSES] __aligned(8); + uint32_t numEntries; +} nv_offline_addresses_t; + + +/* per-device NUMA memory info as assigned by the system */ +typedef struct nv_ioctl_numa_info +{ + int32_t nid; + int32_t status; + uint64_t memblock_size __aligned(8); + uint64_t numa_mem_addr __aligned(8); + uint64_t numa_mem_size __aligned(8); + uint8_t use_auto_online; + nv_offline_addresses_t offline_addresses __aligned(8); +} nv_ioctl_numa_info_t; + +/* set the status of the device NUMA memory */ +typedef struct nv_ioctl_set_numa_status +{ + int32_t status; +} nv_ioctl_set_numa_status_t; + +#define NV_IOCTL_NUMA_STATUS_DISABLED 0 +#define NV_IOCTL_NUMA_STATUS_OFFLINE 1 +#define NV_IOCTL_NUMA_STATUS_ONLINE_IN_PROGRESS 2 +#define NV_IOCTL_NUMA_STATUS_ONLINE 3 +#define NV_IOCTL_NUMA_STATUS_ONLINE_FAILED 4 +#define NV_IOCTL_NUMA_STATUS_OFFLINE_IN_PROGRESS 5 +#define NV_IOCTL_NUMA_STATUS_OFFLINE_FAILED 6 + +#endif diff --git a/kernel-open/common/inc/nv-ioctl-numbers.h b/kernel-open/common/inc/nv-ioctl-numbers.h new file mode 100644 index 0000000..d0efa6f --- /dev/null +++ b/kernel-open/common/inc/nv-ioctl-numbers.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef NV_IOCTL_NUMBERS_H +#define NV_IOCTL_NUMBERS_H + +/* NOTE: using an ioctl() number > 55 will overflow! 
*/ +#define NV_IOCTL_MAGIC 'F' +#define NV_IOCTL_BASE 200 +#define NV_ESC_CARD_INFO (NV_IOCTL_BASE + 0) +#define NV_ESC_REGISTER_FD (NV_IOCTL_BASE + 1) +#define NV_ESC_ALLOC_OS_EVENT (NV_IOCTL_BASE + 6) +#define NV_ESC_FREE_OS_EVENT (NV_IOCTL_BASE + 7) +#define NV_ESC_STATUS_CODE (NV_IOCTL_BASE + 9) +#define NV_ESC_CHECK_VERSION_STR (NV_IOCTL_BASE + 10) +#define NV_ESC_IOCTL_XFER_CMD (NV_IOCTL_BASE + 11) +#define NV_ESC_ATTACH_GPUS_TO_FD (NV_IOCTL_BASE + 12) +#define NV_ESC_QUERY_DEVICE_INTR (NV_IOCTL_BASE + 13) +#define NV_ESC_SYS_PARAMS (NV_IOCTL_BASE + 14) +#define NV_ESC_EXPORT_TO_DMABUF_FD (NV_IOCTL_BASE + 17) +#define NV_ESC_WAIT_OPEN_COMPLETE (NV_IOCTL_BASE + 18) + +#endif diff --git a/kernel-open/common/inc/nv-ioctl.h b/kernel-open/common/inc/nv-ioctl.h new file mode 100644 index 0000000..3a8e88f --- /dev/null +++ b/kernel-open/common/inc/nv-ioctl.h @@ -0,0 +1,156 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef NV_IOCTL_H +#define NV_IOCTL_H + +#include +#include + +typedef struct { + NvU32 domain; /* PCI domain number */ + NvU8 bus; /* PCI bus number */ + NvU8 slot; /* PCI slot number */ + NvU8 function; /* PCI function number */ + NvU16 vendor_id; /* PCI vendor ID */ + NvU16 device_id; /* PCI device ID */ +} nv_pci_info_t; + +/* + * ioctl()'s with parameter structures too large for the + * _IOC cmd layout use the nv_ioctl_xfer_t structure + * and the NV_ESC_IOCTL_XFER_CMD ioctl() to pass the actual + * size and user argument pointer into the RM, which + * will then copy it to/from kernel space in separate steps. 
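The xfer mechanism is easier to see in a sketch: a hypothetical oversized parameter block is described by the nv_ioctl_xfer_t defined just below, and the kernel then copies the real payload in a separate step (userspace view; names invented):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

/* Hypothetical parameter block too large for the _IOC size field. */
typedef struct { char payload[1 << 14]; } big_params_t;

static int xfer_ioctl_example(int fd, unsigned int real_cmd, big_params_t *p)
{
    nv_ioctl_xfer_t xfer;

    memset(&xfer, 0, sizeof(xfer));
    xfer.cmd  = real_cmd;              /* the actual escape number */
    xfer.size = sizeof(*p);            /* true parameter size */
    xfer.ptr  = (NvP64)(uintptr_t)p;   /* user pointer, 64-bit clean */

    /* Only nv_ioctl_xfer_t must fit in the _IOC encoding. */
    return ioctl(fd, _IOWR(NV_IOCTL_MAGIC, NV_ESC_IOCTL_XFER_CMD,
                           nv_ioctl_xfer_t), &xfer);
}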
+ */ +typedef struct nv_ioctl_xfer +{ + NvU32 cmd; + NvU32 size; + NvP64 ptr NV_ALIGN_BYTES(8); +} nv_ioctl_xfer_t; + +typedef struct nv_ioctl_card_info +{ + NvBool valid; + nv_pci_info_t pci_info; /* PCI config information */ + NvU32 gpu_id; + NvU16 interrupt_line; + NvU64 reg_address NV_ALIGN_BYTES(8); + NvU64 reg_size NV_ALIGN_BYTES(8); + NvU64 fb_address NV_ALIGN_BYTES(8); + NvU64 fb_size NV_ALIGN_BYTES(8); + NvU32 minor_number; + NvU8 dev_name[10]; /* device names such as vmgfx[0-32] for vmkernel */ +} nv_ioctl_card_info_t; + +/* alloc event */ +typedef struct nv_ioctl_alloc_os_event +{ + NvHandle hClient; + NvHandle hDevice; + NvU32 fd; + NvU32 Status; +} nv_ioctl_alloc_os_event_t; + +/* free event */ +typedef struct nv_ioctl_free_os_event +{ + NvHandle hClient; + NvHandle hDevice; + NvU32 fd; + NvU32 Status; +} nv_ioctl_free_os_event_t; + +/* status code */ +typedef struct nv_ioctl_status_code +{ + NvU32 domain; + NvU8 bus; + NvU8 slot; + NvU32 status; +} nv_ioctl_status_code_t; + +/* check version string */ +#define NV_RM_API_VERSION_STRING_LENGTH 64 + +typedef struct nv_ioctl_rm_api_version +{ + NvU32 cmd; + NvU32 reply; + char versionString[NV_RM_API_VERSION_STRING_LENGTH]; +} nv_ioctl_rm_api_version_t; + +#define NV_RM_API_VERSION_CMD_STRICT 0 +#define NV_RM_API_VERSION_CMD_RELAXED '1' +#define NV_RM_API_VERSION_CMD_QUERY '2' + +#define NV_RM_API_VERSION_REPLY_UNRECOGNIZED 0 +#define NV_RM_API_VERSION_REPLY_RECOGNIZED 1 + +typedef struct nv_ioctl_query_device_intr +{ + NvU32 intrStatus NV_ALIGN_BYTES(4); + NvU32 status; +} nv_ioctl_query_device_intr; + +/* system parameters that the kernel driver may use for configuration */ +typedef struct nv_ioctl_sys_params +{ + NvU64 memblock_size NV_ALIGN_BYTES(8); +} nv_ioctl_sys_params_t; + +typedef struct nv_ioctl_register_fd +{ + int ctl_fd; +} nv_ioctl_register_fd_t; + +#define NV_DMABUF_EXPORT_MAX_HANDLES 128 + +#define NV_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT 0 +#define NV_DMABUF_EXPORT_MAPPING_TYPE_FORCE_PCIE 1 + +typedef struct nv_ioctl_export_to_dma_buf_fd +{ + int fd; + NvHandle hClient; + NvU32 totalObjects; + NvU32 numObjects; + NvU32 index; + NvU64 totalSize NV_ALIGN_BYTES(8); + NvU8 mappingType; + NvBool bAllowMmap; + NvHandle handles[NV_DMABUF_EXPORT_MAX_HANDLES]; + NvU64 offsets[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8); + NvU64 sizes[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8); + NvU32 status; +} nv_ioctl_export_to_dma_buf_fd_t; + +typedef struct nv_ioctl_wait_open_complete +{ + int rc; + NvU32 adapterStatus; +} nv_ioctl_wait_open_complete_t; + +#endif diff --git a/kernel-open/common/inc/nv-kernel-interface-api.h b/kernel-open/common/inc/nv-kernel-interface-api.h new file mode 100644 index 0000000..183f9b4 --- /dev/null +++ b/kernel-open/common/inc/nv-kernel-interface-api.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
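A cautious sketch of populating the dma-buf export request defined above; the windowing semantics (index/numObjects against totalObjects) are assumptions here, and the request would be issued as NV_ESC_EXPORT_TO_DMABUF_FD from nv-ioctl-numbers.h:

#include <string.h>

static void fill_export_request_example(nv_ioctl_export_to_dma_buf_fd_t *req,
                                        NvHandle hClient, NvHandle hMemory,
                                        NvU64 size)
{
    memset(req, 0, sizeof(*req));
    req->fd           = -1;          /* assumption: RM returns the fd here */
    req->hClient      = hClient;
    req->totalObjects = 1;
    req->numObjects   = 1;           /* this call describes object [0] */
    req->index        = 0;
    req->totalSize    = size;
    req->mappingType  = NV_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT;
    req->bAllowMmap   = NV_FALSE;
    req->handles[0]   = hMemory;
    req->offsets[0]   = 0;
    req->sizes[0]     = size;
}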
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_KERNEL_INTERFACE_API_H +#define _NV_KERNEL_INTERFACE_API_H +/************************************************************************************************************** +* +* File: nv-kernel-interface-api.h +* +* Description: +* Defines the NV API related macros. +* +**************************************************************************************************************/ + +#if NVOS_IS_UNIX && NVCPU_IS_X86_64 && defined(__use_altstack__) +#define NV_API_CALL __attribute__((altstack(0))) +#else +#define NV_API_CALL +#endif + +#endif /* _NV_KERNEL_INTERFACE_API_H */ diff --git a/kernel-open/common/inc/nv-kref.h b/kernel-open/common/inc/nv-kref.h new file mode 100644 index 0000000..7e28ce2 --- /dev/null +++ b/kernel-open/common/inc/nv-kref.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NV_KREF_H__ +#define __NV_KREF_H__ + +#include + +typedef struct nv_kref +{ + atomic_t refcount; +} nv_kref_t; + +static inline void nv_kref_init(nv_kref_t *nv_kref) +{ + atomic_set(&nv_kref->refcount, 1); +} + +static inline void nv_kref_get(nv_kref_t *nv_kref) +{ + atomic_inc(&nv_kref->refcount); +} + +static inline int nv_kref_put(nv_kref_t *nv_kref, + void (*release)(nv_kref_t *nv_kref)) +{ + if (atomic_dec_and_test(&nv_kref->refcount)) + { + release(nv_kref); + return 1; + } + + return 0; +} + +static inline unsigned int nv_kref_read(const nv_kref_t *nv_kref) +{ + return atomic_read(&nv_kref->refcount); +} + +#endif // __NV_KREF_H__ diff --git a/kernel-open/common/inc/nv-kthread-q-os.h b/kernel-open/common/inc/nv-kthread-q-os.h new file mode 100644 index 0000000..4d7decf --- /dev/null +++ b/kernel-open/common/inc/nv-kthread-q-os.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_KTHREAD_QUEUE_OS_H__ +#define __NV_KTHREAD_QUEUE_OS_H__ + +#include // atomic_t +#include // list +#include // task_struct +#include // NUMA_NO_NODE +#include + +#include "conftest.h" + +struct nv_kthread_q +{ + struct list_head q_list_head; + spinlock_t q_lock; + + // This is a counting semaphore. It gets incremented and decremented + // exactly once for each item that is added to the queue. + struct semaphore q_sem; + atomic_t main_loop_should_exit; + + struct task_struct *q_kthread; + + bool is_unload_flush_ongoing; +}; + +struct nv_kthread_q_item +{ + struct list_head q_list_node; + nv_q_func_t function_to_run; + void *function_args; +}; + + +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE (-1) +#endif + +#define NV_KTHREAD_NO_NODE NUMA_NO_NODE + +#endif diff --git a/kernel-open/common/inc/nv-kthread-q.h b/kernel-open/common/inc/nv-kthread-q.h new file mode 100644 index 0000000..a278ca5 --- /dev/null +++ b/kernel-open/common/inc/nv-kthread-q.h @@ -0,0 +1,205 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
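A small sketch of the intended nv-kref pattern above: embed the nv_kref_t in a wrapper object and free the object from the release callback (the wrapper struct is hypothetical):

#include "nv-kref.h"
#include <linux/kernel.h>  /* container_of() */
#include <linux/slab.h>    /* kfree() */

struct my_obj                  /* hypothetical refcounted object */
{
    nv_kref_t refcount;
    int payload;
};

static void my_obj_release(nv_kref_t *kref)
{
    struct my_obj *obj = container_of(kref, struct my_obj, refcount);

    kfree(obj);
}

static void my_obj_lifecycle_example(struct my_obj *obj)
{
    nv_kref_init(&obj->refcount);                 /* count == 1 */
    nv_kref_get(&obj->refcount);                  /* count == 2 */
    nv_kref_put(&obj->refcount, my_obj_release);  /* count == 1 */
    nv_kref_put(&obj->refcount, my_obj_release);  /* 0: release runs */
}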
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_KTHREAD_QUEUE_H__ +#define __NV_KTHREAD_QUEUE_H__ + +struct nv_kthread_q; +struct nv_kthread_q_item; +typedef struct nv_kthread_q nv_kthread_q_t; +typedef struct nv_kthread_q_item nv_kthread_q_item_t; + +typedef void (*nv_q_func_t)(void *args); + +#include "nv-kthread-q-os.h" + +//////////////////////////////////////////////////////////////////////////////// +// nv_kthread_q: +// +// 1. API and overview +// +// This "nv_kthread_q" system implements a simple queuing system for deferred +// work. The nv_kthread_q system has goals and use cases that are similar to +// the named workqueues in the Linux kernel, but nv_kthread_q is much (10x or +// so) smaller, simpler--and correspondingly less general. Deferred work +// items are put into a queue, and run within the context of a dedicated set +// of kernel threads (kthread). +// +// In order to avoid confusion with the Linux workqueue system, I have +// avoided using the term "work", and instead refer to "queues" (also called +// "q's") and "queue items" (also called "q_items"), in both variable names +// and comments. +// +// This module depends only upon the Linux kernel. +// +// Queue items that are submitted to separate nv_kthread_q instances are +// guaranteed to be run in different kthreads. +// +// Queue items that are submitted to the same nv_kthread_q are not guaranteed +// to be serialized, nor are they guaranteed to run concurrently. +// +// 2. Allocations +// +// The caller allocates queues and queue items. The nv_kthread_q APIs do +// the initialization (zeroing and setup) of queues and queue items. +// Allocation is handled that way, because one of the first use cases is a +// bottom half interrupt handler, and for that, queue items should be +// pre-allocated (for example, one per GPU), so that no allocation is +// required in the top-half interrupt handler. Relevant API calls: +// +// 3. Queue initialization +// +// nv_kthread_q_init() initializes a queue on the current NUMA node. +// +// or +// +// nv_kthread_q_init_on_node() initializes a queue on a specific NUMA node. +// +// 4. Scheduling things for the queue to run +// +// The nv_kthread_q_schedule_q_item() routine will schedule a q_item to run. +// +// 5. Stopping the queue(s) +// +// The nv_kthread_q_stop() routine will flush the queue, and safely stop +// the kthread, before returning.
+// +//////////////////////////////////////////////////////////////////////////////// + +// +// The queue must not be used before calling this routine. +// +// The caller allocates an nv_kthread_q_t item. This routine initializes +// the queue, and starts up a kernel thread ("kthread") to service the queue. +// The queue will initially be empty; there is intentionally no way to +// pre-initialize the queue with items to run. +// +// In order to avoid external dependencies (specifically, NV_STATUS codes), this +// returns a Linux kernel (negative) errno on failure, and zero on success. It +// is safe to call nv_kthread_q_stop() on a queue that nv_kthread_q_init() +// failed for. +// +// A short prefix of the qname arg will show up in []'s, via the ps(1) utility. +// +// The kernel thread stack is preferably allocated on the specified NUMA node, +// but fallback to another node is possible because kernel allocators do not +// guarantee affinity. Note that NUMA-affinity applies only to +// the kthread stack. This API does not do anything about limiting the CPU +// affinity of the kthread. That is left to the caller. +// +// Reusing a queue: once a queue is initialized, it must be safely shut down +// (see "Stopping the queue(s)", below), before it can be reused. So, for +// a simple queue use case, the following will work: +// +// nv_kthread_q_init_on_node(&some_q, "display_name", preferred_node); +// nv_kthread_q_stop(&some_q); +// nv_kthread_q_init_on_node(&some_q, "reincarnated", preferred_node); +// nv_kthread_q_stop(&some_q); +// +int nv_kthread_q_init_on_node(nv_kthread_q_t *q, + const char *qname, + int preferred_node); + +// +// This routine is the same as nv_kthread_q_init_on_node() with the exception +// that the queue stack will be allocated on the NUMA node of the caller. +// +int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname); + +// +// The caller is responsible for stopping all queues, by calling this routine +// before, for example, kernel module unloading. This nv_kthread_q_stop() +// routine will flush the queue, and safely stop the kthread, before returning. +// +// You may ONLY call nv_kthread_q_stop() once, unless you reinitialize the +// queue in between, as shown in the nv_kthread_q_init() documentation, above. +// +// Do not add any more items to the queue after calling nv_kthread_q_stop. +// +// Calling nv_kthread_q_stop() on a queue which has been zero-initialized or +// for which nv_kthread_q_init() failed, is a no-op. +// +void nv_kthread_q_stop(nv_kthread_q_t *q); + +// +// All items that were in the queue before nv_kthread_q_flush was called, and +// all items scheduled by those items, will get run before this function +// returns. +// +// You may NOT call nv_kthread_q_flush() after having called nv_kthread_q_stop. +// +// This actually flushes the queue twice. That ensures that the queue is fully +// flushed, for an important use case: rescheduling from within one's own +// callback. In order to do that safely, you need to: +// +// -- set a flag that tells the callback to stop rescheduling itself. +// +// -- call either nv_kthread_q_flush or nv_kthread_q_stop (which internally +// calls nv_kthread_q_flush). The nv_kthread_q_flush, in turn, actually +// flushes the queue *twice*. The first flush waits for any callbacks +// to finish, that missed seeing the "stop_rescheduling" flag. The +// second flush waits for callbacks that were already scheduled when the +// first flush finished. 
+// +void nv_kthread_q_flush(nv_kthread_q_t *q); + +// Assigns function_to_run and function_args to the q_item. +// +// This must be called before calling nv_kthread_q_schedule_q_item. +void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item, + nv_q_func_t function_to_run, + void *function_args); + +// +// The caller must have already set up the queue, via nv_kthread_q_init(). +// The caller owns the lifetime of the q_item. The nv_kthread_q system runs +// q_items, and adds or removes them from the queue. However, due to the first +// law of q-dynamics, it neither creates nor destroys q_items. +// +// When the callback (the function_to_run argument) is actually run, it is OK +// to free the q_item from within that routine. The nv_kthread_q system +// promises to be done with the q_item before that point. +// +// nv_kthread_q_schedule_q_item may be called from multiple threads at once, +// without danger of corrupting anything. This routine may also be safely +// called from interrupt context, including top-half ISRs. +// +// It is OK to reschedule the same q_item from within its own callback function. +// +// It is also OK to attempt to reschedule the same q_item, if that q_item is +// already pending in the queue. The q_item will not be rescheduled if it is +// already pending. +// +// Returns true (non-zero) if the item was actually scheduled. Returns false if +// the item was not scheduled, which can happen if: +// +// -- The q_item was already pending in a queue, or +// -- The queue is shutting down (or not yet started up). +// +int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q, + nv_kthread_q_item_t *q_item); + +// Built-in test. Returns -1 if any subtest failed, or 0 upon success. +int nv_kthread_q_run_self_test(void); + +#endif // __NV_KTHREAD_QUEUE_H__ diff --git a/kernel-open/common/inc/nv-linux.h b/kernel-open/common/inc/nv-linux.h new file mode 100644 index 0000000..d46d246 --- /dev/null +++ b/kernel-open/common/inc/nv-linux.h @@ -0,0 +1,1875 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
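Tying the nv_kthread_q API together, a hypothetical bottom-half style usage sketch (names invented, error handling trimmed), following the allocation rules described above:

#include "nv-kthread-q.h"

static nv_kthread_q_t      work_q;     /* caller-allocated queue */
static nv_kthread_q_item_t work_item;  /* pre-allocated q_item */

static void do_deferred_work(void *args)  /* runs in the q's kthread */
{
    (void)args;
}

static int example_start(void)
{
    int ret = nv_kthread_q_init(&work_q, "nv_example");

    if (ret != 0)
        return ret;  /* negative errno, per the comments above */

    nv_kthread_q_item_init(&work_item, do_deferred_work, NULL);
    return 0;
}

static void example_isr_top_half(void)
{
    /* Safe from interrupt context; a no-op if already pending. */
    nv_kthread_q_schedule_q_item(&work_q, &work_item);
}

static void example_shutdown(void)
{
    nv_kthread_q_stop(&work_q);  /* flushes, then stops the kthread */
}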
+ */
+
+#ifndef _NV_LINUX_H_
+#define _NV_LINUX_H_
+
+#include "nvstatus.h"
+#include "nv.h"
+#include "nv-ioctl-numa.h"
+#include "conftest.h"
+
+#include "nv-lock.h"
+#include "nv-pgprot.h"
+#include "nv-mm.h"
+#include "os-interface.h"
+#include "nv-timer.h"
+#include "nv-time.h"
+#include "nv-chardev-numbers.h"
+#include "nv-platform.h"
+
+#ifndef AUTOCONF_INCLUDED
+#if defined(NV_GENERATED_AUTOCONF_H_PRESENT)
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+#endif
+
+#if defined(NV_GENERATED_UTSRELEASE_H_PRESENT)
+ #include <generated/utsrelease.h>
+#endif
+
+#if defined(NV_GENERATED_COMPILE_H_PRESENT)
+ #include <generated/compile.h>
+#endif
+
+#include <linux/version.h>
+#include <linux/utsname.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
+#error "This driver does not support kernels older than Linux 4.15!"
+#endif
+
+#if defined (CONFIG_SMP) && !defined (__SMP__)
+#define __SMP__
+#endif
+
+#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS)
+# define MODVERSIONS
+#endif
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/proc_fs.h>
+
+#include <linux/mm.h>
+
+#if !defined(VM_RESERVED)
+#define VM_RESERVED 0x00000000
+#endif
+#if !defined(VM_DONTEXPAND)
+#define VM_DONTEXPAND 0x00000000
+#endif
+#if !defined(VM_DONTDUMP)
+#define VM_DONTDUMP 0x00000000
+#endif
+
+#include <linux/init.h> /* module_init, module_exit */
+#include <linux/types.h> /* pid_t, size_t, __u32, etc */
+#include <linux/errno.h> /* error codes */
+#include <linux/list.h> /* circular linked list */
+#include <linux/stddef.h> /* NULL, offsetof */
+#include <linux/wait.h> /* wait queues */
+#include <linux/string.h> /* strchr(), strpbrk() */
+
+#include <linux/ctype.h> /* isspace(), etc */
+#include <linux/console.h> /* acquire_console_sem(), etc */
+#include <linux/cpufreq.h> /* cpufreq_get */
+
+#include <linux/slab.h> /* kmalloc, kfree, etc */
+#include <linux/vmalloc.h> /* vmalloc, vfree, etc */
+
+#include <linux/poll.h> /* poll_wait */
+#include <linux/delay.h> /* mdelay, udelay */
+
+#include <linux/sched.h> /* suser(), capable() replacement */
+
+#include <linux/random.h> /* get_random_bytes() */
+
+#if defined(NV_LINUX_DMA_BUF_H_PRESENT)
+#include <linux/dma-buf.h>
+#endif
+
+#if defined(NV_DRM_AVAILABLE)
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#include <drm/drm_gem.h>
+#endif /* NV_DRM_AVAILABLE */
+
+/* task and signal-related items */
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/task.h>
+#include <linux/moduleparam.h> /* module_param() */
+#include <asm/tlbflush.h> /* flush_tlb(), flush_tlb_all() */
+
+#include <linux/pci.h> /* pci_find_class, etc */
+#include <linux/interrupt.h> /* tasklets, interrupt helpers */
+#include <linux/timer.h>
+#include <linux/file.h> /* fget(), fput() */
+#include <linux/rbtree.h>
+#include <linux/cpu.h> /* CPU hotplug support */
+
+#include <linux/pm_runtime.h> /* pm_runtime_* */
+#include <linux/fdtable.h> /* files_fdtable, etc */
+
+#include <asm/div64.h> /* do_div() */
+#if defined(NV_ASM_SYSTEM_H_PRESENT)
+#include <asm/system.h> /* cli, sti, save_flags */
+#endif
+#include <asm/io.h> /* ioremap, virt_to_phys */
+#include <asm/uaccess.h> /* access_ok */
+#include <asm/page.h> /* PAGE_OFFSET */
+#include <asm/pgtable.h> /* pte bit definitions */
+#include <asm/bitops.h> /* __set_bit() */
+#include <linux/time.h> /* FD_SET() */
+
+#include "nv-list-helpers.h"
+
+/*
+ * Use current->cred->euid, instead of calling current_euid().
+ * The latter can pull in the GPL-only debug_lockdep_rcu_enabled()
+ * symbol when CONFIG_PROVE_RCU. That is only used for debugging.
+ *
+ * The Linux kernel relies on the assumption that only the current process
+ * is permitted to change its cred structure. Therefore, current_euid()
+ * does not require the RCU's read lock on current->cred.
+ */
+#define NV_CURRENT_EUID() (__kuid_val(current->cred->euid))
+
+#if defined(CONFIG_VGA_ARB)
+#include <linux/vgaarb.h>
+#endif
+
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+
+#if defined(NV_LINUX_DMA_MAP_OPS_H_PRESENT)
+#include <linux/dma-map-ops.h>
+#endif
+
+#if defined(CONFIG_SWIOTLB) && defined(NVCPU_AARCH64)
+#include <linux/swiotlb.h>
+#endif
+
+#include <linux/scatterlist.h>
+#include <linux/completion.h>
+#include <linux/highmem.h>
+
+#include <linux/nodemask.h>
+#include <linux/memory.h>
+
+#include <linux/workqueue.h> /* workqueue */
+#include "nv-kthread-q.h" /* kthread based queue */
+#include <linux/efi.h> /* efi_enabled */
+#include <linux/fb.h> /* fb_info struct */
+#include <linux/screen_info.h> /* screen_info */
+
+#if !defined(CONFIG_PCI)
+#warning "Attempting to build driver for a platform with no PCI support!"
+#include <asm-generic/pci-dma-compat.h>
+#endif
+
+#if defined(CONFIG_CRAY_XT)
+#include <cray/cray_nvidia.h>
+NV_STATUS nvos_forward_error_to_cray(struct pci_dev *, NvU32,
+ const char *, va_list);
+#endif
+
+#if defined(NV_ASM_SET_MEMORY_H_PRESENT)
+#include <asm/set_memory.h>
+#endif
+
+#if defined(NV_SET_MEMORY_UC_PRESENT)
+#undef NV_SET_PAGES_UC_PRESENT
+#endif
+
+#if !defined(NVCPU_AARCH64) && !defined(NVCPU_RISCV64)
+#if !defined(NV_SET_MEMORY_UC_PRESENT) && !defined(NV_SET_PAGES_UC_PRESENT)
+#error "This driver requires the ability to change memory types!"
+#endif
+#endif
+
+/*
+ * Traditionally, CONFIG_XEN indicated that the target kernel was
+ * built exclusively for use under a Xen hypervisor, requiring
+ * modifications to or disabling of a variety of NVIDIA graphics
+ * driver code paths. As of the introduction of CONFIG_PARAVIRT
+ * and support for Xen hypervisors within the CONFIG_PARAVIRT_GUEST
+ * architecture, CONFIG_XEN merely indicates that the target
+ * kernel can run under a Xen hypervisor, but not that it will.
+ *
+ * If CONFIG_XEN and CONFIG_PARAVIRT are defined, the old Xen
+ * specific code paths are disabled. If the target kernel executes
+ * stand-alone, the NVIDIA graphics driver will work fine. If the
+ * kernel executes under a Xen (or other) hypervisor, however, the
+ * NVIDIA graphics driver has no way of knowing and is unlikely
+ * to work correctly.
+ */
+#if defined(CONFIG_XEN) && !defined(CONFIG_PARAVIRT)
+#include <asm/maddr.h>
+#include <xen/interface/memory.h>
+#define NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL
+#endif
+
+#ifdef CONFIG_KDB
+#include <linux/kdb.h>
+#include <asm/kdb.h>
+#endif
+
+#if defined(CONFIG_X86_REMOTE_DEBUG)
+#include <linux/gdb.h>
+#endif
+
+#if defined(DEBUG) && defined(CONFIG_KGDB) && \
+ defined(NVCPU_AARCH64)
+#include <asm/kgdb.h>
+#endif
+
+#if defined(NVCPU_X86_64) && !defined(NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL)
+#define NV_ENABLE_PAT_SUPPORT
+#endif
+
+#define NV_PAT_MODE_DISABLED 0
+#define NV_PAT_MODE_KERNEL 1
+#define NV_PAT_MODE_BUILTIN 2
+
+extern int nv_pat_mode;
+
+#if defined(CONFIG_HOTPLUG_CPU)
+#define NV_ENABLE_HOTPLUG_CPU
+#include <linux/notifier.h> /* struct notifier_block, etc */
+#endif
+
+#if (defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE))
+#include <linux/i2c.h>
+#endif
+
+#if defined(CONFIG_ACPI)
+#include <linux/acpi.h>
+#define NV_LINUX_ACPI_EVENTS_SUPPORTED 1
+#endif
+
+#if defined(NV_LINUX_ACPI_EVENTS_SUPPORTED)
+#define NV_ACPI_WALK_NAMESPACE(type, start_object, max_depth, \
+ user_function, args...) \
+ acpi_walk_namespace(type, start_object, max_depth, \
+ user_function, NULL, args)
+#endif
+
+#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL)
+#define NV_CONFIG_PREEMPT_RT 1
+#endif
+
+#ifndef get_cpu
+#define get_cpu() smp_processor_id()
+#define put_cpu()
+#endif
+
+#if defined(NVCPU_X86_64)
+#if !defined(pmd_large)
+#define pmd_large(_pmd) \
+ ((pmd_val(_pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
+#endif
+#endif /* defined(NVCPU_X86_64) */
+
+#define NV_PAGE_COUNT(page) \
+ ((unsigned int)page_count(page))
+#define NV_GET_PAGE_FLAGS(page_ptr) \
+ (NV_GET_PAGE_STRUCT(page_ptr->phys_addr)->flags)
+
+/*
+ * Before the introduction of VM_PFNMAP, there was a VM_UNPAGED flag.
+ * Drivers which wanted to call remap_pfn_range on normal pages had to use this
+ * VM_UNPAGED flag *and* set PageReserved. With the introduction of VM_PFNMAP,
+ * that restriction went away. This is described in commit
+ *
+ * 2005-10-28 6aab341e0a28aff100a09831c5300a2994b8b986
+ * ("mm: re-architect the VM_UNPAGED logic")
+ *
+ * , which added VM_PFNMAP and vm_normal_page. Therefore, if VM_PFNMAP is
+ * defined, then we do *not* need to mark a page as reserved, in order to
+ * call remap_pfn_range().
+ */
+#if !defined(VM_PFNMAP)
+#define NV_MAYBE_RESERVE_PAGE(page_ptr) \
+ SetPageReserved(NV_GET_PAGE_STRUCT(page_ptr->phys_addr))
+#define NV_MAYBE_UNRESERVE_PAGE(page_ptr) \
+ ClearPageReserved(NV_GET_PAGE_STRUCT(page_ptr->phys_addr))
+#else
+#define NV_MAYBE_RESERVE_PAGE(page_ptr)
+#define NV_MAYBE_UNRESERVE_PAGE(page_ptr)
+#endif /* defined(VM_PFNMAP) */
+
+#if !defined(__GFP_COMP)
+#define __GFP_COMP 0
+#endif
+
+#if !defined(DEBUG) && defined(__GFP_NOWARN)
+#define NV_GFP_KERNEL (GFP_KERNEL | __GFP_NOWARN)
+#define NV_GFP_ATOMIC (GFP_ATOMIC | __GFP_NOWARN)
+#else
+#define NV_GFP_KERNEL (GFP_KERNEL)
+#define NV_GFP_ATOMIC (GFP_ATOMIC)
+#endif
+
+#if defined(GFP_DMA32)
+/*
+ * GFP_DMA32 is similar to GFP_DMA, but instructs the Linux zone
+ * allocator to allocate memory from the first 4GB on platforms
+ * such as Linux/x86-64; the alternative is to use an IOMMU such
+ * as the one implemented with the K8 GART, if available.
+ */
+#define NV_GFP_DMA32 (GFP_DMA32)
+#else
+#define NV_GFP_DMA32 0
+#endif
+
+#if defined(NVCPU_AARCH64) || defined(NVCPU_RISCV64)
+#define NV_ALLOW_WRITE_COMBINING(mt) 1
+#elif defined(NVCPU_X86_64)
+#if defined(NV_ENABLE_PAT_SUPPORT)
+#define NV_ALLOW_WRITE_COMBINING(mt) \
+ ((nv_pat_mode != NV_PAT_MODE_DISABLED) && \
+ ((mt) != NV_MEMORY_TYPE_REGISTERS))
+#else
+#define NV_ALLOW_WRITE_COMBINING(mt) 0
+#endif
+#endif
+
+#if !defined(IRQF_SHARED)
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#define NV_MAX_RECURRING_WARNING_MESSAGES 10
+
+/* various memory tracking/debugging techniques
+ * disabled for retail builds, enabled for debug builds
+ */
+
+// allow an easy way to convert all debug printfs related to memory
+// management back and forth between 'info' and 'errors'
+#if defined(NV_DBG_MEM)
+#define NV_DBG_MEMINFO NV_DBG_ERRORS
+#else
+#define NV_DBG_MEMINFO NV_DBG_INFO
+#endif
+
+// Provides a consistent way for the driver to obtain the maximum page order.
+// Starting with Linux kernel 6.8, MAX_ORDER is renamed to MAX_PAGE_ORDER.
+#if defined(MAX_PAGE_ORDER)
+#define NV_MAX_PAGE_ORDER MAX_PAGE_ORDER
+#else
+// Linux kernel 6.4.0 changed the meaning of the MAX_ORDER define.
+// Prior to 6.4.0, MAX_ORDER was defined as the number of orders available:
+// with its default value of 11, it signals that values 0 through 10
+// (inclusive) are valid order values that the Linux buddy allocator supports.
+//
+// Starting with 6.4.0, MAX_ORDER is redefined as the maximum valid order
+// value: with its default value of 10, it signals that order == 10 is the
+// maximum valid order value that the Linux buddy allocator supports.
+//
+// To keep the interface consistent across kernels, define NV_MAX_PAGE_ORDER
+// conservatively, even though this might cause RM to report a smaller than
+// maximum order value on pre-6.4.0 kernels.
+#define NV_MAX_PAGE_ORDER (MAX_ORDER - 1)
+#endif // defined(MAX_PAGE_ORDER)
+
+
+#define NV_MEM_TRACKING_PAD_SIZE(size) \
+ (size) = NV_ALIGN_UP((size + sizeof(void *)), sizeof(void *))
+
+#define NV_MEM_TRACKING_HIDE_SIZE(ptr, size) \
+ if ((ptr != NULL) && (*(ptr) != NULL)) \
+ { \
+ NvU8 *__ptr; \
+ *(unsigned long *) *(ptr) = (size); \
+ __ptr = *(ptr); __ptr += sizeof(void *); \
+ *(ptr) = (void *) __ptr; \
+ }
+#define NV_MEM_TRACKING_RETRIEVE_SIZE(ptr, size) \
+ { \
+ NvU8 *__ptr = (ptr); __ptr -= sizeof(void *); \
+ (ptr) = (void *) __ptr; \
+ (size) = *(unsigned long *) (ptr); \
+ }
+
+/* keep track of memory usage */
+#include "nv-memdbg.h"
+
+static inline void *nv_vmalloc(unsigned long size)
+{
+#if defined(NV_VMALLOC_HAS_PGPROT_T_ARG)
+ void *ptr = __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
+#else
+ void *ptr = __vmalloc(size, GFP_KERNEL);
+#endif
+ NV_MEMDBG_ADD(ptr, size);
+ return ptr;
+}
+
+static inline void nv_vfree(void *ptr, NvU64 size)
+{
+ NV_MEMDBG_REMOVE(ptr, size);
+ vfree(ptr);
+}
+
+static inline void *nv_ioremap(NvU64 phys, NvU64 size)
+{
+#if IS_ENABLED(CONFIG_INTEL_TDX_GUEST) && defined(NV_IOREMAP_DRIVER_HARDENED_PRESENT)
+ void *ptr = ioremap_driver_hardened(phys, size);
+#else
+ void *ptr = ioremap(phys, size);
+#endif
+ NV_MEMDBG_ADD(ptr, size);
+ return ptr;
+}
+
+static inline void *nv_ioremap_nocache(NvU64 phys, NvU64 size)
+{
+ return nv_ioremap(phys, size);
+}
+
+static inline void *nv_ioremap_cache(NvU64 phys, NvU64 size)
+{
+ void *ptr = NULL;
+#if IS_ENABLED(CONFIG_INTEL_TDX_GUEST) && defined(NV_IOREMAP_CACHE_SHARED_PRESENT)
+ ptr = ioremap_cache_shared(phys, size);
+#elif defined(NV_IOREMAP_CACHE_PRESENT)
+ ptr = ioremap_cache(phys, size);
+#else
+ return nv_ioremap(phys, size);
+#endif
+
+ NV_MEMDBG_ADD(ptr, size);
+
+ return ptr;
+}
+
+static inline void *nv_ioremap_wc(NvU64 phys, NvU64 size)
+{
+ void *ptr = NULL;
+#if IS_ENABLED(CONFIG_INTEL_TDX_GUEST) && defined(NV_IOREMAP_DRIVER_HARDENED_WC_PRESENT)
+ ptr = ioremap_driver_hardened_wc(phys, size);
+#elif defined(NV_IOREMAP_WC_PRESENT)
+ ptr = ioremap_wc(phys, size);
+#else
+ return nv_ioremap_nocache(phys, size);
+#endif
+
+ NV_MEMDBG_ADD(ptr, size);
+
+ return ptr;
+}
+
+static inline void nv_iounmap(void *ptr, NvU64 size)
+{
+ NV_MEMDBG_REMOVE(ptr, size);
+ iounmap(ptr);
+}
+
+static inline NvBool nv_numa_node_has_memory(int node_id)
+{
+ if (node_id < 0 || node_id >= MAX_NUMNODES)
+ return NV_FALSE;
+ return node_state(node_id, N_MEMORY) ?
NV_TRUE : NV_FALSE; +} + +#define NV_KMALLOC(ptr, size) \ + { \ + (ptr) = kmalloc(size, NV_GFP_KERNEL); \ + NV_MEMDBG_ADD(ptr, size); \ + } + +#define NV_KZALLOC(ptr, size) \ + { \ + (ptr) = kzalloc(size, NV_GFP_KERNEL); \ + NV_MEMDBG_ADD(ptr, size); \ + } + +#define NV_KMALLOC_ATOMIC(ptr, size) \ + { \ + (ptr) = kmalloc(size, NV_GFP_ATOMIC); \ + NV_MEMDBG_ADD(ptr, size); \ + } + +#if defined(__GFP_RETRY_MAYFAIL) +#define NV_GFP_NO_OOM (NV_GFP_KERNEL | __GFP_RETRY_MAYFAIL) +#elif defined(__GFP_NORETRY) +#define NV_GFP_NO_OOM (NV_GFP_KERNEL | __GFP_NORETRY) +#else +#define NV_GFP_NO_OOM (NV_GFP_KERNEL) +#endif + +#define NV_KMALLOC_NO_OOM(ptr, size) \ + { \ + (ptr) = kmalloc(size, NV_GFP_NO_OOM); \ + NV_MEMDBG_ADD(ptr, size); \ + } + +#define NV_KFREE(ptr, size) \ + { \ + NV_MEMDBG_REMOVE(ptr, size); \ + kfree((void *) (ptr)); \ + } + +#define NV_ALLOC_PAGES_NODE(ptr, nid, order, gfp_mask) \ + { \ + (ptr) = (unsigned long) alloc_pages_node(nid, gfp_mask, order); \ + } + +#define NV_GET_FREE_PAGES(ptr, order, gfp_mask) \ + { \ + (ptr) = __get_free_pages(gfp_mask, order); \ + } + +#define NV_FREE_PAGES(ptr, order) \ + { \ + free_pages(ptr, order); \ + } + +static inline pgprot_t nv_sme_clr(pgprot_t prot) +{ +#if defined(__sme_clr) + return __pgprot(__sme_clr(pgprot_val(prot))); +#else + return prot; +#endif // __sme_clr +} + +static inline pgprot_t nv_adjust_pgprot(pgprot_t vm_prot) +{ + pgprot_t prot = __pgprot(pgprot_val(vm_prot)); + +#if defined(pgprot_decrypted) + return pgprot_decrypted(prot); +#else + return nv_sme_clr(prot); +#endif // pgprot_decrypted +} + +#if defined(PAGE_KERNEL_NOENC) +#if defined(__pgprot_mask) +#define NV_PAGE_KERNEL_NOCACHE_NOENC __pgprot_mask(__PAGE_KERNEL_NOCACHE) +#elif defined(default_pgprot) +#define NV_PAGE_KERNEL_NOCACHE_NOENC default_pgprot(__PAGE_KERNEL_NOCACHE) +#elif defined( __pgprot) +#define NV_PAGE_KERNEL_NOCACHE_NOENC __pgprot(__PAGE_KERNEL_NOCACHE) +#else +#error "Unsupported kernel!!!" 
+#endif +#endif + +#define NV_GET_CURRENT_PROCESS() current->tgid +#define NV_IN_ATOMIC() in_atomic() +#define NV_LOCAL_BH_DISABLE() local_bh_disable() +#define NV_LOCAL_BH_ENABLE() local_bh_enable() +#define NV_COPY_TO_USER(to, from, n) copy_to_user(to, from, n) +#define NV_COPY_FROM_USER(to, from, n) copy_from_user(to, from, n) + +#define NV_IS_SUSER() capable(CAP_SYS_ADMIN) +#define NV_PCI_DEVICE_NAME(pci_dev) ((pci_dev)->pretty_name) +#define NV_CLI() local_irq_disable() +#define NV_SAVE_FLAGS(eflags) local_save_flags(eflags) +#define NV_RESTORE_FLAGS(eflags) local_irq_restore(eflags) +#define NV_MAY_SLEEP() (!irqs_disabled() && !in_interrupt() && !NV_IN_ATOMIC()) +#define NV_MODULE_PARAMETER(x) module_param(x, int, 0) +#define NV_MODULE_STRING_PARAMETER(x) module_param(x, charp, 0) +#undef MODULE_PARM + +#define NV_NUM_CPUS() num_possible_cpus() + +#define NV_HAVE_MEMORY_ENCRYPT_DECRYPT 0 + +#if defined(NVCPU_X86_64) && \ + NV_IS_EXPORT_SYMBOL_GPL_set_memory_encrypted && \ + NV_IS_EXPORT_SYMBOL_GPL_set_memory_decrypted +#undef NV_HAVE_MEMORY_ENCRYPT_DECRYPT +#define NV_HAVE_MEMORY_ENCRYPT_DECRYPT 1 +#endif + +static inline void nv_set_memory_decrypted_zeroed(NvBool unencrypted, + unsigned long virt_addr, + int num_native_pages, + size_t size) +{ + if (virt_addr == 0) + return; + +#if NV_HAVE_MEMORY_ENCRYPT_DECRYPT + if (unencrypted) + { + set_memory_decrypted(virt_addr, num_native_pages); + memset((void *)virt_addr, 0, size); + } +#endif +} + +static inline void nv_set_memory_encrypted(NvBool unencrypted, + unsigned long virt_addr, + int num_native_pages) +{ + if (virt_addr == 0) + return; + +#if NV_HAVE_MEMORY_ENCRYPT_DECRYPT + if (unencrypted) + { + set_memory_encrypted(virt_addr, num_native_pages); + } +#endif +} + +static inline dma_addr_t nv_phys_to_dma(struct device *dev, NvU64 pa) +{ +#if defined(NV_PHYS_TO_DMA_PRESENT) + return phys_to_dma(dev, pa); +#elif defined(NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL) + return phys_to_machine(pa); +#else + return (dma_addr_t)pa; +#endif +} + +#define NV_GET_OFFSET_IN_PAGE(phys_page) offset_in_page(phys_page) +#define NV_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page)) +#define NV_VMA_PGOFF(vma) ((vma)->vm_pgoff) +#define NV_VMA_SIZE(vma) ((vma)->vm_end - (vma)->vm_start) +#define NV_VMA_OFFSET(vma) (((NvU64)(vma)->vm_pgoff) << PAGE_SHIFT) +#define NV_VMA_PRIVATE(vma) ((vma)->vm_private_data) +#define NV_VMA_FILE(vma) ((vma)->vm_file) + +#define NV_DEVICE_MINOR_NUMBER(x) minor((x)->i_rdev) + +#define NV_PCI_DISABLE_DEVICE(pci_dev) \ + { \ + NvU16 __cmd[2]; \ + pci_read_config_word((pci_dev), PCI_COMMAND, &__cmd[0]); \ + pci_disable_device(pci_dev); \ + pci_read_config_word((pci_dev), PCI_COMMAND, &__cmd[1]); \ + __cmd[1] |= PCI_COMMAND_MEMORY; \ + pci_write_config_word((pci_dev), PCI_COMMAND, \ + (__cmd[1] | (__cmd[0] & PCI_COMMAND_IO))); \ + } + +#define NV_PCI_RESOURCE_START(pci_dev, bar) pci_resource_start(pci_dev, (bar)) +#define NV_PCI_RESOURCE_SIZE(pci_dev, bar) pci_resource_len(pci_dev, (bar)) +#define NV_PCI_RESOURCE_FLAGS(pci_dev, bar) pci_resource_flags(pci_dev, (bar)) + +#define NV_PCI_RESOURCE_VALID(pci_dev, bar) \ + ((NV_PCI_RESOURCE_START(pci_dev, bar) != 0) && \ + (NV_PCI_RESOURCE_SIZE(pci_dev, bar) != 0)) + +#define NV_PCI_DOMAIN_NUMBER(pci_dev) (NvU32)pci_domain_nr(pci_dev->bus) +#define NV_PCI_BUS_NUMBER(pci_dev) (pci_dev)->bus->number +#define NV_PCI_DEVFN(pci_dev) (pci_dev)->devfn +#define NV_PCI_SLOT_NUMBER(pci_dev) PCI_SLOT(NV_PCI_DEVFN(pci_dev)) + +#if defined(CONFIG_X86_UV) && defined(NV_CONFIG_X86_UV) 
+#define NV_GET_DOMAIN_BUS_AND_SLOT(domain,bus,devfn) \ + ({ \ + struct pci_dev *__dev = NULL; \ + while ((__dev = pci_get_device(PCI_VENDOR_ID_NVIDIA, \ + PCI_ANY_ID, __dev)) != NULL) \ + { \ + if ((NV_PCI_DOMAIN_NUMBER(__dev) == domain) && \ + (NV_PCI_BUS_NUMBER(__dev) == bus) && \ + (NV_PCI_DEVFN(__dev) == devfn)) \ + { \ + break; \ + } \ + } \ + if (__dev == NULL) \ + { \ + while ((__dev = pci_get_class((PCI_CLASS_BRIDGE_HOST << 8), \ + __dev)) != NULL) \ + { \ + if ((NV_PCI_DOMAIN_NUMBER(__dev) == domain) && \ + (NV_PCI_BUS_NUMBER(__dev) == bus) && \ + (NV_PCI_DEVFN(__dev) == devfn)) \ + { \ + break; \ + } \ + } \ + } \ + if (__dev == NULL) \ + { \ + while ((__dev = pci_get_class((PCI_CLASS_BRIDGE_PCI << 8), \ + __dev)) != NULL) \ + { \ + if ((NV_PCI_DOMAIN_NUMBER(__dev) == domain) && \ + (NV_PCI_BUS_NUMBER(__dev) == bus) && \ + (NV_PCI_DEVFN(__dev) == devfn)) \ + { \ + break; \ + } \ + } \ + } \ + if (__dev == NULL) \ + { \ + while ((__dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, \ + __dev)) != NULL) \ + { \ + if ((NV_PCI_DOMAIN_NUMBER(__dev) == domain) && \ + (NV_PCI_BUS_NUMBER(__dev) == bus) && \ + (NV_PCI_DEVFN(__dev) == devfn)) \ + { \ + break; \ + } \ + } \ + } \ + __dev; \ + }) +#else +#define NV_GET_DOMAIN_BUS_AND_SLOT(domain,bus, devfn) \ + pci_get_domain_bus_and_slot(domain, bus, devfn) +#endif + +#define NV_PRINT_AT(nv_debug_level,at) \ + { \ + nv_printf(nv_debug_level, \ + "NVRM: VM: %s:%d: 0x%p, %d page(s), count = %d, " \ + "page_table = 0x%p\n", __FUNCTION__, __LINE__, at, \ + at->num_pages, NV_ATOMIC_READ(at->usage_count), \ + at->page_table); \ + } + +#define NV_PRINT_VMA(nv_debug_level,vma) \ + { \ + nv_printf(nv_debug_level, \ + "NVRM: VM: %s:%d: 0x%lx - 0x%lx, 0x%08lx bytes @ 0x%016llx, 0x%p, 0x%p\n", \ + __FUNCTION__, __LINE__, vma->vm_start, vma->vm_end, NV_VMA_SIZE(vma), \ + NV_VMA_OFFSET(vma), NV_VMA_PRIVATE(vma), NV_VMA_FILE(vma)); \ + } + +#ifndef minor +# define minor(x) MINOR(x) +#endif + +#if defined(cpu_relax) +#define NV_CPU_RELAX() cpu_relax() +#else +#define NV_CPU_RELAX() barrier() +#endif + +#ifndef IRQ_RETVAL +typedef void irqreturn_t; +#define IRQ_RETVAL(a) +#endif + +#if !defined(PCI_COMMAND_SERR) +#define PCI_COMMAND_SERR 0x100 +#endif +#if !defined(PCI_COMMAND_INTX_DISABLE) +#define PCI_COMMAND_INTX_DISABLE 0x400 +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +/* + * If the host OS has page sizes larger than 4KB, we may have a security + * problem. Registers are typically grouped in 4KB pages, but if there are + * larger pages, then the smallest userspace mapping possible (e.g., a page) + * may give more access than intended to the user. 
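+ *
+ * A worked example (hypothetical numbers): with 64K OS pages
+ * (PAGE_SIZE = 0x10000) and 4K RM pages (NV_RM_PAGE_SIZE = 0x1000,
+ * NV_RM_PAGE_SHIFT = 12), a request for addr = 0x3000, size = 0x1000 lies
+ * entirely within RM page 3, since 0x3000 >> 12 == (0x3000 + 0x1000 - 1) >> 12.
+ * The macro below therefore reports that 4K isolation is required: mapping
+ * the whole 64K OS page would expose the other fifteen 4K register blocks
+ * that share it.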
+ */
+#define NV_4K_PAGE_ISOLATION_REQUIRED(addr, size) \
+ ((PAGE_SIZE > NV_RM_PAGE_SIZE) && \
+ ((size) <= NV_RM_PAGE_SIZE) && \
+ (((addr) >> NV_RM_PAGE_SHIFT) == \
+ (((addr) + (size) - 1) >> NV_RM_PAGE_SHIFT)))
+
+static inline int nv_remap_page_range(struct vm_area_struct *vma,
+ unsigned long virt_addr, NvU64 phys_addr, NvU64 size, pgprot_t prot)
+{
+ return remap_pfn_range(vma, virt_addr, (phys_addr >> PAGE_SHIFT), size,
+ prot);
+}
+
+static inline int nv_io_remap_page_range(struct vm_area_struct *vma,
+ NvU64 phys_addr, NvU64 size, NvU64 start)
+{
+ int ret = -1;
+#if !defined(NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL)
+ ret = nv_remap_page_range(vma, start, phys_addr, size,
+ nv_adjust_pgprot(vma->vm_page_prot));
+#else
+ ret = io_remap_pfn_range(vma, start, (phys_addr >> PAGE_SHIFT),
+ size, nv_adjust_pgprot(vma->vm_page_prot));
+#endif
+ return ret;
+}
+
+static inline vm_fault_t nv_insert_pfn(struct vm_area_struct *vma,
+ NvU64 virt_addr, NvU64 pfn)
+{
+ /*
+ * vm_insert_pfn{,_prot} replaced with vmf_insert_pfn{,_prot} in Linux 4.20
+ */
+#if defined(NV_VMF_INSERT_PFN_PROT_PRESENT)
+ return vmf_insert_pfn_prot(vma, virt_addr, pfn,
+ __pgprot(pgprot_val(vma->vm_page_prot)));
+#else
+ int ret = vm_insert_pfn_prot(vma, virt_addr, pfn,
+ __pgprot(pgprot_val(vma->vm_page_prot)));
+ switch (ret)
+ {
+ case 0:
+ case -EBUSY:
+ /*
+ * EBUSY indicates that another thread already handled
+ * the faulted range.
+ */
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ break;
+ }
+ return VM_FAULT_SIGBUS;
+#endif /* defined(NV_VMF_INSERT_PFN_PROT_PRESENT) */
+}
+
+/* Converts a BAR index to the Linux-specific PCI BAR index */
+static inline NvU8 nv_bar_index_to_os_bar_index
+(
+ struct pci_dev *dev,
+ NvU8 nv_bar_index
+)
+{
+ NvU8 bar_index = 0;
+ NvU8 i;
+
+ BUG_ON(nv_bar_index >= NV_GPU_NUM_BARS);
+
+ for (i = 0; i < nv_bar_index; i++)
+ {
+ if (NV_PCI_RESOURCE_FLAGS(dev, bar_index) & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ {
+ bar_index += 2;
+ }
+ else
+ {
+ bar_index++;
+ }
+ }
+
+ return bar_index;
+}
+
+#define NV_PAGE_MASK (NvU64)(long)PAGE_MASK
+
+extern void *nvidia_stack_t_cache;
+
+/*
+ * On Linux, when a kmem cache is created, a new sysfs entry is created for
+ * it unless it's merged with an existing cache. Upstream Linux kernel commit
+ * 3b7b314053d021601940c50b07f5f1423ae67e21 (version 4.12+) made cache
+ * destruction asynchronous, which creates a race between cache destroy and
+ * create. A new cache created with the same attributes as a previous cache
+ * that is scheduled for destruction can try to create a sysfs entry with the
+ * same, conflicting name. Upstream Linux kernel commit
+ * d50d82faa0c964e31f7a946ba8aba7c715ca7ab0 (4.18) fixes this issue by cleaning
+ * up the sysfs entry within slab_mutex, so the entry is deleted before a cache
+ * with the same attributes could be created.
+ * The definition for sysfs_slab_unlink() was moved to mm/slab.h in commit
+ * 19975f83412f ("mm/slab: move the rest of slub_def.h to mm/slab.h") (6.8).
+ * Since we can't conftest mm/slab.h, use the fact that linux/slub_def.h was
+ * removed by the commit.
+ *
+ * To work around this kernel issue, we take two steps:
+ * - Create unmergeable caches: a kmem_cache with a constructor is unmergeable.
+ * So, we define an empty constructor for it. Creating an unmergeable
+ * cache ensures that the kernel doesn't generate an internal name and always
+ * uses our name instead.
+ *
+ * - Generate a unique cache name by appending the current timestamp (ns).
+ * We wait for the timestamp to increment by at least one to ensure that we
+ * do not hit a name conflict in a cache create -> destroy (async) -> create
+ * cycle.
+ */
+#if !defined(NV_SYSFS_SLAB_UNLINK_PRESENT) && defined(NV_LINUX_SLUB_DEF_H_PRESENT)
+static inline void nv_kmem_ctor_dummy(void *arg)
+{
+ (void)arg;
+}
+#else
+#define nv_kmem_ctor_dummy NULL
+#endif
+
+#define NV_KMEM_CACHE_CREATE(name, type) \
+ nv_kmem_cache_create(name, sizeof(type), 0)
+
+/* The NULL pointer check is required for kernels older than 4.3 */
+#define NV_KMEM_CACHE_DESTROY(kmem_cache) \
+ if (kmem_cache != NULL) \
+ { \
+ kmem_cache_destroy(kmem_cache); \
+ }
+
+#define NV_KMEM_CACHE_ALLOC_ATOMIC(kmem_cache) \
+ kmem_cache_alloc(kmem_cache, GFP_ATOMIC)
+#define NV_KMEM_CACHE_ALLOC(kmem_cache) \
+ kmem_cache_alloc(kmem_cache, GFP_KERNEL)
+#define NV_KMEM_CACHE_FREE(ptr, kmem_cache) \
+ kmem_cache_free(kmem_cache, ptr)
+
+static inline void *nv_kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
+{
+#if !defined(NV_SYSFS_SLAB_UNLINK_PRESENT) && defined(NV_LINUX_SLUB_DEF_H_PRESENT)
+ /*
+ * We cannot call kmem_cache_zalloc directly, as it adds the __GFP_ZERO
+ * flag. This flag, together with the presence of a slab constructor, is
+ * flagged as a potential bug by the Linux kernel, since it is the role
+ * of a constructor to fill an allocated object with the desired
+ * pattern. In our case, we specified a (dummy) constructor as a
+ * workaround for a bug and not to zero-initialize objects. So we
+ * memset the allocated object ourselves here.
+ */
+ void *object = kmem_cache_alloc(k, flags);
+ if (object)
+ memset(object, 0, kmem_cache_size(k));
+ return object;
+#else
+ return kmem_cache_zalloc(k, flags);
+#endif
+}
+
+static inline int nv_kmem_cache_alloc_stack_atomic(nvidia_stack_t **stack)
+{
+ nvidia_stack_t *sp = NULL;
+#if defined(NVCPU_X86_64)
+ if (rm_is_altstack_in_use())
+ {
+ sp = NV_KMEM_CACHE_ALLOC_ATOMIC(nvidia_stack_t_cache);
+ if (sp == NULL)
+ return -ENOMEM;
+ sp->size = sizeof(sp->stack);
+ sp->top = sp->stack + sp->size;
+ }
+#endif
+ *stack = sp;
+ return 0;
+}
+
+static inline int nv_kmem_cache_alloc_stack(nvidia_stack_t **stack)
+{
+ nvidia_stack_t *sp = NULL;
+#if defined(NVCPU_X86_64)
+ if (rm_is_altstack_in_use())
+ {
+ sp = NV_KMEM_CACHE_ALLOC(nvidia_stack_t_cache);
+ if (sp == NULL)
+ return -ENOMEM;
+ sp->size = sizeof(sp->stack);
+ sp->top = sp->stack + sp->size;
+ }
+#endif
+ *stack = sp;
+ return 0;
+}
+
+static inline void nv_kmem_cache_free_stack(nvidia_stack_t *stack)
+{
+#if defined(NVCPU_X86_64)
+ if (stack != NULL && rm_is_altstack_in_use())
+ {
+ NV_KMEM_CACHE_FREE(stack, nvidia_stack_t_cache);
+ }
+#endif
+}
+
+#if defined(NVCPU_X86_64)
+/*
+ * RAM is cached on Linux by default, so we can assume there's
+ * nothing to be done here. This is not the case for the
+ * other memory spaces: we will have made an attempt to add
+ * a WC MTRR for the frame buffer.
+ *
+ * If a WC MTRR is present, we can't satisfy the WB mapping
+ * attempt here, since the achievable effective memory
+ * types in that case are WC and UC; if none is present, the
+ * effective type is typically UC (MTRRdefType is UC). We
+ * could only satisfy WB mapping requests with a WB MTRR.
+ */
+#define NV_ALLOW_CACHING(mt) ((mt) == NV_MEMORY_TYPE_SYSTEM)
+#else
+#define NV_ALLOW_CACHING(mt) ((mt) != NV_MEMORY_TYPE_REGISTERS)
+#endif
+
+typedef struct nvidia_pte_s {
+ NvU64 phys_addr;
+ unsigned long virt_addr;
+} nvidia_pte_t;
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+/* Standard dma_buf-related information.
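+ *
+ * For an imported buffer, these fields would typically be populated via the
+ * kernel's dma-buf API, along these lines (a sketch, not the driver's actual
+ * import path; "buf" and "dev" are hypothetical):
+ *
+ *     dma_attach = dma_buf_attach(buf, dev);
+ *     sgt = dma_buf_map_attachment(dma_attach, DMA_BIDIRECTIONAL);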
+ */
+struct nv_dma_buf
+{
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *dma_attach;
+ struct sg_table *sgt;
+ enum dma_data_direction direction;
+};
+#endif // CONFIG_DMA_SHARED_BUFFER
+
+typedef struct nv_alloc_s {
+ struct nv_alloc_s *next;
+ struct device *dev;
+ atomic_t usage_count;
+ struct {
+ NvBool contig : 1;
+ NvBool guest : 1;
+ NvBool zeroed : 1;
+ NvBool aliased : 1;
+ NvBool user : 1;
+ NvBool node : 1;
+ NvBool peer_io : 1;
+ NvBool physical : 1;
+ NvBool unencrypted : 1;
+ NvBool coherent : 1;
+ NvBool carveout : 1;
+ } flags;
+ unsigned int cache_type;
+ unsigned int num_pages;
+ unsigned int order;
+ unsigned int size;
+ nvidia_pte_t *page_table; /* array of physical pages allocated */
+ unsigned int pid;
+ struct page **user_pages;
+ NvU64 guest_id; /* id of guest VM */
+ NvS32 node_id; /* Node id for memory allocation when node is set in flags */
+ void *import_priv;
+ struct sg_table *import_sgt;
+ dma_addr_t dma_handle; /* dma handle used by dma_alloc_coherent(), dma_free_coherent() */
+} nv_alloc_t;
+
+/**
+ * nv_is_dma_direct - return NV_TRUE if dma_direct is in use
+ *
+ * Starting with the 5.0 kernel, SWIOTLB is merged into
+ * dma_direct, so systems without an IOMMU use dma_direct. We
+ * need to know if this is the case, so that we can use a
+ * different check for SWIOTLB enablement.
+ */
+static inline NvBool nv_is_dma_direct(struct device *dev)
+{
+ NvBool is_direct = NV_FALSE;
+
+#if defined(NV_DMA_IS_DIRECT_PRESENT)
+ if (dma_is_direct(get_dma_ops(dev)))
+ is_direct = NV_TRUE;
+#endif
+
+ return is_direct;
+}
+
+/**
+ * nv_dma_maps_swiotlb - return NV_TRUE if swiotlb is enabled
+ *
+ * SWIOTLB creates bounce buffers for the DMA mapping layer to
+ * use if a driver asks the kernel to map a DMA buffer that is
+ * outside of the device's addressable range. The driver does
+ * not function correctly if bounce buffers are enabled for the
+ * device. So if SWIOTLB is enabled, we should avoid making
+ * mapping calls.
+ */
+static inline NvBool
+nv_dma_maps_swiotlb(struct device *dev)
+{
+ NvBool swiotlb_in_use = NV_FALSE;
+#if defined(CONFIG_SWIOTLB)
+ /*
+ * The __attribute__ ((unused)) is necessary because in at least one
+ * case, *none* of the preprocessor branches below are taken, and
+ * so the ops variable ends up never being referred to at all. This can
+ * happen with the (NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_map_sg_attrs == 1)
+ * case.
+ */
+ const struct dma_map_ops *ops __attribute__ ((unused)) = get_dma_ops(dev);
+
+ /*
+ * The switch from dma_mapping_ops -> dma_map_ops coincided with the
+ * switch from swiotlb_map_sg -> swiotlb_map_sg_attrs.
+ */
+ #if NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_map_sg_attrs != 0
+ swiotlb_in_use = (ops->map_sg == swiotlb_map_sg_attrs);
+ #elif NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_dma_ops != 0
+ swiotlb_in_use = (ops == &swiotlb_dma_ops);
+ #endif
+ /*
+ * The "else" case that is not shown
+ * (for NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_map_sg_attrs == 0 ||
+ * NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_dma_ops == 0) does
+ * nothing, and ends up dropping us out to the last line of this function,
+ * effectively returning false. The nearly-human-readable version of that
+ * case is "get_dma_ops() is defined, but neither swiotlb_map_sg_attrs
+ * nor swiotlb_dma_ops is present".
+ *
+ * That can happen on kernels that fall within the range below:
+ *
+ * 2017-12-24 4bd89ed39b2ab8dc4ac4b6c59b07d420b0213bec
+ * ("swiotlb: remove various exports")
+ * 2018-06-28 210d0797c97d0e8f3b1a932a0dc143f4c57008a3
+ * ("swiotlb: export swiotlb_dma_ops")
+ *
+ * Related to this: between the above two commits, this driver has no way of
+ * detecting whether or not the SWIOTLB is in use. Furthermore, the
+ * driver cannot support DMA remapping. That leads to the following
+ * point: "swiotlb=force" is not supported for kernels falling in the above
+ * range.
+ *
+ * The other "else" case that is not shown:
+ * Starting with the 5.0 kernel, swiotlb is integrated into dma_direct,
+ * which is used when there's no IOMMU. In these kernels, ops == NULL,
+ * swiotlb_dma_ops no longer exists, and we do not support swiotlb=force
+ * (doing so would require detecting when swiotlb=force is enabled and
+ * then returning NV_TRUE even when dma_direct is in use). So for now,
+ * we just return NV_FALSE, and in nv_compute_gfp_mask() we check
+ * whether swiotlb could possibly be used (outside of swiotlb=force).
+ */
+
+ /*
+ * Commit 2017-11-07 d7b417fa08d ("x86/mm: Add DMA support for
+ * SEV memory encryption") forces SWIOTLB to be enabled when AMD SEV
+ * is active in all cases.
+ */
+ if (os_cc_enabled)
+ swiotlb_in_use = NV_TRUE;
+#endif
+
+ return swiotlb_in_use;
+}
+
+/*
+ * TODO: Bug 1522381 will allow us to move these mapping relationships into
+ * common code.
+ */
+
+/*
+ * Bug 1606851: the Linux kernel scatterlist code doesn't work for regions
+ * greater than or equal to 4GB, due to regular use of unsigned int
+ * throughout. So we need to split our mappings into 4GB-minus-1-page-or-less
+ * chunks and manage them separately.
+ */
+typedef struct nv_dma_submap_s {
+ NvU32 page_count;
+ NvU32 sg_map_count;
+ struct sg_table sgt;
+ NvBool imported;
+} nv_dma_submap_t;
+
+typedef struct nv_dma_map_s {
+ struct page **pages;
+ NvU64 page_count;
+ NvBool contiguous;
+ NvU32 cache_type;
+ struct sg_table *import_sgt;
+
+ union
+ {
+ struct
+ {
+ NvU32 submap_count;
+ nv_dma_submap_t *submaps;
+ } discontig;
+
+ struct
+ {
+ NvU64 dma_addr;
+ } contig;
+ } mapping;
+
+ struct device *dev;
+} nv_dma_map_t;
+
+#define NV_FOR_EACH_DMA_SUBMAP(dm, sm, i) \
+ for (i = 0, sm = &dm->mapping.discontig.submaps[0]; \
+ i < dm->mapping.discontig.submap_count; \
+ i++, sm = &dm->mapping.discontig.submaps[i])
+
+/*
+ * On 4K ARM kernels, use a max submap size that is a multiple of 64K, to keep
+ * nv-p2p happy. Despite the 4K OS pages, we still use 64K P2P pages, because
+ * dependent modules still use 64K. Instead of (4G-4K), use (4G-64K) as the
+ * max submap size, since the mapped IOVA range must be aligned to a 64K
+ * boundary.
+ * TODO : NV_DMA_SUBMAP_MAX_PAGES should be ((NvU32)(NV_DMA_U32_MAX_4K_PAGES - ).
+ * iGPU supports a max 2MB page size, so this should be good enough for now.
+ */
+#if defined(CONFIG_ARM64_4K_PAGES)
+#define NV_DMA_U32_MAX_4K_PAGES ((NvU32)((NV_U32_MAX >> PAGE_SHIFT) + 1))
+#define NV_DMA_SUBMAP_MAX_PAGES ((NvU32)(NV_DMA_U32_MAX_4K_PAGES - 512))
+#else
+#define NV_DMA_SUBMAP_MAX_PAGES ((NvU32)(NV_U32_MAX >> PAGE_SHIFT))
+#endif
+
+#define NV_DMA_SUBMAP_IDX_TO_PAGE_IDX(s) (s * NV_DMA_SUBMAP_MAX_PAGES)
+
+/*
+ * DO NOT use sg_alloc_table_from_pages on Xen Server, even if it's available.
+ * This will glom multiple pages into a single sg element, which
+ * xen_swiotlb_map_sg_attrs may try to route to the SWIOTLB. We must only use
+ * single-page sg elements on Xen Server.
+ */
+#if !defined(NV_DOM0_KERNEL_PRESENT)
+ #define NV_ALLOC_DMA_SUBMAP_SCATTERLIST(dm, sm, i) \
+ ((sg_alloc_table_from_pages(&sm->sgt, \
+ &dm->pages[NV_DMA_SUBMAP_IDX_TO_PAGE_IDX(i)], \
+ sm->page_count, 0, \
+ sm->page_count * PAGE_SIZE, NV_GFP_KERNEL) == 0) ? NV_OK : \
+ NV_ERR_OPERATING_SYSTEM)
+#else
+ #define NV_ALLOC_DMA_SUBMAP_SCATTERLIST(dm, sm, i) \
+ ((sg_alloc_table(&sm->sgt, sm->page_count, NV_GFP_KERNEL)) == \
+ 0 ? NV_OK : NV_ERR_OPERATING_SYSTEM)
+#endif
+
+typedef struct nv_work_s {
+ struct work_struct task;
+ void *data;
+} nv_work_t;
+
+#define NV_MAX_REGISTRY_KEYS_LENGTH 512
+
+typedef enum
+{
+ NV_DEV_STACK_TIMER,
+ NV_DEV_STACK_ISR,
+ NV_DEV_STACK_ISR_BH,
+ NV_DEV_STACK_ISR_BH_UNLOCKED,
+ NV_DEV_STACK_GPU_WAKEUP,
+ NV_DEV_STACK_COUNT
+} nvidia_linux_dev_stack_t;
+
+/* Linux version of the opaque type used for os_queue_work_item() */
+struct os_work_queue {
+ nv_kthread_q_t nvk;
+};
+
+/* Linux version of the opaque type used for os_wait_*() */
+struct os_wait_queue {
+ struct completion q;
+};
+
+#define MAX_CLIENTS_PER_ADAPTER 127
+#define MAX_TEGRA_I2C_PORTS 16
+
+typedef struct nv_i2c_client_entry_s
+{
+ NvU32 port;
+ void *pOsClient[MAX_CLIENTS_PER_ADAPTER];
+} nv_i2c_client_entry_t;
+
+/*!
+ * @brief Mapping between clock names and clock handles.
+ *
+ * TEGRASOC_WHICH_CLK_MAX: maximum number of clocks,
+ * defined in the enum below:
+ *
+ * arch/nvalloc/unix/include/nv.h
+ * enum TEGRASOC_WHICH_CLK_MAX;
+ *
+ */
+typedef struct nvsoc_clks_s {
+ struct {
+ struct clk *handles;
+ const char *clkName;
+ } clk[TEGRASOC_WHICH_CLK_MAX];
+} nvsoc_clks_t;
+
+/*
+ * To report an error for MSI/MSI-X when the unhandled interrupt count
+ * reaches a threshold
+ */
+
+typedef struct nv_irq_count_info_s
+{
+ int irq;
+ NvU64 unhandled;
+ NvU64 total;
+ NvU64 last_unhandled;
+} nv_irq_count_info_t;
+
+/* Linux-specific version of nv_dma_device_t */
+struct nv_dma_device {
+ struct {
+ NvU64 start;
+ NvU64 limit;
+ } addressable_range;
+
+ struct device *dev;
+};
+
+/* Properties of the coherent link */
+typedef struct coherent_link_info_s {
+ /* Physical address of the GPU memory in the SOC AMAP. In a bare-metal
+ * OS environment this is a System Physical Address (SPA); in a
+ * virtualized OS environment it is an Intermediate Physical Address
+ * (IPA). */
+ NvU64 gpu_mem_pa;
+
+ /* Physical address of the reserved portion of the GPU memory, applicable
+ * only on the Grace Hopper self-hosted passthrough virtualization
+ * platform. */
+ NvU64 rsvd_mem_pa;
+
+ /* Bitmap of NUMA node ids, corresponding to the reserved PXMs,
+ * available for adding GPU memory to the kernel as system RAM */
+ DECLARE_BITMAP(free_node_bitmap, MAX_NUMNODES);
+} coherent_link_info_t;
+
+#if defined(NV_LINUX_ACPI_EVENTS_SUPPORTED)
+/*
+ * ACPI data storage structure
+ *
+ * This structure retains the pointer to the device,
+ * and any other baggage we want to carry along
+ *
+ */
+typedef struct
+{
+ nvidia_stack_t *sp;
+ struct acpi_device *device;
+ struct acpi_handle *handle;
+ void *notifier_data;
+ int notify_handler_installed;
+} nv_acpi_t;
+#endif
+
+struct nv_pci_tegra_devfreq_data;
+struct nv_pci_tegra_devfreq_dev;
+
+/* linux-specific version of old nv_state_t */
+/* this is a general os-specific state structure.
+ The first element *must* be the general state structure, for the generic
+ unix-based code. */
+typedef struct nv_linux_state_s {
+ nv_state_t nv_state;
+
+ atomic_t usage_count;
+ NvU32 suspend_count;
+
+ struct device *dev;
+ struct pci_dev *pci_dev;
+
+ /* coherent link information */
+ coherent_link_info_t coherent_link_info;
+
+ /* Dedicated queue to be used for removing FB memory which is onlined
+ * to the kernel as a NUMA node. Refer to bug 3879845. */
+ nv_kthread_q_t remove_numa_memory_q;
+
+ /* NUMA node information for the platforms where GPU memory is presented
+ * as a NUMA node to the kernel */
+ struct {
+ /* NUMA node id >= 0 when the platform supports GPU memory as a NUMA
+ * node; otherwise it holds the value NUMA_NO_NODE */
+ NvS32 node_id;
+
+ /* NUMA online/offline status for platforms that support GPU memory as
+ * a NUMA node */
+ atomic_t status;
+ NvBool use_auto_online;
+ } numa_info;
+
+ nvidia_stack_t *sp[NV_DEV_STACK_COUNT];
+
+ char registry_keys[NV_MAX_REGISTRY_KEYS_LENGTH];
+
+ nv_work_t work;
+
+ /* get a timer callback every second */
+ struct nv_timer rc_timer;
+
+ /* lock for linux-specific data, not used by core rm */
+ struct semaphore ldata_lock;
+
+ /* proc directory information */
+ struct proc_dir_entry *proc_dir;
+
+ NvU32 minor_num;
+ struct nv_linux_state_s *next;
+
+ /* DRM private information */
+ struct drm_device *drm;
+
+ /* kthread based bottom half servicing queue and elements */
+ nv_kthread_q_t bottom_half_q;
+ nv_kthread_q_item_t bottom_half_q_item;
+
+ /* Lock for unlocked bottom half protecting common allocated stack */
+ void *isr_bh_unlocked_mutex;
+
+ NvBool tce_bypass_enabled;
+
+ NvU32 num_intr;
+
+ /* Lock serializing ISRs for different MSI-X vectors */
+ nv_spinlock_t msix_isr_lock;
+
+ /* Lock serializing bottom halves for different MSI-X vectors */
+ void *msix_bh_mutex;
+
+ struct msix_entry *msix_entries;
+
+ NvU64 numa_memblock_size;
+
+ struct {
+ struct backlight_device *dev;
+ NvU32 displayId;
+ const char *device_name;
+ } backlight;
+
+ /*
+ * file handle for the pci sysfs config file
+ * (/sys/bus/pci/devices/.../config), which will be opened during
+ * device probe
+ */
+ struct file *sysfs_config_file;
+
+ /* Per-GPU queue */
+ struct os_work_queue queue;
+
+ /* GPU user mapping revocation/remapping (only for non-CTL device) */
+ struct semaphore mmap_lock; /* Protects all fields in this category */
+ struct list_head open_files;
+ NvBool all_mappings_revoked;
+ NvBool safe_to_mmap;
+ NvBool gpu_wakeup_callback_needed;
+
+ /* Per-device notifier block for ACPI events */
+ struct notifier_block acpi_nb;
+
+#if defined(NV_LINUX_ACPI_EVENTS_SUPPORTED)
+ nv_acpi_t* nv_acpi_object;
+#endif
+
+ nv_i2c_client_entry_t i2c_clients[MAX_TEGRA_I2C_PORTS];
+
+ struct reset_control *dpaux0_reset;
+ struct reset_control *nvdisplay_reset;
+ struct reset_control *dsi_core_reset;
+ struct reset_control *mipi_cal_reset;
+ struct reset_control *hdacodec_reset;
+
+ /*
+ * nv_imp_icc_path represents the interconnect path across which display
+ * data must travel.
+ */
+ struct icc_path *nv_imp_icc_path;
+
+#if defined(NV_DEVM_ICC_GET_PRESENT)
+ /*
+ * is_upstream_icc_path tracks whether we are using upstream ICC. This
+ * is required until we fully migrate to upstream ICC where it is
+ * available. Right now, even if upstream ICC is available, we are still
+ * using downstream ICC mechanisms for T23x.
+ */ + NvBool is_upstream_icc_path; +#endif + + nvsoc_clks_t soc_clk_handles; + + /* Lock serializing ISRs for different SOC vectors */ + nv_spinlock_t soc_isr_lock; + void *soc_bh_mutex; + + struct nv_timer snapshot_timer; + nv_spinlock_t snapshot_timer_lock; + void (*snapshot_callback)(void *context); + + /* count for unhandled, total and timestamp of irq */ + nv_irq_count_info_t *irq_count; + + /* Max number of irq triggered and are getting tracked */ + NvU16 current_num_irq_tracked; + + NvBool is_forced_shutdown; + + struct nv_dma_device dma_dev; + struct nv_dma_device niso_dma_dev; + + /* + * Background kthread for handling deferred open operations + * (e.g. from O_NONBLOCK). + * + * Adding to open_q and reading/writing is_accepting_opens + * are protected by nvl->open_q_lock (not nvl->ldata_lock). + * This allows new deferred open operations to be enqueued without + * blocking behind previous ones (which hold nvl->ldata_lock). + * + * Adding to open_q is only safe if is_accepting_opens is true. + * This prevents open operations from racing with device removal. + * + * Stopping open_q is only safe after setting is_accepting_opens to false. + * This ensures that the open_q (and the larger nvl structure) will + * outlive any of the open operations enqueued. + */ + nv_kthread_q_t open_q; + NvBool is_accepting_opens; + struct semaphore open_q_lock; +#if defined(NV_VGPU_KVM_BUILD) + wait_queue_head_t wait; + NvS32 return_status; +#endif + +#if defined(CONFIG_PM_DEVFREQ) + const struct nv_pci_tegra_devfreq_data *devfreq_table; + unsigned int devfreq_table_size; + struct nv_pci_tegra_devfreq_dev *gpc_devfreq_dev; + struct nv_pci_tegra_devfreq_dev *nvd_devfreq_dev; + struct nv_pci_tegra_devfreq_dev *sys_devfreq_dev; + struct nv_pci_tegra_devfreq_dev *pwr_devfreq_dev; + + int (*devfreq_suspend)(struct device *dev); + int (*devfreq_resume)(struct device *dev); + int (*devfreq_enable_boost)(struct device *dev, unsigned int duration); + int (*devfreq_disable_boost)(struct device *dev); +#endif +} nv_linux_state_t; + +extern nv_linux_state_t *nv_linux_devices; + +/* + * Macros to protect operations on nv_linux_devices list + * Lock acquisition order while using the nv_linux_devices list + * 1. LOCK_NV_LINUX_DEVICES() + * 2. Traverse the list + * If the list is traversed to search for an element say nvl, + * acquire the nvl->ldata_lock before step 3 + * 3. UNLOCK_NV_LINUX_DEVICES() + * 4. Release nvl->ldata_lock after any read/write access to the + * nvl element is complete + */ +extern struct semaphore nv_linux_devices_lock; +#define LOCK_NV_LINUX_DEVICES() down(&nv_linux_devices_lock) +#define UNLOCK_NV_LINUX_DEVICES() up(&nv_linux_devices_lock) + +/* + * Lock to synchronize system power management transitions, + * and to protect the global system PM state. The procfs power + * management interface acquires this lock in write mode for + * the duration of the sleep operation, any other paths accessing + * device state must acquire the lock in read mode. + */ +extern struct rw_semaphore nv_system_pm_lock; + +extern NvBool nv_ats_supported; + +/* + * file-private data + * hide a pointer to our data structures in a file-private ptr + * there are times we need to grab this data back from the file + * data structure.. 
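+ *
+ * A typical retrieval, using the macros defined below (a sketch; "filp"
+ * stands in for the struct file * the kernel hands us):
+ *
+ *     nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(filp);
+ *     nv_linux_state_t *nvl = nvlfp->nvptr;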
+ */ + +typedef struct nvidia_event +{ + struct nvidia_event *next; + nv_event_t event; +} nvidia_event_t; + +typedef struct +{ + nv_file_private_t nvfp; + + nvidia_stack_t *sp; + nv_alloc_t *free_list; + nv_linux_state_t *nvptr; + nvidia_event_t *event_data_head, *event_data_tail; + NvBool dataless_event_pending; + nv_spinlock_t fp_lock; + wait_queue_head_t waitqueue; + nv_kthread_q_item_t deferred_close_q_item; + NvU32 *attached_gpus; + size_t num_attached_gpus; + nv_alloc_mapping_context_t mmap_context; + struct address_space mapping; + + nv_kthread_q_item_t open_q_item; + struct completion open_complete; + nv_linux_state_t *deferred_open_nvl; + int open_rc; + NV_STATUS adapter_status; + + struct list_head entry; +} nv_linux_file_private_t; + +static inline nv_linux_file_private_t *nv_get_nvlfp_from_nvfp(nv_file_private_t *nvfp) +{ + return container_of(nvfp, nv_linux_file_private_t, nvfp); +} + +static inline int nv_wait_open_complete_interruptible(nv_linux_file_private_t *nvlfp) +{ + return wait_for_completion_interruptible(&nvlfp->open_complete); +} + +static inline void nv_wait_open_complete(nv_linux_file_private_t *nvlfp) +{ + wait_for_completion(&nvlfp->open_complete); +} + +static inline NvBool nv_is_open_complete(nv_linux_file_private_t *nvlfp) +{ + return completion_done(&nvlfp->open_complete); +} + +#define NV_SET_FILE_PRIVATE(filep,data) ((filep)->private_data = (data)) +#define NV_GET_LINUX_FILE_PRIVATE(filep) ((nv_linux_file_private_t *)(filep)->private_data) + +/* for the card devices */ +#define NV_GET_NVL_FROM_FILEP(filep) (NV_GET_LINUX_FILE_PRIVATE(filep)->nvptr) +#define NV_GET_NVL_FROM_NV_STATE(nv) ((nv_linux_state_t *)nv->os_state) + +#define NV_STATE_PTR(nvl) &(((nv_linux_state_t *)(nvl))->nv_state) + +#define NV_ATOMIC_READ(data) atomic_read(&(data)) +#define NV_ATOMIC_SET(data,val) atomic_set(&(data), (val)) +#define NV_ATOMIC_INC(data) atomic_inc(&(data)) +#define NV_ATOMIC_DEC(data) atomic_dec(&(data)) +#define NV_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data)) + +static inline struct kmem_cache *nv_kmem_cache_create(const char *name, unsigned int size, + unsigned int align) +{ + char *name_unique; + struct kmem_cache *cache; + +#if !defined(NV_SYSFS_SLAB_UNLINK_PRESENT) && defined(NV_LINUX_SLUB_DEF_H_PRESENT) + size_t len; + NvU64 tm_ns = nv_ktime_get_raw_ns(); + + /* + * Wait for timer to change at least once. This ensures + * that the name generated below is always unique. + */ + while (tm_ns == nv_ktime_get_raw_ns()); + tm_ns = nv_ktime_get_raw_ns(); + + /* 20 is the max length of a 64-bit integer printed in decimal */ + len = strlen(name) + 20 + 1; + name_unique = kzalloc(len, GFP_KERNEL); + if (!name_unique) + return NULL; + + if (snprintf(name_unique, len, "%s-%llu", name, tm_ns) >= len) + { + WARN(1, "kmem cache name too long: %s\n", name); + kfree(name_unique); + return NULL; + } +#else + name_unique = (char *)name; +#endif + cache = kmem_cache_create(name_unique, size, align, 0, nv_kmem_ctor_dummy); + if (name_unique != name) + kfree(name_unique); + + return cache; +} + +#if defined(CONFIG_PCI_IOV) +#define NV_PCI_SRIOV_SUPPORT +#endif /* CONFIG_PCI_IOV */ + +#define NV_PCIE_CFG_MAX_OFFSET 0x1000 + +#include "nv-proto.h" + +/* + * Check if GPU is present on the bus by checking flag + * NV_FLAG_IN_SURPRISE_REMOVAL(set when eGPU is removed from TB3). 
+ */
+static inline NV_STATUS nv_check_gpu_state(nv_state_t *nv)
+{
+ if (NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv))
+ {
+ return NV_ERR_GPU_IS_LOST;
+ }
+
+ return NV_OK;
+}
+
+extern NvU32 NVreg_EnableUserNUMAManagement;
+extern NvU32 NVreg_RegisterPCIDriver;
+extern NvU32 NVreg_RegisterPlatformDeviceDriver;
+extern NvU32 NVreg_EnableResizableBar;
+extern NvU32 NVreg_TegraGpuPgMask;
+extern NvU32 NVreg_EnableNonblockingOpen;
+
+extern NvU32 num_probed_nv_devices;
+extern NvU32 num_nv_devices;
+
+#define NV_FILE_INODE(file) (file)->f_inode
+
+static inline int nv_is_control_device(struct inode *inode)
+{
+ return (minor((inode)->i_rdev) == NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE);
+}
+
+#if defined(NV_DOM0_KERNEL_PRESENT) || defined(NV_VGPU_KVM_BUILD) || defined(NV_DEVICE_VM_BUILD)
+#define NV_VGX_HYPER
+#if defined(NV_XEN_IOEMU_INJECT_MSI)
+#include <xen/ioemu.h>
+#endif
+#endif
+
+/*
+ * Decrements the usage count of the allocation, and moves the allocation to
+ * the given nvlfp's free list if the usage count drops to zero.
+ *
+ * Returns NV_TRUE if the allocation is moved to the nvlfp's free list.
+ */
+static inline NvBool nv_alloc_release(nv_linux_file_private_t *nvlfp, nv_alloc_t *at)
+{
+ NV_PRINT_AT(NV_DBG_MEMINFO, at);
+
+ if (NV_ATOMIC_DEC_AND_TEST(at->usage_count))
+ {
+ NV_ATOMIC_INC(at->usage_count);
+
+ at->next = nvlfp->free_list;
+ nvlfp->free_list = at;
+ return NV_TRUE;
+ }
+
+ return NV_FALSE;
+}
+
+/*
+ * RB_EMPTY_ROOT was added in 2.6.18 by this commit:
+ * 2006-06-21 dd67d051529387f6e44d22d1d5540ef281965fdd
+ */
+#if !defined(RB_EMPTY_ROOT)
+#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
+#endif
+
+// Default flags for ISRs
+static inline NvU32 nv_default_irq_flags(nv_state_t *nv)
+{
+ NvU32 flags = 0;
+
+ /*
+ * Request IRQs to be disabled in our ISRs to keep consistency across the
+ * supported kernel versions.
+ *
+ * IRQF_DISABLED was made the default in 2.6.35 with commit e58aa3d2d0cc
+ * from March 2010, and was later removed completely in 4.1 with commit
+ * d8bf368d0631 from March 2015. Add it to our flags if it's defined, to
+ * get the same behaviour on pre-2.6.35 kernels as on recent ones.
+ */
+#if defined(IRQF_DISABLED)
+ flags |= IRQF_DISABLED;
+#endif
+
+ /*
+ * For legacy interrupts, also allow sharing. Sharing doesn't make sense
+ * for MSI(-X), as on Linux they are never shared across different devices
+ * and we only register one ISR today.
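+ *
+ * For example, a caller would typically pass these flags straight to
+ * request_irq() (a sketch; the handler and device names are hypothetical):
+ *
+ *     rc = request_irq(irq, nvidia_isr, nv_default_irq_flags(nv),
+ *                      "nvidia", (void *)nvl);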
+ */ + if ((nv->flags & (NV_FLAG_USES_MSI | NV_FLAG_USES_MSIX)) == 0) + flags |= IRQF_SHARED; + + return flags; +} + +#define MODULE_BASE_NAME "nvidia" +#define MODULE_INSTANCE_NUMBER 0 +#define MODULE_INSTANCE_STRING "" +#define MODULE_NAME MODULE_BASE_NAME MODULE_INSTANCE_STRING + +NvS32 nv_request_soc_irq(nv_linux_state_t *, NvU32, nv_soc_irq_type_t, NvU32, NvU32, const char*); +NV_STATUS nv_imp_icc_get(nv_state_t *nv); +void nv_imp_icc_put(nv_state_t *nv); + +static inline void nv_mutex_destroy(struct mutex *lock) +{ + mutex_destroy(lock); +} + +static inline NvBool nv_platform_supports_numa(nv_linux_state_t *nvl) +{ + return nvl->numa_info.node_id != NUMA_NO_NODE; +} + +static inline int nv_get_numa_status(nv_linux_state_t *nvl) +{ + if (!nv_platform_supports_numa(nvl)) + { + return NV_IOCTL_NUMA_STATUS_DISABLED; + } + + return NV_ATOMIC_READ(nvl->numa_info.status); +} + +static inline int nv_set_numa_status(nv_linux_state_t *nvl, int status) +{ + if (!nv_platform_supports_numa(nvl)) + { + return -EINVAL; + } + + NV_ATOMIC_SET(nvl->numa_info.status, status); + return 0; +} + +static inline NvBool nv_platform_use_auto_online(nv_linux_state_t *nvl) +{ + return nvl->numa_info.use_auto_online; +} + +typedef enum +{ + NV_NUMA_STATUS_DISABLED = 0, + NV_NUMA_STATUS_OFFLINE = 1, + NV_NUMA_STATUS_ONLINE_IN_PROGRESS = 2, + NV_NUMA_STATUS_ONLINE = 3, + NV_NUMA_STATUS_ONLINE_FAILED = 4, + NV_NUMA_STATUS_OFFLINE_IN_PROGRESS = 5, + NV_NUMA_STATUS_OFFLINE_FAILED = 6, + NV_NUMA_STATUS_COUNT +} nv_numa_status_t; + +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(NV_LINUX_INTERCONNECT_H_PRESENT) +#include +#endif + +#include +#include +#include + +#define NV_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL_GPL(symbol) +#define NV_CHECK_EXPORT_SYMBOL(symbol) NV_IS_EXPORT_SYMBOL_PRESENT_##symbol + +#endif /* _NV_LINUX_H_ */ diff --git a/kernel-open/common/inc/nv-list-helpers.h b/kernel-open/common/inc/nv-list-helpers.h new file mode 100644 index 0000000..15d51d2 --- /dev/null +++ b/kernel-open/common/inc/nv-list-helpers.h @@ -0,0 +1,76 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+#ifndef __NV_LIST_HELPERS_H__
+#define __NV_LIST_HELPERS_H__
+
+#include <linux/list.h>
+#include "conftest.h"
+
+/*
+ * list_first_entry_or_null added by commit 6d7581e62f8b ("list: introduce
+ * list_first_entry_or_null") in v3.10 (2013-05-29).
+ */
+#if !defined(list_first_entry_or_null)
+ #define list_first_entry_or_null(ptr, type, member) \
+ (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+#endif
+
+/*
+ * Added by commit 93be3c2eb337 ("list: introduce list_last_entry(), use
+ * list_{first,last}_entry()") in v3.13 (2013-11-12).
+ */
+#if !defined(list_last_entry)
+ #define list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+#endif
+
+/* list_last_entry_or_null() doesn't actually exist in the kernel */
+#if !defined(list_last_entry_or_null)
+ #define list_last_entry_or_null(ptr, type, member) \
+ (!list_empty(ptr) ? list_last_entry(ptr, type, member) : NULL)
+#endif
+
+/*
+ * list_prev_entry() and list_next_entry() added by commit 008208c6b26f
+ * ("list: introduce list_next_entry() and list_prev_entry()") in
+ * v3.13 (2013-11-12).
+ */
+#if !defined(list_prev_entry)
+ #define list_prev_entry(pos, member) \
+ list_entry((pos)->member.prev, typeof(*(pos)), member)
+#endif
+
+#if !defined(list_next_entry)
+ #define list_next_entry(pos, member) \
+ list_entry((pos)->member.next, typeof(*(pos)), member)
+#endif
+
+#if !defined(NV_LIST_IS_FIRST_PRESENT)
+ static inline int list_is_first(const struct list_head *list,
+ const struct list_head *head)
+ {
+ return list->prev == head;
+ }
+#endif
+
+#endif // __NV_LIST_HELPERS_H__
diff --git a/kernel-open/common/inc/nv-lock.h b/kernel-open/common/inc/nv-lock.h
new file mode 100644
index 0000000..6fa0b10
--- /dev/null
+++ b/kernel-open/common/inc/nv-lock.h
@@ -0,0 +1,60 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_LOCK_H_
+#define _NV_LOCK_H_
+
+#include "conftest.h"
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h> /* cond_resched */
+#include <linux/semaphore.h>
+#include <linux/sched/signal.h> /* signal_pending */
+
+typedef spinlock_t nv_spinlock_t;
+#define NV_DEFINE_SPINLOCK(lock) DEFINE_SPINLOCK(lock)
+#define NV_SPIN_LOCK_INIT(lock) spin_lock_init(lock)
+#define NV_SPIN_LOCK_IRQ(lock) spin_lock_irq(lock)
+#define NV_SPIN_UNLOCK_IRQ(lock) spin_unlock_irq(lock)
+#define NV_SPIN_LOCK_IRQSAVE(lock,flags) spin_lock_irqsave(lock,flags)
+#define NV_SPIN_UNLOCK_IRQRESTORE(lock,flags) spin_unlock_irqrestore(lock,flags)
+#define NV_SPIN_LOCK(lock) spin_lock(lock)
+#define NV_SPIN_UNLOCK(lock) spin_unlock(lock)
+#define NV_SPIN_UNLOCK_WAIT(lock) spin_unlock_wait(lock)
+
+#define NV_INIT_MUTEX(mutex) sema_init(mutex, 1)
+
+static inline int nv_down_read_interruptible(struct rw_semaphore *lock)
+{
+ while (!down_read_trylock(lock))
+ {
+ if (signal_pending(current))
+ return -EINTR;
+ cond_resched();
+ }
+ return 0;
+}
+
+
+#endif /* _NV_LOCK_H_ */
diff --git a/kernel-open/common/inc/nv-memdbg.h b/kernel-open/common/inc/nv-memdbg.h
new file mode 100644
index 0000000..a749571
--- /dev/null
+++ b/kernel-open/common/inc/nv-memdbg.h
@@ -0,0 +1,49 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NVMEMDBG_H_
+#define _NVMEMDBG_H_
+
+#include <nvtypes.h>
+
+void nv_memdbg_init(void);
+void nv_memdbg_add(void *addr, NvU64 size, const char *file, int line);
+void nv_memdbg_remove(void *addr, NvU64 size, const char *file, int line);
+void nv_memdbg_exit(void);
+
+#if defined(NV_MEM_LOGGER)
+
+#define NV_MEMDBG_ADD(ptr, size) \
+ nv_memdbg_add(ptr, size, __FILE__, __LINE__)
+
+#define NV_MEMDBG_REMOVE(ptr, size) \
+ nv_memdbg_remove(ptr, size, __FILE__, __LINE__)
+
+#else
+
+#define NV_MEMDBG_ADD(ptr, size)
+#define NV_MEMDBG_REMOVE(ptr, size)
+
+#endif /* NV_MEM_LOGGER */
+
+#endif /* _NVMEMDBG_H_ */
diff --git a/kernel-open/common/inc/nv-mm.h b/kernel-open/common/inc/nv-mm.h
new file mode 100644
index 0000000..8ca5bb5
--- /dev/null
+++ b/kernel-open/common/inc/nv-mm.h
@@ -0,0 +1,210 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __NV_MM_H__ +#define __NV_MM_H__ + +#include "conftest.h" + +#if !defined(NV_VM_FAULT_T_IS_PRESENT) +typedef int vm_fault_t; +#endif + +/* + * pin_user_pages() + * + * Presence of pin_user_pages() also implies the presence of unpin_user_page(). + * Both were added in v5.6. + * + * pin_user_pages() was added by commit eddb1c228f79 + * ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6. + * + * Removed vmas parameter from pin_user_pages() by commit 4c630f307455 + * ("mm/gup: remove vmas parameter from pin_user_pages()") in v6.5. + */ + +#include <linux/mm.h> +#include <linux/sched.h> + +/* + * FreeBSD's pin_user_pages conftest breaks, since pin_user_pages is an inline + * function there. Because it simply maps to get_user_pages, we can just replace + * NV_PIN_USER_PAGES with NV_GET_USER_PAGES on FreeBSD. + */ +#if defined(NV_PIN_USER_PAGES_PRESENT) && !defined(NV_BSD) + #if defined(NV_PIN_USER_PAGES_HAS_ARGS_VMAS) + #define NV_PIN_USER_PAGES(start, nr_pages, gup_flags, pages) \ + pin_user_pages(start, nr_pages, gup_flags, pages, NULL) + #else + #define NV_PIN_USER_PAGES pin_user_pages + #endif // NV_PIN_USER_PAGES_HAS_ARGS_VMAS + #define NV_UNPIN_USER_PAGE unpin_user_page +#else + #define NV_PIN_USER_PAGES NV_GET_USER_PAGES + #define NV_UNPIN_USER_PAGE put_page +#endif // NV_PIN_USER_PAGES_PRESENT + +/* + * get_user_pages() + * + * Removed vmas parameter from get_user_pages() by commit 54d020692b34 + * ("mm/gup: remove unused vmas parameter from get_user_pages()") in v6.5. + * + */ + +#if !defined(NV_GET_USER_PAGES_HAS_VMAS_ARG) + #define NV_GET_USER_PAGES get_user_pages +#else + #define NV_GET_USER_PAGES(start, nr_pages, flags, pages) \ + get_user_pages(start, nr_pages, flags, pages, NULL) +#endif + +/* + * pin_user_pages_remote() + * + * pin_user_pages_remote() was added by commit eddb1c228f79 + * ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6. + * + * pin_user_pages_remote() removed 'tsk' parameter by commit + * 64019a2e467a ("mm/gup: remove task_struct pointer for all gup code") + * in v5.9. + * + * Removed unused vmas parameter from pin_user_pages_remote() by commit + * 0b295316b3a9 ("mm/gup: remove unused vmas parameter from + * pin_user_pages_remote()") in v6.5.
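A hedged sketch of the wrapper pair in use (the helper is illustrative; as with the raw gup calls, the caller must hold the mm's mmap lock, e.g. via the nv_mmap_read_lock() helper defined later in this header):

    static long pin_one_page(unsigned long uaddr, struct page **pages)
    {
        long npinned = NV_PIN_USER_PAGES(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages);

        if (npinned != 1)
            return npinned < 0 ? npinned : -EFAULT;

        /* ... access the pinned page ... */

        NV_UNPIN_USER_PAGE(pages[0]);   /* plain put_page() on pre-v5.6 kernels */
        return 0;
    }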
+ * + */ + +#if defined(NV_PIN_USER_PAGES_REMOTE_PRESENT) + #if defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS) + #define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \ + pin_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, NULL, locked) + #elif defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS) + #define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \ + pin_user_pages_remote(mm, start, nr_pages, flags, pages, NULL, locked) + #else + #define NV_PIN_USER_PAGES_REMOTE pin_user_pages_remote + #endif // NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS +#else + #define NV_PIN_USER_PAGES_REMOTE NV_GET_USER_PAGES_REMOTE +#endif // NV_PIN_USER_PAGES_REMOTE_PRESENT + +/* + * Note that get_user_pages_remote() requires the caller to hold a reference on + * the mm_struct. + * This will always be true when using current and current->mm. If the kernel passes + * the driver a vma via driver callback, the kernel holds a reference on vma->vm_mm + * over that callback. + * + * get_user_pages_remote() removed 'tsk' parameter by + * commit 64019a2e467a ("mm/gup: remove task_struct pointer for + * all gup code") in v5.9. + * + * Removed vmas parameter from get_user_pages_remote() by commit ca5e863233e8 + * ("mm/gup: remove vmas parameter from get_user_pages_remote()") in v6.5. + * + */ + +#if defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS) + #define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \ + get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, NULL, locked) +#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK) + #define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \ + get_user_pages_remote(mm, start, nr_pages, flags, pages, NULL, locked) + +#else + #define NV_GET_USER_PAGES_REMOTE get_user_pages_remote +#endif + +static inline void nv_mmap_read_lock(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + mmap_read_lock(mm); +#else + down_read(&mm->mmap_sem); +#endif +} + +static inline void nv_mmap_read_unlock(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + mmap_read_unlock(mm); +#else + up_read(&mm->mmap_sem); +#endif +} + +static inline void nv_mmap_write_lock(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + mmap_write_lock(mm); +#else + down_write(&mm->mmap_sem); +#endif +} + +static inline void nv_mmap_write_unlock(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + mmap_write_unlock(mm); +#else + up_write(&mm->mmap_sem); +#endif +} + +static inline int nv_mm_rwsem_is_locked(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + return rwsem_is_locked(&mm->mmap_lock); +#else + return rwsem_is_locked(&mm->mmap_sem); +#endif +} + +static inline struct rw_semaphore *nv_mmap_get_lock(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + return &mm->mmap_lock; +#else + return &mm->mmap_sem; +#endif +} + +static inline void nv_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags) +{ +#if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS) + vm_flags_set(vma, flags); +#else + vma->vm_flags |= flags; +#endif +} + +static inline void nv_vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags) +{ +#if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS) + vm_flags_clear(vma, flags); +#else + vma->vm_flags &= ~flags; +#endif +} + +#endif // __NV_MM_H__ diff --git a/kernel-open/common/inc/nv-modeset-interface.h b/kernel-open/common/inc/nv-modeset-interface.h new file mode 100644 index 0000000..e2e303f --- /dev/null +++ 
b/kernel-open/common/inc/nv-modeset-interface.h @@ -0,0 +1,122 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_MODESET_INTERFACE_H_ +#define _NV_MODESET_INTERFACE_H_ + +/* + * This file defines the interface between the nvidia and + * nvidia-modeset UNIX kernel modules. + * + * The nvidia-modeset kernel module calls the nvidia kernel module's + * nvidia_get_rm_ops() function to get the RM API function pointers + * which it will need. + */ + +#include "nvstatus.h" + +#include "nv-gpu-info.h" + +/* + * nvidia_stack_s is defined in nv.h, which pulls in a lot of other + * dependencies. The nvidia-modeset kernel module doesn't need to + * dereference the nvidia_stack_s pointer, so just treat it as an + * opaque pointer for purposes of this API definition. + */ +typedef struct nvidia_stack_s *nvidia_modeset_stack_ptr; + +/* + * Callback functions from the RM OS interface layer into the NVKMS OS interface + * layer. + * + * These functions should be called without the RM lock held, using the kernel's + * native calling convention. + */ +typedef struct { + /* + * Suspend & resume callbacks. Note that these are called once per GPU. + */ + void (*suspend)(NvU32 gpu_id); + void (*resume)(NvU32 gpu_id); +} nvidia_modeset_callbacks_t; + +/* + * The RM API entry points which the nvidia-modeset kernel module should + * call in the nvidia kernel module. + */ + +typedef struct { + /* + * The nvidia-modeset kernel module should assign version_string + * before passing the structure to the nvidia kernel module, so + * that a version match can be confirmed: it is not supported to + * mix nvidia and nvidia-modeset kernel modules from different + * releases. + */ + const char *version_string; + + /* + * Return system information. + */ + struct { + /* Availability of write combining support for video memory */ + NvBool allow_write_combining; + } system_info; + + /* + * Allocate and free an nvidia_stack_t to pass into + * nvidia_modeset_rm_ops_t::op(). An nvidia_stack_t must only be + * used by one thread at a time. + * + * Note that on architectures where an alternate stack is not + * used, alloc_stack() will set sp=NULL even when it returns 0 + * (success). I.e., check the return value, not the sp value.
+ */ + int (*alloc_stack)(nvidia_modeset_stack_ptr *sp); + void (*free_stack)(nvidia_modeset_stack_ptr sp); + + /* + * Enumerate list of gpus probed by nvidia driver. + * + * gpu_info is an array of NVIDIA_MAX_GPUS elements. The number of GPUs + * in the system is returned. + */ + NvU32 (*enumerate_gpus)(nv_gpu_info_t *gpu_info); + + /* + * {open,close}_gpu() raise and lower the reference count of the + * specified GPU. This is equivalent to opening and closing a + * /dev/nvidiaN device file from user-space. + */ + int (*open_gpu)(NvU32 gpu_id, nvidia_modeset_stack_ptr sp); + void (*close_gpu)(NvU32 gpu_id, nvidia_modeset_stack_ptr sp); + + void (*op)(nvidia_modeset_stack_ptr sp, void *ops_cmd); + + int (*set_callbacks)(const nvidia_modeset_callbacks_t *cb); + +} nvidia_modeset_rm_ops_t; + +NV_STATUS nvidia_get_rm_ops(nvidia_modeset_rm_ops_t *rm_ops); + +#endif /* _NV_MODESET_INTERFACE_H_ */ diff --git a/kernel-open/common/inc/nv-msi.h b/kernel-open/common/inc/nv-msi.h new file mode 100644 index 0000000..2f10c4e --- /dev/null +++ b/kernel-open/common/inc/nv-msi.h @@ -0,0 +1,101 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
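A short sketch of the handshake these entry points imply (rm_ops, sp, and infos are illustrative; it assumes nv_gpu_info_t carries a gpu_id field, per nv-gpu-info.h):

    nvidia_modeset_stack_ptr sp = NULL;
    nv_gpu_info_t *infos;
    NvU32 i, count;

    /* Check the return value, not sp: on architectures without an
     * alternate stack, sp legitimately stays NULL on success. */
    if (rm_ops->alloc_stack(&sp) != 0)
        return -ENOMEM;

    infos = kcalloc(NVIDIA_MAX_GPUS, sizeof(*infos), GFP_KERNEL);
    if (infos != NULL)
    {
        count = rm_ops->enumerate_gpus(infos);     /* fills up to NVIDIA_MAX_GPUS */
        for (i = 0; i < count; i++)
            rm_ops->open_gpu(infos[i].gpu_id, sp); /* like opening /dev/nvidiaN */
        kfree(infos);
    }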
+ */ + +#ifndef _NV_MSI_H_ +#define _NV_MSI_H_ + +#include "nv-linux.h" + +#if (defined(CONFIG_X86_LOCAL_APIC) || defined(NVCPU_AARCH64)) && \ + (defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)) +#define NV_LINUX_PCIE_MSI_SUPPORTED +#endif + +#if !defined(NV_LINUX_PCIE_MSI_SUPPORTED) || !defined(CONFIG_PCI_MSI) +#define NV_PCI_DISABLE_MSI(pci_dev) +#else +#define NV_PCI_DISABLE_MSI(pci_dev) pci_disable_msi(pci_dev) +#endif + +irqreturn_t nvidia_isr (int, void *); +irqreturn_t nvidia_isr_msix (int, void *); +irqreturn_t nvidia_isr_kthread_bh (int, void *); +irqreturn_t nvidia_isr_msix_kthread_bh(int, void *); + +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) +void NV_API_CALL nv_init_msi (nv_state_t *); +void NV_API_CALL nv_init_msix (nv_state_t *); +NvS32 NV_API_CALL nv_request_msix_irq (nv_linux_state_t *); + +#define NV_PCI_MSIX_FLAGS 2 +#define NV_PCI_MSIX_FLAGS_QSIZE 0x7FF + +static inline void nv_free_msix_irq(nv_linux_state_t *nvl) +{ + int i; + + for (i = 0; i < nvl->num_intr; i++) + { + free_irq(nvl->msix_entries[i].vector, (void *)nvl); + } +} + +static inline int nv_get_max_irq(struct pci_dev *pci_dev) +{ + int nvec; + int cap_ptr; + NvU16 ctrl; + + cap_ptr = pci_find_capability(pci_dev, PCI_CAP_ID_MSIX); + /* + * The 'PCI_MSIX_FLAGS' was added in 2.6.21-rc3 by: + * 2007-03-05 f5f2b13129a6541debf8851bae843cbbf48298b7 + */ +#if defined(PCI_MSIX_FLAGS) + pci_read_config_word(pci_dev, cap_ptr + PCI_MSIX_FLAGS, &ctrl); + nvec = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; +#else + pci_read_config_word(pci_dev, cap_ptr + NV_PCI_MSIX_FLAGS, &ctrl); + nvec = (ctrl & NV_PCI_MSIX_FLAGS_QSIZE) + 1; +#endif + + return nvec; +} + +static inline int nv_pci_enable_msix(nv_linux_state_t *nvl, int nvec) +{ + int rc = 0; + + // We require all the vectors we are requesting so use the same min and max + rc = pci_enable_msix_range(nvl->pci_dev, nvl->msix_entries, nvec, nvec); + if (rc < 0) + { + return NV_ERR_OPERATING_SYSTEM; + } + WARN_ON(nvec != rc); + + nvl->num_intr = nvec; + return NV_OK; +} +#endif +#endif /* _NV_MSI_H_ */ diff --git a/kernel-open/common/inc/nv-pci-types.h b/kernel-open/common/inc/nv-pci-types.h new file mode 100644 index 0000000..9706d0e --- /dev/null +++ b/kernel-open/common/inc/nv-pci-types.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
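The arithmetic in nv_get_max_irq() follows the PCI specification: the 11-bit Table Size field of the MSI-X Message Control word encodes the vector count minus one, so the masked value plus one is the number of supported vectors. For example:

    NvU16 ctrl = 0x001f;                           /* example Message Control word */
    int nvec = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;  /* 0x1f + 1 = 32 vectors */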
+ */ + +#ifndef _NV_PCI_TYPES_H_ +#define _NV_PCI_TYPES_H_ + +#include <linux/pci.h> +#include "conftest.h" + +#if defined(NV_PCI_CHANNEL_STATE_PRESENT) +typedef enum pci_channel_state nv_pci_channel_state_t; +#else +typedef pci_channel_state_t nv_pci_channel_state_t; +#endif + +#endif diff --git a/kernel-open/common/inc/nv-pci.h b/kernel-open/common/inc/nv-pci.h new file mode 100644 index 0000000..cca4930 --- /dev/null +++ b/kernel-open/common/inc/nv-pci.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_PCI_H_ +#define _NV_PCI_H_ + +#include <linux/pci.h> +#include "nv-linux.h" + +#define NV_GPU_BAR1 1 +#define NV_GPU_BAR3 3 + +int nv_pci_register_driver(void); +void nv_pci_unregister_driver(void); +int nv_pci_count_devices(void); +NvU8 nv_find_pci_capability(struct pci_dev *, NvU8); +int nvidia_dev_get_pci_info(const NvU8 *, struct pci_dev **, NvU64 *, NvU64 *); +nv_linux_state_t * find_pci(NvU32, NvU8, NvU8, NvU8); +NvBool nv_pci_is_valid_topology_for_direct_pci(nv_state_t *, struct pci_dev *); +NvBool nv_pci_has_common_pci_switch(nv_state_t *nv, struct pci_dev *); +#endif diff --git a/kernel-open/common/inc/nv-pgprot.h b/kernel-open/common/inc/nv-pgprot.h new file mode 100644 index 0000000..7e38846 --- /dev/null +++ b/kernel-open/common/inc/nv-pgprot.h @@ -0,0 +1,105 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
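The nv_pci_channel_state_t typedef above lets PCI error-recovery callbacks compile both before and after the kernel renamed enum pci_channel_state to the pci_channel_state_t typedef; a minimal sketch (the handler name is hypothetical):

    static pci_ers_result_t nv_example_error_detected(struct pci_dev *dev,
                                                      nv_pci_channel_state_t state)
    {
        if (state == pci_channel_io_perm_failure)
            return PCI_ERS_RESULT_DISCONNECT;
        return PCI_ERS_RESULT_NEED_RESET;
    }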
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_PGPROT_H__ + +#define __NV_PGPROT_H__ + +#include "cpuopsys.h" + +#include <linux/mm.h> + +#if !defined(NV_VMWARE) +#if defined(NVCPU_X86_64) +/* mark memory UC-, rather than UC (don't use _PAGE_PWT) */ +static inline pgprot_t pgprot_noncached_weak(pgprot_t old_prot) + { + pgprot_t new_prot = old_prot; + if (boot_cpu_data.x86 > 3) + new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD); + return new_prot; + } + +#if !defined (pgprot_noncached) +static inline pgprot_t pgprot_noncached(pgprot_t old_prot) + { + pgprot_t new_prot = old_prot; + if (boot_cpu_data.x86 > 3) + new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD | _PAGE_PWT); + return new_prot; + } +#endif +static inline pgprot_t pgprot_modify_writecombine(pgprot_t old_prot) + { + pgprot_t new_prot = old_prot; + pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_PCD | _PAGE_PWT); + new_prot = __pgprot(pgprot_val(new_prot) | _PAGE_PWT); + return new_prot; + } +#endif /* defined(NVCPU_X86_64) */ +#endif /* !defined(NV_VMWARE) */ + +#if defined(NVCPU_AARCH64) +extern NvBool nvos_is_chipset_io_coherent(void); +/* + * Don't rely on the kernel's definition of pgprot_noncached(), as on 64-bit + * ARM that's not for system memory, but device memory instead. + */ +#define NV_PGPROT_UNCACHED(old_prot) \ + __pgprot_modify((old_prot), PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC)) +#else +#define NV_PGPROT_UNCACHED(old_prot) pgprot_noncached(old_prot) +#endif + +#define NV_PGPROT_UNCACHED_DEVICE(old_prot) pgprot_noncached(old_prot) +#if defined(NVCPU_AARCH64) +#define NV_PROT_WRITE_COMBINED_DEVICE (PROT_DEFAULT | PTE_PXN | PTE_UXN | \ + PTE_ATTRINDX(MT_DEVICE_nGnRE)) +#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \ + __pgprot_modify(old_prot, PTE_ATTRINDX_MASK, NV_PROT_WRITE_COMBINED_DEVICE) +#define NV_PGPROT_WRITE_COMBINED(old_prot) NV_PGPROT_UNCACHED(old_prot) +#define NV_PGPROT_READ_ONLY(old_prot) \ + __pgprot_modify(old_prot, 0, PTE_RDONLY) +#elif defined(NVCPU_X86_64) +#define NV_PGPROT_UNCACHED_WEAK(old_prot) pgprot_noncached_weak(old_prot) +#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \ + pgprot_modify_writecombine(old_prot) +#define NV_PGPROT_WRITE_COMBINED(old_prot) \ + NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) +#define NV_PGPROT_READ_ONLY(old_prot) \ + __pgprot(pgprot_val((old_prot)) & ~_PAGE_RW) +#elif defined(NVCPU_RISCV64) +#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \ + pgprot_writecombine(old_prot) +/* Don't attempt to mark sysmem pages as write combined on riscv */ +#define NV_PGPROT_WRITE_COMBINED(old_prot) old_prot +#define NV_PGPROT_READ_ONLY(old_prot) \ + __pgprot(pgprot_val((old_prot)) & ~_PAGE_WRITE) +#else +/* Writecombine is not supported */ +#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) +#define NV_PGPROT_WRITE_COMBINED(old_prot) +#define NV_PGPROT_READ_ONLY(old_prot) +#endif + +#endif /* __NV_PGPROT_H__ */ diff --git a/kernel-open/common/inc/nv-platform.h b/kernel-open/common/inc/nv-platform.h new file mode 100644 index 0000000..c6da33e --- /dev/null +++ b/kernel-open/common/inc/nv-platform.h @@ -0,0 +1,48 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
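Returning to nv-pgprot.h just above: a brief sketch of how these wrappers are typically applied in an mmap handler (the handler and pfn are illustrative):

    static int nv_example_mmap(struct file *file, struct vm_area_struct *vma)
    {
        unsigned long pfn = 0;   /* illustrative: PFN of the BAR region to map */

        /* Mark the mapping write-combined in an architecture-independent way. */
        vma->vm_page_prot = NV_PGPROT_WRITE_COMBINED_DEVICE(vma->vm_page_prot);
        return io_remap_pfn_range(vma, vma->vm_start, pfn,
                                  vma->vm_end - vma->vm_start,
                                  vma->vm_page_prot);
    }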
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NV_PLATFORM_H +#define NV_PLATFORM_H + +#include "nv-linux.h" + +irqreturn_t nvidia_isr (int, void *); +irqreturn_t nvidia_isr_kthread_bh (int, void *); + +int nv_platform_register_driver(void); +void nv_platform_unregister_driver(void); +int nv_platform_count_devices(void); +int nv_soc_register_irqs(nv_state_t *nv); +void nv_soc_free_irqs(nv_state_t *nv); + +#define NV_SUPPORTS_PLATFORM_DEVICE NV_IS_EXPORT_SYMBOL_PRESENT___platform_driver_register + +#if defined(NV_LINUX_PLATFORM_TEGRA_DCE_DCE_CLIENT_IPC_H_PRESENT) +#define NV_SUPPORTS_DCE_CLIENT_IPC 1 +#else +#define NV_SUPPORTS_DCE_CLIENT_IPC 0 +#endif + +#define NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE (NV_SUPPORTS_PLATFORM_DEVICE && NV_SUPPORTS_DCE_CLIENT_IPC) + +#endif diff --git a/kernel-open/common/inc/nv-procfs-utils.h b/kernel-open/common/inc/nv-procfs-utils.h new file mode 100644 index 0000000..6ae773e --- /dev/null +++ b/kernel-open/common/inc/nv-procfs-utils.h @@ -0,0 +1,177 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_PROCFS_UTILS_H +#define _NV_PROCFS_UTILS_H + +#include "conftest.h" + +#ifdef CONFIG_PROC_FS +#include <linux/proc_fs.h> +#include <linux/seq_file.h> + +/* + * Allow procfs to create a file to exercise error forwarding.
+ * This is supported by CRAY platforms. + */ +#if defined(CONFIG_CRAY_XT) +#define EXERCISE_ERROR_FORWARDING NV_TRUE +#else +#define EXERCISE_ERROR_FORWARDING NV_FALSE +#endif + +#define IS_EXERCISE_ERROR_FORWARDING_ENABLED() (EXERCISE_ERROR_FORWARDING) + +#if defined(NV_PROC_OPS_PRESENT) +typedef struct proc_ops nv_proc_ops_t; + +#define NV_PROC_OPS_SET_OWNER() + +#define NV_PROC_OPS_OPEN proc_open +#define NV_PROC_OPS_READ proc_read +#define NV_PROC_OPS_WRITE proc_write +#define NV_PROC_OPS_LSEEK proc_lseek +#define NV_PROC_OPS_RELEASE proc_release +#else +typedef struct file_operations nv_proc_ops_t; + +#define NV_PROC_OPS_SET_OWNER() .owner = THIS_MODULE, + +#define NV_PROC_OPS_OPEN open +#define NV_PROC_OPS_READ read +#define NV_PROC_OPS_WRITE write +#define NV_PROC_OPS_LSEEK llseek +#define NV_PROC_OPS_RELEASE release +#endif + +#define NV_CREATE_PROC_FILE(filename,parent,__name,__data) \ + ({ \ + struct proc_dir_entry *__entry; \ + int mode = (S_IFREG | S_IRUGO); \ + const nv_proc_ops_t *fops = &nv_procfs_##__name##_fops; \ + if (fops->NV_PROC_OPS_WRITE != 0) \ + mode |= S_IWUSR; \ + __entry = proc_create_data(filename, mode, parent, fops, __data);\ + __entry; \ + }) + +# define NV_PROC_MKDIR_MODE(name, mode, parent) \ + proc_mkdir_mode(name, mode, parent) + +#define NV_CREATE_PROC_DIR(name,parent) \ + ({ \ + struct proc_dir_entry *__entry; \ + int mode = (S_IFDIR | S_IRUGO | S_IXUGO); \ + __entry = NV_PROC_MKDIR_MODE(name, mode, parent); \ + __entry; \ + }) + +#if defined(NV_PDE_DATA_LOWER_CASE_PRESENT) +#define NV_PDE_DATA(inode) pde_data(inode) +#else +#define NV_PDE_DATA(inode) PDE_DATA(inode) +#endif + +#define NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \ + static ssize_t nv_procfs_read_lock_##name( \ + struct file *file, \ + char __user *buf, \ + size_t size, \ + loff_t *ppos \ + ) \ + { \ + int ret; \ + ret = nv_down_read_interruptible(&lock); \ + if (ret < 0) \ + { \ + return ret; \ + } \ + size = seq_read(file, buf, size, ppos); \ + up_read(&lock); \ + return size; \ + } \ + \ + static int nv_procfs_open_##name( \ + struct inode *inode, \ + struct file *filep \ + ) \ + { \ + int ret; \ + ret = single_open(filep, nv_procfs_read_##name, \ + NV_PDE_DATA(inode)); \ + if (ret < 0) \ + { \ + return ret; \ + } \ + return ret; \ + } \ + \ + static int nv_procfs_release_##name( \ + struct inode *inode, \ + struct file *filep \ + ) \ + { \ + return single_release(inode, filep); \ + } + +#define NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, lock) \ + NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \ + \ + static const nv_proc_ops_t nv_procfs_##name##_fops = { \ + NV_PROC_OPS_SET_OWNER() \ + .NV_PROC_OPS_OPEN = nv_procfs_open_##name, \ + .NV_PROC_OPS_READ = nv_procfs_read_lock_##name, \ + .NV_PROC_OPS_LSEEK = seq_lseek, \ + .NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \ + }; + +#define NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY_WITHOUT_LOCK(name) \ + static int nv_procfs_open_##name( \ + struct inode *inode, \ + struct file *filep \ + ) \ + { \ + int ret; \ + ret = single_open(filep, nv_procfs_read_##name, \ + NV_PDE_DATA(inode)); \ + return ret; \ + } \ + \ + static int nv_procfs_release_##name( \ + struct inode *inode, \ + struct file *filep \ + ) \ + { \ + return single_release(inode, filep); \ + } \ + \ + static const nv_proc_ops_t nv_procfs_##name##_fops = { \ + NV_PROC_OPS_SET_OWNER() \ + .NV_PROC_OPS_OPEN = nv_procfs_open_##name, \ + .NV_PROC_OPS_READ = seq_read, \ + .NV_PROC_OPS_LSEEK = seq_lseek, \ + .NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \ + }; + +#endif /* 
CONFIG_PROC_FS */ + +#endif /* _NV_PROCFS_UTILS_H */ diff --git a/kernel-open/common/inc/nv-procfs.h b/kernel-open/common/inc/nv-procfs.h new file mode 100644 index 0000000..11f9585 --- /dev/null +++ b/kernel-open/common/inc/nv-procfs.h @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_PROCFS_H +#define _NV_PROCFS_H + +#include "nv-procfs-utils.h" + +#endif /* _NV_PROCFS_H */ diff --git a/kernel-open/common/inc/nv-proto.h b/kernel-open/common/inc/nv-proto.h new file mode 100644 index 0000000..eb154f7 --- /dev/null +++ b/kernel-open/common/inc/nv-proto.h @@ -0,0 +1,95 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
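Looking back at the helpers in nv-procfs-utils.h, a sketch of how a read-only node is declared with them (the foo name and contents are illustrative):

    static int nv_procfs_read_foo(struct seq_file *s, void *v)
    {
        seq_printf(s, "example\n");
        return 0;
    }

    /* Emits nv_procfs_open_foo()/nv_procfs_release_foo() and an
     * nv_procfs_foo_fops table valid for both proc_ops and
     * file_operations kernels. */
    NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY_WITHOUT_LOCK(foo)

    /* At registration time; the file stays read-only because the
     * table has no write handler. */
    entry = NV_CREATE_PROC_FILE("foo", parent, foo, NULL);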
+ */ + +#ifndef _NV_PROTO_H_ +#define _NV_PROTO_H_ + +#include "nv-pci.h" +#include "nv-platform.h" + +extern const char *nv_device_name; + +void nv_acpi_register_notifier (nv_linux_state_t *); +void nv_acpi_unregister_notifier (nv_linux_state_t *); + +NvU8 nv_find_pci_capability (struct pci_dev *, NvU8); + +int nv_procfs_init (void); +void nv_procfs_exit (void); +void nv_procfs_add_warning (const char *, const char *); +int nv_procfs_add_gpu (nv_linux_state_t *); +void nv_procfs_remove_gpu (nv_linux_state_t *); + +int nvidia_mmap (struct file *, struct vm_area_struct *); +int nvidia_mmap_helper (nv_state_t *, nv_linux_file_private_t *, nvidia_stack_t *, struct vm_area_struct *, void *); +int nv_encode_caching (pgprot_t *, NvU32, nv_memory_type_t); +void nv_revoke_gpu_mappings_locked(nv_state_t *); + +NvUPtr nv_vm_map_pages (struct page **, NvU32, NvBool, NvBool); +void nv_vm_unmap_pages (NvUPtr, NvU32); + +NV_STATUS nv_alloc_contig_pages (nv_state_t *, nv_alloc_t *); +void nv_free_contig_pages (nv_alloc_t *); +NV_STATUS nv_alloc_system_pages (nv_state_t *, nv_alloc_t *); +void nv_free_system_pages (nv_alloc_t *); + +int nv_uvm_init (void); +void nv_uvm_exit (void); +NV_STATUS nv_uvm_suspend (void); +NV_STATUS nv_uvm_resume (void); +NV_STATUS nv_uvm_event_interrupt (const NvU8 *uuid); +NV_STATUS nv_uvm_drain_P2P (const NvU8 *uuid); +NV_STATUS nv_uvm_resume_P2P (const NvU8 *uuid); + +/* Move these to nv.h once implemented by other UNIX platforms */ +NvBool nvidia_get_gpuid_list (NvU32 *gpu_ids, NvU32 *gpu_count); +int nvidia_dev_get (NvU32, nvidia_stack_t *); +void nvidia_dev_put (NvU32, nvidia_stack_t *); +int nvidia_dev_get_uuid (const NvU8 *, nvidia_stack_t *); +void nvidia_dev_put_uuid (const NvU8 *, nvidia_stack_t *); +int nvidia_dev_block_gc6 (const NvU8 *, nvidia_stack_t *); +int nvidia_dev_unblock_gc6 (const NvU8 *, nvidia_stack_t *); + +#if defined(CONFIG_PM) +NV_STATUS nv_set_system_power_state (nv_power_state_t, nv_pm_action_depth_t); +#endif + +void nvidia_modeset_suspend (NvU32 gpuId); +void nvidia_modeset_resume (NvU32 gpuId); +NvBool nv_is_uuid_in_gpu_exclusion_list (const char *); + +NV_STATUS nv_parse_per_device_option_string(nvidia_stack_t *sp); +nv_linux_state_t * find_uuid(const NvU8 *uuid); +void nv_report_error(struct pci_dev *dev, NvU32 error_number, const char *format, va_list ap); +void nv_shutdown_adapter(nvidia_stack_t *, nv_state_t *, nv_linux_state_t *); +void nv_dev_free_stacks(nv_linux_state_t *); +NvBool nv_lock_init_locks(nvidia_stack_t *, nv_state_t *); +void nv_lock_destroy_locks(nvidia_stack_t *, nv_state_t *); +int nv_linux_add_device_locked(nv_linux_state_t *); +void nv_linux_remove_device_locked(nv_linux_state_t *); +NvBool nv_acpi_power_resource_method_present(struct pci_dev *); + +int nv_linux_init_open_q(nv_linux_state_t *); +void nv_linux_stop_open_q(nv_linux_state_t *); + +#endif /* _NV_PROTO_H_ */ diff --git a/kernel-open/common/inc/nv-retpoline.h b/kernel-open/common/inc/nv-retpoline.h new file mode 100644 index 0000000..2495503 --- /dev/null +++ b/kernel-open/common/inc/nv-retpoline.h @@ -0,0 +1,82 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_RETPOLINE_H_ +#define _NV_RETPOLINE_H_ + +#include "cpuopsys.h" + +#if (NV_SPECTRE_V2 == 0) +#define NV_RETPOLINE_THUNK NV_SPEC_THUNK +#else +#define NV_RETPOLINE_THUNK NV_NOSPEC_THUNK +#endif + +#if defined(NVCPU_X86_64) +#define NV_SPEC_THUNK(REG) \ + __asm__( \ + ".weak __x86_indirect_thunk_" #REG ";" \ + ".type __x86_indirect_thunk_" #REG ", @function;" \ + "__x86_indirect_thunk_" #REG ":" \ + " .cfi_startproc;" \ + " jmp *%" #REG ";" \ + " .cfi_endproc;" \ + ".size __x86_indirect_thunk_" #REG ", .-__x86_indirect_thunk_" #REG) + +#define NV_NOSPEC_THUNK(REG) \ + __asm__( \ + ".weak __x86_indirect_thunk_" #REG ";" \ + ".type __x86_indirect_thunk_" #REG ", @function;" \ + "__x86_indirect_thunk_" #REG ":" \ + " .cfi_startproc;" \ + " call .Lnv_no_fence_" #REG ";" \ + ".Lnv_fence_" #REG ":" \ + " pause;" \ + " lfence;" \ + " jmp .Lnv_fence_" #REG ";" \ + ".Lnv_no_fence_" #REG ":" \ + " mov %" #REG ", (%rsp);" \ + " ret;" \ + " .cfi_endproc;" \ + ".size __x86_indirect_thunk_" #REG ", .-__x86_indirect_thunk_" #REG) + + __asm__(".pushsection .text"); + NV_RETPOLINE_THUNK(rax); + NV_RETPOLINE_THUNK(rbx); + NV_RETPOLINE_THUNK(rcx); + NV_RETPOLINE_THUNK(rdx); + NV_RETPOLINE_THUNK(rsi); + NV_RETPOLINE_THUNK(rdi); + NV_RETPOLINE_THUNK(rbp); + NV_RETPOLINE_THUNK(r8); + NV_RETPOLINE_THUNK(r9); + NV_RETPOLINE_THUNK(r10); + NV_RETPOLINE_THUNK(r11); + NV_RETPOLINE_THUNK(r12); + NV_RETPOLINE_THUNK(r13); + NV_RETPOLINE_THUNK(r14); + NV_RETPOLINE_THUNK(r15); + __asm__(".popsection"); +#endif + +#endif /* _NV_RETPOLINE_H_ */ diff --git a/kernel-open/common/inc/nv-time.h b/kernel-open/common/inc/nv-time.h new file mode 100644 index 0000000..3d14f9b --- /dev/null +++ b/kernel-open/common/inc/nv-time.h @@ -0,0 +1,191 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_TIME_H__ +#define __NV_TIME_H__ + +#include "conftest.h" +#include <linux/sched.h> +#include <linux/delay.h> +#include <linux/ktime.h> +#include <linux/jiffies.h> + +#include <nvstatus.h> + +#define NV_MAX_ISR_DELAY_US 20000 +#define NV_MAX_ISR_DELAY_MS (NV_MAX_ISR_DELAY_US / 1000) +#define NV_NSECS_TO_JIFFIES(nsec) ((nsec) * HZ / 1000000000) + +#if !defined(NV_KTIME_GET_RAW_TS64_PRESENT) +static inline void ktime_get_raw_ts64(struct timespec64 *ts64) +{ + struct timespec ts; + getrawmonotonic(&ts); + ts64->tv_sec = ts.tv_sec; + ts64->tv_nsec = ts.tv_nsec; +} +#endif + +static NvBool nv_timer_less_than +( + const struct timespec64 *a, + const struct timespec64 *b +) +{ + return (a->tv_sec == b->tv_sec) ? (a->tv_nsec < b->tv_nsec) + : (a->tv_sec < b->tv_sec); +} + +static inline NvU64 nv_ktime_get_raw_ns(void) +{ + struct timespec64 ts; + ktime_get_raw_ts64(&ts); + return (NvU64)timespec64_to_ns(&ts); +} + +// #define NV_CHECK_DELAY_ACCURACY 1 + +/* + * It is generally a bad idea to use udelay() to wait for more than + * a few milliseconds. Since the caller is most likely not aware of + * this, we use mdelay() for any full millisecond to be safe. + */ +static inline NV_STATUS nv_sleep_us(unsigned int us) +{ + + unsigned long mdelay_safe_msec; + unsigned long usec; + +#ifdef NV_CHECK_DELAY_ACCURACY + struct timespec64 tm1, tm2, tm_diff; + + ktime_get_raw_ts64(&tm1); +#endif + + if (in_irq() && (us > NV_MAX_ISR_DELAY_US)) + return NV_ERR_GENERIC; + + mdelay_safe_msec = us / 1000; + if (mdelay_safe_msec) + mdelay(mdelay_safe_msec); + + usec = us % 1000; + if (usec) + udelay(usec); + +#ifdef NV_CHECK_DELAY_ACCURACY + ktime_get_raw_ts64(&tm2); + tm_diff = timespec64_sub(tm2, tm1); + pr_info("NVRM: delay of %d usec results in actual delay of 0x%llu nsec\n", + us, timespec64_to_ns(&tm_diff)); +#endif + return NV_OK; +} + +/* + * Sleep for specified milliseconds. Yields the CPU to scheduler. + * + * On Linux, a jiffie represents the time passed in between two timer + * interrupts. The number of jiffies per second (HZ) varies across the + * supported platforms. On i386, where HZ is 100, a timer interrupt is + * generated every 10ms. NV_NSECS_TO_JIFFIES should be accurate independent of + * the actual value of HZ; any partial jiffies will be 'floor'ed, the + * remainder will be accounted for with mdelay().
+ */ +static inline NV_STATUS nv_sleep_ms(unsigned int ms) +{ + NvU64 ns; + unsigned long jiffies; + unsigned long mdelay_safe_msec; + struct timespec64 tm_end, tm_aux; +#ifdef NV_CHECK_DELAY_ACCURACY + struct timespec64 tm_start; +#endif + + ktime_get_raw_ts64(&tm_aux); +#ifdef NV_CHECK_DELAY_ACCURACY + tm_start = tm_aux; +#endif + + if (in_irq() && (ms > NV_MAX_ISR_DELAY_MS)) + { + return NV_ERR_GENERIC; + } + + if (irqs_disabled() || in_interrupt() || in_atomic()) + { + mdelay(ms); + return NV_OK; + } + + ns = ms * (NvU64) NSEC_PER_MSEC; + tm_end.tv_nsec = ns; + tm_end.tv_sec = 0; + tm_end = timespec64_add(tm_aux, tm_end); + + /* do we have a full jiffie to wait? */ + jiffies = NV_NSECS_TO_JIFFIES(ns); + + if (jiffies) + { + // + // If we have at least one full jiffy to wait, give + // up the CPU; since we may be rescheduled before + // the requested timeout has expired, loop until less + // than a jiffie of the desired delay remains. + // + set_current_state(TASK_INTERRUPTIBLE); + do + { + schedule_timeout(jiffies); + ktime_get_raw_ts64(&tm_aux); + if (nv_timer_less_than(&tm_aux, &tm_end)) + { + tm_aux = timespec64_sub(tm_end, tm_aux); + ns = (NvU64) timespec64_to_ns(&tm_aux); + } + else + ns = 0; + } while ((jiffies = NV_NSECS_TO_JIFFIES(ns)) != 0); + } + + if (ns > (NvU64) NSEC_PER_MSEC) + { + mdelay_safe_msec = ns / (NvU64) NSEC_PER_MSEC; + mdelay(mdelay_safe_msec); + ns %= (NvU64) NSEC_PER_MSEC; + } + if (ns) + { + ndelay(ns); + } +#ifdef NV_CHECK_DELAY_ACCURACY + ktime_get_raw_ts64(&tm_aux); + tm_aux = timespec64_sub(tm_aux, tm_start); + pr_info("NVRM: delay of %d msec results in actual delay of %lld.%09ld sec\n", + ms, tm_aux.tv_sec, tm_aux.tv_nsec); +#endif + return NV_OK; +} + +#endif // __NV_TIME_H__ diff --git a/kernel-open/common/inc/nv-timer.h b/kernel-open/common/inc/nv-timer.h new file mode 100644 index 0000000..27b44c4 --- /dev/null +++ b/kernel-open/common/inc/nv-timer.h @@ -0,0 +1,62 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
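A usage sketch for the two delay helpers above (values are illustrative): both refuse, rather than silently spin, when asked for an overly long delay in interrupt context:

    /* Short wait, safe in atomic context: delegates to mdelay()/udelay(). */
    if (nv_sleep_us(1500) != NV_OK)
        return;   /* only fails in_irq() when above NV_MAX_ISR_DELAY_US */

    /* Longer wait in process context: yields the CPU in whole jiffies,
     * then finishes the remainder with mdelay()/ndelay(). */
    (void)nv_sleep_ms(250);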
+ */ +#ifndef __NV_TIMER_H__ +#define __NV_TIMER_H__ + +#include <linux/timer.h> +#include <linux/kernel.h> // For container_of + +#include "conftest.h" + +struct nv_timer +{ + struct timer_list kernel_timer; + void (*nv_timer_callback)(struct nv_timer *nv_timer); +}; + +static inline void nv_timer_callback_typed_data(struct timer_list *timer) +{ + struct nv_timer *nv_timer = + container_of(timer, struct nv_timer, kernel_timer); + + nv_timer->nv_timer_callback(nv_timer); +} + +static inline void nv_timer_setup(struct nv_timer *nv_timer, + void (*callback)(struct nv_timer *nv_timer)) +{ + nv_timer->nv_timer_callback = callback; + + timer_setup(&nv_timer->kernel_timer, nv_timer_callback_typed_data, 0); +} + +static inline void nv_timer_delete_sync(struct timer_list *timer) +{ +#if !defined(NV_BSD) && NV_IS_EXPORT_SYMBOL_PRESENT_timer_delete_sync + timer_delete_sync(timer); +#else + del_timer_sync(timer); +#endif +} + +#endif // __NV_TIMER_H__ diff --git a/kernel-open/common/inc/nv.h b/kernel-open/common/inc/nv.h new file mode 100644 index 0000000..433bba3 --- /dev/null +++ b/kernel-open/common/inc/nv.h @@ -0,0 +1,1297 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _NV_H_ +#define _NV_H_ + + + +#include <nvlimits.h> + +#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(__FreeBSD__) + #include <sys/stddef.h> // NULL +#elif defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX) + #include <linux/stddef.h> // NULL +#else + #include <stddef.h> // NULL +#endif + +#include <nvstatus.h> +#include "nv_stdarg.h" +#include <nv-caps.h> +#include <nv-firmware.h> +#include <nv-ioctl.h> +#include <nvmisc.h> +#include <os/nv_memory_type.h> +#include <os/nv_memory_area.h> + +extern nv_cap_t *nvidia_caps_root; + +extern const NvBool nv_is_rm_firmware_supported_os; + +#include <nv-kernel-interface-api.h> +#include <nv-chardev-numbers.h> + +#include <nv_mig_types.h> + +#define GPU_UUID_LEN (16) + +/* + * Buffer size for an ASCII UUID: We need 2 digits per byte, plus space + * for "GPU", 5 dashes, and '\0' termination: + */ +#define GPU_UUID_ASCII_LEN (GPU_UUID_LEN * 2 + 9) + +/* + * #define an absolute maximum used as a sanity check for the + * NV_ESC_IOCTL_XFER_CMD ioctl() size argument. + */ +#define NV_ABSOLUTE_MAX_IOCTL_SIZE 16384 + +/* + * Solaris provides no more than 8 bits for the argument size in + * the ioctl() command encoding; make sure we don't exceed this + * limit. + */ +#define __NV_IOWR_ASSERT(type) ((sizeof(type) <= NV_PLATFORM_MAX_IOCTL_SIZE) ? 
1 : -1) +#define __NV_IOWR(nr, type) ({ \ + typedef char __NV_IOWR_TYPE_SIZE_ASSERT[__NV_IOWR_ASSERT(type)]; \ + _IOWR(NV_IOCTL_MAGIC, (nr), type); \ +}) + +#define NV_PCI_DEV_FMT "%04x:%02x:%02x.%x" +#define NV_PCI_DEV_FMT_ARGS(nv) (nv)->pci_info.domain, (nv)->pci_info.bus, \ + (nv)->pci_info.slot, (nv)->pci_info.function + +#define NV_RM_DEVICE_INTR_ADDRESS 0x100 + +#define NV_TEGRA_PCI_IGPU_PG_MASK_DEFAULT 0xFFFFFFFF + +/* + * Clock domain identifier, which is used for fetching the engine + * load backed by the specified clock domain for Tegra platforms + * conforming linux devfreq framework to realize dynamic frequency + * scaling. + */ +typedef enum _TEGRASOC_DEVFREQ_CLK +{ + TEGRASOC_DEVFREQ_CLK_GPC, + TEGRASOC_DEVFREQ_CLK_NVD, +} TEGRASOC_DEVFREQ_CLK; + +/*! + * @brief The order of the display clocks in the below defined enum + * should be synced with below mapping array and macro. + * All four should be updated simultaneously in case + * of removal or addition of clocks in below order. + * Also, TEGRASOC_WHICH_CLK_MAX is used in various places + * in below mentioned files. + * arch/nvalloc/unix/Linux/nv-linux.h + * + * arch/nvalloc/unix/src/os.c + * dispClkMapRmToOsArr[] = {...}; + * + * arch/nvalloc/unix/Linux/nv-clk.c + * osMapClk[] = {...}; + * + */ +typedef enum _TEGRASOC_WHICH_CLK +{ + TEGRASOC_WHICH_CLK_NVDISPLAYHUB, + TEGRASOC_WHICH_CLK_NVDISPLAY_DISP, + TEGRASOC_WHICH_CLK_NVDISPLAY_P0, + TEGRASOC_WHICH_CLK_NVDISPLAY_P1, + TEGRASOC_WHICH_CLK_NVDISPLAY_P2, + TEGRASOC_WHICH_CLK_NVDISPLAY_P3, + TEGRASOC_WHICH_CLK_NVDISPLAY_P4, + TEGRASOC_WHICH_CLK_NVDISPLAY_P5, + TEGRASOC_WHICH_CLK_NVDISPLAY_P6, + TEGRASOC_WHICH_CLK_NVDISPLAY_P7, + TEGRASOC_WHICH_CLK_DPAUX0, + TEGRASOC_WHICH_CLK_FUSE, + TEGRASOC_WHICH_CLK_DSIPLL_VCO, + TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN, + TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA, + TEGRASOC_WHICH_CLK_SPPLL0_VCO, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN, + TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN, + TEGRASOC_WHICH_CLK_SPPLL0_DIV27, + TEGRASOC_WHICH_CLK_SPPLL1_DIV27, + TEGRASOC_WHICH_CLK_SPPLL0_DIV10, + TEGRASOC_WHICH_CLK_SPPLL0_DIV25, + TEGRASOC_WHICH_CLK_SPPLL1_VCO, + TEGRASOC_WHICH_CLK_VPLL0_REF, + TEGRASOC_WHICH_CLK_VPLL0, + TEGRASOC_WHICH_CLK_VPLL1, + TEGRASOC_WHICH_CLK_VPLL2, + TEGRASOC_WHICH_CLK_VPLL3, + TEGRASOC_WHICH_CLK_VPLL4, + TEGRASOC_WHICH_CLK_VPLL5, + TEGRASOC_WHICH_CLK_VPLL6, + TEGRASOC_WHICH_CLK_VPLL7, + TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF, + TEGRASOC_WHICH_CLK_RG0, + TEGRASOC_WHICH_CLK_RG1, + TEGRASOC_WHICH_CLK_RG2, + TEGRASOC_WHICH_CLK_RG3, + TEGRASOC_WHICH_CLK_RG4, + TEGRASOC_WHICH_CLK_RG5, + TEGRASOC_WHICH_CLK_RG6, + TEGRASOC_WHICH_CLK_RG7, + TEGRASOC_WHICH_CLK_DISPPLL, + TEGRASOC_WHICH_CLK_DISPHUBPLL, + TEGRASOC_WHICH_CLK_DSI_LP, + TEGRASOC_WHICH_CLK_DSI_CORE, + TEGRASOC_WHICH_CLK_DSI_PIXEL, + TEGRASOC_WHICH_CLK_PRE_SOR0, + TEGRASOC_WHICH_CLK_PRE_SOR1, + TEGRASOC_WHICH_CLK_PRE_SOR2, + TEGRASOC_WHICH_CLK_PRE_SOR3, + TEGRASOC_WHICH_CLK_DP_LINKA_REF, + TEGRASOC_WHICH_CLK_DP_LINKB_REF, + TEGRASOC_WHICH_CLK_DP_LINKC_REF, + TEGRASOC_WHICH_CLK_DP_LINKD_REF, + TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT, + TEGRASOC_WHICH_CLK_SOR_LINKB_INPUT, + TEGRASOC_WHICH_CLK_SOR_LINKC_INPUT, + TEGRASOC_WHICH_CLK_SOR_LINKD_INPUT, + TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO, + TEGRASOC_WHICH_CLK_SOR_LINKB_AFIFO, + TEGRASOC_WHICH_CLK_SOR_LINKC_AFIFO, + TEGRASOC_WHICH_CLK_SOR_LINKD_AFIFO, + TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M, + TEGRASOC_WHICH_CLK_RG0_M, + TEGRASOC_WHICH_CLK_RG1_M, + TEGRASOC_WHICH_CLK_SOR0_M, + 
TEGRASOC_WHICH_CLK_SOR1_M, + TEGRASOC_WHICH_CLK_PLLHUB, + TEGRASOC_WHICH_CLK_SOR0, + TEGRASOC_WHICH_CLK_SOR1, + TEGRASOC_WHICH_CLK_SOR2, + TEGRASOC_WHICH_CLK_SOR3, + TEGRASOC_WHICH_CLK_SOR_PADA_INPUT, + TEGRASOC_WHICH_CLK_SOR_PADB_INPUT, + TEGRASOC_WHICH_CLK_SOR_PADC_INPUT, + TEGRASOC_WHICH_CLK_SOR_PADD_INPUT, + TEGRASOC_WHICH_CLK_SOR0_PAD, + TEGRASOC_WHICH_CLK_SOR1_PAD, + TEGRASOC_WHICH_CLK_SOR2_PAD, + TEGRASOC_WHICH_CLK_SOR3_PAD, + TEGRASOC_WHICH_CLK_PRE_SF0, + TEGRASOC_WHICH_CLK_SF0, + TEGRASOC_WHICH_CLK_SF1, + TEGRASOC_WHICH_CLK_SF2, + TEGRASOC_WHICH_CLK_SF3, + TEGRASOC_WHICH_CLK_SF4, + TEGRASOC_WHICH_CLK_SF5, + TEGRASOC_WHICH_CLK_SF6, + TEGRASOC_WHICH_CLK_SF7, + TEGRASOC_WHICH_CLK_DSI_PAD_INPUT, + TEGRASOC_WHICH_CLK_PRE_SOR0_REF, + TEGRASOC_WHICH_CLK_PRE_SOR1_REF, + TEGRASOC_WHICH_CLK_SOR0_PLL_REF, + TEGRASOC_WHICH_CLK_SOR1_PLL_REF, + TEGRASOC_WHICH_CLK_SOR2_PLL_REF, + TEGRASOC_WHICH_CLK_SOR3_PLL_REF, + TEGRASOC_WHICH_CLK_SOR0_REF, + TEGRASOC_WHICH_CLK_SOR1_REF, + TEGRASOC_WHICH_CLK_SOR2_REF, + TEGRASOC_WHICH_CLK_SOR3_REF, + TEGRASOC_WHICH_CLK_OSC, + TEGRASOC_WHICH_CLK_DSC, + TEGRASOC_WHICH_CLK_MAUD, + TEGRASOC_WHICH_CLK_AZA_2XBIT, + TEGRASOC_WHICH_CLK_AZA_BIT, + TEGRASOC_WHICH_CLK_MIPI_CAL, + TEGRASOC_WHICH_CLK_UART_FST_MIPI_CAL, + TEGRASOC_WHICH_CLK_SOR0_DIV, + TEGRASOC_WHICH_CLK_DISP_ROOT, + TEGRASOC_WHICH_CLK_HUB_ROOT, + TEGRASOC_WHICH_CLK_PLLA_DISP, + TEGRASOC_WHICH_CLK_PLLA_DISPHUB, + TEGRASOC_WHICH_CLK_PLLA, + TEGRASOC_WHICH_CLK_VPLLX_SOR0_MUXED, + TEGRASOC_WHICH_CLK_VPLLX_SOR1_MUXED, + TEGRASOC_WHICH_CLK_VPLLX_SOR2_MUXED, + TEGRASOC_WHICH_CLK_VPLLX_SOR3_MUXED, + TEGRASOC_WHICH_CLK_SF0_SOR, + TEGRASOC_WHICH_CLK_SF1_SOR, + TEGRASOC_WHICH_CLK_SF2_SOR, + TEGRASOC_WHICH_CLK_SF3_SOR, + TEGRASOC_WHICH_CLK_SF4_SOR, + TEGRASOC_WHICH_CLK_SF5_SOR, + TEGRASOC_WHICH_CLK_SF6_SOR, + TEGRASOC_WHICH_CLK_SF7_SOR, + TEGRASOC_WHICH_CLK_EMC, + TEGRASOC_WHICH_CLK_GPU_FIRST, + TEGRASOC_WHICH_CLK_GPU_SYS = TEGRASOC_WHICH_CLK_GPU_FIRST, + TEGRASOC_WHICH_CLK_GPU_NVD, + TEGRASOC_WHICH_CLK_GPU_UPROC, + TEGRASOC_WHICH_CLK_GPU_GPC0, + TEGRASOC_WHICH_CLK_GPU_GPC1, + TEGRASOC_WHICH_CLK_GPU_GPC2, + TEGRASOC_WHICH_CLK_GPU_LAST = TEGRASOC_WHICH_CLK_GPU_GPC2, + TEGRASOC_WHICH_CLK_MAX, // TEGRASOC_WHICH_CLK_MAX is defined for boundary checks only. +} TEGRASOC_WHICH_CLK; + +#ifdef NVRM + +extern const char *pNVRM_ID; + +/* + * ptr arithmetic convenience + */ + +typedef union +{ + volatile NvV8 Reg008[1]; + volatile NvV16 Reg016[1]; + volatile NvV32 Reg032[1]; +} nv_hwreg_t, * nv_phwreg_t; + + +#define NVRM_PCICFG_NUM_BARS 6 +#define NVRM_PCICFG_BAR_OFFSET(i) (0x10 + (i) * 4) +#define NVRM_PCICFG_BAR_REQTYPE_MASK 0x00000001 +#define NVRM_PCICFG_BAR_REQTYPE_MEMORY 0x00000000 +#define NVRM_PCICFG_BAR_MEMTYPE_MASK 0x00000006 +#define NVRM_PCICFG_BAR_MEMTYPE_64BIT 0x00000004 +#define NVRM_PCICFG_BAR_ADDR_MASK 0xfffffff0 + +#define NVRM_PCICFG_NUM_DWORDS 16 + +#define NV_GPU_NUM_BARS 3 +#define NV_GPU_BAR_INDEX_REGS 0 +#define NV_GPU_BAR_INDEX_FB 1 +#define NV_GPU_BAR_INDEX_IMEM 2 + +typedef struct +{ + NvU64 cpu_address; + NvU64 size; + NvU32 offset; + NvU32 *map; + nv_phwreg_t map_u; +} nv_aperture_t; + +typedef struct +{ + char *name; + NvU32 *data; +} nv_parm_t; + +#define NV_RM_PAGE_SHIFT 12 +#define NV_RM_PAGE_SIZE (1 << NV_RM_PAGE_SHIFT) +#define NV_RM_PAGE_MASK (NV_RM_PAGE_SIZE - 1) + +#define NV_RM_TO_OS_PAGE_SHIFT (os_page_shift - NV_RM_PAGE_SHIFT) +#define NV_RM_PAGES_TO_OS_PAGES(count) \ + ((((NvUPtr)(count)) >> NV_RM_TO_OS_PAGE_SHIFT) + \ + ((((count) & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) != 0) ? 
1 : 0))
+
+#if defined(NVCPU_X86_64)
+#define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 3)
+#else
+#define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 2)
+#endif
+
+typedef struct nvidia_stack_s
+{
+    NvU32 size;
+    void *top;
+    NvU8  stack[NV_STACK_SIZE-16] __attribute__ ((aligned(16)));
+} nvidia_stack_t;
+
+/*
+ * TODO: Remove once all UNIX layers have been converted to use nvidia_stack_t
+ */
+typedef nvidia_stack_t nv_stack_t;
+
+typedef struct nv_file_private_t nv_file_private_t;
+
+/*
+ * This is a wrapper for UNIX events. Unlike the events that are
+ * returned to clients, it includes kernel-specific data, such as the
+ * file pointer.
+ */
+typedef struct nv_event_s
+{
+    NvHandle hParent;
+    NvHandle hObject;
+    NvU32 index;
+    NvU32 info32;
+    NvU16 info16;
+    nv_file_private_t *nvfp;  /* per file-descriptor data pointer */
+    NvU32 fd;
+    NvBool active;            /* whether the event should be signaled */
+    NvU32 refcount;           /* count of associated RM events */
+    struct nv_event_s *next;
+} nv_event_t;
+
+typedef struct nv_kern_mapping_s
+{
+    void *addr;
+    NvU64 size;
+    NvU32 modeFlag;
+    struct nv_kern_mapping_s *next;
+} nv_kern_mapping_t;
+
+typedef struct nv_usermap_access_params_s
+{
+    NvU64 addr;
+    NvU64 size;
+    NvU64 offset;
+    NvU64 *page_array;
+    NvU64 num_pages;
+    MemoryArea memArea;
+    NvU64 access_start;
+    NvU64 access_size;
+    NvBool contig;
+    NvU32 caching;
+} nv_usermap_access_params_t;
+
+/*
+ * Stores the mapping context for each mapping.
+ */
+typedef struct nv_alloc_mapping_context_s {
+    void *alloc;
+    NvU64 page_index;
+    NvU64 *page_array;
+    NvU64 num_pages;
+    MemoryArea memArea;
+    NvU64 access_start;
+    NvU64 access_size;
+    NvU32 prot;
+    NvBool valid;
+    NvU32 caching;
+} nv_alloc_mapping_context_t;
+
+typedef enum
+{
+    NV_SOC_IRQ_DISPLAY_TYPE = 0x1,
+    NV_SOC_IRQ_DPAUX_TYPE,
+    NV_SOC_IRQ_GPIO_TYPE,
+    NV_SOC_IRQ_HDACODEC_TYPE,
+    NV_SOC_IRQ_TCPC2DISP_TYPE,
+    NV_SOC_IRQ_HFRP0_TYPE,
+    NV_SOC_IRQ_HFRP1_TYPE,
+    NV_SOC_IRQ_INVALID_TYPE
+} nv_soc_irq_type_t;
+
+/*
+ * Stores the interrupt number, interrupt type, and private data.
+ */
+typedef struct nv_soc_irq_info_s {
+    NvU32 irq_num;
+    nv_soc_irq_type_t irq_type;
+    NvBool bh_pending;
+    union {
+        NvU32 gpio_num;
+        NvU32 dpaux_instance;
+    } irq_data;
+    NvS32 ref_count;
+} nv_soc_irq_info_t;
+
+#define NV_MAX_SOC_IRQS 10
+#define NV_MAX_DPAUX_NUM_DEVICES 4
+#define NV_MAX_DPAUX_DEV_NAME_SIZE 10
+
+#define NV_MAX_SOC_DPAUX_NUM_DEVICES 4
+
+/*
+ * per device state
+ */
+
+/* DMA-capable device data, defined by kernel interface layer */
+typedef struct nv_dma_device nv_dma_device_t;
+
+typedef struct nv_phys_addr_range
+{
+    NvU64 addr;
+    NvU64 len;
+} nv_phys_addr_range_t;
+
+typedef struct nv_state_t
+{
+    void *priv;     /* private data */
+    void *os_state; /* os-specific device state */
+
+    int flags;
+
+    /* PCI config info */
+    nv_pci_info_t pci_info;
+    NvU16 subsystem_id;
+    NvU16 subsystem_vendor;
+    NvU32 gpu_id;
+    NvU32 iovaspace_id;
+    struct
+    {
+        NvBool valid;
+        NvU8 uuid[GPU_UUID_LEN];
+        NvBool pci_uuid_read_attempted;
+        NV_STATUS pci_uuid_status;
+    } nv_uuid_cache;
+    void *handle;
+
+    NvU32 pci_cfg_space[NVRM_PCICFG_NUM_DWORDS];
+
+    /* physical characteristics */
+    nv_aperture_t bars[NV_GPU_NUM_BARS];
+    nv_aperture_t *regs;
+    nv_aperture_t *dpaux[NV_MAX_DPAUX_NUM_DEVICES];
+    nv_aperture_t *hdacodec_regs;
+    nv_aperture_t *mipical_regs;
+    nv_aperture_t *hfrp0_regs;
+    nv_aperture_t *hfrp1_regs;
+    nv_aperture_t *fb, ud;
+    nv_aperture_t *simregs;
+
+    NvU32 num_dpaux_instance;
+    NvU32 interrupt_line;
+    NvU32 dpaux_irqs[NV_MAX_DPAUX_NUM_DEVICES];
+    char dpaux_devname[NV_MAX_DPAUX_NUM_DEVICES][NV_MAX_DPAUX_DEV_NAME_SIZE];
+    nv_soc_irq_info_t soc_irq_info[NV_MAX_SOC_IRQS];
+    NvS32 current_soc_irq;
+    NvU32 num_soc_irqs;
+    NvU32 hdacodec_irq;
+    NvU32 tcpc2disp_irq;
+    NvU32 hfrp0_irq;
+    NvU32 hfrp1_irq;
+    NvU8 *soc_dcb_blob;
+    NvU32 soc_dcb_size;
+    NvU32 disp_sw_soc_chip_id;
+    NvBool soc_is_dpalt_mode_supported;
+    NvBool soc_is_hfrp_supported;
+
+    NvU64 dma_mask;
+
+    NvBool is_tegra_pci_igpu;
+    NvBool supports_tegra_igpu_rg;
+    NvBool is_tegra_pci_igpu_rg_enabled;
+    NvU32 tegra_pci_igpu_pg_mask;
+
+    NvBool primary_vga;
+
+    NvU32 sim_env;
+
+    NvU32 rc_timer_enabled;
+
+    /* list of events allocated for this device */
+    nv_event_t *event_list;
+
+    /* lock to protect event_list */
+    void *event_spinlock;
+
+    nv_kern_mapping_t *kern_mappings;
+
+    /* Kernel interface DMA device data */
+    nv_dma_device_t *dma_dev;
+    nv_dma_device_t *niso_dma_dev;
+
+    /*
+     * Per-GPU queue. The actual queue object is usually allocated in the
+     * arch-specific parent structure (e.g. nv_linux_state_t), and this
+     * pointer just points to it.
+     */
+    struct os_work_queue *queue;
+
+    /* For loading RM as a firmware (DCE or GSP) client */
+    NvBool request_firmware;                /* request firmware from the OS */
+    NvBool request_fw_client_rm;            /* attempt to init RM as a FW client */
+    NvBool allow_fallback_to_monolithic_rm; /* allow fallback to monolithic RM if FW client RM doesn't work out */
+    NvBool enable_firmware_logs;            /* attempt to enable firmware log decoding/printing */
+
+    /* Tracks whether nvidia_remove has been called */
+    NvBool removed;
+
+    NvBool console_device;
+
+    /* Tracks whether the GPU is an external GPU */
+    NvBool is_external_gpu;
+
+    /* Tracks whether the regkey PreserveVideoMemoryAllocations is set */
+    NvBool preserve_vidmem_allocations;
+
+    /* Variable to force allocation of 32-bit addressable memory */
+    NvBool force_dma32_alloc;
+
+    /* PCI power state should be D0 during system suspend */
+    NvBool d0_state_in_suspend;
+
+    /* Current cyclestats client and context */
+    NvU32 profiler_owner;
+    void *profiler_context;
+
+    /*
+     * RMAPI objects to use in the OS layer to talk to core RM.
+     *
+     * Note that we only need to store one subdevice handle: in SLI, we will
+     * have a separate nv_state_t per physical GPU.
+     */
+    struct {
+        NvHandle hClient;
+        NvHandle hDevice;
+        NvHandle hSubDevice;
+        NvHandle hI2C;
+        NvHandle hDisp;
+    } rmapi;
+
+    /* Bool to check if dma-buf is supported */
+    NvBool dma_buf_supported;
+
+    /* Check if NVPCF DSM function is implemented under NVPCF or GPU device scope */
+    NvBool nvpcf_dsm_in_gpu_scope;
+
+    /* Bool to check if the device received a shutdown notification */
+    NvBool is_shutdown;
+
+    /* Bool to check if the GPU has a coherent sysmem link */
+    NvBool coherent;
+
+    /*
+     * Bool to check if GPU memory is backed by struct page.
+     * False for non-coherent platforms. May also be false
+     * on coherent platforms if GPU memory is not onlined to the kernel.
+     */
+    NvBool mem_has_struct_page;
+
+    /* OS detected GPU has ATS capability */
+    NvBool ats_support;
+    /*
+     * NUMA node ID of the CPU to which the GPU is attached.
+     * Holds NUMA_NO_NODE on platforms that don't support NUMA configuration.
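+     * (Editor's note: NUMA_NO_NODE is the Linux kernel's -1 sentinel,
+     * which is why this field is declared as a signed NvS32.)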
+ */ + NvS32 cpu_numa_node_id; + + struct { + /* Bool to check if ISO iommu enabled */ + NvBool iso_iommu_present; + /* Bool to check if NISO iommu enabled */ + NvBool niso_iommu_present; + /* Display SMMU Stream IDs */ + NvU32 dispIsoStreamId; + NvU32 dispNisoStreamId; + } iommus; + + /* Console is managed by drm drivers or NVKMS */ + NvBool client_managed_console; +} nv_state_t; + +#define NVFP_TYPE_NONE 0x0 +#define NVFP_TYPE_REFCOUNTED 0x1 +#define NVFP_TYPE_REGISTERED 0x2 + +struct nv_file_private_t +{ + NvHandle *handles; + NvU16 maxHandles; + NvU32 deviceInstance; + NvU32 gpuInstanceId; + NvU8 metadata[64]; + + nv_file_private_t *ctl_nvfp; + void *ctl_nvfp_priv; + NvU32 register_or_refcount; + + // + // True if a client or an event was ever allocated on this fd. + // If false, RMAPI cleanup is skipped. + // + NvBool bCleanupRmapi; +}; + +// Forward define the gpu ops structures +typedef struct gpuSession *nvgpuSessionHandle_t; +typedef struct gpuDevice *nvgpuDeviceHandle_t; +typedef struct gpuAddressSpace *nvgpuAddressSpaceHandle_t; +typedef struct gpuTsg *nvgpuTsgHandle_t; +typedef struct UvmGpuTsgAllocParams_tag nvgpuTsgAllocParams_t; +typedef struct gpuChannel *nvgpuChannelHandle_t; +typedef struct UvmGpuChannelInfo_tag *nvgpuChannelInfo_t; +typedef struct UvmGpuChannelAllocParams_tag nvgpuChannelAllocParams_t; +typedef struct UvmGpuCaps_tag *nvgpuCaps_t; +typedef struct UvmGpuCopyEnginesCaps_tag *nvgpuCesCaps_t; +typedef struct UvmGpuAddressSpaceInfo_tag *nvgpuAddressSpaceInfo_t; +typedef struct UvmGpuAllocInfo_tag *nvgpuAllocInfo_t; +typedef struct UvmGpuP2PCapsParams_tag *nvgpuP2PCapsParams_t; +typedef struct UvmGpuFbInfo_tag *nvgpuFbInfo_t; +typedef struct UvmGpuNvlinkInfo_tag *nvgpuNvlinkInfo_t; +typedef struct UvmGpuEccInfo_tag *nvgpuEccInfo_t; +typedef struct UvmGpuFaultInfo_tag *nvgpuFaultInfo_t; +typedef struct UvmGpuAccessCntrInfo_tag *nvgpuAccessCntrInfo_t; +typedef struct UvmGpuAccessCntrConfig_tag nvgpuAccessCntrConfig_t; +typedef struct UvmGpuInfo_tag nvgpuInfo_t; +typedef struct UvmGpuClientInfo_tag nvgpuClientInfo_t; +typedef struct UvmPmaAllocationOptions_tag *nvgpuPmaAllocationOptions_t; +typedef struct UvmPmaStatistics_tag *nvgpuPmaStatistics_t; +typedef struct UvmGpuMemoryInfo_tag *nvgpuMemoryInfo_t; +typedef struct UvmGpuExternalMappingInfo_tag *nvgpuExternalMappingInfo_t; +typedef struct UvmGpuExternalPhysAddrInfo_tag *nvgpuExternalPhysAddrInfo_t; +typedef struct UvmGpuChannelResourceInfo_tag *nvgpuChannelResourceInfo_t; +typedef struct UvmGpuChannelInstanceInfo_tag *nvgpuChannelInstanceInfo_t; +typedef struct UvmGpuChannelResourceBindParams_tag *nvgpuChannelResourceBindParams_t; +typedef struct UvmGpuPagingChannelAllocParams_tag nvgpuPagingChannelAllocParams_t; +typedef struct UvmGpuPagingChannel_tag *nvgpuPagingChannelHandle_t; +typedef struct UvmGpuPagingChannelInfo_tag *nvgpuPagingChannelInfo_t; +typedef enum UvmPmaGpuMemoryType_tag nvgpuGpuMemoryType_t; +typedef NV_STATUS (*nvPmaEvictPagesCallback)(void *, NvU64, NvU64 *, NvU32, NvU64, NvU64, nvgpuGpuMemoryType_t); +typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64, nvgpuGpuMemoryType_t); + +/* + * flags + */ + +#define NV_FLAG_OPEN 0x0001 +#define NV_FLAG_EXCLUDE 0x0002 +#define NV_FLAG_CONTROL 0x0004 +#define NV_FLAG_PCI_P2P_UNSUPPORTED_CHIPSET 0x0008 +#define NV_FLAG_SOC_DISPLAY 0x0010 +#define NV_FLAG_USES_MSI 0x0020 +#define NV_FLAG_USES_MSIX 0x0040 +#define NV_FLAG_PASSTHRU 0x0080 +#define NV_FLAG_SUSPENDED 0x0100 +/* To be set when an FLR needs to be triggered after device shut 
down. */
+#define NV_FLAG_TRIGGER_FLR             0x0400
+#define NV_FLAG_PERSISTENT_SW_STATE     0x0800
+#define NV_FLAG_IN_RECOVERY             0x1000
+#define NV_FLAG_PCI_REMOVE_IN_PROGRESS  0x2000
+#define NV_FLAG_UNBIND_LOCK             0x4000
+/* To be set when GPU is not present on the bus, to help device teardown */
+#define NV_FLAG_IN_SURPRISE_REMOVAL     0x8000
+
+typedef enum
+{
+    NV_PM_ACTION_HIBERNATE,
+    NV_PM_ACTION_STANDBY,
+    NV_PM_ACTION_RESUME
+} nv_pm_action_t;
+
+typedef enum
+{
+    NV_PM_ACTION_DEPTH_DEFAULT,
+    NV_PM_ACTION_DEPTH_MODESET,
+    NV_PM_ACTION_DEPTH_UVM
+} nv_pm_action_depth_t;
+
+typedef enum
+{
+    NV_DYNAMIC_PM_NEVER,
+    NV_DYNAMIC_PM_COARSE,
+    NV_DYNAMIC_PM_FINE
+} nv_dynamic_power_mode_t;
+
+typedef enum
+{
+    NV_POWER_STATE_IN_HIBERNATE,
+    NV_POWER_STATE_IN_STANDBY,
+    NV_POWER_STATE_RUNNING
+} nv_power_state_t;
+
+typedef struct
+{
+    const char *vidmem_power_status;
+    const char *dynamic_power_status;
+    const char *gc6_support;
+    const char *gcoff_support;
+    const char *s0ix_status;
+    const char *db_support;
+} nv_power_info_t;
+
+typedef enum
+{
+    NV_MEMORY_TYPE_SYSTEM,      /* Memory mapped for ROM, SBIOS and physical RAM. */
+    NV_MEMORY_TYPE_REGISTERS,
+    NV_MEMORY_TYPE_FRAMEBUFFER,
+    NV_MEMORY_TYPE_DEVICE_MMIO, /* All kinds of MMIO referenced by NVRM, e.g. BARs and MCFG of device */
+} nv_memory_type_t;
+
+#define NV_PRIMARY_VGA(nv)      ((nv)->primary_vga)
+
+#define NV_IS_CTL_DEVICE(nv)    ((nv)->flags & NV_FLAG_CONTROL)
+#define NV_IS_SOC_DISPLAY_DEVICE(nv)    \
+        ((nv)->flags & NV_FLAG_SOC_DISPLAY)
+
+#define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv)    \
+        (((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0)
+
+/*
+ * For console setup by EFI GOP, the base address is BAR1.
+ * For console setup by VBIOS, the base address is BAR2 + 16MB.
+ */
+#define NV_IS_CONSOLE_MAPPED(nv, addr)    \
+        (((addr) == (nv)->bars[NV_GPU_BAR_INDEX_FB].cpu_address) || \
+         ((addr) == ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000)))
+
+#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv)     \
+        ((nv)->iommus.iso_iommu_present)
+
+#define NV_SOC_IS_NISO_IOMMU_PRESENT(nv)     \
+        ((nv)->iommus.niso_iommu_present)
+/*
+ * GPU add/remove events
+ */
+#define NV_SYSTEM_GPU_ADD_EVENT     0x9001
+#define NV_SYSTEM_GPU_REMOVE_EVENT  0x9002
+
+/*
+ * NVIDIA ACPI sub-event IDs (event types) to be passed to
+ * the core NVIDIA driver for ACPI events.
+ */
+#define NV_SYSTEM_ACPI_EVENT_VALUE_DISPLAY_SWITCH_DEFAULT 0
+#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_UNDOCKED    0
+#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_DOCKED      1
+
+#define NV_ACPI_NVIF_HANDLE_PRESENT 0x01
+#define NV_ACPI_DSM_HANDLE_PRESENT  0x02
+#define NV_ACPI_WMMX_HANDLE_PRESENT 0x04
+
+#define NV_EVAL_ACPI_METHOD_NVIF    0x01
+#define NV_EVAL_ACPI_METHOD_WMMX    0x02
+
+typedef enum {
+    NV_I2C_CMD_READ = 1,
+    NV_I2C_CMD_WRITE,
+    NV_I2C_CMD_SMBUS_READ,
+    NV_I2C_CMD_SMBUS_WRITE,
+    NV_I2C_CMD_SMBUS_QUICK_WRITE,
+    NV_I2C_CMD_SMBUS_QUICK_READ,
+    NV_I2C_CMD_SMBUS_BLOCK_READ,
+    NV_I2C_CMD_SMBUS_BLOCK_WRITE,
+    NV_I2C_CMD_BLOCK_READ,
+    NV_I2C_CMD_BLOCK_WRITE
+} nv_i2c_cmd_t;
+
+// Flags needed by OSAllocPagesNode
+#define NV_ALLOC_PAGES_NODE_NONE         0x0
+#define NV_ALLOC_PAGES_NODE_SKIP_RECLAIM 0x1
+
+/*
+** where we hide our nv_state_t * ...
+*/
+#define NV_SET_NV_STATE(pgpu,p) ((pgpu)->pOsGpuInfo = (p))
+#define NV_GET_NV_STATE(pGpu) \
+    (nv_state_t *)((pGpu) ?
(pGpu)->pOsGpuInfo : NULL) + +static inline NvBool IS_REG_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length) +{ + return ((offset >= nv->regs->cpu_address) && + ((offset + (length - 1)) >= offset) && + ((offset + (length - 1)) <= (nv->regs->cpu_address + (nv->regs->size - 1)))); +} + +static inline NvBool IS_FB_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length) +{ + return ((nv->fb) && (nv->fb->size != 0) && + (offset >= nv->fb->cpu_address) && + ((offset + (length - 1)) >= offset) && + ((offset + (length - 1)) <= (nv->fb->cpu_address + (nv->fb->size - 1)))); +} + +static inline NvBool IS_UD_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length) +{ + return ((nv->ud.cpu_address != 0) && (nv->ud.size != 0) && + (offset >= nv->ud.cpu_address) && + ((offset + (length - 1)) >= offset) && + ((offset + (length - 1)) <= (nv->ud.cpu_address + (nv->ud.size - 1)))); +} + +static inline NvBool IS_IMEM_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length) +{ + return ((nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address != 0) && + (nv->bars[NV_GPU_BAR_INDEX_IMEM].size != 0) && + (offset >= nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address) && + ((offset + (length - 1)) >= offset) && + ((offset + (length - 1)) <= (nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + + (nv->bars[NV_GPU_BAR_INDEX_IMEM].size - 1)))); +} + +#define NV_RM_MAX_MSIX_LINES 8 + +#define NV_MAX_ISR_DELAY_US 20000 +#define NV_MAX_ISR_DELAY_MS (NV_MAX_ISR_DELAY_US / 1000) + +#define NV_TIMERCMP(a, b, CMP) \ + (((a)->tv_sec == (b)->tv_sec) ? \ + ((a)->tv_usec CMP (b)->tv_usec) : ((a)->tv_sec CMP (b)->tv_sec)) + +#define NV_TIMERADD(a, b, result) \ + { \ + (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \ + (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \ + if ((result)->tv_usec >= 1000000) \ + { \ + ++(result)->tv_sec; \ + (result)->tv_usec -= 1000000; \ + } \ + } + +#define NV_TIMERSUB(a, b, result) \ + { \ + (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \ + (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \ + if ((result)->tv_usec < 0) \ + { \ + --(result)->tv_sec; \ + (result)->tv_usec += 1000000; \ + } \ + } + +#define NV_TIMEVAL_TO_US(tv) ((NvU64)(tv).tv_sec * 1000000 + (tv).tv_usec) + +#ifndef NV_ALIGN_UP +#define NV_ALIGN_UP(v,g) (((v) + ((g) - 1)) & ~((g) - 1)) +#endif +#ifndef NV_ALIGN_DOWN +#define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1)) +#endif + + +/* + * driver internal interfaces + */ + +/* + * --------------------------------------------------------------------------- + * + * Function prototypes for UNIX specific OS interface. 
+ * + * --------------------------------------------------------------------------- + */ + +NvU32 NV_API_CALL nv_get_dev_minor (nv_state_t *); +void* NV_API_CALL nv_alloc_kernel_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, void **); +void NV_API_CALL nv_free_kernel_mapping (nv_state_t *, void *, void *, void *); +NV_STATUS NV_API_CALL nv_alloc_user_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, NvU32, NvU64 *, void **); +void NV_API_CALL nv_free_user_mapping (nv_state_t *, void *, NvU64, void *); +NV_STATUS NV_API_CALL nv_add_mapping_context_to_file (nv_state_t *, nv_usermap_access_params_t*, NvU32, void *, NvU64, NvU32); + +NvU64 NV_API_CALL nv_get_kern_phys_address (NvU64); +NvU64 NV_API_CALL nv_get_user_phys_address (NvU64); +nv_state_t* NV_API_CALL nv_get_adapter_state (NvU32, NvU8, NvU8); +nv_state_t* NV_API_CALL nv_get_ctl_state (void); + +void NV_API_CALL nv_set_dma_address_size (nv_state_t *, NvU32 ); + +NV_STATUS NV_API_CALL nv_alias_pages (nv_state_t *, NvU32, NvU64, NvU32, NvU32, NvU64, NvU64 *, NvBool, void **); +NV_STATUS NV_API_CALL nv_alloc_pages (nv_state_t *, NvU32, NvU64, NvBool, NvU32, NvBool, NvBool, NvS32, NvU64 *, void **); +NV_STATUS NV_API_CALL nv_free_pages (nv_state_t *, NvU32, NvBool, NvU32, void *); + +NV_STATUS NV_API_CALL nv_register_user_pages (nv_state_t *, NvU64, NvU64 *, void *, void **, NvBool); +void NV_API_CALL nv_unregister_user_pages (nv_state_t *, NvU64, void **, void **); + +NV_STATUS NV_API_CALL nv_register_peer_io_mem (nv_state_t *, NvU64 *, NvU64, void **); +void NV_API_CALL nv_unregister_peer_io_mem(nv_state_t *, void *); + +struct sg_table; + +NV_STATUS NV_API_CALL nv_register_sgt (nv_state_t *, NvU64 *, NvU64, NvU32, void **, + struct sg_table *, void *, NvBool); +void NV_API_CALL nv_unregister_sgt (nv_state_t *, struct sg_table **, void **, void *); +NV_STATUS NV_API_CALL nv_register_phys_pages (nv_state_t *, NvU64 *, NvU64, NvU32, void **); +void NV_API_CALL nv_unregister_phys_pages (nv_state_t *, void *); + +NV_STATUS NV_API_CALL nv_dma_map_sgt (nv_dma_device_t *, NvU64, NvU64 *, NvU32, void **); + +NV_STATUS NV_API_CALL nv_dma_map_alloc (nv_dma_device_t *, NvU64, NvU64 *, NvBool, void **); +NV_STATUS NV_API_CALL nv_dma_unmap_alloc (nv_dma_device_t *, NvU64, NvU64 *, void **); + +NV_STATUS NV_API_CALL nv_dma_map_peer (nv_dma_device_t *, nv_dma_device_t *, NvU8, NvU64, NvU64 *); +NV_STATUS NV_API_CALL nv_dma_map_non_pci_peer (nv_dma_device_t *, NvU64, NvU64 *); +void NV_API_CALL nv_dma_unmap_peer (nv_dma_device_t *, NvU64, NvU64); + +NV_STATUS NV_API_CALL nv_dma_map_mmio (nv_dma_device_t *, NvU64, NvU64 *); +void NV_API_CALL nv_dma_unmap_mmio (nv_dma_device_t *, NvU64, NvU64); + +void NV_API_CALL nv_dma_cache_invalidate (nv_dma_device_t *, void *); +NvBool NV_API_CALL nv_grdma_pci_topology_supported(nv_state_t *, nv_dma_device_t *); + +NvS32 NV_API_CALL nv_start_rc_timer (nv_state_t *); +NvS32 NV_API_CALL nv_stop_rc_timer (nv_state_t *); + +void NV_API_CALL nv_post_event (nv_event_t *, NvHandle, NvU32, NvU32, NvU16, NvBool); +NvS32 NV_API_CALL nv_get_event (nv_file_private_t *, nv_event_t *, NvU32 *); + +void* NV_API_CALL nv_i2c_add_adapter (nv_state_t *, NvU32); +void NV_API_CALL nv_i2c_del_adapter (nv_state_t *, void *); + +void NV_API_CALL nv_acpi_methods_init (NvU32 *); +void NV_API_CALL nv_acpi_methods_uninit (void); + +NV_STATUS NV_API_CALL nv_acpi_method (NvU32, NvU32, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *); +NV_STATUS NV_API_CALL nv_acpi_d3cold_dsm_for_upstream_port (nv_state_t *, NvU8 *, NvU32, NvU32, NvU32 *); 
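/*
 * [Editor's illustration -- not part of the original patch] The
 * IS_REG_OFFSET/IS_FB_OFFSET/IS_UD_OFFSET/IS_IMEM_OFFSET helpers defined
 * earlier all use the same overflow-safe pattern to test whether
 * [offset, offset + length) lies within an aperture: the term
 * (offset + (length - 1)) >= offset rejects ranges that would wrap
 * around the top of the 64-bit address space. A generic sketch of the
 * pattern (hypothetical helper name; assumes only NvU64/NvBool from
 * nvtypes.h):
 */
static inline NvBool nv_range_in_aperture(NvU64 base, NvU64 size,
                                          NvU64 offset, NvU64 length)
{
    return ((size != 0) && (length != 0) &&
            (offset >= base) &&                                /* starts inside  */
            ((offset + (length - 1)) >= offset) &&             /* no 64-bit wrap */
            ((offset + (length - 1)) <= (base + (size - 1)))); /* ends inside    */
}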
+NV_STATUS NV_API_CALL nv_acpi_dsm_method (nv_state_t *, NvU8 *, NvU32, NvBool, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *); +NV_STATUS NV_API_CALL nv_acpi_ddc_method (nv_state_t *, void *, NvU32 *, NvBool); +NV_STATUS NV_API_CALL nv_acpi_dod_method (nv_state_t *, NvU32 *, NvU32 *); +NV_STATUS NV_API_CALL nv_acpi_rom_method (nv_state_t *, NvU32 *, NvU32 *); +NV_STATUS NV_API_CALL nv_acpi_get_powersource (NvU32 *); +NvBool NV_API_CALL nv_acpi_is_battery_present(void); + +NV_STATUS NV_API_CALL nv_acpi_mux_method (nv_state_t *, NvU32 *, NvU32, const char *); + +NV_STATUS NV_API_CALL nv_log_error (nv_state_t *, NvU32, const char *, va_list); + +NV_STATUS NV_API_CALL nv_set_primary_vga_status(nv_state_t *); +NvBool NV_API_CALL nv_requires_dma_remap (nv_state_t *); + +NvBool NV_API_CALL nv_is_rm_firmware_active(nv_state_t *); +const void*NV_API_CALL nv_get_firmware(nv_state_t *, nv_firmware_type_t, nv_firmware_chip_family_t, const void **, NvU32 *); +void NV_API_CALL nv_put_firmware(const void *); + +nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **); +void NV_API_CALL nv_put_file_private(void *); + +NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU64 *, NvU32 *, NvS32 *); +NV_STATUS NV_API_CALL nv_get_egm_info(nv_state_t *, NvU64 *, NvU64 *, NvS32 *); + +void NV_API_CALL nv_p2p_free_platform_data(void *data); + +NV_STATUS NV_API_CALL nv_revoke_gpu_mappings (nv_state_t *); +void NV_API_CALL nv_acquire_mmap_lock (nv_state_t *); +void NV_API_CALL nv_release_mmap_lock (nv_state_t *); +NvBool NV_API_CALL nv_get_all_mappings_revoked_locked (nv_state_t *); +void NV_API_CALL nv_set_safe_to_mmap_locked (nv_state_t *, NvBool); + +NV_STATUS NV_API_CALL nv_indicate_idle (nv_state_t *); +NV_STATUS NV_API_CALL nv_indicate_not_idle (nv_state_t *); +void NV_API_CALL nv_idle_holdoff (nv_state_t *); + +NvBool NV_API_CALL nv_dynamic_power_available (nv_state_t *); +void NV_API_CALL nv_audio_dynamic_power (nv_state_t *); + +void NV_API_CALL nv_control_soc_irqs (nv_state_t *, NvBool bEnable); +NV_STATUS NV_API_CALL nv_get_current_irq_priv_data(nv_state_t *, NvU32 *); + +NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap (int, int*); +int NV_API_CALL nv_cap_drv_init(void); +void NV_API_CALL nv_cap_drv_exit(void); +NvBool NV_API_CALL nv_is_gpu_accessible(nv_state_t *); +NvBool NV_API_CALL nv_match_gpu_os_info(nv_state_t *, void *); + +void NV_API_CALL nv_get_updated_emu_seg(NvU32 *start, NvU32 *end); +void NV_API_CALL nv_get_screen_info(nv_state_t *, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU32 *, NvU64 *); +void NV_API_CALL nv_set_gpu_pg_mask(nv_state_t *); + +struct dma_buf; +typedef struct nv_dma_buf nv_dma_buf_t; +struct drm_gem_object; + +NV_STATUS NV_API_CALL nv_dma_import_sgt (nv_dma_device_t *, struct sg_table *, struct drm_gem_object *); +void NV_API_CALL nv_dma_release_sgt(struct sg_table *, struct drm_gem_object *); +NV_STATUS NV_API_CALL nv_dma_import_dma_buf (nv_dma_device_t *, struct dma_buf *, NvBool, NvU32 *, struct sg_table **, nv_dma_buf_t **); +NV_STATUS NV_API_CALL nv_dma_import_from_fd (nv_dma_device_t *, NvS32, NvBool, NvU32 *, struct sg_table **, nv_dma_buf_t **); +void NV_API_CALL nv_dma_release_dma_buf (nv_dma_buf_t *); + +void NV_API_CALL nv_schedule_uvm_isr (nv_state_t *); + +NV_STATUS NV_API_CALL nv_schedule_uvm_drain_p2p (NvU8 *); +void NV_API_CALL nv_schedule_uvm_resume_p2p (NvU8 *); + +NvBool NV_API_CALL nv_platform_supports_s0ix (void); +NvBool NV_API_CALL nv_s2idle_pm_configured (void); + +NvBool NV_API_CALL 
nv_pci_tegra_register_power_domain (nv_state_t *, NvBool); +NvBool NV_API_CALL nv_pci_tegra_pm_init (nv_state_t *); +void NV_API_CALL nv_pci_tegra_pm_deinit (nv_state_t *); + +NvBool NV_API_CALL nv_is_chassis_notebook (void); +void NV_API_CALL nv_allow_runtime_suspend (nv_state_t *nv); +void NV_API_CALL nv_disallow_runtime_suspend (nv_state_t *nv); + +typedef void (*nvTegraDceClientIpcCallback)(NvU32, NvU32, NvU32, void *, void *); + +NV_STATUS NV_API_CALL nv_get_num_phys_pages (void *, NvU32 *); +NV_STATUS NV_API_CALL nv_get_phys_pages (void *, void *, NvU32 *); +void NV_API_CALL nv_get_disp_smmu_stream_ids (nv_state_t *, NvU32 *, NvU32 *); + +typedef struct TEGRA_IMP_IMPORT_DATA TEGRA_IMP_IMPORT_DATA; +typedef struct nv_i2c_msg_s nv_i2c_msg_t; + +NV_STATUS NV_API_CALL nv_bpmp_send_mrq (nv_state_t *, NvU32, const void *, NvU32, void *, NvU32, NvS32 *, NvS32 *); +NV_STATUS NV_API_CALL nv_i2c_transfer(nv_state_t *, NvU32, NvU8, nv_i2c_msg_t *, int); +void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *); +NV_STATUS NV_API_CALL nv_i2c_bus_status(nv_state_t *, NvU32, NvS32 *, NvS32 *); +NV_STATUS NV_API_CALL nv_imp_get_import_data (TEGRA_IMP_IMPORT_DATA *); +NV_STATUS NV_API_CALL nv_imp_enable_disable_rfl (nv_state_t *nv, NvBool bEnable); +NV_STATUS NV_API_CALL nv_imp_icc_set_bw (nv_state_t *nv, NvU32 avg_bw_kbps, NvU32 floor_bw_kbps); +NV_STATUS NV_API_CALL nv_get_num_dpaux_instances(nv_state_t *nv, NvU32 *num_instances); +NV_STATUS NV_API_CALL nv_get_tegra_brightness_level(nv_state_t *, NvU32 *); +NV_STATUS NV_API_CALL nv_set_tegra_brightness_level(nv_state_t *, NvU32); + +NV_STATUS NV_API_CALL nv_soc_device_reset (nv_state_t *); +NV_STATUS NV_API_CALL nv_soc_pm_powergate (nv_state_t *); +NV_STATUS NV_API_CALL nv_soc_pm_unpowergate (nv_state_t *); +NV_STATUS NV_API_CALL nv_gpio_get_pin_state(nv_state_t *, NvU32, NvU32 *); +void NV_API_CALL nv_gpio_set_pin_state(nv_state_t *, NvU32, NvU32); +NV_STATUS NV_API_CALL nv_gpio_set_pin_direction(nv_state_t *, NvU32, NvU32); +NV_STATUS NV_API_CALL nv_gpio_get_pin_direction(nv_state_t *, NvU32, NvU32 *); +NV_STATUS NV_API_CALL nv_gpio_get_pin_number(nv_state_t *, NvU32, NvU32 *); +NvBool NV_API_CALL nv_gpio_get_pin_interrupt_status(nv_state_t *, NvU32, NvU32); +NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt(nv_state_t *, NvU32, NvU32); +NvU32 NV_API_CALL nv_tegra_get_rm_interface_type(NvU32); +NV_STATUS NV_API_CALL nv_tegra_dce_register_ipc_client(NvU32, void *, nvTegraDceClientIpcCallback, NvU32 *); +NV_STATUS NV_API_CALL nv_tegra_dce_client_ipc_send_recv(NvU32, void *, NvU32); +NV_STATUS NV_API_CALL nv_tegra_dce_unregister_ipc_client(NvU32); +NV_STATUS NV_API_CALL nv_dsi_parse_panel_props(nv_state_t *, void *); +NvBool NV_API_CALL nv_dsi_is_panel_connected(nv_state_t *); +NV_STATUS NV_API_CALL nv_dsi_panel_enable(nv_state_t *, void *); +NV_STATUS NV_API_CALL nv_dsi_panel_reset(nv_state_t *, void *); +void NV_API_CALL nv_dsi_panel_disable(nv_state_t *, void *); +void NV_API_CALL nv_dsi_panel_cleanup(nv_state_t *, void *); +NV_STATUS NV_API_CALL nv_soc_mipi_cal_reset(nv_state_t *); +NvU32 NV_API_CALL nv_soc_fuse_register_read (NvU32 addr); +NvBool NV_API_CALL nv_get_hdcp_enabled(nv_state_t *nv); +NV_STATUS NV_API_CALL nv_get_valid_window_head_mask(nv_state_t *nv, NvU64 *); +NV_STATUS NV_API_CALL nv_dp_uphy_pll_init(nv_state_t *, NvU32, NvU32); +NV_STATUS NV_API_CALL nv_dp_uphy_pll_deinit(nv_state_t *); +NV_STATUS NV_API_CALL nv_soc_i2c_hsp_semaphore_acquire(NvU32 ownerId, NvBool bAcquire, NvU64 timeout); +typedef void 
(*nv_soc_tsec_cb_func_t)(void*, void*); +NvU32 NV_API_CALL nv_soc_tsec_send_cmd(void* cmd, nv_soc_tsec_cb_func_t cb_func, void* cb_context); +NvU32 NV_API_CALL nv_soc_tsec_event_register(nv_soc_tsec_cb_func_t cb_func, void* cb_context, NvBool is_init_event); +NvU32 NV_API_CALL nv_soc_tsec_event_unregister(NvBool is_init_event); +void* NV_API_CALL nv_soc_tsec_alloc_mem_desc(NvU32 num_bytes, NvU32 *flcn_addr); +void NV_API_CALL nv_soc_tsec_free_mem_desc(void *mem_desc); +NvBool NV_API_CALL nv_is_clk_enabled (nv_state_t *, TEGRASOC_WHICH_CLK); +NV_STATUS NV_API_CALL nv_set_parent (nv_state_t *, TEGRASOC_WHICH_CLK, TEGRASOC_WHICH_CLK); +NV_STATUS NV_API_CALL nv_get_parent (nv_state_t *, TEGRASOC_WHICH_CLK, TEGRASOC_WHICH_CLK*); +NV_STATUS NV_API_CALL nv_clk_get_handles (nv_state_t *); +void NV_API_CALL nv_clk_clear_handles (nv_state_t *); +NV_STATUS NV_API_CALL nv_enable_clk (nv_state_t *, TEGRASOC_WHICH_CLK); +void NV_API_CALL nv_disable_clk (nv_state_t *, TEGRASOC_WHICH_CLK); +NV_STATUS NV_API_CALL nv_get_curr_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *); +NV_STATUS NV_API_CALL nv_get_max_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *); +NV_STATUS NV_API_CALL nv_get_min_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *); +NV_STATUS NV_API_CALL nv_set_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32); + +/* + * --------------------------------------------------------------------------- + * + * Function prototypes for Resource Manager interface. + * + * --------------------------------------------------------------------------- + */ + +NvBool NV_API_CALL rm_init_rm (nvidia_stack_t *); +void NV_API_CALL rm_shutdown_rm (nvidia_stack_t *); +NvBool NV_API_CALL rm_init_private_state (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_free_private_state (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_init_adapter (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_disable_adapter (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_shutdown_adapter (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_exclude_adapter (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_acquire_api_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_release_api_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_acquire_gpu_lock (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_release_gpu_lock (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_acquire_all_gpus_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_release_all_gpus_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_ioctl (nvidia_stack_t *, nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32); +NvBool NV_API_CALL rm_isr (nvidia_stack_t *, nv_state_t *, NvU32 *); +void NV_API_CALL rm_isr_bh (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_isr_bh_unlocked (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_is_msix_allowed (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_wait_for_bar_firewall (nvidia_stack_t *, NvU32 domain, NvU8 bus, NvU8 device, NvU8 function, NvU16 devId, NvU16 subsystemId); +NV_STATUS NV_API_CALL rm_pmu_perfmon_get_load (nvidia_stack_t *, nv_state_t *, NvU32 *, TEGRASOC_DEVFREQ_CLK); +NV_STATUS NV_API_CALL rm_power_management (nvidia_stack_t *, nv_state_t *, nv_pm_action_t); +NV_STATUS NV_API_CALL rm_stop_user_channels (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_restart_user_channels (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_save_low_res_mode (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_get_vbios_version (nvidia_stack_t *, 
nv_state_t *, char *); +char* NV_API_CALL rm_get_gpu_uuid (nvidia_stack_t *, nv_state_t *); +const NvU8* NV_API_CALL rm_get_gpu_uuid_raw (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_set_rm_firmware_requested(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_get_firmware_version (nvidia_stack_t *, nv_state_t *, char *, NvLength); +void NV_API_CALL rm_cleanup_file_private (nvidia_stack_t *, nv_state_t *, nv_file_private_t *); +void NV_API_CALL rm_unbind_lock (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_read_registry_dword (nvidia_stack_t *, nv_state_t *, const char *, NvU32 *); +NV_STATUS NV_API_CALL rm_write_registry_dword (nvidia_stack_t *, nv_state_t *, const char *, NvU32); +NV_STATUS NV_API_CALL rm_write_registry_binary (nvidia_stack_t *, nv_state_t *, const char *, NvU8 *, NvU32); +NV_STATUS NV_API_CALL rm_write_registry_string (nvidia_stack_t *, nv_state_t *, const char *, const char *, NvU32); +void NV_API_CALL rm_parse_option_string (nvidia_stack_t *, const char *); +char* NV_API_CALL rm_remove_spaces (const char *); +char* NV_API_CALL rm_string_token (char **, const char); +void NV_API_CALL rm_vgpu_vfio_set_driver_vm(nvidia_stack_t *, NvBool); +NV_STATUS NV_API_CALL rm_get_adapter_status_external(nvidia_stack_t *, nv_state_t *); + +NV_STATUS NV_API_CALL rm_run_rc_callback (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_execute_work_item (nvidia_stack_t *, void *); +const char* NV_API_CALL rm_get_device_name (NvU16, NvU16, NvU16); + +NV_STATUS NV_API_CALL rm_is_supported_device (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_is_supported_pci_device(NvU8 pci_class, + NvU8 pci_subclass, + NvU16 vendor, + NvU16 device, + NvU16 subsystem_vendor, + NvU16 subsystem_device, + NvBool print_legacy_warning); + +void NV_API_CALL rm_i2c_remove_adapters (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_i2c_is_smbus_capable (nvidia_stack_t *, nv_state_t *, void *); +NV_STATUS NV_API_CALL rm_i2c_transfer (nvidia_stack_t *, nv_state_t *, void *, nv_i2c_cmd_t, NvU8, NvU8, NvU32, NvU8 *); + +NV_STATUS NV_API_CALL rm_perform_version_check (nvidia_stack_t *, void *, NvU32); + +void NV_API_CALL rm_power_source_change_event (nvidia_stack_t *, NvU32); + +void NV_API_CALL rm_request_dnotifier_state (nvidia_stack_t *, nv_state_t *); + +void NV_API_CALL rm_disable_gpu_state_persistence (nvidia_stack_t *sp, nv_state_t *); +NV_STATUS NV_API_CALL rm_p2p_init_mapping (nvidia_stack_t *, NvU64, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU64, NvU64, NvU64, NvU64, void (*)(void *), void *); +NV_STATUS NV_API_CALL rm_p2p_destroy_mapping (nvidia_stack_t *, NvU64); +NV_STATUS NV_API_CALL rm_p2p_get_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU8 **, void *, NvBool *); +NV_STATUS NV_API_CALL rm_p2p_get_gpu_info (nvidia_stack_t *, NvU64, NvU64, NvU8 **, void **); +NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, NvU64, void **, NvU64 *, NvU32 *, NvBool, void *, void *, void **, NvBool *); +NV_STATUS NV_API_CALL rm_p2p_register_callback (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *); +NV_STATUS NV_API_CALL rm_p2p_put_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, void *); +NV_STATUS NV_API_CALL rm_p2p_put_pages_persistent(nvidia_stack_t *, void *, void *, void *); +NV_STATUS NV_API_CALL rm_p2p_dma_map_pages (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU64, NvU32, NvU64 *, void **); +NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, 
NvHandle, NvHandle, + NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *, void **, + NvBool *, NvU32 *, NvBool *, nv_memory_type_t *); +void NV_API_CALL rm_dma_buf_undup_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle); +NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, + NvHandle, NvHandle, MemoryRange, + NvU8, void *, NvBool, MemoryArea *); +void NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *, + NvHandle, NvHandle, NvU8, void *, + NvBool, MemoryArea); +NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *, + nv_state_t *, NvHandle, NvHandle, + NvU8, NvHandle *, NvHandle *, + NvHandle *, void **, NvBool *, NvBool *); +void NV_API_CALL rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *); + +void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd); +NvBool NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id); +NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *); +NV_STATUS NV_API_CALL rm_gpu_handle_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *); +NvBool NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *); +NvBool NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *); +NvBool NV_API_CALL rm_init_event_locks(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_destroy_event_locks(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_get_gpu_numa_info(nvidia_stack_t *, nv_state_t *, nv_ioctl_numa_info_t *); +NV_STATUS NV_API_CALL rm_gpu_numa_online(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_gpu_numa_offline(nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_is_device_sequestered(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_check_for_gpu_surprise_removal(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_set_external_kernel_client_count(nvidia_stack_t *, nv_state_t *, NvBool); +NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup(nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_disable_iomap_wc(void); + +void NV_API_CALL rm_init_tegra_dynamic_power_management(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_init_dynamic_power_management(nvidia_stack_t *, nv_state_t *, NvBool); +void NV_API_CALL rm_cleanup_dynamic_power_management(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_enable_dynamic_power_management(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_ref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t); +void NV_API_CALL rm_unref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t); +NV_STATUS NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool, NvBool *); +void NV_API_CALL rm_get_power_info(nvidia_stack_t *, nv_state_t *, nv_power_info_t *); + +void NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32); +void NV_API_CALL rm_acpi_nvpcf_notify(nvidia_stack_t *); + +NvBool NV_API_CALL rm_is_altstack_in_use(void); + +void NV_API_CALL rm_notify_gpu_addition(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_notify_gpu_removal(nvidia_stack_t *, nv_state_t *); + +/* vGPU VFIO specific functions */ +NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, + NvU32 *, NvU32 *, NvU32); +NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16); +NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool); +NV_STATUS 
NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8); +NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, + NvU64 *, NvU64 *, NvU32 *, NvBool *, NvU8 *); +NV_STATUS NV_API_CALL nv_vgpu_update_sysfs_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU32 *); +NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU64 *); +NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *); +NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *, NvU32, NvBool *); +NV_STATUS NV_API_CALL nv_gpu_unbind_event(nvidia_stack_t *, NvU32, NvBool *); + +NV_STATUS NV_API_CALL nv_check_usermap_access_params(nv_state_t*, const nv_usermap_access_params_t*); +nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*); +void NV_API_CALL nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size); + +#if defined(NV_VMWARE) +const void* NV_API_CALL rm_get_firmware(nv_firmware_type_t fw_type, const void **fw_buf, NvU32 *fw_size); +#endif + +/* Callbacks should occur roughly every 10ms. */ +#define NV_SNAPSHOT_TIMER_HZ 100 +void NV_API_CALL nv_start_snapshot_timer(void (*snapshot_callback)(void *context)); +void NV_API_CALL nv_flush_snapshot_timer(void); +void NV_API_CALL nv_stop_snapshot_timer(void); + +static inline const NvU8 *nv_get_cached_uuid(nv_state_t *nv) +{ + return nv->nv_uuid_cache.valid ? nv->nv_uuid_cache.uuid : NULL; +} + +/* nano second resolution timer callback structure */ +typedef struct nv_nano_timer nv_nano_timer_t; + +/* nano timer functions */ +void NV_API_CALL nv_create_nano_timer(nv_state_t *, void *pTmrEvent, nv_nano_timer_t **); +void NV_API_CALL nv_start_nano_timer(nv_state_t *nv, nv_nano_timer_t *, NvU64 timens); +NV_STATUS NV_API_CALL rm_run_nano_timer_callback(nvidia_stack_t *, nv_state_t *, void *pTmrEvent); +void NV_API_CALL nv_cancel_nano_timer(nv_state_t *, nv_nano_timer_t *); +void NV_API_CALL nv_destroy_nano_timer(nv_state_t *nv, nv_nano_timer_t *); + +// Host1x specific functions. +NV_STATUS NV_API_CALL nv_get_syncpoint_aperture(NvU32, NvU64 *, NvU64 *, NvU32 *); + +#if defined(NVCPU_X86_64) + +static inline NvU64 nv_rdtsc(void) +{ + NvU64 val; + __asm__ __volatile__ ("rdtsc \t\n" + "shlq $0x20,%%rdx \t\n" + "orq %%rdx,%%rax \t\n" + : "=A" (val)); + return val; +} + +#endif + +#endif /* NVRM */ + +static inline int nv_count_bits(NvU64 word) +{ + NvU64 bits; + + bits = (word & 0x5555555555555555ULL) + ((word >> 1) & 0x5555555555555555ULL); + bits = (bits & 0x3333333333333333ULL) + ((bits >> 2) & 0x3333333333333333ULL); + bits = (bits & 0x0f0f0f0f0f0f0f0fULL) + ((bits >> 4) & 0x0f0f0f0f0f0f0f0fULL); + bits = (bits & 0x00ff00ff00ff00ffULL) + ((bits >> 8) & 0x00ff00ff00ff00ffULL); + bits = (bits & 0x0000ffff0000ffffULL) + ((bits >> 16) & 0x0000ffff0000ffffULL); + bits = (bits & 0x00000000ffffffffULL) + ((bits >> 32) & 0x00000000ffffffffULL); + + return (int)(bits); +} + +#endif diff --git a/kernel-open/common/inc/nvCpuUuid.h b/kernel-open/common/inc/nvCpuUuid.h new file mode 100644 index 0000000..0ab546b --- /dev/null +++ b/kernel-open/common/inc/nvCpuUuid.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_CPU_UUID_H_ +#define _NV_CPU_UUID_H_ + +#define NV_UUID_LEN 16 + +typedef struct nv_uuid +{ + NvU8 uuid[NV_UUID_LEN]; + +} NvUuid; + +#define NV_UUID_HI(pUuid) (*((NvU64*)((pUuid)->uuid + (NV_UUID_LEN >> 1)))) +#define NV_UUID_LO(pUuid) (*((NvU64*)((pUuid)->uuid + 0))) + +typedef NvUuid NvSystemUuid; + +typedef NvUuid NvProcessorUuid; + +extern const NvProcessorUuid NV_PROCESSOR_UUID_CPU_DEFAULT; + +#endif // _NV_CPU_UUID_H_ diff --git a/kernel-open/common/inc/nv_common_utils.h b/kernel-open/common/inc/nv_common_utils.h new file mode 100644 index 0000000..6b10e76 --- /dev/null +++ b/kernel-open/common/inc/nv_common_utils.h @@ -0,0 +1,120 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_COMMON_UTILS_H__ +#define __NV_COMMON_UTILS_H__ + +#include "nvtypes.h" +#include "nvmisc.h" + +#if !defined(TRUE) +#define TRUE NV_TRUE +#endif + +#if !defined(FALSE) +#define FALSE NV_FALSE +#endif + +#define NV_IS_UNSIGNED(x) ((__typeof__(x))-1 > 0) + +/* Get the length of a statically-sized array. 
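+ * (Editor's note: analogous to the Linux kernel's ARRAY_SIZE(); the
+ * result is a compile-time constant.)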
*/
+#define ARRAY_LEN(_arr) (sizeof(_arr) / sizeof(_arr[0]))
+
+#define NV_INVALID_HEAD 0xFFFFFFFF
+
+#define NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION (~0)
+
+#if !defined(NV_MIN)
+# define NV_MIN(a,b) (((a)<(b))?(a):(b))
+#endif
+
+#define NV_MIN3(a,b,c) NV_MIN(NV_MIN(a, b), c)
+#define NV_MIN4(a,b,c,d) NV_MIN3(NV_MIN(a,b),c,d)
+
+#if !defined(NV_MAX)
+# define NV_MAX(a,b) (((a)>(b))?(a):(b))
+#endif
+
+#define NV_MAX3(a,b,c) NV_MAX(NV_MAX(a, b), c)
+#define NV_MAX4(a,b,c,d) NV_MAX3(NV_MAX(a,b),c,d)
+
+static inline int NV_LIMIT_VAL_TO_MIN_MAX(int val, int min, int max)
+{
+    if (val < min) {
+        return min;
+    }
+    if (val > max) {
+        return max;
+    }
+    return val;
+}
+
+#define NV_ROUNDUP_DIV(x,y) ((x) / (y) + (((x) % (y)) ? 1 : 0))
+
+/*
+ * Macros used for computing palette entries:
+ *
+ * NV_UNDER_REPLICATE(val, source_size, result_size) expands a value
+ * of source_size bits into a value of result_size bits by shifting
+ * the source value into the high bits and replicating the high bits
+ * of the value into the low bits of the result.
+ *
+ * PALETTE_DEPTH_SHIFT(val, depth) maps a colormap entry for a component
+ * that has depth bits to an appropriate entry in a LUT of 256 entries.
+ */
+static inline unsigned int NV_UNDER_REPLICATE(unsigned short val,
+                                              int source_size,
+                                              int result_size)
+{
+    return (val << (result_size - source_size)) |
+           (val >> ((source_size << 1) - result_size));
+}
+
+
+static inline unsigned short PALETTE_DEPTH_SHIFT(unsigned short val, int depth)
+{
+    return NV_UNDER_REPLICATE(val, depth, 8);
+}
+
+/*
+ * Use __builtin_ffs where it is supported, or provide an equivalent
+ * implementation for platforms like riscv where it is not.
+ */
+#if defined(__GNUC__) && !NVCPU_IS_RISCV64
+static inline int nv_ffs(int x)
+{
+    return __builtin_ffs(x);
+}
+#else
+static inline int nv_ffs(int x)
+{
+    if (x == 0)
+        return 0;
+
+    LOWESTBITIDX_32(x);
+
+    return 1 + x;
+}
+#endif
+
+#endif /* __NV_COMMON_UTILS_H__ */
diff --git a/kernel-open/common/inc/nv_dpy_id.h b/kernel-open/common/inc/nv_dpy_id.h
new file mode 100644
index 0000000..fe742a5
--- /dev/null
+++ b/kernel-open/common/inc/nv_dpy_id.h
@@ -0,0 +1,370 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * This header file defines the types NVDpyId and NVDpyIdList, as well
+ * as inline functions to manipulate these types.
+ * NVDpyId and NVDpyIdList should be treated as opaque by includers of
+ * this header file.
+ */
+
+#ifndef __NV_DPY_ID_H__
+#define __NV_DPY_ID_H__
+
+#include "nvtypes.h"
+#include "nvmisc.h"
+#include "nv_common_utils.h"
+#include <nvlimits.h> /* NV_MAX_SUBDEVICES */
+
+typedef struct {
+    NvU32 opaqueDpyId;
+} NVDpyId;
+
+typedef struct {
+    NvU32 opaqueDpyIdList;
+} NVDpyIdList;
+
+#define NV_DPY_ID_MAX_SUBDEVICES NV_MAX_SUBDEVICES
+#define NV_DPY_ID_MAX_DPYS_IN_LIST 32
+
+/*
+ * For use in combination with nvDpyIdToPrintFormat(); e.g.,
+ *
+ *   printf("dpy id: " NV_DPY_ID_PRINT_FORMAT "\n",
+ *          nvDpyIdToPrintFormat(dpyId));
+ *
+ * The includer should not make assumptions about the return type of
+ * nvDpyIdToPrintFormat().
+ */
+#define NV_DPY_ID_PRINT_FORMAT "0x%08x"
+
+/* functions to return an invalid DpyId and empty DpyIdList */
+
+static inline NVDpyId nvInvalidDpyId(void)
+{
+    NVDpyId dpyId = { 0 };
+    return dpyId;
+}
+
+static inline NVDpyIdList nvEmptyDpyIdList(void)
+{
+    NVDpyIdList dpyIdList = { 0 };
+    return dpyIdList;
+}
+
+static inline NVDpyIdList nvAllDpyIdList(void)
+{
+    NVDpyIdList dpyIdList = { ~0U };
+    return dpyIdList;
+}
+
+static inline void
+nvEmptyDpyIdListSubDeviceArray(NVDpyIdList dpyIdList[NV_DPY_ID_MAX_SUBDEVICES])
+{
+    int dispIndex;
+    for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) {
+        dpyIdList[dispIndex] = nvEmptyDpyIdList();
+    }
+}
+
+/* set operations on DpyIds and DpyIdLists: Add, Subtract, Intersect, Xor */
+
+static inline __attribute__ ((warn_unused_result))
+NVDpyIdList nvAddDpyIdToDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList)
+{
+    NVDpyIdList tmpDpyIdList;
+    tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList |
+                                   dpyId.opaqueDpyId;
+    return tmpDpyIdList;
+}
+
+/* Passing an invalid display ID makes this function return an empty list.
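+ * (Editor's note: an invalid dpy ID has an opaque value of 0, so the
+ * resulting opaqueDpyIdList is 0, i.e. the empty list.)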
*/ +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvAddDpyIdToEmptyDpyIdList(NVDpyId dpyId) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyId.opaqueDpyId; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvAddDpyIdListToDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListB.opaqueDpyIdList | + dpyIdListA.opaqueDpyIdList; + return tmpDpyIdList; +} + +/* Returns: dpyIdList - dpyId */ +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvDpyIdListMinusDpyId(NVDpyIdList dpyIdList, NVDpyId dpyId) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList & + (~dpyId.opaqueDpyId); + return tmpDpyIdList; +} + +/* Returns: dpyIdListA - dpyIdListB */ +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvDpyIdListMinusDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList & + (~dpyIdListB.opaqueDpyIdList); + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvIntersectDpyIdAndDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList & + dpyId.opaqueDpyId; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvIntersectDpyIdListAndDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList & + dpyIdListB.opaqueDpyIdList; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvXorDpyIdAndDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList ^ + dpyId.opaqueDpyId; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvXorDpyIdListAndDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList ^ + dpyIdListB.opaqueDpyIdList; + return tmpDpyIdList; +} + + +/* boolean checks */ + +static inline NvBool nvDpyIdIsInDpyIdList(NVDpyId dpyId, + NVDpyIdList dpyIdList) +{ + return !!(dpyIdList.opaqueDpyIdList & dpyId.opaqueDpyId); +} + +static inline NvBool nvDpyIdIsInvalid(NVDpyId dpyId) +{ + return (dpyId.opaqueDpyId == 0); +} + +static inline NvBool nvDpyIdListIsEmpty(NVDpyIdList dpyIdList) +{ + return (dpyIdList.opaqueDpyIdList == 0); +} + +static inline NvBool +nvDpyIdListSubDeviceArrayIsEmpty(NVDpyIdList + dpyIdList[NV_DPY_ID_MAX_SUBDEVICES]) +{ + int dispIndex; + for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) { + if (!nvDpyIdListIsEmpty(dpyIdList[dispIndex])) { + return NV_FALSE; + } + } + return NV_TRUE; +} + + +static inline NvBool nvDpyIdsAreEqual(NVDpyId dpyIdA, NVDpyId dpyIdB) +{ + return (dpyIdA.opaqueDpyId == dpyIdB.opaqueDpyId); +} + +static inline NvBool nvDpyIdListsAreEqual(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + return (dpyIdListA.opaqueDpyIdList == dpyIdListB.opaqueDpyIdList); +} + +static inline NvBool nvDpyIdListIsASubSetofDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList intersectedDpyIdList = + nvIntersectDpyIdListAndDpyIdList(dpyIdListA, dpyIdListB); + + return nvDpyIdListsAreEqual(intersectedDpyIdList, dpyIdListA); +} + 
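/*
 * [Editor's illustration -- not part of the original patch] A usage
 * sketch for the inline set operations above; nvDpyIdListUsageExample is
 * a hypothetical function, and dpyIdA/dpyIdB are assumed to be two
 * distinct, valid display IDs:
 */
static inline NvBool nvDpyIdListUsageExample(NVDpyId dpyIdA, NVDpyId dpyIdB)
{
    NVDpyIdList list = nvEmptyDpyIdList();

    list = nvAddDpyIdToDpyIdList(dpyIdA, list);  /* { A }    */
    list = nvAddDpyIdToDpyIdList(dpyIdB, list);  /* { A, B } */
    list = nvDpyIdListMinusDpyId(list, dpyIdB);  /* { A }    */

    /* every list is a subset of the universal list */
    return nvDpyIdIsInDpyIdList(dpyIdA, list) &&
           nvDpyIdListIsASubSetofDpyIdList(list, nvAllDpyIdList());
}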
+ +/* + * retrieve the individual dpyIds from dpyIdList; if dpyId is invalid, + * start at the beginning of the list; otherwise, start at the dpyId + * after the specified dpyId + */ + +static inline __attribute__ ((warn_unused_result)) +NVDpyId nvNextDpyIdInDpyIdListUnsorted(NVDpyId dpyId, NVDpyIdList dpyIdList) +{ + if (nvDpyIdIsInvalid(dpyId)) { + dpyId.opaqueDpyId = 1; + } else { + dpyId.opaqueDpyId <<= 1; + } + + while (dpyId.opaqueDpyId) { + + if (nvDpyIdIsInDpyIdList(dpyId, dpyIdList)) { + return dpyId; + } + + dpyId.opaqueDpyId <<= 1; + } + + /* no dpyIds left in dpyIdlist; return the invalid dpyId */ + + return nvInvalidDpyId(); +} + +#define FOR_ALL_DPY_IDS(_dpyId, _dpyIdList) \ + for ((_dpyId) = nvNextDpyIdInDpyIdListUnsorted(nvInvalidDpyId(), \ + (_dpyIdList)); \ + !nvDpyIdIsInvalid(_dpyId); \ + (_dpyId) = nvNextDpyIdInDpyIdListUnsorted((_dpyId), \ + (_dpyIdList))) + +/* report how many dpyIds are in the dpyIdList */ + +static inline int nvCountDpyIdsInDpyIdList(NVDpyIdList dpyIdList) +{ + return nvPopCount32(dpyIdList.opaqueDpyIdList); +} + +static inline int +nvCountDpyIdsInDpyIdListSubDeviceArray(NVDpyIdList + dpyIdList[NV_DPY_ID_MAX_SUBDEVICES]) +{ + int dispIndex, n = 0; + + for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) { + n += nvCountDpyIdsInDpyIdList(dpyIdList[dispIndex]); + } + + return n; +} + +/* convert between dpyId/dpyIdList and NV-CONTROL values */ + +static inline int nvDpyIdToNvControlVal(NVDpyId dpyId) +{ + return (int) dpyId.opaqueDpyId; +} + +static inline int nvDpyIdListToNvControlVal(NVDpyIdList dpyIdList) +{ + return (int) dpyIdList.opaqueDpyIdList; +} + +static inline NVDpyId nvNvControlValToDpyId(int val) +{ + NVDpyId dpyId; + dpyId.opaqueDpyId = (val == 0) ? 0 : 1 << (nv_ffs(val)-1); + return dpyId; +} + +static inline NVDpyIdList nvNvControlValToDpyIdList(int val) +{ + NVDpyIdList dpyIdList; + dpyIdList.opaqueDpyIdList = val; + return dpyIdList; +} + + +/* convert between dpyId and NvU32 */ + +static inline NVDpyId nvNvU32ToDpyId(NvU32 val) +{ + NVDpyId dpyId; + dpyId.opaqueDpyId = (val == 0) ? 0 : 1 << (nv_ffs(val)-1); + return dpyId; +} + +static inline NVDpyIdList nvNvU32ToDpyIdList(NvU32 val) +{ + NVDpyIdList dpyIdList; + dpyIdList.opaqueDpyIdList = val; + return dpyIdList; +} + +static inline NvU32 nvDpyIdToNvU32(NVDpyId dpyId) +{ + return dpyId.opaqueDpyId; +} + +static inline NvU32 nvDpyIdListToNvU32(NVDpyIdList dpyIdList) +{ + return dpyIdList.opaqueDpyIdList; +} + +/* Return the bit position of dpyId: a number in the range [0..31]. */ +static inline NvU32 nvDpyIdToIndex(NVDpyId dpyId) +{ + return nv_ffs(dpyId.opaqueDpyId) - 1; +} + +/* Return a display ID that is not in the list passed in. */ + +static inline NVDpyId nvNewDpyId(NVDpyIdList excludeList) +{ + NVDpyId dpyId; + if (~excludeList.opaqueDpyIdList == 0) { + return nvInvalidDpyId(); + } + dpyId.opaqueDpyId = + 1U << (nv_ffs(~excludeList.opaqueDpyIdList) - 1); + return dpyId; +} + +/* See comment for NV_DPY_ID_PRINT_FORMAT. */ +static inline NvU32 nvDpyIdToPrintFormat(NVDpyId dpyId) +{ + return nvDpyIdToNvU32(dpyId); +} + +/* Prevent usage of opaque values. 
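+ * (Editor's note: the #defines below rename the opaque struct fields to
+ * an undeclared identifier, so any direct access outside this header
+ * fails to compile.)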
*/ +#define opaqueDpyId __ERROR_ACCESS_ME_VIA_NV_DPY_ID_H +#define opaqueDpyIdList __ERROR_ACCESS_ME_VIA_NV_DPY_ID_H + +#endif /* __NV_DPY_ID_H__ */ diff --git a/kernel-open/common/inc/nv_mig_types.h b/kernel-open/common/inc/nv_mig_types.h new file mode 100644 index 0000000..ae3ea3c --- /dev/null +++ b/kernel-open/common/inc/nv_mig_types.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __NV_MIG_TYPES_H__ +#define __NV_MIG_TYPES_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +typedef NvU32 MIGDeviceId; + +#define NO_MIG_DEVICE 0L + +/* Convert a MIGDeviceId into a 0-based per-GPU subdevice index. */ +#define MIG_DEVICE_ID_SUBDEV_MASK 0xf0000000 +#define MIG_DEVICE_ID_SUBDEV_SHIFT 28 + +#define MIG_DEVICE_ID_TO_SUBDEV(migDeviceId) (((migDeviceId) & MIG_DEVICE_ID_SUBDEV_MASK) >> MIG_DEVICE_ID_SUBDEV_SHIFT) + +#ifdef __cplusplus +} +#endif + +#endif /* __NV_MIG_TYPES_H__ */ diff --git a/kernel-open/common/inc/nv_speculation_barrier.h b/kernel-open/common/inc/nv_speculation_barrier.h new file mode 100644 index 0000000..b78fbf0 --- /dev/null +++ b/kernel-open/common/inc/nv_speculation_barrier.h @@ -0,0 +1,227 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * NVIDIA GPZ vulnerability mitigation definitions. + */ + +/* + * There are two copies of this file for legacy reasons: + * + * P4: <$NV_SOURCE/>drivers/common/inc/nv_speculation_barrier.h + * Git: include/nv_speculation_barrier.h + * + * Both files need to be kept in sync if any changes are required. + */ + +#ifndef _NV_SPECULATION_BARRIER_H_ +#define _NV_SPECULATION_BARRIER_H_ + +#define NV_SPECULATION_BARRIER_VERSION 2 + +/* + * GNU-C/MSC/clang - x86/x86_64 : x86_64, __i386, __i386__ + * GNU-C - THUMB mode : __GNUC__, __thumb__ + * GNU-C - ARM modes : __GNUC__, __arm__, __aarch64__ + * armclang - THUMB mode : __ARMCC_VERSION, __thumb__ + * armclang - ARM modes : __ARMCC_VERSION, __arm__, __aarch64__ + * GHS - THUMB mode : __ghs__, __THUMB__ + * GHS - ARM modes : __ghs__, __ARM__, __ARM64__ + */ + +#if defined(_M_IX86) || defined(__i386__) || defined(__i386) \ + || defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) + /* All x86 */ + #define NV_SPECULATION_BARRIER_x86 + +#elif defined(macintosh) || defined(__APPLE__) \ + || defined(__powerpc) || defined(__powerpc__) || defined(__powerpc64__) \ + || defined(__POWERPC__) || defined(__ppc) || defined(__ppc__) \ + || defined(__ppc64__) || defined(__PPC__) \ + || defined(__PPC64__) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) + /* All PowerPC */ + #define NV_SPECULATION_BARRIER_PPC + +#elif (defined(__GNUC__) && defined(__thumb__)) \ + || (defined(__ARMCC_VERSION) && defined(__thumb__)) \ + || (defined(__ghs__) && defined(__THUMB__)) + /* ARM-thumb mode(<=ARMv7)/T32 (ARMv8) */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB ".inst.w 0xf3af8014\n" + +#elif (defined(__GNUC__) && defined(__arm__)) \ + || (defined(__ARMCC_VERSION) && defined(__arm__)) \ + || (defined(__ghs__) && defined(__ARM__)) + /* aarch32(ARMv8) / arm(<=ARMv7) mode */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB ".inst 0xe320f014\n" + +#elif (defined(__GNUC__) && defined(__aarch64__)) \ + || (defined(__ARMCC_VERSION) && defined(__aarch64__)) \ + || (defined(__ghs__) && defined(__ARM64__)) + /* aarch64(ARMv8) mode */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB "HINT #20\n" +#elif (defined(_MSC_VER) && ( defined(_M_ARM64) || defined(_M_ARM)) ) + /* Not currently implemented for MSVC/ARM64. See bug 3366890. */ +# define nv_speculation_barrier() +# define speculation_barrier() nv_speculation_barrier() +#elif defined(NVCPU_IS_RISCV64) +# define nv_speculation_barrier() +#else + #error "Unknown compiler/chip family" +#endif + +/* + * nv_speculation_barrier -- General-purpose speculation barrier + * + * This approach provides full protection against variant-1 vulnerability. + * However, the recommended approach is detailed below (See: + * nv_array_index_no_speculate) + * + * Semantics: + * Any memory read that is sequenced after a nv_speculation_barrier(), + * and contained directly within the scope of nv_speculation_barrier() or + * directly within a nested scope, will not speculatively execute until all + * conditions for entering that scope have been architecturally resolved. + * + * Example: + * if (untrusted_index_from_user < bound) { + * ... + * nv_speculation_barrier(); + * ... 
+ * x = array1[untrusted_index_from_user]; + * bit = x & 1; + * y = array2[0x100 * bit]; + * } + */ + +#if defined(NV_SPECULATION_BARRIER_x86) +// Delete after all references are changed to nv_speculation_barrier +#define speculation_barrier() nv_speculation_barrier() + +static inline void nv_speculation_barrier(void) +{ + +#if defined(_MSC_VER) && !defined(__clang__) + _mm_lfence(); +#endif + +#if defined(__GNUC__) || defined(__clang__) + __asm__ __volatile__ ("lfence" : : : "memory"); +#endif + +} + +#elif defined(NV_SPECULATION_BARRIER_PPC) + +static inline void nv_speculation_barrier(void) +{ + asm volatile("ori 31,31,0"); +} + +#elif defined(NV_SPECULATION_BARRIER_ARM_COMMON) + +/* Note: Cortex-A9 GNU-assembler seems to complain about DSB SY */ + #define nv_speculation_barrier() \ + asm volatile \ + ( \ + "DSB sy\n" \ + "ISB\n" \ + : : : "memory" \ + ) +#endif + +/* + * nv_array_index_no_speculate -- Recommended variant-1 mitigation approach + * + * The array-index-no-speculate approach "de-speculates" an array index that + * has already been bounds-checked. + * + * This approach is preferred over nv_speculation_barrier due to the following + * reasons: + * - It is just as effective as the general-purpose speculation barrier. + * - It clearly identifies what array index is being de-speculated and is thus + * self-commenting, whereas the general-purpose speculation barrier requires + * an explanation of what array index is being de-speculated. + * - It performs substantially better than the general-purpose speculation + * barrier on ARM Cortex-A cores (the difference is expected to be tens of + * cycles per invocation). Within tight loops, this difference may become + * noticeable. + * + * Semantics: + * Provided count is non-zero and the caller has already validated or otherwise + * established that index < count, any speculative use of the return value will + * use a speculative value that is less than count. + * + * Example: + * if (untrusted_index_from_user < bound) { + * untrusted_index_from_user = nv_array_index_no_speculate( + * untrusted_index_from_user, bound); + * ... + * x = array1[untrusted_index_from_user]; + * ... + * } + * + * The use of nv_array_index_no_speculate() in the above example ensures that + * subsequent uses of untrusted_index_from_user will not execute speculatively + * (they will wait for the bounds check to complete). + */ + +static inline unsigned long nv_array_index_no_speculate(unsigned long index, + unsigned long count) +{ +#if defined(NV_SPECULATION_BARRIER_x86) && (defined(__GNUC__) || defined(__clang__)) + unsigned long mask; + + __asm__ __volatile__ + ( + "CMP %2, %1 \n" + "SBB %0, %0 \n" + : "=r"(mask) : "r"(index), "r"(count) : "cc" + ); + + return (index & mask); + +#elif defined(NV_SPECULATION_BARRIER_ARM_COMMON) + unsigned long mask; + + asm volatile + ( + "CMP %[ind], %[cnt] \n" + "SBC %[res], %[cnt], %[cnt] \n" + NV_SPEC_BARRIER_CSDB + : [res] "=r" (mask) : [ind] "r" (index), [cnt] "r" (count): "cc" + ); + + return (index & mask); + +/* Fallback to generic speculation barrier for unsupported platforms */ +#else + nv_speculation_barrier(); + + return index; +#endif +} + +#endif //_NV_SPECULATION_BARRIER_H_ diff --git a/kernel-open/common/inc/nv_stdarg.h b/kernel-open/common/inc/nv_stdarg.h new file mode 100644 index 0000000..b23f7f7 --- /dev/null +++ b/kernel-open/common/inc/nv_stdarg.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _NV_STDARG_H_ +#define _NV_STDARG_H_ + +#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX) + #include "conftest.h" + #if defined(NV_LINUX_STDARG_H_PRESENT) + #include <linux/stdarg.h> + #else + #include <stdarg.h> + #endif +#else + #include <stdarg.h> +#endif + +#endif // _NV_STDARG_H_ diff --git a/kernel-open/common/inc/nv_uvm_interface.h b/kernel-open/common/inc/nv_uvm_interface.h new file mode 100644 index 0000000..9ddeedf --- /dev/null +++ b/kernel-open/common/inc/nv_uvm_interface.h @@ -0,0 +1,1869 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// +// This file provides the interface that RM exposes to UVM. +// + +#ifndef _NV_UVM_INTERFACE_H_ +#define _NV_UVM_INTERFACE_H_ + +// Forward references, to break circular header file dependencies: +struct UvmEventsLinux; + +#if defined(NVIDIA_UVM_ENABLED) + +// We are in the UVM build system, for a Linux target.
+#include "uvm_linux.h" + +#else + +// We are in the RM build system, for a Linux target: +#include "nv-linux.h" + +#endif // NVIDIA_UVM_ENABLED + +#include "nvgputypes.h" +#include "nvstatus.h" +#include "nv_uvm_types.h" +#include "nv_uvm_user_types.h" + + +// Define the type here as it's Linux specific, used only by the Linux specific +// nvUvmInterfaceRegisterGpu() API. +typedef struct +{ + struct pci_dev *pci_dev; + + // DMA addressable range of the device, mirrors fields in nv_state_t. + NvU64 dma_addressable_start; + NvU64 dma_addressable_limit; +} UvmGpuPlatformInfo; + +/******************************************************************************* + nvUvmInterfaceRegisterGpu + + Registers the GPU with the provided physical UUID for use. A GPU must be + registered before its UUID can be used with any other API. This call is + ref-counted so every nvUvmInterfaceRegisterGpu must be paired with a + corresponding nvUvmInterfaceUnregisterGpu. + + You don't need to call nvUvmInterfaceSessionCreate before calling this. + + Error codes: + NV_ERR_GPU_UUID_NOT_FOUND + NV_ERR_NO_MEMORY + NV_ERR_GENERIC +*/ +NV_STATUS nvUvmInterfaceRegisterGpu(const NvProcessorUuid *gpuUuid, UvmGpuPlatformInfo *gpuInfo); + +/******************************************************************************* + nvUvmInterfaceUnregisterGpu + + Unregisters the GPU with the provided physical UUID. This drops the ref + count from nvUvmInterfaceRegisterGpu. Once the reference count goes to 0 + the device may no longer be accessible until the next + nvUvmInterfaceRegisterGpu call. No automatic resource freeing is performed, + so only make the last unregister call after destroying all your allocations + associated with that UUID (such as those from + nvUvmInterfaceAddressSpaceCreate). + + If the UUID is not found, no operation is performed. +*/ +void nvUvmInterfaceUnregisterGpu(const NvProcessorUuid *gpuUuid); + +/******************************************************************************* + nvUvmInterfaceSessionCreate + + TODO: Creates session object. All allocations are tied to the session. + + The platformInfo parameter is filled by the callee with miscellaneous system + information. Refer to the UvmPlatformInfo struct for details. + + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY +*/ +NV_STATUS nvUvmInterfaceSessionCreate(uvmGpuSessionHandle *session, + UvmPlatformInfo *platformInfo); + +/******************************************************************************* + nvUvmInterfaceSessionDestroy + + Destroys a session object. All allocations are tied to the session will + be destroyed. + + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY +*/ +NV_STATUS nvUvmInterfaceSessionDestroy(uvmGpuSessionHandle session); + +/******************************************************************************* + nvUvmInterfaceDeviceCreate + + Creates a device object under the given session for the GPU with the given + physical UUID. Also creates a partition object for the device iff + bCreateSmcPartition is true and pGpuInfo->smcEnabled is true. + pGpuInfo->smcUserClientInfo will be used to determine the SMC partition in + this case. A device handle is returned in the device output parameter. 
+ + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY + NV_ERR_INVALID_ARGUMENT + NV_ERR_INSUFFICIENT_RESOURCES + NV_ERR_OBJECT_NOT_FOUND +*/ +NV_STATUS nvUvmInterfaceDeviceCreate(uvmGpuSessionHandle session, + const UvmGpuInfo *pGpuInfo, + const NvProcessorUuid *gpuUuid, + uvmGpuDeviceHandle *device, + NvBool bCreateSmcPartition); + +/******************************************************************************* + nvUvmInterfaceDeviceDestroy + + Destroys the device object for the given handle. The handle must have been + obtained in a prior call to nvUvmInterfaceDeviceCreate. +*/ +void nvUvmInterfaceDeviceDestroy(uvmGpuDeviceHandle device); + +/******************************************************************************* + nvUvmInterfaceAddressSpaceCreate + + This function creates an address space. + This virtual address space is created on the GPU specified + by device. + + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY +*/ +NV_STATUS nvUvmInterfaceAddressSpaceCreate(uvmGpuDeviceHandle device, + unsigned long long vaBase, + unsigned long long vaSize, + NvBool enableAts, + uvmGpuAddressSpaceHandle *vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo); + +/******************************************************************************* + nvUvmInterfaceDupAddressSpace + + This function will dup the given VA space from the user's client to the + kernel client that was created as an ops session. + + By duping the VA space, it is guaranteed that RM will refcount the VA space object. + + Error codes: + NV_ERR_GENERIC +*/ +NV_STATUS nvUvmInterfaceDupAddressSpace(uvmGpuDeviceHandle device, + NvHandle hUserClient, + NvHandle hUserVASpace, + uvmGpuAddressSpaceHandle *vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo); + +/******************************************************************************* + nvUvmInterfaceAddressSpaceDestroy + + Destroys an address space that was previously created via + nvUvmInterfaceAddressSpaceCreate. +*/ + +void nvUvmInterfaceAddressSpaceDestroy(uvmGpuAddressSpaceHandle vaSpace); + +/******************************************************************************* + nvUvmInterfaceMemoryAllocFB + + This function will allocate video memory and provide a mapped GPU + virtual address to this allocation. It also returns the GPU physical + offset if contiguous allocations are requested. + + This function will allocate a minimum page size if the length provided is 0 + and will return a unique GPU virtual address. + + The default page size will be the small page size (as returned by query + caps). The physical alignment will also be enforced to the small page + size (64K/128K). + + Arguments: + vaSpace[IN] - Pointer to vaSpace object + length [IN] - Length of the allocation + gpuPointer[OUT] - GPU VA mapping + allocInfo[IN/OUT] - Pointer to allocation info structure which + contains the fields given below + + allocInfo Members: + gpuPhysOffset[OUT] - Physical offset of allocation returned only + if contiguous allocation is requested. + pageSize[IN] - Override the default page size (see above). + alignment[IN] - gpuPointer GPU VA alignment. 0 means 4KB + alignment. + bContiguousPhysAlloc[IN] - Flag to request contiguous allocation. Default + will follow the vidHeapControl default policy. + bMemGrowsDown[IN] + bPersistentVidmem[IN] - Allocate persistent vidmem. + hPhysHandle[IN/OUT] - The handle will be used in allocation if provided. + If not provided, the allocator will return the handle + it eventually used.
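+
+    Example (an illustrative sketch; "vaSpace" is assumed to be a valid
+    handle, and zero-initializing allocInfo is assumed to select the
+    documented defaults):
+
+        UvmGpuAllocInfo allocInfo = {0};
+        UvmGpuPointer gpuVa;
+        NV_STATUS status = nvUvmInterfaceMemoryAllocFB(vaSpace, 64 * 1024,
+                                                       &gpuVa, &allocInfo);
+        // On NV_OK, gpuVa holds the GPU VA; release it later with
+        // nvUvmInterfaceMemoryFree(vaSpace, gpuVa).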
+ Error codes: + NV_ERR_INVALID_ARGUMENT + NV_ERR_NO_MEMORY - Not enough physical memory to service + allocation request with provided constraints + NV_ERR_INSUFFICIENT_RESOURCES - Not enough available resources to satisfy allocation request + NV_ERR_INVALID_OWNER - Target memory not accessible by specified owner + NV_ERR_NOT_SUPPORTED - Operation not supported on broken FB + +*/ +NV_STATUS nvUvmInterfaceMemoryAllocFB(uvmGpuAddressSpaceHandle vaSpace, + NvLength length, + UvmGpuPointer * gpuPointer, + UvmGpuAllocInfo * allocInfo); + +/******************************************************************************* + nvUvmInterfaceMemoryAllocSys + + This function will allocate system memory and provide a mapped GPU + virtual address to this allocation. + + This function will allocate a minimum page size if the length provided is 0 + and will return a unique GPU virtual address. + + The default page size will be the small page size (as returned by query caps). + + Arguments: + vaSpace[IN] - Pointer to vaSpace object + length [IN] - Length of the allocation + gpuPointer[OUT] - GPU VA mapping + allocInfo[IN/OUT] - Pointer to allocation info structure which + contains the fields given below + + allocInfo Members: + gpuPhysOffset[OUT] - Physical offset of allocation returned only + if contiguous allocation is requested. + pageSize[IN] - Override the default page size (see above). + alignment[IN] - gpuPointer GPU VA alignment. 0 means 4KB + alignment. + bContiguousPhysAlloc[IN] - Flag to request contiguous allocation. Default + will follow the vidHeapControl default policy. + bMemGrowsDown[IN] + bPersistentVidmem[IN] - Allocate persistent vidmem. + hPhysHandle[IN/OUT] - The handle will be used in allocation if provided. + If not provided, the allocator will return the handle + it eventually used. + Error codes: + NV_ERR_INVALID_ARGUMENT + NV_ERR_NO_MEMORY - Not enough physical memory to service + allocation request with provided constraints + NV_ERR_INSUFFICIENT_RESOURCES - Not enough available resources to satisfy allocation request + NV_ERR_INVALID_OWNER - Target memory not accessible by specified owner + NV_ERR_NOT_SUPPORTED - Operation not supported +*/ +NV_STATUS nvUvmInterfaceMemoryAllocSys(uvmGpuAddressSpaceHandle vaSpace, + NvLength length, + UvmGpuPointer * gpuPointer, + UvmGpuAllocInfo * allocInfo); + +/******************************************************************************* + nvUvmInterfaceGetP2PCaps + + Obtain the P2P capabilities between two devices. + + Arguments: + device1[IN] - Device handle of the first GPU (required) + device2[IN] - Device handle of the second GPU (required) + p2pCapsParams [OUT] - P2P capabilities between the two GPUs + + Error codes: + NV_ERR_INVALID_ARGUMENT + NV_ERR_GENERIC: + Unexpected error. We try hard to avoid returning this error + code, because it is not very informative. + +*/ +NV_STATUS nvUvmInterfaceGetP2PCaps(uvmGpuDeviceHandle device1, + uvmGpuDeviceHandle device2, + UvmGpuP2PCapsParams * p2pCapsParams); + +/******************************************************************************* + nvUvmInterfaceGetPmaObject + + This function will return a pointer to the PMA object for the given GPU. This + PMA object handle is required for page allocation. + + Arguments: + device [IN] - Device handle allocated in + nvUvmInterfaceDeviceCreate + pPma [OUT] - Pointer to PMA object + pPmaPubStats [OUT] - Pointer to UvmPmaStatistics object + + Error codes: + NV_ERR_NOT_SUPPORTED - Operation not supported on broken FB + NV_ERR_GENERIC: + Unexpected error.
We try hard to avoid returning this error + code, because it is not very informative. +*/ +NV_STATUS nvUvmInterfaceGetPmaObject(uvmGpuDeviceHandle device, + void **pPma, + const UvmPmaStatistics **pPmaPubStats); + +// Mirrors pmaEvictPagesCb_t, see its documentation in pma.h. +typedef NV_STATUS (*uvmPmaEvictPagesCallback)(void *callbackData, + NvU64 pageSize, + NvU64 *pPages, + NvU32 count, + NvU64 physBegin, + NvU64 physEnd, + UVM_PMA_GPU_MEMORY_TYPE mem_type); + +// Mirrors pmaEvictRangeCb_t, see its documentation in pma.h. +typedef NV_STATUS (*uvmPmaEvictRangeCallback)(void *callbackData, + NvU64 physBegin, + NvU64 physEnd, + UVM_PMA_GPU_MEMORY_TYPE mem_type); + +/******************************************************************************* + nvUvmInterfacePmaRegisterEvictionCallbacks + + Simple wrapper for pmaRegisterEvictionCb(), see its documentation in pma.h. +*/ +NV_STATUS nvUvmInterfacePmaRegisterEvictionCallbacks(void *pPma, + uvmPmaEvictPagesCallback evictPages, + uvmPmaEvictRangeCallback evictRange, + void *callbackData); + +/****************************************************************************** + nvUvmInterfacePmaUnregisterEvictionCallbacks + + Simple wrapper for pmaUnregisterEvictionCb(), see its documentation in pma.h. +*/ +void nvUvmInterfacePmaUnregisterEvictionCallbacks(void *pPma); + +/******************************************************************************* + nvUvmInterfacePmaAllocPages + + @brief Synchronous API for allocating pages from the PMA. + PMA will decide which PMA regions to allocate from based on the provided + flags. PMA will also initiate UVM evictions to make room for this + allocation unless prohibited by PMA_FLAGS_DONT_EVICT. UVM callers must pass + this flag to avoid deadlock. Only UVM may allocate unpinned memory from + this API. + + For broadcast methods, PMA will guarantee the same physical frames are + allocated on multiple GPUs, specified by the PMA objects passed in. + + If the allocation is contiguous, only one page in pPages will be filled. + Also, the contiguous flag must be passed later to nvUvmInterfacePmaFreePages. + + Arguments: + pPma[IN] - Pointer to PMA object + pageCount [IN] - Number of pages required to be allocated. + pageSize [IN] - 64KB, 128KB or 2MB. No other values are permissible. + pPmaAllocOptions[IN] - Pointer to PMA allocation info structure. + pPages[OUT] - Array of pointers, containing the PA base + address of each page. + + Error codes: + NV_ERR_NO_MEMORY: + Internal memory allocation failed. + NV_ERR_GENERIC: + Unexpected error. We try hard to avoid returning this error + code, because it is not very informative. +*/ +NV_STATUS nvUvmInterfacePmaAllocPages(void *pPma, + NvLength pageCount, + NvU64 pageSize, + UvmPmaAllocationOptions *pPmaAllocOptions, + NvU64 *pPages); + +/******************************************************************************* + nvUvmInterfacePmaPinPages + + This function will pin the physical memory allocated using PMA. The pages + passed as input must be unpinned; if any page is not previously marked + "unpinned", this function will return an error and roll back any changes. + + Arguments: + pPma[IN] - Pointer to PMA object. + pPages[IN] - Array of pointers, containing the PA base + address of each page to be pinned. + pageCount [IN] - Number of pages required to be pinned. + pageSize [IN] - Page size of each page to be pinned. + flags [IN] - UVM_PMA_CALLED_FROM_PMA_EVICTION if called from + PMA eviction, 0 otherwise. + Error codes: + NV_ERR_INVALID_ARGUMENT - Invalid input arguments.
+ NV_ERR_GENERIC - Unexpected error. We try hard to avoid + returning this error code as it is not very + informative. + NV_ERR_NOT_SUPPORTED - Operation not supported on broken FB +*/ +NV_STATUS nvUvmInterfacePmaPinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU64 pageSize, + NvU32 flags); + +/******************************************************************************* + nvUvmInterfaceMemoryFree + + Frees up a GPU allocation. +*/ +void nvUvmInterfaceMemoryFree(uvmGpuAddressSpaceHandle vaSpace, + UvmGpuPointer gpuPointer); + +/******************************************************************************* + nvUvmInterfacePmaFreePages + + This function will free physical memory allocated using PMA. It marks a list + of pages as free. This operation is also used by RM to mark pages as "scrubbed" + for the initial ECC sweep. This function does not fail. + + When the allocation was contiguous, an appropriate flag needs to be passed. + + Arguments: + pPma[IN] - Pointer to PMA object + pPages[IN] - Array of pointers, containing the PA base + address of each page. + pageCount [IN] - Number of pages to be freed. + pageSize [IN] - Page size of each page + flags [IN] - Flags with information about allocation type + with the same meaning as flags in options for + nvUvmInterfacePmaAllocPages. When called from PMA + eviction, UVM_PMA_CALLED_FROM_PMA_EVICTION needs + to be added to flags. + Error codes: + NV_ERR_INVALID_ARGUMENT + NV_ERR_NO_MEMORY - Not enough physical memory to service + allocation request with provided constraints + NV_ERR_INSUFFICIENT_RESOURCES - Not enough available resources to satisfy allocation request + NV_ERR_INVALID_OWNER - Target memory not accessible by specified owner + NV_ERR_NOT_SUPPORTED - Operation not supported on broken FB +*/ +void nvUvmInterfacePmaFreePages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU64 pageSize, + NvU32 flags); + +/******************************************************************************* + nvUvmInterfaceMemoryCpuMap + + This function creates a CPU mapping to the provided GPU address. + If the address is not the same as what is returned by the Alloc + function, then the function will map it from the address provided. + This offset will be relative to the GPU offset obtained from the + memory alloc functions. + + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY +*/ +NV_STATUS nvUvmInterfaceMemoryCpuMap(uvmGpuAddressSpaceHandle vaSpace, + UvmGpuPointer gpuPointer, + NvLength length, void **cpuPtr, + NvU64 pageSize); + +/******************************************************************************* + nvUvmInterfaceMemoryCpuUnMap + + Unmaps the cpuPtr provided from the process virtual address space. +*/ +void nvUvmInterfaceMemoryCpuUnMap(uvmGpuAddressSpaceHandle vaSpace, + void *cpuPtr); + +/******************************************************************************* + nvUvmInterfaceTsgAllocate + + This function allocates a Time-Slice Group (TSG). + + allocParams must contain an engineIndex, as TSGs need to be bound to an + engine type at allocation time. The possible values are [0, + UVM_COPY_ENGINE_COUNT_MAX) for the CE engine type. Notably, only the copy engines + that have UvmGpuCopyEngineCaps::supported set to true can be allocated. + + Note that TSG is not supported on all GPU architectures for all engine + types, e.g., pre-Volta GPUs only support TSG for the GR/Compute engine type.
+ On devices that do not support HW TSGs on the requested engine, this API is + still required, i.e., a TSG handle is required in + nvUvmInterfaceChannelAllocate(), because information stored in it is necessary + for channel allocation. However, when HW TSGs aren't supported, a TSG handle + is essentially a "fake" TSG with no HW scheduling impact. + + tsg is filled with the address of the corresponding TSG handle. + + Arguments: + vaSpace[IN] - VA space linked to a client and a device under which + the TSG is allocated. + allocParams[IN] - structure with allocation settings. + tsg[OUT] - pointer to the new TSG handle. + + Error codes: + NV_ERR_GENERIC + NV_ERR_INVALID_ARGUMENT + NV_ERR_NO_MEMORY + NV_ERR_NOT_SUPPORTED +*/ +NV_STATUS nvUvmInterfaceTsgAllocate(uvmGpuAddressSpaceHandle vaSpace, + const UvmGpuTsgAllocParams *allocParams, + uvmGpuTsgHandle *tsg); + +/******************************************************************************* + nvUvmInterfaceTsgDestroy + + This function destroys a given TSG. + + Arguments: + tsg[IN] - Tsg handle +*/ +void nvUvmInterfaceTsgDestroy(uvmGpuTsgHandle tsg); + +/******************************************************************************* + nvUvmInterfaceChannelAllocate + + This function will allocate a channel bound to a copy engine (CE) or a SEC2 + engine. + + allocParams contains information relative to GPFIFO and GPPut. + + channel is filled with the address of the corresponding channel handle. + + channelInfo is filled out with channel get/put. The errorNotifier is filled + out when the channel hits an RC error. On Volta+ devices, it also computes + the work submission token and the work submission offset to be used in the + Host channel submission doorbell. + + Arguments: + tsg[IN] - Time-Slice Group of which the channel will be a member. + allocParams[IN] - structure with allocation settings + channel[OUT] - pointer to the new channel handle + channelInfo[OUT] - structure filled with channel information + + Error codes: + NV_ERR_GENERIC + NV_ERR_INVALID_ARGUMENT + NV_ERR_NO_MEMORY + NV_ERR_NOT_SUPPORTED +*/ +NV_STATUS nvUvmInterfaceChannelAllocate(const uvmGpuTsgHandle tsg, + const UvmGpuChannelAllocParams *allocParams, + uvmGpuChannelHandle *channel, + UvmGpuChannelInfo *channelInfo); + +/******************************************************************************* + nvUvmInterfaceChannelDestroy + + This function destroys a given channel. + + Arguments: + channel[IN] - channel handle +*/ +void nvUvmInterfaceChannelDestroy(uvmGpuChannelHandle channel); + +/******************************************************************************* + nvUvmInterfaceQueryCaps + + Returns capabilities for the provided GPU. + If the GPU does not exist, an error will be returned. + + If the client is only interested in the capabilities of the Copy Engines of + the given GPU, use nvUvmInterfaceQueryCopyEnginesCaps instead. + + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY + NV_ERR_INVALID_STATE + NV_ERR_NOT_SUPPORTED + NV_ERR_NOT_READY + NV_ERR_INVALID_LOCK_STATE + NV_ERR_NVLINK_FABRIC_NOT_READY + NV_ERR_NVLINK_FABRIC_FAILURE + NV_ERR_GPU_MEMORY_ONLINING_FAILURE +*/ +NV_STATUS nvUvmInterfaceQueryCaps(uvmGpuDeviceHandle device, + UvmGpuCaps *caps); + +/******************************************************************************* + nvUvmInterfaceQueryCopyEnginesCaps + + Returns the capabilities of all the Copy Engines for the provided GPU. + If the GPU does not exist, an error will be returned.
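+
+    Example (an illustrative sketch; "device" is assumed to be a handle
+    obtained from nvUvmInterfaceDeviceCreate):
+
+        UvmGpuCopyEnginesCaps ceCaps;
+        NV_STATUS status = nvUvmInterfaceQueryCopyEnginesCaps(device, &ceCaps);
+        // On NV_OK, ceCaps describes each CE; per the TSG documentation
+        // above, only engines reporting UvmGpuCopyEngineCaps::supported
+        // can be used for TSG allocation.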
+ + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY +*/ +NV_STATUS nvUvmInterfaceQueryCopyEnginesCaps(uvmGpuDeviceHandle device, + UvmGpuCopyEnginesCaps *caps); + +/******************************************************************************* + nvUvmInterfaceGetGpuInfo + + Returns various GPU info, refer to the UvmGpuInfo struct for details. + The input UUID is for the physical GPU and the pGpuClientInfo identifies + the SMC partition if SMC is enabled and the partition exists. + If no GPU matching the UUID is found, an error will be returned. + + On Ampere+ GPUs, pGpuClientInfo contains SMC information provided by the + client regarding the partition targeted in this operation. + + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY + NV_ERR_GPU_UUID_NOT_FOUND + NV_ERR_INSUFFICIENT_PERMISSIONS + NV_ERR_INSUFFICIENT_RESOURCES +*/ +NV_STATUS nvUvmInterfaceGetGpuInfo(const NvProcessorUuid *gpuUuid, + const UvmGpuClientInfo *pGpuClientInfo, + UvmGpuInfo *pGpuInfo); + +/******************************************************************************* + nvUvmInterfaceServiceDeviceInterruptsRM + + Tells RM to service all pending interrupts. This is helpful in ECC error + conditions, when the ECC error interrupt is set and the error can be determined + only after the ECC notifier has been set or reset. + + Error codes: + NV_ERR_GENERIC + UVM_INVALID_ARGUMENTS +*/ +NV_STATUS nvUvmInterfaceServiceDeviceInterruptsRM(uvmGpuDeviceHandle device); + +/******************************************************************************* + nvUvmInterfaceSetPageDirectory + Sets pageDirectory in the provided location. Also moves the existing PDE to + the provided pageDirectory. + + RM will propagate the update to all channels using the provided VA space. + All channels must be idle when this call is made. + + If the pageDirectory is in system memory, then a CPU physical address must be + provided. RM will establish and manage the DMA mapping for the + pageDirectory. + + Arguments: + vaSpace[IN] - VASpace Object + physAddress[IN] - Physical address of new page directory. If + !bVidMemAperture this is a CPU physical address. + numEntries[IN] - Number of entries, including the previous PDE, which will be copied + bVidMemAperture[IN] - If set, pageDirectory will reside in the VidMem aperture, else in sysmem + pasid[IN] - PASID (Process Address Space IDentifier) of the process + corresponding to the VA space. Ignored unless the VA space + object has ATS enabled. + dmaAddress[OUT] - DMA mapping created for physAddress. + + Error codes: + NV_ERR_GENERIC + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceSetPageDirectory(uvmGpuAddressSpaceHandle vaSpace, + NvU64 physAddress, unsigned numEntries, + NvBool bVidMemAperture, NvU32 pasid, + NvU64 *dmaAddress); + +/******************************************************************************* + nvUvmInterfaceUnsetPageDirectory + Unsets/Restores pageDirectory to RM's defined location. + + Arguments: + vaSpace[IN] - VASpace Object + + Error codes: + NV_ERR_GENERIC + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceUnsetPageDirectory(uvmGpuAddressSpaceHandle vaSpace); + +/******************************************************************************* + nvUvmInterfaceDupAllocation + + Duplicates the given allocation in a different VA space. + + The physical handle backing the source allocation is duplicated in + the GPU device associated with the destination VA space, and a new mapping + is created in that VA space. + + The input allocation can be located in sysmem (i.e.
allocated using + nvUvmInterfaceMemoryAllocSys) or vidmem (i.e. allocated using + nvUvmInterfaceMemoryAllocFB). If located in vidmem, duplication across + GPUs is not supported. + + For duplication of physical memory, use nvUvmInterfaceDupMemory. + + Arguments: + srcVaSpace[IN] - Source VA space. + srcAddress[IN] - GPU VA in the source VA space. The provided address + should match one previously returned by + nvUvmInterfaceMemoryAllocFB or + nvUvmInterfaceMemoryAllocSys. + dstVaSpace[IN] - Destination VA space where the new mapping will be + created. + dstVaAlignment[IN] - Alignment of the GPU VA in the destination VA + space. 0 means 4KB alignment. + dstAddress[OUT] - Pointer to the GPU VA in the destination VA space. + + Error codes: + NV_ERR_INVALID_ARGUMENT - If any of the inputs is invalid, or the source + and destination VA spaces are identical. + NV_ERR_OBJECT_NOT_FOUND - If the input allocation is not found under + the provided VA space. + NV_ERR_NO_MEMORY - If there is no memory to back the duplicate, + or the associated metadata. + NV_ERR_NOT_SUPPORTED - If trying to duplicate vidmem across GPUs. +*/ +NV_STATUS nvUvmInterfaceDupAllocation(uvmGpuAddressSpaceHandle srcVaSpace, + NvU64 srcAddress, + uvmGpuAddressSpaceHandle dstVaSpace, + NvU64 dstVaAlignment, + NvU64 *dstAddress); + +/******************************************************************************* + nvUvmInterfaceDupMemory + + Duplicates a physical memory allocation. If requested, provides information + about the allocation. + + Arguments: + device[IN] - Device linked to a client under which + the phys memory needs to be duped. + hClient[IN] - Client owning the memory. + hPhysMemory[IN] - Phys memory which is to be duped. + hDupedHandle[OUT] - Handle of the duped memory object. + pGpuMemoryInfo[OUT] - see nv_uvm_types.h for more information. + This parameter can be NULL. (optional) + Error codes: + NV_ERR_INVALID_ARGUMENT - If any parameter is invalid. + NV_ERR_NOT_SUPPORTED - If the allocation is not a physical allocation. + NV_ERR_OBJECT_NOT_FOUND - If the allocation is not found under the provided client. +*/ +NV_STATUS nvUvmInterfaceDupMemory(uvmGpuDeviceHandle device, + NvHandle hClient, + NvHandle hPhysMemory, + NvHandle *hDupMemory, + UvmGpuMemoryInfo *pGpuMemoryInfo); + +/******************************************************************************* + nvUvmInterfaceFreeDupedHandle + + Frees the allocation represented by the physical handle used to create the + duped allocation. + + Arguments: + device[IN] - Device handle used to dup the memory. + hPhysHandle[IN] - Handle representing the phys allocation. + + Error codes: + NV_ERROR + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceFreeDupedHandle(uvmGpuDeviceHandle device, + NvHandle hPhysHandle); + +/******************************************************************************* + nvUvmInterfaceGetFbInfo + + Gets FB information from RM. + + Arguments: + device[IN] - GPU device handle + fbInfo [OUT] - Pointer to FbInfo structure which contains + reservedHeapSize & heapSize + Error codes: + NV_ERROR + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceGetFbInfo(uvmGpuDeviceHandle device, + UvmGpuFbInfo * fbInfo); + +/******************************************************************************* + nvUvmInterfaceGetEccInfo + + Gets ECC information from RM.
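+
+    Example (an illustrative sketch; "device" is assumed valid and the
+    UvmGpuEccInfo layout is defined in nv_uvm_types.h):
+
+        UvmGpuEccInfo eccInfo;
+        NV_STATUS status = nvUvmInterfaceGetEccInfo(device, &eccInfo);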
+ + Arguments: + device[IN] - GPU device handle + eccInfo [OUT] - Pointer to EccInfo structure + + Error codes: + NV_ERROR + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceGetEccInfo(uvmGpuDeviceHandle device, + UvmGpuEccInfo * eccInfo); + +/******************************************************************************* + nvUvmInterfaceOwnPageFaultIntr + + This function transfers ownership of the replayable page fault interrupt + between RM and UVM, for a particular GPU. + + bOwnInterrupts == NV_TRUE: UVM is taking ownership from the RM. This causes + the following: RM will not service, enable or disable this interrupt and it + is up to the UVM driver to handle this interrupt. In this case, replayable + page fault interrupts are disabled by this function, before it returns. + + bOwnInterrupts == NV_FALSE: UVM is returning ownership to the RM: in this + case, replayable page fault interrupts MUST BE DISABLED BEFORE CALLING this + function. + + The cases above both result in transferring ownership of a GPU that has its + replayable page fault interrupts disabled. Doing otherwise would make it + very difficult to control which driver handles any interrupts that build up + during the hand-off. + + The calling pattern should look like this: + + UVM setting up a new GPU for operation: + UVM GPU LOCK + nvUvmInterfaceOwnPageFaultIntr(..., NV_TRUE) + UVM GPU UNLOCK + + Enable replayable page faults for that GPU + + UVM tearing down a GPU: + + Disable replayable page faults for that GPU + + UVM GPU LOCK + nvUvmInterfaceOwnPageFaultIntr(..., NV_FALSE) + UVM GPU UNLOCK + + Arguments: + device[IN] - Device handle associated with the GPU + bOwnInterrupts - Set to NV_TRUE for UVM to take ownership of the + replayable page fault interrupts. Set to NV_FALSE + to return ownership of the page fault interrupts + to RM. + Error codes: + NV_ERR_GENERIC + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceOwnPageFaultIntr(uvmGpuDeviceHandle device, NvBool bOwnInterrupts); + +/******************************************************************************* + nvUvmInterfaceInitFaultInfo + + This function obtains the fault buffer address, size and a few register mappings + for replayable faults, and creates a shadow buffer to store non-replayable + faults if the GPU supports it. + + Arguments: + device[IN] - Device handle associated with the GPU + pFaultInfo[OUT] - information provided by RM for fault handling + + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceInitFaultInfo(uvmGpuDeviceHandle device, + UvmGpuFaultInfo *pFaultInfo); + +/******************************************************************************* + nvUvmInterfaceDestroyFaultInfo + + This function unmaps and destroys the fault buffer and clears faultInfo + for replayable faults, and frees the shadow buffer for non-replayable faults. + + Arguments: + device[IN] - Device handle associated with the GPU + pFaultInfo[OUT] - information provided by RM for fault handling + + Error codes: + NV_ERR_GENERIC + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceDestroyFaultInfo(uvmGpuDeviceHandle device, + UvmGpuFaultInfo *pFaultInfo); + +/******************************************************************************* + nvUvmInterfaceHasPendingNonReplayableFaults + + This function tells whether there are pending non-replayable faults in the + client shadow fault buffer ready to be consumed.
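+
+    Example (an illustrative sketch of a poll-then-consume pattern;
+    "faultBuffer" is a hypothetical client buffer, and locking is the
+    caller's responsibility, per the NOTES below):
+
+        NvBool pending = NV_FALSE;
+        NvU32 numFaults = 0;
+        if ((nvUvmInterfaceHasPendingNonReplayableFaults(pFaultInfo,
+                                                         &pending) == NV_OK) &&
+            pending)
+        {
+            nvUvmInterfaceGetNonReplayableFaults(pFaultInfo, faultBuffer,
+                                                 &numFaults);
+        }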
+ + NOTES: + - This function uses a pre-allocated stack per GPU (stored in the + UvmGpuFaultInfo object) for calls related to non-replayable faults from the + top half. + - Concurrent calls to this function using the same pFaultInfo are not + thread-safe due to the pre-allocated stack. Therefore, locking is the caller's + responsibility. + - This function DOES NOT acquire the RM API or GPU locks. That is because + it is called during fault servicing, which could produce deadlocks. + + Arguments: + pFaultInfo[IN] - information provided by RM for fault handling. + Contains a pointer to the shadow fault buffer + hasPendingFaults[OUT] - return value that tells if there are + non-replayable faults ready to be consumed by + the client + + Error codes: + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceHasPendingNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo, + NvBool *hasPendingFaults); + +/******************************************************************************* + nvUvmInterfaceGetNonReplayableFaults + + This function consumes all the non-replayable fault packets in the client + shadow fault buffer and copies them to the given buffer. It also returns the + number of faults that have been copied. + + NOTES: + - This function uses a pre-allocated stack per GPU (stored in the + UvmGpuFaultInfo object) for calls from the bottom half that handles + non-replayable faults. + - See nvUvmInterfaceHasPendingNonReplayableFaults for the implications of + using a shared stack. + - This function DOES NOT acquire the RM API or GPU locks. That is because + it is called during fault servicing, which could produce deadlocks. + + Arguments: + pFaultInfo[IN] - information provided by RM for fault handling. + Contains a pointer to the shadow fault buffer + pFaultBuffer[OUT] - buffer provided by the client where fault buffers + are copied when they are popped out of the shadow + fault buffer (which is a circular queue). + numFaults[OUT] - return value that tells the number of faults copied + to the client's buffer + + Error codes: + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceGetNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo, + void *pFaultBuffer, + NvU32 *numFaults); + +/******************************************************************************* + nvUvmInterfaceFlushReplayableFaultBuffer + + This function sends an RPC to GSP in order to flush the HW replayable fault buffer. + + NOTES: + - This function DOES NOT acquire the RM API or GPU locks. That is because + it is called during fault servicing, which could produce deadlocks. + - This function should not be called when interrupts are disabled. + + Arguments: + pFaultInfo[IN] - information provided by RM for fault handling. + used for obtaining the device handle without locks. + bCopyAndFlush[IN] - Instructs RM to perform the flush in the Copy+Flush mode. + In this mode, RM will perform a copy of the packets from + the HW buffer to UVM's SW buffer as part of performing + the flush. This mode gives UVM the opportunity to observe + the packets contained within the HW buffer at the time + of issuing the call. + + Error codes: + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceFlushReplayableFaultBuffer(UvmGpuFaultInfo *pFaultInfo, + NvBool bCopyAndFlush); + +/******************************************************************************* + nvUvmInterfaceTogglePrefetchFaults + + This function sends an RPC to GSP in order to toggle the prefetch fault PRI. + + NOTES: + - This function DOES NOT acquire the RM API or GPU locks.
That is because + it is called during fault servicing, which could produce deadlocks. + - This function should not be called when interrupts are disabled. + + Arguments: + pFaultInfo[IN] - Information provided by RM for fault handling. + Used for obtaining the device handle without locks. + bEnable[IN] - Instructs RM whether to toggle generating faults on + prefetch on/off. + + Error codes: + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceTogglePrefetchFaults(UvmGpuFaultInfo *pFaultInfo, + NvBool bEnable); + +/******************************************************************************* + nvUvmInterfaceInitAccessCntrInfo + + This function obtains the access counter buffer address, size and a few register mappings. + + Arguments: + device[IN] - Device handle associated with the GPU + pAccessCntrInfo[OUT] - Information provided by RM for access counter handling + accessCntrIndex[IN] - Access counter index + + Error codes: + NV_ERR_GENERIC + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceInitAccessCntrInfo(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo, + NvU32 accessCntrIndex); + +/******************************************************************************* + nvUvmInterfaceDestroyAccessCntrInfo + + This function unmaps and destroys the access counter buffer and clears accessCntrInfo. + + Arguments: + device[IN] - Device handle associated with the GPU + pAccessCntrInfo[IN] - Information provided by RM for access counter handling + + Error codes: + NV_ERR_GENERIC + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceDestroyAccessCntrInfo(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo); + +/******************************************************************************* + nvUvmInterfaceEnableAccessCntr + + This function enables access counters using the given configuration. + UVM also takes ownership from the RM. + This causes the following: RM will not service, enable or disable this + interrupt and it is up to the UVM driver to handle this interrupt. In + this case, access counter notification interrupts are enabled by this + function before it returns. + + Arguments: + device[IN] - Device handle associated with the GPU + pAccessCntrInfo[IN] - Pointer to structure filled out by nvUvmInterfaceInitAccessCntrInfo + pAccessCntrConfig[IN] - Configuration for access counters + + Error codes: + NV_ERR_GENERIC + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceEnableAccessCntr(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo, + const UvmGpuAccessCntrConfig *pAccessCntrConfig); + +/******************************************************************************* + nvUvmInterfaceDisableAccessCntr + + This function disables access counters. + UVM also returns ownership to the RM: RM can service, enable or + disable this interrupt. In this case, access counter notification interrupts + are disabled by this function before it returns. + + Arguments: + device[IN] - Device handle associated with the GPU + pAccessCntrInfo[IN] - Pointer to structure filled out by nvUvmInterfaceInitAccessCntrInfo + + Error codes: + NV_ERR_GENERIC + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceDisableAccessCntr(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo); + +// +// Called by the UVM driver to register event callbacks with RM. Only one set of +// callbacks can be registered by any driver at a time. If another set of +// callbacks was already registered, NV_ERR_IN_USE is returned.
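+//
+// Example (an illustrative sketch; "uvmEvents" is a hypothetical, fully
+// populated callback table owned by the caller):
+//
+//     static struct UvmEventsLinux uvmEvents;
+//     NV_STATUS status = nvUvmInterfaceRegisterUvmEvents(&uvmEvents);
+//     // ... later, only if registration returned NV_OK ...
+//     if (status == NV_OK)
+//         nvUvmInterfaceDeRegisterUvmEvents();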
+// +NV_STATUS nvUvmInterfaceRegisterUvmEvents(struct UvmEventsLinux *importedEvents); + +// +// Counterpart to nvUvmInterfaceRegisterUvmEvents. This must only be called if +// nvUvmInterfaceRegisterUvmEvents returned NV_OK. +// +// Upon return, the caller is guaranteed that any outstanding callbacks are done +// and no new ones will be invoked. +// +void nvUvmInterfaceDeRegisterUvmEvents(void); + +/******************************************************************************* + nvUvmInterfaceGetNvlinkInfo + + Gets NVLINK information from RM. + + Arguments: + device[IN] - GPU device handle + nvlinkInfo [OUT] - Pointer to NvlinkInfo structure + + Error codes: + NV_ERROR + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceGetNvlinkInfo(uvmGpuDeviceHandle device, + UvmGpuNvlinkInfo *nvlinkInfo); + +/******************************************************************************* + nvUvmInterfaceP2pObjectCreate + + This API creates an NV50_P2P object for the GPUs with the given device + handles, and returns the handle to the object. + + Arguments: + device1[IN] - first GPU device handle + device2[IN] - second GPU device handle + hP2pObject[OUT] - handle to the created P2P object. + + Error codes: + NV_ERR_INVALID_ARGUMENT + NV_ERR_OBJECT_NOT_FOUND : If the device object associated with the device + handles isn't found. +*/ +NV_STATUS nvUvmInterfaceP2pObjectCreate(uvmGpuDeviceHandle device1, + uvmGpuDeviceHandle device2, + NvHandle *hP2pObject); + +/******************************************************************************* + nvUvmInterfaceP2pObjectDestroy + + This API destroys the NV50_P2P object associated with the passed handle. + + Arguments: + session[IN] - Session handle. + hP2pObject[IN] - handle to a P2P object. + + Error codes: NONE +*/ +void nvUvmInterfaceP2pObjectDestroy(uvmGpuSessionHandle session, + NvHandle hP2pObject); + +/******************************************************************************* + nvUvmInterfaceGetExternalAllocPtes + + The interface builds the RM PTEs using the provided input parameters. + + Arguments: + vaSpace[IN] - vaSpace handle. + hMemory[IN] - Memory handle. + offset [IN] - Offset from the beginning of the allocation + where PTE mappings should begin. + Should be aligned with mappingPagesize + in gpuExternalMappingInfo associated + with the allocation. + size [IN] - Length of the allocation for which PTEs + should be built. + Should be aligned with mappingPagesize + in gpuExternalMappingInfo associated + with the allocation. + size = 0 will be interpreted as the total size + of the allocation. + gpuExternalMappingInfo[IN/OUT] - See nv_uvm_types.h for more information. + + Error codes: + NV_ERR_INVALID_ARGUMENT - Invalid parameter(s) passed. + NV_ERR_INVALID_OBJECT_HANDLE - Invalid memory handle is passed. + NV_ERR_NOT_SUPPORTED - Functionality is not supported (see comments in nv_gpu_ops.c) + NV_ERR_INVALID_BASE - offset is beyond the allocation size + NV_ERR_INVALID_LIMIT - (offset + size) is beyond the allocation size. + NV_ERR_BUFFER_TOO_SMALL - gpuExternalMappingInfo.pteBufferSize is insufficient to + store a single PTE. + NV_ERR_NOT_READY - Returned when querying the PTEs requires a deferred setup + which has not yet completed. It is expected that the caller + will reattempt the call until a different code is returned.
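+
+    Example (an illustrative sketch; "vaSpace" and "hMemory" are assumed
+    valid, and the pteBuffer/pteBufferSize fields are assumed to follow the
+    gpuExternalMappingInfo description in nv_uvm_types.h; offset = 0 and
+    size = 0 request PTEs for the whole allocation):
+
+        UvmGpuExternalMappingInfo mappingInfo = {0};
+        mappingInfo.pteBuffer = pteBuffer;
+        mappingInfo.pteBufferSize = pteBufferSize;
+        NV_STATUS status = nvUvmInterfaceGetExternalAllocPtes(vaSpace,
+                                                              hMemory, 0, 0,
+                                                              &mappingInfo);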
+*/ +NV_STATUS nvUvmInterfaceGetExternalAllocPtes(uvmGpuAddressSpaceHandle vaSpace, + NvHandle hMemory, + NvU64 offset, + NvU64 size, + UvmGpuExternalMappingInfo *gpuExternalMappingInfo); + +/******************************************************************************* + nvUvmInterfaceGetExternalAllocPhysAddrs + + The interface builds the RM physical addrs using the provided input parameters. + + Arguments: + vaSpace[IN] - vaSpace handle. + hMemory[IN] - Memory handle. + offset [IN] - Offset from the beginning of the allocation + where physAddr mappings should begin. + Should be aligned with mappingPagesize + in gpuExternalMappingInfo associated + with the allocation. + size [IN] - Length of the allocation for which PhysAddrs + should be built. + Should be aligned with mappingPagesize + in gpuExternalMappingInfo associated + with the allocation. + size = 0 will be interpreted as the total size + of the allocation. + gpuExternalMappingInfo[IN/OUT] - See nv_uvm_types.h for more information. + + Error codes: + NV_ERR_INVALID_ARGUMENT - Invalid parameter(s) passed. + NV_ERR_INVALID_OBJECT_HANDLE - Invalid memory handle is passed. + NV_ERR_NOT_SUPPORTED - Functionality is not supported (see comments in nv_gpu_ops.c) + NV_ERR_INVALID_BASE - offset is beyond the allocation size + NV_ERR_INVALID_LIMIT - (offset + size) is beyond the allocation size. + NV_ERR_BUFFER_TOO_SMALL - gpuExternalMappingInfo.physAddrBufferSize is insufficient to + store a single physAddr. + NV_ERR_NOT_READY - Returned when querying the physAddrs requires a deferred setup + which has not yet completed. It is expected that the caller + will reattempt the call until a different code is returned. +*/ +NV_STATUS nvUvmInterfaceGetExternalAllocPhysAddrs(uvmGpuAddressSpaceHandle vaSpace, + NvHandle hMemory, + NvU64 offset, + NvU64 size, + UvmGpuExternalPhysAddrInfo *gpuExternalPhysAddrsInfo); + +/******************************************************************************* + nvUvmInterfaceRetainChannel + + Validates and returns information about the user's channel and its resources + (local CTX buffers + global CTX buffers). The state is refcounted and must be + released by calling nvUvmInterfaceReleaseChannel. + + Arguments: + vaSpace[IN] - vaSpace handle. + hClient[IN] - Client handle + hChannel[IN] - Channel handle + retainedChannel[OUT] - Opaque pointer to use to refer to this + channel in other nvUvmInterface APIs. + channelInstanceInfo[OUT] - Channel instance information to be filled out. + See nv_uvm_types.h for details. + + Error codes: + NV_ERR_INVALID_ARGUMENT : If any parameter is invalid. + NV_ERR_OBJECT_NOT_FOUND : If the object associated with the handle isn't found. + NV_ERR_INVALID_CHANNEL : If the channel verification fails. + NV_ERR_INSUFFICIENT_RESOURCES : If no memory is available to store the resource information. + */ +NV_STATUS nvUvmInterfaceRetainChannel(uvmGpuAddressSpaceHandle vaSpace, + NvHandle hClient, + NvHandle hChannel, + void **retainedChannel, + UvmGpuChannelInstanceInfo *channelInstanceInfo); + +/******************************************************************************* + nvUvmInterfaceBindChannelResources + + Associates the mapping address of the channel resources (VAs) provided by the + caller with the channel. + + Arguments: + retainedChannel[IN] - Channel pointer returned by nvUvmInterfaceRetainChannel + channelResourceBindParams[IN] - Buffer of initialized UvmGpuChannelInstanceInfo::resourceCount + entries. See nv_uvm_types.h for details.
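+
+    Example (an illustrative sketch of the retain/bind/release flow; the
+    handles and the population of "bindParams" are hypothetical and elided):
+
+        void *retained;
+        UvmGpuChannelInstanceInfo instanceInfo;
+        NV_STATUS status = nvUvmInterfaceRetainChannel(vaSpace, hClient,
+                                                       hChannel, &retained,
+                                                       &instanceInfo);
+        if (status == NV_OK)
+        {
+            // bindParams: one entry per resource reported in instanceInfo
+            status = nvUvmInterfaceBindChannelResources(retained, bindParams);
+            // ... use the channel resources ...
+            nvUvmInterfaceReleaseChannel(retained);
+        }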
+ + Error codes: + NV_ERR_INVALID_ARGUMENT : If any parameter is invalid. + NV_ERR_OBJECT_NOT_FOUND : If the object associated with the handle isn't found. + NV_ERR_INSUFFICIENT_RESOURCES : If no memory is available to store the resource information. + */ +NV_STATUS nvUvmInterfaceBindChannelResources(void *retainedChannel, + UvmGpuChannelResourceBindParams *channelResourceBindParams); + +/******************************************************************************* + nvUvmInterfaceReleaseChannel + + Releases state retained by nvUvmInterfaceRetainChannel. + */ +void nvUvmInterfaceReleaseChannel(void *retainedChannel); + +/******************************************************************************* + nvUvmInterfaceStopChannel + + Idles the channel and takes it off the runlist. + + Arguments: + retainedChannel[IN] - Channel pointer returned by nvUvmInterfaceRetainChannel + bImmediate[IN] - If true, kill the channel without attempting to wait for it to go idle. +*/ +void nvUvmInterfaceStopChannel(void *retainedChannel, NvBool bImmediate); + +/******************************************************************************* + nvUvmInterfaceGetChannelResourcePtes + + The interface builds the RM PTEs using the provided input parameters. + + Arguments: + vaSpace[IN] - vaSpace handle. + resourceDescriptor[IN] - The channel resource descriptor returned by + nvUvmInterfaceRetainChannelResources. + offset[IN] - Offset from the beginning of the allocation + where PTE mappings should begin. + Should be aligned with pagesize associated + with the allocation. + size[IN] - Length of the allocation for which PTEs + should be built. + Should be aligned with pagesize associated + with the allocation. + size = 0 will be interpreted as the total size + of the allocation. + gpuExternalMappingInfo[IN/OUT] - See nv_uvm_types.h for more information. + + Error codes: + NV_ERR_INVALID_ARGUMENT - Invalid parameter(s) passed. + NV_ERR_INVALID_OBJECT_HANDLE - Invalid memory handle is passed. + NV_ERR_NOT_SUPPORTED - Functionality is not supported. + NV_ERR_INVALID_BASE - offset is beyond the allocation size + NV_ERR_INVALID_LIMIT - (offset + size) is beyond the allocation size. + NV_ERR_BUFFER_TOO_SMALL - gpuExternalMappingInfo.pteBufferSize is insufficient to + store a single PTE. +*/ +NV_STATUS nvUvmInterfaceGetChannelResourcePtes(uvmGpuAddressSpaceHandle vaSpace, + NvP64 resourceDescriptor, + NvU64 offset, + NvU64 size, + UvmGpuExternalMappingInfo *externalMappingInfo); + +/******************************************************************************* + nvUvmInterfaceReportNonReplayableFault + + The interface communicates a non-replayable fault packet from UVM to RM, which + will log the fault, notify the clients and then trigger RC on the channel. + + Arguments: + device[IN] - The device where the fault happened. + pFaultPacket[IN] - The opaque pointer from UVM that will be later + converted to a MMU_FAULT_PACKET type. + Error codes: + NV_ERR_INVALID_ARGUMENT - Invalid parameter(s) passed. + NV_ERR_NOT_SUPPORTED - Functionality is not supported. +*/ +NV_STATUS nvUvmInterfaceReportNonReplayableFault(uvmGpuDeviceHandle device, + const void *pFaultPacket); + +/******************************************************************************* + nvUvmInterfacePagingChannelAllocate + + In SR-IOV heavy, this function requests the allocation of a paging channel + (i.e. a privileged CE channel) bound to a specified copy engine.
Unlike + channels allocated via nvUvmInterfaceChannelAllocate, the caller cannot push + methods to a paging channel directly, but instead relies on the + nvUvmInterfacePagingChannelPushStream API to do so. + + SR-IOV heavy only. The implementation of this interface can acquire + RM or GPU locks. + + Arguments: + device[IN] - device under which the paging channel will be allocated + allocParams[IN] - structure with allocation settings + channel[OUT] - pointer to the allocated paging channel handle + channelInfo[OUT] - structure filled with channel information + + Error codes: + NV_ERR_INVALID_ARGUMENT - Invalid parameter(s) passed. + NV_ERR_NO_MEMORY - Not enough memory to allocate + paging channel/shadow notifier. + NV_ERR_NOT_SUPPORTED - SR-IOV heavy mode is disabled. + + */ +NV_STATUS nvUvmInterfacePagingChannelAllocate(uvmGpuDeviceHandle device, + const UvmGpuPagingChannelAllocParams *allocParams, + UvmGpuPagingChannelHandle *channel, + UvmGpuPagingChannelInfo *channelInfo); + +/******************************************************************************* + nvUvmInterfacePagingChannelDestroy + + This function destroys a given paging channel. + + SR-IOV heavy only. The implementation of this interface can acquire + RM or GPU locks. + + Arguments: + channel[IN] - paging channel handle. If the passed handle is + the NULL pointer, the function returns immediately. +*/ +void nvUvmInterfacePagingChannelDestroy(UvmGpuPagingChannelHandle channel); + +/******************************************************************************* + + nvUvmInterfacePagingChannelsMap + + Maps a guest allocation in the address space associated with all the paging + channels allocated under the given device. + + SR-IOV heavy only. The implementation of this interface can acquire + RM or GPU locks. + + Arguments: + srcVaSpace[IN] - VA space handle used to allocate the input pointer + srcAddress. + srcAddress[IN] - virtual address returned by nvUvmInterfaceMemoryAllocFB + or nvUvmInterfaceMemoryAllocSys. The entire allocation + backing this guest VA is mapped. + device[IN] - device under which paging channels were allocated + dstAddress[OUT] - a virtual address that is valid (i.e. is mapped) in + all the paging channels allocated under the given vaSpace. + + Error codes: + NV_ERR_INVALID_ARGUMENT - Invalid parameter(s) passed. + NV_ERR_NOT_SUPPORTED - SR-IOV heavy mode is disabled. +*/ +NV_STATUS nvUvmInterfacePagingChannelsMap(uvmGpuAddressSpaceHandle srcVaSpace, + UvmGpuPointer srcAddress, + uvmGpuDeviceHandle device, + NvU64 *dstAddress); + +/******************************************************************************* + + nvUvmInterfacePagingChannelsUnmap + + Unmaps a VA returned by nvUvmInterfacePagingChannelsMap. + + SR-IOV heavy only. The implementation of this interface can acquire + RM or GPU locks. + + Arguments: + srcVaSpace[IN] - VA space handle that was passed to the previous mapping. + srcAddress[IN] - virtual address that was passed to the previous mapping. + device[IN] - device under which paging channels were allocated. + */ +void nvUvmInterfacePagingChannelsUnmap(uvmGpuAddressSpaceHandle srcVaSpace, + UvmGpuPointer srcAddress, + uvmGpuDeviceHandle device); + + +/******************************************************************************* + nvUvmInterfacePagingChannelPushStream + + Used for remote execution of the passed methods; the UVM driver uses this + interface to ask the vGPU plugin to execute certain HW methods on its + behalf. The callee should push the methods in the specified order, i.e.
+    is not allowed to do any reordering.
+
+    The API is asynchronous. The UVM driver can wait on the remote execution by
+    inserting a semaphore release method at the end of the method stream, and
+    then loop until the semaphore value reaches the completion value indicated
+    in the release method.
+
+    The valid HW methods that can be passed by the UVM driver follow; the source
+    functions listed contain the exact formatting (encoding) of the HW method
+    used by the UVM driver for Ampere.
+
+      - TLB invalidation targeting a VA range. See
+        uvm_hal_volta_host_tlb_invalidate_va.
+
+      - TLB invalidation targeting certain levels in the page tree (including
+        the possibility of invalidating everything).
+        See uvm_hal_pascal_host_tlb_invalidate_all.
+
+      - Replayable fault replay. See uvm_hal_volta_replay_faults.
+
+      - Replayable fault cancellation targeting a guest virtual address. See
+        uvm_hal_volta_cancel_faults_va.
+
+      - Membar, scoped to device or to the entire system. See
+        uvm_hal_pascal_host_membar_gpu and uvm_hal_pascal_host_membar_sys.
+
+      - Host semaphore acquire, see uvm_hal_turing_host_semaphore_acquire. The
+        virtual address specified in the semaphore operation must lie within a
+        buffer previously mapped by nvUvmInterfacePagingChannelsMap.
+
+      - CE semaphore release, see uvm_hal_pascal_ce_semaphore_release. The
+        virtual address specified in the semaphore operation must lie within a
+        buffer previously mapped by nvUvmInterfacePagingChannelsMap.
+
+      - 64-bit-wide memset, see uvm_hal_kepler_ce_memset_8. The destination
+        address is a physical address in vidmem.
+
+      - No-op, see uvm_hal_kepler_host_noop. Used to store the source buffer
+        of a memcopy method within the input stream itself.
+
+      - Memcopy, see uvm_hal_kepler_ce_memcopy. The destination address is a
+        physical address in vidmem. The source address is an offset within
+        methodStream, in bytes, indicating the location of the (inlined) source
+        buffer. The copy size does not exceed 4KB.
+
+      - CE semaphore release with timestamp, see
+        uvm_hal_kepler_ce_semaphore_timestamp. The virtual address specified in
+        the semaphore operation must lie within a buffer previously mapped by
+        nvUvmInterfacePagingChannelsMap.
+
+      - CE semaphore reduction, see uvm_hal_kepler_ce_semaphore_reduction_inc.
+        The virtual address specified in the semaphore operation must lie within
+        a buffer previously mapped by nvUvmInterfacePagingChannelsMap.
+
+    Only invoked in SR-IOV heavy mode.
+
+    NOTES:
+      - This function uses a pre-allocated stack per paging channel
+        (stored in the UvmGpuPagingChannel object).
+      - This function DOES NOT acquire the RM API or GPU locks. That is because
+        it is called during fault servicing, which could produce deadlocks.
+      - Concurrent calls to this function using channels under the same device
+        are not allowed due to:
+          a. the pre-allocated stack, and
+          b. the fact that the internal RPC infrastructure doesn't acquire the
+             GPU lock.
+        Therefore, locking is the caller's responsibility.
+
+    Arguments:
+        channel[IN]          - paging channel handle obtained via
+                               nvUvmInterfacePagingChannelAllocate
+
+        methodStream[IN]     - HW methods to be pushed to the paging channel.
+
+        methodStreamSize[IN] - Size of methodStream, in bytes. The maximum push
+                               size is 128KB.
+
+
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT - One or more parameters are invalid.
+        NV_ERR_NOT_SUPPORTED    - SR-IOV heavy mode is disabled.
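+
+    Illustrative call sequence (a sketch, not part of this interface; the
+    4 KB buffer and the buildMethods() helper are hypothetical):
+
+        char stream[4096];
+        NvU32 bytes = buildMethods(stream, sizeof(stream)); // valid HW methods only
+        NV_STATUS status = nvUvmInterfacePagingChannelPushStream(channel,
+                                                                 stream,
+                                                                 bytes);
+        // Completion can be observed by polling the semaphore released at the
+        // end of the stream, as described above.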
+*/
+NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channel,
+                                                char *methodStream,
+                                                NvU32 methodStreamSize);
+
+/*******************************************************************************
+    nvUvmInterfaceReportFatalError
+
+    Reports a global fatal error so RM can inform the clients that a node reboot
+    is necessary to recover from this error. This function can be called from
+    any lock environment, bottom half or non-interrupt context.
+
+*/
+void nvUvmInterfaceReportFatalError(NV_STATUS error);
+
+/*******************************************************************************
+    Cryptography Services Library (CSL) Interface
+*/
+
+/*******************************************************************************
+    nvUvmInterfaceCslInitContext
+
+    Allocates and initializes a CSL context for a given secure channel.
+
+    The lifetime of the context is the same as the lifetime of the secure channel
+    it is paired with.
+
+    Locking: This function acquires an API lock.
+    Memory : This function dynamically allocates memory.
+
+    Arguments:
+        uvmCslContext[IN/OUT] - The CSL context associated with a channel.
+        channel[IN]           - Handle to a secure channel.
+
+    Error codes:
+        NV_ERR_INVALID_STATE   - The system is not operating in Confidential
+                                 Compute mode.
+        NV_ERR_INVALID_CHANNEL - The associated channel is not a secure channel.
+        NV_ERR_IN_USE          - The context has already been initialized.
+*/
+NV_STATUS nvUvmInterfaceCslInitContext(UvmCslContext *uvmCslContext,
+                                       uvmGpuChannelHandle channel);
+
+/*******************************************************************************
+    nvUvmInterfaceDeinitCslContext
+
+    Securely deinitializes and clears the contents of a context.
+
+    If the context is already deinitialized, the function returns immediately.
+
+    Locking: This function does not acquire an API or GPU lock.
+    Memory : This function may free memory.
+
+    Arguments:
+        uvmCslContext[IN] - The CSL context associated with a channel.
+*/
+void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext);
+
+/*******************************************************************************
+    nvUvmInterfaceCslRotateKey
+
+    Disables channels and rotates keys.
+
+    This function disables channels and rotates the associated keys. The
+    channels associated with the given CSL contexts must be idled before this
+    function is called. To trigger key rotation, all allocated channels for a
+    given key must be present in the list. If the function returns successfully,
+    the CSL contexts have been updated with the new key.
+
+    Locking: This function attempts to acquire the GPU lock. If the lock cannot
+             be acquired, the return code is NV_ERR_STATE_IN_USE. The caller
+             must guarantee that no CSL function, including this one, is invoked
+             concurrently with the CSL contexts in contextList.
+    Memory : This function dynamically allocates memory.
+
+    Arguments:
+        contextList[IN/OUT]  - An array of pointers to CSL contexts.
+        contextListCount[IN] - Number of CSL contexts in contextList. Its value
+                               must be greater than 0.
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT - contextList is NULL or contextListCount is 0.
+        NV_ERR_STATE_IN_USE     - Unable to acquire lock / resource. Caller
+                                  can retry at a later time.
+        NV_ERR_GENERIC          - A failure other than _STATE_IN_USE occurred
+                                  when attempting to acquire a lock.
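+
+    Illustrative retry loop (a sketch under the locking rules above; the
+    contexts array and contextCount are hypothetical caller state, and the
+    associated channels are assumed to have been idled already):
+
+        NV_STATUS status;
+        do
+        {
+            status = nvUvmInterfaceCslRotateKey(contexts, contextCount);
+        } while (status == NV_ERR_STATE_IN_USE);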
+*/
+NV_STATUS nvUvmInterfaceCslRotateKey(UvmCslContext *contextList[],
+                                     NvU32 contextListCount);
+
+/*******************************************************************************
+    nvUvmInterfaceCslRotateIv
+
+    Rotates the IV for a given channel and operation.
+
+    This function will rotate the IV on both the CPU and the GPU.
+    For a given operation the channel must be idle before calling this function.
+    This function can be called regardless of the value of the IV's message
+    counter.
+
+    Locking: This function attempts to acquire the GPU lock. If the lock cannot
+             be acquired, the return code is NV_ERR_STATE_IN_USE. The caller
+             must guarantee that no CSL function, including this one, is invoked
+             concurrently with the same CSL context.
+    Memory : This function does not dynamically allocate memory.
+
+    Arguments:
+        uvmCslContext[IN/OUT] - The CSL context associated with a channel.
+        operation[IN]         - Either
+                                  - UVM_CSL_OPERATION_ENCRYPT
+                                  - UVM_CSL_OPERATION_DECRYPT
+
+    Error codes:
+        NV_ERR_INSUFFICIENT_RESOURCES - The rotate operation would cause a
+                                        counter to overflow.
+        NV_ERR_STATE_IN_USE           - Unable to acquire lock / resource.
+                                        Caller can retry at a later time.
+        NV_ERR_INVALID_ARGUMENT       - Invalid value for operation.
+        NV_ERR_GENERIC                - A failure other than _STATE_IN_USE
+                                        occurred when attempting to acquire a
+                                        lock.
+*/
+NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
+                                    UvmCslOperation operation);
+
+/*******************************************************************************
+    nvUvmInterfaceCslEncrypt
+
+    Encrypts data and produces an authentication tag.
+
+    The auth, input, and output buffers must not overlap. If they do, calling
+    this function produces undefined behavior. Performance is typically
+    maximized when the input and output buffers are 16-byte aligned. This is
+    the natural alignment for AES blocks.
+    The encryptIv can be obtained from nvUvmInterfaceCslIncrementIv.
+    However, it is optional. If it is NULL, the next IV in line will be used.
+
+    Locking: This function does not acquire an API or GPU lock.
+             The caller must guarantee that no CSL function, including this one,
+             is invoked concurrently with the same CSL context.
+    Memory : This function does not dynamically allocate memory.
+
+    Arguments:
+        uvmCslContext[IN/OUT] - The CSL context associated with a channel.
+        bufferSize[IN]        - Size of the input and output buffers in
+                                units of bytes. Value can range from 1 byte
+                                to (2^32) - 1 bytes.
+        inputBuffer[IN]       - Address of plaintext input buffer.
+        encryptIv[IN/OUT]     - IV to use for encryption. Can be NULL.
+        outputBuffer[OUT]     - Address of ciphertext output buffer.
+        authTagBuffer[OUT]    - Address of authentication tag buffer.
+                                Its size is UVM_CSL_CRYPT_AUTH_TAG_SIZE_BYTES.
+
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT - The CSL context is not associated with a
+                                  channel.
+                                - The size of the data is 0 bytes.
+                                - The encryptIv has already been used.
+*/
+NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
+                                   NvU32 bufferSize,
+                                   NvU8 const *inputBuffer,
+                                   UvmCslIv *encryptIv,
+                                   NvU8 *outputBuffer,
+                                   NvU8 *authTagBuffer);
+
+/*******************************************************************************
+    nvUvmInterfaceCslDecrypt
+
+    Verifies the authentication tag and decrypts data.
+
+    The auth, input, and output buffers must not overlap. If they do, calling
+    this function produces undefined behavior. Performance is typically
+    maximized when the input and output buffers are 16-byte aligned. This is
+    the natural alignment for AES blocks.
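+
+    For illustration (hypothetical ctx, ciphertext, and authTag variables,
+    error handling elided), a decrypt using the context's internal counter and
+    the current key might look like:
+
+        NvU8 plaintext[64];
+        status = nvUvmInterfaceCslDecrypt(ctx, sizeof(plaintext), ciphertext,
+                                          NULL,       // decryptIv: internal counter
+                                          NV_U32_MAX, // keyRotationId: current key
+                                          plaintext, NULL, 0, authTag);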
+
+    During a key rotation event the previous key is stored in the CSL context.
+    This allows data encrypted by the GPU to be decrypted with the previous key.
+    The keyRotationId parameter identifies which key is used. The first key
+    rotation ID is 0, and it increments by one for each key rotation event.
+
+    Locking: This function does not acquire an API or GPU lock.
+             The caller must guarantee that no CSL function, including this one,
+             is invoked concurrently with the same CSL context.
+    Memory : This function does not dynamically allocate memory.
+
+    Arguments:
+        uvmCslContext[IN/OUT] - The CSL context.
+        bufferSize[IN]        - Size of the input and output buffers in units
+                                of bytes. Value can range from 1 byte to
+                                (2^32) - 1 bytes.
+        decryptIv[IN]         - IV used to decrypt the ciphertext. Its value
+                                can be obtained from nvUvmInterfaceCslIncrementIv;
+                                if NULL, the CSL context's internal counter is
+                                used.
+        keyRotationId[IN]     - Specifies the key that is used for decryption.
+                                A value of NV_U32_MAX specifies the current key.
+        inputBuffer[IN]       - Address of ciphertext input buffer.
+        outputBuffer[OUT]     - Address of plaintext output buffer.
+        addAuthData[IN]       - Address of the plaintext additional authenticated
+                                data used to calculate the authentication tag.
+                                Can be NULL.
+        addAuthDataSize[IN]   - Size of the additional authenticated data in
+                                units of bytes. Value can range from 1 byte to
+                                (2^32) - 1 bytes. This parameter is ignored if
+                                addAuthData is NULL.
+        authTagBuffer[IN]     - Address of authentication tag buffer.
+                                Its size is UVM_CSL_CRYPT_AUTH_TAG_SIZE_BYTES.
+
+    Error codes:
+        NV_ERR_INSUFFICIENT_RESOURCES - The decryption operation would cause a
+                                        counter overflow to occur.
+        NV_ERR_INVALID_ARGUMENT       - The size of the data is 0 bytes.
+        NV_ERR_INVALID_DATA           - Verification of the authentication tag
+                                        fails.
+*/
+NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
+                                   NvU32 bufferSize,
+                                   NvU8 const *inputBuffer,
+                                   UvmCslIv const *decryptIv,
+                                   NvU32 keyRotationId,
+                                   NvU8 *outputBuffer,
+                                   NvU8 const *addAuthData,
+                                   NvU32 addAuthDataSize,
+                                   NvU8 const *authTagBuffer);
+
+/*******************************************************************************
+    nvUvmInterfaceCslSign
+
+    Generates an authentication tag for secure work launch.
+
+    The auth and input buffers must not overlap. If they do, calling this
+    function produces undefined behavior.
+
+    Locking: This function does not acquire an API or GPU lock.
+             The caller must guarantee that no CSL function, including this one,
+             is invoked concurrently with the same CSL context.
+    Memory : This function does not dynamically allocate memory.
+
+    Arguments:
+        uvmCslContext[IN/OUT] - The CSL context associated with a channel.
+        bufferSize[IN]        - Size of the input buffer in units of bytes.
+                                Value can range from 1 byte to (2^32) - 1 bytes.
+        inputBuffer[IN]       - Address of plaintext input buffer.
+        authTagBuffer[OUT]    - Address of authentication tag buffer.
+                                Its size is UVM_CSL_SIGN_AUTH_TAG_SIZE_BYTES.
+
+    Error codes:
+        NV_ERR_INSUFFICIENT_RESOURCES - The signing operation would cause a
+                                        counter overflow to occur.
+        NV_ERR_INVALID_ARGUMENT       - The CSL context is not associated with
+                                        a channel.
+                                      - The size of the data is 0 bytes.
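+
+    Illustrative use for secure work launch (a sketch; ctx, launchData, and
+    launchDataSize are hypothetical):
+
+        NvU8 authTag[UVM_CSL_SIGN_AUTH_TAG_SIZE_BYTES];
+        status = nvUvmInterfaceCslSign(ctx, launchDataSize, launchData, authTag);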
+*/
+NV_STATUS nvUvmInterfaceCslSign(UvmCslContext *uvmCslContext,
+                                NvU32 bufferSize,
+                                NvU8 const *inputBuffer,
+                                NvU8 *authTagBuffer);
+
+/*******************************************************************************
+    nvUvmInterfaceCslQueryMessagePool
+
+    Returns the number of messages that can be encrypted before the message
+    counter will overflow.
+
+    Locking: This function does not acquire an API or GPU lock.
+             The caller must guarantee that no CSL function, including this one,
+             is invoked concurrently with the same CSL context.
+    Memory : This function does not dynamically allocate memory.
+
+    Arguments:
+        uvmCslContext[IN/OUT] - The CSL context.
+        operation[IN]         - Either UVM_CSL_OPERATION_ENCRYPT or
+                                UVM_CSL_OPERATION_DECRYPT.
+        messageNum[OUT]       - Number of messages left before overflow.
+
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT - The value of the operation parameter is
+                                  illegal.
+*/
+NV_STATUS nvUvmInterfaceCslQueryMessagePool(UvmCslContext *uvmCslContext,
+                                            UvmCslOperation operation,
+                                            NvU64 *messageNum);
+
+/*******************************************************************************
+    nvUvmInterfaceCslIncrementIv
+
+    Increments the message counter by the specified amount.
+
+    If iv is non-NULL then the incremented value is returned.
+    If operation is UVM_CSL_OPERATION_ENCRYPT then the returned IV's "freshness"
+    bit is set and the IV can be used in nvUvmInterfaceCslEncrypt. If operation
+    is UVM_CSL_OPERATION_DECRYPT then the returned IV can be used in
+    nvUvmInterfaceCslDecrypt.
+
+    Locking: This function does not acquire an API or GPU lock.
+             The caller must guarantee that no CSL function, including this one,
+             is invoked concurrently with the same CSL context.
+    Memory : This function does not dynamically allocate memory.
+
+    Arguments:
+        uvmCslContext[IN/OUT] - The CSL context.
+        operation[IN]         - Either
+                                  - UVM_CSL_OPERATION_ENCRYPT
+                                  - UVM_CSL_OPERATION_DECRYPT
+        increment[IN]         - The amount by which the IV is incremented.
+                                Can be 0.
+        iv[OUT]               - If non-NULL, a buffer to store the incremented
+                                IV.
+
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT       - The value of the operation parameter is
+                                        illegal.
+        NV_ERR_INSUFFICIENT_RESOURCES - Incrementing the message counter would
+                                        result in an overflow.
+*/
+NV_STATUS nvUvmInterfaceCslIncrementIv(UvmCslContext *uvmCslContext,
+                                       UvmCslOperation operation,
+                                       NvU64 increment,
+                                       UvmCslIv *iv);
+
+/*******************************************************************************
+    nvUvmInterfaceCslLogEncryption
+
+    Checks and logs information about encryptions associated with the given
+    CSL context.
+
+    For contexts associated with channels, this function does not modify
+    elements of the UvmCslContext, and must be called for every CPU/GPU
+    encryption.
+
+    For the context associated with fault buffers, bufferSize can encompass
+    multiple encryption invocations, and the UvmCslContext will be updated
+    following a key rotation event.
+
+    In either case the IV remains unmodified after this function is called.
+
+    Locking: This function does not acquire an API or GPU lock.
+             The caller must guarantee that no CSL function, including this one,
+             is invoked concurrently with the same CSL context.
+    Memory : This function does not dynamically allocate memory.
+
+    Arguments:
+        uvmCslContext[IN/OUT] - The CSL context.
+        operation[IN]         - If the CSL context is associated with a fault
+                                buffer, this argument is ignored.
If it is + associated with a channel, it must be either + - UVM_CSL_OPERATION_ENCRYPT + - UVM_CSL_OPERATION_DECRYPT + bufferSize[IN] - The size of the buffer(s) encrypted by the + external entity in units of bytes. + + Error codes: + NV_ERR_INSUFFICIENT_RESOURCES - The encryption would cause a counter + to overflow. +*/ +NV_STATUS nvUvmInterfaceCslLogEncryption(UvmCslContext *uvmCslContext, + UvmCslOperation operation, + NvU32 bufferSize); +#endif // _NV_UVM_INTERFACE_H_ diff --git a/kernel-open/common/inc/nv_uvm_types.h b/kernel-open/common/inc/nv_uvm_types.h new file mode 100644 index 0000000..0b4ea15 --- /dev/null +++ b/kernel-open/common/inc/nv_uvm_types.h @@ -0,0 +1,1114 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// +// This file provides common types for both the UVM kernel driver and RM's UVM +// interface. +// + +#ifndef _NV_UVM_TYPES_H_ +#define _NV_UVM_TYPES_H_ + +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvgputypes.h" +#include "nvCpuUuid.h" +#include "nv_uvm_user_types.h" // For UvmGpuCachingType, UvmGpuMappingType, etc + + +// +// When modifying flags, make sure they are compatible with the mirrored +// PMA_* flags in phys_mem_allocator.h. +// +// Input flags +#define UVM_PMA_ALLOCATE_DONT_EVICT NVBIT(0) +#define UVM_PMA_ALLOCATE_PINNED NVBIT(1) +#define UVM_PMA_ALLOCATE_SPECIFY_MINIMUM_SPEED NVBIT(2) +#define UVM_PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE NVBIT(3) +#define UVM_PMA_ALLOCATE_SPECIFY_REGION_ID NVBIT(4) +#define UVM_PMA_ALLOCATE_PREFER_SLOWEST NVBIT(5) +#define UVM_PMA_ALLOCATE_CONTIGUOUS NVBIT(6) +#define UVM_PMA_ALLOCATE_PERSISTENT NVBIT(7) +#define UVM_PMA_ALLOCATE_PROTECTED_REGION NVBIT(8) +#define UVM_PMA_ALLOCATE_FORCE_ALIGNMENT NVBIT(9) +#define UVM_PMA_ALLOCATE_NO_ZERO NVBIT(10) +#define UVM_PMA_ALLOCATE_TURN_BLACKLIST_OFF NVBIT(11) +#define UVM_PMA_ALLOCATE_ALLOW_PARTIAL NVBIT(12) + +// Output flags +#define UVM_PMA_ALLOCATE_RESULT_IS_ZERO NVBIT(0) + +// Input flags to pmaFree +#define UVM_PMA_FREE_IS_ZERO NVBIT(0) + +// +// Indicate that the PMA operation is being done from one of the PMA eviction +// callbacks. +// +// Notably this flag is currently used only by the UVM/RM interface and not +// mirrored in PMA. 
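+//
+// For illustration, a caller requesting pinned, contiguous pages from the
+// protected region might combine the input flags as follows (hypothetical
+// usage; options is a UvmPmaAllocationOptions, defined later in this file):
+//
+//     options.flags = UVM_PMA_ALLOCATE_PINNED |
+//                     UVM_PMA_ALLOCATE_CONTIGUOUS |
+//                     UVM_PMA_ALLOCATE_PROTECTED_REGION;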
+//
+#define UVM_PMA_CALLED_FROM_PMA_EVICTION 16384
+
+typedef unsigned long long UvmGpuPointer;
+
+//
+// The following typedefs serve to explain the resources they point to.
+// The actual resources remain RM-internal and are not exposed.
+//
+typedef struct uvmGpuSession_tag      *uvmGpuSessionHandle;      // gpuSessionHandle
+typedef struct uvmGpuDevice_tag       *uvmGpuDeviceHandle;       // gpuDeviceHandle
+typedef struct uvmGpuAddressSpace_tag *uvmGpuAddressSpaceHandle; // gpuAddressSpaceHandle
+typedef struct uvmGpuTsg_tag          *uvmGpuTsgHandle;          // gpuTsgHandle
+typedef struct uvmGpuChannel_tag      *uvmGpuChannelHandle;      // gpuChannelHandle
+typedef struct uvmGpuCopyEngine_tag   *uvmGpuCopyEngineHandle;   // gpuObjectHandle
+
+typedef struct UvmGpuMemoryInfo_tag
+{
+    // Out: Memory layout.
+    NvU32 kind;
+
+    // Out: Set to TRUE, if the allocation is in sysmem.
+    NvBool sysmem;
+
+    // Out: Set to TRUE, if this allocation is treated as EGM.
+    //      sysmem is also TRUE when egm is TRUE.
+    NvBool egm;
+
+    // Out: Set to TRUE, if the allocation is constructed
+    //      under a Device or Subdevice.
+    //      All permutations of sysmem and deviceDescendant are valid.
+    //      !sysmem && !deviceDescendant implies a fabric allocation.
+    NvBool deviceDescendant;
+
+    // Out: Page size associated with the phys alloc.
+    NvU64 pageSize;
+
+    // Out: Set to TRUE, if the allocation is contiguous.
+    NvBool contig;
+
+    // Out: Starting address if the allocation is contiguous.
+    //      This is only valid if contig is NV_TRUE.
+    NvU64 physAddr;
+
+    // Out: Total size of the allocation.
+    NvU64 size;
+
+    // Out: Uuid of the GPU to which the allocation belongs.
+    //      This is only valid if deviceDescendant is NV_TRUE.
+    //      When egm is NV_TRUE, this is also the UUID of the GPU
+    //      for which EGM is local.
+    //      If the GPU has SMC enabled, the UUID is the GI UUID.
+    //      Otherwise, it is the UUID for the physical GPU.
+    //      Note: If the allocation is owned by a device in
+    //      an SLI group and the allocation is broadcast
+    //      across the SLI group, this UUID will be any one
+    //      of the subdevices in the SLI group.
+    NvProcessorUuid uuid;
+} UvmGpuMemoryInfo;
+
+// Some resources must share the same virtual mappings across channels. A mapped
+// resource must be shared by a channel iff:
+//
+// 1) The channel belongs to a TSG (UvmGpuChannelInstanceInfo::bTsgChannel is
+//    NV_TRUE).
+//
+// 2) The channel is in the same TSG as all other channels sharing that mapping
+//    (UvmGpuChannelInstanceInfo::tsgId matches among channels).
+//
+// 3) The channel is in the same GPU address space as the other channels
+//    sharing that mapping.
+//
+// 4) The resource handle(s) match those of the shared mapping
+//    (UvmGpuChannelResourceInfo::resourceDescriptor and
+//    UvmGpuChannelResourceInfo::resourceId).
+typedef struct UvmGpuChannelResourceInfo_tag
+{
+    // Out: Ptr to the RM memDesc of the channel resource.
+    NvP64 resourceDescriptor;
+
+    // Out: RM ID of the channel resource.
+    NvU32 resourceId;
+
+    // Out: Alignment needed for the resource allocation.
+    NvU64 alignment;
+
+    // Out: Info about the resource allocation.
+    UvmGpuMemoryInfo resourceInfo;
+} UvmGpuChannelResourceInfo;
+
+typedef struct UvmGpuPagingChannelInfo_tag
+{
+    // Pointer to a shadow buffer mirroring the contents of the error notifier
+    // for the paging channel
+    NvNotification *shadowErrorNotifier;
+} UvmGpuPagingChannelInfo;
+
+typedef enum
+{
+    UVM_GPU_CHANNEL_ENGINE_TYPE_GR   = 1,
+    UVM_GPU_CHANNEL_ENGINE_TYPE_CE   = 2,
+    UVM_GPU_CHANNEL_ENGINE_TYPE_SEC2 = 3,
+} UVM_GPU_CHANNEL_ENGINE_TYPE;
+
+#define UVM_GPU_CHANNEL_MAX_RESOURCES 13
+
+typedef struct UvmGpuChannelInstanceInfo_tag
+{
+    // Out: Starting address of the channel instance.
+    NvU64 base;
+
+    // Out: Set to NV_TRUE, if the instance is in sysmem.
+    //      Set to NV_FALSE, if the instance is in vidmem.
+    NvBool sysmem;
+
+    // Out: Hardware runlist ID.
+    NvU32 runlistId;
+
+    // Out: Hardware channel ID.
+    NvU32 chId;
+
+    // Out: NV_TRUE if the channel belongs to a subcontext or NV_FALSE if it
+    //      belongs to a regular context.
+    NvBool bInSubctx;
+
+    // Out: ID of the subcontext to which the channel belongs.
+    NvU32 subctxId;
+
+    // Out: Whether the channel belongs to a TSG or not.
+    NvBool bTsgChannel;
+
+    // Out: ID of the TSG to which the channel belongs.
+    NvU32 tsgId;
+
+    // Out: Maximum number of subcontexts in the TSG to which the channel
+    //      belongs.
+    NvU32 tsgMaxSubctxCount;
+
+    // Out: Info of channel resources associated with the channel.
+    UvmGpuChannelResourceInfo resourceInfo[UVM_GPU_CHANNEL_MAX_RESOURCES];
+
+    // Out: Number of valid entries in resourceInfo array.
+    NvU32 resourceCount;
+
+    // Out: Type of the engine the channel is bound to.
+    NvU32 channelEngineType;
+
+    // Out: Channel handle to be used in the CLEAR_FAULTED method.
+    NvU32 clearFaultedToken;
+
+    // Out: Address of the NV_CHRAM_CHANNEL register required to clear the
+    //      ENG_FAULTED/PBDMA_FAULTED bits after servicing non-replayable
+    //      faults on Ampere+ GPUs.
+    volatile NvU32 *pChramChannelRegister;
+
+    // Out: Address of the doorbell.
+    volatile NvU32 *workSubmissionOffset;
+
+    // Out: Channel handle required to ring the doorbell.
+    NvU32 workSubmissionToken;
+
+    // Out: SMC engine ID to which the GR channel is bound, or zero if the GPU
+    //      does not support SMC or it is a CE channel.
+    NvU32 smcEngineId;
+
+    // Out: Start of the VEID range assigned to the SMC engine the GR channel
+    //      is bound to, or zero if the GPU does not support SMC or it is a CE
+    //      channel.
+    NvU32 smcEngineVeIdOffset;
+} UvmGpuChannelInstanceInfo;
+
+typedef struct UvmGpuChannelResourceBindParams_tag
+{
+    // In: RM ID of the channel resource.
+    NvU32 resourceId;
+
+    // In: Starting VA at which the channel resource is mapped.
+    NvU64 resourceVa;
+} UvmGpuChannelResourceBindParams;
+
+typedef struct UvmGpuChannelInfo_tag
+{
+    volatile unsigned *gpGet;
+    volatile unsigned *gpPut;
+    UvmGpuPointer     *gpFifoEntries;
+    unsigned           numGpFifoEntries;
+    unsigned           channelClassNum;
+
+    // The errorNotifier is filled out when the channel hits an RC error.
+    NvNotification    *errorNotifier;
+
+    NvNotification    *keyRotationNotifier;
+
+    NvU32              hwRunlistId;
+    NvU32              hwChannelId;
+
+    volatile unsigned *dummyBar1Mapping;
+
+    // These values are filled by nvUvmInterfaceCopyEngineAlloc. The work
+    // submission token requires the channel to be bound to a runlist and that
+    // happens after CE allocation.
+    volatile NvU32    *workSubmissionOffset;
+
+    // To be deprecated. See pWorkSubmissionToken below.
+ NvU32 workSubmissionToken; + + // + // This is the memory location where the most recently updated work + // submission token for this channel will be written to. After submitting + // new work and updating GP_PUT with the appropriate fence, the token must + // be read from this location before writing it to the workSubmissionOffset + // to kick off the new work. + // + volatile NvU32 *pWorkSubmissionToken; + + // GPU VAs of both GPFIFO and GPPUT are needed in Confidential Computing + // so a channel can be controlled via another channel (SEC2 or WLC/LCIC) + NvU64 gpFifoGpuVa; + NvU64 gpPutGpuVa; + NvU64 gpGetGpuVa; + + // GPU VA of work submission offset is needed in Confidential Computing + // so CE channels can ring doorbell of other channels as required for + // WLC/LCIC work submission + NvU64 workSubmissionOffsetGpuVa; +} UvmGpuChannelInfo; + +typedef enum +{ + // This value must be passed by Pascal and pre-Pascal GPUs for those + // allocations for which a specific location cannot be enforced. + UVM_BUFFER_LOCATION_DEFAULT = 0, + + UVM_BUFFER_LOCATION_SYS = 1, + UVM_BUFFER_LOCATION_VID = 2, +} UVM_BUFFER_LOCATION; + +typedef struct UvmGpuTsgAllocParams_tag +{ + // Interpreted as UVM_GPU_CHANNEL_ENGINE_TYPE + NvU32 engineType; + + // Index of the engine the TSG is bound to. + // Ignored if engineType is anything other than + // UVM_GPU_CHANNEL_ENGINE_TYPE_CE. + NvU32 engineIndex; +} UvmGpuTsgAllocParams; + +typedef struct UvmGpuChannelAllocParams_tag +{ + NvU32 numGpFifoEntries; + + // The next two fields store UVM_BUFFER_LOCATION values + NvU32 gpFifoLoc; + NvU32 gpPutLoc; +} UvmGpuChannelAllocParams; + +typedef struct UvmGpuPagingChannelAllocParams_tag +{ + // Index of the LCE engine the channel will be bound to, a zero-based offset + // from NV2080_ENGINE_TYPE_COPY0. + NvU32 engineIndex; +} UvmGpuPagingChannelAllocParams; + +// The max number of Copy Engines supported by a GPU. +// The gpu ops build has a static assert that this is the correct number. +#define UVM_COPY_ENGINE_COUNT_MAX 64 + +typedef struct +{ + // True if the CE is supported at all + NvBool supported:1; + + // True if the CE is synchronous with GR + NvBool grce:1; + + // True if the CE shares physical CEs with any other CE + // + // The value returned by RM for this field may change when a GPU is + // registered with RM for the first time, so UVM needs to query it + // again each time a GPU is registered. + NvBool shared:1; + + // True if the CE can give enhanced performance for SYSMEM reads over other CEs + NvBool sysmemRead:1; + + // True if the CE can give enhanced performance for SYSMEM writes over other CEs + NvBool sysmemWrite:1; + + // True if the CE can be used for SYSMEM transactions + NvBool sysmem:1; + + // True if the CE can be used for P2P transactions using NVLINK + NvBool nvlinkP2p:1; + + // True if the CE can be used for P2P transactions + NvBool p2p:1; + + // True if the CE supports encryption + NvBool secure:1; + + // True if the CE can be used for fast scrub + NvBool scrub:1; + + // Mask of physical CEs assigned to this LCE + // + // The value returned by RM for this field may change when a GPU is + // registered with RM for the first time, so UVM needs to query it + // again each time a GPU is registered. 
+    NvU32 cePceMask;
+} UvmGpuCopyEngineCaps;
+
+typedef struct UvmGpuCopyEnginesCaps_tag
+{
+    // Supported CEs may not be contiguous
+    UvmGpuCopyEngineCaps copyEngineCaps[UVM_COPY_ENGINE_COUNT_MAX];
+} UvmGpuCopyEnginesCaps;
+
+typedef enum
+{
+    UVM_LINK_TYPE_NONE,
+    UVM_LINK_TYPE_PCIE,
+    UVM_LINK_TYPE_NVLINK_1,
+    UVM_LINK_TYPE_NVLINK_2,
+    UVM_LINK_TYPE_NVLINK_3,
+    UVM_LINK_TYPE_NVLINK_4,
+    UVM_LINK_TYPE_NVLINK_5,
+    UVM_LINK_TYPE_C2C,
+} UVM_LINK_TYPE;
+
+typedef struct UvmGpuCaps_tag
+{
+    // If numaEnabled is NV_TRUE, then the system address of allocated GPU
+    // memory can be converted to struct pages. See
+    // UvmGpuInfo::systemMemoryWindowStart.
+    NvBool numaEnabled;
+    NvU32  numaNodeId;
+} UvmGpuCaps;
+
+typedef struct UvmGpuAddressSpaceInfo_tag
+{
+    NvU64 bigPageSize;
+
+    NvBool atsEnabled;
+
+    // Mapped registers that contain the current GPU time
+    volatile NvU32 *time0Offset;
+    volatile NvU32 *time1Offset;
+
+    // Maximum number of subcontexts supported under this GPU address space
+    NvU32 maxSubctxCount;
+
+    NvBool smcEnabled;
+
+    NvU32 smcSwizzId;
+
+    NvU32 smcGpcCount;
+} UvmGpuAddressSpaceInfo;
+
+typedef struct UvmGpuAllocInfo_tag
+{
+    NvU64    gpuPhysOffset;        // Returns gpuPhysOffset if contiguous requested
+    NvU64    pageSize;             // default is RM big page size (64K or 128K); else use 4K or 2M
+    NvU64    alignment;            // Virtual alignment
+    NvBool   bContiguousPhysAlloc; // Flag to request contiguous physical allocation
+    NvBool   bMemGrowsDown;        // Causes RM to reserve physical heap from top of FB
+    NvBool   bPersistentVidmem;    // Causes RM to allocate persistent video memory
+    NvHandle hPhysHandle;          // Handle for phys allocation either provided or retrieved
+    NvBool   bUnprotected;         // Allocation to be made in unprotected memory whenever
+                                   // SEV or GPU CC modes are enabled. Ignored otherwise.
+} UvmGpuAllocInfo;
+
+typedef struct UvmGpuExternalMappingInfo_tag
+{
+    // In: GPU caching ability.
+    UvmGpuCachingType cachingType;
+
+    // In: Virtual permissions.
+    UvmGpuMappingType mappingType;
+
+    // In: RM virtual mapping memory format
+    UvmGpuFormatType formatType;
+
+    // In: RM virtual mapping element bits
+    UvmGpuFormatElementBits elementBits;
+
+    // In: RM virtual compression type
+    UvmGpuCompressionType compressionType;
+
+    // In: Size of the buffer to store PTEs (in bytes).
+    NvU64 pteBufferSize;
+
+    // In: Page size for mapping
+    //     If this field is passed as 0, the page size
+    //     of the allocation is used for mapping.
+    //     nvUvmInterfaceGetExternalAllocPtes must pass
+    //     this field as zero.
+    NvU64 mappingPageSize;
+
+    // In: Pointer to a buffer to store PTEs.
+    // Out: The interface will fill the buffer with PTEs.
+    NvU64 *pteBuffer;
+
+    // Out: Number of PTEs filled in to the buffer.
+    NvU64 numWrittenPtes;
+
+    // Out: Number of PTEs remaining to be filled
+    //      if the buffer is not sufficient to accommodate
+    //      requested PTEs.
+    NvU64 numRemainingPtes;
+
+    // Out: PTE size (in bytes)
+    NvU32 pteSize;
+
+    // Out: UVM needs to invalidate L2 at unmap
+    NvBool bNeedL2InvalidateAtUnmap;
+} UvmGpuExternalMappingInfo;
+
+typedef struct UvmGpuExternalPhysAddrInfo_tag
+{
+    // In: Virtual permissions. Returns
+    //     NV_ERR_INVALID_ACCESS_TYPE if the input is
+    //     inaccurate.
+    UvmGpuMappingType mappingType;
+
+    // In: Size of the buffer to store PhysAddrs (in bytes).
+    NvU64 physAddrBufferSize;
+
+    // In: Page size for mapping
+    //     If this field is passed as 0, the page size
+    //     of the allocation is used for mapping.
+    //     nvUvmInterfaceGetExternalAllocPtes must pass
+    //     this field as zero.
+    NvU64 mappingPageSize;
+
+    // In: Pointer to a buffer to store PhysAddrs.
+    // Out: The interface will fill the buffer with PhysAddrs.
+    NvU64 *physAddrBuffer;
+
+    // Out: Number of PhysAddrs filled in to the buffer.
+    NvU64 numWrittenPhysAddrs;
+
+    // Out: Number of PhysAddrs remaining to be filled
+    //      if the buffer is not sufficient to accommodate
+    //      requested PhysAddrs.
+    NvU64 numRemainingPhysAddrs;
+} UvmGpuExternalPhysAddrInfo;
+
+typedef struct UvmGpuP2PCapsParams_tag
+{
+    // Out: peerId[i] contains gpu[i]'s peer id of gpu[1 - i]. Only defined if
+    //      the GPUs are direct peers.
+    NvU32 peerIds[2];
+
+    // Out: egmPeerId[i] contains gpu[i]'s EGM peer id of gpu[1 - i]. Only
+    //      defined if the GPUs are direct peers and EGM is enabled in the
+    //      system.
+    NvU32 egmPeerIds[2];
+
+    // Out: UVM_LINK_TYPE
+    NvU32 p2pLink;
+
+    // Out: optimalNvlinkWriteCEs[i] contains gpu[i]'s optimal CE for writing to
+    //      gpu[1 - i]. The CE indexes are valid only if the GPUs are NVLink
+    //      peers.
+    //
+    //      The value returned by RM for this field may change when a GPU is
+    //      registered with RM for the first time, so UVM needs to query it
+    //      again each time a GPU is registered.
+    NvU32 optimalNvlinkWriteCEs[2];
+
+    // Out: Maximum unidirectional bandwidth between the peers in megabytes per
+    //      second, not taking protocol overhead into account. The reported
+    //      bandwidth for indirect peers is zero.
+    NvU32 totalLinkLineRateMBps;
+
+    // Out: IOMMU/DMA mappings of BAR1 of the respective peer vidmem.
+    //      Size is 0 if BAR1 P2P is not supported.
+    NvU64 bar1DmaAddress[2];
+    NvU64 bar1DmaSize[2];
+
+    // True if gpu[i] can use PCIe atomics on locations in gpu[1 - i]'s
+    // BAR1. This implies that gpu[i] can issue PCIe atomics,
+    // gpu[1 - i] can accept PCIe atomics, and the bus interconnect
+    // between the two GPUs can correctly route PCIe atomics.
+    NvBool bar1PcieAtomics[2];
+} UvmGpuP2PCapsParams;
+
+// Platform-wide information
+typedef struct UvmPlatformInfo_tag
+{
+    // Out: ATS (Address Translation Services) is supported
+    NvBool atsSupported;
+
+    // Out: True if HW trusted execution, such as AMD's SEV-SNP or Intel's TDX,
+    //      is enabled in the VM, indicating that Confidential Computing must
+    //      also be enabled in the GPU(s); these two security features are
+    //      either both enabled, or both disabled.
+    NvBool confComputingEnabled;
+} UvmPlatformInfo;
+
+typedef struct UvmGpuClientInfo_tag
+{
+    NvHandle hClient;
+
+    NvHandle hSmcPartRef;
+} UvmGpuClientInfo;
+
+typedef struct UvmGpuConfComputeCaps_tag
+{
+    // Out: true if Confidential Computing is enabled on the GPU
+    NvBool bConfComputingEnabled;
+
+    // Out: true if key rotation is enabled (for UVM keys) on the GPU
+    NvBool bKeyRotationEnabled;
+} UvmGpuConfComputeCaps;
+
+#define UVM_GPU_NAME_LENGTH 0x40
+
+typedef struct UvmGpuInfo_tag
+{
+    // Printable gpu name
+    char name[UVM_GPU_NAME_LENGTH];
+
+    // Uuid of the physical GPU or GI UUID if nvUvmInterfaceGetGpuInfo()
+    // requested information for a valid SMC partition.
+    NvProcessorUuid uuid;
+
+    // Gpu architecture; NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_*
+    NvU32 gpuArch;
+
+    // Gpu implementation; NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_*
+    NvU32 gpuImplementation;
+
+    // Host (gpfifo) class; *_CHANNEL_GPFIFO_*, e.g. KEPLER_CHANNEL_GPFIFO_A
+    NvU32 hostClass;
+
+    // Copy engine (dma) class; *_DMA_COPY_*, e.g. KEPLER_DMA_COPY_A
+    NvU32 ceClass;
+
+    // Compute class; *_COMPUTE_*, e.g. KEPLER_COMPUTE_A
+    NvU32 computeClass;
+
+    // Set if the GPU supports TCC mode and is in TCC mode.
+    NvBool gpuInTcc;
+
+    // Number of subdevices in SLI group.
+    NvU32 subdeviceCount;
+
+    // Virtualization mode of this gpu.
+    NvU32 virtMode; // UVM_VIRT_MODE
+
+    // NV_TRUE if this is a simulated/emulated GPU. NV_FALSE, otherwise.
+    NvBool isSimulated;
+
+    // Number of GPCs
+    // If SMC is enabled, this is the currently configured number of GPCs for
+    // the given partition (also see the smcSwizzId field below).
+    NvU32 gpcCount;
+
+    // Maximum number of GPCs; NV_SCAL_LITTER_NUM_GPCS
+    // This number is independent of the partition configuration, and can be
+    // used to conservatively size GPU-global constructs.
+    NvU32 maxGpcCount;
+
+    // Number of TPCs
+    NvU32 tpcCount;
+
+    // Maximum number of TPCs per GPC
+    NvU32 maxTpcPerGpcCount;
+
+    // Number of access counter buffers.
+    NvU32 accessCntrBufferCount;
+
+    // NV_TRUE if SMC is enabled on this GPU.
+    NvBool smcEnabled;
+
+    // SMC partition ID (unique per GPU); note: valid when first looked up in
+    // nvUvmInterfaceGetGpuInfo(), but not guaranteed to remain valid.
+    // nvUvmInterfaceDeviceCreate() re-verifies the swizzId and fails if it is
+    // no longer valid.
+    NvU32 smcSwizzId;
+
+    UvmGpuClientInfo smcUserClientInfo;
+
+    // Confidential Compute capabilities of this GPU
+    UvmGpuConfComputeCaps gpuConfComputeCaps;
+
+    // UVM_LINK_TYPE
+    NvU32 sysmemLink;
+
+    // See UvmGpuP2PCapsParams::totalLinkLineRateMBps
+    NvU32 sysmemLinkRateMBps;
+
+    // On coherent systems each GPU maps its memory to a window in the System
+    // Physical Address (SPA) space. The following fields describe that window.
+    //
+    // systemMemoryWindowSize > 0 indicates that the window is valid, meaning
+    // that GPU memory can be mapped by the CPU as cache-coherent by adding the
+    // GPU address to the window start.
+    NvU64 systemMemoryWindowStart;
+    NvU64 systemMemoryWindowSize;
+
+    // This tells whether the GPU is connected to NVSwitch. On systems with
+    // NVSwitch all GPUs are connected to it. If connectedToSwitch is NV_TRUE,
+    // nvswitchMemoryWindowStart tells the base address for the GPU in the
+    // NVSwitch address space. It is used when creating PTEs of memory mappings
+    // to NVSwitch peers.
+    NvBool connectedToSwitch;
+    NvU64 nvswitchMemoryWindowStart;
+
+    // Local EGM properties
+    // NV_TRUE if EGM is enabled
+    NvBool egmEnabled;
+
+    // Peer ID to reach local EGM when EGM is enabled
+    NvU8 egmPeerId;
+
+    // EGM base address to offset in the GMMU PTE entry for EGM mappings
+    NvU64 egmBaseAddr;
+
+    // If connectedToSwitch is NV_TRUE, nvswitchEgmMemoryWindowStart tells the
+    // base address for the GPU's EGM memory in the NVSwitch address space. It
+    // is used when creating PTEs of GPU memory mappings to NVSwitch peers.
+    NvU64 nvswitchEgmMemoryWindowStart;
+
+    // GPU supports ATS capability
+    NvBool atsSupport;
+
+    // GPU supports Non-PASID ATS capability
+    NvBool nonPasidAtsSupport;
+} UvmGpuInfo;
+
+typedef struct UvmGpuFbInfo_tag
+{
+    // Max physical address that can be allocated by UVM. This excludes
+    // internal RM regions, which are not registered with PMA either.
+    NvU64 maxAllocatableAddress;
+
+    NvU32 heapSize;          // RAM in KB available for user allocations
+    NvU32 reservedHeapSize;  // RAM in KB reserved for internal RM allocation
+    NvBool bZeroFb;          // Zero FB mode enabled.
+    NvU64 maxVidmemPageSize; // Largest GPU page size to access vidmem.
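+
+    // The following fields describe the static BAR1 mapping; the offset and
+    // size are meaningful only when bStaticBar1Enabled is NV_TRUE.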
+    NvBool bStaticBar1Enabled;       // Static BAR1 mode is enabled
+    NvBool bStaticBar1WriteCombined; // The static mapping is write-combined
+    NvU64 staticBar1StartOffset;     // The start offset of the static mapping
+    NvU64 staticBar1Size;            // The size of the static mapping
+    NvU32 heapStart;                 // The start offset of the heap in KB;
+                                     // helpful for MIG systems
+} UvmGpuFbInfo;
+
+typedef struct UvmGpuEccInfo_tag
+{
+    unsigned eccMask;
+    unsigned eccOffset;
+    void    *eccReadLocation;
+    NvBool  *eccErrorNotifier;
+    NvBool   bEccEnabled;
+} UvmGpuEccInfo;
+
+typedef struct UvmGpuNvlinkInfo_tag
+{
+    unsigned nvlinkMask;
+    unsigned nvlinkOffset;
+    void    *nvlinkReadLocation;
+    NvBool  *nvlinkErrorNotifier;
+    NvBool   bNvlinkRecoveryEnabled;
+} UvmGpuNvlinkInfo;
+
+typedef struct UvmPmaAllocationOptions_tag
+{
+    NvU32 flags;
+    NvU32 minimumSpeed;         // valid if flags & UVM_PMA_ALLOCATE_SPECIFY_MINIMUM_SPEED
+    NvU64 physBegin, physEnd;   // valid if flags & UVM_PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE
+    NvU32 regionId;             // valid if flags & UVM_PMA_ALLOCATE_SPECIFY_REGION_ID
+    NvU64 alignment;            // valid if flags & UVM_PMA_ALLOCATE_FORCE_ALIGNMENT
+    NvLength numPagesAllocated; // valid if flags & UVM_PMA_ALLOCATE_ALLOW_PARTIAL
+
+    NvU32 resultFlags;          // valid if the allocation function returns NV_OK
+} UvmPmaAllocationOptions;
+
+/*******************************************************************************
+    uvmEventSuspend
+    This function will be called by the GPU driver to signal to UVM that the
+    system is about to enter a sleep state. When it is called, the
+    following assumptions/guarantees are valid/made:
+
+      * User channels have been preempted and disabled
+      * UVM channels are still running normally and will continue to do
+        so until after this function returns control
+      * User threads are still running, but can no longer issue system
+        calls to the GPU driver
+      * Until exit from this function, UVM is allowed to make full use of
+        the GPUs under its control, as well as of the GPU driver
+
+    Upon return from this function, UVM may not access GPUs under its control
+    until the GPU driver calls uvmEventResume(). It may still receive
+    calls to uvmEventIsrTopHalf() during this time, which it should return
+    NV_ERR_NO_INTR_PENDING from. It will not receive any other calls.
+*/
+typedef NV_STATUS (*uvmEventSuspend_t) (void);
+
+/*******************************************************************************
+    uvmEventResume
+    This function will be called by the GPU driver to signal to UVM that the
+    system has exited a previously entered sleep state. When it is called,
+    the following assumptions/guarantees are valid/made:
+
+      * UVM is again allowed to make full use of the GPUs under its
+        control, as well as of the GPU driver
+      * UVM channels are running normally
+      * User channels are still preempted and disabled
+      * User threads are again running, but still cannot issue system
+        calls to the GPU driver, nor submit new work
+
+    Upon return from this function, UVM is expected to be fully functional.
+*/
+typedef NV_STATUS (*uvmEventResume_t) (void);
+
+/*******************************************************************************
+    uvmEventStartDevice
+    This function will be called by the GPU driver once it has finished its
+    initialization to tell the UVM driver that this physical GPU has come up.
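+
+    A minimal sketch of a matching callback implementation (hypothetical; the
+    per-GPU bookkeeping is illustrative only):
+
+        static NV_STATUS myStartDevice(const NvProcessorUuid *pGpuUuidStruct)
+        {
+            // Find or create UVM's per-GPU state keyed by this UUID.
+            return NV_OK;
+        }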
+*/
+typedef NV_STATUS (*uvmEventStartDevice_t) (const NvProcessorUuid *pGpuUuidStruct);
+
+/*******************************************************************************
+    uvmEventStopDevice
+    This function will be called by the GPU driver to let UVM know that a
+    physical GPU is going down.
+*/
+typedef NV_STATUS (*uvmEventStopDevice_t) (const NvProcessorUuid *pGpuUuidStruct);
+
+#if defined (_WIN32)
+/*******************************************************************************
+    uvmEventWddmResetDuringTimeout
+    This function will be called by KMD in a TDR servicing path to unmap channel
+    resources and to destroy channels. This is a Windows-specific event.
+*/
+typedef NV_STATUS (*uvmEventWddmResetDuringTimeout_t) (const NvProcessorUuid *pGpuUuidStruct);
+
+/*******************************************************************************
+    uvmEventWddmRestartAfterTimeout
+    This function will be called by KMD in a TDR servicing path to map channel
+    resources and to create channels. This is a Windows-specific event.
+*/
+typedef NV_STATUS (*uvmEventWddmRestartAfterTimeout_t) (const NvProcessorUuid *pGpuUuidStruct);
+
+/*******************************************************************************
+    uvmEventServiceInterrupt
+    This function gets called from RM's interrupt service routine when an
+    interrupt to service a page fault is triggered.
+*/
+typedef NV_STATUS (*uvmEventServiceInterrupt_t) (void *pDeviceObject,
+    NvU32 deviceId, NvU32 subdeviceId);
+#endif
+
+/*******************************************************************************
+    uvmEventIsrTopHalf_t
+    This function will be called by the GPU driver to let UVM know
+    that an interrupt has occurred on the given physical GPU.
+
+    Returns:
+        NV_OK if the UVM driver handled the interrupt
+        NV_ERR_NO_INTR_PENDING if the interrupt is not for the UVM driver
+*/
+typedef NV_STATUS (*uvmEventIsrTopHalf_t) (const NvProcessorUuid *pGpuUuidStruct);
+
+/*******************************************************************************
+    uvmEventDrainP2P
+    This function will be called by the GPU driver to signal to UVM that the
+    GPU has encountered an uncontained error, and all peer work must be drained
+    to recover. When it is called, the following assumptions/guarantees are
+    valid/made:
+
+      * Impacted user channels have been preempted and disabled
+      * UVM channels are still running normally and will continue to do
+        so unless an unrecoverable error is hit on said channels
+      * UVM must not return from this function until all enqueued work on
+        peer channels has drained
+      * In the context of this function call, RM will still service faults
+      * UVM must prevent new peer work from being enqueued until the
+        uvmEventResumeP2P callback is issued
+
+    Returns:
+        NV_OK if UVM has idled peer work and will prevent new peer workloads.
+        NV_ERR_TIMEOUT if peer work was unable to be drained within a timeout.
+        XXX NV_ERR_* for any other failure (TBD)
+
+*/
+typedef NV_STATUS (*uvmEventDrainP2P_t) (const NvProcessorUuid *pGpuUuidStruct);
+
+/*******************************************************************************
+    uvmEventResumeP2P
+    This function will be called by the GPU driver to signal to UVM that the
+    GPU has recovered from the previously reported uncontained NVLINK error.
+    When it is called, the following assumptions/guarantees are valid/made:
+
+      * UVM is again allowed to enqueue peer work
+      * UVM channels are still running normally
+*/
+typedef NV_STATUS (*uvmEventResumeP2P_t) (const NvProcessorUuid *pGpuUuidStruct);
+
+struct UvmEventsLinux
+{
+    uvmEventIsrTopHalf_t isrTopHalf;
+    uvmEventSuspend_t    suspend;
+    uvmEventResume_t     resume;
+    uvmEventDrainP2P_t   drainP2P;
+    uvmEventResumeP2P_t  resumeP2P;
+};
+
+struct UvmEventsWindows
+{
+    uvmEventStartDevice_t startDevice;
+    uvmEventStopDevice_t  stopDevice;
+#if defined (_WIN32)
+    uvmEventWddmResetDuringTimeout_t  wddmResetDuringTimeout;
+    uvmEventWddmRestartAfterTimeout_t wddmRestartAfterTimeout;
+    uvmEventServiceInterrupt_t        serviceInterrupt;
+#endif
+};
+
+#define UVM_CSL_SIGN_AUTH_TAG_SIZE_BYTES 32
+#define UVM_CSL_CRYPT_AUTH_TAG_SIZE_BYTES 16
+
+typedef union UvmFaultMetadataPacket_tag
+{
+    struct {
+        NvU8   authTag[UVM_CSL_CRYPT_AUTH_TAG_SIZE_BYTES];
+        NvBool valid;
+    };
+    // padding to 32 bytes
+    NvU8 _padding[32];
+} UvmFaultMetadataPacket;
+
+// This struct shall not be accessed nor modified directly by UVM, as it is
+// entirely managed by the RM layer.
+typedef struct UvmCslContext_tag
+{
+    struct ccslContext_t *ctx;
+    void *nvidia_stack;
+} UvmCslContext;
+
+typedef struct UvmGpuFaultInfo_tag
+{
+    struct
+    {
+        // Fault buffer GET register mapping.
+        //
+        // When Confidential Computing is enabled, GET refers to the shadow
+        // buffer (see bufferAddress below), and not to the actual HW buffer.
+        // In this setup, writes of GET (by UVM) do not result in re-evaluation
+        // of any interrupt condition.
+        volatile NvU32* pFaultBufferGet;
+
+        // Fault buffer PUT register mapping.
+        //
+        // When Confidential Computing is enabled, PUT refers to the shadow
+        // buffer (see bufferAddress below), and not to the actual HW buffer.
+        // In this setup, writes of PUT (by GSP-RM) do not result in
+        // re-evaluation of any interrupt condition.
+        volatile NvU32* pFaultBufferPut;
+
+        // Note: this variable is deprecated since buffer overflow is not a
+        // separate register on future chips.
+        volatile NvU32* pFaultBufferInfo;
+
+        // Register mapping used to clear a replayable fault interrupt in
+        // Turing+ GPUs.
+        volatile NvU32* pPmcIntr;
+
+        // Register mapping used to enable replayable fault interrupts.
+        volatile NvU32* pPmcIntrEnSet;
+
+        // Register mapping used to disable replayable fault interrupts.
+        volatile NvU32* pPmcIntrEnClear;
+
+        // Register used to enable, or disable, faults on prefetches.
+        volatile NvU32* pPrefetchCtrl;
+
+        // Replayable fault interrupt mask identifier.
+        NvU32 replayableFaultMask;
+
+        // Fault buffer CPU mapping
+        // When Confidential Computing is disabled, the mapping points to the
+        // actual HW fault buffer.
+        //
+        // When Confidential Computing is enabled, the mapping points to a
+        // copy of the HW fault buffer. This "shadow buffer" is maintained
+        // by GSP-RM.
+        void* bufferAddress;
+
+        // Size, in bytes, of the fault buffer pointed to by bufferAddress.
+        NvU32 bufferSize;
+
+        // Mapping pointing to the start of the fault buffer metadata
+        // containing a 16-byte authentication tag and a valid byte. Always
+        // NULL when Confidential Computing is disabled.
+        UvmFaultMetadataPacket *bufferMetadata;
+
+        // CSL context used for performing decryption of replayable faults
+        // when Confidential Computing is enabled.
+        UvmCslContext cslCtx;
+    } replayable;
+    struct
+    {
+        // Shadow buffer for non-replayable faults in CPU memory.
+        // Resman copies the non-replayable faults that need to be handled by
+        // UVM into this buffer.
+        void* shadowBufferAddress;
+
+        // Execution context for the queue associated with the fault buffer
+        void* shadowBufferContext;
+
+        // Fault buffer size
+        NvU32 bufferSize;
+
+        // Preallocated stack for functions called from the UVM isr top half
+        void *isr_sp;
+
+        // Preallocated stack for functions called from the UVM isr bottom half
+        void *isr_bh_sp;
+
+        // Used only when Hopper Confidential Compute is enabled
+        // Register mappings obtained from RM
+        volatile NvU32* pFaultBufferPut;
+
+        // Used only when Hopper Confidential Compute is enabled
+        // Cached get index of the non-replayable shadow buffer
+        NvU32 shadowBufferGet;
+
+        // See replayable.bufferMetadata
+        UvmFaultMetadataPacket *shadowBufferMetadata;
+    } nonReplayable;
+    NvHandle faultBufferHandle;
+    struct Device *pDevice;
+} UvmGpuFaultInfo;
+
+struct Device;
+
+typedef struct UvmGpuPagingChannel_tag
+{
+    struct gpuDevice *device;
+    NvNotification   *errorNotifier;
+    NvHandle          channelHandle;
+    NvHandle          errorNotifierHandle;
+    void             *pushStreamSp;
+    struct Device    *pDevice;
+} UvmGpuPagingChannel, *UvmGpuPagingChannelHandle;
+
+typedef struct UvmGpuAccessCntrInfo_tag
+{
+    // Register mappings obtained from RM
+    // Pointer to the GET register for the access counter buffer
+    volatile NvU32* pAccessCntrBufferGet;
+    // Pointer to the PUT register for the access counter buffer
+    volatile NvU32* pAccessCntrBufferPut;
+    // Pointer to the FULL register for the access counter buffer
+    volatile NvU32* pAccessCntrBufferFull;
+    // Pointer to the hub interrupt
+    volatile NvU32* pHubIntr;
+    // Pointer to the interrupt enable register
+    volatile NvU32* pHubIntrEnSet;
+    // Pointer to the interrupt disable register
+    volatile NvU32* pHubIntrEnClear;
+    // Mask for the access counter buffer
+    NvU32 accessCounterMask;
+    // Access counter buffer CPU mapping and size
+    void* bufferAddress;
+    NvU32 bufferSize;
+    NvHandle accessCntrBufferHandle;
+} UvmGpuAccessCntrInfo;
+
+typedef struct UvmGpuAccessCntrConfig_tag
+{
+    NvU32 granularity;
+    NvU32 threshold;
+} UvmGpuAccessCntrConfig;
+
+//
+// When modifying this enum, make sure it is compatible with the mirrored
+// MEMORY_PROTECTION enum in phys_mem_allocator.h.
+//
+typedef enum UvmPmaGpuMemoryType_tag
+{
+    UVM_PMA_GPU_MEMORY_TYPE_UNPROTECTED = 0,
+    UVM_PMA_GPU_MEMORY_TYPE_PROTECTED   = 1
+} UVM_PMA_GPU_MEMORY_TYPE;
+
+typedef UvmGpuChannelInfo gpuChannelInfo;
+typedef UvmGpuTsgAllocParams gpuTsgAllocParams;
+typedef UvmGpuChannelAllocParams gpuChannelAllocParams;
+typedef UvmGpuCaps gpuCaps;
+typedef UvmGpuCopyEngineCaps gpuCeCaps;
+typedef UvmGpuCopyEnginesCaps gpuCesCaps;
+typedef UvmGpuP2PCapsParams getP2PCapsParams;
+typedef UvmGpuAddressSpaceInfo gpuAddressSpaceInfo;
+typedef UvmGpuAllocInfo gpuAllocInfo;
+typedef UvmGpuInfo gpuInfo;
+typedef UvmGpuClientInfo gpuClientInfo;
+typedef UvmGpuAccessCntrInfo gpuAccessCntrInfo;
+typedef UvmGpuAccessCntrConfig gpuAccessCntrConfig;
+typedef UvmGpuFaultInfo gpuFaultInfo;
+typedef UvmGpuMemoryInfo gpuMemoryInfo;
+typedef UvmGpuExternalMappingInfo gpuExternalMappingInfo;
+typedef UvmGpuExternalPhysAddrInfo gpuExternalPhysAddrInfo;
+typedef UvmGpuChannelResourceInfo gpuChannelResourceInfo;
+typedef UvmGpuChannelInstanceInfo gpuChannelInstanceInfo;
+typedef UvmGpuChannelResourceBindParams gpuChannelResourceBindParams;
+typedef UvmGpuFbInfo gpuFbInfo;
+typedef UvmGpuEccInfo gpuEccInfo;
+typedef UvmGpuNvlinkInfo gpuNvlinkInfo;
+typedef UvmGpuPagingChannel *gpuPagingChannelHandle;
+typedef UvmGpuPagingChannelInfo gpuPagingChannelInfo;
+typedef UvmGpuPagingChannelAllocParams gpuPagingChannelAllocParams;
+typedef UvmPmaAllocationOptions gpuPmaAllocationOptions;
+
+typedef struct UvmCslIv
+{
+    NvU8 iv[12];
+    NvU8 fresh;
+} UvmCslIv;
+
+typedef enum UvmCslOperation
+{
+    UVM_CSL_OPERATION_ENCRYPT,
+    UVM_CSL_OPERATION_DECRYPT
+} UvmCslOperation;
+
+typedef enum UVM_KEY_ROTATION_STATUS {
+    // Key rotation complete/not in progress
+    UVM_KEY_ROTATION_STATUS_IDLE = 0,
+    // RM is waiting for clients to report their channels are idle for key rotation
+    UVM_KEY_ROTATION_STATUS_PENDING = 1,
+    // Key rotation is in progress
+    UVM_KEY_ROTATION_STATUS_IN_PROGRESS = 2,
+    // Key rotation timeout failure, RM will RC non-idle channels.
+    // UVM should never see this status value.
+    UVM_KEY_ROTATION_STATUS_FAILED_TIMEOUT = 3,
+    // Key rotation failed because the upper threshold was crossed, RM will RC
+    // non-idle channels.
+    UVM_KEY_ROTATION_STATUS_FAILED_THRESHOLD = 4,
+    // Internal RM failure while rotating keys for a certain channel, RM will
+    // RC the channel.
+    UVM_KEY_ROTATION_STATUS_FAILED_ROTATION = 5,
+    UVM_KEY_ROTATION_STATUS_MAX_COUNT = 6,
+} UVM_KEY_ROTATION_STATUS;
+
+#endif // _NV_UVM_TYPES_H_
diff --git a/kernel-open/common/inc/nv_uvm_user_types.h b/kernel-open/common/inc/nv_uvm_user_types.h
new file mode 100644
index 0000000..a222857
--- /dev/null
+++ b/kernel-open/common/inc/nv_uvm_user_types.h
@@ -0,0 +1,166 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+//
+// This file provides common types for the UVM kernel driver, UVM user layer,
+// and RM's UVM interface.
+//
+
+#ifndef _NV_UVM_USER_TYPES_H_
+#define _NV_UVM_USER_TYPES_H_
+
+#include "nvtypes.h"
+
+//
+// The default page size is "0" because in RM the BIG page size is the default,
+// and there are multiple BIG page sizes in RM. Since these defines are used as
+// flags, "0" should be fine when the user is not sure which page size
+// allocation it wants.
+//
+#define UVM_PAGE_SIZE_DEFAULT 0x0ULL
+#define UVM_PAGE_SIZE_4K      0x1000ULL
+#define UVM_PAGE_SIZE_64K     0x10000ULL
+#define UVM_PAGE_SIZE_128K    0x20000ULL
+#define UVM_PAGE_SIZE_2M      0x200000ULL
+#define UVM_PAGE_SIZE_512M    0x20000000ULL
+#define UVM_PAGE_SIZE_256G    0x4000000000ULL
+
+typedef enum
+{
+    UVM_VIRT_MODE_NONE           = 0, // Baremetal or passthrough virtualization
+    UVM_VIRT_MODE_LEGACY         = 1, // Virtualization without SRIOV support
+    UVM_VIRT_MODE_SRIOV_HEAVY    = 2, // Virtualization with SRIOV Heavy configured
+    UVM_VIRT_MODE_SRIOV_STANDARD = 3, // Virtualization with SRIOV Standard configured
+    UVM_VIRT_MODE_COUNT          = 4,
+} UVM_VIRT_MODE;
+
+//------------------------------------------------------------------------------
+// UVM GPU mapping types
+//
+// These types indicate the kinds of accesses allowed from a given GPU at the
+// specified virtual address range. There are 3 basic kinds of accesses: read,
+// write and atomics. Each type indicates which kinds of accesses are allowed.
+// Accesses of any disallowed kind are fatal. The "Default" type specifies that
+// the UVM driver should decide on the types of accesses allowed.
+//------------------------------------------------------------------------------
+typedef enum
+{
+    UvmGpuMappingTypeDefault = 0,
+    UvmGpuMappingTypeReadWriteAtomic = 1,
+    UvmGpuMappingTypeReadWrite = 2,
+    UvmGpuMappingTypeReadOnly = 3,
+    UvmGpuMappingTypeCount = 4
+} UvmGpuMappingType;
+
+//------------------------------------------------------------------------------
+// UVM GPU caching types
+//
+// These types indicate the cacheability of the specified virtual address range
+// from a given GPU. The "Default" type specifies that the UVM driver should
+// set caching on or off as required to follow the UVM coherence model. The
+// "ForceUncached" and "ForceCached" types will always turn caching off or on,
+// respectively. These two types override the cacheability specified by the UVM
+// coherence model.
+//------------------------------------------------------------------------------
+typedef enum
+{
+    UvmGpuCachingTypeDefault = 0,
+    UvmGpuCachingTypeForceUncached = 1,
+    UvmGpuCachingTypeForceCached = 2,
+    UvmGpuCachingTypeCount = 3
+} UvmGpuCachingType;
+
+//------------------------------------------------------------------------------
+// UVM GPU format types
+//
+// These types indicate the memory format of the specified virtual address
+// range for a given GPU. The "Default" type specifies that the UVM driver will
+// detect the format based on the allocation, and it must be used together with
+// UvmGpuFormatElementBitsDefault.
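+// For example, a block-linear mapping of 32-bit elements would pass
+// UvmGpuFormatTypeBlockLinear together with UvmGpuFormatElementBits32, while
+// UvmGpuFormatTypeDefault must be paired with UvmGpuFormatElementBitsDefault.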
+//------------------------------------------------------------------------------ +typedef enum { + UvmGpuFormatTypeDefault = 0, + UvmGpuFormatTypeBlockLinear = 1, + UvmGpuFormatTypeCount = 2 +} UvmGpuFormatType; + +//------------------------------------------------------------------------------ +// UVM GPU Element bits types +// +// These types indicate the element size of the specified virtual address range +// for a given GPU. The "Default" type specifies that the UVM driver will +// detect the element size based on the allocation and is mutually inclusive +// with UvmGpuFormatTypeDefault. The element size is specified in bits: +// UvmGpuFormatElementBits8 uses the 8-bits format. +//------------------------------------------------------------------------------ +typedef enum { + UvmGpuFormatElementBitsDefault = 0, + UvmGpuFormatElementBits8 = 1, + UvmGpuFormatElementBits16 = 2, + // Cuda does not support 24-bit width + UvmGpuFormatElementBits32 = 4, + UvmGpuFormatElementBits64 = 5, + UvmGpuFormatElementBits128 = 6, + UvmGpuFormatElementBitsCount = 7 +} UvmGpuFormatElementBits; + +//------------------------------------------------------------------------------ +// UVM GPU Compression types +// +// These types indicate the compression type of the specified virtual address +// range for a given GPU. The "Default" type specifies that the UVM driver will +// detect the compression attributes based on the allocation. Any type other +// than the default will override the compression behavior of the physical +// allocation. UvmGpuCompressionTypeEnabledNoPlc will disable PLC but enables +// generic compression. UvmGpuCompressionTypeEnabledNoPlc type is only supported +// on Turing plus GPUs. Since UvmGpuCompressionTypeEnabledNoPlc type enables +// generic compression, it can only be used when the compression attribute of +// the underlying physical allocation is enabled. +//------------------------------------------------------------------------------ +typedef enum { + UvmGpuCompressionTypeDefault = 0, + UvmGpuCompressionTypeEnabledNoPlc = 1, + UvmGpuCompressionTypeCount = 2 +} UvmGpuCompressionType; + +// +// Mirrored in PMA (PMA_STATS) +// +typedef struct UvmPmaStatistics_tag +{ + volatile NvU64 numPages2m; // PMA-wide 2MB pages count across all regions + volatile NvU64 numFreePages64k; // PMA-wide free 64KB page count across all regions + volatile NvU64 numFreePages2m; // PMA-wide free 2MB pages count across all regions + volatile NvU64 numPages2mProtected; // PMA-wide 2MB pages count in protected memory + volatile NvU64 numFreePages64kProtected; // PMA-wide free 64KB page count in protected memory + volatile NvU64 numFreePages2mProtected; // PMA-wide free 2MB pages count in protected memory +} UvmPmaStatistics; + +typedef enum +{ + UVM_ACCESS_COUNTER_GRANULARITY_64K = 1, + UVM_ACCESS_COUNTER_GRANULARITY_2M = 2, + UVM_ACCESS_COUNTER_GRANULARITY_16M = 3, + UVM_ACCESS_COUNTER_GRANULARITY_16G = 4, +} UVM_ACCESS_COUNTER_GRANULARITY; + +#endif // _NV_UVM_USER_TYPES_H_ diff --git a/kernel-open/common/inc/nvgputypes.h b/kernel-open/common/inc/nvgputypes.h new file mode 100644 index 0000000..d018414 --- /dev/null +++ b/kernel-open/common/inc/nvgputypes.h @@ -0,0 +1,177 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2006 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
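// Sketch of the "mutually inclusive" rule stated for the two format enums
// above: a Default format type must be paired with Default element bits and
// vice versa. The helper name is illustrative only.
static inline NvBool exampleFormatPairingValid(UvmGpuFormatType type,
                                               UvmGpuFormatElementBits bits)
{
    // Either both are Default or neither is.
    return (type == UvmGpuFormatTypeDefault) ==
           (bits == UvmGpuFormatElementBitsDefault);
}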
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + + /***************************************************************************\ +|* *| +|* NV GPU Types *| +|* *| +|* This header contains definitions describing NVIDIA's GPU hardware state. *| +|* *| + \***************************************************************************/ + + +#ifndef NVGPUTYPES_INCLUDED +#define NVGPUTYPES_INCLUDED +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + + /***************************************************************************\ +|* NvNotification *| + \***************************************************************************/ + +/***** NvNotification Structure *****/ +/* + * NV objects return information about method completion to clients via an + * array of notification structures in main memory. + * + * The client sets the status field to NV???_NOTIFICATION_STATUS_IN_PROGRESS. + * NV fills in the NvNotification[] data structure in the following order: + * timeStamp, otherInfo32, otherInfo16, and then status. + */ + +/* memory data structures */ +typedef volatile struct NvNotificationRec { + struct { /* 0000- */ + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvV32 info32; /* info returned depends on method 0008-000b*/ + NvV16 info16; /* info returned depends on method 000c-000d*/ + NvV16 status; /* user sets bit 15, NV sets status 000e-000f*/ +} NvNotification; + + /***************************************************************************\ +|* NvGpuSemaphore *| + \***************************************************************************/ + +/***** NvGpuSemaphore Structure *****/ +/* + * NvGpuSemaphore objects are used by the GPU to synchronize multiple + * command-streams. + * + * Please refer to class documentation for details regarding the content of + * the data[] field. + */ + +/* memory data structures */ +typedef volatile struct NvGpuSemaphoreRec { + NvV32 data[2]; /* Payload/Report data 0000-0007*/ + struct { /* 0008- */ + NvV32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 8- f*/ + } timeStamp; /* -000f*/ +} NvGpuSemaphore; + + /***************************************************************************\ +|* NvGetReport *| + \***************************************************************************/ + +/* + * NV objects, starting with Kelvin, return information such as pixel counts to + * the user via the NV*_GET_REPORT method. 
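// Sketch of the NvNotification handshake documented above. The status value
// here is a stand-in for the class-specific NV???_NOTIFICATION_STATUS_IN_PROGRESS
// (bit 15 set, per the "user sets bit 15" convention); real clients use their
// class's definition. Because NV writes status last, once status changes the
// timeStamp/info32/info16 fields are already valid.
#define EXAMPLE_NOTIFICATION_IN_PROGRESS 0x8000

static inline void exampleWaitForNotifier(NvNotification *notifier)
{
    notifier->status = (NvV16)EXAMPLE_NOTIFICATION_IN_PROGRESS;
    /* ... submit the method that completes into this notifier ... */
    while ((notifier->status & EXAMPLE_NOTIFICATION_IN_PROGRESS) != 0) {
        /* spin; the struct is volatile, so each pass re-reads memory  */
        /* (a real client would yield the CPU or wait on an event)     */
    }
}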
+ * + * The client fills in the "zero" field to any nonzero value and waits until it + * becomes zero. NV fills in the timeStamp, value, and zero fields. + */ +typedef volatile struct NVGetReportRec { + struct { /* 0000- */ + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvU32 value; /* info returned depends on method 0008-000b*/ + NvU32 zero; /* always written to zero 000c-000f*/ +} NvGetReport; + + /***************************************************************************\ +|* NvRcNotification *| + \***************************************************************************/ + +/* + * NV robust channel notification information is reported to clients via + * standard NV01_EVENT objects bound to instance of the NV*_CHANNEL_DMA and + * NV*_CHANNEL_GPFIFO objects. + */ +typedef struct NvRcNotificationRec { + struct { + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvU32 exceptLevel; /* exception level 000c-000f*/ + NvU32 exceptType; /* exception type 0010-0013*/ +} NvRcNotification; + + /***************************************************************************\ +|* NvSyncPointFence *| + \***************************************************************************/ + +/***** NvSyncPointFence Structure *****/ +/* + * NvSyncPointFence objects represent a syncpoint event. The syncPointID + * identifies the syncpoint register and the value is the value that the + * register will contain right after the event occurs. + * + * If syncPointID contains NV_INVALID_SYNCPOINT_ID then this is an invalid + * event. This is often used to indicate an event in the past (i.e. no need to + * wait). + * + * For more info on syncpoints refer to Mobile channel and syncpoint + * documentation. + */ +typedef struct NvSyncPointFenceRec { + NvU32 syncPointID; + NvU32 value; +} NvSyncPointFence; + +#define NV_INVALID_SYNCPOINT_ID ((NvU32)-1) + + /***************************************************************************\ +|* *| +|* 64 bit type definitions for use in interface structures. *| +|* *| + \***************************************************************************/ + +typedef NvU64 NvOffset; /* GPU address */ + +#define NvOffset_HI32(n) ((NvU32)(((NvU64)(n)) >> 32)) +#define NvOffset_LO32(n) ((NvU32)((NvU64)(n))) + +/* +* There are two types of GPU-UUIDs available: +* +* (1) a SHA-256 based 32 byte ID, formatted as a 64 character +* hexadecimal string as "GPU-%16x-%08x-%08x-%08x-%024x"; this is +* deprecated. +* +* (2) a SHA-1 based 16 byte ID, formatted as a 32 character +* hexadecimal string as "GPU-%08x-%04x-%04x-%04x-%012x" (the +* canonical format of a UUID); this is the default. +*/ +#define NV_GPU_UUID_SHA1_LEN (16) +#define NV_GPU_UUID_SHA256_LEN (32) +#define NV_GPU_UUID_LEN NV_GPU_UUID_SHA1_LEN + +#ifdef __cplusplus +}; +#endif + +#endif /* NVGPUTYPES_INCLUDED */ diff --git a/kernel-open/common/inc/nvi2c.h b/kernel-open/common/inc/nvi2c.h new file mode 100644 index 0000000..28c1ba5 --- /dev/null +++ b/kernel-open/common/inc/nvi2c.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
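// Quick illustration of the NvOffset helpers above: splitting a 64-bit GPU
// address into the 32-bit halves that many interface structures expect, and
// recombining them. The address value is arbitrary.
static inline NvOffset exampleNvOffsetRoundTrip(void)
{
    NvOffset addr = 0x12345678ABCDEF00ULL;
    NvU32 hi = NvOffset_HI32(addr);   /* 0x12345678 */
    NvU32 lo = NvOffset_LO32(addr);   /* 0xABCDEF00 */
    return ((NvOffset)hi << 32) | lo; /* == addr */
}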
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_I2C_H_ +#define _NV_I2C_H_ + +#define NV_I2C_MSG_WR 0x0000 +#define NV_I2C_MSG_RD 0x0001 + +typedef struct nv_i2c_msg_s +{ + NvU16 addr; + NvU16 flags; + NvU16 len; + NvU8* buf; +} nv_i2c_msg_t; + +#endif diff --git a/kernel-open/common/inc/nvimpshared.h b/kernel-open/common/inc/nvimpshared.h new file mode 100644 index 0000000..202c41d --- /dev/null +++ b/kernel-open/common/inc/nvimpshared.h @@ -0,0 +1,96 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************************************************************\ +* * +* Description: * +* Accommodates sharing of IMP-related structures between kernel interface * +* files and core RM. * +* * +\******************************************************************************/ + +#pragma once + +#include <nvtypes.h> +#if defined(_MSC_VER) +#pragma warning(disable:4324) +#endif + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: nvimpshared.finn +// + + + + +// +// There are only a small number of discrete dramclk frequencies available on +// the system. This structure contains IMP-relevant information associated +// with a specific dramclk frequency.
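// Sketch of a typical register read built from the nv_i2c_msg_t type above:
// a write message carrying the register address followed by a read message
// for the data, mirroring the usual Linux i2c_msg pattern. The 0x50 target
// address and buffer handling are placeholders.
static inline void exampleBuildI2cRegRead(nv_i2c_msg_t msgs[2],
                                          NvU8 *regAddr, NvU8 *data,
                                          NvU16 dataLen)
{
    msgs[0].addr  = 0x50;           // hypothetical 7-bit target address
    msgs[0].flags = NV_I2C_MSG_WR;
    msgs[0].len   = 1;              // one byte: the register address
    msgs[0].buf   = regAddr;

    msgs[1].addr  = 0x50;
    msgs[1].flags = NV_I2C_MSG_RD;
    msgs[1].len   = dataLen;        // read back dataLen bytes
    msgs[1].buf   = data;
}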
+// +typedef struct DRAM_CLK_INSTANCE { + NvU32 dram_clk_freq_khz; + + NvU32 mchub_clk_khz; + + NvU32 mc_clk_khz; + + NvU32 max_iso_bw_kbps; + + // + // switch_latency_ns is the maximum time required to switch the dramclk + // frequency to the frequency specified in dram_clk_freq_khz. + // + NvU32 switch_latency_ns; +} DRAM_CLK_INSTANCE; + +// +// This table is used to collect information from other modules that is needed +// for RM IMP calculations. (Used on Tegra only.) +// +typedef struct TEGRA_IMP_IMPORT_DATA { + // + // max_iso_bw_kbps stores the maximum possible ISO bandwidth available to + // display, assuming display is the only active ISO client. (Note that ISO + // bandwidth will typically be allocated to multiple clients, so display + // will generally not have access to the maximum possible bandwidth.) + // + NvU32 max_iso_bw_kbps; + + // On Orin, each dram channel is 16 bits wide. + NvU32 num_dram_channels; + + // + // dram_clk_instance stores entries for all possible dramclk frequencies, + // sorted by dramclk frequency in increasing order. + // + // "24" is expected to be larger than the actual number of required entries + // (which is provided by a BPMP API), but it can be increased if necessary. + // + // num_dram_clk_entries is filled in with the actual number of distinct + // dramclk entries. + // + NvU32 num_dram_clk_entries; + DRAM_CLK_INSTANCE dram_clk_instance[24]; +} TEGRA_IMP_IMPORT_DATA; diff --git a/kernel-open/common/inc/nvkms-api-types.h b/kernel-open/common/inc/nvkms-api-types.h new file mode 100644 index 0000000..ff98237 --- /dev/null +++ b/kernel-open/common/inc/nvkms-api-types.h @@ -0,0 +1,788 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
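// A small usage sketch (hypothetical helper): because dram_clk_instance[]
// above is sorted by dramclk frequency in increasing order, the first entry
// whose max_iso_bw_kbps covers a requirement is the lowest sufficient
// frequency.
static inline const DRAM_CLK_INSTANCE *
exampleLowestSufficientDramClk(const TEGRA_IMP_IMPORT_DATA *data,
                               NvU32 requiredIsoBwKbps)
{
    NvU32 i;

    for (i = 0; i < data->num_dram_clk_entries; i++) {
        if (data->dram_clk_instance[i].max_iso_bw_kbps >= requiredIsoBwKbps) {
            return &data->dram_clk_instance[i];
        }
    }
    return NULL;  // no dramclk frequency provides enough ISO bandwidth
}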
+ */ + +#if !defined(NVKMS_API_TYPES_H) +#define NVKMS_API_TYPES_H + +#include <nvtypes.h> +#include <nvmisc.h> +#include <nvlimits.h> + +#define NVKMS_MAX_SUBDEVICES NV_MAX_SUBDEVICES +#define NVKMS_MAX_HEADS_PER_DISP NV_MAX_HEADS + +#define NVKMS_LEFT 0 +#define NVKMS_RIGHT 1 +#define NVKMS_MAX_EYES 2 + +#define NVKMS_MAIN_LAYER 0 +#define NVKMS_OVERLAY_LAYER 1 +#define NVKMS_MAX_LAYERS_PER_HEAD 8 + +#define NVKMS_MAX_PLANES_PER_SURFACE 3 + +#define NVKMS_DP_ADDRESS_STRING_LENGTH 64 + +#define NVKMS_DEVICE_ID_TEGRA 0x0000ffff + +#define NVKMS_MAX_SUPERFRAME_VIEWS 4 + +#define NVKMS_LOG2_LUT_ARRAY_SIZE 10 +#define NVKMS_LUT_ARRAY_SIZE (1 << NVKMS_LOG2_LUT_ARRAY_SIZE) + +#define NVKMS_OLUT_FP_NORM_SCALE_DEFAULT 0xffffffff + +typedef NvU32 NvKmsDeviceHandle; +typedef NvU32 NvKmsDispHandle; +typedef NvU32 NvKmsConnectorHandle; +typedef NvU32 NvKmsSurfaceHandle; +typedef NvU32 NvKmsFrameLockHandle; +typedef NvU32 NvKmsDeferredRequestFifoHandle; +typedef NvU32 NvKmsSwapGroupHandle; +typedef NvU32 NvKmsVblankSyncObjectHandle; +typedef NvU32 NvKmsVblankSemControlHandle; + +struct NvKmsSize { + NvU16 width; + NvU16 height; +}; + +struct NvKmsPoint { + NvU16 x; + NvU16 y; +}; + +struct NvKmsSignedPoint { + NvS16 x; + NvS16 y; +}; + +struct NvKmsRect { + NvU16 x; + NvU16 y; + NvU16 width; + NvU16 height; +}; + +/* + * A 3x3 row-major matrix. + * + * The elements are 32-bit single-precision IEEE floating point values. The + * floating point bit pattern should be stored in NvU32s to be passed into the + * kernel. + */ +struct NvKmsMatrix { + NvU32 m[3][3]; +}; + +typedef enum { + NVKMS_CONNECTOR_TYPE_DP = 0, + NVKMS_CONNECTOR_TYPE_VGA = 1, + NVKMS_CONNECTOR_TYPE_DVI_I = 2, + NVKMS_CONNECTOR_TYPE_DVI_D = 3, + NVKMS_CONNECTOR_TYPE_ADC = 4, + NVKMS_CONNECTOR_TYPE_LVDS = 5, + NVKMS_CONNECTOR_TYPE_HDMI = 6, + NVKMS_CONNECTOR_TYPE_USBC = 7, + NVKMS_CONNECTOR_TYPE_DSI = 8, + NVKMS_CONNECTOR_TYPE_DP_SERIALIZER = 9, + NVKMS_CONNECTOR_TYPE_UNKNOWN = 10, + NVKMS_CONNECTOR_TYPE_MAX = NVKMS_CONNECTOR_TYPE_UNKNOWN, +} NvKmsConnectorType; + +static inline +const char *NvKmsConnectorTypeString(const NvKmsConnectorType connectorType) +{ + switch (connectorType) { + case NVKMS_CONNECTOR_TYPE_DP: return "DP"; + case NVKMS_CONNECTOR_TYPE_VGA: return "VGA"; + case NVKMS_CONNECTOR_TYPE_DVI_I: return "DVI-I"; + case NVKMS_CONNECTOR_TYPE_DVI_D: return "DVI-D"; + case NVKMS_CONNECTOR_TYPE_ADC: return "ADC"; + case NVKMS_CONNECTOR_TYPE_LVDS: return "LVDS"; + case NVKMS_CONNECTOR_TYPE_HDMI: return "HDMI"; + case NVKMS_CONNECTOR_TYPE_USBC: return "USB-C"; + case NVKMS_CONNECTOR_TYPE_DSI: return "DSI"; + case NVKMS_CONNECTOR_TYPE_DP_SERIALIZER: return "DP-SERIALIZER"; + default: break; + } + return "Unknown"; +} + +typedef enum { + NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA = 0, + NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS = 1, + NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS = 2, + NVKMS_CONNECTOR_SIGNAL_FORMAT_DP = 3, + NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI = 4, + NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN = 5, + NVKMS_CONNECTOR_SIGNAL_FORMAT_MAX = + NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN, +} NvKmsConnectorSignalFormat; + +/*! + * Description of Notifiers and Semaphores (Non-isochronous (NISO) surfaces). + * + * When flipping, the client can optionally specify a notifier and/or + * a semaphore to use with the flip. The surfaces used for these + * should be registered with NVKMS to get an NvKmsSurfaceHandle. + * + * NvKmsNIsoSurface::offsetInWords indicates the starting location, in + * 32-bit words, within the surface where EVO should write the + * notifier or semaphore.
Note that only the first 4096 bytes of a + * surface can be used by semaphores or notifiers; offsetInWords must + * allow for the semaphore or notifier to be written within the first + * 4096 bytes of the surface. I.e., this must be satisfied: + * + * ((offsetInWords * 4) + elementSizeInBytes) <= 4096 + * + * Where elementSizeInBytes is: + * + * if NISO_FORMAT_FOUR_WORD*, elementSizeInBytes = 16 + * if NISO_FORMAT_LEGACY, + * if overlay && notifier, elementSizeInBytes = 16 + * else, elementSizeInBytes = 4 + * + * Note that different GPUs support different semaphore and notifier formats. + * Check NvKmsAllocDeviceReply::validNIsoFormatMask to determine which are + * valid for the given device. + * + * Note also that FOUR_WORD and FOUR_WORD_NVDISPLAY are the same size, but + * FOUR_WORD uses a format compatible with display class 907[ce], and + * FOUR_WORD_NVDISPLAY uses a format compatible with c37e (actually defined by + * the NV_DISP_NOTIFIER definition in clc37d.h). + */ +enum NvKmsNIsoFormat { + NVKMS_NISO_FORMAT_LEGACY, + NVKMS_NISO_FORMAT_FOUR_WORD, + NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY, +}; + +enum NvKmsEventType { + NVKMS_EVENT_TYPE_DPY_CHANGED, + NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED, + NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED, + NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED, + NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED, + NVKMS_EVENT_TYPE_FLIP_OCCURRED, +}; + +enum NvKmsFlipResult { + NV_KMS_FLIP_RESULT_SUCCESS = 0, /* Success */ + NV_KMS_FLIP_RESULT_INVALID_PARAMS, /* Parameter validation failed */ + NV_KMS_FLIP_RESULT_IN_PROGRESS, /* Flip would fail because an outstanding + flip containing changes that cannot be + queued is in progress */ +}; + +typedef enum { + NV_EVO_SCALER_1TAP = 0, + NV_EVO_SCALER_2TAPS = 1, + NV_EVO_SCALER_3TAPS = 2, + NV_EVO_SCALER_5TAPS = 3, + NV_EVO_SCALER_8TAPS = 4, + NV_EVO_SCALER_TAPS_MIN = NV_EVO_SCALER_1TAP, + NV_EVO_SCALER_TAPS_MAX = NV_EVO_SCALER_8TAPS, +} NVEvoScalerTaps; + +/* This structure describes the scaling bounds for a given layer. */ +struct NvKmsScalingUsageBounds { + /* + * Maximum vertical downscale factor (scaled by 1024) + * + * For example, if the downscale factor is 1.5, then maxVDownscaleFactor + * would be 1.5 x 1024 = 1536. + */ + NvU16 maxVDownscaleFactor; + + /* + * Maximum horizontal downscale factor (scaled by 1024) + * + * See the example above for maxVDownscaleFactor. + */ + NvU16 maxHDownscaleFactor; + + /* Maximum vertical taps allowed */ + NVEvoScalerTaps vTaps; + + /* Whether vertical upscaling is allowed */ + NvBool vUpscalingAllowed; +}; + +struct NvKmsUsageBounds { + struct { + NvBool usable; + struct NvKmsScalingUsageBounds scaling; + NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8); + } layer[NVKMS_MAX_LAYERS_PER_HEAD]; +}; + +/*! + * Per-component arrays of NvU16s describing the LUT; used for both the input + * LUT and output LUT. + */ +struct NvKmsLutRamps { + NvU16 red[NVKMS_LUT_ARRAY_SIZE]; /*! in */ + NvU16 green[NVKMS_LUT_ARRAY_SIZE]; /*! in */ + NvU16 blue[NVKMS_LUT_ARRAY_SIZE]; /*! in */ +}; + +/* Datatypes for LUT capabilities */ +enum NvKmsLUTFormat { + /* + * Normalized fixed-point format mapping [0, 1] to [0x0, 0xFFFF]. + */ + NVKMS_LUT_FORMAT_UNORM16, + + /* + * Half-precision floating point. + */ + NVKMS_LUT_FORMAT_FP16, + + /* + * 14-bit fixed-point format required to work around hardware bug 813188. 
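// Direct encoding (illustrative helper) of the NISO placement rule quoted
// above: the semaphore/notifier element must fit within the first 4096
// bytes of the surface. elementSizeInBytes follows the LEGACY / FOUR_WORD
// rules described in that comment.
static inline NvBool exampleNIsoOffsetValid(NvU32 offsetInWords,
                                            NvU32 elementSizeInBytes)
{
    return ((offsetInWords * 4) + elementSizeInBytes) <= 4096;
}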
+ * + * To convert from UNORM16 to UNORM14_WAR_813188: + * unorm14_war_813188 = ((unorm16 >> 2) & ~7) + 0x6000 + */ + NVKMS_LUT_FORMAT_UNORM14_WAR_813188 +}; + +enum NvKmsLUTVssSupport { + NVKMS_LUT_VSS_NOT_SUPPORTED, + NVKMS_LUT_VSS_SUPPORTED, + NVKMS_LUT_VSS_REQUIRED, +}; + +enum NvKmsLUTVssType { + NVKMS_LUT_VSS_TYPE_NONE, + NVKMS_LUT_VSS_TYPE_LINEAR, + NVKMS_LUT_VSS_TYPE_LOGARITHMIC, +}; + +struct NvKmsLUTCaps { + /*! Whether this layer or head on this device supports this LUT stage. */ + NvBool supported; + + /*! Whether this LUT supports VSS. */ + enum NvKmsLUTVssSupport vssSupport; + + /*! + * The type of VSS segmenting this LUT uses. + */ + enum NvKmsLUTVssType vssType; + + /*! + * Expected number of VSS segments. + */ + NvU32 vssSegments; + + /*! + * Expected number of LUT entries. + */ + NvU32 lutEntries; + + /*! + * Format for each of the LUT entries. + */ + enum NvKmsLUTFormat entryFormat; +}; + +/* each LUT entry uses this many bytes */ +#define NVKMS_LUT_CAPS_LUT_ENTRY_SIZE (4 * sizeof(NvU16)) + +/* if the LUT surface uses VSS, size of the VSS header */ +#define NVKMS_LUT_VSS_HEADER_SIZE (4 * NVKMS_LUT_CAPS_LUT_ENTRY_SIZE) + +struct NvKmsLUTSurfaceParams { + NvKmsSurfaceHandle surfaceHandle; + NvU64 offset NV_ALIGN_BYTES(8); + NvU32 vssSegments; + NvU32 lutEntries; +}; + +/* + * A 3x4 row-major colorspace conversion matrix. + * + * The output color C' is the CSC matrix M times the column vector + * [ R, G, B, 1 ]. + * + * Each entry in the matrix is a signed 2's-complement fixed-point number with + * 3 integer bits and 16 fractional bits. + */ +struct NvKmsCscMatrix { + NvS32 m[3][4]; +}; + +#define NVKMS_IDENTITY_CSC_MATRIX \ + (struct NvKmsCscMatrix){{ \ + { 0x10000, 0, 0, 0 }, \ + { 0, 0x10000, 0, 0 }, \ + { 0, 0, 0x10000, 0 } \ + }} + +/*! + * A color key match bit used in the blend equations and one can select the src + * or dst Color Key when blending. Assert key bit means match, de-assert key + * bit means nomatch. + * + * The src Color Key means using the key bit from the current layer, the dst + * Color Key means using key bit from the previous layer composition stage. The + * src or dst key bit will be inherited by blended pixel for the preparation of + * next blending, as dst Color Key. + * + * src: Forward the color key match bit from the current layer pixel to next layer + * composition stage. + * + * dst: Forward the color key match bit from the previous composition stage + * pixel to next layer composition stage. + * + * disable: Forward “1” to the next layer composition stage as the color key. + */ +enum NvKmsCompositionColorKeySelect { + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE = 0, + NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC, + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST, +}; + +#define NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS 3 + +/*! + * Composition modes used for surfaces in general. + * The various types of composition are: + * + * Opaque: source pixels are opaque regardless of alpha, + * and will occlude the destination pixel. + * + * Alpha blending: aka opacity, which could be specified + * for a surface in its entirety, or on a per-pixel basis. + * + * Non-premultiplied: alpha value applies to source pixel, + * and also counter-weighs the destination pixel. + * Premultiplied: alpha already applied to source pixel, + * so it only counter-weighs the destination pixel. + * + * Color keying: use a color key structure to decide + * the criteria for matching and compositing. + * (See NVColorKey below.) + */ +enum NvKmsCompositionBlendingMode { + /*! 
+ * Modes that use no other parameters. + */ + NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE, + + /*! + * Mode that ignores both the per-pixel alpha provided + * by the client and the surfaceAlpha, making the source + * pixel totally transparent. + */ + NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT, + + /*! + * Modes that use the per-pixel alpha provided by the client; + * the surfaceAlpha must be set to 0. + */ + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA, + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA, + + /*! + * These use both the surface-wide and per-pixel alpha values. + * surfaceAlpha is treated as the numerator, ranging from 0 to 255, + * of a fraction whose denominator is 255. + */ + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA, + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA, +}; + +static inline NvBool +NvKmsIsCompositionModeUseAlpha(enum NvKmsCompositionBlendingMode mode) +{ + return mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA || + mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA || + mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA || + mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA; +} + +/*! + * Abstract description of a color key. + * + * a, r, g, and b are component values in the same width as the framebuffer + * values being scanned out. + * + * match[ARGB] defines whether that component is considered when matching the + * color key -- TRUE means that the value of the corresponding component must + * match the given value for the given pixel to be considered a 'key match'; + * FALSE means that the value of that component is not a key match criterion. + */ +typedef struct { + NvU16 a, r, g, b; + NvBool matchA, matchR, matchG, matchB; +} NVColorKey; + +/*! + * Describes the composition parameters for a single layer. + */ +struct NvKmsCompositionParams { + enum NvKmsCompositionColorKeySelect colorKeySelect; + NVColorKey colorKey; + /* + * It is possible to assign different blending modes to match pixels and + * nomatch pixels. blendingMode[0] is used to blend a pixel with the color key + * match bit "0", and blendingMode[1] is used to blend a pixel with the color + * key match bit "1". + * + * However, because of hardware restrictions, match and nomatch pixels + * cannot use the blending modes PREMULT_ALPHA, NON_PREMULT_ALPHA, + * PREMULT_SURFACE_ALPHA, and NON_PREMULT_SURFACE_ALPHA at the same time. + */ + enum NvKmsCompositionBlendingMode blendingMode[2]; + NvU8 surfaceAlpha; /* Applies to all pixels of entire surface */ + /* + * Defines the composition order. A smaller value moves the layer closer to + * the top (away from the background). The values need not be consecutive; + * they only need to differ for each of the layers owned by the head, and + * the value for the main layer must be the greatest one. + * + * The cursor always remains on top of all other layers; this parameter + * has no effect on the cursor. NVKMS assigns a default depth to each of the + * supported layers; by default the depth of a layer is calculated as + * (NVKMS_MAX_LAYERS_PER_HEAD - index of the layer). If depth is set to + * '0' then the default depth value is used. + */ + NvU8 depth; +}; + +/*! + * Describes the composition capabilities supported by the hardware for the + * cursor or a layer. It describes the supported color key selects and, for + * each supported color key select, the supported blending modes for match + * and nomatch pixels.
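// Sketch of a simple NvKmsCompositionParams setup: color keying disabled and
// premultiplied per-pixel alpha. With keying disabled, every pixel carries
// key bit "1" (per the colorKeySelect comment above), so only
// blendingMode[1] applies; a zero-initialized *params leaves blendingMode[0]
// as OPAQUE and colorKey unused.
static inline void
exampleSetupPremultAlpha(struct NvKmsCompositionParams *params)
{
    params->colorKeySelect = NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE;
    params->blendingMode[1] = NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA;
    params->surfaceAlpha = 0;  // must be 0 for the per-pixel alpha modes
    params->depth = 0;         // 0 selects the NVKMS default depth
}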
+ */ +struct NvKmsCompositionCapabilities { + + struct { + /* + * A bitmask of the supported blending modes for match and nomatch + * pixels. It should be the bitwise 'or' of one or more + * NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_*) values. + */ + NvU32 supportedBlendModes[2]; + } colorKeySelect[NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS]; + + /* + * A bitmask of the supported color key selects. + * + * It should be the bitwise 'or' of one or more + * NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_*) + * values. + */ + NvU32 supportedColorKeySelects; +}; + +struct NvKmsLayerCapabilities { + /*! + * Whether Layer supports the window mode. If window mode is supported, + * then clients can set the layer's dimensions so that they're smaller than + * the viewport, and can also change the output position of the layer to a + * non-(0, 0) position. + * + * NOTE: Dimension changes are currently unsupported for the main layer, + * and output position changes for the main layer are currently only + * supported via IOCTL_SET_LAYER_POSITION but not via flips. Support for + * these is coming soon, via changes to flip code. + */ + NvBool supportsWindowMode :1; + + /*! + * Whether layer supports ICtCp pipe. + */ + NvBool supportsICtCp :1; + + + /*! + * Describes the supported Color Key selects and blending modes for + * match and nomatch layer pixels. + */ + struct NvKmsCompositionCapabilities composition; + + /*! + * Which NvKmsSurfaceMemoryFormat enum values are supported by the NVKMS + * device on the given scanout surface layer. + * + * Iff a particular enum NvKmsSurfaceMemoryFormat 'value' is supported, + * then (1 << value) will be set in the appropriate bitmask. + * + * Note that these bitmasks just report the static SW/HW capabilities, + * and are a superset of the formats that IMP may allow. Clients are + * still expected to honor the NvKmsUsageBounds for each head. + */ + NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8); + + /* Capabilities for each LUT stage in the EVO3 precomp pipeline. */ + struct NvKmsLUTCaps ilut; + struct NvKmsLUTCaps tmo; +}; + +/*! + * Surface layouts. + * + * BlockLinear is the NVIDIA GPU native tiling format, arranging pixels into + * blocks or tiles for better locality during common GPU operations. + * + * Pitch is the naive "linear" surface layout with pixels laid out sequentially + * in memory line-by-line, optionally with some padding at the end of each line + * for alignment purposes. + */ +enum NvKmsSurfaceMemoryLayout { + NvKmsSurfaceMemoryLayoutBlockLinear = 0, + NvKmsSurfaceMemoryLayoutPitch = 1, +}; + +static inline const char *NvKmsSurfaceMemoryLayoutToString( + enum NvKmsSurfaceMemoryLayout layout) +{ + switch (layout) { + default: + return "Unknown"; + case NvKmsSurfaceMemoryLayoutBlockLinear: + return "BlockLinear"; + case NvKmsSurfaceMemoryLayoutPitch: + return "Pitch"; + } +} + +typedef enum { + MUX_STATE_GET = 0, + MUX_STATE_INTEGRATED = 1, + MUX_STATE_DISCRETE = 2, + MUX_STATE_UNKNOWN = 3, +} NvMuxState; + +enum NvKmsRotation { + NVKMS_ROTATION_0 = 0, + NVKMS_ROTATION_90 = 1, + NVKMS_ROTATION_180 = 2, + NVKMS_ROTATION_270 = 3, + NVKMS_ROTATION_MIN = NVKMS_ROTATION_0, + NVKMS_ROTATION_MAX = NVKMS_ROTATION_270, +}; + +struct NvKmsRRParams { + enum NvKmsRotation rotation; + NvBool reflectionX; + NvBool reflectionY; +}; + +/*! + * Convert each possible NvKmsRRParams to a unique integer [0..15], + * so that we can describe possible NvKmsRRParams with an NvU16 bitmask. + * + * E.g. + * rotation = 0, reflectionX = F, reflectionY = F == 0|0|0 == 0 + * ... 
+ * rotation = 270, reflectionX = T, reflectionY = T == 3|4|8 == 15 + */ +static inline NvU8 NvKmsRRParamsToCapBit(const struct NvKmsRRParams *rrParams) +{ + NvU8 bitPosition = (NvU8)rrParams->rotation; + if (rrParams->reflectionX) { + bitPosition |= NVBIT(2); + } + if (rrParams->reflectionY) { + bitPosition |= NVBIT(3); + } + return bitPosition; +} + +/* + * NVKMS_MEMORY_ISO is used to tag surface memory that will be accessed via + * display's isochronous interface. Examples of this type of memory are pixel + * data and LUT entries. + * + * NVKMS_MEMORY_NISO is used to tag surface memory that will be accessed via + * display's non-isochronous interface. Examples of this type of memory are + * semaphores and notifiers. + */ +typedef enum { + NVKMS_MEMORY_ISO = 0, + NVKMS_MEMORY_NISO = 1, +} NvKmsMemoryIsoType; + +typedef struct { + NvBool coherent; + NvBool noncoherent; +} NvKmsDispIOCoherencyModes; + +enum NvKmsInputColorRange { + /* + * If DEFAULT is provided, driver will assume full range for RGB formats + * and limited range for YUV formats. + */ + NVKMS_INPUT_COLOR_RANGE_DEFAULT = 0, + + NVKMS_INPUT_COLOR_RANGE_LIMITED = 1, + + NVKMS_INPUT_COLOR_RANGE_FULL = 2, +}; + +enum NvKmsInputColorSpace { + /* Unknown colorspace */ + NVKMS_INPUT_COLOR_SPACE_NONE = 0, + + NVKMS_INPUT_COLOR_SPACE_BT601 = 1, + NVKMS_INPUT_COLOR_SPACE_BT709 = 2, + NVKMS_INPUT_COLOR_SPACE_BT2020 = 3, + NVKMS_INPUT_COLOR_SPACE_BT2100 = NVKMS_INPUT_COLOR_SPACE_BT2020, + + NVKMS_INPUT_COLOR_SPACE_SCRGB = 4 +}; + +enum NvKmsInputTf { + NVKMS_INPUT_TF_LINEAR = 0, + NVKMS_INPUT_TF_PQ = 1 +}; + +enum NvKmsOutputColorimetry { + NVKMS_OUTPUT_COLORIMETRY_DEFAULT = 0, + + NVKMS_OUTPUT_COLORIMETRY_BT2100 = 1, +}; + +enum NvKmsOutputTf { + /* + * NVKMS itself won't apply any OETF (clients are still + * free to provide a custom OLUT) + */ + NVKMS_OUTPUT_TF_NONE = 0, + NVKMS_OUTPUT_TF_TRADITIONAL_GAMMA_SDR = 1, + NVKMS_OUTPUT_TF_PQ = 2, +}; + +/*! + * EOTF Data Byte 1 as per CTA-861-G spec. + * This is expected to match exactly with the spec. + */ +enum NvKmsInfoFrameEOTF { + NVKMS_INFOFRAME_EOTF_SDR_GAMMA = 0, + NVKMS_INFOFRAME_EOTF_HDR_GAMMA = 1, + NVKMS_INFOFRAME_EOTF_ST2084 = 2, + NVKMS_INFOFRAME_EOTF_HLG = 3, +}; + +/*! + * HDR Static Metadata Type1 Descriptor as per CEA-861.3 spec. + * This is expected to match exactly with the spec. + */ +struct NvKmsHDRStaticMetadata { + /*! + * Color primaries of the data. + * These are coded as unsigned 16-bit values in units of 0.00002, + * where 0x0000 represents zero and 0xC350 represents 1.0000. + */ + struct { + NvU16 x, y; + } displayPrimaries[3]; + + /*! + * White point of colorspace data. + * These are coded as unsigned 16-bit values in units of 0.00002, + * where 0x0000 represents zero and 0xC350 represents 1.0000. + */ + struct { + NvU16 x, y; + } whitePoint; + + /** + * Maximum mastering display luminance. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + NvU16 maxDisplayMasteringLuminance; + + /*! + * Minimum mastering display luminance. + * This value is coded as an unsigned 16-bit value in units of + * 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF + * represents 6.5535 cd/m2. + */ + NvU16 minDisplayMasteringLuminance; + + /*! + * Maximum content light level. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + NvU16 maxCLL; + + /*! 
+ * Maximum frame-average light level. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + NvU16 maxFALL; +}; + +/*! + * A superframe is made of two or more video streams that are combined in + * a specific way. A DP serializer (an external device connected to a Tegra + * ARM SOC over DP or HDMI) can receive a video stream comprising multiple + * videos combined into a single frame and then split it into multiple + * video streams. The following structure describes the number of views + * and dimensions of each view inside a superframe. + */ +struct NvKmsSuperframeInfo { + NvU8 numViews; + struct { + /* x offset inside superframe at which this view starts */ + NvU16 x; + + /* y offset inside superframe at which this view starts */ + NvU16 y; + + /* Horizontal active width in pixels for this view */ + NvU16 width; + + /* Vertical active height in lines for this view */ + NvU16 height; + } view[NVKMS_MAX_SUPERFRAME_VIEWS]; +}; + +/* Fields within NvKmsVblankSemControlDataOneHead::flags */ +#define NVKMS_VBLANK_SEM_CONTROL_SWAP_INTERVAL 15:0 + +struct NvKmsVblankSemControlDataOneHead { + NvU32 requestCounterAccel; + NvU32 requestCounter; + NvU32 flags; + + NvU32 semaphore; + NvU64 vblankCount NV_ALIGN_BYTES(8); +}; + +struct NvKmsVblankSemControlData { + struct NvKmsVblankSemControlDataOneHead head[NV_MAX_HEADS]; +}; + +#endif /* NVKMS_API_TYPES_H */ diff --git a/kernel-open/common/inc/nvkms-format.h b/kernel-open/common/inc/nvkms-format.h new file mode 100644 index 0000000..88b26b3 --- /dev/null +++ b/kernel-open/common/inc/nvkms-format.h @@ -0,0 +1,126 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_FORMAT_H) +#define NVKMS_FORMAT_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* + * In order to interpret these pixel format namings, please take note of these + * conventions: + * - The Y8_U8__Y8_V8_N422 and U8_Y8__V8_Y8_N422 formats are both packed formats + * that have an interleaved chroma component across every two pixels. The + * double-underscore is a separator between these two pixel groups. + * - The triple-underscore is a separator between planes. + * - The 'N' suffix is a delimiter for the chroma decimation factor. 
+ * + * As examples of the above rules: + * - The Y8_U8__Y8_V8_N422 format has one 8-bit luma component (Y8) and one + * 8-bit chroma component (U8) in pixel N, and one 8-bit luma component (Y8) + * and one 8-bit chroma component (V8) in pixel (N + 1). This format is + * 422-decimated since the U and V chroma samples are shared between each + * pair of adjacent pixels per line. + * - The Y10___U10V10_N444 format has one plane of 10-bit luma (Y10) components, + * and another plane of 10-bit chroma components (U10V10). This format has no + * chroma decimation since the luma and chroma components are sampled at the + * same rate. + */ +enum NvKmsSurfaceMemoryFormat { + NvKmsSurfaceMemoryFormatI8 = 0, + NvKmsSurfaceMemoryFormatA1R5G5B5 = 1, + NvKmsSurfaceMemoryFormatX1R5G5B5 = 2, + NvKmsSurfaceMemoryFormatR5G6B5 = 3, + NvKmsSurfaceMemoryFormatA8R8G8B8 = 4, + NvKmsSurfaceMemoryFormatX8R8G8B8 = 5, + NvKmsSurfaceMemoryFormatA2B10G10R10 = 6, + NvKmsSurfaceMemoryFormatX2B10G10R10 = 7, + NvKmsSurfaceMemoryFormatA8B8G8R8 = 8, + NvKmsSurfaceMemoryFormatX8B8G8R8 = 9, + NvKmsSurfaceMemoryFormatRF16GF16BF16AF16 = 10, + NvKmsSurfaceMemoryFormatR16G16B16A16 = 11, + NvKmsSurfaceMemoryFormatRF32GF32BF32AF32 = 12, + NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422 = 13, + NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422 = 14, + NvKmsSurfaceMemoryFormatY8___U8V8_N444 = 15, + NvKmsSurfaceMemoryFormatY8___V8U8_N444 = 16, + NvKmsSurfaceMemoryFormatY8___U8V8_N422 = 17, + NvKmsSurfaceMemoryFormatY8___V8U8_N422 = 18, + NvKmsSurfaceMemoryFormatY8___U8V8_N420 = 19, + NvKmsSurfaceMemoryFormatY8___V8U8_N420 = 20, + NvKmsSurfaceMemoryFormatY10___U10V10_N444 = 21, + NvKmsSurfaceMemoryFormatY10___V10U10_N444 = 22, + NvKmsSurfaceMemoryFormatY10___U10V10_N422 = 23, + NvKmsSurfaceMemoryFormatY10___V10U10_N422 = 24, + NvKmsSurfaceMemoryFormatY10___U10V10_N420 = 25, + NvKmsSurfaceMemoryFormatY10___V10U10_N420 = 26, + NvKmsSurfaceMemoryFormatY12___U12V12_N444 = 27, + NvKmsSurfaceMemoryFormatY12___V12U12_N444 = 28, + NvKmsSurfaceMemoryFormatY12___U12V12_N422 = 29, + NvKmsSurfaceMemoryFormatY12___V12U12_N422 = 30, + NvKmsSurfaceMemoryFormatY12___U12V12_N420 = 31, + NvKmsSurfaceMemoryFormatY12___V12U12_N420 = 32, + NvKmsSurfaceMemoryFormatY8___U8___V8_N444 = 33, + NvKmsSurfaceMemoryFormatY8___U8___V8_N420 = 34, + NvKmsSurfaceMemoryFormatRF16GF16BF16XF16 = 35, + NvKmsSurfaceMemoryFormatMin = NvKmsSurfaceMemoryFormatI8, + NvKmsSurfaceMemoryFormatMax = NvKmsSurfaceMemoryFormatRF16GF16BF16XF16, +}; + +typedef struct NvKmsSurfaceMemoryFormatInfo { + enum NvKmsSurfaceMemoryFormat format; + const char *name; + NvU8 depth; + NvBool isYUV; + NvU8 numPlanes; + + union { + struct { + NvU8 bytesPerPixel; + NvU8 bitsPerPixel; + } rgb; + + struct { + NvU8 depthPerComponent; + NvU8 storageBitsPerComponent; + NvU8 horizChromaDecimationFactor; + NvU8 vertChromaDecimationFactor; + } yuv; + }; +} NvKmsSurfaceMemoryFormatInfo; + +const NvKmsSurfaceMemoryFormatInfo *nvKmsGetSurfaceMemoryFormatInfo( + const enum NvKmsSurfaceMemoryFormat format); + +const char *nvKmsSurfaceMemoryFormatToString( + const enum NvKmsSurfaceMemoryFormat format); + +#ifdef __cplusplus +}; +#endif + +#endif /* NVKMS_FORMAT_H */ diff --git a/kernel-open/common/inc/nvkms-kapi.h b/kernel-open/common/inc/nvkms-kapi.h new file mode 100644 index 0000000..5f229ff --- /dev/null +++ b/kernel-open/common/inc/nvkms-kapi.h @@ -0,0 +1,1624 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
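// Usage sketch for the nvkms-format query functions above, assuming
// nvKmsGetSurfaceMemoryFormatInfo() returns the descriptor for any valid
// format (the NULL check is purely defensive): compute the byte width of
// plane 0 for a pitch surface.
static inline NvU32 examplePlane0ByteWidth(enum NvKmsSurfaceMemoryFormat format,
                                           NvU32 widthInPixels)
{
    const NvKmsSurfaceMemoryFormatInfo *info =
        nvKmsGetSurfaceMemoryFormatInfo(format);

    if (info == NULL) {
        return 0;
    }
    if (!info->isYUV) {
        return widthInPixels * info->rgb.bytesPerPixel;
    }
    // Plane 0 of a YUV format holds luma; its byte width depends on the
    // per-component storage size (e.g. 10-bit samples stored in 16 bits).
    return widthInPixels * (info->yuv.storageBitsPerComponent / 8);
}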
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(__NVKMS_KAPI_H__) + +#include "nvtypes.h" +#include "nv_mig_types.h" + +#include "nv-gpu-info.h" +#include "nv_dpy_id.h" +#include "nvkms-api-types.h" +#include "nvkms-format.h" + +#define __NVKMS_KAPI_H__ + +#define NVKMS_KAPI_MAX_HEADS 4 + +#define NVKMS_KAPI_MAX_CONNECTORS 16 +#define NVKMS_KAPI_MAX_CLONE_DISPLAYS 16 + +#define NVKMS_KAPI_EDID_BUFFER_SIZE 2048 + +#define NVKMS_KAPI_MODE_NAME_LEN 32 + +/** + * \defgroup Objects + * @{ + */ + +struct NvKmsKapiDevice; +struct NvKmsKapiMemory; +struct NvKmsKapiSurface; +struct NvKmsKapiChannelEvent; +struct NvKmsKapiSemaphoreSurface; +struct NvKmsKapiSemaphoreSurfaceCallback; + +typedef NvU32 NvKmsKapiConnector; +typedef NvU32 NvKmsKapiDisplay; + +/** @} */ + +/** + * \defgroup FuncPtrs + * @{ + */ + +/* + * Note: The channel event proc should not call back into NVKMS-KAPI driver. + * The callback into NVKMS-KAPI from the channel event proc, may cause + * deadlock. + */ +typedef void NvKmsChannelEventProc(void *dataPtr, NvU32 dataU32); + +/* + * Note: Same as above, this function must not call back into NVKMS-KAPI, nor + * directly into RM. Doing so could cause deadlocks given the notification + * function will most likely be called from within RM's interrupt handler + * callchain. 
+ */ +typedef void NvKmsSemaphoreSurfaceCallbackProc(void *pData); + +/** @} */ + +/** + * \defgroup Structs + * @{ + */ + +struct NvKmsKapiDisplayModeTimings { + + NvU32 refreshRate; + NvU32 pixelClockHz; + NvU32 hVisible; + NvU32 hSyncStart; + NvU32 hSyncEnd; + NvU32 hTotal; + NvU32 hSkew; + NvU32 vVisible; + NvU32 vSyncStart; + NvU32 vSyncEnd; + NvU32 vTotal; + + struct { + + NvU32 interlaced : 1; + NvU32 doubleScan : 1; + NvU32 hSyncPos : 1; + NvU32 hSyncNeg : 1; + NvU32 vSyncPos : 1; + NvU32 vSyncNeg : 1; + + } flags; + + NvU32 widthMM; + NvU32 heightMM; + +}; + +struct NvKmsKapiDisplayMode { + struct NvKmsKapiDisplayModeTimings timings; + char name[NVKMS_KAPI_MODE_NAME_LEN]; +}; + +#define NVKMS_KAPI_LAYER_MAX 8 + +#define NVKMS_KAPI_LAYER_INVALID_IDX 0xff +#define NVKMS_KAPI_LAYER_PRIMARY_IDX 0 + +struct NvKmsKapiLutCaps { + struct { + struct NvKmsLUTCaps ilut; + struct NvKmsLUTCaps tmo; + } layer[NVKMS_KAPI_LAYER_MAX]; + struct NvKmsLUTCaps olut; +}; + +struct NvKmsKapiDeviceResourcesInfo { + + NvU32 numHeads; + NvU32 numLayers[NVKMS_KAPI_MAX_HEADS]; + + NvU32 numConnectors; + NvKmsKapiConnector connectorHandles[NVKMS_KAPI_MAX_CONNECTORS]; + + struct { + NvU32 validCursorCompositionModes; + NvU64 supportedCursorSurfaceMemoryFormats; + + struct { + NvU64 maxSubmittedOffset; + NvU64 stride; + } semsurf; + + struct { + NvU16 validRRTransforms; + NvU32 validCompositionModes; + } layer[NVKMS_KAPI_LAYER_MAX]; + + NvU32 minWidthInPixels; + NvU32 maxWidthInPixels; + + NvU32 minHeightInPixels; + NvU32 maxHeightInPixels; + + NvU32 maxCursorSizeInPixels; + + NvU32 pitchAlignment; + + NvU32 hasVideoMemory; + + NvU32 numDisplaySemaphores; + + NvU8 genericPageKind; + + NvBool supportsSyncpts; + + NvBool requiresVrrSemaphores; + + NvBool supportsInputColorRange; + NvBool supportsInputColorSpace; + } caps; + + NvU64 supportedSurfaceMemoryFormats[NVKMS_KAPI_LAYER_MAX]; + NvBool supportsICtCp[NVKMS_KAPI_LAYER_MAX]; + + struct NvKmsKapiLutCaps lutCaps; + + NvU64 vtFbBaseAddress; + NvU64 vtFbSize; +}; + +#define NVKMS_KAPI_LAYER_MASK(layerType) (1 << (layerType)) + +typedef enum NvKmsKapiMappingTypeRec { + NVKMS_KAPI_MAPPING_TYPE_USER = 1, + NVKMS_KAPI_MAPPING_TYPE_KERNEL = 2, +} NvKmsKapiMappingType; + +struct NvKmsKapiConnectorInfo { + + NvKmsKapiConnector handle; + + NvU32 physicalIndex; + + NvKmsConnectorSignalFormat signalFormat; + NvKmsConnectorType type; + + /* + * List of connectors, not possible to serve together with this connector + * because they are competing for same resources. + */ + NvU32 numIncompatibleConnectors; + NvKmsKapiConnector incompatibleConnectorHandles[NVKMS_KAPI_MAX_CONNECTORS]; + + NVDpyIdList dynamicDpyIdList; +}; + +struct NvKmsKapiStaticDisplayInfo { + + NvKmsKapiDisplay handle; + + NvKmsKapiConnector connectorHandle; + + /* Set for DisplayPort MST displays (dynamic displays) */ + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]; + + NvBool internal; + + /* List of potential sibling display for cloning */ + NvU32 numPossibleClones; + NvKmsKapiDisplay possibleCloneHandles[NVKMS_KAPI_MAX_CLONE_DISPLAYS]; + + NvU32 headMask; + + NvBool isDpMST; +}; + +struct NvKmsKapiSyncParams { + union { + struct { + /*! + * Possible syncpt use case in kapi. + * For pre-syncpt, use only id and value + * and for post-syncpt, use only fd. 
+ */ + NvU32 preSyncptId; + NvU32 preSyncptValue; + } syncpt; + + struct { + NvU32 index; + } semaphore; + } u; + + NvBool preSyncptSpecified; + NvBool postSyncptRequested; + NvBool semaphoreSpecified; +}; + +struct NvKmsKapiLayerConfig { + struct NvKmsKapiSurface *surface; + struct { + enum NvKmsCompositionBlendingMode compMode; + NvU8 surfaceAlpha; + } compParams; + struct NvKmsRRParams rrParams; + struct NvKmsKapiSyncParams syncParams; + + struct { + struct NvKmsHDRStaticMetadata val; + NvBool enabled; + } hdrMetadata; + + enum NvKmsInputTf inputTf; + enum NvKmsOutputTf outputTf; + + NvU8 minPresentInterval; + NvBool tearing; + + NvU16 srcX, srcY; + NvU16 srcWidth, srcHeight; + + NvS16 dstX, dstY; + NvU16 dstWidth, dstHeight; + + enum NvKmsInputColorSpace inputColorSpace; + enum NvKmsInputColorRange inputColorRange; + + struct { + NvBool enabled; + struct NvKmsKapiSurface *lutSurface; + NvU64 offset; + NvU32 vssSegments; + NvU32 lutEntries; + } ilut; + + struct { + NvBool enabled; + struct NvKmsKapiSurface *lutSurface; + NvU64 offset; + NvU32 vssSegments; + NvU32 lutEntries; + } tmo; + + struct NvKmsCscMatrix csc; + NvBool cscUseMain; + + struct { + struct NvKmsCscMatrix lmsCtm; + struct NvKmsCscMatrix lmsToItpCtm; + struct NvKmsCscMatrix itpToLmsCtm; + struct NvKmsCscMatrix blendCtm; + struct { + NvBool lmsCtm : 1; + NvBool lmsToItpCtm : 1; + NvBool itpToLmsCtm : 1; + NvBool blendCtm : 1; + } enabled; + } matrixOverrides; +}; + +struct NvKmsKapiLayerRequestedConfig { + struct NvKmsKapiLayerConfig config; + struct { + NvBool surfaceChanged : 1; + NvBool srcXYChanged : 1; + NvBool srcWHChanged : 1; + NvBool dstXYChanged : 1; + NvBool dstWHChanged : 1; + NvBool cscChanged : 1; + NvBool inputTfChanged : 1; + NvBool outputTfChanged : 1; + NvBool inputColorSpaceChanged : 1; + NvBool inputColorRangeChanged : 1; + NvBool hdrMetadataChanged : 1; + NvBool matrixOverridesChanged : 1; + NvBool ilutChanged : 1; + NvBool tmoChanged : 1; + } flags; +}; + +struct NvKmsKapiCursorRequestedConfig { + struct NvKmsKapiSurface *surface; + struct { + enum NvKmsCompositionBlendingMode compMode; + NvU8 surfaceAlpha; + } compParams; + + NvS16 dstX, dstY; + + struct { + NvBool surfaceChanged : 1; + NvBool dstXYChanged : 1; + } flags; +}; + +struct NvKmsKapiHeadModeSetConfig { + /* + * DRM distinguishes between the head state "enabled" (the specified + * configuration for the head is valid, its resources are allocated, + * etc, but the head may not necessarily be currently driving pixels + * to its output resource) and the head state "active" (the head is + * "enabled" _and_ the head is actively driving pixels to its output + * resource). + * + * This distinction is for DPMS: + * + * DPMS On : enabled=true, active=true + * DPMS Off : enabled=true, active=false + * + * "Enabled" state is indicated by numDisplays != 0. + * "Active" state is indicated by bActive == true. 
+ */ + NvBool bActive; + + NvU32 numDisplays; + NvKmsKapiDisplay displays[NVKMS_KAPI_MAX_CLONE_DISPLAYS]; + + struct NvKmsKapiDisplayMode mode; + + NvBool vrrEnabled; + + struct { + NvBool enabled; + enum NvKmsInfoFrameEOTF eotf; + struct NvKmsHDRStaticMetadata staticMetadata; + } hdrInfoFrame; + + enum NvKmsOutputColorimetry colorimetry; + + struct { + struct { + NvU32 depth; + NvU32 start; + NvU32 end; + struct NvKmsLutRamps *pRamps; + } input; + + struct { + NvBool enabled; + struct NvKmsLutRamps *pRamps; + } output; + } lut; + + struct { + NvBool enabled; + struct NvKmsKapiSurface *lutSurface; + NvU64 offset; + NvU32 vssSegments; + NvU32 lutEntries; + } olut; + + NvU32 olutFpNormScale; +}; + +struct NvKmsKapiHeadRequestedConfig { + struct NvKmsKapiHeadModeSetConfig modeSetConfig; + struct { + NvBool activeChanged : 1; + NvBool displaysChanged : 1; + NvBool modeChanged : 1; + NvBool hdrInfoFrameChanged : 1; + NvBool colorimetryChanged : 1; + NvBool legacyIlutChanged : 1; + NvBool legacyOlutChanged : 1; + NvBool olutChanged : 1; + NvBool olutFpNormScaleChanged : 1; + } flags; + + struct NvKmsKapiCursorRequestedConfig cursorRequestedConfig; + + struct NvKmsKapiLayerRequestedConfig + layerRequestedConfig[NVKMS_KAPI_LAYER_MAX]; +}; + +struct NvKmsKapiRequestedModeSetConfig { + NvU32 headsMask; + struct NvKmsKapiHeadRequestedConfig + headRequestedConfig[NVKMS_KAPI_MAX_HEADS]; +}; + +struct NvKmsKapiLayerReplyConfig { + int postSyncptFd; +}; + +struct NvKmsKapiHeadReplyConfig { + struct NvKmsKapiLayerReplyConfig + layerReplyConfig[NVKMS_KAPI_LAYER_MAX]; +}; + +struct NvKmsKapiModeSetReplyConfig { + enum NvKmsFlipResult flipResult; + NvBool vrrFlip; + NvS32 vrrSemaphoreIndex; + struct NvKmsKapiHeadReplyConfig + headReplyConfig[NVKMS_KAPI_MAX_HEADS]; +}; + +struct NvKmsKapiEventDisplayChanged { + NvKmsKapiDisplay display; +}; + +struct NvKmsKapiEventDynamicDisplayConnected { + NvKmsKapiDisplay display; +}; + +struct NvKmsKapiEventFlipOccurred { + NvU32 head; + NvU32 layer; +}; + +struct NvKmsKapiDpyCRC32 { + NvU32 value; + NvBool supported; +}; + +struct NvKmsKapiCrcs { + struct NvKmsKapiDpyCRC32 compositorCrc32; + struct NvKmsKapiDpyCRC32 rasterGeneratorCrc32; + struct NvKmsKapiDpyCRC32 outputCrc32; +}; + +struct NvKmsKapiEvent { + enum NvKmsEventType type; + + struct NvKmsKapiDevice *device; + + void *privateData; + + union { + struct NvKmsKapiEventDisplayChanged displayChanged; + struct NvKmsKapiEventDynamicDisplayConnected dynamicDisplayConnected; + struct NvKmsKapiEventFlipOccurred flipOccurred; + } u; +}; + +struct NvKmsKapiAllocateDeviceParams { + /* [IN] GPU ID obtained from enumerateGpus() */ + NvU32 gpuId; + /* [IN] MIG device if requested */ + MIGDeviceId migDevice; + + /* [IN] Private data of device allocator */ + void *privateData; + /* [IN] Event callback */ + void (*eventCallback)(const struct NvKmsKapiEvent *event); +}; + +struct NvKmsKapiDynamicDisplayParams { + /* [IN] Display Handle returned by getDisplays() */ + NvKmsKapiDisplay handle; + + /* [OUT] Connection status */ + NvU32 connected; + + /* [OUT] VRR status */ + NvBool vrrSupported; + + /* [IN/OUT] EDID of connected monitor/ Input to override EDID */ + struct { + NvU16 bufferSize; + NvU8 buffer[NVKMS_KAPI_EDID_BUFFER_SIZE]; + } edid; + + /* [IN] Set true to override EDID */ + NvBool overrideEdid; + + /* [IN] Set true to force connected status */ + NvBool forceConnected; + + /* [IN] Set true to force disconnect status */ + NvBool forceDisconnected; +}; + +struct NvKmsKapiCreateSurfaceParams { + + /* [IN] Parameter of 
each plane */ + struct { + /* [IN] Memory allocated for plane, using allocateMemory() */ + struct NvKmsKapiMemory *memory; + /* [IN] Offsets within the memory object */ + NvU32 offset; + /* [IN] Byte pitch of plane */ + NvU32 pitch; + } planes[NVKMS_MAX_PLANES_PER_SURFACE]; + + /* [IN] Width of the surface, in pixels */ + NvU32 width; + /* [IN] Height of the surface, in pixels */ + NvU32 height; + + /* [IN] The format describing number of planes and their content */ + enum NvKmsSurfaceMemoryFormat format; + + /* [IN] Whether to override the surface objects memory layout parameters + * with those provided here. */ + NvBool explicit_layout; + /* [IN] Whether the surface layout is block-linear or pitch. Used only + * if explicit_layout is NV_TRUE */ + enum NvKmsSurfaceMemoryLayout layout; + /* [IN] block-linear block height of surface. Used only when + * explicit_layout is NV_TRUE and layout is + * NvKmsSurfaceMemoryLayoutBlockLinear */ + NvU8 log2GobsPerBlockY; +}; + +enum NvKmsKapiAllocationType { + NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT = 0, + NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER = 1, + NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN = 2, +}; + +struct NvKmsKapiAllocateMemoryParams { + /* [IN] BlockLinear or Pitch */ + enum NvKmsSurfaceMemoryLayout layout; + + /* [IN] Allocation type */ + enum NvKmsKapiAllocationType type; + + /* [IN] Size, in bytes, of the memory to allocate */ + NvU64 size; + + /* [IN] Whether memory can be updated directly on the screen */ + NvBool noDisplayCaching; + + /* [IN] Whether to allocate memory from video memory or system memory */ + NvBool useVideoMemory; + + /* [IN/OUT] For input, non-zero if compression backing store should be + * allocated for the memory, for output, non-zero if compression backing + * store was allocated for the memory */ + NvU8 *compressible; +}; + +typedef enum NvKmsKapiRegisterWaiterResultRec { + NVKMS_KAPI_REG_WAITER_FAILED, + NVKMS_KAPI_REG_WAITER_SUCCESS, + NVKMS_KAPI_REG_WAITER_ALREADY_SIGNALLED, +} NvKmsKapiRegisterWaiterResult; + +typedef void NvKmsKapiSuspendResumeCallbackFunc(NvBool suspend); + +struct NvKmsKapiGpuInfo { + nv_gpu_info_t gpuInfo; + MIGDeviceId migDevice; +}; + +struct NvKmsKapiFunctionsTable { + + /*! + * NVIDIA Driver version string. + */ + const char *versionString; + + /*! + * System Information. + */ + struct { + /* Availability of write combining support for video memory */ + NvBool bAllowWriteCombining; + } systemInfo; + + /*! + * Enumerate the available GPUs that can be used with NVKMS. + * + * The gpuCallback will be called with a NvKmsKapiGpuInfo for each + * physical and MIG GPU currently available in the system. + * + * \param [in] gpuCallback Client function to handle each GPU. + * + * \return Count of enumerated gpus. + */ + NvU32 (*enumerateGpus) + ( + void (*gpuCallback)(const struct NvKmsKapiGpuInfo *info) + ); + + /*! + * Allocate an NVK device using which you can query/allocate resources on + * GPU and do modeset. + * + * \param [in] params Parameters required for device allocation. + * + * \return An valid device handle on success, NULL on failure. + */ + struct NvKmsKapiDevice* (*allocateDevice) + ( + const struct NvKmsKapiAllocateDeviceParams *params + ); + + /*! + * Frees a device allocated by allocateDevice() and all its resources. + * + * \param [in] device A device returned by allocateDevice(). + * This function is a no-op if device is not valid. + */ + void (*freeDevice)(struct NvKmsKapiDevice *device); + + /*! + * Grab ownership of device, ownership is required to do modeset. 
+     *
+     * \param [in] device A device returned by allocateDevice().
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*grabOwnership)(struct NvKmsKapiDevice *device);
+
+    /*!
+     * Release ownership of the device.
+     *
+     * \param [in] device A device returned by allocateDevice().
+     */
+    void (*releaseOwnership)(struct NvKmsKapiDevice *device);
+
+    /*!
+     * Grant modeset permissions for a display to fd. Only one (dispIndex,
+     * head, display) is currently supported.
+     *
+     * \param [in] fd fd from opening /dev/nvidia-modeset.
+     *
+     * \param [in] device A device returned by allocateDevice().
+     *
+     * \param [in] head The head of the display.
+     *
+     * \param [in] display The display to grant.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*grantPermissions)
+    (
+        NvS32 fd,
+        struct NvKmsKapiDevice *device,
+        NvU32 head,
+        NvKmsKapiDisplay display
+    );
+
+    /*!
+     * Revoke modeset permissions previously granted. Only one (dispIndex,
+     * head, display) is currently supported.
+     *
+     * \param [in] device A device returned by allocateDevice().
+     *
+     * \param [in] head The head of the display.
+     *
+     * \param [in] display The display to revoke.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*revokePermissions)
+    (
+        struct NvKmsKapiDevice *device,
+        NvU32 head,
+        NvKmsKapiDisplay display
+    );
+
+    /*!
+     * Grant modeset sub-owner permissions to fd. This is used by clients to
+     * convert DRM 'master' permissions into NVKMS sub-owner permissions.
+     *
+     * \param [in] fd fd from opening /dev/nvidia-modeset.
+     *
+     * \param [in] device A device returned by allocateDevice().
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*grantSubOwnership)
+    (
+        NvS32 fd,
+        struct NvKmsKapiDevice *device
+    );
+
+    /*!
+     * Revoke sub-owner permissions previously granted.
+     *
+     * \param [in] device A device returned by allocateDevice().
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*revokeSubOwnership)
+    (
+        struct NvKmsKapiDevice *device
+    );
+
+    /*!
+     * Registers for notification, via
+     * NvKmsKapiAllocateDeviceParams::eventCallback, of the events specified
+     * in interestMask.
+     *
+     * This call does nothing if eventCallback is NULL when NvKmsKapiDevice
+     * is allocated.
+     *
+     * Supported events are DPY_CHANGED and DYNAMIC_DPY_CONNECTED.
+     *
+     * \param [in] device A device returned by allocateDevice().
+     *
+     * \param [in] interestMask A mask of the events to listen for.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*declareEventInterest)
+    (
+        const struct NvKmsKapiDevice *device,
+        const NvU32 interestMask
+    );
+
+    /*!
+     * Retrieve the various static resources (connectors, heads, etc.) present
+     * on the device, and their capabilities.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in/out] info A pointer to an NvKmsKapiDeviceResourcesInfo
+     *                      struct that the call will fill out with the
+     *                      number of resources and their handles.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getDeviceResourcesInfo)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiDeviceResourcesInfo *info
+    );
+
+    /*!
+     * Retrieve the number of displays on a device and an array of handles to
+     * those displays.
+     *
+     * \param [in] device A device allocated using
+     *                    allocateDevice().
+     *
+     * \param [in/out] numDisplays The caller should set this to the size
+     *                             of the displayHandles array it passed in.
+     *                             The function will set it to the number of
+     *                             displays returned, or to the total number
+     *                             of displays on the device if displayHandles
+     *                             is NULL or the array is smaller than the
+     *                             number of displays.
+     *
+     * \param [out] displayHandles An array of display handles with
+     *                             numDisplays entries.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getDisplays)
+    (
+        struct NvKmsKapiDevice *device,
+        NvU32 *numDisplays, NvKmsKapiDisplay *displayHandles
+    );
+
+    /*!
+     * Retrieve information about a specified connector.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] connector Which connector to query; a handle returned by
+     *                       getDeviceResourcesInfo().
+     *
+     * \param [out] info A pointer to an NvKmsKapiConnectorInfo struct
+     *                   that the call will fill out with information
+     *                   about the connector.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getConnectorInfo)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiConnector connector, struct NvKmsKapiConnectorInfo *info
+    );
+
+    /*!
+     * Retrieve information about a specified display.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] display Which display to query; a handle returned by
+     *                     getDisplays().
+     *
+     * \param [out] info A pointer to an NvKmsKapiStaticDisplayInfo struct
+     *                   that the call will fill out with information
+     *                   about the display.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getStaticDisplayInfo)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiDisplay display, struct NvKmsKapiStaticDisplayInfo *info
+    );
+
+    /*!
+     * Detect or force the connection status/EDID of a display.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in/out] params Parameters containing the display
+     *                        handle, EDID, and flags to force connection
+     *                        status.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getDynamicDisplayInfo)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiDynamicDisplayParams *params
+    );
+
+    /*!
+     * Allocate some unformatted video or system memory of the specified size.
+     *
+     * This function allocates video or system memory on the specified GPU.
+     * It should be suitable for mapping on the CPU as a pitch linear or
+     * block-linear surface.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in/out] params Parameters required for memory allocation.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory* (*allocateMemory)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiAllocateMemoryParams *params
+    );
+
+    /*!
+     * Import some unformatted memory of the specified size.
+     *
+     * This function accepts a driver-specific parameter structure representing
+     * memory allocated elsewhere and imports it to a NVKMS KAPI memory object
+     * of the specified size.
+     *
+     * \param [in] device A device allocated using allocateDevice(). The
+     *                    memory being imported must have been allocated
+     *                    against the same physical device this device object
+     *                    represents.
+     *
+     * \param [in] size Size, in bytes, of the memory being imported.
+     *
+     * \param [in] nvKmsParamsUser Userspace pointer to driver-specific
+     *                             parameters describing the memory object
+     *                             being imported.
+     *
+     * \param [in] nvKmsParamsSize Size of the driver-specific parameter
+     *                             struct.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory* (*importMemory)
+    (
+        struct NvKmsKapiDevice *device, NvU64 size,
+        NvU64 nvKmsParamsUser,
+        NvU64 nvKmsParamsSize
+    );
+
+    /*!
+     * Duplicate an existing NVKMS KAPI memory object, taking a reference on
+     * the underlying memory.
+     *
+     * \param [in] device A device allocated using allocateDevice(). The
+     *                    memory being imported need not have been allocated
+     *                    against the same physical device this device object
+     *                    represents.
+     *
+     * \param [in] srcDevice The device associated with srcMemory.
+     *
+     * \param [in] srcMemory The memory object to duplicate.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory* (*dupMemory)
+    (
+        struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiDevice *srcDevice,
+        const struct NvKmsKapiMemory *srcMemory
+    );
+
+    /*!
+     * Export the specified memory object to a userspace object handle.
+     *
+     * This function accepts a driver-specific parameter structure representing
+     * a new handle to be assigned to an existing NVKMS KAPI memory object.
+     *
+     * \param [in] device A device allocated using allocateDevice(). The
+     *                    memory being exported must have been created against
+     *                    or imported to the same device object, and the
+     *                    destination object handle must be valid for this
+     *                    device as well.
+     *
+     * \param [in] memory The memory object to export.
+     *
+     * \param [in] nvKmsParamsUser Userspace pointer to driver-specific
+     *                             parameters specifying a handle to add to
+     *                             the memory object being exported.
+     *
+     * \param [in] nvKmsParamsSize Size of the driver-specific parameter
+     *                             struct.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*exportMemory)
+    (
+        const struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiMemory *memory,
+        NvU64 nvKmsParamsUser,
+        NvU64 nvKmsParamsSize
+    );
+
+    /*!
+     * Free memory allocated using allocateMemory().
+     *
+     * The memory object must no longer be in use.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] memory Memory allocated using allocateMemory().
+     */
+    void (*freeMemory)
+    (
+        struct NvKmsKapiDevice *device, struct NvKmsKapiMemory *memory
+    );
+
+    /*!
+     * Create MMIO mappings for a memory object allocated using
+     * allocateMemory().
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] memory Memory allocated using allocateMemory().
+     *
+     * \param [in] type Userspace or kernelspace mapping.
+     *
+     * \param [out] ppLinearAddress The MMIO address where the memory object
+     *                              is mapped.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*mapMemory)
+    (
+        const struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type,
+        void **ppLinearAddress
+    );
+
+    /*!
+     * Destroy MMIO mappings created for a memory object allocated using
+     * allocateMemory().
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] memory Memory allocated using allocateMemory().
+     *
+     * \param [in] type Userspace or kernelspace mapping.
+     *
+     * \param [in] pLinearAddress The MMIO address returned by mapMemory().
+     */
+    void (*unmapMemory)
+    (
+        const struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type,
+        const void *pLinearAddress
+    );
+
+    /*!
+     * Check whether the allocated memory object is video memory.
+     *
+     * \param [in] memory Memory allocated using allocateMemory().
+     *
+     * \return NV_TRUE if memory is vidmem, NV_FALSE otherwise.
+     */
+    NvBool (*isVidmem)(
+        const struct NvKmsKapiMemory *memory
+    );
+
+    /*!
+     * Create a formatted surface from an NvKmsKapiMemory object.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] params Parameters to the surface creation.
+     *
+     * \return A valid surface handle on success, NULL on failure.
+     */
+    struct NvKmsKapiSurface* (*createSurface)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiCreateSurfaceParams *params
+    );
+
+    /*!
+     * Destroy a surface created by createSurface().
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] surface A surface created using createSurface().
+     */
+    void (*destroySurface)
+    (
+        struct NvKmsKapiDevice *device, struct NvKmsKapiSurface *surface
+    );
+
+    /*!
+     * Enumerate the mode timings available on a given display.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] display A display handle returned by getDisplays().
+     *
+     * \param [in] modeIndex A mode index (any integer >= 0).
+     *
+     * \param [out] mode A pointer to an NvKmsKapiDisplayMode struct that
+     *                   the call will fill out with the mode-timings of the
+     *                   mode at index modeIndex.
+     *
+     * \param [out] valid Returns TRUE in this param if the mode-timings of
+     *                    the mode at index modeIndex are valid on the
+     *                    display.
+     *
+     * \param [out] preferredMode Returns TRUE if this mode is marked as
+     *                            "preferred" by the EDID.
+     *
+     * \return A value >= 1 if more modes are available, 0 if no more modes
+     *         are available, and a value < 0 on failure.
+     */
+    int (*getDisplayMode)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiDisplay display, NvU32 modeIndex,
+        struct NvKmsKapiDisplayMode *mode, NvBool *valid,
+        NvBool *preferredMode
+    );
+
+    /*!
+     * Validate the given mode timings on a given display.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] display A display handle returned by getDisplays().
+     *
+     * \param [in] mode A pointer to an NvKmsKapiDisplayMode struct filled
+     *                  with the mode-timings to validate.
+     *
+     * \return NV_TRUE if the mode-timings are valid, NV_FALSE on failure.
+     */
+    NvBool (*validateDisplayMode)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiDisplay display, const struct NvKmsKapiDisplayMode *mode
+    );
+
+    /*!
+     * Apply a mode configuration to the device.
+     *
+     * The client may flag only the damaged parts of the configuration, but
+     * it must still describe the entire configuration.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] requestedConfig Parameters describing a device-wide
+     *                             display configuration.
+     *
+     * \param [out] replyConfig Reply parameters describing the results of
+     *                          the modeset.
+     *
+     * \param [in] commit If 0, the call will only validate the mode
+     *                    configuration; it will not be applied.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*applyModeSetConfig)
+    (
+        struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiRequestedModeSetConfig *requestedConfig,
+        struct NvKmsKapiModeSetReplyConfig *replyConfig,
+        const NvBool commit
+    );
+
+    /*!
+     * Return the status of a flip.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] head A head returned by getDeviceResourcesInfo().
+     *
+     * \param [in] layer A layer index.
+     *
+     * \param [out] pending Returns TRUE if the head has a pending flip for
+     *                      the given layer.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getFlipPendingStatus)
+    (
+        const struct NvKmsKapiDevice *device,
+        const NvU32 head,
+        const NvU32 layer,
+        NvBool *pending
+    );
+
+    /*!
+     * Allocate an event callback.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] proc Function pointer to call when triggered.
+     *
+     * \param [in] data Argument to pass into the function.
+     *
+     * \param [in] nvKmsParamsUser Userspace pointer to driver-specific
+     *                             parameters describing the event callback
+     *                             being created.
+     *
+     * \param [in] nvKmsParamsSize Size of the driver-specific parameter
+     *                             struct.
+     *
+     * \return struct NvKmsKapiChannelEvent* on success, NULL on failure.
+     */
+    struct NvKmsKapiChannelEvent* (*allocateChannelEvent)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsChannelEventProc *proc,
+        void *data,
+        NvU64 nvKmsParamsUser,
+        NvU64 nvKmsParamsSize
+    );
+
+    /*!
+     * Free an event callback.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] cb struct NvKmsKapiChannelEvent* returned from
+     *                allocateChannelEvent().
+     */
+    void (*freeChannelEvent)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiChannelEvent *cb
+    );
+
+    /*!
+     * Get the 32-bit CRC value for the last contents presented on the
+     * specified head.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] head A head returned by getDeviceResourcesInfo().
+     *
+     * \param [out] crc32 The CRC32 generated from the content currently
+     *                    presented onto the given head.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getCRC32)
+    (
+        struct NvKmsKapiDevice *device,
+        NvU32 head,
+        struct NvKmsKapiCrcs *crc32
+    );
+
+    /*!
+     * Get the list of allocation pages corresponding to the specified memory
+     * object.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] memory The memory object for which to find the list of
+     *                    allocation pages and the number of pages.
+     *
+     * \param [out] pPages A pointer to the list of NvU64 pointers. The caller
+     *                     should free pPages on success using
+     *                     freeMemoryPages().
+     *
+     * \param [out] pNumPages Returns the total number of NvU64 pointers
+     *                        returned in pPages.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getMemoryPages)
+    (
+        const struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiMemory *memory,
+        NvU64 **pPages,
+        NvU32 *pNumPages
+    );
+
+    /*!
+     * Free the list of allocation pages returned by getMemoryPages().
+     *
+     * \param [in] pPages A list of NvU64 pointers allocated by
+     *                    getMemoryPages().
+     */
+    void (*freeMemoryPages)
+    (
+        NvU64 *pPages
+    );
+
+    /*
+     * Import an SGT as a memory handle.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] sgt SGT pointer.
+     * \param [in] gem GEM pointer that pinned the SGT, to be refcounted.
+     *
+     * \param [in] limit Size, in bytes, of the memory backed by the SGT.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory*
+    (*getSystemMemoryHandleFromSgt)(struct NvKmsKapiDevice *device,
+                                    NvP64 sgt,
+                                    NvP64 gem,
+                                    NvU32 limit);
+
+    /*
+     * Import a dma-buf as a memory handle.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] dmaBuf DMA-BUF pointer.
+     *
+     * \param [in] limit Size, in bytes, of the dma-buf.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory*
+    (*getSystemMemoryHandleFromDmaBuf)(struct NvKmsKapiDevice *device,
+                                       NvP64 dmaBuf,
+                                       NvU32 limit);
+
+    /*!
+     * Import a semaphore surface allocated elsewhere to NVKMS and return a
+     * handle to the new object.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] nvKmsParamsUser Userspace pointer to driver-specific
+     *                             parameters describing the semaphore
+     *                             surface being imported.
+     *
+     * \param [in] nvKmsParamsSize Size of the driver-specific parameter
+     *                             struct.
+     *
+     * \param [out] pSemaphoreMap Returns a CPU mapping of the semaphore
+     *                            surface's semaphore memory to the client.
+     *
+     * \param [out] pMaxSubmittedMap Returns a CPU mapping of the semaphore
+     *                               surface's max submitted value memory to
+     *                               the client.
+     *
+     * \return struct NvKmsKapiSemaphoreSurface* on success, NULL on failure.
+     */
+    struct NvKmsKapiSemaphoreSurface* (*importSemaphoreSurface)
+    (
+        struct NvKmsKapiDevice *device,
+        NvU64 nvKmsParamsUser,
+        NvU64 nvKmsParamsSize,
+        void **pSemaphoreMap,
+        void **pMaxSubmittedMap
+    );
+
+    /*!
+     * Free an imported semaphore surface.
+     *
+     * \param [in] device The device passed to
+     *                    importSemaphoreSurface() when creating
+     *                    semaphoreSurface.
+     *
+     * \param [in] semaphoreSurface A semaphore surface returned by
+     *                              importSemaphoreSurface().
+     */
+    void (*freeSemaphoreSurface)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiSemaphoreSurface *semaphoreSurface
+    );
+
+    /*!
+     * Register a callback to be called when a semaphore reaches a value.
+     *
+     * The callback will be called when the semaphore at index in
+     * semaphoreSurface reaches the value wait_value. The callback will
+     * be called at most once and is automatically unregistered when called.
+     * It may also be unregistered (i.e., cancelled) explicitly using the
+     * unregisterSemaphoreSurfaceCallback() function. To avoid leaking the
+     * memory used to track the registered callback, callers must ensure one
+     * of these methods of unregistration is used for every successful
+     * callback registration that returns a non-NULL pCallbackHandle.
+     *
+     * \param [in] device The device passed to
+     *                    importSemaphoreSurface() when creating
+     *                    semaphoreSurface.
+     *
+     * \param [in] semaphoreSurface A semaphore surface returned by
+     *                              importSemaphoreSurface().
+     *
+     * \param [in] pCallback A pointer to the function to call when
+     *                       the specified value is reached. NULL
+     *                       means no callback.
+     *
+     * \param [in] pData Arbitrary data to be passed back to the
+     *                   callback as its sole parameter.
+     *
+     * \param [in] index The index of the semaphore within
+     *                   semaphoreSurface.
+     *
+     * \param [in] wait_value The value the semaphore must reach or
+     *                        exceed before the callback is called.
+     *
+     * \param [in] new_value The value the semaphore will be set to
+     *                       when it reaches or exceeds wait_value.
+     *                       0 means do not update the value.
+     *
+     * \param [out] pCallbackHandle On success, the value pointed to will
+     *                              contain an opaque handle to the
+     *                              registered callback that may be used to
+     *                              cancel it if needed. Unused if pCallback
+     *                              is NULL.
+     *
+     * \return NVKMS_KAPI_REG_WAITER_SUCCESS if the waiter was registered, or
+     *         if no callback was requested and the semaphore at index has
+     *         already reached or exceeded wait_value.
+     *
+     *         NVKMS_KAPI_REG_WAITER_ALREADY_SIGNALLED if a callback was
+     *         requested and the semaphore at index has already reached or
+     *         exceeded wait_value.
+     *
+     *         NVKMS_KAPI_REG_WAITER_FAILED if waiter registration failed.
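+     *
+     * Example (illustrative sketch; myProc and myData are caller-defined,
+     * nvKms is this function table, and a new_value of 0 requests no value
+     * update on completion):
+     *
+     *   struct NvKmsKapiSemaphoreSurfaceCallback *handle = NULL;
+     *   NvKmsKapiRegisterWaiterResult res =
+     *       nvKms->registerSemaphoreSurfaceCallback(device, semaphoreSurface,
+     *                                               myProc, myData, index,
+     *                                               wait_value, 0, &handle);
+     *   if (res == NVKMS_KAPI_REG_WAITER_SUCCESS && handle != NULL) {
+     *       // Either myProc fires exactly once, or the registration must be
+     *       // cancelled with unregisterSemaphoreSurfaceCallback(device,
+     *       // semaphoreSurface, index, wait_value, handle).
+     *   }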
+     */
+    NvKmsKapiRegisterWaiterResult
+    (*registerSemaphoreSurfaceCallback)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiSemaphoreSurface *semaphoreSurface,
+        NvKmsSemaphoreSurfaceCallbackProc *pCallback,
+        void *pData,
+        NvU64 index,
+        NvU64 wait_value,
+        NvU64 new_value,
+        struct NvKmsKapiSemaphoreSurfaceCallback **pCallbackHandle
+    );
+
+    /*!
+     * Unregister a callback registered via registerSemaphoreSurfaceCallback().
+     *
+     * If the callback has not yet been called, this function will cancel the
+     * callback and free its associated resources.
+     *
+     * Note this function treats the callback handle as a pointer. While this
+     * function does not dereference that pointer itself, the underlying call
+     * to RM does, within a properly guarded critical section that first
+     * ensures it is not in the process of being used within a callback. This
+     * means callers must take into consideration that pointers are not in
+     * general unique handles if they may have been freed, since a subsequent
+     * malloc could return the same pointer value at that point. This call
+     * chain avoids that by leveraging the behavior of the underlying RM APIs:
+     *
+     * 1) A callback handle is referenced relative to its corresponding
+     *    (semaphore surface, index, wait_value) tuple here and within RM. It
+     *    is not a valid handle outside of that scope.
+     *
+     * 2) A callback cannot be registered against an already-reached value
+     *    for a given semaphore surface index.
+     *
+     * 3) A given callback handle cannot be registered twice against the same
+     *    (semaphore surface, index, wait_value) tuple, so unregistration will
+     *    never race with registration at the RM level, and would only race at
+     *    a higher level if used incorrectly. Since this is kernel code, we
+     *    can safely assume there won't be malicious clients purposely
+     *    misusing the API, but the burden is placed on the caller to ensure
+     *    its usage does not lead to races at higher levels.
+     *
+     * These factors considered together ensure any valid registered handle is
+     * either still in the relevant waiter list and refers to the same event/
+     * callback as when it was registered, or has been removed from the list
+     * as part of a critical section that also destroys the list itself and
+     * makes future lookups in that list impossible, and hence eliminates the
+     * chance of comparing a stale handle with a new handle of the same value
+     * as part of a lookup.
+     *
+     * \param [in] device The device passed to
+     *                    importSemaphoreSurface() when creating
+     *                    semaphoreSurface.
+     *
+     * \param [in] semaphoreSurface The semaphore surface passed to
+     *                              registerSemaphoreSurfaceCallback() when
+     *                              registering the callback.
+     *
+     * \param [in] index The index passed to
+     *                   registerSemaphoreSurfaceCallback() when
+     *                   registering the callback.
+     *
+     * \param [in] wait_value The wait_value passed to
+     *                        registerSemaphoreSurfaceCallback() when
+     *                        registering the callback.
+     *
+     * \param [in] callbackHandle The callback handle returned by
+     *                            registerSemaphoreSurfaceCallback().
+     */
+    NvBool
+    (*unregisterSemaphoreSurfaceCallback)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiSemaphoreSurface *semaphoreSurface,
+        NvU64 index,
+        NvU64 wait_value,
+        struct NvKmsKapiSemaphoreSurfaceCallback *callbackHandle
+    );
+
+    /*!
+     * Update the value of a semaphore surface from the CPU.
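+     *
+     * For example (illustrative, with nvKms this function table), setting
+     * the semaphore at index to 10 wakes any waiters registered for wait
+     * values up to and including 10:
+     *
+     *   nvKms->setSemaphoreSurfaceValue(device, semaphoreSurface, index, 10);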
+     *
+     * Update the semaphore value at the specified index from the CPU, then
+     * wake up any pending CPU waiters associated with that index that are
+     * waiting for it to reach a value less than or equal to the new value.
+     */
+    NvBool
+    (*setSemaphoreSurfaceValue)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiSemaphoreSurface *semaphoreSurface,
+        NvU64 index,
+        NvU64 new_value
+    );
+
+    /*!
+     * Set the callback function for suspending and resuming the display
+     * system.
+     */
+    void
+    (*setSuspendResumeCallback)
+    (
+        NvKmsKapiSuspendResumeCallbackFunc *function
+    );
+
+    /*!
+     * Immediately initialize the specified display semaphore to the pending
+     * state.
+     *
+     * Must be called prior to applying a mode set that utilizes the specified
+     * display semaphore for synchronization.
+     *
+     * \param [in] device The device which will utilize the semaphore.
+     *
+     * \param [in] semaphoreIndex Index of the desired semaphore within the
+     *                            NVKMS semaphore pool. Must be less than
+     *                            NvKmsKapiDeviceResourcesInfo::caps::numDisplaySemaphores
+     *                            for the specified device.
+     *
+     * \return NV_TRUE on success, NV_FALSE otherwise.
+     */
+    NvBool
+    (*tryInitDisplaySemaphore)
+    (
+        struct NvKmsKapiDevice *device,
+        NvU32 semaphoreIndex
+    );
+
+    /*!
+     * Immediately set the specified display semaphore to the displayable
+     * state.
+     *
+     * Must be called after \ref tryInitDisplaySemaphore to indicate a mode
+     * configuration change that utilizes the specified display semaphore for
+     * synchronization may proceed.
+     *
+     * \param [in] device The device which will utilize the semaphore.
+     *
+     * \param [in] semaphoreIndex Index of the desired semaphore within the
+     *                            NVKMS semaphore pool. Must be less than
+     *                            NvKmsKapiDeviceResourcesInfo::caps::numDisplaySemaphores
+     *                            for the specified device.
+     */
+    void
+    (*signalDisplaySemaphore)
+    (
+        struct NvKmsKapiDevice *device,
+        NvU32 semaphoreIndex
+    );
+
+    /*!
+     * Immediately cancel use of a display semaphore by resetting its value to
+     * its initial state.
+     *
+     * This can be used by clients to restore a semaphore to a consistent
+     * state when they have prepared it for use by previously calling
+     * \ref tryInitDisplaySemaphore() on it, but are then prevented from
+     * submitting the associated hardware operations to consume it due to the
+     * subsequent failure of some software or hardware operation.
+     *
+     * \param [in] device The device which will utilize the semaphore.
+     *
+     * \param [in] semaphoreIndex Index of the desired semaphore within the
+     *                            NVKMS semaphore pool. Must be less than
+     *                            NvKmsKapiDeviceResourcesInfo::caps::numDisplaySemaphores
+     *                            for the specified device.
+     */
+    void
+    (*cancelDisplaySemaphore)
+    (
+        struct NvKmsKapiDevice *device,
+        NvU32 semaphoreIndex
+    );
+
+    /*!
+     * Signal the VRR semaphore at the specified index from the CPU.
+     * If the device does not support VRR semaphores, this is a no-op.
+     * Returns NV_TRUE if the signal succeeded or was a no-op, NV_FALSE
+     * otherwise.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] index The VRR semaphore index to be signalled.
+     */
+    NvBool
+    (*signalVrrSemaphore)
+    (
+        struct NvKmsKapiDevice *device,
+        NvS32 index
+    );
+
+    /*!
+     * Check or wait on a head's LUT notifier.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] head The head to check for LUT completion.
+     *
+     * \param [in] waitForCompletion If true, wait for the notifier in NvKms
+     *                               before returning.
+     *
+     * \return NV_TRUE if the notifier has completed, NV_FALSE otherwise.
+     */
+    NvBool
+    (*checkLutNotifier)
+    (
+        struct NvKmsKapiDevice *device,
+        NvU32 head,
+        NvBool waitForCompletion
+    );
+
+    /*
+     * Notify NVKMS that the system's framebuffer console has been disabled
+     * and the reserved allocation for the old framebuffer console can be
+     * unmapped.
+     */
+    void
+    (*framebufferConsoleDisabled)
+    (
+        struct NvKmsKapiDevice *device
+    );
+};
+
+/** @} */
+
+/**
+ * \defgroup Functions
+ * @{
+ */
+
+NvBool nvKmsKapiGetFunctionsTable
+(
+    struct NvKmsKapiFunctionsTable *funcsTable
+);
+
+NvU32 nvKmsKapiF16ToF32(NvU16 a);
+
+NvU16 nvKmsKapiF32ToF16(NvU32 a);
+
+NvU32 nvKmsKapiF32Mul(NvU32 a, NvU32 b);
+
+NvU32 nvKmsKapiF32Div(NvU32 a, NvU32 b);
+
+NvU32 nvKmsKapiF32Add(NvU32 a, NvU32 b);
+
+NvU32 nvKmsKapiF32ToUI32RMinMag(NvU32 a, NvBool exact);
+
+NvU32 nvKmsKapiUI32ToF32(NvU32 a);
+
+/** @} */
+
+#endif /* defined(__NVKMS_KAPI_H__) */
diff --git a/kernel-open/common/inc/nvlimits.h b/kernel-open/common/inc/nvlimits.h
new file mode 100644
index 0000000..e7fad3f
--- /dev/null
+++ b/kernel-open/common/inc/nvlimits.h
@@ -0,0 +1,58 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: nvlimits.finn
+//
+
+
+
+
+/*
+ * This is the maximum number of GPUs supported in a single system.
+ */
+#define NV_MAX_DEVICES 32
+
+/*
+ * This is the maximum number of subdevices within a single device.
+ */
+#define NV_MAX_SUBDEVICES 8
+
+/*
+ * This is the maximum length of the process name string.
+ */
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+/*
+ * This is the maximum number of heads per GPU.
+ */
+#define NV_MAX_HEADS 4
+
+/*
+ * Maximum length of a MIG device UUID. It is a 36-byte UUID string plus a
+ * 4-byte prefix and NUL terminator: 'M' 'I' 'G' '-' UUID '\0'
+ */
+#define NV_MIG_DEVICE_UUID_STR_LENGTH 41U
diff --git a/kernel-open/common/inc/nvmisc.h b/kernel-open/common/inc/nvmisc.h
new file mode 100644
index 0000000..4407cdb
--- /dev/null
+++ b/kernel-open/common/inc/nvmisc.h
@@ -0,0 +1,1000 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * nvmisc.h
+ */
+#ifndef __NV_MISC_H
+#define __NV_MISC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif //__cplusplus
+
+#include "nvtypes.h"
+
+// Miscellaneous macros useful for bit field manipulations.
+#ifndef NVBIT
+#define NVBIT(b) (1U<<(b))
+#endif
+#ifndef NVBIT_TYPE
+#define NVBIT_TYPE(b, t) (((t)1U)<<(b))
+#endif
+#ifndef NVBIT32
+#define NVBIT32(b) NVBIT_TYPE(b, NvU32)
+#endif
+#ifndef NVBIT64
+#define NVBIT64(b) NVBIT_TYPE(b, NvU64)
+#endif
+
+// Concatenate two 32-bit values into a 64-bit value
+#define NV_CONCAT_32_TO_64(hi, lo) ((((NvU64)(hi)) << 32) | ((NvU64)(lo)))
+
+// Helper macros for 32-bit bitmasks
+#define NV_BITMASK32_ELEMENT_SIZE (sizeof(NvU32) << 3)
+#define NV_BITMASK32_IDX(chId) (((chId) & ~(0x1F)) >> 5)
+#define NV_BITMASK32_OFFSET(chId) ((chId) & (0x1F))
+#define NV_BITMASK32_SET(pChannelMask, chId) \
+    (pChannelMask)[NV_BITMASK32_IDX(chId)] |= NVBIT(NV_BITMASK32_OFFSET(chId))
+#define NV_BITMASK32_GET(pChannelMask, chId) \
+    ((pChannelMask)[NV_BITMASK32_IDX(chId)] & NVBIT(NV_BITMASK32_OFFSET(chId)))
+
+
+// Index of the 'on' bit (assuming that there is only one).
+// Even if multiple bits are 'on', the result is in the range 0-31.
+#define BIT_IDX_32(n) \
+    (((((n) & 0xFFFF0000U) != 0U) ? 0x10U: 0U) | \
+     ((((n) & 0xFF00FF00U) != 0U) ? 0x08U: 0U) | \
+     ((((n) & 0xF0F0F0F0U) != 0U) ? 0x04U: 0U) | \
+     ((((n) & 0xCCCCCCCCU) != 0U) ? 0x02U: 0U) | \
+     ((((n) & 0xAAAAAAAAU) != 0U) ? 0x01U: 0U) )
+
+// Index of the 'on' bit (assuming that there is only one).
+// Even if multiple bits are 'on', the result is in the range 0-63.
+#define BIT_IDX_64(n) \
+    (((((n) & 0xFFFFFFFF00000000ULL) != 0U) ? 0x20U: 0U) | \
+     ((((n) & 0xFFFF0000FFFF0000ULL) != 0U) ? 0x10U: 0U) | \
+     ((((n) & 0xFF00FF00FF00FF00ULL) != 0U) ? 0x08U: 0U) | \
+     ((((n) & 0xF0F0F0F0F0F0F0F0ULL) != 0U) ? 0x04U: 0U) | \
+     ((((n) & 0xCCCCCCCCCCCCCCCCULL) != 0U) ? 0x02U: 0U) | \
+     ((((n) & 0xAAAAAAAAAAAAAAAAULL) != 0U) ? 0x01U: 0U) )
+
+/*!
+ * DRF MACRO README:
+ *
+ * Glossary:
+ *     DRF: Device, Register, Field
+ *     FLD: Field
+ *     REF: Reference
+ *
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA 0xDEADBEEF
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_GAMMA 27:0
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA 31:28
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_ZERO 0x00000000
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_ONE 0x00000001
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_TWO 0x00000002
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_THREE 0x00000003
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_FOUR 0x00000004
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_FIVE 0x00000005
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_SIX 0x00000006
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_SEVEN 0x00000007
+ *
+ *
+ * Device = _DEVICE_OMEGA
+ *     This is the common "base" that a group of registers in a manual share
+ *
+ * Register = _REGISTER_ALPHA
+ *     Register for a given block of defines is the common root for one or more fields and constants
+ *
+ * Field(s) = _FIELD_GAMMA, _FIELD_ZETA
+ *     These are the bit ranges for a given field within the register
+ *     Fields are not required to have defined constant values (enumerations)
+ *
+ * Constant(s) = _ZERO, _ONE, _TWO, ...
+ *     These are named values (enums) a field can contain; the width of the constants should not be larger than the field width
+ *
+ * MACROS:
+ *
+ * DRF_SHIFT:
+ *     Bit index of the lower bound of a field
+ *     DRF_SHIFT(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 28
+ *
+ * DRF_SHIFT_RT:
+ *     Bit index of the higher bound of a field
+ *     DRF_SHIFT_RT(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 31
+ *
+ * DRF_MASK:
+ *     Produces a mask of 1s equal to the width of a field
+ *     DRF_MASK(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 0xF (four 1s starting at bit 0)
+ *
+ * DRF_SHIFTMASK:
+ *     Produces a mask of 1s equal to the width of a field at the location of the field
+ *     DRF_SHIFTMASK(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 0xF0000000
+ *
+ * DRF_DEF:
+ *     Shifts a field constant's value to the correct field offset
+ *     DRF_DEF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE) == 0x30000000
+ *
+ * DRF_NUM:
+ *     Shifts a number to the location of a particular field
+ *     DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 3) == 0x30000000
+ *     NOTE: If the value passed in is wider than the field, the value's high bits will be truncated
+ *
+ * DRF_SIZE:
+ *     Provides the width of the field in bits
+ *     DRF_SIZE(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 4
+ *
+ * DRF_VAL:
+ *     Provides the value of an input within the field specified
+ *     DRF_VAL(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0xABCD1234) == 0xA
+ *     This is sort of like the inverse of DRF_NUM
+ *
+ * DRF_IDX...:
+ *     These macros are similar to the above but for fields that accept an index argument
+ *
+ * FLD_SET_DRF:
+ *     Set the field bits in a given value with the given field constant
+ *     NvU32 x = 0x00001234;
+ *     x = FLD_SET_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, x);
+ *     x == 0x30001234;
+ *
+ * FLD_SET_DRF_NUM:
+ *     Same as FLD_SET_DRF but instead of using a field constant a literal/variable is passed in
+ *     NvU32 x = 0x00001234;
+ *     x = FLD_SET_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0xF, x);
+ *     x == 0xF0001234;
+ *
+ * FLD_IDX...:
+ *     These macros are similar to the above but for fields that accept an index argument
+ *
+ * FLD_TEST_DRF:
+ *     Test if the location specified by drf in 'v' has the same value as NV_drfc
+ *     FLD_TEST_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, 0x3000ABCD) == NV_TRUE
+ *
+ * FLD_TEST_DRF_NUM:
+ *     Test if the locations specified by drf in 'v' have the same value as n
+ *     FLD_TEST_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0x3, 0x3000ABCD) == NV_TRUE
+ *
+ * REF_DEF:
+ *     Like DRF_DEF but maintains the full symbol name (use in cases where "NV" is not prefixed to the field)
+ *     REF_DEF(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, _THREE) == 0x30000000
+ *
+ * REF_VAL:
+ *     Like DRF_VAL but maintains the full symbol name (use in cases where "NV" is not prefixed to the field)
+ *     REF_VAL(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xABCD1234) == 0xA
+ *
+ * REF_NUM:
+ *     Like DRF_NUM but maintains the full symbol name (use in cases where "NV" is not prefixed to the field)
+ *     REF_NUM(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xA) == 0xA0000000
+ *
+ * FLD_SET_REF_NUM:
+ *     Like FLD_SET_DRF_NUM but maintains the full symbol name (use in cases where "NV" is not prefixed to the field)
+ *     NvU32 x = 0x00001234;
+ *     x = FLD_SET_REF_NUM(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xF, x);
+ *     x == 0xF0001234;
+ *
+ * FLD_TEST_REF:
+ *     Like FLD_TEST_DRF but maintains the full symbol name (use in cases where "NV" is not prefixed to the field)
+ *     FLD_TEST_REF(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, _THREE, 0x3000ABCD) == NV_TRUE
+ *
+ * Other macros:
+ *     There are a plethora of other macros below that extend the above (notably Multi-Word (MW), 64-bit, and some
+ *     reg read/write variations). I hope these are self-explanatory. If you have a need to use them, you
+ *     probably have some knowledge of how they work.
+ */
+
+// tegra mobile uses nvmisc_macros.h and can't access nvmisc.h... and sometimes both get included.
+#ifndef _NVMISC_MACROS_H
+// Use Coverity Annotation to mark issues as false positives/ignore when using single bit defines.
+#define DRF_ISBIT(bitval,drf)                   \
+    ( /* coverity[identical_branches] */        \
+      (bitval != 0) ? drf )
+#define DEVICE_BASE(d)          (0?d)  // what's up with this name?  totally non-parallel to the macros below
+#define DEVICE_EXTENT(d)        (1?d)  // what's up with this name?  totally non-parallel to the macros below
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+#ifdef MISRA_14_3
+#define DRF_BASE(drf)           (drf##_LOW_FIELD)
+#define DRF_EXTENT(drf)         (drf##_HIGH_FIELD)
+#define DRF_SHIFT(drf)          ((drf##_LOW_FIELD) % 32U)
+#define DRF_SHIFT_RT(drf)       ((drf##_HIGH_FIELD) % 32U)
+#define DRF_SIZE(drf)           ((drf##_HIGH_FIELD)-(drf##_LOW_FIELD)+1U)
+#define DRF_MASK(drf)           (0xFFFFFFFFU >> (31U - ((drf##_HIGH_FIELD) % 32U) + ((drf##_LOW_FIELD) % 32U)))
+#else
+#define DRF_BASE(drf)           (NV_FALSE?drf)  // much better
+#define DRF_EXTENT(drf)         (NV_TRUE?drf)  // much better
+#define DRF_SHIFT(drf)          (((NvU32)DRF_BASE(drf)) % 32U)
+#define DRF_SHIFT_RT(drf)       (((NvU32)DRF_EXTENT(drf)) % 32U)
+#define DRF_SIZE(drf)           (DRF_EXTENT(drf)-DRF_BASE(drf)+1U)
+#define DRF_MASK(drf)           (0xFFFFFFFFU>>(31U - DRF_SHIFT_RT(drf) + DRF_SHIFT(drf)))
+#endif
+#define DRF_SHIFTMASK(drf)      (DRF_MASK(drf)<<(DRF_SHIFT(drf)))
+#define DRF_DEF(d,r,f,c)        (((NvU32)(NV ## d ## r ## f ## c))<<DRF_SHIFT(NV ## d ## r ## f))
+#define DRF_NUM(d,r,f,n)        ((((NvU32)(n))&DRF_MASK(NV ## d ## r ## f))<<DRF_SHIFT(NV ## d ## r ## f))
+#define DRF_VAL(d,r,f,v)        ((((NvU32)(v))>>DRF_SHIFT(NV ## d ## r ## f))&DRF_MASK(NV ## d ## r ## f))
+#else
+#define DRF_BASE(drf)           (0?drf)  // much better
+#define DRF_EXTENT(drf)         (1?drf)  // much better
+#define DRF_SHIFT(drf)          ((DRF_ISBIT(0,drf)) % 32)
+#define DRF_SHIFT_RT(drf)       ((DRF_ISBIT(1,drf)) % 32)
+#define DRF_SIZE(drf)           (DRF_EXTENT(drf)-DRF_BASE(drf)+1)
+#define DRF_SHIFTMASK(drf)      (DRF_MASK(drf)<<(DRF_SHIFT(drf)))
+#define DRF_MASK(drf)           (0xFFFFFFFFU>>(31-((DRF_ISBIT(1,drf)) % 32)+((DRF_ISBIT(0,drf)) % 32)))
+#define DRF_DEF(d,r,f,c)        ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV ## d ## r ## f))
+#define DRF_NUM(d,r,f,n)        (((n)&DRF_MASK(NV ## d ## r ## f))<<DRF_SHIFT(NV ## d ## r ## f))
+#define DRF_VAL(d,r,f,v)        (((v)>>DRF_SHIFT(NV ## d ## r ## f))&DRF_MASK(NV ## d ## r ## f))
+#endif
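+
+// Worked example (illustrative only; NV_PEXAMPLE_CTRL and its field/constant
+// definitions below are hypothetical and follow the README conventions above):
+//
+//   #define NV_PEXAMPLE_CTRL_MODE        3:0
+//   #define NV_PEXAMPLE_CTRL_MODE_FAST   0x00000002
+//
+//   NvU32 v = 0;
+//   v = FLD_SET_DRF(_PEXAMPLE, _CTRL, _MODE, _FAST, v);   // v == 0x00000002
+//   NvU32 mode = DRF_VAL(_PEXAMPLE, _CTRL, _MODE, v);     // mode == 0x2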
+
+// Signed version of DRF_VAL, which takes care of extending sign bit.
+#define DRF_VAL_SIGNED(d,r,f,v)         (((DRF_VAL(d,r,f,(v)) ^ (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1U)))) - (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1U)))
+#define DRF_IDX_DEF(d,r,f,i,c)          ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV##d##r##f(i)))
+#define DRF_IDX_OFFSET_DEF(d,r,f,i,o,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV##d##r##f(i,o)))
+#define DRF_IDX_NUM(d,r,f,i,n)          (((n)&DRF_MASK(NV##d##r##f(i)))<<DRF_SHIFT(NV##d##r##f(i)))
+#define DRF_IDX_VAL(d,r,f,i,v)          (((v)>>DRF_SHIFT(NV##d##r##f(i)))&DRF_MASK(NV##d##r##f(i)))
+#define DRF_IDX_OFFSET_VAL(d,r,f,i,o,v) (((v)>>DRF_SHIFT(NV##d##r##f(i,o)))&DRF_MASK(NV##d##r##f(i,o)))
+// Fractional version of DRF_VAL which reads Fx.y fixed point number (x.y)*z
+#define DRF_VAL_FRAC(d,r,x,y,v,z)       ((DRF_VAL(d,r,x,(v))*z) + ((DRF_VAL(d,r,y,v)*z) / (1<<DRF_SIZE(NV##d##r##y))))
+
+#define FLD_SET_DRF(d,r,f,c,v)          (((v) & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_DEF(d,r,f,c))
+#define FLD_SET_DRF_NUM(d,r,f,n,v)      (((v) & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_NUM(d,r,f,n))
+#define FLD_IDX_SET_DRF(d,r,f,i,c,v)    (((v) & ~DRF_SHIFTMASK(NV##d##r##f(i))) | DRF_IDX_DEF(d,r,f,i,c))
+#define FLD_IDX_SET_DRF_NUM(d,r,f,i,n,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f(i))) | DRF_IDX_NUM(d,r,f,i,n))
+#define FLD_TEST_DRF(d,r,f,c,v)         (DRF_VAL(d, r, f, (v)) == NV##d##r##f##c)
+#define FLD_TEST_DRF_AND(d,r,f,c,v)     (DRF_VAL(d, r, f, (v)) & NV##d##r##f##c)
+#define FLD_TEST_DRF_NUM(d,r,f,n,v)     (DRF_VAL(d, r, f, (v)) == (n))
+#define FLD_IDX_TEST_DRF(d,r,f,i,c,v)   (DRF_IDX_VAL(d, r, f, i, (v)) == NV##d##r##f##c)
+
+// 64-bit versions
+#define DRF_SHIFT64(drf)                ((DRF_ISBIT(0,drf)) % 64)
+#define DRF_MASK64(drf)                 (NV_U64_MAX>>(63-((DRF_ISBIT(1,drf)) % 64)+((DRF_ISBIT(0,drf)) % 64)))
+#define DRF_SHIFTMASK64(drf)            (DRF_MASK64(drf)<<(DRF_SHIFT64(drf)))
+
+#define DRF_DEF64(d,r,f,c)              (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV ## d ## r ## f))
+#define DRF_NUM64(d,r,f,n)              ((((NvU64)(n))&DRF_MASK64(NV ## d ## r ## f))<<DRF_SHIFT64(NV ## d ## r ## f))
+#define DRF_VAL64(d,r,f,v)              ((((NvU64)(v))>>DRF_SHIFT64(NV ## d ## r ## f))&DRF_MASK64(NV ## d ## r ## f))
+
+#define DRF_VAL_SIGNED64(d,r,f,v)       (((DRF_VAL64(d,r,f,(v)) ^ (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1)))) - (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1)))
+#define DRF_IDX_DEF64(d,r,f,i,c)        (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV##d##r##f(i)))
+#define DRF_IDX_OFFSET_DEF64(d,r,f,i,o,c) (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV##d##r##f(i,o)))
+#define DRF_IDX_NUM64(d,r,f,i,n)        ((((NvU64)(n))&DRF_MASK64(NV##d##r##f(i)))<<DRF_SHIFT64(NV##d##r##f(i)))
+#define DRF_IDX_VAL64(d,r,f,i,v)        ((((NvU64)(v))>>DRF_SHIFT64(NV##d##r##f(i)))&DRF_MASK64(NV##d##r##f(i)))
+#define DRF_IDX_OFFSET_VAL64(d,r,f,i,o,v) (((NvU64)(v)>>DRF_SHIFT64(NV##d##r##f(i,o)))&DRF_MASK64(NV##d##r##f(i,o)))
+
+#define FLD_SET_DRF64(d,r,f,c,v)        (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c))
+#define FLD_SET_DRF_NUM64(d,r,f,n,v)    ((((NvU64)(v)) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_NUM64(d,r,f,n))
+#define FLD_IDX_SET_DRF64(d,r,f,i,c,v)  (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c))
+#define FLD_IDX_OFFSET_SET_DRF64(d,r,f,i,o,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i,o))) | DRF_IDX_OFFSET_DEF64(d,r,f,i,o,c))
+#define FLD_IDX_SET_DRF_DEF64(d,r,f,i,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c))
+#define FLD_IDX_SET_DRF_NUM64(d,r,f,i,n,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_NUM64(d,r,f,i,n))
+#define FLD_SET_DRF_IDX64(d,r,f,c,i,v)  (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c(i)))
+
+#define FLD_TEST_DRF64(d,r,f,c,v)       (DRF_VAL64(d, r, f, (v)) == NV##d##r##f##c)
+#define FLD_TEST_DRF_AND64(d,r,f,c,v)   (DRF_VAL64(d, r, f, (v)) & NV##d##r##f##c)
+#define FLD_TEST_DRF_NUM64(d,r,f,n,v)   (DRF_VAL64(d, r, f, (v)) == (n))
+#define FLD_IDX_TEST_DRF64(d,r,f,i,c,v) (DRF_IDX_VAL64(d, r, f, i, (v)) == NV##d##r##f##c)
+#define FLD_IDX_OFFSET_TEST_DRF64(d,r,f,i,o,c,v) (DRF_IDX_OFFSET_VAL64(d, r, f, i, o, (v)) == NV##d##r##f##c)
+
+#define REF_DEF64(drf,d)            (((drf ## d)&DRF_MASK64(drf))<<DRF_SHIFT64(drf))
+#define REF_VAL64(drf,v)            (((v)>>DRF_SHIFT64(drf))&DRF_MASK64(drf))
+#if defined(NV_MISRA_COMPLIANCE_REQUIRED) && defined(MISRA_14_3)
+#define REF_NUM64(drf,n)            (((NvU64)(n)&(0xFFFFFFFFFFFFFFFFU>>(63U-((drf##_HIGH_FIELD) % 63U)+((drf##_LOW_FIELD) % 63U)))) << ((drf##_LOW_FIELD) % 63U))
+#else
+#define REF_NUM64(drf,n)            (((NvU64)(n)&DRF_MASK64(drf))<<DRF_SHIFT64(drf))
+#endif
+
+#define REF_DEF(drf,d)              (((drf ## d)&DRF_MASK(drf))<<DRF_SHIFT(drf))
+#define REF_VAL(drf,v)              (((v)>>DRF_SHIFT(drf))&DRF_MASK(drf))
+#if defined(NV_MISRA_COMPLIANCE_REQUIRED) && defined(MISRA_14_3)
+#define REF_NUM(drf,n)              (((n)&(0xFFFFFFFFU>>(31U-((drf##_HIGH_FIELD) % 32U)+((drf##_LOW_FIELD) % 32U)))) << ((drf##_LOW_FIELD) % 32U))
+#else
+#define REF_NUM(drf,n)              (((n)&DRF_MASK(drf))<<DRF_SHIFT(drf))
+#endif
+
+#define FLD_SET_REF_NUM(drf,n,v)    (((v) & ~DRF_SHIFTMASK(drf)) | REF_NUM(drf,n))
+#define FLD_TEST_REF(drf,c,v)       (REF_VAL(drf, (v)) == drf##c)
+
+#define CR_DRF_DEF(d,r,f,c)         ((CR ## d ## r ## f ## c)<<DRF_SHIFT(CR ## d ## r ## f))
+#define CR_DRF_NUM(d,r,f,n)         (((n)&DRF_MASK(CR ## d ## r ## f))<<DRF_SHIFT(CR ## d ## r ## f))
+#define CR_DRF_VAL(d,r,f,v)         (((v)>>DRF_SHIFT(CR ## d ## r ## f))&DRF_MASK(CR ## d ## r ## f))
+
+// Multi-word (MW) field manipulations.  For multi-word structures (e.g., Fermi SPH),
+// fields may have bit numbers beyond 32.  To avoid errors using "classic" multi-word macros,
+// all the field extents are defined as "MW(X)".  For example, MW(127:96) means
+// the field is in bits 0-31 of word number 3 of the structure.
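+// For example, a hypothetical field defined as
+//   #define NV_EXAMPLE_SPH_FLAG   MW(97:95)
+// occupies bit 31 of word 2 and bits 1:0 of word 3; DRF_VAL_MW(_EXAMPLE, _SPH,
+// _FLAG, pWords) reassembles those three bits into a single 3-bit value.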
+//
+// DRF_VAL_MW() macro is meant to be used for native endian 32-bit aligned 32-bit word data,
+// not for byte stream data.
+//
+// DRF_VAL_BS() macro is for byte stream data used in fbQueryBIOS_XXX().
+//
+#define DRF_EXPAND_MW(drf)         drf                          // used to turn "MW(a:b)" into "a:b"
+#define DRF_PICK_MW(drf,v)         ((v)? DRF_EXPAND_##drf)      // picks low or high bits
+#define DRF_WORD_MW(drf)           (DRF_PICK_MW(drf,0)/32)      // which word in a multi-word array
+#define DRF_BASE_MW(drf)           (DRF_PICK_MW(drf,0)%32)      // which start bit in the selected word?
+#define DRF_EXTENT_MW(drf)         (DRF_PICK_MW(drf,1)%32)      // which end bit in the selected word
+#define DRF_SHIFT_MW(drf)          (DRF_PICK_MW(drf,0)%32)
+#define DRF_MASK_MW(drf)           (0xFFFFFFFFU>>((31-(DRF_EXTENT_MW(drf))+(DRF_BASE_MW(drf)))%32))
+#define DRF_SHIFTMASK_MW(drf)      ((DRF_MASK_MW(drf))<<(DRF_SHIFT_MW(drf)))
+#define DRF_SIZE_MW(drf)           (DRF_EXTENT_MW(drf)-DRF_BASE_MW(drf)+1)
+
+#define DRF_DEF_MW(d,r,f,c)        ((NV##d##r##f##c) << DRF_SHIFT_MW(NV##d##r##f))
+#define DRF_NUM_MW(d,r,f,n)        (((n)&DRF_MASK_MW(NV##d##r##f))<<DRF_SHIFT_MW(NV##d##r##f))
+#define DRF_VAL_MW_1WORD(d,r,f,v)  ((((v)[DRF_WORD_MW(NV##d##r##f)])>>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f))
+#define DRF_SPANS(drf)             ((DRF_PICK_MW(drf,0)/32) != (DRF_PICK_MW(drf,1)/32))
+#define DRF_WORD_MW_LOW(drf)       (DRF_PICK_MW(drf,0)/32)
+#define DRF_WORD_MW_HIGH(drf)      (DRF_PICK_MW(drf,1)/32)
+#define DRF_MASK_MW_LOW(drf)       (0xFFFFFFFFU)
+#define DRF_MASK_MW_HIGH(drf)      (0xFFFFFFFFU>>(31-(DRF_EXTENT_MW(drf))))
+#define DRF_SHIFT_MW_LOW(drf)      (DRF_PICK_MW(drf,0)%32)
+#define DRF_SHIFT_MW_HIGH(drf)     (0)
+#define DRF_MERGE_SHIFT(drf)       ((32-((DRF_PICK_MW(drf,0)%32)))%32)
+#define DRF_VAL_MW_2WORD(d,r,f,v)  (((((v)[DRF_WORD_MW_LOW(NV##d##r##f)])>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \
+                                    (((((v)[DRF_WORD_MW_HIGH(NV##d##r##f)])>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f)))
+#define DRF_VAL_MW(d,r,f,v)        ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_MW_2WORD(d,r,f,v) : DRF_VAL_MW_1WORD(d,r,f,v) )
+
+#define DRF_IDX_DEF_MW(d,r,f,i,c)  ((NV##d##r##f##c)<<DRF_SHIFT_MW(NV##d##r##f(i)))
+#define DRF_IDX_NUM_MW(d,r,f,i,n)  (((n)&DRF_MASK_MW(NV##d##r##f(i)))<<DRF_SHIFT_MW(NV##d##r##f(i)))
+#define DRF_IDX_VAL_MW(d,r,f,i,v)  ((((v)[DRF_WORD_MW(NV##d##r##f(i))])>>DRF_SHIFT_MW(NV##d##r##f(i)))&DRF_MASK_MW(NV##d##r##f(i)))
+
+//
+// Logically OR all DRF_DEF constants indexed from zero to s (semi-inclusive).
+// Caution: Target variable v must be pre-initialized.
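+//
+// For example (illustrative; the indexed field and its constants below are
+// hypothetical):
+//
+//   #define NV_PEXAMPLE_CTRL_BIT(i)      (i):(i)
+//   #define NV_PEXAMPLE_CTRL_BIT_ON      0x00000001
+//   #define NV_PEXAMPLE_CTRL_BIT__SIZE   4
+//
+//   NvU32 v = 0;
+//   FLD_IDX_OR_DRF_DEF(_PEXAMPLE, _CTRL, _BIT, _ON, __SIZE, v);  // v == 0xF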
+//
+#define FLD_IDX_OR_DRF_DEF(d,r,f,c,s,v)                 \
+do                                                      \
+{   NvU32 idx;                                          \
+    for (idx = 0; idx < (NV ## d ## r ## f ## s); ++idx)\
+    {                                                   \
+        v |= DRF_IDX_DEF(d,r,f,idx,c);                  \
+    }                                                   \
+} while(0)
+
+
+#define FLD_MERGE_MW(drf,n,v)           (((v)[DRF_WORD_MW(drf)] & ~DRF_SHIFTMASK_MW(drf)) | n)
+#define FLD_ASSIGN_MW(drf,n,v)          ((v)[DRF_WORD_MW(drf)] = FLD_MERGE_MW(drf, n, v))
+#define FLD_IDX_MERGE_MW(drf,i,n,v)     (((v)[DRF_WORD_MW(drf(i))] & ~DRF_SHIFTMASK_MW(drf(i))) | n)
+#define FLD_IDX_ASSIGN_MW(drf,i,n,v)    ((v)[DRF_WORD_MW(drf(i))] = FLD_MERGE_MW(drf(i), n, v))
+
+#define FLD_SET_DRF_MW(d,r,f,c,v)               FLD_MERGE_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v)
+#define FLD_SET_DRF_NUM_MW(d,r,f,n,v)           FLD_ASSIGN_MW(NV##d##r##f, DRF_NUM_MW(d,r,f,n), v)
+#define FLD_SET_DRF_DEF_MW(d,r,f,c,v)           FLD_ASSIGN_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v)
+#define FLD_IDX_SET_DRF_MW(d,r,f,i,c,v)         FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v)
+#define FLD_IDX_SET_DRF_DEF_MW(d,r,f,i,c,v)     FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v)
+#define FLD_IDX_SET_DRF_NUM_MW(d,r,f,i,n,v)     FLD_IDX_ASSIGN_MW(NV##d##r##f, i, DRF_IDX_NUM_MW(d,r,f,i,n), v)
+
+#define FLD_TEST_DRF_MW(d,r,f,c,v)          ((DRF_VAL_MW(d, r, f, (v)) == NV##d##r##f##c))
+#define FLD_TEST_DRF_NUM_MW(d,r,f,n,v)      ((DRF_VAL_MW(d, r, f, (v)) == n))
+#define FLD_IDX_TEST_DRF_MW(d,r,f,i,c,v)    ((DRF_IDX_VAL_MW(d, r, f, i, (v)) == NV##d##r##f##c))
+
+#define DRF_VAL_BS(d,r,f,v)    ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_BS_2WORD(d,r,f,(v)) : DRF_VAL_BS_1WORD(d,r,f,(v)) )
+
+//------------------------------------------------------------------------//
+//                                                                        //
+//         Common defines for engine register reference wrappers         //
+//                                                                        //
+// New engine addressing can be created like:                            //
+// \#define ENG_REG_PMC(o,d,r)      NV##d##r                             //
+// \#define ENG_IDX_REG_CE(o,d,i,r) CE_MAP(o,r,i)                        //
+//                                                                        //
+// See FB_FBPA* for more examples                                        //
+//------------------------------------------------------------------------//
+
+#define ENG_RD_REG(g,o,d,r)         GPU_REG_RD32(g, ENG_REG##d(o,d,r))
+#define ENG_WR_REG(g,o,d,r,v)       GPU_REG_WR32(g, ENG_REG##d(o,d,r), (v))
+#define ENG_RD_DRF(g,o,d,r,f)       ((GPU_REG_RD32(g, ENG_REG##d(o,d,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define ENG_WR_DRF_DEF(g,o,d,r,f,c) GPU_REG_WR32(g, ENG_REG##d(o,d,r),(GPU_REG_RD32(g,ENG_REG##d(o,d,r))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
+#define ENG_TEST_DRF_DEF(g,o,d,r,f,c) (ENG_RD_DRF(g, o, d, r, f) == NV##d##r##f##c)
+
+#define ENG_RD_IDX_DRF(g,o,d,r,f,i) ((GPU_REG_RD32(g, ENG_REG##d(o,d,r(i)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define ENG_TEST_IDX_DRF_DEF(g,o,d,r,f,c,i) (ENG_RD_IDX_DRF(g, o, d, r, f, (i)) == NV##d##r##f##c)
+
+#define ENG_IDX_RD_REG(g,o,d,i,r)   GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r))
+#define ENG_IDX_WR_REG(g,o,d,i,r,v) GPU_REG_WR32(g, ENG_IDX_REG##d(o,d,i,r), (v))
+
+#define ENG_IDX_RD_DRF(g,o,d,i,r,f) ((GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+
+//
+// DRF_READ_1WORD_BS() and DRF_READ_1WORD_BS_HIGH() do not read beyond the bytes that contain
+// the requested value.  Reading beyond the actual data causes a page fault panic when the
+// immediately following page happened to be protected or not mapped.
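+//
+// For example, for a field whose word-relative extent is below bit 16,
+// DRF_READ_1WORD_BS() reads only the two low-order bytes of that word from
+// the stream.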
+//
+#define DRF_VAL_BS_1WORD(d,r,f,v)  ((DRF_READ_1WORD_BS(d,r,f,v)>>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f))
+#define DRF_VAL_BS_2WORD(d,r,f,v)  (((DRF_READ_4BYTE_BS(NV##d##r##f,v)>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \
+                                    (((DRF_READ_1WORD_BS_HIGH(d,r,f,v)>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f)))
+
+#define DRF_READ_1BYTE_BS(drf,v)   ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4]))
+#define DRF_READ_2BYTE_BS(drf,v)   (DRF_READ_1BYTE_BS(drf,v)| \
+                                    ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+1])<<8))
+#define DRF_READ_3BYTE_BS(drf,v)   (DRF_READ_2BYTE_BS(drf,v)| \
+                                    ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+2])<<16))
+#define DRF_READ_4BYTE_BS(drf,v)   (DRF_READ_3BYTE_BS(drf,v)| \
+                                    ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+3])<<24))
+
+#define DRF_READ_1BYTE_BS_HIGH(drf,v)   ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4]))
+#define DRF_READ_2BYTE_BS_HIGH(drf,v)   (DRF_READ_1BYTE_BS_HIGH(drf,v)| \
+                                         ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+1])<<8))
+#define DRF_READ_3BYTE_BS_HIGH(drf,v)   (DRF_READ_2BYTE_BS_HIGH(drf,v)| \
+                                         ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+2])<<16))
+#define DRF_READ_4BYTE_BS_HIGH(drf,v)   (DRF_READ_3BYTE_BS_HIGH(drf,v)| \
+                                         ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+3])<<24))
+
+// Calculate 2^n - 1 and avoid shift counter overflow
+//
+// On Windows amd64, 64 << 64 => 1
+//
+#define NV_TWO_N_MINUS_ONE(n) (((1ULL<<(n/2))<<((n+1)/2))-1)
+
+//
+// Create a 64b bitmask with n bits set
+// This is the same as ((1ULL<<n) - 1), but avoids shift counter overflow
+// for n == 64.
+//
+#define NV_BITMASK64(n) ((n<1) ? 0ULL : (NV_U64_MAX>>((n>64) ? 0 : (64-n))))
+
+#define DRF_READ_1WORD_BS(d,r,f,v) \
+    ((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS(NV##d##r##f,(v)): \
+    ((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS(NV##d##r##f,(v)): \
+    ((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS(NV##d##r##f,(v)): \
+        DRF_READ_4BYTE_BS(NV##d##r##f,(v)))))
+
+#define DRF_READ_1WORD_BS_HIGH(d,r,f,v) \
+    ((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS_HIGH(NV##d##r##f,(v)): \
+    ((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS_HIGH(NV##d##r##f,(v)): \
+    ((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS_HIGH(NV##d##r##f,(v)): \
+        DRF_READ_4BYTE_BS_HIGH(NV##d##r##f,(v)))))
+
+#define LOWESTBIT(x)            ( (x) &  (((x) - 1U) ^ (x)) )
+// Destructive operation on n32
+#define HIGHESTBIT(n32)     \
+{                           \
+    HIGHESTBITIDX_32(n32);  \
+    n32 = NVBIT(n32);       \
+}
+#define ONEBITSET(x)            ( ((x) != 0U) && (((x) & ((x) - 1U)) == 0U) )
+
+// Destructive operation on n32
+#define NUMSETBITS_32(n32)                                         \
+{                                                                  \
+    n32 = n32 - ((n32 >> 1) & 0x55555555);                         \
+    n32 = (n32 & 0x33333333) + ((n32 >> 2) & 0x33333333);          \
+    n32 = (((n32 + (n32 >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;  \
+}
+
+/*!
+ * Calculate number of bits set in a 32-bit unsigned integer.
+ * Pure typesafe alternative to @ref NUMSETBITS_32.
+ */
+static NV_FORCEINLINE NvU32
+nvPopCount32(const NvU32 x)
+{
+    NvU32 temp = x;
+    temp = temp - ((temp >> 1) & 0x55555555U);
+    temp = (temp & 0x33333333U) + ((temp >> 2) & 0x33333333U);
+    temp = (((temp + (temp >> 4)) & 0x0F0F0F0FU) * 0x01010101U) >> 24;
+    return temp;
+}
+
+/*!
+ * Calculate number of bits set in a 64-bit unsigned integer.
+ */
+static NV_FORCEINLINE NvU32
+nvPopCount64(const NvU64 x)
+{
+    NvU64 temp = x;
+    temp = temp - ((temp >> 1) & 0x5555555555555555ULL);
+    temp = (temp & 0x3333333333333333ULL) + ((temp >> 2) & 0x3333333333333333ULL);
+    temp = (temp + (temp >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
+    temp = (temp * 0x0101010101010101ULL) >> 56;
+    return (NvU32)temp;
+}
+
+/*!
+ * Determine how many bits are set below a bit index within a mask. + * This assigns a dense ordering to the set bits in the mask. + * + * For example the mask 0xCD contains 5 set bits: + * nvMaskPos32(0xCD, 0) == 0 + * nvMaskPos32(0xCD, 2) == 1 + * nvMaskPos32(0xCD, 3) == 2 + * nvMaskPos32(0xCD, 6) == 3 + * nvMaskPos32(0xCD, 7) == 4 + */ +static NV_FORCEINLINE NvU32 +nvMaskPos32(const NvU32 mask, const NvU32 bitIdx) +{ + return nvPopCount32(mask & (NVBIT32(bitIdx) - 1U)); +} + +// Destructive operation on n32 +#define LOWESTBITIDX_32(n32) \ +{ \ + n32 = BIT_IDX_32(LOWESTBIT(n32));\ +} + +// Destructive operation on n64 +#define LOWESTBITIDX_64(n64) \ +{ \ + n64 = BIT_IDX_64(LOWESTBIT(n64));\ +} + +// Destructive operation on n32 +#define HIGHESTBITIDX_32(n32) \ +{ \ + NvU32 count = 0; \ + while (n32 >>= 1) \ + { \ + count++; \ + } \ + n32 = count; \ +} + +// Destructive operation on n64 +#define HIGHESTBITIDX_64(n64) \ +{ \ + NvU64 count = 0; \ + while (n64 >>= 1) \ + { \ + count++; \ + } \ + n64 = count; \ +} + +// Destructive operation on n32 +#define ROUNDUP_POW2(n32) \ +{ \ + n32--; \ + n32 |= n32 >> 1; \ + n32 |= n32 >> 2; \ + n32 |= n32 >> 4; \ + n32 |= n32 >> 8; \ + n32 |= n32 >> 16; \ + n32++; \ +} + +/*! + * Round up a 32-bit unsigned integer to the next power of 2. + * Pure typesafe alternative to @ref ROUNDUP_POW2. + * + * param[in] x must be in range [0, 2^31] to avoid overflow. + */ +static NV_FORCEINLINE NvU32 +nvNextPow2_U32(const NvU32 x) +{ + NvU32 y = x; + y--; + y |= y >> 1; + y |= y >> 2; + y |= y >> 4; + y |= y >> 8; + y |= y >> 16; + y++; + return y; +} + + +static NV_FORCEINLINE NvU32 +nvPrevPow2_U32(const NvU32 x ) +{ + NvU32 y = x; + y |= (y >> 1); + y |= (y >> 2); + y |= (y >> 4); + y |= (y >> 8); + y |= (y >> 16); + return y - (y >> 1); +} + +static NV_FORCEINLINE NvU64 +nvPrevPow2_U64(const NvU64 x ) +{ + NvU64 y = x; + y |= (y >> 1); + y |= (y >> 2); + y |= (y >> 4); + y |= (y >> 8); + y |= (y >> 16); + y |= (y >> 32); + return y - (y >> 1); +} + +// Destructive operation on n64 +#define ROUNDUP_POW2_U64(n64) \ +{ \ + n64--; \ + n64 |= n64 >> 1; \ + n64 |= n64 >> 2; \ + n64 |= n64 >> 4; \ + n64 |= n64 >> 8; \ + n64 |= n64 >> 16; \ + n64 |= n64 >> 32; \ + n64++; \ +} + +#define NV_SWAP_U8(a,b) \ +{ \ + NvU8 temp; \ + temp = a; \ + a = b; \ + b = temp; \ +} + +#define NV_SWAP_U32(a,b) \ +{ \ + NvU32 temp; \ + temp = a; \ + a = b; \ + b = temp; \ +} + +/*! + * @brief Macros allowing simple iteration over bits set in a given mask. + * + * @param[in] maskWidth bit-width of the mask (allowed: 8, 16, 32, 64) + * + * @param[in,out] index lvalue that is used as a bit index in the loop + * (can be declared as any NvU* or NvS* variable) + * @param[in] mask expression, loop will iterate over set bits only + */ +#define FOR_EACH_INDEX_IN_MASK(maskWidth,index,mask) \ +{ \ + NvU##maskWidth lclMsk = (NvU##maskWidth)(mask); \ + for ((index) = 0U; lclMsk != 0U; (index)++, lclMsk >>= 1U)\ + { \ + if (((NvU##maskWidth)NVBIT64(0) & lclMsk) == 0U) \ + { \ + continue; \ + } +#define FOR_EACH_INDEX_IN_MASK_END \ + } \ +} + +/*! + * Returns the position of nth set bit in the given mask. + * + * Returns -1 if mask has fewer than n bits set. + * + * n is 0 indexed and has valid values 0..31 inclusive, so "zeroth" set bit is + * the first set LSB. + * + * Example, if mask = 0x000000F0u and n = 1, the return value will be 5. + * Example, if mask = 0x000000F0u and n = 4, the return value will be -1. 
+ */
+static NV_FORCEINLINE NvS32
+nvGetNthSetBitIndex32(NvU32 mask, NvU32 n)
+{
+    NvU32 seenSetBitsCount = 0;
+    NvS32 index;
+    FOR_EACH_INDEX_IN_MASK(32, index, mask)
+    {
+        if (seenSetBitsCount == n)
+        {
+            return index;
+        }
+        ++seenSetBitsCount;
+    }
+    FOR_EACH_INDEX_IN_MASK_END;
+
+    return -1;
+}
+
+//
+// Size to use when declaring variable-sized arrays
+//
+#define NV_ANYSIZE_ARRAY 1
+
+//
+// Returns ceil(a/b)
+//
+#define NV_CEIL(a,b) (((a)+(b)-1)/(b))
+
+// Clearer name for NV_CEIL
+#ifndef NV_DIV_AND_CEIL
+#define NV_DIV_AND_CEIL(a, b) NV_CEIL(a,b)
+#endif
+
+#ifndef NV_MIN
+#define NV_MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef NV_MAX
+#define NV_MAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+//
+// Returns the absolute value of the provided integer expression
+//
+#define NV_ABS(a) ((a)>=0?(a):(-(a)))
+
+//
+// Returns 1 if the input number is positive, 0 if it is 0, and -1 if it is
+// negative. Avoid passing a function call with side effects as the macro
+// parameter, as it will be evaluated multiple times.
+//
+#define NV_SIGN(s) ((NvS8)(((s) > 0) - ((s) < 0)))
+
+//
+// Returns 1 if the input number is >= 0 or -1 otherwise. This assumes 0 has a
+// positive sign.
+//
+#define NV_ZERO_SIGN(s) ((NvS8)((((s) >= 0) * 2) - 1))
+
+// Returns the offset (in bytes) of 'member' in struct 'type'.
+#ifndef NV_OFFSETOF
+    #if defined(__GNUC__) && (__GNUC__ > 3)
+        #define NV_OFFSETOF(type, member)   ((NvUPtr) __builtin_offsetof(type, member))
+    #else
+        #define NV_OFFSETOF(type, member)   ((NvUPtr) &(((type *)0)->member))
+    #endif
+#endif
+
+// Given a pointer to a member of a struct, return a pointer to the containing (parent) struct
+#define NV_CONTAINEROF(ptr, type, member) ((type *) (((NvUPtr) ptr) - NV_OFFSETOF(type, member)))
+
+//
+// Performs a rounded division of b into a (unsigned). For the SIGNED version
+// of the NV_ROUNDED_DIV() macro check the comments in bug 769777.
+//
+#define NV_UNSIGNED_ROUNDED_DIV(a,b) (((a) + ((b) / 2U)) / (b))
+
+/*!
+ * Performs a ceiling division of b into a (unsigned). A "ceiling" division is
+ * one that rounds the result up if a % b != 0.
+ *
+ * @param[in] a Numerator
+ * @param[in] b Denominator
+ *
+ * @return a / b + (a % b != 0 ? 1 : 0).
+ */
+#define NV_UNSIGNED_DIV_CEIL(a, b) (((a) + (b - 1)) / (b))
+
+/*!
+ * Performs subtraction where a negative difference is raised to zero.
+ * Can be used to avoid underflowing an unsigned subtraction.
+ *
+ * @param[in] a Minuend
+ * @param[in] b Subtrahend
+ *
+ * @return a > b ? a - b : 0.
+ */
+#define NV_SUBTRACT_NO_UNDERFLOW(a, b) ((a)>(b) ? (a)-(b) : 0)
+
+/*!
+ * Performs a rounded right-shift of 32-bit unsigned value "a" by "shift" bits.
+ * Will round the result away from zero.
+ *
+ * @param[in] a     32-bit unsigned value to shift.
+ * @param[in] shift Number of bits by which to shift.
+ *
+ * @return Resulting shifted value rounded away from zero.
+ */
+#define NV_RIGHT_SHIFT_ROUNDED(a, shift) \
+    (((a) >> (shift)) + !!((NVBIT((shift) - 1) & (a)) == NVBIT((shift) - 1)))
+
+//
+// Power of 2 alignment.
+// (Will give unexpected results if 'gran' is not a power of 2.)
+//
+#ifndef NV_ALIGN_DOWN
+//
+// Notably using v - v + gran ensures gran gets promoted to the same type as v if gran has a smaller type.
+// Otherwise, if aligning an NvU64 with NvU32 granularity, the top 4 bytes get zeroed.
+//
+#define NV_ALIGN_DOWN(v, gran) ((v) & ~((v) - (v) + (gran) - 1))
+#endif
+
+#ifndef NV_ALIGN_UP
+//
+// Notably using v - v + gran ensures gran gets promoted to the same type as v if gran has a smaller type.
+
+//
+// Power of 2 alignment.
+// (Will give unexpected results if 'gran' is not a power of 2.)
+//
+#ifndef NV_ALIGN_DOWN
+//
+// Notably, using v - v + gran ensures gran gets promoted to the same type as
+// v if gran has a smaller type. Otherwise, if aligning an NvU64 with NvU32
+// granularity, the top 4 bytes would get zeroed.
+//
+#define NV_ALIGN_DOWN(v, gran) ((v) & ~((v) - (v) + (gran) - 1))
+#endif
+
+#ifndef NV_ALIGN_UP
+//
+// Notably, using v - v + gran ensures gran gets promoted to the same type as
+// v if gran has a smaller type. Otherwise, if aligning an NvU64 with NvU32
+// granularity, the top 4 bytes would get zeroed.
+//
+#define NV_ALIGN_UP(v, gran) (((v) + ((gran) - 1)) & ~((v) - (v) + (gran) - 1))
+#endif
+
+#ifndef NV_ALIGN_DOWN64
+#define NV_ALIGN_DOWN64(v, gran) ((v) & ~(((NvU64)(gran)) - 1))
+#endif
+
+#ifndef NV_ALIGN_UP64
+#define NV_ALIGN_UP64(v, gran) (((v) + ((gran) - 1)) & ~(((NvU64)(gran)) - 1))
+#endif
+
+#ifndef NV_IS_ALIGNED
+#define NV_IS_ALIGNED(v, gran) (0U == ((v) & ((gran) - 1U)))
+#endif
+
+#ifndef NV_IS_ALIGNED64
+#define NV_IS_ALIGNED64(v, gran) (0U == ((v) & (((NvU64)(gran)) - 1U)))
+#endif
+
+#ifndef NVMISC_MEMSET
+static NV_FORCEINLINE void *NVMISC_MEMSET(void *s, NvU8 c, NvLength n)
+{
+    NvU8 *b = (NvU8 *) s;
+    NvLength i;
+
+    for (i = 0; i < n; i++)
+    {
+        b[i] = c;
+    }
+
+    return s;
+}
+#endif
+
+#ifndef NVMISC_MEMCPY
+static NV_FORCEINLINE void *NVMISC_MEMCPY(void *dest, const void *src, NvLength n)
+{
+    NvU8 *destByte = (NvU8 *) dest;
+    const NvU8 *srcByte = (const NvU8 *) src;
+    NvLength i;
+
+    for (i = 0; i < n; i++)
+    {
+        destByte[i] = srcByte[i];
+    }
+
+    return dest;
+}
+#endif
+
+static NV_FORCEINLINE char *NVMISC_STRNCPY(char *dest, const char *src, NvLength n)
+{
+    NvLength i;
+
+    for (i = 0; i < n; i++)
+    {
+        dest[i] = src[i];
+        if (src[i] == '\0')
+        {
+            break;
+        }
+    }
+
+    for (; i < n; i++)
+    {
+        dest[i] = '\0';
+    }
+
+    return dest;
+}
+
+/*!
+ * Convert a void* to an NvUPtr. This is used when MISRA forbids us from doing
+ * a direct cast.
+ *
+ * @param[in] ptr    Pointer to be converted
+ *
+ * @return Resulting NvUPtr
+ */
+static NV_FORCEINLINE NvUPtr NV_PTR_TO_NVUPTR(void *ptr)
+{
+    union
+    {
+        NvUPtr v;
+        void *p;
+    } uAddr;
+
+    uAddr.p = ptr;
+    return uAddr.v;
+}
+
+/*!
+ * Convert an NvUPtr to a void*. This is used when MISRA forbids us from doing
+ * a direct cast.
+ *
+ * @param[in] address    Address to be converted
+ *
+ * @return Resulting void *
+ */
+static NV_FORCEINLINE void *NV_NVUPTR_TO_PTR(NvUPtr address)
+{
+    union
+    {
+        NvUPtr v;
+        void *p;
+    } uAddr;
+
+    uAddr.v = address;
+    return uAddr.p;
+}
+
+// Get the bit at position (k) from x
+#define NV_BIT_GET(k, x) (((x) >> (k)) & 1)
+// Get the bit at position (n) from (hi) if n >= 64, otherwise from (lo). This
+// is paired with NV_BIT_SET_128, which sets the bit.
+#define NV_BIT_GET_128(n, lo, hi) (((n) < 64) ? NV_BIT_GET((n), (lo)) : NV_BIT_GET((n) - 64, (hi)))
+//
+// Set the bit at position (b), where b < 128, in a 128-bit value stored as two
+// NvU64 halves. Use (lo) if (b) is less than 64, and (hi) if (b) is >= 64.
+//
+#define NV_BIT_SET_128(b, lo, hi)   { nvAssert((b) < 128); if ((b) < 64) (lo) |= NVBIT64(b); else (hi) |= NVBIT64((b) & 0x3F); }
+//
+// Clear the bit at position (b), where b < 128, in a 128-bit value stored as
+// two NvU64 halves. Use (lo) if (b) is less than 64, and (hi) if (b) is >= 64.
+//
+#define NV_BIT_CLEAR_128(b, lo, hi) { nvAssert((b) < 128); if ((b) < 64) (lo) &= ~NVBIT64(b); else (hi) &= ~NVBIT64((b) & 0x3F); }
+
+// Get the number of elements in the specified fixed-size array
+#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0])))
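+
+/*
+ * Illustrative example (not part of the original header): tracking a 128-bit
+ * mask as a lo/hi NvU64 pair with the NV_BIT_*_128 helpers above.
+ *
+ *     NvU64 lo = 0, hi = 0;
+ *     NV_BIT_SET_128(5,  lo, hi);   // sets bit 5 in lo
+ *     NV_BIT_SET_128(70, lo, hi);   // sets bit 6 (70 - 64) in hi
+ *     // NV_BIT_GET_128(70, lo, hi) == 1
+ *     NV_BIT_CLEAR_128(5, lo, hi);  // clears bit 5 in lo again
+ */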
+
+#if !defined(NVIDIA_UNDEF_LEGACY_BIT_MACROS)
+//
+// Deprecated macros whose definitions can be removed once the code base no
+// longer references them. Use the NVBIT* macros instead of these.
+//
+#ifndef BIT
+#define BIT(b)   (1U<<(b))
+#endif
+#ifndef BIT32
+#define BIT32(b) ((NvU32)1U<<(b))
+#endif
+#ifndef BIT64
+#define BIT64(b) ((NvU64)1U<<(b))
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif //__cplusplus
+
+#endif // __NV_MISC_H
+
diff --git a/kernel-open/common/inc/nvstatus.h b/kernel-open/common/inc/nvstatus.h
new file mode 100644
index 0000000..4f5284d
--- /dev/null
+++ b/kernel-open/common/inc/nvstatus.h
@@ -0,0 +1,123 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SDK_NVSTATUS_H
+#define SDK_NVSTATUS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvtypes.h"
+
+typedef NvU32 NV_STATUS;
+
+#define NV_STATUS_CODE( name, code, string ) name = (code),
+
+enum
+{
+    #include "nvstatuscodes.h"
+};
+
+#undef NV_STATUS_CODE
+
+/*!
+ * @def   NV_STATUS_LEVEL_OK
+ * @see   NV_STATUS_LEVEL
+ * @brief Success: No error or special condition
+ */
+#define NV_STATUS_LEVEL_OK    0
+
+/*!
+ * @def     NV_STATUS_LEVEL_WARN
+ * @see     NV_STATUS_LEVEL
+ * @brief   Success, but there is a special condition
+ *
+ * @details In general, NV_STATUS_LEVEL_WARN status codes are handled the
+ *          same as NV_STATUS_LEVEL_OK, but are useful for indicating that
+ *          there is a condition that may be specially handled.
+ *
+ *          Therefore, in most cases, client functions should test for
+ *          status <= NV_STATUS_LEVEL_WARN or status > NV_STATUS_LEVEL_WARN
+ *          to determine success vs. failure of a call.
+ */
+#define NV_STATUS_LEVEL_WARN  1
+
+/*!
+ * @def   NV_STATUS_LEVEL_ERR
+ * @see   NV_STATUS_LEVEL
+ * @brief Unrecoverable error condition
+ */
+#define NV_STATUS_LEVEL_ERR   3
+
+/*!
+ * @def     NV_STATUS_LEVEL
+ * @see     NV_STATUS_LEVEL_OK
+ * @see     NV_STATUS_LEVEL_WARN
+ * @see     NV_STATUS_LEVEL_ERR
+ * @brief   Level of the status code
+ *
+ * @warning IMPORTANT: When comparing NV_STATUS_LEVEL(_S) against one of
+ *          these constants, it is important to use '<=' or '>' (rather
+ *          than '<' or '>=').
+ *
+ *          For example, do:
+ *              if (NV_STATUS_LEVEL(status) <= NV_STATUS_LEVEL_WARN)
+ *          rather than:
+ *              if (NV_STATUS_LEVEL(status) < NV_STATUS_LEVEL_ERR)
+ *
+ *          By being consistent in this manner, it is easier to systematically
+ *          add additional level constants. New levels are likely to lower
+ *          (rather than raise) the severity of _ERR codes. For example,
+ *          if we were to add NV_STATUS_LEVEL_RETRY to indicate hardware
+ *          failures that may be recoverable (e.g. RM_ERR_TIMEOUT_RETRY
+ *          or RM_ERR_BUSY_RETRY), it would be less severe than
+ *          NV_STATUS_LEVEL_ERR, the level to which these status codes now
+ *          belong. Using '<=' and '>' ensures your code is not broken in
+ *          cases like this.
+ */
+#define NV_STATUS_LEVEL(_S)                                                 \
+    ((_S) == NV_OK?                                   NV_STATUS_LEVEL_OK:   \
+    ((_S) != NV_ERR_GENERIC && ((_S) & 0x00010000)?   NV_STATUS_LEVEL_WARN: \
+                                                      NV_STATUS_LEVEL_ERR))
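+
+/*
+ * Illustrative usage sketch (not part of the original header): treating
+ * warning-level codes as success, per the '<=' guidance above. someRmCall()
+ * is a hypothetical function returning an NV_STATUS.
+ *
+ *     NV_STATUS status = someRmCall();
+ *     if (NV_STATUS_LEVEL(status) <= NV_STATUS_LEVEL_WARN)
+ *     {
+ *         // NV_OK or an NV_WARN_* code: treat as success.
+ *     }
+ *     else
+ *     {
+ *         // An NV_ERR_* code: handle the failure.
+ *     }
+ */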
+
+/*!
+ * @def   NV_STATUS_LEVEL_CHAR
+ * @see   NV_STATUS_LEVEL_OK
+ * @see   NV_STATUS_LEVEL_WARN
+ * @see   NV_STATUS_LEVEL_ERR
+ * @brief Character representing the status code level
+ */
+#define NV_STATUS_LEVEL_CHAR(_S)                          \
+    ((_S) == NV_OK?                                 '0':  \
+    ((_S) != NV_ERR_GENERIC && ((_S) & 0x00010000)? 'W':  \
+                                                    'E'))
+
+// Function definitions
+const char *nvstatusToString(NV_STATUS nvStatusIn);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SDK_NVSTATUS_H */
diff --git a/kernel-open/common/inc/nvstatuscodes.h b/kernel-open/common/inc/nvstatuscodes.h
new file mode 100644
index 0000000..98ebb7b
--- /dev/null
+++ b/kernel-open/common/inc/nvstatuscodes.h
@@ -0,0 +1,180 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef SDK_NVSTATUSCODES_H +#define SDK_NVSTATUSCODES_H + +NV_STATUS_CODE(NV_OK, 0x00000000, "Success") +NV_STATUS_CODE(NV_ERR_GENERIC, 0x0000FFFF, "Failure: Generic Error") + +NV_STATUS_CODE(NV_ERR_BROKEN_FB, 0x00000001, "Frame-Buffer broken") +NV_STATUS_CODE(NV_ERR_BUFFER_TOO_SMALL, 0x00000002, "Buffer passed in is too small") +NV_STATUS_CODE(NV_ERR_BUSY_RETRY, 0x00000003, "System is busy, retry later") +NV_STATUS_CODE(NV_ERR_CALLBACK_NOT_SCHEDULED, 0x00000004, "The requested callback API not scheduled") +NV_STATUS_CODE(NV_ERR_CARD_NOT_PRESENT, 0x00000005, "Card not detected") +NV_STATUS_CODE(NV_ERR_CYCLE_DETECTED, 0x00000006, "Call cycle detected") +NV_STATUS_CODE(NV_ERR_DMA_IN_USE, 0x00000007, "Requested DMA is in use") +NV_STATUS_CODE(NV_ERR_DMA_MEM_NOT_LOCKED, 0x00000008, "Requested DMA memory is not locked") +NV_STATUS_CODE(NV_ERR_DMA_MEM_NOT_UNLOCKED, 0x00000009, "Requested DMA memory is not unlocked") +NV_STATUS_CODE(NV_ERR_DUAL_LINK_INUSE, 0x0000000A, "Dual-Link is in use") +NV_STATUS_CODE(NV_ERR_ECC_ERROR, 0x0000000B, "Generic ECC error") +NV_STATUS_CODE(NV_ERR_FIFO_BAD_ACCESS, 0x0000000C, "FIFO: Invalid access") +NV_STATUS_CODE(NV_ERR_FREQ_NOT_SUPPORTED, 0x0000000D, "Requested frequency is not supported") +NV_STATUS_CODE(NV_ERR_GPU_DMA_NOT_INITIALIZED, 0x0000000E, "Requested DMA not initialized") +NV_STATUS_CODE(NV_ERR_GPU_IS_LOST, 0x0000000F, "GPU lost from the bus") +NV_STATUS_CODE(NV_ERR_GPU_IN_FULLCHIP_RESET, 0x00000010, "GPU currently in full-chip reset") +NV_STATUS_CODE(NV_ERR_GPU_NOT_FULL_POWER, 0x00000011, "GPU not in full power") +NV_STATUS_CODE(NV_ERR_GPU_UUID_NOT_FOUND, 0x00000012, "GPU UUID not found") +NV_STATUS_CODE(NV_ERR_HOT_SWITCH, 0x00000013, "System in hot switch") +NV_STATUS_CODE(NV_ERR_I2C_ERROR, 0x00000014, "I2C Error") +NV_STATUS_CODE(NV_ERR_I2C_SPEED_TOO_HIGH, 0x00000015, "I2C Error: Speed too high") +NV_STATUS_CODE(NV_ERR_ILLEGAL_ACTION, 0x00000016, "Current action is not allowed") +NV_STATUS_CODE(NV_ERR_IN_USE, 0x00000017, "Generic busy error") +NV_STATUS_CODE(NV_ERR_INFLATE_COMPRESSED_DATA_FAILED, 0x00000018, "Failed to inflate compressed data") +NV_STATUS_CODE(NV_ERR_INSERT_DUPLICATE_NAME, 0x00000019, "Found a duplicate entry in the requested btree") +NV_STATUS_CODE(NV_ERR_INSUFFICIENT_RESOURCES, 0x0000001A, "Ran out of a critical resource, other than memory") +NV_STATUS_CODE(NV_ERR_INSUFFICIENT_PERMISSIONS, 0x0000001B, "The requester does not have sufficient permissions") +NV_STATUS_CODE(NV_ERR_INSUFFICIENT_POWER, 0x0000001C, "Generic Error: Low power") +NV_STATUS_CODE(NV_ERR_INVALID_ACCESS_TYPE, 0x0000001D, "This type of access is not allowed") +NV_STATUS_CODE(NV_ERR_INVALID_ADDRESS, 0x0000001E, "Address not valid") +NV_STATUS_CODE(NV_ERR_INVALID_ARGUMENT, 0x0000001F, "Invalid argument to call") +NV_STATUS_CODE(NV_ERR_INVALID_BASE, 0x00000020, "Invalid base") +NV_STATUS_CODE(NV_ERR_INVALID_CHANNEL, 0x00000021, "Given channel-id not valid") +NV_STATUS_CODE(NV_ERR_INVALID_CLASS, 0x00000022, "Given class-id not valid") +NV_STATUS_CODE(NV_ERR_INVALID_CLIENT, 0x00000023, "Given client not valid") +NV_STATUS_CODE(NV_ERR_INVALID_COMMAND, 0x00000024, "Command passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_DATA, 0x00000025, "Invalid data passed") +NV_STATUS_CODE(NV_ERR_INVALID_DEVICE, 0x00000026, "Current device is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_DMA_SPECIFIER, 0x00000027, "The requested DMA specifier is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_EVENT, 0x00000028, "Invalid event occurred") +NV_STATUS_CODE(NV_ERR_INVALID_FLAGS, 
0x00000029, "Invalid flags passed")
+NV_STATUS_CODE(NV_ERR_INVALID_FUNCTION,            0x0000002A, "Called function is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_HEAP,                0x0000002B, "Heap corrupted")
+NV_STATUS_CODE(NV_ERR_INVALID_INDEX,               0x0000002C, "Index invalid")
+NV_STATUS_CODE(NV_ERR_INVALID_IRQ_LEVEL,           0x0000002D, "Requested IRQ level is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_LIMIT,               0x0000002E, "Generic Error: Invalid limit")
+NV_STATUS_CODE(NV_ERR_INVALID_LOCK_STATE,          0x0000002F, "Requested lock state not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_METHOD,              0x00000030, "Requested method not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OBJECT,              0x00000031, "Object not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_BUFFER,       0x00000032, "Object buffer passed is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_HANDLE,       0x00000033, "Object handle is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_NEW,          0x00000034, "New object is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_OLD,          0x00000035, "Old object is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_PARENT,       0x00000036, "Object parent is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OFFSET,              0x00000037, "The offset passed is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OPERATION,           0x00000038, "Requested operation is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OWNER,               0x00000039, "Owner not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_PARAM_STRUCT,        0x0000003A, "Invalid structure parameter")
+NV_STATUS_CODE(NV_ERR_INVALID_PARAMETER,           0x0000003B, "At least one of the parameters passed is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_PATH,                0x0000003C, "The requested path is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_POINTER,             0x0000003D, "Pointer not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_REGISTRY_KEY,        0x0000003E, "Found an invalid registry key")
+NV_STATUS_CODE(NV_ERR_INVALID_REQUEST,             0x0000003F, "Generic Error: Invalid request")
+NV_STATUS_CODE(NV_ERR_INVALID_STATE,               0x00000040, "Generic Error: Invalid state")
+NV_STATUS_CODE(NV_ERR_INVALID_STRING_LENGTH,       0x00000041, "The string length is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_READ,                0x00000042, "The requested read operation is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_WRITE,               0x00000043, "The requested write operation is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_XLATE,               0x00000044, "The requested translate operation is not valid")
+NV_STATUS_CODE(NV_ERR_IRQ_NOT_FIRING,              0x00000045, "Requested IRQ is not firing")
+NV_STATUS_CODE(NV_ERR_IRQ_EDGE_TRIGGERED,          0x00000046, "IRQ is edge triggered")
+NV_STATUS_CODE(NV_ERR_MEMORY_TRAINING_FAILED,      0x00000047, "Failed memory training sequence")
+NV_STATUS_CODE(NV_ERR_MISMATCHED_SLAVE,            0x00000048, "Slave mismatch")
+NV_STATUS_CODE(NV_ERR_MISMATCHED_TARGET,           0x00000049, "Target mismatch")
+NV_STATUS_CODE(NV_ERR_MISSING_TABLE_ENTRY,         0x0000004A, "Requested entry not found in the table")
+NV_STATUS_CODE(NV_ERR_MODULE_LOAD_FAILED,          0x0000004B, "Failed to load the requested module")
+NV_STATUS_CODE(NV_ERR_MORE_DATA_AVAILABLE,         0x0000004C, "There is more data available")
+NV_STATUS_CODE(NV_ERR_MORE_PROCESSING_REQUIRED,    0x0000004D, "More processing required for the given call")
+NV_STATUS_CODE(NV_ERR_MULTIPLE_MEMORY_TYPES,       0x0000004E, "Multiple memory types found")
+NV_STATUS_CODE(NV_ERR_NO_FREE_FIFOS,               0x0000004F, "No more free FIFOs found")
+NV_STATUS_CODE(NV_ERR_NO_INTR_PENDING,             0x00000050, "No interrupt pending")
+NV_STATUS_CODE(NV_ERR_NO_MEMORY,                   0x00000051, "Out of memory")
+NV_STATUS_CODE(NV_ERR_NO_SUCH_DOMAIN,              0x00000052, "Requested domain does not exist")
+NV_STATUS_CODE(NV_ERR_NO_VALID_PATH, 0x00000053, "Caller did not specify a valid path") +NV_STATUS_CODE(NV_ERR_NOT_COMPATIBLE, 0x00000054, "Generic Error: Incompatible types") +NV_STATUS_CODE(NV_ERR_NOT_READY, 0x00000055, "Generic Error: Not ready") +NV_STATUS_CODE(NV_ERR_NOT_SUPPORTED, 0x00000056, "Call not supported") +NV_STATUS_CODE(NV_ERR_OBJECT_NOT_FOUND, 0x00000057, "Requested object not found") +NV_STATUS_CODE(NV_ERR_OBJECT_TYPE_MISMATCH, 0x00000058, "Specified objects do not match") +NV_STATUS_CODE(NV_ERR_OPERATING_SYSTEM, 0x00000059, "Generic operating system error") +NV_STATUS_CODE(NV_ERR_OTHER_DEVICE_FOUND, 0x0000005A, "Found other device instead of the requested one") +NV_STATUS_CODE(NV_ERR_OUT_OF_RANGE, 0x0000005B, "The specified value is out of bounds") +NV_STATUS_CODE(NV_ERR_OVERLAPPING_UVM_COMMIT, 0x0000005C, "Overlapping unified virtual memory commit") +NV_STATUS_CODE(NV_ERR_PAGE_TABLE_NOT_AVAIL, 0x0000005D, "Requested page table not available") +NV_STATUS_CODE(NV_ERR_PID_NOT_FOUND, 0x0000005E, "Process-Id not found") +NV_STATUS_CODE(NV_ERR_PROTECTION_FAULT, 0x0000005F, "Protection fault") +NV_STATUS_CODE(NV_ERR_RC_ERROR, 0x00000060, "Generic RC error") +NV_STATUS_CODE(NV_ERR_REJECTED_VBIOS, 0x00000061, "Given Video BIOS rejected/invalid") +NV_STATUS_CODE(NV_ERR_RESET_REQUIRED, 0x00000062, "Reset required") +NV_STATUS_CODE(NV_ERR_STATE_IN_USE, 0x00000063, "State in use") +NV_STATUS_CODE(NV_ERR_SIGNAL_PENDING, 0x00000064, "Signal pending") +NV_STATUS_CODE(NV_ERR_TIMEOUT, 0x00000065, "Call timed out") +NV_STATUS_CODE(NV_ERR_TIMEOUT_RETRY, 0x00000066, "Call timed out, please retry later") +NV_STATUS_CODE(NV_ERR_TOO_MANY_PRIMARIES, 0x00000067, "Too many primaries") +NV_STATUS_CODE(NV_ERR_UVM_ADDRESS_IN_USE, 0x00000068, "Unified virtual memory requested address already in use") +NV_STATUS_CODE(NV_ERR_MAX_SESSION_LIMIT_REACHED, 0x00000069, "Maximum number of sessions reached") +NV_STATUS_CODE(NV_ERR_LIB_RM_VERSION_MISMATCH, 0x0000006A, "Library version doesn't match driver version") //Contained within the RMAPI library +NV_STATUS_CODE(NV_ERR_PRIV_SEC_VIOLATION, 0x0000006B, "Priv security violation") +NV_STATUS_CODE(NV_ERR_GPU_IN_DEBUG_MODE, 0x0000006C, "GPU currently in debug mode") +NV_STATUS_CODE(NV_ERR_FEATURE_NOT_ENABLED, 0x0000006D, "Requested Feature functionality is not enabled") +NV_STATUS_CODE(NV_ERR_RESOURCE_LOST, 0x0000006E, "Requested resource has been destroyed") +NV_STATUS_CODE(NV_ERR_PMU_NOT_READY, 0x0000006F, "PMU is not ready or has not yet been initialized") +NV_STATUS_CODE(NV_ERR_FLCN_ERROR, 0x00000070, "Generic falcon assert or halt") +NV_STATUS_CODE(NV_ERR_FATAL_ERROR, 0x00000071, "Fatal/unrecoverable error") +NV_STATUS_CODE(NV_ERR_MEMORY_ERROR, 0x00000072, "Generic memory error") +NV_STATUS_CODE(NV_ERR_INVALID_LICENSE, 0x00000073, "License provided is rejected or invalid") +NV_STATUS_CODE(NV_ERR_NVLINK_INIT_ERROR, 0x00000074, "Nvlink Init Error") +NV_STATUS_CODE(NV_ERR_NVLINK_MINION_ERROR, 0x00000075, "Nvlink Minion Error") +NV_STATUS_CODE(NV_ERR_NVLINK_CLOCK_ERROR, 0x00000076, "Nvlink Clock Error") +NV_STATUS_CODE(NV_ERR_NVLINK_TRAINING_ERROR, 0x00000077, "Nvlink Training Error") +NV_STATUS_CODE(NV_ERR_NVLINK_CONFIGURATION_ERROR, 0x00000078, "Nvlink Configuration Error") +NV_STATUS_CODE(NV_ERR_RISCV_ERROR, 0x00000079, "Generic RISC-V assert or halt") +NV_STATUS_CODE(NV_ERR_FABRIC_MANAGER_NOT_PRESENT, 0x0000007A, "Fabric Manager is not loaded") +NV_STATUS_CODE(NV_ERR_ALREADY_SIGNALLED, 0x0000007B, "Semaphore Surface value already >= requested wait value") 
+NV_STATUS_CODE(NV_ERR_QUEUE_TASK_SLOT_NOT_AVAILABLE, 0x0000007C, "PMU RPC error due to no queue slot available for this event") +NV_STATUS_CODE(NV_ERR_KEY_ROTATION_IN_PROGRESS, 0x0000007D, "Operation not allowed as key rotation is in progress") +NV_STATUS_CODE(NV_ERR_TEST_ONLY_CODE_NOT_ENABLED, 0x0000007E, "Test-only code path not enabled") +NV_STATUS_CODE(NV_ERR_SECURE_BOOT_FAILED, 0x0000007F, "GFW secure boot failed") +NV_STATUS_CODE(NV_ERR_INSUFFICIENT_ZBC_ENTRY, 0x00000080, "No more ZBC entry for the client") +NV_STATUS_CODE(NV_ERR_NVLINK_FABRIC_NOT_READY, 0x00000081, "Nvlink Fabric Status or Fabric Probe is not yet complete, caller needs to retry") +NV_STATUS_CODE(NV_ERR_NVLINK_FABRIC_FAILURE, 0x00000082, "Nvlink Fabric Probe failed") +NV_STATUS_CODE(NV_ERR_GPU_MEMORY_ONLINING_FAILURE, 0x00000083, "GPU Memory Onlining failed") +NV_STATUS_CODE(NV_ERR_REDUCTION_MANAGER_NOT_AVAILABLE, 0x00000084, "Reduction Manager is not available") +NV_STATUS_CODE(NV_ERR_THRESHOLD_CROSSED, 0x00000085, "A fatal threshold has been crossed") +NV_STATUS_CODE(NV_ERR_RESOURCE_RETIREMENT_ERROR, 0x00000086, "An error occurred while trying to retire a resource") +NV_STATUS_CODE(NV_ERR_FABRIC_STATE_OUT_OF_SYNC, 0x00000087, "NVLink fabric state cached by the driver is out of sync") +NV_STATUS_CODE(NV_ERR_BUFFER_FULL, 0x00000088, "Buffer is full") +NV_STATUS_CODE(NV_ERR_BUFFER_EMPTY, 0x00000089, "Buffer is empty") +NV_STATUS_CODE(NV_ERR_MC_FLA_OFFSET_TABLE_FULL, 0x0000008A, "Multicast FLA offset table has no available slots") + +// Warnings: +NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch") +NV_STATUS_CODE(NV_WARN_INCORRECT_PERFMON_DATA, 0x00010002, "WARNING Incorrect performance monitor data") +NV_STATUS_CODE(NV_WARN_MISMATCHED_SLAVE, 0x00010003, "WARNING Slave mismatch") +NV_STATUS_CODE(NV_WARN_MISMATCHED_TARGET, 0x00010004, "WARNING Target mismatch") +NV_STATUS_CODE(NV_WARN_MORE_PROCESSING_REQUIRED, 0x00010005, "WARNING More processing required for the call") +NV_STATUS_CODE(NV_WARN_NOTHING_TO_DO, 0x00010006, "WARNING Nothing to do") +NV_STATUS_CODE(NV_WARN_NULL_OBJECT, 0x00010007, "WARNING NULL object found") +NV_STATUS_CODE(NV_WARN_OUT_OF_RANGE, 0x00010008, "WARNING value out of range") +NV_STATUS_CODE(NV_WARN_THRESHOLD_CROSSED, 0x00010009, "WARNING Threshold has been crossed") + +#endif /* SDK_NVSTATUSCODES_H */ diff --git a/kernel-open/common/inc/nvtypes.h b/kernel-open/common/inc/nvtypes.h new file mode 100644 index 0000000..2965de5 --- /dev/null +++ b/kernel-open/common/inc/nvtypes.h @@ -0,0 +1,671 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVTYPES_INCLUDED
+#define NVTYPES_INCLUDED
+
+#include "cpuopsys.h"
+
+#ifndef NVTYPES_USE_STDINT
+#define NVTYPES_USE_STDINT 0
+#endif
+
+#if NVTYPES_USE_STDINT
+#ifdef __cplusplus
+#include <cstdint>
+#include <cinttypes>
+#else
+#include <stdint.h>
+#include <inttypes.h>
+#endif // __cplusplus
+#endif // NVTYPES_USE_STDINT
+
+#ifndef __cplusplus
+// Header includes to make sure wchar_t is defined for C-file compilation
+// (C++ is not affected as it is a fundamental type there)
+// _MSC_VER is a hack to avoid failures for old setup of UEFI builds which are
+// currently set to msvc100 but do not properly set the include paths
+#if defined(NV_WINDOWS) && (!defined(_MSC_VER) || (_MSC_VER > 1600))
+#include <stddef.h>
+#define NV_HAS_WCHAR_T_TYPEDEF 1
+#endif
+#endif // __cplusplus
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(MAKE_NV64TYPES_8BYTES_ALIGNED) && defined(__i386__)
+// ensure or force 8-byte alignment of NV 64-bit types
+#define OPTIONAL_ALIGN8_ATTR __attribute__((aligned(8)))
+#else
+// nothing needed
+#define OPTIONAL_ALIGN8_ATTR
+#endif // MAKE_NV64TYPES_8BYTES_ALIGNED && i386
+
+ /***************************************************************************\
+|*                                 Typedefs                                  *|
+ \***************************************************************************/
+
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+// Typedefs for MISRA compliance
+typedef unsigned long long UInt64;
+typedef   signed long long Int64;
+typedef unsigned int       UInt32;
+typedef   signed int       Int32;
+typedef unsigned short     UInt16;
+typedef   signed short     Int16;
+typedef unsigned char      UInt8;
+typedef   signed char      Int8;
+
+typedef void               Void;
+typedef float              float32_t;
+typedef double             float64_t;
+#endif
+
+// Floating point types
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+typedef float32_t          NvF32; /* IEEE Single Precision (S1E8M23)  */
+typedef float64_t          NvF64 OPTIONAL_ALIGN8_ATTR; /* IEEE Double Precision (S1E11M52) */
+#else
+typedef float              NvF32; /* IEEE Single Precision (S1E8M23)  */
+typedef double             NvF64 OPTIONAL_ALIGN8_ATTR; /* IEEE Double Precision (S1E11M52) */
+#endif
+
+// 8-bit: 'char' is the only 8-bit type in the C89 standard and after.
+#if NVTYPES_USE_STDINT
+typedef uint8_t            NvV8; /* "void": enumerated or multiple fields */
+typedef uint8_t            NvU8; /* 0 to 255                              */
+typedef  int8_t            NvS8; /* -128 to 127                           */
+#else
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+typedef UInt8              NvV8; /* "void": enumerated or multiple fields */
+typedef UInt8              NvU8; /* 0 to 255                              */
+typedef Int8               NvS8; /* -128 to 127                           */
+#else
+typedef unsigned char      NvV8; /* "void": enumerated or multiple fields */
+typedef unsigned char      NvU8; /* 0 to 255                              */
+typedef   signed char      NvS8; /* -128 to 127                           */
+#endif
+#endif // NVTYPES_USE_STDINT
+
+#if NVTYPES_USE_STDINT
+typedef uint16_t           NvV16; /* "void": enumerated or multiple fields */
+typedef uint16_t           NvU16; /* 0 to 65535                            */
+typedef  int16_t           NvS16; /* -32768 to 32767                       */
+#else
+// 16-bit: If the compiler tells us what we can use, then use it.
+#ifdef __INT16_TYPE__ +typedef unsigned __INT16_TYPE__ NvV16; /* "void": enumerated or multiple fields */ +typedef unsigned __INT16_TYPE__ NvU16; /* 0 to 65535 */ +typedef signed __INT16_TYPE__ NvS16; /* -32768 to 32767 */ + +// The minimal standard for C89 and after +#else // __INT16_TYPE__ +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt16 NvV16; /* "void": enumerated or multiple fields */ +typedef UInt16 NvU16; /* 0 to 65535 */ +typedef Int16 NvS16; /* -32768 to 32767 */ +#else +typedef unsigned short NvV16; /* "void": enumerated or multiple fields */ +typedef unsigned short NvU16; /* 0 to 65535 */ +typedef signed short NvS16; /* -32768 to 32767 */ +#endif +#endif // __INT16_TYPE__ +#endif // NVTYPES_USE_STDINT + +// wchar type (fixed size types consistent across Linux/Windows boundaries) +#if defined(NV_HAS_WCHAR_T_TYPEDEF) + typedef wchar_t NvWchar; +#else + typedef NvV16 NvWchar; +#endif + +// Macro to build an NvU32 from four bytes, listed from msb to lsb +#define NvU32_BUILD(a, b, c, d) \ + ((NvU32)( \ + (((NvU32)(a) & 0xff) << 24) | \ + (((NvU32)(b) & 0xff) << 16) | \ + (((NvU32)(c) & 0xff) << 8) | \ + (((NvU32)(d) & 0xff)))) + +// Macro to build an NvU64 from two DWORDS, listed from msb to lsb +#define NvU64_BUILD(a, b) \ + ((NvU64)( \ + (((NvU64)(a) & ~0U) << 32) | \ + (((NvU64)(b) & ~0U)))) + +#if NVTYPES_USE_STDINT +typedef uint32_t NvV32; /* "void": enumerated or multiple fields */ +typedef uint32_t NvU32; /* 0 to 4294967295 */ +typedef int32_t NvS32; /* -2147483648 to 2147483647 */ +#else +// 32-bit: If the compiler tells us what we can use, then use it. +#ifdef __INT32_TYPE__ +typedef unsigned __INT32_TYPE__ NvV32; /* "void": enumerated or multiple fields */ +typedef unsigned __INT32_TYPE__ NvU32; /* 0 to 4294967295 */ +typedef signed __INT32_TYPE__ NvS32; /* -2147483648 to 2147483647 */ + +// Older compilers +#else // __INT32_TYPE__ + +// For historical reasons, NvU32/NvV32 are defined to different base intrinsic +// types than NvS32 on some platforms. +// Mainly for 64-bit linux, where long is 64 bits and win9x, where int is 16 bit. +#if (defined(NV_UNIX) || defined(vxworks) || defined(NV_WINDOWS_CE) || \ + defined(__arm) || defined(__IAR_SYSTEMS_ICC__) || defined(NV_QNX) || \ + defined(NV_INTEGRITY) || defined(NV_MODS) || \ + defined(__GNUC__) || defined(__clang__) || defined(NV_MACINTOSH_64)) && \ + (!defined(NV_MACINTOSH) || defined(NV_MACINTOSH_64)) +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt32 NvV32; /* "void": enumerated or multiple fields */ +typedef UInt32 NvU32; /* 0 to 4294967295 */ +#else +typedef unsigned int NvV32; /* "void": enumerated or multiple fields */ +typedef unsigned int NvU32; /* 0 to 4294967295 */ +#endif + +// The minimal standard for C89 and after +#else // (defined(NV_UNIX) || defined(vxworks) || ... +typedef unsigned long NvV32; /* "void": enumerated or multiple fields */ +typedef unsigned long NvU32; /* 0 to 4294967295 */ +#endif // (defined(NV_UNIX) || defined(vxworks) || ... 
+
+// Mac OS 32-bit still needs this
+#if defined(NV_MACINTOSH) && !defined(NV_MACINTOSH_64)
+typedef   signed long      NvS32; /* -2147483648 to 2147483647 */
+#else
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+typedef Int32              NvS32; /* -2147483648 to 2147483647 */
+#else
+typedef   signed int       NvS32; /* -2147483648 to 2147483647 */
+#endif
+#endif // defined(NV_MACINTOSH) && !defined(NV_MACINTOSH_64)
+#endif // __INT32_TYPE__
+#endif // NVTYPES_USE_STDINT
+
+#if NVTYPES_USE_STDINT
+typedef uint64_t           NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615                   */
+typedef  int64_t           NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */
+
+#define NvU64_fmtX PRIX64
+#define NvU64_fmtx PRIx64
+#define NvU64_fmtu PRIu64
+#define NvU64_fmto PRIo64
+#define NvS64_fmtd PRId64
+#define NvS64_fmti PRIi64
+#else
+// 64-bit types for compilers that support them, plus some obsolete variants
+#if defined(__GNUC__) || defined(__clang__) || defined(__arm) || \
+    defined(__IAR_SYSTEMS_ICC__) || defined(__ghs__) || defined(_WIN64) || \
+    defined(__SUNPRO_C) || defined(__SUNPRO_CC) || defined (__xlC__)
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+typedef UInt64             NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615                   */
+typedef Int64              NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */
+#else
+typedef unsigned long long NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615                   */
+typedef          long long NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */
+#endif
+
+#define NvU64_fmtX "llX"
+#define NvU64_fmtx "llx"
+#define NvU64_fmtu "llu"
+#define NvU64_fmto "llo"
+#define NvS64_fmtd "lld"
+#define NvS64_fmti "lli"
+
+// Microsoft since 2003 -- https://msdn.microsoft.com/en-us/library/29dh1w7z.aspx
+#else
+typedef unsigned __int64   NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615                   */
+typedef          __int64   NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */
+
+#define NvU64_fmtX "I64X"
+#define NvU64_fmtx "I64x"
+#define NvU64_fmtu "I64u"
+#define NvU64_fmto "I64o"
+#define NvS64_fmtd "I64d"
+#define NvS64_fmti "I64i"
+
+#endif
+#endif // NVTYPES_USE_STDINT
+
+#ifdef NV_TYPESAFE_HANDLES
+/*
+ * Can't use an opaque pointer, as clients might be compiled with mismatched
+ * pointer sizes. The TYPESAFE check will eventually be removed once all
+ * clients have transitioned safely to NvHandle.
+ * The plan is to then eventually scale up the handle to be 64 bits.
+ */
+typedef struct
+{
+    NvU32 val;
+} NvHandle;
+#else
+/*
+ * For compatibility with modules that haven't moved to typesafe handles.
+ */ +typedef NvU32 NvHandle; +#endif // NV_TYPESAFE_HANDLES + +/* Boolean type */ +typedef NvU8 NvBool; +#define NV_TRUE ((NvBool)(0 == 0)) +#define NV_FALSE ((NvBool)(0 != 0)) + +/* Tristate type: NV_TRISTATE_FALSE, NV_TRISTATE_TRUE, NV_TRISTATE_INDETERMINATE */ +typedef NvU8 NvTristate; +#define NV_TRISTATE_FALSE ((NvTristate) 0) +#define NV_TRISTATE_TRUE ((NvTristate) 1) +#define NV_TRISTATE_INDETERMINATE ((NvTristate) 2) + +/* Macros to extract the low and high parts of a 64-bit unsigned integer */ +/* Also designed to work if someone happens to pass in a 32-bit integer */ +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +#define NvU64_HI32(n) ((NvU32)((((NvU64)(n)) >> 32) & 0xffffffffU)) +#define NvU64_LO32(n) ((NvU32)(( (NvU64)(n)) & 0xffffffffU)) +#else +#define NvU64_HI32(n) ((NvU32)((((NvU64)(n)) >> 32) & 0xffffffff)) +#define NvU64_LO32(n) ((NvU32)(( (NvU64)(n)) & 0xffffffff)) +#endif +#define NvU40_HI32(n) ((NvU32)((((NvU64)(n)) >> 8) & 0xffffffffU)) +#define NvU40_HI24of32(n) ((NvU32)( (NvU64)(n) & 0xffffff00U)) + +/* Macros to get the MSB and LSB of a 32 bit unsigned number */ +#define NvU32_HI16(n) ((NvU16)((((NvU32)(n)) >> 16) & 0xffffU)) +#define NvU32_LO16(n) ((NvU16)(( (NvU32)(n)) & 0xffffU)) + + /***************************************************************************\ +|* *| +|* 64 bit type definitions for use in interface structures. *| +|* *| + \***************************************************************************/ + +#if defined(NV_64_BITS) + +typedef void* NvP64; /* 64 bit void pointer */ +typedef NvU64 NvUPtr; /* pointer sized unsigned int */ +typedef NvS64 NvSPtr; /* pointer sized signed int */ +typedef NvU64 NvLength; /* length to agree with sizeof */ + +#define NvP64_VALUE(n) (n) +#define NvP64_fmt "%p" + +#define KERNEL_POINTER_FROM_NvP64(p,v) ((p)(v)) +#define NvP64_PLUS_OFFSET(p,o) (NvP64)((NvU64)(p) + (NvU64)(o)) + +#define NvUPtr_fmtX NvU64_fmtX +#define NvUPtr_fmtx NvU64_fmtx +#define NvUPtr_fmtu NvU64_fmtu +#define NvUPtr_fmto NvU64_fmto +#define NvSPtr_fmtd NvS64_fmtd +#define NvSPtr_fmti NvS64_fmti + +#else + +typedef NvU64 NvP64; /* 64 bit void pointer */ +typedef NvU32 NvUPtr; /* pointer sized unsigned int */ +typedef NvS32 NvSPtr; /* pointer sized signed int */ +typedef NvU32 NvLength; /* length to agree with sizeof */ + +#define NvP64_VALUE(n) ((void *)(NvUPtr)(n)) +#define NvP64_fmt "0x%llx" + +#define KERNEL_POINTER_FROM_NvP64(p,v) ((p)(NvUPtr)(v)) +#define NvP64_PLUS_OFFSET(p,o) ((p) + (NvU64)(o)) + +#define NvUPtr_fmtX "X" +#define NvUPtr_fmtx "x" +#define NvUPtr_fmtu "u" +#define NvUPtr_fmto "o" +#define NvSPtr_fmtd "d" +#define NvSPtr_fmti "i" + +#endif + +#define NvP64_NULL (NvP64)0 + +/*! + * Helper macro to pack an @ref NvU64_ALIGN32 structure from a @ref NvU64. + * + * @param[out] pDst Pointer to NvU64_ALIGN32 structure to pack + * @param[in] pSrc Pointer to NvU64 with which to pack + */ +#define NvU64_ALIGN32_PACK(pDst, pSrc) \ +do { \ + (pDst)->lo = NvU64_LO32(*(pSrc)); \ + (pDst)->hi = NvU64_HI32(*(pSrc)); \ +} while (NV_FALSE) + +/*! + * Helper macro to unpack a @ref NvU64_ALIGN32 structure into a @ref NvU64. + * + * @param[out] pDst Pointer to NvU64 in which to unpack + * @param[in] pSrc Pointer to NvU64_ALIGN32 structure from which to unpack + */ +#define NvU64_ALIGN32_UNPACK(pDst, pSrc) \ +do { \ + (*(pDst)) = NvU64_ALIGN32_VAL(pSrc); \ +} while (NV_FALSE) + +/*! + * Helper macro to unpack a @ref NvU64_ALIGN32 structure as a @ref NvU64. 
+ *
+ * @param[in]  pSrc   Pointer to the NvU64_ALIGN32 structure to unpack
+ */
+#define NvU64_ALIGN32_VAL(pSrc)                                               \
+    ((NvU64) ((NvU64)((pSrc)->lo) | (((NvU64)(pSrc)->hi) << 32U)))
+
+/*!
+ * Helper macro to check whether a 32-bit-aligned 64-bit number is zero.
+ *
+ * @param[in] _pU64   Pointer to an NvU64_ALIGN32 structure.
+ *
+ * @return
+ *     NV_TRUE   _pU64 is zero.
+ *     NV_FALSE  otherwise.
+ */
+#define NvU64_ALIGN32_IS_ZERO(_pU64)                                          \
+    (((_pU64)->lo == 0U) && ((_pU64)->hi == 0U))
+
+/*!
+ * Helper macro to add two 32-bit-aligned 64-bit numbers on a 64-bit processor.
+ *
+ * @param[in]     pSrc1   Pointer to the NvU64_ALIGN32 source 1 structure.
+ * @param[in]     pSrc2   Pointer to the NvU64_ALIGN32 source 2 structure.
+ * @param[in,out] pDst    Pointer to the NvU64_ALIGN32 destination structure.
+ */
+#define NvU64_ALIGN32_ADD(pDst, pSrc1, pSrc2)                                 \
+do {                                                                          \
+    NvU64 __dst, __src1, __src2;                                              \
+                                                                              \
+    NvU64_ALIGN32_UNPACK(&__src1, (pSrc1));                                   \
+    NvU64_ALIGN32_UNPACK(&__src2, (pSrc2));                                   \
+    __dst = __src1 + __src2;                                                  \
+    NvU64_ALIGN32_PACK((pDst), &__dst);                                       \
+} while (NV_FALSE)
+
+/*!
+ * Helper macro to subtract two 32-bit-aligned 64-bit numbers on a 64-bit
+ * processor.
+ *
+ * @param[in]     pSrc1   Pointer to the NvU64_ALIGN32 source 1 structure.
+ * @param[in]     pSrc2   Pointer to the NvU64_ALIGN32 source 2 structure.
+ * @param[in,out] pDst    Pointer to the NvU64_ALIGN32 destination structure.
+ */
+#define NvU64_ALIGN32_SUB(pDst, pSrc1, pSrc2)                                 \
+do {                                                                          \
+    NvU64 __dst, __src1, __src2;                                              \
+                                                                              \
+    NvU64_ALIGN32_UNPACK(&__src1, (pSrc1));                                   \
+    NvU64_ALIGN32_UNPACK(&__src2, (pSrc2));                                   \
+    __dst = __src1 - __src2;                                                  \
+    NvU64_ALIGN32_PACK((pDst), &__dst);                                       \
+} while (NV_FALSE)
+
+/*!
+ * Structure for representing 32-bit-aligned NvU64 (64-bit unsigned integer)
+ * values. This structure must be used because 32-bit and 64-bit processor
+ * compilers will pack/align NvU64 differently.
+ *
+ * One use case is RM running on a 64-bit processor whereas the PMU is a 32-bit
+ * processor; this alignment difference would result in corrupted transactions
+ * between the RM and PMU.
+ *
+ * See the @ref NvU64_ALIGN32_PACK and @ref NvU64_ALIGN32_UNPACK macros for
+ * packing and unpacking these structures.
+ *
+ * @note The intention of this structure is to provide a datatype which will be
+ *       packed/aligned consistently and efficiently across all platforms.
+ *       We don't want to use "NV_DECLARE_ALIGNED(NvU64, 8)" because that
+ *       leads to memory waste on our 32-bit uprocessors (e.g. FALCONs) where
+ *       DMEM efficiency is vital.
+ */
+typedef struct
+{
+    /*!
+     * Low 32 bits.
+     */
+    NvU32 lo;
+    /*!
+     * High 32 bits.
+     */
+    NvU32 hi;
+} NvU64_ALIGN32;
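+
+/*
+ * Illustrative usage sketch (not part of the original header): moving a
+ * 64-bit value through an NvU64_ALIGN32, as done across 32/64-bit boundaries.
+ *
+ *     NvU64         value = 0x123456789ABCDEF0ULL;
+ *     NvU64_ALIGN32 packed;
+ *     NvU64         roundTrip;
+ *
+ *     NvU64_ALIGN32_PACK(&packed, &value);        // packed.lo == 0x9ABCDEF0,
+ *                                                 // packed.hi == 0x12345678
+ *     NvU64_ALIGN32_UNPACK(&roundTrip, &packed);  // roundTrip == value
+ */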
+
+/* Useful macro to hide required double cast */
+#define NV_PTR_TO_NvP64(n) (NvP64)(NvUPtr)(n)
+#define NV_SIGN_EXT_PTR_TO_NvP64(p) ((NvP64)(NvS64)(NvSPtr)(p))
+#define KERNEL_POINTER_TO_NvP64(p) ((NvP64)(uintptr_t)(p))
+
+ /***************************************************************************\
+|*                                                                           *|
+|*                        Limits for common types.                           *|
+|*                                                                           *|
+ \***************************************************************************/
+
+/* Explanation of the current form of these limits:
+ *
+ * - Decimal is used, as hex values are by default positive.
+ * - Casts are not used, as usage in the preprocessor itself (#if) ends poorly.
+ * - The subtraction of 1 for some MIN values is used to get around the fact
+ *   that the C syntax actually treats -x as NEGATE(x) instead of a distinct
+ *   number. Since 2147483648 isn't a valid positive 32-bit signed value, we
+ *   take the largest valid positive signed number, negate it, and subtract 1.
+ */
+#define NV_S8_MIN       (-128)
+#define NV_S8_MAX       (+127)
+#define NV_U8_MIN       (0U)
+#define NV_U8_MAX       (+255U)
+#define NV_S16_MIN      (-32768)
+#define NV_S16_MAX      (+32767)
+#define NV_U16_MIN      (0U)
+#define NV_U16_MAX      (+65535U)
+#define NV_S32_MIN      (-2147483647 - 1)
+#define NV_S32_MAX      (+2147483647)
+#define NV_U32_MIN      (0U)
+#define NV_U32_MAX      (+4294967295U)
+#define NV_S64_MIN      (-9223372036854775807LL - 1LL)
+#define NV_S64_MAX      (+9223372036854775807LL)
+#define NV_U64_MIN      (0ULL)
+#define NV_U64_MAX      (+18446744073709551615ULL)
+
+/* Aligns fields in structs so they match up between 32-bit and 64-bit builds */
+#if defined(__GNUC__) || defined(__clang__) || defined(NV_QNX)
+#define NV_ALIGN_BYTES(size) __attribute__ ((aligned (size)))
+#elif defined(__arm)
+#define NV_ALIGN_BYTES(size) __align(size)
+#else
+// XXX This is dangerously nonportable! We really shouldn't provide a default
+// version of this that doesn't do anything.
+#define NV_ALIGN_BYTES(size)
+#endif
+
+// NV_DECLARE_ALIGNED() can be used on all platforms.
+// This macro form accounts for the fact that __declspec on Windows is required
+// before the variable type, and NV_ALIGN_BYTES is required after the variable
+// name.
+#if defined(__GNUC__) || defined(__clang__) || defined(NV_QNX)
+#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) TYPE_VAR __attribute__ ((aligned (ALIGN)))
+#elif defined(_MSC_VER)
+#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) __declspec(align(ALIGN)) TYPE_VAR
+#elif defined(__arm)
+#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) __align(ALIGN) TYPE_VAR
+#endif
+
+ /***************************************************************************\
+|*                       Function Declaration Types                          *|
+ \***************************************************************************/
+
+// Stretching the meaning of "nvtypes", but this seems to be the least
+// offensive place to re-locate these from nvos.h, which cannot be included by
+// a number of builds that need them.
+
+#if defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER)
+    #define NV_ATTRIBUTE_UNUSED __attribute__((__unused__))
+#else
+    #define NV_ATTRIBUTE_UNUSED
+#endif
+
+#if defined(_MSC_VER)
+
+    #if _MSC_VER >= 1310
+    #define NV_NOINLINE __declspec(noinline)
+    #else
+    #define NV_NOINLINE
+    #endif
+
+    #define NV_INLINE __inline
+
+    #if _MSC_VER >= 1200
+    #define NV_FORCEINLINE __forceinline
+    #else
+    #define NV_FORCEINLINE __inline
+    #endif
+
+    #define NV_APIENTRY  __stdcall
+    #define NV_FASTCALL  __fastcall
+    #define NV_CDECLCALL __cdecl
+    #define NV_STDCALL   __stdcall
+
+    #define NV_FORCERESULTCHECK
+
+    #define NV_FORMAT_PRINTF(_f, _a)
+
+#else // ! defined(_MSC_VER)
+
+    #if defined(__GNUC__)
+    #if (__GNUC__ > 3) || \
+        ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) && (__GNUC_PATCHLEVEL__ >= 1))
+    #define NV_NOINLINE __attribute__((__noinline__))
+    #endif
+    #elif defined(__clang__)
+    #if __has_attribute(noinline)
+    #define NV_NOINLINE __attribute__((__noinline__))
+    #endif
+    #elif defined(__arm) && (__ARMCC_VERSION >= 300000)
+    #define NV_NOINLINE __attribute__((__noinline__))
+    #elif (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)) ||\
+          (defined(__SUNPRO_CC) && (__SUNPRO_CC >= 0x590))
+    #define NV_NOINLINE __attribute__((__noinline__))
+    #elif defined (__INTEL_COMPILER)
+    #define NV_NOINLINE __attribute__((__noinline__))
+    #endif
+
+    #if !defined(NV_NOINLINE)
+    #define NV_NOINLINE
+    #endif
+
+    /* GreenHills compiler defines __GNUC__, but doesn't support the
+     * __inline__ keyword.
*/ + #if defined(__ghs__) + #define NV_INLINE inline + #elif defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER) + #define NV_INLINE __inline__ + #elif defined (macintosh) || defined(__SUNPRO_C) || defined(__SUNPRO_CC) + #define NV_INLINE inline + #elif defined(__arm) + #define NV_INLINE __inline + #else + #define NV_INLINE + #endif + + /* Don't force inline on DEBUG builds -- it's annoying for debuggers. */ + #if !defined(DEBUG) + /* GreenHills compiler defines __GNUC__, but doesn't support + * __attribute__ or __inline__ keyword. */ + #if defined(__ghs__) + #define NV_FORCEINLINE inline + #elif defined(__GNUC__) + // GCC 3.1 and beyond support the always_inline function attribute. + #if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)) + #define NV_FORCEINLINE __attribute__((__always_inline__)) __inline__ + #else + #define NV_FORCEINLINE __inline__ + #endif + #elif defined(__clang__) + #if __has_attribute(always_inline) + #define NV_FORCEINLINE __attribute__((__always_inline__)) __inline__ + #else + #define NV_FORCEINLINE __inline__ + #endif + #elif defined(__arm) && (__ARMCC_VERSION >= 220000) + // RVDS 2.2 also supports forceinline, but ADS 1.2 does not + #define NV_FORCEINLINE __forceinline + #else /* defined(__GNUC__) */ + #define NV_FORCEINLINE NV_INLINE + #endif + #else + #define NV_FORCEINLINE NV_INLINE + #endif + + #define NV_APIENTRY + #define NV_FASTCALL + #define NV_CDECLCALL + #define NV_STDCALL + + /* + * The 'warn_unused_result' function attribute prompts GCC to issue a + * warning if the result of a function tagged with this attribute + * is ignored by a caller. In combination with '-Werror', it can be + * used to enforce result checking in RM code; at this point, this + * is only done on UNIX. + */ + #if defined(__GNUC__) && defined(NV_UNIX) + #if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4)) + #define NV_FORCERESULTCHECK __attribute__((__warn_unused_result__)) + #else + #define NV_FORCERESULTCHECK + #endif + #elif defined(__clang__) + #if __has_attribute(warn_unused_result) + #define NV_FORCERESULTCHECK __attribute__((__warn_unused_result__)) + #else + #define NV_FORCERESULTCHECK + #endif + #else /* defined(__GNUC__) */ + #define NV_FORCERESULTCHECK + #endif + + /* + * Functions decorated with NV_FORMAT_PRINTF(f, a) have a format string at + * parameter number 'f' and variadic arguments start at parameter number 'a'. + * (Note that for C++ methods, there is an implicit 'this' parameter so + * explicit parameters are numbered from 2.) + */ + #if defined(__GNUC__) + #define NV_FORMAT_PRINTF(_f, _a) __attribute__((format(printf, _f, _a))) + #else + #define NV_FORMAT_PRINTF(_f, _a) + #endif + +#endif // defined(_MSC_VER) + +#ifdef __cplusplus +} +#endif + +#endif /* NVTYPES_INCLUDED */ diff --git a/kernel-open/common/inc/os-interface.h b/kernel-open/common/inc/os-interface.h new file mode 100644 index 0000000..f5de274 --- /dev/null +++ b/kernel-open/common/inc/os-interface.h @@ -0,0 +1,279 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*
+ * OS interface definitions needed by os-interface.c
+ */
+
+#ifndef OS_INTERFACE_H
+#define OS_INTERFACE_H
+
+/******************* Operating System Interface Routines *******************\
+*                                                                           *
+*         Operating system wrapper functions used to abstract the OS.       *
+*                                                                           *
+\***************************************************************************/
+
+/*
+ * The bracketed include file names on the following lines were lost in
+ * transit; they are reconstructed here from the types this header uses
+ * (NvU32/NvU64, NV_STATUS, NV_API_CALL, MemoryArea, nv_cap_t) and may not
+ * match the original list exactly.
+ */
+#include <nvtypes.h>
+#include <nvstatus.h>
+#include "nv_stdarg.h"
+#include <nv-kernel-interface-api.h>
+#include <os/nv_memory_type.h>
+#include <os/nv_memory_area.h>
+#include <nv-caps.h>
+
+#include "rs_access.h"
+
+typedef struct
+{
+    NvU32 os_major_version;
+    NvU32 os_minor_version;
+    NvU32 os_build_number;
+    const char * os_build_version_str;
+    const char * os_build_date_plus_str;
+} os_version_info;
+
+/* Each OS defines its own version of this opaque type */
+struct os_work_queue;
+
+/* Each OS defines its own version of this opaque type */
+typedef struct os_wait_queue os_wait_queue;
+
+/*
+ * ---------------------------------------------------------------------------
+ *
+ * Function prototypes for OS interface.
+ * + * --------------------------------------------------------------------------- + */ + +NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64); +void NV_API_CALL os_free_mem (void *); +NV_STATUS NV_API_CALL os_get_system_time (NvU32 *, NvU32 *); +NvU64 NV_API_CALL os_get_monotonic_time_ns (void); +NvU64 NV_API_CALL os_get_monotonic_time_ns_hr (void); +NvU64 NV_API_CALL os_get_monotonic_tick_resolution_ns (void); +NV_STATUS NV_API_CALL os_delay (NvU32); +NV_STATUS NV_API_CALL os_delay_us (NvU32); +NvU64 NV_API_CALL os_get_cpu_frequency (void); +NvU32 NV_API_CALL os_get_current_process (void); +void NV_API_CALL os_get_current_process_name (char *, NvU32); +NV_STATUS NV_API_CALL os_get_current_thread (NvU64 *); +char* NV_API_CALL os_string_copy (char *, const char *); +NvU32 NV_API_CALL os_string_length (const char *); +NvU32 NV_API_CALL os_strtoul (const char *, char **, NvU32); +NvS32 NV_API_CALL os_string_compare (const char *, const char *); +NvS32 NV_API_CALL os_snprintf (char *, NvU32, const char *, ...); +NvS32 NV_API_CALL os_vsnprintf (char *, NvU32, const char *, va_list); +void NV_API_CALL os_log_error (const char *, va_list); +void* NV_API_CALL os_mem_copy (void *, const void *, NvU32); +NV_STATUS NV_API_CALL os_memcpy_from_user (void *, const void *, NvU32); +NV_STATUS NV_API_CALL os_memcpy_to_user (void *, const void *, NvU32); +void* NV_API_CALL os_mem_set (void *, NvU8, NvU32); +NvS32 NV_API_CALL os_mem_cmp (const NvU8 *, const NvU8 *, NvU32); +void* NV_API_CALL os_pci_init_handle (NvU32, NvU8, NvU8, NvU8, NvU16 *, NvU16 *); +NV_STATUS NV_API_CALL os_pci_read_byte (void *, NvU32, NvU8 *); +NV_STATUS NV_API_CALL os_pci_read_word (void *, NvU32, NvU16 *); +NV_STATUS NV_API_CALL os_pci_read_dword (void *, NvU32, NvU32 *); +NV_STATUS NV_API_CALL os_pci_write_byte (void *, NvU32, NvU8); +NV_STATUS NV_API_CALL os_pci_write_word (void *, NvU32, NvU16); +NV_STATUS NV_API_CALL os_pci_write_dword (void *, NvU32, NvU32); +NvBool NV_API_CALL os_pci_remove_supported (void); +void NV_API_CALL os_pci_remove (void *); +void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32); +void NV_API_CALL os_unmap_kernel_space (void *, NvU64); +#if defined(NV_VMWARE) +void* NV_API_CALL os_map_user_space (MemoryArea *, NvU32, NvU32, void **); +void NV_API_CALL os_unmap_user_space (void *, NvU64, void *); +#endif +NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void); +NV_STATUS NV_API_CALL os_flush_user_cache (void); +void NV_API_CALL os_flush_cpu_write_combine_buffer(void); +NvU8 NV_API_CALL os_io_read_byte (NvU32); +NvU16 NV_API_CALL os_io_read_word (NvU32); +NvU32 NV_API_CALL os_io_read_dword (NvU32); +void NV_API_CALL os_io_write_byte (NvU32, NvU8); +void NV_API_CALL os_io_write_word (NvU32, NvU16); +void NV_API_CALL os_io_write_dword (NvU32, NvU32); +NvBool NV_API_CALL os_is_administrator (void); +NvBool NV_API_CALL os_check_access (RsAccessRight accessRight); +void NV_API_CALL os_dbg_init (void); +void NV_API_CALL os_dbg_breakpoint (void); +void NV_API_CALL os_dbg_set_level (NvU32); +NvU32 NV_API_CALL os_get_cpu_count (void); +NvU32 NV_API_CALL os_get_cpu_number (void); +void NV_API_CALL os_disable_console_access (void); +void NV_API_CALL os_enable_console_access (void); +NV_STATUS NV_API_CALL os_registry_init (void); +NvU64 NV_API_CALL os_get_max_user_va (void); +NV_STATUS NV_API_CALL os_schedule (void); +NV_STATUS NV_API_CALL os_alloc_spinlock (void **); +void NV_API_CALL os_free_spinlock (void *); +NvU64 NV_API_CALL os_acquire_spinlock (void *); +void NV_API_CALL os_release_spinlock (void *, 
NvU64); +NV_STATUS NV_API_CALL os_queue_work_item (struct os_work_queue *, void *); +NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *, NvBool); +NvBool NV_API_CALL os_is_queue_flush_ongoing (struct os_work_queue *); +NV_STATUS NV_API_CALL os_alloc_mutex (void **); +void NV_API_CALL os_free_mutex (void *); +NV_STATUS NV_API_CALL os_acquire_mutex (void *); +NV_STATUS NV_API_CALL os_cond_acquire_mutex (void *); +void NV_API_CALL os_release_mutex (void *); +void* NV_API_CALL os_alloc_semaphore (NvU32); +void NV_API_CALL os_free_semaphore (void *); +NV_STATUS NV_API_CALL os_acquire_semaphore (void *); +NV_STATUS NV_API_CALL os_cond_acquire_semaphore (void *); +NV_STATUS NV_API_CALL os_release_semaphore (void *); +void* NV_API_CALL os_alloc_rwlock (void); +void NV_API_CALL os_free_rwlock (void *); +NV_STATUS NV_API_CALL os_acquire_rwlock_read (void *); +NV_STATUS NV_API_CALL os_acquire_rwlock_write (void *); +NV_STATUS NV_API_CALL os_cond_acquire_rwlock_read (void *); +NV_STATUS NV_API_CALL os_cond_acquire_rwlock_write (void *); +void NV_API_CALL os_release_rwlock_read (void *); +void NV_API_CALL os_release_rwlock_write (void *); +NvBool NV_API_CALL os_semaphore_may_sleep (void); +NV_STATUS NV_API_CALL os_get_version_info (os_version_info*); +NV_STATUS NV_API_CALL os_get_is_openrm (NvBool *); +NvBool NV_API_CALL os_is_isr (void); +NvBool NV_API_CALL os_pat_supported (void); +void NV_API_CALL os_dump_stack (void); +NvBool NV_API_CALL os_is_efi_enabled (void); +NvBool NV_API_CALL os_is_xen_dom0 (void); +NvBool NV_API_CALL os_is_vgx_hyper (void); +NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32); +NvBool NV_API_CALL os_is_grid_supported (void); +NvU32 NV_API_CALL os_get_grid_csp_support (void); +void NV_API_CALL os_bug_check (NvU32, const char *); +NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32); +NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **); +NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *, NvU32); +NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *); +NV_STATUS NV_API_CALL os_get_euid (NvU32 *); +NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr); +NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *); +void NV_API_CALL os_add_record_for_crashLog (void *, NvU32); +void NV_API_CALL os_delete_record_for_crashLog (void *); +NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32); +NV_STATUS NV_API_CALL os_device_vm_present (void); +NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *); +NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *); +NV_STATUS NV_API_CALL os_get_page (NvU64 address); +NV_STATUS NV_API_CALL os_put_page (NvU64 address); +NvU32 NV_API_CALL os_get_page_refcount (NvU64 address); +NvU32 NV_API_CALL os_count_tail_pages (NvU64 address); +void NV_API_CALL os_free_pages_phys (NvU64, NvU32); +NV_STATUS NV_API_CALL os_open_temporary_file (void **); +void NV_API_CALL os_close_file (void *); +NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64); +NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU64); +NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **); +NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64); +NvBool NV_API_CALL os_is_nvswitch_present (void); +NV_STATUS NV_API_CALL os_get_random_bytes (NvU8 *, NvU16); +NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **); +void NV_API_CALL os_free_wait_queue (os_wait_queue *); +void NV_API_CALL os_wait_uninterruptible 
(os_wait_queue *); +void NV_API_CALL os_wait_interruptible (os_wait_queue *); +void NV_API_CALL os_wake_up (os_wait_queue *); +nv_cap_t* NV_API_CALL os_nv_cap_init (const char *); +nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry (nv_cap_t *, const char *, int); +nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *, int); +void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *); +int NV_API_CALL os_nv_cap_validate_and_dup_fd (const nv_cap_t *, int); +void NV_API_CALL os_nv_cap_close_fd (int); +NvS32 NV_API_CALL os_imex_channel_get (NvU64); +NvS32 NV_API_CALL os_imex_channel_count (void); +NV_STATUS NV_API_CALL os_tegra_igpu_perf_boost (void *, NvBool, NvU32); + +NV_STATUS NV_API_CALL os_get_tegra_platform (NvU32 *); +enum os_pci_req_atomics_type { + OS_INTF_PCIE_REQ_ATOMICS_32BIT, + OS_INTF_PCIE_REQ_ATOMICS_64BIT, + OS_INTF_PCIE_REQ_ATOMICS_128BIT +}; +NV_STATUS NV_API_CALL os_enable_pci_req_atomics (void *, enum os_pci_req_atomics_type); +void NV_API_CALL os_pci_trigger_flr(void *handle); +NV_STATUS NV_API_CALL os_get_numa_node_memory_usage (NvS32, NvU64 *, NvU64 *); +NV_STATUS NV_API_CALL os_numa_add_gpu_memory (void *, NvU64, NvU64, NvU32 *); +NV_STATUS NV_API_CALL os_numa_remove_gpu_memory (void *, NvU64, NvU64, NvU32); +NV_STATUS NV_API_CALL os_offline_page_at_address(NvU64 address); +void* NV_API_CALL os_get_pid_info(void); +void NV_API_CALL os_put_pid_info(void *pid_info); +NV_STATUS NV_API_CALL os_find_ns_pid(void *pid_info, NvU32 *ns_pid); +NvBool NV_API_CALL os_is_init_ns(void); +NV_STATUS NV_API_CALL os_iommu_sva_bind(void *arg, void **handle, NvU32 *pasid); +void NV_API_CALL os_iommu_sva_unbind(void *handle); + +extern NvU64 os_page_size; +extern NvU64 os_max_page_size; +extern NvU64 os_page_mask; +extern NvU8 os_page_shift; +extern NvBool os_cc_enabled; +extern NvBool os_cc_sev_snp_enabled; +extern NvBool os_cc_sme_enabled; +extern NvBool os_cc_snp_vtom_enabled; +extern NvBool os_cc_tdx_enabled; +extern NvBool os_dma_buf_enabled; +extern NvBool os_imex_channel_is_supported; + +/* + * --------------------------------------------------------------------------- + * + * Debug macros. + * + * --------------------------------------------------------------------------- + */ + +#define NV_DBG_INFO 0x0 +#define NV_DBG_SETUP 0x1 +#define NV_DBG_USERERRORS 0x2 +#define NV_DBG_WARNINGS 0x3 +#define NV_DBG_ERRORS 0x4 + + +void NV_API_CALL out_string(const char *str); +int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...); + +#define NV_DEV_PRINTF(debuglevel, nv, format, ... ) \ + nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format, NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__) + +#define NV_DEV_PRINTF_STATUS(debuglevel, nv, status, format, ... 
) \ + nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format " (0x%x)\n", NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__, status) + +/* + * Fields for os_lock_user_pages flags parameter + */ +#define NV_LOCK_USER_PAGES_FLAGS_WRITE 0:0 +#define NV_LOCK_USER_PAGES_FLAGS_WRITE_NO 0x00000000 +#define NV_LOCK_USER_PAGES_FLAGS_WRITE_YES 0x00000001 + +// NV OS Tegra platform type defines +#define NV_OS_TEGRA_PLATFORM_SIM 0 +#define NV_OS_TEGRA_PLATFORM_FPGA 1 +#define NV_OS_TEGRA_PLATFORM_SILICON 2 + +#endif /* OS_INTERFACE_H */ diff --git a/kernel-open/common/inc/os/nv_memory_area.h b/kernel-open/common/inc/os/nv_memory_area.h new file mode 100644 index 0000000..55d7fb0 --- /dev/null +++ b/kernel-open/common/inc/os/nv_memory_area.h @@ -0,0 +1,104 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NV_MEMORY_AREA_H +#define NV_MEMORY_AREA_H + +typedef struct MemoryRange +{ + NvU64 start; + NvU64 size; +} MemoryRange; + +typedef struct MemoryArea +{ + MemoryRange *pRanges; + NvU64 numRanges; +} MemoryArea; + +static inline NvU64 memareaSize(MemoryArea memArea) +{ + NvU64 size = 0; + NvU64 idx = 0; + for (idx = 0; idx < memArea.numRanges; idx++) + { + size += memArea.pRanges[idx].size; + } + return size; +} + +static inline MemoryRange +mrangeMake +( + NvU64 start, + NvU64 size +) +{ + MemoryRange range; + range.start = start; + range.size = size; + return range; +} + +static inline NvU64 +mrangeLimit +( + MemoryRange a +) +{ + return a.start + a.size; +} + +static inline NvBool +mrangeIntersects +( + MemoryRange a, + MemoryRange b +) +{ + return ((a.start >= b.start) && (a.start < mrangeLimit(b))) || + ((b.start >= a.start) && (b.start < mrangeLimit(a))); +} + +static inline NvBool +mrangeContains +( + MemoryRange outer, + MemoryRange inner +) +{ + return (inner.start >= outer.start) && (mrangeLimit(inner) <= mrangeLimit(outer)); +} + +static inline MemoryRange +mrangeOffset +( + MemoryRange range, + NvU64 amt +) +{ + range.start += amt; + return range; +} + +#endif /* NV_MEMORY_AREA_H */ diff --git a/kernel-open/common/inc/os/nv_memory_type.h b/kernel-open/common/inc/os/nv_memory_type.h new file mode 100644 index 0000000..34255c7 --- /dev/null +++ b/kernel-open/common/inc/os/nv_memory_type.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
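Usage sketch (illustrative only, not part of this patch): the MemoryRange/MemoryArea helpers above are plain value types over half-open [start, start+size) intervals. The sketch assumes NvU64/NvBool from nvtypes.h are in scope.

    #include "os/nv_memory_area.h"

    static void memory_area_sketch(void)
    {
        MemoryRange ranges[2];
        MemoryArea area;
        NvU64 total;
        NvBool cross, inside;

        /* two disjoint half-open ranges: [0x1000,0x3000) and [0x8000,0x9000) */
        ranges[0] = mrangeMake(0x1000, 0x2000);
        ranges[1] = mrangeMake(0x8000, 0x1000);
        area.pRanges = ranges;
        area.numRanges = 2;

        total  = memareaSize(area);                      /* 0x3000: sums the sizes, not the covered span */
        cross  = mrangeIntersects(ranges[0], ranges[1]); /* NV_FALSE: the ranges are disjoint */
        inside = mrangeContains(ranges[0], mrangeMake(0x1800, 0x800)); /* NV_TRUE: [0x1800,0x2000) lies in ranges[0] */
        (void)total; (void)cross; (void)inside;
    }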
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NV_MEMORY_TYPE_H +#define NV_MEMORY_TYPE_H + +#define NV_MEMORY_NONCONTIGUOUS 0 +#define NV_MEMORY_CONTIGUOUS 1 + +#define NV_MEMORY_CACHED 0 +#define NV_MEMORY_UNCACHED 1 +#define NV_MEMORY_WRITECOMBINED 2 +#define NV_MEMORY_WRITEBACK 5 +#define NV_MEMORY_DEFAULT 6 +#define NV_MEMORY_UNCACHED_WEAK 7 + +#define NV_PROTECT_READABLE 1 +#define NV_PROTECT_WRITEABLE 2 +#define NV_PROTECT_READ_WRITE (NV_PROTECT_READABLE | NV_PROTECT_WRITEABLE) + +#endif /* NV_MEMORY_TYPE_H */ diff --git a/kernel-open/common/inc/os_dsi_panel_props.h b/kernel-open/common/inc/os_dsi_panel_props.h new file mode 100644 index 0000000..89c5e9e --- /dev/null +++ b/kernel-open/common/inc/os_dsi_panel_props.h @@ -0,0 +1,387 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
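Usage sketch (illustrative only, not part of this patch): the contiguity, caching and protection constants in nv_memory_type.h above travel through the os layer as plain NvU32 attributes; the variable names below are invented for the example.

    NvU32 contig  = NV_MEMORY_CONTIGUOUS;
    NvU32 caching = NV_MEMORY_WRITECOMBINED;
    NvU32 protect = NV_PROTECT_READ_WRITE; /* == NV_PROTECT_READABLE | NV_PROTECT_WRITEABLE */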
+ */ +#ifndef _OS_DSI_PANEL_PARAMS_H_ +#define _OS_DSI_PANEL_PARAMS_H_ + +#define DSI_GENERIC_LONG_WRITE 0x29 +#define DSI_DCS_LONG_WRITE 0x39 +#define DSI_GENERIC_SHORT_WRITE_1_PARAMS 0x13 +#define DSI_GENERIC_SHORT_WRITE_2_PARAMS 0x23 +#define DSI_DCS_WRITE_0_PARAM 0x05 +#define DSI_DCS_WRITE_1_PARAM 0x15 +#define DSI_DCS_READ_PARAM 0x06 +#define DSI_DCS_COMPRESSION_MODE 0x07 +#define DSI_DCS_PPS_LONG_WRITE 0x0A + +#define DSI_DCS_SET_ADDR_MODE 0x36 +#define DSI_DCS_EXIT_SLEEP_MODE 0x11 +#define DSI_DCS_ENTER_SLEEP_MODE 0x10 +#define DSI_DCS_SET_DISPLAY_ON 0x29 +#define DSI_DCS_SET_DISPLAY_OFF 0x28 +#define DSI_DCS_SET_TEARING_EFFECT_OFF 0x34 +#define DSI_DCS_SET_TEARING_EFFECT_ON 0x35 +#define DSI_DCS_NO_OP 0x0 +#define DSI_NULL_PKT_NO_DATA 0x9 +#define DSI_BLANKING_PKT_NO_DATA 0x19 +#define DSI_DCS_SET_COMPRESSION_METHOD 0xC0 + +/* DCS commands for command mode */ +#define DSI_ENTER_PARTIAL_MODE 0x12 +#define DSI_SET_PIXEL_FORMAT 0x3A +#define DSI_AREA_COLOR_MODE 0x4C +#define DSI_SET_PARTIAL_AREA 0x30 +#define DSI_SET_PAGE_ADDRESS 0x2B +#define DSI_SET_ADDRESS_MODE 0x36 +#define DSI_SET_COLUMN_ADDRESS 0x2A +#define DSI_WRITE_MEMORY_START 0x2C +#define DSI_WRITE_MEMORY_CONTINUE 0x3C + +#define PKT_ID0(id) ((((id) & 0x3f) << 3) | \ + (((DSI_ENABLE) & 0x1) << 9)) +#define PKT_LEN0(len) (((len) & 0x7) << 0) +#define PKT_ID1(id) ((((id) & 0x3f) << 13) | \ + (((DSI_ENABLE) & 0x1) << 19)) +#define PKT_LEN1(len) (((len) & 0x7) << 10) +#define PKT_ID2(id) ((((id) & 0x3f) << 23) | \ + (((DSI_ENABLE) & 0x1) << 29)) +#define PKT_LEN2(len) (((len) & 0x7) << 20) +#define PKT_ID3(id) ((((id) & 0x3f) << 3) | \ + (((DSI_ENABLE) & 0x1) << 9)) +#define PKT_LEN3(len) (((len) & 0x7) << 0) +#define PKT_ID4(id) ((((id) & 0x3f) << 13) | \ + (((DSI_ENABLE) & 0x1) << 19)) +#define PKT_LEN4(len) (((len) & 0x7) << 10) +#define PKT_ID5(id) ((((id) & 0x3f) << 23) | \ + (((DSI_ENABLE) & 0x1) << 29)) +#define PKT_LEN5(len) (((len) & 0x7) << 20) +#define PKT_LP (((DSI_ENABLE) & 0x1) << 30) +#define NUMOF_PKT_SEQ 12 + +/* DSI pixel data format, enum values should match with dt-bindings in tegra-panel.h */ +typedef enum +{ + DSI_PIXEL_FORMAT_16BIT_P, + DSI_PIXEL_FORMAT_18BIT_P, + DSI_PIXEL_FORMAT_18BIT_NP, + DSI_PIXEL_FORMAT_24BIT_P, + DSI_PIXEL_FORMAT_8BIT_DSC, + DSI_PIXEL_FORMAT_12BIT_DSC, + DSI_PIXEL_FORMAT_16BIT_DSC, + DSI_PIXEL_FORMAT_10BIT_DSC, + DSI_PIXEL_FORMAT_30BIT_P, + DSI_PIXEL_FORMAT_36BIT_P, +} DSIPIXELFORMAT; + +/* DSI virtual channel number */ +typedef enum +{ + DSI_VIRTUAL_CHANNEL_0, + DSI_VIRTUAL_CHANNEL_1, + DSI_VIRTUAL_CHANNEL_2, + DSI_VIRTUAL_CHANNEL_3, +} DSIVIRTUALCHANNEL; + +/* DSI transmit method for video data */ +typedef enum +{ + DSI_VIDEO_TYPE_VIDEO_MODE, + DSI_VIDEO_TYPE_COMMAND_MODE, +} DSIVIDEODATAMODE; + +/* DSI HS clock mode */ +typedef enum +{ + DSI_VIDEO_CLOCK_CONTINUOUS, + DSI_VIDEO_CLOCK_TX_ONLY, +} DSICLOCKMODE; + +/* DSI burst mode setting in video mode. Each mode is assigned with a + * fixed value. The rationale behind this is to avoid change of these + * values, since the calculation of dsi clock depends on them. 
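The PKT_ID*/PKT_LEN* macros above pack up to three (packet id, slot length) pairs into one 32-bit word of a pkt_seq table: ids sit in 6-bit fields at bits 3/13/23 with an enable bit above each (bits 9/19/29), lengths sit in 3-bit fields at bits 0/10/20, and PKT_LP (bit 30) marks the line as low-power. DSI_ENABLE, which the macros reference, is defined further down in this header; since the macros expand at the point of use, the ordering is harmless. A hypothetical sketch with illustrative values only:

    /* one of the NUMOF_PKT_SEQ words: a DCS long-write slot, then a
     * null-packet slot, transmitted in low-power mode */
    NvU32 pkt_seq_word = PKT_ID0(DSI_DCS_LONG_WRITE)   | PKT_LEN0(3) |
                         PKT_ID1(DSI_NULL_PKT_NO_DATA) | PKT_LEN1(4) |
                         PKT_LP;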
*/ +typedef enum +{ + DSI_VIDEO_NON_BURST_MODE = 0, + DSI_VIDEO_NON_BURST_MODE_WITH_SYNC_END = 1, + DSI_VIDEO_BURST_MODE_LOWEST_SPEED = 2, + DSI_VIDEO_BURST_MODE_LOW_SPEED = 3, + DSI_VIDEO_BURST_MODE_MEDIUM_SPEED = 4, + DSI_VIDEO_BURST_MODE_FAST_SPEED = 5, + DSI_VIDEO_BURST_MODE_FASTEST_SPEED = 6, +} DSIVIDEOBURSTMODE; + +/* DSI Ganged Mode */ +typedef enum +{ + DSI_GANGED_SYMMETRIC_LEFT_RIGHT = 1, + DSI_GANGED_SYMMETRIC_EVEN_ODD = 2, + DSI_GANGED_SYMMETRIC_LEFT_RIGHT_OVERLAP = 3, +} DSIGANGEDTYPE; + +typedef enum +{ + DSI_LINK0, + DSI_LINK1, +} DSILINKNUM; + +/* DSI Command Packet type */ +typedef enum +{ + DSI_PACKET_CMD, + DSI_DELAY_MS, + DSI_GPIO_SET, + DSI_SEND_FRAME, + DSI_PACKET_VIDEO_VBLANK_CMD, + DSI_DELAY_US, +} DSICMDPKTTYPE; + +/* DSI Phy type */ +typedef enum +{ + DSI_DPHY, + DSI_CPHY, +} DSIPHYTYPE; + +enum { + DSI_GPIO_LCD_RESET, + DSI_GPIO_PANEL_EN, + DSI_GPIO_PANEL_EN_1, + DSI_GPIO_BL_ENABLE, + DSI_GPIO_BL_PWM, + DSI_GPIO_AVDD_AVEE_EN, + DSI_GPIO_VDD_1V8_LCD_EN, + DSI_GPIO_TE, + DSI_GPIO_BRIDGE_EN_0, + DSI_GPIO_BRIDGE_EN_1, + DSI_GPIO_BRIDGE_REFCLK_EN, + DSI_N_GPIO_PANEL, /* add new gpio above this entry */ +}; + +enum +{ + DSI_DISABLE, + DSI_ENABLE, +}; + + +typedef struct +{ + NvU8 cmd_type; + NvU8 data_id; + union + { + NvU16 data_len; + NvU16 delay_ms; + NvU16 delay_us; + NvU32 gpio; + NvU16 frame_cnt; + struct + { + NvU8 data0; + NvU8 data1; + } sp; + } sp_len_dly; + NvU32 *pdata; + NvU8 link_id; + NvBool club_cmd; +} DSI_CMD, *PDSICMD; + +typedef struct +{ + NvU16 t_hsdexit_ns; + NvU16 t_hstrail_ns; + NvU16 t_datzero_ns; + NvU16 t_hsprepare_ns; + NvU16 t_hsprebegin_ns; + NvU16 t_hspost_ns; + + NvU16 t_clktrail_ns; + NvU16 t_clkpost_ns; + NvU16 t_clkzero_ns; + NvU16 t_tlpx_ns; + + NvU16 t_clkprepare_ns; + NvU16 t_clkpre_ns; + NvU16 t_wakeup_ns; + + NvU16 t_taget_ns; + NvU16 t_tasure_ns; + NvU16 t_tago_ns; +} DSI_PHY_TIMING_IN_NS; + +typedef struct +{ + NvU32 hActive; + NvU32 vActive; + NvU32 hFrontPorch; + NvU32 vFrontPorch; + NvU32 hBackPorch; + NvU32 vBackPorch; + NvU32 hSyncWidth; + NvU32 vSyncWidth; + NvU32 hPulsePolarity; + NvU32 vPulsePolarity; + NvU32 pixelClkRate; +} DSITIMINGS, *PDSITIMINGS; + +typedef struct +{ + NvU8 n_data_lanes; /* required */ + NvU8 pixel_format; /* required */ + NvU8 refresh_rate; /* required */ + NvU8 rated_refresh_rate; + NvU8 panel_reset; /* required */ + NvU8 virtual_channel; /* required */ + NvU8 dsi_instance; + NvU16 dsi_panel_rst_gpio; + NvU16 dsi_panel_bl_en_gpio; + NvU16 dsi_panel_bl_pwm_gpio; + NvU16 even_odd_split_width; + NvU8 controller_vs; + + NvBool panel_has_frame_buffer; /* required*/ + + /* Deprecated. Use DSI_SEND_FRAME panel command instead. 
*/ + NvBool panel_send_dc_frames; + + DSI_CMD *dsi_init_cmd; /* required */ + NvU16 n_init_cmd; /* required */ + NvU32 *dsi_init_cmd_array; + NvU32 init_cmd_array_size; + NvBool sendInitCmdsEarly; + + DSI_CMD *dsi_early_suspend_cmd; + NvU16 n_early_suspend_cmd; + NvU32 *dsi_early_suspend_cmd_array; + NvU32 early_suspend_cmd_array_size; + + DSI_CMD *dsi_late_resume_cmd; + NvU16 n_late_resume_cmd; + NvU32 *dsi_late_resume_cmd_array; + NvU32 late_resume_cmd_array_size; + + DSI_CMD *dsi_postvideo_cmd; + NvU16 n_postvideo_cmd; + NvU32 *dsi_postvideo_cmd_array; + NvU32 postvideo_cmd_array_size; + + DSI_CMD *dsi_suspend_cmd; /* required */ + NvU16 n_suspend_cmd; /* required */ + NvU32 *dsi_suspend_cmd_array; + NvU32 suspend_cmd_array_size; + + NvU8 video_data_type; /* required */ + NvU8 video_clock_mode; + NvU8 video_burst_mode; + NvU8 ganged_type; + NvU16 ganged_overlap; + NvBool ganged_swap_links; + NvBool ganged_write_to_all_links; + NvU8 split_link_type; + + NvU8 suspend_aggr; + + NvU16 panel_buffer_size_byte; + NvU16 panel_reset_timeout_msec; + + NvBool hs_cmd_mode_supported; + NvBool hs_cmd_mode_on_blank_supported; + NvBool enable_hs_clock_on_lp_cmd_mode; + NvBool no_pkt_seq_eot; /* 1st generation panel may not + * support eot. Don't set it for + * most panels.*/ + const NvU32 *pktSeq; + NvU32 *pktSeq_array; + NvU32 pktSeq_array_size; + NvBool skip_dsi_pkt_header; + NvBool power_saving_suspend; + NvBool suspend_stop_stream_late; + NvBool dsi2lvds_bridge_enable; + NvBool dsi2edp_bridge_enable; + + NvU32 max_panel_freq_khz; + NvU32 lp_cmd_mode_freq_khz; + NvU32 lp_read_cmd_mode_freq_khz; + NvU32 hs_clk_in_lp_cmd_mode_freq_khz; + NvU32 burst_mode_freq_khz; + NvU32 fpga_freq_khz; + + NvU32 te_gpio; + NvBool te_polarity_low; + NvBool dsiEnVRR; + NvBool dsiVrrPanelSupportsTe; + NvBool dsiForceSetTePin; + + int panel_gpio[DSI_N_GPIO_PANEL]; + NvBool panel_gpio_populated; + + NvU32 dpd_dsi_pads; + + DSI_PHY_TIMING_IN_NS phyTimingNs; + + NvU8 *bl_name; + + NvBool lp00_pre_panel_wakeup; + NvBool ulpm_not_supported; + NvBool use_video_host_fifo_for_cmd; + NvBool dsi_csi_loopback; + NvBool set_max_timeout; + NvBool use_legacy_dphy_core; + // Swap P/N pins polarity of all data lanes + NvBool swap_data_lane_polarity; + // Swap P/N pins polarity of clock lane + NvBool swap_clock_lane_polarity; + // Reverse clock polarity for partition A/B. 1st SOT bit goes on negedge of Clock lane + NvBool reverse_clock_polarity; + // DSI Lane Crossbar. Allocating xbar array for max number of lanes + NvBool lane_xbar_exists; + NvU32 lane_xbar_ctrl[8]; + NvU32 refresh_rate_adj; + + NvU8 dsiPhyType; + NvBool en_data_scrambling; + + NvU32 dsipll_vco_rate_hz; + NvU32 dsipll_clkoutpn_rate_hz; + NvU32 dsipll_clkouta_rate_hz; + NvU32 vpll0_rate_hz; + + DSITIMINGS dsiTimings; + + // DSC Parameters + NvBool dsiDscEnable; + NvU32 dsiDscBpp; + NvU32 dsiDscNumSlices; + NvU32 dsiDscSliceWidth; + NvU32 dsiDscSliceHeight; + NvBool dsiDscEnBlockPrediction; + NvBool dsiDscEnDualDsc; + NvU32 dsiDscDecoderMajorVersion; + NvU32 dsiDscDecoderMinorVersion; + NvBool dsiDscUseCustomPPS; + NvU32 dsiDscCustomPPSData[32]; + + // Driver allocates memory for PPS cmd to be sent to Panel + NvBool ppsCmdMemAllocated; +} DSI_PANEL_INFO; + +#endif diff --git a/kernel-open/common/inc/os_gpio.h b/kernel-open/common/inc/os_gpio.h new file mode 100644 index 0000000..b6a96d2 --- /dev/null +++ b/kernel-open/common/inc/os_gpio.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
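A hypothetical init-sequence table for the DSI_CMD and DSI_PANEL_INFO types defined in os_dsi_panel_props.h above; the command choice and the 120 ms settle delay are illustrative, not taken from this patch:

    static DSI_CMD example_init_cmds[] = {
        /* short DCS write: exit sleep, wait for the panel, display on */
        { DSI_PACKET_CMD, DSI_DCS_WRITE_0_PARAM,
          { .sp = { DSI_DCS_EXIT_SLEEP_MODE, 0 } }, NULL, DSI_LINK0, NV_FALSE },
        { DSI_DELAY_MS, 0, { .delay_ms = 120 }, NULL, DSI_LINK0, NV_FALSE },
        { DSI_PACKET_CMD, DSI_DCS_WRITE_0_PARAM,
          { .sp = { DSI_DCS_SET_DISPLAY_ON, 0 } }, NULL, DSI_LINK0, NV_FALSE },
    };
    /* wired up via the DSI_PANEL_INFO fields: .dsi_init_cmd = example_init_cmds,
     * .n_init_cmd = 3 */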
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _OS_GPIO_H_ +#define _OS_GPIO_H_ + +typedef enum +{ + NV_OS_GPIO_FUNC_HOTPLUG_A, + NV_OS_GPIO_FUNC_HOTPLUG_B, + NV_OS_GPIO_FUNC_HOTPLUG_C, + NV_OS_GPIO_FUNC_HOTPLUG_D, +} NV_OS_GPIO_FUNC_NAMES; + +#endif diff --git a/kernel-open/common/inc/rm-gpu-ops.h b/kernel-open/common/inc/rm-gpu-ops.h new file mode 100644 index 0000000..7ea3414 --- /dev/null +++ b/kernel-open/common/inc/rm-gpu-ops.h @@ -0,0 +1,119 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _RM_GPU_OPS_H_ +#define _RM_GPU_OPS_H_ + + + +#include +#include +#include "nv_stdarg.h" +#include +#include + +NV_STATUS NV_API_CALL rm_gpu_ops_create_session (nvidia_stack_t *, nvgpuSessionHandle_t *); +NV_STATUS NV_API_CALL rm_gpu_ops_destroy_session (nvidia_stack_t *, nvgpuSessionHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_device_create (nvidia_stack_t *, nvgpuSessionHandle_t, const nvgpuInfo_t *, const NvProcessorUuid *, nvgpuDeviceHandle_t *, NvBool); +NV_STATUS NV_API_CALL rm_gpu_ops_device_destroy (nvidia_stack_t *, nvgpuDeviceHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_address_space_create(nvidia_stack_t *, nvgpuDeviceHandle_t, unsigned long long, unsigned long long, NvBool, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_dup_address_space(nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle, NvHandle, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_address_space_destroy(nvidia_stack_t *, nvgpuAddressSpaceHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_fb(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvLength, NvU64 *, nvgpuAllocInfo_t); + +NV_STATUS NV_API_CALL rm_gpu_ops_pma_alloc_pages(nvidia_stack_t *, void *, NvLength, NvU32 , nvgpuPmaAllocationOptions_t, NvU64 *); +NV_STATUS NV_API_CALL rm_gpu_ops_pma_free_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32, NvU32); +NV_STATUS NV_API_CALL rm_gpu_ops_pma_pin_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32, NvU32); +NV_STATUS NV_API_CALL rm_gpu_ops_get_pma_object(nvidia_stack_t *, nvgpuDeviceHandle_t, void **, const nvgpuPmaStatistics_t *); +NV_STATUS NV_API_CALL rm_gpu_ops_pma_register_callbacks(nvidia_stack_t *sp, void *, nvPmaEvictPagesCallback, nvPmaEvictRangeCallback, void *); +void NV_API_CALL rm_gpu_ops_pma_unregister_callbacks(nvidia_stack_t *sp, void *); + +NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_sys(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvLength, NvU64 *, nvgpuAllocInfo_t); + +NV_STATUS NV_API_CALL rm_gpu_ops_get_p2p_caps(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuDeviceHandle_t, nvgpuP2PCapsParams_t); + +NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_map(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, NvLength, void **, NvU32); +NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_ummap(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, void*); +NV_STATUS NV_API_CALL rm_gpu_ops_tsg_allocate(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, const nvgpuTsgAllocParams_t *, nvgpuTsgHandle_t *); +NV_STATUS NV_API_CALL rm_gpu_ops_tsg_destroy(nvidia_stack_t *, nvgpuTsgHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_channel_allocate(nvidia_stack_t *, const nvgpuTsgHandle_t, const nvgpuChannelAllocParams_t *, nvgpuChannelHandle_t *, nvgpuChannelInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_channel_destroy(nvidia_stack_t *, nvgpuChannelHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_memory_free(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64); +NV_STATUS NV_API_CALL rm_gpu_ops_query_caps(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuCaps_t); +NV_STATUS NV_API_CALL rm_gpu_ops_query_ces_caps(nvidia_stack_t *sp, nvgpuDeviceHandle_t, nvgpuCesCaps_t); +NV_STATUS NV_API_CALL rm_gpu_ops_get_gpu_info(nvidia_stack_t *, const NvProcessorUuid *pUuid, const nvgpuClientInfo_t *, nvgpuInfo_t *); +NV_STATUS NV_API_CALL rm_gpu_ops_service_device_interrupts_rm(nvidia_stack_t *, nvgpuDeviceHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_dup_allocation(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuAddressSpaceHandle_t, NvU64, 
NvU64 *); + +NV_STATUS NV_API_CALL rm_gpu_ops_dup_memory (nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle, NvHandle, NvHandle *, nvgpuMemoryInfo_t); + +NV_STATUS NV_API_CALL rm_gpu_ops_free_duped_handle(nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle); +NV_STATUS NV_API_CALL rm_gpu_ops_get_fb_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFbInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_get_ecc_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuEccInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_own_page_fault_intr(nvidia_stack_t *, nvgpuDeviceHandle_t, NvBool); +NV_STATUS NV_API_CALL rm_gpu_ops_init_fault_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFaultInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_destroy_fault_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFaultInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_get_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, void *, NvU32 *); +NV_STATUS NV_API_CALL rm_gpu_ops_flush_replayable_fault_buffer(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool); +NV_STATUS NV_API_CALL rm_gpu_ops_toggle_prefetch_faults(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool); +NV_STATUS NV_API_CALL rm_gpu_ops_has_pending_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool *); +NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, NvU32); +NV_STATUS NV_API_CALL rm_gpu_ops_destroy_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_own_access_cntr_intr(nvidia_stack_t *, nvgpuSessionHandle_t, nvgpuAccessCntrInfo_t, NvBool); +NV_STATUS NV_API_CALL rm_gpu_ops_enable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, const nvgpuAccessCntrConfig_t *); +NV_STATUS NV_API_CALL rm_gpu_ops_disable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_set_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, unsigned, NvBool, NvU32, NvU64 *); +NV_STATUS NV_API_CALL rm_gpu_ops_unset_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_get_nvlink_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuNvlinkInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_p2p_object_create(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuDeviceHandle_t, NvHandle *); +void NV_API_CALL rm_gpu_ops_p2p_object_destroy(nvidia_stack_t *, nvgpuSessionHandle_t, NvHandle); +NV_STATUS NV_API_CALL rm_gpu_ops_get_external_alloc_ptes(nvidia_stack_t*, nvgpuAddressSpaceHandle_t, NvHandle, NvU64, NvU64, nvgpuExternalMappingInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_get_external_alloc_phys_addrs(nvidia_stack_t*, nvgpuAddressSpaceHandle_t, NvHandle, NvU64, NvU64, nvgpuExternalPhysAddrInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_retain_channel(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvHandle, NvHandle, void **, nvgpuChannelInstanceInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_bind_channel_resources(nvidia_stack_t *, void *, nvgpuChannelResourceBindParams_t); +void NV_API_CALL rm_gpu_ops_release_channel(nvidia_stack_t *, void *); +void NV_API_CALL rm_gpu_ops_stop_channel(nvidia_stack_t *, void *, NvBool); +NV_STATUS NV_API_CALL rm_gpu_ops_get_channel_resource_ptes(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvP64, NvU64, NvU64, nvgpuExternalMappingInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_report_non_replayable_fault(nvidia_stack_t *, nvgpuDeviceHandle_t, const void *); + +NV_STATUS NV_API_CALL rm_gpu_ops_paging_channel_allocate(nvidia_stack_t *, nvgpuDeviceHandle_t, const 
nvgpuPagingChannelAllocParams_t *, nvgpuPagingChannelHandle_t *, nvgpuPagingChannelInfo_t); +void NV_API_CALL rm_gpu_ops_paging_channel_destroy(nvidia_stack_t *, nvgpuPagingChannelHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_paging_channels_map(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuDeviceHandle_t, NvU64 *); +void NV_API_CALL rm_gpu_ops_paging_channels_unmap(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuDeviceHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *, nvgpuPagingChannelHandle_t, char *, NvU32); +void NV_API_CALL rm_gpu_ops_report_fatal_error(nvidia_stack_t *, NV_STATUS error); + +NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_init(nvidia_stack_t *, struct ccslContext_t **, nvgpuChannelHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_clear(nvidia_stack_t *, struct ccslContext_t *); +NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_key(nvidia_stack_t *, UvmCslContext *[], NvU32); +NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8); +NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *, NvU8 *); +NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt_with_iv(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8*, NvU8 *, NvU8 *); +NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 const *, NvU32, NvU8 *, NvU8 const *, NvU32, NvU8 const *); +NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_sign(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *); +NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_query_message_pool(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64 *); +NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_increment_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64, NvU8 *); +NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_log_encryption(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU32); + +#endif diff --git a/kernel-open/common/inc/rs_access.h b/kernel-open/common/inc/rs_access.h new file mode 100644 index 0000000..221fd1e --- /dev/null +++ b/kernel-open/common/inc/rs_access.h @@ -0,0 +1,276 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
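The rm_gpu_ops entry points above follow a create/use/destroy discipline. A minimal lifecycle sketch (not part of this patch; error paths abbreviated, and sp assumed to be a preallocated nvidia_stack_t):

    static NV_STATUS gpu_ops_lifecycle_sketch(nvidia_stack_t *sp,
                                              const nvgpuInfo_t *pGpuInfo,
                                              const NvProcessorUuid *pUuid)
    {
        nvgpuSessionHandle_t session;
        nvgpuDeviceHandle_t device;
        NV_STATUS status;

        status = rm_gpu_ops_create_session(sp, &session);
        if (status != NV_OK)
            return status;

        /* final NvBool flag: placeholder value for this sketch */
        status = rm_gpu_ops_device_create(sp, session, pGpuInfo, pUuid,
                                          &device, NV_FALSE);
        if (status == NV_OK)
        {
            /* ... address spaces, channels, allocations ... */
            rm_gpu_ops_device_destroy(sp, device);
        }

        rm_gpu_ops_destroy_session(sp, session);
        return status;
    }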
+ */ +#pragma once + +#include +#if defined(_MSC_VER) +#pragma warning(disable:4324) +#endif + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: rs_access.finn +// + + + + +#include "nvtypes.h" +#include "nvmisc.h" + + +/****************************************************************************/ +/* Access right definitions */ +/****************************************************************************/ + +// +// The meaning of each access right is documented in +// resman/docs/rmapi/resource_server/rm_capabilities.adoc +// +// RS_ACCESS_COUNT is the number of access rights that have been defined +// and are in use. All integers in the range [0, RS_ACCESS_COUNT) should +// represent valid access rights. +// +// When adding a new access right, don't forget to update +// 1) The descriptions in the resman/docs/rmapi/resource_server/rm_capabilities.adoc +// 2) RS_ACCESS_COUNT, defined below +// 3) The declaration of g_rsAccessMetadata in rs_access_rights.c +// 4) The list of access rights in drivers/common/chip-config/Chipcontrols.pm +// 5) Any relevant access right callbacks +// + +#define RS_ACCESS_DUP_OBJECT 0U +#define RS_ACCESS_NICE 1U +#define RS_ACCESS_DEBUG 2U +#define RS_ACCESS_PERFMON 3U +#define RS_ACCESS_COUNT 4U + + +/****************************************************************************/ +/* Access right data structures */ +/****************************************************************************/ + +/*! + * @brief A type that can be used to represent any access right. + */ +typedef NvU16 RsAccessRight; + +/*! + * @brief An internal type used to represent one limb in an access right mask. + */ +typedef NvU32 RsAccessLimb; +#define SDK_RS_ACCESS_LIMB_BITS 32 + +/*! + * @brief The number of limbs in the RS_ACCESS_MASK struct. + */ +#define SDK_RS_ACCESS_MAX_LIMBS 1 + +/*! + * @brief The maximum number of possible access rights supported by the + * current data structure definition. + * + * You probably want RS_ACCESS_COUNT instead, which is the number of actual + * access rights defined. + */ +#define SDK_RS_ACCESS_MAX_COUNT (0x20) /* finn: Evaluated from "(SDK_RS_ACCESS_LIMB_BITS * SDK_RS_ACCESS_MAX_LIMBS)" */ + +/** + * @brief A struct representing a set of access rights. + * + * Note that the values of bit positions larger than RS_ACCESS_COUNT is + * undefined, and should not be assumed to be 0 (see RS_ACCESS_MASK_FILL). + */ +typedef struct RS_ACCESS_MASK { + RsAccessLimb limbs[SDK_RS_ACCESS_MAX_LIMBS]; +} RS_ACCESS_MASK; + +/** + * @brief A struct representing auxiliary information about each access right. + */ +typedef struct RS_ACCESS_INFO { + NvU32 flags; +} RS_ACCESS_INFO; + + +/****************************************************************************/ +/* Access right macros */ +/****************************************************************************/ + +#define SDK_RS_ACCESS_LIMB_INDEX(index) ((index) / SDK_RS_ACCESS_LIMB_BITS) +#define SDK_RS_ACCESS_LIMB_POS(index) ((index) % SDK_RS_ACCESS_LIMB_BITS) + +#define SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) \ + ((pAccessMask)->limbs[SDK_RS_ACCESS_LIMB_INDEX(index)]) +#define SDK_RS_ACCESS_OFFSET_MASK(index) \ + NVBIT_TYPE(SDK_RS_ACCESS_LIMB_POS(index), RsAccessLimb) + +/*! + * @brief Checks that accessRight represents a valid access right. + * + * The valid range of access rights is [0, RS_ACCESS_COUNT). 
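A short usage sketch (illustrative only, not part of this header) for the mask macros defined just below. With SDK_RS_ACCESS_MAX_LIMBS == 1, every defined right maps to one bit of limbs[0]; e.g. RS_ACCESS_DEBUG (2) maps to bit 2.

    static NvBool access_mask_sketch(void)
    {
        RS_ACCESS_MASK mask = { { 0 } };

        RS_ACCESS_MASK_ADD(&mask, RS_ACCESS_DUP_OBJECT);
        RS_ACCESS_MASK_ADD(&mask, RS_ACCESS_DEBUG);
        RS_ACCESS_MASK_REMOVE(&mask, RS_ACCESS_DUP_OBJECT);

        /* NV_TRUE: DEBUG is still set, DUP_OBJECT was cleared again */
        return RS_ACCESS_MASK_TEST(&mask, RS_ACCESS_DEBUG);
    }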
+ * + * @param[in] accessRight The access right value to check + * + * @return true if accessRight is valid + * @return false otherwise + */ +#define RS_ACCESS_BOUNDS_CHECK(accessRight) \ + (accessRight < RS_ACCESS_COUNT) + +/*! + * @brief Test whether an access right is present in a set + * + * @param[in] pAccessMask The set of access rights to read + * @param[in] index The access right to examine + * + * @return NV_TRUE if the access right specified by index was present in the set, + * and NV_FALSE otherwise + */ +#define RS_ACCESS_MASK_TEST(pAccessMask, index) \ + (RS_ACCESS_BOUNDS_CHECK(index) && \ + (SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) & SDK_RS_ACCESS_OFFSET_MASK(index)) != 0) + +/*! + * @brief Add an access right to a mask + * + * @param[in] pAccessMask The set of access rights to modify + * @param[in] index The access right to set + */ +#define RS_ACCESS_MASK_ADD(pAccessMask, index) \ + do \ + { \ + if (RS_ACCESS_BOUNDS_CHECK(index)) { \ + SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) |= SDK_RS_ACCESS_OFFSET_MASK(index); \ + } \ + } while (NV_FALSE) + +/*! + * @brief Remove an access right from a mask + * + * @param[in] pAccessMask The set of access rights to modify + * @param[in] index The access right to unset + */ +#define RS_ACCESS_MASK_REMOVE(pAccessMask, index) \ + do \ + { \ + if (RS_ACCESS_BOUNDS_CHECK(index)) { \ + SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) &= ~SDK_RS_ACCESS_OFFSET_MASK(index); \ + } \ + } while (NV_FALSE) + +/*! + * @brief Performs an in-place union between two access right masks + * + * @param[in,out] pMaskOut The access rights mask to be updated + * @param[in] pMaskIn The set of access rights to be added to pMaskOut + */ +#define RS_ACCESS_MASK_UNION(pMaskOut, pMaskIn) \ + do \ + { \ + NvLength limb; \ + for (limb = 0; limb < SDK_RS_ACCESS_MAX_LIMBS; limb++) \ + { \ + SDK_RS_ACCESS_LIMB_ELT(pMaskOut, limb) |= SDK_RS_ACCESS_LIMB_ELT(pMaskIn, limb); \ + } \ + } while (NV_FALSE) + +/*! + * @brief Performs an in-place subtract of one mask's rights from another + * + * @param[in,out] pMaskOut The access rights mask to be updated + * @param[in] pMaskIn The set of access rights to be removed from pMaskOut + */ +#define RS_ACCESS_MASK_SUBTRACT(pMaskOut, pMaskIn) \ + do \ + { \ + NvLength limb; \ + for (limb = 0; limb < SDK_RS_ACCESS_MAX_LIMBS; limb++) \ + { \ + SDK_RS_ACCESS_LIMB_ELT(pMaskOut, limb) &= ~SDK_RS_ACCESS_LIMB_ELT(pMaskIn, limb); \ + } \ + } while (NV_FALSE) + +/*! + * @brief Removes all rights from an access rights mask + * + * @param[in,out] pAccessMask The access rights mask to be updated + */ +#define RS_ACCESS_MASK_CLEAR(pAccessMask) \ + do \ + { \ + portMemSet(pAccessMask, 0, sizeof(*pAccessMask)); \ + } while (NV_FALSE) + +/*! 
+ * @brief Adds all rights to an access rights mask + * + * @param[in,out] pAccessMask The access rights mask to be updated + */ +#define RS_ACCESS_MASK_FILL(pAccessMask) \ + do \ + { \ + portMemSet(pAccessMask, 0xff, sizeof(*pAccessMask)); \ + } while (NV_FALSE) + + +/****************************************************************************/ +/* Share definitions */ +/****************************************************************************/ + +// +// The usage of Share Policy and the meaning of each share type is documented in +// resman/docs/rmapi/resource_server/rm_capabilities.adoc +// +#define RS_SHARE_TYPE_NONE (0U) +#define RS_SHARE_TYPE_ALL (1U) +#define RS_SHARE_TYPE_OS_SECURITY_TOKEN (2U) +#define RS_SHARE_TYPE_CLIENT (3U) +#define RS_SHARE_TYPE_PID (4U) +#define RS_SHARE_TYPE_SMC_PARTITION (5U) +#define RS_SHARE_TYPE_GPU (6U) +#define RS_SHARE_TYPE_FM_CLIENT (7U) +// Must be last. Update when a new SHARE_TYPE is added +#define RS_SHARE_TYPE_MAX (8U) + + +// +// Use Revoke to remove an existing policy from the list. +// Allow is based on OR logic, Require is based on AND logic. +// To share a right, at least one Allow (non-Require) must match, and all Require must pass. +// If Compose is specified, policies will be added to the list. Otherwise, they will replace the list. +// +#define RS_SHARE_ACTION_FLAG_REVOKE NVBIT(0) +#define RS_SHARE_ACTION_FLAG_REQUIRE NVBIT(1) +#define RS_SHARE_ACTION_FLAG_COMPOSE NVBIT(2) + +/****************************************************************************/ +/* Share flag data structures */ +/****************************************************************************/ + +typedef struct RS_SHARE_POLICY { + NvU32 target; + RS_ACCESS_MASK accessMask; + NvU16 type; ///< RS_SHARE_TYPE_ + NvU8 action; ///< RS_SHARE_ACTION_ +} RS_SHARE_POLICY; diff --git a/kernel-open/conftest.sh b/kernel-open/conftest.sh new file mode 100755 index 0000000..fdec0d6 --- /dev/null +++ b/kernel-open/conftest.sh @@ -0,0 +1,5513 @@ +#!/bin/sh + +PATH="${PATH}:/bin:/sbin:/usr/bin" + +# make sure we are in the directory containing this script +SCRIPTDIR=`dirname $0` +cd $SCRIPTDIR + +CC="$1" +ARCH=$2 +SOURCES=$3 +HEADERS=$SOURCES/include +OUTPUT=$4 +XEN_PRESENT=1 +PREEMPT_RT_PRESENT=0 + +NVIDIA_OOT_PATH="/usr/src/nvidia/nvidia-oot" +MODULE_SYMVERS_PATHS="$OUTPUT/Module.symvers" + +# Also search in out-of-tree Module.symvers on Tegra +if [ -d ${NVIDIA_OOT_PATH} ]; then + MODULE_SYMVERS_PATHS="${MODULE_SYMVERS_PATHS} ${NVIDIA_OOT_PATH}/Module.symvers" +fi + +# We also use conftest.sh on FreeBSD to check for which symbols are provided +# by the linux kernel programming interface (linuxkpi) when compiling nvidia-drm.ko +OS_FREEBSD=0 +if [ "$OS" = "FreeBSD" ] ; then + OS_FREEBSD=1 +fi + +# VGX_BUILD parameter defined only for VGX builds (vGPU Host driver) +# VGX_KVM_BUILD parameter defined only vGPU builds on KVM hypervisor +# GRID_BUILD parameter defined only for GRID builds (GRID Guest driver) +# GRID_BUILD_CSP parameter defined only for GRID CSP builds (GRID Guest driver for CSPs) +# VGX_DEVICE_VM_BUILD parameter defined only for Device VM VGX build (vGPU Host driver) + +test_xen() { + # + # Determine if the target kernel is a Xen kernel. It used to be + # sufficient to check for CONFIG_XEN, but the introduction of + # modular para-virtualization (CONFIG_PARAVIRT, etc.) and + # Xen guest support, it is no longer possible to determine the + # target environment at build time. 
Therefore, if both + # CONFIG_XEN and CONFIG_PARAVIRT are present, test_xen() treats + # the kernel as a stand-alone kernel. + # + if ! test_configuration_option CONFIG_XEN || + test_configuration_option CONFIG_PARAVIRT; then + XEN_PRESENT=0 + fi +} + +append_conftest() { + # + # Echo data from stdin: this is a transitional function to make it easier + # to port conftests from drivers with parallel conftest generation to + # older driver versions + # + + while read LINE; do + echo ${LINE} + done +} + +test_header_presence() { + # + # Determine if the given header file (which may or may not be + # present) is provided by the target kernel. + # + # Input: + # $1: relative file path + # + # This routine creates an upper case, underscore version of each of the + # relative file paths, and uses that as the token to either define or + # undefine in a C header file. For example, linux/fence.h becomes + # NV_LINUX_FENCE_H_PRESENT, and that is either defined or undefined, in the + # output (which goes to stdout, just like the rest of this file). + + TEST_CFLAGS="-E -M -I${NVIDIA_OOT_PATH}/include $CFLAGS" + + file="$1" + file_define=NV_`echo $file | tr '/.-' '___' | tr 'a-z' 'A-Z'`_PRESENT + + CODE="#include <$file>" + + if echo "$CODE" | $CC $TEST_CFLAGS - > /dev/null 2>&1; then + echo "#define $file_define" + else + # If preprocessing failed, it could have been because the header + # file under test is not present, or because it is present but + # depends upon the inclusion of other header files. Attempting + # preprocessing again with -MG will ignore a missing header file + # but will still fail if the header file is present. + if echo "$CODE" | $CC $TEST_CFLAGS -MG - > /dev/null 2>&1; then + echo "#undef $file_define" + else + echo "#define $file_define" + fi + fi +} + +build_cflags() { + ISYSTEM=`$CC -print-file-name=include 2> /dev/null` + BASE_CFLAGS="-O2 -D__KERNEL__ \ +-DKBUILD_BASENAME=\"#conftest$$\" -DKBUILD_MODNAME=\"#conftest$$\" \ +-nostdinc -isystem $ISYSTEM \ +-Wno-implicit-function-declaration -Wno-strict-prototypes" + + if [ "$OUTPUT" != "$SOURCES" ]; then + OUTPUT_CFLAGS="-I$OUTPUT/include2 -I$OUTPUT/include" + if [ -f "$OUTPUT/include/generated/autoconf.h" ]; then + AUTOCONF_FILE="$OUTPUT/include/generated/autoconf.h" + else + AUTOCONF_FILE="$OUTPUT/include/linux/autoconf.h" + fi + else + if [ -f "$HEADERS/generated/autoconf.h" ]; then + AUTOCONF_FILE="$HEADERS/generated/autoconf.h" + else + AUTOCONF_FILE="$HEADERS/linux/autoconf.h" + fi + fi + + test_xen + + if [ "$XEN_PRESENT" != "0" ]; then + MACH_CFLAGS="-I$HEADERS/asm/mach-xen" + fi + + KERNEL_ARCH="$ARCH" + + if [ "$ARCH" = "i386" -o "$ARCH" = "x86_64" ]; then + if [ -d "$SOURCES/arch/x86" ]; then + KERNEL_ARCH="x86" + fi + fi + + SOURCE_HEADERS="$HEADERS" + SOURCE_ARCH_HEADERS="$SOURCES/arch/$KERNEL_ARCH/include" + OUTPUT_HEADERS="$OUTPUT/include" + OUTPUT_ARCH_HEADERS="$OUTPUT/arch/$KERNEL_ARCH/include" + + # Look for mach- directories on this arch, and add them to the list of + # includes if that platform is enabled in the configuration file, which + # may have a definition like this: + # #define CONFIG_ARCH_ 1 + for _mach_dir in `ls -1d $SOURCES/arch/$KERNEL_ARCH/mach-* 2>/dev/null`; do + _mach=`echo $_mach_dir | \ + sed -e "s,$SOURCES/arch/$KERNEL_ARCH/mach-,," | \ + tr 'a-z' 'A-Z'` + grep "CONFIG_ARCH_$_mach \+1" $AUTOCONF_FILE > /dev/null 2>&1 + if [ $?
-eq 0 ]; then + MACH_CFLAGS="$MACH_CFLAGS -I$_mach_dir/include" + fi + done + + if [ "$ARCH" = "arm" ]; then + MACH_CFLAGS="$MACH_CFLAGS -D__LINUX_ARM_ARCH__=7" + fi + + # Add the mach-default includes (only found on x86/older kernels) + MACH_CFLAGS="$MACH_CFLAGS -I$SOURCE_HEADERS/asm-$KERNEL_ARCH/mach-default" + MACH_CFLAGS="$MACH_CFLAGS -I$SOURCE_ARCH_HEADERS/asm/mach-default" + + CFLAGS="$BASE_CFLAGS $MACH_CFLAGS $OUTPUT_CFLAGS -include $AUTOCONF_FILE" + CFLAGS="$CFLAGS -I$SOURCE_HEADERS" + CFLAGS="$CFLAGS -I$SOURCE_HEADERS/uapi" + CFLAGS="$CFLAGS -I$SOURCE_HEADERS/xen" + CFLAGS="$CFLAGS -I$OUTPUT_HEADERS/generated/uapi" + CFLAGS="$CFLAGS -I$SOURCE_ARCH_HEADERS" + CFLAGS="$CFLAGS -I$SOURCE_ARCH_HEADERS/uapi" + CFLAGS="$CFLAGS -I$OUTPUT_ARCH_HEADERS/generated" + CFLAGS="$CFLAGS -I$OUTPUT_ARCH_HEADERS/generated/uapi" + + if [ -n "$BUILD_PARAMS" ]; then + CFLAGS="$CFLAGS -D$BUILD_PARAMS" + fi + + # Check if gcc supports asm goto and set CC_HAVE_ASM_GOTO if it does. + # Older kernels perform this check and set this flag in Kbuild, and since + # conftest.sh runs outside of Kbuild it ends up building without this flag. + # Starting with commit e9666d10a5677a494260d60d1fa0b73cc7646eb3 this test + # is done within Kconfig, and the preprocessor flag is no longer needed. + + GCC_GOTO_SH="$SOURCES/build/gcc-goto.sh" + + if [ -f "$GCC_GOTO_SH" ]; then + # Newer versions of gcc-goto.sh don't print anything on success, but + # this is okay, since it's no longer necessary to set CC_HAVE_ASM_GOTO + # based on the output of those versions of gcc-goto.sh. + if [ `/bin/sh "$GCC_GOTO_SH" "$CC"` = "y" ]; then + CFLAGS="$CFLAGS -DCC_HAVE_ASM_GOTO" + fi + fi + + # + # If CONFIG_HAVE_FENTRY is enabled and gcc supports -mfentry flags then set + # CC_USING_FENTRY and add -mfentry into cflags. + # + # linux/ftrace.h file indirectly gets included into the conftest source and + # fails to get compiled, because conftest.sh runs outside of Kbuild it ends + # up building without -mfentry and CC_USING_FENTRY flags. + # + grep "CONFIG_HAVE_FENTRY \+1" $AUTOCONF_FILE > /dev/null 2>&1 + if [ $? -eq 0 ]; then + echo "" > conftest$$.c + + $CC -mfentry -c -x c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + + CFLAGS="$CFLAGS -mfentry -DCC_USING_FENTRY" + fi + fi +} + +CONFTEST_PREAMBLE="#include \"conftest/headers.h\" + #include + #if defined(CONFIG_XEN) && \ + defined(CONFIG_XEN_INTERFACE_VERSION) && !defined(__XEN_INTERFACE_VERSION__) + #define __XEN_INTERFACE_VERSION__ CONFIG_XEN_INTERFACE_VERSION + #endif + #if defined(CONFIG_KASAN) && defined(CONFIG_ARM64) + #if defined(CONFIG_KASAN_SW_TAGS) + #define KASAN_SHADOW_SCALE_SHIFT 4 + #else + #define KASAN_SHADOW_SCALE_SHIFT 3 + #endif + #endif" + +# FreeBSD's Linux compatibility does not have autoconf.h defined +# anywhere yet, only add this part on Linux +if [ ${OS_FREEBSD} -ne 1 ] ; then + CONFTEST_PREAMBLE="${CONFTEST_PREAMBLE} + #if defined(NV_GENERATED_AUTOCONF_H_PRESENT) + #include + #else + #include + #endif" +fi + +test_configuration_option() { + # + # Check to see if the given configuration option is defined + # + + get_configuration_option $1 >/dev/null 2>&1 + + return $? + +} + +set_configuration() { + # + # Set a specific configuration option. This function is called to always + # enable a configuration, in order to verify whether the test code for that + # configuration is no longer required and the corresponding + # conditionally-compiled code in the driver can be removed. 
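# Note (illustrative, not part of this script): the #define/#undef lines
# emitted by set_configuration(), unset_configuration() and
# compile_check_conftest() end up in the generated conftest headers, and
# driver code consumes them with ordinary preprocessor guards, e.g. for
# the ioremap_wc "functions" test defined later in this file:
#
#     #if defined(NV_IOREMAP_WC_PRESENT)
#         void __iomem *p = ioremap_wc(addr, size);
#     #else
#         void __iomem *p = ioremap(addr, size); /* no WC support: plain mapping */
#     #endif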
+ # + DEF="$1" + + if [ "$3" = "" ] + then + VAL="" + CAT="$2" + else + VAL="$2" + CAT="$3" + fi + + echo "#define ${DEF} ${VAL}" | append_conftest "${CAT}" +} + +unset_configuration() { + # + # Un-set a specific configuration option. This function is called to + # always disable a configuration, in order to verify whether the test + # code for that configuration is no longer required and the corresponding + # conditionally-compiled code in the driver can be removed. + # + DEF="$1" + CAT="$2" + + echo "#undef ${DEF}" | append_conftest "${CAT}" +} + +compile_check_conftest() { + # + # Compile the current conftest C file and check+output the result + # + CODE="$1" + DEF="$2" + VAL="$3" + CAT="$4" + + echo "$CONFTEST_PREAMBLE + $CODE" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + if [ "${CAT}" = "functions" ]; then + # + # The logic for "functions" compilation tests is inverted compared to + # other compilation steps: if the function is present, the code + # snippet will fail to compile because the function call won't match + # the prototype. If the function is not present, the code snippet + # will produce an object file with the function as an unresolved + # symbol. + # + echo "#undef ${DEF}" | append_conftest "${CAT}" + else + echo "#define ${DEF} ${VAL}" | append_conftest "${CAT}" + fi + return + else + if [ "${CAT}" = "functions" ]; then + echo "#define ${DEF} ${VAL}" | append_conftest "${CAT}" + else + echo "#undef ${DEF}" | append_conftest "${CAT}" + fi + return + fi +} + +check_symbol_exists() { + # Check that the given symbol is available + + SYMBOL="$1" + TAB=' ' + + if [ ${OS_FREEBSD} -ne 1 ] ; then + # Linux: + # ------ + # + # Check Module.symvers to see whether the given symbol is present. + # + if grep -e "${TAB}${SYMBOL}${TAB}.*${TAB}EXPORT_SYMBOL.*\$" \ + ${MODULE_SYMVERS_PATHS} >/dev/null 2>&1; then + return 0 + fi + else + # FreeBSD: + # ------ + # + # Check if any of the linuxkpi or drm kernel module files contain + # references to this symbol. + + # Get the /boot/kernel/ and /boot/modules paths, convert the list to a + # space separated list instead of semicolon separated so we can iterate + # over it. + if [ -z "${CONFTEST_BSD_KMODPATHS}" ] ; then + KMODPATHS=`sysctl -n kern.module_path | sed -e "s/;/ /g"` + else + KMODPATHS="${CONFTEST_BSD_KMODPATHS}" + fi + + for KMOD in linuxkpi.ko linuxkpi_gplv2.ko drm.ko dmabuf.ko ; do + for KMODPATH in $KMODPATHS; do + if [ -e "$KMODPATH/$KMOD" ] ; then + if nm "$KMODPATH/$KMOD" | grep "$SYMBOL" >/dev/null 2>&1 ; then + return 0 + fi + fi + done + done + fi + + return 1 +} + +export_symbol_present_conftest() { + + SYMBOL="$1" + + if check_symbol_exists $SYMBOL; then + echo "#define NV_IS_EXPORT_SYMBOL_PRESENT_$SYMBOL 1" | + append_conftest "symbols" + else + # May be a false negative if Module.symvers is absent or incomplete, + # or if the Module.symvers format changes. + echo "#define NV_IS_EXPORT_SYMBOL_PRESENT_$SYMBOL 0" | + append_conftest "symbols" + fi +} + +export_symbol_gpl_conftest() { + # + # Check Module.symvers to see whether the given symbol is present and its + # export type is GPL-only (including deprecated GPL-only symbols). 
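# Note (illustrative, not part of this script): a Module.symvers row is
# TAB-separated, along the lines of
#
#     0x12345678<TAB>ib_register_peer_memory_client<TAB>vmlinux<TAB>EXPORT_SYMBOL_GPL
#
# (newer kernels append a namespace column). The greps in
# check_symbol_exists() above and in this function key on the symbol name
# bracketed by tabs, followed by its EXPORT_SYMBOL / EXPORT_SYMBOL_GPL
# export type.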
+ # + + SYMBOL="$1" + TAB=' ' + + if grep -e "${TAB}${SYMBOL}${TAB}.*${TAB}EXPORT_\(UNUSED_\)*SYMBOL_GPL\s*\$" \ + ${MODULE_SYMVERS_PATHS} >/dev/null 2>&1; then + echo "#define NV_IS_EXPORT_SYMBOL_GPL_$SYMBOL 1" | + append_conftest "symbols" + else + # May be a false negative if Module.symvers is absent or incomplete, + # or if the Module.symvers format changes. + echo "#define NV_IS_EXPORT_SYMBOL_GPL_$SYMBOL 0" | + append_conftest "symbols" + fi +} + +get_configuration_option() { + # + # Print the value of given configuration option, if defined + # + RET=1 + OPTION=$1 + + OLD_FILE="linux/autoconf.h" + NEW_FILE="generated/autoconf.h" + FILE="" + + if [ -f $HEADERS/$NEW_FILE -o -f $OUTPUT/include/$NEW_FILE ]; then + FILE=$NEW_FILE + elif [ -f $HEADERS/$OLD_FILE -o -f $OUTPUT/include/$OLD_FILE ]; then + FILE=$OLD_FILE + fi + + if [ -n "$FILE" ]; then + # + # We are looking at a configured source tree; verify + # that its configuration includes the given option + # via a compile check, and print the option's value. + # + + if [ -f $HEADERS/$FILE ]; then + INCLUDE_DIRECTORY=$HEADERS + elif [ -f $OUTPUT/include/$FILE ]; then + INCLUDE_DIRECTORY=$OUTPUT/include + else + return 1 + fi + + echo "#include <$FILE> + #ifndef $OPTION + #error $OPTION not defined! + #endif + + $OPTION + " > conftest$$.c + + $CC -E -P -I$INCLUDE_DIRECTORY -o conftest$$ conftest$$.c > /dev/null 2>&1 + + if [ -e conftest$$ ]; then + tr -d '\r\n\t ' < conftest$$ + RET=$? + fi + + rm -f conftest$$.c conftest$$ + else + CONFIG=$OUTPUT/.config + if [ -f $CONFIG ] && grep "^$OPTION=" $CONFIG; then + grep "^$OPTION=" $CONFIG | cut -f 2- -d "=" + RET=$? + fi + fi + + return $RET + +} + +check_for_ib_peer_memory_symbols() { + kernel_dir="$1" + module_symvers="${kernel_dir}/Module.symvers" + + sym_ib_register="ib_register_peer_memory_client" + sym_ib_unregister="ib_unregister_peer_memory_client" + tab=' ' + + # Return 0 for true(no errors), 1 for false + if [ ! -f "${module_symvers}" ]; then + return 1 + fi + + if grep -e "${tab}${sym_ib_register}${tab}.*${tab}EXPORT_SYMBOL.*\$" \ + "${module_symvers}" > /dev/null 2>&1 && + grep -e "${tab}${sym_ib_unregister}${tab}.*${tab}EXPORT_SYMBOL.*\$" \ + "${module_symvers}" > /dev/null 2>&1; then + return 0 + else + return 1 + fi +} + +compile_test() { + case "$1" in + set_memory_uc) + # + # Determine if the set_memory_uc() function is present. + # It does not exist on all architectures. + # + CODE=" + #include + #if defined(NV_ASM_SET_MEMORY_H_PRESENT) + #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT) + #include + #endif + #include + #include + #else + #include + #endif + void conftest_set_memory_uc(void) { + set_memory_uc(); + }" + + compile_check_conftest "$CODE" "NV_SET_MEMORY_UC_PRESENT" "" "functions" + ;; + + set_memory_array_uc) + # + # Determine if the set_memory_array_uc() function is present. + # It does not exist on all architectures. + # + CODE=" + #include + #if defined(NV_ASM_SET_MEMORY_H_PRESENT) + #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT) + #include + #endif + #include + #include + #else + #include + #endif + void conftest_set_memory_array_uc(void) { + set_memory_array_uc(); + }" + + compile_check_conftest "$CODE" "NV_SET_MEMORY_ARRAY_UC_PRESENT" "" "functions" + ;; + + sysfs_slab_unlink) + # + # Determine if the sysfs_slab_unlink() function is present. + # + # This test is useful to check for the presence a fix for the deferred + # kmem_cache destroy feature (see nvbug: 2543505). 
+ # + # Added by commit d50d82faa0c9 ("slub: fix failure when we delete and + # create a slab cache") in 4.18 (2018-06-27). + # + CODE=" + #include + void conftest_sysfs_slab_unlink(void) { + sysfs_slab_unlink(); + }" + + compile_check_conftest "$CODE" "NV_SYSFS_SLAB_UNLINK_PRESENT" "" "functions" + ;; + + list_is_first) + # + # Determine if the list_is_first() function is present. + # + # Added by commit 70b44595eafe ("mm, compaction: use free lists + # to quickly locate a migration source") in 5.1 (2019-03-05) + # + CODE=" + #include + void conftest_list_is_first(void) { + list_is_first(); + }" + + compile_check_conftest "$CODE" "NV_LIST_IS_FIRST_PRESENT" "" "functions" + ;; + + set_pages_uc) + # + # Determine if the set_pages_uc() function is present. + # It does not exist on all architectures. + # + CODE=" + #include + #if defined(NV_ASM_SET_MEMORY_H_PRESENT) + #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT) + #include + #endif + #include + #include + #else + #include + #endif + void conftest_set_pages_uc(void) { + set_pages_uc(); + }" + + compile_check_conftest "$CODE" "NV_SET_PAGES_UC_PRESENT" "" "functions" + ;; + + set_pages_array_uc) + # + # Determine if the set_pages_array_uc() function is present. + # It does not exist on all architectures. + # + # Added by commit 0f3507555f6f ("x86, CPA: Add set_pages_arrayuc + # and set_pages_array_wb") in v2.6.30. + # + CODE=" + #include + #if defined(NV_ASM_SET_MEMORY_H_PRESENT) + #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT) + #include + #endif + #include + #include + #else + #include + #endif + void conftest_set_pages_array_uc(void) { + set_pages_array_uc(); + }" + + compile_check_conftest "$CODE" "NV_SET_PAGES_ARRAY_UC_PRESENT" "" "functions" + ;; + + flush_cache_all) + # + # Determine if flush_cache_all() function is present + # + # flush_cache_all() was removed by commit id + # 68234df4ea79 ("arm64: kill flush_cache_all()") in 4.2 (2015-04-20) + # for aarch64 + # + CODE=" + #include + int conftest_flush_cache_all(void) { + return flush_cache_all(); + }" + compile_check_conftest "$CODE" "NV_FLUSH_CACHE_ALL_PRESENT" "" "functions" + ;; + + ioremap_cache) + # + # Determine if the ioremap_cache() function is present. + # It does not exist on all architectures. + # + CODE=" + #include + void conftest_ioremap_cache(void) { + ioremap_cache(); + }" + + compile_check_conftest "$CODE" "NV_IOREMAP_CACHE_PRESENT" "" "functions" + ;; + + ioremap_wc) + # + # Determine if the ioremap_wc() function is present. + # It does not exist on all architectures. + # + CODE=" + #include + void conftest_ioremap_wc(void) { + ioremap_wc(); + }" + + compile_check_conftest "$CODE" "NV_IOREMAP_WC_PRESENT" "" "functions" + ;; + + ioremap_driver_hardened) + # + # Determine if the ioremap_driver_hardened() function is present. + # It does not exist on all architectures. + # TODO: Update the commit ID once the API is upstreamed. + # + CODE=" + #include + void conftest_ioremap_driver_hardened(void) { + ioremap_driver_hardened(); + }" + + compile_check_conftest "$CODE" "NV_IOREMAP_DRIVER_HARDENED_PRESENT" "" "functions" + ;; + + ioremap_driver_hardened_wc) + # + # Determine if the ioremap_driver_hardened_wc() function is present. + # It does not exist on all architectures. + # TODO: Update the commit ID once the API is upstreamed. 
+ # + CODE=" + #include + void conftest_ioremap_driver_hardened_wc(void) { + ioremap_driver_hardened_wc(); + }" + + compile_check_conftest "$CODE" "NV_IOREMAP_DRIVER_HARDENED_WC_PRESENT" "" "functions" + ;; + + ioremap_cache_shared) + # + # Determine if the ioremap_cache_shared() function is present. + # It does not exist on all architectures. + # TODO: Update the commit ID once the API is upstreamed. + # + CODE=" + #include + void conftest_ioremap_cache_shared(void) { + ioremap_cache_shared(); + }" + + compile_check_conftest "$CODE" "NV_IOREMAP_CACHE_SHARED_PRESENT" "" "functions" + ;; + dom0_kernel_present) + # Add config parameter if running on DOM0. + if [ -n "$VGX_BUILD" ]; then + echo "#define NV_DOM0_KERNEL_PRESENT" | append_conftest "generic" + else + echo "#undef NV_DOM0_KERNEL_PRESENT" | append_conftest "generic" + fi + return + ;; + + nvidia_vgpu_kvm_build) + # Add config parameter if running on KVM host. + if [ -n "$VGX_KVM_BUILD" ]; then + echo "#define NV_VGPU_KVM_BUILD" | append_conftest "generic" + else + echo "#undef NV_VGPU_KVM_BUILD" | append_conftest "generic" + fi + return + ;; + + device_vm_build) + # Add config parameter if running on Device VM. + if [ -n "$VGX_DEVICE_VM_BUILD" ]; then + echo "#define NV_DEVICE_VM_BUILD" | append_conftest "generic" + else + echo "#undef NV_DEVICE_VM_BUILD" | append_conftest "generic" + fi + return + ;; + + vfio_info_add_capability_has_cap_type_id_arg) + # + # Check if vfio_info_add_capability() has cap_type_id parameter. + # + # Removed by commit dda01f787df9 ("vfio: Simplify capability + # helper") in v4.16 (2017-12-12) + # + CODE=" + #include + int vfio_info_add_capability(struct vfio_info_cap *caps, + int cap_type_id, + void *cap_type) { + return 0; + }" + + compile_check_conftest "$CODE" "NV_VFIO_INFO_ADD_CAPABILITY_HAS_CAP_TYPE_ID_ARGS" "" "types" + ;; + + nvidia_grid_build) + if [ -n "$GRID_BUILD" ]; then + echo "#define NV_GRID_BUILD" | append_conftest "generic" + else + echo "#undef NV_GRID_BUILD" | append_conftest "generic" + fi + return + ;; + + nvidia_grid_csp_build) + if [ -n "$GRID_BUILD_CSP" ]; then + echo "#define NV_GRID_BUILD_CSP $GRID_BUILD_CSP" | append_conftest "generic" + else + echo "#undef NV_GRID_BUILD_CSP" | append_conftest "generic" + fi + return + ;; + + mdev_uuid) + # + # Determine if mdev_uuid() function is present or not + # + # Added by commit 99e3123e3d72 ("vfio-mdev: Make mdev_device + # private and abstract interfaces") in v4.10 + # + # Removed by commit 2a3d15f270e ("vfio/mdev: Add missing typesafety + # around mdev_device") in v5.13 + # + CODE=" + #include + #include + void conftest_mdev_uuid() { + mdev_uuid(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_UUID_PRESENT" "" "functions" + + # + # Determine if mdev_uuid() returns 'const guid_t *'. + # + # mdev_uuid() function prototype updated to return 'const guid_t *' + # by commit 278bca7f318e ("vfio-mdev: Switch to use new generic UUID + # API") in v5.1 (2019-01-10). 
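# Note (illustrative, not part of this script): driver code can then
# branch on the generated define, e.g.
#
#     #if defined(NV_MDEV_UUID_RETURN_GUID_PTR)
#         const guid_t *uuid = mdev_uuid(mdev);
#     #else
#         /* older kernels: mdev_uuid() returned the uuid in a different type */
#     #endif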
+ # + CODE=" + #include + #include + const guid_t *conftest_mdev_uuid_return_guid_ptr(struct mdev_device *mdev) { + return mdev_uuid(mdev); + }" + + compile_check_conftest "$CODE" "NV_MDEV_UUID_RETURN_GUID_PTR" "" "types" + ;; + + mdev_get_type_group_id) + # + # Determine if mdev_get_type_group_id() function is present or not + # + # Added by commit 15fcc44be0c7a ("vfio/mdev: Add + # mdev/mtype_get_type_group_id()") in v5.13 + # + # Removed by commit da44c340c4f ("vfio/mdev: simplify mdev_type + # handling") in v6.1 + # + CODE=" + #include + #include + void conftest_mdev_get_type_group_id() { + mdev_get_type_group_id(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_GET_TYPE_GROUP_ID_PRESENT" "" "functions" + ;; + + vfio_device_mig_state) + # + # Determine if vfio_device_mig_state enum is present or not + # + # Added by commit 115dcec65f61d ("vfio: Define device + # migration protocol v2") in v5.18 + # + CODE=" + #include + #include + enum vfio_device_mig_state device_state; + " + + compile_check_conftest "$CODE" "NV_VFIO_DEVICE_MIG_STATE_PRESENT" "" "types" + ;; + + vfio_migration_ops) + # + # Determine if vfio_migration_ops struct is present or not + # + # Added by commit 6e97eba8ad874 ("vfio: Split migration ops + # from main device ops") in v6.0 + # + CODE=" + #include + #include + struct vfio_migration_ops mig_ops; + " + + compile_check_conftest "$CODE" "NV_VFIO_MIGRATION_OPS_PRESENT" "" "types" + ;; + + vfio_precopy_info) + # + # Determine if vfio_precopy_info struct is present or not + # + # Added by commit 4db52602a6074 ("vfio: Extend the device migration + # protocol with PRE_COPY" in v6.2 + # + CODE=" + #include + struct vfio_precopy_info precopy_info; + " + + compile_check_conftest "$CODE" "NV_VFIO_PRECOPY_INFO_PRESENT" "" "types" + ;; + + vfio_log_ops) + # + # Determine if vfio_log_ops struct is present or not + # + # Added by commit 80c4b92a2dc48 ("vfio: Introduce the DMA + # logging feature support") in v6.1 + # + CODE=" + #include + #include + struct vfio_log_ops log_ops; + " + + compile_check_conftest "$CODE" "NV_VFIO_LOG_OPS_PRESENT" "" "types" + ;; + + vfio_migration_ops_has_migration_get_data_size) + # + # Determine if vfio_migration_ops struct has .migration_get_data_size field. + # + # Added by commit in 4e016f969529f ("vfio: Add an option to get migration + # data size") in v6.2 kernel. + # + CODE=" + #include + #include + int conftest_mdev_vfio_migration_ops_has_migration_get_data_size(void) { + return offsetof(struct vfio_migration_ops, migration_get_data_size); + }" + + compile_check_conftest "$CODE" "NV_VFIO_MIGRATION_OPS_HAS_MIGRATION_GET_DATA_SIZE" "" "types" + ;; + + mdev_parent_ops) + # + # Determine if the struct mdev_parent_ops type is present. + # + # Added by commit 42930553a7c1 ("vfio-mdev: de-polute the + # namespace, rename parent_device & parent_ops") in v4.10 + # + # Removed by commit 6b42f491e17 ("vfio/mdev: Remove + # mdev_parent_ops") in v5.19 + # + CODE=" + #include + #include + struct mdev_parent_ops conftest_mdev_parent_ops; + " + + compile_check_conftest "$CODE" "NV_MDEV_PARENT_OPS_STRUCT_PRESENT" "" "types" + ;; + + mdev_parent) + # + # Determine if the struct mdev_parent type is present. 
+ # + # Added by commit 89345d5177aa ("vfio/mdev: embedd struct mdev_parent in + # the parent data structure") in v6.1 + # + CODE=" + #include + #include + struct mdev_parent conftest_mdev_parent; + " + + compile_check_conftest "$CODE" "NV_MDEV_PARENT_STRUCT_PRESENT" "" "types" + ;; + + mdev_parent_dev) + # + # Determine if mdev_parent_dev() function is present or not + # + # Added by commit 9372e6feaafb ("vfio-mdev: Make mdev_parent + # private") in v4.10 + # + # Removed by commit 062e720cd20 ("vfio/mdev: remove + # mdev_parent_dev") in v6.1 + # + CODE=" + #include + #include + void conftest_mdev_parent_dev() { + mdev_parent_dev(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_PARENT_DEV_PRESENT" "" "functions" + ;; + + vfio_free_device) + # + # Determine if vfio_free_device() function is present or not + # + # Removed by commit 913447d06f03 ("vfio: Remove vfio_free_device") + # in v6.2 + # + CODE=" + #include + #include + void conftest_vfio_free_device() { + vfio_free_device(); + }" + + compile_check_conftest "$CODE" "NV_VFIO_FREE_DEVICE_PRESENT" "" "functions" + ;; + + mdev_from_dev) + # + # Determine if mdev_from_dev() function is present or not. + # + # Added by commit 99e3123e3d72 ("vfio-mdev: Make mdev_device + # private and abstract interfaces") in v4.10 (2016-12-30) + # + # Removed by commit cbf3bb28aae ("vfio/mdev: remove mdev_from_dev") + # in v6.1 + # + CODE=" + #include + #include + void conftest_mdev_from_dev() { + mdev_from_dev(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_FROM_DEV_PRESENT" "" "functions" + ;; + + mdev_set_iommu_device) + # + # Determine if mdev_set_iommu_device() function is present or not. + # + # Added by commit 8ac13175cbe9 ("vfio/mdev: Add iommu related member + # in mdev_device) in v5.1 (2019-04-12) + # + CODE=" + #include + #include + void conftest_mdev_set_iommu_device() { + mdev_set_iommu_device(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_SET_IOMMU_DEVICE_PRESENT" "" "functions" + ;; + + mdev_parent_ops_has_open_device) + # Determine if 'mdev_parent_ops' structure has a 'open_device' + # field. + # + # Added by commit 2fd585f4ed9d ("vfio: Provide better generic support + # for open/release vfio_device_ops") in 5.15 (2021-08-05) + # + CODE=" + #include + #include + int conftest_mdev_parent_ops_has_open_device(void) { + return offsetof(struct mdev_parent_ops, open_device); + }" + + compile_check_conftest "$CODE" "NV_MDEV_PARENT_OPS_HAS_OPEN_DEVICE" "" "types" + ;; + + mdev_parent_ops_has_device_driver) + # + # Determine if 'mdev_parent_ops' structure has 'device_driver' field. + # + # Added by commit 88a21f265ce5 ("vfio/mdev: Allow the mdev_parent_ops + # to specify the device driver to bind) in v5.14 (2021-06-17) + # + CODE=" + #include + #include + int conftest_mdev_parent_ops_has_device_driver(void) { + return offsetof(struct mdev_parent_ops, device_driver); + }" + + compile_check_conftest "$CODE" "NV_MDEV_PARENT_OPS_HAS_DEVICE_DRIVER" "" "types" + ;; + + mdev_driver_has_supported_type_groups) + # + # Determine if 'mdev_driver' structure has 'supported_type_groups' field. + # + # Added by commit 6b42f491e17c ("vfio/mdev: Remove mdev_parent_ops) + # in v5.19 (2022-04-11) + # + CODE=" + #include + #include + int conftest_mdev_driver_has_supported_type_groups(void) { + return offsetof(struct mdev_driver, supported_type_groups); + }" + + compile_check_conftest "$CODE" "NV_MDEV_DRIVER_HAS_SUPPORTED_TYPE_GROUPS" "" "types" + ;; + + vfio_device_ops_has_dma_unmap) + # + # Determine if 'vfio_device_ops' struct has 'dma_unmap' field. 
+ # + # Added by commit ce4b4657ff18 ("vfio: Replace the DMA unmapping + # notifier with a callback") in v6.0 + # + CODE=" + #include + #include + int conftest_vfio_device_ops_has_dma_unmap(void) { + return offsetof(struct vfio_device_ops, dma_unmap); + }" + + compile_check_conftest "$CODE" "NV_VFIO_DEVICE_OPS_HAS_DMA_UNMAP" "" "types" + ;; + + vfio_device_ops_has_bind_iommufd) + # + # Determine if 'vfio_device_ops' struct has 'bind_iommufd' field. + # + # Added by commit a4d1f91db5021 ("vfio-iommufd: Support iommufd + # for physical VFIO devices") in v6.2 + # + CODE=" + #include + #include + int conftest_vfio_device_ops_has_bind_iommufd(void) { + return offsetof(struct vfio_device_ops, bind_iommufd); + }" + + compile_check_conftest "$CODE" "NV_VFIO_DEVICE_OPS_HAS_BIND_IOMMUFD" "" "types" + ;; + + vfio_device_ops_has_detach_ioas) + # + # Determine if 'vfio_device_ops' struct has 'detach_ioas' field. + # + # Added by commit 9048c7341c4d ("vfio-iommufd: Add detach_ioas + # support for physical VFIO devices") in v6.6 + # + CODE=" + #include + #include + int conftest_vfio_device_ops_has_detach_ioas(void) { + return offsetof(struct vfio_device_ops, detach_ioas); + }" + + compile_check_conftest "$CODE" "NV_VFIO_DEVICE_OPS_HAS_DETACH_IOAS" "" "types" + ;; + + pfn_address_space) + # + # Determine if 'struct pfn_address_space' structure is present or not. + # + CODE=" + #include + void conftest_pfn_address_space() { + struct pfn_address_space pfn_address_space; + }" + + compile_check_conftest "$CODE" "NV_PFN_ADDRESS_SPACE_STRUCT_PRESENT" "" "types" + ;; + + egm_module_helper_api_present) + # + # Determine if egm management api are present or not. + # + CODE=" + #include + #include + void conftest_egm_module_helper_api_present() { + struct pci_dev *pdev; + register_egm_node(pdev); + unregister_egm_node(0); + } + " + compile_check_conftest "$CODE" "NV_EGM_MODULE_HELPER_API_PRESENT" "" "types" + ;; + + egm_bad_pages_handling_support) + # + # Determine if egm_bad_pages_list is present or not. + # + CODE=" + #include + #include + void conftest_egm_bad_pages_handle() { + int ioctl = EGM_BAD_PAGES_LIST; + struct egm_bad_pages_list list; + } + " + + compile_check_conftest "$CODE" "NV_EGM_BAD_PAGES_HANDLING_SUPPORT" "" "types" + ;; + + class_create_has_no_owner_arg) + # + # Determine if the class_create API with the new signature + # is present or not. + # + # Added by commit 1aaba11da9aa ("driver core: class: remove + # module * from class_create()") in v6.4 (2023-03-13) + # + CODE=" + #include + void conftest_class_create() { + struct class *class; + class = class_create(\"test\"); + }" + + compile_check_conftest "$CODE" "NV_CLASS_CREATE_HAS_NO_OWNER_ARG" "" "types" + ;; + + class_devnode_has_const_arg) + # + # Determine if the class.devnode is present with the new signature. + # + # Added by commit ff62b8e6588f ("driver core: make struct + # class.devnode() take a const *") in v6.2 (2022-11-23) + # + CODE=" + #include + static char *conftest_devnode(const struct device *device, umode_t *mode) { + return NULL; + } + + void conftest_class_devnode() { + struct class class; + class.devnode = conftest_devnode; + }" + + compile_check_conftest "$CODE" "NV_CLASS_DEVNODE_HAS_CONST_ARG" "" "types" + ;; + + vfio_device_gfx_plane_info) + # + # determine if the 'struct vfio_device_gfx_plane_info' type is present. 
+ # + # Added by commit e20eaa2382e7 ("vfio: ABI for mdev display + # dma-buf operation") in v4.16 (2017-11-23) + # + CODE=" + #include + struct vfio_device_gfx_plane_info info;" + + compile_check_conftest "$CODE" "NV_VFIO_DEVICE_GFX_PLANE_INFO_PRESENT" "" "types" + ;; + + vfio_uninit_group_dev) + # + # Determine if vfio_uninit_group_dev() function is present or not. + # + # Added by commit ae03c3771b8c (vfio: Introduce a vfio_uninit_group_dev() + # API call) in v5.15 + # + CODE=" + #include + void conftest_vfio_uninit_group_dev() { + vfio_uninit_group_dev(); + }" + + compile_check_conftest "$CODE" "NV_VFIO_UNINIT_GROUP_DEV_PRESENT" "" "functions" + ;; + + vfio_pci_core_available) + # Determine if VFIO_PCI_CORE is available + # + # Added by commit 7fa005caa35e ("vfio/pci: Introduce + # vfio_pci_core.ko") in v5.16 (2021-08-26) + # + + CODE=" + #if defined(NV_LINUX_VFIO_PCI_CORE_H_PRESENT) + #include + #endif + + #if !defined(CONFIG_VFIO_PCI_CORE) && !defined(CONFIG_VFIO_PCI_CORE_MODULE) + #error VFIO_PCI_CORE not enabled + #endif + void conftest_vfio_pci_core_available(void) { + struct vfio_pci_core_device dev; + }" + + compile_check_conftest "$CODE" "NV_VFIO_PCI_CORE_PRESENT" "" "generic" + ;; + + vfio_alloc_device) + # + # Determine if vfio_alloc_device() function is present or not. + # + # Added by commit cb9ff3f3b84c (vfio: Add helpers for unifying vfio_device + # life cycle) in v6.1 + # + CODE=" + #include + void conftest_vfio_alloc_device() { + vfio_alloc_device(); + }" + + compile_check_conftest "$CODE" "NV_VFIO_ALLOC_DEVICE_PRESENT" "" "functions" + ;; + + vfio_register_emulated_iommu_dev) + # + # Determine if vfio_register_emulated_iommu_dev() function is present or not. + # + # Added by commit c68ea0d00ad8 (vfio: simplify iommu group allocation + # for mediated devices) in v5.16 + # + CODE=" + #include + void conftest_vfio_register_emulated_iommu_dev() { + vfio_register_emulated_iommu_dev(); + }" + + compile_check_conftest "$CODE" "NV_VFIO_REGISTER_EMULATED_IOMMU_DEV_PRESENT" "" "functions" + ;; + + bus_type_has_iommu_ops) + # + # Determine if 'bus_type' structure has a 'iommu_ops' field. + # + # This field was removed by commit 17de3f5fdd35 (iommu: Retire bus ops) + # in v6.8 + # + CODE=" + #include + + int conftest_bus_type_has_iommu_ops(void) { + return offsetof(struct bus_type, iommu_ops); + }" + + compile_check_conftest "$CODE" "NV_BUS_TYPE_HAS_IOMMU_OPS" "" "types" + ;; + + eventfd_signal_has_counter_arg) + # + # Determine if eventfd_signal() function has an additional 'counter' argument. 
+ # + # This argument was removed by commit 3652117f8548 (eventfd: simplify + # eventfd_signal()) in v6.8 + # + CODE=" + #include + + void conftest_eventfd_signal_has_counter_arg(void) { + struct eventfd_ctx *ctx; + + eventfd_signal(ctx, 1); + }" + + compile_check_conftest "$CODE" "NV_EVENTFD_SIGNAL_HAS_COUNTER_ARG" "" "types" + ;; + + drm_available) + # Determine if the DRM subsystem is usable + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #include + + #if !defined(CONFIG_DRM) && !defined(CONFIG_DRM_MODULE) && !defined(__FreeBSD__) + #error DRM not enabled + #endif + + void conftest_drm_available(void) { + struct drm_driver drv; + + /* 2013-10-02 1bb72532ac260a2d3982b40bdd4c936d779d0d16 */ + (void)drm_dev_alloc; + + /* 2013-10-02 c22f0ace1926da399d9a16dfaf09174c1b03594c */ + (void)drm_dev_register; + + /* 2013-10-02 c3a49737ef7db0bdd4fcf6cf0b7140a883e32b2a */ + (void)drm_dev_unregister; + }" + + compile_check_conftest "$CODE" "NV_DRM_AVAILABLE" "" "generic" + ;; + + drm_sysfs_connector_property_event) + # + # Determine if drm_sysfs_connector_property_event() is present. + # + # Commit 0cf8d292ba5e ("drm/sysfs: rename drm_sysfs_connector_status_event()") + # renamed drm_sysfs_connector_status_event() to + # drm_sysfs_connector_property_event() in Linux v6.5. + # + CODE=" + #include + void conftest_drm_sysfs_connector_property_event(void) { + drm_sysfs_connector_property_event(); + }" + compile_check_conftest "$CODE" "NV_DRM_SYSFS_CONNECTOR_PROPERTY_EVENT_PRESENT" "" "functions" + ;; + + drm_sysfs_connector_status_event) + # + # Determine if drm_sysfs_connector_status_event() is present. + # + # + CODE=" + #include + void conftest_drm_sysfs_connector_status_event(void) { + drm_sysfs_connector_status_event(); + }" + compile_check_conftest "$CODE" "NV_DRM_SYSFS_CONNECTOR_STATUS_EVENT_PRESENT" "" "functions" + ;; + + pde_data) + # + # Determine if the pde_data() function is present. + # + # PDE_DATA() was replaced with pde_data() by commit 359745d78351 + # ("proc: remove PDE_DATA() completely") in v5.17. + # + CODE=" + #include + void conftest_pde_data(void) { + pde_data(); + }" + + compile_check_conftest "$CODE" "NV_PDE_DATA_LOWER_CASE_PRESENT" "" "functions" + ;; + + xen_ioemu_inject_msi) + # Determine if the xen_ioemu_inject_msi() function is present. + CODE=" + #if defined(NV_XEN_IOEMU_H_PRESENT) + #include + #include + #include + #include + #endif + void conftest_xen_ioemu_inject_msi(void) { + xen_ioemu_inject_msi(); + }" + + compile_check_conftest "$CODE" "NV_XEN_IOEMU_INJECT_MSI" "" "functions" + ;; + + phys_to_dma) + # + # Determine if the phys_to_dma function is present. + # It does not exist on all architectures. + # + CODE=" + #include + void conftest_phys_to_dma(void) { + phys_to_dma(); + }" + + compile_check_conftest "$CODE" "NV_PHYS_TO_DMA_PRESENT" "" "functions" + ;; + + dma_attr_macros) + # + # Determine if the NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT macro present. + # It does not exist on all architectures. + # + CODE=" + #include + void conftest_dma_attr_macros(void) { + int ret; + ret = DMA_ATTR_SKIP_CPU_SYNC(); + }" + compile_check_conftest "$CODE" "NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT" "" "functions" + ;; + + dma_map_page_attrs) + # + # Determine if the dma_map_page_attrs function is present. + # It does not exist on all architectures. 
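+ # Guarded call-site sketch (illustrative; the dma_map_page() fallback
+ # is an assumption, not taken from this change):
+ #
+ #   #if defined(NV_DMA_MAP_PAGE_ATTRS_PRESENT)
+ #       bus = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, dir,
+ #                                DMA_ATTR_SKIP_CPU_SYNC);
+ #   #else
+ #       bus = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
+ #   #endif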
+ # + CODE=" + #include + void conftest_dma_map_page_attrs(void) { + dma_map_page_attrs(); + }" + + compile_check_conftest "$CODE" "NV_DMA_MAP_PAGE_ATTRS_PRESENT" "" "functions" + ;; + + nvhost_dma_fence_unpack) + # + # Determine if the nvhost_dma_fence_unpack function is present. + # This is only present in NVIDIA Tegra downstream kernels. + # + CODE=" + #if defined(NV_LINUX_NVHOST_H_PRESENT) + #include + #endif + void conftest_nvhost_dma_fence_unpack(void) { + nvhost_dma_fence_unpack(); + }" + + compile_check_conftest "$CODE" "NV_NVHOST_DMA_FENCE_UNPACK_PRESENT" "" "functions" + ;; + + vmf_insert_pfn_prot) + # + # Determine if vmf_insert_pfn_prot function is present + # + # Added by commit f5e6d1d5f8f3 ("mm: introduce + # vmf_insert_pfn_prot()") in v4.20. + # + CODE=" + #include + void conftest_vmf_insert_pfn_prot() { + vmf_insert_pfn_prot(); + }" + + compile_check_conftest "$CODE" "NV_VMF_INSERT_PFN_PROT_PRESENT" "" "functions" + ;; + + drm_atomic_available) + # + # Determine if the DRM atomic modesetting subsystem is usable + # + # Added by commit 036ef5733ba4 + # ("drm/atomic: Allow drivers to subclass drm_atomic_state, v3") in + # v4.2 (2018-05-18). + # + # Make conftest more robust by adding test for + # drm_atomic_set_mode_prop_for_crtc(), this function added by + # commit 955f3c334f0f ("drm/atomic: Add MODE_ID property") in v4.2 + # (2015-05-25). If the DRM atomic modesetting subsystem is + # back ported to Linux kernel older than v4.2, then commit + # 955f3c334f0f must be back ported in order to get NVIDIA-DRM KMS + # support. + # Commit 72fdb40c1a4b ("drm: extract drm_atomic_uapi.c") in v4.20 + # (2018-09-05), moved drm_atomic_set_mode_prop_for_crtc() function + # prototype from drm/drm_atomic.h to drm/drm_atomic_uapi.h. + # + echo "$CONFTEST_PREAMBLE + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + #include + #if !defined(CONFIG_DRM) && !defined(CONFIG_DRM_MODULE) && !defined(__FreeBSD__) + #error DRM not enabled + #endif + void conftest_drm_atomic_modeset_available(void) { + size_t a; + + a = offsetof(struct drm_mode_config_funcs, atomic_state_alloc); + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + + echo "$CONFTEST_PREAMBLE + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + #include + #if defined(NV_DRM_DRM_ATOMIC_UAPI_H_PRESENT) + #include + #endif + void conftest_drm_atomic_set_mode_prop_for_crtc(void) { + drm_atomic_set_mode_prop_for_crtc(); + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#undef NV_DRM_ATOMIC_MODESET_AVAILABLE" | append_conftest "generic" + else + echo "#define NV_DRM_ATOMIC_MODESET_AVAILABLE" | append_conftest "generic" + fi + else + echo "#undef NV_DRM_ATOMIC_MODESET_AVAILABLE" | append_conftest "generic" + fi + ;; + + drm_driver_has_legacy_dev_list) + # + # Determine if the 'drm_driver' structure has a 'legacy_dev_list' field. + # + # Renamed from device_list to legacy_device_list by commit + # b3f2333de8e8 ("drm: restrict the device list for shadow + # attached drivers") in v3.14 (2013-12-11) + # + # The commit 57bb1ee60340 ("drm: Compile out legacy chunks from + # struct drm_device") in v5.11 compiles out the legacy chunks like + # drm_driver::legacy_dev_list. 
+ # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #include + + int conftest_drm_driver_has_legacy_dev_list(void) { + return offsetof(struct drm_driver, legacy_dev_list); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_LEGACY_DEV_LIST" "" "types" + ;; + + jiffies_to_timespec) + # + # Determine if jiffies_to_timespec() is present + # + # Removed by commit 751addac78b6 ("y2038: remove obsolete jiffies + # conversion functions") in v5.6. + # + CODE=" + #include + void conftest_jiffies_to_timespec(void){ + jiffies_to_timespec(); + }" + compile_check_conftest "$CODE" "NV_JIFFIES_TO_TIMESPEC_PRESENT" "" "functions" + ;; + + drm_driver_has_gem_prime_res_obj) + # + # Determine if the drm_driver structure has a 'gem_prime_res_obj' + # callback field. + # + # Added by commit 3aac4502fd3f ("dma-buf: use reservation + # objects") in v3.17 (2014-07-01). + # + # Removed by commit 51c98747113e (drm/prime: Ditch + # gem_prime_res_obj hook) in v5.4. + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + int conftest_drm_driver_has_gem_prime_res_obj(void) { + return offsetof(struct drm_driver, gem_prime_res_obj); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ" "" "types" + ;; + + drm_atomic_helper_legacy_gamma_set) + # + # Determine if the function drm_atomic_helper_legacy_gamma_set() is + # present. + # + # Added by commit 5488dc16fde7 ("drm: introduce pipe color + # correction properties") in v4.6 (2016-03-08) + # + # Accidentally moved to drm_atomic_state_helper.[ch] by commit + # 9ef8a9dc4b21 ("drm: Extract drm_atomic_state_helper.[ch]") + # and moved back to drm_atomic_helper.[ch] by commit 1d8224e790c7 + # ("drm: Fix up drm_atomic_state_helper.[hc] extraction") in v5.0. + # + # Removed by commit 6ca2ab8086af ("drm: automatic legacy gamma + # support") in v5.12 (2020-12-15) + # + CODE=" + #include + #if defined(NV_DRM_DRM_ATOMIC_STATE_HELPER_H_PRESENT) + #include + #endif + void conftest_drm_atomic_helper_legacy_gamma_set(void) { + drm_atomic_helper_legacy_gamma_set(); + }" + + compile_check_conftest "$CODE" "NV_DRM_ATOMIC_HELPER_LEGACY_GAMMA_SET_PRESENT" "" "functions" + ;; + + drm_plane_create_color_properties) + # + # Determine if the function drm_plane_create_color_properties() is + # present. + # + # Added by commit 80f690e9e3a6 ("drm: Add optional COLOR_ENCODING + # and COLOR_RANGE properties to drm_plane") in v4.17 (2018-02-19). + # + CODE=" + #include + #include + void conftest_drm_plane_create_color_properties(void) { + drm_plane_create_color_properties(); + }" + + compile_check_conftest "$CODE" "NV_DRM_PLANE_CREATE_COLOR_PROPERTIES_PRESENT" "" "functions" + ;; + + drm_format_info_has_is_yuv) + # + # Determine if struct drm_format_info has .is_yuv member. + # + # Added by commit ce2d54619a10 ("drm/fourcc: Add is_yuv field to + # drm_format_info to denote if format is yuv") in v4.19 + # (2018-07-17). + # + CODE=" + #include + int conftest_drm_format_info_has_is_yuv(void) { + return offsetof(struct drm_format_info, is_yuv); + }" + + compile_check_conftest "$CODE" "NV_DRM_FORMAT_INFO_HAS_IS_YUV" "" "types" + ;; + + get_user_pages) + # + # Determine if get_user_pages() + # + # Removed vmas parameter from get_user_pages() by commit 54d020692b34 + # ("mm/gup: remove unused vmas parameter from get_user_pages()") + # in v6.5. 
+ # + CODE="$CONFTEST_PREAMBLE + #include + long get_user_pages(unsigned long start, + unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages, + struct vm_area_struct **vmas) { + return 0; + }" + + compile_check_conftest "$CODE" "NV_GET_USER_PAGES_HAS_VMAS_ARG" "" "types" + + return + ;; + + get_user_pages_remote) + # + # Determine if the function get_user_pages_remote() has tsk/vmas + # parameters. + # + # get_user_pages_remote() removed 'tsk' parameter by + # commit 64019a2e467a ("mm/gup: remove task_struct pointer for + # all gup code") in v5.9. + # + # Removed vmas parameter from get_user_pages_remote() by commit + # ca5e863233e8 ("mm/gup: remove vmas parameter from + # get_user_pages_remote()") in v6.5. + # + + # + # This function sets the NV_GET_USER_PAGES_REMOTE_* macros as per + # the below passing conftest's + # + set_get_user_pages_remote_defines () { + if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS" ]; then + echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS" | append_conftest "functions" + else + echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS" | append_conftest "functions" + fi + + if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK" ]; then + echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK" | append_conftest "functions" + else + echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK" | append_conftest "functions" + fi + } + + # + # conftest #1: check if get_user_pages_remote() has tsk and + # vmas arguments + # Return if these arguments are present. Fall through to conftest #2 + # if these args are absent. + # + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages_remote(struct task_struct *tsk, + struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages, + struct vm_area_struct **vmas, + int *locked) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS" + rm -f conftest$$.o + return + fi + + # + # conftest #2: check if get_user_pages_remote() does not take the + # tsk argument. + # + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages_remote(struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages, + struct vm_area_struct **vmas, + int *locked) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK" + rm -f conftest$$.o + fi + + # + # conftest #3: check if get_user_pages_remote() does not take + # vmas or tsk arguments. + # + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages_remote(struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages, + int *locked) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + set_get_user_pages_remote_defines "" + rm -f conftest$$.o + fi + + ;; + + pin_user_pages) + # + # Determine if the function pin_user_pages() is present. + # Presence of pin_user_pages() also implies the presence of + # unpin-user_page(). + # + # pin_user_pages() was added by commit eddb1c228f79 ("mm/gup: + # introduce pin_user_pages*() and FOLL_PIN") in v5.6. 
+ # + # Removed vmas parameter from pin_user_pages() by commit + # 4c630f307455 ("mm/gup: remove vmas parameter from + # pin_user_pages()") in v6.5. + + set_pin_user_pages_defines () { + if [ "$1" = "" ]; then + echo "#undef NV_PIN_USER_PAGES_PRESENT" | append_conftest "functions" + else + echo "#define NV_PIN_USER_PAGES_PRESENT" | append_conftest "functions" + fi + + if [ "$1" = "NV_PIN_USER_PAGES_HAS_ARGS_VMAS" ]; then + echo "#define NV_PIN_USER_PAGES_HAS_ARGS_VMAS" | append_conftest "functions" + else + echo "#undef NV_PIN_USER_PAGES_HAS_ARGS_VMAS" | append_conftest "functions" + fi + + } + + # conftest #1: check if pin_user_pages() is available + # return if not available. + # Fall through to conftest #2 if it is present + # + echo "$CONFTEST_PREAMBLE + #include + void conftest_pin_user_pages(void) { + pin_user_pages(); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + set_pin_user_pages_defines "" + rm -f conftest$$.o + return + fi + + # conftest #2: Check if pin_user_pages() has vmas argument + echo "$CONFTEST_PREAMBLE + #include + long pin_user_pages(unsigned long start, + unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages, + struct vm_area_struct **vmas) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + set_pin_user_pages_defines "NV_PIN_USER_PAGES_HAS_ARGS_VMAS" + rm -f conftest$$.o + else + set_pin_user_pages_defines "NV_PIN_USER_PAGES_PRESENT" + fi + ;; + + pin_user_pages_remote) + # Determine if the function pin_user_pages_remote() is present + # + # pin_user_pages_remote() was added by commit eddb1c228f7951d399240 + # ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") + # in v5.6 (2020-01-30) + + # pin_user_pages_remote() removed 'tsk' parameter by commit + # 64019a2e467a ("mm/gup: remove task_struct pointer for all gup + # code") in v5.9. + # + # Removed unused vmas parameter from pin_user_pages_remote() by + # commit 0b295316b3a9 ("mm/gup: remove unused vmas parameter from + # pin_user_pages_remote()") in v6.5. + + # + # This function sets the NV_PIN_USER_PAGES_REMOTE_* macros as per + # the below passing conftest's + # + set_pin_user_pages_remote_defines () { + if [ "$1" = "" ]; then + echo "#undef NV_PIN_USER_PAGES_REMOTE_PRESENT" | append_conftest "functions" + else + echo "#define NV_PIN_USER_PAGES_REMOTE_PRESENT" | append_conftest "functions" + fi + + if [ "$1" = "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS" ]; then + echo "#define NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS" | append_conftest "functions" + else + echo "#undef NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS" | append_conftest "functions" + fi + + if [ "$1" = "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS" ]; then + echo "#define NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS" | append_conftest "functions" + else + echo "#undef NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS" | append_conftest "functions" + fi + } + + # conftest #1: check if pin_user_pages_remote() is available + # return if not available. 
+ # Fall through to conftest #2 if it is present + # + echo "$CONFTEST_PREAMBLE + #include + void conftest_pin_user_pages_remote(void) { + pin_user_pages_remote(); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + set_pin_user_pages_remote_defines "" + rm -f conftest$$.o + return + fi + + # conftest #2: Check if pin_user_pages_remote() has tsk and + # vmas argument + # Return if these arguments are present else fall through to + # conftest #3 + + echo "$CONFTEST_PREAMBLE + #include + long pin_user_pages_remote(struct task_struct *tsk, + struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages, + struct vm_area_struct **vmas, + int *locked) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + set_pin_user_pages_remote_defines "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS" + rm -f conftest$$.o + return + fi + + # conftest #3: Check if pin_user_pages_remote() has vmas argument + echo "$CONFTEST_PREAMBLE + #include + long pin_user_pages_remote(struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages, + struct vm_area_struct **vmas, + int *locked) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + set_pin_user_pages_remote_defines "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS" + rm -f conftest$$.o + else + set_pin_user_pages_remote_defines "NV_PIN_USER_PAGES_REMOTE_PRESENT" + fi + + ;; + + foll_longterm_present) + # + # Determine if FOLL_LONGTERM enum is present or not + # + # Added by commit 932f4a630a69 ("mm/gup: replace + # get_user_pages_longterm() with FOLL_LONGTERM") in + # v5.2 + # + CODE=" + #include + int foll_longterm = FOLL_LONGTERM; + " + + compile_check_conftest "$CODE" "NV_FOLL_LONGTERM_PRESENT" "" "types" + ;; + + has_enum_pidtype_tgid) + # Determine if PIDTYPE_TGID is present in the kernel as an enum + # + # Added by commit 6883f81aac6f ("pid: Implement PIDTYPE_TGID") + # in v4.19 + # + CODE=" + #include + + enum pid_type type = PIDTYPE_TGID; + " + + compile_check_conftest "$CODE" "NV_HAS_ENUM_PIDTYPE_TGID" "" "types" + ;; + + vfio_pin_pages_has_vfio_device_arg) + # + # Determine if vfio_pin_pages() kABI accepts "struct vfio_device *" + # argument instead of "struct device *" + # + # Replaced "struct device *" with "struct vfio_device *" by commit + # 8e432bb015b6c ("vfio/mdev: Pass in a struct vfio_device * to + # vfio_pin/unpin_pages()") in v5.19 + # + echo "$CONFTEST_PREAMBLE + #include + #include + int vfio_pin_pages(struct vfio_device *device, + unsigned long *user_pfn, + int npage, + int prot, + unsigned long *phys_pfn) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_VFIO_PIN_PAGES_HAS_VFIO_DEVICE_ARG" | append_conftest "functions" + rm -f conftest$$.o + else + echo "#undef NV_VFIO_PIN_PAGES_HAS_VFIO_DEVICE_ARG" | append_conftest "functions" + fi + ;; + + vfio_pin_pages_has_pages_arg) + # + # Determine if vfio_pin_pages() kABI accepts "struct pages **: + # argument instead of "unsigned long *phys_pfn" + # + # Replaced "unsigned long *phys_pfn" with "struct pages **pages" + # in commit 34a255e676159 ("vfio: Replace phys_pfn with pages for + # vfio_pin_pages()") in v6.0. 
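+ # Illustrative call sites for the two kABIs probed here and in the
+ # previous case (sketch; the variable names are assumptions):
+ #
+ #   #if defined(NV_VFIO_PIN_PAGES_HAS_PAGES_ARG)
+ #       ret = vfio_pin_pages(&vdev, iova, 1, IOMMU_READ, &page);
+ #   #elif defined(NV_VFIO_PIN_PAGES_HAS_VFIO_DEVICE_ARG)
+ #       ret = vfio_pin_pages(&vdev, &user_pfn, 1, IOMMU_READ, &phys_pfn);
+ #   #endif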
+ # + echo "$CONFTEST_PREAMBLE + #include + #include + int vfio_pin_pages(struct vfio_device *device, + dma_addr_t iova, + int npage, + int prot, + struct page **pages) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_VFIO_PIN_PAGES_HAS_PAGES_ARG" | append_conftest "functions" + rm -f conftest$$.o + else + echo "#undef NV_VFIO_PIN_PAGES_HAS_PAGES_ARG" | append_conftest "functions" + fi + ;; + + enable_apicv) + # + # Determine if enable_apicv boolean is exported by kernel. + # + # Added by commit fdf513e37a3b ("KVM: x86: Use common + # 'enable_apicv' variable for both APICv and AVIC") in v5.14. + # + CODE=" + $CONFTEST_PREAMBLE + #include + + bool is_enable_apicv_present() { + return enable_apicv; + }" + + compile_check_conftest "$CODE" "NV_ENABLE_APICV_PRESENT" "" "types" + ;; + + pci_driver_has_driver_managed_dma) + # + # Determine if "struct pci_driver" has .driver_managed_dma member. + # + # Added by commit 512881eacfa7 ("bus: platform,amba,fsl-mc,PCI: + # Add device DMA ownership management") in v5.19 + # + CODE=" + #include + int conftest_pci_driver_has_driver_managed_dma(void) { + return offsetof(struct pci_driver, driver_managed_dma); + }" + + compile_check_conftest "$CODE" "NV_PCI_DRIVER_HAS_DRIVER_MANAGED_DMA" "" "types" + ;; + + drm_file_get_master) + # + # Determine if function drm_file_get_master() is present. + # + # Added by commit 56f0729a510f ("drm: protect drm_master pointers in drm_lease.c") + # in v5.15 (2021-07-20) + # + + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + #include + + void conftest_drm_file_get_master(void) { + drm_file_get_master(); + }" + + compile_check_conftest "$CODE" "NV_DRM_FILE_GET_MASTER_PRESENT" "" "functions" + ;; + + drm_modeset_lock_all_end) + # + # Determine the number of arguments of the + # DRM_MODESET_LOCK_ALL_END() macro. + # + # DRM_MODESET_LOCK_ALL_END() is added with two arguments by commit + # b7ea04d299c7 (drm: drm: Add DRM_MODESET_LOCK_BEGIN/END helpers) + # in v5.0 (2018-11-29). The definition and prototype is changed to + # also take the third argument drm_device, by commit 77ef38574beb + # (drm/modeset-lock: Take the modeset BKL for legacy drivers) + # in v5.9 (2020-08-17). + # + DRM_MODESET_3_COMPILED=0 + DRM_MODESET_2_COMPILED=0 + DRM_MODESET_INCLUDES=" + #include + #include + #include " + + echo "$CONFTEST_PREAMBLE + $DRM_MODESET_INCLUDES + + void conftest_drm_modeset_lock_all_end( + struct drm_device *dev, + struct drm_modeset_acquire_ctx ctx, + int ret) { + DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + DRM_MODESET_3_COMPILED=1 + rm -f conftest$$.o + fi + + echo "$CONFTEST_PREAMBLE + $DRM_MODESET_INCLUDES + + void conftest_drm_modeset_lock_all_end( + struct drm_device *dev, + struct drm_modeset_acquire_ctx ctx, + int ret) { + DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret); + DRM_MODESET_LOCK_ALL_END(ctx, ret); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + DRM_MODESET_2_COMPILED=1 + rm -f conftest$$.o + fi + + # If the macro is undefined, both code snippets will still compile, + # so we need to check both and make sure only one compiles successfully. 
+ if [ "$DRM_MODESET_3_COMPILED" = "1" ] && + [ "$DRM_MODESET_2_COMPILED" = "0" ]; then + echo "#define NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT 3" | append_conftest "functions" + elif [ "$DRM_MODESET_3_COMPILED" = "0" ] && + [ "$DRM_MODESET_2_COMPILED" = "1" ]; then + echo "#define NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT 2" | append_conftest "functions" + else + echo "#define NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT 0" | append_conftest "functions" + fi + ;; + + vm_ops_fault_removed_vma_arg) + # + # Determine if vma.vm_ops.fault takes (vma, vmf), or just (vmf) + # args. Acronym key: + # vma: struct vm_area_struct + # vm_ops: struct vm_operations_struct + # vmf: struct vm_fault + # + # The redundant vma arg was removed from BOTH vma.vm_ops.fault and + # vma.vm_ops.page_mkwrite by commit 11bac8000449 ("mm, fs: reduce + # fault, page_mkwrite, and pfn_mkwrite to take only vmf") in + # v4.11 (2017-02-24) + # + CODE=" + #include + void conftest_vm_ops_fault_removed_vma_arg(void) { + struct vm_operations_struct vm_ops; + struct vm_fault *vmf; + (void)vm_ops.fault(vmf); + }" + + compile_check_conftest "$CODE" "NV_VM_OPS_FAULT_REMOVED_VMA_ARG" "" "types" + ;; + + is_export_symbol_present_*) + export_symbol_present_conftest $(echo $1 | cut -f5- -d_) + ;; + + is_export_symbol_gpl_*) + export_symbol_gpl_conftest $(echo $1 | cut -f5- -d_) + ;; + + get_backlight_device_by_name) + # + # Determine if the get_backlight_device_by_name() function is present + # + CODE=" + #include + int conftest_get_backlight_device_by_name(void) { + return get_backlight_device_by_name(); + }" + compile_check_conftest "$CODE" "NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT" "" "functions" + ;; + + dma_buf_ops_has_map) + # + # Determine if .map exists in dma_buf_ops. + # In some kernels, this is a mandatory callback. + # + # Added by commit f9b67f0014cb + # ("dma-buf: Rename dma-ops to prevent conflict with kunmap_atomic macro") + # in v4.12 (2017-04-19) + # + # Removed as a mandatory callback by commit f82aab2d521e + # ("dma-buf: Remove requirement for ops->map() from dma_buf_export") + # in v4.20 (2018-08-07) + # + # Completely removed from dma-buf by commit 4337ebbbbda3 + # ("dma-buf: Remove kernel map/unmap hooks") in v5.6 (2019-11-18) + # + echo "$CONFTEST_PREAMBLE + #include + int conftest_dma_buf_ops_has_map(void) { + return offsetof(struct dma_buf_ops, map); + } + int conftest_dma_buf_ops_has_unmap(void) { + return offsetof(struct dma_buf_ops, unmap); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DMA_BUF_OPS_HAS_MAP" | append_conftest "types" + rm -f conftest$$.o + return + else + echo "#undef NV_DMA_BUF_OPS_HAS_MAP" | append_conftest "types" + return + fi + ;; + + dma_buf_ops_has_map_atomic) + # + # Determine if map_atomic/unmap_atomic exists in dma_buf_ops. + # In some kernels, this is a mandatory callback. 
+ # + # Added by commit f9b67f0014cb + # ("dma-buf: Rename dma-ops to prevent conflict with kunmap_atomic macro") + # in v4.12 (2017-04-19) + # + # Removed by commit f664a5269542 + # ("dma-buf: remove kmap_atomic interface") in v4.19 (2018-05-28) + # + echo "$CONFTEST_PREAMBLE + #include + int conftest_dma_buf_ops_has_map_atomic(void) { + return offsetof(struct dma_buf_ops, map_atomic); + } + int conftest_dma_buf_ops_has_unmap_atomic(void) { + return offsetof(struct dma_buf_ops, unmap_atomic); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DMA_BUF_OPS_HAS_MAP_ATOMIC" | append_conftest "types" + rm -f conftest$$.o + return + else + echo "#undef NV_DMA_BUF_OPS_HAS_MAP_ATOMIC" | append_conftest "types" + return + fi + ;; + + dma_buf_attachment_has_peer2peer) + # + # Determine if peer2peer is present in struct dma_buf_attachment. + # peer2peer being true indicates that a dma-buf importer is able + # to handle peer resources not backed by struct page. + # + # Added by commit: 09606b5446c2 + # ("dma-buf: add peer2peer flag") in v5.8 (2018-03-22) + # + echo "$CONFTEST_PREAMBLE + #include + int conftest_dma_buf_peer2peer(void) { + return offsetof(struct dma_buf_attachment, peer2peer); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER" | append_conftest "types" + rm -f conftest$$.o + return + else + echo "#undef NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER" | append_conftest "types" + return + fi + ;; + + drm_connector_funcs_have_mode_in_name) + # + # Determine if _mode_ is present in connector function names. We + # only test drm_mode_connector_attach_encoder() and assume the + # other functions are changed in sync. + # + # drm_mode_connector_attach_encoder() was renamed to + # drm_connector_attach_encoder() by commit cde4c44d8769 ("drm: + # drop _mode_ from drm_mode_connector_attach_encoder") in v4.19 + # (2018-07-09) + # + # drm_mode_connector_update_edid_property() was renamed by commit + # c555f02371c3 ("drm: drop _mode_ from update_edit_property()") + # in v4.19 (2018-07-09). + # + # The other DRM functions were renamed by commit 97e14fbeb53f + # ("drm: drop _mode_ from remaining connector functions") in v4.19 + # (2018-07-09) + # + # Note that drm_connector.h by introduced by commit 522171951761 + # ("drm: Extract drm_connector.[hc]") in v4.9 (2016-08-12) + # + # Note: up to 4.9 function was provided by drm_crtc.h by commit + # f453ba046074 in 2.6.29 (2008-12-29) + # + CODE=" + #include + #include + void conftest_drm_connector_funcs_have_mode_in_name(void) { + drm_mode_connector_attach_encoder(); + }" + + compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME" "" "functions" + ;; + + drm_connector_has_vrr_capable_property) + # + # Determine if drm_connector_attach_vrr_capable_property and + # drm_connector_set_vrr_capable_property is present + # + # Added by commit ba1b0f6c73d4 ("drm: Add vrr_capable property to + # the drm connector") in v5.0. 
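+ # Consumer sketch (illustrative):
+ #
+ #   #if defined(NV_DRM_CONNECTOR_HAS_VRR_CAPABLE_PROPERTY)
+ #       drm_connector_attach_vrr_capable_property(connector);
+ #   #endif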
+ # + CODE=" + #include + + void conftest_drm_connector_has_vrr_capable_property(void) { + drm_connector_attach_vrr_capable_property(); + }" + + compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_HAS_VRR_CAPABLE_PROPERTY" "" "functions" + ;; + + vm_fault_t) + # + # Determine if vm_fault_t is present + # + # Added by commit 1c8f422059ae5da07db7406ab916203f9417e396 ("mm: + # change return type to vm_fault_t") in v4.17 (2018-04-05) + # + CODE=" + #include + vm_fault_t conftest_vm_fault_t; + " + compile_check_conftest "$CODE" "NV_VM_FAULT_T_IS_PRESENT" "" "types" + ;; + + vmf_insert_pfn) + # + # Determine if the function vmf_insert_pfn() is + # present. + # + # Added by commit 1c8f422059ae5da07db7406ab916203f9417e396 ("mm: + # change return type to vm_fault_t") in v4.17 (2018-04-05) + # + CODE=" + #include + void conftest_vmf_insert_pfn(void) { + vmf_insert_pfn(); + }" + + compile_check_conftest "$CODE" "NV_VMF_INSERT_PFN_PRESENT" "" "functions" + ;; + + pm_runtime_available) + # + # Determine if struct dev_pm_info has the 'usage_count' field. + # + # This was added to the kernel in commit 5e928f77a09a0 in v2.6.32 + # (2008-08-18), but originally were dependent on CONFIG_PM_RUNTIME, + # which was folded into the more generic CONFIG_PM in commit + # d30d819dc8310 in v3.19 (2014-11-27). + # Rather than attempt to select the appropriate CONFIG option, + # simply check if this member is present. + # + CODE=" + #include + void pm_runtime_conftest(void) { + struct dev_pm_info dpmi; + atomic_set(&dpmi.usage_count, 1); + }" + + compile_check_conftest "$CODE" "NV_PM_RUNTIME_AVAILABLE" "" "generic" + ;; + + dma_direct_map_resource) + # + # Determine whether dma_is_direct() exists. + # + # dma_is_direct() was added by commit 356da6d0cde3 ("dma-mapping: + # bypass indirect calls for dma-direct") in 5.1 (2018-12-06). + # + # If dma_is_direct() does exist, then we assume that + # dma_direct_map_resource() exists. Both functions were added + # as part of the same patchset. + # + # The presence of dma_is_direct() and dma_direct_map_resource() + # means that dma_direct can perform DMA mappings itself. + # + CODE=" + #include + void conftest_dma_is_direct(void) { + dma_is_direct(); + }" + + compile_check_conftest "$CODE" "NV_DMA_IS_DIRECT_PRESENT" "" "functions" + ;; + + cmd_uphy_display_port_init) + # + # Determine if CMD_UPHY_DISPLAY_PORT_INIT enum present in bpmp-abi header + # This enum is used only in Tegra down-stream kernel. + # + CODE=" + #include + #include + + int conftest_cmd_uphy_display_port_init(void) { + return CMD_UPHY_DISPLAY_PORT_INIT; + } + " + compile_check_conftest "$CODE" "NV_CMD_UPHY_DISPLAY_PORT_INIT_PRESENT" "" "generic" + + ;; + + cmd_uphy_display_port_off) + # + # Determine if CMD_UPHY_DISPLAY_PORT_OFF enum present in bpmp-abi header + # This enum is used only in Tegra down-stream kernel. + # + CODE=" + #include + #include + + int conftest_cmd_uphy_display_port_off(void) { + return CMD_UPHY_DISPLAY_PORT_OFF; + } + " + compile_check_conftest "$CODE" "NV_CMD_UPHY_DISPLAY_PORT_OFF_PRESENT" "" "generic" + + ;; + + drm_alpha_blending_available) + # + # Determine if the DRM subsystem supports alpha blending + # + # This conftest using "generic" rather than "functions" because + # with the logic of "functions" the presence of + # *either*_alpha_property or _blend_mode_property would be enough + # to cause NV_DRM_ALPHA_BLENDING_AVAILABLE to be defined. + + # drm_plane_create_alpha_property was added by commit + # ae0e28265e21 ("drm/blend: Add a generic alpha property") in + # v4.18. 
+ # + # drm_plane_create_blend_mode_property was added by commit + # a5ec8332d428 ("drm: Add per-plane pixel blend mode property") + # in v4.20. + # + CODE=" + #include + void conftest_drm_alpha_blending_available(void) { + (void)drm_plane_create_alpha_property; + (void)drm_plane_create_blend_mode_property; + }" + + compile_check_conftest "$CODE" "NV_DRM_ALPHA_BLENDING_AVAILABLE" "" "generic" + ;; + + drm_driver_prime_flag_present) + # + # Determine whether driver feature flag DRIVER_PRIME is present. + # + # The DRIVER_PRIME flag was added by commit 3248877ea179 (drm: + # base prime/dma-buf support (v5)) in v3.4 (2011-11-25) and is + # removed by commit 0424fdaf883a ("drm/prime: Actually remove + # DRIVER_PRIME everywhere") in v5.4. + # + # DRIVER_PRIME definition moved from drmP.h to drm_drv.h by + # commit 85e634bce01a (drm: Extract drm_drv.h) in v4.10 + # (2016-11-14). + # + # DRIVER_PRIME define is changed to enum value by commit + # 0e2a933b02c9 (drm: Switch DRIVER_ flags to an enum) in v5.1 + # (2019-01-29). + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #include + + unsigned int drm_driver_prime_flag_present_conftest(void) { + return DRIVER_PRIME; + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_PRIME_FLAG_PRESENT" "" "types" + ;; + + drm_connector_for_each_possible_encoder) + # + # Determine the number of arguments of the + # drm_connector_for_each_possible_encoder() macro. + # + # drm_connector_for_each_possible_encoder() is added by commit + # 83aefbb887b5 (drm: Add drm_connector_for_each_possible_encoder()) + # in v4.19. The definition and prototype is changed to take only + # two arguments connector and encoder by commit 62afb4ad425a + # ("drm/connector: Allow max possible encoders to attach to a + # connector") in v5.5. + # + echo "$CONFTEST_PREAMBLE + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #include + + void conftest_drm_connector_for_each_possible_encoder( + struct drm_connector *connector, + struct drm_encoder *encoder, + int i) { + + drm_connector_for_each_possible_encoder(connector, encoder, i) { + } + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DRM_CONNECTOR_FOR_EACH_POSSIBLE_ENCODER_ARGUMENT_COUNT 3" | append_conftest "functions" + rm -f conftest$$.o + return + else + echo "#define NV_DRM_CONNECTOR_FOR_EACH_POSSIBLE_ENCODER_ARGUMENT_COUNT 2" | append_conftest "functions" + fi + ;; + + mmu_notifier_ops_arch_invalidate_secondary_tlbs) + # + # Determine if the mmu_notifier_ops struct has the + # 'arch_invalidate_secondary_tlbs' member. + # + # struct mmu_notifier_ops.invalidate_range was renamed to + # arch_invalidate_secondary_tlbs by commit 1af5a8109904 + # ("mmu_notifiers: rename invalidate_range notifier") in v6.6 + # (2023-07-25). + CODE=" + #include + int conftest_mmu_notifier_ops_arch_invalidate_secondary_tlbs(void) { + return offsetof(struct mmu_notifier_ops, arch_invalidate_secondary_tlbs); + }" + + compile_check_conftest "$CODE" "NV_MMU_NOTIFIER_OPS_HAS_ARCH_INVALIDATE_SECONDARY_TLBS" "" "types" + ;; + + drm_format_num_planes) + # + # Determine if drm_format_num_planes() function is present. + # + # The drm_format_num_planes() function was added by commit + # d0d110e09629 drm: Add drm_format_num_planes() utility function in + # v3.3 (2011-12-20). Prototype was moved from drm_crtc.h to + # drm_fourcc.h by commit ae4df11a0f53 (drm: Move format-related + # helpers to drm_fourcc.c) in v4.8 (2016-06-09). 
+ # drm_format_num_planes() has been removed by commit 05c452c115bf + # (drm: Remove users of drm_format_num_planes) in v5.3 + # (2019-05-16). + # + CODE=" + #include + #include + + void conftest_drm_format_num_planes(void) { + drm_format_num_planes(); + } + " + + compile_check_conftest "$CODE" "NV_DRM_FORMAT_NUM_PLANES_PRESENT" "" "functions" + ;; + + drm_gem_object_has_resv) + # + # Determine if the 'drm_gem_object' structure has a 'resv' field. + # + # A 'resv' field in the 'drm_gem_object' structure is added by + # commit 1ba627148ef5 (drm: Add reservation_object to + # drm_gem_object) in v5.2. + # + CODE="$CONFTEST_PREAMBLE + #include + + int conftest_drm_gem_object_has_resv(void) { + return offsetof(struct drm_gem_object, resv); + }" + + compile_check_conftest "$CODE" "NV_DRM_GEM_OBJECT_HAS_RESV" "" "types" + ;; + + proc_ops) + # + # Determine if the 'struct proc_ops' type is present. + # + # Added by commit d56c0d45f0e2 ("proc: decouple proc from VFS + # with "struct proc_ops"") in v5.6. + # + CODE=" + #include + + struct proc_ops p_ops; + " + + compile_check_conftest "$CODE" "NV_PROC_OPS_PRESENT" "" "types" + ;; + + drm_crtc_state_has_async_flip) + # + # Determine if the 'drm_crtc_state' structure has an 'async_flip' + # field. + # + # Commit 4d85f45c73a2 (drm/atomic: Rename crtc_state->pageflip_flags + # to async_flip) replaced 'pageflip_flags' by 'async_flip' in v5.4. + # + CODE=" + #include + + int conftest_drm_crtc_state_has_async_flip(void) { + return offsetof(struct drm_crtc_state, async_flip); + }" + + compile_check_conftest "$CODE" "NV_DRM_CRTC_STATE_HAS_ASYNC_FLIP" "" "types" + ;; + + drm_crtc_state_has_pageflip_flags) + # + # Determine if the 'drm_crtc_state' structure has a + # 'pageflip_flags' field. + # + # 'pageflip_flags' added by commit 6cbe5c466d73 (drm/atomic: Save + # flip flags in drm_crtc_state) in v4.12. Commit 4d85f45c73a2 + # (drm/atomic: Rename crtc_state->pageflip_flags to async_flip) + # replaced 'pageflip_flags' by 'async_flip' in v5.4. + # + CODE=" + #include + + int conftest_drm_crtc_state_has_pageflip_flags(void) { + return offsetof(struct drm_crtc_state, pageflip_flags); + }" + + compile_check_conftest "$CODE" "NV_DRM_CRTC_STATE_HAS_PAGEFLIP_FLAGS" "" "types" + ;; + + drm_crtc_state_has_vrr_enabled) + # + # Determine if the 'drm_crtc_state' structure has a + # 'vrr_enabled' field. + # + # Added by commit 1398958cfd8d ("drm: Add vrr_enabled property to + # drm CRTC") in v5.0. + # + CODE=" + #include + + int conftest_drm_crtc_state_has_vrr_enabled(void) { + return offsetof(struct drm_crtc_state, vrr_enabled); + }" + + compile_check_conftest "$CODE" "NV_DRM_CRTC_STATE_HAS_VRR_ENABLED" "" "types" + ;; + + ktime_get_raw_ts64) + # + # Determine if ktime_get_raw_ts64() is present + # + # Added by commit fb7fcc96a86cf ("timekeeping: Standardize on + # ktime_get_*() naming") in 4.18 (2018-04-27) + # + CODE=" + #include + void conftest_ktime_get_raw_ts64(void){ + ktime_get_raw_ts64(); + }" + compile_check_conftest "$CODE" "NV_KTIME_GET_RAW_TS64_PRESENT" "" "functions" + ;; + + vmalloc_has_pgprot_t_arg) + # + # Determine if __vmalloc has the 'pgprot' argument. + # + # The third argument to __vmalloc, page protection + # 'pgprot_t prot', was removed by commit 88dca4ca5a93 + # (mm: remove the pgprot argument to __vmalloc) + # in v5.8.
+ # + CODE=" + #include + + void conftest_vmalloc_has_pgprot_t_arg(void) { + pgprot_t prot; + (void)__vmalloc(0, 0, prot); + }" + compile_check_conftest "$CODE" "NV_VMALLOC_HAS_PGPROT_T_ARG" "" "types" + + ;; + + mm_has_mmap_lock) + # + # Determine if the 'mm_struct' structure has a 'mmap_lock' field. + # + # Kernel commit da1c55f1b272 ("mmap locking API: rename mmap_sem + # to mmap_lock") replaced the field 'mmap_sem' by 'mmap_lock' + # in v5.8. + # + CODE=" + #include + + int conftest_mm_has_mmap_lock(void) { + return offsetof(struct mm_struct, mmap_lock); + }" + + compile_check_conftest "$CODE" "NV_MM_HAS_MMAP_LOCK" "" "types" + ;; + + drm_vma_offset_node_has_readonly) + # + # Determine if the 'drm_vma_offset_node' structure has a 'readonly' + # field. + # + # Added by commit 3e977ac6179b ("drm/i915: Prevent writing into a + # read-only object via a GGTT mmap") in v4.19. + # + CODE=" + #include + + int conftest_drm_vma_offset_node_has_readonly(void) { + return offsetof(struct drm_vma_offset_node, readonly); + }" + + compile_check_conftest "$CODE" "NV_DRM_VMA_OFFSET_NODE_HAS_READONLY" "" "types" + + ;; + + pci_enable_atomic_ops_to_root) + # + # pci_enable_atomic_ops_to_root was added by commit 430a23689dea + # ("PCI: Add pci_enable_atomic_ops_to_root()") in v4.16. + # + CODE=" + #include + void conftest_pci_enable_atomic_ops_to_root(void) { + pci_enable_atomic_ops_to_root(); + }" + compile_check_conftest "$CODE" "NV_PCI_ENABLE_ATOMIC_OPS_TO_ROOT_PRESENT" "" "functions" + ;; + + drm_gem_object_put_unlocked) + # + # Determine if the function drm_gem_object_put_unlocked() is present. + # + # Replaced with a transient macro by commit 2f4dd13d4bb8 ("drm/gem: + # add drm_gem_object_put helper") in v5.9. + # + # Finally removed by commit ab15d56e27be ("drm: remove transient + # drm_gem_object_put_unlocked()") in v5.9. + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #include + void conftest_drm_gem_object_put_unlocked(void) { + drm_gem_object_put_unlocked(); + }" + + compile_check_conftest "$CODE" "NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT" "" "functions" + ;; + + drm_display_mode_has_vrefresh) + # + # Determine if the 'drm_display_mode' structure has a 'vrefresh' + # field. + # + # Removed by commit 0425662fdf05 ("drm: Nuke mode->vrefresh") in + # v5.9. + # + CODE=" + #include + + int conftest_drm_display_mode_has_vrefresh(void) { + return offsetof(struct drm_display_mode, vrefresh); + }" + + compile_check_conftest "$CODE" "NV_DRM_DISPLAY_MODE_HAS_VREFRESH" "" "types" + + ;; + + drm_driver_master_set_has_int_return_type) + # + # Determine if drm_driver::master_set() returns an integer value + # + # Changed to void by commit 907f53200f98 ("drm: vmwgfx: remove + # drm_driver::master_set() return type") in v5.9. + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #include + + int conftest_drm_driver_master_set_has_int_return_type(struct drm_driver *drv, + struct drm_device *dev, struct drm_file *file_priv, bool from_open) { + + return drv->master_set(dev, file_priv, from_open); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_SET_MASTER_HAS_INT_RETURN_TYPE" "" "types" + ;; + + drm_driver_has_gem_free_object) + # + # Determine if the 'drm_driver' structure has a 'gem_free_object' + # function pointer. + # + # drm_driver::gem_free_object is removed by commit 1a9458aeb8eb + # ("drm: remove drm_driver::gem_free_object") in v5.9.
+ # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #include + + int conftest_drm_driver_has_gem_free_object(void) { + return offsetof(struct drm_driver, gem_free_object); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT" "" "types" + ;; + + vga_tryget) + # + # Determine if vga_tryget() is present + # + # vga_tryget() was removed by commit f369bc3f9096 ("vgaarb: mark + # vga_tryget static") in v5.9. + # + CODE=" + #include + void conftest_vga_tryget(void) { + vga_tryget(); + }" + + compile_check_conftest "$CODE" "NV_VGA_TRYGET_PRESENT" "" "functions" + ;; + + pci_channel_state) + # + # Determine if pci_channel_state enum type is present. + # + # pci_channel_state was removed by commit 16d79cd4e23b ("PCI: Use + # 'pci_channel_state_t' instead of 'enum pci_channel_state'") in + # v5.9. + # + CODE=" + #include + + enum pci_channel_state state; + " + + compile_check_conftest "$CODE" "NV_PCI_CHANNEL_STATE_PRESENT" "" "types" + ;; + + cc_platform_has) + # + # Determine if 'cc_platform_has()' is present. + # + # Added by commit aa5a461171f9 ("x86/sev: Add an x86 version of + # cc_platform_has()") in v5.16. + # + CODE=" + #if defined(NV_LINUX_CC_PLATFORM_H_PRESENT) + #include + #endif + + void conftest_cc_platfrom_has(void) { + cc_platform_has(); + }" + + compile_check_conftest "$CODE" "NV_CC_PLATFORM_PRESENT" "" "functions" + ;; + + cc_attr_guest_sev_snp) + # + # Determine if 'CC_ATTR_GUEST_SEV_SNP' is present. + # + # Added by commit aa5a461171f9 ("x86/mm: Extend cc_attr to + # include AMD SEV-SNP") in v5.19. + # + CODE=" + #if defined(NV_LINUX_CC_PLATFORM_H_PRESENT) + #include + #endif + + enum cc_attr cc_attributes = CC_ATTR_GUEST_SEV_SNP; + " + + compile_check_conftest "$CODE" "NV_CC_ATTR_SEV_SNP" "" "types" + ;; + + hv_get_isolation_type) + # + # Determine if 'hv_get_isolation_type()' is present. + # Added by commit faff44069ff5 ("x86/hyperv: Add Write/Read MSR + # registers via ghcb page") in v5.16. + # + CODE=" + #if defined(NV_ASM_MSHYPERV_H_PRESENT) + #include + #endif + void conftest_hv_get_isolation_type(void) { + int i; + hv_get_isolation_type(i); + }" + + compile_check_conftest "$CODE" "NV_HV_GET_ISOLATION_TYPE" "" "functions" + ;; + + drm_prime_pages_to_sg_has_drm_device_arg) + # + # Determine if drm_prime_pages_to_sg() has 'dev' argument. + # + # drm_prime_pages_to_sg() is updated to take 'dev' argument by + # commit 707d561f77b5 ("drm: allow limiting the scatter list + # size.") in v5.10. + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + #include + + struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, + struct page **pages, + unsigned int nr_pages) { + return 0; + }" + + compile_check_conftest "$CODE" "NV_DRM_PRIME_PAGES_TO_SG_HAS_DRM_DEVICE_ARG" "" "types" + ;; + + drm_driver_has_gem_prime_callbacks) + # + # Determine if drm_driver structure has the GEM and PRIME callback + # function pointers. + # + # The GEM and PRIME callbacks are removed from drm_driver + # structure by commit d693def4fd1c ("drm: Remove obsolete GEM and + # PRIME callbacks from struct drm_driver") in v5.11. 
+ # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #include + + void conftest_drm_driver_has_gem_and_prime_callbacks(void) { + struct drm_driver drv; + + drv.gem_prime_pin = 0; + drv.gem_prime_get_sg_table = 0; + drv.gem_prime_vmap = 0; + drv.gem_prime_vunmap = 0; + drv.gem_vm_ops = 0; + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS" "" "types" + ;; + + drm_crtc_atomic_check_has_atomic_state_arg) + # + # Determine if drm_crtc_helper_funcs::atomic_check takes 'state' + # argument of 'struct drm_atomic_state' type. + # + # Commit 29b77ad7b9ca ("drm/atomic: Pass the full state to CRTC + # atomic_check") in v5.11 passed the full atomic state to + # drm_crtc_helper_funcs::atomic_check() + # + # To test the signature of drm_crtc_helper_funcs::atomic_check(), + # declare a function prototype with typeof ::atomic_check(), and then + # define the corresponding function implementation with the expected + # signature. Successful compilation indicates that ::atomic_check() + # has the expected signature. + # + echo "$CONFTEST_PREAMBLE + #include + + static const struct drm_crtc_helper_funcs *funcs; + typeof(*funcs->atomic_check) conftest_drm_crtc_atomic_check_has_atomic_state_arg; + + int conftest_drm_crtc_atomic_check_has_atomic_state_arg( + struct drm_crtc *crtc, struct drm_atomic_state *state) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_DRM_CRTC_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG" | append_conftest "types" + else + echo "#undef NV_DRM_CRTC_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG" | append_conftest "types" + fi + ;; + + drm_gem_object_vmap_has_map_arg) + # + # Determine if drm_gem_object_funcs::vmap takes 'map' + # argument of 'struct dma_buf_map' type. + # + # drm_gem_object_funcs::vmap is updated to take 'map' argument by + # commit 49a3f51dfeee ("drm/gem: Use struct dma_buf_map in GEM + # vmap ops and convert GEM backends") in v5.11. + # + # Note that the 'map' argument type is changed from 'struct dma_buf_map' + # to 'struct iosys_map' by commit 7938f4218168 ("dma-buf-map: Rename + # to iosys-map) in v5.18. + # + CODE=" + #include + int conftest_drm_gem_object_vmap_has_map_arg( + struct drm_gem_object *obj) { + return obj->funcs->vmap(obj, NULL); + }" + + compile_check_conftest "$CODE" "NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG" "" "types" + ;; + + seq_read_iter) + # + # Determine if seq_read_iter() is present + # + # seq_read_iter() was added by commit d4d50710a8b4 ("seq_file: + # add seq_read_iter") in v5.10. + # + CODE=" + #include + void conftest_seq_read_iter(void) { + seq_read_iter(); + }" + + compile_check_conftest "$CODE" "NV_SEQ_READ_ITER_PRESENT" "" "functions" + ;; + + pci_class_multimedia_hd_audio) + # + # Determine if 'PCI_CLASS_MULTIMEDIA_HD_AUDIO' macro is present + # in . + # + # The commit 07f4f97d7b4b ("vga_switcheroo: Use device link for HDA + # controller") has moved 'PCI_CLASS_MULTIMEDIA_HD_AUDIO' macro from + # to in v4.17. + # + CODE=" + #include + unsigned int conftest_pci_class_multimedia_hd_audio(void) { + return PCI_CLASS_MULTIMEDIA_HD_AUDIO; + }" + + compile_check_conftest "$CODE" "NV_PCI_CLASS_MULTIMEDIA_HD_AUDIO_PRESENT" "" "generic" + ;; + + follow_pfn) + # + # Determine if follow_pfn() is present. 
+ # + # follow_pfn() was added by commit 3b6748e2dd69 + # ("mm: introduce follow_pfn()") in v2.6.31-rc1, and removed + # by commit 233eb0bf3b94 ("mm: remove follow_pfn") + # from linux-next 233eb0bf3b94. + # + CODE=" + #include + void conftest_follow_pfn(void) { + follow_pfn(); + }" + + compile_check_conftest "$CODE" "NV_FOLLOW_PFN_PRESENT" "" "functions" + ;; + + follow_pte_arg_vma) + # + # Determine if the first argument of follow_pte is + # mm_struct or vm_area_struct. + # + # The first argument was changed from mm_struct to vm_area_struct by + # commit 29ae7d96d166 ("mm: pass VMA instead of MM to follow_pte()") + # + CODE=" + #include + + typeof(follow_pte) conftest_follow_pte_has_vma_arg; + int conftest_follow_pte_has_vma_arg(struct vm_area_struct *vma, + unsigned long address, + pte_t **ptep, + spinlock_t **ptl) { + return 0; + }" + + compile_check_conftest "$CODE" "NV_FOLLOW_PTE_ARG1_VMA" "" "types" + ;; + + dma_buf_ops_attach_has_arg_dev) + # + # Determine if the .attach callback in struct dma_buf_ops + # has second arg as struct device*. + # + # This callback had struct device* when dma-buf was first introduced + # in commit d15bd7ee445d + # ("dma-buf: Introduce dma buffer sharing mechanism") in v3.3. + # + # The struct device arg was removed by commit a19741e5e5a9 + # ("dma_buf: remove device parameter from attach callback v2") in v4.19. + # + CODE=" + #include + + static const struct dma_buf_ops *funcs; + typeof(*funcs->attach) conftest_dma_buf_ops_attach_has_dev_arg; + int conftest_dma_buf_ops_attach_has_dev_arg(struct dma_buf *buf, + struct device *dev, + struct dma_buf_attachment *attach) { + return 0; + }" + + compile_check_conftest "$CODE" "NV_DMA_BUF_OPS_ATTACH_ARG2_DEV" "" "types" + ;; + + ptep_get) + # + # Determine if ptep_get() is present. + # + # ptep_get() was added by commit 481e980a7c19 + # ("mm: Allow arches to provide ptep_get()") + # + CODE=" + #include + void conftest_ptep_get(void) { + ptep_get(); + }" + + compile_check_conftest "$CODE" "NV_PTEP_GET_PRESENT" "" "functions" + ;; + + drm_plane_atomic_check_has_atomic_state_arg) + # + # Determine if drm_plane_helper_funcs::atomic_check takes 'state' + # argument of 'struct drm_atomic_state' type. + # + # Commit 7c11b99a8e58 ("drm/atomic: Pass the full state to planes + # atomic_check") in v5.13 passes the full atomic state to + # drm_plane_helper_funcs::atomic_check() + # + # To test the signature of drm_plane_helper_funcs::atomic_check(), + # declare a function prototype with typeof ::atomic_check(), and then + # define the corresponding function implementation with the expected + # signature. Successful compilation indicates that ::atomic_check() + # has the expected signature. + # + echo "$CONFTEST_PREAMBLE + #include + + static const struct drm_plane_helper_funcs *funcs; + typeof(*funcs->atomic_check) conftest_drm_plane_atomic_check_has_atomic_state_arg; + + int conftest_drm_plane_atomic_check_has_atomic_state_arg( + struct drm_plane *plane, struct drm_atomic_state *state) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_DRM_PLANE_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG" | append_conftest "types" + else + echo "#undef NV_DRM_PLANE_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG" | append_conftest "types" + fi + ;; + + ib_peer_memory_symbols) + # + # Determine if the following symbols exist in Module.symvers: + # 1. ib_register_peer_memory_client + # 2. 
ib_unregister_peer_memory_client + # The conftest first checks in the kernel's own Module.symvers in + # the regular path. If the symbols are not found there, it's possible + # that MOFED is installed and check for these symbols in MOFED's + # Module.symvers whose path is different from the kernel's symvers. + # + # Note: KERNELRELEASE and ARCH are defined by Kbuild and automatically + # passed down to conftest.sh as env vars. + + MLNX_OFED_KERNEL_DIR=/usr/src/ofa_kernel + VAR_DKMS_SOURCES_DIR=$(test -d /var/lib/dkms/mlnx-ofed-kernel && + ls -d /var/lib/dkms/mlnx-ofed-kernel/*/build 2>/dev/null) + + if check_for_ib_peer_memory_symbols "$OUTPUT" || \ + check_for_ib_peer_memory_symbols "$MLNX_OFED_KERNEL_DIR/$ARCH/$KERNELRELEASE" || \ + check_for_ib_peer_memory_symbols "$MLNX_OFED_KERNEL_DIR/$KERNELRELEASE" || \ + check_for_ib_peer_memory_symbols "$MLNX_OFED_KERNEL_DIR/default" || \ + check_for_ib_peer_memory_symbols "$VAR_DKMS_SOURCES_DIR"; then + echo "#define NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT" | append_conftest "symbols" + else + echo "#undef NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT" | append_conftest "symbols" + fi + ;; + + add_memory_driver_managed) + # + # Determine if the add_memory_driver_managed function is present + # + # Added by commit 7b7b27214bba ("mm/memory_hotplug: introduce + # add_memory_driver_managed()") in v5.8. + # + # Before commit 3a0aaefe4134 ("mm/memory_hotplug: guard more + # declarations by CONFIG_MEMORY_HOTPLUG") in v5.10, the + # add_memory_driver_managed() was not guarded. + # + CODE=" + #include + void conftest_add_memory_driver_managed() { + #if defined(CONFIG_MEMORY_HOTPLUG) + add_memory_driver_managed(); + #endif + }" + + compile_check_conftest "$CODE" "NV_ADD_MEMORY_DRIVER_MANAGED_PRESENT" "" "functions" + ;; + + add_memory_driver_managed_has_mhp_flags_arg) + # + # Check if add_memory_driver_managed() has mhp_flags arg. + # + # Added by commit b6117199787c ("mm/memory_hotplug: prepare + # passing flags to add_memory() and friends") in v5.10. + # + CODE=" + #include + int add_memory_driver_managed(int nid, u64 start, u64 size, + const char *resource_name, + mhp_t mhp_flags) { + return 0; + }" + + compile_check_conftest "$CODE" "NV_ADD_MEMORY_DRIVER_MANAGED_HAS_MHP_FLAGS_ARG" "" "types" + ;; + + remove_memory_has_nid_arg) + # + # Check if remove_memory() has nid parameter. + # + # Removed by commit e1c158e49566 ("mm/memory_hotplug: remove nid + # parameter from remove_memory() and friends") in v5.15. + # + CODE=" + #include + int remove_memory(int nid, u64 start, u64 size) { + return 0; + }" + + compile_check_conftest "$CODE" "NV_REMOVE_MEMORY_HAS_NID_ARG" "" "types" + ;; + + offline_and_remove_memory) + # + # Determine if the offline_and_remove_memory function is present. + # + # Added by commit 08b3acd7a68f ("mm/memory_hotplug: Introduce + # offline_and_remove_memory()") in v5.8. + # + CODE=" + #include + void conftest_offline_and_remove_memory() { + offline_and_remove_memory(); + }" + + compile_check_conftest "$CODE" "NV_OFFLINE_AND_REMOVE_MEMORY_PRESENT" "" "functions" + ;; + + of_property_for_each_u32_has_internal_args) + # + # Determine if the internal arguments for the macro + # of_property_for_each_u32() are present. + # + # Commit 9722c3b66e21 ("of: remove internal arguments from + # of_property_for_each_u32()") removes two arguments from + # of_property_for_each_u32() which are used internally within + # the macro and so do not need to be passed. This change was + # made for Linux v6.11. 
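+            #
+            # For reference, the two forms of the macro look roughly like
+            # this (sketch only; 'vals' is a made-up property name):
+            #
+            #   pre-v6.11: of_property_for_each_u32(np, "vals", prop, p, u) { }
+            #   v6.11+:    of_property_for_each_u32(np, "vals", u) { }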
+ # + CODE=" + #include + void conftest_of_property_for_each_u32(struct device_node *np, + char *propname) { + struct property *iparam1; + const __be32 *iparam2; + u32 val; + + of_property_for_each_u32(np, propname, iparam1, iparam2, val); + }" + + compile_check_conftest "$CODE" "NV_OF_PROPERTY_FOR_EACH_U32_HAS_INTERNAL_ARGS" "" "types" + ;; + + of_dma_configure) + # + # Determine how many arguments of_dma_configure() takes. + # + # It began taking a third parameter with commit 3d6ce86ee794 + # ("drivers: remove force dma flag from buses") in v4.18. + # + + echo "$CONFTEST_PREAMBLE + #include + void conftest_of_dma_configure(void) { + of_dma_configure(NULL, NULL, false); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_OF_DMA_CONFIGURE_ARGUMENT_COUNT 3" | append_conftest "functions" + else + echo "#define NV_OF_DMA_CONFIGURE_ARGUMENT_COUNT 2" | append_conftest "functions" + fi + ;; + + icc_get) + # + # Determine if icc_get() function is present + # + # Added by commit 11f1ceca7031 ("interconnect: Add generic + # on-chip interconnect API") in v5.1. + # + CODE=" + #if defined(NV_LINUX_INTERCONNECT_H_PRESENT) + #include + #endif + void conftest_icc_get(void) + { + icc_get(); + } + " + + compile_check_conftest "$CODE" "NV_ICC_GET_PRESENT" "" "functions" + ;; + + devm_of_icc_get) + # + # Determine if devm_of_icc_get() function is present + # + # Added by commit e145d9a ("interconnect: Add devm_of_icc_get() as + # exported API for user interconnect API") + # + CODE=" + #if defined(NV_LINUX_INTERCONNECT_H_PRESENT) + #include + #endif + void conftest_devm_of_icc_get(void) + { + devm_of_icc_get(); + } + " + + compile_check_conftest "$CODE" "NV_DEVM_ICC_GET_PRESENT" "" "functions" + ;; + + icc_set_bw) + # + # Determine if icc_set_bw() function is present + # + # Added by commit 11f1ceca7031 ("interconnect: Add generic + # on-chip interconnect API") in v5.1. + # + CODE=" + #if defined(NV_LINUX_INTERCONNECT_H_PRESENT) + #include + #endif + void conftest_icc_set_bw(void) + { + icc_set_bw(); + } + " + + compile_check_conftest "$CODE" "NV_ICC_SET_BW_PRESENT" "" "functions" + ;; + + icc_put) + # + # Determine if icc_put() function is present + # + # Added by commit 11f1ceca7031 ("interconnect: Add generic + # on-chip interconnect API") in v5.1. + # + CODE=" + #if defined(NV_LINUX_INTERCONNECT_H_PRESENT) + #include + #endif + void conftest_icc_put(void) + { + icc_put(); + } + " + + compile_check_conftest "$CODE" "NV_ICC_PUT_PRESENT" "" "functions" + ;; + + i2c_new_client_device) + # + # Determine if i2c_new_client_device() function is present + # + # Added by commit 390fd0475af5 ("i2c: remove deprecated + # i2c_new_device API") in v5.8. + # + CODE=" + #include + void conftest_i2c_new_client_device(void) + { + i2c_new_client_device(); + } + " + + compile_check_conftest "$CODE" "NV_I2C_NEW_CLIENT_DEVICE_PRESENT" "" "functions" + ;; + + migrate_vma_added_flags) + # + # Determine if migrate_vma structure has flags + # + # Added by commit 5143192cd410 ("mm/migrate: add a flags + # parameter to migrate_vma") in v5.9. + # + CODE=" + #include + int conftest_migrate_vma_added_flags(void) { + return offsetof(struct migrate_vma, flags); + }" + + compile_check_conftest "$CODE" "NV_MIGRATE_VMA_FLAGS_PRESENT" "" "types" + ;; + + drm_device_has_pdev) + # + # Determine if the 'drm_device' structure has a 'pdev' field. 
+ # + # Removed by commit b347e04452ff ("drm: Remove pdev field from + # struct drm_device") in v5.14. + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #include + + int conftest_drm_device_has_pdev(void) { + return offsetof(struct drm_device, pdev); + }" + + compile_check_conftest "$CODE" "NV_DRM_DEVICE_HAS_PDEV" "" "types" + ;; + + migrate_device_range) + # + # Determine if the migrate_device_range() function is present + # + # migrate_device_range() function was added by commit + # e778406b40dbb ("mm/migrate_device.c: add migrate_device_range()") + # in v6.1 (2022-09-28). + CODE=" + #include + int conftest_migrate_device_range(void) { + migrate_device_range(); + }" + + compile_check_conftest "$CODE" "NV_MIGRATE_DEVICE_RANGE_PRESENT" "" "functions" + ;; + + ioasid_get) + # + # Determine if ioasid_get() function is present + # + # Added by commit cb4789b0d19f ("iommu/ioasid: Add ioasid + # references") in v5.11. + # + CODE=" + #if defined(NV_LINUX_IOASID_H_PRESENT) + #include + #endif + void conftest_ioasid_get(void) { + ioasid_get(); + }" + + compile_check_conftest "$CODE" "NV_IOASID_GET_PRESENT" "" "functions" + ;; + + mm_pasid_drop) + # + # Determine if mm_pasid_drop() function is present + # + # Added by commit 701fac40384f ("iommu/sva: Assign a PASID to mm + # on PASID allocation and free it on mm exit") in v5.18. + # Moved to linux/iommu.h in commit cd3891158a77 ("iommu/sva: Move + # PASID helpers to sva code") in v6.4. + # + CODE=" + #include + #include + void conftest_mm_pasid_drop(void) { + mm_pasid_drop(); + }" + + compile_check_conftest "$CODE" "NV_MM_PASID_DROP_PRESENT" "" "functions" + ;; + + iommu_is_dma_domain) + # + # Determine if iommu_is_dma_domain() function is present + # this also assumes that iommu_get_domain_for_dev() function is + # present. + # + # Added by commit bf3aed4660c6 ("iommu: Introduce explicit type + # for non-strict DMA domains") in v5.15 + # + CODE=" + #include + void conftest_iommu_is_dma_domain(void) { + iommu_is_dma_domain(); + }" + + compile_check_conftest "$CODE" "NV_IOMMU_IS_DMA_DOMAIN_PRESENT" "" "functions" + ;; + + drm_crtc_state_has_no_vblank) + # + # Determine if the 'drm_crtc_state' structure has 'no_vblank'. + # + # Added by commit b25c60af7a87 ("drm/crtc: Add a generic + # infrastructure to fake VBLANK events") in v4.19. + # + CODE=" + #include + void conftest_drm_crtc_state_has_no_vblank(void) { + struct drm_crtc_state foo; + (void)foo.no_vblank; + }" + + compile_check_conftest "$CODE" "NV_DRM_CRTC_STATE_HAS_NO_VBLANK" "" "types" + ;; + + drm_mode_config_has_allow_fb_modifiers) + # + # Determine if the 'drm_mode_config' structure has + # an 'allow_fb_modifiers' field. + # + # an 'allow_fb_modifiers' field in the 'drm_mode_config' structure, + # is added by commit e3eb3250d84e ("drm: add support for + # tiled/compressed/etc modifier in addfb2") in v4.1, and removed by + # commit 3d082157a242 ("drm: remove allow_fb_modifiers") in v5.18. + # + # The 'struct drm_mode_config' definition, is moved to + # drm_mode_config.h file by commit 28575f165d36 ("drm: Extract + # drm_mode_config.[hc]") in v4.10. + # + CODE="$CONFTEST_PREAMBLE + #include + int conftest_drm_mode_config_has_allow_fb_modifiers(void) { + return offsetof(struct drm_mode_config, allow_fb_modifiers); + }" + + compile_check_conftest "$CODE" "NV_DRM_MODE_CONFIG_HAS_ALLOW_FB_MODIFIERS" "" "types" + ;; + + drm_has_hdr_output_metadata) + # + # Determine if drm_mode.h has 'hdr_output_metadata' structure. 
+ # + # Added by commit fbb5d0353c62 ("drm: Add HDR source metadata + # property") in v5.3. + # + CODE=" + #include + void conftest_drm_has_hdr_output_metadata(void) { + struct hdr_output_metadata foo; + (void)foo; + }" + + compile_check_conftest "$CODE" "NV_DRM_HAS_HDR_OUTPUT_METADATA" "" "types" + ;; + + uts_release) + # + # print the kernel's UTS_RELEASE string. + # + echo "#include + UTS_RELEASE" > conftest$$.c + + $CC $CFLAGS -E -P conftest$$.c + rm -f conftest$$.c + ;; + + pcie_reset_flr) + # + # Determine if the pcie_reset_flr() function is present + # + # Added by commit 56f107d ("PCI: Add pcie_reset_flr() with + # 'probe' argument") in v5.15. + CODE=" + #include + int conftest_pcie_reset_flr(void) { + return pcie_reset_flr(); + }" + compile_check_conftest "$CODE" "NV_PCIE_RESET_FLR_PRESENT" "" "functions" + ;; + + devm_clk_bulk_get_all) + # + # Determine if devm_clk_bulk_get_all() function is present + # + # Added by commit f08c2e2865f6 ("clk: add managed version of + # clk_bulk_get_all") in v4.20. + # + CODE=" + #include + void conftest_devm_clk_bulk_get_all(void) + { + devm_clk_bulk_get_all(); + } + " + compile_check_conftest "$CODE" "NV_DEVM_CLK_BULK_GET_ALL_PRESENT" "" "functions" + ;; + + thermal_zone_for_each_trip) + # + # Determine if thermal_zone_for_each_trip() function is present + # + # Added by commit a56cc0a83385 ("thermal: core: Add function to + # walk trips under zone lock") in v6.6-rc3 + # + CODE=" + #include + void conftest_thermal_zone_for_each_trip(void) + { + thermal_zone_for_each_trip(); + } + " + compile_check_conftest "$CODE" "NV_THERMAL_ZONE_FOR_EACH_TRIP_PRESENT" "" "functions" + ;; + + thermal_bind_cdev_to_trip) + # + # Determine if thermal_bind_cdev_to_trip() function is present + # + # Added by commit d069ed6b752f ("thermal: core: Allow trip + # pointers to be used for cooling device binding") in v6.6-rc3 + # + CODE=" + #include + void conftest_thermal_bind_cdev_to_trip(void) + { + thermal_bind_cdev_to_trip(); + } + " + compile_check_conftest "$CODE" "NV_THERMAL_BIND_CDEV_TO_TRIP_PRESENT" "" "functions" + ;; + + thermal_unbind_cdev_from_trip) + # + # Determine if thermal_unbind_cdev_from_trip() function is present + # + # Added by commit d069ed6b752f ("thermal: core: Allow trip + # pointers to be used for cooling device binding") in v6.6-rc3 + # + CODE=" + #include + void conftest_thermal_unbind_cdev_from_trip(void) + { + thermal_unbind_cdev_from_trip(); + } + " + compile_check_conftest "$CODE" "NV_THERMAL_UNBIND_CDEV_FROM_TRIP_PRESENT" "" "functions" + ;; + + update_devfreq) + # + # Determine if update_devfreq() function is present + # + # Added by commit b596d895fa29 ("PM / devfreq: Make update_devfreq() + # public") in v4.20 + # + CODE=" + #include + void conftest_update_devfreq(void) + { + update_devfreq(); + } + " + compile_check_conftest "$CODE" "NV_UPDATE_DEVFREQ_PRESENT" "" "functions" + ;; + + devfreq_dev_profile_has_is_cooling_device) + # + # Determine if the 'devfreq_dev_profile' structure has 'is_cooling_device' + # + # Added by commit 1224451bb6f93 ("PM / devfreq: Register devfreq as a cooling device + # on demand") in v5.12-rc1 + # + CODE=" + #include + int conftest_devfreq_dev_profile_has_is_cooling_device(void) { + return offsetof(struct devfreq_dev_profile, is_cooling_device); + } + " + compile_check_conftest "$CODE" "NV_DEVFREQ_DEV_PROFILE_HAS_IS_COOLING_DEVICE" "" "types" + ;; + + devfreq_has_freq_table) + # + # Determine if the 'devfreq' structure has 'freq_table' + # + # Commit b5d281f6c16d ("PM / devfreq: Rework freq_table + # to 
be local to devfreq struct") updated the devfreq + # and add the freq_table field in v5.19. + # + CODE=" + #include + int conftest_devfreq_has_freq_table(void) { + return offsetof(struct devfreq, freq_table); + } + " + compile_check_conftest "$CODE" "NV_DEVFREQ_HAS_FREQ_TABLE" "" "types" + ;; + + devfreq_has_suspend_freq) + # + # Determine if the 'devfreq' structure has 'suspend_freq' + # + # Commit 83f8ca45afbf ("PM / devfreq: add support for + # suspend/resume of a devfreq device") updated the devfreq + # and add the suspend_freq field in v5.0. + # + CODE=" + #include + int conftest_devfreq_has_suspend_freq(void) { + return offsetof(struct devfreq, suspend_freq); + } + " + compile_check_conftest "$CODE" "NV_DEVFREQ_HAS_SUSPEND_FREQ" "" "types" + ;; + + bpmp_mrq_has_strap_set) + # + # Determine if STRAP_SET is present in the bpmp MRQ ABI. + # + # STRAP_SET was added by commit 4bef358c9071 ("soc/tegra: + #bpmp: Update ABI header") in v5.0. + # + CODE=" + #include + #include + int bpmp_mrq_has_strap = STRAP_SET; + " + compile_check_conftest "$CODE" "NV_BPMP_MRQ_HAS_STRAP_SET" "" "types" + ;; + + dma_resv_add_fence) + # + # Determine if the dma_resv_add_fence() function is present. + # + # dma_resv_add_excl_fence() and dma_resv_add_shared_fence() were + # removed and replaced with dma_resv_add_fence() by commit + # 73511edf8b19 ("dma-buf: specify usage while adding fences to + # dma_resv obj v7") in v5.19. + # + CODE=" + #if defined(NV_LINUX_DMA_RESV_H_PRESENT) + #include + #endif + void conftest_dma_resv_add_fence(void) { + dma_resv_add_fence(); + }" + + compile_check_conftest "$CODE" "NV_DMA_RESV_ADD_FENCE_PRESENT" "" "functions" + ;; + + dma_resv_reserve_fences) + # + # Determine if the dma_resv_reserve_fences() function is present. + # + # dma_resv_reserve_shared() was removed and replaced with + # dma_resv_reserve_fences() by commit c8d4c18bfbc4 + # ("dma-buf/drivers: make reserving a shared slot mandatory v4") in + # v5.19. + # + CODE=" + #if defined(NV_LINUX_DMA_RESV_H_PRESENT) + #include + #endif + void conftest_dma_resv_reserve_fences(void) { + dma_resv_reserve_fences(); + }" + + compile_check_conftest "$CODE" "NV_DMA_RESV_RESERVE_FENCES_PRESENT" "" "functions" + ;; + + reservation_object_reserve_shared_has_num_fences_arg) + # + # Determine if reservation_object_reserve_shared() has 'num_fences' + # argument. + # + # reservation_object_reserve_shared() function prototype was updated + # to take 'num_fences' argument by commit ca05359f1e64 ("dma-buf: + # allow reserving more than one shared fence slot") in v5.0. + # + CODE=" + #include + void conftest_reservation_object_reserve_shared_has_num_fences_arg( + struct reservation_object *obj, + unsigned int num_fences) { + (void) reservation_object_reserve_shared(obj, num_fences); + }" + + compile_check_conftest "$CODE" "NV_RESERVATION_OBJECT_RESERVE_SHARED_HAS_NUM_FENCES_ARG" "" "types" + ;; + + get_task_ioprio) + # + # Determine if the __get_task_ioprio() function is present. + # + # Added by commit 893e5d32d583 ("block: Generalize + # get_current_ioprio() for any task") in v6.0. + # + CODE=" + #include + void conftest_get_task_ioprio(void) { + __get_task_ioprio(); + }" + + compile_check_conftest "$CODE" "NV_GET_TASK_IOPRIO_PRESENT" "" "functions" + ;; + + num_registered_fb) + # + # Determine if 'num_registered_fb' variable is present. + # + # Removed by commit 5727dcfd8486 ("fbdev: Make registered_fb[] + # private to fbmem.c") in v6.1. 
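+            #
+            # For reference, this is the kind of scan the symbol made
+            # possible on pre-v6.1 kernels (sketch only):
+            #
+            #   for (i = 0; i < num_registered_fb; i++)
+            #       if (registered_fb[i])
+            #           /* inspect the conflicting framebuffer */;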
+ # + CODE=" + #include + int conftest_num_registered_fb(void) { + return num_registered_fb; + }" + + compile_check_conftest "$CODE" "NV_NUM_REGISTERED_FB_PRESENT" "" "types" + ;; + + acpi_video_register_backlight) + # + # Determine if acpi_video_register_backlight() function is present + # + # acpi_video_register_backlight was added by commit 3dbc80a3e4c55c + # (ACPI: video: Make backlight class device registration a separate + # step (v2)) for v6.0 (2022-09-02). + # Note: the include directive for in this conftest is + # necessary in order to support kernels between commit 0b9f7d93ca61 + # ("ACPI / i915: ignore firmware requests backlight change") for + # v3.16 (2014-07-07) and commit 3bd6bce369f5 ("ACPI / video: Port + # to new backlight interface selection API") for v4.2 (2015-07-16). + # Kernels within this range use the 'bool' type and the related + # 'false' value in without first including the + # definitions of that type and value. + # + CODE=" + #include + #include + void conftest_acpi_video_register_backlight(void) { + acpi_video_register_backlight(0); + }" + + compile_check_conftest "$CODE" "NV_ACPI_VIDEO_REGISTER_BACKLIGHT" "" "functions" + ;; + + acpi_video_backlight_use_native) + # + # Determine if acpi_video_backlight_use_native() function is present + # + # acpi_video_backlight_use_native was added by commit 2600bfa3df99 + # (ACPI: video: Add acpi_video_backlight_use_native() helper) for + # v6.0 (2022-08-17). Note: the include directive for + # in this conftest is necessary in order to support kernels between + # commit 0b9f7d93ca61 ("ACPI / i915: ignore firmware requests for + # backlight change") for v3.16 (2014-07-07) and commit 3bd6bce369f5 + # ("ACPI / video: Port to new backlight interface selection API") + # for v4.2 (2015-07-16). Kernels within this range use the 'bool' + # type and the related 'false' value in without first + # including the definitions of that type and value. + # + CODE=" + #include + #include + void conftest_acpi_video_backglight_use_native(void) { + acpi_video_backlight_use_native(0); + }" + + compile_check_conftest "$CODE" "NV_ACPI_VIDEO_BACKLIGHT_USE_NATIVE" "" "functions" + ;; + + handle_mm_fault_has_pt_regs_arg) + # + # Determine if handle_mm_fault() has pt_regs argument. + # + # pt_regs argument was added to handle_mm_fault by commit + # bce617edecada007aee8610fbe2c14d10b8de2f6 (08/12/2020) ("mm: do + # page fault accounting in handle_mm_fault") in v5.9. + # + # To test if handle_mm_fault() has pt_regs argument, define a + # function with the expected signature and then define the + # corresponding function implementation with the expected signature. + # Successful compilation indicates that handle_mm_fault has the + # pt_regs argument. + # + CODE=" + #include + #include + + typeof(handle_mm_fault) conftest_handle_mm_fault_has_pt_regs_arg; + vm_fault_t conftest_handle_mm_fault_has_pt_regs_arg(struct vm_area_struct *vma, + unsigned long address, + unsigned int flags, + struct pt_regs *regs) { + return 0; + }" + + compile_check_conftest "$CODE" "NV_HANDLE_MM_FAULT_HAS_PT_REGS_ARG" "" "types" + ;; + + pci_rebar_get_possible_sizes) + # + # Determine if the pci_rebar_get_possible_sizes() function is present. 
+ # + # Added by commit 8fbdbb66f8c10 ("PCI: Add resizable BAR infrastructure + # ") in v5.12 + # + CODE=" + #include + void conftest_pci_rebar_get_possible_sizes(void) { + pci_rebar_get_possible_sizes(); + }" + + compile_check_conftest "$CODE" "NV_PCI_REBAR_GET_POSSIBLE_SIZES_PRESENT" "" "functions" + ;; + + drm_connector_has_override_edid) + # + # Determine if 'struct drm_connector' has an 'override_edid' member. + # + # Removed by commit 90b575f52c6a ("drm/edid: detach debugfs EDID + # override from EDID property update") in v6.2. + # + CODE=" + #include + #include + int conftest_drm_connector_has_override_edid(void) { + return offsetof(struct drm_connector, override_edid); + }" + + compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_HAS_OVERRIDE_EDID" "" "types" + ;; + + iommu_sva_bind_device_has_drvdata_arg) + # + # Check if iommu_sva_bind_device() has drvdata parameter. + # + # drvdata argument was removed by commit + # 942fd5435dccb273f90176b046ae6bbba60cfbd8 ("iommu: Remove + # SVM_FLAG_SUPERVISOR_MODE support") in v6.2 (2022-10-31) + # + CODE=" + #include + #include + #include + void conftest_iommu_sva_bind_device_has_drvdata_arg(struct device *dev, + struct mm_struct *mm, + void *drvdata) { + (void) iommu_sva_bind_device(dev, mm, drvdata); + }" + + compile_check_conftest "$CODE" "NV_IOMMU_SVA_BIND_DEVICE_HAS_DRVDATA_ARG" "" "types" + ;; + + vm_area_struct_has_const_vm_flags) + # + # Determine if the 'vm_area_struct' structure has + # const 'vm_flags'. + # + # A union of '__vm_flags' and 'const vm_flags' was added by + # commit bc292ab00f6c ("mm: introduce vma->vm_flags wrapper + # functions") in v6.3. + # + CODE=" + #include + int conftest_vm_area_struct_has_const_vm_flags(void) { + return offsetof(struct vm_area_struct, __vm_flags); + }" + + compile_check_conftest "$CODE" "NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS" "" "types" + ;; + + drm_driver_has_dumb_destroy) + # + # Determine if the 'drm_driver' structure has a 'dumb_destroy' + # function pointer. + # + # Removed by commit 96a7b60f6ddb ("drm: remove dumb_destroy + # callback") in v6.4. + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #include + + int conftest_drm_driver_has_dumb_destroy(void) { + return offsetof(struct drm_driver, dumb_destroy); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_DUMB_DESTROY" "" "types" + ;; + + memory_failure_queue_has_trapno_arg) + # + # Check if memory_failure_queue() has trapno parameter. + # + # Removed by commit 83b57531c58f ("mm/memory_failure: Remove + # unused trapno from memory_failure") in v4.16. + # + CODE=" + #include + void conftest_memory_failure_queue_has_trapno_arg(unsigned long pfn, + int trapno, + int flags) { + memory_failure_queue(pfn, trapno, flags); + }" + + compile_check_conftest "$CODE" "NV_MEMORY_FAILURE_QUEUE_HAS_TRAPNO_ARG" "" "types" + ;; + + memory_failure_mf_sw_simulated_defined) + # + # Check if memory_failure() flag MF_SW_SIMULATED is defined. + # + # Added by commit 67f22ba7750f ("mm/memory-failure: disable + # unpoison once hw error happens") in v5.19. 
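+            #
+            # For reference, software-injected errors are expected to pass
+            # this flag (sketch only):
+            #
+            #   memory_failure(pfn, MF_SW_SIMULATED);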
+ # + CODE=" + #include + int conftest_memory_failure_mf_sw_simulated_defined(void) { + return MF_SW_SIMULATED; + }" + + compile_check_conftest "$CODE" "NV_MEMORY_FAILURE_MF_SW_SIMULATED_DEFINED" "" "types" + ;; + + fence_ops_use_64bit_seqno) + # + # Determine if dma_fence_ops has the use_64bit_seqno member + # + # 64-bit fence seqno support was actually added by commit + # b312d8ca3a7c ("dma-buf: make fence sequence numbers 64 bit v2") + # in v5.1, but the field to explicitly declare support for it + # didn't get added until commit 5e498abf1485 ("dma-buf: + # explicitely note that dma-fence-chains use 64bit seqno") in + # v5.2. Since it is currently trivial to work around the lack of + # native 64-bit seqno in our driver, we'll use the work-around path + # for kernels prior to v5.2 to avoid further ifdefing of the code. + # + CODE=" + #include + int conftest_fence_ops(void) + { + return offsetof(struct dma_fence_ops, use_64bit_seqno); + }" + + compile_check_conftest "$CODE" "NV_DMA_FENCE_OPS_HAS_USE_64BIT_SEQNO" "" "types" + ;; + + drm_fbdev_generic_setup) + # + # Determine whether drm_fbdev_generic_setup is present. + # + # Added by commit 9060d7f49376 ("drm/fb-helper: Finish the + # generic fbdev emulation") in v4.19. Removed by commit + # aae4682e5d66 ("drm/fbdev-generic: Convert to fbdev-ttm") + # in v6.11. + # + CODE=" + #include + #if defined(NV_DRM_DRM_FBDEV_GENERIC_H_PRESENT) + #include + #endif + void conftest_drm_fbdev_generic_setup(void) { + drm_fbdev_generic_setup(); + }" + + compile_check_conftest "$CODE" "NV_DRM_FBDEV_GENERIC_SETUP_PRESENT" "" "functions" + ;; + + drm_fbdev_ttm_setup) + # + # Determine whether drm_fbdev_ttm_setup is present. + # + # Added by commit aae4682e5d66 ("drm/fbdev-generic: + # Convert to fbdev-ttm") in v6.11. Removed by commit + # 1000634477d8 ("drm/fbdev-ttm:Convert to client-setup") in v6.13. + # + CODE=" + #include + #if defined(NV_DRM_DRM_FBDEV_TTM_H_PRESENT) + #include + #endif + void conftest_drm_fbdev_ttm_setup(void) { + drm_fbdev_ttm_setup(); + }" + + compile_check_conftest "$CODE" "NV_DRM_FBDEV_TTM_SETUP_PRESENT" "" "functions" + ;; + + drm_client_setup) + # + # Determine whether drm_client_setup is present. + # + # Added by commit d07fdf922592 ("drm/fbdev-ttm: Convert to + # client-setup") in v6.13 in drm/drm_client_setup.h, but then moved + # to drm/clients/drm_client_setup.h by commit b86711c6d6e2 + # ("drm/client: Move public client header to clients/ subdirectory") + # in linux-next b86711c6d6e2. + # + CODE=" + #include + #if defined(NV_DRM_DRM_CLIENT_SETUP_H_PRESENT) + #include + #elif defined(NV_DRM_CLIENTS_DRM_CLIENT_SETUP_H_PRESENT) + #include + #endif + void conftest_drm_client_setup(void) { + drm_client_setup(); + }" + + compile_check_conftest "$CODE" "NV_DRM_CLIENT_SETUP_PRESENT" "" "functions" + ;; + + drm_output_poll_changed) + # + # Determine whether drm_mode_config_funcs.output_poll_changed + # callback is present + # + # Removed by commit 446d0f4849b1 ("drm: Remove struct + # drm_mode_config_funcs.output_poll_changed") in v6.12. Hotplug + # event support is handled through the fbdev emulation interface + # going forward. + # + CODE=" + #include + int conftest_drm_output_poll_changed_available(void) { + return offsetof(struct drm_mode_config_funcs, output_poll_changed); + }" + + compile_check_conftest "$CODE" "NV_DRM_OUTPUT_POLL_CHANGED_PRESENT" "" "types" + ;; + + aperture_remove_conflicting_devices) + # + # Determine whether aperture_remove_conflicting_devices is present. 
+ # + # Added by commit 7283f862bd991 ("drm: Implement DRM aperture + # helpers under video/") in v6.0 + CODE=" + #if defined(NV_LINUX_APERTURE_H_PRESENT) + #include + #endif + void conftest_aperture_remove_conflicting_devices(void) { + aperture_remove_conflicting_devices(); + }" + compile_check_conftest "$CODE" "NV_APERTURE_REMOVE_CONFLICTING_DEVICES_PRESENT" "" "functions" + ;; + + aperture_remove_conflicting_pci_devices) + # + # Determine whether aperture_remove_conflicting_pci_devices is present. + # + # Added by commit 7283f862bd991 ("drm: Implement DRM aperture + # helpers under video/") in v6.0 + CODE=" + #if defined(NV_LINUX_APERTURE_H_PRESENT) + #include + #endif + void conftest_aperture_remove_conflicting_pci_devices(void) { + aperture_remove_conflicting_pci_devices(); + }" + compile_check_conftest "$CODE" "NV_APERTURE_REMOVE_CONFLICTING_PCI_DEVICES_PRESENT" "" "functions" + ;; + + drm_aperture_remove_conflicting_pci_framebuffers) + # + # Determine whether drm_aperture_remove_conflicting_pci_framebuffers is present. + # + # Added by commit 2916059147ea ("drm/aperture: Add infrastructure + # for aperture ownership") in v5.14. + # + CODE=" + #if defined(NV_DRM_DRM_APERTURE_H_PRESENT) + #include + #endif + void conftest_drm_aperture_remove_conflicting_pci_framebuffers(void) { + drm_aperture_remove_conflicting_pci_framebuffers(); + }" + + compile_check_conftest "$CODE" "NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT" "" "functions" + ;; + + drm_aperture_remove_conflicting_pci_framebuffers_has_driver_arg) + # + # Determine whether drm_aperture_remove_conflicting_pci_framebuffers + # takes a struct drm_driver * as its second argument. + # + # Prior to commit 97c9bfe3f6605d41eb8f1206e6e0f62b31ba15d6, the + # second argument was a char * pointer to the driver's name. + # + # To test if drm_aperture_remove_conflicting_pci_framebuffers() has + # a req_driver argument, define a function with the expected + # signature and then define the corresponding function + # implementation with the expected signature. Successful compilation + # indicates that this function has the expected signature. + # + # This change occurred in commit 97c9bfe3f660 ("drm/aperture: Pass + # DRM driver structure instead of driver name") in v5.15 + # (2021-06-29). + # + CODE=" + #include + #if defined(NV_DRM_DRM_APERTURE_H_PRESENT) + #include + #endif + typeof(drm_aperture_remove_conflicting_pci_framebuffers) conftest_drm_aperture_remove_conflicting_pci_framebuffers; + int conftest_drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, + const struct drm_driver *req_driver) + { + return 0; + }" + + compile_check_conftest "$CODE" "NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_HAS_DRIVER_ARG" "" "types" + ;; + + find_next_bit_wrap) + # Determine if 'find_next_bit_wrap' is defined. + # + # The function was added by commit 6cc18331a987 ("lib/find_bit: + # add find_next{,_and}_bit_wrap") in v6.1-rc1 (2022-09-19). + # + # Ideally, we would want to be able to include linux/find.h. + # However, linux/find.h does not allow direct inclusion. Rather + # it has to be included through linux/bitmap.h. + # + CODE=" + #include + void conftest_find_next_bit_wrap(void) { + (void)find_next_bit_wrap(); + }" + + compile_check_conftest "$CODE" "NV_FIND_NEXT_BIT_WRAP_PRESENT" "" "functions" + ;; + + crypto_tfm_ctx_aligned) + # Determine if 'crypto_tfm_ctx_aligned' is defined. + # + # Removed by commit 25c74a39e0f6 ("crypto: hmac - remove unnecessary + # alignment logic") in v6.7. 
+ # + CODE=" + #include + void conftest_crypto_tfm_ctx_aligned(void) { + (void)crypto_tfm_ctx_aligned(); + }" + + compile_check_conftest "$CODE" "NV_CRYPTO_TFM_CTX_ALIGNED_PRESENT" "" "functions" + ;; + + crypto) + # + # Determine if we support various crypto functions. + # This test is not complete and may return false positive. + # + CODE=" + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + void conftest_crypto(void) { + struct shash_desc sd; + struct crypto_shash cs; + (void)crypto_shash_tfm_digest; + }" + + compile_check_conftest "$CODE" "NV_CRYPTO_PRESENT" "" "symbols" + ;; + + crypto_akcipher_verify) + # + # Determine whether the crypto_akcipher_verify API is still present. + # It was removed by commit 6b34562 ('crypto: akcipher - Drop sign/verify operations') + # in v6.13-rc1 (2024-10-04). + # + # This test is dependent on the crypto conftest to determine whether crypto should be + # enabled at all. That means that if the kernel is old enough such that crypto_akcipher_verify + # + # The test merely checks for the presence of the API, as it assumes that if the API + # is no longer present, the new API to replace it (crypto_sig_verify) must be present. + # If the kernel version is too old to have crypto_akcipher_verify, it will fail the crypto + # conftest above and all crypto code will be compiled out. + # + CODE=" + #include + #include + void conftest_crypto_akcipher_verify(void) { + (void)crypto_akcipher_verify; + }" + + compile_check_conftest "$CODE" "NV_CRYPTO_AKCIPHER_VERIFY_PRESENT" "" "symbols" + ;; + + ecc_digits_from_bytes) + # + # Determine whether ecc_digits_from_bytes is present. + # It was added in commit c6ab5c915da4 ('crypto: ecc - Prevent ecc_digits_from_bytes from + # reading too many bytes') in v6.10. + # + # This functionality is needed when crypto_akcipher_verify is not present. + # + CODE=" + #include + #include + void conftest_ecc_digits_from_bytes(void) { + (void)ecc_digits_from_bytes; + }" + + compile_check_conftest "$CODE" "NV_ECC_DIGITS_FROM_BYTES_PRESENT" "" "symbols" + ;; + + mempolicy_has_unified_nodes) + # + # Determine if the 'mempolicy' structure has + # nodes union. + # + # nodes field was added by commit 269fbe72cd ("mm/mempolicy: + # use unified 'nodes' for bind/interleave/prefer policies") in + # v5.14 (2021-06-30). + # + CODE=" + #include + int conftest_mempolicy_has_unified_nodes(void) { + return offsetof(struct mempolicy, nodes); + }" + + compile_check_conftest "$CODE" "NV_MEMPOLICY_HAS_UNIFIED_NODES" "" "types" + ;; + + mempolicy_has_home_node) + # + # Determine if the 'mempolicy' structure has + # home_node field. + # + # home_node field was added by commit c6018b4b254 + # ("mm/mempolicy: add set_mempolicy_home_node syscall") in v5.17 + # (2022-01-14). 
+ # + CODE=" + #include + int conftest_mempolicy_has_home_node(void) { + return offsetof(struct mempolicy, home_node); + }" + + compile_check_conftest "$CODE" "NV_MEMPOLICY_HAS_HOME_NODE" "" "types" + ;; + + mpol_preferred_many_present) + # + # Determine if MPOL_PREFERRED_MANY enum is present or not + # + # Added by commit b27abaccf8e8b ("mm/mempolicy: add + # MPOL_PREFERRED_MANY for multiple preferred nodes") in + # v5.15 + # + CODE=" + #include + int mpol_preferred_many = MPOL_PREFERRED_MANY; + " + + compile_check_conftest "$CODE" "NV_MPOL_PREFERRED_MANY_PRESENT" "" "types" + ;; + + drm_connector_attach_hdr_output_metadata_property) + # + # Determine if the function + # drm_connector_attach_hdr_output_metadata_property() is present. + # + # Added by commit e057b52c1d90 ("drm/connector: Create a helper to + # attach the hdr_output_metadata property") in v5.14. + # + CODE=" + #include + #include + + void conftest_drm_connector_attach_hdr_output_metadata_property(void) { + drm_connector_attach_hdr_output_metadata_property(); + }" + + compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT" "" "functions" + ;; + + mmu_interval_notifier) + # + # Determine if mmu_interval_notifier struct is present or not + # + # Added by commit 99cb252f5 ("mm/mmu_notifier: add an interval tree + # notifier") in v5.10 (2019-11-12). + # + CODE=" + #include + struct mmu_interval_notifier interval_notifier; + " + + compile_check_conftest "$CODE" "NV_MMU_INTERVAL_NOTIFIER" "" "types" + ;; + + drm_mode_create_dp_colorspace_property_has_supported_colorspaces_arg) + # Determine if drm_mode_create_dp_colorspace_property() takes the + # 'supported_colorspaces' argument. + # + # The 'u32 supported_colorspaces' argument was added to + # drm_mode_create_dp_colorspace_property() by commit + # c265f340eaa8 ("drm/connector: Allow drivers to pass list of + # supported colorspaces") in v6.5. + # + # To test if drm_mode_create_dp_colorspace_property() has the + # 'supported_colorspaces' argument, declare a function prototype + # with typeof drm_mode_create_dp_colorspace_property and then + # define the corresponding function implementation with the + # expected signature. Successful compilation indicates that + # drm_mode_create_dp_colorspace_property() has the + # 'supported_colorspaces' argument. + # + CODE=" + #include + #include + + typeof(drm_mode_create_dp_colorspace_property) conftest_drm_mode_create_dp_colorspace_property_has_supported_colorspaces_arg; + int conftest_drm_mode_create_dp_colorspace_property_has_supported_colorspaces_arg(struct drm_connector *connector, + u32 supported_colorspaces) + { + return 0; + }" + + compile_check_conftest "$CODE" "NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG" "" "types" + ;; + + drm_syncobj_features_present) + # Determine if DRIVER_SYNCOBJ and DRIVER_SYNCOBJ_TIMELINE DRM + # driver features are present. Timeline DRM synchronization objects + # may only be used if both of these are supported by the driver. + # + # DRIVER_SYNCOBJ_TIMELINE Added by commit 060cebb20cdb ("drm: + # introduce a capability flag for syncobj timeline support") in + # v5.2 + # + # DRIVER_SYNCOBJ Added by commit e9083420bbac ("drm: introduce + # sync objects (v4)") in v4.12 + CODE=" + #include + int features = DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE;" + + compile_check_conftest "$CODE" "NV_DRM_SYNCOBJ_FEATURES_PRESENT" "" "types" + ;; + + stack_trace) + # Determine if functions stack_trace_{save,print} are present. 
+ # Added by commit e9b98e162 ("stacktrace: Provide helpers for + # common stack trace operations") in v5.2. + CODE=" + #include + void conftest_stack_trace(void) { + stack_trace_save(); + stack_trace_print(); + }" + + compile_check_conftest "$CODE" "NV_STACK_TRACE_PRESENT" "" "functions" + ;; + + drm_unlocked_ioctl_flag_present) + # Determine if DRM_UNLOCKED IOCTL flag is present. + # + # DRM_UNLOCKED was removed by commit 2798ffcc1d6a ("drm: Remove + # locking for legacy ioctls and DRM_UNLOCKED") in v6.8. + # + # DRM_UNLOCKED definition was moved from drmP.h to drm_ioctl.h by + # commit 2640981f3600 ("drm: document drm_ioctl.[hc]") in v4.12. + CODE=" + #include + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + int flags = DRM_UNLOCKED;" + + compile_check_conftest "$CODE" "NV_DRM_UNLOCKED_IOCTL_FLAG_PRESENT" "" "types" + ;; + + drm_color_ctm_3x4_present) + # Determine if struct drm_color_ctm_3x4 is present. + # + # struct drm_color_ctm_3x4 was added by commit 6872a189be50 + # ("drm/amd/display: Add 3x4 CTM support for plane CTM") in v6.8. + CODE=" + #include + struct drm_color_ctm_3x4 ctm;" + + compile_check_conftest "$CODE" "NV_DRM_COLOR_CTM_3X4_PRESENT" "" "types" + ;; + + drm_driver_has_gem_prime_mmap) + # + # Determine if the 'drm_driver' structure has a 'gem_prime_mmap' + # function pointer. + # + # Removed by commit 0adec22702d4 ("drm: Remove struct + # drm_driver.gem_prime_mmap") in v6.6. + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #include + + int conftest_drm_driver_has_gem_prime_mmap(void) { + return offsetof(struct drm_driver, gem_prime_mmap); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_GEM_PRIME_MMAP" "" "types" + ;; + + drm_gem_prime_mmap) + # + # Determine if the function drm_gem_prime_mmap() is present. + # + # Added by commit 7698799f95 ("drm/prime: Add drm_gem_prime_mmap() + # in v5.0 + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + #include + void conftest_drm_gem_prime_mmap(void) { + drm_gem_prime_mmap(); + }" + + compile_check_conftest "$CODE" "NV_DRM_GEM_PRIME_MMAP_PRESENT" "" "functions" + ;; + + vmf_insert_mixed) + # + # Determine if the function vmf_insert_mixed() is present. + # + # Added by commit 1c8f422059ae ("mm: change return type to + # vm_fault_t") in v4.17. + # + CODE=" + #include + void conftest_vmf_insert_mixed() { + vmf_insert_mixed(); + }" + + compile_check_conftest "$CODE" "NV_VMF_INSERT_MIXED_PRESENT" "" "functions" + ;; + + sg_dma_page_iter) + # + # Determine if the struct sg_dma_page_iter is present. + # This also serves to know if the argument type of the macro + # sg_page_iter_dma_address() changed: + # - before: struct sg_page_iter *piter + # - after: struct sg_dma_page_iter *dma_iter + # + # Added by commit d901b2760dc6c ("lib/scatterlist: Provide a DMA + # page iterator") v5.0. + # + CODE=" + #include + struct sg_dma_page_iter conftest_dma_page_iter;" + + compile_check_conftest "$CODE" "NV_SG_DMA_PAGE_ITER_PRESENT" "" "types" + ;; + + # FIXME: See if we can remove this test + for_each_sgtable_dma_page) + # + # Determine if macro for_each_sgtable_dma_page is present. + # + # Added by commit 709d6d73c756 ("scatterlist: add generic wrappers + # for iterating over sgtable objects") v5.7. 
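+            #
+            # For reference, the macro walks the DMA-mapped pages of an
+            # sg_table (sketch only):
+            #
+            #   struct sg_dma_page_iter it;
+            #   for_each_sgtable_dma_page(sgt, &it, 0)
+            #       addr = sg_page_iter_dma_address(&it);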
+ # + CODE=" + #include + void conftest_for_each_sgtable_dma_page(void) { + for_each_sgtable_dma_page(); + }" + + compile_check_conftest "$CODE" "NV_FOR_EACH_SGTABLE_DMA_PAGE_PRESENT" "" "functions" + ;; + + drm_aperture_remove_conflicting_framebuffers) + # + # Determine whether drm_aperture_remove_conflicting_framebuffers is present. + # + # drm_aperture_remove_conflicting_framebuffers was added in commit 2916059147ea + # ("drm/aperture: Add infrastructure for aperture ownership) in + # v5.14-rc1 (2021-04-12) + # + CODE=" + #if defined(NV_DRM_DRM_APERTURE_H_PRESENT) + #include + #endif + void conftest_drm_aperture_remove_conflicting_framebuffers(void) { + drm_aperture_remove_conflicting_framebuffers(); + }" + + compile_check_conftest "$CODE" "NV_DRM_APERTURE_REMOVE_CONFLICTING_FRAMEBUFFERS_PRESENT" "" "functions" + ;; + + drm_aperture_remove_conflicting_framebuffers_has_driver_arg) + # + # Determine whether drm_aperture_remove_conflicting_framebuffers + # takes a struct drm_driver * as its fourth argument. + # + # Prior to commit 97c9bfe3f6605d41eb8f1206e6e0f62b31ba15d6, the + # second argument was a char * pointer to the driver's name. + # + # To test if drm_aperture_remove_conflicting_framebuffers() has + # a req_driver argument, define a function with the expected + # signature and then define the corresponding function + # implementation with the expected signature. Successful compilation + # indicates that this function has the expected signature. + # + # This change occurred in commit 97c9bfe3f660 ("drm/aperture: Pass + # DRM driver structure instead of driver name") in v5.15 + # (2021-06-29). + # + CODE=" + #include + #if defined(NV_DRM_DRM_APERTURE_H_PRESENT) + #include + #endif + typeof(drm_aperture_remove_conflicting_framebuffers) conftest_drm_aperture_remove_conflicting_framebuffers; + int conftest_drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size, + bool primary, const struct drm_driver *req_driver) + { + return 0; + }" + + compile_check_conftest "$CODE" "NV_DRM_APERTURE_REMOVE_CONFLICTING_FRAMEBUFFERS_HAS_DRIVER_ARG" "" "types" + ;; + + drm_aperture_remove_conflicting_framebuffers_has_no_primary_arg) + # + # Determine whether drm_aperture_remove_conflicting_framebuffers + # has its third argument as a bool. + # + # Prior to commit 62aeaeaa1b267c5149abee6b45967a5df3feed58, the + # third argument was a bool for figuring out whether the legacy vga + # stuff should be nuked, but it's only for pci devices and not + # really needed in this function. + # + # To test if drm_aperture_remove_conflicting_framebuffers() has + # a bool primary argument, define a function with the expected + # signature and then define the corresponding function + # implementation with the expected signature. Successful compilation + # indicates that this function has the expected signature. + # + # This change occurred in commit 62aeaeaa1b26 ("drm/aperture: Remove + # primary argument") in v6.5 (2023-04-16). 
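+            #
+            # For reference, the two prototypes being distinguished here:
+            #
+            #   v5.15..v6.4: (resource_size_t base, resource_size_t size,
+            #                 bool primary, const struct drm_driver *req_driver)
+            #   v6.5+:       (resource_size_t base, resource_size_t size,
+            #                 const struct drm_driver *req_driver)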
+ # + CODE=" + #include + #if defined(NV_DRM_DRM_APERTURE_H_PRESENT) + #include + #endif + typeof(drm_aperture_remove_conflicting_framebuffers) conftest_drm_aperture_remove_conflicting_framebuffers; + int conftest_drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size, + const struct drm_driver *req_driver) + { + return 0; + }" + + compile_check_conftest "$CODE" "NV_DRM_APERTURE_REMOVE_CONFLICTING_FRAMEBUFFERS_HAS_NO_PRIMARY_ARG" "" "types" + ;; + + struct_page_has_zone_device_data) + # + # Determine if struct page has a 'zone_device_data' field. + # + # Added by commit 8a164fef9c4c ("mm: simplify ZONE_DEVICE page + # private data") in v5.3. + # + CODE=" + #include + int conftest_struct_page_has_zone_device_data(void) { + return offsetof(struct page, zone_device_data); + }" + + compile_check_conftest "$CODE" "NV_STRUCT_PAGE_HAS_ZONE_DEVICE_DATA" "" "types" + ;; + + page_pgmap) + # + # Determine if the page_pgmap() function is present. + # + # Added by commit 82ba975e4c43 ("mm: allow compound zone device + # pages") in v6.14 + # + CODE=" + #include + int conftest_page_pgmap(void) { + return page_pgmap(); + }" + + compile_check_conftest "$CODE" "NV_PAGE_PGMAP_PRESENT" "" "functions" + ;; + + folio_test_swapcache) + # + # Determine if the folio_test_swapcache() function is present. + # + # folio_test_swapcache() was exported by commit d389a4a811551 ("mm: + # Add folio flag manipulation functions") in v5.16. + # + CODE=" + #include + void conftest_folio_test_swapcache(void) { + folio_test_swapcache(); + }" + + compile_check_conftest "$CODE" "NV_FOLIO_TEST_SWAPCACHE_PRESENT" "" "functions" + ;; + + platform_driver_struct_remove_returns_void) + # + # Determine if the 'platform_driver' structure 'remove' function + # pointer returns void. + # + # Commit 0edb555a65d1 ("platform: Make platform_driver::remove() + # return void") updated the platform_driver structure 'remove' + # callback to return void instead of int in Linux v6.11-rc1. + # + echo "$CONFTEST_PREAMBLE + #include + int conftest_platform_driver_struct_remove_returns_void(struct platform_device *pdev, + struct platform_driver *driver) { + return driver->remove(pdev); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + + echo "#undef NV_PLATFORM_DRIVER_STRUCT_REMOVE_RETURNS_VOID" | append_conftest "types" + else + echo "#define NV_PLATFORM_DRIVER_STRUCT_REMOVE_RETURNS_VOID" | append_conftest "types" + fi + ;; + + module_import_ns_takes_constant) + # + # Determine if the MODULE_IMPORT_NS macro takes a string literal + # or constant. + # + # Commit cdd30ebb1b9f ("module: Convert symbol namespace to + # string literal") changed MODULE_IMPORT_NS to take a string + # literal in Linux kernel v6.13. + # + CODE=" + #include + + MODULE_IMPORT_NS(DMA_BUF);" + + compile_check_conftest "$CODE" "NV_MODULE_IMPORT_NS_TAKES_CONSTANT" "" "generic" + ;; + + assign_str) + # + # Determine whether the __assign_str() macro, used in tracepoint + # event definitions, has the 'src' parameter. + # + # The 'src' parameter was removed by commit 2c92ca849fcc + # ("tracing/treewide: Remove second parameter of __assign_str()") in + # v6.10. + # + # The expected usage of __assign_str() inside the TRACE_EVENT() + # macro, which involves multiple include passes and assumes it is + # in a header file, requires a non-standard conftest approach of + # producing both a header and a C file. 
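+            #
+            # For reference, the two forms inside TP_fast_assign() (sketch):
+            #
+            #   pre-v6.10: __assign_str(s, s);
+            #   v6.10+:    __assign_str(s);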
+            #
+            echo "$CONFTEST_PREAMBLE
+            #undef TRACE_SYSTEM
+            #define TRACE_SYSTEM conftest
+
+            #if !defined(_TRACE_CONFTEST_H) || defined(TRACE_HEADER_MULTI_READ)
+            #define _TRACE_CONFTEST_H
+            #include <linux/tracepoint.h>
+            TRACE_EVENT(conftest,
+                TP_PROTO(const char *s),
+                TP_ARGS(s),
+                TP_STRUCT__entry(__string(s, s)),
+                TP_fast_assign(__assign_str(s);),
+                TP_printk(\"%s\", __get_str(s))
+            );
+            #endif
+
+            #undef TRACE_INCLUDE_PATH
+            #define TRACE_INCLUDE_PATH .
+            #define TRACE_INCLUDE_FILE conftest$$
+            #include <trace/define_trace.h>
+            " > conftest$$.h
+
+            echo "$CONFTEST_PREAMBLE
+            #define CREATE_TRACE_POINTS
+            #include \"conftest$$.h\"
+
+            void conftest_assign_str(void) {
+                trace_conftest(\"conftest\");
+            }
+            " > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c >/dev/null 2>&1
+            rm -f conftest$$.c conftest$$.h
+
+            if [ -f conftest$$.o ]; then
+                rm -f conftest$$.o
+
+                echo "#define NV_ASSIGN_STR_ARGUMENT_COUNT 1" | append_conftest "functions"
+            else
+                echo "#define NV_ASSIGN_STR_ARGUMENT_COUNT 2" | append_conftest "functions"
+            fi
+        ;;
+
+        drm_driver_has_date)
+            #
+            # Determine if the 'drm_driver' structure has a 'date' field.
+            #
+            # Removed by commit cb2e1c2136f7 ("drm: remove driver date from
+            # struct drm_driver and all drivers") in linux-next, expected in
+            # v6.14.
+            #
+            CODE="
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+
+            #include <drm/drm_drv.h>
+
+            int conftest_drm_driver_has_date(void) {
+                return offsetof(struct drm_driver, date);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_DATE" "" "types"
+        ;;
+
+        drm_connector_helper_funcs_mode_valid_has_const_mode_arg)
+            #
+            # Determine if the 'mode' pointer argument is const in
+            # drm_connector_helper_funcs::mode_valid.
+            #
+            # The 'mode' pointer argument in
+            # drm_connector_helper_funcs::mode_valid was made const by commit
+            # 26d6fd81916e ("drm/connector: make mode_valid take a const struct
+            # drm_display_mode") in linux-next, expected in v6.15.
+            #
+            CODE="
+            #include <drm/drm_modeset_helper_vtables.h>
+
+            static int conftest_drm_connector_mode_valid(struct drm_connector *connector,
+                                                         const struct drm_display_mode *mode) {
+                return 0;
+            }
+
+            const struct drm_connector_helper_funcs conftest_drm_connector_helper_funcs = {
+                .mode_valid = conftest_drm_connector_mode_valid,
+            };"
+
+            compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_HELPER_FUNCS_MODE_VALID_HAS_CONST_MODE_ARG" "" "types"
+        ;;
+
+        register_shrinker_has_format_arg)
+            #
+            # Determine if register_shrinker() takes a printf-style 'fmt'
+            # argument used to name the shrinker.
+            #
+            # The 'fmt' argument was added by commit e33c267ab70d ("mm:
+            # shrinkers: provide shrinkers with names") in v6.0.
+            #
+            CODE="
+            #include <linux/shrinker.h>
+
+            void conftest_register_shrinker_has_format_arg(void) {
+                register_shrinker(NULL, \"%d\", 0);
+            }"
+
+            compile_check_conftest "$CODE" "NV_REGISTER_SHRINKER_HAS_FMT_ARG" "" "types"
+        ;;
+
+        shrinker_alloc)
+            #
+            # Determine if the shrinker_alloc() function is present.
+            #
+            # Added by commit c42d50aefd17 ("mm: shrinker: add infrastructure
+            # for dynamically allocating shrinker") in v6.7.
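+            #
+            # For reference, a minimal sketch of the dynamic API this
+            # enables, assuming hypothetical nv_* callbacks (error handling
+            # omitted):
+            #
+            #   struct shrinker *s = shrinker_alloc(0, "nv-%s", "pool");
+            #   s->count_objects = nv_count_objects;
+            #   s->scan_objects  = nv_scan_objects;
+            #   shrinker_register(s);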
+            #
+            CODE="
+            #include <linux/shrinker.h>
+
+            void conftest_shrinker_alloc(void) {
+                shrinker_alloc();
+            }"
+
+            compile_check_conftest "$CODE" "NV_SHRINKER_ALLOC_PRESENT" "" "functions"
+        ;;
+
+        # When adding a new conftest entry, please use the correct format for
+        # specifying the relevant upstream Linux kernel commit. Please
+        # avoid specifying -rc kernels, and only use SHAs that actually exist
+        # in the upstream Linux kernel git repository.
+        #
+        # Added|Removed|etc by commit <sha> ("<commit message>") in <version>.
+
+        *)
+            # Unknown test name given
+            echo "Error: unknown conftest '$1' requested" >&2
+            exit 1
+        ;;
+    esac
+}
+
+case "$5" in
+    cc_sanity_check)
+        #
+        # Check if the selected compiler can create object files
+        # in the current environment.
+        #
+        VERBOSE=$6
+
+        echo "int cc_sanity_check(void) {
+            return 0;
+        }" > conftest$$.c
+
+        $CC -c conftest$$.c > /dev/null 2>&1
+        rm -f conftest$$.c
+
+        if [ ! -f conftest$$.o ]; then
+            if [ "$VERBOSE" = "full_output" ]; then
+                echo "";
+            fi
+            if [ "$CC" != "cc" ]; then
+                echo "The C compiler '$CC' does not appear to be able to"
+                echo "create object files. Please make sure you have "
+                echo "your Linux distribution's libc development package"
+                echo "installed and that '$CC' is a valid C compiler";
+                echo "name."
+            else
+                echo "The C compiler '$CC' does not appear to be able to"
+                echo "create executables. Please make sure you have "
+                echo "your Linux distribution's gcc and libc development"
+                echo "packages installed."
+            fi
+            if [ "$VERBOSE" = "full_output" ]; then
+                echo "";
+                echo "*** Failed CC sanity check. Bailing out! ***";
+                echo "";
+            fi
+            exit 1
+        else
+            rm -f conftest$$.o
+            exit 0
+        fi
+    ;;
+
+    cc_version_check)
+        #
+        # Verify that the same compiler major and minor version is
+        # used for the kernel and kernel module. A mismatch condition is
+        # not considered fatal, so this conftest returns a success status
+        # code, even if it fails. Failure of the test can be distinguished
+        # by testing for empty (success) versus non-empty (failure) output.
+        #
+        # Some gcc version strings that have proven problematic for parsing
+        # in the past:
+        #
+        #  gcc.real (GCC) 3.3 (Debian)
+        #  gcc-Version 3.3 (Debian)
+        #  gcc (GCC) 3.1.1 20020606 (Debian prerelease)
+        #  version gcc 3.2.3
+        #
+        # As of this writing, GCC uses a version number as x.y.z and below
+        # are the typical version strings seen with various distributions.
+        #  gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-23)
+        #  gcc version 4.8.5 20150623 (Red Hat 4.8.5-39) (GCC)
+        #  gcc (GCC) 8.3.1 20190507 (Red Hat 8.3.1-4)
+        #  gcc (GCC) 10.2.1 20200723 (Red Hat 10.2.1-1)
+        #  gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0
+        #  gcc (Ubuntu 7.5.0-3ubuntu1~16.04) 7.5.0
+        #  gcc (Debian 8.3.0-6) 8.3.0
+        #  aarch64-linux-gcc.br_real (Buildroot 2020.08-14-ge5a2a90) 9.3.0, GNU ld (GNU Binutils) 2.33.1
+        #
+        # In order to extract the GCC version correctly for version strings
+        # like the last one above, we first check for x.y.z and, if that
+        # fails, we fall back to the x.y format.
+        VERBOSE=$6
+
+        kernel_compile_h=$OUTPUT/include/generated/compile.h
+
+        if [ ! -f ${kernel_compile_h} ]; then
+            # The kernel's compile.h file is not present, so there
+            # isn't a convenient way to identify the compiler version
+            # used to build the kernel.
+ IGNORE_CC_MISMATCH=1 + fi + + if [ -n "$IGNORE_CC_MISMATCH" ]; then + exit 0 + fi + + kernel_cc_string=`cat ${kernel_compile_h} | \ + grep LINUX_COMPILER | cut -f 2 -d '"'` + + kernel_cc_version=`echo ${kernel_cc_string} | grep -o '[0-9]\+\.[0-9]\+\.[0-9]\+' | head -n 1` + if [ -z "${kernel_cc_version}" ]; then + kernel_cc_version=`echo ${kernel_cc_string} | grep -o '[0-9]\+\.[0-9]\+' | head -n 1` + fi + kernel_cc_major=`echo ${kernel_cc_version} | cut -d '.' -f 1` + kernel_cc_minor=`echo ${kernel_cc_version} | cut -d '.' -f 2` + + echo " + #if (__GNUC__ != ${kernel_cc_major}) || (__GNUC_MINOR__ != ${kernel_cc_minor}) + #error \"cc version mismatch\" + #endif + " > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + exit 0; + else + # + # The gcc version check failed + # + + if [ "$VERBOSE" = "full_output" ]; then + echo ""; + echo "Warning: Compiler version check failed:"; + echo ""; + echo "The major and minor number of the compiler used to"; + echo "compile the kernel:"; + echo ""; + echo "${kernel_cc_string}"; + echo ""; + echo "does not match the compiler used here:"; + echo ""; + $CC --version + echo ""; + echo "It is recommended to set the CC environment variable"; + echo "to the compiler that was used to compile the kernel."; + echo "" + echo "To skip the test and silence this warning message, set"; + echo "the IGNORE_CC_MISMATCH environment variable to \"1\"."; + echo "However, mixing compiler versions between the kernel"; + echo "and kernel modules can result in subtle bugs that are"; + echo "difficult to diagnose."; + echo ""; + echo "*** Failed CC version check. ***"; + echo ""; + elif [ "$VERBOSE" = "just_msg" ]; then + echo "Warning: The kernel was built with ${kernel_cc_string}, but the" \ + "current compiler version is `$CC --version | head -n 1`."; + fi + exit 0; + fi + ;; + + xen_sanity_check) + # + # Check if the target kernel is a Xen kernel. If so, exit, since + # the RM doesn't currently support Xen. + # + VERBOSE=$6 + + if [ -n "$IGNORE_XEN_PRESENCE" -o -n "$VGX_BUILD" ]; then + exit 0 + fi + + test_xen + + if [ "$XEN_PRESENT" != "0" ]; then + echo "The kernel you are installing for is a Xen kernel!"; + echo ""; + echo "The NVIDIA driver does not currently support Xen kernels. If "; + echo "you are using a stock distribution kernel, please install "; + echo "a variant of this kernel without Xen support; if this is a "; + echo "custom kernel, please install a standard Linux kernel. Then "; + echo "try installing the NVIDIA kernel module again."; + echo ""; + if [ "$VERBOSE" = "full_output" ]; then + echo "*** Failed Xen sanity check. Bailing out! ***"; + echo ""; + fi + exit 1 + else + exit 0 + fi + ;; + + preempt_rt_sanity_check) + # + # Check if the target kernel has the PREEMPT_RT patch set applied. If + # so, exit, since the RM doesn't support this configuration. + # + VERBOSE=$6 + + if [ -n "$IGNORE_PREEMPT_RT_PRESENCE" ]; then + exit 0 + fi + + if test_configuration_option CONFIG_PREEMPT_RT; then + PREEMPT_RT_PRESENT=1 + elif test_configuration_option CONFIG_PREEMPT_RT_FULL; then + PREEMPT_RT_PRESENT=1 + fi + + if [ "$PREEMPT_RT_PRESENT" != "0" ]; then + echo "The kernel you are installing for is a PREEMPT_RT kernel!"; + echo ""; + echo "The NVIDIA driver does not support real-time kernels. 
If you "; + echo "are using a stock distribution kernel, please install "; + echo "a variant of this kernel that does not have the PREEMPT_RT "; + echo "patch set applied; if this is a custom kernel, please "; + echo "install a standard Linux kernel. Then try installing the "; + echo "NVIDIA kernel module again."; + echo ""; + if [ "$VERBOSE" = "full_output" ]; then + echo "*** Failed PREEMPT_RT sanity check. Bailing out! ***"; + echo ""; + fi + exit 1 + else + exit 0 + fi + ;; + + patch_check) + # + # Check for any "official" patches that may have been applied and + # construct a description table for reporting purposes. + # + PATCHES="" + + for PATCH in patch-*.h; do + if [ -f $PATCH ]; then + echo "#include \"$PATCH\"" + PATCHES="$PATCHES "`echo $PATCH | sed -s 's/patch-\(.*\)\.h/\1/'` + fi + done + + echo "static struct { + const char *short_description; + const char *description; + } __nv_patches[] = {" + for i in $PATCHES; do + echo "{ \"$i\", NV_PATCH_${i}_DESCRIPTION }," + done + echo "{ NULL, NULL } };" + + exit 0 + ;; + + compile_tests) + # + # Run a series of compile tests to determine the set of interfaces + # and features available in the target kernel. + # + shift 5 + + CFLAGS=$1 + shift + + for i in $*; do compile_test $i; done + + exit 0 + ;; + + dom0_sanity_check) + # + # Determine whether running in DOM0. + # + VERBOSE=$6 + + if [ -n "$VGX_BUILD" ]; then + if [ -f /proc/xen/capabilities ]; then + if [ "`cat /proc/xen/capabilities`" == "control_d" ]; then + exit 0 + fi + else + echo "The kernel is not running in DOM0."; + echo ""; + if [ "$VERBOSE" = "full_output" ]; then + echo "*** Failed DOM0 sanity check. Bailing out! ***"; + echo ""; + fi + fi + exit 1 + fi + ;; + vgpu_kvm_sanity_check) + # + # Determine whether we are running a vGPU on KVM host. + # + VERBOSE=$6 + iommu=CONFIG_VFIO_IOMMU_TYPE1 + iommufd_vfio_container=CONFIG_IOMMUFD_VFIO_CONTAINER + mdev=CONFIG_VFIO_MDEV + kvm=CONFIG_KVM_VFIO + vfio_pci_core=CONFIG_VFIO_PCI_CORE + VFIO_IOMMU_PRESENT=0 + VFIO_IOMMUFD_VFIO_CONTAINER_PRESENT=0 + VFIO_MDEV_PRESENT=0 + KVM_PRESENT=0 + VFIO_PCI_CORE_PRESENT=0 + + if [ -n "$VGX_KVM_BUILD" ]; then + if (test_configuration_option ${iommu} || test_configuration_option ${iommu}_MODULE); then + VFIO_IOMMU_PRESENT=1 + fi + + if (test_configuration_option ${iommufd_vfio_container} || test_configuration_option ${iommufd_vfio_container}_MODULE); then + VFIO_IOMMUFD_VFIO_CONTAINER_PRESENT=1 + fi + + if (test_configuration_option ${mdev} || test_configuration_option ${mdev}_MODULE); then + VFIO_MDEV_PRESENT=1 + fi + + if (test_configuration_option ${kvm} || test_configuration_option ${kvm}_MODULE); then + KVM_PRESENT=1 + fi + + if (test_configuration_option ${vfio_pci_core} || test_configuration_option ${vfio_pci_core}_MODULE); then + VFIO_PCI_CORE_PRESENT=1 + fi + + if ([ "$VFIO_IOMMU_PRESENT" != "0" ] || [ "$VFIO_IOMMUFD_VFIO_CONTAINER_PRESENT" != "0" ])&& [ "$KVM_PRESENT" != "0" ] ; then + # vGPU requires either MDEV or vfio-pci-core framework to be present. 
+ if [ "$VFIO_MDEV_PRESENT" != "0" ] || [ "$VFIO_PCI_CORE_PRESENT" != "0" ]; then + exit 0 + fi + fi + + echo "Below CONFIG options are missing on the kernel for installing"; + echo "NVIDIA vGPU driver on KVM host"; + if [ "$VFIO_IOMMU_PRESENT" = "0" ] && [ "$VFIO_IOMMUFD_VFIO_CONTAINER_PRESENT" = "0" ]; then + echo "either CONFIG_VFIO_IOMMU_TYPE1 or CONFIG_IOMMUFD_VFIO_CONTAINER"; + fi + + if [ "$VFIO_MDEV_PRESENT" = "0" ] && [ "$VFIO_PCI_CORE_PRESENT" = "0" ]; then + echo "either CONFIG_VFIO_MDEV or CONFIG_VFIO_PCI_CORE"; + fi + + if [ "$KVM_PRESENT" = "0" ]; then + echo "CONFIG_KVM"; + fi + echo "Please install the kernel with above CONFIG options set, then"; + echo "try installing again"; + echo ""; + + if [ "$VERBOSE" = "full_output" ]; then + echo "*** Failed vGPU on KVM sanity check. Bailing out! ***"; + echo ""; + fi + exit 1 + else + exit 0 + fi + ;; + test_configuration_option) + # + # Check to see if the given config option is set. + # + OPTION=$6 + + test_configuration_option $OPTION + exit $? + ;; + + get_configuration_option) + # + # Get the value of the given config option. + # + OPTION=$6 + + get_configuration_option $OPTION + exit $? + ;; + + + guess_module_signing_hash) + # + # Determine the best cryptographic hash to use for module signing, + # to the extent that is possible. + # + + HASH=$(get_configuration_option CONFIG_MODULE_SIG_HASH) + + if [ $? -eq 0 ] && [ -n "$HASH" ]; then + echo $HASH + exit 0 + else + for SHA in 512 384 256 224 1; do + if test_configuration_option CONFIG_MODULE_SIG_SHA$SHA; then + echo sha$SHA + exit 0 + fi + done + fi + exit 1 + ;; + + + test_kernel_header) + # + # Check for the availability of the given kernel header + # + + CFLAGS=$6 + + test_header_presence "${7}" + + exit $? + ;; + + + build_cflags) + # + # Generate CFLAGS for use in the compile tests + # + + build_cflags + echo $CFLAGS + exit 0 + ;; + + module_symvers_sanity_check) + # + # Check whether Module.symvers exists and contains at least one + # EXPORT_SYMBOL* symbol from vmlinux + # + + if [ -n "$IGNORE_MISSING_MODULE_SYMVERS" ]; then + exit 0 + fi + + TAB=' ' + + if [ -f "$OUTPUT/Module.symvers" ] && \ + grep -e "^[^${TAB}]*${TAB}[^${TAB}]*${TAB}\+vmlinux" \ + "$OUTPUT/Module.symvers" >/dev/null 2>&1; then + exit 0 + fi + + echo "The Module.symvers file is missing, or does not contain any" + echo "symbols exported from the kernel. This could cause the NVIDIA" + echo "kernel modules to be built against a configuration that does" + echo "not accurately reflect the actual target kernel." + echo "The Module.symvers file check can be disabled by setting the" + echo "environment variable IGNORE_MISSING_MODULE_SYMVERS to 1." 
+ + exit 1 + ;; +esac diff --git a/kernel-open/count-lines.mk b/kernel-open/count-lines.mk new file mode 100644 index 0000000..397db0a --- /dev/null +++ b/kernel-open/count-lines.mk @@ -0,0 +1,25 @@ +count: + @echo "conftests:$(words $(ALL_CONFTESTS))" \ + "objects:$(words $(NV_OBJECTS_DEPEND_ON_CONFTEST))" \ + "modules:$(words $(NV_KERNEL_MODULES))" + +.PHONY: count + +# Include the top-level makefile to get $(NV_KERNEL_MODULES) +include Makefile + +# Set $(src) for the to-be-included nvidia*.Kbuild files +src := $(CURDIR) + +# Include nvidia*.Kbuild and append the nvidia*-y objects to ALL_OBJECTS +$(foreach _module, $(NV_KERNEL_MODULES), \ + $(eval include $(_module)/$(_module).Kbuild) \ + ) + +# Concatenate all of the conftest lists; use $(sort ) to remove duplicates +ALL_CONFTESTS := $(sort $(NV_CONFTEST_FUNCTION_COMPILE_TESTS) \ + $(NV_CONFTEST_GENERIC_COMPILE_TESTS) \ + $(NV_CONFTEST_MACRO_COMPILE_TESTS) \ + $(NV_CONFTEST_SYMBOL_COMPILE_TESTS) \ + $(NV_CONFTEST_TYPE_COMPILE_TESTS) \ + ) diff --git a/kernel-open/dkms.conf b/kernel-open/dkms.conf new file mode 100644 index 0000000..aef54d3 --- /dev/null +++ b/kernel-open/dkms.conf @@ -0,0 +1,12 @@ +PACKAGE_NAME="nvidia" +PACKAGE_VERSION="__VERSION_STRING" +AUTOINSTALL="yes" + +# By default, DKMS will add KERNELRELEASE to the make command line; however, +# this will cause the kernel module build to infer that it was invoked via +# Kbuild directly instead of DKMS. The dkms(8) manual page recommends quoting +# the 'make' command name to suppress this behavior. +MAKE[0]="'make' -j__JOBS NV_EXCLUDE_BUILD_MODULES='__EXCLUDE_MODULES' KERNEL_UNAME=${kernelver} modules" + +# The list of kernel modules will be generated by nvidia-installer at runtime. +__DKMS_MODULES diff --git a/kernel-open/header-presence-tests.mk b/kernel-open/header-presence-tests.mk new file mode 100644 index 0000000..33de4ce --- /dev/null +++ b/kernel-open/header-presence-tests.mk @@ -0,0 +1,45 @@ +# Each of these headers is checked for presence with a test #include; a +# corresponding #define will be generated in conftest/headers.h. +NV_HEADER_PRESENCE_TESTS = \ + asm/system.h \ + drm/drm_hdcp.h \ + drm/display/drm_hdcp.h \ + drm/display/drm_hdcp_helper.h \ + drm/drmP.h \ + drm/drm_aperture.h \ + drm/drm_atomic_state_helper.h \ + drm/drm_atomic_uapi.h \ + drm/drm_fbdev_generic.h \ + drm/drm_fbdev_ttm.h \ + drm/drm_client_setup.h \ + drm/drm_probe_helper.h \ + drm/clients/drm_client_setup.h \ + dt-bindings/interconnect/tegra_icc_id.h \ + generated/autoconf.h \ + generated/compile.h \ + generated/utsrelease.h \ + linux/aperture.h \ + linux/dma-direct.h \ + linux/platform/tegra/mc_utils.h \ + xen/ioemu.h \ + linux/fence.h \ + linux/dma-resv.h \ + soc/tegra/tegra_bpmp.h \ + linux/platform/tegra/dce/dce-client-ipc.h \ + linux/nvhost.h \ + linux/nvhost_t194.h \ + linux/host1x-next.h \ + asm/set_memory.h \ + asm/pgtable_types.h \ + linux/dma-map-ops.h \ + sound/hda_codec.h \ + linux/interconnect.h \ + linux/ioasid.h \ + linux/stdarg.h \ + linux/iosys-map.h \ + linux/vfio_pci_core.h \ + linux/cc_platform.h \ + linux/slub_def.h \ + asm/mshyperv.h \ + crypto/sig.h + diff --git a/kernel-open/nvidia-drm/nv-kthread-q.c b/kernel-open/nvidia-drm/nv-kthread-q.c new file mode 100644 index 0000000..edc4cbb --- /dev/null +++ b/kernel-open/nvidia-drm/nv-kthread-q.c @@ -0,0 +1,329 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nv-kthread-q.h"
+#include "nv-list-helpers.h"
+
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+// Today's implementation is a little simpler and more limited than the
+// API description allows for in nv-kthread-q.h. Details include:
+//
+// 1. Each nv_kthread_q instance is a first-in, first-out queue.
+//
+// 2. Each nv_kthread_q instance is serviced by exactly one kthread.
+//
+// You can create any number of queues, each of which gets its own
+// named kernel thread (kthread). You can then insert arbitrary functions
+// into the queue, and those functions will be run in the context of the
+// queue's kthread.
+
+#ifndef WARN
+    // Only *really* old kernels (2.6.9) end up here. Just use a simple printk
+    // to implement this, because such kernels won't be supported much longer.
+    #define WARN(condition, format...) ({                \
+        int __ret_warn_on = !!(condition);               \
+        if (unlikely(__ret_warn_on))                     \
+            printk(KERN_ERR format);                     \
+        unlikely(__ret_warn_on);                         \
+    })
+#endif
+
+#define NVQ_WARN(fmt, ...)                               \
+    do {                                                 \
+        if (in_interrupt()) {                            \
+            WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \
+                 ##__VA_ARGS__);                         \
+        }                                                \
+        else {                                           \
+            WARN(1, "nv_kthread_q: task: %s: " fmt,      \
+                 current->comm,                          \
+                 ##__VA_ARGS__);                         \
+        }                                                \
+    } while (0)
+
+static int _main_loop(void *args)
+{
+    nv_kthread_q_t *q = (nv_kthread_q_t *)args;
+    nv_kthread_q_item_t *q_item = NULL;
+    unsigned long flags;
+
+    while (1) {
+        // Normally this thread is never interrupted. However,
+        // down_interruptible (instead of down) is called here,
+        // in order to avoid being classified as a potentially
+        // hung task, by the kernel watchdog.
+        while (down_interruptible(&q->q_sem))
+            NVQ_WARN("Interrupted during semaphore wait\n");
+
+        if (atomic_read(&q->main_loop_should_exit))
+            break;
+
+        spin_lock_irqsave(&q->q_lock, flags);
+
+        // The q_sem semaphore prevents us from getting here unless there is
+        // at least one item in the list, so an empty list indicates a bug.
+ if (unlikely(list_empty(&q->q_list_head))) { + spin_unlock_irqrestore(&q->q_lock, flags); + NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q); + continue; + } + + // Consume one item from the queue + q_item = list_first_entry(&q->q_list_head, + nv_kthread_q_item_t, + q_list_node); + + list_del_init(&q_item->q_list_node); + + spin_unlock_irqrestore(&q->q_lock, flags); + + // Run the item + q_item->function_to_run(q_item->function_args); + + // Make debugging a little simpler by clearing this between runs: + q_item = NULL; + } + + while (!kthread_should_stop()) + schedule(); + + return 0; +} + +void nv_kthread_q_stop(nv_kthread_q_t *q) +{ + // check if queue has been properly initialized + if (unlikely(!q->q_kthread)) + return; + + nv_kthread_q_flush(q); + + // If this assertion fires, then a caller likely either broke the API rules, + // by adding items after calling nv_kthread_q_stop, or possibly messed up + // with inadequate flushing of self-rescheduling q_items. + if (unlikely(!list_empty(&q->q_list_head))) + NVQ_WARN("list not empty after flushing\n"); + + if (likely(!atomic_read(&q->main_loop_should_exit))) { + + atomic_set(&q->main_loop_should_exit, 1); + + // Wake up the kthread so that it can see that it needs to stop: + up(&q->q_sem); + + kthread_stop(q->q_kthread); + q->q_kthread = NULL; + } +} + +// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by +// kthread_create_on_node relies on a 2 entry, per-core cache to minimize +// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the +// stack location ends up being a function of the core assigned to the current +// thread, instead of being a function of the specified NUMA node. The cache was +// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0 +// ("fork: Optimize task creation by caching two thread stacks per CPU if +// CONFIG_VMAP_STACK=y") +// +// To work around the problematic cache, we create up to three kernel threads +// -If the first thread's stack is resident on the preferred node, return this +// thread. +// -Otherwise, create a second thread. If its stack is resident on the +// preferred node, stop the first thread and return this one. +// -Otherwise, create a third thread. The stack allocator does not find a +// cached stack, and so falls back to vmalloc, which takes the NUMA hint into +// consideration. The first two threads are then stopped. +// +// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned. +// +// This function is never invoked when there is no NUMA preference (preferred +// node is NUMA_NO_NODE). +static struct task_struct *thread_create_on_node(int (*threadfn)(void *data), + nv_kthread_q_t *q, + int preferred_node, + const char *q_name) +{ + + unsigned i, j; + static const unsigned attempts = 3; + struct task_struct *thread[3]; + + for (i = 0;; i++) { + struct page *stack; + + thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name); + + if (unlikely(IS_ERR(thread[i]))) { + + // Instead of failing, pick the previous thread, even if its + // stack is not allocated on the preferred node. 
+ if (i > 0) + i--; + + break; + } + + // vmalloc is not used to allocate the stack, so simply return the + // thread, even if its stack may not be allocated on the preferred node + if (!is_vmalloc_addr(thread[i]->stack)) + break; + + // Ran out of attempts - return thread even if its stack may not be + // allocated on the preferred node + if (i == (attempts - 1)) + break; + + // Get the NUMA node where the first page of the stack is resident. If + // it is the preferred node, select this thread. + stack = vmalloc_to_page(thread[i]->stack); + if (page_to_nid(stack) == preferred_node) + break; + } + + for (j = i; j > 0; j--) + kthread_stop(thread[j - 1]); + + return thread[i]; +} + +int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node) +{ + memset(q, 0, sizeof(*q)); + + INIT_LIST_HEAD(&q->q_list_head); + spin_lock_init(&q->q_lock); + sema_init(&q->q_sem, 0); + + if (preferred_node == NV_KTHREAD_NO_NODE) { + q->q_kthread = kthread_create(_main_loop, q, q_name); + } + else { + q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name); + } + + if (IS_ERR(q->q_kthread)) { + int err = PTR_ERR(q->q_kthread); + + // Clear q_kthread before returning so that nv_kthread_q_stop() can be + // safely called on it making error handling easier. + q->q_kthread = NULL; + + return err; + } + + wake_up_process(q->q_kthread); + + return 0; +} + +int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname) +{ + return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE); +} + +// Returns true (non-zero) if the item was actually scheduled, and false if the +// item was already pending in a queue. +static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item) +{ + unsigned long flags; + int ret = 1; + + spin_lock_irqsave(&q->q_lock, flags); + + if (likely(list_empty(&q_item->q_list_node))) + list_add_tail(&q_item->q_list_node, &q->q_list_head); + else + ret = 0; + + spin_unlock_irqrestore(&q->q_lock, flags); + + if (likely(ret)) + up(&q->q_sem); + + return ret; +} + +void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item, + nv_q_func_t function_to_run, + void *function_args) +{ + INIT_LIST_HEAD(&q_item->q_list_node); + q_item->function_to_run = function_to_run; + q_item->function_args = function_args; +} + +// Returns true (non-zero) if the q_item got scheduled, false otherwise. +int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q, + nv_kthread_q_item_t *q_item) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was " + "called with a non-alive q: 0x%p\n", q); + return 0; + } + + return _raw_q_schedule(q, q_item); +} + +static void _q_flush_function(void *args) +{ + struct completion *completion = (struct completion *)args; + complete(completion); +} + + +static void _raw_q_flush(nv_kthread_q_t *q) +{ + nv_kthread_q_item_t q_item; + DECLARE_COMPLETION_ONSTACK(completion); + + nv_kthread_q_item_init(&q_item, _q_flush_function, &completion); + + _raw_q_schedule(q, &q_item); + + // Wait for the flush item to run. Once it has run, then all of the + // previously queued items in front of it will have run, so that means + // the flush is complete. + wait_for_completion(&completion); +} + +void nv_kthread_q_flush(nv_kthread_q_t *q) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_flush was called after " + "nv_kthread_q_stop. q: 0x%p\n", q); + return; + } + + // This 2x flush is not a typing mistake. 
The queue really does have to be
+    // flushed twice, in order to take care of the case of a q_item that
+    // reschedules itself.
+    _raw_q_flush(q);
+    _raw_q_flush(q);
+}
diff --git a/kernel-open/nvidia-drm/nv-pci-table.c b/kernel-open/nvidia-drm/nv-pci-table.c
new file mode 100644
index 0000000..ac730d5
--- /dev/null
+++ b/kernel-open/nvidia-drm/nv-pci-table.c
@@ -0,0 +1,90 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+
+#include "nv-pci-table.h"
+#include "cpuopsys.h"
+
+#if defined(NV_BSD)
+/* Define PCI classes that FreeBSD's linuxkpi is missing */
+#define PCI_VENDOR_ID_NVIDIA 0x10de
+#define PCI_CLASS_DISPLAY_VGA 0x0300
+#define PCI_CLASS_DISPLAY_3D 0x0302
+#define PCI_CLASS_BRIDGE_OTHER 0x0680
+#endif
+
+/* Devices supported by RM */
+struct pci_device_id nv_pci_table[] = {
+    {
+        .vendor = PCI_VENDOR_ID_NVIDIA,
+        .device = PCI_ANY_ID,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_DISPLAY_VGA << 8),
+        .class_mask = ~0
+    },
+    {
+        .vendor = PCI_VENDOR_ID_NVIDIA,
+        .device = PCI_ANY_ID,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_DISPLAY_3D << 8),
+        .class_mask = ~0
+    },
+    { }
+};
+
+/* Devices supported by all drivers in nvidia.ko */
+struct pci_device_id nv_module_device_table[4] = {
+    {
+        .vendor = PCI_VENDOR_ID_NVIDIA,
+        .device = PCI_ANY_ID,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_DISPLAY_VGA << 8),
+        .class_mask = ~0
+    },
+    {
+        .vendor = PCI_VENDOR_ID_NVIDIA,
+        .device = PCI_ANY_ID,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_DISPLAY_3D << 8),
+        .class_mask = ~0
+    },
+    {
+        .vendor = PCI_VENDOR_ID_NVIDIA,
+        .device = PCI_ANY_ID,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_BRIDGE_OTHER << 8),
+        .class_mask = ~0
+    },
+    { }
+};
+
+#if defined(NV_LINUX)
+MODULE_DEVICE_TABLE(pci, nv_module_device_table);
+#endif
diff --git a/kernel-open/nvidia-drm/nv-pci-table.h b/kernel-open/nvidia-drm/nv-pci-table.h
new file mode 100644
index 0000000..25daaf4
--- /dev/null
+++ b/kernel-open/nvidia-drm/nv-pci-table.h
@@ -0,0 +1,32 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_PCI_TABLE_H_
+#define _NV_PCI_TABLE_H_
+
+#include <linux/pci.h>
+
+extern struct pci_device_id nv_pci_table[];
+extern struct pci_device_id nv_module_device_table[4];
+
+#endif /* _NV_PCI_TABLE_H_ */
diff --git a/kernel-open/nvidia-drm/nv_common_utils.h b/kernel-open/nvidia-drm/nv_common_utils.h
new file mode 100644
index 0000000..6b10e76
--- /dev/null
+++ b/kernel-open/nvidia-drm/nv_common_utils.h
@@ -0,0 +1,120 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NV_COMMON_UTILS_H__
+#define __NV_COMMON_UTILS_H__
+
+#include "nvtypes.h"
+#include "nvmisc.h"
+
+#if !defined(TRUE)
+#define TRUE NV_TRUE
+#endif
+
+#if !defined(FALSE)
+#define FALSE NV_FALSE
+#endif
+
+#define NV_IS_UNSIGNED(x) ((__typeof__(x))-1 > 0)
+
+/* Get the length of a statically-sized array.
*/ +#define ARRAY_LEN(_arr) (sizeof(_arr) / sizeof(_arr[0])) + +#define NV_INVALID_HEAD 0xFFFFFFFF + +#define NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION (~0) + +#if !defined(NV_MIN) +# define NV_MIN(a,b) (((a)<(b))?(a):(b)) +#endif + +#define NV_MIN3(a,b,c) NV_MIN(NV_MIN(a, b), c) +#define NV_MIN4(a,b,c,d) NV_MIN3(NV_MIN(a,b),c,d) + +#if !defined(NV_MAX) +# define NV_MAX(a,b) (((a)>(b))?(a):(b)) +#endif + +#define NV_MAX3(a,b,c) NV_MAX(NV_MAX(a, b), c) +#define NV_MAX4(a,b,c,d) NV_MAX3(NV_MAX(a,b),c,d) + +static inline int NV_LIMIT_VAL_TO_MIN_MAX(int val, int min, int max) +{ + if (val < min) { + return min; + } + if (val > max) { + return max; + } + return val; +} + +#define NV_ROUNDUP_DIV(x,y) ((x) / (y) + (((x) % (y)) ? 1 : 0)) + +/* + * Macros used for computing palette entries: + * + * NV_UNDER_REPLICATE(val, source_size, result_size) expands a value + * of source_size bits into a value of target_size bits by shifting + * the source value into the high bits and replicating the high bits + * of the value into the low bits of the result. + * + * PALETTE_DEPTH_SHIFT(val, w) maps a colormap entry for a component + * that has w bits to an appropriate entry in a LUT of 256 entries. + */ +static inline unsigned int NV_UNDER_REPLICATE(unsigned short val, + int source_size, + int result_size) +{ + return (val << (result_size - source_size)) | + (val >> ((source_size << 1) - result_size)); +} + + +static inline unsigned short PALETTE_DEPTH_SHIFT(unsigned short val, int depth) +{ + return NV_UNDER_REPLICATE(val, depth, 8); +} + +/* + * Use __builtin_ffs where it is supported, or provide an equivalent + * implementation for platforms like riscv where it is not. + */ +#if defined(__GNUC__) && !NVCPU_IS_RISCV64 +static inline int nv_ffs(int x) +{ + return __builtin_ffs(x); +} +#else +static inline int nv_ffs(int x) +{ + if (x == 0) + return 0; + + LOWESTBITIDX_32(x); + + return 1 + x; +} +#endif + +#endif /* __NV_COMMON_UTILS_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-dma-resv-helper.h b/kernel-open/nvidia-drm/nvidia-dma-resv-helper.h new file mode 100644 index 0000000..da28f4a --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-dma-resv-helper.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef __NVIDIA_DMA_RESV_HELPER_H__
+#define __NVIDIA_DMA_RESV_HELPER_H__
+
+#include "nvidia-drm-conftest.h"
+
+/*
+ * linux/reservation.h is renamed to linux/dma-resv.h, by commit
+ * 52791eeec1d9 (dma-buf: rename reservation_object to dma_resv)
+ * in v5.4.
+ */
+
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+#include <linux/dma-resv.h>
+#else
+#include <linux/reservation.h>
+#endif
+
+#include <linux/dma-fence.h>
+
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+typedef struct dma_resv nv_dma_resv_t;
+#else
+typedef struct reservation_object nv_dma_resv_t;
+#endif
+
+static inline void nv_dma_resv_init(nv_dma_resv_t *obj)
+{
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+    dma_resv_init(obj);
+#else
+    reservation_object_init(obj);
+#endif
+}
+
+static inline void nv_dma_resv_fini(nv_dma_resv_t *obj)
+{
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+    dma_resv_fini(obj);
+#else
+    reservation_object_fini(obj);
+#endif
+}
+
+static inline void nv_dma_resv_lock(nv_dma_resv_t *obj,
+                                    struct ww_acquire_ctx *ctx)
+{
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+    dma_resv_lock(obj, ctx);
+#else
+    ww_mutex_lock(&obj->lock, ctx);
+#endif
+}
+
+static inline void nv_dma_resv_unlock(nv_dma_resv_t *obj)
+{
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+    dma_resv_unlock(obj);
+#else
+    ww_mutex_unlock(&obj->lock);
+#endif
+}
+
+static inline int nv_dma_resv_reserve_fences(nv_dma_resv_t *obj,
+                                             unsigned int num_fences,
+                                             NvBool shared)
+{
+#if defined(NV_DMA_RESV_RESERVE_FENCES_PRESENT)
+    return dma_resv_reserve_fences(obj, num_fences);
+#else
+    if (shared) {
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+        return dma_resv_reserve_shared(obj, num_fences);
+#elif defined(NV_RESERVATION_OBJECT_RESERVE_SHARED_HAS_NUM_FENCES_ARG)
+        return reservation_object_reserve_shared(obj, num_fences);
+#else
+        unsigned int i;
+        for (i = 0; i < num_fences; i++) {
+            reservation_object_reserve_shared(obj);
+        }
+#endif
+    }
+    return 0;
+#endif
+}
+
+static inline void nv_dma_resv_add_excl_fence(nv_dma_resv_t *obj,
+                                              struct dma_fence *fence)
+{
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+#if defined(NV_DMA_RESV_ADD_FENCE_PRESENT)
+    dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
+#else
+    dma_resv_add_excl_fence(obj, fence);
+#endif
+#else
+    reservation_object_add_excl_fence(obj, fence);
+#endif
+}
+
+static inline void nv_dma_resv_add_shared_fence(nv_dma_resv_t *obj,
+                                                struct dma_fence *fence)
+{
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+#if defined(NV_DMA_RESV_ADD_FENCE_PRESENT)
+    dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_READ);
+#else
+    dma_resv_add_shared_fence(obj, fence);
+#endif
+#else
+    reservation_object_add_shared_fence(obj, fence);
+#endif
+}
+
+#endif /* __NVIDIA_DMA_RESV_HELPER_H__ */
diff --git a/kernel-open/nvidia-drm/nvidia-drm-conftest.h b/kernel-open/nvidia-drm/nvidia-drm-conftest.h
new file mode 100644
index 0000000..26b778d
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-conftest.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DRM_CONFTEST_H__
+#define __NVIDIA_DRM_CONFTEST_H__
+
+#include "conftest.h"
+#include "nvtypes.h"
+
+/*
+ * NOTE: This file is expected to be included at the top, before any of the
+ * linux/drm headers.
+ *
+ * The goal is to redefine refcount_dec_and_test and refcount_inc before
+ * including drm header files, so that the drm macro/inline calls to
+ * refcount_dec_and_test* and refcount_inc get redirected to the
+ * alternate implementations in this file.
+ */
+
+#if NV_IS_EXPORT_SYMBOL_GPL_refcount_inc
+
+#include <linux/refcount.h>
+
+#define refcount_inc(__ptr)         \
+    do {                            \
+        atomic_inc(&(__ptr)->refs); \
+    } while(0)
+
+#endif
+
+#if NV_IS_EXPORT_SYMBOL_GPL_refcount_dec_and_test
+
+#include <linux/refcount.h>
+
+#define refcount_dec_and_test(__ptr) atomic_dec_and_test(&(__ptr)->refs)
+
+#endif
+
+#if defined(NV_DRM_FBDEV_GENERIC_SETUP_PRESENT) && \
+    defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
+#define NV_DRM_FBDEV_AVAILABLE
+#define NV_DRM_FBDEV_GENERIC_AVAILABLE
+#endif
+
+#if defined(NV_DRM_FBDEV_TTM_SETUP_PRESENT) && \
+    defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
+#if IS_ENABLED(CONFIG_DRM_TTM_HELPER)
+#define NV_DRM_FBDEV_AVAILABLE
+#define NV_DRM_FBDEV_TTM_AVAILABLE
+#endif
+#endif
+
+#if defined(NV_DRM_CLIENT_SETUP_PRESENT) && \
+    (defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT) || \
+     defined(NV_APERTURE_REMOVE_CONFLICTING_PCI_DEVICES_PRESENT))
+// XXX remove dependency on DRM_TTM_HELPER by implementing nvidia-drm's own
+// .fbdev_probe callback that uses NVKMS kapi
+#if IS_ENABLED(CONFIG_DRM_TTM_HELPER)
+#define NV_DRM_FBDEV_AVAILABLE
+#define NV_DRM_CLIENT_AVAILABLE
+#endif
+#endif
+
+/*
+ * Adapt to quirks in FreeBSD's Linux kernel compatibility layer.
+ */
+#if defined(NV_BSD)
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/sx.h>
+
+/* For nv_drm_gem_prime_force_fence_signal */
+#ifndef spin_is_locked
+#if ((__FreeBSD_version >= 1500000) && (__FreeBSD_version < 1500018)) || (__FreeBSD_version < 1401501)
+#define spin_is_locked(lock) mtx_owned(lock.m)
+#else
+#define spin_is_locked(lock) mtx_owned(lock)
+#endif
+#endif
+
+#ifndef rwsem_is_locked
+#define rwsem_is_locked(sem) (((sem)->sx.sx_lock & (SX_LOCK_SHARED)) \
+    || ((sem)->sx.sx_lock & ~(SX_LOCK_FLAGMASK & ~SX_LOCK_SHARED)))
+#endif
+
+/*
+ * FreeBSD does not define vm_flags_t in its linuxkpi, since there is already
+ * a FreeBSD vm_flags_t (of a different size) and they don't want the names to
+ * collide. Temporarily redefine it when including nv-mm.h
+ */
+#define vm_flags_t unsigned long
+#include "nv-mm.h"
+#undef vm_flags_t
+
+/*
+ * sys/nv.h and nvidia/nv.h have the same header guard;
+ * we need to clear it for nvlist_t to get loaded
+ */
+#undef _NV_H_
+#include <sys/nv.h>
+
+/*
+ * For now, just use set_page_dirty, as the locked variant is not yet
+ * ported to FreeBSD (in progress). This calls
+ * vm_page_dirty.
Used in nv-mm.h + */ +#define set_page_dirty_lock set_page_dirty + +/* + * FreeBSD does not implement drm_atomic_state_free, simply + * default to drm_atomic_state_put + */ +#define drm_atomic_state_free drm_atomic_state_put + +#if __FreeBSD_version < 1300000 +/* redefine LIST_HEAD_INIT to the linux version */ +#include +#define LIST_HEAD_INIT(name) LINUX_LIST_HEAD_INIT(name) +#endif + +/* + * FreeBSD currently has only vmf_insert_pfn_prot defined, and it has a + * static assert warning not to use it since all of DRM's usages are in + * loops with the vm obj lock(s) held. Instead we should use the lkpi + * function itself directly. For us none of this applies so we can just + * wrap it in our own definition of vmf_insert_pfn + */ +#ifndef NV_VMF_INSERT_PFN_PRESENT +#define NV_VMF_INSERT_PFN_PRESENT 1 + +#if __FreeBSD_version < 1300000 +#define VM_SHARED (1 << 17) + +/* Not present in 12.2 */ +static inline vm_fault_t +lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, pgprot_t prot) +{ + vm_object_t vm_obj = vma->vm_obj; + vm_page_t page; + vm_pindex_t pindex; + + VM_OBJECT_ASSERT_WLOCKED(vm_obj); + pindex = OFF_TO_IDX(addr - vma->vm_start); + if (vma->vm_pfn_count == 0) + vma->vm_pfn_first = pindex; + MPASS(pindex <= OFF_TO_IDX(vma->vm_end)); + + page = vm_page_grab(vm_obj, pindex, VM_ALLOC_NORMAL); + if (page == NULL) { + page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn)); + vm_page_xbusy(page); + if (vm_page_insert(page, vm_obj, pindex)) { + vm_page_xunbusy(page); + return (VM_FAULT_OOM); + } + page->valid = VM_PAGE_BITS_ALL; + } + pmap_page_set_memattr(page, pgprot2cachemode(prot)); + vma->vm_pfn_count++; + + return (VM_FAULT_NOPAGE); +} +#endif + +static inline vm_fault_t +vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn) +{ + vm_fault_t ret; + + VM_OBJECT_WLOCK(vma->vm_obj); + ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, vma->vm_page_prot); + VM_OBJECT_WUNLOCK(vma->vm_obj); + + return (ret); +} + +#endif + +#endif /* defined(NV_BSD) */ + +#endif /* defined(__NVIDIA_DRM_CONFTEST_H__) */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-connector.c b/kernel-open/nvidia-drm/nvidia-drm-connector.c new file mode 100644 index 0000000..e1c80a5 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-connector.c @@ -0,0 +1,640 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#include "nvidia-drm-helper.h"
+#include "nvidia-drm-priv.h"
+#include "nvidia-drm-connector.h"
+#include "nvidia-drm-crtc.h"
+#include "nvidia-drm-utils.h"
+#include "nvidia-drm-encoder.h"
+
+/*
+ * Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h")
+ * moves a number of helper function definitions from
+ * drm/drm_crtc_helper.h to a new drm_probe_helper.h.
+ */
+#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT)
+#include <drm/drm_probe_helper.h>
+#endif
+#include <drm/drm_crtc_helper.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <linux/hdmi.h>
+
+static void nv_drm_connector_destroy(struct drm_connector *connector)
+{
+    struct nv_drm_connector *nv_connector = to_nv_connector(connector);
+
+    drm_connector_unregister(connector);
+
+    drm_connector_cleanup(connector);
+
+    if (nv_connector->edid != NULL) {
+        nv_drm_free(nv_connector->edid);
+    }
+
+    nv_drm_free(nv_connector);
+}
+
+static bool
+__nv_drm_detect_encoder(struct NvKmsKapiDynamicDisplayParams *pDetectParams,
+                        struct drm_connector *connector,
+                        struct drm_encoder *encoder)
+{
+    struct nv_drm_connector *nv_connector = to_nv_connector(connector);
+    struct drm_device *dev = connector->dev;
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct nv_drm_encoder *nv_encoder;
+
+    /*
+     * DVI-I connectors can drive both digital and analog
+     * encoders. If a digital connection has been forced then
+     * skip analog encoders.
+     */
+
+    if (connector->connector_type == DRM_MODE_CONNECTOR_DVII &&
+        connector->force == DRM_FORCE_ON_DIGITAL &&
+        encoder->encoder_type == DRM_MODE_ENCODER_DAC) {
+        return false;
+    }
+
+    nv_encoder = to_nv_encoder(encoder);
+
+    memset(pDetectParams, 0, sizeof(*pDetectParams));
+
+    pDetectParams->handle = nv_encoder->hDisplay;
+
+    switch (connector->force) {
+        case DRM_FORCE_ON:
+        case DRM_FORCE_ON_DIGITAL:
+            pDetectParams->forceConnected = NV_TRUE;
+            break;
+        case DRM_FORCE_OFF:
+            pDetectParams->forceDisconnected = NV_TRUE;
+            break;
+        case DRM_FORCE_UNSPECIFIED:
+            break;
+    }
+
+#if defined(NV_DRM_CONNECTOR_HAS_OVERRIDE_EDID)
+    if (connector->override_edid) {
+#else
+    if (drm_edid_override_connector_update(connector) > 0) {
+#endif
+        const struct drm_property_blob *edid = connector->edid_blob_ptr;
+
+        if (edid->length <= sizeof(pDetectParams->edid.buffer)) {
+            memcpy(pDetectParams->edid.buffer, edid->data, edid->length);
+            pDetectParams->edid.bufferSize = edid->length;
+            pDetectParams->overrideEdid = NV_TRUE;
+        } else {
+            WARN_ON(edid->length >
+                    sizeof(pDetectParams->edid.buffer));
+        }
+    }
+
+    if (!nvKms->getDynamicDisplayInfo(nv_dev->pDevice, pDetectParams)) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to detect display state");
+        return false;
+    }
+
+#if defined(NV_DRM_CONNECTOR_HAS_VRR_CAPABLE_PROPERTY)
+    drm_connector_attach_vrr_capable_property(&nv_connector->base);
+    drm_connector_set_vrr_capable_property(&nv_connector->base, pDetectParams->vrrSupported ?
true : false); +#endif + + if (pDetectParams->connected) { + if (!pDetectParams->overrideEdid && pDetectParams->edid.bufferSize) { + + if ((nv_connector->edid = nv_drm_calloc( + 1, + pDetectParams->edid.bufferSize)) != NULL) { + + memcpy(nv_connector->edid, + pDetectParams->edid.buffer, + pDetectParams->edid.bufferSize); + } else { + NV_DRM_LOG_ERR("Out of Memory"); + } + } + + return true; + } + + return false; +} + +static enum drm_connector_status __nv_drm_connector_detect_internal( + struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + + enum drm_connector_status status = connector_status_disconnected; + + struct drm_encoder *detected_encoder = NULL; + struct nv_drm_encoder *nv_detected_encoder = NULL; + struct drm_encoder *encoder; + + struct NvKmsKapiDynamicDisplayParams *pDetectParams = NULL; + + BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); + + if (nv_connector->edid != NULL) { + nv_drm_free(nv_connector->edid); + nv_connector->edid = NULL; + } + + if ((pDetectParams = nv_drm_calloc( + 1, + sizeof(*pDetectParams))) == NULL) { + WARN_ON(pDetectParams == NULL); + goto done; + } + + nv_drm_connector_for_each_possible_encoder(connector, encoder) { + if (__nv_drm_detect_encoder(pDetectParams, connector, encoder)) { + detected_encoder = encoder; + break; + } + } nv_drm_connector_for_each_possible_encoder_end; + + if (detected_encoder == NULL) { + goto done; + } + + nv_detected_encoder = to_nv_encoder(detected_encoder); + + status = connector_status_connected; + + nv_connector->nv_detected_encoder = nv_detected_encoder; + + if (nv_connector->type == NVKMS_CONNECTOR_TYPE_DVI_I) { + drm_object_property_set_value( + &connector->base, + dev->mode_config.dvi_i_subconnector_property, + detected_encoder->encoder_type == DRM_MODE_ENCODER_DAC ? 
+ DRM_MODE_SUBCONNECTOR_DVIA : + DRM_MODE_SUBCONNECTOR_DVID); + } + +done: + + nv_drm_free(pDetectParams); + + if (status == connector_status_disconnected && + nv_connector->modeset_permission_filep) { + nv_drm_connector_revoke_permissions(dev, nv_connector); + } + + return status; +} + +static void __nv_drm_connector_force(struct drm_connector *connector) +{ + __nv_drm_connector_detect_internal(connector); +} + +static enum drm_connector_status +nv_drm_connector_detect(struct drm_connector *connector, bool force) +{ + return __nv_drm_connector_detect_internal(connector); +} + +static struct drm_connector_funcs nv_connector_funcs = { + .destroy = nv_drm_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .force = __nv_drm_connector_force, + .detect = nv_drm_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int nv_drm_connector_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + struct nv_drm_encoder *nv_detected_encoder = + nv_connector->nv_detected_encoder; + NvU32 modeIndex = 0; + int count = 0; + + + if (nv_connector->edid != NULL) { + nv_drm_connector_update_edid_property(connector, nv_connector->edid); + } + + while (1) { + struct drm_display_mode *mode; + struct NvKmsKapiDisplayMode displayMode; + NvBool valid = 0; + NvBool preferredMode = NV_FALSE; + int ret; + + ret = nvKms->getDisplayMode(nv_dev->pDevice, + nv_detected_encoder->hDisplay, + modeIndex++, &displayMode, &valid, + &preferredMode); + + if (ret < 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to get mode at modeIndex %d of NvKmsKapiDisplay 0x%08x", + modeIndex, nv_detected_encoder->hDisplay); + break; + } + + /* Is end of mode-list */ + + if (ret == 0) { + break; + } + + /* Ignore invalid modes */ + + if (!valid) { + continue; + } + + mode = drm_mode_create(connector->dev); + + if (mode == NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to create mode for NvKmsKapiDisplay 0x%08x", + nv_detected_encoder->hDisplay); + continue; + } + + nvkms_display_mode_to_drm_mode(&displayMode, mode); + + if (preferredMode) { + mode->type |= DRM_MODE_TYPE_PREFERRED; + } + + /* Add a mode to a connector's probed_mode list */ + + drm_mode_probed_add(connector, mode); + + count++; + } + + return count; +} + +static int nv_drm_connector_mode_valid(struct drm_connector *connector, +#if defined(NV_DRM_CONNECTOR_HELPER_FUNCS_MODE_VALID_HAS_CONST_MODE_ARG) + const struct drm_display_mode *mode) +#else + struct drm_display_mode *mode) +#endif +{ + struct drm_device *dev = connector->dev; + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_encoder *nv_detected_encoder = + to_nv_connector(connector)->nv_detected_encoder; + struct NvKmsKapiDisplayMode displayMode; + + if (nv_detected_encoder == NULL) { + return MODE_BAD; + } + + drm_mode_to_nvkms_display_mode(mode, &displayMode); + + if (!nvKms->validateDisplayMode(nv_dev->pDevice, + nv_detected_encoder->hDisplay, + &displayMode)) { + return MODE_BAD; + } + + return MODE_OK; +} + +static struct drm_encoder* +nv_drm_connector_best_encoder(struct drm_connector *connector) +{ + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + + if (nv_connector->nv_detected_encoder != NULL) { + return 
&nv_connector->nv_detected_encoder->base;
+    }
+
+    return NULL;
+}
+
+#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
+static const NvU32 __nv_drm_connector_supported_colorspaces =
+    BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
+    BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
+#endif
+
+#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
+static int
+__nv_drm_connector_atomic_check(struct drm_connector *connector,
+                                struct drm_atomic_state *state)
+{
+    struct drm_connector_state *new_connector_state =
+        drm_atomic_get_new_connector_state(state, connector);
+    struct drm_connector_state *old_connector_state =
+        drm_atomic_get_old_connector_state(state, connector);
+    struct nv_drm_device *nv_dev = to_nv_device(connector->dev);
+
+    struct drm_crtc *crtc = new_connector_state->crtc;
+    struct drm_crtc_state *crtc_state;
+    struct nv_drm_crtc_state *nv_crtc_state;
+    struct NvKmsKapiHeadRequestedConfig *req_config;
+
+    if (!crtc) {
+        return 0;
+    }
+
+    crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+    nv_crtc_state = to_nv_crtc_state(crtc_state);
+    req_config = &nv_crtc_state->req_config;
+
+    /*
+     * Override metadata for the entire head instead of allowing NVKMS to
+     * derive it from the layers' metadata.
+     *
+     * This is the metadata that will be sent to the display and, if
+     * applicable, layers will be tone mapped to this metadata rather than
+     * that of the display.
+     */
+    req_config->flags.hdrInfoFrameChanged =
+        !drm_connector_atomic_hdr_metadata_equal(old_connector_state,
+                                                 new_connector_state);
+    if (new_connector_state->hdr_output_metadata &&
+        new_connector_state->hdr_output_metadata->data) {
+
+        /*
+         * Note that HDMI definitions are used here even though we might not
+         * be using HDMI. While that seems odd, it is consistent with
+         * upstream behavior.
+ */ + + struct hdr_output_metadata *hdr_metadata = + new_connector_state->hdr_output_metadata->data; + struct hdr_metadata_infoframe *info_frame = + &hdr_metadata->hdmi_metadata_type1; + unsigned int i; + + if (hdr_metadata->metadata_type != HDMI_STATIC_METADATA_TYPE1) { + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(info_frame->display_primaries); i++) { + req_config->modeSetConfig.hdrInfoFrame.staticMetadata.displayPrimaries[i].x = + info_frame->display_primaries[i].x; + req_config->modeSetConfig.hdrInfoFrame.staticMetadata.displayPrimaries[i].y = + info_frame->display_primaries[i].y; + } + + req_config->modeSetConfig.hdrInfoFrame.staticMetadata.whitePoint.x = + info_frame->white_point.x; + req_config->modeSetConfig.hdrInfoFrame.staticMetadata.whitePoint.y = + info_frame->white_point.y; + req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxDisplayMasteringLuminance = + info_frame->max_display_mastering_luminance; + req_config->modeSetConfig.hdrInfoFrame.staticMetadata.minDisplayMasteringLuminance = + info_frame->min_display_mastering_luminance; + req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxCLL = + info_frame->max_cll; + req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxFALL = + info_frame->max_fall; + + req_config->modeSetConfig.hdrInfoFrame.eotf = info_frame->eotf; + + req_config->modeSetConfig.hdrInfoFrame.enabled = NV_TRUE; + } else { + req_config->modeSetConfig.hdrInfoFrame.enabled = NV_FALSE; + } + + req_config->flags.colorimetryChanged = + (old_connector_state->colorspace != new_connector_state->colorspace); + // When adding a case here, also add to __nv_drm_connector_supported_colorspaces + switch (new_connector_state->colorspace) { + case DRM_MODE_COLORIMETRY_DEFAULT: + req_config->modeSetConfig.colorimetry = + NVKMS_OUTPUT_COLORIMETRY_DEFAULT; + break; + case DRM_MODE_COLORIMETRY_BT2020_RGB: + case DRM_MODE_COLORIMETRY_BT2020_YCC: + // Ignore RGB/YCC + // See https://patchwork.freedesktop.org/patch/525496/?series=111865&rev=4 + req_config->modeSetConfig.colorimetry = + NVKMS_OUTPUT_COLORIMETRY_BT2100; + break; + default: + // XXX HDR TODO: Add support for more color spaces + NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported color space"); + return -EINVAL; + } + + return 0; +} +#endif /* defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT) */ + +static const struct drm_connector_helper_funcs nv_connector_helper_funcs = { + .get_modes = nv_drm_connector_get_modes, + .mode_valid = nv_drm_connector_mode_valid, + .best_encoder = nv_drm_connector_best_encoder, +#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT) + .atomic_check = __nv_drm_connector_atomic_check, +#endif +}; + +static struct drm_connector* +nv_drm_connector_new(struct drm_device *dev, + NvU32 physicalIndex, NvKmsConnectorType type, + NvBool internal, + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_connector *nv_connector = NULL; + int ret = -ENOMEM; + + if ((nv_connector = nv_drm_calloc(1, sizeof(*nv_connector))) == NULL) { + goto failed; + } + + if ((nv_connector->base.state = + nv_drm_calloc(1, sizeof(*nv_connector->base.state))) == NULL) { + goto failed_state_alloc; + } + nv_connector->base.state->connector = &nv_connector->base; + + nv_connector->physicalIndex = physicalIndex; + nv_connector->type = type; + nv_connector->internal = internal; + nv_connector->modeset_permission_filep = NULL; + nv_connector->modeset_permission_crtc = NULL; + + strcpy(nv_connector->dpAddress, 
dpAddress);
+
+    ret = drm_connector_init(
+        dev,
+        &nv_connector->base, &nv_connector_funcs,
+        nvkms_connector_type_to_drm_connector_type(type, internal));
+
+    if (ret != 0) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to initialize connector created from physical index %u",
+            nv_connector->physicalIndex);
+        goto failed_connector_init;
+    }
+
+    drm_connector_helper_add(&nv_connector->base, &nv_connector_helper_funcs);
+
+    nv_connector->base.polled = DRM_CONNECTOR_POLL_HPD;
+
+    if (nv_connector->type == NVKMS_CONNECTOR_TYPE_VGA) {
+        nv_connector->base.polled =
+            DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+    }
+
+#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
+    if (nv_connector->type == NVKMS_CONNECTOR_TYPE_HDMI) {
+#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
+        if (drm_mode_create_hdmi_colorspace_property(
+                &nv_connector->base,
+                __nv_drm_connector_supported_colorspaces) == 0) {
+#else
+        if (drm_mode_create_hdmi_colorspace_property(&nv_connector->base) == 0) {
+#endif
+            drm_connector_attach_colorspace_property(&nv_connector->base);
+        }
+        drm_connector_attach_hdr_output_metadata_property(&nv_connector->base);
+    } else if (nv_connector->type == NVKMS_CONNECTOR_TYPE_DP) {
+#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
+        if (drm_mode_create_dp_colorspace_property(
+                &nv_connector->base,
+                __nv_drm_connector_supported_colorspaces) == 0) {
+#else
+        if (drm_mode_create_dp_colorspace_property(&nv_connector->base) == 0) {
+#endif
+            drm_connector_attach_colorspace_property(&nv_connector->base);
+        }
+        drm_connector_attach_hdr_output_metadata_property(&nv_connector->base);
+    }
+#endif /* defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT) */
+
+    /* Register connector with DRM subsystem */
+
+    ret = drm_connector_register(&nv_connector->base);
+
+    if (ret != 0) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to register connector created from physical index %u",
+            nv_connector->physicalIndex);
+        goto failed_connector_register;
+    }
+
+    return &nv_connector->base;
+
+failed_connector_register:
+    drm_connector_cleanup(&nv_connector->base);
+
+failed_connector_init:
+    nv_drm_free(nv_connector->base.state);
+
+failed_state_alloc:
+    nv_drm_free(nv_connector);
+
+failed:
+    return ERR_PTR(ret);
+}
+
+/*
+ * Get the connector with the given physical index if one exists. Otherwise,
+ * create and return a new connector.
+ */
+struct drm_connector*
+nv_drm_get_connector(struct drm_device *dev,
+                     NvU32 physicalIndex, NvKmsConnectorType type,
+                     NvBool internal,
+                     char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH])
+{
+    struct drm_connector *connector = NULL;
+    struct drm_connector_list_iter conn_iter;
+    drm_connector_list_iter_begin(dev, &conn_iter);
+
+    /* Look for an existing connector with the same physical index */
+    drm_for_each_connector_iter(connector, &conn_iter) {
+        struct nv_drm_connector *nv_connector = to_nv_connector(connector);
+
+        if (nv_connector->physicalIndex == physicalIndex) {
+            BUG_ON(nv_connector->type != type ||
+                   nv_connector->internal != internal);
+
+            if (strcmp(nv_connector->dpAddress, dpAddress) == 0) {
+                goto done;
+            }
+        }
+    }
+    connector = NULL;
+
+done:
+    drm_connector_list_iter_end(&conn_iter);
+
+    if (!connector) {
+        connector = nv_drm_connector_new(dev,
+                                         physicalIndex, type, internal,
+                                         dpAddress);
+    }
+
+    return connector;
+}
+
+/*
+ * Revoke the permissions on this connector.
+ */
+bool nv_drm_connector_revoke_permissions(struct drm_device *dev,
+                                         struct nv_drm_connector* nv_connector)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    bool ret = true;
+
+    if (nv_connector->modeset_permission_crtc) {
+        if (nv_connector->nv_detected_encoder) {
+            ret = nvKms->revokePermissions(
+                nv_dev->pDevice, nv_connector->modeset_permission_crtc->head,
+                nv_connector->nv_detected_encoder->hDisplay);
+        }
+        nv_connector->modeset_permission_crtc->modeset_permission_filep = NULL;
+        nv_connector->modeset_permission_crtc = NULL;
+    }
+    nv_connector->modeset_permission_filep = NULL;
+    return ret;
+}
+
+#endif
diff --git a/kernel-open/nvidia-drm/nvidia-drm-connector.h b/kernel-open/nvidia-drm/nvidia-drm-connector.h
new file mode 100644
index 0000000..5ee2814
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-connector.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DRM_CONNECTOR_H__
+#define __NVIDIA_DRM_CONNECTOR_H__
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#include <drm/drm_connector.h>
+
+#include "nvtypes.h"
+#include "nvkms-api-types.h"
+
+struct nv_drm_connector {
+    NvU32 physicalIndex;
+
+    NvBool internal;
+    NvKmsConnectorType type;
+
+    char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH];
+
+    struct nv_drm_encoder *nv_detected_encoder;
+    struct edid *edid;
+
+    atomic_t connection_status_dirty;
+
+    /**
+     * @modeset_permission_filep:
+     *
+     * The filep using this connector with DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS.
+     */
+    struct drm_file *modeset_permission_filep;
+
+    /**
+     * @modeset_permission_crtc:
+     *
+     * The crtc using this connector with DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS.
+     */
+    struct nv_drm_crtc *modeset_permission_crtc;
+
+    struct drm_connector base;
+};
+
+static inline struct nv_drm_connector *to_nv_connector(
+    struct drm_connector *connector)
+{
+    if (connector == NULL) {
+        return NULL;
+    }
+    return container_of(connector, struct nv_drm_connector, base);
+}
+
+static inline void nv_drm_connector_mark_connection_status_dirty(
+    struct nv_drm_connector *nv_connector)
+{
+    atomic_cmpxchg(&nv_connector->connection_status_dirty, false, true);
+}
+
+static inline bool nv_drm_connector_check_connection_status_dirty_and_clear(
+    struct nv_drm_connector *nv_connector)
+{
+    return atomic_cmpxchg(
+        &nv_connector->connection_status_dirty,
+        true,
+        false) == true;
+}
+
+struct drm_connector*
+nv_drm_get_connector(struct drm_device *dev,
+                     NvU32 physicalIndex, NvKmsConnectorType type,
+                     NvBool internal,
+                     char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]);
+
+bool nv_drm_connector_revoke_permissions(struct drm_device *dev,
+                                         struct nv_drm_connector *nv_connector);
+
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#endif /* __NVIDIA_DRM_CONNECTOR_H__ */
diff --git a/kernel-open/nvidia-drm/nvidia-drm-crtc.c b/kernel-open/nvidia-drm/nvidia-drm-crtc.c
new file mode 100644
index 0000000..e8c92d4
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-crtc.c
@@ -0,0 +1,3131 @@
+/*
+ * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#include "nvidia-drm-helper.h"
+#include "nvidia-drm-priv.h"
+#include "nvidia-drm-crtc.h"
+#include "nvidia-drm-connector.h"
+#include "nvidia-drm-encoder.h"
+#include "nvidia-drm-utils.h"
+#include "nvidia-drm-fb.h"
+#include "nvidia-drm-ioctl.h"
+#include "nvidia-drm-format.h"
+
+#include "nvmisc.h"
+#include "nv_common_utils.h"
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+
+/*
+ * The two arrays below specify the PQ EOTF transfer function that's used to
+ * convert from PQ encoded L'M'S' fixed-point to linear LMS FP16. This transfer
+ * function is the inverse of the OETF curve.
+ *
+ * TODO: Generate table with max number of entries for ILUT.
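+ *
+ * For reference (informative, per SMPTE ST 2084): with m1 = 2610/16384,
+ * m2 = (2523/4096) * 128, c1 = 3424/4096, c2 = (2413/4096) * 32, and
+ * c3 = (2392/4096) * 32, the PQ EOTF maps a nonlinear value E' in [0,1] to a
+ * normalized luminance
+ *
+ *   Y = ( max(E'^(1/m2) - c1, 0) / (c2 - c3 * E'^(1/m2)) )^(1/m1)
+ *
+ * where Y = 1.0 corresponds to 10000 cd/m^2.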
+ */ +static const NvU32 __eotf_pq_512_seg_sizes_log2[] = { + 6, 6, 4, 4, 4, 3, 4, 3, 3, 3, 2, 2, 2, 3, 3, 2, + 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 6, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 1, 2, + 2, 1, 1, 2, 2, 2, 2, 1, 2, 1, 1, 2, 1, 4, 2, 2, +}; +static const NvU16 __eotf_pq_512_entries[] = { + 0x0000, 0x0001, 0x0003, 0x0005, 0x0008, 0x000C, 0x0011, 0x0016, 0x001B, 0x0022, 0x0028, 0x002F, 0x0037, 0x003F, 0x0048, 0x0051, + 0x005A, 0x0064, 0x006F, 0x007A, 0x0085, 0x0091, 0x009E, 0x00AB, 0x00B8, 0x00C6, 0x00D4, 0x00E3, 0x00F3, 0x0102, 0x0113, 0x0123, + 0x0135, 0x0146, 0x0158, 0x016B, 0x017E, 0x0192, 0x01A6, 0x01BB, 0x01D0, 0x01E5, 0x01FC, 0x0212, 0x0229, 0x0241, 0x0259, 0x0272, + 0x028B, 0x02A4, 0x02BE, 0x02D9, 0x02F4, 0x0310, 0x032C, 0x0349, 0x0366, 0x0384, 0x03A2, 0x03C1, 0x03E0, 0x0400, 0x0421, 0x0442, + 0x0463, 0x0485, 0x04A8, 0x04CB, 0x04EF, 0x0513, 0x0538, 0x055D, 0x0583, 0x05AA, 0x05D1, 0x05F9, 0x0621, 0x064A, 0x0673, 0x069D, + 0x06C7, 0x06F3, 0x071E, 0x074B, 0x0777, 0x07A5, 0x07D3, 0x0801, 0x0819, 0x0830, 0x0849, 0x0861, 0x087A, 0x0893, 0x08AD, 0x08C7, + 0x08E1, 0x08FB, 0x0916, 0x0931, 0x094C, 0x0968, 0x0984, 0x09A0, 0x09BD, 0x09DA, 0x09F7, 0x0A15, 0x0A33, 0x0A51, 0x0A70, 0x0A8F, + 0x0AAE, 0x0ACE, 0x0AEE, 0x0B0E, 0x0B2F, 0x0B50, 0x0B71, 0x0B93, 0x0BB5, 0x0BD7, 0x0BFA, 0x0C0F, 0x0C20, 0x0C32, 0x0C44, 0x0C56, + 0x0C69, 0x0CB5, 0x0D03, 0x0D55, 0x0DA9, 0x0E01, 0x0E5B, 0x0EB9, 0x0F1B, 0x0F7F, 0x0FE7, 0x1029, 0x1061, 0x109A, 0x10D5, 0x1111, + 0x1150, 0x1190, 0x11D3, 0x1217, 0x125E, 0x12A6, 0x12F0, 0x133D, 0x138B, 0x13DC, 0x1417, 0x1442, 0x146D, 0x149A, 0x14C8, 0x14F7, + 0x1527, 0x1558, 0x158B, 0x15BF, 0x15F4, 0x162A, 0x1662, 0x169B, 0x16D5, 0x1711, 0x174E, 0x178C, 0x17CC, 0x1806, 0x1828, 0x184A, + 0x186D, 0x18B4, 0x18FF, 0x194D, 0x199E, 0x19F3, 0x1A4B, 0x1AA7, 0x1B06, 0x1B37, 0x1B69, 0x1B9B, 0x1BCF, 0x1C02, 0x1C1D, 0x1C38, + 0x1C54, 0x1C70, 0x1C8D, 0x1CAB, 0x1CC9, 0x1CE7, 0x1D06, 0x1D26, 0x1D46, 0x1D88, 0x1DCC, 0x1E13, 0x1E5C, 0x1EA8, 0x1EF6, 0x1F47, + 0x1F9A, 0x1FF1, 0x2025, 0x2053, 0x2082, 0x20B3, 0x20E6, 0x211A, 0x214F, 0x2187, 0x21C0, 0x21FA, 0x2237, 0x2275, 0x22B5, 0x22F7, + 0x233B, 0x23C9, 0x2430, 0x247F, 0x24D3, 0x252B, 0x2589, 0x25EB, 0x2653, 0x26C1, 0x2734, 0x27AD, 0x2817, 0x2838, 0x285A, 0x287C, + 0x28A0, 0x28C5, 0x28EA, 0x2911, 0x2938, 0x2960, 0x298A, 0x29B4, 0x29DF, 0x2A0C, 0x2A39, 0x2A68, 0x2A98, 0x2AFA, 0x2B62, 0x2BCE, + 0x2C20, 0x2C5B, 0x2C99, 0x2CDA, 0x2D1E, 0x2D65, 0x2DB0, 0x2DFD, 0x2E4E, 0x2EA3, 0x2EFC, 0x2F58, 0x2FB8, 0x300E, 0x3043, 0x307A, + 0x30B3, 0x30D0, 0x30EE, 0x310D, 0x312C, 0x314C, 0x316D, 0x318E, 0x31B0, 0x31D3, 0x31F6, 0x321A, 0x323F, 0x3265, 0x328B, 0x32B2, + 0x32DA, 0x332D, 0x3383, 0x33DC, 0x341D, 0x344D, 0x347F, 0x34B4, 0x34EA, 0x3523, 0x355E, 0x359B, 0x35DB, 0x361D, 0x3662, 0x36A9, + 0x36F3, 0x3740, 0x3791, 0x37E4, 0x381D, 0x384A, 0x3879, 0x38A9, 0x38DB, 0x3910, 0x3946, 0x397E, 0x39B8, 0x39F5, 0x3A34, 0x3A75, + 0x3AB9, 0x3AFF, 0x3B48, 0x3B94, 0x3BE2, 0x3C1A, 0x3C44, 0x3C70, 0x3C9D, 0x3CA0, 0x3CA3, 0x3CA6, 0x3CA9, 0x3CAC, 0x3CAF, 0x3CB1, + 0x3CB4, 0x3CB7, 0x3CBA, 0x3CBD, 0x3CC0, 0x3CC3, 0x3CC6, 0x3CC9, 0x3CCC, 0x3CCF, 0x3CD2, 0x3CD5, 0x3CD8, 0x3CDB, 0x3CDE, 0x3CE1, + 0x3CE4, 0x3CE7, 0x3CEA, 0x3CEE, 0x3CF1, 0x3CF4, 0x3CF7, 0x3CFA, 0x3CFD, 0x3D00, 0x3D03, 0x3D06, 0x3D09, 0x3D0D, 0x3D10, 0x3D13, + 0x3D16, 0x3D19, 0x3D1C, 0x3D20, 0x3D23, 0x3D26, 0x3D29, 0x3D2C, 0x3D30, 0x3D33, 0x3D36, 0x3D39, 0x3D3D, 0x3D40, 0x3D43, 0x3D46, + 0x3D4A, 0x3D4D, 0x3D50, 0x3D54, 0x3D57, 0x3D5A, 0x3D5D, 0x3D61, 0x3D64, 0x3D9B, 0x3DD3, 0x3E0D, 0x3E4A, 0x3E89, 0x3ECA, 
0x3F0E, + 0x3F54, 0x3F9C, 0x3FE8, 0x401B, 0x4043, 0x406D, 0x4099, 0x40C6, 0x40F4, 0x4124, 0x4156, 0x418A, 0x41C0, 0x41F8, 0x4232, 0x426D, + 0x42AB, 0x42EB, 0x432E, 0x4373, 0x43BA, 0x4428, 0x4479, 0x44D0, 0x452D, 0x4591, 0x45FC, 0x466F, 0x46EB, 0x472C, 0x476F, 0x47B5, + 0x47FE, 0x4824, 0x484B, 0x4874, 0x489D, 0x48F5, 0x4954, 0x4986, 0x49B9, 0x49EF, 0x4A26, 0x4A5F, 0x4A9B, 0x4AD9, 0x4B19, 0x4B9F, + 0x4C18, 0x4C66, 0x4CBA, 0x4CE6, 0x4D13, 0x4D43, 0x4D74, 0x4DA7, 0x4DDC, 0x4E12, 0x4E4B, 0x4E86, 0x4EC3, 0x4F02, 0x4F44, 0x4F88, + 0x4FCE, 0x500C, 0x5032, 0x5082, 0x50D8, 0x5106, 0x5135, 0x5166, 0x5199, 0x5205, 0x5278, 0x52F5, 0x537C, 0x53C3, 0x5406, 0x542D, + 0x5454, 0x54A9, 0x5503, 0x550F, 0x551B, 0x5527, 0x5533, 0x5540, 0x554C, 0x5559, 0x5565, 0x5572, 0x557F, 0x558C, 0x5599, 0x55A7, + 0x55B4, 0x55C1, 0x55CF, 0x5607, 0x5641, 0x567E, 0x56BC, 0x56FE, 0x5741, 0x5788, 0x57D0, +}; + +/* + * The two arrays below specify the PQ OETF transfer function that's used to + * convert from linear LMS FP16 to PQ encoded L'M'S' fixed-point. + * + * TODO: Generate table with max number of entries for ILUT. + */ +static const NvU32 __oetf_pq_512_seg_sizes_log2[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, + 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, + 5, +}; +static const NvU16 __oetf_pq_512_entries[] = { + 0x0000, 0x000C, 0x0014, 0x001C, 0x0028, 0x003C, 0x005C, 0x008C, 0x00D0, 0x0134, 0x0184, 0x01C8, 0x0238, 0x029C, 0x033C, 0x03C4, + 0x043C, 0x04A4, 0x0504, 0x0560, 0x0600, 0x0690, 0x0714, 0x078C, 0x07FC, 0x0864, 0x08C8, 0x0924, 0x0980, 0x09D4, 0x0A24, 0x0A70, + 0x0B04, 0x0B90, 0x0C10, 0x0C88, 0x0CFC, 0x0D68, 0x0DD4, 0x0E38, 0x0EF4, 0x0FA4, 0x1048, 0x10E4, 0x1174, 0x1200, 0x1284, 0x1304, + 0x13F4, 0x14D0, 0x159C, 0x165C, 0x1714, 0x17C0, 0x1864, 0x1900, 0x1A28, 0x1B34, 0x1C30, 0x1D1C, 0x1DFC, 0x1ECC, 0x1F94, 0x2050, + 0x2104, 0x21B0, 0x2258, 0x22F8, 0x2390, 0x2424, 0x24B4, 0x2540, 0x25C4, 0x2648, 0x26C4, 0x2740, 0x27B8, 0x282C, 0x289C, 0x290C, + 0x29E0, 0x2AAC, 0x2B70, 0x2C2C, 0x2CE0, 0x2D90, 0x2E38, 0x2ED8, 0x2F74, 0x300C, 0x30A0, 0x3130, 0x31BC, 0x3244, 0x32C8, 0x3348, + 0x3440, 0x352C, 0x360C, 0x36E4, 0x37B4, 0x387C, 0x393C, 0x39F8, 0x3AA8, 0x3B58, 0x3C00, 0x3CA4, 0x3D44, 0x3DDC, 0x3E74, 0x3F04, + 0x401C, 0x4128, 0x4228, 0x431C, 0x4408, 0x44E8, 0x45C4, 0x4694, 0x475C, 0x4820, 0x48DC, 0x4994, 0x4A48, 0x4AF4, 0x4B9C, 0x4C3C, + 0x4D78, 0x4EA0, 0x4FBC, 0x50CC, 0x51D0, 0x52CC, 0x53BC, 0x54A0, 0x5580, 0x5658, 0x5728, 0x57F0, 0x58B4, 0x5974, 0x5A2C, 0x5ADC, + 0x5C34, 0x5D7C, 0x5EB4, 0x5FDC, 0x60F4, 0x6204, 0x630C, 0x6404, 0x64F8, 0x65E0, 0x66C4, 0x679C, 0x6870, 0x693C, 0x6A04, 0x6AC4, + 0x6C38, 0x6D94, 0x6EE4, 0x7020, 0x7150, 0x7274, 0x738C, 0x7498, 0x7598, 0x7694, 0x7784, 0x786C, 0x794C, 0x7A24, 0x7AF8, 0x7BC4, + 0x7D50, 0x7EC4, 0x8024, 0x8174, 0x82B4, 0x83E8, 0x850C, 0x8628, 0x8738, 0x883C, 0x8938, 0x8A2C, 0x8B18, 0x8BFC, 0x8CD8, 0x8DB0, + 0x8F4C, 0x90D0, 0x9240, 0x939C, 0x94EC, 0x962C, 0x975C, 0x9880, 0x999C, 0x9AAC, 0x9BB0, 0x9CAC, 0x9DA0, 0x9E8C, 0x9F70, 0xA04C, + 0xA1F4, 0xA384, 0xA500, 0xA664, 0xA7BC, 0xA904, 0xAA3C, 0xAB6C, 0xAC8C, 0xADA0, 0xAEAC, 0xAFAC, 0xB0A4, 0xB194, 0xB27C, 0xB360, + 0xB510, 0xB6A4, 0xB824, 0xB994, 0xBAF0, 0xBC3C, 0xBD78, 0xBEA8, 0xBFCC, 0xC0E4, 0xC1F0, 0xC2F4, 0xC3F0, 0xC4E4, 0xC5CC, 0xC6B0, + 0xC78C, 0xC860, 0xC930, 0xC9F8, 0xCABC, 0xCB7C, 0xCC38, 0xCCEC, 0xCD9C, 0xCE48, 0xCEF0, 0xCF94, 0xD034, 0xD0D4, 0xD16C, 0xD200, + 0xD294, 0xD324, 0xD3B4, 0xD43C, 0xD4C4, 0xD54C, 0xD5CC, 0xD650, 0xD6CC, 0xD748, 0xD7C4, 0xD83C, 0xD8B0, 0xD924, 0xD994, 0xDA08, + 0xDAE0, 0xDBB4, 0xDC84, 
0xDD4C, 0xDE10, 0xDECC, 0xDF84, 0xE038, 0xE0E8, 0xE194, 0xE238, 0xE2DC, 0xE37C, 0xE418, 0xE4B0, 0xE544,
+    0xE5D4, 0xE664, 0xE6F0, 0xE778, 0xE800, 0xE884, 0xE904, 0xE984, 0xEA00, 0xEA7C, 0xEAF4, 0xEB68, 0xEBDC, 0xEC50, 0xECC0, 0xED30,
+    0xEE08, 0xEED8, 0xEFA4, 0xF068, 0xF128, 0xF1E4, 0xF298, 0xF348, 0xF3F4, 0xF49C, 0xF540, 0xF5E0, 0xF67C, 0xF714, 0xF7A8, 0xF83C,
+    0xF8CC, 0xF958, 0xF9E0, 0xFA68, 0xFAEC, 0xFB6C, 0xFBE8, 0xFC64, 0xFCE0, 0xFD58, 0xFDCC, 0xFE40, 0xFEB4, 0xFF24, 0xFF90, 0xFFFF,
+};
+
+#define NUM_VSS_HEADER_ENTRIES (NVKMS_LUT_VSS_HEADER_SIZE / NVKMS_LUT_CAPS_LUT_ENTRY_SIZE)
+
+static int
+nv_drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
+                                            struct drm_property_blob **blob,
+                                            uint64_t blob_id,
+                                            ssize_t expected_size,
+                                            NvBool *replaced)
+{
+    struct drm_property_blob *old_blob = *blob;
+    struct drm_property_blob *new_blob = NULL;
+
+    if (blob_id != 0) {
+        new_blob = drm_property_lookup_blob(dev, blob_id);
+        if (new_blob == NULL) {
+            return -EINVAL;
+        }
+
+        if ((expected_size > 0) &&
+            (new_blob->length != expected_size)) {
+            drm_property_blob_put(new_blob);
+            return -EINVAL;
+        }
+    }
+
+    if (old_blob != new_blob) {
+        drm_property_blob_put(old_blob);
+        if (new_blob) {
+            drm_property_blob_get(new_blob);
+        }
+        *blob = new_blob;
+        *replaced = true;
+    } else {
+        *replaced = false;
+    }
+
+    drm_property_blob_put(new_blob);
+
+    return 0;
+}
+
+static void nv_drm_plane_destroy(struct drm_plane *plane)
+{
+    struct nv_drm_plane *nv_plane = to_nv_plane(plane);
+
+    /* plane->state gets freed here */
+    drm_plane_cleanup(plane);
+
+    nv_drm_free(nv_plane);
+}
+
+static inline void
+plane_config_clear(struct NvKmsKapiLayerConfig *layerConfig)
+{
+    if (layerConfig == NULL) {
+        return;
+    }
+
+    memset(layerConfig, 0, sizeof(*layerConfig));
+    layerConfig->csc = NVKMS_IDENTITY_CSC_MATRIX;
+}
+
+static inline void
+plane_req_config_disable(struct NvKmsKapiLayerRequestedConfig *req_config)
+{
+    /* Clear layer config */
+    plane_config_clear(&req_config->config);
+
+    /* Set flags to get cleared layer config applied */
+    req_config->flags.surfaceChanged = NV_TRUE;
+    req_config->flags.srcXYChanged = NV_TRUE;
+    req_config->flags.srcWHChanged = NV_TRUE;
+    req_config->flags.dstXYChanged = NV_TRUE;
+    req_config->flags.dstWHChanged = NV_TRUE;
+}
+
+static inline void
+cursor_req_config_disable(struct NvKmsKapiCursorRequestedConfig *req_config)
+{
+    req_config->surface = NULL;
+    req_config->flags.surfaceChanged = NV_TRUE;
+}
+
+static NvU64 ctm_val_to_csc_val(NvU64 ctm_val)
+{
+    /*
+     * Values in the CTM are encoded in S31.32 sign-magnitude fixed-
+     * point format, while NvKms CSC values are signed 2's-complement
+     * S15.16 (Ssign-extend12-3.16?) fixed-point format.
+     */
+    NvU64 sign_bit = ctm_val & (1ULL << 63);
+    NvU64 magnitude = ctm_val & ~sign_bit;
+
+    /*
+     * Drop the low 16 bits of the fractional part and the high 17 bits
+     * of the integral part. Drop 17 bits to avoid corner cases where
+     * the highest resulting bit is a 1, causing the `csc_val = -csc_val`
+     * line to result in a positive number.
+     *
+     * NOTE: Upstream precedent is to clamp to the range supported by hardware.
+     * Here, we truncate the integral part to 14 bits, and will later truncate
+     * further to the 3-5 bits supported by hardware within the display HAL.
+     *
+     * TODO: Clamping would be better, in the rare event that we receive
+     * coefficients that are large enough for it to matter.
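+     *
+     * Worked example (informative): +1.0 in S31.32 sign-magnitude is
+     * 0x0000000100000000; `(magnitude >> 16) & ((1ULL << 31) - 1)` yields
+     * 0x00010000, which is 1.0 in S15.16. The sign bit then selects
+     * between csc_val and -csc_val.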
+ */ + NvS32 csc_val = (magnitude >> 16) & ((1ULL << 31) - 1); + if (sign_bit) { + csc_val = -csc_val; + } + + return csc_val; +} + +static void ctm_to_csc(struct NvKmsCscMatrix *nvkms_csc, + struct drm_color_ctm *drm_ctm) +{ + int y; + + /* CTM is a 3x3 matrix while ours is 3x4. Zero out the last column. */ + nvkms_csc->m[0][3] = nvkms_csc->m[1][3] = nvkms_csc->m[2][3] = 0; + + for (y = 0; y < 3; y++) { + int x; + + for (x = 0; x < 3; x++) { + nvkms_csc->m[y][x] = ctm_val_to_csc_val(drm_ctm->matrix[y*3 + x]); + } + } +} + +static void ctm_3x4_to_csc(struct NvKmsCscMatrix *nvkms_csc, + struct drm_color_ctm_3x4 *drm_ctm_3x4) +{ + int y; + + for (y = 0; y < 3; y++) { + int x; + + for (x = 0; x < 4; x++) { + nvkms_csc->m[y][x] = + ctm_val_to_csc_val(drm_ctm_3x4->matrix[y*4 + x]); + } + } +} + +static void +cursor_plane_req_config_update(struct drm_plane *plane, + struct drm_plane_state *plane_state, + struct NvKmsKapiCursorRequestedConfig *req_config) +{ + struct nv_drm_plane *nv_plane = to_nv_plane(plane); + struct NvKmsKapiCursorRequestedConfig old_config = *req_config; + + if (plane_state->fb == NULL) { + cursor_req_config_disable(req_config); + return; + } + + memset(req_config, 0, sizeof(*req_config)); + req_config->surface = to_nv_framebuffer(plane_state->fb)->pSurface; + req_config->dstX = plane_state->crtc_x; + req_config->dstY = plane_state->crtc_y; + +#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) + if (plane->blend_mode_property != NULL && plane->alpha_property != NULL) { + + switch (plane_state->pixel_blend_mode) { + case DRM_MODE_BLEND_PREMULTI: + req_config->compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA; + break; + case DRM_MODE_BLEND_COVERAGE: + req_config->compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA; + break; + default: + /* + * We should not hit this, because + * plane_state->pixel_blend_mode should only have values + * registered in + * __nv_drm_plane_create_alpha_blending_properties(). + */ + WARN_ON("Unsupported blending mode"); + break; + + } + + req_config->compParams.surfaceAlpha = + plane_state->alpha >> 8; + + } else if (plane->blend_mode_property != NULL) { + + switch (plane_state->pixel_blend_mode) { + case DRM_MODE_BLEND_PREMULTI: + req_config->compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA; + break; + case DRM_MODE_BLEND_COVERAGE: + req_config->compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA; + break; + default: + /* + * We should not hit this, because + * plane_state->pixel_blend_mode should only have values + * registered in + * __nv_drm_plane_create_alpha_blending_properties(). + */ + WARN_ON("Unsupported blending mode"); + break; + + } + + } else { + req_config->compParams.compMode = + nv_plane->defaultCompositionMode; + } +#else + req_config->compParams.compMode = nv_plane->defaultCompositionMode; +#endif + + /* + * Unconditionally mark the surface as changed, even if nothing changed, + * so that we always get a flip event: a DRM client may flip with + * the same surface and wait for a flip event. 
+     */
+    req_config->flags.surfaceChanged = NV_TRUE;
+
+    if (old_config.surface == NULL &&
+        old_config.surface != req_config->surface) {
+        req_config->flags.dstXYChanged = NV_TRUE;
+        return;
+    }
+
+    req_config->flags.dstXYChanged =
+        old_config.dstX != req_config->dstX ||
+        old_config.dstY != req_config->dstY;
+}
+
+static void release_drm_nvkms_surface(struct nv_drm_nvkms_surface *drm_nvkms_surface)
+{
+    struct NvKmsKapiDevice *pDevice = drm_nvkms_surface->pDevice;
+
+    BUG_ON(drm_nvkms_surface->nvkms_surface == NULL);
+    BUG_ON(drm_nvkms_surface->nvkms_memory == NULL);
+    BUG_ON(drm_nvkms_surface->buffer == NULL);
+
+    nvKms->destroySurface(pDevice, drm_nvkms_surface->nvkms_surface);
+    nvKms->unmapMemory(pDevice, drm_nvkms_surface->nvkms_memory,
+                       NVKMS_KAPI_MAPPING_TYPE_KERNEL,
+                       drm_nvkms_surface->buffer);
+    nvKms->freeMemory(pDevice, drm_nvkms_surface->nvkms_memory);
+}
+
+static int init_drm_nvkms_surface(struct nv_drm_device *nv_dev,
+                                  struct nv_drm_nvkms_surface *drm_nvkms_surface,
+                                  struct nv_drm_nvkms_surface_params *surface_params)
+{
+    struct NvKmsKapiDevice *pDevice = nv_dev->pDevice;
+    NvU8 compressible = 0; // No compression
+
+    struct NvKmsKapiAllocateMemoryParams allocParams = {
+        .layout = NvKmsSurfaceMemoryLayoutPitch,
+        .type = NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
+        .size = surface_params->surface_size,
+        .useVideoMemory = nv_dev->hasVideoMemory,
+        .compressible = &compressible,
+    };
+
+    struct NvKmsKapiCreateSurfaceParams params = {};
+    struct NvKmsKapiMemory *surface_mem;
+    struct NvKmsKapiSurface *surface;
+    void *buffer;
+
+    params.format = surface_params->format;
+    params.width = surface_params->width;
+    params.height = surface_params->height;
+
+    /* Allocate displayable memory. */
+    surface_mem = nvKms->allocateMemory(nv_dev->pDevice, &allocParams);
+    if (surface_mem == NULL) {
+        return -ENOMEM;
+    }
+
+    /* Map memory in order to populate it. */
+    if (!nvKms->mapMemory(pDevice, surface_mem,
+                          NVKMS_KAPI_MAPPING_TYPE_KERNEL,
+                          &buffer)) {
+        nvKms->freeMemory(pDevice, surface_mem);
+        return -ENOMEM;
+    }
+
+    params.planes[0].memory = surface_mem;
+    params.planes[0].offset = 0;
+    params.planes[0].pitch = surface_params->surface_size;
+
+    /* Create surface. */
+    surface = nvKms->createSurface(pDevice, &params);
+    if (surface == NULL) {
+        nvKms->unmapMemory(pDevice, surface_mem,
+                           NVKMS_KAPI_MAPPING_TYPE_KERNEL, buffer);
+        nvKms->freeMemory(pDevice, surface_mem);
+        return -ENOMEM;
+    }
+
+    /* Pack into struct nv_drm_nvkms_surface. */
+    drm_nvkms_surface->pDevice = pDevice;
+    drm_nvkms_surface->nvkms_memory = surface_mem;
+    drm_nvkms_surface->nvkms_surface = surface;
+    drm_nvkms_surface->buffer = buffer;
+
+    /* Init refcount.
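+     * The last reference is dropped with kref_put(); the release callback
+     * passed there (free_drm_lut_surface() for LUT surfaces) then calls
+     * release_drm_nvkms_surface() to destroy, unmap, and free the surface.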
*/
+    kref_init(&drm_nvkms_surface->refcount);
+
+    return 0;
+}
+
+static struct nv_drm_lut_surface *alloc_drm_lut_surface(
+    struct nv_drm_device *nv_dev,
+    enum NvKmsLUTFormat entry_format,
+    enum NvKmsLUTVssType vss_type,
+    NvU32 num_vss_header_segments,
+    NvU32 num_vss_header_entries,
+    NvU32 num_entries)
+{
+    struct nv_drm_lut_surface *drm_lut_surface;
+    const size_t surface_size =
+        (((num_vss_header_entries + num_entries) *
+          NVKMS_LUT_CAPS_LUT_ENTRY_SIZE) + 255) & ~255; // 256-byte aligned
+
+    struct nv_drm_nvkms_surface_params params = {};
+
+    params.format = NvKmsSurfaceMemoryFormatR16G16B16A16;
+    params.width = num_vss_header_entries + num_entries;
+    params.height = 1;
+    params.surface_size = surface_size;
+
+    drm_lut_surface = nv_drm_calloc(1, sizeof(struct nv_drm_lut_surface));
+    if (drm_lut_surface == NULL) {
+        return NULL;
+    }
+
+    if (init_drm_nvkms_surface(nv_dev, &drm_lut_surface->base, &params) != 0) {
+        nv_drm_free(drm_lut_surface);
+        return NULL;
+    }
+
+    drm_lut_surface->properties.vssSegments = num_vss_header_segments;
+    drm_lut_surface->properties.vssType = vss_type;
+    drm_lut_surface->properties.lutEntries = num_entries;
+    drm_lut_surface->properties.entryFormat = entry_format;
+
+    return drm_lut_surface;
+}
+
+static void free_drm_lut_surface(struct kref *ref)
+{
+    struct nv_drm_nvkms_surface *drm_nvkms_surface =
+        container_of(ref, struct nv_drm_nvkms_surface, refcount);
+    struct nv_drm_lut_surface *drm_lut_surface =
+        container_of(drm_nvkms_surface, struct nv_drm_lut_surface, base);
+
+    // Clean up base
+    release_drm_nvkms_surface(drm_nvkms_surface);
+
+    nv_drm_free(drm_lut_surface);
+}
+
+static NvU32 fp32_lut_interp(
+    NvU16 entry0,
+    NvU16 entry1,
+    NvU32 interp,
+    NvU32 interp_max)
+{
+    NvU32 fp32_entry0 = nvKmsKapiUI32ToF32((NvU32) entry0);
+    NvU32 fp32_entry1 = nvKmsKapiUI32ToF32((NvU32) entry1);
+
+    NvU32 fp32_num0 = nvKmsKapiUI32ToF32(interp_max - interp);
+    NvU32 fp32_num1 = nvKmsKapiUI32ToF32(interp);
+    NvU32 fp32_denom = nvKmsKapiUI32ToF32(interp_max);
+
+    fp32_entry0 = nvKmsKapiF32Mul(fp32_entry0, fp32_num0);
+    fp32_entry0 = nvKmsKapiF32Div(fp32_entry0, fp32_denom);
+
+    fp32_entry1 = nvKmsKapiF32Mul(fp32_entry1, fp32_num1);
+    fp32_entry1 = nvKmsKapiF32Div(fp32_entry1, fp32_denom);
+
+    return nvKmsKapiF32Add(fp32_entry0, fp32_entry1);
+}
+
+static struct nv_drm_lut_surface *create_drm_ilut_surface_vss(
+    struct nv_drm_device *nv_dev,
+    struct nv_drm_plane *nv_plane,
+    struct nv_drm_plane_state *nv_drm_plane_state)
+{
+    static const NvU32 fp_norm = 0x42FA0000;  // FP32 125.0
+    static const NvU32 u10_norm = 0x447FC000; // FP32 1023.0
+    static const NvU32 u16_norm = 0x477FFF00; // FP32 UINT16_MAX
+    // FP32 UINT32_MAX (Precision limited to 2^32)
+    static const NvU32 u32_norm = 0x4F800000;
+
+    struct nv_drm_lut_surface *drm_lut_surface;
+
+    NvU32 entry_idx;
+    NvU32 num_entries;
+    NvU16 *lut_data;
+
+    const NvU32 *vss_header_seg_sizes = NULL;
+    NvU32 num_vss_header_segments = 0;
+    const NvU16 *vss_entries = NULL;
+    enum NvKmsLUTVssType vss_type = NVKMS_LUT_VSS_TYPE_NONE;
+
+    NvBool multiply = false;
+    NvU32 fp32_multiplier;
+
+    WARN_ON(!nv_plane->ilut_caps.supported);
+    WARN_ON(nv_plane->ilut_caps.entryFormat != NVKMS_LUT_FORMAT_FP16);
+    WARN_ON(nv_plane->ilut_caps.vssSupport != NVKMS_LUT_VSS_SUPPORTED);
+    WARN_ON(nv_plane->ilut_caps.vssType != NVKMS_LUT_VSS_TYPE_LINEAR);
+
+    /* Convert multiplier from S31.32 Sign-Magnitude format to FP32.
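+     *
+     * Worked example (informative): a multiplier of 1.5 arrives as
+     * upper = 1, lower = 0x80000000, and is reconstructed below as
+     * 1.0 + (0x80000000 / 2^32) = 1.5 in FP32.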
*/ + if (nv_drm_plane_state->degamma_multiplier != (((NvU64) 1) << 32)) { + NvU32 upper = (NvU32) (nv_drm_plane_state->degamma_multiplier >> 32); + NvU32 lower = (NvU32) nv_drm_plane_state->degamma_multiplier; + + /* Range property is configured to ensure sign bit = 0. */ + WARN_ON(nv_drm_plane_state->degamma_multiplier & (((NvU64) 1) << 63)); + + fp32_multiplier = + nvKmsKapiF32Add( + nvKmsKapiUI32ToF32(upper), + nvKmsKapiF32Div(nvKmsKapiUI32ToF32(lower), u32_norm)); + + multiply = true; + } + + /* Determine configuration based on specified EOTF. */ + if (nv_drm_plane_state->degamma_tf == NV_DRM_TRANSFER_FUNCTION_PQ) { + /* Need VSS for PQ. */ + vss_header_seg_sizes = __eotf_pq_512_seg_sizes_log2; + num_vss_header_segments = ARRAY_LEN(__eotf_pq_512_seg_sizes_log2); + vss_type = NVKMS_LUT_VSS_TYPE_LINEAR; + + vss_entries = __eotf_pq_512_entries; + num_entries = ARRAY_LEN(__eotf_pq_512_entries) + 1; + } else { + WARN_ON((nv_drm_plane_state->degamma_tf != NV_DRM_TRANSFER_FUNCTION_DEFAULT) && + (nv_drm_plane_state->degamma_tf != NV_DRM_TRANSFER_FUNCTION_LINEAR)); + + num_entries = NVKMS_LUT_ARRAY_SIZE + 1; + } + WARN_ON((vss_entries != NULL) && + (num_vss_header_segments != nv_plane->ilut_caps.vssSegments)); + WARN_ON((vss_entries != NULL) && (num_entries > nv_plane->ilut_caps.lutEntries)); + WARN_ON((vss_entries == NULL) && (num_entries != nv_plane->ilut_caps.lutEntries)); + + /* + * Allocate displayable LUT surface. + * Space for the VSS header must be included even for non-VSS LUTs. + */ + drm_lut_surface = + alloc_drm_lut_surface(nv_dev, + NVKMS_LUT_FORMAT_FP16, + vss_type, + num_vss_header_segments, + NUM_VSS_HEADER_ENTRIES, + num_entries); + if (!drm_lut_surface) { + return NULL; + } + + lut_data = (NvU16 *) drm_lut_surface->base.buffer; + + /* Calculate VSS header. */ + if (vss_header_seg_sizes != NULL) { + for (entry_idx = 0; entry_idx < NUM_VSS_HEADER_ENTRIES; entry_idx++) { + int i; + NvU64 vss_header_entry = 0; + for (i = 0; (i < 16) && + (((entry_idx * 16) + i) < num_vss_header_segments); i++) { + vss_header_entry |= + ((NvU64) vss_header_seg_sizes[(entry_idx * 16) + i]) << (i * 3); + } + ((NvU64 *) lut_data)[entry_idx] = vss_header_entry; + } + } + + /* Calculate LUT content. */ + for (entry_idx = 0; + entry_idx < num_entries - 1; entry_idx++) { + NvU32 fp32_r, fp32_g, fp32_b; + NvU32 data_idx = entry_idx + NUM_VSS_HEADER_ENTRIES; + + if (nv_drm_plane_state->degamma_lut != NULL) { + /* Use provided Degamma LUT. */ + static const NvU32 interp_max = (((NvU32) 1) << (32 - 10)) - 1; + + const struct drm_color_lut *degamma_lut = + (struct drm_color_lut *) nv_drm_plane_state->degamma_lut->data; + + NvU16 lut_idx; + NvU32 interp = 0; + + if (vss_entries != NULL) { + /* Merge with provided VSS LUT. */ + NvU16 fp16_entry = vss_entries[entry_idx]; + + /* Convert from FP16 to UNORM32. */ + // TODO: Use pre-UNORM32-normalized VSS LUT table? + NvU32 unorm32_entry = + nvKmsKapiF32ToUI32RMinMag( + nvKmsKapiF32Mul( + nvKmsKapiF32Div(nvKmsKapiF16ToF32(fp16_entry), + fp_norm), + u32_norm), + false); + + /* Index using upper 10 bits from UNORM32 VSS LUT. */ + lut_idx = unorm32_entry >> (32 - 10); + /* Interpolate using lower 22 bits from UNORM32 VSS LUT. */ + interp = unorm32_entry & interp_max; + } else { + /* Direct index. */ + lut_idx = entry_idx; + } + + BUG_ON(lut_idx >= NVKMS_LUT_ARRAY_SIZE); + + /* Perform interpolation or direct indexing. 
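+             *
+             * fp32_lut_interp() computes the linear blend
+             *   entry0 * (interp_max - interp) / interp_max +
+             *   entry1 * interp / interp_max
+             * in FP32 arithmetic.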
*/
+            if (interp > 0 && ((lut_idx + 1) < NVKMS_LUT_ARRAY_SIZE)) {
+                fp32_r =
+                    fp32_lut_interp(degamma_lut[lut_idx].red,
+                                    degamma_lut[lut_idx + 1].red,
+                                    interp,
+                                    interp_max);
+                fp32_g =
+                    fp32_lut_interp(degamma_lut[lut_idx].green,
+                                    degamma_lut[lut_idx + 1].green,
+                                    interp,
+                                    interp_max);
+                fp32_b =
+                    fp32_lut_interp(degamma_lut[lut_idx].blue,
+                                    degamma_lut[lut_idx + 1].blue,
+                                    interp,
+                                    interp_max);
+            } else {
+                fp32_r = nvKmsKapiUI32ToF32((NvU32) degamma_lut[lut_idx].red);
+                fp32_g = nvKmsKapiUI32ToF32((NvU32) degamma_lut[lut_idx].green);
+                fp32_b = nvKmsKapiUI32ToF32((NvU32) degamma_lut[lut_idx].blue);
+            }
+
+            /* Convert UNORM16 to 1.0-normalized FP32. */
+            fp32_r = nvKmsKapiF32Div(fp32_r, u16_norm);
+            fp32_g = nvKmsKapiF32Div(fp32_g, u16_norm);
+            fp32_b = nvKmsKapiF32Div(fp32_b, u16_norm);
+        } else if (vss_entries != NULL) {
+            /* Use VSS LUT directly, but normalized to 1.0. */
+            // TODO: Use pre-1.0-normalized VSS LUT table?
+            NvU16 fp16_entry = vss_entries[entry_idx];
+            NvU32 fp32_entry = nvKmsKapiF16ToF32(fp16_entry);
+
+            fp32_r = fp32_g = fp32_b = nvKmsKapiF32Div(fp32_entry, fp_norm);
+        } else {
+            /* Use implicit identity. */
+            // TODO: Use LUT table?
+            fp32_r = fp32_g = fp32_b =
+                nvKmsKapiF32Div(nvKmsKapiUI32ToF32(entry_idx), u10_norm);
+        }
+
+        /* Apply multiplier. */
+        if (multiply) {
+            fp32_r = nvKmsKapiF32Mul(fp32_r, fp32_multiplier);
+            fp32_g = nvKmsKapiF32Mul(fp32_g, fp32_multiplier);
+            fp32_b = nvKmsKapiF32Mul(fp32_b, fp32_multiplier);
+        }
+
+        /* Convert from FP32 to FP16 to populate LUT. */
+        lut_data[(data_idx * 4) + 0] = nvKmsKapiF32ToF16(fp32_r);
+        lut_data[(data_idx * 4) + 1] = nvKmsKapiF32ToF16(fp32_g);
+        lut_data[(data_idx * 4) + 2] = nvKmsKapiF32ToF16(fp32_b);
+    }
+    ((NvU64 *) lut_data)[NUM_VSS_HEADER_ENTRIES + num_entries - 1] =
+        ((NvU64 *) lut_data)[NUM_VSS_HEADER_ENTRIES + num_entries - 2];
+
+    return drm_lut_surface;
+}
+
+#define UNORM16_TO_UNORM14_WAR_813188(u16) ((((u16) >> 2) & ~7) + 0x6000)
+
+static struct nv_drm_lut_surface *create_drm_ilut_surface_legacy(
+    struct nv_drm_device *nv_dev,
+    struct nv_drm_plane *nv_plane,
+    struct nv_drm_plane_state *nv_drm_plane_state)
+{
+    struct nv_drm_lut_surface *drm_lut_surface;
+    NvU16 *lut_data;
+    NvU32 entry_idx;
+
+    const struct drm_color_lut *degamma_lut;
+
+    WARN_ON(!nv_plane->ilut_caps.supported);
+    WARN_ON(nv_plane->ilut_caps.entryFormat != NVKMS_LUT_FORMAT_UNORM14_WAR_813188);
+    WARN_ON(nv_plane->ilut_caps.vssSupport == NVKMS_LUT_VSS_REQUIRED);
+    WARN_ON((NVKMS_LUT_ARRAY_SIZE + 1) > nv_plane->ilut_caps.lutEntries);
+
+    BUG_ON(nv_drm_plane_state->degamma_lut == NULL);
+
+    degamma_lut =
+        (struct drm_color_lut *) nv_drm_plane_state->degamma_lut->data;
+
+    /* Allocate displayable LUT surface. */
+    drm_lut_surface =
+        alloc_drm_lut_surface(nv_dev,
+                              NVKMS_LUT_FORMAT_UNORM14_WAR_813188,
+                              NVKMS_LUT_VSS_TYPE_NONE,
+                              0, 0,
+                              NVKMS_LUT_ARRAY_SIZE + 1);
+    if (drm_lut_surface == NULL) {
+        return NULL;
+    }
+
+    lut_data = (NvU16 *) drm_lut_surface->base.buffer;
+
+    /* Fill LUT surface.
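+     *
+     * Entries are laid out as R16G16B16A16 (four NvU16 per entry, alpha
+     * unused), and the final entry is duplicated so lookups at the top of
+     * the range have a valid upper neighbor.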
*/ + for (entry_idx = 0; entry_idx < NVKMS_LUT_ARRAY_SIZE; entry_idx++) { + lut_data[(entry_idx * 4) + 0] = + UNORM16_TO_UNORM14_WAR_813188(degamma_lut[entry_idx].red); + lut_data[(entry_idx * 4) + 1] = + UNORM16_TO_UNORM14_WAR_813188(degamma_lut[entry_idx].green); + lut_data[(entry_idx * 4) + 2] = + UNORM16_TO_UNORM14_WAR_813188(degamma_lut[entry_idx].blue); + } + ((NvU64 *) lut_data)[NVKMS_LUT_ARRAY_SIZE] = + ((NvU64 *) lut_data)[NVKMS_LUT_ARRAY_SIZE - 1]; + + return drm_lut_surface; +} + +static struct nv_drm_lut_surface *create_drm_tmo_surface( + struct nv_drm_device *nv_dev, + struct nv_drm_plane *nv_plane, + struct nv_drm_plane_state *nv_drm_plane_state) + +{ + struct nv_drm_lut_surface *drm_lut_surface; + NvU16 *lut_data; + NvU32 entry_idx; + + const struct drm_color_lut *tmo_lut; + + const NvU32 num_vss_header_segments = 64; + const NvU32 tmo_seg_size_log2 = 4; + + WARN_ON(!nv_plane->tmo_caps.supported); + WARN_ON(nv_plane->tmo_caps.entryFormat != NVKMS_LUT_FORMAT_UNORM16); + WARN_ON(nv_plane->tmo_caps.vssSupport != NVKMS_LUT_VSS_REQUIRED); + WARN_ON(nv_plane->tmo_caps.vssType != NVKMS_LUT_VSS_TYPE_LINEAR); + WARN_ON(num_vss_header_segments != nv_plane->tmo_caps.vssSegments); + WARN_ON((NVKMS_LUT_ARRAY_SIZE + 1) > nv_plane->tmo_caps.lutEntries); + + BUG_ON(nv_drm_plane_state->tmo_lut == NULL); + + tmo_lut = (struct drm_color_lut *) nv_drm_plane_state->tmo_lut->data; + + /* Verify that all channels are equal. */ + for (entry_idx = 0; entry_idx < NVKMS_LUT_ARRAY_SIZE; entry_idx++) { + if ((tmo_lut[entry_idx].red != tmo_lut[entry_idx].green) || + (tmo_lut[entry_idx].red != tmo_lut[entry_idx].blue)) { + return NULL; + } + } + + /* + * Allocate displayable LUT surface. + * The TMO LUT always uses VSS. + */ + drm_lut_surface = + alloc_drm_lut_surface(nv_dev, + NVKMS_LUT_FORMAT_UNORM16, + NVKMS_LUT_VSS_TYPE_LINEAR, + num_vss_header_segments, + NUM_VSS_HEADER_ENTRIES, + NVKMS_LUT_ARRAY_SIZE + 1); + if (drm_lut_surface == NULL) { + return NULL; + } + + lut_data = (NvU16 *) drm_lut_surface->base.buffer; + + /* Calculate linear VSS header. */ + for (entry_idx = 0; entry_idx < NUM_VSS_HEADER_ENTRIES; entry_idx++) { + int i; + NvU64 vss_header_entry = 0; + for (i = 0; (i < 16) && + (((entry_idx * 16) + i) < num_vss_header_segments); i++) { + vss_header_entry |= + ((NvU64) tmo_seg_size_log2) << (i * 3); + } + ((NvU64 *) lut_data)[entry_idx] = vss_header_entry; + } + + /* Fill LUT surface. 
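+     *
+     * All three channels were verified to be equal above; each is still
+     * written out, and the final entry is duplicated, as with the other
+     * LUT surfaces.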
*/ + for (entry_idx = 0; entry_idx < NVKMS_LUT_ARRAY_SIZE; entry_idx++) { + NvU32 data_idx = entry_idx + NUM_VSS_HEADER_ENTRIES; + + lut_data[(data_idx * 4) + 0] = tmo_lut[entry_idx].red; + lut_data[(data_idx * 4) + 1] = tmo_lut[entry_idx].green; + lut_data[(data_idx * 4) + 2] = tmo_lut[entry_idx].blue; + } + ((NvU64 *) lut_data)[NUM_VSS_HEADER_ENTRIES + NVKMS_LUT_ARRAY_SIZE] = + ((NvU64 *) lut_data)[NUM_VSS_HEADER_ENTRIES + NVKMS_LUT_ARRAY_SIZE - 1]; + + return drm_lut_surface; +} + +static NvU16 unorm16_lut_interp( + NvU16 entry0, + NvU16 entry1, + NvU16 interp, + NvU16 interp_max) +{ + NvU64 u64_entry0 = (NvU64) entry0; + NvU64 u64_entry1 = (NvU64) entry1; + + u64_entry0 *= (NvU64) (interp_max - interp); + u64_entry0 /= (NvU64) interp_max; + + u64_entry1 *= (NvU64) interp; + u64_entry1 /= (NvU64) interp_max; + + return (NvU16) (u64_entry0 + u64_entry1); +} + +static struct nv_drm_lut_surface *create_drm_olut_surface_vss( + struct nv_drm_device *nv_dev, + struct nv_drm_crtc *nv_crtc, + struct nv_drm_crtc_state *nv_drm_crtc_state) +{ + struct nv_drm_lut_surface *drm_lut_surface; + + NvU32 entry_idx; + NvU32 num_entries; + NvU16 *lut_data; + + const NvU32 *vss_header_seg_sizes = NULL; + NvU32 num_vss_header_segments = 0; + const NvU16 *vss_entries = NULL; + enum NvKmsLUTVssType vss_type = NVKMS_LUT_VSS_TYPE_NONE; + + WARN_ON(!nv_crtc->olut_caps.supported); + WARN_ON(nv_crtc->olut_caps.entryFormat != NVKMS_LUT_FORMAT_UNORM16); + WARN_ON(nv_crtc->olut_caps.vssSupport != NVKMS_LUT_VSS_SUPPORTED); + WARN_ON(nv_crtc->olut_caps.vssType != NVKMS_LUT_VSS_TYPE_LOGARITHMIC); + + /* Determine configuration based on specified OETF. */ + if (nv_drm_crtc_state->regamma_tf == NV_DRM_TRANSFER_FUNCTION_PQ) { + /* Need VSS for PQ. */ + vss_header_seg_sizes = __oetf_pq_512_seg_sizes_log2; + num_vss_header_segments = ARRAY_LEN(__oetf_pq_512_seg_sizes_log2); + vss_type = NVKMS_LUT_VSS_TYPE_LOGARITHMIC; + + vss_entries = __oetf_pq_512_entries; + num_entries = ARRAY_LEN(__oetf_pq_512_entries) + 1; + } else { + WARN_ON((nv_drm_crtc_state->regamma_tf != NV_DRM_TRANSFER_FUNCTION_DEFAULT) && + (nv_drm_crtc_state->regamma_tf != NV_DRM_TRANSFER_FUNCTION_LINEAR)); + + num_entries = NVKMS_LUT_ARRAY_SIZE + 1; + } + WARN_ON((vss_entries != NULL) && + (num_vss_header_segments != nv_crtc->olut_caps.vssSegments)); + WARN_ON((vss_entries != NULL) && (num_entries > nv_crtc->olut_caps.lutEntries)); + WARN_ON((vss_entries == NULL) && (num_entries != nv_crtc->olut_caps.lutEntries)); + + /* + * Allocate displayable LUT surface. + * Space for the VSS header must be included even for non-VSS LUTs. + */ + drm_lut_surface = + alloc_drm_lut_surface(nv_dev, + NVKMS_LUT_FORMAT_UNORM16, + vss_type, + num_vss_header_segments, + NUM_VSS_HEADER_ENTRIES, + num_entries); + if (!drm_lut_surface) { + return NULL; + } + + lut_data = (NvU16 *) drm_lut_surface->base.buffer; + + /* Calculate VSS header. */ + if (vss_header_seg_sizes != NULL) { + for (entry_idx = 0; entry_idx < NUM_VSS_HEADER_ENTRIES; entry_idx++) { + int i; + NvU64 vss_header_entry = 0; + for (i = 0; (i < 16) && + (((entry_idx * 16) + i) < num_vss_header_segments); i++) { + vss_header_entry |= + ((NvU64) vss_header_seg_sizes[(entry_idx * 16) + i]) << (i * 3); + } + ((NvU64 *) lut_data)[entry_idx] = vss_header_entry; + } + } + + /* Calculate LUT content. */ + for (entry_idx = 0; + entry_idx < num_entries - 1; entry_idx++) { + NvU32 data_idx = entry_idx + NUM_VSS_HEADER_ENTRIES; + + NvU16 r, g, b = 0; + + if (nv_drm_crtc_state->regamma_lut != NULL) { + /* Use provided Regamma LUT. 
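+             *
+             * Worked example (informative): a VSS entry of 0x8020
+             * decomposes into lut_idx = 0x8020 >> 6 = 512 and
+             * interp = 0x8020 & 63 = 32, i.e. a blend roughly halfway
+             * between entries 512 and 513.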
*/ + static const NvU16 interp_max = (((NvU16) 1) << (16 - 10)) - 1; + + const struct drm_color_lut *regamma_lut = + (struct drm_color_lut *) nv_drm_crtc_state->regamma_lut->data; + + NvU16 lut_idx; + NvU16 interp = 0; + + if (vss_entries != NULL) { + /* Merge with provided VSS LUT. */ + NvU16 unorm16_entry = vss_entries[entry_idx]; + + /* Index using upper 10 bits from UNORM16 VSS LUT. */ + lut_idx = unorm16_entry >> (16 - 10); + /* Interpolate using lower 6 bits from UNORM16 VSS LUT. */ + interp = unorm16_entry & interp_max; + } else { + /* Direct index. */ + lut_idx = entry_idx; + } + + BUG_ON(lut_idx >= NVKMS_LUT_ARRAY_SIZE); + + /* Perform interpolation or direct indexing. */ + if (interp > 0 && ((lut_idx + 1) < NVKMS_LUT_ARRAY_SIZE)) { + r = unorm16_lut_interp(regamma_lut[lut_idx].red, + regamma_lut[lut_idx + 1].red, + interp, + interp_max); + g = unorm16_lut_interp(regamma_lut[lut_idx].green, + regamma_lut[lut_idx + 1].green, + interp, + interp_max); + b = unorm16_lut_interp(regamma_lut[lut_idx].blue, + regamma_lut[lut_idx + 1].blue, + interp, + interp_max); + } else { + r = regamma_lut[lut_idx].red; + g = regamma_lut[lut_idx].green; + b = regamma_lut[lut_idx].blue; + } + } else if (vss_entries != NULL) { + /* Use VSS LUT directly. */ + r = g = b = vss_entries[entry_idx]; + } else { + /* Use implicit identity. */ + WARN_ON_ONCE(num_entries != (NVKMS_LUT_ARRAY_SIZE + 1)); + r = g = b = entry_idx << (16 - 10); + } + + /* Populate LUT. */ + lut_data[(data_idx * 4) + 0] = r; + lut_data[(data_idx * 4) + 1] = g; + lut_data[(data_idx * 4) + 2] = b; + } + ((NvU64 *) lut_data)[NUM_VSS_HEADER_ENTRIES + num_entries - 1] = + ((NvU64 *) lut_data)[NUM_VSS_HEADER_ENTRIES + num_entries - 2]; + + return drm_lut_surface; +} + +static struct nv_drm_lut_surface *create_drm_olut_surface_legacy( + struct nv_drm_device *nv_dev, + struct nv_drm_crtc *nv_crtc, + struct nv_drm_crtc_state *nv_drm_crtc_state) + +{ + struct nv_drm_lut_surface *drm_lut_surface; + NvU16 *lut_data; + NvU32 entry_idx; + + const struct drm_color_lut *regamma_lut; + + WARN_ON(!nv_crtc->olut_caps.supported); + WARN_ON(nv_crtc->olut_caps.entryFormat != NVKMS_LUT_FORMAT_UNORM14_WAR_813188); + WARN_ON(nv_crtc->olut_caps.vssSupport == NVKMS_LUT_VSS_REQUIRED); + WARN_ON((NVKMS_LUT_ARRAY_SIZE + 1) > nv_crtc->olut_caps.lutEntries); + + BUG_ON(nv_drm_crtc_state->regamma_lut == NULL); + + regamma_lut = + (struct drm_color_lut *) nv_drm_crtc_state->regamma_lut->data; + + /* Allocate displayable LUT surface. */ + drm_lut_surface = + alloc_drm_lut_surface(nv_dev, + NVKMS_LUT_FORMAT_UNORM14_WAR_813188, + NVKMS_LUT_VSS_TYPE_NONE, + 0, 0, + NVKMS_LUT_ARRAY_SIZE + 1); + if (drm_lut_surface == NULL) { + return NULL; + } + + lut_data = (NvU16 *) drm_lut_surface->base.buffer; + + /* Fill LUT surface. 
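+     *
+     * UNORM16_TO_UNORM14_WAR_813188() shifts each UNORM16 value down to a
+     * 14-bit range, clears the low three bits, and adds a 0x6000 bias; this
+     * reading is inferred from the macro's definition above, with the exact
+     * hardware meaning of the bias left to the WAR-813188 entry format.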
*/ + for (entry_idx = 0; entry_idx < NVKMS_LUT_ARRAY_SIZE; entry_idx++) { + lut_data[(entry_idx * 4) + 0] = + UNORM16_TO_UNORM14_WAR_813188(regamma_lut[entry_idx].red); + lut_data[(entry_idx * 4) + 1] = + UNORM16_TO_UNORM14_WAR_813188(regamma_lut[entry_idx].green); + lut_data[(entry_idx * 4) + 2] = + UNORM16_TO_UNORM14_WAR_813188(regamma_lut[entry_idx].blue); + } + ((NvU64 *) lut_data)[NVKMS_LUT_ARRAY_SIZE] = + ((NvU64 *) lut_data)[NVKMS_LUT_ARRAY_SIZE - 1]; + + return drm_lut_surface; +} + +static bool +update_matrix_override(struct drm_property_blob *blob, + struct NvKmsCscMatrix *new_matrix, + const struct NvKmsCscMatrix *old_matrix, + bool old_enabled, + bool *changed) +{ + bool enabled; + if (blob != NULL) { + ctm_3x4_to_csc(new_matrix, (struct drm_color_ctm_3x4 *) blob->data); + enabled = true; + } else { + enabled = false; + } + *changed |= (enabled != old_enabled) || + memcmp(new_matrix, old_matrix, sizeof(*old_matrix)); + return enabled; +} + +static enum NvKmsInputColorSpace nv_get_nvkms_input_colorspace( + enum nv_drm_input_color_space colorSpace) +{ + switch (colorSpace) { + case NV_DRM_INPUT_COLOR_SPACE_NONE: + return NVKMS_INPUT_COLOR_SPACE_NONE; + case NV_DRM_INPUT_COLOR_SPACE_SCRGB_LINEAR: + return NVKMS_INPUT_COLOR_SPACE_BT709; + case NV_DRM_INPUT_COLOR_SPACE_BT2100_PQ: + return NVKMS_INPUT_COLOR_SPACE_BT2100; + default: + /* We shouldn't hit this */ + WARN_ON("Unsupported input colorspace"); + return NVKMS_INPUT_COLOR_SPACE_NONE; + } +} + +static enum NvKmsInputTf nv_get_nvkms_input_tf( + enum nv_drm_input_color_space colorSpace) +{ + switch (colorSpace) { + case NV_DRM_INPUT_COLOR_SPACE_NONE: + return NVKMS_INPUT_TF_LINEAR; + case NV_DRM_INPUT_COLOR_SPACE_SCRGB_LINEAR: + return NVKMS_INPUT_TF_LINEAR; + case NV_DRM_INPUT_COLOR_SPACE_BT2100_PQ: + return NVKMS_INPUT_TF_PQ; + default: + /* We shouldn't hit this */ + WARN_ON("Unsupported input colorspace"); + return NVKMS_INPUT_TF_LINEAR; + } +} + +#if defined(NV_DRM_PLANE_CREATE_COLOR_PROPERTIES_PRESENT) +static enum NvKmsInputColorSpace nv_drm_color_encoding_to_nvkms_colorspace( + enum drm_color_encoding color_encoding) +{ + switch(color_encoding) { + case DRM_COLOR_YCBCR_BT601: + return NVKMS_INPUT_COLOR_SPACE_BT601; + case DRM_COLOR_YCBCR_BT709: + return NVKMS_INPUT_COLOR_SPACE_BT709; + case DRM_COLOR_YCBCR_BT2020: + return NVKMS_INPUT_COLOR_SPACE_BT2020; + default: + /* We shouldn't hit this */ + WARN_ON("Unsupported DRM color_encoding"); + return NVKMS_INPUT_COLOR_SPACE_NONE; + } +} + +static enum NvKmsInputColorRange nv_drm_color_range_to_nvkms_color_range( + enum drm_color_range color_range) +{ + switch(color_range) { + case DRM_COLOR_YCBCR_FULL_RANGE: + return NVKMS_INPUT_COLOR_RANGE_FULL; + case DRM_COLOR_YCBCR_LIMITED_RANGE: + return NVKMS_INPUT_COLOR_RANGE_LIMITED; + default: + /* We shouldn't hit this */ + WARN_ON("Unsupported DRM color_range"); + return NVKMS_INPUT_COLOR_RANGE_DEFAULT; + } +} +#endif + +static int +plane_req_config_update(struct drm_plane *plane, + struct drm_plane_state *plane_state, + struct NvKmsKapiLayerRequestedConfig *req_config) +{ + struct nv_drm_device *nv_dev = to_nv_device(plane->dev); + struct nv_drm_plane *nv_plane = to_nv_plane(plane); + struct NvKmsKapiLayerConfig old_config = req_config->config; + struct nv_drm_plane_state *nv_drm_plane_state = + to_nv_drm_plane_state(plane_state); + bool matrix_overrides_changed = 0; + + if (plane_state->fb == NULL) { + plane_req_config_disable(req_config); + return 0; + } + + memset(req_config, 0, sizeof(*req_config)); + + 
req_config->config.surface = to_nv_framebuffer(plane_state->fb)->pSurface; + + /* Source values are 16.16 fixed point */ + req_config->config.srcX = plane_state->src_x >> 16; + req_config->config.srcY = plane_state->src_y >> 16; + req_config->config.srcWidth = plane_state->src_w >> 16; + req_config->config.srcHeight = plane_state->src_h >> 16; + + req_config->config.dstX = plane_state->crtc_x; + req_config->config.dstY = plane_state->crtc_y; + req_config->config.dstWidth = plane_state->crtc_w; + req_config->config.dstHeight = plane_state->crtc_h; + + req_config->config.csc = old_config.csc; + + /* + * plane_state->rotation is only valid when plane->rotation_property + * is non-NULL. + */ + if (plane->rotation_property != NULL) { + if (plane_state->rotation & DRM_MODE_REFLECT_X) { + req_config->config.rrParams.reflectionX = true; + } + + if (plane_state->rotation & DRM_MODE_REFLECT_Y) { + req_config->config.rrParams.reflectionY = true; + } + + switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { + case DRM_MODE_ROTATE_0: + req_config->config.rrParams.rotation = NVKMS_ROTATION_0; + break; + case DRM_MODE_ROTATE_90: + req_config->config.rrParams.rotation = NVKMS_ROTATION_90; + break; + case DRM_MODE_ROTATE_180: + req_config->config.rrParams.rotation = NVKMS_ROTATION_180; + break; + case DRM_MODE_ROTATE_270: + req_config->config.rrParams.rotation = NVKMS_ROTATION_270; + break; + default: + /* + * We should not hit this, because + * plane_state->rotation should only have values + * registered in + * __nv_drm_plane_create_rotation_property(). + */ + WARN_ON("Unsupported rotation"); + break; + } + } + +#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) + if (plane->blend_mode_property != NULL && plane->alpha_property != NULL) { + + switch (plane_state->pixel_blend_mode) { + case DRM_MODE_BLEND_PREMULTI: + req_config->config.compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA; + break; + case DRM_MODE_BLEND_COVERAGE: + req_config->config.compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA; + break; + default: + /* + * We should not hit this, because + * plane_state->pixel_blend_mode should only have values + * registered in + * __nv_drm_plane_create_alpha_blending_properties(). + */ + WARN_ON("Unsupported blending mode"); + break; + + } + + req_config->config.compParams.surfaceAlpha = + plane_state->alpha >> 8; + + } else if (plane->blend_mode_property != NULL) { + + switch (plane_state->pixel_blend_mode) { + case DRM_MODE_BLEND_PREMULTI: + req_config->config.compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA; + break; + case DRM_MODE_BLEND_COVERAGE: + req_config->config.compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA; + break; + default: + /* + * We should not hit this, because + * plane_state->pixel_blend_mode should only have values + * registered in + * __nv_drm_plane_create_alpha_blending_properties(). 
+ */ + WARN_ON("Unsupported blending mode"); + break; + + } + + } else { + req_config->config.compParams.compMode = + nv_plane->defaultCompositionMode; + } +#else + req_config->config.compParams.compMode = + nv_plane->defaultCompositionMode; +#endif + +#if defined(NV_DRM_PLANE_CREATE_COLOR_PROPERTIES_PRESENT) + if ((nv_drm_plane_state->input_colorspace == NV_DRM_INPUT_COLOR_SPACE_NONE) && + nv_drm_format_is_yuv(plane_state->fb->format->format)) { + + if (nv_plane->supportsColorProperties) { + req_config->config.inputColorSpace = + nv_drm_color_encoding_to_nvkms_colorspace(plane_state->color_encoding); + req_config->config.inputColorRange = + nv_drm_color_range_to_nvkms_color_range(plane_state->color_range); + } else { + req_config->config.inputColorSpace = NVKMS_INPUT_COLOR_SPACE_NONE; + req_config->config.inputColorRange = NVKMS_INPUT_COLOR_RANGE_DEFAULT; + } + req_config->config.inputTf = NVKMS_INPUT_TF_LINEAR; + } else { +#endif + req_config->config.inputColorSpace = + nv_get_nvkms_input_colorspace(nv_drm_plane_state->input_colorspace); + req_config->config.inputColorRange = NVKMS_INPUT_COLOR_RANGE_DEFAULT; + req_config->config.inputTf = + nv_get_nvkms_input_tf(nv_drm_plane_state->input_colorspace); +#if defined(NV_DRM_PLANE_CREATE_COLOR_PROPERTIES_PRESENT) + } +#endif + + req_config->flags.inputTfChanged = + (old_config.inputTf != req_config->config.inputTf); + req_config->flags.inputColorSpaceChanged = + (old_config.inputColorSpace != req_config->config.inputColorSpace); + req_config->flags.inputColorRangeChanged = + (old_config.inputColorRange != req_config->config.inputColorRange); + + req_config->config.syncParams.preSyncptSpecified = false; + req_config->config.syncParams.postSyncptRequested = false; + req_config->config.syncParams.semaphoreSpecified = false; + + if (nv_drm_plane_state->fd_user_ptr) { + if (nv_dev->supportsSyncpts) { + req_config->config.syncParams.postSyncptRequested = true; + } else { + return -1; + } + } + +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + if (nv_drm_plane_state->hdr_output_metadata != NULL) { + struct hdr_output_metadata *hdr_metadata = + nv_drm_plane_state->hdr_output_metadata->data; + struct hdr_metadata_infoframe *info_frame = + &hdr_metadata->hdmi_metadata_type1; + uint32_t i; + + if (hdr_metadata->metadata_type != HDMI_STATIC_METADATA_TYPE1) { + NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported Metadata Type"); + return -1; + } + + for (i = 0; i < ARRAY_SIZE(info_frame->display_primaries); i ++) { + req_config->config.hdrMetadata.val.displayPrimaries[i].x = + info_frame->display_primaries[i].x; + req_config->config.hdrMetadata.val.displayPrimaries[i].y = + info_frame->display_primaries[i].y; + } + + req_config->config.hdrMetadata.val.whitePoint.x = + info_frame->white_point.x; + req_config->config.hdrMetadata.val.whitePoint.y = + info_frame->white_point.y; + req_config->config.hdrMetadata.val.maxDisplayMasteringLuminance = + info_frame->max_display_mastering_luminance; + req_config->config.hdrMetadata.val.minDisplayMasteringLuminance = + info_frame->min_display_mastering_luminance; + req_config->config.hdrMetadata.val.maxCLL = + info_frame->max_cll; + req_config->config.hdrMetadata.val.maxFALL = + info_frame->max_fall; + + switch (info_frame->eotf) { + case HDMI_EOTF_SMPTE_ST2084: + req_config->config.outputTf = NVKMS_OUTPUT_TF_PQ; + break; + case HDMI_EOTF_TRADITIONAL_GAMMA_SDR: + req_config->config.outputTf = + NVKMS_OUTPUT_TF_TRADITIONAL_GAMMA_SDR; + break; + default: + NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported EOTF"); + return -1; + } + + 
req_config->config.hdrMetadata.enabled = true; + } else { + req_config->config.hdrMetadata.enabled = false; + req_config->config.outputTf = NVKMS_OUTPUT_TF_NONE; + } + + req_config->flags.hdrMetadataChanged = + ((old_config.hdrMetadata.enabled != + req_config->config.hdrMetadata.enabled) || + memcmp(&old_config.hdrMetadata.val, + &req_config->config.hdrMetadata.val, + sizeof(struct NvKmsHDRStaticMetadata))); + + req_config->flags.outputTfChanged = (old_config.outputTf != req_config->config.outputTf); +#endif + + req_config->config.matrixOverrides.enabled.lmsCtm = + update_matrix_override(nv_drm_plane_state->lms_ctm, + &req_config->config.matrixOverrides.lmsCtm, + &old_config.matrixOverrides.lmsCtm, + old_config.matrixOverrides.enabled.lmsCtm, + &matrix_overrides_changed); + req_config->config.matrixOverrides.enabled.lmsToItpCtm = + update_matrix_override(nv_drm_plane_state->lms_to_itp_ctm, + &req_config->config.matrixOverrides.lmsToItpCtm, + &old_config.matrixOverrides.lmsToItpCtm, + old_config.matrixOverrides.enabled.lmsToItpCtm, + &matrix_overrides_changed); + req_config->config.matrixOverrides.enabled.itpToLmsCtm = + update_matrix_override(nv_drm_plane_state->itp_to_lms_ctm, + &req_config->config.matrixOverrides.itpToLmsCtm, + &old_config.matrixOverrides.itpToLmsCtm, + old_config.matrixOverrides.enabled.itpToLmsCtm, + &matrix_overrides_changed); + req_config->config.matrixOverrides.enabled.blendCtm = + update_matrix_override(nv_drm_plane_state->blend_ctm, + &req_config->config.matrixOverrides.blendCtm, + &old_config.matrixOverrides.blendCtm, + old_config.matrixOverrides.enabled.blendCtm, + &matrix_overrides_changed); + req_config->flags.matrixOverridesChanged = matrix_overrides_changed; + + if (nv_drm_plane_state->degamma_changed) { + if (nv_drm_plane_state->degamma_drm_lut_surface != NULL) { + kref_put(&nv_drm_plane_state->degamma_drm_lut_surface->base.refcount, + free_drm_lut_surface); + nv_drm_plane_state->degamma_drm_lut_surface = NULL; + } + + if (nv_plane->ilut_caps.vssSupport == NVKMS_LUT_VSS_SUPPORTED) { + if ((nv_drm_plane_state->degamma_tf != NV_DRM_TRANSFER_FUNCTION_DEFAULT) || + (nv_drm_plane_state->degamma_lut != NULL) || + (nv_drm_plane_state->degamma_multiplier != ((uint64_t) 1) << 32)) { + + nv_drm_plane_state->degamma_drm_lut_surface = + create_drm_ilut_surface_vss(nv_dev, nv_plane, + nv_drm_plane_state); + if (nv_drm_plane_state->degamma_drm_lut_surface == NULL) { + return -1; + } + } + } else { + WARN_ON(nv_plane->ilut_caps.vssSupport != NVKMS_LUT_VSS_NOT_SUPPORTED); + if (nv_drm_plane_state->degamma_lut != NULL) { + nv_drm_plane_state->degamma_drm_lut_surface = + create_drm_ilut_surface_legacy(nv_dev, nv_plane, + nv_drm_plane_state); + if (nv_drm_plane_state->degamma_drm_lut_surface == NULL) { + return -1; + } + } + } + + if (nv_drm_plane_state->degamma_drm_lut_surface != NULL) { + req_config->config.ilut.enabled = NV_TRUE; + req_config->config.ilut.lutSurface = + nv_drm_plane_state->degamma_drm_lut_surface->base.nvkms_surface; + req_config->config.ilut.offset = 0; + req_config->config.ilut.vssSegments = + nv_drm_plane_state->degamma_drm_lut_surface->properties.vssSegments; + req_config->config.ilut.lutEntries = + nv_drm_plane_state->degamma_drm_lut_surface->properties.lutEntries; + } else { + req_config->config.ilut.enabled = NV_FALSE; + req_config->config.ilut.lutSurface = NULL; + req_config->config.ilut.offset = 0; + req_config->config.ilut.vssSegments = 0; + req_config->config.ilut.lutEntries = 0; + + } + req_config->flags.ilutChanged = NV_TRUE; + } + + if 
(nv_drm_plane_state->tmo_changed) { + if (nv_drm_plane_state->tmo_drm_lut_surface != NULL) { + kref_put(&nv_drm_plane_state->tmo_drm_lut_surface->base.refcount, + free_drm_lut_surface); + nv_drm_plane_state->tmo_drm_lut_surface = NULL; + } + + if (nv_drm_plane_state->tmo_lut != NULL) { + nv_drm_plane_state->tmo_drm_lut_surface = + create_drm_tmo_surface(nv_dev, nv_plane, + nv_drm_plane_state); + if (nv_drm_plane_state->tmo_drm_lut_surface == NULL) { + return -1; + } + } + + if (nv_drm_plane_state->tmo_drm_lut_surface != NULL) { + req_config->config.tmo.enabled = NV_TRUE; + req_config->config.tmo.lutSurface = + nv_drm_plane_state->tmo_drm_lut_surface->base.nvkms_surface; + req_config->config.tmo.offset = 0; + req_config->config.tmo.vssSegments = + nv_drm_plane_state->tmo_drm_lut_surface->properties.vssSegments; + req_config->config.tmo.lutEntries = + nv_drm_plane_state->tmo_drm_lut_surface->properties.lutEntries; + } else { + req_config->config.tmo.enabled = NV_FALSE; + req_config->config.tmo.lutSurface = NULL; + req_config->config.tmo.offset = 0; + req_config->config.tmo.vssSegments = 0; + req_config->config.tmo.lutEntries = 0; + } + req_config->flags.tmoChanged = NV_TRUE; + } + + /* + * Unconditionally mark the surface as changed, even if nothing changed, + * so that we always get a flip event: a DRM client may flip with + * the same surface and wait for a flip event. + */ + req_config->flags.surfaceChanged = NV_TRUE; + + if (old_config.surface == NULL && + old_config.surface != req_config->config.surface) { + req_config->flags.srcXYChanged = NV_TRUE; + req_config->flags.srcWHChanged = NV_TRUE; + req_config->flags.dstXYChanged = NV_TRUE; + req_config->flags.dstWHChanged = NV_TRUE; + return 0; + } + + req_config->flags.srcXYChanged = + old_config.srcX != req_config->config.srcX || + old_config.srcY != req_config->config.srcY; + + req_config->flags.srcWHChanged = + old_config.srcWidth != req_config->config.srcWidth || + old_config.srcHeight != req_config->config.srcHeight; + + req_config->flags.dstXYChanged = + old_config.dstX != req_config->config.dstX || + old_config.dstY != req_config->config.dstY; + + req_config->flags.dstWHChanged = + old_config.dstWidth != req_config->config.dstWidth || + old_config.dstHeight != req_config->config.dstHeight; + + return 0; +} + +static bool __is_async_flip_requested(const struct drm_plane *plane, + const struct drm_crtc_state *crtc_state) +{ + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { +#if defined(NV_DRM_CRTC_STATE_HAS_ASYNC_FLIP) + return crtc_state->async_flip; +#elif defined(NV_DRM_CRTC_STATE_HAS_PAGEFLIP_FLAGS) + return !!(crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC); +#endif + } + + return false; +} + +static int __nv_drm_cursor_atomic_check(struct drm_plane *plane, + struct drm_plane_state *plane_state) +{ + struct nv_drm_plane *nv_plane = to_nv_plane(plane); + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + + WARN_ON(nv_plane->layer_idx != NVKMS_KAPI_LAYER_INVALID_IDX); + + nv_drm_for_each_crtc_in_state(plane_state->state, crtc, crtc_state, i) { + struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state); + struct NvKmsKapiHeadRequestedConfig *head_req_config = + &nv_crtc_state->req_config; + struct NvKmsKapiCursorRequestedConfig *cursor_req_config = + &head_req_config->cursorRequestedConfig; + + if (plane->state->crtc == crtc && + plane->state->crtc != plane_state->crtc) { + cursor_req_config_disable(cursor_req_config); + continue; + } + + if (plane_state->crtc == crtc) { + 
cursor_plane_req_config_update(plane, plane_state, + cursor_req_config); + } + } + + return 0; +} + +#if defined(NV_DRM_PLANE_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG) +static int nv_drm_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +#else +static int nv_drm_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *plane_state) +#endif +{ + struct nv_drm_plane *nv_plane = to_nv_plane(plane); +#if defined(NV_DRM_PLANE_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG) + struct drm_plane_state *plane_state = + drm_atomic_get_new_plane_state(state, plane); +#endif + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int ret; + + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + return __nv_drm_cursor_atomic_check(plane, plane_state); + } + + WARN_ON(nv_plane->layer_idx == NVKMS_KAPI_LAYER_INVALID_IDX); + + nv_drm_for_each_crtc_in_state(plane_state->state, crtc, crtc_state, i) { + struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state); + struct NvKmsKapiHeadRequestedConfig *head_req_config = + &nv_crtc_state->req_config; + struct NvKmsKapiLayerRequestedConfig *plane_requested_config = + &head_req_config->layerRequestedConfig[nv_plane->layer_idx]; + + if (plane->state->crtc == crtc && + plane->state->crtc != plane_state->crtc) { + plane_req_config_disable(plane_requested_config); + continue; + } + + if (plane_state->crtc == crtc) { + ret = plane_req_config_update(plane, + plane_state, + plane_requested_config); + if (ret != 0) { + return ret; + } + + if (crtc_state->color_mgmt_changed) { + /* + * According to the comment in the Linux kernel's + * drivers/gpu/drm/drm_color_mgmt.c, if this property is NULL, + * the CTM needs to be changed to the identity matrix. + */ + if (crtc_state->ctm) { + ctm_to_csc(&plane_requested_config->config.csc, + (struct drm_color_ctm *)crtc_state->ctm->data); + } else { + plane_requested_config->config.csc = NVKMS_IDENTITY_CSC_MATRIX; + } + plane_requested_config->config.cscUseMain = NV_FALSE; + plane_requested_config->flags.cscChanged = NV_TRUE; + } + + if (__is_async_flip_requested(plane, crtc_state)) { + /* + * An async flip requests that the flip happen 'as soon as + * possible', meaning that it should not wait for the next + * vblank. This may cause tearing on the screen. + */ + plane_requested_config->config.minPresentInterval = 0; + plane_requested_config->config.tearing = NV_TRUE; + } else { + plane_requested_config->config.minPresentInterval = 1; + plane_requested_config->config.tearing = NV_FALSE; + } + } + } + + return 0; +} + +static bool nv_drm_plane_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) +{ + /* All supported modifiers are compatible with all supported formats */ + return true; +} + +static int nv_drm_atomic_crtc_get_property( + struct drm_crtc *crtc, + const struct drm_crtc_state *state, + struct drm_property *property, + uint64_t *val) +{ + struct nv_drm_device *nv_dev = to_nv_device(crtc->dev); + const struct nv_drm_crtc_state *nv_drm_crtc_state = + to_nv_crtc_state_const(state); + + if (property == nv_dev->nv_crtc_regamma_tf_property) { + *val = nv_drm_crtc_state->regamma_tf; + return 0; + } else if (property == nv_dev->nv_crtc_regamma_lut_property) { + *val = nv_drm_crtc_state->regamma_lut ? 
+ nv_drm_crtc_state->regamma_lut->base.id : 0; + return 0; + } else if (property == nv_dev->nv_crtc_regamma_divisor_property) { + *val = nv_drm_crtc_state->regamma_divisor; + return 0; + } else if (property == nv_dev->nv_crtc_regamma_lut_size_property) { + /* + * This shouldn't be necessary, because read-only properties are stored + * in obj->properties->values[]. To be safe, check for it anyway. + */ + *val = NVKMS_LUT_ARRAY_SIZE; + return 0; + } + + return -EINVAL; + +} + +static int nv_drm_atomic_crtc_set_property( + struct drm_crtc *crtc, + struct drm_crtc_state *state, + struct drm_property *property, + uint64_t val) +{ + struct nv_drm_device *nv_dev = to_nv_device(crtc->dev); + struct nv_drm_crtc_state *nv_drm_crtc_state = + to_nv_crtc_state(state); + NvBool replaced = false; + + if (property == nv_dev->nv_crtc_regamma_tf_property) { + if (val != nv_drm_crtc_state->regamma_tf) { + nv_drm_crtc_state->regamma_tf = val; + nv_drm_crtc_state->regamma_changed = true; + } + return 0; + } else if (property == nv_dev->nv_crtc_regamma_lut_property) { + int ret = nv_drm_atomic_replace_property_blob_from_id( + nv_dev->dev, + &nv_drm_crtc_state->regamma_lut, + val, + sizeof(struct drm_color_lut) * NVKMS_LUT_ARRAY_SIZE, + &replaced); + if (replaced) { + nv_drm_crtc_state->regamma_changed = true; + } + return ret; + } else if (property == nv_dev->nv_crtc_regamma_divisor_property) { + if (val != nv_drm_crtc_state->regamma_divisor) { + nv_drm_crtc_state->regamma_divisor = val; + nv_drm_crtc_state->regamma_changed = true; + } + return 0; + } + + return -EINVAL; +} + +static int nv_drm_plane_atomic_set_property( + struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val) +{ + struct nv_drm_device *nv_dev = to_nv_device(plane->dev); + struct nv_drm_plane_state *nv_drm_plane_state = + to_nv_drm_plane_state(state); + NvBool replaced = false; + + if (property == nv_dev->nv_out_fence_property) { + nv_drm_plane_state->fd_user_ptr = (void __user *)(uintptr_t)(val); + return 0; + } else if (property == nv_dev->nv_input_colorspace_property) { + nv_drm_plane_state->input_colorspace = val; + return 0; + } +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + else if (property == nv_dev->nv_hdr_output_metadata_property) { + return nv_drm_atomic_replace_property_blob_from_id( + nv_dev->dev, + &nv_drm_plane_state->hdr_output_metadata, + val, + sizeof(struct hdr_output_metadata), + &replaced); + } +#endif + else if (property == nv_dev->nv_plane_lms_ctm_property) { + return nv_drm_atomic_replace_property_blob_from_id( + nv_dev->dev, + &nv_drm_plane_state->lms_ctm, + val, + sizeof(struct drm_color_ctm_3x4), + &replaced); + } else if (property == nv_dev->nv_plane_lms_to_itp_ctm_property) { + return nv_drm_atomic_replace_property_blob_from_id( + nv_dev->dev, + &nv_drm_plane_state->lms_to_itp_ctm, + val, + sizeof(struct drm_color_ctm_3x4), + &replaced); + } else if (property == nv_dev->nv_plane_itp_to_lms_ctm_property) { + return nv_drm_atomic_replace_property_blob_from_id( + nv_dev->dev, + &nv_drm_plane_state->itp_to_lms_ctm, + val, + sizeof(struct drm_color_ctm_3x4), + &replaced); + } else if (property == nv_dev->nv_plane_blend_ctm_property) { + return nv_drm_atomic_replace_property_blob_from_id( + nv_dev->dev, + &nv_drm_plane_state->blend_ctm, + val, + sizeof(struct drm_color_ctm_3x4), + &replaced); + } else if (property == nv_dev->nv_plane_degamma_tf_property) { + if (val != nv_drm_plane_state->degamma_tf) { + nv_drm_plane_state->degamma_tf = val; + 
nv_drm_plane_state->degamma_changed = true; + } + return 0; + } else if (property == nv_dev->nv_plane_degamma_lut_property) { + int ret = nv_drm_atomic_replace_property_blob_from_id( + nv_dev->dev, + &nv_drm_plane_state->degamma_lut, + val, + sizeof(struct drm_color_lut) * NVKMS_LUT_ARRAY_SIZE, + &replaced); + if (replaced) { + nv_drm_plane_state->degamma_changed = true; + } + return ret; + } else if (property == nv_dev->nv_plane_degamma_multiplier_property) { + if (val != nv_drm_plane_state->degamma_multiplier) { + nv_drm_plane_state->degamma_multiplier = val; + nv_drm_plane_state->degamma_changed = true; + } + return 0; + } else if (property == nv_dev->nv_plane_tmo_lut_property) { + int ret = nv_drm_atomic_replace_property_blob_from_id( + nv_dev->dev, + &nv_drm_plane_state->tmo_lut, + val, + sizeof(struct drm_color_lut) * NVKMS_LUT_ARRAY_SIZE, + &replaced); + if (replaced) { + nv_drm_plane_state->tmo_changed = true; + } + return ret; + } + + return -EINVAL; +} + +static int nv_drm_plane_atomic_get_property( + struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val) +{ + struct nv_drm_device *nv_dev = to_nv_device(plane->dev); + const struct nv_drm_plane_state *nv_drm_plane_state = + to_nv_drm_plane_state_const(state); + + if (property == nv_dev->nv_out_fence_property) { + return 0; + } else if (property == nv_dev->nv_input_colorspace_property) { + *val = nv_drm_plane_state->input_colorspace; + return 0; + } +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + else if (property == nv_dev->nv_hdr_output_metadata_property) { + *val = nv_drm_plane_state->hdr_output_metadata ? + nv_drm_plane_state->hdr_output_metadata->base.id : 0; + return 0; + } +#endif + else if (property == nv_dev->nv_plane_lms_ctm_property) { + *val = nv_drm_plane_state->lms_ctm ? + nv_drm_plane_state->lms_ctm->base.id : 0; + return 0; + } else if (property == nv_dev->nv_plane_lms_to_itp_ctm_property) { + *val = nv_drm_plane_state->lms_to_itp_ctm ? + nv_drm_plane_state->lms_to_itp_ctm->base.id : 0; + return 0; + } else if (property == nv_dev->nv_plane_itp_to_lms_ctm_property) { + *val = nv_drm_plane_state->itp_to_lms_ctm ? + nv_drm_plane_state->itp_to_lms_ctm->base.id : 0; + return 0; + } else if (property == nv_dev->nv_plane_blend_ctm_property) { + *val = nv_drm_plane_state->blend_ctm ? + nv_drm_plane_state->blend_ctm->base.id : 0; + return 0; + } else if (property == nv_dev->nv_plane_degamma_tf_property) { + *val = nv_drm_plane_state->degamma_tf; + return 0; + } else if (property == nv_dev->nv_plane_degamma_lut_property) { + *val = nv_drm_plane_state->degamma_lut ? + nv_drm_plane_state->degamma_lut->base.id : 0; + return 0; + } else if (property == nv_dev->nv_plane_degamma_multiplier_property) { + *val = nv_drm_plane_state->degamma_multiplier; + return 0; + } else if (property == nv_dev->nv_plane_tmo_lut_property) { + *val = nv_drm_plane_state->tmo_lut ? + nv_drm_plane_state->tmo_lut->base.id : 0; + return 0; + } else if ((property == nv_dev->nv_plane_degamma_lut_size_property) || + (property == nv_dev->nv_plane_tmo_lut_size_property)) { + /* + * This shouldn't be necessary, because read-only properties are stored + * in obj->properties->values[]. To be safe, check for it anyway. + */ + *val = NVKMS_LUT_ARRAY_SIZE; + return 0; + } + + return -EINVAL; +} + +/** + * nv_drm_plane_atomic_reset - plane state reset hook + * @plane: DRM plane + * + * Allocate an empty DRM plane state. 
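+ * + * (Editor's note: drm_atomic_helper_plane_reset() allocates a bare + * struct drm_plane_state; its contents are copied below into the larger + * nv_drm_plane_state allocated here and the helper's allocation is freed, + * so the subclassed state replaces it.)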
+ */ +static void nv_drm_plane_atomic_reset(struct drm_plane *plane) +{ + struct nv_drm_plane_state *nv_plane_state = + nv_drm_calloc(1, sizeof(*nv_plane_state)); + + if (!nv_plane_state) { + return; + } + + drm_atomic_helper_plane_reset(plane); + + /* + * The drm atomic helper function allocates a state object that is the wrong + * size. Copy its contents into the one we allocated above and replace the + * pointer. + */ + if (plane->state) { + nv_plane_state->base = *plane->state; + kfree(plane->state); + plane->state = &nv_plane_state->base; + } else { + kfree(nv_plane_state); + } +} + + +static struct drm_plane_state * +nv_drm_plane_atomic_duplicate_state(struct drm_plane *plane) +{ + struct nv_drm_plane_state *nv_old_plane_state = + to_nv_drm_plane_state(plane->state); + struct nv_drm_plane_state *nv_plane_state = + nv_drm_calloc(1, sizeof(*nv_plane_state)); + + if (nv_plane_state == NULL) { + return NULL; + } + + __drm_atomic_helper_plane_duplicate_state(plane, &nv_plane_state->base); + + nv_plane_state->fd_user_ptr = nv_old_plane_state->fd_user_ptr; + nv_plane_state->input_colorspace = nv_old_plane_state->input_colorspace; + +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + nv_plane_state->hdr_output_metadata = nv_old_plane_state->hdr_output_metadata; + if (nv_plane_state->hdr_output_metadata) { + drm_property_blob_get(nv_plane_state->hdr_output_metadata); + } +#endif + + nv_plane_state->lms_ctm = nv_old_plane_state->lms_ctm; + if (nv_plane_state->lms_ctm) { + drm_property_blob_get(nv_plane_state->lms_ctm); + } + + nv_plane_state->lms_to_itp_ctm = nv_old_plane_state->lms_to_itp_ctm; + if (nv_plane_state->lms_to_itp_ctm) { + drm_property_blob_get(nv_plane_state->lms_to_itp_ctm); + } + + nv_plane_state->itp_to_lms_ctm = nv_old_plane_state->itp_to_lms_ctm; + if (nv_plane_state->itp_to_lms_ctm) { + drm_property_blob_get(nv_plane_state->itp_to_lms_ctm); + } + + nv_plane_state->blend_ctm = nv_old_plane_state->blend_ctm; + if (nv_plane_state->blend_ctm) { + drm_property_blob_get(nv_plane_state->blend_ctm); + } + + nv_plane_state->degamma_tf = nv_old_plane_state->degamma_tf; + nv_plane_state->degamma_lut = nv_old_plane_state->degamma_lut; + if (nv_plane_state->degamma_lut) { + drm_property_blob_get(nv_plane_state->degamma_lut); + } + nv_plane_state->degamma_multiplier = nv_old_plane_state->degamma_multiplier; + nv_plane_state->degamma_changed = false; + nv_plane_state->degamma_drm_lut_surface = + nv_old_plane_state->degamma_drm_lut_surface; + if (nv_plane_state->degamma_drm_lut_surface) { + kref_get(&nv_plane_state->degamma_drm_lut_surface->base.refcount); + } + + nv_plane_state->tmo_lut = nv_old_plane_state->tmo_lut; + if (nv_plane_state->tmo_lut) { + drm_property_blob_get(nv_plane_state->tmo_lut); + } + nv_plane_state->tmo_changed = false; + nv_plane_state->tmo_drm_lut_surface = + nv_old_plane_state->tmo_drm_lut_surface; + if (nv_plane_state->tmo_drm_lut_surface) { + kref_get(&nv_plane_state->tmo_drm_lut_surface->base.refcount); + } + + return &nv_plane_state->base; +} + +static inline void __nv_drm_plane_atomic_destroy_state( + struct drm_plane_state *state) +{ + struct nv_drm_plane_state *nv_drm_plane_state = + to_nv_drm_plane_state(state); + __drm_atomic_helper_plane_destroy_state(state); + +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + drm_property_blob_put(nv_drm_plane_state->hdr_output_metadata); +#endif + drm_property_blob_put(nv_drm_plane_state->lms_ctm); + drm_property_blob_put(nv_drm_plane_state->lms_to_itp_ctm); + drm_property_blob_put(nv_drm_plane_state->itp_to_lms_ctm); + 
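/* + * Editor's note: drm_property_blob_put() simply returns on a NULL blob, + * so neither this run of puts nor the ones below need NULL checks. + */ + 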
drm_property_blob_put(nv_drm_plane_state->blend_ctm); + + drm_property_blob_put(nv_drm_plane_state->degamma_lut); + if (nv_drm_plane_state->degamma_drm_lut_surface != NULL) { + kref_put(&nv_drm_plane_state->degamma_drm_lut_surface->base.refcount, + free_drm_lut_surface); + } + + drm_property_blob_put(nv_drm_plane_state->tmo_lut); + if (nv_drm_plane_state->tmo_drm_lut_surface != NULL) { + kref_put(&nv_drm_plane_state->tmo_drm_lut_surface->base.refcount, + free_drm_lut_surface); + } +} + +static void nv_drm_plane_atomic_destroy_state( + struct drm_plane *plane, + struct drm_plane_state *state) +{ + __nv_drm_plane_atomic_destroy_state(state); + + nv_drm_free(to_nv_drm_plane_state(state)); +} + +static const struct drm_plane_funcs nv_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = nv_drm_plane_destroy, + .reset = nv_drm_plane_atomic_reset, + .atomic_get_property = nv_drm_plane_atomic_get_property, + .atomic_set_property = nv_drm_plane_atomic_set_property, + .atomic_duplicate_state = nv_drm_plane_atomic_duplicate_state, + .atomic_destroy_state = nv_drm_plane_atomic_destroy_state, + .format_mod_supported = nv_drm_plane_format_mod_supported, +}; + +static const struct drm_plane_helper_funcs nv_plane_helper_funcs = { + .atomic_check = nv_drm_plane_atomic_check, +}; + +static void nv_drm_crtc_destroy(struct drm_crtc *crtc) +{ + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + + drm_crtc_cleanup(crtc); + + nv_drm_free(nv_crtc); +} + +static inline bool nv_drm_crtc_duplicate_req_head_modeset_config( + const struct NvKmsKapiHeadRequestedConfig *old, + struct NvKmsKapiHeadRequestedConfig *new) +{ + uint32_t i; + + /* + * Do not duplicate fields like the 'modeChanged' flags, which express the + * delta between the new and the previous/old configuration: at this point + * the new configuration has not yet diverged from the old one.
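+ * For example, flags.modeChanged is left NV_FALSE here; it is only set + * once nv_drm_crtc_atomic_check() observes crtc_state->mode_changed.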
+ */ + memset(new, 0, sizeof(*new)); + new->modeSetConfig = old->modeSetConfig; + + for (i = 0; i < ARRAY_SIZE(old->layerRequestedConfig); i++) { + new->layerRequestedConfig[i].config = + old->layerRequestedConfig[i].config; + } + + if (old->modeSetConfig.lut.input.pRamps) { + new->modeSetConfig.lut.input.pRamps = + nv_drm_calloc(1, sizeof(*new->modeSetConfig.lut.input.pRamps)); + + if (!new->modeSetConfig.lut.input.pRamps) { + return false; + } + *new->modeSetConfig.lut.input.pRamps = + *old->modeSetConfig.lut.input.pRamps; + } + if (old->modeSetConfig.lut.output.pRamps) { + new->modeSetConfig.lut.output.pRamps = + nv_drm_calloc(1, sizeof(*new->modeSetConfig.lut.output.pRamps)); + + if (!new->modeSetConfig.lut.output.pRamps) { + /* + * new->modeSetConfig.lut.input.pRamps is either NULL or it was + * just allocated + */ + nv_drm_free(new->modeSetConfig.lut.input.pRamps); + new->modeSetConfig.lut.input.pRamps = NULL; + return false; + } + *new->modeSetConfig.lut.output.pRamps = + *old->modeSetConfig.lut.output.pRamps; + } + return true; +} + +static inline struct nv_drm_crtc_state *nv_drm_crtc_state_alloc(void) +{ + struct nv_drm_crtc_state *nv_state = nv_drm_calloc(1, sizeof(*nv_state)); + int i; + + if (nv_state == NULL) { + return NULL; + } + + nv_state->req_config.modeSetConfig.olutFpNormScale = NVKMS_OLUT_FP_NORM_SCALE_DEFAULT; + for (i = 0; i < ARRAY_SIZE(nv_state->req_config.layerRequestedConfig); i++) { + plane_config_clear(&nv_state->req_config.layerRequestedConfig[i].config); + } + return nv_state; +} + + +/** + * nv_drm_atomic_crtc_reset - crtc state reset hook + * @crtc: DRM crtc + * + * Allocate an empty DRM crtc state. + */ +static void nv_drm_atomic_crtc_reset(struct drm_crtc *crtc) +{ + struct nv_drm_crtc_state *nv_state = nv_drm_crtc_state_alloc(); + + if (!nv_state) { + return; + } + + drm_atomic_helper_crtc_reset(crtc); + + /* + * The drm atomic helper function allocates a state object that is the wrong + * size. Copy its contents into the one we allocated above and replace the + * pointer. + */ + if (crtc->state) { + nv_state->base = *crtc->state; + kfree(crtc->state); + crtc->state = &nv_state->base; + } else { + kfree(nv_state); + } +} + +/** + * nv_drm_atomic_crtc_duplicate_state - crtc state duplicate hook + * @crtc: DRM crtc + * + * Allocate and associate flip state with the DRM crtc state; this flip state + * is consumed when the atomic update is committed to hardware by + * nv_drm_atomic_helper_commit_tail(). + */ +static struct drm_crtc_state* +nv_drm_atomic_crtc_duplicate_state(struct drm_crtc *crtc) +{ + struct nv_drm_crtc_state *nv_old_state = to_nv_crtc_state(crtc->state); + struct nv_drm_crtc_state *nv_state = nv_drm_crtc_state_alloc(); + + if (nv_state == NULL) { + return NULL; + } + + if ((nv_state->nv_flip = + nv_drm_calloc(1, sizeof(*(nv_state->nv_flip)))) == NULL) { + nv_drm_free(nv_state); + return NULL; + } + + INIT_LIST_HEAD(&nv_state->nv_flip->list_entry); + INIT_LIST_HEAD(&nv_state->nv_flip->deferred_flip_list); + + /* + * nv_drm_crtc_duplicate_req_head_modeset_config() potentially allocates + * nv_state->req_config.modeSetConfig.lut.{in,out}put.pRamps, so they should + * be freed in any following failure paths.
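+ * (nv_drm_atomic_crtc_destroy_state() frees both ramp allocations + * unconditionally, which also covers cleanup after a failed commit.)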
+ */ + if (!nv_drm_crtc_duplicate_req_head_modeset_config( + &nv_old_state->req_config, + &nv_state->req_config)) { + + nv_drm_free(nv_state->nv_flip); + nv_drm_free(nv_state); + return NULL; + } + + __drm_atomic_helper_crtc_duplicate_state(crtc, &nv_state->base); + + nv_state->regamma_tf = nv_old_state->regamma_tf; + nv_state->regamma_lut = nv_old_state->regamma_lut; + if (nv_state->regamma_lut) { + drm_property_blob_get(nv_state->regamma_lut); + } + nv_state->regamma_divisor = nv_old_state->regamma_divisor; + nv_state->regamma_drm_lut_surface = nv_old_state->regamma_drm_lut_surface; + if (nv_state->regamma_drm_lut_surface) { + kref_get(&nv_state->regamma_drm_lut_surface->base.refcount); + } + nv_state->regamma_changed = false; + + return &nv_state->base; +} + +/** + * nv_drm_atomic_crtc_destroy_state - crtc state destroy hook + * @crtc: DRM crtc + * @state: DRM crtc state object to destroy + * + * Destroy the flip state associated with the given crtc state if it hasn't + * been consumed, e.g. because the atomic commit failed. + */ +static void nv_drm_atomic_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct nv_drm_crtc_state *nv_state = to_nv_crtc_state(state); + + if (nv_state->nv_flip != NULL) { + nv_drm_free(nv_state->nv_flip); + nv_state->nv_flip = NULL; + } + + __drm_atomic_helper_crtc_destroy_state(&nv_state->base); + + drm_property_blob_put(nv_state->regamma_lut); + if (nv_state->regamma_drm_lut_surface != NULL) { + kref_put(&nv_state->regamma_drm_lut_surface->base.refcount, + free_drm_lut_surface); + } + + nv_drm_free(nv_state->req_config.modeSetConfig.lut.input.pRamps); + nv_drm_free(nv_state->req_config.modeSetConfig.lut.output.pRamps); + + nv_drm_free(nv_state); +} + +static struct drm_crtc_funcs nv_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .reset = nv_drm_atomic_crtc_reset, + .destroy = nv_drm_crtc_destroy, + .atomic_get_property = nv_drm_atomic_crtc_get_property, + .atomic_set_property = nv_drm_atomic_crtc_set_property, + .atomic_duplicate_state = nv_drm_atomic_crtc_duplicate_state, + .atomic_destroy_state = nv_drm_atomic_crtc_destroy_state, +#if defined(NV_DRM_ATOMIC_HELPER_LEGACY_GAMMA_SET_PRESENT) + .gamma_set = drm_atomic_helper_legacy_gamma_set, +#endif +}; + +static int head_modeset_config_attach_connector( + struct nv_drm_connector *nv_connector, + struct NvKmsKapiHeadModeSetConfig *head_modeset_config) +{ + struct nv_drm_encoder *nv_encoder = nv_connector->nv_detected_encoder; + + if (NV_DRM_WARN(nv_encoder == NULL || + head_modeset_config->numDisplays >= + ARRAY_SIZE(head_modeset_config->displays))) { + return -EINVAL; + } + head_modeset_config->displays[head_modeset_config->numDisplays++] = + nv_encoder->hDisplay; + return 0; +} + +static int color_mgmt_config_copy_lut(struct NvKmsLutRamps *nvkms_lut, + struct drm_color_lut *drm_lut, + uint64_t lut_len) +{ + uint64_t i = 0; + if (lut_len != NVKMS_LUT_ARRAY_SIZE) { + return -EINVAL; + } + + /* + * Both NvKms and drm LUT values are 16-bit linear values. NvKms LUT ramps + * are in arrays in a single struct while drm LUT ramps are an array of + * structs.
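+ * + * Illustratively, the loop below is an array-of-structs to + * struct-of-arrays conversion (field layout per the respective headers): + * + * drm: struct drm_color_lut { __u16 red, green, blue, reserved; } lut[N]; + * NvKms: struct NvKmsLutRamps { NvU16 red[N], green[N], blue[N]; };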
+ */ + for (i = 0; i < lut_len; i++) { + nvkms_lut->red[i] = drm_lut[i].red; + nvkms_lut->green[i] = drm_lut[i].green; + nvkms_lut->blue[i] = drm_lut[i].blue; + } + return 0; +} + +static int color_mgmt_config_set_luts(struct nv_drm_crtc_state *nv_crtc_state, + struct NvKmsKapiHeadRequestedConfig *req_config) +{ + struct NvKmsKapiHeadModeSetConfig *modeset_config = + &req_config->modeSetConfig; + struct drm_crtc_state *crtc_state = &nv_crtc_state->base; + int ret = 0; + + /* + * According to the comment in the Linux kernel's + * drivers/gpu/drm/drm_color_mgmt.c, if either property is NULL, that LUT + * needs to be changed to a linear LUT + * + * On failure, any LUT ramps allocated in this function are freed when the + * subsequent atomic state cleanup calls nv_drm_atomic_crtc_destroy_state. + */ + + if (crtc_state->degamma_lut) { + struct drm_color_lut *degamma_lut = NULL; + uint64_t degamma_len = 0; + + if (!modeset_config->lut.input.pRamps) { + modeset_config->lut.input.pRamps = + nv_drm_calloc(1, sizeof(*modeset_config->lut.input.pRamps)); + if (!modeset_config->lut.input.pRamps) { + return -ENOMEM; + } + } + + degamma_lut = (struct drm_color_lut *)crtc_state->degamma_lut->data; + degamma_len = crtc_state->degamma_lut->length / + sizeof(struct drm_color_lut); + + if ((ret = color_mgmt_config_copy_lut(modeset_config->lut.input.pRamps, + degamma_lut, + degamma_len)) != 0) { + return ret; + } + + modeset_config->lut.input.depth = 30; /* specify the full LUT */ + modeset_config->lut.input.start = 0; + modeset_config->lut.input.end = degamma_len - 1; + } else { + /* setting input.end to 0 is equivalent to disabling the LUT, which + * should be equivalent to a linear LUT */ + modeset_config->lut.input.depth = 30; /* specify the full LUT */ + modeset_config->lut.input.start = 0; + modeset_config->lut.input.end = 0; + + nv_drm_free(modeset_config->lut.input.pRamps); + modeset_config->lut.input.pRamps = NULL; + } + req_config->flags.legacyIlutChanged = NV_TRUE; + + if (crtc_state->gamma_lut) { + struct drm_color_lut *gamma_lut = NULL; + uint64_t gamma_len = 0; + + if (!modeset_config->lut.output.pRamps) { + modeset_config->lut.output.pRamps = + nv_drm_calloc(1, sizeof(*modeset_config->lut.output.pRamps)); + if (!modeset_config->lut.output.pRamps) { + return -ENOMEM; + } + } + + gamma_lut = (struct drm_color_lut *)crtc_state->gamma_lut->data; + gamma_len = crtc_state->gamma_lut->length / + sizeof(struct drm_color_lut); + + if ((ret = color_mgmt_config_copy_lut(modeset_config->lut.output.pRamps, + gamma_lut, + gamma_len)) != 0) { + return ret; + } + + modeset_config->lut.output.enabled = NV_TRUE; + } else { + /* disabling the output LUT should be equivalent to setting a linear + * LUT */ + modeset_config->lut.output.enabled = NV_FALSE; + + nv_drm_free(modeset_config->lut.output.pRamps); + modeset_config->lut.output.pRamps = NULL; + } + req_config->flags.legacyOlutChanged = NV_TRUE; + + return 0; +} + +/** + * nv_drm_crtc_atomic_check() can fail after it has modified + * the 'nv_drm_crtc_state::req_config', that is fine because 'nv_drm_crtc_state' + * will be discarded if ->atomic_check() fails. 
+ */ +#if defined(NV_DRM_CRTC_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG) +static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) +#else +static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *crtc_state) +#endif +{ +#if defined(NV_DRM_CRTC_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG) + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, crtc); +#endif + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + struct nv_drm_device *nv_dev = to_nv_device(crtc->dev); + struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state); + struct NvKmsKapiHeadRequestedConfig *req_config = + &nv_crtc_state->req_config; + int ret = 0; + + if (crtc_state->mode_changed) { + drm_mode_to_nvkms_display_mode(&crtc_state->mode, + &req_config->modeSetConfig.mode); + req_config->flags.modeChanged = NV_TRUE; + } + + if (crtc_state->connectors_changed) { + struct NvKmsKapiHeadModeSetConfig *config = &req_config->modeSetConfig; + struct drm_connector *connector; + struct drm_connector_state *connector_state; + int j; + + config->numDisplays = 0; + + memset(config->displays, 0, sizeof(config->displays)); + + req_config->flags.displaysChanged = NV_TRUE; + + nv_drm_for_each_connector_in_state(crtc_state->state, + connector, connector_state, j) { + if (connector_state->crtc != crtc) { + continue; + } + + if ((ret = head_modeset_config_attach_connector( + to_nv_connector(connector), + config)) != 0) { + return ret; + } + } + } + + if (crtc_state->active_changed) { + req_config->modeSetConfig.bActive = crtc_state->active; + req_config->flags.activeChanged = NV_TRUE; + } + +#if defined(NV_DRM_CRTC_STATE_HAS_VRR_ENABLED) + req_config->modeSetConfig.vrrEnabled = crtc_state->vrr_enabled; +#endif + + if (crtc_state->color_mgmt_changed) { + if ((ret = color_mgmt_config_set_luts(nv_crtc_state, req_config)) != 0) { + return ret; + } + } + + if (nv_crtc_state->regamma_changed) { + if (nv_crtc_state->regamma_drm_lut_surface != NULL) { + kref_put(&nv_crtc_state->regamma_drm_lut_surface->base.refcount, + free_drm_lut_surface); + nv_crtc_state->regamma_drm_lut_surface = NULL; + } + + if (nv_crtc->olut_caps.vssSupport == NVKMS_LUT_VSS_SUPPORTED) { + if ((nv_crtc_state->regamma_tf != NV_DRM_TRANSFER_FUNCTION_DEFAULT) || + (nv_crtc_state->regamma_lut != NULL)) { + + nv_crtc_state->regamma_drm_lut_surface = + create_drm_olut_surface_vss(nv_dev, nv_crtc, + nv_crtc_state); + if (nv_crtc_state->regamma_drm_lut_surface == NULL) { + return -1; + } + } + } else { + WARN_ON(nv_crtc->olut_caps.vssSupport != NVKMS_LUT_VSS_NOT_SUPPORTED); + if (nv_crtc_state->regamma_lut != NULL) { + nv_crtc_state->regamma_drm_lut_surface = + create_drm_olut_surface_legacy(nv_dev, nv_crtc, + nv_crtc_state); + if (nv_crtc_state->regamma_drm_lut_surface == NULL) { + return -1; + } + } + } + + if (nv_crtc_state->regamma_drm_lut_surface != NULL) { + req_config->modeSetConfig.olut.enabled = NV_TRUE; + req_config->modeSetConfig.olut.lutSurface = + nv_crtc_state->regamma_drm_lut_surface->base.nvkms_surface; + req_config->modeSetConfig.olut.offset = 0; + req_config->modeSetConfig.olut.vssSegments = + nv_crtc_state->regamma_drm_lut_surface->properties.vssSegments; + req_config->modeSetConfig.olut.lutEntries = + nv_crtc_state->regamma_drm_lut_surface->properties.lutEntries; + } else { + req_config->modeSetConfig.olut.enabled = NV_FALSE; + req_config->modeSetConfig.olut.lutSurface = NULL; + req_config->modeSetConfig.olut.offset = 0; + req_config->modeSetConfig.olut.vssSegments = 0; + 
req_config->modeSetConfig.olut.lutEntries = 0; + } + req_config->flags.olutChanged = NV_TRUE; + + /* + * Range property is configured to ensure sign bit = 0 and + * value is >= 1, but it may still default to 0 if it's unsupported. + */ + WARN_ON(nv_crtc_state->regamma_divisor & (((NvU64) 1) << 63)); + + req_config->flags.olutFpNormScaleChanged = NV_TRUE; + if (nv_crtc_state->regamma_divisor < (((NvU64) 1) << 32)) { + req_config->modeSetConfig.olutFpNormScale = + NVKMS_OLUT_FP_NORM_SCALE_DEFAULT; + } else { + /* + * Since the sign bit of the regamma_divisor is unset, we treat it as + * unsigned and do 32.32 unsigned fixed-point division to get the + * fpNormScale. + */ + req_config->modeSetConfig.olutFpNormScale = + (NvU32)(((NvU64)NVKMS_OLUT_FP_NORM_SCALE_DEFAULT << 32) / + nv_crtc_state->regamma_divisor); + } + } + + return ret; +} + +static bool +nv_drm_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static const struct drm_crtc_helper_funcs nv_crtc_helper_funcs = { + .atomic_check = nv_drm_crtc_atomic_check, + .mode_fixup = nv_drm_crtc_mode_fixup, +}; + +static void nv_drm_crtc_install_properties( + struct drm_crtc *crtc) +{ + struct nv_drm_device *nv_dev = to_nv_device(crtc->dev); + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc->state); + + if (nv_crtc->olut_caps.supported) { + if (nv_crtc->olut_caps.vssSupport == NVKMS_LUT_VSS_SUPPORTED) { + if (nv_dev->nv_crtc_regamma_tf_property) { + drm_object_attach_property( + &crtc->base, nv_dev->nv_crtc_regamma_tf_property, + NV_DRM_TRANSFER_FUNCTION_DEFAULT); + } + if (nv_dev->nv_crtc_regamma_divisor_property) { + /* Default to 1 */ + nv_crtc_state->regamma_divisor = (((NvU64) 1) << 32); + drm_object_attach_property( + &crtc->base, nv_dev->nv_crtc_regamma_divisor_property, + nv_crtc_state->regamma_divisor); + } + } + if (nv_dev->nv_crtc_regamma_lut_property) { + drm_object_attach_property( + &crtc->base, nv_dev->nv_crtc_regamma_lut_property, 0); + } + if (nv_dev->nv_crtc_regamma_lut_size_property) { + drm_object_attach_property( + &crtc->base, nv_dev->nv_crtc_regamma_lut_size_property, + NVKMS_LUT_ARRAY_SIZE); + } + } +} + +static void nv_drm_plane_install_properties( + struct drm_plane *plane, + NvBool supportsICtCp) +{ + struct nv_drm_device *nv_dev = to_nv_device(plane->dev); + struct nv_drm_plane *nv_plane = to_nv_plane(plane); + struct nv_drm_plane_state *nv_plane_state = + to_nv_drm_plane_state(plane->state); + + if (nv_dev->nv_out_fence_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_out_fence_property, 0); + } + + if (nv_dev->nv_input_colorspace_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_input_colorspace_property, + NV_DRM_INPUT_COLOR_SPACE_NONE); + } + + if (supportsICtCp) { +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + if (nv_dev->nv_hdr_output_metadata_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_hdr_output_metadata_property, 0); + } +#endif + } + + /* + * Per-plane HDR properties get us dangerously close to the 24 property + * limit on kernels that don't support NV_DRM_USE_EXTENDED_PROPERTIES. 
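+ * (The legacy limit is DRM_OBJECT_MAX_PROPERTY, historically 24 properties + * per mode object; drm_object_attach_property() WARNs and drops any + * attachment beyond it.)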
+ */ + if (NV_DRM_USE_EXTENDED_PROPERTIES) { + if (supportsICtCp) { + if (nv_dev->nv_plane_lms_ctm_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_plane_lms_ctm_property, 0); + } + + if (nv_dev->nv_plane_lms_to_itp_ctm_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_plane_lms_to_itp_ctm_property, 0); + } + + if (nv_dev->nv_plane_itp_to_lms_ctm_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_plane_itp_to_lms_ctm_property, 0); + } + + WARN_ON(!nv_plane->tmo_caps.supported); + if (nv_dev->nv_plane_tmo_lut_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_plane_tmo_lut_property, 0); + } + if (nv_dev->nv_plane_tmo_lut_size_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_plane_tmo_lut_size_property, + NVKMS_LUT_ARRAY_SIZE); + } + } + + if (nv_dev->nv_plane_blend_ctm_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_plane_blend_ctm_property, 0); + } + + if (nv_plane->ilut_caps.supported) { + if (nv_plane->ilut_caps.vssSupport == NVKMS_LUT_VSS_SUPPORTED) { + if (nv_dev->nv_plane_degamma_tf_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_plane_degamma_tf_property, + NV_DRM_TRANSFER_FUNCTION_DEFAULT); + } + if (nv_dev->nv_plane_degamma_multiplier_property) { + /* Default to 1 in S31.32 Sign-Magnitude Format */ + nv_plane_state->degamma_multiplier = ((uint64_t) 1) << 32; + drm_object_attach_property( + &plane->base, nv_dev->nv_plane_degamma_multiplier_property, + nv_plane_state->degamma_multiplier); + } + } + if (nv_dev->nv_plane_degamma_lut_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_plane_degamma_lut_property, 0); + } + if (nv_dev->nv_plane_degamma_lut_size_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_plane_degamma_lut_size_property, + NVKMS_LUT_ARRAY_SIZE); + } + } + } +} + +static void +__nv_drm_plane_create_alpha_blending_properties(struct drm_plane *plane, + NvU32 validCompModes) +{ +#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) + if ((validCompModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA)) != 0x0 && + (validCompModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA)) != 0x0) { + + drm_plane_create_alpha_property(plane); + drm_plane_create_blend_mode_property(plane, + NVBIT(DRM_MODE_BLEND_PREMULTI) | + NVBIT(DRM_MODE_BLEND_COVERAGE)); + } else if ((validCompModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA)) != 0x0 && + (validCompModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA)) != 0x0) { + + drm_plane_create_blend_mode_property(plane, + NVBIT(DRM_MODE_BLEND_PREMULTI) | + NVBIT(DRM_MODE_BLEND_COVERAGE)); + } +#endif +} + +static void +__nv_drm_plane_create_rotation_property(struct drm_plane *plane, + NvU16 validLayerRRTransforms) +{ + enum NvKmsRotation curRotation; + NvU32 supported_rotations = 0; + struct NvKmsRRParams rrParams = { + .rotation = NVKMS_ROTATION_0, + .reflectionX = true, + .reflectionY = true, + }; + + if ((NVBIT(NvKmsRRParamsToCapBit(&rrParams)) & + validLayerRRTransforms) != 0) { + supported_rotations |= DRM_MODE_REFLECT_X; + supported_rotations |= DRM_MODE_REFLECT_Y; + } + + rrParams.reflectionX = false; + rrParams.reflectionY = false; + + for (curRotation = NVKMS_ROTATION_MIN; + curRotation <= NVKMS_ROTATION_MAX; curRotation++) { + rrParams.rotation = curRotation; + if ((NVBIT(NvKmsRRParamsToCapBit(&rrParams)) & + validLayerRRTransforms) == 0) { + continue; + } + + switch (curRotation) { + case NVKMS_ROTATION_0: + 
supported_rotations |= DRM_MODE_ROTATE_0; + break; + case NVKMS_ROTATION_90: + supported_rotations |= DRM_MODE_ROTATE_90; + break; + case NVKMS_ROTATION_180: + supported_rotations |= DRM_MODE_ROTATE_180; + break; + case NVKMS_ROTATION_270: + supported_rotations |= DRM_MODE_ROTATE_270; + break; + default: + break; + } + + } + + if (supported_rotations != 0) { + drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, + supported_rotations); + } +} + +static struct drm_plane* +nv_drm_plane_create(struct drm_device *dev, + enum drm_plane_type plane_type, + uint32_t layer_idx, + NvU32 head, + const struct NvKmsKapiDeviceResourcesInfo *pResInfo) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + const NvU64 linear_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, + }; + enum NvKmsCompositionBlendingMode defaultCompositionMode; + struct nv_drm_plane *nv_plane = NULL; + struct nv_drm_plane_state *nv_plane_state = NULL; + struct drm_plane *plane = NULL; + int ret = -ENOMEM; + uint32_t *formats = NULL; + unsigned int formats_count = 0; + const NvU32 validCompositionModes = + (plane_type == DRM_PLANE_TYPE_CURSOR) ? + pResInfo->caps.validCursorCompositionModes : + pResInfo->caps.layer[layer_idx].validCompositionModes; + const long unsigned int nvkms_formats_mask = + (plane_type == DRM_PLANE_TYPE_CURSOR) ? + pResInfo->caps.supportedCursorSurfaceMemoryFormats : + pResInfo->supportedSurfaceMemoryFormats[layer_idx]; + const NvU16 validLayerRRTransforms = + (plane_type == DRM_PLANE_TYPE_CURSOR) ? + 0x0 : pResInfo->caps.layer[layer_idx].validRRTransforms; + + if ((validCompositionModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE)) != 0x0) { + defaultCompositionMode = NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE; + } else if ((validCompositionModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA)) != 0x0) { + defaultCompositionMode = NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA; + } else { + goto failed; + } + + formats = + nv_drm_format_array_alloc(&formats_count, + nvkms_formats_mask); + if (formats == NULL) { + goto failed; + } + + if ((nv_plane = nv_drm_calloc(1, sizeof(*nv_plane))) == NULL) { + goto failed_plane_alloc; + } + plane = &nv_plane->base; + + nv_plane->defaultCompositionMode = defaultCompositionMode; + nv_plane->layer_idx = layer_idx; + + if ((nv_plane_state = + nv_drm_calloc(1, sizeof(*nv_plane_state))) == NULL) { + goto failed_state_alloc; + } + + plane->state = &nv_plane_state->base; + plane->state->plane = plane; + + /* + * Possible_crtcs for primary and cursor plane is zero because + * drm_crtc_init_with_planes() will assign the plane's possible_crtcs + * after the crtc is successfully initialized. + */ + ret = drm_universal_plane_init( + dev, + plane, + (plane_type == DRM_PLANE_TYPE_OVERLAY) ? + (1 << head) : 0, + &nv_plane_funcs, + formats, formats_count, + (plane_type == DRM_PLANE_TYPE_CURSOR) ? 
+ linear_modifiers : nv_dev->modifiers, + plane_type, + NULL); + if (ret != 0) { + goto failed_plane_init; + } + +#if defined(NV_DRM_PLANE_CREATE_COLOR_PROPERTIES_PRESENT) + if (pResInfo->caps.supportsInputColorSpace && + pResInfo->caps.supportsInputColorRange) { + + nv_plane->supportsColorProperties = true; + + drm_plane_create_color_properties( + plane, + NVBIT(DRM_COLOR_YCBCR_BT601) | + NVBIT(DRM_COLOR_YCBCR_BT709) | + NVBIT(DRM_COLOR_YCBCR_BT2020), + NVBIT(DRM_COLOR_YCBCR_FULL_RANGE) | + NVBIT(DRM_COLOR_YCBCR_LIMITED_RANGE), + DRM_COLOR_YCBCR_BT709, + DRM_COLOR_YCBCR_FULL_RANGE + ); + } else { + nv_plane->supportsColorProperties = false; + } +#else + nv_plane->supportsColorProperties = false; +#endif + + drm_plane_helper_add(plane, &nv_plane_helper_funcs); + + if (plane_type != DRM_PLANE_TYPE_CURSOR) { + nv_plane->ilut_caps = pResInfo->lutCaps.layer[layer_idx].ilut; + nv_plane->tmo_caps = pResInfo->lutCaps.layer[layer_idx].tmo; + + nv_drm_plane_install_properties( + plane, + pResInfo->supportsICtCp[layer_idx]); + } + + __nv_drm_plane_create_alpha_blending_properties( + plane, + validCompositionModes); + + __nv_drm_plane_create_rotation_property( + plane, + validLayerRRTransforms); + + nv_drm_free(formats); + + return plane; + +failed_plane_init: + nv_drm_free(nv_plane_state); + +failed_state_alloc: + nv_drm_free(nv_plane); + +failed_plane_alloc: + nv_drm_free(formats); + +failed: + return ERR_PTR(ret); +} + +/* + * Add drm crtc for given head and supported enum NvKmsSurfaceMemoryFormats. + */ +static struct drm_crtc *__nv_drm_crtc_create(struct nv_drm_device *nv_dev, + struct drm_plane *primary_plane, + struct drm_plane *cursor_plane, + unsigned int head, + const struct NvKmsKapiDeviceResourcesInfo *pResInfo) +{ + struct nv_drm_crtc *nv_crtc = NULL; + struct nv_drm_crtc_state *nv_state = NULL; + int ret = -ENOMEM; + + if ((nv_crtc = nv_drm_calloc(1, sizeof(*nv_crtc))) == NULL) { + goto failed; + } + + nv_state = nv_drm_crtc_state_alloc(); + if (nv_state == NULL) { + goto failed_state_alloc; + } + + nv_crtc->base.state = &nv_state->base; + nv_crtc->base.state->crtc = &nv_crtc->base; + + nv_crtc->head = head; + INIT_LIST_HEAD(&nv_crtc->flip_list); + spin_lock_init(&nv_crtc->flip_list_lock); + nv_crtc->modeset_permission_filep = NULL; + + ret = drm_crtc_init_with_planes(nv_dev->dev, + &nv_crtc->base, + primary_plane, cursor_plane, + &nv_crtc_funcs, + NULL); + if (ret != 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to init crtc for head %u with planes", head); + goto failed_init_crtc; + } + + /* Add crtc to drm sub-system */ + + drm_crtc_helper_add(&nv_crtc->base, &nv_crtc_helper_funcs); + + nv_crtc->olut_caps = pResInfo->lutCaps.olut; + + nv_drm_crtc_install_properties(&nv_crtc->base); + + drm_crtc_enable_color_mgmt(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE, true, + NVKMS_LUT_ARRAY_SIZE); + ret = drm_mode_crtc_set_gamma_size(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE); + if (ret != 0) { + NV_DRM_DEV_LOG_WARN( + nv_dev, + "Failed to initialize legacy gamma support for head %u", head); + } + + return &nv_crtc->base; + +failed_init_crtc: + nv_drm_free(nv_state); + +failed_state_alloc: + nv_drm_free(nv_crtc); + +failed: + return ERR_PTR(ret); +} + +void nv_drm_enumerate_crtcs_and_planes( + struct nv_drm_device *nv_dev, + const struct NvKmsKapiDeviceResourcesInfo *pResInfo) +{ + unsigned int i; + + for (i = 0; i < pResInfo->numHeads; i++) { + struct drm_plane *primary_plane = NULL, *cursor_plane = NULL; + NvU32 layer; + + if (pResInfo->numLayers[i] <= NVKMS_KAPI_LAYER_PRIMARY_IDX) { + continue; + } + + 
primary_plane = + nv_drm_plane_create(nv_dev->dev, + DRM_PLANE_TYPE_PRIMARY, + NVKMS_KAPI_LAYER_PRIMARY_IDX, + i, + pResInfo); + + if (IS_ERR(primary_plane)) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to create primary plane for head %u, error = %ld", + i, PTR_ERR(primary_plane)); + continue; + } + + cursor_plane = + nv_drm_plane_create(nv_dev->dev, + DRM_PLANE_TYPE_CURSOR, + NVKMS_KAPI_LAYER_INVALID_IDX, + i, + pResInfo); + if (IS_ERR(cursor_plane)) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to create cursor plane for head %u, error = %ld", + i, PTR_ERR(cursor_plane)); + cursor_plane = NULL; + } + + /* Create crtc with the primary and cursor planes */ + { + struct drm_crtc *crtc = + __nv_drm_crtc_create(nv_dev, + primary_plane, cursor_plane, + i, pResInfo); + if (IS_ERR(crtc)) { + nv_drm_plane_destroy(primary_plane); + + if (cursor_plane != NULL) { + nv_drm_plane_destroy(cursor_plane); + } + + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to add DRM CRTC for head %u, error = %ld", + i, PTR_ERR(crtc)); + continue; + } + } + + for (layer = 0; layer < pResInfo->numLayers[i]; layer++) { + struct drm_plane *overlay_plane = NULL; + + if (layer == NVKMS_KAPI_LAYER_PRIMARY_IDX) { + continue; + } + + overlay_plane = + nv_drm_plane_create(nv_dev->dev, + DRM_PLANE_TYPE_OVERLAY, + layer, + i, + pResInfo); + + if (IS_ERR(overlay_plane)) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to create plane for layer-%u of head %u, error = %ld", + layer, i, PTR_ERR(overlay_plane)); + } + } + + } +} + +/* + * Helper function to convert NvKmsKapiCrcs to drm_nvidia_crtc_crc32_v2_out. + */ +static void NvKmsKapiCrcsToDrm(const struct NvKmsKapiCrcs *crcs, + struct drm_nvidia_crtc_crc32_v2_out *drmCrcs) +{ + drmCrcs->outputCrc32.value = crcs->outputCrc32.value; + drmCrcs->outputCrc32.supported = crcs->outputCrc32.supported; + drmCrcs->outputCrc32.__pad0 = 0; + drmCrcs->outputCrc32.__pad1 = 0; + drmCrcs->rasterGeneratorCrc32.value = crcs->rasterGeneratorCrc32.value; + drmCrcs->rasterGeneratorCrc32.supported = crcs->rasterGeneratorCrc32.supported; + drmCrcs->rasterGeneratorCrc32.__pad0 = 0; + drmCrcs->rasterGeneratorCrc32.__pad1 = 0; + drmCrcs->compositorCrc32.value = crcs->compositorCrc32.value; + drmCrcs->compositorCrc32.supported = crcs->compositorCrc32.supported; + drmCrcs->compositorCrc32.__pad0 = 0; + drmCrcs->compositorCrc32.__pad1 = 0; +} + +int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct drm_nvidia_get_crtc_crc32_v2_params *params = data; + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_crtc *crtc = NULL; + struct nv_drm_crtc *nv_crtc = NULL; + struct NvKmsKapiCrcs crc32; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return -EOPNOTSUPP; + } + + crtc = drm_crtc_find(dev, filep, params->crtc_id); + if (!crtc) { + return -ENOENT; + } + + nv_crtc = to_nv_crtc(crtc); + + if (!nvKms->getCRC32(nv_dev->pDevice, nv_crtc->head, &crc32)) { + return -ENODEV; + } + NvKmsKapiCrcsToDrm(&crc32, &params->crc32); + + return 0; +} + +int nv_drm_get_crtc_crc32_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct drm_nvidia_get_crtc_crc32_params *params = data; + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_crtc *crtc = NULL; + struct nv_drm_crtc *nv_crtc = NULL; + struct NvKmsKapiCrcs crc32; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return -EOPNOTSUPP; + } + + crtc = drm_crtc_find(dev, filep, params->crtc_id); + if (!crtc) { + return -ENOENT; + } + + nv_crtc = to_nv_crtc(crtc); + + if 
(!nvKms->getCRC32(nv_dev->pDevice, nv_crtc->head, &crc32)) { + return -ENODEV; + } + params->crc32 = crc32.outputCrc32.value; + + return 0; +} + +#endif diff --git a/kernel-open/nvidia-drm/nvidia-drm-crtc.h b/kernel-open/nvidia-drm/nvidia-drm-crtc.h new file mode 100644 index 0000000..65b2391 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-crtc.h @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_CRTC_H__ +#define __NVIDIA_DRM_CRTC_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-helper.h" + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include <drm/drmP.h> +#endif + +#include <drm/drm_crtc.h> + +#include "nvtypes.h" +#include "nvkms-kapi.h" + +enum nv_drm_transfer_function { + NV_DRM_TRANSFER_FUNCTION_DEFAULT, + NV_DRM_TRANSFER_FUNCTION_LINEAR, + NV_DRM_TRANSFER_FUNCTION_PQ, + NV_DRM_TRANSFER_FUNCTION_MAX, +}; + +struct nv_drm_crtc { + NvU32 head; + + /** + * @flip_list: + * + * List of flips pending processing by __nv_drm_handle_flip_event(). + * Protected by @flip_list_lock. + */ + struct list_head flip_list; + + /** + * @flip_list_lock: + * + * Spinlock to protect @flip_list. + */ + spinlock_t flip_list_lock; + + /** + * @modeset_permission_filep: + * + * The filep using this crtc with DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS. + */ + struct drm_file *modeset_permission_filep; + + struct NvKmsLUTCaps olut_caps; + + struct drm_crtc base; +}; + +/** + * struct nv_drm_flip - flip state + * + * This state is used to consume the DRM completion event associated + * with each crtc state in an atomic commit. + * + * nv_drm_atomic_apply_modeset_config() consumes the DRM completion event, + * saves it into the flip state associated with the crtc, queues the flip + * state onto the crtc's flip list, and commits the atomic update to + * hardware. + */ +struct nv_drm_flip { + /** + * @event: + * + * Optional pointer to a DRM event to signal upon completion of + * the state update. + */ + struct drm_pending_vblank_event *event; + + /** + * @pending_events: + * + * Number of HW events pending to signal completion of the state + * update. + */ + uint32_t pending_events; + + /** + * @list_entry: + * + * Entry on the per-CRTC &nv_drm_crtc.flip_list. Protected by + * &nv_drm_crtc.flip_list_lock.
+ */ + struct list_head list_entry; + + /** + * @deferred_flip_list: + * + * List of flip objects whose processing is deferred until this flip + * object is processed. Protected by &nv_drm_crtc.flip_list_lock. + * nv_drm_atomic_commit() gets the last flip object from + * nv_drm_crtc::flip_list and adds deferred flip objects onto + * @deferred_flip_list; __nv_drm_handle_flip_event() then processes + * @deferred_flip_list. + */ + struct list_head deferred_flip_list; +}; + +struct nv_drm_crtc_state { + /** + * @base: + * + * Base DRM crtc state object for this crtc state. + */ + struct drm_crtc_state base; + + /** + * @req_config: + * + * Requested head's modeset configuration corresponding to this crtc state. + */ + struct NvKmsKapiHeadRequestedConfig req_config; + + /** + * @nv_flip: + * + * Flip state associated with this crtc state. It is allocated by + * nv_drm_atomic_crtc_duplicate_state(); on a successful commit it is + * consumed and queued onto the flip list by + * nv_drm_atomic_apply_modeset_config(), and finally destroyed by + * __nv_drm_handle_flip_event() after being processed. + * + * If the atomic commit fails, this flip state is destroyed by + * nv_drm_atomic_crtc_destroy_state(). + */ + struct nv_drm_flip *nv_flip; + + enum nv_drm_transfer_function regamma_tf; + struct drm_property_blob *regamma_lut; + uint64_t regamma_divisor; + struct nv_drm_lut_surface *regamma_drm_lut_surface; + NvBool regamma_changed; +}; + +static inline struct nv_drm_crtc_state *to_nv_crtc_state(struct drm_crtc_state *state) +{ + return container_of(state, struct nv_drm_crtc_state, base); +} + +static inline const struct nv_drm_crtc_state *to_nv_crtc_state_const(const struct drm_crtc_state *state) +{ + return container_of(state, struct nv_drm_crtc_state, base); +} + +struct nv_drm_plane { + /** + * @base: + * + * Base DRM plane object for this plane. + */ + struct drm_plane base; + + /** + * @defaultCompositionMode: + * + * Default composition blending mode of this plane. + */ + enum NvKmsCompositionBlendingMode defaultCompositionMode; + + /** + * @layer_idx: + * + * Index of this plane in the per-head array of layers. + */ + uint32_t layer_idx; + + /** + * @supportsColorProperties: + * + * If true, supports the COLOR_ENCODING and COLOR_RANGE properties.
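+ * These are the standard DRM properties created via + * drm_plane_create_color_properties() in nv_drm_plane_create().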
+ */ + bool supportsColorProperties; + + struct NvKmsLUTCaps ilut_caps; + struct NvKmsLUTCaps tmo_caps; +}; + +static inline struct nv_drm_plane *to_nv_plane(struct drm_plane *plane) +{ + if (plane == NULL) { + return NULL; + } + return container_of(plane, struct nv_drm_plane, base); +} + +struct nv_drm_nvkms_surface { + struct NvKmsKapiDevice *pDevice; + struct NvKmsKapiMemory *nvkms_memory; + struct NvKmsKapiSurface *nvkms_surface; + void *buffer; + struct kref refcount; +}; + +struct nv_drm_nvkms_surface_params { + NvU32 width; + NvU32 height; + size_t surface_size; + enum NvKmsSurfaceMemoryFormat format; +}; + +struct nv_drm_lut_surface { + struct nv_drm_nvkms_surface base; + struct { + NvU32 vssSegments; + enum NvKmsLUTVssType vssType; + + NvU32 lutEntries; + enum NvKmsLUTFormat entryFormat; + + } properties; +}; + +struct nv_drm_plane_state { + struct drm_plane_state base; + s32 __user *fd_user_ptr; + enum nv_drm_input_color_space input_colorspace; +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + struct drm_property_blob *hdr_output_metadata; +#endif + struct drm_property_blob *lms_ctm; + struct drm_property_blob *lms_to_itp_ctm; + struct drm_property_blob *itp_to_lms_ctm; + struct drm_property_blob *blend_ctm; + + enum nv_drm_transfer_function degamma_tf; + struct drm_property_blob *degamma_lut; + uint64_t degamma_multiplier; /* S31.32 Sign-Magnitude Format */ + struct nv_drm_lut_surface *degamma_drm_lut_surface; + NvBool degamma_changed; + + struct drm_property_blob *tmo_lut; + struct nv_drm_lut_surface *tmo_drm_lut_surface; + NvBool tmo_changed; +}; + +static inline struct nv_drm_plane_state *to_nv_drm_plane_state(struct drm_plane_state *state) +{ + return container_of(state, struct nv_drm_plane_state, base); +} + +static inline const struct nv_drm_plane_state *to_nv_drm_plane_state_const(const struct drm_plane_state *state) +{ + return container_of(state, const struct nv_drm_plane_state, base); +} + +static inline struct nv_drm_crtc *to_nv_crtc(struct drm_crtc *crtc) +{ + if (crtc == NULL) { + return NULL; + } + return container_of(crtc, struct nv_drm_crtc, base); +} + +/* + * CRTCs are static objects; the list does not change between initialization + * and teardown of the device. Initialization/teardown paths are single + * threaded, so no locking is required. + */ +static inline +struct nv_drm_crtc *nv_drm_crtc_lookup(struct nv_drm_device *nv_dev, NvU32 head) +{ + struct drm_crtc *crtc; + nv_drm_for_each_crtc(crtc, nv_dev->dev) { + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + + if (nv_crtc->head == head) { + return nv_crtc; + } + } + return NULL; +} + +/** + * nv_drm_crtc_enqueue_flip - Enqueue nv_drm_flip object onto flip_list of crtc. + */ +static inline void nv_drm_crtc_enqueue_flip(struct nv_drm_crtc *nv_crtc, + struct nv_drm_flip *nv_flip) +{ + spin_lock(&nv_crtc->flip_list_lock); + list_add(&nv_flip->list_entry, &nv_crtc->flip_list); + spin_unlock(&nv_crtc->flip_list_lock); +} + +/** + * nv_drm_crtc_dequeue_flip - Dequeue nv_drm_flip object from flip_list of crtc. + */ +static inline +struct nv_drm_flip *nv_drm_crtc_dequeue_flip(struct nv_drm_crtc *nv_crtc) +{ + struct nv_drm_flip *nv_flip = NULL; + uint32_t pending_events = 0; + + spin_lock(&nv_crtc->flip_list_lock); + nv_flip = list_first_entry_or_null(&nv_crtc->flip_list, + struct nv_drm_flip, list_entry); + if (likely(nv_flip != NULL)) { + /* + * Decrement the pending_events count and dequeue the flip object if + * the count reaches 0.
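+ * If events remain pending after the decrement, the flip stays + * queued and the function returns NULL (see the check below).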
+         */
+        pending_events = --nv_flip->pending_events;
+        if (!pending_events) {
+            list_del(&nv_flip->list_entry);
+        }
+    }
+    spin_unlock(&nv_crtc->flip_list_lock);
+
+    if (WARN_ON(nv_flip == NULL) || pending_events) {
+        return NULL;
+    }
+
+    return nv_flip;
+}
+
+void nv_drm_enumerate_crtcs_and_planes(
+    struct nv_drm_device *nv_dev,
+    const struct NvKmsKapiDeviceResourcesInfo *pResInfo);
+
+int nv_drm_get_crtc_crc32_ioctl(struct drm_device *dev,
+                                void *data, struct drm_file *filep);
+
+int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev,
+                                   void *data, struct drm_file *filep);
+
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#endif /* __NVIDIA_DRM_CRTC_H__ */
diff --git a/kernel-open/nvidia-drm/nvidia-drm-drv.c b/kernel-open/nvidia-drm/nvidia-drm-drv.c
new file mode 100644
index 0000000..42baa11
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-drv.c
@@ -0,0 +1,2236 @@
+/*
+ * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
+
+#include "nvidia-drm-priv.h"
+#include "nvidia-drm-drv.h"
+#include "nvidia-drm-fb.h"
+#include "nvidia-drm-modeset.h"
+#include "nvidia-drm-encoder.h"
+#include "nvidia-drm-connector.h"
+#include "nvidia-drm-gem.h"
+#include "nvidia-drm-crtc.h"
+#include "nvidia-drm-fence.h"
+#include "nvidia-drm-helper.h"
+#include "nvidia-drm-gem-nvkms-memory.h"
+#include "nvidia-drm-gem-user-memory.h"
+#include "nvidia-drm-gem-dma-buf.h"
+#include "nvidia-drm-utils.h"
+#include "nv_dpy_id.h"
+
+#if defined(NV_DRM_AVAILABLE)
+
+#include "nvidia-drm-ioctl.h"
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#if defined(NV_DRM_DRM_ATOMIC_UAPI_H_PRESENT)
+#include <drm/drm_atomic_uapi.h>
+#endif
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+
+#if defined(NV_LINUX_APERTURE_H_PRESENT)
+#include <linux/aperture.h>
+#endif
+
+#if defined(NV_DRM_DRM_APERTURE_H_PRESENT)
+#include <drm/drm_aperture.h>
+#endif
+
+#if defined(NV_DRM_FBDEV_AVAILABLE)
+#include <drm/drm_fb_helper.h>
+#endif
+
+#if defined(NV_DRM_DRM_CLIENT_SETUP_H_PRESENT)
+#include <drm/drm_client_setup.h>
+#elif defined(NV_DRM_CLIENTS_DRM_CLIENT_SETUP_H_PRESENT)
+#include <drm/clients/drm_client_setup.h>
+#endif
+
+#if defined(NV_DRM_DRM_FBDEV_TTM_H_PRESENT)
+#include <drm/drm_fbdev_ttm.h>
+#elif defined(NV_DRM_DRM_FBDEV_GENERIC_H_PRESENT)
+#include <drm/drm_fbdev_generic.h>
+#endif
+
+#include <linux/pci.h>
+#include <linux/workqueue.h>
+#include <linux/sort.h>
+
+/*
+ * Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h")
+ * moves a number of helper function definitions from
+ * drm/drm_crtc_helper.h to a new drm_probe_helper.h.
+ */
+#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT)
+#include <drm/drm_probe_helper.h>
+#endif
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_auth.h>
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+#include <drm/drm_atomic_helper.h>
+#endif
+
+static int nv_drm_revoke_modeset_permission(struct drm_device *dev,
+                                            struct drm_file *filep,
+                                            NvU32 dpyId);
+static int nv_drm_revoke_sub_ownership(struct drm_device *dev);
+
+static struct nv_drm_device *dev_list = NULL;
+
+static const char* nv_get_input_colorspace_name(
+    enum nv_drm_input_color_space colorSpace)
+{
+    switch (colorSpace) {
+        case NV_DRM_INPUT_COLOR_SPACE_NONE:
+            return "None";
+        case NV_DRM_INPUT_COLOR_SPACE_SCRGB_LINEAR:
+            return "scRGB Linear FP16";
+        case NV_DRM_INPUT_COLOR_SPACE_BT2100_PQ:
+            return "BT.2100 PQ";
+        default:
+            /* We shouldn't hit this */
+            WARN_ON("Unsupported input colorspace");
+            return "None";
+    }
+}
+
+static const char* nv_get_transfer_function_name(
+    enum nv_drm_transfer_function tf)
+{
+    switch (tf) {
+        case NV_DRM_TRANSFER_FUNCTION_LINEAR:
+            return "Linear";
+        case NV_DRM_TRANSFER_FUNCTION_PQ:
+            return "PQ (Perceptual Quantizer)";
+        default:
+            /* We shouldn't hit this */
+            WARN_ON("Unsupported transfer function");
+#if defined(fallthrough)
+            fallthrough;
+#else
+            /* Fallthrough */
+#endif
+        case NV_DRM_TRANSFER_FUNCTION_DEFAULT:
+            return "Default";
+    }
+}
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#if defined(NV_DRM_OUTPUT_POLL_CHANGED_PRESENT)
+static void nv_drm_output_poll_changed(struct drm_device *dev)
+{
+    struct drm_connector *connector = NULL;
+    struct drm_mode_config *config = &dev->mode_config;
+    struct drm_connector_list_iter conn_iter;
+    drm_connector_list_iter_begin(dev, &conn_iter);
+    /*
+     * drm_mode_config::mutex is acquired unconditionally here. The mutex
+     * must be held for the duration of a fill_modes() call chain:
+     *     connector->funcs->fill_modes()
+     *     |-> drm_helper_probe_single_connector_modes()
+     *
+     * It is easiest to always acquire the mutex for the entire connector
+     * loop.
+ */ + mutex_lock(&config->mutex); + + drm_for_each_connector_iter(connector, &conn_iter) { + + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + + if (!nv_drm_connector_check_connection_status_dirty_and_clear( + nv_connector)) { + continue; + } + + connector->funcs->fill_modes( + connector, + dev->mode_config.max_width, dev->mode_config.max_height); + } + + mutex_unlock(&config->mutex); + drm_connector_list_iter_end(&conn_iter); +} +#endif /* NV_DRM_OUTPUT_POLL_CHANGED_PRESENT */ + +static const struct drm_mode_config_funcs nv_mode_config_funcs = { + .fb_create = nv_drm_framebuffer_create, + + .atomic_state_alloc = nv_drm_atomic_state_alloc, + .atomic_state_clear = nv_drm_atomic_state_clear, + .atomic_state_free = nv_drm_atomic_state_free, + .atomic_check = nv_drm_atomic_check, + .atomic_commit = nv_drm_atomic_commit, + + #if defined(NV_DRM_OUTPUT_POLL_CHANGED_PRESENT) + .output_poll_changed = nv_drm_output_poll_changed, + #endif +}; + +static void nv_drm_event_callback(const struct NvKmsKapiEvent *event) +{ + struct nv_drm_device *nv_dev = event->privateData; + + mutex_lock(&nv_dev->lock); + + if (!atomic_read(&nv_dev->enable_event_handling)) { + goto done; + } + + switch (event->type) { + case NVKMS_EVENT_TYPE_DPY_CHANGED: + nv_drm_handle_display_change( + nv_dev, + event->u.displayChanged.display); + break; + + case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED: + nv_drm_handle_dynamic_display_connected( + nv_dev, + event->u.dynamicDisplayConnected.display); + break; + case NVKMS_EVENT_TYPE_FLIP_OCCURRED: + nv_drm_handle_flip_occurred( + nv_dev, + event->u.flipOccurred.head, + event->u.flipOccurred.layer); + break; + default: + break; + } + +done: + + mutex_unlock(&nv_dev->lock); +} + +struct nv_drm_mst_display_info { + NvKmsKapiDisplay handle; + NvBool isDpMST; + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]; +}; + +/* + * Helper function to get DpMST display info. + * dpMSTDisplayInfos is allocated dynamically, + * so it needs to be freed after finishing the query. 
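+ *
+ * A minimal caller sketch (error handling elided; this mirrors how
+ * nv_drm_enumerate_encoders_and_connectors() uses it below):
+ *
+ *     struct nv_drm_mst_display_info *infos = NULL;
+ *     NvU32 nDynamicDisplays = 0;
+ *
+ *     if (nv_drm_get_mst_display_infos(nv_dev, hDisplay,
+ *                                      &infos, &nDynamicDisplays) == 0 &&
+ *         nDynamicDisplays != 0) {
+ *         ... use infos[0 .. nDynamicDisplays - 1] ...
+ *         nv_drm_free(infos);
+ *     }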
+ */
+static int nv_drm_get_mst_display_infos
+(
+    struct nv_drm_device *nv_dev,
+    NvKmsKapiDisplay hDisplay,
+    struct nv_drm_mst_display_info **dpMSTDisplayInfos,
+    NvU32 *nDynamicDisplays
+)
+{
+    struct NvKmsKapiStaticDisplayInfo *displayInfo = NULL;
+    struct NvKmsKapiStaticDisplayInfo *dynamicDisplayInfo = NULL;
+    struct NvKmsKapiConnectorInfo *connectorInfo = NULL;
+    struct nv_drm_mst_display_info *displayInfos = NULL;
+    NvU32 i = 0;
+    int ret = 0;
+    NVDpyId dpyId;
+    *nDynamicDisplays = 0;
+
+    /* Query NvKmsKapiStaticDisplayInfo and NvKmsKapiConnectorInfo */
+
+    if ((displayInfo = nv_drm_calloc(1, sizeof(*displayInfo))) == NULL) {
+        ret = -ENOMEM;
+        goto done;
+    }
+
+    if ((dynamicDisplayInfo = nv_drm_calloc(1, sizeof(*dynamicDisplayInfo))) == NULL) {
+        ret = -ENOMEM;
+        goto done;
+    }
+
+    if (!nvKms->getStaticDisplayInfo(nv_dev->pDevice, hDisplay, displayInfo)) {
+        ret = -EINVAL;
+        goto done;
+    }
+
+    connectorInfo = nvkms_get_connector_info(nv_dev->pDevice,
+                                             displayInfo->connectorHandle);
+
+    if (IS_ERR(connectorInfo)) {
+        ret = PTR_ERR(connectorInfo);
+        /* Do not pass the error pointer to nv_drm_free() below. */
+        connectorInfo = NULL;
+        goto done;
+    }
+
+    *nDynamicDisplays = nvCountDpyIdsInDpyIdList(connectorInfo->dynamicDpyIdList);
+
+    if (*nDynamicDisplays == 0) {
+        goto done;
+    }
+
+    if ((displayInfos = nv_drm_calloc(*nDynamicDisplays, sizeof(*displayInfos))) == NULL) {
+        ret = -ENOMEM;
+        goto done;
+    }
+
+    FOR_ALL_DPY_IDS(dpyId, connectorInfo->dynamicDpyIdList) {
+        if (!nvKms->getStaticDisplayInfo(nv_dev->pDevice,
+                                         nvDpyIdToNvU32(dpyId),
+                                         dynamicDisplayInfo)) {
+            ret = -EINVAL;
+            nv_drm_free(displayInfos);
+            goto done;
+        }
+
+        displayInfos[i].handle = dynamicDisplayInfo->handle;
+        displayInfos[i].isDpMST = dynamicDisplayInfo->isDpMST;
+        memcpy(displayInfos[i].dpAddress, dynamicDisplayInfo->dpAddress,
+               sizeof(dynamicDisplayInfo->dpAddress));
+
+        i++;
+    }
+
+    *dpMSTDisplayInfos = displayInfos;
+
+done:
+    nv_drm_free(displayInfo);
+    nv_drm_free(dynamicDisplayInfo);
+    nv_drm_free(connectorInfo);
+
+    return ret;
+}
+
+static int nv_drm_disp_cmp(const void *l, const void *r)
+{
+    const struct nv_drm_mst_display_info *l_info = (const struct nv_drm_mst_display_info *)l;
+    const struct nv_drm_mst_display_info *r_info = (const struct nv_drm_mst_display_info *)r;
+
+    return strcmp(l_info->dpAddress, r_info->dpAddress);
+}
+
+/*
+ * Helper function to sort dynamic displays by their DP address string.
+ * Sorting lexicographically (rather than numerically) keeps the order in
+ * which DRM connector IDs are created deterministic.
+ */
+static void nv_drm_sort_dynamic_displays_by_dp_addr
+(
+    struct nv_drm_mst_display_info *infos,
+    int nDynamicDisplays
+)
+{
+    sort(infos, nDynamicDisplays, sizeof(*infos), nv_drm_disp_cmp, NULL);
+}
+
+/*
+ * Helper function to initialize drm_device::mode_config from
+ * NvKmsKapiDevice's resource information.
+ */
+static void
+nv_drm_init_mode_config(struct nv_drm_device *nv_dev,
+                        const struct NvKmsKapiDeviceResourcesInfo *pResInfo)
+{
+    struct drm_device *dev = nv_dev->dev;
+
+    drm_mode_config_init(dev);
+    drm_mode_create_dvi_i_properties(dev);
+
+    dev->mode_config.funcs = &nv_mode_config_funcs;
+
+    dev->mode_config.min_width  = pResInfo->caps.minWidthInPixels;
+    dev->mode_config.min_height = pResInfo->caps.minHeightInPixels;
+
+    dev->mode_config.max_width  = pResInfo->caps.maxWidthInPixels;
+    dev->mode_config.max_height = pResInfo->caps.maxHeightInPixels;
+
+    dev->mode_config.cursor_width  = pResInfo->caps.maxCursorSizeInPixels;
+    dev->mode_config.cursor_height = pResInfo->caps.maxCursorSizeInPixels;
+
+    /*
+     * NVIDIA GPUs have no preferred depth. Arbitrarily report 24, to be
+     * consistent with other DRM drivers.
+     */
+
+    dev->mode_config.preferred_depth = 24;
+    dev->mode_config.prefer_shadow = 1;
+
+#if defined(NV_DRM_CRTC_STATE_HAS_ASYNC_FLIP) || \
+    defined(NV_DRM_CRTC_STATE_HAS_PAGEFLIP_FLAGS)
+    dev->mode_config.async_page_flip = true;
+#else
+    dev->mode_config.async_page_flip = false;
+#endif
+
+#if defined(NV_DRM_MODE_CONFIG_HAS_ALLOW_FB_MODIFIERS)
+    /* Allow clients to define framebuffer layouts using DRM format modifiers */
+    dev->mode_config.allow_fb_modifiers = true;
+#endif
+
+    /* Initialize output polling support */
+
+    drm_kms_helper_poll_init(dev);
+
+    /* Disable output polling, because we don't support it yet */
+
+    drm_kms_helper_poll_disable(dev);
+}
+
+/*
+ * Helper function to enumerate encoders/connectors from NvKmsKapiDevice.
+ */
+static void nv_drm_enumerate_encoders_and_connectors
+(
+    struct nv_drm_device *nv_dev
+)
+{
+    struct drm_device *dev = nv_dev->dev;
+    NvU32 nDisplays = 0;
+
+    if (!nvKms->getDisplays(nv_dev->pDevice, &nDisplays, NULL)) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to enumerate NvKmsKapiDisplay count");
+    }
+
+    if (nDisplays != 0) {
+        NvKmsKapiDisplay *hDisplays =
+            nv_drm_calloc(nDisplays, sizeof(*hDisplays));
+
+        if (hDisplays != NULL) {
+            if (!nvKms->getDisplays(nv_dev->pDevice, &nDisplays, hDisplays)) {
+                NV_DRM_DEV_LOG_ERR(
+                    nv_dev,
+                    "Failed to enumerate NvKmsKapiDisplay handles");
+            } else {
+                NvU32 i, j;
+                NvU32 nDynamicDisplays = 0;
+
+                for (i = 0; i < nDisplays; i++) {
+                    struct nv_drm_mst_display_info *displayInfos = NULL;
+                    struct drm_encoder *encoder =
+                        nv_drm_add_encoder(dev, hDisplays[i]);
+
+                    if (IS_ERR(encoder)) {
+                        NV_DRM_DEV_LOG_ERR(
+                            nv_dev,
+                            "Failed to add connector for NvKmsKapiDisplay 0x%08x",
+                            hDisplays[i]);
+                    }
+
+                    if (nv_drm_get_mst_display_infos(nv_dev, hDisplays[i],
+                                                     &displayInfos, &nDynamicDisplays)) {
+                        NV_DRM_DEV_LOG_ERR(
+                            nv_dev,
+                            "Failed to get dynamic displays");
+                    } else if (nDynamicDisplays) {
+                        nv_drm_sort_dynamic_displays_by_dp_addr(displayInfos, nDynamicDisplays);
+
+                        for (j = 0; j < nDynamicDisplays; j++) {
+                            if (displayInfos[j].isDpMST) {
+                                struct drm_encoder *mst_encoder =
+                                    nv_drm_add_encoder(dev, displayInfos[j].handle);
+
+                                NV_DRM_DEV_DEBUG_DRIVER(nv_dev,
+                                    "found DP MST port display handle %u",
+                                    displayInfos[j].handle);
+
+                                if (IS_ERR(mst_encoder)) {
+                                    NV_DRM_DEV_LOG_ERR(
+                                        nv_dev,
+                                        "Failed to add connector for NvKmsKapiDisplay 0x%08x",
+                                        displayInfos[j].handle);
+                                }
+                            }
+                        }
+
+                        nv_drm_free(displayInfos);
+                    }
+                }
+            }
+
+            nv_drm_free(hDisplays);
+        } else {
+            NV_DRM_DEV_LOG_ERR(
+                nv_dev,
+                "Failed to allocate memory for NvKmsKapiDisplay array");
+        }
+    }
+}
+
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+/*!
+ * 'NV_DRM_OUT_FENCE_PTR' is an atomic per-plane property that clients can use
+ * to request an out-fence fd for a particular plane that's being flipped.
+ * 'NV_DRM_OUT_FENCE_PTR' does NOT have the same behavior as the standard
+ * 'OUT_FENCE_PTR' property - the fd that's returned via 'NV_DRM_OUT_FENCE_PTR'
+ * will only be signaled once the buffers in the corresponding flip are flipped
+ * away from.
+ *
+ * To use this property, the client sets it, via the atomic set-property
+ * interface, to a user-mode pointer as the value. Once the driver has the
+ * post-syncpt fd from the flip reply, it copies that fd to the location the
+ * user-mode pointer points to.
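+ *
+ * Illustrative userspace sketch (libdrm; the request setup and property-id
+ * lookup are elided, and the variable names are hypothetical):
+ *
+ *     int32_t out_fence_fd = -1;
+ *
+ *     drmModeAtomicAddProperty(req, plane_id, nv_out_fence_ptr_prop_id,
+ *                              (uint64_t)(uintptr_t)&out_fence_fd);
+ *     drmModeAtomicCommit(fd, req, 0, NULL);
+ *
+ * On success, the driver writes the post-syncpt fd through the pointer; the
+ * client can then wait on out_fence_fd and must eventually close it.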
+ */ +static int nv_drm_create_properties(struct nv_drm_device *nv_dev) +{ + struct drm_prop_enum_list colorspace_enum_list[3] = { }; + struct drm_prop_enum_list tf_enum_list[NV_DRM_TRANSFER_FUNCTION_MAX] = { }; + int i, len = 0; + + for (i = 0; i < 3; i++) { + colorspace_enum_list[len].type = i; + colorspace_enum_list[len].name = nv_get_input_colorspace_name(i); + len++; + } + + for (i = 0; i < NV_DRM_TRANSFER_FUNCTION_MAX; i++) { + tf_enum_list[i].type = i; + tf_enum_list[i].name = nv_get_transfer_function_name(i); + } + + if (nv_dev->supportsSyncpts) { + nv_dev->nv_out_fence_property = + drm_property_create_range(nv_dev->dev, DRM_MODE_PROP_ATOMIC, + "NV_DRM_OUT_FENCE_PTR", 0, U64_MAX); + if (nv_dev->nv_out_fence_property == NULL) { + return -ENOMEM; + } + } + + nv_dev->nv_input_colorspace_property = + drm_property_create_enum(nv_dev->dev, 0, "NV_INPUT_COLORSPACE", + colorspace_enum_list, len); + if (nv_dev->nv_input_colorspace_property == NULL) { + NV_DRM_LOG_ERR("Failed to create NV_INPUT_COLORSPACE property"); + return -ENOMEM; + } + +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + nv_dev->nv_hdr_output_metadata_property = + drm_property_create(nv_dev->dev, DRM_MODE_PROP_BLOB, + "NV_HDR_STATIC_METADATA", 0); + if (nv_dev->nv_hdr_output_metadata_property == NULL) { + return -ENOMEM; + } +#endif + + nv_dev->nv_plane_lms_ctm_property = + drm_property_create(nv_dev->dev, DRM_MODE_PROP_BLOB, + "NV_PLANE_LMS_CTM", 0); + if (nv_dev->nv_plane_lms_ctm_property == NULL) { + return -ENOMEM; + } + + nv_dev->nv_plane_lms_to_itp_ctm_property = + drm_property_create(nv_dev->dev, DRM_MODE_PROP_BLOB, + "NV_PLANE_LMS_TO_ITP_CTM", 0); + if (nv_dev->nv_plane_lms_to_itp_ctm_property == NULL) { + return -ENOMEM; + } + + nv_dev->nv_plane_itp_to_lms_ctm_property = + drm_property_create(nv_dev->dev, DRM_MODE_PROP_BLOB, + "NV_PLANE_ITP_TO_LMS_CTM", 0); + if (nv_dev->nv_plane_itp_to_lms_ctm_property == NULL) { + return -ENOMEM; + } + + nv_dev->nv_plane_blend_ctm_property = + drm_property_create(nv_dev->dev, DRM_MODE_PROP_BLOB, + "NV_PLANE_BLEND_CTM", 0); + if (nv_dev->nv_plane_blend_ctm_property == NULL) { + return -ENOMEM; + } + + // Degamma TF + LUT + LUT Size + Multiplier + + nv_dev->nv_plane_degamma_tf_property = + drm_property_create_enum(nv_dev->dev, 0, + "NV_PLANE_DEGAMMA_TF", tf_enum_list, + NV_DRM_TRANSFER_FUNCTION_MAX); + if (nv_dev->nv_plane_degamma_tf_property == NULL) { + return -ENOMEM; + } + nv_dev->nv_plane_degamma_lut_property = + drm_property_create(nv_dev->dev, DRM_MODE_PROP_BLOB, + "NV_PLANE_DEGAMMA_LUT", 0); + if (nv_dev->nv_plane_degamma_lut_property == NULL) { + return -ENOMEM; + } + nv_dev->nv_plane_degamma_lut_size_property = + drm_property_create_range(nv_dev->dev, DRM_MODE_PROP_IMMUTABLE, + "NV_PLANE_DEGAMMA_LUT_SIZE", 0, UINT_MAX); + if (nv_dev->nv_plane_degamma_lut_size_property == NULL) { + return -ENOMEM; + } + nv_dev->nv_plane_degamma_multiplier_property = + drm_property_create_range(nv_dev->dev, 0, + "NV_PLANE_DEGAMMA_MULTIPLIER", 0, + U64_MAX & ~(((NvU64) 1) << 63)); // No negative values + if (nv_dev->nv_plane_degamma_multiplier_property == NULL) { + return -ENOMEM; + } + + // TMO LUT + LUT Size + + nv_dev->nv_plane_tmo_lut_property = + drm_property_create(nv_dev->dev, DRM_MODE_PROP_BLOB, + "NV_PLANE_TMO_LUT", 0); + if (nv_dev->nv_plane_tmo_lut_property == NULL) { + return -ENOMEM; + } + nv_dev->nv_plane_tmo_lut_size_property = + drm_property_create_range(nv_dev->dev, DRM_MODE_PROP_IMMUTABLE, + "NV_PLANE_TMO_LUT_SIZE", 0, UINT_MAX); + if (nv_dev->nv_plane_tmo_lut_size_property 
== NULL) { + return -ENOMEM; + } + + // REGAMMA TF + LUT + LUT Size + Divisor + + nv_dev->nv_crtc_regamma_tf_property = + drm_property_create_enum(nv_dev->dev, 0, + "NV_CRTC_REGAMMA_TF", tf_enum_list, + NV_DRM_TRANSFER_FUNCTION_MAX); + if (nv_dev->nv_crtc_regamma_tf_property == NULL) { + return -ENOMEM; + } + nv_dev->nv_crtc_regamma_lut_property = + drm_property_create(nv_dev->dev, DRM_MODE_PROP_BLOB, + "NV_CRTC_REGAMMA_LUT", 0); + if (nv_dev->nv_crtc_regamma_lut_property == NULL) { + return -ENOMEM; + } + nv_dev->nv_crtc_regamma_lut_size_property = + drm_property_create_range(nv_dev->dev, DRM_MODE_PROP_IMMUTABLE, + "NV_CRTC_REGAMMA_LUT_SIZE", 0, UINT_MAX); + if (nv_dev->nv_crtc_regamma_lut_size_property == NULL) { + return -ENOMEM; + } + // S31.32 + nv_dev->nv_crtc_regamma_divisor_property = + drm_property_create_range(nv_dev->dev, 0, + "NV_CRTC_REGAMMA_DIVISOR", + (((NvU64) 1) << 32), // No values between 0 and 1 + U64_MAX & ~(((NvU64) 1) << 63)); // No negative values + if (nv_dev->nv_crtc_regamma_divisor_property == NULL) { + return -ENOMEM; + } + + return 0; +} + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) +/* + * We can't just call drm_kms_helper_hotplug_event directly because + * fbdev_generic may attempt to set a mode from inside the hotplug event + * handler. Because kapi event handling runs on nvkms_kthread_q, this blocks + * other event processing including the flip completion notifier expected by + * nv_drm_atomic_commit. + * + * Defer hotplug event handling to a work item so that nvkms_kthread_q can + * continue processing events while a DRM modeset is in progress. + */ +static void nv_drm_handle_hotplug_event(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct nv_drm_device *nv_dev = + container_of(dwork, struct nv_drm_device, hotplug_event_work); + + drm_kms_helper_hotplug_event(nv_dev->dev); +} +#endif + +static int nv_drm_dev_load(struct drm_device *dev) +{ +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + struct NvKmsKapiDevice *pDevice; + + struct NvKmsKapiAllocateDeviceParams allocateDeviceParams; + struct NvKmsKapiDeviceResourcesInfo resInfo; +#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */ + NvU64 kind; + NvU64 gen; + int i; + int ret; + + struct nv_drm_device *nv_dev = to_nv_device(dev); + + NV_DRM_DEV_LOG_INFO(nv_dev, "Loading driver"); + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return 0; + } + + /* Allocate NvKmsKapiDevice from GPU ID */ + + memset(&allocateDeviceParams, 0, sizeof(allocateDeviceParams)); + + allocateDeviceParams.gpuId = nv_dev->gpu_info.gpu_id; + allocateDeviceParams.migDevice = nv_dev->gpu_mig_device; + + allocateDeviceParams.privateData = nv_dev; + allocateDeviceParams.eventCallback = nv_drm_event_callback; + + pDevice = nvKms->allocateDevice(&allocateDeviceParams); + + if (pDevice == NULL) { + NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to allocate NvKmsKapiDevice"); + return -ENODEV; + } + + /* Query information of resources available on device */ + + if (!nvKms->getDeviceResourcesInfo(pDevice, &resInfo)) { + + nvKms->freeDevice(pDevice); + + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to query NvKmsKapiDevice resources info"); + return -ENODEV; + } + +#if defined(NV_DRM_FBDEV_AVAILABLE) + /* + * If fbdev is enabled, take modeset ownership now before other DRM clients + * can take master (and thus NVKMS ownership). 
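+     *
+     * For reference: this path is reached only when the module is loaded
+     * with both options enabled, e.g. "modprobe nvidia-drm modeset=1
+     * fbdev=1" (nv_drm_fbdev_module_param is backed by the "fbdev" module
+     * parameter).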
+ */ + if (nv_drm_fbdev_module_param) { + if (!nvKms->grabOwnership(pDevice)) { + nvKms->freeDevice(pDevice); + NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to grab NVKMS modeset ownership"); + return -EBUSY; + } + + nv_dev->hasFramebufferConsole = NV_TRUE; + } +#endif + + mutex_lock(&nv_dev->lock); + + /* Set NvKmsKapiDevice */ + + nv_dev->pDevice = pDevice; + + nv_dev->pitchAlignment = resInfo.caps.pitchAlignment; + + nv_dev->hasVideoMemory = resInfo.caps.hasVideoMemory; + + nv_dev->genericPageKind = resInfo.caps.genericPageKind; + + // Fermi-Volta use generation 0, Turing+ uses generation 2. + nv_dev->pageKindGeneration = (nv_dev->genericPageKind == 0x06) ? 2 : 0; + + // Desktop GPUs and mobile GPUs Xavier and later use the same sector layout + nv_dev->sectorLayout = 1; + + nv_dev->supportsSyncpts = resInfo.caps.supportsSyncpts; + + nv_dev->semsurf_stride = resInfo.caps.semsurf.stride; + + nv_dev->semsurf_max_submitted_offset = + resInfo.caps.semsurf.maxSubmittedOffset; + + nv_dev->display_semaphores.count = + resInfo.caps.numDisplaySemaphores; + nv_dev->display_semaphores.next_index = 0; + + nv_dev->requiresVrrSemaphores = resInfo.caps.requiresVrrSemaphores; + + nv_dev->vtFbBaseAddress = resInfo.vtFbBaseAddress; + nv_dev->vtFbSize = resInfo.vtFbSize; + + gen = nv_dev->pageKindGeneration; + kind = nv_dev->genericPageKind; + + for (i = 0; i <= 5; i++) { + nv_dev->modifiers[i] = + /* Log2(block height) ----------------------------------+ * + * Page Kind ------------------------------------+ | * + * Gob Height/Page Kind Generation --------+ | | * + * Sector layout ---------------------+ | | | * + * Compression --------------------+ | | | | * + * | | | | | */ + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, gen, kind, 5 - i); + } + + nv_dev->modifiers[i++] = DRM_FORMAT_MOD_LINEAR; + nv_dev->modifiers[i++] = DRM_FORMAT_MOD_INVALID; + + /* Initialize drm_device::mode_config */ + + nv_drm_init_mode_config(nv_dev, &resInfo); + + ret = nv_drm_create_properties(nv_dev); + if (ret < 0) { + drm_mode_config_cleanup(dev); +#if defined(NV_DRM_FBDEV_AVAILABLE) + if (nv_dev->hasFramebufferConsole) { + nvKms->releaseOwnership(nv_dev->pDevice); + } +#endif + nvKms->freeDevice(nv_dev->pDevice); + return -ENODEV; + } + + if (!nvKms->declareEventInterest( + nv_dev->pDevice, + ((1 << NVKMS_EVENT_TYPE_DPY_CHANGED) | + (1 << NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED) | + (1 << NVKMS_EVENT_TYPE_FLIP_OCCURRED)))) { + NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to register event mask"); + } + + /* Add crtcs */ + + nv_drm_enumerate_crtcs_and_planes(nv_dev, &resInfo); + + /* Add connectors and encoders */ + + nv_drm_enumerate_encoders_and_connectors(nv_dev); + +#if !defined(NV_DRM_CRTC_STATE_HAS_NO_VBLANK) + drm_vblank_init(dev, dev->mode_config.num_crtc); +#endif + + /* + * Trigger hot-plug processing, to update connection status of + * all HPD supported connectors. 
+ */ + + drm_helper_hpd_irq_event(dev); + + /* Enable event handling */ + + INIT_DELAYED_WORK(&nv_dev->hotplug_event_work, nv_drm_handle_hotplug_event); + atomic_set(&nv_dev->enable_event_handling, true); + + init_waitqueue_head(&nv_dev->flip_event_wq); + + mutex_unlock(&nv_dev->lock); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + + return 0; +} + +static void nv_drm_dev_unload(struct drm_device *dev) +{ +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + struct NvKmsKapiDevice *pDevice = NULL; +#endif + + struct nv_drm_device *nv_dev = to_nv_device(dev); + + NV_DRM_DEV_LOG_INFO(nv_dev, "Unloading driver"); + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return; + } + + /* Release modeset ownership if fbdev is enabled */ + +#if defined(NV_DRM_FBDEV_AVAILABLE) + if (nv_dev->hasFramebufferConsole) { + drm_atomic_helper_shutdown(dev); + nvKms->releaseOwnership(nv_dev->pDevice); + } +#endif + + cancel_delayed_work_sync(&nv_dev->hotplug_event_work); + mutex_lock(&nv_dev->lock); + + WARN_ON(nv_dev->subOwnershipGranted); + + /* Disable event handling */ + + atomic_set(&nv_dev->enable_event_handling, false); + + /* Clean up output polling */ + + drm_kms_helper_poll_fini(dev); + + /* Clean up mode configuration */ + + drm_mode_config_cleanup(dev); + + if (!nvKms->declareEventInterest(nv_dev->pDevice, 0x0)) { + NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to stop event listening"); + } + + /* Unset NvKmsKapiDevice */ + + pDevice = nv_dev->pDevice; + nv_dev->pDevice = NULL; + + mutex_unlock(&nv_dev->lock); + + nvKms->freeDevice(pDevice); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ +} + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +static int __nv_drm_master_set(struct drm_device *dev, + struct drm_file *file_priv, bool from_open) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + + /* + * If this device is driving a framebuffer, then nvidia-drm already has + * modeset ownership. Otherwise, grab ownership now. 
+ */
+    if (!nv_dev->hasFramebufferConsole &&
+        !nvKms->grabOwnership(nv_dev->pDevice)) {
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+#if defined(NV_DRM_DRIVER_SET_MASTER_HAS_INT_RETURN_TYPE)
+static int nv_drm_master_set(struct drm_device *dev,
+                             struct drm_file *file_priv, bool from_open)
+{
+    return __nv_drm_master_set(dev, file_priv, from_open);
+}
+#else
+static void nv_drm_master_set(struct drm_device *dev,
+                              struct drm_file *file_priv, bool from_open)
+{
+    if (__nv_drm_master_set(dev, file_priv, from_open) != 0) {
+        NV_DRM_DEV_LOG_ERR(to_nv_device(dev), "Failed to grab modeset ownership");
+    }
+}
+#endif
+
+static
+int nv_drm_reset_input_colorspace(struct drm_device *dev)
+{
+    struct drm_atomic_state *state;
+    struct drm_plane_state *plane_state;
+    struct drm_plane *plane;
+    struct nv_drm_plane_state *nv_drm_plane_state;
+    struct drm_modeset_acquire_ctx ctx;
+    int ret = 0;
+    bool do_reset = false;
+    NvU32 flags = 0;
+
+    state = drm_atomic_state_alloc(dev);
+    if (!state) {
+        return -ENOMEM;
+    }
+
+#if defined(DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
+    flags |= DRM_MODESET_ACQUIRE_INTERRUPTIBLE;
+#endif
+    drm_modeset_acquire_init(&ctx, flags);
+    state->acquire_ctx = &ctx;
+
+    nv_drm_for_each_plane(plane, dev) {
+        plane_state = drm_atomic_get_plane_state(state, plane);
+        if (IS_ERR(plane_state)) {
+            ret = PTR_ERR(plane_state);
+            goto out;
+        }
+
+        nv_drm_plane_state = to_nv_drm_plane_state(plane_state);
+        if (nv_drm_plane_state &&
+            nv_drm_plane_state->input_colorspace != NV_DRM_INPUT_COLOR_SPACE_NONE) {
+            nv_drm_plane_state->input_colorspace = NV_DRM_INPUT_COLOR_SPACE_NONE;
+            do_reset = true;
+        }
+    }
+
+    if (do_reset) {
+        ret = drm_atomic_commit(state);
+    }
+
+out:
+    drm_atomic_state_put(state);
+    drm_modeset_drop_locks(&ctx);
+    drm_modeset_acquire_fini(&ctx);
+
+    return ret;
+}
+
+static
+void nv_drm_master_drop(struct drm_device *dev, struct drm_file *file_priv)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+
+    nv_drm_revoke_modeset_permission(dev, file_priv, 0);
+    nv_drm_revoke_sub_ownership(dev);
+
+    if (!nv_dev->hasFramebufferConsole) {
+        int err;
+
+        /*
+         * After dropping NVKMS modeset ownership, it is not guaranteed that
+         * the DRM and NVKMS modeset states will remain in sync. Therefore,
+         * disable all outputs and crtcs before dropping NVKMS modeset
+         * ownership.
+         *
+         * First disable all active outputs atomically, and then disable each
+         * crtc one by one; there is no helper function available to disable
+         * all crtcs atomically.
+ */ + + drm_modeset_lock_all(dev); + + if ((err = nv_drm_atomic_helper_disable_all( + dev, + dev->mode_config.acquire_ctx)) != 0) { + + NV_DRM_DEV_LOG_ERR( + nv_dev, + "nv_drm_atomic_helper_disable_all failed with error code %d !", + err); + } + + drm_modeset_unlock_all(dev); + + nvKms->releaseOwnership(nv_dev->pDevice); + } else { + int err = nv_drm_reset_input_colorspace(dev); + if (err != 0) { + NV_DRM_DEV_LOG_WARN(nv_dev, + "nv_drm_reset_input_colorspace failed with error code: %d !", err); + } + } +} +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +static int nv_drm_get_dev_info_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_get_dev_info_params *params = data; + + if (dev->primary == NULL) { + return -ENOENT; + } + + params->gpu_id = nv_dev->gpu_info.gpu_id; + params->mig_device = nv_dev->gpu_mig_device; + params->primary_index = dev->primary->index; + params->supports_alloc = false; + params->generic_page_kind = 0; + params->page_kind_generation = 0; + params->sector_layout = 0; + params->supports_sync_fd = false; + params->supports_semsurf = false; + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + /* Memory allocation and semaphore surfaces are only supported + * if the modeset = 1 parameter is set */ + if (nv_dev->pDevice != NULL) { + params->supports_alloc = true; + params->generic_page_kind = nv_dev->genericPageKind; + params->page_kind_generation = nv_dev->pageKindGeneration; + params->sector_layout = nv_dev->sectorLayout; + + if (nv_dev->semsurf_stride != 0) { + params->supports_semsurf = true; + params->supports_sync_fd = true; + } + } +#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */ + + return 0; +} + +static int nv_drm_get_drm_file_unique_id_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct drm_nvidia_get_drm_file_unique_id_params *params = data; + params->id = (u64)(filep->driver_priv); + return 0; +} + +static int nv_drm_dmabuf_supported_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + /* check the pDevice since this only gets set if modeset = 1 + * which is a requirement for the dma_buf extension to work + */ + struct nv_drm_device *nv_dev = to_nv_device(dev); + return nv_dev->pDevice ? 
0 : -EINVAL; +} + +static +int nv_drm_get_client_capability_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct drm_nvidia_get_client_capability_params *params = data; + + switch (params->capability) { +#if defined(DRM_CLIENT_CAP_STEREO_3D) + case DRM_CLIENT_CAP_STEREO_3D: + params->value = filep->stereo_allowed; + break; +#endif +#if defined(DRM_CLIENT_CAP_UNIVERSAL_PLANES) + case DRM_CLIENT_CAP_UNIVERSAL_PLANES: + params->value = filep->universal_planes; + break; +#endif +#if defined(DRM_CLIENT_CAP_ATOMIC) + case DRM_CLIENT_CAP_ATOMIC: + params->value = filep->atomic; + break; +#endif + default: + return -EINVAL; + } + + return 0; +} + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) +static bool nv_drm_connector_is_dpy_id(struct drm_connector *connector, + NvU32 dpyId) +{ + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + return nv_connector->nv_detected_encoder && + nv_connector->nv_detected_encoder->hDisplay == dpyId; +} + +static int nv_drm_get_dpy_id_for_connector_id_ioctl(struct drm_device *dev, + void *data, + struct drm_file *filep) +{ + struct drm_nvidia_get_dpy_id_for_connector_id_params *params = data; + struct drm_connector *connector; + struct nv_drm_connector *nv_connector; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return -EOPNOTSUPP; + } + + // Importantly, drm_connector_lookup will only return the + // connector if we are master, a lessee with the connector, or not master at + // all. It will return NULL if we are a lessee with other connectors. + connector = drm_connector_lookup(dev, filep, params->connectorId); + + if (!connector) { + return -EINVAL; + } + + nv_connector = to_nv_connector(connector); + if (!nv_connector) { + ret = -EINVAL; + goto done; + } + + if (!nv_connector->nv_detected_encoder) { + ret = -EINVAL; + goto done; + } + + params->dpyId = nv_connector->nv_detected_encoder->hDisplay; + +done: + drm_connector_put(connector); + return ret; +} + +static int nv_drm_get_connector_id_for_dpy_id_ioctl(struct drm_device *dev, + void *data, + struct drm_file *filep) +{ + struct drm_nvidia_get_connector_id_for_dpy_id_params *params = data; + struct drm_connector *connector; + int ret = -EINVAL; + struct drm_connector_list_iter conn_iter; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return -EOPNOTSUPP; + } + + /* Lookup for existing connector with same dpyId */ + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (nv_drm_connector_is_dpy_id(connector, params->dpyId)) { + params->connectorId = connector->base.id; + ret = 0; + break; + } + } + drm_connector_list_iter_end(&conn_iter); + + return ret; +} + +static NvU32 nv_drm_get_head_bit_from_connector(struct drm_connector *connector) +{ + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + + if (connector->state && connector->state->crtc) { + struct nv_drm_crtc *nv_crtc = to_nv_crtc(connector->state->crtc); + return NVBIT(nv_crtc->head); + } else if (nv_connector->nv_detected_encoder && + nv_connector->nv_detected_encoder->base.crtc) { + struct nv_drm_crtc *nv_crtc = + to_nv_crtc(nv_connector->nv_detected_encoder->base.crtc); + return NVBIT(nv_crtc->head); + } + + return 0; +} + +static int nv_drm_grant_modeset_permission(struct drm_device *dev, + struct drm_nvidia_grant_permissions_params *params, + struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_connector *target_nv_connector = NULL; + struct nv_drm_crtc 
*target_nv_crtc = NULL; + struct drm_connector *connector, *target_connector = NULL; + struct drm_crtc *crtc; + NvU32 head = 0, freeHeadBits, targetHeadBit, possible_crtcs; + int ret = 0; + struct drm_connector_list_iter conn_iter; +#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3 + struct drm_modeset_acquire_ctx ctx; + DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, + ret); +#else + mutex_lock(&dev->mode_config.mutex); +#endif + + /* Get the connector for the dpyId. */ + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (nv_drm_connector_is_dpy_id(connector, params->dpyId)) { + target_connector = + drm_connector_lookup(dev, filep, connector->base.id); + break; + } + } + drm_connector_list_iter_end(&conn_iter); + + // Importantly, drm_connector_lookup/drm_crtc_find (with filep) will only + // return the object if we are master, a lessee with the object, or not + // master at all. It will return NULL if we are a lessee with other objects. + if (!target_connector) { + ret = -EINVAL; + goto done; + } + target_nv_connector = to_nv_connector(target_connector); + possible_crtcs = + target_nv_connector->nv_detected_encoder->base.possible_crtcs; + + /* Target connector must not be previously granted. */ + if (target_nv_connector->modeset_permission_filep) { + ret = -EINVAL; + goto done; + } + + /* Add all heads that are owned and not already granted. */ + freeHeadBits = 0; + nv_drm_for_each_crtc(crtc, dev) { + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + if (drm_crtc_find(dev, filep, crtc->base.id) && + !nv_crtc->modeset_permission_filep && + (drm_crtc_mask(crtc) & possible_crtcs)) { + freeHeadBits |= NVBIT(nv_crtc->head); + } + } + + targetHeadBit = nv_drm_get_head_bit_from_connector(target_connector); + if (targetHeadBit & freeHeadBits) { + /* If a crtc is already being used by this connector, use it. */ + freeHeadBits = targetHeadBit; + } else { + /* Otherwise, remove heads that are in use by other connectors. */ + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + freeHeadBits &= ~nv_drm_get_head_bit_from_connector(connector); + } + drm_connector_list_iter_end(&conn_iter); + } + + /* Fail if no heads are available. */ + if (!freeHeadBits) { + ret = -EINVAL; + goto done; + } + + /* + * Loop through the crtc again and find a matching head. + * Record the filep that is using the crtc and the connector. 
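+     *
+     * Worked example: if heads 0 and 2 are free, freeHeadBits == 0x5, and
+     * the loop below selects head 0; the first crtc in the device's crtc
+     * list whose head bit is set in freeHeadBits wins.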
+ */
+    nv_drm_for_each_crtc(crtc, dev) {
+        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
+        if (freeHeadBits & NVBIT(nv_crtc->head)) {
+            target_nv_crtc = nv_crtc;
+            head = nv_crtc->head;
+            break;
+        }
+    }
+
+    if (!nvKms->grantPermissions(params->fd, nv_dev->pDevice, head,
+                                 params->dpyId)) {
+        ret = -EINVAL;
+        goto done;
+    }
+
+    target_nv_connector->modeset_permission_crtc = target_nv_crtc;
+    target_nv_connector->modeset_permission_filep = filep;
+    target_nv_crtc->modeset_permission_filep = filep;
+
+done:
+    if (target_connector) {
+        drm_connector_put(target_connector);
+    }
+
+#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
+    DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
+#else
+    mutex_unlock(&dev->mode_config.mutex);
+#endif
+
+    return ret;
+}
+
+static int nv_drm_grant_sub_ownership(struct drm_device *dev,
+                                      struct drm_nvidia_grant_permissions_params *params)
+{
+    int ret = -EINVAL;
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct drm_modeset_acquire_ctx *pctx;
+#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
+    struct drm_modeset_acquire_ctx ctx;
+    DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
+                               ret);
+    pctx = &ctx;
+#else
+    mutex_lock(&dev->mode_config.mutex);
+    pctx = dev->mode_config.acquire_ctx;
+#endif
+
+    if (nv_dev->subOwnershipGranted ||
+        !nvKms->grantSubOwnership(params->fd, nv_dev->pDevice)) {
+        goto done;
+    }
+
+    /*
+     * When creating an ownership grant, shut down all heads and disable flip
+     * notifications.
+     */
+    ret = nv_drm_atomic_helper_disable_all(dev, pctx);
+    if (ret != 0) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "nv_drm_atomic_helper_disable_all failed with error code %d!",
+            ret);
+    }
+
+    atomic_set(&nv_dev->enable_event_handling, false);
+    nv_dev->subOwnershipGranted = NV_TRUE;
+
+    ret = 0;
+
+done:
+#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
+    DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
+#else
+    mutex_unlock(&dev->mode_config.mutex);
+#endif
+    return ret;
+}
+
+static int nv_drm_grant_permission_ioctl(struct drm_device *dev, void *data,
+                                         struct drm_file *filep)
+{
+    struct drm_nvidia_grant_permissions_params *params = data;
+
+    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+        return -EOPNOTSUPP;
+    }
+
+    if (params->type == NV_DRM_PERMISSIONS_TYPE_MODESET) {
+        return nv_drm_grant_modeset_permission(dev, params, filep);
+    } else if (params->type == NV_DRM_PERMISSIONS_TYPE_SUB_OWNER) {
+        return nv_drm_grant_sub_ownership(dev, params);
+    }
+
+    return -EINVAL;
+}
+
+static int
+nv_drm_atomic_disable_connector(struct drm_atomic_state *state,
+                                struct nv_drm_connector *nv_connector)
+{
+    struct drm_crtc_state *crtc_state;
+    struct drm_connector_state *connector_state;
+    int ret = 0;
+
+    if (nv_connector->modeset_permission_crtc) {
+        crtc_state = drm_atomic_get_crtc_state(
+            state, &nv_connector->modeset_permission_crtc->base);
+        /* drm_atomic_get_crtc_state() returns an ERR_PTR, never NULL. */
+        if (IS_ERR(crtc_state)) {
+            return PTR_ERR(crtc_state);
+        }
+
+        crtc_state->active = false;
+        ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
+    connector_state = drm_atomic_get_connector_state(state, &nv_connector->base);
+    if (IS_ERR(connector_state)) {
+        return PTR_ERR(connector_state);
+    }
+
+    return drm_atomic_set_crtc_for_connector(connector_state, NULL);
+}
+
+static int nv_drm_revoke_modeset_permission(struct drm_device *dev,
+                                            struct drm_file *filep, NvU32 dpyId)
+{
+    struct drm_modeset_acquire_ctx *pctx;
+    struct drm_atomic_state *state;
+    struct drm_connector *connector;
+    struct drm_crtc *crtc;
+    int ret = 0;
+    struct drm_connector_list_iter conn_iter;
+#if
NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3 + struct drm_modeset_acquire_ctx ctx; + DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, + ret); + pctx = &ctx; +#else + mutex_lock(&dev->mode_config.mutex); + pctx = dev->mode_config.acquire_ctx; +#endif + + state = drm_atomic_state_alloc(dev); + if (!state) { + ret = -ENOMEM; + goto done; + } + state->acquire_ctx = pctx; + + /* + * If dpyId is set, only revoke those specific resources. Otherwise, + * it is from closing the file so revoke all resources for that filep. + */ + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + if (nv_connector->modeset_permission_filep == filep && + (!dpyId || nv_drm_connector_is_dpy_id(connector, dpyId))) { + ret = nv_drm_atomic_disable_connector(state, nv_connector); + if (ret < 0) { + goto done; + } + + // Continue trying to revoke as much as possible. + nv_drm_connector_revoke_permissions(dev, nv_connector); + } + } + drm_connector_list_iter_end(&conn_iter); + + nv_drm_for_each_crtc(crtc, dev) { + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + if (nv_crtc->modeset_permission_filep == filep && !dpyId) { + nv_crtc->modeset_permission_filep = NULL; + } + } + + ret = drm_atomic_commit(state); +done: + drm_atomic_state_put(state); + +#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3 + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); +#else + mutex_unlock(&dev->mode_config.mutex); +#endif + + return ret; +} + +static int nv_drm_revoke_sub_ownership(struct drm_device *dev) +{ + int ret = -EINVAL; + struct nv_drm_device *nv_dev = to_nv_device(dev); +#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3 + struct drm_modeset_acquire_ctx ctx; + DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, + ret); +#else + mutex_lock(&dev->mode_config.mutex); +#endif + + if (!nv_dev->subOwnershipGranted) { + goto done; + } + + if (!nvKms->revokeSubOwnership(nv_dev->pDevice)) { + NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to revoke sub-ownership from NVKMS"); + goto done; + } + + nv_dev->subOwnershipGranted = NV_FALSE; + atomic_set(&nv_dev->enable_event_handling, true); + ret = 0; + +done: +#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3 + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); +#else + mutex_unlock(&dev->mode_config.mutex); +#endif + return ret; +} + +static int nv_drm_revoke_permission_ioctl(struct drm_device *dev, void *data, + struct drm_file *filep) +{ + struct drm_nvidia_revoke_permissions_params *params = data; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return -EOPNOTSUPP; + } + + if (params->type == NV_DRM_PERMISSIONS_TYPE_MODESET) { + if (!params->dpyId) { + return -EINVAL; + } + return nv_drm_revoke_modeset_permission(dev, filep, params->dpyId); + } else if (params->type == NV_DRM_PERMISSIONS_TYPE_SUB_OWNER) { + return nv_drm_revoke_sub_ownership(dev); + } + + return -EINVAL; +} + +static void nv_drm_postclose(struct drm_device *dev, struct drm_file *filep) +{ + /* + * Some systems like android can reach here without initializing the + * device, so check for that. 
+ */
+    if (dev->mode_config.num_crtc > 0 &&
+        dev->mode_config.crtc_list.next != NULL &&
+        dev->mode_config.crtc_list.prev != NULL &&
+        dev->mode_config.num_connector > 0 &&
+        dev->mode_config.connector_list.next != NULL &&
+        dev->mode_config.connector_list.prev != NULL) {
+        nv_drm_revoke_modeset_permission(dev, filep, 0);
+    }
+}
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+static int nv_drm_open(struct drm_device *dev, struct drm_file *filep)
+{
+    _Static_assert(sizeof(filep->driver_priv) >= sizeof(u64),
+                   "filep->driver_priv cannot hold a u64");
+    static atomic64_t id = ATOMIC_INIT(0);
+
+    filep->driver_priv = (void *)atomic64_inc_return(&id);
+
+    return 0;
+}
+
+static struct drm_master *nv_drm_find_lessee(struct drm_master *master,
+                                             int lessee_id)
+{
+    int object;
+    void *entry;
+
+    while (master->lessor != NULL) {
+        master = master->lessor;
+    }
+
+    idr_for_each_entry(&master->lessee_idr, entry, object)
+    {
+        if (object == lessee_id) {
+            return entry;
+        }
+    }
+
+    return NULL;
+}
+
+static void nv_drm_get_revoked_objects(struct drm_device *dev,
+                                       struct drm_file *filep, unsigned int cmd,
+                                       unsigned long arg, int **objects,
+                                       int *objects_count)
+{
+    unsigned int ioc_size;
+    struct drm_mode_revoke_lease revoke_lease;
+    struct drm_master *lessor, *lessee;
+    void *entry;
+    int *objs;
+    int obj, obj_count, obj_i;
+
+    ioc_size = _IOC_SIZE(cmd);
+    if (ioc_size > sizeof(revoke_lease)) {
+        return;
+    }
+
+    if (copy_from_user(&revoke_lease, (void __user *)arg, ioc_size) != 0) {
+        return;
+    }
+
+    lessor = nv_drm_file_get_master(filep);
+    if (lessor == NULL) {
+        return;
+    }
+
+    mutex_lock(&dev->mode_config.idr_mutex);
+    lessee = nv_drm_find_lessee(lessor, revoke_lease.lessee_id);
+
+    if (lessee == NULL) {
+        goto done;
+    }
+
+    obj_count = 0;
+    idr_for_each_entry(&lessee->leases, entry, obj) {
+        ++obj_count;
+    }
+    if (obj_count == 0) {
+        goto done;
+    }
+
+    objs = nv_drm_calloc(obj_count, sizeof(int));
+    if (objs == NULL) {
+        goto done;
+    }
+
+    obj_i = 0;
+    idr_for_each_entry(&lessee->leases, entry, obj) {
+        objs[obj_i++] = obj;
+    }
+    *objects = objs;
+    *objects_count = obj_count;
+
+done:
+    mutex_unlock(&dev->mode_config.idr_mutex);
+    drm_master_put(&lessor);
+}
+
+static bool nv_drm_is_in_objects(int object, int *objects, int objects_count)
+{
+    int i;
+    for (i = 0; i < objects_count; ++i) {
+        if (objects[i] == object) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static void nv_drm_finish_revoking_objects(struct drm_device *dev,
+                                           struct drm_file *filep, int *objects,
+                                           int objects_count)
+{
+    struct drm_connector *connector;
+    struct drm_crtc *crtc;
+    struct drm_connector_list_iter conn_iter;
+#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
+    int ret = 0;
+    struct drm_modeset_acquire_ctx ctx;
+    DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
+                               ret);
+#else
+    mutex_lock(&dev->mode_config.mutex);
+#endif
+
+    drm_connector_list_iter_begin(dev, &conn_iter);
+    drm_for_each_connector_iter(connector, &conn_iter) {
+        struct nv_drm_connector *nv_connector = to_nv_connector(connector);
+        if (nv_connector->modeset_permission_filep &&
+            nv_drm_is_in_objects(connector->base.id, objects, objects_count)) {
+            nv_drm_connector_revoke_permissions(dev, nv_connector);
+        }
+    }
+    drm_connector_list_iter_end(&conn_iter);
+
+    nv_drm_for_each_crtc(crtc, dev) {
+        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
+        if (nv_crtc->modeset_permission_filep &&
+            nv_drm_is_in_objects(crtc->base.id, objects, objects_count)) {
+            nv_crtc->modeset_permission_filep = NULL;
+        }
+    }
+
+#if
NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3 + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); +#else + mutex_unlock(&dev->mode_config.mutex); +#endif +} + +/* + * Wrapper around drm_ioctl to hook in to upstream ioctl. + * + * Currently used to add additional handling to REVOKE_LEASE. + */ +static long nv_drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + long retcode; + + struct drm_file *file_priv = filp->private_data; + struct drm_device *dev = file_priv->minor->dev; + int *objects = NULL; + int objects_count = 0; + + if (cmd == DRM_IOCTL_MODE_REVOKE_LEASE) { + // Save the revoked objects before revoking. + nv_drm_get_revoked_objects(dev, file_priv, cmd, arg, &objects, + &objects_count); + } + + retcode = drm_ioctl(filp, cmd, arg); + + if (cmd == DRM_IOCTL_MODE_REVOKE_LEASE && objects) { + if (retcode == 0) { + // If revoking was successful, finish revoking the objects. + nv_drm_finish_revoking_objects(dev, file_priv, objects, + objects_count); + } + nv_drm_free(objects); + } + + return retcode; +} + +static const struct file_operations nv_drm_fops = { + .owner = THIS_MODULE, + + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = nv_drm_ioctl, +#if defined(CONFIG_COMPAT) + .compat_ioctl = drm_compat_ioctl, +#endif + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + .mmap = nv_drm_mmap, +#endif + + .poll = drm_poll, + .read = drm_read, + + .llseek = noop_llseek, + +#if defined(FOP_UNSIGNED_OFFSET) + .fop_flags = FOP_UNSIGNED_OFFSET, +#endif +}; + +static const struct drm_ioctl_desc nv_drm_ioctls[] = { +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IMPORT_NVKMS_MEMORY, + nv_drm_gem_import_nvkms_memory_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IMPORT_USERSPACE_MEMORY, + nv_drm_gem_import_userspace_memory_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_MAP_OFFSET, + nv_drm_gem_map_offset_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GET_DEV_INFO, + nv_drm_get_dev_info_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GET_DRM_FILE_UNIQUE_ID, + nv_drm_get_drm_file_unique_id_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + + DRM_IOCTL_DEF_DRV(NVIDIA_FENCE_SUPPORTED, + nv_drm_fence_supported_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_PRIME_FENCE_CONTEXT_CREATE, + nv_drm_prime_fence_context_create_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_PRIME_FENCE_ATTACH, + nv_drm_gem_prime_fence_attach_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_CTX_CREATE, + nv_drm_semsurf_fence_ctx_create_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_CREATE, + nv_drm_semsurf_fence_create_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_WAIT, + nv_drm_semsurf_fence_wait_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_ATTACH, + nv_drm_semsurf_fence_attach_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + + /* + * DRM_UNLOCKED is implicit for all non-legacy DRM driver IOCTLs since Linux + * v4.10 commit fa5386459f06 "drm: Used DRM_LEGACY for all legacy functions" + * (Linux v4.4 commit ea487835e887 "drm: Enforce unlocked ioctl operation + * for kms driver ioctls" previously did it only for drivers that set the + * DRM_MODESET flag), so this will race with SET_CLIENT_CAP. 
Linux v4.11 + * commit dcf727ab5d17 "drm: setclientcap doesn't need the drm BKL" also + * removed locking from SET_CLIENT_CAP so there is no use attempting to lock + * manually. The latter commit acknowledges that this can expose userspace + * to inconsistent behavior when racing with itself, but accepts that risk. + */ + DRM_IOCTL_DEF_DRV(NVIDIA_GET_CLIENT_CAPABILITY, + nv_drm_get_client_capability_ioctl, + 0), + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + DRM_IOCTL_DEF_DRV(NVIDIA_GET_CRTC_CRC32, + nv_drm_get_crtc_crc32_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GET_CRTC_CRC32_V2, + nv_drm_get_crtc_crc32_v2_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_EXPORT_NVKMS_MEMORY, + nv_drm_gem_export_nvkms_memory_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_ALLOC_NVKMS_MEMORY, + nv_drm_gem_alloc_nvkms_memory_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_EXPORT_DMABUF_MEMORY, + nv_drm_gem_export_dmabuf_memory_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IDENTIFY_OBJECT, + nv_drm_gem_identify_object_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_DMABUF_SUPPORTED, + nv_drm_dmabuf_supported_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID, + nv_drm_get_dpy_id_for_connector_id_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID, + nv_drm_get_connector_id_for_dpy_id_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GRANT_PERMISSIONS, + nv_drm_grant_permission_ioctl, + DRM_UNLOCKED|DRM_MASTER), + DRM_IOCTL_DEF_DRV(NVIDIA_REVOKE_PERMISSIONS, + nv_drm_revoke_permission_ioctl, + DRM_UNLOCKED|DRM_MASTER), +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ +}; + +static struct drm_driver nv_drm_driver = { + + .driver_features = +#if defined(NV_DRM_DRIVER_PRIME_FLAG_PRESENT) + DRIVER_PRIME | +#endif +#if defined(NV_DRM_SYNCOBJ_FEATURES_PRESENT) + DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE | +#endif + DRIVER_GEM | DRIVER_RENDER, + +#if defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT) + .gem_free_object = nv_drm_gem_free, +#endif + + .ioctls = nv_drm_ioctls, + .num_ioctls = ARRAY_SIZE(nv_drm_ioctls), + +/* + * Linux kernel v6.6 commit 6b85aa68d9d5 ("drm: Enable PRIME import/export for + * all drivers") made drm_gem_prime_handle_to_fd() / + * drm_gem_prime_fd_to_handle() the default when .prime_handle_to_fd / + * .prime_fd_to_handle are unspecified, respectively. + * + * Linux kernel v6.6 commit 71a7974ac701 ("drm/prime: Unexport helpers for + * fd/handle conversion") unexports drm_gem_prime_handle_to_fd() and + * drm_gem_prime_fd_to_handle(). However, because of the aforementioned commit, + * it's fine to just skip specifying them in this case. + * + * Linux kernel v6.7 commit 0514f63cfff3 ("Revert "drm/prime: Unexport helpers + * for fd/handle conversion"") exported the helpers again, but left the default + * behavior intact. Nonetheless, it does not hurt to specify them. + */ +#if NV_IS_EXPORT_SYMBOL_PRESENT_drm_gem_prime_handle_to_fd + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, +#endif +#if NV_IS_EXPORT_SYMBOL_PRESENT_drm_gem_prime_fd_to_handle + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, +#endif + + .gem_prime_import = nv_drm_gem_prime_import, + .gem_prime_import_sg_table = nv_drm_gem_prime_import_sg_table, + +/* + * Linux kernel v5.0 commit 7698799f95 ("drm/prime: Add drm_gem_prime_mmap()") + * added drm_gem_prime_mmap(). 
+ *
+ * Linux kernel v6.6 commit 0adec22702d4 ("drm: Remove struct
+ * drm_driver.gem_prime_mmap") removed .gem_prime_mmap, but replaced it with a
+ * direct call to drm_gem_prime_mmap().
+ *
+ * TODO: Support .gem_prime_mmap on Linux < v5.0 using internal implementation.
+ */
+#if defined(NV_DRM_GEM_PRIME_MMAP_PRESENT) && \
+    defined(NV_DRM_DRIVER_HAS_GEM_PRIME_MMAP)
+    .gem_prime_mmap = drm_gem_prime_mmap,
+#endif
+
+#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS)
+    .gem_prime_export = drm_gem_prime_export,
+    .gem_prime_get_sg_table = nv_drm_gem_prime_get_sg_table,
+    .gem_prime_vmap = nv_drm_gem_prime_vmap,
+    .gem_prime_vunmap = nv_drm_gem_prime_vunmap,
+
+    .gem_vm_ops = &nv_drm_gem_vma_ops,
+#endif
+
+#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
+    .gem_prime_res_obj = nv_drm_gem_prime_res_obj,
+#endif
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+    .postclose = nv_drm_postclose,
+#endif
+    .open = nv_drm_open,
+
+    .fops = &nv_drm_fops,
+
+    .name = "nvidia-drm",
+
+    .desc = "NVIDIA DRM driver",
+
+#if defined(NV_DRM_DRIVER_HAS_DATE)
+    .date = "20160202",
+#endif
+
+#if defined(NV_DRM_DRIVER_HAS_LEGACY_DEV_LIST)
+    .legacy_dev_list = LIST_HEAD_INIT(nv_drm_driver.legacy_dev_list),
+#endif
+// XXX implement nvidia-drm's own .fbdev_probe callback that uses NVKMS kapi directly
+#if defined(NV_DRM_FBDEV_AVAILABLE) && defined(DRM_FBDEV_TTM_DRIVER_OPS)
+    DRM_FBDEV_TTM_DRIVER_OPS,
+#endif
+};
+
+
+/*
+ * Update the global nv_drm_driver for the intended features.
+ *
+ * It defaults to PRIME-only, but is upgraded to atomic modeset if the
+ * kernel supports atomic modeset and the 'modeset' kernel module
+ * parameter is true.
+ */
+void nv_drm_update_drm_driver_features(void)
+{
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+    if (!nv_drm_modeset_module_param) {
+        return;
+    }
+
+    nv_drm_driver.driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
+
+    nv_drm_driver.master_set  = nv_drm_master_set;
+    nv_drm_driver.master_drop = nv_drm_master_drop;
+
+    nv_drm_driver.dumb_create     = nv_drm_dumb_create;
+    nv_drm_driver.dumb_map_offset = nv_drm_dumb_map_offset;
+#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
+    nv_drm_driver.dumb_destroy    = nv_drm_dumb_destroy;
+#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+}
+
+
+
+/*
+ * Helper function to allocate and register a DRM device for the given
+ * NVIDIA GPU.
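+ *
+ * NVKMS calls this once per GPU during enumeration: nv_drm_probe_devices()
+ * passes this function as the callback to nvKms->enumerateGpus() (see below).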
+ */
+void nv_drm_register_drm_device(const struct NvKmsKapiGpuInfo *gpu_info)
+{
+    struct nv_drm_device *nv_dev = NULL;
+    struct drm_device *dev = NULL;
+    struct device *device = gpu_info->gpuInfo.os_device_ptr;
+    bool bus_is_pci;
+
+    DRM_DEBUG(
+        "Registering device for NVIDIA GPU ID 0x%08x",
+        gpu_info->gpuInfo.gpu_id);
+
+    /* Allocate NVIDIA-DRM device */
+
+    nv_dev = nv_drm_calloc(1, sizeof(*nv_dev));
+
+    if (nv_dev == NULL) {
+        NV_DRM_LOG_ERR(
+            "Failed to allocate memory for NVIDIA-DRM device object");
+        return;
+    }
+
+    nv_dev->gpu_info = gpu_info->gpuInfo;
+    nv_dev->gpu_mig_device = gpu_info->migDevice;
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+    mutex_init(&nv_dev->lock);
+#endif
+
+    /* Allocate DRM device */
+
+    dev = drm_dev_alloc(&nv_drm_driver, device);
+
+    if (dev == NULL) {
+        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to allocate device");
+        goto failed_drm_alloc;
+    }
+
+    dev->dev_private = nv_dev;
+    nv_dev->dev = dev;
+
+    bus_is_pci =
+#if defined(NV_LINUX)
+        device->bus == &pci_bus_type;
+#elif defined(NV_BSD)
+        devclass_find("pci");
+#endif
+
+#if defined(NV_DRM_DEVICE_HAS_PDEV)
+    if (bus_is_pci) {
+        dev->pdev = to_pci_dev(device);
+    }
+#endif
+
+    /* Load DRM device before registering it */
+    if (nv_drm_dev_load(dev) != 0) {
+        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to load device");
+        goto failed_drm_load;
+    }
+
+    /* Register DRM device to DRM sub-system */
+
+    if (drm_dev_register(dev, 0) != 0) {
+        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to register device");
+        goto failed_drm_register;
+    }
+
+#if defined(NV_DRM_FBDEV_AVAILABLE)
+    if (nv_drm_fbdev_module_param &&
+        drm_core_check_feature(dev, DRIVER_MODESET)) {
+
+        if (bus_is_pci) {
+            struct pci_dev *pdev = to_pci_dev(device);
+
+#if defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
+#if defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_HAS_DRIVER_ARG)
+            drm_aperture_remove_conflicting_pci_framebuffers(pdev, &nv_drm_driver);
+#else
+            drm_aperture_remove_conflicting_pci_framebuffers(pdev, nv_drm_driver.name);
+#endif
+#elif defined(NV_APERTURE_REMOVE_CONFLICTING_PCI_DEVICES_PRESENT)
+            aperture_remove_conflicting_pci_devices(pdev, nv_drm_driver.name);
+#endif
+            nvKms->framebufferConsoleDisabled(nv_dev->pDevice);
+        } else {
+            resource_size_t base = (resource_size_t) nv_dev->vtFbBaseAddress;
+            resource_size_t size = (resource_size_t) nv_dev->vtFbSize;
+
+            if (base > 0 && size > 0) {
+#if defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_FRAMEBUFFERS_PRESENT)
+#if defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_FRAMEBUFFERS_HAS_DRIVER_ARG)
+                drm_aperture_remove_conflicting_framebuffers(base, size, false, &nv_drm_driver);
+#elif defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_FRAMEBUFFERS_HAS_NO_PRIMARY_ARG)
+                drm_aperture_remove_conflicting_framebuffers(base, size, &nv_drm_driver);
+#else
+                drm_aperture_remove_conflicting_framebuffers(base, size, false, nv_drm_driver.name);
+#endif
+#elif defined(NV_APERTURE_REMOVE_CONFLICTING_DEVICES_PRESENT)
+                aperture_remove_conflicting_devices(base, size, nv_drm_driver.name);
+#endif
+            } else {
+                NV_DRM_DEV_LOG_INFO(nv_dev, "Invalid framebuffer console info");
+            }
+        }
+    #if defined(NV_DRM_CLIENT_AVAILABLE)
+        drm_client_setup(dev, NULL);
+    #elif defined(NV_DRM_FBDEV_TTM_AVAILABLE)
+        drm_fbdev_ttm_setup(dev, 32);
+    #elif defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
+        drm_fbdev_generic_setup(dev, 32);
+    #endif
+    }
+#endif /* defined(NV_DRM_FBDEV_AVAILABLE) */
+
+    /* Add NVIDIA-DRM device into list */
+
+    nv_dev->next = dev_list;
+    dev_list = nv_dev;
+
+    return; /* Success */
+
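+    /*
+     * Error unwind: each label below undoes the steps that succeeded before
+     * the failure point, in reverse order (unload after a failed register,
+     * put the DRM device after a failed load, then free the nv_drm_device).
+     */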
+failed_drm_register:
+
+    nv_drm_dev_unload(dev);
+
+failed_drm_load:
+
+    drm_dev_put(dev);
+
+failed_drm_alloc:
+
+    nv_drm_free(nv_dev);
+}
+
+/*
+ * Enumerate NVIDIA GPUs and allocate/register a DRM device for each of them.
+ */
+#if defined(NV_LINUX)
+int nv_drm_probe_devices(void)
+{
+    NvU32 gpu_count;
+
+    nv_drm_update_drm_driver_features();
+
+    /* Register DRM device for each NVIDIA GPU available via NVKMS. */
+    gpu_count = nvKms->enumerateGpus(nv_drm_register_drm_device);
+
+    if (gpu_count == 0) {
+        NV_DRM_LOG_INFO("No NVIDIA GPUs found");
+    }
+
+    return 0;
+}
+#endif
+
+/*
+ * Unregister all NVIDIA DRM devices.
+ */
+void nv_drm_remove_devices(void)
+{
+    while (dev_list != NULL) {
+        struct nv_drm_device *next = dev_list->next;
+        struct drm_device *dev = dev_list->dev;
+
+        drm_dev_unregister(dev);
+        nv_drm_dev_unload(dev);
+        drm_dev_put(dev);
+
+        nv_drm_free(dev_list);
+
+        dev_list = next;
+    }
+}
+
+/*
+ * Handle system suspend and resume.
+ *
+ * Normally, a DRM driver would use drm_mode_config_helper_suspend() to save
+ * the current state on suspend and drm_mode_config_helper_resume() to restore
+ * it after resume. This works for upstream drivers because user-mode tasks
+ * are frozen before the suspend hook is called.
+ *
+ * In the case of nvidia-drm, the suspend hook is also called when 'suspend'
+ * is written to /proc/driver/nvidia/suspend, before user-mode tasks are
+ * frozen. However, we don't actually need to save and restore the display
+ * state, because the driver requires a VT switch to an unused VT before
+ * suspending and a switch back to the application (or fbdev console) on
+ * resume. The DRM client (or fbdev helper functions) will restore the
+ * appropriate mode on resume.
+ */
+void nv_drm_suspend_resume(NvBool suspend)
+{
+    static DEFINE_MUTEX(nv_drm_suspend_mutex);
+    static NvU32 nv_drm_suspend_count = 0;
+    struct nv_drm_device *nv_dev;
+
+    mutex_lock(&nv_drm_suspend_mutex);
+
+    /*
+     * Count the number of times the driver is asked to suspend. Suspend all
+     * DRM devices on the first suspend call and resume them on the last
+     * resume call. This is necessary because the kernel may call
+     * nvkms_suspend() simultaneously for each GPU, but NVKMS itself also
+     * suspends all GPUs on the first call.
+     */
+    if (suspend) {
+        if (nv_drm_suspend_count++ > 0) {
+            goto done;
+        }
+    } else {
+        BUG_ON(nv_drm_suspend_count == 0);
+
+        if (--nv_drm_suspend_count > 0) {
+            goto done;
+        }
+    }
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+    /*
+     * NVKMS shuts down all heads on suspend. Update DRM state accordingly.
+     */
+    for (nv_dev = dev_list; nv_dev; nv_dev = nv_dev->next) {
+        struct drm_device *dev = nv_dev->dev;
+
+        if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+            continue;
+        }
+
+        if (suspend) {
+            drm_kms_helper_poll_disable(dev);
+#if defined(NV_DRM_FBDEV_AVAILABLE)
+            drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
+#endif
+            drm_mode_config_reset(dev);
+        } else {
+#if defined(NV_DRM_FBDEV_AVAILABLE)
+            drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
+#endif
+            drm_kms_helper_poll_enable(dev);
+        }
+    }
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+done:
+    mutex_unlock(&nv_drm_suspend_mutex);
+}
+
+#endif /* NV_DRM_AVAILABLE */
diff --git a/kernel-open/nvidia-drm/nvidia-drm-drv.h b/kernel-open/nvidia-drm/nvidia-drm-drv.h
new file mode 100644
index 0000000..9cb42fe
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-drv.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_DRV_H__ +#define __NVIDIA_DRM_DRV_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +struct NvKmsKapiGpuInfo; + +int nv_drm_probe_devices(void); + +void nv_drm_remove_devices(void); + +void nv_drm_suspend_resume(NvBool suspend); + +void nv_drm_register_drm_device(const struct NvKmsKapiGpuInfo *); + +void nv_drm_update_drm_driver_features(void); + +#endif /* defined(NV_DRM_AVAILABLE) */ + +#endif /* __NVIDIA_DRM_DRV_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-encoder.c b/kernel-open/nvidia-drm/nvidia-drm-encoder.c new file mode 100644 index 0000000..3657004 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-encoder.c @@ -0,0 +1,337 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-priv.h" +#include "nvidia-drm-encoder.h" +#include "nvidia-drm-utils.h" +#include "nvidia-drm-connector.h" +#include "nvidia-drm-crtc.h" +#include "nvidia-drm-helper.h" + +#include "nvmisc.h" + +/* + * Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h") + * moves a number of helper function definitions from + * drm/drm_crtc_helper.h to a new drm_probe_helper.h. 
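+ *
+ * The conftest-generated NV_DRM_DRM_PROBE_HELPER_H_PRESENT macro below
+ * selects the correct header for the target kernel at build time.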
+ */
+#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT)
+#include <drm/drm_probe_helper.h>
+#endif
+#include <drm/drm_crtc_helper.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+
+static void nv_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+    struct nv_drm_encoder *nv_encoder = to_nv_encoder(encoder);
+
+    drm_encoder_cleanup(encoder);
+
+    nv_drm_free(nv_encoder);
+}
+
+static const struct drm_encoder_funcs nv_encoder_funcs = {
+    .destroy = nv_drm_encoder_destroy,
+};
+
+static bool nv_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+                                      const struct drm_display_mode *mode,
+                                      struct drm_display_mode *adjusted_mode)
+{
+    return true;
+}
+
+static void nv_drm_encoder_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void nv_drm_encoder_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void nv_drm_encoder_mode_set(struct drm_encoder *encoder,
+                                    struct drm_display_mode *mode,
+                                    struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static const struct drm_encoder_helper_funcs nv_encoder_helper_funcs = {
+    .mode_fixup = nv_drm_encoder_mode_fixup,
+    .prepare = nv_drm_encoder_prepare,
+    .commit = nv_drm_encoder_commit,
+    .mode_set = nv_drm_encoder_mode_set,
+};
+
+static uint32_t get_crtc_mask(struct drm_device *dev, uint32_t headMask)
+{
+    struct drm_crtc *crtc = NULL;
+    uint32_t crtc_mask = 0x0;
+
+    nv_drm_for_each_crtc(crtc, dev) {
+        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
+
+        if (headMask & NVBIT(nv_crtc->head)) {
+            crtc_mask |= drm_crtc_mask(crtc);
+        }
+    }
+
+    return crtc_mask;
+}
+
+/*
+ * Helper function to create a new encoder for a given NvKmsKapiDisplay
+ * with the given signal format.
+ */
+static struct drm_encoder*
+nv_drm_encoder_new(struct drm_device *dev,
+                   NvKmsKapiDisplay hDisplay,
+                   NvKmsConnectorSignalFormat format,
+                   unsigned int crtc_mask)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+
+    struct nv_drm_encoder *nv_encoder = NULL;
+
+    int ret = 0;
+
+    /* Allocate an NVIDIA encoder object */
+
+    nv_encoder = nv_drm_calloc(1, sizeof(*nv_encoder));
+
+    if (nv_encoder == NULL) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to allocate memory for NVIDIA-DRM encoder object");
+        return ERR_PTR(-ENOMEM);
+    }
+
+    nv_encoder->hDisplay = hDisplay;
+
+    /* Initialize the base encoder object and add it to the drm subsystem */
+
+    ret = drm_encoder_init(dev,
+                           &nv_encoder->base, &nv_encoder_funcs,
+                           nvkms_connector_signal_to_drm_encoder_signal(format),
+                           NULL);
+    if (ret != 0) {
+        nv_drm_free(nv_encoder);
+
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to initialize encoder created from NvKmsKapiDisplay 0x%08x",
+            hDisplay);
+        return ERR_PTR(ret);
+    }
+
+    nv_encoder->base.possible_crtcs = crtc_mask;
+
+    drm_encoder_helper_add(&nv_encoder->base, &nv_encoder_helper_funcs);
+
+    return &nv_encoder->base;
+}
+
+/*
+ * Add encoder for given NvKmsKapiDisplay
+ */
+struct drm_encoder*
+nv_drm_add_encoder(struct drm_device *dev, NvKmsKapiDisplay hDisplay)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+
+    struct NvKmsKapiStaticDisplayInfo *displayInfo = NULL;
+    struct NvKmsKapiConnectorInfo *connectorInfo = NULL;
+
+    struct drm_encoder *encoder = NULL;
+    struct nv_drm_encoder *nv_encoder = NULL;
+
+    struct drm_connector *connector = NULL;
+
+    int ret = 0;
+
+    /* Query NvKmsKapiStaticDisplayInfo and NvKmsKapiConnectorInfo */
+
+    if ((displayInfo = nv_drm_calloc(1, sizeof(*displayInfo))) == NULL) {
+        ret = -ENOMEM;
+        goto done;
+    }
+
+    if (!nvKms->getStaticDisplayInfo(nv_dev->pDevice, hDisplay, displayInfo)) {
+        ret = -EINVAL;
+        goto done;
+    }
+
+    connectorInfo = nvkms_get_connector_info(nv_dev->pDevice,
+                                             displayInfo->connectorHandle);
+
+    if (IS_ERR(connectorInfo)) {
+        ret = PTR_ERR(connectorInfo);
+        goto done;
+    }
+
+    /* Create and add drm encoder */
+
+    encoder = nv_drm_encoder_new(dev,
+                                 displayInfo->handle,
+                                 connectorInfo->signalFormat,
+                                 get_crtc_mask(dev, displayInfo->headMask));
+
+    if (IS_ERR(encoder)) {
+        ret = PTR_ERR(encoder);
+        goto done;
+    }
+
+    /* Get connector from respective physical index */
+
+    connector =
+        nv_drm_get_connector(dev,
+                             connectorInfo->physicalIndex,
+                             connectorInfo->type,
+                             displayInfo->internal, displayInfo->dpAddress);
+
+    if (IS_ERR(connector)) {
+        ret = PTR_ERR(connector);
+        goto failed_connector_encoder_attach;
+    }
+
+    /* Attach encoder and connector */
+
+    ret = nv_drm_connector_attach_encoder(connector, encoder);
+
+    if (ret != 0) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to attach encoder created from NvKmsKapiDisplay 0x%08x "
+            "to connector",
+            hDisplay);
+        goto failed_connector_encoder_attach;
+    }
+
+    nv_encoder = to_nv_encoder(encoder);
+
+    mutex_lock(&dev->mode_config.mutex);
+
+    nv_encoder->nv_connector = to_nv_connector(connector);
+
+    nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector);
+
+    mutex_unlock(&dev->mode_config.mutex);
+
+    goto done;
+
+failed_connector_encoder_attach:
+
+    drm_encoder_cleanup(encoder);
+
+    /*
+     * 'base' is not the first member of struct nv_drm_encoder, so free the
+     * containing object rather than the embedded drm_encoder pointer.
+     */
+    nv_drm_free(to_nv_encoder(encoder));
+
+done:
+
+    nv_drm_free(displayInfo);
+
+    nv_drm_free(connectorInfo);
+
+    return ret != 0 ? ERR_PTR(ret) : encoder;
+}
+
+static inline struct nv_drm_encoder*
+get_nv_encoder_from_nvkms_display(struct drm_device *dev,
+                                  NvKmsKapiDisplay hDisplay)
+{
+    struct drm_encoder *encoder;
+
+    nv_drm_for_each_encoder(encoder, dev) {
+        struct nv_drm_encoder *nv_encoder = to_nv_encoder(encoder);
+
+        if (nv_encoder->hDisplay == hDisplay) {
+            return nv_encoder;
+        }
+    }
+
+    return NULL;
+}
+
+void nv_drm_handle_display_change(struct nv_drm_device *nv_dev,
+                                  NvKmsKapiDisplay hDisplay)
+{
+    struct drm_device *dev = nv_dev->dev;
+    struct nv_drm_encoder *nv_encoder = NULL;
+
+    mutex_lock(&dev->mode_config.mutex);
+
+    nv_encoder = get_nv_encoder_from_nvkms_display(dev, hDisplay);
+
+    mutex_unlock(&dev->mode_config.mutex);
+
+    if (nv_encoder == NULL) {
+        return;
+    }
+
+    nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector);
+
+    schedule_delayed_work(&nv_dev->hotplug_event_work, 0);
+}
+
+void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
+                                             NvKmsKapiDisplay hDisplay)
+{
+    struct drm_device *dev = nv_dev->dev;
+
+    struct drm_encoder *encoder = NULL;
+    struct nv_drm_encoder *nv_encoder = NULL;
+
+    /*
+     * Look for an existing encoder with the same hDisplay and
+     * use it if available.
+     */
+
+    nv_encoder = get_nv_encoder_from_nvkms_display(dev, hDisplay);
+
+    if (nv_encoder != NULL) {
+        NV_DRM_DEV_LOG_INFO(
+            nv_dev,
+            "Encoder with NvKmsKapiDisplay 0x%08x already exists.",
+            hDisplay);
+        return;
+    }
+
+    encoder = nv_drm_add_encoder(dev, hDisplay);
+
+    if (IS_ERR(encoder)) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to add encoder for NvKmsKapiDisplay 0x%08x",
+            hDisplay);
+        return;
+    }
+
+    schedule_delayed_work(&nv_dev->hotplug_event_work, 0);
+}
+#endif
diff --git a/kernel-open/nvidia-drm/nvidia-drm-encoder.h b/kernel-open/nvidia-drm/nvidia-drm-encoder.h
new file mode 100644
index 0000000..c4efe07
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-encoder.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DRM_ENCODER_H__
+#define __NVIDIA_DRM_ENCODER_H__
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#include "nvidia-drm-priv.h"
+
+#include <drm/drm_encoder.h>
+
+#include "nvkms-kapi.h"
+
+struct nv_drm_encoder {
+    NvKmsKapiDisplay hDisplay;
+
+    struct nv_drm_connector *nv_connector;
+
+    struct drm_encoder base;
+};
+
+static inline struct nv_drm_encoder *to_nv_encoder(
+    struct drm_encoder *encoder)
+{
+    if (encoder == NULL) {
+        return NULL;
+    }
+    return container_of(encoder, struct nv_drm_encoder, base);
+}
+
+struct drm_encoder*
+nv_drm_add_encoder(struct drm_device *dev, NvKmsKapiDisplay hDisplay);
+
+void nv_drm_handle_display_change(struct nv_drm_device *nv_dev,
+                                  NvKmsKapiDisplay hDisplay);
+
+void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
+                                             NvKmsKapiDisplay hDisplay);
+
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#endif /* __NVIDIA_DRM_ENCODER_H__ */
diff --git a/kernel-open/nvidia-drm/nvidia-drm-fb.c b/kernel-open/nvidia-drm/nvidia-drm-fb.c
new file mode 100644
index 0000000..f8558fa
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-fb.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#include "nvidia-drm-priv.h"
+#include "nvidia-drm-ioctl.h"
+#include "nvidia-drm-fb.h"
+#include "nvidia-drm-utils.h"
+#include "nvidia-drm-gem.h"
+#include "nvidia-drm-helper.h"
+#include "nvidia-drm-format.h"
+
+#include <drm/drm_fourcc.h>
+
+static void __nv_drm_framebuffer_free(struct nv_drm_framebuffer *nv_fb)
+{
+    struct drm_framebuffer *fb = &nv_fb->base;
+    uint32_t i;
+
+    /* Unreference gem object */
+    for (i = 0; i < NVKMS_MAX_PLANES_PER_SURFACE; i++) {
+        struct drm_gem_object *gem = fb->obj[i];
+        if (gem != NULL) {
+            struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
+            nv_drm_gem_object_unreference_unlocked(nv_gem);
+        }
+    }
+
+    /* Free framebuffer */
+    nv_drm_free(nv_fb);
+}
+
+static void nv_drm_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(fb->dev);
+    struct nv_drm_framebuffer *nv_fb = to_nv_framebuffer(fb);
+
+    /* Clean up core framebuffer object */
+
+    drm_framebuffer_cleanup(fb);
+
+    /* Free NvKmsKapiSurface associated with this framebuffer object */
+
+    nvKms->destroySurface(nv_dev->pDevice, nv_fb->pSurface);
+
+    __nv_drm_framebuffer_free(nv_fb);
+}
+
+static int
+nv_drm_framebuffer_create_handle(struct drm_framebuffer *fb,
+                                 struct drm_file *file, unsigned int *handle)
+{
+    return nv_drm_gem_handle_create(file,
+                                    to_nv_gem_object(fb->obj[0]),
+                                    handle);
+}
+
+static struct drm_framebuffer_funcs nv_framebuffer_funcs = {
+    .destroy = nv_drm_framebuffer_destroy,
+    .create_handle = nv_drm_framebuffer_create_handle,
+};
+
+static struct nv_drm_framebuffer *nv_drm_framebuffer_alloc(
+    struct nv_drm_device *nv_dev,
+    struct drm_file *file,
+    const struct drm_mode_fb_cmd2 *cmd)
+{
+    struct nv_drm_framebuffer *nv_fb;
+    struct nv_drm_gem_object *nv_gem;
+    const int num_planes = nv_drm_format_num_planes(cmd->pixel_format);
+    uint32_t i;
+
+    /* Allocate memory for the framebuffer object */
+    nv_fb = nv_drm_calloc(1, sizeof(*nv_fb));
+
+    if (nv_fb == NULL) {
+        NV_DRM_DEV_DEBUG_DRIVER(
+            nv_dev,
+            "Failed to allocate memory for framebuffer object");
+        return ERR_PTR(-ENOMEM);
+    }
+
+    if (num_planes > NVKMS_MAX_PLANES_PER_SURFACE) {
+        NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Unsupported number of planes");
+        goto failed;
+    }
+
+    for (i = 0; i < num_planes; i++) {
+        nv_gem = nv_drm_gem_object_lookup(file, cmd->handles[i]);
+
+        if (nv_gem == NULL) {
+            NV_DRM_DEV_DEBUG_DRIVER(
+                nv_dev,
+                "Failed to find gem object of type nvkms memory");
+            goto failed;
+        }
+
+        nv_fb->base.obj[i] = &nv_gem->base;
+    }
+
+    return nv_fb;
+
+failed:
+
+    __nv_drm_framebuffer_free(nv_fb);
+
+    return ERR_PTR(-ENOENT);
+}
+
+static int nv_drm_framebuffer_init(struct drm_device *dev,
+                                   struct nv_drm_framebuffer *nv_fb,
+                                   enum NvKmsSurfaceMemoryFormat format,
+                                   bool have_modifier,
+                                   uint64_t modifier)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct NvKmsKapiCreateSurfaceParams params = { };
+    struct nv_drm_gem_object *nv_gem;
+    struct drm_framebuffer *fb = &nv_fb->base;
+    uint32_t i;
+    int ret;
+
+    /* Initialize the base framebuffer object and add it to drm subsystem */
+
+    ret = drm_framebuffer_init(dev, fb, &nv_framebuffer_funcs);
+    if (ret != 0) {
+        NV_DRM_DEV_DEBUG_DRIVER(
+            nv_dev,
+            "Failed to initialize framebuffer object");
+        return ret;
+    }
+
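+    /*
+     * Translate the DRM framebuffer layout into NVKMS surface-creation
+     * parameters: each plane's backing NVKMS memory handle, byte offset, and
+     * pitch goes into the params slot of the same index; unused plane slots
+     * stay zero-initialized from the struct initializer above.
+     */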
+    for (i = 0; i < NVKMS_MAX_PLANES_PER_SURFACE; i++) {
+        struct drm_gem_object *gem = fb->obj[i];
+        if (gem != NULL) {
+            nv_gem = to_nv_gem_object(gem);
+
+            params.planes[i].memory = nv_gem->pMemory;
+            params.planes[i].offset = fb->offsets[i];
+            params.planes[i].pitch = fb->pitches[i];
+        }
+    }
+    params.height = fb->height;
+    params.width = fb->width;
+    params.format = format;
+
+    if (have_modifier) {
+        params.explicit_layout = true;
+        params.layout = (modifier & 0x10) ?
+            NvKmsSurfaceMemoryLayoutBlockLinear :
+            NvKmsSurfaceMemoryLayoutPitch;
+
+        // See the definition of DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D; we are
+        // testing 'c', the lossless compression field of the modifier
+        if (params.layout == NvKmsSurfaceMemoryLayoutBlockLinear &&
+            (modifier >> 23) & 0x7) {
+            NV_DRM_DEV_LOG_ERR(
+                nv_dev,
+                "Cannot create FB from compressible surface allocation");
+            goto fail;
+        }
+
+        params.log2GobsPerBlockY = modifier & 0xf;
+    } else {
+        params.explicit_layout = false;
+    }
+
+    /*
+     * XXX work around an invalid pitch assumption in DRM.
+     *
+     * The smallest pitch the display hardware allows is 256.
+     *
+     * If a DRM client allocates a 32x32 cursor surface through
+     * DRM_IOCTL_MODE_CREATE_DUMB, we'll correctly round the pitch to 256:
+     *
+     *     pitch = round(32 (width) * 4 (Bpp), 256) = 256
+     *
+     * and then allocate an 8k surface:
+     *
+     *     size = pitch * 32 (height) = 8192
+     *
+     * and report the rounded pitch and size back to the client through the
+     * struct drm_mode_create_dumb ioctl params.
+     *
+     * But when the DRM client passes that buffer object handle to
+     * DRM_IOCTL_MODE_CURSOR, the client has no way to specify the pitch. This
+     * path in drm:
+     *
+     *     DRM_IOCTL_MODE_CURSOR
+     *      drm_mode_cursor_ioctl()
+     *       drm_mode_cursor_common()
+     *        drm_mode_cursor_universal()
+     *
+     * will implicitly create a framebuffer from the buffer object, and compute
+     * the pitch as width * 4 bytes per pixel = 128, without aligning to our
+     * minimum pitch.
+     *
+     * Intercept this case and force the pitch back to 256.
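+     *
+     * Illustrative arithmetic for the 32x32 cursor case (a sketch matching
+     * the check below):
+     *
+     *     dumb-buffer allocation:  pitch = 256, size = 256 * 32 = 8192
+     *     implicit cursor fb:      pitch = 32 * 4 = 128  (below the HW minimum)
+     *     forced here:             pitch = 256           (matches the allocation)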
+     */
+    if ((params.width == 32) &&
+        (params.height == 32) &&
+        (params.planes[0].pitch == 128)) {
+        params.planes[0].pitch = 256;
+    }
+
+    /* Create NvKmsKapiSurface */
+
+    nv_fb->pSurface = nvKms->createSurface(nv_dev->pDevice, &params);
+    if (nv_fb->pSurface == NULL) {
+        NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Failed to create NvKmsKapiSurface");
+        goto fail;
+    }
+
+    return 0;
+
+fail:
+    drm_framebuffer_cleanup(fb);
+    return -EINVAL;
+}
+
+struct drm_framebuffer *nv_drm_framebuffer_create(
+    struct drm_device *dev,
+    struct drm_file *file,
+    const struct drm_mode_fb_cmd2 *cmd)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct nv_drm_framebuffer *nv_fb;
+    uint64_t modifier = 0;
+    int ret;
+    enum NvKmsSurfaceMemoryFormat format;
+    int i;
+    bool have_modifier = false;
+
+    /* Check whether NvKms supports the given pixel format */
+    if (!nv_drm_format_to_nvkms_format(cmd->pixel_format, &format)) {
+        NV_DRM_DEV_DEBUG_DRIVER(
+            nv_dev,
+            "Unsupported drm pixel format 0x%08x", cmd->pixel_format);
+        return ERR_PTR(-EINVAL);
+    }
+
+    if (cmd->flags & DRM_MODE_FB_MODIFIERS) {
+        have_modifier = true;
+        modifier = cmd->modifier[0];
+
+        for (i = 0; nv_dev->modifiers[i] != DRM_FORMAT_MOD_INVALID; i++) {
+            if (nv_dev->modifiers[i] == modifier) {
+                break;
+            }
+        }
+
+        if (nv_dev->modifiers[i] == DRM_FORMAT_MOD_INVALID) {
+            NV_DRM_DEV_DEBUG_DRIVER(
+                nv_dev,
+                "Invalid format modifier for framebuffer object: 0x%016" NvU64_fmtx,
+                modifier);
+            return ERR_PTR(-EINVAL);
+        }
+    }
+
+    nv_fb = nv_drm_framebuffer_alloc(nv_dev, file, cmd);
+    if (IS_ERR(nv_fb)) {
+        return (struct drm_framebuffer *)nv_fb;
+    }
+
+    /* Fill out framebuffer metadata from the userspace fb creation request */
+
+    drm_helper_mode_fill_fb_struct(
+        dev,
+        &nv_fb->base,
+        cmd);
+
+    /*
+     * Finish up FB initialization by creating the backing NVKMS surface and
+     * publishing the DRM fb
+     */
+
+    ret = nv_drm_framebuffer_init(dev, nv_fb, format, have_modifier, modifier);
+
+    if (ret != 0) {
+        __nv_drm_framebuffer_free(nv_fb);
+        return ERR_PTR(ret);
+    }
+
+    return &nv_fb->base;
+}
+
+#endif
diff --git a/kernel-open/nvidia-drm/nvidia-drm-fb.h b/kernel-open/nvidia-drm/nvidia-drm-fb.h
new file mode 100644
index 0000000..3a0e5e5
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-fb.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DRM_FB_H__
+#define __NVIDIA_DRM_FB_H__
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#include <drm/drm_framebuffer.h>
+
+#include "nvkms-kapi.h"
+
+struct nv_drm_framebuffer {
+    struct NvKmsKapiSurface *pSurface;
+
+    struct drm_framebuffer base;
+};
+
+static inline struct nv_drm_framebuffer *to_nv_framebuffer(
+    struct drm_framebuffer *fb)
+{
+    if (fb == NULL) {
+        return NULL;
+    }
+    return container_of(fb, struct nv_drm_framebuffer, base);
+}
+
+struct drm_framebuffer *nv_drm_framebuffer_create(
+    struct drm_device *dev,
+    struct drm_file *file,
+    const struct drm_mode_fb_cmd2 *cmd);
+
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#endif /* __NVIDIA_DRM_FB_H__ */
diff --git a/kernel-open/nvidia-drm/nvidia-drm-fence.c b/kernel-open/nvidia-drm/nvidia-drm-fence.c
new file mode 100644
index 0000000..ef885bd
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-fence.c
@@ -0,0 +1,1829 @@
+/*
+ * Copyright (c) 2016-2025, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_AVAILABLE)
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#include "nvidia-drm-priv.h"
+#include "nvidia-drm-ioctl.h"
+#include "nvidia-drm-gem.h"
+#include "nvidia-drm-fence.h"
+#include "nvidia-dma-resv-helper.h"
+
+#include <linux/dma-fence.h>
+
+#define NV_DRM_SEMAPHORE_SURFACE_FENCE_MAX_TIMEOUT_MS 5000
+
+struct nv_drm_fence_context;
+
+struct nv_drm_fence_context_ops {
+    void (*destroy)(struct nv_drm_fence_context *nv_fence_context);
+};
+
+struct nv_drm_fence_context {
+    struct nv_drm_gem_object base;
+
+    const struct nv_drm_fence_context_ops *ops;
+
+    struct nv_drm_device *nv_dev;
+    uint64_t context;
+
+    NvU64 fenceSemIndex; /* Index into semaphore surface */
+};
+
+struct nv_drm_prime_fence_context {
+    struct nv_drm_fence_context base;
+
+    /* Mapped semaphore surface */
+    struct NvKmsKapiMemory *pSemSurface;
+    NvU32 *pLinearAddress;
+
+    /* Protects nv_drm_prime_fence_context::{pending, last_seqno} */
+    spinlock_t lock;
+
+    /*
+     * Software signaling structures. __nv_drm_prime_fence_context_new()
+     * allocates the channel event and __nv_drm_prime_fence_context_destroy()
+     * frees it. There is no simultaneous read/write access to 'cb', therefore
+     * it does not require spin-lock protection.
+     */
+    struct NvKmsKapiChannelEvent *cb;
+
+    /* List of pending fences which are not yet signaled */
+    struct list_head pending;
+
+    unsigned last_seqno;
+};
+
+struct nv_drm_prime_fence {
+    struct list_head list_entry;
+    struct dma_fence base;
+    spinlock_t lock;
+};
+
+static inline
+struct nv_drm_prime_fence *to_nv_drm_prime_fence(struct dma_fence *fence)
+{
+    return container_of(fence, struct nv_drm_prime_fence, base);
+}
+
+static const char*
+nv_drm_gem_fence_op_get_driver_name(struct dma_fence *fence)
+{
+    return "NVIDIA";
+}
+
+static const char*
+nv_drm_gem_prime_fence_op_get_timeline_name(struct dma_fence *fence)
+{
+    return "nvidia.prime";
+}
+
+static bool nv_drm_gem_prime_fence_op_enable_signaling(struct dma_fence *fence)
+{
+    // DO NOTHING
+    return true;
+}
+
+static void nv_drm_gem_prime_fence_op_release(struct dma_fence *fence)
+{
+    struct nv_drm_prime_fence *nv_fence = to_nv_drm_prime_fence(fence);
+    nv_drm_free(nv_fence);
+}
+
+static signed long
+nv_drm_gem_prime_fence_op_wait(struct dma_fence *fence,
+                               bool intr, signed long timeout)
+{
+    /*
+     * If the waiter requests to wait with no timeout, force a timeout to
+     * ensure that it won't get stuck forever in the kernel if something were
+     * to go wrong with signaling, such as a malicious userspace not releasing
+     * the semaphore.
+     *
+     * 96 ms (roughly 6 frames @ 60 Hz) is arbitrarily chosen to be long enough
+     * that it should never get hit during normal operation, but not so long
+     * that the system becomes unresponsive.
+     */
+    return dma_fence_default_wait(fence, intr,
+                                  (timeout == MAX_SCHEDULE_TIMEOUT) ?
+                                      msecs_to_jiffies(96) : timeout);
+}
+
+static const struct dma_fence_ops nv_drm_gem_prime_fence_ops = {
+    .get_driver_name = nv_drm_gem_fence_op_get_driver_name,
+    .get_timeline_name = nv_drm_gem_prime_fence_op_get_timeline_name,
+    .enable_signaling = nv_drm_gem_prime_fence_op_enable_signaling,
+    .release = nv_drm_gem_prime_fence_op_release,
+    .wait = nv_drm_gem_prime_fence_op_wait,
+};
+
+static inline void
+__nv_drm_prime_fence_signal(struct nv_drm_prime_fence *nv_fence)
+{
+    list_del(&nv_fence->list_entry);
+    dma_fence_signal(&nv_fence->base);
+    dma_fence_put(&nv_fence->base);
+}
+
+static void nv_drm_gem_prime_force_fence_signal(
+    struct nv_drm_prime_fence_context *nv_fence_context)
+{
+    WARN_ON(!spin_is_locked(&nv_fence_context->lock));
+
+    while (!list_empty(&nv_fence_context->pending)) {
+        struct nv_drm_prime_fence *nv_fence = list_first_entry(
+            &nv_fence_context->pending,
+            typeof(*nv_fence),
+            list_entry);
+
+        __nv_drm_prime_fence_signal(nv_fence);
+    }
+}
+
+static void nv_drm_gem_prime_fence_event
+(
+    void *dataPtr,
+    NvU32 dataU32
+)
+{
+    struct nv_drm_prime_fence_context *nv_fence_context = dataPtr;
+
+    spin_lock(&nv_fence_context->lock);
+
+    while (!list_empty(&nv_fence_context->pending)) {
+        struct nv_drm_prime_fence *nv_fence = list_first_entry(
+            &nv_fence_context->pending,
+            typeof(*nv_fence),
+            list_entry);
+
+        /* Index into surface with 16 byte stride */
+        unsigned int seqno = *((nv_fence_context->pLinearAddress) +
+                               (nv_fence_context->base.fenceSemIndex * 4));
+
+        if (nv_fence->base.seqno > seqno) {
+            /*
+             * Fences in the list are placed in increasing order of sequence
+             * number; break out of the loop at the first fence that is not
+             * yet ready to signal.
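+             *
+             * (e.g. with pending seqnos {3, 5, 9} and semaphore value 5,
+             * fences 3 and 5 signal and 9 stays pending)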
+             */
+            break;
+        }
+
+        __nv_drm_prime_fence_signal(nv_fence);
+    }
+
+    spin_unlock(&nv_fence_context->lock);
+}
+
+static inline struct nv_drm_prime_fence_context*
+to_nv_prime_fence_context(struct nv_drm_fence_context *nv_fence_context) {
+    return container_of(nv_fence_context, struct nv_drm_prime_fence_context, base);
+}
+
+static void __nv_drm_prime_fence_context_destroy(
+    struct nv_drm_fence_context *nv_fence_context)
+{
+    struct nv_drm_device *nv_dev = nv_fence_context->nv_dev;
+    struct nv_drm_prime_fence_context *nv_prime_fence_context =
+        to_nv_prime_fence_context(nv_fence_context);
+
+    /*
+     * Free the channel event before destroying the fence context, otherwise
+     * the event callback continues to get called.
+     */
+    nvKms->freeChannelEvent(nv_dev->pDevice, nv_prime_fence_context->cb);
+
+    /* Force signal all pending fences and empty pending list */
+    spin_lock(&nv_prime_fence_context->lock);
+
+    nv_drm_gem_prime_force_fence_signal(nv_prime_fence_context);
+
+    spin_unlock(&nv_prime_fence_context->lock);
+
+    /* Free nvkms resources */
+
+    nvKms->unmapMemory(nv_dev->pDevice,
+                       nv_prime_fence_context->pSemSurface,
+                       NVKMS_KAPI_MAPPING_TYPE_KERNEL,
+                       (void *) nv_prime_fence_context->pLinearAddress);
+
+    nvKms->freeMemory(nv_dev->pDevice, nv_prime_fence_context->pSemSurface);
+
+    nv_drm_free(nv_fence_context);
+}
+
+static struct nv_drm_fence_context_ops nv_drm_prime_fence_context_ops = {
+    .destroy = __nv_drm_prime_fence_context_destroy,
+};
+
+static inline struct nv_drm_prime_fence_context *
+__nv_drm_prime_fence_context_new(
+    struct nv_drm_device *nv_dev,
+    struct drm_nvidia_prime_fence_context_create_params *p)
+{
+    struct nv_drm_prime_fence_context *nv_prime_fence_context;
+    struct NvKmsKapiMemory *pSemSurface;
+    NvU32 *pLinearAddress;
+
+    /* Allocate backup nvkms resources */
+
+    pSemSurface = nvKms->importMemory(nv_dev->pDevice,
+                                      p->size,
+                                      p->import_mem_nvkms_params_ptr,
+                                      p->import_mem_nvkms_params_size);
+    if (!pSemSurface) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to import fence semaphore surface");
+
+        goto failed;
+    }
+
+    if (!nvKms->mapMemory(nv_dev->pDevice,
+                          pSemSurface,
+                          NVKMS_KAPI_MAPPING_TYPE_KERNEL,
+                          (void **) &pLinearAddress)) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to map fence semaphore surface");
+
+        goto failed_to_map_memory;
+    }
+
+    /*
+     * Allocate a fence context object, initialize it and allocate channel
+     * event for it.
+     */
+
+    if ((nv_prime_fence_context = nv_drm_calloc(
+                1,
+                sizeof(*nv_prime_fence_context))) == NULL) {
+        goto failed_alloc_fence_context;
+    }
+
+    /*
+     * dma_fence_context_alloc() cannot fail, so we do not need
+     * to check a return value.
+     */
+
+    nv_prime_fence_context->base.ops = &nv_drm_prime_fence_context_ops;
+    nv_prime_fence_context->base.nv_dev = nv_dev;
+    nv_prime_fence_context->base.context = dma_fence_context_alloc(1);
+    nv_prime_fence_context->base.fenceSemIndex = p->index;
+    nv_prime_fence_context->pSemSurface = pSemSurface;
+    nv_prime_fence_context->pLinearAddress = pLinearAddress;
+
+    INIT_LIST_HEAD(&nv_prime_fence_context->pending);
+
+    spin_lock_init(&nv_prime_fence_context->lock);
+
+    /*
+     * Except for 'cb', the fence context should be completely initialized
+     * before channel event allocation because the fence context may start
+     * receiving events immediately after allocation.
+     *
+     * There is no simultaneous read/write access to 'cb', therefore it does
+     * not require spin-lock protection.
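+     *
+     * (in other words, every store above this point must be complete before
+     * nvKms->allocateChannelEvent() below can first invoke
+     * nv_drm_gem_prime_fence_event())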
+     */
+    nv_prime_fence_context->cb =
+        nvKms->allocateChannelEvent(nv_dev->pDevice,
+                                    nv_drm_gem_prime_fence_event,
+                                    nv_prime_fence_context,
+                                    p->event_nvkms_params_ptr,
+                                    p->event_nvkms_params_size);
+    if (!nv_prime_fence_context->cb) {
+        NV_DRM_DEV_LOG_ERR(nv_dev,
+                           "Failed to allocate fence signaling event");
+        goto failed_to_allocate_channel_event;
+    }
+
+    return nv_prime_fence_context;
+
+failed_to_allocate_channel_event:
+    nv_drm_free(nv_prime_fence_context);
+
+failed_alloc_fence_context:
+
+    nvKms->unmapMemory(nv_dev->pDevice,
+                       pSemSurface,
+                       NVKMS_KAPI_MAPPING_TYPE_KERNEL,
+                       (void *) pLinearAddress);
+
+failed_to_map_memory:
+    nvKms->freeMemory(nv_dev->pDevice, pSemSurface);
+
+failed:
+    return NULL;
+}
+
+static struct dma_fence *__nv_drm_prime_fence_context_create_fence(
+    struct nv_drm_prime_fence_context *nv_prime_fence_context,
+    unsigned int seqno)
+{
+    struct nv_drm_prime_fence *nv_fence;
+    int ret = 0;
+
+    if ((nv_fence = nv_drm_calloc(1, sizeof(*nv_fence))) == NULL) {
+        ret = -ENOMEM;
+        goto out;
+    }
+
+    spin_lock(&nv_prime_fence_context->lock);
+
+    /*
+     * If seqno wrapped, force signal fences to make sure none of them
+     * get stuck.
+     */
+    if (seqno < nv_prime_fence_context->last_seqno) {
+        nv_drm_gem_prime_force_fence_signal(nv_prime_fence_context);
+    }
+
+    INIT_LIST_HEAD(&nv_fence->list_entry);
+
+    spin_lock_init(&nv_fence->lock);
+
+    dma_fence_init(&nv_fence->base, &nv_drm_gem_prime_fence_ops,
+                   &nv_fence->lock, nv_prime_fence_context->base.context,
+                   seqno);
+
+    /* The context maintains a reference to any pending fences. */
+    dma_fence_get(&nv_fence->base);
+
+    list_add_tail(&nv_fence->list_entry, &nv_prime_fence_context->pending);
+
+    nv_prime_fence_context->last_seqno = seqno;
+
+    spin_unlock(&nv_prime_fence_context->lock);
+
+out:
+    return ret != 0 ? ERR_PTR(ret) : &nv_fence->base;
+}
+
+int nv_drm_fence_supported_ioctl(struct drm_device *dev,
+                                 void *data, struct drm_file *filep)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    return nv_dev->pDevice ? 0 : -EINVAL;
+}
+
+static inline struct nv_drm_fence_context *to_nv_fence_context(
+    struct nv_drm_gem_object *nv_gem)
+{
+    if (nv_gem != NULL) {
+        return container_of(nv_gem, struct nv_drm_fence_context, base);
+    }
+
+    return NULL;
+}
+
+/*
+ * Teardown of the 'struct nv_drm_fence_context' object is not expected
+ * to happen from any worker thread; if it did, it would deadlock, because
+ * the teardown sequence flushes all existing worker threads.
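+ *
+ * (for example, the semaphore-surface context teardown below calls
+ * nv_drm_workthread_shutdown(), which blocks until its worker drains)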
+ */ +static void +__nv_drm_fence_context_gem_free(struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_fence_context *nv_fence_context = to_nv_fence_context(nv_gem); + + nv_fence_context->ops->destroy(nv_fence_context); +} + +const struct nv_drm_gem_object_funcs nv_fence_context_gem_ops = { + .free = __nv_drm_fence_context_gem_free, +}; + +static inline +struct nv_drm_fence_context * +__nv_drm_fence_context_lookup( + struct drm_file *filp, + u32 handle) +{ + struct nv_drm_gem_object *nv_gem = + nv_drm_gem_object_lookup(filp, handle); + + if (nv_gem != NULL && nv_gem->ops != &nv_fence_context_gem_ops) { + nv_drm_gem_object_unreference_unlocked(nv_gem); + return NULL; + } + + return to_nv_fence_context(nv_gem); +} + +static int +__nv_drm_fence_context_gem_init(struct drm_device *dev, + struct nv_drm_fence_context *nv_fence_context, + u32 *handle, + struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + + nv_drm_gem_object_init(nv_dev, + &nv_fence_context->base, + &nv_fence_context_gem_ops, + 0 /* size */, + NULL /* pMemory */); + + return nv_drm_gem_handle_create_drop_reference(filep, + &nv_fence_context->base, + handle); +} + +int nv_drm_prime_fence_context_create_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_prime_fence_context_create_params *p = data; + struct nv_drm_prime_fence_context *nv_prime_fence_context; + int err; + + if (nv_dev->pDevice == NULL) { + return -EOPNOTSUPP; + } + + nv_prime_fence_context = __nv_drm_prime_fence_context_new(nv_dev, p); + + if (!nv_prime_fence_context) { + goto done; + } + + err = __nv_drm_fence_context_gem_init(dev, + &nv_prime_fence_context->base, + &p->handle, + filep); + if (err) { + __nv_drm_prime_fence_context_destroy(&nv_prime_fence_context->base); + } + + return err; + +done: + return -ENOMEM; +} + +static int __nv_drm_gem_attach_fence(struct nv_drm_gem_object *nv_gem, + struct dma_fence *fence, + bool shared) +{ + nv_dma_resv_t *resv = nv_drm_gem_res_obj(nv_gem); + int ret; + + nv_dma_resv_lock(resv, NULL); + + ret = nv_dma_resv_reserve_fences(resv, 1, shared); + if (ret == 0) { + if (shared) { + nv_dma_resv_add_shared_fence(resv, fence); + } else { + nv_dma_resv_add_excl_fence(resv, fence); + } + } else { + NV_DRM_LOG_ERR("Failed to reserve fence. 
Error code: %d", ret);
+    }
+
+    nv_dma_resv_unlock(resv);
+
+    return ret;
+}
+
+int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
+                                        void *data, struct drm_file *filep)
+{
+    int ret = -EINVAL;
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct drm_nvidia_gem_prime_fence_attach_params *p = data;
+
+    struct nv_drm_gem_object *nv_gem;
+    struct nv_drm_fence_context *nv_fence_context;
+    struct dma_fence *fence;
+
+    if (nv_dev->pDevice == NULL) {
+        ret = -EOPNOTSUPP;
+        goto done;
+    }
+
+    if (p->__pad != 0) {
+        NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
+        goto done;
+    }
+
+    nv_gem = nv_drm_gem_object_lookup(filep, p->handle);
+
+    if (!nv_gem) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to lookup gem object for fence attach: 0x%08x",
+            p->handle);
+
+        goto done;
+    }
+
+    if ((nv_fence_context = __nv_drm_fence_context_lookup(
+                filep,
+                p->fence_context_handle)) == NULL) {
+
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to lookup gem object for fence context: 0x%08x",
+            p->fence_context_handle);
+
+        goto fence_context_lookup_failed;
+    }
+
+    if (nv_fence_context->ops !=
+        &nv_drm_prime_fence_context_ops) {
+
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Wrong fence context type: 0x%08x",
+            p->fence_context_handle);
+
+        goto fence_context_create_fence_failed;
+    }
+
+    fence = __nv_drm_prime_fence_context_create_fence(
+        to_nv_prime_fence_context(nv_fence_context),
+        p->sem_thresh);
+
+    if (IS_ERR(fence)) {
+        ret = PTR_ERR(fence);
+
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to allocate fence: 0x%08x", p->handle);
+
+        goto fence_context_create_fence_failed;
+    }
+
+    ret = __nv_drm_gem_attach_fence(nv_gem, fence, true /* shared */);
+
+    dma_fence_put(fence);
+
+fence_context_create_fence_failed:
+    nv_drm_gem_object_unreference_unlocked(&nv_fence_context->base);
+
+fence_context_lookup_failed:
+    nv_drm_gem_object_unreference_unlocked(nv_gem);
+
+done:
+    return ret;
+}
+
+struct nv_drm_semsurf_fence {
+    struct dma_fence base;
+    spinlock_t lock;
+
+    /*
+     * When unsignaled, node in the associated fence context's pending fence
+     * list. The list holds a reference to the fence.
+     */
+    struct list_head pending_node;
+
+#if !defined(NV_DMA_FENCE_OPS_HAS_USE_64BIT_SEQNO)
+    /* 64-bit version of base.seqno on kernels with 32-bit fence seqno */
+    NvU64 wait_value;
+#endif
+
+    /*
+     * Raw absolute kernel time (time domain and scale are treated as opaque)
+     * when this fence times out.
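+     *
+     * (compared against nv_drm_timer_now() in
+     * __nv_drm_semsurf_ctx_process_completed() to expire stuck fences)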
+ */ + unsigned long timeout; +}; + +struct nv_drm_semsurf_fence_callback { + struct nv_drm_semsurf_fence_ctx *ctx; + nv_drm_work work; + NvU64 wait_value; +}; + +struct nv_drm_sync_fd_wait_data { + struct dma_fence_cb dma_fence_cb; + struct nv_drm_semsurf_fence_ctx *ctx; + nv_drm_work work; /* Deferred second half of fence wait callback */ + + /* Could use a lockless list data structure here instead */ + struct list_head pending_node; + + NvU64 pre_wait_value; + NvU64 post_wait_value; +}; + +struct nv_drm_semsurf_fence_ctx { + struct nv_drm_fence_context base; + + /* The NVKMS KAPI reference to the context's semaphore surface */ + struct NvKmsKapiSemaphoreSurface *pSemSurface; + + /* CPU mapping of the semaphore slot values */ + union { + volatile void *pVoid; + volatile NvU32 *p32; + volatile NvU64 *p64; + } pSemMapping; + volatile NvU64 *pMaxSubmittedMapping; + + /* work thread for fence timeouts and waits */ + nv_drm_workthread worker; + + /* Timeout timer and associated workthread work */ + nv_drm_timer timer; + nv_drm_work timeout_work; + + /* Protects access to everything below */ + spinlock_t lock; + + /* List of pending fences which are not yet signaled */ + struct list_head pending_fences; + + /* List of pending fence wait operations */ + struct list_head pending_waits; + + /* + * Tracking data for the single in-flight callback associated with this + * context. Either both pointers will be valid, or both will be NULL. + * + * Note it is not safe to dereference these values outside of the context + * lock unless it is certain the associated callback is not yet active, + * or has been canceled. Their memory is owned by the callback itself as + * soon as it is registered. Subtly, this means these variables can not + * be used as output parameters to the function that registers the callback. + */ + struct { + struct nv_drm_semsurf_fence_callback *local; + struct NvKmsKapiSemaphoreSurfaceCallback *nvKms; + } callback; + + /* + * Wait value associated with either the above or a being-registered + * callback. May differ from callback->local->wait_value if it is the + * latter. Zero if no callback is currently needed. + */ + NvU64 current_wait_value; +}; + +static inline struct nv_drm_semsurf_fence_ctx* +to_semsurf_fence_ctx( + struct nv_drm_fence_context *nv_fence_context +) +{ + return container_of(nv_fence_context, + struct nv_drm_semsurf_fence_ctx, + base); +} + +static inline NvU64 +__nv_drm_get_semsurf_fence_seqno(const struct nv_drm_semsurf_fence *nv_fence) +{ +#if defined(NV_DMA_FENCE_OPS_HAS_USE_64BIT_SEQNO) + return nv_fence->base.seqno; +#else + return nv_fence->wait_value; +#endif +} + +#ifndef READ_ONCE +#define READ_ONCE(x) ACCESS_ONCE(x) +#endif + +static inline NvU64 +__nv_drm_get_semsurf_ctx_seqno(struct nv_drm_semsurf_fence_ctx *ctx) +{ + NvU64 semVal; + + if (ctx->pMaxSubmittedMapping) { + /* 32-bit GPU semaphores */ + NvU64 maxSubmitted = READ_ONCE(*ctx->pMaxSubmittedMapping); + + /* + * Must happen after the max submitted read! See + * NvTimeSemFermiGetPayload() for full details. + */ + semVal = READ_ONCE(*ctx->pSemMapping.p32); + + if ((maxSubmitted & 0xFFFFFFFFull) < semVal) { + maxSubmitted -= 0x100000000ull; + } + + semVal |= (maxSubmitted & 0xffffffff00000000ull); + } else { + /* 64-bit GPU semaphores */ + semVal = READ_ONCE(*ctx->pSemMapping.p64); + } + + return semVal; +} + +static void +__nv_drm_semsurf_force_complete_pending(struct nv_drm_semsurf_fence_ctx *ctx) +{ + unsigned long flags; + + /* + * No locks are needed for the pending_fences list. 
This code runs after all
+     * other possible references to the fence context have been removed. The
+     * fences have their own individual locks to protect themselves.
+     */
+    while (!list_empty(&ctx->pending_fences)) {
+        struct nv_drm_semsurf_fence *nv_fence = list_first_entry(
+            &ctx->pending_fences,
+            typeof(*nv_fence),
+            pending_node);
+        struct dma_fence *fence = &nv_fence->base;
+
+        list_del(&nv_fence->pending_node);
+
+        dma_fence_set_error(fence, -ETIMEDOUT);
+        dma_fence_signal(fence);
+
+        /* Remove the pending list's reference */
+        dma_fence_put(fence);
+    }
+
+    /*
+     * The pending waits are also referenced by the fences they are waiting
+     * on, but those fences are guaranteed to complete in finite time. Just
+     * keep the context alive until they do so.
+     */
+    spin_lock_irqsave(&ctx->lock, flags);
+    while (!list_empty(&ctx->pending_waits)) {
+        spin_unlock_irqrestore(&ctx->lock, flags);
+        nv_drm_yield();
+        spin_lock_irqsave(&ctx->lock, flags);
+    }
+    spin_unlock_irqrestore(&ctx->lock, flags);
+}
+
+/* Forward declaration */
+static void
+__nv_drm_semsurf_ctx_reg_callbacks(struct nv_drm_semsurf_fence_ctx *ctx);
+
+static void
+__nv_drm_semsurf_ctx_fence_callback_work(void *data)
+{
+    struct nv_drm_semsurf_fence_callback *callback = data;
+
+    __nv_drm_semsurf_ctx_reg_callbacks(callback->ctx);
+
+    nv_drm_free(callback);
+}
+
+static struct nv_drm_semsurf_fence_callback*
+__nv_drm_semsurf_new_callback(struct nv_drm_semsurf_fence_ctx *ctx)
+{
+    struct nv_drm_semsurf_fence_callback *newCallback =
+        nv_drm_calloc(1, sizeof(*newCallback));
+
+    if (!newCallback) {
+        return NULL;
+    }
+
+    newCallback->ctx = ctx;
+    nv_drm_workthread_work_init(&newCallback->work,
+                                __nv_drm_semsurf_ctx_fence_callback_work,
+                                newCallback);
+
+    return newCallback;
+}
+
+static void
+__nv_drm_semsurf_ctx_process_completed(struct nv_drm_semsurf_fence_ctx *ctx,
+                                       NvU64 *newWaitValueOut,
+                                       unsigned long *newTimeoutOut)
+{
+    struct list_head finished;
+    struct list_head timed_out;
+    struct nv_drm_semsurf_fence *nv_fence;
+    struct dma_fence *fence;
+    NvU64 currentSeqno = __nv_drm_get_semsurf_ctx_seqno(ctx);
+    NvU64 fenceSeqno = 0;
+    unsigned long flags;
+    unsigned long fenceTimeout = 0;
+    unsigned long now = nv_drm_timer_now();
+
+    INIT_LIST_HEAD(&finished);
+    INIT_LIST_HEAD(&timed_out);
+
+    spin_lock_irqsave(&ctx->lock, flags);
+
+    while (!list_empty(&ctx->pending_fences)) {
+        nv_fence = list_first_entry(&ctx->pending_fences,
+                                    typeof(*nv_fence),
+                                    pending_node);
+
+        fenceSeqno = __nv_drm_get_semsurf_fence_seqno(nv_fence);
+        fenceTimeout = nv_fence->timeout;
+
+        if (fenceSeqno <= currentSeqno) {
+            list_move_tail(&nv_fence->pending_node, &finished);
+        } else if (fenceTimeout <= now) {
+            list_move_tail(&nv_fence->pending_node, &timed_out);
+        } else {
+            break;
+        }
+    }
+
+    /*
+     * If the caller passes non-NULL newWaitValueOut and newTimeoutOut
+     * parameters, it establishes a contract. If the returned values are
+     * non-zero, the caller must attempt to register a callback associated
+     * with the new wait value and reset the context's timer to the specified
+     * timeout.
+     */
+    if (newWaitValueOut && newTimeoutOut) {
+        if (list_empty(&ctx->pending_fences)) {
+            /* No pending fences, so no waiter is needed. */
+            ctx->current_wait_value = fenceSeqno = 0;
+            fenceTimeout = 0;
+        } else if (fenceSeqno == ctx->current_wait_value) {
+            /*
+             * The context already has a waiter registered, or in the process
+             * of being registered, for this fence. Indicate to the caller no
+             * new waiter registration is needed, and leave the ctx state
+             * alone.
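+             *
+             * (returning 0/0 below tells the caller not to register anything)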
+ */ + fenceSeqno = 0; + fenceTimeout = 0; + } else { + /* A new waiter must be registered. Prep the context */ + ctx->current_wait_value = fenceSeqno; + } + + *newWaitValueOut = fenceSeqno; + *newTimeoutOut = fenceTimeout; + } + + spin_unlock_irqrestore(&ctx->lock, flags); + + while (!list_empty(&finished)) { + nv_fence = list_first_entry(&finished, typeof(*nv_fence), pending_node); + list_del_init(&nv_fence->pending_node); + fence = &nv_fence->base; + dma_fence_signal(fence); + dma_fence_put(fence); /* Drops the pending list's reference */ + } + + while (!list_empty(&timed_out)) { + nv_fence = list_first_entry(&timed_out, typeof(*nv_fence), + pending_node); + list_del_init(&nv_fence->pending_node); + fence = &nv_fence->base; + dma_fence_set_error(fence, -ETIMEDOUT); + dma_fence_signal(fence); + dma_fence_put(fence); /* Drops the pending list's reference */ + } +} + +static void +__nv_drm_semsurf_ctx_callback(void *data) +{ + struct nv_drm_semsurf_fence_callback *callback = data; + struct nv_drm_semsurf_fence_ctx *ctx = callback->ctx; + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + /* If this was the context's currently registered callback, clear it. */ + if (ctx->callback.local == callback) { + ctx->callback.local = NULL; + ctx->callback.nvKms = NULL; + } + /* If storing of this callback may have been pending, prevent it. */ + if (ctx->current_wait_value == callback->wait_value) { + ctx->current_wait_value = 0; + } + spin_unlock_irqrestore(&ctx->lock, flags); + + /* + * This is redundant with the __nv_drm_semsurf_ctx_reg_callbacks() call from + * __nv_drm_semsurf_ctx_fence_callback_work(), which will be called by the + * work enqueued below, but calling it here as well allows unblocking + * waiters with less latency. + */ + __nv_drm_semsurf_ctx_process_completed(ctx, NULL, NULL); + + if (!nv_drm_workthread_add_work(&ctx->worker, &callback->work)) { + /* + * The context is shutting down. It will force-signal all fences when + * doing so, so there's no need for any more callback handling. + */ + nv_drm_free(callback); + } +} + +/* + * Take spin lock, attempt to stash newNvKmsCallback/newCallback in ctx. + * If current_wait_value in fence context != new_wait_value, we raced with + * someone registering a newer waiter. Release spin lock, and unregister our + * waiter. It isn't needed anymore. + */ +static bool +__nv_drm_semsurf_ctx_store_callback( + struct nv_drm_semsurf_fence_ctx *ctx, + NvU64 new_wait_value, + struct NvKmsKapiSemaphoreSurfaceCallback *newNvKmsCallback, + struct nv_drm_semsurf_fence_callback *newCallback) +{ + struct nv_drm_device *nv_dev = ctx->base.nv_dev; + struct NvKmsKapiSemaphoreSurfaceCallback *oldNvKmsCallback; + struct nv_drm_semsurf_fence_callback *oldCallback = NULL; + NvU64 oldWaitValue; + unsigned long flags; + bool installed = false; + + spin_lock_irqsave(&ctx->lock, flags); + if (ctx->current_wait_value == new_wait_value) { + oldCallback = ctx->callback.local; + oldNvKmsCallback = ctx->callback.nvKms; + oldWaitValue = oldCallback ? oldCallback->wait_value : 0; + ctx->callback.local = newCallback; + ctx->callback.nvKms = newNvKmsCallback; + installed = true; + } + spin_unlock_irqrestore(&ctx->lock, flags); + + if (oldCallback) { + if (nvKms->unregisterSemaphoreSurfaceCallback(nv_dev->pDevice, + ctx->pSemSurface, + ctx->base.fenceSemIndex, + oldWaitValue, + oldNvKmsCallback)) { + /* + * The old callback was successfully canceled, and its NVKMS and RM + * resources have been freed. Free its local tracking data. 
+             */
+            nv_drm_free(oldCallback);
+        } else {
+            /*
+             * The new callback is already running. It will do no harm, and
+             * free itself.
+             */
+        }
+    }
+
+    return installed;
+}
+
+/*
+ * Processes completed fences and registers an RM callback and a timeout timer
+ * for the next incomplete fence, if any. To avoid calling in to RM while
+ * holding a spinlock, this is done in a loop until the state settles.
+ *
+ * Can NOT be called from an atomic context or an interrupt handler.
+ */
+static void
+__nv_drm_semsurf_ctx_reg_callbacks(struct nv_drm_semsurf_fence_ctx *ctx)
+{
+    struct nv_drm_device *nv_dev = ctx->base.nv_dev;
+    struct nv_drm_semsurf_fence_callback *newCallback =
+        __nv_drm_semsurf_new_callback(ctx);
+    struct NvKmsKapiSemaphoreSurfaceCallback *newNvKmsCallback;
+    NvU64 newWaitValue;
+    unsigned long newTimeout;
+    NvKmsKapiRegisterWaiterResult kapiRet;
+
+    if (!newCallback) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to allocate new fence signal callback data");
+        return;
+    }
+
+    do {
+        /*
+         * Process any completed or timed out fences. This returns the wait
+         * value and timeout of the first remaining pending fence, or 0/0
+         * if no pending fences remain. It will also tag the context as
+         * waiting for the value returned.
+         */
+        __nv_drm_semsurf_ctx_process_completed(ctx,
+                                               &newWaitValue,
+                                               &newTimeout);
+
+        if (newWaitValue == 0) {
+            /* No fences remain, so no callback is needed. */
+            nv_drm_free(newCallback);
+            newCallback = NULL;
+            return;
+        }
+
+        newCallback->wait_value = newWaitValue;
+
+        /*
+         * Attempt to register a callback for the remaining fences. Note this
+         * code may be running concurrently in multiple places, attempting to
+         * register a callback for the same value, a value greater than
+         * newWaitValue if more fences have since completed, or a value less
+         * than newWaitValue if new fences have been created tracking lower
+         * values than the previously lowest pending one. Hence, even if this
+         * registration succeeds, the callback may be discarded.
+         */
+        kapiRet =
+            nvKms->registerSemaphoreSurfaceCallback(nv_dev->pDevice,
+                                                    ctx->pSemSurface,
+                                                    __nv_drm_semsurf_ctx_callback,
+                                                    newCallback,
+                                                    ctx->base.fenceSemIndex,
+                                                    newWaitValue,
+                                                    0,
+                                                    &newNvKmsCallback);
+    } while (kapiRet == NVKMS_KAPI_REG_WAITER_ALREADY_SIGNALLED);
+
+    /* Can't deref newCallback at this point unless kapiRet indicates failure */
+
+    if (kapiRet != NVKMS_KAPI_REG_WAITER_SUCCESS) {
+        /*
+         * This is expected if another thread concurrently registered a
+         * callback for the same value, which is fine. That thread's callback
+         * will do the same work this thread's would have. Clean this one up
+         * and return.
+         *
+         * Another possibility is that an allocation or some other low-level
+         * operation that can spuriously fail has caused this failure, or of
+         * course a bug resulting in invalid usage of the
+         * registerSemaphoreSurfaceCallback() API. There is no good way to
+         * handle such failures, so the fence timeout will be relied upon to
+         * guarantee forward progress in those cases.
+         */
+        nv_drm_free(newCallback);
+        return;
+    }
+
+    nv_drm_mod_timer(&ctx->timer, newTimeout);
+
+    if (!__nv_drm_semsurf_ctx_store_callback(ctx,
+                                             newWaitValue,
+                                             newNvKmsCallback,
+                                             newCallback)) {
+        /*
+         * Another thread registered a callback for a different value before
+         * this thread's callback could be stored in the context, or the
+         * callback is already running. That's OK.
One of the following is true: + * + * -A new fence with a lower value has been registered, and the callback + * associated with that fence is now active and associated with the + * context. + * + * -This fence has already completed, and a new callback associated with + * a higher value has been registered and associated with the context. + * This lower-value callback is no longer needed, as any fences + * associated with it must have been marked completed before + * registering the higher-value callback. + * + * -The callback started running and cleared ctx->current_wait_value + * before the callback could be stored in the context. Work to signal + * the fence is now pending. + * + * Hence, it is safe to request cancellation of the callback and free + * the associated data if cancellation succeeds. + */ + if (nvKms->unregisterSemaphoreSurfaceCallback(nv_dev->pDevice, + ctx->pSemSurface, + ctx->base.fenceSemIndex, + newWaitValue, + newNvKmsCallback)) { + /* RM callback successfully canceled. Free local tracking data */ + nv_drm_free(newCallback); + } + } +} + +static void __nv_drm_semsurf_fence_ctx_destroy( + struct nv_drm_fence_context *nv_fence_context) +{ + struct nv_drm_device *nv_dev = nv_fence_context->nv_dev; + struct nv_drm_semsurf_fence_ctx *ctx = + to_semsurf_fence_ctx(nv_fence_context); + struct NvKmsKapiSemaphoreSurfaceCallback *pendingNvKmsCallback; + NvU64 pendingWaitValue; + unsigned long flags; + + /* + * The workthread must be shut down before the timer is stopped to ensure + * the timer does not queue work that restarts itself. + */ + nv_drm_workthread_shutdown(&ctx->worker); + + nv_timer_delete_sync(&ctx->timer.kernel_timer); + + /* + * The semaphore surface could still be sending callbacks, so it is still + * not safe to dereference the ctx->callback pointers. However, + * unregistering a callback via its handle is safe, as that code in NVKMS + * takes care to avoid dereferencing the handle until it knows the callback + * has been canceled in RM. This unregistration must be done to ensure the + * callback data is not leaked in NVKMS if it is still pending, as freeing + * the semaphore surface only cleans up RM's callback data. + */ + spin_lock_irqsave(&ctx->lock, flags); + pendingNvKmsCallback = ctx->callback.nvKms; + pendingWaitValue = ctx->callback.local ? + ctx->callback.local->wait_value : 0; + spin_unlock_irqrestore(&ctx->lock, flags); + + if (pendingNvKmsCallback) { + WARN_ON(pendingWaitValue == 0); + nvKms->unregisterSemaphoreSurfaceCallback(nv_dev->pDevice, + ctx->pSemSurface, + ctx->base.fenceSemIndex, + pendingWaitValue, + pendingNvKmsCallback); + } + + nvKms->freeSemaphoreSurface(nv_dev->pDevice, ctx->pSemSurface); + + /* + * Now that the semaphore surface, the timer, and the workthread are gone: + * + * -No more RM/NVKMS callbacks will arrive, nor are any in progress. Freeing + * the semaphore surface cancels all its callbacks associated with this + * instance of it, and idles any pending callbacks. + * + * -No more timer callbacks will arrive, nor are any in flight. + * + * -The workthread has been idled and is no longer running. + * + * Further, given the destructor is running, no other references to the + * fence context exist, so this code can assume no concurrent access to the + * fence context's data will happen from here on out. 
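+     *
+     * (The fences themselves are reference counted independently of the
+     * context, which is why any still-pending fences are force-signalled
+     * below rather than being allowed to keep the context alive.)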
+     */
+
+    if (ctx->callback.local) {
+        nv_drm_free(ctx->callback.local);
+        ctx->callback.local = NULL;
+        ctx->callback.nvKms = NULL;
+    }
+
+    __nv_drm_semsurf_force_complete_pending(ctx);
+
+    nv_drm_free(nv_fence_context);
+}
+
+static void
+__nv_drm_semsurf_ctx_timeout_work(void *data)
+{
+    struct nv_drm_semsurf_fence_ctx *ctx = data;
+
+    __nv_drm_semsurf_ctx_reg_callbacks(ctx);
+}
+
+static void
+__nv_drm_semsurf_ctx_timeout_callback(nv_drm_timer *timer)
+{
+    struct nv_drm_semsurf_fence_ctx *ctx =
+        container_of(timer, typeof(*ctx), timer);
+
+    /*
+     * Schedule work to register new waiter & timer on a worker thread.
+     *
+     * It does not matter if this fails. There are two possible failure cases:
+     *
+     * - ctx->timeout_work is already scheduled. That existing scheduled work
+     *   will do at least as much as any work scheduled right now and executed
+     *   immediately, which is sufficient.
+     *
+     * - The context is shutting down. In this case, all fences will be force-
+     *   signalled, so no further callbacks or timeouts are needed.
+     *
+     * Note this work may schedule a new timeout timer. To ensure that doesn't
+     * happen while context shutdown is idling the timer, the worker thread
+     * must be shut down before the timer is stopped.
+     */
+    nv_drm_workthread_add_work(&ctx->worker, &ctx->timeout_work);
+}
+
+static struct nv_drm_fence_context_ops
+nv_drm_semsurf_fence_ctx_ops = {
+    .destroy = __nv_drm_semsurf_fence_ctx_destroy,
+};
+
+static struct nv_drm_semsurf_fence_ctx*
+__nv_drm_semsurf_fence_ctx_new(
+    struct nv_drm_device *nv_dev,
+    struct drm_nvidia_semsurf_fence_ctx_create_params *p
+)
+{
+    struct nv_drm_semsurf_fence_ctx *ctx;
+    struct NvKmsKapiSemaphoreSurface *pSemSurface;
+    uint8_t *semMapping;
+    uint8_t *maxSubmittedMapping;
+    char worker_name[20+16+1]; /* strlen(nvidia-drm/timeline-) + 16 for %llx + NUL */
+
+    pSemSurface = nvKms->importSemaphoreSurface(nv_dev->pDevice,
+                                                p->nvkms_params_ptr,
+                                                p->nvkms_params_size,
+                                                (void **)&semMapping,
+                                                (void **)&maxSubmittedMapping);
+    if (!pSemSurface) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to import semaphore surface");
+
+        goto failed;
+    }
+
+    /*
+     * Allocate a fence context object and initialize it.
+     */
+
+    if ((ctx = nv_drm_calloc(1, sizeof(*ctx))) == NULL) {
+        goto failed_alloc_fence_context;
+    }
+
+    semMapping += (p->index * nv_dev->semsurf_stride);
+    if (maxSubmittedMapping) {
+        maxSubmittedMapping += (p->index * nv_dev->semsurf_stride) +
+            nv_dev->semsurf_max_submitted_offset;
+    }
+
+    /*
+     * dma_fence_context_alloc() cannot fail, so we do not need
+     * to check a return value.
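+     * (In current kernels, dma_fence_context_alloc() simply reserves a
+     * range from a global atomic64 counter, so it always succeeds.)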
+     */
+
+    ctx->base.ops = &nv_drm_semsurf_fence_ctx_ops;
+    ctx->base.nv_dev = nv_dev;
+    ctx->base.context = dma_fence_context_alloc(1);
+    ctx->base.fenceSemIndex = p->index;
+    ctx->pSemSurface = pSemSurface;
+    ctx->pSemMapping.pVoid = semMapping;
+    ctx->pMaxSubmittedMapping = (volatile NvU64 *)maxSubmittedMapping;
+    ctx->callback.local = NULL;
+    ctx->callback.nvKms = NULL;
+    ctx->current_wait_value = 0;
+
+    spin_lock_init(&ctx->lock);
+    INIT_LIST_HEAD(&ctx->pending_fences);
+    INIT_LIST_HEAD(&ctx->pending_waits);
+
+    sprintf(worker_name, "nvidia-drm/timeline-%llx",
+            (long long unsigned)ctx->base.context);
+    if (!nv_drm_workthread_init(&ctx->worker, worker_name)) {
+        goto failed_alloc_worker;
+    }
+
+    nv_drm_workthread_work_init(&ctx->timeout_work,
+                                __nv_drm_semsurf_ctx_timeout_work,
+                                ctx);
+
+    nv_drm_timer_setup(&ctx->timer, __nv_drm_semsurf_ctx_timeout_callback);
+
+    return ctx;
+
+failed_alloc_worker:
+    nv_drm_free(ctx);
+
+failed_alloc_fence_context:
+    nvKms->freeSemaphoreSurface(nv_dev->pDevice, pSemSurface);
+
+failed:
+    return NULL;
+}
+
+int nv_drm_semsurf_fence_ctx_create_ioctl(struct drm_device *dev,
+                                          void *data,
+                                          struct drm_file *filep)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct drm_nvidia_semsurf_fence_ctx_create_params *p = data;
+    struct nv_drm_semsurf_fence_ctx *ctx;
+    int err;
+
+    if (nv_dev->pDevice == NULL) {
+        return -EOPNOTSUPP;
+    }
+
+    if (p->__pad != 0) {
+        NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
+        return -EINVAL;
+    }
+
+    ctx = __nv_drm_semsurf_fence_ctx_new(nv_dev, p);
+
+    if (!ctx) {
+        return -ENOMEM;
+    }
+
+    err = __nv_drm_fence_context_gem_init(dev, &ctx->base, &p->handle, filep);
+
+    if (err) {
+        __nv_drm_semsurf_fence_ctx_destroy(&ctx->base);
+    }
+
+    return err;
+}
+
+static inline struct nv_drm_semsurf_fence*
+to_nv_drm_semsurf_fence(struct dma_fence *fence)
+{
+    return container_of(fence, struct nv_drm_semsurf_fence, base);
+}
+
+static const char*
+__nv_drm_semsurf_fence_op_get_timeline_name(struct dma_fence *fence)
+{
+    return "nvidia.semaphore_surface";
+}
+
+static bool
+__nv_drm_semsurf_fence_op_enable_signaling(struct dma_fence *fence)
+{
+    // DO NOTHING - Could defer RM callback registration until this point
+    return true;
+}
+
+static void
+__nv_drm_semsurf_fence_op_release(struct dma_fence *fence)
+{
+    struct nv_drm_semsurf_fence *nv_fence =
+        to_nv_drm_semsurf_fence(fence);
+
+    nv_drm_free(nv_fence);
+}
+
+static const struct dma_fence_ops nv_drm_semsurf_fence_ops = {
+    .get_driver_name = nv_drm_gem_fence_op_get_driver_name,
+    .get_timeline_name = __nv_drm_semsurf_fence_op_get_timeline_name,
+    .enable_signaling = __nv_drm_semsurf_fence_op_enable_signaling,
+    .release = __nv_drm_semsurf_fence_op_release,
+    .wait = dma_fence_default_wait,
+#if defined(NV_DMA_FENCE_OPS_HAS_USE_64BIT_SEQNO)
+    .use_64bit_seqno = true,
+#endif
+};
+
+/*
+ * Completes fence initialization, places a new reference to the fence in the
+ * context's pending fence list, and updates/registers any RM callbacks and
+ * timeout timers if necessary.
+ *
+ * Can NOT be called from an atomic context/interrupt handler.
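+ * (It ends by calling __nv_drm_semsurf_ctx_reg_callbacks(), which may call
+ * into RM and sleep.)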
+ */ +static void +__nv_drm_semsurf_ctx_add_pending(struct nv_drm_semsurf_fence_ctx *ctx, + struct nv_drm_semsurf_fence *nv_fence, + NvU64 timeoutMS) +{ + struct list_head *pending; + unsigned long flags; + + if (timeoutMS > NV_DRM_SEMAPHORE_SURFACE_FENCE_MAX_TIMEOUT_MS) { + timeoutMS = NV_DRM_SEMAPHORE_SURFACE_FENCE_MAX_TIMEOUT_MS; + } + + /* Add a reference to the fence for the list */ + dma_fence_get(&nv_fence->base); + INIT_LIST_HEAD(&nv_fence->pending_node); + + nv_fence->timeout = nv_drm_timeout_from_ms(timeoutMS); + + spin_lock_irqsave(&ctx->lock, flags); + + list_for_each(pending, &ctx->pending_fences) { + struct nv_drm_semsurf_fence *pending_fence = + list_entry(pending, typeof(*pending_fence), pending_node); + if (__nv_drm_get_semsurf_fence_seqno(pending_fence) > + __nv_drm_get_semsurf_fence_seqno(nv_fence)) { + /* Inserts 'nv_fence->pending_node' before 'pending' */ + list_add_tail(&nv_fence->pending_node, pending); + break; + } + } + + if (list_empty(&nv_fence->pending_node)) { + /* + * Inserts 'fence->pending_node' at the end of 'ctx->pending_fences', + * or as the head if the list is empty + */ + list_add_tail(&nv_fence->pending_node, &ctx->pending_fences); + } + + /* Fence is live starting... now! */ + spin_unlock_irqrestore(&ctx->lock, flags); + + /* Register new wait and timeout callbacks, if necessary */ + __nv_drm_semsurf_ctx_reg_callbacks(ctx); +} + +static struct dma_fence *__nv_drm_semsurf_fence_ctx_create_fence( + struct nv_drm_device *nv_dev, + struct nv_drm_semsurf_fence_ctx *ctx, + NvU64 wait_value, + NvU64 timeout_value_ms) +{ + struct nv_drm_semsurf_fence *nv_fence; + struct dma_fence *fence; + int ret = 0; + + if (timeout_value_ms == 0 || + timeout_value_ms > NV_DRM_SEMAPHORE_SURFACE_FENCE_MAX_TIMEOUT_MS) { + timeout_value_ms = NV_DRM_SEMAPHORE_SURFACE_FENCE_MAX_TIMEOUT_MS; + } + + if ((nv_fence = nv_drm_calloc(1, sizeof(*nv_fence))) == NULL) { + ret = -ENOMEM; + goto out; + } + + fence = &nv_fence->base; + spin_lock_init(&nv_fence->lock); +#if !defined(NV_DMA_FENCE_OPS_HAS_USE_64BIT_SEQNO) + nv_fence->wait_value = wait_value; +#endif + + /* Initializes the fence with one reference (for the caller) */ + dma_fence_init(fence, &nv_drm_semsurf_fence_ops, + &nv_fence->lock, + ctx->base.context, wait_value); + + __nv_drm_semsurf_ctx_add_pending(ctx, nv_fence, timeout_value_ms); + +out: + /* Returned fence has one reference reserved for the caller. */ + return ret != 0 ? 
ERR_PTR(ret) : &nv_fence->base; +} + +int nv_drm_semsurf_fence_create_ioctl(struct drm_device *dev, + void *data, + struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_semsurf_fence_create_params *p = data; + struct nv_drm_fence_context *nv_fence_context; + struct dma_fence *fence; + int ret = -EINVAL; + int fd; + + if (nv_dev->pDevice == NULL) { + ret = -EOPNOTSUPP; + goto done; + } + + if (p->__pad != 0) { + NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed"); + goto done; + } + + if ((nv_fence_context = __nv_drm_fence_context_lookup( + filep, + p->fence_context_handle)) == NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lookup gem object for fence context: 0x%08x", + p->fence_context_handle); + + goto done; + } + + if (nv_fence_context->ops != &nv_drm_semsurf_fence_ctx_ops) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Wrong fence context type: 0x%08x", + p->fence_context_handle); + + goto fence_context_create_fence_failed; + } + + fence = __nv_drm_semsurf_fence_ctx_create_fence( + nv_dev, + to_semsurf_fence_ctx(nv_fence_context), + p->wait_value, + p->timeout_value_ms); + + if (IS_ERR(fence)) { + ret = PTR_ERR(fence); + + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to allocate fence: 0x%08x", p->fence_context_handle); + + goto fence_context_create_fence_failed; + } + + if ((fd = nv_drm_create_sync_file(fence)) < 0) { + ret = fd; + + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to create sync file from fence on ctx 0x%08x", + p->fence_context_handle); + + goto fence_context_create_sync_failed; + } + + p->fd = fd; + ret = 0; + +fence_context_create_sync_failed: + /* + * Release this function's reference to the fence. If successful, the sync + * FD will still hold a reference, and the pending list (if the fence hasn't + * already been signaled) will also retain a reference. + */ + dma_fence_put(fence); + +fence_context_create_fence_failed: + nv_drm_gem_object_unreference_unlocked(&nv_fence_context->base); + +done: + return ret; +} + +static void +__nv_drm_semsurf_free_wait_data(struct nv_drm_sync_fd_wait_data *wait_data) +{ + struct nv_drm_semsurf_fence_ctx *ctx = wait_data->ctx; + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + list_del(&wait_data->pending_node); + spin_unlock_irqrestore(&ctx->lock, flags); + + nv_drm_free(wait_data); +} + +static void +__nv_drm_semsurf_wait_fence_work_cb +( + void *arg +) +{ + struct nv_drm_sync_fd_wait_data *wait_data = arg; + struct nv_drm_semsurf_fence_ctx *ctx = wait_data->ctx; + struct nv_drm_device *nv_dev = ctx->base.nv_dev; + NvKmsKapiRegisterWaiterResult ret; + + /* + * Note this command applies "newValue" immediately if the semaphore has + * already reached "waitValue." It only returns NVKMS_KAPI_ALREADY_SIGNALLED + * if a separate notification was requested as well. 
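+     * Since no notification callback is passed in this case, a return value
+     * of NVKMS_KAPI_REG_WAITER_SUCCESS is expected even when the semaphore
+     * has already crossed pre_wait_value.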
+ */ + ret = nvKms->registerSemaphoreSurfaceCallback(nv_dev->pDevice, + ctx->pSemSurface, + NULL, + NULL, + ctx->base.fenceSemIndex, + wait_data->pre_wait_value, + wait_data->post_wait_value, + NULL); + + if (ret != NVKMS_KAPI_REG_WAITER_SUCCESS) { + NV_DRM_DEV_LOG_ERR(nv_dev, + "Failed to register auto-value-update on pre-wait value for sync FD semaphore surface"); + } + + __nv_drm_semsurf_free_wait_data(wait_data); +} + +static void +__nv_drm_semsurf_wait_fence_cb +( + struct dma_fence *fence, + struct dma_fence_cb *cb +) +{ + struct nv_drm_sync_fd_wait_data *wait_data = + container_of(cb, typeof(*wait_data), dma_fence_cb); + struct nv_drm_semsurf_fence_ctx *ctx = wait_data->ctx; + + /* + * Defer registering the wait with RM to a worker thread, since + * this function may be called in interrupt context, which + * could mean arriving here directly from RM's top/bottom half + * handler when the fence being waited on came from an RM-managed GPU. + */ + if (!nv_drm_workthread_add_work(&ctx->worker, &wait_data->work)) { + /* + * The context is shutting down. RM would likely just drop + * the wait anyway as part of that, so do nothing. Either the + * client is exiting uncleanly, or it is a bug in the client + * in that it didn't consume its wait before destroying the + * fence context used to instantiate it. + */ + __nv_drm_semsurf_free_wait_data(wait_data); + } + + /* Don't need to reference the fence anymore, just the fence context. */ + dma_fence_put(fence); +} + +int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev, + void *data, + struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_semsurf_fence_wait_params *p = data; + struct nv_drm_fence_context *nv_fence_context; + struct nv_drm_semsurf_fence_ctx *ctx; + struct nv_drm_sync_fd_wait_data *wait_data = NULL; + struct dma_fence *fence; + unsigned long flags; + int ret = -EINVAL; + + if (nv_dev->pDevice == NULL) { + return -EOPNOTSUPP; + } + + if (p->pre_wait_value >= p->post_wait_value) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Non-monotonic wait values specified to fence wait: 0x%" NvU64_fmtu ", 0x%" NvU64_fmtu, + p->pre_wait_value, p->post_wait_value); + goto done; + } + + if ((nv_fence_context = __nv_drm_fence_context_lookup( + filep, + p->fence_context_handle)) == NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lookup gem object for fence context: 0x%08x", + p->fence_context_handle); + + goto done; + } + + if (nv_fence_context->ops != &nv_drm_semsurf_fence_ctx_ops) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Wrong fence context type: 0x%08x", + p->fence_context_handle); + + goto fence_context_sync_lookup_failed; + } + + ctx = to_semsurf_fence_ctx(nv_fence_context); + + wait_data = nv_drm_calloc(1, sizeof(*wait_data)); + + if (!wait_data) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to allocate callback data for sync FD wait: %d", p->fd); + + goto fence_context_sync_lookup_failed; + } + + fence = nv_drm_sync_file_get_fence(p->fd); + + if (!fence) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Attempt to wait on invalid sync FD: %d", p->fd); + + goto fence_context_sync_lookup_failed; + } + + wait_data->ctx = ctx; + wait_data->pre_wait_value = p->pre_wait_value; + wait_data->post_wait_value = p->post_wait_value; + nv_drm_workthread_work_init(&wait_data->work, + __nv_drm_semsurf_wait_fence_work_cb, + wait_data); + + spin_lock_irqsave(&ctx->lock, flags); + list_add(&wait_data->pending_node, &ctx->pending_waits); + spin_unlock_irqrestore(&ctx->lock, flags); + + ret = dma_fence_add_callback(fence, + 
&wait_data->dma_fence_cb,
+                                 __nv_drm_semsurf_wait_fence_cb);
+
+    if (ret) {
+        if (ret == -ENOENT) {
+            /* The fence is already signaled */
+        } else {
+            NV_DRM_LOG_ERR(
+                "Failed to add dma_fence callback. Signaling early!");
+            /* Proceed as if the fence wait succeeded */
+        }
+
+        /* Execute second half of wait immediately, avoiding the worker thread */
+        dma_fence_put(fence);
+        __nv_drm_semsurf_wait_fence_work_cb(wait_data);
+    }
+
+    ret = 0;
+
+fence_context_sync_lookup_failed:
+    if (ret && wait_data) {
+        /*
+         * Do not use __nv_drm_semsurf_free_wait_data() here, as the wait_data
+         * has not been added to the pending list yet.
+         */
+        nv_drm_free(wait_data);
+    }
+
+    nv_drm_gem_object_unreference_unlocked(&nv_fence_context->base);
+
+done:
+    return ret;
+}
+
+int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
+                                      void *data,
+                                      struct drm_file *filep)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct drm_nvidia_semsurf_fence_attach_params *p = data;
+    struct nv_drm_gem_object *nv_gem = NULL;
+    struct nv_drm_fence_context *nv_fence_context = NULL;
+    struct dma_fence *fence;
+    int ret = -EINVAL;
+
+    if (nv_dev->pDevice == NULL) {
+        ret = -EOPNOTSUPP;
+        goto done;
+    }
+
+    nv_gem = nv_drm_gem_object_lookup(filep, p->handle);
+
+    if (!nv_gem) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to lookup gem object for fence attach: 0x%08x",
+            p->handle);
+
+        goto done;
+    }
+
+    nv_fence_context = __nv_drm_fence_context_lookup(
+        filep,
+        p->fence_context_handle);
+
+    if (!nv_fence_context) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to lookup gem object for fence context: 0x%08x",
+            p->fence_context_handle);
+
+        goto done;
+    }
+
+    if (nv_fence_context->ops != &nv_drm_semsurf_fence_ctx_ops) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Wrong fence context type: 0x%08x",
+            p->fence_context_handle);
+
+        goto done;
+    }
+
+    fence = __nv_drm_semsurf_fence_ctx_create_fence(
+        nv_dev,
+        to_semsurf_fence_ctx(nv_fence_context),
+        p->wait_value,
+        p->timeout_value_ms);
+
+    if (IS_ERR(fence)) {
+        ret = PTR_ERR(fence);
+
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to allocate fence: 0x%08x", p->handle);
+
+        goto done;
+    }
+
+    ret = __nv_drm_gem_attach_fence(nv_gem, fence, p->shared);
+
+    dma_fence_put(fence);
+
+done:
+    if (nv_fence_context) {
+        nv_drm_gem_object_unreference_unlocked(&nv_fence_context->base);
+    }
+
+    if (nv_gem) {
+        nv_drm_gem_object_unreference_unlocked(nv_gem);
+    }
+
+    return ret;
+}
+
+#endif /* NV_DRM_AVAILABLE */
diff --git a/kernel-open/nvidia-drm/nvidia-drm-fence.h b/kernel-open/nvidia-drm/nvidia-drm-fence.h
new file mode 100644
index 0000000..3b33283
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-fence.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_PRIME_FENCE_H__ +#define __NVIDIA_DRM_PRIME_FENCE_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +struct drm_file; +struct drm_device; + +int nv_drm_fence_supported_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_prime_fence_context_create_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_semsurf_fence_ctx_create_ioctl(struct drm_device *dev, + void *data, + struct drm_file *filep); + +int nv_drm_semsurf_fence_create_ioctl(struct drm_device *dev, + void *data, + struct drm_file *filep); + +int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev, + void *data, + struct drm_file *filep); + +int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev, + void *data, + struct drm_file *filep); + +#endif /* NV_DRM_AVAILABLE */ + +#endif /* __NVIDIA_DRM_PRIME_FENCE_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-format.c b/kernel-open/nvidia-drm/nvidia-drm-format.c new file mode 100644 index 0000000..a539d9d --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-format.c @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2019-2025, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif +#include +#include + +#include "nvidia-drm-format.h" +#include "nvidia-drm-os-interface.h" + +static const u32 nvkms_to_drm_format[] = { + /* RGB formats */ + [NvKmsSurfaceMemoryFormatA1R5G5B5] = DRM_FORMAT_ARGB1555, + [NvKmsSurfaceMemoryFormatX1R5G5B5] = DRM_FORMAT_XRGB1555, + [NvKmsSurfaceMemoryFormatR5G6B5] = DRM_FORMAT_RGB565, + [NvKmsSurfaceMemoryFormatA8R8G8B8] = DRM_FORMAT_ARGB8888, + [NvKmsSurfaceMemoryFormatX8R8G8B8] = DRM_FORMAT_XRGB8888, + [NvKmsSurfaceMemoryFormatX8B8G8R8] = DRM_FORMAT_XBGR8888, + [NvKmsSurfaceMemoryFormatA2B10G10R10] = DRM_FORMAT_ABGR2101010, + [NvKmsSurfaceMemoryFormatX2B10G10R10] = DRM_FORMAT_XBGR2101010, + [NvKmsSurfaceMemoryFormatA8B8G8R8] = DRM_FORMAT_ABGR8888, +#if defined(DRM_FORMAT_ABGR16161616) + /* + * DRM_FORMAT_ABGR16161616 was introduced by Linux kernel commit + * ff92ecf575a92 (v5.14). + */ + [NvKmsSurfaceMemoryFormatR16G16B16A16] = DRM_FORMAT_ABGR16161616, +#endif +#if defined(DRM_FORMAT_ABGR16161616F) + [NvKmsSurfaceMemoryFormatRF16GF16BF16AF16] = DRM_FORMAT_ABGR16161616F, +#endif +#if defined(DRM_FORMAT_XBGR16161616F) + [NvKmsSurfaceMemoryFormatRF16GF16BF16XF16] = DRM_FORMAT_XBGR16161616F, +#endif + + [NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422] = DRM_FORMAT_YUYV, + [NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422] = DRM_FORMAT_UYVY, + + /* YUV semi-planar formats + * + * NVKMS YUV semi-planar formats are MSB aligned. Yx__UxVx means + * that the UV components are packed like UUUUUVVVVV (MSB to LSB) + * and Yx_VxUx means VVVVVUUUUU (MSB to LSB). + */ + + /* + * 2 plane YCbCr + * index 0 = Y plane, [7:0] Y + * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian + * or + * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian + */ + [NvKmsSurfaceMemoryFormatY8___V8U8_N444] = DRM_FORMAT_NV24, /* non-subsampled Cr:Cb plane */ + [NvKmsSurfaceMemoryFormatY8___U8V8_N444] = DRM_FORMAT_NV42, /* non-subsampled Cb:Cr plane */ + [NvKmsSurfaceMemoryFormatY8___V8U8_N422] = DRM_FORMAT_NV16, /* 2x1 subsampled Cr:Cb plane */ + [NvKmsSurfaceMemoryFormatY8___U8V8_N422] = DRM_FORMAT_NV61, /* 2x1 subsampled Cb:Cr plane */ + [NvKmsSurfaceMemoryFormatY8___V8U8_N420] = DRM_FORMAT_NV12, /* 2x2 subsampled Cr:Cb plane */ + [NvKmsSurfaceMemoryFormatY8___U8V8_N420] = DRM_FORMAT_NV21, /* 2x2 subsampled Cb:Cr plane */ + +#if defined(DRM_FORMAT_P210) + /* + * 2 plane YCbCr MSB aligned + * index 0 = Y plane, [15:0] Y:x [10:6] little endian + * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian + * + * 2x1 subsampled Cr:Cb plane, 10 bit per channel + */ + [NvKmsSurfaceMemoryFormatY10___V10U10_N422] = DRM_FORMAT_P210, +#endif + +#if defined(DRM_FORMAT_P010) + /* + * 2 plane YCbCr MSB aligned + * index 0 = Y plane, [15:0] Y:x [10:6] little endian + * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian + * + * 2x2 subsampled Cr:Cb plane 10 bits per channel + */ + [NvKmsSurfaceMemoryFormatY10___V10U10_N420] = DRM_FORMAT_P010, +#endif + +#if defined(DRM_FORMAT_P012) + /* + * 2 plane YCbCr MSB aligned + * index 0 = Y plane, [15:0] Y:x [12:4] little endian + * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [12:4:12:4] little endian + * + * 2x2 subsampled Cr:Cb plane 12 bits per channel + */ + [NvKmsSurfaceMemoryFormatY12___V12U12_N420] = DRM_FORMAT_P012, +#endif +}; + +bool nv_drm_format_to_nvkms_format(u32 format, + enum NvKmsSurfaceMemoryFormat *nvkms_format) +{ + enum NvKmsSurfaceMemoryFormat 
i; + for (i = 0; i < ARRAY_SIZE(nvkms_to_drm_format); i++) { + /* + * Note nvkms_to_drm_format[] is sparsely populated: it doesn't + * handle all NvKmsSurfaceMemoryFormat values, so be sure to skip 0 + * entries when iterating through it. + */ + if (nvkms_to_drm_format[i] != 0 && nvkms_to_drm_format[i] == format) { + *nvkms_format = i; + return true; + } + } + return false; +} + +uint32_t *nv_drm_format_array_alloc( + unsigned int *count, + const long unsigned int nvkms_format_mask) +{ + enum NvKmsSurfaceMemoryFormat i; + unsigned int max_count = hweight64(nvkms_format_mask); + uint32_t *array = nv_drm_calloc(1, sizeof(uint32_t) * max_count); + + if (array == NULL) { + return NULL; + } + + *count = 0; + for_each_set_bit(i, &nvkms_format_mask, + sizeof(nvkms_format_mask) * BITS_PER_BYTE) { + + if (i >= ARRAY_SIZE(nvkms_to_drm_format)) { + break; + } + + /* + * Note nvkms_to_drm_format[] is sparsely populated: it doesn't + * handle all NvKmsSurfaceMemoryFormat values, so be sure to skip 0 + * entries when iterating through it. + */ + if (nvkms_to_drm_format[i] == 0) { + continue; + } + array[(*count)++] = nvkms_to_drm_format[i]; + } + + if (*count == 0) { + nv_drm_free(array); + return NULL; + } + + return array; +} + +bool nv_drm_format_is_yuv(u32 format) +{ +#if defined(NV_DRM_FORMAT_INFO_HAS_IS_YUV) + const struct drm_format_info *format_info = drm_format_info(format); + return (format_info != NULL) && format_info->is_yuv; +#else + switch (format) { + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + +#if defined(DRM_FORMAT_P210) + case DRM_FORMAT_P210: +#endif +#if defined(DRM_FORMAT_P010) + case DRM_FORMAT_P010: +#endif +#if defined(DRM_FORMAT_P012) + case DRM_FORMAT_P012: +#endif + return true; + default: + return false; + } +#endif +} + +#endif diff --git a/kernel-open/nvidia-drm/nvidia-drm-format.h b/kernel-open/nvidia-drm/nvidia-drm-format.h new file mode 100644 index 0000000..59ed908 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-format.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVIDIA_DRM_FORMAT_H__ +#define __NVIDIA_DRM_FORMAT_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include + +#include "nvkms-format.h" + +bool nv_drm_format_to_nvkms_format(u32 format, + enum NvKmsSurfaceMemoryFormat *nvkms_format); + +uint32_t *nv_drm_format_array_alloc( + unsigned int *count, + const long unsigned int nvkms_format_mask); + +bool nv_drm_format_is_yuv(u32 format); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#endif /* __NVIDIA_DRM_FORMAT_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c b/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c new file mode 100644 index 0000000..e9a73d5 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c @@ -0,0 +1,256 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#include + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#include + +#include "nvidia-drm-gem-dma-buf.h" +#include "nvidia-drm-ioctl.h" + +#include "linux/dma-buf.h" + +static inline +void __nv_drm_gem_dma_buf_free(struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_device *nv_dev = nv_gem->nv_dev; + struct nv_drm_gem_dma_buf *nv_dma_buf = to_nv_dma_buf(nv_gem); + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + if (nv_dma_buf->base.pMemory) { + /* Free NvKmsKapiMemory handle associated with this gem object */ + nvKms->freeMemory(nv_dev->pDevice, nv_dma_buf->base.pMemory); + } +#endif + + drm_prime_gem_destroy(&nv_gem->base, nv_dma_buf->sgt); + + nv_drm_free(nv_dma_buf); +} + +static int __nv_drm_gem_dma_buf_create_mmap_offset( + struct nv_drm_device *nv_dev, + struct nv_drm_gem_object *nv_gem, + uint64_t *offset) +{ + (void)nv_dev; + return nv_drm_gem_create_mmap_offset(nv_gem, offset); +} + +static int __nv_drm_gem_dma_buf_mmap(struct nv_drm_gem_object *nv_gem, + struct vm_area_struct *vma) +{ +#if defined(NV_LINUX) + struct dma_buf_attachment *attach = nv_gem->base.import_attach; + struct dma_buf *dma_buf = attach->dmabuf; +#endif + struct file *old_file; + int ret; + + /* check if buffer supports mmap */ +#if defined(NV_BSD) + /* + * Most of the FreeBSD DRM code refers to struct file*, which is actually + * a struct linux_file*. 
The dmabuf code in FreeBSD is not actually plumbed + * through the same linuxkpi bits it seems (probably so it can be used + * elsewhere), so dma_buf->file really is a native FreeBSD struct file... + */ + if (!nv_gem->base.filp->f_op->mmap) + return -EINVAL; + + /* readjust the vma */ + get_file(nv_gem->base.filp); + old_file = vma->vm_file; + vma->vm_file = nv_gem->base.filp; + vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node); + + ret = nv_gem->base.filp->f_op->mmap(nv_gem->base.filp, vma); + + if (ret) { + /* restore old parameters on failure */ + vma->vm_file = old_file; + vma->vm_pgoff += drm_vma_node_start(&nv_gem->base.vma_node); + fput(nv_gem->base.filp); + } else { + if (old_file) + fput(old_file); + } +#else + if (!dma_buf->file->f_op->mmap) + return -EINVAL; + + /* readjust the vma */ + get_file(dma_buf->file); + old_file = vma->vm_file; + vma->vm_file = dma_buf->file; + vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node); + + ret = dma_buf->file->f_op->mmap(dma_buf->file, vma); + + if (ret) { + /* restore old parameters on failure */ + vma->vm_file = old_file; + vma->vm_pgoff += drm_vma_node_start(&nv_gem->base.vma_node); + fput(dma_buf->file); + } else { + if (old_file) + fput(old_file); + } +#endif + + return ret; +} + +const struct nv_drm_gem_object_funcs __nv_gem_dma_buf_ops = { + .free = __nv_drm_gem_dma_buf_free, + .create_mmap_offset = __nv_drm_gem_dma_buf_create_mmap_offset, + .mmap = __nv_drm_gem_dma_buf_mmap, +}; + +struct drm_gem_object* +nv_drm_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct dma_buf *dma_buf = attach->dmabuf; + struct nv_drm_gem_dma_buf *nv_dma_buf; + struct NvKmsKapiMemory *pMemory; + + if ((nv_dma_buf = + nv_drm_calloc(1, sizeof(*nv_dma_buf))) == NULL) { + return NULL; + } + + // dma_buf->size must be a multiple of PAGE_SIZE + BUG_ON(dma_buf->size % PAGE_SIZE); + + pMemory = NULL; +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + pMemory = nvKms->getSystemMemoryHandleFromDmaBuf(nv_dev->pDevice, + (NvP64)(NvUPtr)dma_buf, + dma_buf->size - 1); + } +#endif + + nv_drm_gem_object_init(nv_dev, &nv_dma_buf->base, + &__nv_gem_dma_buf_ops, dma_buf->size, pMemory); + + nv_dma_buf->sgt = sgt; + + return &nv_dma_buf->base.base; +} + +int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_gem_export_dmabuf_memory_params *p = data; + struct nv_drm_gem_dma_buf *nv_dma_buf = NULL; + int ret = 0; + struct NvKmsKapiMemory *pTmpMemory = NULL; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = -EINVAL; + goto done; + } + + if (p->__pad != 0) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed"); + goto done; + } + + if ((nv_dma_buf = nv_drm_gem_object_dma_buf_lookup( + filep, p->handle)) == NULL) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lookup DMA-BUF GEM object for export: 0x%08x", + p->handle); + goto done; + } + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + if (!nv_dma_buf->base.pMemory) { + /* + * Get RM system memory handle from SGT - RM will take a reference + * on this GEM object to prevent the DMA-BUF from being unpinned + * prematurely. 
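+             * (The handle obtained here is released again in the done: path
+             * below to avoid a reference cycle; the RM FD created by the
+             * subsequent export continues to hold its own reference.)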
+ */ + pTmpMemory = nvKms->getSystemMemoryHandleFromSgt( + nv_dev->pDevice, + (NvP64)(NvUPtr)nv_dma_buf->sgt, + (NvP64)(NvUPtr)&nv_dma_buf->base.base, + nv_dma_buf->base.base.size - 1); + } + } +#endif + + if (!nv_dma_buf->base.pMemory && !pTmpMemory) { + ret = -ENOMEM; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to get memory to export from DMA-BUF GEM object: 0x%08x", + p->handle); + goto done; + } + + if (!nvKms->exportMemory(nv_dev->pDevice, + nv_dma_buf->base.pMemory ? + nv_dma_buf->base.pMemory : pTmpMemory, + p->nvkms_params_ptr, + p->nvkms_params_size)) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to export memory from DMA-BUF GEM object: 0x%08x", + p->handle); + goto done; + } + +done: + if (pTmpMemory) { + /* + * Release reference on RM system memory to prevent circular + * refcounting. Another refcount will still be held by RM FD. + */ + nvKms->freeMemory(nv_dev->pDevice, pTmpMemory); + } + + if (nv_dma_buf != NULL) { + nv_drm_gem_object_unreference_unlocked(&nv_dma_buf->base); + } + + return ret; +} +#endif diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h b/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h new file mode 100644 index 0000000..5ca7710 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVIDIA_DRM_GEM_DMA_BUF_H__ +#define __NVIDIA_DRM_GEM_DMA_BUF_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#include "nvidia-drm-gem.h" + +struct nv_drm_gem_dma_buf { + struct nv_drm_gem_object base; + struct sg_table *sgt; +}; + +extern const struct nv_drm_gem_object_funcs __nv_gem_dma_buf_ops; + +static inline struct nv_drm_gem_dma_buf *to_nv_dma_buf( + struct nv_drm_gem_object *nv_gem) +{ + if (nv_gem != NULL) { + return container_of(nv_gem, struct nv_drm_gem_dma_buf, base); + } + + return NULL; +} + +static inline +struct nv_drm_gem_dma_buf *nv_drm_gem_object_dma_buf_lookup( + struct drm_file *filp, + u32 handle) +{ + struct nv_drm_gem_object *nv_gem = + nv_drm_gem_object_lookup(filp, handle); + + if (nv_gem != NULL && nv_gem->ops != &__nv_gem_dma_buf_ops) { + nv_drm_gem_object_unreference_unlocked(nv_gem); + return NULL; + } + + return to_nv_dma_buf(nv_gem); +} + +struct drm_gem_object* +nv_drm_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + +int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +#endif + +#endif /* __NVIDIA_DRM_GEM_DMA_BUF_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c b/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c new file mode 100644 index 0000000..8286a53 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c @@ -0,0 +1,643 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-gem-nvkms-memory.h" +#include "nvidia-drm-helper.h" +#include "nvidia-drm-ioctl.h" + +#include +#include + +#include +#if defined(NV_BSD) +#include +#endif + +static void __nv_drm_gem_nvkms_memory_free(struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_device *nv_dev = nv_gem->nv_dev; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = + to_nv_nvkms_memory(nv_gem); + + if (nv_nvkms_memory->physically_mapped) { + if (nv_nvkms_memory->pWriteCombinedIORemapAddress != NULL) { + iounmap(nv_nvkms_memory->pWriteCombinedIORemapAddress); + } + + nvKms->unmapMemory(nv_dev->pDevice, + nv_nvkms_memory->base.pMemory, + NVKMS_KAPI_MAPPING_TYPE_USER, + nv_nvkms_memory->pPhysicalAddress); + } + + if (nv_nvkms_memory->pages_count != 0) { + nvKms->freeMemoryPages((NvU64 *)nv_nvkms_memory->pages); + } + + /* Free NvKmsKapiMemory handle associated with this gem object */ + + nvKms->freeMemory(nv_dev->pDevice, nv_nvkms_memory->base.pMemory); + + nv_drm_free(nv_nvkms_memory); +} + +static int __nv_drm_gem_nvkms_map( + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory); + +static int __nv_drm_gem_nvkms_mmap(struct nv_drm_gem_object *nv_gem, + struct vm_area_struct *vma) +{ + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = + to_nv_nvkms_memory(nv_gem); + + int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory); + if (ret) { + return ret; + } + + return drm_gem_mmap_obj(&nv_gem->base, + drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma); +} + +static vm_fault_t __nv_drm_gem_nvkms_handle_vma_fault( + struct nv_drm_gem_object *nv_gem, + struct vm_area_struct *vma, + struct vm_fault *vmf) +{ +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = + to_nv_nvkms_memory(nv_gem); + unsigned long address = vmf->address; + struct drm_gem_object *gem = vma->vm_private_data; + unsigned long page_offset, pfn; + vm_fault_t ret; + + page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node); + + if (nv_nvkms_memory->pages_count == 0) { + pfn = (unsigned long)(uintptr_t)nv_nvkms_memory->pPhysicalAddress; + pfn >>= PAGE_SHIFT; +#if defined(NV_LINUX) + /* + * FreeBSD doesn't set pgoff. We instead have pfn be the base physical + * address, and we will calculate the index pidx from the virtual address. + * + * This only works because linux_cdev_pager_populate passes the pidx as + * vmf->virtual_address. Then we turn the virtual address + * into a physical page number. + */ + pfn += page_offset; +#endif + } else { + BUG_ON(page_offset >= nv_nvkms_memory->pages_count); + pfn = page_to_pfn(nv_nvkms_memory->pages[page_offset]); + } + +#if defined(NV_VMF_INSERT_PFN_PRESENT) + ret = vmf_insert_pfn(vma, address, pfn); +#else + ret = vm_insert_pfn(vma, address, pfn); + switch (ret) { + case 0: + case -EBUSY: + /* + * EBUSY indicates that another thread already handled + * the faulted range. 
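+         * The mapping is therefore already present, so report
+         * VM_FAULT_NOPAGE just as in the success case.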
+ */ + ret = VM_FAULT_NOPAGE; + break; + case -ENOMEM: + ret = VM_FAULT_OOM; + break; + default: + WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret); + ret = VM_FAULT_SIGBUS; + break; + } +#endif /* defined(NV_VMF_INSERT_PFN_PRESENT) */ + return ret; +#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */ + return VM_FAULT_SIGBUS; +} + +static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup( + struct drm_device *dev, + const struct nv_drm_gem_object *nv_gem_src); + +static int __nv_drm_gem_nvkms_map( + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory) +{ + int ret = 0; + struct nv_drm_device *nv_dev = nv_nvkms_memory->base.nv_dev; + struct NvKmsKapiMemory *pMemory = nv_nvkms_memory->base.pMemory; + + mutex_lock(&nv_nvkms_memory->map_lock); + + if (nv_nvkms_memory->physically_mapped) { + goto done; + } + + if (!nvKms->isVidmem(pMemory)) { + goto done; + } + + if (!nvKms->mapMemory(nv_dev->pDevice, + pMemory, + NVKMS_KAPI_MAPPING_TYPE_USER, + &nv_nvkms_memory->pPhysicalAddress)) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to map NvKmsKapiMemory 0x%p", + pMemory); + ret = -ENOMEM; + goto done; + } + + nv_nvkms_memory->pWriteCombinedIORemapAddress = ioremap_wc( + (uintptr_t)nv_nvkms_memory->pPhysicalAddress, + nv_nvkms_memory->base.base.size); + + if (!nv_nvkms_memory->pWriteCombinedIORemapAddress) { + NV_DRM_DEV_LOG_INFO( + nv_dev, + "Failed to ioremap_wc NvKmsKapiMemory 0x%p", + pMemory); + } + + nv_nvkms_memory->physically_mapped = true; + +done: + mutex_unlock(&nv_nvkms_memory->map_lock); + return ret; +} + +static void *__nv_drm_gem_nvkms_prime_vmap( + struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = + to_nv_nvkms_memory(nv_gem); + + int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory); + if (ret) { + return ERR_PTR(ret); + } + + if (nv_nvkms_memory->physically_mapped) { + return nv_nvkms_memory->pWriteCombinedIORemapAddress; + } + + /* + * If this buffer isn't physically mapped, it might be backed by struct + * pages. Use vmap in that case. Do a noncached mapping for system memory + * as display is non io-coherent device in case of Tegra. 
+ */ + if (nv_nvkms_memory->pages_count > 0) { + return nv_drm_vmap(nv_nvkms_memory->pages, + nv_nvkms_memory->pages_count, + false); + } + + return ERR_PTR(-ENOMEM); +} + +static void __nv_drm_gem_nvkms_prime_vunmap( + struct nv_drm_gem_object *nv_gem, + void *address) +{ + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = + to_nv_nvkms_memory(nv_gem); + + if (!nv_nvkms_memory->physically_mapped && + nv_nvkms_memory->pages_count > 0) { + nv_drm_vunmap(address); + } +} + +static int __nv_drm_gem_map_nvkms_memory_offset( + struct nv_drm_device *nv_dev, + struct nv_drm_gem_object *nv_gem, + uint64_t *offset) +{ + return nv_drm_gem_create_mmap_offset(nv_gem, offset); +} + +static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table( + struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_device *nv_dev = nv_gem->nv_dev; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = + to_nv_nvkms_memory(nv_gem); + struct sg_table *sg_table; + + if (nv_nvkms_memory->pages_count == 0) { + NV_DRM_DEV_DEBUG_DRIVER( + nv_dev, + "Cannot create sg_table for NvKmsKapiMemory 0x%p", + nv_gem->pMemory); + return ERR_PTR(-ENOMEM); + } + + sg_table = nv_drm_prime_pages_to_sg(nv_dev->dev, + nv_nvkms_memory->pages, + nv_nvkms_memory->pages_count); + + return sg_table; +} + +const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops = { + .free = __nv_drm_gem_nvkms_memory_free, + .prime_dup = __nv_drm_gem_nvkms_prime_dup, + .prime_vmap = __nv_drm_gem_nvkms_prime_vmap, + .prime_vunmap = __nv_drm_gem_nvkms_prime_vunmap, + .mmap = __nv_drm_gem_nvkms_mmap, + .handle_vma_fault = __nv_drm_gem_nvkms_handle_vma_fault, + .create_mmap_offset = __nv_drm_gem_map_nvkms_memory_offset, + .prime_get_sg_table = __nv_drm_gem_nvkms_memory_prime_get_sg_table, +}; + +static int __nv_drm_nvkms_gem_obj_init( + struct nv_drm_device *nv_dev, + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory, + struct NvKmsKapiMemory *pMemory, + uint64_t size) +{ + NvU64 *pages = NULL; + NvU32 numPages = 0; + + if ((size % PAGE_SIZE) != 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "NvKmsKapiMemory 0x%p size should be in a multiple of page size to " + "create a gem object", + pMemory); + return -EINVAL; + } + + mutex_init(&nv_nvkms_memory->map_lock); + nv_nvkms_memory->pPhysicalAddress = NULL; + nv_nvkms_memory->pWriteCombinedIORemapAddress = NULL; + nv_nvkms_memory->physically_mapped = false; + + if (!nvKms->isVidmem(pMemory) && + !nvKms->getMemoryPages(nv_dev->pDevice, + pMemory, + &pages, + &numPages)) { + /* GetMemoryPages will fail for vidmem allocations, + * but it should not fail for sysmem allocations. 
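+         * Since isVidmem() was checked first, reaching this error path for
+         * a system memory allocation indicates a real failure.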
*/ + NV_DRM_DEV_LOG_ERR(nv_dev, + "Failed to get memory pages for NvKmsKapiMemory 0x%p", + pMemory); + return -ENOMEM; + } + nv_nvkms_memory->pages_count = numPages; + nv_nvkms_memory->pages = (struct page **)pages; + + nv_drm_gem_object_init(nv_dev, + &nv_nvkms_memory->base, + &nv_gem_nvkms_memory_ops, + size, + pMemory); + + return 0; +} + +int nv_drm_dumb_create( + struct drm_file *file_priv, + struct drm_device *dev, struct drm_mode_create_dumb *args) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory; + uint8_t compressible = 0; + struct NvKmsKapiMemory *pMemory; + struct NvKmsKapiAllocateMemoryParams allocParams = { }; + int ret = 0; + + args->pitch = roundup(args->width * ((args->bpp + 7) >> 3), + nv_dev->pitchAlignment); + + args->size = args->height * args->pitch; + + /* Core DRM requires gem object size to be aligned with PAGE_SIZE */ + + args->size = roundup(args->size, PAGE_SIZE); + + if ((nv_nvkms_memory = + nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) { + ret = -ENOMEM; + goto fail; + } + + allocParams.layout = NvKmsSurfaceMemoryLayoutPitch; + allocParams.type = NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT; + allocParams.size = args->size; + allocParams.noDisplayCaching = true; + allocParams.useVideoMemory = nv_dev->hasVideoMemory; + allocParams.compressible = &compressible; + + pMemory = nvKms->allocateMemory(nv_dev->pDevice, &allocParams); + if (pMemory == NULL) { + ret = -ENOMEM; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to allocate NvKmsKapiMemory for dumb object of size %" NvU64_fmtu, + args->size); + goto nvkms_alloc_memory_failed; + } + + ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, args->size); + if (ret) { + goto nvkms_gem_obj_init_failed; + } + + /* Always map dumb buffer memory up front. Clients are only expected + * to use dumb buffers for software rendering, so they're not much use + * without a CPU mapping. 
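+     * Mapping here also surfaces any mapping failure at creation time, where
+     * it can be reported to the client, rather than at first CPU access.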
+ */ + ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory); + if (ret) { + nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base); + goto fail; + } + + return nv_drm_gem_handle_create_drop_reference(file_priv, + &nv_nvkms_memory->base, + &args->handle); + +nvkms_gem_obj_init_failed: + nvKms->freeMemory(nv_dev->pDevice, pMemory); + +nvkms_alloc_memory_failed: + nv_drm_free(nv_nvkms_memory); + +fail: + return ret; +} + +int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_gem_import_nvkms_memory_params *p = data; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory; + struct NvKmsKapiMemory *pMemory; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = -EOPNOTSUPP; + goto failed; + } + + if ((nv_nvkms_memory = + nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) { + ret = -ENOMEM; + goto failed; + } + + pMemory = nvKms->importMemory(nv_dev->pDevice, + p->mem_size, + p->nvkms_params_ptr, + p->nvkms_params_size); + + if (pMemory == NULL) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to import NVKMS memory to GEM object"); + goto nvkms_import_memory_failed; + } + + ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, p->mem_size); + if (ret) { + goto nvkms_gem_obj_init_failed; + } + + return nv_drm_gem_handle_create_drop_reference(filep, + &nv_nvkms_memory->base, + &p->handle); +nvkms_gem_obj_init_failed: + nvKms->freeMemory(nv_dev->pDevice, pMemory); + +nvkms_import_memory_failed: + nv_drm_free(nv_nvkms_memory); + +failed: + return ret; +} + +int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_gem_export_nvkms_memory_params *p = data; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = -EOPNOTSUPP; + goto done; + } + + if (p->__pad != 0) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed"); + goto done; + } + + if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup( + filep, + p->handle)) == NULL) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lookup NVKMS gem object for export: 0x%08x", + p->handle); + goto done; + } + + if (!nvKms->exportMemory(nv_dev->pDevice, + nv_nvkms_memory->base.pMemory, + p->nvkms_params_ptr, + p->nvkms_params_size)) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to export memory from NVKMS GEM object: 0x%08x", p->handle); + goto done; + } + +done: + if (nv_nvkms_memory != NULL) { + nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base); + } + + return ret; +} + +int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_gem_alloc_nvkms_memory_params *p = data; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL; + struct NvKmsKapiMemory *pMemory; + struct NvKmsKapiAllocateMemoryParams allocParams = { }; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = -EOPNOTSUPP; + goto failed; + } + + if ((p->__pad0 != 0) || (p->__pad1 != 0)) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR(nv_dev, "non-zero value in padding field"); + goto failed; + } + + if ((nv_nvkms_memory = + nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) { + ret = -ENOMEM; + goto failed; + } + + 
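+    /*
+     * Translate the ioctl parameters into an NVKMS allocation request:
+     * surface layout, scanout vs. offscreen usage, and whether to place the
+     * allocation in video memory.
+     */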
allocParams.layout = p->block_linear ? + NvKmsSurfaceMemoryLayoutBlockLinear : NvKmsSurfaceMemoryLayoutPitch; + allocParams.type = (p->flags & NV_GEM_ALLOC_NO_SCANOUT) ? + NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN : NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT; + allocParams.size = p->memory_size; + allocParams.useVideoMemory = nv_dev->hasVideoMemory; + allocParams.compressible = &p->compressible; + + pMemory = nvKms->allocateMemory(nv_dev->pDevice, &allocParams); + if (pMemory == NULL) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR(nv_dev, + "Failed to allocate NVKMS memory for GEM object"); + goto nvkms_alloc_memory_failed; + } + + ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, + p->memory_size); + if (ret) { + goto nvkms_gem_obj_init_failed; + } + + return nv_drm_gem_handle_create_drop_reference(filep, + &nv_nvkms_memory->base, + &p->handle); + +nvkms_gem_obj_init_failed: + nvKms->freeMemory(nv_dev->pDevice, pMemory); + +nvkms_alloc_memory_failed: + nv_drm_free(nv_nvkms_memory); + +failed: + return ret; +} + +static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup( + struct drm_device *dev, + const struct nv_drm_gem_object *nv_gem_src) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + const struct nv_drm_device *nv_dev_src; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory; + struct NvKmsKapiMemory *pMemory; + + BUG_ON(nv_gem_src == NULL || nv_gem_src->ops != &nv_gem_nvkms_memory_ops); + + nv_dev_src = to_nv_device(nv_gem_src->base.dev); + + if ((nv_nvkms_memory = + nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) { + return NULL; + } + + pMemory = nvKms->dupMemory(nv_dev->pDevice, + nv_dev_src->pDevice, nv_gem_src->pMemory); + if (pMemory == NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to import NVKMS memory to GEM object"); + goto nvkms_dup_memory_failed; + } + + if (__nv_drm_nvkms_gem_obj_init(nv_dev, + nv_nvkms_memory, + pMemory, + nv_gem_src->base.size)) { + goto nvkms_gem_obj_init_failed; + } + + return &nv_nvkms_memory->base.base; + +nvkms_gem_obj_init_failed: + nvKms->freeMemory(nv_dev->pDevice, pMemory); + +nvkms_dup_memory_failed: + nv_drm_free(nv_nvkms_memory); + + return NULL; +} + +int nv_drm_dumb_map_offset(struct drm_file *file, + struct drm_device *dev, uint32_t handle, + uint64_t *offset) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory; + int ret = -EINVAL; + + if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup( + file, + handle)) == NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lookup gem object for mapping: 0x%08x", + handle); + return ret; + } + + ret = __nv_drm_gem_map_nvkms_memory_offset(nv_dev, + &nv_nvkms_memory->base, offset); + + nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base); + + return ret; +} + +#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY) +int nv_drm_dumb_destroy(struct drm_file *file, + struct drm_device *dev, + uint32_t handle) +{ + return drm_gem_handle_delete(file, handle); +} +#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */ + +#endif diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h b/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h new file mode 100644 index 0000000..17e7a61 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__ +#define __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-gem.h" + +struct nv_drm_gem_nvkms_memory { + struct nv_drm_gem_object base; + + /* + * Lock to protect concurrent writes to physically_mapped, pPhysicalAddress, + * and pWriteCombinedIORemapAddress. + * + * __nv_drm_gem_nvkms_map(), the sole writer, is structured such that + * readers are not required to hold the lock. + */ + struct mutex map_lock; + bool physically_mapped; + void *pPhysicalAddress; + void *pWriteCombinedIORemapAddress; + + struct page **pages; + unsigned long pages_count; +}; + +extern const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops; + +static inline struct nv_drm_gem_nvkms_memory *to_nv_nvkms_memory( + struct nv_drm_gem_object *nv_gem) +{ + if (nv_gem != NULL) { + return container_of(nv_gem, struct nv_drm_gem_nvkms_memory, base); + } + + return NULL; +} + +static inline struct nv_drm_gem_nvkms_memory *to_nv_nvkms_memory_const( + const struct nv_drm_gem_object *nv_gem) +{ + if (nv_gem != NULL) { + return container_of(nv_gem, struct nv_drm_gem_nvkms_memory, base); + } + + return NULL; +} + +static inline +struct nv_drm_gem_nvkms_memory *nv_drm_gem_object_nvkms_memory_lookup( + struct drm_file *filp, + u32 handle) +{ + struct nv_drm_gem_object *nv_gem = + nv_drm_gem_object_lookup(filp, handle); + + if (nv_gem != NULL && nv_gem->ops != &nv_gem_nvkms_memory_ops) { + nv_drm_gem_object_unreference_unlocked(nv_gem); + return NULL; + } + + return to_nv_nvkms_memory(nv_gem); +} + +int nv_drm_dumb_create( + struct drm_file *file_priv, + struct drm_device *dev, struct drm_mode_create_dumb *args); + +int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_dumb_map_offset(struct drm_file *file, + struct drm_device *dev, uint32_t handle, + uint64_t *offset); + +#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY) +int nv_drm_dumb_destroy(struct drm_file *file, + struct drm_device *dev, + uint32_t handle); +#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */ + +struct drm_gem_object *nv_drm_gem_nvkms_prime_import( + struct drm_device *dev, + struct 
drm_gem_object *gem); + +#endif + +#endif /* __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c b/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c new file mode 100644 index 0000000..a0b02ef --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#include + +#include "nvidia-drm-gem-user-memory.h" +#include "nvidia-drm-helper.h" +#include "nvidia-drm-ioctl.h" + +#include "linux/dma-buf.h" +#include "linux/mm.h" +#include "nv-mm.h" +#include "linux/pfn_t.h" + +#if defined(NV_BSD) +#include +#endif + +static inline +void __nv_drm_gem_user_memory_free(struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem); + + nv_drm_unlock_user_pages(nv_user_memory->pages_count, + nv_user_memory->pages); + + nv_drm_free(nv_user_memory); +} + +static struct sg_table *__nv_drm_gem_user_memory_prime_get_sg_table( + struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem); + struct drm_gem_object *gem = &nv_gem->base; + + return nv_drm_prime_pages_to_sg(gem->dev, + nv_user_memory->pages, + nv_user_memory->pages_count); +} + +static void *__nv_drm_gem_user_memory_prime_vmap( + struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem); + + return nv_drm_vmap(nv_user_memory->pages, + nv_user_memory->pages_count, + true); +} + +static void __nv_drm_gem_user_memory_prime_vunmap( + struct nv_drm_gem_object *gem, + void *address) +{ + nv_drm_vunmap(address); +} + +static int __nv_drm_gem_user_memory_mmap(struct nv_drm_gem_object *nv_gem, + struct vm_area_struct *vma) +{ + int ret = drm_gem_mmap_obj(&nv_gem->base, + drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma); + + if (ret < 0) { + return ret; + } + + /* + * Enforce that user-memory GEM mappings are MAP_SHARED, to prevent COW + * with MAP_PRIVATE and VM_MIXEDMAP + */ + if (!(vma->vm_flags & VM_SHARED)) { + return -EINVAL; + } + + nv_vm_flags_clear(vma, VM_PFNMAP); + nv_vm_flags_clear(vma, VM_IO); + nv_vm_flags_set(vma, VM_MIXEDMAP); + + return 0; +} + +#if defined(NV_LINUX) && !defined(NV_VMF_INSERT_MIXED_PRESENT) +static vm_fault_t __nv_vm_insert_mixed_helper( + struct vm_area_struct 
*vma, + unsigned long address, + unsigned long pfn) +{ + int ret; + + ret = vm_insert_mixed(vma, address, pfn_to_pfn_t(pfn)); + + switch (ret) { + case 0: + case -EBUSY: + /* + * EBUSY indicates that another thread already handled + * the faulted range. + */ + return VM_FAULT_NOPAGE; + case -ENOMEM: + return VM_FAULT_OOM; + default: + WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret); + return VM_FAULT_SIGBUS; + } +} +#endif + +static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault( + struct nv_drm_gem_object *nv_gem, + struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem); + unsigned long address = vmf->address; + struct drm_gem_object *gem = vma->vm_private_data; + unsigned long page_offset; + unsigned long pfn; + + page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node); + BUG_ON(page_offset >= nv_user_memory->pages_count); + pfn = page_to_pfn(nv_user_memory->pages[page_offset]); + +#if !defined(NV_LINUX) + return vmf_insert_pfn(vma, address, pfn); +#elif defined(NV_VMF_INSERT_MIXED_PRESENT) + return vmf_insert_mixed(vma, address, pfn_to_pfn_t(pfn)); +#else + return __nv_vm_insert_mixed_helper(vma, address, pfn); +#endif +} + +static int __nv_drm_gem_user_create_mmap_offset( + struct nv_drm_device *nv_dev, + struct nv_drm_gem_object *nv_gem, + uint64_t *offset) +{ + (void)nv_dev; + return nv_drm_gem_create_mmap_offset(nv_gem, offset); +} + +const struct nv_drm_gem_object_funcs __nv_gem_user_memory_ops = { + .free = __nv_drm_gem_user_memory_free, + .prime_get_sg_table = __nv_drm_gem_user_memory_prime_get_sg_table, + .prime_vmap = __nv_drm_gem_user_memory_prime_vmap, + .prime_vunmap = __nv_drm_gem_user_memory_prime_vunmap, + .mmap = __nv_drm_gem_user_memory_mmap, + .handle_vma_fault = __nv_drm_gem_user_memory_handle_vma_fault, + .create_mmap_offset = __nv_drm_gem_user_create_mmap_offset, +}; + +int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + + struct drm_nvidia_gem_import_userspace_memory_params *params = data; + struct nv_drm_gem_user_memory *nv_user_memory; + + struct page **pages = NULL; + unsigned long pages_count = 0; + + int ret = 0; + + if ((params->size % PAGE_SIZE) != 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Userspace memory 0x%" NvU64_fmtx " size should be a multiple of page " + "size to create a gem object", + params->address); + return -EINVAL; + } + + pages_count = params->size / PAGE_SIZE; + + ret = nv_drm_lock_user_pages(params->address, pages_count, &pages); + + if (ret != 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lock user pages for address 0x%" NvU64_fmtx ": %d", + params->address, ret); + return ret; + } + + if ((nv_user_memory = + nv_drm_calloc(1, sizeof(*nv_user_memory))) == NULL) { + ret = -ENOMEM; + goto failed; + } + + nv_user_memory->pages = pages; + nv_user_memory->pages_count = pages_count; + + nv_drm_gem_object_init(nv_dev, + &nv_user_memory->base, + &__nv_gem_user_memory_ops, + params->size, + NULL /* pMemory */); + + return nv_drm_gem_handle_create_drop_reference(filep, + &nv_user_memory->base, + &params->handle); + +failed: + nv_drm_unlock_user_pages(pages_count, pages); + + return ret; +} + +#endif diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h b/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h new file mode 100644 index 0000000..56cb5fb --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h @@ -0,0
+1,71 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_GEM_USER_MEMORY_H__ +#define __NVIDIA_DRM_GEM_USER_MEMORY_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#include "nvidia-drm-gem.h" + +struct nv_drm_gem_user_memory { + struct nv_drm_gem_object base; + struct page **pages; + unsigned long pages_count; +}; + +extern const struct nv_drm_gem_object_funcs __nv_gem_user_memory_ops; + +static inline struct nv_drm_gem_user_memory *to_nv_user_memory( + struct nv_drm_gem_object *nv_gem) +{ + if (nv_gem != NULL) { + return container_of(nv_gem, struct nv_drm_gem_user_memory, base); + } + + return NULL; +} + +int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +static inline +struct nv_drm_gem_user_memory *nv_drm_gem_object_user_memory_lookup( + struct drm_file *filp, + u32 handle) +{ + struct nv_drm_gem_object *nv_gem = + nv_drm_gem_object_lookup(filp, handle); + + if (nv_gem != NULL && nv_gem->ops != &__nv_gem_user_memory_ops) { + nv_drm_gem_object_unreference_unlocked(nv_gem); + return NULL; + } + + return to_nv_user_memory(nv_gem); +} + +#endif + +#endif /* __NVIDIA_DRM_GEM_USER_MEMORY_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem.c b/kernel-open/nvidia-drm/nvidia-drm-gem.c new file mode 100644 index 0000000..a0bac20 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-gem.c @@ -0,0 +1,399 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#include "nvidia-drm-priv.h" +#include "nvidia-drm-ioctl.h" +#include "nvidia-drm-fence.h" +#include "nvidia-drm-gem.h" +#include "nvidia-drm-gem-nvkms-memory.h" +#include "nvidia-drm-gem-user-memory.h" +#include "nvidia-dma-resv-helper.h" +#include "nvidia-drm-helper.h" +#include "nvidia-drm-gem-dma-buf.h" +#include "nvidia-drm-gem-nvkms-memory.h" + +#include +#include +#include + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) +#include +#endif + +#include "linux/dma-buf.h" + +#include "nv-mm.h" + +void nv_drm_gem_free(struct drm_gem_object *gem) +{ + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem); + + /* Cleanup core gem object */ + drm_gem_object_release(&nv_gem->base); + +#if !defined(NV_DRM_GEM_OBJECT_HAS_RESV) + nv_dma_resv_fini(&nv_gem->resv); +#endif + + nv_gem->ops->free(nv_gem); +} + +#if !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS) && \ + defined(NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG) + +/* + * The 'dma_buf_map' structure is renamed to 'iosys_map' by the commit + * 7938f4218168 ("dma-buf-map: Rename to iosys-map"). + */ +#if defined(NV_LINUX_IOSYS_MAP_H_PRESENT) +typedef struct iosys_map nv_sysio_map_t; +#else +typedef struct dma_buf_map nv_sysio_map_t; +#endif + +static int nv_drm_gem_vmap(struct drm_gem_object *gem, + nv_sysio_map_t *map) +{ + void *vaddr = nv_drm_gem_prime_vmap(gem); + if (vaddr == NULL) { + return -ENOMEM; + } else if (IS_ERR(vaddr)) { + return PTR_ERR(vaddr); + } + map->vaddr = vaddr; + map->is_iomem = true; + return 0; +} + +static void nv_drm_gem_vunmap(struct drm_gem_object *gem, + nv_sysio_map_t *map) +{ + nv_drm_gem_prime_vunmap(gem, map->vaddr); + map->vaddr = NULL; +} +#endif + +#if !defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT) || \ + !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS) +static struct drm_gem_object_funcs nv_drm_gem_funcs = { + .free = nv_drm_gem_free, + .get_sg_table = nv_drm_gem_prime_get_sg_table, + +#if !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS) + .export = drm_gem_prime_export, +#if defined(NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG) + .vmap = nv_drm_gem_vmap, + .vunmap = nv_drm_gem_vunmap, +#else + .vmap = nv_drm_gem_prime_vmap, + .vunmap = nv_drm_gem_prime_vunmap, +#endif + .vm_ops = &nv_drm_gem_vma_ops, +#endif +}; +#endif + +void nv_drm_gem_object_init(struct nv_drm_device *nv_dev, + struct nv_drm_gem_object *nv_gem, + const struct nv_drm_gem_object_funcs * const ops, + size_t size, + struct NvKmsKapiMemory *pMemory) +{ + struct drm_device *dev = nv_dev->dev; + + nv_gem->nv_dev = nv_dev; + nv_gem->ops = ops; + + nv_gem->pMemory = pMemory; + + /* Initialize the gem object */ + +#if !defined(NV_DRM_GEM_OBJECT_HAS_RESV) + nv_dma_resv_init(&nv_gem->resv); +#endif + +#if !defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT) + nv_gem->base.funcs = &nv_drm_gem_funcs; +#endif + + drm_gem_private_object_init(dev, &nv_gem->base, size); + + /* Create mmap offset early for drm_gem_prime_mmap(), if possible. 
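The result is deliberately ignored: offset creation is idempotent, and the map-offset ioctl below can simply retry it later.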
*/ + if (nv_gem->ops->create_mmap_offset) { + uint64_t offset; + nv_gem->ops->create_mmap_offset(nv_dev, nv_gem, &offset); + } +} + +struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct drm_gem_object *gem_dst; + struct nv_drm_gem_object *nv_gem_src; + + if (dma_buf->owner == dev->driver->fops->owner) { + nv_gem_src = to_nv_gem_object(dma_buf->priv); + + if (nv_gem_src->base.dev != dev && + nv_gem_src->ops->prime_dup != NULL) { + /* + * If we're importing from another NV device, try to handle the + * import internally rather than attaching through the dma-buf + * mechanisms. Importing from the same device is even easier, + * and drm_gem_prime_import() handles that just fine. + */ + gem_dst = nv_gem_src->ops->prime_dup(dev, nv_gem_src); + + if (gem_dst == NULL) { + return ERR_PTR(-ENOTSUPP); + } + + return gem_dst; + } + } + + return drm_gem_prime_import(dev, dma_buf); +} + +struct sg_table *nv_drm_gem_prime_get_sg_table(struct drm_gem_object *gem) +{ + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem); + + if (nv_gem->ops->prime_get_sg_table != NULL) { + return nv_gem->ops->prime_get_sg_table(nv_gem); + } + + return ERR_PTR(-ENOTSUPP); +} + +void *nv_drm_gem_prime_vmap(struct drm_gem_object *gem) +{ + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem); + + if (nv_gem->ops->prime_vmap != NULL) { + return nv_gem->ops->prime_vmap(nv_gem); + } + + return ERR_PTR(-ENOTSUPP); +} + +void nv_drm_gem_prime_vunmap(struct drm_gem_object *gem, void *address) +{ + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem); + + if (nv_gem->ops->prime_vunmap != NULL) { + nv_gem->ops->prime_vunmap(nv_gem, address); + } +} + +#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) +nv_dma_resv_t* nv_drm_gem_prime_res_obj(struct drm_gem_object *obj) +{ + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(obj); + return nv_drm_gem_res_obj(nv_gem); +} +#endif + +int nv_drm_gem_map_offset_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_gem_map_offset_params *params = data; + struct nv_drm_gem_object *nv_gem; + int ret; + + if ((nv_gem = nv_drm_gem_object_lookup(filep, + params->handle)) == NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lookup gem object for map: 0x%08x", + params->handle); + return -EINVAL; + } + + /* mmap offset creation is idempotent, fetch it by creating it again. */ + if (nv_gem->ops->create_mmap_offset) { + ret = nv_gem->ops->create_mmap_offset(nv_dev, nv_gem, &params->offset); + } else { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Gem object type does not support mapping: 0x%08x", + params->handle); + ret = -EINVAL; + } + + nv_drm_gem_object_unreference_unlocked(nv_gem); + + return ret; +} + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) +int nv_drm_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct drm_file *priv = file->private_data; + struct drm_device *dev = priv->minor->dev; + struct drm_gem_object *obj = NULL; + struct drm_vma_offset_node *node; + int ret = 0; + struct nv_drm_gem_object *nv_gem; + + drm_vma_offset_lock_lookup(dev->vma_offset_manager); + node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, + vma->vm_pgoff, vma_pages(vma)); + if (likely(node)) { + obj = container_of(node, struct drm_gem_object, vma_node); + /* + * When the object is being freed, after it hits 0-refcnt it proceeds + * to tear down the object.
In the process it will attempt to remove + * the VMA offset and so acquire this mgr->vm_lock. Therefore if we + * find an object with a 0-refcnt that matches our range, we know it is + * in the process of being destroyed and will be freed as soon as we + * release the lock - so we have to check for the 0-refcnted object and + * treat it as invalid. + */ + if (!kref_get_unless_zero(&obj->refcount)) + obj = NULL; + } + drm_vma_offset_unlock_lookup(dev->vma_offset_manager); + + if (!obj) + return -EINVAL; + + nv_gem = to_nv_gem_object(obj); + if (nv_gem->ops->mmap == NULL) { + ret = -EINVAL; + goto done; + } + + if (!drm_vma_node_is_allowed(node, file->private_data)) { + ret = -EACCES; + goto done; + } + +#if defined(NV_DRM_VMA_OFFSET_NODE_HAS_READONLY) + if (node->readonly) { + if (vma->vm_flags & VM_WRITE) { + ret = -EINVAL; + goto done; + } + nv_vm_flags_clear(vma, VM_MAYWRITE); + } +#endif + + ret = nv_gem->ops->mmap(nv_gem, vma); + +done: + nv_drm_gem_object_unreference_unlocked(nv_gem); + + return ret; +} +#endif + +int nv_drm_gem_identify_object_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct drm_nvidia_gem_identify_object_params *p = data; + struct nv_drm_gem_dma_buf *nv_dma_buf; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory; + struct nv_drm_gem_user_memory *nv_user_memory; + struct nv_drm_gem_object *nv_gem = NULL; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return -EOPNOTSUPP; + } + + nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(filep, p->handle); + if (nv_dma_buf) { + p->object_type = NV_GEM_OBJECT_DMABUF; + nv_gem = &nv_dma_buf->base; + goto done; + } + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(filep, p->handle); + if (nv_nvkms_memory) { + p->object_type = NV_GEM_OBJECT_NVKMS; + nv_gem = &nv_nvkms_memory->base; + goto done; + } +#endif + + nv_user_memory = nv_drm_gem_object_user_memory_lookup(filep, p->handle); + if (nv_user_memory) { + p->object_type = NV_GEM_OBJECT_USERMEMORY; + nv_gem = &nv_user_memory->base; + goto done; + } + + p->object_type = NV_GEM_OBJECT_UNKNOWN; + +done: + if (nv_gem) { + nv_drm_gem_object_unreference_unlocked(nv_gem); + } + return 0; +} + +/* XXX Move these vma operations to os layer */ + +static vm_fault_t __nv_drm_vma_fault(struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + struct drm_gem_object *gem = vma->vm_private_data; + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem); + + if (!nv_gem) { + return VM_FAULT_SIGBUS; + } + + return nv_gem->ops->handle_vma_fault(nv_gem, vma, vmf); +} + +/* + * Note that nv_drm_vma_fault() can be called for different or same + * ranges of the same drm_gem_object simultaneously. + */ + +#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG) +static vm_fault_t nv_drm_vma_fault(struct vm_fault *vmf) +{ + return __nv_drm_vma_fault(vmf->vma, vmf); +} +#else +static vm_fault_t nv_drm_vma_fault(struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + return __nv_drm_vma_fault(vma, vmf); +} +#endif + +const struct vm_operations_struct nv_drm_gem_vma_ops = { + .open = drm_gem_vm_open, + .fault = nv_drm_vma_fault, + .close = drm_gem_vm_close, +}; + +#endif /* NV_DRM_AVAILABLE */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem.h b/kernel-open/nvidia-drm/nvidia-drm-gem.h new file mode 100644 index 0000000..efb590e --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-gem.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_GEM_H__ +#define __NVIDIA_DRM_GEM_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#include "nvidia-drm-priv.h" + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include <drm/drmP.h> +#endif + +#include <drm/drm_gem.h> + +#include "nvkms-kapi.h" +#include "nv-mm.h" + +#include "nvidia-dma-resv-helper.h" + +#include "linux/dma-buf.h" + +struct nv_drm_gem_object; + +struct nv_drm_gem_object_funcs { + void (*free)(struct nv_drm_gem_object *nv_gem); + struct sg_table *(*prime_get_sg_table)(struct nv_drm_gem_object *nv_gem); + void *(*prime_vmap)(struct nv_drm_gem_object *nv_gem); + void (*prime_vunmap)(struct nv_drm_gem_object *nv_gem, void *address); + struct drm_gem_object *(*prime_dup)(struct drm_device *dev, + const struct nv_drm_gem_object *nv_gem_src); + int (*mmap)(struct nv_drm_gem_object *nv_gem, struct vm_area_struct *vma); + vm_fault_t (*handle_vma_fault)(struct nv_drm_gem_object *nv_gem, + struct vm_area_struct *vma, + struct vm_fault *vmf); + int (*create_mmap_offset)(struct nv_drm_device *nv_dev, + struct nv_drm_gem_object *nv_gem, + uint64_t *offset); +}; + +struct nv_drm_gem_object { + struct drm_gem_object base; + + struct nv_drm_device *nv_dev; + const struct nv_drm_gem_object_funcs *ops; + + struct NvKmsKapiMemory *pMemory; + +#if !defined(NV_DRM_GEM_OBJECT_HAS_RESV) + nv_dma_resv_t resv; +#endif +}; + +static inline struct nv_drm_gem_object *to_nv_gem_object( + struct drm_gem_object *gem) +{ + if (gem != NULL) { + return container_of(gem, struct nv_drm_gem_object, base); + } + + return NULL; +} + +static inline void +nv_drm_gem_object_unreference_unlocked(struct nv_drm_gem_object *nv_gem) +{ +#if defined(NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT) + drm_gem_object_put_unlocked(&nv_gem->base); +#else + drm_gem_object_put(&nv_gem->base); +#endif +} + +static inline int nv_drm_gem_handle_create_drop_reference( + struct drm_file *file_priv, + struct nv_drm_gem_object *nv_gem, + uint32_t *handle) +{ + int ret = drm_gem_handle_create(file_priv, &nv_gem->base, handle); + + /* drop reference from allocate - handle holds it now */ + + nv_drm_gem_object_unreference_unlocked(nv_gem); + + return ret; +} + +static inline int nv_drm_gem_create_mmap_offset( + struct nv_drm_gem_object *nv_gem, + uint64_t *offset) +{ + int ret; + + if ((ret = drm_gem_create_mmap_offset(&nv_gem->base)) < 0) { + NV_DRM_DEV_LOG_ERR( + nv_gem->nv_dev, + "drm_gem_create_mmap_offset failed with error code %d", + ret); + goto done;
+ } + + *offset = drm_vma_node_offset_addr(&nv_gem->base.vma_node); + +done: + + return ret; +} + +void nv_drm_gem_free(struct drm_gem_object *gem); + +static inline struct nv_drm_gem_object *nv_drm_gem_object_lookup( + struct drm_file *filp, + u32 handle) +{ + return to_nv_gem_object(drm_gem_object_lookup(filp, handle)); +} + +static inline int nv_drm_gem_handle_create(struct drm_file *filp, + struct nv_drm_gem_object *nv_gem, + uint32_t *handle) +{ + return drm_gem_handle_create(filp, &nv_gem->base, handle); +} + +static inline nv_dma_resv_t *nv_drm_gem_res_obj(struct nv_drm_gem_object *nv_gem) +{ +#if defined(NV_DRM_GEM_OBJECT_HAS_RESV) + return nv_gem->base.resv; +#else + return nv_gem->base.dma_buf ? nv_gem->base.dma_buf->resv : &nv_gem->resv; +#endif +} + +void nv_drm_gem_object_init(struct nv_drm_device *nv_dev, + struct nv_drm_gem_object *nv_gem, + const struct nv_drm_gem_object_funcs * const ops, + size_t size, + struct NvKmsKapiMemory *pMemory); + +struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); + +struct sg_table *nv_drm_gem_prime_get_sg_table(struct drm_gem_object *gem); + +void *nv_drm_gem_prime_vmap(struct drm_gem_object *gem); + +void nv_drm_gem_prime_vunmap(struct drm_gem_object *gem, void *address); + +#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) +nv_dma_resv_t* nv_drm_gem_prime_res_obj(struct drm_gem_object *obj); +#endif + +extern const struct vm_operations_struct nv_drm_gem_vma_ops; + +int nv_drm_gem_map_offset_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_mmap(struct file *file, struct vm_area_struct *vma); + +int nv_drm_gem_identify_object_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +#endif /* NV_DRM_AVAILABLE */ + +#endif /* __NVIDIA_DRM_GEM_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-helper.c b/kernel-open/nvidia-drm/nvidia-drm-helper.c new file mode 100644 index 0000000..9562e60 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-helper.c @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file contains snapshots of DRM helper functions from the + * Linux kernel which are used by nvidia-drm.ko if the target kernel + * predates the helper function. Having these functions consistently + * present simplifies nvidia-drm.ko source. 
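+ * + * Each snapshot below is annotated with the upstream commit it was + * taken from, along with the copyright and license notice of the + * original source file.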
+ */ + +#include "nvidia-drm-helper.h" +#include "nvidia-drm-priv.h" +#include "nvidia-drm-crtc.h" + +#include "nvmisc.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_ATOMIC_UAPI_H_PRESENT) +#include +#endif + +#include + +/* + * drm_atomic_helper_disable_all() has been added by commit + * 1494276000db789c6d2acd85747be4707051c801, which is Signed-off-by: + * Thierry Reding + * Daniel Vetter + * + * drm_atomic_helper_disable_all() is copied from + * linux/drivers/gpu/drm/drm_atomic_helper.c and modified to use + * nv_drm_for_each_crtc instead of drm_for_each_crtc to loop over all crtcs, + * use nv_drm_for_each_*_in_state instead of for_each_connector_in_state to loop + * over all modeset object states, and use drm_atomic_state_free() if + * drm_atomic_state_put() is not available. + * + * drm_atomic_helper_disable_all() is copied from + * linux/drivers/gpu/drm/drm_atomic_helper.c @ + * 49d70aeaeca8f62b72b7712ecd1e29619a445866, which has the following + * copyright and license information: + * + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Rob Clark + * Daniel Vetter + */ +int nv_drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_atomic_state *state; + struct drm_connector_state *conn_state; + struct drm_connector *conn; + struct drm_plane_state *plane_state; + struct drm_plane *plane; + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + unsigned plane_mask = 0; + int ret, i; + + state = drm_atomic_state_alloc(dev); + if (!state) + return -ENOMEM; + + state->acquire_ctx = ctx; + + nv_drm_for_each_crtc(crtc, dev) { + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); + goto free; + } + + crtc_state->active = false; + + ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL); + if (ret < 0) + goto free; + + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret < 0) + goto free; + + ret = drm_atomic_add_affected_connectors(state, crtc); + if (ret < 0) + goto free; + } + + nv_drm_for_each_plane(plane, dev) { + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) { + ret = PTR_ERR(plane_state); + goto free; + } + + plane_state->rotation = DRM_MODE_ROTATE_0; + } + + nv_drm_for_each_connector_in_state(state, conn, conn_state, i) { + ret = drm_atomic_set_crtc_for_connector(conn_state, NULL); + if (ret < 0) + goto free; + } + + nv_drm_for_each_plane_in_state(state, plane, plane_state, i) { + ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); + if (ret < 0) + goto free; + + drm_atomic_set_fb_for_plane(plane_state, NULL); + plane_mask |= NVBIT(drm_plane_index(plane)); + plane->old_fb = plane->fb; + } + + ret = drm_atomic_commit(state); +free: + if (plane_mask) { + drm_for_each_plane_mask(plane, dev, plane_mask) { + if (ret == 0) { + plane->fb = NULL; + plane->crtc = NULL; + + WARN_ON(plane->state->fb); + WARN_ON(plane->state->crtc); + + if (plane->old_fb) + drm_framebuffer_put(plane->old_fb); + } + plane->old_fb = NULL; + } + } + + drm_atomic_state_put(state); + + return ret; +} + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-helper.h b/kernel-open/nvidia-drm/nvidia-drm-helper.h new file mode 100644 index 0000000..c993188 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-helper.h @@ -0,0 +1,476 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVIDIA_DRM_HELPER_H__ +#define __NVIDIA_DRM_HELPER_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include <drm/drmP.h> +#endif + +#include + +#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) +#include <drm/drm_blend.h> +#endif + +/* + * For DRM_MODE_ROTATE_*, DRM_MODE_REFLECT_*, struct drm_color_ctm_3x4, and + * struct drm_color_lut. + */ +#include <uapi/drm/drm_mode.h> + +/* + * Commit 1e13c5644c44 ("drm/drm_mode_object: increase max objects to + * accommodate new color props") in Linux v6.8 increased the per-object + * property limit from 24 to 64. + */ +#define NV_DRM_USE_EXTENDED_PROPERTIES (DRM_OBJECT_MAX_PROPERTY >= 64) + +#include <drm/drm_prime.h> + +static inline struct sg_table* +nv_drm_prime_pages_to_sg(struct drm_device *dev, + struct page **pages, unsigned int nr_pages) +{ +#if defined(NV_DRM_PRIME_PAGES_TO_SG_HAS_DRM_DEVICE_ARG) + return drm_prime_pages_to_sg(dev, pages, nr_pages); +#else + return drm_prime_pages_to_sg(pages, nr_pages); +#endif +} + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +/* + * drm_for_each_connector(), drm_for_each_crtc(), drm_for_each_fb(), + * drm_for_each_encoder and drm_for_each_plane() were added by kernel + * commit 6295d607ad34ee4e43aab3f20714c2ef7a6adea1 which was + * Signed-off-by: + * Daniel Vetter + * drm_for_each_connector(), drm_for_each_crtc(), drm_for_each_fb(), + * drm_for_each_encoder and drm_for_each_plane() are copied from + * include/drm/drm_crtc.h @ + * 6295d607ad34ee4e43aab3f20714c2ef7a6adea1 + * which has the following copyright and license information: + * + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ */ +#include + +#if defined(drm_for_each_plane) +#define nv_drm_for_each_plane(plane, dev) \ + drm_for_each_plane(plane, dev) +#else +#define nv_drm_for_each_plane(plane, dev) \ + list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) +#endif + +#if defined(drm_for_each_crtc) +#define nv_drm_for_each_crtc(crtc, dev) \ + drm_for_each_crtc(crtc, dev) +#else +#define nv_drm_for_each_crtc(crtc, dev) \ + list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) +#endif + +#if defined(drm_for_each_encoder) +#define nv_drm_for_each_encoder(encoder, dev) \ + drm_for_each_encoder(encoder, dev) +#else +#define nv_drm_for_each_encoder(encoder, dev) \ + list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head) +#endif + +#if defined(drm_for_each_fb) +#define nv_drm_for_each_fb(fb, dev) \ + drm_for_each_fb(fb, dev) +#else +#define nv_drm_for_each_fb(fb, dev) \ + list_for_each_entry(fb, &(dev)->mode_config.fb_list, head) +#endif + +#include +#include + +int nv_drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); + +/* + * for_each_connector_in_state(), for_each_crtc_in_state() and + * for_each_plane_in_state() were added by kernel commit + * df63b9994eaf942afcdb946d27a28661d7dfbf2a which was Signed-off-by: + * Ander Conselvan de Oliveira + * Daniel Vetter + * + * for_each_connector_in_state(), for_each_crtc_in_state() and + * for_each_plane_in_state() were copied from + * include/drm/drm_atomic.h @ + * 21a01abbe32a3cbeb903378a24e504bfd9fe0648 + * which has the following copyright and license information: + * + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark + * Daniel Vetter + */ + +/** + * nv_drm_for_each_connector_in_state - iterate over all connectors in an + * atomic update + * @__state: &struct drm_atomic_state pointer + * @connector: &struct drm_connector iteration cursor + * @connector_state: &struct drm_connector_state iteration cursor + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all connectors in an atomic update. Note that before the + * software state is committed (by calling drm_atomic_helper_swap_state(), this + * points to the new state, while afterwards it points to the old state. Due to + * this tricky confusion this macro is deprecated. 
+ */ +#if !defined(for_each_connector_in_state) +#define nv_drm_for_each_connector_in_state(__state, \ + connector, connector_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_connector && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (connector_state) = (__state)->connectors[__i].state, 1); \ + (__i)++) \ + for_each_if (connector) +#else +#define nv_drm_for_each_connector_in_state(__state, \ + connector, connector_state, __i) \ + for_each_connector_in_state(__state, connector, connector_state, __i) +#endif + + +/** + * nv_drm_for_each_crtc_in_state - iterate over all CRTCs in an atomic update + * @__state: &struct drm_atomic_state pointer + * @crtc: &struct drm_crtc iteration cursor + * @crtc_state: &struct drm_crtc_state iteration cursor + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all CRTCs in an atomic update. Note that before the + * software state is committed (by calling drm_atomic_helper_swap_state(), this + * points to the new state, while afterwards it points to the old state. Due to + * this tricky confusion this macro is deprecated. + */ +#if !defined(for_each_crtc_in_state) +#define nv_drm_for_each_crtc_in_state(__state, crtc, crtc_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_crtc && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (crtc_state) = (__state)->crtcs[__i].state, 1); \ + (__i)++) \ + for_each_if (crtc_state) +#else +#define nv_drm_for_each_crtc_in_state(__state, crtc, crtc_state, __i) \ + for_each_crtc_in_state(__state, crtc, crtc_state, __i) +#endif + +/** + * nv_drm_for_each_plane_in_state - iterate over all planes in an atomic update + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @plane_state: &struct drm_plane_state iteration cursor + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all planes in an atomic update. Note that before the + * software state is committed (by calling drm_atomic_helper_swap_state(), this + * points to the new state, while afterwards it points to the old state. Due to + * this tricky confusion this macro is deprecated. + */ +#if !defined(for_each_plane_in_state) +#define nv_drm_for_each_plane_in_state(__state, plane, plane_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_total_plane && \ + ((plane) = (__state)->planes[__i].ptr, \ + (plane_state) = (__state)->planes[__i].state, 1); \ + (__i)++) \ + for_each_if (plane_state) +#else +#define nv_drm_for_each_plane_in_state(__state, plane, plane_state, __i) \ + for_each_plane_in_state(__state, plane, plane_state, __i) +#endif + +/* + * for_each_new_plane_in_state() was added by kernel commit + * 581e49fe6b411f407102a7f2377648849e0fa37f which was Signed-off-by: + * Maarten Lankhorst + * Daniel Vetter + * + * This commit also added the old_state and new_state pointers to + * __drm_planes_state. Because of this, the best that can be done on kernel + * versions without this macro is for_each_plane_in_state. + */ + +/** + * nv_drm_for_each_new_plane_in_state - iterate over all planes in an atomic update + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @new_plane_state: &struct drm_plane_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all planes in an atomic update, tracking only the new + * state. 
This is useful in enable functions, where we need the new state the + * hardware should be in when the atomic commit operation has completed. + */ +#if !defined(for_each_new_plane_in_state) +#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \ + nv_drm_for_each_plane_in_state(__state, plane, new_plane_state, __i) +#else +#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \ + for_each_new_plane_in_state(__state, plane, new_plane_state, __i) +#endif + +#include +#include + +/* + * drm_file_get_master() added by commit 56f0729a510f ("drm: protect drm_master + * pointers in drm_lease.c") in v5.15 (2021-07-20) + */ +static inline struct drm_master *nv_drm_file_get_master(struct drm_file *filep) +{ +#if defined(NV_DRM_FILE_GET_MASTER_PRESENT) + return drm_file_get_master(filep); +#else + if (filep->master) { + return drm_master_get(filep->master); + } else { + return NULL; + } +#endif +} + +/* + * drm_connector_for_each_possible_encoder() is added by commit + * 83aefbb887b59df0b3520965c3701e01deacfc52 which was Signed-off-by: + * Ville Syrjälä + * + * drm_connector_for_each_possible_encoder() is copied from + * include/drm/drm_connector.h @ + * 83aefbb887b59df0b3520965c3701e01deacfc52 + * which has the following copyright and license information: + * + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#include + +/** + * nv_drm_connector_for_each_possible_encoder - iterate connector's possible + * encoders + * @connector: &struct drm_connector pointer + * @encoder: &struct drm_encoder pointer used as cursor + * @__i: int iteration cursor, for macro-internal use + */ +#if !defined(drm_connector_for_each_possible_encoder) + +#if !defined(for_each_if) +#define for_each_if(condition) if (!(condition)) {} else +#endif + +#define __nv_drm_connector_for_each_possible_encoder(connector, encoder, __i) \ + for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \ + (connector)->encoder_ids[(__i)] != 0; (__i)++) \ + for_each_if((encoder) = \ + drm_encoder_find((connector)->dev, NULL, \ + (connector)->encoder_ids[(__i)])) + + +#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \ + { \ + unsigned int __i; \ + __nv_drm_connector_for_each_possible_encoder(connector, encoder, __i) + +#define nv_drm_connector_for_each_possible_encoder_end \ + } + +#else + +#if NV_DRM_CONNECTOR_FOR_EACH_POSSIBLE_ENCODER_ARGUMENT_COUNT == 3 + +#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \ + { \ + unsigned int __i; \ + drm_connector_for_each_possible_encoder(connector, encoder, __i) + +#define nv_drm_connector_for_each_possible_encoder_end \ + } + +#else + +#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \ + drm_connector_for_each_possible_encoder(connector, encoder) + +#define nv_drm_connector_for_each_possible_encoder_end + +#endif + +#endif + +static inline int +nv_drm_connector_attach_encoder(struct drm_connector *connector, + struct drm_encoder *encoder) +{ +#if defined(NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME) + return drm_mode_connector_attach_encoder(connector, encoder); +#else + return drm_connector_attach_encoder(connector, encoder); +#endif +} + +static inline int +nv_drm_connector_update_edid_property(struct drm_connector *connector, + const struct edid *edid) +{ +#if defined(NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME) + return drm_mode_connector_update_edid_property(connector, edid); +#else + return drm_connector_update_edid_property(connector, edid); +#endif +} + +#include <drm/drm_fourcc.h> + +static inline int nv_drm_format_num_planes(uint32_t format) +{ + const struct drm_format_info *info = drm_format_info(format); + return info != NULL ? info->num_planes : 1; +} + +/* + * DRM_FORMAT_MOD_VENDOR_NVIDIA was previously called + * DRM_FORMAT_MOD_VENDOR_NV. + */ +#if !defined(DRM_FORMAT_MOD_VENDOR_NVIDIA) +#define DRM_FORMAT_MOD_VENDOR_NVIDIA DRM_FORMAT_MOD_VENDOR_NV +#endif + +/* + * DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D is a relatively new addition to the + * upstream kernel headers compared to the other format modifiers. + */ +#if !defined(DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D) +#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \ + fourcc_mod_code(NVIDIA, (0x10 | \ + ((h) & 0xf) | \ + (((k) & 0xff) << 12) | \ + (((g) & 0x3) << 20) | \ + (((s) & 0x1) << 22) | \ + (((c) & 0x7) << 23))) +#endif + +/* + * DRM_UNLOCKED was removed with commit 2798ffcc1d6a ("drm: Remove locking for + * legacy ioctls and DRM_UNLOCKED") in v6.8, but it was previously made + * implicit for all non-legacy DRM driver IOCTLs since Linux v4.10 commit + * fa5386459f06 "drm: Used DRM_LEGACY for all legacy functions" (Linux v4.4 + * commit ea487835e887 "drm: Enforce unlocked ioctl operation for kms driver + * ioctls" previously did it only for drivers that set the DRIVER_MODESET flag), so + * it was effectively a no-op anyway.
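+ * Defining it to 0 when the flag is absent lets ioctl declarations that + * still pass DRM_UNLOCKED build unchanged on newer kernels.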
+ */ +#if !defined(NV_DRM_UNLOCKED_IOCTL_FLAG_PRESENT) +#define DRM_UNLOCKED 0 +#endif + +/* + * struct drm_color_ctm_3x4 was added by commit 6872a189be50 ("drm/amd/display: + * Add 3x4 CTM support for plane CTM") in v6.8. For backwards compatibility, + * define it when not present. + */ +#if !defined(NV_DRM_COLOR_CTM_3X4_PRESENT) +struct drm_color_ctm_3x4 { + __u64 matrix[12]; +}; +#endif + +#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */ + +#endif /* defined(NV_DRM_AVAILABLE) */ + +#endif /* __NVIDIA_DRM_HELPER_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-ioctl.h b/kernel-open/nvidia-drm/nvidia-drm-ioctl.h new file mode 100644 index 0000000..3daab9e --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-ioctl.h @@ -0,0 +1,399 @@ +/* + * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _UAPI_NVIDIA_DRM_IOCTL_H_ +#define _UAPI_NVIDIA_DRM_IOCTL_H_ + +#include <drm/drm.h> + +/* + * We should do our best to keep these values constant.
Any change to these will + * be backwards incompatible with client applications that might be using them + */ +#define DRM_NVIDIA_GET_CRTC_CRC32 0x00 +#define DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY 0x01 +#define DRM_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY 0x02 +#define DRM_NVIDIA_GET_DEV_INFO 0x03 +#define DRM_NVIDIA_FENCE_SUPPORTED 0x04 +#define DRM_NVIDIA_PRIME_FENCE_CONTEXT_CREATE 0x05 +#define DRM_NVIDIA_GEM_PRIME_FENCE_ATTACH 0x06 +#define DRM_NVIDIA_GET_CLIENT_CAPABILITY 0x08 +#define DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY 0x09 +#define DRM_NVIDIA_GEM_MAP_OFFSET 0x0a +#define DRM_NVIDIA_GEM_ALLOC_NVKMS_MEMORY 0x0b +#define DRM_NVIDIA_GET_CRTC_CRC32_V2 0x0c +#define DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY 0x0d +#define DRM_NVIDIA_GEM_IDENTIFY_OBJECT 0x0e +#define DRM_NVIDIA_DMABUF_SUPPORTED 0x0f +#define DRM_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID 0x10 +#define DRM_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID 0x11 +#define DRM_NVIDIA_GRANT_PERMISSIONS 0x12 +#define DRM_NVIDIA_REVOKE_PERMISSIONS 0x13 +#define DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE 0x14 +#define DRM_NVIDIA_SEMSURF_FENCE_CREATE 0x15 +#define DRM_NVIDIA_SEMSURF_FENCE_WAIT 0x16 +#define DRM_NVIDIA_SEMSURF_FENCE_ATTACH 0x17 +#define DRM_NVIDIA_GET_DRM_FILE_UNIQUE_ID 0x18 + +#define DRM_IOCTL_NVIDIA_GEM_IMPORT_NVKMS_MEMORY \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY), \ + struct drm_nvidia_gem_import_nvkms_memory_params) + +#define DRM_IOCTL_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY), \ + struct drm_nvidia_gem_import_userspace_memory_params) + +#define DRM_IOCTL_NVIDIA_GET_DEV_INFO \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_DEV_INFO), \ + struct drm_nvidia_get_dev_info_params) + +/* + * XXX Solaris compiler has issues with DRM_IO. None of this is supported on + * Solaris anyway, so just skip it. 
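+ * The diagnostic in question: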
+ * + * 'warning: suggest parentheses around arithmetic in operand of |' + */ +#if defined(NV_LINUX) || defined(NV_BSD) +#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED \ + DRM_IO(DRM_COMMAND_BASE + DRM_NVIDIA_FENCE_SUPPORTED) +#define DRM_IOCTL_NVIDIA_DMABUF_SUPPORTED \ + DRM_IO(DRM_COMMAND_BASE + DRM_NVIDIA_DMABUF_SUPPORTED) +#else +#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED 0 +#define DRM_IOCTL_NVIDIA_DMABUF_SUPPORTED 0 +#endif + +#define DRM_IOCTL_NVIDIA_PRIME_FENCE_CONTEXT_CREATE \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_PRIME_FENCE_CONTEXT_CREATE),\ + struct drm_nvidia_prime_fence_context_create_params) + +#define DRM_IOCTL_NVIDIA_GEM_PRIME_FENCE_ATTACH \ + DRM_IOW((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_PRIME_FENCE_ATTACH), \ + struct drm_nvidia_gem_prime_fence_attach_params) + +#define DRM_IOCTL_NVIDIA_GET_CLIENT_CAPABILITY \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CLIENT_CAPABILITY), \ + struct drm_nvidia_get_client_capability_params) + +#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32 \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32), \ + struct drm_nvidia_get_crtc_crc32_params) + +#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32_V2 \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32_V2), \ + struct drm_nvidia_get_crtc_crc32_v2_params) + +#define DRM_IOCTL_NVIDIA_GEM_EXPORT_NVKMS_MEMORY \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY), \ + struct drm_nvidia_gem_export_nvkms_memory_params) + +#define DRM_IOCTL_NVIDIA_GEM_MAP_OFFSET \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_MAP_OFFSET), \ + struct drm_nvidia_gem_map_offset_params) + +#define DRM_IOCTL_NVIDIA_GEM_ALLOC_NVKMS_MEMORY \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_ALLOC_NVKMS_MEMORY), \ + struct drm_nvidia_gem_alloc_nvkms_memory_params) + +#define DRM_IOCTL_NVIDIA_GEM_EXPORT_DMABUF_MEMORY \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY), \ + struct drm_nvidia_gem_export_dmabuf_memory_params) + +#define DRM_IOCTL_NVIDIA_GEM_IDENTIFY_OBJECT \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IDENTIFY_OBJECT), \ + struct drm_nvidia_gem_identify_object_params) + +#define DRM_IOCTL_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID),\ + struct drm_nvidia_get_dpy_id_for_connector_id_params) + +#define DRM_IOCTL_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID),\ + struct drm_nvidia_get_connector_id_for_dpy_id_params) + +#define DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GRANT_PERMISSIONS), \ + struct drm_nvidia_grant_permissions_params) + +#define DRM_IOCTL_NVIDIA_REVOKE_PERMISSIONS \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_REVOKE_PERMISSIONS), \ + struct drm_nvidia_revoke_permissions_params) + +#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CTX_CREATE \ + DRM_IOWR((DRM_COMMAND_BASE + \ + DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE), \ + struct drm_nvidia_semsurf_fence_ctx_create_params) + +#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CREATE \ + DRM_IOWR((DRM_COMMAND_BASE + \ + DRM_NVIDIA_SEMSURF_FENCE_CREATE), \ + struct drm_nvidia_semsurf_fence_create_params) + +#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_WAIT \ + DRM_IOW((DRM_COMMAND_BASE + \ + DRM_NVIDIA_SEMSURF_FENCE_WAIT), \ + struct drm_nvidia_semsurf_fence_wait_params) + +#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_ATTACH \ + DRM_IOW((DRM_COMMAND_BASE + \ + DRM_NVIDIA_SEMSURF_FENCE_ATTACH), \ + struct drm_nvidia_semsurf_fence_attach_params) + +#define DRM_IOCTL_NVIDIA_GET_DRM_FILE_UNIQUE_ID \ + 
+    DRM_IOWR((DRM_COMMAND_BASE + \
+              DRM_NVIDIA_GET_DRM_FILE_UNIQUE_ID), \
+             struct drm_nvidia_get_drm_file_unique_id_params)
+
+struct drm_nvidia_gem_import_nvkms_memory_params {
+    uint64_t mem_size;          /* IN */
+
+    uint64_t nvkms_params_ptr;  /* IN */
+    uint64_t nvkms_params_size; /* IN */
+
+    uint32_t handle;            /* OUT */
+
+    uint32_t __pad;
+};
+
+struct drm_nvidia_gem_import_userspace_memory_params {
+    uint64_t size;    /* IN Size of memory in bytes */
+    uint64_t address; /* IN Virtual address of userspace memory */
+    uint32_t handle;  /* OUT Handle to gem object */
+};
+
+struct drm_nvidia_get_dev_info_params {
+    uint32_t gpu_id;        /* OUT */
+    uint32_t mig_device;    /* OUT */
+    uint32_t primary_index; /* OUT; the "card%d" value */
+
+    uint32_t supports_alloc; /* OUT */
+    /* The generic_page_kind, page_kind_generation, and sector_layout
+     * fields are only valid if supports_alloc is true.
+     * See the DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D definitions of these. */
+    uint32_t generic_page_kind;    /* OUT */
+    uint32_t page_kind_generation; /* OUT */
+    uint32_t sector_layout;        /* OUT */
+    uint32_t supports_sync_fd;     /* OUT */
+    uint32_t supports_semsurf;     /* OUT */
+};
+
+struct drm_nvidia_prime_fence_context_create_params {
+    uint32_t handle; /* OUT GEM handle to fence context */
+
+    uint32_t index;  /* IN Index of semaphore to use for fencing */
+    uint64_t size;   /* IN Size of semaphore surface in bytes */
+
+    /* Params for importing userspace semaphore surface */
+    uint64_t import_mem_nvkms_params_ptr;  /* IN */
+    uint64_t import_mem_nvkms_params_size; /* IN */
+
+    /* Params for creating software signaling event */
+    uint64_t event_nvkms_params_ptr;  /* IN */
+    uint64_t event_nvkms_params_size; /* IN */
+};
+
+struct drm_nvidia_gem_prime_fence_attach_params {
+    uint32_t handle;               /* IN GEM handle to attach fence to */
+    uint32_t fence_context_handle; /* IN GEM handle to the fence context on
+                                    * which the fence will run */
+    uint32_t sem_thresh;           /* IN Semaphore value to reach before signal */
+    uint32_t __pad;
+};
+
+struct drm_nvidia_get_client_capability_params {
+    uint64_t capability; /* IN Client capability enum */
+    uint64_t value;      /* OUT Client capability value */
+};
+
+/* Struct that stores a CRC32 value and whether the hardware supports reading it */
+struct drm_nvidia_crtc_crc32 {
+    uint32_t value;    /* Read value, undefined if supported is false */
+    uint8_t supported; /* Supported boolean, true if readable by hardware */
+    uint8_t __pad0;
+    uint16_t __pad1;
+};
+
+struct drm_nvidia_crtc_crc32_v2_out {
+    struct drm_nvidia_crtc_crc32 compositorCrc32;      /* OUT compositor hardware CRC32 value */
+    struct drm_nvidia_crtc_crc32 rasterGeneratorCrc32; /* OUT raster generator CRC32 value */
+    struct drm_nvidia_crtc_crc32 outputCrc32;          /* OUT SF/SOR CRC32 value */
+};
+
+struct drm_nvidia_get_crtc_crc32_v2_params {
+    uint32_t crtc_id;                          /* IN CRTC identifier */
+    struct drm_nvidia_crtc_crc32_v2_out crc32; /* OUT Crc32 output structure */
+};
+
+struct drm_nvidia_get_crtc_crc32_params {
+    uint32_t crtc_id; /* IN CRTC identifier */
+    uint32_t crc32;   /* OUT CRC32 value */
+};
+
+struct drm_nvidia_gem_export_nvkms_memory_params {
+    uint32_t handle; /* IN */
+    uint32_t __pad;
+
+    uint64_t nvkms_params_ptr;  /* IN */
+    uint64_t nvkms_params_size; /* IN */
+};
+
+struct drm_nvidia_gem_map_offset_params {
+    uint32_t handle; /* IN Handle to gem object */
+    uint32_t __pad;
+
+    uint64_t offset; /* OUT Fake offset */
+};
+
+#define NV_GEM_ALLOC_NO_SCANOUT (1 << 0)
+
+struct drm_nvidia_gem_alloc_nvkms_memory_params {
+    uint32_t handle;        /* OUT */
+    uint8_t  block_linear;  /* IN */
+    uint8_t  compressible;  /* IN/OUT */
+    uint16_t __pad0;
+
+    uint64_t memory_size; /* IN */
+    uint32_t flags;       /* IN */
+    uint32_t __pad1;
+};
+
+struct drm_nvidia_gem_export_dmabuf_memory_params {
+    uint32_t handle; /* IN GEM handle */
+    uint32_t __pad;
+
+    uint64_t nvkms_params_ptr;  /* IN */
+    uint64_t nvkms_params_size; /* IN */
+};
+
+typedef enum {
+    NV_GEM_OBJECT_NVKMS,
+    NV_GEM_OBJECT_DMABUF,
+    NV_GEM_OBJECT_USERMEMORY,
+
+    NV_GEM_OBJECT_UNKNOWN = 0x7fffffff /* Force size of 32 bits. */
+} drm_nvidia_gem_object_type;
+
+struct drm_nvidia_gem_identify_object_params {
+    uint32_t handle;                        /* IN GEM handle */
+    drm_nvidia_gem_object_type object_type; /* OUT GEM object type */
+};
+
+struct drm_nvidia_get_dpy_id_for_connector_id_params {
+    uint32_t connectorId; /* IN */
+    uint32_t dpyId;       /* OUT */
+};
+
+struct drm_nvidia_get_connector_id_for_dpy_id_params {
+    uint32_t dpyId;       /* IN */
+    uint32_t connectorId; /* OUT */
+};
+
+enum drm_nvidia_permissions_type {
+    NV_DRM_PERMISSIONS_TYPE_MODESET = 2,
+    NV_DRM_PERMISSIONS_TYPE_SUB_OWNER = 3
+};
+
+struct drm_nvidia_grant_permissions_params {
+    int32_t fd;     /* IN */
+    uint32_t dpyId; /* IN */
+    uint32_t type;  /* IN */
+};
+
+struct drm_nvidia_revoke_permissions_params {
+    uint32_t dpyId; /* IN */
+    uint32_t type;  /* IN */
+};
+
+struct drm_nvidia_semsurf_fence_ctx_create_params {
+    uint64_t index; /* IN Index of the desired semaphore in the
+                     * fence context's semaphore surface */
+
+    /* Params for importing userspace semaphore surface */
+    uint64_t nvkms_params_ptr;  /* IN */
+    uint64_t nvkms_params_size; /* IN */
+
+    uint32_t handle; /* OUT GEM handle to fence context */
+    uint32_t __pad;
+};
+
+struct drm_nvidia_semsurf_fence_create_params {
+    uint32_t fence_context_handle; /* IN GEM handle to the fence context on
+                                    * which the fence will run */
+
+    uint32_t timeout_value_ms; /* IN Timeout value in ms for the fence,
+                                * after which the fence will be signaled
+                                * with its error status set to -ETIMEDOUT.
+                                * Default timeout value is 5000ms */
+
+    uint64_t wait_value; /* IN Semaphore value to reach before signal */
+
+    int32_t fd; /* OUT sync FD object representing the
+                 * semaphore at the specified index reaching
+                 * a value >= wait_value */
+    uint32_t __pad;
+};
+
+/*
+ * Note there is no provision for timeouts in this ioctl. The kernel
+ * documentation asserts timeouts should be handled by fence producers, and
+ * that waiters should not second-guess their logic, as it is producers rather
+ * than consumers that have better information when it comes to determining a
+ * reasonable timeout for a given workload.
+ */
+struct drm_nvidia_semsurf_fence_wait_params {
+    uint32_t fence_context_handle; /* IN GEM handle to the fence context which
+                                    * will be used to wait on the sync FD. Need
+                                    * not be the fence context used to create
+                                    * the sync FD. */
+
+    int32_t fd; /* IN sync FD object to wait on */
+
+    uint64_t pre_wait_value; /* IN Wait for the semaphore represented by
+                              * fence_context to reach this value before
+                              * waiting for the sync file. */
+
+    uint64_t post_wait_value; /* IN Signal the semaphore represented by
+                               * fence_context to this value after waiting
+                               * for the sync file */
+};
+
+struct drm_nvidia_semsurf_fence_attach_params {
+    uint32_t handle; /* IN GEM handle of buffer */
+
+    uint32_t fence_context_handle; /* IN GEM handle of fence context */
+
+    uint32_t timeout_value_ms; /* IN Timeout value in ms for the fence,
+                                * after which the fence will be signaled
+                                * with its error status set to -ETIMEDOUT.
+                                * Default timeout value is 5000ms */
+
+    uint32_t shared; /* IN If true, the fence will reserve shared
+                      * access to the buffer; otherwise it will
+                      * reserve exclusive access */
+
+    uint64_t wait_value; /* IN Semaphore value to reach before signal */
+};
+
+struct drm_nvidia_get_drm_file_unique_id_params {
+    uint64_t id; /* OUT Unique ID of the DRM file */
+};
+
+#endif /* _UAPI_NVIDIA_DRM_IOCTL_H_ */
diff --git a/kernel-open/nvidia-drm/nvidia-drm-linux.c b/kernel-open/nvidia-drm/nvidia-drm-linux.c
new file mode 100644
index 0000000..3cb1815
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-linux.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#include "nvidia-drm-os-interface.h"
+#include "nvidia-drm.h"
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_AVAILABLE)
+
+MODULE_PARM_DESC(
+    modeset,
+    "Enable atomic kernel modesetting (1 = enable, 0 = disable (default))");
+module_param_named(modeset, nv_drm_modeset_module_param, bool, 0400);
+
+#if defined(NV_DRM_FBDEV_AVAILABLE)
+MODULE_PARM_DESC(
+    fbdev,
+    "Create a framebuffer device (1 = enable (default), 0 = disable)");
+module_param_named(fbdev, nv_drm_fbdev_module_param, bool, 0400);
+#endif
+
+#endif /* NV_DRM_AVAILABLE */
+
+/*************************************************************************
+ * Linux loading support code.
+ *************************************************************************/
+
+static int __init nv_linux_drm_init(void)
+{
+    return nv_drm_init();
+}
+
+static void __exit nv_linux_drm_exit(void)
+{
+    nv_drm_exit();
+}
+
+module_init(nv_linux_drm_init);
+module_exit(nv_linux_drm_exit);
+
+MODULE_LICENSE("Dual MIT/GPL");
+
+MODULE_INFO(supported, "external");
+MODULE_VERSION(NV_VERSION_STRING);
diff --git a/kernel-open/nvidia-drm/nvidia-drm-modeset.c b/kernel-open/nvidia-drm/nvidia-drm-modeset.c
new file mode 100644
index 0000000..90fc89e
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-modeset.c
@@ -0,0 +1,865 @@
+/*
+ * Copyright (c) 2015, 2025, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#include "nvidia-drm-priv.h"
+#include "nvidia-drm-modeset.h"
+#include "nvidia-drm-crtc.h"
+#include "nvidia-drm-os-interface.h"
+#include "nvidia-drm-helper.h"
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_vblank.h>
+
+#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
+#include <linux/nvhost.h>
+#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
+#include <linux/host1x-next.h>
+#endif
+
+#include <linux/dma-fence.h>
+
+struct nv_drm_atomic_state {
+    struct NvKmsKapiRequestedModeSetConfig config;
+    struct drm_atomic_state base;
+};
+
+static inline struct nv_drm_atomic_state *to_nv_atomic_state(
+    struct drm_atomic_state *state)
+{
+    return container_of(state, struct nv_drm_atomic_state, base);
+}
+
+struct drm_atomic_state *nv_drm_atomic_state_alloc(struct drm_device *dev)
+{
+    struct nv_drm_atomic_state *nv_state =
+        nv_drm_calloc(1, sizeof(*nv_state));
+
+    if (nv_state == NULL || drm_atomic_state_init(dev, &nv_state->base) < 0) {
+        nv_drm_free(nv_state);
+        return NULL;
+    }
+
+    return &nv_state->base;
+}
+
+void nv_drm_atomic_state_clear(struct drm_atomic_state *state)
+{
+    drm_atomic_state_default_clear(state);
+}
+
+void nv_drm_atomic_state_free(struct drm_atomic_state *state)
+{
+    struct nv_drm_atomic_state *nv_state =
+        to_nv_atomic_state(state);
+    drm_atomic_state_default_release(state);
+    nv_drm_free(nv_state);
+}
+
+/**
+ * __will_generate_flip_event - Check whether an event will be generated by
+ * the hardware when it flips from the old crtc/plane state to the current
+ * one. This function is called after drm_atomic_helper_swap_state(), so the
+ * new state has already been swapped into the current state.
+ */
+static bool __will_generate_flip_event(struct drm_crtc *crtc,
+                                       struct drm_crtc_state *old_crtc_state)
+{
+    struct drm_crtc_state *new_crtc_state = crtc->state;
+    struct nv_drm_crtc_state *nv_new_crtc_state =
+        to_nv_crtc_state(new_crtc_state);
+    struct drm_plane_state *old_plane_state = NULL;
+    struct drm_plane *plane = NULL;
+    int i;
+
+    if (!old_crtc_state->active && !new_crtc_state->active) {
+        /*
+         * The crtc is not active in either the old or the new state,
+         * therefore all planes are disabled and the hardware cannot
+         * generate flip events.
+         */
+        return false;
+    }
+
+    /* Find out whether primary & overlay flip done events will be generated.
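+     * The hardware raises one flip-done event per plane that was actively
+     * scanning out in the old state, so count those planes below.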
+     */
+    nv_drm_for_each_plane_in_state(old_crtc_state->state,
+                                   plane, old_plane_state, i) {
+        if (old_plane_state->crtc != crtc) {
+            continue;
+        }
+
+        if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+            continue;
+        }
+
+        /*
+         * The hardware generates a flip event only for those
+         * planes that were previously active.
+         */
+        if (old_crtc_state->active && old_plane_state->fb != NULL) {
+            nv_new_crtc_state->nv_flip->pending_events++;
+        }
+    }
+
+    return nv_new_crtc_state->nv_flip->pending_events != 0;
+}
+
+static int __nv_drm_put_back_post_fence_fd(
+    struct nv_drm_plane_state *plane_state,
+    const struct NvKmsKapiLayerReplyConfig *layer_reply_config)
+{
+    int fd = layer_reply_config->postSyncptFd;
+    int ret = 0;
+
+    if ((fd >= 0) && (plane_state->fd_user_ptr != NULL)) {
+        ret = copy_to_user(plane_state->fd_user_ptr, &fd, sizeof(fd));
+        if (ret != 0) {
+            return ret;
+        }
+
+        /*! Set back to NULL and let set_property specify it again */
+        plane_state->fd_user_ptr = NULL;
+    }
+
+    return ret;
+}
+
+struct nv_drm_plane_fence_cb_data {
+    struct dma_fence_cb dma_fence_cb;
+    struct nv_drm_device *nv_dev;
+    NvU32 semaphore_index;
+};
+
+static void
+__nv_drm_plane_fence_cb(
+    struct dma_fence *fence,
+    struct dma_fence_cb *cb_data
+)
+{
+    struct nv_drm_plane_fence_cb_data *fence_data =
+        container_of(cb_data, typeof(*fence_data), dma_fence_cb);
+    struct nv_drm_device *nv_dev = fence_data->nv_dev;
+
+    dma_fence_put(fence);
+    nvKms->signalDisplaySemaphore(nv_dev->pDevice, fence_data->semaphore_index);
+    nv_drm_free(fence_data);
+}
+
+static int __nv_drm_convert_in_fences(
+    struct nv_drm_device *nv_dev,
+    struct drm_atomic_state *state,
+    struct drm_crtc *crtc,
+    struct drm_crtc_state *crtc_state)
+{
+    struct drm_plane *plane = NULL;
+    struct drm_plane_state *plane_state = NULL;
+    struct nv_drm_plane *nv_plane = NULL;
+    struct NvKmsKapiLayerRequestedConfig *plane_req_config = NULL;
+    struct NvKmsKapiHeadRequestedConfig *head_req_config =
+        &to_nv_crtc_state(crtc_state)->req_config;
+    struct nv_drm_plane_fence_cb_data *fence_data;
+    uint32_t semaphore_index;
+    uint32_t idx_count;
+    int ret, i;
+
+    if (!crtc_state->active) {
+        return 0;
+    }
+
+    nv_drm_for_each_new_plane_in_state(state, plane, plane_state, i) {
+        if ((plane->type == DRM_PLANE_TYPE_CURSOR) ||
+            (plane_state->crtc != crtc) ||
+            (plane_state->fence == NULL)) {
+            continue;
+        }
+
+        nv_plane = to_nv_plane(plane);
+        plane_req_config =
+            &head_req_config->layerRequestedConfig[nv_plane->layer_idx];
+
+        if (nv_dev->supportsSyncpts) {
+#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
+#if defined(NV_NVHOST_DMA_FENCE_UNPACK_PRESENT)
+            int ret =
+                nvhost_dma_fence_unpack(
+                    plane_state->fence,
+                    &plane_req_config->config.syncParams.u.syncpt.preSyncptId,
+                    &plane_req_config->config.syncParams.u.syncpt.preSyncptValue);
+            if (ret == 0) {
+                plane_req_config->config.syncParams.preSyncptSpecified = true;
+                continue;
+            }
+#endif
+#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
+            int ret =
+                host1x_fence_extract(
+                    plane_state->fence,
+                    &plane_req_config->config.syncParams.u.syncpt.preSyncptId,
+                    &plane_req_config->config.syncParams.u.syncpt.preSyncptValue);
+            if (ret == 0) {
+                plane_req_config->config.syncParams.preSyncptSpecified = true;
+                continue;
+            }
+#endif
+        }
+
+        /*
+         * Syncpt extraction failed, or syncpts are not supported.
+         * Use general DRM fence support with semaphores instead.
+         */
+        if (plane_req_config->config.syncParams.postSyncptRequested) {
+            // Can't mix Syncpts and semaphores in a given request.
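+            // An earlier plane in this atomic request already asked for a
+            // post-flip syncpt, so fall out with -EINVAL rather than mixing
+            // the two sync primitives.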
+            return -EINVAL;
+        }
+
+        for (idx_count = 0; idx_count < nv_dev->display_semaphores.count; idx_count++) {
+            semaphore_index = nv_drm_next_display_semaphore(nv_dev);
+            if (nvKms->tryInitDisplaySemaphore(nv_dev->pDevice, semaphore_index)) {
+                break;
+            }
+        }
+
+        if (idx_count == nv_dev->display_semaphores.count) {
+            NV_DRM_DEV_LOG_ERR(
+                nv_dev,
+                "Failed to initialize semaphore for plane fence");
+            /*
+             * This should only happen if the semaphore pool was somehow
+             * exhausted. Waiting a bit and retrying may help in that case.
+             */
+            return -EAGAIN;
+        }
+
+        plane_req_config->config.syncParams.semaphoreSpecified = true;
+        plane_req_config->config.syncParams.u.semaphore.index = semaphore_index;
+
+        fence_data = nv_drm_calloc(1, sizeof(*fence_data));
+
+        if (!fence_data) {
+            NV_DRM_DEV_LOG_ERR(
+                nv_dev,
+                "Failed to allocate callback data for plane fence");
+            nvKms->cancelDisplaySemaphore(nv_dev->pDevice, semaphore_index);
+            return -ENOMEM;
+        }
+
+        fence_data->nv_dev = nv_dev;
+        fence_data->semaphore_index = semaphore_index;
+
+        ret = dma_fence_add_callback(plane_state->fence,
+                                     &fence_data->dma_fence_cb,
+                                     __nv_drm_plane_fence_cb);
+
+        switch (ret) {
+        case -ENOENT:
+            /* The fence is already signaled */
+            __nv_drm_plane_fence_cb(plane_state->fence,
+                                    &fence_data->dma_fence_cb);
+#if defined(fallthrough)
+            fallthrough;
+#else
+            /* Fallthrough */
+#endif
+        case 0:
+            /*
+             * The plane state's fence reference has either been consumed or
+             * belongs to the outstanding callback now.
+             */
+            plane_state->fence = NULL;
+            break;
+        default:
+            NV_DRM_DEV_LOG_ERR(
+                nv_dev,
+                "Failed plane fence callback registration");
+            /* Fence callback registration failed */
+            nvKms->cancelDisplaySemaphore(nv_dev->pDevice, semaphore_index);
+            nv_drm_free(fence_data);
+            return ret;
+        }
+    }
+
+    return 0;
+}
+
+static int __nv_drm_get_syncpt_data(
+    struct nv_drm_device *nv_dev,
+    struct drm_crtc *crtc,
+    struct drm_crtc_state *old_crtc_state,
+    struct NvKmsKapiRequestedModeSetConfig *requested_config,
+    struct NvKmsKapiModeSetReplyConfig *reply_config)
+{
+    struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
+    struct NvKmsKapiHeadReplyConfig *head_reply_config;
+    struct nv_drm_plane_state *plane_state;
+    struct drm_crtc_state *new_crtc_state = crtc->state;
+    struct drm_plane_state *old_plane_state = NULL;
+    struct drm_plane_state *new_plane_state = NULL;
+    struct drm_plane *plane = NULL;
+    int i, ret;
+
+    if (!old_crtc_state->active && !new_crtc_state->active) {
+        /*
+         * The crtc is not active in either the old or the new state,
+         * therefore all planes are disabled; exit early.
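+         * No post-fence FDs can have been produced for this head.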
+         */
+        return 0;
+    }
+
+    head_reply_config = &reply_config->headReplyConfig[nv_crtc->head];
+
+    nv_drm_for_each_plane_in_state(old_crtc_state->state, plane, old_plane_state, i) {
+        struct nv_drm_plane *nv_plane = to_nv_plane(plane);
+
+        if (plane->type == DRM_PLANE_TYPE_CURSOR || old_plane_state->crtc != crtc) {
+            continue;
+        }
+
+        new_plane_state = plane->state;
+
+        if (new_plane_state->crtc != crtc) {
+            continue;
+        }
+
+        plane_state = to_nv_drm_plane_state(new_plane_state);
+
+        ret = __nv_drm_put_back_post_fence_fd(
+            plane_state,
+            &head_reply_config->layerReplyConfig[nv_plane->layer_idx]);
+
+        if (ret != 0) {
+            return ret;
+        }
+    }
+
+    return 0;
+}
+
+/**
+ * nv_drm_atomic_apply_modeset_config - validate/commit modeset config
+ * @dev: DRM device
+ * @state: atomic state tracking atomic update
+ * @commit: commit/check modeset config associated with atomic update
+ *
+ * @state tracks the atomic update and the modeset objects affected
+ * by the atomic update, but the state of the modeset objects it contains
+ * depends on the current stage of the update.
+ * At the commit stage, the proposed state is already stored in the current
+ * state, and @state contains the old state for all affected modeset objects.
+ * At the check/validation stage, @state contains the proposed state for
+ * all affected objects.
+ *
+ * Sequence of an atomic update -
+ *   1. Check/validate the proposed atomic state,
+ *   2. Do any other steps that might fail,
+ *   3. Put the proposed state into the current state pointers,
+ *   4. Actually commit the hardware state,
+ *   5. Clean up the old state.
+ *
+ * This function is called at stages (1) and (4); at stage (4), it runs after
+ * drm_atomic_helper_swap_state().
+ */
+static int
+nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
+                                   struct drm_atomic_state *state,
+                                   bool commit)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct NvKmsKapiRequestedModeSetConfig *requested_config =
+        &(to_nv_atomic_state(state)->config);
+    struct NvKmsKapiModeSetReplyConfig reply_config = { };
+    struct drm_crtc *crtc;
+    struct drm_crtc_state *crtc_state;
+    int i;
+    int ret;
+
+    /*
+     * If sub-owner permission was granted to another NVKMS client, disallow
+     * modesets through the DRM interface.
+     */
+    if (nv_dev->subOwnershipGranted) {
+        return -EINVAL;
+    }
+
+    if (commit) {
+        /*
+         * This function does what is necessary to prepare the framebuffers
+         * attached to each new plane in the state for scan out, mostly by
+         * calling back into driver callbacks the NVIDIA driver does not
+         * provide. The end result is that, on the NVIDIA driver, all it does
+         * is populate the plane state's dma fence pointers with any implicit
+         * sync fences attached to the GEM objects associated with those planes
+         * in the new state, preferring explicit sync fences when appropriate.
+         * This must be done prior to converting the per-plane fences to
+         * semaphore waits below.
+         */
+        ret = drm_atomic_helper_prepare_planes(dev, state);
+
+        if (ret) {
+            return ret;
+        }
+    }
+
+    memset(requested_config, 0, sizeof(*requested_config));
+
+    /* Loop over affected crtcs and construct NvKmsKapiRequestedModeSetConfig */
+    nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
+        /*
+         * When committing a state, the new state is already stored in
+         * crtc->state. When checking a proposed state, the proposed state is
+         * stored in crtc_state.
+         */
+        struct drm_crtc_state *new_crtc_state =
+            commit ? crtc->state : crtc_state;
+        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
+
+        if (commit) {
+            struct drm_crtc_state *old_crtc_state = crtc_state;
+            struct nv_drm_crtc_state *nv_new_crtc_state =
+                to_nv_crtc_state(new_crtc_state);
+
+            nv_new_crtc_state->nv_flip->event = new_crtc_state->event;
+            nv_new_crtc_state->nv_flip->pending_events = 0;
+            new_crtc_state->event = NULL;
+
+            /*
+             * If the flip event will be generated by the hardware,
+             * then defer flip object processing to the flip event from
+             * hardware.
+             */
+            if (__will_generate_flip_event(crtc, old_crtc_state)) {
+                nv_drm_crtc_enqueue_flip(nv_crtc,
+                                         nv_new_crtc_state->nv_flip);
+
+                nv_new_crtc_state->nv_flip = NULL;
+            }
+
+            ret = __nv_drm_convert_in_fences(nv_dev,
+                                             state,
+                                             crtc,
+                                             new_crtc_state);
+
+            if (ret != 0) {
+                return ret;
+            }
+        }
+
+        /*
+         * Do this deep copy after calling __nv_drm_convert_in_fences,
+         * which modifies the new CRTC state's req_config member
+         */
+        requested_config->headRequestedConfig[nv_crtc->head] =
+            to_nv_crtc_state(new_crtc_state)->req_config;
+
+        requested_config->headsMask |= 1 << nv_crtc->head;
+    }
+
+    if (commit && nvKms->systemInfo.bAllowWriteCombining) {
+        /*
+         * XXX This call is required only if a dumb buffer is going
+         * to be presented.
+         */
+        nv_drm_write_combine_flush();
+    }
+
+    if (!nvKms->applyModeSetConfig(nv_dev->pDevice,
+                                   requested_config,
+                                   &reply_config,
+                                   commit)) {
+        if (commit || reply_config.flipResult != NV_KMS_FLIP_RESULT_IN_PROGRESS) {
+            return -EINVAL;
+        }
+    }
+
+    if (commit && nv_dev->supportsSyncpts) {
+        nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
+            /*! Loop over affected crtcs and get NvKmsKapiModeSetReplyConfig */
+            ret = __nv_drm_get_syncpt_data(
+                nv_dev, crtc, crtc_state, requested_config, &reply_config);
+            if (ret != 0) {
+                return ret;
+            }
+        }
+    }
+
+    if (commit && nv_dev->requiresVrrSemaphores && reply_config.vrrFlip) {
+        nvKms->signalVrrSemaphore(nv_dev->pDevice, reply_config.vrrSemaphoreIndex);
+    }
+
+    return 0;
+}
+
+int nv_drm_atomic_check(struct drm_device *dev,
+                        struct drm_atomic_state *state)
+{
+    int ret = 0;
+
+    struct drm_crtc *crtc;
+    struct drm_crtc_state *crtc_state;
+    int i;
+
+    struct drm_plane *plane;
+    struct drm_plane_state *plane_state;
+    int j;
+    bool cursor_surface_changed;
+    bool cursor_only_commit;
+
+    nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
+
+        /*
+         * Committing a cursor surface change without any other plane change
+         * can cause the cursor surface in use by the hardware to be freed
+         * prematurely. Add all planes to the commit to avoid this. This is a
+         * workaround for bug 4966645.
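+         * Keeping every plane in the commit keeps the old cursor surface
+         * referenced until the new configuration has been fully applied.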
+         */
+        cursor_surface_changed = false;
+        cursor_only_commit = true;
+        nv_drm_for_each_plane_in_state(crtc_state->state, plane, plane_state, j) {
+            if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+                if (plane_state->fb != plane->state->fb) {
+                    cursor_surface_changed = true;
+                }
+            } else {
+                cursor_only_commit = false;
+                break;
+            }
+        }
+
+        /*
+         * If the color management changed on the crtc, we need to update the
+         * CSC matrices of the crtc's planes, so add the crtc's planes to the
+         * commit.
+         */
+        if (crtc_state->color_mgmt_changed ||
+            (cursor_surface_changed && cursor_only_commit)) {
+            if ((ret = drm_atomic_add_affected_planes(state, crtc)) != 0) {
+                goto done;
+            }
+        }
+    }
+
+    if ((ret = drm_atomic_helper_check(dev, state)) != 0) {
+        goto done;
+    }
+
+    ret = nv_drm_atomic_apply_modeset_config(dev,
+                                             state, false /* commit */);
+
+done:
+    return ret;
+}
+
+/**
+ * __nv_drm_handle_flip_event - handle a flip-occurred event
+ * @nv_crtc: crtc on which the flip occurred
+ *
+ * This handler dequeues the first nv_drm_flip from the crtc's flip_list,
+ * generates an event if one was requested at flip time, and frees the
+ * nv_drm_flip.
+ */
+static void __nv_drm_handle_flip_event(struct nv_drm_crtc *nv_crtc)
+{
+    struct drm_device *dev = nv_crtc->base.dev;
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct nv_drm_flip *nv_flip;
+
+    /*
+     * Acquire event_lock before dequeuing the nv_flip object; otherwise,
+     * immediate flip event delivery from nv_drm_atomic_commit() races ahead
+     * and messes up the event delivery order.
+     */
+    spin_lock(&dev->event_lock);
+    nv_flip = nv_drm_crtc_dequeue_flip(nv_crtc);
+    if (likely(nv_flip != NULL)) {
+        struct nv_drm_flip *nv_deferred_flip, *nv_next_deferred_flip;
+
+        if (nv_flip->event != NULL) {
+            drm_crtc_send_vblank_event(&nv_crtc->base, nv_flip->event);
+        }
+
+        /*
+         * Process flips that were deferred until processing of this nv_flip
+         * object.
+         */
+        list_for_each_entry_safe(nv_deferred_flip,
+                                 nv_next_deferred_flip,
+                                 &nv_flip->deferred_flip_list, list_entry) {
+
+            if (nv_deferred_flip->event != NULL) {
+                drm_crtc_send_vblank_event(&nv_crtc->base,
+                                           nv_deferred_flip->event);
+            }
+            list_del(&nv_deferred_flip->list_entry);
+
+            nv_drm_free(nv_deferred_flip);
+        }
+    }
+    spin_unlock(&dev->event_lock);
+
+    wake_up_all(&nv_dev->flip_event_wq);
+
+    nv_drm_free(nv_flip);
+}
+
+int nv_drm_atomic_commit(struct drm_device *dev,
+                         struct drm_atomic_state *state,
+                         bool nonblock)
+{
+    int ret = -EBUSY;
+
+    int i;
+    struct drm_crtc *crtc = NULL;
+    struct drm_crtc_state *crtc_state = NULL;
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+
+    /*
+     * XXX: drm_mode_config_funcs::atomic_commit() mandates returning -EBUSY
+     * for a nonblocking commit if the commit would need to wait for previous
+     * updates (commit tasks/flip events) to complete. For blocking commits,
+     * it mandates waiting for previous updates to complete. However, the
+     * kernel DRM-KMS documentation does explicitly allow maintaining a
+     * queue of outstanding commits.
+     *
+     * Our system already implements such a queue, but due to
+     * bug 4054608, it is currently not used.
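+     * Until that is resolved, a nonblocking commit that would queue behind a
+     * pending flip bails out with -EBUSY below.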
+     */
+    nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
+        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
+
+        /*
+         * It is not necessary to hold nv_drm_crtc::flip_list_lock here
+         * because:
+         *
+         * The core DRM driver acquires locks for all affected crtcs before
+         * calling into the ->commit() hook, therefore it is not possible for
+         * other threads to call into the ->commit() hook affecting the same
+         * crtcs and enqueue flip objects into flip_list -
+         *
+         *   nv_drm_atomic_commit()
+         *     |-> nv_drm_atomic_apply_modeset_config(commit=true)
+         *           |-> nv_drm_crtc_enqueue_flip()
+         *
+         * The only possibility is that the list_empty() check races with the
+         * code path dequeuing a flip object -
+         *
+         *   __nv_drm_handle_flip_event()
+         *     |-> nv_drm_crtc_dequeue_flip()
+         *
+         * But this race condition can't lead list_empty() to return an
+         * incorrect result. nv_drm_crtc_dequeue_flip() in the middle of
+         * updating the list could not trick us into thinking the list is
+         * empty when it isn't.
+         */
+        if (nonblock) {
+            if (!list_empty(&nv_crtc->flip_list)) {
+                return -EBUSY;
+            }
+        } else {
+            if (wait_event_timeout(
+                    nv_dev->flip_event_wq,
+                    list_empty(&nv_crtc->flip_list),
+                    3 * HZ /* 3 seconds */) == 0) {
+                NV_DRM_DEV_LOG_ERR(
+                    nv_dev,
+                    "Flip event timeout on head %u", nv_crtc->head);
+            }
+        }
+
+        /*
+         * If the legacy LUT needs to be updated, ensure that the previous LUT
+         * update is complete first.
+         */
+        if (crtc_state->color_mgmt_changed) {
+            NvBool complete = nvKms->checkLutNotifier(nv_dev->pDevice,
+                                                      nv_crtc->head,
+                                                      !nonblock /* waitForCompletion */);
+
+            /* If checking the LUT notifier failed, assume no LUT notifier is set. */
+            if (!complete) {
+                if (nonblock) {
+                    return -EBUSY;
+                } else {
+                    /*
+                     * checkLutNotifier should wait on the notifier in this
+                     * case, so we should only get here if the wait timed out.
+                     */
+                    NV_DRM_DEV_LOG_ERR(
+                        nv_dev,
+                        "LUT notifier timeout on head %u", nv_crtc->head);
+                }
+            }
+        }
+    }
+
+    /*
+     * nv_drm_atomic_commit() implements blocking/non-blocking atomic commit
+     * using nv_drm_crtc::flip_list; it does not require any help from the
+     * core DRM helper functions to stall commit processing. Therefore, false
+     * is passed for the 'stall' parameter.
+     * In this context, failure from drm_atomic_helper_swap_state() is not
+     * expected.
+     */
+
+    ret = drm_atomic_helper_swap_state(state, false /* stall */);
+    if (WARN_ON(ret != 0)) {
+        return ret;
+    }
+
+    /*
+     * nv_drm_atomic_commit() must not return failure after
+     * calling drm_atomic_helper_swap_state().
+     */
+
+    if ((ret = nv_drm_atomic_apply_modeset_config(
+                dev,
+                state, true /* commit */)) != 0) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to apply atomic modeset. Error code: %d",
+            ret);
+
+        goto done;
+    }
+
+    nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
+        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
+        struct nv_drm_crtc_state *nv_new_crtc_state =
+            to_nv_crtc_state(crtc->state);
+
+        /*
+         * If nv_drm_atomic_apply_modeset_config() hasn't consumed the flip
+         * object, no event will be generated for this flip, and we need to
+         * process it:
+         */
+
+        if (nv_new_crtc_state->nv_flip != NULL) {
+            /*
+             * First, defer processing of all pending flips for this crtc until
+             * the last flip in the queue has been processed. This ensures a
+             * correct order in event delivery.
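+             * The deferred flip is chained onto the last queued flip below,
+             * and its event is sent only after that flip's own event in
+             * __nv_drm_handle_flip_event().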
+             */
+            spin_lock(&nv_crtc->flip_list_lock);
+            if (!list_empty(&nv_crtc->flip_list)) {
+                struct nv_drm_flip *nv_last_flip =
+                    list_last_entry(&nv_crtc->flip_list,
+                                    struct nv_drm_flip, list_entry);
+
+                list_add(&nv_new_crtc_state->nv_flip->list_entry,
+                         &nv_last_flip->deferred_flip_list);
+
+                nv_new_crtc_state->nv_flip = NULL;
+            }
+            spin_unlock(&nv_crtc->flip_list_lock);
+        }
+
+        if (nv_new_crtc_state->nv_flip != NULL) {
+            /*
+             * Then, if there are no more pending flips for this crtc, deliver
+             * the event for the current flip.
+             */
+            if (nv_new_crtc_state->nv_flip->event != NULL) {
+                spin_lock(&dev->event_lock);
+                drm_crtc_send_vblank_event(crtc,
+                                           nv_new_crtc_state->nv_flip->event);
+                spin_unlock(&dev->event_lock);
+            }
+
+            nv_drm_free(nv_new_crtc_state->nv_flip);
+            nv_new_crtc_state->nv_flip = NULL;
+        }
+
+        if (!nonblock) {
+            /*
+             * It is not necessary to hold nv_drm_crtc::flip_list_lock here,
+             * for the same reasons as in the loop above: the core DRM driver
+             * serializes ->commit() calls for the same crtcs, and the only
+             * possible race, against nv_drm_crtc_dequeue_flip() in
+             * __nv_drm_handle_flip_event(), cannot lead list_empty() to
+             * return an incorrect result.
+             */
+            if (wait_event_timeout(
+                    nv_dev->flip_event_wq,
+                    list_empty(&nv_crtc->flip_list),
+                    3 * HZ /* 3 seconds */) == 0) {
+                NV_DRM_DEV_LOG_ERR(
+                    nv_dev,
+                    "Flip event timeout on head %u", nv_crtc->head);
+                while (!list_empty(&nv_crtc->flip_list)) {
+                    __nv_drm_handle_flip_event(nv_crtc);
+                }
+            }
+
+            if (crtc_state->color_mgmt_changed) {
+                NvBool complete = nvKms->checkLutNotifier(nv_dev->pDevice,
+                                                          nv_crtc->head,
+                                                          true /* waitForCompletion */);
+                if (!complete) {
+                    NV_DRM_DEV_LOG_ERR(
+                        nv_dev,
+                        "LUT notifier timeout on head %u", nv_crtc->head);
+                }
+            }
+        }
+    }
+
+done:
+
+    /*
+     * The state will be freed when the caller drops its reference after we
+     * return.
+     */
+
+    return 0;
+}
+
+void nv_drm_handle_flip_occurred(struct nv_drm_device *nv_dev,
+                                 NvU32 head, NvU32 plane)
+{
+    struct nv_drm_crtc *nv_crtc = nv_drm_crtc_lookup(nv_dev, head);
+
+    if (NV_DRM_WARN(nv_crtc == NULL)) {
+        return;
+    }
+
+    __nv_drm_handle_flip_event(nv_crtc);
+}
+
+#endif
diff --git a/kernel-open/nvidia-drm/nvidia-drm-modeset.h b/kernel-open/nvidia-drm/nvidia-drm-modeset.h
new file mode 100644
index 0000000..40df631
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-modeset.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DRM_MODESET_H__
+#define __NVIDIA_DRM_MODESET_H__
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#include "nvkms-kapi.h"
+
+struct drm_device;
+struct drm_atomic_state;
+
+struct drm_atomic_state *nv_drm_atomic_state_alloc(struct drm_device *dev);
+void nv_drm_atomic_state_clear(struct drm_atomic_state *state);
+void nv_drm_atomic_state_free(struct drm_atomic_state *state);
+
+int nv_drm_atomic_check(struct drm_device *dev,
+                        struct drm_atomic_state *state);
+
+int nv_drm_atomic_commit(struct drm_device *dev,
+                         struct drm_atomic_state *state, bool nonblock);
+
+
+void nv_drm_handle_flip_occurred(struct nv_drm_device *nv_dev,
+                                 NvU32 head, NvU32 plane);
+
+int nv_drm_shut_down_all_crtcs(struct drm_device *dev);
+
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#endif /* __NVIDIA_DRM_MODESET_H__ */
diff --git a/kernel-open/nvidia-drm/nvidia-drm-os-interface.c b/kernel-open/nvidia-drm/nvidia-drm-os-interface.c
new file mode 100644
index 0000000..a6b3525
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-os-interface.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2015-2025, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/slab.h>
+
+#include "nvidia-drm-os-interface.h"
+
+#if defined(NV_DRM_AVAILABLE)
+
+#include <linux/file.h>
+#include <linux/sync_file.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include "nv-mm.h"
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+bool nv_drm_modeset_module_param = false;
+bool nv_drm_fbdev_module_param = true;
+
+void *nv_drm_calloc(size_t nmemb, size_t size)
+{
+    size_t total_size = nmemb * size;
+    //
+    // Check for overflow.
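+    // If nmemb * size overflowed, dividing the product back by nmemb
+    // will not recover size.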
+    //
+    if ((nmemb != 0) && ((total_size / nmemb) != size))
+    {
+        return NULL;
+    }
+    return kzalloc(total_size, GFP_KERNEL);
+}
+
+void nv_drm_free(void *ptr)
+{
+    if (IS_ERR(ptr)) {
+        return;
+    }
+
+    kfree(ptr);
+}
+
+#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
+    #define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory")
+#elif defined(NVCPU_PPC64LE)
+    #define WRITE_COMBINE_FLUSH() asm volatile("sync":::"memory")
+#else
+    #define WRITE_COMBINE_FLUSH() mb()
+#endif
+
+void nv_drm_write_combine_flush(void)
+{
+    WRITE_COMBINE_FLUSH();
+}
+
+int nv_drm_lock_user_pages(unsigned long address,
+                           unsigned long pages_count, struct page ***pages)
+{
+    struct mm_struct *mm = current->mm;
+    struct page **user_pages;
+    int pages_pinned;
+
+    user_pages = nv_drm_calloc(pages_count, sizeof(*user_pages));
+
+    if (user_pages == NULL) {
+        return -ENOMEM;
+    }
+
+    nv_mmap_read_lock(mm);
+
+    pages_pinned = NV_PIN_USER_PAGES(address, pages_count, FOLL_WRITE,
+                                     user_pages);
+    nv_mmap_read_unlock(mm);
+
+    if (pages_pinned < 0 || (unsigned)pages_pinned < pages_count) {
+        goto failed;
+    }
+
+    *pages = user_pages;
+
+    return 0;
+
+failed:
+
+    if (pages_pinned > 0) {
+        int i;
+
+        for (i = 0; i < pages_pinned; i++) {
+            NV_UNPIN_USER_PAGE(user_pages[i]);
+        }
+    }
+
+    nv_drm_free(user_pages);
+
+    return (pages_pinned < 0) ? pages_pinned : -EINVAL;
+}
+
+void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages)
+{
+    unsigned long i;
+
+    for (i = 0; i < pages_count; i++) {
+        set_page_dirty_lock(pages[i]);
+        NV_UNPIN_USER_PAGE(pages[i]);
+    }
+
+    nv_drm_free(pages);
+}
+
+/*
+ * The linuxkpi vmap doesn't use the flags argument, as it doesn't seem to be
+ * needed. Define VM_USERMAP to 0 to make the errors go away.
+ *
+ * vmap: sys/compat/linuxkpi/common/src/linux_compat.c
+ */
+#if defined(NV_BSD)
+#define VM_USERMAP 0
+#endif
+
+void *nv_drm_vmap(struct page **pages, unsigned long pages_count, bool cached)
+{
+    pgprot_t prot = PAGE_KERNEL;
+
+    if (!cached) {
+        prot = pgprot_noncached(PAGE_KERNEL);
+    }
+
+    return vmap(pages, pages_count, VM_USERMAP, prot);
+}
+
+void nv_drm_vunmap(void *address)
+{
+    vunmap(address);
+}
+
+bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name)
+{
+    worker->shutting_down = false;
+    if (nv_kthread_q_init(&worker->q, name)) {
+        return false;
+    }
+
+    spin_lock_init(&worker->lock);
+
+    return true;
+}
+
+void nv_drm_workthread_shutdown(nv_drm_workthread *worker)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&worker->lock, flags);
+    worker->shutting_down = true;
+    spin_unlock_irqrestore(&worker->lock, flags);
+
+    nv_kthread_q_stop(&worker->q);
+}
+
+void nv_drm_workthread_work_init(nv_drm_work *work,
+                                 void (*callback)(void *),
+                                 void *arg)
+{
+    nv_kthread_q_item_init(work, callback, arg);
+}
+
+int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work)
+{
+    unsigned long flags;
+    int ret = 0;
+
+    spin_lock_irqsave(&worker->lock, flags);
+    if (!worker->shutting_down) {
+        ret = nv_kthread_q_schedule_q_item(&worker->q, work);
+    }
+    spin_unlock_irqrestore(&worker->lock, flags);
+
+    return ret;
+}
+
+void nv_drm_timer_setup(nv_drm_timer *timer, void (*callback)(nv_drm_timer *nv_drm_timer))
+{
+    nv_timer_setup(timer, callback);
+}
+
+void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long timeout_native)
+{
+    mod_timer(&timer->kernel_timer, timeout_native);
+}
+
+unsigned long nv_drm_timer_now(void)
+{
+    return jiffies;
+}
+
+unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms)
+{
+    return jiffies + msecs_to_jiffies(relative_timeout_ms);
+}
+
+int nv_drm_create_sync_file(struct dma_fence *fence)
+{
+    struct sync_file *sync;
+    int fd = get_unused_fd_flags(O_CLOEXEC);
+
+    if (fd < 0) {
+        return fd;
+    }
+
+    /* sync_file_create() generates its own reference to the fence */
+    sync = sync_file_create(fence);
+
+    if (IS_ERR(sync)) {
+        put_unused_fd(fd);
+        return PTR_ERR(sync);
+    }
+
+    fd_install(fd, sync->file);
+
+    return fd;
+}
+
+struct dma_fence *nv_drm_sync_file_get_fence(int fd)
+{
+    return sync_file_get_fence(fd);
+}
+
+void nv_drm_yield(void)
+{
+    set_current_state(TASK_INTERRUPTIBLE);
+    schedule_timeout(1);
+}
+
+#endif /* NV_DRM_AVAILABLE */
diff --git a/kernel-open/nvidia-drm/nvidia-drm-os-interface.h b/kernel-open/nvidia-drm/nvidia-drm-os-interface.h
new file mode 100644
index 0000000..a1161df
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-os-interface.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2015-2025, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DRM_OS_INTERFACE_H__
+#define __NVIDIA_DRM_OS_INTERFACE_H__
+
+#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
+
+#include "nvtypes.h"
+
+#if defined(NV_DRM_AVAILABLE)
+
+#include "linux/dma-fence.h"
+
+#if defined(NV_LINUX) || defined(NV_BSD)
+#include "nv-kthread-q.h"
+#include "linux/spinlock.h"
+
+typedef struct nv_drm_workthread {
+    spinlock_t lock;
+    struct nv_kthread_q q;
+    bool shutting_down;
+} nv_drm_workthread;
+
+typedef nv_kthread_q_item_t nv_drm_work;
+
+#else
+#error "Need to define deferred work primitives for this OS"
+#endif
+
+#if defined(NV_LINUX) || defined(NV_BSD)
+#include "nv-timer.h"
+
+typedef struct nv_timer nv_drm_timer;
+
+#else
+#error "Need to define kernel timer callback primitives for this OS"
+#endif
+
+struct page;
+
+/* Set to true when the atomic modeset feature is enabled.
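+ * Controlled by the 'modeset' module parameter; defaults to false
+ * (atomic modesetting disabled).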
+ */
+extern bool nv_drm_modeset_module_param;
+#if defined(NV_DRM_FBDEV_AVAILABLE)
+/* Set to true when the nvidia-drm driver should install a framebuffer device */
+extern bool nv_drm_fbdev_module_param;
+#endif
+
+void *nv_drm_calloc(size_t nmemb, size_t size);
+
+void nv_drm_free(void *ptr);
+
+void nv_drm_write_combine_flush(void);
+
+int nv_drm_lock_user_pages(unsigned long address,
+                           unsigned long pages_count, struct page ***pages);
+
+void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages);
+
+void *nv_drm_vmap(struct page **pages, unsigned long pages_count, bool cached);
+
+void nv_drm_vunmap(void *address);
+
+bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name);
+
+/* Can be called concurrently with nv_drm_workthread_add_work() */
+void nv_drm_workthread_shutdown(nv_drm_workthread *worker);
+
+void nv_drm_workthread_work_init(nv_drm_work *work,
+                                 void (*callback)(void *),
+                                 void *arg);
+
+/* Can be called concurrently with nv_drm_workthread_shutdown() */
+int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work);
+
+void nv_drm_timer_setup(nv_drm_timer *timer,
+                        void (*callback)(nv_drm_timer *nv_drm_timer));
+
+void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long timeout_native);
+
+unsigned long nv_drm_timer_now(void);
+
+unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms);
+
+int nv_drm_create_sync_file(struct dma_fence *fence);
+
+struct dma_fence *nv_drm_sync_file_get_fence(int fd);
+
+void nv_drm_yield(void);
+
+#endif /* defined(NV_DRM_AVAILABLE) */
+
+#endif /* __NVIDIA_DRM_OS_INTERFACE_H__ */
diff --git a/kernel-open/nvidia-drm/nvidia-drm-priv.h b/kernel-open/nvidia-drm/nvidia-drm-priv.h
new file mode 100644
index 0000000..45bc068
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-priv.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DRM_PRIV_H__
+#define __NVIDIA_DRM_PRIV_H__
+
+#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
+
+#if defined(NV_DRM_AVAILABLE)
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+
+#include "nvidia-drm-os-interface.h"
+
+#include "nvkms-kapi.h"
+
+#define NV_DRM_LOG_ERR(__fmt, ...) \
+    DRM_ERROR("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
+
+/*
+ * DRM_WARN() was added in v4.9 by kernel commit
+ * 30b0da8d556e65ff935a56cd82c05ba0516d3e4a
+ *
+ * Before this commit, only DRM_INFO and DRM_ERROR were defined, and
+ * DRM_INFO(fmt, ...) was defined as
+ * printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__). So, if
+ * DRM_WARN is undefined, define NV_DRM_LOG_WARN following the
+ * same pattern as DRM_INFO.
+ */
+#ifdef DRM_WARN
+#define NV_DRM_LOG_WARN(__fmt, ...) \
+    DRM_WARN("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
+#else
+#define NV_DRM_LOG_WARN(__fmt, ...) \
+    printk(KERN_WARNING "[" DRM_NAME "] [nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
+#endif
+
+#define NV_DRM_LOG_INFO(__fmt, ...) \
+    DRM_INFO("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
+
+#define NV_DRM_DEV_LOG_INFO(__dev, __fmt, ...) \
+    NV_DRM_LOG_INFO("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
+
+#define NV_DRM_DEV_LOG_WARN(__dev, __fmt, ...) \
+    NV_DRM_LOG_WARN("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
+
+#define NV_DRM_DEV_LOG_ERR(__dev, __fmt, ...) \
+    NV_DRM_LOG_ERR("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
+
+#define NV_DRM_WARN(__condition) WARN_ON((__condition))
+
+#define NV_DRM_DEBUG_DRIVER(__fmt, ...) \
+    DRM_DEBUG_DRIVER("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
+
+#define NV_DRM_DEV_DEBUG_DRIVER(__dev, __fmt, ...) \
+    DRM_DEBUG_DRIVER("[GPU ID 0x%08x] " __fmt, \
+                     __dev->gpu_info.gpu_id, ##__VA_ARGS__)
+
+enum nv_drm_input_color_space {
+    NV_DRM_INPUT_COLOR_SPACE_NONE,
+    NV_DRM_INPUT_COLOR_SPACE_SCRGB_LINEAR,
+    NV_DRM_INPUT_COLOR_SPACE_BT2100_PQ
+};
+
+struct nv_drm_device {
+    nv_gpu_info_t gpu_info;
+    MIGDeviceId gpu_mig_device;
+
+    struct drm_device *dev;
+
+    struct NvKmsKapiDevice *pDevice;
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+    /*
+     * Lock to protect the DRM subsystem and the fields of this structure
+     * from concurrent access.
+     *
+     * Do not acquire this lock while a lock from the core DRM subsystem is
+     * already held; the locking order should be like this -
+     *
+     * mutex_lock(nv_drm_device::lock);
+     * ....
+     * mutex_lock(drm_device::mode_config::lock);
+     * ....
+     * .......
+     * mutex_unlock(drm_device::mode_config::lock);
+     * ........
+     * ..
+     * mutex_lock(drm_device::struct_mutex);
+     * ....
+     * ........
+     * mutex_unlock(drm_device::struct_mutex);
+     * ..
+     * mutex_unlock(nv_drm_device::lock);
+     */
+    struct mutex lock;
+
+    NvU32 pitchAlignment;
+
+    NvU8 genericPageKind;
+    NvU8 pageKindGeneration;
+    NvU8 sectorLayout;
+    NvU64 modifiers[6 /* block linear */ + 1 /* linear */ + 1 /* terminator */];
+
+    struct delayed_work hotplug_event_work;
+    atomic_t enable_event_handling;
+
+    /**
+     * @flip_event_wq:
+     *
+     * The wait queue on which nv_drm_atomic_commit() sleeps until the
+     * next flip event occurs.
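+     * Woken by __nv_drm_handle_flip_event() each time a flip completes.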
+     */
+    wait_queue_head_t flip_event_wq;
+
+#endif
+
+    NvU64 semsurf_stride;
+    NvU64 semsurf_max_submitted_offset;
+
+    NvBool hasVideoMemory;
+
+    NvBool supportsSyncpts;
+    NvBool requiresVrrSemaphores;
+    NvBool subOwnershipGranted;
+    NvBool hasFramebufferConsole;
+
+    struct drm_property *nv_out_fence_property;
+    struct drm_property *nv_input_colorspace_property;
+
+    struct {
+        NvU32 count;
+        NvU32 next_index;
+    } display_semaphores;
+
+#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
+    struct drm_property *nv_hdr_output_metadata_property;
+#endif
+
+    struct drm_property *nv_plane_lms_ctm_property;
+    struct drm_property *nv_plane_lms_to_itp_ctm_property;
+    struct drm_property *nv_plane_itp_to_lms_ctm_property;
+    struct drm_property *nv_plane_blend_ctm_property;
+
+    struct drm_property *nv_plane_degamma_tf_property;
+    struct drm_property *nv_plane_degamma_lut_property;
+    struct drm_property *nv_plane_degamma_lut_size_property;
+    struct drm_property *nv_plane_degamma_multiplier_property;
+
+    struct drm_property *nv_plane_tmo_lut_property;
+    struct drm_property *nv_plane_tmo_lut_size_property;
+
+    struct drm_property *nv_crtc_regamma_tf_property;
+    struct drm_property *nv_crtc_regamma_lut_property;
+    struct drm_property *nv_crtc_regamma_lut_size_property;
+    struct drm_property *nv_crtc_regamma_divisor_property;
+
+    struct nv_drm_device *next;
+
+    NvU64 vtFbBaseAddress;
+    NvU64 vtFbSize;
+};
+
+static inline NvU32 nv_drm_next_display_semaphore(
+    struct nv_drm_device *nv_dev)
+{
+    NvU32 current_index = nv_dev->display_semaphores.next_index++;
+
+    if (nv_dev->display_semaphores.next_index >=
+        nv_dev->display_semaphores.count) {
+        nv_dev->display_semaphores.next_index = 0;
+    }
+
+    return current_index;
+}
+
+static inline struct nv_drm_device *to_nv_device(
+    struct drm_device *dev)
+{
+    return dev->dev_private;
+}
+
+extern const struct NvKmsKapiFunctionsTable* const nvKms;
+
+#endif /* defined(NV_DRM_AVAILABLE) */
+
+#endif /* __NVIDIA_DRM_PRIV_H__ */
diff --git a/kernel-open/nvidia-drm/nvidia-drm-sources.mk b/kernel-open/nvidia-drm/nvidia-drm-sources.mk
new file mode 100644
index 0000000..2e51ca1
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-sources.mk
@@ -0,0 +1,111 @@
+###########################################################################
+# Kbuild fragment for nvidia-drm.ko
+###########################################################################
+
+#
+# Define NVIDIA_DRM_SOURCES
+#
+
+NVIDIA_DRM_SOURCES =
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-drv.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-utils.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-crtc.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-encoder.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-connector.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fb.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-modeset.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fence.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-helper.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nv-kthread-q.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nv-pci-table.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-nvkms-memory.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-user-memory.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-dma-buf.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-format.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-os-interface.c
+
+#
+# Register the conftests needed by nvidia-drm.ko
+#
+
+NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
+NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_atomic_available
+NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_inc
+NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_dec_and_test
+NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available
+NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_fd_to_handle
+NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_handle_to_fd
+NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync
+
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages_remote
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages_remote
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_driver_has_gem_prime_res_obj
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_funcs_have_mode_in_name
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_has_vrr_capable_property
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_for_each_possible_encoder
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += aperture_remove_conflicting_devices
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += aperture_remove_conflicting_pci_devices
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_generic_setup
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_ttm_setup
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_client_setup
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_attach_hdr_output_metadata_property
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_plane_create_color_properties
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_legacy_gamma_set
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_mixed
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_prime_mmap
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_sysfs_connector_property_event
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_sysfs_connector_status_event
+
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_legacy_dev_list
+NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_prime_flag_present
+NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_has_resv
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_async_flip
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_pageflip_flags
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_vrr_enabled
+NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_offset_node_has_readonly
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_display_mode_has_vrefresh
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_master_set_has_int_return_type
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_free_object
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_prime_pages_to_sg_has_drm_device_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_callbacks
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_atomic_check_has_atomic_state_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_vmap_has_map_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_plane_atomic_check_has_atomic_state_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_device_has_pdev
drm_device_has_pdev +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_no_vblank +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_config_has_allow_fb_modifiers +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_has_hdr_output_metadata +NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_add_fence +NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_reserve_fences +NV_CONFTEST_TYPE_COMPILE_TESTS += reservation_object_reserve_shared_has_num_fences_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_has_override_edid +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_file_get_master +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_modeset_lock_all_end +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy +NV_CONFTEST_TYPE_COMPILE_TESTS += fence_ops_use_64bit_seqno +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers_has_driver_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers_has_no_primary_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers_has_driver_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_create_dp_colorspace_property_has_supported_colorspaces_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_syncobj_features_present +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_unlocked_ioctl_flag_present +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_color_ctm_3x4_present +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_info_has_is_yuv +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_mmap +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_output_poll_changed +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_date +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_helper_funcs_mode_valid_has_const_mode_arg diff --git a/kernel-open/nvidia-drm/nvidia-drm-utils.c b/kernel-open/nvidia-drm/nvidia-drm-utils.c new file mode 100644 index 0000000..85820a6 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-utils.c @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#include +#include +#include + +#include "nvidia-drm-priv.h" +#include "nvidia-drm-utils.h" + +struct NvKmsKapiConnectorInfo* +nvkms_get_connector_info(struct NvKmsKapiDevice *pDevice, + NvKmsKapiConnector hConnector) +{ + struct NvKmsKapiConnectorInfo *connectorInfo = + nv_drm_calloc(1, sizeof(*connectorInfo)); + + if (connectorInfo == NULL) { + return ERR_PTR(-ENOMEM); + } + + if (!nvKms->getConnectorInfo(pDevice, hConnector, connectorInfo)) { + nv_drm_free(connectorInfo); + + return ERR_PTR(-EINVAL); + } + + return connectorInfo; +} + +int +nvkms_connector_signal_to_drm_encoder_signal(NvKmsConnectorSignalFormat format) +{ + switch (format) { + default: + case NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN: + return DRM_MODE_ENCODER_NONE; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS: + case NVKMS_CONNECTOR_SIGNAL_FORMAT_DP: + return DRM_MODE_ENCODER_TMDS; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS: + return DRM_MODE_ENCODER_LVDS; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA: + return DRM_MODE_ENCODER_DAC; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI: + return DRM_MODE_ENCODER_DSI; + } +} + +int nvkms_connector_type_to_drm_connector_type(NvKmsConnectorType type, + NvBool internal) +{ + switch (type) { + default: + case NVKMS_CONNECTOR_TYPE_UNKNOWN: + return DRM_MODE_CONNECTOR_Unknown; + case NVKMS_CONNECTOR_TYPE_DP: + return + internal ? + DRM_MODE_CONNECTOR_eDP : DRM_MODE_CONNECTOR_DisplayPort; + case NVKMS_CONNECTOR_TYPE_HDMI: + return DRM_MODE_CONNECTOR_HDMIA; + case NVKMS_CONNECTOR_TYPE_DVI_D: + return DRM_MODE_CONNECTOR_DVID; + case NVKMS_CONNECTOR_TYPE_DVI_I: + return DRM_MODE_CONNECTOR_DVII; + case NVKMS_CONNECTOR_TYPE_LVDS: + return DRM_MODE_CONNECTOR_LVDS; + case NVKMS_CONNECTOR_TYPE_VGA: + return DRM_MODE_CONNECTOR_VGA; + case NVKMS_CONNECTOR_TYPE_DSI: + return DRM_MODE_CONNECTOR_DSI; + case NVKMS_CONNECTOR_TYPE_DP_SERIALIZER: + return DRM_MODE_CONNECTOR_DisplayPort; + } +} + +void +nvkms_display_mode_to_drm_mode(const struct NvKmsKapiDisplayMode *displayMode, + struct drm_display_mode *mode) +{ +#if defined(NV_DRM_DISPLAY_MODE_HAS_VREFRESH) + mode->vrefresh = (displayMode->timings.refreshRate + 500) / 1000; /* In Hz */ +#endif + + mode->clock = (displayMode->timings.pixelClockHz + 500) / 1000; /* In Hz */ + + mode->hdisplay = displayMode->timings.hVisible; + mode->hsync_start = displayMode->timings.hSyncStart; + mode->hsync_end = displayMode->timings.hSyncEnd; + mode->htotal = displayMode->timings.hTotal; + mode->hskew = displayMode->timings.hSkew; + + mode->vdisplay = displayMode->timings.vVisible; + mode->vsync_start = displayMode->timings.vSyncStart; + mode->vsync_end = displayMode->timings.vSyncEnd; + mode->vtotal = displayMode->timings.vTotal; + + if (displayMode->timings.flags.interlaced) { + mode->flags |= DRM_MODE_FLAG_INTERLACE; + } + + if (displayMode->timings.flags.doubleScan) { + mode->flags |= DRM_MODE_FLAG_DBLSCAN; + } + + if (displayMode->timings.flags.hSyncPos) { + mode->flags |= DRM_MODE_FLAG_PHSYNC; + } + + if (displayMode->timings.flags.hSyncNeg) { + mode->flags |= DRM_MODE_FLAG_NHSYNC; + } + + if (displayMode->timings.flags.vSyncPos) { + mode->flags |= DRM_MODE_FLAG_PVSYNC; + } + + if (displayMode->timings.flags.vSyncNeg) { + mode->flags |= DRM_MODE_FLAG_NVSYNC; + } + + mode->width_mm = displayMode->timings.widthMM; + mode->height_mm = displayMode->timings.heightMM; + + if 
(strlen(displayMode->name) != 0) { + memcpy( + mode->name, displayMode->name, + min(sizeof(mode->name), sizeof(displayMode->name))); + + mode->name[sizeof(mode->name) - 1] = '\0'; + } else { + drm_mode_set_name(mode); + } +} + +void drm_mode_to_nvkms_display_mode(const struct drm_display_mode *src, + struct NvKmsKapiDisplayMode *dst) +{ +#if defined(NV_DRM_DISPLAY_MODE_HAS_VREFRESH) + dst->timings.refreshRate = src->vrefresh * 1000; +#else + dst->timings.refreshRate = drm_mode_vrefresh(src) * 1000; +#endif + + dst->timings.pixelClockHz = src->clock * 1000; /* In Hz */ + + dst->timings.hVisible = src->hdisplay; + dst->timings.hSyncStart = src->hsync_start; + dst->timings.hSyncEnd = src->hsync_end; + dst->timings.hTotal = src->htotal; + dst->timings.hSkew = src->hskew; + + dst->timings.vVisible = src->vdisplay; + dst->timings.vSyncStart = src->vsync_start; + dst->timings.vSyncEnd = src->vsync_end; + dst->timings.vTotal = src->vtotal; + + if (src->flags & DRM_MODE_FLAG_INTERLACE) { + dst->timings.flags.interlaced = NV_TRUE; + } else { + dst->timings.flags.interlaced = NV_FALSE; + } + + if (src->flags & DRM_MODE_FLAG_DBLSCAN) { + dst->timings.flags.doubleScan = NV_TRUE; + } else { + dst->timings.flags.doubleScan = NV_FALSE; + } + + if (src->flags & DRM_MODE_FLAG_PHSYNC) { + dst->timings.flags.hSyncPos = NV_TRUE; + } else { + dst->timings.flags.hSyncPos = NV_FALSE; + } + + if (src->flags & DRM_MODE_FLAG_NHSYNC) { + dst->timings.flags.hSyncNeg = NV_TRUE; + } else { + dst->timings.flags.hSyncNeg = NV_FALSE; + } + + if (src->flags & DRM_MODE_FLAG_PVSYNC) { + dst->timings.flags.vSyncPos = NV_TRUE; + } else { + dst->timings.flags.vSyncPos = NV_FALSE; + } + + if (src->flags & DRM_MODE_FLAG_NVSYNC) { + dst->timings.flags.vSyncNeg = NV_TRUE; + } else { + dst->timings.flags.vSyncNeg = NV_FALSE; + } + + dst->timings.widthMM = src->width_mm; + dst->timings.heightMM = src->height_mm; + + memcpy(dst->name, src->name, min(sizeof(dst->name), sizeof(src->name))); +} + +#endif diff --git a/kernel-open/nvidia-drm/nvidia-drm-utils.h b/kernel-open/nvidia-drm/nvidia-drm-utils.h new file mode 100644 index 0000000..2c0588a --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-utils.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVIDIA_DRM_UTILS_H__ +#define __NVIDIA_DRM_UTILS_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvkms-kapi.h" + +enum drm_plane_type; +struct drm_display_mode; + +struct NvKmsKapiConnectorInfo* +nvkms_get_connector_info(struct NvKmsKapiDevice *pDevice, + NvKmsKapiConnector hConnector); + +int nvkms_connector_signal_to_drm_encoder_signal( + NvKmsConnectorSignalFormat format); + +int nvkms_connector_type_to_drm_connector_type(NvKmsConnectorType type, + NvBool internal); + +void nvkms_display_mode_to_drm_mode( + const struct NvKmsKapiDisplayMode *displayMode, + struct drm_display_mode *mode); + +void drm_mode_to_nvkms_display_mode(const struct drm_display_mode *src, + struct NvKmsKapiDisplayMode *dst); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#endif /* __NVIDIA_DRM_UTILS_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm.Kbuild b/kernel-open/nvidia-drm/nvidia-drm.Kbuild new file mode 100644 index 0000000..892fa4d --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm.Kbuild @@ -0,0 +1,33 @@ +########################################################################### +# Kbuild fragment for nvidia-drm.ko +########################################################################### + +# Get our source file list and conftest list from the common file +include $(src)/nvidia-drm/nvidia-drm-sources.mk + +# Linux-specific sources +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-linux.c + +# +# Define NVIDIA_DRM_{SOURCES,OBJECTS} +# + +NVIDIA_DRM_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_DRM_SOURCES)) + +obj-m += nvidia-drm.o +nvidia-drm-y := $(NVIDIA_DRM_OBJECTS) + +NVIDIA_DRM_KO = nvidia-drm/nvidia-drm.ko + +NV_KERNEL_MODULE_TARGETS += $(NVIDIA_DRM_KO) + +# +# Define nvidia-drm.ko-specific CFLAGS. +# + +NVIDIA_DRM_CFLAGS += -I$(src)/nvidia-drm +NVIDIA_DRM_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0 + +$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_DRM_OBJECTS), $(NVIDIA_DRM_CFLAGS)) + +NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_DRM_OBJECTS) diff --git a/kernel-open/nvidia-drm/nvidia-drm.c b/kernel-open/nvidia-drm/nvidia-drm.c new file mode 100644 index 0000000..e42496a --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm.c @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvidia-drm.h" + +#if defined(NV_DRM_AVAILABLE) + +#include "nvidia-drm-priv.h" +#include "nvidia-drm-drv.h" + +static struct NvKmsKapiFunctionsTable nvKmsFuncsTable = { + .versionString = NV_VERSION_STRING, +}; + +const struct NvKmsKapiFunctionsTable* const nvKms = &nvKmsFuncsTable; + +#endif + +int nv_drm_init(void) +{ +#if defined(NV_DRM_AVAILABLE) + if (!nvKmsKapiGetFunctionsTable(&nvKmsFuncsTable)) { + NV_DRM_LOG_ERR( + "Version mismatch: nvidia-modeset.ko(%s) nvidia-drm.ko(%s)", + nvKmsFuncsTable.versionString, NV_VERSION_STRING); + return -EINVAL; + } + + nvKms->setSuspendResumeCallback(nv_drm_suspend_resume); + return nv_drm_probe_devices(); +#else + return 0; +#endif +} + +void nv_drm_exit(void) +{ +#if defined(NV_DRM_AVAILABLE) + nvKms->setSuspendResumeCallback(NULL); + nv_drm_remove_devices(); +#endif +} diff --git a/kernel-open/nvidia-drm/nvidia-drm.h b/kernel-open/nvidia-drm/nvidia-drm.h new file mode 100644 index 0000000..9f1c31c --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_H__ +#define __NVIDIA_DRM_H__ + +#include "nvidia-drm-conftest.h" + +int nv_drm_init(void); +void nv_drm_exit(void); + +#endif /* __NVIDIA_DRM_H__ */ diff --git a/kernel-open/nvidia-modeset/nv-kthread-q.c b/kernel-open/nvidia-modeset/nv-kthread-q.c new file mode 100644 index 0000000..edc4cbb --- /dev/null +++ b/kernel-open/nvidia-modeset/nv-kthread-q.c @@ -0,0 +1,329 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-kthread-q.h" +#include "nv-list-helpers.h" + +#include +#include +#include +#include +#include +#include + +// Today's implementation is a little simpler and more limited than the +// API description allows for in nv-kthread-q.h. Details include: +// +// 1. Each nv_kthread_q instance is a first-in, first-out queue. +// +// 2. Each nv_kthread_q instance is serviced by exactly one kthread. +// +// You can create any number of queues, each of which gets its own +// named kernel thread (kthread). You can then insert arbitrary functions +// into the queue, and those functions will be run in the context of the +// queue's kthread. + +#ifndef WARN + // Only *really* old kernels (2.6.9) end up here. Just use a simple printk + // to implement this, because such kernels won't be supported much longer. + #define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + printk(KERN_ERR format); \ + unlikely(__ret_warn_on); \ + }) +#endif + +#define NVQ_WARN(fmt, ...) \ + do { \ + if (in_interrupt()) { \ + WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \ + ##__VA_ARGS__); \ + } \ + else { \ + WARN(1, "nv_kthread_q: task: %s: " fmt, \ + current->comm, \ + ##__VA_ARGS__); \ + } \ + } while (0) + +static int _main_loop(void *args) +{ + nv_kthread_q_t *q = (nv_kthread_q_t *)args; + nv_kthread_q_item_t *q_item = NULL; + unsigned long flags; + + while (1) { + // Normally this thread is never interrupted. However, + // down_interruptible (instead of down) is called here, + // in order to avoid being classified as a potentially + // hung task, by the kernel watchdog. + while (down_interruptible(&q->q_sem)) + NVQ_WARN("Interrupted during semaphore wait\n"); + + if (atomic_read(&q->main_loop_should_exit)) + break; + + spin_lock_irqsave(&q->q_lock, flags); + + // The q_sem semaphore prevents us from getting here unless there is + // at least one item in the list, so an empty list indicates a bug. + if (unlikely(list_empty(&q->q_list_head))) { + spin_unlock_irqrestore(&q->q_lock, flags); + NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q); + continue; + } + + // Consume one item from the queue + q_item = list_first_entry(&q->q_list_head, + nv_kthread_q_item_t, + q_list_node); + + list_del_init(&q_item->q_list_node); + + spin_unlock_irqrestore(&q->q_lock, flags); + + // Run the item + q_item->function_to_run(q_item->function_args); + + // Make debugging a little simpler by clearing this between runs: + q_item = NULL; + } + + while (!kthread_should_stop()) + schedule(); + + return 0; +} + +void nv_kthread_q_stop(nv_kthread_q_t *q) +{ + // check if queue has been properly initialized + if (unlikely(!q->q_kthread)) + return; + + nv_kthread_q_flush(q); + + // If this assertion fires, then a caller likely either broke the API rules, + // by adding items after calling nv_kthread_q_stop, or possibly messed up + // with inadequate flushing of self-rescheduling q_items. 
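+ // (Note the related 2x flush in nv_kthread_q_flush() below: flushing + // twice is what catches a q_item that reschedules itself once.)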
+ if (unlikely(!list_empty(&q->q_list_head))) + NVQ_WARN("list not empty after flushing\n"); + + if (likely(!atomic_read(&q->main_loop_should_exit))) { + + atomic_set(&q->main_loop_should_exit, 1); + + // Wake up the kthread so that it can see that it needs to stop: + up(&q->q_sem); + + kthread_stop(q->q_kthread); + q->q_kthread = NULL; + } +} + +// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by +// kthread_create_on_node relies on a 2 entry, per-core cache to minimize +// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the +// stack location ends up being a function of the core assigned to the current +// thread, instead of being a function of the specified NUMA node. The cache was +// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0 +// ("fork: Optimize task creation by caching two thread stacks per CPU if +// CONFIG_VMAP_STACK=y") +// +// To work around the problematic cache, we create up to three kernel threads +// -If the first thread's stack is resident on the preferred node, return this +// thread. +// -Otherwise, create a second thread. If its stack is resident on the +// preferred node, stop the first thread and return this one. +// -Otherwise, create a third thread. The stack allocator does not find a +// cached stack, and so falls back to vmalloc, which takes the NUMA hint into +// consideration. The first two threads are then stopped. +// +// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned. +// +// This function is never invoked when there is no NUMA preference (preferred +// node is NUMA_NO_NODE). +static struct task_struct *thread_create_on_node(int (*threadfn)(void *data), + nv_kthread_q_t *q, + int preferred_node, + const char *q_name) +{ + + unsigned i, j; + static const unsigned attempts = 3; + struct task_struct *thread[3]; + + for (i = 0;; i++) { + struct page *stack; + + thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name); + + if (unlikely(IS_ERR(thread[i]))) { + + // Instead of failing, pick the previous thread, even if its + // stack is not allocated on the preferred node. + if (i > 0) + i--; + + break; + } + + // vmalloc is not used to allocate the stack, so simply return the + // thread, even if its stack may not be allocated on the preferred node + if (!is_vmalloc_addr(thread[i]->stack)) + break; + + // Ran out of attempts - return thread even if its stack may not be + // allocated on the preferred node + if (i == (attempts - 1)) + break; + + // Get the NUMA node where the first page of the stack is resident. If + // it is the preferred node, select this thread. + stack = vmalloc_to_page(thread[i]->stack); + if (page_to_nid(stack) == preferred_node) + break; + } + + for (j = i; j > 0; j--) + kthread_stop(thread[j - 1]); + + return thread[i]; +} + +int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node) +{ + memset(q, 0, sizeof(*q)); + + INIT_LIST_HEAD(&q->q_list_head); + spin_lock_init(&q->q_lock); + sema_init(&q->q_sem, 0); + + if (preferred_node == NV_KTHREAD_NO_NODE) { + q->q_kthread = kthread_create(_main_loop, q, q_name); + } + else { + q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name); + } + + if (IS_ERR(q->q_kthread)) { + int err = PTR_ERR(q->q_kthread); + + // Clear q_kthread before returning so that nv_kthread_q_stop() can be + // safely called on it making error handling easier. 
+ q->q_kthread = NULL; + + return err; + } + + wake_up_process(q->q_kthread); + + return 0; +} + +int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname) +{ + return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE); +} + +// Returns true (non-zero) if the item was actually scheduled, and false if the +// item was already pending in a queue. +static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item) +{ + unsigned long flags; + int ret = 1; + + spin_lock_irqsave(&q->q_lock, flags); + + if (likely(list_empty(&q_item->q_list_node))) + list_add_tail(&q_item->q_list_node, &q->q_list_head); + else + ret = 0; + + spin_unlock_irqrestore(&q->q_lock, flags); + + if (likely(ret)) + up(&q->q_sem); + + return ret; +} + +void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item, + nv_q_func_t function_to_run, + void *function_args) +{ + INIT_LIST_HEAD(&q_item->q_list_node); + q_item->function_to_run = function_to_run; + q_item->function_args = function_args; +} + +// Returns true (non-zero) if the q_item got scheduled, false otherwise. +int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q, + nv_kthread_q_item_t *q_item) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was " + "called with a non-alive q: 0x%p\n", q); + return 0; + } + + return _raw_q_schedule(q, q_item); +} + +static void _q_flush_function(void *args) +{ + struct completion *completion = (struct completion *)args; + complete(completion); +} + + +static void _raw_q_flush(nv_kthread_q_t *q) +{ + nv_kthread_q_item_t q_item; + DECLARE_COMPLETION_ONSTACK(completion); + + nv_kthread_q_item_init(&q_item, _q_flush_function, &completion); + + _raw_q_schedule(q, &q_item); + + // Wait for the flush item to run. Once it has run, then all of the + // previously queued items in front of it will have run, so that means + // the flush is complete. + wait_for_completion(&completion); +} + +void nv_kthread_q_flush(nv_kthread_q_t *q) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_flush was called after " + "nv_kthread_q_stop. q: 0x%p\n", q); + return; + } + + // This 2x flush is not a typing mistake. The queue really does have to be + // flushed twice, in order to take care of the case of a q_item that + // reschedules itself. + _raw_q_flush(q); + _raw_q_flush(q); +} diff --git a/kernel-open/nvidia-modeset/nvidia-modeset-linux.c b/kernel-open/nvidia-modeset/nvidia-modeset-linux.c new file mode 100644 index 0000000..5660734 --- /dev/null +++ b/kernel-open/nvidia-modeset/nvidia-modeset-linux.c @@ -0,0 +1,2198 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include /* do_div() */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "nvstatus.h" + +#include "nv-modeset-interface.h" +#include "nv-kref.h" + +#include "nvidia-modeset-os-interface.h" +#include "nvkms.h" +#include "nvkms-ioctl.h" + +#include "conftest.h" +#include "nv-procfs.h" +#include "nv-kthread-q.h" +#include "nv-time.h" +#include "nv-timer.h" +#include "nv-lock.h" +#include "nv-chardev-numbers.h" + +/* + * Commit aefb2f2e619b ("x86/bugs: Rename CONFIG_RETPOLINE => + * CONFIG_MITIGATION_RETPOLINE") in v6.8 renamed CONFIG_RETPOLINE. + */ +#if !defined(CONFIG_RETPOLINE) && !defined(CONFIG_MITIGATION_RETPOLINE) +#include "nv-retpoline.h" +#endif + +#include + +#define NVKMS_LOG_PREFIX "nvidia-modeset: " + +static bool output_rounding_fix = true; +module_param_named(output_rounding_fix, output_rounding_fix, bool, 0400); + +static bool disable_hdmi_frl = false; +module_param_named(disable_hdmi_frl, disable_hdmi_frl, bool, 0400); + +static bool disable_vrr_memclk_switch = false; +module_param_named(disable_vrr_memclk_switch, disable_vrr_memclk_switch, bool, 0400); + +static bool hdmi_deepcolor = true; +module_param_named(hdmi_deepcolor, hdmi_deepcolor, bool, 0400); + +static bool vblank_sem_control = true; +module_param_named(vblank_sem_control, vblank_sem_control, bool, 0400); + +static bool opportunistic_display_sync = true; +module_param_named(opportunistic_display_sync, opportunistic_display_sync, bool, 0400); + +static enum NvKmsDebugForceColorSpace debug_force_color_space = NVKMS_DEBUG_FORCE_COLOR_SPACE_NONE; +module_param_named(debug_force_color_space, debug_force_color_space, uint, 0400); + +static bool enable_overlay_layers = true; +module_param_named(enable_overlay_layers, enable_overlay_layers, bool, 0400); + +/* These parameters are used for fault injection tests. Normally the defaults + * should be used. */ +MODULE_PARM_DESC(fail_malloc, "Fail the Nth call to nvkms_alloc"); +static int fail_malloc_num = -1; +module_param_named(fail_malloc, fail_malloc_num, int, 0400); + +MODULE_PARM_DESC(malloc_verbose, "Report information about malloc calls on module unload"); +static bool malloc_verbose = false; +module_param_named(malloc_verbose, malloc_verbose, bool, 0400); + +MODULE_PARM_DESC(conceal_vrr_caps, + "Conceal all display VRR capabilities"); +static bool conceal_vrr_caps = false; +module_param_named(conceal_vrr_caps, conceal_vrr_caps, bool, 0400); + +/* Fail allocating the RM core channel for NVKMS using the i-th method (see + * FailAllocCoreChannelMethod). Failures not using the i-th method are ignored.
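+ * The default of -1 matches no method, so core channel allocation is never + * artificially failed unless this parameter is set explicitly.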
*/ +MODULE_PARM_DESC(fail_alloc_core_channel, "Control testing for hardware core channel allocation failure"); +static int fail_alloc_core_channel_method = -1; +module_param_named(fail_alloc_core_channel, fail_alloc_core_channel_method, int, 0400); + +#if NVKMS_CONFIG_FILE_SUPPORTED +/* This parameter is used to find the dpy override conf file */ +#define NVKMS_CONF_FILE_SPECIFIED (nvkms_conf != NULL) + +MODULE_PARM_DESC(config_file, + "Path to the nvidia-modeset configuration file (default: disabled)"); +static char *nvkms_conf = NULL; +module_param_named(config_file, nvkms_conf, charp, 0400); +#endif + +static atomic_t nvkms_alloc_called_count; + +NvBool nvkms_test_fail_alloc_core_channel( + enum FailAllocCoreChannelMethod method +) +{ + if (method != fail_alloc_core_channel_method) { + // don't fail if it's not the currently specified method + return NV_FALSE; + } + + printk(KERN_INFO NVKMS_LOG_PREFIX + "Failing core channel allocation using method %d", + fail_alloc_core_channel_method); + + return NV_TRUE; +} + +NvBool nvkms_conceal_vrr_caps(void) +{ + return conceal_vrr_caps; +} + +NvBool nvkms_output_rounding_fix(void) +{ + return output_rounding_fix; +} + +NvBool nvkms_disable_hdmi_frl(void) +{ + return disable_hdmi_frl; +} + +NvBool nvkms_disable_vrr_memclk_switch(void) +{ + return disable_vrr_memclk_switch; +} + +NvBool nvkms_hdmi_deepcolor(void) +{ + return hdmi_deepcolor; +} + +NvBool nvkms_vblank_sem_control(void) +{ + return vblank_sem_control; +} + +NvBool nvkms_opportunistic_display_sync(void) +{ + return opportunistic_display_sync; +} + +enum NvKmsDebugForceColorSpace nvkms_debug_force_color_space(void) +{ + if (debug_force_color_space >= NVKMS_DEBUG_FORCE_COLOR_SPACE_MAX) { + return NVKMS_DEBUG_FORCE_COLOR_SPACE_NONE; + } + return debug_force_color_space; +} + +NvBool nvkms_enable_overlay_layers(void) +{ + return enable_overlay_layers; +} + +NvBool nvkms_kernel_supports_syncpts(void) +{ +/* + * Note this only checks that the kernel has the prerequisite + * support for syncpts; callers must also check that the hardware + * supports syncpts. + */ +#if (defined(CONFIG_TEGRA_GRHOST) || defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)) + return NV_TRUE; +#else + return NV_FALSE; +#endif +} + +#define NVKMS_SYNCPT_STUBS_NEEDED + +/************************************************************************* + * NVKMS interface for nvhost unit for sync point APIs. + *************************************************************************/ +#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST) + +#undef NVKMS_SYNCPT_STUBS_NEEDED + +#include + +NvBool nvkms_syncpt_op( + enum NvKmsSyncPtOp op, + NvKmsSyncPtOpParams *params) +{ + struct platform_device *pdev = nvhost_get_default_device(); + + switch (op) { + + case NVKMS_SYNCPT_OP_ALLOC: + params->alloc.id = nvhost_get_syncpt_client_managed( + pdev, params->alloc.syncpt_name); + break; + + case NVKMS_SYNCPT_OP_PUT: + nvhost_syncpt_put_ref_ext(pdev, params->put.id); + break; + + case NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH: { + + struct nvhost_fence *fence; + NvU32 id, thresh; + + fence = nvhost_fence_get(params->fd_to_id_and_thresh.fd); + if (fence == NULL) { + return NV_FALSE; + } + + if (nvhost_fence_num_pts(fence) > 1) { + /*! 
Syncpoint fence fd contains more than one syncpoint */ + nvhost_fence_put(fence); + return NV_FALSE; + } + + if (nvhost_fence_get_pt(fence, 0, &id, &thresh) != 0) { + nvhost_fence_put(fence); + return NV_FALSE; + } + + params->fd_to_id_and_thresh.id = id; + params->fd_to_id_and_thresh.thresh = thresh; + + nvhost_fence_put(fence); + + break; + } + + case NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD: + nvhost_syncpt_create_fence_single_ext( + pdev, + params->id_and_thresh_to_fd.id, + params->id_and_thresh_to_fd.thresh, + "nvkms-fence", + ¶ms->id_and_thresh_to_fd.fd); + break; + + case NVKMS_SYNCPT_OP_READ_MINVAL: + params->read_minval.minval = + nvhost_syncpt_read_minval(pdev, params->read_minval.id); + break; + + } + + return NV_TRUE; +} + +#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT) && defined(NV_LINUX_NVHOST_H_PRESENT) + +#include +#include +#include +#include + +/* + * If the host1x.h header is present, then we are using the upstream + * host1x driver and so make sure CONFIG_TEGRA_HOST1X is defined to pick + * up the correct prototypes/definitions in nvhost.h. + */ +#define CONFIG_TEGRA_HOST1X + +#include + +#undef NVKMS_SYNCPT_STUBS_NEEDED + +NvBool nvkms_syncpt_op( + enum NvKmsSyncPtOp op, + NvKmsSyncPtOpParams *params) +{ + struct host1x_syncpt *host1x_sp; + struct platform_device *pdev; + struct host1x *host1x; + + pdev = nvhost_get_default_device(); + if (pdev == NULL) { + nvkms_log(NVKMS_LOG_LEVEL_ERROR, NVKMS_LOG_PREFIX, + "Failed to get nvhost default pdev"); + return NV_FALSE; + } + + host1x = nvhost_get_host1x(pdev); + if (host1x == NULL) { + nvkms_log(NVKMS_LOG_LEVEL_ERROR, NVKMS_LOG_PREFIX, + "Failed to get host1x"); + return NV_FALSE; + } + + switch (op) { + + case NVKMS_SYNCPT_OP_ALLOC: + host1x_sp = host1x_syncpt_alloc(host1x, + HOST1X_SYNCPT_CLIENT_MANAGED, + params->alloc.syncpt_name); + if (host1x_sp == NULL) { + return NV_FALSE; + } + + params->alloc.id = host1x_syncpt_id(host1x_sp); + break; + + case NVKMS_SYNCPT_OP_PUT: + host1x_sp = host1x_syncpt_get_by_id_noref(host1x, params->put.id); + if (host1x_sp == NULL) { + return NV_FALSE; + } + + host1x_syncpt_put(host1x_sp); + break; + + case NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH: { + + struct dma_fence *f; + NvU32 id, thresh; + int err; + + f = sync_file_get_fence(params->fd_to_id_and_thresh.fd); + if (f == NULL) { + return NV_FALSE; + } + + if (dma_fence_is_array(f)) { + struct dma_fence_array *array = to_dma_fence_array(f); + + if (array->num_fences > 1) { + /* Syncpoint fence fd contains more than one syncpoint */ + dma_fence_put(f); + return NV_FALSE; + } + + f = array->fences[0]; + } + + err = host1x_fence_extract(f, &id, &thresh); + dma_fence_put(f); + + if (err < 0) { + return NV_FALSE; + } + + params->fd_to_id_and_thresh.id = id; + params->fd_to_id_and_thresh.thresh = thresh; + + break; + } + + case NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD: { + + struct sync_file *file; + struct dma_fence *f; + int fd; + + host1x_sp = host1x_syncpt_get_by_id_noref(host1x, + params->id_and_thresh_to_fd.id); + if (host1x_sp == NULL) { + return NV_FALSE; + } + + f = host1x_fence_create(host1x_sp, + params->id_and_thresh_to_fd.thresh, true); + if (IS_ERR(f)) { + return NV_FALSE; + } + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + dma_fence_put(f); + return NV_FALSE; + } + + file = sync_file_create(f); + dma_fence_put(f); + + if (!file) { + return NV_FALSE; + } + + fd_install(fd, file->file); + + params->id_and_thresh_to_fd.fd = fd; + break; + } + + case NVKMS_SYNCPT_OP_READ_MINVAL: + host1x_sp = host1x_syncpt_get_by_id_noref(host1x, 
params->read_minval.id); + if (host1x_sp == NULL) { + return NV_FALSE; + } + + params->read_minval.minval = host1x_syncpt_read(host1x_sp); + break; + } + + return NV_TRUE; +} +#endif + +#ifdef NVKMS_SYNCPT_STUBS_NEEDED +/* Unsupported STUB for nvkms_syncpt APIs */ +NvBool nvkms_syncpt_op( + enum NvKmsSyncPtOp op, + NvKmsSyncPtOpParams *params) +{ + return NV_FALSE; +} +#endif + +#define NVKMS_MAJOR_DEVICE_NUMBER 195 +#define NVKMS_MINOR_DEVICE_NUMBER 254 + +/* + * Convert from microseconds to jiffies. The conversion is: + * ((usec) * HZ / 1000000) + * + * Use do_div() to avoid gcc-generated references to __udivdi3(). + * Note that the do_div() macro divides the first argument in place. + */ +static inline unsigned long NVKMS_USECS_TO_JIFFIES(NvU64 usec) +{ + unsigned long result = usec * HZ; + do_div(result, 1000000); + return result; +} + + +/************************************************************************* + * NVKMS uses a global lock, nvkms_lock. The lock is taken in the + * file operation callback functions when calling into core NVKMS. + *************************************************************************/ + +static struct semaphore nvkms_lock; + +/************************************************************************* + * User clients of NVKMS may need to be synchronized with suspend/resume + * operations. This depends on the state of the system when the NVKMS + * suspend/resume callbacks are invoked. NVKMS uses a single + * RW lock, nvkms_pm_lock, for this synchronization. + *************************************************************************/ + +static struct rw_semaphore nvkms_pm_lock; + +/************************************************************************* + * NVKMS executes almost all of its queued work items on a single + * kthread. The exception are deferred close() handlers, which typically + * block for long periods of time and stall their queue. + *************************************************************************/ + +static struct nv_kthread_q nvkms_kthread_q; +static struct nv_kthread_q nvkms_deferred_close_kthread_q; + +/************************************************************************* + * The nvkms_per_open structure tracks data that is specific to a + * single open. + *************************************************************************/ + +struct nvkms_per_open { + void *data; + + enum NvKmsClientType type; + + union { + struct { + struct { + atomic_t available; + wait_queue_head_t wait_queue; + } events; + } user; + + struct { + struct { + nv_kthread_q_item_t nv_kthread_q_item; + } events; + } kernel; + } u; + + nv_kthread_q_item_t deferred_close_q_item; +}; + +/************************************************************************* + * nvkms_pm_lock helper functions. Since no down_read_interruptible() + * or equivalent interface is available, it needs to be approximated with + * down_read_trylock() to enable the kernel's freezer to round up user + * threads going into suspend. + *************************************************************************/ + +static inline int nvkms_read_trylock_pm_lock(void) +{ + return !down_read_trylock(&nvkms_pm_lock); +} + +static inline void nvkms_read_lock_pm_lock(void) +{ + if ((current->flags & PF_NOFREEZE)) { + /* + * Non-freezable tasks (i.e. kthreads in this case) don't have to worry + * about being frozen during system suspend, but do need to block so + * that the CPU can go idle during s2idle. Do a normal uninterruptible + * blocking wait for the PM lock. 
+ */ + down_read(&nvkms_pm_lock); + } else { + /* + * For freezable tasks, make sure we give the kernel an opportunity to + * freeze if taking the PM lock fails. + */ + while (!down_read_trylock(&nvkms_pm_lock)) { + try_to_freeze(); + cond_resched(); + } + } +} + +static inline void nvkms_read_unlock_pm_lock(void) +{ + up_read(&nvkms_pm_lock); +} + +static inline void nvkms_write_lock_pm_lock(void) +{ + down_write(&nvkms_pm_lock); +} + +static inline void nvkms_write_unlock_pm_lock(void) +{ + up_write(&nvkms_pm_lock); +} + +/************************************************************************* + * nvidia-modeset-os-interface.h functions. It is assumed that these + * are called while nvkms_lock is held. + *************************************************************************/ + +/* Don't use kmalloc for allocations larger than one page */ +#define KMALLOC_LIMIT PAGE_SIZE + +void* nvkms_alloc(size_t size, NvBool zero) +{ + void *p; + + if (malloc_verbose || fail_malloc_num >= 0) { + int this_alloc = atomic_inc_return(&nvkms_alloc_called_count) - 1; + if (fail_malloc_num >= 0 && fail_malloc_num == this_alloc) { + printk(KERN_WARNING NVKMS_LOG_PREFIX "Failing alloc %d\n", + fail_malloc_num); + return NULL; + } + } + + if (size <= KMALLOC_LIMIT) { + p = kmalloc(size, GFP_KERNEL); + } else { + p = vmalloc(size); + } + + if (zero && (p != NULL)) { + memset(p, 0, size); + } + + return p; +} + +void nvkms_free(void *ptr, size_t size) +{ + if (size <= KMALLOC_LIMIT) { + kfree(ptr); + } else { + vfree(ptr); + } +} + +void* nvkms_memset(void *ptr, NvU8 c, size_t size) +{ + return memset(ptr, c, size); +} + +void* nvkms_memcpy(void *dest, const void *src, size_t n) +{ + return memcpy(dest, src, n); +} + +void* nvkms_memmove(void *dest, const void *src, size_t n) +{ + return memmove(dest, src, n); +} + +int nvkms_memcmp(const void *s1, const void *s2, size_t n) +{ + return memcmp(s1, s2, n); +} + +size_t nvkms_strlen(const char *s) +{ + return strlen(s); +} + +int nvkms_strcmp(const char *s1, const char *s2) +{ + return strcmp(s1, s2); +} + +char* nvkms_strncpy(char *dest, const char *src, size_t n) +{ + return strncpy(dest, src, n); +} + +void nvkms_usleep(NvU64 usec) +{ + if (usec < 1000) { + /* + * If the period to wait is less than one millisecond, sleep + * using udelay(); note this is a busy wait. + */ + udelay(usec); + } else { + /* + * Otherwise, sleep with millisecond precision. Clamp the + * time to ~4 seconds (0xFFF/1000 => 4.09 seconds). + * + * Note that the do_div() macro divides the first argument in + * place. + */ + + int msec; + NvU64 tmp = usec + 500; + do_div(tmp, 1000); + msec = (int) (tmp & 0xFFF); + + /* + * XXX NVKMS TODO: this may need to be msleep_interruptible(), + * though the callers would need to be made to handle + * returning early. 
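+ * Note also that msleep() rounds up to jiffy granularity, so the actual + * sleep may last somewhat longer than requested.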
+ */ + msleep(msec); + } +} + +NvU64 nvkms_get_usec(void) +{ + struct timespec64 ts; + NvU64 ns; + + ktime_get_raw_ts64(&ts); + + ns = timespec64_to_ns(&ts); + return ns / 1000; +} + +int nvkms_copyin(void *kptr, NvU64 uaddr, size_t n) +{ + if (!nvKmsNvU64AddressIsSafe(uaddr)) { + return -EINVAL; + } + + if (copy_from_user(kptr, nvKmsNvU64ToPointer(uaddr), n) != 0) { + return -EFAULT; + } + + return 0; +} + +int nvkms_copyout(NvU64 uaddr, const void *kptr, size_t n) +{ + if (!nvKmsNvU64AddressIsSafe(uaddr)) { + return -EINVAL; + } + + if (copy_to_user(nvKmsNvU64ToPointer(uaddr), kptr, n) != 0) { + return -EFAULT; + } + + return 0; +} + +void nvkms_yield(void) +{ + schedule(); +} + +void nvkms_dump_stack(void) +{ + dump_stack(); +} + +int nvkms_snprintf(char *str, size_t size, const char *format, ...) +{ + int ret; + va_list ap; + + va_start(ap, format); + ret = vsnprintf(str, size, format, ap); + va_end(ap); + + return ret; +} + +int nvkms_vsnprintf(char *str, size_t size, const char *format, va_list ap) +{ + return vsnprintf(str, size, format, ap); +} + +void nvkms_log(const int level, const char *gpuPrefix, const char *msg) +{ + const char *levelString; + const char *levelPrefix; + + switch (level) { + default: + case NVKMS_LOG_LEVEL_INFO: + levelPrefix = ""; + levelString = KERN_INFO; + break; + case NVKMS_LOG_LEVEL_WARN: + levelPrefix = "WARNING: "; + levelString = KERN_WARNING; + break; + case NVKMS_LOG_LEVEL_ERROR: + levelPrefix = "ERROR: "; + levelString = KERN_ERR; + break; + } + + printk("%s%s%s%s%s\n", + levelString, NVKMS_LOG_PREFIX, levelPrefix, gpuPrefix, msg); +} + +void +nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel, + NvBool eventsAvailable) +{ + struct nvkms_per_open *popen = pOpenKernel; + + switch (popen->type) { + case NVKMS_CLIENT_USER_SPACE: + /* + * Write popen->events.available atomically, to avoid any races or + * memory barrier issues interacting with nvkms_poll(). + */ + atomic_set(&popen->u.user.events.available, eventsAvailable); + + wake_up_interruptible(&popen->u.user.events.wait_queue); + + break; + case NVKMS_CLIENT_KERNEL_SPACE: + if (eventsAvailable) { + nv_kthread_q_schedule_q_item( + &nvkms_kthread_q, + &popen->u.kernel.events.nv_kthread_q_item); + } + + break; + } +} + +static void nvkms_suspend(NvU32 gpuId) +{ + nvKmsKapiSuspendResume(NV_TRUE /* suspend */); + + if (gpuId == 0) { + nvkms_write_lock_pm_lock(); + } + + down(&nvkms_lock); + nvKmsSuspend(gpuId); + up(&nvkms_lock); +} + +static void nvkms_resume(NvU32 gpuId) +{ + down(&nvkms_lock); + nvKmsResume(gpuId); + up(&nvkms_lock); + + if (gpuId == 0) { + nvkms_write_unlock_pm_lock(); + } + + nvKmsKapiSuspendResume(NV_FALSE /* suspend */); +} + + +/************************************************************************* + * Interface with resman. 
+ *************************************************************************/ + +static nvidia_modeset_rm_ops_t __rm_ops = { 0 }; +static nvidia_modeset_callbacks_t nvkms_rm_callbacks = { + .suspend = nvkms_suspend, + .resume = nvkms_resume +}; + +static int nvkms_alloc_rm(void) +{ + NV_STATUS nvstatus; + int ret; + + __rm_ops.version_string = NV_VERSION_STRING; + + nvstatus = nvidia_get_rm_ops(&__rm_ops); + + if (nvstatus != NV_OK) { + printk(KERN_ERR NVKMS_LOG_PREFIX "Version mismatch: " + "nvidia.ko(%s) nvidia-modeset.ko(%s)\n", + __rm_ops.version_string, NV_VERSION_STRING); + return -EINVAL; + } + + ret = __rm_ops.set_callbacks(&nvkms_rm_callbacks); + if (ret < 0) { + printk(KERN_ERR NVKMS_LOG_PREFIX "Failed to register callbacks\n"); + return ret; + } + + return 0; +} + +static void nvkms_free_rm(void) +{ + __rm_ops.set_callbacks(NULL); +} + +void nvkms_call_rm(void *ops) +{ + nvidia_modeset_stack_ptr stack = NULL; + + if (__rm_ops.alloc_stack(&stack) != 0) { + return; + } + + __rm_ops.op(stack, ops); + + __rm_ops.free_stack(stack); +} + +/************************************************************************* + * ref_ptr implementation. + *************************************************************************/ + +struct nvkms_ref_ptr { + nv_kref_t refcnt; + // Access to ptr is guarded by the nvkms_lock. + void *ptr; +}; + +struct nvkms_ref_ptr* nvkms_alloc_ref_ptr(void *ptr) +{ + struct nvkms_ref_ptr *ref_ptr = nvkms_alloc(sizeof(*ref_ptr), NV_FALSE); + if (ref_ptr) { + // The ref_ptr owner counts as a reference on the ref_ptr itself. + nv_kref_init(&ref_ptr->refcnt); + ref_ptr->ptr = ptr; + } + return ref_ptr; +} + +void nvkms_free_ref_ptr(struct nvkms_ref_ptr *ref_ptr) +{ + if (ref_ptr) { + ref_ptr->ptr = NULL; + // Release the owner's reference of the ref_ptr. + nvkms_dec_ref(ref_ptr); + } +} + +void nvkms_inc_ref(struct nvkms_ref_ptr *ref_ptr) +{ + nv_kref_get(&ref_ptr->refcnt); +} + +static void ref_ptr_free(nv_kref_t *ref) +{ + struct nvkms_ref_ptr *ref_ptr = container_of(ref, struct nvkms_ref_ptr, + refcnt); + nvkms_free(ref_ptr, sizeof(*ref_ptr)); +} + +void* nvkms_dec_ref(struct nvkms_ref_ptr *ref_ptr) +{ + void *ptr = ref_ptr->ptr; + nv_kref_put(&ref_ptr->refcnt, ref_ptr_free); + return ptr; +} + +/************************************************************************* + * Timer support + * + * Core NVKMS needs to be able to schedule work to execute in the + * future, within process context. + * + * To achieve this, use struct timer_list to schedule a timer + * callback, nvkms_timer_callback(). This will execute in softirq + * context, so from there schedule an nv_kthread_q item, + * nvkms_kthread_q_callback(), which will execute in process context. + *************************************************************************/ + +struct nvkms_timer_t { + nv_kthread_q_item_t nv_kthread_q_item; + struct timer_list kernel_timer; + NvBool cancel; + NvBool complete; + NvBool isRefPtr; + NvBool kernel_timer_created; + nvkms_timer_proc_t *proc; + void *dataPtr; + NvU32 dataU32; + struct list_head timers_list; +}; + +/* + * Global list with pending timers, any change requires acquiring lock + */ +static struct { + spinlock_t lock; + struct list_head list; +} nvkms_timers; + +static void nvkms_kthread_q_callback(void *arg) +{ + struct nvkms_timer_t *timer = arg; + void *dataPtr; + unsigned long flags = 0; + + /* + * We can delete this timer from pending timers list - it's being + * processed now. 
+ */ + spin_lock_irqsave(&nvkms_timers.lock, flags); + list_del(&timer->timers_list); + spin_unlock_irqrestore(&nvkms_timers.lock, flags); + + /* + * After kthread_q_callback we want to be sure that timer_callback + * for this timer has also finished. It's important during module + * unload - this way we can safely unload this module by first deleting + * pending timers and then waiting for workqueue callbacks. + */ + if (timer->kernel_timer_created) { + nv_timer_delete_sync(&timer->kernel_timer); + } + + /* + * Block the kthread during system suspend & resume in order to defer + * handling of events such as DP_IRQ and hotplugs until after resume. + */ + nvkms_read_lock_pm_lock(); + + down(&nvkms_lock); + + if (timer->isRefPtr) { + // If the object this timer refers to was destroyed, treat the timer as + // canceled. + dataPtr = nvkms_dec_ref(timer->dataPtr); + if (!dataPtr) { + timer->cancel = NV_TRUE; + } + } else { + dataPtr = timer->dataPtr; + } + + if (!timer->cancel) { + timer->proc(dataPtr, timer->dataU32); + timer->complete = NV_TRUE; + } + + if (timer->isRefPtr) { + // ref_ptr-based timers are allocated with kmalloc(GFP_ATOMIC). + kfree(timer); + } else if (timer->cancel) { + nvkms_free(timer, sizeof(*timer)); + } + + up(&nvkms_lock); + + nvkms_read_unlock_pm_lock(); +} + +static void nvkms_queue_work(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item) +{ + int ret = nv_kthread_q_schedule_q_item(q, q_item); + /* + * nv_kthread_q_schedule_q_item should only fail (which it indicates by + * returning false) if the item is already scheduled or the queue is + * stopped. Neither of those should happen in NVKMS. + */ + WARN_ON(!ret); +} + +static void _nvkms_timer_callback_internal(struct nvkms_timer_t *nvkms_timer) +{ + /* In softirq context, so schedule nvkms_kthread_q_callback(). */ + nvkms_queue_work(&nvkms_kthread_q, &nvkms_timer->nv_kthread_q_item); +} + +/* + * Why the "inline" keyword? Because only one of these next two functions will + * be used, thus leading to a "defined but not used function" warning. The + * "inline" keyword is redefined in the Kbuild system + * (see: /include/linux/compiler-gcc.h) so as to suppress that warning. + */ +inline static void nvkms_timer_callback_typed_data(struct timer_list *timer) +{ + struct nvkms_timer_t *nvkms_timer = + container_of(timer, struct nvkms_timer_t, kernel_timer); + + _nvkms_timer_callback_internal(nvkms_timer); +} + +static void +nvkms_init_timer(struct nvkms_timer_t *timer, nvkms_timer_proc_t *proc, + void *dataPtr, NvU32 dataU32, NvBool isRefPtr, NvU64 usec) +{ + unsigned long flags = 0; + + memset(timer, 0, sizeof(*timer)); + timer->cancel = NV_FALSE; + timer->complete = NV_FALSE; + timer->isRefPtr = isRefPtr; + + timer->proc = proc; + timer->dataPtr = dataPtr; + timer->dataU32 = dataU32; + + nv_kthread_q_item_init(&timer->nv_kthread_q_item, nvkms_kthread_q_callback, + timer); + + /* + * After adding timer to timers_list we need to finish referencing it + * (calling nvkms_queue_work() or mod_timer()) before releasing the lock. + * Otherwise, if the code to free the timer were ever updated to + * run in parallel with this, it could race against nvkms_init_timer() + * and free the timer before its initialization is complete.
+ */ + spin_lock_irqsave(&nvkms_timers.lock, flags); + list_add(&timer->timers_list, &nvkms_timers.list); + + if (usec == 0) { + timer->kernel_timer_created = NV_FALSE; + nvkms_queue_work(&nvkms_kthread_q, &timer->nv_kthread_q_item); + } else { + timer_setup(&timer->kernel_timer, nvkms_timer_callback_typed_data, 0); + + timer->kernel_timer_created = NV_TRUE; + mod_timer(&timer->kernel_timer, jiffies + NVKMS_USECS_TO_JIFFIES(usec)); + } + spin_unlock_irqrestore(&nvkms_timers.lock, flags); +} + +nvkms_timer_handle_t* +nvkms_alloc_timer(nvkms_timer_proc_t *proc, + void *dataPtr, NvU32 dataU32, + NvU64 usec) +{ + // nvkms_alloc_timer cannot be called from an interrupt context. + struct nvkms_timer_t *timer = nvkms_alloc(sizeof(*timer), NV_FALSE); + if (timer) { + nvkms_init_timer(timer, proc, dataPtr, dataU32, NV_FALSE, usec); + } + return timer; +} + +NvBool +nvkms_alloc_timer_with_ref_ptr(nvkms_timer_proc_t *proc, + struct nvkms_ref_ptr *ref_ptr, + NvU32 dataU32, NvU64 usec) +{ + // nvkms_alloc_timer_with_ref_ptr is called from an interrupt bottom half + // handler, which runs in a tasklet (i.e. atomic) context. + struct nvkms_timer_t *timer = kmalloc(sizeof(*timer), GFP_ATOMIC); + if (timer) { + // Reference the ref_ptr to make sure that it doesn't get freed before + // the timer fires. + nvkms_inc_ref(ref_ptr); + nvkms_init_timer(timer, proc, ref_ptr, dataU32, NV_TRUE, usec); + } + + return timer != NULL; +} + +void nvkms_free_timer(nvkms_timer_handle_t *handle) +{ + struct nvkms_timer_t *timer = handle; + + if (timer == NULL) { + return; + } + + if (timer->complete) { + nvkms_free(timer, sizeof(*timer)); + return; + } + + timer->cancel = NV_TRUE; +} + +NvBool nvkms_fd_is_nvidia_chardev(int fd) +{ + struct file *filp = fget(fd); + dev_t rdev = 0; + NvBool ret = NV_FALSE; + + if (filp == NULL) { + return ret; + } + + if (filp->f_inode == NULL) { + goto done; + } + rdev = filp->f_inode->i_rdev; + + if (MAJOR(rdev) == NVKMS_MAJOR_DEVICE_NUMBER) { + ret = NV_TRUE; + } + +done: + fput(filp); + + return ret; +} + +NvBool nvkms_open_gpu(NvU32 gpuId) +{ + nvidia_modeset_stack_ptr stack = NULL; + NvBool ret; + + if (__rm_ops.alloc_stack(&stack) != 0) { + return NV_FALSE; + } + + ret = __rm_ops.open_gpu(gpuId, stack) == 0; + + __rm_ops.free_stack(stack); + + return ret; +} + +void nvkms_close_gpu(NvU32 gpuId) +{ + nvidia_modeset_stack_ptr stack = NULL; + + if (__rm_ops.alloc_stack(&stack) != 0) { + return; + } + + __rm_ops.close_gpu(gpuId, stack); + + __rm_ops.free_stack(stack); +} + +NvU32 nvkms_enumerate_gpus(nv_gpu_info_t *gpu_info) +{ + return __rm_ops.enumerate_gpus(gpu_info); +} + +NvBool nvkms_allow_write_combining(void) +{ + return __rm_ops.system_info.allow_write_combining; +} + +#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) +/************************************************************************* + * Implementation of sysfs interface to control backlight + *************************************************************************/ + +struct nvkms_backlight_device { + NvU32 gpu_id; + NvU32 display_id; + + void *drv_priv; + + struct backlight_device * dev; +}; + +static int nvkms_update_backlight_status(struct backlight_device *bd) +{ + struct nvkms_backlight_device *nvkms_bd = bl_get_data(bd); + NvBool status; + int ret; + + ret = down_interruptible(&nvkms_lock); + + if (ret != 0) { + return ret; + } + + status = nvKmsSetBacklight(nvkms_bd->display_id, nvkms_bd->drv_priv, + bd->props.brightness); + + up(&nvkms_lock); + + return status ? 
0 : -EINVAL; +} + +static int nvkms_get_backlight_brightness(struct backlight_device *bd) +{ + struct nvkms_backlight_device *nvkms_bd = bl_get_data(bd); + NvU32 brightness = 0; + NvBool status; + int ret; + + ret = down_interruptible(&nvkms_lock); + + if (ret != 0) { + return ret; + } + + status = nvKmsGetBacklight(nvkms_bd->display_id, nvkms_bd->drv_priv, + &brightness); + + up(&nvkms_lock); + + return status ? brightness : -1; +} + +static const struct backlight_ops nvkms_backlight_ops = { + .update_status = nvkms_update_backlight_status, + .get_brightness = nvkms_get_backlight_brightness, +}; +#endif /* IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) */ + +struct nvkms_backlight_device* +nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv, + NvU32 current_brightness) +{ +#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) + char name[18]; + struct backlight_properties props = { + .brightness = current_brightness, + .max_brightness = 100, + .type = BACKLIGHT_RAW, + }; + nv_gpu_info_t *gpu_info = NULL; + NvU32 gpu_count = 0; + struct nvkms_backlight_device *nvkms_bd = NULL; + int i; + +#if defined(NV_ACPI_VIDEO_BACKLIGHT_USE_NATIVE) + if (!acpi_video_backlight_use_native()) { +#if defined(NV_ACPI_VIDEO_REGISTER_BACKLIGHT) + nvkms_log(NVKMS_LOG_LEVEL_INFO, NVKMS_LOG_PREFIX, + "ACPI reported no NVIDIA native backlight available; attempting to use ACPI backlight."); + acpi_video_register_backlight(); +#endif + return NULL; + } +#endif + + gpu_info = nvkms_alloc(NV_MAX_GPUS * sizeof(*gpu_info), NV_TRUE); + if (gpu_info == NULL) { + return NULL; + } + + gpu_count = __rm_ops.enumerate_gpus(gpu_info); + if (gpu_count == 0) { + goto done; + } + + for (i = 0; i < gpu_count; i++) { + if (gpu_info[i].gpu_id == gpu_id) { + break; + } + } + + if (i == gpu_count) { + goto done; + } + + nvkms_bd = nvkms_alloc(sizeof(*nvkms_bd), NV_TRUE); + if (nvkms_bd == NULL) { + goto done; + } + + snprintf(name, sizeof(name), "nvidia_%d", i); + name[sizeof(name) - 1] = '\0'; + + nvkms_bd->gpu_id = gpu_id; + nvkms_bd->display_id = display_id; + nvkms_bd->drv_priv = drv_priv; + + nvkms_bd->dev = + backlight_device_register(name, + gpu_info[i].os_device_ptr, + nvkms_bd, + &nvkms_backlight_ops, + &props); + +done: + nvkms_free(gpu_info, NV_MAX_GPUS * sizeof(*gpu_info)); + + return nvkms_bd; +#else + return NULL; +#endif /* IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) */ +} + +void nvkms_unregister_backlight(struct nvkms_backlight_device *nvkms_bd) +{ +#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) + if (nvkms_bd->dev) { + backlight_device_unregister(nvkms_bd->dev); + } + + nvkms_free(nvkms_bd, sizeof(*nvkms_bd)); +#endif /* IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) */ +} + +/************************************************************************* + * Common to both user-space and kapi NVKMS interfaces + *************************************************************************/ + +static void nvkms_kapi_event_kthread_q_callback(void *arg) +{ + struct NvKmsKapiDevice *device = arg; + + nvKmsKapiHandleEventQueueChange(device); +} + +static struct nvkms_per_open *nvkms_open_common(enum NvKmsClientType type, + struct NvKmsKapiDevice *device, + NvBool interruptible, + int *status) +{ + struct nvkms_per_open *popen = NULL; + + popen = nvkms_alloc(sizeof(*popen), NV_TRUE); + + if (popen == NULL) { + *status = -ENOMEM; + goto failed; + } + + popen->type = type; + + if (interruptible) { + *status = down_interruptible(&nvkms_lock); + if (*status != 0) { + goto failed; + } + } else { + down(&nvkms_lock); + } + + popen->data = 
nvKmsOpen(current->tgid, type, popen); + + up(&nvkms_lock); + + if (popen->data == NULL) { + *status = -EPERM; + goto failed; + } + + switch (popen->type) { + case NVKMS_CLIENT_USER_SPACE: + init_waitqueue_head(&popen->u.user.events.wait_queue); + break; + case NVKMS_CLIENT_KERNEL_SPACE: + nv_kthread_q_item_init(&popen->u.kernel.events.nv_kthread_q_item, + nvkms_kapi_event_kthread_q_callback, + device); + break; + } + + *status = 0; + + return popen; + +failed: + + nvkms_free(popen, sizeof(*popen)); + + return NULL; +} + +static void nvkms_close_pm_locked(struct nvkms_per_open *popen) +{ + /* + * Don't use down_interruptible(): we need to free resources + * during close, so we have no choice but to wait to take the + * mutex. + */ + + down(&nvkms_lock); + + nvKmsClose(popen->data); + + popen->data = NULL; + + up(&nvkms_lock); + + if (popen->type == NVKMS_CLIENT_KERNEL_SPACE) { + /* + * Flush any outstanding nvkms_kapi_event_kthread_q_callback() work + * items before freeing popen. + * + * Note that this must be done after the above nvKmsClose() call, to + * guarantee that no more nvkms_kapi_event_kthread_q_callback() work + * items get scheduled. + * + * Also, note that though popen->data is freed above, any subsequent + * nvkms_kapi_event_kthread_q_callback()'s for this popen should be + * safe: if any nvkms_kapi_event_kthread_q_callback()-initiated work + * attempts to call back into NVKMS, the popen->data==NULL check in + * nvkms_ioctl_common() should reject the request. + */ + + nv_kthread_q_flush(&nvkms_kthread_q); + } + + nvkms_free(popen, sizeof(*popen)); +} + +static void nvkms_close_pm_unlocked(void *data) +{ + struct nvkms_per_open *popen = data; + + nvkms_read_lock_pm_lock(); + + nvkms_close_pm_locked(popen); + + nvkms_read_unlock_pm_lock(); +} + +static void nvkms_close_popen(struct nvkms_per_open *popen) +{ + if (nvkms_read_trylock_pm_lock() == 0) { + nvkms_close_pm_locked(popen); + nvkms_read_unlock_pm_lock(); + } else { + nv_kthread_q_item_init(&popen->deferred_close_q_item, + nvkms_close_pm_unlocked, + popen); + nvkms_queue_work(&nvkms_deferred_close_kthread_q, + &popen->deferred_close_q_item); + } +} + +static int nvkms_ioctl_common +( + struct nvkms_per_open *popen, + NvU32 cmd, NvU64 address, const size_t size, + NvBool interruptible +) +{ + NvBool ret; + + if (interruptible) { + int status = down_interruptible(&nvkms_lock); + if (status != 0) { + return status; + } + } else { + down(&nvkms_lock); + } + + if (popen->data != NULL) { + ret = nvKmsIoctl(popen->data, cmd, address, size); + } else { + ret = NV_FALSE; + } + + up(&nvkms_lock); + + return ret ? 0 : -EPERM; +} + +/************************************************************************* + * NVKMS interface for kernel space NVKMS clients like KAPI + *************************************************************************/ + +struct nvkms_per_open* nvkms_open_from_kapi +( + struct NvKmsKapiDevice *device +) +{ + int status = 0; + struct nvkms_per_open *ret; + + nvkms_read_lock_pm_lock(); + ret = nvkms_open_common(NVKMS_CLIENT_KERNEL_SPACE, + device, + NV_FALSE /* interruptible */, + &status); + nvkms_read_unlock_pm_lock(); + + return ret; +} + +void nvkms_close_from_kapi(struct nvkms_per_open *popen) +{ + nvkms_close_pm_unlocked(popen); +} + +NvBool nvkms_ioctl_from_kapi_try_pmlock +( + struct nvkms_per_open *popen, + NvU32 cmd, void *params_address, const size_t param_size +) +{ + NvBool ret; + + // XXX PM lock must be allowed to fail, see bug 4432810. 
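+    /*
+     * nvkms_read_trylock_pm_lock() returns 0 on success; a non-zero value
+     * means the PM lock is currently unavailable (e.g. a suspend/resume
+     * transition holds it for writing), so fail immediately instead of
+     * blocking.
+     */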
+ if (nvkms_read_trylock_pm_lock()) { + return NV_FALSE; + } + + ret = nvkms_ioctl_common(popen, + cmd, + (NvU64)(NvUPtr)params_address, param_size, + NV_FALSE /* interruptible */) == 0; + nvkms_read_unlock_pm_lock(); + + return ret; +} + +NvBool nvkms_ioctl_from_kapi +( + struct nvkms_per_open *popen, + NvU32 cmd, void *params_address, const size_t param_size +) +{ + NvBool ret; + + nvkms_read_lock_pm_lock(); + ret = nvkms_ioctl_common(popen, + cmd, + (NvU64)(NvUPtr)params_address, param_size, + NV_FALSE /* interruptible */) == 0; + nvkms_read_unlock_pm_lock(); + + return ret; +} + +/************************************************************************* + * APIs for locking. + *************************************************************************/ + +struct nvkms_sema_t { + struct semaphore os_sema; +}; + +nvkms_sema_handle_t* nvkms_sema_alloc(void) +{ + nvkms_sema_handle_t *sema = nvkms_alloc(sizeof(*sema), NV_TRUE); + + if (sema != NULL) { + sema_init(&sema->os_sema, 1); + } + + return sema; +} + +void nvkms_sema_free(nvkms_sema_handle_t *sema) +{ + nvkms_free(sema, sizeof(*sema)); +} + +void nvkms_sema_down(nvkms_sema_handle_t *sema) +{ + down(&sema->os_sema); +} + +void nvkms_sema_up(nvkms_sema_handle_t *sema) +{ + up(&sema->os_sema); +} + +/************************************************************************* + * Procfs files support code. + *************************************************************************/ + +#if defined(CONFIG_PROC_FS) + +#define NV_DEFINE_SINGLE_NVKMS_PROCFS_FILE(name) \ + NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, nvkms_pm_lock) + +#define NVKMS_PROCFS_FOLDER "driver/nvidia-modeset" + +struct proc_dir_entry *nvkms_proc_dir; + +static void nv_procfs_out_string(void *data, const char *str) +{ + struct seq_file *s = data; + + seq_puts(s, str); +} + +static int nv_procfs_read_nvkms_proc(struct seq_file *s, void *arg) +{ + char *buffer; + nvkms_procfs_proc_t *func; + +#define NVKMS_PROCFS_STRING_SIZE 8192 + + func = s->private; + if (func == NULL) { + return 0; + } + + buffer = nvkms_alloc(NVKMS_PROCFS_STRING_SIZE, NV_TRUE); + + if (buffer != NULL) { + int status = down_interruptible(&nvkms_lock); + + if (status != 0) { + nvkms_free(buffer, NVKMS_PROCFS_STRING_SIZE); + return status; + } + + func(s, buffer, NVKMS_PROCFS_STRING_SIZE, &nv_procfs_out_string); + + up(&nvkms_lock); + + nvkms_free(buffer, NVKMS_PROCFS_STRING_SIZE); + } + + return 0; +} + +NV_DEFINE_SINGLE_NVKMS_PROCFS_FILE(nvkms_proc); + +static NvBool +nvkms_add_proc_file(const nvkms_procfs_file_t *file) +{ + struct proc_dir_entry *new_proc_dir; + + if (nvkms_proc_dir == NULL) { + return NV_FALSE; + } + + new_proc_dir = proc_create_data(file->name, 0, nvkms_proc_dir, + &nv_procfs_nvkms_proc_fops, file->func); + return (new_proc_dir != NULL); +} + +#endif /* defined(CONFIG_PROC_FS) */ + +static void nvkms_proc_init(void) +{ +#if defined(CONFIG_PROC_FS) + const nvkms_procfs_file_t *file; + + nvkms_proc_dir = NULL; + nvKmsGetProcFiles(&file); + + if (file == NULL || file->name == NULL) { + return; + } + + nvkms_proc_dir = NV_CREATE_PROC_DIR(NVKMS_PROCFS_FOLDER, NULL); + if (nvkms_proc_dir == NULL) { + return; + } + + while (file->name != NULL) { + if (!nvkms_add_proc_file(file)) { + nvkms_log(NVKMS_LOG_LEVEL_WARN, NVKMS_LOG_PREFIX, + "Failed to create proc file"); + break; + } + file++; + } +#endif +} + +static void nvkms_proc_exit(void) +{ +#if defined(CONFIG_PROC_FS) + if (nvkms_proc_dir == NULL) { + return; + } + + proc_remove(nvkms_proc_dir); +#endif /* CONFIG_PROC_FS */ +} + 
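+/*
+ * Illustrative sketch only, not part of the patch: roughly how a procfs
+ * entry consumed by nvkms_proc_init() above could be described by core
+ * NVKMS. The table really comes from nvKmsGetProcFiles(); the names
+ * "example_show" and "example" here are hypothetical.
+ */
+#if 0
+static void example_show(void *data, char *buffer, size_t size,
+                         nvkms_procfs_out_string_func_t *outString)
+{
+    /* 'buffer' is NVKMS_PROCFS_STRING_SIZE bytes of scratch space. */
+    outString(data, "example\n");
+}
+
+static const nvkms_procfs_file_t example_proc_files[] = {
+    { "example", example_show },
+    { NULL, NULL }, /* a NULL name terminates the loop in nvkms_proc_init() */
+};
+#endif
+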
+/************************************************************************* + * NVKMS Config File Read + ************************************************************************/ +#if NVKMS_CONFIG_FILE_SUPPORTED +static NvBool nvkms_fs_mounted(void) +{ + return current->fs != NULL; +} + +static size_t nvkms_config_file_open +( + char *fname, + char ** const buff +) +{ + int i = 0; + struct file *file; + struct inode *file_inode; + size_t file_size = 0; + size_t read_size = 0; + loff_t pos = 0; + + *buff = NULL; + + if (!nvkms_fs_mounted()) { + printk(KERN_ERR NVKMS_LOG_PREFIX "ERROR: Filesystems not mounted\n"); + return 0; + } + + file = filp_open(fname, O_RDONLY, 0); + if (file == NULL || IS_ERR(file)) { + printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Failed to open %s\n", + fname); + return 0; + } + + file_inode = file->f_inode; + if (file_inode == NULL || IS_ERR(file_inode)) { + printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Inode is invalid\n"); + goto done; + } + file_size = file_inode->i_size; + if (file_size > NVKMS_READ_FILE_MAX_SIZE) { + printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: File exceeds maximum size\n"); + goto done; + } + + // Do not alloc a 0 sized buffer + if (file_size == 0) { + goto done; + } + + *buff = nvkms_alloc(file_size, NV_FALSE); + if (*buff == NULL) { + printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Out of memory\n"); + goto done; + } + + /* + * TODO: Once we have access to GPL symbols, this can be replaced with + * kernel_read_file for kernels >= 4.6 + */ + while ((read_size < file_size) && (i++ < NVKMS_READ_FILE_MAX_LOOPS)) { + ssize_t ret = kernel_read(file, *buff + read_size, + file_size - read_size, &pos); + if (ret <= 0) { + break; + } + read_size += ret; + } + + if (read_size != file_size) { + printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Failed to read %s\n", + fname); + goto done; + } + + filp_close(file, current->files); + return file_size; + +done: + nvkms_free(*buff, file_size); + filp_close(file, current->files); + return 0; +} + +/* must be called with nvkms_lock locked */ +static void nvkms_read_config_file_locked(void) +{ + char *buffer = NULL; + size_t buf_size = 0; + + /* only read the config file if the kernel parameter is set */ + if (!NVKMS_CONF_FILE_SPECIFIED) { + return; + } + + buf_size = nvkms_config_file_open(nvkms_conf, &buffer); + + if (buf_size == 0) { + return; + } + + if (nvKmsReadConf(buffer, buf_size, nvkms_config_file_open)) { + printk(KERN_INFO NVKMS_LOG_PREFIX "Successfully read %s\n", + nvkms_conf); + } + + nvkms_free(buffer, buf_size); +} +#else +static void nvkms_read_config_file_locked(void) +{ +} +#endif + +/************************************************************************* + * NVKMS KAPI functions + ************************************************************************/ + +NvBool nvKmsKapiGetFunctionsTable +( + struct NvKmsKapiFunctionsTable *funcsTable +) +{ + return nvKmsKapiGetFunctionsTableInternal(funcsTable); +} +EXPORT_SYMBOL(nvKmsKapiGetFunctionsTable); + +NvU32 nvKmsKapiF16ToF32(NvU16 a) +{ + return nvKmsKapiF16ToF32Internal(a); +} +EXPORT_SYMBOL(nvKmsKapiF16ToF32); + +NvU16 nvKmsKapiF32ToF16(NvU32 a) +{ + return nvKmsKapiF32ToF16Internal(a); +} +EXPORT_SYMBOL(nvKmsKapiF32ToF16); + +NvU32 nvKmsKapiF32Mul(NvU32 a, NvU32 b) +{ + return nvKmsKapiF32MulInternal(a, b); +} +EXPORT_SYMBOL(nvKmsKapiF32Mul); + +NvU32 nvKmsKapiF32Div(NvU32 a, NvU32 b) +{ + return nvKmsKapiF32DivInternal(a, b); +} +EXPORT_SYMBOL(nvKmsKapiF32Div); + +NvU32 nvKmsKapiF32Add(NvU32 a, NvU32 b) +{ + return 
nvKmsKapiF32AddInternal(a, b); +} +EXPORT_SYMBOL(nvKmsKapiF32Add); + +NvU32 nvKmsKapiF32ToUI32RMinMag(NvU32 a, NvBool exact) +{ + return nvKmsKapiF32ToUI32RMinMagInternal(a, exact); +} +EXPORT_SYMBOL(nvKmsKapiF32ToUI32RMinMag); + +NvU32 nvKmsKapiUI32ToF32(NvU32 a) +{ + return nvKmsKapiUI32ToF32Internal(a); +} +EXPORT_SYMBOL(nvKmsKapiUI32ToF32); + +/************************************************************************* + * File operation callback functions. + *************************************************************************/ + +static int nvkms_open(struct inode *inode, struct file *filp) +{ + int status; + + status = nv_down_read_interruptible(&nvkms_pm_lock); + if (status != 0) { + return status; + } + + filp->private_data = + nvkms_open_common(NVKMS_CLIENT_USER_SPACE, + NULL, + NV_TRUE /* interruptible */, + &status); + + nvkms_read_unlock_pm_lock(); + + return status; +} + +static int nvkms_close(struct inode *inode, struct file *filp) +{ + struct nvkms_per_open *popen = filp->private_data; + + if (popen == NULL) { + return -EINVAL; + } + + nvkms_close_popen(popen); + return 0; +} + +static int nvkms_mmap(struct file *filp, struct vm_area_struct *vma) +{ + return -EPERM; +} + +static int nvkms_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + size_t size; + unsigned int nr; + int status; + struct NvKmsIoctlParams params; + struct nvkms_per_open *popen = filp->private_data; + + if ((popen == NULL) || (popen->data == NULL)) { + return -EINVAL; + } + + size = _IOC_SIZE(cmd); + nr = _IOC_NR(cmd); + + /* The only supported ioctl is NVKMS_IOCTL_CMD. */ + + if ((nr != NVKMS_IOCTL_CMD) || (size != sizeof(struct NvKmsIoctlParams))) { + return -ENOTTY; + } + + status = copy_from_user(¶ms, (void *) arg, size); + if (status != 0) { + return -EFAULT; + } + + status = nv_down_read_interruptible(&nvkms_pm_lock); + if (status != 0) { + return status; + } + + status = nvkms_ioctl_common(popen, + params.cmd, + params.address, + params.size, + NV_TRUE /* interruptible */); + + nvkms_read_unlock_pm_lock(); + + return status; +} + +static long nvkms_unlocked_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + return nvkms_ioctl(filp->f_inode, filp, cmd, arg); +} + +static unsigned int nvkms_poll(struct file *filp, poll_table *wait) +{ + unsigned int mask = 0; + struct nvkms_per_open *popen = filp->private_data; + + if ((popen == NULL) || (popen->data == NULL)) { + return mask; + } + + BUG_ON(popen->type != NVKMS_CLIENT_USER_SPACE); + + if ((filp->f_flags & O_NONBLOCK) == 0) { + poll_wait(filp, &popen->u.user.events.wait_queue, wait); + } + + if (atomic_read(&popen->u.user.events.available)) { + mask = POLLPRI | POLLIN; + } + + return mask; +} + + +/************************************************************************* + * Module loading support code. 
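+ * (character device registration and the module init/exit entry points)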
+ *************************************************************************/ + +#define NVKMS_RDEV (MKDEV(NV_MAJOR_DEVICE_NUMBER, \ + NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE)) + +static struct file_operations nvkms_fops = { + .owner = THIS_MODULE, + .poll = nvkms_poll, + .unlocked_ioctl = nvkms_unlocked_ioctl, +#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64 + .compat_ioctl = nvkms_unlocked_ioctl, +#endif + .mmap = nvkms_mmap, + .open = nvkms_open, + .release = nvkms_close, +}; + +static struct cdev nvkms_device_cdev; + +static int __init nvkms_register_chrdev(void) +{ + int ret; + + ret = register_chrdev_region(NVKMS_RDEV, 1, "nvidia-modeset"); + if (ret < 0) { + return ret; + } + + cdev_init(&nvkms_device_cdev, &nvkms_fops); + ret = cdev_add(&nvkms_device_cdev, NVKMS_RDEV, 1); + if (ret < 0) { + unregister_chrdev_region(NVKMS_RDEV, 1); + return ret; + } + + return ret; +} + +static void nvkms_unregister_chrdev(void) +{ + cdev_del(&nvkms_device_cdev); + unregister_chrdev_region(NVKMS_RDEV, 1); +} + +void* nvkms_get_per_open_data(int fd) +{ + struct file *filp = fget(fd); + void *data = NULL; + + if (filp) { + if (filp->f_op == &nvkms_fops && filp->private_data) { + struct nvkms_per_open *popen = filp->private_data; + data = popen->data; + } + + /* + * fget() incremented the struct file's reference count, which needs to + * be balanced with a call to fput(). It is safe to decrement the + * reference count before returning filp->private_data because core + * NVKMS is currently holding the nvkms_lock, which prevents the + * nvkms_close() => nvKmsClose() call chain from freeing the file out + * from under the caller of nvkms_get_per_open_data(). + */ + fput(filp); + } + + return data; +} + +static int __init nvkms_init(void) +{ + int ret; + + atomic_set(&nvkms_alloc_called_count, 0); + + ret = nvkms_alloc_rm(); + + if (ret != 0) { + return ret; + } + + sema_init(&nvkms_lock, 1); + init_rwsem(&nvkms_pm_lock); + + ret = nv_kthread_q_init(&nvkms_kthread_q, + "nvidia-modeset/kthread_q"); + if (ret != 0) { + goto fail_kthread; + } + + ret = nv_kthread_q_init(&nvkms_deferred_close_kthread_q, + "nvidia-modeset/deferred_close_kthread_q"); + if (ret != 0) { + goto fail_deferred_close_kthread; + } + + INIT_LIST_HEAD(&nvkms_timers.list); + spin_lock_init(&nvkms_timers.lock); + + ret = nvkms_register_chrdev(); + if (ret != 0) { + goto fail_register_chrdev; + } + + down(&nvkms_lock); + if (!nvKmsModuleLoad()) { + ret = -ENOMEM; + } + if (ret != 0) { + up(&nvkms_lock); + goto fail_module_load; + } + nvkms_read_config_file_locked(); + up(&nvkms_lock); + + nvkms_proc_init(); + + return 0; + +fail_module_load: + nvkms_unregister_chrdev(); +fail_register_chrdev: + nv_kthread_q_stop(&nvkms_deferred_close_kthread_q); +fail_deferred_close_kthread: + nv_kthread_q_stop(&nvkms_kthread_q); +fail_kthread: + nvkms_free_rm(); + + return ret; +} + +static void __exit nvkms_exit(void) +{ + struct nvkms_timer_t *timer, *tmp_timer; + unsigned long flags = 0; + + nvkms_proc_exit(); + + down(&nvkms_lock); + nvKmsModuleUnload(); + up(&nvkms_lock); + + /* + * At this point, any pending tasks should be marked canceled, but + * we still need to drain them, so that nvkms_kthread_q_callback() doesn't + * get called after the module is unloaded. 
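+     *
+     * Draining happens in two steps: timers that have not fired yet are
+     * deactivated and freed in the loop below, while timers that already
+     * fired have queued kthread_q work items, which the nv_kthread_q_stop()
+     * calls further down wait for.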
+ */ +restart: + spin_lock_irqsave(&nvkms_timers.lock, flags); + + list_for_each_entry_safe(timer, tmp_timer, &nvkms_timers.list, timers_list) { + if (timer->kernel_timer_created) { + /* + * We delete pending timers and check whether it was being executed + * (returns 0) or we have deactivated it before execution (returns 1). + * If it began execution, the kthread_q callback will wait for timer + * completion, and we wait for queue completion with + * nv_kthread_q_stop below. + */ +#if !defined(NV_BSD) && NV_IS_EXPORT_SYMBOL_PRESENT_timer_delete_sync + if (timer_delete_sync(&timer->kernel_timer) == 1) { +#else + if (del_timer_sync(&timer->kernel_timer) == 1) { +#endif + /* We've deactivated timer so we need to clean after it */ + list_del(&timer->timers_list); + + /* We need to unlock spinlock because we are freeing memory which + * may sleep */ + spin_unlock_irqrestore(&nvkms_timers.lock, flags); + + if (timer->isRefPtr) { + nvkms_dec_ref(timer->dataPtr); + kfree(timer); + } else { + nvkms_free(timer, sizeof(*timer)); + } + + /* List could change when we were freeing memory. */ + goto restart; + } + } + } + + spin_unlock_irqrestore(&nvkms_timers.lock, flags); + + nv_kthread_q_stop(&nvkms_deferred_close_kthread_q); + nv_kthread_q_stop(&nvkms_kthread_q); + + nvkms_unregister_chrdev(); + nvkms_free_rm(); + + if (malloc_verbose) { + printk(KERN_INFO NVKMS_LOG_PREFIX "Total allocations: %d\n", + atomic_read(&nvkms_alloc_called_count)); + } +} + +module_init(nvkms_init); +module_exit(nvkms_exit); + + MODULE_LICENSE("Dual MIT/GPL"); + +MODULE_INFO(supported, "external"); +MODULE_VERSION(NV_VERSION_STRING); diff --git a/kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h b/kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h new file mode 100644 index 0000000..25bc5d0 --- /dev/null +++ b/kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h @@ -0,0 +1,387 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Define the entry points which the NVKMS kernel interface layer + * provides to core NVKMS. 
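+ * (memory and string helpers, locking, timers, ref-counted pointers,
+ * procfs, and event-queue plumbing)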
+ */
+
+#if !defined(_NVIDIA_MODESET_OS_INTERFACE_H_)
+#define _NVIDIA_MODESET_OS_INTERFACE_H_
+
+#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
+#include <linux/stddef.h> /* size_t */
+#else
+#include <stddef.h>       /* size_t */
+#endif
+#include "nvtypes.h"      /* NvU8 */
+
+#include "nvkms.h"
+#include "nv_stdarg.h"
+
+enum NvKmsSyncPtOp {
+    /*
+     * Call into Tegra's kernel nvhost driver, and allocate a syncpoint that
+     * can be exclusively used by the caller. Internally, this operation will
+     * call get() to set the initial refcount of the syncpoint to 1.
+     */
+    NVKMS_SYNCPT_OP_ALLOC,
+    /*
+     * Decrease the refcount of an already allocated syncpoint. Once the
+     * refcount drops to 0, the syncpoint will be returned to the free pool
+     * that nvhost manages, so PUT can also be used to balance out an ALLOC.
+     */
+    NVKMS_SYNCPT_OP_PUT,
+    /*
+     * Extract the syncpt id and thresh from the sync-file file descriptor.
+     */
+    NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH,
+    /*
+     * Create a dma-fence from the syncpt id and thresh value, and create a
+     * sync_file file descriptor for the dma-fence handle created.
+     */
+    NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD,
+    /*
+     * Read the minimum value of the given syncpt.
+     */
+    NVKMS_SYNCPT_OP_READ_MINVAL,
+};
+
+enum NvKmsDebugForceColorSpace {
+    NVKMS_DEBUG_FORCE_COLOR_SPACE_NONE,
+    NVKMS_DEBUG_FORCE_COLOR_SPACE_RGB,
+    NVKMS_DEBUG_FORCE_COLOR_SPACE_YUV444,
+    NVKMS_DEBUG_FORCE_COLOR_SPACE_YUV422,
+    NVKMS_DEBUG_FORCE_COLOR_SPACE_MAX,
+};
+
+typedef struct {
+
+    struct {
+        const char *syncpt_name; /* in */
+        NvU32 id;                /* out */
+    } alloc;
+
+    struct {
+        NvU32 id; /* in */
+    } put;
+
+    struct {
+        NvS32 fd;     /* in */
+        NvU32 id;     /* out */
+        NvU32 thresh; /* out */
+    } fd_to_id_and_thresh;
+
+    struct {
+        NvU32 id;     /* in */
+        NvU32 thresh; /* in */
+        NvS32 fd;     /* out */
+    } id_and_thresh_to_fd;
+
+    struct {
+        NvU32 id;     /* in */
+        NvU32 minval; /* out */
+    } read_minval;
+} NvKmsSyncPtOpParams;
+
+enum FailAllocCoreChannelMethod {
+    FAIL_ALLOC_CORE_CHANNEL_RM_SETUP_CORE_CHANNEL = 0,
+    FAIL_ALLOC_CORE_CHANNEL_RESTORE_CONSOLE = 1,
+};
+
+NvBool nvkms_test_fail_alloc_core_channel(enum FailAllocCoreChannelMethod method);
+NvBool nvkms_conceal_vrr_caps(void);
+NvBool nvkms_output_rounding_fix(void);
+NvBool nvkms_disable_hdmi_frl(void);
+NvBool nvkms_disable_vrr_memclk_switch(void);
+NvBool nvkms_hdmi_deepcolor(void);
+NvBool nvkms_vblank_sem_control(void);
+NvBool nvkms_opportunistic_display_sync(void);
+enum NvKmsDebugForceColorSpace nvkms_debug_force_color_space(void);
+NvBool nvkms_enable_overlay_layers(void);
+
+void   nvkms_call_rm    (void *ops);
+void*  nvkms_alloc      (size_t size,
+                         NvBool zero);
+void   nvkms_free       (void *ptr,
+                         size_t size);
+void*  nvkms_memset     (void *ptr,
+                         NvU8 c,
+                         size_t size);
+void*  nvkms_memcpy     (void *dest,
+                         const void *src,
+                         size_t n);
+void*  nvkms_memmove    (void *dest,
+                         const void *src,
+                         size_t n);
+int    nvkms_memcmp     (const void *s1,
+                         const void *s2,
+                         size_t n);
+size_t nvkms_strlen     (const char *s);
+int    nvkms_strcmp     (const char *s1,
+                         const char *s2);
+char*  nvkms_strncpy    (char *dest,
+                         const char *src,
+                         size_t n);
+void   nvkms_usleep     (NvU64 usec);
+NvU64  nvkms_get_usec   (void);
+int    nvkms_copyin     (void *kptr,
+                         NvU64 uaddr,
+                         size_t n);
+int    nvkms_copyout    (NvU64 uaddr,
+                         const void *kptr,
+                         size_t n);
+void   nvkms_yield      (void);
+void   nvkms_dump_stack (void);
+NvBool nvkms_syncpt_op  (enum NvKmsSyncPtOp op,
+                         NvKmsSyncPtOpParams *params);
+int    nvkms_snprintf   (char *str,
+                         size_t size,
+                         const char *format, ...)
+ __attribute__((format (printf, 3, 4))); + +int nvkms_vsnprintf (char *str, + size_t size, + const char *format, + va_list ap); + +#define NVKMS_LOG_LEVEL_INFO 0 +#define NVKMS_LOG_LEVEL_WARN 1 +#define NVKMS_LOG_LEVEL_ERROR 2 + +void nvkms_log (const int level, + const char *gpuPrefix, + const char *msg); + +/*! + * Refcounted pointer to an object that may be freed while references still + * exist. + * + * This structure is intended to be used for nvkms timers to refer to objects + * that may be freed while timers with references to the object are still + * pending. + * + * When the owner of an nvkms_ref_ptr is freed, the teardown code should call + * nvkms_free_ref_ptr(). That marks the pointer as invalid so that later calls + * to nvkms_dec_ref() (i.e. from a workqueue callback) return NULL rather than + * the pointer originally passed to nvkms_alloc_ref_ptr(). + */ +struct nvkms_ref_ptr; + +/*! + * Allocate and initialize a ref_ptr. + * + * The pointer stored in the ref_ptr is initialized to ptr, and its refcount is + * initialized to 1. + */ +struct nvkms_ref_ptr* nvkms_alloc_ref_ptr(void *ptr); + +/*! + * Clear a ref_ptr. + * + * This function sets the pointer stored in the ref_ptr to NULL and drops the + * reference created by nvkms_alloc_ref_ptr(). This function should be called + * when the object pointed to by the ref_ptr is freed. + * + * A caller should make sure that no code that can call nvkms_inc_ref() can + * execute after nvkms_free_ref_ptr() is called. + */ +void nvkms_free_ref_ptr(struct nvkms_ref_ptr *ref_ptr); + +/*! + * Increment the refcount of a ref_ptr. + * + * This function should be used when a pointer to the ref_ptr is stored + * somewhere. For example, when the ref_ptr is used as the argument to + * nvkms_alloc_timer. + * + * This may be called outside of the nvkms_lock, for example by an RM callback. + */ +void nvkms_inc_ref(struct nvkms_ref_ptr *ref_ptr); + +/*! + * Decrement the refcount of a ref_ptr and extract the embedded pointer. + * + * This should be used by code that needs to atomically determine whether the + * object pointed to by the ref_ptr still exists. To prevent the object from + * being destroyed while the current thread is executing, this should be called + * from inside the nvkms_lock. + */ +void* nvkms_dec_ref(struct nvkms_ref_ptr *ref_ptr); + +typedef void nvkms_timer_proc_t(void *dataPtr, NvU32 dataU32); +typedef struct nvkms_timer_t nvkms_timer_handle_t; + +/*! + * Schedule a callback function to be called in the future. + * + * The callback function 'proc' will be called with the arguments + * 'dataPtr' and 'dataU32' at 'usec' (or later) microseconds from now. + * If usec==0, the callback will be scheduled to be called as soon as + * possible. + * + * The callback function is guaranteed to be called back with the + * nvkms_lock held, and in process context. + * + * Returns an opaque handle, nvkms_timer_handle_t*, or NULL on + * failure. If non-NULL, the caller is responsible for caching the + * handle and eventually calling nvkms_free_timer() to free the + * memory. + * + * The nvkms_lock may be held when nvkms_alloc_timer() is called, but + * the nvkms_lock is not required. + */ +nvkms_timer_handle_t* nvkms_alloc_timer (nvkms_timer_proc_t *proc, + void *dataPtr, NvU32 dataU32, + NvU64 usec); + +/*! + * Schedule a callback function to be called in the future. 
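+ * (A typical caller wraps its object in a ref_ptr via nvkms_alloc_ref_ptr(),
+ * passes that ref_ptr here, and resolves it with nvkms_dec_ref() inside the
+ * callback.)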
+ * + * This function is like nvkms_alloc_timer() except that instead of returning a + * pointer to a structure that the caller should free later, the timer will free + * itself after executing the callback function. This is only intended for + * cases where the caller cannot cache the nvkms_alloc_timer() return value. + */ +NvBool +nvkms_alloc_timer_with_ref_ptr(nvkms_timer_proc_t *proc, + struct nvkms_ref_ptr *ref_ptr, + NvU32 dataU32, NvU64 usec); + +/*! + * Free the nvkms_timer_t object. If the callback function has not + * yet been called, freeing the nvkms_timer_handle_t will guarantee + * that it is not called. + * + * The nvkms_lock must be held when calling nvkms_free_timer(). + */ +void nvkms_free_timer (nvkms_timer_handle_t *handle); + + + +/*! + * Notify the NVKMS kernel interface that the event queue has changed. + * + * \param[in] pOpenKernel This indicates the file descriptor + * ("per-open") of the client whose event queue + * has been updated. This is the pointer + * passed by the kernel interface to nvKmsOpen(). + * \param[in] eventsAvailable If TRUE, a new event has been added to the + * event queue. If FALSE, the last event has + * been removed from the event queue. + */ +void +nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel, + NvBool eventsAvailable); + + +/*! + * Get the "per-open" data (the pointer returned by nvKmsOpen()) + * associated with this fd. + */ +void* nvkms_get_per_open_data(int fd); + + +/*! + * Raise and lower the reference count of the specified GPU. + */ +NvBool nvkms_open_gpu(NvU32 gpuId); +void nvkms_close_gpu(NvU32 gpuId); + + +/*! + * Enumerate nvidia gpus. + */ + +NvU32 nvkms_enumerate_gpus(nv_gpu_info_t *gpu_info); + +/*! + * Availability of write combining support for video memory. + */ + +NvBool nvkms_allow_write_combining(void); + +/*! + * Check if OS supports syncpoints. + */ +NvBool nvkms_kernel_supports_syncpts(void); + +/*! + * Checks whether the fd is associated with an nvidia character device. + */ +NvBool nvkms_fd_is_nvidia_chardev(int fd); + +/*! + * NVKMS interface for kernel space NVKMS clients like KAPI + */ + +struct nvkms_per_open; + +struct nvkms_per_open* nvkms_open_from_kapi +( + struct NvKmsKapiDevice *device +); + +void nvkms_close_from_kapi(struct nvkms_per_open *popen); + +NvBool nvkms_ioctl_from_kapi +( + struct nvkms_per_open *popen, + NvU32 cmd, void *params_address, const size_t params_size +); + +/*! + * Like nvkms_ioctl_from_kapi, but return NV_FALSE instead of waiting if the + * power management read lock cannot be acquired. + */ +NvBool nvkms_ioctl_from_kapi_try_pmlock +( + struct nvkms_per_open *popen, + NvU32 cmd, void *params_address, const size_t params_size +); + +/*! + * APIs for locking. + */ + +typedef struct nvkms_sema_t nvkms_sema_handle_t; + +nvkms_sema_handle_t* + nvkms_sema_alloc (void); +void nvkms_sema_free (nvkms_sema_handle_t *sema); +void nvkms_sema_down (nvkms_sema_handle_t *sema); +void nvkms_sema_up (nvkms_sema_handle_t *sema); + +/*! + * APIs to register/unregister backlight device. 
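+ * (Implemented in the kernel interface layer on top of the Linux backlight
+ * class when CONFIG_BACKLIGHT_CLASS_DEVICE is enabled; otherwise
+ * registration simply returns NULL.)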
+ */ +struct nvkms_backlight_device; + +struct nvkms_backlight_device* +nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv, + NvU32 current_brightness); + +void nvkms_unregister_backlight(struct nvkms_backlight_device *nvkms_bd); + +#endif /* _NVIDIA_MODESET_OS_INTERFACE_H_ */ + diff --git a/kernel-open/nvidia-modeset/nvidia-modeset.Kbuild b/kernel-open/nvidia-modeset/nvidia-modeset.Kbuild new file mode 100644 index 0000000..a980d3b --- /dev/null +++ b/kernel-open/nvidia-modeset/nvidia-modeset.Kbuild @@ -0,0 +1,103 @@ +########################################################################### +# Kbuild fragment for nvidia-modeset.ko +########################################################################### + +# +# Define NVIDIA_MODESET_{SOURCES,OBJECTS} +# + +NVIDIA_MODESET_SOURCES = nvidia-modeset/nvidia-modeset-linux.c +NVIDIA_MODESET_SOURCES += nvidia-modeset/nv-kthread-q.c + +NVIDIA_MODESET_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_MODESET_SOURCES)) + +obj-m += nvidia-modeset.o +nvidia-modeset-y := $(NVIDIA_MODESET_OBJECTS) + +NVIDIA_MODESET_KO = nvidia-modeset/nvidia-modeset.ko + +NV_KERNEL_MODULE_TARGETS += $(NVIDIA_MODESET_KO) + + +# +# nv-modeset-kernel.o_binary is the core binary component of nvidia-modeset.ko, +# shared across all UNIX platforms. Create a symlink, "nv-modeset-kernel.o" +# that points to nv-modeset-kernel.o_binary, and add nv-modeset-kernel.o to the +# list of objects to link into nvidia-modeset.ko. +# +# Note that: +# - The kbuild "clean" rule will delete all objects in nvidia-modeset-y (which +# is why we use a symlink instead of just adding nv-modeset-kernel.o_binary +# to nvidia-modeset-y). +# - kbuild normally uses the naming convention of ".o_shipped" for +# binary files. That is not used here, because the kbuild rule to +# create the "normal" object file from ".o_shipped" does a copy, not +# a symlink. This file is quite large, so a symlink is preferred. +# - The file added to nvidia-modeset-y should be relative to gmake's cwd. +# But, the target for the symlink rule should be prepended with $(obj). +# + +NVIDIA_MODESET_BINARY_OBJECT := $(src)/nvidia-modeset/nv-modeset-kernel.o_binary +NVIDIA_MODESET_BINARY_OBJECT_O := nvidia-modeset/nv-modeset-kernel.o + +targets += $(NVIDIA_MODESET_BINARY_OBJECT_O) + +$(obj)/$(NVIDIA_MODESET_BINARY_OBJECT_O): $(NVIDIA_MODESET_BINARY_OBJECT) FORCE + $(call if_changed,symlink) + +nvidia-modeset-y += $(NVIDIA_MODESET_BINARY_OBJECT_O) + + +# +# Define nvidia-modeset.ko-specific CFLAGS. +# + +NVIDIA_MODESET_CFLAGS += -I$(src)/nvidia-modeset -I$(src)/common/inc +NVIDIA_MODESET_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0 + +# Some Android kernels prohibit driver use of filesystem functions like +# filp_open() and kernel_read(). Disable the NVKMS_CONFIG_FILE_SUPPORTED +# functionality that uses those functions when building for Android. + +PLATFORM_IS_ANDROID ?= 0 + +ifeq ($(PLATFORM_IS_ANDROID),1) + NVIDIA_MODESET_CFLAGS += -DNVKMS_CONFIG_FILE_SUPPORTED=0 +else + NVIDIA_MODESET_CFLAGS += -DNVKMS_CONFIG_FILE_SUPPORTED=1 +endif + +$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_MODESET_OBJECTS), $(NVIDIA_MODESET_CFLAGS)) + + +# +# Build nv-modeset-interface.o from the kernel interface layer +# objects, suitable for further processing by the installer and +# inclusion as a precompiled kernel interface file. 
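+# (The rule below uses "$(LD) -r" to partially link the kernel interface
+# layer objects into that single relocatable object.)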
+#
+
+NVIDIA_MODESET_INTERFACE := nvidia-modeset/nv-modeset-interface.o
+
+# Linux kernel v5.12 and later look at "always-y"; kernel versions before
+# v5.6 look at "always"; kernel versions between v5.6 and v5.12 look at
+# both.
+
+always += $(NVIDIA_MODESET_INTERFACE)
+always-y += $(NVIDIA_MODESET_INTERFACE)
+
+$(obj)/$(NVIDIA_MODESET_INTERFACE): $(addprefix $(obj)/,$(NVIDIA_MODESET_OBJECTS))
+	$(LD) -r -o $@ $^
+
+#
+# Register the conftests needed by nvidia-modeset.ko
+#
+
+NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_MODESET_OBJECTS)
+
+NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += acpi_video_backlight_use_native
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += acpi_video_register_backlight
+NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync
diff --git a/kernel-open/nvidia-modeset/nvkms-ioctl.h b/kernel-open/nvidia-modeset/nvkms-ioctl.h
new file mode 100644
index 0000000..cb27573
--- /dev/null
+++ b/kernel-open/nvidia-modeset/nvkms-ioctl.h
@@ -0,0 +1,73 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(NVKMS_IOCTL_H)
+#define NVKMS_IOCTL_H
+
+#include "nvtypes.h"
+
+/*!
+ * Some of the NVKMS ioctl parameter data structures are quite large
+ * and would exceed the parameter size constraints on at least SunOS.
+ *
+ * Redirect ioctls through a level of indirection: user-space assigns
+ * NvKmsIoctlParams with the real command, size, and pointer, and
+ * passes the NvKmsIoctlParams through the ioctl.
+ */
+
+struct NvKmsIoctlParams {
+    NvU32 cmd;
+    NvU32 size;
+    NvU64 address NV_ALIGN_BYTES(8);
+};
+
+#define NVKMS_IOCTL_MAGIC 'm'
+#define NVKMS_IOCTL_CMD 0
+
+#define NVKMS_IOCTL_IOWR \
+    _IOWR(NVKMS_IOCTL_MAGIC, NVKMS_IOCTL_CMD, struct NvKmsIoctlParams)
+
+/*!
+ * User-space pointers are always passed to NVKMS in an NvU64.
+ * This user-space address is eventually passed into the platform's
+ * copyin/copyout functions, in a void* argument.
+ *
+ * This utility function converts from an NvU64 to a pointer.
+ */
+
+static inline void *nvKmsNvU64ToPointer(NvU64 value)
+{
+    return (void *)(NvUPtr)value;
+}
+
+/*!
+ * Before casting the NvU64 to a void*, check that casting to a pointer
+ * size within the kernel does not lose any precision in the current
+ * environment.
+ */
+static inline NvBool nvKmsNvU64AddressIsSafe(NvU64 address)
+{
+    return address == (NvU64)(NvUPtr)address;
+}
+
+#endif /* NVKMS_IOCTL_H */
diff --git a/kernel-open/nvidia-modeset/nvkms.h b/kernel-open/nvidia-modeset/nvkms.h
new file mode 100644
index 0000000..0ac3f79
--- /dev/null
+++ b/kernel-open/nvidia-modeset/nvkms.h
@@ -0,0 +1,127 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NV_KMS_H__
+#define __NV_KMS_H__
+
+#include "nvtypes.h"
+#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
+#include <linux/stddef.h> /* size_t */
+#else
+#include <stddef.h>       /* size_t */
+#endif
+
+#include "nvkms-kapi.h"
+
+typedef struct nvkms_per_open nvkms_per_open_handle_t;
+
+typedef void nvkms_procfs_out_string_func_t(void *data,
+                                            const char *str);
+
+typedef void nvkms_procfs_proc_t(void *data,
+                                 char *buffer, size_t size,
+                                 nvkms_procfs_out_string_func_t *outString);
+
+/* max number of loops to prevent hanging the kernel if an edge case is hit */
+#define NVKMS_READ_FILE_MAX_LOOPS 1000
+/* max size for any file read by the config system */
+#define NVKMS_READ_FILE_MAX_SIZE 8192
+
+/*
+ * The read file callback should allocate a buffer pointed to by *buff, fill it
+ * with the contents of fname, and return the size of the buffer. The buffer is
+ * not guaranteed to be null-terminated. The caller is responsible for freeing
+ * the buffer with nvkms_free, not nvFree.
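+ * A returned size of 0 indicates failure; the caller must not use *buff in
+ * that case (nvkms_read_config_file_locked(), for example, bails out when
+ * nvkms_config_file_open() returns 0).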
+ */ +typedef size_t nvkms_config_read_file_func_t(char *fname, + char ** const buff); + +typedef struct { + const char *name; + nvkms_procfs_proc_t *func; +} nvkms_procfs_file_t; + +enum NvKmsClientType { + NVKMS_CLIENT_USER_SPACE, + NVKMS_CLIENT_KERNEL_SPACE, +}; + +struct NvKmsPerOpenDev; + +NvBool nvKmsIoctl( + void *pOpenVoid, + NvU32 cmd, + NvU64 paramsAddress, + const size_t paramSize); + +void nvKmsClose(void *pOpenVoid); + +void* nvKmsOpen( + NvU32 pid, + enum NvKmsClientType clientType, + nvkms_per_open_handle_t *pOpenKernel); + +NvBool nvKmsModuleLoad(void); + +void nvKmsModuleUnload(void); + +void nvKmsSuspend(NvU32 gpuId); +void nvKmsResume(NvU32 gpuId); + +void nvKmsGetProcFiles(const nvkms_procfs_file_t **ppProcFiles); + +NvBool nvKmsReadConf(const char *buff, size_t size, + nvkms_config_read_file_func_t readfile); + +void nvKmsKapiHandleEventQueueChange +( + struct NvKmsKapiDevice *device +); + +NvBool nvKmsKapiGetFunctionsTableInternal +( + struct NvKmsKapiFunctionsTable *funcsTable +); + +void nvKmsKapiSuspendResume(NvBool suspend); + +NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness); +NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness); + +NvBool nvKmsOpenDevHasSubOwnerPermissionOrBetter(const struct NvKmsPerOpenDev *pOpenDev); + +NvU32 nvKmsKapiF16ToF32Internal(NvU16 a); + +NvU16 nvKmsKapiF32ToF16Internal(NvU32 a); + +NvU32 nvKmsKapiF32MulInternal(NvU32 a, NvU32 b); + +NvU32 nvKmsKapiF32DivInternal(NvU32 a, NvU32 b); + +NvU32 nvKmsKapiF32AddInternal(NvU32 a, NvU32 b); + +NvU32 nvKmsKapiF32ToUI32RMinMagInternal(NvU32 a, NvBool exact); + +NvU32 nvKmsKapiUI32ToF32Internal(NvU32 a); + +#endif /* __NV_KMS_H__ */ diff --git a/kernel-open/nvidia/detect-self-hosted.h b/kernel-open/nvidia/detect-self-hosted.h new file mode 100644 index 0000000..f7daea4 --- /dev/null +++ b/kernel-open/nvidia/detect-self-hosted.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __DETECT_SELF_HOSTED_H__ +#define __DETECT_SELF_HOSTED_H__ + + + +static inline int pci_devid_is_self_hosted_hopper(unsigned short devid) +{ + return devid >= 0x2340 && devid <= 0x237f; // GH100 Self-Hosted +} + +static inline int pci_devid_is_self_hosted_blackwell(unsigned short devid) +{ + return (devid >= 0x2940 && devid <= 0x297f) // GB100 Self-Hosted + || (devid >= 0x31c0 && devid <= 0x31ff); // GB110 Self-Hosted +} + +static inline int pci_devid_is_self_hosted(unsigned short devid) +{ + return pci_devid_is_self_hosted_hopper(devid) || + pci_devid_is_self_hosted_blackwell(devid) + ; +} + +#endif diff --git a/kernel-open/nvidia/hal/library/cryptlib/cryptlib_aead.h b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_aead.h new file mode 100644 index 0000000..3d8f645 --- /dev/null +++ b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_aead.h @@ -0,0 +1,211 @@ +/** + * Copyright Notice: + * Copyright 2021-2022 DMTF. All rights reserved. + * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md + **/ + +#ifndef CRYPTLIB_AEAD_H +#define CRYPTLIB_AEAD_H + +/*===================================================================================== + * Authenticated Encryption with Associated data (AEAD) Cryptography Primitives + *===================================================================================== + */ + +#if LIBSPDM_AEAD_GCM_SUPPORT +/** + * Performs AEAD AES-GCM authenticated encryption on a data buffer and additional authenticated + * data. + * + * iv_size must be 12, otherwise false is returned. + * key_size must be 16 or 32, otherwise false is returned. + * tag_size must be 12, 13, 14, 15, 16, otherwise false is returned. + * + * @param[in] key Pointer to the encryption key. + * @param[in] key_size Size of the encryption key in bytes. + * @param[in] iv Pointer to the IV value. + * @param[in] iv_size Size of the IV value in bytes. + * @param[in] a_data Pointer to the additional authenticated data. + * @param[in] a_data_size Size of the additional authenticated data in bytes. + * @param[in] data_in Pointer to the input data buffer to be encrypted. + * @param[in] data_in_size Size of the input data buffer in bytes. + * @param[out] tag_out Pointer to a buffer that receives the authentication tag output. + * @param[in] tag_size Size of the authentication tag in bytes. + * @param[out] data_out Pointer to a buffer that receives the encryption output. + * @param[out] data_out_size Size of the output data buffer in bytes. + * + * @retval true AEAD AES-GCM authenticated encryption succeeded. + * @retval false AEAD AES-GCM authenticated encryption failed. + **/ +extern bool libspdm_aead_aes_gcm_encrypt(const uint8_t *key, size_t key_size, + const uint8_t *iv, size_t iv_size, + const uint8_t *a_data, size_t a_data_size, + const uint8_t *data_in, size_t data_in_size, + uint8_t *tag_out, size_t tag_size, + uint8_t *data_out, size_t *data_out_size); + +/** + * Performs AEAD AES-GCM authenticated decryption on a data buffer and additional authenticated + * data. + * + * iv_size must be 12, otherwise false is returned. + * key_size must be 16 or 32, otherwise false is returned. + * tag_size must be 12, 13, 14, 15, 16, otherwise false is returned. + * + * If data verification fails, false is returned. + * + * @param[in] key Pointer to the encryption key. + * @param[in] key_size Size of the encryption key in bytes. + * @param[in] iv Pointer to the IV value. + * @param[in] iv_size Size of the IV value in bytes. 
+ * @param[in] a_data Pointer to the additional authenticated data. + * @param[in] a_data_size Size of the additional authenticated data in bytes. + * @param[in] data_in Pointer to the input data buffer to be decrypted. + * @param[in] data_in_size Size of the input data buffer in bytes. + * @param[in] tag Pointer to a buffer that contains the authentication tag. + * @param[in] tag_size Size of the authentication tag in bytes. + * @param[out] data_out Pointer to a buffer that receives the decryption output. + * @param[out] data_out_size Size of the output data buffer in bytes. + * + * @retval true AEAD AES-GCM authenticated decryption succeeded. + * @retval false AEAD AES-GCM authenticated decryption failed. + **/ +extern bool libspdm_aead_aes_gcm_decrypt(const uint8_t *key, size_t key_size, + const uint8_t *iv, size_t iv_size, + const uint8_t *a_data, size_t a_data_size, + const uint8_t *data_in, size_t data_in_size, + const uint8_t *tag, size_t tag_size, + uint8_t *data_out, size_t *data_out_size); +#endif /* LIBSPDM_AEAD_GCM_SUPPORT */ + +#if LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT +/** + * Performs AEAD ChaCha20Poly1305 authenticated encryption on a data buffer and additional + * authenticated data. + * + * iv_size must be 12, otherwise false is returned. + * key_size must be 32, otherwise false is returned. + * tag_size must be 16, otherwise false is returned. + * + * @param[in] key Pointer to the encryption key. + * @param[in] key_size Size of the encryption key in bytes. + * @param[in] iv Pointer to the IV value. + * @param[in] iv_size Size of the IV value in bytes. + * @param[in] a_data Pointer to the additional authenticated data. + * @param[in] a_data_size Size of the additional authenticated data in bytes. + * @param[in] data_in Pointer to the input data buffer to be encrypted. + * @param[in] data_in_size Size of the input data buffer in bytes. + * @param[out] tag_out Pointer to a buffer that receives the authentication tag output. + * @param[in] tag_size Size of the authentication tag in bytes. + * @param[out] data_out Pointer to a buffer that receives the encryption output. + * @param[out] data_out_size Size of the output data buffer in bytes. + * + * @retval true AEAD ChaCha20Poly1305 authenticated encryption succeeded. + * @retval false AEAD ChaCha20Poly1305 authenticated encryption failed. + **/ +extern bool libspdm_aead_chacha20_poly1305_encrypt( + const uint8_t *key, size_t key_size, const uint8_t *iv, + size_t iv_size, const uint8_t *a_data, size_t a_data_size, + const uint8_t *data_in, size_t data_in_size, uint8_t *tag_out, + size_t tag_size, uint8_t *data_out, size_t *data_out_size); + +/** + * Performs AEAD ChaCha20Poly1305 authenticated decryption on a data buffer and additional authenticated data (AAD). + * + * iv_size must be 12, otherwise false is returned. + * key_size must be 32, otherwise false is returned. + * tag_size must be 16, otherwise false is returned. + * + * If data verification fails, false is returned. + * + * @param[in] key Pointer to the encryption key. + * @param[in] key_size Size of the encryption key in bytes. + * @param[in] iv Pointer to the IV value. + * @param[in] iv_size Size of the IV value in bytes. + * @param[in] a_data Pointer to the additional authenticated data. + * @param[in] a_data_size Size of the additional authenticated data in bytes. + * @param[in] data_in Pointer to the input data buffer to be decrypted. + * @param[in] data_in_size Size of the input data buffer in bytes. 
+ * @param[in] tag Pointer to a buffer that contains the authentication tag. + * @param[in] tag_size Size of the authentication tag in bytes. + * @param[out] data_out Pointer to a buffer that receives the decryption output. + * @param[out] data_out_size Size of the output data buffer in bytes. + * + * @retval true AEAD ChaCha20Poly1305 authenticated decryption succeeded. + * @retval false AEAD ChaCha20Poly1305 authenticated decryption failed. + * + **/ +extern bool libspdm_aead_chacha20_poly1305_decrypt( + const uint8_t *key, size_t key_size, const uint8_t *iv, + size_t iv_size, const uint8_t *a_data, size_t a_data_size, + const uint8_t *data_in, size_t data_in_size, const uint8_t *tag, + size_t tag_size, uint8_t *data_out, size_t *data_out_size); +#endif /* LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT */ + +#if LIBSPDM_AEAD_SM4_SUPPORT +/** + * Performs AEAD SM4-GCM authenticated encryption on a data buffer and additional authenticated + * data. + * + * iv_size must be 12, otherwise false is returned. + * key_size must be 16, otherwise false is returned. + * tag_size must be 16, otherwise false is returned. + * + * @param[in] key Pointer to the encryption key. + * @param[in] key_size Size of the encryption key in bytes. + * @param[in] iv Pointer to the IV value. + * @param[in] iv_size Size of the IV value in bytes. + * @param[in] a_data Pointer to the additional authenticated data. + * @param[in] a_data_size Size of the additional authenticated data in bytes. + * @param[in] data_in Pointer to the input data buffer to be encrypted. + * @param[in] data_in_size Size of the input data buffer in bytes. + * @param[out] tag_out Pointer to a buffer that receives the authentication tag output. + * @param[in] tag_size Size of the authentication tag in bytes. + * @param[out] data_out Pointer to a buffer that receives the encryption output. + * @param[out] data_out_size Size of the output data buffer in bytes. + * + * @retval true AEAD SM4-GCM authenticated encryption succeeded. + * @retval false AEAD SM4-GCM authenticated encryption failed. + **/ +extern bool libspdm_aead_sm4_gcm_encrypt(const uint8_t *key, size_t key_size, + const uint8_t *iv, size_t iv_size, + const uint8_t *a_data, size_t a_data_size, + const uint8_t *data_in, size_t data_in_size, + uint8_t *tag_out, size_t tag_size, + uint8_t *data_out, size_t *data_out_size); + +/** + * Performs AEAD SM4-GCM authenticated decryption on a data buffer and additional authenticated + * data. + * + * iv_size must be 12, otherwise false is returned. + * key_size must be 16, otherwise false is returned. + * tag_size must be 16, otherwise false is returned. + * + * If data verification fails, false is returned. + * + * @param[in] key Pointer to the encryption key. + * @param[in] key_size Size of the encryption key in bytes. + * @param[in] iv Pointer to the IV value. + * @param[in] iv_size Size of the IV value in bytes. + * @param[in] a_data Pointer to the additional authenticated data. + * @param[in] a_data_size Size of the additional authenticated data in bytes. + * @param[in] data_in Pointer to the input data buffer to be decrypted. + * @param[in] data_in_size Size of the input data buffer in bytes. + * @param[in] tag Pointer to a buffer that contains the authentication tag. + * @param[in] tag_size Size of the authentication tag in bytes. + * @param[out] data_out Pointer to a buffer that receives the decryption output. + * @param[out] data_out_size Size of the output data buffer in bytes. 
+ *
+ * @retval true  AEAD SM4-GCM authenticated decryption succeeded.
+ * @retval false AEAD SM4-GCM authenticated decryption failed.
+ **/
+extern bool libspdm_aead_sm4_gcm_decrypt(const uint8_t *key, size_t key_size,
+                                         const uint8_t *iv, size_t iv_size,
+                                         const uint8_t *a_data, size_t a_data_size,
+                                         const uint8_t *data_in, size_t data_in_size,
+                                         const uint8_t *tag, size_t tag_size,
+                                         uint8_t *data_out, size_t *data_out_size);
+#endif /* LIBSPDM_AEAD_SM4_SUPPORT */
+
+#endif /* CRYPTLIB_AEAD_H */
diff --git a/kernel-open/nvidia/hal/library/cryptlib/cryptlib_cert.h b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_cert.h
new file mode 100644
index 0000000..c7e4cfe
--- /dev/null
+++ b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_cert.h
@@ -0,0 +1,416 @@
+/**
+ * Copyright Notice:
+ * Copyright 2021-2024 DMTF. All rights reserved.
+ * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
+ **/
+
+#ifndef CRYPTLIB_CERT_H
+#define CRYPTLIB_CERT_H
+
+#if LIBSPDM_CERT_PARSE_SUPPORT
+
+/**
+ * Retrieve the tag and length of the tag.
+ *
+ * @param ptr    The position in the ASN.1 data.
+ * @param end    End of data.
+ * @param length The variable that will receive the length.
+ * @param tag    The expected tag.
+ *
+ * @retval true  Got the tag successfully.
+ * @retval false Failed to get the tag, or the tag does not match.
+ **/
+extern bool libspdm_asn1_get_tag(uint8_t **ptr, const uint8_t *end, size_t *length, uint32_t tag);
+
+/**
+ * Retrieve the subject bytes from one X.509 certificate.
+ *
+ * If cert is NULL, then return false.
+ * If subject_size is NULL, then return false.
+ * If this interface is not supported, then return false.
+ *
+ * @param[in]      cert         Pointer to the DER-encoded X509 certificate.
+ * @param[in]      cert_size    Size of the X509 certificate in bytes.
+ * @param[out]     cert_subject Pointer to the retrieved certificate subject bytes.
+ * @param[in, out] subject_size The size in bytes of the cert_subject buffer on input,
+ *                              and the size of the returned cert_subject buffer on output.
+ *
+ * @retval true  If subject_size is not 0: the certificate subject was retrieved successfully.
+ * @retval true  If subject_size is 0: the certificate parsed successfully, but it has no subject.
+ * @retval false If subject_size is not 0: the certificate subject was found, but subject_size is too small for the result.
+ * @retval false If subject_size is 0: invalid certificate.
+ **/
+extern bool libspdm_x509_get_subject_name(const uint8_t *cert, size_t cert_size,
+                                          uint8_t *cert_subject,
+                                          size_t *subject_size);
+
+/**
+ * Retrieve the version from one X.509 certificate.
+ *
+ * If cert is NULL, then return false.
+ * If cert_size is 0, then return false.
+ * If this interface is not supported, then return false.
+ *
+ * @param[in]  cert      Pointer to the DER-encoded X509 certificate.
+ * @param[in]  cert_size Size of the X509 certificate in bytes.
+ * @param[out] version   Pointer to the retrieved version integer.
+ *
+ * @retval true
+ * @retval false
+ **/
+extern bool libspdm_x509_get_version(const uint8_t *cert, size_t cert_size, size_t *version);
+
+/**
+ * Retrieve the serialNumber from one X.509 certificate.
+ *
+ * If cert is NULL, then return false.
+ * If cert_size is 0, then return false.
+ * If this interface is not supported, then return false.
+ *
+ * @param[in] cert      Pointer to the DER-encoded X509 certificate.
+ * @param[in] cert_size Size of the X509 certificate in bytes.
+ * @param[out]     serial_number      Pointer to the retrieved certificate serial_number bytes.
+ * @param[in, out] serial_number_size The size in bytes of the serial_number buffer on input,
+ *                                    and the size of the returned serial_number buffer on output.
+ *
+ * @retval true
+ * @retval false
+ **/
+extern bool libspdm_x509_get_serial_number(const uint8_t *cert, size_t cert_size,
+                                           uint8_t *serial_number,
+                                           size_t *serial_number_size);
+
+#if LIBSPDM_ADDITIONAL_CHECK_CERT
+/**
+ * Retrieve the signature algorithm from one X.509 certificate.
+ *
+ * @param[in]     cert      Pointer to the DER-encoded X509 certificate.
+ * @param[in]     cert_size Size of the X509 certificate in bytes.
+ * @param[out]    oid       Signature algorithm Object identifier buffer.
+ * @param[in,out] oid_size  Signature algorithm Object identifier buffer size.
+ *
+ * @retval true  If oid_size is 0: the certificate parsed successfully, but it has no signature algorithm.
+ * @retval true  If oid_size is not 0: the certificate parsed and the signature algorithm was retrieved successfully.
+ * @retval false If oid_size is 0: the certificate failed to parse.
+ * @retval false If oid_size is not 0: the signature algorithm was found, but the input buffer size is too small.
+ **/
+extern bool libspdm_x509_get_signature_algorithm(const uint8_t *cert,
+                                                 size_t cert_size, uint8_t *oid,
+                                                 size_t *oid_size);
+#endif /* LIBSPDM_ADDITIONAL_CHECK_CERT */
+
+/**
+ * Retrieve the issuer bytes from one X.509 certificate.
+ *
+ * If cert is NULL, then return false.
+ * If issuer_size is NULL, then return false.
+ * If this interface is not supported, then return false.
+ *
+ * @param[in]      cert        Pointer to the DER-encoded X509 certificate.
+ * @param[in]      cert_size   Size of the X509 certificate in bytes.
+ * @param[out]     cert_issuer Pointer to the retrieved certificate issuer bytes.
+ * @param[in, out] issuer_size The size in bytes of the cert_issuer buffer on input,
+ *                             and the size of the returned cert_issuer buffer on output.
+ *
+ * @retval true  If issuer_size is not 0: the certificate issuer was retrieved successfully.
+ * @retval true  If issuer_size is 0: the certificate parsed successfully, but it has no issuer.
+ * @retval false If issuer_size is not 0: the certificate issuer was found, but issuer_size is too small for the result.
+ * @retval false If issuer_size is 0: invalid certificate.
+ **/
+extern bool libspdm_x509_get_issuer_name(const uint8_t *cert, size_t cert_size,
+                                         uint8_t *cert_issuer,
+                                         size_t *issuer_size);
+
+/**
+ * Retrieve Extension data from one X.509 certificate.
+ *
+ * @param[in]      cert                Pointer to the DER-encoded X509 certificate.
+ * @param[in]      cert_size           Size of the X509 certificate in bytes.
+ * @param[in]      oid                 Object identifier buffer.
+ * @param[in]      oid_size            Object identifier buffer size.
+ * @param[out]     extension_data      Extension bytes.
+ * @param[in, out] extension_data_size Extension bytes size.
+ *
+ * @retval true  If the returned extension_data_size == 0, cert and oid are valid, but the oid extension is not found;
+ *               if the returned extension_data_size != 0, cert and oid are valid, and the oid extension is found.
+ * @retval false If the returned extension_data_size == 0, cert or oid is invalid;
+ *               if the returned extension_data_size != 0, cert and oid are valid and the oid extension is found,
+ *               but the store buffer is too small.
+
+/**
+ * Retrieve extension data from one X.509 certificate.
+ *
+ * @param[in]      cert                 Pointer to the DER-encoded X509 certificate.
+ * @param[in]      cert_size            Size of the X509 certificate in bytes.
+ * @param[in]      oid                  Object identifier buffer.
+ * @param[in]      oid_size             Object identifier buffer size.
+ * @param[out]     extension_data       Extension bytes.
+ * @param[in, out] extension_data_size  Extension bytes size.
+ *
+ * @retval true   If the returned extension_data_size is 0: cert and oid are valid,
+ *                but the oid extension is not found.
+ *                If the returned extension_data_size is not 0: cert and oid are valid,
+ *                and the oid extension was found.
+ * @retval false  If the returned extension_data_size is 0: cert or oid is invalid.
+ *                If the returned extension_data_size is not 0: the oid extension was
+ *                found, but extension_data is too small for the result.
+ **/
+extern bool libspdm_x509_get_extension_data(const uint8_t *cert, size_t cert_size,
+                                            const uint8_t *oid, size_t oid_size,
+                                            uint8_t *extension_data,
+                                            size_t *extension_data_size);
+
+/**
+ * Retrieve the validity from one X.509 certificate.
+ *
+ * If cert is NULL, then return false.
+ * If from_size or to_size is NULL, then return false.
+ * If this interface is not supported, then return false.
+ *
+ * @param[in]     cert       Pointer to the DER-encoded X509 certificate.
+ * @param[in]     cert_size  Size of the X509 certificate in bytes.
+ * @param[out]    from       notBefore Pointer to date_time object.
+ * @param[in,out] from_size  notBefore date_time object size.
+ * @param[out]    to         notAfter Pointer to date_time object.
+ * @param[in,out] to_size    notAfter date_time object size.
+ *
+ * Note: use libspdm_x509_compare_date_time() to compare date_time objects, and
+ * libspdm_x509_set_date_time() to build a date_time object from a date/time string.
+ *
+ * @retval true   If from_size and to_size are not 0: the certificate validity was
+ *                retrieved successfully.
+ * @retval true   If from_size and to_size are 0: the certificate validity does not exist.
+ * @retval false  If from_size and to_size are not 0: the validity was found, but the
+ *                input buffers are too small.
+ * @retval false  If from_size and to_size are 0: invalid certificate, or the validity
+ *                could not be retrieved.
+ **/
+extern bool libspdm_x509_get_validity(const uint8_t *cert, size_t cert_size,
+                                      uint8_t *from, size_t *from_size, uint8_t *to,
+                                      size_t *to_size);
+
+/**
+ * Convert a date/time string into a date_time object.
+ *
+ * If date_time_str is NULL, then return false.
+ * If date_time_size is NULL, then return false.
+ * If this interface is not supported, then return false.
+ *
+ * @param[in]     date_time_str   date_time string like YYYYMMDDhhmmssZ.
+ *                                Ref: https://www.w3.org/TR/NOTE-datetime
+ *                                Z stands for UTC time.
+ * @param[out]    date_time       Pointer to a date_time object.
+ * @param[in,out] date_time_size  date_time object buffer size.
+ *
+ * @retval true   The date_time object was created successfully.
+ * @retval false  Failed to create the date_time object.
+ **/
+extern bool libspdm_x509_set_date_time(const char *date_time_str, void *date_time,
+                                       size_t *date_time_size);
+
+/**
+ * Compare date_time1 object and date_time2 object.
+ *
+ * If date_time1 is NULL, then return -2.
+ * If date_time2 is NULL, then return -2.
+ * If date_time1 == date_time2, then return 0.
+ * If date_time1 > date_time2, then return 1.
+ * If date_time1 < date_time2, then return -1.
+ *
+ * @param[in] date_time1  Pointer to a date_time object.
+ * @param[in] date_time2  Pointer to a date_time object.
+ *
+ * @retval 0   If date_time1 == date_time2.
+ * @retval 1   If date_time1 > date_time2.
+ * @retval -1  If date_time1 < date_time2.
+ **/
+extern int32_t libspdm_x509_compare_date_time(const void *date_time1, const void *date_time2);
+
+/**
+ * Retrieve the key usage from one X.509 certificate.
+ *
+ * @param[in]  cert       Pointer to the DER-encoded X509 certificate.
+ * @param[in]  cert_size  Size of the X509 certificate in bytes.
+ * @param[out] usage      Key usage (LIBSPDM_CRYPTO_X509_KU_*).
+ *
+ * @retval true   If usage is not 0: the certificate key usage was retrieved successfully.
+ * @retval true   If usage is 0: the certificate parsed successfully, but it has no
+ *                key usage.
+ * @retval false  Invalid certificate, or usage is NULL.
+ **/
+extern bool libspdm_x509_get_key_usage(const uint8_t *cert, size_t cert_size, size_t *usage);
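+
+/*
+ * Illustrative usage sketch (not part of the upstream header) combining the
+ * validity and date_time helpers above to check whether a certificate has
+ * expired at a caller-chosen reference time. The date string and buffer
+ * sizes are example assumptions.
+ *
+ *   uint8_t not_before[64], not_after[64], now[64];
+ *   size_t nb_size = sizeof(not_before);
+ *   size_t na_size = sizeof(not_after);
+ *   size_t now_size = sizeof(now);
+ *
+ *   if (libspdm_x509_get_validity(cert, cert_size,
+ *                                 not_before, &nb_size,
+ *                                 not_after, &na_size) &&
+ *       libspdm_x509_set_date_time("20250101000000Z", now, &now_size)) {
+ *       bool expired = (libspdm_x509_compare_date_time(now, not_after) > 0);
+ *   }
+ */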
+
+/**
+ * Retrieve the extended key usage from one X.509 certificate.
+ *
+ * @param[in]      cert        Pointer to the DER-encoded X509 certificate.
+ * @param[in]      cert_size   Size of the X509 certificate in bytes.
+ * @param[out]     usage       Key usage bytes.
+ * @param[in, out] usage_size  Key usage buffer size in bytes.
+ *
+ * @retval true   If the returned usage_size is 0: the certificate is valid, but the
+ *                extended key usage extension is not found.
+ *                If the returned usage_size is not 0: the certificate is valid, and
+ *                the extended key usage was found.
+ * @retval false  If the returned usage_size is 0: the certificate is invalid.
+ *                If the returned usage_size is not 0: the extended key usage was
+ *                found, but usage is too small for the result.
+ **/
+extern bool libspdm_x509_get_extended_key_usage(const uint8_t *cert,
+                                                size_t cert_size, uint8_t *usage,
+                                                size_t *usage_size);
+
+/**
+ * Retrieve the basic constraints from one X.509 certificate.
+ *
+ * @param[in]      cert                    Pointer to the DER-encoded X509 certificate.
+ * @param[in]      cert_size               Size of the X509 certificate in bytes.
+ * @param[out]     basic_constraints       Basic constraints bytes.
+ * @param[in, out] basic_constraints_size  Basic constraints buffer size in bytes.
+ *
+ * @retval true   If the returned basic_constraints_size is 0: the certificate is valid,
+ *                but the basic constraints extension is not found.
+ *                If the returned basic_constraints_size is not 0: the certificate is
+ *                valid, and the basic constraints extension was found.
+ * @retval false  If the returned basic_constraints_size is 0: the certificate is invalid.
+ *                If the returned basic_constraints_size is not 0: the basic constraints
+ *                extension was found, but basic_constraints is too small for the result.
+ **/
+extern bool libspdm_x509_get_extended_basic_constraints(const uint8_t *cert,
+                                                        size_t cert_size,
+                                                        uint8_t *basic_constraints,
+                                                        size_t *basic_constraints_size);
+
+/**
+ * Verify one X509 certificate was issued by the trusted CA.
+ *
+ * If cert is NULL, then return false.
+ * If ca_cert is NULL, then return false.
+ * If this interface is not supported, then return false.
+ *
+ * @param[in] cert          Pointer to the DER-encoded X509 certificate to be verified.
+ * @param[in] cert_size     Size of the X509 certificate in bytes.
+ * @param[in] ca_cert       Pointer to the DER-encoded trusted CA certificate.
+ * @param[in] ca_cert_size  Size of the CA Certificate in bytes.
+ *
+ * @retval true   The certificate was issued by the trusted CA.
+ * @retval false  Invalid certificate, or the certificate was not issued by the given
+ *                trusted CA.
+ * @retval false  This interface is not supported.
+ **/
+extern bool libspdm_x509_verify_cert(const uint8_t *cert, size_t cert_size,
+                                     const uint8_t *ca_cert, size_t ca_cert_size);
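+
+/*
+ * Illustrative usage sketch (not part of the upstream header) for
+ * libspdm_x509_verify_cert(); `leaf` and `ca` are assumed DER-encoded
+ * certificate buffers with their sizes.
+ *
+ *   if (!libspdm_x509_verify_cert(leaf, leaf_size, ca, ca_size)) {
+ *       // The leaf certificate was not issued by `ca` (or a buffer is
+ *       // malformed); reject it.
+ *   }
+ */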
+
+/**
+ * Verify that an X509 certificate chain is rooted in the given trusted certificate.
+ *
+ * @param[in] cert_chain         One or more ASN.1 DER-encoded X.509 certificates
+ *                               where the first certificate is signed by the Root
+ *                               Certificate or is the Root Certificate itself, and
+ *                               each subsequent certificate is signed by the
+ *                               preceding certificate.
+ * @param[in] cert_chain_length  Total length of the certificate chain, in bytes.
+ *
+ * @param[in] root_cert          Trusted Root Certificate buffer.
+ *
+ * @param[in] root_cert_length   Trusted Root Certificate buffer length.
+ *
+ * @retval true   All certificates in cert_chain verify back to the trusted root.
+ * @retval false  Invalid certificate, or a certificate was not issued by the given
+ *                trusted CA.
+ **/
+extern bool libspdm_x509_verify_cert_chain(const uint8_t *root_cert, size_t root_cert_length,
+                                           const uint8_t *cert_chain,
+                                           size_t cert_chain_length);
+
+/**
+ * Get one X509 certificate from cert_chain.
+ *
+ * @param[in] cert_chain         One or more ASN.1 DER-encoded X.509 certificates
+ *                               where the first certificate is signed by the Root
+ *                               Certificate or is the Root Certificate itself, and
+ *                               each subsequent certificate is signed by the
+ *                               preceding certificate.
+ * @param[in] cert_chain_length  Total length of the certificate chain, in bytes.
+ *
+ * @param[in] cert_index         Index of the certificate. An index of -1 indicates
+ *                               the last certificate in cert_chain.
+ *
+ * @param[out] cert              The certificate at the index of cert_chain.
+ * @param[out] cert_length       The length of the certificate at the index of cert_chain.
+ *
+ * @retval true   Success.
+ * @retval false  Failed to get the certificate from the certificate chain.
+ **/
+extern bool libspdm_x509_get_cert_from_cert_chain(const uint8_t *cert_chain,
+                                                  size_t cert_chain_length,
+                                                  const int32_t cert_index, const uint8_t **cert,
+                                                  size_t *cert_length);
+
+#if (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT)
+/**
+ * Retrieve the RSA public key from one DER-encoded X509 certificate.
+ *
+ * If cert is NULL, then return false.
+ * If rsa_context is NULL, then return false.
+ * If this interface is not supported, then return false.
+ *
+ * @param[in]  cert         Pointer to the DER-encoded X509 certificate.
+ * @param[in]  cert_size    Size of the X509 certificate in bytes.
+ * @param[out] rsa_context  Pointer to newly generated RSA context which contains the
+ *                          retrieved RSA public key component. Use the libspdm_rsa_free()
+ *                          function to free the resource.
+ *
+ * @retval true   The RSA public key was retrieved successfully.
+ * @retval false  Failed to retrieve the RSA public key from the X509 certificate.
+ * @retval false  This interface is not supported.
+ **/
+extern bool libspdm_rsa_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
+                                                 void **rsa_context);
+#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */
+
+#if LIBSPDM_ECDSA_SUPPORT
+/**
+ * Retrieve the EC public key from one DER-encoded X509 certificate.
+ *
+ * @param[in]  cert        Pointer to the DER-encoded X509 certificate.
+ * @param[in]  cert_size   Size of the X509 certificate in bytes.
+ * @param[out] ec_context  Pointer to newly generated EC DSA context which contains the
+ *                         retrieved EC public key component. Use the libspdm_ec_free()
+ *                         function to free the resource.
+ *
+ * If cert is NULL, then return false.
+ * If ec_context is NULL, then return false.
+ *
+ * @retval true   The EC public key was retrieved successfully.
+ * @retval false  Failed to retrieve the EC public key from the X509 certificate.
+ *
+ **/
+extern bool libspdm_ec_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
+                                                void **ec_context);
+#endif /* LIBSPDM_ECDSA_SUPPORT */
+
+#if (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT)
+/**
+ * Retrieve the Ed public key from one DER-encoded X509 certificate.
+ *
+ * @param[in]  cert         Pointer to the DER-encoded X509 certificate.
+ * @param[in]  cert_size    Size of the X509 certificate in bytes.
+ * @param[out] ecd_context  Pointer to newly generated Ed DSA context which contains the
+ *                          retrieved Ed public key component. Use the libspdm_ecd_free()
+ *                          function to free the resource.
+ *
+ * If cert is NULL, then return false.
+ * If ecd_context is NULL, then return false.
+ *
+ * @retval true Ed public key was retrieved successfully.
+ * @retval false Fail to retrieve Ed public key from X509 certificate. + * + **/ +extern bool libspdm_ecd_get_public_key_from_x509(const uint8_t *cert, size_t cert_size, + void **ecd_context); +#endif /* (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT) */ + +#if LIBSPDM_SM2_DSA_SUPPORT +/** + * Retrieve the sm2 public key from one DER-encoded X509 certificate. + * + * @param[in] cert Pointer to the DER-encoded X509 certificate. + * @param[in] cert_size Size of the X509 certificate in bytes. + * @param[out] sm2_context Pointer to newly generated sm2 context which contain the retrieved + * sm2 public key component. Use sm2_free() function to free the + * resource. + * + * If cert is NULL, then return false. + * If sm2_context is NULL, then return false. + * + * @retval true sm2 public key was retrieved successfully. + * @retval false Fail to retrieve sm2 public key from X509 certificate. + * + **/ +extern bool libspdm_sm2_get_public_key_from_x509(const uint8_t *cert, size_t cert_size, + void **sm2_context); +#endif /* LIBSPDM_SM2_DSA_SUPPORT */ + +#endif /* LIBSPDM_CERT_PARSE_SUPPORT */ + +#endif /* CRYPTLIB_CERT_H */ diff --git a/kernel-open/nvidia/hal/library/cryptlib/cryptlib_dh.h b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_dh.h new file mode 100644 index 0000000..779c470 --- /dev/null +++ b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_dh.h @@ -0,0 +1,98 @@ +/** + * Copyright Notice: + * Copyright 2021-2022 DMTF. All rights reserved. + * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md + **/ + +#ifndef CRYPTLIB_DH_H +#define CRYPTLIB_DH_H + +/*===================================================================================== + * Diffie-Hellman Key Exchange Primitives + *===================================================================================== + */ + +#if LIBSPDM_FFDHE_SUPPORT +/** + * Allocates and initializes one Diffie-Hellman context for subsequent use with the NID. + * + * @param nid cipher NID + * + * @return Pointer to the Diffie-Hellman context that has been initialized. + * If the allocations fails, libspdm_dh_new_by_nid() returns NULL. + * If the interface is not supported, libspdm_dh_new_by_nid() returns NULL. + **/ +extern void *libspdm_dh_new_by_nid(size_t nid); + +/** + * Release the specified DH context. + * + * @param[in] dh_context Pointer to the DH context to be released. + **/ +void libspdm_dh_free(void *dh_context); + +/** + * Generates DH public key. + * + * This function generates random secret exponent, and computes the public key, which is + * returned via parameter public_key and public_key_size. DH context is updated accordingly. + * If the public_key buffer is too small to hold the public key, false is returned and + * public_key_size is set to the required buffer size to obtain the public key. + * + * If dh_context is NULL, then return false. + * If public_key_size is NULL, then return false. + * If public_key_size is large enough but public_key is NULL, then return false. + * If this interface is not supported, then return false. + * + * For FFDHE2048, the public_size is 256. + * For FFDHE3072, the public_size is 384. + * For FFDHE4096, the public_size is 512. + * + * @param[in, out] dh_context Pointer to the DH context. + * @param[out] public_key Pointer to the buffer to receive generated public key. + * @param[in, out] public_key_size On input, the size of public_key buffer in bytes. + * On output, the size of data returned in public_key buffer in + * bytes. 
+ * + * @retval true DH public key generation succeeded. + * @retval false DH public key generation failed. + * @retval false public_key_size is not large enough. + * @retval false This interface is not supported. + **/ +extern bool libspdm_dh_generate_key(void *dh_context, uint8_t *public_key, size_t *public_key_size); + +/** + * Computes exchanged common key. + * + * Given peer's public key, this function computes the exchanged common key, based on its own + * context including value of prime modulus and random secret exponent. + * + * If dh_context is NULL, then return false. + * If peer_public_key is NULL, then return false. + * If key_size is NULL, then return false. + * If key is NULL, then return false. + * If key_size is not large enough, then return false. + * If this interface is not supported, then return false. + * + * For FFDHE2048, the peer_public_size and key_size is 256. + * For FFDHE3072, the peer_public_size and key_size is 384. + * For FFDHE4096, the peer_public_size and key_size is 512. + * + * @param[in, out] dh_context Pointer to the DH context. + * @param[in] peer_public_key Pointer to the peer's public key. + * @param[in] peer_public_key_size size of peer's public key in bytes. + * @param[out] key Pointer to the buffer to receive generated key. + * @param[in, out] key_size On input, the size of key buffer in bytes. + * On output, the size of data returned in key buffer in + * bytes. + * + * @retval true DH exchanged key generation succeeded. + * @retval false DH exchanged key generation failed. + * @retval false key_size is not large enough. + * @retval false This interface is not supported. + **/ +extern bool libspdm_dh_compute_key(void *dh_context, const uint8_t *peer_public_key, + size_t peer_public_key_size, uint8_t *key, + size_t *key_size); +#endif /* LIBSPDM_FFDHE_SUPPORT */ +#endif /* CRYPTLIB_DH_H */ diff --git a/kernel-open/nvidia/hal/library/cryptlib/cryptlib_ec.h b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_ec.h new file mode 100644 index 0000000..75389a0 --- /dev/null +++ b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_ec.h @@ -0,0 +1,246 @@ +/** + * Copyright Notice: + * Copyright 2021-2022 DMTF. All rights reserved. + * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md + **/ + +#ifndef CRYPTLIB_EC_H +#define CRYPTLIB_EC_H + +/*===================================================================================== + * Elliptic Curve Primitives + *=====================================================================================*/ + +#if (LIBSPDM_ECDHE_SUPPORT) || (LIBSPDM_ECDSA_SUPPORT) +/** + * Allocates and Initializes one Elliptic Curve context for subsequent use with the NID. + * + * @param nid cipher NID + * + * @return Pointer to the Elliptic Curve context that has been initialized. + * If the allocations fails, libspdm_ec_new_by_nid() returns NULL. + **/ +extern void *libspdm_ec_new_by_nid(size_t nid); + +/** + * Release the specified EC context. + * + * @param[in] ec_context Pointer to the EC context to be released. + **/ +extern void libspdm_ec_free(void *ec_context); + +#if LIBSPDM_FIPS_MODE +/** + * Sets the private key component into the established EC context. + * + * For P-256, the private_key_size is 32 byte. + * For P-384, the private_key_size is 48 byte. + * For P-521, the private_key_size is 66 byte. + * + * @param[in, out] ec_context Pointer to EC context being set. + * @param[in] private_key Pointer to the private key buffer. 
+ * @param[in]      private_key_size  The size of the private key buffer in bytes.
+ *
+ * @retval true   The EC private key component was set successfully.
+ * @retval false  Invalid EC private key component.
+ *
+ **/
+extern bool libspdm_ec_set_priv_key(void *ec_context, const uint8_t *private_key,
+                                    size_t private_key_size);
+
+/**
+ * Sets the public key component into the established EC context.
+ *
+ * For P-256, the public_key_size is 64. The first 32 bytes are X, the second 32 bytes are Y.
+ * For P-384, the public_key_size is 96. The first 48 bytes are X, the second 48 bytes are Y.
+ * For P-521, the public_key_size is 132. The first 66 bytes are X, the second 66 bytes are Y.
+ *
+ * @param[in, out] ec_context       Pointer to EC context being set.
+ * @param[in]      public_key       Pointer to the public key buffer (X followed by Y).
+ * @param[in]      public_key_size  The size of the public_key buffer in bytes.
+ *
+ * @retval true   The EC public key component was set successfully.
+ * @retval false  Invalid EC public key component.
+ **/
+extern bool libspdm_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
+                                   size_t public_key_size);
+#endif /* LIBSPDM_FIPS_MODE */
+
+#endif /* (LIBSPDM_ECDHE_SUPPORT) || (LIBSPDM_ECDSA_SUPPORT) */
+
+#if LIBSPDM_ECDHE_SUPPORT
+/**
+ * Generates an EC key and returns the EC public key (X, Y).
+ *
+ * This function generates a random secret, and computes the public key (X, Y), which is
+ * returned via the parameters public_key and public_key_size.
+ * X is the first half of public_key with size being public_key_size / 2,
+ * Y is the second half of public_key with size being public_key_size / 2.
+ * The EC context is updated accordingly.
+ * If the public_key buffer is too small to hold the public X, Y, false is returned and
+ * public_key_size is set to the required buffer size to obtain the public X, Y.
+ *
+ * For P-256, the public_key_size is 64. The first 32 bytes are X, the second 32 bytes are Y.
+ * For P-384, the public_key_size is 96. The first 48 bytes are X, the second 48 bytes are Y.
+ * For P-521, the public_key_size is 132. The first 66 bytes are X, the second 66 bytes are Y.
+ *
+ * If ec_context is NULL, then return false.
+ * If public_key_size is NULL, then return false.
+ * If public_key_size is large enough but public_key is NULL, then return false.
+ *
+ * @param[in, out] ec_context       Pointer to the EC context.
+ * @param[out]     public_key       Pointer to the buffer to receive the generated public X, Y.
+ * @param[in, out] public_key_size  On input, the size of the public_key buffer in bytes.
+ *                                  On output, the size of the data returned in public_key
+ *                                  in bytes.
+ *
+ * @retval true   EC public X, Y generation succeeded.
+ * @retval false  EC public X, Y generation failed.
+ * @retval false  public_key_size is not large enough.
+ **/
+extern bool libspdm_ec_generate_key(void *ec_context, uint8_t *public_key, size_t *public_key_size);
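+
+/*
+ * Illustrative ECDHE sketch (not part of the upstream header) pairing
+ * libspdm_ec_generate_key() with libspdm_ec_compute_key(), declared just
+ * below. It assumes the P-256 NID macro from cryptlib.h and a peer that
+ * performs the mirror-image steps to supply `peer_pub`/`peer_pub_size`.
+ *
+ *   void *ec = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP256R1);
+ *   uint8_t my_pub[64];
+ *   size_t my_pub_size = sizeof(my_pub);   // 64 for P-256 (X || Y)
+ *   uint8_t shared[32];
+ *   size_t shared_size = sizeof(shared);   // 32 for P-256
+ *
+ *   if (ec != NULL &&
+ *       libspdm_ec_generate_key(ec, my_pub, &my_pub_size) &&
+ *       // ... exchange my_pub / peer_pub with the peer ...
+ *       libspdm_ec_compute_key(ec, peer_pub, peer_pub_size,
+ *                              shared, &shared_size)) {
+ *       // shared now holds the 32-byte ECDH secret.
+ *   }
+ *   libspdm_ec_free(ec);
+ */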
+
+/**
+ * Computes the exchanged common key.
+ *
+ * Given the peer's public key (X, Y), this function computes the exchanged common key,
+ * based on its own context including the value of the curve parameter and the random secret.
+ * X is the first half of peer_public with size being peer_public_size / 2,
+ * Y is the second half of peer_public with size being peer_public_size / 2.
+ *
+ * If ec_context is NULL, then return false.
+ * If peer_public is NULL, then return false.
+ * If peer_public_size is 0, then return false.
+ * If key is NULL, then return false.
+ * If key_size is not large enough, then return false.
+ *
+ * For P-256, the peer_public_size is 64. The first 32 bytes are X, the second 32 bytes are Y.
+ *            The key_size is 32.
+ * For P-384, the peer_public_size is 96. The first 48 bytes are X, the second 48 bytes are Y.
+ *            The key_size is 48.
+ * For P-521, the peer_public_size is 132. The first 66 bytes are X, the second 66 bytes are Y.
+ *            The key_size is 66.
+ *
+ * @param[in, out] ec_context        Pointer to the EC context.
+ * @param[in]      peer_public       Pointer to the peer's public X, Y.
+ * @param[in]      peer_public_size  Size of the peer's public X, Y in bytes.
+ * @param[out]     key               Pointer to the buffer to receive the generated key.
+ * @param[in, out] key_size          On input, the size of the key buffer in bytes.
+ *                                   On output, the size of the data returned in key in bytes.
+ *
+ * @retval true   EC exchanged key generation succeeded.
+ * @retval false  EC exchanged key generation failed.
+ * @retval false  key_size is not large enough.
+ **/
+extern bool libspdm_ec_compute_key(void *ec_context, const uint8_t *peer_public,
+                                   size_t peer_public_size, uint8_t *key,
+                                   size_t *key_size);
+#endif /* LIBSPDM_ECDHE_SUPPORT */
+
+#if LIBSPDM_ECDSA_SUPPORT
+/**
+ * Generates an Elliptic Curve context from DER-encoded public key data.
+ *
+ * The public key is ASN.1 DER-encoded as RFC7250 describes,
+ * namely, the SubjectPublicKeyInfo structure of a X.509 certificate.
+ *
+ * @param[in]  der_data    Pointer to the DER-encoded public key data.
+ * @param[in]  der_size    Size of the DER-encoded public key data in bytes.
+ * @param[out] ec_context  Pointer to newly generated EC context which contains the
+ *                         EC public key component.
+ *                         Use the libspdm_ec_free() function to free the resource.
+ *
+ * If der_data is NULL, then return false.
+ * If ec_context is NULL, then return false.
+ *
+ * @retval true   The EC context was generated successfully.
+ * @retval false  Invalid DER public key data.
+ *
+ **/
+extern bool libspdm_ec_get_public_key_from_der(const uint8_t *der_data,
+                                               size_t der_size,
+                                               void **ec_context);
+
+/**
+ * Carries out the EC-DSA signature.
+ *
+ * This function carries out the EC-DSA signature.
+ * If the signature buffer is too small to hold the contents of the signature, false
+ * is returned and sig_size is set to the required buffer size to obtain the signature.
+ *
+ * If ec_context is NULL, then return false.
+ * If message_hash is NULL, then return false.
+ * hash_size must match hash_nid; hash_nid can be SHA256, SHA384, SHA512, SHA3_256,
+ * SHA3_384, or SHA3_512.
+ * If sig_size is large enough but signature is NULL, then return false.
+ *
+ * For P-256, the sig_size is 64. The first 32 bytes are R, the second 32 bytes are S.
+ * For P-384, the sig_size is 96. The first 48 bytes are R, the second 48 bytes are S.
+ * For P-521, the sig_size is 132. The first 66 bytes are R, the second 66 bytes are S.
+ *
+ * @param[in]      ec_context    Pointer to EC context for signature generation.
+ * @param[in]      hash_nid      hash NID
+ * @param[in]      message_hash  Pointer to the octet message hash to be signed.
+ * @param[in]      hash_size     Size of the message hash in bytes.
+ * @param[out]     signature     Pointer to the buffer to receive the EC-DSA signature.
+ * @param[in, out] sig_size      On input, the size of the signature buffer in bytes.
+ *                               On output, the size of the data returned in signature
+ *                               in bytes.
+ *
+ * @retval true   Signature successfully generated in EC-DSA.
+ * @retval false  Signature generation failed.
+ * @retval false  sig_size is too small.
+ **/
+extern bool libspdm_ecdsa_sign(void *ec_context, size_t hash_nid,
+                               const uint8_t *message_hash, size_t hash_size,
+                               uint8_t *signature, size_t *sig_size);
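+
+/*
+ * Illustrative sketch (not part of the upstream header) pairing
+ * libspdm_ecdsa_sign() with libspdm_ecdsa_verify(), declared further below.
+ * It assumes a P-256 context `ec` with a private key already loaded, a
+ * precomputed 32-byte SHA-256 digest in `digest`, and the hash NID macro
+ * from cryptlib.h.
+ *
+ *   uint8_t sig[64];                 // R || S for P-256
+ *   size_t sig_size = sizeof(sig);
+ *
+ *   if (libspdm_ecdsa_sign(ec, LIBSPDM_CRYPTO_NID_SHA256,
+ *                          digest, 32, sig, &sig_size) &&
+ *       libspdm_ecdsa_verify(ec, LIBSPDM_CRYPTO_NID_SHA256,
+ *                            digest, 32, sig, sig_size)) {
+ *       // The signature round-trips: sign succeeded and verify accepted it.
+ *   }
+ */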
+
+#if LIBSPDM_FIPS_MODE
+/**
+ * Carries out the EC-DSA signature with a caller-supplied random function. This API can
+ * be used for FIPS testing.
+ *
+ * @param[in]      ec_context    Pointer to EC context for signature generation.
+ * @param[in]      hash_nid      hash NID
+ * @param[in]      message_hash  Pointer to the octet message hash to be signed.
+ * @param[in]      hash_size     Size of the message hash in bytes.
+ * @param[out]     signature     Pointer to the buffer to receive the EC-DSA signature.
+ * @param[in, out] sig_size      On input, the size of the signature buffer in bytes.
+ *                               On output, the size of the data returned in signature
+ *                               in bytes.
+ * @param[in]      random_func   Random number function.
+ *
+ * @retval true   Signature successfully generated in EC-DSA.
+ * @retval false  Signature generation failed.
+ * @retval false  sig_size is too small.
+ **/
+extern bool libspdm_ecdsa_sign_ex(void *ec_context, size_t hash_nid,
+                                  const uint8_t *message_hash, size_t hash_size,
+                                  uint8_t *signature, size_t *sig_size,
+                                  int (*random_func)(void *, unsigned char *, size_t));
+#endif /* LIBSPDM_FIPS_MODE */
+
+/**
+ * Verifies the EC-DSA signature.
+ *
+ * If ec_context is NULL, then return false.
+ * If message_hash is NULL, then return false.
+ * If signature is NULL, then return false.
+ * hash_size must match hash_nid; hash_nid can be SHA256, SHA384, SHA512, SHA3_256,
+ * SHA3_384, or SHA3_512.
+ *
+ * For P-256, the sig_size is 64. The first 32 bytes are R, the second 32 bytes are S.
+ * For P-384, the sig_size is 96. The first 48 bytes are R, the second 48 bytes are S.
+ * For P-521, the sig_size is 132. The first 66 bytes are R, the second 66 bytes are S.
+ *
+ * @param[in] ec_context    Pointer to EC context for signature verification.
+ * @param[in] hash_nid      hash NID
+ * @param[in] message_hash  Pointer to the octet message hash to be checked.
+ * @param[in] hash_size     Size of the message hash in bytes.
+ * @param[in] signature     Pointer to the EC-DSA signature to be verified.
+ * @param[in] sig_size      Size of the signature in bytes.
+ *
+ * @retval true   Valid signature encoded in EC-DSA.
+ * @retval false  Invalid signature or invalid EC context.
+ **/
+extern bool libspdm_ecdsa_verify(void *ec_context, size_t hash_nid,
+                                 const uint8_t *message_hash, size_t hash_size,
+                                 const uint8_t *signature, size_t sig_size);
+#endif /* LIBSPDM_ECDSA_SUPPORT */
+#endif /* CRYPTLIB_EC_H */
diff --git a/kernel-open/nvidia/hal/library/cryptlib/cryptlib_ecd.h b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_ecd.h
new file mode 100644
index 0000000..74e3bca
--- /dev/null
+++ b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_ecd.h
@@ -0,0 +1,173 @@
+/**
+ * Copyright Notice:
+ * Copyright 2021-2022 DMTF. All rights reserved.
+ * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
+ **/
+
+#ifndef CRYPTLIB_ECD_H
+#define CRYPTLIB_ECD_H
+
+/*=====================================================================================
+ * Edwards-Curve Primitives
+ *=====================================================================================*/
+
+#if (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT)
+/**
+ * Allocates and initializes one Edwards-Curve context for subsequent use with the NID.
+ *
+ * @param nid  cipher NID
+ *
+ * @return Pointer to the Edwards-Curve context that has been initialized.
+ *         If the allocation fails, libspdm_ecd_new_by_nid() returns NULL.
+ **/
+extern void *libspdm_ecd_new_by_nid(size_t nid);
+
+/**
+ * Generates an Edwards-Curve context from DER-encoded public key data.
+ *
+ * The public key is ASN.1 DER-encoded as RFC7250 describes,
+ * namely, the SubjectPublicKeyInfo structure of a X.509 certificate.
+ *
+ * @param[in]  der_data     Pointer to the DER-encoded public key data.
+ * @param[in]  der_size     Size of the DER-encoded public key data in bytes.
+ * @param[out] ecd_context  Pointer to newly generated Ed context which contains the
+ *                          Ed public key component.
+ *                          Use the libspdm_ecd_free() function to free the resource.
+ *
+ * If der_data is NULL, then return false.
+ * If ecd_context is NULL, then return false.
+ *
+ * @retval true   The Ed context was generated successfully.
+ * @retval false  Invalid DER public key data.
+ *
+ **/
+extern bool libspdm_ecd_get_public_key_from_der(const uint8_t *der_data,
+                                                size_t der_size,
+                                                void **ecd_context);
+
+/**
+ * Release the specified Ed context.
+ *
+ * @param[in] ecd_context  Pointer to the Ed context to be released.
+ **/
+extern void libspdm_ecd_free(void *ecd_context);
+
+/**
+ * Sets the public key component into the established Ed context.
+ *
+ * For ed25519, the public_key_size is 32.
+ * For ed448, the public_key_size is 57.
+ *
+ * @param[in, out] ecd_context      Pointer to Ed context being set.
+ * @param[in]      public_key       Pointer to the public key buffer.
+ * @param[in]      public_key_size  The size of the public_key buffer in bytes.
+ *
+ * @retval true   The Ed public key component was set successfully.
+ * @retval false  Invalid Ed public key component.
+ **/
+extern bool libspdm_ecd_set_pub_key(void *ecd_context, const uint8_t *public_key,
+                                    size_t public_key_size);
+
+/**
+ * Sets the private key component into the established Ed context.
+ *
+ * For ed25519, the private_key_size is 32.
+ * For ed448, the private_key_size is 57.
+ *
+ * @param[in, out] ecd_context       Pointer to Ed context being set.
+ * @param[in]      private_key       Pointer to the private key buffer.
+ * @param[in]      private_key_size  The size of the private_key buffer in bytes.
+ *
+ * @retval true   The Ed private key component was set successfully.
+ * @retval false  Invalid Ed private key component.
+ *
+ **/
+extern bool libspdm_ecd_set_pri_key(void *ecd_context, const uint8_t *private_key,
+                                    size_t private_key_size);
+
+/**
+ * Gets the public key component from the established Ed context.
+ *
+ * For ed25519, the public_key_size is 32.
+ * For ed448, the public_key_size is 57.
+ *
+ * @param[in, out] ecd_context      Pointer to Ed context being queried.
+ * @param[out]     public_key       Pointer to the buffer to receive the public key.
+ * @param[in, out] public_key_size  On input, the size of the public_key buffer in bytes.
+ *                                  On output, the size of the data returned in public_key
+ *                                  in bytes.
+ *
+ * @retval true   The Ed key component was retrieved successfully.
+ * @retval false  Invalid Ed public key component.
+ **/
+extern bool libspdm_ecd_get_pub_key(void *ecd_context, uint8_t *public_key,
+                                    size_t *public_key_size);
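+
+/*
+ * Illustrative Ed25519 sketch (not part of the upstream header) for
+ * libspdm_eddsa_sign()/libspdm_eddsa_verify(), declared just below. It
+ * assumes a context `ecd` with a private key already set via
+ * libspdm_ecd_set_pri_key() and the null NID macro from cryptlib.h; EdDSA
+ * signs the raw message, so no digest is computed first.
+ *
+ *   uint8_t sig[64];                 // R || S for ed25519
+ *   size_t sig_size = sizeof(sig);
+ *
+ *   if (libspdm_eddsa_sign(ecd, LIBSPDM_CRYPTO_NID_NULL, NULL, 0,
+ *                          msg, msg_size, sig, &sig_size) &&
+ *       libspdm_eddsa_verify(ecd, LIBSPDM_CRYPTO_NID_NULL, NULL, 0,
+ *                            msg, msg_size, sig, sig_size)) {
+ *       // The signature round-trips.
+ *   }
+ */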
+
+/**
+ * Carries out the Ed-DSA signature.
+ *
+ * This function carries out the Ed-DSA signature.
+ * If the signature buffer is too small to hold the contents of the signature, false
+ * is returned and sig_size is set to the required buffer size to obtain the signature.
+ *
+ * If ecd_context is NULL, then return false.
+ * If message is NULL, then return false.
+ * hash_nid must be the null NID (the raw message is signed; no pre-hash is used).
+ * If sig_size is large enough but signature is NULL, then return false.
+ *
+ * For ed25519, context must be NULL and context_size must be 0.
+ * For ed448, context must be at most 255 octets.
+ *
+ * For ed25519, the sig_size is 64. The first 32 bytes are R, the second 32 bytes are S.
+ * For ed448, the sig_size is 114. The first 57 bytes are R, the second 57 bytes are S.
+ *
+ * @param[in]      ecd_context   Pointer to Ed context for signature generation.
+ * @param[in]      hash_nid      hash NID
+ * @param[in]      context       The EdDSA signing context.
+ * @param[in]      context_size  Size of the EdDSA signing context.
+ * @param[in]      message       Pointer to the octet message to be signed (before hash).
+ * @param[in]      size          Size of the message in bytes.
+ * @param[out]     signature     Pointer to the buffer to receive the Ed-DSA signature.
+ * @param[in, out] sig_size      On input, the size of the signature buffer in bytes.
+ *                               On output, the size of the data returned in signature
+ *                               in bytes.
+ *
+ * @retval true   Signature successfully generated in Ed-DSA.
+ * @retval false  Signature generation failed.
+ * @retval false  sig_size is too small.
+ **/
+extern bool libspdm_eddsa_sign(const void *ecd_context, size_t hash_nid,
+                               const uint8_t *context, size_t context_size,
+                               const uint8_t *message, size_t size, uint8_t *signature,
+                               size_t *sig_size);
+
+/**
+ * Verifies the Ed-DSA signature.
+ *
+ * If ecd_context is NULL, then return false.
+ * If message is NULL, then return false.
+ * If signature is NULL, then return false.
+ * hash_nid must be the null NID (the raw message is signed; no pre-hash is used).
+ *
+ * For ed25519, context must be NULL and context_size must be 0.
+ * For ed448, context must be at most 255 octets.
+ *
+ * For ed25519, the sig_size is 64. The first 32 bytes are R, the second 32 bytes are S.
+ * For ed448, the sig_size is 114. The first 57 bytes are R, the second 57 bytes are S.
+ *
+ * @param[in] ecd_context   Pointer to Ed context for signature verification.
+ * @param[in] hash_nid      hash NID
+ * @param[in] context       The EdDSA signing context.
+ * @param[in] context_size  Size of the EdDSA signing context.
+ * @param[in] message       Pointer to the octet message to be checked (before hash).
+ * @param[in] size          Size of the message in bytes.
+ * @param[in] signature     Pointer to the Ed-DSA signature to be verified.
+ * @param[in] sig_size      Size of the signature in bytes.
+ *
+ * @retval true   Valid signature encoded in Ed-DSA.
+ * @retval false  Invalid signature or invalid Ed context.
+ **/
+extern bool libspdm_eddsa_verify(const void *ecd_context, size_t hash_nid,
+                                 const uint8_t *context, size_t context_size,
+                                 const uint8_t *message, size_t size,
+                                 const uint8_t *signature, size_t sig_size);
+#endif /* (LIBSPDM_EDDSA_ED25519_SUPPORT) || (LIBSPDM_EDDSA_ED448_SUPPORT) */
+#endif /* CRYPTLIB_ECD_H */
diff --git a/kernel-open/nvidia/hal/library/cryptlib/cryptlib_hash.h b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_hash.h
new file mode 100644
index 0000000..e59c567
--- /dev/null
+++ b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_hash.h
@@ -0,0 +1,772 @@
+/**
+ * Copyright Notice:
+ * Copyright 2021-2022 DMTF. All rights reserved.
+ * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
+ **/
+
+#ifndef CRYPTLIB_HASH_H
+#define CRYPTLIB_HASH_H
+
+/* SHA-256 digest size in bytes. */
+#define LIBSPDM_SHA256_DIGEST_SIZE 32
+
+/* SHA-384 digest size in bytes. */
+#define LIBSPDM_SHA384_DIGEST_SIZE 48
+
+/* SHA-512 digest size in bytes. */
+#define LIBSPDM_SHA512_DIGEST_SIZE 64
+
+/* SHA3-256 digest size in bytes. */
+#define LIBSPDM_SHA3_256_DIGEST_SIZE 32
+
+/* SHA3-384 digest size in bytes. */
+#define LIBSPDM_SHA3_384_DIGEST_SIZE 48
+
+/* SHA3-512 digest size in bytes. */
+#define LIBSPDM_SHA3_512_DIGEST_SIZE 64
+
+/* SM3_256 digest size in bytes. */
+#define LIBSPDM_SM3_256_DIGEST_SIZE 32
+
+/*=====================================================================================
+ * One-way cryptographic hash SHA2 primitives.
+ *=====================================================================================
+ */
+#if LIBSPDM_SHA256_SUPPORT
+/**
+ * Allocates and initializes one HASH_CTX context for subsequent SHA-256 use.
+ *
+ * @return Pointer to the HASH_CTX context that has been initialized.
+ *         If the allocation fails, libspdm_sha256_new() returns NULL.
+ **/
+extern void *libspdm_sha256_new(void);
+
+/**
+ * Release the specified HASH_CTX context.
+ *
+ * @param[in] sha256_context  Pointer to the HASH_CTX context to be released.
+ **/
+extern void libspdm_sha256_free(void *sha256_context);
+
+/**
+ * Initializes user-supplied memory pointed to by sha256_context as a SHA-256 hash context
+ * for subsequent use.
+ *
+ * If sha256_context is NULL, then return false.
+ *
+ * @param[out] sha256_context  Pointer to SHA-256 context being initialized.
+ *
+ * @retval true   SHA-256 context initialization succeeded.
+ * @retval false  SHA-256 context initialization failed.
+ **/
+extern bool libspdm_sha256_init(void *sha256_context);
+
+/**
+ * Makes a copy of an existing SHA-256 context.
+ *
+ * If sha256_context is NULL, then return false.
+ * If new_sha256_context is NULL, then return false.
+ * If this interface is not supported, then return false.
+ *
+ * @param[in]  sha256_context      Pointer to SHA-256 context being copied.
+ * @param[out] new_sha256_context  Pointer to new SHA-256 context.
+ *
+ * @retval true   SHA-256 context copy succeeded.
+ * @retval false  SHA-256 context copy failed.
+ * @retval false  This interface is not supported.
+ **/
+extern bool libspdm_sha256_duplicate(const void *sha256_context, void *new_sha256_context);
+
+/**
+ * Digests the input data and updates the SHA-256 context.
+ *
+ * This function performs SHA-256 digest on a data buffer of the specified size.
+ * It can be called multiple times to compute the digest of long or discontinuous data streams.
+ * The SHA-256 context should be already correctly initialized by libspdm_sha256_init(), and
+ * must not have been finalized by libspdm_sha256_final(). Behavior with an invalid context is
+ * undefined.
+ *
+ * If sha256_context is NULL, then return false.
+ *
+ * @param[in, out] sha256_context  Pointer to the SHA-256 context.
+ * @param[in]      data            Pointer to the buffer containing the data to be hashed.
+ * @param[in]      data_size       Size of the data buffer in bytes.
+ *
+ * @retval true   SHA-256 data digest succeeded.
+ * @retval false  SHA-256 data digest failed.
+ **/
+extern bool libspdm_sha256_update(void *sha256_context, const void *data, size_t data_size);
+
+/**
+ * Completes computation of the SHA-256 digest value.
+ *
+ * This function completes SHA-256 hash computation and populates the digest value into
+ * the specified memory. After this function has been called, the SHA-256 context cannot
+ * be used again. The SHA-256 context should be already correctly initialized by
+ * libspdm_sha256_init(), and must not have been finalized by libspdm_sha256_final().
+ * Behavior with an invalid SHA-256 context is undefined.
+ *
+ * If sha256_context is NULL, then return false.
+ * If hash_value is NULL, then return false.
+ *
+ * @param[in, out] sha256_context  Pointer to the SHA-256 context.
+ * @param[out]     hash_value      Pointer to a buffer that receives the SHA-256 digest
+ *                                 value (32 bytes).
+ *
+ * @retval true   SHA-256 digest computation succeeded.
+ * @retval false  SHA-256 digest computation failed.
+ **/
+extern bool libspdm_sha256_final(void *sha256_context, uint8_t *hash_value);
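+
+/*
+ * Illustrative sketch (not part of the upstream header) of the
+ * allocate/init/update/final lifecycle shared by every hash in this file,
+ * shown for SHA-256; `part1`/`part2` are assumed input buffers. For one-shot
+ * hashing of a contiguous buffer, libspdm_sha256_hash_all() (declared just
+ * below) does the same in a single call.
+ *
+ *   uint8_t digest[LIBSPDM_SHA256_DIGEST_SIZE];
+ *   void *ctx = libspdm_sha256_new();
+ *
+ *   if (ctx != NULL &&
+ *       libspdm_sha256_init(ctx) &&
+ *       libspdm_sha256_update(ctx, part1, part1_size) &&
+ *       libspdm_sha256_update(ctx, part2, part2_size) &&   // streaming
+ *       libspdm_sha256_final(ctx, digest)) {
+ *       // digest holds the 32-byte SHA-256 of part1 || part2.
+ *   }
+ *   libspdm_sha256_free(ctx);
+ */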
+
+/**
+ * Computes the SHA-256 message digest of an input data buffer.
+ *
+ * This function performs the SHA-256 message digest of a given data buffer, and places
+ * the digest value into the specified memory.
+ *
+ * If this interface is not supported, then return false.
+ *
+ * @param[in]  data        Pointer to the buffer containing the data to be hashed.
+ * @param[in]  data_size   Size of the data buffer in bytes.
+ * @param[out] hash_value  Pointer to a buffer that receives the SHA-256 digest value (32 bytes).
+ *
+ * @retval true   SHA-256 digest computation succeeded.
+ * @retval false  SHA-256 digest computation failed.
+ * @retval false  This interface is not supported.
+ **/
+extern bool libspdm_sha256_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
+#endif /* LIBSPDM_SHA256_SUPPORT */
+
+#if LIBSPDM_SHA384_SUPPORT
+/**
+ * Allocates and initializes one HASH_CTX context for subsequent SHA-384 use.
+ *
+ * @return Pointer to the HASH_CTX context that has been initialized.
+ *         If the allocation fails, libspdm_sha384_new() returns NULL.
+ **/
+extern void *libspdm_sha384_new(void);
+
+/**
+ * Release the specified HASH_CTX context.
+ *
+ * @param[in] sha384_context  Pointer to the HASH_CTX context to be released.
+ **/
+extern void libspdm_sha384_free(void *sha384_context);
+
+/**
+ * Initializes user-supplied memory pointed to by sha384_context as a SHA-384 hash context
+ * for subsequent use.
+ *
+ * If sha384_context is NULL, then return false.
+ *
+ * @param[out] sha384_context  Pointer to SHA-384 context being initialized.
+ *
+ * @retval true   SHA-384 context initialization succeeded.
+ * @retval false  SHA-384 context initialization failed.
+ **/
+extern bool libspdm_sha384_init(void *sha384_context);
+
+/**
+ * Makes a copy of an existing SHA-384 context.
+ *
+ * If sha384_context is NULL, then return false.
+ * If new_sha384_context is NULL, then return false.
+ * If this interface is not supported, then return false.
+ *
+ * @param[in]  sha384_context      Pointer to SHA-384 context being copied.
+ * @param[out] new_sha384_context  Pointer to new SHA-384 context.
+ *
+ * @retval true   SHA-384 context copy succeeded.
+ * @retval false  SHA-384 context copy failed.
+ * @retval false  This interface is not supported.
+ **/
+extern bool libspdm_sha384_duplicate(const void *sha384_context, void *new_sha384_context);
+
+/**
+ * Digests the input data and updates the SHA-384 context.
+ *
+ * This function performs SHA-384 digest on a data buffer of the specified size.
+ * It can be called multiple times to compute the digest of long or discontinuous data streams.
+ * The SHA-384 context should be already correctly initialized by libspdm_sha384_init(), and
+ * must not have been finalized by libspdm_sha384_final(). Behavior with an invalid context is
+ * undefined.
+ *
+ * If sha384_context is NULL, then return false.
+ *
+ * @param[in, out] sha384_context  Pointer to the SHA-384 context.
+ * @param[in]      data            Pointer to the buffer containing the data to be hashed.
+ * @param[in]      data_size       Size of the data buffer in bytes.
+ *
+ * @retval true   SHA-384 data digest succeeded.
+ * @retval false  SHA-384 data digest failed.
+ **/
+extern bool libspdm_sha384_update(void *sha384_context, const void *data, size_t data_size);
+
+/**
+ * Completes computation of the SHA-384 digest value.
+ *
+ * This function completes SHA-384 hash computation and populates the digest value into
+ * the specified memory. After this function has been called, the SHA-384 context cannot
+ * be used again. 
SHA-384 context should be already correctly initialized by libspdm_sha384_init(), + * and must not have been finalized by libspdm_sha384_final(). Behavior with invalid SHA-384 context + * is undefined. + * + * If sha384_context is NULL, then return false. + * If hash_value is NULL, then return false. + * + * @param[in, out] sha384_context Pointer to the SHA-384 context. + * @param[out] hash_value Pointer to a buffer that receives the SHA-384 digest + * value (48 bytes). + * + * @retval true SHA-384 digest computation succeeded. + * @retval false SHA-384 digest computation failed. + **/ +extern bool libspdm_sha384_final(void *sha384_context, uint8_t *hash_value); + +/** + * Computes the SHA-384 message digest of an input data buffer. + * + * This function performs the SHA-384 message digest of a given data buffer, and places + * the digest value into the specified memory. + * + * If this interface is not supported, then return false. + * + * @param[in] data Pointer to the buffer containing the data to be hashed. + * @param[in] data_size Size of data buffer in bytes. + * @param[out] hash_value Pointer to a buffer that receives the SHA-384 digest value (48 bytes). + * + * @retval true SHA-384 digest computation succeeded. + * @retval false SHA-384 digest computation failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_sha384_hash_all(const void *data, size_t data_size, uint8_t *hash_value); +#endif /* LIBSPDM_SHA384_SUPPORT */ + +#if LIBSPDM_SHA512_SUPPORT +/** + * Allocates and initializes one HASH_CTX context for subsequent SHA-512 use. + * + * @return Pointer to the HASH_CTX context that has been initialized. + * If the allocations fails, libspdm_sha512_new() returns NULL. + **/ +extern void *libspdm_sha512_new(void); + +/** + * Release the specified HASH_CTX context. + * + * @param[in] sha512_context Pointer to the HASH_CTX context to be released. + **/ +extern void libspdm_sha512_free(void *sha512_context); + +/** + * Initializes user-supplied memory pointed by sha512_context as SHA-512 hash context for + * subsequent use. + * + * If sha512_context is NULL, then return false. + * + * @param[out] sha512_context Pointer to SHA-512 context being initialized. + * + * @retval true SHA-512 context initialization succeeded. + * @retval false SHA-512 context initialization failed. + **/ +extern bool libspdm_sha512_init(void *sha512_context); + +/** + * Makes a copy of an existing SHA-512 context. + * + * If sha512_context is NULL, then return false. + * If new_sha512_context is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in] sha512_context Pointer to SHA-512 context being copied. + * @param[out] new_sha512_context Pointer to new SHA-512 context. + * + * @retval true SHA-512 context copy succeeded. + * @retval false SHA-512 context copy failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_sha512_duplicate(const void *sha512_context, void *new_sha512_context); + +/** + * Digests the input data and updates SHA-512 context. + * + * This function performs SHA-512 digest on a data buffer of the specified size. + * It can be called multiple times to compute the digest of long or discontinuous data streams. + * SHA-512 context should be already correctly initialized by libspdm_sha512_init(), and must not + * have been finalized by libspdm_sha512_final(). Behavior with invalid context is undefined. + * + * If sha512_context is NULL, then return false. 
+ * + * @param[in, out] sha512_context Pointer to the SHA-512 context. + * @param[in] data Pointer to the buffer containing the data to be hashed. + * @param[in] data_size Size of data buffer in bytes. + * + * @retval true SHA-512 data digest succeeded. + * @retval false SHA-512 data digest failed. + **/ +extern bool libspdm_sha512_update(void *sha512_context, const void *data, size_t data_size); + +/** + * Completes computation of the SHA-512 digest value. + * + * This function completes SHA-512 hash computation and populates the digest value into + * the specified memory. After this function has been called, the SHA-512 context cannot + * be used again. SHA-512 context should be already correctly initialized by libspdm_sha512_init(), + * and must not have been finalized by libspdm_sha512_final(). Behavior with invalid SHA-512 context + * is undefined. + * + * If sha512_context is NULL, then return false. + * If hash_value is NULL, then return false. + * + * @param[in, out] sha512_context Pointer to the SHA-512 context. + * @param[out] hash_value Pointer to a buffer that receives the SHA-512 digest + * value (64 bytes). + * + * @retval true SHA-512 digest computation succeeded. + * @retval false SHA-512 digest computation failed. + **/ +extern bool libspdm_sha512_final(void *sha512_context, uint8_t *hash_value); + +/** + * Computes the SHA-512 message digest of an input data buffer. + * + * This function performs the SHA-512 message digest of a given data buffer, and places + * the digest value into the specified memory. + * + * If this interface is not supported, then return false. + * + * @param[in] data Pointer to the buffer containing the data to be hashed. + * @param[in] data_size Size of data buffer in bytes. + * @param[out] hash_value Pointer to a buffer that receives the SHA-512 digest value (64 bytes). + * + * @retval true SHA-512 digest computation succeeded. + * @retval false SHA-512 digest computation failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_sha512_hash_all(const void *data, size_t data_size, uint8_t *hash_value); +#endif /* LIBSPDM_SHA512_SUPPORT */ + +/*===================================================================================== + * One-way cryptographic hash SHA3 primitives. + *===================================================================================== + */ +#if LIBSPDM_SHA3_256_SUPPORT +/** + * Allocates and initializes one HASH_CTX context for subsequent SHA3-256 use. + * + * @return Pointer to the HASH_CTX context that has been initialized. + * If the allocations fails, libspdm_sha3_256_new() returns NULL. + **/ +extern void *libspdm_sha3_256_new(void); + +/** + * Release the specified HASH_CTX context. + * + * @param[in] sha3_256_context Pointer to the HASH_CTX context to be released. + **/ +extern void libspdm_sha3_256_free(void *sha3_256_context); + +/** + * Initializes user-supplied memory pointed by sha3_256_context as SHA3-256 hash context for + * subsequent use. + * + * If sha3_256_context is NULL, then return false. + * + * @param[out] sha3_256_context Pointer to SHA3-256 context being initialized. + * + * @retval true SHA3-256 context initialization succeeded. + * @retval false SHA3-256 context initialization failed. + **/ +extern bool libspdm_sha3_256_init(void *sha3_256_context); + +/** + * Makes a copy of an existing SHA3-256 context. + * + * If sha3_256_context is NULL, then return false. + * If new_sha3_256_context is NULL, then return false. 
+ * If this interface is not supported, then return false.
+ *
+ * @param[in]  sha3_256_context      Pointer to SHA3-256 context being copied.
+ * @param[out] new_sha3_256_context  Pointer to new SHA3-256 context.
+ *
+ * @retval true   SHA3-256 context copy succeeded.
+ * @retval false  SHA3-256 context copy failed.
+ * @retval false  This interface is not supported.
+ **/
+extern bool libspdm_sha3_256_duplicate(const void *sha3_256_context, void *new_sha3_256_context);
+
+/**
+ * Digests the input data and updates the SHA3-256 context.
+ *
+ * This function performs SHA3-256 digest on a data buffer of the specified size.
+ * It can be called multiple times to compute the digest of long or discontinuous data streams.
+ * The SHA3-256 context should be already correctly initialized by libspdm_sha3_256_init(), and
+ * must not have been finalized by libspdm_sha3_256_final(). Behavior with an invalid context is
+ * undefined.
+ *
+ * If sha3_256_context is NULL, then return false.
+ *
+ * @param[in, out] sha3_256_context  Pointer to the SHA3-256 context.
+ * @param[in]      data              Pointer to the buffer containing the data to be hashed.
+ * @param[in]      data_size         Size of the data buffer in bytes.
+ *
+ * @retval true   SHA3-256 data digest succeeded.
+ * @retval false  SHA3-256 data digest failed.
+ **/
+extern bool libspdm_sha3_256_update(void *sha3_256_context, const void *data, size_t data_size);
+
+/**
+ * Completes computation of the SHA3-256 digest value.
+ *
+ * This function completes SHA3-256 hash computation and populates the digest value into
+ * the specified memory. After this function has been called, the SHA3-256 context cannot
+ * be used again. The SHA3-256 context should be already correctly initialized by
+ * libspdm_sha3_256_init(), and must not have been finalized by libspdm_sha3_256_final().
+ * Behavior with an invalid SHA3-256 context is undefined.
+ *
+ * If sha3_256_context is NULL, then return false.
+ * If hash_value is NULL, then return false.
+ *
+ * @param[in, out] sha3_256_context  Pointer to the SHA3-256 context.
+ * @param[out]     hash_value        Pointer to a buffer that receives the SHA3-256 digest
+ *                                   value (32 bytes).
+ *
+ * @retval true   SHA3-256 digest computation succeeded.
+ * @retval false  SHA3-256 digest computation failed.
+ **/
+extern bool libspdm_sha3_256_final(void *sha3_256_context, uint8_t *hash_value);
+
+/**
+ * Computes the SHA3-256 message digest of an input data buffer.
+ *
+ * This function performs the SHA3-256 message digest of a given data buffer, and places
+ * the digest value into the specified memory.
+ *
+ * If this interface is not supported, then return false.
+ *
+ * @param[in]  data        Pointer to the buffer containing the data to be hashed.
+ * @param[in]  data_size   Size of the data buffer in bytes.
+ * @param[out] hash_value  Pointer to a buffer that receives the SHA3-256 digest value (32 bytes).
+ *
+ * @retval true   SHA3-256 digest computation succeeded.
+ * @retval false  SHA3-256 digest computation failed.
+ * @retval false  This interface is not supported.
+ **/
+extern bool libspdm_sha3_256_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
+#endif /* LIBSPDM_SHA3_256_SUPPORT */
+
+#if LIBSPDM_SHA3_384_SUPPORT
+/**
+ * Allocates and initializes one HASH_CTX context for subsequent SHA3-384 use.
+ *
+ * @return Pointer to the HASH_CTX context that has been initialized.
+ *         If the allocation fails, libspdm_sha3_384_new() returns NULL.
+ **/
+extern void *libspdm_sha3_384_new(void);
+
+/**
+ * Release the specified HASH_CTX context.
+ * + * @param[in] sha3_384_context Pointer to the HASH_CTX context to be released. + **/ +extern void libspdm_sha3_384_free(void *sha3_384_context); + +/** + * Initializes user-supplied memory pointed by sha3_384_context as SHA3-384 hash context for + * subsequent use. + * + * If sha3_384_context is NULL, then return false. + * + * @param[out] sha3_384_context Pointer to SHA3-384 context being initialized. + * + * @retval true SHA3-384 context initialization succeeded. + * @retval false SHA3-384 context initialization failed. + **/ +extern bool libspdm_sha3_384_init(void *sha3_384_context); + +/** + * Makes a copy of an existing SHA3-384 context. + * + * If sha3_384_context is NULL, then return false. + * If new_sha3_384_context is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in] sha3_384_context Pointer to SHA3-384 context being copied. + * @param[out] new_sha3_384_context Pointer to new SHA3-384 context. + * + * @retval true SHA3-384 context copy succeeded. + * @retval false SHA3-384 context copy failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_sha3_384_duplicate(const void *sha3_384_context, void *new_sha3_384_context); + +/** + * Digests the input data and updates SHA3-384 context. + * + * This function performs SHA3-384 digest on a data buffer of the specified size. + * It can be called multiple times to compute the digest of long or discontinuous data streams. + * SHA3-384 context should be already correctly initialized by libspdm_sha3_384_init(), and must not + * have been finalized by libspdm_sha3_384_final(). Behavior with invalid context is undefined. + * + * If sha3_384_context is NULL, then return false. + * + * @param[in, out] sha3_384_context Pointer to the SHA3-384 context. + * @param[in] data Pointer to the buffer containing the data to be hashed. + * @param[in] data_size Size of data buffer in bytes. + * + * @retval true SHA3-384 data digest succeeded. + * @retval false SHA3-384 data digest failed. + **/ +extern bool libspdm_sha3_384_update(void *sha3_384_context, const void *data, size_t data_size); + +/** + * Completes computation of the SHA3-384 digest value. + * + * This function completes SHA3-384 hash computation and populates the digest value into + * the specified memory. After this function has been called, the SHA3-384 context cannot + * be used again. SHA3-384 context should be already correctly initialized by + * libspdm_sha3_384_init(), and must not have been finalized by libspdm_sha3_384_final(). + * Behavior with invalid SHA3-384 context is undefined. + * + * If sha3_384_context is NULL, then return false. + * If hash_value is NULL, then return false. + * + * @param[in, out] sha3_384_context Pointer to the SHA3-384 context. + * @param[out] hash_value Pointer to a buffer that receives the SHA3-384 digest + * value (48 bytes). + * + * @retval true SHA3-384 digest computation succeeded. + * @retval false SHA3-384 digest computation failed. + * + **/ +extern bool libspdm_sha3_384_final(void *sha3_384_context, uint8_t *hash_value); + +/** + * Computes the SHA3-384 message digest of an input data buffer. + * + * This function performs the SHA3-384 message digest of a given data buffer, and places + * the digest value into the specified memory. + * + * If this interface is not supported, then return false. + * + * @param[in] data Pointer to the buffer containing the data to be hashed. + * @param[in] data_size Size of data buffer in bytes. 
+ * @param[out] hash_value Pointer to a buffer that receives the SHA3-384 digest value (48 bytes). + * + * @retval true SHA3-384 digest computation succeeded. + * @retval false SHA3-384 digest computation failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_sha3_384_hash_all(const void *data, size_t data_size, uint8_t *hash_value); +#endif /* LIBSPDM_SHA3_384_SUPPORT */ + +#if LIBSPDM_SHA3_512_SUPPORT +/** + * Allocates and initializes one HASH_CTX context for subsequent SHA3-512 use. + * + * @return Pointer to the HASH_CTX context that has been initialized. + * If the allocations fails, libspdm_sha3_512_new() returns NULL. + **/ +extern void *libspdm_sha3_512_new(void); + +/** + * Release the specified HASH_CTX context. + * + * @param[in] sha3_512_context Pointer to the HASH_CTX context to be released. + **/ +extern void libspdm_sha3_512_free(void *sha3_512_context); + +/** + * Initializes user-supplied memory pointed by sha3_512_context as SHA3-512 hash context for + * subsequent use. + * + * If sha3_512_context is NULL, then return false. + * + * @param[out] sha3_512_context Pointer to SHA3-512 context being initialized. + * + * @retval true SHA3-512 context initialization succeeded. + * @retval false SHA3-512 context initialization failed. + **/ +extern bool libspdm_sha3_512_init(void *sha3_512_context); + +/** + * Makes a copy of an existing SHA3-512 context. + * + * If sha3_512_context is NULL, then return false. + * If new_sha3_512_context is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in] sha3_512_context Pointer to SHA3-512 context being copied. + * @param[out] new_sha3_512_context Pointer to new SHA3-512 context. + * + * @retval true SHA3-512 context copy succeeded. + * @retval false SHA3-512 context copy failed. + * @retval false This interface is not supported. + * + **/ +extern bool libspdm_sha3_512_duplicate(const void *sha3_512_context, void *new_sha3_512_context); + +/** + * Digests the input data and updates SHA3-512 context. + * + * This function performs SHA3-512 digest on a data buffer of the specified size. + * It can be called multiple times to compute the digest of long or discontinuous data streams. + * SHA3-512 context should be already correctly initialized by libspdm_sha3_512_init(), and must not + * have been finalized by libspdm_sha3_512_final(). Behavior with invalid context is undefined. + * + * If sha3_512_context is NULL, then return false. + * + * @param[in, out] sha3_512_context Pointer to the SHA3-512 context. + * @param[in] data Pointer to the buffer containing the data to be hashed. + * @param[in] data_size Size of data buffer in bytes. + * + * @retval true SHA3-512 data digest succeeded. + * @retval false SHA3-512 data digest failed. + **/ +extern bool libspdm_sha3_512_update(void *sha3_512_context, const void *data, size_t data_size); + +/** + * Completes computation of the SHA3-512 digest value. + * + * This function completes SHA3-512 hash computation and populates the digest value into + * the specified memory. After this function has been called, the SHA3-512 context cannot + * be used again. SHA3-512 context should be already correctly initialized by + * libspdm_sha3_512_init(), and must not have been finalized by libspdm_sha3_512_final(). + * Behavior with invalid SHA3-512 context is undefined. + * + * If sha3_512_context is NULL, then return false. + * If hash_value is NULL, then return false. 
+ *
+ * @param[in, out] sha3_512_context Pointer to the SHA3-512 context.
+ * @param[out] hash_value Pointer to a buffer that receives the SHA3-512 digest
+ * value (64 bytes).
+ *
+ * @retval true SHA3-512 digest computation succeeded.
+ * @retval false SHA3-512 digest computation failed.
+ **/
+extern bool libspdm_sha3_512_final(void *sha3_512_context, uint8_t *hash_value);
+
+/**
+ * Computes the SHA3-512 message digest of an input data buffer.
+ *
+ * This function computes the SHA3-512 message digest of a given data buffer and places
+ * the digest value into the specified memory.
+ *
+ * If this interface is not supported, then return false.
+ *
+ * @param[in] data Pointer to the buffer containing the data to be hashed.
+ * @param[in] data_size Size of data buffer in bytes.
+ * @param[out] hash_value Pointer to a buffer that receives the SHA3-512 digest value (64 bytes).
+ *
+ * @retval true SHA3-512 digest computation succeeded.
+ * @retval false SHA3-512 digest computation failed.
+ * @retval false This interface is not supported.
+ **/
+extern bool libspdm_sha3_512_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
+#endif /* LIBSPDM_SHA3_512_SUPPORT */
+
+/*=====================================================================================
+ * One-Way Cryptographic Hash SM3 Primitives
+ *=====================================================================================
+ */
+
+#if LIBSPDM_SM3_256_SUPPORT
+/**
+ * Allocates and initializes one HASH_CTX context for subsequent SM3-256 use.
+ *
+ * @return Pointer to the HASH_CTX context that has been initialized.
+ * If the allocation fails, libspdm_sm3_256_new() returns NULL.
+ **/
+extern void *libspdm_sm3_256_new(void);
+
+/**
+ * Release the specified HASH_CTX context.
+ *
+ * @param[in] sm3_context Pointer to the HASH_CTX context to be released.
+ **/
+extern void libspdm_sm3_256_free(void *sm3_context);
+
+/**
+ * Initializes user-supplied memory pointed to by sm3_context as an SM3 hash context for
+ * subsequent use.
+ *
+ * If sm3_context is NULL, then return false.
+ *
+ * @param[out] sm3_context Pointer to SM3 context being initialized.
+ *
+ * @retval true SM3 context initialization succeeded.
+ * @retval false SM3 context initialization failed.
+ **/
+extern bool libspdm_sm3_256_init(void *sm3_context);
+
+/**
+ * Makes a copy of an existing SM3 context.
+ *
+ * If sm3_context is NULL, then return false.
+ * If new_sm3_context is NULL, then return false.
+ * If this interface is not supported, then return false.
+ *
+ * @param[in] sm3_context Pointer to SM3 context being copied.
+ * @param[out] new_sm3_context Pointer to new SM3 context.
+ *
+ * @retval true SM3 context copy succeeded.
+ * @retval false SM3 context copy failed.
+ * @retval false This interface is not supported.
+ **/
+extern bool libspdm_sm3_256_duplicate(const void *sm3_context, void *new_sm3_context);
+
+/**
+ * Digests the input data and updates the SM3 context.
+ *
+ * This function performs the SM3 digest of a data buffer of the specified size.
+ * It can be called multiple times to compute the digest of long or discontinuous data streams.
+ * The SM3 context must already have been initialized by libspdm_sm3_256_init() and must not have
+ * been finalized by libspdm_sm3_256_final(). Behavior with an invalid context is undefined.
+ *
+ * If sm3_context is NULL, then return false.
+ *
+ * @param[in, out] sm3_context Pointer to the SM3 context.
+ * @param[in] data Pointer to the buffer containing the data to be hashed.
+ * @param[in] data_size Size of data buffer in bytes.
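+ *
+ * Since update may be called repeatedly, a discontinuous message can be hashed
+ * in pieces (an illustrative sketch; sm3_ctx, part1 and part2 are hypothetical):
+ * @code
+ * bool ok = libspdm_sm3_256_update(sm3_ctx, part1, part1_size) &&
+ *           libspdm_sm3_256_update(sm3_ctx, part2, part2_size);
+ * @endcode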
+ *
+ * @retval true SM3 data digest succeeded.
+ * @retval false SM3 data digest failed.
+ **/
+extern bool libspdm_sm3_256_update(void *sm3_context, const void *data, size_t data_size);
+
+/**
+ * Completes computation of the SM3 digest value.
+ *
+ * This function completes the SM3 hash computation and writes the digest value into
+ * the specified memory. After this function has been called, the SM3 context cannot
+ * be used again. The SM3 context must already have been initialized by
+ * libspdm_sm3_256_init() and must not have been finalized by libspdm_sm3_256_final().
+ * Behavior with an invalid SM3 context is undefined.
+ *
+ * If sm3_context is NULL, then return false.
+ * If hash_value is NULL, then return false.
+ *
+ * @param[in, out] sm3_context Pointer to the SM3 context.
+ * @param[out] hash_value Pointer to a buffer that receives the SM3 digest value (32 bytes).
+ *
+ * @retval true SM3 digest computation succeeded.
+ * @retval false SM3 digest computation failed.
+ **/
+extern bool libspdm_sm3_256_final(void *sm3_context, uint8_t *hash_value);
+
+/**
+ * Computes the SM3 message digest of an input data buffer.
+ *
+ * This function computes the SM3 message digest of a given data buffer and places
+ * the digest value into the specified memory.
+ *
+ * If this interface is not supported, then return false.
+ *
+ * @param[in] data Pointer to the buffer containing the data to be hashed.
+ * @param[in] data_size Size of data buffer in bytes.
+ * @param[out] hash_value Pointer to a buffer that receives the SM3 digest value (32 bytes).
+ *
+ * @retval true SM3 digest computation succeeded.
+ * @retval false SM3 digest computation failed.
+ * @retval false This interface is not supported.
+ **/
+extern bool libspdm_sm3_256_hash_all(const void *data, size_t data_size, uint8_t *hash_value);
+#endif /* LIBSPDM_SM3_256_SUPPORT */
+
+#endif /* CRYPTLIB_HASH_H */
diff --git a/kernel-open/nvidia/hal/library/cryptlib/cryptlib_hkdf.h b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_hkdf.h
new file mode 100644
index 0000000..5ed5ce0
--- /dev/null
+++ b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_hkdf.h
@@ -0,0 +1,266 @@
+/**
+ * Copyright Notice:
+ * Copyright 2021-2022 DMTF. All rights reserved.
+ * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
+ **/
+
+#ifndef CRYPTLIB_HKDF_H
+#define CRYPTLIB_HKDF_H
+
+/*=====================================================================================
+ * Key Derivation Function Primitives
+ *=====================================================================================*/
+
+#if LIBSPDM_SHA256_SUPPORT
+/**
+ * Derive SHA256 HMAC-based Extract key Derivation Function (HKDF).
+ *
+ * @param[in] key Pointer to the user-supplied key.
+ * @param[in] key_size Key size in bytes.
+ * @param[in] salt Pointer to the salt value.
+ * @param[in] salt_size Salt size in bytes.
+ * @param[out] prk_out Pointer to buffer to receive prk value.
+ * @param[in] prk_out_size Size of prk bytes to generate.
+ *
+ * @retval true Hkdf generated successfully.
+ * @retval false Hkdf generation failed.
+ **/
+extern bool libspdm_hkdf_sha256_extract(const uint8_t *key, size_t key_size,
+                                        const uint8_t *salt, size_t salt_size,
+                                        uint8_t *prk_out, size_t prk_out_size);
+
+/**
+ * Derive SHA256 HMAC-based Expand key Derivation Function (HKDF).
+ *
+ * @param[in] prk Pointer to the user-supplied key.
+ * @param[in] prk_size Key size in bytes.
+ * @param[in] info Pointer to the application specific info.
+ * @param[in] info_size Info size in bytes. + * @param[out] out Pointer to buffer to receive hkdf value. + * @param[in] out_size Size of hkdf bytes to generate. + * + * @retval true Hkdf generated successfully. + * @retval false Hkdf generation failed. + **/ +extern bool libspdm_hkdf_sha256_expand(const uint8_t *prk, size_t prk_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size); +#endif /* LIBSPDM_SHA256_SUPPORT */ + +#if LIBSPDM_SHA384_SUPPORT +/** + * Derive SHA384 HMAC-based Extract key Derivation Function (HKDF). + * + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * @param[in] salt Pointer to the salt value. + * @param[in] salt_size Salt size in bytes. + * @param[out] prk_out Pointer to buffer to receive hkdf value. + * @param[in] prk_out_size Size of hkdf bytes to generate. + * + * @retval true Hkdf generated successfully. + * @retval false Hkdf generation failed. + **/ +extern bool libspdm_hkdf_sha384_extract(const uint8_t *key, size_t key_size, + const uint8_t *salt, size_t salt_size, + uint8_t *prk_out, size_t prk_out_size); + +/** + * Derive SHA384 HMAC-based Expand key Derivation Function (HKDF). + * + * @param[in] prk Pointer to the user-supplied key. + * @param[in] prk_size Key size in bytes. + * @param[in] info Pointer to the application specific info. + * @param[in] info_size Info size in bytes. + * @param[out] out Pointer to buffer to receive hkdf value. + * @param[in] out_size Size of hkdf bytes to generate. + * + * @retval true Hkdf generated successfully. + * @retval false Hkdf generation failed. + **/ +extern bool libspdm_hkdf_sha384_expand(const uint8_t *prk, size_t prk_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size); +#endif /* LIBSPDM_SHA384_SUPPORT */ + +#if LIBSPDM_SHA512_SUPPORT +/** + * Derive SHA512 HMAC-based Extract key Derivation Function (HKDF). + * + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * @param[in] salt Pointer to the salt value. + * @param[in] salt_size Salt size in bytes. + * @param[out] prk_out Pointer to buffer to receive hkdf value. + * @param[in] prk_out_size Size of hkdf bytes to generate. + * + * @retval true Hkdf generated successfully. + * @retval false Hkdf generation failed. + **/ +extern bool libspdm_hkdf_sha512_extract(const uint8_t *key, size_t key_size, + const uint8_t *salt, size_t salt_size, + uint8_t *prk_out, size_t prk_out_size); + +/** + * Derive SHA512 HMAC-based Expand key Derivation Function (HKDF). + * + * @param[in] prk Pointer to the user-supplied key. + * @param[in] prk_size Key size in bytes. + * @param[in] info Pointer to the application specific info. + * @param[in] info_size Info size in bytes. + * @param[out] out Pointer to buffer to receive hkdf value. + * @param[in] out_size Size of hkdf bytes to generate. + * + * @retval true Hkdf generated successfully. + * @retval false Hkdf generation failed. + **/ +extern bool libspdm_hkdf_sha512_expand(const uint8_t *prk, size_t prk_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size); +#endif /* LIBSPDM_SHA512_SUPPORT */ + +#if LIBSPDM_SHA3_256_SUPPORT +/** + * Derive SHA3_256 HMAC-based Extract key Derivation Function (HKDF). + * + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * @param[in] salt Pointer to the salt value. + * @param[in] salt_size Salt size in bytes. + * @param[out] prk_out Pointer to buffer to receive hkdf value. 
+ * @param[in] prk_out_size Size of hkdf bytes to generate. + * + * @retval true Hkdf generated successfully. + * @retval false Hkdf generation failed. + **/ +extern bool libspdm_hkdf_sha3_256_extract(const uint8_t *key, size_t key_size, + const uint8_t *salt, size_t salt_size, + uint8_t *prk_out, size_t prk_out_size); + +/** + * Derive SHA3_256 HMAC-based Expand key Derivation Function (HKDF). + * + * @param[in] prk Pointer to the user-supplied key. + * @param[in] prk_size Key size in bytes. + * @param[in] info Pointer to the application specific info. + * @param[in] info_size Info size in bytes. + * @param[out] out Pointer to buffer to receive hkdf value. + * @param[in] out_size Size of hkdf bytes to generate. + * + * @retval true Hkdf generated successfully. + * @retval false Hkdf generation failed. + **/ +extern bool libspdm_hkdf_sha3_256_expand(const uint8_t *prk, size_t prk_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size); +#endif /* LIBSPDM_SHA3_256_SUPPORT */ + +#if LIBSPDM_SHA3_384_SUPPORT +/** + * Derive SHA3_384 HMAC-based Extract key Derivation Function (HKDF). + * + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * @param[in] salt Pointer to the salt value. + * @param[in] salt_size Salt size in bytes. + * @param[out] prk_out Pointer to buffer to receive hkdf value. + * @param[in] prk_out_size Size of hkdf bytes to generate. + * + * @retval true Hkdf generated successfully. + * @retval false Hkdf generation failed. + **/ +extern bool libspdm_hkdf_sha3_384_extract(const uint8_t *key, size_t key_size, + const uint8_t *salt, size_t salt_size, + uint8_t *prk_out, size_t prk_out_size); + +/** + * Derive SHA3_384 HMAC-based Expand key Derivation Function (HKDF). + * + * @param[in] prk Pointer to the user-supplied key. + * @param[in] prk_size Key size in bytes. + * @param[in] info Pointer to the application specific info. + * @param[in] info_size Info size in bytes. + * @param[out] out Pointer to buffer to receive hkdf value. + * @param[in] out_size Size of hkdf bytes to generate. + * + * @retval true Hkdf generated successfully. + * @retval false Hkdf generation failed. + **/ +extern bool libspdm_hkdf_sha3_384_expand(const uint8_t *prk, size_t prk_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size); +#endif /* LIBSPDM_SHA3_384_SUPPORT */ + +#if LIBSPDM_SHA3_512_SUPPORT +/** + * Derive SHA3_512 HMAC-based Extract key Derivation Function (HKDF). + * + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * @param[in] salt Pointer to the salt value. + * @param[in] salt_size Salt size in bytes. + * @param[out] prk_out Pointer to buffer to receive hkdf value. + * @param[in] prk_out_size Size of hkdf bytes to generate. + * + * @retval true Hkdf generated successfully. + * @retval false Hkdf generation failed. + **/ +extern bool libspdm_hkdf_sha3_512_extract(const uint8_t *key, size_t key_size, + const uint8_t *salt, size_t salt_size, + uint8_t *prk_out, size_t prk_out_size); + +/** + * Derive SHA3_512 HMAC-based Expand key Derivation Function (HKDF). + * + * @param[in] prk Pointer to the user-supplied key. + * @param[in] prk_size Key size in bytes. + * @param[in] info Pointer to the application specific info. + * @param[in] info_size Info size in bytes. + * @param[out] out Pointer to buffer to receive hkdf value. + * @param[in] out_size Size of hkdf bytes to generate. + * + * @retval true Hkdf generated successfully. 
+ * @retval false Hkdf generation failed. + **/ +extern bool libspdm_hkdf_sha3_512_expand(const uint8_t *prk, size_t prk_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size); +#endif /* LIBSPDM_SHA3_512_SUPPORT */ + +#if LIBSPDM_SM3_256_SUPPORT +/** + * Derive SM3_256 HMAC-based Extract key Derivation Function (HKDF). + * + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * @param[in] salt Pointer to the salt value. + * @param[in] salt_size Salt size in bytes. + * @param[out] prk_out Pointer to buffer to receive hkdf value. + * @param[in] prk_out_size Size of hkdf bytes to generate. + * + * @retval true Hkdf generated successfully. + * @retval false Hkdf generation failed. + **/ +extern bool libspdm_hkdf_sm3_256_extract(const uint8_t *key, size_t key_size, + const uint8_t *salt, size_t salt_size, + uint8_t *prk_out, size_t prk_out_size); + +/** + * Derive SM3_256 HMAC-based Expand key Derivation Function (HKDF). + * + * @param[in] prk Pointer to the user-supplied key. + * @param[in] prk_size Key size in bytes. + * @param[in] info Pointer to the application specific info. + * @param[in] info_size Info size in bytes. + * @param[out] out Pointer to buffer to receive hkdf value. + * @param[in] out_size Size of hkdf bytes to generate. + * + * @retval true Hkdf generated successfully. + * @retval false Hkdf generation failed. + **/ +extern bool libspdm_hkdf_sm3_256_expand(const uint8_t *prk, size_t prk_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size); +#endif /* LIBSPDM_SM3_256_SUPPORT */ + +#endif /* CRYPTLIB_HKDF_H */ diff --git a/kernel-open/nvidia/hal/library/cryptlib/cryptlib_mac.h b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_mac.h new file mode 100644 index 0000000..d9d8bc3 --- /dev/null +++ b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_mac.h @@ -0,0 +1,833 @@ +/** + * Copyright Notice: + * Copyright 2021-2022 DMTF. All rights reserved. + * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md + **/ + +#ifndef CRYPTLIB_MAC_H +#define CRYPTLIB_MAC_H + +/*===================================================================================== + * Message Authentication Code (MAC) Primitives + *===================================================================================== + */ + +#if LIBSPDM_SHA256_SUPPORT +/** + * Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA256 use. + * + * @return Pointer to the HMAC_CTX context that has been initialized. + * If the allocations fails, libspdm_hmac_sha256_new() returns NULL. + **/ +extern void *libspdm_hmac_sha256_new(void); + +/** + * Release the specified HMAC_CTX context. + * + * @param[in] hmac_sha256_ctx Pointer to the HMAC_CTX context to be released. + **/ +extern void libspdm_hmac_sha256_free(void *hmac_sha256_ctx); + +/** + * Set user-supplied key for subsequent use. It must be done before any + * calling to libspdm_hmac_sha256_update(). + * + * If hmac_sha256_ctx is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[out] hmac_sha256_ctx Pointer to HMAC-SHA256 context. + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * + * @retval true The key is set successfully. + * @retval false The key is set unsuccessfully. + * @retval false This interface is not supported. 
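+ *
+ * The expected call order is new, set_key, update, final, free. A hedged usage
+ * sketch (key, key_size, msg and msg_size are hypothetical):
+ * @code
+ * uint8_t mac[32];
+ * void *ctx = libspdm_hmac_sha256_new();
+ * if (ctx != NULL) {
+ *     if (libspdm_hmac_sha256_set_key(ctx, key, key_size) &&
+ *         libspdm_hmac_sha256_update(ctx, msg, msg_size) &&
+ *         libspdm_hmac_sha256_final(ctx, mac)) {
+ *         // mac now holds the 32-byte HMAC-SHA256 value.
+ *     }
+ *     libspdm_hmac_sha256_free(ctx);
+ * }
+ * @endcode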
+ **/ +extern bool libspdm_hmac_sha256_set_key(void *hmac_sha256_ctx, const uint8_t *key, size_t key_size); + +/** + * Makes a copy of an existing HMAC-SHA256 context. + * + * If hmac_sha256_ctx is NULL, then return false. + * If new_hmac_sha256_ctx is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in] hmac_sha256_ctx Pointer to HMAC-SHA256 context being copied. + * @param[out] new_hmac_sha256_ctx Pointer to new HMAC-SHA256 context. + * + * @retval true HMAC-SHA256 context copy succeeded. + * @retval false HMAC-SHA256 context copy failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha256_duplicate(const void *hmac_sha256_ctx, void *new_hmac_sha256_ctx); + +/** + * Digests the input data and updates HMAC-SHA256 context. + * + * This function performs HMAC-SHA256 digest on a data buffer of the specified size. + * It can be called multiple times to compute the digest of long or discontinuous data streams. + * HMAC-SHA256 context should be initialized by libspdm_hmac_sha256_new(), and should not be + * finalized by libspdm_hmac_sha256_final(). Behavior with invalid context is undefined. + * + * If hmac_sha256_ctx is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in, out] hmac_sha256_ctx Pointer to the HMAC-SHA256 context. + * @param[in] data Pointer to the buffer containing the data to be digested. + * @param[in] data_size Size of data buffer in bytes. + * + * @retval true HMAC-SHA256 data digest succeeded. + * @retval false HMAC-SHA256 data digest failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha256_update(void *hmac_sha256_ctx, const void *data, size_t data_size); + +/** + * Completes computation of the HMAC-SHA256 digest value. + * + * This function completes HMAC-SHA256 hash computation and retrieves the digest value into + * the specified memory. After this function has been called, the HMAC-SHA256 context cannot + * be used again. HMAC-SHA256 context should be initialized by libspdm_hmac_sha256_new(), and should + * not be finalized by libspdm_hmac_sha256_final(). Behavior with invalid HMAC-SHA256 context is + * undefined. + * + * If hmac_sha256_ctx is NULL, then return false. + * If hmac_value is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in, out] hmac_sha256_ctx Pointer to the HMAC-SHA256 context. + * @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA256 digest + * value (32 bytes). + * + * @retval true HMAC-SHA256 digest computation succeeded. + * @retval false HMAC-SHA256 digest computation failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha256_final(void *hmac_sha256_ctx, uint8_t *hmac_value); + +/** + * Computes the HMAC-SHA256 digest of a input data buffer. + * + * This function performs the HMAC-SHA256 digest of a given data buffer, and places + * the digest value into the specified memory. + * + * If this interface is not supported, then return false. + * + * @param[in] data Pointer to the buffer containing the data to be digested. + * @param[in] data_size Size of data buffer in bytes. + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA256 digest + * value (32 bytes). + * + * @retval true HMAC-SHA256 digest computation succeeded. 
+ * @retval false HMAC-SHA256 digest computation failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha256_all(const void *data, size_t data_size, + const uint8_t *key, size_t key_size, + uint8_t *hmac_value); +#endif /* LIBSPDM_SHA256_SUPPORT */ + +#if LIBSPDM_SHA384_SUPPORT +/** + * Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA384 use. + * + * @return Pointer to the HMAC_CTX context that has been initialized. + * If the allocations fails, libspdm_hmac_sha384_new() returns NULL. + **/ +extern void *libspdm_hmac_sha384_new(void); + +/** + * Release the specified HMAC_CTX context. + * + * @param[in] hmac_sha384_ctx Pointer to the HMAC_CTX context to be released. + **/ +extern void libspdm_hmac_sha384_free(void *hmac_sha384_ctx); + +/** + * Set user-supplied key for subsequent use. It must be done before any + * calling to libspdm_hmac_sha384_update(). + * + * If hmac_sha384_ctx is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[out] hmac_sha384_ctx Pointer to HMAC-SHA384 context. + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size key size in bytes. + * + * @retval true The key is set successfully. + * @retval false The key is set unsuccessfully. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha384_set_key(void *hmac_sha384_ctx, const uint8_t *key, size_t key_size); + +/** + * Makes a copy of an existing HMAC-SHA384 context. + * + * If hmac_sha384_ctx is NULL, then return false. + * If new_hmac_sha384_ctx is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in] hmac_sha384_ctx Pointer to HMAC-SHA384 context being copied. + * @param[out] new_hmac_sha384_ctx Pointer to new HMAC-SHA384 context. + * + * @retval true HMAC-SHA384 context copy succeeded. + * @retval false HMAC-SHA384 context copy failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha384_duplicate(const void *hmac_sha384_ctx, void *new_hmac_sha384_ctx); + +/** + * Digests the input data and updates HMAC-SHA384 context. + * + * This function performs HMAC-SHA384 digest on a data buffer of the specified size. + * It can be called multiple times to compute the digest of long or discontinuous data streams. + * HMAC-SHA384 context should be initialized by libspdm_hmac_sha384_new(), and should not be + * finalized by libspdm_hmac_sha384_final(). Behavior with invalid context is undefined. + * + * If hmac_sha384_ctx is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in, out] hmac_sha384_ctx Pointer to the HMAC-SHA384 context. + * @param[in] data Pointer to the buffer containing the data to be digested. + * @param[in] data_size Size of data buffer in bytes. + * + * @retval true HMAC-SHA384 data digest succeeded. + * @retval false HMAC-SHA384 data digest failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha384_update(void *hmac_sha384_ctx, const void *data, size_t data_size); + +/** + * Completes computation of the HMAC-SHA384 digest value. + * + * This function completes HMAC-SHA384 hash computation and retrieves the digest value into + * the specified memory. After this function has been called, the HMAC-SHA384 context cannot + * be used again. HMAC-SHA384 context should be initialized by libspdm_hmac_sha384_new(), and should + * not be finalized by libspdm_hmac_sha384_final(). 
Behavior with invalid HMAC-SHA384 context is + * undefined. + * + * If hmac_sha384_ctx is NULL, then return false. + * If hmac_value is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in, out] hmac_sha384_ctx Pointer to the HMAC-SHA384 context. + * @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA384 digest + * value (48 bytes). + * + * @retval true HMAC-SHA384 digest computation succeeded. + * @retval false HMAC-SHA384 digest computation failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha384_final(void *hmac_sha384_ctx, uint8_t *hmac_value); + +/** + * Computes the HMAC-SHA384 digest of a input data buffer. + * + * This function performs the HMAC-SHA384 digest of a given data buffer, and places + * the digest value into the specified memory. + * + * If this interface is not supported, then return false. + * + * @param[in] data Pointer to the buffer containing the data to be digested. + * @param[in] data_size Size of data buffer in bytes. + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA384 digest + * value (48 bytes). + * + * @retval true HMAC-SHA384 digest computation succeeded. + * @retval false HMAC-SHA384 digest computation failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha384_all(const void *data, size_t data_size, + const uint8_t *key, size_t key_size, + uint8_t *hmac_value); +#endif /* LIBSPDM_SHA384_SUPPORT */ + +#if LIBSPDM_SHA512_SUPPORT +/** + * Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA512 use. + * + * @return Pointer to the HMAC_CTX context that has been initialized. + * If the allocations fails, libspdm_hmac_sha512_new() returns NULL. + **/ +extern void *libspdm_hmac_sha512_new(void); + +/** + * Release the specified HMAC_CTX context. + * + * @param[in] hmac_sha512_ctx Pointer to the HMAC_CTX context to be released. + **/ +extern void libspdm_hmac_sha512_free(void *hmac_sha512_ctx); + +/** + * Set user-supplied key for subsequent use. It must be done before any + * calling to libspdm_hmac_sha512_update(). + * + * If hmac_sha512_ctx is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[out] hmac_sha512_ctx Pointer to HMAC-SHA512 context. + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * + * @retval true The key is set successfully. + * @retval false The key is set unsuccessfully. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha512_set_key(void *hmac_sha512_ctx, const uint8_t *key, size_t key_size); + +/** + * Makes a copy of an existing HMAC-SHA512 context. + * + * If hmac_sha512_ctx is NULL, then return false. + * If new_hmac_sha512_ctx is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in] hmac_sha512_ctx Pointer to HMAC-SHA512 context being copied. + * @param[out] new_hmac_sha512_ctx Pointer to new HMAC-SHA512 context. + * + * @retval true HMAC-SHA512 context copy succeeded. + * @retval false HMAC-SHA512 context copy failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha512_duplicate(const void *hmac_sha512_ctx, void *new_hmac_sha512_ctx); + +/** + * Digests the input data and updates HMAC-SHA512 context. 
+ * + * This function performs HMAC-SHA512 digest on a data buffer of the specified size. + * It can be called multiple times to compute the digest of long or discontinuous data streams. + * HMAC-SHA512 context should be initialized by libspdm_hmac_sha512_new(), and should not be + * finalized by libspdm_hmac_sha512_final(). Behavior with invalid context is undefined. + * + * If hmac_sha512_ctx is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in, out] hmac_sha512_ctx Pointer to the HMAC-SHA512 context. + * @param[in] data Pointer to the buffer containing the data to be digested. + * @param[in] data_size Size of data buffer in bytes. + * + * @retval true HMAC-SHA512 data digest succeeded. + * @retval false HMAC-SHA512 data digest failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha512_update(void *hmac_sha512_ctx, const void *data, size_t data_size); + +/** + * Completes computation of the HMAC-SHA512 digest value. + * + * This function completes HMAC-SHA512 hash computation and retrieves the digest value into + * the specified memory. After this function has been called, the HMAC-SHA512 context cannot + * be used again. HMAC-SHA512 context should be initialized by libspdm_hmac_sha512_new(), and should + * not be finalized by libspdm_hmac_sha512_final(). Behavior with invalid HMAC-SHA512 context is + * undefined. + * + * If hmac_sha512_ctx is NULL, then return false. + * If hmac_value is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in, out] hmac_sha512_ctx Pointer to the HMAC-SHA512 context. + * @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA512 digest + * value (64 bytes). + * + * @retval true HMAC-SHA512 digest computation succeeded. + * @retval false HMAC-SHA512 digest computation failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha512_final(void *hmac_sha512_ctx, uint8_t *hmac_value); + +/** + * Computes the HMAC-SHA512 digest of a input data buffer. + * + * This function performs the HMAC-SHA512 digest of a given data buffer, and places + * the digest value into the specified memory. + * + * If this interface is not supported, then return false. + * + * @param[in] data Pointer to the buffer containing the data to be digested. + * @param[in] data_size Size of data buffer in bytes. + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA512 digest + * value (64 bytes). + * + * @retval true HMAC-SHA512 digest computation succeeded. + * @retval false HMAC-SHA512 digest computation failed. + * @retval false This interface is not supported. + * + **/ +extern bool libspdm_hmac_sha512_all(const void *data, size_t data_size, + const uint8_t *key, size_t key_size, + uint8_t *hmac_value); +#endif /* LIBSPDM_SHA512_SUPPORT */ + +#if LIBSPDM_SHA3_256_SUPPORT +/** + * Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA3-256 use. + * + * @return Pointer to the HMAC_CTX context that has been initialized. + * If the allocations fails, libspdm_hmac_sha3_256_new() returns NULL. + **/ +extern void *libspdm_hmac_sha3_256_new(void); + +/** + * Release the specified HMAC_CTX context. + * + * @param[in] hmac_sha3_256_ctx Pointer to the HMAC_CTX context to be released. 
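+ *
+ * Every context obtained from libspdm_hmac_sha3_256_new() should eventually be
+ * released here, including on error paths (an illustrative sketch):
+ * @code
+ * void *ctx = libspdm_hmac_sha3_256_new();
+ * if (ctx != NULL) {
+ *     // ... set the key, digest data ...
+ *     libspdm_hmac_sha3_256_free(ctx);
+ * }
+ * @endcode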
+ **/ +extern void libspdm_hmac_sha3_256_free(void *hmac_sha3_256_ctx); + +/** + * Set user-supplied key for subsequent use. It must be done before any + * calling to libspdm_hmac_sha3_256_update(). + * + * If hmac_sha3_256_ctx is NULL, then return false. + * + * @param[out] hmac_sha3_256_ctx Pointer to HMAC-SHA3-256 context. + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * + * @retval true The key is set successfully. + * @retval false The key is set unsuccessfully. + **/ +extern bool libspdm_hmac_sha3_256_set_key(void *hmac_sha3_256_ctx, + const uint8_t *key, + size_t key_size); + +/** + * Makes a copy of an existing HMAC-SHA3-256 context. + * + * If hmac_sha3_256_ctx is NULL, then return false. + * If new_hmac_sha3_256_ctx is NULL, then return false. + * + * @param[in] hmac_sha3_256_ctx Pointer to HMAC-SHA3-256 context being copied. + * @param[out] new_hmac_sha3_256_ctx Pointer to new HMAC-SHA3-256 context. + * + * @retval true HMAC-SHA3-256 context copy succeeded. + * @retval false HMAC-SHA3-256 context copy failed. + **/ +extern bool libspdm_hmac_sha3_256_duplicate(const void *hmac_sha3_256_ctx, + void *new_hmac_sha3_256_ctx); + +/** + * Digests the input data and updates HMAC-SHA3-256 context. + * + * This function performs HMAC-SHA3-256 digest on a data buffer of the specified size. + * It can be called multiple times to compute the digest of long or discontinuous data streams. + * HMAC-SHA3-256 context should be initialized by libspdm_hmac_sha3_256_new(), and should not be + * finalized by libspdm_hmac_sha3_256_final(). Behavior with invalid context is undefined. + * + * If hmac_sha3_256_ctx is NULL, then return false. + * + * @param[in, out] hmac_sha3_256_ctx Pointer to the HMAC-SHA3-256 context. + * @param[in] data Pointer to the buffer containing the data to be digested. + * @param[in] data_size Size of data buffer in bytes. + * + * @retval true HMAC-SHA3-256 data digest succeeded. + * @retval false HMAC-SHA3-256 data digest failed. + **/ +extern bool libspdm_hmac_sha3_256_update(void *hmac_sha3_256_ctx, + const void *data, size_t data_size); + +/** + * Completes computation of the HMAC-SHA3-256 digest value. + * + * This function completes HMAC-SHA3-256 hash computation and retrieves the digest value into + * the specified memory. After this function has been called, the HMAC-SHA3-256 context cannot + * be used again. HMAC-SHA3-256 context should be initialized by libspdm_hmac_sha3_256_new(), and + * should not be finalized by libspdm_hmac_sha3_256_final(). Behavior with invalid HMAC-SHA3-256 + * context is undefined. + * + * If hmac_sha3_256_ctx is NULL, then return false. + * If hmac_value is NULL, then return false. + * + * @param[in, out] hmac_sha3_256_ctx Pointer to the HMAC-SHA3-256 context. + * @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-256 digest + * value (32 bytes). + * + * @retval true HMAC-SHA3-256 digest computation succeeded. + * @retval false HMAC-SHA3-256 digest computation failed. + **/ +extern bool libspdm_hmac_sha3_256_final(void *hmac_sha3_256_ctx, uint8_t *hmac_value); + +/** + * Computes the HMAC-SHA3-256 digest of a input data buffer. + * + * This function performs the HMAC-SHA3-256 digest of a given data buffer, and places + * the digest value into the specified memory. + * + * If this interface is not supported, then return false. + * + * @param[in] data Pointer to the buffer containing the data to be digested. + * @param[in] data_size Size of data buffer in bytes. 
+ * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-256 digest + * value (32 bytes). + * + * @retval true HMAC-SHA3-256 digest computation succeeded. + * @retval false HMAC-SHA3-256 digest computation failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha3_256_all(const void *data, size_t data_size, + const uint8_t *key, size_t key_size, + uint8_t *hmac_value); +#endif /* LIBSPDM_SHA3_256_SUPPORT */ + +#if LIBSPDM_SHA3_384_SUPPORT +/** + * Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA3-384 use. + * + * @return Pointer to the HMAC_CTX context that has been initialized. + * If the allocations fails, libspdm_hmac_sha3_384_new() returns NULL. + **/ +extern void *libspdm_hmac_sha3_384_new(void); + +/** + * Release the specified HMAC_CTX context. + * + * @param[in] hmac_sha3_384_ctx Pointer to the HMAC_CTX context to be released. + **/ +extern void libspdm_hmac_sha3_384_free(void *hmac_sha3_384_ctx); + +/** + * Set user-supplied key for subsequent use. It must be done before any + * calling to libspdm_hmac_sha3_384_update(). + * + * If hmac_sha3_384_ctx is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[out] hmac_sha3_384_ctx Pointer to HMAC-SHA3-384 context. + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * + * @retval true The key is set successfully. + * @retval false The key is set unsuccessfully. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha3_384_set_key(void *hmac_sha3_384_ctx, + const uint8_t *key, + size_t key_size); + +/** + * Makes a copy of an existing HMAC-SHA3-384 context. + * + * If hmac_sha3_384_ctx is NULL, then return false. + * If new_hmac_sha3_384_ctx is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in] hmac_sha3_384_ctx Pointer to HMAC-SHA3-384 context being copied. + * @param[out] new_hmac_sha3_384_ctx Pointer to new HMAC-SHA3-384 context. + * + * @retval true HMAC-SHA3-384 context copy succeeded. + * @retval false HMAC-SHA3-384 context copy failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha3_384_duplicate(const void *hmac_sha3_384_ctx, + void *new_hmac_sha3_384_ctx); + +/** + * Digests the input data and updates HMAC-SHA3-384 context. + * + * This function performs HMAC-SHA3-384 digest on a data buffer of the specified size. + * It can be called multiple times to compute the digest of long or discontinuous data streams. + * HMAC-SHA3-384 context should be initialized by libspdm_hmac_sha3_384_new(), and should not be + * finalized by libspdm_hmac_sha3_384_final(). Behavior with invalid context is undefined. + * + * If hmac_sha3_384_ctx is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in, out] hmac_sha3_384_ctx Pointer to the HMAC-SHA3-384 context. + * @param[in] data Pointer to the buffer containing the data to be digested. + * @param[in] data_size Size of data buffer in bytes. + * + * @retval true HMAC-SHA3-384 data digest succeeded. + * @retval false HMAC-SHA3-384 data digest failed. + * @retval false This interface is not supported. 
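+ *
+ * Long input can be fed in fixed-size chunks (a hedged sketch; ctx, buf, total
+ * and CHUNK are hypothetical):
+ * @code
+ * size_t off = 0;
+ * bool ok = true;
+ * while (ok && (off < total)) {
+ *     size_t n = ((total - off) < CHUNK) ? (total - off) : CHUNK;
+ *     ok = libspdm_hmac_sha3_384_update(ctx, buf + off, n);
+ *     off += n;
+ * }
+ * @endcode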
+ **/ +extern bool libspdm_hmac_sha3_384_update(void *hmac_sha3_384_ctx, const void *data, + size_t data_size); + +/** + * Completes computation of the HMAC-SHA3-384 digest value. + * + * This function completes HMAC-SHA3-384 hash computation and retrieves the digest value into + * the specified memory. After this function has been called, the HMAC-SHA3-384 context cannot + * be used again. HMAC-SHA3-384 context should be initialized by libspdm_hmac_sha3_384_new(), and + * should not be finalized by libspdm_hmac_sha3_384_final(). Behavior with invalid HMAC-SHA3-384 + * context is undefined. + * + * If hmac_sha3_384_ctx is NULL, then return false. + * If hmac_value is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in, out] hmac_sha3_384_ctx Pointer to the HMAC-SHA3-384 context. + * @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-384 digest + * value (48 bytes). + * + * @retval true HMAC-SHA3-384 digest computation succeeded. + * @retval false HMAC-SHA3-384 digest computation failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha3_384_final(void *hmac_sha3_384_ctx, uint8_t *hmac_value); + +/** + * Computes the HMAC-SHA3-384 digest of a input data buffer. + * + * This function performs the HMAC-SHA3-384 digest of a given data buffer, and places + * the digest value into the specified memory. + * + * If this interface is not supported, then return false. + * + * @param[in] data Pointer to the buffer containing the data to be digested. + * @param[in] data_size Size of data buffer in bytes. + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-384 digest + * value (48 bytes). + * + * @retval true HMAC-SHA3-384 digest computation succeeded. + * @retval false HMAC-SHA3-384 digest computation failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha3_384_all(const void *data, size_t data_size, + const uint8_t *key, size_t key_size, + uint8_t *hmac_value); +#endif /* LIBSPDM_SHA3_384_SUPPORT */ + +#if LIBSPDM_SHA3_512_SUPPORT +/** + * Allocates and initializes one HMAC_CTX context for subsequent HMAC-SHA3-512 use. + * + * @return Pointer to the HMAC_CTX context that has been initialized. + * If the allocations fails, libspdm_hmac_sha3_512_new() returns NULL. + **/ +extern void *libspdm_hmac_sha3_512_new(void); + +/** + * Release the specified HMAC_CTX context. + * + * @param[in] hmac_sha3_512_ctx Pointer to the HMAC_CTX context to be released. + **/ +extern void libspdm_hmac_sha3_512_free(void *hmac_sha3_512_ctx); + +/** + * Set user-supplied key for subsequent use. It must be done before any + * calling to libspdm_hmac_sha3_512_update(). + * + * If hmac_sha3_512_ctx is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[out] hmac_sha3_512_ctx Pointer to HMAC-SHA3-512 context. + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * + * @retval true The key is set successfully. + * @retval false The key is set unsuccessfully. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha3_512_set_key(void *hmac_sha3_512_ctx, + const uint8_t *key, + size_t key_size); + +/** + * Makes a copy of an existing HMAC-SHA3-512 context. + * + * If hmac_sha3_512_ctx is NULL, then return false. 
+ * If new_hmac_sha3_512_ctx is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in] hmac_sha3_512_ctx Pointer to HMAC-SHA3-512 context being copied. + * @param[out] new_hmac_sha3_512_ctx Pointer to new HMAC-SHA3-512 context. + * + * @retval true HMAC-SHA3-512 context copy succeeded. + * @retval false HMAC-SHA3-512 context copy failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha3_512_duplicate(const void *hmac_sha3_512_ctx, + void *new_hmac_sha3_512_ctx); + +/** + * Digests the input data and updates HMAC-SHA3-512 context. + * + * This function performs HMAC-SHA3-512 digest on a data buffer of the specified size. + * It can be called multiple times to compute the digest of long or discontinuous data streams. + * HMAC-SHA3-512 context should be initialized by libspdm_hmac_sha3_512_new(), and should not be + * finalized by libspdm_hmac_sha3_512_final(). Behavior with invalid context is undefined. + * + * If hmac_sha3_512_ctx is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in, out] hmac_sha3_512_ctx Pointer to the HMAC-SHA3-512 context. + * @param[in] data Pointer to the buffer containing the data to be digested. + * @param[in] data_size Size of data buffer in bytes. + * + * @retval true HMAC-SHA3-512 data digest succeeded. + * @retval false HMAC-SHA3-512 data digest failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha3_512_update(void *hmac_sha3_512_ctx, + const void *data, size_t data_size); + +/** + * Completes computation of the HMAC-SHA3-512 digest value. + * + * This function completes HMAC-SHA3-512 hash computation and retrieves the digest value into + * the specified memory. After this function has been called, the HMAC-SHA3-512 context cannot + * be used again. HMAC-SHA3-512 context should be initialized by libspdm_hmac_sha3_512_new(), and + * should not be finalized by libspdm_hmac_sha3_512_final(). Behavior with invalid HMAC-SHA3-512 + * context is undefined. + * + * If hmac_sha3_512_ctx is NULL, then return false. + * If hmac_value is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[in, out] hmac_sha3_512_ctx Pointer to the HMAC-SHA3-512 context. + * @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-512 digest + * value (64 bytes). + * + * @retval true HMAC-SHA3-512 digest computation succeeded. + * @retval false HMAC-SHA3-512 digest computation failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sha3_512_final(void *hmac_sha3_512_ctx, uint8_t *hmac_value); + +/** + * Computes the HMAC-SHA3-512 digest of a input data buffer. + * + * This function performs the HMAC-SHA3-512 digest of a given data buffer, and places + * the digest value into the specified memory. + * + * If this interface is not supported, then return false. + * + * @param[in] data Pointer to the buffer containing the data to be digested. + * @param[in] data_size Size of data buffer in bytes. + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * @param[out] hmac_value Pointer to a buffer that receives the HMAC-SHA3-512 digest + * value (64 bytes). + * + * @retval true HMAC-SHA3-512 digest computation succeeded. + * @retval false HMAC-SHA3-512 digest computation failed. + * @retval false This interface is not supported. 
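+ *
+ * For a single contiguous buffer, this one-shot form avoids explicit context
+ * management (an illustrative sketch; key and msg buffers are hypothetical):
+ * @code
+ * uint8_t mac[64];
+ * if (libspdm_hmac_sha3_512_all(msg, msg_size, key, key_size, mac)) {
+ *     // mac now holds the 64-byte HMAC-SHA3-512 value.
+ * }
+ * @endcode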
+ **/ +extern bool libspdm_hmac_sha3_512_all(const void *data, size_t data_size, + const uint8_t *key, size_t key_size, + uint8_t *hmac_value); +#endif /* LIBSPDM_SHA3_512_SUPPORT */ + +#if LIBSPDM_SM3_256_SUPPORT +/** + * Allocates and initializes one HMAC_CTX context for subsequent HMAC-SM3-256 use. + * + * @return Pointer to the HMAC_CTX context that has been initialized. + * If the allocations fails, libspdm_hmac_sm3_256_new() returns NULL. + **/ +extern void *libspdm_hmac_sm3_256_new(void); + +/** + * Release the specified HMAC_CTX context. + * + * @param[in] hmac_sm3_256_ctx Pointer to the HMAC_CTX context to be released. + **/ +extern void libspdm_hmac_sm3_256_free(void *hmac_sm3_256_ctx); + +/** + * Set user-supplied key for subsequent use. It must be done before any + * calling to libspdm_hmac_sm3_256_update(). + * + * If hmac_sm3_256_ctx is NULL, then return false. + * + * @param[out] hmac_sm3_256_ctx Pointer to HMAC-SM3-256 context. + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * + * @retval true The key is set successfully. + * @retval false The key is set unsuccessfully. + **/ +extern bool libspdm_hmac_sm3_256_set_key(void *hmac_sm3_256_ctx, + const uint8_t *key, size_t key_size); + +/** + * Makes a copy of an existing HMAC-SM3-256 context. + * + * If hmac_sm3_256_ctx is NULL, then return false. + * If new_hmac_sm3_256_ctx is NULL, then return false. + * + * @param[in] hmac_sm3_256_ctx Pointer to HMAC-SM3-256 context being copied. + * @param[out] new_hmac_sm3_256_ctx Pointer to new HMAC-SM3-256 context. + * + * @retval true HMAC-SM3-256 context copy succeeded. + * @retval false HMAC-SM3-256 context copy failed. + **/ +extern bool libspdm_hmac_sm3_256_duplicate(const void *hmac_sm3_256_ctx, + void *new_hmac_sm3_256_ctx); + +/** + * Digests the input data and updates HMAC-SM3-256 context. + * + * This function performs HMAC-SM3-256 digest on a data buffer of the specified size. + * It can be called multiple times to compute the digest of long or discontinuous data streams. + * HMAC-SM3-256 context should be initialized by libspdm_hmac_sm3_256_new(), and should not be + * finalized by libspdm_hmac_sm3_256_final(). Behavior with invalid context is undefined. + * + * If hmac_sm3_256_ctx is NULL, then return false. + * + * @param[in, out] hmac_sm3_256_ctx Pointer to the HMAC-SM3-256 context. + * @param[in] data Pointer to the buffer containing the data to be digested. + * @param[in] data_size Size of data buffer in bytes. + * + * @retval true HMAC-SM3-256 data digest succeeded. + * @retval false HMAC-SM3-256 data digest failed. + **/ +extern bool libspdm_hmac_sm3_256_update(void *hmac_sm3_256_ctx, const void *data, size_t data_size); + +/** + * Completes computation of the HMAC-SM3-256 digest value. + * + * This function completes HMAC-SM3-256 hash computation and retrieves the digest value into + * the specified memory. After this function has been called, the HMAC-SM3-256 context cannot + * be used again. HMAC-SM3-256 context should be initialized by libspdm_hmac_sm3_256_new(), and + * should not be finalized by libspdm_hmac_sm3_256_final(). Behavior with invalid HMAC-SM3-256 + * context is undefined. + * + * If hmac_sm3_256_ctx is NULL, then return false. + * If hmac_value is NULL, then return false. + * + * @param[in, out] hmac_sm3_256_ctx Pointer to the HMAC-SM3-256 context. + * @param[out] hmac_value Pointer to a buffer that receives the HMAC-SM3-256 digest + * value (32 bytes). 
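+ *
+ * Because the context cannot be reused after this call, duplicate it first when
+ * an intermediate MAC is needed (a hedged sketch; ctx and tmp_ctx are contexts
+ * obtained from libspdm_hmac_sm3_256_new()):
+ * @code
+ * uint8_t partial[32];
+ * if (libspdm_hmac_sm3_256_duplicate(ctx, tmp_ctx) &&
+ *     libspdm_hmac_sm3_256_final(tmp_ctx, partial)) {
+ *     // partial holds the MAC over the data fed so far; ctx remains usable.
+ * }
+ * @endcode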
+ * + * @retval true HMAC-SM3-256 digest computation succeeded. + * @retval false HMAC-SM3-256 digest computation failed. + **/ +extern bool libspdm_hmac_sm3_256_final(void *hmac_sm3_256_ctx, uint8_t *hmac_value); + +/** + * Computes the HMAC-SM3-256 digest of a input data buffer. + * + * This function performs the HMAC-SM3-256 digest of a given data buffer, and places + * the digest value into the specified memory. + * + * If this interface is not supported, then return false. + * + * @param[in] data Pointer to the buffer containing the data to be digested. + * @param[in] data_size Size of data buffer in bytes. + * @param[in] key Pointer to the user-supplied key. + * @param[in] key_size Key size in bytes. + * @param[out] hmac_value Pointer to a buffer that receives the HMAC-SM3-256 digest + * value (32 bytes). + * + * @retval true HMAC-SM3-256 digest computation succeeded. + * @retval false HMAC-SM3-256 digest computation failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_hmac_sm3_256_all(const void *data, size_t data_size, + const uint8_t *key, size_t key_size, + uint8_t *hmac_value); +#endif /* LIBSPDM_SM3_256_SUPPORT */ + +#endif /* CRYPTLIB_MAC_H */ diff --git a/kernel-open/nvidia/hal/library/cryptlib/cryptlib_rng.h b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_rng.h new file mode 100644 index 0000000..7da2bbb --- /dev/null +++ b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_rng.h @@ -0,0 +1,30 @@ +/** + * Copyright Notice: + * Copyright 2021-2022 DMTF. All rights reserved. + * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md + **/ + +#ifndef CRYPTLIB_RNG_H +#define CRYPTLIB_RNG_H + +/*===================================================================================== + * Random Number Generation Primitive + *=====================================================================================*/ + +/** + * Generates a random byte stream of the specified size. If initialization, testing, or seeding of + * the (pseudo)random number generator is required it should be done before this function is called. + * + * If output is NULL, then return false. + * If this interface is not supported, then return false. + * + * @param[out] output Pointer to buffer to receive random value. + * @param[in] size Size of random bytes to generate. + * + * @retval true Random byte stream generated successfully. + * @retval false Generation of random byte stream failed. + * @retval false This interface is not supported. + **/ +extern bool libspdm_random_bytes(uint8_t *output, size_t size); + +#endif /* CRYPTLIB_RNG_H */ diff --git a/kernel-open/nvidia/hal/library/cryptlib/cryptlib_rsa.h b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_rsa.h new file mode 100644 index 0000000..b7e68a3 --- /dev/null +++ b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_rsa.h @@ -0,0 +1,274 @@ +/** + * Copyright Notice: + * Copyright 2021-2022 DMTF. All rights reserved. + * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md + **/ + +#ifndef CRYPTLIB_RSA_H +#define CRYPTLIB_RSA_H + +/*===================================================================================== + * RSA Cryptography Primitives + *===================================================================================== + */ + +#if (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) +/* RSA key Tags Definition used in libspdm_rsa_set_key() function for key component + * identification. 
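+ *
+ * For example, a public key can be assembled by setting the modulus and the
+ * public exponent (an illustrative sketch; n, e and their sizes are
+ * hypothetical big-endian octet strings):
+ *
+ *   void *rsa = libspdm_rsa_new();
+ *   if (rsa != NULL) {
+ *       if (libspdm_rsa_set_key(rsa, LIBSPDM_RSA_KEY_N, n, n_size) &&
+ *           libspdm_rsa_set_key(rsa, LIBSPDM_RSA_KEY_E, e, e_size)) {
+ *           // rsa now holds the public key components.
+ *       }
+ *       libspdm_rsa_free(rsa);
+ *   }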
+ */
+typedef enum {
+    LIBSPDM_RSA_KEY_N, /*< RSA public modulus (N) */
+    LIBSPDM_RSA_KEY_E, /*< RSA public exponent (e) */
+    LIBSPDM_RSA_KEY_D, /*< RSA private exponent (d) */
+    LIBSPDM_RSA_KEY_P, /*< RSA secret prime factor of modulus (p) */
+    LIBSPDM_RSA_KEY_Q, /*< RSA secret prime factor of modulus (q) */
+    LIBSPDM_RSA_KEY_DP, /*< p's CRT exponent (== d mod (p - 1)) */
+    LIBSPDM_RSA_KEY_DQ, /*< q's CRT exponent (== d mod (q - 1)) */
+    LIBSPDM_RSA_KEY_Q_INV /*< The CRT coefficient (== 1/q mod p) */
+} libspdm_rsa_key_tag_t;
+
+/**
+ * Allocates and initializes one RSA context for subsequent use.
+ *
+ * @return Pointer to the RSA context that has been initialized.
+ * If the allocation fails, libspdm_rsa_new() returns NULL.
+ **/
+extern void *libspdm_rsa_new(void);
+
+/**
+ * Generates an RSA context from DER-encoded public key data.
+ *
+ * The public key is ASN.1 DER-encoded as RFC7250 describes,
+ * namely, the SubjectPublicKeyInfo structure of an X.509 certificate.
+ *
+ * @param[in] der_data Pointer to the DER-encoded public key data.
+ * @param[in] der_size Size of the DER-encoded public key data in bytes.
+ * @param[out] rsa_context Pointer to newly generated RSA context which contains the
+ * RSA public key component.
+ * Use the libspdm_rsa_free() function to free the resource.
+ *
+ * If der_data is NULL, then return false.
+ * If rsa_context is NULL, then return false.
+ *
+ * @retval true RSA context was generated successfully.
+ * @retval false Invalid DER public key data.
+ **/
+extern bool libspdm_rsa_get_public_key_from_der(const uint8_t *der_data,
+                                                size_t der_size,
+                                                void **rsa_context);
+
+/**
+ * Release the specified RSA context.
+ *
+ * If rsa_context is NULL, then no action is taken.
+ *
+ * @param[in] rsa_context Pointer to the RSA context to be released.
+ **/
+extern void libspdm_rsa_free(void *rsa_context);
+
+/**
+ * Sets the tag-designated key component into the established RSA context.
+ *
+ * This function sets the tag-designated RSA key component into the established
+ * RSA context from the user-specified non-negative integer (octet string format
+ * represented in RSA PKCS#1).
+ * If big_number is NULL, then the specified key component in the RSA context is cleared.
+ * If rsa_context is NULL, then return false.
+ *
+ * @param[in, out] rsa_context Pointer to RSA context being set.
+ * @param[in] key_tag Tag of RSA key component being set.
+ * @param[in] big_number Pointer to octet integer buffer.
+ * If NULL, then the specified key component in the RSA
+ * context is cleared.
+ * @param[in] bn_size Size of big number buffer in bytes.
+ * If big_number is NULL, then it is ignored.
+ *
+ * @retval true RSA key component was set successfully.
+ * @retval false Invalid RSA key component tag.
+ **/
+extern bool libspdm_rsa_set_key(void *rsa_context, const libspdm_rsa_key_tag_t key_tag,
+                                const uint8_t *big_number, size_t bn_size);
+
+#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */
+
+#if LIBSPDM_RSA_SSA_SUPPORT
+/**
+ * Carries out the RSA-SSA signature generation with the EMSA-PKCS1-v1_5 encoding scheme.
+ *
+ * This function carries out the RSA-SSA signature generation with the EMSA-PKCS1-v1_5 encoding
+ * scheme defined in RSA PKCS#1. If the signature buffer is too small to hold the contents of
+ * signature, false is returned and sig_size is set to the required buffer size to obtain the
+ * signature.
+ *
+ * If rsa_context is NULL, then return false.
+ * If message_hash is NULL, then return false.
+ * hash_size must match the digest size of hash_nid, which may be SHA256, SHA384, SHA512,
+ * SHA3_256, SHA3_384 or SHA3_512.
+ * If sig_size is large enough but signature is NULL, then return false.
+ * If this interface is not supported, then return false.
+ *
+ * @param[in] rsa_context Pointer to RSA context for signature generation.
+ * @param[in] hash_nid Hash NID.
+ * @param[in] message_hash Pointer to octet message hash to be signed.
+ * @param[in] hash_size Size of the message hash in bytes.
+ * @param[out] signature Pointer to buffer to receive RSA PKCS1-v1_5 signature.
+ * @param[in, out] sig_size On input, the size of signature buffer in bytes.
+ * On output, the size of data returned in signature buffer in bytes.
+ *
+ * @retval true Signature successfully generated in PKCS1-v1_5.
+ * @retval false Signature generation failed.
+ * @retval false sig_size is too small.
+ * @retval false This interface is not supported.
+ **/
+extern bool libspdm_rsa_pkcs1_sign_with_nid(void *rsa_context, size_t hash_nid,
+                                            const uint8_t *message_hash,
+                                            size_t hash_size, uint8_t *signature,
+                                            size_t *sig_size);
+
+/**
+ * Verifies the RSA-SSA signature with the EMSA-PKCS1-v1_5 encoding scheme defined in RSA PKCS#1.
+ *
+ * If rsa_context is NULL, then return false.
+ * If message_hash is NULL, then return false.
+ * If signature is NULL, then return false.
+ * hash_size must match the digest size of hash_nid, which may be SHA256, SHA384, SHA512,
+ * SHA3_256, SHA3_384 or SHA3_512.
+ *
+ * @param[in] rsa_context Pointer to RSA context for signature verification.
+ * @param[in] hash_nid Hash NID.
+ * @param[in] message_hash Pointer to octet message hash to be checked.
+ * @param[in] hash_size Size of the message hash in bytes.
+ * @param[in] signature Pointer to RSA PKCS1-v1_5 signature to be verified.
+ * @param[in] sig_size Size of signature in bytes.
+ *
+ * @retval true Valid signature encoded in PKCS1-v1_5.
+ * @retval false Invalid signature or invalid RSA context.
+ **/
+extern bool libspdm_rsa_pkcs1_verify_with_nid(void *rsa_context, size_t hash_nid,
+                                              const uint8_t *message_hash,
+                                              size_t hash_size, const uint8_t *signature,
+                                              size_t sig_size);
+#endif /* LIBSPDM_RSA_SSA_SUPPORT */
+
+#if LIBSPDM_RSA_PSS_SUPPORT
+/**
+ * Carries out the RSA-SSA signature generation with the EMSA-PSS encoding scheme.
+ *
+ * This function carries out the RSA-SSA signature generation with the EMSA-PSS encoding scheme
+ * defined in RSA PKCS#1 v2.2.
+ *
+ * The salt length is the same as the digest length.
+ *
+ * If the signature buffer is too small to hold the contents of signature, false
+ * is returned and sig_size is set to the required buffer size to obtain the signature.
+ *
+ * If rsa_context is NULL, then return false.
+ * If message_hash is NULL, then return false.
+ * hash_size must match the digest size of hash_nid, which may be SHA256, SHA384, SHA512,
+ * SHA3_256, SHA3_384 or SHA3_512.
+ * If sig_size is large enough but signature is NULL, then return false.
+ *
+ * @param[in] rsa_context Pointer to RSA context for signature generation.
+ * @param[in] hash_nid Hash NID.
+ * @param[in] message_hash Pointer to octet message hash to be signed.
+ * @param[in] hash_size Size of the message hash in bytes.
+ * @param[out] signature Pointer to buffer to receive RSA-SSA PSS signature.
+ * @param[in, out] sig_size On input, the size of signature buffer in bytes.
+ * On output, the size of data returned in signature buffer in bytes.
+ *
+ * @retval true Signature successfully generated in RSA-SSA PSS.
+ * @retval false Signature generation failed.
+ * @retval false sig_size is too small.
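+ *
+ * A hedged signing sketch (rsa_context holds a private key; digest holds a
+ * 48-byte SHA384 message hash; the LIBSPDM_CRYPTO_NID_SHA384 constant is
+ * assumed from the cryptlib NID definitions, not declared in this header):
+ * @code
+ * uint8_t sig[512];
+ * size_t sig_size = sizeof(sig);
+ * if (libspdm_rsa_pss_sign(rsa_context, LIBSPDM_CRYPTO_NID_SHA384,
+ *                          digest, 48, sig, &sig_size)) {
+ *     // sig holds sig_size bytes of RSA-SSA PSS signature.
+ * }
+ * @endcode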
+#endif /* LIBSPDM_RSA_SSA_SUPPORT */
+
+#if LIBSPDM_RSA_PSS_SUPPORT
+/**
+ * Carries out the RSA-SSA signature generation with the EMSA-PSS encoding scheme.
+ *
+ * This function carries out the RSA-SSA signature generation with the EMSA-PSS encoding scheme
+ * defined in RSA PKCS#1 v2.2.
+ *
+ * The salt length is the same as the digest length.
+ *
+ * If the signature buffer is too small to hold the contents of the signature, false
+ * is returned and sig_size is set to the required buffer size to obtain the signature.
+ *
+ * If rsa_context is NULL, then return false.
+ * If message_hash is NULL, then return false.
+ * The hash_size must match the hash_nid. The hash_nid can be SHA256, SHA384, SHA512,
+ * SHA3_256, SHA3_384, or SHA3_512.
+ * If sig_size is large enough but signature is NULL, then return false.
+ *
+ * @param[in]      rsa_context  Pointer to RSA context for signature generation.
+ * @param[in]      hash_nid     Hash NID.
+ * @param[in]      message_hash Pointer to octet message hash to be signed.
+ * @param[in]      hash_size    Size of the message hash in bytes.
+ * @param[out]     signature    Pointer to buffer to receive RSA-SSA PSS signature.
+ * @param[in, out] sig_size     On input, the size of the signature buffer in bytes.
+ *                              On output, the size of data returned in the signature buffer in bytes.
+ *
+ * @retval true  Signature successfully generated in RSA-SSA PSS.
+ * @retval false Signature generation failed.
+ * @retval false sig_size is too small.
+ **/
+extern bool libspdm_rsa_pss_sign(void *rsa_context, size_t hash_nid,
+                                 const uint8_t *message_hash, size_t hash_size,
+                                 uint8_t *signature, size_t *sig_size);
+
+/**
+ * Verifies the RSA-SSA signature with the EMSA-PSS encoding scheme defined in
+ * RSA PKCS#1 v2.2.
+ *
+ * The salt length is the same as the digest length.
+ *
+ * If rsa_context is NULL, then return false.
+ * If message_hash is NULL, then return false.
+ * If signature is NULL, then return false.
+ * The hash_size must match the hash_nid. The hash_nid can be SHA256, SHA384, SHA512,
+ * SHA3_256, SHA3_384, or SHA3_512.
+ *
+ * @param[in] rsa_context  Pointer to RSA context for signature verification.
+ * @param[in] hash_nid     Hash NID.
+ * @param[in] message_hash Pointer to octet message hash to be checked.
+ * @param[in] hash_size    Size of the message hash in bytes.
+ * @param[in] signature    Pointer to RSA-SSA PSS signature to be verified.
+ * @param[in] sig_size     Size of signature in bytes.
+ *
+ * @retval true  Valid signature encoded in RSA-SSA PSS.
+ * @retval false Invalid signature or invalid RSA context.
+ **/
+extern bool libspdm_rsa_pss_verify(void *rsa_context, size_t hash_nid,
+                                   const uint8_t *message_hash, size_t hash_size,
+                                   const uint8_t *signature, size_t sig_size);
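+
+/* Illustrative signing sketch (editorial addition): signs a SHA-256 digest
+ * with RSA-PSS. Note that sig_size is in/out: on entry the capacity of sig,
+ * on return the number of signature bytes written. rsa and digest are
+ * assumptions set up by the caller.
+ *
+ *     uint8_t sig[512];
+ *     size_t  sig_size = sizeof(sig);
+ *     if (!libspdm_rsa_pss_sign(rsa, LIBSPDM_CRYPTO_NID_SHA256,
+ *                               digest, 32, sig, &sig_size)) {
+ *         // on a too-small buffer, sig_size now holds the required size
+ *     }
+ */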
+
+#if LIBSPDM_FIPS_MODE
+/**
+ * Carries out the RSA-SSA signature generation with the EMSA-PSS encoding scheme for FIPS test.
+ *
+ * This function carries out the RSA-SSA signature generation with the EMSA-PSS encoding scheme
+ * defined in RSA PKCS#1 v2.2 for FIPS test.
+ *
+ * The salt length is zero.
+ *
+ * If the signature buffer is too small to hold the contents of the signature, false
+ * is returned and sig_size is set to the required buffer size to obtain the signature.
+ *
+ * If rsa_context is NULL, then return false.
+ * If message_hash is NULL, then return false.
+ * The hash_size must match the hash_nid. The hash_nid can be SHA256, SHA384, SHA512,
+ * SHA3_256, SHA3_384, or SHA3_512.
+ * If sig_size is large enough but signature is NULL, then return false.
+ *
+ * @param[in]      rsa_context  Pointer to RSA context for signature generation.
+ * @param[in]      hash_nid     Hash NID.
+ * @param[in]      message_hash Pointer to octet message hash to be signed.
+ * @param[in]      hash_size    Size of the message hash in bytes.
+ * @param[out]     signature    Pointer to buffer to receive RSA-SSA PSS signature.
+ * @param[in, out] sig_size     On input, the size of the signature buffer in bytes.
+ *                              On output, the size of data returned in the signature buffer in bytes.
+ *
+ * @retval true  Signature successfully generated in RSA-SSA PSS.
+ * @retval false Signature generation failed.
+ * @retval false sig_size is too small.
+ **/
+extern bool libspdm_rsa_pss_sign_fips(void *rsa_context, size_t hash_nid,
+                                      const uint8_t *message_hash, size_t hash_size,
+                                      uint8_t *signature, size_t *sig_size);
+
+/**
+ * Verifies the RSA-SSA signature with the EMSA-PSS encoding scheme defined in
+ * RSA PKCS#1 v2.2 for FIPS test.
+ *
+ * The salt length is zero.
+ *
+ * If rsa_context is NULL, then return false.
+ * If message_hash is NULL, then return false.
+ * If signature is NULL, then return false.
+ * The hash_size must match the hash_nid. The hash_nid can be SHA256, SHA384, SHA512,
+ * SHA3_256, SHA3_384, or SHA3_512.
+ *
+ * @param[in] rsa_context  Pointer to RSA context for signature verification.
+ * @param[in] hash_nid     Hash NID.
+ * @param[in] message_hash Pointer to octet message hash to be checked.
+ * @param[in] hash_size    Size of the message hash in bytes.
+ * @param[in] signature    Pointer to RSA-SSA PSS signature to be verified.
+ * @param[in] sig_size     Size of signature in bytes.
+ *
+ * @retval true  Valid signature encoded in RSA-SSA PSS.
+ * @retval false Invalid signature or invalid RSA context.
+ **/
+extern bool libspdm_rsa_pss_verify_fips(void *rsa_context, size_t hash_nid,
+                                        const uint8_t *message_hash, size_t hash_size,
+                                        const uint8_t *signature, size_t sig_size);
+#endif /* LIBSPDM_FIPS_MODE */
+
+#endif /* LIBSPDM_RSA_PSS_SUPPORT */
+#endif /* CRYPTLIB_RSA_H */
diff --git a/kernel-open/nvidia/hal/library/cryptlib/cryptlib_sm2.h b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_sm2.h
new file mode 100644
index 0000000..b733331
--- /dev/null
+++ b/kernel-open/nvidia/hal/library/cryptlib/cryptlib_sm2.h
@@ -0,0 +1,217 @@
+/**
+ * Copyright Notice:
+ * Copyright 2021-2022 DMTF. All rights reserved.
+ * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
+ **/
+
+#ifndef CRYPTLIB_SM2_H
+#define CRYPTLIB_SM2_H
+
+/*=====================================================================================
+ * Shang-Mi2 Primitives
+ *=====================================================================================*/
+
+#if LIBSPDM_SM2_DSA_SUPPORT
+/**
+ * Allocates and initializes one Shang-Mi2 context for subsequent use.
+ *
+ * @param nid Cipher NID.
+ *
+ * @return Pointer to the Shang-Mi2 context that has been initialized.
+ *         If the allocation fails, libspdm_sm2_dsa_new_by_nid() returns NULL.
+ **/
+extern void *libspdm_sm2_dsa_new_by_nid(size_t nid);
+
+/**
+ * Generates an SM2 context from DER-encoded public key data.
+ *
+ * The public key is ASN.1 DER-encoded as RFC 7250 describes,
+ * namely, the SubjectPublicKeyInfo structure of an X.509 certificate.
+ *
+ * @param[in]  der_data    Pointer to the DER-encoded public key data.
+ * @param[in]  der_size    Size of the DER-encoded public key data in bytes.
+ * @param[out] sm2_context Pointer to newly generated SM2 context which contains the
+ *                         SM2 public key component.
+ *                         Use the libspdm_sm2_dsa_free() function to free the resource.
+ *
+ * If der_data is NULL, then return false.
+ * If sm2_context is NULL, then return false.
+ *
+ * @retval true  SM2 context was generated successfully.
+ * @retval false Invalid DER public key data.
+ **/
+extern bool libspdm_sm2_get_public_key_from_der(const uint8_t *der_data,
+                                                size_t der_size,
+                                                void **sm2_context);
+
+/**
+ * Releases the specified SM2 context.
+ *
+ * @param[in] sm2_context Pointer to the SM2 context to be released.
+ **/
+extern void libspdm_sm2_dsa_free(void *sm2_context);
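+
+/* Illustrative lifecycle sketch (editorial addition): imports an SM2 public
+ * key from a DER-encoded SubjectPublicKeyInfo blob and releases it. der and
+ * der_size are assumptions provided by the caller.
+ *
+ *     void *sm2 = NULL;
+ *     if (libspdm_sm2_get_public_key_from_der(der, der_size, &sm2)) {
+ *         // ... use sm2 with libspdm_sm2_dsa_verify(), declared below ...
+ *         libspdm_sm2_dsa_free(sm2);
+ *     }
+ */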
+
+/**
+ * Carries out the SM2 signature, based upon GB/T 32918.2-2016: SM2 - Part2.
+ *
+ * This function carries out the SM2 signature.
+ * If the signature buffer is too small to hold the contents of the signature, false
+ * is returned and sig_size is set to the required buffer size to obtain the signature.
+ *
+ * If sm2_context is NULL, then return false.
+ * If message is NULL, then return false.
+ * The hash_nid must be SM3_256.
+ * If sig_size is large enough but signature is NULL, then return false.
+ *
+ * The id_a_size must be smaller than 2^16-1.
+ * The sig_size is 64: the first 32 bytes are R, the second 32 bytes are S.
+ *
+ * @param[in]      sm2_context Pointer to SM2 context for signature generation.
+ * @param[in]      hash_nid    Hash NID.
+ * @param[in]      id_a        The ID-A of the signing context.
+ * @param[in]      id_a_size   Size of ID-A signing context.
+ * @param[in]      message     Pointer to octet message to be signed (before hash).
+ * @param[in]      size        Size of the message in bytes.
+ * @param[out]     signature   Pointer to buffer to receive SM2 signature.
+ * @param[in, out] sig_size    On input, the size of the signature buffer in bytes.
+ *                             On output, the size of data returned in the signature buffer in bytes.
+ *
+ * @retval true  Signature successfully generated in SM2.
+ * @retval false Signature generation failed.
+ * @retval false sig_size is too small.
+ **/
+extern bool libspdm_sm2_dsa_sign(const void *sm2_context, size_t hash_nid,
+                                 const uint8_t *id_a, size_t id_a_size,
+                                 const uint8_t *message, size_t size,
+                                 uint8_t *signature, size_t *sig_size);
+
+/**
+ * Verifies the SM2 signature, based upon GB/T 32918.2-2016: SM2 - Part2.
+ *
+ * If sm2_context is NULL, then return false.
+ * If message is NULL, then return false.
+ * If signature is NULL, then return false.
+ * The hash_nid must be SM3_256.
+ *
+ * The id_a_size must be smaller than 2^16-1.
+ * The sig_size is 64: the first 32 bytes are R, the second 32 bytes are S.
+ *
+ * @param[in] sm2_context Pointer to SM2 context for signature verification.
+ * @param[in] hash_nid    Hash NID.
+ * @param[in] id_a        The ID-A of the signing context.
+ * @param[in] id_a_size   Size of ID-A signing context.
+ * @param[in] message     Pointer to octet message to be checked (before hash).
+ * @param[in] size        Size of the message in bytes.
+ * @param[in] signature   Pointer to SM2 signature to be verified.
+ * @param[in] sig_size    Size of signature in bytes.
+ *
+ * @retval true  Valid signature encoded in SM2.
+ * @retval false Invalid signature or invalid SM2 context.
+ **/
+extern bool libspdm_sm2_dsa_verify(const void *sm2_context, size_t hash_nid,
+                                   const uint8_t *id_a, size_t id_a_size,
+                                   const uint8_t *message, size_t size,
+                                   const uint8_t *signature, size_t sig_size);
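+
+/* Illustrative verification sketch (editorial addition): verifies a 64-byte
+ * (R || S) SM2 signature over a raw message, using the SM3 hash NID declared
+ * in cryptlib.h. sm2, id_a, msg, and sig are assumptions provided by the
+ * caller.
+ *
+ *     bool ok = libspdm_sm2_dsa_verify(sm2, LIBSPDM_CRYPTO_NID_SM3_256,
+ *                                      id_a, id_a_size,
+ *                                      msg, msg_size,
+ *                                      sig, 64);
+ */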
+#endif /* LIBSPDM_SM2_DSA_SUPPORT */
+
+#if LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT
+/**
+ * Allocates and initializes one Shang-Mi2 context for subsequent use.
+ *
+ * @param nid Cipher NID.
+ *
+ * @return Pointer to the Shang-Mi2 context that has been initialized.
+ *         If the allocation fails, libspdm_sm2_key_exchange_new_by_nid() returns NULL.
+ **/
+extern void *libspdm_sm2_key_exchange_new_by_nid(size_t nid);
+
+/**
+ * Releases the specified SM2 context.
+ *
+ * @param[in] sm2_context Pointer to the SM2 context to be released.
+ **/
+extern void libspdm_sm2_key_exchange_free(void *sm2_context);
+
+/**
+ * Initializes the specified SM2 context.
+ *
+ * @param[in] sm2_context  Pointer to the SM2 context to be initialized.
+ * @param[in] hash_nid     Hash NID; only SM3 is valid.
+ * @param[in] id_a         The ID-A of the key exchange context.
+ * @param[in] id_a_size    Size of ID-A key exchange context.
+ * @param[in] id_b         The ID-B of the key exchange context.
+ * @param[in] id_b_size    Size of ID-B key exchange context.
+ * @param[in] is_initiator Whether the caller is the initiator.
+ *
+ * @retval true  SM2 context is initialized.
+ * @retval false SM2 context is not initialized.
+ **/
+extern bool libspdm_sm2_key_exchange_init(const void *sm2_context, size_t hash_nid,
+                                          const uint8_t *id_a, size_t id_a_size,
+                                          const uint8_t *id_b, size_t id_b_size,
+                                          bool is_initiator);
+
+/**
+ * Generates an SM2 key and returns the SM2 public key (X, Y), based upon GB/T 32918.3-2016: SM2 - Part3.
+ *
+ * This function generates a random secret and computes the public key (X, Y), which is
+ * returned via the parameters public_data and public_size.
+ * X is the first half of public_data with size being public_size / 2,
+ * Y is the second half of public_data with size being public_size / 2.
+ * The SM2 context is updated accordingly.
+ * If the public buffer is too small to hold the public X, Y, false is returned and
+ * public_size is set to the required buffer size to obtain the public X, Y.
+ *
+ * The public_size is 64: the first 32 bytes are X, the second 32 bytes are Y.
+ *
+ * If sm2_context is NULL, then return false.
+ * If public_size is NULL, then return false.
+ * If public_size is large enough but public_data is NULL, then return false.
+ *
+ * @param[in, out] sm2_context Pointer to the SM2 context.
+ * @param[out]     public_data Pointer to the buffer to receive the generated public X, Y.
+ * @param[in, out] public_size On input, the size of the public buffer in bytes.
+ *                             On output, the size of data returned in the public buffer in bytes.
+ *
+ * @retval true  SM2 public X, Y generation succeeded.
+ * @retval false SM2 public X, Y generation failed.
+ * @retval false public_size is not large enough.
+ **/
+extern bool libspdm_sm2_key_exchange_generate_key(void *sm2_context, uint8_t *public_data,
+                                                  size_t *public_size);
+
+/**
+ * Computes the exchanged common key, based upon GB/T 32918.3-2016: SM2 - Part3.
+ *
+ * Given the peer's public key (X, Y), this function computes the exchanged common key,
+ * based on its own context, including the value of the curve parameter and the random secret.
+ * X is the first half of peer_public with size being peer_public_size / 2,
+ * Y is the second half of peer_public with size being peer_public_size / 2.
+ *
+ * If sm2_context is NULL, then return false.
+ * If peer_public is NULL, then return false.
+ * If peer_public_size is 0, then return false.
+ * If key is NULL, then return false.
+ *
+ * The id_a_size and id_b_size must be smaller than 2^16-1.
+ * The peer_public_size is 64: the first 32 bytes are X, the second 32 bytes are Y.
+ * The key_size must be smaller than 2^32-1, limited by the KDF function.
+ *
+ * @param[in, out] sm2_context      Pointer to the SM2 context.
+ * @param[in]      peer_public      Pointer to the peer's public X, Y.
+ * @param[in]      peer_public_size Size of the peer's public X, Y in bytes.
+ * @param[out]     key              Pointer to the buffer to receive the generated key.
+ * @param[in, out] key_size         On input, the size of the key buffer in bytes.
+ *
+ * @retval true  SM2 exchanged key generation succeeded.
+ * @retval false SM2 exchanged key generation failed.
+ **/
+extern bool libspdm_sm2_key_exchange_compute_key(void *sm2_context,
+                                                 const uint8_t *peer_public,
+                                                 size_t peer_public_size, uint8_t *key,
+                                                 size_t *key_size);
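+
+/* Illustrative key-exchange flow (editorial addition): one side of the
+ * exchange, shown as a sketch. id_a/id_b, their sizes, and peer_pub (the
+ * peer's 64-byte public X, Y) are assumptions supplied by the caller, and
+ * error handling is abbreviated.
+ *
+ *     void   *ctx = libspdm_sm2_key_exchange_new_by_nid(LIBSPDM_CRYPTO_NID_SM2_KEY_EXCHANGE_P256);
+ *     uint8_t pub[64];
+ *     size_t  pub_size = sizeof(pub);
+ *     uint8_t key[16];
+ *     size_t  key_size = sizeof(key);
+ *
+ *     libspdm_sm2_key_exchange_init(ctx, LIBSPDM_CRYPTO_NID_SM3_256,
+ *                                   id_a, id_a_size, id_b, id_b_size, true);
+ *     libspdm_sm2_key_exchange_generate_key(ctx, pub, &pub_size);
+ *     // ... send pub to the peer; receive the peer's 64-byte peer_pub ...
+ *     libspdm_sm2_key_exchange_compute_key(ctx, peer_pub, 64, key, &key_size);
+ *     libspdm_sm2_key_exchange_free(ctx);
+ */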
+#endif /* LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT */
+#endif /* CRYPTLIB_SM2_H */
diff --git a/kernel-open/nvidia/internal/libspdm_lib_config.h b/kernel-open/nvidia/internal/libspdm_lib_config.h
new file mode 100644
index 0000000..bab6e7a
--- /dev/null
+++ b/kernel-open/nvidia/internal/libspdm_lib_config.h
@@ -0,0 +1,91 @@
+/**
+ * Copyright Notice:
+ * Copyright 2021-2024 DMTF. All rights reserved.
+ * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
+ **/
+
+#ifndef LIBSPDM_LIB_CONFIG_H
+#define LIBSPDM_LIB_CONFIG_H
+
+#ifndef LIBSPDM_CONFIG
+#include "library/spdm_lib_config.h"
+#else
+#include LIBSPDM_CONFIG
+#endif
+
+#if defined(LIBSPDM_DEBUG_ENABLE)
+#undef LIBSPDM_DEBUG_ASSERT_ENABLE
+#undef LIBSPDM_DEBUG_PRINT_ENABLE
+#undef LIBSPDM_DEBUG_BLOCK_ENABLE
+
+#define LIBSPDM_DEBUG_ASSERT_ENABLE (LIBSPDM_DEBUG_ENABLE)
+#define LIBSPDM_DEBUG_PRINT_ENABLE (LIBSPDM_DEBUG_ENABLE)
+#define LIBSPDM_DEBUG_BLOCK_ENABLE (LIBSPDM_DEBUG_ENABLE)
+#endif /* defined(LIBSPDM_DEBUG_ENABLE) */
+
+/* When in FIPS mode, only FIPS-approved algorithms are supported. */
+#if LIBSPDM_FIPS_MODE
+#undef LIBSPDM_SM2_DSA_P256_SUPPORT
+#define LIBSPDM_SM2_DSA_P256_SUPPORT 0
+
+#undef LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT
+#define LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT 0
+
+#undef LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT
+#define LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT 0
+
+#undef LIBSPDM_AEAD_SM4_128_GCM_SUPPORT
+#define LIBSPDM_AEAD_SM4_128_GCM_SUPPORT 0
+
+#undef LIBSPDM_SM3_256_SUPPORT
+#define LIBSPDM_SM3_256_SUPPORT 0
+#endif /* LIBSPDM_FIPS_MODE */
+
+/* Define the parameterless crypto-algorithm macros in terms of the per-parameter ones. */
+#define LIBSPDM_RSA_SSA_SUPPORT ((LIBSPDM_RSA_SSA_2048_SUPPORT) || \
+                                 (LIBSPDM_RSA_SSA_3072_SUPPORT) || \
+                                 (LIBSPDM_RSA_SSA_4096_SUPPORT))
+
+#define LIBSPDM_RSA_PSS_SUPPORT ((LIBSPDM_RSA_PSS_2048_SUPPORT) || \
+                                 (LIBSPDM_RSA_PSS_3072_SUPPORT) || \
+                                 (LIBSPDM_RSA_PSS_4096_SUPPORT))
+
+#define LIBSPDM_ECDSA_SUPPORT ((LIBSPDM_ECDSA_P256_SUPPORT) || \
+                               (LIBSPDM_ECDSA_P384_SUPPORT) || \
+                               (LIBSPDM_ECDSA_P521_SUPPORT))
+
+#define LIBSPDM_SM2_DSA_SUPPORT (LIBSPDM_SM2_DSA_P256_SUPPORT)
+
+#define LIBSPDM_EDDSA_SUPPORT ((LIBSPDM_EDDSA_ED25519_SUPPORT) || \
+                               (LIBSPDM_EDDSA_ED448_SUPPORT))
+
+#define LIBSPDM_FFDHE_SUPPORT ((LIBSPDM_FFDHE_2048_SUPPORT) || \
+                               (LIBSPDM_FFDHE_3072_SUPPORT) || \
+                               (LIBSPDM_FFDHE_4096_SUPPORT))
+
+#define LIBSPDM_ECDHE_SUPPORT ((LIBSPDM_ECDHE_P256_SUPPORT) || \
+                               (LIBSPDM_ECDHE_P384_SUPPORT) || \
+                               (LIBSPDM_ECDHE_P521_SUPPORT))
+
+#define LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT (LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT)
+
+#define LIBSPDM_AEAD_GCM_SUPPORT ((LIBSPDM_AEAD_AES_128_GCM_SUPPORT) || \
+                                  (LIBSPDM_AEAD_AES_256_GCM_SUPPORT))
+
+#define LIBSPDM_AEAD_SM4_SUPPORT (LIBSPDM_AEAD_SM4_128_GCM_SUPPORT)
+
+#define LIBSPDM_SHA2_SUPPORT ((LIBSPDM_SHA256_SUPPORT) || \
+                              (LIBSPDM_SHA384_SUPPORT) || \
+                              (LIBSPDM_SHA512_SUPPORT))
+
+#define LIBSPDM_SHA3_SUPPORT ((LIBSPDM_SHA3_256_SUPPORT) || \
+                              (LIBSPDM_SHA3_384_SUPPORT) || \
+                              (LIBSPDM_SHA3_512_SUPPORT))
+
+#define LIBSPDM_SM3_SUPPORT (LIBSPDM_SM3_256_SUPPORT)
+
+#if LIBSPDM_CHECK_MACRO
+#include "internal/libspdm_macro_check.h"
+#endif /* LIBSPDM_CHECK_MACRO */
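+
+/* Editorial note with a worked example of the composite macros above: if an
+ * integrator builds with only P-384 ECDSA enabled, e.g.
+ *
+ *     #define LIBSPDM_ECDSA_P256_SUPPORT 0
+ *     #define LIBSPDM_ECDSA_P384_SUPPORT 1
+ *     #define LIBSPDM_ECDSA_P521_SUPPORT 0
+ *
+ * then LIBSPDM_ECDSA_SUPPORT evaluates to 1 (0 || 1 || 0), so the generic
+ * ECDSA code paths are still compiled in while the other curves drop out.
+ */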
+
+#endif /* LIBSPDM_LIB_CONFIG_H */
diff --git a/kernel-open/nvidia/internal_crypt_lib.h b/kernel-open/nvidia/internal_crypt_lib.h
new file mode 100644
index 0000000..917acb2
--- /dev/null
+++ b/kernel-open/nvidia/internal_crypt_lib.h
@@ -0,0 +1,170 @@
+/*
+* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+* SPDX-License-Identifier: MIT
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+* DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef __INTERNAL_CRYPT_LIB_H__
+#define __INTERNAL_CRYPT_LIB_H__
+
+/*
+ * This code uses the Linux Kernel Crypto API extensively. The web page written
+ * by Stephan Mueller and Marek Vasut is a good starting reference on how the
+ * Linux kernel provides its crypto API.
+ */
+#include "conftest.h"
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+// Check if ECDH/ECDSA are there, on some platforms they might not be...
+#ifndef AUTOCONF_INCLUDED
+#if defined(NV_GENERATED_AUTOCONF_H_PRESENT)
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+#endif
+#if \
+    (defined(CONFIG_CRYPTO_AEAD) || defined(CONFIG_CRYPTO_AEAD_MODULE)) && \
+    (defined(CONFIG_CRYPTO_AKCIPHER) || defined(CONFIG_CRYPTO_AKCIPHER_MODULE)) && \
+    (defined(CONFIG_CRYPTO_SKCIPHER) || defined(CONFIG_CRYPTO_SKCIPHER_MODULE)) && \
+    (defined(CONFIG_CRYPTO_HASH) || defined(CONFIG_CRYPTO_HASH_MODULE)) && \
+    (defined(CONFIG_CRYPTO_HMAC) || defined(CONFIG_CRYPTO_HMAC_MODULE)) && \
+    (defined(CONFIG_CRYPTO_ECDH) || defined(CONFIG_CRYPTO_ECDH_MODULE)) && \
+    (defined(CONFIG_CRYPTO_ECDSA) || defined(CONFIG_CRYPTO_ECDSA_MODULE)) && \
+    (defined(CONFIG_CRYPTO_RSA) || defined(CONFIG_CRYPTO_RSA_MODULE)) && \
+    (defined(CONFIG_X509_CERTIFICATE_PARSER) || defined(CONFIG_X509_CERTIFICATE_PARSER_MODULE))
+#define NV_CONFIG_CRYPTO_PRESENT 1
+#endif
+
+/*
+ * It is possible that we don't have access to all the functions we need. This
+ * could be because we are running a non-GPL kernel, because the kernel is too
+ * old, or simply because the user disabled them. If we can use LKCA, include
+ * its headers; otherwise define stubs that return errors.
+ */
+#if defined(NV_CRYPTO_PRESENT) && defined (NV_CONFIG_CRYPTO_PRESENT) && \
+    (defined(NV_CRYPTO_AKCIPHER_VERIFY_PRESENT) || \
+     (defined(NV_CRYPTO_SIG_H_PRESENT) && defined(NV_ECC_DIGITS_FROM_BYTES_PRESENT)))
+#define USE_LKCA 1
+#endif
+
+#ifdef USE_LKCA
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+// HASH_MAX_DIGESTSIZE is available since 4.20.
+// This value is accurate as of 6.1
+#ifndef HASH_MAX_DIGESTSIZE
+#define HASH_MAX_DIGESTSIZE 64
+#endif
+
+#else
+// Just stub everything out
+struct shash_desc;
+struct crypto_shash;
+#define crypto_shash_setkey(...) -ENOMEM
+#define crypto_shash_init(...) -ENOMEM
+#define crypto_shash_update(...) -ENOMEM
+#define crypto_shash_final(...)
-ENOMEM +#endif + +#define CHAR_BIT 8U +#undef SIZE_MAX +#define SIZE_MAX 8 + +#include "library/cryptlib.h" + +#define LIBSPDM_ASSERT(...) +struct lkca_aead_ctx; +int lkca_aead_alloc(struct lkca_aead_ctx **ctx, char const *alg); +void lkca_aead_free(struct lkca_aead_ctx *ctx); +int lkca_aead_ex(struct lkca_aead_ctx *ctx, + const uint8_t *key, size_t key_size, + uint8_t *iv, size_t iv_size, + const uint8_t *data_in, size_t data_in_size, + uint8_t *tag, size_t tag_size, + uint8_t *data_out, size_t *data_out_size, + bool enc); + +int libspdm_aead(const uint8_t *key, size_t key_size, + const uint8_t *iv, size_t iv_size, + const uint8_t *a_data, size_t a_data_size, + const uint8_t *data_in, size_t data_in_size, + const uint8_t *tag, size_t tag_size, + uint8_t *data_out, size_t *data_out_size, + bool enc, char const *alg); + +void *lkca_hash_new(const char* alg_name); +void lkca_hash_free(struct shash_desc *ctx); +bool lkca_hash_duplicate(struct shash_desc *dst, struct shash_desc const *src); +bool lkca_hash_all(const char* alg_name, const void *data, + size_t data_size, uint8_t *hash_value); +bool lkca_hmac_duplicate(struct shash_desc *dst, struct shash_desc const *src); +bool lkca_hmac_set_key(struct shash_desc *ctx, const uint8_t *key, size_t key_size); +bool lkca_hmac_all(const char* alg_name, const uint8_t *key, size_t key_size, + const uint8_t *data, size_t data_size, uint8_t *hash_value); +bool lkca_hkdf_extract_and_expand(const char *alg_name, + const uint8_t *key, size_t key_size, + const uint8_t *salt, size_t salt_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size); +bool lkca_hkdf_expand(const char *alg_name, + const uint8_t *prk, size_t prk_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size); + + +bool lkca_ecdsa_set_priv_key(void *context, uint8_t *key, size_t key_size); +bool lkca_ec_set_pub_key(void *ec_context, const uint8_t *public_key, + size_t public_key_size); +bool lkca_ec_get_pub_key(void *ec_context, uint8_t *public_key, + size_t *public_key_size); +bool lkca_ec_generate_key(void *ec_context, uint8_t *public_data, + size_t *public_size); +bool lkca_ec_compute_key(void *ec_context, const uint8_t *peer_public, + size_t peer_public_size, uint8_t *key, + size_t *key_size); +bool lkca_ecdsa_verify(void *ec_context, size_t hash_nid, + const uint8_t *message_hash, size_t hash_size, + const uint8_t *signature, size_t sig_size); + +bool lkca_rsa_verify(void *rsa_context, size_t hash_nid, + const uint8_t *message_hash, size_t hash_size, + const uint8_t *signature, size_t sig_size); + +bool lkca_rsa_pkcs1_sign(void *rsa_context, size_t hash_nid, + const uint8_t *message_hash, size_t hash_size, + uint8_t *signature, size_t *sig_size); + +bool lkca_rsa_pss_sign(void *rsa_context, size_t hash_nid, + const uint8_t *message_hash, size_t hash_size, + uint8_t *signature, size_t *sig_size); + +#endif diff --git a/kernel-open/nvidia/library/cryptlib.h b/kernel-open/nvidia/library/cryptlib.h new file mode 100644 index 0000000..94be665 --- /dev/null +++ b/kernel-open/nvidia/library/cryptlib.h @@ -0,0 +1,109 @@ +/** + * Copyright Notice: + * Copyright 2021-2022 DMTF. All rights reserved. + * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md + **/ + +/** @file + * Defines base cryptographic library APIs. 
+ * The Base Cryptographic Library provides implementations of basic cryptography + * primitives (hash Serials, HMAC, AES, RSA, Diffie-Hellman, Elliptic Curve, etc) for security + * functionality enabling. + **/ + +#ifndef CRYPTLIB_H +#define CRYPTLIB_H + +#include "internal/libspdm_lib_config.h" + +#define LIBSPDM_CRYPTO_NID_NULL 0x0000 + +/* Hash */ +#define LIBSPDM_CRYPTO_NID_SHA256 0x0001 +#define LIBSPDM_CRYPTO_NID_SHA384 0x0002 +#define LIBSPDM_CRYPTO_NID_SHA512 0x0003 +#define LIBSPDM_CRYPTO_NID_SHA3_256 0x0004 +#define LIBSPDM_CRYPTO_NID_SHA3_384 0x0005 +#define LIBSPDM_CRYPTO_NID_SHA3_512 0x0006 +#define LIBSPDM_CRYPTO_NID_SM3_256 0x0007 + +/* Signing */ +#define LIBSPDM_CRYPTO_NID_RSASSA2048 0x0101 +#define LIBSPDM_CRYPTO_NID_RSASSA3072 0x0102 +#define LIBSPDM_CRYPTO_NID_RSASSA4096 0x0103 +#define LIBSPDM_CRYPTO_NID_RSAPSS2048 0x0104 +#define LIBSPDM_CRYPTO_NID_RSAPSS3072 0x0105 +#define LIBSPDM_CRYPTO_NID_RSAPSS4096 0x0106 +#define LIBSPDM_CRYPTO_NID_ECDSA_NIST_P256 0x0107 +#define LIBSPDM_CRYPTO_NID_ECDSA_NIST_P384 0x0108 +#define LIBSPDM_CRYPTO_NID_ECDSA_NIST_P521 0x0109 +#define LIBSPDM_CRYPTO_NID_SM2_DSA_P256 0x010A +#define LIBSPDM_CRYPTO_NID_EDDSA_ED25519 0x010B +#define LIBSPDM_CRYPTO_NID_EDDSA_ED448 0x010C + +/* Key Exchange */ +#define LIBSPDM_CRYPTO_NID_FFDHE2048 0x0201 +#define LIBSPDM_CRYPTO_NID_FFDHE3072 0x0202 +#define LIBSPDM_CRYPTO_NID_FFDHE4096 0x0203 +#define LIBSPDM_CRYPTO_NID_SECP256R1 0x0204 +#define LIBSPDM_CRYPTO_NID_SECP384R1 0x0205 +#define LIBSPDM_CRYPTO_NID_SECP521R1 0x0206 +#define LIBSPDM_CRYPTO_NID_SM2_KEY_EXCHANGE_P256 0x0207 +#define LIBSPDM_CRYPTO_NID_CURVE_X25519 0x0208 +#define LIBSPDM_CRYPTO_NID_CURVE_X448 0x0209 + +/* AEAD */ +#define LIBSPDM_CRYPTO_NID_AES_128_GCM 0x0301 +#define LIBSPDM_CRYPTO_NID_AES_256_GCM 0x0302 +#define LIBSPDM_CRYPTO_NID_CHACHA20_POLY1305 0x0303 +#define LIBSPDM_CRYPTO_NID_SM4_128_GCM 0x0304 + +/* X.509 v3 key usage extension flags. */ +#define LIBSPDM_CRYPTO_X509_KU_DIGITAL_SIGNATURE 0x80 +#define LIBSPDM_CRYPTO_X509_KU_NON_REPUDIATION 0x40 +#define LIBSPDM_CRYPTO_X509_KU_KEY_ENCIPHERMENT 0x20 +#define LIBSPDM_CRYPTO_X509_KU_DATA_ENCIPHERMENT 0x10 +#define LIBSPDM_CRYPTO_X509_KU_KEY_AGREEMENT 0x08 +#define LIBSPDM_CRYPTO_X509_KU_KEY_CERT_SIGN 0x04 +#define LIBSPDM_CRYPTO_X509_KU_CRL_SIGN 0x02 +#define LIBSPDM_CRYPTO_X509_KU_ENCIPHER_ONLY 0x01 +#define LIBSPDM_CRYPTO_X509_KU_DECIPHER_ONLY 0x8000 + +/* These constants comply with the DER encoded ASN.1 type tags. 
*/ +#define LIBSPDM_CRYPTO_ASN1_BOOLEAN 0x01 +#define LIBSPDM_CRYPTO_ASN1_INTEGER 0x02 +#define LIBSPDM_CRYPTO_ASN1_BIT_STRING 0x03 +#define LIBSPDM_CRYPTO_ASN1_OCTET_STRING 0x04 +#define LIBSPDM_CRYPTO_ASN1_NULL 0x05 +#define LIBSPDM_CRYPTO_ASN1_OID 0x06 +#define LIBSPDM_CRYPTO_ASN1_UTF8_STRING 0x0C +#define LIBSPDM_CRYPTO_ASN1_SEQUENCE 0x10 +#define LIBSPDM_CRYPTO_ASN1_SET 0x11 +#define LIBSPDM_CRYPTO_ASN1_PRINTABLE_STRING 0x13 +#define LIBSPDM_CRYPTO_ASN1_T61_STRING 0x14 +#define LIBSPDM_CRYPTO_ASN1_IA5_STRING 0x16 +#define LIBSPDM_CRYPTO_ASN1_UTC_TIME 0x17 +#define LIBSPDM_CRYPTO_ASN1_GENERALIZED_TIME 0x18 +#define LIBSPDM_CRYPTO_ASN1_UNIVERSAL_STRING 0x1C +#define LIBSPDM_CRYPTO_ASN1_BMP_STRING 0x1E +#define LIBSPDM_CRYPTO_ASN1_PRIMITIVE 0x00 +#define LIBSPDM_CRYPTO_ASN1_CONSTRUCTED 0x20 +#define LIBSPDM_CRYPTO_ASN1_CONTEXT_SPECIFIC 0x80 + +#define LIBSPDM_CRYPTO_ASN1_TAG_CLASS_MASK 0xC0 +#define LIBSPDM_CRYPTO_ASN1_TAG_PC_MASK 0x20 +#define LIBSPDM_CRYPTO_ASN1_TAG_VALUE_MASK 0x1F + +#include "hal/library/cryptlib/cryptlib_hash.h" +#include "hal/library/cryptlib/cryptlib_mac.h" +#include "hal/library/cryptlib/cryptlib_aead.h" +#include "hal/library/cryptlib/cryptlib_cert.h" +#include "hal/library/cryptlib/cryptlib_hkdf.h" +#include "hal/library/cryptlib/cryptlib_rsa.h" +#include "hal/library/cryptlib/cryptlib_ec.h" +#include "hal/library/cryptlib/cryptlib_dh.h" +#include "hal/library/cryptlib/cryptlib_ecd.h" +#include "hal/library/cryptlib/cryptlib_sm2.h" +#include "hal/library/cryptlib/cryptlib_rng.h" + +#endif /* CRYPTLIB_H */ diff --git a/kernel-open/nvidia/library/spdm_lib_config.h b/kernel-open/nvidia/library/spdm_lib_config.h new file mode 100644 index 0000000..bed17b2 --- /dev/null +++ b/kernel-open/nvidia/library/spdm_lib_config.h @@ -0,0 +1,445 @@ +/** + * Copyright Notice: + * Copyright 2021-2024 DMTF. All rights reserved. + * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md + **/ + +#ifndef SPDM_LIB_CONFIG_H +#define SPDM_LIB_CONFIG_H + +/* Code space optimization for optional messages. + * + * An Integrator of libspdm may not need all of the optional SPDM messages. The + * LIBSPDM_ENABLE_CAPABILITY_***_CAP compile time switches allow the Integrator to enable or disable + * capabilities and messages. + */ + +/* SPDM 1.0 capabilities and messages. */ +#ifndef LIBSPDM_ENABLE_CAPABILITY_CERT_CAP +#define LIBSPDM_ENABLE_CAPABILITY_CERT_CAP 1 +#endif + +#ifndef LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP +#define LIBSPDM_ENABLE_CAPABILITY_CHAL_CAP 1 +#endif + +#ifndef LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP +#define LIBSPDM_ENABLE_CAPABILITY_MEAS_CAP 1 +#endif + +#ifndef LIBSPDM_ENABLE_VENDOR_DEFINED_MESSAGES +#define LIBSPDM_ENABLE_VENDOR_DEFINED_MESSAGES 1 +#endif + +/* SPDM 1.1 capabilities. */ +#ifndef LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP +#define LIBSPDM_ENABLE_CAPABILITY_KEY_EX_CAP 1 +#endif + +#ifndef LIBSPDM_ENABLE_CAPABILITY_PSK_CAP +#define LIBSPDM_ENABLE_CAPABILITY_PSK_CAP 1 +#endif + +#ifndef LIBSPDM_ENABLE_CAPABILITY_HBEAT_CAP +#define LIBSPDM_ENABLE_CAPABILITY_HBEAT_CAP 1 +#endif + +#ifndef LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP +#define LIBSPDM_ENABLE_CAPABILITY_MUT_AUTH_CAP 1 +#endif + +#ifndef LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP +#define LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP 1 +#endif + +/* SPDM 1.2 capabilities. 
 */
+#ifndef LIBSPDM_ENABLE_CAPABILITY_CSR_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_CSR_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_SET_CERT_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_SET_CERT_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP 1
+#endif
+
+/* SPDM 1.3 capabilities. */
+#ifndef LIBSPDM_ENABLE_CAPABILITY_MEL_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_MEL_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_EVENT_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_EVENT_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_GET_KEY_PAIR_INFO_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_GET_KEY_PAIR_INFO_CAP 1
+#endif
+
+#ifndef LIBSPDM_ENABLE_CAPABILITY_SET_KEY_PAIR_INFO_CAP
+#define LIBSPDM_ENABLE_CAPABILITY_SET_KEY_PAIR_INFO_CAP 1
+#endif
+
+/* Includes SPDM 1.3 features for CSR messages. If enabled then LIBSPDM_ENABLE_CAPABILITY_CSR_CAP
+ * must also be enabled.
+ */
+#ifndef LIBSPDM_ENABLE_CAPABILITY_CSR_CAP_EX
+#define LIBSPDM_ENABLE_CAPABILITY_CSR_CAP_EX 1
+#endif
+
+/* If 1 then endpoint supports sending GET_CERTIFICATE and GET_DIGESTS requests.
+ * If enabled and endpoint is a Responder then LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
+ * must also be enabled.
+ */
+#ifndef LIBSPDM_SEND_GET_CERTIFICATE_SUPPORT
+#define LIBSPDM_SEND_GET_CERTIFICATE_SUPPORT 1
+#endif
+
+/* If 1 then endpoint supports sending CHALLENGE request.
+ * If enabled and endpoint is a Responder then LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP
+ * must also be enabled.
+ */
+#ifndef LIBSPDM_SEND_CHALLENGE_SUPPORT
+#define LIBSPDM_SEND_CHALLENGE_SUPPORT 1
+#endif
+
+/* If 1 then endpoint supports sending the GET_SUPPORTED_EVENT_TYPES, SUBSCRIBE_EVENT_TYPES, and
+ * encapsulated EVENT_ACK messages. In addition, LIBSPDM_ENABLE_CAPABILITY_ENCAP_CAP must also be
+ * 1.
+ */
+#ifndef LIBSPDM_EVENT_RECIPIENT_SUPPORT
+#define LIBSPDM_EVENT_RECIPIENT_SUPPORT 1
+#endif
+
+/* When LIBSPDM_RESPOND_IF_READY_SUPPORT is 0 then
+ * - For a Requester, if the Responder sends a ResponseNotReady ERROR response then the error
+ *   is immediately returned to the Integrator. The Requester cannot send a RESPOND_IF_READY
+ *   request.
+ * - For a Responder, it cannot send a RESPOND_IF_READY ERROR response and does not support
+ *   RESPOND_IF_READY.
+ * When LIBSPDM_RESPOND_IF_READY_SUPPORT is 1 then
+ * - For a Requester, if the Responder sends a ResponseNotReady ERROR response then libspdm
+ *   waits an amount of time, as specified by the RDTExponent parameter, before sending
+ *   RESPOND_IF_READY.
+ * - For a Responder, if its response state is NOT_READY then it will send a ResponseNotReady
+ *   ERROR response to the Requester, and will accept a subsequent RESPOND_IF_READY request.
+ */
+#ifndef LIBSPDM_RESPOND_IF_READY_SUPPORT
+#define LIBSPDM_RESPOND_IF_READY_SUPPORT 1
+#endif
+
+/* Enables FIPS 140-3 mode. */
+#ifndef LIBSPDM_FIPS_MODE
+#define LIBSPDM_FIPS_MODE 0
+#endif
+
+/* Enables assertions and debug printing. When `LIBSPDM_DEBUG_ENABLE` is defined it overrides or
+ * sets the values of `LIBSPDM_DEBUG_PRINT_ENABLE`, `LIBSPDM_DEBUG_ASSERT_ENABLE`, and
+ * `LIBSPDM_DEBUG_BLOCK_ENABLE` to the value of `LIBSPDM_DEBUG_ENABLE`.
+ *
+ * Note that if this file is used with CMake and `DTARGET=Release` is defined, then all debugging
+ * is disabled.
+ */
+#ifndef LIBSPDM_DEBUG_ENABLE
+#define LIBSPDM_DEBUG_ENABLE 1
+#endif
+
+/* The SPDM specification allows a Responder to return up to 255 version entries in the `VERSION`
+ * response to the Requester, including duplicate entries.
+ * For a Requester this value specifies the maximum number of entries that libspdm will tolerate
+ * in a `VERSION` response before returning an error. A similar macro, `SPDM_MAX_VERSION_COUNT`,
+ * exists for the Responder. However this macro is not meant to be configured by the Integrator.
+ */
+#ifndef LIBSPDM_MAX_VERSION_COUNT
+#define LIBSPDM_MAX_VERSION_COUNT 5
+#endif
+
+#if LIBSPDM_ENABLE_CAPABILITY_PSK_CAP
+/* This value specifies the maximum size, in bytes, of the `PSK_EXCHANGE.RequesterContext` and,
+ * if supported by the Responder, `PSK_EXCHANGE_RSP.ResponderContext` fields. The fields are
+ * typically random or monotonically increasing numbers.
+ */
+#ifndef LIBSPDM_PSK_CONTEXT_LENGTH
+#define LIBSPDM_PSK_CONTEXT_LENGTH LIBSPDM_MAX_HASH_SIZE
+#endif
+
+/* This value specifies the maximum size, in bytes, of the `PSK_EXCHANGE.PSKHint` field. */
+#ifndef LIBSPDM_PSK_MAX_HINT_LENGTH
+#define LIBSPDM_PSK_MAX_HINT_LENGTH 16
+#endif
+#endif /* LIBSPDM_ENABLE_CAPABILITY_PSK_CAP */
+
+/* libspdm allows an Integrator to specify multiple root certificates as trust anchors when
+ * verifying certificate chains from an endpoint. This value specifies the maximum number of root
+ * certificates that libspdm can support.
+ */
+#ifndef LIBSPDM_MAX_ROOT_CERT_SUPPORT
+#define LIBSPDM_MAX_ROOT_CERT_SUPPORT 10
+#endif
+
+/* If the Responder supports it, a Requester is allowed to establish multiple secure sessions with
+ * the Responder. This value specifies the maximum number of sessions libspdm can support.
+ */
+#ifndef LIBSPDM_MAX_SESSION_COUNT
+#define LIBSPDM_MAX_SESSION_COUNT 4
+#endif
+
+/* This value specifies the maximum size, in bytes, of a certificate chain that can be stored in a
+ * libspdm context.
+ */
+#ifndef LIBSPDM_MAX_CERT_CHAIN_SIZE
+#define LIBSPDM_MAX_CERT_CHAIN_SIZE 0x1000
+#endif
+
+#ifndef LIBSPDM_MAX_MEASUREMENT_RECORD_SIZE
+#define LIBSPDM_MAX_MEASUREMENT_RECORD_SIZE 0x1000
+#endif
+
+/* Partial certificates can be retrieved from a Requester or Responder and through multiple messages
+ * the complete certificate chain can be constructed. This value specifies the maximum size,
+ * in bytes, of a partial certificate that can be sent or received.
+ */
+#ifndef LIBSPDM_MAX_CERT_CHAIN_BLOCK_LEN
+#define LIBSPDM_MAX_CERT_CHAIN_BLOCK_LEN 1024
+#endif
+
+/* A partial measurement extension log (MEL) can be retrieved from a Responder and through multiple
+ * messages the complete MEL can be constructed. This value specifies the maximum size, in bytes, of
+ * a partial MEL that can be sent or received.
+ */
+#ifndef LIBSPDM_MAX_MEL_BLOCK_LEN
+#define LIBSPDM_MAX_MEL_BLOCK_LEN 1024
+#endif
+
+/* To ensure integrity in communication between the Requester and the Responder libspdm calculates
+ * cryptographic digests and signatures over multiple requests and responses. This value specifies
+ * whether libspdm will use a running calculation over the transcript, where requests and responses
+ * are discarded as they are cryptographically consumed, or whether libspdm will buffer the entire
+ * transcript before calculating the digest or signature.
+ *
+ * When LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT is 0 then a running calculation is used and less
+ * memory is needed.
+ * When LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT is 1 then the entire transcript is buffered and more
+ * memory is needed.
+ */
+#ifndef LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT
+#define LIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT 0
+#endif
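+
+/* Editorial sketch of how an Integrator overrides these defaults: every knob
+ * above is guarded by #ifndef, so values can be supplied from the build
+ * system. The flag values below are examples only, not project defaults.
+ *
+ *     // e.g. in the module's compiler flags:
+ *     //     -DLIBSPDM_MAX_SESSION_COUNT=1 -DLIBSPDM_RECORD_TRANSCRIPT_DATA_SUPPORT=1
+ *     // or in a wrapper header included before this one:
+ *     //     #define LIBSPDM_MAX_CERT_CHAIN_SIZE 0x2000
+ */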
+
+/* Cryptography Configuration
+ * In each category, at least one algorithm should be selected.
+ * NOTE: Not all combinations are supported, e.g. do not mix NIST algorithms with SMx. */
+
+#ifndef LIBSPDM_RSA_SSA_2048_SUPPORT
+#define LIBSPDM_RSA_SSA_2048_SUPPORT 1
+#endif
+#ifndef LIBSPDM_RSA_SSA_3072_SUPPORT
+#define LIBSPDM_RSA_SSA_3072_SUPPORT 1
+#endif
+#ifndef LIBSPDM_RSA_SSA_4096_SUPPORT
+#define LIBSPDM_RSA_SSA_4096_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_RSA_PSS_2048_SUPPORT
+#define LIBSPDM_RSA_PSS_2048_SUPPORT 1
+#endif
+#ifndef LIBSPDM_RSA_PSS_3072_SUPPORT
+#define LIBSPDM_RSA_PSS_3072_SUPPORT 1
+#endif
+#ifndef LIBSPDM_RSA_PSS_4096_SUPPORT
+#define LIBSPDM_RSA_PSS_4096_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_ECDSA_P256_SUPPORT
+#define LIBSPDM_ECDSA_P256_SUPPORT 1
+#endif
+#ifndef LIBSPDM_ECDSA_P384_SUPPORT
+#define LIBSPDM_ECDSA_P384_SUPPORT 1
+#endif
+#ifndef LIBSPDM_ECDSA_P521_SUPPORT
+#define LIBSPDM_ECDSA_P521_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_SM2_DSA_P256_SUPPORT
+#define LIBSPDM_SM2_DSA_P256_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_EDDSA_ED25519_SUPPORT
+#define LIBSPDM_EDDSA_ED25519_SUPPORT 1
+#endif
+#ifndef LIBSPDM_EDDSA_ED448_SUPPORT
+#define LIBSPDM_EDDSA_ED448_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_FFDHE_2048_SUPPORT
+#define LIBSPDM_FFDHE_2048_SUPPORT 1
+#endif
+#ifndef LIBSPDM_FFDHE_3072_SUPPORT
+#define LIBSPDM_FFDHE_3072_SUPPORT 1
+#endif
+#ifndef LIBSPDM_FFDHE_4096_SUPPORT
+#define LIBSPDM_FFDHE_4096_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_ECDHE_P256_SUPPORT
+#define LIBSPDM_ECDHE_P256_SUPPORT 1
+#endif
+#ifndef LIBSPDM_ECDHE_P384_SUPPORT
+#define LIBSPDM_ECDHE_P384_SUPPORT 1
+#endif
+#ifndef LIBSPDM_ECDHE_P521_SUPPORT
+#define LIBSPDM_ECDHE_P521_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT
+#define LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_AEAD_AES_128_GCM_SUPPORT
+#define LIBSPDM_AEAD_AES_128_GCM_SUPPORT 1
+#endif
+#ifndef LIBSPDM_AEAD_AES_256_GCM_SUPPORT
+#define LIBSPDM_AEAD_AES_256_GCM_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT
+#define LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_AEAD_SM4_128_GCM_SUPPORT
+#define LIBSPDM_AEAD_SM4_128_GCM_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_SHA256_SUPPORT
+#define LIBSPDM_SHA256_SUPPORT 1
+#endif
+#ifndef LIBSPDM_SHA384_SUPPORT
+#define LIBSPDM_SHA384_SUPPORT 1
+#endif
+#ifndef LIBSPDM_SHA512_SUPPORT
+#define LIBSPDM_SHA512_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_SHA3_256_SUPPORT
+#define LIBSPDM_SHA3_256_SUPPORT 1
+#endif
+#ifndef LIBSPDM_SHA3_384_SUPPORT
+#define LIBSPDM_SHA3_384_SUPPORT 1
+#endif
+#ifndef LIBSPDM_SHA3_512_SUPPORT
+#define LIBSPDM_SHA3_512_SUPPORT 1
+#endif
+
+#ifndef LIBSPDM_SM3_256_SUPPORT
+#define LIBSPDM_SM3_256_SUPPORT 1
+#endif
+
+/* If 1 then endpoint supports parsing X.509 certificate chains.
*/ +#ifndef LIBSPDM_CERT_PARSE_SUPPORT +#define LIBSPDM_CERT_PARSE_SUPPORT 1 +#endif + + + +/* + * MinDataTransferSize = 42 + * + * H = HashLen = HmacLen = [32, 64] + * S = SigLen = [64, 512] + * D = ExchangeDataLen = [64, 512] + * R = RequesterContextLen >= 32 + * R = ResponderContextLen >= 0 + * O = OpaqueDataLen <= 1024 + * + * Max Chunk No = 1, if (message size <= 42) + * Max Chunk No = [(message size + 4) / 30] roundup, if (message size > 42) + * + * +==========================+==========================================+=========+ + * | Command | Size |MaxChunk | + * +==========================+==========================================+=========+ + * | GET_VERSION | 4 | 1 | + * | VERSION {1.0, 1.1, 1.2} | 6 + 2 * 3 = 12 | 1 | + * +--------------------------+------------------------------------------+---------+ + * | GET_CAPABILITIES 1.2 | 20 | 1 | + * | CAPABILITIES 1.2 | 20 | 1 | + * +--------------------------+------------------------------------------+---------+ + * | ERROR | 4 | 1 | + * | ERROR(ResponseTooLarge) | 4 + 4 = 8 | 1 | + * | ERROR(LargeResponse) | 4 + 1 = 5 | 1 | + * | ERROR(ResponseNotReady) | 4 + 4 = 8 | 1 | + * +--------------------------+------------------------------------------+---------+ + * | CHUNK_SEND header | 12 + L0 (0 or 4) | 1 | + * | CHUNK_RESPONSE header | 12 + L0 (0 or 4) | 1 | + * +==========================+==========================================+=========+ + * | NEGOTIATE_ALGORITHMS 1.2 | 32 + 4 * 4 = 48 | 2 | + * | ALGORITHMS 1.2 | 36 + 4 * 4 = 52 | 2 | + * +--------------------------+------------------------------------------+---------+ + * | GET_DIGESTS 1.2 | 4 | 1 | + * | DIGESTS 1.2 | 4 + H * SlotNum = [36, 516] | [1, 18] | + * +--------------------------+------------------------------------------+---------+ + * | GET_CERTIFICATE 1.2 | 8 | 1 | + * | CERTIFICATE 1.2 | 8 + PortionLen | [1, ] | + * +--------------------------+------------------------------------------+---------+ + * | CHALLENGE 1.2 | 40 | 1 | + * | CHALLENGE_AUTH 1.2 | 38 + H * 2 + S [+ O] = [166, 678] | [6, 23] | + * +--------------------------+------------------------------------------+---------+ + * | GET_MEASUREMENTS 1.2 | 5 + Nonce (0 or 32) | 1 | + * | MEASUREMENTS 1.2 | 42 + MeasRecLen (+ S) [+ O] = [106, 554] | [4, 19] | + * +--------------------------+------------------------------------------+---------+ + * | KEY_EXCHANGE 1.2 | 42 + D [+ O] = [106, 554] | [4, 19] | + * | KEY_EXCHANGE_RSP 1.2 | 42 + D + H + S (+ H) [+ O] = [234, 1194] | [8, 40] | + * +--------------------------+------------------------------------------+---------+ + * | FINISH 1.2 | 4 (+ S) + H = [100, 580] | [4, 20] | + * | FINISH_RSP 1.2 | 4 (+ H) = [36, 69] | [1, 3] | + * +--------------------------+------------------------------------------+---------+ + * | PSK_EXCHANGE 1.2 | 12 [+ PSKHint] + R [+ O] = 44 | 2 | + * | PSK_EXCHANGE_RSP 1.2 | 12 + R + H (+ H) [+ O] = [108, 172] | [4, 6] | + * +--------------------------+------------------------------------------+---------+ + * | PSK_FINISH 1.2 | 4 + H = [36, 68] | [1, 3] | + * | PSK_FINISH_RSP 1.2 | 4 | 1 | + * +--------------------------+------------------------------------------+---------+ + * | GET_CSR 1.2 | 8 + RequesterInfoLen [+ O] | [1, ] | + * | CSR 1.2 | 8 + CSRLength | [1, ] | + * +--------------------------+------------------------------------------+---------+ + * | SET_CERTIFICATE 1.2 | 4 + CertChainLen | [1, ] | + * | SET_CERTIFICATE_RSP 1.2 | 4 | 1 | + * +==========================+==========================================+=========+ + 
*/ + +/* Enable message logging. + * See https://github.com/DMTF/libspdm/blob/main/doc/user_guide.md#message-logging + * for more information. + */ +#ifndef LIBSPDM_ENABLE_MSG_LOG +#define LIBSPDM_ENABLE_MSG_LOG 1 +#endif + +/* Enable macro checking during compilation. */ +#ifndef LIBSPDM_CHECK_MACRO +#define LIBSPDM_CHECK_MACRO 0 +#endif + +/* Enable compilation of libspdm_check_context function. After a libspdm context has been + * configured libspdm_check_context can be called to check that its configuration is correct. + */ +#ifndef LIBSPDM_CHECK_SPDM_CONTEXT +#define LIBSPDM_CHECK_SPDM_CONTEXT 1 +#endif + +/* Enable passing the SPDM context to HAL functions. + * This macro will be removed when libspdm 4.0 is released. + */ +#ifndef LIBSPDM_HAL_PASS_SPDM_CONTEXT +#define LIBSPDM_HAL_PASS_SPDM_CONTEXT 0 +#endif + +/* Enable additional checks for certificates. + * This macro will be removed when libspdm 4.0 is released. + */ +#ifndef LIBSPDM_ADDITIONAL_CHECK_CERT +#define LIBSPDM_ADDITIONAL_CHECK_CERT 0 +#endif + +#endif /* SPDM_LIB_CONFIG_H */ diff --git a/kernel-open/nvidia/libspdm_aead.c b/kernel-open/nvidia/libspdm_aead.c new file mode 100644 index 0000000..df3fb15 --- /dev/null +++ b/kernel-open/nvidia/libspdm_aead.c @@ -0,0 +1,477 @@ +/* +* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+*/
+
+#include "internal_crypt_lib.h"
+#include "nvspdm_cryptlib_extensions.h"
+
+#ifdef USE_LKCA
+#define BUFFER_SIZE (2 * 1024 * 1024)
+#define AUTH_TAG_SIZE 16
+struct lkca_aead_ctx
+{
+    struct crypto_aead *aead;
+    struct aead_request *req;
+    char *a_data_buffer;
+    char *in_buffer;
+    char *out_buffer;
+    char tag[AUTH_TAG_SIZE];
+};
+#endif
+
+static int libspdm_aead_prealloc(void **context, char const *alg)
+{
+#ifndef USE_LKCA
+    return -ENODEV;
+#else
+    struct lkca_aead_ctx *ctx;
+
+    ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+    if (ctx == NULL) {
+        return -ENOMEM;
+    }
+
+    memset(ctx, 0, sizeof(*ctx));
+
+    ctx->aead = crypto_alloc_aead(alg, CRYPTO_ALG_TYPE_AEAD, 0);
+    if (IS_ERR(ctx->aead)) {
+        pr_notice("could not allocate AEAD algorithm\n");
+        kfree(ctx);
+        return -ENODEV;
+    }
+
+    ctx->req = aead_request_alloc(ctx->aead, GFP_KERNEL);
+    if (ctx->req == NULL) {
+        pr_info("could not allocate AEAD request\n");
+        crypto_free_aead(ctx->aead);
+        kfree(ctx);
+        return -ENOMEM;
+    }
+
+    ctx->a_data_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
+    if (ctx->a_data_buffer == NULL) {
+        aead_request_free(ctx->req);
+        crypto_free_aead(ctx->aead);
+        kfree(ctx);
+        return -ENOMEM;
+    }
+
+    ctx->in_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
+    if (ctx->in_buffer == NULL) {
+        kfree(ctx->a_data_buffer);
+        aead_request_free(ctx->req);
+        crypto_free_aead(ctx->aead);
+        kfree(ctx);
+        return -ENOMEM;
+    }
+
+    ctx->out_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
+    if (ctx->out_buffer == NULL) {
+        kfree(ctx->a_data_buffer);
+        kfree(ctx->in_buffer);
+        aead_request_free(ctx->req);
+        crypto_free_aead(ctx->aead);
+        kfree(ctx);
+        return -ENOMEM;
+    }
+
+    *context = ctx;
+    return 0;
+#endif
+}
+
+void libspdm_aead_free(void *context)
+{
+#ifdef USE_LKCA
+    struct lkca_aead_ctx *ctx = context;
+    crypto_free_aead(ctx->aead);
+    aead_request_free(ctx->req);
+    kfree(ctx->a_data_buffer);
+    kfree(ctx->in_buffer);
+    kfree(ctx->out_buffer);
+    kfree(ctx);
+#endif
+}
+
+#define SG_AEAD_AAD  0
+#define SG_AEAD_TEXT 1
+#define SG_AEAD_SIG  2
+// Number of fields in the AEAD scatterlist
+#define SG_AEAD_LEN  3
+
+#ifdef USE_LKCA
+// This function doesn't do any allocs; it uses the preallocated temp buffers instead
+static int lkca_aead_internal(struct crypto_aead *aead,
+                              struct aead_request *req,
+                              const uint8_t *key, size_t key_size,
+                              const uint8_t *iv, size_t iv_size,
+                              struct scatterlist sg_in[],
+                              struct scatterlist sg_out[],
+                              size_t a_data_size,
+                              size_t data_in_size,
+                              size_t *data_out_size,
+                              size_t tag_size,
+                              bool enc)
+{
+    DECLARE_CRYPTO_WAIT(wait);
+    int rc = 0;
+
+    if (crypto_aead_setkey(aead, key, key_size)) {
+        pr_info("key could not be set\n");
+        return -EINVAL;
+    }
+
+    if (crypto_aead_ivsize(aead) != iv_size) {
+        pr_info("iv could not be set\n");
+        return -EINVAL;
+    }
+
+    aead_request_set_ad(req, a_data_size);
+
+    aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+                              CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait);
+
+    if (enc) {
+        aead_request_set_crypt(req, sg_in, sg_out, data_in_size, (u8 *) iv);
+        rc = crypto_wait_req(crypto_aead_encrypt(req), &wait);
+    } else {
+        aead_request_set_crypt(req, sg_in, sg_out, data_in_size + tag_size, (u8 *) iv);
+        rc = crypto_wait_req(crypto_aead_decrypt(req), &wait);
+    }
+
+    if (rc != 0) {
+        if (enc) {
+            pr_info("aead.c: Encryption failed with error %i\n", rc);
+        } else {
+            pr_info("aead.c: Decryption failed with error %i\n", rc);
+            if (rc == -EBADMSG) {
+                pr_info("aead.c: Authentication tag mismatch!\n");
+            }
+        }
+    }
+
+    *data_out_size = data_in_size;
+
+    return rc;
+}
+#endif
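+
+/* Editorial note: the three scatterlist slots defined above map onto the
+ * Linux AEAD API convention, where the associated data precedes the text and
+ * the authentication tag follows the ciphertext:
+ *
+ *     sg[SG_AEAD_AAD]  -> associated data (authenticated, not encrypted)
+ *     sg[SG_AEAD_TEXT] -> plaintext or ciphertext
+ *     sg[SG_AEAD_SIG]  -> authentication tag (up to AUTH_TAG_SIZE bytes)
+ *
+ * which is why, for decryption, lkca_aead_internal() sets the request length
+ * to data_in_size + tag_size.
+ */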
+
+static int libspdm_aead_prealloced(void *context,
+                                   const uint8_t *key, size_t key_size,
+                                   const uint8_t *iv, size_t iv_size,
+                                   const uint8_t *a_data, size_t a_data_size,
+                                   const uint8_t *data_in, size_t data_in_size,
+                                   uint8_t *tag, size_t tag_size,
+                                   uint8_t *data_out, size_t *data_out_size,
+                                   bool enc)
+{
+#ifndef USE_LKCA
+    return -ENODEV;
+#else
+    int rc = 0;
+    struct scatterlist sg_in[SG_AEAD_LEN];
+    struct scatterlist sg_out[SG_AEAD_LEN];
+    struct lkca_aead_ctx *ctx = context;
+
+    sg_init_table(sg_in, SG_AEAD_LEN);
+    sg_init_table(sg_out, SG_AEAD_LEN);
+
+    if (!virt_addr_valid(a_data)) {
+        if (a_data_size > BUFFER_SIZE) {
+            return -ENOMEM;
+        }
+        sg_set_buf(&sg_in[SG_AEAD_AAD], ctx->a_data_buffer, a_data_size);
+        sg_set_buf(&sg_out[SG_AEAD_AAD], ctx->a_data_buffer, a_data_size);
+
+        memcpy(ctx->a_data_buffer, a_data, a_data_size);
+    } else {
+        sg_set_buf(&sg_in[SG_AEAD_AAD], a_data, a_data_size);
+        sg_set_buf(&sg_out[SG_AEAD_AAD], a_data, a_data_size);
+    }
+
+    if (!virt_addr_valid(data_in)) {
+        if (data_in_size > BUFFER_SIZE) {
+            return -ENOMEM;
+        }
+        sg_set_buf(&sg_in[SG_AEAD_TEXT], ctx->in_buffer, data_in_size);
+        memcpy(ctx->in_buffer, data_in, data_in_size);
+    } else {
+        sg_set_buf(&sg_in[SG_AEAD_TEXT], data_in, data_in_size);
+    }
+
+    if (!virt_addr_valid(data_out)) {
+        if (data_in_size > BUFFER_SIZE) {
+            return -ENOMEM;
+        }
+        sg_set_buf(&sg_out[SG_AEAD_TEXT], ctx->out_buffer, data_in_size);
+    } else {
+        sg_set_buf(&sg_out[SG_AEAD_TEXT], data_out, data_in_size);
+    }
+
+    // The tag is small enough that memcpy is cheaper than checking if the page is virtual
+    if (tag_size > AUTH_TAG_SIZE) {
+        return -ENOMEM;
+    }
+    sg_set_buf(&sg_in[SG_AEAD_SIG], ctx->tag, tag_size);
+    sg_set_buf(&sg_out[SG_AEAD_SIG], ctx->tag, tag_size);
+
+    if (!enc)
+        memcpy(ctx->tag, tag, tag_size);
+
+    rc = lkca_aead_internal(ctx->aead, ctx->req, key, key_size, iv, iv_size,
+                            sg_in, sg_out, a_data_size, data_in_size,
+                            data_out_size, tag_size, enc);
+
+    if (enc) {
+        memcpy(tag, ctx->tag, tag_size);
+    }
+
+    if (!virt_addr_valid(data_out)) {
+        memcpy(data_out, ctx->out_buffer, data_in_size);
+    }
+
+    return rc;
+#endif
+}
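+
+/* Editorial usage sketch of the preallocated-context path exposed below: the
+ * context is allocated once (it holds the transform, request, and bounce
+ * buffers) and then reused across records. The key/iv/aad/pt/ct buffers and
+ * their sizes are assumptions provided by the caller.
+ *
+ *     void *ctx = NULL;
+ *     if (libspdm_aead_gcm_prealloc(&ctx)) {
+ *         uint8_t tag[16];
+ *         size_t  ct_size = pt_size;
+ *         libspdm_aead_aes_gcm_encrypt_prealloc(ctx, key, 32, iv, 12,
+ *                                               aad, aad_size, pt, pt_size,
+ *                                               tag, sizeof(tag), ct, &ct_size);
+ *         libspdm_aead_free(ctx);
+ *     }
+ */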
+
+int libspdm_aead(const uint8_t *key, size_t key_size,
+                 const uint8_t *iv, size_t iv_size,
+                 const uint8_t *a_data, size_t a_data_size,
+                 const uint8_t *data_in, size_t data_in_size,
+                 const uint8_t *tag, size_t tag_size,
+                 uint8_t *data_out, size_t *data_out_size,
+                 bool enc, char const *alg)
+{
+#ifndef USE_LKCA
+    return -ENODEV;
+#else
+    struct crypto_aead *aead = NULL;
+    struct aead_request *req = NULL;
+    struct scatterlist sg_in[SG_AEAD_LEN];
+    struct scatterlist sg_out[SG_AEAD_LEN];
+    uint8_t *a_data_shadow = NULL;
+    uint8_t *data_in_shadow = NULL;
+    uint8_t *data_out_shadow = NULL;
+    uint8_t *tag_shadow = NULL;
+    int rc = 0;
+
+    aead = crypto_alloc_aead(alg, CRYPTO_ALG_TYPE_AEAD, 0);
+    if (IS_ERR(aead)) {
+        pr_notice("could not allocate AEAD algorithm\n");
+        return -ENODEV;
+    }
+
+    req = aead_request_alloc(aead, GFP_KERNEL);
+    if (req == NULL) {
+        pr_info("could not allocate AEAD request\n");
+        rc = -ENOMEM;
+        goto out;
+    }
+
+    sg_init_table(sg_in, SG_AEAD_LEN);
+    sg_init_table(sg_out, SG_AEAD_LEN);
+
+    if (!virt_addr_valid(a_data)) {
+        a_data_shadow = kmalloc(a_data_size, GFP_KERNEL);
+        if (a_data_shadow == NULL) {
+            rc = -ENOMEM;
+            goto out;
+        }
+
+        sg_set_buf(&sg_in[SG_AEAD_AAD], a_data_shadow, a_data_size);
+        sg_set_buf(&sg_out[SG_AEAD_AAD], a_data_shadow, a_data_size);
+
+        memcpy(a_data_shadow, a_data, a_data_size);
+    } else {
+        sg_set_buf(&sg_in[SG_AEAD_AAD], a_data, a_data_size);
+        sg_set_buf(&sg_out[SG_AEAD_AAD], a_data, a_data_size);
+    }
+
+    if (!virt_addr_valid(data_in)) {
+        data_in_shadow = kmalloc(data_in_size, GFP_KERNEL);
+        if (data_in_shadow == NULL) {
+            rc = -ENOMEM;
+            goto out;
+        }
+
+        sg_set_buf(&sg_in[SG_AEAD_TEXT], data_in_shadow, data_in_size);
+
+        memcpy(data_in_shadow, data_in, data_in_size);
+    } else {
+        sg_set_buf(&sg_in[SG_AEAD_TEXT], data_in, data_in_size);
+    }
+
+    if (!virt_addr_valid(data_out)) {
+        data_out_shadow = kmalloc(data_in_size, GFP_KERNEL);
+        if (data_out_shadow == NULL) {
+            rc = -ENOMEM;
+            goto out;
+        }
+
+        sg_set_buf(&sg_out[SG_AEAD_TEXT], data_out_shadow, data_in_size);
+    } else {
+        sg_set_buf(&sg_out[SG_AEAD_TEXT], data_out, data_in_size);
+    }
+
+    if (!virt_addr_valid(tag)) {
+        tag_shadow = kmalloc(tag_size, GFP_KERNEL);
+        if (tag_shadow == NULL) {
+            rc = -ENOMEM;
+            goto out;
+        }
+
+        sg_set_buf(&sg_in[SG_AEAD_SIG], tag_shadow, tag_size);
+        sg_set_buf(&sg_out[SG_AEAD_SIG], tag_shadow, tag_size);
+
+        if (!enc)
+            memcpy(tag_shadow, tag, tag_size);
+    } else {
+        sg_set_buf(&sg_in[SG_AEAD_SIG], tag, tag_size);
+        sg_set_buf(&sg_out[SG_AEAD_SIG], tag, tag_size);
+    }
+
+    rc = lkca_aead_internal(aead, req, key, key_size, iv, iv_size,
+                            sg_in, sg_out, a_data_size, data_in_size,
+                            data_out_size, tag_size, enc);
+
+    if (enc && (tag_shadow != NULL))
+        memcpy((uint8_t *) tag, tag_shadow, tag_size);
+
+    if (data_out_shadow != NULL)
+        memcpy(data_out, data_out_shadow, data_in_size);
+
+out:
+    // Free the shadow buffers, never the caller's pointers
+    if (a_data_shadow != NULL)
+        kfree(a_data_shadow);
+    if (data_in_shadow != NULL)
+        kfree(data_in_shadow);
+    if (data_out_shadow != NULL)
+        kfree(data_out_shadow);
+    if (tag_shadow != NULL)
+        kfree(tag_shadow);
+    if (aead != NULL)
+        crypto_free_aead(aead);
+    if (req != NULL)
+        aead_request_free(req);
+    return rc;
+#endif
+}
+
+// Wrappers to match the libspdm calling convention
+bool libspdm_aead_gcm_prealloc(void **context)
+{
+    return libspdm_aead_prealloc(context, "gcm(aes)") == 0;
+}
+
+bool libspdm_aead_aes_gcm_encrypt_prealloc(void *context,
+                                           const uint8_t *key, size_t key_size,
+                                           const uint8_t *iv, size_t iv_size,
+                                           const uint8_t *a_data, size_t a_data_size,
+                                           const uint8_t *data_in, size_t data_in_size,
+                                           uint8_t *tag_out, size_t tag_size,
+                                           uint8_t *data_out, size_t *data_out_size)
+{
+    int32_t ret;
+
+    if (data_in_size > INT_MAX) {
+        return false;
+    }
+    if (a_data_size > INT_MAX) {
+        return false;
+    }
+    if (iv_size != 12) {
+        return false;
+    }
+    switch (key_size) {
+    case 16:
+    case 24:
+    case 32:
+        break;
+    default:
+        return false;
+    }
+    if ((tag_size < 12) || (tag_size > 16)) {
+        return false;
+    }
+    if (data_out_size != NULL) {
+        if ((*data_out_size > INT_MAX) ||
+            (*data_out_size < data_in_size)) {
+            return false;
+        }
+    }
+
+    ret = libspdm_aead_prealloced(context, key, key_size, iv, iv_size,
+                                  a_data, a_data_size, data_in, data_in_size,
+                                  tag_out, tag_size, data_out, data_out_size, true);
+
+    *data_out_size = data_in_size;
+
+    return ret == 0;
+}
+
+bool libspdm_aead_aes_gcm_decrypt_prealloc(void *context,
+                                           const uint8_t *key, size_t key_size,
+                                           const uint8_t *iv, size_t iv_size,
+                                           const uint8_t *a_data, size_t a_data_size,
+                                           const uint8_t *data_in, size_t data_in_size,
+                                           const uint8_t *tag, size_t tag_size,
+                                           uint8_t *data_out, size_t *data_out_size)
+{
+    int ret;
+    if (data_in_size > INT_MAX) {
+        return false;
+    }
+    if (a_data_size > INT_MAX) {
+        return false;
+    }
+    if (iv_size != 12) {
+        return false;
+    }
+    switch (key_size) {
+    case 16:
+    case 24:
+    case 32:
+        break;
+    default:
+        return false;
+    }
+    if ((tag_size < 12) || (tag_size > 16)) {
+        return false;
+    }
+    if
(data_out_size != NULL) { + if ((*data_out_size > INT_MAX) || + (*data_out_size < data_in_size)) { + return false; + } + } + + ret = libspdm_aead_prealloced(context, key, key_size, iv, iv_size, + a_data, a_data_size, data_in, data_in_size, + (uint8_t *) tag, tag_size, data_out, data_out_size, false); + + *data_out_size = data_in_size; + + return ret == 0; + +} + diff --git a/kernel-open/nvidia/libspdm_aead_aes_gcm.c b/kernel-open/nvidia/libspdm_aead_aes_gcm.c new file mode 100644 index 0000000..ae0c7a6 --- /dev/null +++ b/kernel-open/nvidia/libspdm_aead_aes_gcm.c @@ -0,0 +1,117 @@ +/* +* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +* Prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved. +* License: BSD 3-Clause License. 
For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md +*/ + +#include "internal_crypt_lib.h" + +bool libspdm_aead_aes_gcm_encrypt(const uint8_t *key, size_t key_size, + const uint8_t *iv, size_t iv_size, + const uint8_t *a_data, size_t a_data_size, + const uint8_t *data_in, size_t data_in_size, + uint8_t *tag_out, size_t tag_size, + uint8_t *data_out, size_t *data_out_size) +{ + int32_t ret; + + if (data_in_size > INT_MAX) { + return false; + } + if (a_data_size > INT_MAX) { + return false; + } + if (iv_size != 12) { + return false; + } + switch (key_size) { + case 16: + case 24: + case 32: + break; + default: + return false; + } + if ((tag_size < 12) || (tag_size > 16)) { + return false; + } + if (data_out_size != NULL) { + if ((*data_out_size > INT_MAX) || + (*data_out_size < data_in_size)) { + return false; + } + } + + ret = libspdm_aead(key, key_size, iv, iv_size, a_data, a_data_size, + data_in, data_in_size, tag_out, tag_size, + data_out, data_out_size, true, "gcm(aes)"); + + *data_out_size = data_in_size; + + return ret == 0; +} + +bool libspdm_aead_aes_gcm_decrypt(const uint8_t *key, size_t key_size, + const uint8_t *iv, size_t iv_size, + const uint8_t *a_data, size_t a_data_size, + const uint8_t *data_in, size_t data_in_size, + const uint8_t *tag, size_t tag_size, + uint8_t *data_out, size_t *data_out_size) +{ + int ret; + if (data_in_size > INT_MAX) { + return false; + } + if (a_data_size > INT_MAX) { + return false; + } + if (iv_size != 12) { + return false; + } + switch (key_size) { + case 16: + case 24: + case 32: + break; + default: + return false; + } + if ((tag_size < 12) || (tag_size > 16)) { + return false; + } + if (data_out_size != NULL) { + if ((*data_out_size > INT_MAX) || + (*data_out_size < data_in_size)) { + return false; + } + } + + ret = libspdm_aead(key, key_size, iv, iv_size, a_data, a_data_size, + data_in, data_in_size, tag, tag_size, + data_out, data_out_size, false, "gcm(aes)"); + + *data_out_size = data_in_size; + + return ret == 0; + +} diff --git a/kernel-open/nvidia/libspdm_ec.c b/kernel-open/nvidia/libspdm_ec.c new file mode 100644 index 0000000..ab8146b --- /dev/null +++ b/kernel-open/nvidia/libspdm_ec.c @@ -0,0 +1,172 @@ +/* +* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +* Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved. 
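A minimal caller-side sketch of the one-shot GCM wrapper above; the key, nonce, and buffer sizes are illustrative assumptions chosen to satisfy the parameter checks, not values taken from this change:

/* Illustrative only: exercises libspdm_aead_aes_gcm_encrypt() as defined above. */
static bool example_seal(void)
{
    uint8_t key[32] = {0};       /* AES-256 key (16/24/32 accepted)      */
    uint8_t iv[12]  = {0};       /* the wrapper requires a 12-byte nonce */
    uint8_t aad[16] = {0};       /* additional authenticated data        */
    uint8_t pt[64]  = {0};       /* plaintext                            */
    uint8_t ct[64];              /* ciphertext, same length as plaintext */
    uint8_t tag[16];             /* tags of 12..16 bytes are accepted    */
    size_t  ct_size = sizeof(ct);

    /* On success ct_size is set to the plaintext length. */
    return libspdm_aead_aes_gcm_encrypt(key, sizeof(key), iv, sizeof(iv),
                                        aad, sizeof(aad), pt, sizeof(pt),
                                        tag, sizeof(tag), ct, &ct_size);
}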
+* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md +*/ + +#include "internal_crypt_lib.h" + +static bool lkca_ecdsa_sign(void *ec_context, + const uint8_t *message_hash, size_t hash_size, + uint8_t *signature, size_t *sig_size) +{ + return false; +} + +bool libspdm_ec_set_pub_key(void *ec_context, const uint8_t *public_key, + size_t public_key_size) +{ + if (ec_context == NULL || public_key == NULL) { + return false; + } + + return lkca_ec_set_pub_key(ec_context, public_key, public_key_size); +} + +bool libspdm_ec_get_pub_key(void *ec_context, uint8_t *public_key, + size_t *public_key_size) +{ + if (ec_context == NULL || public_key_size == NULL) { + return false; + } + + if (public_key == NULL && *public_key_size != 0) { + return false; + } + + return lkca_ec_get_pub_key(ec_context, public_key, public_key_size); +} + +bool libspdm_ec_check_key(const void *ec_context) +{ + /* TBD*/ + return true; +} + +bool libspdm_ec_generate_key(void *ec_context, uint8_t *public_data, + size_t *public_size) +{ + if (ec_context == NULL || public_size == NULL) { + return false; + } + + if (public_data == NULL && *public_size != 0) { + return false; + } + + return lkca_ec_generate_key(ec_context, public_data, public_size); +} + +bool libspdm_ec_compute_key(void *ec_context, const uint8_t *peer_public, + size_t peer_public_size, uint8_t *key, + size_t *key_size) +{ + if (ec_context == NULL || peer_public == NULL || key_size == NULL || + key == NULL) { + return false; + } + + if (peer_public_size > INT_MAX) { + return false; + } + + return lkca_ec_compute_key(ec_context, peer_public, peer_public_size, key, + key_size); +} + +bool libspdm_ecdsa_sign(void *ec_context, size_t hash_nid, + const uint8_t *message_hash, size_t hash_size, + uint8_t *signature, size_t *sig_size) +{ + if (ec_context == NULL || message_hash == NULL) { + return false; + } + + if (signature == NULL) { + return false; + } + + switch (hash_nid) { + case LIBSPDM_CRYPTO_NID_SHA256: + if (hash_size != LIBSPDM_SHA256_DIGEST_SIZE) { + return false; + } + break; + + case LIBSPDM_CRYPTO_NID_SHA384: + if (hash_size != LIBSPDM_SHA384_DIGEST_SIZE) { + return false; + } + break; + + case LIBSPDM_CRYPTO_NID_SHA512: + if (hash_size != LIBSPDM_SHA512_DIGEST_SIZE) { + return false; + } + break; + + default: + return false; + } + + return lkca_ecdsa_sign(ec_context, message_hash, hash_size, signature, sig_size); +} + +bool libspdm_ecdsa_verify(void *ec_context, size_t hash_nid, + const uint8_t *message_hash, size_t hash_size, + const uint8_t *signature, size_t sig_size) +{ + if (ec_context == NULL || message_hash == NULL || signature == NULL) { + return false; + } + + if (sig_size > INT_MAX || sig_size == 0) { + return false; + } + + switch (hash_nid) { + case LIBSPDM_CRYPTO_NID_SHA256: + if (hash_size != LIBSPDM_SHA256_DIGEST_SIZE) { + return false; + } + break; + + case LIBSPDM_CRYPTO_NID_SHA384: + if (hash_size != LIBSPDM_SHA384_DIGEST_SIZE) { + return false; + } + break; + + case LIBSPDM_CRYPTO_NID_SHA512: + if (hash_size != LIBSPDM_SHA512_DIGEST_SIZE) { + return false; + } + break; + + default: + return false; + } + + return lkca_ecdsa_verify(ec_context, hash_nid, message_hash, hash_size, + signature, sig_size); +} diff --git a/kernel-open/nvidia/libspdm_ecc.c b/kernel-open/nvidia/libspdm_ecc.c new file mode 100644 index 0000000..3c0d5c8 --- /dev/null +++ b/kernel-open/nvidia/libspdm_ecc.c @@ -0,0 +1,410 @@ +/* +* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. +* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +*/ + +#include "internal_crypt_lib.h" + +#ifdef USE_LKCA +#include <linux/module.h> +MODULE_SOFTDEP("pre: ecdh_generic,ecdsa_generic"); + +#include <crypto/akcipher.h> +#include <crypto/ecdh.h> +#include <crypto/internal/ecc.h> +#ifndef NV_CRYPTO_AKCIPHER_VERIFY_PRESENT +#include <crypto/sig.h> + +struct signature +{ + u64 r[ECC_MAX_DIGITS]; + u64 s[ECC_MAX_DIGITS]; +}; +#endif // NV_CRYPTO_AKCIPHER_VERIFY_PRESENT + +#define ECDSA_PUBKEY_HEADER_XY_PRESENT (0x4) + +struct ecc_ctx { + unsigned int curve_id; + u64 priv_key[ECC_MAX_DIGITS]; // In big endian + + struct { + // ecdsa pubkey has header indicating length of pubkey + u8 padding[7]; + u8 pub_key_prefix; + u64 pub_key[2 * ECC_MAX_DIGITS]; + }; + + bool pub_key_set; + bool priv_key_set; + char const *name; + int size; +}; +#endif // USE_LKCA + +void *libspdm_ec_new_by_nid(size_t nid) +{ +#ifndef USE_LKCA + return NULL; +#else + struct ecc_ctx *ctx; + + if ((nid != LIBSPDM_CRYPTO_NID_SECP256R1) && (nid != LIBSPDM_CRYPTO_NID_SECP384R1)) { + return NULL; + } + + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + return NULL; + } + + if (nid == LIBSPDM_CRYPTO_NID_SECP256R1) { + ctx->curve_id = ECC_CURVE_NIST_P256; + ctx->size = 64; + ctx->name = "ecdsa-nist-p256"; + } else { + ctx->curve_id = ECC_CURVE_NIST_P384; + ctx->size = 96; + ctx->name = "ecdsa-nist-p384"; + } + ctx->pub_key_set = false; + ctx->priv_key_set = false; + + return ctx; +#endif // USE_LKCA +} + +void libspdm_ec_free(void *ec_context) +{ +#ifdef USE_LKCA + kfree(ec_context); +#endif +} + +bool lkca_ecdsa_set_priv_key(void *context, uint8_t *key, size_t key_size) +{ +#ifndef USE_LKCA + return false; +#else + struct ecc_ctx *ctx = context; + unsigned int ndigits = ctx->size / 16; + + if (key_size != (ctx->size / 2)) { + return false; + } + + memcpy(ctx->priv_key, key, key_size); + + // XXX: if this fails, do we want to retry generating new key?
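+    //
+    // ndigits above follows from ctx->size being the raw X||Y public-key
+    // length in bytes: each coordinate is ctx->size / 2 bytes and the kernel
+    // ECC helpers work in 64-bit limbs, so ndigits = ctx->size / 16
+    // (P-256: 64 / 16 = 4 limbs, P-384: 96 / 16 = 6 limbs).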
+ if(ecc_make_pub_key(ctx->curve_id, ndigits, ctx->priv_key, ctx->pub_key)) { + return false; + } + + ctx->pub_key_set = true; + ctx->priv_key_set = true; + return true; +#endif // USE_LKCA +} + +bool lkca_ec_set_pub_key(void *ec_context, const uint8_t *public_key, + size_t public_key_size) +{ +#ifndef USE_LKCA + return false; +#else + struct ecc_ctx *ctx = ec_context; + struct ecc_point pub_key; + unsigned int ndigits; + + if (public_key_size != ctx->size) { + return false; + } + + // We can reuse pub_key for now + ndigits = ctx->size / 16; + pub_key = ECC_POINT_INIT(ctx->pub_key, ctx->pub_key + ndigits, ndigits); + + ecc_swap_digits(public_key, ctx->pub_key, ndigits); + ecc_swap_digits(((u64 *)public_key) + ndigits, ctx->pub_key + ndigits, ndigits); + if(ecc_is_pubkey_valid_full(ecc_get_curve(ctx->curve_id), &pub_key)) { + return false; + } + + memcpy(ctx->pub_key, public_key, public_key_size); + ctx->pub_key_set = true; + return true; +#endif // USE_LKCA +} + +bool lkca_ec_get_pub_key(void *ec_context, uint8_t *public_key, + size_t *public_key_size) +{ +#ifndef USE_LKCA + return false; +#else + struct ecc_ctx *ctx = ec_context; + + if (*public_key_size < ctx->size) { + *public_key_size = ctx->size; + return false; + } + *public_key_size = ctx->size; + + memcpy(public_key, ctx->pub_key, ctx->size); + return true; +#endif // USE_LKCA +} + +bool lkca_ec_generate_key(void *ec_context, uint8_t *public_data, + size_t *public_size) +{ +#ifndef USE_LKCA + return false; +#else + struct ecc_ctx *ctx = ec_context; + + unsigned int ndigits = ctx->size / 16; + + if(ecc_gen_privkey(ctx->curve_id, ndigits, ctx->priv_key)) { + return false; + } + // XXX: if this fails, do we want to retry generating new key? + if(ecc_make_pub_key(ctx->curve_id, ndigits, ctx->priv_key, ctx->pub_key)) { + return false; + } + + memcpy(public_data, ctx->pub_key, ctx->size); + *public_size = ctx->size; + ctx->priv_key_set = true; + ctx->pub_key_set = true; + + return true; +#endif // USE_LKCA +} + +bool lkca_ec_compute_key(void *ec_context, const uint8_t *peer_public, + size_t peer_public_size, uint8_t *key, + size_t *key_size) +{ +#ifndef USE_LKCA + return false; +#else + struct ecc_ctx *ctx = ec_context; + + if (peer_public_size != ctx->size) { + return false; + } + + if (!ctx->priv_key_set) { + return false; + } + + if ((ctx->size / 2) > *key_size) { + return false; + } + + if (crypto_ecdh_shared_secret(ctx->curve_id, ctx->size / 16, + (const u64 *) ctx->priv_key, + (const u64 *) peer_public, + (u64 *) key)) { + return false; + } + + *key_size = ctx->size / 2; + return true; +#endif // USE_LKCA +} + +#ifndef NV_CRYPTO_AKCIPHER_VERIFY_PRESENT +static bool lkca_ecdsa_verify_crypto_sig(void *ec_context, size_t hash_nid, + const uint8_t *message_hash, size_t hash_size, + const uint8_t *signature, size_t sig_size) +{ +#ifndef USE_LKCA + return false; +#else // USE_LKCA + struct ecc_ctx *ctx = ec_context; + u8 *pub_key; + int err; + DECLARE_CRYPTO_WAIT(wait); + struct crypto_sig * tfm = NULL; + struct signature sig; + + if (sig_size != ctx->size || !ctx->pub_key_set) + { + return false; + } + + tfm = crypto_alloc_sig(ctx->name, CRYPTO_ALG_TYPE_SIG, 0); + if (IS_ERR(tfm)) { + pr_info("crypto_alloc_sig failed in lkca_ecdsa_verify\n"); + return false; + } + + // modify header of pubkey to indicate size + pub_key = (u8 *) &(ctx->pub_key_prefix); + *pub_key = ECDSA_PUBKEY_HEADER_XY_PRESENT; + err = crypto_sig_set_pubkey(tfm, pub_key, ctx->size + 1); + if (err != 0) + { + pr_info("crypto_sig_set_pubkey failed in lkca_ecdsa_verify: 
%d", -err); + goto failTfm; + } + + // + // Compared to the way we receive the signature, we need to: + // - swap order of all digits + // - swap endianness for each digit + // + memset(&sig, 0, sizeof(sig)); + ecc_digits_from_bytes(signature, ctx->size/2, sig.r, ECC_MAX_DIGITS); + ecc_digits_from_bytes(signature + ctx->size/2, ctx->size/2, sig.s, ECC_MAX_DIGITS); + + err = crypto_sig_verify(tfm, (void *)&sig, sizeof(sig), message_hash, hash_size); + if (err != 0) + { + pr_info("crypto_sig_verify failed in lkca_ecdsa_verify %d\n", -err); + } + +failTfm: + crypto_free_sig(tfm); + + return err == 0; +#endif // USE_LKCA +} + +#else // NV_CRYPTO_AKCIPHER_VERIFY_PRESENT +static bool lkca_ecdsa_verify_akcipher(void *ec_context, size_t hash_nid, + const uint8_t *message_hash, size_t hash_size, + const uint8_t *signature, size_t sig_size) +{ +#ifndef USE_LKCA + return false; +#else // USE_LKCA + struct ecc_ctx *ctx = ec_context; + u8 *pub_key; + int err; + DECLARE_CRYPTO_WAIT(wait); + + // Roundabout way + u64 ber_max_len = 3 + 2 * (4 + (ECC_MAX_BYTES)); + u64 ber_len = 0; + u8 *ber = NULL; + struct akcipher_request *req = NULL; + struct crypto_akcipher *tfm = NULL; + struct scatterlist sg; + + if (sig_size != ctx->size) { + return false; + } + + if(ctx->pub_key_set == false){ + return false; + } + + tfm = crypto_alloc_akcipher(ctx->name, CRYPTO_ALG_TYPE_AKCIPHER, 0); + if (IS_ERR(tfm)) { + pr_info("crypto_alloc_akcipher failed in lkca_ecdsa_verify\n"); + return false; + } + + // modify header of pubkey to indicate size + pub_key = (u8 *) &(ctx->pub_key_prefix); + *pub_key = ECDSA_PUBKEY_HEADER_XY_PRESENT; + if ((err = crypto_akcipher_set_pub_key(tfm, pub_key, ctx->size + 1)) != 0) { + pr_info("crypto_akcipher_set_pub_key failed in lkca_ecdsa_verify: %d\n", -err); + goto failTfm; + } + + req = akcipher_request_alloc(tfm, GFP_KERNEL); + if (IS_ERR(req)) { + pr_info("akcipher_request_alloc failed in lkca_ecdsa_verify\n"); + goto failTfm; + } + + // We concatenate signature and hash and ship it to kernel + ber = kmalloc(ber_max_len + hash_size, GFP_KERNEL); + if (ber == NULL) { + goto failReq; + } + + // XXX: NOTE THIS WILL WORK ONLY FOR 256 AND 384 bits. For larger keys + // length field will be longer than 1 byte and I haven't taken care of that! 
+ + // Signature + ber[ber_len++] = 0x30; + ber[ber_len++] = 2 * (2 + ctx->size / 2); + ber[ber_len++] = 0x02; + if (signature[0] > 127) { + ber[ber_len++] = ctx->size / 2 + 1; + ber[1]++; + ber[ber_len++] = 0; + } else { + ber[ber_len++] = ctx->size / 2; + } + memcpy(ber + ber_len, signature, sig_size / 2); + ber_len += sig_size / 2; + ber[ber_len++] = 0x02; + if (signature[sig_size / 2] > 127) { + ber[ber_len++] = ctx->size / 2 + 1; + ber[1]++; + ber[ber_len++] = 0; + } else { + ber[ber_len++] = ctx->size / 2; + } + memcpy(ber + ber_len, signature + sig_size / 2, sig_size / 2); + ber_len += sig_size / 2; + + // Just append hash, for scatterlists it can't be on stack anyway + memcpy(ber + ber_len, message_hash, hash_size); + + sg_init_one(&sg, ber, ber_len + hash_size); + akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait); + akcipher_request_set_crypt(req, &sg, NULL, ber_len, hash_size); + err = crypto_wait_req(crypto_akcipher_verify(req), &wait); + if (err != 0){ + pr_info("crypto_akcipher_verify failed in lkca_ecdsa_verify %d\n", -err); + } + + kfree(ber); +failReq: + akcipher_request_free(req); +failTfm: + crypto_free_akcipher(tfm); + + return err == 0; +#endif // USE_LKCA +} +#endif // NV_CRYPTO_AKCIPHER_VERIFY_PRESENT + +bool lkca_ecdsa_verify(void *ec_context, size_t hash_nid, + const uint8_t *message_hash, size_t hash_size, + const uint8_t *signature, size_t sig_size) +{ +#ifndef NV_CRYPTO_AKCIPHER_VERIFY_PRESENT + return lkca_ecdsa_verify_crypto_sig(ec_context, hash_nid, message_hash, hash_size, + signature, sig_size); +#else // NV_CRYPTO_AKCIPHER_VERIFY_PRESENT + return lkca_ecdsa_verify_akcipher(ec_context, hash_nid, message_hash, hash_size, + signature, sig_size); +#endif // NV_CRYPTO_AKCIPHER_VERIFY_PRESENT +} diff --git a/kernel-open/nvidia/libspdm_hkdf.c b/kernel-open/nvidia/libspdm_hkdf.c new file mode 100644 index 0000000..92235de --- /dev/null +++ b/kernel-open/nvidia/libspdm_hkdf.c @@ -0,0 +1,158 @@ +/* +* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+*/ + +#include "internal_crypt_lib.h" + +// RFC 5869 has some very non-intuitive points, reading it is advised +static bool lkca_hkdf_expand_only(struct crypto_shash *alg, + const uint8_t *prk, size_t prk_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size) +{ +#ifndef USE_LKCA + return false; +#else + int ret; + int i; + uint8_t ctr = 1; + uint8_t tmp[HASH_MAX_DIGESTSIZE]; + SHASH_DESC_ON_STACK(desc, alg); + desc->tfm = alg; + + ret = crypto_shash_setkey(desc->tfm, prk, prk_size); + if (ret != 0) { + pr_info("key size mismatch %ld\n", prk_size); + return false; + } + + for (i = 0, ctr = 1; i < out_size; i += prk_size, ctr++) { + ret = crypto_shash_init(desc); + if (ret) { + return false; + } + + if (i != 0) { + ret = crypto_shash_update(desc, out + i - prk_size, prk_size); + if (ret) { + return false; + } + } + + if (info_size > 0) { + ret = crypto_shash_update(desc, info, info_size); + if (ret) { + return false; + } + } + + ret = crypto_shash_update(desc, &ctr, 1); + if (ret) + return false; + + if ((out_size - i) < prk_size) { + ret = crypto_shash_final(desc, tmp); + if (ret) { + return false; + } + memcpy(out + i, tmp, out_size - i); + memzero_explicit(tmp, sizeof(tmp)); + } else { + ret = crypto_shash_final(desc, out + i); + if (ret) { + return false; + } + } + } + + return true; +#endif +} + +bool lkca_hkdf_extract_and_expand(const char *alg_name, + const uint8_t *key, size_t key_size, + const uint8_t *salt, size_t salt_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size) +{ +#ifndef USE_LKCA + return false; +#else + int ret = 0; + struct crypto_shash *alg; + uint8_t prk[HASH_MAX_DIGESTSIZE]; + + if (key == NULL || salt == NULL || info == NULL || out == NULL || + key_size > sizeof(prk) || salt_size > INT_MAX || info_size > INT_MAX || + out_size > (sizeof(prk) * 255)) { + return false; + } + + alg = crypto_alloc_shash(alg_name, 0, 0); + if (IS_ERR(alg)) { + return false; + } + + ret = crypto_shash_setkey(alg, salt, salt_size); + if (ret != 0) { + goto out; + } + ret = crypto_shash_tfm_digest(alg, key, key_size, prk); + if (ret != 0) { + goto out; + } + + ret = !lkca_hkdf_expand_only(alg, prk, crypto_shash_digestsize(alg), info, info_size, out, out_size); + +out: + crypto_free_shash(alg); + return ret == 0; +#endif +} + +bool lkca_hkdf_expand(const char *alg_name, + const uint8_t *prk, size_t prk_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size) +{ +#ifndef USE_LKCA + return false; +#else + bool ret = false; + struct crypto_shash *alg; + + if (prk == NULL || info == NULL || out == NULL || prk_size > (512 / 8) || + info_size > INT_MAX || (out_size > (prk_size * 255))) { + return false; + } + + alg = crypto_alloc_shash(alg_name, 0, 0); + if (IS_ERR(alg)) { + return false; + } + + ret = lkca_hkdf_expand_only(alg, prk, prk_size, info, info_size, out, out_size); + + crypto_free_shash(alg); + return ret; +#endif +} diff --git a/kernel-open/nvidia/libspdm_hkdf_sha.c b/kernel-open/nvidia/libspdm_hkdf_sha.c new file mode 100644 index 0000000..4c260c9 --- /dev/null +++ b/kernel-open/nvidia/libspdm_hkdf_sha.c @@ -0,0 +1,111 @@ +/* +* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +* Prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved. +* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md +*/ + +#include "internal_crypt_lib.h" + +bool libspdm_hkdf_sha256_extract_and_expand(const uint8_t *key, size_t key_size, + const uint8_t *salt, size_t salt_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size) +{ + return lkca_hkdf_extract_and_expand("hmac(sha256)", key, key_size, + salt, salt_size, info, info_size, + out, out_size); +} + +bool libspdm_hkdf_sha256_extract(const uint8_t *key, size_t key_size, + const uint8_t *salt, size_t salt_size, + uint8_t *prk_out, size_t prk_out_size) +{ + if (prk_out_size != (256 / 8)) + return false; + + return libspdm_hmac_sha256_all(key, key_size, salt, salt_size, prk_out); +} + +bool libspdm_hkdf_sha256_expand(const uint8_t *prk, size_t prk_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size) +{ + return lkca_hkdf_expand("hmac(sha256)", prk, prk_size, info, info_size, + out, out_size); +} + +bool libspdm_hkdf_sha384_extract_and_expand(const uint8_t *key, size_t key_size, + const uint8_t *salt, size_t salt_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size) +{ + return lkca_hkdf_extract_and_expand("hmac(sha384)", key, key_size, + salt, salt_size, info, info_size, + out, out_size); +} + +bool libspdm_hkdf_sha384_extract(const uint8_t *key, size_t key_size, + const uint8_t *salt, size_t salt_size, + uint8_t *prk_out, size_t prk_out_size) +{ + if (prk_out_size != (384 / 8)) + return false; + + return libspdm_hmac_sha384_all(key, key_size, salt, salt_size, prk_out); +} + +bool libspdm_hkdf_sha384_expand(const uint8_t *prk, size_t prk_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size) +{ + return lkca_hkdf_expand("hmac(sha384)", prk, prk_size, info, info_size, + out, out_size); +} + +bool libspdm_hkdf_sha512_extract_and_expand(const uint8_t *key, size_t key_size, + const uint8_t *salt, size_t salt_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size) +{ + return lkca_hkdf_extract_and_expand("hmac(sha512)", key, key_size, + salt, salt_size, info, info_size, out, + out_size); +} + +bool libspdm_hkdf_sha512_extract(const uint8_t *key, size_t key_size, + const uint8_t *salt, size_t salt_size, + uint8_t *prk_out, size_t prk_out_size) +{ + 
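+    // RFC 5869 extract step: PRK = HMAC-SHA512(salt, IKM), so the output is
+    // always exactly one digest (512 / 8 = 64 bytes). The expand step then
+    // derives output as T(i) = HMAC(PRK, T(i-1) | info | i), which is the
+    // counter loop in lkca_hkdf_expand_only().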
if (prk_out_size != (512 / 8)) + return false; + + return libspdm_hmac_sha512_all(key, key_size, salt, salt_size, prk_out); +} + +bool libspdm_hkdf_sha512_expand(const uint8_t *prk, size_t prk_size, + const uint8_t *info, size_t info_size, + uint8_t *out, size_t out_size) +{ + return lkca_hkdf_expand("hmac(sha512)", prk, prk_size, info, info_size, + out, out_size); +} diff --git a/kernel-open/nvidia/libspdm_hmac_sha.c b/kernel-open/nvidia/libspdm_hmac_sha.c new file mode 100644 index 0000000..37c9b29 --- /dev/null +++ b/kernel-open/nvidia/libspdm_hmac_sha.c @@ -0,0 +1,282 @@ +/* +* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +* Prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved. +* License: BSD 3-Clause License. 
For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md +*/ + +#include "internal_crypt_lib.h" + +void *libspdm_hmac_sha256_new(void) +{ + return lkca_hash_new("hmac(sha256)"); +} + +void libspdm_hmac_sha256_free(void *hmac_sha256_ctx) +{ + lkca_hash_free(hmac_sha256_ctx); +} + +bool libspdm_hmac_sha256_set_key(void *hmac_sha256_ctx, const uint8_t *key, + size_t key_size) +{ + if (hmac_sha256_ctx == NULL) + return false; + + return lkca_hmac_set_key(hmac_sha256_ctx, key, key_size); +} + +bool libspdm_hmac_sha256_duplicate(const void *hmac_sha256_ctx, + void *new_hmac_sha256_ctx) +{ + if (hmac_sha256_ctx == NULL || new_hmac_sha256_ctx == NULL) { + return false; + } + + return lkca_hmac_duplicate(new_hmac_sha256_ctx, hmac_sha256_ctx); +} + +bool libspdm_hmac_sha256_update(void *hmac_sha256_ctx, const void *data, + size_t data_size) +{ + int32_t ret; + + if (hmac_sha256_ctx == NULL) { + return false; + } + + if (data == NULL && data_size != 0) { + return false; + } + if (data_size > INT_MAX) { + return false; + } + + ret = crypto_shash_update(hmac_sha256_ctx, data, data_size); + if (ret != 0) { + return false; + } + return true; +} + +bool libspdm_hmac_sha256_final(void *hmac_sha256_ctx, uint8_t *hmac_value) +{ + int32_t ret; + + if (hmac_sha256_ctx == NULL || hmac_value == NULL) { + return false; + } + + ret = crypto_shash_final(hmac_sha256_ctx, hmac_value); + + if (ret != 0) { + return false; + } + return true; +} + +bool libspdm_hmac_sha256_all(const void *data, size_t data_size, + const uint8_t *key, size_t key_size, + uint8_t *hmac_value) +{ + if (hmac_value == NULL) { + return false; + } + if (data == NULL && data_size != 0) { + return false; + } + if (data_size > INT_MAX) { + return false; + } + + return lkca_hmac_all("hmac(sha256)", key, key_size, data, data_size, hmac_value); +} + +void *libspdm_hmac_sha384_new(void) +{ + return lkca_hash_new("hmac(sha384)"); +} + +void libspdm_hmac_sha384_free(void *hmac_sha384_ctx) +{ + lkca_hash_free(hmac_sha384_ctx); +} + +bool libspdm_hmac_sha384_set_key(void *hmac_sha384_ctx, const uint8_t *key, + size_t key_size) +{ + if (hmac_sha384_ctx == NULL) + return false; + + return lkca_hmac_set_key(hmac_sha384_ctx, key, key_size); +} + +bool libspdm_hmac_sha384_duplicate(const void *hmac_sha384_ctx, + void *new_hmac_sha384_ctx) +{ + if (hmac_sha384_ctx == NULL || new_hmac_sha384_ctx == NULL) { + return false; + } + + return lkca_hmac_duplicate(new_hmac_sha384_ctx, hmac_sha384_ctx); +} + +bool libspdm_hmac_sha384_update(void *hmac_sha384_ctx, const void *data, + size_t data_size) +{ + int32_t ret; + + if (hmac_sha384_ctx == NULL) { + return false; + } + + if (data == NULL && data_size != 0) { + return false; + } + if (data_size > INT_MAX) { + return false; + } + + ret = crypto_shash_update(hmac_sha384_ctx, data, data_size); + if (ret != 0) { + return false; + } + return true; +} + +bool libspdm_hmac_sha384_final(void *hmac_sha384_ctx, uint8_t *hmac_value) +{ + int32_t ret; + + if (hmac_sha384_ctx == NULL || hmac_value == NULL) { + return false; + } + + ret = crypto_shash_final(hmac_sha384_ctx, hmac_value); + + if (ret != 0) { + return false; + } + return true; +} + +bool libspdm_hmac_sha384_all(const void *data, size_t data_size, + const uint8_t *key, size_t key_size, + uint8_t *hmac_value) +{ + if (hmac_value == NULL) { + return false; + } + if (data == NULL && data_size != 0) { + return false; + } + if (data_size > INT_MAX) { + return false; + } + + return lkca_hmac_all("hmac(sha384)", key, key_size, data, data_size, hmac_value); 
+} + +void *libspdm_hmac_sha512_new(void) +{ + return lkca_hash_new("hmac(sha512)"); +} + +void libspdm_hmac_sha512_free(void *hmac_sha512_ctx) +{ + lkca_hash_free(hmac_sha512_ctx); +} + +bool libspdm_hmac_sha512_set_key(void *hmac_sha512_ctx, const uint8_t *key, + size_t key_size) +{ + if (hmac_sha512_ctx == NULL) + return false; + + return lkca_hmac_set_key(hmac_sha512_ctx, key, key_size); +} + +bool libspdm_hmac_sha512_duplicate(const void *hmac_sha512_ctx, + void *new_hmac_sha512_ctx) +{ + if (hmac_sha512_ctx == NULL || new_hmac_sha512_ctx == NULL) { + return false; + } + + return lkca_hmac_duplicate(new_hmac_sha512_ctx, hmac_sha512_ctx); +} + +bool libspdm_hmac_sha512_update(void *hmac_sha512_ctx, const void *data, + size_t data_size) +{ + int32_t ret; + + if (hmac_sha512_ctx == NULL) { + return false; + } + + if (data == NULL && data_size != 0) { + return false; + } + if (data_size > INT_MAX) { + return false; + } + + ret = crypto_shash_update(hmac_sha512_ctx, data, data_size); + if (ret != 0) { + return false; + } + return true; +} + +bool libspdm_hmac_sha512_final(void *hmac_sha512_ctx, uint8_t *hmac_value) +{ + int32_t ret; + + if (hmac_sha512_ctx == NULL || hmac_value == NULL) { + return false; + } + + ret = crypto_shash_final(hmac_sha512_ctx, hmac_value); + + if (ret != 0) { + return false; + } + return true; +} + +bool libspdm_hmac_sha512_all(const void *data, size_t data_size, + const uint8_t *key, size_t key_size, + uint8_t *hmac_value) +{ + if (hmac_value == NULL) { + return false; + } + if (data == NULL && data_size != 0) { + return false; + } + if (data_size > INT_MAX) { + return false; + } + + return lkca_hmac_all("hmac(sha512)", key, key_size, data, data_size, hmac_value); +} diff --git a/kernel-open/nvidia/libspdm_internal_crypt_lib.c b/kernel-open/nvidia/libspdm_internal_crypt_lib.c new file mode 100644 index 0000000..2b8792e --- /dev/null +++ b/kernel-open/nvidia/libspdm_internal_crypt_lib.c @@ -0,0 +1,42 @@ +/* +* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +* Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved. +* License: BSD 3-Clause License.
For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md +*/ + +#include "os-interface.h" +#include "internal_crypt_lib.h" +#include "library/cryptlib.h" + +bool libspdm_check_crypto_backend(void) +{ +#ifdef USE_LKCA + nv_printf(NV_DBG_INFO, "libspdm_check_crypto_backend: LKCA wrappers found.\n"); + nv_printf(NV_DBG_INFO, "libspdm_check_crypto_backend: LKCA calls may still fail if modules have not been loaded!\n"); + return true; +#else + nv_printf(NV_DBG_ERRORS, "libspdm_check_crypto_backend: Error - libspdm expects LKCA but found stubs!\n"); + return false; +#endif +} + diff --git a/kernel-open/nvidia/libspdm_rand.c b/kernel-open/nvidia/libspdm_rand.c new file mode 100644 index 0000000..d0b2a3e --- /dev/null +++ b/kernel-open/nvidia/libspdm_rand.c @@ -0,0 +1,37 @@ +/* +* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +*/ + +#include "internal_crypt_lib.h" + +// This is non-gpl symbol and not part of LKCA so no need to stub it out +bool libspdm_random_bytes(uint8_t *output, size_t size) +{ + get_random_bytes(output, size); + return true; +} + +// This is specifically allowed by spdm +bool libspdm_random_seed(const uint8_t *seed, size_t seed_size) +{ + return true; +} diff --git a/kernel-open/nvidia/libspdm_rsa.c b/kernel-open/nvidia/libspdm_rsa.c new file mode 100644 index 0000000..8bedea3 --- /dev/null +++ b/kernel-open/nvidia/libspdm_rsa.c @@ -0,0 +1,613 @@ +/* +* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +*/ + +#include "internal_crypt_lib.h" +#include "library/cryptlib.h" + +#ifdef USE_LKCA +#include <linux/module.h> +#include <linux/mpi.h> +#include <linux/random.h> + +#include <crypto/akcipher.h> +#include <crypto/internal/rsa.h> + +/* ------------------------ Macros & Defines ------------------------------- */ +#define GET_MOST_SIGNIFICANT_BIT(keySize) (keySize > 0 ? ((keySize - 1) & 7) : 0) +#define GET_ENC_MESSAGE_SIZE_BYTE(keySize) ((keySize + 7) >> 3) +#define PKCS1_MGF1_COUNTER_SIZE_BYTE (4) +#define RSA_PSS_PADDING_ZEROS_SIZE_BYTE (8) +#define RSA_PSS_TRAILER_FIELD (0xbc) +#define SHIFT_RIGHT_AND_GET_BYTE(val, x) ((val >> x) & 0xFF) +#ifndef BITS_TO_BYTES +#define BITS_TO_BYTES(b) (b >> 3) +#endif + +static const unsigned char zeroes[RSA_PSS_PADDING_ZEROS_SIZE_BYTE] = { 0 }; + +struct rsa_ctx +{ + struct rsa_key key; + bool pub_key_set; + bool priv_key_set; + int size; +}; +#endif // #ifdef USE_LKCA + +/*! + * Create and initialize an RSA context. + * + * @return : A void pointer to an RSA context + * +*/ +void *libspdm_rsa_new +( + void +) +{ +#ifndef USE_LKCA + return NULL; +#else + struct rsa_ctx *ctx; + + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + + if (ctx == NULL) + { + return NULL; + } + + memset(ctx, 0, sizeof(*ctx)); + + ctx->pub_key_set = false; + ctx->priv_key_set = false; + + return ctx; +#endif +} + +/*! + * Free an RSA context. + * + * @param rsa_context : An RSA context pointer + * +*/ +void libspdm_rsa_free +( + void *rsa_context +) +{ +#ifdef USE_LKCA + struct rsa_ctx *ctx = rsa_context; + + if (ctx != NULL) + { + if (ctx->key.n) kfree(ctx->key.n); + if (ctx->key.e) kfree(ctx->key.e); + if (ctx->key.d) kfree(ctx->key.d); + if (ctx->key.q) kfree(ctx->key.q); + if (ctx->key.p) kfree(ctx->key.p); + if (ctx->key.dq) kfree(ctx->key.dq); + if (ctx->key.dp) kfree(ctx->key.dp); + if (ctx->key.qinv) kfree(ctx->key.qinv); + kfree(ctx); + } +#endif +} + +#define rsa_set_key_case(a, a_sz, A) \ + case A: \ + { \ + if (ctx->key.a) { \ + kfree(ctx->key.a); \ + } \ + ctx->key.a = shadow_num; \ + ctx->key.a_sz = bn_size; \ + break; \ + } +/*! + * Set a key into the RSA context.
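+ *
+ * Illustrative call sequence (buffers and lengths are assumptions, not part
+ * of this change):
+ *
+ *   void *rsa = libspdm_rsa_new();
+ *   ok &= libspdm_rsa_set_key(rsa, LIBSPDM_RSA_KEY_N, n_buf, n_len);
+ *   ok &= libspdm_rsa_set_key(rsa, LIBSPDM_RSA_KEY_E, e_buf, e_len);
+ *   ok &= libspdm_rsa_set_key(rsa, LIBSPDM_RSA_KEY_D, d_buf, d_len);
+ *   ...
+ *   libspdm_rsa_free(rsa);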
+ * + * @param rsa_context : An RSA context pointer + * @param key_tag : Indicates the key tag for the RSA key + * @param big_number : A big number buffer storing the RSA key + * @param bn_size : The size of the big number + * + * @Return : True if OK; otherwise return False +*/ +bool libspdm_rsa_set_key +( + void *rsa_context, + const libspdm_rsa_key_tag_t key_tag, + const uint8_t *big_number, + size_t bn_size +) +{ +#ifndef USE_LKCA + return false; +#else + struct rsa_ctx *ctx = rsa_context; + uint8_t *shadow_num; + + if (ctx == NULL) + { + return false; + } + + // Quick sanity check if tag is valid + switch (key_tag) + { + case LIBSPDM_RSA_KEY_N: + case LIBSPDM_RSA_KEY_E: + case LIBSPDM_RSA_KEY_D: + case LIBSPDM_RSA_KEY_Q: + case LIBSPDM_RSA_KEY_P: + case LIBSPDM_RSA_KEY_DP: + case LIBSPDM_RSA_KEY_DQ: + case LIBSPDM_RSA_KEY_Q_INV: + break; + default: + return false; + break; + } + + if (big_number != NULL) + { + shadow_num = kmalloc(bn_size, GFP_KERNEL); + if (shadow_num == NULL) + { + return false; + } + memcpy(shadow_num, big_number, bn_size); + } + else + { + shadow_num = NULL; + bn_size = 0; + } + + switch (key_tag) + { + rsa_set_key_case(n, n_sz, LIBSPDM_RSA_KEY_N) + rsa_set_key_case(e, e_sz, LIBSPDM_RSA_KEY_E) + rsa_set_key_case(d, d_sz, LIBSPDM_RSA_KEY_D) + rsa_set_key_case(q, q_sz, LIBSPDM_RSA_KEY_Q) + rsa_set_key_case(p, p_sz, LIBSPDM_RSA_KEY_P) + rsa_set_key_case(dq, dq_sz, LIBSPDM_RSA_KEY_DQ) + rsa_set_key_case(dp, dp_sz, LIBSPDM_RSA_KEY_DP) + rsa_set_key_case(qinv, qinv_sz, LIBSPDM_RSA_KEY_Q_INV) + default: + // We can't get here ever + break; + } + + return true; +#endif +} + +/*! + * Perform PKCS1 MGF1 operation. + * + * @param mask : A mask pointer to store return data + * @param maskedDB_length : The mask data block length + * @param seed : A pointer to the seed (random) values + * @param seed_length : The seed length + * @param hash_nid : The hash NID + * + * @Return : True if OK; otherwise return False + */ +static bool NV_PKCS1_MGF1 +( + uint8_t *mask, + size_t maskedDB_length, + const uint8_t *seed, + size_t seed_length, + size_t hash_nid +) +{ +#ifndef USE_LKCA + return false; +#else + size_t mdLength; + size_t counter; + size_t outLength; + uint8_t counterBuf[4]; + void *sha384_ctx = NULL; + uint8_t hash_value[LIBSPDM_SHA384_DIGEST_SIZE]; + bool status = false; + + if (mask == NULL || seed == NULL) + { + return false; + } + + // Only support SHA384 for MGF1 now. + if (hash_nid == LIBSPDM_CRYPTO_NID_SHA384) + { + mdLength = LIBSPDM_SHA384_DIGEST_SIZE; + } + else + { + return false; + } + + sha384_ctx = libspdm_sha384_new(); + + if (sha384_ctx == NULL) + { + pr_err("%s : libspdm_sha384_new() failed \n", __FUNCTION__); + return false; + } + + for (counter = 0, outLength = 0; outLength < maskedDB_length; counter++) + { + counterBuf[0] = (uint8_t)SHIFT_RIGHT_AND_GET_BYTE(counter, 24); + counterBuf[1] = (uint8_t)SHIFT_RIGHT_AND_GET_BYTE(counter, 16); + counterBuf[2] = (uint8_t)SHIFT_RIGHT_AND_GET_BYTE(counter, 8); + counterBuf[3] = (uint8_t)SHIFT_RIGHT_AND_GET_BYTE(counter, 0); + + status = libspdm_sha384_init(sha384_ctx); + + if (!status) + { + pr_err("%s: libspdm_sha384_init() failed !! \n", __FUNCTION__); + goto _error_exit; + } + + status = libspdm_sha384_update(sha384_ctx, seed, seed_length); + + if (!status) + { + pr_err("%s: libspdm_sha384_update() failed(seed) !! \n", __FUNCTION__); + goto _error_exit; + } + + status = libspdm_sha384_update(sha384_ctx, counterBuf, 4); + + if (!status) + { + pr_err("%s: libspdm_sha384_update() failed(counterBuf) !!
\n", __FUNCTION__); + goto _error_exit; + } + + if (outLength + mdLength <= maskedDB_length) + { + status = libspdm_sha384_final(sha384_ctx, mask + outLength); + + if (!status) + { + pr_err("%s: libspdm_sha384_final() failed (<= maskedDB_length) !! \n", __FUNCTION__); + goto _error_exit; + } + outLength += mdLength; + } + else + { + status = libspdm_sha384_final(sha384_ctx, hash_value); + + if (!status) + { + pr_err("%s: libspdm_sha384_final() failed(> maskedDB_length) !! \n", __FUNCTION__); + goto _error_exit; + } + + memcpy(mask + outLength, hash_value, maskedDB_length - outLength); + outLength = maskedDB_length; + } + } + status = true; + +_error_exit: + libspdm_sha384_free(sha384_ctx); + return status; +#endif +} + +/* + 0xbc : Trailer Field + +-----------+ + | M | + +-----------+ + | + V + Hash + | + V + +--------+----------+----------+ + M' = |Padding1| mHash | salt | + +--------+----------+----------+ + |--------------|---------------| + | + +--------+----------+ V + DB = |Padding2| salt | Hash + +--------+----------+ | + | | + V | + xor <--- MGF <---| + | | + | | + V V + +-------------------+----------+----+ + EM = | maskedDB | H |0xbc| + +-------------------+----------+----+ + +salt : The random number, we hardcode its size as hash size here. +M' : The concatenation of padding1 + message hash + salt +MGF : Mask generation function. + A mask generation function takes an octet string of variable length + and a desired output length as input, and outputs an octet string of + the desired length + MGF1 is a Mask Generation Function based on a hash function. + +Padding1 : 8 zeros +Padding2 : 0x01 + +The detail spec is at https://datatracker.ietf.org/doc/html/rfc2437 +*/ + +/*! + * Set keys and call PKCS1_MGF1 to generate signature. + * + * @param rsa_context : A RSA context pointer + * @param hash_nid : The hash NID + * @param message_hash : The pointer to message hash + * @param signature : The pointer is used to store generated signature + * @param sig_size : For input, a pointer store signature buffer size. + * For output, a pointer store generate signature size. + * @param salt_Length : The salt length for RSA-PSS algorithm + * + * @Return : True if OK; otherwise return False + */ +static bool nvRsaPaddingAddPkcs1PssMgf1 +( + void *rsa_context, + size_t hash_nid, + const uint8_t *message_hash, + size_t hash_size, + uint8_t *signature, + size_t *sig_size, + int salt_length +) +{ +#ifndef USE_LKCA + return false; +#else + bool status = false; + struct rsa_ctx *ctx = rsa_context; + void *sha384_ctx = NULL; + uint32_t keySize; + uint32_t msBits; + size_t emLength; + uint8_t saltBuf[64]; + size_t maskedDB_length; + size_t i; + uint8_t *tmp_H; + uint8_t *tmp_P; + int rc; + unsigned int ret_data_size; + MPI mpi_n = NULL; + MPI mpi_d = NULL; + MPI mpi_c = mpi_alloc(0); + MPI mpi_p = mpi_alloc(0); + + // read modulus to BN struct + mpi_n = mpi_read_raw_data(ctx->key.n, ctx->key.n_sz); + if (mpi_n == NULL) + { + pr_err("%s : mpi_n create failed !! \n", __FUNCTION__); + goto _error_exit; + } + + // read private exponent to BN struct + mpi_d = mpi_read_raw_data(ctx->key.d, ctx->key.d_sz); + if (mpi_d == NULL) + { + pr_err("%s : mpi_d create failed !! \n", __FUNCTION__); + goto _error_exit; + } + + keySize = mpi_n->nbits; + msBits = GET_MOST_SIGNIFICANT_BIT(keySize); + emLength = BITS_TO_BYTES(keySize); + + if (msBits == 0) + { + *signature++ = 0; + emLength--; + } + + if (emLength < hash_size + 2) + { + pr_err("%s : emLength < hash_size + 2 !! 
\n", __FUNCTION__); + goto _error_exit; + } + + // Now, we only support salt_length == LIBSPDM_SHA384_DIGEST_SIZE + if (salt_length != LIBSPDM_SHA384_DIGEST_SIZE || + hash_nid != LIBSPDM_CRYPTO_NID_SHA384) + { + pr_err("%s : Invalid salt_length (%x) \n", __FUNCTION__, salt_length); + goto _error_exit; + } + + get_random_bytes(saltBuf, salt_length); + + maskedDB_length = emLength - hash_size - 1; + tmp_H = signature + maskedDB_length; + sha384_ctx = libspdm_sha384_new(); + + if (sha384_ctx == NULL) + { + pr_err("%s : libspdm_sha384_new() failed !! \n", __FUNCTION__); + goto _error_exit; + } + + status = libspdm_sha384_init(sha384_ctx); + if (!status) + { + pr_err("%s : libspdm_sha384_init() failed !! \n", __FUNCTION__); + goto _error_exit; + } + + status = libspdm_sha384_update(sha384_ctx, zeroes, sizeof(zeroes)); + + if (!status) + { + pr_err("%s : libspdm_sha384_update() with zeros failed !!\n", __FUNCTION__); + goto _error_exit; + } + + status = libspdm_sha384_update(sha384_ctx, message_hash, hash_size); + + if (!status) + { + pr_err("%s: libspdm_sha384_update() with message_hash failed !!\n", __FUNCTION__); + goto _error_exit; + } + + if (salt_length) + { + status = libspdm_sha384_update(sha384_ctx, saltBuf, salt_length); + if (!status) + { + pr_err("%s : libspdm_sha384_update() with saltBuf failed !!\n", __FUNCTION__); + goto _error_exit; + } + } + + status = libspdm_sha384_final(sha384_ctx, tmp_H); + if (!status) + { + pr_err("%s : libspdm_sha384_final() with tmp_H failed !!\n", __FUNCTION__); + goto _error_exit; + } + + /* Generate dbMask in place then perform XOR on it */ + status = NV_PKCS1_MGF1(signature, maskedDB_length, tmp_H, hash_size, hash_nid); + + if (!status) + { + pr_err("%s : NV_PKCS1_MGF1() failed \n", __FUNCTION__); + goto _error_exit; + } + + tmp_P = signature; + tmp_P += emLength - salt_length - hash_size - 2; + *tmp_P++ ^= 0x1; + + if (salt_length > 0) + { + for (i = 0; i < salt_length; i++) + { + *tmp_P++ ^= saltBuf[i]; + } + } + + if (msBits) + { + signature[0] &= 0xFF >> (8 - msBits); + } + + /* H is already in place so just set final 0xbc */ + signature[emLength - 1] = RSA_PSS_TRAILER_FIELD; + + // read signature to BN struct + mpi_p = mpi_read_raw_data(signature, emLength); + if (mpi_p == NULL) + { + pr_err("%s : mpi_p() create failed !!\n", __FUNCTION__); + goto _error_exit; + } + + // Staring RSA encryption with private key over signature. + rc = mpi_powm(mpi_c, mpi_p, mpi_d, mpi_n); + if (rc != 0) + { + pr_err("%s : mpi_powm() failed \n", __FUNCTION__); + goto _error_exit; + } + + rc = mpi_read_buffer(mpi_c, signature, *sig_size, &ret_data_size, NULL); + if (rc != 0) + { + pr_err("%s : mpi_read_buffer() failed \n", __FUNCTION__); + goto _error_exit; + } + + if (ret_data_size > *sig_size) + { + goto _error_exit; + } + + *sig_size = ret_data_size; + status = true; + +_error_exit: + + mpi_free(mpi_n); + mpi_free(mpi_d); + mpi_free(mpi_c); + mpi_free(mpi_p); + + libspdm_sha384_free(sha384_ctx); + + return status; + +#endif +} + +/*! + * Perform RSA-PSS sigaature sign process with LKCA library. + * + * @param rsa_context : A RSA context pointer + * @param hash_nid : The hash NID + * @param message_hash : The pointer to message hash + * @param signature : The pointer is used to store generated signature + * @param sig_size : For input, a pointer store signature buffer size. + * For output, a pointer store generate signature size. 
+ * + * @Return : True if OK; otherwise return False + */ +bool lkca_rsa_pss_sign +( + void *rsa_context, + size_t hash_nid, + const uint8_t *message_hash, + size_t hash_size, + uint8_t *signature, + size_t *sig_size +) +{ +#ifndef USE_LKCA + return true; +#else + return nvRsaPaddingAddPkcs1PssMgf1(rsa_context, + hash_nid, + message_hash, + hash_size, + signature, + sig_size, + LIBSPDM_SHA384_DIGEST_SIZE); +#endif +} + diff --git a/kernel-open/nvidia/libspdm_rsa_ext.c b/kernel-open/nvidia/libspdm_rsa_ext.c new file mode 100644 index 0000000..63d960f --- /dev/null +++ b/kernel-open/nvidia/libspdm_rsa_ext.c @@ -0,0 +1,85 @@ +/* +* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +* Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved. +* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md +*/ + +/** @file + * RSA Asymmetric Cipher Wrapper Implementation. + * + * This file implements following APIs which provide more capabilities for RSA: + * 1) rsa_pss_sign + * + * RFC 8017 - PKCS #1: RSA Cryptography Specifications version 2.2 + **/ + +#include "internal_crypt_lib.h" +#include "library/cryptlib.h" + +/** + * Carries out the RSA-PSS signature generation with EMSA-PSS encoding scheme. + * + * This function carries out the RSA-PSS signature generation with EMSA-PSS encoding scheme defined in + * RSA PKCS#1 v2.2. + * + * The salt length is same as digest length. + * + * If the signature buffer is too small to hold the contents of signature, false + * is returned and sig_size is set to the required buffer size to obtain the signature. + * + * If rsa_context is NULL, then return false. + * If message_hash is NULL, then return false. + * If hash_size need match the hash_nid. nid could be SHA256, SHA384, SHA512, SHA3_256, SHA3_384, SHA3_512. + * If sig_size is large enough but signature is NULL, then return false. + * + * @param[in] rsa_context Pointer to RSA context for signature generation. + * @param[in] hash_nid hash NID + * @param[in] message_hash Pointer to octet message hash to be signed. + * @param[in] hash_size size of the message hash in bytes. + * @param[out] signature Pointer to buffer to receive RSA-SSA PSS signature. + * @param[in, out] sig_size On input, the size of signature buffer in bytes. 
+ * On output, the size of data returned in signature buffer in bytes. + * + * @retval true signature successfully generated in RSA-SSA PSS. + * @retval false signature generation failed. + * @retval false sig_size is too small. + * + **/ +bool libspdm_rsa_pss_sign(void *rsa_context, size_t hash_nid, + const uint8_t *message_hash, size_t hash_size, + uint8_t *signature, size_t *sig_size) +{ + return lkca_rsa_pss_sign(rsa_context, hash_nid, message_hash, hash_size, + signature, sig_size); +} +// +// In RM, we just need sign process; so we stub verification function. +// Verification function is needed in GSP code only, +// +bool libspdm_rsa_pss_verify(void *rsa_context, size_t hash_nid, + const uint8_t *message_hash, size_t hash_size, + const uint8_t *signature, size_t sig_size) +{ + return false; +} + diff --git a/kernel-open/nvidia/libspdm_sha.c b/kernel-open/nvidia/libspdm_sha.c new file mode 100644 index 0000000..af3dd54 --- /dev/null +++ b/kernel-open/nvidia/libspdm_sha.c @@ -0,0 +1,264 @@ +/* +* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +* Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved. +* License: BSD 3-Clause License. 
For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md +*/ + +#include "internal_crypt_lib.h" + +void *libspdm_sha256_new(void) +{ + return lkca_hash_new("sha256"); +} + +void libspdm_sha256_free(void *sha256_ctx) +{ + lkca_hash_free(sha256_ctx); +} + +bool libspdm_sha256_init(void *sha256_context) +{ + return crypto_shash_init(sha256_context) == 0; +} + +bool libspdm_sha256_duplicate(const void *sha256_context, + void *new_sha256_context) +{ + if (sha256_context == NULL || new_sha256_context == NULL) { + return false; + } + + return lkca_hash_duplicate(new_sha256_context, sha256_context); +} + +bool libspdm_sha256_update(void *sha256_context, const void *data, + size_t data_size) +{ + int32_t ret; + + if (sha256_context == NULL) { + return false; + } + + if (data == NULL && data_size != 0) { + return false; + } + if (data_size > INT_MAX) { + return false; + } + + ret = crypto_shash_update(sha256_context, data, data_size); + if (ret != 0) { + return false; + } + return true; +} + +bool libspdm_sha256_final(void *sha256_context, uint8_t *hash_value) +{ + int32_t ret; + + if (sha256_context == NULL || hash_value == NULL) { + return false; + } + + ret = crypto_shash_final(sha256_context, hash_value); + if (ret != 0) { + return false; + } + return true; +} + +bool libspdm_sha256_hash_all(const void *data, size_t data_size, + uint8_t *hash_value) +{ + if (hash_value == NULL) { + return false; + } + if (data == NULL && data_size != 0) { + return false; + } + if (data_size > INT_MAX) { + return false; + } + + return lkca_hash_all("sha256", data, data_size, hash_value); +} + +void *libspdm_sha384_new(void) +{ + return lkca_hash_new("sha384"); +} + +void libspdm_sha384_free(void *sha384_ctx) +{ + lkca_hash_free(sha384_ctx); +} + +bool libspdm_sha384_init(void *sha384_context) +{ + return crypto_shash_init(sha384_context) == 0; +} + +bool libspdm_sha384_duplicate(const void *sha384_context, + void *new_sha384_context) +{ + if (sha384_context == NULL || new_sha384_context == NULL) { + return false; + } + + return lkca_hash_duplicate(new_sha384_context, sha384_context); +} + +bool libspdm_sha384_update(void *sha384_context, const void *data, + size_t data_size) +{ + int32_t ret; + + if (sha384_context == NULL) { + return false; + } + + if (data == NULL && data_size != 0) { + return false; + } + if (data_size > INT_MAX) { + return false; + } + + ret = crypto_shash_update(sha384_context, data, data_size); + if (ret != 0) { + return false; + } + return true; +} + +bool libspdm_sha384_final(void *sha384_context, uint8_t *hash_value) +{ + int32_t ret; + + if (sha384_context == NULL || hash_value == NULL) { + return false; + } + + ret = crypto_shash_final(sha384_context, hash_value); + if (ret != 0) { + return false; + } + return true; +} + +bool libspdm_sha384_hash_all(const void *data, size_t data_size, + uint8_t *hash_value) +{ + if (hash_value == NULL) { + return false; + } + if (data == NULL && data_size != 0) { + return false; + } + if (data_size > INT_MAX) { + return false; + } + + return lkca_hash_all("sha384", data, data_size, hash_value); +} + +void *libspdm_sha512_new(void) +{ + return lkca_hash_new("sha512"); +} + +void libspdm_sha512_free(void *sha512_ctx) +{ + lkca_hash_free(sha512_ctx); +} + +bool libspdm_sha512_init(void *sha512_context) +{ + return crypto_shash_init(sha512_context) == 0; +} + +bool libspdm_sha512_duplicate(const void *sha512_context, + void *new_sha512_context) +{ + if (sha512_context == NULL || new_sha512_context == NULL) { + return false; + } + + 
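+    // Duplication is delegated to crypto_shash_export()/crypto_shash_import()
+    // inside lkca_hash_duplicate() (see libspdm_shash.c later in this patch),
+    // since struct shash_desc instances cannot simply be memcpy'd.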
return lkca_hash_duplicate(new_sha512_context, sha512_context); +} + +bool libspdm_sha512_update(void *sha512_context, const void *data, + size_t data_size) +{ + int32_t ret; + + if (sha512_context == NULL) { + return false; + } + + if (data == NULL && data_size != 0) { + return false; + } + if (data_size > INT_MAX) { + return false; + } + + ret = crypto_shash_update(sha512_context, data, data_size); + if (ret != 0) { + return false; + } + return true; +} + +bool libspdm_sha512_final(void *sha512_context, uint8_t *hash_value) +{ + int32_t ret; + + if (sha512_context == NULL || hash_value == NULL) { + return false; + } + + ret = crypto_shash_final(sha512_context, hash_value); + if (ret != 0) { + return false; + } + return true; +} + +bool libspdm_sha512_hash_all(const void *data, size_t data_size, + uint8_t *hash_value) +{ + if (hash_value == NULL) { + return false; + } + if (data == NULL && data_size != 0) { + return false; + } + if (data_size > INT_MAX) { + return false; + } + + return lkca_hash_all("sha512", data, data_size, hash_value); +} diff --git a/kernel-open/nvidia/libspdm_shash.c b/kernel-open/nvidia/libspdm_shash.c new file mode 100644 index 0000000..5a96501 --- /dev/null +++ b/kernel-open/nvidia/libspdm_shash.c @@ -0,0 +1,181 @@ +/* +* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +* SPDX-License-Identifier: MIT +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+*/
+
+#include "internal_crypt_lib.h"
+
+#ifdef USE_LKCA
+#ifndef NV_CRYPTO_TFM_CTX_ALIGNED_PRESENT
+#include <crypto/internal/hash.h>
+#endif
+#endif
+
+void *lkca_hash_new(const char* alg_name)
+{
+#ifndef USE_LKCA
+    return NULL;
+#else
+    // XXX: can we reuse the crypto_shash part and just allocate the desc?
+    struct crypto_shash *alg;
+    struct shash_desc *desc;
+
+    alg = crypto_alloc_shash(alg_name, 0, 0);
+    if (IS_ERR(alg)) {
+        printk(KERN_INFO "Failed to alloc %s\n", alg_name);
+        return NULL;
+    }
+
+    desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(alg), GFP_KERNEL);
+    if (desc == NULL) {
+        printk(KERN_INFO "Kernel out of mem\n");
+        crypto_free_shash(alg);
+        return NULL;
+    }
+
+    desc->tfm = alg;
+
+    return desc;
+#endif
+}
+
+void lkca_hash_free(struct shash_desc *ctx)
+{
+#ifndef USE_LKCA
+#else
+    crypto_free_shash(ctx->tfm);
+    kfree(ctx);
+#endif
+}
+
+bool lkca_hash_duplicate(struct shash_desc *dst, struct shash_desc const *src)
+{
+#ifndef USE_LKCA
+    return false;
+#else
+    SHASH_DESC_ON_STACK(tmp, src);
+
+    if (crypto_shash_export((struct shash_desc *) src, tmp)) {
+        return false;
+    }
+    if (crypto_shash_import(dst, tmp)) {
+        return false;
+    }
+
+    return true;
+#endif
+}
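+/*
+ * Usage sketch (illustrative only; buf, len and digest are hypothetical
+ * caller variables): the wrappers above are driven by the libspdm_sha*_*
+ * functions in libspdm_sha.c in the usual new -> init -> update -> final
+ * -> free order:
+ *
+ *     uint8_t digest[32];
+ *     void *ctx = libspdm_sha256_new();          // lkca_hash_new("sha256")
+ *     if (ctx != NULL) {
+ *         if (libspdm_sha256_init(ctx) &&
+ *             libspdm_sha256_update(ctx, buf, len) &&
+ *             libspdm_sha256_final(ctx, digest)) {
+ *             // digest now holds SHA-256(buf)
+ *         }
+ *         libspdm_sha256_free(ctx);              // lkca_hash_free()
+ *     }
+ *
+ * lkca_hash_duplicate() exists because SPDM transcript hashing has to fork
+ * a partially-updated hash: export/import copies the running state into a
+ * second descriptor without sharing the underlying state buffer.
+ */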
+bool lkca_hmac_duplicate(struct shash_desc *dst, struct shash_desc const *src)
+{
+#ifndef USE_LKCA
+    return false;
+#else
+    // in LKCA, hmac export doesn't export ipad/opad, so we need to work
+    // around it here
+
+    struct crypto_shash *src_tfm = src->tfm;
+    struct crypto_shash *dst_tfm = dst->tfm;
+    int ss = crypto_shash_statesize(dst_tfm);
+
+#ifdef NV_CRYPTO_TFM_CTX_ALIGNED_PRESENT
+    char *src_ipad = crypto_tfm_ctx_aligned(&src_tfm->base);
+    char *dst_ipad = crypto_tfm_ctx_aligned(&dst_tfm->base);
+#else
+    int ctx_size = crypto_shash_alg(dst_tfm)->base.cra_ctxsize;
+    char *src_ipad = crypto_shash_ctx(src_tfm);
+    char *dst_ipad = crypto_shash_ctx(dst_tfm);
+    /*
+     * Actual struct definition is hidden, so I assume the data we need is at
+     * the end. In 6.0 the struct has a pointer to crypto_shash followed by
+     * 'u8 ipad[statesize];', then 'u8 opad[statesize];'.
+     */
+    src_ipad += ctx_size - 2 * ss;
+    dst_ipad += ctx_size - 2 * ss;
+#endif
+
+    memcpy(dst_ipad, src_ipad, crypto_shash_blocksize(src->tfm));
+    memcpy(dst_ipad + ss, src_ipad + ss, crypto_shash_blocksize(src->tfm));
+    crypto_shash_clear_flags(dst->tfm, CRYPTO_TFM_NEED_KEY);
+
+    return lkca_hash_duplicate(dst, src);
+#endif
+}
+
+bool lkca_hash_all(const char* alg_name, const void *data,
+                   size_t data_size, uint8_t *hash_value)
+{
+#ifndef USE_LKCA
+    return false;
+#else
+    int ret;
+    struct crypto_shash *alg;
+
+    alg = crypto_alloc_shash(alg_name, 0, 0);
+    if (IS_ERR(alg)) {
+        return false;
+    }
+
+    ret = crypto_shash_tfm_digest(alg, data, data_size, hash_value);
+
+    crypto_free_shash(alg);
+
+    return (ret == 0);
+#endif
+}
+
+bool lkca_hmac_set_key(struct shash_desc *desc, const uint8_t *key, size_t key_size)
+{
+#ifndef USE_LKCA
+    return false;
+#else
+    int ret;
+
+    ret = crypto_shash_setkey(desc->tfm, key, key_size);
+    if (ret == 0) {
+        ret = crypto_shash_init(desc);
+    }
+    return ret == 0;
+#endif
+}
+
+bool lkca_hmac_all(const char* alg_name, const uint8_t *key, size_t key_size,
+                   const uint8_t *data, size_t data_size, uint8_t *hash_value)
+{
+#ifndef USE_LKCA
+    return false;
+#else
+    int ret;
+    struct crypto_shash *alg;
+
+    alg = crypto_alloc_shash(alg_name, 0, 0);
+    if (IS_ERR(alg)) {
+        return false;
+    }
+
+    ret = crypto_shash_setkey(alg, key, key_size);
+    if (ret == 0) {
+        ret = crypto_shash_tfm_digest(alg, data, data_size, hash_value);
+    }
+
+    crypto_free_shash(alg);
+
+    return (ret == 0);
+#endif
+}
+
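The one-shot helpers wrap the whole allocate/setkey/digest/free sequence, so a caller that already knows the algorithm name needs only a single call. A minimal sketch, assuming LKCA is available and using standard kernel-crypto algorithm naming (the key and message values below are illustrative only):

    uint8_t mac[32];  /* HMAC-SHA-256 output */
    static const uint8_t key[] = { 0x0b, 0x0b, 0x0b, 0x0b };
    static const uint8_t msg[] = "sample message";

    /* Allocates an "hmac(sha256)" tfm, sets the key, digests, and frees. */
    if (!lkca_hmac_all("hmac(sha256)", key, sizeof(key),
                       msg, sizeof(msg) - 1, mac)) {
        /* HMAC failed, e.g. the kernel lacks the algorithm. */
    }
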
diff --git a/kernel-open/nvidia/libspdm_x509.c b/kernel-open/nvidia/libspdm_x509.c
new file mode 100644
index 0000000..4333046
--- /dev/null
+++ b/kernel-open/nvidia/libspdm_x509.c
@@ -0,0 +1,682 @@
+/*
+* SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+* SPDX-License-Identifier: MIT
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+* DEALINGS IN THE SOFTWARE.
+*
+* libspdm_x509_verify_cert_chain, libspdm_x509_get_cert_from_cert_chain, check
+* and prototypes taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
+* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
+*/
+
+#include "internal_crypt_lib.h"
+
+#ifdef USE_LKCA
+#include <keys/asymmetric-type.h>
+#include <crypto/public_key.h>
+#endif
+
+bool libspdm_x509_construct_certificate(const uint8_t *cert, size_t cert_size,
+                                        uint8_t **single_x509_cert)
+{
+    LIBSPDM_ASSERT(false);
+    return false;
+}
+
+bool libspdm_x509_construct_certificate_stack(uint8_t **x509_stack, ...)
+{
+    LIBSPDM_ASSERT(false);
+    return false;
+}
+
+void libspdm_x509_free(void *x509_cert)
+{
+    LIBSPDM_ASSERT(false);
+}
+
+void libspdm_x509_stack_free(void *x509_stack)
+{
+    LIBSPDM_ASSERT(false);
+}
+
+#ifdef USE_LKCA
+bool libspdm_encode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen)
+{
+    static const uint8_t base64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+    size_t i;
+    size_t tmp;
+    size_t size;
+    uint8_t *ptr = dst;
+
+    for (i = 0; (i + 2) < srclen; i += 3)
+    {
+        if (ptr - dst + 4 > *p_dstlen)
+        {
+            goto Exit;
+        }
+        tmp = (src[i] << 16) | (src[i+1] << 8) | (src[i+2]);
+        *ptr++ = base64[(tmp >> 18) & 63];
+        *ptr++ = base64[(tmp >> 12) & 63];
+        *ptr++ = base64[(tmp >> 6) & 63];
+        *ptr++ = base64[tmp & 63];
+    }
+
+    // 1 byte extra
+    if (i == srclen - 1)
+    {
+        if (ptr - dst + 4 > *p_dstlen)
+        {
+            goto Exit;
+        }
+        tmp = src[i] << 4;
+        *ptr++ = base64[(tmp >> 6) & 63];
+        *ptr++ = base64[tmp & 63];
+        *ptr++ = '=';
+        *ptr++ = '=';
+    }
+
+    // 2 bytes extra
+    if (i == srclen - 2)
+    {
+        if (ptr - dst + 4 > *p_dstlen)
+        {
+            goto Exit;
+        }
+        tmp = ((src[i] << 8) | (src[i+1])) << 2;
+        *ptr++ = base64[(tmp >> 12) & 63];
+        *ptr++ = base64[(tmp >> 6) & 63];
+        *ptr++ = base64[tmp & 63];
+        *ptr++ = '=';
+    }
+
+    *p_dstlen = ptr - dst;
+    return true;
+Exit:
+    *p_dstlen = 0;
+    return false;
+}
+
+typedef enum {
+    BASE64_CONV_VALID,
+    BASE64_CONV_PAD,
+    BASE64_CONV_INVALID
+} BASE64_CONV;
+
+static BASE64_CONV libspdm_decode_base64_chr(uint8_t b64_chr, uint8_t *value)
+{
+    if (b64_chr >= 'A' && b64_chr <= 'Z')
+    {
+        *value = b64_chr - 'A';
+    }
+    else if (b64_chr >= 'a' && b64_chr <= 'z')
+    {
+        *value = b64_chr - 'a' + 26;
+    }
+    else if (b64_chr >= '0' && b64_chr <= '9')
+    {
+        *value = b64_chr - '0' + 52;
+    }
+    else if (b64_chr == '+' || b64_chr == '-')
+    {
+        *value = 62;
+    }
+    else if (b64_chr == '/' || b64_chr == '_')
+    {
+        *value = 63;
+    }
+    else if (b64_chr == '=')
+    {
+        *value = 0;
+        return BASE64_CONV_PAD;
+    }
+    else
+    {
+        return BASE64_CONV_INVALID;
+    }
+
+    return BASE64_CONV_VALID;
+}
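+/*
+ * Worked example of the 3-byte -> 4-character mapping above (values are
+ * illustrative): encoding the two bytes { 0xAB, 0xCD } takes the
+ * "2 bytes extra" path, so tmp = ((0xAB << 8) | 0xCD) << 2 = 0x2AF34,
+ * which yields the 6-bit indices 42, 60 and 52 -> "q80", plus one '='
+ * pad character, i.e. the output is "q80=".
+ */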
+static bool libspdm_decode_base64_stripped(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen)
+{
+    const uint8_t *p_read;
+    uint8_t *p_write;
+    uint8_t i;
+    uint8_t bytes;
+    uint32_t bin_value;
+    uint8_t char_value;
+
+    if (src == NULL || dst == NULL || srclen % 4 != 0)
+    {
+        return false;
+    }
+    for (p_read = src, p_write = dst; p_read < src + srclen; p_read += 4)
+    {
+        for (i = 0, bytes = 3, bin_value = 0; i < 4; i++)
+        {
+            if (libspdm_decode_base64_chr(p_read[i], &char_value) == BASE64_CONV_PAD)
+            {
+                bytes--;
+                // padding contributes zero bits; accumulate it like a valid character
+                bin_value <<= 6;
+                bin_value |= char_value;
+            }
+            else if (libspdm_decode_base64_chr(p_read[i], &char_value) == BASE64_CONV_VALID)
+            {
+                bin_value <<= 6;
+                bin_value |= char_value;
+            }
+            else
+            {
+                // attempting to decode an invalid character
+                goto Exit;
+            }
+        }
+
+        if (p_write - dst + bytes > *p_dstlen)
+        {
+            // buffer too small
+            goto Exit;
+        }
+
+        switch (bytes)
+        {
+            case 3:
+                *p_write++ = (bin_value & 0x00ff0000) >> 16;
+                *p_write++ = (bin_value & 0x0000ff00) >> 8;
+                *p_write++ = (bin_value & 0x000000ff);
+                break;
+            case 2:
+                *p_write++ = (bin_value & 0x00ff0000) >> 16;
+                *p_write++ = (bin_value & 0x0000ff00) >> 8;
+                break;
+            case 1:
+                *p_write++ = (bin_value & 0x00ff0000) >> 16;
+                break;
+            default:
+                // invalid state in base64
+                goto Exit;
+        }
+    }
+    *p_dstlen = p_write - dst;
+    return true;
+Exit:
+    *p_dstlen = 0;
+    return false;
+}
+
+bool libspdm_decode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen)
+{
+    size_t s_progress;
+    size_t d_progress;
+    size_t decode_size;
+    size_t decoded_size;
+
+    // for each round we decode 64 bytes and skip the linebreaks
+    for (s_progress = d_progress = 0; s_progress < srclen; s_progress += 65)
+    {
+        if (s_progress + 65 < srclen)
+        {
+            decode_size = 64;
+        }
+        else
+        {
+            // -1 to avoid decoding the '\n' byte at the end
+            decode_size = srclen - s_progress - 1;
+        }
+        // calculate the size after decoding; index the padding checks from
+        // s_progress so they look at the current chunk, not the buffer start
+        decoded_size = (decode_size / 4) * 3;
+        if (src[s_progress + decode_size - 1] == '=')
+        {
+            decoded_size--;
+        }
+        if (src[s_progress + decode_size - 2] == '=')
+        {
+            decoded_size--;
+        }
+        // break early if the buffer is too small
+        if (*p_dstlen - d_progress < decoded_size)
+        {
+            break;
+        }
+        if (!libspdm_decode_base64_stripped(src + s_progress, dst + d_progress, decode_size, &decoded_size))
+        {
+            return false;
+        }
+        d_progress += decoded_size;
+    }
+    if (s_progress < srclen)
+    {
+        *p_dstlen = 0;
+        return false;
+    }
+    *p_dstlen = d_progress;
+    return true;
+}
+#else // USE_LKCA
+bool libspdm_encode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen)
+{
+    return false;
+}
+
+bool libspdm_decode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen)
+{
+    return false;
+}
+#endif // USE_LKCA
+
+static bool lkca_asn1_get_tag(uint8_t const *ptr, uint8_t const *end,
+                              size_t *length, uint32_t tag)
+{
+    uint64_t max_len = end - ptr;
+
+    // Chain must be less than 1 GB
+    if ((max_len < 2) || (max_len > (1024 * 1024 * 1024))) {
+        return false;
+    }
+
+    // We only deal with universal and application tags
+    if (ptr[0] != tag) {
+        return false;
+    }
+
+    if (ptr[1] < 0x80) {
+        *length = ptr[1] + 2;
+    } else if (ptr[1] == 0x81) {
+        if (max_len < 3) {
+            return false;
+        }
+        *length = ptr[2] + 3;
+    } else if (ptr[1] == 0x82) {
+        if (max_len < 4) {
+            return false;
+        }
+        *length = (ptr[2] << 8) + ptr[3] + 4;
+    } else {
+        // In theory it could be bigger than 64KB
+        return false;
+    }
+
+    if (*length > max_len) {
+        return false;
+    }
+
+    return true;
+}
+
+bool libspdm_asn1_get_tag(uint8_t **ptr, const uint8_t *end, size_t *length,
+                          uint32_t tag)
+{
+    return lkca_asn1_get_tag(*ptr, end, length, tag);
+}
+
+bool libspdm_x509_get_subject_name(const uint8_t *cert, size_t cert_size,
+                                   uint8_t *cert_subject,
+                                   size_t *subject_size)
+{
+    LIBSPDM_ASSERT(false);
+    return false;
+}
+
+bool libspdm_x509_get_common_name(const uint8_t *cert, size_t cert_size,
+                                  char *common_name,
+                                  size_t *common_name_size)
+{
+    LIBSPDM_ASSERT(false);
+    return false;
+}
+
+bool
+libspdm_x509_get_organization_name(const uint8_t *cert, size_t cert_size,
+                                   char *name_buffer,
+                                   size_t *name_buffer_size)
+{
+    LIBSPDM_ASSERT(false);
+    return false;
+}
+
+#if (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT)
+bool libspdm_rsa_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
+                                          void **rsa_context)
+{
+    LIBSPDM_ASSERT(false);
+    return false;
+}
+#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */
+
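+/*
+ * Note on the DER framing parsed by lkca_asn1_get_tag() above (a summary of
+ * the standard definite-length forms, for reference): after the tag byte,
+ * a length of 0x00..0x7F is encoded directly (short form), 0x81 means one
+ * length byte follows, and 0x82 means two big-endian length bytes follow.
+ * So a certificate starting 30 82 05 F0 is a SEQUENCE with 0x05F0 content
+ * bytes, 0x05F4 bytes total including the four header bytes, which is the
+ * value the helper returns in *length.
+ */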
+bool libspdm_ec_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
+                                         void **ec_context)
+{
+#ifdef USE_LKCA
+    bool ret = false;
+    uint32_t key_size = 0;
+    struct key_preparsed_payload lkca_cert;
+    struct public_key *pub;
+
+    lkca_cert.data = cert;
+    lkca_cert.datalen = cert_size;
+
+    if (cert == NULL) {
+        return false;
+    }
+
+    if (key_type_asymmetric.preparse(&lkca_cert)) {
+        return false;
+    }
+
+    pub = lkca_cert.payload.data[asym_crypto];
+    // Subtract 1 because LKCA prepends the 0x04 uncompressed-point marker
+    // to public keys.
+    key_size = pub->keylen - 1;
+
+    if (key_size == (2 * 256 / 8)) {
+        *ec_context = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP256R1);
+    } else if (key_size == (2 * 384 / 8)) {
+        *ec_context = libspdm_ec_new_by_nid(LIBSPDM_CRYPTO_NID_SECP384R1);
+    } else {
+        goto err;
+    }
+
+    if (*ec_context == NULL) {
+        goto err;
+    }
+
+    // Again, skip the 0x04 marker in the key to be in line with the SPDM
+    // protocol. We will add it back in the ECDSA verify path.
+    if (!lkca_ec_set_pub_key(*ec_context, (char *) pub->key + 1, key_size)) {
+        libspdm_ec_free(*ec_context);
+        goto err;
+    }
+
+    ret = true;
+err:
+    key_type_asymmetric.free_preparse(&lkca_cert);
+    return ret;
+#else
+    return false;
+#endif
+}
+
+bool libspdm_ecd_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
+                                          void **ecd_context)
+{
+    LIBSPDM_ASSERT(false);
+    return false;
+}
+
+bool libspdm_sm2_get_public_key_from_x509(const uint8_t *cert, size_t cert_size,
+                                          void **sm2_context)
+{
+    LIBSPDM_ASSERT(false);
+    return false;
+}
+
+static int lkca_x509_verify_cert(const uint8_t *cert, size_t cert_size,
+                                 const uint8_t *ca_cert, size_t ca_cert_size)
+{
+#ifdef USE_LKCA
+    int ret;
+    struct key_preparsed_payload lkca_cert;
+    struct key_preparsed_payload lkca_ca_cert;
+
+    lkca_cert.data = cert;
+    lkca_cert.datalen = cert_size;
+    lkca_ca_cert.data = ca_cert;
+    lkca_ca_cert.datalen = ca_cert_size;
+
+    ret = key_type_asymmetric.preparse(&lkca_cert);
+    if (ret) {
+        return ret;
+    }
+
+    ret = key_type_asymmetric.preparse(&lkca_ca_cert);
+    if (ret) {
+        key_type_asymmetric.free_preparse(&lkca_cert);
+        return ret;
+    }
+
+    ret = public_key_verify_signature(lkca_ca_cert.payload.data[asym_crypto],
+                                      lkca_cert.payload.data[asym_auth]);
+
+    key_type_asymmetric.free_preparse(&lkca_cert);
+    key_type_asymmetric.free_preparse(&lkca_ca_cert);
+
+    return ret;
+#else
+    // Without LKCA we cannot verify anything: report failure, not success
+    // (0 would be interpreted as a successful verification by the caller).
+    return -1;
+#endif
+}
+
+bool libspdm_x509_verify_cert(const uint8_t *cert, size_t cert_size,
+                              const uint8_t *ca_cert, size_t ca_cert_size)
+{
+    return lkca_x509_verify_cert(cert, cert_size, ca_cert, ca_cert_size) == 0;
+}
+
+bool libspdm_x509_verify_cert_chain(const uint8_t *root_cert, size_t root_cert_length,
+                                    const uint8_t *cert_chain, size_t cert_chain_length)
+{
+    size_t preceding_cert_len;
+    const uint8_t *preceding_cert;
+    size_t current_cert_len;
+    const uint8_t *current_cert;
+    bool verify_flag;
+    int ret;
+
+    verify_flag = false;
+    preceding_cert = root_cert;
+    preceding_cert_len = root_cert_length;
+
+    current_cert = cert_chain;
+
+    /* Get current certificate from the certificates buffer and verify it with the preceding cert */
+    do {
+        if (!lkca_asn1_get_tag(
+                current_cert, cert_chain + cert_chain_length, &current_cert_len,
+                LIBSPDM_CRYPTO_ASN1_CONSTRUCTED | LIBSPDM_CRYPTO_ASN1_SEQUENCE)) {
+            break;
+        }
+
+        ret = lkca_x509_verify_cert(current_cert, current_cert_len,
+                                    preceding_cert, preceding_cert_len);
+        if (ret != 0) {
+            verify_flag = false;
+            break;
+        } else {
+            verify_flag = true;
+        }
+
+        preceding_cert = current_cert;
+        preceding_cert_len = current_cert_len;
+
+        current_cert = current_cert + current_cert_len;
+    } while (true);
+
+    return verify_flag;
+}
+
+bool libspdm_x509_get_cert_from_cert_chain(const uint8_t *cert_chain,
+                                           size_t cert_chain_length,
+                                           const int32_t
cert_index, const uint8_t **cert, + size_t *cert_length) +{ + size_t asn1_len; + int32_t current_index; + size_t current_cert_len; + const uint8_t *current_cert; + + current_cert_len = 0; + + /* Check input parameters.*/ + if ((cert_chain == NULL) || (cert == NULL) || (cert_index < -1) || + (cert_length == NULL)) { + return false; + } + + current_cert = cert_chain; + current_index = -1; + + /* Traverse the certificate chain*/ + while (true) { + /* Get asn1 tag len*/ + if (!lkca_asn1_get_tag( + current_cert, cert_chain + cert_chain_length, &asn1_len, + LIBSPDM_CRYPTO_ASN1_CONSTRUCTED | LIBSPDM_CRYPTO_ASN1_SEQUENCE)) { + break; + } + + current_cert_len = asn1_len; + current_index++; + + if (current_index == cert_index) { + *cert = current_cert; + *cert_length = current_cert_len; + return true; + } + + current_cert = current_cert + current_cert_len; + } + + /* If cert_index is -1, Return the last certificate*/ + if (cert_index == -1 && current_index >= 0) { + *cert = current_cert - current_cert_len; + *cert_length = current_cert_len; + return true; + } + + return false; +} + +bool libspdm_x509_get_tbs_cert(const uint8_t *cert, size_t cert_size, + uint8_t **tbs_cert, size_t *tbs_cert_size) +{ + LIBSPDM_ASSERT(false); + return false; +} + +bool libspdm_x509_get_version(const uint8_t *cert, size_t cert_size, + size_t *version) +{ + LIBSPDM_ASSERT(false); + return false; +} + +bool libspdm_x509_get_serial_number(const uint8_t *cert, size_t cert_size, + uint8_t *serial_number, + size_t *serial_number_size) +{ + LIBSPDM_ASSERT(false); + return false; +} + +bool libspdm_x509_get_issuer_name(const uint8_t *cert, size_t cert_size, + uint8_t *cert_issuer, + size_t *issuer_size) +{ + LIBSPDM_ASSERT(false); + return false; +} + +bool +libspdm_x509_get_issuer_common_name(const uint8_t *cert, size_t cert_size, + char *common_name, + size_t *common_name_size) +{ + LIBSPDM_ASSERT(false); + return false; +} + +bool +libspdm_x509_get_issuer_orgnization_name(const uint8_t *cert, size_t cert_size, + char *name_buffer, + size_t *name_buffer_size) +{ + LIBSPDM_ASSERT(false); + return false; +} + +bool libspdm_x509_get_signature_algorithm(const uint8_t *cert, + size_t cert_size, uint8_t *oid, + size_t *oid_size) +{ + LIBSPDM_ASSERT(false); + return false; +} + +bool libspdm_x509_get_extension_data(const uint8_t *cert, size_t cert_size, + const uint8_t *oid, size_t oid_size, + uint8_t *extension_data, + size_t *extension_data_size) +{ + LIBSPDM_ASSERT(false); + return false; +} + +bool libspdm_x509_get_validity(const uint8_t *cert, size_t cert_size, + uint8_t *from, size_t *from_size, uint8_t *to, + size_t *to_size) +{ + LIBSPDM_ASSERT(false); + return false; +} + +bool libspdm_x509_get_key_usage(const uint8_t *cert, size_t cert_size, + size_t *usage) +{ + LIBSPDM_ASSERT(false); + return false; +} + +bool libspdm_x509_get_extended_key_usage(const uint8_t *cert, + size_t cert_size, uint8_t *usage, + size_t *usage_size) +{ + LIBSPDM_ASSERT(false); + return false; +} + +bool libspdm_x509_get_extended_basic_constraints(const uint8_t *cert, + size_t cert_size, + uint8_t *basic_constraints, + size_t *basic_constraints_size) +{ + LIBSPDM_ASSERT(false); + return false; +} + +bool libspdm_x509_set_date_time(char const *date_time_str, void *date_time, size_t *date_time_size) +{ + LIBSPDM_ASSERT(false); + return false; +} + +int32_t libspdm_x509_compare_date_time(const void *date_time1, const void *date_time2) +{ + LIBSPDM_ASSERT(false); + return -3; +} + +bool libspdm_gen_x509_csr(size_t hash_nid, size_t asym_nid, + uint8_t 
*requester_info, size_t requester_info_length,
+                          void *context, char *subject_name,
+                          size_t *csr_len, uint8_t **csr_pointer)
+{
+    LIBSPDM_ASSERT(false);
+    return false;
+}
diff --git a/kernel-open/nvidia/nv-acpi.c b/kernel-open/nvidia/nv-acpi.c
new file mode 100644
index 0000000..94ab90f
--- /dev/null
+++ b/kernel-open/nvidia/nv-acpi.c
@@ -0,0 +1,1546 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+#include "nv-reg.h"
+
+#include <linux/acpi.h>
+
+#if defined(NV_LINUX_ACPI_EVENTS_SUPPORTED)
+static NV_STATUS nv_acpi_extract_integer (const union acpi_object *, void *, NvU32, NvU32 *);
+static NV_STATUS nv_acpi_extract_buffer  (const union acpi_object *, void *, NvU32, NvU32 *);
+static NV_STATUS nv_acpi_extract_package (const union acpi_object *, void *, NvU32, NvU32 *);
+static NV_STATUS nv_acpi_extract_object  (const union acpi_object *, void *, NvU32, NvU32 *);
+
+static void        nv_acpi_powersource_hotplug_event(acpi_handle, u32, void *);
+static void        nv_acpi_nvpcf_event  (acpi_handle, u32, void *);
+static acpi_status nv_acpi_find_methods (acpi_handle, u32, void *, void **);
+static NV_STATUS   nv_acpi_nvif_method  (NvU32, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *);
+
+static NV_STATUS   nv_acpi_wmmx_method  (NvU32, NvU8 *, NvU16 *);
+
+static acpi_handle nvif_handle = NULL;
+static acpi_handle wmmx_handle = NULL;
+
+// Used for AC Power Source Hotplug Handling
+static acpi_handle psr_handle = NULL;
+static acpi_handle psr_device_handle = NULL;
+static nv_acpi_t  *psr_nv_acpi_object = NULL;
+
+static NvBool battery_present = NV_FALSE;
+
+#define BIX_BATTERY_TECHNOLOGY_OFFSET 0x4
+#define BIF_BATTERY_TECHNOLOGY_OFFSET 0x3
+#define BATTERY_RECHARGABLE 0x1
+
+/* Moved into acpi/video.h in Linux 4.10 */
+#ifndef ACPI_VIDEO_NOTIFY_PROBE
+#define ACPI_VIDEO_NOTIFY_PROBE 0x81
+#endif
+
+/* Added to acpi/video.h in Linux 3.1 */
+#ifndef ACPI_VIDEO_CLASS
+#define ACPI_VIDEO_CLASS "video"
+#endif
+
+/* Maximum size of ACPI _DSM method's 4th argument */
+#define NV_MAX_ACPI_DSM_PARAM_SIZE 1024
+
+// Used for NVPCF event handling
+static acpi_handle nvpcf_handle = NULL;
+static acpi_handle nvpcf_device_handle = NULL;
+static nv_acpi_t  *nvpcf_nv_acpi_object = NULL;
+
+#define ACPI_NVPCF_EVENT_CHANGE 0xC0
+
+/*
+ * This callback will be invoked by the
acpi_notifier_call_chain() + */ +static int nv_acpi_notifier_call_chain_handler( + struct notifier_block *nb, + unsigned long val, + void *data +) +{ + struct acpi_bus_event *info = data; + + /* + * The ACPI_VIDEO_NOTIFY_PROBE will be sent for display hot-plug/unplug. + * This event will be received first by the acpi-video driver + * and then it will be notified through acpi_notifier_call_chain(). + */ + if (!strcmp(info->device_class, ACPI_VIDEO_CLASS) && + (info->type == ACPI_VIDEO_NOTIFY_PROBE)) + { + /* + * Intentionally return NOTIFY_BAD to inform acpi-video to stop + * generating keypresses for this event. The default behavior in the + * acpi-video driver for an ACPI_VIDEO_NOTIFY_PROBE, is to send a + * KEY_SWITCHVIDEOMODE evdev event, which causes the desktop settings + * daemons like gnome-setting-daemon to switch mode and this impacts + * the notebooks having external HDMI connected. + */ + return NOTIFY_BAD; + } + + return NOTIFY_DONE; +} + +NV_STATUS NV_API_CALL nv_acpi_get_powersource(NvU32 *ac_plugged) +{ + unsigned long long val; + int status = 0; + + if (!ac_plugged) + return NV_ERR_INVALID_ARGUMENT; + + if (!psr_device_handle) + return NV_ERR_INVALID_ARGUMENT; + + // Check whether or not AC power is plugged in + status = acpi_evaluate_integer(psr_device_handle, "_PSR", NULL, &val); + if (ACPI_FAILURE(status)) + return NV_ERR_GENERIC; + + // AC Power Source Plug State + // - 0x0 unplugged + // - 0x1 plugged + *ac_plugged = (val == 0x1); + + return NV_OK; +} + +#define ACPI_POWER_SOURCE_BUS_CHANGE_EVENT 0x00 +#define ACPI_POWER_SOURCE_CHANGE_EVENT 0x80 +static void nv_acpi_powersource_hotplug_event(acpi_handle handle, u32 event_type, void *data) +{ + /* + * This function will handle acpi events from the linux kernel, used + * to detect notifications from Power Source device + */ + nv_acpi_t *pNvAcpiObject = data; + u32 ac_plugged = 0; + + if (event_type == ACPI_POWER_SOURCE_CHANGE_EVENT || event_type == ACPI_POWER_SOURCE_BUS_CHANGE_EVENT) + { + if (nv_acpi_get_powersource(&ac_plugged) != NV_OK) + return; + + rm_power_source_change_event(pNvAcpiObject->sp, !ac_plugged); + } +} + +static void nv_acpi_nvpcf_event(acpi_handle handle, u32 event_type, void *data) +{ + nv_acpi_t *pNvAcpiObject = data; + + if (event_type == ACPI_NVPCF_EVENT_CHANGE) + { + rm_acpi_nvpcf_notify(pNvAcpiObject->sp); + } + else + { + nv_printf(NV_DBG_INFO,"NVRM: %s: NVPCF event 0x%x is not supported\n", __FUNCTION__, event_type); + } +} + +/* + * End of ACPI event handler functions + */ + +/* Do the necessary allocations and install notifier "handler" on the device-node "device" */ +static nv_acpi_t* nv_install_notifier( + struct acpi_handle *handle, + acpi_notify_handler handler, + void *notifier_data +) +{ + nvidia_stack_t *sp = NULL; + nv_acpi_t *pNvAcpiObject = NULL; + NV_STATUS rmStatus = NV_ERR_GENERIC; + acpi_status status = -1; + + if (!handle) + return NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NULL; + } + + rmStatus = os_alloc_mem((void **) &pNvAcpiObject, sizeof(nv_acpi_t)); + if (rmStatus != NV_OK) + goto return_error; + + os_mem_set((void *)pNvAcpiObject, 0, sizeof(nv_acpi_t)); + + // store a handle reference in our object + pNvAcpiObject->handle = handle; + pNvAcpiObject->sp = sp; + pNvAcpiObject->notifier_data = notifier_data; + + if (handle == psr_device_handle) + { + status = acpi_install_notify_handler(handle, ACPI_ALL_NOTIFY, + handler, pNvAcpiObject); + } + else + { + status = acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY, + handler, pNvAcpiObject); + } + 
if (!ACPI_FAILURE(status)) + { + pNvAcpiObject->notify_handler_installed = 1; + + return pNvAcpiObject; + } + +return_error: + nv_kmem_cache_free_stack(sp); + if (pNvAcpiObject) + os_free_mem((void *)pNvAcpiObject); + + return NULL; +} + +/* Tear-down and remove whatever nv_install_notifier did */ +static void nv_uninstall_notifier(nv_acpi_t *pNvAcpiObject, acpi_notify_handler handler) +{ + acpi_status status; + + if (pNvAcpiObject && pNvAcpiObject->notify_handler_installed) + { + if (pNvAcpiObject->handle == psr_device_handle) + { + status = acpi_remove_notify_handler(pNvAcpiObject->handle, ACPI_ALL_NOTIFY, handler); + } + else + { + status = acpi_remove_notify_handler(pNvAcpiObject->handle, ACPI_DEVICE_NOTIFY, handler); + } + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_methods_uninit: failed to remove event notification handler (%d)!\n", status); + } + else + { + nv_kmem_cache_free_stack(pNvAcpiObject->sp); + os_free_mem((void *)pNvAcpiObject); + } + } + + return; +} + +static void nv_acpi_notify_event(acpi_handle handle, u32 event_type, void *data) +{ + nv_acpi_t *pNvAcpiObject = data; + nv_state_t *nvl = pNvAcpiObject->notifier_data; + + /* + * Function to handle device specific ACPI events such as display hotplug, + * GPS and D-notifier events. + */ + rm_acpi_notify(pNvAcpiObject->sp, NV_STATE_PTR(nvl), event_type); +} + +void nv_acpi_register_notifier(nv_linux_state_t *nvl) +{ + acpi_handle dev_handle = ACPI_HANDLE(nvl->dev); + + /* Install the ACPI notifier corresponding to dGPU ACPI device. */ + if ((nvl->nv_acpi_object == NULL) && + (dev_handle != NULL)) + { + nvl->nv_acpi_object = nv_install_notifier(dev_handle, nv_acpi_notify_event, nvl); + if (nvl->nv_acpi_object == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: nv_acpi_register_notifier: failed to install notifier\n"); + } + } + + nvl->acpi_nb.notifier_call = nv_acpi_notifier_call_chain_handler; + register_acpi_notifier(&nvl->acpi_nb); +} + +void nv_acpi_unregister_notifier(nv_linux_state_t *nvl) +{ + unregister_acpi_notifier(&nvl->acpi_nb); + if (nvl->nv_acpi_object != NULL) + { + nv_uninstall_notifier(nvl->nv_acpi_object, nv_acpi_notify_event); + nvl->nv_acpi_object = NULL; + } +} + +/* + * acpi methods init function. + * check if the NVIF, _DSM and WMMX methods are present in the acpi namespace. + * store NVIF, _DSM and WMMX handle if found. + */ + +void NV_API_CALL nv_acpi_methods_init(NvU32 *handlesPresent) +{ + if (!handlesPresent) // Caller passed us invalid pointer. + return; + + *handlesPresent = 0; + + NV_ACPI_WALK_NAMESPACE(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, nv_acpi_find_methods, NULL, NULL); + + if (nvif_handle) + { + *handlesPresent = NV_ACPI_NVIF_HANDLE_PRESENT; + } + + if (wmmx_handle) + *handlesPresent = *handlesPresent | NV_ACPI_WMMX_HANDLE_PRESENT; + + if (psr_handle) + { + // Since _PSR is not a per-GPU construct we only need to register a + // single notifier for the _PSR event. 
Skip registration for subsequent + // devices + if (psr_nv_acpi_object == NULL) + { + psr_nv_acpi_object = nv_install_notifier(psr_device_handle, nv_acpi_powersource_hotplug_event, NULL); + } + } + + if (nvpcf_handle && (nvpcf_nv_acpi_object == NULL)) + { + nvpcf_nv_acpi_object = nv_install_notifier(nvpcf_device_handle, nv_acpi_nvpcf_event, NULL); + } + + return; +} + +acpi_status nv_acpi_find_methods( + acpi_handle handle, + u32 nest_level, + void *dummy1, + void **dummy2 +) +{ + acpi_handle method_handle; + + if (!acpi_get_handle(handle, "NVIF", &method_handle)) + { + nvif_handle = method_handle; + } + + if (!acpi_get_handle(handle, "WMMX", &method_handle)) + { + wmmx_handle = method_handle; + } + + if (!acpi_get_handle(handle, "_PSR", &method_handle)) + { + psr_handle = method_handle; + psr_device_handle = handle; + } + + if (!acpi_get_handle(handle, "NPCF", &method_handle)) + { + nvpcf_handle = method_handle; + nvpcf_device_handle = handle; + } + + return 0; +} + +void NV_API_CALL nv_acpi_methods_uninit(void) +{ + nvif_handle = NULL; + wmmx_handle = NULL; + + if (psr_nv_acpi_object != NULL) + { + nv_uninstall_notifier(psr_nv_acpi_object, nv_acpi_powersource_hotplug_event); + + psr_handle = NULL; + psr_device_handle = NULL; + psr_nv_acpi_object = NULL; + } + + if (nvpcf_nv_acpi_object != NULL) + { + nv_uninstall_notifier(nvpcf_nv_acpi_object, nv_acpi_nvpcf_event); + + nvpcf_handle = NULL; + nvpcf_device_handle = NULL; + nvpcf_nv_acpi_object = NULL; + } +} + +static NV_STATUS nv_acpi_extract_integer( + const union acpi_object *acpi_object, + void *buffer, + NvU32 buffer_size, + NvU32 *data_size +) +{ + if (acpi_object->type != ACPI_TYPE_INTEGER) + return NV_ERR_INVALID_ARGUMENT; + + if (acpi_object->integer.value & ~0xffffffffULL) + *data_size = sizeof(acpi_object->integer.value); + else + *data_size = sizeof(NvU32); + + if ((buffer_size < sizeof(NvU32)) || + ((buffer_size < sizeof(acpi_object->integer.value)) && + (acpi_object->integer.value & ~0xffffffffULL))) + { + return NV_ERR_BUFFER_TOO_SMALL; + } + + memcpy(buffer, &acpi_object->integer.value, *data_size); + + return NV_OK; +} + +static NV_STATUS nv_acpi_extract_buffer( + const union acpi_object *acpi_object, + void *buffer, + NvU32 buffer_size, + NvU32 *data_size +) +{ + if (acpi_object->type != ACPI_TYPE_BUFFER) + return NV_ERR_INVALID_ARGUMENT; + + *data_size = acpi_object->buffer.length; + + if (buffer_size < acpi_object->buffer.length) + return NV_ERR_BUFFER_TOO_SMALL; + + memcpy(buffer, acpi_object->buffer.pointer, *data_size); + + return NV_OK; +} + +static NV_STATUS nv_acpi_extract_package( + const union acpi_object *acpi_object, + void *buffer, + NvU32 buffer_size, + NvU32 *data_size +) +{ + NV_STATUS status = NV_OK; + NvU32 i, element_size = 0; + + if (acpi_object->type != ACPI_TYPE_PACKAGE) + return NV_ERR_INVALID_ARGUMENT; + + *data_size = 0; + for (i = 0; i < acpi_object->package.count; i++) + { + buffer = ((char *)buffer + element_size); + buffer_size -= element_size; + + status = nv_acpi_extract_object(&acpi_object->package.elements[i], + buffer, buffer_size, &element_size); + if (status != NV_OK) + break; + + *data_size += element_size; + } + + return status; +} + +static NV_STATUS nv_acpi_extract_object( + const union acpi_object *acpi_object, + void *buffer, + NvU32 buffer_size, + NvU32 *data_size +) +{ + NV_STATUS status; + + switch (acpi_object->type) + { + case ACPI_TYPE_INTEGER: + status = nv_acpi_extract_integer(acpi_object, buffer, + buffer_size, data_size); + break; + + case ACPI_TYPE_BUFFER: + status = 
nv_acpi_extract_buffer(acpi_object, buffer, + buffer_size, data_size); + break; + + case ACPI_TYPE_PACKAGE: + status = nv_acpi_extract_package(acpi_object, buffer, + buffer_size, data_size); + break; + + case ACPI_TYPE_ANY: + /* + * ACPI_TYPE_ANY is used to represent a NULL/Uninitialized object which is objectType 0 + * in the ACPI SPEC. This should not be treated as error. + */ + status = NV_OK; + break; + + default: + status = NV_ERR_NOT_SUPPORTED; + } + + return status; +} + +NV_STATUS NV_API_CALL nv_acpi_method( + NvU32 acpi_method, + NvU32 function, + NvU32 subFunction, + void *inParams, + NvU16 inParamSize, + NvU32 *outStatus, + void *outData, + NvU16 *outDataSize +) +{ + NV_STATUS status; + + switch (acpi_method) + { + case NV_EVAL_ACPI_METHOD_NVIF: + status = nv_acpi_nvif_method(function, + subFunction, + inParams, + inParamSize, + outStatus, + outData, + outDataSize); + break; + + case NV_EVAL_ACPI_METHOD_WMMX: + status = nv_acpi_wmmx_method(function, outData, outDataSize); + break; + + default: + status = NV_ERR_NOT_SUPPORTED; + } + + return status; +} + +/* + * This function executes an NVIF ACPI method. + */ +static NV_STATUS nv_acpi_nvif_method( + NvU32 function, + NvU32 subFunction, + void *inParams, + NvU16 inParamSize, + NvU32 *outStatus, + void *outData, + NvU16 *outDataSize +) +{ + acpi_status status; + struct acpi_object_list input; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *nvif = NULL; + union acpi_object nvif_params[3]; + NvU16 localOutDataSize; + NvU8 localInParams[8]; + + if (!nvif_handle) + return NV_ERR_NOT_SUPPORTED; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, + "NVRM: nv_acpi_nvif_method: invalid context!\n"); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + nvif_params[0].integer.type = ACPI_TYPE_INTEGER; + nvif_params[0].integer.value = function; + + nvif_params[1].integer.type = ACPI_TYPE_INTEGER; + nvif_params[1].integer.value = subFunction; + + nvif_params[2].buffer.type = ACPI_TYPE_BUFFER; + + if (inParams && (inParamSize > 0)) + { + nvif_params[2].buffer.length = inParamSize; + nvif_params[2].buffer.pointer = inParams; + } + else + { + memset(localInParams, 0, 8); + nvif_params[2].buffer.length = 8; + nvif_params[2].buffer.pointer = localInParams; + } + + input.count = 3; + input.pointer = nvif_params; + + status = acpi_evaluate_object(nvif_handle, NULL, &input, &output); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_nvif_method: failed to get NVIF data, " + "status 0x%x, function 0x%x, subFunction 0x%x!\n", + status, function, subFunction); + return NV_ERR_GENERIC; + } + + nvif = output.pointer; + if (nvif && (nvif->type == ACPI_TYPE_BUFFER) && (nvif->buffer.length >= 4)) + { + if (outStatus) + { + *outStatus = nvif->buffer.pointer[3] << 24 | + nvif->buffer.pointer[2] << 16 | + nvif->buffer.pointer[1] << 8 | + nvif->buffer.pointer[0]; + } + + if (outData && outDataSize) + { + localOutDataSize = nvif->buffer.length - 4; + if (localOutDataSize <= *outDataSize) + { + *outDataSize = NV_MIN(*outDataSize, localOutDataSize); + memcpy(outData, &nvif->buffer.pointer[4], *outDataSize); + } + else + { + *outDataSize = localOutDataSize; + kfree(output.pointer); + return NV_ERR_BUFFER_TOO_SMALL; + } + } + } + else + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_nvif_method: NVIF data invalid, function 0x%x, " + "subFunction 0x%x!\n", function, subFunction); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + + kfree(output.pointer); + return NV_OK; +} + +static 
NV_STATUS nv_acpi_evaluate_dsm_method( + acpi_handle dev_handle, + NvU8 *pathname, + NvU8 *pAcpiDsmGuid, + NvU32 acpiDsmRev, + NvU32 acpiDsmSubFunction, + void *arg3, + NvU16 arg3Size, + NvBool bArg3Integer, + NvU32 *outStatus, + void *pOutData, + NvU16 *pSize +) +{ + NV_STATUS rmStatus = NV_OK; + acpi_status status; + struct acpi_object_list input; + union acpi_object *dsm = NULL; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object dsm_params[4]; + NvU32 data_size; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, "NVRM: %s: invalid context!\n", __FUNCTION__); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + dsm_params[0].buffer.type = ACPI_TYPE_BUFFER; + dsm_params[0].buffer.length = 0x10; + dsm_params[0].buffer.pointer = pAcpiDsmGuid; + + dsm_params[1].integer.type = ACPI_TYPE_INTEGER; + dsm_params[1].integer.value = acpiDsmRev; + + dsm_params[2].integer.type = ACPI_TYPE_INTEGER; + dsm_params[2].integer.value = acpiDsmSubFunction; + + if (bArg3Integer) + { + dsm_params[3].integer.type = ACPI_TYPE_INTEGER; + dsm_params[3].integer.value = *((NvU32 *)arg3); + } + else + { + dsm_params[3].buffer.type = ACPI_TYPE_BUFFER; + dsm_params[3].buffer.length = arg3Size; + dsm_params[3].buffer.pointer = arg3; + } + + // parameters for dsm calls (GUID, rev, subfunction, data) + input.count = 4; + input.pointer = dsm_params; + + status = acpi_evaluate_object(dev_handle, pathname, &input, &output); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: failed to evaluate _DSM method!\n", __FUNCTION__); + return NV_ERR_OPERATING_SYSTEM; + } + + dsm = output.pointer; + if (dsm != NULL) + { + if (outStatus) + { + *outStatus = dsm->buffer.pointer[3] << 24 | + dsm->buffer.pointer[2] << 16 | + dsm->buffer.pointer[1] << 8 | + dsm->buffer.pointer[0]; + } + + rmStatus = nv_acpi_extract_object(dsm, pOutData, *pSize, &data_size); + *pSize = data_size; + + kfree(output.pointer); + } + else + { + *pSize = 0; + } + + if (rmStatus != NV_OK) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: DSM data invalid!\n", __FUNCTION__); + } + + return rmStatus; +} + +/* + * This function executes a _DSM ACPI method. + */ +NV_STATUS NV_API_CALL nv_acpi_dsm_method( + nv_state_t *nv, + NvU8 *pAcpiDsmGuid, + NvU32 acpiDsmRev, + NvBool acpiNvpcfDsmFunction, + NvU32 acpiDsmSubFunction, + void *pInParams, + NvU16 inParamSize, + NvU32 *outStatus, + void *pOutData, + NvU16 *pSize +) +{ + NV_STATUS rmStatus = NV_ERR_OPERATING_SYSTEM; + NvU8 *argument3 = NULL; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + acpi_handle dev_handle = ACPI_HANDLE(nvl->dev); + NvU8 *pathname = "_DSM"; + + if (!dev_handle) + return NV_ERR_INVALID_ARGUMENT; + + if ((!pInParams) || (inParamSize > NV_MAX_ACPI_DSM_PARAM_SIZE) || (!pOutData) || (!pSize)) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: invalid argument(s)!\n", __FUNCTION__); + return NV_ERR_INVALID_ARGUMENT; + } + + rmStatus = os_alloc_mem((void **)&argument3, inParamSize); + if (rmStatus != NV_OK) + return rmStatus; + + memcpy(argument3, pInParams, inParamSize); + + if (acpiNvpcfDsmFunction) + { + // + // acpi_evaluate_object() can operate with either valid object pathname or + // valid object handle. 
For NVPCF DSM function, use valid pathname as we do + // not have device handle for NVPCF device + // + dev_handle = NULL; + pathname = "\\_SB.NPCF._DSM"; + } + + rmStatus = nv_acpi_evaluate_dsm_method(dev_handle, pathname, pAcpiDsmGuid, acpiDsmRev, + acpiDsmSubFunction, argument3, inParamSize, + NV_FALSE, NULL, pOutData, pSize); + + os_free_mem(argument3); + return rmStatus; +} + +/* + * This function executes a _DDC ACPI method. + */ +NV_STATUS NV_API_CALL nv_acpi_ddc_method( + nv_state_t *nv, + void *pEdidBuffer, + NvU32 *pSize, + NvBool bReadMultiBlock +) +{ + acpi_status status; + union acpi_object *ddc = NULL; + NvU32 i, largestEdidSize; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + acpi_handle dev_handle = ACPI_HANDLE(nvl->dev); + acpi_handle lcd_dev_handle = NULL; + acpi_handle handle = NULL; + + if (!dev_handle) + return NV_ERR_INVALID_ARGUMENT; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: invalid context!\n", + __FUNCTION__); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + while (lcd_dev_handle == NULL) + { + unsigned long long device_id = 0; + + status = acpi_get_next_object(ACPI_TYPE_DEVICE, dev_handle, + handle, &handle); + if (ACPI_FAILURE(status) || (handle == NULL)) + break; + + status = acpi_evaluate_integer(handle, "_ADR", NULL, &device_id); + if (ACPI_FAILURE(status)) + /* Couldnt query device_id for this device */ + continue; + + switch (device_id & 0xffff) { + case 0x0110: + case 0x0118: + case 0x0400: + case 0xA420: + lcd_dev_handle = handle; + nv_printf(NV_DBG_INFO, "NVRM: %s Found LCD: %llx\n", + __FUNCTION__, device_id); + break; + default: + break; + } + } + + if (lcd_dev_handle == NULL) + { + nv_printf(NV_DBG_INFO, "NVRM: %s LCD not found\n", __FUNCTION__); + return NV_ERR_GENERIC; + } + + // + // As per ACPI Spec 3.0: + // ARG0 = 0x1 for 128 bytes edid buffer + // ARG0 = 0x2 for 256 bytes edid buffer + // + + largestEdidSize = bReadMultiBlock ? 2 : 1; + + for (i = largestEdidSize; i >= 1; i--) + { + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object ddc_arg0 = { ACPI_TYPE_INTEGER }; + struct acpi_object_list input = { 1, &ddc_arg0 }; + + ddc_arg0.integer.value = i; + status = acpi_evaluate_object(lcd_dev_handle, "_DDC", &input, &output); + if (ACPI_SUCCESS(status)) { + ddc = output.pointer; + break; + } + } + + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: failed status: %08x \n", + __FUNCTION__, + status); + return NV_ERR_GENERIC; + } + else + { + if (ddc && (ddc->type == ACPI_TYPE_BUFFER) && (ddc->buffer.length > 0)) + { + if (ddc->buffer.length <= *pSize) + { + *pSize = NV_MIN(*pSize, ddc->buffer.length); + memcpy(pEdidBuffer, ddc->buffer.pointer, *pSize); + } + else + { + kfree(ddc); + return NV_ERR_BUFFER_TOO_SMALL; + } + } + } + + kfree(ddc); + return NV_OK; +} + +/* + * This function executes a _ROM ACPI method. 
+ */ +NV_STATUS NV_API_CALL nv_acpi_rom_method( + nv_state_t *nv, + NvU32 *pInData, + NvU32 *pOutData +) +{ + acpi_status status; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *rom; + union acpi_object rom_arg[2]; + struct acpi_object_list input = { 2, rom_arg }; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + acpi_handle dev_handle = ACPI_HANDLE(nvl->dev); + uint32_t offset, length; + + if (!dev_handle) + return NV_ERR_INVALID_ARGUMENT; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: invalid context!\n", __FUNCTION__); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + offset = pInData[0]; + length = pInData[1]; + + rom_arg[0].type = ACPI_TYPE_INTEGER; + rom_arg[0].integer.value = offset; + rom_arg[1].type = ACPI_TYPE_INTEGER; + rom_arg[1].integer.value = length; + + status = acpi_evaluate_object(dev_handle, "_ROM", &input, &output); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: failed to evaluate _ROM method!\n", __FUNCTION__); + return NV_ERR_GENERIC; + } + else + { + rom = output.pointer; + + if ((rom != NULL) && (rom->type == ACPI_TYPE_BUFFER) && + (rom->buffer.length >= length)) + { + memcpy(pOutData, rom->buffer.pointer, length); + } + else + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: Invalid _ROM data\n", __FUNCTION__); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + } + + kfree(output.pointer); + return NV_OK; +} + +/* + * This function executes a _DOD ACPI method. + */ +NV_STATUS NV_API_CALL nv_acpi_dod_method( + nv_state_t *nv, + NvU32 *pOutData, + NvU32 *pSize +) +{ + acpi_status status; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *dod; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + acpi_handle dev_handle = ACPI_HANDLE(nvl->dev); + NvU32 i, count = (*pSize / sizeof(NvU32)); + + if (!dev_handle) + return NV_ERR_INVALID_ARGUMENT; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: invalid context!\n", __FUNCTION__); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + status = acpi_evaluate_object(dev_handle, "_DOD", NULL, &output); + + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: failed to evaluate _DOD method!\n", __FUNCTION__); + return NV_ERR_GENERIC; + } + else + { + dod = output.pointer; + *pSize = 0; + + if ((dod != NULL) && (dod->type == ACPI_TYPE_PACKAGE) && + (dod->package.count <= count)) + { + for (i = 0; i < dod->package.count; i++) + { + if (dod->package.elements[i].type != ACPI_TYPE_INTEGER) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: _DOD entry invalid!\n", __FUNCTION__); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + + pOutData[i] = dod->package.elements[i].integer.value; + *pSize += sizeof(NvU32); + } + } + else + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: _DOD data too large!\n", __FUNCTION__); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + } + + kfree(output.pointer); + return NV_OK; +} + +/* + * This function executes a WMMX ACPI method. 
+ */ +static NV_STATUS nv_acpi_wmmx_method( + NvU32 arg2, + NvU8 *outData, + NvU16 *outDataSize +) +{ + acpi_status status; + struct acpi_object_list input; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *mmx = NULL; + union acpi_object mmx_params[3]; + + if (!wmmx_handle) + { + *outDataSize = 0; + return NV_ERR_NOT_SUPPORTED; + } + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, + "NVRM: nv_acpi_wmmx_method: invalid context!\n"); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + /* argument 0 and argument 1 are not used in WMMX method, passing 0 */ + + mmx_params[0].integer.type = ACPI_TYPE_INTEGER; + mmx_params[0].integer.value = 0; + + mmx_params[1].integer.type = ACPI_TYPE_INTEGER; + mmx_params[1].integer.value = 0; + + mmx_params[2].integer.type = ACPI_TYPE_INTEGER; + mmx_params[2].integer.value = arg2; + + input.count = 3; + input.pointer = mmx_params; + + status = acpi_evaluate_object(wmmx_handle, NULL, &input, &output); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_wmmx_method: failed to get WMMX data, " + "status 0x%x!\n", status); + return NV_ERR_GENERIC; + } + + mmx = output.pointer; + if (mmx && (mmx->type == ACPI_TYPE_BUFFER) && (mmx->buffer.length > 0)) + { + if (outData && outDataSize) + { + if (mmx->buffer.length <= *outDataSize) + { + *outDataSize = NV_MIN(*outDataSize, mmx->buffer.length); + memcpy(outData, mmx->buffer.pointer, *outDataSize); + } + else + { + kfree(output.pointer); + return NV_ERR_BUFFER_TOO_SMALL; + } + } + } + else + { + nv_printf(NV_DBG_ERRORS, + "NVRM: nv_acpi_wmmx_method: WMMX data invalid.\n"); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + + kfree(output.pointer); + return NV_OK; +} + +NvBool nv_acpi_power_resource_method_present( + struct pci_dev *pdev +) +{ + acpi_handle handle = ACPI_HANDLE(&pdev->dev); + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *object_package, *object_reference; + acpi_status status; + + if (!handle) + return NV_FALSE; + + status = acpi_evaluate_object(handle, "_PR3", NULL, &buf); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO,"NVRM: Failed to evaluate _PR3 object\n"); + return NV_FALSE; + } + + if (!buf.pointer) + { + nv_printf(NV_DBG_INFO, "NVRM: output buffer pointer is null" + " for _PR3 method\n"); + return NV_FALSE; + } + + object_package = buf.pointer; + + /* + * _PR3 object should be of type package and + * it should contain only one reference + */ + if ((object_package->type != ACPI_TYPE_PACKAGE) && + (object_package->package.count != 0x1)) + { + nv_printf(NV_DBG_ERRORS,"NVRM: _PR3 object is not a type 'package'\n"); + return NV_FALSE; + } + + object_reference = object_package->package.elements; + + /* Check for the reference and the actual type of the reference. */ + if ((object_reference->reference.actual_type != ACPI_TYPE_POWER) && + (object_reference->type != ACPI_TYPE_LOCAL_REFERENCE)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: _PR3 object does not contain POWER Reference\n"); + return NV_FALSE; + } + return NV_TRUE; +} + +/* + * This function executes MUX ACPI methods. 
+ */ +NV_STATUS NV_API_CALL nv_acpi_mux_method( + nv_state_t *nv, + NvU32 *pInOut, + NvU32 muxAcpiId, + const char *pMethodName +) +{ + acpi_status status; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *mux = NULL; + union acpi_object mux_arg = { ACPI_TYPE_INTEGER }; + struct acpi_object_list input = { 1, &mux_arg }; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + acpi_handle dev_handle = ACPI_HANDLE(nvl->dev); + acpi_handle mux_dev_handle = NULL; + acpi_handle handle = NULL; + unsigned long long device_id = 0; + + if ((strcmp(pMethodName, "MXDS") != 0) + && (strcmp(pMethodName, "MXDM") != 0)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: %s: Unsupported ACPI method %s\n", + __FUNCTION__, pMethodName); + return NV_ERR_NOT_SUPPORTED; + } + else + { + nv_printf(NV_DBG_INFO, "NVRM: %s: Call for %s ACPI method \n", + __FUNCTION__, pMethodName); + } + + if (!dev_handle) + return NV_ERR_INVALID_ARGUMENT; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, "NVRM: %s: invalid context!\n", __FUNCTION__); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + while (mux_dev_handle == NULL) + { + status = acpi_get_next_object(ACPI_TYPE_DEVICE, dev_handle, + handle, &handle); + if (ACPI_FAILURE(status) || (handle == NULL)) + break; + + status = acpi_evaluate_integer(handle, "_ADR", NULL, &device_id); + if (ACPI_SUCCESS(status) && (device_id == muxAcpiId)) + mux_dev_handle = handle; + } + + if (mux_dev_handle == NULL) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s Mux device handle not found\n", __FUNCTION__); + return NV_ERR_GENERIC; + } + + mux_arg.integer.type = ACPI_TYPE_INTEGER; + mux_arg.integer.value = (NvU64) *pInOut; + + status = acpi_evaluate_object(mux_dev_handle, (acpi_string)pMethodName, + &input, &output); + + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, "NVRM: %s: Failed to evaluate %s method!\n", + __FUNCTION__, pMethodName); + return NV_ERR_GENERIC; + } + else + { + mux = output.pointer; + + if (mux && (mux->type == ACPI_TYPE_INTEGER)) + { + *pInOut = mux->integer.value; + } + else + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: Invalid MUX data\n", __FUNCTION__); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + } + + kfree(output.pointer); + return NV_OK; +} + +static acpi_status nv_acpi_find_battery_info( + acpi_handle handle, + NvBool bUseBix +) +{ + acpi_status status = AE_OK; + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *object_package; + NvU32 battery_technology_offset; + + status = acpi_evaluate_object(handle, NULL, NULL, &buf); + + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, "NVRM: Failed to evaluate battery's object\n"); + return AE_OK; + } + + if (!buf.pointer) + { + nv_printf(NV_DBG_INFO, "NVRM: Battery object output buffer is null\n"); + return AE_OK; + } + + object_package = buf.pointer; + + if (object_package->type != ACPI_TYPE_PACKAGE) + { + nv_printf(NV_DBG_INFO, "NVRM: Battery method output is not package\n"); + return AE_OK; + } + + if (bUseBix) + { + battery_technology_offset = BIX_BATTERY_TECHNOLOGY_OFFSET; + } + else + { + battery_technology_offset = BIF_BATTERY_TECHNOLOGY_OFFSET; + } + + /* + * Only checking here for Battery technology type. + * Other fields like Battery Model/Serial number could also be checked but + * driver need to support the case where user has removed battery from the + * system. + * _STA method on the battery device handle couldn't be used due to the same + * reason. 
* Hence we just check whether the battery technology of the slot is rechargeable.
+     */
+
+    if ((object_package->package.elements[battery_technology_offset].type != ACPI_TYPE_INTEGER) ||
+        (object_package->package.elements[battery_technology_offset].integer.value != BATTERY_RECHARGABLE))
+    {
+        return AE_OK;
+    }
+
+    battery_present = NV_TRUE;
+
+    /* Stop traversing the acpi tree. */
+    return AE_CTRL_TERMINATE;
+}
+
+static acpi_status nv_acpi_find_battery_device(
+    acpi_handle handle,
+    u32 nest_level,
+    void *dummy1,
+    void **dummy2
+)
+{
+    acpi_handle bif_method_handle;
+    acpi_handle bix_method_handle;
+    acpi_status status = AE_OK;
+
+    // Find the Battery Information (Extended) method (_BIX or _BIF), then check the battery type.
+    if (!acpi_get_handle(handle, "_BIX", &bix_method_handle))
+    {
+        status = nv_acpi_find_battery_info(bix_method_handle, NV_TRUE/*bUseBix*/);
+    }
+
+    if ((battery_present == NV_FALSE) &&
+        !acpi_get_handle(handle, "_BIF", &bif_method_handle))
+    {
+        status = nv_acpi_find_battery_info(bif_method_handle, NV_FALSE/*bUseBix*/);
+    }
+
+    return status;
+}
+
+NvBool NV_API_CALL nv_acpi_is_battery_present(void)
+{
+    NV_ACPI_WALK_NAMESPACE(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+                           nv_acpi_find_battery_device, NULL, NULL);
+
+    if (battery_present == NV_TRUE)
+    {
+        return NV_TRUE;
+    }
+
+    return NV_FALSE;
+}
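+/*
+ * For reference (behavior of the helpers above): the namespace walk invokes
+ * nv_acpi_find_battery_device() for every ACPI device; nv_acpi_find_battery_info()
+ * returning AE_CTRL_TERMINATE stops the walk as soon as one rechargeable
+ * battery slot has been found, so battery_present is decided at most once
+ * per walk rather than re-derived for every device in the tree.
+ */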
+NV_STATUS NV_API_CALL nv_acpi_get_powersource(NvU32 *ac_plugged)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+void nv_acpi_register_notifier(nv_linux_state_t *nvl)
+{
+    return;
+}
+
+void nv_acpi_unregister_notifier(nv_linux_state_t *nvl)
+{
+    return;
+}
+
+NV_STATUS NV_API_CALL nv_acpi_mux_method(
+    nv_state_t *nv,
+    NvU32 *pInOut,
+    NvU32 muxAcpiId,
+    const char *pMethodName
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NvBool NV_API_CALL nv_acpi_is_battery_present(void)
+{
+    return NV_FALSE;
+}
+#endif
diff --git a/kernel-open/nvidia/nv-backlight.c b/kernel-open/nvidia/nv-backlight.c
new file mode 100644
index 0000000..a4f2d04
--- /dev/null
+++ b/kernel-open/nvidia/nv-backlight.c
@@ -0,0 +1,81 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include <linux/backlight.h>
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+NV_STATUS NV_API_CALL nv_get_tegra_brightness_level
+(
+    nv_state_t *nv,
+    NvU32 *brightness
+)
+{
+#ifdef NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct backlight_device *bd;
+
+    bd = get_backlight_device_by_name(nvl->backlight.device_name);
+    if (bd == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "Unable to get backlight device\n");
+        return NV_ERR_GENERIC;
+    }
+
+    *brightness = bd->props.brightness;
+
+    return NV_OK;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+NV_STATUS NV_API_CALL nv_set_tegra_brightness_level
+(
+    nv_state_t *nv,
+    NvU32 brightness
+)
+{
+#ifdef NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct backlight_device *bd;
+
+    bd = get_backlight_device_by_name(nvl->backlight.device_name);
+    if (bd == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "Unable to get backlight device\n");
+        return NV_ERR_GENERIC;
+    }
+
+    bd->props.brightness = brightness;
+
+    backlight_update_status(bd);
+
+    return NV_OK;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
diff --git a/kernel-open/nvidia/nv-bpmp.c b/kernel-open/nvidia/nv-bpmp.c
new file mode 100644
index 0000000..9919c1b
--- /dev/null
+++ b/kernel-open/nvidia/nv-bpmp.c
@@ -0,0 +1,108 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+#if IS_ENABLED(CONFIG_TEGRA_BPMP)
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#endif // IS_ENABLED(CONFIG_TEGRA_BPMP)
+
+/*!
+ * @brief Sends an MRQ (message-request) to BPMP
+ *
+ * The request, response, and ret parameters of this function correspond to the
+ * components of the tegra_bpmp_message struct, which BPMP uses to receive
+ * MRQs.
+ *
+ * @param[in]  nv                  Per GPU Linux state
+ * @param[in]  mrq                 MRQ_xxx ID specifying what is requested
+ * @param[in]  request_data        Pointer to request input data
+ * @param[in]  request_data_size   Size of structure pointed to by request_data
+ * @param[out] response_data       Pointer to response output data
+ * @param[in]  response_data_size  Size of structure pointed to by response_data
+ * @param[out] ret                 MRQ return code (from "ret" element of
+ *                                 tegra_bpmp_message struct)
+ * @param[out] api_ret             Return code from the tegra_bpmp_transfer call
+ *
+ * @returns NV_OK if successful,
+ *          NV_ERR_NOT_SUPPORTED if the functionality is not available,
+ *          NV_ERR_INVALID_POINTER if the tegra_bpmp struct pointer could not
+ *          be obtained from nv, or
+ *          NV_ERR_GENERIC if the tegra_bpmp_transfer call failed (see api_ret
+ *          for the Linux error code).
+ */
+NV_STATUS NV_API_CALL
+nv_bpmp_send_mrq
+(
+    nv_state_t *nv,
+    NvU32 mrq,
+    const void *request_data,
+    NvU32 request_data_size,
+    void *response_data,
+    NvU32 response_data_size,
+    NvS32 *ret,
+    NvS32 *api_ret
+)
+{
+#if IS_ENABLED(CONFIG_TEGRA_BPMP) && NV_SUPPORTS_PLATFORM_DEVICE
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct tegra_bpmp *bpmp;
+    struct tegra_bpmp_message msg;
+
+    bpmp = tegra_bpmp_get(nvl->dev);
+    if (IS_ERR(bpmp))
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "NVRM: Error getting bpmp struct: %ld\n",
+                  PTR_ERR(bpmp));
+        return NV_ERR_INVALID_POINTER;
+    }
+
+    // Send the MRQ request to BPMP.
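+    //
+    // tegra_bpmp_transfer() copies the tx buffer to the BPMP firmware,
+    // blocks until the firmware replies (so it must not be called from
+    // atomic context), and on success fills the rx buffer and msg.rx.ret.
+    //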
+    memset(&msg, 0, sizeof(msg));
+    msg.mrq = mrq;
+    msg.tx.data = request_data;
+    msg.tx.size = (size_t) request_data_size;
+    msg.rx.data = response_data;
+    msg.rx.size = (size_t) response_data_size;
+
+    *api_ret = (NvS32) tegra_bpmp_transfer(bpmp, &msg);
+
+    /* Balance the earlier tegra_bpmp_get() before returning. */
+    tegra_bpmp_put(bpmp);
+
+    if (*api_ret == 0)
+    {
+        *ret = (NvS32) msg.rx.ret;
+        return NV_OK;
+    }
+    else
+    {
+        return NV_ERR_GENERIC;
+    }
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
diff --git a/kernel-open/nvidia/nv-caps-imex.c b/kernel-open/nvidia/nv-caps-imex.c
new file mode 100644
index 0000000..aff658f
--- /dev/null
+++ b/kernel-open/nvidia/nv-caps-imex.c
@@ -0,0 +1,240 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#include "nv-linux.h" +#include "nv-caps-imex.h" + +extern int NVreg_ImexChannelCount; +extern int NVreg_CreateImexChannel0; + +static int nv_caps_imex_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static int nv_caps_imex_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static struct file_operations g_nv_caps_imex_fops = +{ + .owner = THIS_MODULE, + .open = nv_caps_imex_open, + .release = nv_caps_imex_release +}; + +static struct class *g_nv_caps_imex_class; + +static struct +{ + NvBool initialized; + struct cdev cdev; + dev_t channel0; + + struct device *dev_channel0; +} g_nv_caps_imex; + +int NV_API_CALL nv_caps_imex_channel_get(int fd) +{ +#if NV_FILESYSTEM_ACCESS_AVAILABLE + struct file *file; + struct inode *inode; + int channel = -1; + + file = fget(fd); + if (file == NULL) + { + return channel; + } + + inode = NV_FILE_INODE(file); + if (inode == NULL) + { + goto out; + } + + /* Make sure the fd belongs to the nv-caps-imex-drv */ + if (file->f_op != &g_nv_caps_imex_fops) + { + goto out; + } + + /* minor number is same as channel */ + channel = MINOR(inode->i_rdev); + +out: + fput(file); + + return channel; +#else + return -1; +#endif +} + +int NV_API_CALL nv_caps_imex_channel_count(void) +{ + return NVreg_ImexChannelCount; +} + +static void nv_caps_imex_remove_channel0(void) +{ + if (g_nv_caps_imex_class == NULL) + return; + + device_destroy(g_nv_caps_imex_class, g_nv_caps_imex.channel0); + + class_destroy(g_nv_caps_imex_class); + + g_nv_caps_imex_class = NULL; +} + +#if defined(NV_CLASS_DEVNODE_HAS_CONST_ARG) +static char *nv_caps_imex_devnode(const struct device *dev, umode_t *mode) +#else +static char *nv_caps_imex_devnode(struct device *dev, umode_t *mode) +#endif +{ + if (!mode) + return NULL; + + // + // Handle only world visible channel0, otherwise let the kernel apply + // defaults (root only access) + // + if (dev->devt == g_nv_caps_imex.channel0) + *mode = S_IRUGO | S_IWUGO; + + return NULL; +} + +static int nv_caps_imex_add_channel0(void) +{ + +#if defined(NV_CLASS_CREATE_HAS_NO_OWNER_ARG) + g_nv_caps_imex_class = class_create("nvidia-caps-imex-channels"); +#else + g_nv_caps_imex_class = class_create(THIS_MODULE, "nvidia-caps-imex-channels"); +#endif + + if (IS_ERR(g_nv_caps_imex_class)) + { + nv_printf(NV_DBG_ERRORS, "nv-caps-imex failed to register class.\n"); + return -1; + } + + // Install udev callback + g_nv_caps_imex_class->devnode = nv_caps_imex_devnode; + + g_nv_caps_imex.dev_channel0 = device_create(g_nv_caps_imex_class, NULL, + g_nv_caps_imex.channel0, NULL, + "nvidia-caps-imex-channels!channel%d", 0); + if (IS_ERR(g_nv_caps_imex.dev_channel0)) + { + nv_printf(NV_DBG_ERRORS, "nv-caps-imex failed to create channel0.\n"); + class_destroy(g_nv_caps_imex_class); + g_nv_caps_imex_class = NULL; + return -1; + } + + nv_printf(NV_DBG_ERRORS, "nv-caps-imex channel0 created. 
" + "Make sure you are aware of the IMEX security model.\n"); + + return 0; +} + +int NV_API_CALL nv_caps_imex_init(void) +{ + int rc; + + if (g_nv_caps_imex.initialized) + { + nv_printf(NV_DBG_ERRORS, "nv-caps-imex is already initialized.\n"); + return -EBUSY; + } + + if (NVreg_ImexChannelCount == 0) + { + nv_printf(NV_DBG_INFO, "nv-caps-imex is disabled.\n"); + return 0; + } + + g_nv_caps_imex_class = NULL; + g_nv_caps_imex.dev_channel0 = NULL; + + rc = alloc_chrdev_region(&g_nv_caps_imex.channel0, 0, + NVreg_ImexChannelCount, + "nvidia-caps-imex-channels"); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "nv-caps-imex failed to create cdev.\n"); + return rc; + } + + cdev_init(&g_nv_caps_imex.cdev, &g_nv_caps_imex_fops); + + g_nv_caps_imex.cdev.owner = THIS_MODULE; + + rc = cdev_add(&g_nv_caps_imex.cdev, g_nv_caps_imex.channel0, + NVreg_ImexChannelCount); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "nv-caps-imex failed to add cdev.\n"); + goto cdev_add_fail; + } + + if (NVreg_CreateImexChannel0 == 1) + { + rc = nv_caps_imex_add_channel0(); + if (rc < 0) + goto channel0_add_fail; + } + + g_nv_caps_imex.initialized = NV_TRUE; + + return 0; + +channel0_add_fail: + cdev_del(&g_nv_caps_imex.cdev); + +cdev_add_fail: + unregister_chrdev_region(g_nv_caps_imex.channel0, NVreg_ImexChannelCount); + + return rc; +} + +void NV_API_CALL nv_caps_imex_exit(void) +{ + if (!g_nv_caps_imex.initialized) + { + return; + } + + nv_caps_imex_remove_channel0(); + + cdev_del(&g_nv_caps_imex.cdev); + + unregister_chrdev_region(g_nv_caps_imex.channel0, NVreg_ImexChannelCount); + + g_nv_caps_imex.initialized = NV_FALSE; +} diff --git a/kernel-open/nvidia/nv-caps-imex.h b/kernel-open/nvidia/nv-caps-imex.h new file mode 100644 index 0000000..2b03642 --- /dev/null +++ b/kernel-open/nvidia/nv-caps-imex.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NV_CAPS_IMEX_H_ +#define _NV_CAPS_IMEX_H_ + +#include + +int NV_API_CALL nv_caps_imex_init(void); +void NV_API_CALL nv_caps_imex_exit(void); +int NV_API_CALL nv_caps_imex_channel_get(int fd); +int NV_API_CALL nv_caps_imex_channel_count(void); + +#endif /* _NV_CAPS_IMEX_H_ */ diff --git a/kernel-open/nvidia/nv-caps.c b/kernel-open/nvidia/nv-caps.c new file mode 100644 index 0000000..b8a63dc --- /dev/null +++ b/kernel-open/nvidia/nv-caps.c @@ -0,0 +1,878 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-linux.h" +#include "nv-caps.h" +#include "nv-procfs.h" +#include "nv-hash.h" + +#include "nvmisc.h" + +extern int NVreg_ModifyDeviceFiles; + +/* sys_close() or __close_fd() */ +#include + +#define NV_CAP_DRV_MINOR_COUNT 8192 + +/* Hash table with 512 buckets */ +#define NV_CAP_HASH_BITS 9 +NV_DECLARE_HASHTABLE(g_nv_cap_hash_table, NV_CAP_HASH_BITS); + +#define NV_CAP_HASH_SIZE NV_HASH_SIZE(g_nv_cap_hash_table) + +#define nv_cap_hash_key(path) (nv_string_hash(path) % NV_CAP_HASH_SIZE) + +typedef struct nv_cap_table_entry +{ + /* name must be the first element */ + const char *name; + int minor; + struct hlist_node hlist; +} nv_cap_table_entry_t; + +#define NV_CAP_NUM_ENTRIES(_table) (NV_ARRAY_ELEMENTS(_table)) + +static nv_cap_table_entry_t g_nv_cap_nvlink_table[] = +{ + {"/driver/nvidia-nvlink/capabilities/fabric-mgmt"} +}; + +static nv_cap_table_entry_t g_nv_cap_mig_table[] = +{ + {"/driver/nvidia/capabilities/mig/config"}, + {"/driver/nvidia/capabilities/mig/monitor"} +}; + +static nv_cap_table_entry_t g_nv_cap_sys_table[] = +{ +}; + +#define NV_CAP_MIG_CI_ENTRIES(_gi) \ + {_gi "/ci0/access"}, \ + {_gi "/ci1/access"}, \ + {_gi "/ci2/access"}, \ + {_gi "/ci3/access"}, \ + {_gi "/ci4/access"}, \ + {_gi "/ci5/access"}, \ + {_gi "/ci6/access"}, \ + {_gi "/ci7/access"} + +#define NV_CAP_MIG_GI_ENTRIES(_gpu) \ + {_gpu "/gi0/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi0"), \ + {_gpu "/gi1/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi1"), \ + {_gpu "/gi2/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi2"), \ + {_gpu "/gi3/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi3"), \ + {_gpu "/gi4/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi4"), \ + {_gpu "/gi5/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi5"), \ + {_gpu "/gi6/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi6"), \ + {_gpu "/gi7/access"}, \ + 
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi7"), \ + {_gpu "/gi8/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi8"), \ + {_gpu "/gi9/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi9"), \ + {_gpu "/gi10/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi10"), \ + {_gpu "/gi11/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi11"), \ + {_gpu "/gi12/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi12"), \ + {_gpu "/gi13/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi13"), \ + {_gpu "/gi14/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi14") + +static nv_cap_table_entry_t g_nv_cap_mig_gpu_table[] = +{ + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu0/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu1/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu2/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu3/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu4/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu5/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu6/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu7/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu8/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu9/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu10/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu11/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu12/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu13/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu14/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu15/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu16/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu17/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu18/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu19/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu20/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu21/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu22/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu23/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu24/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu25/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu26/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu27/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu28/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu29/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu30/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu31/mig") +}; + +struct nv_cap +{ + char *path; + char *name; + int minor; + int permissions; + int modify; + struct proc_dir_entry *parent; + struct proc_dir_entry *entry; +}; + +#define NV_CAP_PROCFS_WRITE_BUF_SIZE 128 + +typedef struct nv_cap_file_private +{ + int minor; + int permissions; + int modify; + char buffer[NV_CAP_PROCFS_WRITE_BUF_SIZE]; + off_t offset; +} nv_cap_file_private_t; + +struct +{ + NvBool initialized; + struct cdev cdev; + dev_t devno; +} g_nv_cap_drv; + +#define NV_CAP_PROCFS_DIR "driver/nvidia-caps" +#define NV_CAP_NAME_BUF_SIZE 128 + +static struct proc_dir_entry *nv_cap_procfs_dir; + +static int nv_procfs_read_nvlink_minors(struct seq_file *s, void *v) +{ + int i, count; + char name[NV_CAP_NAME_BUF_SIZE]; + + count = NV_CAP_NUM_ENTRIES(g_nv_cap_nvlink_table); + for (i = 0; i < count; i++) + { + if (sscanf(g_nv_cap_nvlink_table[i].name, + 
"/driver/nvidia-nvlink/capabilities/%s", name) == 1) + { + name[sizeof(name) - 1] = '\0'; + seq_printf(s, "%s %d\n", name, g_nv_cap_nvlink_table[i].minor); + } + } + + return 0; +} + +static int nv_procfs_read_sys_minors(struct seq_file *s, void *v) +{ + int i, count; + char name[NV_CAP_NAME_BUF_SIZE]; + + count = NV_CAP_NUM_ENTRIES(g_nv_cap_sys_table); + for (i = 0; i < count; i++) + { + if (sscanf(g_nv_cap_sys_table[i].name, + "/driver/nvidia/capabilities/%s", name) == 1) + { + name[sizeof(name) - 1] = '\0'; + seq_printf(s, "%s %d\n", name, g_nv_cap_sys_table[i].minor); + } + } + + return 0; +} + +static int nv_procfs_read_mig_minors(struct seq_file *s, void *v) +{ + int i, count, gpu; + char name[NV_CAP_NAME_BUF_SIZE]; + + count = NV_CAP_NUM_ENTRIES(g_nv_cap_mig_table); + for (i = 0; i < count; i++) + { + if (sscanf(g_nv_cap_mig_table[i].name, + "/driver/nvidia/capabilities/mig/%s", name) == 1) + { + name[sizeof(name) - 1] = '\0'; + seq_printf(s, "%s %d\n", name, g_nv_cap_mig_table[i].minor); + } + } + + count = NV_CAP_NUM_ENTRIES(g_nv_cap_mig_gpu_table); + for (i = 0; i < count; i++) + { + if (sscanf(g_nv_cap_mig_gpu_table[i].name, + "/driver/nvidia/capabilities/gpu%d/mig/%s", &gpu, name) == 2) + { + name[sizeof(name) - 1] = '\0'; + seq_printf(s, "gpu%d/%s %d\n", + gpu, name, g_nv_cap_mig_gpu_table[i].minor); + } + } + + return 0; +} + +NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(nvlink_minors, nv_system_pm_lock); + +NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(mig_minors, nv_system_pm_lock); + +NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(sys_minors, nv_system_pm_lock); + +static void nv_cap_procfs_exit(void) +{ + if (!nv_cap_procfs_dir) + { + return; + } + +#if defined(CONFIG_PROC_FS) + proc_remove(nv_cap_procfs_dir); +#endif + nv_cap_procfs_dir = NULL; +} + +static int nv_cap_procfs_init(void) +{ + static struct proc_dir_entry *file_entry; + + nv_cap_procfs_dir = NV_CREATE_PROC_DIR(NV_CAP_PROCFS_DIR, NULL); + if (nv_cap_procfs_dir == NULL) + { + return -EACCES; + } + + file_entry = NV_CREATE_PROC_FILE("mig-minors", nv_cap_procfs_dir, + mig_minors, NULL); + if (file_entry == NULL) + { + goto cleanup; + } + + file_entry = NV_CREATE_PROC_FILE("nvlink-minors", nv_cap_procfs_dir, + nvlink_minors, NULL); + if (file_entry == NULL) + { + goto cleanup; + } + + file_entry = NV_CREATE_PROC_FILE("sys-minors", nv_cap_procfs_dir, + sys_minors, NULL); + if (file_entry == NULL) + { + goto cleanup; + } + + return 0; + +cleanup: + nv_cap_procfs_exit(); + + return -EACCES; +} + +static int nv_cap_find_minor(char *path) +{ + unsigned int key = nv_cap_hash_key(path); + nv_cap_table_entry_t *entry; + + nv_hash_for_each_possible(g_nv_cap_hash_table, entry, hlist, key) + { + if (strcmp(path, entry->name) == 0) + { + return entry->minor; + } + } + + return -1; +} + +static void _nv_cap_table_init(nv_cap_table_entry_t *table, int count) +{ + int i; + unsigned int key; + static int minor = 0; + + for (i = 0; i < count; i++) + { + table[i].minor = minor++; + INIT_HLIST_NODE(&table[i].hlist); + key = nv_cap_hash_key(table[i].name); + nv_hash_add(g_nv_cap_hash_table, &table[i].hlist, key); + } + + WARN_ON(minor > NV_CAP_DRV_MINOR_COUNT); +} + +#define nv_cap_table_init(table) \ + _nv_cap_table_init(table, NV_CAP_NUM_ENTRIES(table)) + +static void nv_cap_tables_init(void) +{ + BUILD_BUG_ON(offsetof(nv_cap_table_entry_t, name) != 0); + + nv_hash_init(g_nv_cap_hash_table); + + nv_cap_table_init(g_nv_cap_nvlink_table); + nv_cap_table_init(g_nv_cap_mig_table); + nv_cap_table_init(g_nv_cap_mig_gpu_table); + 
nv_cap_table_init(g_nv_cap_sys_table); +} + +static ssize_t nv_cap_procfs_write(struct file *file, + const char __user *buffer, + size_t count, loff_t *pos) +{ + nv_cap_file_private_t *private = NULL; + unsigned long bytes_left; + char *proc_buffer; + int status; + + status = nv_down_read_interruptible(&nv_system_pm_lock); + if (status < 0) + { + nv_printf(NV_DBG_ERRORS, "nv-caps: failed to lock the nv_system_pm_lock!\n"); + return status; + } + + private = ((struct seq_file *)file->private_data)->private; + bytes_left = (sizeof(private->buffer) - private->offset - 1); + + if (count == 0) + { + count = -EINVAL; + goto done; + } + + if ((bytes_left == 0) || (count > bytes_left)) + { + count = -ENOSPC; + goto done; + } + + proc_buffer = &private->buffer[private->offset]; + + if (copy_from_user(proc_buffer, buffer, count)) + { + nv_printf(NV_DBG_ERRORS, "nv-caps: failed to copy in proc data!\n"); + count = -EFAULT; + goto done; + } + + private->offset += count; + proc_buffer[count] = '\0'; + + *pos = private->offset; + +done: + up_read(&nv_system_pm_lock); + + return count; +} + +static int nv_cap_procfs_read(struct seq_file *s, void *v) +{ + int status; + nv_cap_file_private_t *private = s->private; + + status = nv_down_read_interruptible(&nv_system_pm_lock); + if (status < 0) + { + return status; + } + + seq_printf(s, "%s: %d\n", "DeviceFileMinor", private->minor); + seq_printf(s, "%s: %d\n", "DeviceFileMode", private->permissions); + seq_printf(s, "%s: %d\n", "DeviceFileModify", private->modify); + + up_read(&nv_system_pm_lock); + return 0; +} + +static int nv_cap_procfs_open(struct inode *inode, struct file *file) +{ + nv_cap_file_private_t *private = NULL; + int rc; + nv_cap_t *cap = NV_PDE_DATA(inode); + + NV_KMALLOC(private, sizeof(nv_cap_file_private_t)); + if (private == NULL) + { + return -ENOMEM; + } + + private->minor = cap->minor; + private->permissions = cap->permissions; + private->offset = 0; + private->modify = cap->modify; + + rc = single_open(file, nv_cap_procfs_read, private); + if (rc < 0) + { + NV_KFREE(private, sizeof(nv_cap_file_private_t)); + } + + return rc; +} + +static int nv_cap_procfs_release(struct inode *inode, struct file *file) +{ + struct seq_file *s = file->private_data; + nv_cap_file_private_t *private = NULL; + char *buffer; + int modify; + nv_cap_t *cap = NV_PDE_DATA(inode); + + if (s != NULL) + { + private = s->private; + } + + single_release(inode, file); + + if (private != NULL) + { + buffer = private->buffer; + + if (private->offset != 0) + { + if (sscanf(buffer, "DeviceFileModify: %d", &modify) == 1) + { + cap->modify = modify; + } + } + + NV_KFREE(private, sizeof(nv_cap_file_private_t)); + } + + /* + * All open files using the proc entry will be invalidated + * if the entry is removed. 
+ */ + file->private_data = NULL; + + return 0; +} + +static nv_proc_ops_t g_nv_cap_procfs_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_cap_procfs_open, + .NV_PROC_OPS_RELEASE = nv_cap_procfs_release, + .NV_PROC_OPS_WRITE = nv_cap_procfs_write, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_LSEEK = seq_lseek, +}; + +/* forward declaration of g_nv_cap_drv_fops */ +static struct file_operations g_nv_cap_drv_fops; + +int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd) +{ +#if NV_FILESYSTEM_ACCESS_AVAILABLE + struct file *file; + int dup_fd; + struct inode *inode = NULL; + dev_t rdev = 0; + + if (cap == NULL) + { + return -1; + } + + file = fget(fd); + if (file == NULL) + { + return -1; + } + + inode = NV_FILE_INODE(file); + if (inode == NULL) + { + goto err; + } + + /* Make sure the fd belongs to the nv-cap-drv */ + if (file->f_op != &g_nv_cap_drv_fops) + { + goto err; + } + + /* Make sure the fd has the expected capability */ + rdev = inode->i_rdev; + if (MINOR(rdev) != cap->minor) + { + goto err; + } + + dup_fd = get_unused_fd_flags(O_CLOEXEC); + if (dup_fd < 0) + { + goto err; + } + + fd_install(dup_fd, file); + return dup_fd; + +err: + fput(file); + return -1; +#else + return -1; +#endif +} + +void NV_API_CALL nv_cap_close_fd(int fd) +{ +#if NV_FILESYSTEM_ACCESS_AVAILABLE + struct file *file; + NvBool is_nv_cap_fd; + + if (fd == -1) + { + return; + } + + /* + * Acquire task_lock as we access current->files explicitly (__close_fd) + * and implicitly (sys_close), and it will race with the exit path. + */ + task_lock(current); + + /* Nothing to do, we are in exit path */ + if (current->files == NULL) + { + task_unlock(current); + return; + } + + file = fget(fd); + if (file == NULL) + { + task_unlock(current); + return; + } + + /* Make sure the fd belongs to the nv-cap-drv */ + is_nv_cap_fd = (file->f_op == &g_nv_cap_drv_fops); + + fput(file); + + /* + * In some cases, we may be in shutdown path and execute + * in context of unrelated process. In that case we should + * not access any 'current' state, but instead let kernel + * clean up capability files on its own. 
+ */ + if (!is_nv_cap_fd) + { + task_unlock(current); + return; + } + +/* + * From v4.17-rc1 (to v5.10.8) kernels have stopped exporting sys_close(fd) + * and started exporting __close_fd, as of this commit: + * 2018-04-02 2ca2a09d6215 ("fs: add ksys_close() wrapper; remove in-kernel + * calls to sys_close()") + * Kernels v5.11-rc1 onwards have stopped exporting __close_fd, and started + * exporting close_fd, as of this commit: + * 2020-12-20 8760c909f54a ("file: Rename __close_fd to close_fd and remove + * the files parameter") + */ +#if NV_IS_EXPORT_SYMBOL_PRESENT_close_fd + close_fd(fd); +#elif NV_IS_EXPORT_SYMBOL_PRESENT___close_fd + __close_fd(current->files, fd); +#else + sys_close(fd); +#endif + + task_unlock(current); +#endif +} + +static nv_cap_t* nv_cap_alloc(nv_cap_t *parent_cap, const char *name) +{ + nv_cap_t *cap; + int len; + + if (parent_cap == NULL || name == NULL) + { + return NULL; + } + + NV_KMALLOC(cap, sizeof(nv_cap_t)); + if (cap == NULL) + { + return NULL; + } + + len = strlen(name) + strlen(parent_cap->path) + 2; + NV_KMALLOC(cap->path, len); + if (cap->path == NULL) + { + NV_KFREE(cap, sizeof(nv_cap_t)); + return NULL; + } + + strcpy(cap->path, parent_cap->path); + strcat(cap->path, "/"); + strcat(cap->path, name); + + len = strlen(name) + 1; + NV_KMALLOC(cap->name, len); + if (cap->name == NULL) + { + NV_KFREE(cap->path, strlen(cap->path) + 1); + NV_KFREE(cap, sizeof(nv_cap_t)); + return NULL; + } + + strcpy(cap->name, name); + + cap->minor = -1; + cap->modify = NVreg_ModifyDeviceFiles; + + return cap; +} + +static void nv_cap_free(nv_cap_t *cap) +{ + if (cap == NULL) + { + return; + } + + NV_KFREE(cap->path, strlen(cap->path) + 1); + NV_KFREE(cap->name, strlen(cap->name) + 1); + NV_KFREE(cap, sizeof(nv_cap_t)); +} + +nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap, + const char *name, int mode) +{ + nv_cap_t *cap = NULL; + int minor; + + cap = nv_cap_alloc(parent_cap, name); + if (cap == NULL) + { + return NULL; + } + + cap->parent = parent_cap->entry; + cap->permissions = mode; + + mode = (S_IFREG | S_IRUGO); + + minor = nv_cap_find_minor(cap->path); + if (minor < 0) + { + nv_cap_free(cap); + return NULL; + } + + cap->minor = minor; + + cap->entry = proc_create_data(name, mode, parent_cap->entry, + &g_nv_cap_procfs_fops, (void*)cap); + if (cap->entry == NULL) + { + nv_cap_free(cap); + return NULL; + } + + return cap; +} + +nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap, + const char *name, int mode) +{ + nv_cap_t *cap = NULL; + + cap = nv_cap_alloc(parent_cap, name); + if (cap == NULL) + { + return NULL; + } + + cap->parent = parent_cap->entry; + cap->permissions = mode; + cap->minor = -1; + + mode = (S_IFDIR | S_IRUGO | S_IXUGO); + + cap->entry = NV_PROC_MKDIR_MODE(name, mode, parent_cap->entry); + if (cap->entry == NULL) + { + nv_cap_free(cap); + return NULL; + } + + return cap; +} + +nv_cap_t* NV_API_CALL nv_cap_init(const char *path) +{ + nv_cap_t parent_cap; + nv_cap_t *cap; + int mode; + char *name = NULL; + char dir[] = "/capabilities"; + + if (path == NULL) + { + return NULL; + } + + NV_KMALLOC(name, (strlen(path) + strlen(dir)) + 1); + if (name == NULL) + { + return NULL; + } + + strcpy(name, path); + strcat(name, dir); + parent_cap.entry = NULL; + parent_cap.path = ""; + parent_cap.name = ""; + mode = S_IRUGO | S_IXUGO; + cap = nv_cap_create_dir_entry(&parent_cap, name, mode); + + NV_KFREE(name, strlen(name) + 1); + return cap; +} + +void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap) +{ + if (WARN_ON(cap == 
NULL))
+    {
+        return;
+    }
+
+    remove_proc_entry(cap->name, cap->parent);
+    nv_cap_free(cap);
+}
+
+static int nv_cap_drv_open(struct inode *inode, struct file *file)
+{
+    return 0;
+}
+
+static int nv_cap_drv_release(struct inode *inode, struct file *file)
+{
+    return 0;
+}
+
+static struct file_operations g_nv_cap_drv_fops =
+{
+    .owner = THIS_MODULE,
+    .open = nv_cap_drv_open,
+    .release = nv_cap_drv_release
+};
+
+int NV_API_CALL nv_cap_drv_init(void)
+{
+    int rc;
+
+    nv_cap_tables_init();
+
+    if (g_nv_cap_drv.initialized)
+    {
+        nv_printf(NV_DBG_ERRORS, "nv-caps-drv is already initialized.\n");
+        return -EBUSY;
+    }
+
+    rc = alloc_chrdev_region(&g_nv_cap_drv.devno,
+                             0,
+                             NV_CAP_DRV_MINOR_COUNT,
+                             "nvidia-caps");
+    if (rc < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "nv-caps-drv failed to create cdev region.\n");
+        return rc;
+    }
+
+    cdev_init(&g_nv_cap_drv.cdev, &g_nv_cap_drv_fops);
+
+    g_nv_cap_drv.cdev.owner = THIS_MODULE;
+
+    rc = cdev_add(&g_nv_cap_drv.cdev, g_nv_cap_drv.devno,
+                  NV_CAP_DRV_MINOR_COUNT);
+    if (rc < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "nv-caps-drv failed to create cdev.\n");
+        goto cdev_add_fail;
+    }
+
+    rc = nv_cap_procfs_init();
+    if (rc < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "nv-caps-drv: unable to init proc\n");
+        goto proc_init_fail;
+    }
+
+    g_nv_cap_drv.initialized = NV_TRUE;
+
+    return 0;
+
+proc_init_fail:
+    cdev_del(&g_nv_cap_drv.cdev);
+
+cdev_add_fail:
+    unregister_chrdev_region(g_nv_cap_drv.devno, NV_CAP_DRV_MINOR_COUNT);
+
+    return rc;
+}
+
+void NV_API_CALL nv_cap_drv_exit(void)
+{
+    if (!g_nv_cap_drv.initialized)
+    {
+        return;
+    }
+
+    nv_cap_procfs_exit();
+
+    cdev_del(&g_nv_cap_drv.cdev);
+
+    unregister_chrdev_region(g_nv_cap_drv.devno, NV_CAP_DRV_MINOR_COUNT);
+
+    g_nv_cap_drv.initialized = NV_FALSE;
+}
diff --git a/kernel-open/nvidia/nv-clk.c b/kernel-open/nvidia/nv-clk.c
new file mode 100644
index 0000000..a66860f
--- /dev/null
+++ b/kernel-open/nvidia/nv-clk.c
@@ -0,0 +1,932 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+#include "nv-platform.h"
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+// Use the CCF APIs if they are enabled in the kernel config and the RM build
+// has the dual-license define enabled.
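+//
+// When CONFIG_COMMON_CLK is not set, every clock entry point in this file
+// compiles to a stub that returns NV_ERR_NOT_SUPPORTED (see the #else
+// branches below).
+//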
+#if defined(CONFIG_COMMON_CLK) +#define HAS_COMMON_CLOCK_FRAMEWORK 1 +#else +#define HAS_COMMON_CLOCK_FRAMEWORK 0 +#endif + +#if HAS_COMMON_CLOCK_FRAMEWORK +#if defined(NV_DEVM_CLK_BULK_GET_ALL_PRESENT) +/*! + * @brief The below defined static const array points to the + * clock mentioned in enum defined in below file. + * + * arch/nvalloc/unix/include/nv.h + * enum TEGRASOC_WHICH_CLK + * + * The order should be maintained/updated together. + */ +static const char *osMapClk[] = { + [TEGRASOC_WHICH_CLK_NVDISPLAYHUB] = "nvdisplayhub_clk", + [TEGRASOC_WHICH_CLK_NVDISPLAY_DISP] = "nvdisplay_disp_clk", + [TEGRASOC_WHICH_CLK_NVDISPLAY_P0] = "nvdisplay_p0_clk", + [TEGRASOC_WHICH_CLK_NVDISPLAY_P1] = "nvdisplay_p1_clk", + [TEGRASOC_WHICH_CLK_NVDISPLAY_P2] = "nvdisplay_p2_clk", + [TEGRASOC_WHICH_CLK_NVDISPLAY_P3] = "nvdisplay_p3_clk", + [TEGRASOC_WHICH_CLK_NVDISPLAY_P4] = "nvdisplay_p4_clk", + [TEGRASOC_WHICH_CLK_NVDISPLAY_P5] = "nvdisplay_p5_clk", + [TEGRASOC_WHICH_CLK_NVDISPLAY_P6] = "nvdisplay_p6_clk", + [TEGRASOC_WHICH_CLK_NVDISPLAY_P7] = "nvdisplay_p7_clk", + [TEGRASOC_WHICH_CLK_DPAUX0] = "dpaux0_clk", + [TEGRASOC_WHICH_CLK_FUSE] = "fuse_clk", + [TEGRASOC_WHICH_CLK_DSIPLL_VCO] = "dsipll_vco_clk", + [TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN] = "dsipll_clkoutpn_clk", + [TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA] = "dsipll_clkouta_clk", + [TEGRASOC_WHICH_CLK_SPPLL0_VCO] = "sppll0_vco_clk", + [TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA] = "sppll0_clkouta_clk", + [TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB] = "sppll0_clkoutb_clk", + [TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN] = "sppll0_clkoutpn_clk", + [TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN] = "sppll1_clkoutpn_clk", + [TEGRASOC_WHICH_CLK_SPPLL0_DIV27] = "sppll0_div27_clk", + [TEGRASOC_WHICH_CLK_SPPLL1_DIV27] = "sppll1_div27_clk", + [TEGRASOC_WHICH_CLK_SPPLL0_DIV10] = "sppll0_div10_clk", + [TEGRASOC_WHICH_CLK_SPPLL0_DIV25] = "sppll0_div25_clk", + [TEGRASOC_WHICH_CLK_SPPLL1_VCO] = "sppll1_vco_clk", + [TEGRASOC_WHICH_CLK_VPLL0_REF] = "vpll0_ref_clk", + [TEGRASOC_WHICH_CLK_VPLL0] = "vpll0_clk", + [TEGRASOC_WHICH_CLK_VPLL1] = "vpll1_clk", + [TEGRASOC_WHICH_CLK_VPLL2] = "vpll2_clk", + [TEGRASOC_WHICH_CLK_VPLL3] = "vpll3_clk", + [TEGRASOC_WHICH_CLK_VPLL4] = "vpll4_clk", + [TEGRASOC_WHICH_CLK_VPLL5] = "vpll5_clk", + [TEGRASOC_WHICH_CLK_VPLL6] = "vpll6_clk", + [TEGRASOC_WHICH_CLK_VPLL7] = "vpll7_clk", + [TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF] = "nvdisplay_p0_ref_clk", + [TEGRASOC_WHICH_CLK_RG0] = "rg0_clk", + [TEGRASOC_WHICH_CLK_RG1] = "rg1_clk", + [TEGRASOC_WHICH_CLK_RG2] = "rg2_clk", + [TEGRASOC_WHICH_CLK_RG3] = "rg3_clk", + [TEGRASOC_WHICH_CLK_RG4] = "rg4_clk", + [TEGRASOC_WHICH_CLK_RG5] = "rg5_clk", + [TEGRASOC_WHICH_CLK_RG6] = "rg6_clk", + [TEGRASOC_WHICH_CLK_RG7] = "rg7_clk", + [TEGRASOC_WHICH_CLK_DISPPLL] = "disppll_clk", + [TEGRASOC_WHICH_CLK_DISPHUBPLL] = "disphubpll_clk", + [TEGRASOC_WHICH_CLK_DSI_LP] = "dsi_lp_clk", + [TEGRASOC_WHICH_CLK_DSI_CORE] = "dsi_core_clk", + [TEGRASOC_WHICH_CLK_DSI_PIXEL] = "dsi_pixel_clk", + [TEGRASOC_WHICH_CLK_PRE_SOR0] = "pre_sor0_clk", + [TEGRASOC_WHICH_CLK_PRE_SOR1] = "pre_sor1_clk", + [TEGRASOC_WHICH_CLK_PRE_SOR2] = "pre_sor2_clk", + [TEGRASOC_WHICH_CLK_PRE_SOR3] = "pre_sor3_clk", + [TEGRASOC_WHICH_CLK_DP_LINKA_REF] = "dp_link_ref_clk", + [TEGRASOC_WHICH_CLK_DP_LINKB_REF] = "dp_linkb_ref_clk", + [TEGRASOC_WHICH_CLK_DP_LINKC_REF] = "dp_linkc_ref_clk", + [TEGRASOC_WHICH_CLK_DP_LINKD_REF] = "dp_linkd_ref_clk", + [TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT] = "sor_linka_input_clk", + [TEGRASOC_WHICH_CLK_SOR_LINKB_INPUT] = "sor_linkb_input_clk", + 
[TEGRASOC_WHICH_CLK_SOR_LINKC_INPUT] = "sor_linkc_input_clk", + [TEGRASOC_WHICH_CLK_SOR_LINKD_INPUT] = "sor_linkd_input_clk", + [TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO] = "sor_linka_afifo_clk", + [TEGRASOC_WHICH_CLK_SOR_LINKB_AFIFO] = "sor_linkb_afifo_clk", + [TEGRASOC_WHICH_CLK_SOR_LINKC_AFIFO] = "sor_linkc_afifo_clk", + [TEGRASOC_WHICH_CLK_SOR_LINKD_AFIFO] = "sor_linkd_afifo_clk", + [TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M] = "sor_linka_afifo_m_clk", + [TEGRASOC_WHICH_CLK_RG0_M] = "rg0_m_clk", + [TEGRASOC_WHICH_CLK_RG1_M] = "rg1_m_clk", + [TEGRASOC_WHICH_CLK_SOR0_M] = "sor0_m_clk", + [TEGRASOC_WHICH_CLK_SOR1_M] = "sor1_m_clk", + [TEGRASOC_WHICH_CLK_PLLHUB] = "pllhub_clk", + [TEGRASOC_WHICH_CLK_SOR0] = "sor0_clk", + [TEGRASOC_WHICH_CLK_SOR1] = "sor1_clk", + [TEGRASOC_WHICH_CLK_SOR2] = "sor2_clk", + [TEGRASOC_WHICH_CLK_SOR3] = "sor3_clk", + [TEGRASOC_WHICH_CLK_SOR_PADA_INPUT] = "sor_pad_input_clk", + [TEGRASOC_WHICH_CLK_SOR_PADB_INPUT] = "sor_padb_input_clk", + [TEGRASOC_WHICH_CLK_SOR_PADC_INPUT] = "sor_padc_input_clk", + [TEGRASOC_WHICH_CLK_SOR_PADD_INPUT] = "sor_padd_input_clk", + [TEGRASOC_WHICH_CLK_SOR0_PAD] = "sor0_pad_clk", + [TEGRASOC_WHICH_CLK_SOR1_PAD] = "sor1_pad_clk", + [TEGRASOC_WHICH_CLK_SOR2_PAD] = "sor2_pad_clk", + [TEGRASOC_WHICH_CLK_SOR3_PAD] = "sor3_pad_clk", + [TEGRASOC_WHICH_CLK_PRE_SF0] = "pre_sf0_clk", + [TEGRASOC_WHICH_CLK_SF0] = "sf0_clk", + [TEGRASOC_WHICH_CLK_SF1] = "sf1_clk", + [TEGRASOC_WHICH_CLK_SF2] = "sf2_clk", + [TEGRASOC_WHICH_CLK_SF3] = "sf3_clk", + [TEGRASOC_WHICH_CLK_SF4] = "sf4_clk", + [TEGRASOC_WHICH_CLK_SF5] = "sf5_clk", + [TEGRASOC_WHICH_CLK_SF6] = "sf6_clk", + [TEGRASOC_WHICH_CLK_SF7] = "sf7_clk", + [TEGRASOC_WHICH_CLK_DSI_PAD_INPUT] = "dsi_pad_input_clk", + [TEGRASOC_WHICH_CLK_PRE_SOR0_REF] = "pre_sor0_ref_clk", + [TEGRASOC_WHICH_CLK_PRE_SOR1_REF] = "pre_sor1_ref_clk", + [TEGRASOC_WHICH_CLK_SOR0_PLL_REF] = "sor0_ref_pll_clk", + [TEGRASOC_WHICH_CLK_SOR1_PLL_REF] = "sor1_ref_pll_clk", + [TEGRASOC_WHICH_CLK_SOR2_PLL_REF] = "sor2_ref_pll_clk", + [TEGRASOC_WHICH_CLK_SOR3_PLL_REF] = "sor3_ref_pll_clk", + [TEGRASOC_WHICH_CLK_SOR0_REF] = "sor0_ref_clk", + [TEGRASOC_WHICH_CLK_SOR1_REF] = "sor1_ref_clk", + [TEGRASOC_WHICH_CLK_SOR2_REF] = "sor2_ref_clk", + [TEGRASOC_WHICH_CLK_SOR3_REF] = "sor3_ref_clk", + [TEGRASOC_WHICH_CLK_OSC] = "osc_clk", + [TEGRASOC_WHICH_CLK_DSC] = "dsc_clk", + [TEGRASOC_WHICH_CLK_MAUD] = "maud_clk", + [TEGRASOC_WHICH_CLK_AZA_2XBIT] = "aza_2xbit_clk", + [TEGRASOC_WHICH_CLK_AZA_BIT] = "aza_bit_clk", + [TEGRASOC_WHICH_CLK_MIPI_CAL] = "mipi_cal_clk", + [TEGRASOC_WHICH_CLK_UART_FST_MIPI_CAL] = "uart_fst_mipi_cal_clk", + [TEGRASOC_WHICH_CLK_SOR0_DIV] = "sor0_div_clk", + [TEGRASOC_WHICH_CLK_DISP_ROOT] = "disp_root", + [TEGRASOC_WHICH_CLK_HUB_ROOT] = "hub_root", + [TEGRASOC_WHICH_CLK_PLLA_DISP] = "plla_disp", + [TEGRASOC_WHICH_CLK_PLLA_DISPHUB] = "plla_disphub", + [TEGRASOC_WHICH_CLK_PLLA] = "plla", + [TEGRASOC_WHICH_CLK_VPLLX_SOR0_MUXED] = "vpllx_sor0_muxed_clk", + [TEGRASOC_WHICH_CLK_VPLLX_SOR1_MUXED] = "vpllx_sor1_muxed_clk", + [TEGRASOC_WHICH_CLK_VPLLX_SOR2_MUXED] = "vpllx_sor2_muxed_clk", + [TEGRASOC_WHICH_CLK_VPLLX_SOR3_MUXED] = "vpllx_sor3_muxed_clk", + [TEGRASOC_WHICH_CLK_SF0_SOR] = "sf0_sor_clk", + [TEGRASOC_WHICH_CLK_SF1_SOR] = "sf1_sor_clk", + [TEGRASOC_WHICH_CLK_SF2_SOR] = "sf2_sor_clk", + [TEGRASOC_WHICH_CLK_SF3_SOR] = "sf3_sor_clk", + [TEGRASOC_WHICH_CLK_SF4_SOR] = "sf4_sor_clk", + [TEGRASOC_WHICH_CLK_SF5_SOR] = "sf5_sor_clk", + [TEGRASOC_WHICH_CLK_SF6_SOR] = "sf6_sor_clk", + [TEGRASOC_WHICH_CLK_SF7_SOR] = "sf7_sor_clk", + 
[TEGRASOC_WHICH_CLK_EMC] = "emc_clk",
+    [TEGRASOC_WHICH_CLK_GPU_SYS] = "sysclk",
+    [TEGRASOC_WHICH_CLK_GPU_NVD] = "nvdclk",
+    [TEGRASOC_WHICH_CLK_GPU_UPROC] = "uprocclk",
+    [TEGRASOC_WHICH_CLK_GPU_GPC0] = "gpc0clk",
+    [TEGRASOC_WHICH_CLK_GPU_GPC1] = "gpc1clk",
+    [TEGRASOC_WHICH_CLK_GPU_GPC2] = "gpc2clk",
+};
+#endif
+
+/*!
+ * @brief Get the clock handles.
+ *
+ * Look up and obtain the clock handle for each display clock at boot time;
+ * those handles are then used for all later operations, for example
+ * enabling/disabling a clock or querying its current/maximum frequency.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in]  nv  Per gpu linux state
+ *
+ * @returns NV_STATUS
+ */
+NV_STATUS NV_API_CALL nv_clk_get_handles(
+    nv_state_t *nv)
+{
+    NV_STATUS status = NV_OK;
+#if defined(NV_DEVM_CLK_BULK_GET_ALL_PRESENT)
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NvU32 i, j;
+    int clk_count;
+    struct clk_bulk_data *clks;
+
+    clk_count = devm_clk_bulk_get_all(nvl->dev, &clks);
+
+    if (clk_count <= 0)
+    {
+        nv_printf(NV_DBG_INFO, "NVRM: No clk handles for the dev\n");
+        //
+        // Return early: iterating below with a non-positive clk_count would
+        // wrap around in the unsigned comparison against i.
+        //
+        return NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    //
+    // TEGRASOC_WHICH_CLK_MAX is maximum clock defined in below enum
+    // arch/nvalloc/unix/include/nv.h
+    // enum TEGRASOC_WHICH_CLK
+    //
+    for (i = 0U; i < clk_count; i++)
+    {
+        for (j = 0U; j < TEGRASOC_WHICH_CLK_MAX; j++)
+        {
+            if (!strcmp(osMapClk[j], clks[i].id))
+            {
+                nvl->soc_clk_handles.clk[j].handles = clks[i].clk;
+                nvl->soc_clk_handles.clk[j].clkName = __clk_get_name(clks[i].clk);
+                break;
+            }
+        }
+        if (j == TEGRASOC_WHICH_CLK_MAX)
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: nv_clk_get_handles, failed to find TEGRASOC_WHICH_CLK for %s\n", clks[i].id);
+            return NV_ERR_OBJECT_NOT_FOUND;
+        }
+    }
+#else
+    nv_printf(NV_DBG_INFO, "NVRM: devm_clk_bulk_get_all API is not present\n");
+    status = NV_ERR_FEATURE_NOT_ENABLED;
+#endif
+
+    return status;
+}
+
+/*!
+ * @brief Enable the clock.
+ *
+ * Enable the clock before performing any operation on it. The function
+ * below prepares the clock for use and then enables it.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in]  nv          Per gpu linux state
+ * @param[in]  whichClkOS  Enum value of the target clock
+ *
+ * @returns NV_STATUS
+ */
+NV_STATUS NV_API_CALL nv_enable_clk(
+    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NV_STATUS status;
+    int ret;
+
+    if (nvl->soc_clk_handles.clk[whichClkOS].handles != NULL)
+    {
+        ret = clk_prepare_enable(nvl->soc_clk_handles.clk[whichClkOS].handles);
+
+        if (ret == 0)
+        {
+            status = NV_OK;
+        }
+        else
+        {
+            status = NV_ERR_FEATURE_NOT_ENABLED;
+            nv_printf(NV_DBG_ERRORS, "NVRM: clk_prepare_enable failed with error: %d\n", ret);
+        }
+    }
+    else
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Disable the clock.
+ *
+ * Disable the clock once the operation or required work with that
+ * particular clock is done. The function below unprepares the clock and
+ * disables it.
+ *
+ * Note: make sure to disable the clock before clk_put is called.
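+ * Each clk_disable_unprepare() call must balance an earlier
+ * clk_prepare_enable(); the CCF keeps per-clock prepare/enable counts.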
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in]  nv          Per gpu linux state
+ * @param[in]  whichClkOS  Enum value of the target clock
+ */
+void NV_API_CALL nv_disable_clk(
+    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+
+    clk_disable_unprepare(nvl->soc_clk_handles.clk[whichClkOS].handles);
+}
+
+/*!
+ * @brief Get current clock frequency.
+ *
+ * Obtain the current clock rate for a clock source.
+ * This is only valid once the clock source has been enabled.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in]  nv            Per gpu linux state
+ * @param[in]  whichClkOS    Enum value of the target clock
+ * @param[out] pCurrFreqKHz  Current clock frequency
+ */
+NV_STATUS NV_API_CALL nv_get_curr_freq(
+    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pCurrFreqKHz)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NV_STATUS status;
+    unsigned long currFreqHz;
+
+    if (nvl->soc_clk_handles.clk[whichClkOS].handles != NULL)
+    {
+        currFreqHz = clk_get_rate(nvl->soc_clk_handles.clk[whichClkOS].handles);
+        *pCurrFreqKHz = currFreqHz / 1000U;
+
+        if (*pCurrFreqKHz > 0U)
+        {
+            status = NV_OK;
+        }
+        else
+        {
+            status = NV_ERR_FEATURE_NOT_ENABLED;
+        }
+    }
+    else
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Get maximum clock frequency.
+ *
+ * Obtain the maximum clock rate a clock source can provide.
+ * This is only valid once the clock source has been enabled.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in]  nv           Per gpu linux state
+ * @param[in]  whichClkOS   Enum value of the target clock
+ * @param[out] pMaxFreqKHz  Maximum clock frequency
+ */
+NV_STATUS NV_API_CALL nv_get_max_freq(
+    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pMaxFreqKHz)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NV_STATUS status;
+    long ret;
+
+    if (nvl->soc_clk_handles.clk[whichClkOS].handles != NULL)
+    {
+        //
+        // clk_round_rate(struct clk *clk, rate) returns the rounded clock
+        // rate in Hz that the clock can actually provide for the requested
+        // rate, or a negative errno.
+        // Here, rate = NV_U32_MAX and 0 < currFreq <= maxFreq <= NV_U32_MAX,
+        // so rounding NV_U32_MAX to the nearest supported frequency
+        // returns maxFreq.
+        //
+        ret = clk_round_rate(nvl->soc_clk_handles.clk[whichClkOS].handles, NV_U32_MAX);
+
+        if (ret >= 0)
+        {
+            *pMaxFreqKHz = (NvU32) (ret / 1000);
+            status = NV_OK;
+        }
+        else
+        {
+            status = NV_ERR_FEATURE_NOT_ENABLED;
+            nv_printf(NV_DBG_ERRORS, "NVRM: clk_round_rate failed with error: %ld\n", ret);
+        }
+    }
+    else
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Get minimum clock frequency.
+ *
+ * Obtain the minimum clock rate a clock source can provide.
+ * This is only valid once the clock source has been enabled.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in]  nv           Per gpu linux state
+ * @param[in]  whichClkOS   Enum value of the target clock
+ * @param[out] pMinFreqKHz  Minimum clock frequency
+ */
+NV_STATUS NV_API_CALL nv_get_min_freq(
+    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pMinFreqKHz)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NV_STATUS status;
+    long ret;
+
+    if (nvl->soc_clk_handles.clk[whichClkOS].handles != NULL)
+    {
+        //
+        // clk_round_rate(struct clk *clk, rate) returns the rounded clock
+        // rate in Hz that the clock can actually provide for the requested
+        // rate, or a negative errno.
+        // Here, rate = 0 and 0 <= minFreq <= currFreq <= maxFreq, so
+        // rounding 0 to the nearest supported frequency returns minFreq.
+        //
+        ret = clk_round_rate(nvl->soc_clk_handles.clk[whichClkOS].handles, 0);
+
+        if (ret >= 0)
+        {
+            *pMinFreqKHz = (NvU32) (ret / 1000);
+            status = NV_OK;
+        }
+        else
+        {
+            status = NV_ERR_FEATURE_NOT_ENABLED;
+            nv_printf(NV_DBG_ERRORS, "NVRM: clk_round_rate failed with error: %ld\n", ret);
+        }
+    }
+    else
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    return status;
+}
+
+
+/*!
+ * @brief Set clock frequency.
+ *
+ * Set the frequency of a clock source.
+ * This is only valid once the clock source has been enabled.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in]  nv          Per gpu linux state
+ * @param[in]  whichClkOS  Enum value of the target clock
+ * @param[in]  reqFreqKHz  Required frequency
+ */
+NV_STATUS NV_API_CALL nv_set_freq(
+    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 reqFreqKHz)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NV_STATUS status;
+    int ret;
+
+    if (nvl->soc_clk_handles.clk[whichClkOS].handles != NULL)
+    {
+        ret = clk_set_rate(nvl->soc_clk_handles.clk[whichClkOS].handles,
+                           reqFreqKHz * 1000U);
+        if (ret == 0)
+        {
+            status = NV_OK;
+        }
+        else
+        {
+            status = NV_ERR_INVALID_REQUEST;
+            nv_printf(NV_DBG_ERRORS, "NVRM: clk_set_rate failed with error: %d\n", ret);
+        }
+    }
+    else
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    return status;
+}
+#else
+NV_STATUS NV_API_CALL nv_clk_get_handles
+(
+    nv_state_t *nv
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_enable_clk
+(
+    nv_state_t *nv,
+    TEGRASOC_WHICH_CLK whichClkOS
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+void NV_API_CALL nv_disable_clk
+(
+    nv_state_t *nv,
+    TEGRASOC_WHICH_CLK whichClkOS
+)
+{
+    return;
+}
+
+NV_STATUS NV_API_CALL nv_get_curr_freq
+(
+    nv_state_t *nv,
+    TEGRASOC_WHICH_CLK whichClkOS,
+    NvU32 *pCurrFreqKHz
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_get_max_freq
+(
+    nv_state_t *nv,
+    TEGRASOC_WHICH_CLK whichClkOS,
+    NvU32 *pMaxFreqKHz
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_get_min_freq
+(
+    nv_state_t *nv,
+    TEGRASOC_WHICH_CLK whichClkOS,
+    NvU32 *pMinFreqKHz
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_set_freq
+(
+    nv_state_t *nv,
+    TEGRASOC_WHICH_CLK whichClkOS,
+    NvU32 freqKHz
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+#endif
+
+/*!
+ * @brief Clear the clock handles assigned by nv_clk_get_handles()
+ *
+ * Clear the clock handle of each display clock at shutdown time.
+ * Since the clock handles were obtained through the devm-managed
+ * devm_clk_bulk_get_all() API, devm_clk_bulk_release_all() is called on
+ * all the enumerated clk handles automatically when the module is
+ * unloaded, so there is no need to free those handles explicitly.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in]  nv  Per gpu linux state
+ */
+void NV_API_CALL nv_clk_clear_handles(
+    nv_state_t *nv)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NvU32 i;
+
+    //
+    // TEGRASOC_WHICH_CLK_MAX is maximum clock defined in below enum
+    // arch/nvalloc/unix/include/nv.h
+    // enum TEGRASOC_WHICH_CLK
+    //
+    for (i = 0U; i < TEGRASOC_WHICH_CLK_MAX; i++)
+    {
+        if (nvl->soc_clk_handles.clk[i].handles != NULL)
+        {
+            nvl->soc_clk_handles.clk[i].handles = NULL;
+        }
+    }
+}
+
+#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
+/*!
+ * @brief Set parent clock.
+ *
+ * Set the parent clock of a clock source.
+ * This is only valid once the clock source and the parent
+ * clock have been enabled.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in]  nv                Per gpu linux state
+ * @param[in]  whichClkOSsource  Enum value of the source clock
+ * @param[in]  whichClkOSparent  Enum value of the parent clock
+ */
+NV_STATUS NV_API_CALL nv_set_parent
+(
+    nv_state_t *nv,
+    TEGRASOC_WHICH_CLK whichClkOSsource,
+    TEGRASOC_WHICH_CLK whichClkOSparent
+)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NV_STATUS status;
+    int ret;
+
+    if ((nvl->soc_clk_handles.clk[whichClkOSsource].handles != NULL) &&
+        (nvl->soc_clk_handles.clk[whichClkOSparent].handles != NULL))
+    {
+        ret = clk_set_parent(nvl->soc_clk_handles.clk[whichClkOSsource].handles,
+                             nvl->soc_clk_handles.clk[whichClkOSparent].handles);
+        if (ret == 0)
+        {
+            status = NV_OK;
+        }
+        else
+        {
+            status = NV_ERR_INVALID_REQUEST;
+            nv_printf(NV_DBG_ERRORS, "NVRM: clk_set_parent failed with error: %d\n", ret);
+        }
+    }
+    else
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Get parent clock.
+ *
+ * Get the parent clock of a clock source.
+ * This is only valid once the clock source and the parent
+ * clock have been enabled.
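+ *
+ * Note: clk_get_parent() returns a borrowed reference; the returned
+ * struct clk must not be released with clk_put().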
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in]  nv                 Per gpu linux state
+ * @param[in]  whichClkOSsource   Enum value of the source clock
+ * @param[out] pWhichClkOSparent  Enum value of the parent clock
+ */
+NV_STATUS NV_API_CALL nv_get_parent
+(
+    nv_state_t *nv,
+    TEGRASOC_WHICH_CLK whichClkOSsource,
+    TEGRASOC_WHICH_CLK *pWhichClkOSparent
+)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct clk *ret;
+    NvU32 i;
+
+    if (nvl->soc_clk_handles.clk[whichClkOSsource].handles != NULL)
+    {
+        ret = clk_get_parent(nvl->soc_clk_handles.clk[whichClkOSsource].handles);
+        if (!IS_ERR_OR_NULL(ret))
+        {
+            const char *parentClkName = __clk_get_name(ret);
+            //
+            // TEGRASOC_WHICH_CLK_MAX is maximum clock defined in below enum
+            // arch/nvalloc/unix/include/nv.h
+            // enum TEGRASOC_WHICH_CLK
+            //
+            for (i = 0U; i < TEGRASOC_WHICH_CLK_MAX; i++)
+            {
+                //
+                // soc_clk_handles holds the array of clks supported across
+                // all chips, so depending on the chip, some clks may not be
+                // present.
+                //
+                if (nvl->soc_clk_handles.clk[i].clkName == NULL)
+                {
+                    continue;
+                }
+
+                if (!strcmp(nvl->soc_clk_handles.clk[i].clkName, parentClkName))
+                {
+                    *pWhichClkOSparent = i;
+                    return NV_OK;
+                }
+            }
+            nv_printf(NV_DBG_ERRORS, "NVRM: unexpected parent clock ref addr: %p\n", ret);
+            return NV_ERR_INVALID_OBJECT_PARENT;
+        }
+        else
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: clk_get_parent failed with error: %ld\n", PTR_ERR(ret));
+            return NV_ERR_INVALID_POINTER;
+        }
+    }
+
+    nv_printf(NV_DBG_ERRORS, "NVRM: invalid source clock requested\n");
+    return NV_ERR_OBJECT_NOT_FOUND;
+}
+
+/*!
+ * @brief Check whether the clock is enabled.
+ *
+ * Check whether a clock is currently enabled before
+ * enabling or disabling it.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in]  nv          Per gpu linux state
+ * @param[in]  whichClkOS  Enum value of the target clock
+ *
+ * @returns clock status.
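+ *          NV_TRUE if the clock is currently enabled, NV_FALSE otherwise
+ *          (including when no handle was obtained for the clock).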
+ */
+NvBool NV_API_CALL nv_is_clk_enabled(
+    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    bool ret = false;
+
+    if (nvl->soc_clk_handles.clk[whichClkOS].handles == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: requested clock handle not found.\n");
+        return NV_FALSE;
+    }
+
+    ret = __clk_is_enabled(nvl->soc_clk_handles.clk[whichClkOS].handles);
+    return ret ? NV_TRUE : NV_FALSE;
+}
+
+NV_STATUS NV_API_CALL nv_dp_uphy_pll_init
+(
+    nv_state_t *nv,
+    NvU32 link_rate,
+    NvU32 lanes_bitmap
+)
+{
+#if defined(NV_CMD_UPHY_DISPLAY_PORT_INIT_PRESENT)
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct tegra_bpmp *bpmp;
+    struct tegra_bpmp_message msg;
+    struct mrq_uphy_request req;
+    struct mrq_uphy_response resp;
+    int rc;
+    NV_STATUS status = NV_OK;
+
+    bpmp = tegra_bpmp_get(nvl->dev);
+    if (IS_ERR(bpmp))
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "NVRM: Error getting bpmp struct: %ld\n",
+                  PTR_ERR(bpmp));
+        return NV_ERR_GENERIC;
+    }
+
+    req.cmd = CMD_UPHY_DISPLAY_PORT_INIT;
+    req.display_port_init.link_rate = link_rate;
+    req.display_port_init.lanes_bitmap = lanes_bitmap;
+
+    memset(&msg, 0, sizeof(msg));
+    msg.mrq = MRQ_UPHY;
+    msg.tx.data = &req;
+    msg.tx.size = sizeof(req);
+    msg.rx.data = &resp;
+    msg.rx.size = sizeof(resp);
+
+    rc = tegra_bpmp_transfer(bpmp, &msg);
+    if (rc)
+    {
+        nv_printf(NV_DBG_ERRORS, "DP UPHY pll initialization failed, rc = %d\n", rc);
+        status = NV_ERR_GENERIC;
+    }
+
+    tegra_bpmp_put(bpmp);
+
+    return status;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+NV_STATUS NV_API_CALL nv_dp_uphy_pll_deinit(nv_state_t *nv)
+{
+#if defined(NV_CMD_UPHY_DISPLAY_PORT_OFF_PRESENT)
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct tegra_bpmp *bpmp;
+    struct tegra_bpmp_message msg;
+    struct mrq_uphy_request req;
+    struct mrq_uphy_response resp;
+    int rc;
+    NV_STATUS status = NV_OK;
+
+    bpmp = tegra_bpmp_get(nvl->dev);
+    if (IS_ERR(bpmp))
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "NVRM: Error getting bpmp struct: %ld\n",
+                  PTR_ERR(bpmp));
+        return NV_ERR_GENERIC;
+    }
+
+    req.cmd = CMD_UPHY_DISPLAY_PORT_OFF;
+
+    memset(&msg, 0, sizeof(msg));
+    msg.mrq = MRQ_UPHY;
+    msg.tx.data = &req;
+    msg.tx.size = sizeof(req);
+    msg.rx.data = &resp;
+    msg.rx.size = sizeof(resp);
+
+    rc = tegra_bpmp_transfer(bpmp, &msg);
+    if (rc)
+    {
+        nv_printf(NV_DBG_ERRORS, "DP UPHY pll de-initialization failed, rc = %d\n", rc);
+        status = NV_ERR_GENERIC;
+    }
+
+    tegra_bpmp_put(bpmp);
+
+    return status;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+#else
+NV_STATUS NV_API_CALL nv_set_parent
+(
+    nv_state_t *nv,
+    TEGRASOC_WHICH_CLK whichClkOSsource,
+    TEGRASOC_WHICH_CLK whichClkOSparent
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NvBool NV_API_CALL nv_is_clk_enabled(
+    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
+{
+    return NV_FALSE;
+}
+
+NV_STATUS NV_API_CALL nv_dp_uphy_pll_deinit(nv_state_t *nv)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_get_parent
+(
+    nv_state_t *nv,
+    TEGRASOC_WHICH_CLK whichClkOSsource,
+    TEGRASOC_WHICH_CLK *pWhichClkOSparent
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_dp_uphy_pll_init
+(
+    nv_state_t *nv,
+    NvU32 link_rate,
+    NvU32 lanes_bitmap
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+#endif
diff --git a/kernel-open/nvidia/nv-cray.c b/kernel-open/nvidia/nv-cray.c
new file mode 100644
index 0000000..ad7f1f5
--- /dev/null
+++ b/kernel-open/nvidia/nv-cray.c
@@ -0,0 +1,217 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +#if defined(CONFIG_CRAY_XT) +enum { + NV_FORMAT_STATE_ORDINARY, + NV_FORMAT_STATE_INTRODUCTION, + NV_FORMAT_STATE_FLAGS, + NV_FORMAT_STATE_FIELD_WIDTH, + NV_FORMAT_STATE_PRECISION, + NV_FORMAT_STATE_LENGTH_MODIFIER, + NV_FORMAT_STATE_CONVERSION_SPECIFIER +}; + +enum { + NV_LENGTH_MODIFIER_NONE, + NV_LENGTH_MODIFIER_CHAR, + NV_LENGTH_MODIFIER_SHORT_INT, + NV_LENGTH_MODIFIER_LONG_INT, + NV_LENGTH_MODIFIER_LONG_LONG_INT +}; + +#define NV_IS_FLAG(c) \ + ((c) == '#' || (c) == '0' || (c) == '-' || (c) == ' ' || (c) == '+') +#define NV_IS_LENGTH_MODIFIER(c) \ + ((c) == 'h' || (c) == 'l' || (c) == 'L' || (c) == 'q' || (c) == 'j' || \ + (c) == 'z' || (c) == 't') +#define NV_IS_CONVERSION_SPECIFIER(c) \ + ((c) == 'd' || (c) == 'i' || (c) == 'o' || (c) == 'u' || (c) == 'x' || \ + (c) == 'X' || (c) == 'e' || (c) == 'E' || (c) == 'f' || (c) == 'F' || \ + (c) == 'g' || (c) == 'G' || (c) == 'a' || (c) == 'A' || (c) == 'c' || \ + (c) == 's' || (c) == 'p') + +#define NV_MAX_NUM_INFO_MMRS 6 + +NV_STATUS nvos_forward_error_to_cray( + struct pci_dev *dev, + NvU32 error_number, + const char *format, + va_list ap +) +{ + NvU32 num_info_mmrs; + NvU64 x = 0, info_mmrs[NV_MAX_NUM_INFO_MMRS]; + int state = NV_FORMAT_STATE_ORDINARY; + int modifier = NV_LENGTH_MODIFIER_NONE; + NvU32 i, n = 0, m = 0; + + memset(info_mmrs, 0, sizeof(info_mmrs)); + while (*format != '\0') + { + switch (state) + { + case NV_FORMAT_STATE_ORDINARY: + if (*format == '%') + state = NV_FORMAT_STATE_INTRODUCTION; + break; + case NV_FORMAT_STATE_INTRODUCTION: + if (*format == '%') + { + state = NV_FORMAT_STATE_ORDINARY; + break; + } + case NV_FORMAT_STATE_FLAGS: + if (NV_IS_FLAG(*format)) + { + state = NV_FORMAT_STATE_FLAGS; + break; + } + else if (*format == '*') + { + state = NV_FORMAT_STATE_FIELD_WIDTH; + break; + } + case NV_FORMAT_STATE_FIELD_WIDTH: + if ((*format >= '0') && (*format <= '9')) + { + state = NV_FORMAT_STATE_FIELD_WIDTH; + break; + } + else if (*format == '.') + { + state = NV_FORMAT_STATE_PRECISION; + break; + } + case NV_FORMAT_STATE_PRECISION: + if ((*format >= '0') && (*format <= '9')) + { + state = NV_FORMAT_STATE_PRECISION; + break; + } + else if (NV_IS_LENGTH_MODIFIER(*format)) + { + state = NV_FORMAT_STATE_LENGTH_MODIFIER; + break; + } + else if (NV_IS_CONVERSION_SPECIFIER(*format)) + { + state = 
NV_FORMAT_STATE_CONVERSION_SPECIFIER;
+                    break;
+                }
+            case NV_FORMAT_STATE_LENGTH_MODIFIER:
+                if ((*format == 'h') || (*format == 'l'))
+                {
+                    state = NV_FORMAT_STATE_LENGTH_MODIFIER;
+                    break;
+                }
+                else if (NV_IS_CONVERSION_SPECIFIER(*format))
+                {
+                    state = NV_FORMAT_STATE_CONVERSION_SPECIFIER;
+                    break;
+                }
+        }
+        switch (state)
+        {
+            case NV_FORMAT_STATE_INTRODUCTION:
+                modifier = NV_LENGTH_MODIFIER_NONE;
+                break;
+            case NV_FORMAT_STATE_LENGTH_MODIFIER:
+                switch (*format)
+                {
+                    case 'h':
+                        modifier = (modifier == NV_LENGTH_MODIFIER_NONE)
+                            ? NV_LENGTH_MODIFIER_SHORT_INT
+                            : NV_LENGTH_MODIFIER_CHAR;
+                        break;
+                    case 'l':
+                        modifier = (modifier == NV_LENGTH_MODIFIER_NONE)
+                            ? NV_LENGTH_MODIFIER_LONG_INT
+                            : NV_LENGTH_MODIFIER_LONG_LONG_INT;
+                        break;
+                    case 'q':
+                        modifier = NV_LENGTH_MODIFIER_LONG_LONG_INT;
+                        break;
+                    default:
+                        return NV_ERR_INVALID_ARGUMENT;
+                }
+                break;
+            case NV_FORMAT_STATE_CONVERSION_SPECIFIER:
+                switch (*format)
+                {
+                    case 'c':
+                    case 'd':
+                    case 'i':
+                        x = (unsigned int)va_arg(ap, int);
+                        break;
+                    case 'o':
+                    case 'u':
+                    case 'x':
+                    case 'X':
+                        switch (modifier)
+                        {
+                            case NV_LENGTH_MODIFIER_LONG_LONG_INT:
+                                x = va_arg(ap, unsigned long long int);
+                                break;
+                            case NV_LENGTH_MODIFIER_LONG_INT:
+                                x = va_arg(ap, unsigned long int);
+                                break;
+                            case NV_LENGTH_MODIFIER_CHAR:
+                            case NV_LENGTH_MODIFIER_SHORT_INT:
+                            case NV_LENGTH_MODIFIER_NONE:
+                                x = va_arg(ap, unsigned int);
+                                break;
+                        }
+                        break;
+                    default:
+                        return NV_ERR_INVALID_ARGUMENT;
+                }
+                state = NV_FORMAT_STATE_ORDINARY;
+                for (i = 0; i < ((modifier == NV_LENGTH_MODIFIER_LONG_LONG_INT)
+                            ? 2 : 1); i++)
+                {
+                    if (m == NV_MAX_NUM_INFO_MMRS)
+                        return NV_ERR_INSUFFICIENT_RESOURCES;
+                    info_mmrs[m] = ((info_mmrs[m] << 32) | (x & 0xffffffff));
+                    x >>= 32;
+                    if (++n == 2)
+                    {
+                        m++;
+                        n = 0;
+                    }
+                }
+        }
+        format++;
+    }
+
+    num_info_mmrs = (m + (n != 0));
+    if (num_info_mmrs > 0)
+        cray_nvidia_report_error(dev, error_number, num_info_mmrs, info_mmrs);
+
+    return NV_OK;
+}
+#endif
diff --git a/kernel-open/nvidia/nv-dma.c b/kernel-open/nvidia/nv-dma.c
new file mode 100644
index 0000000..b93f4ec
--- /dev/null
+++ b/kernel-open/nvidia/nv-dma.c
@@ -0,0 +1,1003 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+#include "nv-reg.h"
+
+#define NV_DMA_DEV_PRINTF(debuglevel, dma_dev, format, ...
) \ + nv_printf(debuglevel, "NVRM: %s: " format, \ + (((dma_dev) && ((dma_dev)->dev)) ? dev_name((dma_dev)->dev) : \ + NULL), \ + ## __VA_ARGS__) + +NvU32 nv_dma_remap_peer_mmio = NV_DMA_REMAP_PEER_MMIO_ENABLE; + +NV_STATUS nv_create_dma_map_scatterlist (nv_dma_map_t *dma_map); +void nv_destroy_dma_map_scatterlist(nv_dma_map_t *dma_map); +NV_STATUS nv_map_dma_map_scatterlist (nv_dma_map_t *dma_map); +void nv_unmap_dma_map_scatterlist (nv_dma_map_t *dma_map); +static void nv_dma_unmap_contig (nv_dma_map_t *dma_map); +static void nv_dma_unmap_scatterlist (nv_dma_map_t *dma_map); + +static inline NvBool nv_dma_is_addressable( + nv_dma_device_t *dma_dev, + NvU64 start, + NvU64 size +) +{ + NvU64 limit = start + size - 1; + + return (start >= dma_dev->addressable_range.start) && + (limit <= dma_dev->addressable_range.limit) && + (limit >= start); +} + +static NV_STATUS nv_dma_map_contig( + nv_dma_device_t *dma_dev, + nv_dma_map_t *dma_map, + NvU64 *va +) +{ +#if defined(NV_DMA_MAP_PAGE_ATTRS_PRESENT) && defined(NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT) + *va = dma_map_page_attrs(dma_map->dev, dma_map->pages[0], 0, + dma_map->page_count * PAGE_SIZE, + DMA_BIDIRECTIONAL, + (dma_map->cache_type == NV_MEMORY_UNCACHED) ? + DMA_ATTR_SKIP_CPU_SYNC : 0); +#else + *va = dma_map_page(dma_map->dev, dma_map->pages[0], 0, + dma_map->page_count * PAGE_SIZE, DMA_BIDIRECTIONAL); +#endif + if (dma_mapping_error(dma_map->dev, *va)) + { + return NV_ERR_OPERATING_SYSTEM; + } + + dma_map->mapping.contig.dma_addr = *va; + + if (!nv_dma_is_addressable(dma_dev, *va, dma_map->page_count * PAGE_SIZE)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "DMA address not in addressable range of device " + "(0x%llx-0x%llx, 0x%llx-0x%llx)\n", + *va, *va + (dma_map->page_count * PAGE_SIZE - 1), + dma_dev->addressable_range.start, + dma_dev->addressable_range.limit); + nv_dma_unmap_contig(dma_map); + return NV_ERR_INVALID_ADDRESS; + } + + return NV_OK; +} + +static void nv_dma_unmap_contig(nv_dma_map_t *dma_map) +{ +#if defined(NV_DMA_MAP_PAGE_ATTRS_PRESENT) && defined(NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT) + dma_unmap_page_attrs(dma_map->dev, dma_map->mapping.contig.dma_addr, + dma_map->page_count * PAGE_SIZE, + DMA_BIDIRECTIONAL, + (dma_map->cache_type == NV_MEMORY_UNCACHED) ? + DMA_ATTR_SKIP_CPU_SYNC : 0); +#else + dma_unmap_page(dma_map->dev, dma_map->mapping.contig.dma_addr, + dma_map->page_count * PAGE_SIZE, DMA_BIDIRECTIONAL); +#endif +} + +static void nv_fill_scatterlist +( + struct scatterlist *sgl, + struct page **pages, + unsigned int page_count +) +{ + unsigned int i; + struct scatterlist *sg; +#if defined(for_each_sg) + for_each_sg(sgl, sg, page_count, i) + { + sg_set_page(sg, pages[i], PAGE_SIZE, 0); + } +#else + for (i = 0; i < page_count; i++) + { + sg = &(sgl)[i]; + sg->page = pages[i]; + sg->length = PAGE_SIZE; + sg->offset = 0; + } +#endif +} + +NV_STATUS nv_create_dma_map_scatterlist(nv_dma_map_t *dma_map) +{ + /* + * We need to split our mapping into at most 4GB - PAGE_SIZE chunks. + * The Linux kernel stores the length (and offset) of a scatter-gather + * segment as an unsigned int, so it will overflow if we try to do + * anything larger. + */ + NV_STATUS status; + nv_dma_submap_t *submap; + NvU32 i; + NvU64 allocated_size = 0; + NvU64 num_submaps = dma_map->page_count + NV_DMA_SUBMAP_MAX_PAGES - 1; + NvU64 total_size = dma_map->page_count << PAGE_SHIFT; + + /* + * This turns into 64-bit division, which the ARMv7 kernel doesn't provide + * implicitly. 
Instead, we need to use the platform's do_div() to perform
+     * the division.
+     */
+    do_div(num_submaps, NV_DMA_SUBMAP_MAX_PAGES);
+
+    WARN_ON(NvU64_HI32(num_submaps) != 0);
+
+    if (dma_map->import_sgt && (num_submaps != 1))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    dma_map->mapping.discontig.submap_count = NvU64_LO32(num_submaps);
+
+    status = os_alloc_mem((void **)&dma_map->mapping.discontig.submaps,
+        sizeof(nv_dma_submap_t) * dma_map->mapping.discontig.submap_count);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    os_mem_set((void *)dma_map->mapping.discontig.submaps, 0,
+        sizeof(nv_dma_submap_t) * dma_map->mapping.discontig.submap_count);
+
+    /* If we have an imported SGT, just use that directly. */
+    if (dma_map->import_sgt)
+    {
+        dma_map->mapping.discontig.submaps[0].page_count = dma_map->page_count;
+        dma_map->mapping.discontig.submaps[0].sgt = *dma_map->import_sgt;
+        dma_map->mapping.discontig.submaps[0].imported = NV_TRUE;
+
+        return status;
+    }
+
+    NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i)
+    {
+        NvU64 submap_size = NV_MIN(NV_DMA_SUBMAP_MAX_PAGES << PAGE_SHIFT,
+                                   total_size - allocated_size);
+
+        submap->page_count = (NvU32)(submap_size >> PAGE_SHIFT);
+
+        status = NV_ALLOC_DMA_SUBMAP_SCATTERLIST(dma_map, submap, i);
+        if (status != NV_OK)
+        {
+            submap->page_count = 0;
+            break;
+        }
+
+#if defined(NV_DOM0_KERNEL_PRESENT)
+        {
+            NvU64 page_idx = NV_DMA_SUBMAP_IDX_TO_PAGE_IDX(i);
+            nv_fill_scatterlist(submap->sgt.sgl,
+                &dma_map->pages[page_idx], submap->page_count);
+        }
+#endif
+
+        allocated_size += submap_size;
+    }
+
+    WARN_ON(allocated_size != total_size);
+
+    if (status != NV_OK)
+    {
+        nv_destroy_dma_map_scatterlist(dma_map);
+    }
+
+    return status;
+}
+
+NV_STATUS nv_map_dma_map_scatterlist(nv_dma_map_t *dma_map)
+{
+    NV_STATUS status = NV_OK;
+    nv_dma_submap_t *submap;
+    NvU64 i;
+
+    NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i)
+    {
+        /* Imported SGTs will have already been mapped by the exporter. */
+        submap->sg_map_count = submap->imported ?
+            submap->sgt.orig_nents :
+            dma_map_sg(dma_map->dev,
+                       submap->sgt.sgl,
+                       submap->sgt.orig_nents,
+                       DMA_BIDIRECTIONAL);
+        if (submap->sg_map_count == 0)
+        {
+            status = NV_ERR_OPERATING_SYSTEM;
+            break;
+        }
+    }
+
+    if (status != NV_OK)
+    {
+        nv_unmap_dma_map_scatterlist(dma_map);
+    }
+
+    return status;
+}
+
+void nv_unmap_dma_map_scatterlist(nv_dma_map_t *dma_map)
+{
+    nv_dma_submap_t *submap;
+    NvU64 i;
+
+    NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i)
+    {
+        if (submap->sg_map_count == 0)
+        {
+            break;
+        }
+
+        if (submap->imported)
+        {
+            /* Imported SGTs will be unmapped by the exporter. */
+            continue;
+        }
+
+        dma_unmap_sg(dma_map->dev, submap->sgt.sgl,
+                     submap->sgt.orig_nents,
+                     DMA_BIDIRECTIONAL);
+    }
+}
+
+void nv_destroy_dma_map_scatterlist(nv_dma_map_t *dma_map)
+{
+    nv_dma_submap_t *submap;
+    NvU64 i;
+
+    NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i)
+    {
+        if ((submap->page_count == 0) || submap->imported)
+        {
+            break;
+        }
+
+        sg_free_table(&submap->sgt);
+    }
+
+    os_free_mem(dma_map->mapping.discontig.submaps);
+}
+
+static void nv_load_dma_map_scatterlist(
+    nv_dma_map_t *dma_map,
+    NvU64 *va_array
+)
+{
+    unsigned int i, j;
+    struct scatterlist *sg;
+    nv_dma_submap_t *submap;
+    NvU64 sg_addr, sg_off, sg_len, k, l = 0;
+
+    NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i)
+    {
+        for_each_sg(submap->sgt.sgl, sg, submap->sg_map_count, j)
+        {
+            /*
+             * It is possible for dma_map_sg() to merge scatterlist entries, so
+             * make sure we account for that here.
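+             *
+             * For example (hypothetical layout): two adjacent 4K pages may come
+             * back merged as a single 8K segment; the loop below still steps
+             * through each segment in PAGE_SIZE increments, so va_array keeps
+             * exactly one DMA address per page.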
+ */ + for (sg_addr = sg_dma_address(sg), sg_len = sg_dma_len(sg), + sg_off = 0, k = 0; + (sg_off < sg_len) && (k < submap->page_count); + sg_off += PAGE_SIZE, l++, k++) + { + va_array[l] = sg_addr + sg_off; + } + } + } +} + +static NV_STATUS nv_dma_map_scatterlist( + nv_dma_device_t *dma_dev, + nv_dma_map_t *dma_map, + NvU64 *va_array +) +{ + NV_STATUS status; + NvU64 i; + + status = nv_create_dma_map_scatterlist(dma_map); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to allocate DMA mapping scatterlist!\n"); + return status; + } + + status = nv_map_dma_map_scatterlist(dma_map); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to create a DMA mapping!\n"); + nv_destroy_dma_map_scatterlist(dma_map); + return status; + } + + nv_load_dma_map_scatterlist(dma_map, va_array); + + for (i = 0; i < dma_map->page_count; i++) + { + if (!nv_dma_is_addressable(dma_dev, va_array[i], PAGE_SIZE)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "DMA address not in addressable range of device " + "(0x%llx, 0x%llx-0x%llx)\n", + va_array[i], dma_dev->addressable_range.start, + dma_dev->addressable_range.limit); + nv_dma_unmap_scatterlist(dma_map); + return NV_ERR_INVALID_ADDRESS; + } + } + + return NV_OK; +} + +static void nv_dma_unmap_scatterlist(nv_dma_map_t *dma_map) +{ + nv_unmap_dma_map_scatterlist(dma_map); + nv_destroy_dma_map_scatterlist(dma_map); +} + +NV_STATUS NV_API_CALL nv_dma_map_sgt( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va_array, + NvU32 cache_type, + void **priv +) +{ + NV_STATUS status; + nv_dma_map_t *dma_map = NULL; + + if (priv == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (page_count > get_num_physpages()) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "DMA mapping request too large!\n"); + return NV_ERR_INVALID_REQUEST; + } + + status = os_alloc_mem((void **)&dma_map, sizeof(nv_dma_map_t)); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to allocate nv_dma_map_t!\n"); + return status; + } + + dma_map->dev = dma_dev->dev; + dma_map->pages = NULL; + dma_map->import_sgt = (struct sg_table *) *priv; + dma_map->page_count = page_count; + dma_map->contiguous = NV_FALSE; + dma_map->cache_type = cache_type; + + dma_map->mapping.discontig.submap_count = 0; + status = nv_dma_map_scatterlist(dma_dev, dma_map, va_array); + + if (status != NV_OK) + { + os_free_mem(dma_map); + } + else + { + *priv = dma_map; + } + + return status; +} + +static NV_STATUS NV_API_CALL nv_dma_unmap_sgt( + nv_dma_device_t *dma_dev, + void **priv +) +{ + nv_dma_map_t *dma_map; + + if (priv == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + dma_map = *priv; + + *priv = NULL; + + nv_dma_unmap_scatterlist(dma_map); + + os_free_mem(dma_map); + + return NV_OK; +} + +static NV_STATUS NV_API_CALL nv_dma_map_pages( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va_array, + NvBool contig, + NvU32 cache_type, + void **priv +) +{ + NV_STATUS status; + nv_dma_map_t *dma_map = NULL; + + if (priv == NULL) + { + /* + * IOMMU path has not been implemented yet to handle + * anything except a nv_dma_map_t as the priv argument. 
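+         *
+         * (For reference: callers pass their struct page ** array in *priv;
+         * on success it is replaced by the nv_dma_map_t describing the new
+         * mapping, which must later be handed back to nv_dma_unmap_pages().)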
+ */ + return NV_ERR_NOT_SUPPORTED; + } + + if (page_count > get_num_physpages()) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "DMA mapping request too large!\n"); + return NV_ERR_INVALID_REQUEST; + } + + status = os_alloc_mem((void **)&dma_map, sizeof(nv_dma_map_t)); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to allocate nv_dma_map_t!\n"); + return status; + } + + dma_map->dev = dma_dev->dev; + dma_map->pages = *priv; + dma_map->import_sgt = NULL; + dma_map->page_count = page_count; + dma_map->contiguous = contig; + dma_map->cache_type = cache_type; + + if (dma_map->page_count > 1 && !dma_map->contiguous) + { + dma_map->mapping.discontig.submap_count = 0; + status = nv_dma_map_scatterlist(dma_dev, dma_map, va_array); + } + else + { + /* + * Force single-page mappings to be contiguous to avoid scatterlist + * overhead. + */ + dma_map->contiguous = NV_TRUE; + + status = nv_dma_map_contig(dma_dev, dma_map, va_array); + } + + if (status != NV_OK) + { + os_free_mem(dma_map); + } + else + { + *priv = dma_map; + } + + return status; +} + +static NV_STATUS NV_API_CALL nv_dma_unmap_pages( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va_array, + void **priv +) +{ + nv_dma_map_t *dma_map; + + if (priv == NULL) + { + /* + * IOMMU path has not been implemented yet to handle + * anything except a nv_dma_map_t as the priv argument. + */ + return NV_ERR_NOT_SUPPORTED; + } + + dma_map = *priv; + + if (page_count > get_num_physpages()) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "DMA unmapping request too large!\n"); + return NV_ERR_INVALID_REQUEST; + } + + if (page_count != dma_map->page_count) + { + NV_DMA_DEV_PRINTF(NV_DBG_WARNINGS, dma_dev, + "Requested to DMA unmap %llu pages, but there are %llu " + "in the mapping\n", page_count, dma_map->page_count); + return NV_ERR_INVALID_REQUEST; + } + + *priv = dma_map->pages; + + if (dma_map->contiguous) + { + nv_dma_unmap_contig(dma_map); + } + else + { + nv_dma_unmap_scatterlist(dma_map); + } + + os_free_mem(dma_map); + + return NV_OK; +} + +/* + * Wrappers used for DMA-remapping an nv_alloc_t during transition to more + * generic interfaces. + */ +NV_STATUS NV_API_CALL nv_dma_map_alloc +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va_array, + NvBool contig, + void **priv +) +{ + NV_STATUS status; + NvU64 i; + nv_alloc_t *at = *priv; + struct page **pages = NULL; + NvU32 cache_type = NV_MEMORY_CACHED; + NvU64 pages_size = sizeof(struct page *) * (contig ? 1 : page_count); + + /* If we have an imported SGT, just use that directly. */ + if (at && at->import_sgt) + { + *priv = at->import_sgt; + status = nv_dma_map_sgt(dma_dev, page_count, va_array, at->cache_type, + priv); + if (status != NV_OK) + { + *priv = at; + } + return status; + } + + /* + * Convert the nv_alloc_t into a struct page * array for + * nv_dma_map_pages(). 
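+     *
+     * (Only pages[0] is consulted for contiguous mappings, which is why
+     * pages_size above covers a single pointer when contig is true.)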
+ */ + status = os_alloc_mem((void **)&pages, pages_size); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to allocate page array for DMA mapping!\n"); + return status; + } + + os_mem_set(pages, 0, pages_size); + + if (at != NULL) + { + WARN_ON(page_count != at->num_pages); + + if (at->flags.user) + { + pages[0] = at->user_pages[0]; + if (!contig) + { + for (i = 1; i < page_count; i++) + { + pages[i] = at->user_pages[i]; + } + } + } + else if (at->flags.physical && contig) + { + /* Supplied pages hold physical address */ + pages[0] = pfn_to_page(PFN_DOWN(va_array[0])); + } + cache_type = at->cache_type; + } + + if (pages[0] == NULL) + { + pages[0] = NV_GET_PAGE_STRUCT(va_array[0]); + if (!contig) + { + for (i = 1; i < page_count; i++) + { + pages[i] = NV_GET_PAGE_STRUCT(va_array[i]); + } + } + } + + *priv = pages; + status = nv_dma_map_pages(dma_dev, page_count, va_array, contig, cache_type, + priv); + if (status != NV_OK) + { + *priv = at; + os_free_mem(pages); + } + + return status; +} + +NV_STATUS NV_API_CALL nv_dma_unmap_alloc +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va_array, + void **priv +) +{ + NV_STATUS status = NV_OK; + nv_dma_map_t *dma_map; + + if (priv == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + dma_map = *priv; + + if (!dma_map->import_sgt) + { + status = nv_dma_unmap_pages(dma_dev, page_count, va_array, priv); + if (status != NV_OK) + { + /* + * If nv_dma_unmap_pages() fails, we hit an assert condition and the + * priv argument won't be the page array we allocated in + * nv_dma_map_alloc(), so we skip the free here. But note that since + * this is an assert condition it really should never happen. + */ + return status; + } + + /* Free the struct page * array allocated by nv_dma_map_alloc() */ + os_free_mem(*priv); + } else { + status = nv_dma_unmap_sgt(dma_dev, priv); + } + + return status; +} + +static NvBool nv_dma_use_map_resource +( + nv_dma_device_t *dma_dev +) +{ + const struct dma_map_ops *ops = get_dma_ops(dma_dev->dev); + + if (nv_dma_remap_peer_mmio == NV_DMA_REMAP_PEER_MMIO_DISABLE) + { + return NV_FALSE; + } + + if (ops == NULL) + { + /* On pre-5.0 kernels, if dma_map_resource() is present, then we + * assume that ops != NULL. With direct_dma handling swiotlb on 5.0+ + * kernels, ops == NULL. + */ +#if defined(NV_DMA_IS_DIRECT_PRESENT) + return NV_TRUE; +#else + return NV_FALSE; +#endif + } + + return (ops->map_resource != NULL); +} + +/* DMA-map a peer device's C2C aperture for peer access. */ +NV_STATUS NV_API_CALL nv_dma_map_non_pci_peer +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va +) +{ + NV_STATUS status; + + if (nv_dma_use_map_resource(dma_dev)) + { + status = nv_dma_map_mmio(dma_dev, page_count, va); + } + else + { + /* + * Best effort - can't map through the iommu but at least try to + * use SPA as is. + */ + status = NV_OK; + } + + return status; +} + +/* DMA-map a peer PCI device's BAR for peer access. 
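+ *
+ * A minimal calling sketch (hypothetical values; *va carries the CPU physical
+ * address of the peer BAR range on input and the DMA/bus address on output):
+ *
+ *   NvU64 va = peer_bar_phys_addr;   // hypothetical BAR address
+ *   status = nv_dma_map_peer(dma_dev, peer_dma_dev, 1, page_count, &va);
+ *   // on NV_OK, 'va' is the address dma_dev should use to reach the peer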
*/ +NV_STATUS NV_API_CALL nv_dma_map_peer +( + nv_dma_device_t *dma_dev, + nv_dma_device_t *peer_dma_dev, + NvU8 nv_bar_index, + NvU64 page_count, + NvU64 *va +) +{ + struct pci_dev *peer_pci_dev = to_pci_dev(peer_dma_dev->dev); + struct resource *res; + NvU8 bar_index; + NV_STATUS status; + + if (peer_pci_dev == NULL) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, peer_dma_dev, + "Not a PCI device"); + return NV_ERR_INVALID_REQUEST; + } + + bar_index = nv_bar_index_to_os_bar_index(peer_pci_dev, nv_bar_index); + res = &peer_pci_dev->resource[bar_index]; + if (res->start == 0) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, peer_dma_dev, + "Resource %u not valid", + bar_index); + return NV_ERR_INVALID_REQUEST; + } + + if ((*va < res->start) || ((*va + (page_count * PAGE_SIZE)) > res->end)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, peer_dma_dev, + "Mapping requested (start = 0x%llx, page_count = 0x%llx)" + " outside of resource bounds (start = 0x%llx, end = 0x%llx)\n", + *va, page_count, res->start, res->end); + return NV_ERR_INVALID_REQUEST; + } + + if (nv_dma_use_map_resource(dma_dev)) + { + status = nv_dma_map_mmio(dma_dev, page_count, va); + } + else + { + /* + * Best effort - can't map through the iommu but at least try to + * convert to a bus address. + */ + NvU64 offset = *va - res->start; + *va = pci_bus_address(peer_pci_dev, bar_index) + offset; + status = NV_OK; + } + + return status; +} + +void NV_API_CALL nv_dma_unmap_peer +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 va +) +{ + if (nv_dma_use_map_resource(dma_dev)) + { + nv_dma_unmap_mmio(dma_dev, page_count, va); + } +} + +/* DMA-map another anonymous device's MMIO region for peer access. */ +NV_STATUS NV_API_CALL nv_dma_map_mmio +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va +) +{ + BUG_ON(!va); + + if (nv_dma_use_map_resource(dma_dev)) + { + NvU64 mmio_addr = *va; + *va = dma_map_resource(dma_dev->dev, mmio_addr, page_count * PAGE_SIZE, + DMA_BIDIRECTIONAL, 0); + if (dma_mapping_error(dma_dev->dev, *va)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to DMA map MMIO range [0x%llx-0x%llx]\n", + mmio_addr, mmio_addr + page_count * PAGE_SIZE - 1); + return NV_ERR_OPERATING_SYSTEM; + } + } + else + { + /* + * If dma_map_resource is not available, pass through the source address + * without failing. Further, adjust it using the DMA start address to + * keep RM's validation schemes happy. + */ + *va = *va + dma_dev->addressable_range.start; + } + + return NV_OK; +} + +void NV_API_CALL nv_dma_unmap_mmio +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 va +) +{ + if (nv_dma_use_map_resource(dma_dev)) + { + dma_unmap_resource(dma_dev->dev, va, page_count * PAGE_SIZE, + DMA_BIDIRECTIONAL, 0); + } +} + +/* + * Invalidate DMA mapping in CPU caches by "syncing" to the device. + * + * This is only implemented for ARM platforms, since other supported + * platforms are cache coherent and have not required this (we + * explicitly haven't supported SWIOTLB bounce buffering either where + * this would be needed). 
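+ *
+ * A plausible call site (a sketch, not mandated by this file): pass the priv
+ * pointer returned by nv_dma_map_pages() once the device has finished writing,
+ * so the CPU does not read stale cache lines for the device-written data.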
+ */ +void NV_API_CALL nv_dma_cache_invalidate +( + nv_dma_device_t *dma_dev, + void *priv +) +{ +#if defined(NVCPU_AARCH64) + nv_dma_map_t *dma_map = priv; + + if (dma_map->contiguous) + { + dma_sync_single_for_device(dma_dev->dev, + dma_map->mapping.contig.dma_addr, + (size_t) PAGE_SIZE * dma_map->page_count, + DMA_FROM_DEVICE); + } + else + { + nv_dma_submap_t *submap; + NvU64 i; + + NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i) + { + dma_sync_sg_for_device(dma_dev->dev, + submap->sgt.sgl, + submap->sgt.orig_nents, + DMA_FROM_DEVICE); + } + } +#endif +} + +#if defined(NV_DRM_AVAILABLE) + +static inline void +nv_dma_gem_object_put_unlocked(struct drm_gem_object *gem) +{ +#if defined(NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT) + drm_gem_object_put_unlocked(gem); +#else + drm_gem_object_put(gem); +#endif +} + +NV_STATUS NV_API_CALL nv_dma_import_sgt +( + nv_dma_device_t *dma_dev, + struct sg_table *sgt, + struct drm_gem_object *gem +) +{ + if ((dma_dev == NULL) || + (sgt == NULL) || + (gem == NULL)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Import arguments are NULL!\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // Prevent the kernel module controlling GEM from being unloaded + if (!try_module_get(gem->dev->driver->fops->owner)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Couldn't reference the GEM object's owner!\n"); + return NV_ERR_INVALID_DEVICE; + } + + // Do nothing with SGT, it is already mapped and pinned by the exporter + + drm_gem_object_get(gem); + + return NV_OK; +} + +void NV_API_CALL nv_dma_release_sgt +( + struct sg_table *sgt, + struct drm_gem_object *gem +) +{ + if (gem == NULL) + { + return; + } + + // Do nothing with SGT, it will be unmapped and unpinned by the exporter + WARN_ON(sgt == NULL); + + nv_dma_gem_object_put_unlocked(gem); + + module_put(gem->dev->driver->fops->owner); +} + +#else + +NV_STATUS NV_API_CALL nv_dma_import_sgt +( + nv_dma_device_t *dma_dev, + struct sg_table *sgt, + struct drm_gem_object *gem +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void NV_API_CALL nv_dma_release_sgt +( + struct sg_table *sgt, + struct drm_gem_object *gem +) +{ +} +#endif /* NV_DRM_AVAILABLE */ diff --git a/kernel-open/nvidia/nv-dmabuf.c b/kernel-open/nvidia/nv-dmabuf.c new file mode 100644 index 0000000..a57a776 --- /dev/null +++ b/kernel-open/nvidia/nv-dmabuf.c @@ -0,0 +1,1866 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/dma-buf.h>
+#include "nv-dmabuf.h"
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+
+typedef struct nv_dma_buf_mem_handle
+{
+    // Memory handle, offset and size
+    NvHandle h_memory;
+    NvU64    offset;
+    NvU64    size;
+
+    // RM memdesc specific data
+    void    *mem_info;
+
+    //
+    // Refcount for phys addresses.
+    // If refcount > 0, the phys address ranges in memArea are reused.
+    //
+    NvU64    phys_refcount;
+
+    // Scatterlist of all the memory ranges associated with the buf
+    MemoryArea memArea;
+} nv_dma_buf_mem_handle_t;
+
+typedef struct nv_dma_buf_file_private
+{
+    // GPU device state
+    nv_state_t *nv;
+
+    // Client, device, subdevice handles
+    NvHandle h_client;
+    NvHandle h_device;
+    NvHandle h_subdevice;
+
+    // Total number of handles supposed to be attached to this dma-buf
+    NvU32 total_objects;
+
+    //
+    // Number of handles actually attached to this dma-buf.
+    // This should equal total_objects, or map fails.
+    //
+    NvU32 num_objects;
+
+    // Total size of all handles supposed to be attached to this dma-buf
+    NvU64 total_size;
+
+    //
+    // Size of all handles actually attached to the dma-buf.
+    // If all handles are attached, total_size and attached_size must match.
+    //
+    NvU64 attached_size;
+
+    // Mutex to lock priv state during dma-buf callbacks
+    struct mutex lock;
+
+    // Handle info: see nv_dma_buf_mem_handle_t
+    nv_dma_buf_mem_handle_t *handles;
+
+    // RM-private info for MIG configs
+    void *mig_info;
+
+    //
+    // Flag to indicate if phys addresses are static and can be
+    // fetched during dma-buf create/reuse instead of in map.
+    //
+    NvBool static_phys_addrs;
+
+    //
+    // Type of mapping requested, one of:
+    // NV_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT
+    // NV_DMABUF_EXPORT_MAPPING_TYPE_FORCE_PCIE
+    //
+    NvU8 mapping_type;
+
+    //
+    // On some coherent platforms requesting mapping_type FORCE_PCIE,
+    // peer-to-peer is expected to bypass the IOMMU due to hardware
+    // limitations. On such systems, IOMMU map/unmap will be skipped.
+    //
+    NvBool skip_iommu;
+
+    struct
+    {
+        // True if the map attributes are cached
+        NvBool cached;
+
+        // Flag to indicate if dma-buf mmap is allowed
+        NvBool can_mmap;
+
+        //
+        // Flag to indicate whether the client/user is allowed dma-buf mmap.
+        // That way the user can enable mmap for testing/specific
+        // use cases rather than for all handles.
+        //
+        NvU64 allow_mmap;
+
+        // RM-private info for cache type settings (cached/uncached/writecombined).
+        NvU32 cache_type;
+
+        // Flag to indicate if dma-buf is RO or RW memory.
+        NvBool read_only_mem;
+
+        // Memory type info: see nv_memory_type_t.
+        nv_memory_type_t memory_type;
+    } map_attrs;
+
+    //
+    // Flag to indicate whether all GPU locks must be acquired/released
+    // before/after calling rm_dma_buf_dup_mem_handle().
+    // nv_dma_buf_dup_mem_handles() acquires the GPU lock only for the calling
+    // pGPU instance. However, that is not sufficient for DupObject()'s SYSMEM
+    // design, which expects the caller either to hold all GPU locks or to
+    // hold none at all. This flag is set to NV_TRUE only for
+    // ZERO_FB chips.
+ // + NvBool acquire_release_all_gpu_lock_on_dup; +} nv_dma_buf_file_private_t; + +static void +nv_dma_buf_free_file_private( + nv_dma_buf_file_private_t *priv +) +{ + if (priv == NULL) + { + return; + } + + if (priv->handles != NULL) + { + os_free_mem(priv->handles); + priv->handles = NULL; + } + + mutex_destroy(&priv->lock); + + NV_KFREE(priv, sizeof(nv_dma_buf_file_private_t)); +} + +static nv_dma_buf_file_private_t* +nv_dma_buf_alloc_file_private( + NvU32 num_handles +) +{ + nv_dma_buf_file_private_t *priv = NULL; + NvU64 handles_size = num_handles * sizeof(priv->handles[0]); + NV_STATUS status; + + NV_KZALLOC(priv, sizeof(nv_dma_buf_file_private_t)); + if (priv == NULL) + { + return NULL; + } + + mutex_init(&priv->lock); + + status = os_alloc_mem((void **) &priv->handles, handles_size); + if (status != NV_OK) + { + goto failed; + } + os_mem_set(priv->handles, 0, handles_size); + + return priv; + +failed: + nv_dma_buf_free_file_private(priv); + + return NULL; +} + +static void +nv_reset_phys_refcount( + nv_dma_buf_file_private_t *priv, + NvU32 start_index, + NvU32 handle_count +) +{ + NvU32 i; + for (i = 0; i < handle_count; i++) + { + NvU32 index = start_index + i; + priv->handles[index].phys_refcount = 0; + } +} + +static NvBool +nv_dec_and_check_zero_phys_refcount( + nv_dma_buf_file_private_t *priv, + NvU32 start_index, + NvU32 handle_count +) +{ + NvU32 i; + NvBool is_zero = NV_FALSE; + + for (i = 0; i < handle_count; i++) + { + NvU32 index = start_index + i; + priv->handles[index].phys_refcount--; + if (priv->handles[index].phys_refcount == 0) + { + is_zero = NV_TRUE; + } + } + + return is_zero; +} + +static NvBool +nv_inc_and_check_one_phys_refcount( + nv_dma_buf_file_private_t *priv, + NvU32 start_index, + NvU32 handle_count +) +{ + NvU32 i; + NvBool is_one = NV_FALSE; + + for (i = 0; i < handle_count; i++) + { + NvU32 index = start_index + i; + priv->handles[index].phys_refcount++; + if (priv->handles[index].phys_refcount == 1) + { + is_one = NV_TRUE; + } + } + + return is_one; +} + +// Must be called with RMAPI lock and GPU lock taken +static void +nv_dma_buf_undup_mem_handles_unlocked( + nvidia_stack_t *sp, + NvU32 start_index, + NvU32 num_objects, + nv_dma_buf_file_private_t *priv +) +{ + NvU32 index, i; + + for (i = 0; i < num_objects; i++) + { + index = start_index + i; + + if (priv->handles[index].h_memory == 0) + { + continue; + } + + rm_dma_buf_undup_mem_handle(sp, priv->nv, priv->h_client, + priv->handles[index].h_memory); + + priv->attached_size -= priv->handles[index].size; + priv->handles[index].h_memory = 0; + priv->handles[index].offset = 0; + priv->handles[index].size = 0; + priv->num_objects--; + } +} + +static void +nv_dma_buf_undup_mem_handles( + nvidia_stack_t *sp, + NvU32 index, + NvU32 num_objects, + nv_dma_buf_file_private_t *priv +) +{ + NV_STATUS status; + + status = rm_acquire_api_lock(sp); + if (WARN_ON(status != NV_OK)) + { + return; + } + + status = rm_acquire_all_gpus_lock(sp); + if (WARN_ON(status != NV_OK)) + { + goto unlock_api_lock; + } + + nv_dma_buf_undup_mem_handles_unlocked(sp, index, num_objects, priv); + + rm_release_all_gpus_lock(sp); + +unlock_api_lock: + rm_release_api_lock(sp); +} + +// +// TODO: Temporary work around for SYSMEM Dup issue. +// Take all GPU locks before calling the DupObject(). +// DupObject() requires the caller to either acquire all GPU locks beforehand or +// refrain from acquiring any GPU locks before invoking it. 
+// Otherwise DupObject() will fail for already locked gpu instance with below error print +// for multi gpu instance use case: +// "GPU lock already acquired by this thread" for gpuInst which is already locked during +// nv_dma_buf_dup_mem_handles(). +// In TOT, nv_dma_buf_dup_mem_handles() acquires GPU lock only for calling pGPU +// instance. However, it is not sufficient as per DupObject() SYSMEM's design since it expects +// either all GPU locks to be acquired by the caller or do not take any GPU locks. +// PDB_PROP_GPU_ZERO_FB chips (iGPU) doesn't have local memory. In this case, +// SYSMEM is used as Device resources. priv->acquire_release_all_gpu_lock_on_dup flag set as +// NV_TRUE only for PDB_PROP_GPU_ZERO_FB chips. +// +// Proper Fix (Bug 4866388): +// The RS_FLAGS_ACQUIRE_RELAXED_GPUS_LOCK_ON_DUP flag was introduced to allow an +// RM class to take GPU Group Lock if the source and the destination object +// belongs to the same pGpu. Take all GPUs lock otherwise. +// With above change, we are seeing test failures. +// Until the above proper fix is added, we need to rely on temporary work around. +// +static inline NV_STATUS +nv_dma_buf_acquire_gpu_lock( + nvidia_stack_t *sp, + nv_dma_buf_file_private_t *priv +) +{ + return (priv->acquire_release_all_gpu_lock_on_dup ? + rm_acquire_all_gpus_lock(sp): rm_acquire_gpu_lock(sp, priv->nv)); +} + +static inline NV_STATUS +nv_dma_buf_release_gpu_lock( + nvidia_stack_t *sp, + nv_dma_buf_file_private_t *priv +) +{ + return (priv->acquire_release_all_gpu_lock_on_dup ? + rm_release_all_gpus_lock(sp): rm_release_gpu_lock(sp, priv->nv)); +} + +static NV_STATUS +nv_dma_buf_dup_mem_handles( + nvidia_stack_t *sp, + nv_dma_buf_file_private_t *priv, + nv_ioctl_export_to_dma_buf_fd_t *params +) +{ + NV_STATUS status = NV_OK; + NvU32 index = params->index; + NvU32 count = 0; + NvU32 i = 0; + + status = rm_acquire_api_lock(sp); + if (status != NV_OK) + { + return status; + } + + status = nv_dma_buf_acquire_gpu_lock(sp, priv); + if (status != NV_OK) + { + goto unlock_api_lock; + } + + for (i = 0; i < params->numObjects; i++) + { + NvHandle h_memory_duped = 0; + void *mem_info = NULL; + nv_memory_type_t memory_type = NV_MEMORY_TYPE_SYSTEM; + NvBool can_mmap; + NvU32 cache_type; + NvBool read_only_mem; + + if (priv->handles[index].h_memory != 0) + { + status = NV_ERR_IN_USE; + goto failed; + } + + if (params->sizes[i] > priv->total_size - priv->attached_size) + { + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + status = rm_dma_buf_dup_mem_handle(sp, priv->nv, + params->hClient, + priv->h_client, + priv->h_device, + priv->h_subdevice, + priv->mig_info, + params->handles[i], + params->offsets[i], + params->sizes[i], + &h_memory_duped, + &mem_info, + &can_mmap, + &cache_type, + &read_only_mem, + &memory_type); + if (status != NV_OK) + { + goto failed; + } + + if (priv->map_attrs.cached) + { + if ((can_mmap != priv->map_attrs.can_mmap) || + (cache_type != priv->map_attrs.cache_type) || + (read_only_mem != priv->map_attrs.read_only_mem) || + (memory_type != priv->map_attrs.memory_type)) + { + // Creating mixed dma_buf is not supported. + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + } + else + { + // Store the handle's mmap, RO and cache type info. 
+ priv->map_attrs.can_mmap = can_mmap; + priv->map_attrs.cache_type = cache_type; + priv->map_attrs.read_only_mem = read_only_mem; + priv->map_attrs.memory_type = memory_type; + priv->map_attrs.cached = NV_TRUE; + } + + priv->attached_size += params->sizes[i]; + priv->handles[index].h_memory = h_memory_duped; + priv->handles[index].offset = params->offsets[i]; + priv->handles[index].size = params->sizes[i]; + priv->handles[index].mem_info = mem_info; + priv->num_objects++; + index++; + count++; + } + + if ((priv->num_objects == priv->total_objects) && + (priv->attached_size != priv->total_size)) + { + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + nv_dma_buf_release_gpu_lock(sp, priv); + + rm_release_api_lock(sp); + + return NV_OK; + +failed: + nv_dma_buf_undup_mem_handles_unlocked(sp, params->index, count, priv); + + nv_dma_buf_release_gpu_lock(sp, priv); + +unlock_api_lock: + rm_release_api_lock(sp); + + return status; +} + +static void +nv_put_phys_addresses( + nvidia_stack_t *sp, + nv_dma_buf_file_private_t *priv, + NvU32 start_index, + NvU32 mapped_handle_count +) +{ + NvU32 i; + + for (i = 0; i < mapped_handle_count; i++) + { + NvU32 index = start_index + i; + + if (priv->handles[index].phys_refcount > 0) + { + continue; + } + + // Per-handle memArea is freed by RM + rm_dma_buf_unmap_mem_handle(sp, priv->nv, priv->h_client, + priv->handles[index].h_memory, + priv->mapping_type, + priv->handles[index].mem_info, + priv->static_phys_addrs, + priv->handles[index].memArea); + + priv->handles[index].memArea.numRanges = 0; + } +} + +static void +nv_dma_buf_put_phys_addresses ( + nv_dma_buf_file_private_t *priv, + NvU32 start_index, + NvU32 handle_count +) +{ + NV_STATUS status; + nvidia_stack_t *sp = NULL; + NvBool api_lock_taken = NV_FALSE; + NvBool gpu_lock_taken = NV_FALSE; + int rc = 0; + + if (!nv_dec_and_check_zero_phys_refcount(priv, start_index, handle_count)) + { + return; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (WARN_ON(rc != 0)) + { + return; + } + + if (!priv->static_phys_addrs) + { + status = rm_acquire_api_lock(sp); + if (WARN_ON(status != NV_OK)) + { + goto free_sp; + } + api_lock_taken = NV_TRUE; + + status = rm_acquire_gpu_lock(sp, priv->nv); + if (WARN_ON(status != NV_OK)) + { + goto unlock_api_lock; + } + gpu_lock_taken = NV_TRUE; + } + + nv_put_phys_addresses(sp, priv, start_index, handle_count); + + if (gpu_lock_taken) + { + rm_release_gpu_lock(sp, priv->nv); + } + +unlock_api_lock: + if (api_lock_taken) + { + rm_release_api_lock(sp); + } + +free_sp: + nv_kmem_cache_free_stack(sp); +} + +static NV_STATUS +nv_dma_buf_get_phys_addresses ( + nv_dma_buf_file_private_t *priv, + NvU32 start_index, + NvU32 handle_count +) +{ + NV_STATUS status = NV_OK; + nvidia_stack_t *sp = NULL; + NvBool api_lock_taken = NV_FALSE; + NvBool gpu_lock_taken = NV_FALSE; + NvU32 i; + int rc = 0; + + if (!nv_inc_and_check_one_phys_refcount(priv, start_index, handle_count)) + { + return NV_OK; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + status = NV_ERR_NO_MEMORY; + goto failed; + } + + // + // Locking is not needed for static phys address configs because the memdesc + // is not expected to change in this case and we hold the refcount on the + // owner GPU and memory before referencing it. 
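+    // (nv_inc_and_check_one_phys_refcount() above guarantees the RM map call
+    // below runs only on a handle's 0 -> 1 refcount transition; later callers
+    // simply reuse the phys address ranges already stored in memArea.)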
+ // + if (!priv->static_phys_addrs) + { + status = rm_acquire_api_lock(sp); + if (status != NV_OK) + { + goto free_sp; + } + api_lock_taken = NV_TRUE; + + status = rm_acquire_gpu_lock(sp, priv->nv); + if (status != NV_OK) + { + goto unlock_api_lock; + } + gpu_lock_taken = NV_TRUE; + } + + for (i = 0; i < handle_count; i++) + { + NvU32 index = start_index + i; + + if (priv->handles[index].phys_refcount > 1) + { + continue; + } + + // Per-handle memArea is allocated by RM + status = rm_dma_buf_map_mem_handle(sp, priv->nv, priv->h_client, + priv->handles[index].h_memory, + mrangeMake(priv->handles[index].offset, + priv->handles[index].size), + priv->mapping_type, + priv->handles[index].mem_info, + priv->static_phys_addrs, + &priv->handles[index].memArea); + if (status != NV_OK) + { + goto unmap_handles; + } + } + + if (gpu_lock_taken) + { + rm_release_gpu_lock(sp, priv->nv); + } + + if (api_lock_taken) + { + rm_release_api_lock(sp); + } + + nv_kmem_cache_free_stack(sp); + + return NV_OK; + +unmap_handles: + nv_put_phys_addresses(sp, priv, start_index, i); + + if (gpu_lock_taken) + { + rm_release_gpu_lock(sp, priv->nv); + } + +unlock_api_lock: + if (api_lock_taken) + { + rm_release_api_lock(sp); + } + +free_sp: + nv_kmem_cache_free_stack(sp); + +failed: + nv_reset_phys_refcount(priv, start_index, handle_count); + + return status; +} + +static inline unsigned long +nv_dma_buf_get_dev_attrs( + struct device *dev +) +{ + unsigned long dev_attrs = 0; + +#if defined(NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT) + dev_attrs |= DMA_ATTR_SKIP_CPU_SYNC; +#endif // defined(NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT) + + return dev_attrs; +} + +static void +nv_dma_buf_unmap_pages( + struct device *dev, + struct sg_table *sgt, + nv_dma_buf_file_private_t *priv +) +{ + if (priv->skip_iommu) + { + return; + } + + dma_unmap_sg_attrs(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL, nv_dma_buf_get_dev_attrs(dev)); +} + +static void +nv_dma_buf_unmap_pfns( + struct device *dev, + struct sg_table *sgt, + nv_dma_buf_file_private_t *priv +) +{ + nv_dma_device_t peer_dma_dev = {{ 0 }}; + struct scatterlist *sg = sgt->sgl; + NvU32 i; + + if (priv->skip_iommu) + { + return; + } + + peer_dma_dev.dev = dev; + peer_dma_dev.addressable_range.limit = (NvU64)dev->dma_mask; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) + { + nv_dma_unmap_peer(&peer_dma_dev, + (sg_dma_len(sg) >> PAGE_SHIFT), + sg_dma_address(sg)); + } +} + +static NvU32 +nv_dma_buf_get_sg_count ( + struct device *dev, + nv_dma_buf_file_private_t *priv, + NvU32 *max_seg_size +) +{ + NvU32 dma_max_seg_size, i; + NvU32 nents = 0; + + dma_max_seg_size = NV_ALIGN_DOWN(dma_get_max_seg_size(dev), PAGE_SIZE); + if (dma_max_seg_size < PAGE_SIZE) + { + return 0; + } + + // Calculate nents needed to allocate sg_table + for (i = 0; i < priv->num_objects; i++) + { + NvU32 range_count = priv->handles[i].memArea.numRanges; + NvU32 index; + + for (index = 0; index < range_count; index++) + { + NvU64 length = priv->handles[i].memArea.pRanges[index].size; + NvU64 count = length + dma_max_seg_size - 1; + do_div(count, dma_max_seg_size); + nents += count; + } + } + + *max_seg_size = dma_max_seg_size; + + return nents; +} + +static struct sg_table* +nv_dma_buf_map_pages ( + struct device *dev, + nv_dma_buf_file_private_t *priv +) +{ + struct sg_table *sgt = NULL; + struct scatterlist *sg; + NvU32 dma_max_seg_size = 0; + NvU32 i, nents; + int rc; + + nents = nv_dma_buf_get_sg_count(dev, priv, &dma_max_seg_size); + + NV_KZALLOC(sgt, sizeof(struct sg_table)); + if (sgt == NULL) + { + return NULL; + } + + 
rc = sg_alloc_table(sgt, nents, GFP_KERNEL); + if (rc != 0) + { + goto free_sgt; + } + + sg = sgt->sgl; + + for (i = 0; i < priv->num_objects; i++) + { + NvU32 range_count = priv->handles[i].memArea.numRanges; + NvU32 index = 0; + for (index = 0; index < range_count; index++) + { + NvU64 dma_addr = priv->handles[i].memArea.pRanges[index].start; + NvU64 dma_len = priv->handles[i].memArea.pRanges[index].size; + + // Split each range into dma_max_seg_size chunks + while(dma_len != 0) + { + NvU32 sg_len = NV_MIN(dma_len, dma_max_seg_size); + struct page *page = NV_GET_PAGE_STRUCT(dma_addr); + + if ((page == NULL) || (sg == NULL)) + { + goto free_table; + } + + sg_set_page(sg, page, sg_len, NV_GET_OFFSET_IN_PAGE(dma_addr)); + dma_addr += sg_len; + dma_len -= sg_len; + sg = sg_next(sg); + } + } + } + + WARN_ON(sg != NULL); + + // DMA map the sg_table + rc = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL, nv_dma_buf_get_dev_attrs(dev)); + if (rc <= 0) + { + goto free_table; + } + sgt->nents = rc; + + return sgt; + +free_table: + sg_free_table(sgt); + +free_sgt: + NV_KFREE(sgt, sizeof(struct sg_table)); + + return NULL; +} + +static struct sg_table* +nv_dma_buf_map_pfns ( + struct device *dev, + nv_dma_buf_file_private_t *priv +) +{ + NV_STATUS status; + struct sg_table *sgt = NULL; + struct scatterlist *sg; + nv_dma_device_t peer_dma_dev = {{ 0 }}; + NvU32 dma_max_seg_size = 0; + NvU32 mapped_nents = 0; + NvU32 i = 0; + NvU32 nents; + int rc = 0; + + peer_dma_dev.dev = dev; + peer_dma_dev.addressable_range.limit = (NvU64)dev->dma_mask; + + nents = nv_dma_buf_get_sg_count(dev, priv, &dma_max_seg_size); + + NV_KZALLOC(sgt, sizeof(struct sg_table)); + if (sgt == NULL) + { + return NULL; + } + + rc = sg_alloc_table(sgt, nents, GFP_KERNEL); + if (rc != 0) + { + goto free_sgt; + } + + sg = sgt->sgl; + for (i = 0; i < priv->num_objects; i++) + { + NvU32 range_count = priv->handles[i].memArea.numRanges; + NvU32 index = 0; + + for (index = 0; index < range_count; index++) + { + NvU64 dma_addr = priv->handles[i].memArea.pRanges[index].start; + NvU64 dma_len = priv->handles[i].memArea.pRanges[index].size; + + // Break the scatterlist into dma_max_seg_size chunks + while(dma_len != 0) + { + NvU32 sg_len = NV_MIN(dma_len, dma_max_seg_size); + + if (sg == NULL) + { + goto unmap_pfns; + } + + if (!priv->skip_iommu) + { + if (priv->nv->coherent) + { + status = nv_dma_map_non_pci_peer(&peer_dma_dev, + (sg_len >> PAGE_SHIFT), + &dma_addr); + } + else + { + status = nv_dma_map_peer(&peer_dma_dev, priv->nv->dma_dev, 0x1, + (sg_len >> PAGE_SHIFT), &dma_addr); + } + if (status != NV_OK) + { + goto unmap_pfns; + } + } + + sg_set_page(sg, NULL, sg_len, 0); + sg_dma_address(sg) = (dma_addr_t) dma_addr; + sg_dma_len(sg) = sg_len; + dma_addr += sg_len; + dma_len -= sg_len; + mapped_nents++; + sg = sg_next(sg); + } + } + } + + WARN_ON(sg != NULL); + + sgt->nents = mapped_nents; + + WARN_ON(sgt->nents != sgt->orig_nents); + + return sgt; + +unmap_pfns: + sgt->nents = mapped_nents; + + nv_dma_buf_unmap_pfns(dev, sgt, priv); + + sg_free_table(sgt); + +free_sgt: + NV_KFREE(sgt, sizeof(struct sg_table)); + + return NULL; +} + +static int +nv_dma_buf_attach( + struct dma_buf *buf, +#if defined(NV_DMA_BUF_OPS_ATTACH_ARG2_DEV) + struct device *dev, +#endif + struct dma_buf_attachment *attachment +) +{ + int rc = 0; + nv_dma_buf_file_private_t *priv = buf->priv; + + mutex_lock(&priv->lock); + + if (priv->mapping_type == NV_DMABUF_EXPORT_MAPPING_TYPE_FORCE_PCIE) + { + 
if(!nv_pci_is_valid_topology_for_direct_pci(priv->nv, + to_pci_dev(attachment->dev))) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: dma-buf attach failed: " + "topology not supported for mapping type FORCE_PCIE\n"); + rc = -ENOTSUPP; + goto unlock_priv; + } + + priv->skip_iommu = NV_TRUE; + } + else + { + nv_dma_device_t peer_dma_dev = {{ 0 }}; + + peer_dma_dev.dev = &to_pci_dev(attachment->dev)->dev; + peer_dma_dev.addressable_range.limit = to_pci_dev(attachment->dev)->dma_mask; + + if (!nv_grdma_pci_topology_supported(priv->nv, &peer_dma_dev)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: dma-buf attach failed: " + "PCI topology not supported for dma-buf\n"); + rc = -ENOTSUPP; + goto unlock_priv; + } + } + +#if defined(NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER) + if ((attachment->importer_ops != NULL) && + (!attachment->peer2peer) && + (!priv->nv->mem_has_struct_page)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: dma-buf attach failed: " + "importer unable to handle MMIO without struct page\n"); + rc = -ENOTSUPP; + goto unlock_priv; + } +#endif + +unlock_priv: + mutex_unlock(&priv->lock); + + return rc; +} + +static struct sg_table* +nv_dma_buf_map( + struct dma_buf_attachment *attachment, + enum dma_data_direction direction +) +{ + NV_STATUS status; + struct sg_table *sgt = NULL; + struct dma_buf *buf = attachment->dmabuf; + nv_dma_buf_file_private_t *priv = buf->priv; + + mutex_lock(&priv->lock); + + if (priv->num_objects != priv->total_objects) + { + goto unlock_priv; + } + + if (!priv->static_phys_addrs) + { + status = nv_dma_buf_get_phys_addresses(priv, 0, priv->num_objects); + if (status != NV_OK) + { + goto unlock_priv; + } + } + + // + // For MAPPING_TYPE_FORCE_PCIE on coherent platforms, + // get the BAR1 PFN scatterlist instead of C2C pages. + // + if (priv->nv->mem_has_struct_page && + (priv->mapping_type == NV_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT)) + { + sgt = nv_dma_buf_map_pages(attachment->dev, priv); + } + else + { + sgt = nv_dma_buf_map_pfns(attachment->dev, priv); + } + if (sgt == NULL) + { + goto unmap_handles; + } + + mutex_unlock(&priv->lock); + + return sgt; + +unmap_handles: + if (!priv->static_phys_addrs) + { + nv_dma_buf_put_phys_addresses(priv, 0, priv->num_objects); + } + +unlock_priv: + mutex_unlock(&priv->lock); + + return NULL; +} + +static void +nv_dma_buf_unmap( + struct dma_buf_attachment *attachment, + struct sg_table *sgt, + enum dma_data_direction direction +) +{ + struct dma_buf *buf = attachment->dmabuf; + nv_dma_buf_file_private_t *priv = buf->priv; + + mutex_lock(&priv->lock); + + if (priv->nv->mem_has_struct_page && + (priv->mapping_type == NV_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT)) + { + nv_dma_buf_unmap_pages(attachment->dev, sgt, priv); + } + else + { + nv_dma_buf_unmap_pfns(attachment->dev, sgt, priv); + } + + // + // For static_phys_addrs platforms, this operation is done in release + // since getting the phys_addrs was done in create/reuse. 
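+    // (For the static case, the matching put happens in nv_dma_buf_release(),
+    // mirroring the get issued from nv_dma_buf_create().)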
+ // + if (!priv->static_phys_addrs) + { + nv_dma_buf_put_phys_addresses(priv, 0, priv->num_objects); + } + + sg_free_table(sgt); + + NV_KFREE(sgt, sizeof(struct sg_table)); + + mutex_unlock(&priv->lock); +} + +static void +nv_dma_buf_release( + struct dma_buf *buf +) +{ + int rc = 0; + NvU32 i; + nvidia_stack_t *sp = NULL; + nv_dma_buf_file_private_t *priv = buf->priv; + nv_state_t *nv; + + if (priv == NULL) + { + return; + } + + nv = priv->nv; + + if (priv->static_phys_addrs) + { + nv_dma_buf_put_phys_addresses(priv, 0, priv->num_objects); + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (WARN_ON(rc != 0)) + { + return; + } + + // phys_addr refcounts must be zero at this point + for (i = 0; i < priv->num_objects; i++) + { + WARN_ON(priv->handles[i].phys_refcount > 0); + } + + nv_dma_buf_undup_mem_handles(sp, 0, priv->num_objects, priv); + + rm_dma_buf_put_client_and_device(sp, priv->nv, priv->h_client, priv->h_device, + priv->h_subdevice, priv->mig_info); + + WARN_ON(priv->attached_size > 0); + WARN_ON(priv->num_objects > 0); + + nv_dma_buf_free_file_private(priv); + buf->priv = NULL; + + nvidia_dev_put(nv->gpu_id, sp); + + nv_kmem_cache_free_stack(sp); + + return; +} + +static int +nv_dma_buf_mmap( + struct dma_buf *buf, + struct vm_area_struct *vma +) +{ + int ret = 0; + NvU32 i = 0; + nv_dma_buf_file_private_t *priv = buf->priv; + unsigned long addr = vma->vm_start; + NvU32 total_skip_size = 0; + NvU64 total_map_len = NV_VMA_SIZE(vma); + NvU64 off_in_range_array = 0; + NvU32 index; + + if (priv == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: nv_dma_buf_mmap: priv == NULL.\n"); + return -EINVAL; + } + + mutex_lock(&priv->lock); + + if (!priv->map_attrs.can_mmap) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: nv_dma_buf_mmap: mmap is not allowed can_mmap[%d] \n", + priv->map_attrs.can_mmap); + ret = -ENOTSUPP; + goto unlock_priv; + } + + // Check for offset overflow. + if ((NV_VMA_OFFSET(vma) + NV_VMA_SIZE(vma)) < NV_VMA_OFFSET(vma)) + { + ret = -EOVERFLOW; + goto unlock_priv; + } + + if ((NV_VMA_OFFSET(vma) + NV_VMA_SIZE(vma)) > priv->total_size) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: nv_dma_buf_mmap: Vaddr_start[%llx] Vaddr_end[%llx] " + "vm_pgoff[%llx] page_offset[%llx] " + "page_prot[%x] total_size[%llx] \n", + vma->vm_start, vma->vm_end, NV_VMA_PGOFF(vma), + NV_VMA_OFFSET(vma), pgprot_val(vma->vm_page_prot), + priv->total_size); + ret = -EINVAL; + goto unlock_priv; + } + + nv_printf(NV_DBG_INFO, + "NVRM: nv_dma_buf_mmap: Vaddr_start[%llx] Vaddr_end[%llx] " + "os_page_size[%llx] vm_pgoff[%llx] page_offset[%llx] " + "page_prot[%x] total_size[%llx] total_map_len[%llx] \n", + vma->vm_start, vma->vm_end, PAGE_SIZE, NV_VMA_PGOFF(vma), + NV_VMA_OFFSET(vma), pgprot_val(vma->vm_page_prot), priv->total_size, + total_map_len); + + // Find the first range from which map should start. + for (i = 0; i < priv->num_objects; i++) + { + NvU32 range_count = priv->handles[i].memArea.numRanges; + + for (index = 0; index < range_count; index++) + { + NvU64 len = priv->handles[i].memArea.pRanges[index].size; + + total_skip_size += len; + // + // Skip memArea.pRanges[index] until to find out the + // first mapping page start in the memArea range_count. + // skip pages which lie outside of offset/map length. + // + if (NV_VMA_OFFSET(vma) >= total_skip_size) + { + continue; + } + total_skip_size -= len; + + // + // First mapping page start can be anywhere in the specific + // memArea.pRanges[index]. So adjust off_in_range_array accordingly. 
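+            //
+            // Worked example (hypothetical sizes): with ranges of 0x3000 and
+            // 0x2000 bytes and NV_VMA_OFFSET(vma) == 0x4000, the first range
+            // is skipped (0x4000 >= 0x3000) and the mapping starts in the
+            // second range at off_in_range_array == 0x1000.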
+    // Create the user mapping.
+    for (; (i < priv->num_objects) && (addr < vma->vm_end); i++)
+    {
+        NvU32 range_count = priv->handles[i].memArea.numRanges;
+
+        for (; (index < range_count) && (addr < vma->vm_end); index++)
+        {
+            NvU64 len = priv->handles[i].memArea.pRanges[index].size;
+            NvU64 map_len = 0;
+            NvU64 phy_addr;
+
+            phy_addr = (priv->handles[i].memArea.pRanges[index].start + off_in_range_array);
+            len -= off_in_range_array;
+
+            // Reset to 0 after its initial use.
+            off_in_range_array = 0;
+
+            map_len = NV_MIN(len, total_map_len);
+
+            //
+            // nv_remap_page_range() maps a contiguous physical address range
+            // into user virtual address space. A PFN-based mapping API is
+            // used so that reserved carveout memory (OS-invisible memory not
+            // managed by the OS) can be mapped as well; nv_remap_page_range()
+            // works for all kinds of memory regions.
+            // The downsides of using nv_remap_page_range() are:
+            // 1. The vm_insert_pages() batching API cannot be used, so there
+            //    is the overhead of mapping every page individually.
+            // 2. Calling pin_user_pages() on the dma-buf's CPU VA cannot be
+            //    supported.
+            // This code path may be revisited in the future if needed.
+            //
+            ret = nv_remap_page_range(vma, addr, phy_addr, map_len,
+                                      vma->vm_page_prot);
+            if (ret)
+            {
+                nv_printf(NV_DBG_ERRORS,
+                          "NVRM: nv_dma_buf_mmap: nv_remap_page_range failed: %d\n",
+                          ret);
+                // A partial mapping is freed by the kernel when
+                // nv_dma_buf_mmap() fails.
+                goto unlock_priv;
+            }
+
+            nv_printf(NV_DBG_INFO,
+                      "NVRM: nv_dma_buf_mmap: index[%u] range_count[%u] Vaddr[%llx] "
+                      "page_prot[%x] phyAddr[%llx] mapLen[%llx] len[%llx] "
+                      "total_map_len[%llx]\n",
+                      index, range_count, addr, pgprot_val(vma->vm_page_prot),
+                      phy_addr, map_len, len, total_map_len);
+
+            total_map_len -= map_len;
+            addr += map_len;
+        }
+
+        // Restart from the first range of the next object.
+        index = 0;
+    }
+
+    mutex_unlock(&priv->lock);
+
+    return 0;
+
+unlock_priv:
+    mutex_unlock(&priv->lock);
+
+    return ret;
+}
+
+#if defined(NV_DMA_BUF_OPS_HAS_MAP)
+static void*
+nv_dma_buf_map_stub(
+    struct dma_buf *buf,
+    unsigned long page_num
+)
+{
+    return NULL;
+}
+
+static void
+nv_dma_buf_unmap_stub(
+    struct dma_buf *buf,
+    unsigned long page_num,
+    void *addr
+)
+{
+    return;
+}
+#endif
+
+#if defined(NV_DMA_BUF_OPS_HAS_MAP_ATOMIC)
+static void*
+nv_dma_buf_map_atomic_stub(
+    struct dma_buf *buf,
+    unsigned long page_num
+)
+{
+    return NULL;
+}
+
+static void
+nv_dma_buf_unmap_atomic_stub(
+    struct dma_buf *buf,
+    unsigned long page_num,
+    void *addr
+)
+{
+    return;
+}
+#endif
+
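+//
+// An importer consumes this exporter through the standard dma-buf sequence,
+// e.g. (sketch, error handling omitted):
+//
+//     struct dma_buf_attachment *at = dma_buf_attach(buf, dev);
+//     struct sg_table *sgt = dma_buf_map_attachment(at, DMA_BIDIRECTIONAL);
+//     ...
+//     dma_buf_unmap_attachment(at, sgt, DMA_BIDIRECTIONAL);
+//     dma_buf_detach(buf, at);
+//
+// nv_dma_import_dma_buf() below implements this sequence for imports.
+//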
+//
+// Note: some of the dma-buf operations are mandatory in some kernels, so
+// stubs are added to prevent dma_buf_export() failure. The actual
+// implementations of these interfaces are not required for the export
+// operation to work.
+//
+static const struct dma_buf_ops nv_dma_buf_ops = {
+    .attach        = nv_dma_buf_attach,
+    .map_dma_buf   = nv_dma_buf_map,
+    .unmap_dma_buf = nv_dma_buf_unmap,
+    .release       = nv_dma_buf_release,
+    .mmap          = nv_dma_buf_mmap,
+#if defined(NV_DMA_BUF_OPS_HAS_MAP)
+    .map           = nv_dma_buf_map_stub,
+    .unmap         = nv_dma_buf_unmap_stub,
+#endif
+#if defined(NV_DMA_BUF_OPS_HAS_MAP_ATOMIC)
+    .map_atomic    = nv_dma_buf_map_atomic_stub,
+    .unmap_atomic  = nv_dma_buf_unmap_atomic_stub,
+#endif
+};
+
+static NV_STATUS
+nv_dma_buf_create(
+    nv_state_t *nv,
+    nv_ioctl_export_to_dma_buf_fd_t *params
+)
+{
+    int rc = 0;
+    NV_STATUS status;
+    nvidia_stack_t *sp = NULL;
+    struct dma_buf *buf = NULL;
+    nv_dma_buf_file_private_t *priv = NULL;
+    NvU32 gpu_id = nv->gpu_id;
+
+    if (!nv->dma_buf_supported)
+    {
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    if (params->index > (params->totalObjects - params->numObjects))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    priv = nv_dma_buf_alloc_file_private(params->totalObjects);
+    if (priv == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate dma-buf private\n");
+        return NV_ERR_NO_MEMORY;
+    }
+
+    priv->total_objects = params->totalObjects;
+    priv->total_size = params->totalSize;
+    priv->nv = nv;
+    priv->mapping_type = params->mappingType;
+    priv->skip_iommu = NV_FALSE;
+    priv->map_attrs.allow_mmap = params->bAllowMmap;
+
+    rc = nv_kmem_cache_alloc_stack(&sp);
+    if (rc != 0)
+    {
+        status = NV_ERR_NO_MEMORY;
+        goto cleanup_priv;
+    }
+
+    rc = nvidia_dev_get(gpu_id, sp);
+    if (rc != 0)
+    {
+        status = NV_ERR_OPERATING_SYSTEM;
+        goto cleanup_sp;
+    }
+
+    status = rm_dma_buf_get_client_and_device(sp, priv->nv,
+                                              params->hClient,
+                                              params->handles[0],
+                                              priv->mapping_type,
+                                              &priv->h_client,
+                                              &priv->h_device,
+                                              &priv->h_subdevice,
+                                              &priv->mig_info,
+                                              &priv->static_phys_addrs,
+                                              &priv->acquire_release_all_gpu_lock_on_dup);
+    if (status != NV_OK)
+    {
+        goto cleanup_device;
+    }
+
+    status = nv_dma_buf_dup_mem_handles(sp, priv, params);
+    if (status != NV_OK)
+    {
+        goto cleanup_client_and_device;
+    }
+
+    if (priv->map_attrs.allow_mmap &&
+        !priv->map_attrs.can_mmap)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: mmap is not allowed for the given handles\n");
+        status = NV_ERR_NOT_SUPPORTED;
+        goto cleanup_handles;
+    }
+
+    // mmap is opt-in per export for testing/specific use cases; it is not
+    // enabled by default for all handles.
+    if (!priv->map_attrs.allow_mmap)
+    {
+        priv->map_attrs.can_mmap = NV_FALSE;
+    }
+
+    // Get CPU static phys addresses if it is possible to do so at this time.
+    if (priv->static_phys_addrs)
+    {
+        status = nv_dma_buf_get_phys_addresses(priv, params->index,
+                                               params->numObjects);
+        if (status != NV_OK)
+        {
+            goto cleanup_handles;
+        }
+    }
+
+    {
+        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+        exp_info.ops = &nv_dma_buf_ops;
+        exp_info.size = params->totalSize;
+        exp_info.flags = O_RDWR | O_CLOEXEC;
+        exp_info.priv = priv;
+        exp_info.exp_name = "nv_dmabuf";
+
+        buf = dma_buf_export(&exp_info);
+    }
+
+    if (IS_ERR(buf))
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to create dma-buf\n");
+
+        status = NV_ERR_OPERATING_SYSTEM;
+
+        goto put_phys_addrs;
+    }
+
+    nv_kmem_cache_free_stack(sp);
+
+    rc = dma_buf_fd(buf, O_RDWR | O_CLOEXEC);
+    if (rc < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to get dma-buf file descriptor\n");
+
+        //
+        // If the dma-buf was successfully created, the dup'd handles
+        // clean-up should be done by the release callback.
+ // + dma_buf_put(buf); + + return NV_ERR_OPERATING_SYSTEM; + } + + params->fd = rc; + + return NV_OK; + +put_phys_addrs: + if (priv->static_phys_addrs) + { + nv_dma_buf_put_phys_addresses(priv, params->index, params->numObjects); + } + +cleanup_handles: + nv_dma_buf_undup_mem_handles(sp, params->index, params->numObjects, priv); + +cleanup_client_and_device: + rm_dma_buf_put_client_and_device(sp, priv->nv, priv->h_client, priv->h_device, + priv->h_subdevice, priv->mig_info); + +cleanup_device: + nvidia_dev_put(gpu_id, sp); + +cleanup_sp: + nv_kmem_cache_free_stack(sp); + +cleanup_priv: + nv_dma_buf_free_file_private(priv); + + return status; +} + +static NV_STATUS +nv_dma_buf_reuse( + nv_state_t *nv, + nv_ioctl_export_to_dma_buf_fd_t *params +) +{ + int rc = 0; + NV_STATUS status = NV_OK; + nvidia_stack_t *sp = NULL; + struct dma_buf *buf = NULL; + nv_dma_buf_file_private_t *priv = NULL; + + buf = dma_buf_get(params->fd); + if (IS_ERR(buf)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get dma-buf\n"); + return NV_ERR_OPERATING_SYSTEM; + } + + if (buf->ops != &nv_dma_buf_ops) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Invalid dma-buf fd\n"); + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup_dmabuf; + } + + priv = buf->priv; + + if (priv == NULL) + { + status = NV_ERR_OPERATING_SYSTEM; + goto cleanup_dmabuf; + } + + rc = mutex_lock_interruptible(&priv->lock); + if (rc != 0) + { + status = NV_ERR_OPERATING_SYSTEM; + goto cleanup_dmabuf; + } + + if ((priv->total_objects < params->numObjects) || + (params->index > (priv->total_objects - params->numObjects)) || + (params->mappingType != priv->mapping_type) || + (params->bAllowMmap != priv->map_attrs.allow_mmap)) + { + status = NV_ERR_INVALID_ARGUMENT; + goto unlock_priv; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + status = NV_ERR_NO_MEMORY; + goto unlock_priv; + } + + status = nv_dma_buf_dup_mem_handles(sp, priv, params); + if (status != NV_OK) + { + goto cleanup_sp; + } + + // Get CPU static phys addresses if possible to do so at this time. + if (priv->static_phys_addrs) + { + status = nv_dma_buf_get_phys_addresses(priv, params->index, + params->numObjects); + if (status != NV_OK) + { + goto cleanup_handles; + } + } + + nv_kmem_cache_free_stack(sp); + + mutex_unlock(&priv->lock); + + dma_buf_put(buf); + + return NV_OK; + +cleanup_handles: + nv_dma_buf_undup_mem_handles(sp, params->index, params->numObjects, priv); + +cleanup_sp: + nv_kmem_cache_free_stack(sp); + +unlock_priv: + mutex_unlock(&priv->lock); + +cleanup_dmabuf: + dma_buf_put(buf); + + return status; +} +#endif // CONFIG_DMA_SHARED_BUFFER + +NV_STATUS +nv_dma_buf_export( + nv_state_t *nv, + nv_ioctl_export_to_dma_buf_fd_t *params +) +{ +#if defined(CONFIG_DMA_SHARED_BUFFER) + NV_STATUS status; + + if ((params == NULL) || + (params->totalSize == 0) || + (params->numObjects == 0) || + (params->totalObjects == 0) || + (params->numObjects > NV_DMABUF_EXPORT_MAX_HANDLES) || + (params->numObjects > params->totalObjects)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if ((params->mappingType != NV_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT) && + (params->mappingType != NV_DMABUF_EXPORT_MAPPING_TYPE_FORCE_PCIE)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // If fd >= 0, dma-buf already exists with this fd, so get dma-buf from fd. + // If fd == -1, dma-buf is not created yet, so create it and then store + // additional handles. 
+    //
+    if (params->fd == -1)
+    {
+        status = nv_dma_buf_create(nv, params);
+    }
+    else if (params->fd >= 0)
+    {
+        status = nv_dma_buf_reuse(nv, params);
+    }
+    else
+    {
+        status = NV_ERR_INVALID_ARGUMENT;
+    }
+
+    return status;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif // CONFIG_DMA_SHARED_BUFFER
+}
+
+NV_STATUS NV_API_CALL nv_dma_import_dma_buf
+(
+    nv_dma_device_t *dma_dev,
+    struct dma_buf *dma_buf,
+    NvBool is_ro_device_map,
+    NvU32 *size,
+    struct sg_table **sgt,
+    nv_dma_buf_t **import_priv
+)
+{
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+    nv_dma_buf_t *nv_dma_buf = NULL;
+    struct dma_buf_attachment *dma_attach = NULL;
+    struct sg_table *map_sgt = NULL;
+    NV_STATUS status = NV_OK;
+
+    if ((dma_dev == NULL) ||
+        (dma_buf == NULL) ||
+        (size == NULL) ||
+        (sgt == NULL) ||
+        (import_priv == NULL))
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: import arguments are NULL\n");
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    status = os_alloc_mem((void **)&nv_dma_buf, sizeof(*nv_dma_buf));
+    if (status != NV_OK)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: can't allocate memory for nv_dma_buf\n");
+        return status;
+    }
+
+    get_dma_buf(dma_buf);
+
+    dma_attach = dma_buf_attach(dma_buf, dma_dev->dev);
+    if (IS_ERR_OR_NULL(dma_attach))
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: can't attach dma_buf\n");
+        status = NV_ERR_OPERATING_SYSTEM;
+
+        goto dma_buf_attach_fail;
+    }
+
+    if (is_ro_device_map)
+    {
+        // Try a read-only DMA mapping.
+        nv_dma_buf->direction = DMA_TO_DEVICE;
+        nv_printf(NV_DBG_INFO,
+                  "NVRM: nv_dma_import_dma_buf: trying read-only (DMA_TO_DEVICE) mapping\n");
+    }
+    else
+    {
+        nv_dma_buf->direction = DMA_BIDIRECTIONAL;
+    }
+
+    map_sgt = dma_buf_map_attachment(dma_attach, nv_dma_buf->direction);
+    if (IS_ERR_OR_NULL(map_sgt))
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: can't map dma attachment\n");
+        status = NV_ERR_OPERATING_SYSTEM;
+
+        goto dma_buf_map_fail;
+    }
+
+    nv_dma_buf->dma_buf = dma_buf;
+    nv_dma_buf->dma_attach = dma_attach;
+    nv_dma_buf->sgt = map_sgt;
+
+    *size = dma_buf->size;
+    *import_priv = nv_dma_buf;
+    *sgt = map_sgt;
+
+    return NV_OK;
+
+dma_buf_map_fail:
+    dma_buf_detach(dma_buf, dma_attach);
+dma_buf_attach_fail:
+    os_free_mem(nv_dma_buf);
+    dma_buf_put(dma_buf);
+
+    return status;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif // CONFIG_DMA_SHARED_BUFFER
+}
+
+NV_STATUS NV_API_CALL nv_dma_import_from_fd
+(
+    nv_dma_device_t *dma_dev,
+    NvS32 fd,
+    NvBool is_ro_device_map,
+    NvU32 *size,
+    struct sg_table **sgt,
+    nv_dma_buf_t **import_priv
+)
+{
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+    struct dma_buf *dma_buf = dma_buf_get(fd);
+    NV_STATUS status;
+
+    if (IS_ERR_OR_NULL(dma_buf))
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: can't get dma_buf from fd\n");
+        return NV_ERR_OPERATING_SYSTEM;
+    }
+
+    status = nv_dma_import_dma_buf(dma_dev,
+                                   dma_buf, is_ro_device_map, size,
+                                   sgt, import_priv);
+    dma_buf_put(dma_buf);
+
+    return status;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif // CONFIG_DMA_SHARED_BUFFER
+}
+
+void NV_API_CALL nv_dma_release_dma_buf
+(
+    nv_dma_buf_t *import_priv
+)
+{
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+    nv_dma_buf_t *nv_dma_buf = NULL;
+
+    if (import_priv == NULL)
+    {
+        return;
+    }
+
+    nv_dma_buf = (nv_dma_buf_t *)import_priv;
+    dma_buf_unmap_attachment(nv_dma_buf->dma_attach, nv_dma_buf->sgt,
+                             nv_dma_buf->direction);
+    dma_buf_detach(nv_dma_buf->dma_buf, nv_dma_buf->dma_attach);
+    dma_buf_put(nv_dma_buf->dma_buf);
+
+    os_free_mem(nv_dma_buf);
+#endif // CONFIG_DMA_SHARED_BUFFER
+}
+
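+//
+// A minimal user-space sketch of the export flow (field names follow
+// nv_ioctl_export_to_dma_buf_fd_t; the ioctl request macro and the exact
+// handle placement are assumptions for illustration):
+//
+//     nv_ioctl_export_to_dma_buf_fd_t p = { 0 };
+//
+//     p.fd           = -1;                   // -1 creates a new dma-buf
+//     p.hClient      = hClient;
+//     p.totalObjects = 2;
+//     p.numObjects   = 1;                    // handles supplied in this call
+//     p.index        = 0;
+//     p.totalSize    = size0 + size1;
+//     p.handles[0]   = hMemory0;
+//     ioctl(ctl_fd, NV_ESC_EXPORT_TO_DMA_BUF_FD, &p); // p.fd is the dma-buf fd
+//
+//     p.index        = 1;                    // reuse path: add the next handle
+//     p.handles[0]   = hMemory1;
+//     ioctl(ctl_fd, NV_ESC_EXPORT_TO_DMA_BUF_FD, &p);
+//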
diff --git a/kernel-open/nvidia/nv-dsi-parse-panel-props.c b/kernel-open/nvidia/nv-dsi-parse-panel-props.c
new file mode 100644
index 0000000..ed5c108
--- /dev/null
+++ b/kernel-open/nvidia/nv-dsi-parse-panel-props.c
@@ -0,0 +1,1014 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+#include "os_dsi_panel_props.h"
+
+int bl_name_len;
+
+static u32 *dsi_read_prop_array
+(
+    const struct device_node *np,
+    struct property *prop,
+    u32 *array_size
+)
+{
+#if NV_SUPPORTS_PLATFORM_DEVICE
+    u32 *val_array = NULL;
+    int count = 0;
+    int ret = 0;
+
+    if (!prop)
+        return NULL;
+
+    count = of_property_count_elems_of_size(np, prop->name, sizeof(u32));
+
+    if (count > 0)
+    {
+        NV_KMALLOC(val_array, sizeof(u32) * count);
+        if (val_array == NULL)
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: dsi_read_prop_array: failed to allocate memory for values of DSI property %s\n", prop->name);
+            return ERR_PTR(-ENOMEM);
+        }
+    }
+    else
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: dsi_read_prop_array: failed to get element count of property %s\n", prop->name);
+        return ERR_PTR(-ENOSYS);
+    }
+
+    ret = of_property_read_variable_u32_array(np, prop->name,
+                                              val_array, 0, count);
+    if (ret < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: dsi_read_prop_array: failed to read property %s\n", prop->name);
+        NV_KFREE(val_array, sizeof(u32) * count);
+        return ERR_PTR(ret);
+    }
+
+    *array_size = count;
+
+    return val_array;
+#else
+    nv_printf(NV_DBG_ERRORS, "NVRM: dsi_read_prop_array: platform device not supported\n");
+    return ERR_PTR(-ENOSYS);
+#endif
+}
+
+static int dsi_get_panel_timings(struct device_node *np_panel, DSI_PANEL_INFO *panelInfo)
+{
+#if NV_SUPPORTS_PLATFORM_DEVICE
+    struct device_node *np = NULL;
+    NvU32 temp;
+    DSITIMINGS *modes = &panelInfo->dsiTimings;
+
+    // Get the timings node from the nvidia,panel-timings phandle.
+    np = of_parse_phandle(np_panel, "nvidia,panel-timings", 0);
+    if (!np) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: could not find panel timings node for DSI Panel\n");
+        return -ENOENT;
+    }
+
+    if (!of_property_read_u32(np, "clock-frequency", &temp)) {
+        modes->pixelClkRate = temp;
+    } else {
+        goto parse_mode_timings_fail;
+    }
+    if (!of_property_read_u32(np, "hsync-len", &temp)) {
+        modes->hSyncWidth = temp;
+    } else {
+        goto parse_mode_timings_fail;
+    }
+    if (!of_property_read_u32(np, "vsync-len", &temp)) {
+        modes->vSyncWidth = temp;
+    } else {
+        goto parse_mode_timings_fail;
+    }
+    if 
(!of_property_read_u32(np, "hback-porch", &temp)) { + modes->hBackPorch = temp; + } else { + goto parse_mode_timings_fail; + } + if (!of_property_read_u32(np, "vback-porch", &temp)) { + modes->vBackPorch = temp; + } else { + goto parse_mode_timings_fail; + } + if (!of_property_read_u32(np, "hactive", &temp)) { + modes->hActive = temp; + } else { + goto parse_mode_timings_fail; + } + if (!of_property_read_u32(np, "vactive", &temp)) { + modes->vActive = temp; + } else { + goto parse_mode_timings_fail; + } + if (!of_property_read_u32(np, "hfront-porch", &temp)) { + modes->hFrontPorch = temp; + } else { + goto parse_mode_timings_fail; + } + if (!of_property_read_u32(np, "vfront-porch", &temp)) { + modes->vFrontPorch = temp; + } else { + goto parse_mode_timings_fail; + } + + of_node_put(np); + return 0U; + +parse_mode_timings_fail: + nv_printf(NV_DBG_ERRORS, "NVRM: One of the mode timings is missing in DSI Panel mode-timings!\n"); + of_node_put(np); +#endif + + return -ENOENT; +} + +static int dsi_get_panel_gpio(struct device_node *node, DSI_PANEL_INFO *panel) +{ + char *label = NULL; + int count; + + (void)label; + (void)count; + + // If gpios are already populated, just return + if (panel->panel_gpio_populated) + return 0; + + if (!node) { + nv_printf(NV_DBG_ERRORS, "NVRM: DSI Panel node not available\n"); + return -ENOENT; + } + +#if NV_SUPPORTS_PLATFORM_DEVICE + panel->panel_gpio[DSI_GPIO_LCD_RESET] = + of_get_named_gpio(node, "nvidia,panel-rst-gpio", 0); + + panel->panel_gpio[DSI_GPIO_PANEL_EN] = + of_get_named_gpio(node, "nvidia,panel-en-gpio", 0); + + panel->panel_gpio[DSI_GPIO_PANEL_EN_1] = + of_get_named_gpio(node, "nvidia,panel-en-1-gpio", 0); + + panel->panel_gpio[DSI_GPIO_BL_ENABLE] = + of_get_named_gpio(node, "nvidia,panel-bl-en-gpio", 0); + + panel->panel_gpio[DSI_GPIO_BL_PWM] = + of_get_named_gpio(node, "nvidia,panel-bl-pwm-gpio", 0); + + panel->panel_gpio[DSI_GPIO_TE] = + of_get_named_gpio(node, "nvidia,te-gpio", 0); + + panel->panel_gpio[DSI_GPIO_AVDD_AVEE_EN] = + of_get_named_gpio(node, "nvidia,avdd-avee-en-gpio", 0); + + panel->panel_gpio[DSI_GPIO_VDD_1V8_LCD_EN] = + of_get_named_gpio(node, "nvidia,vdd-1v8-lcd-en-gpio", 0); + + panel->panel_gpio[DSI_GPIO_BRIDGE_EN_0] = + of_get_named_gpio(node, "nvidia,panel-bridge-en-0-gpio", 0); + + panel->panel_gpio[DSI_GPIO_BRIDGE_EN_1] = + of_get_named_gpio(node, "nvidia,panel-bridge-en-1-gpio", 0); + + panel->panel_gpio[DSI_GPIO_BRIDGE_REFCLK_EN] = + of_get_named_gpio(node, "nvidia,panel-bridge-refclk-en-gpio", 0); + + + for (count = 0; count < DSI_N_GPIO_PANEL; count++) { + if (gpio_is_valid(panel->panel_gpio[count])) { + switch (count) { + case DSI_GPIO_LCD_RESET: + label = "dsi-panel-reset"; + break; + case DSI_GPIO_PANEL_EN: + label = "dsi-panel-en"; + break; + case DSI_GPIO_PANEL_EN_1: + label = "dsi-panel-en-1"; + break; + case DSI_GPIO_BL_ENABLE: + label = "dsi-panel-bl-enable"; + break; + case DSI_GPIO_BL_PWM: + label = "dsi-panel-pwm"; + break; + case DSI_GPIO_TE: + if (panel->dsiEnVRR != NV_TRUE) { + panel->panel_gpio[count] = -1; + } else { + label = "dsi-panel-te"; + panel->dsiVrrPanelSupportsTe = NV_TRUE; + } + break; + case DSI_GPIO_AVDD_AVEE_EN: + label = "dsi-panel-avdd-avee-en"; + break; + case DSI_GPIO_VDD_1V8_LCD_EN: + label = "dsi-panel-vdd-1v8-lcd-en"; + break; + case DSI_GPIO_BRIDGE_EN_0: + label = "dsi-panel-bridge-en-0"; + break; + case DSI_GPIO_BRIDGE_EN_1: + label = "dsi-panel-bridge-en-1"; + break; + case DSI_GPIO_BRIDGE_REFCLK_EN: + label = "dsi-panel-bridge-refclk-en"; + break; + default: + 
nv_printf(NV_DBG_INFO, "NVRM: DSI Panel invalid gpio entry at index %d\n", count); + } + if (label) { + gpio_request(panel->panel_gpio[count], label); + label = NULL; + } + } + } + + panel->panel_gpio_populated = true; + return 0U; +#else + return -EINVAL; +#endif +} + +static int dsi_parse_pps_data +( + const struct device_node *node, + struct property *prop, + NvU32 *pps +) +{ + __be32 *prop_val_ptr; + u32 count = 0; + + if (!prop) + return -ENOENT; + + prop_val_ptr = prop->value; + +#define PPS_COUNT 32 + for (count = 0; count < PPS_COUNT; count++) { + pps[count] = be32_to_cpu(*prop_val_ptr++); + } +#undef PPS_COUNT + + return 0U; +} + +static int parse_dsi_properties(const struct device_node *np_dsi, DSI_PANEL_INFO *dsi) +{ +#if NV_SUPPORTS_PLATFORM_DEVICE + u32 temp; + int ret = 0; +#if defined(NV_OF_PROPERTY_FOR_EACH_U32_HAS_INTERNAL_ARGS) + const __be32 *p; + struct property *prop; +#endif + struct device_node *np_dsi_panel; + + // Get Panel Node from active-panel phandle + np_dsi_panel = of_parse_phandle(np_dsi, "nvidia,active-panel", 0); + if (np_dsi_panel == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: None of the dsi panel nodes enabled in DT!\n"); + return -EINVAL; + } + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,enable-hs-clk-in-lp-mode", &temp)) + dsi->enable_hs_clock_on_lp_cmd_mode = (u8)temp; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,set-max-dsi-timeout")) + dsi->set_max_timeout = true; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,use-legacy-dphy-core")) + dsi->use_legacy_dphy_core = true; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-refresh-rate-adj", &temp)) + dsi->refresh_rate_adj = (u8)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-n-data-lanes", &temp)) + dsi->n_data_lanes = (u8)temp; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,swap-data-lane-polarity")) + dsi->swap_data_lane_polarity = true; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,swap-clock-lane-polarity")) + dsi->swap_clock_lane_polarity = true; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,reverse-clock-polarity")) + dsi->reverse_clock_polarity = true; + + if (!of_property_read_u32_array(np_dsi_panel, + "nvidia,lane-xbar-ctrl", + dsi->lane_xbar_ctrl, dsi->n_data_lanes)) + dsi->lane_xbar_exists = true; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-type", &temp)) + { + dsi->dsiPhyType = (u8)temp; + if ((temp != DSI_DPHY) && + (temp != DSI_CPHY)) + { + nv_printf(NV_DBG_ERRORS,"NVRM: invalid dsi phy type 0x%x\n", temp); + ret = -EINVAL; + goto parse_dsi_settings_fail; + } + } + + if (of_property_read_bool(np_dsi_panel, + "nvidia,cphy-data-scrambling")) + dsi->en_data_scrambling = true; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-video-burst-mode", &temp)) + { + dsi->video_burst_mode = (u8)temp; + if ((temp != DSI_VIDEO_NON_BURST_MODE) && + (temp != DSI_VIDEO_NON_BURST_MODE_WITH_SYNC_END) && + (temp != DSI_VIDEO_BURST_MODE_LOWEST_SPEED) && + (temp != DSI_VIDEO_BURST_MODE_LOW_SPEED) && + (temp != DSI_VIDEO_BURST_MODE_MEDIUM_SPEED) && + (temp != DSI_VIDEO_BURST_MODE_FAST_SPEED) && + (temp != DSI_VIDEO_BURST_MODE_FASTEST_SPEED)) + { + nv_printf(NV_DBG_ERRORS,"NVRM: invalid dsi video burst mode\n"); + ret = -EINVAL; + goto parse_dsi_settings_fail; + } + } + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-pixel-format", &temp)) + { + dsi->pixel_format = (u8)temp; + if ((temp != DSI_PIXEL_FORMAT_16BIT_P) && + (temp != DSI_PIXEL_FORMAT_18BIT_P) && + (temp != DSI_PIXEL_FORMAT_18BIT_NP) && + (temp != 
DSI_PIXEL_FORMAT_24BIT_P) && + (temp != DSI_PIXEL_FORMAT_30BIT_P) && + (temp != DSI_PIXEL_FORMAT_36BIT_P) && + (temp != DSI_PIXEL_FORMAT_8BIT_DSC) && + (temp != DSI_PIXEL_FORMAT_10BIT_DSC) && + (temp != DSI_PIXEL_FORMAT_12BIT_DSC) && + (temp != DSI_PIXEL_FORMAT_16BIT_DSC)) + { + nv_printf(NV_DBG_ERRORS,"NVRM: invalid dsi pixel format\n"); + ret = -EINVAL; + goto parse_dsi_settings_fail; + } + } + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-refresh-rate", &temp)) + dsi->refresh_rate = (u8)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-rated-refresh-rate", &temp)) + dsi->rated_refresh_rate = (u8)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-virtual-channel", &temp)) + { + dsi->virtual_channel = (u8)temp; + if ((temp != DSI_VIRTUAL_CHANNEL_0) && + (temp != DSI_VIRTUAL_CHANNEL_1) && + (temp != DSI_VIRTUAL_CHANNEL_2) && + (temp != DSI_VIRTUAL_CHANNEL_3)) + { + nv_printf(NV_DBG_ERRORS,"NVRM: invalid dsi virtual channel\n"); + ret = -EINVAL; + goto parse_dsi_settings_fail; + } + } + + if (!of_property_read_u32(np_dsi_panel, "nvidia,dsi-instance", &temp)) + dsi->dsi_instance = (u8)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-panel-reset", &temp)) + dsi->panel_reset = (u8)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-te-polarity-low", &temp)) + dsi->te_polarity_low = (u8)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-lp00-pre-panel-wakeup", &temp)) + dsi->lp00_pre_panel_wakeup = (u8)temp; + + if (of_find_property(np_dsi_panel, + "nvidia,dsi-bl-name", &bl_name_len)) + { + NV_KMALLOC(dsi->bl_name, sizeof(u8) * bl_name_len); + if (!of_property_read_string(np_dsi_panel, + "nvidia,dsi-bl-name", + (const char **)&dsi->bl_name)) { + } else { + nv_printf(NV_DBG_ERRORS, "NVRM: dsi error parsing bl name\n"); + NV_KFREE(dsi->bl_name, sizeof(u8) * bl_name_len); + } + } + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-ganged-type", &temp)) { + dsi->ganged_type = (u8)temp; + /* Set pixel width to 1 by default for even-odd split */ + if (dsi->ganged_type == DSI_GANGED_SYMMETRIC_EVEN_ODD) + dsi->even_odd_split_width = 1; + } + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-even-odd-pixel-width", &temp)) + dsi->even_odd_split_width = temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-ganged-overlap", &temp)) { + dsi->ganged_overlap = (u16)temp; + if (!dsi->ganged_type) + nv_printf(NV_DBG_ERRORS, "NVRM: specified ganged overlap, but no ganged type\n"); + } + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-ganged-swap-links", &temp)) { + dsi->ganged_swap_links = (bool)temp; + if (!dsi->ganged_type) + nv_printf(NV_DBG_ERRORS, "NVRM: specified ganged swapped links, but no ganged type\n"); + } + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-ganged-write-to-all-links", &temp)) { + dsi->ganged_write_to_all_links = (bool)temp; + if (!dsi->ganged_type) + nv_printf(NV_DBG_ERRORS, "NVRM: specified ganged write to all links, but no ganged type\n"); + } + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-split-link-type", &temp)) + dsi->split_link_type = (u8)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-suspend-aggr", &temp)) + dsi->suspend_aggr = (u8)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-edp-bridge", &temp)) + dsi->dsi2edp_bridge_enable = (bool)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-lvds-bridge", &temp)) + dsi->dsi2lvds_bridge_enable = (bool)temp; + +#if defined(NV_OF_PROPERTY_FOR_EACH_U32_HAS_INTERNAL_ARGS) 
+    of_property_for_each_u32(np_dsi_panel, "nvidia,dsi-dpd-pads", prop, p, temp)
+#else
+    of_property_for_each_u32(np_dsi_panel, "nvidia,dsi-dpd-pads", temp)
+#endif
+        dsi->dpd_dsi_pads |= (u32)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-power-saving-suspend", &temp))
+        dsi->power_saving_suspend = (bool)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-ulpm-not-support", &temp))
+        dsi->ulpm_not_supported = (bool)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-video-data-type", &temp)) {
+        dsi->video_data_type = (u8)temp;
+        if ((temp != DSI_VIDEO_TYPE_VIDEO_MODE) &&
+            (temp != DSI_VIDEO_TYPE_COMMAND_MODE))
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: invalid dsi video data type\n");
+            ret = -EINVAL;
+            goto parse_dsi_settings_fail;
+        }
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-video-clock-mode", &temp)) {
+        dsi->video_clock_mode = (u8)temp;
+        if ((temp != DSI_VIDEO_CLOCK_CONTINUOUS) &&
+            (temp != DSI_VIDEO_CLOCK_TX_ONLY))
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: invalid dsi video clock mode\n");
+            ret = -EINVAL;
+            goto parse_dsi_settings_fail;
+        }
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,enable-vrr", &temp))
+        dsi->dsiEnVRR = (u8)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,vrr-force-set-te-pin", &temp))
+        dsi->dsiForceSetTePin = (u8)temp;
+
+    if (of_property_read_bool(np_dsi_panel,
+        "nvidia,send-init-cmds-early"))
+        dsi->sendInitCmdsEarly = true;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-n-init-cmd", &temp)) {
+        dsi->n_init_cmd = (u16)temp;
+    }
+    if (dsi->n_init_cmd > 0) {
+        dsi->dsi_init_cmd_array = dsi_read_prop_array(np_dsi_panel,
+            of_find_property(np_dsi_panel, "nvidia,dsi-init-cmd", NULL),
+            &dsi->init_cmd_array_size);
+    }
+    if (dsi->n_init_cmd &&
+        IS_ERR_OR_NULL(dsi->dsi_init_cmd_array)) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: DSI init cmd parsing from DT failed\n");
+        ret = -EINVAL;
+        goto parse_dsi_settings_fail;
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-n-postvideo-cmd", &temp)) {
+        dsi->n_postvideo_cmd = (u16)temp;
+    }
+    if (dsi->n_postvideo_cmd > 0) {
+        dsi->dsi_postvideo_cmd_array = dsi_read_prop_array(np_dsi_panel,
+            of_find_property(np_dsi_panel, "nvidia,dsi-postvideo-cmd", NULL),
+            &dsi->postvideo_cmd_array_size);
+    }
+    if (dsi->n_postvideo_cmd &&
+        IS_ERR_OR_NULL(dsi->dsi_postvideo_cmd_array)) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: DSI postvideo cmd parsing from DT failed\n");
+        ret = -EINVAL;
+        goto parse_dsi_settings_fail;
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-n-suspend-cmd", &temp)) {
+        dsi->n_suspend_cmd = (u16)temp;
+    }
+    if (dsi->n_suspend_cmd > 0) {
+        dsi->dsi_suspend_cmd_array = dsi_read_prop_array(np_dsi_panel,
+            of_find_property(np_dsi_panel, "nvidia,dsi-suspend-cmd", NULL),
+            &dsi->suspend_cmd_array_size);
+    }
+    if (dsi->n_suspend_cmd &&
+        IS_ERR_OR_NULL(dsi->dsi_suspend_cmd_array)) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: DSI suspend cmd parsing from DT failed\n");
+        ret = -EINVAL;
+        goto parse_dsi_settings_fail;
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-n-early-suspend-cmd", &temp)) {
+        dsi->n_early_suspend_cmd = (u16)temp;
+    }
+    if (dsi->n_early_suspend_cmd > 0) {
+        dsi->dsi_early_suspend_cmd_array = dsi_read_prop_array(np_dsi_panel,
+            of_find_property(np_dsi_panel, "nvidia,dsi-early-suspend-cmd", NULL),
+            &dsi->early_suspend_cmd_array_size);
+    }
+    if (dsi->n_early_suspend_cmd &&
+        IS_ERR_OR_NULL(dsi->dsi_early_suspend_cmd_array)) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: DSI early suspend cmd parsing from DT failed\n");
+        
ret = -EINVAL; + goto parse_dsi_settings_fail; + }; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-suspend-stop-stream-late", &temp)) { + dsi->suspend_stop_stream_late = (bool)temp; + } + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-n-late-resume-cmd", &temp)) { + dsi->n_late_resume_cmd = (u16)temp; + } + if (dsi->n_late_resume_cmd > 0) { + dsi->dsi_late_resume_cmd_array = dsi_read_prop_array(np_dsi_panel, + of_find_property(np_dsi_panel, "nvidia,dsi-late-resume-cmd", NULL), + &dsi->late_resume_cmd_array_size); + } + if (dsi->n_late_resume_cmd && + IS_ERR_OR_NULL(dsi->dsi_late_resume_cmd_array)) { + nv_printf(NV_DBG_ERRORS, "NVRM: DSI late resume cmd parsing from DT failed\n"); + ret = -EINVAL; + goto parse_dsi_settings_fail; + }; + + dsi->pktSeq_array = dsi_read_prop_array(np_dsi_panel, + of_find_property(np_dsi_panel, "nvidia,dsi-pkt-seq", NULL), + &dsi->pktSeq_array_size); + if (IS_ERR(dsi->pktSeq_array)) { + nv_printf(NV_DBG_ERRORS, "NVRM: DSI packet seq parsing from DT fail\n"); + ret = -EINVAL; + goto parse_dsi_settings_fail; + } + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-hsdexit", &temp)) + dsi->phyTimingNs.t_hsdexit_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-hstrail", &temp)) + dsi->phyTimingNs.t_hstrail_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-datzero", &temp)) + dsi->phyTimingNs.t_datzero_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-hsprepare", &temp)) + dsi->phyTimingNs.t_hsprepare_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-hsprebegin", &temp)) + dsi->phyTimingNs.t_hsprebegin_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-hspost", &temp)) + dsi->phyTimingNs.t_hspost_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-clktrail", &temp)) + dsi->phyTimingNs.t_clktrail_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-clkpost", &temp)) + dsi->phyTimingNs.t_clkpost_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-clkzero", &temp)) + dsi->phyTimingNs.t_clkzero_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-tlpx", &temp)) + dsi->phyTimingNs.t_tlpx_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-clkprepare", &temp)) + dsi->phyTimingNs.t_clkprepare_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-clkpre", &temp)) + dsi->phyTimingNs.t_clkpre_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-wakeup", &temp)) + dsi->phyTimingNs.t_wakeup_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-taget", &temp)) + dsi->phyTimingNs.t_taget_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-tasure", &temp)) + dsi->phyTimingNs.t_tasure_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-tago", &temp)) + dsi->phyTimingNs.t_tago_ns = (u16)temp; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,enable-link-compression")) + dsi->dsiDscEnable = true; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,enable-dual-dsc")) + dsi->dsiDscEnDualDsc = true; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,enable-block-pred")) + dsi->dsiDscEnBlockPrediction = true; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,slice-height", &temp)) + dsi->dsiDscSliceHeight = (u32)temp; + + if (!of_property_read_u32(np_dsi_panel, 
+        "nvidia,num-of-slices", &temp))
+        dsi->dsiDscNumSlices = (u32)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,comp-rate", &temp))
+        dsi->dsiDscBpp = (u32)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,version-major", &temp))
+        dsi->dsiDscDecoderMajorVersion = (u32)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,version-minor", &temp))
+        dsi->dsiDscDecoderMinorVersion = (u32)temp;
+
+    if (of_property_read_bool(np_dsi_panel,
+        "nvidia,use-custom-pps")) {
+        dsi->dsiDscUseCustomPPS = true;
+
+        ret = dsi_parse_pps_data(np_dsi_panel,
+                                 of_find_property(np_dsi_panel,
+                                                  "nvidia,custom-pps-data", NULL),
+                                 dsi->dsiDscCustomPPSData);
+
+        if (ret != 0) {
+            nv_printf(NV_DBG_ERRORS, "NVRM: Parsing DSI Panel custom PPS data failed\n");
+            goto parse_dsi_settings_fail;
+        }
+    }
+
+    if (of_property_read_bool(np_dsi, "nvidia,dsi-csi-loopback"))
+        dsi->dsi_csi_loopback = 1;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,vpll0-rate-hz", &temp))
+        dsi->vpll0_rate_hz = temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsipll-vco-rate-hz", &temp))
+        dsi->dsipll_vco_rate_hz = temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsipll-clkouta-rate-hz", &temp))
+        dsi->dsipll_clkouta_rate_hz = temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsipll-clkoutpn-rate-hz", &temp))
+        dsi->dsipll_clkoutpn_rate_hz = temp;
+
+    ret = dsi_get_panel_timings(np_dsi_panel, dsi);
+    if (ret != 0) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Parsing DSI Panel timings failed\n");
+        goto parse_dsi_settings_fail;
+    }
+
+    ret = dsi_get_panel_gpio(np_dsi_panel, dsi);
+    if (ret != 0) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Parsing DSI Panel GPIOs failed\n");
+        goto parse_dsi_settings_fail;
+    }
+
+parse_dsi_settings_fail:
+    return ret;
+#else
+    return -EINVAL;
+#endif
+}
+
+NvBool
+nv_dsi_is_panel_connected
+(
+    nv_state_t *nv
+)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct device_node *np_dsi = NULL;
+    struct device_node *np_dsi_panel = NULL;
+    NvBool ret = NV_TRUE;
+
+    np_dsi = of_get_child_by_name(nvl->dev->of_node, "dsi");
+
+    if (np_dsi && !of_device_is_available(np_dsi)) {
+        ret = NV_FALSE;
+        goto fail;
+    }
+
+    np_dsi_panel = of_parse_phandle(np_dsi, "nvidia,active-panel", 0);
+    if (np_dsi_panel == NULL)
+    {
+        ret = NV_FALSE;
+    }
+
+fail:
+    of_node_put(np_dsi_panel);
+    of_node_put(np_dsi);
+    return ret;
+}
+
+NV_STATUS
+nv_dsi_parse_panel_props
+(
+    nv_state_t *nv,
+    void *dsiPanelInfo
+)
+{
+    int ret = NV_OK;
+    struct device_node *np_dsi = NULL;
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+
+    np_dsi = of_get_child_by_name(nvl->dev->of_node, "dsi");
+
+    if (np_dsi && !of_device_is_available(np_dsi)) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: dsi node not enabled in DT\n");
+        of_node_put(np_dsi);
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    ret = parse_dsi_properties(np_dsi, (DSI_PANEL_INFO *)dsiPanelInfo);
+
+    // Drop the reference taken by of_get_child_by_name().
+    of_node_put(np_dsi);
+
+    return ret;
+}
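+
+/*
+ * Example device-tree fragment in the shape these parsers expect; node names
+ * and property values below are illustrative:
+ *
+ *     dsi {
+ *         nvidia,active-panel = <&panel>;
+ *
+ *         panel: panel {
+ *             nvidia,dsi-n-data-lanes = <4>;
+ *             nvidia,panel-rst-gpio = <&gpio 46 0>;
+ *             nvidia,panel-timings = <&panel_timings>;
+ *
+ *             panel_timings: panel-timings {
+ *                 clock-frequency = <154000000>;
+ *                 hactive = <1200>;
+ *                 vactive = <1920>;
+ *                 hsync-len = <10>;
+ *                 vsync-len = <2>;
+ *                 hback-porch = <54>;
+ *                 vback-porch = <30>;
+ *                 hfront-porch = <64>;
+ *                 vfront-porch = <3>;
+ *             };
+ *         };
+ *     };
+ */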
+
+NV_STATUS
+nv_dsi_panel_enable
+(
+    nv_state_t *nv,
+    void *dsiPanelInfo
+)
+{
+#if NV_SUPPORTS_PLATFORM_DEVICE
+    int ret = NV_OK;
+    DSI_PANEL_INFO *panelInfo = dsiPanelInfo;
+
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_VDD_1V8_LCD_EN])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_VDD_1V8_LCD_EN], 1);
+    }
+
+    mdelay(10); // Required delay: at least 1 ms
+
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_AVDD_AVEE_EN])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_AVDD_AVEE_EN], 1);
+    }
+
+    mdelay(20); // Required delay: at least 10 ms
+
+    // If the backlight enable gpio is specified, set it to output direction
+    // and pull it high.
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_BL_ENABLE])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_BL_ENABLE], 1);
+    }
+
+    mdelay(10);
+
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_PANEL_EN])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_PANEL_EN], 1);
+    }
+
+    mdelay(20); // Required delay: at least 10 ms
+
+    return ret;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+NV_STATUS
+nv_dsi_panel_reset
+(
+    nv_state_t *nv,
+    void *dsiPanelInfo
+)
+{
+#if NV_SUPPORTS_PLATFORM_DEVICE
+    int ret = NV_OK;
+    int en_panel_rst = -1;
+    DSI_PANEL_INFO *panelInfo = dsiPanelInfo;
+
+    // Assert and deassert the panel reset GPIO.
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_LCD_RESET])) {
+        en_panel_rst = panelInfo->panel_gpio[DSI_GPIO_LCD_RESET];
+    } else {
+        nv_printf(NV_DBG_ERRORS, "NVRM: DSI panel reset gpio invalid\n");
+        ret = -EINVAL;
+        goto fail;
+    }
+
+    ret = gpio_direction_output(en_panel_rst, 1);
+    if (ret < 0) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Deasserting DSI panel reset gpio failed\n");
+        goto fail;
+    }
+
+    mdelay(10);
+
+    ret = gpio_direction_output(en_panel_rst, 0);
+    if (ret < 0) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Asserting DSI panel reset gpio failed\n");
+        goto fail;
+    }
+
+    mdelay(10);
+
+    ret = gpio_direction_output(en_panel_rst, 1);
+    if (ret < 0) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Deasserting DSI panel reset gpio after assert failed\n");
+        goto fail;
+    }
+
+fail:
+    return ret;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+void nv_dsi_panel_disable
+(
+    nv_state_t *nv,
+    void *dsiPanelInfo
+)
+{
+#if NV_SUPPORTS_PLATFORM_DEVICE
+    DSI_PANEL_INFO *panelInfo = dsiPanelInfo;
+
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_BL_ENABLE])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_BL_ENABLE], 0);
+    }
+
+    mdelay(10);
+
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_PANEL_EN])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_PANEL_EN], 0);
+    }
+
+    // Assert the panel reset GPIO.
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_LCD_RESET])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_LCD_RESET], 0);
+    }
+
+    mdelay(20);
+
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_AVDD_AVEE_EN])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_AVDD_AVEE_EN], 0);
+    }
+
+    mdelay(10);
+
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_VDD_1V8_LCD_EN])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_VDD_1V8_LCD_EN], 0);
+    }
+#endif
+}
+
+void nv_dsi_panel_cleanup
+(
+    nv_state_t *nv,
+    void *dsiPanelInfo
+)
+{
+#if NV_SUPPORTS_PLATFORM_DEVICE
+    int count;
+    DSI_PANEL_INFO *panelInfo = dsiPanelInfo;
+
+    if (!IS_ERR_OR_NULL(panelInfo->dsi_init_cmd_array))
+        NV_KFREE(panelInfo->dsi_init_cmd_array, sizeof(u32) * panelInfo->init_cmd_array_size);
+
+    if (!IS_ERR_OR_NULL(panelInfo->dsi_early_suspend_cmd_array))
+        NV_KFREE(panelInfo->dsi_early_suspend_cmd_array, sizeof(u32) * panelInfo->early_suspend_cmd_array_size);
+
+    if (!IS_ERR_OR_NULL(panelInfo->dsi_late_resume_cmd_array))
+        NV_KFREE(panelInfo->dsi_late_resume_cmd_array, sizeof(u32) * panelInfo->late_resume_cmd_array_size);
+
+    if (!IS_ERR_OR_NULL(panelInfo->dsi_postvideo_cmd_array))
+        NV_KFREE(panelInfo->dsi_postvideo_cmd_array, sizeof(u32) * panelInfo->postvideo_cmd_array_size);
+
+    if (!IS_ERR_OR_NULL(panelInfo->dsi_suspend_cmd_array))
+        NV_KFREE(panelInfo->dsi_suspend_cmd_array, sizeof(u32) * panelInfo->suspend_cmd_array_size);
+
+    if (!IS_ERR_OR_NULL(panelInfo->pktSeq_array))
+        NV_KFREE(panelInfo->pktSeq_array, sizeof(u32) * panelInfo->pktSeq_array_size);
+
+    if (panelInfo->bl_name != 
NULL) { + NV_KFREE(panelInfo->bl_name, sizeof(u8) * bl_name_len); + } + + for (count = 0; count < DSI_N_GPIO_PANEL; count++) { + if (gpio_is_valid(panelInfo->panel_gpio[count])) { + gpio_free(panelInfo->panel_gpio[count]); + } + } + panelInfo->panel_gpio_populated = false; +#endif +} diff --git a/kernel-open/nvidia/nv-gpio.c b/kernel-open/nvidia/nv-gpio.c new file mode 100644 index 0000000..9806f57 --- /dev/null +++ b/kernel-open/nvidia/nv-gpio.c @@ -0,0 +1,263 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 - 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +#include "os_gpio.h" + +#define NV_GPIOF_DIR_IN (1 << 0) + +NV_STATUS NV_API_CALL nv_gpio_get_pin_state +( + nv_state_t *nv, + NvU32 pinNum, + NvU32 *pinValue +) +{ +#if NV_SUPPORTS_PLATFORM_DEVICE + int ret; + + ret = gpio_get_value(pinNum); + if (ret < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: %s: failed with err: %d\n", + __func__, ret); + return NV_ERR_GENERIC; + } + + *pinValue = ret; + + return NV_OK; +#else + nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n"); + return NV_ERR_GENERIC; +#endif +} + +void NV_API_CALL nv_gpio_set_pin_state +( + nv_state_t *nv, + NvU32 pinNum, + NvU32 pinValue +) +{ +#if NV_SUPPORTS_PLATFORM_DEVICE + gpio_set_value(pinNum, pinValue); +#else + nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n"); +#endif +} + +NV_STATUS NV_API_CALL nv_gpio_set_pin_direction +( + nv_state_t *nv, + NvU32 pinNum, + NvU32 direction +) +{ +#if NV_SUPPORTS_PLATFORM_DEVICE + int ret; + + if (direction) + { + ret = gpio_direction_input(pinNum); + } + else + { + ret = gpio_direction_output(pinNum, 0); + } + + if (ret) + { + nv_printf(NV_DBG_ERRORS, "NVRM: %s: failed with err: %d\n", + __func__, ret); + return NV_ERR_GENERIC; + } + + return NV_OK; +#else + nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n"); + return NV_ERR_GENERIC; +#endif +} + +NV_STATUS NV_API_CALL nv_gpio_get_pin_direction +( + nv_state_t *nv, + NvU32 pinNum, + NvU32 *direction +) +{ +/*! + * TODO: Commenting out until gpio_get_direction wrapper + * support is added in kernel. 
+ */
+#if 0
+    int ret;
+
+    ret = nv_gpio_get_direction(pinNum);
+    if (ret)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: %s: failed with err: %d\n",
+                  __func__, ret);
+        return NV_ERR_GENERIC;
+    }
+    *direction = ret;
+#endif
+
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL nv_gpio_get_pin_number
+(
+    nv_state_t *nv,
+    NvU32 function,
+    NvU32 *pinNum
+)
+{
+#if NV_SUPPORTS_PLATFORM_DEVICE
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    int rc;
+
+    /*!
+     * @brief Mapping array of OS GPIO function ID to OS function name;
+     * this name is used to get the GPIO number from the Device Tree.
+     */
+    static const char *osMapGpioFunc[] = {
+        [NV_OS_GPIO_FUNC_HOTPLUG_A] = "os_gpio_hotplug_a",
+        [NV_OS_GPIO_FUNC_HOTPLUG_B] = "os_gpio_hotplug_b",
+        [NV_OS_GPIO_FUNC_HOTPLUG_C] = "os_gpio_hotplug_c",
+        [NV_OS_GPIO_FUNC_HOTPLUG_D] = "os_gpio_hotplug_d",
+    };
+
+    rc = of_get_named_gpio(nvl->dev->of_node, osMapGpioFunc[function], 0);
+    if (rc < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: of_get_named_gpio failed for gpio - %s, rc - %d\n",
+                  osMapGpioFunc[function], rc);
+        return NV_ERR_GENERIC;
+    }
+    *pinNum = rc;
+
+    rc = devm_gpio_request_one(nvl->dev, *pinNum, NV_GPIOF_DIR_IN,
+                               osMapGpioFunc[function]);
+    if (rc < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: request gpio failed for gpio - %s, rc - %d\n",
+                  osMapGpioFunc[function], rc);
+        return NV_ERR_GENERIC;
+    }
+
+    return NV_OK;
+#else
+    nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n");
+    return NV_ERR_GENERIC;
+#endif
+}
+
+NvBool NV_API_CALL nv_gpio_get_pin_interrupt_status
+(
+    nv_state_t *nv,
+    NvU32 pinNum,
+    NvU32 direction
+)
+{
+#if NV_SUPPORTS_PLATFORM_DEVICE
+    NvU32 irqGpioPin;
+    NvU32 pinValue;
+
+    if (nv_get_current_irq_type(nv) != NV_SOC_IRQ_GPIO_TYPE)
+    {
+        return NV_FALSE;
+    }
+
+    nv_get_current_irq_priv_data(nv, &irqGpioPin);
+    if (pinNum != irqGpioPin)
+    {
+        return NV_FALSE;
+    }
+
+    pinValue = gpio_get_value(pinNum);
+    if (pinValue != direction)
+    {
+        return NV_FALSE;
+    }
+
+    return NV_TRUE;
+#else
+    nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n");
+    return NV_FALSE;
+#endif
+}
+
+NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt
+(
+    nv_state_t *nv,
+    NvU32 pinNum,
+    NvU32 trigger_level
+)
+{
+#if NV_SUPPORTS_PLATFORM_DEVICE
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    int rc;
+    int irq_num;
+
+    irq_num = gpio_to_irq(pinNum);
+
+    /*
+     * Ignore the falling-trigger request for the hotplug gpio pin: the
+     * hotplug sequence calls this function twice for the same pin, once per
+     * trigger level (rising/falling), but Linux allows an interrupt to be
+     * registered only once with the required trigger levels. To avoid
+     * re-registration, skip the falling-trigger call; the rising-trigger
+     * call registers for both rising and falling triggers.
+ */ + if (trigger_level == 0) + { + return NV_OK; + } + + rc = nv_request_soc_irq(nvl, irq_num, NV_SOC_IRQ_GPIO_TYPE, + (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | + IRQF_ONESHOT), pinNum, + "hdmi-hotplug"); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: IRQ registration failed for gpio - %d, rc - %d\n", + pinNum, rc); + return NV_ERR_GENERIC; + } + + /* Disable the irq after registration as RM init sequence re-enables it */ + disable_irq_nosync(irq_num); + + return NV_OK; +#else + nv_printf(NV_DBG_ERRORS, "NVRM: platform device support not present\n"); + return NV_ERR_GENERIC; +#endif +} diff --git a/kernel-open/nvidia/nv-host1x.c b/kernel-open/nvidia/nv-host1x.c new file mode 100644 index 0000000..751ac23 --- /dev/null +++ b/kernel-open/nvidia/nv-host1x.c @@ -0,0 +1,83 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +#if defined(NV_LINUX_NVHOST_H_PRESENT) +#include +#include +#if defined(NV_LINUX_NVHOST_T194_H_PRESENT) +#include +#endif + +NV_STATUS nv_get_syncpoint_aperture +( + NvU32 syncpointId, + NvU64 *physAddr, + NvU64 *limit, + NvU32 *offset +) +{ + struct platform_device *host1x_pdev; + phys_addr_t base; + struct host1x *host1x; + NvU32 stride; + NvU32 num_syncpts; + NvS32 ret; + + host1x_pdev = nvhost_get_default_device(); + if (host1x_pdev == NULL) + { + return NV_ERR_INVALID_DEVICE; + } + + host1x = platform_get_drvdata(host1x_pdev); + ret = host1x_syncpt_get_shim_info(host1x, &base, &stride, &num_syncpts); + if ((ret != 0) || (syncpointId >= num_syncpts)) + { + return NV_ERR_INVALID_DATA; + } + + *physAddr = base; + *limit = stride; + *offset = stride * syncpointId; + + return NV_OK; +} +#else + +NV_STATUS nv_get_syncpoint_aperture +( + NvU32 syncpointId, + NvU64 *physAddr, + NvU64 *limit, + NvU32 *offset +) +{ + return NV_ERR_NOT_SUPPORTED; +} +#endif + diff --git a/kernel-open/nvidia/nv-i2c.c b/kernel-open/nvidia/nv-i2c.c new file mode 100644 index 0000000..a0f61ed --- /dev/null +++ b/kernel-open/nvidia/nv-i2c.c @@ -0,0 +1,558 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include + +#include "os-interface.h" +#include "nv-linux.h" +#include "nvi2c.h" + +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) + +static int nv_i2c_algo_master_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num) +{ + nv_state_t *nv = (nv_state_t *)adapter->algo_data; + unsigned int i = 0; + int rc; + NV_STATUS rmStatus = NV_OK; + nvidia_stack_t *sp = NULL; + const unsigned int supported_i2c_flags = I2C_M_RD +#if defined(I2C_M_DMA_SAFE) + | I2C_M_DMA_SAFE +#endif + ; + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + return rc; + } + + rc = -EIO; + + for (i = 0; ((i < (unsigned int)num) && (rmStatus == NV_OK)); i++) + { + if (msgs[i].flags & ~supported_i2c_flags) + { + /* we only support basic I2C reads/writes, reject any other commands */ + rc = -EINVAL; + nv_printf(NV_DBG_ERRORS, "NVRM: Unsupported I2C flags used. (flags:0x%08x)\n", + msgs[i].flags); + rmStatus = NV_ERR_INVALID_ARGUMENT; + } + else + { + rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter, + (msgs[i].flags & I2C_M_RD) ? + NV_I2C_CMD_READ : NV_I2C_CMD_WRITE, + (NvU8)(msgs[i].addr & 0x7f), 0, + (NvU32)(msgs[i].len & 0xffffUL), + (NvU8 *)msgs[i].buf); + } + } + + nv_kmem_cache_free_stack(sp); + + return (rmStatus != NV_OK) ? rc : num; +} + +static int nv_i2c_algo_smbus_xfer( + struct i2c_adapter *adapter, + u16 addr, + unsigned short flags, + char read_write, + u8 command, + int size, + union i2c_smbus_data *data +) +{ + nv_state_t *nv = (nv_state_t *)adapter->algo_data; + int rc; + NV_STATUS rmStatus = NV_OK; + nvidia_stack_t *sp = NULL; + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + return rc; + } + + rc = -EIO; + + switch (size) + { + case I2C_SMBUS_QUICK: + rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter, + (read_write == I2C_SMBUS_READ) ? + NV_I2C_CMD_SMBUS_QUICK_READ : + NV_I2C_CMD_SMBUS_QUICK_WRITE, + (NvU8)(addr & 0x7f), 0, 0, NULL); + break; + + case I2C_SMBUS_BYTE: + if (read_write == I2C_SMBUS_READ) + { + rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter, + NV_I2C_CMD_READ, + (NvU8)(addr & 0x7f), 0, 1, + (NvU8 *)&data->byte); + } + else + { + u8 data = command; + rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter, + NV_I2C_CMD_WRITE, + (NvU8)(addr & 0x7f), 0, 1, + (NvU8 *)&data); + } + break; + + case I2C_SMBUS_BYTE_DATA: + rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter, + (read_write == I2C_SMBUS_READ) ? 
+ NV_I2C_CMD_SMBUS_READ : + NV_I2C_CMD_SMBUS_WRITE, + (NvU8)(addr & 0x7f), (NvU8)command, 1, + (NvU8 *)&data->byte); + break; + + case I2C_SMBUS_WORD_DATA: + if (read_write != I2C_SMBUS_READ) + { + u16 word = data->word; + data->block[1] = (word & 0xff); + data->block[2] = (word >> 8); + } + + rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter, + (read_write == I2C_SMBUS_READ) ? + NV_I2C_CMD_SMBUS_READ : + NV_I2C_CMD_SMBUS_WRITE, + (NvU8)(addr & 0x7f), (NvU8)command, 2, + (NvU8 *)&data->block[1]); + + if (read_write == I2C_SMBUS_READ) + { + data->word = ((NvU16)data->block[1]) | + ((NvU16)data->block[2] << 8); + } + break; + + case I2C_SMBUS_BLOCK_DATA: + rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter, + (read_write == I2C_SMBUS_READ) ? + NV_I2C_CMD_SMBUS_BLOCK_READ : + NV_I2C_CMD_SMBUS_BLOCK_WRITE, + (NvU8)(addr & 0x7f), (NvU8)command, + sizeof(data->block), + (NvU8 *)data->block); + break; + + case I2C_SMBUS_I2C_BLOCK_DATA: + rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter, + (read_write == I2C_SMBUS_READ) ? + NV_I2C_CMD_BLOCK_READ : + NV_I2C_CMD_BLOCK_WRITE, + (NvU8)(addr & 0x7f), (NvU8)command, + (NvU8)data->block[0], + (NvU8 *)&data->block[1]); + break; + + default: + rc = -EINVAL; + rmStatus = NV_ERR_INVALID_ARGUMENT; + } + + nv_kmem_cache_free_stack(sp); + + return (rmStatus != NV_OK) ? rc : 0; +} + +static u32 nv_i2c_algo_functionality(struct i2c_adapter *adapter) +{ + nv_state_t *nv = (nv_state_t *)adapter->algo_data; + u32 ret = I2C_FUNC_I2C; + nvidia_stack_t *sp = NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return 0; + } + + if (rm_i2c_is_smbus_capable(sp, nv, adapter)) + { + ret |= (I2C_FUNC_SMBUS_QUICK | + I2C_FUNC_SMBUS_BYTE | + I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_WORD_DATA | + I2C_FUNC_SMBUS_BLOCK_DATA | + I2C_FUNC_SMBUS_I2C_BLOCK); + } + + nv_kmem_cache_free_stack(sp); + + return ret; +} + +static struct i2c_algorithm nv_i2c_algo = { + .master_xfer = nv_i2c_algo_master_xfer, + .smbus_xfer = nv_i2c_algo_smbus_xfer, + .functionality = nv_i2c_algo_functionality, +}; + +struct i2c_adapter nv_i2c_adapter_prototype = { + .owner = THIS_MODULE, + .algo = &nv_i2c_algo, + .algo_data = NULL, +}; + +void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port) +{ + NV_STATUS rmStatus; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct i2c_adapter *pI2cAdapter = NULL; + int osstatus = 0; + + // get a i2c adapter + rmStatus = os_alloc_mem((void **)&pI2cAdapter,sizeof(struct i2c_adapter)); + + if (rmStatus != NV_OK) + return NULL; + + // fill in with default structure + os_mem_copy(pI2cAdapter, &nv_i2c_adapter_prototype, sizeof(struct i2c_adapter)); + + pI2cAdapter->dev.parent = nvl->dev; + + if (nvl->pci_dev != NULL) + { + snprintf(pI2cAdapter->name, sizeof(pI2cAdapter->name), + "NVIDIA i2c adapter %u at %x:%02x.%u\n", port, nv->pci_info.bus, + nv->pci_info.slot, PCI_FUNC(nvl->pci_dev->devfn)); + } + else + { + snprintf(pI2cAdapter->name, sizeof(pI2cAdapter->name), + "NVIDIA SOC i2c adapter %u\n", port); + } + + // add our data to the structure + pI2cAdapter->algo_data = (void *)nv; + + // attempt to register with the kernel + osstatus = i2c_add_adapter(pI2cAdapter); + + if (osstatus) + { + // free the memory and NULL the ptr + os_free_mem(pI2cAdapter); + + pI2cAdapter = NULL; + } + + return ((void *)pI2cAdapter); +} + +void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data) +{ + struct i2c_adapter *pI2cAdapter = (struct i2c_adapter *)data; + + if (pI2cAdapter) + { + // release with the OS + i2c_del_adapter(pI2cAdapter); + 
os_free_mem(pI2cAdapter);
+    }
+}
+
+#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
+static struct i2c_client * nv_i2c_register_client(
+    nv_state_t *nv,
+    NvU32 linuxI2CSwPort,
+    NvU8 address)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct i2c_adapter *i2c_adapter;
+    struct i2c_client *client;
+    int c_index;
+    struct i2c_board_info i2c_dev_info = {
+        .type = "tegra_display",
+        .addr = address,
+    };
+
+    /* Get the adapter using i2c port */
+    i2c_adapter = i2c_get_adapter(linuxI2CSwPort);
+    if (i2c_adapter == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Unable to get i2c adapter for port(%d)\n",
+                  linuxI2CSwPort);
+        return NULL;
+    }
+
+#if defined(NV_I2C_NEW_CLIENT_DEVICE_PRESENT)
+    client = i2c_new_client_device(i2c_adapter, &i2c_dev_info);
+#else
+    nv_printf(NV_DBG_ERRORS, "NVRM: nv_i2c_new_device not present\n");
+    client = NULL;
+#endif
+    if (client == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Unable to register client for address(0x%x)\n",
+                  address);
+        i2c_put_adapter(i2c_adapter);
+        return NULL;
+    }
+    i2c_put_adapter(i2c_adapter);
+
+    /* Save the port and i2c client */
+    nvl->i2c_clients[linuxI2CSwPort].port = linuxI2CSwPort;
+    for (c_index = 0; c_index < MAX_CLIENTS_PER_ADAPTER; c_index++)
+    {
+        if (nvl->i2c_clients[linuxI2CSwPort].pOsClient[c_index] == NULL)
+        {
+            nvl->i2c_clients[linuxI2CSwPort].pOsClient[c_index] = client;
+            break;
+        }
+    }
+
+    return client;
+}
+
+static struct i2c_client *nv_i2c_get_registered_client(
+    nv_state_t *nv,
+    NvU32 linuxI2CSwPort,
+    NvU8 address)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    int c_index;
+
+    for (c_index = 0; c_index < MAX_CLIENTS_PER_ADAPTER; c_index++)
+    {
+        struct i2c_client *client;
+
+        client = (struct i2c_client *)nvl->i2c_clients[linuxI2CSwPort].pOsClient[c_index];
+        if (client)
+        {
+            if (address == (NvU8)client->addr)
+            {
+                return client;
+            }
+        }
+        else
+        {
+            break;
+        }
+    }
+
+    return NULL;
+}
+
+NV_STATUS NV_API_CALL nv_i2c_transfer(
+    nv_state_t *nv,
+    NvU32 linuxI2CSwPort,
+    NvU8 address,
+    nv_i2c_msg_t *nv_msgs,
+    int num_msgs
+)
+{
+    struct i2c_client *client;
+    struct i2c_msg *msgs;
+    int count;
+    int rc;
+    NV_STATUS status = NV_OK;
+
+    //
+    // RM-style client addresses use 8-bit addressing, but Linux uses 7-bit
+    // addressing, so convert to the 7-bit format.
+    //
+    address = address >> 1;
+
+    //
+    // Check that this is a valid port. linuxI2CSwPort is unsigned, so only
+    // the upper bound needs to be tested.
+    //
+    if (linuxI2CSwPort >= MAX_TEGRA_I2C_PORTS)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Invalid I2C port:%d\n", linuxI2CSwPort);
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    // Reject empty transfers; 'client' below requires at least one message.
+    if (num_msgs <= 0)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    for (count = 0; count < num_msgs; count++) {
+        //
+        // Convert each per-message client address from the RM's 8-bit
+        // format to the 7-bit format expected by Linux, as above.
+        //
+        nv_msgs[count].addr = nv_msgs[count].addr >> 1;
+
+        client = nv_i2c_get_registered_client(nv, linuxI2CSwPort, nv_msgs[count].addr);
+        if (client == NULL)
+        {
+            client = nv_i2c_register_client(nv, linuxI2CSwPort, nv_msgs[count].addr);
+            if (client == NULL)
+            {
+                nv_printf(NV_DBG_ERRORS, "NVRM: i2c client register failed for addr:0x%x\n",
+                          nv_msgs[count].addr);
+                return NV_ERR_GENERIC;
+            }
+        }
+    }
+
+    msgs = kzalloc((num_msgs * sizeof(*msgs)), GFP_KERNEL);
+    if (msgs == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: i2c message allocation failed\n");
+        return NV_ERR_NO_MEMORY;
+    }
+
+    for (count = 0; count < num_msgs; count++) {
+        msgs[count].addr  = nv_msgs[count].addr;
+        msgs[count].flags = nv_msgs[count].flags;
+        msgs[count].len   = nv_msgs[count].len;
+        msgs[count].buf   = nv_msgs[count].buf;
+    }
+
+    rc = i2c_transfer(client->adapter, msgs, num_msgs);
+    if (rc != num_msgs)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: i2c transfer failed for addr: 0x%x\n",
+                  address);
+        status = NV_ERR_GENERIC;
+    }
+
+    kfree(msgs);
+
+    return status;
+}
+
+void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *nv)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    int p_index, c_index;
+
+    for (p_index = 0; p_index < MAX_TEGRA_I2C_PORTS; p_index++)
+    {
+        for (c_index = 0;
+             c_index < MAX_CLIENTS_PER_ADAPTER;
+             c_index++)
+        {
+            struct i2c_client *client;
+
+            client = (struct i2c_client *)nvl->i2c_clients[p_index].pOsClient[c_index];
+            if (client)
+            {
+                i2c_unregister_device(client);
+                nvl->i2c_clients[p_index].pOsClient[c_index] = NULL;
+            }
+        }
+    }
+}
+
+NV_STATUS NV_API_CALL nv_i2c_bus_status(
+    nv_state_t *nv,
+    NvU32 linuxI2CSwPort,
+    NvS32 *scl,
+    NvS32 *sda)
+{
+#if NV_IS_EXPORT_SYMBOL_PRESENT_i2c_bus_status
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct i2c_adapter *i2c_adapter;
+    int ret;
+
+    //
+    // Check that this is a valid port. linuxI2CSwPort is unsigned, so only
+    // the upper bound needs to be tested.
+    //
+    if (linuxI2CSwPort >= MAX_TEGRA_I2C_PORTS)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Invalid I2C port:%d\n", linuxI2CSwPort);
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    /* Get the adapter using i2c port */
+    i2c_adapter = i2c_get_adapter(linuxI2CSwPort);
+    if (i2c_adapter == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Unable to get i2c adapter for port(%d)\n",
+                  linuxI2CSwPort);
+        // This function returns an NV_STATUS, not a pointer.
+        return NV_ERR_GENERIC;
+    }
+
+    ret = i2c_bus_status(i2c_adapter, scl, sda);
+
+    // Drop the adapter reference on both the error and the success paths.
+    i2c_put_adapter(i2c_adapter);
+
+    if (ret < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: i2c_bus_status failed:%d\n", ret);
+        return NV_ERR_GENERIC;
+    }
+
+    return NV_OK;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+#endif // NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
+#endif // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+
+#if !defined(CONFIG_I2C) && !defined(CONFIG_I2C_MODULE)
+
+void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data)
+{
+}
+
+void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port)
+{
+    return NULL;
+}
+#endif
+
+
+#if !NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE || \
+    (NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE && \
+     (!defined(CONFIG_I2C) && !defined(CONFIG_I2C_MODULE)))
+
+NV_STATUS NV_API_CALL nv_i2c_transfer(
+    nv_state_t *nv,
+    NvU32 linuxI2CSwPort,
+    NvU8 address,
+    nv_i2c_msg_t *nv_msgs,
+    int num_msgs
+)
+{
+    return NV_OK;
+}
+
+void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *nv)
+{
+}
+
+NV_STATUS NV_API_CALL nv_i2c_bus_status(
+    nv_state_t *nv,
+    NvU32 linuxI2CSwPort,
+    NvS32 *scl,
+    NvS32 *sda)
+{
+    return NV_ERR_GENERIC;
+}
+
+#endif
diff --git a/kernel-open/nvidia/nv-imp.c b/kernel-open/nvidia/nv-imp.c
new file mode 100644
index 0000000..9b5212f
--- /dev/null
+++ b/kernel-open/nvidia/nv-imp.c
@@ -0,0 +1,350 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+#if defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT) || IS_ENABLED(CONFIG_TEGRA_BPMP)
+#include <soc/tegra/bpmp-abi.h>
+#endif
+
+#if IS_ENABLED(CONFIG_TEGRA_BPMP)
+#include <soc/tegra/bpmp.h>
+#elif defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT)
+#include <soc/tegra/tegra_bpmp.h>
+#endif // IS_ENABLED(CONFIG_TEGRA_BPMP)
+
+#if defined NV_DT_BINDINGS_INTERCONNECT_TEGRA_ICC_ID_H_PRESENT
+#include <dt-bindings/interconnect/tegra_icc_id.h>
+#endif
+
+#ifdef NV_LINUX_PLATFORM_TEGRA_MC_UTILS_H_PRESENT
+#include <linux/platform/tegra/mc_utils.h>
+#endif
+
+//
+// IMP requires information from various BPMP and MC driver functions. The
+// macro below checks that all of the required functions are present.
+//
+#define IMP_SUPPORT_FUNCTIONS_PRESENT                   \
+    (defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT) ||      \
+     IS_ENABLED(CONFIG_TEGRA_BPMP)) &&                  \
+    defined(NV_LINUX_PLATFORM_TEGRA_MC_UTILS_H_PRESENT)
+
+//
+// Also create a macro to check if all the required ICC symbols are present.
+// DT endpoints are defined in dt-bindings/interconnect/tegra_icc_id.h.
+//
+#define ICC_SUPPORT_FUNCTIONS_PRESENT \
+    defined(NV_DT_BINDINGS_INTERCONNECT_TEGRA_ICC_ID_H_PRESENT)
+
+/*!
+ * @brief Returns IMP-relevant data collected from other modules
+ *
+ * @param[out] tegra_imp_import_data  Structure to receive the data
+ *
+ * @returns NV_OK if successful,
+ *          NV_ERR_NOT_SUPPORTED if the functionality is not available.
+ */
+NV_STATUS NV_API_CALL
+nv_imp_get_import_data
+(
+    TEGRA_IMP_IMPORT_DATA *tegra_imp_import_data
+)
+{
+#if IMP_SUPPORT_FUNCTIONS_PRESENT
+    tegra_imp_import_data->num_dram_channels = get_dram_num_channels();
+    nv_printf(NV_DBG_INFO, "NVRM: num_dram_channels = %u\n",
+              tegra_imp_import_data->num_dram_channels);
+
+    return NV_OK;
+#else // IMP_SUPPORT_FUNCTIONS_PRESENT
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+/*!
+ * @brief Tells BPMP whether or not RFL is valid
+ *
+ * Display HW generates an ok_to_switch signal which asserts when mempool
+ * occupancy is high enough to be able to turn off memory long enough to
+ * execute a dramclk frequency switch without underflowing display output.
+ * ok_to_switch drives the RFL ("request for latency") signal in the memory
+ * unit, and the switch sequencer waits for this signal to go active before
+ * starting a dramclk switch.
However, if the signal is not valid (e.g., if + * display HW or SW has not been initialized yet), the switch sequencer ignores + * the signal. This API tells BPMP whether or not the signal is valid. + * + * @param[in] nv Per GPU Linux state + * @param[in] bEnable True if RFL will be valid; false if invalid + * + * @returns NV_OK if successful, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * NV_ERR_GENERIC if some other kind of error occurred. + */ +NV_STATUS NV_API_CALL +nv_imp_enable_disable_rfl +( + nv_state_t *nv, + NvBool bEnable +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; +#if IMP_SUPPORT_FUNCTIONS_PRESENT +#if IS_ENABLED(CONFIG_TEGRA_BPMP) && NV_SUPPORTS_PLATFORM_DEVICE + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct tegra_bpmp *bpmp = tegra_bpmp_get(nvl->dev); + struct tegra_bpmp_message msg; + struct mrq_emc_disp_rfl_request emc_disp_rfl_request; + int rc; + + memset(&emc_disp_rfl_request, 0, sizeof(emc_disp_rfl_request)); + emc_disp_rfl_request.mode = bEnable ? EMC_DISP_RFL_MODE_ENABLED : + EMC_DISP_RFL_MODE_DISABLED; + msg.mrq = MRQ_EMC_DISP_RFL; + msg.tx.data = &emc_disp_rfl_request; + msg.tx.size = sizeof(emc_disp_rfl_request); + msg.rx.data = NULL; + msg.rx.size = 0; + + rc = tegra_bpmp_transfer(bpmp, &msg); + if (rc == 0) + { + nv_printf(NV_DBG_INFO, + "\"Wait for RFL\" is %s via MRQ_EMC_DISP_RFL\n", + bEnable ? "enabled" : "disabled"); + status = NV_OK; + } + else + { + nv_printf(NV_DBG_ERRORS, + "MRQ_EMC_DISP_RFL failed to %s \"Wait for RFL\" (error code = %d)\n", + bEnable ? "enable" : "disable", + rc); + status = NV_ERR_GENERIC; + } +#else + nv_printf(NV_DBG_ERRORS, "nv_imp_enable_disable_rfl stub called!\n"); +#endif +#endif + return status; +} + +/*! + * @brief Obtains a handle for the display data path + * + * If a handle is obtained successfully, it is not returned to the caller; it + * is saved for later use by subsequent nv_imp_icc_set_bw calls. + * nv_imp_icc_get must be called prior to calling nv_imp_icc_set_bw. + * + * @param[out] nv Per GPU Linux state + * + * @returns NV_OK if successful, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * NV_ERR_GENERIC if some other error occurred. + */ +NV_STATUS NV_API_CALL +nv_imp_icc_get +( + nv_state_t *nv +) +{ +#if ICC_SUPPORT_FUNCTIONS_PRESENT && NV_SUPPORTS_PLATFORM_DEVICE + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NV_STATUS status = NV_OK; + +#if defined(NV_DEVM_ICC_GET_PRESENT) + // Needs to use devm_of_icc_get function as per the latest ICC driver + nvl->nv_imp_icc_path = + devm_of_icc_get(nvl->dev, "read-1"); + + if (nvl->nv_imp_icc_path == NULL) + { + nv_printf(NV_DBG_INFO, "NVRM: devm_of_icc_get failed\n"); + return NV_ERR_NOT_SUPPORTED; + } + else if (!IS_ERR(nvl->nv_imp_icc_path)) + { + nvl->is_upstream_icc_path = NV_TRUE; + return NV_OK; + } + // + // Till we modify all DTs to have interconnect node specified as per + // the latest ICC driver, fallback to older ICC mechanism. + // +#endif + + nvl->nv_imp_icc_path = NULL; + +#if defined(NV_ICC_GET_PRESENT) + struct device_node *np; + // Check if ICC is present in the device tree, and enabled. + np = of_find_node_by_path("/icc"); + if (np != NULL) + { + if (of_device_is_available(np)) + { + // Get the ICC data path. 
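+            // (TEGRA_ICC_DISPLAY and TEGRA_ICC_PRIMARY are the display client
+            // and primary memory-controller endpoint IDs from the
+            // dt-bindings/interconnect/tegra_icc_id.h bindings noted above.)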
+ nvl->nv_imp_icc_path = + icc_get(nvl->dev, TEGRA_ICC_DISPLAY, TEGRA_ICC_PRIMARY); + } + of_node_put(np); + } +#else + nv_printf(NV_DBG_ERRORS, "NVRM: icc_get() not present\n"); + return NV_ERR_NOT_SUPPORTED; +#endif + + if (nvl->nv_imp_icc_path == NULL) + { + nv_printf(NV_DBG_INFO, "NVRM: icc_get disabled\n"); + status = NV_ERR_NOT_SUPPORTED; + } + else if (IS_ERR(nvl->nv_imp_icc_path)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: invalid path = %ld\n", + PTR_ERR(nvl->nv_imp_icc_path)); + nvl->nv_imp_icc_path = NULL; + status = NV_ERR_GENERIC; + } + return status; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +/*! + * @brief Releases the handle obtained by nv_imp_icc_get + * + * @param[in] nv Per GPU Linux state + */ +void +nv_imp_icc_put +( + nv_state_t *nv +) +{ +#if ICC_SUPPORT_FUNCTIONS_PRESENT + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + +#if defined(NV_DEVM_ICC_GET_PRESENT) + // + // If devm_of_icc_get API is used for requesting the bandwidth, + // it does not require to call put explicitly. + // + if (nvl->is_upstream_icc_path) + { + goto done; + } +#endif + +#if defined(NV_ICC_PUT_PRESENT) && NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE + if (nvl->nv_imp_icc_path != NULL) + { + icc_put(nvl->nv_imp_icc_path); + } +#else + nv_printf(NV_DBG_ERRORS, "icc_put() not present\n"); +#endif + +done: + nvl->nv_imp_icc_path = NULL; +#endif +} + +/*! + * @brief Allocates a specified amount of ISO memory bandwidth for display + * + * floor_bw_kbps is the minimum required (i.e., floor) dramclk frequency + * multiplied by the width of the pipe over which the display data will travel. + * (It is understood that the bandwidth calculated by multiplying the clock + * frequency by the pipe width will not be realistically achievable, due to + * overhead in the memory subsystem. ICC will not actually use the bandwidth + * value, except to reverse the calculation to get the required dramclk + * frequency.) + * + * nv_imp_icc_get must be called prior to calling this function. + * + * @param[in] nv Per GPU Linux state + * @param[in] avg_bw_kbps Amount of ISO memory bandwidth requested + * @param[in] floor_bw_kbps Min required dramclk freq * pipe width + * + * @returns NV_OK if successful, + * NV_ERR_INSUFFICIENT_RESOURCES if one of the bandwidth values is too + * high, and bandwidth cannot be allocated, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * NV_ERR_GENERIC if some other kind of error occurred. + */ +NV_STATUS NV_API_CALL +nv_imp_icc_set_bw +( + nv_state_t *nv, + NvU32 avg_bw_kbps, + NvU32 floor_bw_kbps +) +{ +#if ICC_SUPPORT_FUNCTIONS_PRESENT && NV_SUPPORTS_PLATFORM_DEVICE + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int rc; + NV_STATUS status = NV_OK; + + // + // avg_bw_kbps can be either ISO bw request or NISO bw request. + // Use floor_bw_kbps to make floor requests. + // +#if defined(NV_ICC_SET_BW_PRESENT) + // + // nv_imp_icc_path will be NULL on AV + L systems because ICC is disabled. + // In this case, skip the allocation call, and just return a success + // status. + // + if (nvl->nv_imp_icc_path == NULL) + { + return NV_OK; + } + rc = icc_set_bw(nvl->nv_imp_icc_path, avg_bw_kbps, floor_bw_kbps); +#else + nv_printf(NV_DBG_ERRORS, "icc_set_bw() not present\n"); + return NV_ERR_NOT_SUPPORTED; +#endif + + if (rc < 0) + { + // A negative return value indicates an error. 
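+        // -ENOMEM is taken to mean the requested bandwidth could not be
+        // reserved; any other negative value is reported as a generic failure.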
+        if (rc == -ENOMEM)
+        {
+            status = NV_ERR_INSUFFICIENT_RESOURCES;
+        }
+        else
+        {
+            status = NV_ERR_GENERIC;
+        }
+    }
+    return status;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
diff --git a/kernel-open/nvidia/nv-ipc-soc.c b/kernel-open/nvidia/nv-ipc-soc.c
new file mode 100644
index 0000000..8a3beb5
--- /dev/null
+++ b/kernel-open/nvidia/nv-ipc-soc.c
@@ -0,0 +1,143 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+#include "dce_rm_client_ipc.h"
+
+#if defined(NV_LINUX_PLATFORM_TEGRA_DCE_DCE_CLIENT_IPC_H_PRESENT)
+#include <linux/platform/tegra/dce/dce-client-ipc.h>
+
+static const NvU32 dceClientRmIpcTypeMap[DCE_CLIENT_RM_IPC_TYPE_MAX] = {
+    [DCE_CLIENT_RM_IPC_TYPE_SYNC]  = DCE_CLIENT_IPC_TYPE_CPU_RM,
+    [DCE_CLIENT_RM_IPC_TYPE_EVENT] = DCE_CLIENT_IPC_TYPE_RM_EVENT,
+};
+
+static NV_STATUS validate_dce_client_ipc_interface_type(NvU32 interfaceType)
+{
+    if (interfaceType >= DCE_CLIENT_RM_IPC_TYPE_MAX)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    if (dceClientRmIpcTypeMap[interfaceType] >= DCE_CLIENT_IPC_TYPE_MAX)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    return NV_OK;
+}
+
+NvU32 nv_tegra_get_rm_interface_type(NvU32 clientIpcType)
+{
+    NvU32 interfaceType = DCE_CLIENT_RM_IPC_TYPE_SYNC;
+
+    for (interfaceType = DCE_CLIENT_RM_IPC_TYPE_SYNC;
+         interfaceType < DCE_CLIENT_RM_IPC_TYPE_MAX;
+         interfaceType++)
+    {
+        if (dceClientRmIpcTypeMap[interfaceType] == clientIpcType)
+            return interfaceType;
+    }
+
+    return NV_ERR_INVALID_DATA;
+}
+
+NV_STATUS nv_tegra_dce_register_ipc_client
+(
+    NvU32 interfaceType,
+    void *usrCtx,
+    nvTegraDceClientIpcCallback callbackFn,
+    NvU32 *handle
+)
+{
+    NvU32 dceClientInterfaceType = DCE_CLIENT_IPC_TYPE_MAX;
+
+    if (validate_dce_client_ipc_interface_type(interfaceType) != NV_OK)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    dceClientInterfaceType = dceClientRmIpcTypeMap[interfaceType];
+
+    return tegra_dce_register_ipc_client(dceClientInterfaceType, callbackFn, usrCtx, handle);
+}
+
+NV_STATUS nv_tegra_dce_client_ipc_send_recv
+(
+    NvU32 clientId,
+    void *msg,
+    NvU32 msgLength
+)
+{
+    struct dce_ipc_message dce_ipc_msg;
+
+    memset(&dce_ipc_msg, 0, sizeof(struct dce_ipc_message));
+    dce_ipc_msg.tx.data = msg;
+    dce_ipc_msg.rx.data = msg;
+    dce_ipc_msg.tx.size = msgLength;
+    dce_ipc_msg.rx.size = msgLength;
+
+    return tegra_dce_client_ipc_send_recv(clientId, &dce_ipc_msg);
+}
+
+NV_STATUS nv_tegra_dce_unregister_ipc_client(NvU32 clientId)
+{
+    return tegra_dce_unregister_ipc_client(clientId);
+}
+#else
+NvU32 nv_tegra_get_rm_interface_type(NvU32 clientIpcType)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS nv_tegra_dce_register_ipc_client
+(
+    NvU32 interfaceType,
+    void *usrCtx,
+    nvTegraDceClientIpcCallback callbackFn,
+    NvU32 *handle
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS nv_tegra_dce_client_ipc_send_recv
+(
+    NvU32 clientId,
+    void *msg,
+    NvU32 msgLength
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS nv_tegra_dce_unregister_ipc_client(NvU32 clientId)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+#endif
+
diff --git a/kernel-open/nvidia/nv-kthread-q.c b/kernel-open/nvidia/nv-kthread-q.c
new file mode 100644
index 0000000..edc4cbb
--- /dev/null
+++ b/kernel-open/nvidia/nv-kthread-q.c
@@ -0,0 +1,329 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nv-kthread-q.h"
+#include "nv-list-helpers.h"
+
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+// Today's implementation is a little simpler and more limited than the
+// API description allows for in nv-kthread-q.h. Details include:
+//
+// 1. Each nv_kthread_q instance is a first-in, first-out queue.
+//
+// 2. Each nv_kthread_q instance is serviced by exactly one kthread.
+//
+// You can create any number of queues, each of which gets its own
+// named kernel thread (kthread). You can then insert arbitrary functions
+// into the queue, and those functions will be run in the context of the
+// queue's kthread.
+
+#ifndef WARN
+    // Only *really* old kernels (2.6.9) end up here. Just use a simple printk
+    // to implement this, because such kernels won't be supported much longer.
+    #define WARN(condition, format...) ({                 \
+        int __ret_warn_on = !!(condition);                \
+        if (unlikely(__ret_warn_on))                      \
+            printk(KERN_ERR format);                      \
+        unlikely(__ret_warn_on);                          \
+    })
+#endif
+
+#define NVQ_WARN(fmt, ...)
\ + do { \ + if (in_interrupt()) { \ + WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \ + ##__VA_ARGS__); \ + } \ + else { \ + WARN(1, "nv_kthread_q: task: %s: " fmt, \ + current->comm, \ + ##__VA_ARGS__); \ + } \ + } while (0) + +static int _main_loop(void *args) +{ + nv_kthread_q_t *q = (nv_kthread_q_t *)args; + nv_kthread_q_item_t *q_item = NULL; + unsigned long flags; + + while (1) { + // Normally this thread is never interrupted. However, + // down_interruptible (instead of down) is called here, + // in order to avoid being classified as a potentially + // hung task, by the kernel watchdog. + while (down_interruptible(&q->q_sem)) + NVQ_WARN("Interrupted during semaphore wait\n"); + + if (atomic_read(&q->main_loop_should_exit)) + break; + + spin_lock_irqsave(&q->q_lock, flags); + + // The q_sem semaphore prevents us from getting here unless there is + // at least one item in the list, so an empty list indicates a bug. + if (unlikely(list_empty(&q->q_list_head))) { + spin_unlock_irqrestore(&q->q_lock, flags); + NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q); + continue; + } + + // Consume one item from the queue + q_item = list_first_entry(&q->q_list_head, + nv_kthread_q_item_t, + q_list_node); + + list_del_init(&q_item->q_list_node); + + spin_unlock_irqrestore(&q->q_lock, flags); + + // Run the item + q_item->function_to_run(q_item->function_args); + + // Make debugging a little simpler by clearing this between runs: + q_item = NULL; + } + + while (!kthread_should_stop()) + schedule(); + + return 0; +} + +void nv_kthread_q_stop(nv_kthread_q_t *q) +{ + // check if queue has been properly initialized + if (unlikely(!q->q_kthread)) + return; + + nv_kthread_q_flush(q); + + // If this assertion fires, then a caller likely either broke the API rules, + // by adding items after calling nv_kthread_q_stop, or possibly messed up + // with inadequate flushing of self-rescheduling q_items. + if (unlikely(!list_empty(&q->q_list_head))) + NVQ_WARN("list not empty after flushing\n"); + + if (likely(!atomic_read(&q->main_loop_should_exit))) { + + atomic_set(&q->main_loop_should_exit, 1); + + // Wake up the kthread so that it can see that it needs to stop: + up(&q->q_sem); + + kthread_stop(q->q_kthread); + q->q_kthread = NULL; + } +} + +// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by +// kthread_create_on_node relies on a 2 entry, per-core cache to minimize +// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the +// stack location ends up being a function of the core assigned to the current +// thread, instead of being a function of the specified NUMA node. The cache was +// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0 +// ("fork: Optimize task creation by caching two thread stacks per CPU if +// CONFIG_VMAP_STACK=y") +// +// To work around the problematic cache, we create up to three kernel threads +// -If the first thread's stack is resident on the preferred node, return this +// thread. +// -Otherwise, create a second thread. If its stack is resident on the +// preferred node, stop the first thread and return this one. +// -Otherwise, create a third thread. The stack allocator does not find a +// cached stack, and so falls back to vmalloc, which takes the NUMA hint into +// consideration. The first two threads are then stopped. +// +// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned. 
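+//
+// In short, the selection logic is:
+//   attempt 1: stack not vmalloc'd, or already on the preferred node -> use it
+//   attempt 2: stack on the preferred node -> stop thread 1, use it
+//   attempt 3: the vmalloc fallback honors the NUMA hint -> stop threads 1
+//              and 2, use it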
+// +// This function is never invoked when there is no NUMA preference (preferred +// node is NUMA_NO_NODE). +static struct task_struct *thread_create_on_node(int (*threadfn)(void *data), + nv_kthread_q_t *q, + int preferred_node, + const char *q_name) +{ + + unsigned i, j; + static const unsigned attempts = 3; + struct task_struct *thread[3]; + + for (i = 0;; i++) { + struct page *stack; + + thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name); + + if (unlikely(IS_ERR(thread[i]))) { + + // Instead of failing, pick the previous thread, even if its + // stack is not allocated on the preferred node. + if (i > 0) + i--; + + break; + } + + // vmalloc is not used to allocate the stack, so simply return the + // thread, even if its stack may not be allocated on the preferred node + if (!is_vmalloc_addr(thread[i]->stack)) + break; + + // Ran out of attempts - return thread even if its stack may not be + // allocated on the preferred node + if (i == (attempts - 1)) + break; + + // Get the NUMA node where the first page of the stack is resident. If + // it is the preferred node, select this thread. + stack = vmalloc_to_page(thread[i]->stack); + if (page_to_nid(stack) == preferred_node) + break; + } + + for (j = i; j > 0; j--) + kthread_stop(thread[j - 1]); + + return thread[i]; +} + +int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node) +{ + memset(q, 0, sizeof(*q)); + + INIT_LIST_HEAD(&q->q_list_head); + spin_lock_init(&q->q_lock); + sema_init(&q->q_sem, 0); + + if (preferred_node == NV_KTHREAD_NO_NODE) { + q->q_kthread = kthread_create(_main_loop, q, q_name); + } + else { + q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name); + } + + if (IS_ERR(q->q_kthread)) { + int err = PTR_ERR(q->q_kthread); + + // Clear q_kthread before returning so that nv_kthread_q_stop() can be + // safely called on it making error handling easier. + q->q_kthread = NULL; + + return err; + } + + wake_up_process(q->q_kthread); + + return 0; +} + +int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname) +{ + return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE); +} + +// Returns true (non-zero) if the item was actually scheduled, and false if the +// item was already pending in a queue. +static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item) +{ + unsigned long flags; + int ret = 1; + + spin_lock_irqsave(&q->q_lock, flags); + + if (likely(list_empty(&q_item->q_list_node))) + list_add_tail(&q_item->q_list_node, &q->q_list_head); + else + ret = 0; + + spin_unlock_irqrestore(&q->q_lock, flags); + + if (likely(ret)) + up(&q->q_sem); + + return ret; +} + +void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item, + nv_q_func_t function_to_run, + void *function_args) +{ + INIT_LIST_HEAD(&q_item->q_list_node); + q_item->function_to_run = function_to_run; + q_item->function_args = function_args; +} + +// Returns true (non-zero) if the q_item got scheduled, false otherwise. 
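+//
+// Illustrative usage (the names here are hypothetical, not part of this
+// file):
+//
+//     static void my_work_fn(void *args) { ... }
+//
+//     nv_kthread_q_item_init(&item, my_work_fn, &my_args);
+//     if (!nv_kthread_q_schedule_q_item(&q, &item))
+//         ...; // the item was already pending in the queue
+//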
+int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q, + nv_kthread_q_item_t *q_item) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was " + "called with a non-alive q: 0x%p\n", q); + return 0; + } + + return _raw_q_schedule(q, q_item); +} + +static void _q_flush_function(void *args) +{ + struct completion *completion = (struct completion *)args; + complete(completion); +} + + +static void _raw_q_flush(nv_kthread_q_t *q) +{ + nv_kthread_q_item_t q_item; + DECLARE_COMPLETION_ONSTACK(completion); + + nv_kthread_q_item_init(&q_item, _q_flush_function, &completion); + + _raw_q_schedule(q, &q_item); + + // Wait for the flush item to run. Once it has run, then all of the + // previously queued items in front of it will have run, so that means + // the flush is complete. + wait_for_completion(&completion); +} + +void nv_kthread_q_flush(nv_kthread_q_t *q) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_flush was called after " + "nv_kthread_q_stop. q: 0x%p\n", q); + return; + } + + // This 2x flush is not a typing mistake. The queue really does have to be + // flushed twice, in order to take care of the case of a q_item that + // reschedules itself. + _raw_q_flush(q); + _raw_q_flush(q); +} diff --git a/kernel-open/nvidia/nv-memdbg.c b/kernel-open/nvidia/nv-memdbg.c new file mode 100644 index 0000000..62d1025 --- /dev/null +++ b/kernel-open/nvidia/nv-memdbg.c @@ -0,0 +1,259 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-memdbg.h" +#include "nv-linux.h" + +/* track who's allocating memory and print out a list of leaked allocations at + * teardown. 
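+ *
+ * Allocations are keyed by address in a red-black tree. If a tracking node
+ * cannot be allocated, the allocation is still counted in the untracked
+ * totals so the leak summary stays accurate.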
+ */
+
+#if defined(NV_STACK_TRACE_PRESENT) && defined(NV_MEM_LOGGER) && defined(DEBUG)
+#define NV_MEM_LOGGER_STACK_TRACE 1
+#else
+#define NV_MEM_LOGGER_STACK_TRACE 0
+#endif
+
+typedef struct {
+    struct rb_node rb_node;
+    void *addr;
+    NvU64 size;
+    NvU32 line;
+    const char *file;
+#if NV_MEM_LOGGER_STACK_TRACE == 1
+    unsigned long stack_trace[32];
+#endif
+} nv_memdbg_node_t;
+
+struct
+{
+    struct rb_root rb_root;
+    NvU64 untracked_bytes;
+    NvU64 num_untracked_allocs;
+    nv_spinlock_t lock;
+} g_nv_memdbg;
+
+void nv_memdbg_init(void)
+{
+    NV_SPIN_LOCK_INIT(&g_nv_memdbg.lock);
+    g_nv_memdbg.rb_root = RB_ROOT;
+}
+
+static nv_memdbg_node_t *nv_memdbg_node_entry(struct rb_node *rb_node)
+{
+    return rb_entry(rb_node, nv_memdbg_node_t, rb_node);
+}
+
+static void nv_memdbg_insert_node(nv_memdbg_node_t *new)
+{
+    nv_memdbg_node_t *node;
+    struct rb_node **rb_node = &g_nv_memdbg.rb_root.rb_node;
+    struct rb_node *rb_parent = NULL;
+
+    while (*rb_node)
+    {
+        node = nv_memdbg_node_entry(*rb_node);
+
+        WARN_ON(new->addr == node->addr);
+
+        rb_parent = *rb_node;
+
+        if (new->addr < node->addr)
+            rb_node = &(*rb_node)->rb_left;
+        else
+            rb_node = &(*rb_node)->rb_right;
+    }
+
+    rb_link_node(&new->rb_node, rb_parent, rb_node);
+    rb_insert_color(&new->rb_node, &g_nv_memdbg.rb_root);
+}
+
+static nv_memdbg_node_t *nv_memdbg_remove_node(void *addr)
+{
+    nv_memdbg_node_t *node = NULL;
+    struct rb_node *rb_node = g_nv_memdbg.rb_root.rb_node;
+
+    while (rb_node)
+    {
+        node = nv_memdbg_node_entry(rb_node);
+        if (addr == node->addr)
+            break;
+        else if (addr < node->addr)
+            rb_node = rb_node->rb_left;
+        else
+            rb_node = rb_node->rb_right;
+    }
+
+    //
+    // If the search fell off the tree, 'node' points at the last node
+    // visited rather than a match. Return NULL in that case so the caller
+    // can account for the allocation as untracked, instead of erasing a
+    // node that does not correspond to this address.
+    //
+    if ((rb_node == NULL) || (node == NULL) || (node->addr != addr))
+        return NULL;
+
+    rb_erase(&node->rb_node, &g_nv_memdbg.rb_root);
+    return node;
+}
+
+void nv_memdbg_add(void *addr, NvU64 size, const char *file, int line)
+{
+    nv_memdbg_node_t *node;
+    unsigned long flags;
+
+    if (addr == NULL)
+    {
+        return;
+    }
+
+    /* If node allocation fails, we can still update the untracked counters */
+    node = kmalloc(sizeof(*node),
+                   NV_MAY_SLEEP() ?
NV_GFP_KERNEL : NV_GFP_ATOMIC); + if (node) + { + node->addr = addr; + node->size = size; + node->file = file; + node->line = line; + +#if NV_MEM_LOGGER_STACK_TRACE == 1 + memset(node->stack_trace, '\0', sizeof(node->stack_trace)); + + stack_trace_save(node->stack_trace, NV_ARRAY_ELEMENTS(node->stack_trace), 0); +#endif + } + + NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags); + + if (node) + { + nv_memdbg_insert_node(node); + } + else + { + ++g_nv_memdbg.num_untracked_allocs; + g_nv_memdbg.untracked_bytes += size; + } + + NV_SPIN_UNLOCK_IRQRESTORE(&g_nv_memdbg.lock, flags); +} + +void nv_memdbg_remove(void *addr, NvU64 size, const char *file, int line) +{ + nv_memdbg_node_t *node; + unsigned long flags; + + if (addr == NULL) + { + return; + } + + NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags); + + node = nv_memdbg_remove_node(addr); + if (!node) + { + WARN_ON(g_nv_memdbg.num_untracked_allocs == 0); + WARN_ON(g_nv_memdbg.untracked_bytes < size); + --g_nv_memdbg.num_untracked_allocs; + g_nv_memdbg.untracked_bytes -= size; + } + + NV_SPIN_UNLOCK_IRQRESTORE(&g_nv_memdbg.lock, flags); + + if (node) + { + if ((size != 0) && (node->size != size)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: size mismatch on free: %llu != %llu\n", + size, node->size); + if (node->file) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: allocation: 0x%p @ %s:%d\n", + node->addr, node->file, node->line); + } + else + { + nv_printf(NV_DBG_ERRORS, + "NVRM: allocation: 0x%p\n", + node->addr); + } + os_dbg_breakpoint(); + } + + kfree(node); + } +} + +void nv_memdbg_exit(void) +{ + nv_memdbg_node_t *node; + NvU64 leaked_bytes = 0, num_leaked_allocs = 0; + + if (!RB_EMPTY_ROOT(&g_nv_memdbg.rb_root)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: list of leaked memory allocations:\n"); + } + + while (!RB_EMPTY_ROOT(&g_nv_memdbg.rb_root)) + { + node = nv_memdbg_node_entry(rb_first(&g_nv_memdbg.rb_root)); + + leaked_bytes += node->size; + ++num_leaked_allocs; + + if (node->file) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %llu bytes, 0x%p @ %s:%d\n", + node->size, node->addr, node->file, node->line); + } + else + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %llu bytes, 0x%p\n", + node->size, node->addr); + } + +#if NV_MEM_LOGGER_STACK_TRACE == 1 + stack_trace_print(node->stack_trace, NV_ARRAY_ELEMENTS(node->stack_trace), 1); +#endif + + rb_erase(&node->rb_node, &g_nv_memdbg.rb_root); + kfree(node); + } + + /* If we failed to allocate a node at some point, we may have leaked memory + * even if the tree is empty */ + if (num_leaked_allocs > 0 || g_nv_memdbg.num_untracked_allocs > 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: total leaked memory: %llu bytes in %llu allocations\n", + leaked_bytes + g_nv_memdbg.untracked_bytes, + num_leaked_allocs + g_nv_memdbg.num_untracked_allocs); + + if (g_nv_memdbg.num_untracked_allocs > 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %llu bytes in %llu allocations untracked\n", + g_nv_memdbg.untracked_bytes, g_nv_memdbg.num_untracked_allocs); + } + } +} diff --git a/kernel-open/nvidia/nv-mmap.c b/kernel-open/nvidia/nv-mmap.c new file mode 100644 index 0000000..f911e36 --- /dev/null +++ b/kernel-open/nvidia/nv-mmap.c @@ -0,0 +1,860 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+#include "nv_speculation_barrier.h"
+
+/*
+ * The 'struct vm_operations' open() callback is called by the Linux
+ * kernel when the parent VMA is split or copied, close() when the
+ * current VMA is about to be deleted.
+ *
+ * We implement these callbacks to keep track of the number of user
+ * mappings of system memory allocations. This was motivated by a
+ * subtle interaction problem between the driver and the kernel with
+ * respect to the bookkeeping of pages marked reserved and later
+ * mapped with mmap().
+ *
+ * Traditionally, the Linux kernel ignored reserved pages, such that
+ * when they were mapped via mmap(), the integrity of their usage
+ * counts depended on the reserved bit being set for as long as user
+ * mappings existed.
+ *
+ * Since we mark system memory pages allocated for DMA reserved and
+ * typically map them with mmap(), we need to ensure they remain
+ * reserved until the last mapping has been torn down. This worked
+ * correctly in most cases, but in a few, RM API clients called into
+ * the RM to free memory before calling munmap() to unmap it.
+ *
+ * In the past, we allowed nv_free_pages() to remove the 'at' from
+ * the parent device's allocation list in this case, but didn't
+ * release the underlying pages until the last user mapping had been
+ * destroyed:
+ *
+ * In nvidia_vma_release(), we freed any resources associated with
+ * the allocation (IOMMU mappings, etc.) and cleared the
+ * underlying pages' reserved bits, but didn't free them. The kernel
+ * was expected to do this.
+ *
+ * This worked in practice, but made dangerous assumptions about the
+ * kernel's behavior and could fail in some cases. We now handle
+ * this case differently (see below).
+ */
+static void
+nvidia_vma_open(struct vm_area_struct *vma)
+{
+    nv_alloc_t *at = NV_VMA_PRIVATE(vma);
+
+    NV_PRINT_VMA(NV_DBG_MEMINFO, vma);
+
+    if (at != NULL)
+    {
+        NV_ATOMIC_INC(at->usage_count);
+
+        NV_PRINT_AT(NV_DBG_MEMINFO, at);
+    }
+}
+
+/*
+ * (see above for additional information)
+ *
+ * If the 'at' usage count drops to zero with the updated logic, the
+ * allocation is recorded in the free list of the private
+ * data associated with the file pointer; nvidia_close() uses this
+ * list to perform deferred free operations when the parent file
+ * descriptor is closed.
This will typically happen when the process + * exits. + * + * Since this is technically a workaround to handle possible fallout + * from misbehaving clients, we additionally print a warning. + */ +static void +nvidia_vma_release(struct vm_area_struct *vma) +{ + nv_alloc_t *at = NV_VMA_PRIVATE(vma); + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma)); + static int count = 0; + + NV_PRINT_VMA(NV_DBG_MEMINFO, vma); + + if (at != NULL && nv_alloc_release(nvlfp, at)) + { + if ((at->pid == os_get_current_process()) && + (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)) + { + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: late unmap, comm: %s, 0x%p\n", + __FUNCTION__, current->comm, at); + } + } +} + +static int +nvidia_vma_access( + struct vm_area_struct *vma, + unsigned long addr, + void *buffer, + int length, + int write +) +{ + nv_alloc_t *at = NULL; + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma)); + nv_state_t *nv = NV_STATE_PTR(nvlfp->nvptr); + NvU32 pageIndex, pageOffset; + void *kernel_mapping; + const nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context; + NvU64 offsInVma = addr - vma->vm_start; + + pageIndex = (offsInVma >> PAGE_SHIFT); + pageOffset = (offsInVma & ~PAGE_MASK); + + if (length < 0) + { + return -EINVAL; + } + + if (!mmap_context->valid) + { + nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap context\n"); + return -EINVAL; + } + + if (write && !(mmap_context->prot & NV_PROTECT_WRITEABLE)) + { + return -EACCES; + } + + if (nv->flags & NV_FLAG_CONTROL) + { + at = NV_VMA_PRIVATE(vma); + + /* + * at can be NULL for peer IO mem. + */ + if (!at) + return -EINVAL; + + if (pageIndex >= at->num_pages) + return -EINVAL; + + pageIndex = nv_array_index_no_speculate(pageIndex, at->num_pages); + kernel_mapping = (void *)(at->page_table[pageIndex].virt_addr + pageOffset); + } + else + { + NvU64 idx = 0; + NvU64 curOffs = 0; + for(; idx < mmap_context->memArea.numRanges; idx++) + { + NvU64 nextOffs = mmap_context->memArea.pRanges[idx].size + curOffs; + if (curOffs <= offsInVma && nextOffs > offsInVma) + { + NvU64 realAddr = offsInVma - curOffs + mmap_context->memArea.pRanges[idx].start; + addr = realAddr & PAGE_MASK; + goto found; + } + curOffs = nextOffs; + } + return -EINVAL; +found: + kernel_mapping = os_map_kernel_space(addr, PAGE_SIZE, NV_MEMORY_UNCACHED); + if (kernel_mapping == NULL) + return -ENOMEM; + + kernel_mapping = ((char *)kernel_mapping + pageOffset); + } + + length = NV_MIN(length, (int)(PAGE_SIZE - pageOffset)); + + if (write) + memcpy(kernel_mapping, buffer, length); + else + memcpy(buffer, kernel_mapping, length); + + if (at == NULL) + { + kernel_mapping = ((char *)kernel_mapping - pageOffset); + os_unmap_kernel_space(kernel_mapping, PAGE_SIZE); + } + + return length; +} + +static vm_fault_t nvidia_fault( +#if !defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG) + struct vm_area_struct *vma, +#endif + struct vm_fault *vmf +) +{ +#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG) + struct vm_area_struct *vma = vmf->vma; +#endif + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma)); + nv_linux_state_t *nvl = nvlfp->nvptr; + nv_state_t *nv = NV_STATE_PTR(nvl); + vm_fault_t ret = VM_FAULT_NOPAGE; + + if (vma->vm_pgoff != 0) + { + return VM_FAULT_SIGBUS; + } + + // Mapping revocation is only supported for GPU mappings. 
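+    // The control device only maps system memory, which is never revoked,
+    // so a fault on one of its VMAs cannot be satisfied here.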
+ if (NV_IS_CTL_DEVICE(nv)) + { + return VM_FAULT_SIGBUS; + } + + + // Wake up GPU and reinstate mappings only if we are not in S3/S4 entry + if (!down_read_trylock(&nv_system_pm_lock)) + { + return VM_FAULT_NOPAGE; + } + + down(&nvl->mmap_lock); + + // Wake up the GPU if it is not currently safe to mmap. + if (!nvl->safe_to_mmap) + { + NV_STATUS status; + + if (!nvl->gpu_wakeup_callback_needed) + { + // GPU wakeup callback already scheduled. + up(&nvl->mmap_lock); + up_read(&nv_system_pm_lock); + return VM_FAULT_NOPAGE; + } + + /* + * GPU wakeup cannot be completed directly in the fault handler due to the + * inability to take the GPU lock while mmap_lock is held. + */ + status = rm_schedule_gpu_wakeup(nvl->sp[NV_DEV_STACK_GPU_WAKEUP], nv); + if (status != NV_OK) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: VM: rm_schedule_gpu_wakeup failed: %x\n", status); + up(&nvl->mmap_lock); + up_read(&nv_system_pm_lock); + return VM_FAULT_SIGBUS; + } + // Ensure that we do not schedule duplicate GPU wakeup callbacks. + nvl->gpu_wakeup_callback_needed = NV_FALSE; + + up(&nvl->mmap_lock); + up_read(&nv_system_pm_lock); + return VM_FAULT_NOPAGE; + } + { + NvU64 idx; + NvU64 curOffs = 0; + NvBool bRevoked = NV_TRUE; + nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context; + for(idx = 0; idx < mmap_context->memArea.numRanges; idx++) + { + NvU64 nextOffs = curOffs + mmap_context->memArea.pRanges[idx].size; + NvU64 pfn = mmap_context->memArea.pRanges[idx].start >> PAGE_SHIFT; + NvU64 numPages = mmap_context->memArea.pRanges[idx].size >> PAGE_SHIFT; + while (numPages != 0) + { + ret = nv_insert_pfn(vma, curOffs + vma->vm_start, pfn); + if (ret != VM_FAULT_NOPAGE) + { + goto err; + } + bRevoked = NV_FALSE; + curOffs += PAGE_SIZE; + pfn++; + numPages--; + } + curOffs = nextOffs; + } +err: + nvl->all_mappings_revoked &= bRevoked; + } + + up(&nvl->mmap_lock); + up_read(&nv_system_pm_lock); + + return ret; +} + +static struct vm_operations_struct nv_vm_ops = { + .open = nvidia_vma_open, + .close = nvidia_vma_release, + .fault = nvidia_fault, + .access = nvidia_vma_access, +}; + +int nv_encode_caching( + pgprot_t *prot, + NvU32 cache_type, + nv_memory_type_t memory_type +) +{ + pgprot_t tmp; + + if (prot == NULL) + { + tmp = __pgprot(0); + prot = &tmp; + } + + switch (cache_type) + { + case NV_MEMORY_UNCACHED_WEAK: +#if defined(NV_PGPROT_UNCACHED_WEAK) + *prot = NV_PGPROT_UNCACHED_WEAK(*prot); + break; +#endif + case NV_MEMORY_UNCACHED: + *prot = (memory_type == NV_MEMORY_TYPE_SYSTEM) ? + NV_PGPROT_UNCACHED(*prot) : + NV_PGPROT_UNCACHED_DEVICE(*prot); + break; +#if defined(NV_PGPROT_WRITE_COMBINED) && \ + defined(NV_PGPROT_WRITE_COMBINED_DEVICE) + case NV_MEMORY_DEFAULT: + case NV_MEMORY_WRITECOMBINED: + if (NV_ALLOW_WRITE_COMBINING(memory_type)) + { + *prot = (memory_type == NV_MEMORY_TYPE_FRAMEBUFFER) ? + NV_PGPROT_WRITE_COMBINED_DEVICE(*prot) : + NV_PGPROT_WRITE_COMBINED(*prot); + break; + } + + /* + * If WC support is unavailable, we need to return an error + * code to the caller, but need not print a warning. + * + * For frame buffer memory, callers are expected to use the + * UC- memory type if we report WC as unsupported, which + * translates to the effective memory type WC if a WC MTRR + * exists or else UC. 
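+         * (On x86, for example, the PAT type UC- combines with an
+         * overlapping WC MTRR to yield an effective WC mapping, while
+         * plain UC always remains uncached.)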
+ */ + return 1; +#endif + case NV_MEMORY_CACHED: + if (!NV_ALLOW_CACHING(memory_type)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: VM: memory type %d does not allow caching!\n", + memory_type); + return 1; + } + break; + + default: + nv_printf(NV_DBG_ERRORS, + "NVRM: VM: cache type %d not supported for memory type %d!\n", + cache_type, memory_type); + return 1; + } + return 0; +} + +static int nvidia_mmap_peer_io( + struct vm_area_struct *vma, + nv_alloc_t *at, + NvU64 page_index, + NvU64 pages +) +{ + int ret; + NvU64 start; + NvU64 size; + + BUG_ON(!at->flags.contig); + + start = at->page_table[page_index].phys_addr; + size = pages * PAGE_SIZE; + + ret = nv_io_remap_page_range(vma, start, size, vma->vm_start); + + return ret; +} + +static int nvidia_mmap_sysmem( + struct vm_area_struct *vma, + nv_alloc_t *at, + NvU64 page_index, + NvU64 pages +) +{ + NvU64 j; + int ret = 0; + unsigned long start = 0; + + NV_ATOMIC_INC(at->usage_count); + + start = vma->vm_start; + for (j = page_index; j < (page_index + pages); j++) + { + j = nv_array_index_no_speculate(j, (page_index + pages)); + + // + // nv_remap_page_range() map a contiguous physical address space + // into the user virtual space. + // Use PFN based mapping api to create the mapping for + // reserved carveout (OS invisible memory, not managed by OS) too. + // Basically nv_remap_page_range() works for all kind of memory regions. + // Imported buffer can be either from OS or Non OS managed regions (reserved carveout). + // nv_remap_page_range() works well for all type of import buffers. + // + if ( +#if defined(NV_VGPU_KVM_BUILD) + at->flags.guest || +#endif + at->flags.carveout || at->import_sgt) + { + ret = nv_remap_page_range(vma, start, at->page_table[j].phys_addr, + PAGE_SIZE, vma->vm_page_prot); + } + else + { + if (at->flags.unencrypted) + vma->vm_page_prot = nv_adjust_pgprot(vma->vm_page_prot); + + ret = vm_insert_page(vma, start, + NV_GET_PAGE_STRUCT(at->page_table[j].phys_addr)); + } + + if (ret) + { + NV_ATOMIC_DEC(at->usage_count); + nv_printf(NV_DBG_ERRORS, + "NVRM: Userspace mapping creation failed [%d]!\n", ret); + return -EAGAIN; + } + start += PAGE_SIZE; + } + + return ret; +} + +static int nvidia_mmap_numa( + struct vm_area_struct *vma, + const nv_alloc_mapping_context_t *mmap_context) +{ + NvU64 start, addr; + NvU64 pages; + NvU64 i; + + pages = NV_VMA_SIZE(vma) >> PAGE_SHIFT; + start = vma->vm_start; + + if (mmap_context->num_pages < pages) + { + return -EINVAL; + } + + // Needed for the linux kernel for mapping compound pages + nv_vm_flags_set(vma, VM_MIXEDMAP); + + for (i = 0, addr = mmap_context->page_array[0]; i < pages; + addr = mmap_context->page_array[++i], start += PAGE_SIZE) + { + if (vm_insert_page(vma, start, NV_GET_PAGE_STRUCT(addr)) != 0) + { + return -EAGAIN; + } + } + + return 0; +} + +int nvidia_mmap_helper( + nv_state_t *nv, + nv_linux_file_private_t *nvlfp, + nvidia_stack_t *sp, + struct vm_area_struct *vma, + void *vm_priv +) +{ + NvU32 prot = 0; + int ret; + const nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NV_STATUS status; + + if (nvlfp == NULL) + return NV_ERR_INVALID_ARGUMENT; + + /* + * If mmap context is not valid on this file descriptor, this mapping wasn't + * previously validated with the RM so it must be rejected. 
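+     * A direct mmap() on the device file without that prior validation
+     * step leaves the context invalid, and is rejected here.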
+ */ + if (!mmap_context->valid) + { + nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap\n"); + return -EINVAL; + } + + if (vma->vm_pgoff != 0) + { + return -EINVAL; + } + + NV_PRINT_VMA(NV_DBG_MEMINFO, vma); + + status = nv_check_gpu_state(nv); + if (status != NV_OK) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, + "GPU is lost, skipping nvidia_mmap_helper\n"); + return status; + } + + NV_VMA_PRIVATE(vma) = vm_priv; + + prot = mmap_context->prot; + + /* + * Nvidia device node(nvidia#) maps device's BAR memory, + * Nvidia control node(nvidiactrl) maps system memory. + */ + if (!NV_IS_CTL_DEVICE(nv)) + { + NvU64 access_start = mmap_context->access_start; + NvU64 access_len = mmap_context->access_size; + + // Ensure size is correct. + if (NV_VMA_SIZE(vma) != memareaSize(mmap_context->memArea)) + { + return -ENXIO; + } + + if (IS_REG_OFFSET(nv, access_start, access_len)) + { + if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED, + NV_MEMORY_TYPE_REGISTERS)) + { + return -ENXIO; + } + } + else if (IS_FB_OFFSET(nv, access_start, access_len)) + { + if (IS_UD_OFFSET(nv, access_start, access_len)) + { + if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED, + NV_MEMORY_TYPE_FRAMEBUFFER)) + { + return -ENXIO; + } + } + else + { + if (nv_encode_caching(&vma->vm_page_prot, + rm_disable_iomap_wc() ? NV_MEMORY_UNCACHED : mmap_context->caching, + NV_MEMORY_TYPE_FRAMEBUFFER)) + { + if (nv_encode_caching(&vma->vm_page_prot, + NV_MEMORY_UNCACHED_WEAK, NV_MEMORY_TYPE_FRAMEBUFFER)) + { + return -ENXIO; + } + } + } + } + + down(&nvl->mmap_lock); + if (nvl->safe_to_mmap) + { + nvl->all_mappings_revoked = NV_FALSE; + + // + // This path is similar to the sysmem mapping code. + // TODO: Refactor is needed as part of bug#2001704. + // + if ((nv_get_numa_status(nvl) == NV_NUMA_STATUS_ONLINE) && + !IS_REG_OFFSET(nv, access_start, access_len) && + (mmap_context->num_pages != 0)) + { + ret = nvidia_mmap_numa(vma, mmap_context); + if (ret) + { + up(&nvl->mmap_lock); + return ret; + } + } + else + { + NvU64 idx = 0; + NvU64 curOffs = 0; + for(; idx < mmap_context->memArea.numRanges; idx++) + { + NvU64 nextOffs = curOffs + mmap_context->memArea.pRanges[idx].size; + if (nv_io_remap_page_range(vma, + mmap_context->memArea.pRanges[idx].start, + mmap_context->memArea.pRanges[idx].size, + vma->vm_start + curOffs) != 0) + { + up(&nvl->mmap_lock); + return -EAGAIN; + } + curOffs = nextOffs; + } + } + } + up(&nvl->mmap_lock); + + nv_vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND); + } + else + { + nv_alloc_t *at; + NvU64 page_index; + NvU64 pages; + NvU64 mmap_size; + + at = (nv_alloc_t *)mmap_context->alloc; + page_index = mmap_context->page_index; + mmap_size = NV_VMA_SIZE(vma); + pages = mmap_size >> PAGE_SHIFT; + + if ((page_index + pages) > at->num_pages) + { + return -ERANGE; + } + + /* + * Callers that pass in non-NULL VMA private data must never reach this + * code. They should be mapping on a non-control node. + */ + BUG_ON(NV_VMA_PRIVATE(vma)); + + if (at->flags.peer_io) + { + if (nv_encode_caching(&vma->vm_page_prot, + at->cache_type, + NV_MEMORY_TYPE_DEVICE_MMIO)) + { + return -ENXIO; + } + + /* + * There is no need to keep 'peer IO at' alive till vma_release like + * 'sysmem at' because there are no security concerns where a client + * could free RM allocated sysmem before unmapping it. Hence, vm_ops + * are NOP, and at->usage_count is never being used. 
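+             *
+             * Clearing the VMA private data below is what makes the open()
+             * and release() callbacks no-ops for peer IO mappings.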
+ */ + NV_VMA_PRIVATE(vma) = NULL; + + ret = nvidia_mmap_peer_io(vma, at, page_index, pages); + + BUG_ON(NV_VMA_PRIVATE(vma)); + + if (ret) + { + return ret; + } + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + nv_vm_flags_set(vma, VM_IO); + nv_vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); + } + else + { + if (nv_encode_caching(&vma->vm_page_prot, + at->cache_type, + NV_MEMORY_TYPE_SYSTEM)) + { + return -ENXIO; + } + + NV_VMA_PRIVATE(vma) = at; + + ret = nvidia_mmap_sysmem(vma, at, page_index, pages); + + if (ret) + { + return ret; + } + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + // + // VM_MIXEDMAP will be set by vm_insert_page() in nvidia_mmap_sysmem(). + // VM_SHARED is added to avoid any undesired copy-on-write effects. + // + nv_vm_flags_set(vma, VM_SHARED); + nv_vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); + } + } + + if ((prot & NV_PROTECT_WRITEABLE) == 0) + { + vma->vm_page_prot = NV_PGPROT_READ_ONLY(vma->vm_page_prot); + nv_vm_flags_clear(vma, VM_WRITE); + nv_vm_flags_clear(vma, VM_MAYWRITE); + } + + vma->vm_ops = &nv_vm_ops; + + return 0; +} + +int nvidia_mmap( + struct file *file, + struct vm_area_struct *vma +) +{ + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + nv_linux_state_t *nvl; + nv_state_t *nv; + nvidia_stack_t *sp = NULL; + int status; + + // + // Do not allow mmap operation if this is a fd into + // which rm objects have been exported. + // + if (nvlfp->nvfp.handles != NULL) + { + return -EINVAL; + } + + if (!nv_is_control_device(NV_FILE_INODE(file))) + { + status = nv_wait_open_complete_interruptible(nvlfp); + if (status != 0) + return status; + } + + nvl = nvlfp->nvptr; + if (nvl == NULL) + return -EIO; + + nv = NV_STATE_PTR(nvl); + + status = nv_kmem_cache_alloc_stack(&sp); + if (status != 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Unable to allocate altstack for mmap\n"); + return status; + } + + status = nvidia_mmap_helper(nv, nvlfp, sp, vma, NULL); + + nv_kmem_cache_free_stack(sp); + + return status; +} + +void +nv_revoke_gpu_mappings_locked( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nv_linux_file_private_t *nvlfp; + + /* Revoke all mappings for every open file */ + list_for_each_entry (nvlfp, &nvl->open_files, entry) + { + unmap_mapping_range(&nvlfp->mapping, 0, ~0, 1); + } + + nvl->all_mappings_revoked = NV_TRUE; +} + +NV_STATUS NV_API_CALL nv_revoke_gpu_mappings( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + // Mapping revocation is only supported for GPU mappings. 
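+    // The control device's system memory mappings are never revoked, so
+    // report the request as unsupported.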
+ if (NV_IS_CTL_DEVICE(nv)) + { + return NV_ERR_NOT_SUPPORTED; + } + + down(&nvl->mmap_lock); + + nv_revoke_gpu_mappings_locked(nv); + + up(&nvl->mmap_lock); + + return NV_OK; +} + +void NV_API_CALL nv_acquire_mmap_lock( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + down(&nvl->mmap_lock); +} + +void NV_API_CALL nv_release_mmap_lock( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + up(&nvl->mmap_lock); +} + +NvBool NV_API_CALL nv_get_all_mappings_revoked_locked( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + // Caller must hold nvl->mmap_lock for all decisions based on this + return nvl->all_mappings_revoked; +} + +void NV_API_CALL nv_set_safe_to_mmap_locked( + nv_state_t *nv, + NvBool safe_to_mmap +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + // Caller must hold nvl->mmap_lock + + /* + * If nvl->safe_to_mmap is transitioning from TRUE to FALSE, we expect to + * need to schedule a GPU wakeup callback when we fault. + * + * nvl->gpu_wakeup_callback_needed will be set to FALSE in nvidia_fault() + * after scheduling the GPU wakeup callback, preventing us from scheduling + * duplicates. + */ + if (!safe_to_mmap && nvl->safe_to_mmap) + { + nvl->gpu_wakeup_callback_needed = NV_TRUE; + } + + nvl->safe_to_mmap = safe_to_mmap; +} diff --git a/kernel-open/nvidia/nv-modeset-interface.c b/kernel-open/nvidia/nv-modeset-interface.c new file mode 100644 index 0000000..39890ee --- /dev/null +++ b/kernel-open/nvidia/nv-modeset-interface.c @@ -0,0 +1,146 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nv-modeset-interface.h" + +#include "os-interface.h" +#include "nv-linux.h" +#include "nvstatus.h" +#include "nv.h" + +static const nvidia_modeset_callbacks_t *nv_modeset_callbacks; + +static int nvidia_modeset_rm_ops_alloc_stack(nvidia_stack_t **sp) +{ + return nv_kmem_cache_alloc_stack(sp); +} + +static void nvidia_modeset_rm_ops_free_stack(nvidia_stack_t *sp) +{ + if (sp != NULL) + { + nv_kmem_cache_free_stack(sp); + } +} + +static int nvidia_modeset_set_callbacks(const nvidia_modeset_callbacks_t *cb) +{ + if ((nv_modeset_callbacks != NULL && cb != NULL) || + (nv_modeset_callbacks == NULL && cb == NULL)) + { + return -EINVAL; + } + + nv_modeset_callbacks = cb; + return 0; +} + +void nvidia_modeset_suspend(NvU32 gpuId) +{ + if (nv_modeset_callbacks) + { + nv_modeset_callbacks->suspend(gpuId); + } +} + +void nvidia_modeset_resume(NvU32 gpuId) +{ + if (nv_modeset_callbacks) + { + nv_modeset_callbacks->resume(gpuId); + } +} + +static NvU32 nvidia_modeset_enumerate_gpus(nv_gpu_info_t *gpu_info) +{ + nv_linux_state_t *nvl; + unsigned int count; + + LOCK_NV_LINUX_DEVICES(); + + count = 0; + + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + nv_state_t *nv = NV_STATE_PTR(nvl); + + /* + * The gpu_info[] array has NV_MAX_GPUS elements. Fail if there + * are more GPUs than that. + */ + if (count >= NV_MAX_GPUS) { + nv_printf(NV_DBG_WARNINGS, "NVRM: More than %d GPUs found.", + NV_MAX_GPUS); + count = 0; + break; + } + + gpu_info[count].gpu_id = nv->gpu_id; + + gpu_info[count].pci_info.domain = nv->pci_info.domain; + gpu_info[count].pci_info.bus = nv->pci_info.bus; + gpu_info[count].pci_info.slot = nv->pci_info.slot; + gpu_info[count].pci_info.function = nv->pci_info.function; + + gpu_info[count].os_device_ptr = nvl->dev; + + count++; + } + + UNLOCK_NV_LINUX_DEVICES(); + + return count; +} + +NV_STATUS nvidia_get_rm_ops(nvidia_modeset_rm_ops_t *rm_ops) +{ + const nvidia_modeset_rm_ops_t local_rm_ops = { + .version_string = NV_VERSION_STRING, + .system_info = { + .allow_write_combining = NV_FALSE, + }, + .alloc_stack = nvidia_modeset_rm_ops_alloc_stack, + .free_stack = nvidia_modeset_rm_ops_free_stack, + .enumerate_gpus = nvidia_modeset_enumerate_gpus, + .open_gpu = nvidia_dev_get, + .close_gpu = nvidia_dev_put, + .op = rm_kernel_rmapi_op, /* provided by nv-kernel.o */ + .set_callbacks = nvidia_modeset_set_callbacks, + }; + + if (strcmp(rm_ops->version_string, NV_VERSION_STRING) != 0) + { + rm_ops->version_string = NV_VERSION_STRING; + return NV_ERR_GENERIC; + } + + *rm_ops = local_rm_ops; + + if (NV_ALLOW_WRITE_COMBINING(NV_MEMORY_TYPE_FRAMEBUFFER)) { + rm_ops->system_info.allow_write_combining = NV_TRUE; + } + + return NV_OK; +} + +NV_EXPORT_SYMBOL(nvidia_get_rm_ops); diff --git a/kernel-open/nvidia/nv-msi.c b/kernel-open/nvidia/nv-msi.c new file mode 100644 index 0000000..959bc34 --- /dev/null +++ b/kernel-open/nvidia/nv-msi.c @@ -0,0 +1,167 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-msi.h" +#include "nv-proto.h" + +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) +void NV_API_CALL nv_init_msi(nv_state_t *nv) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int rc = 0; + + rc = pci_enable_msi(nvl->pci_dev); + if (rc == 0) + { + nv->interrupt_line = nvl->pci_dev->irq; + nv->flags |= NV_FLAG_USES_MSI; + nvl->num_intr = 1; + NV_KZALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * nvl->num_intr); + + if (nvl->irq_count == NULL) + { + nv->flags &= ~NV_FLAG_USES_MSI; + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Failed to allocate counter for MSI entry; " + "falling back to PCIe virtual-wire interrupts.\n"); + } + else + { + nvl->current_num_irq_tracked = 0; + } + } + else + { + nv->flags &= ~NV_FLAG_USES_MSI; + if (nvl->pci_dev->irq != 0) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Failed to enable MSI; " + "falling back to PCIe virtual-wire interrupts.\n"); + } + } + + return; +} + +void NV_API_CALL nv_init_msix(nv_state_t *nv) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int num_intr = 0; + struct msix_entry *msix_entries; + int rc = 0; + int i; + + NV_SPIN_LOCK_INIT(&nvl->msix_isr_lock); + + rc = os_alloc_mutex(&nvl->msix_bh_mutex); + if (rc != 0) + goto failed; + + num_intr = nv_get_max_irq(nvl->pci_dev); + + if (num_intr > NV_RM_MAX_MSIX_LINES) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "Reducing MSI-X count from %d to the " + "driver-supported maximum %d.\n", num_intr, NV_RM_MAX_MSIX_LINES); + num_intr = NV_RM_MAX_MSIX_LINES; + } + + NV_KMALLOC(nvl->msix_entries, sizeof(struct msix_entry) * num_intr); + if (nvl->msix_entries == NULL) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to allocate MSI-X entries.\n"); + goto failed; + } + + for (i = 0, msix_entries = nvl->msix_entries; i < num_intr; i++, msix_entries++) + { + msix_entries->entry = i; + } + + NV_KZALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * num_intr); + + if (nvl->irq_count == NULL) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to allocate counter for MSI-X entries.\n"); + goto failed; + } + else + { + nvl->current_num_irq_tracked = 0; + } + rc = nv_pci_enable_msix(nvl, num_intr); + if (rc != NV_OK) + goto failed; + + nv->flags |= NV_FLAG_USES_MSIX; + return; + +failed: + nv->flags &= ~NV_FLAG_USES_MSIX; + + if (nvl->msix_entries) + { + NV_KFREE(nvl->msix_entries, sizeof(struct msix_entry) * num_intr); + } + + if (nvl->irq_count) + { + NV_KFREE(nvl->irq_count, 
sizeof(nv_irq_count_info_t) * num_intr); + } + + if (nvl->msix_bh_mutex) + { + os_free_mutex(nvl->msix_bh_mutex); + nvl->msix_bh_mutex = NULL; + } + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to enable MSI-X.\n"); +} + +NvS32 NV_API_CALL nv_request_msix_irq(nv_linux_state_t *nvl) +{ + int i; + int j; + struct msix_entry *msix_entries; + int rc = NV_ERR_INVALID_ARGUMENT; + nv_state_t *nv = NV_STATE_PTR(nvl); + + for (i = 0, msix_entries = nvl->msix_entries; i < nvl->num_intr; + i++, msix_entries++) + { + rc = request_threaded_irq(msix_entries->vector, nvidia_isr_msix, + nvidia_isr_msix_kthread_bh, nv_default_irq_flags(nv), + nv_device_name, (void *)nvl); + if (rc) + { + for( j = 0; j < i; j++) + { + free_irq(nvl->msix_entries[j].vector, (void *)nvl); + } + break; + } + } + + return rc; +} +#endif diff --git a/kernel-open/nvidia/nv-nano-timer.c b/kernel-open/nvidia/nv-nano-timer.c new file mode 100644 index 0000000..6dbfdb2 --- /dev/null +++ b/kernel-open/nvidia/nv-nano-timer.c @@ -0,0 +1,215 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include // For container_of +#include +#include +#include +#include "os-interface.h" +#include "nv-linux.h" + +#define NV_NANO_TIMER_USE_HRTIMER 1 + +struct nv_nano_timer +{ +#if NV_NANO_TIMER_USE_HRTIMER + struct hrtimer hr_timer; // This parameter holds linux high resolution timer object + // can get replaced with platform specific timer object +#else + struct timer_list jiffy_timer; +#endif + nv_linux_state_t *nv_linux_state; + void (*nv_nano_timer_callback)(struct nv_nano_timer *nv_nstimer); + void *pTmrEvent; +}; + +/*! + * @brief runs nano second resolution timer callback +* + * @param[in] nv_nstimer Pointer to nv_nano_timer_t object + */ +static void +nvidia_nano_timer_callback( + nv_nano_timer_t *nv_nstimer) +{ + nv_state_t *nv = NULL; + nv_linux_state_t *nvl = nv_nstimer->nv_linux_state; + nvidia_stack_t *sp = NULL; + + if (nv_kmem_cache_alloc_stack_atomic(&sp) != 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: no cache memory \n"); + return; + } + + nv = NV_STATE_PTR(nvl); + + if (rm_run_nano_timer_callback(sp, nv, nv_nstimer->pTmrEvent) != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Error in service of callback \n"); + } + + nv_kmem_cache_free_stack(sp); +} + +/*! 
+ * @brief Allocates a nanosecond-resolution timer object
+ *
+ * @returns Pointer to the allocated nv_nano_timer_t, or NULL on failure
+ */
+static nv_nano_timer_t *nv_alloc_nano_timer(void)
+{
+    nv_nano_timer_t *nv_nstimer;
+
+    NV_KMALLOC(nv_nstimer, sizeof(nv_nano_timer_t));
+
+    if (nv_nstimer == NULL)
+    {
+        return NULL;
+    }
+
+    memset(nv_nstimer, 0, sizeof(nv_nano_timer_t));
+
+    return nv_nstimer;
+}
+
+#if NV_NANO_TIMER_USE_HRTIMER
+static enum hrtimer_restart nv_nano_timer_callback_typed_data(struct hrtimer *hrtmr)
+{
+    struct nv_nano_timer *nv_nstimer =
+        container_of(hrtmr, struct nv_nano_timer, hr_timer);
+
+    nv_nstimer->nv_nano_timer_callback(nv_nstimer);
+
+    return HRTIMER_NORESTART;
+}
+#else
+static inline void nv_jiffy_timer_callback_typed_data(struct timer_list *timer)
+{
+    struct nv_nano_timer *nv_nstimer =
+        container_of(timer, struct nv_nano_timer, jiffy_timer);
+
+    nv_nstimer->nv_nano_timer_callback(nv_nstimer);
+}
+#endif
+
+/*!
+ * @brief Creates and initializes a nanosecond-resolution timer object
+ *
+ * @param[in]  nv          Per-GPU Linux state
+ * @param[in]  pTmrEvent   Pointer to a TMR_EVENT
+ * @param[out] pnv_nstimer Receives the new nv_nano_timer_t object, or NULL on
+ *                         failure
+ */
+void NV_API_CALL nv_create_nano_timer(
+    nv_state_t *nv,
+    void *pTmrEvent,
+    nv_nano_timer_t **pnv_nstimer)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    nv_nano_timer_t *nv_nstimer = nv_alloc_nano_timer();
+
+    if (nv_nstimer == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Unable to create nano timer object\n");
+        *pnv_nstimer = NULL;
+        return;
+    }
+
+    nv_nstimer->nv_linux_state = nvl;
+    nv_nstimer->pTmrEvent = pTmrEvent;
+
+    nv_nstimer->nv_nano_timer_callback = nvidia_nano_timer_callback;
+
+#if NV_NANO_TIMER_USE_HRTIMER
+#if NV_IS_EXPORT_SYMBOL_PRESENT_hrtimer_setup
+    hrtimer_setup(&nv_nstimer->hr_timer, &nv_nano_timer_callback_typed_data,
+                  CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+#else
+    hrtimer_init(&nv_nstimer->hr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+    nv_nstimer->hr_timer.function = nv_nano_timer_callback_typed_data;
+#endif // NV_IS_EXPORT_SYMBOL_PRESENT_hrtimer_setup
+#else
+    timer_setup(&nv_nstimer->jiffy_timer, nv_jiffy_timer_callback_typed_data, 0);
+#endif // NV_NANO_TIMER_USE_HRTIMER
+
+    *pnv_nstimer = nv_nstimer;
+}
+
+/*!
+ * @brief Starts a nanosecond-resolution timer
+ *
+ * @param[in] nv         Per-GPU Linux state
+ * @param[in] nv_nstimer Pointer to an nv_nano_timer_t object
+ * @param[in] time_ns    Relative expiration time in nanoseconds
+ */
+void NV_API_CALL nv_start_nano_timer(
+    nv_state_t *nv,
+    nv_nano_timer_t *nv_nstimer,
+    NvU64 time_ns)
+{
+#if NV_NANO_TIMER_USE_HRTIMER
+    ktime_t ktime = ktime_set(0, time_ns);
+    hrtimer_start(&nv_nstimer->hr_timer, ktime, HRTIMER_MODE_REL);
+#else
+    unsigned long time_jiffies;
+    NvU32 time_us;
+
+    time_us = (NvU32)(time_ns / 1000);
+    time_jiffies = usecs_to_jiffies(time_us);
+    mod_timer(&nv_nstimer->jiffy_timer, jiffies + time_jiffies);
+#endif
+}
+
+/*!
+ * @brief Cancels a nanosecond-resolution timer
+ *
+ * @param[in] nv         Per-GPU Linux state
+ * @param[in] nv_nstimer Pointer to an nv_nano_timer_t object
+ */
+void NV_API_CALL nv_cancel_nano_timer(
+    nv_state_t *nv,
+    nv_nano_timer_t *nv_nstimer)
+{
+#if NV_NANO_TIMER_USE_HRTIMER
+    hrtimer_cancel(&nv_nstimer->hr_timer);
+#else
+    nv_timer_delete_sync(&nv_nstimer->jiffy_timer);
+#endif
+}
+
+/*!
+ * @brief Cancels & deletes nano second resolution timer object + * + * @param[in] nv Per gpu linux state + * @param[in] nv_nstimer Pointer to nv_nano_timer_t object + */ +void NV_API_CALL nv_destroy_nano_timer( + nv_state_t *nv, + nv_nano_timer_t *nv_nstimer) +{ + nv_cancel_nano_timer(nv, nv_nstimer); + NV_KFREE(nv_nstimer, sizeof(nv_nano_timer_t)); +} diff --git a/kernel-open/nvidia/nv-p2p.c b/kernel-open/nvidia/nv-p2p.c new file mode 100644 index 0000000..bc4ff36 --- /dev/null +++ b/kernel-open/nvidia/nv-p2p.c @@ -0,0 +1,1027 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" +#include "nv-rsync.h" + +#include "nv-p2p.h" +#include "rmp2pdefines.h" + +#include "nvmisc.h" + +typedef enum nv_p2p_page_table_type { + NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT = 0, + NV_P2P_PAGE_TABLE_TYPE_PERSISTENT, +} nv_p2p_page_table_type_t; + +typedef struct nv_p2p_dma_mapping { + struct list_head list_node; + struct nvidia_p2p_dma_mapping *dma_mapping; +} nv_p2p_dma_mapping_t; + +typedef struct nv_p2p_mem_info { + void (*free_callback)(void *data); + void *data; + struct nvidia_p2p_page_table page_table; + struct { + struct list_head list_head; + struct semaphore lock; + } dma_mapping_list; + void *private; + void *mig_info; + NvBool force_pcie; +} nv_p2p_mem_info_t; + +// declared and created in nv.c +extern void *nvidia_p2p_page_t_cache; + +static struct nvidia_status_mapping { + NV_STATUS status; + int error; +} nvidia_status_mappings[] = { + { NV_ERR_GENERIC, -EIO }, + { NV_ERR_INSUFFICIENT_RESOURCES, -ENOMEM }, + { NV_ERR_NO_MEMORY, -ENOMEM }, + { NV_ERR_INVALID_ARGUMENT, -EINVAL }, + { NV_ERR_INVALID_OBJECT_HANDLE, -EINVAL }, + { NV_ERR_INVALID_STATE, -EIO }, + { NV_ERR_NOT_SUPPORTED, -ENOTSUPP }, + { NV_ERR_OBJECT_NOT_FOUND, -EINVAL }, + { NV_ERR_STATE_IN_USE, -EBUSY }, + { NV_ERR_GPU_UUID_NOT_FOUND, -ENODEV }, + { NV_OK, 0 }, +}; + +#define NVIDIA_STATUS_MAPPINGS \ + NV_ARRAY_ELEMENTS(nvidia_status_mappings) + +static int nvidia_p2p_map_status(NV_STATUS status) +{ + int error = -EIO; + uint8_t i; + + for (i = 0; i < NVIDIA_STATUS_MAPPINGS; i++) + { + if (nvidia_status_mappings[i].status == status) + { + error = nvidia_status_mappings[i].error; + break; + } + } + return error; +} + +static NvU32 nvidia_p2p_page_size_mappings[NVIDIA_P2P_PAGE_SIZE_COUNT] 
= { + NVRM_P2P_PAGESIZE_SMALL_4K, NVRM_P2P_PAGESIZE_BIG_64K, NVRM_P2P_PAGESIZE_BIG_128K +}; + +static NV_STATUS nvidia_p2p_map_page_size(NvU32 page_size, NvU32 *page_size_index) +{ + NvU32 i; + + for (i = 0; i < NVIDIA_P2P_PAGE_SIZE_COUNT; i++) + { + if (nvidia_p2p_page_size_mappings[i] == page_size) + { + *page_size_index = i; + break; + } + } + + if (i == NVIDIA_P2P_PAGE_SIZE_COUNT) + return NV_ERR_GENERIC; + + return NV_OK; +} + +static NV_STATUS nv_p2p_insert_dma_mapping( + struct nv_p2p_mem_info *mem_info, + struct nvidia_p2p_dma_mapping *dma_mapping +) +{ + NV_STATUS status; + struct nv_p2p_dma_mapping *node; + + status = os_alloc_mem((void**)&node, sizeof(*node)); + if (status != NV_OK) + { + return status; + } + + down(&mem_info->dma_mapping_list.lock); + + node->dma_mapping = dma_mapping; + list_add_tail(&node->list_node, &mem_info->dma_mapping_list.list_head); + + up(&mem_info->dma_mapping_list.lock); + + return NV_OK; +} + +static struct nvidia_p2p_dma_mapping* nv_p2p_remove_dma_mapping( + struct nv_p2p_mem_info *mem_info, + struct nvidia_p2p_dma_mapping *dma_mapping +) +{ + struct nv_p2p_dma_mapping *cur; + struct nvidia_p2p_dma_mapping *ret_dma_mapping = NULL; + + down(&mem_info->dma_mapping_list.lock); + + list_for_each_entry(cur, &mem_info->dma_mapping_list.list_head, list_node) + { + if (dma_mapping == NULL || dma_mapping == cur->dma_mapping) + { + ret_dma_mapping = cur->dma_mapping; + list_del(&cur->list_node); + os_free_mem(cur); + break; + } + } + + up(&mem_info->dma_mapping_list.lock); + + return ret_dma_mapping; +} + +static void nv_p2p_free_dma_mapping( + struct nvidia_p2p_dma_mapping *dma_mapping +) +{ + nv_dma_device_t peer_dma_dev = {{ 0 }}; + NvU32 page_size; + NV_STATUS status; + + peer_dma_dev.dev = &dma_mapping->pci_dev->dev; + peer_dma_dev.addressable_range.limit = dma_mapping->pci_dev->dma_mask; + + page_size = nvidia_p2p_page_size_mappings[dma_mapping->page_size_type]; + + if (dma_mapping->private != NULL) + { + /* + * If OS page size is smaller than P2P page size, + * page inflation logic applies for DMA unmapping too. + * Bigger P2P page needs to be split in smaller OS pages. 
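+         *
+         * For example (illustrative numbers): with a 64 KiB P2P page size
+         * and a 4 KiB kernel PAGE_SIZE, each address in
+         * dma_mapping->dma_addresses is expanded below into 16 consecutive
+         * 4 KiB OS-page addresses before being passed to nv_dma_unmap_alloc().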
+ */ + if (page_size > PAGE_SIZE) + { + NvU64 *os_dma_addresses = NULL; + NvU32 os_pages_per_p2p_page = page_size; + NvU32 os_page_count; + NvU32 index, i, j; + + do_div(os_pages_per_p2p_page, PAGE_SIZE); + + os_page_count = os_pages_per_p2p_page * dma_mapping->entries; + + status = os_alloc_mem((void **)&os_dma_addresses, + (os_page_count * sizeof(NvU64))); + if(WARN_ON(status != NV_OK)) + { + goto failed; + } + + index = 0; + for (i = 0; i < dma_mapping->entries; i++) + { + os_dma_addresses[index] = dma_mapping->dma_addresses[i]; + index++; + + for (j = 1; j < os_pages_per_p2p_page; j++) + { + os_dma_addresses[index] = os_dma_addresses[index - 1] + PAGE_SIZE; + index++; + } + } + + status = nv_dma_unmap_alloc(&peer_dma_dev, + os_page_count, + os_dma_addresses, + &dma_mapping->private); + WARN_ON(status != NV_OK); + + os_free_mem(os_dma_addresses); + } + else + { + WARN_ON(page_size != PAGE_SIZE); + + status = nv_dma_unmap_alloc(&peer_dma_dev, + dma_mapping->entries, + dma_mapping->dma_addresses, + &dma_mapping->private); + WARN_ON(status != NV_OK); + } + } + else + { + NvU32 i; + for (i = 0; i < dma_mapping->entries; i++) + { + nv_dma_unmap_peer(&peer_dma_dev, page_size / PAGE_SIZE, + dma_mapping->dma_addresses[i]); + } + } + +failed: + os_free_mem(dma_mapping->dma_addresses); + + os_free_mem(dma_mapping); +} + +static void nv_p2p_free_page_table( + struct nvidia_p2p_page_table *page_table +) +{ + NvU32 i; + struct nvidia_p2p_dma_mapping *dma_mapping; + struct nv_p2p_mem_info *mem_info = NULL; + + mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table); + + dma_mapping = nv_p2p_remove_dma_mapping(mem_info, NULL); + while (dma_mapping != NULL) + { + nv_p2p_free_dma_mapping(dma_mapping); + + dma_mapping = nv_p2p_remove_dma_mapping(mem_info, NULL); + } + + for (i = 0; i < page_table->entries; i++) + { + NV_KMEM_CACHE_FREE(page_table->pages[i], nvidia_p2p_page_t_cache); + } + + if (page_table->gpu_uuid != NULL) + { + os_free_mem(page_table->gpu_uuid); + } + + if (page_table->pages != NULL) + { + os_free_mem(page_table->pages); + } + + os_free_mem(mem_info); +} + +static NV_STATUS nv_p2p_put_pages( + nv_p2p_page_table_type_t pt_type, + nvidia_stack_t * sp, + uint64_t p2p_token, + uint32_t va_space, + uint64_t virtual_address, + struct nvidia_p2p_page_table **page_table +) +{ + NV_STATUS status; + + /* + * rm_p2p_put_pages returns NV_OK if the page_table was found and + * got unlinked from the RM's tracker (atomically). This ensures that + * RM's tear-down path does not race with this path. + * + * rm_p2p_put_pages returns NV_ERR_OBJECT_NOT_FOUND if the page_table + * was already unlinked. + */ + if (pt_type == NV_P2P_PAGE_TABLE_TYPE_PERSISTENT) + { + struct nv_p2p_mem_info *mem_info = NULL; + + /* + * It is safe to access persistent page_table as there is no async + * callback which can free it unlike non-persistent page_table. 
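+         *
+         * This is what makes the container_of() below safe: the persistent
+         * page_table is still embedded in a live nv_p2p_mem_info_t at this
+         * point.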
+ */ + mem_info = container_of(*page_table, nv_p2p_mem_info_t, page_table); + status = rm_p2p_put_pages_persistent(sp, mem_info->private, *page_table, mem_info->mig_info); + } + else + { + status = rm_p2p_put_pages(sp, p2p_token, va_space, + virtual_address, *page_table); + } + + if (status == NV_OK) + { + nv_p2p_free_page_table(*page_table); + *page_table = NULL; + } + else if ((pt_type == NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT) && + (status == NV_ERR_OBJECT_NOT_FOUND)) + { + status = NV_OK; + *page_table = NULL; + } + else + { + WARN_ON(status != NV_OK); + } + + return status; +} + +void NV_API_CALL nv_p2p_free_platform_data( + void *data +) +{ + if (data == NULL) + { + WARN_ON(data == NULL); + return; + } + + nv_p2p_free_page_table((struct nvidia_p2p_page_table*)data); +} + +int nvidia_p2p_init_mapping( + uint64_t p2p_token, + struct nvidia_p2p_params *params, + void (*destroy_callback)(void *data), + void *data +) +{ + return -ENOTSUPP; +} + +NV_EXPORT_SYMBOL(nvidia_p2p_init_mapping); + +int nvidia_p2p_destroy_mapping(uint64_t p2p_token) +{ + return -ENOTSUPP; +} + +NV_EXPORT_SYMBOL(nvidia_p2p_destroy_mapping); + +static void nv_p2p_mem_info_free_callback(void *data) +{ + nv_p2p_mem_info_t *mem_info = (nv_p2p_mem_info_t*) data; + + mem_info->free_callback(mem_info->data); + + nv_p2p_free_platform_data(&mem_info->page_table); +} + +static int nv_p2p_get_pages( + nv_p2p_page_table_type_t pt_type, + uint64_t p2p_token, + uint32_t va_space, + uint64_t virtual_address, + uint64_t length, + uint8_t flags, + struct nvidia_p2p_page_table **page_table, + void (*free_callback)(void * data), + void *data +) +{ + NV_STATUS status; + nvidia_stack_t *sp = NULL; + struct nvidia_p2p_page *page; + struct nv_p2p_mem_info *mem_info = NULL; + NvU32 entries; + NvU32 *wreqmb_h = NULL; + NvU32 *rreqmb_h = NULL; + NvU64 *physical_addresses = NULL; + NvU32 page_count; + NvU32 i = 0; + NvBool bGetPages = NV_FALSE; + NvBool bGetUuid = NV_FALSE; + NvU32 page_size = NVRM_P2P_PAGESIZE_BIG_64K; + NvU32 page_size_index; + NvU64 temp_length; + NvU8 *gpu_uuid = NULL; + NvU8 uuid[NVIDIA_P2P_GPU_UUID_LEN] = {0}; + NvBool force_pcie = !!(flags & NVIDIA_P2P_FLAGS_FORCE_BAR1_MAPPING); + NvBool cpu_cacheable; + int rc; + + if (!NV_IS_ALIGNED64(virtual_address, NVRM_P2P_PAGESIZE_BIG_64K) || + !NV_IS_ALIGNED64(length, NVRM_P2P_PAGESIZE_BIG_64K)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: Invalid argument in nv_p2p_get_pages," + "address or length are not aligned " + "address=0x%llx, length=0x%llx\n", + virtual_address, length); + return -EINVAL; + } + + // Forced PCIe mappings are not supported for non-persistent APIs + if ((free_callback != NULL) && force_pcie) + { + return -ENOTSUPP; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + return rc; + } + + *page_table = NULL; + status = os_alloc_mem((void **)&mem_info, sizeof(*mem_info)); + if (status != NV_OK) + { + goto failed; + } + + memset(mem_info, 0, sizeof(*mem_info)); + + INIT_LIST_HEAD(&mem_info->dma_mapping_list.list_head); + NV_INIT_MUTEX(&mem_info->dma_mapping_list.lock); + + mem_info->force_pcie = force_pcie; + + *page_table = &(mem_info->page_table); + + /* + * assign length to temporary variable since do_div macro does in-place + * division + */ + temp_length = length; + do_div(temp_length, page_size); + page_count = temp_length; + + if (length & (page_size - 1)) + { + page_count++; + } + + status = os_alloc_mem((void **)&physical_addresses, + (page_count * sizeof(NvU64))); + if (status != NV_OK) + { + goto failed; + } + status = os_alloc_mem((void 
**)&wreqmb_h, (page_count * sizeof(NvU32))); + if (status != NV_OK) + { + goto failed; + } + status = os_alloc_mem((void **)&rreqmb_h, (page_count * sizeof(NvU32))); + if (status != NV_OK) + { + goto failed; + } + + if (pt_type == NV_P2P_PAGE_TABLE_TYPE_PERSISTENT) + { + void *gpu_info = NULL; + + if ((p2p_token != 0) || (va_space != 0)) + { + status = -ENOTSUPP; + goto failed; + } + + status = rm_p2p_get_gpu_info(sp, virtual_address, length, + &gpu_uuid, &gpu_info); + if (status != NV_OK) + { + goto failed; + } + + (*page_table)->gpu_uuid = gpu_uuid; + + rc = nvidia_dev_get_uuid(gpu_uuid, sp); + if (rc != 0) + { + status = NV_ERR_GPU_UUID_NOT_FOUND; + goto failed; + } + + os_mem_copy(uuid, gpu_uuid, NVIDIA_P2P_GPU_UUID_LEN); + + bGetUuid = NV_TRUE; + + status = rm_p2p_get_pages_persistent(sp, virtual_address, length, + &mem_info->private, + physical_addresses, &entries, + force_pcie, *page_table, gpu_info, + &mem_info->mig_info, &cpu_cacheable); + if (status != NV_OK) + { + goto failed; + } + } + else + { + // Get regular old-style, non-persistent mappings + status = rm_p2p_get_pages(sp, p2p_token, va_space, + virtual_address, length, physical_addresses, wreqmb_h, + rreqmb_h, &entries, &gpu_uuid, *page_table, &cpu_cacheable); + if (status != NV_OK) + { + goto failed; + } + + (*page_table)->gpu_uuid = gpu_uuid; + } + + bGetPages = NV_TRUE; + + status = os_alloc_mem((void *)&(*page_table)->pages, + (entries * sizeof(page))); + if (status != NV_OK) + { + goto failed; + } + + (*page_table)->version = NVIDIA_P2P_PAGE_TABLE_VERSION; + + for (i = 0; i < entries; i++) + { + page = NV_KMEM_CACHE_ALLOC(nvidia_p2p_page_t_cache); + if (page == NULL) + { + status = NV_ERR_NO_MEMORY; + goto failed; + } + + memset(page, 0, sizeof(*page)); + + page->physical_address = physical_addresses[i]; + page->registers.fermi.wreqmb_h = wreqmb_h[i]; + page->registers.fermi.rreqmb_h = rreqmb_h[i]; + + (*page_table)->pages[i] = page; + (*page_table)->entries++; + } + + status = nvidia_p2p_map_page_size(page_size, &page_size_index); + if (status != NV_OK) + { + goto failed; + } + + (*page_table)->page_size = page_size_index; + + if (cpu_cacheable) + { + (*page_table)->flags |= NVIDIA_P2P_PAGE_TABLE_FLAGS_CPU_CACHEABLE; + } + + os_free_mem(physical_addresses); + physical_addresses = NULL; + + os_free_mem(wreqmb_h); + wreqmb_h = NULL; + + os_free_mem(rreqmb_h); + rreqmb_h = NULL; + + if (free_callback != NULL) + { + mem_info->free_callback = free_callback; + mem_info->data = data; + + status = rm_p2p_register_callback(sp, p2p_token, virtual_address, length, + *page_table, nv_p2p_mem_info_free_callback, mem_info); + if (status != NV_OK) + { + goto failed; + } + } + + nv_kmem_cache_free_stack(sp); + + return nvidia_p2p_map_status(status); + +failed: + if (physical_addresses != NULL) + { + os_free_mem(physical_addresses); + } + + if (wreqmb_h != NULL) + { + os_free_mem(wreqmb_h); + } + + if (rreqmb_h != NULL) + { + os_free_mem(rreqmb_h); + } + + if (bGetPages) + { + (void)nv_p2p_put_pages(pt_type, sp, p2p_token, va_space, + virtual_address, page_table); + } + + if (bGetUuid) + { + nvidia_dev_put_uuid(uuid, sp); + } + + if (*page_table != NULL) + { + nv_p2p_free_page_table(*page_table); + } + + nv_kmem_cache_free_stack(sp); + + return nvidia_p2p_map_status(status); +} + +int nvidia_p2p_get_pages( + uint64_t p2p_token, + uint32_t va_space, + uint64_t virtual_address, + uint64_t length, + struct nvidia_p2p_page_table **page_table, + void (*free_callback)(void * data), + void *data +) +{ + if (free_callback == NULL) + { + 
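+        // The non-persistent API depends on the free callback to notify the
+        // caller when the underlying pages are freed implicitly, so a NULL
+        // callback cannot be honored here.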
return -EINVAL; + } + + return nv_p2p_get_pages(NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT, + p2p_token, va_space, virtual_address, + length, NVIDIA_P2P_FLAGS_DEFAULT, + page_table, free_callback, data); +} +NV_EXPORT_SYMBOL(nvidia_p2p_get_pages); + +int nvidia_p2p_get_pages_persistent( + uint64_t virtual_address, + uint64_t length, + struct nvidia_p2p_page_table **page_table, + uint32_t flags +) +{ + return nv_p2p_get_pages(NV_P2P_PAGE_TABLE_TYPE_PERSISTENT, 0, 0, + virtual_address, length, flags, page_table, + NULL, NULL); +} +NV_EXPORT_SYMBOL(nvidia_p2p_get_pages_persistent); + +/* + * This function is a no-op, but is left in place (for now), in order to allow + * third-party callers to build and run without errors or warnings. This is OK, + * because the missing functionality is provided by nv_p2p_free_platform_data, + * which is being called as part of the RM's cleanup path. + */ +int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table) +{ + return 0; +} + +NV_EXPORT_SYMBOL(nvidia_p2p_free_page_table); + +int nvidia_p2p_put_pages( + uint64_t p2p_token, + uint32_t va_space, + uint64_t virtual_address, + struct nvidia_p2p_page_table *page_table +) +{ + NV_STATUS status; + nvidia_stack_t *sp = NULL; + int rc = 0; + + if (page_table == NULL) + { + return 0; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + return -ENOMEM; + } + + status = nv_p2p_put_pages(NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT, + sp, p2p_token, va_space, + virtual_address, &page_table); + nv_kmem_cache_free_stack(sp); + + return nvidia_p2p_map_status(status); +} +NV_EXPORT_SYMBOL(nvidia_p2p_put_pages); + +int nvidia_p2p_put_pages_persistent( + uint64_t virtual_address, + struct nvidia_p2p_page_table *page_table, + uint32_t flags +) +{ + NvU8 uuid[NVIDIA_P2P_GPU_UUID_LEN] = {0}; + NV_STATUS status; + nvidia_stack_t *sp = NULL; + int rc = 0; + + if (flags != 0) + { + return -EINVAL; + } + + if (page_table == NULL) + { + return 0; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + return -ENOMEM; + } + + os_mem_copy(uuid, page_table->gpu_uuid, NVIDIA_P2P_GPU_UUID_LEN); + + status = nv_p2p_put_pages(NV_P2P_PAGE_TABLE_TYPE_PERSISTENT, + sp, 0, 0, virtual_address, &page_table); + + nvidia_dev_put_uuid(uuid, sp); + + nv_kmem_cache_free_stack(sp); + + return nvidia_p2p_map_status(status); +} +NV_EXPORT_SYMBOL(nvidia_p2p_put_pages_persistent); + +int nvidia_p2p_dma_map_pages( + struct pci_dev *peer, + struct nvidia_p2p_page_table *page_table, + struct nvidia_p2p_dma_mapping **dma_mapping +) +{ + NV_STATUS status; + nv_dma_device_t peer_dma_dev = {{ 0 }}; + nvidia_stack_t *sp = NULL; + NvU64 *dma_addresses = NULL; + NvU32 page_count; + NvU32 page_size; + enum nvidia_p2p_page_size_type page_size_type; + struct nv_p2p_mem_info *mem_info = NULL; + NvU32 i; + void *priv; + int rc; + + if (peer == NULL || page_table == NULL || dma_mapping == NULL || + page_table->gpu_uuid == NULL) + { + return -EINVAL; + } + + mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table); + + // + // Only CPU mappings are supported for forced PCIe config through + // nv-p2p APIs. IO mappings will not be supported. 
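+    // Since this function establishes exactly such an IO (DMA) mapping,
+    // force_pcie allocations are rejected below with -ENOTSUPP.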
+ // + if (mem_info->force_pcie) + { + return -ENOTSUPP; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + return rc; + } + + *dma_mapping = NULL; + status = os_alloc_mem((void **)dma_mapping, sizeof(**dma_mapping)); + if (status != NV_OK) + { + goto failed; + } + memset(*dma_mapping, 0, sizeof(**dma_mapping)); + + page_count = page_table->entries; + + status = os_alloc_mem((void **)&dma_addresses, + (page_count * sizeof(NvU64))); + if (status != NV_OK) + { + goto failed; + } + + page_size_type = page_table->page_size; + + BUG_ON((page_size_type <= NVIDIA_P2P_PAGE_SIZE_4KB) || + (page_size_type >= NVIDIA_P2P_PAGE_SIZE_COUNT)); + + peer_dma_dev.dev = &peer->dev; + peer_dma_dev.addressable_range.limit = peer->dma_mask; + + page_size = nvidia_p2p_page_size_mappings[page_size_type]; + + for (i = 0; i < page_count; i++) + { + dma_addresses[i] = page_table->pages[i]->physical_address; + } + + status = rm_p2p_dma_map_pages(sp, &peer_dma_dev, + page_table->gpu_uuid, page_size, page_count, dma_addresses, &priv); + if (status != NV_OK) + { + goto failed; + } + + (*dma_mapping)->version = NVIDIA_P2P_DMA_MAPPING_VERSION; + (*dma_mapping)->page_size_type = page_size_type; + (*dma_mapping)->entries = page_count; + (*dma_mapping)->dma_addresses = dma_addresses; + (*dma_mapping)->private = priv; + (*dma_mapping)->pci_dev = peer; + + /* + * All success, it is safe to insert dma_mapping now. + */ + status = nv_p2p_insert_dma_mapping(mem_info, *dma_mapping); + if (status != NV_OK) + { + goto failed_insert; + } + + nv_kmem_cache_free_stack(sp); + + return 0; + +failed_insert: + nv_p2p_free_dma_mapping(*dma_mapping); + dma_addresses = NULL; + *dma_mapping = NULL; + +failed: + if (dma_addresses != NULL) + { + os_free_mem(dma_addresses); + } + + if (*dma_mapping != NULL) + { + os_free_mem(*dma_mapping); + *dma_mapping = NULL; + } + + nv_kmem_cache_free_stack(sp); + + return nvidia_p2p_map_status(status); +} + +NV_EXPORT_SYMBOL(nvidia_p2p_dma_map_pages); + +int nvidia_p2p_dma_unmap_pages( + struct pci_dev *peer, + struct nvidia_p2p_page_table *page_table, + struct nvidia_p2p_dma_mapping *dma_mapping +) +{ + struct nv_p2p_mem_info *mem_info = NULL; + + if (peer == NULL || dma_mapping == NULL || page_table == NULL) + { + return -EINVAL; + } + + mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table); + + /* + * nv_p2p_remove_dma_mapping returns dma_mapping if the dma_mapping was + * found and got unlinked from the mem_info->dma_mapping_list (atomically). + * This ensures that the RM's tear-down path does not race with this path. + * + * nv_p2p_remove_dma_mappings returns NULL if the dma_mapping was already + * unlinked. + */ + if (nv_p2p_remove_dma_mapping(mem_info, dma_mapping) == NULL) + { + return 0; + } + + WARN_ON(peer != dma_mapping->pci_dev); + + BUG_ON((dma_mapping->page_size_type <= NVIDIA_P2P_PAGE_SIZE_4KB) || + (dma_mapping->page_size_type >= NVIDIA_P2P_PAGE_SIZE_COUNT)); + + nv_p2p_free_dma_mapping(dma_mapping); + + return 0; +} + +NV_EXPORT_SYMBOL(nvidia_p2p_dma_unmap_pages); + +/* + * This function is a no-op, but is left in place (for now), in order to allow + * third-party callers to build and run without errors or warnings. This is OK, + * because the missing functionality is provided by nv_p2p_free_platform_data, + * which is being called as part of the RM's cleanup path. 
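+ * Any DMA mappings still attached to the page table are released by that
+ * same path: nv_p2p_free_page_table() drains the dma_mapping_list before
+ * freeing the table.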
+ */ +int nvidia_p2p_free_dma_mapping( + struct nvidia_p2p_dma_mapping *dma_mapping +) +{ + return 0; +} + +NV_EXPORT_SYMBOL(nvidia_p2p_free_dma_mapping); + +int nvidia_p2p_register_rsync_driver( + nvidia_p2p_rsync_driver_t *driver, + void *data +) +{ + if (driver == NULL) + { + return -EINVAL; + } + + if (!NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(driver)) + { + return -EINVAL; + } + + if (driver->get_relaxed_ordering_mode == NULL || + driver->put_relaxed_ordering_mode == NULL || + driver->wait_for_rsync == NULL) + { + return -EINVAL; + } + + return nv_register_rsync_driver(driver->get_relaxed_ordering_mode, + driver->put_relaxed_ordering_mode, + driver->wait_for_rsync, data); +} + +NV_EXPORT_SYMBOL(nvidia_p2p_register_rsync_driver); + +void nvidia_p2p_unregister_rsync_driver( + nvidia_p2p_rsync_driver_t *driver, + void *data +) +{ + if (driver == NULL) + { + WARN_ON(1); + return; + } + + if (!NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(driver)) + { + WARN_ON(1); + return; + } + + if (driver->get_relaxed_ordering_mode == NULL || + driver->put_relaxed_ordering_mode == NULL || + driver->wait_for_rsync == NULL) + { + WARN_ON(1); + return; + } + + nv_unregister_rsync_driver(driver->get_relaxed_ordering_mode, + driver->put_relaxed_ordering_mode, + driver->wait_for_rsync, data); +} + +NV_EXPORT_SYMBOL(nvidia_p2p_unregister_rsync_driver); + +int nvidia_p2p_get_rsync_registers( + nvidia_p2p_rsync_reg_info_t **reg_info +) +{ + // TODO: Remove this interface. + return -ENODEV; +} + +NV_EXPORT_SYMBOL(nvidia_p2p_get_rsync_registers); + +void nvidia_p2p_put_rsync_registers( + nvidia_p2p_rsync_reg_info_t *reg_info +) +{ + // TODO: Remove this interface. There is nothing to do because + // nvidia_p2p_get_rsync_registers always fails. +} + +NV_EXPORT_SYMBOL(nvidia_p2p_put_rsync_registers); diff --git a/kernel-open/nvidia/nv-p2p.h b/kernel-open/nvidia/nv-p2p.h new file mode 100644 index 0000000..e9e62c0 --- /dev/null +++ b/kernel-open/nvidia/nv-p2p.h @@ -0,0 +1,478 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_P2P_H_ +#define _NV_P2P_H_ + +/* + * NVIDIA P2P Structure Versioning + * + * For the nvidia_p2p_*_t structures allocated by the NVIDIA driver, it will + * set the version field of the structure according to the definition used by + * the NVIDIA driver. 
The "major" field of the version is defined as the upper + * 16 bits, and the "minor" field of the version is defined as the lower 16 + * bits. The version field will always be the first 4 bytes of the structure, + * and third-party drivers should check the value of this field in structures + * allocated by the NVIDIA driver to ensure runtime compatibility. + * + * In general, version numbers will be incremented as follows: + * - When a backwards-compatible change is made to the structure layout, the + * minor version for that structure will be incremented. Third-party drivers + * built against an older minor version will continue to work with the newer + * minor version used by the NVIDIA driver, without recompilation. + * - When a breaking change is made to the structure layout, the major version + * will be incremented. Third-party drivers built against an older major + * version require at least recompilation and potentially additional updates + * to use the new API. + */ +#define NVIDIA_P2P_MAJOR_VERSION_MASK 0xffff0000 +#define NVIDIA_P2P_MINOR_VERSION_MASK 0x0000ffff + +#define NVIDIA_P2P_MAJOR_VERSION(v) \ + (((v) & NVIDIA_P2P_MAJOR_VERSION_MASK) >> 16) + +#define NVIDIA_P2P_MINOR_VERSION(v) \ + (((v) & NVIDIA_P2P_MINOR_VERSION_MASK)) + +#define NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) \ + (NVIDIA_P2P_MAJOR_VERSION((p)->version) == NVIDIA_P2P_MAJOR_VERSION(v)) + +#define NVIDIA_P2P_VERSION_COMPATIBLE(p, v) \ + (NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) && \ + (NVIDIA_P2P_MINOR_VERSION((p)->version) >= (NVIDIA_P2P_MINOR_VERSION(v)))) + +enum { + NVIDIA_P2P_ARCHITECTURE_TESLA = 0, + NVIDIA_P2P_ARCHITECTURE_FERMI, + NVIDIA_P2P_ARCHITECTURE_CURRENT = NVIDIA_P2P_ARCHITECTURE_FERMI +}; + +#define NVIDIA_P2P_PARAMS_VERSION 0x00010001 + +enum { + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_GPU = 0, + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE, + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX = \ + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE +}; + +#define NVIDIA_P2P_GPU_UUID_LEN 16 + +typedef +struct nvidia_p2p_params { + uint32_t version; + uint32_t architecture; + union nvidia_p2p_mailbox_addresses { + struct { + uint64_t wmb_addr; + uint64_t wmb_data; + uint64_t rreq_addr; + uint64_t rcomp_addr; + uint64_t reserved[2]; + } fermi; + } addresses[NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX+1]; +} nvidia_p2p_params_t; + +/* + * Macro for users to detect + * driver support for persistent pages. + */ +#define NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API + +/* + * This API is not supported. + */ +int nvidia_p2p_init_mapping(uint64_t p2p_token, + struct nvidia_p2p_params *params, + void (*destroy_callback)(void *data), + void *data); + +/* + * This API is not supported. 
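+ * Calling it always returns -ENOTSUPP.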
+ */ +int nvidia_p2p_destroy_mapping(uint64_t p2p_token); + +enum nvidia_p2p_page_size_type { + NVIDIA_P2P_PAGE_SIZE_4KB = 0, + NVIDIA_P2P_PAGE_SIZE_64KB, + NVIDIA_P2P_PAGE_SIZE_128KB, + NVIDIA_P2P_PAGE_SIZE_COUNT +}; + +typedef +struct nvidia_p2p_page { + uint64_t physical_address; + union nvidia_p2p_request_registers { + struct { + uint32_t wreqmb_h; + uint32_t rreqmb_h; + uint32_t rreqmb_0; + uint32_t reserved[3]; + } fermi; + } registers; +} nvidia_p2p_page_t; + +#define NVIDIA_P2P_PAGE_TABLE_VERSION 0x00020000 + +#define NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(p) \ + NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_PAGE_TABLE_VERSION) + +/* + * Page Table Flags + */ +#define NVIDIA_P2P_PAGE_TABLE_FLAGS_CPU_CACHEABLE 0x1 + +typedef +struct nvidia_p2p_page_table { + uint32_t version; + uint32_t page_size; /* enum nvidia_p2p_page_size_type */ + struct nvidia_p2p_page **pages; + uint32_t entries; + uint8_t *gpu_uuid; + uint32_t flags; +} nvidia_p2p_page_table_t; + +/* + * @brief + * Make the pages underlying a range of GPU virtual memory + * accessible to a third-party device. + * + * This API only supports pinned, GPU-resident memory, such as that provided + * by cudaMalloc(). + * + * This API may sleep. + * + * @param[in] p2p_token + * A token that uniquely identifies the P2P mapping. + * @param[in] va_space + * A GPU virtual address space qualifier. + * @param[in] virtual_address + * The start address in the specified virtual address space. + * Address must be aligned to the 64KB boundary. + * @param[in] length + * The length of the requested P2P mapping. + * Length must be a multiple of 64KB. + * @param[out] page_table + * A pointer to an array of structures with P2P PTEs. + * @param[in] free_callback + * A pointer to the function to be invoked when the pages + * underlying the virtual address range are freed + * implicitly. + * @param[in] data + * A non-NULL opaque pointer to private data to be passed to the + * callback function. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -ENOTSUPP if the requested operation is not supported. + * -ENOMEM if the driver failed to allocate memory or if + * insufficient resources were available to complete the operation. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_get_pages( uint64_t p2p_token, uint32_t va_space, + uint64_t virtual_address, uint64_t length, + struct nvidia_p2p_page_table **page_table, + void (*free_callback)(void *data), void *data); + +/* + * Flags to be used with persistent APIs + */ +#define NVIDIA_P2P_FLAGS_DEFAULT 0 +#define NVIDIA_P2P_FLAGS_FORCE_BAR1_MAPPING 1 + +/* + * @brief + * Pin and make the pages underlying a range of GPU virtual memory + * accessible to a third-party device. The pages will persist until + * explicitly freed by nvidia_p2p_put_pages_persistent(). + * + * Persistent GPU memory mappings are not supported on + * MIG-enabled devices and vGPU. + * + * This API only supports pinned, GPU-resident memory, such as that provided + * by cudaMalloc(). + * + * This API may sleep. + * + * @param[in] virtual_address + * The start address in the specified virtual address space. + * Address must be aligned to the 64KB boundary. + * @param[in] length + * The length of the requested P2P mapping. + * Length must be a multiple of 64KB. + * @param[out] page_table + * A pointer to an array of structures with P2P PTEs. + * @param[in] flags + * NVIDIA_P2P_FLAGS_DEFAULT: + * Default value to be used if no specific behavior is expected. 
+ * NVIDIA_P2P_FLAGS_FORCE_BAR1_MAPPING: + * Force BAR1 mappings on certain coherent platforms, + * subject to capability and supported topology. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -ENOTSUPP if the requested operation is not supported. + * -ENOMEM if the driver failed to allocate memory or if + * insufficient resources were available to complete the operation. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_get_pages_persistent(uint64_t virtual_address, + uint64_t length, + struct nvidia_p2p_page_table **page_table, + uint32_t flags); + +#define NVIDIA_P2P_DMA_MAPPING_VERSION 0x00020003 + +#define NVIDIA_P2P_DMA_MAPPING_VERSION_COMPATIBLE(p) \ + NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_DMA_MAPPING_VERSION) + +struct pci_dev; + +typedef +struct nvidia_p2p_dma_mapping { + uint32_t version; + enum nvidia_p2p_page_size_type page_size_type; + uint32_t entries; + uint64_t *dma_addresses; + void *private; + struct pci_dev *pci_dev; +} nvidia_p2p_dma_mapping_t; + +/* + * @brief + * Make the physical pages retrieved using nvidia_p2p_get_pages accessible to + * a third-party device. + * + * @param[in] peer + * The struct pci_dev * of the peer device that needs to DMA to/from the + * mapping. + * @param[in] page_table + * The page table outlining the physical pages underlying the mapping, as + * retrieved with nvidia_p2p_get_pages(). + * @param[out] dma_mapping + * The DMA mapping containing the DMA addresses to use on the third-party + * device. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -ENOTSUPP if the requested operation is not supported. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_dma_map_pages(struct pci_dev *peer, + struct nvidia_p2p_page_table *page_table, + struct nvidia_p2p_dma_mapping **dma_mapping); + +/* + * @brief + * Unmap the physical pages previously mapped to the third-party device by + * nvidia_p2p_dma_map_pages(). + * + * @param[in] peer + * The struct pci_dev * of the peer device that the DMA mapping belongs to. + * @param[in] page_table + * The page table backing the DMA mapping to be unmapped. + * @param[in] dma_mapping + * The DMA mapping containing the DMA addresses used by the third-party + * device, as retrieved with nvidia_p2p_dma_map_pages(). After this call + * returns, neither this struct nor the addresses contained within will be + * valid for use by the third-party device. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer, + struct nvidia_p2p_page_table *page_table, + struct nvidia_p2p_dma_mapping *dma_mapping); + +/* + * @brief + * Release a set of pages previously made accessible to + * a third-party device. + * + * This API may sleep. + * + * @param[in] p2p_token + * A token that uniquely identifies the P2P mapping. + * @param[in] va_space + * A GPU virtual address space qualifier. + * @param[in] virtual_address + * The start address in the specified virtual address space. + * @param[in] page_table + * A pointer to the array of structures with P2P PTEs. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -EIO if an unknown error occurred. 
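+ *
+ * A minimal usage sketch (hypothetical caller; token, va_space, gpu_va, len,
+ * my_free_cb, and my_data are illustrative, and error handling is elided):
+ *
+ *   struct nvidia_p2p_page_table *pt = NULL;
+ *
+ *   if (nvidia_p2p_get_pages(token, va_space, gpu_va, len, &pt,
+ *                            my_free_cb, my_data) == 0)
+ *   {
+ *       // ... program the third-party device using pt->pages[i] ...
+ *       nvidia_p2p_put_pages(token, va_space, gpu_va, pt);
+ *   }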
+ */ +int nvidia_p2p_put_pages(uint64_t p2p_token, + uint32_t va_space, uint64_t virtual_address, + struct nvidia_p2p_page_table *page_table); + +/* + * @brief + * Release a set of persistent pages previously made accessible to + * a third-party device. + * + * This API may sleep. + * + * @param[in] virtual_address + * The start address in the specified virtual address space. + * @param[in] page_table + * A pointer to the array of structures with P2P PTEs. + * @param[in] flags + * Must be set to zero for now. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_put_pages_persistent(uint64_t virtual_address, + struct nvidia_p2p_page_table *page_table, + uint32_t flags); + +/* + * @brief + * Free a third-party P2P page table. (This function is a no-op.) + * + * @param[in] page_table + * A pointer to the array of structures with P2P PTEs. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + */ +int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table); + +/* + * @brief + * Free a third-party P2P DMA mapping. (This function is a no-op.) + * + * @param[in] dma_mapping + * A pointer to the DMA mapping structure. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + */ +int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping); + +#define NVIDIA_P2P_RSYNC_DRIVER_VERSION 0x00010001 + +#define NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(p) \ + NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_DRIVER_VERSION) + +typedef +struct nvidia_p2p_rsync_driver { + uint32_t version; + int (*get_relaxed_ordering_mode)(int *mode, void *data); + void (*put_relaxed_ordering_mode)(int mode, void *data); + void (*wait_for_rsync)(struct pci_dev *gpu, void *data); +} nvidia_p2p_rsync_driver_t; + +/* + * @brief + * Registers the rsync driver. + * + * @param[in] driver + * A pointer to the rsync driver structure. The NVIDIA driver would use, + * + * get_relaxed_ordering_mode to obtain a reference to the current relaxed + * ordering mode (treated as a boolean) from the rsync driver. + * + * put_relaxed_ordering_mode to release a reference to the current relaxed + * ordering mode back to the rsync driver. The NVIDIA driver will call this + * function once for each successful call to get_relaxed_ordering_mode, and + * the relaxed ordering mode must not change until the last reference is + * released. + * + * wait_for_rsync to call into the rsync module to issue RSYNC. This callback + * can't sleep or re-schedule as it may arrive under spinlocks. + * @param[in] data + * A pointer to the rsync driver's private data. + * + * @Returns + * 0 upon successful completion. + * -EINVAL parameters are incorrect. + * -EBUSY if a module is already registered or GPU devices are in use. + */ +int nvidia_p2p_register_rsync_driver(nvidia_p2p_rsync_driver_t *driver, + void *data); + +/* + * @brief + * Unregisters the rsync driver. + * + * @param[in] driver + * A pointer to the rsync driver structure. + * @param[in] data + * A pointer to the rsync driver's private data. 
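+ *
+ * Both arguments are expected to match the values previously passed to
+ * nvidia_p2p_register_rsync_driver().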
+ */ +void nvidia_p2p_unregister_rsync_driver(nvidia_p2p_rsync_driver_t *driver, + void *data); + +#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION 0x00020001 + +#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION_COMPATIBLE(p) \ + NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_REG_INFO_VERSION) + +typedef struct nvidia_p2p_rsync_reg { + void *ptr; + size_t size; + struct pci_dev *ibmnpu; + struct pci_dev *gpu; + uint32_t cluster_id; + uint32_t socket_id; +} nvidia_p2p_rsync_reg_t; + +typedef struct nvidia_p2p_rsync_reg_info { + uint32_t version; + nvidia_p2p_rsync_reg_t *regs; + size_t entries; +} nvidia_p2p_rsync_reg_info_t; + +/* + * @brief + * This interface is no longer supported and will always return an error. It + * is left in place (for now) to allow third-party callers to build without + * any errors. + * + * @Returns + * -ENODEV + */ +int nvidia_p2p_get_rsync_registers(nvidia_p2p_rsync_reg_info_t **reg_info); + +/* + * @brief + * This interface is no longer supported. It is left in place (for now) to + * allow third-party callers to build without any errors. + */ +void nvidia_p2p_put_rsync_registers(nvidia_p2p_rsync_reg_info_t *reg_info); + +#endif /* _NV_P2P_H_ */ diff --git a/kernel-open/nvidia/nv-pat.c b/kernel-open/nvidia/nv-pat.c new file mode 100644 index 0000000..870dedb --- /dev/null +++ b/kernel-open/nvidia/nv-pat.c @@ -0,0 +1,405 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" +#include "nv-reg.h" +#include "nv-pat.h" + +int nv_pat_mode = NV_PAT_MODE_DISABLED; + +#if defined(NV_ENABLE_PAT_SUPPORT) +/* + * Private PAT support for use by the NVIDIA driver. This is used on + * kernels that do not modify the PAT to include a write-combining + * entry. + * + * On kernels that have CONFIG_X86_PAT, the NVIDIA driver still checks that the + * WC entry is as expected before using PAT. 
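+ *
+ * Concretely, the builtin path below rewrites PAT entry 1 in the IA32_PAT
+ * MSR (0x277) to the write-combining type (0x01) on each CPU, and restores
+ * the saved original entries when PAT support is disabled.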
+ */ + +#if defined(CONFIG_X86_PAT) +#define NV_ENABLE_BUILTIN_PAT_SUPPORT 0 +#else +#define NV_ENABLE_BUILTIN_PAT_SUPPORT 1 +#endif + + +#define NV_READ_PAT_ENTRIES(pat1, pat2) rdmsr(0x277, (pat1), (pat2)) +#define NV_WRITE_PAT_ENTRIES(pat1, pat2) wrmsr(0x277, (pat1), (pat2)) +#define NV_PAT_ENTRY(pat, index) \ + (((pat) & (0xff << ((index)*8))) >> ((index)*8)) + +#if NV_ENABLE_BUILTIN_PAT_SUPPORT + +static unsigned long orig_pat1, orig_pat2; + +static inline void nv_disable_caches(unsigned long *cr4) +{ + unsigned long cr0 = read_cr0(); + write_cr0(((cr0 & (0xdfffffff)) | 0x40000000)); + wbinvd(); + *cr4 = __read_cr4(); + if (*cr4 & 0x80) __write_cr4(*cr4 & ~0x80); + __flush_tlb(); +} + +static inline void nv_enable_caches(unsigned long cr4) +{ + unsigned long cr0 = read_cr0(); + wbinvd(); + __flush_tlb(); + write_cr0((cr0 & 0x9fffffff)); + if (cr4 & 0x80) __write_cr4(cr4); +} + +static void nv_setup_pat_entries(void *info) +{ + unsigned long pat1, pat2, cr4; + unsigned long eflags; + +#if defined(NV_ENABLE_HOTPLUG_CPU) + int cpu = (NvUPtr)info; + if ((cpu != 0) && (cpu != (int)smp_processor_id())) + return; +#endif + + NV_SAVE_FLAGS(eflags); + NV_CLI(); + nv_disable_caches(&cr4); + + NV_READ_PAT_ENTRIES(pat1, pat2); + + pat1 &= 0xffff00ff; + pat1 |= 0x00000100; + + NV_WRITE_PAT_ENTRIES(pat1, pat2); + + nv_enable_caches(cr4); + NV_RESTORE_FLAGS(eflags); +} + +static void nv_restore_pat_entries(void *info) +{ + unsigned long cr4; + unsigned long eflags; + +#if defined(NV_ENABLE_HOTPLUG_CPU) + int cpu = (NvUPtr)info; + if ((cpu != 0) && (cpu != (int)smp_processor_id())) + return; +#endif + + NV_SAVE_FLAGS(eflags); + NV_CLI(); + nv_disable_caches(&cr4); + + NV_WRITE_PAT_ENTRIES(orig_pat1, orig_pat2); + + nv_enable_caches(cr4); + NV_RESTORE_FLAGS(eflags); +} + +static int +nvidia_cpu_teardown(unsigned int cpu) +{ +#if defined(NV_ENABLE_HOTPLUG_CPU) + unsigned int this_cpu = get_cpu(); + + if (this_cpu == cpu) + nv_restore_pat_entries(NULL); + else + smp_call_function(nv_restore_pat_entries, &cpu, 1); + + put_cpu(); +#endif + return 0; +} + +static int +nvidia_cpu_online(unsigned int cpu) +{ +#if defined(NV_ENABLE_HOTPLUG_CPU) + unsigned int this_cpu = get_cpu(); + + if (this_cpu == cpu) + nv_setup_pat_entries(NULL); + else + smp_call_function(nv_setup_pat_entries, &cpu, 1); + + put_cpu(); +#endif + return 0; +} + +static int nv_enable_builtin_pat_support(void) +{ + unsigned long pat1, pat2; + + NV_READ_PAT_ENTRIES(orig_pat1, orig_pat2); + nv_printf(NV_DBG_SETUP, "saved orig pats as 0x%lx 0x%lx\n", orig_pat1, orig_pat2); + + on_each_cpu(nv_setup_pat_entries, NULL, 1); + + NV_READ_PAT_ENTRIES(pat1, pat2); + nv_printf(NV_DBG_SETUP, "changed pats to 0x%lx 0x%lx\n", pat1, pat2); + return 1; +} + +static void nv_disable_builtin_pat_support(void) +{ + unsigned long pat1, pat2; + + on_each_cpu(nv_restore_pat_entries, NULL, 1); + + nv_pat_mode = NV_PAT_MODE_DISABLED; + + NV_READ_PAT_ENTRIES(pat1, pat2); + nv_printf(NV_DBG_SETUP, "restored orig pats as 0x%lx 0x%lx\n", pat1, pat2); +} + +static int +nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) +{ +/* CPU_DOWN_FAILED was added by the following commit + * 2004 Oct 18: 71da3667be80d30121df3972caa0bf5684228379 + * + * CPU_DOWN_PREPARE was added by the following commit + * 2004 Oct 18: d13d28de21d913aacd3c91e76e307fa2eb7835d8 + * + * We use one ifdef for both macros since they were added on the same day. 
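+ *
+ * These action codes belong to the legacy CPU notifier interface; kernels
+ * new enough to lack them provide the cpuhp state machine instead, which
+ * this driver registers below via cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, ...).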
+ */ +#if defined(CPU_DOWN_FAILED) + switch (action) + { + case CPU_DOWN_FAILED: + case CPU_ONLINE: + nvidia_cpu_online((NvUPtr)hcpu); + break; + case CPU_DOWN_PREPARE: + nvidia_cpu_teardown((NvUPtr)hcpu); + break; + } +#endif + return NOTIFY_OK; +} + +static enum cpuhp_state nvidia_pat_online; + +static int +nvidia_register_cpu_hotplug_notifier(void) +{ + int ret; + + /* + * cpuhp_setup_state() returns positive number on success when state is + * CPUHP_AP_ONLINE_DYN. On failure, it returns a negative number. + */ + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "nvidia/pat:online", + nvidia_cpu_online, + nvidia_cpu_teardown); + if (ret < 0) + { + /* + * If cpuhp_setup_state() fails, the cpuhp_remove_state() + * should never be called. If it gets called, we might remove + * some other state. Hence, explicitly set + * nvidia_pat_online to zero. This will trigger a BUG() + * in cpuhp_remove_state(). + */ + nvidia_pat_online = 0; + } + else + { + nvidia_pat_online = ret; + } + + if (ret < 0) + { + nv_disable_pat_support(); + nv_printf(NV_DBG_ERRORS, + "NVRM: CPU hotplug notifier registration failed!\n"); + return -EIO; + } + return 0; +} + +static void +nvidia_unregister_cpu_hotplug_notifier(void) +{ + cpuhp_remove_state(nvidia_pat_online); +} + + +#else /* NV_ENABLE_BUILTIN_PAT_SUPPORT */ + +static int nv_enable_builtin_pat_support(void) +{ + return 0; +} +static void nv_disable_builtin_pat_support(void) +{ +} +static int nvidia_register_cpu_hotplug_notifier(void) +{ + return -EIO; +} +static void nvidia_unregister_cpu_hotplug_notifier(void) +{ +} + +#endif /* NV_ENABLE_BUILTIN_PAT_SUPPORT */ + +static int nv_determine_pat_mode(void) +{ + unsigned int pat1, pat2, i; + NvU8 PAT_WC_index; + + if (!test_bit(X86_FEATURE_PAT, + (volatile unsigned long *)&boot_cpu_data.x86_capability)) + { + if ((boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) || + (boot_cpu_data.cpuid_level < 1) || + ((cpuid_edx(1) & (1 << 16)) == 0) || + (boot_cpu_data.x86 != 6) || (boot_cpu_data.x86_model >= 15)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: CPU does not support the PAT.\n"); + return NV_PAT_MODE_DISABLED; + } + } + + NV_READ_PAT_ENTRIES(pat1, pat2); + PAT_WC_index = 0xf; + + for (i = 0; i < 4; i++) + { + if (NV_PAT_ENTRY(pat1, i) == 0x01) + { + PAT_WC_index = i; + break; + } + + if (NV_PAT_ENTRY(pat2, i) == 0x01) + { + PAT_WC_index = (i + 4); + break; + } + } + + if (PAT_WC_index == 1) + { + return NV_PAT_MODE_KERNEL; + } + else if (PAT_WC_index != 0xf) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: PAT configuration unsupported.\n"); + return NV_PAT_MODE_DISABLED; + } + else + { +#if NV_ENABLE_BUILTIN_PAT_SUPPORT + return NV_PAT_MODE_BUILTIN; +#else + return NV_PAT_MODE_DISABLED; +#endif /* NV_ENABLE_BUILTIN_PAT_SUPPORT */ + } +} + + +int nv_enable_pat_support(void) +{ + if (nv_pat_mode != NV_PAT_MODE_DISABLED) + return 1; + + nv_pat_mode = nv_determine_pat_mode(); + + switch (nv_pat_mode) + { + case NV_PAT_MODE_DISABLED: + /* avoid the PAT if unavailable/unusable */ + return 0; + case NV_PAT_MODE_KERNEL: + /* inherit the kernel's PAT layout */ + return 1; + case NV_PAT_MODE_BUILTIN: + /* use builtin code to modify the PAT layout */ + break; + } + + return nv_enable_builtin_pat_support(); +} + +void nv_disable_pat_support(void) +{ + if (nv_pat_mode != NV_PAT_MODE_BUILTIN) + return; + + nv_disable_builtin_pat_support(); +} + +int nv_init_pat_support(nvidia_stack_t *sp) +{ + NV_STATUS status; + NvU32 data; + int disable_pat = 0; + int ret = 0; + + status = rm_read_registry_dword(sp, NULL, + NV_USE_PAGE_ATTRIBUTE_TABLE, 
&data); + if ((status == NV_OK) && ((int)data != ~0)) + { + disable_pat = (data == 0); + } + + if (!disable_pat) + { + nv_enable_pat_support(); + if (nv_pat_mode == NV_PAT_MODE_BUILTIN) + { + ret = nvidia_register_cpu_hotplug_notifier(); + return ret; + } + } + else + { + nv_printf(NV_DBG_ERRORS, + "NVRM: builtin PAT support disabled.\n"); + } + + return 0; +} + +void nv_teardown_pat_support(void) +{ + if (nv_pat_mode == NV_PAT_MODE_BUILTIN) + { + nv_disable_pat_support(); + nvidia_unregister_cpu_hotplug_notifier(); + } +} +#endif /* defined(NV_ENABLE_PAT_SUPPORT) */ diff --git a/kernel-open/nvidia/nv-pat.h b/kernel-open/nvidia/nv-pat.h new file mode 100644 index 0000000..0d26a84 --- /dev/null +++ b/kernel-open/nvidia/nv-pat.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _NV_PAT_H_ +#define _NV_PAT_H_ + +#include "nv-linux.h" + + +#if defined(NV_ENABLE_PAT_SUPPORT) +extern int nv_init_pat_support(nvidia_stack_t *sp); +extern void nv_teardown_pat_support(void); +extern int nv_enable_pat_support(void); +extern void nv_disable_pat_support(void); +#else +static inline int nv_init_pat_support(nvidia_stack_t *sp) +{ + (void)sp; + return 0; +} + +static inline void nv_teardown_pat_support(void) +{ + return; +} + +static inline int nv_enable_pat_support(void) +{ + return 1; +} + +static inline void nv_disable_pat_support(void) +{ + return; +} +#endif + +#endif /* _NV_PAT_H_ */ diff --git a/kernel-open/nvidia/nv-pci-table.c b/kernel-open/nvidia/nv-pci-table.c new file mode 100644 index 0000000..ac730d5 --- /dev/null +++ b/kernel-open/nvidia/nv-pci-table.c @@ -0,0 +1,90 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+
+#include "nv-pci-table.h"
+#include "cpuopsys.h"
+
+#if defined(NV_BSD)
+/* Define PCI classes that FreeBSD's linuxkpi is missing */
+#define PCI_VENDOR_ID_NVIDIA   0x10de
+#define PCI_CLASS_DISPLAY_VGA  0x0300
+#define PCI_CLASS_DISPLAY_3D   0x0302
+#define PCI_CLASS_BRIDGE_OTHER 0x0680
+#endif
+
+/* Devices supported by RM */
+struct pci_device_id nv_pci_table[] = {
+    {
+        .vendor      = PCI_VENDOR_ID_NVIDIA,
+        .device      = PCI_ANY_ID,
+        .subvendor   = PCI_ANY_ID,
+        .subdevice   = PCI_ANY_ID,
+        .class       = (PCI_CLASS_DISPLAY_VGA << 8),
+        .class_mask  = ~0
+    },
+    {
+        .vendor      = PCI_VENDOR_ID_NVIDIA,
+        .device      = PCI_ANY_ID,
+        .subvendor   = PCI_ANY_ID,
+        .subdevice   = PCI_ANY_ID,
+        .class       = (PCI_CLASS_DISPLAY_3D << 8),
+        .class_mask  = ~0
+    },
+    { }
+};
+
+/* Devices supported by all drivers in nvidia.ko */
+struct pci_device_id nv_module_device_table[4] = {
+    {
+        .vendor      = PCI_VENDOR_ID_NVIDIA,
+        .device      = PCI_ANY_ID,
+        .subvendor   = PCI_ANY_ID,
+        .subdevice   = PCI_ANY_ID,
+        .class       = (PCI_CLASS_DISPLAY_VGA << 8),
+        .class_mask  = ~0
+    },
+    {
+        .vendor      = PCI_VENDOR_ID_NVIDIA,
+        .device      = PCI_ANY_ID,
+        .subvendor   = PCI_ANY_ID,
+        .subdevice   = PCI_ANY_ID,
+        .class       = (PCI_CLASS_DISPLAY_3D << 8),
+        .class_mask  = ~0
+    },
+    {
+        .vendor      = PCI_VENDOR_ID_NVIDIA,
+        .device      = PCI_ANY_ID,
+        .subvendor   = PCI_ANY_ID,
+        .subdevice   = PCI_ANY_ID,
+        .class       = (PCI_CLASS_BRIDGE_OTHER << 8),
+        .class_mask  = ~0
+    },
+    { }
+};
+
+#if defined(NV_LINUX)
+MODULE_DEVICE_TABLE(pci, nv_module_device_table);
+#endif
diff --git a/kernel-open/nvidia/nv-pci-table.h b/kernel-open/nvidia/nv-pci-table.h
new file mode 100644
index 0000000..25daaf4
--- /dev/null
+++ b/kernel-open/nvidia/nv-pci-table.h
@@ -0,0 +1,32 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_PCI_TABLE_H_
+#define _NV_PCI_TABLE_H_
+
+#include <linux/pci.h>
+
+extern struct pci_device_id nv_pci_table[];
+extern struct pci_device_id nv_module_device_table[4];
+
+#endif /* _NV_PCI_TABLE_H_ */
diff --git a/kernel-open/nvidia/nv-pci.c b/kernel-open/nvidia/nv-pci.c
new file mode 100644
index 0000000..9c4d042
--- /dev/null
+++ b/kernel-open/nvidia/nv-pci.c
@@ -0,0 +1,2608 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nv-pci-table.h"
+#include "nv-pci-types.h"
+#include "nv-pci.h"
+#include "nv-msi.h"
+#include "nv-hypervisor.h"
+#include "nv-reg.h"
+
+#if defined(NV_VGPU_KVM_BUILD)
+#include "nv-vgpu-vfio-interface.h"
+#endif
+/* NOTE: the angle-bracket header names below were reconstructed from the
+ * kernel APIs used in this file. */
+#include <linux/pci.h>
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+
+#if defined(CONFIG_PM_DEVFREQ)
+#include <linux/devfreq.h>
+
+#if defined(CONFIG_DEVFREQ_THERMAL) \
+    && defined(NV_DEVFREQ_DEV_PROFILE_HAS_IS_COOLING_DEVICE) \
+    && defined(NV_THERMAL_ZONE_FOR_EACH_TRIP_PRESENT) \
+    && defined(NV_THERMAL_BIND_CDEV_TO_TRIP_PRESENT) \
+    && defined(NV_THERMAL_UNBIND_CDEV_FROM_TRIP_PRESENT)
+#include <linux/thermal.h>
+#define NV_HAS_COOLING_SUPPORTED 1
+#else
+#define NV_HAS_COOLING_SUPPORTED 0
+#endif
+
+#endif
+
+#if defined(CONFIG_INTERCONNECT) \
+    && defined(NV_ICC_SET_BW_PRESENT) \
+    && defined(NV_DEVM_ICC_GET_PRESENT)
+#include <linux/interconnect.h>
+#define NV_HAS_ICC_SUPPORTED 1
+#else
+#define NV_HAS_ICC_SUPPORTED 0
+#endif
+
+#if defined(NV_SEQ_READ_ITER_PRESENT)
+#include <linux/seq_file.h>
+#include <linux/kernfs.h>
+#endif
+
+#include "detect-self-hosted.h"
+
+#if !defined(NV_BUS_TYPE_HAS_IOMMU_OPS)
+#include <linux/iommu.h>
+#endif
+#if NV_IS_EXPORT_SYMBOL_GPL_pci_ats_supported
+#include <linux/pci-ats.h>
+#endif
+
+extern int NVreg_GrdmaPciTopoCheckOverride;
+
+static void
+nv_check_and_exclude_gpu(
+    nvidia_stack_t *sp,
+    nv_state_t *nv
+)
+{
+    char *uuid_str;
+
+    uuid_str = rm_get_gpu_uuid(sp, nv);
+    if (uuid_str == NULL)
+    {
+        NV_DEV_PRINTF(NV_DBG_INFO, nv, "Unable to read UUID");
+        return;
+    }
+
+    if (nv_is_uuid_in_gpu_exclusion_list(uuid_str))
+    {
+        NV_STATUS rm_status = rm_exclude_adapter(sp, nv);
+        if (rm_status != NV_OK)
+        {
+            NV_DEV_PRINTF_STATUS(NV_DBG_ERRORS, nv, rm_status,
+                                 "Failed to exclude GPU %s", uuid_str);
+            goto done;
+        }
+        nv->flags |= NV_FLAG_EXCLUDE;
+        NV_DEV_PRINTF(NV_DBG_INFO, nv, "Excluded GPU %s successfully\n",
+                      uuid_str);
+    }
+
+done:
+    os_free_mem(uuid_str);
+}
+
+static NvBool nv_treat_missing_irq_as_error(void)
+{
+#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
+    return (nv_get_hypervisor_type() != OS_HYPERVISOR_HYPERV);
+#else
+    return NV_TRUE;
+#endif
+}
+
+static void nv_get_pci_sysfs_config
+(
+    struct pci_dev *pci_dev,
+    nv_linux_state_t *nvl
+)
+{
+#if NV_FILESYSTEM_ACCESS_AVAILABLE
+    char filename[50];
+    int ret;
+
+    ret = snprintf(filename, sizeof(filename),
+                   "/sys/bus/pci/devices/%04x:%02x:%02x.0/config",
+                   NV_PCI_DOMAIN_NUMBER(pci_dev),
+                   NV_PCI_BUS_NUMBER(pci_dev),
+                   NV_PCI_SLOT_NUMBER(pci_dev));
+    if (ret > 0 && ret < sizeof(filename))
+    {
+        struct file *file = filp_open(filename, O_RDONLY, 0);
+        if (!IS_ERR(file))
+        {
+#if defined(NV_SEQ_READ_ITER_PRESENT)
+            /*
+             * Sanity check to confirm that the file path is mounted over
+             * the sysfs file system.
+             */
+            if ((file->f_inode != NULL) && (file->f_inode->i_sb != NULL) &&
+                (strcmp(file->f_inode->i_sb->s_id, "sysfs") == 0))
+            {
+                struct seq_file *sf = file->private_data;
+
+                /*
+                 * Sanity check to confirm that 'file->private_data'
+                 * actually points to a 'struct seq_file'.
+                 */
+                if ((sf != NULL) && (sf->file == file) && (sf->op == NULL))
+                {
+                    struct kernfs_open_file *of = sf->private;
+
+                    /*
+                     * Sanity check to confirm that 'sf->private' actually
+                     * points to a 'struct kernfs_open_file'.
+                     */
+                    if ((of != NULL) && (of->file == file) &&
+                        (of->seq_file == sf))
+                    {
+                        nvl->sysfs_config_file = file;
+                    }
+                }
+            }
+
+            if (nvl->sysfs_config_file == NULL)
+            {
+                filp_close(file, NULL);
+            }
+#else
+            nvl->sysfs_config_file = file;
+#endif
+        }
+    }
+#endif
+}
+
+static int nv_resize_pcie_bars(struct pci_dev *pci_dev) {
+#if defined(NV_PCI_REBAR_GET_POSSIBLE_SIZES_PRESENT)
+    u16 cmd;
+    int r, old_size, requested_size;
+    unsigned long sizes;
+    int ret = 0;
+#if NV_IS_EXPORT_SYMBOL_PRESENT_pci_find_host_bridge
+    struct pci_host_bridge *host;
+#endif
+
+    if (NVreg_EnableResizableBar == 0)
+    {
+        nv_printf(NV_DBG_INFO, "NVRM: resizable BAR disabled by regkey, skipping\n");
+        return 0;
+    }
+
+    // Check if BAR1 has PCIe rebar capabilities
+    sizes = pci_rebar_get_possible_sizes(pci_dev, NV_GPU_BAR1);
+    if (sizes == 0) {
+        /* ReBAR not available. Nothing to do.
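+           (pci_rebar_get_possible_sizes() returns a bitmask in which a set
+           bit n means a BAR size of 2^n MiB is supported, so a zero mask
+           means the device has no usable resizable-BAR capability; the
+           fls(sizes) - 1 computation below selects the largest size.)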
*/ + return 0; + } + + /* Try to resize the BAR to the largest supported size */ + requested_size = fls(sizes) - 1; + + /* Save the current size, just in case things go wrong */ + old_size = pci_rebar_bytes_to_size(pci_resource_len(pci_dev, NV_GPU_BAR1)); + + if (old_size == requested_size) { + nv_printf(NV_DBG_INFO, "NVRM: %04x:%02x:%02x.%x: BAR1 already at requested size.\n", + NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); + return 0; + } +#if NV_IS_EXPORT_SYMBOL_PRESENT_pci_find_host_bridge + /* If the kernel will refuse us, don't even try to resize, + but give an informative error */ + host = pci_find_host_bridge(pci_dev->bus); + if (host->preserve_config) { + nv_printf(NV_DBG_INFO, "NVRM: Not resizing BAR because the firmware forbids moving windows.\n"); + return 0; + } +#endif + nv_printf(NV_DBG_INFO, "NVRM: %04x:%02x:%02x.%x: Attempting to resize BAR1.\n", + NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); + + /* Disable memory decoding - required by the kernel APIs */ + pci_read_config_word(pci_dev, PCI_COMMAND, &cmd); + pci_write_config_word(pci_dev, PCI_COMMAND, cmd & ~PCI_COMMAND_MEMORY); + + /* Release BAR1 */ + pci_release_resource(pci_dev, NV_GPU_BAR1); + + /* Release BAR3 - we don't want to resize it, it's in the same bridge, so we'll want to move it */ + pci_release_resource(pci_dev, NV_GPU_BAR3); + +resize: + /* Attempt to resize BAR1 to the largest supported size */ + r = pci_resize_resource(pci_dev, NV_GPU_BAR1, requested_size); + + if (r) { + if (r == -ENOSPC) + { + /* step through smaller sizes down to original size */ + if (requested_size > old_size) + { + clear_bit(fls(sizes) - 1, &sizes); + requested_size = fls(sizes) - 1; + goto resize; + } + else + { + nv_printf(NV_DBG_ERRORS, "NVRM: No address space to allocate resized BAR1.\n"); + } + } + else if (r == -EOPNOTSUPP) + { + nv_printf(NV_DBG_WARNINGS, "NVRM: BAR resize resource not supported.\n"); + } + else + { + nv_printf(NV_DBG_WARNINGS, "NVRM: BAR resizing failed with error `%d`.\n", r); + } + } + + /* Re-attempt assignment of PCIe resources */ + pci_assign_unassigned_bus_resources(pci_dev->bus); + + if ((pci_resource_flags(pci_dev, NV_GPU_BAR1) & IORESOURCE_UNSET) || + (pci_resource_flags(pci_dev, NV_GPU_BAR3) & IORESOURCE_UNSET)) { + if (requested_size != old_size) { + /* Try to get the BAR back with the original size */ + requested_size = old_size; + goto resize; + } + /* Something went horribly wrong and the kernel didn't manage to re-allocate BAR1. + This is unlikely (because we had space before), but can happen. */ + nv_printf(NV_DBG_ERRORS, "NVRM: FATAL: Failed to re-allocate BAR1.\n"); + ret = -ENODEV; + } + + /* Re-enable memory decoding */ + pci_write_config_word(pci_dev, PCI_COMMAND, cmd); + + return ret; +#else + nv_printf(NV_DBG_INFO, "NVRM: Resizable BAR is not supported on this kernel version.\n"); + return 0; +#endif /* NV_PCI_REBAR_GET_POSSIBLE_SIZES_PRESENT */ +} + +#if defined(CONFIG_ACPI_NUMA) && NV_IS_EXPORT_SYMBOL_PRESENT_pxm_to_node +/* + * Parse the SRAT table to look for numa node associated with the GPU. + * + * find_gpu_numa_nodes_in_srat() is strongly associated with + * nv_init_coherent_link_info(). Hence matching the conditions wrapping. 
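+ *
+ * Per the ACPI specification, a generic initiator's PCI device handle
+ * carries the segment number in bytes 0-1 and the bus number in byte 2;
+ * the matching below rebuilds a segment/bus key the same way, with a
+ * workaround for hypervisors that incorrectly place the bus in byte 3.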
+ */
+static NvU32 find_gpu_numa_nodes_in_srat(nv_linux_state_t *nvl)
+{
+    NvU32 gi_dbdf, dev_dbdf, pxm_count = 0;
+    struct acpi_table_header *table_header;
+    struct acpi_subtable_header *subtable_header;
+    unsigned long table_end, subtable_header_length;
+    struct acpi_srat_generic_affinity *gi;
+    NvU32 numa_node = NUMA_NO_NODE;
+
+    if (NV_PCI_DEVFN(nvl->pci_dev) != 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Failed to parse SRAT GI for %04x:%02x:%02x.%x "
+                  "since a non-zero device function is not supported.\n",
+                  NV_PCI_DOMAIN_NUMBER(nvl->pci_dev), NV_PCI_BUS_NUMBER(nvl->pci_dev),
+                  NV_PCI_SLOT_NUMBER(nvl->pci_dev), PCI_FUNC(nvl->pci_dev->devfn));
+        return 0;
+    }
+
+    if (acpi_get_table(ACPI_SIG_SRAT, 0, &table_header)) {
+        nv_printf(NV_DBG_INFO, "NVRM: Failed to parse the SRAT table.\n");
+        return 0;
+    }
+
+    table_end = (unsigned long)table_header + table_header->length;
+    subtable_header = (struct acpi_subtable_header *)
+                      ((unsigned long)table_header + sizeof(struct acpi_table_srat));
+    subtable_header_length = subtable_header->length;
+
+    dev_dbdf = NV_PCI_DOMAIN_NUMBER(nvl->pci_dev) << 16 |
+               NV_PCI_BUS_NUMBER(nvl->pci_dev) << 8 |
+               NV_PCI_DEVFN(nvl->pci_dev);
+
+    /*
+     * On baremetal and passthrough, there can be up to 8 generic
+     * initiators. This is not a hack, as a device can have any number of
+     * initiators that the hardware supports.
+     */
+    while (subtable_header_length &&
+           (((unsigned long)subtable_header) + subtable_header_length <= table_end)) {
+
+        if (subtable_header->type == ACPI_SRAT_TYPE_GENERIC_AFFINITY) {
+            NvU8 busAtByte2, busAtByte3;
+            gi = (struct acpi_srat_generic_affinity *) subtable_header;
+            busAtByte2 = gi->device_handle[2];
+            busAtByte3 = gi->device_handle[3];
+
+            // Device and function are zero here, as enforced by the check above
+            gi_dbdf = *((NvU16 *)(&gi->device_handle[0])) << 16 |
+                      (busAtByte2 != 0 ? busAtByte2 : busAtByte3) << 8;
+
+            if (gi_dbdf == dev_dbdf) {
+                numa_node = pxm_to_node(gi->proximity_domain);
+                if (numa_node < MAX_NUMNODES) {
+                    pxm_count++;
+                    set_bit(numa_node, nvl->coherent_link_info.free_node_bitmap);
+                }
+                else {
+                    /* We shouldn't be here. This is a misconfiguration. */
+                    nv_printf(NV_DBG_INFO, "NVRM: Invalid node-id found.\n");
+                    pxm_count = 0;
+                    goto exit;
+                }
+                nv_printf(NV_DBG_INFO,
+                          "NVRM: matching SRAT GI entry: 0x%x 0x%x 0x%x 0x%x PXM: %d\n",
+                          gi->device_handle[3],
+                          gi->device_handle[2],
+                          gi->device_handle[1],
+                          gi->device_handle[0],
+                          gi->proximity_domain);
+                if ((busAtByte2) == 0 &&
+                    (busAtByte3) != 0)
+                {
+                    /*
+                     * TODO: Remove this WAR once the Hypervisor stack is
+                     * updated to fix this bug and all CSPs have moved to
+                     * the updated Hypervisor stack with the fix.
+                     */
+                    nv_printf(NV_DBG_WARNINGS,
+                              "NVRM: PCIe bus value picked from byte 3 offset in SRAT GI entry: 0x%x 0x%x 0x%x 0x%x PXM: %d\n"
+                              "NVRM: Hypervisor stack is old and not following the ACPI spec defined offset.\n"
+                              "NVRM: Please consider upgrading the Hypervisor stack, as this workaround will be removed in a future release.\n",
+                              gi->device_handle[3],
+                              gi->device_handle[2],
+                              gi->device_handle[1],
+                              gi->device_handle[0],
+                              gi->proximity_domain);
+                }
+            }
+        }
+
+        subtable_header = (struct acpi_subtable_header *)
+                          ((unsigned long) subtable_header + subtable_header_length);
+        subtable_header_length = subtable_header->length;
+    }
+
+exit:
+    acpi_put_table(table_header);
+    return pxm_count;
+}
+#endif // defined(CONFIG_ACPI_NUMA) && NV_IS_EXPORT_SYMBOL_PRESENT_pxm_to_node
+
+static void
+nv_init_coherent_link_info
+(
+    nv_state_t *nv
+)
+{
+#if defined(CONFIG_ACPI_NUMA) && NV_IS_EXPORT_SYMBOL_PRESENT_pxm_to_node
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NvU64 pa = 0;
+    NvU64 pxm_start = 0;
+    NvU64 pxm_count = 0;
+    NvU32 pxm;
+    NvU32 gi_found = 0, node;
+
+    if (!NVCPU_IS_AARCH64)
+        return;
+
+    if (!dev_is_pci(nvl->dev))
+        return;
+
+    gi_found = find_gpu_numa_nodes_in_srat(nvl);
+
+    if (!gi_found &&
+        (device_property_read_u64(nvl->dev, "nvidia,gpu-mem-pxm-start", &pxm_start) != 0 ||
+         device_property_read_u64(nvl->dev, "nvidia,gpu-mem-pxm-count", &pxm_count) != 0))
+        goto failed;
+
+    if (device_property_read_u64(nvl->dev, "nvidia,gpu-mem-base-pa", &pa) == 0)
+    {
+        nvl->coherent_link_info.gpu_mem_pa = pa;
+    }
+    else
+    {
+        unsigned int gpu_bar1_offset, gpu_bar2_offset;
+
+        /*
+         * This implies that the DSD key for PXM start and count is present
+         * while the one for the Physical Address (PA) is absent.
+         */
+        if (nv_get_hypervisor_type() == OS_HYPERVISOR_UNKNOWN)
+        {
+            /* Fail for the baremetal case */
+            goto failed;
+        }
+
+        /*
+         * For the virtualization use case on SHH, the coherent GPU memory
+         * PA is exposed as BAR2 to the VM and the "nvidia,gpu-mem-base-pa"
+         * property is not present. Set the GPU memory PA to the BAR2 start
+         * address.
+         *
+         * In the case of passthrough, the reserved memory portion of the
+         * coherent GPU memory is exposed as BAR1.
+         */
+
+        /*
+         * Hopper+ uses 64-bit BARs, so GPU BAR2 should be at BAR4/5 and
+         * GPU BAR1 is at BAR2/3
+         */
+        gpu_bar1_offset = 2;
+        gpu_bar2_offset = 4;
+
+        /*
+         * cannot use nv->bars[] here as it is not populated correctly if BAR1 is
+         * not present but BAR2 is, even though the PCIe spec allows it. Not fixing
+         * nv->bars[] since this is not a valid scenario with the actual HW and
+         * possible only with this host emulated BAR scenario.
+         */
+        if (!((NV_PCI_RESOURCE_VALID(nvl->pci_dev, gpu_bar2_offset)) &&
+              (NV_PCI_RESOURCE_FLAGS(nvl->pci_dev, gpu_bar2_offset) & PCI_BASE_ADDRESS_SPACE)
+                  == PCI_BASE_ADDRESS_SPACE_MEMORY))
+        {
+            // BAR2 contains the cacheable part of the coherent FB region and must be present.
+            goto failed;
+        }
+        nvl->coherent_link_info.gpu_mem_pa =
+            NV_PCI_RESOURCE_START(nvl->pci_dev, gpu_bar2_offset);
+
+        if ((pci_devid_is_self_hosted_hopper(nv->pci_info.device_id)) &&
+            (NV_PCI_RESOURCE_VALID(nvl->pci_dev, gpu_bar1_offset)) &&
+            (NV_PCI_RESOURCE_FLAGS(nvl->pci_dev, gpu_bar1_offset) & PCI_BASE_ADDRESS_SPACE)
+                == PCI_BASE_ADDRESS_SPACE_MEMORY)
+        {
+            // Present only in the passthrough case for self-hosted Hopper.
+ nvl->coherent_link_info.rsvd_mem_pa = NV_PCI_RESOURCE_START(nvl->pci_dev, gpu_bar1_offset); + + // + // Unset nv->bars[1] only for self-hosted Hopper as BAR1 in virtualization case + // for hopper is used to convey RM reserved memory information and doesn't contain + // the traditional GPU BAR2. Starting from Blackwell BAR1 will be the real BAR1. + // + memset(&nv->bars[1], 0, sizeof(nv->bars[1])); + } + + // + // Unset nv->bars[2] for all self-hosted systems as BAR2 in the virtualization case + // is used only to convey the coherent GPU memory information and doesn't contain + // the traditional GPU BAR2. This is to ensure the coherent FB addresses don't + // inadvertently pass the IS_FB_OFFSET or IS_IMEM_OFFSET checks. + // + memset(&nv->bars[2], 0, sizeof(nv->bars[2])); + } + + + NV_DEV_PRINTF(NV_DBG_INFO, nv, "DSD properties: \n"); + NV_DEV_PRINTF(NV_DBG_INFO, nv, "\tGPU memory PA: 0x%lx \n", + nvl->coherent_link_info.gpu_mem_pa); + if (pci_devid_is_self_hosted_hopper(nv->pci_info.device_id)) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "\tGPU reserved memory PA: 0x%lx \n", + nvl->coherent_link_info.rsvd_mem_pa); + } + + if (!gi_found) + { + for (pxm = pxm_start; pxm < (pxm_start + pxm_count); pxm++) + { + node = pxm_to_node(pxm); + if (node != NUMA_NO_NODE) + { + set_bit(node, nvl->coherent_link_info.free_node_bitmap); + } + } + } + + for (node = 0; (node = find_next_bit(nvl->coherent_link_info.free_node_bitmap, + MAX_NUMNODES, node)) != MAX_NUMNODES; node++) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "\tNVRM: GPU memory NUMA node: %u\n", node); + } + + if (NVreg_EnableUserNUMAManagement && !os_is_vgx_hyper()) + { + NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_OFFLINE); + nvl->numa_info.use_auto_online = NV_TRUE; + + if (!bitmap_empty(nvl->coherent_link_info.free_node_bitmap, MAX_NUMNODES)) + { + nvl->numa_info.node_id = find_first_bit(nvl->coherent_link_info.free_node_bitmap, MAX_NUMNODES); + } + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "GPU NUMA information: node id: %u PA: 0x%llx\n", + nvl->numa_info.node_id, nvl->coherent_link_info.gpu_mem_pa); + } + else + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "User-mode NUMA onlining disabled.\n"); + } + + return; + +failed: + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "Cannot get coherent link info.\n"); +#endif // defined(CONFIG_ACPI_NUMA) && NV_IS_EXPORT_SYMBOL_PRESENT_pxm_to_node + return; +} + +#if defined(CONFIG_PM_DEVFREQ) + +#define to_tegra_devfreq_dev(x) \ + container_of(x, struct nv_pci_tegra_devfreq_dev, dev) + +struct nv_pci_tegra_devfreq_data { + const char *clk_name; + const char *icc_name; + const unsigned int gpc_fuse_field; + const TEGRASOC_DEVFREQ_CLK devfreq_clk; +}; + +#if NV_HAS_COOLING_SUPPORTED +struct nv_pci_tegra_thermal_data { + const char *tz_name; + const struct thermal_trip *passive_trip; + struct list_head zones; +}; +#endif + +struct nv_pci_tegra_devfreq_dev { + TEGRASOC_DEVFREQ_CLK devfreq_clk; + int domain; + struct device dev; + struct list_head gpc_cluster; + struct list_head nvd_cluster; + struct nv_pci_tegra_devfreq_dev *gpc_master; + struct nv_pci_tegra_devfreq_dev *nvd_master; + struct clk *clk; + struct devfreq *devfreq; + bool boost_enabled; + struct delayed_work boost_disable; +#if NV_HAS_ICC_SUPPORTED + struct icc_path *icc_path; +#endif +#if NV_HAS_COOLING_SUPPORTED + struct list_head therm_zones; +#endif +}; + +static const struct nv_pci_tegra_devfreq_data gb10b_tegra_devfreq_table[] = { + { + .clk_name = "gpc0clk", + .icc_name = "gpu-write", + .gpc_fuse_field = BIT(3), + .devfreq_clk = TEGRASOC_DEVFREQ_CLK_GPC, + }, + { 
+ .clk_name = "gpc1clk", + .icc_name = "gpu-write", + .gpc_fuse_field = BIT(4), + .devfreq_clk = TEGRASOC_DEVFREQ_CLK_GPC, + }, + { + .clk_name = "gpc2clk", + .icc_name = "gpu-write", + .gpc_fuse_field = BIT(5), + .devfreq_clk = TEGRASOC_DEVFREQ_CLK_GPC, + }, + { + .clk_name = "nvdclk", + .icc_name = "video-write", + .devfreq_clk = TEGRASOC_DEVFREQ_CLK_NVD, + }, + { + .clk_name = "sysclk" + }, + { + .clk_name = "uprocclk" + }, +}; + +static void nv_pci_gb10b_device_release(struct device *dev) +{ + ; +} + +static int +nv_pci_gb10b_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) +{ + struct pci_dev *pdev = to_pci_dev(dev->parent); + nv_linux_state_t *nvl = pci_get_drvdata(pdev); + struct nv_pci_tegra_devfreq_dev *tdev = to_tegra_devfreq_dev(dev), *tptr; + unsigned long rate; +#if NV_HAS_ICC_SUPPORTED + static const unsigned num_mss_ports = 8; + static const unsigned mss_port_bandwidth = 32; + static const unsigned gpu_bus_bandwidth = num_mss_ports * mss_port_bandwidth; + u32 kBps; +#endif + + // + // When GPU is suspended(railgated), the PM runtime suspend callback should + // suspend all devfreq devices, and devfreq cycle should not be triggered. + // + // However, users are still able to change the devfreq governor from the + // sysfs interface and indirectly invoke the update_devfreq function, which + // will further call the target callback function. + // + // Early stop the process here before clk_set_rate/clk_get_rate, since these + // calls served by BPMP will awake the GPU. + // + if (pm_runtime_suspended(&pdev->dev)) + { + return 0; + } + + clk_set_rate(tdev->clk, *freq); + *freq = clk_get_rate(tdev->clk); + +#if NV_HAS_ICC_SUPPORTED + if (tdev->icc_path != NULL) + { + kBps = Bps_to_icc(*freq * gpu_bus_bandwidth * 400 / 1000); + if (tdev->boost_enabled) + { + kBps = UINT_MAX; + } + icc_set_bw(tdev->icc_path, kBps, 0); + } +#endif + + rate = 0; + list_for_each_entry(tptr, &tdev->gpc_cluster, gpc_cluster) + { + if (tptr->gpc_master != NULL) + { + rate = max(rate, clk_get_rate(tptr->gpc_master->clk)); + } + + if (tptr->nvd_master != NULL) + { + rate = max(rate, clk_get_rate(tptr->nvd_master->clk)); + } + + if (tdev->boost_enabled + && (tptr == nvl->sys_devfreq_dev || tptr == nvl->pwr_devfreq_dev)) + { + clk_set_rate(tptr->clk, ULONG_MAX); + } + else + { + clk_set_rate(tptr->clk, rate); + } + } + + rate = 0; + list_for_each_entry(tptr, &tdev->nvd_cluster, nvd_cluster) + { + if (tptr->gpc_master != NULL) + { + rate = max(rate, clk_get_rate(tptr->gpc_master->clk)); + } + + if (tptr->nvd_master != NULL) + { + rate = max(rate, clk_get_rate(tptr->nvd_master->clk)); + } + + if (tdev->boost_enabled + && (tptr == nvl->sys_devfreq_dev || tptr == nvl->pwr_devfreq_dev)) + { + clk_set_rate(tptr->clk, ULONG_MAX); + } + else + { + clk_set_rate(tptr->clk, rate); + } + } + + return 0; +} + +static int +nv_pci_tegra_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) +{ + struct nv_pci_tegra_devfreq_dev *tdev = to_tegra_devfreq_dev(dev); + + *freq = clk_get_rate(tdev->clk); + + return 0; +} + +static int +nv_pci_tegra_devfreq_get_dev_status(struct device *dev, + struct devfreq_dev_status *stat) +{ + struct pci_dev *pdev = to_pci_dev(dev->parent); + nv_linux_state_t *nvl = pci_get_drvdata(pdev); + nv_state_t *nv = NV_STATE_PTR(nvl); + nvidia_stack_t *sp = NULL; + struct nv_pci_tegra_devfreq_dev *tdev = to_tegra_devfreq_dev(dev); + unsigned int load = 0; + int retval = 0; + NV_STATUS status; + + // + // When GPU is suspended(railgated), the PM runtime suspend callback should + 
// suspend all devfreq devices, and devfreq cycle should not be triggered. + // + // However, users are still able to change the devfreq governor from the + // sysfs interface and indirectly invoke the update_devfreq function, which + // will further call the get_dev_status callback function. + // + if (pm_runtime_suspended(&pdev->dev)) + { + stat->total_time = 100; + stat->busy_time = 0; + stat->current_frequency = clk_get_rate(tdev->clk); + return 0; + } + + retval = nv_kmem_cache_alloc_stack(&sp); + if (retval != 0) + { + dev_warn(&pdev->dev, "fail to nv_kmem_cache_alloc_stack: %d\n", retval); + return -ENOMEM; + } + + // + // Fetch the load value in percentage from the specified clock domain. If the + // load information is unavailable, just consider the load as 100% so that the + // devfreq core will scale the underlying clock to Fmax to prevent any + // performance drop. + // + status = rm_pmu_perfmon_get_load(sp, nv, &load, tdev->devfreq_clk); + if (status != NV_OK) + { + load = 100; + } + + // Load calculation equals to (busy_time / total_time) in devfreq governors + // and devfreq governors expect total_time and busy_time in the same unit + stat->total_time = 100; + stat->busy_time = load; + stat->current_frequency = clk_get_rate(tdev->clk); + + nv_kmem_cache_free_stack(sp); + + return retval; +} + +static void +populate_opp_table(struct nv_pci_tegra_devfreq_dev *tdev) +{ + unsigned long max_rate, min_rate, step, rate; + long val; + + /* Get the max rate of the clock */ + val = clk_round_rate(tdev->clk, ULONG_MAX); + max_rate = (val < 0) ? ULONG_MAX : (unsigned long)val; + + /* Get the min rate of the clock */ + val = clk_round_rate(tdev->clk, 0); + min_rate = (val < 0) ? ULONG_MAX : (unsigned long)val; + + /* Get the step size of the clock */ + step = (min_rate == ULONG_MAX) ? 
ULONG_MAX : (min_rate + 1);
+    val = clk_round_rate(tdev->clk, step);
+    if ((val < 0) || (val < min_rate))
+    {
+        step = 0;
+    }
+    else
+    {
+        step = (unsigned long)val - min_rate;
+    }
+
+    /* Create the OPP table */
+    rate = min_rate;
+    do {
+        dev_pm_opp_add(&tdev->dev, rate, 0);
+        rate += step;
+    } while (rate <= max_rate && step);
+}
+
+static void
+nv_pci_tegra_devfreq_remove_opps(struct nv_pci_tegra_devfreq_dev *tdev)
+{
+#if defined(NV_DEVFREQ_HAS_FREQ_TABLE)
+    unsigned long *freq_table = tdev->devfreq->freq_table;
+    unsigned int max_state = tdev->devfreq->max_state;
+#else
+    unsigned long *freq_table = tdev->devfreq->profile->freq_table;
+    unsigned int max_state = tdev->devfreq->profile->max_state;
+#endif
+    int i;
+
+    for (i = 0; i < max_state; i++)
+    {
+        dev_pm_opp_remove(&tdev->dev, freq_table[i]);
+    }
+}
+
+#if NV_HAS_COOLING_SUPPORTED
+static int
+nv_pci_tegra_thermal_get_passive_trip_cb(struct thermal_trip *trip, void *arg)
+{
+    const struct thermal_trip **ptrip = arg;
+
+    /* Return zero to continue the search */
+    if (trip->type != THERMAL_TRIP_PASSIVE)
+        return 0;
+
+    /* Return nonzero to terminate the search */
+    *ptrip = trip;
+    return -1;
+}
+
+static int
+nv_pci_tegra_init_cooling_device(struct nv_pci_tegra_devfreq_dev *tdev)
+{
+    struct device *pdev = tdev->dev.parent;
+    const struct thermal_trip *passive_trip = NULL;
+    struct devfreq *devfreq = tdev->devfreq;
+    struct nv_pci_tegra_thermal_data *data;
+    struct thermal_zone_device *tzdev;
+    int i, err, val, n_strings, n_elems;
+    u32 temp_min, temp_max;
+    const char *tz_name;
+
+    if (!devfreq->cdev)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: devfreq cooling cannot be found\n");
+        return -ENODEV;
+    }
+
+    if (!pdev->of_node)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: associated OF node cannot be found\n");
+        return -ENODEV;
+    }
+
+    val = of_property_count_strings(pdev->of_node, "nvidia,thermal-zones");
+    if (val == -EINVAL)
+    {
+        return 0;
+    }
+    else if (val < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: nvidia,thermal-zones DT property format error\n");
+        return val;
+    }
+    n_strings = val;
+
+    val = of_property_count_u32_elems(pdev->of_node, "nvidia,cooling-device");
+    if (val == -EINVAL)
+    {
+        return 0;
+    }
+    else if (val < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: nvidia,cooling-device DT property format error\n");
+        return val;
+    }
+    n_elems = val;
+
+    if ((n_elems >> 1) != n_strings)
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "NVRM: the number of strings specified in nvidia,thermal-zones needs to "
+                  "be exactly half the number of elements specified in nvidia,cooling-device\n");
+        return -EINVAL;
+    }
+
+    if (((n_elems >> 1) == 0) && ((n_elems & 1) == 1))
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "NVRM: the number of elements specified in nvidia,cooling-device needs "
+                  "to be an even number\n");
+        return -EINVAL;
+    }
+
+    for (i = 0; i < n_strings; i++)
+    {
+        data = devm_kzalloc(pdev, sizeof(*data), GFP_KERNEL);
+        if (data == NULL)
+        {
+            err = -ENOMEM;
+            goto err_nv_pci_tegra_init_cooling_device;
+        }
+
+        of_property_read_string_index(pdev->of_node,
+                                      "nvidia,thermal-zones", i, &tz_name);
+        of_property_read_u32_index(pdev->of_node,
+                                   "nvidia,cooling-device", (i << 1) + 0, &temp_min);
+        of_property_read_u32_index(pdev->of_node,
+                                   "nvidia,cooling-device", (i << 1) + 1, &temp_max);
+
+        tzdev = thermal_zone_get_zone_by_name(tz_name);
+        if (IS_ERR(tzdev))
+        {
+            nv_printf(NV_DBG_ERRORS,
+                      "NVRM: fail to get %s thermal_zone_device\n", tz_name);
+            err = -ENODEV;
+            goto err_nv_pci_tegra_init_cooling_device;
+        }
+
+        thermal_zone_for_each_trip(tzdev,
nv_pci_tegra_thermal_get_passive_trip_cb, &passive_trip); + if (passive_trip == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: fail to find passive_trip in %s thermal_zone_device\n", tz_name); + err = -ENODEV; + goto err_nv_pci_tegra_init_cooling_device; + } + + val = thermal_bind_cdev_to_trip(tzdev, + passive_trip, + devfreq->cdev, + temp_max, temp_min, THERMAL_WEIGHT_DEFAULT); + if (val != 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: fail to bind devfreq cooling device with %s thermal_zone_device\n", tz_name); + err = -ENODEV; + goto err_nv_pci_tegra_init_cooling_device; + } + + data->tz_name = tz_name; + data->passive_trip = passive_trip; + list_add_tail(&data->zones, &tdev->therm_zones); + } + + return 0; + +err_nv_pci_tegra_init_cooling_device: + list_for_each_entry(data, &tdev->therm_zones, zones) + { + tzdev = thermal_zone_get_zone_by_name(data->tz_name); + if (IS_ERR(tzdev)) + { + continue; + } + + thermal_unbind_cdev_from_trip(tzdev, data->passive_trip, devfreq->cdev); + } + + return err; +} +#endif + +static int +nv_pci_gb10b_add_devfreq_device(struct nv_pci_tegra_devfreq_dev *tdev) +{ + struct devfreq_dev_profile *profile; + int err; + + populate_opp_table(tdev); + + profile = devm_kzalloc(&tdev->dev, sizeof(*profile), GFP_KERNEL); + if (profile == NULL) + { + err = -ENOMEM; + goto err_nv_pci_gb10b_add_devfreq_device_opp; + } + + profile->target = nv_pci_gb10b_devfreq_target; + profile->get_cur_freq = nv_pci_tegra_devfreq_get_cur_freq; + profile->get_dev_status = nv_pci_tegra_devfreq_get_dev_status; + profile->initial_freq = clk_get_rate(tdev->clk); + profile->polling_ms = 25; +#if NV_HAS_COOLING_SUPPORTED + profile->is_cooling_device = true; +#endif + + tdev->devfreq = devm_devfreq_add_device(&tdev->dev, + profile, + DEVFREQ_GOV_PERFORMANCE, + NULL); + if (IS_ERR(tdev->devfreq)) + { + err = PTR_ERR(tdev->devfreq); + goto err_nv_pci_gb10b_add_devfreq_device_opp; + } + +#if defined(NV_DEVFREQ_HAS_SUSPEND_FREQ) + tdev->devfreq->suspend_freq = tdev->devfreq->scaling_max_freq; +#endif + +#if NV_HAS_COOLING_SUPPORTED + err = nv_pci_tegra_init_cooling_device(tdev); + if (err) + { + goto err_nv_pci_gb10b_add_devfreq_device; + } +#endif + + return 0; + +#if NV_HAS_COOLING_SUPPORTED +err_nv_pci_gb10b_add_devfreq_device: + devm_devfreq_remove_device(&tdev->dev, tdev->devfreq); +#endif +err_nv_pci_gb10b_add_devfreq_device_opp: + nv_pci_tegra_devfreq_remove_opps(tdev); + + return err; +} + +static int +nv_pci_gb10b_register_devfreq(struct pci_dev *pdev) +{ + nv_linux_state_t *nvl = pci_get_drvdata(pdev); + nv_state_t *nv = NV_STATE_PTR(nvl); + struct pci_bus *pbus = pdev->bus; + const struct nv_pci_tegra_devfreq_data *tdata; + struct nv_pci_tegra_devfreq_dev *tdev; +#if NV_HAS_ICC_SUPPORTED + struct icc_path *icc_path; +#endif + struct clk *clk; + int i, err, node; + u32 gpu_pg_mask; + + while (pbus->parent != NULL) + { + pbus = pbus->parent; + } + + node = max(0, dev_to_node(to_pci_host_bridge(pbus->bridge)->dev.parent)); + + if (nv->tegra_pci_igpu_pg_mask == NV_TEGRA_PCI_IGPU_PG_MASK_DEFAULT) + { + gpu_pg_mask = 0; + } + else + { + gpu_pg_mask = nv->tegra_pci_igpu_pg_mask; + nv_printf(NV_DBG_INFO, "NVRM: devfreq register receives gpu_pg_mask = %u\n", gpu_pg_mask); + } + + for (i = 0; i < nvl->devfreq_table_size; i++) + { + tdata = &nvl->devfreq_table[i]; + + if (gpu_pg_mask && (gpu_pg_mask & tdata->gpc_fuse_field)) + { + continue; + } + +#if NV_HAS_ICC_SUPPORTED + if (tdata->icc_name != NULL) + { + icc_path = devm_of_icc_get(&pdev->dev, tdata->icc_name); + if (IS_ERR_OR_NULL(icc_path)) + { + 
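+                // The interconnect path is optional: when devm_of_icc_get()
+                // does not return a usable path, continue without bandwidth
+                // votes (the icc_set_bw() calls elsewhere are guarded by a
+                // NULL check) and rely on clock scaling alone.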
icc_path = NULL; + } + } +#endif + + clk = devm_clk_get(&pdev->dev, tdata->clk_name); + if (IS_ERR_OR_NULL(clk)) + { + continue; + } + + tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); + if (tdev == NULL) + { + return -ENOMEM; + } + + INIT_LIST_HEAD(&tdev->gpc_cluster); + INIT_LIST_HEAD(&tdev->nvd_cluster); +#if NV_HAS_COOLING_SUPPORTED + INIT_LIST_HEAD(&tdev->therm_zones); +#endif +#if NV_HAS_ICC_SUPPORTED + tdev->icc_path = icc_path; +#endif + tdev->clk = clk; + tdev->domain = node; + tdev->devfreq_clk = tdata->devfreq_clk; + + tdev->dev.parent = &pdev->dev; + tdev->dev.release = nv_pci_gb10b_device_release; + dev_set_name(&tdev->dev, "%s-%d", tdata->clk_name, node); + + if (strstr(tdata->clk_name, "gpc")) + { + if (nvl->gpc_devfreq_dev != NULL) + { + list_add_tail(&tdev->gpc_cluster, &nvl->gpc_devfreq_dev->gpc_cluster); + tdev->gpc_master = nvl->gpc_devfreq_dev; + } + else + { + nvl->gpc_devfreq_dev = tdev; + dev_set_name(&tdev->dev, "gpu-gpc-%d", node); + } + } + else if (strstr(tdata->clk_name, "nvd")) + { + nvl->nvd_devfreq_dev = tdev; + dev_set_name(&tdev->dev, "gpu-nvd-%d", node); + } + else if (strstr(tdata->clk_name, "sys")) + { + nvl->sys_devfreq_dev = tdev; + dev_set_name(&tdev->dev, "gpu-sys-%d", node); + } + else if (strstr(tdata->clk_name, "uproc")) + { + nvl->pwr_devfreq_dev = tdev; + dev_set_name(&tdev->dev, "gpu-pwr-%d", node); + } + + err = device_register(&tdev->dev); + if (err != 0) + { + goto error_return; + } + } + + if (nvl->gpc_devfreq_dev != NULL) + { + err = nv_pci_gb10b_add_devfreq_device(nvl->gpc_devfreq_dev); + if (err != 0) + { + nvl->gpc_devfreq_dev->devfreq = NULL; + goto error_slave_teardown; + } + + if (nvl->sys_devfreq_dev != NULL) + { + list_add_tail(&nvl->sys_devfreq_dev->gpc_cluster, &nvl->gpc_devfreq_dev->gpc_cluster); + nvl->sys_devfreq_dev->gpc_master = nvl->gpc_devfreq_dev; + } + + if (nvl->pwr_devfreq_dev != NULL) + { + list_add_tail(&nvl->pwr_devfreq_dev->gpc_cluster, &nvl->gpc_devfreq_dev->gpc_cluster); + nvl->pwr_devfreq_dev->gpc_master = nvl->gpc_devfreq_dev; + } + } + + if (nvl->nvd_devfreq_dev != NULL) + { + err = nv_pci_gb10b_add_devfreq_device(nvl->nvd_devfreq_dev); + if (err != 0) + { + nvl->nvd_devfreq_dev->devfreq = NULL; + goto error_slave_teardown; + } + + if (nvl->sys_devfreq_dev != NULL) + { + list_add_tail(&nvl->sys_devfreq_dev->nvd_cluster, &nvl->nvd_devfreq_dev->nvd_cluster); + nvl->sys_devfreq_dev->nvd_master = nvl->nvd_devfreq_dev; + } + + if (nvl->pwr_devfreq_dev != NULL) + { + list_add_tail(&nvl->pwr_devfreq_dev->nvd_cluster, &nvl->nvd_devfreq_dev->nvd_cluster); + nvl->pwr_devfreq_dev->nvd_master = nvl->nvd_devfreq_dev; + } + } + + return 0; + +error_slave_teardown: + if (nvl->sys_devfreq_dev != NULL) + { + if (nvl->sys_devfreq_dev->gpc_master != NULL) + { + list_del(&nvl->sys_devfreq_dev->gpc_cluster); + nvl->sys_devfreq_dev->gpc_master = NULL; + } + + device_unregister(&nvl->sys_devfreq_dev->dev); + nvl->sys_devfreq_dev = NULL; + } + if (nvl->pwr_devfreq_dev != NULL) + { + if (nvl->pwr_devfreq_dev->gpc_master != NULL) + { + list_del(&nvl->pwr_devfreq_dev->gpc_cluster); + nvl->pwr_devfreq_dev->gpc_master = NULL; + } + + device_unregister(&nvl->pwr_devfreq_dev->dev); + nvl->pwr_devfreq_dev = NULL; + } +error_return: + /* The caller will call unregister to unwind on failure */ + return err; +} + +static int +nv_pci_gb10b_suspend_devfreq(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + int err = 0; + + if (nvl->gpc_devfreq_dev != NULL && 
nvl->gpc_devfreq_dev->devfreq != NULL) + { + err = devfreq_suspend_device(nvl->gpc_devfreq_dev->devfreq); + if (err) + { + return err; + } + +#if NV_HAS_ICC_SUPPORTED + if (nvl->gpc_devfreq_dev->icc_path != NULL) + { + icc_set_bw(nvl->gpc_devfreq_dev->icc_path, 0, 0); + } +#endif + } + + if (nvl->nvd_devfreq_dev != NULL && nvl->nvd_devfreq_dev->devfreq != NULL) + { + err = devfreq_suspend_device(nvl->nvd_devfreq_dev->devfreq); + if (err) + { + return err; + } + +#if NV_HAS_ICC_SUPPORTED + if (nvl->nvd_devfreq_dev->icc_path != NULL) + { + icc_set_bw(nvl->nvd_devfreq_dev->icc_path, 0, 0); + } +#endif + } + + return err; +} + +static int +nv_pci_gb10b_resume_devfreq(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + int err = 0; + + if (nvl->gpc_devfreq_dev != NULL && nvl->gpc_devfreq_dev->devfreq != NULL) + { + err = devfreq_resume_device(nvl->gpc_devfreq_dev->devfreq); + if (err) + { + return err; + } + } + + if (nvl->nvd_devfreq_dev != NULL && nvl->nvd_devfreq_dev->devfreq != NULL) + { + err = devfreq_resume_device(nvl->nvd_devfreq_dev->devfreq); + if (err) + { + return err; + } + } + + return err; +} + +static void nv_pci_devfreq_disable_boost(struct work_struct *work) +{ +#if defined(NV_UPDATE_DEVFREQ_PRESENT) + struct nv_pci_tegra_devfreq_dev *tdev; + + tdev = container_of(work, struct nv_pci_tegra_devfreq_dev, boost_disable.work); + tdev->boost_enabled = 0; +#endif +} + +static int +nv_pci_gb10b_devfreq_enable_boost(struct device *dev, unsigned int duration) +{ +#if defined(NV_UPDATE_DEVFREQ_PRESENT) + struct pci_dev *pci_dev = to_pci_dev(dev); + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + struct nv_pci_tegra_devfreq_dev *tdev; + unsigned long delay; + + if (duration == 0) + return 0; + + delay = msecs_to_jiffies(duration * 1000); + + tdev = nvl->gpc_devfreq_dev; + if (tdev != NULL && tdev->devfreq != NULL && tdev->boost_enabled == 0) + { + tdev->boost_enabled = 1; + INIT_DELAYED_WORK(&tdev->boost_disable, nv_pci_devfreq_disable_boost); + schedule_delayed_work(&tdev->boost_disable, delay); + } + + tdev = nvl->nvd_devfreq_dev; + if (tdev != NULL && tdev->devfreq != NULL && tdev->boost_enabled == 0) + { + tdev->boost_enabled = 1; + INIT_DELAYED_WORK(&tdev->boost_disable, nv_pci_devfreq_disable_boost); + schedule_delayed_work(&tdev->boost_disable, delay); + } + + return 0; +#else // !defined(NV_UPDATE_DEVFREQ_PRESENT) + return -1; +#endif +} + +static int +nv_pci_gb10b_devfreq_disable_boost(struct device *dev) +{ +#if defined(NV_UPDATE_DEVFREQ_PRESENT) + struct pci_dev *pci_dev = to_pci_dev(dev); + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + struct nv_pci_tegra_devfreq_dev *tdev; + + tdev = nvl->gpc_devfreq_dev; + if (tdev != NULL && tdev->devfreq != NULL && tdev->boost_enabled) + { + tdev->boost_enabled = 0; + cancel_delayed_work_sync(&tdev->boost_disable); + } + + tdev = nvl->nvd_devfreq_dev; + if (tdev != NULL && tdev->devfreq != NULL && tdev->boost_enabled) + { + tdev->boost_enabled = 0; + cancel_delayed_work_sync(&tdev->boost_disable); + } + + return 0; +#else // !defined(NV_UPDATE_DEVFREQ_PRESENT) + return -1; +#endif +} + +struct nv_pci_tegra_data { + unsigned short vendor; + unsigned short device; + const struct nv_pci_tegra_devfreq_data *devfreq_table; + unsigned int devfreq_table_size; + int (*devfreq_register)(struct pci_dev*); + int (*devfreq_suspend)(struct device*); + int (*devfreq_resume)(struct device*); + int (*devfreq_enable_boost)(struct device*, unsigned int); + int 
(*devfreq_disable_boost)(struct device*);
+};
+
+static const struct nv_pci_tegra_data nv_pci_tegra_table[] = {
+    {
+        .vendor = 0x10de,
+        .device = 0x2b00,
+        .devfreq_table = gb10b_tegra_devfreq_table,
+        .devfreq_table_size = ARRAY_SIZE(gb10b_tegra_devfreq_table),
+        .devfreq_register = nv_pci_gb10b_register_devfreq,
+        .devfreq_suspend = nv_pci_gb10b_suspend_devfreq,
+        .devfreq_resume = nv_pci_gb10b_resume_devfreq,
+        .devfreq_enable_boost = nv_pci_gb10b_devfreq_enable_boost,
+        .devfreq_disable_boost = nv_pci_gb10b_devfreq_disable_boost,
+    },
+};
+
+static void
+nv_pci_tegra_devfreq_remove(struct nv_pci_tegra_devfreq_dev *tdev)
+{
+    struct nv_pci_tegra_devfreq_dev *tptr, *next;
+#if NV_HAS_COOLING_SUPPORTED
+    struct nv_pci_tegra_thermal_data *data;
+    struct thermal_zone_device *tzdev;
+#endif
+
+    if (tdev->devfreq != NULL)
+    {
+#if NV_HAS_COOLING_SUPPORTED
+        list_for_each_entry(data, &tdev->therm_zones, zones)
+        {
+            tzdev = thermal_zone_get_zone_by_name(data->tz_name);
+            if (IS_ERR(tzdev))
+            {
+                continue;
+            }
+
+            thermal_unbind_cdev_from_trip(tzdev, data->passive_trip, tdev->devfreq->cdev);
+        }
+#endif
+        devm_devfreq_remove_device(&tdev->dev, tdev->devfreq);
+        nv_pci_tegra_devfreq_remove_opps(tdev);
+        tdev->devfreq = NULL;
+    }
+
+#if NV_HAS_ICC_SUPPORTED
+    if (tdev->icc_path != NULL)
+    {
+        icc_set_bw(tdev->icc_path, 0, 0);
+    }
+#endif
+
+    list_for_each_entry_safe(tptr, next, &tdev->gpc_cluster, gpc_cluster)
+    {
+        if (tptr->clk != NULL)
+        {
+            devm_clk_put(tdev->dev.parent, tptr->clk);
+            tptr->clk = NULL;
+            device_unregister(&tptr->dev);
+        }
+
+        list_del(&tptr->gpc_cluster);
+        tptr->gpc_master = NULL;
+    }
+
+    list_for_each_entry_safe(tptr, next, &tdev->nvd_cluster, nvd_cluster)
+    {
+        if (tptr->clk != NULL)
+        {
+            devm_clk_put(tdev->dev.parent, tptr->clk);
+            tptr->clk = NULL;
+            device_unregister(&tptr->dev);
+        }
+
+        list_del(&tptr->nvd_cluster);
+        tptr->nvd_master = NULL;
+    }
+
+    if (tdev->clk != NULL)
+    {
+        // Release tdev's own clock here (not the stale loop cursor tptr)
+        // before unregistering the device.
+        devm_clk_put(tdev->dev.parent, tdev->clk);
+        tdev->clk = NULL;
+        device_unregister(&tdev->dev);
+    }
+}
+
+static void
+nv_pci_tegra_unregister_devfreq(struct pci_dev *pdev)
+{
+    nv_linux_state_t *nvl = pci_get_drvdata(pdev);
+
+    if (nvl->gpc_devfreq_dev != NULL)
+    {
+        nv_pci_tegra_devfreq_remove(nvl->gpc_devfreq_dev);
+        nvl->gpc_devfreq_dev = NULL;
+    }
+
+    if (nvl->nvd_devfreq_dev != NULL)
+    {
+        nv_pci_tegra_devfreq_remove(nvl->nvd_devfreq_dev);
+        nvl->nvd_devfreq_dev = NULL;
+    }
+}
+
+static const struct nv_pci_tegra_data*
+nv_pci_get_tegra_igpu_data(struct pci_dev *pdev)
+{
+    const struct nv_pci_tegra_data *tegra_data = NULL;
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(nv_pci_tegra_table); i++)
+    {
+        tegra_data = &nv_pci_tegra_table[i];
+
+        if ((tegra_data->vendor == pdev->vendor)
+            && (tegra_data->device == pdev->device))
+        {
+            return tegra_data;
+        }
+    }
+
+    return NULL;
+}
+
+static int
+nv_pci_tegra_register_devfreq(struct pci_dev *pdev)
+{
+    nv_linux_state_t *nvl = pci_get_drvdata(pdev);
+    const struct nv_pci_tegra_data *tegra_data = NULL;
+    int err;
+
+    tegra_data = nv_pci_get_tegra_igpu_data(pdev);
+
+    if (tegra_data == NULL)
+    {
+        return 0;
+    }
+
+    nvl->devfreq_table = tegra_data->devfreq_table;
+    nvl->devfreq_table_size = tegra_data->devfreq_table_size;
+    nvl->devfreq_suspend = tegra_data->devfreq_suspend;
+    nvl->devfreq_resume = tegra_data->devfreq_resume;
+    nvl->devfreq_enable_boost = tegra_data->devfreq_enable_boost;
+    nvl->devfreq_disable_boost = tegra_data->devfreq_disable_boost;
+
+    err = tegra_data->devfreq_register(pdev);
+    if (err != 0)
+    {
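+        // Registration may have partially succeeded before the failure;
+        // tear down any devfreq devices that were created, then propagate
+        // the error.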
nv_pci_tegra_unregister_devfreq(pdev); + return err; + } + + return 0; +} +#endif + +static void nv_init_dynamic_power_management +( + nvidia_stack_t *sp, + struct pci_dev *pci_dev +) +{ + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + nv_state_t *nv = NV_STATE_PTR(nvl); + NvBool pr3_acpi_method_present = NV_FALSE; + + nvl->sysfs_config_file = NULL; + + nv_get_pci_sysfs_config(pci_dev, nvl); + + if (nv_get_hypervisor_type() != OS_HYPERVISOR_UNKNOWN) + { + pr3_acpi_method_present = nv_acpi_power_resource_method_present(pci_dev); + } + else if (pci_dev->bus && pci_dev->bus->self) + { + pr3_acpi_method_present = nv_acpi_power_resource_method_present(pci_dev->bus->self); + } + + rm_init_dynamic_power_management(sp, nv, pr3_acpi_method_present); +} + +static void nv_init_tegra_gpu_pg_mask(nvidia_stack_t *sp, struct pci_dev *pci_dev) +{ + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + nv_state_t *nv = NV_STATE_PTR(nvl); + struct device_node *np = pci_dev->dev.of_node; + u32 gpu_pg_mask = 0; + + /* Only continue with certain Tegra PCI iGPUs */ + if (!nv->supports_tegra_igpu_rg) + { + return; + } + + nv->tegra_pci_igpu_pg_mask = NV_TEGRA_PCI_IGPU_PG_MASK_DEFAULT; + + of_property_read_u32(np, "nvidia,fuse-overrides", &gpu_pg_mask); + if (gpu_pg_mask != 0) { + nv_printf(NV_DBG_INFO, + "NVRM: nvidia,fuse-overrides parsed from device tree: 0x%x\n", gpu_pg_mask); + nv->tegra_pci_igpu_pg_mask = gpu_pg_mask; + } + + nv_set_gpu_pg_mask(nv); +} + +static NvBool +nv_pci_validate_bars(const struct pci_dev *pci_dev, NvBool only_bar0) +{ + unsigned int i, j; + NvBool last_bar_64bit = NV_FALSE; + + for (i = 0, j = 0; i < NVRM_PCICFG_NUM_BARS && j < NV_GPU_NUM_BARS; i++) + { + if (NV_PCI_RESOURCE_VALID(pci_dev, i)) + { + if ((NV_PCI_RESOURCE_FLAGS(pci_dev, i) & PCI_BASE_ADDRESS_MEM_TYPE_64) && + (NV_PCI_RESOURCE_FLAGS(pci_dev, i) & PCI_BASE_ADDRESS_MEM_PREFETCH)) + { + last_bar_64bit = NV_TRUE; + } + + // + // If we are here, then we have found a valid BAR -- 32 or 64-bit. + // + j++; + + if (only_bar0) + return NV_TRUE; + + continue; + } + + // + // If last_bar_64bit is "true" then, we are looking at the 2nd (upper) + // half of the 64-bit BAR. This is typically all 0s which looks invalid + // but it's normal and not a problem and we can ignore it and continue. + // + if (last_bar_64bit) + { + last_bar_64bit = NV_FALSE; + continue; + } + + // Invalid 32 or 64-bit BAR. 
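+        // A region that is neither a valid memory BAR nor the upper half
+        // of a 64-bit BAR was left unassigned; report it and fail the
+        // validation.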
+ nv_printf(NV_DBG_ERRORS, + "NVRM: This PCI I/O region assigned to your NVIDIA device is invalid:\n" + "NVRM: BAR%d is %" NvU64_fmtu "M @ 0x%" NvU64_fmtx " (PCI:%04x:%02x:%02x.%x)\n", i, + (NvU64)(NV_PCI_RESOURCE_SIZE(pci_dev, i) >> 20), + (NvU64)NV_PCI_RESOURCE_START(pci_dev, i), + NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); + + return NV_FALSE; + } + + return NV_TRUE; +} + +/* find nvidia devices and set initial state */ +static int +nv_pci_probe +( + struct pci_dev *pci_dev, + const struct pci_device_id *id_table +) +{ + nv_state_t *nv = NULL; + nv_linux_state_t *nvl = NULL; + unsigned int i, j; + int flags = 0; + nvidia_stack_t *sp = NULL; + NvBool prev_nv_ats_supported = nv_ats_supported; + NV_STATUS status; + NvU8 regs_bar_index = nv_bar_index_to_os_bar_index(pci_dev, + NV_GPU_BAR_INDEX_REGS); + NvBool bar0_requested = NV_FALSE; + + nv_printf(NV_DBG_SETUP, "NVRM: probing 0x%x 0x%x, class 0x%x\n", + pci_dev->vendor, pci_dev->device, pci_dev->class); + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return -1; + } + +#ifdef NV_PCI_SRIOV_SUPPORT + if (pci_dev->is_virtfn) + { +#if defined(NV_VGPU_KVM_BUILD) +#if defined(NV_BUS_TYPE_HAS_IOMMU_OPS) + if (pci_dev->dev.bus->iommu_ops == NULL) +#else + if ((pci_dev->dev.iommu != NULL) && (pci_dev->dev.iommu->iommu_dev != NULL) && + (pci_dev->dev.iommu->iommu_dev->ops == NULL)) +#endif + { + nv_printf(NV_DBG_ERRORS, "NVRM: Aborting probe for VF %04x:%02x:%02x.%x " + "since IOMMU is not present on the system.\n", + NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); + goto failed; + } + + nv_kmem_cache_free_stack(sp); + return 0; +#else + nv_printf(NV_DBG_ERRORS, "NVRM: Ignoring probe for VF %04x:%02x:%02x.%x ", + NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); + + goto failed; +#endif /* NV_VGPU_KVM_BUILD */ + } +#endif /* NV_PCI_SRIOV_SUPPORT */ + + if (!rm_wait_for_bar_firewall( + sp, + NV_PCI_DOMAIN_NUMBER(pci_dev), + NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), + PCI_FUNC(pci_dev->devfn), + pci_dev->device, + pci_dev->subsystem_device)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to wait for bar firewall to lower\n"); + goto failed; + } + + if (!rm_is_supported_pci_device( + (pci_dev->class >> 16) & 0xFF, + (pci_dev->class >> 8) & 0xFF, + pci_dev->vendor, + pci_dev->device, + pci_dev->subsystem_vendor, + pci_dev->subsystem_device, + NV_FALSE /* print_legacy_warning */)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: ignoring the legacy GPU %04x:%02x:%02x.%x\n", + NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); + goto failed; + } + + num_probed_nv_devices++; + + if (pci_enable_device(pci_dev) != 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: pci_enable_device failed, aborting\n"); + goto failed; + } + + if ((pci_dev->irq == 0 && !pci_find_capability(pci_dev, PCI_CAP_ID_MSIX)) + && nv_treat_missing_irq_as_error()) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Can't find an IRQ for your NVIDIA card!\n"); + nv_printf(NV_DBG_ERRORS, "NVRM: Please check your BIOS settings.\n"); + nv_printf(NV_DBG_ERRORS, "NVRM: [Plug & Play OS] should be set to NO\n"); + nv_printf(NV_DBG_ERRORS, "NVRM: [Assign IRQ to VGA] should be set to YES \n"); + goto failed; + } + + // Validate if BAR0 is usable + if (!nv_pci_validate_bars(pci_dev, /* only_bar0 = */ NV_TRUE)) + goto failed; + + if 
(!request_mem_region(NV_PCI_RESOURCE_START(pci_dev, regs_bar_index), + NV_PCI_RESOURCE_SIZE(pci_dev, regs_bar_index), + nv_device_name)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: request_mem_region failed for %" NvU64_fmtu "M @ 0x%" NvU64_fmtx ". This can\n" + "NVRM: occur when a driver such as rivatv is loaded and claims\n" + "NVRM: ownership of the device's registers.\n", + (NvU64)(NV_PCI_RESOURCE_SIZE(pci_dev, regs_bar_index) >> 20), + (NvU64)NV_PCI_RESOURCE_START(pci_dev, regs_bar_index)); + goto failed; + } + + bar0_requested = NV_TRUE; + + NV_KZALLOC(nvl, sizeof(nv_linux_state_t)); + if (nvl == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate memory\n"); + goto err_not_supported; + } + + nv = NV_STATE_PTR(nvl); + + // Map BAR0 + for (i = 0; i < NVRM_PCICFG_NUM_BARS; i++) + { + if ((NV_PCI_RESOURCE_VALID(pci_dev, i)) && + (NV_PCI_RESOURCE_FLAGS(pci_dev, i) & PCI_BASE_ADDRESS_SPACE) + == PCI_BASE_ADDRESS_SPACE_MEMORY) + { + nv->bars[NV_GPU_BAR_INDEX_REGS].offset = NVRM_PCICFG_BAR_OFFSET(i); + nv->bars[NV_GPU_BAR_INDEX_REGS].cpu_address = NV_PCI_RESOURCE_START(pci_dev, i); + nv->bars[NV_GPU_BAR_INDEX_REGS].size = NV_PCI_RESOURCE_SIZE(pci_dev, i); + + break; + } + } + nv->regs = &nv->bars[NV_GPU_BAR_INDEX_REGS]; + + pci_set_drvdata(pci_dev, (void *)nvl); + + /* default to 32-bit PCI bus address space */ + pci_dev->dma_mask = 0xffffffffULL; + + nvl->dev = &pci_dev->dev; + nvl->pci_dev = pci_dev; + nvl->dma_dev.dev = nvl->dev; + + nv->pci_info.vendor_id = pci_dev->vendor; + nv->pci_info.device_id = pci_dev->device; + nv->subsystem_id = pci_dev->subsystem_device; + nv->subsystem_vendor = pci_dev->subsystem_vendor; + nv->os_state = (void *) nvl; + nv->dma_dev = &nvl->dma_dev; + nv->pci_info.domain = NV_PCI_DOMAIN_NUMBER(pci_dev); + nv->pci_info.bus = NV_PCI_BUS_NUMBER(pci_dev); + nv->pci_info.slot = NV_PCI_SLOT_NUMBER(pci_dev); + nv->handle = pci_dev; + nv->flags |= flags; + + if (!nv_lock_init_locks(sp, nv)) + { + goto err_not_supported; + } + + // Wire RM HAL + if (!rm_init_private_state(sp, nv)) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "rm_init_private_state() failed!\n"); + goto err_zero_dev; + } + + if (!nv->is_tegra_pci_igpu && + !pci_devid_is_self_hosted(pci_dev->device) && + !nv_pci_validate_bars(pci_dev, /* only_bar0 = */ NV_FALSE)) + goto err_zero_dev; + + if (nv_resize_pcie_bars(pci_dev)) { + nv_printf(NV_DBG_ERRORS, + "NVRM: Fatal Error while attempting to resize PCIe BARs.\n"); + goto err_zero_dev; + } + + nvl->all_mappings_revoked = NV_TRUE; + nvl->safe_to_mmap = NV_TRUE; + nvl->gpu_wakeup_callback_needed = NV_TRUE; + INIT_LIST_HEAD(&nvl->open_files); + + // Map BAR>=1 + if (!nv->is_tegra_pci_igpu) + { + for (i = 0, j = 0; i < NVRM_PCICFG_NUM_BARS && j < NV_GPU_NUM_BARS; i++) + { + if (j == NV_GPU_BAR_INDEX_REGS) + { + j++; + continue; + } + + if ((NV_PCI_RESOURCE_VALID(pci_dev, i)) && + (NV_PCI_RESOURCE_FLAGS(pci_dev, i) & PCI_BASE_ADDRESS_SPACE) + == PCI_BASE_ADDRESS_SPACE_MEMORY) + { + nv->bars[j].offset = NVRM_PCICFG_BAR_OFFSET(i); + nv->bars[j].cpu_address = NV_PCI_RESOURCE_START(pci_dev, i); + nv->bars[j].size = NV_PCI_RESOURCE_SIZE(pci_dev, i); + j++; + } + } + } + + // Assign empty BAR1 if absent + nv->fb = &nv->bars[NV_GPU_BAR_INDEX_FB]; + nv->interrupt_line = pci_dev->irq; + + NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_DISABLED); + nvl->numa_info.node_id = NUMA_NO_NODE; + +#if NV_IS_EXPORT_SYMBOL_GPL_pci_ats_supported + nv->ats_support = pci_ats_supported(nvl->pci_dev); +#else + nv->ats_support = nvl->pci_dev->ats_enabled; +#endif + + if 
(nv->ats_support) + { + int ret __attribute__ ((unused)); + + NV_DEV_PRINTF(NV_DBG_INFO, nv, "ATS supported by this GPU!\n"); + +#if defined(CONFIG_IOMMU_SVA) && \ + (defined(NV_IOASID_GET_PRESENT) || defined(NV_MM_PASID_DROP_PRESENT)) + ret = iommu_dev_enable_feature(nvl->dev, IOMMU_DEV_FEAT_SVA); + if (ret == 0) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "Enabled SMMU SVA feature! \n"); + } + else if (ret == -EBUSY) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "SMMU SVA feature already enabled!\n"); + } + else + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Enabling SMMU SVA feature failed! ret: %d\n", ret); + nv->ats_support = NV_FALSE; + } +#else + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Enabling SMMU SVA feature failed due to lack of necessary kernel configs.\n"); + nv->ats_support = NV_FALSE; +#endif + } + + if (pci_devid_is_self_hosted(pci_dev->device)) + { + nv_init_coherent_link_info(nv); + } + + nv_ats_supported |= nv->ats_support; + + nv_clk_get_handles(nv); + + pci_set_master(pci_dev); + +#if defined(CONFIG_VGA_ARB) +#if defined(VGA_DEFAULT_DEVICE) +#if defined(NV_VGA_TRYGET_PRESENT) + vga_tryget(VGA_DEFAULT_DEVICE, VGA_RSRC_LEGACY_MASK); +#endif +#endif + vga_set_legacy_decoding(pci_dev, VGA_RSRC_NONE); +#endif + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "GPU is lost, skipping nv_pci_probe\n"); + goto err_not_supported; + } + + if ((rm_is_supported_device(sp, nv)) != NV_OK) + goto err_not_supported; + + nv->cpu_numa_node_id = dev_to_node(nvl->dev); + + if (nv_linux_init_open_q(nvl) != 0) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "nv_linux_init_open_q() failed!\n"); + goto err_zero_dev; + } + + nv_printf(NV_DBG_INFO, + "NVRM: PCI:%04x:%02x:%02x.%x (%04x:%04x): BAR0 @ 0x%llx (%lluMB)\n", + nv->pci_info.domain, nv->pci_info.bus, nv->pci_info.slot, + PCI_FUNC(pci_dev->devfn), nv->pci_info.vendor_id, nv->pci_info.device_id, + nv->regs->cpu_address, (nv->regs->size >> 20)); + nv_printf(NV_DBG_INFO, + "NVRM: PCI:%04x:%02x:%02x.%x (%04x:%04x): BAR1 @ 0x%llx (%lluMB)\n", + nv->pci_info.domain, nv->pci_info.bus, nv->pci_info.slot, + PCI_FUNC(pci_dev->devfn), nv->pci_info.vendor_id, nv->pci_info.device_id, + nv->fb->cpu_address, (nv->fb->size >> 20)); + + num_nv_devices++; + + /* + * The newly created nvl object is added to the nv_linux_devices global list + * only after all the initialization operations for that nvl object are + * completed, so as to protect against simultaneous lookup operations which + * may discover a partially initialized nvl object in the list + */ + LOCK_NV_LINUX_DEVICES(); + + if (nv_linux_add_device_locked(nvl) != 0) + { + UNLOCK_NV_LINUX_DEVICES(); + goto err_add_device; + } + + UNLOCK_NV_LINUX_DEVICES(); + + pm_vt_switch_required(nvl->dev, NV_TRUE); + +#if defined(CONFIG_PM_DEVFREQ) + // Support dynamic power management if device is a tegra PCI iGPU + rm_init_tegra_dynamic_power_management(sp, nv); +#endif + + nv_init_dynamic_power_management(sp, pci_dev); + + nv_init_tegra_gpu_pg_mask(sp, pci_dev); + + nv_procfs_add_gpu(nvl); + + /* Parse and set any per-GPU registry keys specified. 
 */
+    nv_parse_per_device_option_string(sp);
+
+    rm_set_rm_firmware_requested(sp, nv);
+
+#if defined(NV_VGPU_KVM_BUILD)
+    if (nvidia_vgpu_vfio_probe(nvl->pci_dev) != NV_OK)
+    {
+        NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to register device to vGPU VFIO module");
+        goto err_free_all;
+    }
+#endif
+
+    nv_check_and_exclude_gpu(sp, nv);
+
+#if defined(DPM_FLAG_NO_DIRECT_COMPLETE)
+    dev_pm_set_driver_flags(nvl->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+#elif defined(DPM_FLAG_NEVER_SKIP)
+    dev_pm_set_driver_flags(nvl->dev, DPM_FLAG_NEVER_SKIP);
+#endif
+
+#if defined(CONFIG_PM_DEVFREQ)
+    /*
+     * Expose the clock control interface via the devfreq framework for the
+     * Tegra iGPU and let the Linux kernel itself handle all of the clock
+     * scaling logic for the Tegra iGPU.
+     *
+     * On Tegra platforms, most of the clocks are managed by the BPMP. The PMU
+     * inside the iGPU does not have a direct communication path to the BPMP,
+     * so using the existing clock management features (e.g., Pstates, PerfCf,
+     * etc.) with the PMU will not work.
+     */
+    if (nv_pci_tegra_register_devfreq(pci_dev) != 0)
+    {
+        NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to register linux devfreq");
+        goto err_free_all;
+    }
+#endif
+
+    /*
+     * Dynamic power management should be enabled as the last step.
+     * The kernel runtime power management framework can put the device
+     * into the suspended state. Hardware register access should not be done
+     * after enabling dynamic power management.
+     */
+    rm_enable_dynamic_power_management(sp, nv);
+
+    /*
+     * This must be the last action in nv_pci_probe(). Do not add code after this line.
+     */
+    rm_notify_gpu_addition(sp, nv);
+
+    nv_kmem_cache_free_stack(sp);
+
+    return 0;
+
+err_free_all:
+    nv_procfs_remove_gpu(nvl);
+    rm_cleanup_dynamic_power_management(sp, nv);
+    pm_vt_switch_unregister(nvl->dev);
+    LOCK_NV_LINUX_DEVICES();
+    nv_linux_remove_device_locked(nvl);
+    UNLOCK_NV_LINUX_DEVICES();
+err_add_device:
+    nv_linux_stop_open_q(nvl);
+err_zero_dev:
+    rm_free_private_state(sp, nv);
+err_not_supported:
+    nv_clk_clear_handles(nv);
+    nv_ats_supported = prev_nv_ats_supported;
+    nv_lock_destroy_locks(sp, nv);
+failed:
+    if (bar0_requested)
+    {
+        release_mem_region(NV_PCI_RESOURCE_START(pci_dev, regs_bar_index),
+                           NV_PCI_RESOURCE_SIZE(pci_dev, regs_bar_index));
+    }
+    NV_PCI_DISABLE_DEVICE(pci_dev);
+    pci_set_drvdata(pci_dev, NULL);
+    if (nvl != NULL)
+    {
+        NV_KFREE(nvl, sizeof(nv_linux_state_t));
+    }
+    nv_kmem_cache_free_stack(sp);
+    return -1;
+}
+
+static void
+nv_pci_remove(struct pci_dev *pci_dev)
+{
+    nv_linux_state_t *nvl = NULL;
+    nv_state_t *nv;
+    nvidia_stack_t *sp = NULL;
+    NvU8 regs_bar_index = nv_bar_index_to_os_bar_index(pci_dev,
+                                                       NV_GPU_BAR_INDEX_REGS);
+
+    nv_printf(NV_DBG_SETUP, "NVRM: removing GPU %04x:%02x:%02x.%x\n",
+              NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
+              NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
+
+#ifdef NV_PCI_SRIOV_SUPPORT
+    if (pci_dev->is_virtfn)
+    {
+#if defined(NV_VGPU_KVM_BUILD)
+        /* Arg 2 == NV_TRUE means that the PCI device should be removed */
+        nvidia_vgpu_vfio_remove(pci_dev, NV_TRUE);
+#endif /* NV_VGPU_KVM_BUILD */
+        return;
+    }
+#endif /* NV_PCI_SRIOV_SUPPORT */
+
+    if (nv_kmem_cache_alloc_stack(&sp) != 0)
+    {
+        return;
+    }
+
+    nvl = pci_get_drvdata(pci_dev);
+    if (!nvl || (nvl->pci_dev != pci_dev))
+    {
+        nv_kmem_cache_free_stack(sp);
+        return;
+    }
+
+    nv = NV_STATE_PTR(nvl);
+
+#if defined(CONFIG_IOMMU_SVA) && \
+    (defined(NV_IOASID_GET_PRESENT) || defined(NV_MM_PASID_DROP_PRESENT))
+    if (nv->ats_support)
+    {
+        int ret;
+
+        ret = iommu_dev_disable_feature(nvl->dev, IOMMU_DEV_FEAT_SVA);
+        if (ret == 0)
+        {
+            NV_DEV_PRINTF(NV_DBG_INFO, nv, "Disabled SMMU SVA feature! \n");
+        }
+        else
+        {
+            NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
+                          "Disabling SMMU SVA feature failed! ret: %d\n", ret);
+        }
+    }
+#endif
+
+    /*
+     * Flush and stop open_q before proceeding with removal to ensure nvl
+     * outlives all enqueued work items.
+     */
+    nv_linux_stop_open_q(nvl);
+
+    LOCK_NV_LINUX_DEVICES();
+    down(&nvl->ldata_lock);
+    nv->flags |= NV_FLAG_PCI_REMOVE_IN_PROGRESS;
+
+    rm_notify_gpu_removal(sp, nv);
+
+    /*
+     * Sanity check: A removed device shouldn't have a non-zero usage_count.
+     * For an eGPU, falling off the bus while clients are still active is a
+     * valid scenario, so the sanity check is skipped for eGPUs.
+     */
+    if ((NV_ATOMIC_READ(nvl->usage_count) != 0) && !(nv->is_external_gpu))
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "NVRM: Attempting to remove device %04x:%02x:%02x.%x with non-zero usage count!\n",
+                  NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
+                  NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
+
+        /*
+         * We can't return from this function without corrupting state, so we
+         * wait for the usage count to go to zero.
+         */
+        while (NV_ATOMIC_READ(nvl->usage_count) != 0)
+        {
+            /*
+             * While waiting, release the locks so that other threads can make
+             * forward progress.
+             */
+            up(&nvl->ldata_lock);
+            UNLOCK_NV_LINUX_DEVICES();
+
+            os_delay(500);
+
+            /* Re-acquire the locks before checking again */
+            LOCK_NV_LINUX_DEVICES();
+            nvl = pci_get_drvdata(pci_dev);
+            if (!nvl)
+            {
+                /* The device was not found, which should not happen */
+                nv_printf(NV_DBG_ERRORS,
+                          "NVRM: Failed removal of device %04x:%02x:%02x.%x!\n",
+                          NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
+                          NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
+                WARN_ON(1);
+                goto done;
+            }
+            nv = NV_STATE_PTR(nvl);
+            down(&nvl->ldata_lock);
+        }
+
+        nv_printf(NV_DBG_ERRORS,
+                  "NVRM: Continuing with GPU removal for device %04x:%02x:%02x.%x\n",
+                  NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
+                  NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
+    }
+
+    rm_check_for_gpu_surprise_removal(sp, nv);
+
+    nv_linux_remove_device_locked(nvl);
+
+    /* Remove proc entry for this GPU */
+    nv_procfs_remove_gpu(nvl);
+
+#if defined(CONFIG_PM_DEVFREQ)
+    nv_pci_tegra_unregister_devfreq(pci_dev);
+#endif
+
+    nv_clk_clear_handles(nv);
+
+    rm_cleanup_dynamic_power_management(sp, nv);
+
+    nv->removed = NV_TRUE;
+
+    UNLOCK_NV_LINUX_DEVICES();
+
+    pm_vt_switch_unregister(&pci_dev->dev);
+
+#if defined(NV_VGPU_KVM_BUILD)
+    /* Arg 2 == NV_TRUE means that the PCI device should be removed */
+    nvidia_vgpu_vfio_remove(pci_dev, NV_TRUE);
+#endif
+
+    if ((nv->flags & NV_FLAG_PERSISTENT_SW_STATE) || (nv->flags & NV_FLAG_OPEN))
+    {
+        nv_acpi_unregister_notifier(nvl);
+        if (nv->flags & NV_FLAG_PERSISTENT_SW_STATE)
+        {
+            rm_disable_gpu_state_persistence(sp, nv);
+        }
+        nv_shutdown_adapter(sp, nv, nvl);
+        nv_dev_free_stacks(nvl);
+    }
+
+    if (nvl->sysfs_config_file != NULL)
+    {
+        filp_close(nvl->sysfs_config_file, NULL);
+        nvl->sysfs_config_file = NULL;
+    }
+
+    if (NV_ATOMIC_READ(nvl->usage_count) == 0)
+    {
+        nv_lock_destroy_locks(sp, nv);
+    }
+
+    num_probed_nv_devices--;
+
+    pci_set_drvdata(pci_dev, NULL);
+
+    rm_i2c_remove_adapters(sp, nv);
+    rm_free_private_state(sp, nv);
+    release_mem_region(NV_PCI_RESOURCE_START(pci_dev, regs_bar_index),
+                       NV_PCI_RESOURCE_SIZE(pci_dev, regs_bar_index));
+
+    num_nv_devices--;
+
+    if (NV_ATOMIC_READ(nvl->usage_count) == 0)
+    {
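+        /*
+         * No clients remain, so the device can be disabled and the per-GPU
+         * state freed here; otherwise only ldata_lock is dropped below and
+         * teardown is presumably deferred to the last client release.
+         */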
+        NV_PCI_DISABLE_DEVICE(pci_dev);
+        NV_KFREE(nvl, sizeof(nv_linux_state_t));
+    }
+    else
+    {
+        up(&nvl->ldata_lock);
+    }
+
+    nv_kmem_cache_free_stack(sp);
+    return;
+
+done:
+    UNLOCK_NV_LINUX_DEVICES();
+    nv_kmem_cache_free_stack(sp);
+}
+
+static void
+nv_pci_shutdown(struct pci_dev *pci_dev)
+{
+    nv_linux_state_t *nvl = pci_get_drvdata(pci_dev);
+
+    if ((nvl != NULL) && nvl->is_forced_shutdown)
+    {
+        nvl->is_forced_shutdown = NV_FALSE;
+        return;
+    }
+
+    if (nvl != NULL)
+    {
+        nvl->nv_state.is_shutdown = NV_TRUE;
+    }
+
+#if defined(CONFIG_PM_DEVFREQ)
+    nv_pci_tegra_unregister_devfreq(pci_dev);
+#endif
+
+    /* pci_clear_master is not defined for !CONFIG_PCI */
+#ifdef CONFIG_PCI
+    pci_clear_master(pci_dev);
+#endif
+
+    /* SHH HW mandates a 1us delay for the effects of disabling Bus Master
+     * Enable (BME) to take hold. Add the 1us delay for all chips, since the
+     * delay is not in the data path and is small. Creating a HAL for this
+     * would be overkill.
+     */
+    udelay(1);
+}
+
+/*!
+ * @brief This function accepts pci information corresponding to a GPU
+ * and returns a reference to the nv_linux_state_t corresponding to that GPU.
+ *
+ * @param[in]   domain     PCI domain number for the GPU to be found.
+ * @param[in]   bus        PCI bus number for the GPU to be found.
+ * @param[in]   slot       PCI slot number for the GPU to be found.
+ * @param[in]   function   PCI function number for the GPU to be found.
+ *
+ * @return Pointer to nv_linux_state_t for the GPU if it is found, or NULL otherwise.
+ */
+nv_linux_state_t * find_pci(NvU32 domain, NvU8 bus, NvU8 slot, NvU8 function)
+{
+    nv_linux_state_t *nvl = NULL;
+
+    LOCK_NV_LINUX_DEVICES();
+
+    for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next)
+    {
+        nv_state_t *nv = NV_STATE_PTR(nvl);
+
+        if (nv->pci_info.domain == domain &&
+            nv->pci_info.bus == bus &&
+            nv->pci_info.slot == slot &&
+            nv->pci_info.function == function)
+        {
+            break;
+        }
+    }
+
+    UNLOCK_NV_LINUX_DEVICES();
+    return nvl;
+}
+
+int nvidia_dev_get_pci_info(const NvU8 *uuid, struct pci_dev **pci_dev_out,
+                            NvU64 *dma_start, NvU64 *dma_limit)
+{
+    nv_linux_state_t *nvl;
+
+    /* Takes nvl->ldata_lock */
+    nvl = find_uuid(uuid);
+    if (!nvl)
+        return -ENODEV;
+
+    *pci_dev_out = nvl->pci_dev;
+    *dma_start = nvl->dma_dev.addressable_range.start;
+    *dma_limit = nvl->dma_dev.addressable_range.limit;
+
+    up(&nvl->ldata_lock);
+
+    return 0;
+}
+
+NvU8 nv_find_pci_capability(struct pci_dev *pci_dev, NvU8 capability)
+{
+    u16 status = 0;
+    u8  cap_ptr = 0, cap_id = 0xff;
+
+    pci_read_config_word(pci_dev, PCI_STATUS, &status);
+    status &= PCI_STATUS_CAP_LIST;
+    if (!status)
+        return 0;
+
+    switch (pci_dev->hdr_type) {
+        case PCI_HEADER_TYPE_NORMAL:
+        case PCI_HEADER_TYPE_BRIDGE:
+            pci_read_config_byte(pci_dev, PCI_CAPABILITY_LIST, &cap_ptr);
+            break;
+        default:
+            return 0;
+    }
+
+    do {
+        cap_ptr &= 0xfc;
+        pci_read_config_byte(pci_dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id);
+        if (cap_id == capability)
+            return cap_ptr;
+        pci_read_config_byte(pci_dev, cap_ptr + PCI_CAP_LIST_NEXT, &cap_ptr);
+    } while (cap_ptr && cap_id != 0xff);
+
+    return 0;
+}
+
+static void check_for_bound_driver(struct pci_dev *pci_dev)
+{
+    if (pci_dev->dev.driver)
+    {
+        const char *driver_name = pci_dev->dev.driver->name;
+
+        nv_printf(NV_DBG_WARNINGS, "NVRM: GPU %04x:%02x:%02x.%x is already "
+                  "bound to %s.\n",
+                  NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
+                  NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn),
+                  driver_name ? driver_name : "another driver"
+                  );
+    }
+}
+
+/* make sure the pci_driver called probe for all of our devices.
+ * we've seen cases where rivafb claims the device first and our driver + * doesn't get called. + */ +int +nv_pci_count_devices(void) +{ + struct pci_dev *pci_dev; + int count = 0; + + if (NVreg_RegisterPCIDriver == 0) + { + return 0; + } + + pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); + while (pci_dev) + { + if (rm_is_supported_pci_device( + PCI_BASE_CLASS_DISPLAY, + PCI_CLASS_DISPLAY_VGA & 0xFF, + pci_dev->vendor, + pci_dev->device, + pci_dev->subsystem_vendor, + pci_dev->subsystem_device, + NV_TRUE /* print_legacy_warning */)) + { + check_for_bound_driver(pci_dev); + count++; + } + pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pci_dev); + } + + pci_dev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, NULL); + while (pci_dev) + { + if (rm_is_supported_pci_device( + (pci_dev->class >> 16) & 0xFF, + (pci_dev->class >> 8) & 0xFF, + pci_dev->vendor, + pci_dev->device, + pci_dev->subsystem_vendor, + pci_dev->subsystem_device, + NV_TRUE /* print_legacy_warning */)) + { + check_for_bound_driver(pci_dev); + count++; + } + pci_dev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pci_dev); + } + + return count; +} + +/* + * On coherent platforms that support BAR1 mappings for GPUDirect RDMA, + * dma-buf and nv-p2p subsystems need to ensure the 2 devices belong to + * the same IOMMU group. + */ +NvBool nv_pci_is_valid_topology_for_direct_pci( + nv_state_t *nv, + struct pci_dev *peer +) +{ + struct pci_dev *pdev0 = to_pci_dev(nv->dma_dev->dev); + struct pci_dev *pdev1 = peer; + + if (!nv->coherent) + { + return NV_FALSE; + } + + switch (NVreg_GrdmaPciTopoCheckOverride) { + case NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_ALLOW_ACCESS: + return NV_TRUE; + case NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_DENY_ACCESS: + return NV_FALSE; + default: + return (pdev0->dev.iommu_group == pdev1->dev.iommu_group); + } +} + +NvBool nv_pci_has_common_pci_switch( + nv_state_t *nv, + struct pci_dev *peer +) +{ + struct pci_dev *pci_dev0, *pci_dev1; + + pci_dev0 = pci_upstream_bridge(to_pci_dev(nv->dma_dev->dev)); + + while (pci_dev0 != NULL) + { + pci_dev1 = pci_upstream_bridge(peer); + + while (pci_dev1 != NULL) + { + if (pci_dev0 == pci_dev1) + return NV_TRUE; + + pci_dev1 = pci_upstream_bridge(pci_dev1); + } + + pci_dev0 = pci_upstream_bridge(pci_dev0); + } + + return NV_FALSE; +} + +NvBool NV_API_CALL nv_grdma_pci_topology_supported( + nv_state_t *nv, + nv_dma_device_t *dma_peer +) +{ + // + // Skip topo check on coherent platforms since + // NIC can map over C2C anyway and PCIe topology shouldn't matter. + // + if (nv->coherent) + { + return NV_TRUE; + } + + switch (NVreg_GrdmaPciTopoCheckOverride) + { + case NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_ALLOW_ACCESS: + return NV_TRUE; + case NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_DENY_ACCESS: + return NV_FALSE; + default: + break; + } + + // Allow RDMA by default on passthrough VMs. 
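+    // (In a passthrough VM the guest typically sees a synthetic PCI topology,
+    // so a switch-level check would not be meaningful there.)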
+    if ((nv->flags & NV_FLAG_PASSTHRU) != 0)
+        return NV_TRUE;
+
+    //
+    // Only allow RDMA on unsupported chipsets if there exists
+    // a common PCI switch between the GPU and the other device
+    //
+    if ((nv->flags & NV_FLAG_PCI_P2P_UNSUPPORTED_CHIPSET) != 0)
+        return nv_pci_has_common_pci_switch(nv, to_pci_dev(dma_peer->dev));
+
+    return NV_TRUE;
+}
+
+#if defined(CONFIG_PM)
+extern struct dev_pm_ops nv_pm_ops;
+#endif
+
+struct pci_driver nv_pci_driver = {
+    .name      = MODULE_NAME,
+    .id_table  = nv_pci_table,
+    .probe     = nv_pci_probe,
+    .remove    = nv_pci_remove,
+    .shutdown  = nv_pci_shutdown,
+#if defined(NV_USE_VFIO_PCI_CORE) && \
+    defined(NV_PCI_DRIVER_HAS_DRIVER_MANAGED_DMA)
+    .driver_managed_dma = NV_TRUE,
+#endif
+#if defined(CONFIG_PM)
+    .driver.pm = &nv_pm_ops,
+#endif
+};
+
+void nv_pci_unregister_driver(void)
+{
+    if (NVreg_RegisterPCIDriver == 0)
+    {
+        return;
+    }
+    pci_unregister_driver(&nv_pci_driver);
+}
+
+int nv_pci_register_driver(void)
+{
+    if (NVreg_RegisterPCIDriver == 0)
+    {
+        return 0;
+    }
+    return pci_register_driver(&nv_pci_driver);
+}
diff --git a/kernel-open/nvidia/nv-platform-pm.c b/kernel-open/nvidia/nv-platform-pm.c
new file mode 100644
index 0000000..7bbd6e2
--- /dev/null
+++ b/kernel-open/nvidia/nv-platform-pm.c
@@ -0,0 +1,130 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+/*!
+ * @brief Unpowergate the display.
+ *
+ * Increment the device's usage counter, run pm_request_resume(dev)
+ * and return its result.
+ *
+ * For more details on runtime pm functions, please check the below
+ * files in the Linux kernel:
+ *
+ * include/linux/pm_runtime.h
+ * include/linux/pm.h
+ * or
+ * https://www.kernel.org/doc/Documentation/power/runtime_pm.txt
+ *
+ * pm_request_resume() submits a request to execute the subsystem-level
+ * resume callback for the device (the request is represented by a work
+ * item in pm_wq); returns 0 on success, 1 if the device's runtime PM
+ * status was already 'active', or error code if the request hasn't
+ * been queued up.
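+ *
+ * Illustrative pairing (a sketch, not code taken from this driver):
+ *
+ *     nv_soc_pm_unpowergate(nv);   // pm_runtime_get():  usage counter++
+ *     ... access display hardware ...
+ *     nv_soc_pm_powergate(nv);     // pm_runtime_put():  usage counter--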
+ * + * @param[in] nv Per gpu linux state + * + * @returns NV_STATUS + */ +NV_STATUS NV_API_CALL nv_soc_pm_unpowergate( + nv_state_t *nv) +{ +#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvS32 ret = -EBUSY; + + ret = pm_runtime_get(nvl->dev); + + if (ret == 1) + { + nv_printf(NV_DBG_INFO, "NVRM: device was already unpowergated\n"); + } + else if (ret == -EINPROGRESS) + { + /* + * pm_runtime_get() internally calls __pm_runtime_resume(...RPM_ASYNC) + * which internally calls rpm_resume() and this function will throw + * "-EINPROGRESS" if it is being called when device state is + * RPM_RESUMING and RPM_ASYNC or RPM_NOWAIT is set. + */ + nv_printf(NV_DBG_INFO, "NVRM: device is already unpowergating\n"); + } + else if (ret < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: unpowergate unsuccessful. ret: %d\n", ret); + return NV_ERR_GENERIC; + } + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +/*! + * @brief Powergate the display. + * + * Decrement the device's usage counter; if the result is 0 then run + * pm_request_idle(dev) and return its result. + * + * For more details on runtime pm functions, please check the below + * files in the Linux kernel: + * + * include/linux/pm_runtime.h + * include/linux/pm.h + * or + * https://www.kernel.org/doc/Documentation/power/runtime_pm.txt + * + * @param[in] nv Per gpu linux state + * + * @returns NV_STATUS + */ +NV_STATUS NV_API_CALL nv_soc_pm_powergate( + nv_state_t *nv) +{ +#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE + NV_STATUS status = NV_ERR_GENERIC; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvS32 ret = -EBUSY; + + ret = pm_runtime_put(nvl->dev); + + if (ret == 0) + { + status = NV_OK; + } + else + { + nv_printf(NV_DBG_ERRORS, "NVRM: powergate unsuccessful. ret: %d\n", ret); + } + + return status; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + diff --git a/kernel-open/nvidia/nv-platform.c b/kernel-open/nvidia/nv-platform.c new file mode 100644 index 0000000..ab2557b --- /dev/null +++ b/kernel-open/nvidia/nv-platform.c @@ -0,0 +1,1700 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include +#include + +#include "nv-platform.h" +#include "nv-linux.h" +#include + +static irqreturn_t +nvidia_soc_isr_kthread_bh( + int irq, + void *data +) +{ + NV_STATUS status; + irqreturn_t ret; + nv_state_t *nv = (nv_state_t *) data; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU32 irq_count; + unsigned long flags; + + // + // Synchronize kthreads servicing bottom halves + // + status = os_acquire_mutex(nvl->soc_bh_mutex); + + // os_acquire_mutex can only fail if we cannot sleep and we can + WARN_ON(status != NV_OK); + + ret = nvidia_isr_kthread_bh(irq, data); + + os_release_mutex(nvl->soc_bh_mutex); + + NV_SPIN_LOCK_IRQSAVE(&nvl->soc_isr_lock, flags); + for (irq_count = 0; irq_count < nv->num_soc_irqs; irq_count++) + { + if (nv->soc_irq_info[irq_count].irq_num == irq) + { + nv->soc_irq_info[irq_count].bh_pending = NV_FALSE; + } + if (nv->soc_irq_info[irq_count].ref_count == 0) + { + nv->soc_irq_info[irq_count].ref_count++; + enable_irq(nv->soc_irq_info[irq_count].irq_num); + } + } + nv->current_soc_irq = -1; + NV_SPIN_UNLOCK_IRQRESTORE(&nvl->soc_isr_lock, flags); + + return ret; +} + +static irqreturn_t nvidia_soc_isr(int irq, void *arg) +{ + unsigned long flags; + irqreturn_t ret; + nv_linux_state_t *nvl = (void *) arg; + nv_state_t *nv = NV_STATE_PTR(nvl); + NvU32 irq_count; + + NV_SPIN_LOCK_IRQSAVE(&nvl->soc_isr_lock, flags); + + /* + * > Only 1 interrupt at a time is allowed to be serviced. + * > So when bh_pending is true, bottom half is scheduled/active + * and serving previous interrupt by disabling all interrupts + * at interrupt controller level, also here GPU lock is already + * taken so this interrupt will anyways be blocked until bottom + * half releases GPU lock, so return early for now. + * > Once bottom half processed earlier interrupt, it will release + * GPU lock and re-enable all interrupts and set bh_pending to + * false. Upon re-enabling, this interrupt will be serviced + * again because all interrupts that we care are level triggered. + */ + for (irq_count = 0; irq_count < nv->num_soc_irqs; irq_count++) + { + if (nv->soc_irq_info[irq_count].bh_pending == NV_TRUE) + { + NV_SPIN_UNLOCK_IRQRESTORE(&nvl->soc_isr_lock, flags); + /* + * Return IRQ_HANDLED to prevent the Linux kernel from thinking + * that this interrupt is spurious. Since all interrupts that we + * care are level triggered the interrupt should still continue + * to fire later. + */ + return IRQ_HANDLED; + } + } + nv->current_soc_irq = irq; + for (irq_count = 0; irq_count < nv->num_soc_irqs; irq_count++) + { + if (nv->soc_irq_info[irq_count].ref_count == 1) + { + nv->soc_irq_info[irq_count].ref_count--; + disable_irq_nosync(nv->soc_irq_info[irq_count].irq_num); + } + } + + ret = nvidia_isr(irq, arg); + if (ret == IRQ_WAKE_THREAD) + { + for (irq_count = 0; irq_count < nv->num_soc_irqs; irq_count++) + { + if (nv->soc_irq_info[irq_count].irq_num == irq) + { + nv->soc_irq_info[irq_count].bh_pending = NV_TRUE; + } + } + } + else + { + for (irq_count = 0; irq_count < nv->num_soc_irqs; irq_count++) + { + if (nv->soc_irq_info[irq_count].ref_count == 0) + { + nv->soc_irq_info[irq_count].ref_count++; + enable_irq(nv->soc_irq_info[irq_count].irq_num); + } + } + nv->current_soc_irq = -1; + } + + NV_SPIN_UNLOCK_IRQRESTORE(&nvl->soc_isr_lock, flags); + + /* + * Return IRQ_HANDLED to prevent the Linux kernel from thinking + * that this interrupt is spurious. Since all interrupts that we + * care are level triggered the interrupt should still continue + * to fire later. 
+ */ + if (ret == IRQ_NONE) + { + ret = IRQ_HANDLED; + } + + return ret; +} + +NvS32 nv_request_soc_irq( + nv_linux_state_t *nvl, + NvU32 irq, + nv_soc_irq_type_t type, + NvU32 flags, + NvU32 priv_data, + const char *device_name) +{ + nv_state_t *nv = NV_STATE_PTR(nvl); + NvS32 ret; + NvU32 irq_index; + + if (nv->num_soc_irqs >= NV_MAX_SOC_IRQS) + { + nv_printf(NV_DBG_ERRORS, "Exceeds Maximum SOC interrupts\n"); + return -EINVAL; + } + + ret = request_threaded_irq(irq, nvidia_soc_isr, nvidia_soc_isr_kthread_bh, + flags, device_name, (void *)nvl); + if (ret != 0) + { + nv_printf(NV_DBG_ERRORS, "nv_request_soc_irq for irq %d failed\n", irq); + return ret; + } + + disable_irq_nosync(irq); + + irq_index = nv->num_soc_irqs; + nv->soc_irq_info[irq_index].irq_num = irq; + nv->soc_irq_info[irq_index].irq_type = type; + if (type == NV_SOC_IRQ_GPIO_TYPE) + { + nv->soc_irq_info[irq_index].irq_data.gpio_num = priv_data; + } + else if (type == NV_SOC_IRQ_DPAUX_TYPE) + { + nv->soc_irq_info[irq_index].irq_data.dpaux_instance = priv_data; + } + nv->num_soc_irqs++; + nv->soc_irq_info[irq_index].ref_count = 0; + + return ret; +} + +nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t *nv) +{ + int count; + + for (count = 0; count < nv->num_soc_irqs; count++) + { + if (nv->soc_irq_info[count].irq_num == nv->current_soc_irq) + { + return nv->soc_irq_info[count].irq_type; + } + } + + return NV_SOC_IRQ_INVALID_TYPE; +} + +NV_STATUS NV_API_CALL nv_get_current_irq_priv_data(nv_state_t *nv, NvU32 *priv_data) +{ + int count; + + if (nv->current_soc_irq == -1) + { + nv_printf(NV_DBG_ERRORS, "%s:No SOC interrupt in progress\n", __func__); + return NV_ERR_GENERIC; + } + + for (count = 0; count < nv->num_soc_irqs; count++) + { + if (nv->soc_irq_info[count].irq_num == nv->current_soc_irq) + { + if (nv->soc_irq_info[count].irq_type == NV_SOC_IRQ_GPIO_TYPE) + { + *priv_data = nv->soc_irq_info[count].irq_data.gpio_num; + } + else if (nv->soc_irq_info[count].irq_type == NV_SOC_IRQ_DPAUX_TYPE) + { + *priv_data = nv->soc_irq_info[count].irq_data.dpaux_instance; + } + } + } + + return NV_OK; +} + +static void nv_soc_free_irq_by_type(nv_state_t *nv, nv_soc_irq_type_t type) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int count; + + if ((nv->num_soc_irqs == 0) || (type == 0)) + { + return; + } + + for (count = 0; count < NV_MAX_SOC_IRQS; count++) + { + if (type == nv->soc_irq_info[count].irq_type) + { + free_irq(nv->soc_irq_info[count].irq_num, (void *)nvl); + nv->soc_irq_info[count].irq_type = 0; + nv->soc_irq_info[count].irq_num = 0; + nv->soc_irq_info[count].bh_pending = NV_FALSE; + nv->soc_irq_info[count].ref_count = 0; + nv->num_soc_irqs--; + } + } +} + +int nv_soc_register_irqs(nv_state_t *nv) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int rc; + int dpauxindex; + + /* Skip registering interrupts for OpenRM */ + if (nv->request_firmware) + return 0; + + nv->current_soc_irq = -1; + + rc = nv_request_soc_irq(nvl, nv->interrupt_line, + NV_SOC_IRQ_DISPLAY_TYPE, + nv_default_irq_flags(nv), 0, + "nvdisplay"); + if (rc != 0) + { + nv_printf(NV_DBG_ERRORS, "failed to request display irq (%d)\n", rc); + return rc; + } + + rc = nv_request_soc_irq(nvl, nv->hdacodec_irq, NV_SOC_IRQ_HDACODEC_TYPE, + nv_default_irq_flags(nv), 0, "hdacodec"); + if (rc != 0) + { + nv_printf(NV_DBG_ERRORS, "failed to request hdacodec irq (%d)\n", rc); + free_irq(nv->interrupt_line, (void *) nvl); + return rc; + } + + if (nv->soc_is_dpalt_mode_supported) + { + /* Type-C port controller to display interrupt*/ + rc 
= nv_request_soc_irq(nvl, nv->tcpc2disp_irq, NV_SOC_IRQ_TCPC2DISP_TYPE, + nv_default_irq_flags(nv), 0, "tcpc2disp"); + if (rc != 0) + { + nv_printf(NV_DBG_ERRORS, "failed to request Tcpc2disp irq (%d)\n", rc); + free_irq(nv->interrupt_line, (void *) nvl); + free_irq(nv->hdacodec_irq, (void *)nvl); + return rc; + } + } + + if (nv->soc_is_hfrp_supported) + { + rc = nv_request_soc_irq(nvl, nv->hfrp0_irq, NV_SOC_IRQ_HFRP0_TYPE, + nv_default_irq_flags(nv), 0, "hfrp0"); + if (rc != 0) + { + nv_printf(NV_DBG_ERRORS, "failed to request hfrp0 irq (%d)\n", rc); + free_irq(nv->interrupt_line, (void *) nvl); + free_irq(nv->hdacodec_irq, (void *)nvl); + if (nv->soc_is_dpalt_mode_supported) + { + free_irq(nv->tcpc2disp_irq, (void *) nvl); + } + return rc; + } + + rc = nv_request_soc_irq(nvl, nv->hfrp1_irq, NV_SOC_IRQ_HFRP1_TYPE, + nv_default_irq_flags(nv), 0, "hfrp1"); + if (rc != 0) + { + nv_printf(NV_DBG_ERRORS, "failed to request hfrp1 irq (%d)\n", rc); + free_irq(nv->interrupt_line, (void *) nvl); + free_irq(nv->hdacodec_irq, (void *)nvl); + if (nv->soc_is_dpalt_mode_supported) + { + free_irq(nv->tcpc2disp_irq, (void *) nvl); + } + free_irq(nv->hfrp0_irq, (void *) nvl); + return rc; + } + } + + for (dpauxindex = 0; dpauxindex < nv->num_dpaux_instance; dpauxindex++) + { + rc = nv_request_soc_irq(nvl, nv->dpaux_irqs[dpauxindex], + NV_SOC_IRQ_DPAUX_TYPE, + nv_default_irq_flags(nv), + dpauxindex, + nv->dpaux_devname[dpauxindex]); + if (rc != 0) + { + nv_printf(NV_DBG_ERRORS, "failed to request dpaux irq (%d)\n", rc); + free_irq(nv->interrupt_line, (void *)nvl); + free_irq(nv->hdacodec_irq, (void *)nvl); + if (nv->soc_is_dpalt_mode_supported) + { + free_irq(nv->tcpc2disp_irq, (void *)nvl); + } + if (nv->soc_is_hfrp_supported) + { + free_irq(nv->hfrp0_irq, (void *) nvl); + free_irq(nv->hfrp1_irq, (void *) nvl); + } + return rc; + } + } + + return 0; +} + +void nv_soc_free_irqs(nv_state_t *nv) +{ + nv_soc_free_irq_by_type(nv, NV_SOC_IRQ_DISPLAY_TYPE); + nv_soc_free_irq_by_type(nv, NV_SOC_IRQ_HDACODEC_TYPE); + nv_soc_free_irq_by_type(nv, NV_SOC_IRQ_DPAUX_TYPE); + nv_soc_free_irq_by_type(nv, NV_SOC_IRQ_GPIO_TYPE); + + if (nv->soc_is_dpalt_mode_supported) + { + nv_soc_free_irq_by_type(nv, NV_SOC_IRQ_TCPC2DISP_TYPE); + } + if (nv->soc_is_hfrp_supported) + { + nv_soc_free_irq_by_type(nv, NV_SOC_IRQ_HFRP0_TYPE); + nv_soc_free_irq_by_type(nv, NV_SOC_IRQ_HFRP1_TYPE); + } +} + +static void nv_platform_free_device_dpaux(nv_state_t *nv) +{ + int dpauxindex; + + for (dpauxindex = 0; dpauxindex < nv->num_dpaux_instance; dpauxindex++) + { + /* Note that the memory region is being released only for dpaux0. This is + * because the memory mapping is done only for dpaux0. Refer to the memory + * mapping section in nv_platform_alloc_device_dpaux() for details on + * why this is done, and how the same mapping is reused for other dpaux + * instances. 
+ */
+        if ((dpauxindex == 0) &&
+            (nv->dpaux[dpauxindex] != NULL) &&
+            (nv->dpaux[dpauxindex]->size != 0) &&
+            (nv->dpaux[dpauxindex]->cpu_address != 0))
+        {
+            release_mem_region(nv->dpaux[dpauxindex]->cpu_address,
+                               nv->dpaux[dpauxindex]->size);
+        }
+
+        if (nv->dpaux[dpauxindex] != NULL)
+        {
+            NV_KFREE(nv->dpaux[dpauxindex], sizeof(*(nv->dpaux[dpauxindex])));
+        }
+    }
+}
+
+static int nv_platform_alloc_device_dpaux(struct platform_device *plat_dev, nv_state_t *nv)
+{
+#if NV_SUPPORTS_PLATFORM_DEVICE
+    const char *sdpaux = "dpaux";
+    int dpauxindex = 0;
+    int irq = 0;
+    int rc = 0;
+    int num_dpaux_instance = 0;
+    const struct resource *res;
+    phys_addr_t res_addr = 0;
+    resource_size_t res_size = 0;
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+
+    nv->num_dpaux_instance = 0;
+    if (!of_property_read_u32(nvl->dev->of_node, "nvidia,num-dpaux-instance", &num_dpaux_instance))
+    {
+        nv->num_dpaux_instance = (unsigned) num_dpaux_instance;
+        nv_printf(NV_DBG_INFO, "NVRM: Found %d dpAux instances in device tree.\n",
+                  num_dpaux_instance);
+    }
+
+    if (nv->num_dpaux_instance > NV_MAX_DPAUX_NUM_DEVICES)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Number of dpAux instances [%d] in the device tree exceeds "
+                  "the allowed maximum [%d]. Initializing %d dpAux instances.\n", nv->num_dpaux_instance,
+                  NV_MAX_DPAUX_NUM_DEVICES, NV_MAX_DPAUX_NUM_DEVICES);
+        nv->num_dpaux_instance = NV_MAX_DPAUX_NUM_DEVICES;
+    }
+
+    /* The memory region is mapped only for dpaux0 because the size specified
+     * in the device tree for the dpaux0 register space is large enough to
+     * accommodate the register spaces of all dpaux instances. The iomapped
+     * memory of dpaux0 is reused for all other dpaux instances.
+     *
+     * This is also required by RM, because RM indexes the dpaux register space
+     * for all dpaux instances using the dpaux0 base address.
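+     *
+     * Illustrative device tree fragment (a sketch; the node name and unit
+     * address are assumed, while the property and resource names match the
+     * lookups in this file):
+     *
+     *     display@0 {
+     *         nvidia,num-dpaux-instance = <2>;
+     *         reg-names = "nvdisplay", "dpaux0", "hdacodec", "mipical";
+     *         interrupt-names = "nvdisplay", "dpaux0", "dpaux1", "hdacodec";
+     *     };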
+ */ + res = platform_get_resource_byname(plat_dev, IORESOURCE_MEM, "dpaux0"); + if (!res) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get IO memory resource\n"); + rc = -ENXIO; + goto err_free_dpaux_dev; + } + res_addr = res->start; + res_size = res->end - res->start; + + if (!request_mem_region(res_addr, res_size, nv_device_name)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: request_mem_region failed for %pa\n", + &res_addr); + rc = -ENXIO; + goto err_free_dpaux_dev; + } + + for (dpauxindex = 0; dpauxindex < nv->num_dpaux_instance; dpauxindex++) + { + snprintf(nv->dpaux_devname[dpauxindex], sizeof(nv->dpaux_devname[dpauxindex]), "%s%d", sdpaux, dpauxindex); + + NV_KMALLOC(nv->dpaux[dpauxindex], sizeof(*(nv->dpaux[dpauxindex]))); + if (nv->dpaux[dpauxindex] == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate nv->dpaux[%d] memory\n", dpauxindex); + rc = -ENOMEM; + goto err_free_dpaux_dev; + } + + os_mem_set(nv->dpaux[dpauxindex], 0, sizeof(*(nv->dpaux[dpauxindex]))); + + irq = platform_get_irq_byname(plat_dev, nv->dpaux_devname[dpauxindex]); + if (irq < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get IO irq resource\n"); + rc = irq; + goto err_free_dpaux_dev; + } + + nv->dpaux[dpauxindex]->cpu_address = res_addr; + nv->dpaux[dpauxindex]->size = res_size; + nv->dpaux_irqs[dpauxindex] = irq; + } + + return rc; + +err_free_dpaux_dev: + nv_platform_free_device_dpaux(nv); + + return rc; +#else + return -1; +#endif +} + +NV_STATUS NV_API_CALL nv_soc_device_reset(nv_state_t *nv) +{ +#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE + NV_STATUS status = NV_OK; + int rc = 0; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + /* + * Skip all reset functions if the 'nvidia,skip-clk-rsts' DT property + * is present. This property is currently present in the System FPGA device + * tree because the BPMP firmware isn't available on FPGA yet. + */ + bool skip_clk_rsts = of_property_read_bool(nvl->dev->of_node, "nvidia,skip-clk-rsts"); + if (!skip_clk_rsts) + { + if (nvl->nvdisplay_reset != NULL) + { + rc = reset_control_reset(nvl->nvdisplay_reset); + if (rc != 0) + { + status = NV_ERR_GENERIC; + nv_printf(NV_DBG_ERRORS, "NVRM: reset_control_reset failed, rc: %d\n", rc); + goto out; + } + } + + if (nvl->dpaux0_reset != NULL) + { + rc = reset_control_reset(nvl->dpaux0_reset); + if (rc != 0) + { + status = NV_ERR_GENERIC; + nv_printf(NV_DBG_ERRORS, "NVRM: reset_control_reset failed, rc: %d\n", rc); + goto out; + } + } + + if (nvl->dsi_core_reset != NULL) + { + rc = reset_control_reset(nvl->dsi_core_reset); + if (rc != 0) + { + status = NV_ERR_GENERIC; + nv_printf(NV_DBG_ERRORS, "NVRM: reset_control_reset failed, rc: %d\n", rc); + goto out; + } + } + + if (nvl->hdacodec_reset != NULL) + { + /* + * HDACODEC reset control is shared between display driver and audio driver. + * Since reset_control_reset toggles the reset signal, we prefer to use + * reset_control_deassert. Additionally, since Audio driver uses + * reset_control_bulk_deassert() which internally calls reset_control_deassert, + * we must use reset_control_deassert, because consumers must not use + * reset_control_reset on shared reset lines when reset_control_deassert has + * been used. 
+ */ + rc = reset_control_deassert(nvl->hdacodec_reset); + if (rc != 0) + { + status = NV_ERR_GENERIC; + nv_printf(NV_DBG_ERRORS, "NVRM: hdacodec reset_control_deassert failed, rc: %d\n", rc); + goto out; + } + } + } +out: + return status; +#else + return NV_OK; +#endif +} + +NV_STATUS NV_API_CALL nv_soc_mipi_cal_reset(nv_state_t *nv) +{ + NV_STATUS status = NV_OK; +#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE + int rc = 0; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + bool skip_clk_rsts = of_property_read_bool(nvl->dev->of_node, "nvidia,skip-clk-rsts"); + + if (skip_clk_rsts) + return NV_OK; + + if (nvl->mipi_cal_reset != NULL) + { + rc = reset_control_reset(nvl->mipi_cal_reset); + if (rc != 0) + { + status = NV_ERR_GENERIC; + nv_printf(NV_DBG_ERRORS, "NVRM: mipi_cal reset_control_reset failed, rc: %d\n", rc); + } + } + else + { + status = NV_ERR_GENERIC; + } +#endif + return status; +} + +// This function gets called only for Tegra +static NV_STATUS nv_platform_get_iommu_availability(struct platform_device *plat_dev, + nv_state_t *nv) +{ + struct device_node *np = plat_dev->dev.of_node; + struct device_node *niso_np_with_iommus = NULL; + struct device_node *niso_np = NULL; + struct device_node *iso_np = NULL; + NV_STATUS status = NV_OK; + + nv->iommus.iso_iommu_present = NV_FALSE; + nv->iommus.niso_iommu_present = NV_FALSE; + + iso_np = of_parse_phandle(np, "iommus", 0); + if (iso_np && of_device_is_available(iso_np)) { + nv->iommus.iso_iommu_present = NV_TRUE; + } + + niso_np = of_get_child_by_name(np, "nvdisplay-niso"); + if (niso_np) { + niso_np_with_iommus = of_parse_phandle(niso_np, "iommus", 0); + if (niso_np_with_iommus && of_device_is_available(niso_np_with_iommus)) { + nv->iommus.niso_iommu_present = NV_TRUE; + } + } + + if (niso_np_with_iommus) + of_node_put(niso_np_with_iommus); + + if (niso_np) + of_node_put(niso_np); + + if (iso_np) + of_node_put(iso_np); + + return status; +} + +#define DISP_DT_SMMU_STREAM_ID_MASK 0xFF +// This function gets called only for Tegra +static NV_STATUS nv_platform_get_iso_niso_stream_ids(struct platform_device *plat_dev, + nv_state_t *nv) +{ + struct device_node *np = plat_dev->dev.of_node; + NvU32 value = 0; + NV_STATUS status = NV_OK; + int ret = 0; + + /* NV_U32_MAX is used to indicate that the platform does not support SMMU */ + nv->iommus.dispIsoStreamId = NV_U32_MAX; + nv->iommus.dispNisoStreamId = NV_U32_MAX; + + /* Parse ISO StreamID */ + ret = of_property_read_u32(np, "iso_sid", &value); + if (ret == 0) + { + nv->iommus.dispIsoStreamId = (value & DISP_DT_SMMU_STREAM_ID_MASK); + } + else if (ret == -EINVAL) + { + /* iso_sid will not be specified in device tree if SMMU needs to be bypassed. Continue without failing */ + nv_printf(NV_DBG_INFO, "NVRM: nv_platform_get_iso_niso_stream_ids, iso_sid not specified under display node\n"); + } + else + { + nv_printf(NV_DBG_ERRORS, "NVRM: nv_platform_get_iso_niso_stream_ids, iso_sid has invalid value\n"); + status = NV_ERR_GENERIC; + goto fail; + } + + /* Parse NISO StreamID */ + ret = of_property_read_u32(np, "niso_sid", &value); + if (ret == 0) + { + nv->iommus.dispNisoStreamId = (value & DISP_DT_SMMU_STREAM_ID_MASK); + } + else if (ret == -EINVAL) + { + /* niso_sid will not be specified in device tree if SMMU needs to be bypassed. 
Continue without failing */
+        nv_printf(NV_DBG_INFO, "NVRM: nv_platform_get_iso_niso_stream_ids, niso_sid not specified under display node\n");
+    }
+    else
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: nv_platform_get_iso_niso_stream_ids, niso_sid has invalid value\n");
+        status = NV_ERR_GENERIC;
+        goto fail;
+    }
+
+fail:
+    return status;
+}
+
+static int nv_platform_register_mapping_devs(struct platform_device *plat_dev,
+                                             nv_state_t *nv)
+{
+#if NV_SUPPORTS_PLATFORM_DEVICE
+    struct device_node *np = plat_dev->dev.of_node;
+    struct device_node *niso_np = NULL;
+    struct platform_device *niso_plat_dev = NULL;
+    int rc = 0;
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+
+    nv->niso_dma_dev = NULL;
+
+    niso_np = of_get_child_by_name(np, "nvdisplay-niso");
+    if (niso_np == NULL)
+    {
+        nv_printf(NV_DBG_INFO, "NVRM: no nvdisplay-niso child node\n");
+        goto register_mapping_devs_end;
+    }
+
+    rc = devm_of_platform_populate(&plat_dev->dev);
+    if (rc != 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: devm_of_platform_populate failed\n");
+        goto register_mapping_devs_end;
+    }
+
+    niso_plat_dev = of_find_device_by_node(niso_np);
+    if (niso_plat_dev == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: no nvdisplay-niso platform devices\n");
+        rc = -ENODEV;
+        goto register_mapping_devs_end;
+    }
+
+    rc = of_dma_configure(
+        &niso_plat_dev->dev,
+        niso_np
+#if NV_OF_DMA_CONFIGURE_ARGUMENT_COUNT > 2
+        , true
+#endif
+        );
+    if (rc != 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: of_dma_configure failed for niso\n");
+        goto register_mapping_devs_end;
+    }
+
+    nvl->niso_dma_dev.dev = &niso_plat_dev->dev;
+    nvl->niso_dma_dev.addressable_range.start = 0;
+    nvl->niso_dma_dev.addressable_range.limit = NV_U64_MAX;
+    nv->niso_dma_dev = &nvl->niso_dma_dev;
+
+register_mapping_devs_end:
+    of_node_put(niso_np);
+    return rc;
+#else
+    return -ENOSYS;
+#endif
+}
+
+static int nv_platform_parse_dcb(struct platform_device *plat_dev,
+                                 nv_state_t *nv)
+{
+    int ret;
+
+    struct device_node *np = plat_dev->dev.of_node;
+    ret = of_property_count_elems_of_size(np, "nvidia,dcb-image", sizeof(u8));
+    if (ret > 0)
+    {
+        nv->soc_dcb_size = ret;
+        /* Allocate dcb array */
+        NV_KMALLOC(nv->soc_dcb_blob, nv->soc_dcb_size);
+        if (nv->soc_dcb_blob == NULL)
+        {
+            nv_printf(NV_DBG_ERRORS, "failed to allocate dcb array\n");
+            return -ENOMEM;
+        }
+    }
+
+    ret = of_property_read_variable_u8_array(np, "nvidia,dcb-image",
+                                             nv->soc_dcb_blob, 0, nv->soc_dcb_size);
+    if (ret < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "failed to read dcb blob\n");
+        NV_KFREE(nv->soc_dcb_blob, nv->soc_dcb_size);
+        nv->soc_dcb_blob = NULL;
+        nv->soc_dcb_size = 0;
+        return ret;
+    }
+
+    return 0;
+}
+
+#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
+static int nv_platform_device_display_probe(struct platform_device *plat_dev)
+{
+    nv_state_t *nv = NULL;
+    nv_linux_state_t *nvl = NULL;
+    nvidia_stack_t *sp = NULL;
+    phys_addr_t res_addr = 0;
+    resource_size_t res_size = 0;
+    int irq = 0;
+    int rc = 0;
+    const struct resource *res;
+    bool skip_clk_rsts;
+    NV_STATUS status;
+
+    rc = nv_kmem_cache_alloc_stack(&sp);
+    if (rc < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: %s: failed to allocate stack!\n",
+                  __FUNCTION__);
+        return rc;
+    }
+
+    NV_KMALLOC(nvl, sizeof(*nvl));
+    if (nvl == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate nvl memory\n");
+        rc = -ENOMEM;
+        goto err_free_stack;
+    }
+    os_mem_set(nvl, 0, sizeof(*nvl));
+
+    nv = NV_STATE_PTR(nvl);
+
+    platform_set_drvdata(plat_dev, (void *)nvl);
+
+    nvl->dev = &plat_dev->dev;
+
+    /*
+     * fill SOC dma device information
+     */
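+    /*
+     * Unlike the PCI path, there is no bus-imposed DMA window here; the
+     * device mask is narrowed later in this probe via
+     * dma_set_mask(..., DMA_BIT_MASK(39)).
+     */
+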
nvl->dma_dev.dev = nvl->dev; + nvl->dma_dev.addressable_range.start = 0; + nvl->dma_dev.addressable_range.limit = NV_U64_MAX; + nv->dma_dev = &nvl->dma_dev; + + nvl->tce_bypass_enabled = NV_TRUE; + + NV_KMALLOC(nv->regs, sizeof(*(nv->regs))); + if (nv->regs == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate nv->regs memory\n"); + rc = -ENOMEM; + goto err_free_nvl; + } + os_mem_set(nv->regs, 0, sizeof(*(nv->regs))); + + res = platform_get_resource_byname(plat_dev, IORESOURCE_MEM, "nvdisplay"); + if (!res) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get IO memory resource\n"); + rc = -ENODEV; + goto err_free_nv_regs; + } + res_addr = res->start; + res_size = res->end - res->start; + + irq = platform_get_irq_byname(plat_dev, "nvdisplay"); + if (irq < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get IO irq resource\n"); + rc = -ENODEV; + goto err_free_nv_regs; + } + + if (!request_mem_region(res_addr, res_size, nv_device_name)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: request_mem_region failed for %pa\n", + &res_addr); + rc = -ENOMEM; + goto err_free_nv_regs; + } + + nv->regs->cpu_address = res_addr; + nv->regs->size = res_size; + nv->interrupt_line = irq; + nv->flags = NV_FLAG_SOC_DISPLAY; + + nv->os_state = (void *) nvl; + + // Check ISO/NISO SMMU status + status = nv_platform_get_iommu_availability(plat_dev, nv); + if (status != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: nv_platform_device_display_probe: parsing iommu node failed\n"); + goto err_release_mem_region_regs; + } + + // Parse ISO/NISO SMMU StreamIDs + status = nv_platform_get_iso_niso_stream_ids(plat_dev, nv); + if (status != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: nv_platform_device_display_probe: parsing ISO/NISO StreamIDs failed\n"); + goto err_release_mem_region_regs; + } + + rc = nv_platform_register_mapping_devs(plat_dev, nv); + if (rc != 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate niso platform device\n"); + goto err_release_mem_region_regs; + } + + rc = nv_platform_alloc_device_dpaux(plat_dev, nv); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to alloc DpAux device\n"); + goto err_release_mem_region_regs; + } + + NV_KMALLOC(nv->hdacodec_regs, sizeof(*(nv->hdacodec_regs))); + if (nv->hdacodec_regs == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate hdacodecregs memory\n"); + rc = -ENOMEM; + goto err_remove_dpaux_device; + } + os_mem_set(nv->hdacodec_regs, 0, sizeof(*(nv->hdacodec_regs))); + + res = platform_get_resource_byname(plat_dev, IORESOURCE_MEM, "hdacodec"); + if (!res) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get hdacodec IO memory resource\n"); + rc = -ENODEV; + goto err_free_nv_codec_regs; + } + res_addr = res->start; + res_size = res->end - res->start; + + if (!request_mem_region(res_addr, res_size, nv_device_name)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: request_mem_region of hdacodec failed for %pa\n", + &res_addr); + rc = -ENOMEM; + goto err_free_nv_codec_regs; + } + + nv->hdacodec_regs->cpu_address = res_addr; + nv->hdacodec_regs->size = res_size; + nv->soc_is_dpalt_mode_supported = false; + nv->soc_is_hfrp_supported = false; + + nv->hdacodec_irq = platform_get_irq_byname(plat_dev, "hdacodec"); + if (nv->hdacodec_irq < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get HDACODEC IO irq resource\n"); + rc = -ENODEV; + goto err_release_mem_hdacodec_region_regs; + } + + rc = of_property_read_bool(nvl->dev->of_node, "nvidia,dpalt-supported"); + if (rc == true) + { + irq = platform_get_irq_byname(plat_dev, "tcpc2disp"); + if (irq < 0) 
+        {
+            nv->soc_is_dpalt_mode_supported = false;
+        }
+        else
+        {
+            nv->tcpc2disp_irq = irq;
+            nv->soc_is_dpalt_mode_supported = true;
+        }
+    }
+
+    rc = of_property_read_bool(nvl->dev->of_node, "nvidia,hfrp-supported");
+    if (rc == true)
+    {
+        nv->soc_is_hfrp_supported = true;
+    }
+
+    NV_KMALLOC(nv->mipical_regs, sizeof(*(nv->mipical_regs)));
+    if (nv->mipical_regs == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate mipical registers memory\n");
+        rc = -ENOMEM;
+        goto err_release_mem_hdacodec_region_regs;
+    }
+    os_mem_set(nv->mipical_regs, 0, sizeof(*(nv->mipical_regs)));
+
+    res = platform_get_resource_byname(plat_dev, IORESOURCE_MEM, "mipical");
+    if (!res)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to get mipical IO memory resource\n");
+        rc = -ENODEV;
+        goto err_free_mipical_regs;
+    }
+    res_addr = res->start;
+    res_size = res->end - res->start;
+
+    if (!request_mem_region(res_addr, res_size, nv_device_name))
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: request_mem_region of mipical failed for %pa\n",
+                  &res_addr);
+        rc = -ENOMEM;
+        goto err_free_mipical_regs;
+    }
+
+    nv->mipical_regs->cpu_address = res_addr;
+    nv->mipical_regs->size = res_size;
+
+    if (nv->soc_is_hfrp_supported)
+    {
+        NV_KMALLOC(nv->hfrp0_regs, sizeof(*(nv->hfrp0_regs)));
+        if (nv->hfrp0_regs == NULL)
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate hfrp0 registers memory\n");
+            rc = -ENOMEM;
+            goto err_release_mem_mipical_region_regs;
+        }
+        os_mem_set(nv->hfrp0_regs, 0, sizeof(*(nv->hfrp0_regs)));
+
+        NV_KMALLOC(nv->hfrp1_regs, sizeof(*(nv->hfrp1_regs)));
+        if (nv->hfrp1_regs == NULL)
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate hfrp1 registers memory\n");
+            rc = -ENOMEM;
+            goto err_free_hfrp_regs;
+        }
+        os_mem_set(nv->hfrp1_regs, 0, sizeof(*(nv->hfrp1_regs)));
+
+        res = platform_get_resource_byname(plat_dev, IORESOURCE_MEM, "hfrp0");
+        if (!res)
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: failed to get hfrp0 IO memory resource\n");
+            rc = -ENODEV;
+            goto err_free_hfrp_regs;
+        }
+
+        res_addr = res->start;
+        res_size = res->end - res->start;
+
+        if (!request_mem_region(res_addr, res_size, nv_device_name))
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: request_mem_region of hfrp0 failed for %pa\n",
+                      &res_addr);
+            rc = -ENOMEM;
+            goto err_free_hfrp_regs;
+        }
+
+        nv->hfrp0_regs->cpu_address = res_addr;
+        nv->hfrp0_regs->size = res_size;
+
+        res = platform_get_resource_byname(plat_dev, IORESOURCE_MEM, "hfrp1");
+        if (!res)
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: failed to get hfrp1 IO memory resource\n");
+            rc = -ENODEV;
+            release_mem_region(nv->hfrp0_regs->cpu_address, nv->hfrp0_regs->size);
+            goto err_free_hfrp_regs;
+        }
+
+        res_addr = res->start;
+        res_size = res->end - res->start;
+
+        if (!request_mem_region(res_addr, res_size, nv_device_name))
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: request_mem_region of hfrp1 failed for %pa\n",
+                      &res_addr);
+            rc = -ENOMEM;
+            release_mem_region(nv->hfrp0_regs->cpu_address, nv->hfrp0_regs->size);
+            goto err_free_hfrp_regs;
+        }
+
+        nv->hfrp1_regs->cpu_address = res_addr;
+        nv->hfrp1_regs->size = res_size;
+
+        nv->hfrp0_irq = platform_get_irq_byname(plat_dev, "hfrp0");
+        if (nv->hfrp0_irq < 0)
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: failed to get HFRP0 irq resource\n");
+            rc = -ENODEV;
+            goto err_release_mem_hfrp_region_regs;
+        }
+
+        nv->hfrp1_irq = platform_get_irq_byname(plat_dev, "hfrp1");
+        if (nv->hfrp1_irq < 0)
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: failed to get HFRP1 irq resource\n");
+            rc = -ENODEV;
+            goto err_release_mem_hfrp_region_regs;
+        }
+    }
+
+    pm_vt_switch_required(&plat_dev->dev, NV_TRUE);
+
+    // Enabling power management for the device.
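+    /*
+     * pm_runtime_enable() only makes runtime PM callbacks possible for this
+     * device; it does not change its power state by itself. The actual
+     * powergate/unpowergate transitions are driven by pm_runtime_get()/
+     * pm_runtime_put() (see nv-platform-pm.c).
+     */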
+    pm_runtime_enable(&plat_dev->dev);
+
+    /*
+     * Skip all clock/reset functions if the 'nvidia,skip-clk-rsts' DT property
+     * is present. This property is currently present in the System FPGA device
+     * tree because the BPMP firmware isn't available on FPGA yet.
+     */
+    skip_clk_rsts = of_property_read_bool(nvl->dev->of_node, "nvidia,skip-clk-rsts");
+    if (!skip_clk_rsts)
+    {
+        /*
+         * Get all the display-clock handles
+         * from the BPMP FW at probe time.
+         */
+        status = nv_clk_get_handles(nv);
+        if (status != NV_OK)
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: failed to get clock handles\n");
+            rc = -EPERM;
+            goto err_release_mem_hfrp_region_regs;
+        }
+
+        /*
+         * Get the dpaux-reset handle
+         * from the device tree at probe time.
+         */
+        nvl->dpaux0_reset = devm_reset_control_get(nvl->dev, "dpaux0_reset");
+        if (IS_ERR(nvl->dpaux0_reset))
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: devm_reset_control_get failed, err: %ld\n", PTR_ERR(nvl->dpaux0_reset));
+            nvl->dpaux0_reset = NULL;
+        }
+
+        /*
+         * Get the display-reset handle
+         * from the device tree at probe time.
+         */
+        nvl->nvdisplay_reset = devm_reset_control_get(nvl->dev, "nvdisplay_reset");
+        if (IS_ERR(nvl->nvdisplay_reset))
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: devm_reset_control_get failed, err: %ld\n", PTR_ERR(nvl->nvdisplay_reset));
+            nvl->nvdisplay_reset = NULL;
+        }
+
+        /*
+         * Get the dsi-core reset handle
+         * from the device tree at probe time.
+         */
+        nvl->dsi_core_reset = devm_reset_control_get(nvl->dev, "dsi_core_reset");
+        if (IS_ERR(nvl->dsi_core_reset))
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: devm_reset_control_get failed, err: %ld\n", PTR_ERR(nvl->dsi_core_reset));
+            nvl->dsi_core_reset = NULL;
+        }
+
+        /*
+         * Get the mipi_cal reset handle
+         * from the device tree at probe time.
+         */
+        nvl->mipi_cal_reset = devm_reset_control_get(nvl->dev, "mipi_cal_reset");
+        if (IS_ERR(nvl->mipi_cal_reset))
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: mipi_cal devm_reset_control_get failed, err: %ld\n", PTR_ERR(nvl->mipi_cal_reset));
+            nvl->mipi_cal_reset = NULL;
+        }
+
+        /*
+         * In T23x, HDACODEC is part of the same power domain as NVDisplay, so
+         * unpowergating the DISP domain also results in the HDACODEC reset
+         * being de-asserted. However, in T26x, HDACODEC is moved out to a
+         * separate always-on domain, so we need to explicitly de-assert the
+         * HDACODEC reset in RM. We don't have a good way to differentiate
+         * between T23x and T26x here, so if reading "hdacodec_reset" from the
+         * DT fails, silently ignore it for now. In the long term we should
+         * look into using the devm_reset_control_bulk* APIs, provided we're
+         * ultimately just getting and asserting/de-asserting all of the resets
+         * specified in the DT together all of the time and there are no
+         * scenarios in which we need to use only a specific subset of the
+         * resets at a given point.
+         */
+        nvl->hdacodec_reset = devm_reset_control_get(nvl->dev, "hdacodec_reset");
+        if (IS_ERR(nvl->hdacodec_reset))
+        {
+            nvl->hdacodec_reset = NULL;
+        }
+    }
+
+    status = nv_imp_icc_get(nv);
+    if (status != NV_OK)
+    {
+        //
+        // nv_imp_icc_get errors are normally treated as fatal, but ICC is
+        // expected to be disabled on AV + L (causing NV_ERR_NOT_SUPPORTED to
+        // be returned), so this is not treated as fatal.
+ // + if (status != NV_ERR_NOT_SUPPORTED) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get icc handle\n"); + rc = -EPERM; + goto err_destroy_clk_handles; + } + } + /* + * Get the backlight device name + */ + of_property_read_string(nvl->dev->of_node, "nvidia,backlight-name", + &nvl->backlight.device_name); + + /* + * TODO bug 2100708: the fake domain is used to opt out of some RM paths + * that cause issues otherwise, see the bug for details. + */ + nv->pci_info.domain = 2; + nv->pci_info.bus = 0; + nv->pci_info.slot = 0; + + num_probed_nv_devices++; + + if (!nv_lock_init_locks(sp, nv)) + { + rc = -EPERM; + goto err_put_icc_handle; + } + + nvl->safe_to_mmap = NV_TRUE; + INIT_LIST_HEAD(&nvl->open_files); + NV_SPIN_LOCK_INIT(&nvl->soc_isr_lock); + + if (!rm_init_private_state(sp, nv)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: rm_init_private_state() failed!\n"); + rc = -EPERM; + goto err_destroy_lock; + } + + if (nv_linux_init_open_q(nvl) != 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: nv_linux_init_open_q() failed!\n"); + goto err_free_private_state; + } + + num_nv_devices++; + + /* + * The newly created nvl object is added to the nv_linux_devices global list + * only after all the initialization operations for that nvl object are + * completed, so as to protect against simultaneous lookup operations which + * may discover a partially initialized nvl object in the list + */ + LOCK_NV_LINUX_DEVICES(); + + if (nv_linux_add_device_locked(nvl) != 0) + { + UNLOCK_NV_LINUX_DEVICES(); + nv_printf(NV_DBG_ERRORS, "NVRM: failed to add device\n"); + rc = -ENODEV; + goto err_stop_open_q; + } + + UNLOCK_NV_LINUX_DEVICES(); + + rm_set_rm_firmware_requested(sp, nv); + + /* + * Parse DCB blob + */ + rc = nv_platform_parse_dcb(plat_dev, nv); + if (rc != 0) + { + goto err_remove_device; + } + + /* + * Parse display rm sw-soc-chip-id + */ + rc = of_property_read_u32(nvl->dev->of_node, "nvidia,disp-sw-soc-chip-id", + &nv->disp_sw_soc_chip_id); + if (rc != 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Unable to read disp_sw_soc_chip_id\n"); + goto err_remove_device; + } + + /* + * TODO: procfs, vt_switch, dynamic_power_management + */ + + dma_set_mask(nv->dma_dev->dev, DMA_BIT_MASK(39)); + if (nv->niso_dma_dev != NULL) + { + dma_set_mask_and_coherent(nv->niso_dma_dev->dev, DMA_BIT_MASK(39)); + } + + rc = os_alloc_mutex(&nvl->soc_bh_mutex); + if (rc != 0) + { + goto err_remove_device; + } + + nv_kmem_cache_free_stack(sp); + + return 0; + +err_remove_device: + LOCK_NV_LINUX_DEVICES(); + nv_linux_remove_device_locked(nvl); + UNLOCK_NV_LINUX_DEVICES(); +err_stop_open_q: + nv_linux_stop_open_q(nvl); +err_free_private_state: + rm_free_private_state(sp, nv); +err_destroy_lock: + nv_lock_destroy_locks(sp, nv); +err_put_icc_handle: + nv_imp_icc_put(nv); +err_destroy_clk_handles: + if (!skip_clk_rsts) + { + nv_clk_clear_handles(nv); + } +err_release_mem_hfrp_region_regs: + if (nv->soc_is_hfrp_supported) + { + release_mem_region(nv->hfrp0_regs->cpu_address, nv->hfrp0_regs->size); + release_mem_region(nv->hfrp1_regs->cpu_address, nv->hfrp1_regs->size); + } +err_free_hfrp_regs: + if (nv->soc_is_hfrp_supported) + { + NV_KFREE(nv->hfrp0_regs, sizeof(*(nv->hfrp0_regs))); + NV_KFREE(nv->hfrp1_regs, sizeof(*(nv->hfrp1_regs))); + } +err_release_mem_mipical_region_regs: + release_mem_region(nv->mipical_regs->cpu_address, nv->mipical_regs->size); +err_free_mipical_regs: + NV_KFREE(nv->mipical_regs, sizeof(*(nv->mipical_regs))); +err_release_mem_hdacodec_region_regs: + release_mem_region(nv->hdacodec_regs->cpu_address, 
nv->hdacodec_regs->size); +err_free_nv_codec_regs: + NV_KFREE(nv->hdacodec_regs, sizeof(*(nv->hdacodec_regs))); +err_remove_dpaux_device: + nv_platform_free_device_dpaux(nv); +err_release_mem_region_regs: + release_mem_region(nv->regs->cpu_address, nv->regs->size); +err_free_nv_regs: + NV_KFREE(nv->regs, sizeof(*(nv->regs))); +err_free_nvl: + platform_set_drvdata(plat_dev, NULL); + NV_KFREE(nvl, sizeof(*nvl)); +err_free_stack: + nv_kmem_cache_free_stack(sp); + + return rc; +} +#endif + +#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE +static void nv_platform_device_display_remove(struct platform_device *plat_dev) +{ + nv_linux_state_t *nvl = NULL; + nv_state_t *nv; + nvidia_stack_t *sp = NULL; + + nv_printf(NV_DBG_SETUP, "NVRM: removing SOC Display device\n"); + + nvl = platform_get_drvdata(plat_dev); + if (WARN_ON(!nvl || (nvl->dev != &plat_dev->dev))) + { + return; + } + + if (WARN_ON(nv_kmem_cache_alloc_stack(&sp) < 0)) + { + return; + } + + /* + * Flush and stop open_q before proceeding with removal to ensure nvl + * outlives all enqueued work items. + */ + nv_linux_stop_open_q(nvl); + + LOCK_NV_LINUX_DEVICES(); + + nv_linux_remove_device_locked(nvl); + + /* + * TODO: procfs + */ + + down(&nvl->ldata_lock); + UNLOCK_NV_LINUX_DEVICES(); + + /* + * TODO: vt_switch, dynamic_power_management + */ + + nv = NV_STATE_PTR(nvl); + + if ((nv->flags & NV_FLAG_PERSISTENT_SW_STATE) || (nv->flags & NV_FLAG_OPEN)) + { + nv_acpi_unregister_notifier(nvl); + if (nv->flags & NV_FLAG_PERSISTENT_SW_STATE) + { + rm_disable_gpu_state_persistence(sp, nv); + } + nv_shutdown_adapter(sp, nv, nvl); + nv_dev_free_stacks(nvl); + } + + nv_lock_destroy_locks(sp, nv); + + num_probed_nv_devices--; + + rm_free_private_state(sp, nv); + + if (nv->soc_is_hfrp_supported) + { + release_mem_region(nv->hfrp0_regs->cpu_address, nv->hfrp0_regs->size); + + NV_KFREE(nv->hfrp0_regs, sizeof(*(nv->hfrp0_regs))); + + release_mem_region(nv->hfrp1_regs->cpu_address, nv->hfrp1_regs->size); + + NV_KFREE(nv->hfrp1_regs, sizeof(*(nv->hfrp1_regs))); + } + + release_mem_region(nv->mipical_regs->cpu_address, nv->mipical_regs->size); + + NV_KFREE(nv->mipical_regs, sizeof(*(nv->mipical_regs))); + + release_mem_region(nv->hdacodec_regs->cpu_address, nv->hdacodec_regs->size); + + NV_KFREE(nv->hdacodec_regs, sizeof(*(nv->hdacodec_regs))); + + release_mem_region(nv->regs->cpu_address, nv->regs->size); + + NV_KFREE(nv->regs, sizeof(*(nv->regs))); + + nv_imp_icc_put(nv); + + nv_platform_free_device_dpaux(nv); + + /* + * Clearing all the display-clock handles + * at the time of device remove. + */ + nv_clk_clear_handles(nv); + + // Disabling power management for the device. 
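+    // This balances the pm_runtime_enable() call made during probe.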
+ pm_runtime_disable(&plat_dev->dev); + + num_nv_devices--; + + NV_KFREE(nv->soc_dcb_blob, nv->soc_dcb_size); + + os_free_mutex(nvl->soc_bh_mutex); + + NV_KFREE(nvl, sizeof(*nvl)); + + nv_kmem_cache_free_stack(sp); +} +#endif + +static int nv_platform_device_probe(struct platform_device *plat_dev) +{ + int rc = 0; + +#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE + rc = nv_platform_device_display_probe(plat_dev); +#endif + + return rc; +} + +static void nv_platform_device_remove(struct platform_device *plat_dev) +{ +#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE + nv_platform_device_display_remove(plat_dev); +#endif +} + +#if defined(NV_PLATFORM_DRIVER_STRUCT_REMOVE_RETURNS_VOID) /* Linux v6.11 */ +static void nv_platform_device_remove_wrapper(struct platform_device *pdev) +{ + nv_platform_device_remove(pdev); +} +#else +static int nv_platform_device_remove_wrapper(struct platform_device *pdev) +{ + nv_platform_device_remove(pdev); + + return 0; +} +#endif + +const struct of_device_id nv_platform_device_table[] = +{ + { .compatible = "nvidia,tegra234-display",}, + { .compatible = "nvidia,tegra264-display",}, + { .compatible = "nvidia,tegra256-display",}, + {}, +}; +MODULE_DEVICE_TABLE(of, nv_platform_device_table); + +#if defined(CONFIG_PM) +extern struct dev_pm_ops nv_pm_ops; +#endif + +struct platform_driver nv_platform_driver = { + .driver = { + .name = "nv_platform", + .of_match_table = nv_platform_device_table, + .owner = THIS_MODULE, +#if defined(CONFIG_PM) + .pm = &nv_pm_ops, +#endif + }, + .probe = nv_platform_device_probe, + .remove = nv_platform_device_remove_wrapper, +}; + +int nv_platform_count_devices(void) +{ + int count = 0; + struct device_node *np = NULL; + + if (NVreg_RegisterPlatformDeviceDriver == 0) + { + return 0; + } + + while ((np = of_find_matching_node(np, nv_platform_device_table))) + { + count++; + } + + return count; +} + +int nv_platform_register_driver(void) +{ +#if NV_SUPPORTS_PLATFORM_DEVICE + if (NVreg_RegisterPlatformDeviceDriver > 0) + { + return platform_driver_register(&nv_platform_driver); + } + + return 0; +#else + nv_printf(NV_DBG_ERRORS, "NVRM: Not registering platform driver\n"); + return -1; +#endif +} + +void nv_platform_unregister_driver(void) +{ +#if NV_SUPPORTS_PLATFORM_DEVICE + if (NVreg_RegisterPlatformDeviceDriver > 0) + { + platform_driver_unregister(&nv_platform_driver); + } +#endif +} + +extern int tegra_fuse_control_read(unsigned long addr, unsigned int *data); + +unsigned int NV_API_CALL nv_soc_fuse_register_read (unsigned int addr) +{ + unsigned int data = 0; + +#if NV_SUPPORTS_PLATFORM_DEVICE && NV_IS_EXPORT_SYMBOL_PRESENT_tegra_fuse_control_read + tegra_fuse_control_read ((unsigned long)(addr), &data); +#endif + + return data; +} + +#if NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_send_cmd +extern int tsec_comms_send_cmd(void* cmd, unsigned int queue_id, nv_soc_tsec_cb_func_t cb_func, void* cb_context); +unsigned int NV_API_CALL nv_soc_tsec_send_cmd(void* cmd, nv_soc_tsec_cb_func_t cb_func, void* cb_context) +{ + return (unsigned int)tsec_comms_send_cmd(cmd, 0, cb_func, cb_context); +} +#else +unsigned int NV_API_CALL nv_soc_tsec_send_cmd(void* cmd, nv_soc_tsec_cb_func_t cb_func, void* cb_context) +{ + return (unsigned int)NV_ERR_NOT_SUPPORTED; +} +#endif // NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_send_cmd + +#if NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_set_init_cb +extern int tsec_comms_set_init_cb(nv_soc_tsec_cb_func_t cb_func, void* cb_context); +unsigned int NV_API_CALL nv_soc_tsec_event_register(nv_soc_tsec_cb_func_t cb_func, void* cb_context, 
NvBool is_init_event) +{ + if (is_init_event) + { + return (unsigned int)tsec_comms_set_init_cb(cb_func, cb_context); + } + else + { + // TODO: Add DeInit Event support for TSEC if required + return 0; + } +} +#else +unsigned int NV_API_CALL nv_soc_tsec_event_register(nv_soc_tsec_cb_func_t cb_func, void* cb_context, NvBool is_init_event) +{ + return (unsigned int)NV_ERR_NOT_SUPPORTED; +} +#endif // NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_set_init_cb + +#if NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_clear_init_cb +extern void tsec_comms_clear_init_cb(void); +unsigned int NV_API_CALL nv_soc_tsec_event_unregister(NvBool is_init_event) +{ + if (is_init_event) + { + tsec_comms_clear_init_cb(); + return 0; + } + else + { + // TODO: Add DeInit Event support for TSEC if required + return 0; + } +} +#else +unsigned int NV_API_CALL nv_soc_tsec_event_unregister(NvBool is_init_event) +{ + return (unsigned int)NV_ERR_NOT_SUPPORTED; +} +#endif // NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_clear_init_cb + +void* NV_API_CALL nv_soc_tsec_alloc_mem_desc(NvU32 num_bytes, NvU32 *flcn_addr) +{ +#if NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_alloc_mem_from_gscco + extern void *tsec_comms_alloc_mem_from_gscco(u32 size_in_bytes, u32 *gscco_offset); + return tsec_comms_alloc_mem_from_gscco(num_bytes, flcn_addr); +#else + return NULL; +#endif // NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_alloc_mem_from_gscco +} + +void NV_API_CALL nv_soc_tsec_free_mem_desc(void *mem_desc) +{ +#if NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_free_gscco_mem + extern void tsec_comms_free_gscco_mem(void *page_va); + tsec_comms_free_gscco_mem(mem_desc); +#endif // NV_IS_EXPORT_SYMBOL_PRESENT_tsec_comms_free_gscco_mem +} + +NV_STATUS nv_get_valid_window_head_mask(nv_state_t *nv, NvU64 *window_head_mask) +{ +#if NV_SUPPORTS_PLATFORM_DEVICE + int ret = 0; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if ((nvl == NULL) || (window_head_mask == NULL)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Wrong input arguments \n"); + return NV_ERR_INVALID_ARGUMENT; + } + + ret = of_property_read_u64(nvl->dev->of_node, "nvidia,window-head-mask", window_head_mask); + if (ret == -EINVAL) + { + // Property does not exist. + return NV_ERR_NOT_SUPPORTED; + } + else if (ret) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to read device node window-head-mask ret=%d\n", ret); + return NV_ERR_GENERIC; + } + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NV_STATUS NV_API_CALL nv_soc_i2c_hsp_semaphore_acquire(NvU32 ownerId, NvBool bAcquire, NvU64 timeout) +{ + // TODO: This needs to be updated once HSP side implementation is done. + return NV_OK; +} + +NvBool nv_get_hdcp_enabled(nv_state_t *nv) +{ + bool rc = false; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + rc = of_property_read_bool(nvl->dev->of_node, "hdcp_enabled"); + if (rc == true) + { + return NV_TRUE; + } + + return NV_FALSE; +} diff --git a/kernel-open/nvidia/nv-procfs.c b/kernel-open/nvidia/nv-procfs.c new file mode 100644 index 0000000..b77b152 --- /dev/null +++ b/kernel-open/nvidia/nv-procfs.c @@ -0,0 +1,1465 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+#if defined(CONFIG_PROC_FS)
+
+#include "nv-procfs.h"
+#include "nv_compiler.h"
+#include "nv-reg.h"
+#include "conftest/patches.h"
+
+#define NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(name) \
+    NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, nv_system_pm_lock)
+
+static const char *__README_warning = \
+    "The NVIDIA graphics driver tries to detect potential problems\n"
+    "with the host system and warns about them using the system's\n"
+    "logging mechanisms. Important warning messages are also logged\n"
+    "to dedicated text files in this directory.\n";
+
+static const char *__README_patches = \
+    "The NVIDIA graphics driver's kernel interface files can be\n"
+    "patched to improve compatibility with new Linux kernels or to\n"
+    "fix bugs in these files. When applied, each official patch\n"
+    "provides a short text file with a description of itself\n"
+    "in this directory.\n";
+
+static struct proc_dir_entry *proc_nvidia;
+static struct proc_dir_entry *proc_nvidia_warnings;
+static struct proc_dir_entry *proc_nvidia_patches;
+static struct proc_dir_entry *proc_nvidia_gpus;
+
+extern char *NVreg_CoherentGPUMemoryMode;
+extern char *NVreg_RegistryDwords;
+extern char *NVreg_RegistryDwordsPerDevice;
+extern char *NVreg_RmMsg;
+extern char *NVreg_GpuBlacklist;
+extern char *NVreg_TemporaryFilePath;
+extern char *NVreg_ExcludedGpus;
+
+static char nv_registry_keys[NV_MAX_REGISTRY_KEYS_LENGTH];
+
+#if defined(CONFIG_PM)
+static nv_pm_action_depth_t nv_pm_action_depth = NV_PM_ACTION_DEPTH_DEFAULT;
+#endif
+
+static int nv_procfs_read_registry(struct seq_file *s, void *v);
+
+#define NV_NUMA_STATUS_MSG_LEN (32)
+#define NV_PROC_WRITE_BUFFER_SIZE (512 * PAGE_SIZE)
+
+typedef struct
+{
+    nvidia_stack_t *sp;
+    struct semaphore sp_lock;
+
+    nv_state_t *nv;
+
+    void *data;
+    off_t off;
+} nv_procfs_private_t;
+
+/*
+ * Status messages directly corresponding to states in nv_numa_status_t.
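+ * The array below is indexed by the numeric status value, so the order of
+ * its entries must match that enum exactly.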
+ */ +static const char *nv_numa_status_messages[] = +{ + "disabled", + "offline", + "online_in_progress", + "online", + "online_failed", + "offline_in_progress", + "offline_failed", +}; + +static int +nv_procfs_read_gpu_info( + struct seq_file *s, + void *v +) +{ + nv_state_t *nv = s->private; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct pci_dev *pci_dev = nvl->pci_dev; + char *type; + const char *name; + char *uuid; + char vbios_version[15]; + nvidia_stack_t *sp = NULL; + char firmware_version[64] = { 0 }; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return 0; + } + + if (rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE) != NV_OK) + { + nv_kmem_cache_free_stack(sp); + return 0; + } + + name = rm_get_device_name(pci_dev->device, + pci_dev->subsystem_vendor, + pci_dev->subsystem_device); + + seq_printf(s, "Model: \t\t %s\n", name); + seq_printf(s, "IRQ: \t\t %d\n", nv->interrupt_line); + + uuid = rm_get_gpu_uuid(sp, nv); + + if (uuid != NULL) + { + seq_printf(s, "GPU UUID: \t %s\n", uuid); + os_free_mem(uuid); + uuid = NULL; + } + + rm_get_vbios_version(sp, nv, vbios_version); + + seq_printf(s, "Video BIOS: \t %s\n", vbios_version); + + if (nv_find_pci_capability(pci_dev, PCI_CAP_ID_EXP)) + type = "PCIe"; + else + type = "PCI"; + seq_printf(s, "Bus Type: \t %s\n", type); + + seq_printf(s, "DMA Size: \t %d bits\n", + nv_count_bits(pci_dev->dma_mask)); + seq_printf(s, "DMA Mask: \t 0x%llx\n", pci_dev->dma_mask); + seq_printf(s, "Bus Location: \t %04x:%02x:%02x.%x\n", + nv->pci_info.domain, nv->pci_info.bus, + nv->pci_info.slot, PCI_FUNC(pci_dev->devfn)); + seq_printf(s, "Device Minor: \t %u\n", nvl->minor_num); + + rm_get_firmware_version(sp, nv, firmware_version, sizeof(firmware_version)); + if (firmware_version[0] != '\0') + { + seq_printf(s, "GPU Firmware: \t %s\n", firmware_version); + } + +#if defined(DEBUG) + do + { + int j; + for (j = 0; j < NV_GPU_NUM_BARS; j++) + { + seq_printf(s, "BAR%u: \t\t 0x%llx (%lluMB)\n", + j, nv->bars[j].cpu_address, (nv->bars[j].size >> 20)); + } + } while (0); +#endif + + seq_printf(s, "GPU Excluded:\t %s\n", + ((nv->flags & NV_FLAG_EXCLUDE) != 0) ? "Yes" : "No"); + + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE); + + nv_kmem_cache_free_stack(sp); + + return 0; +} + +NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(gpu_info); + +static int +nv_procfs_read_power( + struct seq_file *s, + void *v +) +{ + nv_state_t *nv = s->private; + nvidia_stack_t *sp = NULL; + nv_power_info_t power_info; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return 0; + } + + rm_get_power_info(sp, nv, &power_info); + seq_printf(s, "Runtime D3 status: %s\n", power_info.dynamic_power_status); + seq_printf(s, "Tegra iGPU Rail-Gating: %s\n", + nv->is_tegra_pci_igpu_rg_enabled ? "Enabled" : "Disabled"); + seq_printf(s, "Video Memory: %s\n\n", power_info.vidmem_power_status); + + seq_printf(s, "GPU Hardware Support:\n"); + seq_printf(s, " Video Memory Self Refresh: %s\n", power_info.gc6_support); + seq_printf(s, " Video Memory Off: %s\n\n", power_info.gcoff_support); + + seq_printf(s, "S0ix Power Management:\n"); + seq_printf(s, " Platform Support: %s\n", + nv_platform_supports_s0ix() ? 
"Supported" : "Not Supported"); + seq_printf(s, " Status: %s\n\n", power_info.s0ix_status); + seq_printf(s, "Notebook Dynamic Boost: %s\n", power_info.db_support); + + nv_kmem_cache_free_stack(sp); + return 0; +} + +NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(power); + +static int +nv_procfs_read_version( + struct seq_file *s, + void *v +) +{ + seq_printf(s, "NVRM version: %s\n", pNVRM_ID); + seq_printf(s, "GCC version: %s\n", NV_COMPILER); + + return 0; +} + +NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(version); + +static void +nv_procfs_close_file( + nv_procfs_private_t *nvpp +) +{ + nvidia_stack_t *sp; + + if (nvpp->data != NULL) + { + os_free_mem(nvpp->data); + } + + sp = nvpp->sp; + if (sp != NULL) + { + nv_kmem_cache_free_stack(sp); + } + + NV_KFREE(nvpp, sizeof(*nvpp)); +} + +static int +nv_procfs_open_file( + struct inode *inode, + struct file *file, + nv_procfs_private_t **pnvpp +) +{ + int retval = 0; + NV_STATUS status; + nv_procfs_private_t *nvpp = NULL; + nvidia_stack_t *sp = NULL; + + NV_KZALLOC(nvpp, sizeof(nv_procfs_private_t)); + if (nvpp == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate procfs private!\n"); + return -ENOMEM; + } + + NV_INIT_MUTEX(&nvpp->sp_lock); + + nvpp->nv = NV_PDE_DATA(inode); + + if (0 == (file->f_mode & FMODE_WRITE)) + goto done; + + retval = nv_kmem_cache_alloc_stack(&sp); + if (retval != 0) + { + goto done; + } + + status = os_alloc_mem((void **)&nvpp->data, NV_PROC_WRITE_BUFFER_SIZE); + if (status != NV_OK) + { + retval = -ENOMEM; + goto done; + } + + os_mem_set((void *)nvpp->data, 0, NV_PROC_WRITE_BUFFER_SIZE); + nvpp->sp = sp; + +done: + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + *pnvpp = nvpp; + + return 0; +} + +static int +nv_procfs_open_registry( + struct inode *inode, + struct file *file +) +{ + nv_procfs_private_t *nvpp = NULL; + int retval; + + retval = nv_procfs_open_file(inode, file, &nvpp); + if (retval < 0) + { + return retval; + } + + retval = single_open(file, nv_procfs_read_registry, nvpp); + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + retval = nv_down_read_interruptible(&nv_system_pm_lock); + if (retval < 0) + { + single_release(inode, file); + nv_procfs_close_file(nvpp); + } + + return retval; +} + +static int +nv_procfs_close_registry( + struct inode *inode, + struct file *file +) +{ + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + nv_state_t *nv; + nv_linux_state_t *nvl = NULL; + nvidia_stack_t *sp = nvpp->sp; + char *key_name, *key_value, *registry_keys; + size_t key_len, len; + long count; + NV_STATUS rm_status; + int rc = 0; + + if (0 != nvpp->off) + { + nv = nvpp->nv; + if (nv != NULL) + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + key_value = (char *)nvpp->data; + + key_name = strsep(&key_value, "="); + + if (NULL == key_name || NULL == key_value) + { + rc = -EINVAL; + goto done; + } + + key_len = (strlen(key_name) + 1); + count = (nvpp->off - key_len); + + if (count <= 0) + { + rc = -EINVAL; + goto done; + } + + rm_status = rm_write_registry_binary(sp, nv, key_name, + key_value, count); + if (rm_status != NV_OK) + { + rc = -EFAULT; + goto done; + } + + registry_keys = ((nvl != NULL) ? 
+ nvl->registry_keys : nv_registry_keys); + if (strstr(registry_keys, key_name) != NULL) + goto done; + len = strlen(registry_keys); + + if ((len + key_len + 2) <= NV_MAX_REGISTRY_KEYS_LENGTH) + { + if (len != 0) + strcat(registry_keys, ", "); + strcat(registry_keys, key_name); + } + } + +done: + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + nv_procfs_close_file(nvpp); + + return rc; +} + +static int +nv_procfs_read_params( + struct seq_file *s, + void *v +) +{ + unsigned int i; + nv_parm_t *entry; + + for (i = 0; (entry = &nv_parms[i])->name != NULL; i++) + seq_printf(s, "%s: %u\n", entry->name, *entry->data); + + seq_printf(s, "CoherentGPUMemoryMode: \"%s\"\n", + (NVreg_CoherentGPUMemoryMode != NULL) ? NVreg_CoherentGPUMemoryMode : ""); + seq_printf(s, "RegistryDwords: \"%s\"\n", + (NVreg_RegistryDwords != NULL) ? NVreg_RegistryDwords : ""); + seq_printf(s, "RegistryDwordsPerDevice: \"%s\"\n", + (NVreg_RegistryDwordsPerDevice != NULL) ? NVreg_RegistryDwordsPerDevice : ""); + seq_printf(s, "RmMsg: \"%s\"\n", + (NVreg_RmMsg != NULL) ? NVreg_RmMsg : ""); + seq_printf(s, "GpuBlacklist: \"%s\"\n", + (NVreg_GpuBlacklist != NULL) ? NVreg_GpuBlacklist : ""); + seq_printf(s, "TemporaryFilePath: \"%s\"\n", + (NVreg_TemporaryFilePath != NULL) ? NVreg_TemporaryFilePath : ""); + seq_printf(s, "ExcludedGpus: \"%s\"\n", + (NVreg_ExcludedGpus != NULL) ? NVreg_ExcludedGpus : ""); + + return 0; +} + +NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(params); + +static int +nv_procfs_read_registry( + struct seq_file *s, + void *v +) +{ + nv_procfs_private_t *nvpp = s->private; + nv_state_t *nv = nvpp->nv; + nv_linux_state_t *nvl = NULL; + char *registry_keys; + + if (nv != NULL) + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + registry_keys = ((nvl != NULL) ? + nvl->registry_keys : nv_registry_keys); + + seq_printf(s, "Binary: \"%s\"\n", registry_keys); + return 0; +} + +static ssize_t +nv_procfs_write_file( + struct file *file, + const char __user *buffer, + size_t count, + loff_t *pos +) +{ + int status = 0; + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + char *proc_buffer; + unsigned long bytes_left; + + down(&nvpp->sp_lock); + + bytes_left = (NV_PROC_WRITE_BUFFER_SIZE - nvpp->off - 1); + + if (count == 0) + { + status = -EINVAL; + goto done; + } + else if ((bytes_left == 0) || (count > bytes_left)) + { + status = -ENOSPC; + goto done; + } + + proc_buffer = &((char *)nvpp->data)[nvpp->off]; + + if (copy_from_user(proc_buffer, buffer, count)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to copy in proc data!\n"); + status = -EFAULT; + } + else + { + nvpp->off += count; + } + + *pos = nvpp->off; + +done: + up(&nvpp->sp_lock); + + return ((status < 0) ? 
status : (int)count); +} + +static nv_proc_ops_t nv_procfs_registry_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_registry, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_WRITE = nv_procfs_write_file, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = nv_procfs_close_registry, +}; + +#if defined(CONFIG_PM) +static int +nv_procfs_show_suspend_depth( + struct seq_file *m, + void *v +) +{ + seq_printf(m, "default modeset uvm\n"); + return 0; +} + +static ssize_t +nv_procfs_write_suspend_depth( + struct file *file, + const char __user *buf, + size_t count, + loff_t *pos +) +{ + char kbuf[sizeof("modeset\n")]; + unsigned i; + + if (!NV_IS_SUSER()) + { + return -EPERM; + } + + if (count < strlen("uvm") || count > sizeof(kbuf)) + { + return -EINVAL; + } + + if (copy_from_user(kbuf, buf, count)) + { + return -EFAULT; + } + + count = min(count, sizeof(kbuf) - 1); + for (i = 0; i < count && isalpha(kbuf[i]); i++); + kbuf[i] = '\0'; + + if (strcasecmp(kbuf, "uvm") == 0) + { + nv_pm_action_depth = NV_PM_ACTION_DEPTH_UVM; + } + else if (strcasecmp(kbuf, "modeset") == 0) + { + nv_pm_action_depth = NV_PM_ACTION_DEPTH_MODESET; + } + else if (strcasecmp(kbuf, "default") == 0) + { + nv_pm_action_depth = NV_PM_ACTION_DEPTH_DEFAULT; + } + else + { + return -EINVAL; + } + + return count; +} + +static int +nv_procfs_open_suspend_depth( + struct inode *inode, + struct file *file +) +{ + return single_open(file, nv_procfs_show_suspend_depth, NULL); +} + +static nv_proc_ops_t nv_procfs_suspend_depth_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_suspend_depth, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_WRITE = nv_procfs_write_suspend_depth, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = single_release +}; + +static int +nv_procfs_show_suspend( + struct seq_file *m, + void *v +) +{ + seq_printf(m, "suspend hibernate resume\n"); + return 0; +} + +static ssize_t +nv_procfs_write_suspend( + struct file *file, + const char __user *buf, + size_t count, + loff_t *pos +) +{ + NV_STATUS status; + char kbuf[sizeof("hibernate\n")]; + nv_power_state_t power_state; + unsigned i; + + if (!NV_IS_SUSER()) + { + return -EPERM; + } + + if (count < strlen("resume") || count > sizeof(kbuf)) + { + return -EINVAL; + } + + if (copy_from_user(kbuf, buf, count)) + { + return -EFAULT; + } + + count = min(count, sizeof(kbuf) - 1); + for (i = 0; i < count && isalpha(kbuf[i]); i++); + kbuf[i] = '\0'; + + if (strcasecmp(kbuf, "suspend") == 0) + { + power_state = NV_POWER_STATE_IN_STANDBY; + } + else if (strcasecmp(kbuf, "hibernate") == 0) + { + power_state = NV_POWER_STATE_IN_HIBERNATE; + } + else if (strcasecmp(kbuf, "resume") == 0) + { + power_state = NV_POWER_STATE_RUNNING; + } + else + { + return -EINVAL; + } + + status = nv_set_system_power_state(power_state, nv_pm_action_depth); + + return (status != NV_OK) ? -EIO : count; +} + +static int +nv_procfs_open_suspend( + struct inode *inode, + struct file *file +) +{ + return single_open(file, nv_procfs_show_suspend, NULL); +} + +static nv_proc_ops_t nv_procfs_suspend_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_suspend, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_WRITE = nv_procfs_write_suspend, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = single_release +}; + +#endif + +/* + * Forwards error to nv_log_error which exposes data to vendor callback + */ +static void +exercise_error_forwarding_va( + nv_state_t *nv, + NvU32 err, + const char *fmt, + ... 
+) +{ + va_list arguments; + + va_start(arguments, fmt); + nv_log_error(nv, err, fmt, arguments); + va_end(arguments); +} + +static int +nv_procfs_show_exercise_error_forwarding( + struct seq_file *m, + void *v +) +{ + return 0; +} + +static int +nv_procfs_open_exercise_error_forwarding( + struct inode *inode, + struct file *file +) +{ + nv_procfs_private_t *nvpp = NULL; + int retval; + + retval = nv_procfs_open_file(inode, file, &nvpp); + if (retval < 0) + { + return retval; + } + + retval = single_open(file, nv_procfs_show_exercise_error_forwarding, nvpp); + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + retval = nv_down_read_interruptible(&nv_system_pm_lock); + if (retval < 0) + { + single_release(inode, file); + nv_procfs_close_file(nvpp); + } + + return retval; +} + +static int +nv_procfs_close_exercise_error_forwarding( + struct inode *inode, + struct file *file +) +{ + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + nv_state_t *nv = nvpp->nv; + char *proc_buffer = &((char *)nvpp->data)[0]; + size_t count = nvpp->off; + int i = 0, status = 0; + NvU32 xid = 0; + const NvU8 MAX_XID_DIGITS = 3; + + while (i < count && i <= MAX_XID_DIGITS && proc_buffer[i] != ',') + { + if (proc_buffer[i] < '0' || proc_buffer[i] > '9') + { + status = -EINVAL; + goto done; + } + + xid = xid * 10 + (proc_buffer[i++] - '0'); + } + + if (count > (i + 1) && proc_buffer[i] == ',') + exercise_error_forwarding_va(nv, xid, &proc_buffer[i + 1], 0xdeadbee0, + 0xdeadbee1, 0xdeadbee2, 0xdeadbee3, 0xdeadbee4, 0xdeadbee5); + else + status = -EINVAL; + +done: + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + nv_procfs_close_file(nvpp); + + return status; +} + +static nv_proc_ops_t nv_procfs_exercise_error_forwarding_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_exercise_error_forwarding, + .NV_PROC_OPS_WRITE = nv_procfs_write_file, + .NV_PROC_OPS_RELEASE = nv_procfs_close_exercise_error_forwarding, +}; + +static int +nv_procfs_read_unbind_lock( + struct seq_file *s, + void *v +) +{ + nv_procfs_private_t *nvpp = s->private; + nv_state_t *nv = nvpp->nv; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + down(&nvl->ldata_lock); + if (nv->flags & NV_FLAG_UNBIND_LOCK) + { + seq_printf(s, "1\n"); + } + else + { + seq_printf(s, "0\n"); + } + up(&nvl->ldata_lock); + + return 0; +} + +static int +nv_procfs_open_unbind_lock( + struct inode *inode, + struct file *file +) +{ + nv_procfs_private_t *nvpp = NULL; + int retval; + + retval = nv_procfs_open_file(inode, file, &nvpp); + if (retval < 0) + { + return retval; + } + + retval = single_open(file, nv_procfs_read_unbind_lock, nvpp); + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + retval = nv_down_read_interruptible(&nv_system_pm_lock); + if (retval < 0) + { + single_release(inode, file); + nv_procfs_close_file(nvpp); + } + + return retval; +} + +static int +nv_procfs_close_unbind_lock( + struct inode *inode, + struct file *file +) +{ + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + nv_state_t *nv; + nvidia_stack_t *sp = nvpp->sp; + int rc = 0; + nv_linux_state_t * nvl; + int value; + + if (0 != nvpp->off) + { + nv = nvpp->nv; + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (NULL == nvpp->data || NULL == nv) + { + rc = -EINVAL; + goto done; + } + + if (sscanf((char *)nvpp->data, "%u\n", &value) != 1) + { + rc = -EINVAL; + goto done; + } + + down(&nvl->ldata_lock); + if ((value == 1) && !(nv->flags & 
NV_FLAG_UNBIND_LOCK)) + { + if (NV_ATOMIC_READ(nvl->usage_count) == 0) + rm_unbind_lock(sp, nv); + + if (nv->flags & NV_FLAG_UNBIND_LOCK) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "UnbindLock acquired\n"); + } + else + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Could not acquire UnbindLock\n"); + } + } + else if ((value == 0) && (nv->flags & NV_FLAG_UNBIND_LOCK)) + { + nv->flags &= ~NV_FLAG_UNBIND_LOCK; + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "UnbindLock released\n"); + } + up(&nvl->ldata_lock); + } + +done: + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + nv_procfs_close_file(nvpp); + + return rc; +} + +static nv_proc_ops_t nv_procfs_unbind_lock_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_unbind_lock, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_WRITE = nv_procfs_write_file, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = nv_procfs_close_unbind_lock, +}; + +static const char* +numa_status_describe(nv_numa_status_t state) +{ + if (state < 0 || state >= NV_NUMA_STATUS_COUNT) + return "invalid"; + + return nv_numa_status_messages[state]; +} + +static NvBool +numa_is_change_allowed(nv_numa_status_t current_state, nv_numa_status_t requested) +{ + NvBool allowed = NV_TRUE; + + switch (requested) { + case NV_NUMA_STATUS_OFFLINE: + case NV_NUMA_STATUS_OFFLINE_FAILED: + allowed = (current_state == NV_NUMA_STATUS_OFFLINE_IN_PROGRESS); + break; + + /* All except Offline. */ + case NV_NUMA_STATUS_OFFLINE_IN_PROGRESS: + allowed = (current_state != NV_NUMA_STATUS_OFFLINE); + break; + + case NV_NUMA_STATUS_ONLINE: + allowed = (current_state == NV_NUMA_STATUS_ONLINE_IN_PROGRESS); + break; + + case NV_NUMA_STATUS_ONLINE_FAILED: + allowed = (current_state == NV_NUMA_STATUS_ONLINE_IN_PROGRESS) || + (current_state == NV_NUMA_STATUS_ONLINE); + break; + + case NV_NUMA_STATUS_ONLINE_IN_PROGRESS: + allowed = (current_state == NV_NUMA_STATUS_OFFLINE); + break; + + /* Fallthrough. */ + case NV_NUMA_STATUS_DISABLED: + default: + return NV_FALSE; + } + + return allowed; +} + +static NV_STATUS +numa_status_read( + nv_state_t *nv, + nv_stack_t *sp, + nv_ioctl_numa_info_t *numa_info +) +{ + NV_STATUS rm_status; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + down(&nvl->ldata_lock); + + /* + * If GPU has not been initialized but NUMA info is valid, populate + * NUMA node ID and status. Memory range and offline addresses cannot + * be read at this point so fill in dummy values. + */ + if (!(nv->flags & NV_FLAG_OPEN)) + { + if (nv_platform_supports_numa(nvl)) + { + memset(numa_info, 0x0, sizeof(*numa_info)); + numa_info->nid = nvl->numa_info.node_id; + numa_info->status = nv_get_numa_status(nvl); + } + + rm_status = NV_ERR_NOT_READY; + goto done; + } + + rm_status = rm_get_gpu_numa_info(sp, nv, numa_info); + if (rm_status == NV_OK && numa_info->nid == NUMA_NO_NODE) + { + // + // RM returns NUMA_NO_NODE when running MIG instances because + // this rmClient is not subscribed to any MIG partition since + // it was subscribed to whole GPU only during RMInit and is not + // updated when MIG partitions are created. + // Returning error here so that numa_status results in EIO + // because of missing support in numa_status to use it for multiple + // numa nodes. 
+ // + // TODO: add support for multiple numa nodes in numa_status interface + // and remove this check, bug 4006012 + // + rm_status = NV_ERR_NOT_SUPPORTED; + } + numa_info->status = nv_get_numa_status(nvl); + +done: + up(&nvl->ldata_lock); + return rm_status; +} + +static int +nv_procfs_read_offline_pages( + struct seq_file *s, + void *v +) +{ + NvU32 i; + int retval = 0; + NV_STATUS rm_status; + nv_ioctl_numa_info_t numa_info = { 0 }; + nv_procfs_private_t *nvpp = s->private; + nv_stack_t *sp = nvpp->sp; + nv_state_t *nv = nvpp->nv; + + numa_info.offline_addresses.numEntries = + ARRAY_SIZE(numa_info.offline_addresses.addresses); + + rm_status = numa_status_read(nv, sp, &numa_info); + if (rm_status != NV_OK) + return -EIO; + + for (i = 0; i < numa_info.offline_addresses.numEntries; ++i) + { + seq_printf(s, "%p\n", + (void*) numa_info.offline_addresses.addresses[i]); + } + + return retval; +} + +static int +nv_procfs_open_offline_pages( + struct inode *inode, + struct file *file +) +{ + int retval; + nv_procfs_private_t *nvpp = NULL; + + retval = nv_procfs_open_file(inode, file, &nvpp); + if (retval < 0) + { + return retval; + } + + retval = single_open(file, nv_procfs_read_offline_pages, nvpp); + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + retval = nv_down_read_interruptible(&nv_system_pm_lock); + if (retval < 0) + { + single_release(inode, file); + nv_procfs_close_file(nvpp); + } + + return retval; +} + +static int +nv_procfs_close_offline_pages( + struct inode *inode, + struct file *file +) +{ + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + nv_procfs_close_file(nvpp); + + return 0; +} + +static int +nv_procfs_read_numa_status( + struct seq_file *s, + void *v +) +{ + int retval = 0; + NV_STATUS rm_status; + nv_ioctl_numa_info_t numa_info = { 0 }; + nv_procfs_private_t *nvpp = s->private; + nv_stack_t *sp = nvpp->sp; + nv_state_t *nv = nvpp->nv; + + /* + * Note: we leave numa_info.offline_addresses.numEntries as 0, so that + * the numa_status_read() callchain doesn't perform expensive page + * querying that we don't need here. + */ + rm_status = numa_status_read(nv, sp, &numa_info); + if ((rm_status != NV_OK) && (rm_status != NV_ERR_NOT_READY)) + return -EIO; + + /* Note: RM clients need to read block size from sysfs. 
*/ + seq_printf(s, "Node: %d\n", numa_info.nid); + seq_printf(s, "Status: %s\n", numa_status_describe(numa_info.status)); + + if (rm_status == NV_OK) + { + seq_printf(s, "Address: %llx\n", numa_info.numa_mem_addr); + seq_printf(s, "Size: %llx\n", numa_info.numa_mem_size); + } + + return retval; +} + +static int +nv_procfs_open_numa_status( + struct inode *inode, + struct file *file +) +{ + int retval; + nv_procfs_private_t *nvpp = NULL; + + retval = nv_procfs_open_file(inode, file, &nvpp); + if (retval < 0) + { + return retval; + } + + retval = single_open(file, nv_procfs_read_numa_status, nvpp); + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + retval = nv_down_read_interruptible(&nv_system_pm_lock); + if (retval < 0) + { + single_release(inode, file); + nv_procfs_close_file(nvpp); + } + + return retval; +} + +static int +nv_procfs_close_numa_status( + struct inode *inode, + struct file *file +) +{ + int retval = 0; + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + nvidia_stack_t *sp = nvpp->sp; + nv_state_t *nv = nvpp->nv; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + const size_t MAX_STATES = ARRAY_SIZE(nv_numa_status_messages); + nv_numa_status_t current_state = nv_get_numa_status(nvl); + char *cmd = nvpp->data; + + down(&nvl->ldata_lock); + + if (nvpp->off != 0) + { + NvU32 state; + nv_numa_status_t requested = NV_NUMA_STATUS_DISABLED; + NV_STATUS rm_status = NV_OK; + + for (state = 0; state < MAX_STATES; ++state) + { + if (strncmp(nv_numa_status_messages[state], + cmd, + NV_NUMA_STATUS_MSG_LEN) == 0) + { + requested = state; + break; + } + } + + if (requested != current_state) + { + /* Validate state transition. */ + if (!numa_is_change_allowed(current_state, requested)) + { + retval = -EINVAL; + goto done; + } + + if (requested == NV_NUMA_STATUS_OFFLINE_IN_PROGRESS) + { + /* + * If this call fails, RM is not ready to offline + * memory => retain status. + */ + rm_status = rm_gpu_numa_offline(sp, nv); + } + + if (rm_status == NV_OK) + { + retval = nv_set_numa_status(nvl, requested); + if (retval < 0) + goto done; + + if (requested == NV_NUMA_STATUS_ONLINE) + { + rm_status = rm_gpu_numa_online(sp, nv); + } + } + + retval = (rm_status == NV_OK) ? 
retval: -EBUSY; + } + } + +done: + up(&nvl->ldata_lock); + + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + nv_procfs_close_file(nvpp); + + return retval; +} + +static const nv_proc_ops_t nv_procfs_numa_status_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_numa_status, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_WRITE = nv_procfs_write_file, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = nv_procfs_close_numa_status, +}; + +static const nv_proc_ops_t nv_procfs_offline_pages_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_offline_pages, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = nv_procfs_close_offline_pages, +}; + +static int +nv_procfs_read_text_file( + struct seq_file *s, + void *v +) +{ + seq_puts(s, s->private); + return 0; +} + +NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(text_file); + +static void +nv_procfs_add_text_file( + struct proc_dir_entry *parent, + const char *filename, + const char *text +) +{ + NV_CREATE_PROC_FILE(filename, parent, text_file, (void *)text); +} +#endif + +void nv_procfs_add_warning( + const char *filename, + const char *text +) +{ +#if defined(CONFIG_PROC_FS) + nv_procfs_add_text_file(proc_nvidia_warnings, filename, text); +#endif +} + +int nv_procfs_init(void) +{ +#if defined(CONFIG_PROC_FS) + NvU32 i = 0; + char nv_dir_name[20]; + struct proc_dir_entry *entry; + + snprintf(nv_dir_name, sizeof(nv_dir_name), "driver/%s", nv_device_name); + + nv_dir_name[sizeof(nv_dir_name) - 1] = '\0'; + + proc_nvidia = NV_CREATE_PROC_DIR(nv_dir_name, NULL); + + if (!proc_nvidia) + goto failed; + + entry = NV_CREATE_PROC_FILE("params", proc_nvidia, params, NULL); + if (!entry) + goto failed; + + entry = NV_CREATE_PROC_FILE("registry", proc_nvidia, registry, NULL); + if (!entry) + goto failed; + +#if defined(CONFIG_PM) + entry = NV_CREATE_PROC_FILE("suspend_depth", proc_nvidia, suspend_depth, NULL); + if (!entry) + goto failed; + + entry = NV_CREATE_PROC_FILE("suspend", proc_nvidia, suspend, NULL); + if (!entry) + goto failed; +#endif + + proc_nvidia_warnings = NV_CREATE_PROC_DIR("warnings", proc_nvidia); + if (!proc_nvidia_warnings) + goto failed; + nv_procfs_add_text_file(proc_nvidia_warnings, "README", __README_warning); + + proc_nvidia_patches = NV_CREATE_PROC_DIR("patches", proc_nvidia); + if (!proc_nvidia_patches) + goto failed; + + for (i = 0; __nv_patches[i].short_description; i++) + { + nv_procfs_add_text_file(proc_nvidia_patches, + __nv_patches[i].short_description, __nv_patches[i].description); + } + + nv_procfs_add_text_file(proc_nvidia_patches, "README", __README_patches); + + entry = NV_CREATE_PROC_FILE("version", proc_nvidia, version, NULL); + if (!entry) + goto failed; + + proc_nvidia_gpus = NV_CREATE_PROC_DIR("gpus", proc_nvidia); + if (!proc_nvidia_gpus) + goto failed; +#endif + return 0; +#if defined(CONFIG_PROC_FS) +failed: + proc_remove(proc_nvidia); + return -ENOMEM; +#endif +} + +void nv_procfs_exit(void) +{ +#if defined(CONFIG_PROC_FS) + proc_remove(proc_nvidia); +#endif +} + +int nv_procfs_add_gpu(nv_linux_state_t *nvl) +{ +#if defined(CONFIG_PROC_FS) + nv_state_t *nv; + + /* Buffer size is 32 in order to fit the full name when PCI domain is 32 bit. 
*/ + char name[32]; + struct proc_dir_entry *proc_nvidia_gpu, *entry; + + nv = NV_STATE_PTR(nvl); + + snprintf(name, sizeof(name), "%04x:%02x:%02x.%1x", + nv->pci_info.domain, nv->pci_info.bus, + nv->pci_info.slot, PCI_FUNC(nvl->pci_dev->devfn)); + + proc_nvidia_gpu = NV_CREATE_PROC_DIR(name, proc_nvidia_gpus); + if (!proc_nvidia_gpu) + goto failed; + + entry = NV_CREATE_PROC_FILE("information", proc_nvidia_gpu, gpu_info, + nv); + if (!entry) + goto failed; + + entry = NV_CREATE_PROC_FILE("registry", proc_nvidia_gpu, registry, nv); + if (!entry) + goto failed; + + entry = NV_CREATE_PROC_FILE("power", proc_nvidia_gpu, power, nv); + if (!entry) + goto failed; + + if (IS_EXERCISE_ERROR_FORWARDING_ENABLED()) + { + entry = NV_CREATE_PROC_FILE("exercise_error_forwarding", proc_nvidia_gpu, + exercise_error_forwarding, nv); + if (!entry) + goto failed; + } + + if (os_is_vgx_hyper()) + { + entry = NV_CREATE_PROC_FILE("unbindLock", proc_nvidia_gpu, unbind_lock, nv); + if (!entry) + goto failed; + } + + if (nv_get_numa_status(nvl) != NV_IOCTL_NUMA_STATUS_DISABLED) + { + entry = NV_CREATE_PROC_FILE("numa_status", proc_nvidia_gpu, numa_status, + nv); + if (!entry) + goto failed; + + entry = NV_CREATE_PROC_FILE("offline_pages", proc_nvidia_gpu, offline_pages, + nv); + if (!entry) + goto failed; + } + + nvl->proc_dir = proc_nvidia_gpu; +#endif + return 0; +#if defined(CONFIG_PROC_FS) +failed: + if (proc_nvidia_gpu) + { + proc_remove(proc_nvidia_gpu); + } + return -1; +#endif +} + +void nv_procfs_remove_gpu(nv_linux_state_t *nvl) +{ +#if defined(CONFIG_PROC_FS) + proc_remove(nvl->proc_dir); +#endif +} diff --git a/kernel-open/nvidia/nv-reg.h b/kernel-open/nvidia/nv-reg.h new file mode 100644 index 0000000..d0d60fc --- /dev/null +++ b/kernel-open/nvidia/nv-reg.h @@ -0,0 +1,1093 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// +// This file holds Unix-specific NVIDIA driver options +// + +#ifndef _RM_REG_H_ +#define _RM_REG_H_ + +#include "nvtypes.h" +#include "nv-firmware-registry.h" + +/* + * use NV_REG_STRING to stringify a registry key when using that registry key + */ + +#define __NV_REG_STRING(regkey) #regkey +#define NV_REG_STRING(regkey) __NV_REG_STRING(regkey) + +/* + * use NV_DEFINE_REG_ENTRY and NV_DEFINE_PARAMS_TABLE_ENTRY to simplify definition + * of registry keys in the kernel module source code. 
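+ *
+ * As an illustration of the expansion (using the ModifyDeviceFiles key
+ * defined below):
+ *
+ *   NV_DEFINE_REG_ENTRY(__NV_MODIFY_DEVICE_FILES, 1)
+ *
+ * becomes "static NvU32 NVreg_ModifyDeviceFiles = 1" (plus the optional
+ * NV_MODULE_PARAMETER wrapper), and
+ *
+ *   NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MODIFY_DEVICE_FILES)
+ *
+ * yields the table entry { "ModifyDeviceFiles", &NVreg_ModifyDeviceFiles }.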
+ */
+
+#define __NV_REG_VAR(regkey) NVreg_##regkey
+
+#if defined(NV_MODULE_PARAMETER)
+#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
+    static NvU32 __NV_REG_VAR(regkey) = (default_value); \
+    NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
+#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
+    NvU32 __NV_REG_VAR(regkey) = (default_value); \
+    NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
+#else
+#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
+    static NvU32 __NV_REG_VAR(regkey) = (default_value)
+#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
+    NvU32 __NV_REG_VAR(regkey) = (default_value)
+#endif
+
+#if defined(NV_MODULE_STRING_PARAMETER)
+#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
+    char *__NV_REG_VAR(regkey) = (default_value); \
+    NV_MODULE_STRING_PARAMETER(__NV_REG_VAR(regkey))
+#else
+#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
+    char *__NV_REG_VAR(regkey) = (default_value)
+#endif
+
+#define NV_DEFINE_PARAMS_TABLE_ENTRY(regkey) \
+    { NV_REG_STRING(regkey), &__NV_REG_VAR(regkey) }
+
+/*
+ * Like NV_DEFINE_PARAMS_TABLE_ENTRY, but allows a mismatch between the name of
+ * the regkey and the name of the module parameter. When using this macro, the
+ * name of the parameter is passed to the extra "parameter" argument, and it is
+ * this name that must be used in the NV_DEFINE_REG_ENTRY() macro.
+ */
+
+#define NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(regkey, parameter) \
+    { NV_REG_STRING(regkey), &__NV_REG_VAR(parameter) }
+
+/*
+ *----------------- registry key definitions--------------------------
+ */
+
+/*
+ * Option: ModifyDeviceFiles
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will verify the validity
+ * of the NVIDIA device files in /dev and attempt to dynamically modify
+ * and/or (re-)create them, if necessary. If you don't wish for the NVIDIA
+ * driver to touch the device files, you can use this registry key.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver and the
+ * NVIDIA capability driver. Furthermore, the NVIDIA capability driver
+ * provides a modifiable /proc file entry (DeviceFileModify=0/1) to alter the
+ * behavior of this module parameter per device file.
+ *
+ * Possible Values:
+ *  0 = disable dynamic device file management
+ *  1 = enable dynamic device file management (default)
+ */
+
+#define __NV_MODIFY_DEVICE_FILES ModifyDeviceFiles
+#define NV_REG_MODIFY_DEVICE_FILES NV_REG_STRING(__NV_MODIFY_DEVICE_FILES)
+
+/*
+ * Option: DeviceFileUID
+ *
+ * Description:
+ *
+ * This registry key specifies the UID assigned to the NVIDIA device files
+ * created and/or modified by the NVIDIA driver when dynamic device file
+ * management is enabled.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver.
+ *
+ * The default UID is 0 ('root').
+ */
+
+#define __NV_DEVICE_FILE_UID DeviceFileUID
+#define NV_REG_DEVICE_FILE_UID NV_REG_STRING(__NV_DEVICE_FILE_UID)
+
+/*
+ * Option: DeviceFileGID
+ *
+ * Description:
+ *
+ * This registry key specifies the GID assigned to the NVIDIA device files
+ * created and/or modified by the NVIDIA driver when dynamic device file
+ * management is enabled.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver.
+ *
+ * The default GID is 0 ('root').
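+ *
+ * For example, passing NVreg_DeviceFileGID=44 as a module parameter would
+ * typically hand the device files to the "video" group, whose GID is 44 on
+ * many distributions.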
+ */
+
+#define __NV_DEVICE_FILE_GID DeviceFileGID
+#define NV_REG_DEVICE_FILE_GID NV_REG_STRING(__NV_DEVICE_FILE_GID)
+
+/*
+ * Option: DeviceFileMode
+ *
+ * Description:
+ *
+ * This registry key specifies the device file mode assigned to the NVIDIA
+ * device files created and/or modified by the NVIDIA driver when dynamic
+ * device file management is enabled.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver.
+ *
+ * The default mode is 0666 (octal, rw-rw-rw-).
+ */
+
+#define __NV_DEVICE_FILE_MODE DeviceFileMode
+#define NV_REG_DEVICE_FILE_MODE NV_REG_STRING(__NV_DEVICE_FILE_MODE)
+
+/*
+ * Option: ResmanDebugLevel
+ *
+ * Default value: ~0
+ */
+
+#define __NV_RESMAN_DEBUG_LEVEL ResmanDebugLevel
+#define NV_REG_RESMAN_DEBUG_LEVEL NV_REG_STRING(__NV_RESMAN_DEBUG_LEVEL)
+
+/*
+ * Option: RmLogonRC
+ *
+ * Default value: 1
+ */
+
+#define __NV_RM_LOGON_RC RmLogonRC
+#define NV_REG_RM_LOGON_RC NV_REG_STRING(__NV_RM_LOGON_RC)
+
+/*
+ * Option: InitializeSystemMemoryAllocations
+ *
+ * Description:
+ *
+ * The NVIDIA Linux driver normally clears system memory it allocates
+ * for use with GPUs or within the driver stack. This is to ensure
+ * that potentially sensitive data is not rendered accessible by
+ * arbitrary user applications.
+ *
+ * Owners of single-user systems or similar trusted configurations may
+ * choose to disable the aforementioned clears using this option and
+ * potentially improve performance.
+ *
+ * Possible values:
+ *
+ *  1 = zero out system memory allocations (default)
+ *  0 = do not perform memory clears
+ */
+
+#define __NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
+    InitializeSystemMemoryAllocations
+#define NV_REG_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
+    NV_REG_STRING(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS)
+
+/*
+ * Option: RegistryDwords
+ *
+ * Description:
+ *
+ * This option accepts a semicolon-separated list of key=value pairs. Each
+ * key name is checked against the table of static options; if a match is
+ * found, the static option value is overridden, but invalid options remain
+ * invalid. Pairs that do not match an entry in the static option table
+ * are passed on to the RM directly.
+ *
+ * Format:
+ *
+ * NVreg_RegistryDwords="<key=value>;<key=value>;..."
+ */
+
+#define __NV_REGISTRY_DWORDS RegistryDwords
+#define NV_REG_REGISTRY_DWORDS NV_REG_STRING(__NV_REGISTRY_DWORDS)
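+
+/*
+ * For illustration, with hypothetical key names, loading the module with
+ * something like
+ *
+ *   NVreg_RegistryDwords="SomeKey=0x1;SomeOtherKey=16"
+ *
+ * (e.g. via an "options nvidia ..." line in modprobe.d) overrides SomeKey
+ * and SomeOtherKey if they match entries in the static option table, and
+ * otherwise passes them through to the RM.
+ */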
+ * Pairs that do not match an entry in the static option table are passed
+ * on to the RM directly.
+ *
+ * Format:
+ *
+ *  NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F;<key=value>;<key=value>;..; \
+ *                                 pci=DDDD:BB:DD.F;<key=value>;..;"
+ */
+
+#define __NV_REGISTRY_DWORDS_PER_DEVICE RegistryDwordsPerDevice
+#define NV_REG_REGISTRY_DWORDS_PER_DEVICE NV_REG_STRING(__NV_REGISTRY_DWORDS_PER_DEVICE)
+
+#define __NV_RM_MSG RmMsg
+#define NV_RM_MSG NV_REG_STRING(__NV_RM_MSG)
+
+/*
+ * Option: UsePageAttributeTable
+ *
+ * Description:
+ *
+ * Enable/disable use of the page attribute table (PAT) available in
+ * modern x86/x86-64 processors to set the effective memory type of memory
+ * mappings to write-combining (WC).
+ *
+ * If this option is enabled, an x86 processor with PAT support is present,
+ * and the host system's Linux kernel did not configure one of the PAT
+ * entries to indicate the WC memory type, the driver will change the
+ * second entry in the PAT from its default (write-through (WT)) to WC at
+ * module load time. If the kernel did update one of the PAT entries, the
+ * driver will not modify the PAT.
+ *
+ * In both cases, the driver will honor attempts to map memory with the WC
+ * memory type by selecting the appropriate PAT entry using the correct
+ * set of PTE flags.
+ *
+ * Possible values:
+ *
+ * ~0 = use the NVIDIA driver's default logic (default)
+ *  1 = enable use of the PAT for WC mappings
+ *  0 = disable use of the PAT for WC mappings
+ */
+
+#define __NV_USE_PAGE_ATTRIBUTE_TABLE UsePageAttributeTable
+#define NV_USE_PAGE_ATTRIBUTE_TABLE NV_REG_STRING(__NV_USE_PAGE_ATTRIBUTE_TABLE)
+
+/*
+ * Option: EnableMSI
+ *
+ * Description:
+ *
+ * When this option is enabled and the host kernel supports the MSI feature,
+ * the NVIDIA driver will enable the PCI-E MSI capability of GPUs that
+ * support this feature, instead of using PCI-E wired interrupts.
+ *
+ * Possible Values:
+ *
+ *  0 = disable MSI interrupt
+ *  1 = enable MSI interrupt (default)
+ */
+
+#define __NV_ENABLE_MSI EnableMSI
+#define NV_REG_ENABLE_MSI NV_REG_STRING(__NV_ENABLE_MSI)
+
+/*
+ * Option: EnablePCIeGen3
+ *
+ * Description:
+ *
+ * Due to interoperability problems seen with Kepler PCIe Gen3 capable GPUs
+ * when configured on SandyBridge E desktop platforms, NVIDIA feels that
+ * delivering a reliable, high-quality experience is not currently possible
+ * in PCIe Gen3 mode on all PCIe Gen3 platforms. Therefore, Quadro, Tesla
+ * and NVS Kepler products operate in PCIe Gen2 mode by default. You may
+ * use this option to enable PCIe Gen3 support.
+ *
+ * This is completely unsupported!
+ *
+ * Possible Values:
+ *
+ *  0: disable PCIe Gen3 support (default)
+ *  1: enable PCIe Gen3 support
+ */
+
+#define __NV_ENABLE_PCIE_GEN3 EnablePCIeGen3
+#define NV_REG_ENABLE_PCIE_GEN3 NV_REG_STRING(__NV_ENABLE_PCIE_GEN3)
+
+/*
+ * Option: MemoryPoolSize
+ *
+ * Description:
+ *
+ * When set to a non-zero value, this option specifies the size of the
+ * memory pool, given as a multiple of 1 GB, created on VMware ESXi to
+ * satisfy any system memory allocations requested by the NVIDIA kernel
+ * module.
+ */
+
+#define __NV_MEMORY_POOL_SIZE MemoryPoolSize
+#define NV_REG_MEMORY_POOL_SIZE NV_REG_STRING(__NV_MEMORY_POOL_SIZE)
+
+/*
+ * Option: KMallocHeapMaxSize
+ *
+ * Description:
+ *
+ * When set to a non-zero value, this option specifies the maximum size of
+ * the heap memory space reserved for kmalloc operations.
+ * It is given as a multiple of 1 MB, and is created on VMware ESXi to
+ * satisfy any system memory allocations requested by the NVIDIA kernel
+ * module.
+ */
+
+#define __NV_KMALLOC_HEAP_MAX_SIZE KMallocHeapMaxSize
+#define NV_KMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_KMALLOC_HEAP_MAX_SIZE)
+
+/*
+ * Option: VMallocHeapMaxSize
+ *
+ * Description:
+ *
+ * When set to a non-zero value, this option specifies the maximum size of
+ * the heap memory space reserved for vmalloc operations. It is given as a
+ * multiple of 1 MB, and is created on VMware ESXi to satisfy any system
+ * memory allocations requested by the NVIDIA kernel module.
+ */
+
+#define __NV_VMALLOC_HEAP_MAX_SIZE VMallocHeapMaxSize
+#define NV_VMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_VMALLOC_HEAP_MAX_SIZE)
+
+/*
+ * Option: IgnoreMMIOCheck
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will ignore the
+ * MMIO limit check during device probe on the VMware ESXi kernel. This is
+ * typically necessary when the VMware ESXi MMIO limit differs between a
+ * base version and its updates. Customers using updates can set this
+ * regkey to avoid probe failures.
+ */
+
+#define __NV_IGNORE_MMIO_CHECK IgnoreMMIOCheck
+#define NV_REG_IGNORE_MMIO_CHECK NV_REG_STRING(__NV_IGNORE_MMIO_CHECK)
+
+/*
+ * Option: pci
+ *
+ * Description:
+ *
+ * On Unix platforms, a per-GPU registry key can be specified as:
+ * NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F;<key=value>;...",
+ * where DDDD:BB:DD.F refers to Domain:Bus:Device.Function.
+ * The "pci" key identifies that what follows is a PCI BDF identifier,
+ * to which the subsequent registry keys are to be applied.
+ *
+ * This define is not used on non-UNIX platforms.
+ *
+ * Possible Formats for value:
+ *
+ *  1) bus:slot             : Domain and function default to 0.
+ *  2) domain:bus:slot      : Function defaults to 0.
+ *  3) domain:bus:slot.func : Complete PCI BDF identifier string.
+ */
+#define __NV_PCI_DEVICE_BDF pci
+#define NV_REG_PCI_DEVICE_BDF NV_REG_STRING(__NV_PCI_DEVICE_BDF)
+
+/*
+ * Option: EnableStreamMemOPs
+ *
+ * Description:
+ *
+ * When this option is enabled, the CUDA driver will enable support for
+ * CUDA Stream Memory Operations in user-mode applications, which so far
+ * must be disabled by default due to limited support in devtools.
+ *
+ * Note: this is treated as a hint. MemOPs may still be left disabled by
+ * the CUDA driver for other reasons.
+ *
+ * Possible Values:
+ *
+ *  0 = disable feature (default)
+ *  1 = enable feature
+ */
+#define __NV_ENABLE_STREAM_MEMOPS EnableStreamMemOPs
+#define NV_REG_ENABLE_STREAM_MEMOPS NV_REG_STRING(__NV_ENABLE_STREAM_MEMOPS)
+
+/*
+ * Option: EnableUserNUMAManagement
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will require the
+ * user-mode NVIDIA Persistence daemon to manage the onlining and offlining
+ * of its NUMA device memory.
+ *
+ * This option has no effect on platforms that do not support onlining
+ * device memory to a NUMA node (this feature is only supported on certain
+ * POWER9 systems).
+ *
+ * Possible Values:
+ *
+ *  0: disable user-mode NUMA management
+ *  1: enable user-mode NUMA management (default)
+ */
+#define __NV_ENABLE_USER_NUMA_MANAGEMENT EnableUserNUMAManagement
+#define NV_REG_ENABLE_USER_NUMA_MANAGEMENT NV_REG_STRING(__NV_ENABLE_USER_NUMA_MANAGEMENT)
+
+/*
+ * Option: CoherentGPUMemoryMode
+ *
+ * Description:
+ *
+ * This option can be set to control how GPU memory is accessed through
+ * the coherent link.
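+ *
+ * As a hypothetical usage sketch, the mode can be selected via a module
+ * parameter, choosing one of the string values described below:
+ *
+ *   options nvidia NVreg_CoherentGPUMemoryMode="driver"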
+ *
+ * This option has no effect on platforms that do not support onlining
+ * device memory to a NUMA node.
+ *
+ * Possible string values:
+ *
+ * "driver"          : disable onlining coherent memory to the OS as a NUMA
+ *                     node; the driver will manage it in this case.
+ * "numa" (or unset) : enable onlining coherent memory to the OS as a NUMA
+ *                     node (default).
+ */
+#define __NV_COHERENT_GPU_MEMORY_MODE CoherentGPUMemoryMode
+#define NV_REG_COHERENT_GPU_MEMORY_MODE NV_REG_STRING(__NV_COHERENT_GPU_MEMORY_MODE)
+
+/*
+ * Option: GpuBlacklist
+ *
+ * Description:
+ *
+ * This option accepts a list of blacklisted GPUs, separated by commas, that
+ * cannot be attached or used. Each blacklisted GPU is identified by a UUID
+ * in ASCII format with a leading "GPU-". An exact match is required; no
+ * partial UUIDs. This regkey is deprecated and will be removed in the
+ * future. Use NV_REG_EXCLUDED_GPUS instead.
+ */
+#define __NV_GPU_BLACKLIST GpuBlacklist
+#define NV_REG_GPU_BLACKLIST NV_REG_STRING(__NV_GPU_BLACKLIST)
+
+/*
+ * Option: ExcludedGpus
+ *
+ * Description:
+ *
+ * This option accepts a list of excluded GPUs, separated by commas, that
+ * cannot be attached or used. Each excluded GPU is identified by a UUID
+ * in ASCII format with a leading "GPU-". An exact match is required; no
+ * partial UUIDs.
+ */
+#define __NV_EXCLUDED_GPUS ExcludedGpus
+#define NV_REG_EXCLUDED_GPUS NV_REG_STRING(__NV_EXCLUDED_GPUS)
+
+/*
+ * Option: NvLinkDisable
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will not attempt
+ * to initialize or train NVLink connections for any GPUs. A system reboot
+ * is required for changes to take effect.
+ *
+ * This option has no effect if no GPUs support NVLink.
+ *
+ * Possible Values:
+ *
+ *  0: Do not disable NVLink (default)
+ *  1: Disable NVLink
+ */
+#define __NV_NVLINK_DISABLE NvLinkDisable
+#define NV_REG_NVLINK_DISABLE NV_REG_STRING(__NV_NVLINK_DISABLE)
+
+/*
+ * Option: RestrictProfilingToAdminUsers
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will prevent users
+ * without administrative access (i.e., the CAP_SYS_ADMIN capability) from
+ * using GPU performance counters.
+ *
+ * Possible Values:
+ *
+ *  0: Do not restrict GPU counters (default)
+ *  1: Restrict GPU counters to system administrators only
+ */
+
+#define __NV_RM_PROFILING_ADMIN_ONLY RmProfilingAdminOnly
+#define __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER RestrictProfilingToAdminUsers
+#define NV_REG_RM_PROFILING_ADMIN_ONLY NV_REG_STRING(__NV_RM_PROFILING_ADMIN_ONLY)
+
+/*
+ * Option: TemporaryFilePath
+ *
+ * Description:
+ *
+ * When specified, this option changes the location in which the
+ * NVIDIA kernel module will create unnamed temporary files (e.g., to
+ * save the contents of video memory in). The indicated path must
+ * be a directory. By default, temporary files are created in /tmp.
+ */
+#define __NV_TEMPORARY_FILE_PATH TemporaryFilePath
+#define NV_REG_TEMPORARY_FILE_PATH NV_REG_STRING(__NV_TEMPORARY_FILE_PATH)
+
+/*
+ * Option: PreserveVideoMemoryAllocations
+ *
+ * If enabled, this option prompts the NVIDIA kernel module to save and
+ * restore all video memory allocations across system power management
+ * cycles, i.e., suspend/resume and hibernate/restore. Otherwise,
+ * only select allocations are preserved.
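+ *
+ * Hypothetical usage sketch (a modprobe.d entry, for illustration):
+ *
+ *   options nvidia NVreg_PreserveVideoMemoryAllocations=1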
+ *
+ * Possible Values:
+ *
+ *  0: Preserve only select video memory allocations (default)
+ *  1: Preserve all video memory allocations
+ */
+#define __NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS PreserveVideoMemoryAllocations
+#define NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS \
+    NV_REG_STRING(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS)
+
+/*
+ * Option: EnableS0ixPowerManagement
+ *
+ * When this option is enabled, the NVIDIA driver will use S0ix-based
+ * power management for system suspend/resume, if both the platform and
+ * the GPU support S0ix.
+ *
+ * During system suspend, if S0ix is enabled and
+ * video memory usage is above the threshold configured by
+ * 'S0ixPowerManagementVideoMemoryThreshold', video memory will be kept
+ * in self-refresh mode while the rest of the GPU is powered down.
+ *
+ * Otherwise, the driver will copy video memory contents to system memory
+ * and power off the video memory along with the GPU.
+ *
+ * Possible Values:
+ *
+ *  0: Disable S0ix-based power management (default)
+ *  1: Enable S0ix-based power management
+ */
+
+#define __NV_ENABLE_S0IX_POWER_MANAGEMENT EnableS0ixPowerManagement
+#define NV_REG_ENABLE_S0IX_POWER_MANAGEMENT \
+    NV_REG_STRING(__NV_ENABLE_S0IX_POWER_MANAGEMENT)
+
+/*
+ * Option: S0ixPowerManagementVideoMemoryThreshold
+ *
+ * This option controls the threshold that the NVIDIA driver will use during
+ * S0ix-based system power management.
+ *
+ * When S0ix is enabled and the system is suspended, the driver will
+ * compare the amount of video memory in use with this threshold,
+ * to decide whether to keep video memory in self-refresh or copy video
+ * memory content to system memory.
+ *
+ * See the 'EnableS0ixPowerManagement' option.
+ *
+ * Values are expressed in Megabytes (1048576 bytes).
+ *
+ * The default value for this option is 256MB.
+ */
+#define __NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
+    S0ixPowerManagementVideoMemoryThreshold
+#define NV_REG_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
+    NV_REG_STRING(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
+
+/*
+ * Option: DynamicPowerManagement
+ *
+ * This option controls how aggressively the NVIDIA kernel module will manage
+ * GPU power through kernel interfaces.
+ *
+ * Possible Values:
+ *
+ *  0: Never allow the GPU to be powered down.
+ *  1: Power down the GPU when it is not initialized.
+ *  2: Power down the GPU after it has been inactive for some time.
+ *  3: (Default) Power down the GPU after a period of inactivity (i.e.,
+ *     mode 2) on Ampere or later notebooks. Otherwise, do not power down
+ *     the GPU.
+ */
+#define __NV_DYNAMIC_POWER_MANAGEMENT DynamicPowerManagement
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT \
+    NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT)
+
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT_NEVER   0
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT_COARSE  1
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT_FINE    2
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT_DEFAULT 3
+
+/*
+ * Option: DynamicPowerManagementVideoMemoryThreshold
+ *
+ * This option controls the threshold that the NVIDIA driver will use
+ * when selecting the dynamic power management scheme.
+ *
+ * When the driver detects that the GPU is idle, it will compare the amount
+ * of video memory in use with this threshold.
+ *
+ * If the current video memory usage is less than the threshold, the
+ * driver may preserve video memory contents in system memory and power off
+ * the video memory along with the GPU itself, if supported.
+ * Otherwise, the video memory will be kept in self-refresh mode while
+ * powering down the rest of the GPU, if supported.
+ *
+ * Values are expressed in Megabytes (1048576 bytes).
+ *
+ * If the requested value is greater than 200MB (the default), then it
+ * will be capped to 200MB.
+ */
+#define __NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
+    DynamicPowerManagementVideoMemoryThreshold
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
+    NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
+
+/*
+ * Option: TegraGpuPgMask
+ *
+ * This option controls the TPC/GPC/FBP power-gating mask for the Tegra iGPU.
+ */
+#define __NV_TEGRA_GPU_PG_MASK TegraGpuPgMask
+#define NV_REG_TEGRA_GPU_PG_MASK \
+    NV_REG_STRING(__NV_TEGRA_GPU_PG_MASK)
+
+/*
+ * Option: RegisterPCIDriver
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will register with the
+ * PCI subsystem.
+ *
+ * Possible values:
+ *
+ *  1 - register as PCI driver (default)
+ *  0 - do not register as PCI driver
+ */
+
+#define __NV_REGISTER_PCI_DRIVER RegisterPCIDriver
+#define NV_REG_REGISTER_PCI_DRIVER NV_REG_STRING(__NV_REGISTER_PCI_DRIVER)
+
+/*
+ * Option: RegisterPlatformDeviceDriver
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will register with the
+ * platform subsystem.
+ *
+ * Possible values:
+ *
+ *  1 - register as platform driver (default)
+ *  0 - do not register as platform driver
+ */
+
+#define __NV_REGISTER_PLATFORM_DEVICE_DRIVER RegisterPlatformDeviceDriver
+#define NV_REG_REGISTER_PLATFORM_DEVICE_DRIVER NV_REG_STRING(__NV_REGISTER_PLATFORM_DEVICE_DRIVER)
+
+/*
+ * Option: EnablePCIERelaxedOrderingMode
+ *
+ * Description:
+ *
+ * When this option is enabled, the registry key RmSetPCIERelaxedOrdering
+ * will be set to NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE,
+ * causing every device to set the relaxed ordering bit to 1 in all outbound
+ * MWr transaction-layer packets. This is equivalent to setting the regkey
+ * to FORCE_ENABLE as a non-per-device registry key.
+ *
+ * Possible values:
+ *  0 - Do not enable PCIe TLP relaxed ordering bit-setting (default)
+ *  1 - Enable PCIe TLP relaxed ordering bit-setting
+ */
+#define __NV_ENABLE_PCIE_RELAXED_ORDERING_MODE EnablePCIERelaxedOrderingMode
+#define NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE \
+    NV_REG_STRING(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE)
+
+/*
+ * Option: EnableResizableBar
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will attempt to resize
+ * BAR1 to match the framebuffer size, or the next largest available size,
+ * on supported machines. This is currently only implemented for Linux.
+ *
+ * Possible values:
+ *  0 - Do not enable PCI BAR resizing
+ *  1 - Enable PCI BAR resizing
+ */
+#define __NV_ENABLE_RESIZABLE_BAR EnableResizableBar
+#define NV_REG_ENABLE_RESIZABLE_BAR NV_REG_STRING(__NV_ENABLE_RESIZABLE_BAR)
+
+/*
+ * Option: EnableGpuFirmware
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will enable use of GPU
+ * firmware.
+ *
+ * If this key is set globally for the system, the driver may still attempt
+ * to apply some policies to maintain uniform firmware modes across all
+ * GPUs. This may result in the driver failing initialization on some GPUs
+ * to maintain such a policy.
+ *
+ * If this key is set using NVreg_RegistryDwordsPerDevice, then the driver
+ * will attempt to honor whatever configuration is specified without
+ * applying additional policies.
+ * This may also result in failed GPU initializations if the configuration
+ * is not possible (for example, if the firmware is missing from the
+ * filesystem, or the GPU is not capable).
+ *
+ * NOTE: More details for this regkey can be found in nv-firmware-registry.h
+ */
+#define __NV_ENABLE_GPU_FIRMWARE EnableGpuFirmware
+#define NV_REG_ENABLE_GPU_FIRMWARE NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE)
+
+/*
+ * Option: EnableGpuFirmwareLogs
+ *
+ * When this option is enabled, the NVIDIA driver will send GPU firmware
+ * logs to the system log, when possible.
+ *
+ * NOTE: More details for this regkey can be found in nv-firmware-registry.h
+ */
+#define __NV_ENABLE_GPU_FIRMWARE_LOGS EnableGpuFirmwareLogs
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE_LOGS)
+
+/*
+ * Option: EnableDbgBreakpoint
+ *
+ * When this option is set to a non-zero value, and the kernel is configured
+ * appropriately, assertions within resman will trigger a CPU breakpoint
+ * (e.g., INT3 on x86_64), assumed to be caught by an attached debugger.
+ *
+ * When this option is set to the value zero (the default), assertions
+ * within resman will print to the system log, but no CPU breakpoint will
+ * be triggered.
+ */
+#define __NV_ENABLE_DBG_BREAKPOINT EnableDbgBreakpoint
+
+/*
+ * Option: OpenRmEnableUnsupportedGpus
+ *
+ * This option to require opt-in for use of Open RM on non-Data-Center
+ * GPUs is deprecated and no longer required. The kernel module parameter
+ * is left here, though ignored, for backwards compatibility.
+ */
+#define __NV_OPENRM_ENABLE_UNSUPPORTED_GPUS OpenRmEnableUnsupportedGpus
+
+/*
+ * Option: NVreg_DmaRemapPeerMmio
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will use device driver
+ * APIs provided by the Linux kernel for DMA-remapping part of a device's
+ * MMIO region to another device, creating, e.g., IOMMU mappings as
+ * necessary. When this option is disabled, the NVIDIA driver will instead
+ * only apply a fixed offset, which may be zero, to CPU physical addresses
+ * to produce the DMA address for the peer's MMIO region, and no IOMMU
+ * mappings will be created.
+ *
+ * This option only affects peer MMIO DMA mappings, and not system memory
+ * mappings.
+ *
+ * Possible Values:
+ *  0 = disable dynamic DMA remapping of peer MMIO regions
+ *  1 = enable dynamic DMA remapping of peer MMIO regions (default)
+ */
+#define __NV_DMA_REMAP_PEER_MMIO DmaRemapPeerMmio
+#define NV_DMA_REMAP_PEER_MMIO NV_REG_STRING(__NV_DMA_REMAP_PEER_MMIO)
+#define NV_DMA_REMAP_PEER_MMIO_DISABLE 0x00000000
+#define NV_DMA_REMAP_PEER_MMIO_ENABLE  0x00000001
+
+/*
+ * Option: NVreg_RmNvlinkBandwidthLinkCount
+ *
+ * Description:
+ *
+ * This option allows the user to reduce the GPU NVLink bandwidth to save
+ * power.
+ *
+ * This option is only for Blackwell+ GPUs with NVLINK version 5.0.
+ */
+#define __NV_RM_NVLINK_BW_LINK_COUNT RmNvlinkBandwidthLinkCount
+#define NV_RM_NVLINK_BW_LINK_COUNT NV_REG_STRING(__NV_RM_NVLINK_BW_LINK_COUNT)
+
+/*
+ * Option: NVreg_RmNvlinkBandwidth
+ *
+ * Description:
+ *
+ * This option allows the user to reduce the NVLINK P2P bandwidth to save
+ * power. The option takes a string value.
+ *
+ * Possible string values:
+ *   OFF:      0% bandwidth
+ *   MIN:      15%-25% bandwidth, depending on the system's NVLink topology
+ *   HALF:     50% bandwidth
+ *   3QUARTER: 75% bandwidth
+ *   FULL:     100% bandwidth (default)
+ *
+ * This option is only for Hopper+ GPUs with NVLINK version 4.0.
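+ *
+ * Hypothetical usage sketch (for illustration), selecting one of the
+ * string values listed above:
+ *
+ *   options nvidia NVreg_RmNvlinkBandwidth="HALF"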
+ */ +#define __NV_RM_NVLINK_BW RmNvlinkBandwidth +#define NV_RM_NVLINK_BW NV_REG_STRING(__NV_RM_NVLINK_BW) + +/* + * Option: NVreg_EnableNonblockingOpen + * + * Description: + * + * When this option is enabled, the NVIDIA driver will try to perform any + * required device initialization in the background when /dev/nvidiaN devices + * are opened with the flag O_NONBLOCK. + * + * Possible Values: + * 0 = O_NONBLOCK flag when opening devices is ignored + * 1 = O_NONBLOCK flag when opening devices results in background device + * initialization (default) + */ +#define __NV_ENABLE_NONBLOCKING_OPEN EnableNonblockingOpen +#define NV_ENABLE_NONBLOCKING_OPEN NV_REG_STRING(__NV_ENABLE_NONBLOCKING_OPEN) + +/* + * Option: NVreg_ImexChannelCount + * + * Description: + * + * This option allows users to specify the number of IMEX (import/export) + * channels. Within an IMEX domain, the channels allow sharing memory + * securely in a multi-user environment using the CUDA driver's fabric handle + * based APIs. + * + * An IMEX domain is either an OS instance or a group of securely + * connected OS instances using the NVIDIA IMEX daemon. The option must + * be set to the same value on each OS instance within the IMEX domain. + * + * An IMEX channel is a logical entity that is represented by a /dev node. + * The IMEX channels are global resources within the IMEX domain. When + * exporter and importer CUDA processes have been granted access to the + * same IMEX channel, they can securely share memory. + * + * Note that the NVIDIA driver will not attempt to create the /dev nodes. Thus, + * the related CUDA APIs will fail with an insufficient permission error until + * the /dev nodes are set up. The creation of these /dev nodes, + * /dev/nvidia-caps-imex-channels/channelN, must be handled by the + * administrator, where N is the minor number. The major number can be + * queried from /proc/devices. + * + * nvidia-modprobe CLI support is available to set up the /dev nodes. + * NVreg_ModifyDeviceFiles, NVreg_DeviceFileGID, NVreg_DeviceFileUID + * and NVreg_DeviceFileMode will be honored by nvidia-modprobe. + * + * Also, refer to the NVreg_CreateImexChannel0 option. + * + * Possible values: + * 0 - Disable IMEX using CUDA driver's fabric handles. + * N - N IMEX channels will be enabled in the driver to facilitate N + * concurrent users. Default value is 2048 channels, and the current + * maximum value is 20-bit, same as Linux dev_t's minor number limit. + */ +#define __NV_IMEX_CHANNEL_COUNT ImexChannelCount +#define NV_REG_IMEX_CHANNEL_COUNT NV_REG_STRING(__NV_IMEX_CHANNEL_COUNT) + +/* + * Option: NVreg_CreateImexChannel0 + * + * Description: + * + * This option allows users to specify whether the NVIDIA driver must create + * the IMEX channel 0 by default. The channel will be created automatically + * when the NVIDIA open GPU kernel module is loaded. + * + * Note that users are advised to enable this option only in trusted + * environments where it is acceptable for applications to share the same + * IMEX channel. + * + * For more details on IMEX channels, refer to the NVreg_ImexChannelCount + * option. + * + * Possible values: + * 0 - Do not create IMEX channel 0 (default). + * 1 - Create IMEX channel 0. 
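+ *
+ * Hypothetical usage sketch (for illustration); the channel count of 32
+ * is illustrative only:
+ *
+ *   options nvidia NVreg_ImexChannelCount=32 NVreg_CreateImexChannel0=1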
+ */
+#define __NV_CREATE_IMEX_CHANNEL_0 CreateImexChannel0
+#define NV_CREATE_IMEX_CHANNEL_0 NV_REG_STRING(__NV_CREATE_IMEX_CHANNEL_0)
+
+/*
+ * Option: NVreg_GrdmaPciTopoCheckOverride
+ *
+ * Description:
+ *
+ * This option allows users to override the PCI topology validation enforced
+ * by the GPU driver's dma-buf and nv-p2p subsystems.
+ *
+ * Possible values:
+ *  0 - Use the driver's topology check to allow or deny access (default).
+ *  1 - Override the driver's topology check to allow access.
+ *  2 - Override the driver's topology check to deny access.
+ */
+#define __NV_GRDMA_PCI_TOPO_CHECK_OVERRIDE GrdmaPciTopoCheckOverride
+#define NV_GRDMA_PCI_TOPO_CHECK_OVERRIDE NV_REG_STRING(__NV_GRDMA_PCI_TOPO_CHECK_OVERRIDE)
+#define NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_DEFAULT      0
+#define NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_ALLOW_ACCESS 1
+#define NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_DENY_ACCESS  2
+
+/*
+ * Option: NVreg_EnableSystemMemoryPools
+ *
+ * Description:
+ *
+ * This option controls the creation of system memory page pools for
+ * different page sizes. The pool for a given pageSize is enabled by setting
+ * bit (pageSize >> NV_ENABLE_SYSTEM_MEMORY_POOLS_SHIFT). The pools keep
+ * memory cached once freed to speed up reallocation, and are shared by all
+ * adapters.
+ *
+ * This feature is only supported by the OpenRM driver.
+ *
+ * By default, the 4K, 64K, and 2M page size pools are enabled.
+ */
+#define __NV_ENABLE_SYSTEM_MEMORY_POOLS EnableSystemMemoryPools
+#define NV_ENABLE_SYSTEM_MEMORY_POOLS NV_REG_STRING(__NV_ENABLE_SYSTEM_MEMORY_POOLS)
+#define NV_ENABLE_SYSTEM_MEMORY_POOLS_DEFAULT 0x00000211
+#define NV_ENABLE_SYSTEM_MEMORY_POOLS_SHIFT   12
+
+#if defined(NV_DEFINE_REGISTRY_KEY_TABLE)
+
+/*
+ *---------registry key parameter declarations--------------
+ */
+
+NV_DEFINE_REG_ENTRY(__NV_RESMAN_DEBUG_LEVEL, ~0);
+NV_DEFINE_REG_ENTRY(__NV_RM_LOGON_RC, 1);
+NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MODIFY_DEVICE_FILES, 1);
+NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_UID, 0);
+NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_GID, 0);
+NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_MODE, 0666);
+NV_DEFINE_REG_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS, 1);
+NV_DEFINE_REG_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE, ~0);
+NV_DEFINE_REG_ENTRY(__NV_ENABLE_PCIE_GEN3, 0);
+NV_DEFINE_REG_ENTRY(__NV_ENABLE_MSI, 1);
+NV_DEFINE_REG_ENTRY(__NV_ENABLE_STREAM_MEMOPS, 0);
+NV_DEFINE_REG_ENTRY(__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER, 1);
+NV_DEFINE_REG_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, 0);
+NV_DEFINE_REG_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT, 0);
+NV_DEFINE_REG_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 256);
+NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT, 3);
+NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 200);
+NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE, NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE);
+NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS, NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG);
+NV_DEFINE_REG_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS, 1);
+
+NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_USER_NUMA_MANAGEMENT, 1);
+NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MEMORY_POOL_SIZE, 0);
+NV_DEFINE_REG_ENTRY_GLOBAL(__NV_KMALLOC_HEAP_MAX_SIZE, 0);
+NV_DEFINE_REG_ENTRY_GLOBAL(__NV_VMALLOC_HEAP_MAX_SIZE, 0);
+NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0);
+NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0);
+NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0);
+NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 1);
+NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PLATFORM_DEVICE_DRIVER, 1);
+NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_RESIZABLE_BAR, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_TEGRA_GPU_PG_MASK, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_NONBLOCKING_OPEN, 1); + +NV_DEFINE_REG_STRING_ENTRY(__NV_COHERENT_GPU_MEMORY_MODE, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS_PER_DEVICE, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_RM_MSG, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_GPU_BLACKLIST, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_TEMPORARY_FILE_PATH, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL); +NV_DEFINE_REG_ENTRY(__NV_DMA_REMAP_PEER_MMIO, NV_DMA_REMAP_PEER_MMIO_ENABLE); +NV_DEFINE_REG_STRING_ENTRY(__NV_RM_NVLINK_BW, NULL); +NV_DEFINE_REG_ENTRY(__NV_RM_NVLINK_BW_LINK_COUNT, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IMEX_CHANNEL_COUNT, 2048); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_CREATE_IMEX_CHANNEL_0, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_GRDMA_PCI_TOPO_CHECK_OVERRIDE, + NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_DEFAULT); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_SYSTEM_MEMORY_POOLS, NV_ENABLE_SYSTEM_MEMORY_POOLS_DEFAULT); + +/* + *----------------registry database definition---------------------- + */ + +/* + * You can enable any of the registry options disabled by default by + * editing their respective entries in the table below. The last field + * determines if the option is considered valid - in order for the + * changes to take effect, you need to recompile and reload the NVIDIA + * kernel module. + */ +nv_parm_t nv_parms[] = { + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RESMAN_DEBUG_LEVEL), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RM_LOGON_RC), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MODIFY_DEVICE_FILES), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_UID), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_GID), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_MSI), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_GEN3), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MEMORY_POOL_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_KMALLOC_HEAP_MAX_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_VMALLOC_HEAP_MAX_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_IGNORE_MMIO_CHECK), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_STREAM_MEMOPS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_USER_NUMA_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_NVLINK_DISABLE), + NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(__NV_RM_PROFILING_ADMIN_ONLY, + __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_TEGRA_GPU_PG_MASK), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_RESIZABLE_BAR), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RM_NVLINK_BW_LINK_COUNT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_DBG_BREAKPOINT), + 
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DMA_REMAP_PEER_MMIO), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_IMEX_CHANNEL_COUNT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_CREATE_IMEX_CHANNEL_0), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_GRDMA_PCI_TOPO_CHECK_OVERRIDE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_SYSTEM_MEMORY_POOLS), + {NULL, NULL} +}; + +#elif defined(NVRM) + +extern nv_parm_t nv_parms[]; + +#endif /* NV_DEFINE_REGISTRY_KEY_TABLE */ + +#endif /* _RM_REG_H_ */ diff --git a/kernel-open/nvidia/nv-report-err.c b/kernel-open/nvidia/nv-report-err.c new file mode 100644 index 0000000..acf471f --- /dev/null +++ b/kernel-open/nvidia/nv-report-err.c @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ +#include "nv-linux.h" +#include "os-interface.h" +#include "nv-report-err.h" + +#define CREATE_TRACE_POINTS +#include "nv-tracepoint.h" + +nv_report_error_cb_t nv_error_cb_handle = NULL; + +int nvidia_register_error_cb(nv_report_error_cb_t report_error_cb) +{ + if (report_error_cb == NULL) + return -EINVAL; + + if (nv_error_cb_handle != NULL) + return -EBUSY; + + nv_error_cb_handle = report_error_cb; + return 0; +} + +EXPORT_SYMBOL(nvidia_register_error_cb); + +int nvidia_unregister_error_cb(void) +{ + if (nv_error_cb_handle == NULL) + return -EPERM; + + nv_error_cb_handle = NULL; + return 0; +} + +EXPORT_SYMBOL(nvidia_unregister_error_cb); + +void nv_report_error( + struct pci_dev *dev, + NvU32 error_number, + const char *format, + va_list ap +) +{ + char *buffer; + gfp_t gfp = NV_MAY_SLEEP() ? NV_GFP_NO_OOM : NV_GFP_ATOMIC; + + buffer = kvasprintf(gfp, format, ap); + + if (buffer == NULL) + return; + + trace_nvidia_dev_xid(dev, error_number, buffer); + + if (nv_error_cb_handle != NULL) + nv_error_cb_handle(dev, error_number, buffer, strlen(buffer) + 1); + + kfree(buffer); +} diff --git a/kernel-open/nvidia/nv-report-err.h b/kernel-open/nvidia/nv-report-err.h new file mode 100644 index 0000000..815c659 --- /dev/null +++ b/kernel-open/nvidia/nv-report-err.h @@ -0,0 +1,66 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_REPORT_ERR_H_
+#define _NV_REPORT_ERR_H_
+
+/*
+ * @brief
+ *   Callback definition for obtaining XID error string and data.
+ *
+ * @param[in] pci_dev *
+ *   Structure describing the GPU PCI device.
+ * @param[in] uint32_t
+ *   XID number.
+ * @param[in] char *
+ *   Error string with HWERR info.
+ * @param[in] size_t
+ *   Length of the error string, including the terminating NUL.
+ */
+typedef void (*nv_report_error_cb_t)(struct pci_dev *, uint32_t, char *, size_t);
+
+/*
+ * @brief
+ *   Registers a callback function used to obtain the XID error string
+ *   and data.
+ *
+ * @param[in] report_error_cb
+ *   A function pointer to receive the callback.
+ *
+ * @return
+ *   0       upon successful completion.
+ *   -EINVAL callback handle is NULL.
+ *   -EBUSY  callback handle is already registered.
+ */
+int nvidia_register_error_cb(nv_report_error_cb_t report_error_cb);
+
+/*
+ * @brief
+ *   Unregisters the callback function handle.
+ *
+ * @return
+ *   0      upon successful completion.
+ *   -EPERM unregister not permitted on NULL callback handle.
+ */
+int nvidia_unregister_error_cb(void);
+
+#endif /* _NV_REPORT_ERR_H_ */
diff --git a/kernel-open/nvidia/nv-rsync.c b/kernel-open/nvidia/nv-rsync.c
new file mode 100644
index 0000000..88863da
--- /dev/null
+++ b/kernel-open/nvidia/nv-rsync.c
@@ -0,0 +1,161 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-linux.h" +#include "nv-rsync.h" + +nv_rsync_info_t g_rsync_info; + +void nv_init_rsync_info( + void +) +{ + g_rsync_info.relaxed_ordering_mode = NV_FALSE; + g_rsync_info.usage_count = 0; + g_rsync_info.data = NULL; + NV_INIT_MUTEX(&g_rsync_info.lock); +} + +void nv_destroy_rsync_info( + void +) +{ + WARN_ON(g_rsync_info.data); + WARN_ON(g_rsync_info.usage_count); + WARN_ON(g_rsync_info.relaxed_ordering_mode); +} + +int nv_get_rsync_info( + void +) +{ + int mode; + int rc = 0; + + down(&g_rsync_info.lock); + + if (g_rsync_info.usage_count == 0) + { + if (g_rsync_info.get_relaxed_ordering_mode) + { + rc = g_rsync_info.get_relaxed_ordering_mode(&mode, + g_rsync_info.data); + if (rc != 0) + { + goto done; + } + + g_rsync_info.relaxed_ordering_mode = !!mode; + } + } + + g_rsync_info.usage_count++; + +done: + up(&g_rsync_info.lock); + + return rc; +} + +void nv_put_rsync_info( + void +) +{ + int mode; + + down(&g_rsync_info.lock); + + g_rsync_info.usage_count--; + + if (g_rsync_info.usage_count == 0) + { + if (g_rsync_info.put_relaxed_ordering_mode) + { + mode = g_rsync_info.relaxed_ordering_mode; + g_rsync_info.put_relaxed_ordering_mode(mode, g_rsync_info.data); + g_rsync_info.relaxed_ordering_mode = NV_FALSE; + } + } + + up(&g_rsync_info.lock); +} + +int nv_register_rsync_driver( + int (*get_relaxed_ordering_mode)(int *mode, void *data), + void (*put_relaxed_ordering_mode)(int mode, void *data), + void (*wait_for_rsync)(struct pci_dev *gpu, void *data), + void *data +) +{ + int rc = 0; + + down(&g_rsync_info.lock); + + if (g_rsync_info.get_relaxed_ordering_mode != NULL) + { + rc = -EBUSY; + goto done; + } + + if (g_rsync_info.usage_count != 0) + { + rc = -EBUSY; + goto done; + } + + g_rsync_info.get_relaxed_ordering_mode = get_relaxed_ordering_mode; + g_rsync_info.put_relaxed_ordering_mode = put_relaxed_ordering_mode; + g_rsync_info.wait_for_rsync = wait_for_rsync; + g_rsync_info.data = data; + +done: + up(&g_rsync_info.lock); + + return rc; +} + +void nv_unregister_rsync_driver( + int (*get_relaxed_ordering_mode)(int *mode, void *data), + void (*put_relaxed_ordering_mode)(int mode, void *data), + void (*wait_for_rsync)(struct pci_dev *gpu, void *data), + void *data +) +{ + down(&g_rsync_info.lock); + + WARN_ON(g_rsync_info.usage_count != 0); + + WARN_ON(g_rsync_info.get_relaxed_ordering_mode != + get_relaxed_ordering_mode); + WARN_ON(g_rsync_info.put_relaxed_ordering_mode != + put_relaxed_ordering_mode); + WARN_ON(g_rsync_info.wait_for_rsync != wait_for_rsync); + WARN_ON(g_rsync_info.data != data); + + g_rsync_info.get_relaxed_ordering_mode = NULL; + g_rsync_info.put_relaxed_ordering_mode = NULL; + g_rsync_info.wait_for_rsync = NULL; + g_rsync_info.data = NULL; + + up(&g_rsync_info.lock); +} diff --git a/kernel-open/nvidia/nv-rsync.h b/kernel-open/nvidia/nv-rsync.h new file mode 100644 index 0000000..cc0e1a2 --- /dev/null +++ b/kernel-open/nvidia/nv-rsync.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_RSYNC_H_ +#define _NV_RSYNC_H_ + +#include "nv-linux.h" + +typedef struct nv_rsync_info +{ + struct semaphore lock; + uint32_t usage_count; + NvBool relaxed_ordering_mode; + int (*get_relaxed_ordering_mode)(int *mode, void *data); + void (*put_relaxed_ordering_mode)(int mode, void *data); + void (*wait_for_rsync)(struct pci_dev *gpu, void *data); + void *data; +} nv_rsync_info_t; + +void nv_init_rsync_info(void); +void nv_destroy_rsync_info(void); +int nv_get_rsync_info(void); +void nv_put_rsync_info(void); +int nv_register_rsync_driver( + int (*get_relaxed_ordering_mode)(int *mode, void *data), + void (*put_relaxed_ordering_mode)(int mode, void *data), + void (*wait_for_rsync)(struct pci_dev *gpu, void *data), + void *data); +void nv_unregister_rsync_driver( + int (*get_relaxed_ordering_mode)(int *mode, void *data), + void (*put_relaxed_ordering_mode)(int mode, void *data), + void (*wait_for_rsync)(struct pci_dev *gpu, void *data), + void *data); + +#endif diff --git a/kernel-open/nvidia/nv-tracepoint.h b/kernel-open/nvidia/nv-tracepoint.h new file mode 100644 index 0000000..3b887fe --- /dev/null +++ b/kernel-open/nvidia/nv-tracepoint.h @@ -0,0 +1,66 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "conftest.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nvidia
+
+#if !defined(_TRACE_NV_REPORT_ERR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NV_REPORT_ERR_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(nvidia_dev_xid,
+
+    TP_PROTO(const struct pci_dev *pdev, uint32_t error_code, const char *msg),
+
+    TP_ARGS(pdev, error_code, msg),
+
+    TP_STRUCT__entry(
+        __string(dev, pci_name(pdev))
+        __field (u32, error_code)
+        __string(msg, msg)
+    ),
+
+    TP_fast_assign(
+#if NV_ASSIGN_STR_ARGUMENT_COUNT == 1
+        __assign_str(dev);
+        __assign_str(msg);
+#else
+        __assign_str(dev, pci_name(pdev));
+        __assign_str(msg, msg);
+#endif
+        __entry->error_code = error_code;
+    ),
+
+    TP_printk("Xid (PCI:%s): %u, %s", __get_str(dev), __entry->error_code, __get_str(msg))
+);
+
+#endif // !defined(_TRACE_NV_REPORT_ERR_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE nv-tracepoint
+#include <trace/define_trace.h>
+
diff --git a/kernel-open/nvidia/nv-usermap.c b/kernel-open/nvidia/nv-usermap.c
new file mode 100644
index 0000000..d4f27a8
--- /dev/null
+++ b/kernel-open/nvidia/nv-usermap.c
@@ -0,0 +1,155 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +NV_STATUS NV_API_CALL nv_add_mapping_context_to_file( + nv_state_t *nv, + nv_usermap_access_params_t *nvuap, + NvU32 prot, + void *pAllocPriv, + NvU64 pageIndex, + NvU32 fd +) +{ + NV_STATUS status = NV_OK; + nv_alloc_mapping_context_t *nvamc = NULL; + nv_file_private_t *nvfp = NULL; + nv_linux_file_private_t *nvlfp = NULL; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + void *priv = NULL; + + nvfp = nv_get_file_private(fd, NV_IS_CTL_DEVICE(nv), &priv); + if (nvfp == NULL) + return NV_ERR_INVALID_ARGUMENT; + + nvlfp = nv_get_nvlfp_from_nvfp(nvfp); + + nvamc = &nvlfp->mmap_context; + + if (nvamc->valid) + { + status = NV_ERR_STATE_IN_USE; + goto done; + } + + os_mem_set((void*) nvamc, 0, sizeof(nv_alloc_mapping_context_t)); + + if (NV_IS_CTL_DEVICE(nv)) + { + nvamc->alloc = pAllocPriv; + nvamc->page_index = pageIndex; + } + else + { + if (NV_STATE_PTR(nvlfp->nvptr) != nv) + { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + status = os_alloc_mem((void**) &nvamc->memArea.pRanges, + sizeof(MemoryRange) * nvuap->memArea.numRanges); + + if (status != NV_OK) + { + nvamc->memArea.pRanges = NULL; + goto done; + } + nvamc->memArea.numRanges = nvuap->memArea.numRanges; + os_mem_copy(nvamc->memArea.pRanges, nvuap->memArea.pRanges, + sizeof(MemoryRange) * nvuap->memArea.numRanges); + + if (nv_get_numa_status(nvl) == NV_NUMA_STATUS_ONLINE) + { + nvamc->page_array = nvuap->page_array; + nvamc->num_pages = nvuap->num_pages; + } + nvamc->access_start = nvuap->access_start; + nvamc->access_size = nvuap->access_size; + } + + nvamc->prot = prot; + nvamc->valid = NV_TRUE; + nvamc->caching = nvuap->caching; + +done: + nv_put_file_private(priv); + + return status; +} + +NV_STATUS NV_API_CALL nv_alloc_user_mapping( + nv_state_t *nv, + void *pAllocPrivate, + NvU64 pageIndex, + NvU32 pageOffset, + NvU64 size, + NvU32 protect, + NvU64 *pUserAddress, + void **ppPrivate +) +{ + nv_alloc_t *at = pAllocPrivate; + + if (at->flags.contig) + *pUserAddress = (at->page_table[0].phys_addr + (pageIndex * PAGE_SIZE) + pageOffset); + else + *pUserAddress = (at->page_table[pageIndex].phys_addr + pageOffset); + + return NV_OK; +} + +void NV_API_CALL nv_free_user_mapping( + nv_state_t *nv, + void *pAllocPrivate, + NvU64 userAddress, + void *pPrivate +) +{ +} + +/* + * This function checks if a user mapping should be allowed given the GPU's 4K + * page isolation requirements. + */ +NV_STATUS NV_API_CALL nv_check_usermap_access_params( + nv_state_t *nv, + const nv_usermap_access_params_t *nvuap +) +{ + const NvU64 addr = nvuap->addr; + const NvU64 size = nvuap->size; + + if (rm_gpu_need_4k_page_isolation(nv) && + NV_4K_PAGE_ISOLATION_REQUIRED(addr, size)) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "4K page isolation required but not available!\n"); + return NV_ERR_OPERATING_SYSTEM; + } + + return NV_OK; +} diff --git a/kernel-open/nvidia/nv-vm.c b/kernel-open/nvidia/nv-vm.c new file mode 100644 index 0000000..2896dbd --- /dev/null +++ b/kernel-open/nvidia/nv-vm.c @@ -0,0 +1,1104 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os-interface.h" +#include "nv.h" +#include "nv-linux.h" +#include "nv-reg.h" + +extern NvU32 NVreg_EnableSystemMemoryPools; + +static inline void nv_set_contig_memory_uc(nvidia_pte_t *page_ptr, NvU32 num_pages) +{ +#if defined(NV_SET_MEMORY_UC_PRESENT) + struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr); + unsigned long addr = (unsigned long)page_address(page); + set_memory_uc(addr, num_pages); +#elif defined(NV_SET_PAGES_UC_PRESENT) + struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr); + set_pages_uc(page, num_pages); +#endif +} + +static inline void nv_set_contig_memory_wb(nvidia_pte_t *page_ptr, NvU32 num_pages) +{ +#if defined(NV_SET_MEMORY_UC_PRESENT) + struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr); + unsigned long addr = (unsigned long)page_address(page); + set_memory_wb(addr, num_pages); +#elif defined(NV_SET_PAGES_UC_PRESENT) + struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr); + set_pages_wb(page, num_pages); +#endif +} + +static inline int nv_set_memory_array_type_present(NvU32 type) +{ + switch (type) + { +#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT) + case NV_MEMORY_UNCACHED: + return 1; + case NV_MEMORY_WRITEBACK: + return 1; +#endif + default: + return 0; + } +} + +static inline int nv_set_pages_array_type_present(NvU32 type) +{ + switch (type) + { +#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT) + case NV_MEMORY_UNCACHED: + return 1; + case NV_MEMORY_WRITEBACK: + return 1; +#endif + default: + return 0; + } +} + +static inline void nv_set_memory_array_type( + unsigned long *pages, + NvU32 num_pages, + NvU32 type +) +{ + switch (type) + { +#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT) + case NV_MEMORY_UNCACHED: + set_memory_array_uc(pages, num_pages); + break; + case NV_MEMORY_WRITEBACK: + set_memory_array_wb(pages, num_pages); + break; +#endif + default: + nv_printf(NV_DBG_ERRORS, + "NVRM: %s(): type %d unimplemented\n", + __FUNCTION__, type); + break; + } +} + +static inline void nv_set_pages_array_type( + struct page **pages, + NvU32 num_pages, + NvU32 type +) +{ + switch (type) + { +#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT) + case NV_MEMORY_UNCACHED: + set_pages_array_uc(pages, num_pages); + break; + case NV_MEMORY_WRITEBACK: + set_pages_array_wb(pages, num_pages); + break; +#endif + default: + nv_printf(NV_DBG_ERRORS, + "NVRM: %s(): type %d unimplemented\n", + __FUNCTION__, type); + break; + } +} + +static inline 
void nv_set_contig_memory_type( + nvidia_pte_t *page_ptr, + NvU32 num_pages, + NvU32 type +) +{ + switch (type) + { + case NV_MEMORY_UNCACHED: + nv_set_contig_memory_uc(page_ptr, num_pages); + break; + case NV_MEMORY_WRITEBACK: + nv_set_contig_memory_wb(page_ptr, num_pages); + break; + default: + nv_printf(NV_DBG_ERRORS, + "NVRM: %s(): type %d unimplemented\n", + __FUNCTION__, type); + } +} + +static inline void nv_set_memory_type(nv_alloc_t *at, NvU32 type) +{ + NvU32 i; + NV_STATUS status = NV_OK; +#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT) + unsigned long *pages = NULL; +#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT) + struct page **pages = NULL; +#else + unsigned long *pages = NULL; +#endif + + nvidia_pte_t *page_ptr; + struct page *page; + + if (at->flags.contig) + { + nv_set_contig_memory_type(&at->page_table[0], at->num_pages, type); + return; + } + + if (nv_set_memory_array_type_present(type)) + { + status = os_alloc_mem((void **)&pages, + at->num_pages * sizeof(unsigned long)); + + } + else if (nv_set_pages_array_type_present(type)) + { + status = os_alloc_mem((void **)&pages, + at->num_pages * sizeof(struct page*)); + } + + if (status != NV_OK) + pages = NULL; + + // + // If the set_{memory,page}_array_* functions are in the kernel interface, + // it's faster to use them since they work on non-contiguous memory, + // whereas the set_{memory,page}_* functions do not. + // + if (pages) + { + for (i = 0; i < at->num_pages; i++) + { + page_ptr = &at->page_table[i]; + page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr); +#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT) + pages[i] = (unsigned long)page_address(page); +#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT) + pages[i] = page; +#endif + } +#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT) + nv_set_memory_array_type(pages, at->num_pages, type); +#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT) + nv_set_pages_array_type(pages, at->num_pages, type); +#endif + os_free_mem(pages); + } + + // + // If the set_{memory,page}_array_* functions aren't present in the kernel + // interface, each page has to be set individually, which has been measured + // to be ~10x slower than using the set_{memory,page}_array_* functions. + // + else + { + for (i = 0; i < at->num_pages; i++) + nv_set_contig_memory_type(&at->page_table[i], 1, type); + } +} + +static NvU64 nv_get_max_sysmem_address(void) +{ + NvU64 global_max_pfn = 0ULL; + int node_id; + + for_each_online_node(node_id) + { + global_max_pfn = max(global_max_pfn, (NvU64)node_end_pfn(node_id)); + } + + return ((global_max_pfn + 1) << PAGE_SHIFT) - 1; +} + +static unsigned int nv_compute_gfp_mask( + nv_state_t *nv, + nv_alloc_t *at +) +{ + unsigned int gfp_mask = NV_GFP_KERNEL; + struct device *dev = at->dev; + + /* + * If we know that SWIOTLB is enabled (and therefore we avoid calling the + * kernel to DMA-remap the pages), or if we are using dma_direct (which may + * transparently use the SWIOTLB for pages that are unaddressable by the + * device, in kernel versions 5.0 and later), limit our allocation pool + * to the first 4GB to avoid allocating pages outside of our device's + * addressable limit. + * Also, limit the allocation to the first 4GB if explicitly requested by + * setting the "nv->force_dma32_alloc" variable. 
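+ *
+ * For illustration (an inference from the code below, not exhaustive):
+ * a zeroed, order-0 allocation for a device with a 32-bit DMA mask would
+ * typically end up with
+ * NV_GFP_KERNEL | NV_GFP_DMA32 | __GFP_RETRY_MAYFAIL | __GFP_ZERO.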
+ */ + if (!nv || !nv_requires_dma_remap(nv) || nv_is_dma_direct(dev) || nv->force_dma32_alloc) + { + NvU64 max_sysmem_address = nv_get_max_sysmem_address(); + if ((dev && dev->dma_mask && (*(dev->dma_mask) < max_sysmem_address)) || + (nv && nv->force_dma32_alloc)) + { + gfp_mask = NV_GFP_KERNEL | NV_GFP_DMA32; + } + } + + gfp_mask |= __GFP_RETRY_MAYFAIL; + + if (at->flags.zeroed) + gfp_mask |= __GFP_ZERO; + + if (at->flags.node) + gfp_mask |= __GFP_THISNODE; + + // Compound pages are required by vm_insert_page for high-order page + // allocations + if (at->order > 0) + gfp_mask |= __GFP_COMP; + + return gfp_mask; +} + +// set subpages describing page +static void +nv_alloc_set_page +( + nv_alloc_t *at, + unsigned int page_idx, + unsigned long virt_addr +) +{ + unsigned long phys_addr = nv_get_kern_phys_address(virt_addr); + unsigned int os_pages_in_page = 1 << at->order; + unsigned int base_os_page = page_idx * os_pages_in_page; + unsigned int num_os_pages = NV_MIN(at->num_pages - base_os_page, os_pages_in_page); + unsigned int i; + + for (i = 0; i < num_os_pages; i++) + { + at->page_table[base_os_page + i].virt_addr = virt_addr + i * PAGE_SIZE; + at->page_table[base_os_page + i].phys_addr = phys_addr + i * PAGE_SIZE; + } +} + +/* + * This function is needed for allocating contiguous physical memory in xen + * dom0. Because of the use of xen sw iotlb in xen dom0, memory allocated by + * NV_GET_FREE_PAGES may not be machine contiguous when size is more than + * 1 page. nv_alloc_coherent_pages() will give us machine contiguous memory. + * Even though we get dma_address directly in this function, we will + * still call pci_map_page() later to get dma address. This is fine as it + * will return the same machine address. + */ +static NV_STATUS nv_alloc_coherent_pages( + nv_state_t *nv, + nv_alloc_t *at +) +{ + nvidia_pte_t *page_ptr; + NvU32 i; + unsigned int gfp_mask; + unsigned long virt_addr = 0; + nv_linux_state_t *nvl; + struct device *dev; + + if (!nv) + { + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: coherent page alloc on nvidiactl not supported\n", __FUNCTION__); + return NV_ERR_NOT_SUPPORTED; + } + + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + dev = nvl->dev; + + gfp_mask = nv_compute_gfp_mask(nv, at); + + virt_addr = (unsigned long)dma_alloc_coherent(dev, + at->num_pages * PAGE_SIZE, + &at->dma_handle, + gfp_mask); + if (!virt_addr) + { + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__); + return NV_ERR_NO_MEMORY; + } + + for (i = 0; i < at->num_pages; i++) + { + page_ptr = &at->page_table[i]; + + page_ptr->virt_addr = virt_addr + i * PAGE_SIZE; + page_ptr->phys_addr = virt_to_phys((void *)page_ptr->virt_addr); + } + + if (at->cache_type != NV_MEMORY_CACHED) + { + nv_set_memory_type(at, NV_MEMORY_UNCACHED); + } + + at->flags.coherent = NV_TRUE; + return NV_OK; +} + +static void nv_free_coherent_pages( + nv_alloc_t *at +) +{ + nvidia_pte_t *page_ptr; + struct device *dev = at->dev; + + page_ptr = &at->page_table[0]; + + if (at->cache_type != NV_MEMORY_CACHED) + { + nv_set_memory_type(at, NV_MEMORY_WRITEBACK); + } + + dma_free_coherent(dev, at->num_pages * PAGE_SIZE, + (void *)page_ptr->virt_addr, at->dma_handle); +} + +typedef struct +{ + unsigned long virt_addr; + struct list_head list_node; +} nv_page_pool_entry_t; + +#define NV_MEM_POOL_LIST_HEAD(list) list_first_entry_or_null(list, nv_page_pool_entry_t, list_node) + +typedef struct nv_page_pool_t +{ + struct list_head clean_list; + struct list_head dirty_list; + nv_kthread_q_t scrubber_queue; + 
nv_kthread_q_item_t scrubber_queue_item; + unsigned int order; + unsigned long pages_owned; + void *lock; + struct shrinker *shrinker; + +#ifndef NV_SHRINKER_ALLOC_PRESENT + struct shrinker _shrinker; +#endif +} nv_page_pool_t; + +nv_page_pool_t *sysmem_page_pools[NV_MAX_PAGE_ORDER + 1]; + +#ifdef NV_SHRINKER_ALLOC_PRESENT +static nv_page_pool_t *nv_mem_pool_get_from_shrinker(struct shrinker *shrinker) +{ + return shrinker->private_data; +} + +static void nv_mem_pool_shrinker_free(nv_page_pool_t *mem_pool) +{ + if (mem_pool->shrinker != NULL) + { + shrinker_free(mem_pool->shrinker); + } +} + +static struct shrinker *nv_mem_pool_shrinker_alloc(nv_page_pool_t *mem_pool) +{ + return shrinker_alloc(0, "nv-sysmem-alloc-order-%u", mem_pool->order); +} + +static void nv_mem_pool_shrinker_register(nv_page_pool_t *mem_pool, struct shrinker *shrinker) +{ + shrinker->private_data = mem_pool; + shrinker_register(shrinker); +} +#else + +static nv_page_pool_t *nv_mem_pool_get_from_shrinker(struct shrinker *shrinker) +{ + return container_of(shrinker, nv_page_pool_t, _shrinker); +} + +static void nv_mem_pool_shrinker_free(nv_page_pool_t *mem_pool) +{ + if (mem_pool->shrinker != NULL) + { + unregister_shrinker(mem_pool->shrinker); + } +} + +static struct shrinker *nv_mem_pool_shrinker_alloc(nv_page_pool_t *mem_pool) +{ + return &mem_pool->_shrinker; +} + +static void nv_mem_pool_shrinker_register(nv_page_pool_t *mem_pool, struct shrinker *shrinker) +{ + register_shrinker(shrinker +#ifdef NV_REGISTER_SHRINKER_HAS_FMT_ARG + , "nv-sysmem-alloc-order-%u", mem_pool->order +#endif // NV_REGISTER_SHRINKER_HAS_FMT_ARG + ); +} +#endif // NV_SHRINKER_ALLOC_PRESENT + +static unsigned long +nv_mem_pool_move_pages +( + struct list_head *dst_list, + struct list_head *src_list, + unsigned long max_entries_to_move +) +{ + while (max_entries_to_move > 0) + { + nv_page_pool_entry_t *pool_entry = NV_MEM_POOL_LIST_HEAD(src_list); + if (pool_entry == NULL) + break; + + list_del(&pool_entry->list_node); + list_add(&pool_entry->list_node, dst_list); + max_entries_to_move--; + } + + return max_entries_to_move; +} + +static void +nv_mem_pool_free_page_list +( + struct list_head *free_list, + unsigned int order +) +{ + while (!list_empty(free_list)) + { + nv_page_pool_entry_t *pool_entry = NV_MEM_POOL_LIST_HEAD(free_list); + list_del(&pool_entry->list_node); + NV_FREE_PAGES(pool_entry->virt_addr, order); + NV_KFREE(pool_entry, sizeof(*pool_entry)); + } +} + +static unsigned long +nv_mem_pool_shrinker_count +( + struct shrinker *shrinker, + struct shrink_control *sc +) +{ + nv_page_pool_t *mem_pool = nv_mem_pool_get_from_shrinker(shrinker); + unsigned long pages_owned; + NV_STATUS status; + + status = os_acquire_mutex(mem_pool->lock); + WARN_ON(status != NV_OK); + // Page that is being scrubbed by worker is not counted + pages_owned = mem_pool->pages_owned; + os_release_mutex(mem_pool->lock); + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: %s: pool_order=%u: %lu pages in pool\n", + __FUNCTION__, mem_pool->order, pages_owned); + + return pages_owned; +} + +static unsigned long +nv_mem_pool_shrinker_scan +( + struct shrinker *shrinker, + struct shrink_control *sc +) +{ + nv_page_pool_t *mem_pool = nv_mem_pool_get_from_shrinker(shrinker); + unsigned long pages_remaining; + unsigned long pages_freed; + NV_STATUS status; + + struct list_head reclaim_list; + INIT_LIST_HEAD(&reclaim_list); + + status = os_acquire_mutex(mem_pool->lock); + WARN_ON(status != NV_OK); + pages_remaining = sc->nr_to_scan; + pages_remaining = 
nv_mem_pool_move_pages(&reclaim_list, &mem_pool->dirty_list, pages_remaining); + pages_remaining = nv_mem_pool_move_pages(&reclaim_list, &mem_pool->clean_list, pages_remaining); + pages_freed = sc->nr_to_scan - pages_remaining; + mem_pool->pages_owned -= pages_freed; + os_release_mutex(mem_pool->lock); + + nv_mem_pool_free_page_list(&reclaim_list, mem_pool->order); + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: %s: pool_order=%u: %lu/%lu pages freed\n", + __FUNCTION__, mem_pool->order, pages_freed, sc->nr_to_scan); + + return (pages_remaining == sc->nr_to_scan) ? SHRINK_STOP : pages_freed; +} + +static unsigned long +nv_mem_pool_get_page_size(nv_page_pool_t *mem_pool) +{ + return ((unsigned long) PAGE_SIZE) << mem_pool->order; +} + +unsigned int +nv_mem_pool_alloc_pages +( + nv_page_pool_t *mem_pool, + nv_alloc_t *at +) +{ + unsigned int os_pages_in_page = 1 << at->order; + unsigned int max_num_pages = NV_CEIL(at->num_pages, os_pages_in_page); + nv_page_pool_entry_t *pool_entry; + unsigned int pages_remaining = max_num_pages; + unsigned int pages_allocated; + unsigned int pages_allocated_clean; + unsigned long pages_owned; + unsigned int i = 0; + struct list_head alloc_clean_pages; + struct list_head alloc_dirty_pages; + NV_STATUS status; + + if (!NV_MAY_SLEEP()) + { + // can't wait for the mutex + return 0; + } + + INIT_LIST_HEAD(&alloc_clean_pages); + INIT_LIST_HEAD(&alloc_dirty_pages); + + status = os_acquire_mutex(mem_pool->lock); + WARN_ON(status != NV_OK); + pages_remaining = nv_mem_pool_move_pages(&alloc_clean_pages, &mem_pool->clean_list, pages_remaining); + pages_allocated_clean = (max_num_pages - pages_remaining); + pages_remaining = nv_mem_pool_move_pages(&alloc_dirty_pages, &mem_pool->dirty_list, pages_remaining); + pages_allocated = (max_num_pages - pages_remaining); + mem_pool->pages_owned -= pages_allocated; + pages_owned = mem_pool->pages_owned; + os_release_mutex(mem_pool->lock); + + while ((pool_entry = NV_MEM_POOL_LIST_HEAD(&alloc_clean_pages))) + { + nv_alloc_set_page(at, i, pool_entry->virt_addr); + list_del(&pool_entry->list_node); + NV_KFREE(pool_entry, sizeof(*pool_entry)); + i++; + } + + while ((pool_entry = NV_MEM_POOL_LIST_HEAD(&alloc_dirty_pages))) + { + memset((void *)pool_entry->virt_addr, 0, nv_mem_pool_get_page_size(mem_pool)); + + nv_alloc_set_page(at, i, pool_entry->virt_addr); + list_del(&pool_entry->list_node); + NV_KFREE(pool_entry, sizeof(*pool_entry)); + i++; + } + + if (i != pages_allocated) + { + os_dbg_breakpoint(); + } + + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: pool_order=%u: %u/%u pages allocated (%u already cleared, %lu left in pool)\n", + __FUNCTION__, mem_pool->order, pages_allocated, max_num_pages, pages_allocated_clean, pages_owned); + + return pages_allocated; +} + +static void +nv_mem_pool_queue_worker(nv_page_pool_t *mem_pool) +{ + nv_kthread_q_schedule_q_item(&mem_pool->scrubber_queue, + &mem_pool->scrubber_queue_item); +} + +static void +nv_mem_pool_worker(void *arg) +{ + nv_page_pool_t *mem_pool = arg; + nv_page_pool_entry_t *pool_entry = NULL; + NV_STATUS status; + + for (;;) + { + status = os_acquire_mutex(mem_pool->lock); + WARN_ON(status != NV_OK); + if (pool_entry != NULL) + { + // add the entry from the last pass, avoid getting the lock again + list_add(&pool_entry->list_node, &mem_pool->clean_list); + } + + pool_entry = NV_MEM_POOL_LIST_HEAD(&mem_pool->dirty_list); + if (pool_entry == NULL) + { + os_release_mutex(mem_pool->lock); + break; + } + list_del(&pool_entry->list_node); + os_release_mutex(mem_pool->lock); + +
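// Zero the dirty page outside the pool lock so allocations and the shrinker are not stalled behind the memset. + 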
memset((void *)pool_entry->virt_addr, 0, nv_mem_pool_get_page_size(mem_pool)); + } +} + +void +nv_mem_pool_destroy(nv_page_pool_t *mem_pool) +{ + NV_STATUS status; + + status = os_acquire_mutex(mem_pool->lock); + WARN_ON(status != NV_OK); + nv_mem_pool_free_page_list(&mem_pool->dirty_list, mem_pool->order); + os_release_mutex(mem_pool->lock); + + // All pages are freed, so scrubber won't attempt to requeue + nv_kthread_q_stop(&mem_pool->scrubber_queue); + + status = os_acquire_mutex(mem_pool->lock); + WARN_ON(status != NV_OK); + // free clean pages after scrubber can't add any new + nv_mem_pool_free_page_list(&mem_pool->clean_list, mem_pool->order); + os_release_mutex(mem_pool->lock); + + nv_mem_pool_shrinker_free(mem_pool); + + os_free_mutex(mem_pool->lock); + + NV_KFREE(mem_pool, sizeof(*mem_pool)); +} + +nv_page_pool_t* nv_mem_pool_init(unsigned int order) +{ + struct shrinker *shrinker; + nv_page_pool_t *mem_pool; + + NV_KZALLOC(mem_pool, sizeof(*mem_pool)); + if (mem_pool == NULL) + { + nv_printf(NV_DBG_SETUP, "NVRM: %s: failed allocating memory\n", __FUNCTION__); + return NULL; + } + + mem_pool->order = order; + + INIT_LIST_HEAD(&mem_pool->clean_list); + INIT_LIST_HEAD(&mem_pool->dirty_list); + + if (os_alloc_mutex(&mem_pool->lock)) + { + nv_printf(NV_DBG_SETUP, "NVRM: %s: failed allocating mutex for worker thread\n", __FUNCTION__); + goto failed; + } + + if (nv_kthread_q_init(&mem_pool->scrubber_queue, "nv_mem_pool_scrubber_queue")) + { + nv_printf(NV_DBG_SETUP, "NVRM: %s: failed allocating worker thread\n", __FUNCTION__); + goto failed; + } + nv_kthread_q_item_init(&mem_pool->scrubber_queue_item, nv_mem_pool_worker, mem_pool); + + shrinker = nv_mem_pool_shrinker_alloc(mem_pool); + + if (shrinker == NULL) + { + nv_printf(NV_DBG_SETUP, "NVRM: %s: failed allocating shrinker\n", __FUNCTION__); + goto failed; + } + + shrinker->count_objects = nv_mem_pool_shrinker_count; + shrinker->scan_objects = nv_mem_pool_shrinker_scan; + shrinker->seeks = 1; + + nv_mem_pool_shrinker_register(mem_pool, shrinker); + + mem_pool->shrinker = shrinker; + return mem_pool; + +failed: + nv_mem_pool_destroy(mem_pool); + return NULL; +} + +NV_STATUS +nv_mem_pool_free_pages +( + nv_page_pool_t *mem_pool, + nv_alloc_t *at +) +{ + unsigned int os_pages_in_page = 1 << at->order; + unsigned int num_pages = NV_CEIL(at->num_pages, os_pages_in_page); + NvBool queue_worker; + nv_page_pool_entry_t *pool_entry; + struct list_head freed_pages; + unsigned int num_added_pages = 0; + unsigned long pages_owned; + unsigned int i; + NV_STATUS status; + + if (!NV_MAY_SLEEP()) + { + // can't wait for the mutex + return NV_ERR_INVALID_ARGUMENT; + } + + INIT_LIST_HEAD(&freed_pages); + + for (i = 0; i < num_pages; i++) + { + nvidia_pte_t *page_ptr = &at->page_table[i * os_pages_in_page]; + + if (page_ptr->virt_addr == 0) + { + // alloc failed + break; + } + + NV_KZALLOC(pool_entry, sizeof(*pool_entry)); + if (pool_entry == NULL) + { + NV_FREE_PAGES(page_ptr->virt_addr, mem_pool->order); + continue; + } + + pool_entry->virt_addr = page_ptr->virt_addr; + list_add(&pool_entry->list_node, &freed_pages); + num_added_pages++; + } + + if (num_added_pages == 0) + return NV_OK; + + status = os_acquire_mutex(mem_pool->lock); + WARN_ON(status != NV_OK); + // Worker is already queued if list is not empty + queue_worker = list_empty(&mem_pool->dirty_list); + list_splice_init(&freed_pages, &mem_pool->dirty_list); + mem_pool->pages_owned += num_added_pages; + pages_owned = mem_pool->pages_owned; + os_release_mutex(mem_pool->lock); + + 
nv_printf(NV_DBG_MEMINFO, "NVRM: VM: %s: pool_order=%u: %u/%u pages added to pool (%lu now in pool)\n", + __FUNCTION__, mem_pool->order, num_added_pages, num_pages, pages_owned); + + if (queue_worker) + { + nv_mem_pool_queue_worker(mem_pool); + } + + return NV_OK; +} + +NV_STATUS nv_init_page_pools(void) +{ + unsigned int order; + + for (order = 0; order <= NV_MAX_PAGE_ORDER; order++) + { + unsigned long page_size = PAGE_SIZE << order; + + if (!(NVreg_EnableSystemMemoryPools & (page_size >> NV_ENABLE_SYSTEM_MEMORY_POOLS_SHIFT))) + continue; + + sysmem_page_pools[order] = nv_mem_pool_init(order); + + if (sysmem_page_pools[order] == NULL) + { + return NV_ERR_NO_MEMORY; + } + } + + return NV_OK; +} + +void nv_destroy_page_pools(void) +{ + unsigned int order; + + for (order = 0; order <= NV_MAX_PAGE_ORDER; order++) + { + if (sysmem_page_pools[order]) + nv_mem_pool_destroy(sysmem_page_pools[order]); + } +} + +static nv_page_pool_t *nv_mem_pool_get(unsigned int order) +{ + // get_order() is not limited by NV_MAX_PAGE_ORDER + if (order >= sizeof(sysmem_page_pools) / sizeof(*sysmem_page_pools)) + return NULL; + + return sysmem_page_pools[order]; +} + +void +nv_free_system_pages +( + nv_alloc_t *at +) +{ + nv_page_pool_t *page_pool = nv_mem_pool_get(at->order); + unsigned int os_pages_in_page = 1 << at->order; + unsigned int num_pages = NV_CEIL(at->num_pages, os_pages_in_page); + unsigned int i; + + if (at->cache_type != NV_MEMORY_CACHED) + { + nv_set_memory_type(at, NV_MEMORY_WRITEBACK); + } + + for (i = 0; i < num_pages; i++) + { + nvidia_pte_t *page_ptr = &at->page_table[i * os_pages_in_page]; + + if (page_ptr->virt_addr == 0) + { + // alloc failed + break; + } + + // For unprotected sysmem in CC, memory is marked as unencrypted during allocation. + // NV_FREE_PAGES only deals with protected sysmem. Mark memory as encrypted and protected before free.
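+ // (This reverses the nv_set_memory_decrypted_zeroed() call made at allocation time in nv_alloc_system_pages().)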
+ nv_set_memory_encrypted(at->flags.unencrypted, page_ptr->virt_addr, 1 << at->order); + } + + if (page_pool == NULL || at->flags.node != 0 || + nv_mem_pool_free_pages(page_pool, at) != NV_OK) + { + // nv_mem_pool_free_pages() fails if !NV_MAY_SLEEP() + for (i = 0; i < num_pages; i++) + { + unsigned int base_os_page = i * os_pages_in_page; + nvidia_pte_t *page_ptr = &at->page_table[base_os_page]; + + if (page_ptr->virt_addr == 0) + { + // alloc failed + break; + } + + NV_FREE_PAGES(page_ptr->virt_addr, at->order); + } + } + + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: %u/%u order0 pages\n", __FUNCTION__, i * os_pages_in_page, at->num_pages); +} + +NV_STATUS +nv_alloc_system_pages +( + nv_state_t *nv, + nv_alloc_t *at +) +{ + unsigned int gfp_mask = nv_compute_gfp_mask(nv, at); + unsigned int i; + unsigned int num_pool_allocated_pages = 0; + unsigned int os_pages_in_page = 1 << at->order; + unsigned int num_pages = NV_CEIL(at->num_pages, os_pages_in_page); + nv_page_pool_t *page_pool = nv_mem_pool_get(at->order); + + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: %u order0 pages, %u order\n", __FUNCTION__, at->num_pages, at->order); + + if (page_pool != NULL && at->flags.node == 0 && !(gfp_mask & NV_GFP_DMA32)) + { + num_pool_allocated_pages = nv_mem_pool_alloc_pages(page_pool, at); + } + + for (i = num_pool_allocated_pages; i < num_pages; i++) + { + unsigned long virt_addr = 0; + + if (at->flags.node) + { + unsigned long ptr = 0ULL; + NV_ALLOC_PAGES_NODE(ptr, at->node_id, at->order, gfp_mask); + if (ptr != 0) + { + virt_addr = (unsigned long) page_address((void *)ptr); + } + } + else + { + NV_GET_FREE_PAGES(virt_addr, at->order, gfp_mask); + } + + if (virt_addr == 0) + { + goto failed; + } + + nv_alloc_set_page(at, i, virt_addr); + } + + for (i = 0; i < num_pages; i++) + { + unsigned int base_os_page = i * os_pages_in_page; + nvidia_pte_t *page_ptr = &at->page_table[base_os_page]; + unsigned int num_os_pages = NV_MIN(at->num_pages - base_os_page, os_pages_in_page); + + // In CC, NV_GET_FREE_PAGES only allocates protected sysmem. + // To get unprotected sysmem, this memory is marked as unencrypted. 
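+ // nv_free_system_pages() makes the matching nv_set_memory_encrypted() call before these pages are freed.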
+ nv_set_memory_decrypted_zeroed(at->flags.unencrypted, page_ptr->virt_addr, 1 << at->order, + num_os_pages * PAGE_SIZE); + } + + if (at->cache_type != NV_MEMORY_CACHED) + { + nv_set_memory_type(at, NV_MEMORY_UNCACHED); + } + + return NV_OK; + +failed: + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__); + nv_free_system_pages(at); + return NV_ERR_NO_MEMORY; +} + +NV_STATUS nv_alloc_contig_pages( + nv_state_t *nv, + nv_alloc_t *at +) +{ + NV_STATUS status; + + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages); + + if (os_is_xen_dom0()) + return nv_alloc_coherent_pages(nv, at); + + at->order = get_order(at->num_pages * PAGE_SIZE); + + status = nv_alloc_system_pages(nv, at); + if (status != NV_OK) + { + if (os_is_vgx_hyper()) + { + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: failed to allocate memory, trying coherent memory\n", __FUNCTION__); + + status = nv_alloc_coherent_pages(nv, at); + return status; + } + + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__); + return NV_ERR_NO_MEMORY; + } + + return NV_OK; +} + +void nv_free_contig_pages( + nv_alloc_t *at +) +{ + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages); + + if (at->flags.coherent) + { + nv_free_coherent_pages(at); + return; + } + + nv_free_system_pages(at); +} + +static NvUPtr nv_vmap(struct page **pages, NvU32 page_count, + NvBool cached, NvBool unencrypted) +{ + void *ptr; + pgprot_t prot = PAGE_KERNEL; +#if defined(NVCPU_X86_64) +#if defined(PAGE_KERNEL_NOENC) + if (unencrypted) + { + prot = cached ? nv_adjust_pgprot(PAGE_KERNEL_NOENC) : + nv_adjust_pgprot(NV_PAGE_KERNEL_NOCACHE_NOENC); + } + else +#endif + { + prot = cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE; + } +#elif defined(NVCPU_AARCH64) + prot = cached ? PAGE_KERNEL : NV_PGPROT_UNCACHED(PAGE_KERNEL); +#endif + ptr = vmap(pages, page_count, VM_MAP, prot); + if (ptr != NULL) + { + NV_MEMDBG_ADD(ptr, page_count * PAGE_SIZE); + } + + return (NvUPtr)ptr; +} + +static void nv_vunmap(NvUPtr vaddr, NvU32 page_count) +{ + vunmap((void *)vaddr); + NV_MEMDBG_REMOVE((void *)vaddr, page_count * PAGE_SIZE); +} + +NvUPtr nv_vm_map_pages( + struct page **pages, + NvU32 count, + NvBool cached, + NvBool unencrypted +) +{ + NvUPtr virt_addr = 0; + + if (!NV_MAY_SLEEP()) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: can't map %d pages, invalid context!\n", + __FUNCTION__, count); + os_dbg_breakpoint(); + return virt_addr; + } + + virt_addr = nv_vmap(pages, count, cached, unencrypted); + return virt_addr; +} + +void nv_vm_unmap_pages( + NvUPtr virt_addr, + NvU32 count +) +{ + if (!NV_MAY_SLEEP()) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: can't unmap %d pages at 0x%0llx, " + "invalid context!\n", __FUNCTION__, count, virt_addr); + os_dbg_breakpoint(); + return; + } + + nv_vunmap(virt_addr, count); +} diff --git a/kernel-open/nvidia/nv-vtophys.c b/kernel-open/nvidia/nv-vtophys.c new file mode 100644 index 0000000..fcae701 --- /dev/null +++ b/kernel-open/nvidia/nv-vtophys.c @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +NvU64 NV_API_CALL nv_get_kern_phys_address(NvU64 address) +{ + /* direct-mapped kernel address */ + if (virt_addr_valid((void *)address)) + return __pa(address); + + nv_printf(NV_DBG_ERRORS, + "NVRM: can't translate address in %s()!\n", __FUNCTION__); + return 0; +} + diff --git a/kernel-open/nvidia/nv.c b/kernel-open/nvidia/nv.c new file mode 100644 index 0000000..dd49e04 --- /dev/null +++ b/kernel-open/nvidia/nv.c @@ -0,0 +1,6285 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include <linux/module.h> // for MODULE_FIRMWARE + +// must precede "nv.h" and "nv-firmware.h" includes +#define NV_FIRMWARE_FOR_NAME(name) "nvidia/" NV_VERSION_STRING "/" name ".bin" +#define NV_FIRMWARE_DECLARE_GSP(name) \ + MODULE_FIRMWARE(NV_FIRMWARE_FOR_NAME(name)); +#include "nv-firmware.h" + +#include "nvmisc.h" +#include "os-interface.h" +#include "nv-linux.h" +#include "nv-p2p.h" +#include "nv-reg.h" +#include "nv-msi.h" +#include "nv-pci-table.h" +#include "nv-chardev-numbers.h" + +#if defined(NV_UVM_ENABLE) +#include "nv_uvm_interface.h" +#endif + +#if defined(NV_VGPU_KVM_BUILD) +#include "nv-vgpu-vfio-interface.h" +#endif + +#include "nv-hypervisor.h" +#include "nv-rsync.h" +#include "nv-kthread-q.h" +#include "nv-pat.h" +#include "nv-dmabuf.h" +#include "nv-caps-imex.h" + +/* + * Commit aefb2f2e619b ("x86/bugs: Rename CONFIG_RETPOLINE => + * CONFIG_MITIGATION_RETPOLINE") in v6.8 renamed CONFIG_RETPOLINE. + */ +#if !defined(CONFIG_RETPOLINE) && !defined(CONFIG_MITIGATION_RETPOLINE) +#include "nv-retpoline.h" +#endif + +#include +#include + +#include <sound/core.h> /* HDA struct snd_card */ + +#include + +#include "sound/hdaudio.h" + +#if defined(NV_SOUND_HDA_CODEC_H_PRESENT) +#include <sound/hda_codec.h> +#include <sound/hda_verbs.h> +#include <sound/hda_register.h> +#endif + +#if defined(NV_SEQ_READ_ITER_PRESENT) +#include <linux/uio.h> +#include <linux/kernfs.h> +#include <linux/seq_file.h> +#endif + +#include <linux/dmi.h> /* System DMI info */ + +#include + +#include +#include + +#if defined(NV_LINUX_CC_PLATFORM_H_PRESENT) +#include <linux/cc_platform.h> +#endif + +#if defined(NV_ASM_MSHYPERV_H_PRESENT) && defined(NVCPU_X86_64) +#include <asm/mshyperv.h> +#endif + +#include + +#include "conftest/patches.h" +#include "detect-self-hosted.h" + +#if defined(NV_BPMP_MRQ_HAS_STRAP_SET) && defined(NV_PM_RUNTIME_AVAILABLE) +#include +#endif + +#define RM_THRESHOLD_TOTAL_IRQ_COUNT 100000 +#define RM_THRESHOLD_UNAHNDLED_IRQ_COUNT 99900 +#define RM_UNHANDLED_TIMEOUT_US 100000 + +#define TEGRA264_STRAP_NV_FUSE_CTRL_OPT_GPU 1U + +MODULE_LICENSE("Dual MIT/GPL"); + +MODULE_INFO(supported, "external"); +MODULE_VERSION(NV_VERSION_STRING); +MODULE_ALIAS_CHARDEV_MAJOR(NV_MAJOR_DEVICE_NUMBER); + +/* + * MODULE_IMPORT_NS() is added by commit id 8651ec01daeda + * ("module: add support for symbol namespaces") in 5.4 + */ +#if defined(MODULE_IMPORT_NS) +/* + * DMA_BUF namespace is added by commit id 16b0314aa746 + * ("dma-buf: move dma-buf symbols into the DMA_BUF module namespace") in 5.16 + */ +#if defined(NV_MODULE_IMPORT_NS_TAKES_CONSTANT) +MODULE_IMPORT_NS(DMA_BUF); +#else +MODULE_IMPORT_NS("DMA_BUF"); +#endif // defined(NV_MODULE_IMPORT_NS_TAKES_CONSTANT) +#endif // defined(MODULE_IMPORT_NS) + +const NvBool nv_is_rm_firmware_supported_os = NV_TRUE; + +// Deprecated, use NV_REG_ENABLE_GPU_FIRMWARE instead +char *rm_firmware_active = NULL; +NV_MODULE_STRING_PARAMETER(rm_firmware_active); + +/* + * Global NVIDIA capability state, for GPU driver + */ +nv_cap_t *nvidia_caps_root = NULL; + +/* + * Global counts for tracking if all devices were initialized properly + */ +NvU32 num_nv_devices = 0; +NvU32 num_probed_nv_devices = 0; + +/* + * Global list and table of per-device state + * note: both nv_linux_devices and nv_linux_minor_num_table + * are protected by nv_linux_devices_lock + */ +nv_linux_state_t *nv_linux_devices; +static nv_linux_state_t *nv_linux_minor_num_table[NV_MINOR_DEVICE_NUMBER_REGULAR_MAX + 1]; + +// Global state for the control device +nv_linux_state_t nv_ctl_device = { { 0 } }; + +// cdev covering the region of regular (non-control) devices +static struct cdev nv_linux_devices_cdev; + +// cdev covering the control device +static struct cdev 
nv_linux_control_device_cdev; + +extern NvU32 nv_dma_remap_peer_mmio; + +nv_kthread_q_t nv_kthread_q; +nv_kthread_q_t nv_deferred_close_kthread_q; + +struct rw_semaphore nv_system_pm_lock; + +#if defined(CONFIG_PM) +static nv_power_state_t nv_system_power_state; +static nv_pm_action_depth_t nv_system_pm_action_depth; +struct semaphore nv_system_power_state_lock; +#endif + +void *nvidia_p2p_page_t_cache; +void *nvidia_stack_t_cache; +static nvidia_stack_t *__nv_init_sp; + +struct semaphore nv_linux_devices_lock; + +// True if at least one of the successfully probed devices support ATS +// Assigned at device probe (module init) time +NvBool nv_ats_supported; + +// allow an easy way to convert all debug printfs related to events +// back and forth between 'info' and 'errors' +#if defined(NV_DBG_EVENTS) +#define NV_DBG_EVENTINFO NV_DBG_ERRORS +#else +#define NV_DBG_EVENTINFO NV_DBG_INFO +#endif + +#if defined(HDA_MAX_CODECS) +#define NV_HDA_MAX_CODECS HDA_MAX_CODECS +#else +#define NV_HDA_MAX_CODECS 8 +#endif + +/*** + *** STATIC functions, only in this file + ***/ + +/* nvos_ functions.. do not take a state device parameter */ +static int nvos_count_devices(int *, int *); + +static nv_alloc_t *nvos_create_alloc(struct device *, NvU64); +static int nvos_free_alloc(nv_alloc_t *); + +/*** + *** EXPORTS to Linux Kernel + ***/ + +static irqreturn_t nvidia_isr_common_bh (void *); +static void nvidia_isr_bh_unlocked (void *); +static int nvidia_ctl_open (struct inode *, struct file *); +static int nvidia_ctl_close (struct inode *, struct file *); + +const char *nv_device_name = MODULE_NAME; +static const char *nvidia_stack_cache_name = MODULE_NAME "_stack_cache"; +static const char *nvidia_p2p_page_cache_name = MODULE_NAME "_p2p_page_cache"; + +static int nvidia_open (struct inode *, struct file *); +static int nvidia_close (struct inode *, struct file *); +static unsigned int nvidia_poll (struct file *, poll_table *); +static int nvidia_ioctl (struct inode *, struct file *, unsigned int, unsigned long); +static long nvidia_unlocked_ioctl (struct file *, unsigned int, unsigned long); + +/* character device entry points*/ +static struct file_operations nvidia_fops = { + .owner = THIS_MODULE, + .poll = nvidia_poll, + .unlocked_ioctl = nvidia_unlocked_ioctl, +#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64 + .compat_ioctl = nvidia_unlocked_ioctl, +#endif + .mmap = nvidia_mmap, + .open = nvidia_open, + .release = nvidia_close, +}; + +#if defined(CONFIG_PM) +static int nv_pmops_suspend (struct device *dev); +static int nv_pmops_resume (struct device *dev); +static int nv_pmops_freeze (struct device *dev); +static int nv_pmops_thaw (struct device *dev); +static int nv_pmops_restore (struct device *dev); +static int nv_pmops_poweroff (struct device *dev); +static int nv_pmops_runtime_suspend (struct device *dev); +static int nv_pmops_runtime_resume (struct device *dev); + +struct dev_pm_ops nv_pm_ops = { + .suspend = nv_pmops_suspend, + .resume = nv_pmops_resume, + .freeze = nv_pmops_freeze, + .thaw = nv_pmops_thaw, + .poweroff = nv_pmops_poweroff, + .restore = nv_pmops_restore, + .runtime_suspend = nv_pmops_runtime_suspend, + .runtime_resume = nv_pmops_runtime_resume, +}; +#endif + +/*** + *** see nv.h for functions exported to other parts of resman + ***/ + +/*** + *** STATIC functions + ***/ + +#if defined(NVCPU_X86_64) +#define NV_AMD_SME_BIT BIT(0) + +static +NvBool nv_is_sme_supported( + void +) +{ + unsigned int eax, ebx, ecx, edx; + + /* Check for the SME/SEV support leaf */ + eax = 0x80000000; + ecx = 0; 
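+ /* CPUID leaf 0x80000000 returns the highest supported extended function number in EAX. */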
+ native_cpuid(&eax, &ebx, &ecx, &edx); + if (eax < 0x8000001f) + { + return NV_FALSE; + } + + eax = 0x8000001f; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + /* Check whether SME is supported */ + if (!(eax & NV_AMD_SME_BIT)) + { + return NV_FALSE; + } + + return NV_TRUE; +} +#endif + +static +NvBool nv_detect_sme_enabled( + void +) +{ +#if (defined(MSR_K8_SYSCFG) || defined(MSR_AMD64_SYSCFG)) && defined(NVCPU_X86_64) + NvU32 lo_val, hi_val; + + if (!nv_is_sme_supported()) + { + return NV_FALSE; + } + +#if defined(MSR_AMD64_SYSCFG) + rdmsr(MSR_AMD64_SYSCFG, lo_val, hi_val); +#if defined(MSR_AMD64_SYSCFG_MEM_ENCRYPT) + return (lo_val & MSR_AMD64_SYSCFG_MEM_ENCRYPT) ? NV_TRUE : NV_FALSE; +#endif //defined(MSR_AMD64_SYSCFG_MEM_ENCRYPT) +#elif defined(MSR_K8_SYSCFG) + rdmsr(MSR_K8_SYSCFG, lo_val, hi_val); +#if defined(MSR_K8_SYSCFG_MEM_ENCRYPT) + return (lo_val & MSR_K8_SYSCFG_MEM_ENCRYPT) ? NV_TRUE : NV_FALSE; +#endif //defined(MSR_K8_SYSCFG_MEM_ENCRYPT) +#endif //defined(MSR_AMD64_SYSCFG) +#else + return NV_FALSE; +#endif //(defined(MSR_K8_SYSCFG) || defined(MSR_AMD64_SYSCFG)) && defined(NVCPU_X86_64) +} + +static +void nv_detect_conf_compute_platform( + void +) +{ +#if defined(NV_CC_PLATFORM_PRESENT) + os_cc_enabled = cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT); + +#if defined(NV_CC_ATTR_SEV_SNP) + os_cc_sev_snp_enabled = cc_platform_has(CC_ATTR_GUEST_SEV_SNP); +#endif + + os_cc_sme_enabled = cc_platform_has(CC_ATTR_MEM_ENCRYPT); + +#if defined(NV_HV_GET_ISOLATION_TYPE) && IS_ENABLED(CONFIG_HYPERV) && defined(NVCPU_X86_64) + if (hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP) + { + os_cc_snp_vtom_enabled = NV_TRUE; + } +#endif + +#if defined(X86_FEATURE_TDX_GUEST) + if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) + { + os_cc_tdx_enabled = NV_TRUE; + } +#endif +#else + os_cc_enabled = NV_FALSE; + os_cc_sev_snp_enabled = NV_FALSE; + os_cc_sme_enabled = nv_detect_sme_enabled(); + os_cc_snp_vtom_enabled = NV_FALSE; + os_cc_tdx_enabled = NV_FALSE; +#endif //NV_CC_PLATFORM_PRESENT +} + +static +nv_alloc_t *nvos_create_alloc( + struct device *dev, + NvU64 num_pages +) +{ + nv_alloc_t *at; + NvU64 pt_size = num_pages * sizeof(nvidia_pte_t); + + // Sanity check inputs + if ((num_pages != 0) && ((pt_size / num_pages) != sizeof(nvidia_pte_t))) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: Invalid page table allocation - Number of pages exceeds max value.\n"); + return NULL; + } + + NV_KZALLOC(at, sizeof(nv_alloc_t)); + if (at == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate alloc info\n"); + return NULL; + } + + // at->num_pages is an unsigned int, check that the requested page count fits + at->num_pages = num_pages; + if (at->num_pages != num_pages) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: Invalid page table allocation - requested size overflows.\n"); + NV_KFREE(at, sizeof(nv_alloc_t)); + return NULL; + } + + at->page_table = kvzalloc(pt_size, NV_GFP_KERNEL); + if (at->page_table == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate page table\n"); + NV_KFREE(at, sizeof(nv_alloc_t)); + return NULL; + } + + NV_ATOMIC_SET(at->usage_count, 0); + at->pid = os_get_current_process(); + at->dev = dev; + + return at; +} + +static +int nvos_free_alloc( + nv_alloc_t *at +) +{ + if (at == NULL) + return -1; + + if (NV_ATOMIC_READ(at->usage_count)) + return 1; + + kvfree(at->page_table); + + NV_KFREE(at, sizeof(nv_alloc_t)); + + return 0; +} + +static void +nv_module_resources_exit(nv_stack_t *sp) +{ + nv_kmem_cache_free_stack(sp); + + 
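// Tear down in reverse order of nv_module_resources_init(): free the stack allocation first, then destroy the two caches. + 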
NV_KMEM_CACHE_DESTROY(nvidia_p2p_page_t_cache); + NV_KMEM_CACHE_DESTROY(nvidia_stack_t_cache); +} + +static int __init +nv_module_resources_init(nv_stack_t **sp) +{ + int rc = -ENOMEM; + + nvidia_stack_t_cache = NV_KMEM_CACHE_CREATE(nvidia_stack_cache_name, + nvidia_stack_t); + if (nvidia_stack_t_cache == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: nvidia_stack_t cache allocation failed.\n"); + goto exit; + } + + nvidia_p2p_page_t_cache = NV_KMEM_CACHE_CREATE(nvidia_p2p_page_cache_name, + nvidia_p2p_page_t); + if (nvidia_p2p_page_t_cache == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: nvidia_p2p_page_t cache allocation failed.\n"); + goto exit; + } + + rc = nv_kmem_cache_alloc_stack(sp); + if (rc < 0) + { + goto exit; + } + +exit: + if (rc < 0) + { + nv_kmem_cache_free_stack(*sp); + + NV_KMEM_CACHE_DESTROY(nvidia_p2p_page_t_cache); + NV_KMEM_CACHE_DESTROY(nvidia_stack_t_cache); + } + + return rc; +} + +NV_STATUS nv_init_page_pools(void); +void nv_destroy_page_pools(void); + +static void +nv_module_state_exit(nv_stack_t *sp) +{ + nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device); + + nv_teardown_pat_support(); + + nv_kthread_q_stop(&nv_deferred_close_kthread_q); + nv_kthread_q_stop(&nv_kthread_q); + + nv_destroy_page_pools(); + + nv_lock_destroy_locks(sp, nv); +} + +static int +nv_module_state_init(nv_stack_t *sp) +{ + int rc; + nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device); + + nv->os_state = (void *)&nv_ctl_device; + + if (!nv_lock_init_locks(sp, nv)) + { + return -ENOMEM; + } + + rc = nv_kthread_q_init(&nv_kthread_q, "nv_queue"); + if (rc != 0) + { + goto exit; + } + + if (nv_init_page_pools() != NV_OK) + { + rc = -ENOMEM; + nv_kthread_q_stop(&nv_kthread_q); + goto exit; + } + + rc = nv_kthread_q_init(&nv_deferred_close_kthread_q, "nv_queue"); + if (rc != 0) + { + nv_kthread_q_stop(&nv_kthread_q); + goto exit; + } + + rc = nv_init_pat_support(sp); + if (rc < 0) + { + nv_kthread_q_stop(&nv_deferred_close_kthread_q); + nv_kthread_q_stop(&nv_kthread_q); + goto exit; + } + + nv_linux_devices = NULL; + memset(nv_linux_minor_num_table, 0, sizeof(nv_linux_minor_num_table)); + NV_INIT_MUTEX(&nv_linux_devices_lock); + init_rwsem(&nv_system_pm_lock); + +#if defined(CONFIG_PM) + NV_INIT_MUTEX(&nv_system_power_state_lock); + nv_system_power_state = NV_POWER_STATE_RUNNING; + nv_system_pm_action_depth = NV_PM_ACTION_DEPTH_DEFAULT; +#endif + + NV_SPIN_LOCK_INIT(&nv_ctl_device.snapshot_timer_lock); + +exit: + if (rc < 0) + { + nv_destroy_page_pools(); + nv_lock_destroy_locks(sp, nv); + } + + return rc; +} + +static void __init +nv_registry_keys_init(nv_stack_t *sp) +{ + NV_STATUS status; + nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device); + NvU32 data; + + status = rm_read_registry_dword(sp, nv, NV_DMA_REMAP_PEER_MMIO, &data); + if (status == NV_OK) + { + nv_dma_remap_peer_mmio = data; + } +} + +static void __init +nv_report_applied_patches(void) +{ + unsigned i; + + for (i = 0; __nv_patches[i].short_description; i++) + { + if (i == 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Applied patches:\n"); + } + + nv_printf(NV_DBG_ERRORS, + "NVRM: Patch #%d: %s\n", i + 1, __nv_patches[i].short_description); + } +} + +static void +nv_drivers_exit(void) +{ +#if NV_SUPPORTS_PLATFORM_DEVICE + nv_platform_unregister_driver(); +#endif + nv_pci_unregister_driver(); +} + +static int __init +nv_drivers_init(void) +{ + int rc; + + rc = nv_pci_register_driver(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA PCI devices found.\n"); + rc = -ENODEV; + goto exit; + } + +#if NV_SUPPORTS_PLATFORM_DEVICE + rc = nv_platform_register_driver(); + if (rc < 0) + { + 
nv_printf(NV_DBG_ERRORS, "NVRM: SOC driver registration failed!\n"); + nv_pci_unregister_driver(); + rc = -ENODEV; + } +#endif + +exit: + return rc; +} + +static void +nv_module_exit(nv_stack_t *sp) +{ + nv_module_state_exit(sp); + + rm_shutdown_rm(sp); + + nv_destroy_rsync_info(); + + nv_cap_drv_exit(); + + nv_module_resources_exit(sp); +} + +static int __init +nv_module_init(nv_stack_t **sp) +{ + int rc; + + rc = nv_module_resources_init(sp); + if (rc < 0) + { + return rc; + } + + rc = nv_cap_drv_init(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: nv-cap-drv init failed.\n"); + goto cap_drv_exit; + } + + nv_init_rsync_info(); + nv_detect_conf_compute_platform(); + + if (!rm_init_rm(*sp)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: rm_init_rm() failed!\n"); + rc = -EIO; + goto nvlink_exit; + } + + rc = nv_module_state_init(*sp); + if (rc < 0) + { + goto init_rm_exit; + } + + return rc; + +init_rm_exit: + rm_shutdown_rm(*sp); + +nvlink_exit: + nv_destroy_rsync_info(); + +cap_drv_exit: + nv_cap_drv_exit(); + nv_module_resources_exit(*sp); + + return rc; +} + +/* + * In this function we check for the cases where GPU exclusion is not + * honored, and issue a warning. + * + * Only GPUs that support a mechanism to query UUID prior to + * initializing the GPU can be excluded, so that we can detect and + * exclude them during device probe. This function checks that an + * initialized GPU was not specified in the exclusion list, and issues a + * warning if so. + */ +static void +nv_assert_not_in_gpu_exclusion_list( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + char *uuid = rm_get_gpu_uuid(sp, nv); + + if (uuid == NULL) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "Unable to read UUID"); + return; + } + + if (nv_is_uuid_in_gpu_exclusion_list(uuid)) + { + NV_DEV_PRINTF(NV_DBG_WARNINGS, nv, + "Could not exclude GPU %s because PBI is not supported\n", + uuid); + WARN_ON(1); + } + + os_free_mem(uuid); + + return; +} + +static int __init nv_caps_root_init(void) +{ + nvidia_caps_root = os_nv_cap_init("driver/" MODULE_NAME); + + return (nvidia_caps_root == NULL) ? 
-ENOENT : 0; +} + +static void nv_caps_root_exit(void) +{ + os_nv_cap_destroy_entry(nvidia_caps_root); + nvidia_caps_root = NULL; +} + +static int nv_register_chrdev( + unsigned int minor, + unsigned int count, + struct cdev *cdev, + const char *name, + struct file_operations *fops +) +{ + int rc; + + rc = register_chrdev_region(MKDEV(NV_MAJOR_DEVICE_NUMBER, minor), + count, name); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: register_chrdev_region() failed for %s!\n", name); + return rc; + } + + cdev_init(cdev, fops); + rc = cdev_add(cdev, MKDEV(NV_MAJOR_DEVICE_NUMBER, minor), count); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: cdev_add() failed for %s!\n", name); + unregister_chrdev_region(MKDEV(NV_MAJOR_DEVICE_NUMBER, minor), count); + return rc; + } + + return rc; +} + +static void nv_unregister_chrdev( + unsigned int minor, + unsigned int count, + struct cdev *cdev +) +{ + cdev_del(cdev); + unregister_chrdev_region(MKDEV(NV_MAJOR_DEVICE_NUMBER, minor), count); +} + +static int __init nvidia_init_module(void) +{ + int rc; + NvU32 count; + NvBool warn_unprobed = NV_FALSE; + nvidia_stack_t *sp = NULL; + const NvBool is_nvswitch_present = os_is_nvswitch_present(); + int num_pci_devices = 0, num_platform_devices = 0; + + nv_memdbg_init(); + + rc = nv_procfs_init(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to initialize procfs.\n"); + return rc; + } + + rc = nv_caps_root_init(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to initialize capabilities.\n"); + goto procfs_exit; + } + + rc = nv_caps_imex_init(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to initialize IMEX channels.\n"); + goto caps_root_exit; + } + + rc = nv_module_init(&sp); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to initialize module.\n"); + goto caps_imex_exit; + } + + count = nvos_count_devices(&num_pci_devices, &num_platform_devices); + if ((count == 0) && (!is_nvswitch_present)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA GPU found.\n"); + rc = -ENODEV; + goto module_exit; + } + + rc = nv_drivers_init(); + if (rc < 0) + { + goto module_exit; + } + + warn_unprobed = (num_probed_nv_devices != count); + WARN_ON(num_probed_nv_devices > count); + + if (num_platform_devices > 0 && + !NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE) + { + // RM was configured for tegra display but some conftests failed + nv_printf(NV_DBG_WARNINGS, + "NVRM: Failed to probe Tegra Display platform device.\n"); + nv_printf(NV_DBG_WARNINGS, + "NVRM: This kernel is not compatible with Tegra Display.\n"); + + // Warn if any PCI GPUs weren't probed + if (count > num_probed_nv_devices) + warn_unprobed = (count - num_probed_nv_devices != num_platform_devices); + } + + if (warn_unprobed) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: The NVIDIA probe routine was not called for %d device(s).\n", + count - num_probed_nv_devices); + nv_printf(NV_DBG_ERRORS, + "NVRM: This can occur when another driver was loaded and \n" + "NVRM: obtained ownership of the NVIDIA device(s).\n"); + nv_printf(NV_DBG_ERRORS, + "NVRM: Try unloading the conflicting kernel module (and/or\n" + "NVRM: reconfigure your kernel without the conflicting\n" + "NVRM: driver(s)), then try loading the NVIDIA kernel module\n" + "NVRM: again.\n"); + } + + if ((num_probed_nv_devices == 0) && (!is_nvswitch_present)) + { + rc = -ENODEV; + nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA devices probed.\n"); + goto drivers_exit; + } + + if (num_probed_nv_devices != num_nv_devices) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: The NVIDIA probe 
routine failed for %d device(s).\n", + num_probed_nv_devices - num_nv_devices); + } + + if ((num_nv_devices == 0) && (!is_nvswitch_present)) + { + rc = -ENODEV; + nv_printf(NV_DBG_ERRORS, + "NVRM: None of the NVIDIA devices were initialized.\n"); + goto drivers_exit; + } + + /* + * Initialize registry keys after PCI driver registration has + * completed successfully to support per-device module + * parameters. + */ + nv_registry_keys_init(sp); + + nv_report_applied_patches(); + + nv_printf(NV_DBG_ERRORS, "NVRM: loading %s\n", pNVRM_ID); + +#if defined(NV_UVM_ENABLE) + rc = nv_uvm_init(); + if (rc != 0) + { + goto drivers_exit; + } +#endif + + /* + * Register char devices for both the region of regular devices + * as well as the control device. + * + * NOTE: THIS SHOULD BE DONE LAST. + */ + rc = nv_register_chrdev(0, NV_MINOR_DEVICE_NUMBER_REGULAR_MAX + 1, + &nv_linux_devices_cdev, "nvidia", &nvidia_fops); + if (rc < 0) + { + goto no_chrdev_exit; + } + + rc = nv_register_chrdev(NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE, 1, + &nv_linux_control_device_cdev, "nvidiactl", &nvidia_fops); + if (rc < 0) + { + goto partial_chrdev_exit; + } + + __nv_init_sp = sp; + + return 0; + +partial_chrdev_exit: + nv_unregister_chrdev(0, NV_MINOR_DEVICE_NUMBER_REGULAR_MAX + 1, + &nv_linux_devices_cdev); + +no_chrdev_exit: +#if defined(NV_UVM_ENABLE) + nv_uvm_exit(); +#endif + +drivers_exit: + nv_drivers_exit(); + +module_exit: + nv_module_exit(sp); + +caps_imex_exit: + nv_caps_imex_exit(); + +caps_root_exit: + nv_caps_root_exit(); + +procfs_exit: + nv_procfs_exit(); + + return rc; +} + +static void __exit nvidia_exit_module(void) +{ + nvidia_stack_t *sp = __nv_init_sp; + + nv_unregister_chrdev(NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE, 1, + &nv_linux_control_device_cdev); + nv_unregister_chrdev(0, NV_MINOR_DEVICE_NUMBER_REGULAR_MAX + 1, + &nv_linux_devices_cdev); + +#if defined(NV_UVM_ENABLE) + nv_uvm_exit(); +#endif + + nv_drivers_exit(); + + nv_module_exit(sp); + + nv_caps_imex_exit(); + + nv_caps_root_exit(); + + nv_procfs_exit(); + + nv_memdbg_exit(); +} + +static void *nv_alloc_file_private(void) +{ + nv_linux_file_private_t *nvlfp; + + NV_KZALLOC(nvlfp, sizeof(nv_linux_file_private_t)); + if (!nvlfp) + return NULL; + + init_waitqueue_head(&nvlfp->waitqueue); + NV_SPIN_LOCK_INIT(&nvlfp->fp_lock); + + return nvlfp; +} + +static void nv_free_file_private(nv_linux_file_private_t *nvlfp) +{ + nvidia_event_t *nvet; + + if (nvlfp == NULL) + return; + + for (nvet = nvlfp->event_data_head; nvet != NULL; nvet = nvlfp->event_data_head) + { + nvlfp->event_data_head = nvlfp->event_data_head->next; + NV_KFREE(nvet, sizeof(nvidia_event_t)); + } + + if (nvlfp->mmap_context.valid) + { + if (nvlfp->mmap_context.page_array != NULL) + { + os_free_mem(nvlfp->mmap_context.page_array); + } + if (nvlfp->mmap_context.memArea.pRanges != NULL) + { + os_free_mem(nvlfp->mmap_context.memArea.pRanges); + } + } + + NV_KFREE(nvlfp, sizeof(nv_linux_file_private_t)); +} + +/* + * Find the nv device with the given minor device number in the minor number + * table. Caller should hold nv_linux_devices_lock using + * LOCK_NV_LINUX_DEVICES. This function does not automatically take + * nvl->ldata_lock, so the caller must do that if required. 
+ */ +static nv_linux_state_t *find_minor_locked(NvU32 minor) +{ + nv_linux_state_t *nvl; + + if (minor > NV_MINOR_DEVICE_NUMBER_REGULAR_MAX) + return NULL; + + nvl = nv_linux_minor_num_table[minor]; + if (nvl == NULL) + { + // there isn't actually a GPU present for nv_linux_minor_num_table[minor] + } + else if (nvl->minor_num != minor) + { + // nv_linux_minor_num_table out of sync -- this shouldn't happen + WARN_ON(1); + nvl = NULL; + } + + return nvl; +} + +/* + * Find the nv device with the given minor device number in the minor number + * table. If found, nvl is returned with nvl->ldata_lock taken. + */ +static nv_linux_state_t *find_minor(NvU32 minor) +{ + nv_linux_state_t *nvl; + + if (minor > NV_MINOR_DEVICE_NUMBER_REGULAR_MAX) + return NULL; + + LOCK_NV_LINUX_DEVICES(); + + nvl = find_minor_locked(minor); + if (nvl != NULL) + { + down(&nvl->ldata_lock); + } + + UNLOCK_NV_LINUX_DEVICES(); + return nvl; +} + +/* + * Search the global list of nv devices for the one with the given gpu_id. + * If found, nvl is returned with nvl->ldata_lock taken. + */ +static nv_linux_state_t *find_gpu_id(NvU32 gpu_id) +{ + nv_linux_state_t *nvl; + + LOCK_NV_LINUX_DEVICES(); + nvl = nv_linux_devices; + while (nvl != NULL) + { + nv_state_t *nv = NV_STATE_PTR(nvl); + if (nv->gpu_id == gpu_id) + { + down(&nvl->ldata_lock); + break; + } + nvl = nvl->next; + } + + UNLOCK_NV_LINUX_DEVICES(); + return nvl; +} + +/* + * Search the global list of nv devices for the one with the given UUID. Devices + * with missing UUID information are ignored. If found, nvl is returned with + * nvl->ldata_lock taken. + */ +nv_linux_state_t *find_uuid(const NvU8 *uuid) +{ + nv_linux_state_t *nvl = NULL; + nv_state_t *nv; + const NvU8 *dev_uuid; + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl; nvl = nvl->next) + { + nv = NV_STATE_PTR(nvl); + down(&nvl->ldata_lock); + dev_uuid = nv_get_cached_uuid(nv); + if (dev_uuid && memcmp(dev_uuid, uuid, GPU_UUID_LEN) == 0) + goto out; + up(&nvl->ldata_lock); + } + +out: + UNLOCK_NV_LINUX_DEVICES(); + return nvl; +} + +/* + * Search the global list of nv devices. The search logic is: + * + * 1) If any device has the given UUID, return it + * + * 2) If no device has the given UUID but at least one device is missing + * its UUID (for example because rm_init_adapter has not run on it yet), + * return that device. + * + * 3) If no device has the given UUID and all UUIDs are present, return NULL. + * + * In cases 1 and 2, nvl is returned with nvl->ldata_lock taken. + * + * The reason for this weird logic is because UUIDs aren't always available. See + * bug 1642200. + */ +static nv_linux_state_t *find_uuid_candidate(const NvU8 *uuid) +{ + nv_linux_state_t *nvl = NULL; + nv_state_t *nv; + const NvU8 *dev_uuid; + int use_missing; + int has_missing = 0; + + LOCK_NV_LINUX_DEVICES(); + + /* + * Take two passes through the list. The first pass just looks for the UUID. + * The second looks for the target or missing UUIDs. It would be nice if + * this could be done in a single pass by remembering which nvls are missing + * UUIDs, but we have to hold the nvl lock after we check for the UUID. 
+ */ + for (use_missing = 0; use_missing <= 1; use_missing++) + { + for (nvl = nv_linux_devices; nvl; nvl = nvl->next) + { + nv = NV_STATE_PTR(nvl); + down(&nvl->ldata_lock); + dev_uuid = nv_get_cached_uuid(nv); + if (dev_uuid) + { + /* Case 1: If a device has the given UUID, return it */ + if (memcmp(dev_uuid, uuid, GPU_UUID_LEN) == 0) + goto out; + } + else + { + /* Case 2: If no device has the given UUID but at least one + * device is missing its UUID, return that device. */ + if (use_missing) + goto out; + has_missing = 1; + } + up(&nvl->ldata_lock); + } + + /* Case 3: If no device has the given UUID and all UUIDs are present, + * return NULL. */ + if (!has_missing) + break; + } + +out: + UNLOCK_NV_LINUX_DEVICES(); + return nvl; +} + +void nv_dev_free_stacks(nv_linux_state_t *nvl) +{ + NvU32 i; + for (i = 0; i < NV_DEV_STACK_COUNT; i++) + { + if (nvl->sp[i]) + { + nv_kmem_cache_free_stack(nvl->sp[i]); + nvl->sp[i] = NULL; + } + } +} + +static int nv_dev_alloc_stacks(nv_linux_state_t *nvl) +{ + NvU32 i; + int rc; + + for (i = 0; i < NV_DEV_STACK_COUNT; i++) + { + rc = nv_kmem_cache_alloc_stack(&nvl->sp[i]); + if (rc != 0) + { + nv_dev_free_stacks(nvl); + return rc; + } + } + + return 0; +} + +static int validate_numa_start_state(nv_linux_state_t *nvl) +{ + int rc = 0; + int numa_status = nv_get_numa_status(nvl); + + if (numa_status != NV_IOCTL_NUMA_STATUS_DISABLED) + { + if (nv_ctl_device.numa_memblock_size == 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: numa memblock size of zero " + "found during device start"); + rc = -EINVAL; + } + else + { + /* Keep the individual devices consistent with the control device */ + nvl->numa_memblock_size = nv_ctl_device.numa_memblock_size; + } + } + + return rc; +} + +NV_STATUS NV_API_CALL nv_get_num_dpaux_instances(nv_state_t *nv, NvU32 *num_instances) +{ + *num_instances = nv->num_dpaux_instance; + return NV_OK; +} + +void NV_API_CALL +nv_schedule_uvm_isr(nv_state_t *nv) +{ +#if defined(NV_UVM_ENABLE) + nv_uvm_event_interrupt(nv_get_cached_uuid(nv)); +#endif +} + +NV_STATUS NV_API_CALL +nv_schedule_uvm_drain_p2p(NvU8 *pUuid) +{ +#if defined(NV_UVM_ENABLE) + return nv_uvm_drain_P2P(pUuid); +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +void NV_API_CALL +nv_schedule_uvm_resume_p2p(NvU8 *pUuid) +{ +#if defined(NV_UVM_ENABLE) + nv_uvm_resume_P2P(pUuid); +#endif +} + +/* + * Brings up the device on the first file open. Assumes nvl->ldata_lock is held. 
+ */ +static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + NvU32 msi_config = 0; +#endif + int rc = 0; + NvBool kthread_init = NV_FALSE; + NvBool remove_numa_memory_kthread_init = NV_FALSE; + NvBool power_ref = NV_FALSE; + + rc = nv_get_rsync_info(); + if (rc != 0) + { + return rc; + } + + rc = validate_numa_start_state(nvl); + if (rc != 0) + { + goto failed; + } + + if (dev_is_pci(nvl->dev) && (nv->pci_info.device_id == 0)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: open of non-existent GPU with minor number %d\n", nvl->minor_num); + rc = -ENXIO; + goto failed; + } + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + if (rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE) != NV_OK) + { + rc = -EINVAL; + goto failed; + } + power_ref = NV_TRUE; + } + else + { + if (rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE) != NV_OK) + { + rc = -EINVAL; + goto failed; + } + power_ref = NV_TRUE; + } + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + rc = nv_dev_alloc_stacks(nvl); + if (rc != 0) + goto failed; + } + +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + if (dev_is_pci(nvl->dev)) + { + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + rm_read_registry_dword(sp, nv, NV_REG_ENABLE_MSI, &msi_config); + if (msi_config == 1) + { + if (nvl->pci_dev->msix_cap && rm_is_msix_allowed(sp, nv)) + { + nv_init_msix(nv); + } + if (nvl->pci_dev->msi_cap && !(nv->flags & NV_FLAG_USES_MSIX)) + { + nv_init_msi(nv); + } + } + } + } +#endif + + if (((!(nv->flags & NV_FLAG_USES_MSI)) && (!(nv->flags & NV_FLAG_USES_MSIX))) + && (nv->interrupt_line == 0) && !(nv->flags & NV_FLAG_SOC_DISPLAY)) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "No interrupts of any type are available. 
Cannot use this GPU.\n"); + rc = -EIO; + goto failed; + } + + rc = 0; + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + if (nv->flags & NV_FLAG_SOC_DISPLAY) + { + rc = nv_soc_register_irqs(nv); + } + else if (!(nv->flags & NV_FLAG_USES_MSIX)) + { + rc = request_threaded_irq(nv->interrupt_line, nvidia_isr, + nvidia_isr_kthread_bh, nv_default_irq_flags(nv), + nv_device_name, (void *)nvl); + } +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + else + { + rc = nv_request_msix_irq(nvl); + } +#endif + } + if (rc != 0) + { + if ((nv->interrupt_line != 0) && (rc == -EBUSY)) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Tried to get IRQ %d, but another driver\n", + (unsigned int) nv->interrupt_line); + nv_printf(NV_DBG_ERRORS, "NVRM: has it and is not sharing it.\n"); + nv_printf(NV_DBG_ERRORS, "NVRM: You may want to verify that no audio driver"); + nv_printf(NV_DBG_ERRORS, " is using the IRQ.\n"); + } + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "request_irq() failed (%d)\n", rc); + goto failed; + } + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + rc = os_alloc_mutex(&nvl->isr_bh_unlocked_mutex); + if (rc != 0) + goto failed; + nv_kthread_q_item_init(&nvl->bottom_half_q_item, nvidia_isr_bh_unlocked, (void *)nv); + rc = nv_kthread_q_init(&nvl->bottom_half_q, nv_device_name); + if (rc != 0) + goto failed; + kthread_init = NV_TRUE; + + rc = nv_kthread_q_init(&nvl->queue.nvk, "nv_queue"); + if (rc) + goto failed; + nv->queue = &nvl->queue; + + if (nv_platform_use_auto_online(nvl)) + { + rc = nv_kthread_q_init(&nvl->remove_numa_memory_q, + "nv_remove_numa_memory"); + if (rc) + goto failed; + remove_numa_memory_kthread_init = NV_TRUE; + } + } + + if (!rm_init_adapter(sp, nv)) + { + if (!(nv->flags & NV_FLAG_USES_MSIX) && + !(nv->flags & NV_FLAG_SOC_DISPLAY)) + { + free_irq(nv->interrupt_line, (void *) nvl); + } + else if (nv->flags & NV_FLAG_SOC_DISPLAY) + { + nv_soc_free_irqs(nv); + } +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + else + { + nv_free_msix_irq(nvl); + } +#endif + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "rm_init_adapter failed, device minor number %d\n", + nvl->minor_num); + rc = -EIO; + goto failed; + } + + /* Generate and cache the UUID for future callers */ + (void)rm_get_gpu_uuid_raw(sp, nv); + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + nv_acpi_register_notifier(nvl); + } + + nv->flags |= NV_FLAG_OPEN; + + rm_request_dnotifier_state(sp, nv); + + /* + * Now that RM init is done, allow dynamic power to control the GPU in FINE + * mode, if enabled. (If the mode is COARSE, this unref will do nothing + * which will cause the GPU to remain powered up.) + * This is balanced by a FINE ref increment at the beginning of + * nv_stop_device(). 
+ */ + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE); + + return 0; + +failed: +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + if (nv->flags & NV_FLAG_USES_MSI) + { + nv->flags &= ~NV_FLAG_USES_MSI; + NV_PCI_DISABLE_MSI(nvl->pci_dev); + if(nvl->irq_count) + NV_KFREE(nvl->irq_count, nvl->num_intr * sizeof(nv_irq_count_info_t)); + } + else if (nv->flags & NV_FLAG_USES_MSIX) + { + nv->flags &= ~NV_FLAG_USES_MSIX; + pci_disable_msix(nvl->pci_dev); + NV_KFREE(nvl->irq_count, nvl->num_intr*sizeof(nv_irq_count_info_t)); + NV_KFREE(nvl->msix_entries, nvl->num_intr*sizeof(struct msix_entry)); + } + + if (nvl->msix_bh_mutex) + { + os_free_mutex(nvl->msix_bh_mutex); + nvl->msix_bh_mutex = NULL; + } +#endif + + if (nv->queue && !(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + nv->queue = NULL; + nv_kthread_q_stop(&nvl->queue.nvk); + } + + if (kthread_init && !(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + nv_kthread_q_stop(&nvl->bottom_half_q); + + if (remove_numa_memory_kthread_init && + !(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + nv_kthread_q_stop(&nvl->remove_numa_memory_q); + } + + if (nvl->isr_bh_unlocked_mutex) + { + os_free_mutex(nvl->isr_bh_unlocked_mutex); + nvl->isr_bh_unlocked_mutex = NULL; + } + + if (nv->flags & NV_FLAG_TRIGGER_FLR) + { + if (nvl->pci_dev) + { + nv_printf(NV_DBG_INFO, "NVRM: Trigger FLR on Failure!\n"); + os_pci_trigger_flr((void *)nvl->pci_dev); + } + nv->flags &= ~NV_FLAG_TRIGGER_FLR; + } + + nv_dev_free_stacks(nvl); + + if (power_ref) + { + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE); + } + + nv_put_rsync_info(); + + return rc; +} + +/* + * Makes sure the device is ready for operations and increases nvl->usage_count. + * Assumes nvl->ldata_lock is held. + */ +static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int rc; + NV_STATUS status; + + if ((nv->flags & NV_FLAG_PCI_REMOVE_IN_PROGRESS) != 0) + { + return -ENODEV; + } + + if ((nv->flags & NV_FLAG_EXCLUDE) != 0) + { + char *uuid = rm_get_gpu_uuid(sp, nv); + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "open() not permitted for excluded %s\n", + (uuid != NULL) ? uuid : "GPU"); + if (uuid != NULL) + os_free_mem(uuid); + return -EPERM; + } + + if (os_is_vgx_hyper()) + { + /* fail open if GPU is being unbound */ + if (nv->flags & NV_FLAG_UNBIND_LOCK) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Open failed as GPU is locked for unbind operation\n"); + return -ENODEV; + } + } + + NV_DEV_PRINTF(NV_DBG_INFO, nv, "Opening GPU with minor number %d\n", + nvl->minor_num); + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "Device in removal process\n"); + return -ENODEV; + } + + if (unlikely(NV_ATOMIC_READ(nvl->usage_count) >= NV_S32_MAX)) + return -EMFILE; + + if ( ! (nv->flags & NV_FLAG_OPEN)) + { + /* Sanity check: !NV_FLAG_OPEN requires usage_count == 0 */ + if (NV_ATOMIC_READ(nvl->usage_count) != 0) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Minor device %u is referenced without being open!\n", + nvl->minor_num); + WARN_ON(1); + return -EBUSY; + } + + rc = nv_start_device(nv, sp); + if (rc != 0) + return rc; + } + else if (rm_is_device_sequestered(sp, nv)) + { + /* Do not increment the usage count of sequestered devices. 
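+         * Such devices stay visible to clients but report -EBUSY
+         * ("currently unavailable") below, so no new references are
+         * taken while the device is sequestered.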
*/ + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Device is currently unavailable\n"); + return -EBUSY; + } + + nv_assert_not_in_gpu_exclusion_list(sp, nv); + + NV_ATOMIC_INC(nvl->usage_count); + return 0; +} + +static void nv_init_mapping_revocation(nv_linux_state_t *nvl, + struct file *file, + nv_linux_file_private_t *nvlfp, + struct inode *inode) +{ + /* Set up struct address_space for use with unmap_mapping_range() */ + address_space_init_once(&nvlfp->mapping); + nvlfp->mapping.host = inode; + nvlfp->mapping.a_ops = inode->i_mapping->a_ops; + file->f_mapping = &nvlfp->mapping; +} + +/* Adds nvlfp to list of open files for mapping revocation */ +static void nv_add_open_file(nv_linux_state_t *nvl, + nv_linux_file_private_t *nvlfp) +{ + nvlfp->nvptr = nvl; + + /* + * nvl->open_files and other mapping revocation members in nv_linux_state_t + * are protected by nvl->mmap_lock instead of nvl->ldata_lock. + */ + down(&nvl->mmap_lock); + list_add(&nvlfp->entry, &nvl->open_files); + up(&nvl->mmap_lock); +} + +/* + * Like nv_open_device but stores rc and adapter status in the given nvlfp. + * Assumes nvl->ldata_lock is held. + */ +static int nv_open_device_for_nvlfp( + nv_state_t *nv, + nvidia_stack_t *sp, + nv_linux_file_private_t *nvlfp +) +{ + nvlfp->open_rc = nv_open_device(nv, sp); + + if (nvlfp->open_rc == 0) + { + nvlfp->adapter_status = NV_OK; + } + else + { + nvlfp->adapter_status = rm_get_adapter_status_external(sp, nv); + } + + return nvlfp->open_rc; +} + +static void nvidia_open_deferred(void *nvlfp_raw) +{ + nv_linux_file_private_t *nvlfp = (nv_linux_file_private_t *) nvlfp_raw; + nv_linux_state_t *nvl = nvlfp->deferred_open_nvl; + int rc; + + /* + * Deferred opens and device removal are synchronized via + * nvl->is_accepting_opens and nvl->open_q flushes so that nvl is + * guaranteed to outlive any pending open operation. + * + * So, it is safe to take nvl->ldata_lock here without holding + * any refcount or larger lock. + * + * Deferred opens and system suspend are synchronized by an explicit + * nvl->open_q flush before suspending. + * + * So, it is safe to proceed without nv_system_pm_lock here (in fact, it + * must not be taken to ensure nvl->open_q can make forward progress). + */ + down(&nvl->ldata_lock); + rc = nv_open_device_for_nvlfp(NV_STATE_PTR(nvl), nvlfp->sp, nvlfp); + + /* Only add open file tracking where nvl->usage_count is incremented */ + if (rc == 0) + nv_add_open_file(nvl, nvlfp); + + up(&nvl->ldata_lock); + + complete_all(&nvlfp->open_complete); +} + +/* + * Tries to prepare (by taking nvl->ldata_lock) for an open in the foreground + * for the given file and device. + * + * This succeeds if: + * - O_NONBLOCK is not passed (or non-blocking opens are disabled), or + * - O_NONBLOCK is passed, but we are able to determine (without blocking) + * that the device is already initialized + * + * Returns 0 with nvl->ldata_lock taken if open can occur in the foreground. + * Otherwise, returns non-zero (without nvl->ldata_lock taken). 
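+ *
+ * Caller pattern for reference, a condensed sketch of nvidia_open() below
+ * (illustration only):
+ *
+ *   if (nv_try_lock_foreground_open(file, nvl) == 0) {
+ *       rc = nv_open_device_for_nvlfp(nv, nvlfp->sp, nvlfp); // foreground open
+ *       up(&nvl->ldata_lock);
+ *   } else {
+ *       // enqueue nvidia_open_deferred() on nvl->open_q instead
+ *   }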
+ */ +static int nv_try_lock_foreground_open( + struct file *file, + nv_linux_state_t *nvl +) +{ + nv_state_t *nv = NV_STATE_PTR(nvl); + + if (NVreg_EnableNonblockingOpen && (file->f_flags & O_NONBLOCK)) + { + if (down_trylock(&nvl->ldata_lock) == 0) + { + if (nv->flags & NV_FLAG_OPEN) + { + /* device already initialized */ + return 0; + } + else + { + /* device not initialized yet */ + up(&nvl->ldata_lock); + return -EWOULDBLOCK; + } + } + else + { + /* unable to check nv->flags safely without blocking */ + return -EWOULDBLOCK; + } + } + + /* O_NONBLOCK not passed or non-blocking opens are disabled */ + down(&nvl->ldata_lock); + return 0; +} + +/* +** nvidia_open +** +** nv driver open entry point. Sessions are created here. +*/ +int +nvidia_open( + struct inode *inode, + struct file *file +) +{ + nv_state_t *nv = NULL; + nv_linux_state_t *nvl = NULL; + int rc = 0; + nv_linux_file_private_t *nvlfp = NULL; + nvidia_stack_t *sp = NULL; + + nv_printf(NV_DBG_INFO, "NVRM: nvidia_open...\n"); + + nvlfp = nv_alloc_file_private(); + if (nvlfp == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate file private!\n"); + return -ENOMEM; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + nv_free_file_private(nvlfp); + return rc; + } + + NV_SET_FILE_PRIVATE(file, nvlfp); + nvlfp->sp = sp; + + /* for control device, just jump to its open routine */ + /* after setting up the private data */ + if (nv_is_control_device(inode)) + { + rc = nvidia_ctl_open(inode, file); + if (rc != 0) + goto failed; + return rc; + } + + rc = nv_down_read_interruptible(&nv_system_pm_lock); + if (rc < 0) + goto failed; + + /* nvptr will get set to actual nvl upon successful open */ + nvlfp->nvptr = NULL; + + init_completion(&nvlfp->open_complete); + + LOCK_NV_LINUX_DEVICES(); + + nvl = find_minor_locked(NV_DEVICE_MINOR_NUMBER(inode)); + if (nvl == NULL) + { + rc = -ENODEV; + UNLOCK_NV_LINUX_DEVICES(); + up_read(&nv_system_pm_lock); + goto failed; + } + + nv = NV_STATE_PTR(nvl); + nv_init_mapping_revocation(nvl, file, nvlfp, inode); + + if (nv_try_lock_foreground_open(file, nvl) == 0) + { + /* Proceed in foreground */ + /* nvl->ldata_lock is already taken at this point */ + + UNLOCK_NV_LINUX_DEVICES(); + + rc = nv_open_device_for_nvlfp(nv, nvlfp->sp, nvlfp); + + /* Only add open file tracking where nvl->usage_count is incremented */ + if (rc == 0) + nv_add_open_file(nvl, nvlfp); + + up(&nvl->ldata_lock); + + complete_all(&nvlfp->open_complete); + } + else + { + /* Defer to background kthread */ + int item_scheduled = 0; + + /* + * Take nvl->open_q_lock in order to check nvl->is_accepting_opens and + * schedule work items on nvl->open_q. + * + * Continue holding nv_linux_devices_lock (LOCK_NV_LINUX_DEVICES) + * until the work item gets onto nvl->open_q in order to ensure the + * lifetime of nvl. + */ + down(&nvl->open_q_lock); + + if (!nvl->is_accepting_opens) + { + /* Background kthread is not accepting opens, bail! 
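+             * Device removal stops accepting opens and flushes
+             * nvl->open_q (see the synchronization note in
+             * nvidia_open_deferred()), so a racing open must fail with
+             * -EBUSY here rather than enqueue new work.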
*/ + rc = -EBUSY; + goto nonblock_end; + } + + nvlfp->deferred_open_nvl = nvl; + nv_kthread_q_item_init(&nvlfp->open_q_item, + nvidia_open_deferred, + nvlfp); + + item_scheduled = nv_kthread_q_schedule_q_item( + &nvl->open_q, &nvlfp->open_q_item); + + if (!item_scheduled) + { + WARN_ON(!item_scheduled); + rc = -EBUSY; + } + +nonblock_end: + up(&nvl->open_q_lock); + UNLOCK_NV_LINUX_DEVICES(); + } + + up_read(&nv_system_pm_lock); +failed: + if (rc != 0) + { + if (nvlfp != NULL) + { + nv_free_file_private(nvlfp); + NV_SET_FILE_PRIVATE(file, NULL); + } + } + + return rc; +} + +void nv_shutdown_adapter(nvidia_stack_t *sp, + nv_state_t *nv, + nv_linux_state_t *nvl) +{ + rm_disable_adapter(sp, nv); + + // It's safe to call nv_kthread_q_stop even if queue is not initialized + nv_kthread_q_stop(&nvl->bottom_half_q); + + if (nv->queue != NULL) + { + nv->queue = NULL; + nv_kthread_q_stop(&nvl->queue.nvk); + } + + if (nvl->isr_bh_unlocked_mutex) + { + os_free_mutex(nvl->isr_bh_unlocked_mutex); + nvl->isr_bh_unlocked_mutex = NULL; + } + + if (!(nv->flags & NV_FLAG_USES_MSIX) && + !(nv->flags & NV_FLAG_SOC_DISPLAY)) + { + free_irq(nv->interrupt_line, (void *)nvl); + if (nv->flags & NV_FLAG_USES_MSI) + { + NV_PCI_DISABLE_MSI(nvl->pci_dev); + if(nvl->irq_count) + NV_KFREE(nvl->irq_count, nvl->num_intr * sizeof(nv_irq_count_info_t)); + } + } + else if (nv->flags & NV_FLAG_SOC_DISPLAY) + { + nv_soc_free_irqs(nv); + } +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + else + { + nv_free_msix_irq(nvl); + pci_disable_msix(nvl->pci_dev); + nv->flags &= ~NV_FLAG_USES_MSIX; + NV_KFREE(nvl->msix_entries, nvl->num_intr*sizeof(struct msix_entry)); + NV_KFREE(nvl->irq_count, nvl->num_intr*sizeof(nv_irq_count_info_t)); + } +#endif + + if (nvl->msix_bh_mutex) + { + os_free_mutex(nvl->msix_bh_mutex); + nvl->msix_bh_mutex = NULL; + } + + rm_shutdown_adapter(sp, nv); + + if (nv->flags & NV_FLAG_TRIGGER_FLR) + { + if (nvl->pci_dev) + { + nv_printf(NV_DBG_INFO, "NVRM: Trigger FLR!\n"); + os_pci_trigger_flr((void *)nvl->pci_dev); + } + else + { + nv_printf(NV_DBG_ERRORS, "NVRM: FLR not supported by the device!\n"); + } + nv->flags &= ~NV_FLAG_TRIGGER_FLR; + } + + if (nv_platform_use_auto_online(nvl)) + nv_kthread_q_stop(&nvl->remove_numa_memory_q); +} + +/* + * Tears down the device on the last file close. Assumes nvl->ldata_lock is + * held. + */ +static void nv_stop_device(nv_state_t *nv, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + static int persistence_mode_notice_logged; + + /* + * The GPU needs to be powered on to go through the teardown sequence. + * This balances the FINE unref at the end of nv_start_device(). + */ + rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE); + + /* Adapter is already shutdown as part of nvidia_pci_remove */ + if (!nv->removed) + { + if (nv->flags & NV_FLAG_PERSISTENT_SW_STATE) + { + rm_disable_adapter(sp, nv); + } + else + { + nv_acpi_unregister_notifier(nvl); + nv_shutdown_adapter(sp, nv, nvl); + } + } + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + nv_dev_free_stacks(nvl); + } + + if ((nv->flags & NV_FLAG_PERSISTENT_SW_STATE) && + (!persistence_mode_notice_logged) && (!os_is_vgx_hyper())) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Persistence mode is deprecated and" + " will be removed in a future release. 
Please use" + " nvidia-persistenced instead.\n"); + persistence_mode_notice_logged = 1; + } + + /* leave INIT flag alone so we don't reinit every time */ + nv->flags &= ~NV_FLAG_OPEN; + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE); + } + else + { + /* If in legacy persistence mode, only unref FINE refcount. */ + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE); + } + + nv_put_rsync_info(); +} + +/* + * Decreases nvl->usage_count, stopping the device when it reaches 0. Assumes + * nvl->ldata_lock is held. + */ +static void nv_close_device(nv_state_t *nv, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (NV_ATOMIC_READ(nvl->usage_count) == 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: Attempting to close unopened minor device %u!\n", + nvl->minor_num); + WARN_ON(1); + return; + } + + if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count)) + nv_stop_device(nv, sp); +} + +/* +** nvidia_close +** +** Primary driver close entry point. +*/ + +static void +nvidia_close_callback( + nv_linux_file_private_t *nvlfp +) +{ + nv_linux_state_t *nvl; + nv_state_t *nv; + nvidia_stack_t *sp = nvlfp->sp; + NvBool bRemove = NV_FALSE; + + nvl = nvlfp->nvptr; + if (nvl == NULL) + { + /* + * If nvlfp has no associated nvl device (meaning the open operation + * failed), then there is no state outside of nvlfp to cleanup. + */ + + nv_free_file_private(nvlfp); + nv_kmem_cache_free_stack(sp); + return; + } + + nv = NV_STATE_PTR(nvl); + + rm_cleanup_file_private(sp, nv, &nvlfp->nvfp); + + down(&nvl->mmap_lock); + list_del(&nvlfp->entry); + up(&nvl->mmap_lock); + + down(&nvl->ldata_lock); + nv_close_device(nv, sp); + + bRemove = (!NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv)) && + (NV_ATOMIC_READ(nvl->usage_count) == 0) && + rm_get_device_remove_flag(sp, nv->gpu_id); + + nv_free_file_private(nvlfp); + + /* + * In case of surprise removal of device, we have 2 cases as below: + * + * 1> When nvidia_pci_remove is scheduled prior to nvidia_close. + * nvidia_pci_remove will not destroy linux layer locks & nv linux state + * struct but will set variable nv->removed for nvidia_close. + * Once all the clients are closed, last nvidia_close will clean up linux + * layer locks and nv linux state struct. + * + * 2> When nvidia_close is scheduled prior to nvidia_pci_remove. + * This will be treated as normal working case. nvidia_close will not do + * any cleanup related to linux layer locks and nv linux state struct. + * nvidia_pci_remove when scheduled will do necessary cleanup. 
+ */ + if ((NV_ATOMIC_READ(nvl->usage_count) == 0) && nv->removed) + { + nv_lock_destroy_locks(sp, nv); + NV_KFREE(nvl, sizeof(nv_linux_state_t)); + } + else + { + up(&nvl->ldata_lock); + + if (bRemove) + { + pci_stop_and_remove_bus_device(nvl->pci_dev); + } + } + + nv_kmem_cache_free_stack(sp); +} + +static void nvidia_close_deferred(void *data) +{ + nv_linux_file_private_t *nvlfp = data; + + nv_wait_open_complete(nvlfp); + + down_read(&nv_system_pm_lock); + + nvidia_close_callback(nvlfp); + + up_read(&nv_system_pm_lock); +} + +int +nvidia_close( + struct inode *inode, + struct file *file +) +{ + int rc; + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + + nv_printf(NV_DBG_INFO, + "NVRM: nvidia_close on GPU with minor number %d\n", + NV_DEVICE_MINOR_NUMBER(inode)); + + if (nv_is_control_device(inode)) + { + return nvidia_ctl_close(inode, file); + } + + NV_SET_FILE_PRIVATE(file, NULL); + + rc = nv_wait_open_complete_interruptible(nvlfp); + if (rc == 0) + { + rc = nv_down_read_interruptible(&nv_system_pm_lock); + } + + if (rc == 0) + { + nvidia_close_callback(nvlfp); + up_read(&nv_system_pm_lock); + } + else + { + nv_kthread_q_item_init(&nvlfp->deferred_close_q_item, + nvidia_close_deferred, + nvlfp); + rc = nv_kthread_q_schedule_q_item(&nv_deferred_close_kthread_q, + &nvlfp->deferred_close_q_item); + WARN_ON(rc == 0); + } + + return 0; +} + +unsigned int +nvidia_poll( + struct file *file, + poll_table *wait +) +{ + unsigned int mask = 0; + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + unsigned long eflags; + nv_linux_state_t *nvl; + nv_state_t *nv; + NV_STATUS status; + + if (!nv_is_control_device(NV_FILE_INODE(file))) + { + if (!nv_is_open_complete(nvlfp)) + { + return POLLERR; + } + } + + nvl = nvlfp->nvptr; + if (nvl == NULL) + { + return POLLERR; + } + + nv = NV_STATE_PTR(nvl); + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "GPU is lost, skipping nvidia_poll\n"); + return POLLHUP; + } + + if ((file->f_flags & O_NONBLOCK) == 0) + poll_wait(file, &nvlfp->waitqueue, wait); + + NV_SPIN_LOCK_IRQSAVE(&nvlfp->fp_lock, eflags); + + if ((nvlfp->event_data_head != NULL) || nvlfp->dataless_event_pending) + { + mask = (POLLPRI | POLLIN); + nvlfp->dataless_event_pending = NV_FALSE; + } + + NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags); + + return mask; +} + +#define NV_CTL_DEVICE_ONLY(nv) \ +{ \ + if (((nv)->flags & NV_FLAG_CONTROL) == 0) \ + { \ + status = -EINVAL; \ + goto done; \ + } \ +} + +#define NV_ACTUAL_DEVICE_ONLY(nv) \ +{ \ + if (((nv)->flags & NV_FLAG_CONTROL) != 0) \ + { \ + status = -EINVAL; \ + goto done; \ + } \ +} + +/* + * Fills the ci array with the state of num_entries devices. Returns -EINVAL if + * num_entries isn't big enough to hold all available devices. + */ +static int nvidia_read_card_info(nv_ioctl_card_info_t *ci, size_t num_entries) +{ + nv_state_t *nv; + nv_linux_state_t *nvl; + size_t i = 0; + int rc = 0; + + /* Clear each card's flags field the lazy way */ + memset(ci, 0, num_entries * sizeof(ci[0])); + + LOCK_NV_LINUX_DEVICES(); + + if (num_entries < num_nv_devices) + { + rc = -EINVAL; + goto out; + } + + for (nvl = nv_linux_devices; nvl && i < num_entries; nvl = nvl->next) + { + nv = NV_STATE_PTR(nvl); + + /* We do not include excluded GPUs in the list... 
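+         * so i advances only for devices actually reported; ci[] stays
+         * densely packed, and excluded GPUs simply never get valid set
+         * to NV_TRUE.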
*/ + if ((nv->flags & NV_FLAG_EXCLUDE) != 0) + continue; + + ci[i].valid = NV_TRUE; + ci[i].pci_info.domain = nv->pci_info.domain; + ci[i].pci_info.bus = nv->pci_info.bus; + ci[i].pci_info.slot = nv->pci_info.slot; + ci[i].pci_info.vendor_id = nv->pci_info.vendor_id; + ci[i].pci_info.device_id = nv->pci_info.device_id; + ci[i].gpu_id = nv->gpu_id; + ci[i].interrupt_line = nv->interrupt_line; + ci[i].reg_address = nv->regs->cpu_address; + ci[i].reg_size = nv->regs->size; + ci[i].minor_number = nvl->minor_num; + if (dev_is_pci(nvl->dev)) + { + ci[i].fb_address = nv->fb->cpu_address; + ci[i].fb_size = nv->fb->size; + } + i++; + } + +out: + UNLOCK_NV_LINUX_DEVICES(); + return rc; +} + +int +nvidia_ioctl( + struct inode *inode, + struct file *file, + unsigned int cmd, + unsigned long i_arg) +{ + NV_STATUS rmStatus; + int status = 0; + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + nv_linux_state_t *nvl; + nv_state_t *nv; + nvidia_stack_t *sp = NULL; + nv_ioctl_xfer_t ioc_xfer; + void *arg_ptr = (void *) i_arg; + void *arg_copy = NULL; + size_t arg_size = 0; + int arg_cmd; + + nv_printf(NV_DBG_INFO, "NVRM: ioctl(0x%x, 0x%x, 0x%x)\n", + _IOC_NR(cmd), (unsigned int) i_arg, _IOC_SIZE(cmd)); + + if (!nv_is_control_device(inode)) + { + status = nv_wait_open_complete_interruptible(nvlfp); + if (status != 0) + goto done_early; + } + + arg_size = _IOC_SIZE(cmd); + arg_cmd = _IOC_NR(cmd); + + if (arg_cmd == NV_ESC_IOCTL_XFER_CMD) + { + if (arg_size != sizeof(nv_ioctl_xfer_t)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: invalid ioctl XFER structure size!\n"); + status = -EINVAL; + goto done_early; + } + + if (NV_COPY_FROM_USER(&ioc_xfer, arg_ptr, sizeof(ioc_xfer))) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to copy in ioctl XFER data!\n"); + status = -EFAULT; + goto done_early; + } + + arg_cmd = ioc_xfer.cmd; + arg_size = ioc_xfer.size; + arg_ptr = NvP64_VALUE(ioc_xfer.ptr); + + if (arg_size > NV_ABSOLUTE_MAX_IOCTL_SIZE) + { + nv_printf(NV_DBG_ERRORS, "NVRM: invalid ioctl XFER size!\n"); + status = -EINVAL; + goto done_early; + } + } + + NV_KMALLOC(arg_copy, arg_size); + if (arg_copy == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate ioctl memory\n"); + status = -ENOMEM; + goto done_early; + } + + if (NV_COPY_FROM_USER(arg_copy, arg_ptr, arg_size)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to copy in ioctl data!\n"); + status = -EFAULT; + goto done_early; + } + + /* + * Handle NV_ESC_WAIT_OPEN_COMPLETE early as it is allowed to work + * with or without nvl. 
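+     *
+     * Together with the non-blocking open path, the intended user-space
+     * flow is roughly (illustration only; ioctl request encoding elided):
+     *
+     *   fd = open("/dev/nvidia0", O_RDWR | O_NONBLOCK);  // may defer init
+     *   ioctl(fd, ...NV_ESC_WAIT_OPEN_COMPLETE..., &params);
+     *   // params.rc / params.adapterStatus report the deferred open result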
+ */ + if (arg_cmd == NV_ESC_WAIT_OPEN_COMPLETE) + { + nv_ioctl_wait_open_complete_t *params = arg_copy; + params->rc = nvlfp->open_rc; + params->adapterStatus = nvlfp->adapter_status; + goto done_early; + } + + nvl = nvlfp->nvptr; + if (nvl == NULL) + { + status = -EIO; + goto done_early; + } + + nv = NV_STATE_PTR(nvl); + + status = nv_down_read_interruptible(&nv_system_pm_lock); + if (status < 0) + { + goto done_early; + } + + status = nv_kmem_cache_alloc_stack(&sp); + if (status != 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Unable to allocate altstack for ioctl\n"); + goto done_pm_unlock; + } + + rmStatus = nv_check_gpu_state(nv); + if (rmStatus == NV_ERR_GPU_IS_LOST) + { + nv_printf(NV_DBG_INFO, "NVRM: GPU is lost, skipping nvidia_ioctl\n"); + status = -EINVAL; + goto done; + } + + switch (arg_cmd) + { + case NV_ESC_QUERY_DEVICE_INTR: + { + nv_ioctl_query_device_intr *query_intr = arg_copy; + + NV_ACTUAL_DEVICE_ONLY(nv); + + if ((arg_size < sizeof(*query_intr)) || + (!nv->regs->map)) + { + status = -EINVAL; + goto done; + } + + query_intr->intrStatus = + *(nv->regs->map + (NV_RM_DEVICE_INTR_ADDRESS >> 2)); + query_intr->status = NV_OK; + break; + } + + /* pass out info about the card */ + case NV_ESC_CARD_INFO: + { + size_t num_arg_devices = arg_size / sizeof(nv_ioctl_card_info_t); + + NV_CTL_DEVICE_ONLY(nv); + + status = nvidia_read_card_info(arg_copy, num_arg_devices); + break; + } + + case NV_ESC_ATTACH_GPUS_TO_FD: + { + size_t num_arg_gpus = arg_size / sizeof(NvU32); + size_t i; + + NV_CTL_DEVICE_ONLY(nv); + + if (num_arg_gpus == 0 || nvlfp->num_attached_gpus != 0 || + arg_size % sizeof(NvU32) != 0) + { + status = -EINVAL; + goto done; + } + + NV_KMALLOC(nvlfp->attached_gpus, arg_size); + if (nvlfp->attached_gpus == NULL) + { + status = -ENOMEM; + goto done; + } + memcpy(nvlfp->attached_gpus, arg_copy, arg_size); + nvlfp->num_attached_gpus = num_arg_gpus; + + for (i = 0; i < nvlfp->num_attached_gpus; i++) + { + if (nvlfp->attached_gpus[i] == 0) + { + continue; + } + + if (nvidia_dev_get(nvlfp->attached_gpus[i], sp)) + { + while (i--) + { + if (nvlfp->attached_gpus[i] != 0) + nvidia_dev_put(nvlfp->attached_gpus[i], sp); + } + NV_KFREE(nvlfp->attached_gpus, arg_size); + nvlfp->num_attached_gpus = 0; + + status = -EINVAL; + break; + } + } + + break; + } + + case NV_ESC_CHECK_VERSION_STR: + { + NV_CTL_DEVICE_ONLY(nv); + + rmStatus = rm_perform_version_check(sp, arg_copy, arg_size); + status = ((rmStatus == NV_OK) ? 0 : -EINVAL); + break; + } + + case NV_ESC_SYS_PARAMS: + { + nv_ioctl_sys_params_t *api = arg_copy; + + NV_CTL_DEVICE_ONLY(nv); + + if (arg_size != sizeof(nv_ioctl_sys_params_t)) + { + status = -EINVAL; + goto done; + } + + /* numa_memblock_size should only be set once */ + if (nvl->numa_memblock_size == 0) + { + nvl->numa_memblock_size = api->memblock_size; + } + else + { + status = (nvl->numa_memblock_size == api->memblock_size) ? 
+ 0 : -EBUSY; + goto done; + } + break; + } + + case NV_ESC_NUMA_INFO: + { + nv_ioctl_numa_info_t *api = arg_copy; + rmStatus = NV_OK; + + NV_ACTUAL_DEVICE_ONLY(nv); + + if (arg_size != sizeof(nv_ioctl_numa_info_t)) + { + status = -EINVAL; + goto done; + } + + rmStatus = rm_get_gpu_numa_info(sp, nv, api); + if (rmStatus != NV_OK) + { + status = -EBUSY; + goto done; + } + + api->status = nv_get_numa_status(nvl); + api->use_auto_online = nv_platform_use_auto_online(nvl); + api->memblock_size = nv_ctl_device.numa_memblock_size; + break; + } + + case NV_ESC_SET_NUMA_STATUS: + { + nv_ioctl_set_numa_status_t *api = arg_copy; + rmStatus = NV_OK; + + if (!NV_IS_SUSER()) + { + status = -EACCES; + goto done; + } + + NV_ACTUAL_DEVICE_ONLY(nv); + + if (arg_size != sizeof(nv_ioctl_set_numa_status_t)) + { + status = -EINVAL; + goto done; + } + + /* + * The nv_linux_state_t for the device needs to be locked + * in order to prevent additional open()/close() calls from + * manipulating the usage count for the device while we + * determine if NUMA state can be changed. + */ + down(&nvl->ldata_lock); + + if (nv_get_numa_status(nvl) != api->status) + { + if (api->status == NV_IOCTL_NUMA_STATUS_OFFLINE_IN_PROGRESS) + { + /* + * Only the current client should have an open file + * descriptor for the device, to allow safe offlining. + */ + if (NV_ATOMIC_READ(nvl->usage_count) > 1) + { + status = -EBUSY; + goto unlock; + } + else + { + /* + * If this call fails, it indicates that RM + * is not ready to offline memory, and we should keep + * the current NUMA status of ONLINE. + */ + rmStatus = rm_gpu_numa_offline(sp, nv); + if (rmStatus != NV_OK) + { + status = -EBUSY; + goto unlock; + } + } + } + + status = nv_set_numa_status(nvl, api->status); + if (status < 0) + { + if (api->status == NV_IOCTL_NUMA_STATUS_OFFLINE_IN_PROGRESS) + (void) rm_gpu_numa_online(sp, nv); + goto unlock; + } + + if (api->status == NV_IOCTL_NUMA_STATUS_ONLINE) + { + rmStatus = rm_gpu_numa_online(sp, nv); + if (rmStatus != NV_OK) + { + status = -EBUSY; + goto unlock; + } + } + } + +unlock: + up(&nvl->ldata_lock); + + break; + } + + case NV_ESC_EXPORT_TO_DMABUF_FD: + { + nv_ioctl_export_to_dma_buf_fd_t *params = arg_copy; + + if (arg_size != sizeof(nv_ioctl_export_to_dma_buf_fd_t)) + { + status = -EINVAL; + goto done; + } + + NV_ACTUAL_DEVICE_ONLY(nv); + + params->status = nv_dma_buf_export(nv, params); + + break; + } + + default: + rmStatus = rm_ioctl(sp, nv, &nvlfp->nvfp, arg_cmd, arg_copy, arg_size); + status = ((rmStatus == NV_OK) ? 0 : -EINVAL); + break; + } + +done: + nv_kmem_cache_free_stack(sp); + +done_pm_unlock: + up_read(&nv_system_pm_lock); + +done_early: + if (arg_copy != NULL) + { + if (status != -EFAULT) + { + if (NV_COPY_TO_USER(arg_ptr, arg_copy, arg_size)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to copy out ioctl data\n"); + status = -EFAULT; + } + } + NV_KFREE(arg_copy, arg_size); + } + + return status; +} + +long nvidia_unlocked_ioctl( + struct file *file, + unsigned int cmd, + unsigned long i_arg +) +{ + return nvidia_ioctl(NV_FILE_INODE(file), file, cmd, i_arg); +} + +irqreturn_t +nvidia_isr_msix( + int irq, + void *arg +) +{ + irqreturn_t ret; + nv_linux_state_t *nvl = (void *) arg; + + // nvidia_isr_msix() is called for each of the MSI-X vectors and they can + // run in parallel on different CPUs (cores), but this is not currently + // supported by nvidia_isr() and its children. As a big hammer fix just + // spinlock around the nvidia_isr() call to serialize them. 
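+    //
+    // (Editorial note: this serializes what MSI-X would otherwise let run
+    // concurrently; servicing vectors in parallel would first require
+    // nvidia_isr() and its children to be reentrant, per the comment above.)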
+ // + // At this point interrupts are disabled on the CPU running our ISR (see + // comments for nv_default_irq_flags()) so a plain spinlock is enough. + NV_SPIN_LOCK(&nvl->msix_isr_lock); + + ret = nvidia_isr(irq, arg); + + NV_SPIN_UNLOCK(&nvl->msix_isr_lock); + + return ret; +} + +/* + * driver receives an interrupt + * if someone waiting, then hand it off. + */ +irqreturn_t +nvidia_isr( + int irq, + void *arg +) +{ + nv_linux_state_t *nvl = (void *) arg; + nv_state_t *nv = NV_STATE_PTR(nvl); + NvU32 need_to_run_bottom_half_gpu_lock_held = 0; + NvBool rm_handled = NV_FALSE, uvm_handled = NV_FALSE, rm_fault_handling_needed = NV_FALSE; + NvU32 rm_serviceable_fault_cnt = 0; + NvU32 sec, usec; + NvU16 index = 0; + NvU64 currentTime = 0; + NvBool found_irq = NV_FALSE; + + rm_gpu_handle_mmu_faults(nvl->sp[NV_DEV_STACK_ISR], nv, &rm_serviceable_fault_cnt); + rm_fault_handling_needed = (rm_serviceable_fault_cnt != 0); + +#if defined (NV_UVM_ENABLE) + // + // Returns NV_OK if the UVM driver handled the interrupt + // + // Returns NV_ERR_NO_INTR_PENDING if the interrupt is not for + // the UVM driver. + // + // Returns NV_WARN_MORE_PROCESSING_REQUIRED if the UVM top-half ISR was + // unable to get its lock(s), due to other (UVM) threads holding them. + // + // RM can normally treat NV_WARN_MORE_PROCESSING_REQUIRED the same as + // NV_ERR_NO_INTR_PENDING, but in some cases the extra information may + // be helpful. + // + if (nv_uvm_event_interrupt(nv_get_cached_uuid(nv)) == NV_OK) + uvm_handled = NV_TRUE; +#endif + + rm_handled = rm_isr(nvl->sp[NV_DEV_STACK_ISR], nv, + &need_to_run_bottom_half_gpu_lock_held); + + /* Replicating the logic in linux kernel to track unhandled interrupt crossing a threshold */ + if ((nv->flags & NV_FLAG_USES_MSI) || (nv->flags & NV_FLAG_USES_MSIX)) + { + if (nvl->irq_count != NULL) + { + for (index = 0; index < nvl->current_num_irq_tracked; index++) + { + if (nvl->irq_count[index].irq == irq) + { + found_irq = NV_TRUE; + break; + } + + found_irq = NV_FALSE; + } + + if (!found_irq && nvl->current_num_irq_tracked < nvl->num_intr) + { + index = nvl->current_num_irq_tracked; + nvl->irq_count[index].irq = irq; + nvl->current_num_irq_tracked++; + found_irq = NV_TRUE; + } + + if (found_irq) + { + nvl->irq_count[index].total++; + + if(rm_handled == NV_FALSE) + { + os_get_system_time(&sec, &usec); + currentTime = ((NvU64)sec) * 1000000 + (NvU64)usec; + + /* Reset unhandled count if it's been more than 0.1 seconds since the last unhandled IRQ */ + if ((currentTime - nvl->irq_count[index].last_unhandled) > RM_UNHANDLED_TIMEOUT_US) + nvl->irq_count[index].unhandled = 1; + else + nvl->irq_count[index].unhandled++; + + nvl->irq_count[index].last_unhandled = currentTime; + rm_handled = NV_TRUE; + } + + if (nvl->irq_count[index].total >= RM_THRESHOLD_TOTAL_IRQ_COUNT) + { + if (nvl->irq_count[index].unhandled > RM_THRESHOLD_UNAHNDLED_IRQ_COUNT) + nv_printf(NV_DBG_ERRORS,"NVRM: Going over RM unhandled interrupt threshold for irq %d\n", irq); + + nvl->irq_count[index].total = 0; + nvl->irq_count[index].unhandled = 0; + nvl->irq_count[index].last_unhandled = 0; + } + } + else + nv_printf(NV_DBG_ERRORS,"NVRM: IRQ number out of valid range\n"); + } + } + + if (need_to_run_bottom_half_gpu_lock_held) + { + return IRQ_WAKE_THREAD; + } + else + { + // + // If rm_isr does not need to run a bottom half and mmu_faults_copied + // indicates that bottom half is needed, then we enqueue a kthread based + // bottom half, as this specific bottom_half will acquire the GPU lock + // + if 
(rm_fault_handling_needed) + nv_kthread_q_schedule_q_item(&nvl->bottom_half_q, &nvl->bottom_half_q_item); + } + + return IRQ_RETVAL(rm_handled || uvm_handled || rm_fault_handling_needed); +} + +irqreturn_t +nvidia_isr_kthread_bh( + int irq, + void *data +) +{ + return nvidia_isr_common_bh(data); +} + +irqreturn_t +nvidia_isr_msix_kthread_bh( + int irq, + void *data +) +{ + NV_STATUS status; + irqreturn_t ret; + nv_state_t *nv = (nv_state_t *) data; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + // + // Synchronize kthreads servicing bottom halves for different MSI-X vectors + // as they share same pre-allocated alt-stack. + // + status = os_acquire_mutex(nvl->msix_bh_mutex); + // os_acquire_mutex can only fail if we cannot sleep and we can + WARN_ON(status != NV_OK); + + ret = nvidia_isr_common_bh(data); + + os_release_mutex(nvl->msix_bh_mutex); + + return ret; +} + +static irqreturn_t +nvidia_isr_common_bh( + void *data +) +{ + nv_state_t *nv = (nv_state_t *) data; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nvidia_stack_t *sp = nvl->sp[NV_DEV_STACK_ISR_BH]; + NV_STATUS status; + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + nv_printf(NV_DBG_INFO, "NVRM: GPU is lost, skipping ISR bottom half\n"); + } + else + { + rm_isr_bh(sp, nv); + } + + return IRQ_HANDLED; +} + +static void +nvidia_isr_bh_unlocked( + void * args +) +{ + nv_state_t *nv = (nv_state_t *) args; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nvidia_stack_t *sp; + NV_STATUS status; + + // + // Synchronize kthreads servicing unlocked bottom half as they + // share same pre-allocated stack for alt-stack + // + status = os_acquire_mutex(nvl->isr_bh_unlocked_mutex); + if (status != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: %s: Unable to take bottom_half mutex!\n", + __FUNCTION__); + WARN_ON(1); + } + + sp = nvl->sp[NV_DEV_STACK_ISR_BH_UNLOCKED]; + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + nv_printf(NV_DBG_INFO, + "NVRM: GPU is lost, skipping unlocked ISR bottom half\n"); + } + else + { + rm_isr_bh_unlocked(sp, nv); + } + + os_release_mutex(nvl->isr_bh_unlocked_mutex); +} + +static void +nvidia_rc_timer_callback( + struct nv_timer *nv_timer +) +{ + nv_linux_state_t *nvl = container_of(nv_timer, nv_linux_state_t, rc_timer); + nv_state_t *nv = NV_STATE_PTR(nvl); + nvidia_stack_t *sp = nvl->sp[NV_DEV_STACK_TIMER]; + NV_STATUS status; + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + nv_printf(NV_DBG_INFO, + "NVRM: GPU is lost, skipping device timer callbacks\n"); + return; + } + + if (rm_run_rc_callback(sp, nv) == NV_OK) + { + // set another timeout 1 sec in the future: + mod_timer(&nvl->rc_timer.kernel_timer, jiffies + HZ); + } +} + +/* +** nvidia_ctl_open +** +** nv control driver open entry point. Sessions are created here. 
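+**
+** (The control node -- conventionally /dev/nvidiactl -- has no GPU behind
+** it: open simply binds the file to nv_ctl_device and bumps its usage
+** count; nv_start_device() is never involved. Editorial note.)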
+*/ +static int +nvidia_ctl_open( + struct inode *inode, + struct file *file +) +{ + nv_linux_state_t *nvl = &nv_ctl_device; + nv_state_t *nv = NV_STATE_PTR(nvl); + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + + nv_printf(NV_DBG_INFO, "NVRM: nvidia_ctl_open\n"); + + down(&nvl->ldata_lock); + + /* save the nv away in file->private_data */ + nvlfp->nvptr = nvl; + + if (NV_ATOMIC_READ(nvl->usage_count) == 0) + { + nv->flags |= (NV_FLAG_OPEN | NV_FLAG_CONTROL); + } + + NV_ATOMIC_INC(nvl->usage_count); + up(&nvl->ldata_lock); + + return 0; +} + + +/* +** nvidia_ctl_close +*/ +static int +nvidia_ctl_close( + struct inode *inode, + struct file *file +) +{ + nv_alloc_t *at, *next; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file); + nv_state_t *nv = NV_STATE_PTR(nvl); + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + nvidia_stack_t *sp = nvlfp->sp; + + nv_printf(NV_DBG_INFO, "NVRM: nvidia_ctl_close\n"); + + down(&nvl->ldata_lock); + if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count)) + { + nv->flags &= ~NV_FLAG_OPEN; + } + up(&nvl->ldata_lock); + + rm_cleanup_file_private(sp, nv, &nvlfp->nvfp); + + if (nvlfp->free_list != NULL) + { + at = nvlfp->free_list; + while (at != NULL) + { + next = at->next; + if (at->pid == os_get_current_process()) + NV_PRINT_AT(NV_DBG_MEMINFO, at); + nv_free_pages(nv, at->num_pages, + at->flags.contig, + at->cache_type, + (void *)at); + at = next; + } + } + + if (nvlfp->num_attached_gpus != 0) + { + size_t i; + + for (i = 0; i < nvlfp->num_attached_gpus; i++) + { + if (nvlfp->attached_gpus[i] != 0) + nvidia_dev_put(nvlfp->attached_gpus[i], sp); + } + + NV_KFREE(nvlfp->attached_gpus, sizeof(NvU32) * nvlfp->num_attached_gpus); + nvlfp->num_attached_gpus = 0; + } + + nv_free_file_private(nvlfp); + NV_SET_FILE_PRIVATE(file, NULL); + + nv_kmem_cache_free_stack(sp); + + return 0; +} + + +void NV_API_CALL +nv_set_dma_address_size( + nv_state_t *nv, + NvU32 phys_addr_bits +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU64 new_mask = (((NvU64)1) << phys_addr_bits) - 1; + + nvl->dma_dev.addressable_range.limit = new_mask; + + if (!nvl->tce_bypass_enabled) + { + dma_set_mask(&nvl->pci_dev->dev, new_mask); + /* Certain kernels have a bug which causes pci_set_consistent_dma_mask + * to call GPL sme_active symbol, this bug has already been fixed in a + * minor release update but detect the failure scenario here to prevent + * an installation regression */ +#if !NV_IS_EXPORT_SYMBOL_GPL_sme_active + dma_set_coherent_mask(&nvl->pci_dev->dev, new_mask); +#endif + } +} + +static NvUPtr +nv_map_guest_pages(nv_alloc_t *at, + NvU64 address, + NvU32 page_count, + NvU32 page_idx) +{ + struct page **pages; + NvU32 j; + NvUPtr virt_addr; + + NV_KMALLOC(pages, sizeof(struct page *) * page_count); + if (pages == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to allocate vmap() page descriptor table!\n"); + return 0; + } + + for (j = 0; j < page_count; j++) + { + pages[j] = NV_GET_PAGE_STRUCT(at->page_table[page_idx+j].phys_addr); + } + + virt_addr = nv_vm_map_pages(pages, page_count, + at->cache_type == NV_MEMORY_CACHED, at->flags.unencrypted); + NV_KFREE(pages, sizeof(struct page *) * page_count); + + return virt_addr; +} + +NV_STATUS NV_API_CALL +nv_alias_pages( + nv_state_t *nv, + NvU32 page_cnt, + NvU64 page_size, + NvU32 contiguous, + NvU32 cache_type, + NvU64 guest_id, + NvU64 *pte_array, + NvBool carveout, + void **priv_data +) +{ + nv_alloc_t *at; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU32 i=0; + 
nvidia_pte_t *page_ptr = NULL; + + at = nvos_create_alloc(nvl->dev, page_cnt); + + if (at == NULL) + { + return NV_ERR_NO_MEMORY; + } + + at->cache_type = cache_type; + if (contiguous) + { + at->flags.contig = NV_TRUE; + at->order = get_order(at->num_pages * PAGE_SIZE); + } + else + { + at->order = get_order(page_size); + } +#if defined(NVCPU_AARCH64) + if (at->cache_type != NV_MEMORY_CACHED) + at->flags.aliased = NV_TRUE; +#endif + + at->flags.guest = NV_TRUE; + at->flags.carveout = carveout; + + for (i=0; i < at->num_pages; ++i) + { + page_ptr = &at->page_table[i]; + + if (contiguous && i>0) + { + page_ptr->phys_addr = pte_array[0] + (i << PAGE_SHIFT); + } + else + { + page_ptr->phys_addr = pte_array[i]; + } + + /* aliased pages will be mapped on demand. */ + page_ptr->virt_addr = 0x0; + } + + at->guest_id = guest_id; + *priv_data = at; + NV_ATOMIC_INC(at->usage_count); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; +} + +/* + * This creates a dummy nv_alloc_t for peer IO mem, so that it can + * be mapped using NvRmMapMemory. + */ +NV_STATUS NV_API_CALL nv_register_peer_io_mem( + nv_state_t *nv, + NvU64 *phys_addr, + NvU64 page_count, + void **priv_data +) +{ + nv_alloc_t *at; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU64 i; + NvU64 addr; + + at = nvos_create_alloc(nvl->dev, page_count); + + if (at == NULL) + return NV_ERR_NO_MEMORY; + + // IO regions should be uncached and contiguous + at->cache_type = NV_MEMORY_UNCACHED; + at->flags.contig = NV_TRUE; +#if defined(NVCPU_AARCH64) + at->flags.aliased = NV_TRUE; +#endif + at->flags.peer_io = NV_TRUE; + + at->order = get_order(at->num_pages * PAGE_SIZE); + + addr = phys_addr[0]; + + for (i = 0; i < page_count; i++) + { + at->page_table[i].phys_addr = addr; + addr += PAGE_SIZE; + } + + // No struct page array exists for this memory. + at->user_pages = NULL; + + *priv_data = at; + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; +} + +void NV_API_CALL nv_unregister_peer_io_mem( + nv_state_t *nv, + void *priv_data +) +{ + nv_alloc_t *at = priv_data; + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + nvos_free_alloc(at); +} + +/* + * By registering user pages, we create a dummy nv_alloc_t for it, so that the + * rest of the RM can treat it like any other alloc. + * + * This also converts the page array to an array of physical addresses. + */ +NV_STATUS NV_API_CALL nv_register_user_pages( + nv_state_t *nv, + NvU64 page_count, + NvU64 *phys_addr, + void *import_priv, + void **priv_data, + NvBool unencrypted +) +{ + nv_alloc_t *at; + NvU64 i; + struct page **user_pages; + nv_linux_state_t *nvl; + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_register_user_pages: 0x%" NvU64_fmtx"\n", page_count); + user_pages = *priv_data; + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + at = nvos_create_alloc(nvl->dev, page_count); + + if (at == NULL) + { + return NV_ERR_NO_MEMORY; + } + + /* + * Anonymous memory currently must be write-back cacheable, and we can't + * enforce contiguity. + */ + at->cache_type = NV_MEMORY_UNCACHED; +#if defined(NVCPU_AARCH64) + at->flags.aliased = NV_TRUE; +#endif + + at->flags.user = NV_TRUE; + + if (unencrypted) + at->flags.unencrypted = NV_TRUE; + + at->order = get_order(at->num_pages * PAGE_SIZE); + + for (i = 0; i < page_count; i++) + { + /* + * We only assign the physical address and not the DMA address, since + * this allocation hasn't been DMA-mapped yet. 
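+         * (Note that phys_addr[] is an output parameter here: it is
+         * rewritten with page_to_phys() results so the caller can perform
+         * the DMA mapping as a separate step.)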
+ */ + at->page_table[i].phys_addr = phys_addr[i] = page_to_phys(user_pages[i]); + } + + /* Save off the user pages array to be restored later */ + at->user_pages = user_pages; + + /* Save off the import private data to be returned later */ + if (import_priv != NULL) + { + at->import_priv = import_priv; + } + + *priv_data = at; + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; +} + +void NV_API_CALL nv_unregister_user_pages( + nv_state_t *nv, + NvU64 page_count, + void **import_priv, + void **priv_data +) +{ + nv_alloc_t *at = *priv_data; + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_unregister_user_pages: 0x%" NvU64_fmtx "\n", page_count); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + WARN_ON(!at->flags.user); + + /* Restore the user pages array for the caller to handle */ + *priv_data = at->user_pages; + + /* Return the import private data for the caller to handle */ + if (import_priv != NULL) + { + *import_priv = at->import_priv; + } + + nvos_free_alloc(at); +} + +/* + * This creates a dummy nv_alloc_t for existing physical allocations, so + * that it can be mapped using NvRmMapMemory and BAR2 code path. + */ +NV_STATUS NV_API_CALL nv_register_phys_pages( + nv_state_t *nv, + NvU64 *phys_addr, + NvU64 page_count, + NvU32 cache_type, + void **priv_data +) +{ + nv_alloc_t *at; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU64 i; + + at = nvos_create_alloc(nvl->dev, page_count); + + if (at == NULL) + return NV_ERR_NO_MEMORY; + /* + * Setting memory flags to cacheable and discontiguous. + */ + at->cache_type = cache_type; + + /* + * Only physical address is available so we don't try to reuse existing + * mappings + */ + at->flags.physical = NV_TRUE; + + at->order = get_order(at->num_pages * PAGE_SIZE); + + for (i = 0; i < page_count; i++) + { + at->page_table[i].phys_addr = phys_addr[i]; + } + + at->user_pages = NULL; + *priv_data = at; + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; +} + +NV_STATUS NV_API_CALL nv_register_sgt( + nv_state_t *nv, + NvU64 *phys_addr, + NvU64 page_count, + NvU32 cache_type, + void **priv_data, + struct sg_table *import_sgt, + void *import_priv, + NvBool is_peer_mmio +) +{ + nv_alloc_t *at; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + unsigned int i, j = 0; + NvU64 sg_addr, sg_off, sg_len; + struct scatterlist *sg; + + at = nvos_create_alloc(nvl->dev, page_count); + + if (at == NULL) + return NV_ERR_NO_MEMORY; + + // + // TODO: When ISO SMMU is not present, dma mapping of imported ISO memory + // causes crash during __clean_dcache_area_poc. dma mapping of ISO + // memory allocated by RM (via __get_free_pages) still works. + // Skip dma mapping of imported ISO memory to unblock Tegra Display in + // AV+L. Bug 200765629 and 3396656. + // RM will not allow CPU mapping support for DMA addrs (IOVA) based SGTs. + // + /* For DMA addrs (IOVA) based SGT */ + if (!sg_page(import_sgt->sgl) || + NV_IS_SOC_DISPLAY_DEVICE(nv)) + { + /* Populate phys addrs with DMA addrs from SGT */ + for_each_sg(import_sgt->sgl, sg, import_sgt->nents, i) + { + /* + * It is possible for dma_map_sg() to merge scatterlist entries, so + * make sure we account for that here. 
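+             * Illustration: if two 4K pages were merged into one 8K
+             * entry, sg_dma_len(sg) is 8192 and the loop below emits
+             * two PAGE_SIZE-spaced addresses from that single entry.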
+ */ + for (sg_addr = sg_dma_address(sg), sg_len = sg_dma_len(sg), sg_off = 0; + (sg_off < sg_len) && (j < page_count); + sg_off += PAGE_SIZE, j++) + { + phys_addr[j] = sg_addr + sg_off; + } + } + } + else + { + /* Populate phys addrs from SGT */ + for_each_sg(import_sgt->sgl, sg, import_sgt->orig_nents, i) + { + if (WARN_ON(sg->offset != 0)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: RM is not supporting sg->offset != 0 use case now.!\n"); + nvos_free_alloc(at); + return NV_ERR_NOT_SUPPORTED; + } + + /* + * Store the phys_addr instead of dma_address. + * Use sg_phys() instead of sg_dma_address(). + */ + for ((sg_addr = sg_phys(sg), sg_len = sg->length, sg_off = 0); + ((sg_off < sg_len) && (j < page_count)); + (sg_off += PAGE_SIZE, j++)) + { + phys_addr[j] = sg_addr + sg_off; + at->page_table[j].phys_addr = phys_addr[j]; + } + } + WARN_ON(j != page_count); + + // Setting memory flags to io and contiguous. + at->flags.peer_io = is_peer_mmio; + if (import_sgt->orig_nents == 1) + { + at->flags.contig = NV_TRUE; + } + } + + /* + * Setting memory flags to cacheable. + */ + at->cache_type = cache_type; + + at->import_sgt = import_sgt; + + /* Save off the import private data to be returned later */ + if (import_priv != NULL) + { + at->import_priv = import_priv; + } + + at->order = get_order(at->num_pages * PAGE_SIZE); + + NV_ATOMIC_INC(at->usage_count); + + *priv_data = at; + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; +} + +void NV_API_CALL nv_unregister_sgt( + nv_state_t *nv, + struct sg_table **import_sgt, + void **import_priv, + void *priv_data +) +{ + nv_alloc_t *at = priv_data; + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_unregister_sgt\n"); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + /* Restore the imported SGT for the caller to handle */ + *import_sgt = at->import_sgt; + + /* Return the import private data for the caller to handle */ + if (import_priv != NULL) + { + *import_priv = at->import_priv; + } + + if (NV_ATOMIC_DEC_AND_TEST(at->usage_count)) + { + nvos_free_alloc(at); + } +} + +void NV_API_CALL nv_unregister_phys_pages( + nv_state_t *nv, + void *priv_data +) +{ + nv_alloc_t *at = priv_data; + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + nvos_free_alloc(at); +} + +NV_STATUS NV_API_CALL nv_get_num_phys_pages( + void *pAllocPrivate, + NvU32 *pNumPages +) +{ + nv_alloc_t *at = pAllocPrivate; + + if (!pNumPages) { + return NV_ERR_INVALID_ARGUMENT; + } + + *pNumPages = at->num_pages; + + return NV_OK; +} + +NV_STATUS NV_API_CALL nv_get_phys_pages( + void *pAllocPrivate, + void *pPages, + NvU32 *pNumPages +) +{ + nv_alloc_t *at = pAllocPrivate; + struct page **pages = (struct page **)pPages; + NvU32 page_count; + int i; + + if (!pNumPages || !pPages) { + return NV_ERR_INVALID_ARGUMENT; + } + + page_count = NV_MIN(*pNumPages, at->num_pages); + + for (i = 0; i < page_count; i++) { + pages[i] = NV_GET_PAGE_STRUCT(at->page_table[i].phys_addr); + } + + *pNumPages = page_count; + + return NV_OK; +} + +void nv_get_disp_smmu_stream_ids +( + nv_state_t *nv, + NvU32 *dispIsoStreamId, + NvU32 *dispNisoStreamId) +{ + *dispIsoStreamId = nv->iommus.dispIsoStreamId; + *dispNisoStreamId = nv->iommus.dispNisoStreamId; +} + +void* NV_API_CALL nv_alloc_kernel_mapping( + nv_state_t *nv, + void *pAllocPrivate, + NvU64 pageIndex, + NvU32 pageOffset, + NvU64 size, + void **pPrivate +) +{ + nv_alloc_t *at = pAllocPrivate; + NvU32 j, page_count; + NvUPtr virt_addr; + struct page **pages; + NvBool isUserAllocatedMem; + + // + // For User allocated memory (like ErrorNotifier's) which is NOT allocated + // nor owned by 
RM, the RM driver just stores the physical address + // corresponding to that memory and does not map it until required. + // In that case, in page tables the virt_addr == 0, so first we need to map + // those pages to obtain virtual address. + // + isUserAllocatedMem = at->flags.user && + !at->page_table[pageIndex].virt_addr && + at->page_table[pageIndex].phys_addr; + + // + // User memory may NOT have kernel VA. So check this and fallback to else + // case to create one. + // + if (((size + pageOffset) <= PAGE_SIZE) && + !at->flags.guest && !at->flags.aliased && + !isUserAllocatedMem && !at->flags.physical && + !at->import_sgt) + { + *pPrivate = NULL; + return (void *)(at->page_table[pageIndex].virt_addr + pageOffset); + } + else + { + size += pageOffset; + page_count = (size >> PAGE_SHIFT) + ((size & ~NV_PAGE_MASK) ? 1 : 0); + + if (at->flags.guest) + { + virt_addr = nv_map_guest_pages(at, + nv->bars[NV_GPU_BAR_INDEX_REGS].cpu_address, + page_count, pageIndex); + } + else + { + NV_KMALLOC(pages, sizeof(struct page *) * page_count); + if (pages == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to allocate vmap() page descriptor table!\n"); + return NULL; + } + + for (j = 0; j < page_count; j++) + pages[j] = NV_GET_PAGE_STRUCT(at->page_table[pageIndex+j].phys_addr); + + virt_addr = nv_vm_map_pages(pages, page_count, + at->cache_type == NV_MEMORY_CACHED, at->flags.unencrypted); + NV_KFREE(pages, sizeof(struct page *) * page_count); + } + + if (virt_addr == 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to map pages!\n"); + return NULL; + } + + *pPrivate = (void *)(NvUPtr)page_count; + return (void *)(virt_addr + pageOffset); + } + + return NULL; +} + +void NV_API_CALL nv_free_kernel_mapping( + nv_state_t *nv, + void *pAllocPrivate, + void *address, + void *pPrivate +) +{ + nv_alloc_t *at = pAllocPrivate; + NvUPtr virt_addr; + NvU32 page_count; + + virt_addr = ((NvUPtr)address & NV_PAGE_MASK); + page_count = (NvUPtr)pPrivate; + + if (at->flags.guest) + { + nv_iounmap((void *)virt_addr, (page_count * PAGE_SIZE)); + } + else if (pPrivate != NULL) + { + nv_vm_unmap_pages(virt_addr, page_count); + } +} + +NV_STATUS NV_API_CALL nv_alloc_pages( + nv_state_t *nv, + NvU32 page_count, + NvU64 page_size, + NvBool contiguous, + NvU32 cache_type, + NvBool zeroed, + NvBool unencrypted, + NvS32 node_id, + NvU64 *pte_array, + void **priv_data +) +{ + nv_alloc_t *at; + NV_STATUS status = NV_ERR_NO_MEMORY; + nv_linux_state_t *nvl = NULL; + NvBool will_remap = NV_FALSE; + NvU32 i; + struct device *dev = NULL; + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_alloc_pages: %d pages, nodeid %d\n", page_count, node_id); + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: contig %d cache_type %d\n", + contiguous, cache_type); + + // + // system memory allocation can be associated with a client instead of a gpu + // handle the case where per device state is NULL + // + if(nv) + { + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + will_remap = nv_requires_dma_remap(nv); + dev = nvl->dev; + } + + if (nv_encode_caching(NULL, cache_type, NV_MEMORY_TYPE_SYSTEM)) + return NV_ERR_NOT_SUPPORTED; + + at = nvos_create_alloc(dev, page_count); + if (at == NULL) + return NV_ERR_NO_MEMORY; + + at->cache_type = cache_type; + + if (contiguous) + at->flags.contig = NV_TRUE; + if (zeroed) + at->flags.zeroed = NV_TRUE; +#if defined(NVCPU_AARCH64) + if (at->cache_type != NV_MEMORY_CACHED) + at->flags.aliased = NV_TRUE; +#endif + if (unencrypted) + at->flags.unencrypted = NV_TRUE; + + if (node_id != NUMA_NO_NODE) + { + at->flags.node = NV_TRUE; + 
at->node_id = node_id; + } + + if (at->flags.contig) + { + status = nv_alloc_contig_pages(nv, at); + } + else + { + if (page_size == 0) + { + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + at->order = get_order(page_size); + status = nv_alloc_system_pages(nv, at); + } + + if (status != NV_OK) + goto failed; + + for (i = 0; i < ((contiguous) ? 1 : page_count); i++) + { + /* + * The contents of the pte_array[] depend on whether or not this device + * requires DMA-remapping. If it does, it should be the phys addresses + * used by the DMA-remapping paths, otherwise it should be the actual + * address that the device should use for DMA (which, confusingly, may + * be different than the CPU physical address, due to a static DMA + * offset). + */ + if ((nv == NULL) || will_remap) + { + pte_array[i] = at->page_table[i].phys_addr; + } + else + { + pte_array[i] = nv_phys_to_dma(dev, at->page_table[i].phys_addr); + } + } + + *priv_data = at; + NV_ATOMIC_INC(at->usage_count); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; + +failed: + nvos_free_alloc(at); + + return status; +} + +NV_STATUS NV_API_CALL nv_free_pages( + nv_state_t *nv, + NvU32 page_count, + NvBool contiguous, + NvU32 cache_type, + void *priv_data +) +{ + NV_STATUS rmStatus = NV_OK; + nv_alloc_t *at = priv_data; + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_free_pages: 0x%x\n", page_count); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + /* + * If the 'at' usage count doesn't drop to zero here, not all of + * the user mappings have been torn down in time - we can't + * safely free the memory. We report success back to the RM, but + * defer the actual free operation until later. + * + * This is described in greater detail in the comments above the + * nvidia_vma_(open|release)() callbacks in nv-mmap.c. + */ + if (!NV_ATOMIC_DEC_AND_TEST(at->usage_count)) + return NV_OK; + + if (!at->flags.guest && !at->import_sgt) + { + if (at->flags.contig) + nv_free_contig_pages(at); + else + nv_free_system_pages(at); + } + + nvos_free_alloc(at); + + return rmStatus; +} + +NvBool nv_lock_init_locks +( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + nv_linux_state_t *nvl; + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + NV_INIT_MUTEX(&nvl->ldata_lock); + NV_INIT_MUTEX(&nvl->mmap_lock); + NV_INIT_MUTEX(&nvl->open_q_lock); + + NV_ATOMIC_SET(nvl->usage_count, 0); + + if (!rm_init_event_locks(sp, nv)) + return NV_FALSE; + + return NV_TRUE; +} + +void nv_lock_destroy_locks +( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + rm_destroy_event_locks(sp, nv); +} + +void NV_API_CALL nv_post_event( + nv_event_t *event, + NvHandle handle, + NvU32 index, + NvU32 info32, + NvU16 info16, + NvBool data_valid +) +{ + nv_linux_file_private_t *nvlfp = nv_get_nvlfp_from_nvfp(event->nvfp); + unsigned long eflags; + nvidia_event_t *nvet; + + NV_SPIN_LOCK_IRQSAVE(&nvlfp->fp_lock, eflags); + + if (data_valid) + { + NV_KMALLOC_ATOMIC(nvet, sizeof(nvidia_event_t)); + if (nvet == NULL) + { + NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags); + return; + } + + if (nvlfp->event_data_tail != NULL) + nvlfp->event_data_tail->next = nvet; + if (nvlfp->event_data_head == NULL) + nvlfp->event_data_head = nvet; + nvlfp->event_data_tail = nvet; + nvet->next = NULL; + + nvet->event = *event; + nvet->event.hObject = handle; + nvet->event.index = index; + nvet->event.info32 = info32; + nvet->event.info16 = info16; + } + // + // 'event_pending' is interpreted by nvidia_poll() and nv_get_event() to + // mean that an event without data is pending. 
Therefore, only set it to + // true here if newly posted event is dataless. + // + else + { + nvlfp->dataless_event_pending = NV_TRUE; + } + + NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags); + + wake_up_interruptible(&nvlfp->waitqueue); +} + +NvBool NV_API_CALL nv_is_rm_firmware_active( + nv_state_t *nv +) +{ + if (rm_firmware_active) + { + // "all" here means all GPUs + if (strcmp(rm_firmware_active, "all") == 0) + return NV_TRUE; + } + return NV_FALSE; +} + +const void* NV_API_CALL nv_get_firmware( + nv_state_t *nv, + nv_firmware_type_t fw_type, + nv_firmware_chip_family_t fw_chip_family, + const void **fw_buf, + NvU32 *fw_size +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + const struct firmware *fw; + + // path is relative to /lib/firmware + // if this fails it will print an error to dmesg + if (request_firmware(&fw, + nv_firmware_for_chip_family(fw_type, fw_chip_family), + nvl->dev) != 0) + return NULL; + + *fw_size = fw->size; + *fw_buf = fw->data; + + return fw; +} + +void NV_API_CALL nv_put_firmware( + const void *fw_handle +) +{ + release_firmware(fw_handle); +} + +nv_file_private_t* NV_API_CALL nv_get_file_private( + NvS32 fd, + NvBool ctl, + void **os_private +) +{ + struct file *filp = NULL; + nv_linux_file_private_t *nvlfp = NULL; + dev_t rdev = 0; + + filp = fget(fd); + + if (filp == NULL || !NV_FILE_INODE(filp)) + { + goto fail; + } + + rdev = (NV_FILE_INODE(filp))->i_rdev; + + if (MAJOR(rdev) != NV_MAJOR_DEVICE_NUMBER) + { + goto fail; + } + + if (ctl) + { + if (MINOR(rdev) != NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE) + goto fail; + } + else + { + NvBool found = NV_FALSE; + int i; + + for (i = 0; i <= NV_MINOR_DEVICE_NUMBER_REGULAR_MAX; i++) + { + if ((nv_linux_minor_num_table[i] != NULL) && (MINOR(rdev) == i)) + { + found = NV_TRUE; + break; + } + } + + if (!found) + goto fail; + } + + nvlfp = NV_GET_LINUX_FILE_PRIVATE(filp); + + *os_private = filp; + + return &nvlfp->nvfp; + +fail: + + if (filp != NULL) + { + fput(filp); + } + + return NULL; +} + +void NV_API_CALL nv_put_file_private( + void *os_private +) +{ + struct file *filp = os_private; + fput(filp); +} + +int NV_API_CALL nv_get_event( + nv_file_private_t *nvfp, + nv_event_t *event, + NvU32 *pending +) +{ + nv_linux_file_private_t *nvlfp = nv_get_nvlfp_from_nvfp(nvfp); + nvidia_event_t *nvet; + unsigned long eflags; + + // + // Note that the head read/write is not atomic when done outside of the + // spinlock, so this might not be a valid pointer at all. But if we read + // NULL here that means that the value indeed was NULL and we can bail + // early since there's no events. Otherwise, we have to do a proper read + // under a spinlock. 
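+    //
+    // (This is the usual check / lock / re-check idiom: the unlocked NULL
+    // test is only a fast-path bail-out, and event_data_head is read again
+    // under fp_lock before anything is dequeued.)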
+ // + if (nvlfp->event_data_head == NULL) + return NV_ERR_GENERIC; + + NV_SPIN_LOCK_IRQSAVE(&nvlfp->fp_lock, eflags); + + nvet = nvlfp->event_data_head; + if (nvet == NULL) + { + NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags); + return NV_ERR_GENERIC; + } + + *event = nvet->event; + + if (nvlfp->event_data_tail == nvet) + nvlfp->event_data_tail = NULL; + nvlfp->event_data_head = nvet->next; + + *pending = (nvlfp->event_data_head != NULL); + + NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags); + + NV_KFREE(nvet, sizeof(nvidia_event_t)); + + return NV_OK; +} + +int NV_API_CALL nv_start_rc_timer( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (nv->rc_timer_enabled) + return -1; + + nv_printf(NV_DBG_INFO, "NVRM: initializing rc timer\n"); + + nv_timer_setup(&nvl->rc_timer, nvidia_rc_timer_callback); + + nv->rc_timer_enabled = 1; + + // set the timeout for 1 second in the future: + mod_timer(&nvl->rc_timer.kernel_timer, jiffies + HZ); + + nv_printf(NV_DBG_INFO, "NVRM: rc timer initialized\n"); + + return 0; +} + +int NV_API_CALL nv_stop_rc_timer( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (!nv->rc_timer_enabled) + return -1; + + nv_printf(NV_DBG_INFO, "NVRM: stopping rc timer\n"); + nv->rc_timer_enabled = 0; + nv_timer_delete_sync(&nvl->rc_timer.kernel_timer); + nv_printf(NV_DBG_INFO, "NVRM: rc timer stopped\n"); + + return 0; +} + +#define SNAPSHOT_TIMER_FREQ (jiffies + HZ / NV_SNAPSHOT_TIMER_HZ) + +static void snapshot_timer_callback(struct nv_timer *timer) +{ + nv_linux_state_t *nvl = &nv_ctl_device; + nv_state_t *nv = NV_STATE_PTR(nvl); + unsigned long flags; + + NV_SPIN_LOCK_IRQSAVE(&nvl->snapshot_timer_lock, flags); + if (nvl->snapshot_callback != NULL) + { + nvl->snapshot_callback(nv->profiler_context); + mod_timer(&timer->kernel_timer, SNAPSHOT_TIMER_FREQ); + } + NV_SPIN_UNLOCK_IRQRESTORE(&nvl->snapshot_timer_lock, flags); +} + +void NV_API_CALL nv_start_snapshot_timer(void (*snapshot_callback)(void *context)) +{ + nv_linux_state_t *nvl = &nv_ctl_device; + + nvl->snapshot_callback = snapshot_callback; + nv_timer_setup(&nvl->snapshot_timer, snapshot_timer_callback); + mod_timer(&nvl->snapshot_timer.kernel_timer, SNAPSHOT_TIMER_FREQ); +} + +void NV_API_CALL nv_stop_snapshot_timer(void) +{ + nv_linux_state_t *nvl = &nv_ctl_device; + NvBool timer_active; + unsigned long flags; + + NV_SPIN_LOCK_IRQSAVE(&nvl->snapshot_timer_lock, flags); + timer_active = nvl->snapshot_callback != NULL; + nvl->snapshot_callback = NULL; + NV_SPIN_UNLOCK_IRQRESTORE(&nvl->snapshot_timer_lock, flags); + + if (timer_active) + nv_timer_delete_sync(&nvl->snapshot_timer.kernel_timer); +} + +void NV_API_CALL nv_flush_snapshot_timer(void) +{ + nv_linux_state_t *nvl = &nv_ctl_device; + nv_state_t *nv = NV_STATE_PTR(nvl); + unsigned long flags; + + NV_SPIN_LOCK_IRQSAVE(&nvl->snapshot_timer_lock, flags); + if (nvl->snapshot_callback != NULL) + nvl->snapshot_callback(nv->profiler_context); + NV_SPIN_UNLOCK_IRQRESTORE(&nvl->snapshot_timer_lock, flags); +} + +static int __init +nvos_count_devices(int *num_pci_devices, int *num_platform_devices) +{ + int nplatform = 0; + int npci = nv_pci_count_devices(); + + nplatform = nv_platform_count_devices(); + + if (num_pci_devices != NULL) + *num_pci_devices = npci; + + if (num_platform_devices != NULL) + *num_platform_devices = nplatform; + + return npci + nplatform; +} + +#if NVCPU_IS_AARCH64 +NvBool nvos_is_chipset_io_coherent(void) +{ + static NvTristate nv_chipset_is_io_coherent = 
NV_TRISTATE_INDETERMINATE; + + if (nv_chipset_is_io_coherent == NV_TRISTATE_INDETERMINATE) + { + nvidia_stack_t *sp = NULL; + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: cannot allocate stack for platform coherence check callback \n"); + WARN_ON(1); + return NV_FALSE; + } + + nv_chipset_is_io_coherent = rm_is_chipset_io_coherent(sp); + + nv_kmem_cache_free_stack(sp); + } + + return nv_chipset_is_io_coherent; +} +#endif // NVCPU_IS_AARCH64 + +#if defined(CONFIG_PM) +static NV_STATUS +nv_power_management( + nv_state_t *nv, + nv_pm_action_t pm_action +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int status = NV_OK; + nvidia_stack_t *sp = NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "GPU is lost, skipping PM event\n"); + goto failure; + } + + switch (pm_action) + { + case NV_PM_ACTION_STANDBY: + /* fall through */ + case NV_PM_ACTION_HIBERNATE: + { + /* + * Flush nvl->open_q before suspend/hibernate to ensure deferred + * opens do not get attempted during the PM transition. + * + * Note: user space is either frozen by the kernel or locked out + * by nv_system_pm_lock, so no further deferred opens can be + * enqueued before resume (meaning we do not need to unset + * nvl->is_accepting_opens). + */ + nv_kthread_q_flush(&nvl->open_q); + + status = rm_power_management(sp, nv, pm_action); + + nv_kthread_q_stop(&nvl->bottom_half_q); + + nv_disable_pat_support(); + break; + } + case NV_PM_ACTION_RESUME: + { + nv_enable_pat_support(); + + nv_kthread_q_item_init(&nvl->bottom_half_q_item, + nvidia_isr_bh_unlocked, (void *)nv); + + status = nv_kthread_q_init(&nvl->bottom_half_q, nv_device_name); + if (status != NV_OK) + break; + + status = rm_power_management(sp, nv, pm_action); + break; + } + default: + status = NV_ERR_INVALID_ARGUMENT; + break; + } + +failure: + nv_kmem_cache_free_stack(sp); + + return status; +} + +static NV_STATUS +nv_restore_user_channels( + nv_state_t *nv +) +{ + NV_STATUS status = NV_OK; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nv_stack_t *sp = NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + down(&nvl->ldata_lock); + + if ((nv->flags & NV_FLAG_OPEN) == 0) + { + goto done; + } + + status = rm_restart_user_channels(sp, nv); + WARN_ON(status != NV_OK); + + down(&nvl->mmap_lock); + + nv_set_safe_to_mmap_locked(nv, NV_TRUE); + + up(&nvl->mmap_lock); + + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE); + +done: + up(&nvl->ldata_lock); + + nv_kmem_cache_free_stack(sp); + + return status; +} + +static NV_STATUS +nv_preempt_user_channels( + nv_state_t *nv +) +{ + NV_STATUS status = NV_OK; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nv_stack_t *sp = NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + down(&nvl->ldata_lock); + + if ((nv->flags & NV_FLAG_OPEN) == 0) + { + goto done; + } + + status = rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE); + WARN_ON(status != NV_OK); + + down(&nvl->mmap_lock); + + nv_set_safe_to_mmap_locked(nv, NV_FALSE); + nv_revoke_gpu_mappings_locked(nv); + + up(&nvl->mmap_lock); + + status = rm_stop_user_channels(sp, nv); + WARN_ON(status != NV_OK); + +done: + up(&nvl->ldata_lock); + + nv_kmem_cache_free_stack(sp); + + return status; +} + +static NV_STATUS +nvidia_suspend( + struct device *dev, + nv_pm_action_t pm_action, + NvBool is_procfs_suspend 
+)
+{
+ NV_STATUS status = NV_OK;
+ struct pci_dev *pci_dev = NULL;
+ nv_linux_state_t *nvl;
+ nv_state_t *nv;
+
+ if (dev_is_pci(dev))
+ {
+ pci_dev = to_pci_dev(dev);
+ nvl = pci_get_drvdata(pci_dev);
+ }
+ else
+ {
+ nvl = dev_get_drvdata(dev);
+ }
+ nv = NV_STATE_PTR(nvl);
+
+#if defined(NV_PM_RUNTIME_AVAILABLE)
+ /* Handle GenPD suspend sequence for Tegra PCI iGPU */
+ if (dev_is_pci(dev) && nv->is_tegra_pci_igpu_rg_enabled)
+ {
+ /* Turn on the GPU power before saving PCI configuration */
+ pm_runtime_forbid(dev);
+
+ /*
+ * If a PCI device is attached to a GenPD power domain, the
+ * resume_early callback in the PCI framework will not be
+ * executed during static resume, so the PCI configuration
+ * cannot be properly restored.
+ *
+ * Clear the power domain of the PCI GPU before static suspend
+ * to make sure its PCI configuration can be properly
+ * restored during static resume.
+ */
+ nv_printf(NV_DBG_INFO,
+ "NVRM: set GPU pm_domain to NULL before suspend\n");
+ dev_pm_domain_set(dev, NULL);
+ }
+#endif
+
+ down(&nvl->ldata_lock);
+
+ if (((nv->flags & NV_FLAG_OPEN) == 0) &&
+ ((nv->flags & NV_FLAG_PERSISTENT_SW_STATE) == 0))
+ {
+ goto done;
+ }
+
+ if ((nv->flags & NV_FLAG_SUSPENDED) != 0)
+ {
+ nvl->suspend_count++;
+ goto pci_pm;
+ }
+
+ if (nv->preserve_vidmem_allocations && !is_procfs_suspend)
+ {
+ NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
+ "PreserveVideoMemoryAllocations module parameter is set. "
+ "System Power Management attempted without driver procfs suspend interface. "
+ "Please refer to the 'Configuring Power Management Support' section in the driver README.\n");
+ status = NV_ERR_NOT_SUPPORTED;
+ goto done;
+ }
+
+ nvidia_modeset_suspend(nv->gpu_id);
+
+ status = nv_power_management(nv, pm_action);
+
+ nv->flags |= NV_FLAG_SUSPENDED;
+
+pci_pm:
+ /*
+ * Check if the PCI power state should be D0 during system suspend. The
+ * PCI PM core will change the power state only if the driver has not
+ * saved the state in its suspend callback.
+ */
+ if ((nv->d0_state_in_suspend) && (pci_dev != NULL) &&
+ !is_procfs_suspend && (pm_action == NV_PM_ACTION_STANDBY))
+ {
+ pci_save_state(pci_dev);
+ }
+
+done:
+ up(&nvl->ldata_lock);
+
+ return status;
+}
+
+static NV_STATUS
+nvidia_resume(
+ struct device *dev,
+ nv_pm_action_t pm_action
+)
+{
+ NV_STATUS status = NV_OK;
+ struct pci_dev *pci_dev;
+#if defined(NV_PM_RUNTIME_AVAILABLE)
+ struct pci_bus *bus;
+ struct pci_host_bridge *bridge;
+ struct device *ctrl;
+#endif
+ nv_linux_state_t *nvl;
+ nv_state_t *nv;
+
+ if (dev_is_pci(dev))
+ {
+ pci_dev = to_pci_dev(dev);
+ nvl = pci_get_drvdata(pci_dev);
+ }
+ else
+ {
+ nvl = dev_get_drvdata(dev);
+ }
+ nv = NV_STATE_PTR(nvl);
+
+#if defined(NV_PM_RUNTIME_AVAILABLE)
+ /* Handle GenPD resume sequence for Tegra PCI iGPU */
+ if (dev_is_pci(dev) && nv->is_tegra_pci_igpu_rg_enabled)
+ {
+ // Get PCI controller device
+ bus = pci_dev->bus;
+ while (bus->parent)
+ bus = bus->parent;
+
+ bridge = to_pci_host_bridge(bus->bridge);
+ ctrl = bridge->dev.parent;
+
+ /*
+ * Attach the GPU power domain back. This driver cannot use
+ * dev_pm_domain_set() directly to recover the pm_domain, because
+ * a kernel warning is triggered if the calling driver is already
+ * bound.
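+ * Instead, the pm_domain pointer is restored by direct assignment below.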
+ */ + nv_printf(NV_DBG_INFO, + "NVRM: restore GPU pm_domain after suspend\n"); + dev->pm_domain = ctrl->pm_domain; + + pm_runtime_allow(dev); + } +#endif + + down(&nvl->ldata_lock); + + if ((nv->flags & NV_FLAG_SUSPENDED) == 0) + { + goto done; + } + + if (nvl->suspend_count != 0) + { + nvl->suspend_count--; + } + else + { + status = nv_power_management(nv, pm_action); + + if (status == NV_OK) + { + nvidia_modeset_resume(nv->gpu_id); + nv->flags &= ~NV_FLAG_SUSPENDED; + } + } + +done: + up(&nvl->ldata_lock); + + return status; +} + +static NV_STATUS +nv_resume_devices( + nv_pm_action_t pm_action, + nv_pm_action_depth_t pm_action_depth +) +{ + nv_linux_state_t *nvl; + NvBool resume_devices = NV_TRUE; + NV_STATUS status; + + if (pm_action_depth == NV_PM_ACTION_DEPTH_MODESET) + { + goto resume_modeset; + } + + if (pm_action_depth == NV_PM_ACTION_DEPTH_UVM) + { + resume_devices = NV_FALSE; + } + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + if (resume_devices) + { + status = nvidia_resume(nvl->dev, pm_action); + WARN_ON(status != NV_OK); + } + } + + UNLOCK_NV_LINUX_DEVICES(); + + status = nv_uvm_resume(); + WARN_ON(status != NV_OK); + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + status = nv_restore_user_channels(NV_STATE_PTR(nvl)); + WARN_ON(status != NV_OK); + } + + UNLOCK_NV_LINUX_DEVICES(); + +resume_modeset: + nvidia_modeset_resume(0); + + return NV_OK; +} + +static NV_STATUS +nv_suspend_devices( + nv_pm_action_t pm_action, + nv_pm_action_depth_t pm_action_depth +) +{ + nv_linux_state_t *nvl; + NvBool resume_devices = NV_FALSE; + NV_STATUS status = NV_OK; +#if defined(NV_PM_RUNTIME_AVAILABLE) + nv_state_t *nv; + struct device *dev; + + LOCK_NV_LINUX_DEVICES(); + + /* For Tegra PCI iGPU, forbid the GPU suspend via procfs */ + for (nvl = nv_linux_devices; nvl != NULL && status == NV_OK; nvl = nvl->next) + { + nv = NV_STATE_PTR(nvl); + dev = nvl->dev; + if (dev_is_pci(dev) && nv->is_tegra_pci_igpu_rg_enabled) + { + nv_printf(NV_DBG_INFO, + "NVRM: GPU suspend through procfs is forbidden with Tegra iGPU\n"); + UNLOCK_NV_LINUX_DEVICES(); + + return NV_ERR_NOT_SUPPORTED; + } + } + + UNLOCK_NV_LINUX_DEVICES(); +#endif + + nvidia_modeset_suspend(0); + + if (pm_action_depth == NV_PM_ACTION_DEPTH_MODESET) + { + return NV_OK; + } + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL && status == NV_OK; nvl = nvl->next) + { + status = nv_preempt_user_channels(NV_STATE_PTR(nvl)); + WARN_ON(status != NV_OK); + } + + UNLOCK_NV_LINUX_DEVICES(); + + if (status == NV_OK) + { + status = nv_uvm_suspend(); + WARN_ON(status != NV_OK); + } + if (status != NV_OK) + { + goto done; + } + + if (pm_action_depth == NV_PM_ACTION_DEPTH_UVM) + { + return NV_OK; + } + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL && status == NV_OK; nvl = nvl->next) + { + status = nvidia_suspend(nvl->dev, pm_action, NV_TRUE); + WARN_ON(status != NV_OK); + } + if (status != NV_OK) + { + resume_devices = NV_TRUE; + } + + UNLOCK_NV_LINUX_DEVICES(); + +done: + if (status != NV_OK) + { + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + if (resume_devices) + { + nvidia_resume(nvl->dev, NV_PM_ACTION_RESUME); + } + + nv_restore_user_channels(NV_STATE_PTR(nvl)); + } + + UNLOCK_NV_LINUX_DEVICES(); + + nv_uvm_resume(); + + nvidia_modeset_resume(0); + } + + return status; +} + +NV_STATUS +nv_set_system_power_state( + nv_power_state_t power_state, + 
nv_pm_action_depth_t pm_action_depth +) +{ + NV_STATUS status; + nv_pm_action_t pm_action; + + switch (power_state) + { + case NV_POWER_STATE_IN_HIBERNATE: + pm_action = NV_PM_ACTION_HIBERNATE; + break; + case NV_POWER_STATE_IN_STANDBY: + pm_action = NV_PM_ACTION_STANDBY; + break; + case NV_POWER_STATE_RUNNING: + pm_action = NV_PM_ACTION_RESUME; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + down(&nv_system_power_state_lock); + + if (nv_system_power_state == power_state) + { + status = NV_OK; + goto done; + } + + if (power_state == NV_POWER_STATE_RUNNING) + { + status = nv_resume_devices(pm_action, nv_system_pm_action_depth); + up_write(&nv_system_pm_lock); + } + else + { + if (nv_system_power_state != NV_POWER_STATE_RUNNING) + { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + nv_system_pm_action_depth = pm_action_depth; + + down_write(&nv_system_pm_lock); + status = nv_suspend_devices(pm_action, nv_system_pm_action_depth); + if (status != NV_OK) + { + up_write(&nv_system_pm_lock); + goto done; + } + } + + nv_system_power_state = power_state; + +done: + up(&nv_system_power_state_lock); + + return status; +} + +int nv_pmops_suspend( + struct device *dev +) +{ + NV_STATUS status; + + status = nvidia_suspend(dev, NV_PM_ACTION_STANDBY, NV_FALSE); + + if (status != NV_OK) + nvidia_resume(dev, NV_PM_ACTION_RESUME); + + return (status == NV_OK) ? 0 : -EIO; +} + +int nv_pmops_resume( + struct device *dev +) +{ + NV_STATUS status; + + status = nvidia_resume(dev, NV_PM_ACTION_RESUME); + return (status == NV_OK) ? 0 : -EIO; +} + +int nv_pmops_freeze( + struct device *dev +) +{ + NV_STATUS status; + + status = nvidia_suspend(dev, NV_PM_ACTION_HIBERNATE, NV_FALSE); + + if (status != NV_OK) + nvidia_resume(dev, NV_PM_ACTION_RESUME); + + return (status == NV_OK) ? 0 : -EIO; +} + +int nv_pmops_thaw( + struct device *dev +) +{ + return 0; +} + +int nv_pmops_restore( + struct device *dev +) +{ + NV_STATUS status; + + status = nvidia_resume(dev, NV_PM_ACTION_RESUME); + return (status == NV_OK) ? 0 : -EIO; +} + +int nv_pmops_poweroff( + struct device *dev +) +{ + return 0; +} + +static int +nvidia_transition_dynamic_power( + struct device *dev, + NvBool enter +) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + nv_state_t *nv = NV_STATE_PTR(nvl); + nvidia_stack_t *sp = NULL; + NvBool bTryAgain = NV_FALSE; + NV_STATUS status; + + if ((nv->flags & (NV_FLAG_OPEN | NV_FLAG_PERSISTENT_SW_STATE)) == 0) + { + return 0; + } + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return -ENOMEM; + } + + status = rm_transition_dynamic_power(sp, nv, enter, &bTryAgain); + + nv_kmem_cache_free_stack(sp); + + if (bTryAgain) + { + /* + * Return -EAGAIN so that kernel PM core will not treat this as a fatal error and + * reschedule the callback again in the future. + */ + return -EAGAIN; + } + + return (status == NV_OK) ? 
0 : -EIO; +} + +int nv_pmops_runtime_suspend( + struct device *dev +) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + nv_state_t *nv = NV_STATE_PTR(nvl);; + int err = 0; + +#if defined(CONFIG_PM_DEVFREQ) + if (nvl->devfreq_suspend != NULL) + { + err = nvl->devfreq_suspend(dev); + if (err) + { + return err; + } + } +#endif + + err = nvidia_transition_dynamic_power(dev, NV_TRUE); + if (err) + { + goto nv_pmops_runtime_suspend_exit; + } + + if (nv->flags & NV_FLAG_TRIGGER_FLR) + { + if (nvl->pci_dev) + { + os_pci_trigger_flr((void *)nvl->pci_dev); + } + } + + return err; + +nv_pmops_runtime_suspend_exit: +#if defined(CONFIG_PM_DEVFREQ) + if (nvl->devfreq_resume != NULL) + { + nvl->devfreq_resume(dev); + } +#endif + return err; +} + +int nv_pmops_runtime_resume( + struct device *dev +) +{ +#if defined(CONFIG_PM_DEVFREQ) + struct pci_dev *pci_dev = to_pci_dev(dev); + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); +#endif + int err; + + err = nvidia_transition_dynamic_power(dev, NV_FALSE); + if (err) + { + return err; + } + +#if defined(CONFIG_PM_DEVFREQ) + if (nvl->devfreq_resume != NULL) + { + err = nvl->devfreq_resume(dev); + if (err) + { + goto nv_pmops_runtime_resume_exit; + } + } + + return err; + +nv_pmops_runtime_resume_exit: + nvidia_transition_dynamic_power(dev, NV_TRUE); +#endif + return err; +} +#endif /* defined(CONFIG_PM) */ + +nv_state_t* NV_API_CALL nv_get_adapter_state( + NvU32 domain, + NvU8 bus, + NvU8 slot +) +{ + nv_linux_state_t *nvl; + + LOCK_NV_LINUX_DEVICES(); + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + nv_state_t *nv = NV_STATE_PTR(nvl); + if (nv->pci_info.domain == domain && nv->pci_info.bus == bus + && nv->pci_info.slot == slot) + { + UNLOCK_NV_LINUX_DEVICES(); + return nv; + } + } + UNLOCK_NV_LINUX_DEVICES(); + + return NULL; +} + +nv_state_t* NV_API_CALL nv_get_ctl_state(void) +{ + return NV_STATE_PTR(&nv_ctl_device); +} + +NV_STATUS NV_API_CALL nv_log_error( + nv_state_t *nv, + NvU32 error_number, + const char *format, + va_list ap +) +{ + NV_STATUS status = NV_OK; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + nv_report_error(nvl->pci_dev, error_number, format, ap); +#if defined(CONFIG_CRAY_XT) + status = nvos_forward_error_to_cray(nvl->pci_dev, error_number, + format, ap); +#endif + + return status; +} + +NV_STATUS NV_API_CALL nv_set_primary_vga_status( + nv_state_t *nv +) +{ + /* IORESOURCE_ROM_SHADOW wasn't added until 2.6.10 */ +#if defined(IORESOURCE_ROM_SHADOW) + nv_linux_state_t *nvl; + struct pci_dev *pci_dev; + + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + pci_dev = nvl->pci_dev; + + nv->primary_vga = ((NV_PCI_RESOURCE_FLAGS(pci_dev, PCI_ROM_RESOURCE) & + IORESOURCE_ROM_SHADOW) == IORESOURCE_ROM_SHADOW); + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NvBool NV_API_CALL nv_requires_dma_remap( + nv_state_t *nv +) +{ + NvBool dma_remap = NV_FALSE; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + dma_remap = !nv_dma_maps_swiotlb(nvl->dev); + return dma_remap; +} + +/* + * Intended for use by external kernel modules to list nvidia gpu ids. 
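+ * Callers typically invoke it twice: once with *gpu_count == 0 to query
+ * the device count, then again with gpu_ids[] sized to that count.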
+ */ +NvBool nvidia_get_gpuid_list(NvU32 *gpu_ids, NvU32 *gpu_count) +{ + nv_linux_state_t *nvl; + unsigned int count; + NvBool ret = NV_TRUE; + + LOCK_NV_LINUX_DEVICES(); + + count = 0; + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + count++; + + if (*gpu_count == 0) + { + goto done; + } + else if ((*gpu_count) < count) + { + ret = NV_FALSE; + goto done; + } + + count = 0; + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + nv_state_t *nv = NV_STATE_PTR(nvl); + gpu_ids[count++] = nv->gpu_id; + } + + +done: + + *gpu_count = count; + + UNLOCK_NV_LINUX_DEVICES(); + + return ret; +} + +/* + * Kernel-level analog to nvidia_open, intended for use by external + * kernel modules. This increments the ref count of the device with + * the given gpu_id and makes sure the device has been initialized. + * + * Clients of this interface are counted by the RM reset path, to ensure a + * GPU is not reset while the GPU is active. + * + * Returns -ENODEV if the given gpu_id does not exist. + */ +int nvidia_dev_get(NvU32 gpu_id, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl; + int rc; + + /* Takes nvl->ldata_lock */ + nvl = find_gpu_id(gpu_id); + if (!nvl) + return -ENODEV; + + rc = nv_open_device(NV_STATE_PTR(nvl), sp); + + if (rc == 0) + WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_TRUE) != NV_OK); + + up(&nvl->ldata_lock); + return rc; +} + +/* + * Kernel-level analog to nvidia_close, intended for use by external + * kernel modules. This decrements the ref count of the device with + * the given gpu_id, potentially tearing it down. + */ +void nvidia_dev_put(NvU32 gpu_id, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl; + + /* Takes nvl->ldata_lock */ + nvl = find_gpu_id(gpu_id); + if (!nvl) + return; + + nv_close_device(NV_STATE_PTR(nvl), sp); + + WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_FALSE) != NV_OK); + + up(&nvl->ldata_lock); +} + +/* + * Like nvidia_dev_get but uses UUID instead of gpu_id. Note that this may + * trigger initialization and teardown of unrelated devices to look up their + * UUIDs. + * + * Clients of this interface are counted by the RM reset path, to ensure a + * GPU is not reset while the GPU is active. + */ +int nvidia_dev_get_uuid(const NvU8 *uuid, nvidia_stack_t *sp) +{ + nv_state_t *nv = NULL; + nv_linux_state_t *nvl = NULL; + const NvU8 *dev_uuid; + int rc = 0; + + /* Takes nvl->ldata_lock */ + nvl = find_uuid_candidate(uuid); + while (nvl) + { + nv = NV_STATE_PTR(nvl); + + /* + * If the device is missing its UUID, this call exists solely so + * rm_get_gpu_uuid_raw will be called and we can inspect the UUID. + */ + rc = nv_open_device(nv, sp); + if (rc != 0) + goto out; + + /* The UUID should always be present following nv_open_device */ + dev_uuid = nv_get_cached_uuid(nv); + WARN_ON(!dev_uuid); + if (dev_uuid && memcmp(dev_uuid, uuid, GPU_UUID_LEN) == 0) + break; + + /* No match, try again. */ + nv_close_device(nv, sp); + up(&nvl->ldata_lock); + nvl = find_uuid_candidate(uuid); + } + + if (nvl) + { + rc = 0; + WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_TRUE) != NV_OK); + } + else + rc = -ENODEV; + +out: + if (nvl) + up(&nvl->ldata_lock); + return rc; +} + +/* + * Like nvidia_dev_put but uses UUID instead of gpu_id. 
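+ * Callers must have previously retained the device via nvidia_dev_get_uuid().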
+ */ +void nvidia_dev_put_uuid(const NvU8 *uuid, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl; + + /* Callers must already have called nvidia_dev_get_uuid() */ + + /* Takes nvl->ldata_lock */ + nvl = find_uuid(uuid); + if (!nvl) + return; + + nv_close_device(NV_STATE_PTR(nvl), sp); + + WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_FALSE) != NV_OK); + + up(&nvl->ldata_lock); +} + +int nvidia_dev_block_gc6(const NvU8 *uuid, nvidia_stack_t *sp) + +{ + nv_linux_state_t *nvl; + + /* Callers must already have called nvidia_dev_get_uuid() */ + + /* Takes nvl->ldata_lock */ + nvl = find_uuid(uuid); + if (!nvl) + return -ENODEV; + + if (rm_ref_dynamic_power(sp, NV_STATE_PTR(nvl), NV_DYNAMIC_PM_FINE) != NV_OK) + { + up(&nvl->ldata_lock); + return -EINVAL; + } + + up(&nvl->ldata_lock); + + return 0; +} + +int nvidia_dev_unblock_gc6(const NvU8 *uuid, nvidia_stack_t *sp) + +{ + nv_linux_state_t *nvl; + + /* Callers must already have called nvidia_dev_get_uuid() */ + + /* Takes nvl->ldata_lock */ + nvl = find_uuid(uuid); + if (!nvl) + return -ENODEV; + + rm_unref_dynamic_power(sp, NV_STATE_PTR(nvl), NV_DYNAMIC_PM_FINE); + + up(&nvl->ldata_lock); + + return 0; +} + +NV_STATUS NV_API_CALL nv_get_device_memory_config( + nv_state_t *nv, + NvU64 *compr_addr_sys_phys, + NvU64 *addr_guest_phys, + NvU64 *rsvd_phys, + NvU32 *addr_width, + NvS32 *node_id +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + +#if defined(NVCPU_AARCH64) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (node_id != NULL) + { + *node_id = nvl->numa_info.node_id; + } + + if (compr_addr_sys_phys) + { + *compr_addr_sys_phys = nvl->coherent_link_info.gpu_mem_pa; + } + if (addr_guest_phys) + { + *addr_guest_phys = nvl->coherent_link_info.gpu_mem_pa; + } + if (rsvd_phys) + { + *rsvd_phys = nvl->coherent_link_info.rsvd_mem_pa; + } + if (addr_width) + { + // TH500 PA width - NV_PFB_PRI_MMU_ATS_ADDR_RANGE_GRANULARITY + *addr_width = 48 - 37; + } + + status = NV_OK; +#endif + + return status; +} + +NV_STATUS NV_API_CALL nv_indicate_idle( + nv_state_t *nv +) +{ +#if NV_FILESYSTEM_ACCESS_AVAILABLE +#if defined(NV_PM_RUNTIME_AVAILABLE) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device *dev = nvl->dev; + struct file *file = nvl->sysfs_config_file; + loff_t f_pos = 0; + char buf; + + pm_runtime_put_noidle(dev); + +#if defined(NV_SEQ_READ_ITER_PRESENT) + { + struct kernfs_open_file *of = ((struct seq_file *)file->private_data)->private; + struct kernfs_node *kn; + + mutex_lock(&of->mutex); + kn = of->kn; + if (kn != NULL && atomic_inc_unless_negative(&kn->active)) + { + if ((kn->attr.ops != NULL) && (kn->attr.ops->read != NULL)) + { + kn->attr.ops->read(of, &buf, 1, f_pos); + } + atomic_dec(&kn->active); + } + mutex_unlock(&of->mutex); + } +#else + kernel_read(file, &buf, 1, &f_pos); +#endif + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NV_STATUS NV_API_CALL nv_indicate_not_idle( + nv_state_t *nv +) +{ +#if defined(NV_PM_RUNTIME_AVAILABLE) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device *dev = nvl->dev; + + pm_runtime_get_noresume(dev); + + nvl->is_forced_shutdown = NV_TRUE; + pci_bus_type.shutdown(dev); + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +void NV_API_CALL nv_idle_holdoff( + nv_state_t *nv +) +{ +#if defined(NV_PM_RUNTIME_AVAILABLE) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device *dev = nvl->dev; + + pm_runtime_get_noresume(dev); +#endif +} + 
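The helpers above map directly onto the kernel's runtime-PM usage counting; as orientation, here is a minimal sketch of that get/put pairing. It is illustrative only: sketch_idle_holdoff() and the placeholder work are assumptions, while pm_runtime_get_noresume() and pm_runtime_put_noidle() are the same kernel calls made by nv_idle_holdoff() and nv_indicate_idle().

    /* Sketch only: hold off runtime suspend across a critical section. */
    static void sketch_idle_holdoff(struct device *dev)
    {
        pm_runtime_get_noresume(dev);  /* raise the usage count; no wakeup */

        /* ... work that must not race with runtime suspend ... */

        pm_runtime_put_noidle(dev);    /* drop the count; no idle re-check */
    }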
+NvBool NV_API_CALL nv_dynamic_power_available
+(
+ nv_state_t *nv
+)
+{
+#if defined(NV_PM_RUNTIME_AVAILABLE)
+ nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+ return nvl->sysfs_config_file != NULL;
+#else
+ return NV_FALSE;
+#endif
+}
+
+/* caller should hold nv_linux_devices_lock using LOCK_NV_LINUX_DEVICES */
+int nv_linux_add_device_locked(nv_linux_state_t *nvl)
+{
+ int rc = -1;
+ int i;
+
+ // look for a free minor number and assign a unique minor number to this device
+ for (i = 0; i <= NV_MINOR_DEVICE_NUMBER_REGULAR_MAX; i++)
+ {
+ if (nv_linux_minor_num_table[i] == NULL)
+ {
+ nv_linux_minor_num_table[i] = nvl;
+ nvl->minor_num = i;
+ rc = 0;
+ break;
+ }
+ }
+
+ // bail if no minor number is free
+ if (rc != 0)
+ return rc;
+
+ if (nv_linux_devices == NULL) {
+ nv_linux_devices = nvl;
+ }
+ else
+ {
+ nv_linux_state_t *tnvl;
+ for (tnvl = nv_linux_devices; tnvl->next != NULL; tnvl = tnvl->next);
+ tnvl->next = nvl;
+ }
+
+ return rc;
+}
+
+/* caller should hold nv_linux_devices_lock using LOCK_NV_LINUX_DEVICES */
+void nv_linux_remove_device_locked(nv_linux_state_t *nvl)
+{
+ if (nvl == nv_linux_devices) {
+ nv_linux_devices = nvl->next;
+ }
+ else
+ {
+ nv_linux_state_t *tnvl;
+ for (tnvl = nv_linux_devices; tnvl->next != nvl; tnvl = tnvl->next);
+ tnvl->next = nvl->next;
+ }
+
+ nv_linux_minor_num_table[nvl->minor_num] = NULL;
+}
+
+int nv_linux_init_open_q(nv_linux_state_t *nvl)
+{
+ int rc;
+ rc = nv_kthread_q_init(&nvl->open_q, "nv_open_q");
+ if (rc != 0)
+ return rc;
+
+ down(&nvl->open_q_lock);
+ nvl->is_accepting_opens = NV_TRUE;
+ up(&nvl->open_q_lock);
+ return 0;
+}
+
+void nv_linux_stop_open_q(nv_linux_state_t *nvl)
+{
+ NvBool should_stop = NV_FALSE;
+
+ down(&nvl->open_q_lock);
+ if (nvl->is_accepting_opens)
+ {
+ should_stop = NV_TRUE;
+ nvl->is_accepting_opens = NV_FALSE;
+ }
+ up(&nvl->open_q_lock);
+
+ if (should_stop)
+ nv_kthread_q_stop(&nvl->open_q);
+}
+
+void NV_API_CALL nv_control_soc_irqs(nv_state_t *nv, NvBool bEnable)
+{
+ int count;
+ unsigned long flags;
+ nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+
+ if (nv->current_soc_irq != -1)
+ return;
+
+ NV_SPIN_LOCK_IRQSAVE(&nvl->soc_isr_lock, flags);
+ if (bEnable)
+ {
+ for (count = 0; count < nv->num_soc_irqs; count++)
+ {
+ if (nv->soc_irq_info[count].ref_count == 0)
+ {
+ nv->soc_irq_info[count].ref_count++;
+ enable_irq(nv->soc_irq_info[count].irq_num);
+ }
+ }
+ }
+ else
+ {
+ for (count = 0; count < nv->num_soc_irqs; count++)
+ {
+ if (nv->soc_irq_info[count].ref_count == 1)
+ {
+ nv->soc_irq_info[count].ref_count--;
+ disable_irq_nosync(nv->soc_irq_info[count].irq_num);
+ }
+ }
+ }
+ NV_SPIN_UNLOCK_IRQRESTORE(&nvl->soc_isr_lock, flags);
+}
+
+NvU32 NV_API_CALL nv_get_dev_minor(nv_state_t *nv)
+{
+ nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+
+ return nvl->minor_num;
+}
+
+NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap(int fd, int *duped_fd)
+{
+ return NV_ERR_NOT_SUPPORTED;
+}
+
+/*
+ * Wakes up the NVIDIA GPU HDA codec and controller by reading the
+ * codec proc file.
+ */
+void NV_API_CALL nv_audio_dynamic_power(
+ nv_state_t *nv
+)
+{
+/*
+ * Runtime power management for the NVIDIA HDA controller is possible
+ * only after commit 07f4f97d7b4b ("vga_switcheroo: Use device link for HDA
+ * controller"). This commit also moved the 'PCI_CLASS_MULTIMEDIA_HD_AUDIO'
+ * macro into <linux/pci_ids.h>.
+ * If 'NV_PCI_CLASS_MULTIMEDIA_HD_AUDIO_PRESENT' is not defined, then
+ * this function will be a stub.
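+ * ('NV_PCI_CLASS_MULTIMEDIA_HD_AUDIO_PRESENT' is generated by the
+ * conftest checks at build time.)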
+ *
+ * Also, check if runtime PM is enabled in the kernel (with
+ * 'NV_PM_RUNTIME_AVAILABLE') and stub this function if it is disabled. This
+ * function uses kernel fields only present when the kconfig has runtime PM
+ * enabled.
+ */
+#if defined(NV_PCI_CLASS_MULTIMEDIA_HD_AUDIO_PRESENT) && defined(NV_PM_RUNTIME_AVAILABLE)
+ nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+ struct device *dev = nvl->dev;
+ struct pci_dev *audio_pci_dev, *pci_dev;
+ struct snd_card *card;
+
+ if (!dev_is_pci(dev))
+ return;
+
+ pci_dev = to_pci_dev(dev);
+
+ audio_pci_dev = os_pci_init_handle(NV_PCI_DOMAIN_NUMBER(pci_dev),
+ NV_PCI_BUS_NUMBER(pci_dev),
+ NV_PCI_SLOT_NUMBER(pci_dev),
+ 1, NULL, NULL);
+
+ if (audio_pci_dev == NULL)
+ return;
+
+ /*
+ * Check if the HDA controller is in the PM suspended state. The HDA
+ * controller cannot be runtime resumed if this API is called during
+ * system suspend/resume while the controller is PM suspended.
+ */
+ if (audio_pci_dev->dev.power.is_suspended)
+ return;
+
+ card = pci_get_drvdata(audio_pci_dev);
+ if (card == NULL)
+ return;
+
+ /*
+ * Commit be57bfffb7b5 ("ALSA: hda: move hda_codec.h to include/sound")
+ * in v4.20-rc1 moved the "hda_codec.h" header file from the private
+ * sound folder to include/sound.
+ */
+#if defined(NV_SOUND_HDA_CODEC_H_PRESENT)
+ {
+ struct list_head *p;
+ struct hda_codec *codec = NULL;
+ unsigned int cmd, res;
+
+ /*
+ * Traverse the list of devices which the sound card maintains and
+ * search for the HDA codec controller.
+ */
+ list_for_each_prev(p, &card->devices)
+ {
+ struct snd_device *pdev = list_entry(p, struct snd_device, list);
+
+ if (pdev->type == SNDRV_DEV_CODEC)
+ {
+ codec = pdev->device_data;
+
+ /*
+ * The NVIDIA HDA codec controller uses the Linux kernel HDA
+ * codec driver. Commit 05852448690d ("ALSA: hda - Support
+ * indirect execution of verbs") added support for overriding
+ * exec_verb. For the NVIDIA HDA codec driver,
+ * codec->core.exec_verb will be codec_exec_verb().
+ */
+ if (codec->core.exec_verb == NULL)
+ {
+ return;
+ }
+
+ break;
+ }
+ }
+
+ if (codec == NULL)
+ {
+ return;
+ }
+
+ /* If the HDA codec controller is already runtime active, then return */
+ if (snd_hdac_is_power_on(&codec->core))
+ {
+ return;
+ }
+
+ /*
+ * Encode the codec verb for getting the vendor ID from the root node.
+ * Refer to the Intel High Definition Audio Specification for details.
+ */
+ cmd = (codec->addr << 28) | (AC_NODE_ROOT << 20) |
+ (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
+
+ /*
+ * This internally increments the runtime PM refcount, wakes up the
+ * audio codec controller, and sends the HW command to fetch the
+ * vendor ID. Once the vendor ID is returned, the runtime PM refcount
+ * is decremented again and, if it reaches zero, the codec controller
+ * is runtime suspended once the autosuspend timer expires.
+ */
+ codec->core.exec_verb(&codec->core, cmd, 0, &res);
+ }
+#else
+ {
+ int codec_addr;
+
+ /*
+ * The filp_open() call below depends on the current task's fs_struct
+ * (current->fs), which may already be NULL if this is called during
+ * process teardown.
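+ * (The open is issued via os_open_and_read_file() below.)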
+ */ + if (current->fs == NULL) + return; + + /* If device is runtime active, then return */ + if (audio_pci_dev->dev.power.runtime_status == RPM_ACTIVE) + return; + + for (codec_addr = 0; codec_addr < NV_HDA_MAX_CODECS; codec_addr++) + { + char filename[48]; + NvU8 buf; + int ret; + + ret = snprintf(filename, sizeof(filename), + "/proc/asound/card%d/codec#%d", + card->number, codec_addr); + + if (ret > 0 && ret < sizeof(filename) && + (os_open_and_read_file(filename, &buf, 1) == NV_OK)) + { + break; + } + } + } +#endif +#endif +} + +static int nv_match_dev_state(const void *data, struct file *filp, unsigned fd) +{ + nv_linux_state_t *nvl = NULL; + + if (filp == NULL || + filp->f_op != &nvidia_fops || + filp->private_data == NULL) + return 0; + + nvl = NV_GET_NVL_FROM_FILEP(filp); + if (nvl == NULL) + return 0; + + return (data == nvl); +} + +NvBool NV_API_CALL nv_match_gpu_os_info(nv_state_t *nv, void *os_info) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + return nv_match_dev_state(nvl, os_info, -1); +} + +NvBool NV_API_CALL nv_is_gpu_accessible(nv_state_t *nv) +{ + struct files_struct *files = current->files; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + return !!iterate_fd(files, 0, nv_match_dev_state, nvl); +} + +NvBool NV_API_CALL nv_platform_supports_s0ix(void) +{ +#if defined(CONFIG_ACPI) + return (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) != 0; +#else + return NV_FALSE; +#endif +} + +NvBool NV_API_CALL nv_s2idle_pm_configured(void) +{ + NvU8 buf[8]; + +#if defined(NV_SEQ_READ_ITER_PRESENT) + struct file *file; + ssize_t num_read; + struct kiocb kiocb; + struct iov_iter iter; + struct kvec iov = { + .iov_base = &buf, + .iov_len = sizeof(buf), + }; + + if (os_open_readonly_file("/sys/power/mem_sleep", (void **)&file) != NV_OK) + { + return NV_FALSE; + } + + /* + * init_sync_kiocb() internally uses GPL licensed __get_task_ioprio() from + * v5.20-rc1. 
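+ * Hence the kiocb is initialized by hand when
+ * NV_GET_TASK_IOPRIO_PRESENT is defined.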
+ */
+#if defined(NV_GET_TASK_IOPRIO_PRESENT)
+ memset(&kiocb, 0, sizeof(kiocb));
+ kiocb.ki_filp = file;
+ kiocb.ki_flags = iocb_flags(file);
+ kiocb.ki_ioprio = IOPRIO_DEFAULT;
+#else
+ init_sync_kiocb(&kiocb, file);
+#endif
+
+ kiocb.ki_pos = 0;
+ iov_iter_kvec(&iter, READ, &iov, 1, sizeof(buf));
+
+ num_read = seq_read_iter(&kiocb, &iter);
+
+ os_close_file((void *)file);
+
+ if (num_read != sizeof(buf))
+ {
+ return NV_FALSE;
+ }
+#else
+ if (os_open_and_read_file("/sys/power/mem_sleep", buf,
+ sizeof(buf)) != NV_OK)
+ {
+ return NV_FALSE;
+ }
+#endif
+
+ return (memcmp(buf, "[s2idle]", 8) == 0);
+}
+
+NvBool NV_API_CALL nv_pci_tegra_register_power_domain
+(
+ nv_state_t *nv,
+ NvBool attach
+)
+{
+#if defined(NV_PM_RUNTIME_AVAILABLE)
+ nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+ struct pci_dev *pci_dev = nvl->pci_dev;
+ struct device_node *node = pci_dev->dev.of_node;
+
+ if (attach)
+ {
+ if (!node)
+ {
+ nv_printf(NV_DBG_WARNINGS, "NVRM: No dt node associated with this device\n");
+ return NV_FALSE;
+ }
+ if (!of_find_property(node, "power-domains", NULL))
+ {
+ nv_printf(NV_DBG_WARNINGS, "NVRM: No power-domains property defined in the dt node\n");
+ return NV_FALSE;
+ }
+
+ nv_printf(NV_DBG_INFO, "NVRM: Attaching device to GPU power domain\n");
+ return (dev_pm_domain_attach(&pci_dev->dev, true) == 0);
+ }
+ else
+ {
+ nv_printf(NV_DBG_INFO, "NVRM: Detaching device from GPU power domain\n");
+ dev_pm_domain_detach(&pci_dev->dev, true);
+ }
+#endif
+ return NV_TRUE;
+}
+
+NvBool NV_API_CALL nv_pci_tegra_pm_init
+(
+ nv_state_t *nv
+)
+{
+#if defined(NV_PM_RUNTIME_AVAILABLE)
+ nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+ struct pci_dev *pci_dev = nvl->pci_dev;
+ struct pci_bus *bus = pci_dev->bus;
+ struct pci_host_bridge *bridge;
+ struct device *ctrl;
+
+ if (pci_dev->dev.pm_domain != NULL ||
+ nv_pci_tegra_register_power_domain(nv, NV_TRUE) == NV_FALSE)
+ {
+ return NV_FALSE;
+ }
+
+ // Enable runtime PM for the PCIe controller of the GPU to avoid
+ // PCIe enumeration failure with tegra iGPU
+ while (bus->parent)
+ bus = bus->parent;
+
+ bridge = to_pci_host_bridge(bus->bridge);
+ ctrl = bridge->dev.parent;
+
+ nv_printf(NV_DBG_INFO, "NVRM: Enable runtime PM for PCIe Controller\n");
+ pm_runtime_enable(ctrl);
+
+ // Use autosuspend for the GPU with an idleness threshold of 500 ms
+ pm_runtime_set_autosuspend_delay(&pci_dev->dev, 500);
+ pm_runtime_use_autosuspend(&pci_dev->dev);
+ return NV_TRUE;
+#endif
+ return NV_FALSE;
+}
+
+void NV_API_CALL nv_pci_tegra_pm_deinit
+(
+ nv_state_t *nv
+)
+{
+#if defined(NV_PM_RUNTIME_AVAILABLE)
+ nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+ struct pci_dev *pci_dev = nvl->pci_dev;
+ struct pci_bus *bus = pci_dev->bus;
+ struct pci_host_bridge *bridge;
+ struct device *ctrl;
+
+ if (pci_dev->dev.pm_domain == NULL)
+ {
+ return;
+ }
+
+ // Stop autosuspend for GPU
+ pm_runtime_dont_use_autosuspend(&pci_dev->dev);
+
+ /*
+ * Runtime PM for the GPU's PCIe controller was enabled in
+ * nv_pci_tegra_pm_init() because, on L4T, the controller is also
+ * registered to the GPU power domain and must not hold that domain
+ * on while the GPU is idle and runtime suspended. Balance that
+ * here by disabling it again.
+ */ + while (bus->parent) + bus = bus->parent; + + bridge = to_pci_host_bridge(bus->bridge); + ctrl = bridge->dev.parent; + + nv_printf(NV_DBG_INFO, "NVRM: Disable runtime PM for PCIe Controller\n"); + pm_runtime_disable(ctrl); + + nv_pci_tegra_register_power_domain(nv, NV_FALSE); +#endif +} + +/* + * Function query system chassis info, to figure out if the platform is + * Laptop or Notebook. + * This function should be used when querying GPU form factor information is + * not possible via core RM or if querying both system and GPU form factor + * information is necessary. + */ +NvBool NV_API_CALL nv_is_chassis_notebook(void) +{ + const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); + + // + // Return true only for Laptop & Notebook + // As per SMBIOS spec Laptop = 9 and Notebook = 10 + // + return (chassis_type && (!strcmp(chassis_type, "9") || !strcmp(chassis_type, "10"))); +} + +void NV_API_CALL nv_allow_runtime_suspend +( + nv_state_t *nv +) +{ +#if defined(NV_PM_RUNTIME_AVAILABLE) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device *dev = nvl->dev; + + spin_lock_irq(&dev->power.lock); + + if (dev->power.runtime_auto == false) + { + dev->power.runtime_auto = true; + atomic_add_unless(&dev->power.usage_count, -1, 0); + } + + spin_unlock_irq(&dev->power.lock); +#endif +} + +void NV_API_CALL nv_disallow_runtime_suspend +( + nv_state_t *nv +) +{ +#if defined(NV_PM_RUNTIME_AVAILABLE) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device *dev = nvl->dev; + + spin_lock_irq(&dev->power.lock); + + if (dev->power.runtime_auto == true) + { + dev->power.runtime_auto = false; + atomic_inc(&dev->power.usage_count); + } + + spin_unlock_irq(&dev->power.lock); +#endif +} + +void NV_API_CALL nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size) +{ +#if NVCPU_IS_AARCH64 + NvU64 va, cbsize; + NvU64 end_cpu_virtual = cpu_virtual + size; + + nv_printf(NV_DBG_INFO, + "Flushing CPU virtual range [0x%llx, 0x%llx)\n", + cpu_virtual, end_cpu_virtual); + + cbsize = cache_line_size(); + + // Force eviction of any cache lines from the NUMA-onlined region. + for (va = cpu_virtual; va < end_cpu_virtual; va += cbsize) + { + asm volatile("dc civac, %0" : : "r" (va): "memory"); + // Reschedule if necessary to avoid lockup warnings + cond_resched(); + } + asm volatile("dsb sy" : : : "memory"); +#endif +} + +static struct resource *nv_next_resource(struct resource *p) +{ + if (p->child != NULL) + return p->child; + + while ((p->sibling == NULL) && (p->parent != NULL)) + p = p->parent; + + return p->sibling; +} + +/* + * Function to get the correct PCI Bus memory window which can be mapped + * in the real mode emulator (emu). + * The function gets called during the initialization of the emu before + * remapping it to OS. 
+ */
+ */ +void NV_API_CALL nv_get_updated_emu_seg( + NvU32 *start, + NvU32 *end +) +{ + struct resource *p; + + if (*start >= *end) + return; + + for (p = iomem_resource.child; (p != NULL); p = nv_next_resource(p)) + { + /* If we passed the resource we are looking for, stop */ + if (p->start > *end) + { + p = NULL; + break; + } + + /* Skip until we find a range that matches what we look for */ + if (p->end < *start) + continue; + + if ((p->end > *end) && (p->child)) + continue; + + if ((p->flags & IORESOURCE_MEM) != IORESOURCE_MEM) + continue; + + /* Found a match, break */ + break; + } + + if (p != NULL) + { + *start = max((resource_size_t)*start, p->start); + *end = min((resource_size_t)*end, p->end); + } +} + +NV_STATUS NV_API_CALL nv_get_egm_info( + nv_state_t *nv, + NvU64 *phys_addr, + NvU64 *size, + NvS32 *egm_node_id +) +{ +#if defined(CONFIG_ACPI_NUMA) && NV_IS_EXPORT_SYMBOL_PRESENT_pxm_to_node + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU64 pa, sz, pxm; + + if (device_property_read_u64(nvl->dev, "nvidia,egm-pxm", &pxm) != 0) + { + goto failed; + } + + if (device_property_read_u64(nvl->dev, "nvidia,egm-base-pa", &pa) != 0) + { + goto failed; + } + + if (device_property_read_u64(nvl->dev, "nvidia,egm-size", &sz) != 0) + { + goto failed; + } + + NV_DEV_PRINTF(NV_DBG_INFO, nv, "DSD properties: \n"); + NV_DEV_PRINTF(NV_DBG_INFO, nv, "\tEGM base PA: 0x%llx \n", pa); + NV_DEV_PRINTF(NV_DBG_INFO, nv, "\tEGM size: 0x%llx \n", sz); + NV_DEV_PRINTF(NV_DBG_INFO, nv, "\tEGM _PXM: 0x%llx \n", pxm); + + if (egm_node_id != NULL) + { + *egm_node_id = pxm_to_node(pxm); + nv_printf(NV_DBG_INFO, "EGM node id: %d\n", *egm_node_id); + } + + if (phys_addr != NULL) + { + *phys_addr = pa; + nv_printf(NV_DBG_INFO, "EGM base addr: 0x%llx\n", *phys_addr); + } + + if (size != NULL) + { + *size = sz; + nv_printf(NV_DBG_INFO, "EGM size: 0x%llx\n", *size); + } + + return NV_OK; + +failed: +#endif // defined(CONFIG_ACPI_NUMA) && NV_IS_EXPORT_SYMBOL_PRESENT_pxm_to_node + + NV_DEV_PRINTF(NV_DBG_INFO, nv, "Cannot get EGM info\n"); + return NV_ERR_NOT_SUPPORTED; +} + +void NV_API_CALL nv_get_screen_info( + nv_state_t *nv, + NvU64 *pPhysicalAddress, + NvU32 *pFbWidth, + NvU32 *pFbHeight, + NvU32 *pFbDepth, + NvU32 *pFbPitch, + NvU64 *pFbSize +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct pci_dev *pci_dev = nvl->pci_dev; + int i; + + *pPhysicalAddress = 0; + *pFbWidth = *pFbHeight = *pFbDepth = *pFbPitch = *pFbSize = 0; + +#if defined(CONFIG_FB) && defined(NV_NUM_REGISTERED_FB_PRESENT) + if (num_registered_fb > 0) + { + for (i = 0; i < num_registered_fb; i++) + { + if (!registered_fb[i]) + continue; + + /* Make sure base address is mapped to GPU BAR */ + if (NV_IS_CONSOLE_MAPPED(nv, registered_fb[i]->fix.smem_start)) + { + *pPhysicalAddress = registered_fb[i]->fix.smem_start; + *pFbWidth = registered_fb[i]->var.xres; + *pFbHeight = registered_fb[i]->var.yres; + *pFbDepth = registered_fb[i]->var.bits_per_pixel; + *pFbPitch = registered_fb[i]->fix.line_length; + *pFbSize = registered_fb[i]->fix.smem_len; + return; + } + } + } +#endif + + /* + * If the screen info is not found in the registered FBs then fallback + * to the screen_info structure. + * + * The SYSFB_SIMPLEFB option, if enabled, marks VGA/VBE/EFI framebuffers as + * generic framebuffers so the new generic system-framebuffer drivers can + * be used instead. DRM_SIMPLEDRM drives the generic system-framebuffers + * device created by SYSFB_SIMPLEFB. 
+ * + * SYSFB_SIMPLEFB registers a dummy framebuffer which does not contain the + * information required by nv_get_screen_info(), therefore you need to + * fall back onto the screen_info structure. + * + * After commit b8466fe82b79 ("efi: move screen_info into efi init code") + * in v6.7, 'screen_info' is exported as GPL licensed symbol for ARM64. + */ + +#if NV_CHECK_EXPORT_SYMBOL(screen_info) + /* + * If there is not a framebuffer console, return 0 size. + * + * orig_video_isVGA is set to 1 during early Linux kernel + * initialization, and then will be set to a value, such as + * VIDEO_TYPE_VLFB or VIDEO_TYPE_EFI if an fbdev console is used. + */ + if (screen_info.orig_video_isVGA > 1) + { + NvU64 physAddr = screen_info.lfb_base; +#if defined(VIDEO_CAPABILITY_64BIT_BASE) + if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) + { + physAddr |= (NvU64)screen_info.ext_lfb_base << 32; + } +#endif + + /* Make sure base address is mapped to GPU BAR */ + if (NV_IS_CONSOLE_MAPPED(nv, physAddr)) + { + *pPhysicalAddress = physAddr; + *pFbWidth = screen_info.lfb_width; + *pFbHeight = screen_info.lfb_height; + *pFbDepth = screen_info.lfb_depth; + *pFbPitch = screen_info.lfb_linelength; + *pFbSize = (NvU64)(*pFbHeight) * (NvU64)(*pFbPitch); + return; + } + } +#endif + + /* + * If screen info can't be fetched with previous methods, then try + * to get the base address and size from the memory resource tree. + */ + if (pci_dev != NULL) + { + BUILD_BUG_ON(NV_GPU_BAR_INDEX_IMEM != NV_GPU_BAR_INDEX_FB + 1); + for (i = NV_GPU_BAR_INDEX_FB; i <= NV_GPU_BAR_INDEX_IMEM; i++) + { + int bar_index = nv_bar_index_to_os_bar_index(pci_dev, i); + struct resource *gpu_bar_res = &pci_dev->resource[bar_index]; + struct resource *res = gpu_bar_res->child; + + /* + * Console resource will become child resource of pci-dev resource. + * Check if child resource start address matches with expected + * console start address. + */ + if ((res != NULL) && + NV_IS_CONSOLE_MAPPED(nv, res->start)) + { + NvU32 res_name_len = strlen(res->name); + + /* + * The resource name ends with 'fb' (efifb, vesafb, etc.). + * For simple-framebuffer, the resource name is 'BOOTFB'. + * Confirm if the resources name either ends with 'fb' or 'FB'. + */ + if ((res_name_len > 2) && + !strcasecmp((res->name + res_name_len - 2), "fb")) + { + *pPhysicalAddress = res->start; + *pFbSize = resource_size(res); + return; + } + } + } + } +} + +void NV_API_CALL nv_set_gpu_pg_mask +( + nv_state_t *nv +) +{ +/* + * This function is used to set the GPU PG mask for the Tegra PCI iGPU. + * After sending the PG mask to BPMP, GPU needs a FLR(function level reset) or + * a GPU reset to make PG mask effective. + * + * As Tegra iGPU rail-ungate itself is a GPU reset or GPU cold-boot, GPU PG mask could + * rely on it and it would be triggered when runtime PM is enabled. + * + * Make sure the GPU PG feature is allowable only when runtime PM is supported here. + */ +#if defined(NV_BPMP_MRQ_HAS_STRAP_SET) && defined(NV_PM_RUNTIME_AVAILABLE) + struct mrq_strap_request request; + NvS32 ret, api_ret; + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + /* + * Only certain Tegra which supports Rail-Gating could use this feature + * because making PG mask effective requires a GPU FLR or GPU cold-boot. 
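+ * The mask itself is handed to BPMP through an MRQ_STRAP request below.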
+ */ + if (!nv->is_tegra_pci_igpu_rg_enabled || (nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + nv_printf(NV_DBG_INFO, "NVRM: gpu_pg_mask is not supported.\n"); + return; + } + + // overlay the gpu_pg_mask from module parameter + if (NVreg_TegraGpuPgMask != NV_TEGRA_PCI_IGPU_PG_MASK_DEFAULT) { + nv_printf(NV_DBG_INFO, "NVRM: overlay gpu_pg_mask with module parameter.\n"); + nv->tegra_pci_igpu_pg_mask = NVreg_TegraGpuPgMask; + } + + if (nv->tegra_pci_igpu_pg_mask == NV_TEGRA_PCI_IGPU_PG_MASK_DEFAULT) { + nv_printf(NV_DBG_INFO, "NVRM: Using default gpu_pg_mask. "\ + "There's no need to send BPMP MRQ.\n"); + return; + } + + memset(&request, 0, sizeof(request)); + request.cmd = STRAP_SET; + request.id = TEGRA264_STRAP_NV_FUSE_CTRL_OPT_GPU; + request.value = nv->tegra_pci_igpu_pg_mask; + + status = nv_bpmp_send_mrq(nv, + MRQ_STRAP, + &request, + sizeof(request), + NULL, + 0, + &ret, + &api_ret); + + if (status != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to call bpmp_send_mrq\n"); + return; + } + if (api_ret) + { + nv_printf(NV_DBG_ERRORS, "NVRM: BPMP call for gpu_pg_mask %d failed, rv = %d\n",\ + nv->tegra_pci_igpu_pg_mask, api_ret); + return; + } + + nv_printf(NV_DBG_INFO, "NVRM: set gpu_pg_mask %d success\n", nv->tegra_pci_igpu_pg_mask); +#else + nv_printf(NV_DBG_INFO, "NVRM: gpu_pg_mask configuration is not supported\n"); +#endif // defined(NV_BPMP_MRQ_HAS_STRAP_SET) && defined(NV_PM_RUNTIME_AVAILABLE) +} + +module_init(nvidia_init_module); +module_exit(nvidia_exit_module); diff --git a/kernel-open/nvidia/nv_gpu_ops.h b/kernel-open/nvidia/nv_gpu_ops.h new file mode 100644 index 0000000..60836d6 --- /dev/null +++ b/kernel-open/nvidia/nv_gpu_ops.h @@ -0,0 +1,348 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* + * nv_gpu_ops.h + * + * This file defines the interface between the common RM layer + * and the OS specific platform layers. 
(Currently supported + * are Linux and KMD) + * + */ + +#ifndef _NV_GPU_OPS_H_ +#define _NV_GPU_OPS_H_ +#include "nvgputypes.h" +#include "nv_uvm_types.h" +#include "nv_uvm_user_types.h" + +typedef struct gpuSession *gpuSessionHandle; +typedef struct gpuDevice *gpuDeviceHandle; +typedef struct gpuAddressSpace *gpuAddressSpaceHandle; +typedef struct gpuTsg *gpuTsgHandle; +typedef struct gpuChannel *gpuChannelHandle; +typedef struct gpuObject *gpuObjectHandle; + +typedef struct gpuRetainedChannel_struct gpuRetainedChannel; + + +NV_STATUS calculatePCIELinkRateMBps(NvU32 lanes, + NvU32 pciLinkMaxSpeed, + NvU32 *pcieLinkRate); + +NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session); + +NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session); + +NV_STATUS nvGpuOpsDeviceCreate(struct gpuSession *session, + const gpuInfo *pGpuInfo, + const NvProcessorUuid *gpuGuid, + struct gpuDevice **device, + NvBool bCreateSmcPartition); + +NV_STATUS nvGpuOpsDeviceDestroy(struct gpuDevice *device); + +NV_STATUS nvGpuOpsAddressSpaceCreate(struct gpuDevice *device, + NvU64 vaBase, + NvU64 vaSize, + NvBool enableAts, + gpuAddressSpaceHandle *vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo); + +NV_STATUS nvGpuOpsGetP2PCaps(gpuDeviceHandle device1, + gpuDeviceHandle device2, + getP2PCapsParams *p2pCaps); + +void nvGpuOpsAddressSpaceDestroy(gpuAddressSpaceHandle vaSpace); + +NV_STATUS nvGpuOpsMemoryAllocFb (gpuAddressSpaceHandle vaSpace, + NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo); + +NV_STATUS nvGpuOpsMemoryAllocSys (gpuAddressSpaceHandle vaSpace, + NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo); + +NV_STATUS nvGpuOpsPmaAllocPages(void *pPma, + NvLength pageCount, + NvU64 pageSize, + gpuPmaAllocationOptions *pPmaAllocOptions, + NvU64 *pPages); + +void nvGpuOpsPmaFreePages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU64 pageSize, + NvU32 flags); + +NV_STATUS nvGpuOpsPmaPinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU64 pageSize, + NvU32 flags); + +NV_STATUS nvGpuOpsTsgAllocate(gpuAddressSpaceHandle vaSpace, + const gpuTsgAllocParams *params, + gpuTsgHandle *tsgHandle); + +NV_STATUS nvGpuOpsChannelAllocate(const gpuTsgHandle tsgHandle, + const gpuChannelAllocParams *params, + gpuChannelHandle *channelHandle, + gpuChannelInfo *channelInfo); + +NV_STATUS nvGpuOpsMemoryReopen(struct gpuAddressSpace *vaSpace, + NvHandle hSrcClient, NvHandle hSrcAllocation, NvLength length, NvU64 *gpuOffset); + +void nvGpuOpsTsgDestroy(struct gpuTsg *tsg); + +void nvGpuOpsChannelDestroy(struct gpuChannel *channel); + +void nvGpuOpsMemoryFree(gpuAddressSpaceHandle vaSpace, + NvU64 pointer); + +NV_STATUS nvGpuOpsMemoryCpuMap(gpuAddressSpaceHandle vaSpace, + NvU64 memory, NvLength length, + void **cpuPtr, NvU64 pageSize); + +void nvGpuOpsMemoryCpuUnMap(gpuAddressSpaceHandle vaSpace, + void* cpuPtr); + +NV_STATUS nvGpuOpsQueryCaps(struct gpuDevice *device, + gpuCaps *caps); + +NV_STATUS nvGpuOpsQueryCesCaps(struct gpuDevice *device, + gpuCesCaps *caps); + +NV_STATUS nvGpuOpsDupAllocation(struct gpuAddressSpace *srcVaSpace, + NvU64 srcAddress, + struct gpuAddressSpace *dstVaSpace, + NvU64 dstVaAlignment, + NvU64 *dstAddress); + +NV_STATUS nvGpuOpsDupMemory(struct gpuDevice *device, + NvHandle hClient, + NvHandle hPhysMemory, + NvHandle *hDupMemory, + gpuMemoryInfo *pGpuMemoryInfo); + +NV_STATUS nvGpuOpsGetGuid(NvHandle hClient, NvHandle hDevice, + NvHandle hSubDevice, NvU8 *gpuGuid, + unsigned guidLength); + +NV_STATUS nvGpuOpsGetClientInfoFromPid(unsigned pid, + const 
NvU8 *gpuUuid, + NvHandle *hClient, + NvHandle *hDevice, + NvHandle *hSubDevice); + +NV_STATUS nvGpuOpsFreeDupedHandle(struct gpuDevice *device, + NvHandle hPhysHandle); + +NV_STATUS nvGpuOpsGetAttachedGpus(NvU8 *guidList, unsigned *numGpus); + +NV_STATUS nvGpuOpsGetGpuInfo(const NvProcessorUuid *gpuUuid, + const gpuClientInfo *pGpuClientInfo, + gpuInfo *pGpuInfo); + +NV_STATUS nvGpuOpsGetGpuIds(const NvU8 *pUuid, unsigned uuidLength, NvU32 *pDeviceId, + NvU32 *pSubdeviceId); + +NV_STATUS nvGpuOpsOwnPageFaultIntr(struct gpuDevice *device, NvBool bOwnInterrupts); + +NV_STATUS nvGpuOpsServiceDeviceInterruptsRM(struct gpuDevice *device); + +NV_STATUS nvGpuOpsCheckEccErrorSlowpath(struct gpuChannel * channel, NvBool *bEccDbeSet); + +NV_STATUS nvGpuOpsSetPageDirectory(struct gpuAddressSpace * vaSpace, + NvU64 physAddress, unsigned numEntries, + NvBool bVidMemAperture, NvU32 pasid, + NvU64 *dmaAdress); + +NV_STATUS nvGpuOpsUnsetPageDirectory(struct gpuAddressSpace * vaSpace); + +NV_STATUS nvGpuOpsGetGmmuFmt(struct gpuAddressSpace * vaSpace, void ** pFmt); + +NV_STATUS nvGpuOpsInvalidateTlb(struct gpuAddressSpace * vaSpace); + +NV_STATUS nvGpuOpsGetFbInfo(struct gpuDevice *device, gpuFbInfo * fbInfo); + +NV_STATUS nvGpuOpsGetEccInfo(struct gpuDevice *device, gpuEccInfo * eccInfo); + +NV_STATUS nvGpuOpsGetNvlinkInfo(struct gpuDevice *device, gpuNvlinkInfo * nvlinkInfo); + +NV_STATUS nvGpuOpsInitFaultInfo(struct gpuDevice *device, gpuFaultInfo *pFaultInfo); + +NV_STATUS nvGpuOpsDestroyFaultInfo(struct gpuDevice *device, + gpuFaultInfo *pFaultInfo); + +NV_STATUS nvGpuOpsHasPendingNonReplayableFaults(gpuFaultInfo *pFaultInfo, NvBool *hasPendingFaults); + +NV_STATUS nvGpuOpsGetNonReplayableFaults(gpuFaultInfo *pFaultInfo, void *faultBuffer, NvU32 *numFaults); + +NV_STATUS nvGpuOpsDupAddressSpace(struct gpuDevice *device, + NvHandle hUserClient, + NvHandle hUserVASpace, + struct gpuAddressSpace **vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo); + +NV_STATUS nvGpuOpsGetPmaObject(struct gpuDevice *device, + void **pPma, + const UvmPmaStatistics **pPmaPubStats); + +NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo, NvU32 accessCntrIndex); + +NV_STATUS nvGpuOpsDestroyAccessCntrInfo(struct gpuDevice *device, + gpuAccessCntrInfo *pAccessCntrInfo); + +NV_STATUS nvGpuOpsOwnAccessCntrIntr(struct gpuSession *session, + gpuAccessCntrInfo *pAccessCntrInfo, + NvBool bOwnInterrupts); + +NV_STATUS nvGpuOpsEnableAccessCntr(struct gpuDevice *device, + gpuAccessCntrInfo *pAccessCntrInfo, + const gpuAccessCntrConfig *pAccessCntrConfig); + +NV_STATUS nvGpuOpsDisableAccessCntr(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo); + +NV_STATUS nvGpuOpsP2pObjectCreate(struct gpuDevice *device1, + struct gpuDevice *device2, + NvHandle *hP2pObject); + +NV_STATUS nvGpuOpsP2pObjectDestroy(struct gpuSession *session, + NvHandle hP2pObject); + +NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace, + NvHandle hDupedMemory, + NvU64 offset, + NvU64 size, + gpuExternalMappingInfo *pGpuExternalMappingInfo); + +NV_STATUS nvGpuOpsGetExternalAllocPhysAddrs(struct gpuAddressSpace *vaSpace, + NvHandle hDupedMemory, + NvU64 offset, + NvU64 size, + gpuExternalPhysAddrInfo *pGpuExternalPhysAddrInfo); + +NV_STATUS nvGpuOpsRetainChannel(struct gpuAddressSpace *vaSpace, + NvHandle hClient, + NvHandle hChannel, + gpuRetainedChannel **retainedChannel, + gpuChannelInstanceInfo *channelInstanceInfo); + +void nvGpuOpsReleaseChannel(gpuRetainedChannel *retainedChannel); + 
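As an aside on how these entry points compose, here is a hedged sketch built only from the declarations in this header; the zero VA range, the disabled ATS flag, and the caller-supplied gpuInfo/UUID inputs are placeholder assumptions, not a documented sequence.

    /* Sketch: session -> device -> VA space, torn down in reverse order. */
    static NV_STATUS sketchSessionLifecycle(const gpuInfo *info,
                                            const NvProcessorUuid *uuid)
    {
        struct gpuSession *session;
        struct gpuDevice *device;
        gpuAddressSpaceHandle vaSpace;
        UvmGpuAddressSpaceInfo vaSpaceInfo;
        NV_STATUS status;

        status = nvGpuOpsCreateSession(&session);
        if (status != NV_OK)
            return status;

        status = nvGpuOpsDeviceCreate(session, info, uuid, &device, NV_FALSE);
        if (status != NV_OK)
            goto destroy_session;

        /* Placeholder VA range; real callers size this to the GPU. */
        status = nvGpuOpsAddressSpaceCreate(device, 0, 0, NV_FALSE,
                                            &vaSpace, &vaSpaceInfo);
        if (status != NV_OK)
            goto destroy_device;

        /* ... allocations, TSGs, channels, mappings ... */

        nvGpuOpsAddressSpaceDestroy(vaSpace);
    destroy_device:
        nvGpuOpsDeviceDestroy(device);
    destroy_session:
        nvGpuOpsDestroySession(session);
        return status;
    }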
+NV_STATUS nvGpuOpsBindChannelResources(gpuRetainedChannel *retainedChannel, + gpuChannelResourceBindParams *channelResourceBindParams); + +void nvGpuOpsStopChannel(gpuRetainedChannel *retainedChannel, NvBool bImmediate); + +NV_STATUS nvGpuOpsGetChannelResourcePtes(struct gpuAddressSpace *vaSpace, + NvP64 resourceDescriptor, + NvU64 offset, + NvU64 size, + gpuExternalMappingInfo *pGpuExternalMappingInfo); + +NV_STATUS nvGpuOpsReportNonReplayableFault(struct gpuDevice *device, + const void *pFaultPacket); + +// Private interface used for windows only + +#if defined(NV_WINDOWS) +NV_STATUS nvGpuOpsGetRmHandleForSession(gpuSessionHandle hSession, NvHandle *hRmClient); + +NV_STATUS nvGpuOpsGetRmHandleForChannel(gpuChannelHandle hChannel, NvHandle *hRmChannel); +#endif // WINDOWS + +// Interface used for SR-IOV heavy + +NV_STATUS nvGpuOpsPagingChannelAllocate(struct gpuDevice *device, + const gpuPagingChannelAllocParams *params, + gpuPagingChannelHandle *channelHandle, + gpuPagingChannelInfo *channelinfo); + +void nvGpuOpsPagingChannelDestroy(UvmGpuPagingChannel *channel); + +NV_STATUS nvGpuOpsPagingChannelsMap(struct gpuAddressSpace *srcVaSpace, + NvU64 srcAddress, + struct gpuDevice *device, + NvU64 *dstAddress); + +void nvGpuOpsPagingChannelsUnmap(struct gpuAddressSpace *srcVaSpace, + NvU64 srcAddress, + struct gpuDevice *device); + +NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel, + char *methodStream, + NvU32 methodStreamSize); + +NV_STATUS nvGpuOpsFlushReplayableFaultBuffer(gpuFaultInfo *pFaultInfo, + NvBool bCopyAndFlush); + +NV_STATUS nvGpuOpsTogglePrefetchFaults(gpuFaultInfo *pFaultInfo, + NvBool bEnable); + +void nvGpuOpsReportFatalError(NV_STATUS error); + +// Interface used for CCSL +NV_STATUS nvGpuOpsCcslContextInit(struct ccslContext_t **ctx, + gpuChannelHandle channel); +NV_STATUS nvGpuOpsCcslContextClear(struct ccslContext_t *ctx); +NV_STATUS nvGpuOpsCcslRotateKey(UvmCslContext *contextList[], + NvU32 contextListCount); +NV_STATUS nvGpuOpsCcslRotateIv(struct ccslContext_t *ctx, + NvU8 direction); +NV_STATUS nvGpuOpsCcslEncrypt(struct ccslContext_t *ctx, + NvU32 bufferSize, + NvU8 const *inputBuffer, + NvU8 *outputBuffer, + NvU8 *authTagBuffer); +NV_STATUS nvGpuOpsCcslEncryptWithIv(struct ccslContext_t *ctx, + NvU32 bufferSize, + NvU8 const *inputBuffer, + NvU8 *encryptIv, + NvU8 *outputBuffer, + NvU8 *authTagBuffer); +NV_STATUS nvGpuOpsCcslDecrypt(struct ccslContext_t *ctx, + NvU32 bufferSize, + NvU8 const *inputBuffer, + NvU8 const *decryptIv, + NvU32 keyRotationId, + NvU8 *outputBuffer, + NvU8 const *addAuthData, + NvU32 addAuthDataSize, + NvU8 const *authTagBuffer); +NV_STATUS nvGpuOpsCcslSign(struct ccslContext_t *ctx, + NvU32 bufferSize, + NvU8 const *inputBuffer, + NvU8 *authTagBuffer); +NV_STATUS nvGpuOpsQueryMessagePool(struct ccslContext_t *ctx, + NvU8 direction, + NvU64 *messageNum); +NV_STATUS nvGpuOpsIncrementIv(struct ccslContext_t *ctx, + NvU8 direction, + NvU64 increment, + NvU8 *iv); +NV_STATUS nvGpuOpsLogEncryption(struct ccslContext_t *ctx, + NvU8 direction, + NvU32 bufferSize); + +#endif /* _NV_GPU_OPS_H_*/ diff --git a/kernel-open/nvidia/nv_uvm_interface.c b/kernel-open/nvidia/nv_uvm_interface.c new file mode 100644 index 0000000..28788ed --- /dev/null +++ b/kernel-open/nvidia/nv_uvm_interface.c @@ -0,0 +1,1723 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file sets up the communication between the UVM driver and RM. RM + * calls into the UVM driver, providing the set of ops RM supports; the UVM + * driver then fills out the structure with the callbacks it supports. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +#if defined(NV_UVM_ENABLE) + +#include "nv_uvm_interface.h" +#include "nv_gpu_ops.h" +#include "rm-gpu-ops.h" + +// This is really a struct UvmEventsLinux *. It needs to be an atomic because it +// can be read outside of the g_pNvUvmEventsLock. Use getUvmEvents and +// setUvmEvents to access it. +static atomic_long_t g_pNvUvmEvents; +static struct semaphore g_pNvUvmEventsLock; + +static struct UvmEventsLinux *getUvmEvents(void) +{ + return (struct UvmEventsLinux *)atomic_long_read(&g_pNvUvmEvents); +} + +static void setUvmEvents(struct UvmEventsLinux *newEvents) +{ + atomic_long_set(&g_pNvUvmEvents, (long)newEvents); +} + +static nvidia_stack_t *g_sp; +static struct semaphore g_spLock; + +// Use these to test g_sp usage. When DEBUG_GLOBAL_STACK is enabled, one out of +// every DEBUG_GLOBAL_STACK_THRESHOLD calls to nvUvmGetSafeStack will use g_sp. +#define DEBUG_GLOBAL_STACK 0 +#define DEBUG_GLOBAL_STACK_THRESHOLD 2 + +static atomic_t g_debugGlobalStackCount = ATOMIC_INIT(0); + +// Called at module load, not by an external client +int nv_uvm_init(void) +{ + int rc = nv_kmem_cache_alloc_stack(&g_sp); + if (rc != 0) + return rc; + + NV_INIT_MUTEX(&g_spLock); + NV_INIT_MUTEX(&g_pNvUvmEventsLock); + return 0; +} + +void nv_uvm_exit(void) +{ + // If this fires, the dependent driver never unregistered its callbacks with + // us before going away, leaving us potentially making callbacks to garbage + // memory. + WARN_ON(getUvmEvents() != NULL); + + nv_kmem_cache_free_stack(g_sp); +} + + +// Testing code to force use of the global stack every now and then +static NvBool forceGlobalStack(void) +{ + // Make sure that we do not try to allocate memory in interrupt or atomic + // context + if (DEBUG_GLOBAL_STACK || !NV_MAY_SLEEP()) + { + if ((atomic_inc_return(&g_debugGlobalStackCount) % + DEBUG_GLOBAL_STACK_THRESHOLD) == 0) + return NV_TRUE; + } + return NV_FALSE; +} + +// Guaranteed to always return a valid stack. It first attempts to allocate one +// from the pool. If that fails, it falls back to the global pre-allocated +// stack. This fallback will serialize.
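// Note that the fallback serializes on g_spLock: a caller that falls back to
// g_sp takes the semaphore in nvUvmGetSafeStack() and holds it until it hands
// the stack back through nvUvmFreeSafeStack(), so concurrent fallback callers
// block in down() until then.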
+// +// This is required so paths that free resources do not themselves require +// allocation of resources. +static nvidia_stack_t *nvUvmGetSafeStack(void) +{ + nvidia_stack_t *sp; + if (forceGlobalStack() || nv_kmem_cache_alloc_stack(&sp) != 0) + { + sp = g_sp; + down(&g_spLock); + } + return sp; +} + +static void nvUvmFreeSafeStack(nvidia_stack_t *sp) +{ + if (sp == g_sp) + up(&g_spLock); + else + nv_kmem_cache_free_stack(sp); +} + +static NV_STATUS nvUvmDestroyFaultInfoAndStacks(nvidia_stack_t *sp, + uvmGpuDeviceHandle device, + UvmGpuFaultInfo *pFaultInfo) +{ + nv_kmem_cache_free_stack(pFaultInfo->replayable.cslCtx.nvidia_stack); + nv_kmem_cache_free_stack(pFaultInfo->nonReplayable.isr_bh_sp); + nv_kmem_cache_free_stack(pFaultInfo->nonReplayable.isr_sp); + + return rm_gpu_ops_destroy_fault_info(sp, + (gpuDeviceHandle)device, + pFaultInfo); +} + +NV_STATUS nvUvmInterfaceRegisterGpu(const NvProcessorUuid *gpuUuid, UvmGpuPlatformInfo *gpuInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + int rc; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + return NV_ERR_NO_MEMORY; + + rc = nvidia_dev_get_uuid(gpuUuid->uuid, sp); + if (rc == 0) + { + rc = nvidia_dev_get_pci_info(gpuUuid->uuid, + &gpuInfo->pci_dev, + &gpuInfo->dma_addressable_start, + &gpuInfo->dma_addressable_limit); + + // Block GPU from entering GC6 while used by UVM. + if (rc == 0) + rc = nvidia_dev_block_gc6(gpuUuid->uuid, sp); + + // Avoid leaking reference on GPU if we failed. + if (rc != 0) + nvidia_dev_put_uuid(gpuUuid->uuid, sp); + } + + switch (rc) + { + case 0: + status = NV_OK; + break; + case -ENOMEM: + status = NV_ERR_NO_MEMORY; + break; + case -ENODEV: + status = NV_ERR_GPU_UUID_NOT_FOUND; + break; + default: + status = NV_ERR_GENERIC; + break; + } + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceRegisterGpu); + +void nvUvmInterfaceUnregisterGpu(const NvProcessorUuid *gpuUuid) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + nvidia_dev_unblock_gc6(gpuUuid->uuid, sp); + nvidia_dev_put_uuid(gpuUuid->uuid, sp); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceUnregisterGpu); + +NV_STATUS nvUvmInterfaceSessionCreate(uvmGpuSessionHandle *session, + UvmPlatformInfo *platformInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + memset(platformInfo, 0, sizeof(*platformInfo)); + platformInfo->atsSupported = nv_ats_supported; + platformInfo->confComputingEnabled = os_cc_enabled; + + status = rm_gpu_ops_create_session(sp, (gpuSessionHandle *)session); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceSessionCreate); + +NV_STATUS nvUvmInterfaceSessionDestroy(uvmGpuSessionHandle session) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_destroy_session(sp, (gpuSessionHandle)session); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceSessionDestroy); + +NV_STATUS nvUvmInterfaceDeviceCreate(uvmGpuSessionHandle session, + const UvmGpuInfo *pGpuInfo, + const NvProcessorUuid *gpuUuid, + uvmGpuDeviceHandle *device, + NvBool bCreateSmcPartition) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_device_create(sp, + (gpuSessionHandle)session, + (const gpuInfo *)pGpuInfo, + gpuUuid, + (gpuDeviceHandle *)device, + bCreateSmcPartition); + + nv_kmem_cache_free_stack(sp); + return status; +} 
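nvUvmInterfaceDeviceCreate above shows the wrapper shape that nearly every entry point in this file repeats: allocate a per-call altstack for the call into RM, forward the arguments to the matching rm_gpu_ops_* shim, then free the stack and return RM's status. A minimal sketch of the pattern (exampleRmCall is a hypothetical stand-in for whichever rm_gpu_ops_* shim is being wrapped):

    static NV_STATUS nvUvmExampleWrapper(uvmGpuDeviceHandle device)
    {
        nvidia_stack_t *sp = NULL;
        NV_STATUS status;

        // Each call into RM runs on its own altstack; the only failure
        // mode at this layer is memory pressure.
        if (nv_kmem_cache_alloc_stack(&sp) != 0)
            return NV_ERR_NO_MEMORY;

        status = exampleRmCall(sp, (gpuDeviceHandle)device);

        nv_kmem_cache_free_stack(sp);
        return status;
    }

Destroy-path wrappers differ only in using nvUvmGetSafeStack()/nvUvmFreeSafeStack() instead, so that freeing resources can never itself fail for lack of memory. The EXPORT_SYMBOL following each definition is what makes the entry point linkable from the separately built UVM module.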
+EXPORT_SYMBOL(nvUvmInterfaceDeviceCreate); + +void nvUvmInterfaceDeviceDestroy(uvmGpuDeviceHandle device) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_device_destroy(sp, (gpuDeviceHandle)device); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceDeviceDestroy); + +NV_STATUS nvUvmInterfaceDupAddressSpace(uvmGpuDeviceHandle device, + NvHandle hUserClient, + NvHandle hUserVASpace, + uvmGpuAddressSpaceHandle *vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_dup_address_space(sp, + (gpuDeviceHandle)device, + hUserClient, + hUserVASpace, + (gpuAddressSpaceHandle *)vaSpace, + vaSpaceInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDupAddressSpace); + +NV_STATUS nvUvmInterfaceAddressSpaceCreate(uvmGpuDeviceHandle device, + unsigned long long vaBase, + unsigned long long vaSize, + NvBool enableAts, + uvmGpuAddressSpaceHandle *vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_address_space_create(sp, + (gpuDeviceHandle)device, + vaBase, + vaSize, + enableAts, + (gpuAddressSpaceHandle *)vaSpace, + vaSpaceInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceAddressSpaceCreate); + +void nvUvmInterfaceAddressSpaceDestroy(uvmGpuAddressSpaceHandle vaSpace) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_address_space_destroy( + sp, (gpuAddressSpaceHandle)vaSpace); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceAddressSpaceDestroy); + +NV_STATUS nvUvmInterfaceMemoryAllocFB(uvmGpuAddressSpaceHandle vaSpace, + NvLength length, UvmGpuPointer * gpuPointer, + UvmGpuAllocInfo * allocInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_memory_alloc_fb( + sp, (gpuAddressSpaceHandle)vaSpace, + length, (NvU64 *) gpuPointer, + allocInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceMemoryAllocFB); + +NV_STATUS nvUvmInterfaceMemoryAllocSys(uvmGpuAddressSpaceHandle vaSpace, + NvLength length, UvmGpuPointer * gpuPointer, + UvmGpuAllocInfo * allocInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_memory_alloc_sys( + sp, (gpuAddressSpaceHandle)vaSpace, + length, (NvU64 *) gpuPointer, + allocInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} + +EXPORT_SYMBOL(nvUvmInterfaceMemoryAllocSys); + +NV_STATUS nvUvmInterfaceGetP2PCaps(uvmGpuDeviceHandle device1, + uvmGpuDeviceHandle device2, + UvmGpuP2PCapsParams * p2pCapsParams) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_p2p_caps(sp, + (gpuDeviceHandle)device1, + (gpuDeviceHandle)device2, + p2pCapsParams); + nv_kmem_cache_free_stack(sp); + return status; +} + +EXPORT_SYMBOL(nvUvmInterfaceGetP2PCaps); + +NV_STATUS nvUvmInterfaceGetPmaObject(uvmGpuDeviceHandle device, + void **pPma, + const UvmPmaStatistics **pPmaPubStats) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = 
rm_gpu_ops_get_pma_object(sp, (gpuDeviceHandle)device, pPma, (const nvgpuPmaStatistics_t *)pPmaPubStats); + + nv_kmem_cache_free_stack(sp); + return status; +} + +EXPORT_SYMBOL(nvUvmInterfaceGetPmaObject); + +NV_STATUS nvUvmInterfacePmaRegisterEvictionCallbacks(void *pPma, + uvmPmaEvictPagesCallback evictPages, + uvmPmaEvictRangeCallback evictRange, + void *callbackData) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_pma_register_callbacks(sp, pPma, evictPages, evictRange, callbackData); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePmaRegisterEvictionCallbacks); + +void nvUvmInterfacePmaUnregisterEvictionCallbacks(void *pPma) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_pma_unregister_callbacks(sp, pPma); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfacePmaUnregisterEvictionCallbacks); + +NV_STATUS nvUvmInterfacePmaAllocPages(void *pPma, + NvLength pageCount, + NvU64 pageSize, + UvmPmaAllocationOptions *pPmaAllocOptions, + NvU64 *pPages) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_pma_alloc_pages( + sp, pPma, + pageCount, + pageSize, + (nvgpuPmaAllocationOptions_t)pPmaAllocOptions, + pPages); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePmaAllocPages); + +NV_STATUS nvUvmInterfacePmaPinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU64 pageSize, + NvU32 flags) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_pma_pin_pages(sp, pPma, pPages, pageCount, pageSize, flags); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePmaPinPages); + +void nvUvmInterfaceMemoryFree(uvmGpuAddressSpaceHandle vaSpace, + UvmGpuPointer gpuPointer) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_memory_free( + sp, (gpuAddressSpaceHandle)vaSpace, + (NvU64) gpuPointer); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceMemoryFree); + +void nvUvmInterfacePmaFreePages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU64 pageSize, + NvU32 flags) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_pma_free_pages(sp, pPma, pPages, pageCount, pageSize, flags); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfacePmaFreePages); + +NV_STATUS nvUvmInterfaceMemoryCpuMap(uvmGpuAddressSpaceHandle vaSpace, + UvmGpuPointer gpuPointer, NvLength length, void **cpuPtr, + NvU64 pageSize) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_memory_cpu_map( + sp, (gpuAddressSpaceHandle)vaSpace, + (NvU64) gpuPointer, length, cpuPtr, pageSize); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceMemoryCpuMap); + +void nvUvmInterfaceMemoryCpuUnMap(uvmGpuAddressSpaceHandle vaSpace, + void *cpuPtr) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + rm_gpu_ops_memory_cpu_ummap(sp, (gpuAddressSpaceHandle)vaSpace, cpuPtr); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceMemoryCpuUnMap); + +NV_STATUS nvUvmInterfaceTsgAllocate(uvmGpuAddressSpaceHandle vaSpace, + const UvmGpuTsgAllocParams *allocParams, + uvmGpuTsgHandle *tsg) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if 
(nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_tsg_allocate(sp, + (gpuAddressSpaceHandle)vaSpace, + allocParams, + (gpuTsgHandle *)tsg); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceTsgAllocate); + +void nvUvmInterfaceTsgDestroy(uvmGpuTsgHandle tsg) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + rm_gpu_ops_tsg_destroy(sp, (gpuTsgHandle)tsg); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceTsgDestroy); + + +NV_STATUS nvUvmInterfaceChannelAllocate(const uvmGpuTsgHandle tsg, + const UvmGpuChannelAllocParams *allocParams, + uvmGpuChannelHandle *channel, + UvmGpuChannelInfo *channelInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_channel_allocate(sp, + (gpuTsgHandle)tsg, + allocParams, + (gpuChannelHandle *)channel, + channelInfo); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceChannelAllocate); + +void nvUvmInterfaceChannelDestroy(uvmGpuChannelHandle channel) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + rm_gpu_ops_channel_destroy(sp, (gpuChannelHandle)channel); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceChannelDestroy); + +NV_STATUS nvUvmInterfaceQueryCaps(uvmGpuDeviceHandle device, + UvmGpuCaps * caps) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_query_caps(sp, (gpuDeviceHandle)device, caps); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceQueryCaps); + +NV_STATUS nvUvmInterfaceQueryCopyEnginesCaps(uvmGpuDeviceHandle device, + UvmGpuCopyEnginesCaps *caps) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_query_ces_caps(sp, (gpuDeviceHandle)device, caps); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceQueryCopyEnginesCaps); + +NV_STATUS nvUvmInterfaceGetGpuInfo(const NvProcessorUuid *gpuUuid, + const UvmGpuClientInfo *pGpuClientInfo, + UvmGpuInfo *pGpuInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_gpu_info(sp, gpuUuid, pGpuClientInfo, pGpuInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetGpuInfo); + +NV_STATUS nvUvmInterfaceServiceDeviceInterruptsRM(uvmGpuDeviceHandle device) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_service_device_interrupts_rm(sp, + (gpuDeviceHandle)device); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceServiceDeviceInterruptsRM); + +NV_STATUS nvUvmInterfaceSetPageDirectory(uvmGpuAddressSpaceHandle vaSpace, + NvU64 physAddress, unsigned numEntries, + NvBool bVidMemAperture, NvU32 pasid, + NvU64 *dmaAddress) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_set_page_directory(sp, (gpuAddressSpaceHandle)vaSpace, + physAddress, numEntries, bVidMemAperture, pasid, + dmaAddress); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceSetPageDirectory); + +NV_STATUS 
nvUvmInterfaceUnsetPageDirectory(uvmGpuAddressSpaceHandle vaSpace) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = + rm_gpu_ops_unset_page_directory(sp, (gpuAddressSpaceHandle)vaSpace); + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceUnsetPageDirectory); + +NV_STATUS nvUvmInterfaceDupAllocation(uvmGpuAddressSpaceHandle srcVaSpace, + NvU64 srcAddress, + uvmGpuAddressSpaceHandle dstVaSpace, + NvU64 dstVaAlignment, + NvU64 *dstAddress) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_dup_allocation(sp, + (gpuAddressSpaceHandle)srcVaSpace, + srcAddress, + (gpuAddressSpaceHandle)dstVaSpace, + dstVaAlignment, + dstAddress); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDupAllocation); + +NV_STATUS nvUvmInterfaceDupMemory(uvmGpuDeviceHandle device, + NvHandle hClient, + NvHandle hPhysMemory, + NvHandle *hDupMemory, + UvmGpuMemoryInfo *pGpuMemoryInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_dup_memory(sp, + (gpuDeviceHandle)device, + hClient, + hPhysMemory, + hDupMemory, + pGpuMemoryInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDupMemory); + + +NV_STATUS nvUvmInterfaceFreeDupedHandle(uvmGpuDeviceHandle device, + NvHandle hPhysHandle) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_free_duped_handle(sp, + (gpuDeviceHandle)device, + hPhysHandle); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceFreeDupedHandle); + +NV_STATUS nvUvmInterfaceGetFbInfo(uvmGpuDeviceHandle device, + UvmGpuFbInfo * fbInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_fb_info(sp, (gpuDeviceHandle)device, fbInfo); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetFbInfo); + +NV_STATUS nvUvmInterfaceGetEccInfo(uvmGpuDeviceHandle device, + UvmGpuEccInfo * eccInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_ecc_info(sp, (gpuDeviceHandle)device, eccInfo); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetEccInfo); + +NV_STATUS nvUvmInterfaceOwnPageFaultIntr(uvmGpuDeviceHandle device, NvBool bOwnInterrupts) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_own_page_fault_intr(sp, (gpuDeviceHandle)device, bOwnInterrupts); + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceOwnPageFaultIntr); + + +NV_STATUS nvUvmInterfaceInitFaultInfo(uvmGpuDeviceHandle device, + UvmGpuFaultInfo *pFaultInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + int err; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_init_fault_info(sp, + (gpuDeviceHandle)device, + pFaultInfo); + if (status != NV_OK) + { + goto done; + } + + // Preallocate a stack for functions called from ISR top half + pFaultInfo->nonReplayable.isr_sp = NULL; + pFaultInfo->nonReplayable.isr_bh_sp = NULL; + pFaultInfo->replayable.cslCtx.nvidia_stack = NULL; + + // 
NOTE: nv_kmem_cache_alloc_stack does not allocate a stack on PPC. + // Therefore, the pointer can be NULL on success. Always use the + // returned error code to determine if the operation was successful. + err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_sp); + if (err) + { + goto error; + } + + err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_bh_sp); + if (err) + { + goto error; + } + + // The cslCtx.ctx pointer is not NULL only when ConfidentialComputing is enabled. + if (pFaultInfo->replayable.cslCtx.ctx != NULL) + { + err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->replayable.cslCtx.nvidia_stack); + if (err) + { + goto error; + } + } + + goto done; + +error: + nvUvmDestroyFaultInfoAndStacks(sp, + device, + pFaultInfo); + status = NV_ERR_NO_MEMORY; +done: + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceInitFaultInfo); + +NV_STATUS nvUvmInterfaceInitAccessCntrInfo(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo, + NvU32 accessCntrIndex) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_init_access_cntr_info(sp, + (gpuDeviceHandle)device, + pAccessCntrInfo, + accessCntrIndex); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceInitAccessCntrInfo); + +NV_STATUS nvUvmInterfaceEnableAccessCntr(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo, + const UvmGpuAccessCntrConfig *pAccessCntrConfig) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_enable_access_cntr (sp, + (gpuDeviceHandle)device, + pAccessCntrInfo, + pAccessCntrConfig); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceEnableAccessCntr); + +NV_STATUS nvUvmInterfaceDestroyFaultInfo(uvmGpuDeviceHandle device, + UvmGpuFaultInfo *pFaultInfo) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = nvUvmDestroyFaultInfoAndStacks(sp, + device, + pFaultInfo); + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDestroyFaultInfo); + +NV_STATUS nvUvmInterfaceHasPendingNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo, + NvBool *hasPendingFaults) +{ + return rm_gpu_ops_has_pending_non_replayable_faults(pFaultInfo->nonReplayable.isr_sp, + pFaultInfo, + hasPendingFaults); +} +EXPORT_SYMBOL(nvUvmInterfaceHasPendingNonReplayableFaults); + +NV_STATUS nvUvmInterfaceGetNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo, + void *pFaultBuffer, + NvU32 *numFaults) +{ + return rm_gpu_ops_get_non_replayable_faults(pFaultInfo->nonReplayable.isr_bh_sp, + pFaultInfo, + pFaultBuffer, + numFaults); +} +EXPORT_SYMBOL(nvUvmInterfaceGetNonReplayableFaults); + +NV_STATUS nvUvmInterfaceFlushReplayableFaultBuffer(UvmGpuFaultInfo *pFaultInfo, + NvBool bCopyAndFlush) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_flush_replayable_fault_buffer(sp, + pFaultInfo, + bCopyAndFlush); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceFlushReplayableFaultBuffer); + +NV_STATUS nvUvmInterfaceTogglePrefetchFaults(UvmGpuFaultInfo *pFaultInfo, + NvBool bEnable) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_toggle_prefetch_faults(sp, + pFaultInfo, + bEnable); + + nvUvmFreeSafeStack(sp); + return status; 
+} +EXPORT_SYMBOL(nvUvmInterfaceTogglePrefetchFaults); + +NV_STATUS nvUvmInterfaceDestroyAccessCntrInfo(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_destroy_access_cntr_info(sp, + (gpuDeviceHandle)device, + pAccessCntrInfo); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDestroyAccessCntrInfo); + +NV_STATUS nvUvmInterfaceDisableAccessCntr(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_disable_access_cntr(sp, + (gpuDeviceHandle)device, + pAccessCntrInfo); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDisableAccessCntr); + +// this function is called by the UVM driver to register the event callbacks +NV_STATUS nvUvmInterfaceRegisterUvmEvents(struct UvmEventsLinux *importedEvents) +{ + NV_STATUS status = NV_OK; + + if (!importedEvents) + { + return NV_ERR_INVALID_ARGUMENT; + } + + down(&g_pNvUvmEventsLock); + if (getUvmEvents() != NULL) + { + status = NV_ERR_IN_USE; + } + else + { + // Be careful: as soon as the pointer is assigned, top half ISRs can + // start reading it to make callbacks, even before we drop the lock. + setUvmEvents(importedEvents); + } + up(&g_pNvUvmEventsLock); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceRegisterUvmEvents); + +static void flush_top_half(void *info) +{ + // Prior top halves on this core must have completed for this callback to + // run at all, so we're done. + return; +} + +void nvUvmInterfaceDeRegisterUvmEvents(void) +{ + // Taking the lock forces us to wait for non-interrupt callbacks to finish + // up. + down(&g_pNvUvmEventsLock); + setUvmEvents(NULL); + up(&g_pNvUvmEventsLock); + + // We cleared the pointer so nv_uvm_event_interrupt can't invoke any new + // top half callbacks, but prior ones could still be executing on other + // cores. We can wait for them to finish by waiting for a context switch to + // happen on every core. + // + // This is slow, but since nvUvmInterfaceDeRegisterUvmEvents is very rare + // (module unload) it beats having the top half synchronize with a spin lock + // every time. + // + // Note that since we dropped the lock, another set of callbacks could have + // already been registered. That's ok, since we just need to wait for old + // ones to finish. + on_each_cpu(flush_top_half, NULL, 1); +} +EXPORT_SYMBOL(nvUvmInterfaceDeRegisterUvmEvents); + +NV_STATUS nv_uvm_suspend(void) +{ + NV_STATUS status = NV_OK; + struct UvmEventsLinux *events; + + // Synchronize callbacks with unregistration + down(&g_pNvUvmEventsLock); + + // It's not strictly necessary to use a cached local copy of the events + // pointer here since it can't change under the lock, but we'll do it for + // consistency. + events = getUvmEvents(); + if (events && events->suspend) + { + status = events->suspend(); + } + + up(&g_pNvUvmEventsLock); + + return status; +} + +NV_STATUS nv_uvm_resume(void) +{ + NV_STATUS status = NV_OK; + struct UvmEventsLinux *events; + + // Synchronize callbacks with unregistration + down(&g_pNvUvmEventsLock); + + // It's not strictly necessary to use a cached local copy of the events + // pointer here since it can't change under the lock, but we'll do it for + // consistency. 
+ events = getUvmEvents(); + if (events && events->resume) + { + status = events->resume(); + } + + up(&g_pNvUvmEventsLock); + + return status; +} + +NV_STATUS nv_uvm_event_interrupt(const NvU8 *pUuid) +{ + // + // This is called from interrupt context, so we can't take + // g_pNvUvmEventsLock to prevent the callbacks from being unregistered. Even + // if we could take the lock, we don't want to slow down the ISR more than + // absolutely necessary. + // + // Instead, we allow this function to be called concurrently with + // nvUvmInterfaceDeRegisterUvmEvents. That function will clear the events + // pointer, then wait for all top halves to finish out. This means the + // pointer may change out from under us, but the callbacks are still safe to + // invoke while we're in this function. + // + // This requires that we read the pointer exactly once here so neither we + // nor the compiler make assumptions about the pointer remaining valid while + // in this function. + // + struct UvmEventsLinux *events = getUvmEvents(); + + if (events && events->isrTopHalf) + return events->isrTopHalf((const NvProcessorUuid *)pUuid); + + // + // NV_OK means that the interrupt was for the UVM driver, so use + // NV_ERR_NO_INTR_PENDING to tell the caller that we didn't do anything. + // + return NV_ERR_NO_INTR_PENDING; +} + +NV_STATUS nvUvmInterfaceGetNvlinkInfo(uvmGpuDeviceHandle device, + UvmGpuNvlinkInfo *nvlinkInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_nvlink_info(sp, (gpuDeviceHandle)device, nvlinkInfo); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetNvlinkInfo); + +NV_STATUS nv_uvm_drain_P2P(const NvU8 *uuid) +{ + NvProcessorUuid uvmUuid; + struct UvmEventsLinux *events; + NV_STATUS ret = NV_ERR_NOT_SUPPORTED; + + memcpy(uvmUuid.uuid, uuid, NV_UUID_LEN); + + // Synchronize callbacks with unregistration + down(&g_pNvUvmEventsLock); + + // It's not strictly necessary to use a cached local copy of the events + // pointer here since it can't change under the lock, but we'll do it for + // consistency. + events = getUvmEvents(); + if(events && events->drainP2P) + { + ret = events->drainP2P(&uvmUuid); + } + up(&g_pNvUvmEventsLock); + + return ret; +} + +NV_STATUS nv_uvm_resume_P2P(const NvU8 *uuid) +{ + NvProcessorUuid uvmUuid; + struct UvmEventsLinux *events; + NV_STATUS ret = NV_ERR_NOT_SUPPORTED; + + memcpy(uvmUuid.uuid, uuid, NV_UUID_LEN); + + // Synchronize callbacks with unregistration + down(&g_pNvUvmEventsLock); + + // It's not strictly necessary to use a cached local copy of the events + // pointer here since it can't change under the lock, but we'll do it for + // consistency. 
+ events = getUvmEvents(); + if(events && events->resumeP2P) + { + ret = events->resumeP2P(&uvmUuid); + } + up(&g_pNvUvmEventsLock); + + return ret; +} + +NV_STATUS nvUvmInterfaceP2pObjectCreate(uvmGpuDeviceHandle device1, + uvmGpuDeviceHandle device2, + NvHandle *hP2pObject) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_p2p_object_create(sp, + (gpuDeviceHandle)device1, + (gpuDeviceHandle)device2, + hP2pObject); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceP2pObjectCreate); + +void nvUvmInterfaceP2pObjectDestroy(uvmGpuSessionHandle session, + NvHandle hP2pObject) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_p2p_object_destroy(sp, (gpuSessionHandle)session, hP2pObject); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceP2pObjectDestroy); + +NV_STATUS nvUvmInterfaceGetExternalAllocPtes(uvmGpuAddressSpaceHandle vaSpace, + NvHandle hDupedMemory, + NvU64 offset, + NvU64 size, + UvmGpuExternalMappingInfo *gpuExternalMappingInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_external_alloc_ptes(sp, + (gpuAddressSpaceHandle)vaSpace, + hDupedMemory, + offset, + size, + gpuExternalMappingInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetExternalAllocPtes); + +NV_STATUS nvUvmInterfaceGetExternalAllocPhysAddrs(uvmGpuAddressSpaceHandle vaSpace, + NvHandle hDupedMemory, + NvU64 offset, + NvU64 size, + UvmGpuExternalPhysAddrInfo *gpuExternalPhysAddrInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_external_alloc_phys_addrs(sp, + (gpuAddressSpaceHandle)vaSpace, + hDupedMemory, + offset, + size, + gpuExternalPhysAddrInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetExternalAllocPhysAddrs); + +NV_STATUS nvUvmInterfaceRetainChannel(uvmGpuAddressSpaceHandle vaSpace, + NvHandle hClient, + NvHandle hChannel, + void **retainedChannel, + UvmGpuChannelInstanceInfo *channelInstanceInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_retain_channel(sp, + (gpuAddressSpaceHandle)vaSpace, + hClient, + hChannel, + retainedChannel, + channelInstanceInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceRetainChannel); + +NV_STATUS nvUvmInterfaceBindChannelResources(void *retainedChannel, + UvmGpuChannelResourceBindParams *channelResourceBindParams) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_bind_channel_resources(sp, + retainedChannel, + channelResourceBindParams); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceBindChannelResources); + +void nvUvmInterfaceReleaseChannel(void *retainedChannel) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_release_channel(sp, retainedChannel); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceReleaseChannel); + +void nvUvmInterfaceStopChannel(void *retainedChannel, NvBool bImmediate) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_stop_channel(sp, retainedChannel, bImmediate); + + 
nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceStopChannel); + +NV_STATUS nvUvmInterfaceGetChannelResourcePtes(uvmGpuAddressSpaceHandle vaSpace, + NvP64 resourceDescriptor, + NvU64 offset, + NvU64 size, + UvmGpuExternalMappingInfo *externalMappingInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_channel_resource_ptes(sp, + (gpuAddressSpaceHandle)vaSpace, + resourceDescriptor, + offset, + size, + externalMappingInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetChannelResourcePtes); + +NV_STATUS nvUvmInterfaceReportNonReplayableFault(uvmGpuDeviceHandle device, + const void *pFaultPacket) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_report_non_replayable_fault(sp, (gpuDeviceHandle)device, pFaultPacket); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceReportNonReplayableFault); + +NV_STATUS nvUvmInterfacePagingChannelAllocate(uvmGpuDeviceHandle device, + const UvmGpuPagingChannelAllocParams *allocParams, + UvmGpuPagingChannelHandle *channel, + UvmGpuPagingChannelInfo *channelInfo) +{ + nvidia_stack_t *sp = NULL; + nvidia_stack_t *pushStreamSp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + return NV_ERR_NO_MEMORY; + + if (nv_kmem_cache_alloc_stack(&pushStreamSp) != 0) + { + nv_kmem_cache_free_stack(sp); + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_paging_channel_allocate(sp, + (gpuDeviceHandle)device, + allocParams, + (gpuPagingChannelHandle *)channel, + channelInfo); + + if (status == NV_OK) + (*channel)->pushStreamSp = pushStreamSp; + else + nv_kmem_cache_free_stack(pushStreamSp); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePagingChannelAllocate); + +void nvUvmInterfacePagingChannelDestroy(UvmGpuPagingChannelHandle channel) +{ + nvidia_stack_t *sp; + + if (channel == NULL) + return; + + sp = nvUvmGetSafeStack(); + nv_kmem_cache_free_stack(channel->pushStreamSp); + rm_gpu_ops_paging_channel_destroy(sp, (gpuPagingChannelHandle)channel); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfacePagingChannelDestroy); + +NV_STATUS nvUvmInterfacePagingChannelsMap(uvmGpuAddressSpaceHandle srcVaSpace, + UvmGpuPointer srcAddress, + uvmGpuDeviceHandle device, + NvU64 *dstAddress) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + return NV_ERR_NO_MEMORY; + + status = rm_gpu_ops_paging_channels_map(sp, + (gpuAddressSpaceHandle)srcVaSpace, + (NvU64)srcAddress, + (gpuDeviceHandle)device, + dstAddress); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePagingChannelsMap); + +void nvUvmInterfacePagingChannelsUnmap(uvmGpuAddressSpaceHandle srcVaSpace, + UvmGpuPointer srcAddress, + uvmGpuDeviceHandle device) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + rm_gpu_ops_paging_channels_unmap(sp, + (gpuAddressSpaceHandle)srcVaSpace, + (NvU64)srcAddress, + (gpuDeviceHandle)device); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfacePagingChannelsUnmap); + +NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channel, + char *methodStream, + NvU32 methodStreamSize) +{ + return rm_gpu_ops_paging_channel_push_stream(channel->pushStreamSp, + (gpuPagingChannelHandle)channel, + methodStream, + methodStreamSize); +} +EXPORT_SYMBOL(nvUvmInterfacePagingChannelPushStream); + +void 
nvUvmInterfaceReportFatalError(NV_STATUS error) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + rm_gpu_ops_report_fatal_error(sp, error); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceReportFatalError); + +NV_STATUS nvUvmInterfaceCslInitContext(UvmCslContext *uvmCslContext, + uvmGpuChannelHandle channel) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_ccsl_context_init(sp, &uvmCslContext->ctx, (gpuChannelHandle)channel); + + // Saving the stack in the context allows UVM to safely use the CSL layer + // in interrupt context without making new allocations. UVM serializes CSL + // API usage for a given context so the stack pointer does not need + // additional protection. + if (status != NV_OK) + { + nv_kmem_cache_free_stack(sp); + } + else + { + uvmCslContext->nvidia_stack = sp; + } + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceCslInitContext); + +void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext) +{ + nvidia_stack_t *sp = uvmCslContext->nvidia_stack; + rm_gpu_ops_ccsl_context_clear(sp, uvmCslContext->ctx); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceDeinitCslContext); + +NV_STATUS nvUvmInterfaceCslRotateKey(UvmCslContext *contextList[], + NvU32 contextListCount) +{ + NV_STATUS status; + nvidia_stack_t *sp; + + if ((contextList == NULL) || (contextListCount == 0) || (contextList[0] == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + sp = contextList[0]->nvidia_stack; + status = rm_gpu_ops_ccsl_rotate_key(sp, contextList, contextListCount); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceCslRotateKey); + +NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext, + UvmCslOperation operation) +{ + NV_STATUS status; + nvidia_stack_t *sp = uvmCslContext->nvidia_stack; + + status = rm_gpu_ops_ccsl_rotate_iv(sp, uvmCslContext->ctx, operation); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceCslRotateIv); + +NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext, + NvU32 bufferSize, + NvU8 const *inputBuffer, + UvmCslIv *encryptIv, + NvU8 *outputBuffer, + NvU8 *authTagBuffer) +{ + NV_STATUS status; + nvidia_stack_t *sp = uvmCslContext->nvidia_stack; + + if (encryptIv != NULL) + status = rm_gpu_ops_ccsl_encrypt_with_iv(sp, uvmCslContext->ctx, bufferSize, inputBuffer, (NvU8*)encryptIv, outputBuffer, authTagBuffer); + else + status = rm_gpu_ops_ccsl_encrypt(sp, uvmCslContext->ctx, bufferSize, inputBuffer, outputBuffer, authTagBuffer); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceCslEncrypt); + +NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext, + NvU32 bufferSize, + NvU8 const *inputBuffer, + UvmCslIv const *decryptIv, + NvU32 keyRotationId, + NvU8 *outputBuffer, + NvU8 const *addAuthData, + NvU32 addAuthDataSize, + NvU8 const *authTagBuffer) +{ + NV_STATUS status; + nvidia_stack_t *sp = uvmCslContext->nvidia_stack; + + status = rm_gpu_ops_ccsl_decrypt(sp, + uvmCslContext->ctx, + bufferSize, + inputBuffer, + (NvU8 *)decryptIv, + keyRotationId, + outputBuffer, + addAuthData, + addAuthDataSize, + authTagBuffer); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceCslDecrypt); + +NV_STATUS nvUvmInterfaceCslSign(UvmCslContext *uvmCslContext, + NvU32 bufferSize, + NvU8 const *inputBuffer, + NvU8 *authTagBuffer) +{ + NV_STATUS status; + nvidia_stack_t *sp = uvmCslContext->nvidia_stack; + + status = rm_gpu_ops_ccsl_sign(sp, uvmCslContext->ctx, bufferSize, inputBuffer, authTagBuffer); + + return status; +} 
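The CSL entry points above all run on the stack that nvUvmInterfaceCslInitContext stashed in the context, which is what lets UVM call them from interrupt context without allocating. As a hedged illustration of how they compose (the 16-byte tag size is an assumption in the style of AES-GCM, and UVM_CSL_OPERATION_ENCRYPT is assumed to be the encrypt direction of the UvmCslOperation enum; neither is defined in this file):

    static NV_STATUS exampleCslEncryptAndLog(UvmCslContext *ctx)
    {
        NvU8 plaintext[64] = {0};
        NvU8 ciphertext[64];
        NvU8 authTag[16];   // GCM-style tag size, assumed for the example
        NV_STATUS status;

        // Passing encryptIv == NULL lets the CSL layer manage the IV itself;
        // a non-NULL IV routes to rm_gpu_ops_ccsl_encrypt_with_iv instead.
        status = nvUvmInterfaceCslEncrypt(ctx, sizeof(plaintext), plaintext,
                                          NULL, ciphertext, authTag);
        if (status != NV_OK)
            return status;

        // Record the traffic so key-rotation bookkeeping stays accurate.
        return nvUvmInterfaceCslLogEncryption(ctx, UVM_CSL_OPERATION_ENCRYPT,
                                              sizeof(plaintext));
    }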
+EXPORT_SYMBOL(nvUvmInterfaceCslSign); + +NV_STATUS nvUvmInterfaceCslQueryMessagePool(UvmCslContext *uvmCslContext, + UvmCslOperation operation, + NvU64 *messageNum) +{ + NV_STATUS status; + nvidia_stack_t *sp = uvmCslContext->nvidia_stack; + + status = rm_gpu_ops_ccsl_query_message_pool(sp, uvmCslContext->ctx, operation, messageNum); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceCslQueryMessagePool); + +NV_STATUS nvUvmInterfaceCslIncrementIv(UvmCslContext *uvmCslContext, + UvmCslOperation operation, + NvU64 increment, + UvmCslIv *iv) +{ + NV_STATUS status; + nvidia_stack_t *sp = uvmCslContext->nvidia_stack; + + status = rm_gpu_ops_ccsl_increment_iv(sp, uvmCslContext->ctx, operation, increment, (NvU8 *)iv); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceCslIncrementIv); + +NV_STATUS nvUvmInterfaceCslLogEncryption(UvmCslContext *uvmCslContext, + UvmCslOperation operation, + NvU32 bufferSize) +{ + NV_STATUS status; + nvidia_stack_t *sp = uvmCslContext->nvidia_stack; + + status = rm_gpu_ops_ccsl_log_encryption(sp, uvmCslContext->ctx, operation, bufferSize); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceCslLogEncryption); + +#else // NV_UVM_ENABLE + +NV_STATUS nv_uvm_suspend(void) +{ + return NV_OK; +} + +NV_STATUS nv_uvm_resume(void) +{ + return NV_OK; +} + +#endif // NV_UVM_ENABLE diff --git a/kernel-open/nvidia/nvidia-sources.Kbuild b/kernel-open/nvidia/nvidia-sources.Kbuild new file mode 100644 index 0000000..bd40ce2 --- /dev/null +++ b/kernel-open/nvidia/nvidia-sources.Kbuild @@ -0,0 +1,57 @@ +NVIDIA_SOURCES ?= +NVIDIA_SOURCES_CXX ?= + +NVIDIA_SOURCES += nvidia/nv-platform.c +NVIDIA_SOURCES += nvidia/nv-dsi-parse-panel-props.c +NVIDIA_SOURCES += nvidia/nv-bpmp.c +NVIDIA_SOURCES += nvidia/nv-gpio.c +NVIDIA_SOURCES += nvidia/nv-backlight.c +NVIDIA_SOURCES += nvidia/nv-imp.c +NVIDIA_SOURCES += nvidia/nv-platform-pm.c +NVIDIA_SOURCES += nvidia/nv-ipc-soc.c +NVIDIA_SOURCES += nvidia/nv.c +NVIDIA_SOURCES += nvidia/nv-pci.c +NVIDIA_SOURCES += nvidia/nv-dmabuf.c +NVIDIA_SOURCES += nvidia/nv-nano-timer.c +NVIDIA_SOURCES += nvidia/nv-acpi.c +NVIDIA_SOURCES += nvidia/nv-cray.c +NVIDIA_SOURCES += nvidia/nv-dma.c +NVIDIA_SOURCES += nvidia/nv-i2c.c +NVIDIA_SOURCES += nvidia/nv-mmap.c +NVIDIA_SOURCES += nvidia/nv-p2p.c +NVIDIA_SOURCES += nvidia/nv-pat.c +NVIDIA_SOURCES += nvidia/nv-procfs.c +NVIDIA_SOURCES += nvidia/nv-usermap.c +NVIDIA_SOURCES += nvidia/nv-vm.c +NVIDIA_SOURCES += nvidia/nv-vtophys.c +NVIDIA_SOURCES += nvidia/os-interface.c +NVIDIA_SOURCES += nvidia/os-mlock.c +NVIDIA_SOURCES += nvidia/os-pci.c +NVIDIA_SOURCES += nvidia/os-registry.c +NVIDIA_SOURCES += nvidia/os-usermap.c +NVIDIA_SOURCES += nvidia/nv-modeset-interface.c +NVIDIA_SOURCES += nvidia/nv-pci-table.c +NVIDIA_SOURCES += nvidia/nv-kthread-q.c +NVIDIA_SOURCES += nvidia/nv-memdbg.c +NVIDIA_SOURCES += nvidia/nv-report-err.c +NVIDIA_SOURCES += nvidia/nv-rsync.c +NVIDIA_SOURCES += nvidia/nv-msi.c +NVIDIA_SOURCES += nvidia/nv-caps.c +NVIDIA_SOURCES += nvidia/nv-caps-imex.c +NVIDIA_SOURCES += nvidia/nv-clk.c +NVIDIA_SOURCES += nvidia/nv-host1x.c +NVIDIA_SOURCES += nvidia/nv_uvm_interface.c +NVIDIA_SOURCES += nvidia/libspdm_aead.c +NVIDIA_SOURCES += nvidia/libspdm_ecc.c +NVIDIA_SOURCES += nvidia/libspdm_hkdf.c +NVIDIA_SOURCES += nvidia/libspdm_rand.c +NVIDIA_SOURCES += nvidia/libspdm_shash.c +NVIDIA_SOURCES += nvidia/libspdm_rsa.c +NVIDIA_SOURCES += nvidia/libspdm_aead_aes_gcm.c +NVIDIA_SOURCES += nvidia/libspdm_sha.c +NVIDIA_SOURCES += nvidia/libspdm_hmac_sha.c +NVIDIA_SOURCES += nvidia/libspdm_internal_crypt_lib.c 
+NVIDIA_SOURCES += nvidia/libspdm_hkdf_sha.c +NVIDIA_SOURCES += nvidia/libspdm_ec.c +NVIDIA_SOURCES += nvidia/libspdm_x509.c +NVIDIA_SOURCES += nvidia/libspdm_rsa_ext.c diff --git a/kernel-open/nvidia/nvidia.Kbuild b/kernel-open/nvidia/nvidia.Kbuild new file mode 100644 index 0000000..b85fcf5 --- /dev/null +++ b/kernel-open/nvidia/nvidia.Kbuild @@ -0,0 +1,246 @@ +########################################################################### +# Kbuild fragment for nvidia.ko +########################################################################### + +# +# Define NVIDIA_{SOURCES,OBJECTS} +# + +include $(src)/nvidia/nvidia-sources.Kbuild +NVIDIA_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_SOURCES)) + +obj-m += nvidia.o +nvidia-y := $(NVIDIA_OBJECTS) + +NVIDIA_KO = nvidia/nvidia.ko + + +# +# nv-kernel.o_binary is the core binary component of nvidia.ko, shared +# across all UNIX platforms. Create a symlink, "nv-kernel.o", that +# points to nv-kernel.o_binary, and add nv-kernel.o to the list of +# objects to link into nvidia.ko. +# +# Note that: +# - The kbuild "clean" rule will delete all objects in nvidia-y (which +# is why we use a symlink instead of just adding nv-kernel.o_binary +# to nvidia-y). +# - kbuild normally uses the naming convention of ".o_shipped" for +# binary files. That is not used here, because the kbuild rule to +# create the "normal" object file from ".o_shipped" does a copy, not +# a symlink. This file is quite large, so a symlink is preferred. +# - The file added to nvidia-y should be relative to gmake's cwd. +# But, the target for the symlink rule should be prepended with $(obj). +# - The "symlink" command is called using kbuild's if_changed macro to +# generate an .nv-kernel.o.cmd file which can be used on subsequent +# runs to determine if the command line to create the symlink changed +# and needs to be re-executed. +# + +NVIDIA_BINARY_OBJECT := $(src)/nvidia/nv-kernel.o_binary +NVIDIA_BINARY_OBJECT_O := nvidia/nv-kernel.o + +targets += $(NVIDIA_BINARY_OBJECT_O) + +$(obj)/$(NVIDIA_BINARY_OBJECT_O): $(NVIDIA_BINARY_OBJECT) FORCE + $(call if_changed,symlink) + +nvidia-y += $(NVIDIA_BINARY_OBJECT_O) + + +# +# Define nvidia.ko-specific CFLAGS. +# + +NVIDIA_CFLAGS += -I$(src)/nvidia +NVIDIA_CFLAGS += -DNVIDIA_UNDEF_LEGACY_BIT_MACROS + +ifeq ($(NV_BUILD_TYPE),release) + NVIDIA_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG +endif + +ifeq ($(NV_BUILD_TYPE),develop) + NVIDIA_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_MEM_LOGGER +endif + +ifeq ($(NV_BUILD_TYPE),debug) + NVIDIA_CFLAGS += -DDEBUG -D_DEBUG -UNDEBUG -DNV_MEM_LOGGER +endif + +$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_OBJECTS), $(NVIDIA_CFLAGS)) + + +# +# nv-procfs.c requires nv_compiler.h +# + +NV_COMPILER_VERSION_HEADER = $(obj)/nv_compiler.h + +$(NV_COMPILER_VERSION_HEADER): + @echo \#define NV_COMPILER \"`$(CC) -v 2>&1 | tail -n 1`\" > $@ + +$(obj)/nvidia/nv-procfs.o: $(NV_COMPILER_VERSION_HEADER) + +clean-files += $(NV_COMPILER_VERSION_HEADER) + + +# +# Build nv-interface.o from the kernel interface layer objects, suitable +# for further processing by the top-level makefile to produce a precompiled +# kernel interface file. +# + +NVIDIA_INTERFACE := nvidia/nv-interface.o + +# Linux kernel v5.12 and later looks at "always-y", Linux kernel versions +# before v5.6 look at "always", and kernel versions between v5.6 and v5.12 +# look at both.
+ +always += $(NVIDIA_INTERFACE) +always-y += $(NVIDIA_INTERFACE) + +$(obj)/$(NVIDIA_INTERFACE): $(addprefix $(obj)/,$(NVIDIA_OBJECTS)) + $(LD) -r -o $@ $^ + + +# +# Register the conftests needed by nvidia.ko +# + +NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_OBJECTS) + +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_uc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_uc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_array_uc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_array_uc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_cache +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_wc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_driver_hardened +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_driver_hardened_wc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_cache_shared +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data +NV_CONFTEST_FUNCTION_COMPILE_TESTS += xen_ioemu_inject_msi +NV_CONFTEST_FUNCTION_COMPILE_TESTS += phys_to_dma +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_attr_macros +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_page_attrs +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_rebar_get_possible_sizes +NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_backlight_device_by_name +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_direct_map_resource +NV_CONFTEST_FUNCTION_COMPILE_TESTS += flush_cache_all +NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn +NV_CONFTEST_FUNCTION_COMPILE_TESTS += jiffies_to_timespec +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64 +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_atomic_ops_to_root +NV_CONFTEST_FUNCTION_COMPILE_TESTS += vga_tryget +NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_platform_has +NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_attr_guest_sev_snp +NV_CONFTEST_FUNCTION_COMPILE_TESTS += hv_get_isolation_type +NV_CONFTEST_FUNCTION_COMPILE_TESTS += seq_read_iter +NV_CONFTEST_FUNCTION_COMPILE_TESTS += follow_pfn +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ptep_get +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked +NV_CONFTEST_FUNCTION_COMPILE_TESTS += add_memory_driver_managed +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_dma_configure +NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_new_client_device +NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_get +NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_of_icc_get +NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_put +NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_set_bw +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map_atomic +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_attachment_has_peer2peer +NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_clk_bulk_get_all +NV_CONFTEST_FUNCTION_COMPILE_TESTS += thermal_zone_for_each_trip +NV_CONFTEST_FUNCTION_COMPILE_TESTS += thermal_bind_cdev_to_trip +NV_CONFTEST_FUNCTION_COMPILE_TESTS += thermal_unbind_cdev_from_trip +NV_CONFTEST_FUNCTION_COMPILE_TESTS += update_devfreq +NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_task_ioprio +NV_CONFTEST_FUNCTION_COMPILE_TESTS += mdev_set_iommu_device +NV_CONFTEST_FUNCTION_COMPILE_TESTS += offline_and_remove_memory +NV_CONFTEST_FUNCTION_COMPILE_TESTS += stack_trace +NV_CONFTEST_FUNCTION_COMPILE_TESTS += crypto_tfm_ctx_aligned +NV_CONFTEST_FUNCTION_COMPILE_TESTS += assign_str +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioasid_get +NV_CONFTEST_FUNCTION_COMPILE_TESTS += mm_pasid_drop +NV_CONFTEST_FUNCTION_COMPILE_TESTS += iommu_sva_bind_device_has_drvdata_arg +NV_CONFTEST_FUNCTION_COMPILE_TESTS += shrinker_alloc + +NV_CONFTEST_SYMBOL_COMPILE_TESTS 
+= is_export_symbol_gpl_sme_active +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_map_sg_attrs +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_dma_ops +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present___close_fd +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_close_fd +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pxm_to_node +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_screen_info +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_screen_info +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_i2c_bus_status +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_fuse_control_read +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pci_find_host_bridge +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_send_cmd +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_set_init_cb +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_clear_init_cb +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_alloc_mem_from_gscco +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_free_gscco_mem +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_memory_block_size_bytes +NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto +NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto_akcipher_verify +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pte +NV_CONFTEST_SYMBOL_COMPILE_TESTS += follow_pte_arg_vma +NV_CONFTEST_SYMBOL_COMPILE_TESTS += dma_buf_ops_attach_has_arg_dev +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pfnmap_start +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_pci_ats_supported +NV_CONFTEST_SYMBOL_COMPILE_TESTS += ecc_digits_from_bytes +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_set_memory_encrypted +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_set_memory_decrypted +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl___platform_driver_register +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present___platform_driver_register +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_mutex_destroy +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_hrtimer_setup +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync + +NV_CONFTEST_TYPE_COMPILE_TESTS += vmf_insert_pfn_prot +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += sysfs_slab_unlink +NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops +NV_CONFTEST_TYPE_COMPILE_TESTS += vmalloc_has_pgprot_t_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock +NV_CONFTEST_TYPE_COMPILE_TESTS += pci_channel_state +NV_CONFTEST_TYPE_COMPILE_TESTS += remove_memory_has_nid_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += add_memory_driver_managed_has_mhp_flags_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += num_registered_fb +NV_CONFTEST_TYPE_COMPILE_TESTS += pci_driver_has_driver_managed_dma +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags +NV_CONFTEST_TYPE_COMPILE_TESTS += memory_failure_queue_has_trapno_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += foll_longterm_present +NV_CONFTEST_TYPE_COMPILE_TESTS += bus_type_has_iommu_ops +NV_CONFTEST_TYPE_COMPILE_TESTS += of_property_for_each_u32_has_internal_args +NV_CONFTEST_TYPE_COMPILE_TESTS += platform_driver_struct_remove_returns_void +NV_CONFTEST_TYPE_COMPILE_TESTS += class_create_has_no_owner_arg 
+NV_CONFTEST_TYPE_COMPILE_TESTS += class_devnode_has_const_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += devfreq_dev_profile_has_is_cooling_device +NV_CONFTEST_TYPE_COMPILE_TESTS += devfreq_has_freq_table +NV_CONFTEST_TYPE_COMPILE_TESTS += devfreq_has_suspend_freq +NV_CONFTEST_TYPE_COMPILE_TESTS += has_enum_pidtype_tgid +NV_CONFTEST_TYPE_COMPILE_TESTS += bpmp_mrq_has_strap_set +NV_CONFTEST_TYPE_COMPILE_TESTS += register_shrinker_has_format_arg + +NV_CONFTEST_GENERIC_COMPILE_TESTS += dom0_kernel_present +NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_vgpu_kvm_build +NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_build +NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_csp_build +NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages +NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages_remote +NV_CONFTEST_GENERIC_COMPILE_TESTS += pin_user_pages +NV_CONFTEST_GENERIC_COMPILE_TESTS += pin_user_pages_remote +NV_CONFTEST_GENERIC_COMPILE_TESTS += pm_runtime_available +NV_CONFTEST_GENERIC_COMPILE_TESTS += vm_fault_t +NV_CONFTEST_GENERIC_COMPILE_TESTS += pci_class_multimedia_hd_audio +NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available +NV_CONFTEST_GENERIC_COMPILE_TESTS += vfio_pci_core_available +NV_CONFTEST_GENERIC_COMPILE_TESTS += cmd_uphy_display_port_init +NV_CONFTEST_GENERIC_COMPILE_TESTS += cmd_uphy_display_port_off +NV_CONFTEST_GENERIC_COMPILE_TESTS += memory_failure_mf_sw_simulated_defined +NV_CONFTEST_GENERIC_COMPILE_TESTS += device_vm_build +NV_CONFTEST_GENERIC_COMPILE_TESTS += pcie_reset_flr +NV_CONFTEST_GENERIC_COMPILE_TESTS += module_import_ns_takes_constant diff --git a/kernel-open/nvidia/nvspdm_cryptlib_extensions.h b/kernel-open/nvidia/nvspdm_cryptlib_extensions.h new file mode 100644 index 0000000..20d7a94 --- /dev/null +++ b/kernel-open/nvidia/nvspdm_cryptlib_extensions.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +bool libspdm_aead_gcm_prealloc(void **context); +void libspdm_aead_free(void *context); +bool libspdm_aead_aes_gcm_encrypt_prealloc(void *context, + const uint8_t *key, size_t key_size, + const uint8_t *iv, size_t iv_size, + const uint8_t *a_data, size_t a_data_size, + const uint8_t *data_in, size_t data_in_size, + uint8_t *tag_out, size_t tag_size, + uint8_t *data_out, size_t *data_out_size); +bool libspdm_aead_aes_gcm_decrypt_prealloc(void *context, + const uint8_t *key, size_t key_size, + const uint8_t *iv, size_t iv_size, + const uint8_t *a_data, size_t a_data_size, + const uint8_t *data_in, size_t data_in_size, + const uint8_t *tag, size_t tag_size, + uint8_t *data_out, size_t *data_out_size); +bool libspdm_check_crypto_backend(void); + +bool libspdm_encode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen); +bool libspdm_decode_base64(const uint8_t *src, uint8_t *dst, size_t srclen, size_t *p_dstlen); +bool libspdm_pem_to_der(const uint8_t *pem_cert, uint8_t *der_cert, size_t pem_size, size_t *p_der_size); +bool libspdm_der_to_pem(const uint8_t *der_cert, uint8_t *pem_cert, size_t der_size, size_t *p_pem_size); \ No newline at end of file diff --git a/kernel-open/nvidia/os-interface.c b/kernel-open/nvidia/os-interface.c new file mode 100644 index 0000000..e727a32 --- /dev/null +++ b/kernel-open/nvidia/os-interface.c @@ -0,0 +1,2767 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" +#include +#include "nv-caps-imex.h" + +#include "nv-platform.h" + +#include "nv-time.h" + +#include +#include +#include +#include + +#include +#include +#if defined(CONFIG_LOCKDEP) +#include +#endif // CONFIG_LOCKDEP + +extern char *NVreg_TemporaryFilePath; + +#define MAX_ERROR_STRING 528 +static char nv_error_string[MAX_ERROR_STRING]; +static NV_DEFINE_SPINLOCK(nv_error_string_lock); + +extern nv_linux_state_t nv_ctl_device; + +extern nv_kthread_q_t nv_kthread_q; + +NvU64 os_page_size = PAGE_SIZE; +NvU64 os_max_page_size = PAGE_SIZE << NV_MAX_PAGE_ORDER; +NvU64 os_page_mask = NV_PAGE_MASK; +NvU8 os_page_shift = PAGE_SHIFT; + +NvBool os_cc_enabled = 0; +NvBool os_cc_sev_snp_enabled = 0; +NvBool os_cc_snp_vtom_enabled = 0; +NvBool os_cc_tdx_enabled = 0; +NvBool os_cc_sme_enabled = 0; + +#if defined(CONFIG_DMA_SHARED_BUFFER) +NvBool os_dma_buf_enabled = NV_TRUE; +#else +NvBool os_dma_buf_enabled = NV_FALSE; +#endif // CONFIG_DMA_SHARED_BUFFER + +NvBool os_imex_channel_is_supported = NV_TRUE; + +void NV_API_CALL os_disable_console_access(void) +{ + console_lock(); +} + +void NV_API_CALL os_enable_console_access(void) +{ + console_unlock(); +} + +typedef struct semaphore os_mutex_t; + +// +// os_alloc_mutex - Allocate the RM mutex +// +// ppMutex - filled in with pointer to opaque structure to mutex data type +// +NV_STATUS NV_API_CALL os_alloc_mutex +( + void **ppMutex +) +{ + NV_STATUS rmStatus; + os_mutex_t *os_mutex; + + rmStatus = os_alloc_mem(ppMutex, sizeof(os_mutex_t)); + if (rmStatus != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate mutex!\n"); + return rmStatus; + } + os_mutex = (os_mutex_t *)*ppMutex; + NV_INIT_MUTEX(os_mutex); + + return NV_OK; +} + +// +// os_free_mutex - Free resources associated with mutex allocated +// via os_alloc_mutex above. 
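+//
+// Example lifecycle (illustrative caller only; 'mtx' is a hypothetical
+// local variable, not part of this interface):
+//
+//     void *mtx;
+//     if (os_alloc_mutex(&mtx) == NV_OK)
+//     {
+//         if (os_acquire_mutex(mtx) == NV_OK) // fails in atomic context
+//         {
+//             os_release_mutex(mtx);
+//         }
+//         os_free_mutex(mtx);
+//     }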
+//
+// pMutex - Pointer to opaque structure to mutex data type
+//
+void NV_API_CALL os_free_mutex
+(
+    void *pMutex
+)
+{
+    os_mutex_t *os_mutex = (os_mutex_t *)pMutex;
+
+    if (os_mutex != NULL)
+    {
+        os_free_mem(pMutex);
+    }
+}
+
+//
+// os_acquire_mutex - Acquire the RM mutex allocated via os_alloc_mutex()
+//
+// pMutex - Pointer to opaque structure to mutex data type
+//
+
+NV_STATUS NV_API_CALL os_acquire_mutex
+(
+    void *pMutex
+)
+{
+    os_mutex_t *os_mutex = (os_mutex_t *)pMutex;
+
+    if (!NV_MAY_SLEEP())
+    {
+        return NV_ERR_INVALID_REQUEST;
+    }
+    down(os_mutex);
+
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_cond_acquire_mutex
+(
+    void * pMutex
+)
+{
+    os_mutex_t *os_mutex = (os_mutex_t *)pMutex;
+    if (!NV_MAY_SLEEP())
+    {
+        return NV_ERR_INVALID_REQUEST;
+    }
+
+    if (down_trylock(os_mutex))
+    {
+        return NV_ERR_TIMEOUT_RETRY;
+    }
+
+    return NV_OK;
+}
+
+
+void NV_API_CALL os_release_mutex
+(
+    void *pMutex
+)
+{
+    os_mutex_t *os_mutex = (os_mutex_t *)pMutex;
+    up(os_mutex);
+}
+
+typedef struct semaphore os_semaphore_t;
+
+
+void* NV_API_CALL os_alloc_semaphore
+(
+    NvU32 initialValue
+)
+{
+    NV_STATUS rmStatus;
+    os_semaphore_t *os_sema;
+
+    rmStatus = os_alloc_mem((void *)&os_sema, sizeof(os_semaphore_t));
+    if (rmStatus != NV_OK)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate semaphore!\n");
+        return NULL;
+    }
+
+    sema_init(os_sema, initialValue);
+
+    return (void *)os_sema;
+}
+
+void NV_API_CALL os_free_semaphore
+(
+    void *pSema
+)
+{
+    os_semaphore_t *os_sema = (os_semaphore_t *)pSema;
+
+    os_free_mem(os_sema);
+}
+
+NV_STATUS NV_API_CALL os_acquire_semaphore
+(
+    void *pSema
+)
+{
+    os_semaphore_t *os_sema = (os_semaphore_t *)pSema;
+
+    if (!NV_MAY_SLEEP())
+    {
+        return NV_ERR_INVALID_REQUEST;
+    }
+    down(os_sema);
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_cond_acquire_semaphore
+(
+    void * pSema
+)
+{
+    os_semaphore_t *os_sema = (os_semaphore_t *)pSema;
+    //
+    // NOTE: down_trylock() is safe to call from IRQ, so we don't need an
+    // NV_MAY_SLEEP() check here. We do check it in os_cond_acquire_mutex(),
+    // even though it is also calling down_trylock(), since that keeps it
+    // in line with the kernel's 'struct mutex' API.
+    //
+    if (down_trylock(os_sema))
+    {
+        return NV_ERR_TIMEOUT_RETRY;
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_release_semaphore
+(
+    void *pSema
+)
+{
+    os_semaphore_t *os_sema = (os_semaphore_t *)pSema;
+    up(os_sema);
+    return NV_OK;
+}
+
+typedef struct
+{
+    struct rw_semaphore sem;
+
+#if defined(CONFIG_LOCKDEP)
+    /**
+     * The lock class key. It is registered with the Lockdep validator so
+     * that the usages and dependencies of all instances contribute to
+     * constructing correct locking rules, and so this lock is tracked by
+     * the Lockdep validator.
+     */
+    struct lock_class_key key;
+#endif // CONFIG_LOCKDEP
+} os_rwlock_t;
+
+void* NV_API_CALL os_alloc_rwlock(void)
+{
+    os_rwlock_t *os_rwlock = NULL;
+
+    NV_STATUS rmStatus = os_alloc_mem((void *)&os_rwlock, sizeof(os_rwlock_t));
+    if (rmStatus != NV_OK)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate a struct os_rwlock_t!\n");
+        return NULL;
+    }
+
+    init_rwsem(&os_rwlock->sem);
+
+#if defined(CONFIG_LOCKDEP)
+    // Register the dynamically allocated key to Lockdep.
+    lockdep_register_key(&os_rwlock->key);
+    lockdep_set_class(&os_rwlock->sem, &os_rwlock->key);
+#endif // CONFIG_LOCKDEP
+
+    return os_rwlock;
+}
+
+void NV_API_CALL os_free_rwlock(void *pRwLock)
+{
+    os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
+
+#if defined(CONFIG_LOCKDEP)
+    // Unregister the dynamically allocated key.
+    lockdep_unregister_key(&os_rwlock->key);
+#endif // CONFIG_LOCKDEP
+
+    os_free_mem(os_rwlock);
+}
+
+NV_STATUS NV_API_CALL os_acquire_rwlock_read(void *pRwLock)
+{
+    os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
+
+    if (!NV_MAY_SLEEP())
+    {
+        return NV_ERR_INVALID_REQUEST;
+    }
+    down_read(&os_rwlock->sem);
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_acquire_rwlock_write(void *pRwLock)
+{
+    os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
+
+    if (!NV_MAY_SLEEP())
+    {
+        return NV_ERR_INVALID_REQUEST;
+    }
+    down_write(&os_rwlock->sem);
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_cond_acquire_rwlock_read(void *pRwLock)
+{
+    os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
+
+    // down_read_trylock() returns nonzero on success, zero on contention.
+    if (!down_read_trylock(&os_rwlock->sem))
+    {
+        return NV_ERR_TIMEOUT_RETRY;
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_cond_acquire_rwlock_write(void *pRwLock)
+{
+    os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
+
+    // down_write_trylock() returns nonzero on success, zero on contention.
+    if (!down_write_trylock(&os_rwlock->sem))
+    {
+        return NV_ERR_TIMEOUT_RETRY;
+    }
+
+    return NV_OK;
+}
+
+void NV_API_CALL os_release_rwlock_read(void *pRwLock)
+{
+    os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
+    up_read(&os_rwlock->sem);
+}
+
+void NV_API_CALL os_release_rwlock_write(void *pRwLock)
+{
+    os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
+    up_write(&os_rwlock->sem);
+}
+
+NvBool NV_API_CALL os_semaphore_may_sleep(void)
+{
+    return NV_MAY_SLEEP();
+}
+
+NvBool NV_API_CALL os_is_isr(void)
+{
+    return (in_irq());
+}
+
+// return TRUE if the caller is the super-user
+NvBool NV_API_CALL os_is_administrator(void)
+{
+    return NV_IS_SUSER();
+}
+
+NvBool NV_API_CALL os_check_access(RsAccessRight accessRight)
+{
+    switch (accessRight)
+    {
+        case RS_ACCESS_PERFMON:
+        {
+#if defined(CAP_PERFMON)
+            return capable(CAP_PERFMON);
+#else
+            return os_is_administrator();
+#endif
+        }
+        case RS_ACCESS_NICE:
+        {
+            return capable(CAP_SYS_NICE);
+        }
+        default:
+        {
+            return NV_FALSE;
+        }
+    }
+}
+
+char* NV_API_CALL os_string_copy(
+    char *dst,
+    const char *src
+)
+{
+    return strcpy(dst, src);
+}
+
+NvU32 NV_API_CALL os_string_length(
+    const char* str
+)
+{
+    return strlen(str);
+}
+
+NvU32 NV_API_CALL os_strtoul(const char *str, char **endp, NvU32 base)
+{
+    return (NvU32)simple_strtoul(str, endp, base);
+}
+
+NvS32 NV_API_CALL os_string_compare(const char *str1, const char *str2)
+{
+    return strcmp(str1, str2);
+}
+
+static void *os_mem_copy_custom(
+    void *dstPtr,
+    const void *srcPtr,
+    NvU32 length
+)
+{
+    void *ret = dstPtr;
+    NvU32 dwords, bytes = length;
+    NvU8 *dst = dstPtr;
+    const NvU8 *src = srcPtr;
+
+    if ((length >= 128) &&
+        (((NvUPtr)dst & 3) == 0) && (((NvUPtr)src & 3) == 0))
+    {
+        dwords = (length / sizeof(NvU32));
+        bytes = (length % sizeof(NvU32));
+
+        while (dwords != 0)
+        {
+            *(NvU32 *)dst = *(const NvU32 *)src;
+            dst += sizeof(NvU32);
+            src += sizeof(NvU32);
+            dwords--;
+        }
+    }
+
+    while (bytes != 0)
+    {
+        *dst = *src;
+        dst++;
+        src++;
+        bytes--;
+    }
+
+    return ret;
+}
+
+void *NV_API_CALL os_mem_copy(
+    void *dst,
+    const void *src,
+    NvU32 length
+)
+{
+#if defined(NVCPU_AARCH64)
+    /*
+     * TODO: Remove once memset/memcpy restructure is complete
+     *
+     * When performing memcpy for memory mapped as device, memcpy_[to/from]io
+     * must be used. WAR to check the source and destination to determine the
+     * correct memcpy_io to use.
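+     *
+     * Note: is_vmalloc_addr() works here as a proxy for "mapped via
+     * ioremap()" because, on arm64, ioremap() also returns addresses
+     * inside the vmalloc range; when both pointers fall in that range,
+     * the safe dword-copy fallback below is used instead.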
+     */
+    if (is_vmalloc_addr(dst) && !is_vmalloc_addr(src))
+    {
+        memcpy_toio(dst, src, length);
+        return dst;
+    }
+    else if (!is_vmalloc_addr(dst) && is_vmalloc_addr(src))
+    {
+        memcpy_fromio(dst, src, length);
+        return dst;
+    }
+    else if (is_vmalloc_addr(dst) && is_vmalloc_addr(src))
+    {
+        return os_mem_copy_custom(dst, src, length);
+    }
+    else
+#endif
+    {
+#if defined(CONFIG_CC_OPTIMIZE_FOR_SIZE)
+        /*
+         * When the kernel is configured with CC_OPTIMIZE_FOR_SIZE=y, Kbuild uses
+         * -Os universally. With -Os, GCC will aggressively inline builtins, even
+         * if -fno-builtin is specified, including memcpy with a tiny byte-copy
+         * loop on x86 (rep movsb). This is horrible for performance - a strict
+         * dword copy is much faster - so when we detect this case, just provide
+         * our own implementation.
+         */
+        return os_mem_copy_custom(dst, src, length);
+#else
+        /*
+         * Generally speaking, the kernel-provided memcpy will be the fastest
+         * (optimized much better for the target architecture than the above
+         * loop), so we want to use that whenever we can get to it.
+         */
+        return memcpy(dst, src, length);
+#endif
+    }
+}
+
+NV_STATUS NV_API_CALL os_memcpy_from_user(
+    void *to,
+    const void *from,
+    NvU32 n
+)
+{
+    return (NV_COPY_FROM_USER(to, from, n) ? NV_ERR_INVALID_ADDRESS : NV_OK);
+}
+
+NV_STATUS NV_API_CALL os_memcpy_to_user(
+    void *to,
+    const void *from,
+    NvU32 n
+)
+{
+    return (NV_COPY_TO_USER(to, from, n) ? NV_ERR_INVALID_ADDRESS : NV_OK);
+}
+
+void* NV_API_CALL os_mem_set(
+    void *dst,
+    NvU8 c,
+    NvU32 length
+)
+{
+#if defined(NVCPU_AARCH64)
+    /*
+     * TODO: Remove once memset/memcpy restructure is complete
+     *
+     * WAR to check the destination to determine if the memory is of type Device
+     * or Normal, and use the correct memset.
+     */
+    if (is_vmalloc_addr(dst))
+    {
+        memset_io(dst, (int)c, length);
+        return dst;
+    }
+    else
+#endif
+        return memset(dst, (int)c, length);
+}
+
+NvS32 NV_API_CALL os_mem_cmp(
+    const NvU8 *buf0,
+    const NvU8* buf1,
+    NvU32 length
+)
+{
+    return memcmp(buf0, buf1, length);
+}
+
+
+/*
+ * Operating System Memory Functions
+ *
+ * There are 2 interesting aspects of resource manager memory allocations
+ * that need special consideration on Linux:
+ *
+ * 1. They are typically very large (e.g. single allocations of 164KB).
+ *
+ * 2. The resource manager assumes that it can safely allocate memory in
+ *    interrupt handlers.
+ *
+ * The first requires that we call vmalloc, the second kmalloc. We decide
+ * which one to use at run time, based on the size of the request and the
+ * context. Allocations larger than 128KB require vmalloc; in the context
+ * of an ISR, they fail.
+ */
+
+#if defined(NV_VGX_HYPER)
+/*
+ * Citrix Hypervisor-8.0 Dom0 sysmem ends up fragmented, which makes
+ * high-order kmalloc allocations fail. We try to avoid that by
+ * requesting allocations no larger than 8K.
+ *
+ * KVM is similarly affected under memory pressure, particularly if
+ * hugetlbfs hugepages are being used. Hence, the 8K limit applies
+ * there too.
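+ *
+ * For example, with the limit at 8K, a 64 KB request is served by
+ * vmalloc() rather than an order-4 kmalloc() that is likely to fail
+ * once Dom0 sysmem is fragmented.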
+ */ +#define KMALLOC_LIMIT 8192 +#else +#define KMALLOC_LIMIT 131072 +#endif + +#define VMALLOC_ALLOCATION_SIZE_FLAG (1 << 0) + +NV_STATUS NV_API_CALL os_alloc_mem( + void **address, + NvU64 size +) +{ + NvU64 original_size = size; + unsigned long alloc_size; + + if (address == NULL) + return NV_ERR_INVALID_ARGUMENT; + + *address = NULL; + NV_MEM_TRACKING_PAD_SIZE(size); + + // check for integer overflow on size + if (size < original_size) + return NV_ERR_INVALID_ARGUMENT; + + // + // NV_KMALLOC, nv_vmalloc take an input of 4 bytes in x86. To avoid + // truncation and wrong allocation, below check is required. + // + alloc_size = size; + + if (alloc_size != size) + return NV_ERR_INVALID_PARAMETER; + + if (!NV_MAY_SLEEP()) + { + if (alloc_size <= KMALLOC_LIMIT) + NV_KMALLOC_ATOMIC(*address, alloc_size); + } + else + { + if (alloc_size <= KMALLOC_LIMIT) + { + NV_KMALLOC_NO_OOM(*address, alloc_size); + } + if (*address == NULL) + { + *address = nv_vmalloc(alloc_size); + alloc_size |= VMALLOC_ALLOCATION_SIZE_FLAG; + } + } + + NV_MEM_TRACKING_HIDE_SIZE(address, alloc_size); + + return ((*address != NULL) ? NV_OK : NV_ERR_NO_MEMORY); +} + +void NV_API_CALL os_free_mem(void *address) +{ + NvU64 size; + + NV_MEM_TRACKING_RETRIEVE_SIZE(address, size); + + if (size & VMALLOC_ALLOCATION_SIZE_FLAG) + { + size &= ~VMALLOC_ALLOCATION_SIZE_FLAG; + nv_vfree(address, size); + } + else + NV_KFREE(address, size); +} + + +/***************************************************************************** +* +* Name: osGetSystemTime +* +*****************************************************************************/ + +NV_STATUS NV_API_CALL os_get_system_time( + NvU32 *seconds, + NvU32 *useconds +) +{ + struct timespec64 tm; + + ktime_get_real_ts64(&tm); + + *seconds = tm.tv_sec; + *useconds = tm.tv_nsec / NSEC_PER_USEC; + + return NV_OK; +} + +// +// Get the High resolution tick count of the system uptime +// +NvU64 NV_API_CALL os_get_monotonic_time_ns_hr(void) +{ + struct timespec64 tm; + ktime_get_raw_ts64(&tm); + return (NvU64) timespec64_to_ns(&tm); +} + +NvU64 NV_API_CALL os_get_monotonic_time_ns(void) +{ +#if defined(NV_JIFFIES_TO_TIMESPEC_PRESENT) + struct timespec ts; + jiffies_to_timespec(jiffies, &ts); + return (NvU64) timespec_to_ns(&ts); +#else + struct timespec64 ts; + jiffies_to_timespec64(jiffies, &ts); + return (NvU64) timespec64_to_ns(&ts); +#endif +} + +NvU64 NV_API_CALL os_get_monotonic_tick_resolution_ns(void) +{ + return (NvU64)jiffies_to_usecs(1) * NSEC_PER_USEC; +} + +//--------------------------------------------------------------------------- + +// +// Misc services. 
+// +//--------------------------------------------------------------------------- + +NV_STATUS NV_API_CALL os_delay_us(NvU32 MicroSeconds) +{ + return nv_sleep_us(MicroSeconds); +} + +NV_STATUS NV_API_CALL os_delay(NvU32 MilliSeconds) +{ + return nv_sleep_ms(MilliSeconds); +} + +NvU64 NV_API_CALL os_get_cpu_frequency(void) +{ + NvU64 cpu_hz = 0; +#if defined(CONFIG_CPU_FREQ) + cpu_hz = (cpufreq_get(0) * 1000); +#elif defined(NVCPU_X86_64) + NvU64 tsc[2]; + + tsc[0] = nv_rdtsc(); + mdelay(250); + tsc[1] = nv_rdtsc(); + + cpu_hz = ((tsc[1] - tsc[0]) * 4); +#endif + return cpu_hz; +} + +NvU32 NV_API_CALL os_get_current_process(void) +{ + return NV_GET_CURRENT_PROCESS(); +} + +void NV_API_CALL os_get_current_process_name(char *buf, NvU32 len) +{ + task_lock(current); + strncpy(buf, current->comm, len - 1); + buf[len - 1] = '\0'; + task_unlock(current); +} + +NV_STATUS NV_API_CALL os_iommu_sva_bind(void *arg, void **handle, NvU32 *pasid) +{ + nv_state_t *nv = arg; +#if defined(CONFIG_IOMMU_SVA) && \ + (defined(NV_IOASID_GET_PRESENT) || defined(NV_MM_PASID_DROP_PRESENT)) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct iommu_sva *sva_handle; + + if (pasid == NULL || handle == NULL) + return NV_ERR_INVALID_ARGUMENT; + + *pasid = 0; + *handle = NULL; + + if (nv->ats_support && current && current->mm) + { +#if defined(NV_IOMMU_SVA_BIND_DEVICE_HAS_DRVDATA_ARG) + sva_handle = iommu_sva_bind_device(nvl->dev, current->mm, NULL); +#else + sva_handle = iommu_sva_bind_device(nvl->dev, current->mm); +#endif + if (!IS_ERR(sva_handle)) + { + *pasid = iommu_sva_get_pasid(sva_handle); + *handle = sva_handle; + NV_DEV_PRINTF(NV_DBG_INFO, nv, "PASID: %u\n", *pasid); + + return NV_OK; + } + } +#endif + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "IOMMU SVA bind failed\n"); + + return NV_ERR_INVALID_STATE; +} + +void NV_API_CALL os_iommu_sva_unbind(void *handle) +{ +#if defined(CONFIG_IOMMU_SVA) && \ + (defined(NV_IOASID_GET_PRESENT) || defined(NV_MM_PASID_DROP_PRESENT)) + iommu_sva_unbind_device(handle); +#endif +} + +NV_STATUS NV_API_CALL os_get_current_thread(NvU64 *threadId) +{ + if (in_interrupt()) + *threadId = 0; + else + *threadId = (NvU64) current->pid; + + return NV_OK; +} + +/*******************************************************************************/ +/* */ +/* Debug and logging utilities follow */ +/* */ +/*******************************************************************************/ + +// The current debug display level (default to maximum debug level) +NvU32 cur_debuglevel = 0xffffffff; + +/* + * The binary core of RM (nv-kernel.o) calls both out_string, and nv_printf. + */ +inline void NV_API_CALL out_string(const char *str) +{ + printk("%s", str); +} + +/* + * nv_printf() prints to the kernel log for the driver. + * Returns the number of characters written. + */ +int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...) +{ + va_list arglist; + int chars_written = 0; + + if (debuglevel >= ((cur_debuglevel >> 4) & 0x3)) + { + size_t length; + unsigned long flags; + + // When printk is called to extend the output of the previous line + // (i.e. when the previous line did not end in \n), the printk call + // must contain KERN_CONT. Older kernels still print the line + // correctly, but KERN_CONT was technically always required. + + // This means that every call to printk() needs to have a KERN_xxx + // prefix. The only way to get this is to rebuild the format string + // into a new buffer, with a KERN_xxx prefix prepended. 
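+
+        // For example, a hypothetical call
+        //
+        //     nv_printf(NV_DBG_ERRORS, "NVRM: Xid %d\n", xid);
+        //
+        // is re-issued below as vprintk(KERN_CONT "NVRM: Xid %d\n", ...),
+        // with the prefix and the original format copied back-to-back into
+        // nv_error_string.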
+ + // Unfortunately, we can't guarantee that two calls to nv_printf() + // won't be interrupted by a printk from another driver. So to be + // safe, we always append KERN_CONT. It's still technically wrong, + // but it works. + + // The long-term fix is to modify all NV_PRINTF-ish calls so that the + // string always contains only one \n (at the end) and NV_PRINTF_EX + // is deleted. But that is unlikely to ever happen. + + length = strlen(printf_format); + if (length < 1) + return 0; + + NV_SPIN_LOCK_IRQSAVE(&nv_error_string_lock, flags); + + // KERN_CONT changed in the 3.6 kernel, so we can't assume its + // composition or size. + memcpy(nv_error_string, KERN_CONT, sizeof(KERN_CONT) - 1); + memcpy(nv_error_string + sizeof(KERN_CONT) - 1, printf_format, length + 1); + + va_start(arglist, printf_format); + chars_written = vprintk(nv_error_string, arglist); + va_end(arglist); + + NV_SPIN_UNLOCK_IRQRESTORE(&nv_error_string_lock, flags); + } + + return chars_written; +} + +NvS32 NV_API_CALL os_snprintf(char *buf, NvU32 size, const char *fmt, ...) +{ + va_list arglist; + int chars_written; + + va_start(arglist, fmt); + chars_written = vsnprintf(buf, size, fmt, arglist); + va_end(arglist); + + return chars_written; +} + +NvS32 NV_API_CALL os_vsnprintf(char *buf, NvU32 size, const char *fmt, va_list arglist) +{ + return vsnprintf(buf, size, fmt, arglist); +} + +void NV_API_CALL os_log_error(const char *fmt, va_list ap) +{ + unsigned long flags; + + NV_SPIN_LOCK_IRQSAVE(&nv_error_string_lock, flags); + + vsnprintf(nv_error_string, MAX_ERROR_STRING, fmt, ap); + nv_error_string[MAX_ERROR_STRING - 1] = 0; + printk(KERN_ERR "%s", nv_error_string); + + NV_SPIN_UNLOCK_IRQRESTORE(&nv_error_string_lock, flags); +} + +void NV_API_CALL os_io_write_byte( + NvU32 address, + NvU8 value +) +{ + outb(value, address); +} + +void NV_API_CALL os_io_write_word( + NvU32 address, + NvU16 value +) +{ + outw(value, address); +} + +void NV_API_CALL os_io_write_dword( + NvU32 address, + NvU32 value +) +{ + outl(value, address); +} + +NvU8 NV_API_CALL os_io_read_byte( + NvU32 address +) +{ + return inb(address); +} + +NvU16 NV_API_CALL os_io_read_word( + NvU32 address +) +{ + return inw(address); +} + +NvU32 NV_API_CALL os_io_read_dword( + NvU32 address +) +{ + return inl(address); +} + + +static NvBool NV_API_CALL xen_support_fully_virtualized_kernel(void) +{ +#if defined(NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL) + return (os_is_vgx_hyper()); +#endif + return NV_FALSE; +} + +void* NV_API_CALL os_map_kernel_space( + NvU64 start, + NvU64 size_bytes, + NvU32 mode +) +{ + void *vaddr; + + if (!xen_support_fully_virtualized_kernel() && start == 0) + { + if (mode != NV_MEMORY_CACHED) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: os_map_kernel_space: won't map address 0x%0llx UC!\n", start); + return NULL; + } + else + return (void *)PAGE_OFFSET; + } + + if (!NV_MAY_SLEEP()) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: os_map_kernel_space: can't map 0x%0llx, invalid context!\n", start); + os_dbg_breakpoint(); + return NULL; + } + + switch (mode) + { + case NV_MEMORY_CACHED: + vaddr = nv_ioremap_cache(start, size_bytes); + break; + case NV_MEMORY_WRITECOMBINED: + vaddr = rm_disable_iomap_wc() ? 
+ nv_ioremap_nocache(start, size_bytes) : + nv_ioremap_wc(start, size_bytes); + break; + case NV_MEMORY_UNCACHED: + case NV_MEMORY_DEFAULT: + vaddr = nv_ioremap_nocache(start, size_bytes); + break; + default: + nv_printf(NV_DBG_ERRORS, + "NVRM: os_map_kernel_space: unsupported mode!\n"); + return NULL; + } + + return vaddr; +} + +void NV_API_CALL os_unmap_kernel_space( + void *addr, + NvU64 size_bytes +) +{ + if (addr == (void *)PAGE_OFFSET) + return; + + nv_iounmap(addr, size_bytes); +} + +#if NVCPU_IS_AARCH64 + +static inline void nv_flush_cache_cpu(void *info) +{ + if (!nvos_is_chipset_io_coherent()) + { +#if defined(NV_FLUSH_CACHE_ALL_PRESENT) + flush_cache_all(); +#else + WARN_ONCE(0, "kernel does not provide flush_cache_all()\n"); +#endif + } +} + +// flush the cache of all cpus +NV_STATUS NV_API_CALL os_flush_cpu_cache_all(void) +{ + on_each_cpu(nv_flush_cache_cpu, NULL, 1); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_flush_user_cache(void) +{ + if (!NV_MAY_SLEEP()) + { + return NV_ERR_NOT_SUPPORTED; + } + + // + // The Linux kernel does not export an interface for flushing a range, + // although it is possible. For now, just flush the entire cache to be + // safe. + // + on_each_cpu(nv_flush_cache_cpu, NULL, 1); + return NV_OK; +} + +#else // NVCPU_IS_AARCH64 + +NV_STATUS NV_API_CALL os_flush_cpu_cache_all(void) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL os_flush_user_cache(void) +{ + return NV_ERR_NOT_SUPPORTED; +} + +#endif + +void NV_API_CALL os_flush_cpu_write_combine_buffer(void) +{ +#if defined(NVCPU_X86_64) + asm volatile("sfence" ::: "memory"); +#elif defined(NVCPU_AARCH64) + asm volatile("dsb st" : : : "memory"); +#else + mb(); +#endif +} + +// override initial debug level from registry +void NV_API_CALL os_dbg_init(void) +{ + NvU32 new_debuglevel; + nvidia_stack_t *sp = NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return; + } + + if (NV_OK == rm_read_registry_dword(sp, NULL, + "ResmanDebugLevel", + &new_debuglevel)) + { + if (new_debuglevel != (NvU32)~0) + cur_debuglevel = new_debuglevel; + } + + nv_kmem_cache_free_stack(sp); +} + +void NV_API_CALL os_dbg_set_level(NvU32 new_debuglevel) +{ + nv_printf(NV_DBG_SETUP, "NVRM: Changing debuglevel from 0x%x to 0x%x\n", + cur_debuglevel, new_debuglevel); + cur_debuglevel = new_debuglevel; +} + +NvU64 NV_API_CALL os_get_max_user_va(void) +{ + return TASK_SIZE; +} + +NV_STATUS NV_API_CALL os_schedule(void) +{ + if (NV_MAY_SLEEP()) + { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + return NV_OK; + } + else + { + nv_printf(NV_DBG_ERRORS, "NVRM: os_schedule: Attempted to yield" + " the CPU while in atomic or interrupt" + " context\n"); + return NV_ERR_ILLEGAL_ACTION; + } +} + +typedef struct { + nv_kthread_q_item_t item; + void *data; +} os_queue_data_t; + +static void os_execute_work_item(void *_oqd) +{ + os_queue_data_t *oqd = _oqd; + nvidia_stack_t *sp = NULL; + void *data = oqd->data; + + NV_KFREE(oqd, sizeof(os_queue_data_t)); + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return; + } + + rm_execute_work_item(sp, data); + + nv_kmem_cache_free_stack(sp); +} + +NV_STATUS NV_API_CALL os_queue_work_item(struct os_work_queue *queue, void *data) +{ + os_queue_data_t *oqd; + nv_kthread_q_t *kthread; + + /* Use the global queue unless a valid queue was provided */ + kthread = queue ? 
&queue->nvk : &nv_kthread_q;
+
+    /* Make sure the kthread is active */
+    if (unlikely(!kthread->q_kthread)) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: queue is not enabled\n");
+        return NV_ERR_NOT_READY;
+    }
+
+    /* Allocate atomically just in case we're called in atomic context. */
+    NV_KMALLOC_ATOMIC(oqd, sizeof(os_queue_data_t));
+    if (!oqd)
+        return NV_ERR_NO_MEMORY;
+
+    nv_kthread_q_item_init(&oqd->item, os_execute_work_item, oqd);
+    oqd->data = data;
+
+    nv_kthread_q_schedule_q_item(kthread, &oqd->item);
+
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_flush_work_queue(struct os_work_queue *queue, NvBool is_unload)
+{
+    nv_kthread_q_t *kthread;
+
+    /* Use the global queue unless a valid queue was provided */
+    kthread = queue ? &queue->nvk : &nv_kthread_q;
+
+    if (NV_MAY_SLEEP())
+    {
+        kthread->is_unload_flush_ongoing = is_unload;
+
+        if (kthread->q_kthread)
+            nv_kthread_q_flush(kthread);
+
+        kthread->is_unload_flush_ongoing = NV_FALSE;
+
+        return NV_OK;
+    }
+    else
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "NVRM: os_flush_work_queue: attempted to execute passive "
+                  "work from an atomic or interrupt context.\n");
+        return NV_ERR_ILLEGAL_ACTION;
+    }
+}
+
+NvBool NV_API_CALL os_is_queue_flush_ongoing(struct os_work_queue *queue)
+{
+    nv_kthread_q_t *kthread = queue ? &queue->nvk : &nv_kthread_q;
+
+    return kthread->is_unload_flush_ongoing;
+}
+
+extern NvU32 NVreg_EnableDbgBreakpoint;
+
+void NV_API_CALL os_dbg_breakpoint(void)
+{
+    if (NVreg_EnableDbgBreakpoint == 0)
+    {
+        return;
+    }
+
+#if defined(CONFIG_X86_REMOTE_DEBUG) || defined(CONFIG_KGDB) || defined(CONFIG_XMON)
+    #if defined(NVCPU_X86_64)
+    __asm__ __volatile__ ("int $3");
+    #elif defined(NVCPU_ARM)
+    __asm__ __volatile__ (".word %c0" :: "i" (KGDB_COMPILED_BREAK));
+    #elif defined(NVCPU_AARCH64)
+    # warning "Need to implement os_dbg_breakpoint() for aarch64"
+    #endif // NVCPU_*
+#elif defined(CONFIG_KDB)
+    KDB_ENTER();
+#endif // CONFIG_X86_REMOTE_DEBUG || CONFIG_KGDB || CONFIG_XMON
+}
+
+NvU32 NV_API_CALL os_get_cpu_number(void)
+{
+    NvU32 cpu_id = get_cpu();
+    put_cpu();
+    return cpu_id;
+}
+
+NvU32 NV_API_CALL os_get_cpu_count(void)
+{
+    return NV_NUM_CPUS();
+}
+
+NvBool NV_API_CALL os_pat_supported(void)
+{
+    return (nv_pat_mode != NV_PAT_MODE_DISABLED);
+}
+
+NvBool NV_API_CALL os_is_efi_enabled(void)
+{
+    return efi_enabled(EFI_BOOT);
+}
+
+void NV_API_CALL os_dump_stack(void)
+{
+    dump_stack();
+}
+
+typedef struct os_spinlock_s
+{
+    nv_spinlock_t lock;
+    unsigned long eflags;
+} os_spinlock_t;
+
+NV_STATUS NV_API_CALL os_alloc_spinlock(void **ppSpinlock)
+{
+    NV_STATUS rmStatus;
+    os_spinlock_t *os_spinlock;
+
+    rmStatus = os_alloc_mem(ppSpinlock, sizeof(os_spinlock_t));
+    if (rmStatus != NV_OK)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate spinlock!\n");
+        return rmStatus;
+    }
+
+    os_spinlock = (os_spinlock_t *)*ppSpinlock;
+    NV_SPIN_LOCK_INIT(&os_spinlock->lock);
+    os_spinlock->eflags = 0;
+    return NV_OK;
+}
+
+void NV_API_CALL os_free_spinlock(void *pSpinlock)
+{
+    os_free_mem(pSpinlock);
+}
+
+NvU64 NV_API_CALL os_acquire_spinlock(void *pSpinlock)
+{
+    os_spinlock_t *os_spinlock = (os_spinlock_t *)pSpinlock;
+    unsigned long eflags;
+
+    NV_SPIN_LOCK_IRQSAVE(&os_spinlock->lock, eflags);
+    os_spinlock->eflags = eflags;
+
+#if defined(NVCPU_X86_64)
+    eflags &= X86_EFLAGS_IF;
+#elif defined(NVCPU_AARCH64)
+    eflags &= PSR_I_BIT;
+#endif
+    return eflags;
+}
+
+void NV_API_CALL os_release_spinlock(void *pSpinlock, NvU64 oldIrql)
+{
+    os_spinlock_t *os_spinlock = (os_spinlock_t *)pSpinlock;
+    unsigned long eflags;
+
+    eflags =
os_spinlock->eflags; + os_spinlock->eflags = 0; + NV_SPIN_UNLOCK_IRQRESTORE(&os_spinlock->lock, eflags); +} + +#define NV_KERNEL_RELEASE ((LINUX_VERSION_CODE >> 16) & 0x0ff) +#define NV_KERNEL_VERSION ((LINUX_VERSION_CODE >> 8) & 0x0ff) +#define NV_KERNEL_SUBVERSION ((LINUX_VERSION_CODE) & 0x0ff) + +NV_STATUS NV_API_CALL os_get_version_info(os_version_info * pOsVersionInfo) +{ + NV_STATUS status = NV_OK; + + pOsVersionInfo->os_major_version = NV_KERNEL_RELEASE; + pOsVersionInfo->os_minor_version = NV_KERNEL_VERSION; + pOsVersionInfo->os_build_number = NV_KERNEL_SUBVERSION; + +#if defined(UTS_RELEASE) + pOsVersionInfo->os_build_version_str = UTS_RELEASE; +#endif + +#if defined(UTS_VERSION) + pOsVersionInfo->os_build_date_plus_str = UTS_VERSION; +#endif + + return status; +} + +NV_STATUS NV_API_CALL os_get_is_openrm(NvBool *bIsOpenRm) +{ +#if defined(NVCPU_X86_64) || defined(NVCPU_AARCH64) + *bIsOpenRm = NV_TRUE; + return NV_OK; +#else // defined(NVCPU_X86_64) || defined(NVCPU_AARCH64) + return NV_ERR_NOT_SUPPORTED; +#endif // defined(NVCPU_X86_64) || defined(NVCPU_AARCH64) +} + +NvBool NV_API_CALL os_is_xen_dom0(void) +{ +#if defined(NV_DOM0_KERNEL_PRESENT) + return NV_TRUE; +#else + return NV_FALSE; +#endif +} + +NvBool NV_API_CALL os_is_vgx_hyper(void) +{ +#if defined(NV_VGX_HYPER) + return NV_TRUE; +#else + return NV_FALSE; +#endif +} + +NV_STATUS NV_API_CALL os_inject_vgx_msi(NvU16 guestID, NvU64 msiAddr, NvU32 msiData) +{ +#if defined(NV_VGX_HYPER) && defined(NV_DOM0_KERNEL_PRESENT) && \ + defined(NV_XEN_IOEMU_INJECT_MSI) + int rc = 0; + rc = xen_ioemu_inject_msi(guestID, msiAddr, msiData); + if (rc) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: can't inject MSI to guest:%d, addr:0x%x, data:0x%x, err:%d\n", + __FUNCTION__, guestID, msiAddr, msiData, rc); + return NV_ERR_OPERATING_SYSTEM; + } + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NvBool NV_API_CALL os_is_grid_supported(void) +{ +#if defined(NV_GRID_BUILD) + return NV_TRUE; +#else + return NV_FALSE; +#endif +} + +NvU32 NV_API_CALL os_get_grid_csp_support(void) +{ +#if defined(NV_GRID_BUILD_CSP) + return NV_GRID_BUILD_CSP; +#else + return 0; +#endif +} + +void NV_API_CALL os_bug_check(NvU32 bugCode, const char *bugCodeStr) +{ + panic(bugCodeStr); +} + +NV_STATUS NV_API_CALL os_get_euid(NvU32 *pSecToken) +{ + *pSecToken = NV_CURRENT_EUID(); + return NV_OK; +} + +#if defined(NVCPU_X86_64) || defined(NVCPU_AARCH64) + +static NvBool os_verify_checksum(const NvU8 *pMappedAddr, NvU32 length) +{ + NvU8 sum = 0; + NvU32 iter = 0; + + for (iter = 0; iter < length; iter++) + sum += pMappedAddr[iter]; + + return sum == 0; +} + +#define _VERIFY_SMBIOS3(_pMappedAddr) \ + _pMappedAddr && \ + (os_mem_cmp(_pMappedAddr, "_SM3_", 5) == 0 && \ + _pMappedAddr[6] < 32 && \ + _pMappedAddr[6] > 0 && \ + os_verify_checksum(_pMappedAddr, _pMappedAddr[6])) + +#define OS_VERIFY_SMBIOS3(pMappedAddr) _VERIFY_SMBIOS3((pMappedAddr)) + +#define _VERIFY_SMBIOS(_pMappedAddr) \ + _pMappedAddr && \ + (os_mem_cmp(_pMappedAddr, "_SM_", 4) == 0 && \ + _pMappedAddr[5] < 32 && \ + _pMappedAddr[5] > 0 && \ + os_verify_checksum(_pMappedAddr, _pMappedAddr[5]) && \ + os_mem_cmp((_pMappedAddr + 16), "_DMI_", 5) == 0 && \ + os_verify_checksum((_pMappedAddr + 16), 15)) + +#define OS_VERIFY_SMBIOS(pMappedAddr) _VERIFY_SMBIOS((pMappedAddr)) + +#define SMBIOS_LEGACY_BASE 0xF0000 +#define SMBIOS_LEGACY_SIZE 0x10000 + +static NV_STATUS os_get_smbios_header_legacy(NvU64 *pSmbsAddr) +{ +#if !defined(NVCPU_X86_64) + return NV_ERR_NOT_SUPPORTED; +#else + NV_STATUS 
status = NV_ERR_OPERATING_SYSTEM; + NvU8 *pMappedAddr = NULL; + NvU8 *pIterAddr = NULL; + + pMappedAddr = (NvU8*)os_map_kernel_space(SMBIOS_LEGACY_BASE, + SMBIOS_LEGACY_SIZE, + NV_MEMORY_CACHED); + if (pMappedAddr == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + pIterAddr = pMappedAddr; + + for (; pIterAddr < (pMappedAddr + SMBIOS_LEGACY_SIZE); pIterAddr += 16) + { + if (OS_VERIFY_SMBIOS3(pIterAddr)) + { + *pSmbsAddr = SMBIOS_LEGACY_BASE + (pIterAddr - pMappedAddr); + status = NV_OK; + break; + } + + if (OS_VERIFY_SMBIOS(pIterAddr)) + { + *pSmbsAddr = SMBIOS_LEGACY_BASE + (pIterAddr - pMappedAddr); + status = NV_OK; + break; + } + } + + os_unmap_kernel_space(pMappedAddr, SMBIOS_LEGACY_SIZE); + + return status; +#endif +} + +// This function is needed only if "efi" is enabled. +#if defined(CONFIG_EFI) +static NV_STATUS os_verify_smbios_header_uefi(NvU64 smbsAddr) +{ + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + NvU64 start= 0, offset =0 , size = 32; + NvU8 *pMappedAddr = NULL, *pBufAddr = NULL; + + start = smbsAddr; + offset = (start & ~os_page_mask); + start &= os_page_mask; + size = ((size + offset + ~os_page_mask) & os_page_mask); + + pBufAddr = (NvU8*)os_map_kernel_space(start, + size, + NV_MEMORY_CACHED); + if (pBufAddr == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + pMappedAddr = pBufAddr + offset; + + if (OS_VERIFY_SMBIOS3(pMappedAddr)) + { + status = NV_OK; + goto done; + } + + if (OS_VERIFY_SMBIOS(pMappedAddr)) + { + status = NV_OK; + } + +done: + os_unmap_kernel_space(pBufAddr, size); + return status; +} +#endif + +static NV_STATUS os_get_smbios_header_uefi(NvU64 *pSmbsAddr) +{ + NV_STATUS status = NV_ERR_OPERATING_SYSTEM; + +#if defined(CONFIG_EFI) + +// Make sure that efi.h has SMBIOS3_TABLE_GUID present. +#if defined(SMBIOS3_TABLE_GUID) + if (efi.smbios3 != EFI_INVALID_TABLE_ADDR) + { + status = os_verify_smbios_header_uefi(efi.smbios3); + if (status == NV_OK) + { + *pSmbsAddr = efi.smbios3; + return NV_OK; + } + } +#endif + + if (efi.smbios != EFI_INVALID_TABLE_ADDR) + { + status = os_verify_smbios_header_uefi(efi.smbios); + if (status == NV_OK) + { + *pSmbsAddr = efi.smbios; + return NV_OK; + } + } +#endif + + return status; +} + +#endif // defined(NVCPU_X86_64) || defined(NVCPU_AARCH64) + +// The function locates the SMBIOS entry point. 
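+// Per the DMTF SMBIOS spec, the 32-bit entry point is anchored by "_SM_"
+// (with an "_DMI_" intermediate anchor at offset 16) and the 64-bit entry
+// point by "_SM3_"; the checks above validate each candidate with a
+// byte-sum checksum that must come to zero over the advertised
+// entry-point length.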
+NV_STATUS NV_API_CALL os_get_smbios_header(NvU64 *pSmbsAddr) +{ + +#if !defined(NVCPU_X86_64) && !defined(NVCPU_AARCH64) + return NV_ERR_NOT_SUPPORTED; +#else + NV_STATUS status = NV_OK; + + if (os_is_efi_enabled()) + { + status = os_get_smbios_header_uefi(pSmbsAddr); + } + else + { + status = os_get_smbios_header_legacy(pSmbsAddr); + } + + return status; +#endif +} + +NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi +( + NvU32 *pRsdpAddr +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + if (pRsdpAddr == NULL) + { + return NV_ERR_INVALID_STATE; + } + + *pRsdpAddr = 0; + +#if defined(CONFIG_EFI) + if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) + { + *pRsdpAddr = efi.acpi20; + status = NV_OK; + } + else if (efi.acpi != EFI_INVALID_TABLE_ADDR) + { + *pRsdpAddr = efi.acpi; + status = NV_OK; + } + else + { + nv_printf(NV_DBG_ERRORS, "NVRM: RSDP Not found!\n"); + status = NV_ERR_OPERATING_SYSTEM; + } +#endif + + return status; +} + +void NV_API_CALL os_add_record_for_crashLog(void *pbuffer, NvU32 size) +{ +} + +void NV_API_CALL os_delete_record_for_crashLog(void *pbuffer) +{ +} + +#if !defined(NV_VGPU_KVM_BUILD) +NV_STATUS NV_API_CALL os_call_vgpu_vfio(void *pvgpu_vfio_info, NvU32 cmd_type) +{ + return NV_ERR_NOT_SUPPORTED; +} +#endif + +NV_STATUS NV_API_CALL os_alloc_pages_node +( + NvS32 nid, + NvU32 size, + NvU32 flag, + NvU64 *pAddress +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + +#if defined(__GFP_THISNODE) && defined(GFP_HIGHUSER_MOVABLE) && \ + defined(__GFP_COMP) && defined(__GFP_NOWARN) + gfp_t gfp_mask; + struct page *alloc_addr; + unsigned int order = get_order(size); + + /* + * Explanation of flags used: + * + * 1. __GFP_THISNODE: This will make sure the allocation happens + * on the node specified by nid. + * + * 2. GFP_HIGHUSER_MOVABLE: This makes allocations from ZONE_MOVABLE. + * + * 3. __GFP_COMP: This will make allocations with compound + * pages, which is needed in order to use + * vm_insert_page API. + * + * 4. __GFP_NOWARN: Used to avoid a WARN_ON in the slowpath if + * the requested order is too large (just fail + * instead). + * + * 5. (Optional) __GFP_RECLAIM: Used to allow/forbid reclaim. + * This is part of GFP_USER and consequently + * GFP_HIGHUSER_MOVABLE. + * + * Some of these flags are relatively more recent, with the last of them + * (GFP_HIGHUSER_MOVABLE) having been added with this Linux kernel commit: + * + * 2007-07-17 769848c03895b63e5662eb7e4ec8c4866f7d0183 + * + * Assume that this feature will only be used on kernels that support all + * of the needed GFP flags. + */ + + gfp_mask = __GFP_THISNODE | GFP_HIGHUSER_MOVABLE | __GFP_COMP | + __GFP_NOWARN; + +#if defined(__GFP_RETRY_MAYFAIL) + + /* + * __GFP_RETRY_MAYFAIL : Used to avoid the Linux kernel OOM killer. + * To help PMA on paths where UVM might be + * in memory over subscription. This gives UVM + * a chance to free memory before invoking any + * action from the OOM killer. + * Freeing non-essential memory will also benefit + * the system as a whole. + */ + + gfp_mask |= __GFP_RETRY_MAYFAIL; +#elif defined(__GFP_NORETRY) + + /* + * __GFP_NORETRY : Use __GFP_NORETRY on older kernels where + * __GFP_RETRY_MAYFAIL is not present. 
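+     *
+     * In that case the final mask works out to, for example:
+     *
+     *     __GFP_THISNODE | GFP_HIGHUSER_MOVABLE | __GFP_COMP |
+     *     __GFP_NOWARN | __GFP_NORETRY
+     *
+     * minus __GFP_RECLAIM when the caller passes
+     * NV_ALLOC_PAGES_NODE_SKIP_RECLAIM (handled below).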
+ */ + + gfp_mask |= __GFP_NORETRY; +#endif + +#if defined(__GFP_RECLAIM) + if (flag & NV_ALLOC_PAGES_NODE_SKIP_RECLAIM) + { + gfp_mask &= ~(__GFP_RECLAIM); + } +#endif // defined(__GFP_RECLAIM) + + alloc_addr = alloc_pages_node(nid, gfp_mask, order); + if (alloc_addr == NULL) + { + nv_printf(NV_DBG_INFO, + "NVRM: alloc_pages_node(node = %d, order = %u) failed\n", + nid, order); + status = NV_ERR_NO_MEMORY; + } + else if (page_to_nid(alloc_addr) != nid) + { + // + // We can hit this case when a Linux kernel bug is not patched. + // The needed patch is https://patchwork.kernel.org/patch/10427387/ + // + nv_printf(NV_DBG_ERRORS, + "NVRM: alloc_pages_node(node = %d, order = %u) wrong node ID.\n", + nid, order); + __free_pages(alloc_addr, order); + status = NV_ERR_NO_MEMORY; + } + else + { + *pAddress = (NvU64)page_to_phys(alloc_addr); + status = NV_OK; + } +#endif // GFP flags + + return status; +} + +NV_STATUS NV_API_CALL os_get_page +( + NvU64 address +) +{ + get_page(NV_GET_PAGE_STRUCT(address)); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_put_page +( + NvU64 address +) +{ + put_page(NV_GET_PAGE_STRUCT(address)); + return NV_OK; +} + +NvU32 NV_API_CALL os_get_page_refcount +( + NvU64 address +) +{ + return NV_PAGE_COUNT(NV_GET_PAGE_STRUCT(address)); +} + +NvU32 NV_API_CALL os_count_tail_pages +( + NvU64 address +) +{ + NvU32 order = compound_order(compound_head(NV_GET_PAGE_STRUCT(address))); + + return 1 << order; +} + +void NV_API_CALL os_free_pages_phys +( + NvU64 address, + NvU32 size +) +{ + __free_pages(NV_GET_PAGE_STRUCT(address), get_order(size)); +} + +NV_STATUS NV_API_CALL os_numa_memblock_size +( + NvU64 *memblock_size +) +{ +#if NV_IS_EXPORT_SYMBOL_PRESENT_memory_block_size_bytes + *memblock_size = memory_block_size_bytes(); + return NV_OK; +#endif + if (nv_ctl_device.numa_memblock_size == 0) + return NV_ERR_INVALID_STATE; + *memblock_size = nv_ctl_device.numa_memblock_size; + return NV_OK; +} + +NV_STATUS NV_API_CALL os_open_temporary_file +( + void **ppFile +) +{ +#if NV_FILESYSTEM_ACCESS_AVAILABLE +#if defined(O_TMPFILE) + struct file *file; + const char *default_path = "/tmp"; + const int flags = O_TMPFILE | O_LARGEFILE | O_RDWR; + const char *path = NVreg_TemporaryFilePath; + + /* + * The filp_open() call below depends on the current task's fs_struct + * (current->fs), which may already be NULL if this is called during + * process teardown. + */ + if (current->fs == NULL) + { + return NV_ERR_OPERATING_SYSTEM; + } + + if (!path) + { + path = default_path; + } + + file = filp_open(path, flags, 0); + if (IS_ERR(file)) + { + if ((path != default_path) && (PTR_ERR(file) == -ENOENT)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: The temporary file path specified via the NVreg_TemporaryFilePath\n" + "NVRM: module parameter does not exist. 
Defaulting to /tmp.\n"); + + file = filp_open(default_path, flags, 0); + } + } + + if (IS_ERR(file)) + { + return NV_ERR_OPERATING_SYSTEM; + } + + *ppFile = (void *)file; + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +void NV_API_CALL os_close_file +( + void *pFile +) +{ +#if NV_FILESYSTEM_ACCESS_AVAILABLE + filp_close(pFile, NULL); +#endif +} + +#define NV_MAX_NUM_FILE_IO_RETRIES 10 + +NV_STATUS NV_API_CALL os_write_file +( + void *pFile, + NvU8 *pBuffer, + NvU64 size, + NvU64 offset +) +{ +#if NV_FILESYSTEM_ACCESS_AVAILABLE + loff_t f_pos = offset; + ssize_t num_written; + int num_retries = NV_MAX_NUM_FILE_IO_RETRIES; + +retry: + num_written = kernel_write(pFile, pBuffer, size, &f_pos); + if (num_written < 0) + { + return NV_ERR_OPERATING_SYSTEM; + } + else if (num_written < size) + { + if (num_written > 0) + { + pBuffer += num_written; + size -= num_written; + } + if (--num_retries > 0) + { + cond_resched(); + goto retry; + } + return NV_ERR_OPERATING_SYSTEM; + } + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NV_STATUS NV_API_CALL os_read_file +( + void *pFile, + NvU8 *pBuffer, + NvU64 size, + NvU64 offset +) +{ +#if NV_FILESYSTEM_ACCESS_AVAILABLE + loff_t f_pos = offset; + ssize_t num_read; + int num_retries = NV_MAX_NUM_FILE_IO_RETRIES; + +retry: + num_read = kernel_read(pFile, pBuffer, size, &f_pos); + if (num_read < 0) + { + return NV_ERR_OPERATING_SYSTEM; + } + else if (num_read < size) + { + if (num_read > 0) + { + pBuffer += num_read; + size -= num_read; + } + if (--num_retries > 0) + { + cond_resched(); + goto retry; + } + return NV_ERR_OPERATING_SYSTEM; + } + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NV_STATUS NV_API_CALL os_open_readonly_file +( + const char *filename, + void **ppFile +) +{ +#if NV_FILESYSTEM_ACCESS_AVAILABLE + struct file *file; + + /* + * The filp_open() call below depends on the current task's fs_struct + * (current->fs), which may already be NULL if this is called during + * process teardown. + */ + if (current->fs == NULL) + { + return NV_ERR_OPERATING_SYSTEM; + } + + file = filp_open(filename, O_RDONLY, 0); + if (IS_ERR(file)) + { + return NV_ERR_OPERATING_SYSTEM; + } + + *ppFile = (void *)file; + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NV_STATUS NV_API_CALL os_open_and_read_file +( + const char *filename, + NvU8 *buf, + NvU64 count +) +{ + void *fileHandle; + NV_STATUS status; + + status = os_open_readonly_file(filename, &fileHandle); + if (status != NV_OK) + { + return status; + } + + status = os_read_file(fileHandle, buf, count, 0); + + os_close_file(fileHandle); + + return status; +} + +NvBool NV_API_CALL os_is_nvswitch_present(void) +{ + struct pci_device_id nvswitch_pci_table[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), + .class = PCI_CLASS_BRIDGE_OTHER << 8, + .class_mask = PCI_ANY_ID + }, + {0} + }; + + return !!pci_dev_present(nvswitch_pci_table); +} + +/* + * This function may sleep (interruptible). 
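+ *
+ * wait_for_random_bytes() blocks until the kernel's CRNG is initialized
+ * and returns a negative value if the wait is interrupted by a signal,
+ * which is surfaced to the caller as NV_ERR_NOT_READY.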
+ */ +NV_STATUS NV_API_CALL os_get_random_bytes +( + NvU8 *bytes, + NvU16 numBytes +) +{ + if (wait_for_random_bytes() < 0) + return NV_ERR_NOT_READY; + + get_random_bytes(bytes, numBytes); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_alloc_wait_queue +( + os_wait_queue **wq +) +{ + NV_KMALLOC(*wq, sizeof(os_wait_queue)); + if (*wq == NULL) + return NV_ERR_NO_MEMORY; + + init_completion(&(*wq)->q); + + return NV_OK; +} + +void NV_API_CALL os_free_wait_queue +( + os_wait_queue *wq +) +{ + NV_KFREE(wq, sizeof(os_wait_queue)); +} + +void NV_API_CALL os_wait_uninterruptible +( + os_wait_queue *wq +) +{ + wait_for_completion(&wq->q); +} + +void NV_API_CALL os_wait_interruptible +( + os_wait_queue *wq +) +{ + wait_for_completion_interruptible(&wq->q); +} + +void NV_API_CALL os_wake_up +( + os_wait_queue *wq +) +{ + complete_all(&wq->q); +} + +static bool os_platform_is_fpga(void) +{ + const struct soc_device_attribute soc_attrs[] = { + { .revision = "*FPGA" }, + {/* sentinel */} + }; + + if (soc_device_match(soc_attrs)) { + return true; + } + + return false; +} + +static bool os_platform_is_vdk(void) +{ + const struct soc_device_attribute soc_attrs[] = { + { .revision = "VDK" }, + {/* sentinel */} + }; + + if (soc_device_match(soc_attrs)) { + return true; + } + + return false; +} + + NV_STATUS NV_API_CALL os_get_tegra_platform +( + NvU32 *mode +) +{ +#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE + if (os_platform_is_fpga()) + { + *mode = NV_OS_TEGRA_PLATFORM_FPGA; + } + else if (os_platform_is_vdk()) + { + *mode = NV_OS_TEGRA_PLATFORM_SIM; + } + else + { + *mode = NV_OS_TEGRA_PLATFORM_SILICON; + } + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +nv_cap_t* NV_API_CALL os_nv_cap_init +( + const char *path +) +{ + return nv_cap_init(path); +} + +nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry +( + nv_cap_t *parent_cap, + const char *name, + int mode +) +{ + return nv_cap_create_dir_entry(parent_cap, name, mode); +} + +nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry +( + nv_cap_t *parent_cap, + const char *name, + int mode +) +{ + return nv_cap_create_file_entry(parent_cap, name, mode); +} + +void NV_API_CALL os_nv_cap_destroy_entry +( + nv_cap_t *cap +) +{ + nv_cap_destroy_entry(cap); +} + +int NV_API_CALL os_nv_cap_validate_and_dup_fd +( + const nv_cap_t *cap, + int fd +) +{ + return nv_cap_validate_and_dup_fd(cap, fd); +} + +void NV_API_CALL os_nv_cap_close_fd +( + int fd +) +{ + nv_cap_close_fd(fd); +} + +NvS32 NV_API_CALL os_imex_channel_count +( + void +) +{ + return nv_caps_imex_channel_count(); +} + +NvS32 NV_API_CALL os_imex_channel_get +( + NvU64 descriptor +) +{ + return nv_caps_imex_channel_get((int)descriptor); +} + +NV_STATUS NV_API_CALL os_tegra_igpu_perf_boost +( + void *handle, + NvBool enable, + NvU32 duration +) +{ +#if defined(CONFIG_PM_DEVFREQ) && defined(NV_UPDATE_DEVFREQ_PRESENT) + nv_state_t *nv = handle; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int err; + + if (enable) + { + if (nvl->devfreq_enable_boost == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + err = nvl->devfreq_enable_boost(nvl->dev, duration); + if (err != 0) + { + return NV_ERR_OPERATING_SYSTEM; + } + } + else + { + if (nvl->devfreq_disable_boost == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + err = nvl->devfreq_disable_boost(nvl->dev); + if (err != 0) + { + return NV_ERR_OPERATING_SYSTEM; + } + } + + return NV_OK; +#else // !defined(CONFIG_PM_DEVFREQ) || !defined(NV_UPDATE_DEVFREQ_PRESENT) + return NV_ERR_NOT_SUPPORTED; +#endif +} + +/* + * Reads the total memory and 
free memory of a NUMA node from the kernel. + */ +NV_STATUS NV_API_CALL os_get_numa_node_memory_usage +( + NvS32 node_id, + NvU64 *free_memory_bytes, + NvU64 *total_memory_bytes +) +{ + struct pglist_data *pgdat; + struct zone *zone; + NvU32 zone_id; + + if (node_id >= MAX_NUMNODES) + { + nv_printf(NV_DBG_ERRORS, "Invalid NUMA node ID\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + pgdat = NODE_DATA(node_id); + + *free_memory_bytes = 0; + *total_memory_bytes = 0; + + for (zone_id = 0; zone_id < MAX_NR_ZONES; zone_id++) + { + zone = &(pgdat->node_zones[zone_id]); + if (!populated_zone(zone)) + continue; + *free_memory_bytes += (zone_page_state_snapshot(zone, NR_FREE_PAGES) * PAGE_SIZE); + *total_memory_bytes += (zone->present_pages * PAGE_SIZE); + } + + return NV_OK; +} + +typedef struct os_numa_gpu_mem_hotplug_notifier_s +{ + NvU64 start_pa; + NvU64 size; + nv_pci_info_t pci_info; + struct notifier_block memory_notifier; +} os_numa_gpu_mem_hotplug_notifier_t; + +static int os_numa_verify_gpu_memory_zone(struct notifier_block *nb, + unsigned long action, void *data) +{ + os_numa_gpu_mem_hotplug_notifier_t *notifier = container_of(nb, + os_numa_gpu_mem_hotplug_notifier_t, + memory_notifier); + struct memory_notify *mhp = data; + NvU64 start_pa = PFN_PHYS(mhp->start_pfn); + NvU64 size = PFN_PHYS(mhp->nr_pages); + + if (action == MEM_GOING_ONLINE) + { + // Check if onlining memory falls in the GPU memory range + if ((start_pa >= notifier->start_pa) && + (start_pa + size) <= (notifier->start_pa + notifier->size)) + { + /* + * Verify GPU memory NUMA node has memory only in ZONE_MOVABLE before + * onlining the memory so that incorrect auto online setting doesn't + * cause the memory onlined in a zone where kernel allocations + * could happen, resulting in GPU memory hot unpluggable and requiring + * system reboot. + */ + if (page_zonenum((pfn_to_page(mhp->start_pfn))) != ZONE_MOVABLE) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Failing GPU memory onlining as the onlining zone " + "is not movable. 
pa: 0x%llx size: 0x%llx\n" + "NVRM: The NVIDIA GPU %04x:%02x:%02x.%x installed in the system\n" + "NVRM: requires auto onlining mode online_movable enabled in\n" + "NVRM: /sys/devices/system/memory/auto_online_blocks\n", + start_pa, size, notifier->pci_info.domain, notifier->pci_info.bus, + notifier->pci_info.slot, notifier->pci_info.function); + return NOTIFY_BAD; + } + } + } + return NOTIFY_OK; +} + +#define ADD_REMOVE_GPU_MEMORY_NUM_SEGMENTS 4 + +NV_STATUS NV_API_CALL os_numa_add_gpu_memory +( + void *handle, + NvU64 offset, + NvU64 size, + NvU32 *nodeId +) +{ +#if defined(NV_ADD_MEMORY_DRIVER_MANAGED_PRESENT) + int node = 0; + nv_linux_state_t *nvl = pci_get_drvdata(handle); + nv_state_t *nv = NV_STATE_PTR(nvl); + NvU64 base = offset + nvl->coherent_link_info.gpu_mem_pa; + int ret = 0; + NvU64 memblock_size; + NvU64 size_remaining; + NvU64 calculated_segment_size; + NvU64 segment_size; + NvU64 segment_base; + os_numa_gpu_mem_hotplug_notifier_t notifier = + { + .start_pa = base, + .size = size, + .pci_info = nv->pci_info, + .memory_notifier.notifier_call = os_numa_verify_gpu_memory_zone, + }; + + if (nodeId == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (bitmap_empty(nvl->coherent_link_info.free_node_bitmap, MAX_NUMNODES)) + { + return NV_ERR_IN_USE; + } + node = find_first_bit(nvl->coherent_link_info.free_node_bitmap, MAX_NUMNODES); + if (node == MAX_NUMNODES) + { + return NV_ERR_INVALID_STATE; + } + + NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_ONLINE_IN_PROGRESS); + + ret = register_memory_notifier(¬ifier.memory_notifier); + if (ret) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Memory hotplug notifier registration failed\n"); + goto failed; + } + + // + // Adding all memory at once can take a long time. Split up memory into segments + // with schedule() in between to prevent soft lockups. Memory segments for + // add_memory_driver_managed() need to be aligned to memblock size. + // + // If there are any issues splitting into segments, then add all memory at once. 
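+    //
+    // For example, assuming a 64 GB GPU memory carve-out and a 128 MB
+    // memblock size, the segment size works out to
+    // NV_ALIGN_UP(64 GB / 4, 128 MB) = 16 GB, i.e. four
+    // add_memory_driver_managed() calls with a schedule() in between.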
+ // + if (os_numa_memblock_size(&memblock_size) == NV_OK) + { + calculated_segment_size = NV_ALIGN_UP(size / ADD_REMOVE_GPU_MEMORY_NUM_SEGMENTS, memblock_size); + } + else + { + // Don't split into segments, add all memory at once + calculated_segment_size = size; + } + + segment_size = calculated_segment_size; + segment_base = base; + size_remaining = size; + + while ((size_remaining > 0) && + (ret == 0)) + { + if (segment_size > size_remaining) + { + segment_size = size_remaining; + } + +#ifdef NV_ADD_MEMORY_DRIVER_MANAGED_HAS_MHP_FLAGS_ARG + ret = add_memory_driver_managed(node, segment_base, segment_size, "System RAM (NVIDIA)", MHP_NONE); +#else + ret = add_memory_driver_managed(node, segment_base, segment_size, "System RAM (NVIDIA)"); +#endif + nv_printf(NV_DBG_SETUP, "NVRM: add_memory_driver_managed() returns: %d for segment_base: 0x%llx, segment_size: 0x%llx\n", + ret, segment_base, segment_size); + + segment_base += segment_size; + size_remaining -= segment_size; + + // Yield CPU to prevent soft lockups + schedule(); + } + unregister_memory_notifier(¬ifier.memory_notifier); + + if (ret == 0) + { + struct zone *zone = &NODE_DATA(node)->node_zones[ZONE_MOVABLE]; + NvU64 start_pfn = base >> PAGE_SHIFT; + NvU64 end_pfn = (base + size) >> PAGE_SHIFT; + + /* Verify the full GPU memory range passed on is onlined */ + if (zone->zone_start_pfn != start_pfn || + zone_end_pfn(zone) != end_pfn) + { + nv_printf(NV_DBG_ERRORS, "NVRM: GPU memory zone movable auto onlining failed!\n"); + +#ifdef NV_OFFLINE_AND_REMOVE_MEMORY_PRESENT + // Since zone movable auto onlining failed, need to remove the added memory. + segment_size = calculated_segment_size; + segment_base = base; + size_remaining = size; + + while (size_remaining > 0) + { + if (segment_size > size_remaining) + { + segment_size = size_remaining; + } + +#ifdef NV_REMOVE_MEMORY_HAS_NID_ARG + ret = offline_and_remove_memory(node, segment_base, segment_size); +#else + ret = offline_and_remove_memory(segment_base, segment_size); +#endif + nv_printf(NV_DBG_SETUP, "NVRM: offline_and_remove_memory() returns: %d for segment_base: 0x%llx, segment_size: 0x%llx\n", + ret, segment_base, segment_size); + + segment_base += segment_size; + size_remaining -= segment_size; + + // Yield CPU to prevent soft lockups + schedule(); + } +#endif + goto failed; + } + + /* + * On systems with cpuset cgroup controller enabled, memory alloc on + * this just hotplugged GPU memory node can fail if the + * cpuset_hotplug_work is not scheduled yet. cpuset_hotplug_work is + * where the current->mems_allowed is updated in the path + * cpuset_hotplug_workfn->update_tasks_nodemask. When cpuset is + * enabled and current->mems_allowed is not updated, memory allocation + * with __GFP_THISNODE and this node id fails. cpuset_wait_for_hotplug + * kernel function can be used to wait for the work to finish but that + * is not exported. Adding a time loop to wait for + * current->mems_allowed to be updated as a WAR while an upstream + * kernel fix is being explored. 
Bug 4385903
+         */
+        if (!node_isset(node, cpuset_current_mems_allowed))
+        {
+            unsigned long delay;
+
+            delay = jiffies + (HZ / 10); // 100ms
+            while (time_before(jiffies, delay) &&
+                   !node_isset(node, cpuset_current_mems_allowed))
+            {
+                os_schedule();
+            }
+
+            if (!node_isset(node, cpuset_current_mems_allowed))
+            {
+                nv_printf(NV_DBG_ERRORS, "NVRM: Hotplugged GPU memory NUMA node: %d "
+                          "not set in current->mems_allowed!\n", node);
+            }
+        }
+
+        *nodeId = node;
+        clear_bit(node, nvl->coherent_link_info.free_node_bitmap);
+        NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_ONLINE);
+        return NV_OK;
+    }
+    nv_printf(NV_DBG_ERRORS, "NVRM: Memory add failed. base: 0x%llx size: 0x%llx ret: %d\n",
+              base, size, ret);
+failed:
+    NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_ONLINE_FAILED);
+    return NV_ERR_OPERATING_SYSTEM;
+#endif
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+typedef struct {
+    NvU64 base;
+    NvU64 size;
+    NvU32 nodeId;
+    int ret;
+} remove_numa_memory_info_t;
+
+static void offline_numa_memory_callback
+(
+    void *args
+)
+{
+#ifdef NV_OFFLINE_AND_REMOVE_MEMORY_PRESENT
+    remove_numa_memory_info_t *pNumaInfo = (remove_numa_memory_info_t *)args;
+    int ret = 0;
+    NvU64 memblock_size;
+    NvU64 size_remaining;
+    NvU64 calculated_segment_size;
+    NvU64 segment_size;
+    NvU64 segment_base;
+
+    //
+    // Removing all memory at once can take a long time. Split up memory into segments
+    // with schedule() in between to prevent soft lockups. Memory segments for
+    // offline_and_remove_memory() need to be aligned to memblock size.
+    //
+    // If there are any issues splitting into segments, then remove all memory at once.
+    //
+    if (os_numa_memblock_size(&memblock_size) == NV_OK)
+    {
+        calculated_segment_size = NV_ALIGN_UP(pNumaInfo->size / ADD_REMOVE_GPU_MEMORY_NUM_SEGMENTS, memblock_size);
+    }
+    else
+    {
+        // Don't split into segments, remove all memory at once
+        calculated_segment_size = pNumaInfo->size;
+    }
+
+    segment_size = calculated_segment_size;
+    segment_base = pNumaInfo->base;
+    size_remaining = pNumaInfo->size;
+
+    while (size_remaining > 0)
+    {
+        if (segment_size > size_remaining)
+        {
+            segment_size = size_remaining;
+        }
+
+#ifdef NV_REMOVE_MEMORY_HAS_NID_ARG
+        ret = offline_and_remove_memory(pNumaInfo->nodeId,
+                                        segment_base,
+                                        segment_size);
+#else
+        ret = offline_and_remove_memory(segment_base,
+                                        segment_size);
+#endif
+        nv_printf(NV_DBG_SETUP, "NVRM: offline_and_remove_memory() returns: %d for segment_base: 0x%llx, segment_size: 0x%llx\n",
+                  ret, segment_base, segment_size);
+        pNumaInfo->ret |= ret;
+
+        segment_base += segment_size;
+        size_remaining -= segment_size;
+
+        // Yield CPU to prevent soft lockups
+        schedule();
+    }
+#endif
+}
+
+NV_STATUS NV_API_CALL os_numa_remove_gpu_memory
+(
+    void *handle,
+    NvU64 offset,
+    NvU64 size,
+    NvU32 nodeId
+)
+{
+#ifdef NV_ADD_MEMORY_DRIVER_MANAGED_PRESENT
+    nv_linux_state_t *nvl = pci_get_drvdata(handle);
+#ifdef NV_OFFLINE_AND_REMOVE_MEMORY_PRESENT
+    NvU64 base = offset + nvl->coherent_link_info.gpu_mem_pa;
+    remove_numa_memory_info_t numa_info;
+    nv_kthread_q_item_t remove_numa_memory_q_item;
+    int ret;
+#endif
+
+    if (nodeId >= MAX_NUMNODES)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+    if ((nodeId == NUMA_NO_NODE) || test_bit(nodeId, nvl->coherent_link_info.free_node_bitmap))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_OFFLINE_IN_PROGRESS);
+
+#ifdef NV_OFFLINE_AND_REMOVE_MEMORY_PRESENT
+    numa_info.base   = base;
+    numa_info.size   = size;
+    numa_info.nodeId = nodeId;
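+    // offline_numa_memory_callback() below ORs each segment's result into
+    // this field, so it must start at zero.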
+    numa_info.ret    = 0;
+
+    nv_kthread_q_item_init(&remove_numa_memory_q_item,
+                           offline_numa_memory_callback,
+                           &numa_info);
+    nv_kthread_q_schedule_q_item(&nvl->remove_numa_memory_q,
+                                 &remove_numa_memory_q_item);
+    nv_kthread_q_flush(&nvl->remove_numa_memory_q);
+
+    ret = numa_info.ret;
+
+    if (ret == 0)
+    {
+        set_bit(nodeId, nvl->coherent_link_info.free_node_bitmap);
+
+        NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_OFFLINE);
+        return NV_OK;
+    }
+
+    nv_printf(NV_DBG_ERRORS, "NVRM: Memory remove failed. base: 0x%llx size: 0x%llx ret: %d\n",
+              base, size, ret);
+#endif
+    NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_OFFLINE_FAILED);
+    return NV_ERR_OPERATING_SYSTEM;
+#endif
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL os_offline_page_at_address
+(
+    NvU64 address
+)
+{
+#if defined(CONFIG_MEMORY_FAILURE)
+    int flags = 0;
+    NvU64 pfn;
+    struct page *page = NV_GET_PAGE_STRUCT(address);
+
+    if (page == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Failed to get page struct for address: 0x%llx\n",
+                  address);
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    pfn = page_to_pfn(page);
+
+#ifdef NV_MEMORY_FAILURE_MF_SW_SIMULATED_DEFINED
+    //
+    // Set MF_SW_SIMULATED flag so Linux kernel can differentiate this from a HW
+    // memory failure. HW memory failures cannot be unset via unpoison_memory() API.
+    //
+    // Currently, RM does not use unpoison_memory(), so it makes no difference
+    // whether or not MF_SW_SIMULATED is set. Regardless, it is semantically more
+    // correct to set MF_SW_SIMULATED.
+    //
+    flags |= MF_SW_SIMULATED;
+#endif
+
+    nv_printf(NV_DBG_INFO, "NVRM: offlining page at address: 0x%llx pfn: 0x%llx\n",
+              address, pfn);
+
+#ifdef NV_MEMORY_FAILURE_QUEUE_HAS_TRAPNO_ARG
+    memory_failure_queue(pfn, 0, flags);
+#else
+    memory_failure_queue(pfn, flags);
+#endif
+
+    return NV_OK;
+#else // !defined(CONFIG_MEMORY_FAILURE)
+    nv_printf(NV_DBG_ERRORS, "NVRM: memory_failure_queue() not supported by kernel. page offlining failed. address: 0x%llx\n",
+              address);
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+void* NV_API_CALL os_get_pid_info(void)
+{
+#if defined(NV_HAS_ENUM_PIDTYPE_TGID)
+    return get_task_pid(current, PIDTYPE_TGID);
+#else
+    return get_task_pid(current->group_leader, PIDTYPE_PID);
+#endif
+}
+
+void NV_API_CALL os_put_pid_info(void *pid_info)
+{
+    if (pid_info != NULL)
+        put_pid(pid_info);
+}
+
+NV_STATUS NV_API_CALL os_find_ns_pid(void *pid_info, NvU32 *ns_pid)
+{
+    if ((pid_info == NULL) || (ns_pid == NULL))
+        return NV_ERR_INVALID_ARGUMENT;
+
+    *ns_pid = pid_vnr((struct pid *)pid_info);
+
+    // The call returns 0 if the PID is not found in the current ns
+    if (*ns_pid == 0)
+        return NV_ERR_OBJECT_NOT_FOUND;
+
+    return NV_OK;
+}
+
+NvBool NV_API_CALL os_is_init_ns(void)
+{
+    return (task_active_pid_ns(current) == &init_pid_ns);
+}
+
+NV_STATUS NV_API_CALL os_device_vm_present(void)
+{
+#if defined(NV_DEVICE_VM_BUILD)
+    return NV_OK;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
diff --git a/kernel-open/nvidia/os-mlock.c b/kernel-open/nvidia/os-mlock.c
new file mode 100644
index 0000000..e32b9b7
--- /dev/null
+++ b/kernel-open/nvidia/os-mlock.c
@@ -0,0 +1,331 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +#if defined(NVCPU_FAMILY_X86) && defined(NV_FOLL_LONGTERM_PRESENT) && \ + (defined(NV_PIN_USER_PAGES_HAS_ARGS_VMAS) || \ + defined(NV_GET_USER_PAGES_HAS_VMAS_ARG)) +#define NV_NUM_PIN_PAGES_PER_ITERATION 0x80000 +#endif + +static inline int nv_follow_flavors(struct vm_area_struct *vma, + unsigned long address, + unsigned long *pfn) +{ +#if NV_IS_EXPORT_SYMBOL_PRESENT_follow_pfnmap_start + struct follow_pfnmap_args args = {}; + int rc; + + args.address = address; + args.vma = vma; + + rc = follow_pfnmap_start(&args); + if (rc) + return rc; + + *pfn = args.pfn; + + follow_pfnmap_end(&args); + + return 0; +#elif NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte + int status = 0; + spinlock_t *ptl; + pte_t *ptep; + + if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) + return status; + + // + // The first argument of follow_pte() was changed from + // mm_struct to vm_area_struct in kernel 6.10. + // +#if defined(NV_FOLLOW_PTE_ARG1_VMA) + status = follow_pte(vma, address, &ptep, &ptl); +#else + status = follow_pte(vma->vm_mm, address, &ptep, &ptl); +#endif + if (status) + return status; + +#if defined(NV_PTEP_GET_PRESENT) + *pfn = pte_pfn(ptep_get(ptep)); +#else + *pfn = pte_pfn(READ_ONCE(*ptep)); +#endif + + // The lock is acquired inside follow_pte() + pte_unmap_unlock(ptep, ptl); + return 0; +#else + return -1; +#endif // NV_IS_EXPORT_SYMBOL_PRESENT_follow_pfnmap_start +} + +static inline int nv_follow_pfn(struct vm_area_struct *vma, + unsigned long address, + unsigned long *pfn) +{ +#if defined(NV_FOLLOW_PFN_PRESENT) + return follow_pfn(vma, address, pfn); +#else + return nv_follow_flavors(vma, address, pfn); +#endif +} + +/*! + * @brief Locates the PFNs for a user IO address range, and converts those to + * their associated PTEs. + * + * @param[in] vma VMA that contains the virtual address range given by the + * start and page count parameters. + * @param[in] start Beginning of the virtual address range of the IO PTEs. + * @param[in] page_count Number of pages containing the IO range being + * mapped. + * @param[in,out] pte_array Storage array for PTE addresses. Must be large + * enough to contain at least page_count pointers. + * + * @return NV_OK if the PTEs were identified successfully, error otherwise. 
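+ *
+ * Note: each pte_array entry holds a physical address (pfn << PAGE_SHIFT);
+ * the underlying PFN range must be physically contiguous, otherwise
+ * NV_ERR_INVALID_ADDRESS is returned.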
+ */
+static NV_STATUS get_io_ptes(struct vm_area_struct *vma,
+                             NvUPtr start,
+                             NvU64 page_count,
+                             NvU64 **pte_array)
+{
+    NvU64 i;
+    unsigned long pfn;
+
+    for (i = 0; i < page_count; i++)
+    {
+        if (nv_follow_pfn(vma, (start + (i * PAGE_SIZE)), &pfn) < 0)
+        {
+            return NV_ERR_INVALID_ADDRESS;
+        }
+
+        pte_array[i] = (NvU64 *)(pfn << PAGE_SHIFT);
+
+        if (i == 0)
+            continue;
+
+        //
+        // This interface is to be used for contiguous, uncacheable I/O regions.
+        // Internally, osCreateOsDescriptorFromIoMemory() checks the user-provided
+        // flags against this, and creates a single memory descriptor with the same
+        // attributes. This check ensures the actual mapping supplied matches the
+        // user's declaration: the PFNs must represent a contiguous range, and we
+        // error out if they do not.
+        //
+        if ((NvU64)pte_array[i] != (((NvU64)pte_array[i-1]) + PAGE_SIZE))
+        {
+            return NV_ERR_INVALID_ADDRESS;
+        }
+    }
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_lookup_user_io_memory(
+    void   *address,
+    NvU64   page_count,
+    NvU64 **pte_array
+)
+{
+    NV_STATUS rmStatus;
+    struct mm_struct *mm = current->mm;
+    struct vm_area_struct *vma;
+    unsigned long pfn;
+    NvUPtr start = (NvUPtr)address;
+    void **result_array;
+
+    if (!NV_MAY_SLEEP())
+    {
+        nv_printf(NV_DBG_ERRORS,
+            "NVRM: %s(): invalid context!\n", __FUNCTION__);
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    rmStatus = os_alloc_mem((void **)&result_array, (page_count * sizeof(NvP64)));
+    if (rmStatus != NV_OK)
+    {
+        nv_printf(NV_DBG_ERRORS,
+            "NVRM: failed to allocate page table!\n");
+        return rmStatus;
+    }
+
+    nv_mmap_read_lock(mm);
+
+    // Find the first VMA which intersects the interval [start, start+1).
+    vma = find_vma_intersection(mm, start, start+1);
+
+    // Verify that the given address range is contained in a single vma
+    if ((vma == NULL) || ((vma->vm_flags & (VM_IO | VM_PFNMAP)) == 0) ||
+            !((vma->vm_start <= start) &&
+              ((vma->vm_end - start) >> PAGE_SHIFT >= page_count)))
+    {
+        nv_printf(NV_DBG_ERRORS,
+                "Cannot map memory with base addr 0x%llx and size of 0x%llx pages\n",
+                start, page_count);
+        rmStatus = NV_ERR_INVALID_ADDRESS;
+        goto done;
+    }
+
+    if (nv_follow_pfn(vma, start, &pfn) < 0)
+    {
+        rmStatus = NV_ERR_INVALID_ADDRESS;
+        goto done;
+    }
+
+    rmStatus = get_io_ptes(vma, start, page_count, (NvU64 **)result_array);
+    if (rmStatus == NV_OK)
+        *pte_array = (NvU64 *)result_array;
+
+done:
+    nv_mmap_read_unlock(mm);
+
+    if (rmStatus != NV_OK)
+    {
+        os_free_mem(result_array);
+    }
+
+    return rmStatus;
+}
+
+NV_STATUS NV_API_CALL os_lock_user_pages(
+    void   *address,
+    NvU64   page_count,
+    void  **page_array,
+    NvU32   flags
+)
+{
+    NV_STATUS rmStatus;
+    struct mm_struct *mm = current->mm;
+    struct page **user_pages;
+    NvU64 i;
+    NvU64 npages = page_count;
+    NvU64 pinned = 0;
+    unsigned int gup_flags = DRF_VAL(_LOCK_USER_PAGES, _FLAGS, _WRITE, flags) ?
FOLL_WRITE : 0; + long ret; + +#if defined(NVCPU_FAMILY_X86) && defined(NV_FOLL_LONGTERM_PRESENT) + gup_flags |= FOLL_LONGTERM; +#endif + + if (!NV_MAY_SLEEP()) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %s(): invalid context!\n", __FUNCTION__); + return NV_ERR_NOT_SUPPORTED; + } + + rmStatus = os_alloc_mem((void **)&user_pages, + (page_count * sizeof(*user_pages))); + if (rmStatus != NV_OK) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to allocate page table!\n"); + return rmStatus; + } + + nv_mmap_read_lock(mm); + ret = NV_PIN_USER_PAGES((unsigned long)address, + npages, gup_flags, user_pages); + if (ret > 0) + { + pinned = ret; + } +#if defined(NVCPU_FAMILY_X86) && defined(NV_FOLL_LONGTERM_PRESENT) && \ + (defined(NV_PIN_USER_PAGES_HAS_ARGS_VMAS) || \ + defined(NV_GET_USER_PAGES_HAS_VMAS_ARG)) + // + // NV_PIN_USER_PAGES() passes in NULL for the vmas parameter (if required) + // in pin_user_pages() (or get_user_pages() if pin_user_pages() does not + // exist). For kernels which do not contain the commit 52650c8b466b + // (mm/gup: remove the vma allocation from gup_longterm_locked()), if + // FOLL_LONGTERM is passed in, this results in the kernel trying to kcalloc + // the vmas array, and since the limit for kcalloc is 4 MB, it results in + // NV_PIN_USER_PAGES() failing with ENOMEM if more than + // NV_NUM_PIN_PAGES_PER_ITERATION pages are requested on 64-bit systems. + // + // As a workaround, if we requested more than + // NV_NUM_PIN_PAGES_PER_ITERATION pages and failed with ENOMEM, try again + // with multiple calls of NV_NUM_PIN_PAGES_PER_ITERATION pages at a time. + // + else if ((ret == -ENOMEM) && + (page_count > NV_NUM_PIN_PAGES_PER_ITERATION)) + { + for (pinned = 0; pinned < page_count; pinned += ret) + { + npages = page_count - pinned; + if (npages > NV_NUM_PIN_PAGES_PER_ITERATION) + { + npages = NV_NUM_PIN_PAGES_PER_ITERATION; + } + + ret = NV_PIN_USER_PAGES(((unsigned long) address) + (pinned * PAGE_SIZE), + npages, gup_flags, &user_pages[pinned]); + if (ret <= 0) + { + break; + } + } + } +#endif + nv_mmap_read_unlock(mm); + + if (pinned < page_count) + { + for (i = 0; i < pinned; i++) + NV_UNPIN_USER_PAGE(user_pages[i]); + os_free_mem(user_pages); + return NV_ERR_INVALID_ADDRESS; + } + + *page_array = user_pages; + + return NV_OK; +} + +NV_STATUS NV_API_CALL os_unlock_user_pages( + NvU64 page_count, + void *page_array, + NvU32 flags +) +{ + NvBool write = FLD_TEST_DRF(_LOCK_USER_PAGES, _FLAGS, _WRITE, _YES, flags); + struct page **user_pages = page_array; + NvU32 i; + + for (i = 0; i < page_count; i++) + { + if (write) + set_page_dirty_lock(user_pages[i]); + NV_UNPIN_USER_PAGE(user_pages[i]); + } + + os_free_mem(user_pages); + + return NV_OK; +} diff --git a/kernel-open/nvidia/os-pci.c b/kernel-open/nvidia/os-pci.c new file mode 100644 index 0000000..d16d2f6 --- /dev/null +++ b/kernel-open/nvidia/os-pci.c @@ -0,0 +1,225 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +void* NV_API_CALL os_pci_init_handle( + NvU32 domain, + NvU8 bus, + NvU8 slot, + NvU8 function, + NvU16 *vendor, + NvU16 *device +) +{ + struct pci_dev *dev; + unsigned int devfn = PCI_DEVFN(slot, function); + + if (!NV_MAY_SLEEP()) + return NULL; + + dev = NV_GET_DOMAIN_BUS_AND_SLOT(domain, bus, devfn); + if (dev != NULL) + { + if (vendor) *vendor = dev->vendor; + if (device) *device = dev->device; + pci_dev_put(dev); /* TODO: Fix me! (hotplug) */ + } + return (void *) dev; +} + +NV_STATUS NV_API_CALL os_pci_read_byte( + void *handle, + NvU32 offset, + NvU8 *pReturnValue +) +{ + if (offset >= NV_PCIE_CFG_MAX_OFFSET) + { + *pReturnValue = 0xff; + return NV_ERR_NOT_SUPPORTED; + } + pci_read_config_byte( (struct pci_dev *) handle, offset, pReturnValue); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_pci_read_word( + void *handle, + NvU32 offset, + NvU16 *pReturnValue +) +{ + if (offset >= NV_PCIE_CFG_MAX_OFFSET) + { + *pReturnValue = 0xffff; + return NV_ERR_NOT_SUPPORTED; + } + pci_read_config_word( (struct pci_dev *) handle, offset, pReturnValue); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_pci_read_dword( + void *handle, + NvU32 offset, + NvU32 *pReturnValue +) +{ + if (offset >= NV_PCIE_CFG_MAX_OFFSET) + { + *pReturnValue = 0xffffffff; + return NV_ERR_NOT_SUPPORTED; + } + pci_read_config_dword( (struct pci_dev *) handle, offset, pReturnValue); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_pci_write_byte( + void *handle, + NvU32 offset, + NvU8 value +) +{ + if (offset >= NV_PCIE_CFG_MAX_OFFSET) + return NV_ERR_NOT_SUPPORTED; + + pci_write_config_byte( (struct pci_dev *) handle, offset, value); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_pci_write_word( + void *handle, + NvU32 offset, + NvU16 value +) +{ + if (offset >= NV_PCIE_CFG_MAX_OFFSET) + return NV_ERR_NOT_SUPPORTED; + + pci_write_config_word( (struct pci_dev *) handle, offset, value); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_pci_write_dword( + void *handle, + NvU32 offset, + NvU32 value +) +{ + if (offset >= NV_PCIE_CFG_MAX_OFFSET) + return NV_ERR_NOT_SUPPORTED; + + pci_write_config_dword( (struct pci_dev *) handle, offset, value); + return NV_OK; +} + +NvBool NV_API_CALL os_pci_remove_supported(void) +{ + return NV_TRUE; +} + +void NV_API_CALL os_pci_remove( + void *handle +) +{ + pci_stop_and_remove_bus_device(handle); +} + 
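+/*
+ * Illustrative usage sketch (not part of the driver): the accessors above
+ * treat the opaque handle as a struct pci_dev pointer, so a hypothetical
+ * RM-side caller reading the standard revision ID register might look like:
+ *
+ *     NvU16 vendor, device;
+ *     NvU8  rev;
+ *     void *handle = os_pci_init_handle(0, 1, 0, 0, &vendor, &device);
+ *
+ *     if ((handle != NULL) &&
+ *         (os_pci_read_byte(handle, PCI_REVISION_ID, &rev) == NV_OK))
+ *     {
+ *         nv_printf(NV_DBG_INFO, "NVRM: revision 0x%02x\n", rev);
+ *     }
+ *
+ * Reads at or beyond NV_PCIE_CFG_MAX_OFFSET fail with NV_ERR_NOT_SUPPORTED
+ * and return an all-ones value, mirroring a config-space master abort.
+ */
+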
+NV_STATUS NV_API_CALL
+os_enable_pci_req_atomics(
+    void *handle,
+    enum os_pci_req_atomics_type type
+)
+{
+#ifdef NV_PCI_ENABLE_ATOMIC_OPS_TO_ROOT_PRESENT
+    int ret;
+    u16 val;
+
+    switch (type)
+    {
+        case OS_INTF_PCIE_REQ_ATOMICS_32BIT:
+            ret = pci_enable_atomic_ops_to_root(handle,
+                                                PCI_EXP_DEVCAP2_ATOMIC_COMP32);
+            break;
+        case OS_INTF_PCIE_REQ_ATOMICS_64BIT:
+            ret = pci_enable_atomic_ops_to_root(handle,
+                                                PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+            break;
+        case OS_INTF_PCIE_REQ_ATOMICS_128BIT:
+            ret = pci_enable_atomic_ops_to_root(handle,
+                                                PCI_EXP_DEVCAP2_ATOMIC_COMP128);
+            break;
+        default:
+            ret = -1;
+            break;
+    }
+
+    if (ret == 0)
+    {
+        /*
+         * GPUs that don't support Requester Atomics have their
+         * PCI_EXP_DEVCTL2_ATOMIC_REQ bit read back as 0 even after SW sets it.
+         */
+        if ((pcie_capability_read_word(handle, PCI_EXP_DEVCTL2, &val) == 0) &&
+            (val & PCI_EXP_DEVCTL2_ATOMIC_REQ))
+        {
+            return NV_OK;
+        }
+    }
+#endif
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+void NV_API_CALL os_pci_trigger_flr(void *handle)
+{
+    struct pci_dev *pdev = (struct pci_dev *) handle;
+    int ret;
+
+    ret = pci_save_state(pdev);
+    if (ret)
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "NVRM: %s() PCI save state failed, Skip FLR\n", __FUNCTION__);
+        return;
+    }
+#if defined(NV_PCIE_RESET_FLR_PRESENT)
+    // If PCI_RESET_DO_RESET is not defined in a particular kernel version,
+    // define it as 0. The value 0 triggers an actual reset of the device.
+#ifndef PCI_RESET_DO_RESET
+#define PCI_RESET_DO_RESET 0
+#endif
+    ret = pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
+    if (ret)
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "NVRM: %s() PCI FLR might have failed\n", __FUNCTION__);
+    }
+#else
+    nv_printf(NV_DBG_ERRORS,
+              "NVRM: %s() PCI FLR not supported\n", __FUNCTION__);
+#endif
+    pci_restore_state(pdev);
+    return;
+}
diff --git a/kernel-open/nvidia/os-registry.c b/kernel-open/nvidia/os-registry.c
new file mode 100644
index 0000000..14c0628
--- /dev/null
+++ b/kernel-open/nvidia/os-registry.c
@@ -0,0 +1,356 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2000-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+#define NV_DEFINE_REGISTRY_KEY_TABLE
+#include "os-interface.h"
+#include "nv-linux.h"
+#include "nv-reg.h"
+#include "nv-gpu-info.h"
+
+/*!
+ * @brief This function parses the PCI BDF identifier string and returns the
+ * Domain, Bus, Device and Function components from the PCI BDF string.
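+ * For example, the string "0000:01:00.0" yields domain 0x0, bus 0x1,
+ * slot 0x0 and function 0x0.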
+ *
+ * The parser accepts a PCI BDF string in any of the following three formats:
+ *
+ * 1) bus:slot            : Domain and function default to 0.
+ * 2) domain:bus:slot     : Function defaults to 0.
+ * 3) domain:bus:slot.func: Complete PCI dev id string.
+ *
+ * @param[in]  pci_dev_str String containing the BDF to be parsed.
+ * @param[out] pci_domain  Pointer where pci_domain is to be returned.
+ * @param[out] pci_bus     Pointer where pci_bus is to be returned.
+ * @param[out] pci_slot    Pointer where pci_slot is to be returned.
+ * @param[out] pci_func    Pointer where pci_func is to be returned.
+ *
+ * @return NV_OK on success, or an NV_STATUS error code otherwise.
+ */
+static NV_STATUS pci_str_to_bdf(char *pci_dev_str, NvU32 *pci_domain,
+    NvU32 *pci_bus, NvU32 *pci_slot, NvU32 *pci_func)
+{
+    char *option_string = NULL;
+    char *token, *string;
+    NvU32 domain, bus, slot;
+    NV_STATUS status = NV_OK;
+
+    //
+    // rm_remove_spaces() allocates memory, hence we need to keep a pointer
+    // to the returned string for freeing at the end of the function.
+    //
+    if ((option_string = rm_remove_spaces(pci_dev_str)) == NULL)
+    {
+        // memory allocation failed, returning
+        return NV_ERR_GENERIC;
+    }
+
+    string = option_string;
+
+    if (!strlen(string) || !pci_domain || !pci_bus || !pci_slot || !pci_func)
+    {
+        status = NV_ERR_INVALID_ARGUMENT;
+        goto done;
+    }
+
+    if ((token = strsep(&string, ".")) != NULL)
+    {
+        // A PCI device can have at most 8 functions (0-7).
+        if ((string != NULL) && (!(*string >= '0' && *string <= '7') ||
+            (strlen(string) > 1)))
+        {
+            nv_printf(NV_DBG_ERRORS,
+                      "NVRM: Invalid PCI function in token %s\n",
+                      pci_dev_str);
+            status = NV_ERR_INVALID_ARGUMENT;
+            goto done;
+        }
+        else if (string == NULL)
+        {
+            *pci_func = 0;
+        }
+        else
+        {
+            *pci_func = (NvU32)(*string - '0');
+        }
+
+        domain = simple_strtoul(token, &string, 16);
+
+        if ((string == NULL) || (*string != ':') || (*(string + 1) == '\0'))
+        {
+            nv_printf(NV_DBG_ERRORS,
+                      "NVRM: Invalid PCI domain/bus in token %s\n",
+                      pci_dev_str);
+            status = NV_ERR_INVALID_ARGUMENT;
+            goto done;
+        }
+
+        token = string;
+        bus = simple_strtoul((token + 1), &string, 16);
+
+        if (string == NULL)
+        {
+            nv_printf(NV_DBG_ERRORS,
+                      "NVRM: Invalid PCI bus/slot in token %s\n",
+                      pci_dev_str);
+            status = NV_ERR_INVALID_ARGUMENT;
+            goto done;
+        }
+
+        if (*string != '\0')
+        {
+            if ((*string != ':') || (*(string + 1) == '\0'))
+            {
+                nv_printf(NV_DBG_ERRORS,
+                          "NVRM: Invalid PCI slot in token %s\n",
+                          pci_dev_str);
+                status = NV_ERR_INVALID_ARGUMENT;
+                goto done;
+            }
+
+            token = string;
+            slot = (NvU32)simple_strtoul(token + 1, &string, 16);
+            if ((slot == 0) && ((token + 1) == string))
+            {
+                nv_printf(NV_DBG_ERRORS,
+                          "NVRM: Invalid PCI slot in token %s\n",
+                          pci_dev_str);
+                status = NV_ERR_INVALID_ARGUMENT;
+                goto done;
+            }
+            *pci_domain = domain;
+            *pci_bus = bus;
+            *pci_slot = slot;
+        }
+        else
+        {
+            *pci_slot = bus;
+            *pci_bus = domain;
+            *pci_domain = 0;
+        }
+        status = NV_OK;
+    }
+    else
+    {
+        status = NV_ERR_INVALID_ARGUMENT;
+    }
+
+done:
+    // Freeing the memory allocated by rm_remove_spaces().
+    os_free_mem(option_string);
+    return status;
+}
+
+/*!
+ * @brief This function parses the registry keys per GPU device. It accepts a
+ * semicolon-separated list of key=value pairs. The first key=value pair MUST be
+ * "pci=DDDD:BB:DD.F;" where DDDD is the domain, BB is the bus, DD is the device
+ * slot number and F is the function. This PCI BDF is used to identify which GPU
+ * to assign the registry keys that follow.
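+ * For example (the key name here is purely illustrative):
+ *     NVreg_RegistryDwordsPerDevice="pci=0000:01:00.0;RMDummyKey=0x1"
+ * applies RMDummyKey only to the GPU at PCI BDF 0000:01:00.0.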
+ * If a GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is NOT
+ * found, then all the registry keys that follow are skipped, until the next
+ * valid PCI identifier "pci=DDDD:BB:DD.F;" is found. The following are the
+ * valid formats for the value of the "pci" string:
+ * 1) bus:slot            : Domain and function default to 0.
+ * 2) domain:bus:slot     : Function defaults to 0.
+ * 3) domain:bus:slot.func: Complete PCI dev id string.
+ *
+ *
+ * @param[in]  sp  pointer to nvidia_stack_t struct.
+ *
+ * @return NV_OK on success, or an NV_STATUS error code otherwise.
+ */
+NV_STATUS nv_parse_per_device_option_string(nvidia_stack_t *sp)
+{
+    NV_STATUS status = NV_OK;
+    char *option_string = NULL;
+    char *ptr, *token;
+    char *name, *value;
+    NvU32 data, domain, bus, slot, func;
+    nv_linux_state_t *nvl = NULL;
+    nv_state_t *nv = NULL;
+
+    if (NVreg_RegistryDwordsPerDevice != NULL)
+    {
+        if ((option_string = rm_remove_spaces(NVreg_RegistryDwordsPerDevice)) == NULL)
+        {
+            return NV_ERR_GENERIC;
+        }
+
+        ptr = option_string;
+
+        while ((token = strsep(&ptr, ";")) != NULL)
+        {
+            if (!(name = strsep(&token, "=")) || !strlen(name))
+            {
+                continue;
+            }
+
+            if (!(value = strsep(&token, "=")) || !strlen(value))
+            {
+                continue;
+            }
+
+            if (strsep(&token, "=") != NULL)
+            {
+                continue;
+            }
+
+            // If this key is "pci", then the value is a PCI device ID string,
+            // which needs special parsing as it is NOT a dword.
+            if (strcmp(name, NV_REG_PCI_DEVICE_BDF) == 0)
+            {
+                status = pci_str_to_bdf(value, &domain, &bus, &slot, &func);
+
+                // Check whether the PCI device ID string was in a valid format.
+                if (NV_OK != status)
+                {
+                    // Reset the cached PCI device.
+                    nv = NULL;
+                }
+                else
+                {
+                    nvl = find_pci(domain, bus, slot, func);
+                    //
+                    // If no GPU corresponding to this BDF is found, reset the
+                    // cached state. This helps ignore the following registry
+                    // keys until a valid PCI BDF is found in the command line.
+                    //
+                    if (!nvl)
+                    {
+                        nv = NULL;
+                    }
+                    else
+                    {
+                        nv = NV_STATE_PTR(nvl);
+                    }
+                }
+                continue;
+            }
+
+            //
+            // If the cached PCI device from the command line is not valid,
+            // skip all the successive registry entries (name=value pairs)
+            // until a valid PCI device string is encountered in the command
+            // line.
+            //
+            if (!nv)
+                continue;
+
+            data = (NvU32)simple_strtoul(value, NULL, 0);
+
+            rm_write_registry_dword(sp, nv, name, data);
+        }
+
+        os_free_mem(option_string);
+    }
+    return status;
+}
+
+/*
+ * Compare given string UUID with the GpuBlacklist or ExcludedGpus registry
+ * parameter string and return whether the UUID is in the GPU exclusion list
+ */
+NvBool nv_is_uuid_in_gpu_exclusion_list(const char *uuid)
+{
+    const char *input;
+    char *list;
+    char *ptr;
+    char *token;
+
+    //
+    // When both NVreg_GpuBlacklist and NVreg_ExcludedGpus are defined,
+    // NVreg_ExcludedGpus takes precedence.
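+    // Both parameters hold a comma-separated list of GPU UUID strings.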
+ // + if (NVreg_ExcludedGpus != NULL) + input = NVreg_ExcludedGpus; + else if (NVreg_GpuBlacklist != NULL) + input = NVreg_GpuBlacklist; + else + return NV_FALSE; + + if ((list = rm_remove_spaces(input)) == NULL) + return NV_FALSE; + + ptr = list; + + while ((token = strsep(&ptr, ",")) != NULL) + { + if (strcmp(token, uuid) == 0) + { + os_free_mem(list); + return NV_TRUE; + } + } + os_free_mem(list); + return NV_FALSE; +} + +NV_STATUS NV_API_CALL os_registry_init(void) +{ + nv_parm_t *entry; + unsigned int i; + nvidia_stack_t *sp = NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + if (NVreg_RmNvlinkBandwidth != NULL) + { + rm_write_registry_string(sp, NULL, + "RmNvlinkBandwidth", + NVreg_RmNvlinkBandwidth, + strlen(NVreg_RmNvlinkBandwidth)); + } + + if (NVreg_RmMsg != NULL) + { + rm_write_registry_string(sp, NULL, + "RmMsg", NVreg_RmMsg, strlen(NVreg_RmMsg)); + } + + // + // CoherentGPUMemoryMode=driver just implies the older + // EnableUserNUMAManagement=0 option + // + if (NVreg_CoherentGPUMemoryMode != NULL) + { + if (strcmp(NVreg_CoherentGPUMemoryMode, "driver") == 0) + { + NVreg_EnableUserNUMAManagement = 0; + } + } + + rm_parse_option_string(sp, NVreg_RegistryDwords); + + for (i = 0; (entry = &nv_parms[i])->name != NULL; i++) + { + rm_write_registry_dword(sp, NULL, entry->name, *entry->data); + } + + nv_kmem_cache_free_stack(sp); + + return NV_OK; +} diff --git a/kernel-open/nvidia/os-usermap.c b/kernel-open/nvidia/os-usermap.c new file mode 100644 index 0000000..244d4b8 --- /dev/null +++ b/kernel-open/nvidia/os-usermap.c @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +NV_STATUS NV_API_CALL os_match_mmap_offset( + void *pAllocPrivate, + NvU64 offset, + NvU64 *pPageIndex +) +{ + nv_alloc_t *at = pAllocPrivate; + NvU64 i; + + for (i = 0; i < at->num_pages; i++) + { + if (at->flags.contig) + { + if (offset == (at->page_table[0].phys_addr + (i * PAGE_SIZE))) + { + *pPageIndex = i; + return NV_OK; + } + } + else + { + if (offset == at->page_table[i].phys_addr) + { + *pPageIndex = i; + return NV_OK; + } + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} diff --git a/kernel-open/nvidia/rmp2pdefines.h b/kernel-open/nvidia/rmp2pdefines.h new file mode 100644 index 0000000..2ef8458 --- /dev/null +++ b/kernel-open/nvidia/rmp2pdefines.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _RMP2PDEFINES_H_ +#define _RMP2PDEFINES_H_ + +#define NVRM_P2P_PAGESIZE_SMALL_4K (4 << 10) +#define NVRM_P2P_PAGESIZE_BIG_64K (64 << 10) +#define NVRM_P2P_PAGESIZE_BIG_128K (128 << 10) + +#endif diff --git a/nv-compiler.sh b/nv-compiler.sh new file mode 100755 index 0000000..a89f2c6 --- /dev/null +++ b/nv-compiler.sh @@ -0,0 +1,82 @@ +#!/bin/sh +# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: MIT +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+#
+
+set -e
+
+get_compiler_type()
+{
+    printf "#if defined(__clang__)
+            clang
+            #elif defined(__GNUC__)
+            gcc
+            #elif defined(__INTEL_COMPILER)
+            icc
+            #else
+            unknown
+            #endif" | $1 -E -P -
+}
+
+get_original_version()
+{
+    printf "#if defined(__clang__)
+            __clang_major__ __clang_minor__ __clang_patchlevel__
+            #elif defined(__GNUC__)
+            __GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__
+            #elif defined(__INTEL_COMPILER)
+            __INTEL_COMPILER __INTEL_COMPILER_UPDATE
+            #endif" | $1 -E -P -
+}
+
+get_canonical_version()
+{
+    type=$(get_compiler_type $1)
+    set -- $(get_original_version $1)
+
+    # get the version based on the type
+    if [ "$type" = "unknown" ]; then
+        echo >&2 "unknown compiler: bailing out"
+        exit 1
+    elif [ "$type" = "icc" ]; then
+        echo >&2 "icc is not supported"
+        exit 1
+    else
+        major=$1
+        minor=$2
+    fi
+    patch=$3
+    echo $((patch + minor * 100 + major * 10000))
+}
+
+if [ "$1" = "type" ]; then
+    echo $(get_compiler_type $2)
+elif [ "$1" = "version_is_at_least" ]; then
+    if [ -z "$3" ]; then
+        echo >&2 "minimum compiler version cannot be empty"
+        exit 1
+    fi
+    version=$(get_canonical_version $2)
+    if [ "$version" -gt $(($3-1)) ]; then
+        echo "1"
+    fi
+fi
+
diff --git a/push_info.txt b/push_info.txt
new file mode 100644
index 0000000..6be9858
--- /dev/null
+++ b/push_info.txt
@@ -0,0 +1 @@
+jetson_38.2
diff --git a/src/common/displayport/inc/dp_address.h b/src/common/displayport/inc/dp_address.h
new file mode 100644
index 0000000..d49eebc
--- /dev/null
+++ b/src/common/displayport/inc/dp_address.h
@@ -0,0 +1,288 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_address.h                                                      *
+*    Basic class for AUX Address                                            *
+*                                                                           *
+\***************************************************************************/
+
+#ifndef INCLUDED_DP_ADDRESS_H
+#define INCLUDED_DP_ADDRESS_H
+
+#include "dp_internal.h"
+
+namespace DisplayPort
+{
+    class Address
+    {
+    public:
+        enum
+        {
+            maxHops = 15, // update DP_MAX_ADDRESS_HOPS when changed (in displayportCommon.h)
+            maxHopsHDCP = 7,
+            maxPortCount = 15
+        };
+
+        Address()
+        {
+            clear();
+        }
+
+        Address(unsigned hop0)
+        {
+            clear();
+            hop[hops++] = hop0;
+        }
+
+        Address(unsigned hop0, unsigned hop1)
+        {
+            clear();
+            hop[hops++] = hop0;
+            hop[hops++] = hop1;
+        }
+
+        Address(const Address & other)
+        {
+            clear();
+            for(unsigned i = 0; i < other.size(); i++)
+            {
+                append(other[i]);
+            }
+        }
+
+        void clear()
+        {
+            hops = 0;
+            for (unsigned i = 0; i < maxHops; i++)
+            {
+                hop[i] = 0;
+            }
+        }
+
+        Address parent() const
+        {
+            if (hops == 0)
+            {
+                DP_ASSERT(hops != 0);
+                return 0;
+            }
+            Address addr = *this;
+            addr.hops--;
+            return addr;
+        }
+
+        unsigned tail() const
+        {
+            if (hops == 0)
+            {
+                DP_ASSERT(hops != 0);
+                return 0;
+            }
+            return hop[hops-1];
+        }
+
+        void append(unsigned port)
+        {
+            if (hops >= maxHops)
+            {
+                DP_ASSERT(0);
+                return;
+            }
+            hop[hops++] = port;
+        }
+
+        void prepend(unsigned port)
+        {
+            if (hops >= maxHops)
+            {
+                DP_ASSERT(0);
+                return;
+            }
+            hops++;
+            for (unsigned i = hops - 1; i > 0; i--)
+                hop[i] = hop[i-1];
+            hop[0] = port;
+        }
+
+        void pop()
+        {
+            if (hops == 0)
+            {
+                DP_ASSERT(0);
+                return;
+            }
+            hops--;
+        }
+
+        // Copy assignment mirrors the copy constructor: clear, then append each hop
+        Address & operator = (const Address & other)
+        {
+            clear();
+            for(unsigned i = 0; i < other.size(); i++)
+            {
+                append(other[i]);
+            }
+
+            return *this;
+        }
+
+        bool operator == (const Address & other) const
+        {
+            if (other.size() != size())
+                return false;
+
+            for (unsigned i = 0; i < hops; i++)
+                if (other[i] != (*this)[i])
+                    return false;
+
+            return true;
+        }
+
+        //
+        // Sort by size first, then lexicographically
+        //
+        bool operator > (const Address & other) const
+        {
+            if (size() > other.size())
+                return true;
+            else if (size() < other.size())
+                return false;
+
+            for (unsigned i = 0; i < hops; i++)
+            {
+                if ((*this)[i] > other[i])
+                    return true;
+                else if ((*this)[i] < other[i])
+                    return false;
+            }
+
+            return false;
+        }
+
+        //
+        // Sort by size first, then lexicographically
+        //
+        bool operator < (const Address & other) const
+        {
+            if (size() < other.size())
+                return true;
+            else if (size() > other.size())
+                return false;
+
+            for (unsigned i = 0; i < hops; i++)
+            {
+                if ((*this)[i] < other[i])
+                    return true;
+                else if ((*this)[i] > other[i])
+                    return false;
+            }
+
+            return false;
+        }
+
+        bool operator >= (const Address & other) const
+        {
+            return !((*this) < other);
+        }
+
+        bool operator <= (const Address & other) const
+        {
+            return !((*this) > other);
+        }
+
+        bool operator != (const Address & other) const
+        {
+            return !((*this) == other);
+        }
+
+        unsigned size() const
+        {
+            return hops;
+        }
+
+        unsigned & operator [](unsigned index)
+        {
+            DP_ASSERT(index < hops);
+            return hop[index];
+        }
+
+        const unsigned & operator [](unsigned index) const
+        {
+            DP_ASSERT(index < hops);
+            return hop[index];
+        }
+
+        bool under(const Address & root) const
+        {
+            if (size() < root.size())
+                return false;
+
+            for (unsigned i = 0; i < root.size(); i++)
+                if ((*this)[i] != root[i])
+                    return false;
+
+            return true;
+        }
+
+        typedef
char StringBuffer[maxHops*3+1]; + char * toString(StringBuffer & buffer, bool removeLeadingZero = false) const + { + char * p = &buffer[0]; + int hopsWritten = 0; + for (unsigned i = 0; i < hops; i++) + { + if (i == 0 && hop[0] == 0 && removeLeadingZero) + continue; + if (hopsWritten > 0) + *p++ = '.'; + if (hop[i] >= 10) + *p++ = (char)(hop[i] / 10 +'0'); + *p++ = (char)(hop[i] % 10 + '0'); + hopsWritten++; + } + + *p++= 0; + return (char *)&buffer[0]; + } + + // Large enough to fit 4 hops into every NvU32 + typedef NvU32 NvU32Buffer[(maxHops-1)/4+1 < 4 ? 4 : (maxHops-1)/4+1]; + NvU32 * toNvU32Buffer(NvU32Buffer & buffer) const + { + for (unsigned i = 0; i < hops; i++) + { + buffer[i/4] |= ((NvU8) hop[i]) << (i % 4) * 8; + } + + return (NvU32 *)&buffer[0]; + } + + private: + unsigned hop[maxHops]; + unsigned hops; + }; +} + +#endif //INCLUDED_DP_ADDRESS_H diff --git a/src/common/displayport/inc/dp_auxbus.h b/src/common/displayport/inc/dp_auxbus.h new file mode 100644 index 0000000..12d0388 --- /dev/null +++ b/src/common/displayport/inc/dp_auxbus.h @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_auxbus.h * +* Interface for low level access to the aux bus. * +* This is the synchronous version of the interface. 
* +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_AUXBUS_H +#define INCLUDED_DP_AUXBUS_H + +namespace DisplayPort +{ + class AuxBus : virtual public Object + { + public: + enum status + { + success, + defer, + nack, + unSupported, + }; + + enum Action + { + read, + write, + writeStatusUpdateRequest, // I2C only + }; + + enum Type + { + native, + i2c, + i2cMot + }; + + virtual status transaction(Action action, Type type, int address, + NvU8 * buffer, unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason = NULL, + NvU8 offset = 0, NvU8 nWriteTransactions = 0) = 0; + + virtual unsigned transactionSize() = 0; + virtual status fecTransaction(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags) { return nack; } + virtual void setDevicePlugged(bool) {} + virtual ~AuxBus() {} + }; + + // + // Wraps an auxbus interface with one that prints all the input and output + // + AuxBus * CreateAuxLogger(AuxBus * auxBus); +} + +#endif //INCLUDED_DP_AUXBUS_H diff --git a/src/common/displayport/inc/dp_auxdefs.h b/src/common/displayport/inc/dp_auxdefs.h new file mode 100644 index 0000000..a99dca4 --- /dev/null +++ b/src/common/displayport/inc/dp_auxdefs.h @@ -0,0 +1,96 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_auxdefs.h                                                      *
+*    Definitions for DPCD AUX offsets                                       *
+*    Should be used sparingly (DPCD HAL preferred)                          *
+*                                                                           *
+\***************************************************************************/
+
+#ifndef __DP_AUXDEFS_H__
+#define __DP_AUXDEFS_H__
+
+#define DPCD_MESSAGEBOX_SIZE 48
+
+//
+// If a message is outstanding for at least 4 seconds
+// assume no reply is coming through
+//
+#define DPCD_MESSAGE_REPLY_TIMEOUT 4000
+
+#define DPCD_LINK_ADDRESS_MESSAGE_RETRIES 20  // 20 retries
+#define DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN 10 // 10ms between attempts
+
+// pointing to the defaults for LAM settings to start with
+#define DPCD_REMOTE_DPCD_WRITE_MESSAGE_RETRIES DPCD_LINK_ADDRESS_MESSAGE_RETRIES
+#define DPCD_REMOTE_DPCD_WRITE_MESSAGE_COOLDOWN DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN
+
+#define DPCD_REMOTE_DPCD_READ_MESSAGE_RETRIES 7 // 7 retries
+#define DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN
+#define DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN_BKSV 20 // 20ms between attempts
+
+#define DPCD_QUERY_STREAM_MESSAGE_RETRIES 7   // 7 retries
+#define DPCD_QUERY_STREAM_MESSAGE_COOLDOWN 20 // 20ms between attempts
+
+#define MST_EDID_RETRIES 20
+#define MST_EDID_COOLDOWN 10
+
+#define MST_ALLOCATE_RETRIES 10
+#define MST_ALLOCATE_COOLDOWN 10
+
+#define HDCP_AUTHENTICATION_RETRIES 6          // 6 retries
+#define HDCP_CPIRQ_RXSTAUS_RETRIES 3
+#define HDCP_AUTHENTICATION_COOLDOWN 1000      // 1 sec between attempts
+#define HDCP22_AUTHENTICATION_COOLDOWN 30000   // 30 sec between attempts
+#define HDCP_AUTHENTICATION_COOLDOWN_HPD 3000  // 3 sec for first stream add
+#define HDCP_CPIRQ_RXSTATUS_COOLDOWN 20        // 20ms between attempts
+
+#define HDCP_QSEANDSETECF_RETRIES 6     // 6 retries, matching the authentication retries
+#define HDCP_QSEANDSETECF_COOLDOWN 3000 // 3 sec between attempts, matching the authentication period
+
+// Re-submit the Stream Validation request to the falcon microcontroller after 1 sec if the current request fails
+#define HDCP_STREAM_VALIDATION_RESUBMIT_COOLDOWN 1000
+
+//
+// Wait up to 8 seconds for completion of the KSV and Stream Validation;
+// if they do not complete in that time, time out.
+//
+#define HDCP_STREAM_VALIDATION_REQUEST_COOLDOWN 8000
+
+//
+// Wait up to 1 sec before checking whether there is still an active QSE message,
+// then send the QSE message or queue a check for next time. 1 sec should be
+// enough for the sink to reply to the QSE request.
+//
+#define HDCP_SEND_QSE_MESSAGE_COOLDOWN 1000
+
+#define DPCD_OUI_NVIDIA 0x00044B
+
+//
+// Maximum retry count when checking that the Payload ID table has been
+// updated before triggering the ACT sequence.
+//
+#define PAYLOADIDTABLE_UPDATED_CHECK_RETRIES 300
+
+#endif // __DP_AUXDEFS_H__
diff --git a/src/common/displayport/inc/dp_auxretry.h b/src/common/displayport/inc/dp_auxretry.h
new file mode 100644
index 0000000..2f20949
--- /dev/null
+++ b/src/common/displayport/inc/dp_auxretry.h
@@ -0,0 +1,181 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_auxretry.h * +* Adapter interface for friendlier AuxBus * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_AUXRETRY_H +#define INCLUDED_DP_AUXRETRY_H + +#include "dp_auxbus.h" +#include "dp_timeout.h" + +namespace DisplayPort +{ + enum + { + minimumRetriesOnDefer = 7 + }; + + class AuxRetry + { + AuxBus * aux; + public: + AuxRetry(AuxBus * aux = 0) + : aux(aux) + { + } + + AuxBus * getDirect() + { + return aux; + } + + enum status + { + ack, + nack, + unsupportedRegister, + defer + }; + + // + // Perform an aux read transaction. + // - Automatically handles defers up to retry limit + // - Retries on partial read + // + virtual status readTransaction(int address, NvU8 * buffer, unsigned size, unsigned retries = minimumRetriesOnDefer); + + // + // Similar to readTransaction except that it supports reading + // larger spans than AuxBus::transactionSize() + // + virtual status read(int address, NvU8 * buffer, unsigned size, unsigned retries = minimumRetriesOnDefer); + + // + // Perform an aux write transaction. 
+        // - Automatically handles defers up to retry limit
+        // - Retries on partial write
+        //
+        virtual status writeTransaction(int address, NvU8 * buffer, unsigned size, unsigned retries = minimumRetriesOnDefer);
+
+        //
+        // Similar to writeTransaction except that it supports writing
+        // larger spans than AuxBus::transactionSize()
+        //
+        virtual status write(int address, NvU8 * buffer, unsigned size, unsigned retries = minimumRetriesOnDefer);
+    };
+
+    class AuxLogger : public AuxBus
+    {
+        AuxBus * bus;
+        char hex[256];
+        char hex_body[256];
+        char hint[128];
+
+    public:
+        AuxLogger(AuxBus * bus) : bus(bus)
+        {
+        }
+
+        const char * getAction(Action action)
+        {
+            if (action == read)
+                return "rd ";
+            else if (action == write)
+                return "wr ";
+            else if (action == writeStatusUpdateRequest)
+                return "writeStatusUpdateRequest ";
+            else
+                DP_ASSERT(0);
+            return "???";
+        }
+
+        const char * getType(Type typ)
+        {
+            if (typ == native)
+                return "";
+            else if (typ == i2c)
+                return "i2c ";
+            else if (typ == i2cMot)
+                return "i2cMot ";
+            else
+                DP_ASSERT(0);
+            return "???";
+        }
+
+        const char * getStatus(status stat)
+        {
+            if (stat == success)
+                return "";
+            else if (stat == nack)
+                return "(nack) ";
+            else if (stat == defer)
+                return "(defer) ";
+            else
+                DP_ASSERT(0);
+            return "???";
+        }
+
+        const char * getRequestId(unsigned requestIdentifier)
+        {
+            switch(requestIdentifier)
+            {
+                case 0x1:  return "LINK_ADDRESS";
+                case 0x4:  return "CLEAR_PAT";
+                case 0x10: return "ENUM_PATH";
+                case 0x11: return "ALLOCATE";
+                case 0x12: return "QUERY";
+                case 0x20: return "DPCD_READ";
+                case 0x21: return "DPCD_WRITE";
+                case 0x22: return "I2C_READ";
+                case 0x23: return "I2C_WRITE";
+                case 0x24: return "POWER_UP_PHY";
+                case 0x25: return "POWER_DOWN_PHY";
+                case 0x38: return "HDCP_STATUS";
+                default:   return "";
+            }
+        }
+
+        virtual status transaction(Action action, Type type, int address,
+                                   NvU8 * buffer, unsigned sizeRequested,
+                                   unsigned * sizeCompleted, unsigned * pNakReason,
+                                   NvU8 offset, NvU8 nWriteTransactions);
+
+        virtual unsigned transactionSize()
+        {
+            return bus->transactionSize();
+        }
+
+        virtual status fecTransaction(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags)
+        {
+            return bus->fecTransaction(fecStatus, fecErrorCount, flags);
+        }
+    };
+}
+
+#endif //INCLUDED_DP_AUXRETRY_H
diff --git a/src/common/displayport/inc/dp_bitstream.h b/src/common/displayport/inc/dp_bitstream.h
new file mode 100644
index 0000000..3d01f74
--- /dev/null
+++ b/src/common/displayport/inc/dp_bitstream.h
@@ -0,0 +1,98 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_bitstream.h                                                    *
+*    This is an implementation of the big endian bit stream                 *
+*                                                                           *
+\***************************************************************************/
+
+#ifndef INCLUDED_DP_BITSTREAM_H
+#define INCLUDED_DP_BITSTREAM_H
+
+#include "dp_buffer.h"
+
+namespace DisplayPort
+{
+    //
+    // Bitstream reader interface
+    //  - reads a packed stream of bits in Big Endian format
+    //  - handles alignment, buffering, and buffer bounds checking
+    //
+    class BitStreamReader
+    {
+        Buffer * sourceBuffer;
+        unsigned bitsOffset;
+        unsigned bitsEnd;
+
+    public:
+        // Read 1-32 bits from the stream into *value. Returns true on success
+        bool read(unsigned * value, unsigned bits);
+
+        // Read 1-32 bits from the stream. Returns 'defaultValue' on failure.
+        unsigned readOrDefault(unsigned bits, unsigned defaultValue);
+
+        // Skip bits until the offset is aligned to 'align' (a power of two)
+        bool align(unsigned align);
+
+        unsigned offset();
+        Buffer * buffer();
+        BitStreamReader(Buffer * buffer, unsigned bitsOffset, unsigned bitsCount);
+    };
+
+    //
+    // Bitstream writer interface
+    //
+    class BitStreamWriter
+    {
+        Buffer * targetBuffer;
+        unsigned bitsOffset;
+    public:
+        //
+        // Create a bitstream writer at a specific bit offset
+        // into an already existing buffer
+        //
+        BitStreamWriter(Buffer * buffer, unsigned bitsOffset = 0);
+
+        //
+        // Write n bits to the buffer in big endian format.
+        // No buffering is performed.
+        //
+        bool write(unsigned value, unsigned bits);
+
+        //
+        // Emit zeros until the offset is divisible by align.
+        // CAVEAT: align must be a power of 2 (e.g. 8)
+        //
+        bool align(unsigned align);
+
+        //
+        // Get current offset and buffer target
+        //
+        unsigned offset();
+        Buffer * buffer();
+    };
+}
+
+#endif //INCLUDED_DP_BITSTREAM_H
diff --git a/src/common/displayport/inc/dp_buffer.h b/src/common/displayport/inc/dp_buffer.h
new file mode 100644
index 0000000..6d3e6f5
--- /dev/null
+++ b/src/common/displayport/inc/dp_buffer.h
@@ -0,0 +1,97 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_buffer.h * +* Resizable byte buffer and stream classes * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_BUFFER_H +#define INCLUDED_DP_BUFFER_H + +#include "dp_internal.h" + +namespace DisplayPort +{ + class Buffer + { + public: + NvU8 *data; // Data buffer + unsigned length; // bytes used + unsigned capacity; // size of allocation + bool errorState; // did we lose a malloc in there? + public: + // + // Write will only fail if we're unable to reallocate the buffer. In this case + // the buffer will be reset to its empty state. + // + const NvU8 * getData() const { return data; } + NvU8 * getData() { return data; } + bool resize(unsigned newSize); + void memZero(); + void reset(); + unsigned getLength() const { return length; } + + // Is in error state? This happens if malloc fails. Error state is + // held until reset is called. + bool isError() const; + + Buffer(const Buffer & other); + Buffer(NvU8 * data, unsigned size); + Buffer & operator = (const Buffer & other); + Buffer(); + ~Buffer(); + + void swap(Buffer & other) { + swap_args(other.data, data); + swap_args(other.length, length); + swap_args(other.capacity, capacity); + swap_args(other.errorState, errorState); + } + + bool operator== (const Buffer & other) const; + }; + + class Stream + { + protected: + Buffer * parent; + unsigned byteOffset; + public: + Stream(Buffer * buffer); + bool seek(unsigned where); + bool read(NvU8 * buffer, unsigned size); + bool write(NvU8 * buffer, unsigned size); + + // returns error state of buffer + bool isError() const; + unsigned remaining(); + unsigned offset(); + }; + + void swapBuffers(Buffer & left, Buffer & right); +} + +#endif //INCLUDED_DP_BUFFER_H diff --git a/src/common/displayport/inc/dp_configcaps.h b/src/common/displayport/inc/dp_configcaps.h new file mode 100644 index 0000000..1a44d07 --- /dev/null +++ b/src/common/displayport/inc/dp_configcaps.h @@ -0,0 +1,1455 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_configcaps.h * +* Abstraction for basic caps registers * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_CONFIGCAPS_H +#define INCLUDED_DP_CONFIGCAPS_H + +#include "dp_connector.h" +#include "dp_auxretry.h" +#include "dp_linkconfig.h" +#include "dp_regkeydatabase.h" + +namespace DisplayPort +{ + enum PowerState + { + PowerStateD0 = 1, + PowerStateD3 = 2, + PowerStateD3AuxOn = 5 + }; + + // Extended caps = offset 0x80 + enum DwnStreamPortType + { + DISPLAY_PORT = 0, + ANALOG_VGA, + DVI, + HDMI, + WITHOUT_EDID, + DISPLAY_PORT_PLUSPLUS + } ; + + enum DwnStreamPortAttribute + { + RESERVED = 0, + IL_720_480_60HZ, + IL_720_480_50HZ, + IL_1920_1080_60HZ, + IL_1920_1080_50HZ, + PG_1280_720_60HZ, + PG_1280_720_50_HZ, + } ; + + // DPCD Offset 102 enums + enum TrainingPatternSelectType + { + TRAINING_DISABLED, + TRAINING_PAT_ONE, + TRAINING_PAT_TWO, + TRAINING_PAT_THREE, + }; + + enum SymbolErrorSelectType + { + DISPARITY_ILLEGAL_SYMBOL_ERROR, + DISPARITY_ERROR, + ILLEGAL_SYMBOL_ERROR, + }; + + // DPCD Offset 1A1 enums + enum MultistreamHotplugMode + { + HPD_LONG_PULSE, + IRQ_HPD, + }; + + // DPCD Offset 220 + enum TestPatternType + { + NO_PATTERN, + COLOR_RAMPS, + BLACK_WHITE, + COLOR_SQUARE, + } ; + + // DPCD Offset 232, 233 + enum ColorFormatType + { + RGB, + YCbCr_422, + YCbCr_444, + } ; + + enum DynamicRangeType + { + VESA, + CEA, + } ; + + enum YCBCRCoeffType + { + ITU601, + ITU709, + } ; + + #define HDCP_BCAPS_SIZE (0x1) + #define HDCP_VPRIME_SIZE (0x14) + #define HDCP_KSV_FIFO_SIZE (0xF) + #define HDCP_KSV_FIFO_WINDOWS_RETRY (0x3) + #define HDCP22_BCAPS_SIZE (0x1) + + // Bstatus DPCD offset 0x68029 + #define HDCPREADY (0x1) + #define R0PRIME_AVAILABLE (0x2) + #define LINK_INTEGRITY_FAILURE (0x4) + #define REAUTHENTICATION_REQUEST (0x8) + + struct BInfo + { + bool maxCascadeExceeded; + unsigned depth; + bool maxDevsExceeded; + unsigned deviceCount; + }; + + struct BCaps + { + bool repeater; + bool HDCPCapable; + }; + + enum + { + PHYSICAL_PORT_START = 0x0, + PHYSICAL_PORT_END = 0x7, + LOGICAL_PORT_START = 0x8, + LOGICAL_PORT_END = 0xF + }; + + class LaneStatus + { + public: + // + // Lane Status + // CAUTION: Only updated on IRQ/HPD right now + // + virtual bool getLaneStatusClockRecoveryDone(int lane) = 0; // DPCD offset 202, 203 + virtual bool getLaneStatusSymbolLock(int lane)= 0; + virtual bool getLaneStatusChannelEqualizationDone(int lane)= 0; + virtual bool getInterlaneAlignDone() = 0; + virtual bool getDownStreamPortStatusChange() = 0; + }; + + class TestRequest + { + public: + virtual bool getPendingTestRequestTraining() = 0; // DPCD offset 218 + virtual void getTestRequestTraining(LinkRate & rate, unsigned & lanes) = 0; // DPCD offset 219, 220 + virtual bool getPendingAutomatedTestRequest() = 0; // DPCD offset 218 + virtual bool getPendingTestRequestEdidRead() = 0; // DPCD offset 218 + virtual bool getPendingTestRequestPhyCompliance() = 0; // DPCD offset 218 + virtual LinkQualityPatternType getPhyTestPattern() = 0; // DPCD offset 248 + virtual AuxRetry::status setTestResponse(bool ack, bool edidChecksumWrite = false) = 0; + virtual AuxRetry::status 
setTestResponseChecksum(NvU8 checksum) = 0;
+    };
+
+    class LegacyPort
+    {
+    public:
+        virtual DwnStreamPortType getDownstreamPortType() = 0;
+        virtual DwnStreamPortAttribute getDownstreamNonEDIDPortAttribute() = 0;
+
+        // For port type = HDMI
+        virtual NvU64 getMaxTmdsClkRate() = 0;
+    };
+
+    class LinkState
+    {
+    public:
+        //
+        // Link state
+        //
+        virtual bool isPostLtAdjustRequestSupported() = 0;
+        virtual void setPostLtAdjustRequestGranted(bool bGrantPostLtRequest) = 0;
+        virtual bool getIsPostLtAdjRequestInProgress() = 0;               // DPCD offset 204
+        virtual TrainingPatternSelectType getTrainingPatternSelect() = 0; // DPCD offset 102
+
+        virtual bool setTrainingMultiLaneSet(NvU8 numLanes,
+                                             NvU8 *voltSwingSet,
+                                             NvU8 *preEmphasisSet) = 0;
+
+        virtual bool readTraining(NvU8* voltageSwingLane,
+                                  NvU8* preemphasisLane = 0,
+                                  NvU8* trainingScoreLane = 0,
+                                  NvU8* postCursor = 0,
+                                  NvU8 activeLaneCount = 0) = 0;
+
+        virtual bool isLaneSettingsChanged(NvU8* oldVoltageSwingLane,
+                                           NvU8* newVoltageSwingLane,
+                                           NvU8* oldPreemphasisLane,
+                                           NvU8* newPreemphasisLane,
+                                           NvU8 activeLaneCount) = 0;
+
+        virtual AuxRetry::status setIgnoreMSATimingParamters(bool msaTimingParamIgnoreEn) = 0;
+        virtual AuxRetry::status setLinkQualLaneSet(unsigned lane, LinkQualityPatternType linkQualPattern) = 0;
+        virtual AuxRetry::status setLinkQualPatternSet(LinkQualityPatternType linkQualPattern, unsigned laneCount = 0) = 0;
+    };
+
+    class LinkCapabilities
+    {
+    public:
+
+        //
+        // Physical layer feature set
+        //
+        virtual NvU64 getMaxLinkRate() = 0;                 // Maximum byte-block in Hz
+        virtual unsigned getMaxLaneCount() = 0;             // DPCD offset 0x0002h
+        virtual unsigned getMaxLaneCountSupportedAtLinkRate(LinkRate linkRate) = 0;
+        virtual bool getEnhancedFraming() = 0;
+        virtual bool getSupportsNoHandshakeTraining() = 0;
+        virtual bool getMsaTimingparIgnored() = 0;
+        virtual bool getDownstreamPort(NvU8 *portType) = 0; // DPCD offset 0x0005h
+        virtual bool getSupportsMultistream() = 0;          // DPCD offset 0x0021h
+        virtual bool getNoLinkTraining() = 0;               // DPCD offset 0x0330h
+        virtual unsigned getPhyRepeaterCount() = 0;         // DPCD offset 0xF0002h
+    };
+
+    class OUI
+    {
+    public:
+        virtual bool getOuiSupported() = 0;
+        virtual AuxRetry::status setOuiSource(unsigned ouiId, const char * model, size_t modelNameLength, NvU8 chipRevision) = 0;
+        virtual bool getOuiSource(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision) = 0;
+        virtual bool getOuiSink(unsigned &ouiId, unsigned char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision) = 0;
+    };
+
+    class HDCP
+    {
+    public:
+        virtual bool getBKSV(NvU8 *bKSV) = 0;                              // DPCD offset 0x68000
+        virtual bool getBCaps(BCaps &bCaps, NvU8 * rawByte = 0) = 0;       // DPCD offset 0x68028
+        virtual bool getHdcp22BCaps(BCaps &bCaps, NvU8 * rawByte = 0) = 0; // DPCD offset 0x6921D
+        virtual bool getBinfo(BInfo &bInfo) = 0;                           // DPCD offset 0x6802A
+
+        // Generic interfaces for HDCP 1.x / 2.2
+        virtual bool getRxStatus(const HDCPState &hdcpState, NvU8 *data) = 0;
+    };
+
+    class DPCDHAL :
+        virtual public Object,
+        public TestRequest,
+        public LaneStatus,
+        public LinkState,
+        public LinkCapabilities,
+        public OUI,
+        public HDCP
+    {
+    public:
+        //
+        // Notifications of external events
+        // We send IRQ/HPD events to the HAL so that it knows
+        // when to re-read the registers.
All the remaining + // calls are either accessors to cached state (caps), + // or DPCD get/setters + // + virtual void notifyIRQ() = 0; + virtual void notifyHPD(bool status, bool bSkipDPCDRead = false) = 0; + + virtual void populateFakeDpcd() = 0; + + // DPCD override routines + virtual void overrideMaxLinkRate(NvU32 overrideMaxLinkRate) = 0; + virtual void overrideMaxLaneCount(NvU32 maxLaneCount) = 0; + virtual void skipCableBWCheck(NvU32 maxLaneAtHighRate, NvU32 maxLaneAtLowRate) = 0; + virtual void overrideOptimalLinkCfg(LinkRate optimalLinkRate, NvU32 optimalLaneCount) = 0; + virtual void overrideOptimalLinkRate(LinkRate optimalLinkRate) = 0; + + virtual bool isDpcdOffline() = 0; + virtual void setAuxBus(AuxBus * bus) = 0; + virtual NvU32 getVideoFallbackSupported() = 0; + // + // Cached CAPS + // These are only re-read when notifyHPD is called + // + virtual unsigned getRevisionMajor() = 0; + virtual unsigned getRevisionMinor() = 0; + + virtual unsigned lttprGetRevisionMajor() = 0; + virtual unsigned lttprGetRevisionMinor() = 0; + + virtual bool getSDPExtnForColorimetry() = 0; + virtual bool getRootAsyncSDPSupported() = 0; + + bool isAtLeastVersion(unsigned major, unsigned minor) + { + if (getRevisionMajor() > major) + return true; + + if (getRevisionMajor() < major) + return false; + + return getRevisionMinor() >= minor; + } + + bool isVersion(unsigned major, unsigned minor) + { + if ((getRevisionMajor() == major) && + (getRevisionMinor() == minor)) + return true; + + return false; + } + + bool lttprIsAtLeastVersion(unsigned major, unsigned minor) + { + if (lttprGetRevisionMajor() > major) + return true; + + if (lttprGetRevisionMajor() < major) + return false; + + return lttprGetRevisionMinor() >= minor; + } + + bool lttprIsVersion(unsigned major, unsigned minor) + { + if ((lttprGetRevisionMajor() == major) && + (lttprGetRevisionMinor() == minor)) + return true; + + return false; + } + + // Convert Link Bandwidth read from DPCD 00001h/2201h 8b10b_MAX_LINK_RATE to 10M convention link rate + NvU32 mapLinkBandiwdthToLinkrate(NvU32 linkBandwidth) + { + if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _1_62_GBPS, linkBandwidth)) + return dp2LinkRate_1_62Gbps; + else if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _2_70_GBPS, linkBandwidth)) + return dp2LinkRate_2_70Gbps; + else if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _5_40_GBPS, linkBandwidth)) + return dp2LinkRate_5_40Gbps; + else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_BANDWIDTH, _VAL, _8_10_GBPS, linkBandwidth)) + return dp2LinkRate_8_10Gbps; + else + { + DP_ASSERT(0 && "Unknown link bandwidth. 
Assuming HBR"); + return dp2LinkRate_2_70Gbps; + } + } + + // + // Native aux transaction size (16 for AUX) + // + virtual size_t getTransactionSize() = 0; + + // + // SST Branching device/dongle/repeater + // - Describes downstream port limitations + // - Not for use with MST + // - Primarily used for dongles (look at port 0 for pclk limits) + // + virtual LegacyPort * getLegacyPort(unsigned index) = 0; + virtual unsigned getLegacyPortCount() = 0; + + virtual PCONCaps * getPCONCaps() = 0; + + // + // Single stream specific caps + // + virtual unsigned getNumberOfAudioEndpoints() = 0; + virtual int getSinkCount() = 0; + virtual void setSinkCount(int sinkCount) = 0; + + // + // MISC + // + virtual bool isPC2Disabled() = 0; + virtual void setPC2Disabled(bool disabled) = 0; + + virtual void setDPCDOffline(bool enable) = 0; + virtual void updateDPCDOffline() = 0; + virtual bool auxAccessAvailable() = 0; + + virtual void setSupportsESI(bool bIsESISupported) = 0; + virtual void setLttprSupported(bool isLttprSupported) = 0; + + // + // Intermediate Link Rate (eDP ILR) + // + virtual void setIndexedLinkrateEnabled(bool newVal) = 0; + virtual bool isIndexedLinkrateEnabled() = 0; + virtual bool isIndexedLinkrateCapable() = 0; + virtual NvU16 *getLinkRateTable() = 0; + virtual bool getRawLinkRateTable(NvU8 *buffer = NULL) = 0; + + // + // Link power state management + // + virtual bool setPowerState(PowerState newState) = 0; + virtual PowerState getPowerState() = 0; + // + // Multistream + // + virtual bool getGUID(GUID & guid) = 0; // DPCD offset 30 + virtual AuxRetry::status setGUID(GUID & guid) = 0; + virtual AuxRetry::status setMessagingEnable(bool uprequestEnable, bool upstreamIsSource) = 0; + virtual AuxRetry::status setMultistreamLink(bool bMultistream) = 0; + virtual void payloadTableClearACT() = 0; + virtual bool payloadWaitForACTReceived() = 0; + virtual bool payloadAllocate(unsigned streamId, unsigned begin, unsigned count) = 0; + virtual bool clearPendingMsg() = 0; + virtual bool isMessagingEnabled() = 0; + + // + // If set to IRQ we'll receive CSN messages on hotplugs (which are actually easy to miss). + // If set to HPD mode we'll always receive an HPD whenever the topology changes. + // The library supports using both modes. 
+ // + virtual AuxRetry::status setMultistreamHotplugMode(MultistreamHotplugMode notifyType) = 0; + + // + // Interrupts + // + virtual bool interruptContentProtection() = 0; + virtual void clearInterruptContentProtection() = 0; + + virtual bool intteruptMCCS() = 0; + virtual void clearInterruptMCCS() = 0; + + virtual bool interruptDownReplyReady() = 0; + virtual void clearInterruptDownReplyReady() = 0; + + virtual bool interruptUpRequestReady() = 0; + virtual void clearInterruptUpRequestReady() = 0; + + virtual bool interruptCapabilitiesChanged() = 0; + virtual void clearInterruptCapabilitiesChanged() = 0; + + virtual bool getLinkStatusChanged() = 0; + virtual void clearLinkStatusChanged() = 0; + + virtual bool isPanelReplayErrorSet() = 0; + virtual void clearPanelReplayError() = 0; + virtual void readPanelReplayError() = 0; + + virtual bool getHdmiLinkStatusChanged() = 0; + virtual void clearHdmiLinkStatusChanged() = 0; + + virtual bool getStreamStatusChanged() = 0; + virtual void clearStreamStatusChanged() =0; + + virtual bool getDpTunnelingIrq() = 0; + virtual void clearDpTunnelingIrq() = 0; + + virtual void setDirtyLinkStatus(bool dirty) = 0; + virtual void refreshLinkStatus() = 0; + virtual bool isLinkStatusValid(unsigned lanes) = 0; + + // DPCD offset 250 - 259 + virtual void get80BitsCustomTestPattern(NvU8 *testPattern) = 0; + // + // Message Boxes + // + virtual AuxRetry::status writeDownRequestMessageBox(NvU8 * data, size_t length) = 0; + virtual size_t getDownRequestMessageBoxSize() = 0; + + virtual AuxRetry::status writeUpReplyMessageBox(NvU8 * data, size_t length) = 0; + virtual size_t getUpReplyMessageBoxSize() = 0; + + virtual AuxRetry::status readDownReplyMessageBox(NvU32 offset, NvU8 * data, size_t length) = 0; + virtual size_t getDownReplyMessageBoxSize() = 0; + + virtual AuxRetry::status readUpRequestMessageBox(NvU32 offset, NvU8 * data, size_t length) = 0; + virtual size_t getUpRequestMessageBoxSize() = 0; + + // MST<->SST override + virtual void overrideMultiStreamCap(bool mstCapable) = 0; + virtual bool getMultiStreamCapOverride() = 0; + + virtual bool getDpcdMultiStreamCap(void) = 0; + + // Set GPU DP support capability + virtual void setGpuDPSupportedVersions(NvU32 gpuDPSupportedVersions) = 0; + + // Set GPU FEC support capability + virtual void setGpuFECSupported(bool bSupportFEC) = 0; + + virtual void applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatabase) = 0; + + // PCON configuration + + // Reset PCON (to default state) + virtual void resetProtocolConverter() = 0; + // Source control mode and FRL/HDMI mode selection. 
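+        // An illustrative PCON bring-up order (a sketch only; the exact flow
+        // is caller policy, and 'frlBwMask' is a caller-chosen bandwidth mask):
+        //
+        //     resetProtocolConverter();
+        //     setSourceControlMode(true, true);   // take control, select FRL
+        //     bool bFrlReady = false;
+        //     if (checkPCONFrlReady(&bFrlReady) && bFrlReady)
+        //         setupPCONFrlLinkAssessment(frlBwMask);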
+ virtual bool setSourceControlMode(bool bEnableSourceControlMode, bool bEnableFRLMode) = 0; + + virtual bool checkPCONFrlReady(bool *bFrlReady) = 0; + virtual bool setupPCONFrlLinkAssessment(NvU32 linkBw, + bool bEnableExtendLTMode = false, + bool bEnableConcurrentMode = false) = 0; + + virtual bool checkPCONFrlLinkStatus(NvU32 *frlRate) = 0; + + virtual bool queryHdmiLinkStatus(bool *bLinkActive, bool *bLinkReady) = 0; + virtual NvU32 restorePCONFrlLink(NvU32 linkBwMask, + bool bEnableExtendLTMode = false, + bool bEnableConcurrentMode = false) = 0; + + virtual void readPsrCapabilities(vesaPsrSinkCaps *caps) = 0; + virtual bool updatePsrConfiguration(vesaPsrConfig config) = 0; + virtual bool readPsrConfiguration(vesaPsrConfig *config) = 0; + virtual bool readPsrState(vesaPsrState *psrState) = 0; + virtual bool readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState) = 0; + virtual bool writePsrErrorStatus(vesaPsrErrorStatus psrErr) = 0; + virtual bool readPsrErrorStatus(vesaPsrErrorStatus *psrErr) = 0; + virtual bool writePsrEvtIndicator(vesaPsrEventIndicator psrErr) = 0; + virtual bool readPsrEvtIndicator(vesaPsrEventIndicator *psrErr) = 0; + virtual bool readPrSinkDebugInfo(panelReplaySinkDebugInfo *prDbgInfo) = 0; + + virtual bool isDpInTunnelingSupported() = 0; + virtual void enableDpTunnelingBwAllocationSupport() = 0; + virtual bool isDpTunnelBwAllocationEnabled() = 0; + virtual bool getDpTunnelEstimatedBw(NvU8 &estimatedBw) = 0; + virtual bool getDpTunnelGranularityMultiplier(NvU8 &granularityMultiplier) = 0; + virtual TriState getDpTunnelBwRequestStatus() = 0; + virtual bool setDpTunnelBwAllocation(bool bEnable) = 0; + virtual bool hasDpTunnelEstimatedBwChanged() = 0; + virtual bool hasDpTunnelBwAllocationCapabilityChanged() = 0; + virtual bool writeDpTunnelRequestedBw(NvU8 requestedBw) = 0; + virtual bool clearDpTunnelingBwRequestStatus() = 0; + virtual bool clearDpTunnelingEstimatedBwStatus() = 0; + virtual bool clearDpTunnelingBwAllocationCapStatus() = 0; + + virtual ~DPCDHAL() {} + }; + + // + // Implement interface + // + DPCDHAL * MakeDPCDHAL(AuxBus * bus, Timer * timer, MainLink * main); + + struct DPCDHALImpl : DPCDHAL + { + AuxRetry bus; + Timer * timer; + bool dpcdOffline; + bool bGrantsPostLtRequest; + bool pc2Disabled; + bool uprequestEnable; + bool upstreamIsSource; + bool bMultistream; + bool bGpuFECSupported; + bool bLttprSupported; + bool bBypassILREdpRevCheck; + NvU32 overrideDpcdMaxLinkRate; + NvU32 overrideDpcdRev; + NvU32 overrideDpcdMaxLaneCount; + + NvU32 gpuDPSupportedVersions; + + struct _LegacyPort: public LegacyPort + { + DwnStreamPortType type; + DwnStreamPortAttribute nonEDID; + + NvU64 maxTmdsClkRate; + + DwnStreamPortType getDownstreamPortType() + { + return type; + } + + DwnStreamPortAttribute getDownstreamNonEDIDPortAttribute() + { + return nonEDID; + } + + NvU64 getMaxTmdsClkRate() + { + return maxTmdsClkRate; + } + + } legacyPort[16]; + + struct + { + unsigned revisionMajor, revisionMinor; // DPCD offset 0 + bool supportsESI; + LinkRate maxLinkRate; // DPCD offset 1 + unsigned maxLaneCount; // DPCD offset 2 + unsigned maxLanesAtHBR; + unsigned maxLanesAtRBR; + bool enhancedFraming; + bool bPostLtAdjustmentSupport; + + bool supportsNoHandshakeTraining; + bool bSupportsTPS4; + unsigned NORP; // DPCD offset 4 + + bool detailedCapInfo; // DPCD offset 5 + bool downStreamPortPresent; + NvU8 downStreamPortType; + + unsigned downStreamPortCount; // DPCD offset 7 + bool ouiSupported; + bool msaTimingParIgnored; + + NvU16 
linkRateTable[NV_DPCD_SUPPORTED_LINK_RATES__SIZE]; // DPCD offset 10 ~ 1F + + bool supportsMultistream; // DPCD offset 21 + unsigned numberAudioEndpoints; // DPCD offset 22 + bool overrideToSST; // force to SST even if MST capable + bool noLinkTraining; // DPCD offset 330h + + bool extendedRxCapsPresent; // DPCD offset 000Eh [7] - Extended Receiver Capability present + + // DPCD Offset 2211h; + unsigned extendedSleepWakeTimeoutRequestMs; + // DPCD Offset 0119h [0] - If we grant the extendedSleepWakeTimeoutRequest + bool bExtendedSleepWakeTimeoutGranted; + + bool bFECSupported; + + // DPCD Offset F0002h - Number of Physical Repeaters present (after mapping) between Source and Sink + unsigned phyRepeaterCount; + // DPCD offset 700 - EDP_DPCD_REV + unsigned eDpRevision; + + struct + { + unsigned revisionMajor, revisionMinor; // DPCD offset F0000h + LinkRate maxLinkRate; // DPCD offset F0001h + unsigned maxLaneCount; // DPCD offset F0004h + unsigned phyRepeaterExtendedWakeTimeoutMs; // DPCD offset F0005h + // The array to keep track of FEC capability of each LTTPR + bool bFECSupportedRepeater[NV_DPCD14_PHY_REPEATER_CNT_MAX]; + // If all the LTTPRs supports FEC + bool bFECSupported; + bool bAuxlessALPMSupported; // DPCD offset F0009 + } repeaterCaps; + + struct + { + bool bIsSupported; + bool bUsb4DriverBwAllocationSupport; + bool bIsPanelReplayOptimizationSupported; + bool bIsBwAllocationSupported; + NvU8 maxLaneCount; + LinkRate maxLinkRate; + } dpInTunnelingCaps; + + PCONCaps pconCaps; + vesaPsrSinkCaps psrCaps; + NvU32 videoFallbackFormats; // DPCD offset 0200h + + } caps; + + // This is set by connectorImpl depending on the request from client/regkey + bool bEnableDpTunnelBwAllocationSupport; + bool bIsDpTunnelBwAllocationEnabled; // This is set to true after we succeed in enabling BW allocation + + struct + { + unsigned sinkCount; // DPCD offset 200 + bool automatedTestRequest; + bool cpIRQ; + bool mccsIRQ; + bool downRepMsgRdy; + bool upReqMsgRdy; + bool prErrorStatus; // DPCD offset 2004h[3] + bool rxCapChanged; // DPCD offset 2005 + bool linkStatusChanged; // DPCD offset 2005 + bool streamStatusChanged; // DPCD offset 2005 + bool hdmiLinkStatusChanged; // DPCD offset 2005 + bool dpTunnelingIrq; // DPCD offset 2005 + + // DPCD offset 250 - 259 + NvU8 cstm80Bits[NV_DPCD_TEST_80BIT_CUSTOM_PATTERN__SIZE]; + struct + { + struct + { + bool clockRecoveryDone; + bool channelEqualizationDone; + bool symbolLocked; + } laneStatus[4]; // DPCD offset 202, 203 + + bool interlaneAlignDone; // DPCD offset 204 + bool downstmPortChng; + bool linkStatusUpdated; + + // + // (ESI specific) signifies that we have link trained and should + // update the link status in the next query to isLinkLost. Keep in + // mind that linkStatusChanged might still be zero. 
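+                // (In other words, the next isLinkLost() query must honor this
+                // dirty flag even when the LINK_STATUS_CHANGED bit reads zero.)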
+                //
+                bool linkStatusDirtied;
+            } laneStatusIntr;
+
+            struct
+            {
+                bool testRequestTraining;              // DPCD offset 218
+                LinkRate testRequestLinkRate;          // DPCD offset 219
+                unsigned testRequestLaneCount;         // DPCD offset 220
+            } testTraining;
+
+            struct
+            {
+                bool testRequestEdidRead;              // DPCD offset 218
+            } testEdid;
+
+            struct
+            {
+                bool testRequestPattern;               // DPCD offset 218
+                TestPatternType testPatRequested;      // DPCD offset 221
+                NvU16 testHorTotalPixels;              // DPCD offset 222, 223
+                NvU16 testVerTotalLines;               // DPCD offset 224, 225
+                NvU16 testHorStartPixels;              // DPCD offset 226, 227
+                NvU16 testVerStartLines;               // DPCD offset 228, 229
+                NvU16 testHsyncWidthPixels;            // DPCD offset 22A, 22B
+                bool testHsyncPolarity;
+                NvU16 testVsyncWidthLines;             // DPCD offset 22C, 22D
+                bool testVsyncPolarity;
+                NvU16 testActiveWidthPixels;           // DPCD offset 22E, 22F
+                NvU16 testActiveHeightLines;           // DPCD offset 230, 231
+            } testPattern;
+
+            struct
+            {
+                bool testRequestPhyCompliance;         // DPCD offset 218
+                LinkQualityPatternType phyTestPattern; // DPCD offset 248
+            } testPhyCompliance;
+
+        } interrupts;
+
+        bool bIndexedLinkrateCapable, bIndexedLinkrateEnabled;
+
+    public:
+        DPCDHALImpl(AuxBus * bus, Timer * timer)
+            : bus(bus), timer(timer), bGrantsPostLtRequest(false), uprequestEnable(false),
+              upstreamIsSource(false), bMultistream(false), bGpuFECSupported(false),
+              bBypassILREdpRevCheck(false), overrideDpcdMaxLinkRate(0),
+              overrideDpcdRev(0), gpuDPSupportedVersions(0)
+        {
+            // start with default caps.
+            dpcdOffline = true;
+
+            //
+            // fill out the bare minimum caps required ...
+            // this should be extended to cover more DPCD offsets in the future.
+            //
+            caps.revisionMajor = 0x1;
+            caps.revisionMinor = 0x1;
+            caps.supportsESI = false;
+            caps.maxLinkRate = dp2LinkRate_8_10Gbps;
+            caps.maxLaneCount = 4;
+            caps.enhancedFraming = true;
+            caps.downStreamPortPresent = true;
+            caps.downStreamPortCount = 1;
+
+            // populate the sinkcount interrupt
+            interrupts.sinkCount = 1;
+        }
+
+        ~DPCDHALImpl()
+        {
+        }
+
+        virtual void setAuxBus(AuxBus * bus)
+        {
+            this->bus = bus;
+        }
+
+        bool isDpcdOffline()
+        {
+            return dpcdOffline;
+        }
+
+        void setDPCDOffline(bool bOffline)
+        {
+            dpcdOffline = bOffline;
+        }
+
+        void updateDPCDOffline();
+        bool auxAccessAvailable();
+
+        void setPC2Disabled(bool disabled)
+        {
+            pc2Disabled = disabled;
+        }
+
+        void setLttprSupported(bool isLttprSupported)
+        {
+            bLttprSupported = isLttprSupported;
+        }
+
+        bool isPC2Disabled()
+        {
+            return pc2Disabled;
+        }
+
+        virtual void parseAndReadCaps();
+        virtual PCONCaps * getPCONCaps()
+        {
+            return &(caps.pconCaps);
+        }
+
+        // DPCD offset 0
+        virtual unsigned getRevisionMajor()
+        {
+            return caps.revisionMajor;
+        }
+
+        virtual unsigned getRevisionMinor()
+        {
+            return caps.revisionMinor;
+        }
+
+        // DPCD offset F0000h
+        virtual unsigned lttprGetRevisionMajor()
+        {
+            return caps.repeaterCaps.revisionMajor;
+        }
+
+        virtual unsigned lttprGetRevisionMinor()
+        {
+            return caps.repeaterCaps.revisionMinor;
+        }
+
+        virtual LinkRate getMaxLinkRate();
+
+        // DPCD offset 2
+        virtual unsigned getMaxLaneCount();
+
+        virtual bool getNoLinkTraining()
+        {
+            return caps.noLinkTraining;
+        }
+
+        virtual unsigned getPhyRepeaterCount()
+        {
+            return caps.phyRepeaterCount;
+        }
+
+        // Max lanes supported at the desired link rate.
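+        // For instance, a marginal cable may be capped via the
+        // skipCableBWCheck(maxLaneAtHighRate, maxLaneAtLowRate) override, in
+        // which case a query at the high rate can legitimately report fewer
+        // lanes than one at the low rate (illustrative scenario; the defaults
+        // come from the DPCD caps).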
+ virtual unsigned getMaxLaneCountSupportedAtLinkRate(LinkRate linkRate); + + virtual bool getEnhancedFraming() + { + return caps.enhancedFraming; + } + + // DPCD offset 5 + virtual bool getDownstreamPort(NvU8 *portType) + { + *portType = caps.downStreamPortType; + return caps.downStreamPortPresent; + } + + virtual bool getSupportsNoHandshakeTraining() + { + return caps.supportsNoHandshakeTraining; + } + + // DPCD offset 7 + virtual unsigned getLegacyPortCount() + { + return caps.downStreamPortCount; + } + + virtual LegacyPort * getLegacyPort(unsigned index) + { + return &legacyPort[index]; + } + + virtual bool getMsaTimingparIgnored() + { + return caps.msaTimingParIgnored; + } + + virtual bool getOuiSupported() + { + return caps.ouiSupported; + } + + virtual bool getSDPExtnForColorimetry(); + + virtual bool getRootAsyncSDPSupported(); + + virtual AuxRetry::status setOuiSource(unsigned ouiId, const char * model, + size_t modelNameLength, NvU8 chipRevision); + virtual bool getOuiSource(unsigned &ouiId, char * modelName, + size_t modelNameBufferSize, NvU8 & chipRevision); + virtual bool getOuiSink(unsigned &ouiId, unsigned char * modelName, + size_t modelNameBufferSize, NvU8 & chipRevision); + + // DPCD offset 21h + virtual bool getSupportsMultistream() + { + return caps.supportsMultistream && (!caps.overrideToSST); + } + + virtual void setSupportsESI(bool bIsESISupported) + { + caps.supportsESI = bIsESISupported; + } + + // + // Single stream specific caps + // DPCD offset 22h + // + virtual unsigned getNumberOfAudioEndpoints(); + + // DPCD offset 30h + virtual bool getGUID(GUID & guid); + virtual AuxRetry::status setGUID(GUID & guid); + + void parsePortDescriptors(); + + // + // Notifications of external events + // + virtual void notifyIRQ() + { + parseAndReadInterrupts(); + } + + virtual void populateFakeDpcd(); + + // DPCD override routine: Max link rate override. + void overrideMaxLinkRate(NvU32 overrideMaxLinkRate); + + // DPCD override routine: Max lane count override. + void overrideMaxLaneCount(NvU32 maxLaneCount) + { + caps.maxLaneCount = maxLaneCount; + overrideDpcdMaxLaneCount = maxLaneCount; + } + + // DPCD override routine: Max lane count override at a given link rate. + void skipCableBWCheck(NvU32 maxLaneAtHighRate, NvU32 maxLaneAtLowRate) + { + caps.maxLanesAtHBR = maxLaneAtHighRate; + caps.maxLanesAtRBR = maxLaneAtLowRate; + } + + // DPCD override routine: Optimal link config (link rate and lane count) override. + void overrideOptimalLinkCfg(LinkRate optimalLinkRate, + NvU32 optimalLaneCount) + { + caps.maxLinkRate = optimalLinkRate; + caps.maxLaneCount = optimalLaneCount; + } + + // DPCD override routine: Optimal link rate + void overrideOptimalLinkRate(LinkRate optimalLinkRate) + { + caps.maxLinkRate = optimalLinkRate; + } + + virtual void notifyHPD(bool status, bool bSkipDPCDRead); + virtual bool isPostLtAdjustRequestSupported() + { + // + // If the upstream DPTX and downstream DPRX both support TPS4, + // TPS4 shall be used instead of POST_LT_ADJ_REQ. 
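+            // That is, POST_LT_ADJ_REQ is honored only when it has been granted
+            // and TPS4 is not usable end to end: the GPU must support DP 1.4 and
+            // the sink must support TPS4 for TPS4 to take precedence.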
+ // + NvBool bTps4Supported = FLD_TEST_DRF(0073_CTRL_CMD_DP, _GET_CAPS_DP_VERSIONS_SUPPORTED, + _DP1_4, _YES, gpuDPSupportedVersions) && + caps.bSupportsTPS4; + return bGrantsPostLtRequest && !bTps4Supported; + } + + virtual void setPostLtAdjustRequestGranted(bool bGrantPostLtRequest); + virtual bool getIsPostLtAdjRequestInProgress(); + virtual TrainingPatternSelectType getTrainingPatternSelect(); + virtual bool setTrainingMultiLaneSet(NvU8 numLanes, + NvU8 *voltSwingSet, + NvU8 *preEmphasisSet); + + virtual AuxRetry::status setIgnoreMSATimingParamters(bool msaTimingParamIgnoreEn); + + virtual AuxRetry::status setLinkQualPatternSet(LinkQualityPatternType linkQualPattern, unsigned laneCount); + virtual AuxRetry::status setLinkQualLaneSet(unsigned lane, LinkQualityPatternType linkQualPattern); + + virtual AuxRetry::status setMessagingEnable(bool _uprequestEnable, bool _upstreamIsSource); + virtual AuxRetry::status setMultistreamLink(bool enable); + virtual AuxRetry::status setMultistreamHotplugMode(MultistreamHotplugMode notifyType); + + virtual bool parseTestRequestTraining(NvU8 * buffer /* 0x18-0x28 valid */); + void parseAutomatedTestRequest(bool testRequestPending); + + virtual bool parseTestRequestPhy(); + + virtual bool interruptCapabilitiesChanged() + { + return interrupts.rxCapChanged; + } + + virtual void clearInterruptCapabilitiesChanged() + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _RX_CAP_CHANGED, _YES, irqVector); + bus.write(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + + virtual bool isPanelReplayErrorSet() + { + return interrupts.prErrorStatus; + } + + virtual void readPanelReplayError(); + virtual void clearPanelReplayError() + { + NvU8 irqVector = 0U; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI1, + _PANEL_REPLAY_ERROR_STATUS, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1, &irqVector, + sizeof irqVector); + } + + virtual bool getLinkStatusChanged() + { + return interrupts.linkStatusChanged; + } + + virtual void clearLinkStatusChanged() + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _LINK_STATUS_CHANGED, _YES, irqVector); + bus.write(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + + virtual bool getHdmiLinkStatusChanged() + { + return interrupts.hdmiLinkStatusChanged; + } + + virtual void clearHdmiLinkStatusChanged() + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _HDMI_LINK_STATUS_CHANGED, _YES, irqVector); + bus.write(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + + virtual bool getStreamStatusChanged() + { + return interrupts.streamStatusChanged; + } + + virtual void clearStreamStatusChanged() + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _STREAM_STATUS_CHANGED, _YES, irqVector); + bus.write(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + + virtual bool getDpTunnelingIrq() + { + return interrupts.dpTunnelingIrq; + } + + virtual void clearDpTunnelingIrq() + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD20, _LINK_SERVICE_IRQ_VECTOR_ESI0, _DP_TUNNELING_IRQ, _YES, irqVector); + bus.write(NV_DPCD20_LINK_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + + virtual bool isLinkStatusValid(unsigned lanes); + virtual void refreshLinkStatus(); + virtual void setDirtyLinkStatus(bool dirty) + { + interrupts.laneStatusIntr.linkStatusDirtied = 
dirty; + } + + void parseAndReadInterruptsESI(); + + void readLTTPRLinkStatus(NvS32 rxIndex, NvU8 *buffer); + void resetIntrLaneStatus(); + + void fetchLinkStatusESI(); + void fetchLinkStatusLegacy(); + + virtual bool readTraining(NvU8* voltageSwingLane, NvU8* preemphasisLane, + NvU8* trainingScoreLane, NvU8* postCursor, + NvU8 activeLaneCount); + + virtual bool isLaneSettingsChanged(NvU8* oldVoltageSwingLane, + NvU8* newVoltageSwingLane, + NvU8* oldPreemphasisLane, + NvU8* newPreemphasisLane, + NvU8 activeLaneCount); + + void parseAndReadInterruptsLegacy(); + + void parseAndReadInterrupts() + { + if (caps.supportsESI) + parseAndReadInterruptsESI(); // DP 1.2 should use the new ESI region + else + parseAndReadInterruptsLegacy(); + + } + + virtual int getSinkCount() // DPCD offset 200 + { + return interrupts.sinkCount; + } + + // + // This was introduced as part of WAR for HP SDC Panel since their + // TCON sets DPCD 0x200 SINK_COUNT=0. It should never be called to + // set the SinkCount in other cases since SinkCount comes from DPCD. + // + virtual void setSinkCount(int sinkCount) + { + interrupts.sinkCount = sinkCount; + } + + virtual bool interruptContentProtection() + { + return interrupts.cpIRQ; + } + + virtual void clearInterruptContentProtection(); + + virtual bool intteruptMCCS() + { + return interrupts.mccsIRQ; + } + + virtual void clearInterruptMCCS(); + + virtual bool interruptDownReplyReady() + { + return interrupts.downRepMsgRdy; + } + + virtual bool interruptUpRequestReady() + { + return interrupts.upReqMsgRdy; + } + + virtual void clearInterruptDownReplyReady(); + virtual void clearInterruptUpRequestReady(); + + virtual bool getLaneStatusSymbolLock(int lane) + { + return interrupts.laneStatusIntr.laneStatus[lane].symbolLocked; + } + + virtual bool getLaneStatusClockRecoveryDone(int lane) + { + return interrupts.laneStatusIntr.laneStatus[lane].clockRecoveryDone; + } + + virtual bool getLaneStatusChannelEqualizationDone(int lane) + { + return interrupts.laneStatusIntr.laneStatus[lane].channelEqualizationDone; + } + + virtual bool getInterlaneAlignDone() // DPCD offset 204 + { + return interrupts.laneStatusIntr.interlaneAlignDone; + } + + virtual bool getDownStreamPortStatusChange() + { + return interrupts.laneStatusIntr.downstmPortChng; + } + + virtual bool getPendingTestRequestTraining() // DPCD offset 218 + { + return interrupts.testTraining.testRequestTraining; + } + + virtual bool getPendingAutomatedTestRequest() + { + return interrupts.automatedTestRequest; + } + + virtual bool getPendingTestRequestEdidRead() + { + return interrupts.testEdid.testRequestEdidRead; + } + + virtual bool getPendingTestRequestPhyCompliance() + { + return interrupts.testPhyCompliance.testRequestPhyCompliance; + } + + // DPCD offset 219, 220 + virtual void getTestRequestTraining(LinkRate & rate, unsigned & lanes) + { + rate = interrupts.testTraining.testRequestLinkRate; + lanes = interrupts.testTraining.testRequestLaneCount; + } + + // DPCD offset 248 + virtual LinkQualityPatternType getPhyTestPattern() + { + return interrupts.testPhyCompliance.phyTestPattern; + } + + // DPCD offset 250 - 259 + virtual void get80BitsCustomTestPattern(NvU8 *testPattern) + { + int i; + + for (i = 0; i < 10; i++) + { + testPattern[i] = interrupts.cstm80Bits[i]; + } + } + + virtual bool getBKSV(NvU8 *bKSV); + virtual bool getBCaps(BCaps &bCaps, NvU8 * rawByte); + virtual bool getHdcp22BCaps(BCaps &bCaps, NvU8 *rawByte); + virtual bool getBinfo(BInfo &bInfo); + virtual bool getRxStatus(const HDCPState &hdcpState, NvU8 
*data);
+
+        virtual AuxRetry::status setTestResponseChecksum(NvU8 checksum)
+        {
+            if (caps.revisionMajor <= 0)
+                DP_ASSERT(0 && "Something is wrong, revision major should be > 0");
+
+            return bus.write(NV_DPCD_TEST_EDID_CHKSUM, &checksum, sizeof checksum);
+        }
+
+        virtual AuxRetry::status setTestResponse(bool ack, bool edidChecksumWrite);
+
+        // Message box encoding
+        virtual AuxRetry::status writeDownRequestMessageBox(NvU8 * data, size_t length)
+        {
+            //
+            // We can assume no message was sent if this fails.
+            // Reasoning:
+            //    Sinks are not allowed to DEFER except on the first 16 byte write.
+            //    If there isn't enough room for the 48 byte packet, that write
+            //    will defer.
+            //
+            return bus.write(NV_DPCD_MBOX_DOWN_REQ, data, (unsigned)length);
+        }
+
+        virtual size_t getDownRequestMessageBoxSize()
+        {
+            return DP_MESSAGEBOX_SIZE;
+        }
+
+        virtual AuxRetry::status writeUpReplyMessageBox(NvU8 * data, size_t length)
+        {
+            if (caps.revisionMajor <= 0)
+                DP_ASSERT(0 && "Something is wrong, revision major should be > 0");
+
+            //
+            // We can assume no message was sent if this fails.
+            // Reasoning:
+            //    Sinks are not allowed to DEFER except on the first 16 byte write.
+            //    If there isn't enough room for the 48 byte packet, that write
+            //    will defer.
+            //
+            return bus.write(NV_DPCD_MBOX_UP_REP, data, (unsigned)length);
+        }
+
+        virtual size_t getUpReplyMessageBoxSize()
+        {
+            return DP_MESSAGEBOX_SIZE;
+        }
+
+        virtual AuxRetry::status readDownReplyMessageBox(NvU32 offset, NvU8 * data, size_t length)
+        {
+            // if (caps.revisionMajor <= 0)
+            //     DP_ASSERT(0 && "Something is wrong, revision major should be > 0");
+
+            DP_ASSERT(offset + length <= DP_MESSAGEBOX_SIZE);
+
+            return bus.read(NV_DPCD_MBOX_DOWN_REP + offset, data, (unsigned)length);
+        }
+
+        virtual size_t getDownReplyMessageBoxSize()
+        {
+            return DP_MESSAGEBOX_SIZE;
+        }
+
+        virtual AuxRetry::status readUpRequestMessageBox(NvU32 offset, NvU8 * data, size_t length)
+        {
+            if (caps.revisionMajor <= 0)
+                DP_ASSERT(0 && "Something is wrong, revision major should be > 0");
+
+            DP_ASSERT(offset + length <= DP_MESSAGEBOX_SIZE);
+
+            return bus.read(NV_DPCD_MBOX_UP_REQ + offset, data, (unsigned)length);
+        }
+
+        virtual size_t getUpRequestMessageBoxSize()
+        {
+            return DP_MESSAGEBOX_SIZE;
+        }
+
+        virtual size_t getTransactionSize()
+        {
+            return bus.getDirect()->transactionSize();
+        }
+
+        virtual PowerState getPowerState();
+        virtual bool setPowerState(PowerState newState);
+        virtual void payloadTableClearACT();
+        virtual bool payloadWaitForACTReceived();
+        virtual bool payloadAllocate(unsigned streamId, unsigned begin, unsigned count);
+
+        void overrideMultiStreamCap(bool mstCapable)
+        {
+            caps.overrideToSST = !mstCapable;
+        }
+
+        bool getMultiStreamCapOverride()
+        {
+            return caps.overrideToSST;
+        }
+
+        bool getDpcdMultiStreamCap(void)
+        {
+            return caps.supportsMultistream;
+        }
+
+        virtual void setGpuDPSupportedVersions(NvU32 _gpuDPSupportedVersions);
+
+        void setGpuFECSupported(bool bSupportFEC)
+        {
+            bGpuFECSupported = bSupportFEC;
+        }
+
+        void applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatabase);
+
+        // Clear any pending message {DOWN_REP/UP_REQ}; returns true if one existed.
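+        // (Typically called when messaging is enabled or torn down, so that
+        // stale sideband traffic is not mistaken for a fresh reply; this is a
+        // usage note, not a constraint imposed by the interface.)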
+ virtual bool clearPendingMsg(); + + virtual bool isMessagingEnabled(); + + virtual void setIndexedLinkrateEnabled(bool val) + { + bIndexedLinkrateEnabled = val; + } + + virtual bool isIndexedLinkrateEnabled() + { + return bIndexedLinkrateEnabled; + } + + virtual bool isIndexedLinkrateCapable() + { + return bIndexedLinkrateCapable; + } + + virtual NvU16 *getLinkRateTable(); + + virtual NvU32 getVideoFallbackSupported() + { + return caps.videoFallbackFormats; + } + + virtual bool getRawLinkRateTable(NvU8 *buffer); + + virtual void resetProtocolConverter() + { + NvU8 data = 0; + bus.write(NV_DPCD14_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data)); + bus.write(NV_DPCD14_PCON_FRL_LINK_CONFIG_2, &data, sizeof(data)); + } + + virtual bool setSourceControlMode(bool bEnableSourceControlMode, bool bEnableFRLMode); + + virtual bool checkPCONFrlReady(bool *bFrlReady); + + virtual bool setupPCONFrlLinkAssessment(NvU32 linkBwMask, + bool bEnableExtendLTMode = false, + bool bEnableConcurrentMode = false); + + virtual bool checkPCONFrlLinkStatus(NvU32 *frlRateMask); + virtual bool queryHdmiLinkStatus(bool *bLinkActive, bool *bLinkReady); + + virtual NvU32 restorePCONFrlLink(NvU32 linkBwMask, + bool bEnableExtendLTMode = false, + bool bEnableConcurrentMode = false); + + virtual void readPsrCapabilities(vesaPsrSinkCaps *caps) + { + dpMemCopy(caps, &this->caps.psrCaps, sizeof(vesaPsrSinkCaps)); + } + + virtual bool updatePsrConfiguration(vesaPsrConfig psrcfg); + virtual bool readPsrConfiguration(vesaPsrConfig *psrcfg); + + virtual bool readPsrState(vesaPsrState *psrState); + virtual bool readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState); + + virtual bool writePsrErrorStatus(vesaPsrErrorStatus psrErr); + virtual bool readPsrErrorStatus(vesaPsrErrorStatus *psrErr); + + virtual bool writePsrEvtIndicator(vesaPsrEventIndicator psrEvt); + virtual bool readPsrEvtIndicator(vesaPsrEventIndicator *psrEvt); + + virtual bool readPrSinkDebugInfo(panelReplaySinkDebugInfo *prDbgInfo); + + virtual void configureDpTunnelBwAllocation(); + virtual bool getDpTunnelGranularityMultiplier(NvU8 &granularityMultiplier); + virtual TriState getDpTunnelBwRequestStatus(); + virtual bool setDpTunnelBwAllocation(bool bEnable); + + virtual bool isDpInTunnelingSupported() + { + return caps.dpInTunnelingCaps.bIsSupported; + } + + virtual void enableDpTunnelingBwAllocationSupport() + { + bEnableDpTunnelBwAllocationSupport = true; + } + + virtual bool isDpTunnelBwAllocationEnabled() + { + return bIsDpTunnelBwAllocationEnabled; + } + + bool getDpTunnelEstimatedBw(NvU8 &estimatedBw); + bool hasDpTunnelEstimatedBwChanged(); + bool hasDpTunnelBwAllocationCapabilityChanged(); + bool writeDpTunnelRequestedBw(NvU8 requestedBw); + bool clearDpTunnelingBwRequestStatus(); + bool clearDpTunnelingEstimatedBwStatus(); + bool clearDpTunnelingBwAllocationCapStatus(); + + }; + +} + +#endif //INCLUDED_DP_CONFIGCAPS_H diff --git a/src/common/displayport/inc/dp_connector.h b/src/common/displayport/inc/dp_connector.h new file mode 100644 index 0000000..9c3b344 --- /dev/null +++ b/src/common/displayport/inc/dp_connector.h @@ -0,0 +1,783 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_connector.h * +* This is the primary client interface. * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_CONNECTOR_H +#define INCLUDED_DP_CONNECTOR_H + +#include "dp_auxdefs.h" +#include "dp_object.h" +#include "dp_mainlink.h" +#include "dp_auxbus.h" +#include "dp_address.h" +#include "dp_guid.h" +#include "dp_evoadapter.h" +#include "dp_auxbus.h" +#include "dp_auxretry.h" +#include "displayport.h" +#include "dp_vrr.h" +#include "../../modeset/timing/nvt_dsc_pps.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" +#include "nvcfg_sdk.h" + +namespace DisplayPort +{ + class EvoInterface; + +#define SET_DP_IMP_ERROR(pErrorCode, errorCode) \ + if (pErrorCode && *pErrorCode == DP_IMP_ERROR_NONE) *pErrorCode = errorCode; + + typedef enum + { + DP_IMP_ERROR_NONE, + DP_IMP_ERROR_ZERO_VALUE_PARAMS, + DP_IMP_ERROR_AUDIO_BEYOND_48K, + DP_IMP_ERROR_DSC_SYNAPTICS_COLOR_FORMAT, + DP_IMP_ERROR_PPS_DSC_DUAL_FORCE, + DP_IMP_ERROR_DSC_PCON_FRL_BANDWIDTH, + DP_IMP_ERROR_DSC_PCON_HDMI2_BANDWIDTH, + DP_IMP_ERROR_DSC_LAST_HOP_BANDWIDTH, + DP_IMP_ERROR_INSUFFICIENT_BANDWIDTH, + DP_IMP_ERROR_INSUFFICIENT_BANDWIDTH_DSC, + DP_IMP_ERROR_INSUFFICIENT_BANDWIDTH_NO_DSC, + DP_IMP_ERROR_INSUFFICIENT_DP_TUNNELING_BANDWIDTH, + DP_IMP_ERROR_WATERMARK_BLANKING, + DP_IMP_ERROR_PPS_COLOR_FORMAT_NOT_SUPPORTED, + DP_IMP_ERROR_PPS_INVALID_HBLANK, + DP_IMP_ERROR_PPS_INVALID_BPC, + DP_IMP_ERROR_PPS_MAX_LINE_BUFFER_ERROR, + DP_IMP_ERROR_PPS_OVERALL_THROUGHPUT_ERROR, + DP_IMP_ERROR_PPS_DSC_SLICE_ERROR, + DP_IMP_ERROR_PPS_PPS_SLICE_COUNT_ERROR, + DP_IMP_ERROR_PPS_PPS_SLICE_HEIGHT_ERROR, + DP_IMP_ERROR_PPS_PPS_SLICE_WIDTH_ERROR, + DP_IMP_ERROR_PPS_INVALID_PEAK_THROUGHPUT, + DP_IMP_ERROR_PPS_MIN_SLICE_COUNT_ERROR, + DP_IMP_ERROR_PPS_GENERIC_ERROR, + } DP_IMP_ERROR; + + typedef enum + { + DP_TESTMESSAGE_STATUS_SUCCESS = 0, + DP_TESTMESSAGE_STATUS_ERROR = 0xDEADBEEF, + DP_TESTMESSAGE_STATUS_ERROR_INSUFFICIENT_INPUT_BUFFER = 0xDEADBEED, + DP_TESTMESSAGE_STATUS_ERROR_INVALID_PARAM = 0xDEADBEEC + // new error code should be here + } DP_TESTMESSAGE_STATUS; + + typedef enum + { + False = 0, + True = 1, + Indeterminate = 2 + } TriState; + + enum ConnectorType + { + connectorDisplayPort, + connectorHDMI, + connectorDVI, + connectorVGA + }; + + typedef struct portMap + { + NvU16 validMap; // 
port i is valid = bit i is high + NvU16 inputMap; // port i is input port = bit i is high && validMap bit i is high + NvU16 internalMap; // port i is internal = bit i is high && validMap bit i is high + } PortMap; + + enum ForceDsc + { + DSC_DEFAULT, + DSC_FORCE_ENABLE, + DSC_FORCE_DISABLE + }; + + struct DpModesetParams + { + unsigned headIndex; + ModesetInfo modesetInfo; + DP_COLORFORMAT colorFormat; + NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS msaparams; + + DpModesetParams() : headIndex(0), modesetInfo(), colorFormat(dpColorFormat_Unknown), msaparams() {} + + DpModesetParams(unsigned newHeadIndex, + ModesetInfo newModesetInfo, + DP_COLORFORMAT newColorFormat = dpColorFormat_Unknown) : + headIndex(newHeadIndex), + modesetInfo(newModesetInfo), + colorFormat(newColorFormat), + msaparams() {} + + DpModesetParams(unsigned newHeadIndex, + ModesetInfo *newModesetInfo, + DP_COLORFORMAT newColorFormat = dpColorFormat_Unknown) : + headIndex(newHeadIndex), + modesetInfo(*newModesetInfo), + colorFormat(newColorFormat), + msaparams() {} + + }; + + struct DscOutParams + { + unsigned PPS[DSC_MAX_PPS_SIZE_DWORD]; // Out - PPS SDP data + }; + + struct DscParams + { + bool bCheckWithDsc; // [IN] - Client telling DP Library to check with DSC. + ForceDsc forceDsc; // [IN] - Client telling DP Library to force enable/disable DSC + DSC_INFO::FORCED_DSC_PARAMS* forcedParams; // [IN] - Client telling DP Library to force certain DSC params. + bool bEnableDsc; // [OUT] - DP Library telling client that DSC is needed for this mode. + NvU32 sliceCountMask; // [OUT] - DP Library telling client what all slice counts can be used for the mode. + unsigned bitsPerPixelX16; // [IN/OUT] - Bits per pixel value multiplied by 16 + DscOutParams *pDscOutParams; // [OUT] - DSC parameters + + DscParams() : bCheckWithDsc(false), forceDsc(DSC_DEFAULT), forcedParams(NULL), bEnableDsc(false), sliceCountMask(0), bitsPerPixelX16(0), pDscOutParams(NULL) {} + }; + + class Group; + + struct DpLinkIsModePossibleParams + { + struct + { + Group * pTarget; + DpModesetParams *pModesetParams; + DP_IMP_ERROR *pErrorStatus; + DscParams *pDscParams; + } head[NV_MAX_HEADS]; + }; + + struct DpPreModesetParams + { + struct + { + Group *pTarget; + const DpModesetParams *pModesetParams; + } head[NV_MAX_HEADS]; + NvU32 headMask; + }; + + bool SetConfigSingleHeadMultiStreamMode(Group **targets, // Array of group pointers given for getting configured in single head multistream mode. + NvU32 displayIDs[], // Array of displayIDs given for getting configured in single head multistream mode. + NvU32 numStreams, // Number of streams driven out from single head. + DP_SINGLE_HEAD_MULTI_STREAM_MODE mode, // Configuration mode : SST or MST + bool bSetConfig, // Set or clear the configuration. + NvU8 vbiosPrimaryDispIdIndex = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, // VBIOS primary display ID index in displayIDs[] array + bool bEnableAudioOverRightPanel = false); // Audio MUX config : right or left panel + + // + // Device object + // This object represents a displayport device. Devices are not reported + // to clients until the EDID is already on file. 
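+    // A typical client walk of the topology looks like this (a sketch only;
+    // 'connector' is assumed to be an already-resumed Connector pointer):
+    //
+    //     for (Device * d = connector->enumDevices(0); d; d = connector->enumDevices(d))
+    //         if (d->isPlugged() && d->isVideoSink())
+    //             ; // candidate panel for a Group
+    //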
+    //
+    class Device : virtual public Object
+    {
+    public:
+
+        virtual bool isPlugged() = 0;
+        virtual bool isVideoSink() = 0;          // Invariant: won't change once reported
+        virtual bool isAudioSink() = 0;          // Invariant
+
+        virtual bool isLoop() = 0;               // the address starts and ends at the same device
+        virtual bool isRedundant() = 0;
+        virtual bool isMustDisconnect() = 0;     // Is this monitor's attached head preventing
+                                                 // us from enumerating other panels?
+        virtual bool isZombie() = 0;             // Head is attached but we're not connected
+        virtual bool isCableOk() = 0;            // cable can be bad: we saw an HPD and the device
+                                                 // is connected, but we can't talk over AUX
+
+        virtual bool isLogical() = 0;            // Is device connected to logical port
+
+        virtual Address getTopologyAddress() const = 0;  // Invariant
+        virtual bool isMultistream() = 0;
+
+        virtual ConnectorType getConnectorType() = 0;    // Invariant
+
+        virtual unsigned getEDIDSize() const = 0;        // Invariant
+        // Copies EDID into client buffer. Fails if the buffer is too small
+        virtual bool getEDID(char * buffer, unsigned size) const = 0;
+
+        virtual unsigned getRawEDIDSize() const = 0;
+        // Copies RawEDID into client buffer. Fails if the buffer is too small
+        virtual bool getRawEDID(char * buffer, unsigned size) const = 0;
+
+        virtual bool getPCONCaps(PCONCaps *pPCONCaps) = 0;
+
+        virtual bool isFallbackEdid() = 0;       // is the device edid a fallback one?
+        virtual GUID getGUID() const = 0;        // Returns the GUID for the device
+        virtual bool isPowerSuspended() = 0;
+        virtual bool isActive() = 0;             // Whether the device has a head attached to it
+        virtual TriState hdcpAvailableHop() = 0; // Whether the device supports HDCP,
+                                                 // regardless of whether the path leading to it supports HDCP.
+        virtual TriState hdcpAvailable() = 0;    // Whether HDCP can be enabled.
+                                                 // Note this checks that the entire path to the node supports HDCP.
+
+        virtual PortMap getPortMap() const = 0;
+
+        virtual void setPanelPowerParams(bool bSinkPowerStateD0, bool bPanelPowerStateOn) = 0;
+        virtual Group * getOwningGroup() = 0;    // Return the group this device is currently a member of
+
+        virtual AuxBus * getRawAuxChannel() = 0; // No automatic retry on DEFER. See limitations in dp_auxbus.h
+        virtual AuxRetry * getAuxChannel() = 0;  // User-friendly AUX interface
+
+        virtual Device * getParent() = 0;
+        virtual Device * getChild(unsigned portNumber) = 0;
+
+        virtual void dpcdOverrides() = 0;        // Apply DPCD overrides if required
+
+        virtual bool getDpcdRevision(unsigned * major, unsigned * minor) = 0;  // get the dpcd revision (maybe cached)
+
+        virtual bool getSDPExtnForColorimetrySupported() = 0;
+        virtual bool getAsyncSDPSupported() = 0;
+
+        virtual bool getPanelFwRevision(NvU16 *revision) = 0;
+
+        virtual bool getIgnoreMSACap() = 0;
+
+        virtual AuxRetry::status setIgnoreMSAEnable(bool msaTimingParamIgnoreEn) = 0;
+
+        virtual NvBool isDSCPossible() = 0;
+
+        virtual NvBool isDSCSupported() = 0;
+
+        virtual NvBool isDSCDecompressionSupported() = 0;
+
+        virtual NvBool isDSCPassThroughSupported() = 0;
+
+        virtual DscCaps getDscCaps() = 0;
+
+        virtual NvBool isDynamicPPSSupported() = 0;
+
+        virtual NvBool isDynamicDscToggleSupported() = 0;
+
+        //
+        // This function returns the device itself or its parent device that is doing
+        // DSC decompression for it.
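+        // For example, a panel that decompresses its own DSC stream returns
+        // itself, while a panel whose upstream branch device performs the
+        // decompression returns that parent.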
+        //
+        virtual Device* getDevDoingDscDecompression() = 0;
+        virtual void markDeviceForDeletion() = 0;
+
+        virtual bool getRawDscCaps(NvU8 *buffer, NvU32 bufferSize) = 0;
+        virtual bool setRawDscCaps(const NvU8 *buffer, NvU32 bufferSize) = 0;
+        virtual bool setValidatedRawDscCaps(NvU8 *buffer, NvU32 bufferSize) = 0;
+        virtual bool validatePPSData(DSCPPSDATA *pPps) = 0;
+        virtual bool readAndParseDSCCaps() = 0;
+
+        // This interface is still nascent. Please don't use it. Read size limit is 16 bytes.
+        virtual AuxBus::status getDpcdData(unsigned offset, NvU8 * buffer,
+                                           unsigned sizeRequested,
+                                           unsigned * sizeCompleted,
+                                           unsigned * pNakReason = NULL) = 0;
+
+        virtual AuxBus::status setDpcdData(unsigned offset, NvU8 * buffer,
+                                           unsigned sizeRequested,
+                                           unsigned * sizeCompleted,
+                                           unsigned * pNakReason = NULL) = 0;
+
+        virtual AuxBus::status dscCrcControl(NvBool bEnable, gpuDscCrc *gpuData, sinkDscCrc *sinkData) = 0;
+        virtual AuxBus::status queryFecData(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags) = 0;
+
+        //
+        // The address sent here will be right-shifted by the library. DD should
+        // send the DDC address without the shift.
+        // Parameter bForceMot in both getI2cData and setI2cData is used to forcefully set
+        // the MOT bit. It is needed for some special cases where the MOT bit shouldn't
+        // be set but some customers need it to please their monitors.
+        //
+        virtual bool getI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot = false) = 0;
+        virtual bool setI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot = false) = 0;
+
+        //
+        // Calls VRR enablement implementation in dp_vrr.cpp.
+        // The enablement steps include interaction over DPAux in the vendor specific
+        // DPCD space.
+        //
+        virtual bool startVrrEnablement() = 0;   // VF: calls actual enablement code.
+        virtual void resetVrrEnablement() = 0;   // VF: resets enablement state.
+        virtual bool isVrrMonitorEnabled() = 0;  // VF: gets monitor enablement state.
+        virtual bool isVrrDriverEnabled() = 0;   // VF: gets driver enablement state.
+
+        // Whether the sink supports MSA override in an MST environment.
+        virtual bool isMSAOverMSTCapable() = 0;
+        virtual bool isFakedMuxDevice() = 0;
+        virtual bool setPanelReplayConfig(panelReplayConfig prcfg) = 0;
+        virtual bool getPanelReplayConfig(panelReplayConfig *pPrcfg) = 0;
+        virtual bool isPanelReplaySupported() = 0;
+        virtual bool getPanelReplayStatus(PanelReplayStatus *pPrStatus) = 0;
+        virtual bool getDeviceSpecificData(NvU8 *oui, NvU8 *deviceIdString,
+                                           NvU8 *hwRevision, NvU8 *swMajorRevision,
+                                           NvU8 *swMinorRevision) = 0;
+        virtual bool getParentSpecificData(NvU8 *oui, NvU8 *deviceIdString,
+                                           NvU8 *hwRevision, NvU8 *swMajorRevision,
+                                           NvU8 *swMinorRevision) = 0;
+
+        virtual bool setModeList(DisplayPort::DpModesetParams *pModeList, unsigned numModes) = 0;
+
+    protected:
+        virtual ~Device() {}
+
+    };
+
+    class Group : virtual public Object
+    {
+    public:
+
+        //
+        // Routines for changing which panels are in a group. To move a stream to a new
+        // monitor without a modeset:
+        //      remove(old_panel)
+        //      insert(new_panel)
+        // The library will automatically switch over to the new configuration
+        //
+        virtual void insert(Device * dev) = 0;
+        virtual void remove(Device * dev) = 0;
+
+        //
+        //   group->enumDevices(0) - Get first element
+        //   group->enumDevices(i) - Get next element
+        //
+        //   for (Device * i = group->enumDevices(0); i; i = group->enumDevices(i))
+        //
+        virtual Device * enumDevices(Device * previousDevice) = 0;
+
+        virtual void destroy() = 0;              // Destroy the group object
+
+        // Toggles the encryption status for the stream.
+        virtual bool hdcpSetEncrypted(bool encrypted, NvU8 streamType = NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_TYPE_0, NvBool bForceClear = NV_FALSE, NvBool bAddStreamBack = NV_FALSE) = 0;
+        // Returns whether encryption is currently enabled.
+        virtual bool hdcpGetEncrypted() = 0;
+
+    protected:
+        virtual ~Group() {}
+
+    };
+
+    class Connector : virtual public Object
+    {
+    public:
+        //
+        // Normally the Connector::EventSink callbacks occur in response to the following:
+        //   1. Timer callbacks
+        //   2. notifyLongPulse/notifyShortPulse
+        //
+        class EventSink
+        {
+        public:
+            virtual void newDevice(Device * dev) = 0;           // New device appears in topology
+            virtual void lostDevice(Device * dev) = 0;          // Lost device from topology
+                                                                // Device object ceases to exist after this call
+
+            virtual void notifyMustDisconnect(Group * grp) = 0; // Notification that an attached head is preventing
+                                                                // us from completing detection of a newly connected device
+
+            virtual void notifyDetectComplete() = 0;            // Rolling call. Happens every time we've done another full
+                                                                // detect on the topology
+
+            virtual void bandwidthChangeNotification(Device * dev, bool isComplianceMode) = 0; // Available bandwidth to panel has changed, or panel has
+                                                                                               // become a zombie
+
+            virtual void notifyZombieStateChange(Device * dev, bool zombied) = 0;  // Notification that zombie device was attached or detached
+            virtual void notifyCableOkStateChange(Device * dev, bool cableOk) = 0; // Notification that the device's cable state changed (true - cable is good, false - cable is bad)
+            virtual void notifyHDCPCapDone(Device * dev, bool hdcpCap) = 0;        // Notification that the device's HDCP cap detection is done and the state changed.
+            virtual void notifyMCCSEvent(Device * dev) = 0;                        // Notification that an MCCS event is coming
+        };
+
+        // Query current Device topology
+        virtual Device * enumDevices(Device * previousDevice) = 0;
+
+        // Called before system enters an S3 state
+        virtual void pause() = 0;
+
+        // Get maximum link configuration
+        virtual LinkConfiguration getMaxLinkConfig() = 0;
+
+        // Get currently active link configuration
+        virtual LinkConfiguration getActiveLinkConfig() = 0;
+
+        // Get current link configuration
+        virtual void getCurrentLinkConfig(unsigned &laneCount, NvU64 &linkRate) = 0;
+
+        // Get the clock calculation supported by the panel
+        virtual unsigned getPanelDataClockMultiplier() = 0;
+
+        // Get the clock calculation supported by the GPU
+        virtual unsigned getGpuDataClockMultiplier() = 0;
+
+        // Power down the link
+        virtual void powerdownLink(bool bPowerdownPanel = false) = 0;
+
+        // Resume from standby/initial boot notification
+        // The library is considered to start up in the suspended state. You must make this
+        // API call to enable the library. None of the library APIs are functional before
+        // this call.
+        //
+        // Returns the group representing the firmware panel if any is active.
+        // Resume from standby/initial boot notification
+        // The library is considered to start up in the suspended state. You must make this
+        // API call to enable the library. None of the library APIs are functional before
+        // this call.
+        //
+        // Returns the group representing the firmware panel if any is active.
+        //
+        // plugged                  Does RM report the root-port DisplayId in
+        //                          its plugged connector mask
+        //
+        // firmwareLinkHandsOff     RM does NOT report the rootport displayId as active,
+        //                          but one of the active panels shares the same SOR.
+        //
+        // firmwareDPActive         RM reports the rootport displayId in the active device list
+        //                          but the display-driver hasn't yet performed its first modeset.
+        //
+        // isUefiSystem             DD tells the library whether this system is a UEFI based
+        //                          one so that the library can get the current and max link config
+        //                          from RM/UEFI instead of trying to determine them on its own.
+        //
+        // firmwareHead             Head being used to drive the firmware
+        //                          display, if firmwareDPActive is true.
+        //
+        // bFirmwareLinkUseMultistream
+        //                          Specifies whether the firmware connector is being driven in SST
+        //                          (false) or MST (true) mode.
+        //
+        // bDisableVbiosScratchRegisterUpdate
+        //                          Disables update of the
+        //                          NV_PDISP_SOR_DP_SCRATCH_RAD/MISC scratch
+        //                          pad registers with the last lit up display
+        //                          address. This address is used by the VBIOS in
+        //                          case of driver unload or BSOD.
+        //
+        // bAllowMST                Allow/Disallow Multi-streaming
+        //
+        virtual Group * resume(bool firmwareLinkHandsOff,
+                               bool firmwareDPActive,
+                               bool plugged,
+                               bool isUefiSystem = false,
+                               unsigned firmwareHead = 0,
+                               bool bFirmwareLinkUseMultistream = false,
+                               bool bDisableVbiosScratchRegisterUpdate = false,
+                               bool bAllowMST = true) = 0;
+
+        // The display-driver should enable hands off mode when attempting
+        // to use a shared resource (such as the SOR) in a non-DP configuration.
+        virtual void enableLinkHandsOff() = 0;
+        virtual void releaseLinkHandsOff() = 0;
+
+        // Usage scenario:
+        //      beginCompoundQuery()
+        //      compoundQueryAttach(1280x1024)
+        //      compoundQueryAttach(1920x1080)
+        //      endCompoundQuery()
+        // Will tell you if you have sufficient bandwidth to operate
+        // two panels at 1920x1080 and 1280x1024 assuming all currently
+        // attached panels are detached.
+        virtual void beginCompoundQuery(const bool bForceEnableFEC = false) = 0;
+
+        //
+        //  twoChannelAudioHz
+        //      If you need 192khz stereo specify 192000 here.
+        //
+        //  eightChannelAudioHz
+        //      Same setting for multi channel audio.
+        //      DisplayPort encodes 3-8 channel streams as 8 channel.
+        //
+        //  pixelClockHz
+        //      Requested pixel clock for the mode.
+        //
+        //  depth
+        //      Requested color depth.
+        //
+        virtual bool compoundQueryAttach(Group * target,
+                                         unsigned twoChannelAudioHz,
+                                         unsigned eightChannelAudioHz,
+                                         NvU64 pixelClockHz,
+                                         unsigned rasterWidth,
+                                         unsigned rasterHeight,
+                                         unsigned rasterBlankStartX,
+                                         unsigned rasterBlankEndX,
+                                         unsigned depth,
+                                         DP_IMP_ERROR *errorStatus = NULL) = 0;
+
+        virtual bool compoundQueryAttach(Group * target,
+                                         const DpModesetParams &modesetParams,    // Modeset info
+                                         DscParams *pDscParams,                   // DSC parameters
+                                         DP_IMP_ERROR *errorStatus = NULL) = 0;   // Error Status code
+
+        virtual bool endCompoundQuery() = 0;
+
+        virtual bool dpLinkIsModePossible(const DpLinkIsModePossibleParams &params) = 0;
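+        //
+        // Illustrative sketch (not normative): checking whether two groups can be
+        // driven at once. 'connector', 'grpA'/'grpB' and the raster numbers are
+        // hypothetical values for the example.
+        //
+        //     connector->beginCompoundQuery();
+        //     connector->compoundQueryAttach(grpA, 0, 0, 108000000,   // no audio, 108 MHz pclk
+        //                                    1280, 1024, 1328, 1440, 24);
+        //     connector->compoundQueryAttach(grpB, 0, 0, 148500000,
+        //                                    1920, 1080, 2008, 2052, 30);
+        //     bool bothModesFit = connector->endCompoundQuery();
+        //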
+        // Interface to indicate if clients need to perform a head shutdown before a modeset
+        virtual bool isHeadShutDownNeeded(Group * target,                // Group of panels we're attaching to this head
+                                          unsigned headIndex,
+                                          unsigned twoChannelAudioHz,    // if you need 192khz stereo specify 192000 here
+                                          unsigned eightChannelAudioHz,  // Same setting for multi channel audio.
+                                                                         // DisplayPort encodes 3-8 channel streams as 8 channel
+                                          NvU64 pixelClockHz,            // Requested pixel clock for the mode
+                                          unsigned rasterWidth,
+                                          unsigned rasterHeight,
+                                          unsigned rasterBlankStartX,
+                                          unsigned rasterBlankEndX,
+                                          unsigned depth) = 0;
+
+        // Interface to indicate if clients need to perform a head shutdown before a modeset
+        virtual bool isHeadShutDownNeeded(Group * target,                // Group of panels we're attaching to this head
+                                          unsigned headIndex,
+                                          ModesetInfo modesetInfo) = 0;  // Modeset info, including the relevant DSC data
+
+        //
+        // Interface for clients to query whether the link is going to be trained during notifyAttachBegin (modeset).
+        // Note: this API is not intended to indicate whether link training will be performed during assessment of the link.
+        // It was added so the library can report whether link training can be avoided during a modeset, letting the client
+        // decide how to avoid a destructive modeset from the UEFI mode set at POST to a GPU-driver-detected mode
+        // (thus preventing a visible glitch - i.e. Smooth Transition).
+        //
+        // How the isLinkTrainingNeededForModeset API differs from the isHeadShutDownNeeded API -
+        // In case of MST: we always shut down the head and link train if the link is inactive, so both APIs return TRUE.
+        // In case of SST:
+        //      - If the requested link config < the active link config, we shut down the head to prevent overflow,
+        //        as the head will still be driving the higher mode during link training to the lower mode.
+        //        So both APIs return TRUE.
+        //      - If the requested link config >= the active link config, we don't need a head shutdown, since
+        //        SOR clocks can be changed by entering flush mode, but we will need to link train for the mode change.
+        //        So isHeadShutDownNeeded returns FALSE and isLinkTrainingNeededForModeset returns TRUE.
+        //
+        virtual bool isLinkTrainingNeededForModeset(ModesetInfo modesetInfo) = 0;
+
+        // Notify library before/after modeset (update)
+        virtual bool notifyAttachBegin(Group * target,                   // Group of panels we're attaching to this head
+                                       unsigned headIndex,
+                                       unsigned twoChannelAudioHz,       // if you need 192khz stereo specify 192000 here
+                                       unsigned eightChannelAudioHz,     // Same setting for multi channel audio.
+                                                                         // DisplayPort encodes 3-8 channel streams as 8 channel
+                                       NvU64 pixelClockHz,               // Requested pixel clock for the mode
+                                       unsigned rasterWidth,
+                                       unsigned rasterHeight,
+                                       unsigned rasterBlankStartX,
+                                       unsigned rasterBlankEndX,
+                                       unsigned depth) = 0;
+
+        // Group of panels we're attaching to this head
+        virtual bool notifyAttachBegin(Group * target, const DpModesetParams &modesetParams) = 0;
+
+        virtual void dpPreModeset(const DpPreModesetParams &modesetParams) = 0;
+        virtual void dpPostModeset(void) = 0;
+
+        virtual void readRemoteHdcpCaps() = 0;
+
+        // The modeset might be cancelled when NAB fails
+        virtual void notifyAttachEnd(bool modesetCancelled) = 0;
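+        //
+        // Illustrative call order (sketch, not normative):
+        //
+        //     if (connector->isHeadShutDownNeeded(group, head, modesetInfo))
+        //         { /* shut the head down first */ }
+        //     connector->notifyAttachBegin(group, modesetParams);
+        //     /* ... program the head / perform the modeset ... */
+        //     connector->notifyAttachEnd(false /* modesetCancelled */);
+        //
+        // and for detach:
+        //
+        //     connector->notifyDetachBegin(group);
+        //     /* ... shut the stream down ... */
+        //     connector->notifyDetachEnd();
+        //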
+        //
+        // The client needs to be notified about an SST<->MST transition,
+        // based on which a null modeset will be sent.
+        //
+        virtual bool isLinkAwaitingTransition() = 0;
+
+        virtual void resetLinkTrainingCounter() = 0;
+
+        // Notify library before/after shutdown (update)
+        virtual void notifyDetachBegin(Group * target) = 0;
+        virtual void notifyDetachEnd(bool bKeepOdAlive = false) = 0;
+
+        // Notify library to assess PCON link capability
+        virtual bool assessPCONLinkCapability(PCONLinkControl *params) = 0;
+
+        // Notify library of hotplug/IRQ
+        virtual void notifyLongPulse(bool statusConnected) = 0;
+        virtual void notifyShortPulse() = 0;
+
+        // Notify library when ACPI initialization is done
+        virtual void notifyAcpiInitDone() = 0;
+
+        // Notify library when GPU capability changes, usually because of a power event.
+        virtual void notifyGPUCapabilityChange() = 0;
+        virtual void notifyHBR2WAREngage() = 0;
+
+        virtual bool dpUpdateDscStream(Group *target, NvU32 dscBpp) = 0;
+
+        // Create a new Group. Note that if you wish to do a modeset but send the
+        // stream nowhere, you may do a modeset with an EMPTY group. This is expected
+        // to be the mechanism by which monitor faking is implemented.
+        virtual Group * newGroup() = 0;
+
+        // Shut down and then destroy the connector manager
+        virtual void destroy() = 0;
+
+        virtual void createFakeMuxDevice(const NvU8 *buffer, NvU32 bufferSize) = 0;
+        virtual void deleteFakeMuxDevice() = 0;
+        virtual bool getRawDscCaps(NvU8 *buffer, NvU32 bufferSize) = 0;
+
+        //
+        // OS Modeset Order mitigation causes the library to delay the reporting
+        // of new devices until they can be safely turned on.
+        // When enabled, the library client will not see connection events until
+        // MustDisconnect messages are processed.
+        //
+        // Policy state should be set before the library is brought out of
+        // the suspended state.
+        //
+        // Important Note: This option changes the definition of QueryMode.
+        //   Without OS order mitigation, query mode assumes that you will
+        //   detach all of the heads from any zombied monitors *before*
+        //   activating the new panel. If your driver cannot guarantee
+        //   this invariant, then it must enable order mitigation.
+        //
+        virtual void setPolicyModesetOrderMitigation(bool enabled) = 0;
+
+        //
+        // Force LT at NAB for the compliance test (Power Management) in Win10 RS2+ (WDDM 2.2).
+        //
+        // RS2 no longer sends an explicit call for setPanelPowerParams during the Resume.
+        // It does that by specifying an additional flag during the call to SetTimings. Due to
+        // this the DP lib doesn't get a chance to perform this transition from setPanelPowerParams,
+        // and since it was already skipping LT in NAB/modeswitch, LT gets missed on the
+        // compliance device during resume from S3/S4.
+        //
+        virtual void setPolicyForceLTAtNAB(bool enabled) = 0;
+
+        //
+        // There are cases where the OS does not detach heads from a connector immediately after hot-unplug;
+        // on the next hot-plug there is no guarantee that the newly connected sink is capable of driving the existing
+        // raster timings. Flush mode has the following restriction:
+        //   when exiting flush mode, S/W should ensure that the final
+        //   link clock & lane count are able to support the existing raster.
+        // If we run into this situation and use flush mode anyway, the display engine will hang.
+        // This policy makes the library assess the link safely in this situation and, instead of using flush mode,
+        // ask the DD to detach/reattach heads for link training.
+        //
+        virtual void setPolicyAssessLinkSafely(bool enabled) = 0;
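+        //
+        // Illustrative sketch (not normative): policies are applied while the
+        // library is still suspended, then the library is enabled via resume().
+        //
+        //     connector->setPolicyModesetOrderMitigation(true);
+        //     connector->setPolicyAssessLinkSafely(true);
+        //     Group * fwGroup = connector->resume(false /* firmwareLinkHandsOff */,
+        //                                         false /* firmwareDPActive */,
+        //                                         true  /* plugged */);
+        //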
+        //
+        // These interfaces are meant to be used *ONLY* for tool purposes.
+        // Clients should *NOT* use them for their own implementation.
+        //
+        // Sets the preferred link config which the tool has requested to train to.
+        // Each set call should be paired with a reset call. Also, preferred link configs won't persist across HPDs.
+        // It is advisable to do compound queries before setting a mode on a preferred config.
+        // Compound queries and notify attaches (link train) will use the preferred link config until it is reset
+        // (it is not advisable to leave a preferred link config always ON).
+        //
+        virtual bool setPreferredLinkConfig(LinkConfiguration &lc, bool commit,
+                                            bool force = false,
+                                            LinkTrainingType forceTrainType = NORMAL_LINK_TRAINING,
+                                            bool forcePreferredLinkConfig = false) = 0;
+
+        //
+        // Resets the preferred link config and lets the library go back to the default LT policy.
+        // Should follow a previous set call.
+        //
+        virtual bool resetPreferredLinkConfig(bool force = false) = 0;
+
+        //
+        // These interfaces are used by the client to allow/disallow
+        // Multi-streaming.
+        //
+        // If the connected sink is MST capable, then the client should detach
+        // all active MST video/audio streams before disallowing MST; vice versa,
+        // the client should detach the active SST stream before allowing MST.
+        //
+        virtual void setAllowMultiStreaming(bool bAllowMST) = 0;
+        virtual bool getAllowMultiStreaming(void) = 0;
+
+        // This function reads sink MST capability from DPCD register(s).
+        virtual bool getSinkMultiStreamCap(void) = 0;
+
+        // These interfaces are deprecated; use setAllowMultiStreaming()
+        virtual void setDp11ProtocolForced() = 0;
+        virtual void resetDp11ProtocolForced() = 0;
+        virtual bool isDp11ProtocolForced() = 0;
+
+        // Operates at the Link Level. Causes reauthentication of the entire link.
+        virtual void hdcpRenegotiate(NvU64 cN, NvU64 cKsv) = 0;
+        virtual bool getHDCPAbortCodesDP12(NvU32 &hdcpAbortCodesDP12) = 0;
+
+        virtual bool getOuiSink(unsigned &ouiId, unsigned char * modelName,
+                                size_t modelNameBufferSize, NvU8 &chipRevision) = 0;
+
+        virtual bool getIgnoreSourceOuiHandshake() = 0;
+        virtual void setIgnoreSourceOuiHandshake(bool bIgnore) = 0;
+
+        //
+        // The following function is used to get the capability bit that tells the client whether the connector
+        // can do multistream.
+ // + virtual bool isMultiStreamCapable() = 0; + virtual bool isFlushSupported() = 0; + virtual bool isStreamCloningEnabled() = 0; + virtual NvU32 maxLinkRateSupported() = 0; + virtual bool isFECSupported() = 0; + virtual bool isFECCapable() = 0; + + // Following APIs are for link test/config for DP Test Utility + virtual bool getTestPattern(NV0073_CTRL_DP_TESTPATTERN *pTestPattern) = 0; + virtual bool setTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, + NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, + NvBool bIsHBR2, NvBool bSkipLaneDataOverride) = 0; + + // "data" is an array of NV0073_CTRL_MAX_LANES unsigned ints + virtual bool getLaneConfig(NvU32 *numLanes, NvU32 *data) = 0; + // "data" is an array of NV0073_CTRL_MAX_LANES unsigned ints + virtual bool setLaneConfig(NvU32 numLanes, NvU32 *data) = 0; + + virtual DP_TESTMESSAGE_STATUS sendDPTestMessage(void *pBuffer, + NvU32 requestSize, + NvU32 *pDpStatus) = 0; + + virtual DP_TESTMESSAGE_STATUS getStreamIDs(NvU32 *pStreamIDs, NvU32 *pCount) = 0; + // Function to configure power up/down for DP Main Link + virtual void configurePowerState(bool bPowerUp) = 0; + + virtual void readPsrCapabilities(vesaPsrSinkCaps *caps) = 0; + virtual bool updatePsrConfiguration(vesaPsrConfig config) = 0; + virtual bool readPsrConfiguration(vesaPsrConfig *config) = 0; + virtual bool readPsrState(vesaPsrState *psrState) = 0; + virtual bool readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState) = 0; + virtual bool writePsrErrorStatus(vesaPsrErrorStatus psrErr) = 0; + virtual bool readPsrErrorStatus(vesaPsrErrorStatus *psrErr) = 0; + virtual bool writePsrEvtIndicator(vesaPsrEventIndicator psrErr) = 0; + virtual bool readPsrEvtIndicator(vesaPsrEventIndicator *psrErr) = 0; + virtual bool updatePsrLinkState(bool bTurnOnLink) = 0; + + virtual bool readPrSinkDebugInfo(panelReplaySinkDebugInfo *prDbgInfo) = 0; + virtual void enableDpTunnelingBwAllocationSupport() = 0; + virtual bool willLinkSupportModeSST(const LinkConfiguration &linkConfig, + const ModesetInfo &modesetInfo, + const DscParams *pDscParams = NULL) = 0; + + protected: + virtual ~Connector() {} + }; + + // + // Library routine to create primary port interface + // (Not intended to be used by display driver) + Connector * createConnector(MainLink * mainInterface, // DisplayDriver implemented MainLink object + AuxBus * auxInterface, // DisplayDriver implemented AuxRetry wrapper + Timer * timerInterface, // DisplayDriver provided Timer services + Connector::EventSink * sink); // Interface to notify DisplayDriver of events +} +#endif //INCLUDED_DP_CONNECTOR_H diff --git a/src/common/displayport/inc/dp_connectorimpl.h b/src/common/displayport/inc/dp_connectorimpl.h new file mode 100644 index 0000000..1b0c090 --- /dev/null +++ b/src/common/displayport/inc/dp_connectorimpl.h @@ -0,0 +1,811 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort********************************\
+*                                                                           *
+* Module: dp_connectorimpl.h                                                *
+*         DP connector implementation                                       *
+*                                                                           *
+\***************************************************************************/
+#ifndef INCLUDED_DP_CONNECTORIMPL_H
+#define INCLUDED_DP_CONNECTORIMPL_H
+
+#include "dp_internal.h"
+#include "dp_guid.h"
+#include "dp_connector.h"
+#include "dp_configcaps.h"
+#include "dp_list.h"
+#include "dp_buffer.h"
+#include "dp_auxdefs.h"
+#include "dp_watermark.h"
+#include "dp_edid.h"
+#include "dp_discovery.h"
+#include "dp_groupimpl.h"
+#include "dp_deviceimpl.h"
+#include "dp_qse.h"
+#include "./dptestutil/dp_testmessage.h"
+
+// HDCP abort codes
+#define HDCP_FLAGS_ABORT_DEVICE_REVOKED     0x00000800 // Abort due to a revoked device in the DP1.2 topology
+#define HDCP_FLAGS_ABORT_DEVICE_INVALID     0x00080000 // Abort due to an invalid device in the DP1.2 topology
+#define HDCP_FLAGS_ABORT_HOP_LIMIT_EXCEEDED 0x80000000 // Abort, the number of devices in the DP1.2 topology exceeds the supported limit
+
+#define DP_TUNNEL_REQUEST_BW_MAX_TIME_MS         (1000U)
+#define DP_TUNNEL_REQUEST_BW_POLLING_INTERVAL_MS (10U)
+
+static inline unsigned getDataClockMultiplier(NvU64 linkRate, NvU64 laneCount)
+{
+    //
+    // To get the clock multiplier:
+    // - Convert the linkRate from Hz to 10kHz by dividing it by 10000.
+    // - Multiply the 10kHz linkRate by the laneCount.
+    // - Multiply by 10.0/8, to account for the 8b/10b encoding overhead in the DP protocol layer.
+    //
+    // Avoid floating point arithmetic in the calculation
+    // through the following conversions:
+    //     linkRate/10000.0 * laneCount * 10.0/8
+    //     (linkRate * laneCount * 10) / (10000 * 8)
+    //     (linkRate * laneCount) / (1000 * 8)
+    //
+    return (unsigned) DisplayPort::axb_div_c_64(linkRate, laneCount, 8000);
+}
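+//
+// Worked example (illustrative): for an HBR (2.7 Gbps) link at laneCount = 4,
+// getDataClockMultiplier(2700000000, 4) = (2700000000 * 4) / 8000 = 1350000,
+// which matches (2700000000 / 10000) * 4 * (10.0 / 8) = 270000 * 4 * 1.25.
+//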
+
+namespace DisplayPort
+{
+
+    class QSENonceGenerator;
+
+    typedef enum
+    {
+        DP_TRANSPORT_MODE_INIT          = 0,
+        DP_TRANSPORT_MODE_SINGLE_STREAM = 1,
+        DP_TRANSPORT_MODE_MULTI_STREAM  = 2,
+    } DP_TRANSPORT_MODE;
+
+    // Information required during compound query attach for MST
+    typedef struct _CompoundQueryAttachMSTInfo
+    {
+        ModesetInfo       localModesetInfo;
+        LinkConfiguration lc;
+    } CompoundQueryAttachMSTInfo;
+
+    struct ConnectorImpl : public Connector, DiscoveryManager::DiscoveryManagerEventSink, Timer::TimerCallback, MessageManager::MessageReceiver::MessageReceiverEventSink
+    {
+        // DPCD HAL Layer - We should use this in place of direct register accesses
+        DPCDHAL * hal;
+
+        MainLink * main;               // Main link controls
+        AuxBus * auxBus;
+
+        TestMessage testMessage;       // TestMessage instance
+
+        Timer * timer;                 // OS provided timer services
+        Connector::EventSink * sink;   // Event Sink
+
+        // Cached Source OUI for restoring the eDP OUI when powering up
+        unsigned cachedSourceOUI;
+        char cachedSourceModelName[NV_DPCD_SOURCE_DEV_ID_STRING__SIZE + 1];
+        NvU8 cachedSourceChipRevision;
+        bool bOuiCached;
+
+        unsigned ouiId;                                                  // Sink ouiId
+        unsigned char modelName[NV_DPCD_SOURCE_DEV_ID_STRING__SIZE + 1]; // Device Model-name
+        bool bIgnoreSrcOuiHandshake;                                     // Skip writing the source OUI
+
+        LinkPolicy linkPolicy;
+
+        bool linkGuessed;              // True when the link was "guessed" during HPD in TMDS mode
+        bool isLinkQuiesced;           // True when the link was set to quiet mode by a TMDS modeset
+
+        bool bNoLtDoneAfterHeadDetach; // True when the head is disconnected in NDE
+
+        bool isDP12AuthCap;            // To tell whether this DP1.2 connector / topmost device has the authentication Cap.
+        bool isHDCPAuthOn;             // To tell whether this connector has authentication on.
+        bool isHDCPReAuthPending;      // To tell whether HDCP Auth is pending (set at every stream addition and cleared at the handler).
+        bool isHDCPAuthTriggered;      // To tell whether HDCP Auth is triggered; only cleared at unplug/device detach for MST.
+        bool isHopLimitExceeded;       // To tell whether the current topology exceeds the supported hop limit.
+        bool bIsDiscoveryDetectActive; // To tell whether device discovery is active (isDiscoveryDetectComplete is also used as a DD notify, and we don't want to impact that).
+        bool isDiscoveryDetectComplete; // To tell whether device discovery is finished.
+        bool bDeferNotifyLostDevice;   // To tell if we should defer the notify-lost-device event to the client.
+
+        HDCPValidateData hdcpValidateData;   // Cache the HDCP ValidateData.
+        unsigned authRetries;                // Retry counter for the authentication.
+        unsigned retryLT;                    // Retry counter for link training in case of link lost in PostLQA
+        unsigned hdcpCapsRetries;            // Retry counter for the HDCP Caps read.
+        unsigned hdcpCpIrqRxStatusRetries;   // Retry counter for the CPIRQ RxStatus read.
+        bool bLTPhyRepeater;                 // Link Train PHY Repeaters between Source and Sink
+        bool bFromResumeToNAB;               // True if from resume to NAB; WAR flag for unblocking GA1.5
+        bool bAttachOnResume;                // True if notifyLongPulse is called for resume (reboot/S3/S4)
+        bool bSkipAssessLinkForEDP;          // Skip assessLink() for eDP, assuming the max is reachable.
+        bool bPConConnected;                 // HDMI2.1-Protocol Converter (supports SRC control mode) connected.
+        bool bSkipAssessLinkForPCon;         // Skip assessLink() for PCON. The DD will call assessFRLLink later.
+        bool bHdcpAuthOnlyOnDemand;          // True if HDCP authentication is only initiated on demand; MST won't auto-trigger authentication at device attach.
+        bool bHdcpStrmEncrEnblOnlyOnDemand;  // True if HDCP Stream Encryption Enable is only initiated on demand; MST won't auto-trigger it.
+        bool bReassessMaxLink;               // Retry assessLink() if the first assessed link config is lower than the panel max config.
+
+        bool constructorFailed;
+
+        //
+        // OS Modeset Order mitigation causes the library to delay the reporting
+        // of new devices until they can be safely turned on.
+        // When enabled, the library client will not see connection events until
+        // MustDisconnect messages are processed.
+        //
+        // Policy state should be set before the library is brought out of
+        // the suspended state.
+        //
+        bool policyModesetOrderMitigation;
+
+        //
+        // Force LT at NAB for the compliance test (Power Management) in Win10 RS2+ (WDDM 2.2).
+        //
+        // RS2 no longer sends an explicit call for setPanelPowerParams during the Resume.
+        // It does that by specifying an additional flag during the call to SetTimings. Due to
+        // this the DP lib doesn't get a chance to perform this transition from setPanelPowerParams,
+        // and since it was already skipping LT in NAB/modeswitch, LT gets missed on the
+        // compliance device during resume from S3/S4.
+        //
+        bool policyForceLTAtNAB;
+
+        //
+        // There are cases where the OS does not detach heads from a connector immediately after hot-unplug;
+        // on the next hot-plug there is no guarantee that the newly connected sink is capable of driving the existing
+        // raster timings. Flush mode has the following restriction:
+        //   when exiting flush mode, S/W should ensure that the final
+        //   link clock & lane count are able to support the existing raster.
+        // If we run into this situation and use flush mode anyway, the display engine will hang.
+        // This variable ensures the link is assessed safely in this situation: if the newly connected sink is
+        // not capable of driving the existing raster, just restore the link configuration which was there
+        // before enabling flush mode, through fake link training.
+        //
+        bool policyAssessLinkSafely;
+
+        bool bDisableVbiosScratchRegisterUpdate;
+
+        // Only works when policyModesetOrderMitigation is true.
+        // Records whether we should report newDevice.
+        bool modesetOrderMitigation;
+
+        List deviceList;
+        List activeGroups;
+        LinkedList<GroupImpl> intransitionGroups;
+        LinkedList<GroupImpl> addStreamMSTIntransitionGroups;
+        LinkedList<GroupImpl> hdcpEnableTransitionGroups;
+        List inactiveGroups;
+
+        LinkedList<Device> dscEnabledDevices;
+
+        // Compound query
+        bool compoundQueryActive;
+        bool compoundQueryResult;
+        unsigned compoundQueryCount;
+        unsigned compoundQueryLocalLinkPBN;
+        NvU64 compoundQueryUsedTunnelingBw;
+        bool compoundQueryForceEnableFEC;
+        bool bDP2XPreferNonDSCForLowPClk;
+
+        unsigned freeSlots;
+        unsigned maximumSlots;
+        int firstFreeSlot;
+
+        // Multistream messaging
+        MessageManager * messageManager;
+        DiscoveryManager * discoveryManager;
+
+        // Multistream timeslot management (on local link)
+        LinkConfiguration highestAssessedLC;   // As of the last assess, the highest possible link configuration
+
+        LinkConfiguration activeLinkConfig;    // Current link config.
+
+        // This is the link config requested by a client.
+        // It can be set and reset by the client for a given operation.
+        LinkConfiguration preferredLinkConfig;
+        bool forcePreferredLinkConfig;
+        //
+        // Desired link configuration of a single-head-multiple-SST secondary connector.
+        //
+        LinkConfiguration oneHeadSSTSecPrefLnkCfg;
+
+        // All possible link configs
+        LinkConfiguration * allPossibleLinkCfgs;
+        unsigned numPossibleLnkCfg;
+
+        PCONLinkControl activePConLinkControl;
+
+        //
+        // We're waiting for an MST<->SST transition.
+        // The transition cannot be made without the DD
+        // disconnecting all heads. All devices are reported
+        // as must_disconnect. Once the last device blocking
+        // the transition is detached from a head - we transition.
+        //
+        bool linkAwaitingTransition;
+
+        // Unless we're awaiting transition this is identical to hal->getSupportsMultistream()
+        DP_TRANSPORT_MODE linkState;
+
+        bool bAudioOverRightPanel;
+
+        bool previousPlugged;
+        bool connectorActive;          // Keep track of whether the connector is active to serve any IRQ
+
+        Group * firmwareGroup;         // The group used for book-keeping when we're in firmware mode
+
+        List pendingEdidReads;         // List of DevicePendingEDIDRead structures.
+                                       // This list tracks the currently in-progress MST EDID reads.
+
+        Device * lastDeviceSetForVbios;
+
+        QSENonceGenerator * qseNonceGenerator;
+
+        // Tells whether requests made by the library to the Downstream Device (i.e. QSE messages sent to the Branch Device) and to RM
+        // (i.e. KSV validation and Stream Validation requests sent by the library to RM after getting the QSE message reply from Downstream)
+        // while querying stream status are valid or not.
+        bool bValidQSERequest;
+        ListElement * message;         // Outstanding QSE message pointer for which Stream Validation submission failed.
+        NvU8 * clientId;               // ClientId of the group for which Stream Validation submission failed.
+
+        // Flag which gets set when ACPI init is done. The DD calls notifyAcpiInitDone to tell the client that ACPI init is completed
+        // & the client can now initiate a DDC EDID read for a device which supports EDID through the SBIOS.
+        bool bAcpiInitDone;
+
+        // Flag to check if the system is UEFI.
+        bool bIsUefiSystem;
+
+        // Flag to check if LT should be skipped.
+        bool bSkipLt;
+
+        // Flag to make sure that the zombie gets triggered when a powerChange event happens
+        bool bMitigateZombie;
+
+        //
+        // HP Valor QHD+ N15P-Q3 EDP needs a 50ms delay after D3
+        // during trainLinkOptimized to come up on S4
+        //
+        bool bDelayAfterD3;
+
+        //
+        // ASUS and Samsung monitors have inconsistent behavior when
+        // DPCD 0x600 is updated to D3. Skip D3 only in case these monitors
+        // are driven in SST config
+        //
+        bool bKeepLinkAlive;
+
+        //
+        // HP Trump dock link training is unstable during S4 resume, which causes
+        // the system to hang. Keep the link alive to increase stability.
+        // See Bug 2109823.
+        //
+        bool bKeepLinkAliveMST;
+
+        // Keep the link alive when the connector is in SST
+        bool bKeepLinkAliveSST;
+
+        //
+        // The HTC Vive Link box is not happy when we power down the link
+        // during link training when there is no stream present. It requests
+        // a link retraining pulse which is not required.
+        // WAR to address this - NV Bug# 1793084
+        //
+        bool bKeepOptLinkAlive;
+
+        // Keep both the DP and FRL links alive to save time.
+        bool bKeepLinkAliveForPCON;
+
+        //
+        // Remote HDCP DPCD access should be D0 but won't introduce an extra Dx
+        // state toggle. Use the counter to avoid powering down the link during an HDCP probe.
+        //
+        unsigned pendingRemoteHdcpDetections;
+
+        //
+        // The ASUS PQ321 tiled monitor sometimes loses the link while assessing the link
+        // or during link training. So if we lower the config from HBR2 to HBR and then
+        // retrain the link, we see a black screen.
+        // The WAR is to retry link training with the same config 3 times before
+        // lowering the link config. NV Bug #1846925
+        //
+        bool bNoFallbackInPostLQA;
+
+        //
+        // Set to true when we do not want DSC to be limited
+        // to 16 BPP for multitile on Blackwell++
+        //
+        bool bDisableDscMaxBppLimit;
+
+        //
+        // Set to true when we want to force a head shutdown
+        // when the DSC mode or bpc is changed but the LT is still the same
+        //
+        bool bForceHeadShutdownOnModeTransition;
+
+        // Flag to tell whether to send QSE after stream encryption is on
+        bool bIsEncryptionQseValid;
+
+        bool bReportDeviceLostBeforeNew;
+        bool bDisableSSC;
+        bool bEnableFastLT;
+        NvU32 maxLinkRateFromRegkey;
+
+        //
+        // Latency (ms) to apply between link-train and FEC enable for bug
+        // 2561206.
+        //
+        NvU32 LT2FecLatencyMs;
+
+        // On eDP, do not cache the source OUI if it reads 0. See bug 4793112
+        bool bSkipZeroOuiCache;
+
+        bool bForceHeadShutdownFromRegkey;
+
+        bool bForceHeadShutdownPerMonitor;
+
+        //
+        // Dual SST Partner connector object pointer
+        ConnectorImpl *pCoupledConnector;
+
+        // Set to true when a DSC mode is requested.
+        bool bFECEnable;
+
+        // Save the link config before entering PSR.
+        LinkConfiguration psrLinkConfig;
+
+        //
+        // Apply the MST DSC caps WAR based on the OUI ID of the sink
+        //
+        bool bDscMstCapBug3143315;
+
+        //
+        // The Synaptics branch device doesn't support Virtual Peer Devices, so the DSC
+        // capability of a downstream device should be decided based on the device's own
+        // and its parent's DSC capability
+        //
+        bool bDscCapBasedOnParent;
+
+        //
+        // An MST device connected to a dock may issue an IRQ for link lost.
+        // Send a PowerDown path msg to suppress that.
+        //
+        bool bPowerDownPhyBeforeD3;
+
+        //
+        // Reset the MSTM_CTRL registers on the branch device irrespective of the
+        // IRQ VECTOR register having a stale message. Certain branch devices
+        // need to reset the topology before issuing new discovery commands,
+        // as there can be a case where the previous discovery is still in progress, and a
+        // possibility that clearPendingMessage() might not be able to catch
+        // the stale messages from the previous discovery.
+ // + bool bForceClearPendingMsg; + NvU64 allocatedDpTunnelBw; + NvU64 allocatedDpTunnelBwShadow; + bool bForceDisableTunnelBwAllocation; + bool bDisableEffBppSST8b10b; + + Group *perHeadAttachedGroup[NV_MAX_HEADS]; + NvU32 inTransitionHeadMask; + + void sharedInit(); + ConnectorImpl(MainLink * main, AuxBus * auxBus, Timer * timer, Connector::EventSink * sink); + void setPolicyModesetOrderMitigation(bool enabled); + void setPolicyForceLTAtNAB(bool enabled); + void setPolicyAssessLinkSafely(bool enabled); + + void discoveryDetectComplete(); + void discoveryNewDevice(const DiscoveryManager::Device &device); + void discoveryLostDevice(const Address &address); + void processNewDevice(const DiscoveryManager::Device &device, + const Edid &edid, + bool isMultistream, + DwnStreamPortType portType, + DwnStreamPortAttribute portAttribute, + bool isCompliance = false); + + void applyEdidWARs(Edid &edid, DiscoveryManager::Device &device); + virtual void handleEdidWARs(Edid &edid, DiscoveryManager::Device &device){}; + void applyRegkeyOverrides(const DP_REGKEY_DATABASE &dpRegkeyDatabase); + + ResStatusNotifyMessage ResStatus; + + void messageProcessed(MessageManager::MessageReceiver * from); + + ~ConnectorImpl(); + + // + // Utility functions + // + virtual void hardwareWasReset(); + virtual LinkConfiguration getMaxLinkConfig(); + virtual LinkConfiguration getActiveLinkConfig(); + void powerdownLink(bool bPowerdownPanel = false); + LinkConfiguration initMaxLinkConfig(); + + GroupImpl * getActiveGroupForSST(); + bool detectSinkCountChange(); + + virtual bool handlePhyPatternRequest(); + virtual bool handleTestLinkTrainRequest(); + virtual void applyOuiWARs(); + bool linkUseMultistream() + { + return (linkState == DP_TRANSPORT_MODE_MULTI_STREAM); + } + + virtual void populateAllDpConfigs(); + virtual LinkRates* importDpLinkRates(); + + // + // Suspend resume API + // + virtual Group * resume(bool firmwareLinkHandsOff, + bool firmwareDPActive, + bool plugged, + bool isUefiSystem = false, + unsigned firmwareHead = 0, + bool bFirmwareLinkUseMultistream = false, + bool bDisableVbiosScratchRegisterUpdate = false, + bool bAllowMST = true); + virtual void pause(); + + virtual Device * enumDevices(Device * previousDevice) ; + + + virtual void beginCompoundQuery(const bool bForceEnableFEC = false) ; + virtual bool compoundQueryAttach(Group * target, + unsigned twoChannelAudioHz, // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz, // Same setting for multi channel audio. 
+                                                                   // DisplayPort encodes 3-8 channel streams as 8 channel
+                                         NvU64 pixelClockHz,       // Requested pixel clock for the mode
+                                         unsigned rasterWidth,
+                                         unsigned rasterHeight,
+                                         unsigned rasterBlankStartX,
+                                         unsigned rasterBlankEndX,
+                                         unsigned depth,
+                                         DP_IMP_ERROR *errorStatus = NULL);
+
+        virtual bool compoundQueryAttach(Group * target,
+                                         const DpModesetParams &modesetParams,   // Modeset info
+                                         DscParams *pDscParams = NULL,           // DSC parameters
+                                         DP_IMP_ERROR *pErrorCode = NULL);       // Error Status code
+        virtual bool compoundQueryAttachTunneling(const DpModesetParams &modesetParams,
+                                                  DscParams *pDscParams = NULL,
+                                                  DP_IMP_ERROR *pErrorCode = NULL);
+
+        virtual bool endCompoundQuery();
+
+        virtual bool dpLinkIsModePossible(const DpLinkIsModePossibleParams &params);
+
+        virtual bool compoundQueryAttachMST(Group * target,
+                                            const DpModesetParams &modesetParams,   // Modeset info
+                                            DscParams *pDscParams = NULL,           // DSC parameters
+                                            DP_IMP_ERROR *pErrorCode = NULL);       // Error Status code
+
+        virtual bool compoundQueryAttachMSTIsDscPossible
+        (
+            Group * target,
+            const DpModesetParams &modesetParams,    // Modeset info
+            DscParams *pDscParams = NULL              // DSC parameters
+        );
+
+        // Calculate and configure SW state based on DSC
+        virtual bool compoundQueryAttachMSTDsc
+        (
+            Group * target,
+            const DpModesetParams &modesetParams,     // Modeset info
+            CompoundQueryAttachMSTInfo * info,         // local info to update for later use
+            DscParams *pDscParams = NULL,              // DSC parameters
+            DP_IMP_ERROR *pErrorCode = NULL            // Error Status code
+        );
+
+        // General part of CQA MST for DSC/non-DSC
+        virtual bool compoundQueryAttachMSTGeneric
+        (
+            Group * target,
+            const DpModesetParams &modesetParams,      // Modeset info
+            CompoundQueryAttachMSTInfo * info,          // local info with updates for DSC
+            DscParams *pDscParams = NULL,               // DSC parameters
+            DP_IMP_ERROR *pErrorCode = NULL             // Error Status code
+        );
+
+        virtual bool compoundQueryAttachSST(Group * target,
+                                            const DpModesetParams &modesetParams,   // Modeset info
+                                            DscParams *pDscParams = NULL,           // DSC parameters
+                                            DP_IMP_ERROR *pErrorCode = NULL);       // Error Status code
+
+        virtual bool compoundQueryAttachSSTIsDscPossible(const DpModesetParams &modesetParams,
+                                                         DscParams *pDscParams = NULL);
+
+        virtual bool compoundQueryAttachSSTDsc(const DpModesetParams &modesetParams,
+                                               LinkConfiguration lc,
+                                               DscParams *pDscParams = NULL,
+                                               DP_IMP_ERROR *pErrorCode = NULL);
+
+        //
+        // Timer callback tags.
+        // (we pass the address of these variables as context to ::expired)
+        char tagFireEvents;
+        char tagDelayedLinkTrain;
+        char tagHDCPReauthentication;
+        char tagDelayedHdcpCapRead;
+        char tagDelayedHDCPCPIrqHandling;
+        char tagSendQseMessage;
+        char tagDpBwAllocationChanged;
+        char tagHDCPStreamEncrEnable;
+
+        //
+        // Enable/disable TMDS mode
+        //
+        virtual void enableLinkHandsOff();
+        virtual void releaseLinkHandsOff();
+
+        //
+        // Timer callback for event management
+        // Uses: fireEvents()
+        virtual void expired(const void * tag);
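+        //
+        // Illustrative sketch (an assumption, not the actual implementation):
+        // because the tags above are registered by address, ::expired() can
+        // dispatch on pointer identity, e.g.:
+        //
+        //     void ConnectorImpl::expired(const void * tag)
+        //     {
+        //         if (tag == &tagFireEvents)
+        //             fireEventsInternal();
+        //         else if (tag == &tagHDCPReauthentication)
+        //             { /* re-run HDCP authentication */ }
+        //         // ... remaining tags handled similarly
+        //     }
+        //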
+
+        // Generate Events.
+        // useTimer specifies whether we fire the events on the timer
+        // context, or on this context.
+        void fireEvents();
+
+        // Fires any notifications that are pending.
+        void fireEventsInternal();
+
+        virtual bool isHeadShutDownNeeded(Group * target,            // Group of panels we're attaching to this head
+                                          unsigned headIndex,
+                                          ModesetInfo modesetInfo);
+
+        virtual bool isLinkTrainingNeededForModeset(ModesetInfo modesetInfo);
+
+        virtual bool notifyAttachBegin(Group * target,               // Group of panels we're attaching to this head
+                                       const DpModesetParams &modesetParams);
+
+        bool needToEnableFEC(const DpPreModesetParams &params);
+
+        virtual void dpPreModeset(const DpPreModesetParams &modesetParams);
+        virtual void dpPostModeset(void);
+
+        virtual bool isHeadShutDownNeeded(Group * target,            // Group of panels we're attaching to this head
+                                          unsigned headIndex,
+                                          unsigned twoChannelAudioHz,    // if you need 192khz stereo specify 192000 here
+                                          unsigned eightChannelAudioHz,  // Same setting for multi channel audio. DisplayPort encodes 3-8 channel streams as 8 channel
+                                          NvU64 pixelClockHz,            // Requested pixel clock for the mode
+                                          unsigned rasterWidth,
+                                          unsigned rasterHeight,
+                                          unsigned rasterBlankStartX,
+                                          unsigned rasterBlankEndX,
+                                          unsigned depth);
+
+        virtual bool notifyAttachBegin(Group * target,               // Group of panels we're attaching to this head
+                                       unsigned headIndex,
+                                       unsigned twoChannelAudioHz,    // if you need 192khz stereo specify 192000 here
+                                       unsigned eightChannelAudioHz,  // Same setting for multi channel audio.
+                                                                      // DisplayPort encodes 3-8 channel streams as 8 channel
+                                       NvU64 pixelClockHz,            // Requested pixel clock for the mode
+                                       unsigned rasterWidth,
+                                       unsigned rasterHeight,
+                                       unsigned rasterBlankStartX,
+                                       unsigned rasterBlankEndX,
+                                       unsigned depth);
+
+        virtual void readRemoteHdcpCaps();
+        virtual void notifyAttachEnd(bool modesetCancelled);
+        virtual void notifyDetachBegin(Group * target);
+        virtual void notifyDetachEnd(bool bKeepOdAlive = false);
+        virtual bool willLinkSupportModeSST(const LinkConfiguration &linkConfig,
+                                            const ModesetInfo &modesetInfo,
+                                            const DscParams *pDscParams = NULL);
+
+        bool performIeeeOuiHandshake();
+        void setIgnoreSourceOuiHandshake(bool bIgnore);
+        bool getIgnoreSourceOuiHandshake();
+        void forceLinkTraining();
+
+        bool updateDpTunnelBwAllocation();
+        TriState requestDpTunnelBw(NvU8 requestedBw);
+        bool allocateDpTunnelBw(NvU64 bandwidth);
+        bool allocateMaxDpTunnelBw();
+        NvU64 getMaxTunnelBw();
+        void enableDpTunnelingBwAllocationSupport();
+
+        void assessLink(LinkTrainingType trainType = NORMAL_LINK_TRAINING);
+
+        bool isLinkInD3();
+        bool isLinkActive();
+        bool isLinkLost();
+        bool trainSingleHeadMultipleSSTLinkNotAlive(GroupImpl *pGroupAttached);
+        bool isLinkAwaitingTransition();
+        bool isNoActiveStreamAndPowerdown();
+        void incPendingRemoteHdcpDetection()
+        {
+            pendingRemoteHdcpDetections++;
+        }
+        void decPendingRemoteHdcpDetection()
+        {
+            if (pendingRemoteHdcpDetections > 0)
+            {
+                pendingRemoteHdcpDetections--;
+            }
+        }
+        bool trainLinkOptimized(LinkConfiguration lConfig);
+        bool trainLinkOptimizedSingleHeadMultipleSST(GroupImpl * group);
+        virtual bool getValidLowestLinkConfig(LinkConfiguration &lConfig, LinkConfiguration &lowestSelected,
+                                              ModesetInfo queryModesetInfo, const DscParams *pDscParams = NULL);
+        bool postLTAdjustment(const LinkConfiguration &, bool force);
+        void populateUpdatedLaneSettings(NvU8* voltageSwingLane, NvU8* preemphasisLane, NvU32 *data);
+        void populateDscCaps(DSC_INFO* dscInfo, DeviceImpl * dev, DSC_INFO::FORCED_DSC_PARAMS* forcedParams);
+        void populateDscGpuCaps(DSC_INFO* dscInfo);
+        void populateForcedDscParams(DSC_INFO* dscInfo, DSC_INFO::FORCED_DSC_PARAMS* forcedParams);
+        void
populateDscSinkCaps(DSC_INFO* dscInfo, DeviceImpl * dev); + void populateDscBranchCaps(DSC_INFO* dscInfo, DeviceImpl * dev); + void populateDscModesetInfo(MODESET_INFO * pModesetInfo, const DpModesetParams * pModesetParams); + + virtual bool train(const LinkConfiguration &lConfig, bool force, LinkTrainingType trainType = NORMAL_LINK_TRAINING); + virtual bool validateLinkConfiguration(const LinkConfiguration &lConfig); + + virtual bool assessPCONLinkCapability(PCONLinkControl *params); + bool trainPCONFrlLink(PCONLinkControl *pConControl); + + // Set Device DSC state based on current DSC state of all active devices on this connector + bool setDeviceDscState(Device * dev, bool bEnableDsc); + + // the lowest level function(nearest to the hal) for the connector. + bool rawTrain(const LinkConfiguration &lConfig, bool force, LinkTrainingType linkTrainingType); + + virtual bool enableFlush(); + virtual bool beforeAddStream(GroupImpl * group, bool force=false, bool forFlushMode = false); + virtual void afterAddStream(GroupImpl * group); + virtual void beforeDeleteStream(GroupImpl * group, bool forFlushMode = false); + virtual void afterDeleteStream(GroupImpl * group); + virtual void disableFlush(bool test=false); + + bool beforeAddStreamMST(GroupImpl * group, bool force = false, bool forFlushMode = false); + + virtual bool checkIsModePossibleMST(GroupImpl * group); + + bool deleteAllVirtualChannels(); + void clearTimeslices(); + virtual void applyTimeslotWAR(unsigned &slot_count){}; + virtual bool allocateTimeslice(GroupImpl * targetGroup); + void freeTimeslice(GroupImpl * targetGroup); + void flushTimeslotsToHardware(); + void hdcpRenegotiate(NvU64 cN, NvU64 cKsv); + bool getHDCPAbortCodesDP12(NvU32 &hdcpAbortCodesDP12); + bool getOuiSink(unsigned &ouiId, unsigned char * modelName, size_t modelNameBufferSize, NvU8 &chipRevision); + bool hdcpValidateKsv(const NvU8 *ksv, NvU32 Size); + void cancelHdcpCallbacks(); + bool handleCPIRQ(); + void handleSSC(); + void handleMCCSIRQ(); + void handleDpTunnelingIrq(); + void handleHdmiLinkStatusChanged(); + void sortActiveGroups(bool ascending); + void handlePanelReplayError(); + + virtual void configInit(); + virtual DeviceImpl* findDeviceInList(const Address &address); + virtual void disconnectDeviceList(); + void notifyLongPulseInternal(bool statusConnected); + virtual void notifyLongPulse(bool status); + virtual void notifyShortPulse(); + virtual Group * newGroup(); + virtual void destroy(); + virtual void createFakeMuxDevice(const NvU8 *buffer, NvU32 bufferSize); + virtual void deleteFakeMuxDevice(); + virtual bool getRawDscCaps(NvU8 *buffer, NvU32 bufferSize); + virtual bool isMultiStreamCapable(); + virtual bool isFlushSupported(); + virtual bool isStreamCloningEnabled(); + virtual bool isFECSupported(); + virtual bool isFECCapable(); + virtual NvU32 maxLinkRateSupported(); + bool setPreferredLinkConfig(LinkConfiguration &lc, bool commit, + bool force = false, + LinkTrainingType trainType = NORMAL_LINK_TRAINING, + bool forcePreferredLinkConfig = false); + virtual bool resetPreferredLinkConfig(bool force = false); + virtual void setAllowMultiStreaming(bool bAllowMST); + virtual bool getAllowMultiStreaming(void); + virtual bool getSinkMultiStreamCap(void); + virtual void setDp11ProtocolForced(); + virtual void resetDp11ProtocolForced(); + virtual bool isDp11ProtocolForced(); + + bool isAcpiInitDone(); + virtual void notifyAcpiInitDone(); + Group * createFirmwareGroup(); + virtual void notifyGPUCapabilityChange(); + virtual void notifyHBR2WAREngage(); + 
bool dpUpdateDscStream(Group *target, NvU32 dscBpp); + + bool getTestPattern(NV0073_CTRL_DP_TESTPATTERN *testPattern); + bool setTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, NvBool bIsHBR2, NvBool bSkipLaneDataOverride = false); + bool getLaneConfig(NvU32 *numLanes, NvU32 *data); // "data" is an array of NV0073_CTRL_MAX_LANES unsigned ints + bool setLaneConfig(NvU32 numLanes, NvU32 *data); // "data" is an array of NV0073_CTRL_MAX_LANES unsigned ints + + virtual void setDisableDownspread(bool _bDisableDownspread) + { + return; + } + virtual bool getDownspreadDisabled() + { + return false; + } + + void getCurrentLinkConfig(unsigned &laneCount, NvU64 &linkRate); // CurrentLink Configuration + void getCurrentLinkConfigWithFEC(unsigned &laneCount, NvU64 &linkRate, bool &bFECEnabled); + unsigned getPanelDataClockMultiplier(); + unsigned getGpuDataClockMultiplier(); + void configurePowerState(bool bPowerUp); + virtual void readPsrCapabilities(vesaPsrSinkCaps *caps); + virtual bool updatePsrConfiguration(vesaPsrConfig config); + virtual bool readPsrConfiguration(vesaPsrConfig *config); + virtual bool readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState); + virtual bool writePsrErrorStatus(vesaPsrErrorStatus psrErr); + virtual bool readPsrErrorStatus(vesaPsrErrorStatus *psrErr); + virtual bool writePsrEvtIndicator(vesaPsrEventIndicator psrErr); + virtual bool readPsrEvtIndicator(vesaPsrEventIndicator *psrErr); + virtual bool readPsrState(vesaPsrState *psrState); + virtual bool updatePsrLinkState(bool bTurnOnLink); + + virtual bool readPrSinkDebugInfo(panelReplaySinkDebugInfo *prDbgInfo); + + // for dp test utility. pBuffer is the request buffer of type DP_STATUS_REQUEST_xxxx + DP_TESTMESSAGE_STATUS sendDPTestMessage(void *pBuffer, + NvU32 requestSize, + NvU32 *pDpStatus); + + DP_TESTMESSAGE_STATUS getStreamIDs(NvU32 *pStreamIDs, NvU32 *pCount); // for dp test utility, called by DD + + // Reset link training counter for the active link configuration. + virtual void resetLinkTrainingCounter() + { + activeLinkConfig.setLTCounter(0); + } + }; + + // + // New devices do not get a DeviceImpl created until after + // the EDID read has completed. This object is used + // to track the necessary state. + // + struct DevicePendingEDIDRead : protected EdidReadMultistream::EdidReadMultistreamEventSink, public ListElement + { + ConnectorImpl * parent; + DiscoveryManager::Device device; + EdidReadMultistream reader; + + void mstEdidCompleted(EdidReadMultistream * from); + void mstEdidReadFailed(EdidReadMultistream * from); + + public: + DevicePendingEDIDRead(ConnectorImpl * _parent, MessageManager * manager, DiscoveryManager::Device dev) + : parent(_parent), device(dev), reader(_parent->timer, manager, this, dev.address) + { + } + }; +} + +#endif //INCLUDED_DP_CONNECTORIMPL_H diff --git a/src/common/displayport/inc/dp_crc.h b/src/common/displayport/inc/dp_crc.h new file mode 100644 index 0000000..27d5341 --- /dev/null +++ b/src/common/displayport/inc/dp_crc.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_crc.h * +* CRC Algorithms for the messaging subsystem. * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_CRC_H +#define INCLUDED_DP_CRC_H + +#include "dp_bitstream.h" + +namespace DisplayPort +{ + unsigned dpCalculateHeaderCRC(BitStreamReader * reader); + unsigned dpCalculateBodyCRC(BitStreamReader * writer); +} + +#endif //INCLUDED_DP_CRC_H diff --git a/src/common/displayport/inc/dp_deviceimpl.h b/src/common/displayport/inc/dp_deviceimpl.h new file mode 100644 index 0000000..e56335f --- /dev/null +++ b/src/common/displayport/inc/dp_deviceimpl.h @@ -0,0 +1,556 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/******************************* DisplayPort*********************************\
+*                                                                           *
+* Module: dp_deviceimpl.h                                                   *
+*                                                                           *
+\***************************************************************************/
+#ifndef INCLUDED_DP_DEVICEIMPL_H
+#define INCLUDED_DP_DEVICEIMPL_H
+
+#include "dp_connector.h"
+#include "dp_internal.h"
+#include "dp_edid.h"
+#include "dp_list.h"
+#include "dp_auxdefs.h"
+#include "dp_qse.h"
+#include "dp_vrr.h"
+
+namespace DisplayPort
+{
+    #define PREDEFINED_DSC_MST_BPPX16 160
+    #define MAX_DSC_COMPRESSION_BPPX16 128
+    #define HDCP_BCAPS_DDC_OFFSET 0x40
+    #define HDCP_BCAPS_DDC_EN_BIT 0x80
+    #define HDCP_BCAPS_DP_EN_BIT 0x01
+    #define HDCP_I2C_CLIENT_ADDR 0x74
+    #define DEVICE_OUI_SIZE 3
+    #define DSC_CAPS_SIZE 16
+
+    struct GroupImpl;
+    struct ConnectorImpl;
+    class DeviceHDCPDetection;
+    class VrrEnablement;
+
+    struct DeviceImpl : public Device,
+                        public AuxBus,
+                        public ListElement
+    {
+        //
+        // Shadow state: This is the last state delivered to the DD.
+        // See the ConnectorImpl::fireEvents() function for the handling.
+        //
+        // State is double buffered to allow announces
+        // to happen at the end of the state updates. We assume
+        // the DD can call any Connector API in response to the
+        // event.
+        //
+        struct Shadow
+        {
+            bool plugged;
+            bool zombie;
+            bool cableOk;
+            bool mustDisconnect;
+            bool hdcpCapDone;
+            LinkConfiguration highestAssessedLC;
+        } shadow;
+
+        struct BandWidth
+        {
+            struct _Enum_Path
+            {
+                unsigned availableStreams, total, free, dfpLinkAvailable;
+                bool     bPathFECCapable;
+                bool     dataValid;            // Is the cache valid?
+                bool     availablePbnUpdated;
+            } enum_path;
+
+            struct Compound_Query_State
+            {
+                unsigned totalTimeSlots;             // Total timeslots available for allocation across this node
+
+                unsigned timeslots_used_by_query;    // Timeslots accounted for.
+
+                unsigned bandwidthAllocatedForIndex; // A compound query is comprised of several
+                                                     // query attaches. These query attaches
+                                                     // may have more than one device associated.
+                                                     // This mask keeps track of which queryAttach's
+                                                     // have already had the stream "routed" past
+                                                     // this node.
+            } compound_query_state;
+
+            LinkConfiguration lastHopLinkConfig;     // Inferred from enum_path.total
+
+        } bandwidth;
+
+        enum rawEprState
+        {
+            software,
+            hardware
+        };
+
+        void resetCacheInferredLink();
+        LinkConfiguration * inferLeafLink(unsigned * totalLinkSlots);
+        void inferPathConstraints();
+
+        DeviceImpl * parent;        // Upstream parent device
+        DeviceImpl * children[16];
+        PortMap portMap;
+
+        Edid rawEDID;
+        Edid processedEdid;
+        Edid ddcEdid;
+        DPCDHAL * hal;
+        GroupImpl * activeGroup;
+        ConnectorImpl * connector;
+        ConnectorType connectorType;
+        Address address;
+        GUID guid;
+        GUID guid2;
+        bool bVirtualPeerDevice;
+        NvU8 peerDevice;
+        NvU8 dpcdRevisionMajor;
+        NvU8 dpcdRevisionMinor;
+        bool multistream;
+        bool videoSink, audioSink;
+        bool plugged;
+        bool bApplyPclkWarBug4949066;
+
+        AuxRetry friendlyAux;
+        bool payloadAllocated;      // Did the allocate payload go through?
+
+        unsigned char BCAPS[HDCP_BCAPS_SIZE];   // HDCP 1.x bCaps raw data
+        unsigned char BKSV[HDCP_KSV_SIZE];      // HDCP 1.x bKsv raw data
+        unsigned char nvBCaps[HDCP_BCAPS_SIZE]; // NV generic HDCP BCAPS including 1.x, 2.2, ...
+ NvU64 maxTmdsClkRate; + + + bool isPendingNewDevice(); + bool isPendingLostDevice(); + bool isPendingZombie(); + bool isPendingCableOk(); + bool isPendingBandwidthChange(); + bool isPendingHDCPCapDone(); + + TriState isHDCPCap; + bool isDeviceHDCPDetectionAlive; + DeviceHDCPDetection * deviceHDCPDetection; + + PCONCaps pconCaps; + + // this flag signifies that the compliance device has requested EDID read test and may follow + // hidden and lazy zombie policy. + bool complianceDeviceEdidReadTest; + + bool lazyExitNow; + + // VRR Enablement structure + VrrEnablement *vrrEnablement; + + // DSC fields + NvU8 rawDscCaps[16]; + DscCaps dscCaps; + + // Panel replay Caps + PanelReplayCaps prCaps; + bool bIsFakedMuxDevice; + bool bIsPreviouslyFakedMuxDevice; + bool bisMarkedForDeletion; + bool bIgnoreMsaCap; + bool bIgnoreMsaCapCached; + + // + // Device doing the DSC decompression for this device. This could be device itself + // or its parent + // + DeviceImpl* devDoingDscDecompression; + // + // If DSC stream can be sent to this device or not. Either device itself or it's + // parent can do DSC decompression + // + bool bDSCPossible; + + bool bFECSupported; + bool bFECUncorrectedSupported; + bool bFECCorrectedSupported; + bool bFECBitSupported; + bool bFECParityBlockSupported; + bool bFECParitySupported; + + TriState bSdpExtCapable; + TriState bAsyncSDPCapable; + bool bMSAOverMSTCapable; + bool bDscPassThroughColorFormatWar; + + NvU64 maxModeBwRequired; + + DeviceImpl(DPCDHAL * hal, ConnectorImpl * connector, DeviceImpl * parent); + ~DeviceImpl(); + + virtual bool isCableOk(); + virtual bool isLogical(); + virtual bool isZombie(); + + virtual unsigned getEDIDSize() const; + virtual bool getEDID(char * buffer, unsigned size) const; + virtual unsigned getRawEDIDSize() const; + virtual bool getRawEDID(char * buffer, unsigned size) const; + + virtual bool getPCONCaps(PCONCaps *pPCONCaps); + + virtual Group * getOwningGroup() + { + return (Group *)activeGroup; + } + + bool isActive(); + + void applyOUIOverrides(); + + virtual Device * getParent() + { + return parent; + } + + virtual Device * getChild(unsigned portNumber) + { + return children[portNumber]; + } + + virtual bool isMultistream() // Sink supports multistream, remember we can have 1.1 targets + { + return address.size() != 0; + } + + virtual bool isNativeDPCD() + { + return (address.size() < 2); + } + + virtual bool isVideoSink() + { + return videoSink; + } + + virtual bool isAudioSink() + { + return audioSink; + } + + virtual bool isLoop() + { + // implementation is pending (bug 791059) + return false; + } + + virtual bool isRedundant() + { + // implementation is pending (bug 791059) + return false; + } + + virtual bool isMustDisconnect(); + + virtual bool isPlugged() + { + return plugged; + } + + virtual Address getTopologyAddress() const + { + return address; + } + + virtual ConnectorType getConnectorType() + { + return connectorType; + } + + virtual bool isFallbackEdid() + { + return this->processedEdid.isFallbackEdid(); + } + + virtual GUID getGUID() const + { + return guid; + } + + virtual PortMap getPortMap() const + { + return portMap; + } + + virtual TriState hdcpAvailableHop(); + virtual TriState hdcpAvailable(); + + virtual bool isMSAOverMSTCapable() + { + return bMSAOverMSTCapable; + } + + virtual bool isFakedMuxDevice(); + virtual bool isPreviouslyFakedMuxDevice(); + + bool bypassDpcdPowerOff() + { + return processedEdid.WARFlags.disableDpcdPowerOff; + } + + bool powerOnMonitorBeforeLt() + { + return 
processedEdid.WARFlags.powerOnBeforeLt;
+        }
+
+        bool forceMaxLinkConfig()
+        {
+            return processedEdid.WARFlags.forceMaxLinkConfig;
+        }
+
+        bool skipRedundantLt()
+        {
+            return processedEdid.WARFlags.skipRedundantLt;
+        }
+
+        bool ignoreRedundantHotplug()
+        {
+            return processedEdid.WARFlags.ignoreRedundantHotplug;
+        }
+
+        bool isOptimalLinkConfigOverridden()
+        {
+            return processedEdid.WARFlags.overrideOptimalLinkCfg;
+        }
+
+        // Apply DPCD overrides if required
+        void dpcdOverrides();
+
+        bool getDpcdRevision(unsigned * major, unsigned * minor)
+        {
+            if (!major || !minor)
+            {
+                DP_ASSERT(0 && "Null pointers passed in.");
+                return false;
+            }
+
+            *major = this->dpcdRevisionMajor;
+            *minor = this->dpcdRevisionMinor;
+            return true;
+        }
+
+        bool getIgnoreMSACap();
+
+        AuxRetry::status setIgnoreMSAEnable(bool msaTimingParamIgnoreEn);
+
+        bool isVirtualPeerDevice()
+        {
+            return bVirtualPeerDevice;
+        }
+
+        bool isBranchDevice()
+        {
+            return !isVideoSink() && !isAudioSink();
+        }
+
+        bool isAtLeastVersion(unsigned major, unsigned minor)
+        {
+            if (dpcdRevisionMajor > major)
+                return true;
+
+            if (dpcdRevisionMajor < major)
+                return false;
+
+            return dpcdRevisionMinor >= minor;
+        }
+
+        NvU64 getMaxModeBwRequired()
+        {
+            return maxModeBwRequired;
+        }
+
+        virtual void queryGUID2();
+
+        virtual bool getSDPExtnForColorimetrySupported();
+        virtual bool getAsyncSDPSupported();
+
+        virtual bool getPanelFwRevision(NvU16 *revision);
+
+        virtual bool isPowerSuspended();
+
+        virtual void setPanelPowerParams(bool bSinkPowerStateD0, bool bPanelPowerStateOn);
+
+        virtual status transaction(Action action, Type type, int address,
+                                   NvU8 * buffer, unsigned sizeRequested,
+                                   unsigned * sizeCompleted,
+                                   unsigned * pNakReason = NULL,
+                                   NvU8 offset = 0, NvU8 nWriteTransactions = 0);
+        virtual unsigned transactionSize();
+        // The default behaviour is querying the first three registers for every lane --> flags = 0x7
+        virtual status fecTransaction(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags = NV_DP_FEC_FLAGS_SELECT_ALL);
+        virtual AuxBus * getRawAuxChannel() { return this; }
+        virtual AuxRetry * getAuxChannel() { return &friendlyAux; }
+        virtual AuxBus::status getDpcdData(unsigned offset, NvU8 * buffer,
+                                           unsigned sizeRequested,
+                                           unsigned * sizeCompleted,
+                                           unsigned * pNakReason = NULL);
+        virtual AuxBus::status setDpcdData(unsigned offset, NvU8 * buffer,
+                                           unsigned sizeRequested,
+                                           unsigned * sizeCompleted,
+                                           unsigned * pNakReason = NULL);
+        virtual AuxBus::status queryFecData(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags);
+
+        virtual DscCaps getDscCaps();
+
+        //
+        // This function returns the device itself or its parent device that is doing
+        // the DSC decompression for it.
+        //
+        virtual Device* getDevDoingDscDecompression();
+        virtual void markDeviceForDeletion() { bisMarkedForDeletion = true; }
+        virtual bool isMarkedForDeletion() { return bisMarkedForDeletion; }
+        virtual bool getRawDscCaps(NvU8 *buffer, NvU32 bufferSize);
+        virtual bool setRawDscCaps(const NvU8 *buffer, NvU32 bufferSize);
+        virtual bool setValidatedRawDscCaps(NvU8 *buffer, NvU32 bufferSize);
+        virtual bool validatePPSData(DSCPPSDATA *pPps);
+
+        virtual AuxBus::status dscCrcControl(NvBool bEnable, gpuDscCrc *dataGpu, sinkDscCrc *dataSink);
+
+        //
+        // Parameter bForceMot in both getI2cData and setI2cData is used to forcefully set
+        // the MOT bit. It is needed for some special cases where the MOT bit shouldn't
+        // be set but some customers need it to please their monitors.
+ // + virtual bool getI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot = false); + virtual bool setI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot = false); + virtual bool getRawEpr(unsigned * totalEpr, unsigned * freeEpr, rawEprState eprState); + + void switchToComplianceFallback(); + + // VRR Display Enablement Functions + bool startVrrEnablement(void); + void resetVrrEnablement(void); + bool isVrrMonitorEnabled(void); + bool isVrrDriverEnabled(void); + + // Panel replay related functions + bool isPanelReplaySupported(void); + void getPanelReplayCaps(void); + bool setPanelReplayConfig(panelReplayConfig prcfg); + bool getPanelReplayConfig(panelReplayConfig *pPrcfg); + bool getPanelReplayStatus(PanelReplayStatus *pPrStatus); + + NvBool getDSCSupport(); + bool getFECSupport(); + NvBool isDSCPassThroughSupported(); + NvBool isDynamicPPSSupported(); + NvBool isDynamicDscToggleSupported(); + NvBool isDSCSupported(); + NvBool isDSCDecompressionSupported(); + NvBool isDSCPossible(); + bool isFECSupported(); + bool readAndParseDSCCaps(); + bool readAndParseBranchSpecificDSCCaps(); + bool parseDscCaps(const NvU8 *buffer, NvU32 bufferSize); + bool parseBranchSpecificDscCaps(const NvU8 *buffer, NvU32 bufferSize); + bool setDscEnable(bool enable); + bool setDscEnableDPToHDMIPCON(bool bDscEnable, bool bEnablePassThroughForPCON); + bool getDscEnable(bool *pEnable); + unsigned getDscVersionMajor(); + unsigned getDscVersionMinor(); + unsigned getDscRcBufferSize(); + unsigned getDscRcBufferBlockSize(); + unsigned getDscMaxSlicesPerSink(); + unsigned getDscLineBufferBitDepth(); + NvBool isDscBlockPredictionSupported(); + unsigned getDscMaxBitsPerPixel(); + NvBool isDscRgbSupported(); + NvBool isDscYCbCr444Supported(); + NvBool isDscYCbCrSimple422Supported(); + NvBool isDscYCbCr422NativeSupported(); + NvBool isDscYCbCr420NativeSupported(); + unsigned getDscPeakThroughputMode0(); + unsigned getDscPeakThroughputModel(); + unsigned getDscMaxSliceWidth(); + unsigned getDscDecoderColorDepthSupportMask(); + void setDscDecompressionDevice(bool bDscCapBasedOnParent); + virtual bool getDeviceSpecificData(NvU8 *oui, NvU8 *deviceIdString, + NvU8 *hwRevision, NvU8 *swMajorRevision, + NvU8 *swMinorRevision); + virtual bool getParentSpecificData(NvU8 *oui, NvU8 *deviceIdString, + NvU8 *hwRevision, NvU8 *swMajorRevision, + NvU8 *swMinorRevision); + + virtual bool setModeList(DisplayPort::DpModesetParams *pModeList, unsigned numModes); + }; + class DeviceHDCPDetection : public Object, MessageManager::Message::MessageEventSink, Timer::TimerCallback + { + DeviceImpl* parent; + RemoteDpcdReadMessage remoteBKSVReadMessage; + RemoteDpcdReadMessage remoteBCapsReadMessage; + RemoteDpcdReadMessage remote22BCapsReadMessage; + MessageManager * messageManager; // For transmit and receive + Timer * timer; + bool bksvReadCompleted; + bool bCapsReadCompleted; + bool isValidBKSV; + bool isBCapsHDCP; + unsigned retriesRemoteBKSVReadMessage; + unsigned retriesRemoteBCapsReadMessage; + unsigned retriesRemote22BCapsReadMessage; + bool retryRemoteBKSVReadMessage; + bool retryRemoteBCapsReadMessage; + bool retryRemote22BCapsReadMessage; + bool bBKSVReadMessagePending; + bool bBCapsReadMessagePending; + + public: + + DeviceHDCPDetection(DeviceImpl * parent, MessageManager * messageManager, Timer * timer) + : bksvReadCompleted(false),bCapsReadCompleted(false),isValidBKSV(false), + isBCapsHDCP(false), retriesRemoteBKSVReadMessage(0), 
retriesRemoteBCapsReadMessage(0), + retriesRemote22BCapsReadMessage(0), retryRemoteBKSVReadMessage(false), + retryRemoteBCapsReadMessage(false), retryRemote22BCapsReadMessage(false), + bBKSVReadMessagePending(false), bBCapsReadMessagePending(false) + + { + this->parent = parent; + this->messageManager = messageManager; + this->timer = timer; + } + + ~DeviceHDCPDetection(); + void expired(const void * tag); + void start(); + void waivePendingHDCPCapDoneNotification(); + + bool hdcpValidateKsv(const NvU8 *ksv, NvU32 Size); + void handleRemoteDpcdReadDownReply(MessageManager::Message * from); + void messageFailed(MessageManager::Message * from, NakData * nakData); + void messageCompleted(MessageManager::Message * from); + }; +} + +#endif //INCLUDED_DP_DEVICEIMPL_H + diff --git a/src/common/displayport/inc/dp_discovery.h b/src/common/displayport/inc/dp_discovery.h new file mode 100644 index 0000000..c950136 --- /dev/null +++ b/src/common/displayport/inc/dp_discovery.h @@ -0,0 +1,329 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_discovery.h * +* Class definition for discovery manager. 
* +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_DISCOVERY_H +#define INCLUDED_DP_DISCOVERY_H + +#include "dp_address.h" +#include "dp_list.h" +#include "dp_messages.h" +#include "dp_messagecodings.h" + +namespace DisplayPort +{ + class DiscoveryManager : virtual public Object + { + public: + struct Device + { + Address address; // direct topology address + + bool legacy; // legacy (NON DP) device emulated on this port + bool branch; // DP 1.2 style branching device + PeerDevice peerDevice; // connector type of the device on this port + unsigned dpcdRevisionMajor; + unsigned dpcdRevisionMinor; + GUID peerGuid; // device guid + unsigned SDPStreams; // maximum number of audio streams supported + unsigned SDPStreamSinks; // number of outputs to select from + bool dirty; // got updates for the same device + PortMap portMap; + bool videoSink; // Should be true when a video sink is supported + NvU64 maxTmdsClkRate; + + Device():peerDevice(None),SDPStreams(0),SDPStreamSinks(0),dirty(false),videoSink(false),maxTmdsClkRate(0) + { + portMap.validMap = portMap.inputMap = portMap.internalMap = 0; + } + + ~Device(){} + + }; + + struct ReceiverSink : + virtual public Object, + public MessageManager::MessageReceiver::MessageReceiverEventSink + { + DiscoveryManager * parent; + + // will handle CSN (up_req) and generate a up_reply for it. + virtual void messageProcessed(MessageManager::MessageReceiver * from); + void handleCSN(MessageManager::MessageReceiver * from); + + ReceiverSink(DiscoveryManager * parent) + :parent(parent) + {} + + virtual ~ReceiverSink() + {} + }; + + // This will account for upreplies and their failures/retries. + struct CsnUpReplyContainer : ListElement, Timer::TimerCallback, MessageManager::Message::MessageEventSink + { + struct CsnUpReply: public GenericUpReplyMessage + { + CsnUpReplyContainer * container; + + CsnUpReply(CsnUpReplyContainer * container, const Address & target) + : GenericUpReplyMessage(target, 0x2), container(container) + {} + + ~CsnUpReply() + {} + + }; + + DiscoveryManager * parent; + CsnUpReply upReplyMessage; + unsigned delayInUsec; + unsigned retries; + Address target; + + virtual void messageFailed(MessageManager::Message * from, NakData * nakData) + { + // if reason of failure is not timeout or defer; just forget trying again. + if (!(nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + messageCompleted(from); + return; + } + + // queue a callback to reset and send again + queueUpReply(); + return; + } + + virtual void messageCompleted(MessageManager::Message * from) + { + // don't delete now. Queue callback to delete later + retries = 0; + parent->timer->queueCallback(this, "CSNF", 5000); + } + + void queueUpReply() + { + parent->timer->queueCallback(this, "CSNF", delayInUsec/1000); + } + + void postUpReply() + { + upReplyMessage.set(target); + parent->messageManager->postReply(&this->upReplyMessage, this); + } + + virtual void expired(const void * tag) + { + if (retries) + retries--; + + if (retries) + postUpReply(); + else + { + // enough retries. wrap up. 
+ delete this; + } + } + + CsnUpReplyContainer(DiscoveryManager * parent) + :parent(parent), upReplyMessage(this, target), delayInUsec(200000), retries(4), target(Address(0)) + {} + + virtual ~CsnUpReplyContainer() + { + // remove self from queue and delete + // cancel all pending callbacks + parent->timer->cancelCallbacks(this); + parent->pendingCsnUpReplies.remove(this); + } + + }; + + ReceiverSink receiverSink; + + ConnStatusNotifyMessage connectionStatusNotifyProcessor; + + GUIDBuilder guidBuilder; + + List pendingCsnUpReplies; + + public: + + struct DiscoveryManagerEventSink + { + virtual void discoveryDetectComplete() = 0; // reply to processDetect + virtual void discoveryNewDevice(const DiscoveryManager::Device & device) = 0; // these can go out anytime + virtual void discoveryLostDevice(const Address & address) = 0; + }; + + enum { + maximumTopologyNodes = 128 + }; + + Device currentDevices[maximumTopologyNodes]; + unsigned currentDevicesCount; + + Device * findDevice(const Address & address); + Device * findDevice(GUID & guid); + void addDevice(const Device & device); + void removeDevice(Device * device); + void removeDeviceTree(const Address & prefix); + Device * findChildDeviceForBranchWithGuid(GUID guid, unsigned port, Address & childAddr); + + // + // This is responsible for a "complete" detection of a sink. Specifically using remote dpcd reads and writes + // + struct SinkDetection : MessageManager::Message::MessageEventSink, ListElement, Timer::TimerCallback + { + Device device; + Address address; + RemoteDpcdWriteMessage remoteDpcdWriteMessage; + RemoteDpcdReadMessage remoteDpcdReadMessage; + PowerUpPhyMessage powerUpPhyMessage; + LinkAddressMessage linkAddressMessage; + DiscoveryManager * parent; + bool completed; + unsigned retriesRemoteDpcdWriteMessage; + bool retryRemoteDpcdWriteMessage; + unsigned retriesRemoteDpcdReadMessage; + bool retryRemoteDpcdReadMessage; + unsigned retriesLinkAddressMessage; + bool retryLinkAddressMessage; + + bool bFromCSN; + + SinkDetection(DiscoveryManager * parent, const Device & device, bool bFromCSN) + : device(device), address(device.address), parent(parent), completed(false), + retriesRemoteDpcdWriteMessage(0), retryRemoteDpcdWriteMessage(false), + retriesRemoteDpcdReadMessage(0), retryRemoteDpcdReadMessage(false), + retriesLinkAddressMessage(0), retryLinkAddressMessage(false), + bFromCSN(bFromCSN) + {} + + ~SinkDetection(); + void expired(const void * tag); + void start(); + + void detectCompleted(bool passed); + void messageFailed(MessageManager::Message * from, NakData * nakData); + void handleRemoteDpcdReadDownReply(); + void handleRemoteDpcdWriteDownReply(); + void handleLinkAddressDownReply(); + + void messageCompleted(MessageManager::Message * from); + + }; + + // + // This object represents an address in some stage of detection + // + struct BranchDetection : MessageManager::Message::MessageEventSink, ListElement, Timer::TimerCallback + { + Device parentDevice; + Address address; + LinkAddressMessage::Result child[16]; + unsigned childCount; + + LinkAddressMessage linkAddressMessage; + RemoteDpcdWriteMessage remoteDpcdWriteMessage; + + DiscoveryManager * parent; + bool completed; + bool retryLinkAddressMessage; + unsigned retriesLinkAddressMessage; + unsigned retriesRemoteDpcdWriteMessage; + bool retryRemoteDpcdWriteMessage; + + BranchDetection(DiscoveryManager * parent, const Device & device) + : parentDevice(device), address(parentDevice.address), + parent(parent), completed(false), + retryLinkAddressMessage(false), 
retriesLinkAddressMessage(0), + retriesRemoteDpcdWriteMessage(0), retryRemoteDpcdWriteMessage(false) + {} + + void expired(const void * tag); + void start(); + ~BranchDetection(); + + void detectCompleted(bool present); + void messageFailed(MessageManager::Message * from, NakData * nakData) ; + void handleLinkAddressDownReply(); + void handleRemoteDpcdReadDownReply(); + void messageCompleted(MessageManager::Message * from); + }; + + void detect(const Address & address); + void detectBranch(Device device); + void detectSink(Device newDevice, bool bFromCSN); + +public: + + List outstandingBranchDetections; + List outstandingSinkDetections; + DiscoveryManagerEventSink * sink; // To call NotifyDetectComplete() + MessageManager * messageManager; // For transmit and receive + Timer * timer; + DPCDHAL * hal; + + DiscoveryManager(MessageManager * messageManager, DiscoveryManagerEventSink * sink, Timer * timer, DPCDHAL * hal) + : receiverSink(this), + connectionStatusNotifyProcessor(&receiverSink), + guidBuilder(timer, 0x10DE9070), + currentDevicesCount(0), + sink(sink), + messageManager(messageManager), + timer(timer), + hal(hal) + { + + // + // Register to filter all the upmessages. We want to know when + // connection status notify events are on their way. + // + messageManager->registerReceiver(&connectionStatusNotifyProcessor); + } + + ~DiscoveryManager() + { + while (!this->outstandingBranchDetections.isEmpty()) + delete this->outstandingBranchDetections.front(); + + while (!this->outstandingSinkDetections.isEmpty()) + delete this->outstandingSinkDetections.front(); + + while (!this->pendingCsnUpReplies.isEmpty()) + delete this->pendingCsnUpReplies.front(); + } + + void notifyLongPulse(bool status); + + }; +} +#endif //INCLUDED_DP_DISCOVERY_H diff --git a/src/common/displayport/inc/dp_edid.h b/src/common/displayport/inc/dp_edid.h new file mode 100644 index 0000000..61b841c --- /dev/null +++ b/src/common/displayport/inc/dp_edid.h @@ -0,0 +1,308 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_edid.h * +* Reading EDID from SST/MST devices * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_EDID_H +#define INCLUDED_DP_EDID_H + +#include "dp_buffer.h" +#include "dp_auxbus.h" +#include "dp_address.h" +#include "dp_messages.h" +#include "dp_messagecodings.h" +#include "dp_timer.h" + +namespace DisplayPort +{ + class Edid; + + // + // Shared utility object for MST/SST edid reading. + // This object handles retries, CRC validation, + // identification of EDID length, DDC ping, etc. + // + // It's designed as an asynchronous state machine + // because of the way MST EDID reads are built. + // + class EdidAssembler + { + public: + EdidAssembler(Edid * const edid, bool bPatchCrc = false); + + // + // returns false - when existing data in Edid is invalid + // seg - segment from which to read the next block + // offset - offset within the block at which to start reading the next block + // + bool readNextRequest(NvU8 & seg, NvU8 & offset); + + // posts the reply data for the previous read request (success == false + // marks the attempt as failed) + void postReply(const Buffer & buffer, unsigned sizeCompleted, bool success); + void postReply(unsigned char * data, unsigned sizeCompleted, bool success); + + // returns true when it read all the required blocks + bool readIsComplete(); + void reset(); + private: + Edid * edid; + Stream stream; + + NvU8 oldBlockChecksum; + unsigned blocksRead; + unsigned totalBlockCnt; + unsigned retriesCount; + bool bPatchCrc; + }; + + // + // EDID + // + class Edid + { + public: + Edid(); + ~Edid(); + + Buffer * getBuffer() const { return &buffer; } + NvU8 getFirstPageChecksum(); // Get checksum byte + NvU8 getLastPageChecksum(); // Get checksum byte for last block + + bool verifyCRC(); + unsigned getEdidVersion(); + unsigned getBlockCount(); + const char * getName() const; + unsigned getEdidSize() const; + bool isChecksumValid() const; + bool isJunkEdid() const; + bool isFallbackEdid() const; + void swap(Edid & right); + void applyEdidWorkArounds(NvU32 warFlag, const DpMonitorDenylistData *pDenylistData); + void patchCrc(); + void setForcedEdidChecksum(bool set) + { + this->forcedCheckSum = set; + } + + void setFallbackFlag(bool set) + { + this->fallbackEdid = set; + } + + void setPatchedChecksum(bool set) + { + this->patchedChecksum = set; + } + + bool isPatchedChecksum() const + { + return this->patchedChecksum; + } + + bool isValidHeader() const; + + unsigned getManufId() const + { + if (buffer.getLength() < 0xa) + return 0; + + return ((buffer.data[0x9] << 8) | (buffer.data[0x8])); + } + + unsigned getProductId() const + { + if (buffer.getLength() < 0xc) + return 0; + + return ((buffer.data[0xb] << 8) | (buffer.data[0xa])); + } + + unsigned getYearWeek() const + { + if (buffer.getLength() < 0x12) + return 0; + + return ((buffer.data[0x11] << 8) | (buffer.data[0x10])); + } + + typedef struct + { + bool extensionCountDisabled; + bool dataForced; + bool disableDpcdPowerOff; + bool forceMaxLinkConfig; + bool powerOnBeforeLt; + bool skipRedundantLt; + bool skipCableBWCheck; + bool overrideOptimalLinkCfg; + bool overrideMaxLaneCount; + bool ignoreRedundantHotplug; + bool delayAfterD3; + bool keepLinkAlive; + bool useLegacyAddress; + bool bIgnoreDscCap; // Ignore DSC even if sink reports DSC capability + bool bDisableDownspread; + bool bForceHeadShutdown; + bool bDisableDscMaxBppLimit; + bool bForceHeadShutdownOnModeTransition; + bool
bDP2XPreferNonDSCForLowPClk; + }_WARFlags; + + _WARFlags WARFlags; + + typedef struct + { + unsigned maxLaneCount; // Max lane count value to override + unsigned maxLaneAtHighRate; // Max lane count supported at HBR + unsigned maxLaneAtLowRate; // Max lane count supported at RBR + unsigned optimalLinkRate; // Optimal link rate value to override + unsigned optimalLaneCount; // Optimal lane count value to override + }_WARData; + + _WARData WARData; + + void resetData() + { + buffer.reset(); + checkSumValid = false; + forcedCheckSum = false; + fallbackEdid = false; + // clear the WARFlags + _WARFlags temp = {0}; + WARFlags = temp; + } + + bool operator== (const Edid & other) + { + return (buffer == other.buffer); + } + + bool operator!= (const Edid & other) + { + return !(buffer == other.buffer); + } + + private: + void validateCheckSum(); + + mutable Buffer buffer; + bool checkSumValid; + bool forcedCheckSum; + bool fallbackEdid; + bool patchedChecksum; + }; + + // + // SST EDID Read API + // + bool EdidReadSST(Edid & edid, AuxBus * aux, Timer * timer, bool pendingTestRequestEdidRead = false, bool bBypassAssembler = false, MainLink *main = NULL); + + enum EDID_DDC + { + EDID_DDC_NONE = 0x00, + EDID_DDC_ADR0 = 0xA0, + EDID_DDC_ADR1 = 0xA2, + EDID_DDC_ADR2 = 0xA6, + EDID_SEG_SELECTOR_OFFSET = 0x60, + }; + EDID_DDC sstDDCPing(AuxBus & dpAux); + + // + // MST EDID Read API + // + + class EdidReadMultistream : public Object, protected MessageManager::Message::MessageEventSink, Timer::TimerCallback + { + public: + class EdidReadMultistreamEventSink // Connector will inherit from this + { + public: + virtual void mstEdidCompleted(EdidReadMultistream * from) = 0; + virtual void mstEdidReadFailed(EdidReadMultistream * from) = 0; + }; + + EdidReadMultistream(Timer * timer, MessageManager * manager, EdidReadMultistream::EdidReadMultistreamEventSink * sink, Address topologyAddress) + : topologyAddress(topologyAddress), manager(manager), edidReaderManager(&edid), ddcIndex(0), + retries(0), timer(timer), sink(sink) + { + startReadingEdid(); + } + + Edid edid; + Address topologyAddress; + ~EdidReadMultistream(); + + private: + void startReadingEdid(); + + MessageManager * manager; + RemoteI2cReadMessage remoteI2cRead; + EdidAssembler edidReaderManager; // come up another word besides edidReaderManager eg Manager + NvU8 DDCAddress; + NvU8 ddcIndex; + unsigned retries; + Timer * timer; + + void readNextBlock(NvU8 seg, NvU8 offset); + void failedToReadEdid(); + void expired(const void * tag); + + EdidReadMultistreamEventSink * sink; + + virtual void messageFailed(MessageManager::Message * from, NakData * nakData); + virtual void messageCompleted(MessageManager::Message * from); + void edidAttemptDone(bool succeeded); + }; + + // + // Useful defines + // + enum + { + EDID_BLOCK_SIZE = 0x80, + EDID_SEGMENT_SIZE = 2*EDID_BLOCK_SIZE, + EDID_POLICY_BLOCK_READ_MAX_RETRY_COUNT = 3, + // DID EDID CTS v1.3 d12 currently outlines that Source shall support up to 16 blocks of EDID data. + EDID_MAX_BLOCK_COUNT = 16, + }; + + static const NvU8 ddcAddrList[] = {EDID_DDC_ADR0, EDID_DDC_ADR1, EDID_DDC_ADR2}; + const NvU8 ddcAddrListSize = sizeof(ddcAddrList)/sizeof(NvU8); + + // HDMI 1.4 Section 8.5: HDMI Sink can have up to 100ms to get EDID ready. 
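+ // The retry constants below appear to be sized around that 100 ms allowance.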
+ const NvU8 EDID_READ_RETRY_TIMEOUT_MS = 100; + const NvU8 EDID_MAX_AUX_RETRIES = 10; + const NvU8 EDID_AUX_WAIT_TIME = 1; + NvU8 getEDIDBlockChecksum(const Buffer &); + + void makeEdidFallback(Edid & edid, NvU32 fallbackFormatSupported = 0); + void makeEdidFallbackVGA(Edid & edid); + +} + +#endif //INCLUDED_DP_EDID_H diff --git a/src/common/displayport/inc/dp_evoadapter.h b/src/common/displayport/inc/dp_evoadapter.h new file mode 100644 index 0000000..80f81af --- /dev/null +++ b/src/common/displayport/inc/dp_evoadapter.h @@ -0,0 +1,446 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_evoadapter.h * +* Interface for low level access to the aux bus. * +* This is the synchronous version of the interface. * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_EVOADAPTER_H +#define INCLUDED_DP_EVOADAPTER_H + +#include "dp_timer.h" +#include "dp_auxbus.h" +#include "dp_mainlink.h" +#include "dp_wardatabase.h" +#include "dp_auxdefs.h" +#include "dp_regkeydatabase.h" + +#include +#include +#include + +#define HDCP_DUMMY_CN (0x1) +#define HDCP_DUMMY_CKSV (0xFFFFF) + +namespace DisplayPort +{ + class EvoInterface + { + public: + // + // IOCTL access to RM class DISPLAY_COMMON and NV50_DISPLAY + // + virtual NvU32 rmControl0073(NvU32 command, void * params, NvU32 paramSize) = 0; + virtual NvU32 rmControl5070(NvU32 command, void * params, NvU32 paramSize) = 0; + + virtual bool getMaxLinkConfigFromUefi(NvU8 &linkRate, NvU8 &laneCount) + { + linkRate = 0; laneCount = 0; + return true; + } + + // + // Call to tell the DD that link training will be performed. + // Required when a head is attached and the GPU enters flush mode. + // Required to enable/disable audio. + // + // Derived classes that override these functions must call down to + // DisplayPort::EvoInterface::pre/postLinkTraining() to inherit this + // implementation. + // + virtual void preLinkTraining(NvU32 head) + { + } + virtual void postLinkTraining(NvU32 head) + { + } + + virtual NvU32 getSubdeviceIndex() = 0; + virtual NvU32 getDisplayId() = 0; + virtual NvU32 getSorIndex() = 0; + virtual NvU32 getLinkIndex() = 0; // Link A = 0, Link B = 1 + // + // Query the value of a registry key.
Implementations should return 0 + // if the regkey is not set. + // + virtual NvU32 getRegkeyValue(const char *key) + { + return 0; + } + virtual NvU32 monitorDenylistInfo(NvU32 manufId, NvU32 productId, DpMonitorDenylistData *pDenylistData) + { + return 0; + } + + virtual bool isInbandStereoSignalingSupported() + { + return false; + } + }; + + MainLink * MakeEvoMainLink(EvoInterface * provider, Timer * timer); + AuxBus * MakeEvoAuxBus(EvoInterface * provider, Timer * timer); + + class EvoAuxBus : public AuxBus + { + public: + EvoAuxBus(EvoInterface * provider, Timer * timer) + : provider(provider), + timer(timer), + displayId(provider->getDisplayId()), + subdeviceIndex(provider->getSubdeviceIndex()), + devicePlugged(false) + { + } + + virtual status transaction(Action action, Type type, int address, NvU8 * buffer, + unsigned sizeRequested, unsigned * sizeCompleted, + unsigned * pNakReason = NULL, + NvU8 offset = 0, NvU8 nWriteTransactions = 0); + virtual unsigned transactionSize(); + virtual void setDevicePlugged(bool); + + private: + EvoInterface * provider; + Timer * timer; + NvU32 displayId; + NvU32 subdeviceIndex; + bool devicePlugged; + }; + + class EvoMainLink : public MainLink + { + private: + NvU32 _maxLinkRateSupportedGpu; + NvU32 _maxLinkRateSupportedDfp; + bool _hasIncreasedWatermarkLimits; + bool _hasMultistream; + bool _isPC2Disabled; + bool _isEDP; + + // + // Bit mask for GPU supported DP versions. + // Defines the same as NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS.dpVersionsSupported + // + NvU32 _gpuSupportedDpVersions; + + bool _isStreamCloningEnabled; + bool _needForceRmEdid; + bool _skipPowerdownEDPPanelWhenHeadDetach; + bool _isDscDisabledByRegkey; + bool _isMstDisabledByRegkey; + bool _isFECSupported; + bool _useDfpMaxLinkRateCaps; + bool _applyLinkBwOverrideWarRegVal; + bool _isDynamicMuxCapable; + bool _enableMSAOverrideOverMST; + bool _isLTPhyRepeaterSupported; + bool _isMSTPCONCapsReadDisabled; + bool _isDownspreadSupported; + bool _bAvoidHBR3; + bool _bAvoidHBR3DisabledByRegkey; + bool _bIsDpTunnelingHwBugWarEnabled; + // + // LTTPR count reported by RM; it might not match the DPLib probe. + // For example, some Intel LTTPRs might not be ready to respond to the 0xF0000 probe + // done by RM, but when DPLib checks the same DPCD offsets they respond + // properly. This can cause serious LT problems.
+ // + unsigned _rmPhyRepeaterCount; + + struct DSC + { + bool isDscSupported; + unsigned encoderColorFormatMask; + unsigned lineBufferSizeKB; + unsigned rateBufferSizeKB; + unsigned bitsPerPixelPrecision; + unsigned maxNumHztSlices; + unsigned lineBufferBitDepth; + }_DSC; + NV0073_CTRL_SPECIFIC_HDCP_CTRL_PARAMS paramsHdcpCtrl; + void initializeRegkeyDatabase(); + void applyRegkeyOverrides(); + + protected: + EvoInterface * provider; + Timer * timer; + + NvU32 displayId; + NvU32 subdeviceIndex; + unsigned allHeadMask; + + NV0073_CTRL_DFP_GET_INFO_PARAMS dfpParams; + NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS dpParams; + + bool _isDownspreadDisabledByRegkey; + + public: + EvoMainLink(EvoInterface * provider, Timer * timer); + + virtual bool hasIncreasedWatermarkLimits() + { + return _hasIncreasedWatermarkLimits; + } + + virtual bool hasMultistream() + { + return _hasMultistream; + } + + virtual bool isPC2Disabled() + { + return _isPC2Disabled; + } + + virtual NvU32 getGpuDpSupportedVersions() + { + return _gpuSupportedDpVersions; + } + virtual bool isFECSupported() + { + return _isFECSupported; + } + + virtual bool isStreamCloningEnabled() + { + return _isStreamCloningEnabled; + } + + virtual NvU32 maxLinkRateSupported() + { + // + // For cases where RM asks dplib to honor the maxLinkRate limit defined in DCB, always use + // this as the limit. Regkey has no meaning in this case. + // In other cases, based on regkey either honor the dcb limit or the max link rate for the + // specific GPU architecture. This is needed to avoid regressions on existing chips. + // + if ((_applyLinkBwOverrideWarRegVal || _useDfpMaxLinkRateCaps) && + (_maxLinkRateSupportedDfp < _maxLinkRateSupportedGpu)) + { + return (LINK_RATE_TO_DATA_RATE_8B_10B(_maxLinkRateSupportedDfp)); + } + return (LINK_RATE_TO_DATA_RATE_8B_10B(_maxLinkRateSupportedGpu)); + } + + virtual bool isForceRmEdidRequired() + { + return _needForceRmEdid; + } + + virtual bool fetchEdidByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize); + virtual bool applyEdidOverrideByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize); + + virtual bool isDynamicMuxCapable() + { + return _isDynamicMuxCapable; + } + + virtual bool isInternalPanelDynamicMuxCapable() + { + return (_isDynamicMuxCapable && _isEDP); + } + + virtual bool isDownspreadSupported() + { + return _isDownspreadSupported; + } + + virtual bool isAvoidHBR3WAREnabled() + { + return _bAvoidHBR3 && !_bAvoidHBR3DisabledByRegkey; + } + virtual bool isDpTunnelingHwBugWarEnabled() + { + return _bIsDpTunnelingHwBugWarEnabled; + } + // Get GPU DSC capabilities + virtual void getDscCaps(bool *pbDscSupported, + unsigned *pEncoderColorFormatMask, + unsigned *pLineBufferSizeKB, + unsigned *pRateBufferSizeKB, + unsigned *pBitsPerPixelPrecision, + unsigned *pMaxNumHztSlices, + unsigned *pLineBufferBitDepth) + { + if (pbDscSupported) + { + *pbDscSupported = _DSC.isDscSupported; + } + + if (pEncoderColorFormatMask) + { + *pEncoderColorFormatMask = _DSC.encoderColorFormatMask; + } + + if (pLineBufferSizeKB) + { + *pLineBufferSizeKB = _DSC.lineBufferSizeKB; + } + + if (pRateBufferSizeKB) + { + *pRateBufferSizeKB = _DSC.rateBufferSizeKB; + } + + if (pBitsPerPixelPrecision) + { + *pBitsPerPixelPrecision = _DSC.bitsPerPixelPrecision; + } + + if (pMaxNumHztSlices) + { + *pMaxNumHztSlices = _DSC.maxNumHztSlices; + } + + if (pLineBufferBitDepth) + { + *pLineBufferBitDepth = _DSC.lineBufferBitDepth; + } + } + + virtual NvU32 getRootDisplayId() + { + return this->displayId; + } + + virtual bool isLttprSupported() + { + return 
this->_isLTPhyRepeaterSupported; + } + + EvoInterface * getProvider() + { + return this->provider; + } + + // Returns the current mux state; returns false if the device is not mux capable. + bool getDynamicMuxState(NvU32 *muxState); + + virtual bool physicalLayerSetTestPattern(PatternInfo * patternInfo); + virtual void preLinkTraining(NvU32 head); + virtual void postLinkTraining(NvU32 head); + virtual NvU32 getRegkeyValue(const char *key); + virtual const DP_REGKEY_DATABASE& getRegkeyDatabase(); + virtual NvU32 getSorIndex(); + virtual bool isInbandStereoSignalingSupported(); + virtual bool train(const LinkConfiguration & link, bool force, LinkTrainingType linkTrainingType, + LinkConfiguration *retLink, bool bSkipLt = false, bool isPostLtAdjRequestGranted = false, + unsigned phyRepeaterCount = 0); + virtual bool retrieveRingBuffer(NvU8 dpRingBuffertype, NvU32 numRecords); + virtual void getLinkConfig(unsigned & laneCount, NvU64 & linkRate); + void getLinkConfigWithFEC(unsigned & laneCount, NvU64 & linkRate, bool &bFECEnabled); + virtual bool getMaxLinkConfigFromUefi(NvU8 &linkRate, NvU8 &laneCount); + virtual bool setDpMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams); + virtual bool setDpStereoMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams); + bool setFlushMode(); + void clearFlushMode(unsigned headMask, bool testMode=false); + + virtual bool dscCrcTransaction(NvBool bEnable, gpuDscCrc *data, NvU16 *headIndex); + + void triggerACT(); + void configureAndTriggerECF(NvU64 ecf, NvBool bForceClearEcf = NV_FALSE, NvBool bAddStreamBack = NV_FALSE); // This function programs as well as triggers the ECF on branch devices. + virtual void disableAlternateScramblerReset(); + void configureHDCPDisableAuthentication(); + void configureHDCPAbortAuthentication(AbortAuthReason abortAuthReason); + bool setStreamType(unsigned streamIndex, NvU8 streamType, bool * bNeedReNegotiate); + void configureHDCPValidateLink(HDCPValidateData &hdcpValidateData, NvU64 cN = HDCP_DUMMY_CN, NvU64 cKsv = HDCP_DUMMY_CKSV); + void forwardPendingKsvListReady(NvBool bKsvListReady); + void configureHDCPRenegotiate(NvU64 cN = HDCP_DUMMY_CN, NvU64 cKsv = HDCP_DUMMY_CKSV, bool bForceReAuth = false, + bool bRxIDMsgPending = false); + void configureHDCPGetHDCPState(HDCPState &hdcpState); + bool rmUpdateDynamicDfpCache(NvU32 headIndex, RmDfpCache * dfpCache, NvBool bResetDfp); + + virtual NvU32 headToStream(NvU32 head, bool bSidebandMessageSupported, DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY); + + void configureSingleStream(NvU32 head, + NvU32 hBlankSym, + NvU32 vBlankSym, + bool bEnhancedFraming, + NvU32 tuSize, + NvU32 waterMark, + DP_COLORFORMAT colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamId = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultistreamMode = DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + bool bEnableAudioOverRightPanel = false, + bool bEnable2Head1Or = false); + + void configureMultiStream(NvU32 head, + NvU32 hBlankSym, + NvU32 vBlankSym, + NvU32 slotStart, + NvU32 slotEnd, + NvU32 PBN, + NvU32 Timeslice, + DP_COLORFORMAT colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultistreamMode = DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + bool bEnableAudioOverRightPanel = false, + bool bEnable2Head1Or =
false); + + void configureSingleHeadMultiStreamMode(NvU32 displayIDs[], + NvU32 numStreams, + NvU32 mode, + bool bSetConfig, + NvU8 vbiosPrimaryDispIdIndex = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY); + + void configureMsScratchRegisters(NvU32 address, + NvU32 hopCount, + NvU32 driverState); + + bool isActive(); + bool isEDP(); + bool skipPowerdownEdpPanelWhenHeadDetach(); + bool isMSTPCONCapsReadDisabled(); + bool supportMSAOverMST(); + bool controlRateGoverning(NvU32 head, bool enable, bool updateNow); + + bool getDpTestPattern(NV0073_CTRL_DP_TESTPATTERN *testPattern); + bool setDpTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, + NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, + NvBool bIsHBR2, NvBool bSkipLaneDataOverride); + bool getDpLaneData(NvU32 *numLanes, NvU32 *data); + bool setDpLaneData(NvU32 numLanes, NvU32 *data); + void configurePowerState(bool bPowerUp); + NvU32 monitorDenylistInfo(NvU32 ManufacturerID, NvU32 ProductID, DpMonitorDenylistData *pDenylistData); + NvU32 allocDisplayId(); + bool freeDisplayId(NvU32 displayId); + virtual bool queryAndUpdateDfpParams(); + virtual bool queryGPUCapability(); + + bool getEdpPowerData(bool *panelPowerOn, bool *dpcdPowerStateD0); + virtual bool vrrRunEnablementStage(unsigned stage, NvU32 *status); + + void configureTriggerSelect(NvU32 head, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY); + void configureTriggerAll(NvU32 head, bool enable); + virtual bool configureLinkRateTable(const NvU16 *pLinkRateTable, LinkRates *pLinkRates); + bool configureFec(const bool bEnableFec); + }; + +} + +#endif //INCLUDED_DP_EVOADAPTER_H diff --git a/src/common/displayport/inc/dp_groupimpl.h b/src/common/displayport/inc/dp_groupimpl.h new file mode 100644 index 0000000..375a148 --- /dev/null +++ b/src/common/displayport/inc/dp_groupimpl.h @@ -0,0 +1,147 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_groupimpl.h * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_GROUPIMPL_H +#define INCLUDED_DP_GROUPIMPL_H + +#include "dp_connector.h" +#include "dp_deviceimpl.h" +#include "dp_linkedlist.h" +#include "dp_watermark.h" +#include "dp_auxdefs.h" + +namespace DisplayPort +{ + class StreamEncryptionStatusDetection; + + struct GroupImpl : public Group, ListElement, Timer::TimerCallback + { + ConnectorImpl * parent; + LinkedList members; + StreamEncryptionStatusDetection * streamEncryptionStatusDetection; + NvU8 clientId[CLIENT_ID_SIZE]; + List elements; + unsigned headIndex; + unsigned streamIndex; + bool streamValidationDone; + bool headInFirmware; // Set if this is a firmware run mode. If set lastModesetInfo is NOT valid + bool bIsHeadShutdownNeeded; // Set if head shutdown is requested during modeset + bool hdcpEnabled; + bool hdcpPreviousStatus; + bool qseEncryptionStatusMismatch; + bool bWaitForDeAllocACT; + bool bDeferredPayloadAlloc; + ModesetInfo lastModesetInfo; + DSC_MODE dscModeRequest; // DSC mode requested during NAB + DSC_MODE dscModeActive; // DSC mode currently active, set in NAE + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID singleHeadMultiStreamID; + DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultiStreamMode; + DP_COLORFORMAT colorFormat; + + struct + { + unsigned PBN; + int count; + int begin; + bool hardwareDirty; // Does the configureStream need to be called again? + Watermark watermarks; // Cached watermark calculations + } timeslot; + + GroupImpl(ConnectorImpl * parent, bool isFirmwareGroup = false) + : parent(parent), + streamValidationDone(true), + headInFirmware(false), + bIsHeadShutdownNeeded(true), + hdcpEnabled(false), + hdcpPreviousStatus(false), + qseEncryptionStatusMismatch(false), + bWaitForDeAllocACT(false), + dscModeRequest(DSC_MODE_NONE), + dscModeActive(DSC_MODE_NONE), + singleHeadMultiStreamID(DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY), + singleHeadMultiStreamMode(DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE), + headAttached(false), timeslotAllocated(false) + { + if (isFirmwareGroup) + streamEncryptionStatusDetection = 0; + else + { + streamEncryptionStatusDetection = new StreamEncryptionStatusDetection(this, parent); + } + timeslot.count = 0; + } + + ~GroupImpl() + { + if (streamEncryptionStatusDetection) + { + delete streamEncryptionStatusDetection; + streamEncryptionStatusDetection = 0; + } + } + + virtual void insert(Device * dev); + virtual void remove(Device * dev); + void update(Device * dev, bool allocationState); // send the allocatepayload/deallocatepayload message + bool contains(Device * dev) { return members.contains(dev); } + virtual Device * enumDevices(Device * previousDevice); + + void updateVbiosScratchRegister(Device * lastDevice); // Update the VBIOS scratch register with last lit display + + // + // Timer callback tags. + // (we pass the address of these variables as context to ::expired) + // + char tagHDCPReauthentication; + char tagStreamValidation; + + char tagMSTQSEandSetECF; + unsigned QSESetECFRetries; // Retry counter for MST QSE and set ECF. + virtual void hdcpMSTQSEandSetECF(); + + unsigned authRetries; // Retry counter for the authentication. 
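+ // (::expired() identifies which callback fired by comparing the tag pointer + // it receives against the addresses of the tag members above.)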
+ + virtual void expired(const void * tag); + virtual bool hdcpSetEncrypted(bool encrypted, NvU8 streamType = NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_TYPE_0, NvBool bForceClear = NV_FALSE, NvBool bAddStreamBack = NV_FALSE); + virtual bool hdcpGetEncrypted(); + virtual void destroy(); + void cancelHdcpCallbacks(); + + bool isHeadAttached() { return headAttached; } + void setHeadAttached(bool attached); + + bool isTimeslotAllocated() { return timeslotAllocated; } + void setTimeslotAllocated(bool allocated) {timeslotAllocated = allocated;} + + private: + bool headAttached; // True if modeset started (during NAB). Sets back to False during NDE + bool timeslotAllocated; // True if timeslot is allocated for the group (beforeAddStream). Sets back to False during afterDeleteStream + }; +} + +#endif //INCLUDED_DP_GROUPIMPL_H diff --git a/src/common/displayport/inc/dp_guid.h b/src/common/displayport/inc/dp_guid.h new file mode 100644 index 0000000..2a1318d --- /dev/null +++ b/src/common/displayport/inc/dp_guid.h @@ -0,0 +1,120 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_guid.h * +* GUID struct and builder class * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_GUID_H +#define INCLUDED_DP_GUID_H + +#include "dp_internal.h" +#include "dp_timer.h" + +namespace DisplayPort +{ +#define DPCD_GUID_SIZE 16 + + struct GUID + { + NvU8 data[DPCD_GUID_SIZE]; + + GUID() + { + dpMemZero(&data, sizeof(data)); + } + + bool isGuidZero() + { + for (unsigned i = 0 ; i < DPCD_GUID_SIZE; i++) + if (data[i]) + return false; + + return true; + } + + bool operator == (const GUID & other) const + { + for (unsigned i = 0 ; i < DPCD_GUID_SIZE; i++) + if (data[i] != other.data[i]) + return false; + + return true; + } + + bool operator != (const GUID & other) const + { + return !((*this) == other); + } + + void copyFrom(const NvU8 * buffer) + { + dpMemCopy(&this->data[0], buffer, sizeof data); + } + + // XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + // Two Xs per byte, plus four dashes and a NUL byte. 
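+ // For DPCD_GUID_SIZE == 16 that is 16*2 + 4 + 1 = 37 characters. Usage + // sketch (hypothetical caller): + // GUID::StringBuffer buf; + // const char * s = guid.toString(buf); // s points into buf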
+ typedef char StringBuffer[DPCD_GUID_SIZE*2 + 5]; + char * toString(StringBuffer & buffer) const + { + char *p = &buffer[0]; + + for (unsigned i = 0; i < DPCD_GUID_SIZE; i++) { + dpByteToHexChar(p, data[i]); + p += 2; + if (i == 3 || i == 5 || i == 7 || i == 9) + *p++ = '-'; + } + + *p++ = '\0'; + + DP_ASSERT(p == buffer + sizeof(buffer)); + + return buffer; + } + }; + + class GUIDBuilder + { + NvU32 salt; + NvU32 previousRandom; + Timer * source; + + + // + // Linear congruential random number generator + // Seed values chosen from numerical methods + // + NvU32 random(); + + public: + GUIDBuilder(Timer * source, NvU32 salt); + + void makeGuid(GUID & guid); + }; +} + +#endif //INCLUDED_DP_GUID_H diff --git a/src/common/displayport/inc/dp_hostimp.h b/src/common/displayport/inc/dp_hostimp.h new file mode 100644 index 0000000..847f21f --- /dev/null +++ b/src/common/displayport/inc/dp_hostimp.h @@ -0,0 +1,57 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_hostimp.h * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_HOSTIMP_H +#define INCLUDED_DP_HOSTIMP_H + +#include "nvtypes.h" +#include "dp_tracing.h" +#include "dp_printf.h" + +extern "C" void * dpMalloc(NvLength size); +extern "C" void dpFree(void * ptr); +extern "C" void dpDebugBreakpoint(); +// Note: dpPrint() implementations are expected to append a newline themselves. +extern "C" void dpPrint(const char * formatter, ...); +extern "C" void dpPrintf(DP_LOG_LEVEL severity, const char * formatter, ...); +extern "C" void dpTraceEvent(NV_DP_TRACING_EVENT event, + NV_DP_TRACING_PRIORITY priority, NvU32 numArgs, ...); + +#if defined(_DEBUG) || defined(DEBUG) + #define NV_DP_ASSERT_ENABLED 1 +#else + #define NV_DP_ASSERT_ENABLED 0 +#endif + +#if NV_DP_ASSERT_ENABLED +extern "C" void dpAssert(const char *expression, const char *file, + const char *function, int line); +#endif + +#endif // INCLUDED_DP_HOSTIMP_H diff --git a/src/common/displayport/inc/dp_internal.h b/src/common/displayport/inc/dp_internal.h new file mode 100644 index 0000000..571e900 --- /dev/null +++ b/src/common/displayport/inc/dp_internal.h @@ -0,0 +1,129 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_internal.h * +* RM stubs to allow unit testing. * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_INTERNAL_H +#define INCLUDED_DP_INTERNAL_H + +// +// Clients should not include this file +// This file provides the private malloc implementation. +// + +#include +#include <stddef.h> // size_t + +#include "dp_object.h" +#include "dp_ringbuffer.h" + +static inline void dpByteToHexChar(char *output, NvU8 c) +{ + char dig = (c>>4) & 0xF; + output[0] = dig < 10 ? dig + '0' : dig + 'A' - 10; + dig = c & 0xF; + output[1] = dig < 10 ? dig + '0' : dig + 'A' - 10; +} + +static inline void dpHexDump(char * output, unsigned outSize, NvU8 * buffer, unsigned size) +{ + char * tail = output; + if (outSize < size * 3 + 1) + return; + + for (unsigned i = 0; i < size; i++) + { + dpByteToHexChar(tail, buffer[i]); + tail += 2; + *tail++ = ' '; + } + *tail = 0; +} + +namespace DisplayPort +{ + template <class T> + inline void swap_args(T & left, T & right) + { + T temp = left; + left = right; + right = temp; + } + + inline NvU64 divide_ceil(NvU64 a, NvU64 b) + { + return (a + b - 1) / b; + } + + inline NvU64 divide_floor(NvU64 a, NvU64 b) + { + return a / b; + } + + inline NvU64 axb_div_c_64(NvU64 a, NvU64 b, NvU64 c) + { + // NvU64 arithmetic to keep precision and avoid floats + // a*b/c = (a/c)*b + ((a%c)*b + c/2)/c + return ((a/c)*b + ((a%c)*b + c/2)/c); + } +} + +#define DP_MIN(x,y) ((x)<(y)?(x):(y)) +#define DP_MAX(x,y) ((x)<(y)?(y):(x)) + +// +// Macro to suppress unused local variable warnings +// +template <class T> void dp_used(const T & /*x*/) {} +#define DP_USED(x) dp_used(x) + + +// +// Basic debug logging facility +// + +#if NV_DP_ASSERT_ENABLED +#define DP_ASSERT(x) \ + if (!(x)) \ + { \ + addDpAssertRecord(); \ + dpAssert(#x, __FILE__, __FUNCTION__, __LINE__); \ + dpDebugBreakpoint(); \ + } +#else +#define DP_ASSERT(x) \ + { \ + DP_USED(x); \ + if (!(x)) \ + { \ + addDpAssertRecord(); \ + } \ + } +#endif + +#endif //INCLUDED_DP_INTERNAL_H diff --git a/src/common/displayport/inc/dp_linkconfig.h b/src/common/displayport/inc/dp_linkconfig.h new file mode 100644 index 0000000..782ca62 --- /dev/null +++ b/src/common/displayport/inc/dp_linkconfig.h @@ -0,0 +1,559 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2024 NVIDIA CORPORATION & AFFILIATES.
All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_linkconfig.h * +* Link Configuration object implementation * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_LINKCONFIG_H +#define INCLUDED_DP_LINKCONFIG_H + +#include "dp_auxdefs.h" +#include "dp_internal.h" +#include "dp_watermark.h" +#include "ctrl/ctrl0073/ctrl0073specific.h" // NV0073_CTRL_HDCP_VPRIME_SIZE +#include "displayport.h" + +#define NV_SUPPORTED_DP_LINK_RATES__SIZE NV_SUPPORTED_DP1X_LINK_RATES__SIZE +namespace DisplayPort +{ + typedef NvU64 LinkRate; + + class LinkRates : virtual public Object + { + public: + NvU8 entries; + // Store link rate in multiples of 10 Mbps to save space + NvU16 element[NV_SUPPORTED_DP_LINK_RATES__SIZE]; + + LinkRates() + { + entries = 0; + for (int i = 0; i < NV_SUPPORTED_DP_LINK_RATES__SIZE; i++) + { + element[i] = 0; + } + } + + void clear() + { + entries = 0; + for (int i = 0; i < NV_SUPPORTED_DP_LINK_RATES__SIZE; i++) + { + element[i] = 0; + } + } + + // Only use import() if the element can be added at the end, + // since the element list needs to stay sorted. + bool import(NvU16 linkBw) + { + if (!IS_VALID_LINKBW_10M(linkBw)) + { + DP_ASSERT(0 && "Unsupported Link Bandwidth"); + return false; + } + + if (entries < NV_SUPPORTED_DP_LINK_RATES__SIZE) + { + element[entries] = linkBw; + entries++; + return true; + } + else + return false; + } + + // Use insert() to place linkBw at the right spot in the sorted + // element list if you are not sure how it compares + // to the existing entries. + bool insert(NvU16 linkBw) + { + if (!IS_VALID_LINKBW_10M(linkBw)) + { + DP_ASSERT(0 && "Unsupported Link Bandwidth"); + return false; + } + + for (int i = 0; i < entries; i++) + { + if (element[i] == linkBw) + { + // element already present, nothing to do here + return true; + } + else if (element[i] > linkBw) + { + // make space for the new element + if (entries >= NV_SUPPORTED_DP_LINK_RATES__SIZE) { + DP_ASSERT(0 && "No more space for adding additional link rate"); + return false; + } + + for (int j = entries-1; j >= i; j--) + { + element[j+1] = element[j]; + } + entries++; + // space is made, insert linkBw to the right spot + element[i] = linkBw; + return true; + } + + } + // If we got here without returning, that means linkBw is bigger than + // the current entries,
just import + return import(linkBw); + } + + LinkRate getLowerRate(LinkRate rate) + { + int i; + + if ((entries == 0) || (rate <= element[0])) + return 0; + + for (i = entries - 1; i > 0; i--) + { + if (rate > element[i]) + break; + } + + return ((LinkRate)element[i]); + } + + LinkRate getMaxRate() + { + LinkRate rate = 0; + if ((entries > 0) && + (entries <= NV_SUPPORTED_DP_LINK_RATES__SIZE)) + { + rate = (LinkRate)element[entries - 1]; + } + return rate; + } + + NvU8 getNumElements() + { + return NV_SUPPORTED_DP_LINK_RATES__SIZE; + } + + NvU8 getNumLinkRates() + { + return entries; + } + }; + class LinkPolicy : virtual public Object + { + protected: + bool bNoFallback; // No fallback when LT fails + LinkRates linkRates; + + public: + LinkPolicy() : bNoFallback(false) + { + } + bool skipFallback() + { + return bNoFallback; + } + void setSkipFallBack(bool bSkipFallback) + { + bNoFallback = bSkipFallback; + } + + LinkRates *getLinkRates() + { + return &linkRates; + } + }; + + enum + { + totalTimeslots = 64, + totalUsableTimeslots = totalTimeslots - 1 + }; + + // + // Link Data Rate per DP Lane, in MBPS, + // For 8b/10b channel coding: + // Link Data Rate = link rate * (8 / 10) / 8 + // = link rate * 0.1 + // For 128b/132b channel coding: + // Link Data Rate = link rate * (128 / 132) / 8 + // = link rate * 4 / 33 + // ~= link rate * 0.12 + // + // Link Bandwidth = Lane Count * Link Data Rate + // + enum + { + RBR = 162000000, + EDP_2_16GHZ = 216000000, + EDP_2_43GHZ = 243000000, + HBR = 270000000, + EDP_3_24GHZ = 324000000, + EDP_4_32GHZ = 432000000, + HBR2 = 540000000, + HBR3 = 810000000 + }; + + struct HDCPState + { + bool HDCP_State_Encryption; + bool HDCP_State_1X_Capable; + bool HDCP_State_22_Capable; + bool HDCP_State_Authenticated; + bool HDCP_State_Repeater_Capable; + }; + + struct HDCPValidateData + { + NvU8 vP[NV0073_CTRL_HDCP_VPRIME_SIZE]; + NvU64 aN; + NvU64 mP; + }; + + typedef enum + { + DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST, + DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST, + }DP_SINGLE_HEAD_MULTI_STREAM_MODE; + +#define HEAD_INVALID_STREAMS 0 +#define HEAD_DEFAULT_STREAMS 1 + + typedef enum + { + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY = 0, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY = 1, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_MAX = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY, + } DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID; + +#define DP_INVALID_SOR_INDEX 0xFFFFFFFF +#define DSC_DEPTH_FACTOR 16 + + class LinkConfiguration : virtual public Object + { + public: + LinkPolicy policy; + unsigned lanes; + LinkRate peakRatePossible; + LinkRate peakRate; + LinkRate minRate; + bool enhancedFraming; + bool multistream; + bool disablePostLTRequest; + bool bEnableFEC; + bool bDisableLTTPR; + bool bDisableDownspread; + // + // The counter to record how many times link training happens. 
+        // Client can reset the counter by calling setLTCounter(0)
+        //
+        unsigned linkTrainCounter;
+
+        LinkConfiguration() :
+            lanes(0), peakRatePossible(0), peakRate(0), minRate(0),
+            enhancedFraming(false), multistream(false), disablePostLTRequest(false),
+            bEnableFEC(false), bDisableLTTPR(false), bDisableDownspread(false),
+            linkTrainCounter(0) {};
+
+        LinkConfiguration(LinkPolicy * p, unsigned lanes, LinkRate peakRate,
+                          bool enhancedFraming, bool MST,
+                          bool disablePostLTRequest = false,
+                          bool bEnableFEC = false,
+                          bool bDisableLTTPR = false,
+                          bool bDisableDownspread = false);
+
+        void setLTCounter(unsigned counter)
+        {
+            linkTrainCounter = counter;
+        }
+
+        unsigned getLTCounter()
+        {
+            return linkTrainCounter;
+        }
+
+        // Returns data rate in Bytes per second
+        NvU64 convertLinkRateToDataRate(LinkRate linkRate) const
+        {
+            NvU64 dataRate;
+            dataRate = LINK_RATE_TO_DATA_RATE_8B_10B(linkRate);
+            return dataRate;
+        }
+
+        // Returns minRate as a data rate in Bytes per second
+        NvU64 convertMinRateToDataRate() const
+        {
+            NvU64 dataRate;
+            dataRate = DP_LINK_RATE_BITSPS_TO_BYTESPS(OVERHEAD_8B_10B(minRate));
+            return dataRate;
+        }
+
+        NvU64 getTotalDataRate() const
+        {
+            return (convertLinkRateToDataRate(peakRate) * lanes);
+        }
+
+        NvU64 linkOverhead(NvU64 rate10M)
+        {
+            NvU64 rate;
+            if (IS_VALID_LINKBW_10M(rate10M))
+            {
+                // Convert minRate from 10 Mbps units to bps
+                rate = DP_LINK_RATE_10M_TO_BPS(rate10M);
+            }
+            else
+            {
+                // Convert from data rate to bps
+                rate = DATA_RATE_8B_10B_TO_LINK_RATE_BPS(rate10M);
+            }
+
+            if (bEnableFEC)
+            {
+                // If FEC is enabled, account for the 3% FEC+downspread
+                // overhead required by the DP 1.4 spec.
+                return rate - 3 * rate / 100;
+            }
+            else
+            {
+                // If FEC is not enabled, the link overhead comprises only
+                // the 0.6% downspread.
+                return rate - 6 * rate / 1000;
+            }
+        }
+
+        void enableFEC(bool setFEC)
+        {
+            bEnableFEC = setFEC;
+            // If FEC is enabled, update minRate with FEC+downspread overhead.
+            minRate = linkOverhead(peakRate);
+        }
+
+        LinkConfiguration(unsigned long TotalLinkPBN)
+            : enhancedFraming(true),
+              multistream(true),
+              disablePostLTRequest(false),
+              bEnableFEC(false),
+              bDisableLTTPR(false),
+              bDisableDownspread(false),
+              linkTrainCounter(0)
+        {
+            //
+            // Reverse engineer a link configuration from TotalLinkPBN.
+            // Note that HBR2 is twice HBR, so the table below treats
+            // HBR2 x 1 the same as HBR x 2, and so on.
+            //
+            // PBN Calculation
+            //     One PBN is defined as "54/64 MBps".
+            //     Note this is the "data" actually transmitted on the main link,
+            //     so we need to take channel coding into consideration.
+            //     Formula: PBN = Lane Count * Link Rate (Gbps) * 1000 * (1/8) * ChannelCoding Efficiency * (64 / 54)
+            //     Example:
+            //     1. 4 * HBR2:   4 * 5.4 * 1000 * (1/8) * (8/10)    * (64/54) = 2560
+            //     2. 2 * UHBR10: 2 * 10  * 1000 * (1/8) * (128/132) * (64/54) = 2873
+            //
+            // Full list:
+            //
+            //     BW (Gbps)   Lanes   TotalLinkPBN
+            //     1.62        1       192
+            //     1.62        2       384
+            //     1.62        4       768
+            //     2.70        1       320
+            //     2.70        2       640
+            //     2.70        4       1280
+            //     5.40        1       640
+            //     5.40        2       1280
+            //     5.40        4       2560
+            //     8.10        1       960
+            //     8.10        2       1920
+            //     8.10        4       3840
+            //     10.00       1       1436
+            //     10.00       2       2873
+            //     10.00       4       5746
+            //     13.50       1       1939
+            //     13.50       2       3878
+            //     13.50       4       7757
+            //     20.00       1       2873
+            //     20.00       2       5746
+            //     20.00       4       11492
+            //
+
+            if (TotalLinkPBN <= 90)
+            {
+                peakRatePossible = dp2LinkRate_1_62Gbps;
+                peakRate = peakRatePossible;
+                minRate = linkOverhead(dp2LinkRate_1_62Gbps);
+                lanes = 0;      // FAIL
+            }
+            else if (TotalLinkPBN <= 192)
+            {
+                peakRatePossible = dp2LinkRate_1_62Gbps;
+                peakRate = peakRatePossible;
+                minRate = linkOverhead(dp2LinkRate_1_62Gbps);
+                lanes = 1;
+            }
+            else if (TotalLinkPBN <= 320)
+            {
+                peakRatePossible = dp2LinkRate_2_70Gbps;
+                peakRate = peakRatePossible;
+                minRate = linkOverhead(dp2LinkRate_2_70Gbps);
+                lanes = 1;
+            }
+            else if (TotalLinkPBN <= 384)
+            {
+                peakRatePossible = dp2LinkRate_1_62Gbps;
+                peakRate = peakRatePossible;
+                minRate = linkOverhead(dp2LinkRate_1_62Gbps);
+                lanes = 2;
+            }
+            else if (TotalLinkPBN <= 640)
+            {
+                // could be HBR2 x 1, but TotalLinkPBN works out the same
+                peakRatePossible = dp2LinkRate_2_70Gbps;
+                peakRate = peakRatePossible;
+                minRate = linkOverhead(dp2LinkRate_2_70Gbps);
+                lanes = 2;
+            }
+            else if (TotalLinkPBN <= 768)
+            {
+                peakRatePossible = dp2LinkRate_1_62Gbps;
+                peakRate = peakRatePossible;
+                minRate = linkOverhead(dp2LinkRate_1_62Gbps);
+                lanes = 4;
+            }
+            else if (TotalLinkPBN <= 960)
+            {
+                peakRatePossible = dp2LinkRate_8_10Gbps;
+                peakRate = peakRatePossible;
+                minRate = linkOverhead(dp2LinkRate_8_10Gbps);
+                lanes = 1;
+            }
+            else if (TotalLinkPBN <= 1280)
+            {
+                // could be HBR2 x 2
+                peakRatePossible = dp2LinkRate_2_70Gbps;
+                peakRate = peakRatePossible;
+                minRate = linkOverhead(dp2LinkRate_2_70Gbps);
+                lanes = 4;
+            }
+            else if (TotalLinkPBN <= 1920)
+            {
+                peakRatePossible = dp2LinkRate_8_10Gbps;
+                peakRate = peakRatePossible;
+                minRate = linkOverhead(dp2LinkRate_8_10Gbps);
+                lanes = 2;
+            }
+            else if (TotalLinkPBN <= 2560)
+            {
+                peakRatePossible = dp2LinkRate_5_40Gbps;
+                peakRate = peakRatePossible;
+                minRate = linkOverhead(dp2LinkRate_5_40Gbps);
+                lanes = 4;
+            }
+            else if (TotalLinkPBN <= 3840)
+            {
+                peakRatePossible = dp2LinkRate_8_10Gbps;
+                peakRate = peakRatePossible;
+                minRate = linkOverhead(dp2LinkRate_8_10Gbps);
+                lanes = 4;
+            }
+            else
+            {
+                peakRatePossible = dp2LinkRate_1_62Gbps;
+                peakRate = peakRatePossible;
+                minRate = linkOverhead(dp2LinkRate_1_62Gbps);
+                lanes = 0;      // FAIL
+                DP_ASSERT(0 && "Unknown configuration");
+            }
+        }
+
+        void setEnhancedFraming(bool newEnhancedFraming)
+        {
+            enhancedFraming = newEnhancedFraming;
+        }
+
+        bool isValid()
+        {
+            return lanes != laneCount_0;
+        }
+
+        bool lowerConfig(bool bReduceLaneCnt = false);
+
+        void setLaneRate(LinkRate newRate, unsigned newLanes)
+        {
+            peakRate = newRate;
+            lanes = newLanes;
+            minRate = linkOverhead(peakRate);
+        }
+
+        unsigned pbnTotal()
+        {
+            return PBNForSlots(totalUsableTimeslots);
+        }
+
+        NvU64 getBytesPerTimeslot();
+
+        void pbnRequired(const ModesetInfo & modesetInfo, unsigned & base_pbn, unsigned & slots, unsigned & slots_pbn);
+
+        NvU32 slotsForPBN(NvU32 allocatedPBN, bool usable = false);
+
+        NvU32 PBNForSlots(NvU32 slots);
+
+        bool operator!= (const LinkConfiguration & right) const
+        {
+            return !(*this == right);
+        }
+
+        bool operator== (const LinkConfiguration & right) const
+        {
+            return (this->lanes == right.lanes &&
+                    this->peakRate == right.peakRate &&
+                    this->enhancedFraming == right.enhancedFraming &&
+                    this->multistream == right.multistream &&
+                    this->bEnableFEC == right.bEnableFEC);
+        }
+
+        bool operator< (const LinkConfiguration & right) const
+        {
+            NvU64 leftMKBps  = getTotalDataRate();
+            NvU64 rightMKBps = right.getTotalDataRate();
+
+            if (leftMKBps == rightMKBps)
+            {
+                return (lanes < right.lanes);
+            }
+            else
+            {
+                return (leftMKBps < rightMKBps);
+            }
+        }
+    };
+
+}
+#endif //INCLUDED_DP_LINKCONFIG_H
diff --git a/src/common/displayport/inc/dp_linkedlist.h b/src/common/displayport/inc/dp_linkedlist.h
new file mode 100644
index 0000000..cb0b6f2
--- /dev/null
+++ b/src/common/displayport/inc/dp_linkedlist.h
@@ -0,0 +1,143 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_linkedlist.h                                                   *
+*    A linked list that uses DisplayPort::List as a backend, but which      *
+*    allocates the list backbone dynamically.                               *
+*                                                                           *
+\***************************************************************************/
+
+#ifndef INCLUDED_DP_LINKEDLIST_H
+#define INCLUDED_DP_LINKEDLIST_H
+
+#include "dp_list.h"
+
+namespace DisplayPort
+{
+    template <class T>
+    class LinkedList : public Object
+    {
+        // The Element class forms the list backbone and contains pointers to
+        // each item in the list.
+        class Element : public ListElement
+        {
+        public:
+            Element(T *item) : item(item) { }
+            T *item;
+        };
+
+        List list;
+
+        // No public copy constructor.
+        LinkedList(LinkedList &other) { }
+
+        // Find the Element containing an item.
+        Element *containing(T *item)
+        {
+            for (ListElement *le = list.begin(); le != list.end(); le = le->next)
+            {
+                Element *e = static_cast<Element *>(le);
+                if (e->item == item)
+                    return e;
+            }
+            return NULL;
+        }
+
+    public:
+        // The list starts out empty.
+        LinkedList() { }
+
+        // Insert an item at the front of the list.
+        void insertFront(T *item)
+        {
+            // Construct an element and add it to the list.
+            Element *e = new Element(item);
+            DP_ASSERT(e);
+            if (e)
+            {
+                list.insertFront(e);
+            }
+        }
+
+        // Remove an item from the list.
+        // O(n) to find the item to remove.
+        // It is an error to try to remove an item that is not in the list.
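+        // Note: only the backbone Element is deleted here; the item itself
+        // is not freed and remains owned by the caller.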
+        void remove(T *item)
+        {
+            Element *e = containing(item);
+            DP_ASSERT(e && "Item was not a member of the list");
+            delete e;
+        }
+
+        // Find the next item in the list after the specified item. If item is
+        // NULL, this returns the first item.
+        T *next(T *prev)
+        {
+            if (list.isEmpty())
+                return NULL;
+
+            // If prev is NULL or not in the list, return the first item.
+            Element *e = containing(prev);
+            if (!e)
+            {
+                e = static_cast<Element *>(list.begin());
+                return e->item;
+            }
+            else if (e->next != list.end())
+            {
+                e = static_cast<Element *>(e->next);
+                return e->item;
+            }
+            else
+            {
+                // prev was the last element in the list.
+                return NULL;
+            }
+        }
+
+        // Query whether an item is a member of the list.
+        // O(n)
+        bool contains(T *item)
+        {
+            Element *e = containing(item);
+            return e != NULL;
+        }
+
+        bool isEmpty()
+        {
+            return list.isEmpty();
+        }
+
+        T *pop()
+        {
+            DP_ASSERT(!list.isEmpty());
+            Element *e = static_cast<Element *>(list.last());
+            T *item = e->item;
+            delete e;
+            return item;
+        }
+    };
+}
+
+#endif // INCLUDED_DP_LINKEDLIST_H
diff --git a/src/common/displayport/inc/dp_list.h b/src/common/displayport/inc/dp_list.h
new file mode 100644
index 0000000..77fd759
--- /dev/null
+++ b/src/common/displayport/inc/dp_list.h
@@ -0,0 +1,84 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_list.h                                                         *
+*    Simple doubly linked list queue                                        *
+*                                                                           *
+\***************************************************************************/
+
+#ifndef INCLUDED_DP_LIST_H
+#define INCLUDED_DP_LIST_H
+
+#include "dp_object.h"
+
+namespace DisplayPort
+{
+    //
+    // List is an intrusive container; it may
+    // only contain elements that derive from ListElement.
+    //
+    // NOTE! Deleting an element automatically unlinks it
+    // from the enclosing container.
+    //
+    struct ListElement : virtual public Object
+    {
+        ListElement * next, * prev;
+
+        ListElement();
+        virtual ~ListElement();
+    };
+
+
+    class List : public ListElement
+    {
+    public:
+        bool isEmpty();
+        void insertFront(ListElement * item);
+        void insertBack(ListElement * item);
+        void insertBefore(ListElement * insertBeforeThis, ListElement * item);
+        void clear();
+        ListElement* front();
+        ListElement* last();
+
+        ListElement* begin() { return this->next; }
+        ListElement* end()   { return this; }
+
+        static ListElement * remove(ListElement * item);    // Removes but does not delete
+        bool contains(ListElement * item);
+        ListElement * replace(ListElement * replacement, ListElement * replacee);
+        List();
+        ~List();
+
+        unsigned size()
+        {
+            unsigned count = 0;
+            for (ListElement * i = begin(); i != end(); i = i->next)
+                count++;
+            return count;
+        }
+    };
+}
+
+#endif //INCLUDED_DP_LIST_H
diff --git a/src/common/displayport/inc/dp_mainlink.h b/src/common/displayport/inc/dp_mainlink.h
new file mode 100644
index 0000000..8707b5f
--- /dev/null
+++ b/src/common/displayport/inc/dp_mainlink.h
@@ -0,0 +1,276 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_mainlink.h                                                     *
+*    Mainlink interface implemented by client.                              *
+*                                                                           *
+\***************************************************************************/
+#ifndef INCLUDED_DP_MAINLINK_H
+#define INCLUDED_DP_MAINLINK_H
+
+#include "dp_linkconfig.h"
+#include "dp_vrr.h"
+#include "dp_wardatabase.h"
+#include "dp_auxdefs.h"
+#include "displayport.h"
+#include "ctrl/ctrl0073/ctrl0073dp.h"
+#include "dp_regkeydatabase.h"
+
+#define HDCP_DUMMY_CN   (0x1)
+#define HDCP_DUMMY_CKSV (0xFFFFF)
+
+namespace DisplayPort
+{
+    typedef enum
+    {
+        NONE,       // Abort it manually
+        UNTRUST,    // Abort due to Kp mismatch
+        UNRELBL,    // Abort due to repeated link failure
+        KSV_LEN,    // Abort due to KSV length
+        KSV_SIG,    // Abort due to KSV signature
+        SRM_SIG,    // Abort due to SRM signature
+        SRM_REV,    // Abort due to SRM revocation
+        NORDY,      // Abort due to repeater not ready
+        KSVTOP,     // Abort due to KSV topology error
+        BADBKSV     // Abort due to invalid Bksv
+    } AbortAuthReason;
+
+    // This is also used for DPCD offset 10B.
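+    // (DPCD 10Bh is the LINK_QUAL_LANE0_SET register; the encodings below
+    // are intended to line up with the pattern values defined there.)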
+    enum LinkQualityPatternType
+    {
+        LINK_QUAL_DISABLED,
+        LINK_QUAL_D10_2,
+        LINK_QUAL_SYM_ERROR,
+        LINK_QUAL_PRBS7,
+        LINK_QUAL_80BIT_CUST,
+        LINK_QUAL_HBR2_COMPLIANCE_EYE,
+        LINK_QUAL_CP2520PAT3,
+    };
+
+    typedef struct
+    {
+        LinkQualityPatternType lqsPattern;
+
+        //
+        // 80 bits of DP CSTM Test Pattern data;
+        //     ctsmLower  takes bits 31:0  (lowest 32 bits)
+        //     ctsmMiddle takes bits 63:32 (middle 32 bits)
+        //     ctsmUpper  takes bits 79:64 (highest 16 bits)
+        //
+        int ctsmLower;
+        int ctsmMiddle;
+        int ctsmUpper;
+    } PatternInfo;
+
+    typedef struct
+    {
+        unsigned char bcaps;
+        unsigned char bksv[5];
+        bool          hdcpCapable;
+        unsigned char updMask;
+    } RmDfpCache;
+
+    typedef enum
+    {
+        NORMAL_LINK_TRAINING,   // full LT
+        NO_LINK_TRAINING,
+        FAST_LINK_TRAINING,
+    } LinkTrainingType;
+
+    class MainLink : virtual public Object
+    {
+    public:
+        virtual bool physicalLayerSetTestPattern(PatternInfo * patternInfo) = 0;
+        //
+        // Wrappers for existing link training RM control calls
+        //
+        virtual bool train(const LinkConfiguration & link, bool force, LinkTrainingType linkTrainingType,
+                           LinkConfiguration *retLink, bool bSkipLt = false, bool isPostLtAdjRequestGranted = false,
+                           unsigned phyRepeaterCount = 0) = 0;
+
+        // RM control call to retrieve a buffer from RM for the DP library to dump logs into
+        virtual bool retrieveRingBuffer(NvU8 dpRingBuffertype, NvU32 numRecords) = 0;
+
+        //
+        // Requests to DD to perform pre & post link training steps,
+        // which may disconnect and later reconnect the head (for pre-GF119 GPUs)
+        //
+        virtual void preLinkTraining(NvU32 head) = 0;
+        virtual void postLinkTraining(NvU32 head) = 0;
+        virtual NvU32 getRegkeyValue(const char *key) = 0;
+        virtual const DP_REGKEY_DATABASE& getRegkeyDatabase() = 0;
+        virtual NvU32 getSorIndex() = 0;
+        virtual bool isInbandStereoSignalingSupported() = 0;
+
+        virtual bool isEDP() = 0;
+        virtual bool supportMSAOverMST() = 0;
+        virtual bool isForceRmEdidRequired() = 0;
+        virtual bool fetchEdidByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize) = 0;
+        virtual bool applyEdidOverrideByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize) = 0;
+
+        // Return whether the panel is dynamic MUX capable
+        virtual bool isDynamicMuxCapable() = 0;
+
+        // Return the current mux state. Returns false if not mux capable
+        virtual bool getDynamicMuxState(NvU32 *muxState) = 0;
+
+        // Return whether the internal panel is dynamic MUX capable
+        virtual bool isInternalPanelDynamicMuxCapable() = 0;
+
+        // Check if we should skip powering down the eDP panel when the head is detached.
+        virtual bool skipPowerdownEdpPanelWhenHeadDetach() = 0;
+
+        // Check if we should skip reading the PCON caps in the MST case.
+        virtual bool isMSTPCONCapsReadDisabled() = 0;
+
+        // Get GPU DSC capabilities
+        virtual void getDscCaps(bool *pbDscSupported = NULL,
+                                unsigned *pEncoderColorFormatMask = NULL,
+                                unsigned *pLineBufferSizeKB = NULL,
+                                unsigned *pRateBufferSizeKB = NULL,
+                                unsigned *pBitsPerPixelPrecision = NULL,
+                                unsigned *pMaxNumHztSlices = NULL,
+                                unsigned *pLineBufferBitDepth = NULL) = 0;
+
+        //
+        // Get the current link config.
+        // (Used for the boot case where EFI/VBIOS may have already trained
+        // the link. We need this to confirm the programming since
+        // we cannot rely on the DPCD registers being correct or sane.)
+        //
+        virtual void getLinkConfig(unsigned &laneCount, NvU64 & linkRate) = 0;
+
+        // Get the current link config with FEC
+        virtual void getLinkConfigWithFEC(unsigned &laneCount, NvU64 &linkRate, bool &bFECEnable) {};
+        // Get the max link config from UEFI.
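+        // (Given the NvU8 parameters, linkRate is presumably reported in
+        // DPCD LINK_BW_SET style units, e.g. 0x0A = 2.70 Gbps.)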
+        virtual bool getMaxLinkConfigFromUefi(NvU8 &linkRate, NvU8 &laneCount) = 0;
+        //
+        // Query if a head is attached to this DisplayId
+        //
+        virtual bool isActive() = 0;
+
+        virtual bool hasIncreasedWatermarkLimits() = 0;
+        virtual bool hasMultistream() = 0;
+        virtual bool isPC2Disabled() = 0;
+        virtual NvU32 getGpuDpSupportedVersions() = 0;
+        virtual bool isStreamCloningEnabled() = 0;
+        virtual bool isDpTunnelingHwBugWarEnabled() = 0;
+        virtual NvU32 maxLinkRateSupported() = 0;
+        virtual bool isLttprSupported() = 0;
+        virtual bool isFECSupported() = 0;
+
+        virtual bool setDpMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams) = 0;
+        virtual bool setDpStereoMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams) = 0;
+        virtual bool setFlushMode() = 0;
+        virtual void clearFlushMode(unsigned headMask, bool testMode = false) = 0;
+
+        //
+        // HDCP: renegotiate and trigger ACT.
+        //
+        virtual void configureHDCPRenegotiate(NvU64 cN = HDCP_DUMMY_CN, NvU64 cKsv = HDCP_DUMMY_CKSV, bool bForceReAuth = false, bool bRxIDMsgPending = false) = 0;
+        // HDCP: set ECF
+        virtual void configureAndTriggerECF(NvU64 ecf, NvBool bForceClearEcf = NV_FALSE, NvBool bAddStreamBack = NV_FALSE) = 0;
+        //
+        // Enable or disable Alternate Scrambler Seed Reset (ASSR)
+        // (used for embedded DisplayPort)
+        //
+        virtual void disableAlternateScramblerReset() = 0;
+        virtual void configureHDCPDisableAuthentication() = 0;
+        virtual void configureHDCPAbortAuthentication(AbortAuthReason abortAuthReason) = 0;
+        virtual bool setStreamType(unsigned streamIndex, NvU8 streamType, bool * bNeedReNegotiate) = 0;
+        virtual void configureHDCPValidateLink(HDCPValidateData &hdcpValidateData, NvU64 cN = HDCP_DUMMY_CN, NvU64 cKsv = HDCP_DUMMY_CKSV) = 0;
+        virtual void forwardPendingKsvListReady(NvBool bKsvListReady) = 0;
+        virtual void triggerACT() = 0;
+        virtual void configureHDCPGetHDCPState(HDCPState &hdcpState) = 0;
+
+        virtual NvU32 headToStream(NvU32 head, bool bSidebandMessageSupported,
+                                   DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY) = 0;
+
+        virtual void configureSingleStream(NvU32 head,
+                                           NvU32 hBlankSym,
+                                           NvU32 vBlankSym,
+                                           bool  bEnhancedFraming,
+                                           NvU32 tuSize,
+                                           NvU32 waterMark,
+                                           DP_COLORFORMAT colorFormat,
+                                           DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamId = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY,
+                                           DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultistreamMode = DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE,
+                                           bool bEnableAudioOverRightPanel = false,
+                                           bool bEnable2Head1Or = false) = 0;
+
+        virtual void configureMultiStream(NvU32 head,
+                                          NvU32 hBlankSym,
+                                          NvU32 vBlankSym,
+                                          NvU32 slotStart,
+                                          NvU32 slotEnd,
+                                          NvU32 PBN,
+                                          NvU32 Timeslice,
+                                          DP_COLORFORMAT colorFormat,
+                                          DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY,
+                                          DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultistreamMode = DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE,
+                                          bool bEnableAudioOverRightPanel = false,
+                                          bool bEnable2Head1Or = false) = 0;
+
+        virtual void configureSingleHeadMultiStreamMode(NvU32 displayIDs[],
+                                                        NvU32 numStreams,
+                                                        NvU32 mode,
+                                                        bool  bSetConfig,
+                                                        NvU8  vbiosPrimaryDispIdIndex = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY) = 0;
+
+        virtual void configureMsScratchRegisters(NvU32 address,
+                                                 NvU32 hopCount,
+                                                 NvU32 driverState) = 0;
+
+        virtual bool controlRateGoverning(NvU32 head, bool enable, bool updateNow = true) = 0;
+        virtual bool getDpTestPattern(NV0073_CTRL_DP_TESTPATTERN * testPattern) = 0;
+        virtual bool setDpTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern,
+                                      NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm,
+                                      NvBool bIsHBR2, NvBool bSkipLaneDataOverride = false) = 0;
+        virtual bool getDpLaneData(NvU32 *numLanes, NvU32 *data) = 0;
+        virtual bool setDpLaneData(NvU32 numLanes, NvU32 *data) = 0;
+        virtual bool rmUpdateDynamicDfpCache(NvU32 headIndex, RmDfpCache * dfpCache, NvBool bResetDfp) = 0;
+        virtual void configurePowerState(bool bPowerUp) = 0;
+        virtual NvU32 monitorDenylistInfo(NvU32 ManufacturerID, NvU32 ProductID, DpMonitorDenylistData *pDenylistData) = 0;
+        virtual NvU32 getRootDisplayId() = 0;
+        virtual NvU32 allocDisplayId() = 0;
+        virtual bool freeDisplayId(NvU32 displayId) = 0;
+        virtual bool queryGPUCapability() { return false; }
+        virtual bool isAvoidHBR3WAREnabled() = 0;
+        virtual bool queryAndUpdateDfpParams() = 0;
+        virtual bool getEdpPowerData(bool *panelPowerOn, bool *bDPCDPowerStateD0) = 0;
+        virtual bool vrrRunEnablementStage(unsigned stage, NvU32 *status) = 0;
+
+        virtual void configureTriggerSelect(NvU32 head,
+                                            DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY) = 0;
+
+        virtual void configureTriggerAll(NvU32 head, bool enable) = 0;
+        virtual bool dscCrcTransaction(NvBool bEnable, gpuDscCrc *data, NvU16 *headIndex) { return false; }
+        virtual bool configureLinkRateTable(const NvU16 *pLinkRateTable, LinkRates *pLinkRates) = 0;
+        virtual bool configureFec(const bool bEnableFec) = 0;
+    };
+}
+
+#endif //INCLUDED_DP_MAINLINK_H
diff --git a/src/common/displayport/inc/dp_merger.h b/src/common/displayport/inc/dp_merger.h
new file mode 100644
index 0000000..ff57de7
--- /dev/null
+++ b/src/common/displayport/inc/dp_merger.h
@@ -0,0 +1,148 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_merger.h                                                       *
+*    Asynchronous Message merger                                            *
+*                                                                           *
+\***************************************************************************/
+
+#ifndef INCLUDED_DP_MERGER_H
+#define INCLUDED_DP_MERGER_H
+
+#include "dp_list.h"
+#include "dp_auxretry.h"
+#include "dp_timer.h"
+#include "dp_bitstream.h"
+#include "dp_address.h"
+#include "dp_messageheader.h"
+#include "dp_configcaps.h"
+
+namespace DisplayPort
+{
+    // after 4 secs delete dead transactions
+    #define DP_INCOMPLETE_MESSAGE_TIMEOUT_USEC 4000000
+    struct EncodedMessage;
+
+    class MessageTransactionMerger : virtual public Object
+    {
+        class IncompleteMessage : public ListElement
+        {
+        public:
+            EncodedMessage message;
+            NvU64 lastUpdated;
+        };
+
+        List incompleteMessages;
+        Timer * timer;
+        NvU64 incompleteMessageTimeoutMs;
+        IncompleteMessage * freeOnNextCall;     // No need to delete this on destruct,
+                                                // since it is ALSO a member of the list we own
+
+        IncompleteMessage * getTransactionRecord(const Address & address, unsigned messageNumber);
+    public:
+        MessageTransactionMerger(Timer * timer, unsigned incompleteMessageTimeoutMs)
+            : timer(timer), incompleteMessageTimeoutMs(incompleteMessageTimeoutMs), freeOnNextCall(0)
+        {
+        }
+
+        //
+        // Pushes data into the queue and returns an encoded
+        // message if an entire message is assembled.
+        //
+        EncodedMessage * pushTransaction(MessageHeader * header, Buffer * data);
+    };
+
+    class IncomingTransactionManager : virtual public Object
+    {
+    public:
+        class IncomingTransactionManagerEventSink
+        {
+        public:
+            virtual void messagedReceived(IncomingTransactionManager * from, EncodedMessage * message) = 0;
+        };
+
+        void mailboxInterrupt();
+
+        //
+        // Create a message merger object
+        //  - sink is called whenever a new message is received
+        //    Callback::fired is passed an IncompleteMessage as the data arg.
+        //
+        IncomingTransactionManager(Timer * timerInterface, const Address & addressPrefix, IncomingTransactionManagerEventSink * sink);
+        virtual ~IncomingTransactionManager();
+
+    protected:
+        virtual AuxRetry::status readMessageBox(NvU32 offset, NvU8 * data, size_t length) = 0;
+        virtual size_t getMessageBoxSize() = 0;
+        virtual size_t getTransactionSize() = 0;
+        virtual void clearMessageBoxInterrupt() = 0;
+    private:
+        MessageTransactionMerger incompleteMessages;    // list of partially assembled messages
+
+        Buffer localWindow;
+        Timer * timer;
+        IncomingTransactionManagerEventSink * sink;
+        Address addressPrefix;  // This is the aux address of the downstream port.
+                                // This field will be prepended to the address decoded.
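+
+        //
+        // Typical flow, for orientation: mailboxInterrupt() drains the DPCD
+        // message box via readMessageBox(), feeds each transaction to the
+        // MessageTransactionMerger, and forwards every fully reassembled
+        // EncodedMessage to the event sink.
+        //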
+    };
+
+    class DownReplyManager : public IncomingTransactionManager
+    {
+    public:
+        DownReplyManager(DPCDHAL * hal, Timer * timer, const Address & addressPrefix, IncomingTransactionManagerEventSink * sink)
+            : IncomingTransactionManager(timer, addressPrefix, sink), hal(hal)
+        {
+        }
+        virtual ~DownReplyManager() {}
+
+    protected:
+        DPCDHAL * hal;
+
+        virtual AuxRetry::status readMessageBox(NvU32 offset, NvU8 * data, size_t length);
+        virtual size_t getMessageBoxSize();
+        virtual size_t getTransactionSize();
+        virtual void clearMessageBoxInterrupt();
+    };
+
+    class UpRequestManager : public IncomingTransactionManager
+    {
+    public:
+        UpRequestManager(DPCDHAL * hal, Timer * timer, const Address & addressPrefix, IncomingTransactionManagerEventSink * sink)
+            : IncomingTransactionManager(timer, addressPrefix, sink), hal(hal)
+        {
+        }
+        virtual ~UpRequestManager() {}
+    protected:
+        DPCDHAL * hal;
+
+        virtual AuxRetry::status readMessageBox(NvU32 offset, NvU8 * data, size_t length);
+        virtual size_t getMessageBoxSize();
+        virtual size_t getTransactionSize();
+        virtual void clearMessageBoxInterrupt();
+    };
+}
+
+#endif //INCLUDED_DP_MERGER_H
diff --git a/src/common/displayport/inc/dp_messagecodings.h b/src/common/displayport/inc/dp_messagecodings.h
new file mode 100644
index 0000000..3a5c5c8
--- /dev/null
+++ b/src/common/displayport/inc/dp_messagecodings.h
@@ -0,0 +1,663 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_messagecodings.h                                               *
+*    Encoding routines for various messages.                                *
+*                                                                           *
+\***************************************************************************/
+#ifndef INCLUDED_DP_MESSAGECODINGS_H
+#define INCLUDED_DP_MESSAGECODINGS_H
+
+#include "dp_messages.h"
+#include "displayport.h"
+#include "dp_auxdefs.h"
+
+/* Fields for the HDCP stream status */
+#define NV_DP_HDCP_STREAM_STATE                                1:0
+#define NV_DP_HDCP_STREAM_STATE_NO_EXIST               (0x00000000)
+#define NV_DP_HDCP_STREAM_STATE_NOT_ACTIVE             (0x00000001)
+#define NV_DP_HDCP_STREAM_STATE_ACTIVE                 (0x00000002)
+#define NV_DP_HDCP_STREAM_STATE_ERROR                  (0x00000003)
+#define NV_DP_HDCP_STREAM_REPEATER                             2:2
+#define NV_DP_HDCP_STREAM_REPEATER_SIMPLE              (0x00000000)
+#define NV_DP_HDCP_STREAM_REPEATER_REPEATER            (0x00000001)
+#define NV_DP_HDCP_STREAM_ENCRYPTION                           3:3
+#define NV_DP_HDCP_STREAM_ENCRYPTION_OFF               (0x00000000)
+#define NV_DP_HDCP_STREAM_ENCRYPTION_ON                (0x00000001)
+#define NV_DP_HDCP_STREAM_AUTHENTICATION                       4:4
+#define NV_DP_HDCP_STREAM_AUTHENTICATION_OFF           (0x00000000)
+#define NV_DP_HDCP_STREAM_AUTHENTICATION_IP            (0x00000000)
+#define NV_DP_HDCP_STREAM_AUTHENTICATION_ON            (0x00000001)
+#define NV_DP_HDCP_STREAM_OUTPUT_SINK_LEGACY                   8:8
+#define NV_DP_HDCP_STREAM_OUTPUT_SINK_LEGACY_NO        (0x00000000)
+#define NV_DP_HDCP_STREAM_OUTPUT_SINK_LEGACY_YES       (0x00000001)
+#define NV_DP_HDCP_STREAM_OUTPUT_SINK_NON_DP1_2_CP             9:9
+#define NV_DP_HDCP_STREAM_OUTPUT_SINK_NON_DP1_2_CP_NO  (0x00000000)
+#define NV_DP_HDCP_STREAM_OUTPUT_SINK_NON_DP1_2_CP_YES (0x00000001)
+#define NV_DP_HDCP_STREAM_OUTPUT_SINK_MULTI                  10:10
+#define NV_DP_HDCP_STREAM_OUTPUT_SINK_MULTI_NO         (0x00000000)
+#define NV_DP_HDCP_STREAM_OUTPUT_SINK_MULTI_YES        (0x00000001)
+#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP1X              11:11
+#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP1X_NO     (0x00000000)
+#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP1X_YES    (0x00000001)
+#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP2X              12:12
+#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP2X_NO     (0x00000000)
+#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP2X_YES    (0x00000001)
+
+namespace DisplayPort
+{
+    typedef NakData Message_NakData;
+
+    enum
+    {
+        REMOTE_READ_BUFFER_SIZE = 128,
+    };
+
+    typedef enum
+    {
+        None,
+        UpstreamSourceOrSSTBranch,
+        DownstreamBranch,
+        DownstreamSink,
+        Dongle
+    } PeerDevice;
+
+    struct I2cWriteTransaction
+    {
+        I2cWriteTransaction(unsigned WriteI2cDeviceId, unsigned NumBytes,
+                            unsigned char * buffer, bool NoStopBit = false,
+                            unsigned I2cTransactionDelay = 0);
+        I2cWriteTransaction();
+        unsigned WriteI2cDeviceId;
+        unsigned NumBytes;
+        unsigned char *I2cData;
+        bool NoStopBit;
+        unsigned I2cTransactionDelay;
+    };
+
+    typedef enum
+    {
+        DoesNotExist = 0,
+        NotActive    = 1,
+        Active       = 2,
+    } StreamState;
+
+    typedef enum
+    {
+        CP_IRQ_ON = 0,
+        No_EVENT  = 1
+    } StreamEvent;
+
+    typedef enum
+    {
+        STREAM_BEHAVIOUR_MASK_OFF = 0,
+        STREAM_BEHAVIOUR_MASK_ON  = 1
+    } StreamBehaviorMask;
+
+    typedef enum
+    {
+        STREAM_EVENT_MASK_OFF = 0,
+        STREAM_EVENT_MASK_ON  = 1
+    } StreamEventMask;
+
+    typedef enum
+    {
+        Force_Reauth = 0,
+        BlockFlow    = 1
+    } StreamBehavior;
+
+    typedef enum
+    {
+        StreamUnconnected    = 0,
+        NonAuthLegacyDevice  = 1,   // TV or CRT
+        Non12CPOrNonQSE      = 2,   // DVI/HDMI or DP 1.1 sink/repeater
+        DP_MST               = 4
+    } OutputSinkType;
+
+    typedef enum
+    {
+        HDCP1x = 1,
+        HDCP2x = 2
+    } OutputCPType;
+
+    typedef enum
+    {
+        SinkEvent0,
+        SinkEvent255 = 0xFF
+    } SinkEvent;
+
+    //
+    // LINK_ADDRESS                 0x1
+    //
+    class LinkAddressMessage : public MessageManager::Message
+    {
+    public:
+        struct Result
+        {
+            bool isInputPort;
+            PeerDevice peerDeviceType;
+            unsigned portNumber;
+            bool hasMessaging;
+            bool dpPlugged;
+            bool legacyPlugged;
+            unsigned dpcdRevisionMajor;
+            unsigned dpcdRevisionMinor;
+            GUID peerGUID;
+            unsigned SDPStreams;
+            unsigned SDPStreamSinks;
+        };
+
+        virtual ParseResponseStatus parseResponseAck(EncodedMessage * message,
+                                                     BitStreamReader * reader);
+
+    private:
+        struct
+        {
+            GUID guid;              // originating branch device
+            unsigned numberOfPorts;
+            Result res[16];
+        } reply;
+
+    public:
+        LinkAddressMessage() : Message(NV_DP_SBMSG_REQUEST_ID_LINK_ADDRESS,
+                                       NV_DP_SBMSG_PRIORITY_LEVEL_2)
+        {
+            dpMemZero(&reply, sizeof(reply));
+        }
+
+        // Second stage init kept separate from constructor (reusable message)
+        void set(const Address & target);
+
+        void getGUID(GUID & guid) { guid = reply.guid; }
+
+        // Number of ports described
+        unsigned resultCount() { return reply.numberOfPorts; }
+        const Result * result(unsigned index)
+        {
+            return &reply.res[index];
+        }
+    };
+
+    //
+    // CONNECTION_STATUS_NOTIFY     0x2
+    //
+    class ConnStatusNotifyMessage : public MessageManager::MessageReceiver
+    {
+    public:
+        typedef struct
+        {
+            GUID guid;
+            unsigned port;
+            bool legacyPlugged;
+            bool devicePlugged;
+            bool messagingCapability;
+            bool isInputPort;
+            PeerDevice peerDeviceType;
+        } Request;
+
+    protected:
+        Request request;
+
+    public:
+        Request * getUpRequestData() { return &request; }
+        virtual bool processByType(EncodedMessage * message, BitStreamReader * reader);
+        ConnStatusNotifyMessage(MessageReceiverEventSink * sink);
+    };
+
+    //
+    // GENERIC_UP_REPLY             0xnn
+    //
+    class GenericUpReplyMessage : public MessageManager::Message
+    {
+        virtual ParseResponseStatus parseResponseAck(EncodedMessage * message,
+                                                     BitStreamReader * reader);
+
+        virtual void expired(const void * tag)
+        { }
+
+    public:
+        GenericUpReplyMessage(const Address & target, unsigned requestId,
+                              bool bReplyIsNack = false, bool bBroadcast = true,
+                              bool bPath = false);
+        GenericUpReplyMessage(unsigned requestId, bool bReplyIsNack,
+                              bool bBroadcast, bool bPath);
+        void set(const Address & target, bool bReplyIsNack = false,
+                 bool bBroadcast = true, bool bPath = false);
+    };
+
+    //
+    // CLEAR_PAYLOAD_ID_TABLE       0x14
+    //
+    class ClearPayloadIdTableMessage : public MessageManager::Message
+    {
+        virtual ParseResponseStatus parseResponseAck(EncodedMessage * message,
+                                                     BitStreamReader * reader);
+        virtual ParseResponseStatus parseResponse(EncodedMessage * message);
+    public:
+        ClearPayloadIdTableMessage();
+    };
+
+    //
+    // ENUM_PATH_RESOURCES          0x10
+    //
+    class EnumPathResMessage : public MessageManager::Message
+    {
+        virtual ParseResponseStatus parseResponseAck(EncodedMessage * message,
+                                                     BitStreamReader * reader);
+
+    public:
+        struct
+        {
+            unsigned portNumber;
+            unsigned availableStreams;
+            bool     bFECCapability;
+            unsigned TotalPBN;
+            unsigned FreePBN;
+            unsigned DFPLinkAvailablePBN;
+        } reply;
+        EnumPathResMessage(const Address & target, unsigned port, bool point);
+    };
+
+    //
+    // ALLOCATE_PAYLOAD             0x11
+    //
+    class AllocatePayloadMessage : public MessageManager::Message
+    {
+        virtual ParseResponseStatus parseResponseAck(EncodedMessage * message,
+                                                     BitStreamReader * reader);
+
+    private:
+        struct
+        {
+            unsigned portNumber;
+            unsigned PBN;
+            unsigned virtualChannelPayloadId;
+        } reply;
+
+    public:
+
+        AllocatePayloadMessage() : Message(NV_DP_SBMSG_REQUEST_ID_ALLOCATE_PAYLOAD,
+                                           NV_DP_SBMSG_PRIORITY_LEVEL_4)
+        {
+            dpMemZero(&reply, sizeof(reply));
+        }
+
+        void set(const Address & target,
+                 unsigned port,
+                 unsigned nSDPStreams,
+                 unsigned vcPayloadId,
+                 unsigned PBN,
+                 unsigned* SDPStreamSink,
+                 bool entirePath);
+
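+        // For illustration, assuming an Address 'addr' plus a payload ID and
+        // PBN value already computed by the caller:
+        //
+        //     unsigned sdpSink = 0;
+        //     AllocatePayloadMessage msg;
+        //     msg.set(addr, port, 1 /*SDP streams*/, payloadId, pbn,
+        //             &sdpSink, false /*entirePath*/);
+        //     // then hand 'msg' to the MessageManager for transmission
+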
+        unsigned replyPortNumber() { return reply.portNumber; }
+        unsigned replyPBN() { return reply.PBN; }
+        unsigned replyVirtualChannelPayloadId() { return reply.virtualChannelPayloadId; }
+
+    };
+
+    //
+    // QUERY_PAYLOAD                0x12
+    //
+    class QueryPayloadMessage : public MessageManager::Message
+    {
+        virtual ParseResponseStatus parseResponseAck(EncodedMessage * message,
+                                                     BitStreamReader * reader);
+
+    private:
+        struct
+        {
+            unsigned portNumber;
+            unsigned allocatedPBN;
+        } reply;
+
+    public:
+        QueryPayloadMessage(const Address & target,
+                            unsigned port,
+                            unsigned vcPayloadId);
+
+        unsigned replyPortNumber()   { return reply.portNumber; }
+        unsigned replyAllocatedPBN() { return reply.allocatedPBN; }
+    };
+
+    //
+    // RESOURCE_STATUS_NOTIFY       0x13
+    //
+    class ResStatusNotifyMessage : public MessageManager::MessageReceiver
+    {
+        virtual bool processByType(EncodedMessage * message,
+                                   BitStreamReader * reader);
+    public:
+        struct
+        {
+            unsigned port;
+            unsigned availableStreams;
+            bool     bFECCapability;
+            GUID     guid;
+            unsigned PBN;
+        } request;
+
+    public:
+        ResStatusNotifyMessage(MessageReceiverEventSink * sink);
+    };
+
+    //
+    // REMOTE_DPCD_READ             0x20
+    //
+    class RemoteDpcdReadMessage : public MessageManager::Message
+    {
+        virtual ParseResponseStatus parseResponseAck(EncodedMessage * message,
+                                                     BitStreamReader * reader);
+
+    private:
+        struct
+        {
+            unsigned portNumber;
+            unsigned numBytesReadDPCD;
+            unsigned char readData[REMOTE_READ_BUFFER_SIZE];    // Buffer
+        } reply;
+
+    public:
+        void set(const Address & target,
+                 unsigned port,
+                 unsigned dpcdAddress,
+                 unsigned nBytesToRead);
+
+        RemoteDpcdReadMessage() : Message(NV_DP_SBMSG_REQUEST_ID_REMOTE_DPCD_READ,
+                                          NV_DP_SBMSG_PRIORITY_LEVEL_3)
+        {
+            dpMemZero(&reply, sizeof(reply));
+        }
+
+        unsigned replyPortNumber()        { return reply.portNumber; }
+        unsigned replyNumOfBytesReadDPCD() { return reply.numBytesReadDPCD; }
+
+        const NvU8 * replyGetData()
+        {
+            return reply.readData;
+        }
+    };
+
+    //
+    // REMOTE_DPCD_WRITE            0x21
+    //
+    class RemoteDpcdWriteMessage : public MessageManager::Message
+    {
+        virtual ParseResponseStatus parseResponseAck(EncodedMessage * message,
+                                                     BitStreamReader * reader);
+    public:
+        void set(const Address & target,
+                 unsigned port,
+                 unsigned dpcdAddress,
+                 unsigned nBytesToWrite,
+                 const NvU8 * writeData);
+
+        RemoteDpcdWriteMessage() : Message(NV_DP_SBMSG_REQUEST_ID_REMOTE_DPCD_WRITE,
+                                           NV_DP_SBMSG_PRIORITY_LEVEL_3) {}
+    };
+
+    //
+    // REMOTE_I2C_READ              0x22
+    //
+    class RemoteI2cReadMessage : public MessageManager::Message
+    {
+        virtual ParseResponseStatus parseResponseAck(EncodedMessage * message,
+                                                     BitStreamReader * reader);
+    private:
+        struct
+        {
+            unsigned portNumber;
+            unsigned numBytesReadI2C;
+            unsigned char readData[REMOTE_READ_BUFFER_SIZE];
+        } reply;
+
+    public:
+
+        RemoteI2cReadMessage() : Message(NV_DP_SBMSG_REQUEST_ID_REMOTE_I2C_READ,
+                                         NV_DP_SBMSG_PRIORITY_LEVEL_3)
+        {
+            dpMemZero(&reply, sizeof(reply));
+        }
+
+        void set(const Address & target,
+                 unsigned nWriteTransactions,
+                 unsigned port,
+                 I2cWriteTransaction* transactions,
+                 unsigned readI2cDeviceId,
+                 unsigned nBytesToRead);
+
+        unsigned replyPortNumber()       { return reply.portNumber; }
+        unsigned replyNumOfBytesReadI2C() { return reply.numBytesReadI2C; }
+        unsigned char* replyGetI2CData(unsigned* numBytes)
+        {
+            *numBytes = this->replyNumOfBytesReadI2C();
+            return reply.readData;
+        }
+    };
+
+    //
+    // REMOTE_I2C_WRITE             0x23
+    //
+    class RemoteI2cWriteMessage : public MessageManager::Message
+    {
+        virtual ParseResponseStatus parseResponseAck(EncodedMessage * message,
+                                                     BitStreamReader * reader);
+    private:
+        struct
+        {
+            unsigned portNumber;
+        } reply;
+
+    public:
+
+        RemoteI2cWriteMessage() : Message(NV_DP_SBMSG_REQUEST_ID_REMOTE_I2C_WRITE,
+                                          NV_DP_SBMSG_PRIORITY_LEVEL_3)
+        {
+            dpMemZero(&reply, sizeof(reply));
+        }
+
+        void set(const Address & target,
+                 unsigned port,
+                 unsigned writeI2cDeviceId,
+                 unsigned nBytesToWrite,
+                 unsigned char* writeData);
+
+        unsigned replyPortNumber() { return reply.portNumber; }
+    };
+
+    //
+    // POWER_UP_PHY                 0x24
+    //
+    class PowerUpPhyMessage : public MessageManager::Message
+    {
+        virtual ParseResponseStatus parseResponseAck(EncodedMessage * message,
+                                                     BitStreamReader * reader);
+
+    private:
+        struct
+        {
+            unsigned portNumber;
+        } reply;
+
+    public:
+        PowerUpPhyMessage() : Message(NV_DP_SBMSG_REQUEST_ID_POWER_UP_PHY,
+                                      NV_DP_SBMSG_PRIORITY_LEVEL_3)
+        {
+            dpMemZero(&reply, sizeof(reply));
+        }
+
+        void set(const Address & target,
+                 unsigned port,
+                 bool entirePath);
+
+        unsigned replyPortNumber() { return reply.portNumber; }
+    };
+
+    //
+    // POWER_DOWN_PHY               0x25
+    //
+    class PowerDownPhyMessage : public MessageManager::Message
+    {
+        virtual ParseResponseStatus parseResponseAck(EncodedMessage * message,
+                                                     BitStreamReader * reader);
+
+    private:
+        struct
+        {
+            unsigned portNumber;
+        } reply;
+
+    public:
+        PowerDownPhyMessage() : Message(NV_DP_SBMSG_REQUEST_ID_POWER_DOWN_PHY,
+                                        NV_DP_SBMSG_PRIORITY_LEVEL_3)
+        {
+            dpMemZero(&reply, sizeof(reply));
+        }
+
+        void set(const Address & target,
+                 unsigned port,
+                 bool entirePath);
+
+        unsigned replyPortNumber() { return reply.portNumber; }
+    };
+
+    //
+    // SINK_EVENT_NOTIFY            0x30
+    //
+    class SinkEventNotifyMessage : public MessageManager::MessageReceiver
+    {
+        virtual bool processByType(EncodedMessage * message, BitStreamReader * reader);
+
+    public:
+        SinkEventNotifyMessage(MessageReceiverEventSink * sink, unsigned requestId);
+    };
+
+    //
+    // QUERY_STREAM_ENCRYPTION_STATUS 0x38
+    //
+    class QueryStreamEncryptionMessage : public MessageManager::Message
+    {
+        virtual ParseResponseStatus parseResponseAck(EncodedMessage * message,
+                                                     BitStreamReader * reader);
+
+    private:
+        struct QSES_REPLY
+        {
+            StreamState streamState;
+            bool repeaterFuncPresent;
+            bool encryption;
+            bool authentication;
+            OutputSinkType sinkType;
+            OutputCPType cpType;
+            bool signedLPrime;
+            NvU8 streamId;
+        } reply;
+
+        bool bIsHdcp22Qse;
+
+    public:
+        QueryStreamEncryptionMessage() :
+            Message(NV_DP_SBMSG_REQUEST_ID_QUERY_STREAM_ENCRYPTION_STATUS,
+                    NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT)
+        {
+            dpMemZero(&reply, sizeof(reply));
+            bIsHdcp22Qse = false;
+        }
+
+        void set(const Address & target,
+                 unsigned streamId,
+                 NvU8* clientId,
+                 StreamEvent streamEvent,
+                 bool streamEventMask,
+                 StreamBehavior streamBehavior,
+                 bool streamBehaviorMask);
+        NvU8 getStreamId()
+        {
+            return reply.streamId;
+        }
+
+        void getReply(void *p)
+        {
+            *(struct QSES_REPLY *)p = reply;
+        }
+
+        NvU16 getStreamStatus()
+        {
+            //
+            // Note: the "(1 ? ...)" expressions below are the usual NVIDIA
+            // register-field idiom. Each field macro expands to a bit range
+            // such as 2:2, so the ternary picks the high bit position of the
+            // field to shift by; they are not stray conditionals.
+            //
+            NvU16 streamStatus = 0;
+
+            streamStatus = (NvU16)reply.streamState;
+
+            if (reply.repeaterFuncPresent)
+                streamStatus |= 1 << (1 ? NV_DP_HDCP_STREAM_REPEATER);
+            if (reply.encryption)
+                streamStatus |= 1 << (1 ? NV_DP_HDCP_STREAM_ENCRYPTION);
+            if (reply.authentication)
+                streamStatus |= 1 << (1 ? NV_DP_HDCP_STREAM_AUTHENTICATION);
+
+            if (reply.sinkType != StreamUnconnected)
+            {
+                if (reply.sinkType & DP_MST)
+                {
+                    streamStatus |= 1 << (1 ? NV_DP_HDCP_STREAM_OUTPUT_SINK_MULTI);
+                }
+
+                if (reply.sinkType & Non12CPOrNonQSE)
+                {
+                    streamStatus |= 1 << (1 ? NV_DP_HDCP_STREAM_OUTPUT_SINK_NON_DP1_2_CP);
+                }
+
+                if (reply.sinkType & NonAuthLegacyDevice)
+                {
+                    streamStatus |= 1 << (1 ? NV_DP_HDCP_STREAM_OUTPUT_SINK_LEGACY);
+                }
+            }
+
+            if (reply.cpType == HDCP1x)
+            {
+                streamStatus |= 1 << (1 ? NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP1X);
+            }
+            else if (reply.cpType == HDCP2x)
+            {
+                streamStatus |= 1 << (1 ? NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP2X);
+            }
+
+            return streamStatus;
+        }
+
+        void setHdcp22Qse(bool bHdcp22Qse)
+        {
+            bIsHdcp22Qse = bHdcp22Qse;
+        }
+    };
+
+}
+
+#endif //INCLUDED_DP_MESSAGECODINGS_H
diff --git a/src/common/displayport/inc/dp_messageheader.h b/src/common/displayport/inc/dp_messageheader.h
new file mode 100644
index 0000000..3d09e6d
--- /dev/null
+++ b/src/common/displayport/inc/dp_messageheader.h
@@ -0,0 +1,94 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_messageheader.h                                                *
+*    DP message header parser                                               *
+*                                                                           *
+\***************************************************************************/
+
+#ifndef INCLUDED_DP_MESSAGEHEADER_H
+#define INCLUDED_DP_MESSAGEHEADER_H
+
+#include "dp_internal.h"
+#include "dp_list.h"
+#include "dp_auxretry.h"
+#include "dp_timer.h"
+#include "dp_bitstream.h"
+#include "dp_address.h"
+
+namespace DisplayPort
+{
+    //
+    // User filled message structure
+    //
+    #define MAX_MESSAGE_SIZE 64
+    struct EncodedMessage : public Object
+    {
+        unsigned messageNumber;     // 0 or 1
+        Address address;            // target device for message (source for reply)
+        Buffer buffer;
+        bool isBroadcast;
+        bool isPathMessage;
+
+        EncodedMessage()
+            : messageNumber(0), isBroadcast(false), isPathMessage(false)
+        {}
+
+        void swap(EncodedMessage & other)
+        {
+            swap_args(messageNumber, other.messageNumber);
+            swap_args(address, other.address);
+            swap_args(isBroadcast, other.isBroadcast);
+            swap_args(isPathMessage, other.isPathMessage);
+            buffer.swap(other.buffer);
+        }
+    };
+
+    //
+    // Decoded message header
+    //
+    struct MessageHeader
+    {
+        Address address;
+        unsigned messageNumber;
+        unsigned payloadBytes;
+        bool isBroadcast;
+        bool isPathMessage;
+        bool isTransactionStart;
+        bool isTransactionEnd;
+        unsigned headerSizeBits;
+    };
+
+    bool decodeHeader(BitStreamReader * reader, MessageHeader * header, const Address & address);
+
+    //
+    // Routines for maintaining a list of partially complete messages
+    //
+
+    // after 4 secs delete dead transactions
+    #define DP_INCOMPLETE_MESSAGE_TIMEOUT_USEC 4000000
+
+}
+#endif //INCLUDED_DP_MESSAGEHEADER_H
diff --git a/src/common/displayport/inc/dp_messages.h b/src/common/displayport/inc/dp_messages.h
new file mode 100644
index 0000000..8e344dd
--- /dev/null
+++ b/src/common/displayport/inc/dp_messages.h
@@ -0,0 +1,376 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_messages.h                                                     *
+*    Encoding routines for aux common messages.                             *
+*                                                                           *
+\***************************************************************************/
+#ifndef INCLUDED_DP_MESSAGES_H
+#define INCLUDED_DP_MESSAGES_H
+
+#include "dp_address.h"
+#include "dp_bitstream.h"
+#include "dp_splitter.h"
+#include "dp_merger.h"
+#include "dp_crc.h"
+#include "dp_list.h"
+#include "dp_connector.h"
+#include "dp_messageheader.h"
+#include "dp_auxdefs.h"
+
+namespace DisplayPort
+{
+    bool extractGUID(BitStreamReader * reader, GUID * guid);
+
+    typedef enum
+    {
+        NakUndefined,
+        NakWriteFailure,
+        NakInvalidRAD,
+        NakCrcFailure,
+        NakBadParam,
+        NakDefer,
+        NakLinkFailure,
+        NakNoResources,
+        NakDpcdFail,
+        NakI2cNak,
+        NakAllocateFail,
+
+        // Extensions
+        NakTimeout = 0x100      // Message was unable to be transmitted
+    } NakReason;
+
+    typedef struct
+    {
+        GUID guid;
+        NakReason reason;
+        unsigned nak_data;
+    } NakData;
+
+    typedef enum
+    {
+        ParseResponseSuccess,
+        ParseResponseFailed,
+        ParseResponseWrong
+    } ParseResponseStatus;
+
+    //
+    // Priority levels are defined to prioritize SBMs for DP1.4
+    // (Highest Priority - LEVEL1, Lowest Priority - DEFAULT)
+    // The current implementation has the following priority levels:
+    //      CLEAR_PAYLOAD_ID_TABLE                = NV_DP_SBMSG_PRIORITY_LEVEL_1
+    //      LINK_ADDRESS                          = NV_DP_SBMSG_PRIORITY_LEVEL_2
+    //      REMOTE_DPCD_READ, REMOTE_DPCD_WRITE   = NV_DP_SBMSG_PRIORITY_LEVEL_3
+    //      REMOTE_I2C_READ, REMOTE_I2C_WRITE     = NV_DP_SBMSG_PRIORITY_LEVEL_3
+    //      POWER_UP_PHY, POWER_DOWN_PHY          = NV_DP_SBMSG_PRIORITY_LEVEL_3
+    //      ENUM_PATH_RESOURCES, ALLOCATE_PAYLOAD = NV_DP_SBMSG_PRIORITY_LEVEL_4
+    //      All other messages                    = NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT
+    //
+    // However, Message::setMessagePriority can be used to override these
+    // priority levels, if required.
+    //
+    typedef enum
+    {
+        NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT,
+        NV_DP_SBMSG_PRIORITY_LEVEL_4,
+        NV_DP_SBMSG_PRIORITY_LEVEL_3,
+        NV_DP_SBMSG_PRIORITY_LEVEL_2,
+        NV_DP_SBMSG_PRIORITY_LEVEL_1,
+    } DPSideBandMessagePriority;
+
+    //
+    // CLASS: MessageManager
+    //
+    class MessageManager :
+        virtual public Object,
+        IncomingTransactionManager::IncomingTransactionManagerEventSink
+    {
+
+        Timer * timer;
+        DPCDHAL * hal;
+        DownRequestManager splitterDownRequest;
+        UpReplyManager splitterUpReply;
+        UpRequestManager mergerUpRequest;
+        DownReplyManager mergerDownReply;
+        bool isBeingDestroyed;
+        bool isPaused;
+
+        List messageReceivers;
+        List notYetSentDownRequest;     // Down Messages yet to be processed
+        List notYetSentUpReply;         // Up Reply Messages yet to be processed
+        List awaitingReplyDownRequest;  // Transmitted, Split, but not yet replied to
+
+        void onUpRequestReceived(bool status, EncodedMessage * message);
+        void onDownReplyReceived(bool status, EncodedMessage * message);
+        void transmitAwaitingDownRequests();
+        void transmitAwaitingUpReplies();
+
+        // IncomingTransactionManager
+        void messagedReceived(IncomingTransactionManager * from, EncodedMessage * message);
+
+    public:
+        class Message;
+        void cancelAllByType(unsigned type);
+        void cancelAll(Message * message);
+
+        void pause()
+        {
+            isPaused = true;
+        }
+
+        void clearPendingMsg()
+        {
+            hal->clearPendingMsg();
+        }
+        void IRQUpReqest()
+        {
+            mergerUpRequest.mailboxInterrupt();
+        }
+
+        void IRQDownReply()
+        {
+            mergerDownReply.mailboxInterrupt();
+        }
+
+        void clearNotYetSentQSEDownRequest()
+        {
+            for (ListElement * i = notYetSentDownRequest.begin(); i != notYetSentDownRequest.end(); )
+            {
+                Message * m = (Message *)i;
+                i = i->next;    // Do this first since we may unlink the current node
+
+                if (m->requestIdentifier ==
+                    NV_DP_SBMSG_REQUEST_ID_QUERY_STREAM_ENCRYPTION_STATUS)
+                {
+                    notYetSentDownRequest.remove(m);
+                    m->parent = 0;
+                }
+            }
+        }
+
+        bool isAnyAwaitingQSEReplyDownRequest()
+        {
+            bool bQSEAwaiting = false;
+
+            for (ListElement * i = awaitingReplyDownRequest.begin(); i != awaitingReplyDownRequest.end(); )
+            {
+                Message * m = (Message *)i;
+                i = i->next;    // Do this first since we may unlink the current node
+
+                if (m->requestIdentifier ==
+                    NV_DP_SBMSG_REQUEST_ID_QUERY_STREAM_ENCRYPTION_STATUS)
+                {
+                    // We break because there can be only one outstanding QSE message at any time.
+                    bQSEAwaiting = true;
+                    break;
+                }
+            }
+            return bQSEAwaiting;
+        }
+
+        void clearAwaitingQSEReplyDownRequest()
+        {
+            for (ListElement * i = awaitingReplyDownRequest.begin(); i != awaitingReplyDownRequest.end(); )
+            {
+                Message * m = (Message *)i;
+                i = i->next;    // Do this first since we may unlink the current node
+
+                if (m->requestIdentifier ==
+                    NV_DP_SBMSG_REQUEST_ID_QUERY_STREAM_ENCRYPTION_STATUS)
+                {
+                    awaitingReplyDownRequest.remove(m);
+                    m->parent = 0;
+                    break;
+                }
+            }
+        }
+        MessageManager(DPCDHAL * hal, Timer * timer)
+            : timer(timer), hal(hal),
+              splitterDownRequest(hal, timer),
+              splitterUpReply(hal, timer),
+              mergerUpRequest(hal, timer, Address(0), this),
+              mergerDownReply(hal, timer, Address(0), this),
+              isBeingDestroyed(false), isPaused(false)
+        {
+        }
+
+        //
+        // CLASS: MessageReceiver
+        //
+        class MessageReceiver : public ListElement, OutgoingTransactionManager::OutgoingTransactionManagerEventSink
+        {
+        public:
+            class MessageReceiverEventSink
+            {
+            public:
+                virtual void messageProcessed(MessageReceiver * from) = 0;
+            };
+
+            // Returns false if the message should be passed to the next receiver
+            virtual bool process(EncodedMessage * message);
+
+            // per message type should implement this
+            virtual bool processByType(EncodedMessage * message, BitStreamReader * reader) = 0;
+
+            unsigned getRequestId() { return requestId; }
+            Address & getAddress()  { return address; }
+
+            MessageReceiver(MessageReceiverEventSink* sink, unsigned requestId)
+                : sink(sink),
+                  requestId(requestId),
+                  bProcessed(true),
+                  address(0)    // 0 to start with
+            {}
+
+            virtual void splitterFailed(OutgoingTransactionManager * from)
+            {
+                DP_ASSERT(0 && "why did we send a reply");
+            }
+
+            virtual void splitterTransmitted(OutgoingTransactionManager * from)
+            {
+                DP_ASSERT(0 && "why did we send a reply");
+            }
+
+        protected:
+            MessageReceiverEventSink * sink;
+            unsigned requestId;
+            bool bProcessed;
+            Address address;
+            MessageManager * parent;
+        };
+
+        //
+        // CLASS: Message
+        //
+        class Message : public ListElement,
+                        OutgoingTransactionManager::OutgoingTransactionManagerEventSink,
+                        Timer::TimerCallback /* countdown timer for reply */
+        {
+        public:
+            class MessageEventSink
+            {
+            public:
+                virtual void messageFailed(Message * from, NakData * nakData) = 0;
+                virtual void messageCompleted(Message * from) = 0;
+            };
+            unsigned getMsgType()  { return requestIdentifier; }
+            unsigned getSinkPort() { return sinkPort; }
+        protected:
+            // Encoded message body (set in dp_messagecodings).
+            // This data structure is invalidated on post,
+            // as the data gets swapped into the transmit buffer.
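+            // (A posted message therefore has to be re-encoded, e.g. via its
+            // set(...) routine, before it can be posted again.)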
+        EncodedMessage encodedMessage;
+        MessageEventSink * sink;
+
+        MessageManager * parent;
+        bool transmitReply;
+        bool bTransmitted;
+        bool bBusyWaiting;
+        unsigned requestIdentifier;
+        unsigned messagePriority;
+        unsigned sinkPort;
+
+        // State updated by post operation
+        struct {
+            unsigned messageNumber;
+            Address target;
+        } state;
+
+        virtual ParseResponseStatus parseResponseAck(
+            EncodedMessage * message, BitStreamReader * reader) = 0;
+        virtual ParseResponseStatus parseResponse(EncodedMessage * message);
+        virtual void splitterFailed(OutgoingTransactionManager * from);
+        virtual void expired(const void * tag);
+        virtual void splitterTransmitted(OutgoingTransactionManager * from);
+
+    public:
+        friend class MessageManager;
+
+        Message(int requestIdentifier, int messagePriority)
+            : sink(0),
+              parent(0),
+              transmitReply(false),
+              bTransmitted(false),
+              bBusyWaiting(false),
+              requestIdentifier(requestIdentifier),
+              messagePriority(messagePriority),
+              sinkPort(0xFF)
+        {
+        }
+
+        void clear()
+        {
+            if (parent) {
+                parent->timer->cancelCallbacks(this);
+                parent->splitterDownRequest.cancel(this);
+            }
+
+            parent = 0;
+            List::remove(this);
+            encodedMessage.buffer.reset();
+        }
+
+        // Overrides the priority that was already set by the message's constructor.
+        void setMessagePriority(DPSideBandMessagePriority priorityLevel)
+        {
+            this->messagePriority = priorityLevel;
+        }
+
+    protected:
+        ~Message()
+        {
+            clear();
+        }
+    };
+
+    //
+    // Register a new receiver for unpaired messages
+    // (e.g. broadcast messages or sink->source messages)
+    //
+    void registerReceiver(MessageReceiver * receiver);
+
+    // Post a message to be asynchronously transmitted
+    void post(Message * message, Message::MessageEventSink * sink, bool isReply = false);
+    void postReply(Message * message, Message::MessageEventSink * sink);
+    void cancel(Message * message);
+
+    bool send(Message * message, NakData & nakData);
+    friend class Message;
+    ~MessageManager();
+    };
+    struct GenericMessageCompletion : public MessageManager::Message::MessageEventSink
+    {
+        bool failed;
+        bool completed;
+        NakData nakData;
+        GenericMessageCompletion();
+        void messageFailed(MessageManager::Message * from, NakData * data);
+        void messageCompleted(MessageManager::Message * from);
+    };
+}
+
+#endif //INCLUDED_DP_MESSAGES_H
diff --git a/src/common/displayport/inc/dp_object.h b/src/common/displayport/inc/dp_object.h
new file mode 100644
index 0000000..9bb02e8
--- /dev/null
+++ b/src/common/displayport/inc/dp_object.h
@@ -0,0 +1,132 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_object.h * +* This is the object from which all other dynamically-allocated objects * +* must inherit. * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_OBJECT_H +#define INCLUDED_DP_OBJECT_H + +#include "nvtypes.h" +#include "stddef.h" +#include "dp_hostimp.h" + +static inline void dpMemCopy(void * target, const void * source, size_t len) +{ + NvU8 * t = (NvU8 *)target; + const NvU8 * s = (const NvU8 *)source; + + while (len--) + *t++=*s++; +} + +static inline void dpMemZero(void * target, size_t len) +{ + NvU8 * t = (NvU8 *)target; + + while (len--) + *t++=0; +} + +static inline bool dpMemCmp(void *pvBuf1, void *pvBuf2, size_t size) +{ + NvU8 *pBuf1 = (NvU8 *)pvBuf1; + NvU8 *pBuf2 = (NvU8 *)pvBuf2; + + if(!pBuf1 || !pBuf2 || !size) + return false; + + do + { + if(*pBuf1++ == *pBuf2++) + continue; + else + break; + }while(--size); + + if(!size) + return true; + else + return false; +} + +namespace DisplayPort +{ + // + // Any object allocated through "new" must virtually inherit from this type. + // This guarantees that the memory allocation goes through dpMalloc/dpFree. + // Leak detection is implemented only on allocations of this type. Data + // structures may assume 0 initialization if allocated off the heap. + // + // You must use virtual inheritance because objects that inherit from + // multiple Object-derived classes would otherwise cause ambiguity when + // someone tries to use new or delete on them. + // + struct Object + { + virtual ~Object() {} + + void *operator new(size_t sz) + { + void * block = dpMalloc(sz); + if (block) + { + dpMemZero(block, sz); + } + return block; + } + + void *operator new[](size_t sz) + { + void * block = dpMalloc(sz); + if (block) + { + dpMemZero(block, sz); + } + return block; + } + + void operator delete(void * ptr) + { + if (ptr) + { + dpFree(ptr); + } + } + + void operator delete[](void * ptr) + { + if (ptr) + { + dpFree(ptr); + } + } + }; +} + +#endif // INCLUDED_DP_OBJECT_H diff --git a/src/common/displayport/inc/dp_printf.h b/src/common/displayport/inc/dp_printf.h new file mode 100644 index 0000000..be39a57 --- /dev/null +++ b/src/common/displayport/inc/dp_printf.h @@ -0,0 +1,52 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
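Because operator new above routes allocation through dpMalloc() and zero-fills the returned block, Object-derived types may rely on zero-initialized members, exactly as the comment in dp_object.h states. A minimal sketch of a conforming type (ExampleNode is hypothetical, not part of this patch):

    // Virtually inherits Object, as required for anything allocated with new.
    struct ExampleNode : virtual public DisplayPort::Object
    {
        NvU32 useCount;         // 0 after allocation: operator new calls dpMemZero
        ExampleNode * next;     // likewise starts out NULL
    };

    ExampleNode * node = new ExampleNode;   // storage comes from dpMalloc
    delete node;                            // storage returns via dpFree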
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_printf.h * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_PRINTF_H +#define INCLUDED_DP_PRINTF_H + +#define DPLIB_LOG_BUFFER_SIZE 512 + +typedef enum +{ + DP_SILENT, + DP_INFO, + DP_NOTICE, + DP_WARNING, + DP_ERROR, + DP_HW_ERROR, + DP_FATAL, +} DP_LOG_LEVEL; + +#if defined(_DEBUG) || defined(DEBUG) +#define DP_PRINTF(severity, format, ...) dpPrintf(severity, format, ##__VA_ARGS__) +#else +#define DP_PRINTF(severity, format, ...) +#endif // _DEBUG || DEBUG + +#endif // INCLUDED_DP_PRINTF_H diff --git a/src/common/displayport/inc/dp_qse.h b/src/common/displayport/inc/dp_qse.h new file mode 100644 index 0000000..b85de4e --- /dev/null +++ b/src/common/displayport/inc/dp_qse.h @@ -0,0 +1,109 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
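DP_PRINTF above forwards to dpPrintf() only in _DEBUG/DEBUG builds and expands to nothing otherwise, so its arguments are never evaluated in release builds and must be side-effect free. A usage sketch (retryCount and maxRetries are hypothetical):

    // Emitted in debug builds only; the arguments vanish in release builds.
    DP_PRINTF(DP_WARNING, "Link training failed, retry %u of %u",
              retryCount, maxRetries);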
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_qse.h * +* Class definition for HDCP Query Stream Encryption and relative reading.* +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_QSE_H +#define INCLUDED_DP_QSE_H + +#include "dp_messagecodings.h" +#include "dp_auxdefs.h" + +namespace DisplayPort +{ +#define CLIENT_ID_SIZE 7 + + + struct CLIENTID + { + NvU8 data[CLIENT_ID_SIZE]; + + CLIENTID() + { + dpMemZero(&data, sizeof(data)); + } + }; + + enum QSE_REASON + { + qseReason_Generic, + qseReason_Ssc + }; + + class QSENonceGenerator: public Object + { + NvU32 previousRandomLSB; + NvU32 previousRandomMSB; + // + // Linear congruential random number generator + // Seed values chosen from numerical methods + // + NvU64 random(); + + public: + QSENonceGenerator():previousRandomLSB(0),previousRandomMSB(0) + {} + + void clientIdBuilder(NvU64 aN); + // For every clientId generation we need to call makeClientId + void makeClientId(CLIENTID & clientId); + }; + + struct GroupImpl; + struct ConnectorImpl; + + class StreamEncryptionStatusDetection : public Object, public MessageManager::Message::MessageEventSink, Timer::TimerCallback + { + GroupImpl * parent; + ConnectorImpl * connector; + QueryStreamEncryptionMessage qseMessage; + unsigned retriesSendQSEMessage; + QSE_REASON reason; + bool bIsHdcp22Qse; + bool bIsRepeater; + + public: + StreamEncryptionStatusDetection(GroupImpl * parent, ConnectorImpl * connector): + parent(parent), connector(connector), retriesSendQSEMessage(0), bIsHdcp22Qse(false), bIsRepeater(false) + {} + + ~StreamEncryptionStatusDetection(); + + void sendQSEMessage(GroupImpl * group, QSE_REASON reasonId = qseReason_Generic); + void handleQSEDownReply(); + void messageFailed(MessageManager::Message * from, NakData * nakData); + void messageCompleted(MessageManager::Message * from); + void expired(const void * tag); + bool handleQSEReplyValidation(); + void resetQseMessageState(); + void setHdcp22Qse(bool bHdcp22Qse); + }; + + struct DeviceImpl; +} + +#endif // INCLUDED_DP_QSE_H diff --git a/src/common/displayport/inc/dp_regkeydatabase.h b/src/common/displayport/inc/dp_regkeydatabase.h new file mode 100644 index 0000000..4b4cd77 --- /dev/null +++ b/src/common/displayport/inc/dp_regkeydatabase.h @@ -0,0 +1,134 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_regkeydatabase.h * +* Definition of the DP_REGKEY_DATABASE * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_REGKEYDATABASE_H +#define INCLUDED_DP_REGKEYDATABASE_H + +#include "dp_auxdefs.h" + +// Regkey Names +#define NV_DP_REGKEY_DISABLE_QSES "DISABLE_QSES" +#define NV_DP_REGKEY_OVERRIDE_DPCD_REV "OVERRIDE_DPCD_REV" +#define NV_DP_REGKEY_DISABLE_SSC "DISABLE_SSC" // SSC (Stream Status Changed) +#define NV_DP_REGKEY_ENABLE_FAST_LINK_TRAINING "ENABLE_FAST_LINK_TRAINING" +#define NV_DP_REGKEY_DISABLE_MST "DISABLE_MST" +#define NV_DP_REGKEY_ENABLE_INBAND_STEREO_SIGNALING "ENABLE_INBAND_STEREO_SIGNALING" +#define NV_DP_REGKEY_SKIP_POWEROFF_EDP_IN_HEAD_DETACH "SKIP_POWEROFF_EDP_IN_HEAD_DETACH" +#define NV_DP_REGKEY_ENABLE_OCA_LOGGING "ENABLE_OCA_LOGGING" +#define NV_DP_REGKEY_REPORT_DEVICE_LOST_BEFORE_NEW "HP_WAR_1707690" +#define NV_DP_REGKEY_APPLY_LINK_BW_OVERRIDE_WAR "APPLY_LINK_BW_OVERRIDE_WAR" +// For DP2x, the regkey value needs to be in 10M convention +#define NV_DP_REGKEY_APPLY_MAX_LINK_RATE_OVERRIDES "APPLY_OVERRIDES_FOR_BUG_2489143" +#define NV_DP_REGKEY_DISABLE_DSC "DISABLE_DSC" +#define NV_DP_REGKEY_SKIP_ASSESSLINK_FOR_EDP "HP_WAR_2189772" +#define NV_DP_REGKEY_HDCP_AUTH_ONLY_ON_DEMAND "DP_HDCP_AUTH_ONLY_ON_DEMAND" +#define NV_DP_REGKEY_ENABLE_MSA_OVER_MST "ENABLE_MSA_OVER_MST" +#define NV_DP_REGKEY_DISABLE_DOWNSPREAD "DISABLE_DOWNSPREAD" + +// Keep link alive for SST and MST +#define NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE "DP_KEEP_OPT_LINK_ALIVE" +// Keep link alive when connector is in MST +#define NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE_MST "DP_KEEP_OPT_LINK_ALIVE_MST" +// Keep link alive when connector is in SST +#define NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE_SST "DP_KEEP_OPT_LINK_ALIVE_SST" + +#define NV_DP_REGKEY_FORCE_EDP_ILR "DP_BYPASS_EDP_ILR_REV_CHECK" + +// Message to power down video stream before power down link (set D3) +#define NV_DP_REGKEY_POWER_DOWN_PHY "DP_POWER_DOWN_PHY" + +// +// DSC capability of downstream device should be decided based on device's own +// and its parent's DSC capability. +// +#define NV_DP_DSC_MST_CAP_BUG_3143315 "DP_DSC_MST_CAP_BUG_3143315" + +#define NV_DP2X_REGKEY_DISABLE_EFF_BPP_SST_8b10b "DP2X_REGKEY_DISABLE_EFF_BPP_SST_8b10b" + +// +// Bug 4388987 : This regkey will disable reading PCON caps for MST. +// +#define NV_DP_REGKEY_MST_PCON_CAPS_READ_DISABLED "DP_BUG_4388987_WAR" +#define NV_DP_REGKEY_DISABLE_TUNNEL_BW_ALLOCATION "DP_DISABLE_TUNNEL_BW_ALLOCATION" + +#define NV_DP_REGKEY_DISABLE_AVOID_HBR3_WAR "DP_DISABLE_AVOID_HBR3_WAR" + +// Bug 4793112 : On eDP panel, do not cache source OUI if it reads zero +#define NV_DP_REGKEY_SKIP_ZERO_OUI_CACHE "DP_SKIP_ZERO_OUI_CACHE" + +#define NV_DP_REGKEY_ENABLE_FIX_FOR_5147205 "DP_ENABLE_5147205_FIX" +// Bug 5088957 : Force head shutdown in DpLib +#define NV_DP_REGKEY_FORCE_HEAD_SHUTDOWN "DP_WAR_5088957" + +// +// Data Base used to store all the regkey values. +// The actual data base is declared statically in dp_evoadapter.cpp. +// All entries set to 0 before initialized by the first EvoMainLink constructor. +// The first EvoMainLink constructor will populate that data base. 
+// Later EvoMainLink will use values from that data base. +// +struct DP_REGKEY_DATABASE +{ + bool bInitialized; // set to true after the first EvoMainLink instance is constructed + // Below are regkey values + bool bQsesDisabled; + NvU32 dpcdRevOveride; + bool bSscDisabled; + bool bFastLinkTrainingEnabled; + bool bMstDisabled; + bool bInbandStereoSignalingEnabled; + bool bPoweroffEdpInHeadDetachSkipped; + bool bOcaLoggingEnabled; + bool bReportDeviceLostBeforeNew; + bool bLinkBwOverrideWarApplied; + NvU32 applyMaxLinkRateOverrides; + bool bDscDisabled; + bool bAssesslinkForEdpSkipped; + bool bHdcpAuthOnlyOnDemand; + bool bMsaOverMstEnabled; + bool bOptLinkKeptAlive; + bool bOptLinkKeptAliveMst; + bool bOptLinkKeptAliveSst; + bool bBypassEDPRevCheck; + bool bDscMstCapBug3143315; + bool bPowerDownPhyBeforeD3; + bool bMSTPCONCapsReadDisabled; + bool bForceDisableTunnelBwAllocation; + bool bDownspreadDisabled; + bool bDisableAvoidHBR3War; + bool bCableVconnSourceUnknownWar; + bool bSkipZeroOuiCache; + bool bEnable5147205Fix; + bool bForceHeadShutdown; +}; + +extern struct DP_REGKEY_DATABASE dpRegkeyDatabase; + +#endif //INCLUDED_DP_REGKEYDATABASE_H diff --git a/src/common/displayport/inc/dp_ringbuffer.h b/src/common/displayport/inc/dp_ringbuffer.h new file mode 100644 index 0000000..67fa9e0 --- /dev/null +++ b/src/common/displayport/inc/dp_ringbuffer.h @@ -0,0 +1,33 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include "dp_object.h" + +#define addToRingBufferCollection(x) {} +#define addDpLogRecord(x, ...) {} +#define addDpAssertRecord() {} +#define queryDpLogRecords(a, b, c) {} +#define resetDpAssertRingBuffer() {} + diff --git a/src/common/displayport/inc/dp_splitter.h b/src/common/displayport/inc/dp_splitter.h new file mode 100644 index 0000000..f632cf5 --- /dev/null +++ b/src/common/displayport/inc/dp_splitter.h @@ -0,0 +1,159 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_splitter.h * +* Asynchronous Message splitter * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_SPLITTER_H +#define INCLUDED_DP_SPLITTER_H + +#include "dp_list.h" +#include "dp_auxretry.h" +#include "dp_timer.h" +#include "dp_auxdefs.h" +#include "dp_messageheader.h" + +namespace DisplayPort +{ + + struct EncodedMessage; + class DPCDHAL; + + class MessageTransactionSplitter + { + // + // If set we've pulled an item out of the downQueue queue. + // One or more transactions have been sent as a result + // messageOutstanding->messageOffset show how far into + // the message we are. + // + EncodedMessage * messageOutstanding; + unsigned assemblyTransmitted; + public: + void set(EncodedMessage * messageOutstanding) + { + this->messageOutstanding = messageOutstanding; + assemblyTransmitted = 0; + } + + // + // Encode the next transaction. + // returns false if there are no more transactions + // + bool get(Buffer & assemblyBuffer); + + MessageTransactionSplitter() : messageOutstanding(0), assemblyTransmitted(0) + {} + }; + + class OutgoingTransactionManager: + virtual public Object, + private Timer::TimerCallback + { + public: + class OutgoingTransactionManagerEventSink + { + public: + virtual void splitterFailed(OutgoingTransactionManager * from) = 0; // Sink DEFER the writes + virtual void splitterTransmitted(OutgoingTransactionManager * from) = 0; // message was sent (may NACK later) + }; + + // Send the encoded message. 
This call is destructive to the EncodedMessage + // passed in + bool send( EncodedMessage & payload, OutgoingTransactionManagerEventSink * sink); + + OutgoingTransactionManager(Timer * timer); + virtual ~OutgoingTransactionManager() { timer->cancelCallbacks(this); } + + // Do not make any calls to the event sink + void cancel(OutgoingTransactionManagerEventSink * sink); + + protected: + virtual AuxRetry::status writeMessageBox(NvU8 * data, size_t length) = 0; + virtual size_t getMessageBoxSize() = 0; + private: + void writeToWindow( bool firstAttempt); + void split(); + void expired(const void * tag); // timer callback + + unsigned retriesLeft; + + Buffer assemblyBuffer; + MessageTransactionSplitter transactionSplitter; + + // + // List of outgoing messages + // + struct OutgoingMessage : ListElement + { + OutgoingTransactionManagerEventSink* eventSink; + EncodedMessage message; + }; + + List queuedMessages; + + // + // Message currently assembled in transactionSplitter + // (if any) + // + OutgoingMessage * activeMessage; + Timer * timer; + }; + + + class DownRequestManager : public OutgoingTransactionManager + { + public: + DownRequestManager(DPCDHAL * hal, Timer * timer) + : OutgoingTransactionManager(timer), hal(hal) + { + } + + virtual ~DownRequestManager() {} + protected: + DPCDHAL * hal; + + virtual AuxRetry::status writeMessageBox(NvU8 * data, size_t length); + virtual size_t getMessageBoxSize(); + }; + + class UpReplyManager : public OutgoingTransactionManager + { + public: + UpReplyManager(DPCDHAL * hal, Timer * timer) + : OutgoingTransactionManager(timer), hal(hal) + { + } + virtual ~UpReplyManager() {} + protected: + DPCDHAL * hal; + + virtual AuxRetry::status writeMessageBox(NvU8 * data, size_t length); + virtual size_t getMessageBoxSize(); + }; +} + +#endif //INCLUDED_DP_SPLITTER_H diff --git a/src/common/displayport/inc/dp_timeout.h b/src/common/displayport/inc/dp_timeout.h new file mode 100644 index 0000000..35f07ab --- /dev/null +++ b/src/common/displayport/inc/dp_timeout.h @@ -0,0 +1,74 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
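DownRequestManager and UpReplyManager above differ only in which DPCD mailbox their writeMessageBox() override targets. A minimal sketch of a third subclass, assuming one wanted to exercise the splitter without hardware (FakeOutgoingManager is hypothetical; 48 bytes is the DP sideband-message transaction cap):

    class FakeOutgoingManager : public DisplayPort::OutgoingTransactionManager
    {
    public:
        FakeOutgoingManager(DisplayPort::Timer * timer)
            : OutgoingTransactionManager(timer) {}
    protected:
        // Pretend the sink accepts every mailbox write.
        virtual DisplayPort::AuxRetry::status writeMessageBox(NvU8 * data, size_t length)
        {
            return DisplayPort::AuxRetry::ack;
        }
        virtual size_t getMessageBoxSize()
        {
            return 48;
        }
    };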
+ */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_timeout.h * +* Local timeout management * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_TIMEOUT_H +#define INCLUDED_DP_TIMEOUT_H + +#include "dp_timer.h" + +namespace DisplayPort +{ + // + // Timeout management + // + class Timeout : virtual public Object + { + Timer * timer; + NvU64 timeoutTime; // What time to trigger the timeout at + + public: + + Timeout(Timer * _timer, int timeoutMilliseconds) + : timer(_timer), timeoutTime(_timer->getTimeUs() + timeoutMilliseconds*1000 + 1 /* counter could be about to roll */) + { + } + + NvS64 remainingUs() + { + NvS64 remaining = (NvS64)(timeoutTime - timer->getTimeUs()); + + // Rollover check + if (remaining < 0) + { + remaining = 0; + } + + DP_ASSERT(remaining < ((NvS64)1000000*3600) && "Timeout remaining over an hour"); + + return remaining; + } + + bool valid() + { + return remainingUs() > 0; + } + }; +} + +#endif //INCLUDED_DP_TIMEOUT_H diff --git a/src/common/displayport/inc/dp_timer.h b/src/common/displayport/inc/dp_timer.h new file mode 100644 index 0000000..bc27da9 --- /dev/null +++ b/src/common/displayport/inc/dp_timer.h @@ -0,0 +1,103 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_timer.h * +* Local timer interface * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_TIMER_H +#define INCLUDED_DP_TIMER_H + +#include "dp_list.h" + +namespace DisplayPort +{ + // + // RawTimer + // This API is expected to be implemented by the + // library client. 
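The Timeout helper above captures its deadline at construction, so it can bound a polling loop without re-arming any timer callbacks. A hedged sketch (deviceReady() is hypothetical; 'timer' is a DisplayPort::Timer owned by the caller):

    DisplayPort::Timeout timeout(timer, 100 /* ms */);
    do
    {
        if (deviceReady())          // hypothetical readiness check
            break;
        timer->sleep(1);            // back off 1 ms between polls
    } while (timeout.valid());      // false once remainingUs() reaches 0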
+    //
+    class RawTimer : virtual public Object
+    {
+    public:
+        struct Callback : virtual public Object
+        {
+            virtual void expired() = 0;
+        };
+        virtual void queueCallback(Callback * callback, int milliseconds) = 0;
+        virtual NvU64 getTimeUs() = 0;
+        virtual void sleep(int milliseconds) = 0;
+    };
+
+
+    //
+    // Timer
+    //
+    class Timer : public RawTimer::Callback
+    {
+    public:
+        struct TimerCallback
+        {
+            virtual void expired(const void * context) = 0;
+        };
+
+    private:
+        RawTimer * raw;
+        List pending;
+        struct PendingCallback : ListElement
+        {
+            TimerCallback * target;
+            const void * context;
+            NvU64 timestamp;        // in usec
+            bool executeInSleep;
+        };
+
+        virtual void expired();
+        unsigned fire(bool fromSleep);
+
+        void _pump(unsigned milliseconds, bool fromSleep);
+    public:
+        Timer(RawTimer * raw) : raw(raw) {}
+        virtual ~Timer() {}
+
+        //
+        // Queue a timer callback.
+        // Unless the dont-execute-in-sleep flag is cleared (executeInSleep ==
+        // false), the callback may also fire while sleep() is pumping
+        // pending timers.
+        //
+        void queueCallback(Timer::TimerCallback * target, const void * context, unsigned milliseconds, bool executeInSleep = true);
+        NvU64 getTimeUs();
+        void sleep(unsigned milliseconds);
+        void cancelCallbacks(Timer::TimerCallback * to);
+
+        void cancelCallback(Timer::TimerCallback * to, const void * context);
+        void queueCallbackInOrder(Timer::TimerCallback * target, const void * context, unsigned milliseconds, bool executeInSleep);
+        void cancelCallbacksWithoutContext(const void * context);
+        void cancelAllCallbacks();
+        bool checkCallbacksOfSameContext(const void * context);
+    };
+}
+
+#endif //INCLUDED_DP_TIMER_H
diff --git a/src/common/displayport/inc/dp_tracing.h b/src/common/displayport/inc/dp_tracing.h
new file mode 100644
index 0000000..993320f
--- /dev/null
+++ b/src/common/displayport/inc/dp_tracing.h
@@ -0,0 +1,128 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+ /******************************* DisplayPort ******************************\
+*                                                                           *
+* Module: dp_tracing.h                                                      *
+*    Header file for support of tracing, implemented by a host provider    *
+*    Because this is platform-agnostic, the tracing API                    *
+*    is left up to the host interface.
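Clients consume the Timer class above by implementing TimerCallback and queueing it; the context pointer is handed back verbatim in expired(). A minimal sketch (RetryCallback and the tag value are hypothetical):

    struct RetryCallback : DisplayPort::Timer::TimerCallback
    {
        virtual void expired(const void * context)
        {
            // 'context' is the same pointer passed to queueCallback().
        }
    };

    // RetryCallback cb;
    // timer->queueCallback(&cb, "retry-tag", 15, false /* defer during sleep */);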
* +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_TRACING_H +#define INCLUDED_DP_TRACING_H + +#include "nvtypes.h" + +typedef enum NV_DP_TRACING_EVENT +{ + TRACE_DP_ID_HOTPLUG, + TRACE_DP_ID_NEW_SINK_DETECTED, + TRACE_DP_ID_NEW_SINK_REPORTED, + TRACE_DP_ID_NEW_MST_DEVICE, + TRACE_DP_ID_LOST_DEVICE, + TRACE_DP_ID_LINK_ASSESSMENT, + TRACE_DP_ID_LINK_TRAINING_START, + TRACE_DP_ID_LINK_TRAINING_DONE, + TRACE_DP_ID_NOTIFY_ATTACH_BEGIN, + TRACE_DP_ID_NOTIFY_ATTACH_BEGIN_STATUS, + TRACE_DP_ID_NOTIFY_ATTACH_END, + TRACE_DP_ID_NOTIFY_DETACH_BEGIN, + TRACE_DP_ID_NOTIFY_DETACH_END, + TRACE_DP_ID_MESSAGE_EXPIRED +} NV_DP_TRACING_EVENT; + +typedef enum NV_DP_TRACING_PRIORITY +{ + TRACE_DP_PRIORITY_ERROR, + TRACE_DP_PRIORITY_WARNING, + TRACE_DP_PRIORITY_INFO +} NV_DP_TRACING_PRIORITY; + +#define NV_DPTRACE_MAX_PARAMS 8 + +#define _NV_DPTRACE_EXPAND_HELPER(x) x +#define _NV_DPTRACE_EXPAND(x) _NV_DPTRACE_EXPAND_HELPER(x) + +// +// _COUNT_ARGS: Counts the size of an argument list. +// +// For example, if the argument list is two-arguments "A, B", then call it like this: +// _COUNT_ARGS(_placeholder, A, B, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +// +// which maps to the argument names like this: +// _COUNT_ARGS(_0=_placeholder, _1=A, _2=B, _3=9, _4=8, _5=7, _6=6, _7=5, _8=4,, _9=3, _10=2, ...) +// +// and thus _COUNT_ARGS will return 2, the correct size of the argument list. +// +#define _NV_DPTRACE_COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, ...) _10 + +#define NV_DPTRACE_ERROR(...) NV_DPTRACE_EVENT(TRACE_DP_PRIORITY_ERROR, __VA_ARGS__) +#define NV_DPTRACE_WARNING(...) NV_DPTRACE_EVENT(TRACE_DP_PRIORITY_WARNING, __VA_ARGS__) +#define NV_DPTRACE_INFO(...) NV_DPTRACE_EVENT(TRACE_DP_PRIORITY_INFO, __VA_ARGS__) + +// +// When ##__VA_ARGS__ is used, it will delete a preceding comma (',') when +// __VA_ARGS__ is blank (i.e. zero-length argument list). This allows +// the zero-argument case to work without resulting in a syntax error. +// +// We have a placeholder argument as the first parameter to _COUNT_ARGS +// so that we can take advantage of this comma-deleting behavior. +// +// However, there shouldn't be a zero-arg case as of now, because the first arg is the event. +// +#define NV_DPTRACE_EVENT(priority, ...) \ + _NV_DPTRACE_SEND(priority, _NV_DPTRACE_EXPAND(_NV_DPTRACE_COUNT_ARGS(_0, ##__VA_ARGS__, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)), __VA_ARGS__) + +#define _NV_DPTRACE_SEND(priority, argc, ...) _NV_DPTRACE_EXPAND(_NV_DPTRACE_SEND_N(priority, argc, __VA_ARGS__)) +#define _NV_DPTRACE_SEND_N(priority, argc, ...) 
_NV_DPTRACE_EXPAND(_NV_DPTRACE_##argc(priority, __VA_ARGS__)) + +// The first argument is the event - macro number is one higher than num args passed to dpTraceEvent +#define _NV_DPTRACE_1(priority, event) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 0); + +#define _NV_DPTRACE_2(priority, event, p1) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 1, p1); + +#define _NV_DPTRACE_3(priority, event, p1, p2) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 2, p1, p2); + +#define _NV_DPTRACE_4(priority, event, p1, p2, p3) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 3, p1, p2, p3); + +#define _NV_DPTRACE_5(priority, event, p1, p2, p3, p4) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 4, p1, p2, p3, p4); + +#define _NV_DPTRACE_6(priority, event, p1, p2, p3, p4, p5) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 5, p1, p2, p3, p4, p5); + +#define _NV_DPTRACE_7(priority, event, p1, p2, p3, p4, p5, p6) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 6, p1, p2, p3, p4, p5, p6); + +#define _NV_DPTRACE_8(priority, event, p1, p2, p3, p4, p5, p6, p7) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 7, p1, p2, p3, p4, p5, p6, p7); + +#define _NV_DPTRACE_9(priority, event, p1, p2, p3, p4, p5, p6, p7, p8) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 8, p1, p2, p3, p4, p5, p6, p7, p8); + +#endif // INCLUDED_DP_TRACING_H diff --git a/src/common/displayport/inc/dp_vrr.h b/src/common/displayport/inc/dp_vrr.h new file mode 100644 index 0000000..4fa73aa --- /dev/null +++ b/src/common/displayport/inc/dp_vrr.h @@ -0,0 +1,95 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
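The argument-counting machinery above resolves each NV_DPTRACE_* call to the _NV_DPTRACE_N macro matching its arity. A worked expansion (port is a hypothetical NvU32):

    NV_DPTRACE_INFO(HOTPLUG, port);
    //  -> NV_DPTRACE_EVENT(TRACE_DP_PRIORITY_INFO, HOTPLUG, port)
    //  -> _NV_DPTRACE_COUNT_ARGS(_0, HOTPLUG, port, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) yields 2
    //  -> _NV_DPTRACE_2(TRACE_DP_PRIORITY_INFO, HOTPLUG, port)
    //  -> dpTraceEvent(TRACE_DP_ID_HOTPLUG, TRACE_DP_PRIORITY_INFO, 1, port);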
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_vrr.h * +* Prototypes and definitions related to VRR enablement * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_VRR_H +#define INCLUDED_DP_VRR_H + +#include "dp_object.h" + +// Worstcase VRR enablement handshake timeout of 600ms (40x15ms) +#define VRR_ENABLE_STATUS_TIMEOUT_THRESHOLD 40 +#define VRR_ENABLE_STATUS_TIMEOUT_INTERVAL_MS 15 + +// Retry enablement threshold in notifyShortPulse() +#define VRR_MAX_RETRIES 3 + +namespace DisplayPort +{ + enum VrrEnableStage + { + VRR_ENABLE_STAGE_MONITOR_ENABLE_BEGIN, + VRR_ENABLE_STAGE_MONITOR_ENABLE_CHALLENGE, + VRR_ENABLE_STAGE_MONITOR_ENABLE_CHECK, + VRR_ENABLE_STAGE_DRIVER_ENABLE_BEGIN, + VRR_ENABLE_STAGE_DRIVER_ENABLE_CHALLENGE, + VRR_ENABLE_STAGE_DRIVER_ENABLE_CHECK, + VRR_ENABLE_STAGE_RESET_MONITOR, + VRR_ENABLE_STAGE_INIT_PUBLIC_INFO, + VRR_ENABLE_STAGE_GET_PUBLIC_INFO, + VRR_ENABLE_STAGE_STATUS_CHECK, + }; + + struct DeviceImpl; + + class VrrEnablement : virtual public Object + { + private: + DeviceImpl *parent; + bool bMonitorEnabled; + + bool vrrGetPublicInfo(void); + bool vrrWaitOnEnableStatus(void); + bool vrrEnableMonitor(void); + bool vrrEnableDriver(void); + + public: + + VrrEnablement(DeviceImpl *parent) + : parent(parent) + { + reset(); + } + + ~VrrEnablement() + { + parent = NULL; + reset(); + } + + bool start(void); + void reset(void) + { + bMonitorEnabled = false; + } + bool isMonitorEnabled(void); + bool isDriverEnabled(void); + }; +} + +#endif diff --git a/src/common/displayport/inc/dp_wardatabase.h b/src/common/displayport/inc/dp_wardatabase.h new file mode 100644 index 0000000..9b54f95 --- /dev/null +++ b/src/common/displayport/inc/dp_wardatabase.h @@ -0,0 +1,75 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
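The two defines above bound the VRR enablement handshake at 40 polls of 15 ms each, i.e. roughly 600 ms; the class declares vrrWaitOnEnableStatus() for the real check. A hedged sketch of the implied wait loop (waitOnVrrEnableStatus and vrrStatusReady are hypothetical):

    bool waitOnVrrEnableStatus(DisplayPort::Timer * timer)
    {
        for (unsigned i = 0; i < VRR_ENABLE_STATUS_TIMEOUT_THRESHOLD; i++)
        {
            if (vrrStatusReady())   // hypothetical DPCD status read
                return true;
            timer->sleep(VRR_ENABLE_STATUS_TIMEOUT_INTERVAL_MS);
        }
        return false;               // ~600 ms elapsed with no confirmation
    }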
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_wardatabase.h * +* EDID and OUI based workarounds for panel/TCON issues * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_WARDATABASE_H +#define INCLUDED_DP_WARDATABASE_H + +#include "dp_object.h" + +namespace DisplayPort +{ + #define WAR_MAX_REASSESS_ATTEMPT 3 + #define WAR_MAX_RETRAIN_ATTEMPT 3 + + typedef enum + { + DP_MONITOR_CAPABILITY_DP_SKIP_REDUNDANT_LT = (1 << 0), // Do not train if the link B/W and lane count are already set to the desired quantities + DP_MONITOR_CAPABILITY_DP_SKIP_CABLE_BW_CHECK = (1 << 1), // Skip the link training attempts to test cable bandwidth in CheckDpLink + DP_MONITOR_CAPABILITY_DP_MULTI_WRITE_DPCD_0x600 = (1 << 2), // Repeatedly write 0x1 to 0x600 with extra delays until the read verifies the write + DP_MONITOR_CAPABILITY_DP_WRITE_0x600_BEFORE_LT = (1 << 3), // Power on a monitor before every link training + DP_MONITOR_CAPABILITY_DP_OVERRIDE_OPTIMAL_LINK_CONFIG = (1 << 4), // Override optimal link config + DP_MONITOR_CAPABILITY_DP_OVERRIDE_MAX_LANE_COUNT = (1 << 5), // WAR for some DP monitors which claims more lane count than it really supports. It may generate interrupt storm if unsupported lane count is applied + DP_MONITOR_CAPABILITY_DP_AVOID_UPDATE_POWER_STATE = (1 << 6), // Don't update panel power state when head detach or lid closed + } DP_MONITOR_CAPABILITY; + + struct DpMonitorDenylistData: virtual public Object + { + // Max lane count supported override value + unsigned int dpMaxLaneCountOverride; + + // Link rate and Lane count value overrides + // when we need to skip BW check + struct + { + unsigned int maxLaneAtHighRate; + unsigned int maxLaneAtLowRate; + } dpSkipCheckLink; + + // Link rate and Lane count value overrides + // when we need to force optimal link config + struct + { + unsigned int linkRate; + unsigned int laneCount; + } dpOverrideOptimalLinkConfig; + }; +} + +#endif // INCLUDED_DP_WARDATABASE_H diff --git a/src/common/displayport/inc/dp_watermark.h b/src/common/displayport/inc/dp_watermark.h new file mode 100644 index 0000000..00d7f83 --- /dev/null +++ b/src/common/displayport/inc/dp_watermark.h @@ -0,0 +1,141 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
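The DP_MONITOR_CAPABILITY values above are one-hot bits, so per-monitor workarounds can be OR-ed together and tested with a mask. Illustrative only (warFlags is hypothetical):

    NvU32 warFlags = DP_MONITOR_CAPABILITY_DP_SKIP_REDUNDANT_LT |
                     DP_MONITOR_CAPABILITY_DP_WRITE_0x600_BEFORE_LT;

    if (warFlags & DP_MONITOR_CAPABILITY_DP_WRITE_0x600_BEFORE_LT)
    {
        // power up the panel (DPCD 0x600) before each link training
    }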
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_watermark.h * +* DP watermark IsModePossible calculations. * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_WATERMARK_H +#define INCLUDED_DP_WATERMARK_H + +#include "displayport.h" + +namespace DisplayPort +{ + class LinkConfiguration; + + struct ModesetInfo + { + unsigned twoChannelAudioHz; // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz; // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + NvU64 pixelClockHz; // Requested pixel clock for the mode + unsigned rasterWidth; + unsigned rasterHeight; + unsigned surfaceWidth; // RasterBlankStartX - RasterBlankEndX + unsigned surfaceHeight; // Active region height + unsigned depth; + unsigned rasterBlankStartX; + unsigned rasterBlankEndX; + unsigned bitsPerComponent; // Bits per component + bool bEnableDsc; // bEnableDsc=1 indicates DSC would be enabled for the mode + bool bEnablePassThroughForPCON; + + DSC_MODE mode; // DSC Mode + DP_COLORFORMAT colorFormat; + + ModesetInfo(): twoChannelAudioHz(0), + eightChannelAudioHz(0), + pixelClockHz(0), + rasterWidth(0), + rasterHeight(0), + surfaceWidth(0), + surfaceHeight(0), + depth(0), + rasterBlankStartX(0), + rasterBlankEndX(0), + bitsPerComponent(0), + bEnableDsc(false), + bEnablePassThroughForPCON(false), + mode(DSC_SINGLE), + colorFormat(dpColorFormat_Unknown) {} + + ModesetInfo(unsigned newTwoChannelAudioHz, unsigned newEightChannelAudioHz, NvU64 newPixelClockHz, + unsigned newRasterWidth, unsigned newRasterHeight, + unsigned newSurfaceWidth, unsigned newSurfaceHeight, unsigned newDepth, + unsigned newRasterBlankStartX=0, unsigned newRasterBlankEndX=0, bool newBEnableDsc = false, + DSC_MODE newMode = DSC_SINGLE, bool newBEnablePassThroughForPCON = false, + DP_COLORFORMAT dpColorFormat = dpColorFormat_Unknown): + twoChannelAudioHz(newTwoChannelAudioHz), + eightChannelAudioHz(newEightChannelAudioHz), + pixelClockHz(newPixelClockHz), + rasterWidth(newRasterWidth), + rasterHeight(newRasterHeight), + surfaceWidth(newSurfaceWidth), + surfaceHeight(newSurfaceHeight), + depth(newDepth), + rasterBlankStartX(newRasterBlankStartX), + rasterBlankEndX(newRasterBlankEndX), + bitsPerComponent(0), + bEnableDsc(newBEnableDsc), + bEnablePassThroughForPCON(newBEnablePassThroughForPCON), + mode(newMode), + colorFormat(dpColorFormat) {} + }; + + struct Watermark + { + unsigned waterMark; + unsigned tuSize; + unsigned hBlankSym; + unsigned vBlankSym; + NvU32 effectiveBpp; + }; + + bool isModePossibleSST + ( + const LinkConfiguration &linkConfig, + const ModesetInfo &modesetInfo, + Watermark * dpInfo, + bool bUseIncreasedWatermarkLimits = false + ); + + bool isModePossibleMST + ( + const LinkConfiguration &linkConfig, + const ModesetInfo &modesetInfo, + Watermark * dpInfo + ); + + bool isModePossibleSSTWithFEC + ( + const LinkConfiguration &linkConfig, + const ModesetInfo &modesetInfo, + Watermark * dpInfo, + bool bUseIncreasedWatermarkLimits = false + ); + + bool isModePossibleMSTWithFEC + ( + const LinkConfiguration &linkConfig, + const ModesetInfo &modesetInfo, + Watermark * dpInfo + ); + + // Return Payload Bandwidth Number(PBN)for requested mode + unsigned pbnForMode(const ModesetInfo &modesetInfo, bool bAccountSpread = true); +} + +#endif //INCLUDED_DP_WATERMARK_H diff --git a/src/common/displayport/inc/dptestutil/dp_testmessage.h 
b/src/common/displayport/inc/dptestutil/dp_testmessage.h
new file mode 100644
index 0000000..aa8021f
--- /dev/null
+++ b/src/common/displayport/inc/dptestutil/dp_testmessage.h
@@ -0,0 +1,190 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort*********************************\
+*                                                                           *
+* Module: dp_testmessage.h                                                  *
+*                                                                           *
+\***************************************************************************/
+#ifndef INCLUDED_DP_TESTMESSAGE_H
+#define INCLUDED_DP_TESTMESSAGE_H
+
+#include "dp_auxdefs.h"
+
+#define DP_TESTMESSAGE_QSES 0x38
+#include "dp_qse.h"
+
+#include "dp_connector.h"
+
+#define DP_LPRIME_SIZE 20
+namespace DisplayPort
+{
+    // Test request status, shared between DPLib and NVAPI.
+    typedef enum
+    {
+        DP_TESTMESSAGE_REQUEST_STATUS_PENDING = 0,      // the request is still being processed
+        DP_TESTMESSAGE_REQUEST_STATUS_DONE = 1,         // the request has been processed
+        DP_TESTMESSAGE_REQUEST_STATUS_ERROR = 2,        // error; the DP lib is busy with another request
+        DP_TESTMESSAGE_REQUEST_STATUS_NEWREQUEST = 3,   // new request for the user
+    } DP_TESTMESSAGE_REQUEST_STATUS;
+
+    // Request type enum.
+    typedef enum
+    {
+        DP_TESTMESSAGE_REQUEST_TYPE_QSES,   // TestMessage from DPTestUtil.
+    } DP_TESTMESSAGE_REQUEST_TYPE;
+
+    //
+    // NVAPI QSES reply message struct.
+    // Do NOT inherit from any class: this must stay consistent with the
+    // NVAPI definition, which is a C struct.
+    //
+    typedef struct
+    {
+        StreamState streamState;
+        bool repeaterFuncPresent;
+        bool encryption;
+        bool authentication;
+        OutputSinkType sinkType;
+        OutputCPType cpType;
+        bool signedLPrime;
+        NvU8 streamId;
+        NvU8 LPrime[DP_LPRIME_SIZE];
+    } DP_TESTMESSAGE_REQUEST_QSES_OUTPUT;
+
+    //
+    // Version of QSES_OUTPUT that is consistent with the struct in
+    // dp_messageencodings.h (without the QSES LPrime).
+    //
+    // For NVAPI backward compatibility, do not modify the
+    // DP_TESTMESSAGE_REQUEST_QSES_OUTPUT definition; instead keep this
+    // internal version in sync with the DPLib implementation.
+    //
+    // The DPLib message implementation uses this version for now. TestMessage
+    // needs this structure to safely copy info from the QSES message structure.
+    //
+    typedef struct
+    {
+        StreamState streamState;
+        bool repeaterFuncPresent;
+        bool encryption;
+        bool authentication;
+        OutputSinkType sinkType;
+        OutputCPType cpType;
+        bool signedLPrime;
+        NvU8 streamId;
+    } DP_TESTMESSAGE_REQUEST_QSES_OUTPUT_V2;
+
+    typedef struct
+    {
+        // indicates which request type to issue; the caller must fill this in
+        DP_TESTMESSAGE_REQUEST_TYPE requestType;
+        // stream ID to query via QSES; the caller must fill this in
+        NvU32 streamID;
+        // reply buffer
+        DP_TESTMESSAGE_REQUEST_QSES_OUTPUT reply;
+    } DP_TESTMESSAGE_REQUEST_QSES_INPUT;
+
+    class TestMessage;
+    struct ConnectorImpl;
+
+    struct DPTestMessageCompletion : public MessageManager::Message::MessageEventSink
+    {
+        TestMessage *parent;
+
+    public:
+        void setParent(TestMessage *parent)
+        {
+            this->parent = parent;
+        }
+        // Callback invoked when the message fails; the DP lib status
+        // (testMessageStatus) must be set to DONE.
+        void messageFailed(MessageManager::Message * from, NakData * data);
+
+        // Callback invoked when the message completes; the DP lib status
+        // (testMessageStatus) must be set to DONE.
+        // If the message has a reply, the reply must be recorded in the DP lib
+        // so it can be sent back to the user later.
+        void messageCompleted(MessageManager::Message * from);
+    };
+
+    class TestMessage : virtual public Object
+    {
+    private:
+        ConnectorImpl *pConnector;
+        // check that the user-provided request struct is of valid size
+        inline bool isValidStruct(DP_TESTMESSAGE_REQUEST_TYPE requestType, NvU32 structSize)
+        {
+            switch (requestType)
+            {
+                case DP_TESTMESSAGE_REQUEST_TYPE_QSES:
+                {
+                    return structSize == sizeof(DP_TESTMESSAGE_REQUEST_QSES_INPUT);
+                }
+                default:
+                    return false;
+            }
+        }
+        MessageManager *pMsgManager;
+        DPTestMessageCompletion diagCompl;
+
+        // Data structure for Generic Message.
+        NvU32 replyBytes;
+        void sendTestMsgQSES(void *pBuffer);
+
+    public:
+        DP_TESTMESSAGE_REQUEST_QSES_OUTPUT_V2 qsesReply;
+
+        DP_TESTMESSAGE_REQUEST_STATUS testMessageStatus;
+
+        TestMessage() : testMessageStatus(DP_TESTMESSAGE_REQUEST_STATUS_DONE)
+        {
+            diagCompl.setParent(this);
+            pConnector = 0;
+            pMsgManager = 0;
+            replyBytes = 0;
+            qsesReply.streamState = DoesNotExist;
+            qsesReply.repeaterFuncPresent = 0;
+            qsesReply.encryption = 0;
+            qsesReply.authentication = 0;
+            qsesReply.sinkType = StreamUnconnected;
+            qsesReply.cpType = HDCP1x;
+            qsesReply.signedLPrime = 0;
+            qsesReply.streamId = '\0';
+        }
+        DP_TESTMESSAGE_STATUS sendDPTestMessage(void *pBuffer,
+                                                NvU32 requestSize,
+                                                NvU32 *pDpStatus);
+        MessageManager * getMessageManager();
+        void setupTestMessage(MessageManager *msgManager, ConnectorImpl *connector)
+        {
+            pMsgManager = msgManager;
+            pConnector = connector;
+        }
+    };
+}
+
+
+#endif
+
diff --git a/src/common/displayport/src/dp_auxretry.cpp b/src/common/displayport/src/dp_auxretry.cpp
new file mode 100644
index 0000000..79bb131
--- /dev/null
+++ b/src/common/displayport/src/dp_auxretry.cpp
@@ -0,0 +1,316 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* Module: dp_auxretry.cpp * +* Interface implemented by library client. * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_auxbus.h" +#include "dp_auxretry.h" +#include "dp_messageheader.h" +#include "dp_printf.h" + +#include "displayport.h" + +using namespace DisplayPort; + +// +// Read a DPCD address. +// - allows size greater than single transaction/burst size +// - handles defer retries +// - handles nacks with incomplete data +// +AuxRetry::status AuxRetry::readTransaction(int address, NvU8 * buffer, unsigned size, unsigned retries) +{ + unsigned completed; + AuxBus::status s; + + DP_ASSERT( size <= aux->transactionSize() ); + + do + { + s = aux->transaction(AuxBus::read, AuxBus::native, address, buffer, size, &completed); + + // + // Got success & requested data. Also size of returned data is + // expected & non zero. + // + if ((s == AuxBus::success) && (completed == size) && (completed != 0)) + { + return ack; + } + else + { + // + // Handle defer case with a simple retry + // + if (s == AuxBus::defer) + { + if (retries) + { + --retries; + continue; + } + + return defer; + } + + // + // Nack shouldn't happen in general. Unsupported registers + // are supposed to ACK with size of 0. + // + if ( s == AuxBus::nack ) + { + return nack; + } + + if ( completed == 0 ) + { + return unsupportedRegister; + } + + // + // We got less data back than we requested... + // It's unclear when this might happen in the spec. + // We can either + // 1. Split the read into multiple pieces + // (Dangerous since we may receive non-atomic updates) + // 2. Retry + // + if ( completed < size ) + { + // + // Retry + // + if (retries) + { + --retries; + continue; + } + else + { + // Closest approximation is a defer + return defer; + } + } + } + } while(retries); + + if ((s == AuxBus::defer) || (completed < size)) + { + return defer; + } + + return ack; +} + +// +// Write a DPCD address. 
+// - allows size greater than single transaction/burst size +// - handles defer retries +// - handles nacks with incomplete data +// +AuxRetry::status AuxRetry::writeTransaction(int address, NvU8 * buffer, unsigned size, unsigned retries) +{ + unsigned completed; + AuxBus::status s; + + DP_ASSERT( size <= aux->transactionSize() ); + + do + { + s = aux->transaction(AuxBus::write, AuxBus::native, address, buffer, size, &completed); + + // + // Got success & requested data. Also size of returned data is + // expected & non zero. + // + if ((s == AuxBus::success) && (completed == size) && (completed != 0)) + { + return ack; + } + else + { + // + // Handle defer case with a simple retry + // + if (s == AuxBus::defer) + { + if (retries) + { + --retries; + continue; + } + + return defer; + } + + // + // Nack shouldn't happen in general. Unsupported registers + // are supposed to ACK with size of 0. + // + if ( s == AuxBus::nack ) + { + return nack; + } + + DP_ASSERT( s == AuxBus::success); + + if ( completed == 0 ) + { + return unsupportedRegister; + } + + // + // Incomplete write? + // Shouldn't happen. Just retry if it does + // + if ( completed < size ) + { + // + // Retry + // + if (retries) + { + --retries; + continue; + } + else + { + // Closest approximation is a defer + return defer; + } + } + } + } while(retries); + + if ((s == AuxBus::defer) || (completed < size)) + { + return defer; + } + + return ack; +} + +// +// Similar to readTransaction except that it supports reading +// larger spans than AuxBus::transactionSize() +// +AuxRetry::status AuxRetry::read(int address, NvU8 * buffer, unsigned size, unsigned retries) +{ + for (unsigned i = 0 ; i < size; ) + { + int todo = DP_MIN(size - i, aux->transactionSize()); + status s = readTransaction(address+i, buffer+i, todo, retries); + + if (s != ack) + { + return s; + } + + i += todo; + } + + return ack; +} + +// +// Similar to writeTransaction except that it supports writing +// larger spans than AuxBus::transactionSize() +// +AuxRetry::status AuxRetry::write(int address, NvU8 * buffer, unsigned size, unsigned retries) +{ + for (unsigned i = 0 ; i < size; ) + { + int todo = DP_MIN(size - i, aux->transactionSize()); + status s = writeTransaction(address+i, buffer+i, todo, retries); + + if (s != ack) + { + return s; + } + + i += todo; + } + + return ack; +} + +AuxBus::status AuxLogger::transaction(Action action, Type type, int address, + NvU8 * buffer, unsigned sizeRequested, + unsigned * sizeCompleted, unsigned * pNakReason, + NvU8 offset, NvU8 nWriteTransactions) +{ + AuxBus::status result = bus->transaction(action, type, address, buffer, sizeRequested, sizeCompleted); + hint[0] = 0; + // + // Do the hex dump. 
+ // - We can't make library calls + // - We need to do this in one printf + if (result == success) + { + if (type == native) + if (address == NV_DPCD_MBOX_DOWN_REQ || address == NV_DPCD_MBOX_UP_REP || + address == NV_DPCD_MBOX_DOWN_REP || address == NV_DPCD_MBOX_UP_REQ) + { + unsigned len = *sizeCompleted; + Buffer storage(buffer, len); + BitStreamReader reader(&storage, 0, len*8); + MessageHeader header; + DisplayPort::decodeHeader(&reader, &header, Address(1)); + Address::StringBuffer sb; + DP_USED(sb); + dpHexDump(&hex[0], sizeof(hex), buffer, header.headerSizeBits/8); + dpHexDump(&hex_body[0], sizeof(hex), buffer + header.headerSizeBits/8, len - header.headerSizeBits/8); +#if defined(_DEBUG) || defined(DEBUG) + const char * name = ""; + if (header.isTransactionStart && action==write && len > header.headerSizeBits/8) + name = getRequestId(buffer[header.headerSizeBits/8]); + + DP_PRINTF(DP_NOTICE, "DP-AUX> %s%s%s%s%04Xh hint(to:%s %s%s %s #%d) { %s| %s}", + sizeRequested == *sizeCompleted ? "" : "INCOMPLETE ", getStatus(result), + getAction(action), getType(type), address, + header.address.toString(sb), header.isTransactionStart ? "S" : "", + header.isTransactionEnd ? "E" : "", name, header.messageNumber, + hex, hex_body); +#endif + return result; + } + } + else + hex[0] = 0; + + dpHexDump(&hex[0], sizeof(hex), buffer, *sizeCompleted); + DP_PRINTF(DP_NOTICE, "DP-AUX> %s%s%s%s%04Xh { %s }", sizeRequested == *sizeCompleted ? "" : "INCOMPLETE ", + getStatus(result), getAction(action), getType(type), address, hex); + + return result; +} + +AuxBus * DisplayPort::CreateAuxLogger(AuxBus * auxBus) +{ + return new AuxLogger(auxBus); +} diff --git a/src/common/displayport/src/dp_bitstream.cpp b/src/common/displayport/src/dp_bitstream.cpp new file mode 100644 index 0000000..39117de --- /dev/null +++ b/src/common/displayport/src/dp_bitstream.cpp @@ -0,0 +1,204 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_bitstream.cpp * +* Implementation of Big Endian bit streams.
* +* +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_bitstream.h" + +using namespace DisplayPort; +bool BitStreamReader::read(unsigned * value, unsigned bits) +{ + unsigned topbit = (7- (this->bitsOffset & 7)); + + if (this->bitsOffset + bits > this->bitsEnd) + { + return false; + } + + // + // We're filling the byte down from 'topbit' towards 0. + // Can we fit all of the bits starting at topbit before + // overflowing to the next byte? + // + if (bits <= (topbit+1)) + { + int bottombit = topbit - (bits-1); + *value = (this->buffer()->data[this->bitsOffset / 8] >> bottombit) & ((1 << bits)-1); + + this->bitsOffset+=bits; + return true; + } + + // + // We're either reading too many bits or we're straddling + // a byte boundary. Serialize bit by bit. + // NOTE: This scenario is entirely unlikely. Don't optimize. + // + + *value = 0; + while (bits) + { + unsigned bit; + if (!read(&bit, 1)) + { + return false; + } + *value = *value * 2 + bit; + bits--; + } + + return true; +} + +unsigned BitStreamReader::readOrDefault(unsigned bits, unsigned defaultValue) +{ + unsigned value; + + if (read(&value, bits)) + { + return value; + } + else + { + return defaultValue; + } +} + + +bool BitStreamReader::align(unsigned align) +{ + // Verify alignment is a power of two + if (!(align && ((align & (align - 1)) == 0))) + { + DP_ASSERT(0); + } + else + { + if (this->bitsOffset & (align - 1)) + { + this->bitsOffset = (this->bitsOffset + align) &~ (align - 1); + } + } + return this->bitsOffset <= this->bitsEnd; +} + +bool BitStreamWriter::write(unsigned value, unsigned bits) +{ + DP_ASSERT((value < (1ULL << bits)) && "Value out of range"); + unsigned topbit = (7- (this->bitsOffset & 7)); + + if (this->bitsOffset + bits > this->buffer()->length * 8) + { + this->buffer()->resize((this->bitsOffset + bits+7)/8); + } + + // + // We're filling the byte down from 'topbit' towards 0. + // Can we fit all of the bits starting at topbit before + // overflowing to the next byte? + // + if (bits <= (topbit+1)) + { + int bottombit = topbit - (bits-1); + NvU8 clearmask = ((1 << bits)-1) << bottombit; + + this->buffer()->data[this->bitsOffset / 8] = (NvU8)((this->buffer()->data[this->bitsOffset / 8] &~ clearmask) | (value << bottombit)); + + this->bitsOffset+=bits; + return true; + } + + // + // We're either writing too many bits or we're straddling + // a byte boundary. Serialize bit by bit. + // NOTE: This scenario is entirely unlikely. Don't optimize.
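+ // e.g. writing value = 0x5 (binary 101) with bits = 3 down this path emits + // bit 1, then 0, then 1 (MSB first), one single-bit write() call per bit.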
+ // + + while (bits) + { + bits --; + if (!write( (value >> bits) & 1, 1)) + { + return false; + } + } + + return true; +} + +bool BitStreamWriter::align(unsigned align) +{ + // Verify alignment is a power of two + if (!(align && ((align & (align - 1)) == 0))) + { + DP_ASSERT(0); + } + else + { + if (this->bitsOffset & (align - 1)) + return this->write(0, align - (this->bitsOffset & (align - 1))); + } + + return true; +} + +unsigned BitStreamReader::offset() +{ + return this->bitsOffset; +} + +unsigned BitStreamWriter::offset() +{ + return this->bitsOffset; +} + +Buffer * BitStreamWriter::buffer() +{ + return this->targetBuffer; +} + +Buffer * BitStreamReader::buffer() +{ + return this->sourceBuffer; +} + + +BitStreamWriter::BitStreamWriter(Buffer * buffer, unsigned bitsOffset) +{ + this->targetBuffer = buffer; + this->bitsOffset = bitsOffset; +} + + +BitStreamReader::BitStreamReader(Buffer * buffer, unsigned bitsOffset, unsigned bitsCount) +{ + this->sourceBuffer = buffer; + this->bitsOffset = bitsOffset; + this->bitsEnd = bitsCount + bitsOffset; +} diff --git a/src/common/displayport/src/dp_buffer.cpp b/src/common/displayport/src/dp_buffer.cpp new file mode 100644 index 0000000..cb30938 --- /dev/null +++ b/src/common/displayport/src/dp_buffer.cpp @@ -0,0 +1,272 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_buffer.cpp * +* Resizable byte buffer and stream operations * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_buffer.h" + +using namespace DisplayPort; + +void DisplayPort::swapBuffers(Buffer & left, Buffer & right) +{ + NvU8 *tmpData = left.data; + unsigned tmpLength = left.length; + unsigned tmpCapacity = left.capacity; + bool tmpErrorState = left.errorState; + + left.data = right.data; + left.length = right.length; + left.capacity = right.capacity; + left.errorState = right.errorState; + + right.data = tmpData; + right.length = tmpLength; + right.capacity = tmpCapacity; + right.errorState= tmpErrorState; +} + + +bool Stream::seek(unsigned where) +{ + // + // Allow seek to any position in the file INCLUDING + // the first byte past the end of the file. 
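+ // e.g. with parent->length == 10, seek(10) succeeds and leaves the stream + // at end-of-data, while seek(11) fails.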
+ // + if (where > this->parent->length) + { + return false; + } + + this->byteOffset = where; + + return true; +} + +bool Stream::read(NvU8 * buffer, unsigned size) +{ + unsigned stopReadAt = this->byteOffset + size; + + if (stopReadAt > this->parent->length) + { + return false; + } + + dpMemCopy(buffer, this->parent->data + this->byteOffset, size); + this->byteOffset = stopReadAt; + + return true; +} + +bool Buffer::resize(unsigned stopWriteAt) +{ + bool mustIncrease = stopWriteAt > this->capacity; + + if (mustIncrease || (stopWriteAt * 4 < this->capacity) ) + { + unsigned newCapacity; + NvU8 * newBuffer; + + newCapacity = 32; + + while (newCapacity <= stopWriteAt) + { + newCapacity *= 2; + } + + if (newCapacity == this->capacity) { + this->length = stopWriteAt; + return true; + } + + newBuffer = (NvU8 *)dpMalloc(sizeof(NvU8) * newCapacity); + + if (!newBuffer) + { + if (mustIncrease) + { + if (this->data) + { + dpFree(this->data); + } + + this->errorState = true; + this->data = 0; + this->capacity = 0; + this->length = 0; + } + else + newCapacity = this->capacity; + + return false; + } + + if (this->data) + { + dpMemCopy(newBuffer, this->data, DP_MIN(newCapacity, this->length)); + dpFree(this->data); + } + + this->data = newBuffer; + this->capacity = newCapacity; + + } + + this->length = stopWriteAt; + return true; +} + +void Buffer::memZero() +{ + if (this->data) + dpMemZero(this->data, this->length); +} + +bool Stream::write(NvU8 * buffer, unsigned size) +{ + unsigned stopWriteAt = this->byteOffset + size; + + if (stopWriteAt > this->parent->length) + { + this->parent->resize(stopWriteAt); + } + + if (isError()) + return false; + + dpMemCopy( this->parent->data + this->byteOffset, buffer, size); + this->byteOffset = stopWriteAt; + this->parent->length = DP_MAX(this->parent->length, stopWriteAt); + + return true; +} + +unsigned Stream::remaining() +{ + return this->parent->length - this->byteOffset; +} + +unsigned Stream::offset() +{ + return this->byteOffset; +} + +Buffer::~Buffer() +{ + reset(); +} + +void Buffer::reset() +{ + if (this->data) + { + dpFree(this->data); + } + + length = 0; + capacity = 0; + data = 0; + errorState = false; +} + +bool Buffer::isError() const +{ + return this->errorState; +} + + +Stream::Stream(Buffer * buffer) + : parent(buffer), byteOffset(0) +{ +} + +bool Stream::isError() const +{ + return this->parent->errorState; +} + +Buffer::Buffer() + : data(0), length(0), capacity(0), errorState(false) +{ +} + +Buffer::Buffer(NvU8 * src, unsigned size) + : data(0), length(0), capacity(0), errorState(false) +{ + if (src && size && resize(size) && data) + dpMemCopy(data, src, size); +} + +Buffer::Buffer(const Buffer & other) + : data(0), length(0), capacity(0), errorState(false) +{ + if (other.isError()) + { + errorState = true; + } + else + { + if (resize(other.getLength()) && other.getData()) + { + if (capacity && getData()) + { + dpMemCopy(getData(), other.getData(), getLength()); + } + } + } +} + +Buffer & Buffer::operator = (const Buffer & other) +{ + if (other.isError()) + { + errorState = true; + } + else + { + if (resize(other.getLength())) + dpMemCopy(getData(), other.getData(), getLength()); + } + return *this; +} + + +bool Buffer::operator== (const Buffer & other) const +{ + if (length != other.length) + return false; + + for (unsigned i = 0; i < length; i++) + { + if (data[i] != other.data[i]) + return false; + + } + + return true; +} diff --git a/src/common/displayport/src/dp_configcaps.cpp b/src/common/displayport/src/dp_configcaps.cpp new file mode 
100644 index 0000000..53f801d --- /dev/null +++ b/src/common/displayport/src/dp_configcaps.cpp @@ -0,0 +1,3043 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_configcaps.cpp * +* Abstraction for basic caps registers * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_auxbus.h" +#include "dp_configcaps.h" +#include "dp_auxdefs.h" +#include "dp_printf.h" +#include "displayport.h" + +using namespace DisplayPort; + +void DPCDHALImpl::updateDPCDOffline() +{ + NvU8 buffer[16]; + unsigned retries = 16; + // Burst read from 0x00 to 0x0F. + if (AuxRetry::ack != bus.read(NV_DPCD_REV, &buffer[0], sizeof buffer, retries)) + { + dpcdOffline = true; + } + else + { + dpcdOffline = false; + } +} + +bool DPCDHALImpl::auxAccessAvailable() +{ + NvU8 buffer[16]; + unsigned retries = 16; + // Burst read from 0x00 to 0x0F. + if (AuxRetry::ack != bus.read(NV_DPCD_REV, &buffer[0], sizeof buffer, retries)) + { + return false; + } + else + { + return true; + } +} + +/*! 
+ * @brief Enable/Disable DP Tunnel BW allocation depending on support and client request + */ +void DPCDHALImpl::configureDpTunnelBwAllocation() +{ + NvU8 byte = 0; + + // 0xE000D, DP Tunneling capabilities for DIA + if (AuxRetry::ack == + bus.read(NV_DPCD20_DP_TUNNEL_CAPABILITIES, &byte, sizeof byte)) + { + caps.dpInTunnelingCaps.bIsSupported = + FLD_TEST_DRF(_DPCD20, _DP_TUNNEL_CAPABILITIES, + _DPTUNNELING_SUPPORT, _YES, byte); + + caps.dpInTunnelingCaps.bIsPanelReplayOptimizationSupported = + FLD_TEST_DRF(_DPCD20, _DP_TUNNEL_CAPABILITIES, + _PANEL_REPLAY_TUNNELING_OPTIMIZATION_SUPPORT, + _YES, byte); + + caps.dpInTunnelingCaps.bIsBwAllocationSupported = + FLD_TEST_DRF(_DPCD20, _DP_TUNNEL_CAPABILITIES, + _DPIN_BW_ALLOCATION_MODE_SUPPORT, + _YES, byte); + } + // Client has not requested or it is not enabled due to regkey override + if (!this->bEnableDpTunnelBwAllocationSupport) + { + bIsDpTunnelBwAllocationEnabled = false; + return; + } + + bIsDpTunnelBwAllocationEnabled = false; + + if (caps.dpInTunnelingCaps.bIsSupported) + { + if (AuxRetry::ack == + bus.read(NV_DPCD20_USB4_DRIVER_BW_CAPABILITY, &byte, sizeof byte)) + { + caps.dpInTunnelingCaps.bUsb4DriverBwAllocationSupport = + FLD_TEST_DRF(_DPCD20, _USB4_DRIVER, _BW_ALLOCATION, _YES, byte); + } + } + + bool bIsDpTunnelBwAllocationSupported = false; + + bIsDpTunnelBwAllocationSupported = caps.dpInTunnelingCaps.bIsSupported && + caps.dpInTunnelingCaps.bUsb4DriverBwAllocationSupport && + caps.dpInTunnelingCaps.bIsBwAllocationSupported; + + if (bIsDpTunnelBwAllocationEnabled == bIsDpTunnelBwAllocationSupported) + { + DP_PRINTF(DP_NOTICE, "Bw Allocation already in requested state: %d", bIsDpTunnelBwAllocationSupported); + return; + } + + if (!setDpTunnelBwAllocation(bIsDpTunnelBwAllocationSupported)) + { + DP_PRINTF(DP_ERROR, "Failed to set DP Tunnel BW allocation"); + } + + if (bIsDpTunnelBwAllocationEnabled != bIsDpTunnelBwAllocationSupported) + { + DP_PRINTF(DP_WARNING, "Unable to set BW allocation to requested state: %d", bIsDpTunnelBwAllocationSupported); + } +} + +void DPCDHALImpl::parseAndReadCaps() +{ + NvU8 buffer[16]; + NvU8 byte = 0; + AuxRetry::status status; + unsigned retries = 16; + + NvU8 lttprIdx = 0; + + caps.phyRepeaterCount = 0; + + // Burst read from 0x00 to 0x0F. + + // + // The Extended Receiver Capability field at DPCD Addresses 02200h through 022FFh is valid + // with DPCD Rev. 1.4 (and higher). + // + // A DPRX that supports the Extended Receiver Capability field must set the + // EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT bit in the TRAINING_AUX_RD_INTERVAL + // register (DPCD Address 0000Eh, bit 7) to 1 + // + caps.extendedRxCapsPresent = false; + if (AuxRetry::ack == bus.read(NV_DPCD_TRAINING_AUX_RD_INTERVAL, &byte, sizeof byte)) + { + caps.extendedRxCapsPresent = DRF_VAL(_DPCD14, _TRAINING_AUX_RD_INTERVAL, _EXTENDED_RX_CAP, byte); + } + + configureDpTunnelBwAllocation(); + + if (caps.extendedRxCapsPresent) + { + status = bus.read(NV_DPCD14_EXTENDED_REV, &buffer[0], sizeof buffer, retries); + } + else + { + status = bus.read(NV_DPCD_REV, &buffer[0], sizeof buffer, retries); + } + + if (AuxRetry::ack != status) + { + // Failed to read caps. + // Set an invalid state here and make sure we REMEMBER we couldn't get the caps + caps.revisionMajor = 0; + dpcdOffline = true; + return; + } + + // reset the faked dpcd flag since real LT should be possible now.
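+ // (populateFakeDpcd() and a failed caps read both set dpcdOffline; reaching + // this point means the read above was ACKed, so a real DPCD responded.)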
+ dpcdOffline = false; + + // reset edp revision to 0 + caps.eDpRevision = 0; + + if (overrideDpcdRev) + { + // Override the revision no. as DPCD override regkey is set + caps.revisionMajor = DRF_VAL(_DPCD, _REV, _MAJOR, overrideDpcdRev); + caps.revisionMinor = DRF_VAL(_DPCD, _REV, _MINOR, overrideDpcdRev); + } + else + { + caps.revisionMajor = DRF_VAL(_DPCD, _REV, _MAJOR, buffer[0]); + caps.revisionMinor = DRF_VAL(_DPCD, _REV, _MINOR, buffer[0]); + if (isAtLeastVersion(1, 2)) + { + // + // WAR required for panels with MSTAR chip as they report themselves as + // DP1.2 but they don't support DP1.2. Check OUI & ESI sinkCount. If OUI + // is not supported & sinkCount is "0", downgrade the revision to 1.1. + // + if (FLD_TEST_DRF(_DPCD, _DOWN_STREAM_PORT, _OUI_SUPPORT, _NO, buffer[7])) + { + // Read the ESI sinkCount & overwrite revision no. if ESI not supported + NvU8 esiBuffer[1] = {0}; + NvU32 sinkCount; + AuxRetry::status status; + // + // Don't just check the transaction status as not-supporting ESI means it may + // NACK a transaction to ESI space or may return "0" as sinkCount. We need + // to override the revision Minor in both cases. + // + status = bus.read(NV_DPCD_SINK_COUNT_ESI, &esiBuffer[0], sizeof esiBuffer); + sinkCount = DRF_VAL(_DPCD, _SINK_COUNT_ESI, _SINK_COUNT, esiBuffer[0]); + + if ((sinkCount == 0) || (status != AuxRetry::ack)) + { + // If ESI not supported then overwrite the revision + caps.revisionMajor = 1; + caps.revisionMinor = 1; + } + } + + // Check if DPCD_DISPLAY_CONTROL_CAPABLE = 1 + if (FLD_TEST_DRF(_DPCD, _EDP_CONFIG_CAP, _DISPLAY_CONTROL_CAPABLE, _YES, buffer[0x0D])) + { + NvU8 edpBuffer[1] = {0}; + status = bus.read(NV_DPCD_EDP_REV, &edpBuffer[0], sizeof edpBuffer); + caps.eDpRevision = DRF_VAL(_DPCD, _EDP, _REV_VAL, edpBuffer[0]); + } + } + } + + bIndexedLinkrateCapable = false; + + if (isAtLeastVersion(1,4) && caps.extendedRxCapsPresent == false) + { + DP_ASSERT(0 && "A DPRX with DPCD Rev. 1.4 (or higher) must have Extended Receiver Capability field."); + } + // Support ESI register space only when the GPU supports DP1.2 MST + caps.supportsESI = (isAtLeastVersion(1,2) && + FLD_TEST_DRF(0073_CTRL_CMD_DP, _GET_CAPS_DP_VERSIONS_SUPPORTED, _DP1_2, _YES, gpuDPSupportedVersions)); + + + // First find the max link rate based on the MAX_LINK_BANDWIDTH register + if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _1_62_GBPS, buffer[1])) + caps.maxLinkRate = dp2LinkRate_1_62Gbps; + else if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _2_70_GBPS, buffer[1])) + caps.maxLinkRate = dp2LinkRate_2_70Gbps; + else if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _5_40_GBPS, buffer[1])) + caps.maxLinkRate = dp2LinkRate_5_40Gbps; + else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_BANDWIDTH, _VAL, _8_10_GBPS, buffer[1])) + caps.maxLinkRate = dp2LinkRate_8_10Gbps; + else + { + DP_ASSERT(0 && "Unknown max link rate.
Assuming DP 1.1 defaults"); + caps.maxLinkRate = dp2LinkRate_2_70Gbps; + } + + if (caps.eDpRevision >= NV_DPCD_EDP_REV_VAL_1_4 || this->bBypassILREdpRevCheck) + { + NvU16 maxILRLinkRate = 0; + if (getRawLinkRateTable((NvU8*)&caps.linkRateTable[0])) + { + // First entry must be non-zero for validation + if (caps.linkRateTable[0] != 0) + { + bIndexedLinkrateCapable = true; + for (int i = 0; (i < NV_DPCD_SUPPORTED_LINK_RATES__SIZE) && caps.linkRateTable[i]; i++) + { + if (maxILRLinkRate < caps.linkRateTable[i]) + maxILRLinkRate = caps.linkRateTable[i]; + } + if (maxILRLinkRate) { + // If max ILR is higher than MAX_LINK_BANDWIDTH, then update overall maxLinkRate + caps.maxLinkRate = DP_MAX(caps.maxLinkRate, LINK_RATE_200KHZ_TO_10MHZ((NvU64)maxILRLinkRate)); + } + } + } + } + + // + // To prevent WAR being overridden. + // + if (overrideDpcdMaxLaneCount) + { + caps.maxLaneCount = overrideDpcdMaxLaneCount; + } + else + { + caps.maxLaneCount = DRF_VAL(_DPCD, _MAX_LANE_COUNT, _LANE, buffer[0x2]); + } + + if (!IS_VALID_LANECOUNT(caps.maxLaneCount)) + { + DP_ASSERT(0 && "Invalid lane count. Assuming 1"); + caps.maxLaneCount = 1; + } + + caps.bPostLtAdjustmentSupport = FLD_TEST_DRF(_DPCD, _MAX_LANE_COUNT, _POST_LT_ADJ_REQ_SUPPORT, _YES, buffer[0x2]); + caps.enhancedFraming = FLD_TEST_DRF(_DPCD, _MAX_LANE_COUNT, _ENHANCED_FRAMING, _YES, buffer[0x2]); + if (isAtLeastVersion(1,1) && (!caps.enhancedFraming)) + { + DP_ASSERT(0 && "A DPRX with DPCD Rev. 1.1 (or higher) must have enhanced framing capability."); + } + + if (isAtLeastVersion(1,2) && + FLD_TEST_DRF(0073_CTRL_CMD_DP, _GET_CAPS_DP_VERSIONS_SUPPORTED, _DP1_2, _YES, gpuDPSupportedVersions) && + caps.bPostLtAdjustmentSupport) + { + // Source grants post Link training adjustment support + bGrantsPostLtRequest = true; + } + else + { + // Disable post Link training adjustment support whenever sink does not report capability + // This covers the case of MST to SST transition during which initially this flag is set, we need to explicitly reset this + // in order to avoid PostLTAdjustment during LT. + bGrantsPostLtRequest = false; + } + + caps.supportsNoHandshakeTraining = FLD_TEST_DRF(_DPCD, _MAX_DOWNSPREAD, _NO_AUX_HANDSHAKE_LT, _TRUE, buffer[0x3]); + caps.bSupportsTPS4 = FLD_TEST_DRF(_DPCD14, _MAX_DOWNSPREAD, _TPS4_SUPPORTED, _YES, buffer[0x3]); + + caps.NORP = DRF_VAL(_DPCD, _NORP, _VAL, buffer[0x4]) + 1; + + caps.downStreamPortPresent = FLD_TEST_DRF(_DPCD, _DOWNSTREAMPORT, _PRESENT, _YES, buffer[0x5]); + caps.detailedCapInfo = FLD_TEST_DRF(_DPCD, _DOWNSTREAMPORT, _DETAILED_CAP_INFO_AVAILABLE, _YES, buffer[0x5]); + caps.downStreamPortType = DRF_VAL(_DPCD, _DOWNSTREAMPORT, _TYPE, buffer[0x5]); + + switch (DRF_VAL(_DPCD, _DOWNSTREAMPORT, _TYPE, buffer[0x5])) + { + case 0: legacyPort[0].type = DISPLAY_PORT; break; + case 1: legacyPort[0].type = ANALOG_VGA; break; + case 2: legacyPort[0].type = DVI; break; + case 3: legacyPort[0].type = WITHOUT_EDID; break; + default: DP_ASSERT(0 && "Unknown port type"); break; + } + + caps.downStreamPortCount = DRF_VAL(_DPCD, _DOWN_STREAM_PORT, _COUNT, buffer[0x7]); + caps.msaTimingParIgnored = FLD_TEST_DRF(_DPCD, _DOWN_STREAM_PORT, _MSA_TIMING_PAR_IGNORED, _YES, buffer[0x7]); + caps.ouiSupported = FLD_TEST_DRF(_DPCD, _DOWN_STREAM_PORT, _OUI_SUPPORT, _YES, buffer[0x7]); + + if (caps.downStreamPortPresent && !caps.downStreamPortCount) + { + DP_PRINTF(DP_WARNING, "DPHAL> Non-compliant device, reporting downstream port present, but no downstream ports. 
Overriding port count to 1."); + caps.downStreamPortCount = 1; + } + + // Burst read from 0x20 to 0x22. + bus.read(NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS, &buffer[0], 0x22 - 0x20 + 1); + + caps.videoFallbackFormats = buffer[0]; + + caps.supportsMultistream = FLD_TEST_DRF(_DPCD, _MSTM, _CAP, _YES, buffer[0x1]); + + caps.numberAudioEndpoints = (unsigned)(DRF_VAL(_DPCD, _NUMBER_OF_AUDIO_ENDPOINTS, _VALUE, buffer[0x2])); + + if (bLttprSupported) + { + // Burst read from 0xF0000 to 0xF0009 + if (AuxRetry::ack == bus.read(NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV, &buffer[0], 10, retries)) + { + caps.repeaterCaps.revisionMinor = DRF_VAL(_DPCD14, _LT_TUNABLE_PHY_REPEATER_REV, _MINOR, buffer[0x0]); + caps.repeaterCaps.revisionMajor = DRF_VAL(_DPCD14, _LT_TUNABLE_PHY_REPEATER_REV, _MAJOR, buffer[0x0]); + + if (lttprIsAtLeastVersion(1, 4)) + { + caps.phyRepeaterCount = mapPhyRepeaterVal(DRF_VAL(_DPCD14, _PHY_REPEATER_CNT, _VAL, buffer[0x2])); + + if (caps.phyRepeaterCount != 0) + { + if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_RATE_PHY_REPEATER, _VAL, _1_62_GBPS, buffer[1])) + caps.repeaterCaps.maxLinkRate = dp2LinkRate_1_62Gbps; + else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_RATE_PHY_REPEATER, _VAL, _2_70_GBPS, buffer[1])) + caps.repeaterCaps.maxLinkRate = dp2LinkRate_2_70Gbps; + else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_RATE_PHY_REPEATER, _VAL, _5_40_GBPS, buffer[1])) + caps.repeaterCaps.maxLinkRate = dp2LinkRate_5_40Gbps; + else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_RATE_PHY_REPEATER, _VAL, _8_10_GBPS, buffer[1])) + caps.repeaterCaps.maxLinkRate = dp2LinkRate_8_10Gbps; + else + { + DP_ASSERT(0 && "Unknown max link rate or HBR2 without at least DP 1.2. Assuming DP 1.1 defaults"); + caps.repeaterCaps.maxLinkRate = dp2LinkRate_2_70Gbps; + } + + caps.repeaterCaps.maxLaneCount = + DRF_VAL(_DPCD14, _MAX_LANE_COUNT_PHY_REPEATER, + _VAL, buffer[0x4]); + + // The extended wake timeout is requested in units of 10 ms. + caps.repeaterCaps.phyRepeaterExtendedWakeTimeoutMs = + DRF_VAL(_DPCD14, + _PHY_REPEATER_EXTENDED_WAKE_TIMEOUT, + _REQ, buffer[0x5]) * 10; + + // Set FEC to Capable for repeater by default. + caps.repeaterCaps.bFECSupported = true; + for (lttprIdx = 0; lttprIdx < caps.phyRepeaterCount; lttprIdx++) + { + if (AuxRetry::ack == + bus.read(NV_DPCD14_PHY_REPEATER_FEC_CAP_0(lttprIdx), &byte, 1)) + { + caps.repeaterCaps.bFECSupportedRepeater[lttprIdx] = + FLD_TEST_DRF(_DPCD14, + _PHY_REPEATER_FEC_CAP_0, + _FEC_CAPABLE, + _YES, + byte); + + // bFECSupported is only true if all LTTPRs support FEC.
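+ // Each loop iteration below ANDs one repeater's FEC capability into the + // aggregate, so a single LTTPR without FEC clears it for the whole chain.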
+ caps.repeaterCaps.bFECSupported &= + caps.repeaterCaps.bFECSupportedRepeater[lttprIdx]; + } + } + caps.repeaterCaps.bAuxlessALPMSupported = + FLD_TEST_DRF(_DPCD20, _PHY_REPEATER_ALPM_CAPS, _AUX_LESS, _SUPPORTED, buffer[9]); + } + else + { + caps.repeaterCaps.maxLinkRate = 0; + } + } + else + { + // not supported DP revision, we should not be doing LTTPR training + caps.phyRepeaterCount = 0; + caps.repeaterCaps.maxLinkRate = 0; + } + } + } + + // Check if the device requests extended sleep wake timeout + if (AuxRetry::ack == bus.read(NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST, &buffer[0], 1)) + { + if (buffer[0] == NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_1MS) + { + caps.extendedSleepWakeTimeoutRequestMs = DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_DEFAULT_MS; + } + else + { + caps.extendedSleepWakeTimeoutRequestMs = buffer[0] * 20; + } + } + else + { + caps.extendedSleepWakeTimeoutRequestMs = 0; + } + + byte = 0U; + dpMemZero(&caps.psrCaps, sizeof(vesaPsrSinkCaps)); + + status = bus.read(NV_DPCD_EDP_PSR_VERSION, &byte, sizeof byte); + if (status == AuxRetry::ack && byte > 0U) + { + caps.psrCaps.psrVersion = byte; + } + + if (caps.psrCaps.psrVersion) + { + unsigned psrSetupTimeMap[8] = { 330U, 275U, 220U, 165U, 110U, 55U, 0U }; + byte = 0U; + if (AuxRetry::ack == bus.read(NV_DPCD_EDP_PSR_CAP, &byte, sizeof byte)) + { + caps.psrCaps.linkTrainingRequired = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CAP, _LT_NEEDED, _YES, byte); + caps.psrCaps.psrSetupTime = + psrSetupTimeMap[DRF_VAL(_DPCD_EDP, _PSR_CAP,_SETUP_TIME, byte)]; + caps.psrCaps.yCoordinateRequired = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CAP,_Y_COORD_NEEDED, _YES, byte); + caps.psrCaps.psr2UpdateGranularityRequired = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CAP,_GRAN_REQUIRED, _YES, byte); + } + + // Version 2 supports PSR2 and SU + if (caps.psrCaps.psrVersion == 2U) + { + NvU16 xGranular = 0U; + if (AuxRetry::ack == bus.read(NV_DPCD_EDP_PSR2_X_GRANULARITY_H, &byte, sizeof byte)) + { + xGranular = byte; + } + + byte = 0U; + if (AuxRetry::ack == bus.read(NV_DPCD_EDP_PSR2_X_GRANULARITY_L, &byte, sizeof byte)) + { + xGranular = (xGranular << 8U) | byte; + } + + caps.psrCaps.suXGranularity = xGranular; + } + + // version 3 supports Y coordinate + if (caps.psrCaps.psrVersion > 2U) + { + if (AuxRetry::ack == bus.read(NV_DPCD_EDP_PSR2_Y_GRANULARITY, &byte, sizeof byte)) + { + caps.psrCaps.suYGranularity = byte; + } + } + } + + if (bIsDpTunnelBwAllocationEnabled) + { + AuxRetry::status busReadStatus = bus.read(NV_DPCD20_DP_TUNNELING_MAX_LANE_COUNT, &byte, sizeof byte); + + if (AuxRetry::ack == busReadStatus) + { + caps.dpInTunnelingCaps.maxLaneCount = DRF_VAL(_DPCD20, _DP_TUNNELING_MAX_LANE_COUNT, _LANE, byte); + } + else + { + DP_PRINTF(DP_ERROR, "Failed to read NV_DPCD20_DP_TUNNELING_MAX_LANE_COUNT. 
Return: %d", busReadStatus); + } + + busReadStatus = bus.read(NV_DPCD20_DP_TUNNELING_8B10B_MAX_LINK_RATE, &byte, sizeof byte); + if (AuxRetry::ack == busReadStatus) + { + if (FLD_TEST_DRF(_DPCD20, _DP_TUNNELING_8B10B, _MAX_LINK_RATE_VAL, _1_62_GBPS, byte)) + { + caps.dpInTunnelingCaps.maxLinkRate = dp2LinkRate_1_62Gbps; + } + else if (FLD_TEST_DRF(_DPCD20, _DP_TUNNELING_8B10B, _MAX_LINK_RATE_VAL, _2_70_GBPS, byte)) + { + caps.dpInTunnelingCaps.maxLinkRate = dp2LinkRate_2_70Gbps; + } + else if (FLD_TEST_DRF(_DPCD20, _DP_TUNNELING_8B10B, _MAX_LINK_RATE_VAL, _5_40_GBPS, byte)) + { + caps.dpInTunnelingCaps.maxLinkRate = dp2LinkRate_5_40Gbps; + } + else if (FLD_TEST_DRF(_DPCD20, _DP_TUNNELING_8B10B, _MAX_LINK_RATE_VAL, _8_10_GBPS, byte)) + { + caps.dpInTunnelingCaps.maxLinkRate = dp2LinkRate_8_10Gbps; + } + else + { + DP_ASSERT(0 && "Unknown max link rate. Assuming DP 1.1 defaults"); + caps.dpInTunnelingCaps.maxLinkRate = dp2LinkRate_2_70Gbps; + } + } + else + { + DP_PRINTF(DP_ERROR, "Failed to read NV_DPCD20_DP_TUNNELING_8B10B_MAX_LINK_RATE. Return: %d", busReadStatus); + } + + } + parsePortDescriptors(); +} + +// +// Legacy link rates: DPCD offset 1 * 27000000 +// ILRs: DPCD offset: 0x10~0x1F +// +LinkRate DPCDHALImpl::getMaxLinkRate() +{ + LinkRate maxLinkRate = caps.maxLinkRate; + + if (caps.phyRepeaterCount > 0) + { + maxLinkRate = DP_MIN(maxLinkRate, caps.repeaterCaps.maxLinkRate); + } + + if (caps.dpInTunnelingCaps.bIsSupported && bIsDpTunnelBwAllocationEnabled) + { + maxLinkRate = DP_MIN(maxLinkRate, caps.dpInTunnelingCaps.maxLinkRate); + } + + return maxLinkRate; +} + +unsigned DPCDHALImpl::getMaxLaneCount() +{ + unsigned maxLaneCount = caps.maxLaneCount; + + if (caps.phyRepeaterCount > 0) + { + maxLaneCount = DP_MIN(maxLaneCount, caps.repeaterCaps.maxLaneCount); + } + + if (caps.dpInTunnelingCaps.bIsSupported && bIsDpTunnelBwAllocationEnabled) + { + maxLaneCount = DP_MIN(maxLaneCount, caps.dpInTunnelingCaps.maxLaneCount); + } + + return maxLaneCount; +} + +// Max lanes supported at the desired link rate. 
+unsigned DPCDHALImpl::getMaxLaneCountSupportedAtLinkRate(LinkRate linkRate) +{ + if (linkRate == dp2LinkRate_2_70Gbps) + { + if (caps.maxLanesAtHBR) + { + return DP_MIN(caps.maxLanesAtHBR, getMaxLaneCount()); + } + } + else if (linkRate == dp2LinkRate_1_62Gbps) + { + if (caps.maxLanesAtRBR) + { + return DP_MIN(caps.maxLanesAtRBR, getMaxLaneCount()); + } + } + // None of the above cases got hit, simply return the max lane count + return getMaxLaneCount(); +} + +// +// Single stream specific caps +// DPCD offset 22h +// +unsigned DPCDHALImpl::getNumberOfAudioEndpoints() +{ + if (caps.numberAudioEndpoints) + return caps.numberAudioEndpoints; + else + return caps.NORP > 1; +} + +bool DPCDHALImpl::getSDPExtnForColorimetry() +{ + bool bSDPExtnForColorimetry = false; + NvU8 byte = 0; + if (caps.extendedRxCapsPresent) + { + if (AuxRetry::ack == bus.read(NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST, &byte, sizeof byte)) + { + bSDPExtnForColorimetry = FLD_TEST_DRF(_DPCD14, _EXTENDED_DPRX_FEATURE_ENUM_LIST, + _VSC_SDP_EXT_FOR_COLORIMETRY, _YES, byte); + } + } + return bSDPExtnForColorimetry; +} + +bool DPCDHALImpl::getRootAsyncSDPSupported() +{ + NvU8 byte = 0; + if (!caps.extendedRxCapsPresent) + return false; + if (AuxRetry::ack != bus.read(NV_DPCD14_DPRX_FEATURE_ENUM_LIST, &byte, sizeof byte) || + FLD_TEST_DRF(_DPCD14, _DPRX_FEATURE_ENUM_LIST, _ADAPTIVE_SYNC_SDP_SUPPORTED, _NO, byte)) + { + return false; + } + if (AuxRetry::ack != bus.read(NV_DPCD_DOWN_STREAM_PORT, &byte, sizeof byte) || + FLD_TEST_DRF(_DPCD, _DOWN_STREAM_PORT, _MSA_TIMING_PAR_IGNORED, _NO, byte)) + { + return false; + } + return true; +} + +AuxRetry::status DPCDHALImpl::setOuiSource +( + unsigned ouiId, + const char * model, + size_t modelNameLength, + NvU8 chipRevision +) +{ + NvU8 ouiBuffer[16]; + + // The first 3 bytes are IEEE_OUI. 2 hex digits per register. + ouiBuffer[0] = (ouiId >> 16) & 0xFF; + ouiBuffer[1] = (ouiId >> 8) & 0xFF; + ouiBuffer[2] = ouiId & 0xFF; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (modelNameLength > NV_DPCD_SOURCE_DEV_ID_STRING__SIZE) + { + DP_PRINTF(DP_WARNING, "DPHAL> setOuiSource(): modelNameLength should not be greater than 6"); + modelNameLength = NV_DPCD_SOURCE_DEV_ID_STRING__SIZE; + } + + // Next 6 bytes are Device Identification String. + for (unsigned int i = 0; i < modelNameLength; i++) + { + ouiBuffer[3+i] = *model; + if (*model) + model++; + } + ouiBuffer[9] = chipRevision; + + for (int i = 0xA; i<=0xF; ++i) + ouiBuffer[i] = 0; + + return bus.write(NV_DPCD_SOURCE_IEEE_OUI, &ouiBuffer[0], sizeof ouiBuffer); +} + +bool DPCDHALImpl::getOuiSource(unsigned &ouiId, char * modelName, + size_t modelNameBufferSize, NvU8 & chipRevision) +{ + NvU8 ouiBuffer[16]; + int address = NV_DPCD_SOURCE_IEEE_OUI; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + // If buffer size is larger than dev_id size, the extras are not used. + // If buffer size is smaller, then we can only get certain bytes. + if (modelNameBufferSize > NV_DPCD_SOURCE_DEV_ID_STRING__SIZE) + { + modelNameBufferSize = NV_DPCD_SOURCE_DEV_ID_STRING__SIZE; + } + + if (AuxRetry::ack != bus.read(address, &ouiBuffer[0], sizeof ouiBuffer)) + { + *modelName = 0; + ouiId = 0; + chipRevision = 0; + return false; + } + // The first 3 bytes are IEEE_OUI. 2 hex digits per register.
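+ // (big-endian: ouiBuffer[0] is the most significant byte, mirroring the + // byte order written by setOuiSource() above.)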
+ ouiId = ouiBuffer[2] | (ouiBuffer[1] << 8) | (ouiBuffer[0] << 16); + + // Next 6 bytes are Device Identification String, copy as much as we can (limited buffer case). + unsigned int i; + for (i = 0; i < modelNameBufferSize; i++) + modelName[i] = ouiBuffer[3+i]; + + chipRevision = ouiBuffer[9]; + + return true; +} + +bool DPCDHALImpl::getOuiSink +( + unsigned &ouiId, + unsigned char * modelName, + size_t modelNameBufferSize, + NvU8 & chipRevision +) +{ + NvU8 ouiBuffer[16]; + int address = NV_DPCD_SINK_IEEE_OUI; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + // If buffer size is larger than dev_id size, the extras are not used. + // If buffer size is smaller, then we can only get certain bytes. + if (modelNameBufferSize > NV_DPCD_SOURCE_DEV_ID_STRING__SIZE) + { + modelNameBufferSize = NV_DPCD_SOURCE_DEV_ID_STRING__SIZE; + } + + // + // Check if there is a downstream facing port (DFP) + // If DFP is present, device is a branch device - use branch offset + // Else device is a sink device - use sink offset + // + if(caps.downStreamPortPresent) + { + address = NV_DPCD_BRANCH_IEEE_OUI; + } + + if (AuxRetry::ack != bus.read(address, &ouiBuffer[0], sizeof ouiBuffer)) + { + *modelName = 0; + ouiId = 0; + chipRevision = 0; + return false; + } + // The first 3 bytes are IEEE_OUI. 2 hex digits per register. + ouiId = ouiBuffer[0] | (ouiBuffer[1] << 8) | (ouiBuffer[2] << 16); + + // Next 6 bytes are Device Identification String, copy as much as we can (limited buffer case). + unsigned int i; + for (i = 0; i < modelNameBufferSize; i++) + modelName[i] = (unsigned char)ouiBuffer[3+i]; + + chipRevision = ouiBuffer[9]; + + return true; +} + +// DPCD offset 30h +bool DPCDHALImpl::getGUID(DisplayPort::GUID & guid) +{ + NvU8 buffer[DPCD_GUID_SIZE]; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + bus.read(NV_DPCD_GUID, &buffer[0], sizeof(buffer)); + + for (unsigned i = 0; i < DPCD_GUID_SIZE; i++) + { + guid.data[i] = buffer[i]; + } + return true; +} + +AuxRetry::status DPCDHALImpl::setGUID(DisplayPort::GUID & guid) +{ + NvU8 buffer[DPCD_GUID_SIZE]; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + for (unsigned i = 0; i < DPCD_GUID_SIZE; i++) + { + buffer[i] = guid.data[i]; + } + + return bus.write(NV_DPCD_GUID, buffer, sizeof(buffer)); +} + +void DPCDHALImpl::parsePortDescriptors() +{ + NvU8 basicCaps[128]; + unsigned bytesPerPort = caps.detailedCapInfo ? 4 : 1; + // When Detailed_cap_info_available bit is set to 1, the max number + // of downstream ports is limited to 32. Otherwise it supports up to 127 + unsigned maxPorts = caps.detailedCapInfo ?
32 : 127; + unsigned infoByte0; + if (caps.downStreamPortCount > maxPorts) + caps.downStreamPortCount = 1; + unsigned size = (bytesPerPort * caps.downStreamPortCount); + + if (AuxRetry::ack != bus.read(NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT(0), &basicCaps[0], size)) + { + DP_PRINTF(DP_ERROR, "DPHAL> Unable to read detailed caps!"); + caps.downStreamPortCount = 0; + return; + } + + if (!((isVersion(1,0)) || + (isVersion(1,1) && basicCaps[0] == 0 && + legacyPort[0].type == ANALOG_VGA))) + { + for (unsigned port = 0; port < caps.downStreamPortCount; port++) + { + // The index to access detailed info byte 0 + infoByte0 = port * bytesPerPort; + switch (DRF_VAL(_DPCD, _DETAILED_CAP_INFO_DWNSTRM_PORT, _TX_TYPE, basicCaps[infoByte0])) + { + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DISPLAYPORT: + { + legacyPort[port].type = DISPLAY_PORT; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_ANALOG: + { + legacyPort[port].type = ANALOG_VGA; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DVI: + { + legacyPort[port].type = DVI; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_HDMI: + { + NvU8 pConCaps = basicCaps[infoByte0+2]; + + legacyPort[port].type = HDMI; + + caps.pconCaps.maxTmdsClkRate = basicCaps[infoByte0+1]; + + caps.pconCaps.bSourceControlModeSupported = + FLD_TEST_DRF(_DPCD, _DETAILED_CAP_INFO, _SRC_CONTROL_MODE_SUPPORT, _YES, pConCaps); + caps.pconCaps.bConcurrentLTSupported = + FLD_TEST_DRF(_DPCD, _DETAILED_CAP_INFO, _CONCURRENT_LT_SUPPORT, _YES, pConCaps); + caps.pconCaps.maxHdmiLinkBandwidthGbps = + DRF_VAL(_DPCD, _DETAILED_CAP_INFO, _MAX_FRL_LINK_BW_SUPPORT, pConCaps); + + switch (DRF_VAL(_DPCD, _DETAILED_CAP_INFO, _MAX_BITS_PER_COMPONENT_DEF, pConCaps)) + { + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_10BPC: + caps.pconCaps.maxBpc = 10; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_12BPC: + caps.pconCaps.maxBpc = 12; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_16BPC: + caps.pconCaps.maxBpc = 16; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_8BPC: + default: + caps.pconCaps.maxBpc = 8; + break; + } + + NvU8 pConColorConvCaps = basicCaps[infoByte0+3]; + caps.pconCaps.bConv444To420Supported = FLD_TEST_DRF(_DPCD, _DETAILED_CAP, _CONV_YCBCR444_TO_YCBCR420_SUPPORTED, _YES, pConColorConvCaps); + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_OTHERS_NO_EDID: + { + legacyPort[port].type = WITHOUT_EDID; + switch (DRF_VAL(_DPCD, _DETAILED_CAP_INFO_DWNSTRM_PORT, _NON_EDID_ATTR, basicCaps[infoByte0])) + { + default: + { + DP_ASSERT(0 && "Unknown non-edid type, assume Reserved"); + legacyPort[port].nonEDID = RESERVED; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_480I_60HZ: + { + legacyPort[port].nonEDID = IL_720_480_60HZ; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_480I_50HZ: + { + legacyPort[port].nonEDID = IL_720_480_50HZ; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_1080I_60HZ: + { + legacyPort[port].nonEDID = IL_1920_1080_60HZ; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_1080I_50HZ: + { + legacyPort[port].nonEDID = IL_1920_1080_50HZ; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_720P_60HZ: + { + legacyPort[port].nonEDID = PG_1280_720_60HZ; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_720P_50HZ: + { + legacyPort[port].nonEDID = PG_1280_720_50_HZ; + break; + } + } + break; + } + case 
NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DP_PLUSPLUS: + { + legacyPort[port].type = DISPLAY_PORT_PLUSPLUS; + break; + } + default: + { + DP_ASSERT(0 && "Unknown port type"); + break; + } + } + + // Set the Init value to Zero + legacyPort[port].maxTmdsClkRate = 0; + + if (legacyPort[port].type == DVI || + legacyPort[port].type == HDMI || + legacyPort[port].type == DISPLAY_PORT_PLUSPLUS) + { + legacyPort[port].maxTmdsClkRate = ((NvU64)basicCaps[infoByte0 + 1]) * 2500000; + if (legacyPort[port].maxTmdsClkRate == 0) + { + DP_ASSERT(legacyPort[port].maxTmdsClkRate && "No Max TMDS clock rate limits."); + } + + /* + Bug : 3202060 + Parse Byte 2 as well to check whether the dongle supports HDMI FRL output. + If HDMI FRL is supported, the maxTmdsClkRate limit should be removed. + */ + + if (DRF_VAL(_DPCD, _DETAILED_CAP_INFO, _MAX_FRL_LINK_BW_SUPPORT, basicCaps[infoByte0 + 2])) + { + // Disable the TMDS CLK Limit + legacyPort[port].maxTmdsClkRate = 0; + } + } + } + } +} + +void DPCDHALImpl::populateFakeDpcd() +{ + dpcdOffline = true; + + // + // fill out the bare minimum caps required ... + // this should be extended to more dpcd offsets in the future. + // + caps.revisionMajor = 0x1; + caps.revisionMinor = 0x1; + caps.supportsESI = false; + caps.maxLinkRate = dp2LinkRate_8_10Gbps; + caps.maxLaneCount = 4; + caps.enhancedFraming = true; + caps.downStreamPortPresent = true; + caps.downStreamPortCount = 1; + + // populate the sinkcount interrupt + interrupts.sinkCount = 1; +} + +// DPCD override routine: Max link rate override. +void DPCDHALImpl::overrideMaxLinkRate(NvU32 overrideMaxLinkRate) +{ + if (overrideMaxLinkRate) + { + caps.maxLinkRate = overrideMaxLinkRate; + } +} + +void DPCDHALImpl::notifyHPD(bool status, bool bSkipDPCDRead) +{ + if (!status) + { + // check if dpcd is alive + NvU8 buffer; + unsigned retries = 16; + if (AuxRetry::ack == bus.read(NV_DPCD_REV, &buffer, sizeof buffer, retries)) + return; + + // Support for EDID locking: + // Refill the cache with "default" dpcd data on an unplug event as later on + // the client may send a hot-plug event for an edid-locked fake device (no real dpcd). + // Also raise flag "dpcdOffline" so that dpcd accesses may be optimized. + populateFakeDpcd(); + return; + } + + // Skip DPCD read if requested. + if (!bSkipDPCDRead) + { + parseAndReadCaps(); + } + + // + // For the Alienware eDP panel, more time is required to assert HPD & + // power on the AUX link. Retry 1 more time if it has failed. This is + // a BAD way to do it, but no EDID is available to differentiate here: + // this is the first access, which happens before the EDID read. + // We also found that some LG panels on HP notebooks go into a bad state + // after factory reset. Retrying 3 times works for them, so faultyRetries + // is set to 3.
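+ // The loop below simply re-reads the caps until the sink responds or the + // retry budget is exhausted; a successful parseAndReadCaps() clears dpcdOffline.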
+ // + NvU32 faultyRetries = 3; + while ((dpcdOffline) && (faultyRetries > 0)) + { + // Read the caps again + parseAndReadCaps(); + --faultyRetries; + } + + parseAndReadInterrupts(); +} + +void DPCDHALImpl::setPostLtAdjustRequestGranted(bool bGrantPostLtRequest) +{ + NvU8 data = 0; + + bus.read(NV_DPCD_LANE_COUNT_SET, &data, sizeof data); + + if (bGrantPostLtRequest) + { + data = FLD_SET_DRF(_DPCD, _LANE_COUNT_SET, _POST_LT_ADJ_REQ_GRANTED, _YES, data); + } + + else + { + data = FLD_SET_DRF(_DPCD, _LANE_COUNT_SET, _POST_LT_ADJ_REQ_GRANTED, _NO, data); + } + + if (AuxRetry::ack != bus.write(NV_DPCD_LANE_COUNT_SET, &data, sizeof data)) + { + DP_PRINTF(DP_ERROR, "DPCONN> Failed to set POST_LT_ADJ_REQ_GRANTED bit."); + } +} + +// DPCD offset 204 +bool DPCDHALImpl::getIsPostLtAdjRequestInProgress() +{ + NvU8 buffer; + + if (AuxRetry::ack != bus.read(NV_DPCD_LANE_ALIGN_STATUS_UPDATED, &buffer, 1)) + { + DP_PRINTF(DP_ERROR, "DPCONN> Post Link Training : Failed to read POST_LT_ADJ_REQ_IN_PROGRESS"); + return false; + } + + return FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED, + _POST_LT_ADJ_REQ_IN_PROGRESS, _YES, buffer); +} + +TrainingPatternSelectType DPCDHALImpl::getTrainingPatternSelect() +{ + NvU8 trainingPat = 0; + TrainingPatternSelectType pattern = TRAINING_DISABLED; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + bus.read(NV_DPCD_TRAINING_PATTERN_SET, &trainingPat, sizeof trainingPat); + + trainingPat = DRF_VAL(_DPCD, _TRAINING_PATTERN_SET, _TPS, trainingPat); + + if (trainingPat == NV_DPCD_TRAINING_PATTERN_SET_TPS_NONE) + pattern = TRAINING_DISABLED; + if (trainingPat == NV_DPCD_TRAINING_PATTERN_SET_TPS_TP1) + pattern = TRAINING_PAT_ONE; + if (trainingPat == NV_DPCD_TRAINING_PATTERN_SET_TPS_TP2) + pattern = TRAINING_PAT_TWO; + if (trainingPat == NV_DPCD_TRAINING_PATTERN_SET_TPS_TP3) + pattern = TRAINING_PAT_THREE; + + return pattern; +} + +bool +DPCDHALImpl::setTrainingMultiLaneSet +( + NvU8 numLanes, + NvU8 *voltSwingSet, + NvU8 *preEmphasisSet +) +{ + NvU8 trainingCtrl[DP_MAX_LANES] = {0}; + unsigned writeAddress = NV_DPCD_TRAINING_LANE_SET(0); + NvU8 laneIndex; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + for (laneIndex = 0; laneIndex < numLanes; laneIndex++) + { + if (voltSwingSet[laneIndex] <= NV_DPCD_MAX_VOLTAGE_SWING) + { + trainingCtrl[laneIndex] = FLD_SET_DRF_NUM(_DPCD, _TRAINING_LANE_SET, + _VOLTAGE_SWING, voltSwingSet[laneIndex], + trainingCtrl[laneIndex]); + } + else + { + DP_ASSERT(0 && "Out of bounds voltage swing. Assuming 0"); + } + + if (voltSwingSet[laneIndex] == NV_DPCD_MAX_VOLTAGE_SWING) + { + trainingCtrl[laneIndex] = FLD_SET_DRF(_DPCD, _TRAINING_LANE_SET, + _VOLTAGE_SWING_MAX_REACHED, + _TRUE, trainingCtrl[laneIndex]); + } + + if (preEmphasisSet[laneIndex] <= NV_DPCD_MAX_VOLTAGE_PREEMPHASIS) + { + trainingCtrl[laneIndex] = FLD_SET_DRF_NUM(_DPCD, _TRAINING_LANE_SET, + _PREEMPHASIS, preEmphasisSet[laneIndex], + trainingCtrl[laneIndex]); + } + else + { + DP_ASSERT(0 && "Out of bounds preemphasis. 
Assuming 0"); + } + + if (preEmphasisSet[laneIndex] == NV_DPCD_MAX_VOLTAGE_PREEMPHASIS) + { + trainingCtrl[laneIndex] = FLD_SET_DRF(_DPCD, _TRAINING_LANE_SET, + _PREEMPHASIS_MAX_REACHED, _TRUE, + trainingCtrl[laneIndex]); + } + } + + return(AuxRetry::ack == bus.write(writeAddress, trainingCtrl, (unsigned)numLanes)); +} + +AuxRetry::status DPCDHALImpl::setIgnoreMSATimingParamters(bool msaTimingParamIgnoreEn) +{ + + NvU8 downspreadCtrl = 0; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + bus.read(NV_DPCD_DOWNSPREAD_CTRL, &downspreadCtrl, sizeof downspreadCtrl); + + if (msaTimingParamIgnoreEn) + downspreadCtrl = FLD_SET_DRF(_DPCD, _DOWNSPREAD_CTRL, _MSA_TIMING_PAR_IGNORED, _TRUE, downspreadCtrl); + else + downspreadCtrl = FLD_SET_DRF(_DPCD, _DOWNSPREAD_CTRL, _MSA_TIMING_PAR_IGNORED, _FALSE, downspreadCtrl); + + return bus.write(NV_DPCD_DOWNSPREAD_CTRL, &downspreadCtrl, sizeof downspreadCtrl); +} + +AuxRetry::status DPCDHALImpl::setLinkQualPatternSet +( + LinkQualityPatternType linkQualPattern, + unsigned laneCount +) +{ + if (caps.revisionMajor <= 0) + { + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + return AuxRetry::nack; + } + + if (this->isVersion(1, 1) == true) + { + NvU8 buffer = 0; + if (AuxRetry::ack != bus.read(NV_DPCD_TRAINING_PATTERN_SET, &buffer, 1)) + { + DP_ASSERT(0 && "Can't read from NV_DPCD_TRAINING_PATTERN_SET."); + return AuxRetry::nack; + } + + // write on bits 3:2 + NvU8 value = ((linkQualPattern << 2) & 0xc) | (buffer & (~0xc)); + return bus.write(NV_DPCD_TRAINING_PATTERN_SET, &value, sizeof value); + } + else if (isAtLeastVersion(1,2) == true) + { + AuxRetry::status requestStatus = AuxRetry::nack ; + + // Set test patterns for all requested lanes + for (unsigned i = 0; i < laneCount; i++) + { + requestStatus = setLinkQualLaneSet(i, linkQualPattern); + if (requestStatus != AuxRetry::ack) + break; + } + + return requestStatus; + } + else + { + DP_ASSERT(0 && "Regs only supported for DP1.2"); + return AuxRetry::unsupportedRegister; + } +} + +AuxRetry::status DPCDHALImpl::setLinkQualLaneSet(unsigned lane, LinkQualityPatternType linkQualPattern) +{ + NvU8 linkQuality = 0; + unsigned writeAddress = NV_DPCD_LINK_QUAL_LANE_SET(lane); + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (isAtLeastVersion(1,2) == false) + { + DP_ASSERT(0 && "Regs only supported for DP1.2"); + return AuxRetry::unsupportedRegister; + } + + // check if parameter is valid + if (lane >= displayPort_LaneSupported) + { + DP_ASSERT(0 && "Unknown lane selected. 
Assuming Lane 0"); + writeAddress = NV_DPCD_LINK_QUAL_LANE_SET(0); + } + + if (linkQualPattern == LINK_QUAL_DISABLED) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _NO, linkQuality); + if (linkQualPattern == LINK_QUAL_D10_2) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _D10_2, linkQuality); + if (linkQualPattern == LINK_QUAL_SYM_ERROR) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _SYM_ERR_MEASUREMENT_CNT, linkQuality); + if (linkQualPattern == LINK_QUAL_PRBS7) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _PRBS7, linkQuality); + if (linkQualPattern == LINK_QUAL_80BIT_CUST) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _80_BIT_CUSTOM, linkQuality); + if (linkQualPattern == LINK_QUAL_HBR2_COMPLIANCE_EYE) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _HBR2, linkQuality); + if (linkQualPattern == LINK_QUAL_CP2520PAT3) + linkQuality = FLD_SET_DRF(_DPCD14, _LINK_QUAL_LANE_SET, _LQS, _CP2520PAT3, linkQuality); + + return bus.write(writeAddress, &linkQuality, sizeof linkQuality); +} + +AuxRetry::status DPCDHALImpl::setMessagingEnable(bool _uprequestEnable, bool _upstreamIsSource) +{ + NvU8 mstmCtrl = 0; + + if (!this->isAtLeastVersion(1, 2)) + { + DP_ASSERT(!_uprequestEnable && "Can't enable multistream on DP 1.1"); + return AuxRetry::nack; + } + + uprequestEnable = _uprequestEnable; + upstreamIsSource = _upstreamIsSource; + + // + // Lets not touch the MST enable bit here. + // Branch might be getting driven in MST mode and we do not want to + // change that unless we are sure there are no more streams being driven. + // + if (AuxRetry::ack != bus.read(NV_DPCD_MSTM_CTRL, &mstmCtrl, 1)) + { + DP_PRINTF(DP_ERROR, "DPHAL> ERROR! Unable to read 00111h MSTM_CTRL."); + } + + if (_uprequestEnable) + { + bMultistream = caps.supportsMultistream; + } + else + { + bMultistream = false; + } + mstmCtrl = 0; + + if (bMultistream) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, mstmCtrl); + if (uprequestEnable) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UP_REQ_EN, _YES, mstmCtrl); + if (upstreamIsSource) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UPSTREAM_IS_SRC, _YES, mstmCtrl); + + return bus.write(NV_DPCD_MSTM_CTRL, &mstmCtrl, sizeof mstmCtrl); +} + +AuxRetry::status DPCDHALImpl::setMultistreamLink(bool enable) +{ + NvU8 mstmCtrl = 0; + + if (!this->isAtLeastVersion(1, 2)) + { + DP_ASSERT(!enable && "Can't enable multistream on DP 1.1"); + return AuxRetry::nack; + } + + bMultistream = enable; + + if (bMultistream) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, mstmCtrl); + if (uprequestEnable) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UP_REQ_EN, _YES, mstmCtrl); + if (upstreamIsSource) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UPSTREAM_IS_SRC, _YES, mstmCtrl); + + return bus.write(NV_DPCD_MSTM_CTRL, &mstmCtrl, sizeof mstmCtrl); +} + +AuxRetry::status DPCDHALImpl::setMultistreamHotplugMode(MultistreamHotplugMode notifyType) +{ + NvU8 deviceCtrl = 0; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + // notifytype == HPD_LONG_PULSE, adapter 0 + if (notifyType == IRQ_HPD) + deviceCtrl = FLD_SET_DRF(_DPCD, _BRANCH_DEV_CTRL, _HOTPLUG_EVENT_TYPE, _IRQ_HPD, deviceCtrl); + + return bus.write(NV_DPCD_BRANCH_DEV_CTRL, &deviceCtrl, sizeof deviceCtrl); +} + +bool DPCDHALImpl::parseTestRequestTraining(NvU8 * buffer /* 0x18-0x28 valid */) +{ + if (buffer[1] == 0x6) + interrupts.testTraining.testRequestLinkRate = dp2LinkRate_1_62Gbps; + else if 
(buffer[1] == 0xa) + interrupts.testTraining.testRequestLinkRate = dp2LinkRate_2_70Gbps; + else if (buffer[1] == 0x14) + interrupts.testTraining.testRequestLinkRate = dp2LinkRate_5_40Gbps; + else if (buffer[1] == 0x1E) + interrupts.testTraining.testRequestLinkRate = dp2LinkRate_8_10Gbps; + else + { + DP_ASSERT(0 && "Unknown max link rate. Assuming RBR"); + interrupts.testTraining.testRequestLinkRate = dp2LinkRate_1_62Gbps; + } + + interrupts.testTraining.testRequestLaneCount = buffer[(0x220 - 0x218)] & 0xf; + + return true; +} + +void DPCDHALImpl::parseAutomatedTestRequest(bool testRequestPending) +{ + NvU8 buffer[16]; + + interrupts.automatedTestRequest = false; + interrupts.testEdid.testRequestEdidRead = false; + interrupts.testTraining.testRequestTraining = false; + interrupts.testPhyCompliance.testRequestPhyCompliance = false; + + if (!testRequestPending) + { + return; + } + interrupts.automatedTestRequest = true; + + if (AuxRetry::ack != bus.read(NV_DPCD_TEST_REQUEST, &buffer[0], 16)) + { + DP_PRINTF(DP_ERROR, "DPHAL> ERROR! Automated test request found. Unable to read 0x218 register."); + return; + } + + if (FLD_TEST_DRF(_DPCD, _TEST_REQUEST, _TEST_LINK_TRAINING, _YES, buffer[0])) + { + interrupts.testTraining.testRequestTraining = parseTestRequestTraining(&buffer[0]); + } + + if (FLD_TEST_DRF(_DPCD, _TEST_REQUEST, _TEST_EDID_READ, _YES, buffer[0])) + { + interrupts.testEdid.testRequestEdidRead = true; + } + + if (FLD_TEST_DRF(_DPCD, _TEST_REQUEST, _TEST_PHY_TEST_PATTERN, _YES, buffer[0])) + { + interrupts.testPhyCompliance.testRequestPhyCompliance = parseTestRequestPhy(); + } +} + +bool DPCDHALImpl::parseTestRequestPhy() +{ + NvU8 buffer = 0; + NvU8 bits = 0; + if (AuxRetry::ack != bus.read(NV_DPCD_PHY_TEST_PATTERN, &buffer, 1)) + { + DP_PRINTF(DP_ERROR, "DPHAL> ERROR! Test pattern request found but unable to read NV_DPCD_PHY_TEST_PATTERN register."); + return false; + } + + if (isVersion(1,0)) + bits = 0; + else + bits = DRF_VAL(_DPCD, _PHY_TEST_PATTERN_SEL, _DP12, buffer); + + if (bits == NV_DPCD_PHY_TEST_PATTERN_SEL_NO) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_DISABLED; + else if (bits == NV_DPCD_PHY_TEST_PATTERN_SEL_D10_2) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_D10_2; + else if (bits == NV_DPCD_PHY_TEST_PATTERN_SEL_SYM_ERR_MEASUREMENT_CNT) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_SYM_ERROR; + else if (bits == NV_DPCD_LINK_QUAL_LANE_SET_LQS_PRBS7) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_PRBS7; + else if (bits == NV_DPCD_LINK_QUAL_LANE_SET_LQS_80_BIT_CUSTOM) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_80BIT_CUST; + else if (bits == NV_DPCD_LINK_QUAL_LANE_SET_LQS_HBR2) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_HBR2_COMPLIANCE_EYE; + else if (bits == NV_DPCD14_PHY_TEST_PATTERN_SEL_CP2520PAT3) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_CP2520PAT3; + else + { + DP_ASSERT(0 && "Unknown pattern type, assuming none"); + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_DISABLED; + return false; + } + + if (interrupts.testPhyCompliance.phyTestPattern == LINK_QUAL_80BIT_CUST) + { + NvU8 buffer[NV_DPCD_TEST_80BIT_CUSTOM_PATTERN__SIZE] = {0}; + if (AuxRetry::ack != bus.read(NV_DPCD_TEST_80BIT_CUSTOM_PATTERN(0), &buffer[0], + NV_DPCD_TEST_80BIT_CUSTOM_PATTERN__SIZE)) + { + DP_PRINTF(DP_ERROR, "DPHAL> ERROR! Request for 80 bit custom pattern. 
Can't read from 250h."); + return false; + } + + for (unsigned i = 0; i < NV_DPCD_TEST_80BIT_CUSTOM_PATTERN__SIZE; i++) + { + interrupts.cstm80Bits[i] = buffer[i]; + } + } + + return true; +} + +void DPCDHALImpl::readPanelReplayError() +{ + NvU8 config = 0U; + bool bRetVal = (AuxRetry::ack == bus.read(NV_DPCD20_PANEL_REPLAY_ERROR_STATUS, + &config, sizeof(config))); + + if (bRetVal) + { + if (FLD_TEST_DRF(_DPCD20_PANEL_REPLAY, _ERROR_STATUS, + _ACTIVE_FRAME_CRC_ERROR, _YES, config)) + { + DP_PRINTF(DP_ERROR, "DPHAL> ERROR! Active Frame CRC Error set in PanelReplay status register"); + } + if (FLD_TEST_DRF(_DPCD20_PANEL_REPLAY, _ERROR_STATUS, + _RFB_STORAGE_ERROR, _YES, config)) + { + DP_PRINTF(DP_ERROR, "DPHAL> ERROR! RFB Storage Error set in PanelReplay status register"); + } + if (FLD_TEST_DRF(_DPCD20_PANEL_REPLAY, _ERROR_STATUS, + _VSC_SDP_UNCORRECTABLE_ERROR, _YES, config)) + { + DP_PRINTF(DP_ERROR, "DPHAL> ERROR! VSC SDP Uncorrectable Error set in PanelReplay status register"); + } + } + else + { + DP_PRINTF(DP_ERROR, "DPHAL> readPanelReplayError: Failed to read PanelReplay error status"); + } +} + +bool DPCDHALImpl::isLinkStatusValid(unsigned lanes) +{ + bool linkStatus = true; + + this->setDirtyLinkStatus(true); + this->refreshLinkStatus(); + + for (unsigned lane = 0; lane < lanes ; lane++) + { + linkStatus = linkStatus && interrupts.laneStatusIntr.laneStatus[lane].clockRecoveryDone && + interrupts.laneStatusIntr.laneStatus[lane].channelEqualizationDone && + interrupts.laneStatusIntr.laneStatus[lane].symbolLocked; + } + + linkStatus = linkStatus && interrupts.laneStatusIntr.interlaneAlignDone; + + return linkStatus; +} + +void DPCDHALImpl::refreshLinkStatus() +{ + if (interrupts.laneStatusIntr.linkStatusDirtied) + { + if (caps.supportsESI && + (caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4) && + (caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4A)) + { + this->fetchLinkStatusESI(); + } + else + { + this->fetchLinkStatusLegacy(); + } + } +} + +void DPCDHALImpl::parseAndReadInterruptsESI() +{ + NvU8 buffer[16] = {0}; + bool automatedTestRequest; + + if (AuxRetry::ack != bus.read(NV_DPCD_SINK_COUNT_ESI, &buffer[2], 0x2005 - 0x2002 + 1)) + return; + + interrupts.sinkCount = DRF_VAL(_DPCD, _SINK_COUNT_ESI, _SINK_COUNT, buffer[2]); + + // check if edp revision is v1.4 or v1.4a + if ((caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4) && (caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4A)) + { + automatedTestRequest = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _AUTO_TEST, _YES, buffer[3]); + } + else + { + // if edp rev is v1.4 or v1.4a, then use legacy address for auto test. 
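// Illustrative sketch of the buffer indexing convention used above in
// parseAndReadInterruptsESI(): reads are offset so that buffer[k] mirrors
// DPCD address 0x2000 + k (sketch only, compiled out):
#if 0
    NvU8 buffer[16] = {0};
    // A 4-byte read starting at SINK_COUNT_ESI (0x2002) lands in buffer[2..5]:
    bus.read(NV_DPCD_SINK_COUNT_ESI, &buffer[2], 0x2005 - 0x2002 + 1);
    // buffer[2] -> 0x2002 SINK_COUNT_ESI
    // buffer[3] -> 0x2003 DEVICE_SERVICE_IRQ_VECTOR_ESI0
    // buffer[4] -> 0x2004 DEVICE_SERVICE_IRQ_VECTOR_ESI1
    // buffer[5] -> 0x2005 LINK_SERVICE_IRQ_VECTOR_ESI0
#endif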
+ NvU8 legacy = 0; + if (AuxRetry::ack != bus.read(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR, &legacy, 1)) + return; + automatedTestRequest = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _AUTO_TEST, _YES, legacy); + } + + interrupts.cpIRQ = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _CP, _YES, buffer[3]); + interrupts.mccsIRQ = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _MCCS_IRQ, _YES, buffer[3]); + interrupts.downRepMsgRdy = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _DOWN_REP_MSG_RDY, _YES, buffer[3]); + interrupts.upReqMsgRdy = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _UP_REQ_MSG_RDY, _YES, buffer[3]); + + interrupts.prErrorStatus = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI1, + _PANEL_REPLAY_ERROR_STATUS, _YES, buffer[4]); + + interrupts.rxCapChanged = FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, + _RX_CAP_CHANGED, _YES, buffer[5]); + interrupts.linkStatusChanged = FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, + _LINK_STATUS_CHANGED, _YES, buffer[5]); + interrupts.streamStatusChanged = FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, + _STREAM_STATUS_CHANGED, _YES, buffer[5]); + interrupts.hdmiLinkStatusChanged = FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, + _HDMI_LINK_STATUS_CHANGED, _YES, buffer[5]); + interrupts.dpTunnelingIrq = FLD_TEST_DRF(_DPCD20, _LINK_SERVICE_IRQ_VECTOR_ESI0, + _DP_TUNNELING_IRQ, _YES, buffer[5]); + + // + // Link status changed bit is not necessarily set at all times when the sink + // loses the lane status. Refresh the lane status in any case on an IRQ + // + if ((caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4) && + (caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4A)) + { + fetchLinkStatusESI(); + } + else + { + fetchLinkStatusLegacy(); + } + + if (interrupts.linkStatusChanged) + { + this->clearLinkStatusChanged(); + } + + if (interrupts.rxCapChanged) + { + + DP_PRINTF(DP_WARNING, "DPHAL> RX Capabilities have changed!"); + parseAndReadCaps(); + this->clearInterruptCapabilitiesChanged(); + } + + if (interrupts.dpTunnelingIrq && hasDpTunnelBwAllocationCapabilityChanged()) + { + // Re read caps and turn on BW allocation if needed + parseAndReadCaps(); + } + + if (interrupts.hdmiLinkStatusChanged) + { + this->clearHdmiLinkStatusChanged(); + } + + if (interrupts.prErrorStatus) + { + this->clearPanelReplayError(); + } + + parseAutomatedTestRequest(automatedTestRequest); +} + +void DPCDHALImpl::readLTTPRLinkStatus(NvS32 rxIndex, NvU8 *buffer) +{ + int addrLane01Status; + // LINK_STATUS for LTTPR is 3 bytes. (NV_DPCD14_PHY_REPEATER_START(i) + 0x20 ~ 0x22) + int bytesToRead = 3; + + DP_ASSERT((rxIndex > 0 && rxIndex <= 8) && "Invalid rxIndex"); + // + // NV_DPCD14_PHY_REPEATER_START is 0-based. + // rxIndex is 1-based. + // + addrLane01Status = NV_DPCD14_PHY_REPEATER_START(rxIndex - 1) + + NV_DPCD14_LANE0_1_STATUS_PHY_REPEATER; + bus.read(addrLane01Status, buffer, bytesToRead); +} + +void DPCDHALImpl::resetIntrLaneStatus() +{ + // + // Reset all laneStatus to true. + // These bits can only set to true when all DPRX (including sink and LTTPRs) set + // the corresponding bit to true. Set to true as init value, and later will do &= + // through all the lanes. 
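// A minimal sketch of the init-true-then-AND accumulation the comment above
// describes ('hopStatus' and 'hopCount' are hypothetical; compiled out):
#if 0
    bool crDoneAll = true;
    for (unsigned hop = 0; hop < hopCount; hop++)
        crDoneAll &= hopStatus[hop].crDone;   // goes false if any DPRX hop failed
#endif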
+ // + for (int lane = 0; lane < 4; lane++) + { + interrupts.laneStatusIntr.laneStatus[lane].clockRecoveryDone = true; + interrupts.laneStatusIntr.laneStatus[lane].channelEqualizationDone = true; + interrupts.laneStatusIntr.laneStatus[lane].symbolLocked = true; + } + interrupts.laneStatusIntr.interlaneAlignDone = true; + interrupts.laneStatusIntr.downstmPortChng = true; + interrupts.laneStatusIntr.linkStatusUpdated = true; +} + +void DPCDHALImpl::fetchLinkStatusESI() +{ + NvU8 buffer[16] = {0}; + NvS32 rxIndex; + // LINK_STATUS_ESI from 0x200C to 0x200E + int bytesToRead = 3; + + // Reset all laneStatus to true. + resetIntrLaneStatus(); + for (rxIndex = caps.phyRepeaterCount; rxIndex >= (NvS32) NV0073_CTRL_DP_DATA_TARGET_SINK; rxIndex--) + { + if (rxIndex != NV0073_CTRL_DP_DATA_TARGET_SINK) + { + readLTTPRLinkStatus(rxIndex, &buffer[0xC]); + } + else + { + bus.read(NV_DPCD_LANE0_1_STATUS_ESI, &buffer[0xC], bytesToRead); + } + + for (int lane = 0; lane < 4; lane++) + { + unsigned laneBits = buffer[0xC+lane/2] >> (4*(lane & 1)); + interrupts.laneStatusIntr.laneStatus[lane].clockRecoveryDone &= !!(laneBits & 1); + interrupts.laneStatusIntr.laneStatus[lane].channelEqualizationDone &= !!(laneBits & 2); + interrupts.laneStatusIntr.laneStatus[lane].symbolLocked &= !!(laneBits & 4); + } + + interrupts.laneStatusIntr.interlaneAlignDone &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED_ESI, _INTERLANE_ALIGN_DONE, _YES, buffer[0xE]); + interrupts.laneStatusIntr.downstmPortChng &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED_ESI, _DOWNSTRM_PORT_STATUS_DONE, _YES, buffer[0xE]); + interrupts.laneStatusIntr.linkStatusUpdated &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED_ESI, _LINK_STATUS_UPDATED, _YES, buffer[0xE]); + } + this->setDirtyLinkStatus(false); +} + +void DPCDHALImpl::fetchLinkStatusLegacy() +{ + NvU8 buffer[16] = {0}; + NvS32 rxIndex; + + // LINK_STATUS from 0x202 to 0x204 + int bytesToRead = 3; + + // Reset all laneStatus to true. 
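// How the lane-status loops in fetchLinkStatusESI()/fetchLinkStatusLegacy()
// unpack DPCD LANEx_y_STATUS bytes: two lanes per byte, one nibble each, with
// bit 0 = CR_DONE, bit 1 = CHANNEL_EQ_DONE, bit 2 = SYMBOL_LOCKED. A sketch,
// with 'base' standing in for the buffer offset (0xC for the ESI path, 2 for
// the legacy path); compiled out:
#if 0
    for (int lane = 0; lane < 4; lane++)
    {
        unsigned laneBits  = buffer[base + lane / 2] >> (4 * (lane & 1));
        bool     crDone    = !!(laneBits & 1);
        bool     eqDone    = !!(laneBits & 2);
        bool     symLocked = !!(laneBits & 4);
    }
#endif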
+ resetIntrLaneStatus(); + + for (rxIndex = caps.phyRepeaterCount; rxIndex >= (NvS32) NV0073_CTRL_DP_DATA_TARGET_SINK; rxIndex--) + { + if (rxIndex != NV0073_CTRL_DP_DATA_TARGET_SINK) + { + readLTTPRLinkStatus(rxIndex, &buffer[2]); + } + else + { + bus.read(NV_DPCD_LANE0_1_STATUS, &buffer[2], bytesToRead); + } + + for (int lane = 0; lane < 4; lane++) + { + unsigned laneBits = buffer[2+lane/2] >> (4*(lane & 1)); + interrupts.laneStatusIntr.laneStatus[lane].clockRecoveryDone &= !!(laneBits & 1); + interrupts.laneStatusIntr.laneStatus[lane].channelEqualizationDone &= !!(laneBits & 2); + interrupts.laneStatusIntr.laneStatus[lane].symbolLocked &= !!(laneBits & 4); + } + + interrupts.laneStatusIntr.interlaneAlignDone &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED, _INTERLANE_ALIGN_DONE, _YES, buffer[4]); + interrupts.laneStatusIntr.downstmPortChng &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED, _D0WNSTRM_PORT_STATUS_DONE, _YES, buffer[4]); + interrupts.laneStatusIntr.linkStatusUpdated &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED, _LINK_STATUS_UPDATED, _YES, buffer[4]); + } + this->setDirtyLinkStatus(false); +} + +bool DPCDHALImpl::readTraining(NvU8* voltageSwingLane, NvU8* preemphasisLane, + NvU8* trainingScoreLane, NvU8* postCursor, + NvU8 activeLaneCount) +{ + NvU8 buffer[0xd] = {0}; + if (voltageSwingLane && preemphasisLane) + { + if (AuxRetry::ack != bus.read(NV_DPCD_LANE0_1_ADJUST_REQ, &buffer[0x6], 2)) + { + DP_ASSERT(0 && "Can't read NV_DPCD_LANE0_1_ADJUST_REQ."); + return false; + } + voltageSwingLane[0] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEX_DRIVE_CURRENT, buffer[6]); + voltageSwingLane[1] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEXPLUS1_DRIVE_CURRENT, buffer[6]); + voltageSwingLane[2] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEX_DRIVE_CURRENT, buffer[7]); + voltageSwingLane[3] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEXPLUS1_DRIVE_CURRENT, buffer[7]); + + preemphasisLane[0] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEX_PREEMPHASIS, buffer[6]); + preemphasisLane[1] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEXPLUS1_PREEMPHASIS, buffer[6]); + preemphasisLane[2] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEX_PREEMPHASIS, buffer[7]); + preemphasisLane[3] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEXPLUS1_PREEMPHASIS, buffer[7]); + + } + if (trainingScoreLane) + { + if (AuxRetry::ack != bus.read(NV_DPCD_TRAINING_SCORE_LANE(0), &buffer[0x8], 4)) + { + DP_ASSERT(0 && "Can't read NV_DPCD_TRAINING_SCORE_LANE(0)."); + return false; + } + trainingScoreLane[0] = buffer[0x8]; + trainingScoreLane[1] = buffer[0x9]; + trainingScoreLane[2] = buffer[0xa]; + trainingScoreLane[3] = buffer[0xb]; + } + if (postCursor) + { + if (AuxRetry::ack != bus.read(NV_DPCD_ADJUST_REQ_POST_CURSOR2, &buffer[0xc], 1)) + { + DP_ASSERT(0 && "Can't read NV_DPCD_ADJUST_REQ_POST_CURSOR2."); + return false; + } + postCursor[0] = DRF_IDX_VAL(_DPCD, _ADJUST_REQ_POST_CURSOR2, _LANE, 0, buffer[0xc]); + postCursor[1] = DRF_IDX_VAL(_DPCD, _ADJUST_REQ_POST_CURSOR2, _LANE, 1, buffer[0xc]); + postCursor[2] = DRF_IDX_VAL(_DPCD, _ADJUST_REQ_POST_CURSOR2, _LANE, 2, buffer[0xc]); + postCursor[3] = DRF_IDX_VAL(_DPCD, _ADJUST_REQ_POST_CURSOR2, _LANE, 3, buffer[0xc]); + } + return true; +} + +bool DPCDHALImpl::isLaneSettingsChanged(NvU8* oldVoltageSwingLane, + NvU8* newVoltageSwingLane, + NvU8* oldPreemphasisLane, + NvU8* newPreemphasisLane, + NvU8 activeLaneCount) +{ + for (unsigned i = 0; i < activeLaneCount; i++) + { + if (oldVoltageSwingLane[i] != newVoltageSwingLane[i] || + 
oldPreemphasisLane[i] != newPreemphasisLane[i] ) + { + return true; + } + } + return false; +} + +bool DPCDHALImpl::setPowerState(PowerState newState) +{ + NvU8 timeoutMs = 0; + + if (newState == PowerStateD0) + timeoutMs = caps.extendedSleepWakeTimeoutRequestMs; + + // Default behavior is 2ms for better tolerance. + if (timeoutMs < 2) + timeoutMs = 2; + + // + // A Branch Device must forward this value to its downstream devices. + // When set to D3 state, a Sink Device may put its AUX CH circuit in a "power + // saving" state. In this mode the AUX CH circuit may only detect the presence of a + // differential signal input without replying to an AUX CH request transaction. Upon + // detecting the presence of a differential signal input, the Sink Device must exit the + // "power saving" state within 1ms. + // + if (isAtLeastVersion(1, 1)) + { + NvU8 data = 0; + if (newState == PowerStateD0) + data |= NV_DPCD_SET_POWER_VAL_D0_NORMAL; + else if (newState == PowerStateD3) + { + if (caps.extendedSleepWakeTimeoutRequestMs > 1) + { + NvU8 grant = 0; + // Grant extended sleep wake timeout before go D3. + grant = FLD_SET_DRF(_DPCD, _EXTENDED_DPRX_WAKE_TIMEOUT, _PERIOD_GRANTED, _YES, grant); + if (AuxRetry::ack != bus.write(NV_DPCD_EXTENDED_DPRX_WAKE_TIMEOUT, &grant, sizeof(grant))) + { + DP_PRINTF(DP_ERROR, "DisplayPort: Failed to grant extended sleep wake timeout before D3"); + } + } + data = NV_DPCD_SET_POWER_VAL_D3_PWRDWN; + } + else + { + DP_ASSERT(0 && "Unknown power state"); + } + + // + // If we're powering on, we need to allow up to 1ms for the power + // to come online. Ideally we'd handle this with a callback, + // but for now we're going to do a wait here. + // + Timeout timeout(timer, timeoutMs); + unsigned retries = 0; + + do + { + if (AuxRetry::ack == bus.write(NV_DPCD_SET_POWER, &data, sizeof(data))) + { + return true; + } + retries++; + } + while (timeout.valid() || (retries < 40) /* some panels need up to 40 retries */); + + DP_PRINTF(DP_ERROR, "DisplayPort: Failed to bring panel back to wake state"); + } + else + { + // DP 1.0 devices cannot be put to sleep + if (newState == PowerStateD0) + return true; + } + + return false; +} + +void DPCDHALImpl::parseAndReadInterruptsLegacy() +{ + bool automatedTestRequest = false; + NvU8 buffer[16] = {0}; + + if (AuxRetry::ack != bus.read(NV_DPCD_SINK_COUNT, &buffer[0], 2)) + return; + + interrupts.sinkCount = NV_DPCD_SINK_COUNT_VAL(buffer[0]); + + automatedTestRequest = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _AUTO_TEST, _YES, buffer[1]); + interrupts.cpIRQ = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _CP, _YES, buffer[1]); + interrupts.mccsIRQ = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _MCCS_IRQ, _YES, buffer[1]); + interrupts.downRepMsgRdy = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _DOWN_REP_MSG_RDY, _YES, buffer[1]); + interrupts.upReqMsgRdy = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _UP_REQ_MSG_RDY, _YES, buffer[1]); + + fetchLinkStatusLegacy(); + this->setDirtyLinkStatus(false); + + parseAutomatedTestRequest(automatedTestRequest); +} + +void DPCDHALImpl::clearInterruptContentProtection() +{ + if (caps.supportsESI) + { + NvU8 irqVector = 0; + + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _CP, _YES, irqVector); + + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + else + { + NvU8 irqVector = 0; + + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _CP, _YES, irqVector); + + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR, &irqVector, sizeof 
irqVector); + } +} + +void DPCDHALImpl::clearInterruptMCCS() +{ + if (caps.supportsESI) + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _MCCS_IRQ, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + else + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _MCCS_IRQ, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR, &irqVector, sizeof irqVector); + } +} + +void DPCDHALImpl::clearInterruptDownReplyReady() +{ + if (caps.supportsESI) + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _DOWN_REP_MSG_RDY, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + else + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _DOWN_REP_MSG_RDY, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR, &irqVector, sizeof irqVector); + } +} + +void DPCDHALImpl::clearInterruptUpRequestReady() +{ + if (caps.supportsESI) + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _UP_REQ_MSG_RDY, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + else + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _UP_REQ_MSG_RDY, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR, &irqVector, sizeof irqVector); + } +} + +// DPCD offset 0x68000 +bool DPCDHALImpl::getBKSV(NvU8 *bKSV) +{ + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (AuxRetry::ack == bus.read(NV_DPCD_HDCP_BKSV_OFFSET, &bKSV[0], HDCP_KSV_SIZE)) + { + DP_PRINTF(DP_NOTICE, "Found HDCP Bksv= %02x %02x %02x %02x %02x", + bKSV[4], bKSV[3], bKSV[2], bKSV[1], bKSV[0]); + return true; + } + return false; +} + +// DPCD offset 0x68028 +bool DPCDHALImpl::getBCaps(BCaps &bCaps, NvU8 * rawByte) +{ + NvU8 buffer; + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (AuxRetry::ack == bus.read(NV_DPCD_HDCP_BCAPS_OFFSET, &buffer, sizeof buffer)) + { + bCaps.HDCPCapable = FLD_TEST_DRF(_DPCD, _HDCP_BCAPS_OFFSET, _HDCP_CAPABLE, _YES, buffer); + bCaps.repeater = FLD_TEST_DRF(_DPCD, _HDCP_BCAPS_OFFSET, _HDCP_REPEATER, _YES, buffer); + if (rawByte) + *rawByte = buffer; + return true; + } + + DP_ASSERT(!"Unable to get BCaps"); + return false; +} + +// DPCD offset 0x6921D +bool DPCDHALImpl::getHdcp22BCaps(BCaps &bCaps, NvU8 *rawByte) +{ + NvU8 buffer; + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (AuxRetry::ack == bus.read(NV_DPCD_HDCP22_BCAPS_OFFSET, &buffer, sizeof buffer)) + { + bCaps.HDCPCapable = FLD_TEST_DRF(_DPCD, _HDCP22_BCAPS_OFFSET, _HDCP_CAPABLE, _YES, buffer); + bCaps.repeater = FLD_TEST_DRF(_DPCD, _HDCP22_BCAPS_OFFSET, _HDCP_REPEATER, _YES, buffer); + if (rawByte) + *rawByte = buffer; + return true; + } + + DP_ASSERT(!"Unable to get 22BCaps"); + return false; +} + +// DPCD offset 0x6802A +bool DPCDHALImpl::getBinfo(BInfo &bInfo) +{ + NvU16 buffer; + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (AuxRetry::ack == bus.read(NV_DPCD_HDCP_BINFO_OFFSET, (NvU8*)&buffer, sizeof buffer)) + { + bInfo.maxCascadeExceeded = FLD_TEST_DRF(_DPCD_HDCP, _BINFO_OFFSET, _MAX_CASCADE_EXCEEDED, _TRUE, buffer); + bInfo.depth = DRF_VAL(_DPCD_HDCP, 
_BINFO_OFFSET, _DEPTH, buffer); + bInfo.maxDevsExceeded = FLD_TEST_DRF(_DPCD_HDCP, _BINFO_OFFSET, _MAX_DEVS_EXCEEDED, _TRUE, buffer); + bInfo.deviceCount = DRF_VAL(_DPCD_HDCP, _BINFO_OFFSET, _DEVICE_COUNT, buffer); + return true; + } + + DP_ASSERT(!"Unable to get Binfo"); + return false; +} + +// Get RxStatus per provided HDCP cap +bool DPCDHALImpl::getRxStatus(const HDCPState &hdcpState, NvU8 *data) +{ + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + NvU32 addr = hdcpState.HDCP_State_22_Capable ? + NV_DPCD_HDCP22_RX_STATUS : NV_DPCD_HDCP_BSTATUS_OFFSET; + + if (AuxRetry::ack == bus.read(addr, data, sizeof(NvU8))) + { + return true; + } + + DP_ASSERT(!"Unable to get RxStatus//Bstatus"); + return false; +} + +AuxRetry::status DPCDHALImpl::setTestResponse(bool ack, bool edidChecksumWrite) +{ + NvU8 testResponse = 0; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (ack) + testResponse = FLD_SET_DRF(_DPCD, _TEST_RESPONSE, _TEST_ACK, _YES, testResponse); + else + testResponse = FLD_SET_DRF(_DPCD, _TEST_RESPONSE, _TEST_NACK, _YES, testResponse); + + if (edidChecksumWrite) + testResponse = FLD_SET_DRF(_DPCD, _TEST_RESPONSE, _TEST_EDID_CHKSUM_WRITE, _YES, testResponse); + + return bus.write(NV_DPCD_TEST_RESPONSE, &testResponse, sizeof testResponse); +} + +PowerState DPCDHALImpl::getPowerState() +{ + NvU8 data; + if (AuxRetry::ack != bus.read(NV_DPCD_SET_POWER, &data, sizeof data, 0)) + { + // Assume powerdown state + return PowerStateD3; + } + + switch (DRF_VAL(_DPCD, _SET_POWER, _VAL, data)) + { + case NV_DPCD_SET_POWER_VAL_D3_PWRDWN: + return PowerStateD3; + + case NV_DPCD_SET_POWER_VAL_D0_NORMAL: + return PowerStateD0; + + case NV_DPCD_SET_POWER_VAL_D3_AUX_ON: + { + DP_ASSERT(isAtLeastVersion(1, 2) && "DP 1.2 specific power state to be set on a non-DP1.2 system!?"); + return PowerStateD3AuxOn; + } + default: + DP_ASSERT(0 && "Unknown power state! Assuming device is asleep"); + return PowerStateD3; + } +} + +void DPCDHALImpl::payloadTableClearACT() +{ + NvU8 byte = NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_UPDATED_YES; + bus.write(NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS, &byte, sizeof byte); +} + +bool DPCDHALImpl::payloadWaitForACTReceived() +{ + NvU8 byte = 0; + int retries = 0; + + while (true) + { + if (++retries > 40) + { + DP_PRINTF(DP_ERROR, "DPHAL> ACT Not received by sink device!"); + return false; + } + + if (AuxRetry::ack == bus.read(NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS, &byte, sizeof byte)) + { + if (FLD_TEST_DRF(_DPCD, _PAYLOAD_TABLE_UPDATE_STATUS, _ACT_HANDLED, _YES, byte)) + { + DP_PRINTF(DP_NOTICE, "DPHAL> ACT Received"); + return true; + } + } + } +} + +bool DPCDHALImpl::payloadAllocate(unsigned streamId, unsigned begin, unsigned count) +{ + bool bResult = false; + NvU8 payloadAllocate[3]; + DP_ASSERT(streamId < 64 && "Invalid stream location"); + payloadAllocate[0] = (NvU8)streamId; + payloadAllocate[1] = (NvU8)begin; + payloadAllocate[2] = (NvU8)count; + + AuxRetry::status status = bus.write(NV_DPCD_PAYLOAD_ALLOC_SET, (NvU8*)&payloadAllocate, sizeof payloadAllocate); + + if (status == AuxRetry::ack) + { + // + // Bit 0 = VC Payload Table Updated(Change/Read only) + // 1 = Update, cleared to zero when u Packet Source writes 1 + // 0 = Not updated since the last time this bit was cleared + // + NvU8 payloadStatus; + int retries = 0; + + // + // Bug 1385165 that Synaptics branch revision 1.0 found to spend more than 200ms before table updated. 
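// payloadWaitForACTReceived() above and the update-status poll below share a
// bounded-retry shape; a generic sketch, assuming the bus.read() call shape
// used in this file ('pollForStatusBit' is hypothetical; compiled out):
#if 0
    bool pollForStatusBit(NvU32 addr, NvU8 mask, int maxRetries)
    {
        NvU8 byte = 0;
        for (int i = 0; i < maxRetries; i++)
        {
            if ((AuxRetry::ack == bus.read(addr, &byte, sizeof byte)) &&
                (byte & mask))
                return true;
            timer->sleep(1);   // give the sink time to update
        }
        return false;
    }
#endif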
+ // Retries without delay is too soon for device to complete table update process. + // That will hit bug 1334070 and trigger monitor unplug/hotplug at early return. + // + do + { + if ((bus.read(NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS, &payloadStatus, sizeof(payloadStatus)) == AuxRetry::ack)) + { + if (FLD_TEST_DRF(_DPCD, _PAYLOAD_TABLE_UPDATE_STATUS, _UPDATED, _YES, payloadStatus)) + { + bResult = true; + break; + } + } + else + { + DP_PRINTF(DP_ERROR, "DPHAL> Read NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS failed."); + } + + timer->sleep(1); + } while (++retries < PAYLOADIDTABLE_UPDATED_CHECK_RETRIES); + } + else + { + DP_PRINTF(DP_ERROR, "DPHAL> Send NV_DPCD_PAYLOAD_ALLOC_SET failed."); + } + + DP_PRINTF(DP_NOTICE, "DPHAL> Requesting allocation Stream:%d | First Slot:%d | Count:%d (%s)", + streamId, begin, count, bResult ? "OK" : "FAILED"); + return bResult; +} + +void DPCDHALImpl::setGpuDPSupportedVersions(NvU32 _gpuDPSupportedVersions) +{ + bool bSupportDp1_2 = FLD_TEST_DRF(0073_CTRL_CMD_DP, _GET_CAPS_DP_VERSIONS_SUPPORTED, _DP1_2, + _YES, _gpuDPSupportedVersions); + bool bSupportDp1_4 = FLD_TEST_DRF(0073_CTRL_CMD_DP, _GET_CAPS_DP_VERSIONS_SUPPORTED, _DP1_4, + _YES, _gpuDPSupportedVersions); + + if (bSupportDp1_4) + { + DP_ASSERT(bSupportDp1_2 && "GPU supports DP1.4 should also support DP1.2!"); + } + + gpuDPSupportedVersions = _gpuDPSupportedVersions; +} + +void DPCDHALImpl::applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatabase) +{ + DP_ASSERT(dpRegkeyDatabase.bInitialized && + "All regkeys are invalid because dpRegkeyDatabase is not initialized!"); + overrideDpcdRev = dpRegkeyDatabase.dpcdRevOveride; + bBypassILREdpRevCheck = dpRegkeyDatabase.bBypassEDPRevCheck; +} + +bool DPCDHALImpl::clearPendingMsg() +{ + NvU8 irqVector, data = 0; + if (AuxRetry::ack == bus.read(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, + &irqVector, sizeof(irqVector))) + { + if (FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _DOWN_REP_MSG_RDY, _YES, irqVector)) + { + // Clear pending DOWN_REP. + data = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _DOWN_REP_MSG_RDY, _YES, 0); + } + if (FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _UP_REQ_MSG_RDY, _YES, irqVector)) + { + // Clear pending UP_REQ + data = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _UP_REQ_MSG_RDY, _YES, data); + } + if (!data || + (AuxRetry::ack != bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, + &data, sizeof(data)))) + { + DP_PRINTF(DP_ERROR, "DPCONN> %s(): No Pending Message or " + "Failed to clear pending message: irqVector/data = 0x%08x/0x%08x", + __FUNCTION__, irqVector, data); + return false; + } + + return true; + } + else + { + DP_PRINTF(DP_ERROR, "DPCONN> Clear Pending MSG: Failed to read ESI0"); + } + + return false; +} + +bool DPCDHALImpl::isMessagingEnabled() +{ + NvU8 mstmCtrl; + + if ((AuxRetry::ack == bus.read(NV_DPCD_MSTM_CTRL, &mstmCtrl, 1)) && + (FLD_TEST_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, mstmCtrl))) + { + return true; + } + return false; +} + +NvU16 * DPCDHALImpl::getLinkRateTable() +{ + if (!bIndexedLinkrateCapable) + { + DP_PRINTF(DP_ERROR, "DPCONN> link rate table is invalid"); + } + return &caps.linkRateTable[0]; +} + +bool DPCDHALImpl::getRawLinkRateTable(NvU8 *buffer) +{ + NvU16 temp[NV_DPCD_SUPPORTED_LINK_RATES__SIZE]; + NvU8 *data = (buffer == NULL) ? 
(NvU8*)&temp[0] : buffer; + + if (AuxRetry::ack != bus.read(NV_DPCD_SUPPORTED_LINK_RATES(0), data, + NV_DPCD_SUPPORTED_LINK_RATES__SIZE * sizeof(NvU16))) + { + return false; + } + return true; +} + +bool DPCDHALImpl::setSourceControlMode(bool bEnableSourceControlMode, bool bEnableFRLMode) +{ + NvU8 data = 0; + + if (bEnableSourceControlMode) + { + data = FLD_SET_DRF(_DPCD14, _PCON_FRL_LINK_CONFIG_1, _SRC_CONTROL_MODE, _ENABLE, data); + if (bEnableFRLMode) + { + data = FLD_SET_DRF(_DPCD14, _PCON_FRL_LINK_CONFIG_1, _LINK_FRL_MODE, _ENABLE, data); + data = FLD_SET_DRF(_DPCD14, _PCON_FRL_LINK_CONFIG_1, _IRQ_LINK_FRL_MODE, _ENABLE, data); + } + else + { + data = FLD_SET_DRF(_DPCD14, _PCON_FRL_LINK_CONFIG_1, _LINK_FRL_MODE, _DISABLE, data); + data = FLD_SET_DRF(_DPCD14, _PCON_FRL_LINK_CONFIG_1, _IRQ_LINK_FRL_MODE, _DISABLE, data); + } + } + else + { + data = FLD_SET_DRF(_DPCD14, _PCON_FRL_LINK_CONFIG_1, _SRC_CONTROL_MODE, _DISABLE, data); + data = FLD_SET_DRF(_DPCD14, _PCON_FRL_LINK_CONFIG_1, _LINK_FRL_MODE, _DISABLE, data); + data = FLD_SET_DRF(_DPCD14, _PCON_FRL_LINK_CONFIG_1, _IRQ_LINK_FRL_MODE, _DISABLE, data); + } + + if (AuxRetry::ack != bus.write(NV_DPCD14_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + return true; +} + +bool DPCDHALImpl::checkPCONFrlReady(bool *bFrlReady) +{ + NvU8 data = 0; + + if (bFrlReady == NULL) + { + DP_ASSERT(0); + return true; + } + + *bFrlReady = false; + + if (AuxRetry::ack != bus.read(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &data, sizeof(data))) + { + return false; + } + + if (data == 0) + { + return false; + } + + if (FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _HDMI_LINK_STATUS_CHANGED, _NO, data)) + { + parseAndReadInterruptsESI(); + return false; + } + + // Clear only this interrupt bit. + this->clearHdmiLinkStatusChanged(); + + if (AuxRetry::ack != bus.read(NV_DPCD14_PCON_HDMI_TX_LINK_STATUS, &data, sizeof(data))) + { + return false; + } + + if (FLD_TEST_DRF(_DPCD14, _PCON_HDMI_TX_LINK_STATUS, _LINK_READY, _YES, data)) + { + *bFrlReady = true; + } + return true; +} + +bool DPCDHALImpl::setupPCONFrlLinkAssessment(NvU32 linkBwMask, + bool bEnableExtendLTMode, + bool bEnableConcurrentMode) +{ + NvU8 data = 0; + + // +1 to convert PCONHdmiLinkBw enum to DPCD FRL BW cap definition + NvU32 requestedMaxBw = (NvU32)(getMaxFrlBwFromMask(linkBwMask)) + 1; + NvU32 targetBw = NV_MIN(caps.pconCaps.maxHdmiLinkBandwidthGbps, + requestedMaxBw); + + // Step 1: Configure FRL Link (FRL BW, BW mask / Concurrent) + if (bEnableExtendLTMode) + { + // + // Set FRL_LT_CONTROL to Extended mode: + // PCON FW trains for all Link BW selected in Link BW Mask (Bit 0~5) + // + data = linkBwMask; + data = FLD_SET_DRF(_DPCD14, _PCON_FRL_LINK_CONFIG_2, _FRL_LT_CONTROL, + _EXTENDED, data); + } + else + { + // Set FRL_LT_CONTROL to Normal mode, so PCON stops when first FRL LT succeed. + data = FLD_SET_DRF(_DPCD14, _PCON_FRL_LINK_CONFIG_2, _FRL_LT_CONTROL, + _NORMAL, data); + } + + if (AuxRetry::ack != bus.write(NV_DPCD14_PCON_FRL_LINK_CONFIG_2, &data, sizeof(data))) + { + return false; + } + + if (AuxRetry::ack != bus.read(NV_DPCD14_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + + if (bEnableConcurrentMode && caps.pconCaps.bConcurrentLTSupported) + { + // Client selects concurrent. + data = FLD_SET_DRF(_DPCD14, _PCON_FRL_LINK_CONFIG_1, _CONCURRENT_LT_MODE, + _ENABLE, data); + } + else + { + // + // Don't do concurrent LT for now. 
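// A sketch of the mask-to-rate reduction assumed by the "+1" conversion in
// setupPCONFrlLinkAssessment(); getMaxFrlBwFromMask() is defined elsewhere,
// so this is illustrative only, assuming one mask bit per FRL rate with
// bit 0 the lowest (compiled out):
#if 0
    NvU32 maxFrlBwFromMaskSketch(NvU32 linkBwMask)
    {
        NvU32 highest = 0;
        for (NvU32 bit = 0; bit < 6; bit++)   // FRL BW mask uses bits 0..5
            if (linkBwMask & (1u << bit))
                highest = bit;
        return highest;                       // +1 maps it to the DPCD FRL BW cap
    }
#endif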
+ // + data = FLD_SET_DRF(_DPCD14, _PCON_FRL_LINK_CONFIG_1, _CONCURRENT_LT_MODE, + _DISABLE, data); + } + data = FLD_SET_DRF(_DPCD14, _PCON_FRL_LINK_CONFIG_1, _HDMI_LINK, + _ENABLE, data); + data = FLD_SET_DRF_NUM(_DPCD14, _PCON_FRL_LINK_CONFIG_1, _MAX_LINK_BW, + targetBw, data); + + if (AuxRetry::ack != bus.write(NV_DPCD14_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + + return true; +} + +bool DPCDHALImpl::checkPCONFrlLinkStatus(NvU32 *frlRateMask) +{ + NvU8 data = 0; + + if (frlRateMask == NULL) + { + DP_ASSERT(0); + return true; + } + + *frlRateMask = 0; + // Check if IRQ happens. + if (AuxRetry::ack != bus.read(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &data, sizeof(data))) + { + return false; + } + + if (FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _HDMI_LINK_STATUS_CHANGED, _NO, data)) + { + return false; + } + // Check HDMI Link Active status (0x303B Bit 0) and Link Config (0x3036) + if (AuxRetry::ack != bus.read(NV_DPCD14_PCON_HDMI_TX_LINK_STATUS, &data, sizeof(data))) + { + return false; + } + + if (FLD_TEST_DRF(_DPCD14, _PCON_HDMI_TX_LINK_STATUS, _LINK_ACTIVE, _YES, data)) + { + if (AuxRetry::ack == bus.read(NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS, &data, sizeof(data))) + { + *frlRateMask = DRF_VAL(_DPCD14, _PCON_HDMI_LINK_CONFIG_STATUS, _LT_RESULT, data); + } + + } + + return true; +} + +bool DPCDHALImpl::queryHdmiLinkStatus(bool *bLinkActive, bool *bLinkReady) +{ + NvU8 data = 0; + + if (bLinkActive == NULL && bLinkReady == NULL) + return false; + + if (AuxRetry::ack != bus.read(NV_DPCD14_PCON_HDMI_TX_LINK_STATUS, &data, sizeof(data))) + { + return false; + } + if (bLinkReady != NULL) + { + *bLinkReady = (FLD_TEST_DRF(_DPCD14, _PCON_HDMI_TX_LINK_STATUS, + _LINK_READY, _YES, data)); + } + if (bLinkActive != NULL) + { + *bLinkActive = (FLD_TEST_DRF(_DPCD14, _PCON_HDMI_TX_LINK_STATUS, + _LINK_ACTIVE, _YES, data)); + } + return true; +} + +NvU32 DPCDHALImpl::restorePCONFrlLink(NvU32 linkBwMask, + bool bEnableExtendLTMode, + bool bEnableConcurrentMode) +{ + // Restore HDMI Link. + // 1. Clear HDMI link enable bit (305A bit 7) + NvU8 data = 0; + NvU32 loopCount; + NvU32 frlRate; + if (AuxRetry::ack != bus.read(NV_DPCD14_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + data = FLD_SET_DRF(_DPCD14, _PCON_FRL_LINK_CONFIG_1, _HDMI_LINK, _DISABLE, data); + if (AuxRetry::ack != bus.write(NV_DPCD14_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + // 2. Set FRL or TMDS (Optional if not changed) (305A bit 5) + // 3. Read FRL Ready Bit (303B bit 1) + + Timeout timeout(timer, 500 /* 500ms */); + data = 0; + do + { + if (AuxRetry::ack != bus.read(NV_DPCD14_PCON_HDMI_TX_LINK_STATUS, + &data, sizeof(data))) + continue; + if (FLD_TEST_DRF(_DPCD14, _PCON_HDMI_TX_LINK_STATUS, _LINK_READY, _YES, data)) + break; + } while (timeout.valid()); + + if (FLD_TEST_DRF(_DPCD14, _PCON_HDMI_TX_LINK_STATUS, _LINK_READY, _NO, data)) + { + return false; + } + + // 4. Configure FRL Link (Optional if not changed) + // 5. Set HDMI Enable Bit. + data = 0; + + if (AuxRetry::ack != bus.read(NV_DPCD14_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + data = FLD_SET_DRF(_DPCD14, _PCON_FRL_LINK_CONFIG_1, _HDMI_LINK, _ENABLE, data); + if (AuxRetry::ack != bus.write(NV_DPCD14_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + + // 6. Read HDMI Link Status link active bit (2005 bit 3) + // 7. 
Read HDMI link active status bit and link config status (303b bit0 / 3036) + loopCount = NV_PCON_FRL_LT_TIMEOUT_THRESHOLD; + do + { + if (checkPCONFrlLinkStatus(&frlRate) == true) + { + break; + } + Timeout timeout(this->timer, NV_PCON_FRL_LT_TIMEOUT_INTERVAL_MS); + while(timeout.valid()); + continue; + } while (--loopCount); + + return frlRate; +} + +bool DPCDHALImpl::updatePsrConfiguration(vesaPsrConfig psrcfg) +{ + NvU8 config = 0U; + + if (psrcfg.psrCfgEnable) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _SINK_ENABLE, _YES, config); + } + if (psrcfg.srcTxEnabledInPsrActive) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _SOURCE_LINK_ACTIVE, _YES, config); + } + if (psrcfg.crcVerifEnabledInPsrActive) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _CRC_VERIFICATION_ACTIVE, _YES, config); + } + if (psrcfg.frameCaptureSecondActiveFrame) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _FRAME_CAPTURE_INDICATION, _SECOND, config); + } + if (psrcfg.selectiveUpdateOnSecondActiveline) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _SU_LINE_CAPTURE_INDICATION, _SECOND, config); + } + if (psrcfg.enableHpdIrqOnCrcMismatch) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _HPD_IRQ_ON_CRC_ERROR, _YES, config); + } + if (psrcfg.enablePsr2) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _ENABLE_PSR2, _YES, config); + } + + return AuxRetry::ack == + bus.write(NV_DPCD_EDP_PSR_CONFIG, &config, 1); +} + +bool DPCDHALImpl::readPsrConfiguration(vesaPsrConfig *psrcfg) +{ + NvU8 config = 0U; + bool retVal = AuxRetry::ack == + bus.read(NV_DPCD_EDP_PSR_CONFIG, &config, 1); + + psrcfg->psrCfgEnable = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _SINK_ENABLE, _YES, config); + psrcfg->srcTxEnabledInPsrActive = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _SOURCE_LINK_ACTIVE, _YES, config); + psrcfg->crcVerifEnabledInPsrActive = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _CRC_VERIFICATION_ACTIVE, + _YES, config); + psrcfg->frameCaptureSecondActiveFrame = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _FRAME_CAPTURE_INDICATION, + _SECOND, config); + psrcfg->selectiveUpdateOnSecondActiveline = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, + _SU_LINE_CAPTURE_INDICATION, _SECOND, config); + psrcfg->enableHpdIrqOnCrcMismatch = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _HPD_IRQ_ON_CRC_ERROR, _YES, config); + psrcfg->enablePsr2 = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _ENABLE_PSR2, _YES, config); + + return retVal; +} + +bool DPCDHALImpl::readPsrState(vesaPsrState *psrState) +{ + NvU8 config = 0U; + bool retVal = AuxRetry::ack == + bus.read(NV_DPCD_PANEL_SELF_REFRESH_STATUS, &config, 1); + + if (retVal) + { + *psrState = + (vesaPsrState)DRF_VAL(_DPCD, _PANEL_SELF_REFRESH_STATUS, + _VAL, config); + } + return retVal; +} + +bool DPCDHALImpl::readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState) +{ + NvU8 config[2] = { 0U , 0U }; + bool retVal = AuxRetry::ack == + bus.read(NV_DPCD_PANEL_SELF_REFRESH_DEBUG0, + &config[0], sizeof(config)); + + if (retVal) + { + psrDbgState->maxResyncFrames = + DRF_VAL(_DPCD_PANEL_SELF_REFRESH, + _DEBUG0, _MAX_RESYNC_FRAME_CNT, config[0]); + psrDbgState->actualResyncFrames = + DRF_VAL(_DPCD_PANEL_SELF_REFRESH, + _DEBUG0, _LAST_RESYNC_FRAME_CNT, config[0]); + + psrDbgState->lastSdpPsrState = + !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP, + _PSR_STATE_BIT, config[1]); + psrDbgState->lastSdpUpdateRfb = + !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP, + _RFB_BIT, config[1]); + psrDbgState->lastSdpCrcValid = + !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP, + _CRC_VALID_BIT, config[1]); + 
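// The '!!' in the assignments here is the usual C/C++ idiom for collapsing a
// nonzero field value to a canonical 0/1 before storing it in a boolean-style
// flag (illustrative, with placeholder field names; compiled out):
#if 0
    bool flag = !!DRF_VAL(_D, _R, _F, byte);   // 0 stays 0, nonzero becomes 1
#endif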
psrDbgState->lastSdpSuValid =
+            !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP,
+                      _SU_VALID_BIT, config[1]);
+        psrDbgState->lastSdpFirstSURcvd =
+            !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP,
+                      _SU_FIRST_LINE_RCVD, config[1]);
+        psrDbgState->lastSdpLastSURcvd =
+            !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP,
+                      _SU_LAST_LINE_RCVD, config[1]);
+        psrDbgState->lastSdpYCoordValid =
+            !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP,
+                      _Y_CORD_VALID, config[1]);
+    }
+    return retVal;
+}
+
+bool DPCDHALImpl::writePsrErrorStatus(vesaPsrErrorStatus psrErr)
+{
+    NvU8 config = 0U;
+    config = FLD_SET_DRF_NUM(_DPCD_PANEL_SELF_REFRESH,
+                             _ERR_STATUS,
+                             _LINK_CRC_ERR,
+                             psrErr.linkCrcError,
+                             config);
+    config = FLD_SET_DRF_NUM(_DPCD_PANEL_SELF_REFRESH,
+                             _ERR_STATUS,
+                             _RFB_ERR,
+                             psrErr.rfbStoreError,
+                             config);
+    config = FLD_SET_DRF_NUM(_DPCD_PANEL_SELF_REFRESH,
+                             _ERR_STATUS,
+                             _VSC_SDP_ERR,
+                             psrErr.vscSdpError,
+                             config);
+
+    return AuxRetry::ack == bus.write(
+        NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS, &config, 1);
+}
+
+bool DPCDHALImpl::readPsrErrorStatus(vesaPsrErrorStatus *psrErr)
+{
+    NvU8 config = 0U;
+    bool retVal;
+    retVal = AuxRetry::ack == bus.read(
+        NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS,
+        &config, sizeof(config));
+
+    if (retVal)
+    {
+        // Mirror writePsrErrorStatus() above: LINK_CRC_ERR -> linkCrcError,
+        // RFB_ERR -> rfbStoreError, VSC_SDP_ERR -> vscSdpError.
+        psrErr->linkCrcError = FLD_TEST_DRF(_DPCD,
+                                            _PANEL_SELF_REFRESH_ERR_STATUS,
+                                            _LINK_CRC_ERR, _YES, config);
+        psrErr->rfbStoreError = FLD_TEST_DRF(_DPCD,
+                                             _PANEL_SELF_REFRESH_ERR_STATUS,
+                                             _RFB_ERR, _YES, config);
+        psrErr->vscSdpError = FLD_TEST_DRF(_DPCD,
+                                           _PANEL_SELF_REFRESH_ERR_STATUS,
+                                           _VSC_SDP_ERR, _YES, config);
+    }
+    return retVal;
+}
+
+bool DPCDHALImpl::writePsrEvtIndicator(vesaPsrEventIndicator psrEvt)
+{
+    NvU8 config = 0U;
+
+    if (psrEvt.sinkCapChange)
+    {
+        config = FLD_SET_DRF(_DPCD,
+                             _PANEL_SELF_REFRESH_EVENT_STATUS,
+                             _CAP_CHANGE,
+                             _YES, config);
+    }
+    return AuxRetry::ack == bus.write(
+        NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS, &config, 1);
+}
+
+bool DPCDHALImpl::readPsrEvtIndicator(vesaPsrEventIndicator *psrEvt)
+{
+    NvU8 config = 0U;
+    bool retVal;
+    retVal = AuxRetry::ack == bus.read(
+        NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS,
+        &config, sizeof(config));
+
+    if (retVal)
+    {
+        psrEvt->sinkCapChange = DRF_VAL(_DPCD,
+                                        _PANEL_SELF_REFRESH_EVENT_STATUS,
+                                        _CAP_CHANGE,
+                                        config);
+    }
+    return retVal;
+}
+
+bool DPCDHALImpl::readPrSinkDebugInfo(panelReplaySinkDebugInfo *prDbgInfo)
+{
+    NvU8 config = 0U;
+    bool bRetVal = (AuxRetry::ack ==
+        bus.read(NV_DPCD20_PANEL_REPLAY_ERROR_STATUS,
+                 &config, sizeof(config)));
+
+    if (bRetVal)
+    {
+        prDbgInfo->activeFrameCrcError =
+            FLD_TEST_DRF(_DPCD20_PANEL_REPLAY, _ERROR_STATUS,
+                         _ACTIVE_FRAME_CRC_ERROR, _YES, config);
+        prDbgInfo->rfbStorageError =
+            FLD_TEST_DRF(_DPCD20_PANEL_REPLAY, _ERROR_STATUS,
+                         _RFB_STORAGE_ERROR, _YES, config);
+        prDbgInfo->vscSdpUncorrectableError =
+            FLD_TEST_DRF(_DPCD20_PANEL_REPLAY, _ERROR_STATUS,
+                         _VSC_SDP_UNCORRECTABLE_ERROR, _YES, config);
+        prDbgInfo->adaptiveSyncSdpMissing =
+            FLD_TEST_DRF(_DPCD20_PANEL_REPLAY, _ERROR_STATUS,
+                         _ADAPTIVE_SYNC_SDP_MISSING, _YES, config);
+    }
+    else
+    {
+        DP_PRINTF(DP_ERROR, "DPHAL> readPrSinkDebugInfo: Failed to read PanelReplay error status");
+        return bRetVal;
+    }
+
+    config = 0U;
+    bRetVal = (AuxRetry::ack ==
+        bus.read(NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS,
+                 &config, sizeof(config)));
+    if (bRetVal)
+    {
+        prDbgInfo->sinkPrStatus = DRF_VAL(_DPCD20,
+            _PANEL_REPLAY_AND_FRAME_LOCK_STATUS, _PR_STATUS, config);
+
+        prDbgInfo->sinkFramelocked = DRF_VAL(_DPCD20,
+            _PANEL_REPLAY_AND_FRAME_LOCK_STATUS, _SINK_FRAME_LOCKED,
config); + + prDbgInfo->sinkFrameLockedValid = + FLD_TEST_DRF(_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS, + _SINK_FRAME_LOCKED, _VALID, _YES, config); + } + else + { + DP_PRINTF(DP_ERROR, "DPHAL> readPanelReplayError: Failed to read PanelReplay frame lock status"); + return bRetVal; + } + + config = 0U; + bRetVal = AuxRetry::ack == + bus.read(NV_DPCD20_PANEL_REPLAY_DEBUG_LAST_VSC_SDP_CARRYING_PR_INFO, + &config, sizeof(config)); + if (bRetVal) + { + prDbgInfo->currentPrState = + FLD_TEST_DRF(_DPCD20_PANEL_REPLAY_DEBUG_LAST, + _VSC_SDP_CARRYING_PR_INFO, _STATE, _ACTIVE, config); + prDbgInfo->crcValid = + FLD_TEST_DRF(_DPCD20_PANEL_REPLAY_DEBUG_LAST, + _VSC_SDP_CARRYING_PR_INFO, _CRC_VALID, _YES, config); + prDbgInfo->suCoordinatesValid = + FLD_TEST_DRF(_DPCD20_PANEL_REPLAY_DEBUG_LAST, + _VSC_SDP_CARRYING_PR_INFO, _SU_COORDINATE_VALID, + _YES, config); + } + else + { + DP_PRINTF(DP_ERROR, "DPHAL> readPanelReplayError: Failed to read PanelReplay VSC SDP status"); + return bRetVal; + } + return bRetVal; +} + +bool DPCDHALImpl::getDpTunnelGranularityMultiplier(NvU8 &granularityMultiplier) +{ + NvU8 granularity = 0; + if (AuxRetry::ack != + bus.read(NV_DPCD20_DP_TUNNEL_BW_GRANULARITY, &granularity, sizeof(granularity))) + { + DP_PRINTF(DP_ERROR, "Failed to read DP Tunnel granularity"); + return false; + } + + switch (DRF_VAL(_DPCD20, _DP_TUNNEL_BW_GRANULARITY, _VAL, granularity)) + { + case NV_DPCD20_DP_TUNNEL_BW_GRANULARITY_VAL_0_25_GBPS: + granularityMultiplier = 4; + break; + case NV_DPCD20_DP_TUNNEL_BW_GRANULARITY_VAL_0_50_GBPS: + granularityMultiplier = 2; + break; + case NV_DPCD20_DP_TUNNEL_BW_GRANULARITY_VAL_1_00_GBPS: + granularityMultiplier = 1; + break; + default: + DP_PRINTF(DP_WARNING, "Unknown DP Tunnel granularity read from sink. Assuming 1 Gbps"); + granularityMultiplier = 1; + break; + } + + return true; +} + +TriState DPCDHALImpl::getDpTunnelBwRequestStatus() +{ + NvU8 bwRequestStatus = 0; + TriState status = Indeterminate; + if (AuxRetry::ack != + bus.read(NV_DPCD20_DP_TUNNELING_STATUS, &bwRequestStatus, sizeof(bwRequestStatus))) + { + DP_PRINTF(DP_ERROR, "Failed to read DP Tunneling status"); + return status; + } + + if (FLD_TEST_DRF(_DPCD20, _DP_TUNNELING_BW_REQUEST, _FAILED, _YES, bwRequestStatus)) + { + status = False; + } + else if (FLD_TEST_DRF(_DPCD20, _DP_TUNNELING_BW_REQUEST, _SUCCEEDED, _YES, bwRequestStatus)) + { + status = True; + } + + return status; +} + +bool DPCDHALImpl::setDpTunnelBwAllocation(bool bEnable) +{ + DP_PRINTF(DP_INFO, "requested: %d", bEnable); + NvU8 bwAllocationControl = 0; + if(AuxRetry::ack != bus.read(NV_DPCD20_DPTX_BW_ALLOCATION_MODE_CONTROL, + &bwAllocationControl, sizeof(bwAllocationControl))) + { + return false; + } + + if (bEnable) + { + bwAllocationControl = FLD_SET_DRF(_DPCD20, _DPTX, _UNMASK_BW_ALLOCATION_IRQ, _YES, bwAllocationControl); + bwAllocationControl = FLD_SET_DRF(_DPCD20, _DPTX, _DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE, + _YES, bwAllocationControl); + } + else + { + bwAllocationControl = FLD_SET_DRF(_DPCD20, _DPTX, _UNMASK_BW_ALLOCATION_IRQ, _NO, bwAllocationControl); + bwAllocationControl = FLD_SET_DRF(_DPCD20, _DPTX, _DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE, + _NO, bwAllocationControl); + } + + if (AuxRetry::ack != + bus.write(NV_DPCD20_DPTX_BW_ALLOCATION_MODE_CONTROL, &bwAllocationControl, sizeof(bwAllocationControl))) + { + DP_PRINTF(DP_ERROR, "Failed to write bw allocation control: %d", bwAllocationControl); + return false; + } + + bIsDpTunnelBwAllocationEnabled = bEnable; + + return true; +} + +bool 
DPCDHALImpl::getDpTunnelEstimatedBw(NvU8 &estimatedBw) +{ + if (AuxRetry::ack != + bus.read(NV_DPCD20_DP_TUNNEL_ESTIMATED_BW, &estimatedBw, sizeof(estimatedBw))) + { + DP_PRINTF(DP_ERROR, "Failed to read DP Tunnel estimated BW"); + return false; + } + + return true; +} + +bool DPCDHALImpl::hasDpTunnelEstimatedBwChanged() +{ + NvU8 byte = 0; + if (AuxRetry::ack != + bus.read(NV_DPCD20_DP_TUNNELING_STATUS, &byte, sizeof(byte))) + { + DP_PRINTF(DP_ERROR, "Failed to read DP Tunneling status"); + return false; + } + + return FLD_TEST_DRF(_DPCD20, _DP_TUNNELING, _ESTIMATED_BW_CHANGED, _YES, byte); +} + +bool DPCDHALImpl::hasDpTunnelBwAllocationCapabilityChanged() +{ + NvU8 byte = 0; + if (AuxRetry::ack != + bus.read(NV_DPCD20_DP_TUNNELING_STATUS, &byte, sizeof(byte))) + { + DP_PRINTF(DP_ERROR, "Failed to read DP Tunneling status"); + return false; + } + + return FLD_TEST_DRF(_DPCD20, _DP_TUNNELING, + _BW_ALLOCATION_CAPABILITY_CHANGED, _YES, byte); +} + +bool DPCDHALImpl::writeDpTunnelRequestedBw(NvU8 requestedBw) +{ + if (AuxRetry::ack != + bus.write(NV_DPCD20_DP_TUNNEL_REQUESTED_BW, &requestedBw, sizeof(requestedBw))) + { + DP_PRINTF(DP_ERROR, "Failed to write requested BW"); + return false; + } + + return true; +} + +bool DPCDHALImpl::clearDpTunnelingBwRequestStatus() +{ + NvU8 readByte = 0; + NvU8 writeByte = 0; + if (AuxRetry::ack != + bus.read(NV_DPCD20_DP_TUNNELING_STATUS, &readByte, sizeof(readByte))) + { + DP_PRINTF(DP_ERROR, "Failed to read DP Tunneling status"); + return false; + } + + if (FLD_TEST_DRF(_DPCD20, _DP_TUNNELING, _BW_REQUEST_FAILED, _YES, readByte)) + { + writeByte = FLD_SET_DRF(_DPCD20, _DP_TUNNELING, _BW_REQUEST_FAILED, _YES, writeByte); + } + + if (FLD_TEST_DRF(_DPCD20, _DP_TUNNELING, _BW_REQUEST_SUCCEEDED, _YES, readByte)) + { + writeByte = FLD_SET_DRF(_DPCD20, _DP_TUNNELING, _BW_REQUEST_SUCCEEDED, _YES, writeByte); + } + + if (writeByte > 0) + { + if (AuxRetry::ack != + bus.write(NV_DPCD20_DP_TUNNELING_STATUS, &writeByte, sizeof(writeByte))) + { + DP_PRINTF(DP_ERROR, "Failed to write DP_TUNNELING_STATUS"); + return false; + } + + } + return true; +} + +bool DPCDHALImpl::clearDpTunnelingEstimatedBwStatus() +{ + NvU8 byte = 0; + byte = FLD_SET_DRF(_DPCD20, _DP_TUNNELING, _ESTIMATED_BW_CHANGED, _YES, byte); + if (AuxRetry::ack != + bus.write(NV_DPCD20_DP_TUNNELING_STATUS, &byte, sizeof(byte))) + { + DP_PRINTF(DP_ERROR, "Failed to write clear estimated BW status"); + return false; + } + + return true; +} + +bool DPCDHALImpl::clearDpTunnelingBwAllocationCapStatus() +{ + NvU8 byte = 0; + byte = FLD_SET_DRF(_DPCD20, _DP_TUNNELING, _BW_ALLOCATION_CAPABILITY_CHANGED, _YES, byte); + + if (AuxRetry::ack != + bus.write(NV_DPCD20_DP_TUNNELING_STATUS, &byte, sizeof(byte))) + { + DP_PRINTF(DP_ERROR, "Failed to write clear bw allocation capability changed status"); + return false; + } + + return true; +} + +DPCDHAL * DisplayPort::MakeDPCDHAL(AuxBus * bus, Timer * timer, MainLink * main) +{ + return new DPCDHALImpl(bus, timer); +} + diff --git a/src/common/displayport/src/dp_connectorimpl.cpp b/src/common/displayport/src/dp_connectorimpl.cpp new file mode 100644 index 0000000..1c2e38f --- /dev/null +++ b/src/common/displayport/src/dp_connectorimpl.cpp @@ -0,0 +1,8735 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_connectorimpl.cpp * +* DP connector implementation * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_guid.h" +#include "dp_configcaps.h" +#include "dp_list.h" +#include "dp_buffer.h" +#include "dp_auxdefs.h" +#include "dp_watermark.h" +#include "dp_edid.h" +#include "dp_discovery.h" +#include "dp_groupimpl.h" +#include "dp_deviceimpl.h" +#include "dp_connectorimpl.h" +#include "dp_printf.h" + +#include "dp_qse.h" + +#include "dp_auxbus.h" +#include "dpringbuffertypes.h" + +#include "ctrl/ctrl0073/ctrl0073dfp.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" +#include "dp_tracing.h" + +using namespace DisplayPort; + +ConnectorImpl::ConnectorImpl(MainLink * main, AuxBus * auxBus, Timer * timer, Connector::EventSink * sink) + : main(main), + auxBus(auxBus), + timer(timer), + sink(sink), + cachedSourceOUI(0), + bOuiCached(false), + bIgnoreSrcOuiHandshake(false), + linkPolicy(), + linkGuessed(false), + isLinkQuiesced(false), + bNoLtDoneAfterHeadDetach(false), + isDP12AuthCap(false), + isHDCPAuthOn(false), + isHDCPReAuthPending(false), + isHDCPAuthTriggered(false), + isHopLimitExceeded(false), + isDiscoveryDetectComplete(false), + bDeferNotifyLostDevice(false), + hdcpValidateData(), + authRetries(0), + retryLT(0), + hdcpCapsRetries(0), + hdcpCpIrqRxStatusRetries(0), + bFromResumeToNAB(false), + bAttachOnResume(false), + bHdcpAuthOnlyOnDemand(false), + bHdcpStrmEncrEnblOnlyOnDemand(false), + constructorFailed(false), + policyModesetOrderMitigation(false), + policyForceLTAtNAB(false), + policyAssessLinkSafely(false), + bDisableVbiosScratchRegisterUpdate(false), + modesetOrderMitigation(false), + compoundQueryActive(false), + compoundQueryResult(false), + compoundQueryCount(0), + messageManager(0), + discoveryManager(0), + numPossibleLnkCfg(0), + linkAwaitingTransition(false), + linkState(DP_TRANSPORT_MODE_INIT), + bAudioOverRightPanel(false), + connectorActive(false), + firmwareGroup(0), + qseNonceGenerator(0), + bValidQSERequest(false), + message(0), + clientId(0), + bAcpiInitDone(false), + bIsUefiSystem(false), + bSkipLt(false), + bMitigateZombie(false), + bDelayAfterD3(false), + bKeepOptLinkAlive(false), + bNoFallbackInPostLQA(false), + bIsEncryptionQseValid(true), + LT2FecLatencyMs(0), + bFECEnable(false), + 
bDscCapBasedOnParent(false),
+    allocatedDpTunnelBw(0),
+    inTransitionHeadMask(0x0),
+    ResStatus(this)
+{
+    clearTimeslices();
+    firmwareGroup = createFirmwareGroup();
+
+    if (firmwareGroup == NULL)
+    {
+        constructorFailed = true;
+        return;
+    }
+
+    main->queryGPUCapability();
+    main->queryAndUpdateDfpParams();
+    hal = MakeDPCDHAL(auxBus, timer, main);
+    if (hal == NULL)
+    {
+        constructorFailed = true;
+        return;
+    }
+
+    hal->setPC2Disabled(main->isPC2Disabled());
+
+    //
+    // If the GPU supports DP1.2 or DP1.4, set these capabilities.
+    // This is used for accessing DP1.2/DP1.4-specific register space & features.
+    //
+    hal->setGpuDPSupportedVersions(main->getGpuDpSupportedVersions());
+
+    // Set if the GPU supports FEC. Check panel FEC caps only if the GPU supports it.
+    hal->setGpuFECSupported(main->isFECSupported());
+
+    // Set if LTTPR training is supported per regkey.
+    hal->setLttprSupported(main->isLttprSupported());
+
+    const DP_REGKEY_DATABASE& dpRegkeyDatabase = main->getRegkeyDatabase();
+    this->applyRegkeyOverrides(dpRegkeyDatabase);
+    hal->applyRegkeyOverrides(dpRegkeyDatabase);
+
+    highestAssessedLC = initMaxLinkConfig();
+}
+
+void ConnectorImpl::applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatabase)
+{
+    DP_ASSERT(dpRegkeyDatabase.bInitialized &&
+              "All regkeys are invalid because dpRegkeyDatabase is not initialized!");
+
+    this->bSkipAssessLinkForEDP = dpRegkeyDatabase.bAssesslinkForEdpSkipped;
+
+    // If the HDCP authentication-on-demand regkey is set, override to the provided value.
+    this->bHdcpAuthOnlyOnDemand = dpRegkeyDatabase.bHdcpAuthOnlyOnDemand;
+
+    if (dpRegkeyDatabase.bOptLinkKeptAlive)
+    {
+        this->bKeepLinkAliveMST = true;
+        this->bKeepLinkAliveSST = true;
+    }
+    else
+    {
+        this->bKeepLinkAliveMST = dpRegkeyDatabase.bOptLinkKeptAliveMst;
+        this->bKeepLinkAliveSST = dpRegkeyDatabase.bOptLinkKeptAliveSst;
+    }
+    this->bReportDeviceLostBeforeNew = dpRegkeyDatabase.bReportDeviceLostBeforeNew;
+    this->bDisableSSC = dpRegkeyDatabase.bSscDisabled;
+    this->bEnableFastLT = dpRegkeyDatabase.bFastLinkTrainingEnabled;
+    this->bDscMstCapBug3143315 = dpRegkeyDatabase.bDscMstCapBug3143315;
+    this->bPowerDownPhyBeforeD3 = dpRegkeyDatabase.bPowerDownPhyBeforeD3;
+    if (dpRegkeyDatabase.applyMaxLinkRateOverrides)
+    {
+        this->maxLinkRateFromRegkey = hal->mapLinkBandiwdthToLinkrate(dpRegkeyDatabase.applyMaxLinkRateOverrides); // BW to link rate
+    }
+    this->bForceDisableTunnelBwAllocation = dpRegkeyDatabase.bForceDisableTunnelBwAllocation;
+    this->bSkipZeroOuiCache = dpRegkeyDatabase.bSkipZeroOuiCache;
+    this->bForceHeadShutdownFromRegkey = dpRegkeyDatabase.bForceHeadShutdown;
+}
+
+void ConnectorImpl::setPolicyModesetOrderMitigation(bool enabled)
+{
+    policyModesetOrderMitigation = enabled;
+}
+
+void ConnectorImpl::setPolicyForceLTAtNAB(bool enabled)
+{
+    policyForceLTAtNAB = enabled;
+}
+
+void ConnectorImpl::setPolicyAssessLinkSafely(bool enabled)
+{
+    policyAssessLinkSafely = enabled;
+}
+
+//
+// This function re-reads the remote HDCP BKSV and BCAPS.
+//
+// It was added for DP1.2 devices which don't have a valid BKSV at HPD and
+// only make the BKSV available after the payload ACK.
+//
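// The function below re-arms itself through the timer queue instead of
// blocking; the deferred-retry shape it relies on, sketched with the
// queueCallback() call pattern that appears in its body (compiled out):
#if 0
    if (hdcpCapsRetries < 1)
    {
        // Re-run the capability read roughly 3 seconds from now, once.
        timer->queueCallback(this, &tagDelayedHdcpCapRead, 3000);
        hdcpCapsRetries++;
    }
#endif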
+void ConnectorImpl::readRemoteHdcpCaps()
+{
+    if (hdcpCapsRetries)
+    {
+        fireEvents();
+        return;
+    }
+
+    if (linkUseMultistream())
+    {
+        for (Device * i = enumDevices(0); i; i = enumDevices(i))
+        {
+            DeviceImpl * dev = (DeviceImpl *)i;
+            if (dev->isHDCPCap == False)
+            {
+                NvU8 portType;
+                NvU8 peerType;
+                bool bIsPortPresent;
+                peerType = dev->peerDevice;
+                bIsPortPresent = dev->hal->getDownstreamPort(&portType);
+
+                // Skip the remote DPCD read if the downstream device is a dongle.
+                if (bIsPortPresent && (peerType == Dongle))
+                {
+                    // The dongle's BKSV might not be ready in some cases.
+                    // Set it from the branch device's value.
+                    hal->getBKSV(&dev->BKSV[0]);
+                    dev->nvBCaps[0] = dev->BCAPS[0] = 0x1;
+                    dev->isHDCPCap = True;
+                    dev->shadow.hdcpCapDone = false;
+                    fireEvents();
+                    continue;
+                }
+                // Issue a new remote HDCP capability check.
+                DP_ASSERT(dev->isDeviceHDCPDetectionAlive == false);
+                if ((dev->deviceHDCPDetection = new DeviceHDCPDetection(dev, messageManager, timer)))
+                {
+                    dev->isDeviceHDCPDetectionAlive = true;
+                    dev->deviceHDCPDetection->start();
+                    dev->shadow.hdcpCapDone = false;
+
+                    if (hdcpCapsRetries < 1)
+                    {
+                        timer->queueCallback(this, &tagDelayedHdcpCapRead, 3000);
+                        hdcpCapsRetries++;
+                    }
+                }
+                else
+                {
+                    // For risk control, mark the device as not HDCP-capable.
+                    DP_ASSERT(0 && "new failed");
+                    dev->isDeviceHDCPDetectionAlive = false;
+                    dev->isHDCPCap = False;
+
+                    if (!dev->isMultistream())
+                        dev->shadow.hdcpCapDone = true;
+                }
+            }
+            else
+            {
+                DP_PRINTF(DP_FATAL, "DPCONN> This DP1.2 device is HDCP capable");
+            }
+        }
+    }
+}
+
+void ConnectorImpl::discoveryDetectComplete()
+{
+    fireEvents();
+    // No outstanding EDID reads and no branch/sink detections for MST.
+    if (pendingEdidReads.isEmpty() &&
+        (!discoveryManager ||
+         (discoveryManager->outstandingBranchDetections.isEmpty() &&
+          discoveryManager->outstandingSinkDetections.isEmpty())))
+    {
+        bDeferNotifyLostDevice = false;
+        isDiscoveryDetectComplete = true;
+        bIsDiscoveryDetectActive = false;
+
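// The completion test above drains three async sources; the same check,
// factored out as a sketch ('detectWorkPending' is hypothetical; compiled out):
#if 0
    bool detectWorkPending() const
    {
        return !pendingEdidReads.isEmpty() ||
               (discoveryManager &&
                (!discoveryManager->outstandingBranchDetections.isEmpty() ||
                 !discoveryManager->outstandingSinkDetections.isEmpty()));
    }
#endif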
+        // Detection is complete; see if we can enter the power saving state.
+        isNoActiveStreamAndPowerdown();
+
+        fireEvents();
+    }
+}
+
+void ConnectorImpl::applyEdidWARs(Edid & edid, DiscoveryManager::Device & device)
+{
+    DpMonitorDenylistData *pDenylistData = new DpMonitorDenylistData();
+    NvU32 warFlag = 0;
+    warFlag = main->monitorDenylistInfo(edid.getManufId(), edid.getProductId(), pDenylistData);
+
+    // Apply any EDID overrides if required.
+    edid.applyEdidWorkArounds(warFlag, pDenylistData);
+    this->handleEdidWARs(edid, device);
+
+    delete pDenylistData;
+}
+
+void DisplayPort::DevicePendingEDIDRead::mstEdidCompleted(EdidReadMultistream * from)
+{
+    Address::StringBuffer sb;
+    DP_USED(sb);
+    DP_PRINTF(DP_NOTICE, "DP-CONN> Edid read complete: %s %s",
+              from->topologyAddress.toString(sb),
+              from->edid.getName());
+    ConnectorImpl * connector = parent;
+    parent->applyEdidWARs(from->edid, device);
+    parent->processNewDevice(device, from->edid, true, DISPLAY_PORT, RESERVED);
+    delete this;
+    connector->discoveryDetectComplete();
+}
+
+void DisplayPort::DevicePendingEDIDRead::mstEdidReadFailed(EdidReadMultistream * from)
+{
+    Address::StringBuffer sb;
+    DP_USED(sb);
+    DP_PRINTF(DP_ERROR, "DP-CONN> Edid read failed: %s (using fallback)",
+              from->topologyAddress.toString(sb));
+    ConnectorImpl * connector = parent;
+    parent->processNewDevice(device, Edid(), true, DISPLAY_PORT, RESERVED);
+    delete this;
+    connector->discoveryDetectComplete();
+}
+
+void ConnectorImpl::messageProcessed(MessageManager::MessageReceiver * from)
+{
+    if (from == &ResStatus)
+    {
+        for (Device * i = enumDevices(0); i; i = enumDevices(i))
+            if (i->getGUID() == ResStatus.request.guid)
+            {
+                DeviceImpl * child = ((DeviceImpl *)i)->children[ResStatus.request.port];
+                if (child)
+                {
+                    child->resetCacheInferredLink();
+                    sink->bandwidthChangeNotification((DisplayPort::Device*)child, false);
+                    return;
+                }
+
+                break;
+            }
+
+        // Child wasn't found... invalidate all bandwidths on the topology.
+        for (Device * i = enumDevices(0); i; i = enumDevices(i))
+        {
+            ((DeviceImpl *)i)->resetCacheInferredLink();
+        }
+    }
+    else
+        DP_ASSERT(0 && "Received unexpected upstream message that we AREN'T registered for");
+}
+
+void ConnectorImpl::discoveryNewDevice(const DiscoveryManager::Device & device)
+{
+    //
+    // We're guaranteed that there isn't already a device on the list with the same
+    // address. If we receive the same device announce again, it is considered
+    // a notification that the underlying device may have seen an HPD.
+    //
+    // We're going to queue an EDID read and remember which device we did it on.
+    // If the EDID comes back different, we'll have to mark the old device object
+    // as disconnected and create a new one. This is required because the
+    // EDID is one of the fields considered to be immutable.
+    //
+
+    if (!device.branch)
+    {
+        if (!device.videoSink)
+        {
+            // Don't read the EDID on a device that has no video sink.
+            processNewDevice(device, Edid(), false, DISPLAY_PORT, RESERVED);
+            return;
+        }
+        pendingEdidReads.insertBack(new DevicePendingEDIDRead(this, messageManager, device));
+    }
+    else
+    {
+        // Don't try to read the EDID on a branch device.
+        processNewDevice(device, Edid(), true, DISPLAY_PORT, RESERVED);
+    }
+}
+
+void ConnectorImpl::processNewDevice(const DiscoveryManager::Device & device,
+                                     const Edid & edid,
+                                     bool isMultistream,
+                                     DwnStreamPortType portType,
+                                     DwnStreamPortAttribute portAttribute,
+                                     bool isCompliance)
+{
+    //
+    // Ideally we should read the EDID here.
+    // but instead we just report the device and
+    // try to find it in the list of existing devices.
+    //
+    DeviceImpl * existingDev = findDeviceInList(device.address);
+    if (existingDev)
+        existingDev->resetCacheInferredLink();
+
+    //
+    // Process fallback EDID
+    //
+    Edid processedEdid = edid;
+
+    if (!edid.getEdidSize() || !edid.isChecksumValid() || !edid.isValidHeader() ||
+        edid.isPatchedChecksum())
+    {
+        if (portType == WITHOUT_EDID)
+        {
+            switch(portAttribute)
+            {
+                case RESERVED:
+                case IL_720_480_60HZ:
+                case IL_720_480_50HZ:
+                case IL_1920_1080_60HZ:
+                case IL_1920_1080_50HZ:
+                case PG_1280_720_60HZ:
+                case PG_1280_720_50_HZ:
+                    DP_ASSERT(0 && "Default EDID feature not supported!");
+                    break;
+            }
+
+        }
+        if (portType == ANALOG_VGA)
+            makeEdidFallbackVGA(processedEdid);
+        else
+        {
+            makeEdidFallback(processedEdid, hal->getVideoFallbackSupported());
+        }
+    }
+
+    //
+    // Process caps
+    //
+    bool hasAudio = device.SDPStreams && device.SDPStreamSinks;
+    bool hasVideo = device.videoSink;
+    NvU64 maxTmdsClkRate = 0U;
+    ConnectorType connector = connectorDisplayPort;
+
+    if (portType == DISPLAY_PORT_PLUSPLUS || portType == DVI || portType == HDMI)
+    {
+        maxTmdsClkRate = device.maxTmdsClkRate;
+    }
+
+    switch(portType)
+    {
+        case DISPLAY_PORT:
+        case DISPLAY_PORT_PLUSPLUS: // DP port that supports DP and TMDS
+            connector = connectorDisplayPort;
+            break;
+
+        case ANALOG_VGA:
+            connector = connectorVGA;
+            break;
+
+        case DVI:
+            connector = connectorDVI;
+            break;
+
+        case HDMI:
+            connector = connectorHDMI;
+            break;
+
+        case WITHOUT_EDID:
+            connector = connectorDisplayPort;
+            break;
+    }
+
+    // Dongle in SST mode.
+    if ((device.peerDevice == Dongle) && (device.address.size() == 0))
+        hasAudio = hasVideo = false;
+
+    if (device.branch)
+        hasAudio = hasVideo = false;
+
+    if (!existingDev)
+        goto create;
+
+    if (isCompliance && (existingDev->processedEdid == processedEdid))
+    {
+        // unzombie the old device
+    }
+    else if (existingDev->audioSink != hasAudio ||
+             existingDev->videoSink != hasVideo ||
+             existingDev->rawEDID != edid ||
+             existingDev->processedEdid != processedEdid ||
+             existingDev->connectorType != connector ||
+             existingDev->multistream != isMultistream ||
+             existingDev->complianceDeviceEdidReadTest != isCompliance ||
+             existingDev->maxTmdsClkRate != maxTmdsClkRate ||
+             (existingDev->address.size() > 1 && !existingDev->getParent()) ||
+             // If it is an Uninitialized Mux device, goto create so that we can properly
+             // initialize the device and all its caps
+             existingDev->isFakedMuxDevice())
+        goto create;
+
+    // Complete match, make sure it's marked as plugged
+    existingDev->plugged = true;
+    if (existingDev->isActive())
+        existingDev->activeGroup->update(existingDev, true);
+
+    fireEvents();
+    return;
+create:
+    // If there is an existing device, mark it as no longer available.
+    if (existingDev)
+        existingDev->plugged = false;
+
+    // Find parent
+    DeviceImpl * parent = 0;
+    if (device.address.size() != 0)
+    {
+        for (Device * i = enumDevices(0); i; i = enumDevices(i))
+        {
+            if ((i->getTopologyAddress() == device.address.parent()) &&
+                (((DeviceImpl *)i)->plugged))
+            {
+                parent = (DeviceImpl*)i;
+                break;
+            }
+        }
+    }
+
+    DP_ASSERT((parent || device.address.size() <= 1) && "Device was registered before parent");
+
+    DeviceImpl * newDev;
+    //
+    // If it is a faked Mux device, we have already notified DD of a few of its caps.
+    // Reuse the same device to make sure that DD updates the same device's
+    // parameters; otherwise create a new device.
+    //
+    if (existingDev && existingDev->isFakedMuxDevice())
+    {
+        newDev = existingDev;
+        existingDev = NULL;
+    }
+    else
+    {
+        newDev = new DeviceImpl(hal, this, parent);
+    }
+
+    if (!newDev)
+    {
+        DP_ASSERT(0 && "new failed");
+        return;
+    }
+
+    if (parent)
+        parent->children[device.address.tail()] = newDev;
+
+    // Fill out the new device
+    newDev->address = device.address;
+    newDev->multistream = isMultistream;
+    newDev->videoSink = hasVideo;
+    newDev->audioSink = hasAudio;
+    newDev->plugged = true;
+    newDev->rawEDID = edid;
+    newDev->processedEdid = processedEdid;
+    newDev->connectorType = connector;
+    newDev->guid = device.peerGuid;
+    newDev->peerDevice = device.peerDevice;
+    newDev->portMap = device.portMap;
+    newDev->dpcdRevisionMajor = device.dpcdRevisionMajor;
+    newDev->dpcdRevisionMinor = device.dpcdRevisionMinor;
+    newDev->complianceDeviceEdidReadTest = isCompliance;
+    newDev->maxTmdsClkRate = maxTmdsClkRate;
+    newDev->bApplyPclkWarBug4949066 = false;
+
+    Address::NvU32Buffer addrBuffer;
+    dpMemZero(addrBuffer, sizeof(addrBuffer));
+    newDev->address.toNvU32Buffer(addrBuffer);
+    NV_DPTRACE_INFO(NEW_SINK_DETECTED, newDev->address.size(), addrBuffer[0], addrBuffer[1], addrBuffer[2], addrBuffer[3],
+                    newDev->multistream, newDev->rawEDID.getManufId(), newDev->rawEDID.getProductId());
+
+    if (newDev->rawEDID.getManufId() == 0x6D1E)
+    {
+        newDev->bApplyPclkWarBug4949066 = true;
+    }
+
+    // Apply any DPCD overrides if required
+    newDev->dpcdOverrides();
+
+    this->setDisableDownspread(processedEdid.WARFlags.bDisableDownspread);
+    //
+    // Some 4K eDP panels need HBR2 to support higher modes. The highest assessed LC
+    // remains in a stale state after applying DPCD overrides here, so we need to
+    // assess the link again.
+    //
+    if (newDev->isOptimalLinkConfigOverridden())
+    {
+        this->assessLink();
+    }
+
+    // Postpone the remote HDCPCap read for Dongles
+    DP_ASSERT(!isLinkInD3() && "Hdcp probe at D3");
+    if (device.peerDevice != Dongle)
+    {
+        DP_ASSERT(newDev->isDeviceHDCPDetectionAlive == false);
+        if ((newDev->deviceHDCPDetection = new DeviceHDCPDetection(newDev, messageManager, timer)))
+        {
+            //
+            // We cannot move the hdcpDetection after the stream is added because DD
+            // needs the hdcp Cap before the stream is added.
+            //
+            newDev->isDeviceHDCPDetectionAlive = true;
+            newDev->deviceHDCPDetection->start();
+        }
+        else
+        {
+            // As risk control, mark the device as not HDCP capable.
+            DP_ASSERT(0 && "new failed");
+            newDev->isDeviceHDCPDetectionAlive = false;
+            newDev->isHDCPCap = False;
+
+            if (!newDev->isMultistream())
+                newDev->shadow.hdcpCapDone = true;
+        }
+    }
+
+    newDev->vrrEnablement = new VrrEnablement(newDev);
+    if (!newDev->vrrEnablement)
+    {
+        DP_ASSERT(0 && "new VrrEnablement failed");
+    }
+
+    BInfo bInfo;
+    if ((!isHopLimitExceeded) && (hal->getBinfo(bInfo)))
+    {
+        if (bInfo.maxCascadeExceeded || bInfo.maxDevsExceeded)
+        {
+            if (isHDCPAuthOn)
+            {
+                // Abort the Authentication
+                DP_PRINTF(DP_WARNING, "DP> Topology limited. Abort Authentication.");
+                isHDCPAuthOn = false;
+                isHopLimitExceeded = true;
+                for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next)
+                {
+                    GroupImpl * group = (GroupImpl *)i;
+                    if (group->hdcpEnabled)
+                    {
+                        group->hdcpSetEncrypted(false);
+                    }
+                }
+                main->configureHDCPAbortAuthentication(KSVTOP);
+                main->configureHDCPDisableAuthentication();
+                isHDCPAuthOn = false;
+            }
+            isHopLimitExceeded = true;
+        }
+        else
+            isHopLimitExceeded = false;
+    }
+
+    //
+    // If the device is a faked Mux device, then we just initialized it.
+    // Reset its faked status and skip adding it to the deviceList.
+    //
+    if (newDev->isFakedMuxDevice())
+    {
+        newDev->bIsFakedMuxDevice = false;
+        newDev->bIsPreviouslyFakedMuxDevice = true;
+    }
+    else
+    {
+        deviceList.insertBack(newDev);
+    }
+
+    // If a new device has replaced a previous compliance device, let this event be
+    // exposed to DD now; i.e. the old device will be zombied/lost now, lazily
+    // (instead of at the unplug which happened a while back).
+    if (existingDev && existingDev->complianceDeviceEdidReadTest)
+        existingDev->lazyExitNow = true;
+
+    if (newDev->isBranchDevice() && newDev->isAtLeastVersion(1,4))
+    {
+        //
+        // GUID_2 will be non-zero for a virtual peer device and 0 for others.
+        // This will help identify if a device is a virtual peer device or not.
+        //
+        newDev->queryGUID2();
+    }
+
+    if (!linkAwaitingTransition)
+    {
+        //
+        // When the link is awaiting an SST<->MST transition, DSC caps read from a downstream
+        // DSC branch device might be wrong. The DSC caps exposed by a DSC MST branch depend
+        // on the current link state. If it is in SST mode, i.e. MST_EN (0x111[bit 0]) is 0, and the
+        // panel connected behind it supports DSC, then the branch will expose the DSC caps
+        // of the panel connected downstream rather than its own. This is because the source
+        // will have no other way to read the caps of the downstream panel. In fact, when
+        // MST_EN = 0 and UP_REQ_EN (0x111 [bit 1]) = 1 the source can read the caps of the
+        // downstream panel using REMOTE_DPCD_READ, but the branch device's behavior depends
+        // only on the MST_EN bit. Similarly in SST, if the panel connected downstream of the branch
+        // does not support DSC, the DSC MST branch will expose its own DSC caps.
+        // During boot, since the VBIOS drives the display in SST mode, when the driver takes over,
+        // linkAwaitingTransition will be true. DPLib does link assessment and topology
+        // discovery by setting UP_REQ_EN to true while still keeping MST_EN false.
+        // This is to ensure we detach the head and active modeset groups that are in SST mode
+        // before switching the link to MST mode. When processNewDevice is called at this
+        // point to create new devices, we should not read DSC caps, for the reason mentioned above.
+        // As long as linkAwaitingTransition is true, DPLib will not report new devices to
+        // the client, since isPendingNewDevice() will be false even though DPLib discovered
+        // new devices. After DPLib completes topology discovery, DD initiates notifyDetachBegin/End
+        // to remove active groups from the link, and notifyDetachEnd calls assessLink,
+        // where we toggle the link state. Only after this should we read DSC caps in this case.
+        // Following this, assessLink calls fireEvents(), which will report
+        // the new devices to clients, and the client will have the correct DSC caps.
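+        //
+        // Illustrative timeline of the above, e.g. booting behind an MST branch:
+        //   1. VBIOS lights the panel in SST: MST_EN = 0, linkAwaitingTransition = true.
+        //   2. DPLib sets UP_REQ_EN = 1 and discovers the topology; DSC caps are not read yet.
+        //   3. DD issues notifyDetachBegin/End; notifyDetachEnd calls assessLink(), toggling the link to MST.
+        //   4. assessLink() calls fireEvents(), which reports the new devices with trustworthy DSC caps.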
+ // + bool bGpuDscSupported; + + // Check GPU DSC Support + main->getDscCaps(&bGpuDscSupported); + if (bGpuDscSupported) + { + if (newDev->getDSCSupport()) + { + // Read and parse DSC caps only if panel supports DSC + newDev->readAndParseDSCCaps(); + + // Read and Parse Branch Specific DSC Caps + if (!newDev->isVideoSink() && !newDev->isAudioSink()) + { + newDev->readAndParseBranchSpecificDSCCaps(); + } + } + + if (!processedEdid.WARFlags.bIgnoreDscCap) + { + // Check if DSC is possible for the device and if so, set DSC Decompression device. + newDev->setDscDecompressionDevice(this->bDscCapBasedOnParent); + } + } + } + + if (newDev->peerDevice == Dongle) + { + // For Dongle, we need to read detailed port caps if DPCD access is available on DP 1.4+. + if (newDev->isAtLeastVersion(1,4)) + { + newDev->getPCONCaps(&(newDev->pconCaps)); + } + + // + // If dongle does not have DPCD access but it is native PCON with Virtual peer support, + // we can get dongle port capabilities from parent VP DPCD detailed port descriptors. + // + else if (newDev->parent && (newDev->parent)->isVirtualPeerDevice()) + { + if (!main->isMSTPCONCapsReadDisabled()) + { + newDev->parent->getPCONCaps(&(newDev->pconCaps)); + newDev->connectorType = newDev->parent->getConnectorType(); + } + } + } + + // Read panel replay capabilities + newDev->getPanelReplayCaps(); + + // Get Panel FEC support only if GPU supports FEC + if (this->isFECSupported()) + { + newDev->getFECSupport(); + } + + if (main->supportMSAOverMST()) + { + newDev->bMSAOverMSTCapable = newDev->getSDPExtnForColorimetrySupported(); + } + else + { + newDev->bMSAOverMSTCapable = false; + } + + newDev->applyOUIOverrides(); + + if (main->isEDP() && !bOuiCached) + { + // + // if the regkey is set, and the system is in discrete mode, skip OUI caching. + // i.e. cache OUI if regkey is not set OR system is in dynamic mode. + // + if ((!this->bSkipZeroOuiCache) || main->isInternalPanelDynamicMuxCapable()) + { + // Save Source OUI information for eDP. 
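+            // The values cached below (source OUI, model string, chip revision) are
+            // read once; bOuiCached then prevents repeating this DPCD read on
+            // subsequent detections of the same eDP panel.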
+ hal->getOuiSource(cachedSourceOUI, &cachedSourceModelName[0], + sizeof(cachedSourceModelName), cachedSourceChipRevision); + this->bOuiCached = true; + } + } + + fireEvents(); +} + +LinkRates* ConnectorImpl::importDpLinkRates() +{ + LinkRate linkRate; + LinkRates *pConnectorLinkRates = linkPolicy.getLinkRates(); + + // Attempt to configure link rate table mode if supported + if (hal->isIndexedLinkrateCapable() && + main->configureLinkRateTable(hal->getLinkRateTable(), pConnectorLinkRates)) + { + hal->setIndexedLinkrateEnabled(true); + } + else + { + // Reset configured link rate table if ever enabled to get RM act right + if (hal->isIndexedLinkrateEnabled()) + { + main->configureLinkRateTable(NULL, NULL); + hal->setIndexedLinkrateEnabled(false); + } + pConnectorLinkRates->clear(); + } + + // Get maximal link rate supported by GPU + linkRate = main->maxLinkRateSupported(); + linkRate = DATA_RATE_8B_10B_TO_LINK_RATE(linkRate); + + // Insert in order any additional entries regardless of ILR Capability + + if (linkRate >= dp2LinkRate_1_62Gbps) + pConnectorLinkRates->insert((NvU16)dp2LinkRate_1_62Gbps); + + if (linkRate >= dp2LinkRate_2_70Gbps) + pConnectorLinkRates->insert((NvU16)dp2LinkRate_2_70Gbps); + + if (linkRate >= dp2LinkRate_5_40Gbps) + pConnectorLinkRates->insert((NvU16)dp2LinkRate_5_40Gbps); + + if (linkRate >= dp2LinkRate_8_10Gbps) + pConnectorLinkRates->insert((NvU16)dp2LinkRate_8_10Gbps); + + return pConnectorLinkRates; +} + +void ConnectorImpl::populateAllDpConfigs() +{ + LinkRate linkRate; + LinkRates *pConnectorLinkRates = linkPolicy.getLinkRates(); + + unsigned laneCounts[] = {laneCount_1, laneCount_2, laneCount_4}; + unsigned laneSets = sizeof(laneCounts) / sizeof(laneCounts[0]); + + // + // Following sequence is to be followed for saving power by default; + // It may vary with sinks which support link rate table. + // + // Link Config MBPS + // 1*RBR 162 + // 1*HBR 270 + // 2*RBR 324 + // 1*HBR2 540 + // 2*HBR 540 + // 4*RBR 648 + // 1*HBR3 810 + // ... 
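+    // Illustration: with rates {RBR, HBR, HBR2, HBR3} and lane counts {1, 2, 4} the
+    // ascending order continues 2*HBR2 / 4*HBR (1080), 2*HBR3 (1620), 4*HBR2 (2160),
+    // 4*HBR3 (3240). The ordering key is per-lane rate x lane count, and on a tie
+    // (e.g. 1*HBR2 vs 2*HBR at 540) the fewer-lane config comes first to save power.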
+    //
+    if (numPossibleLnkCfg)
+    {
+        DP_PRINTF(DP_NOTICE, "DPCONN> Rebuild possible link rate configurations");
+        delete[] allPossibleLinkCfgs;
+        numPossibleLnkCfg = 0;
+    }
+
+    importDpLinkRates();
+
+    numPossibleLnkCfg = laneSets * pConnectorLinkRates->getNumLinkRates();
+    if (numPossibleLnkCfg == 0)
+    {
+        DP_PRINTF(DP_ERROR, "DPCONN> %s: Invalid lane count %d or link rates %d!",
+                  __FUNCTION__, laneSets, pConnectorLinkRates->getNumLinkRates());
+        DP_ASSERT(0 && "Invalid lane count or link rates!");
+        return;
+    }
+
+    allPossibleLinkCfgs = new LinkConfiguration[numPossibleLnkCfg]();
+
+    if (allPossibleLinkCfgs == NULL)
+    {
+        DP_PRINTF(DP_ERROR, "DPCONN> %s: Failed to allocate allPossibleLinkCfgs array",
+                  __FUNCTION__);
+        numPossibleLnkCfg = 0;
+        return;
+    }
+
+    // Populate all possible link configurations
+    linkRate = pConnectorLinkRates->getMaxRate();
+    for (unsigned i = 0; i < pConnectorLinkRates->getNumLinkRates(); i++)
+    {
+        for (unsigned j = 0; j < laneSets; j++)
+        {
+            allPossibleLinkCfgs[i * laneSets + j].setLaneRate(linkRate, laneCounts[j]);
+            allPossibleLinkCfgs[i * laneSets + j].bDisableDownspread = this->getDownspreadDisabled();
+        }
+        linkRate = pConnectorLinkRates->getLowerRate(linkRate);
+    }
+
+    // Sort link configurations by bandwidth, from low to high
+    for (unsigned i = 0; i < numPossibleLnkCfg - 1; i++)
+    {
+        LinkConfiguration *pLowCfg = &allPossibleLinkCfgs[i];
+        for (unsigned j = i + 1; j < numPossibleLnkCfg; j++)
+        {
+            if (allPossibleLinkCfgs[j] < *pLowCfg)
+                pLowCfg = &allPossibleLinkCfgs[j];
+        }
+        // Swap
+        if (pLowCfg != &allPossibleLinkCfgs[i])
+        {
+            LinkRate swapRate  = pLowCfg->peakRate;
+            unsigned swapLanes = pLowCfg->lanes;
+            pLowCfg->setLaneRate(allPossibleLinkCfgs[i].peakRate,
+                                 allPossibleLinkCfgs[i].lanes);
+            allPossibleLinkCfgs[i].setLaneRate(swapRate, swapLanes);
+        }
+    }
+}
+
+void ConnectorImpl::discoveryLostDevice(const Address & address)
+{
+    DeviceImpl * existingDev = findDeviceInList(address);
+
+    if (!existingDev)
+    {
+        DP_ASSERT(0 && "Device lost on device not in database?!");
+        return;
+    }
+
+    existingDev->plugged = false;
+    existingDev->devDoingDscDecompression = NULL;
+    fireEvents();
+}
+
+ConnectorImpl::~ConnectorImpl()
+{
+    if (numPossibleLnkCfg)
+        delete[] allPossibleLinkCfgs;
+
+    timer->cancelCallbacks(this);
+    delete discoveryManager;
+    pendingEdidReads.clear();
+    delete messageManager;
+    delete qseNonceGenerator;
+    delete hal;
+}
+
+//
+// Clear all the state associated with the head attachment
+//
+void ConnectorImpl::hardwareWasReset()
+{
+    activeLinkConfig.lanes = 0;
+
+    while (!activeGroups.isEmpty())
+    {
+        GroupImpl * g = (GroupImpl *)activeGroups.front();
+        activeGroups.remove(g);
+        inactiveGroups.insertBack(g);
+
+        g->setHeadAttached(false);
+    }
+
+    while (!dscEnabledDevices.isEmpty())
+        (void) dscEnabledDevices.pop();
+}
+
+Group * ConnectorImpl::resume(bool firmwareLinkHandsOff,
+                              bool firmwareDPActive,
+                              bool plugged,
+                              bool isUefiSystem,
+                              unsigned firmwareHead,
+                              bool bFirmwareLinkUseMultistream,
+                              bool bDisableVbiosScratchRegisterUpdate,
+                              bool bAllowMST)
+{
+    Group * result = 0;
+    hardwareWasReset();
+    previousPlugged = false;
+    connectorActive = true;
+    bIsUefiSystem = isUefiSystem;
+
+    this->bDisableVbiosScratchRegisterUpdate = bDisableVbiosScratchRegisterUpdate;
+
+    bFromResumeToNAB = true;
+
+    if (firmwareLinkHandsOff)
+    {
+        isLinkQuiesced = true;
+    }
+    else if (firmwareDPActive)
+    {
+        DP_PRINTF(DP_NOTICE, "CONN> Detected firmware panel is active on head %d.", firmwareHead);
+        ((GroupImpl *)firmwareGroup)->setHeadAttached(true);
+        ((GroupImpl *)firmwareGroup)->headIndex = firmwareHead;
+        ((GroupImpl *)firmwareGroup)->streamIndex = 1;
+        ((GroupImpl *)firmwareGroup)->headInFirmware = true;
+
+        this->linkState = bFirmwareLinkUseMultistream ? DP_TRANSPORT_MODE_MULTI_STREAM : DP_TRANSPORT_MODE_SINGLE_STREAM;
+
+        inactiveGroups.remove((GroupImpl *)firmwareGroup);
+        activeGroups.remove((GroupImpl *)firmwareGroup);
+        activeGroups.insertBack((GroupImpl *)firmwareGroup);
+
+        result = firmwareGroup;
+    }
+
+    hal->overrideMultiStreamCap(bAllowMST);
+
+    //
+    // In the resume code path, all devices on this connector get lost and deleted on the first
+    // fireEvents(), and that could generate unnecessary new/lost device events. Therefore defer
+    // the lost-device notifications until discovery detect completes; this allows the
+    // processNewDevice() function to look at matching existing devices and optimize creation
+    // of new devices. We only have to set the flag to true when plugged = true, since if
+    // disconnected, we are not going to defer anything.
+    //
+    bDeferNotifyLostDevice = plugged;
+    bAttachOnResume = true;
+
+    //
+    // If we are resuming, record the allocatedDpTunnelBw before calling NLP.
+    // NLP will reset and try to allocate BW = LinkConfiguration.
+    // The previous CQA would have been performed with this allocated BW.
+    // If this is different from the new allocation, we will queue a DP allocation changed event.
+    //
+    NvU64 previousAllocatedDpTunnelBw = allocatedDpTunnelBw;
+    notifyLongPulse(plugged);
+
+    // Reallocate DP BW for all connected and known clients
+    updateDpTunnelBwAllocation();
+    if (previousAllocatedDpTunnelBw != allocatedDpTunnelBw)
+    {
+        timer->queueCallback(this, &tagDpBwAllocationChanged, 0, false /* not allowed in sleep */);
+    }
+
+    bAttachOnResume = false;
+
+    return result;
+}
+
+void ConnectorImpl::pause()
+{
+    connectorActive = false;
+    if (messageManager)
+    {
+        messageManager->pause();
+    }
+}
+
+// Query the current Device topology
+Device * ConnectorImpl::enumDevices(Device * previousDevice)
+{
+    if (previousDevice)
+        previousDevice = (DeviceImpl *)((DeviceImpl*)previousDevice)->next;
+    else
+        previousDevice = (DeviceImpl *)deviceList.begin();
+
+    if ((DeviceImpl*)previousDevice == deviceList.end())
+        return 0;
+    else
+        return (DeviceImpl *)previousDevice;
+}
+
+LinkConfiguration ConnectorImpl::getMaxLinkConfig()
+{
+    NvU64 maxLinkRate;
+    NvU64 gpuMaxLinkRate;
+
+    DP_ASSERT(hal);
+
+    if (main->isEDP())
+    {
+        // The regkey is supported on eDP panels only
+        maxLinkRate = maxLinkRateFromRegkey;
+        // Check if a valid value is present in the regkey
+        if (!(maxLinkRate && (IS_VALID_LINKBW_10M(maxLinkRate))))
+        {
+            maxLinkRate = hal->getMaxLinkRate();
+        }
+    }
+    else
+    {
+        maxLinkRate = hal->getMaxLinkRate();
+    }
+
+    gpuMaxLinkRate = main->maxLinkRateSupported();
+    gpuMaxLinkRate = DATA_RATE_8B_10B_TO_LINK_RATE(gpuMaxLinkRate);
+
+    LinkRate linkRate = maxLinkRate ?
+                        DP_MIN(maxLinkRate, gpuMaxLinkRate) :
+                        gpuMaxLinkRate;
+
+    unsigned laneCount = hal->getMaxLaneCount() ?
+ DP_MIN(hal->getMaxLaneCountSupportedAtLinkRate(linkRate), hal->getMaxLaneCount()) : + 4U; + + linkRate = LINK_RATE_TO_DATA_RATE_8B_10B(linkRate); + + return LinkConfiguration (&this->linkPolicy, + laneCount, linkRate, + this->hal->getEnhancedFraming(), + linkUseMultistream(), + false, /* disablePostLTRequest */ + this->bFECEnable, + false, /* disableLTTPR */ + this->getDownspreadDisabled()); +} + +LinkConfiguration ConnectorImpl::getActiveLinkConfig() +{ + DP_ASSERT(hal); + + return LinkConfiguration (&activeLinkConfig.policy, + activeLinkConfig.lanes, + (LINK_RATE_TO_DATA_RATE_8B_10B(activeLinkConfig.peakRate)), + activeLinkConfig.enhancedFraming, + activeLinkConfig.multistream, + false, /* disablePostLTRequest */ + activeLinkConfig.bEnableFEC, + false, /* disableLTTPR */ + this->getDownspreadDisabled()); +} + +LinkConfiguration ConnectorImpl::initMaxLinkConfig() +{ + LinkRate linkRate = dp2LinkRate_8_10Gbps; + unsigned laneCount = 4; + + return LinkConfiguration (&this->linkPolicy, + laneCount, linkRate, + this->hal->getEnhancedFraming(), + linkUseMultistream(), + false, /* disablePostLTRequest */ + this->bFECEnable); +} + +void ConnectorImpl::beginCompoundQuery(const bool bForceEnableFEC) +{ + if (linkGuessed && (main->getSorIndex() != DP_INVALID_SOR_INDEX)) + { + assessLink(); + } + + DP_ASSERT( !compoundQueryActive && "Previous compoundQuery was not ended."); + compoundQueryActive = true; + compoundQueryCount = 0; + compoundQueryResult = true; + compoundQueryLocalLinkPBN = 0; + compoundQueryUsedTunnelingBw = 0; + compoundQueryForceEnableFEC = bForceEnableFEC; + + for (Device * i = enumDevices(0); i; i=enumDevices(i)) + { + DeviceImpl * dev = (DeviceImpl *)i; + + if (i->getTopologyAddress().size() <= 1) + { + dev->bandwidth.lastHopLinkConfig = highestAssessedLC; + dev->bandwidth.compound_query_state.totalTimeSlots = 63; + dev->bandwidth.compound_query_state.timeslots_used_by_query = 0; + continue; + } + + if (!this->linkUseMultistream()) + continue; + + // Initialize starting conditions + // + // Note: this compound query code assumes that the total bandwidth is + // available for the configuration being queried. This ignores the + // concentrator case where some bandwidth may be in use by streams not + // controlled by this driver instance. Concentrators are currently not + // supported. + dev->bandwidth.compound_query_state.timeslots_used_by_query = 0; + dev->inferLeafLink(&dev->bandwidth.compound_query_state.totalTimeSlots); + + // + // Some VBIOS leave the branch in stale state and allocatePayload request queued + // at branch end gets processed much later causing the FreePBN returned to be stale. + // Clear the PBN in case EPR reports 0 free PBN when we have not explicitly requested + // for it, to clear up any previous stale allocations + // + if (dev->bandwidth.compound_query_state.totalTimeSlots == 0 && + !dev->payloadAllocated && dev->plugged) + { + GroupImpl *group = dev->activeGroup; + if (group != NULL) + { + NakData nakData; + Address devAddress = dev->getTopologyAddress(); + + AllocatePayloadMessage allocate; + unsigned sink = 0; // hardcode the audio sink to 0th in the device. + allocate.set(devAddress.parent(), devAddress.tail(), + dev->isAudioSink() ? 
+                             1 : 0, group->streamIndex, 0, &sink, true);
+
+                ((DeviceImpl *)dev)->bandwidth.enum_path.dataValid = false;
+
+                if (group->parent->messageManager->send(&allocate, nakData))
+                    dev->inferLeafLink(&dev->bandwidth.compound_query_state.totalTimeSlots);
+            }
+        }
+
+        // Clear assessment state
+        dev->bandwidth.compound_query_state.bandwidthAllocatedForIndex = 0;
+    }
+}
+
+static DP_IMP_ERROR translatePpsErrorToDpImpError(NVT_STATUS ppsErrorCode)
+{
+    switch (ppsErrorCode)
+    {
+        case NVT_STATUS_COLOR_FORMAT_NOT_SUPPORTED:
+            return DP_IMP_ERROR_PPS_COLOR_FORMAT_NOT_SUPPORTED;
+        case NVT_STATUS_INVALID_HBLANK:
+            return DP_IMP_ERROR_PPS_INVALID_HBLANK;
+        case NVT_STATUS_INVALID_BPC:
+            return DP_IMP_ERROR_PPS_INVALID_BPC;
+        case NVT_STATUS_MAX_LINE_BUFFER_ERROR:
+            return DP_IMP_ERROR_PPS_MAX_LINE_BUFFER_ERROR;
+        case NVT_STATUS_OVERALL_THROUGHPUT_ERROR:
+            return DP_IMP_ERROR_PPS_OVERALL_THROUGHPUT_ERROR;
+        case NVT_STATUS_DSC_SLICE_ERROR:
+            return DP_IMP_ERROR_PPS_DSC_SLICE_ERROR;
+        case NVT_STATUS_PPS_SLICE_COUNT_ERROR:
+            return DP_IMP_ERROR_PPS_PPS_SLICE_COUNT_ERROR;
+        case NVT_STATUS_PPS_SLICE_HEIGHT_ERROR:
+            return DP_IMP_ERROR_PPS_PPS_SLICE_HEIGHT_ERROR;
+        case NVT_STATUS_PPS_SLICE_WIDTH_ERROR:
+            return DP_IMP_ERROR_PPS_PPS_SLICE_WIDTH_ERROR;
+        case NVT_STATUS_INVALID_PEAK_THROUGHPUT:
+            return DP_IMP_ERROR_PPS_INVALID_PEAK_THROUGHPUT;
+        case NVT_STATUS_MIN_SLICE_COUNT_ERROR:
+            return DP_IMP_ERROR_PPS_MIN_SLICE_COUNT_ERROR;
+        default:
+            return DP_IMP_ERROR_PPS_GENERIC_ERROR;
+    }
+}
+
+bool ConnectorImpl::compoundQueryAttachTunneling(const DpModesetParams &modesetParams,
+                                                 DscParams *pDscParams,
+                                                 DP_IMP_ERROR *pErrorCode)
+{
+    if (!hal->isDpTunnelBwAllocationEnabled())
+    {
+        return true;
+    }
+
+    NvU64 bpp = modesetParams.modesetInfo.depth;
+    if (pDscParams->bEnableDsc)
+    {
+        bpp = divide_ceil(pDscParams->bitsPerPixelX16, 16);
+    }
+
+    NvU64 modeBwRequired = modesetParams.modesetInfo.pixelClockHz * bpp;
+    NvU64 freeTunnelingBw = allocatedDpTunnelBw - compoundQueryUsedTunnelingBw;
+
+    if (modeBwRequired > freeTunnelingBw)
+    {
+        SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_INSUFFICIENT_DP_TUNNELING_BANDWIDTH);
+        return false;
+    }
+
+    compoundQueryUsedTunnelingBw += modeBwRequired;
+
+    return true;
+}
+
+//
+// This call will be deprecated as soon as all clients move to the new API
+//
+bool ConnectorImpl::compoundQueryAttach(Group * target,
+                                        unsigned twoChannelAudioHz,   // if you need 192 kHz stereo specify 192000 here
+                                        unsigned eightChannelAudioHz, // Same setting for multi channel audio.
+                                                                      // DisplayPort encodes 3-8 channel streams as 8 channel
+                                        NvU64 pixelClockHz,           // Requested pixel clock for the mode
+                                        unsigned rasterWidth,
+                                        unsigned rasterHeight,
+                                        unsigned rasterBlankStartX,
+                                        unsigned rasterBlankEndX,
+                                        unsigned depth,
+                                        DP_IMP_ERROR *pErrorCode)
+{
+    ModesetInfo modesetInfo(twoChannelAudioHz, eightChannelAudioHz, pixelClockHz,
+                            rasterWidth, rasterHeight, (rasterBlankStartX - rasterBlankEndX),
+                            0 /*surfaceHeight*/, depth, rasterBlankStartX, rasterBlankEndX);
+
+    DpModesetParams modesetParams(0, modesetInfo);
+    return compoundQueryAttach(target, modesetParams, NULL, pErrorCode);
+}
+
+bool ConnectorImpl::compoundQueryAttach(Group * target,
+                                        const DpModesetParams &modesetParams, // Modeset info
+                                        DscParams *pDscParams,                // DSC parameters
+                                        DP_IMP_ERROR *pErrorCode)
+{
+    DP_ASSERT(compoundQueryActive);
+    if (pErrorCode)
+        *pErrorCode = DP_IMP_ERROR_NONE;
+
+    compoundQueryCount++;
+    DpModesetParams _dpModesetParams = modesetParams;
+    _dpModesetParams.modesetInfo.colorFormat = modesetParams.colorFormat;
+
+    if (!modesetParams.modesetInfo.depth || !modesetParams.modesetInfo.pixelClockHz)
+    {
+        DP_ASSERT(!"DP-CONN> Params with zero value passed to query!");
+        compoundQueryResult = false;
+        SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_ZERO_VALUE_PARAMS)
+        return false;
+    }
+
+    if (linkUseMultistream())
+    {
+        compoundQueryResult = compoundQueryAttachMST(target, _dpModesetParams,
+                                                     pDscParams, pErrorCode);
+    }
+    else // SingleStream case
+    {
+        compoundQueryResult = compoundQueryAttachSST(target, _dpModesetParams,
+                                                     pDscParams, pErrorCode);
+    }
+
+    if (compoundQueryResult)
+    {
+        compoundQueryResult = compoundQueryAttachTunneling(_dpModesetParams, pDscParams, pErrorCode);
+    }
+
+    return compoundQueryResult;
+}
+
+bool ConnectorImpl::dpLinkIsModePossible(const DpLinkIsModePossibleParams &params)
+{
+    bool bResult;
+    NvU32 numNonDscStreams;
+    bool bEnableFEC = false;
+
+reRunCompoundQuery:
+    bResult = true;
+    numNonDscStreams = 0;
+
+    for (NvU32 i = 0; i < NV_MAX_HEADS; i++)
+    {
+        if (params.head[i].pDscParams != NULL)
+            params.head[i].pDscParams->bEnableDsc = false;
+
+        if (params.head[i].pErrorStatus != NULL)
+            *params.head[i].pErrorStatus = DP_IMP_ERROR_NONE;
+    }
+
+    this->beginCompoundQuery(bEnableFEC /* bForceEnableFEC */);
+
+    for (NvU32 i = 0; i < NV_MAX_HEADS; i++)
+    {
+        if (params.head[i].pTarget == NULL)
+            continue;
+
+        DP_ASSERT(params.head[i].pModesetParams->headIndex == i);
+
+        bResult = this->compoundQueryAttach(params.head[i].pTarget,
+                                            *params.head[i].pModesetParams,
+                                            params.head[i].pDscParams,
+                                            params.head[i].pErrorStatus);
+        if (!bResult)
+            break;
+
+        if ((params.head[i].pDscParams == NULL) ||
+            !params.head[i].pDscParams->bEnableDsc)
+        {
+            numNonDscStreams++;
+            continue;
+        }
+
+        //
+        // When DSC is enabled, FEC also needs to be enabled. The previously
+        // attached non-DSC streams need to account for the 3% FEC overhead;
+        // therefore terminate the existing compound query, force enable FEC,
+        // and re-run the compound query.
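+        // Illustration: if head 0 attaches a non-DSC stream first and passes without
+        // FEC, and head 1 then needs DSC (hence FEC), the bandwidth already granted
+        // to head 0 did not include the FEC overhead; so the whole query is restarted
+        // with bEnableFEC = true and every stream is re-checked against it.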
+        //
+        if ((numNonDscStreams > 0) && !bEnableFEC)
+        {
+            this->endCompoundQuery();
+            bEnableFEC = true;
+            goto reRunCompoundQuery;
+        }
+
+        bEnableFEC = true;
+    }
+
+    if (!this->endCompoundQuery())
+        bResult = false;
+
+    return bResult;
+}
+
+bool ConnectorImpl::compoundQueryAttachMST(Group * target,
+                                           const DpModesetParams &modesetParams, // Modeset info
+                                           DscParams *pDscParams,                // DSC parameters
+                                           DP_IMP_ERROR *pErrorCode)
+{
+    CompoundQueryAttachMSTInfo localInfo;
+    NvBool result = true;
+
+    localInfo.localModesetInfo = modesetParams.modesetInfo;
+    if (this->preferredLinkConfig.isValid())
+        localInfo.lc = preferredLinkConfig;
+    else
+        localInfo.lc = highestAssessedLC;
+
+    if (compoundQueryForceEnableFEC)
+    {
+        localInfo.lc.enableFEC(isFECCapable());
+    }
+
+    if (compoundQueryAttachMSTIsDscPossible(target, modesetParams, pDscParams))
+    {
+        unsigned int forceDscBitsPerPixelX16 = pDscParams->bitsPerPixelX16;
+        result = compoundQueryAttachMSTDsc(target, modesetParams, &localInfo,
+                                           pDscParams, pErrorCode);
+        if (!result)
+        {
+            return false;
+        }
+
+        compoundQueryResult = compoundQueryAttachMSTGeneric(target, modesetParams, &localInfo,
+                                                            pDscParams, pErrorCode);
+        //
+        // compoundQueryAttachMSTGeneric might fail due to insufficient bandwidth.
+        // In compoundQueryAttachMSTDsc we only check whether the bpp fits in the
+        // available bandwidth based on the trained link config. There might be cases
+        // where the default 10 bpp fits in the available bandwidth based on the
+        // trained link config, yet the bandwidth is insufficient at the actual
+        // bottleneck link between source and sink to drive the mode, causing
+        // compoundQueryAttachMSTGeneric to fail. In case of such a failure, instead
+        // of returning false, check whether the mode can be supported with the max
+        // DSC compression bpp, and return true if it can.
+        //
+        if (!compoundQueryResult && forceDscBitsPerPixelX16 == 0U)
+        {
+            pDscParams->bitsPerPixelX16 = MAX_DSC_COMPRESSION_BPPX16;
+            result = compoundQueryAttachMSTDsc(target, modesetParams, &localInfo,
+                                               pDscParams, pErrorCode);
+            if (!result)
+            {
+                return false;
+            }
+
+            return compoundQueryAttachMSTGeneric(target, modesetParams, &localInfo,
+                                                 pDscParams, pErrorCode);
+        }
+        return compoundQueryResult;
+    }
+
+    return compoundQueryAttachMSTGeneric(target, modesetParams, &localInfo,
+                                         pDscParams, pErrorCode);
+}
+
+bool ConnectorImpl::compoundQueryAttachMSTIsDscPossible
+(
+    Group * target,
+    const DpModesetParams &modesetParams, // Modeset info
+    DscParams *pDscParams                 // DSC parameters
+)
+{
+    Device * newDev = target->enumDevices(0);
+    DeviceImpl * dev = (DeviceImpl *)newDev;
+    bool bFecCapable = false;
+    bool bGpuDscSupported;
+    main->getDscCaps(&bGpuDscSupported);
+
+    if (pDscParams && (pDscParams->forceDsc != DSC_FORCE_DISABLE))
+    {
+        if (dev && dev->isDSCPossible())
+        {
+            if ((dev->devDoingDscDecompression != dev) ||
+                ((dev->devDoingDscDecompression == dev) &&
+                 (dev->isLogical() && dev->parent)))
+            {
+                //
+                // If DSC decoding is going to happen at the sink's parent, or
+                // decoding will be done by the sink but the sink is a logical port,
+                // where the intermediate link between the Branch DFP and the Rx Panel
+                // can be anything other than DP (i.e. DSI, LVDS or something else),
+                // then we only have to make sure the path from the source to the
+                // sink's parent is FEC capable.
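+                // Illustration: for a sink on a logical port whose last hop is
+                // LVDS/DSI rather than DP, FEC can only be (and only needs to be)
+                // supported on the DP hops from the source up to the sink's parent;
+                // the non-DP link to the panel is outside the FEC requirement.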
+                // Refer to DP 1.4 Spec, section 5.4.5.
+                //
+                bFecCapable = dev->parent->isFECSupported();
+            }
+            else
+            {
+                bFecCapable = dev->isFECSupported();
+            }
+        }
+    }
+    else
+    {
+        return false;
+    }
+    // Make sure the panel/its parent & the GPU support DSC, and the whole path supports FEC
+    if (bGpuDscSupported &&                                 // If GPU supports DSC
+        this->isFECSupported() &&                           // If GPU supports FEC
+        pDscParams &&                                       // If client sent DSC info
+        pDscParams->bCheckWithDsc &&                        // If client wants to check with DSC
+        (dev && dev->devDoingDscDecompression) &&           // Either device or its parent supports DSC
+        bFecCapable &&                                      // If path up to the DSC decoding device supports FEC
+        (modesetParams.modesetInfo.bitsPerComponent != 6))  // DSC doesn't support bpc = 6
+    {
+        return true;
+    }
+    else
+    {
+        return false;
+    }
+}
+
+bool ConnectorImpl::compoundQueryAttachMSTDsc(Group * target,
+                                              const DpModesetParams &modesetParams, // Modeset info
+                                              CompoundQueryAttachMSTInfo * localInfo,
+                                              DscParams *pDscParams,                // DSC parameters
+                                              DP_IMP_ERROR *pErrorCode)
+{
+    NVT_STATUS result;
+
+    Device * newDev = target->enumDevices(0);
+    DeviceImpl * dev = (DeviceImpl *)newDev;
+
+    bool bGpuDscSupported;
+    main->getDscCaps(&bGpuDscSupported);
+
+    DSC_INFO dscInfo;
+    MODESET_INFO modesetInfoDSC;
+    WAR_DATA warData;
+    NvU64 availableBandwidthBitsPerSecond = 0;
+    unsigned PPS[DSC_MAX_PPS_SIZE_DWORD];
+    unsigned bitsPerPixelX16 = 0;
+    bool bDscBppForced = false;
+
+    if (!pDscParams->bitsPerPixelX16)
+    {
+        //
+        // For now, we will keep a predefined value of bitsPerPixel for MST = 10
+        // (bitsPerPixelX16 = 160).
+        //
+        pDscParams->bitsPerPixelX16 = PREDEFINED_DSC_MST_BPPX16;
+    }
+    else
+    {
+        bDscBppForced = true;
+    }
+
+    bitsPerPixelX16 = pDscParams->bitsPerPixelX16;
+
+    if (!this->preferredLinkConfig.isValid())
+    {
+        localInfo->lc.enableFEC(true);
+    }
+
+    dpMemZero(PPS, sizeof(unsigned) * DSC_MAX_PPS_SIZE_DWORD);
+    dpMemZero(&dscInfo, sizeof(DSC_INFO));
+    dpMemZero(&warData, sizeof(WAR_DATA));
+
+    // Populate DSC related info for PPS calculations
+    populateDscCaps(&dscInfo, dev->devDoingDscDecompression, pDscParams->forcedParams);
+
+    // Populate modeset related info for PPS calculations
+    populateDscModesetInfo(&modesetInfoDSC, &modesetParams);
+
+    // Checking for the DSC v1.1 and YUV combination
+    if ((dscInfo.sinkCaps.algorithmRevision.versionMajor == 1) &&
+        (dscInfo.sinkCaps.algorithmRevision.versionMinor == 1) &&
+        (modesetParams.colorFormat == dpColorFormat_YCbCr444))
+    {
+        DP_PRINTF(DP_WARNING, "WARNING: DSC v1.2 or higher is recommended for using YUV444");
+        DP_PRINTF(DP_WARNING, "Current version is 1.1");
+    }
+
+    if ((dev->devDoingDscDecompression == dev) && dev->parent)
+    {
+        if (dev->parent->bDscPassThroughColorFormatWar)
+        {
+            //
+            // Bug 3692417
+            // According to the DP Spec, when DSC is enabled the color format should only
+            // depend on the device doing the DSC decompression. But when the Synaptics
+            // VMM5320 is the parent of the device doing DSC decompression, if a certain
+            // color format is not supported by the Synaptics Virtual Peer Device decoder
+            // (the parent), the panel cannot light up, even though it is in pass-through
+            // mode and the panel supports the color format. Once Synaptics fixes this
+            // issue, we will modify the WAR to be applied only before the firmware
+            // version that fixes it.
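+            // Illustration: if RGB is requested and the panel's decoder supports RGB,
+            // but the VMM5320 pass-through decoder reports bRgb == 0, the check below
+            // fails the query when DSC is forced (or DSC_DUAL is requested), and
+            // otherwise falls back to trying the mode without DSC.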
+            //
+            if ((modesetParams.colorFormat == dpColorFormat_RGB && !dev->parent->dscCaps.dscDecoderColorFormatCaps.bRgb) ||
+                (modesetParams.colorFormat == dpColorFormat_YCbCr444 && !dev->parent->dscCaps.dscDecoderColorFormatCaps.bYCbCr444) ||
+                (modesetParams.colorFormat == dpColorFormat_YCbCr422 && !dev->parent->dscCaps.dscDecoderColorFormatCaps.bYCbCrSimple422))
+            {
+                if ((pDscParams->forceDsc == DSC_FORCE_ENABLE) ||
+                    (modesetParams.modesetInfo.mode == DSC_DUAL))
+                {
+                    //
+                    // If DSC is force enabled or DSC_DUAL mode is requested,
+                    // then return failure here
+                    //
+                    compoundQueryResult = false;
+                    SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_DSC_SYNAPTICS_COLOR_FORMAT)
+                    pDscParams->bEnableDsc = false;
+                    return false;
+                }
+                else
+                {
+                    // We should check if the mode is possible without DSC.
+                    pDscParams->bEnableDsc = false;
+                    if (!compoundQueryForceEnableFEC)
+                    {
+                        localInfo->lc.enableFEC(false);
+                    }
+                    return true;
+                }
+            }
+        }
+    }
+
+    availableBandwidthBitsPerSecond = localInfo->lc.convertMinRateToDataRate() * 8 * localInfo->lc.lanes;
+
+    warData.dpData.linkRateHz = localInfo->lc.convertLinkRateToDataRate(localInfo->lc.peakRate);
+    warData.dpData.laneCount = localInfo->lc.lanes;
+    warData.dpData.dpMode = DSC_DP_MST;
+    warData.dpData.hBlank = modesetParams.modesetInfo.rasterWidth - modesetParams.modesetInfo.surfaceWidth;
+    warData.connectorType = DSC_DP;
+    warData.dpData.bDisableDscMaxBppLimit = bDisableDscMaxBppLimit;
+
+    //
+    // DPLib needs to pass sliceCountMask to clients
+    // with all slice counts that can support the mode, since clients
+    // might need to use a slice count other than the minimum slice count
+    // that supports the mode. Currently we keep the same policy of
+    // trying 10 bpp first and, if that does not pass, trying 8 bpp. But later,
+    // with dynamic PPS updates, this will be moved to a better algorithm
+    // that optimizes bpp for the requested mode on each display.
+    //
+
+    result = DSC_GeneratePPSWithSliceCountMask(&dscInfo, &modesetInfoDSC,
+                                               &warData, availableBandwidthBitsPerSecond,
+                                               (NvU32*)(PPS),
+                                               (NvU32*)(&bitsPerPixelX16),
+                                               &(pDscParams->sliceCountMask));
+
+    // Try the max DSC compression bpp = 8 once, to check if that can support the mode.
+    if (result != NVT_STATUS_SUCCESS && !bDscBppForced)
+    {
+        pDscParams->bitsPerPixelX16 = MAX_DSC_COMPRESSION_BPPX16;
+        bitsPerPixelX16 = pDscParams->bitsPerPixelX16;
+        result = DSC_GeneratePPSWithSliceCountMask(&dscInfo, &modesetInfoDSC,
+                                                   &warData, availableBandwidthBitsPerSecond,
+                                                   (NvU32*)(PPS),
+                                                   (NvU32*)(&bitsPerPixelX16),
+                                                   &(pDscParams->sliceCountMask));
+    }
+
+    if (result != NVT_STATUS_SUCCESS)
+    {
+        //
+        // If generating the PPS failed
+        // AND
+        //   (DSC is force enabled
+        //    OR
+        //    the requested DSC mode = DUAL)
+        // then
+        //   return failure here.
+        // Else
+        //   we will check if a non-DSC path is possible.
+        //
+        // If dsc mode = DUAL failed to generate the PPS and we pursue the
+        // non-DSC path, DD will still follow the 2Head1OR modeset path with
+        // DSC disabled, eventually leading to a HW hang. See
Bug 3632901 + // + if ((pDscParams->forceDsc == DSC_FORCE_ENABLE) || + (modesetParams.modesetInfo.mode == DSC_DUAL)) + { + compoundQueryResult = false; + SET_DP_IMP_ERROR(pErrorCode, translatePpsErrorToDpImpError(result)) + pDscParams->bEnableDsc = false; + return false; + } + else + { + // If PPS calculation failed then try without DSC + pDscParams->bEnableDsc = false; + if (!compoundQueryForceEnableFEC) + { + localInfo->lc.enableFEC(false); + } + return true; + } + } + else + { + pDscParams->bEnableDsc = true; + compoundQueryResult = true; + localInfo->localModesetInfo.bEnableDsc = true; + localInfo->localModesetInfo.depth = bitsPerPixelX16; + if (modesetParams.colorFormat == dpColorFormat_YCbCr422 && + dev->dscCaps.dscDecoderColorFormatCaps.bYCbCrNative422 && + (dscInfo.gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422) && + (dscInfo.sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422)) + { + localInfo->localModesetInfo.colorFormat = dpColorFormat_YCbCr422_Native; + } + + if (dev->peerDevice == Dongle && dev->connectorType == connectorHDMI) + { + // + // For DP2HDMI PCON, if FRL BW is available in detailed caps, + // we need to check if we have enough BW for the stream on FRL link. + // + if (dev->pconCaps.maxHdmiLinkBandwidthGbps != 0) + { + NvU64 requiredBw = (NvU64)(modesetParams.modesetInfo.pixelClockHz * modesetParams.modesetInfo.depth); + NvU64 availableBw = (NvU64)(dev->pconCaps.maxHdmiLinkBandwidthGbps * (NvU64)1000000000U); + if (requiredBw > availableBw) + { + compoundQueryResult = false; + SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_DSC_PCON_FRL_BANDWIDTH) + pDscParams->bEnableDsc = false; + return false; + } + } + // + // If DP2HDMI PCON does not support FRL, but advertises TMDS + // Character clock rate on detailed caps, we need to honor that. + // + else if (dev->pconCaps.maxTmdsClkRate != 0) + { + NvU64 maxTmdsClkRateU64 = (NvU64)(dev->pconCaps.maxTmdsClkRate); + NvU64 requiredBw = (NvU64)(modesetParams.modesetInfo.pixelClockHz * modesetParams.modesetInfo.depth); + if (modesetParams.colorFormat == dpColorFormat_YCbCr420) + { + if (maxTmdsClkRateU64 < ((requiredBw/24)/2)) + { + compoundQueryResult = false; + SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_DSC_PCON_HDMI2_BANDWIDTH) + return false; + } + } + else + { + if (maxTmdsClkRateU64 < (requiredBw/24)) + { + compoundQueryResult = false; + SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_DSC_PCON_HDMI2_BANDWIDTH) + return false; + } + } + } + } + else if (dev->devDoingDscDecompression != dev) + { + // + // Device's parent is doing DSC decompression so we need to check + // if device's parent can send uncompressed stream to Sink. + // + unsigned mode_pbn; + + mode_pbn = pbnForMode(modesetParams.modesetInfo); + + // + // As Device's Parent is doing DSC decompression, this is leaf device and + // complete available bandwidth at this node is available for requested mode. + // + if (mode_pbn > dev->bandwidth.enum_path.total) + { + compoundQueryResult = false; + SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_DSC_LAST_HOP_BANDWIDTH) + pDscParams->bEnableDsc = false; + return false; + } + } + + if (pDscParams->pDscOutParams != NULL) + { + // + // If requested then DP Library is supposed to return if mode is + // possible with DSC and calculated PPS and bits per pixel. 
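+                // Units note: bitsPerPixelX16 is the DSC bits-per-pixel scaled by 16,
+                // so 160 encodes the default 10.0 bpp and MAX_DSC_COMPRESSION_BPPX16
+                // (8 bpp) encodes as 8 x 16 = 128.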
+            //
+            dpMemCopy(pDscParams->pDscOutParams->PPS, PPS, sizeof(unsigned) * DSC_MAX_PPS_SIZE_DWORD);
+            pDscParams->bitsPerPixelX16 = bitsPerPixelX16;
+        }
+        else
+        {
+            //
+            // Client only wants to know if the mode is possible or not, but doesn't
+            // need all the calculated PPS parameters in case DSC is required. Do nothing.
+            //
+        }
+    }
+    return true;
+}
+
+bool ConnectorImpl::compoundQueryAttachMSTGeneric(Group * target,
+                                                  const DpModesetParams &modesetParams, // Modeset info
+                                                  CompoundQueryAttachMSTInfo * localInfo,
+                                                  DscParams *pDscParams,                // DSC parameters
+                                                  DP_IMP_ERROR *pErrorCode)
+{
+    // I. Evaluate use of local link bandwidth
+
+    // Calculate the PBN required
+    unsigned base_pbn, slots, slots_pbn;
+    localInfo->lc.pbnRequired(localInfo->localModesetInfo, base_pbn, slots, slots_pbn);
+
+    // Accumulate the amount of PBN rounded up to the nearest timeslot
+    compoundQueryLocalLinkPBN += slots_pbn;
+    if (compoundQueryLocalLinkPBN > localInfo->lc.pbnTotal())
+    {
+        compoundQueryResult = false;
+        SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_INSUFFICIENT_BANDWIDTH)
+    }
+
+    // Verify the min blanking, etc.
+    Watermark dpinfo;
+
+    if (this->isFECSupported())
+    {
+        if (!isModePossibleMSTWithFEC(localInfo->lc, localInfo->localModesetInfo, &dpinfo))
+        {
+            compoundQueryResult = false;
+            SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_WATERMARK_BLANKING)
+        }
+    }
+    else
+    {
+        if (!isModePossibleMST(localInfo->lc, localInfo->localModesetInfo, &dpinfo))
+        {
+            compoundQueryResult = false;
+            SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_WATERMARK_BLANKING)
+        }
+    }
+
+    for (Device * d = target->enumDevices(0); d; d = target->enumDevices(d))
+    {
+        DeviceImpl * i = (DeviceImpl *)d;
+
+        // Allocate bandwidth for the entire path to the root
+        // NOTE: Above we already handled the local link
+        DeviceImpl * tail = i;
+        while (tail && tail->getParent())
+        {
+            // Have we already accounted for this stream?
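+            // Illustration: each compoundQueryAttach() in a session gets one bit;
+            // the second attach runs with compoundQueryCount == 2, so its mask is
+            // (1 << 2) == 0b100. A hop shared by several streams is thus charged
+            // once per stream, never twice for the same stream.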
+            if (!(tail->bandwidth.compound_query_state.bandwidthAllocatedForIndex & (1 << compoundQueryCount)))
+            {
+                tail->bandwidth.compound_query_state.bandwidthAllocatedForIndex |= (1 << compoundQueryCount);
+
+                LinkConfiguration * linkConfig = tail->inferLeafLink(NULL);
+                tail->bandwidth.compound_query_state.timeslots_used_by_query += linkConfig->slotsForPBN(base_pbn);
+
+                if (tail->bandwidth.compound_query_state.timeslots_used_by_query > tail->bandwidth.compound_query_state.totalTimeSlots)
+                {
+                    compoundQueryResult = false;
+                    tail->bandwidth.compound_query_state.timeslots_used_by_query -= linkConfig->slotsForPBN(base_pbn);
+                    tail->bandwidth.compound_query_state.bandwidthAllocatedForIndex &= ~(1 << compoundQueryCount);
+                    SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_INSUFFICIENT_BANDWIDTH)
+                }
+            }
+            tail = (DeviceImpl*)tail->getParent();
+        }
+    }
+
+    // If the compoundQueryResult is false, we need to reset the compoundQueryLocalLinkPBN
+    if (!compoundQueryResult)
+    {
+        compoundQueryLocalLinkPBN -= slots_pbn;
+    }
+
+    return compoundQueryResult;
+}
+
+bool ConnectorImpl::compoundQueryAttachSSTIsDscPossible
+(
+    const DpModesetParams &modesetParams,
+    DscParams *pDscParams
+)
+{
+    bool bGpuDscSupported = false;
+    main->getDscCaps(&bGpuDscSupported);
+    DeviceImpl * nativeDev = this->findDeviceInList(Address());
+
+    if (bGpuDscSupported &&                                  // if GPU supports DSC
+        this->isFECSupported() &&                            // if GPU supports FEC
+        pDscParams &&                                        // if client sent DSC info
+        pDscParams->bCheckWithDsc &&                         // if client wants to check with DSC
+        nativeDev &&                                         // if a native device exists
+        nativeDev->isDSCPossible() &&                        // if device supports DSC decompression
+        (nativeDev->isFECSupported() || main->isEDP()) &&    // if device supports FEC decoding or is a DSC capable eDP panel which doesn't support FEC
+        (modesetParams.modesetInfo.bitsPerComponent != 6))   // DSC doesn't support bpc = 6
+    {
+        return true;
+    }
+
+    return false;
+}
+
+bool ConnectorImpl::compoundQueryAttachSSTDsc
+(
+    const DpModesetParams &modesetParams,
+    LinkConfiguration lc,
+    DscParams *pDscParams,
+    DP_IMP_ERROR *pErrorCode
+)
+{
+    DSC_INFO dscInfo;
+    MODESET_INFO modesetInfoDSC;
+    WAR_DATA warData;
+    NvU64 availableBandwidthBitsPerSecond = 0;
+    unsigned PPS[DSC_MAX_PPS_SIZE_DWORD];
+    unsigned bitsPerPixelX16 = pDscParams->bitsPerPixelX16;
+    bool result;
+    NVT_STATUS ppsStatus;
+    ModesetInfo localModesetInfo = modesetParams.modesetInfo;
+
+    DeviceImpl * nativeDev = this->findDeviceInList(Address());
+
+    if (!this->preferredLinkConfig.isValid() && nativeDev->isFECSupported())
+    {
+        lc.enableFEC(true);
+    }
+
+    dpMemZero(PPS, sizeof(unsigned) * DSC_MAX_PPS_SIZE_DWORD);
+    dpMemZero(&dscInfo, sizeof(DSC_INFO));
+    dpMemZero(&warData, sizeof(WAR_DATA));
+
+    // Populate DSC related info for PPS calculations
+    this->populateDscCaps(&dscInfo, nativeDev->devDoingDscDecompression, pDscParams->forcedParams);
+
+    // Populate modeset related info for PPS calculations
+    this->populateDscModesetInfo(&modesetInfoDSC, &modesetParams);
+
+    // Checking for the DSC v1.1 and YUV combination
+    if ((dscInfo.sinkCaps.algorithmRevision.versionMajor == 1) &&
+        (dscInfo.sinkCaps.algorithmRevision.versionMinor == 1) &&
+        (modesetParams.colorFormat == dpColorFormat_YCbCr444))
+    {
+        DP_PRINTF(DP_WARNING, "WARNING: DSC v1.2 or higher is recommended for using YUV444");
+        DP_PRINTF(DP_WARNING, "Current version is 1.1");
+    }
+
+    availableBandwidthBitsPerSecond = lc.convertMinRateToDataRate() * 8 * lc.lanes;
+
+    warData.dpData.linkRateHz = lc.convertLinkRateToDataRate(lc.peakRate);
+    warData.dpData.laneCount = lc.lanes;
warData.dpData.hBlank = modesetParams.modesetInfo.rasterWidth - modesetParams.modesetInfo.surfaceWidth; + warData.dpData.dpMode = DSC_DP_SST; + warData.connectorType = DSC_DP; + warData.dpData.bDisableDscMaxBppLimit = bDisableDscMaxBppLimit; + + if (main->isEDP()) + { + warData.dpData.bIsEdp = true; + } + + ppsStatus = DSC_GeneratePPSWithSliceCountMask(&dscInfo, + &modesetInfoDSC, + &warData, + availableBandwidthBitsPerSecond, + (NvU32*)(PPS), + (NvU32*)(&bitsPerPixelX16), + &(pDscParams->sliceCountMask)); + + if (ppsStatus != NVT_STATUS_SUCCESS) + { + result = false; + SET_DP_IMP_ERROR(pErrorCode, translatePpsErrorToDpImpError(ppsStatus)) + pDscParams->bEnableDsc = false; + } + else + { + localModesetInfo.bEnableDsc = true; + localModesetInfo.depth = bitsPerPixelX16; + LinkConfiguration lowestSelected; + bool bIsModeSupported = false; + + if (this->preferredLinkConfig.isValid()) + { + // Check if mode is possible with preferred link config + bIsModeSupported = willLinkSupportModeSST(lc, localModesetInfo, pDscParams); + } + else + { + // + // Check if mode is possible with calculated bits_per_pixel. + // Check with all possible link configs and not just highest + // assessed because with DSC, mode can fail with higher + // link config and pass for lower one. This is because + // if raster parameters are really small and DP bandwidth is + // very high then we may end up with some TU with 0 active + // symbols in SST. This may cause HW hang and so DP IMP rejects + // this mode. Refer Bug 200379426. + // + bIsModeSupported = getValidLowestLinkConfig(lc, lowestSelected, localModesetInfo, pDscParams); + } + + if (!bIsModeSupported) + { + pDscParams->bEnableDsc = false; + SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_INSUFFICIENT_BANDWIDTH_DSC) + result = false; + } + else + { + pDscParams->bEnableDsc = true; + result = true; + + if (pDscParams->pDscOutParams != NULL) + { + // + // If requested then DP Library is supposed to return if mode is + // possible with DSC and calculated PPS and bits per pixel. + // + dpMemCopy(pDscParams->pDscOutParams->PPS, PPS, sizeof(unsigned) * DSC_MAX_PPS_SIZE_DWORD); + pDscParams->bitsPerPixelX16 = bitsPerPixelX16; + } + else + { + // + // Client only wants to know if mode is possible or not but doesn't + // need all calculated PPS parameters in case DSC is required. Do nothing. 
+ // + } + } + } + + return result; +} + +bool ConnectorImpl::compoundQueryAttachSST(Group * target, + const DpModesetParams &modesetParams, // Modeset info + DscParams *pDscParams, // DSC parameters + DP_IMP_ERROR *pErrorCode) +{ + DeviceImpl * nativeDev = findDeviceInList(Address()); + + if (compoundQueryCount != 1) + { + compoundQueryResult = false; + return false; + } + + if (nativeDev && (nativeDev->connectorType == connectorHDMI)) + { + if (modesetParams.colorFormat == dpColorFormat_YCbCr420) + { + if ((nativeDev->maxTmdsClkRate) && + (nativeDev->maxTmdsClkRate < + ((modesetParams.modesetInfo.pixelClockHz * modesetParams.modesetInfo.depth /24)/2))) + { + compoundQueryResult = false; + SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_DSC_PCON_HDMI2_BANDWIDTH) + return false; + } + } + else + { + if ((nativeDev->maxTmdsClkRate) && + (nativeDev->maxTmdsClkRate < + (modesetParams.modesetInfo.pixelClockHz * modesetParams.modesetInfo.depth /24))) + { + compoundQueryResult = false; + SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_DSC_PCON_HDMI2_BANDWIDTH) + return false; + } + } + } + + LinkConfiguration lc = highestAssessedLC; + + // check if there is a special request from the client + if (this->preferredLinkConfig.isValid()) + { + lc = preferredLinkConfig; + } + else + { + // + // Always check for DP IMP without FEC overhead first before + // trying with DSC/FEC + // + lc.enableFEC(false); + } + + // If a valid native DP device was not found, force legacy DP IMP + if (!nativeDev) + { + compoundQueryResult = this->willLinkSupportModeSST(lc, modesetParams.modesetInfo, pDscParams); + if (!compoundQueryResult) + { + SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_WATERMARK_BLANKING) + } + } + else + { + if ((lc.peakRate == dp2LinkRate_8_10Gbps) && + (main->isAvoidHBR3WAREnabled()) && + (compoundQueryAttachSSTIsDscPossible(modesetParams, pDscParams))) + { + LinkConfiguration lowerLc = lc; + lowerLc.lowerConfig(false); + + if ((pDscParams && (pDscParams->forceDsc == DSC_FORCE_ENABLE)) || + (modesetParams.modesetInfo.mode == DSC_DUAL) || + (!this->willLinkSupportModeSST(lowerLc, modesetParams.modesetInfo, pDscParams))) + { + if (pDscParams && pDscParams->forceDsc != DSC_FORCE_DISABLE) + { + bool result = compoundQueryAttachSSTDsc(modesetParams, lowerLc, pDscParams, pErrorCode); + if (result == true) + return result; + } + } + else + { + // Mode was successful + return true; + } + } + + if ((pDscParams && (pDscParams->forceDsc == DSC_FORCE_ENABLE)) || // DD has forced DSC Enable + (modesetParams.modesetInfo.mode == DSC_DUAL) || // DD decided to use 2 Head 1 OR mode + (!this->willLinkSupportModeSST(lc, modesetParams.modesetInfo, pDscParams))) // Mode is not possible without DSC + { + // If DP IMP fails without DSC or client requested to force DSC + if (pDscParams && pDscParams->forceDsc != DSC_FORCE_DISABLE) + { + // Check if panel and GPU both supports DSC or not. Also check if panel supports FEC + if (compoundQueryAttachSSTIsDscPossible(modesetParams, pDscParams)) + { + compoundQueryResult = compoundQueryAttachSSTDsc(modesetParams, + lc, + pDscParams, + pErrorCode); + } + else + { + // Either GPU or Sink doesn't support DSC + compoundQueryResult = false; + } + } + else + { + // Client hasn't sent DSC params info or has asked to force disable DSC. 
+ compoundQueryResult = false; + SET_DP_IMP_ERROR(pErrorCode, DP_IMP_ERROR_INSUFFICIENT_BANDWIDTH_NO_DSC) + } + } + else + { + // Mode was successful + compoundQueryResult = true; + } + } + + return compoundQueryResult; +} + +void ConnectorImpl::populateDscModesetInfo(MODESET_INFO* pModesetInfo, const DpModesetParams* pModesetParams) +{ + pModesetInfo->pixelClockHz = pModesetParams->modesetInfo.pixelClockHz; + pModesetInfo->activeWidth = pModesetParams->modesetInfo.surfaceWidth; + pModesetInfo->activeHeight = pModesetParams->modesetInfo.surfaceHeight; + pModesetInfo->bitsPerComponent = pModesetParams->modesetInfo.bitsPerComponent; + + if (pModesetParams->colorFormat == dpColorFormat_RGB) + { + pModesetInfo->colorFormat = NVT_COLOR_FORMAT_RGB; + } + else if (pModesetParams->colorFormat == dpColorFormat_YCbCr444) + { + pModesetInfo->colorFormat = NVT_COLOR_FORMAT_YCbCr444; + } + else if (pModesetParams->colorFormat == dpColorFormat_YCbCr422) + { + pModesetInfo->colorFormat = NVT_COLOR_FORMAT_YCbCr422; + } + else if (pModesetParams->colorFormat == dpColorFormat_YCbCr420) + { + pModesetInfo->colorFormat = NVT_COLOR_FORMAT_YCbCr420; + } + else + { + pModesetInfo->colorFormat = NVT_COLOR_FORMAT_RGB; + } + + if (pModesetParams->modesetInfo.mode == DSC_DUAL) + { + pModesetInfo->bDualMode = true; + } + else + { + pModesetInfo->bDualMode = false; + } + + if (pModesetParams->modesetInfo.mode == DSC_DROP) + { + pModesetInfo->bDropMode = true; + } + else + { + pModesetInfo->bDropMode = false; + } +} + +void ConnectorImpl::populateDscGpuCaps(DSC_INFO* dscInfo) +{ + unsigned encoderColorFormatMask; + unsigned lineBufferSizeKB; + unsigned rateBufferSizeKB; + unsigned bitsPerPixelPrecision; + unsigned maxNumHztSlices; + unsigned lineBufferBitDepth; + + // Get GPU DSC capabilities + main->getDscCaps(NULL, + &encoderColorFormatMask, + &lineBufferSizeKB, + &rateBufferSizeKB, + &bitsPerPixelPrecision, + &maxNumHztSlices, + &lineBufferBitDepth); + + if (encoderColorFormatMask & NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB) + { + dscInfo->gpuCaps.encoderColorFormatMask |= DSC_ENCODER_COLOR_FORMAT_RGB; + } + + if (encoderColorFormatMask & NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444) + { + dscInfo->gpuCaps.encoderColorFormatMask |= DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444; + } + + if (encoderColorFormatMask & NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422) + { + dscInfo->gpuCaps.encoderColorFormatMask |= DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422; + } + + if (encoderColorFormatMask & NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420) + { + dscInfo->gpuCaps.encoderColorFormatMask |= DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420; + } + + dscInfo->gpuCaps.lineBufferSize = lineBufferSizeKB; + + if (bitsPerPixelPrecision == NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16) + { + dscInfo->gpuCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_16; + } + + if (bitsPerPixelPrecision == NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8) + { + dscInfo->gpuCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_8; + } + + if (bitsPerPixelPrecision == NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4) + { + dscInfo->gpuCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_4; + } + + if (bitsPerPixelPrecision == NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2) + { + dscInfo->gpuCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_2; + } + + if (bitsPerPixelPrecision == 
NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1) + { + dscInfo->gpuCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1; + } + + dscInfo->gpuCaps.maxNumHztSlices = maxNumHztSlices; + + dscInfo->gpuCaps.lineBufferBitDepth = lineBufferBitDepth; +} + +void ConnectorImpl::populateDscBranchCaps(DSC_INFO* dscInfo, DeviceImpl * dev) +{ + dscInfo->branchCaps.overallThroughputMode0 = dev->dscCaps.branchDSCOverallThroughputMode0; + dscInfo->branchCaps.overallThroughputMode1 = dev->dscCaps.branchDSCOverallThroughputMode1; + dscInfo->branchCaps.maxLineBufferWidth = dev->dscCaps.branchDSCMaximumLineBufferWidth; + + return; +} + +void ConnectorImpl::populateDscSinkCaps(DSC_INFO* dscInfo, DeviceImpl * dev) +{ + // Early return if dscInfo or dev is NULL + if ((dscInfo == NULL) || (dev == NULL)) + { + return; + } + + if (dev->dscCaps.dscDecoderColorFormatCaps.bRgb) + { + dscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_RGB; + } + + if (dev->dscCaps.dscDecoderColorFormatCaps.bYCbCr444) + { + dscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_444; + } + if (dev->dscCaps.dscDecoderColorFormatCaps.bYCbCrSimple422) + { + dscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_SIMPLE_422; + } + if (dev->dscCaps.dscDecoderColorFormatCaps.bYCbCrNative422) + { + dscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422; + } + if (dev->dscCaps.dscDecoderColorFormatCaps.bYCbCrNative420) + { + dscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420; + } + + switch (dev->dscCaps.dscBitsPerPixelIncrement) + { + case BITS_PER_PIXEL_PRECISION_1_16: + dscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_16; + break; + case BITS_PER_PIXEL_PRECISION_1_8: + dscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_8; + break; + case BITS_PER_PIXEL_PRECISION_1_4: + dscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_4; + break; + case BITS_PER_PIXEL_PRECISION_1_2: + dscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_2; + break; + case BITS_PER_PIXEL_PRECISION_1: + dscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1; + break; + } + + // Decoder color depth mask + if (dev->dscCaps.dscDecoderColorDepthMask & DSC_BITS_PER_COLOR_MASK_12) + { + dscInfo->sinkCaps.decoderColorDepthMask |= DSC_DECODER_COLOR_DEPTH_CAPS_12_BITS; + } + + if (dev->dscCaps.dscDecoderColorDepthMask & DSC_BITS_PER_COLOR_MASK_10) + { + dscInfo->sinkCaps.decoderColorDepthMask |= DSC_DECODER_COLOR_DEPTH_CAPS_10_BITS; + } + + if (dev->dscCaps.dscDecoderColorDepthMask & DSC_BITS_PER_COLOR_MASK_8) + { + dscInfo->sinkCaps.decoderColorDepthMask |= DSC_DECODER_COLOR_DEPTH_CAPS_8_BITS; + } + + dscInfo->sinkCaps.maxSliceWidth = dev->dscCaps.dscMaxSliceWidth; + dscInfo->sinkCaps.sliceCountSupportedMask = dev->dscCaps.sliceCountSupportedMask; + dscInfo->sinkCaps.maxNumHztSlices = dev->dscCaps.maxSlicesPerSink; + dscInfo->sinkCaps.lineBufferBitDepth = dev->dscCaps.lineBufferBitDepth; + dscInfo->sinkCaps.bBlockPrediction = dev->dscCaps.bDscBlockPredictionSupport; + dscInfo->sinkCaps.algorithmRevision.versionMajor = dev->dscCaps.versionMajor; + dscInfo->sinkCaps.algorithmRevision.versionMinor = dev->dscCaps.versionMinor; + dscInfo->sinkCaps.peakThroughputMode0 = dev->dscCaps.dscPeakThroughputMode0; + dscInfo->sinkCaps.peakThroughputMode1 = dev->dscCaps.dscPeakThroughputMode1; + dscInfo->sinkCaps.maxBitsPerPixelX16 = 
dev->dscCaps.maxBitsPerPixelX16; + + // If panel does not populate peak DSC throughput, use _MODE0_340. + if (!dscInfo->sinkCaps.peakThroughputMode0) + { + dscInfo->sinkCaps.peakThroughputMode0 = + NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_340; + } + + // If panel does not populate max slice width, use 2560. + if (!dscInfo->sinkCaps.maxSliceWidth) + { + dscInfo->sinkCaps.maxSliceWidth = 2560; + } + + // + // If panel supports Native 422 mode but does not populate peak DSC + // throughput for Mode 1, use _MODE1_340. + // + if (dev->dscCaps.dscDecoderColorFormatCaps.bYCbCrNative422 && + !dscInfo->sinkCaps.peakThroughputMode1) + { + dscInfo->sinkCaps.peakThroughputMode1 = + NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_340; + } +} + +void ConnectorImpl::populateForcedDscParams(DSC_INFO* dscInfo, DSC_INFO::FORCED_DSC_PARAMS * forcedParams) +{ + if (forcedParams) + { + dscInfo->forcedDscParams.sliceWidth = forcedParams->sliceWidth; + dscInfo->forcedDscParams.sliceHeight = forcedParams->sliceHeight; + dscInfo->forcedDscParams.sliceCount = forcedParams->sliceCount; + dscInfo->forcedDscParams.dscRevision = forcedParams->dscRevision; + } +} + +void ConnectorImpl::populateDscCaps(DSC_INFO* dscInfo, DeviceImpl * dev, DSC_INFO::FORCED_DSC_PARAMS * forcedParams) +{ + // Sink DSC capabilities + populateDscSinkCaps(dscInfo, dev); + + // Branch Specific DSC Capabilities + if (!dev->isVideoSink() && !dev->isAudioSink()) + { + populateDscBranchCaps(dscInfo, dev); + } + + // GPU DSC capabilities + populateDscGpuCaps(dscInfo); + + // Forced DSC params + populateForcedDscParams(dscInfo, forcedParams); +} + +bool ConnectorImpl::endCompoundQuery() +{ + DP_ASSERT(compoundQueryActive && "Spurious compoundQuery end."); + compoundQueryActive = false; + return compoundQueryResult; +} + +// +// Set link to HDMI mode +// +void ConnectorImpl::enableLinkHandsOff() +{ + if (isLinkQuiesced) + { + DP_ASSERT(0 && "Link is already quiesced."); + return; + } + + isLinkQuiesced = true; + + // Set the Lane Count to 0 to shut down the link. + powerdownLink(); +} + +// +// Restore from HDMI mode +// +void ConnectorImpl::releaseLinkHandsOff() +{ + if (!isLinkQuiesced) + { + DP_PRINTF(DP_ERROR, "DPCONN> Link is already in use."); + return; + } + + isLinkQuiesced = false; + assessLink(); +} + +// +// Timer callback for event management +// Uses: fireEvents() +void ConnectorImpl::expired(const void * tag) +{ + if (tag == &tagFireEvents) + fireEventsInternal(); + else if (tag == &tagDelayedHdcpCapRead) + { + DP_PRINTF(DP_NOTICE, "DPCONN> Delayed HDCP Cap read called."); + readRemoteHdcpCaps(); + } + else if (tag == &tagHDCPStreamEncrEnable) + { + if (!(bHdcpStrmEncrEnblOnlyOnDemand)) + { + while (!(hdcpEnableTransitionGroups.isEmpty())) + { + GroupImpl* curStrmEncrEnblGroup = hdcpEnableTransitionGroups.pop(); + curStrmEncrEnblGroup->hdcpSetEncrypted(true); + } + } + } + else if (tag == &tagHDCPReauthentication) + { + if (authRetries < HDCP_AUTHENTICATION_RETRIES) + { + HDCPState hdcpState = {0}; + main->configureHDCPGetHDCPState(hdcpState); + + unsigned authDelay = (hdcpState.HDCP_State_22_Capable ? + HDCP22_AUTHENTICATION_COOLDOWN : HDCP_AUTHENTICATION_COOLDOWN); + + // Don't fire any reauthentication if we're not done with the modeset + if (!intransitionGroups.isEmpty()) + { + isHDCPAuthOn = false; + timer->queueCallback(this, &tagHDCPReauthentication, + authDelay); + return; + } + + // Clear the ECF & Reauthentication here for the branch device. 
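+ // Worked example (illustrative note, not part of the original change): the + // ECF is a 64-bit field with one bit per MST timeslot. For a group that + // occupies timeslots [begin, begin + count), the mask computed below is + // ((1 << count) - 1) << begin; e.g. begin = 2, count = 3 gives 0b11100. 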
+ NvU64 ecf = 0x0; + main->configureAndTriggerECF(ecf); + isHDCPAuthOn = false; + + authRetries++; + isHDCPAuthTriggered = true; + main->configureHDCPRenegotiate(); + main->configureHDCPGetHDCPState(hdcpState); + + if (hdcpState.HDCP_State_Authenticated) + { + isHDCPAuthOn = true; + authRetries = 0; + // Set the ECF for the groups which are already active. + for (ListElement *i = this->activeGroups.begin(); i != this->activeGroups.end(); i = i->next) + { + GroupImpl * group = (GroupImpl *)i; + if (group->hdcpEnabled) + { + NvU64 countOnes = (((NvU64)1) << group->timeslot.count) - 1; + NvU64 mask = countOnes << group->timeslot.begin; + ecf |= mask; + } + } + // Restore the ECF and trigger ACT + main->configureAndTriggerECF(ecf); + // Enable HDCP for Group + if (!(bHdcpStrmEncrEnblOnlyOnDemand)) + { + timer->queueCallback(this, &tagHDCPStreamEncrEnable, 100); + } + } + else + { + timer->queueCallback(this, &tagHDCPReauthentication, + authDelay); + } + isHDCPReAuthPending = false; + } + else + { + isHDCPAuthOn = false; + } + } + else if (tag == &tagSendQseMessage) + { + if (this->messageManager->isAnyAwaitingQSEReplyDownRequest()) + { + timer->queueCallback(this, &tagSendQseMessage, HDCP_SEND_QSE_MESSAGE_COOLDOWN); + } + else + { + for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next) + { + GroupImpl * group = (GroupImpl *) i; + + if (group->hdcpEnabled) + { + group->streamEncryptionStatusDetection->sendQSEMessage(group, qseReason_Ssc); + timer->queueCallback(group, &(group->tagStreamValidation), HDCP_STREAM_VALIDATION_REQUEST_COOLDOWN); + } + } + } + } + else if (tag == &tagDelayedHDCPCPIrqHandling) + { + DP_PRINTF(DP_ERROR, "DPCONN> Delayed HDCP CPIRQ handling due to previous RxStatus read failed."); + + if (handleCPIRQ()) + { + hal->clearInterruptContentProtection(); + } + else + { + hdcpCpIrqRxStatusRetries++; + if (hdcpCpIrqRxStatusRetries < HDCP_CPIRQ_RXSTAUS_RETRIES) + { + timer->queueCallback(this, &tagHDCPReauthentication, HDCP_CPIRQ_RXSTATUS_COOLDOWN); + } + else + { + DP_PRINTF(DP_ERROR, "DPCONN> Delayed HDCP CPIRQ RxStatus probe exceeds max retry and aborted."); + hal->clearInterruptContentProtection(); + } + } + } + else if (tag == &tagDpBwAllocationChanged) + { + for (Device * i = enumDevices(0); i; i = enumDevices(i)) + { + DeviceImpl * dev = (DeviceImpl *)i; + sink->bandwidthChangeNotification(dev, false); + } + } + else + { + DP_ASSERT(0); + } +} + +// Generate Events. +// useTimer specifies whether we fire the events on the timer +// context, or this context. +void ConnectorImpl::fireEvents() +{ + bool eventsPending = false; + + // Don't fire any events if we're not done with the modeset + if (!intransitionGroups.isEmpty()) + { + return; + } + + // Walk through the devices looking for state changes + for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = e->next) + { + DeviceImpl * dev = (DeviceImpl *)e; + + if (dev->isPendingNewDevice() || + dev->isPendingLostDevice() || + dev->isPendingCableOk() || + dev->isPendingZombie() || + dev->isPendingHDCPCapDone()) + eventsPending = true; + } + + // If there were any queue an immediate callback to handle them + if (eventsPending || isDiscoveryDetectComplete) + { + { + // Queue the fireEventsInternal. 
+ // It's critical we don't allow this to be processed in a sleep + // since DD may do a modeset in response + timer->queueCallback(this, &tagFireEvents, 0, false /* not allowed in sleep */); + } + } +} + +void ConnectorImpl::fireEventsInternal() +{ + ListElement * next; + Address::StringBuffer sb, sb1; + DP_USED(sb); + DP_USED(sb1); + for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = next) + { + next = e->next; + DeviceImpl * dev = (DeviceImpl *)e; + + if (dev->isPendingLostDevice()) + { + // + // For bug 2335599, where the connected monitor is switched to MST + // from SST after S3 resume, we need to disconnect SST monitor + // early before adding MST monitors. This will avoid client from + // mistaking the disconnection of SST monitor later as parent of + // MST monitors, which will wrongly disconnect MST monitors too. + // + if (!(!dev->multistream && linkUseMultistream()) && + bDeferNotifyLostDevice) + { + continue; + } + dev->shadow.plugged = false; + DP_PRINTF(DP_ERROR, "DPCONN> Lost device %s", dev->address.toString(sb)); + Address::NvU32Buffer addrBuffer; + dpMemZero(addrBuffer, sizeof(addrBuffer)); + dev->address.toNvU32Buffer(addrBuffer); + NV_DPTRACE_WARNING(LOST_DEVICE, dev->address.size(), addrBuffer[0], addrBuffer[1], + addrBuffer[2], addrBuffer[3]); + sink->lostDevice(dev); +#if defined(DEBUG) + // Assert that this device is not contained in any groups. + List* groupLists[] = { + &activeGroups, + &inactiveGroups + }; + + for (unsigned i = 0; i < sizeof(groupLists) / sizeof(groupLists[0]); i++) + { + List *groupList = groupLists[i]; + for (ListElement *e = groupList->begin(); e != groupList->end(); e = e->next) + { + GroupImpl *g = (GroupImpl *)e; + DP_ASSERT(!g->contains(dev)); + } + } +#endif + delete dev; + + // Now that the device is deleted, update the DP Tunnel BW allocation + NvU64 previousAllocatedDpTunnelBw = allocatedDpTunnelBw; + updateDpTunnelBwAllocation(); + if (previousAllocatedDpTunnelBw != allocatedDpTunnelBw) + { + timer->queueCallback(this, &tagDpBwAllocationChanged, 0, false /* not allowed in sleep */); + } + + continue; + } + + if (dev->isPendingCableOk()) + { + dev->shadow.cableOk = dev->isCableOk(); + sink->notifyCableOkStateChange(dev, dev->shadow.cableOk); + } + + if (dev->isPendingZombie()) + { + dev->shadow.zombie = dev->isZombie(); + if (dev->complianceDeviceEdidReadTest) + { + // the zombie event will be hidden for DD/OS + DP_PRINTF(DP_WARNING, "DPCONN> Compliance: Device Internal Zombie? : %d 0x%x", dev->shadow.zombie ? 1 : 0, dev); + return; + } + bMitigateZombie = false; + DP_PRINTF(DP_WARNING, "DPCONN> Zombie? : %d 0x%x", dev->shadow.zombie ? 1 : 0, dev); + sink->notifyZombieStateChange(dev, dev->shadow.zombie); + } + + if (dev->isPendingHDCPCapDone()) + { + DP_ASSERT(dev->isHDCPCap != Indeterminate && "HDCPCap reading is not done!!"); + if (dev->isHDCPCap != Indeterminate) + { + // Notify RM about the new Bcaps.. 
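+ // Background (hedged summary per the HDCP 1.x spec, not from this change): + // BCAPS is the sink's HDCP capability register and BKSV its 5-byte receiver + // Key Selection Vector; a valid BKSV contains exactly 20 ones and 20 zeros, + // which is what RM can sanity-check once both are cached below. 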
+ if (dev->isActive()) + { + RmDfpCache dfpCache = {0}; + dfpCache.updMask = 0; + dfpCache.bcaps = *dev->BCAPS; + for (unsigned i=0; i < HDCP_KSV_SIZE; i++) + dfpCache.bksv[i] = dev->BKSV[i]; + + dfpCache.updMask |= (1 << NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BCAPS); + dfpCache.updMask |= (1 << NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BKSV); + dev->connector->main->rmUpdateDynamicDfpCache(dev->activeGroup->headIndex, &dfpCache, False); + } + + sink->notifyHDCPCapDone(dev, !!dev->isHDCPCap); + DP_PRINTF(DP_NOTICE, "DPCONN> Notify HDCP cap Done : %x", !!dev->isHDCPCap); + } + else + { + sink->notifyHDCPCapDone(dev, false); + } + + dev->shadow.hdcpCapDone = true; + } + + bool mustDisconnect = dev->isMustDisconnect(); + if (dev->shadow.mustDisconnect != mustDisconnect && mustDisconnect) + { + dev->shadow.mustDisconnect = mustDisconnect; + sink->notifyMustDisconnect(dev->activeGroup); + } + } + + for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = next) + { + next = e->next; + DeviceImpl * dev = (DeviceImpl *)e; + + if (dev->isPendingNewDevice()) + { + if (bReportDeviceLostBeforeNew && bDeferNotifyLostDevice) + { + // Let's try to find if there's a device pending lost on the same address + DeviceImpl* _device = NULL; + for (ListElement * le = deviceList.begin(); le != deviceList.end(); le = le->next) + { + _device = (DeviceImpl*)le; + if ((_device->address == dev->address) && (_device->plugged != dev->plugged)) + break; + } + if (_device && + (_device->address == dev->address) && + (_device->plugged != dev->plugged)) + { + // If yes, then we need to report this lost device first. + _device->shadow.plugged = false; + DP_PRINTF(DP_WARNING, "DPCONN> Lost device 0x%x", _device); + sink->lostDevice(_device); + DP_ASSERT(!_device->activeGroup && "DD didn't remove panel from group"); + delete _device; + } + } + dev->shadow.plugged = true; + if (dev->isDSCPossible()) + { + if (dev->isDSCSupported()) + { + DP_PRINTF(DP_NOTICE, "DPCONN> New device %s | Native DSC Capability - Capable | " + "DSC Decompression Device - %s", dev->address.toString(sb), + (dev->devDoingDscDecompression) ? dev->devDoingDscDecompression->address.toString(sb1):"NA"); + } + else + { + DP_PRINTF(DP_NOTICE, "DPCONN> New device %s | Native DSC Capability - Not Capable | " + "DSC Decompression Device - %s", dev->address.toString(sb), + (dev->devDoingDscDecompression) ? dev->devDoingDscDecompression->address.toString(sb1):"NA"); + } + } + else + { + DP_PRINTF(DP_NOTICE, "DPCONN> New device %s | DSC Not Possible", dev->address.toString(sb)); + } + + Address::NvU32Buffer addrBuffer; + dpMemZero(addrBuffer, sizeof(addrBuffer)); + dev->address.toNvU32Buffer(addrBuffer); + NV_DPTRACE_INFO(NEW_SINK_REPORTED, dev->address.size(), addrBuffer[0], addrBuffer[1], + addrBuffer[2], addrBuffer[3]); + + // + // During newDevice, clients would run CQA for modelist validation and to identify the max BW + // For these calls, we expect to run CQA as if the monitor is driven independently + // Update the SW cache of the allocatedDpTunnelBw to Link Configuration + // The newDevice sequence would call dev->setModeList() which would allocate the BW required for all devices + // and set the right value for allocatedDpTunnelBw. 
+ // Additionally, the client is expected to lock DPLib during all the operations so that no + // other thread/IRQ can update allocatedDpTunnelBw + // + allocatedDpTunnelBwShadow = allocatedDpTunnelBw; + allocatedDpTunnelBw = getMaxTunnelBw(); + sink->newDevice(dev); + } + } + + if (isDiscoveryDetectComplete) + { + // + // Bug 200236666 : + // isDiscoveryDetectComplete can be set when we process a new device after + // completing last edid read. In such scenario we will send notifyDetectComplete + // before newDevice for that sink has been sent to DD + // a/ sink->newDevice(dev) above can trigger the pending edid read + // b/ after last edid read completes (::mstEdidCompleted), ::processNewDevice + // will set the plugged flag for new device + // c/ this will queue pendingNewDevice event callback for the last device pending discovery + // d/ isDiscoveryDetectComplete flag set during b/ will trigger a + // premature notifyDetectComplete to DD before pendingNewDevice callback + // To fix above scenario : check if there is any newly pending new/lost device + // if yes, then defer sending notifyDetectComplete till next callback + // + bool bDeferNotifyDetectComplete = false; + for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = next) + { + next = e->next; + DeviceImpl * dev = (DeviceImpl *)e; + + if (dev->isPendingNewDevice() || dev->isPendingLostDevice()) + { + bDeferNotifyDetectComplete = true; + DP_ASSERT(0 && "DP-CONN> Defer notifyDetectComplete as a new/lost device is pending!"); + break; + } + } + + if (!bDeferNotifyDetectComplete) + { + isDiscoveryDetectComplete = false; + DP_PRINTF(DP_NOTICE, "DP-CONN> NotifyDetectComplete"); + sink->notifyDetectComplete(); + } + } + +} + +// +// This call will be deprecated as soon as all clients move to the new API +// +bool ConnectorImpl::isHeadShutDownNeeded(Group * target, // Group of panels we're attaching to this head + unsigned headIndex, + unsigned twoChannelAudioHz, // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz, // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + NvU64 pixelClockHz, // Requested pixel clock for the mode + unsigned rasterWidth, + unsigned rasterHeight, + unsigned rasterBlankStartX, + unsigned rasterBlankEndX, + unsigned depth) +{ + ModesetInfo modesetInfo = ModesetInfo(twoChannelAudioHz, eightChannelAudioHz, pixelClockHz, + rasterWidth, rasterHeight, (rasterBlankStartX - rasterBlankEndX), 0 /*surfaceHeight*/, + depth, rasterBlankStartX, rasterBlankEndX); + return isHeadShutDownNeeded(target, headIndex, modesetInfo); +} + +// +// Head shutdown will be needed if any of the following conditions are true: +// a. Link rate is going lower than current +// b. 
Head is activated as MST +// +bool ConnectorImpl::isHeadShutDownNeeded(Group * target, // Group of panels we're attaching to this head + unsigned headIndex, + ModesetInfo modesetInfo) +{ + if (bForceHeadShutdownFromRegkey || bForceHeadShutdownPerMonitor) + { + return true; + } + + if (linkUseMultistream()) + { + return true; + } + + if (activeGroups.isEmpty()) + { + return false; + } + + bool bHeadShutdownNeeded = true; + LinkConfiguration lowestSelected = getMaxLinkConfig(); + + // Force highestLink config in SST + bool bSkipLowestConfigCheck = false; + bool bIsModeSupported = false; + + lowestSelected.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(lowestSelected.peakRate); + lowestSelected.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(lowestSelected.peakRatePossible); + GroupImpl* targetImpl = (GroupImpl*)target; + + // Certain panels only work when link train to highest linkConfig in SST mode. + for (Device * i = enumDevices(0); i; i=enumDevices(i)) + { + DeviceImpl * dev = (DeviceImpl *)i; + if (dev->forceMaxLinkConfig()) + { + bSkipLowestConfigCheck = true; + } + } + + // + // Check if there is a special request from the client, + // If so, skip lowering down the link config. + // + if (this->preferredLinkConfig.isValid()) + { + lowestSelected = preferredLinkConfig; + bSkipLowestConfigCheck = true; + } + + // If the flag is set, simply neglect downgrading to lowest possible linkConfig + if (!bSkipLowestConfigCheck) + { + LinkConfiguration lConfig = lowestSelected; + + bIsModeSupported = getValidLowestLinkConfig(lConfig, lowestSelected, modesetInfo, NULL); + } + else + { + if (this->willLinkSupportModeSST(lowestSelected, modesetInfo, NULL)) + { + bIsModeSupported = true; + } + } + + if (bIsModeSupported) + { + // + // This is to handle a case where we query current link config + // to UEFI during boot time and it fails to return. Currently + // we do not handle this scenario and head is not shut down + // though it's actually required. This is to allow head shutdown + // in such cases. + // + if (!isLinkActive()) + { + return true; + } + + // + // In case of DSC, if bpc is changing, we need to shut down the head + // since PPS can change + // In case of mode transition (DSC <-> non-DSC), if the link config is same as previous mode, we need to shut down the head + // since VBID[6] needs to be updated accordingly + // + if ((bForceHeadShutdownOnModeTransition && + ((modesetInfo.bEnableDsc && targetImpl->lastModesetInfo.bEnableDsc) && + (modesetInfo.bitsPerComponent != targetImpl->lastModesetInfo.bitsPerComponent))) || + ((lowestSelected.getTotalDataRate() == activeLinkConfig.getTotalDataRate()) && + (modesetInfo.bEnableDsc != targetImpl->lastModesetInfo.bEnableDsc))) + { + return true; + } + + // For dual DP while changing link config, we need to shut + // down the head + if (lowestSelected.lanes == 8) + { + // If link config is changing, head shutdown will be needed. + if ((activeLinkConfig.lanes == lowestSelected.lanes) && + (activeLinkConfig.peakRate == lowestSelected.peakRate)) + { + bHeadShutdownNeeded = false; + } + } + // + // If link config is going lower then we need to shut down the + // head. If we link train to a lower config before reducing the + // mode, we will hang the HW since head would still be driving + // the higher mode at the time of link train. 
+ // + else if ((lowestSelected.getTotalDataRate()) >= (activeLinkConfig.getTotalDataRate())) + { + bHeadShutdownNeeded = false; + } + } + else + { + DP_ASSERT( 0 && "DP-CONN> This mode is not possible at any link configuration!"); + } + + if (targetImpl) + { + targetImpl->bIsHeadShutdownNeeded = bHeadShutdownNeeded; + } + + return bHeadShutdownNeeded; +} + +bool ConnectorImpl::isLinkTrainingNeededForModeset(ModesetInfo modesetInfo) +{ + // Force highestLink config in SST + bool bSkipLowestConfigCheck = false; + bool bIsModeSupported = false; + LinkConfiguration lowestSelected = getMaxLinkConfig(); + lowestSelected.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(lowestSelected.peakRate); + lowestSelected.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(lowestSelected.peakRatePossible); + + if (linkUseMultistream()) + { + if (!isLinkActive()) + { + // If MST, we always need to link train if link is not active + return true; + } + else if (lowestSelected != activeLinkConfig) + { + // + // If the link is active, we have to retrain, if active Link Config is + // not the highest possible Link Config. + // + return true; + } + else + { + // + // We don't have to retrain if link is active and at highest possible config + // since for MST we should always link train to highest possible Link Config. + // + return false; + } + } + + // + // Link training is needed if link is not alive OR alive but inactive + // ie., lane status reports symbol lock/interlane align/CR failures + // + if (isLinkLost() || !isLinkActive()) + { + return true; + } + + // + // Link training is needed if link config was previously guessed (not assessed by the driver). + // The link config is marked as guessed in below cases - + // a. Invalid link rate returned by UEFI + // b. When max link config is HBR3 and currently assessed by UEFI != HBR3 + // c. If a SOR is not assigned to display during link assessment + // + if (this->linkGuessed) + { + return true; + } + + // Certain panels only work when link train to highest linkConfig in SST mode. + for (Device * i = enumDevices(0); i; i=enumDevices(i)) + { + DeviceImpl * dev = (DeviceImpl *)i; + if (dev->forceMaxLinkConfig()) + { + bSkipLowestConfigCheck = true; + } + } + + // + // Check if there is a special request from the client, + // If so, skip lowering down the link config. 
+ // + if (this->preferredLinkConfig.isValid()) + { + lowestSelected = preferredLinkConfig; + bSkipLowestConfigCheck = true; + } + + // If the flag is set, simply skip downgrading to the lowest possible linkConfig + if (!bSkipLowestConfigCheck) + { + LinkConfiguration lConfig = lowestSelected; + + bIsModeSupported = getValidLowestLinkConfig(lConfig, lowestSelected, modesetInfo, NULL); + } + else + { + if (this->willLinkSupportModeSST(lowestSelected, modesetInfo, NULL)) + { + bIsModeSupported = true; + } + } + + // + // Link training is needed if requested mode/link config is + // different from the active mode/link config + // + if (bIsModeSupported) + { + if ((activeLinkConfig.lanes != lowestSelected.lanes) || + (activeLinkConfig.peakRate != lowestSelected.peakRate)) + { + return true; + } + } + else + { + DP_ASSERT( 0 && "DP-CONN> This mode is not possible at any link configuration!"); + } + + return false; +} + +bool DisplayPort::SetConfigSingleHeadMultiStreamMode(Group **targets, + NvU32 displayIDs[], + NvU32 numStreams, + DP_SINGLE_HEAD_MULTI_STREAM_MODE mode, + bool bSetConfig, + NvU8 vbiosPrimaryDispIdIndex, + bool bEnableAudioOverRightPanel) +{ + GroupImpl *pTargetImpl = NULL; + ConnectorImpl *pConnectorImpl = NULL; + ConnectorImpl *pPrevConnectorImpl = NULL; + + if (numStreams > NV0073_CTRL_CMD_DP_SINGLE_HEAD_MAX_STREAMS || numStreams <= 0) + { + DP_PRINTF(DP_ERROR, "DP-CONN> ERROR: in configuring single head multistream mode: " + "invalid number of streams"); + return false; + } + + for (NvU32 iter = 0; iter < numStreams; iter++) + { + pTargetImpl = (GroupImpl*)targets[iter]; + + if (pTargetImpl == NULL) + { + DP_PRINTF(DP_ERROR, "DP-CONN> ERROR: in configuring single head multistream mode: " + "invalid target passed by client"); + return false; + } + + pConnectorImpl = (ConnectorImpl*) (pTargetImpl->parent); + + if (bSetConfig) + { + if (DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST == mode) + { + // + // Detach any active firmware groups before configuring singleHead dual SST + // + if (pTargetImpl->isHeadAttached() && pTargetImpl->headInFirmware) + { + pConnectorImpl->notifyDetachBegin(NULL); + pConnectorImpl->notifyDetachEnd(); + } + + if (displayIDs[iter] != pConnectorImpl->main->getRootDisplayId()) + { + DP_ASSERT( 0 && "DP-CONN> invalid single head multistream SST configuration !"); + return false; + } + + // 0th index is primary connector index, + // 1st is secondary connector index, and so on + if (iter > DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY) + { + pPrevConnectorImpl->pCoupledConnector = pConnectorImpl; + if (iter == (numStreams - 1)) + { + pConnectorImpl->pCoupledConnector = + (ConnectorImpl*)((GroupImpl*)targets[DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY])->parent; + } + // Clear secondary connector's link guessed state + pConnectorImpl->linkGuessed = false; + } + + pPrevConnectorImpl = pConnectorImpl; + } + + pTargetImpl->singleHeadMultiStreamMode = mode; + pTargetImpl->singleHeadMultiStreamID = (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID)iter; + + // Save the 'Audio over Right Panel' configuration in Connector Impl + // Use this configuration when SF gets programmed. 
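+ // For reference, a typical call into this entry point (illustrative only; + // the variable names here are hypothetical): + // Group *targets[2] = { pPriGrp, pSecGrp }; + // NvU32 ids[2] = { priDisplayId, secDisplayId }; + // SetConfigSingleHeadMultiStreamMode(targets, ids, 2, + // DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST, + // true /* bSetConfig */, + // 0 /* vbiosPrimaryDispIdIndex */, + // false /* bEnableAudioOverRightPanel */); 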
+ if (bEnableAudioOverRightPanel) + { + pConnectorImpl->bAudioOverRightPanel = true; + } + } + else + { + pTargetImpl->singleHeadMultiStreamMode = DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE; + pTargetImpl->singleHeadMultiStreamID = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY; + pConnectorImpl->pCoupledConnector = NULL; + pConnectorImpl->bAudioOverRightPanel = false; + } + } + + pConnectorImpl->main->configureSingleHeadMultiStreamMode(displayIDs, + numStreams, + (NvU32)mode, + bSetConfig, + vbiosPrimaryDispIdIndex); + + return true; +} + +// +// This call will be deprecated as soon as all clients move to the new API +// +bool ConnectorImpl::notifyAttachBegin(Group * target, // Group of panels we're attaching to this head + unsigned headIndex, + unsigned twoChannelAudioHz, // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz, // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + NvU64 pixelClockHz, // Requested pixel clock for the mode + unsigned rasterWidth, + unsigned rasterHeight, + unsigned rasterBlankStartX, + unsigned rasterBlankEndX, + unsigned depth) +{ + ModesetInfo modesetInfo(twoChannelAudioHz, eightChannelAudioHz, pixelClockHz, rasterWidth, + rasterHeight, (rasterBlankStartX - rasterBlankEndX), 0 /*surfaceHeight*/, + depth, rasterBlankStartX, rasterBlankEndX); + + DpModesetParams modesetParams(headIndex, modesetInfo); + + return notifyAttachBegin (target, modesetParams); +} + +bool ConnectorImpl::setDeviceDscState(Device * dev, bool bEnableDsc) +{ + if (!((DeviceImpl *)dev)->isDSCPossible()) + { + return true; + } + + if (bEnableDsc) + { + if (!(((DeviceImpl *)dev)->setDscEnable(true /*bEnableDsc*/))) + { + DP_ASSERT(!"DP-CONN> Failed to configure DSC on Sink!"); + return false; + } + + if (!dscEnabledDevices.contains(dev)) + dscEnabledDevices.insertFront(dev); + } + else + { + bool bCurrDscEnable = false; + // Get Current DSC Enable State + if (!((DeviceImpl *)dev)->getDscEnable(&bCurrDscEnable)) + { + DP_PRINTF(DP_WARNING, "DP> Not able to get DSC Enable State!"); + } + + if (bCurrDscEnable) + { + bool bDisableDsc = true; + // Before disabling DSC, check if another device with the same parent has DSC enabled + for (Device * i = dscEnabledDevices.next(NULL); i != NULL; i = dscEnabledDevices.next(i)) + { + if ((i != dev) && (((DeviceImpl *)i)->parent == ((DeviceImpl *)dev)->parent)) + { + DP_PRINTF(DP_WARNING, "Parent is shared among devices and another device has DSC enabled, so we can't disable DSC"); + bDisableDsc = false; + break; + } + } + + if (bDisableDsc && !((DeviceImpl *)dev)->setDscEnable(false /*bEnableDsc*/)) + { + DP_ASSERT(!"DP-CONN> Failed to configure DSC on Sink!"); + return false; + } + } + + if (dscEnabledDevices.contains(dev)) + dscEnabledDevices.remove(dev); + } + return true; +} + +bool ConnectorImpl::needToEnableFEC(const DpPreModesetParams &params) +{ + for (NvU32 i = 0; i < NV_MAX_HEADS; i++) + { + if ((params.headMask & NVBIT(i)) == 0x0) + continue; + + if ((params.head[i].pTarget == NULL) || + !params.head[i].pModesetParams->modesetInfo.bEnableDsc) + continue; + + // eDP can support DSC with and without FEC + DeviceImpl * nativeDev = this->findDeviceInList(Address()); + if (this->main->isEDP() && nativeDev) + return nativeDev->getFECSupport(); + else + return true; + } + + return false; +} + +void ConnectorImpl::dpPreModeset(const DpPreModesetParams &params) +{ + this->bFECEnable |= this->needToEnableFEC(params); + + DP_ASSERT(this->inTransitionHeadMask == 0x0); + this->inTransitionHeadMask 
= 0x0; + + for (NvU32 i = 0; i < NV_MAX_HEADS; i++) + { + if ((params.headMask & NVBIT(i)) == 0x0) + continue; + + this->inTransitionHeadMask |= NVBIT(i); + + if (params.head[i].pTarget != NULL) + { + DP_ASSERT(params.head[i].pModesetParams->headIndex == i); + this->notifyAttachBegin(params.head[i].pTarget, + *params.head[i].pModesetParams); + } + else + { + this->notifyDetachBegin(this->perHeadAttachedGroup[i]); + } + this->perHeadAttachedGroup[i] = params.head[i].pTarget; + } +} + +void ConnectorImpl::dpPostModeset(void) +{ + for (NvU32 i = 0; i < NV_MAX_HEADS; i++) + { + if ((this->inTransitionHeadMask & NVBIT(i)) == 0x0) + continue; + + if (this->perHeadAttachedGroup[i] != NULL) + this->notifyAttachEnd(false); + else + this->notifyDetachEnd(); + + this->inTransitionHeadMask &= ~NVBIT(i); + } +} + +// +// Notify library before/after modeset (update) +// Here is what NAB essentially does: +// 0. Makes sure TMDS is not attached +// 1. Trains link to optimized link config ("optimized" depends on DP1.1, DP1.2) +// 2. Performs quick watermark check for IMP. If IMP is not possible, forces link, zombies devices +// 3. If any of the above fails, marks devices in the given group as zombies +// +// Return : true - NAB passed +// false - NAB failed due to invalid params or link training failure +// Link configs are forced in case of link training failure +// +bool ConnectorImpl::notifyAttachBegin(Group * target, // Group of panels we're attaching to this head + const DpModesetParams &modesetParams) +{ + unsigned twoChannelAudioHz = modesetParams.modesetInfo.twoChannelAudioHz; + unsigned eightChannelAudioHz = modesetParams.modesetInfo.eightChannelAudioHz; + NvU64 pixelClockHz = modesetParams.modesetInfo.pixelClockHz; + unsigned rasterWidth = modesetParams.modesetInfo.rasterWidth; + unsigned rasterHeight = modesetParams.modesetInfo.rasterHeight; + unsigned rasterBlankStartX = modesetParams.modesetInfo.rasterBlankStartX; + unsigned rasterBlankEndX = modesetParams.modesetInfo.rasterBlankEndX; + unsigned depth = modesetParams.modesetInfo.depth; + bool bLinkTrainingStatus = true; + bool bEnableDsc = modesetParams.modesetInfo.bEnableDsc; + bool bEnableFEC; + bool bEnablePassThroughForPCON = modesetParams.modesetInfo.bEnablePassThroughForPCON; + Device *newDev = target->enumDevices(0); + DeviceImpl *dev = (DeviceImpl *)newDev; + + if (hal->isDpTunnelBwAllocationEnabled() && + ((allocatedDpTunnelBwShadow != 0) || + (allocatedDpTunnelBw == 0))) + { + // + // We should never be here. + // One possible reason this could happen is if the client missed calling setModeList after a newDevice. + // At this point we definitely need more BW than is allocated. + // We could either try to greedily allocate BW = LC or assert and fail. + // However, given that DpLib's SW state is possibly incorrect, it is safer to assert and fail. + // + DP_ASSERT(!"Shadow BW non zero or no BW allocated. 
Failing notifyAttachBegin"); + return false; + } + + if(preferredLinkConfig.isValid()) + { + bEnableFEC = preferredLinkConfig.bEnableFEC; + } + else + { + DeviceImpl * nativeDev = findDeviceInList(Address()); + if (main->isEDP() && nativeDev) + { + // eDP can support DSC with and without FEC + bEnableFEC = bEnableDsc && nativeDev->getFECSupport(); + } + else + { + bEnableFEC = bEnableDsc; + } + } + + DP_PRINTF(DP_NOTICE, "DPCONN> Notify Attach Begin (Head %d, pclk %d raster %d x %d %d bpp)", + modesetParams.headIndex, pixelClockHz, rasterWidth, rasterHeight, depth); + NV_DPTRACE_INFO(NOTIFY_ATTACH_BEGIN, modesetParams.headIndex, pixelClockHz, rasterWidth, rasterHeight, + depth, bEnableDsc, bEnableFEC); + + if (!depth || !pixelClockHz) + { + DP_ASSERT(!"DP-CONN> Params with zero value passed to query!"); + return false; + } + + if ((modesetParams.modesetInfo.mode == DSC_DUAL) || + (modesetParams.modesetInfo.mode == DSC_DROP)) + { + if ((modesetParams.headIndex == NV_SECONDARY_HEAD_INDEX_1) || + (modesetParams.headIndex == NV_SECONDARY_HEAD_INDEX_3)) + { + DP_ASSERT(!"DP-CONN> For Two Head One OR, client should send Primary Head index!"); + return false; + } + } + + for (Device * dev = target->enumDevices(0); dev; dev = target->enumDevices(dev)) + { + Address::StringBuffer buffer; + DP_USED(buffer); + if (dev->isVideoSink()) + { + DP_PRINTF(DP_NOTICE, "DPCONN> | %s (VIDEO) |", dev->getTopologyAddress().toString(buffer)); + } + else + { + DP_PRINTF(DP_NOTICE, "DPCONN> | %s (BRANCH) |", dev->getTopologyAddress().toString(buffer)); + } + } + + if (firmwareGroup && ((GroupImpl *)firmwareGroup)->headInFirmware) + { + DP_ASSERT(bIsUefiSystem || (0 && "DPCONN> Firmware still active on head. De-activating")); + } + + GroupImpl* targetImpl = (GroupImpl*)target; + + if (bEnableDsc) + { + switch (modesetParams.modesetInfo.mode) + { + case DSC_SINGLE: + DP_PRINTF(DP_NOTICE, "DPCONN> DSC Mode = SINGLE"); + break; + case DSC_DUAL: + DP_PRINTF(DP_NOTICE, "DPCONN> DSC Mode = DUAL"); + break; + case DSC_DROP: + DP_PRINTF(DP_NOTICE, "DPCONN> DSC Mode = DROP"); + break; + case DSC_MODE_NONE: + DP_PRINTF(DP_NOTICE, "DPCONN> DSC Mode = NONE"); + break; + } + + targetImpl->dscModeRequest = modesetParams.modesetInfo.mode; + } + + DP_ASSERT(!(targetImpl->isHeadAttached() && targetImpl->bIsHeadShutdownNeeded) && "Head should have been shut down but it is still active!"); + + targetImpl->headInFirmware = false; + if (firmwareGroup) + { + ((GroupImpl *)firmwareGroup)->headInFirmware = false; + } + + if (firmwareGroup && activeGroups.contains((GroupImpl*)firmwareGroup)) + { + if (((GroupImpl *)firmwareGroup)->isHeadAttached()) + { + targetImpl->setHeadAttached(true); + } + activeGroups.remove((GroupImpl*)firmwareGroup); + inactiveGroups.insertBack((GroupImpl*)firmwareGroup); + } + + if (this->linkGuessed && (targetImpl->singleHeadMultiStreamMode != DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)) + { + DP_ASSERT(!(this->linkGuessed) && "Link was not assessed previously. Probable reason: system was not in driver mode. 
Assessing now."); + this->assessLink(); + } + + DP_ASSERT(this->isLinkQuiesced == 0 && "According to bracketting calls TMDS/alternate DP still active!"); + + // Transfer the group to active list + inactiveGroups.remove(targetImpl); + activeGroups.insertBack(targetImpl); + intransitionGroups.insertFront(targetImpl); + + if (modesetParams.colorFormat == dpColorFormat_YCbCr422 && + dev && dev->dscCaps.dscDecoderColorFormatCaps.bYCbCrNative422) + { + targetImpl->lastModesetInfo = ModesetInfo(twoChannelAudioHz, eightChannelAudioHz, + pixelClockHz, rasterWidth, rasterHeight, + (rasterBlankStartX - rasterBlankEndX), modesetParams.modesetInfo.surfaceHeight, + depth, rasterBlankStartX, rasterBlankEndX, bEnableDsc, modesetParams.modesetInfo.mode, + false, dpColorFormat_YCbCr422_Native); + } + else + { + targetImpl->lastModesetInfo = ModesetInfo(twoChannelAudioHz, eightChannelAudioHz, + pixelClockHz, rasterWidth, rasterHeight, + (rasterBlankStartX - rasterBlankEndX), modesetParams.modesetInfo.surfaceHeight, + depth, rasterBlankStartX, rasterBlankEndX, bEnableDsc, modesetParams.modesetInfo.mode, + false, modesetParams.colorFormat); + } + + targetImpl->headIndex = modesetParams.headIndex; + targetImpl->streamIndex = main->headToStream(modesetParams.headIndex, (messageManager != NULL), targetImpl->singleHeadMultiStreamID); + targetImpl->colorFormat = modesetParams.colorFormat; + + DP_ASSERT(!this->isLinkQuiesced && "TMDS is attached, NABegin is impossible!"); + + // + // Update the FEC enabled flag according to the mode requested. + // + // In MST config, if one panel needs DSC/FEC and the other one does not, + // we still need to keep FEC enabled on the connector since at least one + // stream needs it. + // + this->bFECEnable |= bEnableFEC; + + highestAssessedLC.enableFEC(this->bFECEnable); + + if (main->isEDP()) + { + main->configurePowerState(true); + if (bOuiCached) + { + hal->setOuiSource(cachedSourceOUI, &cachedSourceModelName[0], + 6 /* string length of ieeeOuiDevId */, + cachedSourceChipRevision); + } + else + { + if (!this->bSkipZeroOuiCache) + { + DP_ASSERT("eDP Source OUI is not cached!"); + } + else + { + this->performIeeeOuiHandshake(); + } + } + } + + LinkConfiguration maxLinkConfig = getMaxLinkConfig(); + maxLinkConfig.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(maxLinkConfig.peakRate); + maxLinkConfig.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(maxLinkConfig.peakRatePossible); + + // if failed, we're guaranteed that assessed link rate didn't meet the mode requirements + // isZombie() will catch this + bLinkTrainingStatus = trainLinkOptimized(maxLinkConfig); + + // If panel supports DSC, set DSC enabled/disabled + // according to the mode requested. 
+ + for (Device * dev = target->enumDevices(0); dev; dev = target->enumDevices(dev)) + { + if (bPConConnected) + { + if (!(((DeviceImpl *)dev)->setDscEnableDPToHDMIPCON(bEnableDsc, bEnablePassThroughForPCON))) + { + DP_ASSERT(!"DP-CONN> Failed to configure DSC on DP to HDMI PCON!"); + } + } + else if (!setDeviceDscState(dev, bEnableDsc)) + { + DP_ASSERT(!"DP-CONN> Failed to configure DSC on Sink!"); + } + } + +// TODO: Check if we can completely remove DP_OPTION_HDCP_12_ENABLED + // Clean up: Clearing ECF + if (linkUseMultistream()) + { + targetImpl->hdcpSetEncrypted(false, NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_TYPE_0, NV_TRUE, NV_FALSE); + targetImpl->hdcpEnabled = false; + } + + beforeAddStream(targetImpl); + + if (linkUseMultistream()) + { + // Select which pipeline the ACT trigger should take effect on + if ((targetImpl->singleHeadMultiStreamMode != DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST) || + (targetImpl->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY)) + { + main->configureTriggerSelect(targetImpl->headIndex, targetImpl->singleHeadMultiStreamID); + } + } + + if (!linkUseMultistream() || main->supportMSAOverMST()) + { + bool enableInbandStereoSignaling = false; + + DP_ASSERT(activeGroups.isEmpty() == false); + + if (main->isInbandStereoSignalingSupported()) + { + enableInbandStereoSignaling = true; + } + + // + // Bug 200362535 + // setDpStereoMSAParameters does not cache the msa params. It will immediately + // apply just the stereo specific parameters. This is required because we + // can toggle the msa params using nvidia control panel and in that scenario + // we do not get supervisor interrupts. Since SV interrupts do not occur, the + // msa parameters do not get applied. So to avoid having to reboot to apply the + // stereo msa params, setDpStereoMSAParameters is called. + // + // setDpMSAParameters will contain all msa params, including stereo, cached. + // These will be applied during supervisor interrupt. So if we get + // SV interrupts later, the same stereo settings will be applied twice: + // first by setDpStereoMSAParameters and later by setDpMSAParameters. + // + main->setDpStereoMSAParameters(!enableInbandStereoSignaling, modesetParams.msaparams); + main->setDpMSAParameters(!enableInbandStereoSignaling, modesetParams.msaparams); + } + + NV_DPTRACE_INFO(NOTIFY_ATTACH_BEGIN_STATUS, bLinkTrainingStatus); + + bFromResumeToNAB = false; + return bLinkTrainingStatus; +} + + +// +// modesetCancelled True, when DD respected NAB failure and cancelled modeset. +// False, when NAB succeeded, or DD didn't honor NAB failure +// +// Here is what NAE is supposed to do: +// 1. modesetCancelled == TRUE, NAB failed: +// unzombie all devices and set linkForced to false; we have status quo for the next modeset +// 2. modesetCancelled == False, NAB failed: +// If NAB failed, linkForced is TRUE. NAE finds zombied devices and notifies DD about them. +// 3. modesetCancelled == False, NAB succeeded: +// NAE is no-op. 
(but we have some special sanity code) +// +void ConnectorImpl::notifyAttachEnd(bool modesetCancelled) +{ + GroupImpl* currentModesetDeviceGroup = NULL; + DP_PRINTF(DP_NOTICE, "DPCONN> Notify Attach End"); + NV_DPTRACE_INFO(NOTIFY_ATTACH_END); + + bFromResumeToNAB = false; + + if (intransitionGroups.isEmpty()) + { + DP_ASSERT( 0 && "INVALID STATE: Modeset Group is NULL"); + return; + } + + currentModesetDeviceGroup = intransitionGroups.pop(); + + if (modesetCancelled) + { + currentModesetDeviceGroup->setHeadAttached(false); + } + + // set dscModeActive to what was requested in NAB and clear dscModeRequest + currentModesetDeviceGroup->dscModeActive = currentModesetDeviceGroup->dscModeRequest; + currentModesetDeviceGroup->dscModeRequest = DSC_MODE_NONE; + + currentModesetDeviceGroup->setHeadAttached(true); + RmDfpCache dfpCache = {0}; + dfpCache.updMask = 0; + if (currentModesetDeviceGroup->isHeadAttached()) + { + for (DeviceImpl * dev = (DeviceImpl *)currentModesetDeviceGroup->enumDevices(0); + dev; dev = (DeviceImpl *)currentModesetDeviceGroup->enumDevices(dev)) + { + dfpCache.bcaps = *dev->BCAPS; + for (unsigned i=0; i < HDCP_KSV_SIZE; i++) + dfpCache.bksv[i] = dev->BKSV[i]; + + dfpCache.updMask |= (1 << NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BCAPS); + dfpCache.updMask |= (1 << NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BKSV); + main->rmUpdateDynamicDfpCache(dev->activeGroup->headIndex, &dfpCache, True); + + // Remove this while enabling HDCP for MSC + break; + } + } + + // + // Add rest of the streams (other than primary) in notifyAE, since this can't be done + // unless a SOR is attached to a Head (part of modeset), and trigger ACT immediate + // + if ((currentModesetDeviceGroup->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST) && + (currentModesetDeviceGroup->singleHeadMultiStreamID > DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY)) + { + DP_ASSERT(linkUseMultistream() && "it should be multistream link to configure single head MST"); + hal->payloadTableClearACT(); + hal->payloadAllocate(currentModesetDeviceGroup->streamIndex, + currentModesetDeviceGroup->timeslot.begin, currentModesetDeviceGroup->timeslot.count); + main->configureTriggerSelect(currentModesetDeviceGroup->headIndex, currentModesetDeviceGroup->singleHeadMultiStreamID); + main->triggerACT(); + } + + afterAddStream(currentModesetDeviceGroup); + + // + // Turn Authentication/Encryption back on if it was previously on. + // For DP1.1, let the upstream turn it back on. + // For DP1.2, we should turn it back on after the modeset if it was on. + // The authentication will be called off during the modeset. + // + HDCPState hdcpState = {0}; + main->configureHDCPGetHDCPState(hdcpState); + if ((!hdcpState.HDCP_State_Authenticated) && (isHDCPAuthOn == true) + && (currentModesetDeviceGroup->hdcpEnabled)) + { + if (!this->linkUseMultistream()) + { + currentModesetDeviceGroup->hdcpEnabled = isHDCPAuthOn = false; + } + else if (!bHdcpAuthOnlyOnDemand) + { + currentModesetDeviceGroup->cancelHdcpCallbacks(); + + if (hdcpState.HDCP_State_Authenticated) + { + isHDCPAuthOn = true; + currentModesetDeviceGroup->hdcpSetEncrypted(true); + } + else + { + currentModesetDeviceGroup->hdcpEnabled = isHDCPAuthOn = false; + } + } + } + + // + // RM requires the head to be ARMed to do authentication. + // As a solution for DP1.2, postpone the authentication until NAE. 
+ // + if (isDP12AuthCap && !isHopLimitExceeded && !isHDCPReAuthPending && + !bHdcpAuthOnlyOnDemand) + { + isHDCPReAuthPending = true; + timer->queueCallback(this, &tagHDCPReauthentication, HDCP_AUTHENTICATION_COOLDOWN_HPD); + } + + if (!bHdcpStrmEncrEnblOnlyOnDemand) + { + hdcpEnableTransitionGroups.insertFront(currentModesetDeviceGroup); + } + hdcpCapsRetries = 0; + timer->queueCallback(this, &tagDelayedHdcpCapRead, 2000); + + fireEvents(); +} + +// Notify library before/after shutdown (update) +void ConnectorImpl::notifyDetachBegin(Group * target) +{ + if (!target) + target = firmwareGroup; + + NV_DPTRACE_INFO(NOTIFY_DETACH_BEGIN); + + GroupImpl * group = (GroupImpl*)target; + + DP_PRINTF(DP_NOTICE, "DPCONN> Notify detach begin"); + DP_ASSERT((group->headInFirmware || group->isHeadAttached()) && "Disconnecting an inactive device"); + + // Check to see if a pattern request was on. If yes, clear the pattern. + PatternInfo pattern_info; + pattern_info.lqsPattern = hal->getPhyTestPattern(); + // Send a control call to RM for the pattern + if (pattern_info.lqsPattern != LINK_QUAL_DISABLED) + { + pattern_info.lqsPattern = LINK_QUAL_DISABLED; + if (!main->physicalLayerSetTestPattern(&pattern_info)) + DP_ASSERT(0 && "Could not set the PHY_TEST_PATTERN"); + } + + // + // At this point pixels are dropped, so we can clear the ECF. + // Force Clear ECF is set to TRUE, which will delete time slots and send ACT. + // + if (linkUseMultistream()) + { + if (!(bHdcpStrmEncrEnblOnlyOnDemand) && hdcpEnableTransitionGroups.contains(group)) + { + hdcpEnableTransitionGroups.remove(group); + } + group->hdcpSetEncrypted(false, NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_TYPE_0, NV_TRUE, NV_FALSE); + group->hdcpEnabled = false; + } + + beforeDeleteStream(group); + + // + // Set the trigger select so that the frontend corresponding to the stream + // takes effect + // + if (linkUseMultistream()) + { + main->configureTriggerSelect(group->headIndex, group->singleHeadMultiStreamID); + + // Clear payload of other than primary streams and trigger ACT immediate + if ((group->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST) && + (group->singleHeadMultiStreamID != DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY)) + { + main->triggerACT(); + if (!hal->payloadWaitForACTReceived()) + { + DP_PRINTF(DP_ERROR, "DP-TS> Downstream device did not receive ACT during stream clear"); + DP_ASSERT(0); + } + } + } + + intransitionGroups.insertFront(group); +} + +// +// Here is what NDE does: +// 1. delete unplugged devices (they were zombies, if they're on this list) +// 2. unmark zombies (they were plugged zombies, they might want to get link trained next time) +// 3. 
mark head as detached (so that we can delete any HPD unplugged devices) +// +void ConnectorImpl::notifyDetachEnd(bool bKeepOdAlive) +{ + GroupImpl* currentModesetDeviceGroup = NULL; + DP_PRINTF(DP_NOTICE, "DPCONN> Notify detach end"); + NV_DPTRACE_INFO(NOTIFY_DETACH_END); + + if (intransitionGroups.isEmpty()) + { + DP_ASSERT( 0 && "INVALID STATE: Modeset Group is NULL"); + return; + } + + currentModesetDeviceGroup = intransitionGroups.pop(); + + afterDeleteStream(currentModesetDeviceGroup); + + if (!linkUseMultistream()) + { + Device * d = 0; + for (d = currentModesetDeviceGroup->enumDevices(0); + currentModesetDeviceGroup->enumDevices(d) != 0; + d = currentModesetDeviceGroup->enumDevices(d)) + { + // only one device in the group + DP_ASSERT(d && (((DeviceImpl*)d)->activeGroup == currentModesetDeviceGroup)); + } + } + + // nullify last modeset info + dpMemZero(&currentModesetDeviceGroup->lastModesetInfo, sizeof(ModesetInfo)); + currentModesetDeviceGroup->setHeadAttached(false); + currentModesetDeviceGroup->headInFirmware = false; + currentModesetDeviceGroup->dscModeActive = DSC_MODE_NONE; + + // Mark head as disconnected + bNoLtDoneAfterHeadDetach = true; + + // + // Update the last modeset HDCP status here. HDCP got disabled after the modeset, + // thus hdcpPreviousStatus would be false for SST after a device is inserted. + // + HDCPState hdcpState = {0}; + main->configureHDCPGetHDCPState(hdcpState); + if (!(isHDCPAuthOn = hdcpState.HDCP_State_Authenticated)) + { + currentModesetDeviceGroup->hdcpEnabled = false; + } + // + // ToDo: Need to confirm the HW and UpStream SW behavior on DP1.2. + // For HW, we need to know whether ECF will be cleared by modeset or not. + // For UpStream SW, we need to know whether the upstream will call off the encryption. + // TODO: Need to remove this as we are already disabling encryption in Notify Detach Begin + // + else if ((this->linkUseMultistream()) && (currentModesetDeviceGroup->hdcpEnabled)) + { + currentModesetDeviceGroup->hdcpSetEncrypted(false, NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_TYPE_0, NV_TRUE, NV_FALSE); + currentModesetDeviceGroup->hdcpEnabled = false; + } + + // Update Vbios scratch register + for (Device * d = currentModesetDeviceGroup->enumDevices(0); d; + d = currentModesetDeviceGroup->enumDevices(d)) + { + currentModesetDeviceGroup->updateVbiosScratchRegister(d); + } + + // Reset value of bIsHeadShutdownNeeded to get rid of false asserts + currentModesetDeviceGroup->bIsHeadShutdownNeeded = false; + + // If this is eDP and the LCD power is not ON, we don't need to Disable DSC here + bool bPanelPwrSts = true; + if ((!main->isEDP()) || (main->getEdpPowerData(&bPanelPwrSts, NULL) && bPanelPwrSts)) + { + // Disable DSC decompression on the panel if panel supports DSC and reset bFECEnable Flag + for (Device * dev = currentModesetDeviceGroup->enumDevices(0); dev; dev = currentModesetDeviceGroup->enumDevices(dev)) + { + if (!(setDeviceDscState(dev, false /*bEnableDsc*/))) + { + DP_ASSERT(!"DP-CONN> Failed to configure DSC on Sink!"); + } + } + } + + // Transfer to inactive group and cancel pending callbacks for that group. + currentModesetDeviceGroup->cancelHdcpCallbacks(); + activeGroups.remove(currentModesetDeviceGroup); + inactiveGroups.insertBack(currentModesetDeviceGroup); + + if (activeGroups.isEmpty()) + { + cancelHdcpCallbacks(); + + // We disconnected a panel, try to clear the transition + if (linkAwaitingTransition) + { + assessLink(); + } + // + // Power down the links as we have switched away from the monitor. 
+ // Only power down if we are in single stream + // + else + { + // + // - if eDP, disable ASSR after switching off the stream from head + // to prevent corruption (bug 926360) + // - disable ASSR before power down link (bug 1641174) + // + if (main->isEDP()) + { + bool bPanelPowerOn; + // If eDP's power has been shut down here, don't disable ASSR, else it will be turned on by LT. + if (main->getEdpPowerData(&bPanelPowerOn, NULL) && bPanelPowerOn) + { + main->disableAlternateScramblerReset(); + } + } + // + // Power down the links as we have switched away from the monitor. + // For shared SOR case, we need this to keep SW stats in DP instances in sync. + // Only power down the link when it's not a compliance test device. + // + // Some eDP panels are known to have problems when powered down. + // See bug 1425706, 1376753, 1347872, 1355592 + // + // Hotplug may trigger detach before processNewDevice if the previous state has a + // lost device not yet detached. Avoid powering down in that case so the following + // device discovery HDCP probe can proceed. + // + if (!bIsDiscoveryDetectActive) + powerdownLink(!main->skipPowerdownEdpPanelWhenHeadDetach() && !bKeepOdAlive); + } + if (this->policyModesetOrderMitigation && this->modesetOrderMitigation) + this->modesetOrderMitigation = false; + } + fireEvents(); +} + +bool ConnectorImpl::trainPCONFrlLink(PCONLinkControl *pconControl) +{ + NvU32 loopCount = NV_PCON_SOURCE_CONTROL_MODE_TIMEOUT_THRESHOLD; + NvU32 frlRateMask = 0; + bool bFrlReady = false; + bool result = false; + + // Initial return values. + pconControl->result.trainedFrlBwMask = 0; + pconControl->result.maxFrlBwTrained = PCON_HDMI_LINK_BW_FRL_INVALID; + + // Step 1: Setup PCON for later operation + + // Step 1.1: Set D0 power + hal->setPowerState(PowerStateD0); + + hal->resetProtocolConverter(); + + // Step 1.2: Enable Source Control Mode and FRL mode, enable FRL-Ready IRQ + hal->setSourceControlMode(true, true); + + do + { + // + // Step 1.3: Poll for HDMI-Link-Status Change (0x2005 Bit 3) + // Get FRL Ready Bit (0x303B Bit 1) + // + hal->checkPCONFrlReady(&bFrlReady); + if (bFrlReady == true) + { + break; + } + Timeout timeout(this->timer, NV_PCON_SOURCE_CONTROL_MODE_TIMEOUT_INTERVAL_MS); + while(timeout.valid()); + continue; + } while (--loopCount); + + if (bFrlReady == false) + { + pconControl->result.status = NV_DP_PCON_CONTROL_STATUS_ERROR_TIMEOUT; + return false; + } + + // Step 2: Assess FRL Link capability. + + // + // Step 2.1: Configure FRL Link (FRL BW, BW mask / Concurrent) + // Start with mask for all bandwidth. Please refer to definition of DPCD 0x305B. + // + result = hal->setupPCONFrlLinkAssessment(pconControl->frlHdmiBwMask, + pconControl->flags.bExtendedLTMode, + pconControl->flags.bConcurrentMode); + if (result == false) + { + pconControl->result.status = NV_DP_PCON_CONTROL_STATUS_ERROR_GENERIC; + return false; + } + + // Step 2.2: Poll for HDMI-Link-Status Change (0x2005 Bit 3) + loopCount = NV_PCON_FRL_LT_TIMEOUT_THRESHOLD; + do + { + result = hal->checkPCONFrlLinkStatus(&frlRateMask); + if (result == true) + { + break; + } + Timeout timeout(this->timer, NV_PCON_FRL_LT_TIMEOUT_INTERVAL_MS); + while(timeout.valid()); + continue; + } while (--loopCount); + + if (result == true) + { + // + // frlRateMask is the result from checkPCONFrlLinkStatus (0x3036) Bit 1~6. 
+ // + pconControl->result.status = NV_DP_PCON_CONTROL_STATUS_SUCCESS; + pconControl->result.trainedFrlBwMask = frlRateMask; + pconControl->result.maxFrlBwTrained = getMaxFrlBwFromMask(frlRateMask); + } + else + { + pconControl->result.status = NV_DP_PCON_CONTROL_STATUS_ERROR_FRL_LT_FAILURE; + } + return result; +} + +bool ConnectorImpl::assessPCONLinkCapability(PCONLinkControl *pConControl) +{ + if (pConControl == NULL || !this->previousPlugged) + return false; + + bool bIsFlushModeEnabled = enableFlush(); + + if (!bIsFlushModeEnabled) + { + return false; + } + + if (pConControl->flags.bSourceControlMode) + { + if (trainPCONFrlLink(pConControl) == false) + { + // restore Autonomous mode and treat this as an active DP dongle. + hal->resetProtocolConverter(); + // Exit flush mode + disableFlush(); + if (!pConControl->flags.bSkipFallback) + { + bSkipAssessLinkForPCon = false; + assessLink(); + } + return false; + } + activePConLinkControl.flags = pConControl->flags; + activePConLinkControl.frlHdmiBwMask = pConControl->frlHdmiBwMask; + activePConLinkControl.result = pConControl->result; + } + else + { + // restore Autonomous mode and treat this as an active DP dongle. + hal->resetProtocolConverter(); + } + + // Step 3: Assess DP Link capability. + LinkConfiguration lConfig = getMaxLinkConfig(); + lConfig.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(lConfig.peakRate); + lConfig.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(lConfig.peakRatePossible); + highestAssessedLC = getMaxLinkConfig(); + highestAssessedLC.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(highestAssessedLC.peakRate); + highestAssessedLC.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(highestAssessedLC.peakRatePossible); + + hal->updateDPCDOffline(); + if (hal->isDpcdOffline()) + { + disableFlush(); + return false; + } + if (!train(lConfig, false /* do not force LT */)) + { + // + // Note that now train() handles fallback, activeLinkConfig + // has the max link config that was assessed. + // + lConfig = activeLinkConfig; + } + + highestAssessedLC = lConfig; + linkGuessed = false; + disableFlush(); + + this->bKeepLinkAliveForPCON = pConControl->flags.bKeepPCONLinkAlive; + return true; +} + +bool ConnectorImpl::getOuiSink(unsigned &ouiId, unsigned char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision) +{ + if (!previousPlugged || !hal->getOuiSupported()) + return false; + + return hal->getOuiSink(ouiId, modelName, modelNameBufferSize, chipRevision); +} + +void ConnectorImpl::setIgnoreSourceOuiHandshake(bool bIgnoreOuiHandShake) +{ + bIgnoreSrcOuiHandshake = bIgnoreOuiHandShake; +} + +bool ConnectorImpl::getIgnoreSourceOuiHandshake() +{ + return bIgnoreSrcOuiHandshake; +} + +bool ConnectorImpl::performIeeeOuiHandshake() +{ + const char *ieeeOuiDevId = "NVIDIA"; + + if (!hal->getOuiSupported() || getIgnoreSourceOuiHandshake()) + return false; + + if (hal->setOuiSource(DPCD_OUI_NVIDIA, ieeeOuiDevId, 6 /* string length of ieeeOuiDevId */, 0) == AuxRetry::ack) + { + NvU8 chipRevision = 0; + + // parse client OUI. 
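+ // Background (hedged; per the DisplayPort spec, not this change): the source + // writes its IEEE OUI plus the 6-byte device identification string ("NVIDIA" + // above) to DPCD 00300h-00308h, and the sink's OUI/ID string is read back + // starting at DPCD 00400h by getOuiSink() below; DPCD_OUI_NVIDIA is the + // 3-byte OUI itself, distinct from the ID string. 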
+ if (hal->getOuiSink(ouiId, &modelName[0], sizeof(modelName), chipRevision)) + { + DP_PRINTF(DP_NOTICE, "DP> SINK-OUI id(0x%08x) %s: rev:%d.%d", ouiId, + (NvU8*)modelName, + (unsigned)DRF_VAL(_DPCD, _SINK_HARDWARE_REV, _MAJOR, chipRevision), + (unsigned)DRF_VAL(_DPCD, _SINK_HARDWARE_REV, _MINOR, chipRevision)); + return true; + } + } + return false; +} + +bool ConnectorImpl::willLinkSupportModeSST +( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + const DscParams *pDscParams +) +{ + DP_ASSERT(!linkUseMultistream() && "IMP for SST only"); + + // + // Mode is not known yet, so we have to report it as possible. + // Otherwise we're going to mark all devices as zombies on first HPD(c), + // since modeset info is not available. + // + if (modesetInfo.pixelClockHz == 0) + return true; + + if (linkConfig.lanes == 0 || linkConfig.peakRate == 0) + return false; + + Watermark water; + + if (this->isFECSupported()) + { + if (!isModePossibleSSTWithFEC(linkConfig, modesetInfo, &water, main->hasIncreasedWatermarkLimits())) + { + // Verify audio + return false; + } + } + else + { + if (!isModePossibleSST(linkConfig, modesetInfo, &water, main->hasIncreasedWatermarkLimits())) + { + // Verify audio + return false; + } + } + return true; +} + +// Gets max values from the DPCD HAL and forces link training with that config +void ConnectorImpl::forceLinkTraining() +{ + LinkConfiguration forcedMaxConfig(getMaxLinkConfig()); + forcedMaxConfig.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(forcedMaxConfig.peakRate); + forcedMaxConfig.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(forcedMaxConfig.peakRatePossible); + train(forcedMaxConfig, true); +} + +void ConnectorImpl::powerdownLink(bool bPowerdownPanel) +{ + bool bPanelPwrSts = true; + + LinkConfiguration powerOff = getMaxLinkConfig(); + powerOff.lanes = 0; + powerOff.peakRate = dp2LinkRate_1_62Gbps; // Set to lowest peakRate + + if (linkUseMultistream() && bPowerDownPhyBeforeD3) + { + // Inform Sink about Main Link Power Down. + PowerDownPhyMessage powerDownPhyMsg; + NakData nack; + + for (Device * i = enumDevices(0); i; i=enumDevices(i)) + { + if (i->isPlugged() && i->isVideoSink()) + { + Address devAddress = ((DeviceImpl*)i)->address; + powerDownPhyMsg.set(devAddress.parent(), devAddress.tail(), NV_TRUE); + this->messageManager->send(&powerDownPhyMsg, nack); + } + } + } + + // + // 1> If it is eDP and the power is not on, we don't need to put it into D3 here + // 2> If FEC is enabled then we have to put the panel in D3 after powering down the mainlink, + // as FEC disable has to be detected by the panel, which happens as part of link + // power down; we need to keep the panel in D0 for this. + // + if (!this->bFECEnable && + ((!main->isEDP()) || (main->getEdpPowerData(&bPanelPwrSts, NULL) && bPanelPwrSts))) + { + hal->setPowerState(PowerStateD3); + } + + train(powerOff, !bPowerdownPanel); // Train to 0 laneCount, RBR linkRate (powerDown sequence) + + // + // If FEC is enabled, put panel to D3 here for non-eDP. 
+    // For eDP with FEC support, the FEC state would be cleared as part of panel
+    // power down.
+    //
+    if (this->bFECEnable && (!main->isEDP()))
+    {
+        hal->setPowerState(PowerStateD3);
+    }
+
+    // Set FEC state as false in link power down
+    this->bFECEnable = false;
+    highestAssessedLC.enableFEC(false);
+}
+
+GroupImpl * ConnectorImpl::getActiveGroupForSST()
+{
+    if (this->linkUseMultistream())
+        return 0;
+    GroupImpl * groupAttached = 0;
+    for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next)
+    {
+        // There should only be one group for the connector.
+        if (groupAttached)
+        {
+            DP_ASSERT(0 && "Multiple attached heads");
+            return 0;
+        }
+        groupAttached = (GroupImpl * )e;
+    }
+    return groupAttached;
+}
+
+bool ConnectorImpl::trainSingleHeadMultipleSSTLinkNotAlive(GroupImpl *pGroupAttached)
+{
+    GroupImpl *pPriGrpAttached = NULL;
+    GroupImpl *pSecGrpAttached = NULL;
+    ConnectorImpl *pPriConnImpl = NULL;
+    ConnectorImpl *pSecConnImpl = NULL;
+
+    if ((pGroupAttached == NULL) ||
+        (pCoupledConnector == NULL) ||
+        (pGroupAttached->singleHeadMultiStreamMode != DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST))
+    {
+        return false;
+    }
+    if (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY)
+    {
+        pSecGrpAttached = pCoupledConnector->getActiveGroupForSST();
+        pPriGrpAttached = pGroupAttached;
+        pSecConnImpl = pCoupledConnector;
+        pPriConnImpl = this;
+    }
+    else if (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)
+    {
+        pPriGrpAttached = pCoupledConnector->getActiveGroupForSST();
+        pSecGrpAttached = pGroupAttached;
+        pPriConnImpl = pCoupledConnector;
+        pSecConnImpl = this;
+    }
+    else
+    {
+        DP_ASSERT(0 && "Invalid 2-SST configuration ");
+        return false;
+    }
+
+    if (!pPriGrpAttached || !pSecGrpAttached || !pPriConnImpl || !pSecConnImpl)
+    {
+        DP_ASSERT(0 && "Invalid 2-SST configuration ");
+        return false;
+    }
+
+    if (!pPriConnImpl->trainLinkOptimizedSingleHeadMultipleSST(pPriGrpAttached))
+    {
+        DP_ASSERT(0 && "not able to configure 2-SST mode on primary link");
+        return false;
+    }
+
+    if (!pSecConnImpl->trainLinkOptimizedSingleHeadMultipleSST(pSecGrpAttached))
+    {
+        DP_ASSERT(0 && "not able to configure 2-SST mode for secondary link");
+        return false;
+    }
+
+    return true;
+}
+
+TriState ConnectorImpl::requestDpTunnelBw(NvU8 requestBw)
+{
+    TriState status = Indeterminate;
+
+    if (!hal->writeDpTunnelRequestedBw(requestBw))
+    {
+        return status;
+    }
+
+    Timeout timeout(this->timer, DP_TUNNEL_REQUEST_BW_MAX_TIME_MS);
+    do
+    {
+        timer->sleep(DP_TUNNEL_REQUEST_BW_POLLING_INTERVAL_MS);
+        status = hal->getDpTunnelBwRequestStatus();
+        if (status != Indeterminate)
+        {
+            break;
+        }
+    } while(timeout.valid());
+
+    return status;
+}
+
+/*!
+ * @brief Interface to allow the client to enable BW allocation support
+ */
+void ConnectorImpl::enableDpTunnelingBwAllocationSupport()
+{
+    // If the regkey is set to disable, return early
+    if (bForceDisableTunnelBwAllocation)
+    {
+        return;
+    }
+
+    hal->enableDpTunnelingBwAllocationSupport();
+}
+
+/*!
+ * @brief Get the maximum tunnel BW required for this connector
+ *
+ * @return Maximum tunnel BW required for this connector, in bps
+ *
+ */
+NvU64 ConnectorImpl::getMaxTunnelBw()
+{
+    return highestAssessedLC.getTotalDataRate() * 8;
+}
+
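+//
+// Worked example of the allocation arithmetic in allocateDpTunnelBw() below
+// (illustrative numbers only): with a granularity of 0.25 Gbps the sink
+// reports granularityMultiplier = 4. A request of bandwidth = 2,000,000,000
+// bps (2 Gbps) then becomes
+//     requestBw = divide_ceil(2e9 * 4, 1e9) = 8
+// and the sink reads that DPCD value back as 8 * 0.25 Gbps = 2 Gbps.
+//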
+/*!
+ * @brief Allocate the requested Tunnel BW
+ *
+ * @param[in] bandwidth     Requested BW in bps
+ * @return boolean to indicate success/failure
+ *
+ */
+bool ConnectorImpl::allocateDpTunnelBw(NvU64 bandwidth)
+{
+    NvU8 estimatedBw = 0;
+    NvU8 granularityMultiplier = 0;
+    NvU8 requestBw = 0;
+    TriState requestStatus = Indeterminate;
+
+    if (!hal->isDpTunnelBwAllocationEnabled())
+    {
+        DP_PRINTF(DP_NOTICE, "Bw allocation not enabled");
+        return false;
+    }
+
+    // Threshold the requested BW to the max link configuration
+    if (bandwidth > getMaxTunnelBw())
+    {
+        bandwidth = getMaxTunnelBw();
+    }
+
+    if (!hal->getDpTunnelEstimatedBw(estimatedBw))
+    {
+        return false;
+    }
+
+    if (!hal->getDpTunnelGranularityMultiplier(granularityMultiplier))
+    {
+        return false;
+    }
+
+    DP_PRINTF(DP_INFO, "Estimated BW: %d Mbps, Requested BW: %d Mbps",
+              ((NvU64) estimatedBw * 1000) / (NvU64) granularityMultiplier,
+              bandwidth / (1000 * 1000));
+
+    //
+    // Granularity is in Gbps. Eg: 0.25 Gbps, 0.5 Gbps, 1 Gbps
+    // bandwidth is in bps
+    // granularityMultiplier is 1/Granularity
+    // bandwidth = DPCD Value * Granularity
+    //           = DPCD Value / granularityMultiplier
+    // DPCD Value = bandwidth * granularityMultiplier
+    //
+    requestBw = (NvU8) divide_ceil(bandwidth * granularityMultiplier, 1000 * 1000 * 1000);
+
+    if (requestBw > estimatedBw)
+    {
+        requestBw = estimatedBw;
+    }
+
+    requestStatus = requestDpTunnelBw(requestBw);
+    // This shouldn't be Indeterminate. The request can succeed or fail. Indeterminate means something else went wrong
+    if (requestStatus == Indeterminate)
+    {
+        DP_PRINTF(DP_ERROR, "Tunneling chip didn't reply for the BW request\n");
+        return false;
+    }
+
+    if (requestStatus == False)
+    {
+        // As per the DP spec, if the allocation fails, the Estimated BW now contains the actual BW. Request estimatedBw now
+        if (!hal->getDpTunnelEstimatedBw(estimatedBw))
+        {
+            return false;
+        }
+
+        if (!hal->getDpTunnelGranularityMultiplier(granularityMultiplier))
+        {
+            return false;
+        }
+
+        DP_PRINTF(DP_INFO, "Failed to get requested BW, requesting updated Estimated BW: %d\n",
+                  ((NvU64) estimatedBw * 1000) / (NvU64) granularityMultiplier);
+
+        requestBw = estimatedBw;
+        requestStatus = requestDpTunnelBw(requestBw);
+        //
+        // This shouldn't be Indeterminate. The request can succeed or fail.
+        // Indeterminate means something else went wrong
+        //
+        if (requestStatus == Indeterminate)
+        {
+            return false;
+        }
+    }
+
+    if (requestStatus == True)
+    {
+        // Convert this back to bps and record the allocated BW
+        this->allocatedDpTunnelBw = ((NvU64) requestBw * 1000 * 1000 * 1000) / (NvU64) granularityMultiplier;
+        this->allocatedDpTunnelBwShadow = 0;
+        DP_PRINTF(DP_INFO, "Allocated BW: %d Mbps", this->allocatedDpTunnelBw / (1000 * 1000));
+    }
+
+    return requestStatus;
+}
+
+bool ConnectorImpl::allocateMaxDpTunnelBw()
+{
+    if (!hal->isDpTunnelBwAllocationEnabled())
+    {
+        return true;
+    }
+
+    NvU64 bandwidth = getMaxTunnelBw();
+    if (!allocateDpTunnelBw(bandwidth))
+    {
+        DP_PRINTF(DP_ERROR, "Failed to allocate DP Tunnel BW. Requested BW: %d Mbps",
+                  bandwidth / (1000 * 1000));
+        return false;
+    }
+
+    return true;
+}
+
+void ConnectorImpl::assessLink(LinkTrainingType trainType)
+{
+    this->bSkipLt = false; // assessLink() should never skip LT, so let's reset it in case it was set.
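+    //
+    // High-level flow of the assessment below (summary for orientation):
+    // enter flush mode, link train at the max (or preferred) config with
+    // fallback handled inside train(), record the result in highestAssessedLC,
+    // restore the optimized configuration via trainLinkOptimized(), and
+    // finally try to reserve tunnel BW with allocateMaxDpTunnelBw().
+    // UEFI, PCON, and Dynamic-MUX systems take the early-out paths below instead.
+    //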
+    bool bLinkStateToggle = false;
+    NvU32 retryCount = 0;
+
+    LinkConfiguration _maxLinkConfig = getMaxLinkConfig();
+    _maxLinkConfig.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(_maxLinkConfig.peakRate);
+    _maxLinkConfig.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(_maxLinkConfig.peakRatePossible);
+
+    // Cap the system max link configuration to preferredLinkConfig
+    if (preferredLinkConfig.isValid() && this->forcePreferredLinkConfig)
+    {
+        _maxLinkConfig = preferredLinkConfig;
+    }
+
+    if (bSkipAssessLinkForPCon)
+    {
+        // Skip assessLink() for PCON. The client should call assessPCONLinkCapability later.
+        return;
+    }
+
+    if (trainType == NO_LINK_TRAINING)
+    {
+        train(preferredLinkConfig, false, trainType);
+        return;
+    }
+
+    if (isLinkQuiesced ||
+        (firmwareGroup && ((GroupImpl *)firmwareGroup)->headInFirmware))
+    {
+        highestAssessedLC = _maxLinkConfig;
+        if (bIsUefiSystem && !hal->getSupportsMultistream())
+        {
+            //
+            // Since this is a UEFI-based system, it can provide the max link config
+            // supported on this panel. Try to get the max supported link config
+            // and update highestAssessedLC; once done, set linkGuessed to false.
+            //
+            unsigned laneCount = 0;
+            NvU64 linkRate = 0;
+            bool bFECEnabled = false;
+            NvU8 linkRateFromUefi, laneCountFromUefi;
+
+            // Query the max link config if provided by UEFI.
+            if ((!linkGuessed) && (main->getMaxLinkConfigFromUefi(linkRateFromUefi, laneCountFromUefi)))
+            {
+                laneCount = laneCountFromUefi;
+
+                // linkRateFromUefi is a DPCD LINK_BW code in 0.27 Gbps units (0x6 = 1.62G ... 0x1E = 8.1G).
+                if (linkRateFromUefi == 0x6)
+                {
+                    linkRate = dp2LinkRate_1_62Gbps;
+                }
+                else if (linkRateFromUefi == 0xA)
+                {
+                    linkRate = dp2LinkRate_2_70Gbps;
+                }
+                else if (linkRateFromUefi == 0x14)
+                {
+                    linkRate = dp2LinkRate_5_40Gbps;
+                }
+                else if (linkRateFromUefi == 0x1E)
+                {
+                    linkRate = dp2LinkRate_8_10Gbps;
+                }
+                else
+                {
+                    DP_ASSERT(0 && "DP> Invalid link rate returned from UEFI!");
+                    linkGuessed = true;
+                }
+
+                if ((highestAssessedLC.peakRate == dp2LinkRate_8_10Gbps) &&
+                    (linkRate != dp2LinkRate_8_10Gbps))
+                {
+                    //
+                    // UEFI does not support HBR3 yet (the support will be added in Volta).
+                    // Mark the link as guessed when the max supported link config is HBR3 and
+                    // the link config assessed by UEFI is not the highest, to force the link
+                    // assessment by the driver.
+                    //
+                    linkGuessed = true;
+                }
+                else
+                {
+                    //
+                    // SW policy change: If the BIOS max link config isn't the same as the panel's
+                    // max, mark DPLib for re-link assessment by marking linkGuessed as true.
+                    // Re-link training is preferable over glitchless modesets and booting at low resolutions.
+                    //
+                    if (laneCount != highestAssessedLC.lanes || linkRate != highestAssessedLC.peakRate)
+                    {
+                        linkGuessed = true;
+                    }
+                    else
+                    {
+                        linkGuessed = false;
+                        // Update software state with the latest link status info
+                        hal->setDirtyLinkStatus(true);
+                        hal->refreshLinkStatus();
+                    }
+                }
+            }
+            else if (!linkGuessed)
+            {
+                // We failed to query the max link config from UEFI. Mark the link as guessed.
+ DP_PRINTF(DP_WARNING, "DP CONN> Failed to query max link config from UEFI."); + linkGuessed = true; + } + + if (!linkGuessed) + { + // Update SW state with UEFI provided max link config + highestAssessedLC = LinkConfiguration (&this->linkPolicy, + laneCount, linkRate, + this->hal->getEnhancedFraming(), + linkUseMultistream(), + false, /* disablePostLTRequest */ + false, /* bEnableFEC */ + false, /* bDisableLTTPR */ + this->getDownspreadDisabled()); + + // Get the currently applied linkconfig and update SW state + getCurrentLinkConfigWithFEC(laneCount, linkRate, bFECEnabled); + linkRate = DATA_RATE_8B_10B_TO_LINK_RATE(linkRate); + + activeLinkConfig = LinkConfiguration (&this->linkPolicy, + laneCount, linkRate, + this->hal->getEnhancedFraming(), + linkUseMultistream(), + false, /* disablePostLTRequest */ + false, /* bEnableFEC */ + false, /* bDisableLTTPR */ + this->getDownspreadDisabled()); + } + } + else + { + linkGuessed = true; + } + + allocateMaxDpTunnelBw(); + return; + } + + if (linkAwaitingTransition) + { + if (activeGroups.isEmpty()) + { + linkState = hal->getSupportsMultistream() ? + DP_TRANSPORT_MODE_MULTI_STREAM : DP_TRANSPORT_MODE_SINGLE_STREAM; + linkAwaitingTransition = false; + bLinkStateToggle = true; + } + else + { + // + // If modesetOrderMitigation isn't on, we need to reassess + // immediately. This is because we will report the connects at the + // same time as the disconnects. IMP Query can be done immediately + // on connects. On the other hand if modeset order mitigation is + // off - all attached devices are going to be reported as + // disconnected and might as well use the old configuration. + // + if (this->policyModesetOrderMitigation && this->modesetOrderMitigation) + return; + } + } + else + { + if (hal->isDpcdOffline()) + linkState = DP_TRANSPORT_MODE_INIT; + } + + // linkState might be different from beginning, update _maxLinkConfig to keep it in sync. + _maxLinkConfig.multistream = this->linkUseMultistream(); + + // + // Bug 1545352: This is done to avoid shutting down a display for freeing up a SOR for LT, + // when no SOR is assigned properly to the connector. It can happen when more + // than max supported number of display(s) is connected. + // It came as a requirement from some clients to avoid glitches when shutting + // down a display to make SOR availability for those monitors. + // + if (main->getSorIndex() == DP_INVALID_SOR_INDEX) + { + highestAssessedLC = _maxLinkConfig; + linkGuessed = true; + return; + } + + LinkConfiguration lConfig = _maxLinkConfig; + + LinkConfiguration preFlushModeActiveLinkConfig = activeLinkConfig; + + if (main->isInternalPanelDynamicMuxCapable()) + { + // Skip Link assessment for Dynamic MUX capable Internal Panel + if ((activeLinkConfig.lanes == lConfig.lanes) && + (activeLinkConfig.peakRate == lConfig.peakRate) && + (!isLinkInD3()) && (!isLinkLost())) + { + linkGuessed = false; + return; + } + } + + // Find the active group(s) + GroupImpl * groupAttached = 0; + for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next) + { + DP_ASSERT(bIsUefiSystem || linkUseMultistream() || (!groupAttached && "Multiple attached heads")); + groupAttached = (GroupImpl * )e; + } + + // Disconnect heads + bool bIsFlushModeEnabled = enableFlush(); + + if (bIsFlushModeEnabled) + { + do + { + // + // if dpcd is offline; avoid assessing. Just consider max. 
+            // keep lowering the lane/rate config till train succeeds
+            //
+            hal->updateDPCDOffline();
+
+            // At the first trial / when retraining, always start with _maxLinkConfig
+            lConfig = _maxLinkConfig;
+            if (hal->isDpcdOffline())
+            {
+                break;
+            }
+            if (!train(lConfig, false /* do not force LT */))
+            {
+                //
+                // Note that train() now handles fallback; activeLinkConfig
+                // has the max link config that was assessed.
+                //
+                lConfig = activeLinkConfig;
+            }
+            // If training completed at _maxLinkConfig, there is no need to retry
+            if (lConfig == _maxLinkConfig || lConfig.lanes == 0)
+            {
+                break;
+            }
+            timer->sleep(40);
+        } while (retryCount++ < WAR_MAX_REASSESS_ATTEMPT);
+
+        if (!activeLinkConfig.isValid())
+        {
+            if (groupAttached && groupAttached->lastModesetInfo.pixelClockHz != 0)
+            {
+                // If there is no active link, force LT to max before disabling flush
+                lConfig = _maxLinkConfig;
+                train(lConfig, true);
+            }
+        }
+        disableFlush();
+    }
+
+    if (lConfig != _maxLinkConfig)
+    {
+        if (lConfig.lanes == 0)
+        {
+            DP_PRINTF(DP_NOTICE, "DP> assessLink(): Device unplugged or offline.");
+        }
+        else
+        {
+            DP_PRINTF(DP_WARNING,
+                      "DP> assessLink(): Failed to reach max link configuration (%d x %d).",
+                      lConfig.lanes, lConfig.peakRate);
+        }
+    }
+
+    if (!hal->isDpcdOffline() && !this->linkUseMultistream() && this->policyAssessLinkSafely)
+    {
+        GroupImpl * groupAttached = this->getActiveGroupForSST();
+        if (groupAttached && groupAttached->isHeadAttached() &&
+            !willLinkSupportModeSST(lConfig, groupAttached->lastModesetInfo))
+        {
+            DP_ASSERT(0 && "DP> Maximum assessed link configuration is not capable of driving the existing raster!");
+
+            bIsFlushModeEnabled = enableFlush();
+            if (bIsFlushModeEnabled)
+            {
+                train(preFlushModeActiveLinkConfig, true);
+                disableFlush();
+            }
+            linkGuessed = true;
+            goto done;
+        }
+    }
+
+    highestAssessedLC = lConfig;
+
+    // It is critical that this restore the original (desired) configuration
+    trainLinkOptimized(lConfig);
+
+    linkGuessed = false;
+
+done:
+
+    NV_DPTRACE_INFO(LINK_ASSESSMENT, highestAssessedLC.peakRate, highestAssessedLC.lanes);
+
+    if (bLinkStateToggle)
+    {
+        DP_PRINTF(DP_NOTICE, "DP> Link state toggled, reading DSC caps now");
+        // Read panel DSC support only if the GPU supports DSC
+        bool bGpuDscSupported;
+        main->getDscCaps(&bGpuDscSupported);
+        if (bGpuDscSupported)
+        {
+            for (Device * i = enumDevices(0); i; i=enumDevices(i))
+            {
+                DeviceImpl * dev = (DeviceImpl *)i;
+                if (dev->getDSCSupport())
+                {
+                    // Read and parse DSC caps only if both the panel and the GPU support DSC
+                    dev->readAndParseDSCCaps();
+                }
+                if (!(dev->processedEdid.WARFlags.bIgnoreDscCap))
+                {
+                    dev->setDscDecompressionDevice(this->bDscCapBasedOnParent);
+                }
+            }
+        }
+    }
+
+    //
+    // Now that we know the max link rate and lane count possible, try to acquire the full BW.
+    // Ideally clients are expected to call into DPLib to update the BW requirements.
+    // This however is a failsafe to ensure that at least some BW is allocated on the DP Tunnel IN.
+    //
+    allocateMaxDpTunnelBw();
+}
+
+bool ConnectorImpl::handleCPIRQ()
+{
+    NvU8 bStatus;
+    HDCPState hdcpState = {0};
+
+    if (!isLinkActive())
+    {
+        DP_PRINTF(DP_WARNING, "DP> CP_IRQ: Ignored with link down");
+        return true;
+    }
+
+    main->configureHDCPGetHDCPState(hdcpState);
+    if (hal->getRxStatus(hdcpState, &bStatus))
+    {
+        NvBool bReAuthReq = NV_FALSE;
+        NvBool bRxIDMsgPending = NV_FALSE;
+        NvBool bHdcp1xReadyPending = NV_FALSE;
+        DP_PRINTF(DP_NOTICE, "DP> CP_IRQ HDCP ver:%s RxStatus:0x%2x HDCP Authenticated:%s Encryption:%s",
+                  hdcpState.HDCP_State_22_Capable ?
"2.2" : "1.x", + bStatus, + hdcpState.HDCP_State_Authenticated ? "YES" : "NO", + hdcpState.HDCP_State_Encryption ? "ON" : "OFF"); + + // Check device if HDCP2.2 capable instead actual encryption status, + if (hdcpState.HDCP_State_22_Capable) + { + if (FLD_TEST_DRF(_DPCD, _HDCP22_RX_STATUS, _REAUTH_REQUEST, _YES, bStatus) || + FLD_TEST_DRF(_DPCD, _HDCP22_RX_STATUS, _LINK_INTEGRITY_FAILURE, _YES, bStatus)) + { + if (this->linkUseMultistream()) + { + // + // Bug 2860192: Some MST hub throw integrity failure before source trigger + // authentication. This may be stale data since Branch is + // doing protocol translation(DP to HDMI), and cannot treat + // as sink's fault. + // For MST, we would not lose anything here by ignoring either + // CP_Irq event since Auth never started after HPD high or + // LinkTraining start. + // + if (isHDCPAuthTriggered) + { + bReAuthReq = NV_TRUE; + } + else + { + DP_PRINTF(DP_NOTICE, "DP>Ignore integrity failure or ReAuth in transition or before AKE_INIT."); + } + } + else + { + bReAuthReq = NV_TRUE; + } + } + + if (FLD_TEST_DRF(_DPCD, _HDCP22_RX_STATUS, _READY, _YES, bStatus)) + { + bRxIDMsgPending = NV_TRUE; + } + } + else + { + if (FLD_TEST_DRF(_DPCD, _HDCP_BSTATUS, _REAUTHENTICATION_REQUESET, _TRUE, bStatus) || + FLD_TEST_DRF(_DPCD, _HDCP_BSTATUS, _LINK_INTEGRITY_FAILURE, _TRUE, bStatus)) + { + bReAuthReq = NV_TRUE; + } + if (FLD_TEST_DRF(_DPCD, _HDCP_BSTATUS, _READY, _TRUE, bStatus)) + { + bHdcp1xReadyPending = NV_TRUE; + } + } + + if (bReAuthReq || bRxIDMsgPending) + { + DP_PRINTF(DP_NOTICE, "DP> CP_IRQ: REAUTHENTICATION/RXIDPENDING REQUEST"); + + if (bReAuthReq) + { + authRetries = 0; + } + + if (!this->linkUseMultistream()) + { + // Get primary connector when multi-stream SST deployed. + GroupImpl *pGroupAttached = getActiveGroupForSST(); + ConnectorImpl *sstPrim = this; + + if (pGroupAttached && + (pGroupAttached->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST) && + (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)) + { + DP_ASSERT(this->pCoupledConnector); + sstPrim = this->pCoupledConnector; + } + + sstPrim->main->configureHDCPRenegotiate(HDCP_DUMMY_CN, + HDCP_DUMMY_CKSV, + !!bReAuthReq, + !!bRxIDMsgPending); + sstPrim->main->configureHDCPGetHDCPState(hdcpState); + isHDCPAuthOn = hdcpState.HDCP_State_Authenticated; + } + else + { + // + // Clear the ECF and issue another authentication and set ECF accordingly. + // The flash monitor is expected here. + // + if (bReAuthReq) + { + main->configureAndTriggerECF(0x0); + } + + main->configureHDCPRenegotiate(HDCP_DUMMY_CN, HDCP_DUMMY_CKSV, + !!bReAuthReq, !!bRxIDMsgPending); + // If reAuth, schedule callback to check state later. + if (bReAuthReq) + { + isHDCPAuthOn = false; + timer->queueCallback(this, &tagHDCPReauthentication, HDCP_AUTHENTICATION_COOLDOWN); + } + } + } + + if (bHdcp1xReadyPending) + { + DP_PRINTF(DP_NOTICE, "DP> CP_IRQ: HDCP1X READY notification."); + + // + // Bug 200305105: Since RM HDCP1.x repeater authentication has polling + // loop to check RxStatus READY event, here CPIRQ handling to read RxStatus + // cause RM next polling read won't get the one-shot READY event anymore and + // repeater authentication fail. The fix is to forward the READY event that + // RM detect to continue authentication stage. 
+            //
+            main->forwardPendingKsvListReady(bHdcp1xReadyPending);
+        }
+        return true;
+    }
+    else
+    {
+        DP_PRINTF(DP_ERROR, "DP> CP_IRQ: RxStatus Read failed.");
+        return false;
+    }
+}
+
+void ConnectorImpl::handleSSC()
+{
+    //
+    // Bit 2 : STREAM_STATUS_CHANGED
+    //         When set to 1, indicates that the source must re-check the Stream
+    //         Status with the QUERY_STREAM_ENCRYPTION_STATUS message.
+    //
+    //  i. Should trigger QueryStreamStatus on all HDCP-enabled streams.
+    //     1. L will change when the KSV list changes (aka new device)
+    //     2. L will change when the encryption state changes
+    //        a. The library should attempt to recover from this bad state as soon as possible.
+    //           If the player catches it on its 1/2Hz callback, it will disrupt CP over the entire topology
+    //     3. The output of QueryStreamStatus must be passed down to RM for validation.
+    //        The status is effectively and indirectly signed by M0, the secret value
+    //        for the immediate link between GPU and first branch.
+    //     4. The stream status validation function in RM will update the encryption state that
+    //        our hardware signs and returns to the player.
+    //        Thus the DisplayDriver should pass any upstream status calls directly to RM.
+    //
+    //  ii. Watch out that the ready bit is cleared after the Binfo read.
+    //
+    DP_PRINTF(DP_NOTICE, "DP> SSC of DP_IRQ");
+
+    //
+    // Enable the SSC process by default, except when the regkey 'DISABLE_SSC' is set to 1 in DD's path.
+    //
+    if (!bDisableSSC)
+    {
+        this->messageManager->clearNotYetSentQSEDownRequest();
+
+        timer->cancelCallback(this, &tagSendQseMessage);
+
+        if (!isLinkActive())
+        {
+            DP_PRINTF(DP_WARNING, "DP> SSC of DP_IRQ: Ignored with link down");
+            return;
+        }
+
+        BInfo bInfo;
+        if (hal->getBinfo(bInfo))
+        {
+            if (bInfo.maxCascadeExceeded || bInfo.maxDevsExceeded)
+            {
+                // Abort the Authentication
+                DP_PRINTF(DP_ERROR, "DP> StreamStatusChanged: Topology limited. Abort Authentication.");
+
+                isHDCPAuthOn = false;
+                isHopLimitExceeded = true;
+
+                for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next)
+                {
+                    GroupImpl * group = (GroupImpl *)i;
+                    if (group->hdcpEnabled)
+                    {
+                        group->hdcpSetEncrypted(false);
+                    }
+                }
+
+                main->configureHDCPAbortAuthentication(KSVTOP);
+                main->configureHDCPDisableAuthentication();
+                return;
+            }
+            HDCPValidateData hdcpValidateData = {0};
+            NvU64 aN;
+
+            main->configureHDCPValidateLink(hdcpValidateData);
+            aN = hdcpValidateData.aN;
+
+            this->qseNonceGenerator->clientIdBuilder(aN);
+
+            if (this->messageManager->isAnyAwaitingQSEReplyDownRequest())
+            {
+                // Mark the QSE request awaiting a reply as invalid.
+                this->bValidQSERequest = false;
+
+                // Queue a callback to check if a pending QSE exists and send the QSE message.
+ timer->queueCallback(this, &tagSendQseMessage, HDCP_SEND_QSE_MESSAGE_COOLDOWN); + } + else + { + this->bValidQSERequest = true; + + for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next) + { + GroupImpl * group = (GroupImpl *) i; + + if (group->hdcpEnabled) + { + group->streamEncryptionStatusDetection->sendQSEMessage(group, qseReason_Ssc); + timer->queueCallback(group, &(group->tagStreamValidation), HDCP_STREAM_VALIDATION_REQUEST_COOLDOWN); + } + } + } + } + else + DP_ASSERT(0 && "Unable to get Binfo"); + } + else + { + DP_PRINTF(DP_WARNING, "DP> StreamStatusChanged: SSC Disabled now."); + } +} + +void ConnectorImpl::handleHdmiLinkStatusChanged() +{ + bool bLinkActive; + NvU32 newFrlRate; + // Check Link status + if (!hal->queryHdmiLinkStatus(&bLinkActive, NULL)) + { + return; + } + if (!bLinkActive) + { + newFrlRate = hal->restorePCONFrlLink(activePConLinkControl.frlHdmiBwMask, + activePConLinkControl.flags.bExtendedLTMode, + activePConLinkControl.flags.bConcurrentMode); + + if (newFrlRate != activePConLinkControl.result.trainedFrlBwMask) + { + activePConLinkControl.result.trainedFrlBwMask = newFrlRate; + activePConLinkControl.result.maxFrlBwTrained = getMaxFrlBwFromMask(newFrlRate); + for (Device *i = enumDevices(0); i; i = enumDevices(i)) + { + DeviceImpl *dev = (DeviceImpl *)i; + if ((dev->activeGroup != NULL) && (dev->plugged)) + { + sink->bandwidthChangeNotification(dev, false); + } + } + } + } +} + +void ConnectorImpl::handleMCCSIRQ() +{ + for (Device *i = enumDevices(0); i; i = enumDevices(i)) + { + DeviceImpl *dev = (DeviceImpl *)i; + if ((dev->activeGroup != NULL) && (dev->plugged)) + { + sink->notifyMCCSEvent(dev); + } + } +} + +void ConnectorImpl::handlePanelReplayError() +{ + hal->readPanelReplayError(); +} + +// +// Checks if the link is still trained. +// Note that these hal registers are ONLY re-read in response to an IRQ. +// Calling this function returns the information from the last interrupt. +// +bool ConnectorImpl::isLinkLost() +{ + if (isLinkActive()) + { + // Bug 200320196: Add DPCD offline check to avoid link-train in unplugged state. + if (!hal->isDpcdOffline()) + { + unsigned laneCount; + NvU64 linkRate; + getCurrentLinkConfig(laneCount, linkRate); + // + // Check SW lane count in RM in case it's disabled beyond DPLib. 
+            // Bug 1933751/2897747
+            //
+            if (laneCount == laneCount_0)
+                return true;
+        }
+
+        // Update the SW cache if required
+        hal->refreshLinkStatus();
+        if (!(hal->isDpInTunnelingSupported() && main->isDpTunnelingHwBugWarEnabled()))
+        {
+            if (!hal->getInterlaneAlignDone())
+                return true;
+        }
+
+        for (unsigned i = 0; i < activeLinkConfig.lanes; i++)
+        {
+            if (!hal->getLaneStatusSymbolLock(i))
+                return true;
+            if (!hal->getLaneStatusClockRecoveryDone(i))
+                return true;
+            if (!hal->getLaneStatusChannelEqualizationDone(i))
+                return true;
+        }
+
+        if (!(hal->isDpInTunnelingSupported() && main->isDpTunnelingHwBugWarEnabled()))
+        {
+            if (!hal->getInterlaneAlignDone())
+                return true;
+        }
+    }
+    return false;
+}
+
+bool ConnectorImpl::isLinkActive()
+{
+    return (activeLinkConfig.isValid());
+}
+
+bool ConnectorImpl::isLinkInD3()
+{
+    return (hal->getPowerState() == PowerStateD3);
+}
+
+bool ConnectorImpl::trainLinkOptimizedSingleHeadMultipleSST(GroupImpl *pGroupAttached)
+{
+    if (!pGroupAttached)
+    {
+        DP_PRINTF(DP_ERROR, "DP-CONN> 2-sst group not valid");
+        return false;
+    }
+
+    if (preferredLinkConfig.isValid())
+    {
+        ConnectorImpl *pSecConImpl = this->pCoupledConnector;
+        if (pSecConImpl->preferredLinkConfig.isValid() &&
+            (preferredLinkConfig.lanes == laneCount_4) && (pSecConImpl->preferredLinkConfig.lanes == laneCount_4) &&
+            (preferredLinkConfig.peakRate == pSecConImpl->preferredLinkConfig.peakRate))
+        {
+            if (willLinkSupportModeSST(preferredLinkConfig, pGroupAttached->lastModesetInfo))
+            {
+                if (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY)
+                {
+                    if (!this->enableFlush())
+                        return false;
+                }
+                preferredLinkConfig.policy.setSkipFallBack(true);
+                if (!train(preferredLinkConfig, false))
+                {
+                    DP_PRINTF(DP_ERROR, "DP-CONN> Unable to set preferred linkconfig on 2-SST display");
+                    return false;
+                }
+                if (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)
+                {
+                    this->disableFlush();
+                }
+                return true;
+            }
+            else
+            {
+                DP_PRINTF(DP_ERROR, "DP-CONN> Invalid 2-SST Preferred link configuration");
+                return false;
+            }
+        }
+    }
+
+    if (pGroupAttached->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)
+    {
+        if (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)
+        {
+            if (this->pCoupledConnector->oneHeadSSTSecPrefLnkCfg.isValid())
+            {
+                bool trainDone = false;
+                this->pCoupledConnector->oneHeadSSTSecPrefLnkCfg.policy.setSkipFallBack(true);
+                if (!train(this->pCoupledConnector->oneHeadSSTSecPrefLnkCfg, false))
+                {
+                    DP_PRINTF(DP_ERROR, "DP-CONN> Unable to set the primary configuration on the secondary display");
+                    trainDone = false;
+                }
+                else
+                {
+                    trainDone = true;
+                }
+                this->disableFlush();
+                return trainDone;
+            }
+        }
+
+    }
+
+    // Link rate order for 2-SST link training; it must be done with 4 lanes.
+    unsigned linkRateList[] = {dp2LinkRate_1_62Gbps, dp2LinkRate_2_70Gbps, dp2LinkRate_5_40Gbps, dp2LinkRate_8_10Gbps};
+    NvU8 linkRateCount = sizeof(linkRateList) / sizeof(unsigned);
+
+    for (NvU8 i = 0; i < linkRateCount; i++)
+    {
+        LinkConfiguration linkCfg = LinkConfiguration(&this->linkPolicy,
+                                                      laneCount_4, linkRateList[i],
+                                                      hal->getEnhancedFraming(),
+                                                      false, // MST
+                                                      false, /* disablePostLTRequest */
+                                                      false, /* bEnableFEC */
+                                                      false, /* bDisableLTTPR */
+                                                      this->getDownspreadDisabled());
+        linkCfg.policy.setSkipFallBack(true);
+        if (willLinkSupportModeSST(linkCfg, pGroupAttached->lastModesetInfo))
+        {
+            if (!this->enableFlush())
+                return false;
+            if (!train(linkCfg, false))
+            {
+                if (i == linkRateCount - 1)
+                {
+                    // Re-train at the max link config
+                    linkCfg = getMaxLinkConfig();
+                    linkCfg.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(linkCfg.peakRate);
+                    linkCfg.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(linkCfg.peakRatePossible);
+                    linkCfg.policy.setSkipFallBack(true);
+                    if (!train(linkCfg, false))
+                    {
+                        DP_ASSERT(0 && "DPCONN> 2-SST setting max link configuration failed ");
+                        break;
+                    }
+                }
+            }
+            else
+            {
+                oneHeadSSTSecPrefLnkCfg = linkCfg;
+                break;
+            }
+        }
+    }
+
+    return true;
+}
+
+bool ConnectorImpl::isNoActiveStreamAndPowerdown()
+{
+    if (activeGroups.isEmpty())
+    {
+        bool bKeepMSTLinkAlive = (this->bKeepLinkAliveMST && activeLinkConfig.multistream);
+        bool bKeepSSTLinkAlive = (this->bKeepLinkAliveSST && !activeLinkConfig.multistream);
+        //
+        // Power saving unless:
+        // - The fake flag is set to true to prevent panel power down here.
+        // - The regkey is set to keep the link alive for MST and we are in MST.
+        // - The regkey is set to keep the link alive for SST and we are in SST.
+        // - bKeepOptLinkAlive is set to true - to avoid link retraining.
+        // - Device discovery is in progress; processNewDevice has an HDCP probe.
+        // - Remote HDCP detection messages are pending - prevent power down so the HDCP DPCD regs stay accessible.
+        // - Keep the link active with a compliance device, as we always do.
+        //
+        if ((!bKeepMSTLinkAlive) &&
+            (!bKeepSSTLinkAlive) &&
+            (!bKeepOptLinkAlive) &&
+            (!bKeepLinkAliveForPCON) &&
+            (!bIsDiscoveryDetectActive) &&
+            (pendingRemoteHdcpDetections == 0) &&
+            (!main->isInternalPanelDynamicMuxCapable())
+           )
+        {
+            powerdownLink();
+
+            // The Sharp panel for HP Valor QHD+ needs 50 ms after D3
+            if (bDelayAfterD3)
+            {
+                timer->sleep(50);
+            }
+        }
+
+        return true;
+    }
+
+    return false;
+}
+
+bool ConnectorImpl::trainLinkOptimized(LinkConfiguration lConfig)
+{
+    LinkConfiguration lowestSelected;         // initializes to 0
+    bool bSkipLowestConfigCheck   = false;    // Force the highest link config in SST
+    bool bSkipRedundantLt         = false;    // Skip redundant LT
+    bool bEnteredFlushMode        = false;
+    bool bLinkTrainingSuccessful  = true;     // status indicating if link training actually succeeded;
+                                              // forced link training is considered a failure
+    bool bTwoHeadOneOrLinkRetrain = false;    // force link re-train if any attached
+                                              // groups are in 2Head1OR mode.
+
+    if (isNoActiveStreamAndPowerdown())
+    {
+        DP_PRINTF(DP_INFO, "Power off the link because no streams are active");
+        return true;
+    }
+
+    //
+    // Split policy.
+    // If we're multistream we always pick the highest link configuration available
+    //   - we don't want to interrupt existing panels to light up new ones.
+    // If we're singlestream we always pick the lowest-power configuration
+    //   - there can't be multiple streams, so the previous limitation doesn't apply.
+    //
+
+    //
+    // Find the active group(s)
+    //
+    GroupImpl * groupAttached = 0;
+    for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next)
+    {
+        DP_ASSERT(bIsUefiSystem || linkUseMultistream() || (!groupAttached && "Multiple attached heads"));
+        groupAttached = (GroupImpl * )e;
+
+        if ((groupAttached->dscModeRequest == DSC_DUAL) && (groupAttached->dscModeActive != DSC_DUAL))
+        {
+            //
+            // If the current modeset group requires 2Head1OR and
+            //  - the group is not active yet (first modeset on the group), or
+            //  - the group is active but not in 2Head1OR mode (the last modeset on the group did not require 2Head1OR),
+            // then re-train the link.
+            // This is because for 2Head1OR mode, we need to set some LT parameters for the slave SOR after
+            // successful LT on the primary SOR, without which a 2Head1OR modeset will lead to a HW hang.
+            //
+            bTwoHeadOneOrLinkRetrain = true;
+            break;
+        }
+    }
+
+    lowestSelected = getMaxLinkConfig();
+    lowestSelected.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(lowestSelected.peakRate);
+    lowestSelected.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(lowestSelected.peakRatePossible);
+
+    if (!activeLinkConfig.multistream)
+    {
+        if (groupAttached &&
+            groupAttached->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)
+        {
+            return trainLinkOptimizedSingleHeadMultipleSST(groupAttached);
+        }
+
+        if (preferredLinkConfig.isValid())
+        {
+            if (activeLinkConfig != preferredLinkConfig)
+            {
+                // If a tool has requested a preferred link config, check if it's possible
+                // and train to it; otherwise choose the normal path.
+                if (groupAttached &&
+                    willLinkSupportModeSST(preferredLinkConfig, groupAttached->lastModesetInfo))
+                {
+                    if (!this->enableFlush())
+                        return false;
+                    if (!train(preferredLinkConfig, false))
+                    {
+                        DP_PRINTF(DP_ERROR, "DP-CONN> Preferred linkconfig could not be applied. Forcing on gpu side.");
+                        train(preferredLinkConfig, true);
+                    }
+                    this->disableFlush();
+                    return true;
+                }
+                else
+                {
+                    DP_PRINTF(DP_ERROR, "DP-CONN> Preferred linkconfig does not support the mode");
+                    return false;
+                }
+            }
+            else
+            {
+                // We are already at preferred. Nothing to do here. Return.
+                return true;
+            }
+        }
+
+        //
+        // This is required to make certain panels work, by training them at the
+        // highest linkConfig in SST mode.
+        //
+        for (Device * i = enumDevices(0); i; i=enumDevices(i))
+        {
+            DeviceImpl * dev = (DeviceImpl *)i;
+            if (dev->forceMaxLinkConfig())
+            {
+                bSkipLowestConfigCheck = true;
+            }
+            if (dev->skipRedundantLt())
+            {
+                bSkipRedundantLt = true;
+            }
+        }
+
+        if (bPConConnected)
+        {
+            // When a PCON is connected, always LT to max to avoid redundant LT.
+            bSkipLowestConfigCheck = true;
+        }
+
+        // If the flag is set, skip downgrading to the lowest possible linkConfig.
+        if (!bSkipLowestConfigCheck)
+        {
+            lConfig = lowestSelected;
+
+            if (groupAttached)
+            {
+                lConfig.enableFEC(this->bFECEnable);
+                // Find the lowest link configuration supporting the mode
+                getValidLowestLinkConfig(lConfig, lowestSelected, groupAttached->lastModesetInfo);
+            }
+        }
+
+        if (lowestSelected.isValid())
+        {
+            //
+            // Check if we are already trained to the desired link config.
+            // Make sure the requested FEC state matches the current FEC state of the link.
+            // If 2Head1OR mode is requested, retrain if the group is not active or the
+            // last modeset on the active group was not in 2Head1OR mode.
+            // bTwoHeadOneOrLinkRetrain tracks this requirement.
+            //
+
+            //
+            // Set linkStatus to be dirty so that when isLinkLost() calls
+            // refreshLinkStatus() it will get real-time status. This fixes an
+            // issue where, during the UEFI-to-driver transition, the LTTPR is
+            // not link trained yet but will be link trained by RM.
+            //
+            hal->setDirtyLinkStatus(true);
+            if ((activeLinkConfig == lowestSelected) &&
+                (!isLinkInD3()) &&
+                (!isLinkLost()) &&
+                (this->bFECEnable == activeLinkConfig.bEnableFEC) &&
+                !bTwoHeadOneOrLinkRetrain)
+            {
+                if (bSkipRedundantLt || main->isInternalPanelDynamicMuxCapable())
+                {
+                    // Skip LT if the links are already trained to the desired config.
+                    DP_PRINTF(DP_NOTICE, "DP-CONN> Skipping redundant LT.");
+                    return true;
+                }
+                else
+                {
+                    // Make sure the link status is still good.
+                    if (activeLinkConfig.lanes && hal->isLinkStatusValid(activeLinkConfig.lanes))
+                    {
+                        // Pass on a flag to the RM ctrl call to skip LT at the RM level.
+                        DP_PRINTF(DP_NOTICE, "DP-CONN> Skipping redundant LT from RM.");
+                        bSkipLt = true;
+                    }
+                }
+            }
+            else
+            {
+                bSkipLt = false;
+            }
+
+            // Enter flush mode / detach the head before LT
+            if (!bSkipLt)
+            {
+                if (!(bEnteredFlushMode = this->enableFlush()))
+                    return false;
+            }
+
+            bLinkTrainingSuccessful = train(lowestSelected, false);
+            //
+            // If LT failed, check if skipLT was marked. If so, clear the flag,
+            // enable flush mode if required (head attached), and try a real LT once.
+            //
+            if (!bLinkTrainingSuccessful && bSkipLt)
+            {
+                bSkipLt = false;
+                if (!(bEnteredFlushMode = this->enableFlush()))
+                    return false;
+                bLinkTrainingSuccessful = train(lowestSelected, false);
+            }
+            if (!bLinkTrainingSuccessful)
+            {
+                LinkConfiguration maxLinkConfig = getMaxLinkConfig();
+                maxLinkConfig.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(maxLinkConfig.peakRate);
+                maxLinkConfig.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(maxLinkConfig.peakRatePossible);
+                //
+                // If the optimized link config fails, try the max link config with fallback.
+                // Note: It's possible some link rates are dynamically invalidated
+                //       during failed link training. That means we can't assume
+                //       maxLinkConfig is always greater than the lowestSelected
+                //       link configuration.
+                //
+                train(maxLinkConfig, false);
+
+                //
+                // Note here that fallback might happen while attempting LT to the max link config.
+                // activeLinkConfig will be set to that passing config.
+                //
+                if (!willLinkSupportModeSST(activeLinkConfig, groupAttached->lastModesetInfo))
+                {
+                    //
+                    // If none of the link configs pass LT, or a fallback link config passed LT
+                    // but cannot support the mode, then we will force the optimized link config
+                    // on the link and mark LT as failed.
+                    //
+
+                    // Force LT really should not fail!
+                    DP_ASSERT(train(lowestSelected, true));
+                    bLinkTrainingSuccessful = false;
+                }
+                else
+                {
+                    //
+                    // If a fallback link config passed LT and can support
+                    // the mode, mark LT as passed.
+                    //
+                    bLinkTrainingSuccessful = true;
+                }
+            }
+        }
+        else
+        {
+            if (groupAttached && groupAttached->isHeadAttached())
+            {
+                if (!(bEnteredFlushMode = this->enableFlush()))
+                    return false;
+            }
+            LinkConfiguration maxLinkConfig = getMaxLinkConfig();
+            maxLinkConfig.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(maxLinkConfig.peakRate);
+            maxLinkConfig.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(maxLinkConfig.peakRatePossible);
+            // The mode wasn't possible at any assessed configuration.
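+            // Force-train to max anyway so the link is left in a defined state;
+            // bLinkTrainingSuccessful is cleared just below, since the mode is
+            // still not driveable at any assessed configuration.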
+            train(maxLinkConfig, true);
+
+            // Mark link training as failed since we forced it
+            bLinkTrainingSuccessful = false;
+        }
+
+        lConfig = activeLinkConfig;
+
+        if (bEnteredFlushMode)
+        {
+            this->disableFlush();
+        }
+
+        // In case this was set, we should reset it to prevent skipping LT next time.
+        bSkipLt = false;
+    }
+    else
+    {
+        bool bRetrainToEnsureLinkStatus;
+
+        //
+        // Multistream:
+        //     If we can't restore all streams after a link train, we need to make sure that
+        //     we set RG_DIV to "slow down" the effective pclk for that head. RG_DIV does give
+        //     us enough room to account for both the HBR2->RBR drop and the 4->1 drop.
+        //     This should allow us to keep the link up and operating at a sane frequency.
+        //     .. thus we'll allow training at any frequency ..
+        //
+
+        // For MST, the setPreferred call runs assessLink() directly.
+        if (preferredLinkConfig.isValid() && (activeLinkConfig != preferredLinkConfig))
+        {
+            if (!train(preferredLinkConfig, false))
+            {
+                DP_PRINTF(DP_ERROR, "DP-CONN> Preferred linkconfig could not be applied. Forcing on gpu side.");
+                train(preferredLinkConfig, true);
+            }
+            return true;
+        }
+
+        //
+        // Make sure the link is physically active and healthy, otherwise re-train.
+        // Make sure the requested FEC state matches the current FEC state of the link.
+        // If 2Head1OR mode is requested, retrain if the group is not active or the last modeset
+        // on the active group was not in 2Head1OR mode. bTwoHeadOneOrLinkRetrain tracks this requirement.
+        //
+        bRetrainToEnsureLinkStatus = (isLinkActive() && isLinkInD3()) ||
+                                     isLinkLost() ||
+                                     (activeLinkConfig.bEnableFEC != this->bFECEnable) ||
+                                     bTwoHeadOneOrLinkRetrain;
+
+        if (bRetrainToEnsureLinkStatus || (!isLinkActive()))
+        {
+            //
+            // Train to the highestAssessedLC for MST cases to avoid redundant
+            // fallback. There is no point in trying to link train at the highest link config
+            // when it failed during the assessment.
+            // train() handles fallback now, so we don't need to step down when LT fails.
+            //
+            LinkConfiguration desired = highestAssessedLC;
+
+            NvU8 retries = DP_LT_MAX_FOR_MST_MAX_RETRIES;
+
+            desired.enableFEC(this->bFECEnable);
+
+            if (bRetrainToEnsureLinkStatus)
+            {
+                bEnteredFlushMode = enableFlush();
+            }
+
+            //
+            // In some cases FEC isn't enabled and the link is not lost (e.g. DP_KEEP_OPT_LINK_ALIVE = 1),
+            // but we're going to enable DSC. We need to clear bSkipLt to retrain the link with FEC,
+            // as bSkipLt was previously set to true while the link was not lost.
+            //
+            if (activeLinkConfig.bEnableFEC != this->bFECEnable)
+            {
+                bSkipLt = false;
+            }
+
+            train(desired, false);
+            if (!activeLinkConfig.isValid())
+            {
+                LinkConfiguration maxLinkConfig = getMaxLinkConfig();
+                maxLinkConfig.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(maxLinkConfig.peakRate);
+                maxLinkConfig.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(maxLinkConfig.peakRatePossible);
+                DP_PRINTF(DP_ERROR, "DPCONN> Unable to train link (at all). Forcing training (picture won't show up)");
+                train(maxLinkConfig, true);
+
+                // Mark link training as failed since we forced it
+                bLinkTrainingSuccessful = false;
+            }
+
+            //
+            // Bug 2354318: On some MST branches, we might see a problem where LT failed during
+            //              assessLink(), but somehow works later. In this case, we should not
+            //              retry since highestAssessedLC is not a valid comparison now.
+            //
+            if (highestAssessedLC.isValid())
+            {
+                while ((highestAssessedLC != activeLinkConfig) && retries > 0)
+                {
+                    // Give it a few more chances.
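+                    // (At most DP_LT_MAX_FOR_MST_MAX_RETRIES extra attempts, per
+                    // the 'retries' counter initialized above.)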
+ train(desired, false); + retries--; + }; + } + + lConfig = activeLinkConfig; + + if (bEnteredFlushMode) + { + disableFlush(); + } + } + } + + return (bLinkTrainingSuccessful && lConfig.isValid()); +} + +bool ConnectorImpl::getValidLowestLinkConfig +( + LinkConfiguration &lConfig, + LinkConfiguration &lowestSelected, + ModesetInfo modesetInfo, + const DscParams *pDscParams +) +{ + bool bIsModeSupported = false; + unsigned i; + LinkConfiguration selectedConfig; + + for (i = 0; i < numPossibleLnkCfg; i++) + { + if ((this->allPossibleLinkCfgs[i].lanes > lConfig.lanes) || + (this->allPossibleLinkCfgs[i].peakRate > lConfig.peakRate)) + { + continue; + } + + // Update enhancedFraming for target config + this->allPossibleLinkCfgs[i].enhancedFraming = lConfig.enhancedFraming; + + selectedConfig = this->allPossibleLinkCfgs[i]; + + selectedConfig.enableFEC(lConfig.bEnableFEC); + if (willLinkSupportModeSST(selectedConfig, modesetInfo)) + { + bIsModeSupported = true; + break; + } + } + + if (bIsModeSupported) + { + lowestSelected = selectedConfig; + lowestSelected.bDisableDownspread = lConfig.bDisableDownspread; + } + else + { + // Invalidate link config if mode is not possible at all + lowestSelected.lanes = 0; + } + + return bIsModeSupported; +} + +bool ConnectorImpl::postLTAdjustment(const LinkConfiguration & lConfig, bool force) +{ + NvU8 lastVoltageSwingLane[DP_MAX_LANES] = {0}; + NvU8 lastPreemphasisLane[DP_MAX_LANES] = {0}; + NvU8 lastTrainingScoreLane[DP_MAX_LANES] = {0}; + NvU8 lastPostCursor[DP_MAX_LANES] = {0}; + NvU8 currVoltageSwingLane[DP_MAX_LANES] = {0}; + NvU8 currPreemphasisLane[DP_MAX_LANES] = {0}; + NvU8 currTrainingScoreLane[DP_MAX_LANES] = {0}; + NvU8 currPostCursor[DP_MAX_LANES] = {0}; + NvU32 updatedLaneSettings[DP_MAX_LANES] = {0}; + NvU8 adjReqCount = 0; + NvU64 startTime; + LinkConfiguration linkConfig = lConfig; + + // Cache Voltage Swing and Preemphasis value just after Link training + if (!hal->readTraining(lastVoltageSwingLane, + lastPreemphasisLane, + lastTrainingScoreLane, + lastPostCursor, + (NvU8)activeLinkConfig.lanes)) + { + DP_PRINTF(DP_ERROR, "DPCONN> Post Link Training : Unable to read current training values"); + } + + if (hal->getTrainingPatternSelect() != TRAINING_DISABLED) + { + DP_PRINTF(DP_WARNING, "DPCONN> Post Link Training : Training pattern is not disabled."); + } + + // + // We have cleared DPCD 102h + // Now hardware will automatically send the idle pattern + // + startTime = timer->getTimeUs(); + + do + { + if (!hal->getIsPostLtAdjRequestInProgress()) + { + // Clear POST_LT_ADJ_REQ_GRANTED bit and start normal AV transmission + hal->setPostLtAdjustRequestGranted(false); + return true; + } + + // Wait for 2ms + Timeout timeout(timer, 2); + + // check if DPCD 00206h~00207h change has reached to ADJ_REQ_LIMIT + if (adjReqCount > DP_POST_LT_ADJ_REQ_LIMIT) + { + // Clear POST_LT_ADJ_REQ_GRANTED bit and start normal AV transmission + hal->setPostLtAdjustRequestGranted(false); + return true; + } + + if (!hal->readTraining(currVoltageSwingLane, + currPreemphasisLane, + currTrainingScoreLane, + currPostCursor, + (NvU8)activeLinkConfig.lanes)) + { + DP_PRINTF(DP_ERROR, "DPCONN> Post Link Training : Unable to read current training values"); + } + else + { + if (!hal->isLaneSettingsChanged(lastVoltageSwingLane, + currVoltageSwingLane, + lastPreemphasisLane, + currPreemphasisLane, + (NvU8)activeLinkConfig.lanes)) + { + // Check if we have exceeded DP_POST_LT_ADJ_REQ_TIMER (200 ms) + if ((timer->getTimeUs() - startTime) > DP_POST_LT_ADJ_REQ_TIMER) + { + 
DP_PRINTF(DP_ERROR, "DPCONN> Post Link Training : DP_POST_LT_ADJ_REQ_TIMER is timed out."); + // Clear POST_LT_ADJ_REQ_GRANTED bit and start normal AV transmission + hal->setPostLtAdjustRequestGranted(false); + return true; + } + } + else + { + adjReqCount++; + + // Clear ADJ_REQ_TIMER + startTime = timer->getTimeUs(); + + // Change RX drive settings according to DPCD 00206h & 00207h + if (!hal->setTrainingMultiLaneSet((NvU8)activeLinkConfig.lanes, + currVoltageSwingLane, + currPreemphasisLane)) + { + DP_PRINTF(DP_ERROR, "DPCONN> Post Link Training : Failed to set RX drive setting according to DPCD 00206h & 00207h."); + } + + // Populate updated lane settings for currently active lanes + populateUpdatedLaneSettings(currVoltageSwingLane, currPreemphasisLane, updatedLaneSettings); + + // Change TX drive settings according to DPCD 00206h & 00207h + if (!setLaneConfig(activeLinkConfig.lanes, updatedLaneSettings)) + { + DP_PRINTF(DP_ERROR, "DPCONN> Post Link Training : Failed to set TX drive setting according to DPCD 00206h & 00207h."); + } + + // Update last Voltage Swing and Preemphasis values + if (!hal->readTraining(lastVoltageSwingLane, + lastPreemphasisLane, + lastTrainingScoreLane, + lastPostCursor, + (NvU8)activeLinkConfig.lanes)) + { + DP_PRINTF(DP_ERROR, "DPCONN> Post Link Training : Unable to read current training values"); + } + } + } + + // Mark the linkStatus as dirty since we need to retrain in case Rx has lost sync + hal->setDirtyLinkStatus(true); + }while (!isLinkLost()); + + // Clear POST_LT_ADJ_REQ_GRANTED bit + hal->setPostLtAdjustRequestGranted(false); + + if (isLinkLost()) + { + if (bNoFallbackInPostLQA && (retryLT < WAR_MAX_RETRAIN_ATTEMPT)) + { + // + // A monitor may lose link sometimes during assess link or link training. + // So retry for 3 times before fallback to lower config + // + retryLT++; + train(lConfig, force); + return true; + } + // + // If the link is not alive, then we need to retrain at a lower config + // There is no reason to try at the same link configuration. Follow the + // fallback policy that is followed for CR phase of LT + // + if (!linkConfig.lowerConfig()) + { + DP_PRINTF(DP_ERROR, "DPCONN> Post Link Training : Already at the lowest link rate. 
Cannot reduce further"); + return false; + } + train(linkConfig, force); + } + else if (bNoFallbackInPostLQA && (retryLT != 0)) + { + retryLT = 0; + } + + return true; +} + +void ConnectorImpl::populateUpdatedLaneSettings(NvU8* voltageSwingLane, NvU8* preemphasisLane, NvU32 *data) +{ + NvU32 laneIndex; + + for (laneIndex = 0; laneIndex < activeLinkConfig.lanes; laneIndex++) + { + switch (voltageSwingLane[laneIndex]) + { + case driveCurrent_Level0: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _DRIVECURRENT, _LEVEL0, data[laneIndex]); + break; + + case driveCurrent_Level1: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _DRIVECURRENT, _LEVEL1, data[laneIndex]); + break; + + case driveCurrent_Level2: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _DRIVECURRENT, _LEVEL2, data[laneIndex]); + break; + + case driveCurrent_Level3: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _DRIVECURRENT, _LEVEL3, data[laneIndex]); + break; + } + + switch (preemphasisLane[laneIndex]) + { + case preEmphasis_Level1: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _PREEMPHASIS, _LEVEL1, data[laneIndex]); + break; + + case preEmphasis_Level2: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _PREEMPHASIS, _LEVEL2, data[laneIndex]); + break; + + case preEmphasis_Level3: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _PREEMPHASIS, _LEVEL3, data[laneIndex]); + break; + } + } +} + +bool ConnectorImpl::validateLinkConfiguration(const LinkConfiguration & lConfig) +{ + NvU64 linkRate10M = lConfig.peakRate; + + if (!IS_VALID_LANECOUNT(lConfig.lanes)) + { + DP_PRINTF(DP_ERROR, "DPCONN> Invalid Lane Count=%d", lConfig.lanes); + return false; + } + + if (lConfig.lanes > hal->getMaxLaneCount()) + { + DP_PRINTF(DP_ERROR, "DPCONN> Requested Lane Count=%d is larger than sinkMaxLaneCount=%d", + lConfig.lanes, hal->getMaxLaneCount()); + return false; + } + + if (lConfig.lanes != 0) + { + if (!IS_VALID_LINKBW_10M(linkRate10M)) + { + DP_PRINTF(DP_ERROR, "DPCONN> Requested link rate=%d is not valid", linkRate10M); + return false; + } + + if (linkRate10M > hal->getMaxLinkRate()) + { + DP_PRINTF(DP_ERROR, "DPCONN> Requested link rate=%d is larger than sinkMaxLinkRate=%d", + linkRate10M, hal->getMaxLinkRate()); + return false; + } + + if (IS_INTERMEDIATE_LINKBW_10M(linkRate10M)) + { + NvU16 *ilrTable; + NvU32 i; + if (!hal->isIndexedLinkrateEnabled()) + { + DP_PRINTF(DP_ERROR, "DPCONN> Indexed Link Rate=%d is Not Enabled in Sink", linkRate10M); + return false; + } + + ilrTable = hal->getLinkRateTable(); + for (i = 0; i < NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES; i++) + { + // + // linkRate10M is in 10M convention and ilrTable entries are the values read from DPCD in 200Kunits + // Convert the ilrTable value to 10M convention before the comparison + // + if (LINK_RATE_200KHZ_TO_10MHZ(ilrTable[i]) == linkRate10M) + break; + if (ilrTable[i] == 0) + { + DP_PRINTF(DP_ERROR, "DPCONN> Indexed Link Rate=%d is Not Found", linkRate10M); + return false; + } + } + + if (i == NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES) + { + DP_PRINTF(DP_ERROR, "DPCONN> Indexed Link Rate=%d is Not Found", linkRate10M); + return false; + } + } + } + return true; +} + +bool ConnectorImpl::train(const LinkConfiguration & lConfig, bool force, + LinkTrainingType trainType) +{ + LinkTrainingType preferredTrainingType = trainType; + bool result = true; + + // Validate link config against caps + if (!force && !validateLinkConfiguration(lConfig)) + { + return false; + } + + if (!lConfig.multistream) + { + for 
(Device * i = enumDevices(0); i; i=enumDevices(i)) + { + DeviceImpl * dev = (DeviceImpl *)i; + if (dev->powerOnMonitorBeforeLt() && lConfig.lanes != 0) + { + // + // Some panels expose that they are in D0 even when they are not. + // Explicit write to DPCD 0x600 is required to wake up such panel before LT. + // + hal->setPowerState(PowerStateD0); + } + } + // + // Enable special LT only when regkey 'ENABLE_FAST_LINK_TRAINING' set + // to 1 in DD's path. + // + if (bEnableFastLT) + { + // If the panel can support NLT or FLT, then let's try it first + if (hal->getNoLinkTraining()) + preferredTrainingType = NO_LINK_TRAINING; + else if (hal->getSupportsNoHandshakeTraining()) + preferredTrainingType = FAST_LINK_TRAINING; + } + } + + // + // Don't set the stream if we're shutting off the link + // or forcing the config + // + if (!force && lConfig.lanes != 0) + { + if (isLinkActive()) + { + if (activeLinkConfig.multistream != lConfig.multistream) + { + activeLinkConfig.lanes = 0; + rawTrain(activeLinkConfig, true, NORMAL_LINK_TRAINING); + } + } + + if (AuxRetry::ack != hal->setMultistreamLink(lConfig.multistream)) + { + DP_PRINTF(DP_WARNING, "DP> Failed to enable multistream mode on current link"); + } + } + + // + // Read link rate table before link-train to assure on-board re-driver + // knows link rate going to be set in link rate table. + // If eDP's power has been shutdown here, don't query Link rate table, + // else it will cause panel wake up. + // + if (hal->isIndexedLinkrateEnabled() && (lConfig.lanes != 0)) + { + hal->getRawLinkRateTable(); + } + + activeLinkConfig = lConfig; + result = rawTrain(lConfig, force, preferredTrainingType); + + // If NLT or FLT failed, then fallback to normal LT again + if (!result && (preferredTrainingType != NORMAL_LINK_TRAINING)) + result = rawTrain(lConfig, force, NORMAL_LINK_TRAINING); + + if (!result) + activeLinkConfig.lanes = 0; + else + { + if (activeLinkConfig.multistream) + { + // Total slot is 64, reserve slot 0 for header + maximumSlots = 63; + freeSlots = maximumSlots; + firstFreeSlot = 1; + } + bNoLtDoneAfterHeadDetach = false; + } + + if (!force && result) + this->hal->setDirtyLinkStatus(true); + + // We don't need post LQA while powering down the lanes. + if ((lConfig.lanes != 0) && hal->isPostLtAdjustRequestSupported() && result) + { + result = postLTAdjustment(activeLinkConfig, force); + } + + if((lConfig.lanes != 0) && result && activeLinkConfig.bEnableFEC + ) + { + // + // Extended latency from link-train end to FEC enable pattern + // to avoid link lost or blank screen with Synaptics branch. + // (Bug 2561206) + // + if (LT2FecLatencyMs) + { + timer->sleep(LT2FecLatencyMs); + } + + result = main->configureFec(true /*bEnableFec*/); + DP_ASSERT(result); + } + + // + // Do not compare bEnableFEC here. In DDS case FEC might be requested but + // not performed in RM. + // + if ((lConfig.lanes != activeLinkConfig.lanes) || + (lConfig.peakRate != activeLinkConfig.peakRate) || + (lConfig.enhancedFraming != activeLinkConfig.enhancedFraming) || + (lConfig.multistream != activeLinkConfig.multistream)) + { + // fallback happens, returns fail to make sure clients notice it. 
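+        // (For example: lConfig asked for 4 lanes at HBR2 but the link came up
+        // at a fallback config; the trained link is usable, but we still report
+        // failure so the client can re-evaluate the mode against it.)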
+        result = false;
+    }
+
+    if (result)
+    {
+        // Update the PSR link cache on successful LT
+        this->psrLinkConfig = activeLinkConfig;
+    }
+
+    return result;
+}
+
+void ConnectorImpl::sortActiveGroups(bool ascending)
+{
+    List activeSortedGroups;
+
+    while(!activeGroups.isEmpty())
+    {
+        ListElement * e = activeGroups.begin();
+        GroupImpl * g = (GroupImpl *)e;
+
+        GroupImpl * groupToInsertBefore = NULL;
+
+        // Remove from the active group list for sorting
+        activeGroups.remove(g);
+
+        for (ListElement *e1 = activeSortedGroups.begin(); e1 != activeSortedGroups.end(); e1 = e1->next)
+        {
+            GroupImpl * g1 = (GroupImpl *)e1;
+            if ((g->headIndex < g1->headIndex) ||
+                ((g->headIndex == g1->headIndex) &&
+                 ((ascending && (g->singleHeadMultiStreamID < g1->singleHeadMultiStreamID)) ||
+                  (!ascending && (g->singleHeadMultiStreamID > g1->singleHeadMultiStreamID)))
+                ))
+            {
+                groupToInsertBefore = g1;
+                break;
+            }
+        }
+
+        if (NULL == groupToInsertBefore)
+        {
+            activeSortedGroups.insertBack(g);
+        }
+        else
+        {
+            activeSortedGroups.insertBefore(groupToInsertBefore, g);
+        }
+    }
+
+    // Repopulate the active group list
+    while (!activeSortedGroups.isEmpty())
+    {
+        ListElement * e = activeSortedGroups.begin();
+
+        // Remove from the sorted list
+        activeSortedGroups.remove(e);
+        // Insert back into the active group list
+        activeGroups.insertBack(e);
+    }
+}
+
+bool ConnectorImpl::enableFlush()
+{
+    bool bHeadAttached = false;
+
+    if (activeGroups.isEmpty())
+        return true;
+
+    //
+    // For SST, the head should be attached to the single group; for MST, at least
+    // one group should have a head attached before calling flush on the SOR.
+    //
+    if (!this->linkUseMultistream())
+    {
+        GroupImpl * activeGroup = this->getActiveGroupForSST();
+
+        if (activeGroup && !activeGroup->isHeadAttached() && intransitionGroups.isEmpty())
+        {
+            DP_PRINTF(DP_ERROR, "DPCONN> SST-Flush mode should not be called when head is not attached. Returning early without enabling flush");
+            return true;
+        }
+    }
+    else
+    {
+        for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next)
+        {
+            GroupImpl * group = (GroupImpl *)e;
+            if (group->isHeadAttached())
+            {
+                bHeadAttached = true;
+                break;
+            }
+        }
+
+        if (!bHeadAttached)
+        {
+            DP_PRINTF(DP_ERROR, "DPCONN> MST-Flush mode should not be called when head is not attached. Returning early without enabling flush");
+            return true;
+        }
+    }
+
+    if (!main->setFlushMode())
+        return false;
+
+    //
+    // Enabling flush mode shuts down the link:
+    // 1. Reset activeLinkConfig to indicate the link is now lost.
+    // 2. The next link training call must not skip programming the hardware.
+    //    Otherwise, EVO will hang if the head is still active when flush mode is disabled.
+ // + activeLinkConfig = LinkConfiguration(); + bSkipLt = false; + + sortActiveGroups(false); + + for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next) + { + GroupImpl * g = (GroupImpl *)e; + + if (!this->linkUseMultistream()) + { + GroupImpl * activeGroup = this->getActiveGroupForSST(); + DP_ASSERT(g == activeGroup); + } + + bool skipPreLinkTraining = (((g->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST) || + (g->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)) && + (g->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)); + if (!skipPreLinkTraining) + main->preLinkTraining(g->headIndex); + + beforeDeleteStream(g, true); + if (this->linkUseMultistream()) + { + main->configureTriggerSelect(g->headIndex, g->singleHeadMultiStreamID); + main->triggerACT(); + } + afterDeleteStream(g); + } + + return true; +} + +// +// This is a wrapper for call to mainlink::train(). +bool ConnectorImpl::rawTrain(const LinkConfiguration & lConfig, bool force, LinkTrainingType linkTrainingType) +{ + { + // + // this is the common path + // activeLinkConfig will be updated in main->train() in case fallback happens. + // if the link config sent has disable Post LT request set, we send false for corresponding flag + // + if (lConfig.disablePostLTRequest) + { + return (main->train(lConfig, force, linkTrainingType, &activeLinkConfig, bSkipLt, false, + hal->getPhyRepeaterCount())); + } + return (main->train(lConfig, force, linkTrainingType, &activeLinkConfig, bSkipLt, hal->isPostLtAdjustRequestSupported(), + hal->getPhyRepeaterCount())); + } +} + +// +// Timeslot management +// + +bool ConnectorImpl::deleteAllVirtualChannels() +{ + // Clear the payload table + hal->payloadTableClearACT(); + if (!hal->payloadAllocate(0, 0, 63)) + { + DP_PRINTF(DP_WARNING, "DPCONN> Payload table could not be cleared"); + } + + // send clear_payload_id_table + DP_PRINTF(DP_NOTICE, "DPCONN> Sending CLEAR_PAYLOAD_ID_TABLE broadcast"); + + for (unsigned retries = 0 ; retries < 7; retries++) + { + ClearPayloadIdTableMessage clearPayload; + NakData nack; + + if (this->messageManager->send(&clearPayload, nack)) + return true; + } + + // we should not have reached here. 
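+    // (All seven CLEAR_PAYLOAD_ID_TABLE attempts above were either NAK'd or timed out.)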
+    DP_ASSERT(0 && "DPCONN> CLEAR_PAYLOAD_ID failed!");
+    return false;
+}
+
+void ConnectorImpl::clearTimeslices()
+{
+    for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next)
+    {
+        GroupImpl * group = (GroupImpl *)((Group *)i);
+        group->timeslot.PBN = 0;
+        group->timeslot.count = 0;
+        group->timeslot.begin = 1;
+        group->timeslot.hardwareDirty = false;
+    }
+
+    maximumSlots = 63;
+    freeSlots = maximumSlots;
+}
+
+void ConnectorImpl::freeTimeslice(GroupImpl * targetGroup)
+{
+    // compact timeslot allocation
+    for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next)
+    {
+        GroupImpl * group = (GroupImpl *)e;
+
+        if (group->timeslot.begin > targetGroup->timeslot.begin) {
+            group->timeslot.begin -= targetGroup->timeslot.count;
+            group->timeslot.hardwareDirty = true;
+
+            //
+            // Enable TRIGGER_ALL on the SFs corresponding to the single head MST driving heads,
+            // as both pipelines must absorb the effect of the shift caused by deactivating
+            // an MST display driven through the same SOR.
+            //
+            if ((DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST == group->singleHeadMultiStreamMode) &&
+                (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY == group->singleHeadMultiStreamID))
+            {
+                main->configureTriggerAll(group->headIndex, true);
+            }
+        }
+    }
+
+    // mark stream as free
+    freeSlots += targetGroup->timeslot.count;
+    targetGroup->timeslot.PBN = 0;
+    targetGroup->timeslot.count = 0;
+    targetGroup->timeslot.hardwareDirty = true;
+}
+
+bool ConnectorImpl::checkIsModePossibleMST(GroupImpl *targetGroup)
+{
+    if (this->isFECSupported())
+    {
+        if (!isModePossibleMSTWithFEC(activeLinkConfig,
+                                      targetGroup->lastModesetInfo,
+                                      &targetGroup->timeslot.watermarks))
+        {
+            DP_ASSERT(0 && "DisplayDriver bug! This mode is not possible at any "
+                           "link configuration. It should have been rejected at mode filtering time!");
+            return false;
+        }
+    }
+    else
+    {
+        if (!isModePossibleMST(activeLinkConfig,
+                               targetGroup->lastModesetInfo,
+                               &targetGroup->timeslot.watermarks))
+        {
+            DP_ASSERT(0 && "DisplayDriver bug! This mode is not possible at any "
+                           "link configuration. It should have been rejected at mode filtering time!");
+            return false;
+        }
+    }
+    return true;
+}
+
+bool ConnectorImpl::allocateTimeslice(GroupImpl * targetGroup)
+{
+    unsigned base_pbn, slot_count, slots_pbn;
+    int firstSlot = firstFreeSlot;
+
+    DP_ASSERT(isLinkActive());
+
+    if (!checkIsModePossibleMST(targetGroup))
+        return false;
+
+    activeLinkConfig.pbnRequired(targetGroup->lastModesetInfo, base_pbn, slot_count, slots_pbn);
+
+    applyTimeslotWAR(slot_count);
+
+    // Check for available timeslots
+    if (slot_count > freeSlots)
+    {
+        DP_PRINTF(DP_ERROR, "DP-TS> Failed to allocate timeslot!! Not enough free slots. slot_count: %d, freeSlots: %d",
+                  slot_count, freeSlots);
+        return false;
+    }
+
+    for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next)
+    {
+        GroupImpl * group = (GroupImpl *)i;
+
+        if (group->timeslot.count != 0 &&
+            (group->timeslot.begin + group->timeslot.count) >= firstSlot)
+        {
+            firstSlot = group->timeslot.begin + group->timeslot.count;
+        }
+    }
+
+    // Already allocated?
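+    // Illustrative example: with two active groups already holding slots
+    // [1..20] and [21..40], firstSlot resolves to 41, so the new allocation
+    // starts at slot 41 and needs slot_count <= 23 of the 63 slots (matching
+    // freeSlots). The assert below guards against allocating on top of a
+    // group that still holds slots.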
+    DP_ASSERT(!targetGroup->timeslot.count && "Reallocation of stream that is already present");
+
+    targetGroup->timeslot.count = slot_count;
+    targetGroup->timeslot.begin = firstSlot;
+    targetGroup->timeslot.PBN = base_pbn;
+    targetGroup->timeslot.hardwareDirty = true;
+    freeSlots -= slot_count;
+
+    return true;
+}
+
+void ConnectorImpl::flushTimeslotsToHardware()
+{
+    for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next)
+    {
+        GroupImpl * group = (GroupImpl *)i;
+
+        if (group->timeslot.hardwareDirty)
+        {
+            group->timeslot.hardwareDirty = false;
+            bool bEnable2Head1Or = false;
+
+            if ((group->lastModesetInfo.mode == DSC_DUAL) ||
+                (group->lastModesetInfo.mode == DSC_DROP))
+            {
+                bEnable2Head1Or = true;
+            }
+
+            main->configureMultiStream(group->headIndex,
+                                       group->timeslot.watermarks.hBlankSym,
+                                       group->timeslot.watermarks.vBlankSym,
+                                       group->timeslot.begin,
+                                       group->timeslot.begin + group->timeslot.count - 1,
+                                       group->timeslot.PBN,
+                                       activeLinkConfig.PBNForSlots(group->timeslot.count),
+                                       group->colorFormat,
+                                       group->singleHeadMultiStreamID,
+                                       group->singleHeadMultiStreamMode,
+                                       bAudioOverRightPanel,
+                                       bEnable2Head1Or);
+        }
+    }
+}
+
+void ConnectorImpl::beforeDeleteStream(GroupImpl * group, bool forFlushMode)
+{
+    //
+    // During flush entry, if the link is not trained, retrain
+    // the link so that ACT can be ack'd by the sink.
+    // (ACK is only for the multistream case)
+    //
+    // Note: A re-training might be required even in cases where the link is not
+    // alive in the non-flush mode case (e.g. beforeDeleteStream called from NAB).
+    // However, we cannot simply re-train in such cases without ensuring that the
+    // head is not actively driving pixels, and this needs to be handled
+    // differently.
+    //
+    if (forFlushMode && linkUseMultistream())
+    {
+        if (isLinkLost())
+        {
+            train(highestAssessedLC, false);
+        }
+    }
+
+    // check if this is a firmware group
+    if (group && group->isHeadAttached() && group->headInFirmware)
+    {
+        // check if MST is enabled and we have initialized the message manager
+        if (hal->getSupportsMultistream() && messageManager)
+        {
+            // Firmware group can be assumed to be taking up all 63 slots.
+            group->timeslot.begin = 1;
+            group->timeslot.count = 63;
+            this->freeSlots = 0;
+
+            // 1. clear the timeslots using CLEAR_PAYLOAD_TABLE
+            // 2. clear gpu timeslots.
+            if (!deleteAllVirtualChannels())
+                DP_ASSERT(0 && "Failed to delete VCs. Vbios state in branch could not be cleaned.");
+
+            freeTimeslice(group);
+            flushTimeslotsToHardware();
+            group->bWaitForDeAllocACT = false;
+            group->setTimeslotAllocated(false);
+
+            return;
+        }
+    }
+
+    if (linkUseMultistream() &&
+        group && group->isHeadAttached() &&
+        (group->isTimeslotAllocated() ||
+         group->timeslot.count ||
+         group->timeslot.hardwareDirty))
+    {
+        // Detach all the panels from payload
+        for (Device * d = group->enumDevices(0); d; d = group->enumDevices(d))
+        {
+            group->update(d, false);
+        }
+
+        freeTimeslice(group);
+        flushTimeslotsToHardware();
+        group->bWaitForDeAllocACT = true;
+        group->setTimeslotAllocated(false);
+
+        // Delete the stream
+        hal->payloadTableClearACT();
+        hal->payloadAllocate(group->streamIndex, group->timeslot.begin, 0);
+
+        //
+        // If entering flush mode, enable RG (with immediate effect). Otherwise, for detaching a display,
+        // if not single head MST, RG need not be enabled.
For single head MST streams deletion, enable + // RG at loadv + // + if (forFlushMode || + ((group->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST) && + (group->singleHeadMultiStreamID != DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY))) + { + main->controlRateGoverning(group->headIndex, true/*enable*/, forFlushMode /*Immediate/loadv*/); + } + } +} + +void ConnectorImpl::afterDeleteStream(GroupImpl * group) +{ + if (linkUseMultistream() && group->isHeadAttached() && group->bWaitForDeAllocACT) + { + if (!hal->payloadWaitForACTReceived()) + { + DP_PRINTF(DP_ERROR, "DP> Delete stream failed. Device did not acknowledge stream deletion ACT!"); + DP_ASSERT(0); + } + } +} + +void ConnectorImpl::afterAddStream(GroupImpl * group) +{ + // Skip this as there is no timeslot allocation + if (!linkUseMultistream() || !group->timeslot.count) + return; + + if (group->bDeferredPayloadAlloc) + { + DP_ASSERT(addStreamMSTIntransitionGroups.contains(group)); + hal->payloadTableClearACT(); + hal->payloadAllocate(group->streamIndex, group->timeslot.begin, group->timeslot.count); + main->triggerACT(); + } + group->bDeferredPayloadAlloc = false; + + if (addStreamMSTIntransitionGroups.contains(group)) { + addStreamMSTIntransitionGroups.remove(group); + } + + if (!hal->payloadWaitForACTReceived()) + { + DP_PRINTF(DP_ERROR, "ACT has not been received.Triggering ACT once more"); + DP_ASSERT(0); + + // + // Bug 1334070: During modeset for cloned displays on certain GPU family, + // ACT triggered during SOR attach is not being received due to timing issues. + // Also DP1.2 spec mentions that there is no harm in sending the ACT + // again if there is no change in payload table. Hence triggering ACT once more here + // + main->triggerACT(); + if (!hal->payloadWaitForACTReceived()) + { + DP_PRINTF(DP_ERROR, "DP-TS> Downstream device did not receive ACT during stream re-add."); + return; + } + } + + for (Device * d = group->enumDevices(0); d; d = group->enumDevices(d)) + { + group->update((DeviceImpl *)d, true); + + lastDeviceSetForVbios = d; + } + + // Disable rate gov at the end of adding all streams + if ((DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST != group->singleHeadMultiStreamMode) || + (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_MAX == group->singleHeadMultiStreamID)) + { + main->controlRateGoverning(group->headIndex, false/*disable*/, false/*loadv*/); + } + + group->updateVbiosScratchRegister(lastDeviceSetForVbios); +} + +bool ConnectorImpl::beforeAddStream(GroupImpl * group, bool test, bool forFlushMode) +{ + bool res = false; + if (linkUseMultistream()) + { + res = beforeAddStreamMST(group, test, forFlushMode); + } + else + { + // SST + Watermark water; + bool bEnable2Head1Or = false; + bool bIsModePossible = false; + + if ((group->lastModesetInfo.mode == DSC_DUAL) || + (group->lastModesetInfo.mode == DSC_DROP)) + { + bEnable2Head1Or = true; + } + + if (this->isFECSupported()) + { + bIsModePossible = isModePossibleSSTWithFEC(activeLinkConfig, + group->lastModesetInfo, + &water, + main->hasIncreasedWatermarkLimits()); + } + else + { + bIsModePossible = isModePossibleSST(activeLinkConfig, + group->lastModesetInfo, + &water, + main->hasIncreasedWatermarkLimits()); + } + + if (bIsModePossible) + { + if (group->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST) + { + if (group->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY) + { + // + // configure sf parameters after secondary linktraining on primary link. 
+ // + main->configureSingleStream(group->headIndex, + water.hBlankSym, + water.vBlankSym, + activeLinkConfig.enhancedFraming, + water.tuSize, + water.waterMark, + group->colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + group->singleHeadMultiStreamMode, + bAudioOverRightPanel); + } + } + else + { + main->configureSingleStream(group->headIndex, + water.hBlankSym, + water.vBlankSym, + activeLinkConfig.enhancedFraming, + water.tuSize, + water.waterMark, + group->colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + false /*bEnableAudioOverRightPanel*/, + bEnable2Head1Or); + } + } + else + { + if (test) + { + main->configureSingleStream(group->headIndex, + water.hBlankSym, + water.vBlankSym, + activeLinkConfig.enhancedFraming, + water.tuSize, + water.waterMark, + group->colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + false /*bEnableAudioOverRightPanel*/, + bEnable2Head1Or); + DP_PRINTF(DP_ERROR, "DP-TS> Unable to allocate stream. Setting RG_DIV mode"); + res = true; + } + else + DP_ASSERT(0); + } + } + return res; +} + +bool ConnectorImpl::beforeAddStreamMST(GroupImpl * group, bool test, bool forFlushMode) +{ + bool res = false; + bool isPrimaryStream = (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY == group->singleHeadMultiStreamID); + if (allocateTimeslice(group)) + { + flushTimeslotsToHardware(); + group->setTimeslotAllocated(true); + if (!forFlushMode && isPrimaryStream) + { + main->controlRateGoverning(group->headIndex, true /*enable*/); + } + + // If not single Head MST mode or if primary stream then program here + // other streams programmed in NAE + if (forFlushMode || + (isPrimaryStream && + addStreamMSTIntransitionGroups.isEmpty())) + { + hal->payloadTableClearACT(); + hal->payloadAllocate(group->streamIndex, group->timeslot.begin, group->timeslot.count); + } + else if (isPrimaryStream && + !addStreamMSTIntransitionGroups.isEmpty()) + { + + group->bDeferredPayloadAlloc = true; + } + + addStreamMSTIntransitionGroups.insertFront(group); + } + else + { + if (!test) + { + DP_PRINTF(DP_ERROR, "DP-TS> Unable to allocate stream. Should call mainLink->configureStream to trigger RG_DIV mode"); + main->configureMultiStream(group->headIndex, + group->timeslot.watermarks.hBlankSym, group->timeslot.watermarks.vBlankSym, + 1, 0, 0, 0, group->colorFormat, group->singleHeadMultiStreamID, group->singleHeadMultiStreamMode, bAudioOverRightPanel); + } + else + { + flushTimeslotsToHardware(); + + if (forFlushMode || + (DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST != group->singleHeadMultiStreamMode) || isPrimaryStream) + { + main->configureTriggerSelect(group->headIndex, group->singleHeadMultiStreamID); + hal->payloadTableClearACT(); + hal->payloadAllocate(group->streamIndex, group->timeslot.begin, group->timeslot.count); + } + + DP_PRINTF(DP_ERROR, "DP-TS> Unable to allocate stream. 
Setting RG_DIV mode"); + res = true; + } + } + + return res; +} + +void ConnectorImpl::disableFlush( bool test) +{ + bool bHeadAttached = false; + + if (activeGroups.isEmpty()) + return; + + sortActiveGroups(true); + + // + // If SST check that head should be attached with single group else if MST at least + // 1 group should have headAttached before calling disable flush on SOR + // + if (!this->linkUseMultistream()) + { + GroupImpl * activeGroup = this->getActiveGroupForSST(); + + if (activeGroup && !activeGroup->isHeadAttached() && intransitionGroups.isEmpty()) + { + DP_PRINTF(DP_ERROR, "DPCONN> SST-Flush mode disable should not be called when head is not attached. Returning early without disabling flush"); + return; + } + } + else + { + for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next) + { + GroupImpl * group = (GroupImpl *)e; + if (group->isHeadAttached()) + { + bHeadAttached = true; + break; + } + } + + if (!bHeadAttached) + { + DP_PRINTF(DP_ERROR, "DPCONN> MST-Flush mode disable should not be called when head is not attached. Returning early without disabling flush"); + return; + } + } + + // + // We need to rebuild the tiemslot configuration when exiting flush mode + // Bug 1550750: Change the order to proceed from last to front as they were added. + // Some tiled monitors are happy with this. + // + for (ListElement * e = activeGroups.last(); e != activeGroups.end(); e = e->prev) + { + GroupImpl * g = (GroupImpl *)e; + bool force = false; + NvU32 headMask = 0; + + if (!g->isHeadAttached() && this->linkUseMultistream()) + continue; + + bool skipPostLinkTraining = (((g->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST) || + (g->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)) && + (g->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)); + + // + // Allocate the timeslot configuration + // + force = beforeAddStream(g, test, true); + if (this->linkUseMultistream()) + { + main->configureTriggerSelect(g->headIndex, g->singleHeadMultiStreamID); + } + + if (g->lastModesetInfo.mode == DSC_DUAL) + { + // For 2 Head 1 OR - Legal combinations are Head0 and Head1, Head2 and Head3 + headMask = (1 << g->headIndex) | (1 << (g->headIndex + 1)); + } + else + { + headMask = (1 << g->headIndex); + } + + main->clearFlushMode(headMask, force); // ACT is triggered here + if (!skipPostLinkTraining) + main->postLinkTraining(g->headIndex); + afterAddStream(g); + } +} + +DeviceImpl* ConnectorImpl::findDeviceInList(const Address & address) +{ + for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = e->next) + { + DeviceImpl* device = (DeviceImpl*)e; + + // + // There may be multiple hits with the same address. This can + // happen when the head is still attached to the old device.branch + // We never need to resurrect old unplugged devices - and their + // object will be destroyed as soon as the DD handles the + // notifyZombie message. + // + if ((device->address == address) && device->plugged) + return device; + } + + // + // If no plugged devices are found, we should search back through zombied devices. 
+    // This is purely an optimization to allow the automatic restoration of a
+    // panel if it 'reappears' while it's still being driven
+    //
+    for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = e->next)
+    {
+        DeviceImpl* device = (DeviceImpl*)e;
+
+        if (device->address == address)
+            return device;
+    }
+
+    return 0;
+}
+
+void ConnectorImpl::disconnectDeviceList()
+{
+    for (Device * d = enumDevices(0); d; d = enumDevices(d))
+    {
+        ((DeviceImpl*)d)->plugged = false;
+        // Clear the active bit (payload_allocate)
+        ((DeviceImpl*)d)->payloadAllocated = false;
+
+        // Deallocate object which may go stale after long pulse handling.
+        if (((DeviceImpl*)d)->isDeviceHDCPDetectionAlive)
+        {
+            delete ((DeviceImpl*)d)->deviceHDCPDetection;
+            ((DeviceImpl*)d)->deviceHDCPDetection = NULL;
+            ((DeviceImpl*)d)->isHDCPCap = False;
+        }
+    }
+}
+
+// status == true: attach, == false: detach
+void ConnectorImpl::notifyLongPulse(bool statusConnected)
+{
+    NvU32 muxState = 0;
+    NV_DPTRACE_INFO(HOTPLUG, statusConnected, connectorActive, previousPlugged);
+
+    if (!connectorActive)
+    {
+        DP_PRINTF(DP_ERROR, "DP> Got a long pulse before any connector is active!!");
+        return;
+    }
+
+    if (main->getDynamicMuxState(&muxState))
+    {
+        DeviceImpl * existingDev = findDeviceInList(Address());
+        bool bIsMuxOnDgpu = DRF_VAL(0073, _CTRL_DFP_DISP_MUX, _STATE, muxState) == NV0073_CTRL_DFP_DISP_MUX_STATE_DISCRETE_GPU;
+
+        if (existingDev && existingDev->isFakedMuxDevice() && !bIsMuxOnDgpu)
+        {
+            DP_PRINTF(DP_ERROR, "NotifyLongPulse ignored as mux is not pointing to dGPU and there is a faked device. Marking detect complete");
+            sink->notifyDetectComplete();
+            return;
+        }
+
+        if (existingDev && existingDev->isPreviouslyFakedMuxDevice() && !existingDev->isMarkedForDeletion())
+        {
+            DP_PRINTF(DP_NOTICE, "NotifyLongPulse ignored as there is a previously faked device but it is not marked for deletion");
+            if (!statusConnected)
+            {
+                DP_PRINTF(DP_NOTICE, "Calling notifyDetectComplete");
+                sink->notifyDetectComplete();
+            }
+            return;
+        }
+    }
+
+    if (previousPlugged && statusConnected)
+    {
+        if (main->isInternalPanelDynamicMuxCapable())
+        {
+            return;
+        }
+
+        DP_PRINTF(DP_NOTICE, "DP> Redundant plug");
+        // When tunneling is enabled, send out a BW changed event so the client calls set modelist again
+        timer->queueCallback(this, &tagDpBwAllocationChanged, 0, false /* not allowed in sleep */);
+
+        for (Device * i = enumDevices(0); i; i = enumDevices(i))
+        {
+            DeviceImpl * dev = (DeviceImpl *)i;
+            if (dev->ignoreRedundantHotplug())
+            {
+                DP_PRINTF(DP_NOTICE, "DP> Skipping link assessment");
+                return;
+            }
+        }
+
+        //
+        // Exit early to avoid connector re-initialization from breaking MST
+        // branch state when streams are allocated.
+        // Additional exceptions:
+        // - UEFI post (firmwareGroup->headInFirmware) for fresh init.
+        // - MST to SST transition, for which the unplug event may be filtered by RM.
+        //   Messaging will be disabled in this case.
+        //
+        if (linkUseMultistream() && (!activeGroups.isEmpty()) &&
+            (!(firmwareGroup && ((GroupImpl *)firmwareGroup)->headInFirmware)) &&
+            (hal->isMessagingEnabled()))
+        {
+            DP_PRINTF(DP_ERROR, "DP> Bail out early on redundant hotplug with active MST stream");
+            return;
+        }
+    }
+
+    this->notifyLongPulseInternal(statusConnected);
+}
+
+/*!
+ * @brief Compute the max BW required across all devices and try to allocate that BW + * + * @return Boolean to indicate success or failure + */ +bool ConnectorImpl::updateDpTunnelBwAllocation() +{ + NvU64 connectorTunnelBw = 0; + if (!hal->isDpTunnelBwAllocationEnabled()) + { + return true; + } + + for (Device * i = enumDevices(0); i; i = enumDevices(i)) + { + DeviceImpl * dev = (DeviceImpl *)i; + NvU64 devMaxModeBwRequired = dev->getMaxModeBwRequired(); + connectorTunnelBw += devMaxModeBwRequired; + } + + DP_PRINTF(DP_INFO, "Required Connector Tunnel BW: %d Mbps", connectorTunnelBw / (1000 * 1000)); + + NvU64 maxTunnelBw = getMaxTunnelBw(); + if (connectorTunnelBw > maxTunnelBw) + { + DP_PRINTF(DP_INFO, "Requested connector tunnel BW is larger than max Tunnel BW of %d Mbps. Overriding Max Tunnel BW\n", + maxTunnelBw / (1000 * 1000)); + connectorTunnelBw = maxTunnelBw; + } + + if (!allocateDpTunnelBw(connectorTunnelBw)) + { + DP_PRINTF(DP_ERROR, "Failed to allocate Dp Tunnel BW: %d Mbps", connectorTunnelBw / (1000 * 1000)); + return false; + } + return true; +} + +// +// notifyLongPulse() filters redundant hotplug notifications and calls into +// notifyLongPulseInternal(). +// +// setAllowMultiStreaming() calls into notifyLongPulseInternal() in order to +// re-detect already connected sink after enabling/disabling +// MST support. +// +void ConnectorImpl::notifyLongPulseInternal(bool statusConnected) +{ + // start from scratch when forcePreferredLinkConfig is not set + if (!(preferredLinkConfig.isValid() && this->forcePreferredLinkConfig)) + { + preferredLinkConfig = LinkConfiguration(); + } + + bPConConnected = false; + bSkipAssessLinkForPCon = false; + + // + // Check if the panel is eDP and DPCD data for that is already parsed. + // Passing this as a parameter inside notifyHPD to skip reading of DPCD + // data in case of eDP after sleep/hibernate resume. + // + hal->notifyHPD(statusConnected, (!hal->isDpcdOffline() && main->isEDP())); + if (main->isLttprSupported()) + { + // + // Update LTTPR counts since it's only correct after HPD. + // If there are some other DFP parameters might change during HPD cycle + // then we can remove the isLttprSupported() check. + // + main->queryAndUpdateDfpParams(); + } + + // For bug 2489143, max link rate needs to be forced on eDP through regkey + if (main->isEDP()) + { + hal->overrideMaxLinkRate(maxLinkRateFromRegkey); + } + + // Some panels whose TCON erroneously sets DPCD 0x200 SINK_COUNT=0. + if (main->isEDP() && hal->getSinkCount() == 0) + hal->setSinkCount(1); + + // disconnect all devices + for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next) { + GroupImpl * g = (GroupImpl *)i; + + // Clear the timeslot table + freeTimeslice(g); + } + + disconnectDeviceList(); + + auxBus->setDevicePlugged(statusConnected); + + if (statusConnected) + { + // Reset all settings for previous downstream device + configInit(); + + if (!hal->isAtLeastVersion(1, 0)) + goto completed; + + DP_PRINTF(DP_NOTICE, "DP> HPD v%d.%d", hal->getRevisionMajor(), hal->getRevisionMinor()); + + // + // Handle to clear pending CP_IRQ that throw short pulse before L-HPD. There's no + // more short pulse corresponding to CP_IRQ after HPD, but IRQ vector needs to be + // clear or block following CP_IRQ. 
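+    // (If the vector is left set, it will block handling of subsequent CP_IRQs.)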
+ // + if (hal->interruptContentProtection()) + { + DP_PRINTF(DP_NOTICE, "DP>clear pending CP interrupt at hpd"); + hal->clearInterruptContentProtection(); + } + + populateAllDpConfigs(); + + // + // Perform OUI authentication + // + if (!performIeeeOuiHandshake() && hal->isAtLeastVersion(1, 2)) + { + DP_PRINTF(DP_WARNING, "DP> OUI Noncompliance! Sink is DP 1.2 and is required to implement"); + } + + // Apply Oui WARs here + this->applyOuiWARs(); + + // Tear down old message manager + DP_ASSERT( !hal->getSupportsMultistream() || (hal->isAtLeastVersion(1, 2) && " Device supports multistream but not DP 1.2 !?!? ")); + + // Check if we should be attempting a transition between MST<->SST + if (main->hasMultistream()) + { + if (linkState == DP_TRANSPORT_MODE_INIT) + { + linkState = hal->getSupportsMultistream() ? + DP_TRANSPORT_MODE_MULTI_STREAM : + DP_TRANSPORT_MODE_SINGLE_STREAM; + linkAwaitingTransition = false; + } + else + { + if (linkUseMultistream() != hal->getSupportsMultistream()) + { + linkAwaitingTransition = true; + DP_PRINTF(DP_NOTICE, "CONN> Link Awaiting Transition."); + } + else + { + linkAwaitingTransition = false; + } + } + } + + // + // Only transition between multiStream and single stream when there + // are no active panels. Note: that if we're unable to transition + // we will mark all of the displays as MUST_DISCONNECT. + // + + // + // Shutdown the old message manager if there was one + // If there is a previous stale messageManager or discoveryManager + // present then there is a chance on certain docks where MSTM bits + // needs to be cleared as previous transactions might still be in + // flight. Just checking IRQ VECTOR field might not be enough to + // check for stale messages. + // Please see bug 3928070/4066192 + // + if (discoveryManager || messageManager) + { + bForceClearPendingMsg = true; + } + delete discoveryManager; + isDiscoveryDetectComplete = false; + bIsDiscoveryDetectActive = true; + + pendingEdidReads.clear(); // destroy any half completed requests + delete messageManager; + messageManager = 0; + discoveryManager = 0; + delete qseNonceGenerator; + qseNonceGenerator = 0; + + cancelHdcpCallbacks(); + + if (hal->getSupportsMultistream() && main->hasMultistream()) + { + bool bDeleteFirmwareVC = false; + + DP_PRINTF(DP_NOTICE, "DP> Multistream panel detected, building message manager"); + + // Update preferredLinkConfig multistream status to MST + if (preferredLinkConfig.isValid() && this->forcePreferredLinkConfig) + { + preferredLinkConfig.multistream = true; + } + + // + // Rebuild the message manager to reset and half received messages + // that may be in the pipe. 
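+            // (Any stale manager was already deleted during connector re-initialization above.)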
+            //
+            messageManager = new MessageManager(hal, timer);
+            messageManager->registerReceiver(&ResStatus);
+
+            //
+            // Create a discovery manager to initiate detection
+            //
+            if (AuxRetry::ack != hal->setMessagingEnable(true, true))
+            {
+                DP_PRINTF(DP_WARNING, "DP> Failed to enable messaging for multistream panel");
+            }
+
+            if (AuxRetry::ack != hal->setMultistreamHotplugMode(IRQ_HPD))
+            {
+                DP_PRINTF(DP_WARNING, "DP> Failed to enable hotplug mode for multistream panel");
+            }
+
+            discoveryManager = new DiscoveryManager(messageManager, this, timer, hal);
+            qseNonceGenerator = new QSENonceGenerator();
+
+            // Check and clear if any pending message here
+            if (hal->clearPendingMsg() || bForceClearPendingMsg)
+            {
+                DP_PRINTF(DP_NOTICE, "DP> Stale MSG found: set branch to D3 and back to D0...");
+                if (hal->isAtLeastVersion(1, 4))
+                {
+                    hal->setMessagingEnable(false, true);
+                }
+                hal->setPowerState(PowerStateD3);
+                hal->setPowerState(PowerStateD0);
+                if (hal->isAtLeastVersion(1, 4))
+                {
+                    hal->setMessagingEnable(true, true);
+                }
+            }
+            pendingRemoteHdcpDetections = 0;
+
+            //
+            // We need to clear the payload table and payload ID table during a hotplug in cases
+            // where DD does not send a null modeset for a device that was plugged. Otherwise
+            // this will lead to issues where the branch does not clear the PBN and sends stale
+            // available PBN values. One of the scenarios is BSOD in SLI mode, where the secondary
+            // GPUs are not used for primary boot by VBIOS.
+            //
+            bDeleteFirmwareVC = ((GroupImpl *)firmwareGroup &&
+                                 !((GroupImpl *)firmwareGroup)->isHeadAttached() &&
+                                 !bIsUefiSystem);
+
+            if (bDeleteFirmwareVC || !bAttachOnResume)
+            {
+                deleteAllVirtualChannels();
+            }
+
+            assessLink(); // Link assessment may re-add a stream
+                          // and must be done AFTER the messaging system
+                          // is restored.
+            //
+            // The SOR should be able to authenticate and enable link encryption without being
+            // connected to any head, but the RM code requires the head to be ARMed to do
+            // authentication. As a solution for DP 1.2, postpone the authentication until NAE.
+            //
+            DP_ASSERT((isHDCPAuthOn == false) && (isDP12AuthCap == false));
+
+            HDCPState hdcpState = {0};
+            main->configureHDCPGetHDCPState(hdcpState);
+            if (hdcpState.HDCP_State_1X_Capable || hdcpState.HDCP_State_22_Capable)
+            {
+                isDP12AuthCap = true;
+            }
+            else
+            {
+                isDP12AuthCap = false;
+            }
+            discoveryManager->notifyLongPulse(true);
+        }
+        else // SST case
+        {
+            DiscoveryManager::Device dev;
+            Edid tmpEdid;
+            bool isComplianceForEdidTest = false;
+            dev.address = Address();
+
+            // Update preferredLinkConfig multistream status to SST
+            if (preferredLinkConfig.isValid() && this->forcePreferredLinkConfig)
+            {
+                preferredLinkConfig.multistream = false;
+            }
+
+            if (AuxRetry::ack != hal->setMessagingEnable(false, true))
+            {
+                DP_PRINTF(DP_WARNING, "DP> Failed to clear messaging for singlestream panel");
+            }
+
+            // We will report a dongle as a new device with the videoSink flag as false.
+            if (hal->getSinkCount() == 0)
+            {
+                dev.peerDevice = Dongle;
+            }
+            else
+            {
+                dev.peerDevice = DownstreamSink;
+
+                // Handle fallback EDID
+                if (!EdidReadSST(tmpEdid, auxBus, timer,
+                                 hal->getPendingTestRequestEdidRead(),
+                                 main->isForceRmEdidRequired(),
+                                 main->isForceRmEdidRequired() ? main : 0))
+                {
+                    bool status = false;
+                    //
+                    // Some DP2VGA dongles are unable to return a correct EDID even after several retries.
+                    // Before this library, 26 retries were given for DP2VGA dongle EDID reads.
+                    // Give at most 24 retries here for another restart in the library.
+ // Bug 996248. + // + if (hal->getLegacyPortCount()) + { + LegacyPort * port = hal->getLegacyPort(0); + if (port->getDownstreamPortType() == ANALOG_VGA) + { + NvU8 retries = DP_READ_EDID_MAX_RETRIES; + for (NvU8 i = 0; i < retries; i++) + { + status = EdidReadSST(tmpEdid, auxBus, timer, + hal->getPendingTestRequestEdidRead(), + main->isForceRmEdidRequired(), + main->isForceRmEdidRequired() ? main : 0); + if (status) + break; + } + } + } + if (!status) + { + // corrupt edid + DP_PRINTF(DP_ERROR, "DP-CONN> Corrupt Edid!"); + + // Reading the EDID can fail if AUX is dead. + // So update DPCD state after max number of retries. + hal->updateDPCDOffline(); + } + } + + DP_PRINTF(DP_NOTICE, "DP-CONN> Edid read complete: Manuf Id: 0x%x, Name: %s", tmpEdid.getManufId(), tmpEdid.getName()); + dev.branch = false; + dev.dpcdRevisionMajor = hal->getRevisionMajor(); + dev.dpcdRevisionMinor = hal->getRevisionMinor(); + dev.legacy = false; + dev.SDPStreams = hal->getNumberOfAudioEndpoints() ? 1 : 0; + dev.SDPStreamSinks = hal->getNumberOfAudioEndpoints(); + dev.videoSink = true; + dev.maxTmdsClkRate = 0U; + + // Apply EDID based WARs and update the WAR flags if needed + applyEdidWARs(tmpEdid, dev); + + // + // HP Valor QHD+ needs 50ms delay after D3 + // to prevent black screen + // + if (tmpEdid.WARFlags.delayAfterD3) + { + bDelayAfterD3 = true; + } + + // Panels use Legacy address range for interrupt reporting + if (tmpEdid.WARFlags.useLegacyAddress) + { + hal->setSupportsESI(false); + } + + // + // For some devices short pulse comes in after we disconnect the + // link, so DPLib ignores the request and link trains after modeset + // happens. When modeset happens the link configuration picked may + // be different than what we assessed before. So we skip the link + // power down in assessLink() in such cases + // + if (tmpEdid.WARFlags.keepLinkAlive) + { + DP_PRINTF(DP_NOTICE, "tmpEdid.WARFlags.keepLinkAlive = true, set bKeepOptLinkAlive to true. (keep link alive after assessLink())"); + bKeepOptLinkAlive = true; + } + // Ack the test response, no matter it is a ref sink or not + if (hal->getPendingTestRequestEdidRead()) + { + isComplianceForEdidTest = true; + hal->setTestResponseChecksum(tmpEdid.getLastPageChecksum()); + hal->setTestResponse(true, true); + } + } + + // + // If this is a zombie VRR device that was previously enabled, + // re-enable it now. This must happen before link training if + // VRR was enabled before the device became a zombie or else the + // monitor will report that it's in normal mode even if the GPU is + // driving it in VRR mode. 
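+            // (resetVrrEnablement() followed by startVrrEnablement() below performs this re-enable.)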
+ // + { + DeviceImpl * existingDev = findDeviceInList(dev.address); + if (existingDev && existingDev->isVrrMonitorEnabled() && + !existingDev->isVrrDriverEnabled()) + { + DP_PRINTF(DP_NOTICE, "DP> Re-enabling previously enabled zombie VRR monitor"); + existingDev->resetVrrEnablement(); + existingDev->startVrrEnablement(); + } + } + + if ((hal->getPCONCaps())->bSourceControlModeSupported) + { + bPConConnected = true; + } + + LinkConfiguration maxLinkConfig = getMaxLinkConfig(); + maxLinkConfig.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(maxLinkConfig.peakRate); + maxLinkConfig.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(maxLinkConfig.peakRatePossible); + if (bPConConnected || + (main->isEDP() && this->bSkipAssessLinkForEDP) || + (main->isInternalPanelDynamicMuxCapable())) + { + this->highestAssessedLC = maxLinkConfig; + this->linkGuessed = bPConConnected; + this->bSkipAssessLinkForPCon = bPConConnected; + } + else + { + if (tmpEdid.WARFlags.powerOnBeforeLt) + { + // + // Some panels expose that they are in D0 even when they are not. + // Explicit write to DPCD 0x600 is required to wake up such panel before LT. + // + hal->setPowerState(PowerStateD0); + } + this->assessLink(); + } + + if (hal->getLegacyPortCount() != 0) + { + LegacyPort * port = hal->getLegacyPort(0); + DwnStreamPortType portType = port->getDownstreamPortType(); + dev.maxTmdsClkRate = port->getMaxTmdsClkRate(); + processNewDevice(dev, tmpEdid, false, portType, port->getDownstreamNonEDIDPortAttribute()); + } + else + { + processNewDevice(dev, tmpEdid, false, DISPLAY_PORT, RESERVED, isComplianceForEdidTest); + } + + // After processNewDevice, we should not defer any lost device. + bDeferNotifyLostDevice = false; + } + } + else // HPD unplug + { + // + // Shutdown the old message manager if there was one + // + delete discoveryManager; + isDiscoveryDetectComplete = false; + pendingEdidReads.clear(); // destroy any half completed requests + bDeferNotifyLostDevice = false; + + delete messageManager; + messageManager = 0; + discoveryManager = 0; + bAcpiInitDone = false; + bKeepOptLinkAlive = false; + bNoFallbackInPostLQA = false; + bDscCapBasedOnParent = false; + linkState = DP_TRANSPORT_MODE_INIT; + linkAwaitingTransition = false; + + isHDCPAuthOn = isDP12AuthCap = false; + delete qseNonceGenerator; + qseNonceGenerator =0; + + cancelHdcpCallbacks(); + + // Disable the authentication on the main link + main->configureHDCPDisableAuthentication(); + + } +completed: + previousPlugged = statusConnected; + + fireEvents(); + + if (!statusConnected) + { + sink->notifyDetectComplete(); + return; + } + if (!(hal->getSupportsMultistream() && main->hasMultistream())) + { + // Ensure NewDev will be processed before notifyDetectComplete on SST + discoveryDetectComplete(); + } +} + +void ConnectorImpl::handleDpTunnelingIrq() +{ + bool notifyClient = false; + // Unconditionally reset the BW request status + hal->clearDpTunnelingBwRequestStatus(); + + if (hal->hasDpTunnelEstimatedBwChanged()) + { + NvU64 previousAllocatedDpTunnelBw = allocatedDpTunnelBw; + updateDpTunnelBwAllocation(); + if (previousAllocatedDpTunnelBw < allocatedDpTunnelBw) + { + notifyClient = true; + } + + hal->clearDpTunnelingEstimatedBwStatus(); + } + + if (hal->hasDpTunnelBwAllocationCapabilityChanged()) + { + notifyClient = true; + // Try to allocate max tunnel BW if we enabled BW allocation above + if (hal->isDpTunnelBwAllocationEnabled()) + { + allocateMaxDpTunnelBw(); + } + + hal->clearDpTunnelingBwAllocationCapStatus(); + } + + if (notifyClient) + { + 
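+        // Let the client know the allocated tunnel BW has changed so it can
+        // re-evaluate its mode list.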
timer->queueCallback(this, &tagDpBwAllocationChanged, 0, false /* not allowed in sleep */); + } +} + +void ConnectorImpl::notifyShortPulse() +{ + // + // Do nothing if device is not plugged or + // resume has not been called after hibernate + // to activate the connector + // + if (!connectorActive || !previousPlugged) + { + DP_PRINTF(DP_ERROR, "DP> Got a short pulse after an unplug or before any connector is active!!"); + return; + } + DP_PRINTF(DP_INFO, "DP> IRQ"); + hal->notifyIRQ(); + + // Handle CP_IRQ + if (hal->interruptContentProtection()) + { + // Cancel previous queued delay handling and reset retry counter. + hdcpCpIrqRxStatusRetries = 0; + timer->cancelCallback(this, &tagDelayedHDCPCPIrqHandling); + + if (handleCPIRQ()) + { + hal->clearInterruptContentProtection(); + } + else + { + timer->queueCallback(this, &tagDelayedHDCPCPIrqHandling, HDCP_CPIRQ_RXSTATUS_COOLDOWN); + } + } + + if (hal->getStreamStatusChanged()) + { + if (!messageManager) + { + DP_PRINTF(DP_NOTICE, "DP> Received Stream status changed Interrupt, but not in multistream mode. Ignoring."); + } + else + { + handleSSC(); + hal->clearStreamStatusChanged(); + + // + // Handling of SSC takes longer time during which time we miss IRQs. + // Populate interrupts again. + // + hal->notifyIRQ(); + } + } + + if (hal->interruptCapabilitiesChanged()) + { + DP_PRINTF(DP_NOTICE, "DP> Sink capabilities changed, re-reading caps and reinitializing the link."); + // We need to set dpcdOffline to re-read the caps + hal->setDPCDOffline(true); + hal->clearInterruptCapabilitiesChanged(); + notifyLongPulse(true); + return; + } + + if (detectSinkCountChange()) + { + DP_PRINTF(DP_NOTICE, "DP> Change in downstream sink count. Re-analysing link."); + // We need to set dpcdOffline to re-read the caps + hal->setDPCDOffline(true); + notifyLongPulse(true); + return; + } + + if (hal->interruptDownReplyReady()) + { + if (!messageManager) + { + DP_PRINTF(DP_NOTICE, "DP> Received DownReply Interrupt, but not in multistream mode. Ignoring."); + } + else + { + messageManager->IRQDownReply(); + } + } + + if (hal->interruptUpRequestReady()) + { + if (!messageManager) + { + DP_PRINTF(DP_NOTICE, "DP> Received UpRequest Interrupt, but not in multistream mode. Ignoring."); + } + else + { + messageManager->IRQUpReqest(); + } + } + + if (hal->getDownStreamPortStatusChange() && hal->getSinkCount()) + { + Edid target; + if (!EdidReadSST(target, auxBus, timer, hal->getPendingTestRequestEdidRead())) + { + DP_PRINTF(DP_WARNING, "DP> Failed to read EDID."); + } + + return; + } + + if (hal->getPendingAutomatedTestRequest()) + { + if (hal->getPendingTestRequestEdidRead()) + { + Edid target; + if (EdidReadSST(target, auxBus, timer, true)) + { + hal->setTestResponseChecksum(target.getLastPageChecksum()); + hal->setTestResponse(true, true); + } + else + hal->setTestResponse(false); + } + else if (hal->getPendingTestRequestTraining()) + { + // handleTestLinkTrainRequest will call hal->setTestResponse() once verified the request. 
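+            // It NAKs the request itself when the link is multistream or the
+            // requested rate/lane count is invalid.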
+ handleTestLinkTrainRequest(); + } + else if (hal->getPendingTestRequestPhyCompliance()) + { + hal->setTestResponse(handlePhyPatternRequest()); + } + } + + // Handle MCCS_IRQ + if (hal->intteruptMCCS()) + { + DP_PRINTF(DP_NOTICE, "DP> MCCS_IRQ"); + handleMCCSIRQ(); + hal->clearInterruptMCCS(); + } + + if (hal->getHdmiLinkStatusChanged()) + { + DP_PRINTF(DP_NOTICE, "DP> HDMI Link Status Changed"); + handleHdmiLinkStatusChanged(); + } + + if (hal->isPanelReplayErrorSet()) + { + DP_PRINTF(DP_ERROR, "DP> Sink set Panel replay error"); + handlePanelReplayError(); + hal->clearPanelReplayError(); + } + + if (hal->getDpTunnelingIrq()) + { + handleDpTunnelingIrq(); + hal->clearDpTunnelingIrq(); + } + + // + // Check to make sure sink is not in D3 low power mode + // and interlane alignment is good, etc + // if not - trigger training + // + if (!isLinkInD3() && isLinkLost()) + { + // If the link status of a VRR monitor has changed, we need to check the enablement again. + if (hal->getLinkStatusChanged()) + { + for (Device *i = enumDevices(0); i; i = enumDevices(i)) + { + DeviceImpl *dev = (DeviceImpl *)i; + + if ((dev->plugged) && (dev->activeGroup != NULL) && (dev->isVrrMonitorEnabled())) + { + // Trigger the full enablement, if the monitor is in locked state. + NvU8 retries = VRR_MAX_RETRIES; + if (!dev->isVrrDriverEnabled()) + { + DP_PRINTF(DP_NOTICE, "DP> VRR enablement state is not synced. Re-enable it."); + do + { + if (!dev->startVrrEnablement()) + { + continue; + } + else + break; + }while(--retries); + + if (!retries) + { + DP_PRINTF(DP_WARNING, "DP> VRR enablement failed on multiple retries."); + } + } + } + } + } + + // If DPCD access is not available, skip trying to restore link configuration. + hal->updateDPCDOffline(); + if (hal->isDpcdOffline()) + { + return; + } + + DP_PRINTF(DP_WARNING, "DP> Link not alive, Try to restore link configuration"); + + if (trainSingleHeadMultipleSSTLinkNotAlive(getActiveGroupForSST())) + { + return; + } + + // Save the previous highest assessed LC + LinkConfiguration previousAssessedLC = highestAssessedLC; + // Save original active link configuration. + LinkConfiguration originalActiveLinkConfig = activeLinkConfig; + + if (activeLinkConfig.isValid() && enableFlush()) + { + if (!train(originalActiveLinkConfig, false)) + { + // + // If original link config could not be restored force + // original config, else SF will overflow when we come + // out of flush mode. + // + DP_PRINTF(DP_ERROR, "DP> After IRQ, original link config could not be restored. Forcing config"); + train(originalActiveLinkConfig, true); + } + disableFlush(); + } + + // if link train fails, call assessLink() again. + if (!activeLinkConfig.isValid()) + { + assessLink(); + } + + //If the highest assessed LC has changed, send notification + if(highestAssessedLC != previousAssessedLC) + { + DeviceImpl * dev = findDeviceInList(Address()); + if (dev) + { + sink->bandwidthChangeNotification(dev, false); + } + } + } +} + +bool ConnectorImpl::detectSinkCountChange() +{ + if (this->linkUseMultistream()) + return false; + + DeviceImpl * existingDev = findDeviceInList(Address()); + if (!existingDev) + return false; + + // detect a zero to non-zero sink count change or vice versa + bool hasSink = !!(hal->getSinkCount()); + return ((existingDev->videoSink || existingDev->audioSink) != hasSink); +} + +/*! + * @brief Sets the preferred link config which the tool has requested to train to. 
+ * + * @param[in] lc client requested link configuration + * @param[in] commit initiate assessLink with lc + * @param[in] force link train to lc. Flush Mode is used. + * @param[in] trainType parameter for assessLink for NORMAL, NO, FAST LT + * @param[in] forcePreferredLinkConfig cap system LT configuration during NLP, IMP, NAB + * + * @return Boolean to indicate success or failure + */ +bool ConnectorImpl::setPreferredLinkConfig(LinkConfiguration & lc, bool commit, + bool force, LinkTrainingType trainType, + bool forcePreferredLinkConfig) +{ + bool bEnteredFlushMode; + Device *dev; + + dev = enumDevices(0); + DeviceImpl * nativeDev = (DeviceImpl *)dev; + if (preferredLinkConfig.lanes || preferredLinkConfig.peakRate || preferredLinkConfig.minRate) + DP_ASSERT(0 && "Missing reset call for a preveious set preferred call"); + + if (lc.bEnableFEC && + ((nativeDev && !nativeDev->isFECSupported()) || (!this->isFECSupported()))) + { + DP_ASSERT(0 && "Client requested to enable FEC but either panel or GPU doesn't support FEC"); + return false; + } + + lc.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(lc.peakRatePossible); + lc.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(lc.peakRate); + if (!validateLinkConfiguration(lc)) + { + DP_PRINTF(DP_ERROR, "Client requested bad LinkConfiguration. peakRate=%d, lanes=%d", lc.peakRate, lc.lanes); + return false; + } + + preferredLinkConfig = lc; + preferredLinkConfig.enhancedFraming = hal->getEnhancedFraming(); + preferredLinkConfig.multistream = this->linkUseMultistream(); + preferredLinkConfig.policy = this->linkPolicy; + + // need to force assessLink and during NotifyAttachBegin + this->forcePreferredLinkConfig = forcePreferredLinkConfig; + + if (force) + { + // Do flushmode + if (!(bEnteredFlushMode = this->enableFlush())) + DP_ASSERT(0 && "Flush fails"); + if (this->train(preferredLinkConfig, false)) + activeLinkConfig = preferredLinkConfig; + if (bEnteredFlushMode) + this->disableFlush(true); + } + else + { + if (commit) + { + assessLink(trainType); + } + } + + return true; +} + +bool ConnectorImpl::resetPreferredLinkConfig(bool force) +{ + preferredLinkConfig = LinkConfiguration(); + this->forcePreferredLinkConfig = false; + + if (force) + assessLink(); + return true; +} + +bool ConnectorImpl::isAcpiInitDone() +{ + return (hal->getSupportsMultistream() ? false : bAcpiInitDone); +} + +void ConnectorImpl::notifyAcpiInitDone() +{ + Edid ddcReadEdid; + + // Initiate the EDID Read mechanism only if it is in SST mode & plugged + if (!hal->getSupportsMultistream() && previousPlugged) + { + // Read EDID using RM Control call - NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 + if (EdidReadSST(ddcReadEdid, auxBus, timer, false, true, main)) + { + // Fill the data in device's ddcEdid & mark ACPI Init done + for (Device * i = enumDevices(0); i; i=enumDevices(i)) + { + DP_PRINTF(DP_NOTICE, "DPCONN> ACPI Init Done. 
DDC EDID Read completed!!");
+
+                DeviceImpl * dev = (DeviceImpl*)i;
+                dev->ddcEdid = ddcReadEdid;
+
+                this->bAcpiInitDone = true;
+                break;
+            }
+        }
+    }
+
+    return;
+}
+
+void ConnectorImpl::hdcpRenegotiate(NvU64 cN, NvU64 cKsv)
+{
+    this->main->configureHDCPRenegotiate(cN, cKsv);
+    HDCPState hdcpState = {0};
+    this->main->configureHDCPGetHDCPState(hdcpState);
+    this->isHDCPAuthOn = hdcpState.HDCP_State_Authenticated;
+}
+
+bool ConnectorImpl::getHDCPAbortCodesDP12(NvU32 &hdcpAbortCodesDP12)
+{
+    hdcpAbortCodesDP12 = 0;
+
+    if (isHopLimitExceeded)
+    {
+        hdcpAbortCodesDP12 = hdcpAbortCodesDP12 | HDCP_FLAGS_ABORT_HOP_LIMIT_EXCEEDED;
+    }
+
+    // Video has also expressed the need of bRevoked but we don't think it's needed. Next RFR will have conclusion.
+    return true;
+}
+
+bool ConnectorImpl::hdcpValidateKsv(const NvU8 *ksv, NvU32 Size)
+{
+    if (HDCP_KSV_SIZE <= Size)
+    {
+        NvU32 i, j;
+        NvU32 count_ones = 0;
+        for (i = 0; i < HDCP_KSV_SIZE; i++)
+        {
+            for (j = 0; j < 8; j++)
+            {
+                if (ksv[i] & (1 << j))
+                {
+                    count_ones++;
+                }
+            }
+        }
+
+        // A valid KSV has exactly 20 of its 40 bits set.
+        if (count_ones == 20)
+        {
+            return true;
+        }
+    }
+    return false;
+}
+
+void ConnectorImpl::cancelHdcpCallbacks()
+{
+    this->isHDCPReAuthPending = false;
+    this->isHDCPAuthTriggered = false;
+    this->authRetries = 0;
+
+    timer->cancelCallback(this, &tagHDCPReauthentication);  // Cancel any queued auth callback.
+    timer->cancelCallback(this, &tagDelayedHdcpCapRead);    // Cancel any HDCP cap callbacks.
+    timer->cancelCallback(this, &tagHDCPStreamEncrEnable);  // Cancel any queued stream encryption enable callback.
+
+    this->bValidQSERequest = false;
+    timer->cancelCallback(this, &tagSendQseMessage);        // Cancel any queued QSE callback.
+
+    for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next)
+    {
+        GroupImpl * group = (GroupImpl *)i;
+        group->cancelHdcpCallbacks();
+    }
+}
+
+// Create a new Group
+Group * ConnectorImpl::newGroup()
+{
+    Group * g = new GroupImpl(this);
+    if (g)
+    {
+        inactiveGroups.insertBack((GroupImpl*)g);
+    }
+    return g;
+}
+
+// Create a new firmware Group
+Group * ConnectorImpl::createFirmwareGroup()
+{
+    Group * g = new GroupImpl(this, true);
+    if (g)
+    {
+        inactiveGroups.insertBack((GroupImpl*)g);
+    }
+    return g;
+}
+
+// Shut down and destroy the connector manager
+void ConnectorImpl::destroy()
+{
+    delete this;
+}
+
+void ConnectorImpl::createFakeMuxDevice(const NvU8 *buffer, NvU32 bufferSize)
+{
+    if (!buffer)
+        return;
+
+    // Return immediately if DSC is not supported
+    if (FLD_TEST_DRF(_DPCD14, _DSC_SUPPORT, _DECOMPRESSION, _YES, buffer[0]) != 1)
+        return;
+
+    DeviceImpl * existingDev = findDeviceInList(Address());
+
+    // Return immediately if we already have a device
+    if (existingDev)
+    {
+        return;
+    }
+
+    DeviceImpl *newDev = new DeviceImpl(hal, this, NULL);
+    if (!newDev)
+    {
+        return;
+    }
+
+    newDev->connectorType = connectorDisplayPort;
+    newDev->plugged = true;
+    newDev->videoSink = true;
+    newDev->bIsFakedMuxDevice = true;
+    newDev->bIsPreviouslyFakedMuxDevice = false;
+
+    // Initialize DSC state
+    newDev->dscCaps.bDSCSupported = true;
+    newDev->dscCaps.bDSCDecompressionSupported = true;
+    if (!(newDev->setRawDscCaps(buffer, DP_MIN(bufferSize, DSC_CAPS_SIZE))))
+    {
+        DP_ASSERT(0 && "Faking DSC caps failed!");
+    }
+    newDev->bDSCPossible = true;
+    newDev->devDoingDscDecompression = newDev;
+
+    populateAllDpConfigs();
+    deviceList.insertBack(newDev);
+    sink->newDevice(newDev);
+    sink->notifyDetectComplete();
+}
+
+void ConnectorImpl::deleteFakeMuxDevice()
+{
+    DeviceImpl * existingDev =
findDeviceInList(Address()); + if (!existingDev) + return; + + // If this is not a fake device then don't delete it + if (!existingDev->isPreviouslyFakedMuxDevice()) + return; + + existingDev->markDeviceForDeletion(); + notifyLongPulse(false); + + return; +} + +bool ConnectorImpl::getRawDscCaps(NvU8 *buffer, NvU32 bufferSize) +{ + DeviceImpl * existingDev = findDeviceInList(Address()); + if (!existingDev) + return false; + + return existingDev->getRawDscCaps(buffer, bufferSize); +} + +bool ConnectorImpl::isMultiStreamCapable() +{ + return main->hasMultistream(); +} + +bool ConnectorImpl::isFlushSupported() +{ + return true; +} + +bool ConnectorImpl::isStreamCloningEnabled() +{ + return main->isStreamCloningEnabled(); +} + +bool ConnectorImpl::isFECSupported() +{ + return main->isFECSupported(); +} + +bool ConnectorImpl::isFECCapable() +{ + DeviceImpl *dev; + + for (Device * i = enumDevices(0); i; i = enumDevices(i)) + { + dev = (DeviceImpl *)i; + // If it's SST, or if it's the first connected branch. + if (!this->linkUseMultistream() || dev->address.size() == 1) + { + return (dev->getFECSupport() && this->isFECSupported()); + } + } + return false; +} + +NvU32 ConnectorImpl::maxLinkRateSupported() +{ + return main->maxLinkRateSupported(); +} + +Connector * DisplayPort::createConnector +( + MainLink * main, + AuxBus * aux, + Timer * timer, + Connector::EventSink * sink +) +{ + ConnectorImpl *connector; + connector = new ConnectorImpl(main, aux, timer, sink); + + if (connector == NULL || connector->constructorFailed) { + delete connector; + return NULL; + } + + if (main->getRegkeyValue(NV_DP_REGKEY_ENABLE_OCA_LOGGING)) + { + main->retrieveRingBuffer(LOG_CALL, MAX_RECORD_COUNT); + main->retrieveRingBuffer(ASSERT_HIT, MAX_RECORD_COUNT); + } + return connector; +} + +void ConnectorImpl::setAllowMultiStreaming(bool bAllowMST) +{ + // + // hal->getMultiStreamCapOverride() returns true, if MST cap has been + // overridden to SST. + // + if (!hal->getMultiStreamCapOverride() == bAllowMST) + return; + + if (previousPlugged && + getSinkMultiStreamCap() && + !activeGroups.isEmpty() && linkUseMultistream() != bAllowMST) + { + DP_ASSERT(!"If connected sink is MST capable then:" + "Client should detach all active MST video/audio streams " + "before disallowing MST, vise-versa client should detach " + "active SST stream before allowing MST."); + } + + // + // Disable MST messaging, if client has disallowed MST; + // notifyLongPulseInternal() enable back MST messaging when client + // allow MST. + // + if (previousPlugged && linkUseMultistream() && !bAllowMST) + hal->setMessagingEnable( + false /* _uprequestEnable */, true /* _upstreamIsSource */); + + hal->overrideMultiStreamCap(bAllowMST /* mstCapable */ ); + + // Re-detect already connected sink, and to keep software state in sync + if (previousPlugged && getSinkMultiStreamCap()) + { + isHDCPAuthOn = isDP12AuthCap = false; + notifyLongPulseInternal(true); + } +} + +bool ConnectorImpl::getAllowMultiStreaming(void) +{ + // + // hal->getMultiStreamCapOverride() returns true, if MST cap has been + // overridden to SST. 
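+    // MST is therefore allowed exactly when that override is not set.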
+ // + return !hal->getMultiStreamCapOverride(); +} + +bool ConnectorImpl::getSinkMultiStreamCap(void) +{ + return hal->getDpcdMultiStreamCap(); +} + +void ConnectorImpl::setDp11ProtocolForced() +{ + if (!this->linkUseMultistream()) + { + return; + } + + this->notifyLongPulse(false); + hal->setMessagingEnable(false, true); + hal->setMultistreamLink(false); + hal->overrideMultiStreamCap(false /*no mst*/); + this->notifyLongPulse(true); +} + +void ConnectorImpl::resetDp11ProtocolForced() +{ + if (this->linkUseMultistream()) + { + return; + } + + this->notifyLongPulse(false); + hal->overrideMultiStreamCap(true /*mst capable*/); + this->notifyLongPulse(true); +} + +bool ConnectorImpl::isDp11ProtocolForced() +{ + return hal->getMultiStreamCapOverride(); +} + +bool ConnectorImpl::getTestPattern(NV0073_CTRL_DP_TESTPATTERN * testPattern) +{ + return (main->getDpTestPattern(testPattern)); +} + +bool ConnectorImpl::setTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, NvBool bIsHBR2, NvBool bSkipLaneDataOverride) +{ + return (main->setDpTestPattern(testPattern, laneMask, cstm, bIsHBR2, bSkipLaneDataOverride)); +} + +bool ConnectorImpl::getLaneConfig(NvU32 *numLanes, NvU32 *data) +{ + return (main->getDpLaneData(numLanes, data)); +} + +bool ConnectorImpl::setLaneConfig(NvU32 numLanes, NvU32 *data) +{ + return (main->setDpLaneData(numLanes, data)); +} + +void ConnectorImpl::getCurrentLinkConfig(unsigned & laneCount, NvU64 & linkRate) +{ + main->getLinkConfig(laneCount, linkRate); +} + +void ConnectorImpl::getCurrentLinkConfigWithFEC(unsigned & laneCount, NvU64 & linkRate, bool &bFECEnabled) +{ + main->getLinkConfigWithFEC(laneCount, linkRate, bFECEnabled); +} +unsigned ConnectorImpl::getPanelDataClockMultiplier() +{ + LinkConfiguration linkConfig = getMaxLinkConfig(); + linkConfig.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(linkConfig.peakRate); + linkConfig.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(linkConfig.peakRatePossible); + return getDataClockMultiplier(linkConfig.convertLinkRateToDataRate(linkConfig.peakRatePossible), linkConfig.lanes); +} + +unsigned ConnectorImpl::getGpuDataClockMultiplier() +{ + unsigned laneCount; + NvU64 linkRate; + // Need to get the GPU caps, not monitor caps. 
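+    // maxLinkRateSupported() reports the GPU's own limit regardless of the
+    // attached sink, and the full 4 lanes are assumed.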
+ linkRate = maxLinkRateSupported(); + laneCount = laneCount_4; + + return getDataClockMultiplier(linkRate, laneCount); +} + +void ConnectorImpl::configurePowerState(bool bPowerUp) +{ + main->configurePowerState(bPowerUp); +} + +bool ConnectorImpl::readPsrState(vesaPsrState *psrState) +{ + return hal->readPsrState(psrState); +} + +void ConnectorImpl::readPsrCapabilities(vesaPsrSinkCaps *caps) +{ + hal->readPsrCapabilities(caps); +} + +bool ConnectorImpl::readPsrConfiguration(vesaPsrConfig *psrConfig) +{ + return hal->readPsrConfiguration(psrConfig); +} + +bool ConnectorImpl::updatePsrConfiguration(vesaPsrConfig config) +{ + return hal->updatePsrConfiguration(config); +} + +bool ConnectorImpl::readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState) +{ + return hal->readPsrDebugInfo(psrDbgState); +} + +bool ConnectorImpl::writePsrErrorStatus(vesaPsrErrorStatus psrErr) +{ + return hal->writePsrErrorStatus(psrErr); +} + +bool ConnectorImpl::readPsrErrorStatus(vesaPsrErrorStatus *psrErr) +{ + return hal->readPsrErrorStatus(psrErr); +} + +bool ConnectorImpl::writePsrEvtIndicator(vesaPsrEventIndicator psrEvt) +{ + return hal->writePsrEvtIndicator(psrEvt); +} + +bool ConnectorImpl::readPsrEvtIndicator(vesaPsrEventIndicator *psrEvt) +{ + return hal->readPsrEvtIndicator(psrEvt); +} + +bool ConnectorImpl::updatePsrLinkState(bool bTurnOnLink) +{ + bool bRet = true; + bool bEnteredFlushMode = false; + + if (bTurnOnLink) + { + hal->setPowerState(PowerStateD0); + + if (isLinkLost()) + { + if (!this->psrLinkConfig.isValid()) + { + DP_ASSERT(0 && "Invalid PSR link config"); + return false; + } + + // NOTE: always verify changes to below line with 2H1OR case + if (!(bEnteredFlushMode = this->enableFlush())) + { + DP_ASSERT(0 && "Flush fails"); + } + + bRet = this->train(this->psrLinkConfig, false); + + if (bEnteredFlushMode) + { + this->disableFlush(true); + } + } + else + { + // return early if link is already up + return true; + } + } + else + { + // Save the current link config + this->psrLinkConfig = getActiveLinkConfig(); + } + return bRet; +} + +bool ConnectorImpl::readPrSinkDebugInfo(panelReplaySinkDebugInfo *prDbgInfo) +{ + return hal->readPrSinkDebugInfo(prDbgInfo); +} + +bool ConnectorImpl::handlePhyPatternRequest() +{ + + bool status = true; + PatternInfo pattern_info; + + pattern_info.lqsPattern = hal->getPhyTestPattern(); + + // Get lane count from most current link training + unsigned requestedLanes = this->activeLinkConfig.lanes; + + if (pattern_info.lqsPattern == LINK_QUAL_80BIT_CUST) + { + hal->get80BitsCustomTestPattern((NvU8 *)&pattern_info.ctsmLower); + } + + // send control call to rm for the pattern + if (!main->physicalLayerSetTestPattern(&pattern_info)) + { + DP_ASSERT(0 && "Could not set the PHY_TEST_PATTERN"); + status = false; + } + else + { + if (AuxRetry::ack != hal->setLinkQualPatternSet(pattern_info.lqsPattern, requestedLanes)) + { + DP_ASSERT(0 && "Could not set the LINK_QUAL_PATTERN"); + status = false; + } + } + return status; +} + +// return ACK/NACK result of the request +bool ConnectorImpl::handleTestLinkTrainRequest() +{ + if (activeLinkConfig.multistream) + { + hal->setTestResponse(false); + return false; + } + else + { + LinkRate requestedRate; + unsigned requestedLanes; + + hal->getTestRequestTraining(requestedRate, requestedLanes); + // if one of them is illegal; don't ack. let the box try again. 
+ if (requestedRate == 0 || requestedLanes == 0) + { + DP_ASSERT(0 && "illegal requestedRate/Lane, retry.."); + hal->setTestResponse(false); + return false; + } + else + { + // Compliance shouldn't ask us to train above its caps + if (requestedRate == 0 || requestedRate > hal->getMaxLinkRate()) + { + DP_ASSERT(0 && "illegal requestedRate"); + requestedRate = hal->getMaxLinkRate(); + } + + if (requestedLanes == 0 || requestedLanes > hal->getMaxLaneCount()) + { + DP_ASSERT(0 && "illegal requestedLanes"); + requestedLanes = hal->getMaxLaneCount(); + } + + DeviceImpl * dev = findDeviceInList(Address()); + if (!dev || !dev->plugged || dev->multistream) + { + hal->setTestResponse(false); + return false; + } + else + { + GroupImpl * groupAttached = this->getActiveGroupForSST(); + DP_ASSERT(groupAttached && groupAttached->isHeadAttached()); + + if (!dev->activeGroup || (dev->activeGroup != groupAttached)) + { + DP_ASSERT(0 && "Compliance: no group attached"); + } + + DP_PRINTF(DP_NOTICE, "DP> Compliance: LT on IRQ request: 0x%x, %d.", requestedRate, requestedLanes); + // now see whether the current resolution is supported on the requested link config + LinkConfiguration lc(&linkPolicy, requestedLanes, requestedRate, + hal->getEnhancedFraming(), + false, /* MST */ + false, /* disablePostLTRequest */ + false, /* bEnableFEC */ + false, /* bDisableLTTPR */ + this->getDownspreadDisabled()); // DisableDownspread + + if (groupAttached && groupAttached->isHeadAttached()) + { + if (willLinkSupportModeSST(lc, groupAttached->lastModesetInfo)) + { + DP_PRINTF(DP_NOTICE, "DP> Compliance: Executing LT on IRQ: 0x%x, %d.", requestedRate, requestedLanes); + // we need to force the requirement irrespective of whether is supported or not. + if (!enableFlush()) + { + hal->setTestResponse(false); + return false; + } + else + { + // + // Check if linkTraining fails, perform fake linktraining. This is required because + // if we simply fail linkTraining we will not configure the head which results in + // TDRs if any modset happens after this. + // + hal->setTestResponse(true); + if (!train(lc, false)) + train(lc, true); + disableFlush(); + // Don't force/commit. Only keep the request. + LinkConfiguration lcDataRate(lc); + lcDataRate.peakRate = LINK_RATE_TO_DATA_RATE_8B_10B(lcDataRate.peakRate); + lcDataRate.peakRatePossible = LINK_RATE_TO_DATA_RATE_8B_10B(lcDataRate.peakRatePossible); + setPreferredLinkConfig(lcDataRate, false, false); + return true; + } + } + else // linkconfig is not supporting bandwidth. Fallback to default edid and notify DD. + { + // override the device with fallback edid and notify a bw change to DD. + DP_PRINTF(DP_NOTICE, "DP> Compliance: Switching to compliance fallback EDID after IMP failure."); + dev->switchToComplianceFallback(); + + DP_PRINTF(DP_NOTICE, "DP> Compliance: Notifying bandwidth change to DD after IMP failure."); + // notify a bandwidth change to DD + sink->bandwidthChangeNotification(dev, true); + + return false; + } + } + else + { + DP_PRINTF(DP_NOTICE, "DP> Compliance: Link Training when the head is not attached."); + hal->setTestResponse(true); + if (!train(lc, false)) + train(lc, true); + + LinkConfiguration lcDataRate(lc); + lcDataRate.peakRate = LINK_RATE_TO_DATA_RATE_8B_10B(lcDataRate.peakRate); + lcDataRate.peakRatePossible = LINK_RATE_TO_DATA_RATE_8B_10B(lcDataRate.peakRatePossible); + setPreferredLinkConfig(lcDataRate, false, false); + return true; + } + } + } + } +} + +// +// This function is used to send dp test message. 
+// requestSize indicates the size of the buffer pointed to by pBuffer
+//
+DP_TESTMESSAGE_STATUS ConnectorImpl::sendDPTestMessage
+(
+    void    *pBuffer,
+    NvU32    requestSize,
+    NvU32   *pDpStatus
+)
+{
+    if (messageManager)
+    {
+        testMessage.setupTestMessage(messageManager, this);
+        return testMessage.sendDPTestMessage(pBuffer, requestSize, pDpStatus);
+    }
+    else
+    {
+        return DP_TESTMESSAGE_STATUS_ERROR;
+    }
+}
+
+//
+// This function is designed to be called twice. The first time it is called
+// with pStreamIDs set to NULL to get the number of streams; the second time
+// it is called with a buffer allocated to hold that many stream IDs.
+//
+DP_TESTMESSAGE_STATUS ConnectorImpl::getStreamIDs(NvU32 *pStreamIDs, NvU32 *pCount)
+{
+    DP_TESTMESSAGE_STATUS ret;
+
+    NvU32 streamCnt = activeGroups.size();
+    if (NULL == pStreamIDs)
+    {
+        ret = DP_TESTMESSAGE_STATUS_SUCCESS;
+    }
+    else if (*pCount >= streamCnt)
+    {
+        NvU32 n = 0;
+        for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next)
+        {
+            GroupImpl * group = (GroupImpl *)i;
+            pStreamIDs[n++] = group->streamIndex;
+        }
+        ret = DP_TESTMESSAGE_STATUS_SUCCESS;
+    }
+    else
+    {
+        // The buffer is too small; this status will be mapped and returned to NvAPI.
+        ret = DP_TESTMESSAGE_STATUS_ERROR_INSUFFICIENT_INPUT_BUFFER;
+    }
+
+    *pCount = streamCnt;
+
+    return ret;
+}
+
+void ConnectorImpl::notifyGPUCapabilityChange()
+{
+    // Query current GPU capabilities.
+    main->queryGPUCapability();
+}
+
+void ConnectorImpl::notifyHBR2WAREngage()
+{
+    bool peakBwChanged = false;
+    LinkConfiguration preLc = getMaxLinkConfig();
+    preLc.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(preLc.peakRate);
+    preLc.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(preLc.peakRatePossible);
+    // Update GPU capabilities
+    this->notifyGPUCapabilityChange();
+    LinkConfiguration postLc = getMaxLinkConfig();
+    postLc.peakRate = DATA_RATE_8B_10B_TO_LINK_RATE(postLc.peakRate);
+    postLc.peakRatePossible = DATA_RATE_8B_10B_TO_LINK_RATE(postLc.peakRatePossible);
+
+    peakBwChanged = (preLc.peakRatePossible != postLc.peakRatePossible);
+
+    if (this->previousPlugged && peakBwChanged)
+    {
+        // Set the caps-changed status to make sure the device becomes a zombie.
+        this->bMitigateZombie = true;
+
+        if (this->policyModesetOrderMitigation)
+        {
+            this->modesetOrderMitigation = true;
+        }
+        // NEED TO CHECK: may need to go after the notifyLongPulse(true) below.
+ // If multistream, delete the MST slots allocation in Branch device + if (this->linkUseMultistream()) + this->deleteAllVirtualChannels(); + + // Disconnect the device + this->notifyLongPulse(false); + + // Connect the device again + this->notifyLongPulse(true); + } + +} + +bool ConnectorImpl::isLinkAwaitingTransition() +{ + return this->linkAwaitingTransition; +} + +void ConnectorImpl::configInit() +{ + // Reset branch specific flags + bKeepOptLinkAlive = 0; + bNoFallbackInPostLQA = 0; + LT2FecLatencyMs = 0; + bDscCapBasedOnParent = false; + bForceClearPendingMsg = false; + allocatedDpTunnelBw = 0; + allocatedDpTunnelBwShadow = 0; + bForceHeadShutdownPerMonitor = false; + bDisableDscMaxBppLimit = false; + bForceHeadShutdownOnModeTransition = false; + bDP2XPreferNonDSCForLowPClk = false; +} + +bool ConnectorImpl::dpUpdateDscStream(Group *target, NvU32 dscBpp) +{ + // TODO : Implement logic + return true; +} + diff --git a/src/common/displayport/src/dp_crc.cpp b/src/common/displayport/src/dp_crc.cpp new file mode 100644 index 0000000..32e26f5 --- /dev/null +++ b/src/common/displayport/src/dp_crc.cpp @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort*********************************\ +* * +* Module: dp_crc.cpp * +* CRC Algorithms for the messaging subsystem. 
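+* (Header CRC: CRC-4, polynomial 0x13; body CRC: CRC-8, polynomial 0xD5.)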
* +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_bitstream.h" +#include "dp_crc.h" +using namespace DisplayPort; + +// +// DP CRC for transactions headers +// +unsigned DisplayPort::dpCalculateHeaderCRC(BitStreamReader * reader) +{ + unsigned remainder = 0; + unsigned bit, i; + + while (reader->read(&bit, 1)) + { + remainder <<= 1; + remainder |= bit; + if ((remainder & 0x10) == 0x10) + { + remainder ^= 0x13; + } + } + + for (i = 4; i != 0; i--) + { + remainder <<= 1; + if ((remainder & 0x10) != 0) + { + remainder ^= 0x13; + } + } + + return remainder & 0xF; +} + +// +// DP CRC for body +// +unsigned DisplayPort::dpCalculateBodyCRC(BitStreamReader * reader) +{ + unsigned remainder = 0; + unsigned bit, i; + + while (reader->read(&bit, 1)) + { + remainder <<= 1; + remainder |= bit; + if ((remainder & 0x100) == 0x100) + { + remainder ^= 0xD5; + } + } + + for (i = 8; i != 0; i--) + { + remainder <<= 1; + if ((remainder & 0x100) != 0) + { + remainder ^= 0xD5; + } + } + + return remainder & 0xFF; +} diff --git a/src/common/displayport/src/dp_deviceimpl.cpp b/src/common/displayport/src/dp_deviceimpl.cpp new file mode 100644 index 0000000..ad6f9fd --- /dev/null +++ b/src/common/displayport/src/dp_deviceimpl.cpp @@ -0,0 +1,3496 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_deviceimpl.cpp * +* DP device implementation * +* * +\***************************************************************************/ + +#include "dp_connectorimpl.h" +#include "dp_deviceimpl.h" +#include "dp_auxdefs.h" +#include "dp_groupimpl.h" +#include "dp_printf.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" +using namespace DisplayPort; + +bool DeviceImpl::isMustDisconnect() +{ + // + // Device is must disconnect if we're trying to make an SST<->MST transition + // + if ((this->isActive()) && connector->linkAwaitingTransition) + { + return true; + } + + return false; +} + +DeviceImpl::~DeviceImpl() +{ + if (isDeviceHDCPDetectionAlive && deviceHDCPDetection) + { + delete deviceHDCPDetection; + deviceHDCPDetection = nullptr; + } + + if (vrrEnablement) + { + delete vrrEnablement; + vrrEnablement = NULL; + } + + // Unlink this node from its children + for (unsigned int i = 0; i < sizeof(children)/sizeof(*children); i++) + if (children[i]) + children[i]->parent = 0; + + // Unlink this node from its parent when it's there + if (parent && (parent->children[this->address.tail()] == this)) + parent->children[this->address.tail()] = 0; + + devDoingDscDecompression = NULL; +} + + +DeviceImpl::DeviceImpl(DPCDHAL * hal, ConnectorImpl * connector, DeviceImpl * parent) + : parent(parent), + hal(hal), + activeGroup(0), + connector(connector), + address(), + bVirtualPeerDevice(false), + plugged(false), + friendlyAux(this), + isHDCPCap(False), + isDeviceHDCPDetectionAlive(false), + deviceHDCPDetection(0), + vrrEnablement(0), + bIsFakedMuxDevice(false), + bIsPreviouslyFakedMuxDevice(false), + bisMarkedForDeletion(false), + bIgnoreMsaCap(false), + bIgnoreMsaCapCached(false), + bSdpExtCapable(Indeterminate), + bAsyncSDPCapable(Indeterminate), + bDscPassThroughColorFormatWar(false), + maxModeBwRequired(0) +{ + bandwidth.enum_path.dataValid = false; + shadow.plugged = false; + shadow.zombie = false; + shadow.cableOk = true; + shadow.hdcpCapDone = false; + shadow.highestAssessedLC = connector->highestAssessedLC; + dpMemZero(rawDscCaps, sizeof(rawDscCaps)); +} + +bool DeviceImpl::isZombie() +{ + // You can't be a zombie if nothing is attached + if (!(this->isActive())) + return false; + + if (!plugged) + return true; + + if (isMustDisconnect()) + return true; + + if (!isMultistream()) + { + if (connector->bMitigateZombie) + return true; + + return !connector->willLinkSupportModeSST(connector->highestAssessedLC, + ((GroupImpl*)activeGroup)->lastModesetInfo); + } + else + { + return !this->payloadAllocated; + } +} + +bool DeviceImpl::isCableOk() +{ + if (hal->isDpcdOffline()) + { + // Just say that the cable is ok since we do not have anything connected + return true; + } + else + { + return ! (connector->highestAssessedLC.peakRate < connector->getMaxLinkConfig().peakRate && + connector->highestAssessedLC.lanes < connector->getMaxLinkConfig().lanes); + } +} + +bool DeviceImpl::isLogical() +{ + if (this->address.size() == 0) + return false; + + DP_ASSERT((this->address.tail() <= LOGICAL_PORT_END) && "Invalid port number"); + + // Logical port numbers of a branching unit are from Port 0x08 up to Port 0xF + if (this->address.tail() >= LOGICAL_PORT_START) + return true; + + return false; +} + +bool DeviceImpl::isPendingNewDevice() +{ + if (shadow.plugged == plugged) + return false; + + if (!plugged) + return false; + + // Delay the newDevice event till all enabled heads are not detached. 
+    if (connector->policyModesetOrderMitigation && connector->modesetOrderMitigation)
+        return false;
+
+    return !connector->linkAwaitingTransition;
+}
+
+bool DeviceImpl::isPendingLostDevice()
+{
+    // Marked for lazy exit; to be done now.
+    if (complianceDeviceEdidReadTest && lazyExitNow)
+        return true;
+
+    if (isZombie())
+        return false;
+
+    if (shadow.plugged == plugged)
+        return false;
+
+    return !plugged;
+}
+
+bool DeviceImpl::isPendingZombie()
+{
+    if (isZombie() && !shadow.zombie)
+        return true;
+    else if (!isZombie() && shadow.zombie && plugged)
+        return (connector->policyModesetOrderMitigation ? false : true);
+    return false;
+}
+
+bool DeviceImpl::isPendingHDCPCapDone()
+{
+    if ((isHDCPCap != Indeterminate) && !shadow.hdcpCapDone)
+        return true;
+    else
+        return false;
+}
+
+bool DeviceImpl::isPendingCableOk()
+{
+    return isCableOk() != shadow.cableOk;
+}
+
+bool DeviceImpl::isPendingBandwidthChange()
+{
+    return shadow.highestAssessedLC != connector->highestAssessedLC;
+}
+
+bool DeviceImpl::getI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot)
+{
+    unsigned dataCompleted, sizeRemaining;
+    DisplayPort::AuxBus::status status;
+    Type transactionType;
+
+    if (!buffer || !sizeCompleted)
+        return false;
+
+    dataCompleted = 0;
+    *sizeCompleted = 0;
+    do
+    {
+        sizeRemaining = (sizeRequested - *sizeCompleted);
+        if ((this->address.size() < 2) && (sizeRemaining > NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE))
+        {
+            //
+            // SST case:
+            // If the transaction buffer is larger than 16 bytes (NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE),
+            // break it at a 16-byte boundary (HW default) and set the middle-of-transaction (MOT)
+            // bit on every transaction but the last. This marks all subsequent reads as part of
+            // the same transaction (I2C restart).
+            //
+            status = transaction(AuxBus::read, AuxBus::i2cMot, offset, buffer + *sizeCompleted,
+                                 NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE, &dataCompleted);
+        }
+        else if (sizeRemaining > NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE)
+        {
+            //
+            // MST case:
+            // For I2C transactions to MST devices, if the transaction buffer is divided into
+            // 16-byte chunks, the read index keeps getting reset for each subsequent 16B fetch.
+            // Refer to bug 1233042.
+            //
+            status = transaction(AuxBus::read, AuxBus::i2cMot, offset, buffer + *sizeCompleted,
+                                 sizeRemaining, &dataCompleted);
+        }
+        else
+        {
+            //
+            // Clear the MOT bit if this is a single transaction or the last bytes of a large,
+            // multiple-of-16-bytes buffer (end of transaction).
+            // Note that some customer-specific needs may force the MOT bit when it would not
+            // normally be set, so check whether the client forced the MOT bit and honour that.
+            //
+            transactionType = bForceMot ? AuxBus::i2cMot : AuxBus::i2c;
+            status = transaction(AuxBus::read, transactionType, offset, buffer + *sizeCompleted,
+                                 sizeRemaining, &dataCompleted);
+        }
+
+        if (status != AuxBus::success)
+        {
+            DP_PRINTF(DP_ERROR, "DPDEV> %s: Failed read transaction", __FUNCTION__);
+            break;
+        }
+
+        if (dataCompleted == 0)
+        {
+            // Successfully read 0 bytes? Break out.
+            break;
+        }
+        *sizeCompleted += dataCompleted;
+    }
+    while (*sizeCompleted < sizeRequested);
+
+    return (status == AuxBus::success);
+}
+
+bool DeviceImpl::setI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot)
+{
+    unsigned dataCompleted, sizeRemaining;
+    DisplayPort::AuxBus::status status;
+    Type transactionType;
+
+    if (!buffer || !sizeCompleted)
+        return false;
+
+    dataCompleted = 0;
+    *sizeCompleted = 0;
+
+    //
+    // If the hop count is one, we're asking for DPCD of the root node.
+    // If the hop count is zero, this is a DP 1.1 target.
+    // A hop count greater than or equal to 2 means we have one or more branch
+    // devices in the path; this is the REMOTE_I2C_WRITE transaction case.
+    // Here we should not divide the data at a 16-byte boundary: if we did,
+    // the branch device would not know that it needs to set MOT=1.
+    // So we send the entire data, up to a max payload of 255 bytes.
+    // Please refer to bug 1964453 for more information.
+    //
+    if ((this->address.size() >= 2) &&
+        (sizeRequested > NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE))
+    {
+        status = transaction(AuxBus::write, AuxBus::i2cMot, offset, buffer,
+                             sizeRequested, &dataCompleted);
+
+        if (status != AuxBus::success)
+        {
+            DP_PRINTF(DP_ERROR, "DPDEV> %s: Failed write transaction", __FUNCTION__);
+            return false;
+        }
+        *sizeCompleted = dataCompleted;
+        DP_ASSERT(*sizeCompleted >= sizeRequested);
+        return (status == AuxBus::success);
+    }
+
+    do
+    {
+        sizeRemaining = (sizeRequested - *sizeCompleted);
+        if (sizeRemaining > NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE)
+        {
+            //
+            // If the transaction buffer is larger than 16 bytes (NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE),
+            // break it at a 16-byte boundary (HW default) and set the middle-of-transaction (MOT)
+            // bit on every transaction but the last. This marks all subsequent writes as part of
+            // the same transaction (I2C restart).
+            //
+            status = transaction(AuxBus::write, AuxBus::i2cMot, offset, buffer + *sizeCompleted,
+                                 NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE, &dataCompleted);
+        }
+        else
+        {
+            //
+            // Clear the MOT bit if this is a single transaction or the last bytes of a large,
+            // multiple-of-16-bytes buffer (end of transaction).
+            // Note that some customer-specific needs may force the MOT bit when it would not
+            // normally be set, so check whether the client forced the MOT bit and honour that.
+            //
+            transactionType = bForceMot ? AuxBus::i2cMot : AuxBus::i2c;
+            status = transaction(AuxBus::write, transactionType, offset, buffer + *sizeCompleted,
+                                 sizeRemaining, &dataCompleted);
+        }
+
+        if (status != AuxBus::success)
+        {
+            DP_PRINTF(DP_ERROR, "DPDEV> Failed write transaction");
+            break;
+        }
+
+        if (dataCompleted == 0)
+        {
+            // Successfully wrote 0 bytes? Break out.
+            break;
+        }
+        *sizeCompleted += dataCompleted;
+    } while (*sizeCompleted < sizeRequested);
+
+    return (status == AuxBus::success);
+}
+
+AuxBus::status DeviceImpl::getDpcdData(unsigned offset, NvU8 * buffer,
+                                       unsigned sizeRequested,
+                                       unsigned * sizeCompleted,
+                                       unsigned * pNakReason)
+{
+    if (isFakedMuxDevice())
+    {
+        DP_PRINTF(DP_INFO, "Device is faked, returning nack\n");
+        return AuxBus::nack;
+    }
+
+    if (!buffer || !sizeCompleted)
+    {
+        // default param may be NULL
+        if (pNakReason) *pNakReason = NakUndefined;
+        return AuxBus::nack;
+    }
+
+    //
+    // Remote DPCD doesn't work for Peer Device 4, i.e. a DP-to-Legacy dongle.
+ // But if a virtual DP peer device with Protocol Converter functionality + // populates the DPCD_Revision field of the LINK_ADDRESS Message reply + // then allow DPCD transaction + // + if ((this->peerDevice == Dongle) && (this->dpcdRevisionMajor == 0)) + { + if (pNakReason) *pNakReason = NakBadParam; + return AuxBus::nack; + } + + return (transaction(AuxBus::read, AuxBus::native, offset, buffer, + sizeRequested, sizeCompleted, pNakReason)); +} + +AuxBus::status DeviceImpl::setDpcdData(unsigned offset, NvU8 * buffer, + unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason) +{ + if (isFakedMuxDevice()) + { + DP_PRINTF(DP_INFO, "Device is faked, returning nack\n"); + return AuxBus::nack; + } + + if (!buffer || !sizeCompleted) + { + // default param may be NULL + if (pNakReason) *pNakReason = NakUndefined; + return AuxBus::nack; + } + + // + // Remote DPCD doesn't work for Peer Device 4 i.e. DP-to-Legacy Dongle + // But if a virtual DP peer device with Protocol Converter functionality + // populates the DPCD_Revision field of the LINK_ADDRESS Message reply + // then allow DPCD transaction + // + if ((this->peerDevice == Dongle) && (this->dpcdRevisionMajor == 0)) + { + if (pNakReason) *pNakReason = NakBadParam; + return AuxBus::nack; + } + + return (transaction(AuxBus::write, AuxBus::native, offset, buffer, + sizeRequested, sizeCompleted, pNakReason)); +} + +AuxBus::status DeviceImpl::queryFecData(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags) +{ + if (!fecStatus || !fecErrorCount) + { + return AuxBus::nack; + } + + return (fecTransaction(fecStatus, fecErrorCount, flags)); +} + +DscCaps DeviceImpl::getDscCaps() +{ + return dscCaps; +} + +// +// This function returns the device itself or its parent device that is doing +// DSC decompression for it. 
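+// (In MST topologies a parent branch device can perform DSC decompression on
+// behalf of a sink that cannot decompress the stream itself.)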
+// +Device* DeviceImpl::getDevDoingDscDecompression() +{ + return devDoingDscDecompression; +} + +bool DeviceImpl::getRawDscCaps(NvU8 *buffer, NvU32 bufferSize) +{ + if (bufferSize < sizeof(rawDscCaps)) + return false; + + dpMemCopy(buffer, &rawDscCaps, sizeof(rawDscCaps)); + return true; +} + +bool DeviceImpl::setRawDscCaps(const NvU8 *buffer, NvU32 bufferSize) +{ + if (bufferSize < sizeof(rawDscCaps)) + return false; + + dpMemCopy(&rawDscCaps, buffer, sizeof(rawDscCaps)); + return parseDscCaps(&rawDscCaps[0], sizeof(rawDscCaps)); +} + +bool DeviceImpl::setValidatedRawDscCaps(NvU8 *buffer, NvU32 bufferSize) +{ + // DSC decompression support DPCD 0x60[0] should not be disabled + if ((buffer[0x0] & 0x1) == 0) + return false; + + // DPCD 0X61h - DSC major version should be 1 and minor version can be either 1 or 2 + if (!((buffer[0x1] & 0x1) && ((buffer[0x1] & 0x10) || (buffer[0x1] & 0x20)))) + return false; + + // DPCD 0x64h - DSC Slice Capabilities should not be 0 + if (buffer[0x4] == 0) + return false; + + // DPCD 0x65h - Line buffer bit depth can be less than equal to 8 + if ((buffer[0x5] & 0xf) > 8) + return false; + + // DPCD 0x69h - DSC Decoder Color format Capability should not be 0 + if ((buffer[0x9] & 0xF) == 0) + return false; + + // DPCD 0x6Ah - DSC Decoder Color Depth Capability should not be 0 + if ((buffer[0xa] & 0x7) == 0) + return false; + + // DPCD 0x6Bh - Either DSC peak throughput mode 0 or mode 1 should be non-zero + if (!((buffer[0xb] & 0xf) || (buffer[0xb] & 0xf0))) + return false; + + // DPCD 0x6Ch - max slice width should not be 0 + if ((buffer[0xc] & 0xff) == 0) + return false; + + // DPCD 0x6fh - Bits per pixel Increment value should be less than 5 + if ((buffer[0xf] & 0x7) > 4) + return false; + + return setRawDscCaps(buffer, bufferSize); +} + +bool DeviceImpl::validatePPSData(DSCPPSDATA *pPps) +{ + NVT_STATUS result = DSC_ValidatePPSData(pPps); + if (result != NVT_STATUS_SUCCESS) + { + DP_PRINTF(DP_ERROR, "DPDEV> DSC PPS data validation failed!"); + return false; + } + return true; +} + + +AuxBus::status DeviceImpl::transaction(Action action, Type type, int address, + NvU8 * buffer, unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason, + NvU8 offset, NvU8 nWriteTransactions) +{ + // In case of default implementation, the reason for transaction failure + // must be stored somewhere + unsigned defaultReason; + if (!pNakReason) pNakReason = &defaultReason; + // default failure reason is undefined + *pNakReason = NakUndefined; + + if (type == AuxBus::i2c || type == AuxBus::i2cMot) + { + address >>= 1; // right shifted DDC Address (request identifier in spec) + } + + // If the hop count is one, we're asking for DPCD to the root node. + // If hop count is zero, this is a DP 1.1 target. + if (this->address.size() >= 2) + { + NakData nak; + + if (connector == NULL || connector->messageManager == NULL) + { + return AuxBus::nack; + } + + if (action == AuxBus::read && type == AuxBus::native) + { + RemoteDpcdReadMessage read; + read.set(this->address.parent(), this->address.tail(), address, sizeRequested); + if (!connector->messageManager->send(&read, nak)) { + // Copy reason back to caller + *pNakReason = nak.reason; + // Translate the DPCD error codes + if (nak.reason == NakDefer) + return AuxBus::defer; + if (nak.reason == NakDpcdFail) + return AuxBus::nack; + + // This isn't quite right. We're translating unknown messaging related + // failure cases into defers. 
This is done so that the client will retry the operation + return AuxBus::defer; + } + + *sizeCompleted = read.replyNumOfBytesReadDPCD(); + + if (*sizeCompleted > sizeRequested) { + DP_PRINTF(DP_ERROR, "DPDEV> DPCD Read return more data than requested. Clamping buffer to requested size!"); + *sizeCompleted = sizeRequested; + } + + dpMemCopy(buffer, read.replyGetData(), *sizeCompleted); + + return AuxBus::success; + } + else if ((action == AuxBus::read) && ((type == AuxBus::i2c) || (type == AuxBus::i2cMot))) + { + bool isNoStopBit = (type == AuxBus::i2cMot) ? 1:0; + RemoteI2cReadMessage remoteI2cRead; + I2cWriteTransaction i2cWriteTransactions[1]; + i2cWriteTransactions[0] = I2cWriteTransaction(address, + 0, + &offset, + isNoStopBit); + + if (nWriteTransactions > 1) + { + DP_PRINTF(DP_ERROR, "DPDEV> Set function will fail for transactions > 1, please increase the array size!"); + return AuxBus::nack; + } + + remoteI2cRead.set(this->address.parent(), // topology Address + nWriteTransactions, // number of write transactions + this->address.tail(), // port of Device + i2cWriteTransactions, // list of write transactions + address, // right shifted DDC Address (request identifier in spec) + sizeRequested); // requested size + + if (!connector->messageManager->send(&remoteI2cRead, nak)) { + // Copy reason back to caller + *pNakReason = nak.reason; + // Translate the DPCD error codes + if (nak.reason == NakI2cNak) + return AuxBus::nack; + + // This isn't quite right. We're translating unknown messaging related + // failure cases into defers. This is done so that the client will retry the operation + return AuxBus::defer; + } + + *sizeCompleted = remoteI2cRead.replyNumOfBytesReadI2C(); + + if (*sizeCompleted > sizeRequested) { + DP_PRINTF(DP_ERROR, "DPDEV> I2C Read return more data than requested. Clamping buffer to requested size!"); + *sizeCompleted = sizeRequested; + } + + dpMemCopy(buffer, remoteI2cRead.replyGetI2CData(sizeCompleted), *sizeCompleted); + + return AuxBus::success; + } + else if (action == AuxBus::write && type == AuxBus::native) + { + RemoteDpcdWriteMessage write; + write.set(this->address.parent(), this->address.tail(), address, sizeRequested, buffer); + + if (!connector->messageManager->send(&write, nak)) { + // Copy reason back to caller + *pNakReason = nak.reason; + // Translate the DPCD error codes + if (nak.reason == NakDefer) + return AuxBus::defer; + if (nak.reason == NakDpcdFail) + return AuxBus::nack; + + // This isn't quite right. We're translating unknown messaging related + // failure cases into defers. This is done so that the client will retry the operation + return AuxBus::defer; + } + + *sizeCompleted = sizeRequested; + + return AuxBus::success; + } + else if ((action == AuxBus::write) && ((type == AuxBus::i2c) || (type == AuxBus::i2cMot))) + { + RemoteI2cWriteMessage remoteI2cWrite; + + remoteI2cWrite.set(this->address.parent(), // topology Address + this->address.tail(), // port of Device + address, // right shifted DDC Address (request identifier in spec) + sizeRequested, + buffer); + + if (!connector->messageManager->send(&remoteI2cWrite, nak)) { + // Copy reason back to caller + *pNakReason = nak.reason; + // Translate the DPCD error codes + if (nak.reason == NakI2cNak) + return AuxBus::nack; + + // This isn't quite right. We're translating unknown messaging related + // failure cases into defers. 
This is done so that the client will retry the operation + return AuxBus::defer; + } + + *sizeCompleted = sizeRequested; + + return AuxBus::success; + } + else + { + DP_ASSERT(0 && "Only aux native and i2c reads and writes supported"); + return AuxBus::nack; + } + } + else + { + return this->connector->auxBus->transaction(action, type, address, buffer, + sizeRequested, sizeCompleted, pNakReason); + } +} + +unsigned DeviceImpl::transactionSize() +{ + // + // Remote (DP 1.2) sinks can read much larger chunks at once due to messaging. + // + if (this->address.size() >= 2) + return 255; + else + return this->connector->auxBus->transactionSize(); +} + +static AuxBus::status _QueryFecStatus +( + DeviceImpl *bus, + NvU8 *pStatus +) +{ + AuxBus::status status = AuxBus::success; + + NvU32 addr = NV_DPCD14_FEC_STATUS; + unsigned size = 1; + + unsigned sizeCompleted = 0; + unsigned pNakReason = 0; + + status = bus->getDpcdData(addr, pStatus, size, &sizeCompleted, &pNakReason); + + if (status != AuxBus::success) + { + DP_PRINTF(DP_ERROR, "DP> Error querying FEC status!"); + return AuxBus::nack; + } + return AuxBus::success; +} + +static AuxBus::status _QueryFecErrorCount +( + DeviceImpl *bus, + NvU16 *pErrorCount +) +{ + AuxBus::status status = AuxBus::success; + NvU32 addr = NV_DPCD14_FEC_ERROR_COUNT; + unsigned size = 2; + + unsigned sizeCompleted = 0; + NvU8 cnt[2] = {0, 0}; + unsigned pNakReason = 0; + + status = bus->getDpcdData(addr, &cnt[0], size, &sizeCompleted, &pNakReason); + + if (status != AuxBus::success) + { + DP_PRINTF(DP_ERROR, "DP> Error querying FEC error count!"); + return AuxBus::nack; + } + else + { + *pErrorCount = (((NvU16) cnt[1]) << (sizeof(NvU8) * 8)) | cnt[0]; + } + return AuxBus::success; +} + +static AuxBus::status _WriteFecConfiguration +( + DeviceImpl *bus, + NvU8 configuration +) +{ + AuxBus::status status = AuxBus::success; + + NvU32 addr = NV_DPCD14_FEC_CONFIGURATION; + unsigned size = 1; + + unsigned sizeCompleted = 0; + unsigned pNakReason = 0; + + status = bus->setDpcdData(addr, &configuration, size, &sizeCompleted, &pNakReason); + + if (status != AuxBus::success) + { + DP_PRINTF(DP_ERROR, "DP> Error setting FEC configuration!"); + return AuxBus::nack; + } + return AuxBus::success; +} + +AuxBus::status DeviceImpl::fecTransaction(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags) +{ + AuxBus::status status; + // the capability needs to be checked first (bits 5:0 and 7 need to be set) + NvU8 data, lane, counter, laneData, offset; + if (!bFECSupported) + { + DP_PRINTF(DP_ERROR, "DP> FEC capability not correct!"); + return nack; + } + + if (!bFECUncorrectedSupported) + { + // check if this counter is specified in the flags + if (FLD_TEST_DRF(_DP, _UNCORRECTED, _ERROR, _YES, flags)) + { + for (int i = 0; i < NV_DP_MAX_NUM_OF_LANES; i++) + { + for (int j = 0; j < NV_DP_ERROR_COUNTERS_PER_LANE; j++) + { + // specific error value for error + fecErrorCount[i][j] = NV_DP_FEC_ERROR_COUNT_INVALID; + } + } + DP_PRINTF(DP_ERROR, "DP> FEC capability not correct!"); + return success; + } + } + if (!bFECCorrectedSupported) + { + // check if this counter is specified in the flags + if (FLD_TEST_DRF(_DP, _CORRECTED, _ERROR, _YES, flags)) + { + for (int i = 0; i < NV_DP_MAX_NUM_OF_LANES; i++) + { + for (int j = 0; j < NV_DP_ERROR_COUNTERS_PER_LANE; j++) + { + // specific error value for error + fecErrorCount[i][j] = NV_DP_FEC_ERROR_COUNT_INVALID; + } + } + DP_PRINTF(DP_ERROR, "DP> FEC capability not correct!"); + return success; + } + } + if (!bFECBitSupported) + { + // check if 
this counter is specified in the flags + if (FLD_TEST_DRF(_DP, _BIT, _ERROR, _YES, flags)) + { + for (int i = 0; i < NV_DP_MAX_NUM_OF_LANES; i++) + { + for (int j = 0; j < NV_DP_ERROR_COUNTERS_PER_LANE; j++) + { + // specific error value for error + fecErrorCount[i][j] = NV_DP_FEC_ERROR_COUNT_INVALID; + } + } + DP_PRINTF(DP_ERROR, "DP> FEC capability not correct!"); + return success; + } + } + if (!bFECParityBlockSupported) + { + // check if this counter is specified in the flags + if (FLD_TEST_DRF(_DP, _PARITY_BLOCK, _ERROR, _YES, flags)) + { + for (int i = 0; i < NV_DP_MAX_NUM_OF_LANES; i++) + { + for (int j = 0; j < NV_DP_ERROR_COUNTERS_PER_LANE; j++) + { + // specific error value for error + fecErrorCount[i][j] = NV_DP_FEC_ERROR_COUNT_INVALID; + } + } + DP_PRINTF(DP_ERROR, "DP> FEC capability not correct!"); + return success; + } + } + if (!bFECParitySupported) + { + // check if this counter is specified in the flags + if (FLD_TEST_DRF(_DP, _PARITY_BIT, _ERROR, _YES, flags)) + { + for (int i = 0; i < NV_DP_MAX_NUM_OF_LANES; i++) + { + for (int j = 0; j < NV_DP_ERROR_COUNTERS_PER_LANE; j++) + { + // specific error value for error + fecErrorCount[i][j] = NV_DP_FEC_ERROR_COUNT_INVALID; + } + } + DP_PRINTF(DP_ERROR, "DP> FEC capability not correct!"); + return success; + } + } + + status = _QueryFecStatus(this, fecStatus); + if(status != AuxBus::success) + { + return status; + } + // setting configuration for querying error counters for every lane + for (lane = NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT_LANE_0; lane < connector->activeLinkConfig.lanes; lane++) + { + // keeping FEC ready bit + laneData = DRF_DEF(_DPCD14, _FEC_CONFIGURATION, _FEC_READY, _YES); + // selecting specific lane + laneData |= DRF_NUM(_DPCD14, _FEC_CONFIGURATION, _LANE_SELECT, lane); + // setting configuration for querying all the error counters for a specific lane + for (counter = NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_UNCORRECTED_BLOCK_ERROR_COUNT; + counter <= NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_PARITY_BIT_ERROR_COUNT; counter++) + { + // address function for the current register (in the matrix registers start from 0 and in the bit mask from 1) + offset = counter - 1; + // if flag for corresponding register is not set skip querying + if ((flags & NVBIT(offset)) == 0) continue; + // selecting specific counter + data = laneData | DRF_NUM(_DPCD14, _FEC_CONFIGURATION, _FEC_ERROR_COUNT_SEL, counter) ; + status = _WriteFecConfiguration(this, data); + if (status != AuxBus::success) + { + return status; + } + // reading specific error counter register based on address function + status = _QueryFecErrorCount(this, fecErrorCount[lane] + offset); + if (status != AuxBus::success) + { + return status; + } + } + } + return AuxBus::success; +} + +// Apply DPCD overrides if required +void DeviceImpl::dpcdOverrides() +{ + if (this->parent) + { + // + // Device is behind a branch. SW can't perform overrides as branch will + // handle link training the device not source. Also hal can only override + // capability of sink, not the individual device behind the branch. 
+        //
+        return;
+    }
+    if (processedEdid.WARFlags.overrideMaxLaneCount)
+    {
+        hal->overrideMaxLaneCount(processedEdid.WARData.maxLaneCount);
+    }
+    if (processedEdid.WARFlags.skipCableBWCheck)
+    {
+        hal->skipCableBWCheck(processedEdid.WARData.maxLaneAtHighRate,
+                              processedEdid.WARData.maxLaneAtLowRate);
+    }
+    if (processedEdid.WARFlags.overrideOptimalLinkCfg)
+    {
+        LinkRate optimalLinkRate = 0;
+
+        switch (processedEdid.WARData.optimalLinkRate)
+        {
+            case 0x6:
+                optimalLinkRate = dp2LinkRate_1_62Gbps;
+                break;
+            case 0xa:
+                optimalLinkRate = dp2LinkRate_2_70Gbps;
+                break;
+            case 0x14:
+                optimalLinkRate = dp2LinkRate_5_40Gbps;
+                break;
+            case 0x1E:
+                optimalLinkRate = dp2LinkRate_8_10Gbps;
+                break;
+            default:
+                optimalLinkRate = dp2LinkRate_1_62Gbps;
+                DP_PRINTF(DP_ERROR, "DP-DEV> Invalid link rate supplied. Falling back to RBR");
+                break;
+        }
+        hal->overrideOptimalLinkCfg(optimalLinkRate, processedEdid.WARData.optimalLaneCount);
+    }
+}
+
+void DeviceImpl::applyOUIOverrides()
+{
+    // For now we only need this for Synaptics branch devices.
+    if ((this->peerDevice == DownstreamBranch) ||
+        (this->peerDevice == UpstreamSourceOrSSTBranch))
+    {
+        NvU8 buffer[16] = {0};
+        unsigned size = 13; // Read 0x500 ~ 0x50C
+        unsigned sizeCompleted = 0;
+        unsigned nakReason = NakUndefined;
+
+        //
+        // Synaptics branches claim to support MSA override, but some older
+        // firmware has problems in the decoder. We need to disable the
+        // feature in that case.
+        //
+        if (AuxBus::success != this->getDpcdData(NV_DPCD_BRANCH_IEEE_OUI, &buffer[0],
+                                                 size, &sizeCompleted, &nakReason))
+            return;
+
+        // Check whether the branch IEEE_OUI (0x500h~0x502h) is the Synaptics IEEE_OUI (0x90, 0xCC, 0x24)
+        if ((buffer[0] == 0x90) && (buffer[1] == 0xCC) && (buffer[2] == 0x24))
+        {
+            // Check whether the Device Identification String (0x503~0x506) is "SYNA"
+            if ((buffer[3] == 0x53) && (buffer[4] == 0x59) && (buffer[5] == 0x4E) && (buffer[6] == 0x41))
+            {
+                // Synaptics VMM5331 and VMM5320 only support MSA-over-MST for DP with firmware 5.4.5 and later
+                if (buffer[7] == 0x53)
+                {
+                    //
+                    // This flag will be checked only in DSC pass-through cases (MST).
+                    // All Synaptics VMM53XX chips which support pass-through can only support
+                    // color formats that are listed in 0x69h, even in pass-through mode.
+                    //
+                    this->bDscPassThroughColorFormatWar = true;
+
+                    if ((buffer[8] == 0x31) || (buffer[8] == 0x20))
+                    {
+                        this->bSdpExtCapable = False;
+
+                        //
+                        // Check firmware version:
+                        // 0x50A: FW/SW Major Revision.
+                        // 0x50B: FW/SW Minor Revision.
+                        // 0x50C: Build Number.
+                        //
+                        if ((buffer[10] >= 0x06) ||
+                            ((buffer[10] == 0x05) && (buffer[11] >= 0x05)) ||
+                            ((buffer[10] == 0x05) && (buffer[11] == 0x04) && (buffer[12] >= 0x05)))
+                        {
+                            this->bSdpExtCapable = True;
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+bool DeviceImpl::getAsyncSDPSupported()
+{
+    NvU8 byte = 0;
+    unsigned size = 1;
+    unsigned sizeCompleted;
+    unsigned nakReason = NakUndefined;
+    //
+    // On faked mux devices, we cannot check whether the device has
+    // the capability, as we don't have access to the aux channel.
+    //
+    if (this->isFakedMuxDevice())
+    {
+        return false;
+    }
+    // If the capability has been queried/set already.
+    if (this->bAsyncSDPCapable != Indeterminate)
+    {
+        return (this->bAsyncSDPCapable == True);
+    }
+    // Check device capabilities first.
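+    // Async SDP support requires both MSA_TIMING_PAR_IGNORED and the
+    // ADAPTIVE_SYNC_SDP_SUPPORTED bit in DPRX_FEATURE_ENUM_LIST.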
+    if (AuxBus::success != this->getDpcdData(NV_DPCD_DOWN_STREAM_PORT, &byte,
+                                             size, &sizeCompleted, &nakReason) ||
+        (FLD_TEST_DRF(_DPCD, _DOWN_STREAM_PORT, _MSA_TIMING_PAR_IGNORED, _NO, byte)))
+    {
+        this->bAsyncSDPCapable = False;
+        return false;
+    }
+    if (AuxBus::success != this->getDpcdData(NV_DPCD14_DPRX_FEATURE_ENUM_LIST, &byte,
+                                             size, &sizeCompleted, &nakReason) ||
+        (FLD_TEST_DRF(_DPCD14, _DPRX_FEATURE_ENUM_LIST, _ADAPTIVE_SYNC_SDP_SUPPORTED, _NO, byte)))
+    {
+        this->bAsyncSDPCapable = False;
+        return false;
+    }
+    if (this->isMultistream())
+    {
+        // For MST devices, check the root branch capability.
+        this->bAsyncSDPCapable = hal->getRootAsyncSDPSupported() ? True : False;
+    }
+    else
+    {
+        // For SST, the device supports Async SDP once it reaches here.
+        this->bAsyncSDPCapable = True;
+    }
+    return (this->bAsyncSDPCapable == True);
+}
+
+bool DeviceImpl::getSDPExtnForColorimetrySupported()
+{
+    DeviceImpl *targetDevice = NULL;
+    DeviceImpl *parentDevice = NULL;
+
+    NvU8 byte = 0;
+    unsigned size = 0;
+    unsigned nakReason = NakUndefined;
+
+    //
+    // On faked mux devices, we cannot check whether the device has
+    // the capability, as we don't have access to the aux channel.
+    //
+    if (this->isFakedMuxDevice())
+    {
+        return false;
+    }
+
+    // If the capability has been queried/set already.
+    if (this->bSdpExtCapable != Indeterminate)
+    {
+        return (this->bSdpExtCapable == True);
+    }
+
+    if (!this->isMultistream())
+    {
+        // If the device is directly connected to the source, read the DPCD directly.
+        this->bSdpExtCapable = hal->getSDPExtnForColorimetry() ? True : False;
+        return (this->bSdpExtCapable == True);
+    }
+
+    // For MST devices
+    switch (this->peerDevice)
+    {
+        case DownstreamBranch:
+        case UpstreamSourceOrSSTBranch:
+        {
+            targetDevice = this;
+            break;
+        }
+        case DownstreamSink:
+        {
+            //
+            // When the device is of type DownstreamSink with branch(es)
+            // between the GPU and it, the query goes to the device and its parent.
+            //
+            targetDevice = this;
+            parentDevice = (DeviceImpl *)this->getParent();
+            break;
+        }
+        case Dongle:
+        {
+            //
+            // Bug 2527026: When the device is of type Dongle with branch(es)
+            // between the GPU and it, the query goes to its parent.
+            //
+            targetDevice = (DeviceImpl *)this->getParent();
+            break;
+        }
+        default:
+        {
+            DP_ASSERT(0 && "Unsupported Peer Type for SDP_EXT COLORIMETRY");
+            return false;
+        }
+    }
+
+    if (!targetDevice)
+    {
+        DP_ASSERT(0 && "targetDevice is invalid for SDP_EXT COLORIMETRY");
+        return false;
+    }
+
+    // Send remote DPCD for devices behind the branch
+    if ((AuxBus::success == targetDevice->getDpcdData(NV_DPCD_TRAINING_AUX_RD_INTERVAL,
+                                                      &byte, sizeof byte, &size, &nakReason)) &&
+        (FLD_TEST_DRF(_DPCD14, _TRAINING_AUX_RD_INTERVAL, _EXTENDED_RX_CAP, _YES, byte)))
+    {
+        byte = 0;
+        size = 0;
+        nakReason = NakUndefined;
+
+        if (AuxBus::success == targetDevice->getDpcdData(NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST,
+                                                         &byte, sizeof byte, &size, &nakReason))
+        {
+            this->bSdpExtCapable = FLD_TEST_DRF(_DPCD14,
+                                                _EXTENDED_DPRX_FEATURE_ENUM_LIST,
+                                                _VSC_SDP_EXT_FOR_COLORIMETRY,
+                                                _YES, byte) ? True : False;
+        }
+    }
+
+    if (parentDevice && (this->bSdpExtCapable == True))
+    {
+        //
+        // Do not override bSdpExtCapable for the sink. The result won't
+        // change, but we keep the value for debugging purposes.
+        //
+        return parentDevice->getSDPExtnForColorimetrySupported();
+    }
+
+    return (this->bSdpExtCapable == True);
+}
+
+bool DeviceImpl::getPanelFwRevision(NvU16 *revision)
+{
+    NvU8 fwRevisionMajor = 0;
+    NvU8 fwRevisionMinor = 0;
+    unsigned size = 0;
+    unsigned nakReason = NakUndefined;
+
+    if (!revision)
+    {
+        return false;
+    }
+
+    *revision = 0;
+
+    //
+    // On faked mux devices, we cannot check whether the device has
+    // the capability, as we don't have access to the aux channel.
+    //
+    if (this->isFakedMuxDevice())
+    {
+        return false;
+    }
+
+    if (AuxBus::success != this->getDpcdData(NV_DPCD14_FW_SW_REVISION_MAJOR,
+                                             &fwRevisionMajor, sizeof(fwRevisionMajor), &size, &nakReason))
+    {
+        return false;
+    }
+
+    if (AuxBus::success != this->getDpcdData(NV_DPCD14_FW_SW_REVISION_MINOR,
+                                             &fwRevisionMinor, sizeof(fwRevisionMinor), &size, &nakReason))
+    {
+        return false;
+    }
+
+    *revision = (fwRevisionMajor << 8) | fwRevisionMinor;
+
+    return true;
+}
+
+bool DeviceImpl::isPowerSuspended()
+{
+    bool bPanelPowerOn, bDPCDPowerStateD0;
+    if (connector->main->isEDP())
+    {
+        connector->main->getEdpPowerData(&bPanelPowerOn, &bDPCDPowerStateD0);
+        return !bDPCDPowerStateD0;
+    }
+    return (connector->hal->getPowerState() == PowerStateD3);
+}
+
+void DeviceImpl::setPanelPowerParams(bool bSinkPowerStateD0, bool bPanelPowerStateOn)
+{
+    bool bPanelPowerOn, bDPCDPowerStateD0;
+    GroupImpl * pGroupAttached = connector->getActiveGroupForSST();
+
+    //
+    // For single-head dual-SST mode, the panel power params for the secondary
+    // connector are set while updating the primary connector, so skip the
+    // secondary here.
+    //
+    if (pGroupAttached &&
+        connector->pCoupledConnector &&
+        (pGroupAttached->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST) &&
+        (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY))
+    {
+        return;
+    }
+
+    if (connector->main->isEDP())
+    {
+        connector->main->getEdpPowerData(&bPanelPowerOn, &bDPCDPowerStateD0);
+    }
+    else
+    {
+        bDPCDPowerStateD0 = (connector->hal->getPowerState() == PowerStateD0) ?
+                            true : false;
+    }
+
+    // Going to suspend (D3)
+    if (!bSinkPowerStateD0)
+    {
+        if (this->bypassDpcdPowerOff())
+        {
+            DP_PRINTF(DP_NOTICE, "DP-DEV> Bypassing 600h write for this display");
+            return;
+        }
+
+        if (connector->main->isEDP())
+        {
+            /*
+             * If this is an eDP panel, the setPowerState call below will turn
+             * LCD_POWER back on if it is off. So only call the function when
+             * panel power is on and DPCD_SET_POWER is set to _D0.
+             */
+            if (bPanelPowerOn && bDPCDPowerStateD0)
+            {
+                // Monitor to be put to sleep
+                if (connector->hal->setPowerState(PowerStateD3))
+                    shadow.highestAssessedLC = connector->highestAssessedLC;
+            }
+        }
+        else
+        {
+            if (connector->pCoupledConnector)
+            {
+                // Put the secondary connector to sleep
+                connector->pCoupledConnector->hal->setPowerState(PowerStateD3);
+            }
+
+            // Monitor to be put to sleep
+            if (connector->hal->setPowerState(PowerStateD3))
+            {
+                shadow.highestAssessedLC = connector->highestAssessedLC;
+            }
+        }
+        //
+        // If bPanelPowerStateOn is false and this is not a multistream device,
+        // then shut down the main link. Some eDP panels are known to need this
+        // in order to actually shut down.
+        //
+        if (!isMultistream() && !bPanelPowerStateOn)
+        {
+            if (connector->pCoupledConnector)
+            {
+                // Configure power state on the secondary
+                connector->pCoupledConnector->main->configurePowerState(false);
+            }
+            connector->main->configurePowerState(false);
+        }
+    }
+    else
+    {
+        if (connector->main->isEDP() && !bPanelPowerOn)
+        {
+            // Turn on the eDP panel if required.
+ connector->main->configurePowerState(true); + } + // monitor to be brought out of sleep + if (connector->hal->setPowerState(PowerStateD0)) + { + if (connector->pCoupledConnector) + { + // power up main link on secondary + connector->pCoupledConnector->hal->setPowerState(PowerStateD0); + } + + // Mark linkStatus as dirty as we need to read linkStatus again since we are resuming a power state D0, link might have lost. + connector->hal->setDirtyLinkStatus(true); + if (connector->pCoupledConnector) + { + connector->pCoupledConnector->hal->setDirtyLinkStatus(true); + } + + if (connector->activeGroups.isEmpty()) + { + return; + } + if ((!connector->isLinkActive()) || + (connector->main->isEDP() && !bPanelPowerOn) || + (connector->isLinkLost()) || + (!bDPCDPowerStateD0)) + { + // + // If link is inactive, lost, or the panel was off before, then + // assess Link. Note that this'll detach head if required. + // + if (pGroupAttached && + pGroupAttached->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST) + { + // Special handling for single head dual SST cases + connector->trainSingleHeadMultipleSSTLinkNotAlive(pGroupAttached); + } + else + { + connector->assessLink(); + } + } + } + else + DP_ASSERT(0 && "Could not bring the monitor back from sleep."); + } +} + +void DeviceImpl::switchToComplianceFallback() +{ + Edid fallbackEdid; + makeEdidFallback(fallbackEdid); + this->processedEdid.resetData(); + this->processedEdid = fallbackEdid; +} + +TriState DeviceImpl::hdcpAvailableHop() +{ + return this->isHDCPCap; +} + +TriState DeviceImpl::hdcpAvailable() +{ + if (isNativeDPCD()) + { + return this->hdcpAvailableHop(); + } + else + { + DeviceImpl *targetDevice = this; + while (targetDevice) + { + if (!targetDevice->hdcpAvailableHop()) + { + return False; + } + else if (targetDevice->hdcpAvailableHop() == Indeterminate) + { + return Indeterminate; + } + targetDevice = targetDevice->parent; + } + + return True; + } + return False; +} + +void DeviceImpl::resetCacheInferredLink() +{ + this->bandwidth.enum_path.dataValid = false; +} + +LinkConfiguration * DeviceImpl::inferLeafLink(unsigned * totalLinkSlots) +{ + // update the EPR data + if (!bandwidth.enum_path.dataValid) + { + if (plugged) + { + NakData nack; + for (unsigned retries = 0; retries < 7; retries++) + { + EnumPathResMessage epr(getTopologyAddress().parent(), getTopologyAddress().tail(), true); + bool sendStatus = connector->messageManager->send(&epr, nack); + if (!sendStatus) + { + if (nack.reason == NakDefer || nack.reason == NakTimeout) + continue; + + bandwidth.enum_path.total = 0; + bandwidth.enum_path.free = 0; + bandwidth.enum_path.availableStreams = 0; + break; + } + else + { + bandwidth.enum_path.total = epr.reply.TotalPBN; + bandwidth.enum_path.free = epr.reply.FreePBN; + bandwidth.enum_path.bPathFECCapable = epr.reply.bFECCapability; + bandwidth.enum_path.availableStreams = epr.reply.availableStreams; + + break; + } + } + } + else + { + bandwidth.enum_path.total = bandwidth.enum_path.free = 0; + } + + bandwidth.enum_path.dataValid = true; + bandwidth.lastHopLinkConfig = LinkConfiguration(bandwidth.enum_path.total); + // Update FEC support of the device after EPR + this->getFECSupport(); + } + + if (totalLinkSlots) + { + *totalLinkSlots = bandwidth.lastHopLinkConfig.slotsForPBN(bandwidth.enum_path.total, true /*epr aware*/); + + // + // Override the totalLinkSlots returned to 63 only if peer device is + // 2 (branch), since TS-0 will be used for MTP header. 
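+    // (An MST link carries 64 timeslots per MTP; timeslot 0 is reserved for
+    // the MTP header, leaving 63 for payload.)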
+ // Branch may return the total pbn corresponding to 64 timeslots. + // + if (*totalLinkSlots == 64 && peerDevice == DownstreamBranch) + { + *totalLinkSlots = 63; + } + } + + return &bandwidth.lastHopLinkConfig; +} + +void DeviceImpl::inferPathConstraints() +{ + if (!bandwidth.enum_path.availablePbnUpdated) + { + if (plugged) + { + NakData nack; + for (unsigned retries = 0; retries < 7; retries++) + { + // Marking the EPR as a path message in 2x + EnumPathResMessage epr(getTopologyAddress().parent(), getTopologyAddress().tail(), false); + bool sendStatus = connector->messageManager->send(&epr, nack); + if (!sendStatus) + { + if (nack.reason == NakDefer || nack.reason == NakTimeout) + continue; + + bandwidth.enum_path.total = 0; + bandwidth.enum_path.free = 0; + bandwidth.enum_path.availableStreams = 0; + bandwidth.enum_path.dfpLinkAvailable = 0; + break; + } + else + { + bandwidth.enum_path.total = epr.reply.TotalPBN; + bandwidth.enum_path.free = epr.reply.FreePBN; + bandwidth.enum_path.bPathFECCapable = epr.reply.bFECCapability; + bandwidth.enum_path.availableStreams = epr.reply.availableStreams; + // Include the new DFP available PBN only for 2x + bandwidth.enum_path.dfpLinkAvailable = epr.reply.DFPLinkAvailablePBN; + + break; + } + } + } + else + { + bandwidth.enum_path.total = bandwidth.enum_path.free = bandwidth.enum_path.dfpLinkAvailable = 0; + } + + bandwidth.enum_path.dataValid = true; + bandwidth.enum_path.availablePbnUpdated = true; + bandwidth.lastHopLinkConfig = LinkConfiguration(DP_MIN(bandwidth.enum_path.total, bandwidth.enum_path.dfpLinkAvailable)); + // Update FEC support of the device after EPR + this->getFECSupport(); + } + return; +} + +bool DeviceImpl::isActive() +{ + DP_ASSERT(!activeGroup || activeGroup->isHeadAttached()); + return activeGroup != NULL; +} + +bool DeviceImpl::getRawEpr(unsigned * totalEpr, unsigned * freeEpr, rawEprState eprState) +{ + DP_ASSERT((totalEpr && freeEpr) && "Invalid arguments passed to function getRawEpr()"); + bool status = true; + *totalEpr = 0; + *freeEpr = 0; + + // If request has come for main link/Native branch device + // return main link PBNs as "0" & return + if (isNativeDPCD()) + return status; + + // Cached/Software state is queried + if (eprState == software) + { + *totalEpr = bandwidth.enum_path.total; + *freeEpr = bandwidth.enum_path.free; + + return status; + } + + // Hardware state is queried. 
Send a new EPR message to get the current state + EnumPathResMessage rawEpr(getTopologyAddress().parent(), getTopologyAddress().tail(), true); + NakData nack; + for (unsigned retries = 0; retries < 7; retries++) + { + bool sendStatus = connector->messageManager->send(&rawEpr, nack); + if (!sendStatus) + { + status = false; + if (nack.reason == NakDefer) + continue; + + DP_PRINTF(DP_ERROR, "DP-DEV> EPR message failed while getting RAW EPR"); + + break; + } + else + { + *totalEpr = rawEpr.reply.TotalPBN; + *freeEpr = rawEpr.reply.FreePBN; + status = true; + + break; + } + } + + return status; +} + +unsigned DeviceImpl::getEDIDSize() const +{ + // Return DDC EDID size only if we got a valid EDID there + if (this->connector->isAcpiInitDone() && ddcEdid.isValidHeader()) + { + return ddcEdid.getEdidSize(); + } + else + { + return processedEdid.getEdidSize(); + } +} + +bool DeviceImpl::getEDID(char * buffer, unsigned size) const +{ + // + // Return DDC EDID only if we got a valid EDID there + // This has priority on regular EDID read from panel + // + if (this->connector->isAcpiInitDone() && ddcEdid.isValidHeader()) + { + if (size < ddcEdid.getEdidSize()) + goto panelEdid; + + dpMemCopy(buffer, ddcEdid.getBuffer()->getData(), ddcEdid.getEdidSize()); + return true; + } + +panelEdid: + // No EDID read from SBIOS. Return panel EDID now. + if (size < processedEdid.getEdidSize()) + return false; + + dpMemCopy(buffer, processedEdid.getBuffer()->getData(), processedEdid.getEdidSize()); + return true; +} + +unsigned DeviceImpl::getRawEDIDSize() const +{ + // Return DDC EDID size only if we got a valid EDID there + if (this->connector->isAcpiInitDone() && ddcEdid.isValidHeader()) + { + return ddcEdid.getEdidSize(); + } + else + { + return rawEDID.getEdidSize(); + } +} + +bool DeviceImpl::getRawEDID(char * buffer, unsigned size) const +{ + // + // Return DDC EDID only if we got a valid EDID there + // This has priority on regular EDID read from panel + // + if (this->connector->isAcpiInitDone() && ddcEdid.isValidHeader()) + { + if (size >= ddcEdid.getEdidSize()) + { + dpMemCopy(buffer, ddcEdid.getBuffer()->getData(), ddcEdid.getEdidSize()); + return true; + } + } + + // No EDID read from SBIOS. Return panel EDID now. 
+ if (size < rawEDID.getEdidSize()) + return false; + + dpMemCopy(buffer, rawEDID.getBuffer()->getData(), rawEDID.getEdidSize()); + return true; +} + +bool DeviceImpl::startVrrEnablement() +{ + bool ret = false; + + if (vrrEnablement) + { + ret = vrrEnablement->start(); + } + + return ret; +} + +void DeviceImpl::resetVrrEnablement() +{ + if (vrrEnablement) + { + vrrEnablement->reset(); + } +} + +bool DeviceImpl::isVrrMonitorEnabled() +{ + bool ret = false; + + if (vrrEnablement) + { + ret = vrrEnablement->isMonitorEnabled(); + } + + return ret; +} + +bool DeviceImpl::isVrrDriverEnabled() +{ + bool ret = false; + + if (vrrEnablement) + { + ret = vrrEnablement->isDriverEnabled(); + } + + return ret; +} + +NvBool DeviceImpl::getDSCSupport() +{ + NvU8 byte = 0; + unsigned size = 0; + unsigned nakReason = NakUndefined; + Address::StringBuffer sb; + DP_USED(sb); + + dscCaps.bDSCSupported = false; + + if(AuxBus::success == this->getDpcdData(NV_DPCD14_DSC_SUPPORT, + &byte, sizeof(byte), &size, &nakReason)) + { + if (FLD_TEST_DRF(_DPCD14, _DSC_SUPPORT, _DECOMPRESSION, _YES, byte)) + { + dscCaps.bDSCDecompressionSupported = true; + } + if (FLD_TEST_DRF(_DPCD20, _DSC_SUPPORT, _PASS_THROUGH, _YES, byte)) + { + dscCaps.bDSCPassThroughSupported = true; + } + } + + else + { + DP_PRINTF(DP_ERROR, "DP-DEV> DSC Support AUX READ failed for %s!", address.toString(sb)); + } + + if (dscCaps.bDSCDecompressionSupported || dscCaps.bDSCPassThroughSupported) + { + dscCaps.bDSCSupported = true; + } + + return dscCaps.bDSCSupported; +} + +bool DeviceImpl::isPanelReplaySupported() +{ + return prCaps.bPanelReplaySupported; +} + +void DeviceImpl::getPanelReplayCaps() +{ + NvU8 buffer[10] = {0U}; + unsigned size; + unsigned sizeCompleted; + unsigned nakReason = NakUndefined; + + size = 1U; + if (AuxBus::success == this->getDpcdData(NV_DPCD20_PANEL_REPLAY_CAPABILITY, + &buffer[0], size, &sizeCompleted, &nakReason)) + { + prCaps.bPanelReplaySupported = + FLD_TEST_DRF(_DPCD20_PANEL, _REPLAY_CAPABILITY, _SUPPORTED, + _YES, buffer[0]); + } + else + { + DP_PRINTF(DP_ERROR, "DP-DEV> Aux Read to DPCD offset 0xB0 failed!"); + } +} + +bool DeviceImpl::setPanelReplayConfig(panelReplayConfig prcfg) +{ + NvU8 config = 0U; + unsigned size = 0U; + unsigned nakReason = NakUndefined; + + if (prcfg.enablePanelReplay) + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _ENABLE_PR_MODE, _YES, config); + } + else + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _ENABLE_PR_MODE, _NO, config); + } + + if (prcfg.bEnableCrcWithPr) + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _ENABLE_CRC, _YES, config); + } + else + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _ENABLE_CRC, _NO, config); + } + + if (prcfg.bHpdOnAdaptiveSyncSdpMissing) + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _HPD_ADAPTIVE_SYNC_SDP_MISSING, _YES, config); + } + else + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _HPD_ADAPTIVE_SYNC_SDP_MISSING, _NO, config); + } + + if (prcfg.bHpdOnSdpUncorrectableError) + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _HPD_SDP_UNCORRECTABLE_ERROR, _YES, config); + } + else + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _HPD_SDP_UNCORRECTABLE_ERROR, _NO, config); + } + + if (prcfg.bHpdOnRfbStorageErrors) + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _HPD_RFB_STORAGE_ERRORS, _YES, config); + } + else + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + 
_HPD_RFB_STORAGE_ERRORS, _NO, config); + } + + if (prcfg.bHpdOnRfbActiveFrameCrcError) + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _HPD_RFB_ACTIVE_FRAME_CRC_ERROR, _YES, config); + } + else + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _HPD_RFB_ACTIVE_FRAME_CRC_ERROR, _NO, config); + } + + if (AuxBus::success != + this->setDpcdData(NV_DPCD20_PANEL_REPLAY_CONFIGURATION, + &config, sizeof(config), &size, &nakReason)) + { + return false; + } + + return true; +} + +bool DeviceImpl::getPanelReplayConfig(panelReplayConfig *pPrcfg) +{ + NvU8 config = 0U; + unsigned size = 0U; + unsigned nakReason = NakUndefined; + + if (AuxBus::success != + this->getDpcdData(NV_DPCD20_PANEL_REPLAY_CONFIGURATION, + &config, sizeof(config), &size, &nakReason)) + { + return false; + } + + pPrcfg->enablePanelReplay = FLD_TEST_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _ENABLE_PR_MODE, _YES, config); + + pPrcfg->bEnableCrcWithPr = FLD_TEST_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _ENABLE_CRC, _YES, config); + + pPrcfg->bHpdOnAdaptiveSyncSdpMissing = FLD_TEST_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _HPD_ADAPTIVE_SYNC_SDP_MISSING, _YES, config); + + pPrcfg->bHpdOnSdpUncorrectableError = FLD_TEST_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _HPD_SDP_UNCORRECTABLE_ERROR, _YES, config); + + pPrcfg->bHpdOnRfbStorageErrors = FLD_TEST_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _HPD_RFB_STORAGE_ERRORS, _YES, config); + + pPrcfg->bHpdOnRfbActiveFrameCrcError = FLD_TEST_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _HPD_RFB_ACTIVE_FRAME_CRC_ERROR, _YES, config); + + + return true; +} + +bool DeviceImpl::getPanelReplayStatus(PanelReplayStatus *pPrStatus) +{ + NvU8 state = 0U; + unsigned size = 0U; + unsigned nakReason = NakUndefined; + + if (pPrStatus == NULL) + { + DP_ASSERT(0); + return false; + } + + if (AuxBus::success == this->getDpcdData(NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS, + &state, sizeof(state), &size, &nakReason)) + { + switch (DRF_VAL(_DPCD20, _PANEL_REPLAY_AND_FRAME_LOCK_STATUS, _PR_STATUS, state)) + { + case NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_PR_STATUS_STATE_0: + pPrStatus->prState = PanelReplay_Inactive; + break; + + case NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_PR_STATUS_STATE_1: + pPrStatus->prState = PanelReplay_CaptureAndDisplay; + break; + + case NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_PR_STATUS_STATE_2: + pPrStatus->prState = PanelReplay_DisplayFromRfb; + break; + + default: + pPrStatus->prState = PanelReplay_Undefined; + break; + } + return true; + } + + return false; +} + +bool DeviceImpl::getFECSupport() +{ + NvU8 byte = 0; + unsigned size = 0; + unsigned nakReason = NakUndefined; + + if(this->address.size() > 1) + { + bFECSupported = this->bandwidth.enum_path.bPathFECCapable; + } + + else if (AuxBus::success == this->getDpcdData(NV_DPCD14_FEC_CAPABILITY, + &byte, sizeof(byte), &size, &nakReason)) + { + bFECSupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _FEC_CAPABLE, _YES, byte); + bFECUncorrectedSupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE, _YES, byte); + bFECCorrectedSupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _CORRECTED_BLOCK_ERROR_COUNT_CAPABLE, _YES, byte); + bFECBitSupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _BIT_ERROR_COUNT_CAPABLE, _YES, byte); + bFECParityBlockSupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _PARITY_BLOCK_ERROR_COUNT_CAPABLE, _YES, byte); + bFECParitySupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _PARITY_ERROR_COUNT_CAPABLE, _YES, byte); 
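+        // (Devices behind a branch inherit FEC capability from the path reported
+        // by ENUM_PATH_RESOURCES above, rather than from their local DPCD.)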
+ } + + return bFECSupported; +} + +NvBool DeviceImpl::isDSCSupported() +{ + return dscCaps.bDSCSupported; +} + +NvBool DeviceImpl::isDSCDecompressionSupported() +{ + return dscCaps.bDSCDecompressionSupported; +} + +NvBool DeviceImpl::isDSCPassThroughSupported() +{ + return dscCaps.bDSCPassThroughSupported; +} + +NvBool DeviceImpl::isDynamicPPSSupported() +{ + return dscCaps.bDynamicPPSSupported; +} + +NvBool DeviceImpl::isDynamicDscToggleSupported() +{ + return dscCaps.bDynamicDscToggleSupported; +} + +NvBool DeviceImpl::isDSCPossible() +{ + return this->bDSCPossible; +} + +bool DeviceImpl::isFECSupported() +{ + return bFECSupported; +} + +bool DeviceImpl::parseDscCaps(const NvU8 *buffer, NvU32 bufferSize) +{ + + if (bufferSize < 16) + { + DP_PRINTF(DP_ERROR, "DSC caps buffer must be greater than or equal to 16"); + return false; + } + + if (FLD_TEST_DRF(_DPCD20, _DSC_SUPPORT, _PASS_THROUGH, _YES, buffer[0x0])) + { + dscCaps.bDSCPassThroughSupported = true; + } + + if (FLD_TEST_DRF(_DPCD20, _DSC_SUPPORT, _DYNAMIC_PPS_COMPRESSED_TO_COMPRESSED, _YES, buffer[0x0])) + { + dscCaps.bDynamicPPSSupported = true; + } + + if (FLD_TEST_DRF(_DPCD20, _DSC_SUPPORT, _DYNAMIC_PPS_UNCOMPRESSED_TO_FROM_COMPRESSED, _YES, buffer[0x0])) + { + dscCaps.bDynamicDscToggleSupported = true; + } + + dscCaps.versionMajor = DRF_VAL(_DPCD14, _DSC_ALGORITHM_REVISION, _MAJOR, buffer[0x1]); + dscCaps.versionMinor = DRF_VAL(_DPCD14, _DSC_ALGORITHM_REVISION, _MINOR, buffer[0x1]); + + dscCaps.rcBufferBlockSize = DRF_VAL(_DPCD14, _DSC_RC_BUFFER_BLOCK, _SIZE, buffer[0x2]); + + dscCaps.rcBuffersize = DRF_VAL(_DPCD14, _DSC_RC_BUFFER, _SIZE, buffer[0x3]); + + dscCaps.sliceCountSupportedMask = (((buffer[0xD]) << 8) | buffer[0x4]); + if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_2, _SLICES_PER_SINK_24, _YES, buffer[0xD])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_24; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_2, _SLICES_PER_SINK_20, _YES, buffer[0xD])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_20; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_2, _SLICES_PER_SINK_16, _YES, buffer[0xD])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_16; + + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_12, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_12; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_10, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_10; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_8, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_8; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_6, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_6; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_4, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_4; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_2, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_2; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_1, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_1; + + if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _8, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 8; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _9, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 9; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _10, 
buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 10; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _11, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 11; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _12, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 12; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _13, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 13; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _14, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 14; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _15, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 15; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _16, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 16; + } + + if(FLD_TEST_DRF(_DPCD14, _DSC_BLOCK_PREDICTION, _SUPPORT, _YES, buffer[0x6])) + dscCaps.bDscBlockPredictionSupport = true; + + unsigned maxBitsPerPixelLSB = DRF_VAL(_DPCD14, _DSC_MAXIMUM_BITS_PER_PIXEL_1, _LSB, buffer[0x7]); + unsigned maxBitsPerPixelMSB = DRF_VAL(_DPCD14, _DSC_MAXIMUM_BITS_PER_PIXEL_2, _MSB, buffer[0x8]); + + dscCaps.maxBitsPerPixelX16 = (maxBitsPerPixelMSB << 8) | maxBitsPerPixelLSB; + + if(FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_FORMAT_CAPABILITIES, _RGB, _YES, buffer[0x9])) + dscCaps.dscDecoderColorFormatCaps.bRgb = true; + if(FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_FORMAT_CAPABILITIES, _YCbCr_444, _YES, buffer[0x9])) + dscCaps.dscDecoderColorFormatCaps.bYCbCr444 = true; + if(FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_FORMAT_CAPABILITIES, _YCbCr_SIMPLE_422, _YES, buffer[0x9])) + dscCaps.dscDecoderColorFormatCaps.bYCbCrSimple422 = true; + if(FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_FORMAT_CAPABILITIES, _YCbCr_NATIVE_422, _YES, buffer[0x9])) + dscCaps.dscDecoderColorFormatCaps.bYCbCrNative422 = true; + if(FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_FORMAT_CAPABILITIES, _YCbCr_NATIVE_420, _YES, buffer[0x9])) + dscCaps.dscDecoderColorFormatCaps.bYCbCrNative420 = true; + + if (FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_DEPTH_CAPABILITIES, _12_BITS_PER_COLOR, _YES, buffer[0xa])) + dscCaps.dscDecoderColorDepthMask |= DSC_BITS_PER_COLOR_MASK_12; + if (FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_DEPTH_CAPABILITIES, _10_BITS_PER_COLOR, _YES, buffer[0xa])) + dscCaps.dscDecoderColorDepthMask |= DSC_BITS_PER_COLOR_MASK_10; + if (FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_DEPTH_CAPABILITIES, _8_BITS_PER_COLOR, _YES, buffer[0xa])) + dscCaps.dscDecoderColorDepthMask |= DSC_BITS_PER_COLOR_MASK_8; + + dscCaps.dscPeakThroughputMode0 = DRF_VAL(_DPCD14, _DSC_PEAK_THROUGHPUT, _MODE0, buffer[0xb]); + dscCaps.dscPeakThroughputMode1 = DRF_VAL(_DPCD14, _DSC_PEAK_THROUGHPUT, _MODE1, buffer[0xb]); + + unsigned numOfPixels = DRF_VAL(_DPCD14, _DSC_MAXIMUM_SLICE_WIDTH, _MAX, buffer[0xc]); + dscCaps.dscMaxSliceWidth = numOfPixels * 320; + + if (FLD_TEST_DRF(_DPCD14, _DSC_BITS_PER_PIXEL_INCREMENT, _SUPPORTED, _1_16, buffer[0xf])) + { + dscCaps.dscBitsPerPixelIncrement = BITS_PER_PIXEL_PRECISION_1_16; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_BITS_PER_PIXEL_INCREMENT, _SUPPORTED, _1_8, buffer[0xf])) + { + dscCaps.dscBitsPerPixelIncrement = BITS_PER_PIXEL_PRECISION_1_8; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_BITS_PER_PIXEL_INCREMENT, _SUPPORTED, _1_4, buffer[0xf])) + { + dscCaps.dscBitsPerPixelIncrement = BITS_PER_PIXEL_PRECISION_1_4; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_BITS_PER_PIXEL_INCREMENT, _SUPPORTED, _1_2, buffer[0xf])) + { + dscCaps.dscBitsPerPixelIncrement = BITS_PER_PIXEL_PRECISION_1_2; + } + else 
if (FLD_TEST_DRF(_DPCD14, _DSC_BITS_PER_PIXEL_INCREMENT, _SUPPORTED, _1, buffer[0xf])) + { + dscCaps.dscBitsPerPixelIncrement = BITS_PER_PIXEL_PRECISION_1; + } + + return true; +} + +bool DeviceImpl::parseBranchSpecificDscCaps(const NvU8 *buffer, NvU32 bufferSize) +{ + if (bufferSize < 3) + { + DP_PRINTF(DP_ERROR, "Branch DSC caps buffer must be greater than or equal to 3"); + return false; + } + + dscCaps.branchDSCOverallThroughputMode0 = DRF_VAL(_DPCD14, _BRANCH_DSC_OVERALL_THROUGHPUT_MODE_0, _VALUE, buffer[0x0]); + if (dscCaps.branchDSCOverallThroughputMode0 == 1) + { + dscCaps.branchDSCOverallThroughputMode0 = 680; + } + else if (dscCaps.branchDSCOverallThroughputMode0 >= 2) + { + dscCaps.branchDSCOverallThroughputMode0 = 600 + dscCaps.branchDSCOverallThroughputMode0 * 50; + } + + dscCaps.branchDSCOverallThroughputMode1 = DRF_VAL(_DPCD14, _BRANCH_DSC_OVERALL_THROUGHPUT_MODE_1, _VALUE, buffer[0x1]); + if (dscCaps.branchDSCOverallThroughputMode1 == 1) + { + dscCaps.branchDSCOverallThroughputMode1 = 680; + } + else if (dscCaps.branchDSCOverallThroughputMode1 >= 2) + { + dscCaps.branchDSCOverallThroughputMode1 = 600 + dscCaps.branchDSCOverallThroughputMode1 * 50; + } + + dscCaps.branchDSCMaximumLineBufferWidth = DRF_VAL(_DPCD14, _BRANCH_DSC_MAXIMUM_LINE_BUFFER_WIDTH, _VALUE, buffer[0x2]); + if (dscCaps.branchDSCMaximumLineBufferWidth != 0) + { + if (dscCaps.branchDSCMaximumLineBufferWidth >= 16) + { + dscCaps.branchDSCMaximumLineBufferWidth = dscCaps.branchDSCMaximumLineBufferWidth * 320; + } + else + { + dscCaps.branchDSCMaximumLineBufferWidth = 0; + DP_PRINTF(DP_WARNING, "Value of branch DSC maximum line buffer width is invalid, so setting it to 0."); + } + } + return true; +} + +bool DeviceImpl::readAndParseDSCCaps() +{ + // Allocate a buffer of 16 bytes to read DSC caps + + unsigned sizeCompleted = 0; + unsigned nakReason = NakUndefined; + Address::StringBuffer sb; + DP_USED(sb); + + if(AuxBus::success != this->getDpcdData(NV_DPCD14_DSC_SUPPORT, + &rawDscCaps[0], sizeof(rawDscCaps), &sizeCompleted, &nakReason)) + { + DP_PRINTF(DP_ERROR, "DP-DEV> Error querying DSC Caps on %s!", this->address.toString(sb)); + return false; + } + + return parseDscCaps(&rawDscCaps[0], sizeof(rawDscCaps)); +} + +bool DeviceImpl::readAndParseBranchSpecificDSCCaps() +{ + unsigned sizeCompleted = 0; + unsigned nakReason = NakUndefined; + NvU8 rawBranchSpecificDscCaps[3]; + + if(AuxBus::success != this->getDpcdData(NV_DPCD14_BRANCH_DSC_OVERALL_THROUGHPUT_MODE_0, + &rawBranchSpecificDscCaps[0], + sizeof(rawBranchSpecificDscCaps), + &sizeCompleted, &nakReason)) + { + return false; + } + + return parseBranchSpecificDscCaps(&rawBranchSpecificDscCaps[0], sizeof(rawBranchSpecificDscCaps)); +} + +void DeviceImpl::queryGUID2() +{ + unsigned sizeCompleted = 0; + unsigned nakReason = NakUndefined; + Address::StringBuffer sb; + DP_USED(sb); + + if(AuxBus::success == this->getDpcdData(NV_DPCD14_GUID_2, &this->guid2.data[0], + DPCD_GUID_SIZE, &sizeCompleted, &nakReason)) + { + if (!(this->guid2.isGuidZero())) + { + this->bVirtualPeerDevice = true; + } + } + else + { + DP_PRINTF(DP_ERROR, "DP-DEV> Error querying GUID2 on %s!", this->address.toString(sb)); + } +} + +bool DeviceImpl::getDscEnable(bool *pEnable) +{ + AuxBus::status status = AuxBus::success; + unsigned sizeCompleted = 0; + unsigned pNakReason = 0; + NvU8 byte = 0; + + if (!pEnable || + !this->isDSCPossible() || + !this->devDoingDscDecompression || + !this->devDoingDscDecompression->plugged) + { + return false; + } + + status = 
this->devDoingDscDecompression->getDpcdData(NV_DPCD14_DSC_ENABLE,
+                                                        &byte,
+                                                        sizeof byte,
+                                                        &sizeCompleted,
+                                                        &pNakReason);
+
+    if (status != AuxBus::success)
+    {
+        DP_PRINTF(DP_ERROR, "DP-DEV> Error querying DSC Enable State!");
+        return false;
+    }
+
+    *pEnable = FLD_TEST_DRF(_DPCD14, _DSC_ENABLE, _DECOMPRESSION, _YES, byte);
+    return true;
+}
+
+void DeviceImpl::setDscDecompressionDevice(bool bDscCapBasedOnParent)
+{
+    // Decide if a DSC stream can be sent to the new device.
+    this->bDSCPossible = false;
+    this->devDoingDscDecompression = NULL;
+
+    if (this->multistream)
+    {
+        if ((this->peerDevice == Dongle) &&
+            (this->dpcdRevisionMajor != 0) &&
+            !bDscCapBasedOnParent)
+        {
+            // For a Peer Type 4 device with LAM DPCD rev != 0.0, check only the device's own DSC capability.
+            if (this->isDSCSupported())
+            {
+                this->bDSCPossible = true;
+                this->devDoingDscDecompression = this;
+            }
+        }
+        else
+        {
+            //
+            // Check the device's own and its parent's DSC capability.
+            // - The sink device will do the DSC decompression when
+            //   1. The sink device is capable of DSC decompression
+            //   2. The sink is on a logical port (8-15)
+            //
+            //   OR
+            //
+            //   1. The sink device is capable of DSC decompression
+            //   2. The parent of the sink is a Virtual Peer device
+            //   3. The parent of the sink supports DSC Pass through
+            //
+            // - The sink device's parent will do the DSC decompression when
+            //   1. The above conditions are not true.
+            //   2. The parent of the sink supports DSC decompression.
+            //
+            if (this->isDSCSupported())
+            {
+                if (this->isVideoSink() && this->getParent() != NULL)
+                {
+                    if (this->isLogical())
+                    {
+                        this->devDoingDscDecompression = this;
+                        this->bDSCPossible = true;
+                    }
+                    else if (this->parent->isVirtualPeerDevice() &&
+                             this->parent->isDSCPassThroughSupported())
+                    {
+                        //
+                        // This condition takes care of DSC capable sink devices
+                        // connected behind a DSC Pass through capable branch.
+                        //
+                        this->devDoingDscDecompression = this;
+                        this->bDSCPossible = true;
+                    }
+                    else if (this->parent->isDSCDecompressionSupported())
+                    {
+                        //
+                        // This condition takes care of DSC capable sink devices
+                        // connected behind a branch device that is not capable
+                        // of DSC pass through but can do DSC decompression.
+                        //
+                        this->bDSCPossible = true;
+                        this->devDoingDscDecompression = this->parent;
+                    }
+                }
+                else
+                {
+                    if (this->isDSCDecompressionSupported())
+                    {
+                        // This condition takes care of a branch device capable of DSC decoding.
+                        this->devDoingDscDecompression = this;
+                        this->bDSCPossible = true;
+                    }
+                }
+            }
+            else if (this->parent && this->parent->isDSCDecompressionSupported() &&
+                     !(this->isLogical()))
+            {
+                //
+                // This condition takes care of sink devices that are not DSC
+                // capable themselves but whose parent can do DSC decompression.
+                // We need to skip this if the sink is at a logical port.
+                //
+                this->bDSCPossible = true;
+                this->devDoingDscDecompression = this->parent;
+            }
+        }
+    }
+    else
+    {
+        if (this->isDSCDecompressionSupported())
+        {
+            this->bDSCPossible = true;
+            this->devDoingDscDecompression = this;
+        }
+    }
+}
+
+bool DeviceImpl::setDscEnable(bool enable)
+{
+    NvU8 dscEnableByte = 0;
+    NvU8 dscPassthroughByte = 0;
+    unsigned size = 0;
+    unsigned nakReason = NakUndefined;
+    bool bCurrDscEnable = false;
+    bool bDscPassThrough = false;
+    AuxBus::status dscEnableStatus = AuxBus::success;
+    AuxBus::status dscPassThroughStatus = AuxBus::success;
+    Address::StringBuffer buffer;
+    DP_USED(buffer);
+
+    if (!this->isDSCPossible() || !this->devDoingDscDecompression ||
+        !this->devDoingDscDecompression->plugged)
+    {
+        return false;
+    }
+
+    if ((this->devDoingDscDecompression == this) && !this->isLogical() && !(this->peerDevice == Dongle) && this->parent != NULL)
+    {
+        //
+        // If the device has a parent, the sink is on a MST link. On a MST link,
+        // if DSC is possible on the path, devDoingDscDecompression is the sink
+        // itself, and the sink is not on a logical port, then the parent should
+        // be DSC Pass through capable.
+        //
+        bDscPassThrough = true;
+    }
+
+    //
+    // Get the current DSC enable state.
+    // Ideally we don't need to check the current state, but the Synaptics DSC
+    // device that was used for initial DSC code development did not follow the
+    // spec, so we have added this check. Overwriting the same value should not
+    // have any impact as per the spec. Will remove this check once all DSC
+    // devices follow the spec.
+    //
+    if (!getDscEnable(&bCurrDscEnable))
+    {
+        DP_PRINTF(DP_ERROR, "DP-DEV> Not able to get DSC Enable State!");
+        return false;
+    }
+
+    if (enable)
+    {
+        if (bDscPassThrough)
+        {
+            dscPassthroughByte = FLD_SET_DRF(_DPCD20, _DSC_ENABLE, _PASS_THROUGH, _YES, dscPassthroughByte);
+            DP_PRINTF(DP_NOTICE, "DP-DEV> Enabling DSC Pass through on branch device - %s",
+                      this->parent->getTopologyAddress().toString(buffer));
+        }
+
+        if (!bCurrDscEnable)
+        {
+            dscEnableByte = FLD_SET_DRF(_DPCD14, _DSC_ENABLE, _DECOMPRESSION, _YES, dscEnableByte);
+            DP_PRINTF(DP_NOTICE, "DP-DEV> Enabling DSC decompression on device - %s",
+                      this->devDoingDscDecompression->getTopologyAddress().toString(buffer));
+        }
+        else
+        {
+            DP_PRINTF(DP_NOTICE, "DP-DEV> DSC decompression is already enabled on device - %s",
+                      this->devDoingDscDecompression->getTopologyAddress().toString(buffer));
+        }
+    }
+    else
+    {
+        if (bDscPassThrough)
+        {
+            dscPassthroughByte = FLD_SET_DRF(_DPCD20, _DSC_ENABLE, _PASS_THROUGH, _NO, dscPassthroughByte);
+            DP_PRINTF(DP_NOTICE, "DP-DEV> Disabling DSC Pass through on branch device - %s",
+                      this->parent->getTopologyAddress().toString(buffer));
+        }
+
+        if (bCurrDscEnable)
+        {
+            dscEnableByte = FLD_SET_DRF(_DPCD14, _DSC_ENABLE, _DECOMPRESSION, _NO, dscEnableByte);
+            DP_PRINTF(DP_NOTICE, "DP-DEV> Disabling DSC decompression on device - %s",
+                      this->devDoingDscDecompression->getTopologyAddress().toString(buffer));
+        }
+        else
+        {
+            DP_PRINTF(DP_NOTICE, "DP-DEV> DSC decompression is already disabled on device - %s",
+                      this->devDoingDscDecompression->getTopologyAddress().toString(buffer));
+        }
+    }
+
+    if (bDscPassThrough)
+    {
+        dscPassThroughStatus = this->parent->setDpcdData(NV_DPCD14_DSC_ENABLE,
+                                   &dscPassthroughByte, sizeof dscPassthroughByte, &size, &nakReason);
+        if (dscPassThroughStatus != AuxBus::success)
+        {
+            DP_PRINTF(DP_ERROR, "DP-DEV> Setting DSC Passthrough on parent branch %s failed",
+                      this->parent->getTopologyAddress().toString(buffer));
+        }
+    }
+
+    if
(enable != bCurrDscEnable) + { + dscEnableStatus = this->devDoingDscDecompression->setDpcdData(NV_DPCD14_DSC_ENABLE, + &dscEnableByte, sizeof dscEnableByte, &size, &nakReason); + if (dscEnableStatus != AuxBus::success) + { + DP_PRINTF(DP_ERROR, "DP-DEV> Setting DSC Enable on sink %s failed", + this->devDoingDscDecompression->getTopologyAddress().toString(buffer)); + + } + } + + if ((dscPassThroughStatus != AuxBus::success) || (dscEnableStatus != AuxBus::success)) + { + return false; + } + else + { + return true; + } +} + + + +bool DeviceImpl::setDscEnableDPToHDMIPCON(bool bDscEnable, bool bEnablePassThroughForPCON) +{ + NvU8 dscEnableByte = 0; + unsigned size = 0; + unsigned nakReason = NakUndefined; + AuxBus::status dscEnableStatus = AuxBus::success; + Address::StringBuffer buffer; + DP_USED(buffer); + + if (!this->isDSCPossible()) + { + DP_PRINTF(DP_ERROR, "DP-DEV> DSC is not supported on DP to HDMI PCON - %s", + this->getTopologyAddress().toString(buffer)); + return false; + } + + if (bDscEnable) + { + if(bEnablePassThroughForPCON) + { + dscEnableByte = FLD_SET_DRF(_DPCD20, _DSC_ENABLE, _PASS_THROUGH, _YES, dscEnableByte); + DP_PRINTF(DP_NOTICE, "DP-DEV> Enabling DSC Pass through on DP to HDMI PCON device - %s", + this->getTopologyAddress().toString(buffer)); + } + else + { + dscEnableByte = FLD_SET_DRF(_DPCD14, _DSC_ENABLE, _DECOMPRESSION, _YES, dscEnableByte); + DP_PRINTF(DP_NOTICE, "DP-DEV> Enabling DSC decompression on DP to HDMI PCON device - %s", + this->getTopologyAddress().toString(buffer)); + } + + } + + dscEnableStatus = this->setDpcdData(NV_DPCD14_DSC_ENABLE, + &dscEnableByte, sizeof dscEnableByte, &size, &nakReason); + + if (dscEnableStatus != AuxBus::success) + { + DP_PRINTF(DP_ERROR, "DP-DEV> Setting DSC Enable on DP to HDMI PCON %s failed", + this->getTopologyAddress().toString(buffer)); + return false; + + } + + return true; +} + +unsigned DeviceImpl::getDscVersionMajor() +{ + return dscCaps.versionMajor; +} + +unsigned DeviceImpl::getDscVersionMinor() +{ + return dscCaps.versionMinor; +} + +unsigned DeviceImpl::getDscRcBufferSize() +{ + return dscCaps.rcBuffersize; +} + +unsigned DeviceImpl::getDscRcBufferBlockSize() +{ + return dscCaps.rcBufferBlockSize; +} + +unsigned DeviceImpl::getDscMaxSlicesPerSink() +{ + return dscCaps.maxSlicesPerSink; +} + +unsigned DeviceImpl::getDscLineBufferBitDepth() +{ + return dscCaps.lineBufferBitDepth; +} + +NvBool DeviceImpl::isDscBlockPredictionSupported() +{ + return dscCaps.bDscBlockPredictionSupport; +} + +unsigned DeviceImpl::getDscMaxBitsPerPixel() +{ + return dscCaps.maxBitsPerPixelX16; +} + +NvBool DeviceImpl::isDscRgbSupported() +{ + return dscCaps.dscDecoderColorFormatCaps.bRgb; +} + +NvBool DeviceImpl::isDscYCbCr444Supported() +{ + return dscCaps.dscDecoderColorFormatCaps.bYCbCr444; +} + +NvBool DeviceImpl::isDscYCbCrSimple422Supported() +{ + return dscCaps.dscDecoderColorFormatCaps.bYCbCrSimple422; +} + +NvBool DeviceImpl::isDscYCbCr422NativeSupported() +{ + return dscCaps.dscDecoderColorFormatCaps.bYCbCrNative422; +} + +NvBool DeviceImpl::isDscYCbCr420NativeSupported() +{ + return dscCaps.dscDecoderColorFormatCaps.bYCbCrNative420; +} + +unsigned DeviceImpl::getDscPeakThroughputMode0() +{ + return dscCaps.dscPeakThroughputMode0; +} + +unsigned DeviceImpl::getDscPeakThroughputModel() +{ + return dscCaps.dscPeakThroughputMode1; +} + +unsigned DeviceImpl::getDscMaxSliceWidth() +{ + return dscCaps.dscMaxSliceWidth; +} + +unsigned DeviceImpl::getDscDecoderColorDepthSupportMask() +{ + return dscCaps.dscDecoderColorDepthMask; +} + 
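+//
+// The DSC getters above return raw, fixed-point DPCD values. As a minimal
+// sketch of client-side decoding (the 'dev' pointer here is hypothetical and
+// not part of this file), getDscMaxBitsPerPixel() can be unpacked like this:
+//
+//     unsigned bppX16 = dev->getDscMaxBitsPerPixel(); // bits/pixel scaled by 16
+//     unsigned whole  = bppX16 >> 4;                  // integer part
+//     unsigned frac   = bppX16 & 0xF;                 // sixteenths of a bpp
+//     // e.g. bppX16 == 128 decodes to 8.0 bits per pixel.
+//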
+bool DeviceImpl::isFakedMuxDevice() +{ + return connector->main->isDynamicMuxCapable() && bIsFakedMuxDevice; +} + +bool DeviceImpl::isPreviouslyFakedMuxDevice() +{ + return connector->main->isDynamicMuxCapable() && bIsPreviouslyFakedMuxDevice; +} + +static AuxBus::status _QueryCrcSink +( + DeviceImpl *bus, + NvU16 *sinkCrc0, + NvU16 *sinkCrc1, + NvU16 *sinkCrc2 +) +{ + AuxBus::status status = AuxBus::success; + // no sink op needs to be done if registers are NULL + if (sinkCrc0 == NULL) return status; + NvU32 addr = NV_DPCD14_DSC_CRC_0; + unsigned size = 2; + NvU8 cnt[2] = {0, 0}; + + unsigned sizeCompleted = 0; + unsigned nakReason = 0; + + status = bus->getDpcdData(addr, &cnt[0], size, &sizeCompleted, &nakReason); + + if (status != AuxBus::success) + { + return status; + } + *sinkCrc0 = (((NvU16) cnt[1]) << (sizeof(NvU8) * 8)) | cnt[0]; + + addr = NV_DPCD14_DSC_CRC_1; + size = 2; + + status = bus->getDpcdData(addr, &cnt[0], size, &sizeCompleted, &nakReason); + + if (status != AuxBus::success) + { + return status; + } + *sinkCrc1 = (((NvU16) cnt[1]) << (sizeof(NvU8) * 8)) | cnt[0]; + + addr = NV_DPCD14_DSC_CRC_2; + size = 2; + + status = bus->getDpcdData(addr, &cnt[0], size, &sizeCompleted, &nakReason); + + if (status != AuxBus::success) + { + return status; + } + *sinkCrc2 = (((NvU16) cnt[1]) << (sizeof(NvU8) * 8)) | cnt[0]; + return status; +} + +AuxBus::status DeviceImpl::dscCrcControl(NvBool bEnable, gpuDscCrc *gpuData, sinkDscCrc *sinkData) +{ + // GPU part + if (this->connector->main->dscCrcTransaction(bEnable, gpuData, (NvU16*) &(activeGroup->headIndex)) != true) + { + return AuxBus::nack; + } + + // sink part + if (!sinkData) + { + return AuxBus::success; + } + return _QueryCrcSink(this, &(sinkData->sinkCrc0), &(sinkData->sinkCrc1), &(sinkData->sinkCrc2)); +} + +bool DeviceImpl::getPCONCaps(PCONCaps *pPCONCaps) +{ + AuxBus::status status = AuxBus::success; + NvU32 addr = NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT(0); + NvU8 data[4] = {0}; + unsigned sizeCompleted = 0; + unsigned nakReason = 0; + NvU8 pConType = 0; + + status = this->getDpcdData(addr, &data[0], sizeof(data), &sizeCompleted, &nakReason); + if (status == AuxBus::success) + { + pConType = DRF_VAL(_DPCD, _DETAILED_CAP_INFO, _DWNSTRM_PORT_TX_TYPE, data[0]); + if (pConType == NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_HDMI) + { + this->connectorType = connectorHDMI; + pPCONCaps->maxTmdsClkRate = data[1] * 2500000; + + pPCONCaps->bSourceControlModeSupported = + FLD_TEST_DRF(_DPCD, _DETAILED_CAP_INFO, _SRC_CONTROL_MODE_SUPPORT, _YES, data[2]); + pPCONCaps->bConcurrentLTSupported = + FLD_TEST_DRF(_DPCD, _DETAILED_CAP_INFO, _CONCURRENT_LT_SUPPORT, _YES, data[2]); + + switch (DRF_VAL(_DPCD, _DETAILED_CAP_INFO, _MAX_FRL_LINK_BW_SUPPORT, data[2])) + { + case NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_9G: + pPCONCaps->maxHdmiLinkBandwidthGbps = 9; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_18G: + pPCONCaps->maxHdmiLinkBandwidthGbps = 18; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_24G: + pPCONCaps->maxHdmiLinkBandwidthGbps = 24; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_32G: + pPCONCaps->maxHdmiLinkBandwidthGbps = 32; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_40G: + pPCONCaps->maxHdmiLinkBandwidthGbps = 40; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_48G: + pPCONCaps->maxHdmiLinkBandwidthGbps = 48; + break; + default: + pPCONCaps->maxHdmiLinkBandwidthGbps = 0; + break; + } + + switch (DRF_VAL(_DPCD, 
_DETAILED_CAP_INFO, _MAX_BITS_PER_COMPONENT_DEF, data[2])) + { + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_10BPC: + pPCONCaps->maxBpc = 10; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_12BPC: + pPCONCaps->maxBpc = 12; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_16BPC: + pPCONCaps->maxBpc = 16; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_8BPC: + default: + pPCONCaps->maxBpc = 8; + break; + } + + DP_PRINTF(DP_NOTICE, "DP2HDMI PCON caps - Max TMDS Clk: %u LinkBWGbps: %u MaxBpc: %u", + pPCONCaps->maxTmdsClkRate, pPCONCaps->maxHdmiLinkBandwidthGbps, pPCONCaps->maxBpc); + } + } + else + { + DP_PRINTF(DP_ERROR, "DP-DEV> Error - DPCD Read for detailed port capabilities (0x80) failed."); + return false; + } + return true; +} + +bool DeviceImpl::getIgnoreMSACap() +{ + NvU8 byte = 0; + unsigned size = 0; + unsigned nakReason = NakUndefined; + AuxBus::status status; + + if (bIgnoreMsaCapCached) + { + return bIgnoreMsaCap; + } + + if (this->isMultistream()) + { + status = this->getDpcdData(NV_DPCD_DOWN_STREAM_PORT, + &byte, sizeof byte, &size, &nakReason); + if (status == AuxBus::success) + { + if(FLD_TEST_DRF(_DPCD, _DOWN_STREAM_PORT, _MSA_TIMING_PAR_IGNORED, _YES, byte)) + { + if (this->parent && this->parent->isVirtualPeerDevice()) + { + byte = 0; + size = 0; + nakReason = NakUndefined; + + status = this->parent->getDpcdData(NV_DPCD_DOWN_STREAM_PORT, + &byte, sizeof byte, &size, &nakReason); + if (status == AuxBus::success) + { + if(FLD_TEST_DRF(_DPCD, _DOWN_STREAM_PORT, _MSA_TIMING_PAR_IGNORED, _YES, byte)) + { + bIgnoreMsaCap = true; + } + else + { + bIgnoreMsaCap = false; + } + bIgnoreMsaCapCached = true; + } + else + { + DP_PRINTF(DP_ERROR, "DP-DEV> Aux Read from DPCD offset 0x7 failed!"); + return false; + } + } + else + { + bIgnoreMsaCap = true; + bIgnoreMsaCapCached = true; + } + } + else + { + bIgnoreMsaCap = false; + bIgnoreMsaCapCached = true; + } + } + else + { + DP_PRINTF(DP_ERROR, "DP-DEV> Aux Read from DPCD offset 0x7 failed!"); + return false; + } + } + else + { + bIgnoreMsaCap = hal->getMsaTimingparIgnored(); + bIgnoreMsaCapCached = true; + } + + return bIgnoreMsaCap; +} + +AuxRetry::status DeviceImpl::setIgnoreMSAEnable(bool msaTimingParamIgnoreEn) +{ + NvU8 byte = 0; + unsigned size = 0; + unsigned nakReason = NakUndefined; + AuxBus::status status; + + if (this->isMultistream()) + { + status = this->getDpcdData(NV_DPCD_DOWNSPREAD_CTRL, + &byte, sizeof byte, &size, &nakReason); + if (status == AuxBus::success) + { + if (msaTimingParamIgnoreEn) + { + byte = FLD_SET_DRF(_DPCD, _DOWNSPREAD_CTRL, _MSA_TIMING_PAR_IGNORED, _TRUE, byte); + } + else + { + byte = FLD_SET_DRF(_DPCD, _DOWNSPREAD_CTRL, _MSA_TIMING_PAR_IGNORED, _FALSE, byte); + } + + status = this->setDpcdData(NV_DPCD_DOWNSPREAD_CTRL, + &byte, sizeof byte, &size, &nakReason); + if (status == AuxBus::success) + { + return AuxRetry::ack; + } + else + { + DP_PRINTF(DP_ERROR, "DP-DEV> Aux Write to DPCD offset 0x107 failed!"); + return AuxRetry::nack; + } + } + else + { + DP_PRINTF(DP_ERROR, "DP-DEV> Aux Read from DPCD offset 0x107 failed!"); + return AuxRetry::nack; + } + } + else + { + return hal->setIgnoreMSATimingParamters(msaTimingParamIgnoreEn); + } +} + +bool DeviceImpl::getDeviceSpecificData(NvU8 *oui, NvU8 *devIdString, + NvU8 *hwRevision, NvU8 *swMajorRevision, + NvU8 *swMinorRevision) +{ + NvU8 buffer[16] = {0}; + unsigned size = 13U; + unsigned sizeCompleted = 0U; + unsigned nakReason = NakUndefined; + unsigned i = 0U; + unsigned 
address;
+
+    if (oui == NULL || devIdString == NULL || hwRevision == NULL ||
+        swMajorRevision == NULL || swMinorRevision == NULL)
+    {
+        return false;
+    }
+
+    if (!this->audioSink && !this->videoSink)
+    {
+        address = NV_DPCD_BRANCH_IEEE_OUI;
+    }
+    else
+    {
+        address = NV_DPCD_SINK_IEEE_OUI;
+    }
+
+    if (AuxBus::success != this->getDpcdData(address, &buffer[0],
+                                             size, &sizeCompleted, &nakReason))
+    {
+        return false;
+    }
+
+    // 0x400-0x402 for a sink device and 0x500-0x502 for a branch device give the OUI.
+    for (i = 0U; i < DEVICE_OUI_SIZE; i++)
+    {
+        oui[i] = buffer[i];
+    }
+
+    //
+    // 0x403-0x408 for a sink device and 0x503-0x508 for a branch device provide
+    // the device identification string.
+    //
+    for (unsigned j = 0U; j < NV_DPCD_SOURCE_DEV_ID_STRING__SIZE; i++, j++)
+    {
+        devIdString[j] = buffer[i];
+    }
+
+    // 0x409 for sink and 0x509 for branch provides the HW revision.
+    // 0x40A-0x40B for sink and 0x50A-0x50B for branch provides the SW/firmware revision.
+    *hwRevision = buffer[9];
+    *swMajorRevision = buffer[10];
+    *swMinorRevision = buffer[11];
+
+    return true;
+}
+
+bool DeviceImpl::getParentSpecificData(NvU8 *oui, NvU8 *devIdString,
+                                       NvU8 *hwRevision, NvU8 *swMajorRevision,
+                                       NvU8 *swMinorRevision)
+{
+    if (this->parent == NULL)
+    {
+        return false;
+    }
+
+    return this->parent->getDeviceSpecificData(oui, devIdString, hwRevision,
+                                               swMajorRevision, swMinorRevision);
+}
+
+bool DeviceImpl::setModeList(DisplayPort::DpModesetParams *modeList, unsigned numModes)
+{
+    // Create a dummy group for compoundQuery
+    GroupImpl g(connector);
+    g.insert(this);
+
+    maxModeBwRequired = 0;
+
+    for (unsigned modeItr = 0; modeItr < numModes; modeItr++)
+    {
+        connector->beginCompoundQuery();
+        DscParams dscParams = DscParams();
+        dscParams.bCheckWithDsc = true;
+
+        DpModesetParams &modesetParams = modeList[modeItr];
+        NvU64 bpp = modesetParams.modesetInfo.depth;
+        DP_IMP_ERROR dpImpError = DP_IMP_ERROR_NONE;
+
+        if (connector->compoundQueryAttach((Group *)&g, modesetParams, &dscParams, &dpImpError))
+        {
+            if (dscParams.bEnableDsc)
+            {
+                bpp = divide_ceil(dscParams.bitsPerPixelX16, 16);
+            }
+
+            NvU64 modeBwRequired = modesetParams.modesetInfo.pixelClockHz * bpp;
+            if (maxModeBwRequired < modeBwRequired)
+            {
+                maxModeBwRequired = modeBwRequired;
+            }
+        }
+
+        connector->endCompoundQuery();
+    }
+
+    DP_PRINTF(DP_INFO, "Computed Max mode BW: %u Mbps", (unsigned)(maxModeBwRequired / (1000 * 1000)));
+
+    connector->updateDpTunnelBwAllocation();
+
+    return true;
+}
+
+void
+DeviceHDCPDetection::start()
+{
+    if (parent->isNativeDPCD())
+    {
+        // Native DPCD devices (SST and MST alike) read the HDCP caps directly.
+        BCaps bCaps = {0};
+        unsigned char hdcp22BCAPS[HDCP22_BCAPS_SIZE];
+
+        // Check if this is an hdcp2.x-only device and probe hdcp22Bcaps.
+        parent->hal->getHdcp22BCaps(bCaps, hdcp22BCAPS);
+        if (bCaps.HDCPCapable)
+        {
+            parent->nvBCaps[0] = FLD_SET_DRF_NUM(_DPCD, _HDCP_BCAPS_OFFSET,
+                                                 _HDCP_CAPABLE, bCaps.HDCPCapable,
+                                                 parent->nvBCaps[0]) |
+                                 FLD_SET_DRF_NUM(_DPCD, _HDCP_BCAPS_OFFSET, _HDCP_REPEATER,
+                                                 bCaps.repeater, parent->nvBCaps[0]);
+
+            //
+            // No need to validate the 1.x BKSV here; hdcp22 authentication
+            // validates the certificate against the BKSV in the uproc.
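+            // As decoded later in handleRemoteDpcdReadDownReply(), bit 1 of the
+            // raw hdcp22BCAPS byte signals HDCP2.2 capability and bit 0 signals
+            // a repeater: for example, a readback of 0x02 is a capable
+            // non-repeater sink, while 0x03 is a capable repeater.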
+            //
+            parent->isHDCPCap = True;
+            waivePendingHDCPCapDoneNotification();
+            return;
+        }
+        else
+        {
+            parent->hal->getBCaps(bCaps, parent->BCAPS);
+            *(parent->nvBCaps) = *(parent->BCAPS);
+            if (bCaps.HDCPCapable)
+            {
+                NvU8 tempBKSV[HDCP_KSV_SIZE] = {0};
+                if (parent->hal->getBKSV(tempBKSV))
+                {
+                    if (hdcpValidateKsv(tempBKSV, HDCP_KSV_SIZE))
+                    {
+                        for (unsigned i = 0; i < HDCP_KSV_SIZE; i++)
+                            parent->BKSV[i] = tempBKSV[i];
+                    }
+                }
+                parent->isHDCPCap = True;
+                waivePendingHDCPCapDoneNotification();
+                return;
+            }
+        }
+
+        parent->isHDCPCap = False;
+        waivePendingHDCPCapDoneNotification();
+    }
+    else
+    {
+        parent->isHDCPCap = Indeterminate;
+        Address parentAddress = parent->address.parent();
+        // For DP1.4 atomic messaging, HDCP detection can be delayed, so lowering the priority.
+        remote22BCapsReadMessage.setMessagePriority(NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT);
+        remote22BCapsReadMessage.set(parentAddress, parent->address.tail(), NV_DPCD_HDCP22_BCAPS_OFFSET, HDCP22_BCAPS_SIZE);
+        bCapsReadCompleted = false;
+        bBCapsReadMessagePending = true;
+        messageManager->post(&remote22BCapsReadMessage, this);
+        if (parent->connector)
+            parent->connector->incPendingRemoteHdcpDetection();
+    }
+}
+
+void
+DeviceHDCPDetection::messageCompleted
+(
+    MessageManager::Message *from
+)
+{
+    if ((from == &remoteBKSVReadMessage) ||
+        (from == &remoteBCapsReadMessage) ||
+        (from == &remote22BCapsReadMessage))
+    {
+        handleRemoteDpcdReadDownReply(from);
+    }
+}
+
+void
+DeviceHDCPDetection::handleRemoteDpcdReadDownReply
+(
+    MessageManager::Message *from
+)
+{
+    NvU8 i2cBcaps;
+    unsigned dataCompleted;
+    unsigned defaultReason;
+    Address::StringBuffer sb;
+    DP_USED(sb);
+
+    if (from == &remote22BCapsReadMessage)
+    {
+        bCapsReadCompleted = true;
+        bBCapsReadMessagePending = false;
+        DP_PRINTF(DP_NOTICE, "DP-QM> REMOTE_DPCD_READ(22BCaps) {%p} at '%s' completed",
+                  (MessageManager::Message *)&remote22BCapsReadMessage,
+                  parent->address.toString(sb));
+
+        if (remote22BCapsReadMessage.replyNumOfBytesReadDPCD() != HDCP22_BCAPS_SIZE)
+        {
+            DP_ASSERT(0 && "Incomplete 22BCaps in remote DPCD read message");
+            parent->isHDCPCap = False;
+
+            // Destruct only when no message is pending
+            if (!(bBKSVReadMessagePending || bBCapsReadMessagePending))
+            {
+                parent->isDeviceHDCPDetectionAlive = false;
+                delete this;
+            }
+            return;
+        }
+
+        DP_ASSERT(remote22BCapsReadMessage.replyPortNumber() == parent->address.tail());
+        if (!!(*remote22BCapsReadMessage.replyGetData() & 0x2))
+        {
+            unsigned char hdcp22BCAPS;
+            bksvReadCompleted = true;
+            bBKSVReadMessagePending = false;
+
+            hdcp22BCAPS = *remote22BCapsReadMessage.replyGetData();
+
+            parent->nvBCaps[0] = FLD_SET_DRF_NUM(_DPCD, _HDCP_BCAPS_OFFSET,
+                                                 _HDCP_CAPABLE, (hdcp22BCAPS & 0x2) ? 1 : 0,
+                                                 parent->nvBCaps[0]) |
+                                 FLD_SET_DRF_NUM(_DPCD, _HDCP_BCAPS_OFFSET, _HDCP_REPEATER,
+                                                 (hdcp22BCAPS & 0x1) ? 1 : 0, parent->nvBCaps[0]);
+
+            // hdcp22 will validate the certificate's BKSV directly.
+            isBCapsHDCP = isValidBKSV = true;
+
+            DP_PRINTF(DP_NOTICE, "DP-QM> Device at '%s' reports valid 22BCAPS : %x",
+                      parent->address.toString(sb), *remote22BCapsReadMessage.replyGetData());
+        }
+        else
+        {
+            Address parentAddress = parent->address.parent();
+            // For DP1.4 atomic messaging, HDCP detection can be delayed, so lowering the priority.
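+            //
+            // Fall back to an HDCP 1.x probe over sideband: read the 5-byte
+            // BKSV and the BCaps register from the parent branch. A plausible
+            // BKSV carries exactly 20 ones out of its 40 bits (see
+            // hdcpValidateKsv() below); e.g. five 0xAA bytes (4 ones each)
+            // pass the check, while an all-zero KSV fails it.
+            //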
+            remoteBKSVReadMessage.setMessagePriority(NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT);
+            remoteBKSVReadMessage.set(parentAddress, parent->address.tail(), NV_DPCD_HDCP_BKSV_OFFSET, HDCP_KSV_SIZE);
+            bksvReadCompleted = false;
+            bBKSVReadMessagePending = true;
+            messageManager->post(&remoteBKSVReadMessage, this);
+            // For DP1.4 atomic messaging, HDCP detection can be delayed, so lowering the priority.
+            remoteBCapsReadMessage.setMessagePriority(NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT);
+            remoteBCapsReadMessage.set(parentAddress, parent->address.tail(), NV_DPCD_HDCP_BCAPS_OFFSET, HDCP_BCAPS_SIZE);
+            bCapsReadCompleted = false;
+            bBCapsReadMessagePending = true;
+            messageManager->post(&remoteBCapsReadMessage, this);
+        }
+    }
+    else if (from == &remoteBKSVReadMessage)
+    {
+        bksvReadCompleted = true;
+        bBKSVReadMessagePending = false;
+        DP_PRINTF(DP_NOTICE, "DP-QM> REMOTE_DPCD_READ(BKSV) {%p} at '%s' completed",
+                  (MessageManager::Message *)&remoteBKSVReadMessage,
+                  parent->address.toString(sb));
+
+        if (remoteBKSVReadMessage.replyNumOfBytesReadDPCD() != HDCP_KSV_SIZE)
+        {
+            DP_ASSERT(0 && "Incomplete BKSV in remote DPCD read message");
+            parent->isHDCPCap = False;
+
+            // Destruct only when no message is pending
+            if (!(bBKSVReadMessagePending || bBCapsReadMessagePending))
+            {
+                parent->isDeviceHDCPDetectionAlive = false;
+                delete this;
+            }
+            return;
+        }
+
+        DP_ASSERT(remoteBKSVReadMessage.replyPortNumber() == parent->address.tail());
+        if (hdcpValidateKsv(remoteBKSVReadMessage.replyGetData(), HDCP_KSV_SIZE))
+        {
+            isValidBKSV = true;
+            for (unsigned i = 0; i < HDCP_KSV_SIZE; i++)
+                parent->BKSV[i] = (remoteBKSVReadMessage.replyGetData())[i];
+
+            DP_PRINTF(DP_NOTICE, "DP-QM> Device at '%s' reports a valid BKSV.",
+                      parent->address.toString(sb));
+        }
+    }
+    else if (from == &remoteBCapsReadMessage)
+    {
+        bCapsReadCompleted = true;
+        bBCapsReadMessagePending = false;
+        DP_PRINTF(DP_NOTICE, "DP-QM> REMOTE_DPCD_READ(BCaps) {%p} at '%s' completed",
+                  (MessageManager::Message *)&remoteBCapsReadMessage,
+                  parent->address.toString(sb));
+
+        if (remoteBCapsReadMessage.replyNumOfBytesReadDPCD() != HDCP_BCAPS_SIZE)
+        {
+            DP_ASSERT(0 && "Incomplete BCaps in remote DPCD read message");
+            parent->isHDCPCap = False;
+
+            // Destruct only when no message is pending
+            if (!(bBKSVReadMessagePending || bBCapsReadMessagePending))
+            {
+                parent->isDeviceHDCPDetectionAlive = false;
+                delete this;
+            }
+            return;
+        }
+
+        DP_ASSERT(remoteBCapsReadMessage.replyPortNumber() == parent->address.tail());
+        if (!!(*remoteBCapsReadMessage.replyGetData() & 0x1))
+        {
+            *(parent->nvBCaps) = *(parent->BCAPS) = *remoteBCapsReadMessage.replyGetData();
+            isBCapsHDCP = true;
+
+            DP_PRINTF(DP_NOTICE, "DP-QM> Device at '%s' reports valid BCAPS : %x",
+                      parent->address.toString(sb), *remoteBCapsReadMessage.replyGetData());
+        }
+        else
+        {
+            if (isValidBKSV)
+            {
+                DP_PRINTF(DP_WARNING, "DP-QM> Device at '%s' has a valid BKSV but invalid BCAPS : %x",
+                          parent->address.toString(sb), *remoteBCapsReadMessage.replyGetData());
+
+                // Read the BCAPS DDC offset
+                parent->transaction(AuxBus::read, AuxBus::i2cMot, HDCP_I2C_CLIENT_ADDR, &i2cBcaps,
+                                    1, &dataCompleted, &defaultReason, HDCP_BCAPS_DDC_OFFSET, 1);
+
+                DP_PRINTF(DP_NOTICE, "DP-QM> Device at '%s' has DDC BCAPS: %x",
+                          parent->address.toString(sb), i2cBcaps);
+
+                // If the reserved bit is SET, the device supports HDCP
+                if (i2cBcaps & HDCP_BCAPS_DDC_EN_BIT)
+                {
+                    isBCapsHDCP = true;
+                    // Set the HDCP cap BCAPS according to the DP protocol
+                    *(parent->BCAPS) |= HDCP_BCAPS_DP_EN_BIT;
+                    *(parent->nvBCaps) = *(parent->BCAPS);
+                }
+            }
+        }
+    }
+
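+    //
+    // Both replies (BKSV and BCaps, or the single 22BCaps read) must have
+    // landed before the HDCP verdict is computed below; until then the
+    // device is left marked Indeterminate.
+    //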
+    if (bCapsReadCompleted && bksvReadCompleted)
+    {
+        // Complete the remote HDCP probe and check if we can power down again.
+        if (parent->connector)
+        {
+            parent->connector->decPendingRemoteHdcpDetection();
+            parent->connector->isNoActiveStreamAndPowerdown();
+        }
+
+        if (isValidBKSV && isBCapsHDCP)
+        {
+            parent->isHDCPCap = True;
+        }
+        else
+        {
+            parent->isHDCPCap = False;
+        }
+
+        // Destruct only when no message is pending
+        if (!(bBKSVReadMessagePending || bBCapsReadMessagePending))
+        {
+            parent->isDeviceHDCPDetectionAlive = false;
+            delete this;
+        }
+    }
+    else
+    {
+        parent->isHDCPCap = Indeterminate;
+    }
+}
+
+bool
+DeviceHDCPDetection::hdcpValidateKsv
+(
+    const NvU8 *ksv,
+    NvU32 Size
+)
+{
+    if (HDCP_KSV_SIZE <= Size)
+    {
+        NvU32 i, j;
+        NvU32 count_ones = 0;
+        for (i = 0; i < HDCP_KSV_SIZE; i++)
+        {
+            for (j = 0; j < 8; j++)
+            {
+                if (ksv[i] & (1 << j))
+                {
+                    count_ones++;
+                }
+            }
+        }
+
+        if (count_ones == 20)
+        {
+            return true;
+        }
+    }
+    return false;
+}
+
+void
+DeviceHDCPDetection::messageFailed
+(
+    MessageManager::Message *from,
+    NakData *nakData
+)
+{
+    if (from == &remoteBKSVReadMessage)
+    {
+        if ((retriesRemoteBKSVReadMessage < DPCD_REMOTE_DPCD_READ_MESSAGE_RETRIES) &&
+            (nakData->reason == NakDefer || nakData->reason == NakTimeout))
+        {
+            retriesRemoteBKSVReadMessage++;
+            retryRemoteBKSVReadMessage = bBKSVReadMessagePending = true;
+            timer->queueCallback(this, "BKSV", DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN_BKSV);
+            return;
+        }
+        //
+        // If messageFailed() is called after all retries have expired, or for
+        // any other reason, then reset the bBKSVReadMessagePending flag.
+        //
+        bBKSVReadMessagePending = false;
+    }
+
+    if (from == &remoteBCapsReadMessage)
+    {
+        if ((retriesRemoteBCapsReadMessage < DPCD_REMOTE_DPCD_READ_MESSAGE_RETRIES) &&
+            (nakData->reason == NakDefer || nakData->reason == NakTimeout))
+        {
+            retriesRemoteBCapsReadMessage++;
+            retryRemoteBCapsReadMessage = bBCapsReadMessagePending = true;
+            timer->queueCallback(this, "BCaps", DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN_BKSV);
+            return;
+        }
+        //
+        // If messageFailed() is called after all retries have expired, or for
+        // any other reason, then reset the bBCapsReadMessagePending flag.
+        //
+        bBCapsReadMessagePending = false;
+    }
+
+    if (from == &remote22BCapsReadMessage)
+    {
+        if ((retriesRemote22BCapsReadMessage < DPCD_REMOTE_DPCD_READ_MESSAGE_RETRIES) &&
+            (nakData->reason == NakDefer || nakData->reason == NakTimeout))
+        {
+            retriesRemote22BCapsReadMessage++;
+            retryRemote22BCapsReadMessage = bBCapsReadMessagePending = true;
+            timer->queueCallback(this, "22BCaps", DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN_BKSV);
+            return;
+        }
+        //
+        // If messageFailed() is called after all retries have expired, or for
+        // any other reason, then reset the bBCapsReadMessagePending flag.
+        //
+        bBCapsReadMessagePending = false;
+    }
+
+    parent->isHDCPCap = False;
+    Address::StringBuffer sb;
+    DP_USED(sb);
+    DP_PRINTF(DP_ERROR, "DP-QM> Message %s {%p} at '%s' failed. Device marked as not HDCP capable.",
+              from == &remoteBKSVReadMessage ? "REMOTE_DPCD_READ(BKSV)" :
+              from == &remoteBCapsReadMessage ? "REMOTE_DPCD_READ(BCaps)" :
+              from == &remote22BCapsReadMessage ? "REMOTE_DPCD_READ(22BCaps)" : "???",
+              from, parent->address.toString(sb));
+
+    // Destruct only when no message is pending
+    if (!(bBKSVReadMessagePending || bBCapsReadMessagePending))
+    {
+        parent->isDeviceHDCPDetectionAlive = false;
+
+        // Complete the remote HDCP probe and check if we can power down again.
+        if (parent->connector)
+        {
+            parent->connector->decPendingRemoteHdcpDetection();
+            parent->connector->isNoActiveStreamAndPowerdown();
+        }
+
+        delete this;
+    }
+}
+
+void
+DeviceHDCPDetection::expired
+(
+    const void *tag
+)
+{
+    // Clear stale HDCP state when the monitor instance has already been destroyed.
+    if (!parent->plugged)
+    {
+        if (retryRemoteBKSVReadMessage)
+        {
+            retryRemoteBKSVReadMessage = false;
+            bBKSVReadMessagePending = false;
+        }
+        else if (retryRemoteBCapsReadMessage)
+        {
+            retryRemoteBCapsReadMessage = false;
+            bBCapsReadMessagePending = false;
+        }
+        else if (retryRemote22BCapsReadMessage)
+        {
+            retryRemote22BCapsReadMessage = false;
+            bBCapsReadMessagePending = false;
+        }
+
+        if (!(bBKSVReadMessagePending || bBCapsReadMessagePending))
+        {
+            parent->isDeviceHDCPDetectionAlive = false;
+            delete this;
+        }
+        return;
+    }
+
+    if (retryRemoteBKSVReadMessage)
+    {
+        Address parentAddress = parent->address.parent();
+
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_PRINTF(DP_NOTICE, "DP-QM> Requeueing REMOTE_DPCD_READ_MESSAGE(BKSV) to %s", parentAddress.toString(sb));
+
+        retryRemoteBKSVReadMessage = false;
+        remoteBKSVReadMessage.set(parentAddress, parent->address.tail(), NV_DPCD_HDCP_BKSV_OFFSET, HDCP_KSV_SIZE);
+        DP_PRINTF(DP_NOTICE, "DP-QM> Get BKSV (remotely) for '%s' sent REMOTE_DPCD_READ {%p}", parent->address.toString(sb), &remoteBKSVReadMessage);
+
+        bBKSVReadMessagePending = true;
+        messageManager->post(&remoteBKSVReadMessage, this);
+    }
+
+    if (retryRemoteBCapsReadMessage)
+    {
+        Address parentAddress = parent->address.parent();
+
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_PRINTF(DP_NOTICE, "DP-QM> Requeueing REMOTE_DPCD_READ_MESSAGE(BCAPS) to %s", parentAddress.toString(sb));
+
+        retryRemoteBCapsReadMessage = false;
+        remoteBCapsReadMessage.set(parentAddress, parent->address.tail(), NV_DPCD_HDCP_BCAPS_OFFSET, HDCP_BCAPS_SIZE);
+        DP_PRINTF(DP_NOTICE, "DP-QM> Get BCaps (remotely) for '%s' sent REMOTE_DPCD_READ {%p}", parent->address.toString(sb), &remoteBCapsReadMessage);
+
+        bBCapsReadMessagePending = true;
+        messageManager->post(&remoteBCapsReadMessage, this);
+    }
+
+    if (retryRemote22BCapsReadMessage)
+    {
+        Address parentAddress = parent->address.parent();
+
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_PRINTF(DP_NOTICE, "DP-QM> Requeueing REMOTE_DPCD_READ_MESSAGE(22BCAPS) to %s", parentAddress.toString(sb));
+
+        retryRemote22BCapsReadMessage = false;
+        remote22BCapsReadMessage.set(parentAddress, parent->address.tail(), NV_DPCD_HDCP22_BCAPS_OFFSET, HDCP22_BCAPS_SIZE);
+        DP_PRINTF(DP_NOTICE, "DP-QM> Get 22BCaps (remotely) for '%s' sent REMOTE_DPCD_READ {%p}", parent->address.toString(sb), &remote22BCapsReadMessage);
+
+        bBCapsReadMessagePending = true;
+        messageManager->post(&remote22BCapsReadMessage, this);
+    }
+}
+
+DeviceHDCPDetection::~DeviceHDCPDetection()
+{
+    parent->isDeviceHDCPDetectionAlive = false;
+
+    // Clear all pending callbacks/messages
+    if (this->timer)
+    {
+        this->timer->cancelCallbacks(this);
+    }
+
+    if (this->messageManager)
+    {
+        this->messageManager->cancelAll(&remoteBKSVReadMessage);
+        this->messageManager->cancelAll(&remoteBCapsReadMessage);
+        this->messageManager->cancelAll(&remote22BCapsReadMessage);
+    }
+}
+
+void
+DeviceHDCPDetection::waivePendingHDCPCapDoneNotification()
+{
+    // Waive the pending hdcpCapDone notification.
+    parent->shadow.hdcpCapDone = true;
+    parent->isDeviceHDCPDetectionAlive = false;
+    delete this;
+}
diff --git a/src/common/displayport/src/dp_discovery.cpp b/src/common/displayport/src/dp_discovery.cpp
new file mode 100644
index 0000000..c5456d2 --- /dev/null +++ b/src/common/displayport/src/dp_discovery.cpp @@ -0,0 +1,939 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_discovery.cpp * +* The DP MST discovery manager. * +* * +\***************************************************************************/ + +#include "dp_discovery.h" +#include "dp_messages.h" +#include "dp_tracing.h" +#include "dp_printf.h" + +using namespace DisplayPort; + +void DiscoveryManager::notifyLongPulse(bool status) +{ + if (status) + { + Device device; + device.address = Address(0); + device.branch = hal->getSupportsMultistream(); + device.legacy = false; + + detectBranch(device); + } + else if (!status) + { + removeDeviceTree(Address()); + } +} + +void DiscoveryManager::detectBranch(Device device) +{ + Address::StringBuffer sb; + DP_USED(sb); + + // + // 1. Create a LINK_ADDRESS_MESSAGE to send to this target so that we can find who he is + // 2. Create a REMOTE_DPCD_WRITE to set the GUID for this target + // *alternatively* we may have to use the local DPCD HAL to write this + // 3. Enumerate any children that we may wish to queue detect on. 
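+    // A branch needs a non-zero GUID so that later sideband traffic (and
+    // CSN-driven lookups such as findChildDeviceForBranchWithGuid()) can
+    // identify it independently of its topology address.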
+    //
+    DP_PRINTF(DP_NOTICE, "%s(): target = %s", __FUNCTION__, device.address.toString(sb));
+
+    BranchDetection * branchDetection = new BranchDetection(this, device);
+    outstandingBranchDetections.insertBack(branchDetection);
+    branchDetection->start();
+}
+
+void DiscoveryManager::detectSink(DiscoveryManager::Device device, bool bFromCSN)
+{
+    Address::StringBuffer sb;
+    DP_USED(sb);
+
+    DP_PRINTF(DP_NOTICE, "%s(): target = %s", __FUNCTION__, device.address.toString(sb));
+    SinkDetection * sinkDetection = new SinkDetection(this, device, bFromCSN);
+    sinkDetection->start();
+}
+
+DiscoveryManager::Device * DiscoveryManager::findDevice(const Address & address)
+{
+    for (unsigned i = 0; i < currentDevicesCount; i++)
+        if (currentDevices[i].address == address)
+        {
+            if (currentDevices[i].peerGuid.isGuidZero() && currentDevices[i].peerDevice != Dongle &&
+                (currentDevices[i].dpcdRevisionMajor >= 1 && currentDevices[i].dpcdRevisionMinor >= 2))
+            {
+                DP_ASSERT(0 && "Zero guid for device even though it's not a dongle type.");
+            }
+            return &currentDevices[i];
+        }
+
+    return 0;
+}
+
+DiscoveryManager::Device * DiscoveryManager::findDevice(GUID & guid)
+{
+    if (guid.isGuidZero())
+    {
+        DP_ASSERT(0 && "zero guid search");
+        return 0;
+    }
+
+    for (unsigned i = 0; i < currentDevicesCount; i++)
+    {
+        if (currentDevices[i].dpcdRevisionMajor <= 1 && currentDevices[i].dpcdRevisionMinor < 2)
+            continue;
+
+        if (currentDevices[i].peerGuid == guid)
+            return &currentDevices[i];
+    }
+
+    return 0;
+}
+
+void DiscoveryManager::addDevice(const DiscoveryManager::Device & device)
+{
+    Address::StringBuffer sb;
+    DP_USED(sb);
+
+    GUID guid = device.peerGuid;
+    if (guid.isGuidZero() &&
+        (device.peerDevice != Dongle) &&
+        (device.dpcdRevisionMajor >= 1 && device.dpcdRevisionMinor >= 2))
+    {
+        DP_ASSERT(0 && "GUID missing for the device");
+    }
+    DP_ASSERT(!findDevice(device.address) && "Redundant add");
+    sink->discoveryNewDevice(device);
+
+    DP_PRINTF(DP_NOTICE, "DP-DM> New device '%s' %s %s %s", device.address.toString(sb),
+              device.branch ? "Branch" : "", device.legacy ? "Legacy" : "",
+              device.peerDevice == Dongle ? "Dongle" :
+              device.peerDevice == DownstreamSink ? "DownstreamSink" : "");
+
+    Address::NvU32Buffer addrBuffer;
+    dpMemZero(addrBuffer, sizeof(addrBuffer));
+    device.address.toNvU32Buffer(addrBuffer);
+    NV_DPTRACE_INFO(NEW_MST_DEVICE, device.address.size(), addrBuffer[0], addrBuffer[1],
+                    addrBuffer[2], addrBuffer[3], device.branch, device.legacy, device.peerDevice);
+
+    if (currentDevicesCount < maximumTopologyNodes)
+    {
+        currentDevices[currentDevicesCount++] = device;
+    }
+}
+
+void DiscoveryManager::removeDevice(Device * device)
+{
+    Address::StringBuffer sb;
+    DP_USED(sb);
+
+    DP_PRINTF(DP_NOTICE, "DP-DM> Lost device '%s' %s %s %s", device->address.toString(sb),
+              device->branch ? "Branch" : "", device->legacy ? "Legacy" : "",
+              device->peerDevice == Dongle ? "Dongle" :
"DownstreamSink" : ""); + + sink->discoveryLostDevice(device->address); + + for (unsigned i = (unsigned)(device-¤tDevices[0]); i < currentDevicesCount - 1; i++) + currentDevices[i] = currentDevices[i+1]; + currentDevicesCount--; +} + +void DiscoveryManager::removeDeviceTree(const Address & prefix) +{ + for (unsigned i = 0; i < currentDevicesCount;) + if (currentDevices[i].address.under(prefix)) + removeDevice(¤tDevices[i]); + else + i++; +} + +DiscoveryManager::Device * DiscoveryManager::findChildDeviceForBranchWithGuid +( + GUID guid, + unsigned port, + Address & childAddr +) +{ + // Find it in relevant parent's device list + DiscoveryManager::Device * parentDevice = findDevice(guid); + if (!parentDevice) + { + DP_PRINTF(DP_ERROR, "DM> No Parent present for the device in DB."); + return 0; + } + + childAddr = parentDevice->address; + childAddr.append(port); + return (findDevice(childAddr)); +} + +void DiscoveryManager::SinkDetection::detectCompleted(bool passed) +{ + // we could not read or write the guid + if (!passed) + { + // + // DP1.2 monitors that do not support GUID get filtered and dropped as 'not present'. + // Instead we demote such monitors to DP1.1 and continue sink detection so that end + // user at least gets active display scanout on such monitors (albeit reduced to DP1.1). + // + if (device.dpcdRevisionMajor > 1 || device.dpcdRevisionMinor >= 2) + { + Address::StringBuffer sb; + DP_USED(sb); + DP_PRINTF(DP_ERROR, "DP-DM> sink at '%s' failed GUID identification, demote to 1.1 sink.", + address.toString(sb)); + device.dpcdRevisionMajor = 1; + device.dpcdRevisionMinor = 1; + } + else + { + // Had it previously been reported as present? + if (Device * device = parent->findDevice(address)) + parent->removeDevice(device); + + delete this; + return; + } + } + + // at this point we are sure that we have a device GUID. + // We need to check whether the device is new to the DB. + // Had we previously reported the device? + + Device * oldDevice = parent->findDevice(device.address); + + if (!oldDevice) + { + // completely new device + parent->addDevice(device); + } + // If it was a branch and now isn't.. delete the tree of devices under it + else if (oldDevice && oldDevice->branch && !device.branch) + { + parent->removeDeviceTree(device.address); + } + // It changed, delete the previously reported + else if (oldDevice && (oldDevice->legacy != device.legacy || + oldDevice->dpcdRevisionMajor!= device.dpcdRevisionMajor || + oldDevice->dpcdRevisionMinor!= device.dpcdRevisionMinor || + oldDevice->peerDevice != device.peerDevice|| + oldDevice->peerGuid != device.peerGuid || + oldDevice->SDPStreams != device.SDPStreams|| + oldDevice->SDPStreamSinks != device.SDPStreamSinks || + oldDevice->videoSink != device.videoSink)) + { + parent->removeDevice(oldDevice); + } + + // otherwise.. it already existed, and still does + + // We're done + completed = true; + delete this; +} + +void DiscoveryManager::BranchDetection::detectCompleted(bool present) +{ + // + // Handle device not present + // + if (!present) + { + // Had it previously been reported as present? + if (Device * device = parent->findDevice(address)) + parent->removeDevice(device); + + delete this; + return; + } + + // + // We've got a linkAddressMessage and we were able to program the GUID! 
+ // Report the branch and queue any children that were enumerated for detection + // + parent->addDevice(parentDevice); + + unsigned portsToDelete = (1 << (Address::maxPortCount+1)) - 1; // 16 ports + for (unsigned i = 0; i < childCount; i++) + { + Device newDevice; + newDevice.address = address; + newDevice.address.append(child[i].portNumber); + + // + // Input port? Nothing plugged in? Delete the tree of all devices under this one + // DP 1.2 Spec : 2.11.9.5.x + // + if (child[i].isInputPort || !child[i].dpPlugged) { + continue; + } + + portsToDelete &= ~(1 << child[i].portNumber); + + newDevice.peerDevice = child[i].peerDeviceType; + newDevice.legacy = child[i].legacyPlugged && (newDevice.peerDevice == Dongle); + newDevice.dpcdRevisionMajor = child[i].dpcdRevisionMajor; + newDevice.dpcdRevisionMinor = child[i].dpcdRevisionMinor; + // if internal device; use parent's GUID which we ourselves generated or got from the LAM. + if (child[i].portNumber > PHYSICAL_PORT_END) + newDevice.peerGuid = parentDevice.peerGuid; + else + newDevice.peerGuid = child[i].peerGUID; + + newDevice.SDPStreams = child[i].SDPStreams; + newDevice.SDPStreamSinks = child[i].SDPStreamSinks; + + if (child[i].peerDeviceType == DownstreamBranch && + child[i].hasMessaging) + { + newDevice.branch = true; + newDevice.videoSink = false; + } + else + { + newDevice.branch = false; + newDevice.videoSink = ((child[i].peerDeviceType == Dongle) ? + child[i].legacyPlugged : true); + } + + // + // Had we previously reported the device? + // + Device * oldDevice = parent->findDevice(newDevice.address); + + // If it was a branch and now isn't.. delete the tree of devices under it + if (oldDevice && oldDevice->branch && !newDevice.branch) + { + parent->removeDeviceTree(newDevice.address); + } + // It changed, delete + else if (oldDevice && (oldDevice->legacy != newDevice.legacy || + oldDevice->dpcdRevisionMajor!= newDevice.dpcdRevisionMajor || + oldDevice->dpcdRevisionMinor!= newDevice.dpcdRevisionMinor || + oldDevice->peerDevice != newDevice.peerDevice|| + oldDevice->peerGuid != newDevice.peerGuid || + oldDevice->SDPStreams != newDevice.SDPStreams|| + oldDevice->SDPStreamSinks != newDevice.SDPStreamSinks || + oldDevice->videoSink != newDevice.videoSink)) + { + parent->removeDevice(oldDevice); + } + + // otherwise.. it already existed, and still does + if (newDevice.branch) + { + parent->detectBranch(newDevice); + } + else + { + // the new device is a sink. It may or may not have a guid. + // write the guid if needed. 
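+            // detectSink() will issue a REMOTE_DPCD_READ of the GUID and, if it
+            // comes back zero, generate a fresh one via guidBuilder and program
+            // it with a REMOTE_DPCD_WRITE (see
+            // SinkDetection::handleRemoteDpcdReadDownReply()).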
+            parent->detectSink(newDevice, false);
+        }
+    }
+
+    for (unsigned i = 0; i <= Address::maxPortCount; i++)
+        if ((portsToDelete >> i) & 1)
+        {
+            Address a = address;
+            a.append(i);
+            parent->removeDeviceTree(a);
+        }
+
+    // We're done.
+    completed = true;
+    delete this;
+}
+
+void DiscoveryManager::BranchDetection::expired(const void * tag)
+{
+    if (retryLinkAddressMessage)
+    {
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_PRINTF(DP_NOTICE, "DP-DM> Requeueing LINK_ADDRESS_MESSAGE to %s", address.toString(sb));
+
+        retryLinkAddressMessage = false;
+        linkAddressMessage.set(address);
+        parent->messageManager->post(&linkAddressMessage, this);
+    }
+    else if (retryRemoteDpcdWriteMessage)
+    {
+        Address parentAddress = address;
+        parentAddress.pop();
+
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_PRINTF(DP_NOTICE, "DP-DM> Requeueing REMOTE_DPCD_WRITE_MESSAGE to %s", parentAddress.toString(sb));
+
+        retryRemoteDpcdWriteMessage = false;
+        remoteDpcdWriteMessage.set(parentAddress, parentAddress.tail(), NV_DPCD_GUID, sizeof(GUID), (NvU8 *)&parentDevice.peerGuid);
+        DP_PRINTF(DP_NOTICE, "DP-DM> Setting GUID (remotely) for '%s' sent REMOTE_DPCD_WRITE {%p}", address.toString(sb), &remoteDpcdWriteMessage);
+
+        parent->messageManager->post(&remoteDpcdWriteMessage, this);
+    }
+}
+
+void DiscoveryManager::SinkDetection::expired(const void * tag)
+{
+    if (retryLinkAddressMessage)
+    {
+        Address parentAddress = address;
+        parentAddress.pop();
+
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_PRINTF(DP_NOTICE, "DP-DM> Requeueing LAM message to %s", parentAddress.toString(sb));
+
+        retryLinkAddressMessage = false;
+        linkAddressMessage.set(parentAddress);
+
+        parent->messageManager->post(&linkAddressMessage, this);
+    }
+    else if (retryRemoteDpcdReadMessage)
+    {
+        Address parentAddress = address;
+        parentAddress.pop();
+
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_PRINTF(DP_NOTICE, "DP-DM> Requeueing REMOTE_DPCD_READ_MESSAGE to %s", parentAddress.toString(sb));
+
+        retryRemoteDpcdReadMessage = false;
+        remoteDpcdReadMessage.set(parentAddress, parentAddress.tail(), NV_DPCD_GUID, sizeof(GUID));
+        DP_PRINTF(DP_NOTICE, "DP-DM> Reading GUID (remotely) for '%s' sent REMOTE_DPCD_READ {%p}", address.toString(sb), &remoteDpcdReadMessage);
+
+        parent->messageManager->post(&remoteDpcdReadMessage, this);
+    }
+    else if (retryRemoteDpcdWriteMessage)
+    {
+        Address parentAddress = address;
+        parentAddress.pop();
+
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_PRINTF(DP_NOTICE, "DP-DM> Requeueing REMOTE_DPCD_WRITE_MESSAGE to %s", parentAddress.toString(sb));
+
+        retryRemoteDpcdWriteMessage = false;
+        remoteDpcdWriteMessage.set(parentAddress,
+                                   parentAddress.tail(),
+                                   NV_DPCD_GUID, sizeof(GUID),
+                                   (NvU8 *)&device.peerGuid);
+        DP_PRINTF(DP_NOTICE, "DP-DM> Setting GUID (remotely) for '%s' sent REMOTE_DPCD_WRITE {%p}", address.toString(sb), &remoteDpcdWriteMessage);
+
+        parent->messageManager->post(&remoteDpcdWriteMessage, this);
+    }
+}
+
+void DiscoveryManager::BranchDetection::messageFailed(MessageManager::Message * from, NakData * nakData)
+{
+    //
+    // If any of our messages fail, we've completed detection on this branch.
+ // The only exception is if we get a DEFER - then we retry indefinitely + // + if (from == &linkAddressMessage) + { + if (retriesLinkAddressMessage < DPCD_LINK_ADDRESS_MESSAGE_RETRIES && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesLinkAddressMessage++; + retryLinkAddressMessage = true; + parent->timer->queueCallback(this, "DISC", DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN); + return; + } + } + + if (from == &remoteDpcdWriteMessage) + { + if ((retriesRemoteDpcdWriteMessage < DPCD_REMOTE_DPCD_WRITE_MESSAGE_RETRIES) && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesRemoteDpcdWriteMessage++; + retryRemoteDpcdWriteMessage = true; + parent->timer->queueCallback(this, "DISC", DPCD_REMOTE_DPCD_WRITE_MESSAGE_COOLDOWN); + return; + } + } + + Address::StringBuffer sb; + DP_USED(sb); + DP_PRINTF(DP_ERROR, "DP-DM> Message %s {%p} at '%s' failed. Device marked not present.", + from == &linkAddressMessage ? "LINK_ADDRESS_MESSAGE" : + from == &remoteDpcdWriteMessage ? "REMOTE_DPCD_WRITE(GUID)" : "???", + from, address.toString(sb)); + + + // + // Detection is done and branch doesn't exist. + // (Note this automatically removes self from any list we're in) + // + detectCompleted(false); +} + +void DiscoveryManager::SinkDetection::messageFailed(MessageManager::Message * from, NakData * nakData) +{ + if (from == &remoteDpcdReadMessage) + { + if ((retriesRemoteDpcdReadMessage < DPCD_REMOTE_DPCD_READ_MESSAGE_RETRIES) && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesRemoteDpcdReadMessage++; + retryRemoteDpcdReadMessage = true; + parent->timer->queueCallback(this, "DISC", DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN); + return; + } + } + + if (from == &remoteDpcdWriteMessage) + { + if ((retriesRemoteDpcdWriteMessage < DPCD_REMOTE_DPCD_WRITE_MESSAGE_RETRIES) && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesRemoteDpcdWriteMessage++; + retryRemoteDpcdWriteMessage = true; + parent->timer->queueCallback(this, "DISC", DPCD_REMOTE_DPCD_WRITE_MESSAGE_COOLDOWN); + return; + } + } + + if (from == &linkAddressMessage) + { + if ((retriesLinkAddressMessage < DPCD_LINK_ADDRESS_MESSAGE_RETRIES) && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesLinkAddressMessage++; + retryLinkAddressMessage = true; + parent->timer->queueCallback(this, "DISC", DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN); + return; + } + } + + Address::StringBuffer sb; + DP_USED(sb); + DP_PRINTF(DP_ERROR, "DP-DM> Message %s {%p} at '%s' failed.", + from == &remoteDpcdWriteMessage ? "REMOTE_DPCD_WRITE(GUID)" : + from == &remoteDpcdReadMessage ? "REMOTE_DPCD_READ(GUID)" : + from == &linkAddressMessage ? 
"LINK_ADDRESS_MESSAGE" : "???", + from, address.toString(sb)); + + detectCompleted(false); +} + +void DiscoveryManager::SinkDetection::handleLinkAddressDownReply() +{ + Address::StringBuffer sb; + DP_USED(sb); + LinkAddressMessage::Result child; + child = *linkAddressMessage.result(address.tail()); + + device.peerDevice = child.peerDeviceType; + device.dpcdRevisionMajor = child.dpcdRevisionMajor; + device.dpcdRevisionMinor = child.dpcdRevisionMinor; + + if (device.dpcdRevisionMajor == 0) + { + device.dpcdRevisionMajor = 1; + device.dpcdRevisionMinor = 1; + } + device.portMap.inputMap |= (1 << child.portNumber); + + DP_PRINTF(DP_NOTICE, "DP-DM> handleLinkAddressDownReply for sink device on '%s': DPCD Rev = %d.%d", + address.toString(sb), device.dpcdRevisionMajor, device.dpcdRevisionMinor); + + // Check if the device already has a GUID + // or it is a dongle or on a logical port ; in which case no GUID is required. + if ((!device.peerGuid.isGuidZero()) || + (device.peerDevice == Dongle) || + (device.dpcdRevisionMajor <= 1 && device.dpcdRevisionMinor < 2) || + (device.address.tail() > PHYSICAL_PORT_END)) + { + parent->addDevice(device); + delete this; + return; + } + + Address parentAddress = address.parent(); + remoteDpcdReadMessage.set(parentAddress, address.tail(), NV_DPCD_GUID, sizeof(GUID)); + + parent->messageManager->post(&remoteDpcdReadMessage, this); + +} + +void DiscoveryManager::SinkDetection::handleRemoteDpcdReadDownReply() +{ + Address::StringBuffer sb; + DP_USED(sb); + DP_PRINTF(DP_NOTICE, "DP-DM> REMOTE_DPCD_READ {%p} at '%s' completed", + (MessageManager::Message *)&remoteDpcdReadMessage, + address.toString(sb)); + if (remoteDpcdReadMessage.replyNumOfBytesReadDPCD() != sizeof(GUID)) + { + DP_ASSERT(0 && "Incomplete GUID in remote DPCD read message"); + detectCompleted(false); + return; + } + + DP_ASSERT(remoteDpcdReadMessage.replyPortNumber() == address.tail()); + device.peerGuid.copyFrom(remoteDpcdReadMessage.replyGetData()); + + if (!device.peerGuid.isGuidZero()) + { + // we got the GUID ... handle device add/remove + detectCompleted(true); + } + else + { + // + // We need to give ourselves a non-zero GUID! + // + parent->guidBuilder.makeGuid(device.peerGuid); + + Address parentAddress = address.parent(); + remoteDpcdWriteMessage.set(parentAddress, + address.tail(), + NV_DPCD_GUID, sizeof(GUID), + (NvU8 *)&device.peerGuid); + + DP_PRINTF(DP_NOTICE, "DP-DM> Setting GUID (remotely) for '%s' sent REMOTE_DPCD_WRITE {%p}", + address.toString(sb), &remoteDpcdWriteMessage); + + parent->messageManager->post(&remoteDpcdWriteMessage, this); + } +} + +void DiscoveryManager::BranchDetection::handleLinkAddressDownReply() +{ + Address::StringBuffer sb; + DP_USED(sb); + + // + // Copy link address results out of the structure + // - We cannot process the contents until after + // we've programmed the GUID. The reasoning is + // that we need to make sure we do not enumerate + // devices not yet in a usable state. 
+ // + childCount = linkAddressMessage.resultCount(); + for (unsigned i = 0; i < childCount; i++) + { + child[i] = *linkAddressMessage.result(i); + + // also update the portmap + parentDevice.portMap.internalMap = 0xFF00; // ports 0x8 to 0xF are internal + parentDevice.portMap.validMap |= (1 << child[i].portNumber); + if (child[i].isInputPort) + { + parentDevice.peerDevice = child[i].peerDeviceType; + parentDevice.portMap.inputMap |= (1 << child[i].portNumber); + if (address == Address(0)) + { + // + // For immediate branch device, we will have already read DPCD version + // in notifyHPD. So we can just use that to populate here. + // For the remaining devices, LAM to parent branch will report the child + // DPCD version in reply and we are populating it in + // BranchDetection::detectCompleted. + // + parentDevice.dpcdRevisionMajor = parent->hal->getRevisionMajor(); + parentDevice.dpcdRevisionMinor = parent->hal->getRevisionMinor(); + } + } + } + + linkAddressMessage.getGUID(parentDevice.peerGuid); + if (parentDevice.peerGuid.isGuidZero()) + { + // + // We need to give ourselves a non-zero GUID! + // + parent->guidBuilder.makeGuid(parentDevice.peerGuid); + + if (address == Address(0)) + { + DP_PRINTF(DP_NOTICE, "DP-DM> Setting GUID (locally) for '%s'", address.toString(sb)); + // + // We're locally connected, use the DPCD HAL to write the new GUID + // + if (AuxRetry::ack != parent->hal->setGUID(parentDevice.peerGuid)) + { + detectCompleted(false); + return; + } + + detectCompleted(true); + } + else + { + // + // Let's build a remote DPCD request. Remember the target is the *parent* + // of the device we want to talk to + // + Address parentAddress = address; + parentAddress.pop(); + remoteDpcdWriteMessage.set(parentAddress, address.tail(), + NV_DPCD_GUID, sizeof(GUID), + (NvU8 *)&parentDevice.peerGuid); + + DP_PRINTF(DP_NOTICE, "DP-DM> Setting GUID (remotely) for '%s' sent REMOTE_DPCD_WRITE {%p}", + address.toString(sb), &remoteDpcdWriteMessage); + + parent->messageManager->post(&remoteDpcdWriteMessage, this); + } + } + else + { + // + // Already had a GUID + // + detectCompleted(true); + } + +} + +void DiscoveryManager::BranchDetection::messageCompleted(MessageManager::Message * from) +{ + if (from == &linkAddressMessage) + handleLinkAddressDownReply(); + else if (from == &remoteDpcdWriteMessage) + detectCompleted(true); +} + +void DiscoveryManager::SinkDetection::messageCompleted(MessageManager::Message * from) +{ + if (from == &remoteDpcdReadMessage) + handleRemoteDpcdReadDownReply(); + else if (from == &linkAddressMessage) + handleLinkAddressDownReply(); + else if (from == &remoteDpcdWriteMessage) + detectCompleted(true); +} + +void DiscoveryManager::BranchDetection::start() +{ + // + // 1. Create a LINK_ADDRESS_MESSAGE to send to this target so that we can find who he is + // 2. Create a REMOTE_DPCD_WRITE to set the GUID for this target + // *alternatively* we may have to use the local DPCD HAL to write this + // 3. Enumerate any children that we may wish to queue detect on. 
+    //
+    linkAddressMessage.set(address);
+
+    Address::StringBuffer sb;
+    DP_USED(sb);
+    DP_PRINTF(DP_NOTICE, "DP-DM> Detecting '%s' (sending LINK_ADDRESS_MESSAGE {%p})",
+              address.toString(sb),
+              (MessageManager::Message *)&linkAddressMessage);
+
+    parent->messageManager->post(&linkAddressMessage, this);
+}
+
+void DiscoveryManager::SinkDetection::start()
+{
+    //
+    // Per DP1.4 requirement:
+    // Send a PowerUpPhy message first, to make sure the device is ready to work.
+    //
+    NakData nakData;
+    powerUpPhyMessage.set(address.parent(), address.tail(), NV_TRUE);
+    parent->messageManager->send(&powerUpPhyMessage, nakData);
+
+    Address::StringBuffer sb;
+    DP_USED(sb);
+
+    // The sink was found via a CSN, so its DPCD revision is not known yet.
+    if (bFromCSN)
+    {
+        parent->outstandingSinkDetections.insertBack(this);
+        // Create a LINK_ADDRESS_MESSAGE to send to the parent of this target
+        linkAddressMessage.set(address.parent());
+
+        DP_PRINTF(DP_NOTICE, "DP-DM> Detecting '%s' (sending LINK_ADDRESS_MESSAGE {%p})",
+                  address.toString(sb),
+                  (MessageManager::Message *)&linkAddressMessage);
+        parent->messageManager->post(&linkAddressMessage, this);
+    }
+    else // The sink was found in a LAM sent for the branch, with DPCD rev.
+    {
+        // Check if the device already has a GUID,
+        // or it is a dongle or on a logical port, in which case no GUID is required.
+        if ((!device.peerGuid.isGuidZero()) ||
+            (device.peerDevice == Dongle) ||
+            (device.dpcdRevisionMajor <= 1 && device.dpcdRevisionMinor < 2) ||
+            (device.address.tail() > PHYSICAL_PORT_END))
+        {
+            parent->addDevice(device);
+            delete this;
+            return;
+        }
+
+        parent->outstandingSinkDetections.insertBack(this);
+        Address parentAddress = address.parent();
+        remoteDpcdReadMessage.set(parentAddress, address.tail(), NV_DPCD_GUID, sizeof(GUID));
+
+        parent->messageManager->post(&remoteDpcdReadMessage, this);
+    }
+
+}
+
+DiscoveryManager::BranchDetection::~BranchDetection()
+{
+    List::remove(this);
+
+    if (parent->outstandingSinkDetections.isEmpty() &&
+        parent->outstandingBranchDetections.isEmpty())
+        parent->sink->discoveryDetectComplete();
+
+    parent->timer->cancelCallbacks(this);
+}
+
+DiscoveryManager::SinkDetection::~SinkDetection()
+{
+    List::remove(this);
+
+    if (parent->outstandingSinkDetections.isEmpty() &&
+        parent->outstandingBranchDetections.isEmpty())
+        parent->sink->discoveryDetectComplete();
+
+    parent->timer->cancelCallbacks(this);
+}
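A note on the two destructors above: every in-flight detection removes itself from its outstanding list on destruction, and whichever detection empties both lists last fires discoveryDetectComplete() exactly once. A minimal standalone sketch of that idiom (simplified types, std::list standing in for the intrusive lists):

    #include <list>
    #include <cstdio>

    struct Tracker
    {
        std::list<int> sinks, branches;                 // stand-ins for the outstanding-detection lists
        void detectionFinished(std::list<int> &l)
        {
            if (!l.empty())
                l.pop_front();                          // ~SinkDetection / ~BranchDetection: List::remove(this)
            if (sinks.empty() && branches.empty())      // same test both destructors perform
                std::printf("discoveryDetectComplete()\n");
        }
    };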
+
+void DiscoveryManager::ReceiverSink::messageProcessed(MessageManager::MessageReceiver * from)
+{
+    DP_ASSERT((from->getRequestId() == 0x2) && "This receiver is only meant for CSNs");
+
+    // CSNs are broadcast messages, so replies will always go to the immediate downstream branch.
+    CsnUpReplyContainer * csnReplyContainer = new CsnUpReplyContainer(parent);
+    parent->pendingCsnUpReplies.insertBack(csnReplyContainer);
+
+    // Send acknowledgement to the CSN sender.
+    csnReplyContainer->postUpReply();
+
+    ConnStatusNotifyMessage* csnMessage = static_cast<ConnStatusNotifyMessage*>(from);
+
+    if (csnMessage->getUpRequestData()->isInputPort)
+    {
+        DP_PRINTF(DP_ERROR, "Concentrator?? Got CSN for an upstream port!");
+        return;
+    }
+
+    Address childAddr;
+    DiscoveryManager::Device * oldDevice = parent->findChildDeviceForBranchWithGuid(csnMessage->getUpRequestData()->guid,
+                                                                                    csnMessage->getUpRequestData()->port, childAddr);
+    if (!csnMessage->getUpRequestData()->devicePlugged)    // some device was unplugged or powered off
+    {
+        if (oldDevice)
+            parent->removeDeviceTree(childAddr);
+        return;
+    }
+
+    handleCSN(from);
+}
+
+void DiscoveryManager::ReceiverSink::handleCSN(MessageManager::MessageReceiver * from)
+{
+    ConnStatusNotifyMessage* csnMessage = static_cast<ConnStatusNotifyMessage*>(from);
+
+    // There is no point in serving an upRequest when no device is present.
+    if (parent->currentDevicesCount == 0)
+    {
+        DP_ASSERT(0 && "DM> No Device in the Topology");
+        return;
+    }
+
+    //
+    // Check for a non-zero GUID in the CSN message. It is mandatory for finding the respective
+    // parent. A branch should not send a CSN with a zero GUID, as a unique GUID is set before the CSN.
+    //
+    if ((csnMessage->getUpRequestData()->guid).isGuidZero())
+    {
+        DP_ASSERT(0 && "Ignoring CSN. Invalid parent device due to zero-GUID.");
+        return;
+    }
+
+    Address childAddr;
+    unsigned port = csnMessage->getUpRequestData()->port;
+    DiscoveryManager::Device * oldDevice =
+        parent->findChildDeviceForBranchWithGuid(csnMessage->getUpRequestData()->guid,
+                                                 port,
+                                                 childAddr);
+
+    // Check if we already have a device
+    if (oldDevice)
+    {
+        oldDevice->dirty = true;
+
+        // Set the videoSink status of oldDevice again, as the old device might be a legacy dongle
+        // and a video sink has now been added to it.
+        oldDevice->videoSink = ((csnMessage->getUpRequestData()->peerDeviceType == Dongle) ?
+                                csnMessage->getUpRequestData()->legacyPlugged : true);
+
+        parent->sink->discoveryNewDevice(*oldDevice);
+        return;
+    }
+
+    // Exit if no valid address matched for further detection.
+    if ((childAddr.size() == 0) ||
+        (childAddr.size() > Address::maxHops))
+    {
+        DP_ASSERT(0 && "Ignoring CSN. Invalid parent device due to GUID not found in discovered topology");
+        return;
+    }
+
+    DiscoveryManager::Device newDevice;
+    newDevice.address = childAddr;
+    newDevice.branch = (csnMessage->getUpRequestData()->messagingCapability == true) &&
+                       (csnMessage->getUpRequestData()->peerDeviceType == DownstreamBranch);
+
+    newDevice.peerDevice = csnMessage->getUpRequestData()->peerDeviceType;
+    newDevice.legacy = csnMessage->getUpRequestData()->legacyPlugged == true;
+    newDevice.SDPStreams = newDevice.SDPStreamSinks = 0;
+
+    if (csnMessage->getUpRequestData()->devicePlugged)    // Check for a new device only if it's plugged
+    {
+        if (newDevice.branch)
+        {
+            newDevice.videoSink = false;
+            // send a LAM and the whole nine yards
+            DP_ASSERT(newDevice.legacy == false);
+            parent->detectBranch(newDevice);
+            return;
+        }
+        else
+        {
+            newDevice.SDPStreams = newDevice.SDPStreamSinks = 1;
+            newDevice.videoSink = ((csnMessage->getUpRequestData()->peerDeviceType == Dongle) ?
+                                   csnMessage->getUpRequestData()->legacyPlugged : true);
+
+            parent->detectSink(newDevice, true);
+            return;
+        }
+    }
+}
diff --git a/src/common/displayport/src/dp_edid.cpp b/src/common/displayport/src/dp_edid.cpp
new file mode 100644
index 0000000..935ab37
--- /dev/null
+++ b/src/common/displayport/src/dp_edid.cpp
@@ -0,0 +1,648 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_edid.c                                                         *
+*    Implementation of SST/MST EDID reader                                  *
+*                                                                           *
+\***************************************************************************/
+
+#include "dp_buffer.h"
+#include "dp_internal.h"
+#include "dp_edid.h"
+#include "dp_printf.h"
+
+using namespace DisplayPort;
+
+EdidAssembler::EdidAssembler(Edid * const edid, bool bPatchCrc):
+    edid(edid), stream(edid->getBuffer()), oldBlockChecksum(0x00),
+    blocksRead(0), totalBlockCnt(0), retriesCount(0),
+    bPatchCrc(bPatchCrc) {}
+
+
+bool EdidAssembler::readIsComplete()
+{
+    return (blocksRead > 0 && blocksRead == totalBlockCnt);
+}
+
+void EdidAssembler::reset()
+{
+    oldBlockChecksum = 0x00;
+    blocksRead = 0;
+    totalBlockCnt = 0;
+    retriesCount = 0;
+    stream.seek(0);
+}
+
+void EdidAssembler::postReply(const Buffer & buffer, unsigned sizeCompleted, bool success)
+{
+    if (!success || buffer.isError())
+    {
+        retriesCount++;
+        return;
+    }
+
+    //
+    // For SST:
+    // Check the checksum error per block read; mark the EDID as "patched" if
+    // the CRC is wrong. DPLib will then return the fallback EDID.
+    //
+    blocksRead++;
+    stream.write(buffer.data, sizeCompleted);
+    if (getEDIDBlockChecksum(buffer))
+    {
+        if (bPatchCrc)
+            edid->patchCrc();
+        edid->setPatchedChecksum(true);
+    }
+    return;
+}
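Both postReply() overloads rely on the EDID rule that each 128-byte block must sum to zero modulo 256 (the final byte of a block is chosen to make the sum come out to zero). A minimal standalone sketch of that check, mirroring what getEDIDBlockChecksum() later in this file computes (the 128-byte length is EDID_BLOCK_SIZE in the real code):

    #include <cstdint>
    #include <cstddef>

    // Returns the low 8 bits of the byte sum; 0 means the block checksum is valid.
    static uint8_t edidBlockChecksum(const uint8_t *block, size_t len = 128)
    {
        unsigned sum = 0;
        for (size_t i = 0; i < len; i++)
            sum += block[i];
        return static_cast<uint8_t>(sum & 0xFF);   // byte 127 is the checksum filler
    }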
+void EdidAssembler::postReply(unsigned char * data, unsigned sizeCompleted, bool success)
+{
+    //
+    // For MST: when a read of an EDID block fails, the library will attempt to read the
+    // same block again, but not more than EDID_POLICY_BLOCK_READ_MAX_RETRY_COUNT times.
+    //
+    if (!success)
+    {
+        retriesCount++;
+        return;
+    }
+
+    //
+    // Check the checksum error per block read;
+    // the library will attempt to read the same block again,
+    // but not more than EDID_POLICY_BLOCK_READ_MAX_RETRY_COUNT times.
+    //
+    Buffer buffer(data, EDID_BLOCK_SIZE);
+    if (buffer.isError())
+    {
+        retriesCount++;
+        return;
+    }
+
+    NvU8 newBlockChecksum = getEDIDBlockChecksum(buffer);
+    if (newBlockChecksum)
+    {
+        if (this->oldBlockChecksum != newBlockChecksum)    // First failure?
+        {
+            this->oldBlockChecksum = newBlockChecksum;
+            retriesCount++;
+            return;
+        }
+    }
+
+    this->oldBlockChecksum = 0;
+    retriesCount = 0;
+    blocksRead++;
+    stream.write(data, sizeCompleted);
+}
+
+bool EdidAssembler::readNextRequest(NvU8 & seg, NvU8 & offset)
+{
+    //
+    // Cache totalBlockCnt.
+    // With an EDID 1.3 HF-EEODB, it might change after the first extension block is read.
+    //
+    if ((blocksRead == 1) || (blocksRead == 2))
+        totalBlockCnt = edid->getBlockCount();
+
+    //
+    // Will return false in two scenarios:
+    // 1. The EDID read is complete, all extension blocks were read.
+    // 2. The first EDID block was corrupted, so totalBlockCnt = 0.
+    //
+    if (blocksRead >= totalBlockCnt)
+        return false;
+
+    // Retry count exceeded for a particular block?
+    if (retriesCount > EDID_POLICY_BLOCK_READ_MAX_RETRY_COUNT)
+        return false;
+
+    seg    = NvU8(blocksRead >> 1);
+    offset = NvU8((blocksRead & 0x1) * EDID_BLOCK_SIZE);
+    return true;
+}
+
+
+enum
+{
+    EDID_V1_IDX_EXTENSION    = 0x7E,
+    EDID_V1_IDX_HEADER0      = 0x00,
+    EDID_V1_HEADER0          = 0x00,
+
+    EDID_V1_IDX_HEADER1      = 0x01,
+    EDID_V1_HEADER1          = 0xFF,
+
+    EDID_V1_IDX_VERSION      = 0x12,
+    EDID_V1_VERSION_1        = 0x01,
+    EDID_V2_IDX_VERREV       = 0x00,
+
+    //
+    // From od_edid.h: RM identifies VER 2 using bits 7:4.
+    //    #define EDID_V2_VERREV_VERSION      7:4  /* RW--F */
+    //    #define EDID_V2_VERREV_VERSION_2    0x02 /* RWI-V */
+    //
+    // Avoiding FLD_* macros, so shift the VER2 value 4 bits to the left.
+    //
+    EDID_V2_VERREV_VERSION_2 = 0x02 << 4,
+    EDID_FLAGS_CHKSUM_ATTEMPTS_DP = 0x5,
+};
+
+enum
+{
+    // EDID CTA-EXT (CTA 861 Extension) block defines
+    EDID_CTA_EXT_HEADER_OFFSET               = 0x00,
+    EDID_CTA_EXT_HEADER                      = 0x02,
+    EDID_CTA_EXT_VERSION_OFFSET              = 0x01,
+    EDID_CTA_EXT_VERSION_3                   = 0x03,
+    EDID_CTA_EXT_DATA_BLOCK_HEADER_OFFSET    = 0x04,
+    EDID_CTA_EXT_DATA_BLOCK_HEADER_HF_EEODB  = 0xE2,
+    EDID_CTA_EXT_DATA_BLOCK_TAG_OFFSET       = 0x05,
+    EDID_CTA_EXT_DATA_BLOCK_TAG_HF_EEODB     = 0x78,
+    EDID_CTA_EXT_DATA_BLOCK_EXT_COUNT_OFFSET = 0x06,
+};
+
+Edid::Edid(): buffer()
+{
+    // fill EDID buffer with zeroes
+    this->buffer.memZero();
+    checkSumValid = false;
+    forcedCheckSum = false;
+    fallbackEdid = false;
+    patchedChecksum = false;
+
+    // clear the WARFlags and WARData.
+    _WARFlags flagTemp = {0};
+    _WARData dataTemp = {0};
+    WARFlags = flagTemp;
+    WARData = dataTemp;
+}
+
+Edid::~Edid()
+{
+}
+
+bool Edid::verifyCRC()
+{
+    if (getEdidSize() > 0)
+    {
+        this->validateCheckSum();
+        return this->checkSumValid;
+    }
+    else
+        return false;
+}
+
+// This routine patches the EDID CRC after the EDID has been overridden for WARs.
+void Edid::patchCrc()
+{
+    // We always override some bytes within the first 128;
+    // recalculate and fix the checksum for the first page only.
+    unsigned chksum = 0;
+    for (unsigned i = 0; i < 128; i++)
+    {
+        chksum += buffer.data[i];
+    }
+    chksum = chksum & 0xFF;
+
+    if (chksum)
+        buffer.data[127] = 0xFF & (buffer.data[127] + (0x100 - chksum));
+}
+
+bool Edid::isChecksumValid() const
+{
+    // Return true if the checksum is valid, or if the checksum was invalid
+    // but we are forcing it to be treated as valid.
+ return (checkSumValid || forcedCheckSum); +} + +bool Edid::isFallbackEdid() const +{ + return fallbackEdid; +} + +NvU8 Edid::getFirstPageChecksum() +{ + DP_ASSERT(buffer.getLength() >= 128); + if (buffer.getLength() < 128) + return 0; + else + return buffer.data[127]; +} + +NvU8 Edid::getLastPageChecksum() +{ + NvU32 bufferSize = buffer.getLength(); + NvU32 checksumLocation = this->getBlockCount() * 128 - 1; + + if (bufferSize == 0 || bufferSize < (this->getBlockCount() * 128)) + { + DP_PRINTF(DP_ERROR, "DP-EDID> Edid length is 0 or less than required"); + return 0; + } + + if (bufferSize % 128 != 0) + { + DP_PRINTF(DP_ERROR, "DP-EDID> Edid length is not a multiple of 128"); + return 0; + } + + return buffer.data[checksumLocation]; + +} + +void Edid::validateCheckSum() +{ + // Each page has its own checksum + checkSumValid = false; + for (unsigned chunk = 0; chunk < this->buffer.length; chunk += 128) + { + unsigned chksum = 0; + for (unsigned i = 0; i < 128; i++) + { + chksum += buffer.data[i+chunk]; + } + + if ((chksum & 0xFF) != 0) + return; + } + checkSumValid = true; +} + +unsigned Edid::getEdidVersion() +{ + if (buffer.isError() || buffer.length < EDID_BLOCK_SIZE) + { + return 0; + } + + // 0 version is "unknown" + unsigned version = 0; + + // Check for Version 1 EDID + if (this->buffer.data[EDID_V1_IDX_VERSION] == EDID_V1_VERSION_1) + { + version = 1; + } + // Check for version 2 EDID + else if (this->buffer.data[EDID_V2_IDX_VERREV] & EDID_V2_VERREV_VERSION_2) + { + // + // Version 2 has 256 bytes by default. + // There is a note about an extra 256 byte block if byte 0x7E + // bit 7 is set but there's no definition for it listed in + // the EDID Version 3 (971113). So, let's just skip it for now. + // + version = 2; + } + else + { + DP_ASSERT(version && "Unknown EDID version"); + } + + return version; +} + +const char * Edid::getName() const +{ + static char decodedName[16] = {0}; + int tail = 0; + if (buffer.length < 128) + return "?"; + + for (int i = 0; i < 4; i++) + if (buffer.data[0x39 + i * 18 + 0] == 0xFC) + { + for (int j = 0; j < 13; j++) + decodedName[tail++] = buffer.data[0x39 + i*18 + 2 + j]; + break; + } + decodedName[tail++] = 0; + return decodedName; +} + +unsigned Edid::getBlockCount() +{ + if (buffer.isError() || buffer.length < EDID_BLOCK_SIZE) + { + return 0; + } + + unsigned version = getEdidVersion(); + + if (version == 1) + { + NvU32 blockCount = (unsigned) this->buffer.data[EDID_V1_IDX_EXTENSION]+1; + + if (blockCount > EDID_MAX_BLOCK_COUNT) + { + DP_PRINTF(DP_ERROR, "DPEDID> %s: DDC read returned questionable results: " + "Total block Count too high: %d", __FUNCTION__, blockCount); + return 1; + } + // + // Check for the HF-EEODB defined in HDMI 2.1 specification. + // 1. It is EDID version 1.3 and the extension block count is 1 (total block count = 2) + // 2. The 1st EDID extension block is already read. (buffer.length > block size) + // 3. The 1st EDID extension block is CTA extension block. + // 4. It has HF-EEODB (1st extension block: byte4 == 0xE2 and byte5 == 0x78) + // + if ((blockCount == 2) && (buffer.length >= EDID_BLOCK_SIZE * 2)) + { + NvU8 *pExt = &(this->buffer.data[EDID_BLOCK_SIZE]); + + // + // If it's a CTA-EXT block version 3 and has HF-EEODB + // defined, update the total block count. 
+ // + if ((pExt[EDID_CTA_EXT_HEADER_OFFSET] == EDID_CTA_EXT_HEADER) && + (pExt[EDID_CTA_EXT_VERSION_OFFSET] == EDID_CTA_EXT_VERSION_3) && + (pExt[EDID_CTA_EXT_DATA_BLOCK_HEADER_OFFSET] == EDID_CTA_EXT_DATA_BLOCK_HEADER_HF_EEODB) && + (pExt[EDID_CTA_EXT_DATA_BLOCK_TAG_OFFSET] == EDID_CTA_EXT_DATA_BLOCK_TAG_HF_EEODB)) + { + blockCount = pExt[EDID_CTA_EXT_DATA_BLOCK_EXT_COUNT_OFFSET] + 1; + } + + } + return blockCount; + } + else if (version == 2) + { + // + // Version 2 has 256 bytes by default. + // There is a note about an extra 256 byte block + // if byte 0x7E bit 7 is set, but there's no + // definition for it listed in the + // EDID Version 3 (971113) So, let's just skip + // it for now. + // + return 2; + } + else + { + // Unknown EDID version. Skip it. + DP_PRINTF(DP_ERROR, "DPEDID> %s: Unknown EDID Version!",__FUNCTION__); + DP_ASSERT(0 && "Unknown EDID version!"); + return 1; + } +} + +unsigned Edid::getEdidSize() const +{ + return this->buffer.length; +} + +void DisplayPort::Edid::swap(Edid & right) +{ + swapBuffers(buffer, right.buffer); + validateCheckSum(); +} + +const NvU8 fallbackEdidModes[5][EDID_BLOCK_SIZE] = { + // ID Manufacturer Name: NVD + // VIDEO INPUT DEFINITION: + // Digital Signal + // VESA DFP 1.x Compatible + + // + // The first 4 entries are for NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS (DPCD 0x20) + // 1024x768x60Hz: defined in bit 0. + // 1280x720x60Hz: defined in bit 1. + // 1920x1080x60Hz: defined in bit 2. [Mandatory] + // + { + // Bit 2: 1920x1080x60 only + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x04, 0xA5, 0x00, 0x00, 0x64, + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, + 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3A, + 0x80, 0x18, 0x71, 0x38, 0x2D, 0x40, 0x58, 0x2C, + 0x43, 0x00, 0xC0, 0x1C, 0x32, 0x00, 0x00, 0x1C, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDB + }, + { + // bit 2 + bit 0: 1920x1080x60 + 1024x768x60 + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x04, 0xA5, 0x00, 0x00, 0x64, + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, + 0x50, 0x54, 0x00, 0x00, 0x08, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3A, + 0x80, 0x18, 0x71, 0x38, 0x2D, 0x40, 0x58, 0x2C, + 0x43, 0x00, 0xC0, 0x1C, 0x32, 0x00, 0x00, 0x1C, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD3 + }, + { + // bit 2 + bit 1: 1920x1080x60 + 1280x720x60 + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x04, 0xA5, 0x00, 0x00, 0x64, + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, + 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x81, 0xC0, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 
0x01, 0x02, 0x3A, + 0x80, 0x18, 0x71, 0x38, 0x2D, 0x40, 0x58, 0x2C, + 0x43, 0x00, 0xC0, 0x1C, 0x32, 0x00, 0x00, 0x1C, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9C + }, + { + // bit2 + bit 1 + bit 0: All 3 modes. + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x04, 0xA5, 0x00, 0x00, 0x64, + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, + 0x50, 0x54, 0x00, 0x00, 0x08, 0x00, 0x81, 0xC0, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3A, + 0x80, 0x18, 0x71, 0x38, 0x2D, 0x40, 0x58, 0x2C, + 0x43, 0x00, 0xC0, 0x1C, 0x32, 0x00, 0x00, 0x1C, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94 + }, + { + // ESTABLISHED TIMING I: + // 640 X 480 @ 60Hz (IBM,VGA) + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x04, 0x95, 0x00, 0x00, 0x78, + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, + 0x50, 0x54, 0x00, 0x20, 0x00, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92 + } +}; + +// +// Definition of DPCD 0x20: +// 1024x768x60Hz: defined in bit 0. +// 1280x720x60Hz: defined in bit 1. +// 1920x1080x60Hz: defined in bit 2. [Mandatory] +// MIN value is 4 (only 1920x1080 supported) +// MAX value is 7 (supports all 3 modes) +// +#define SINK_VIDEO_FALLBACK_FORMATS_MIN_VALUE (0x00000004) +#define SINK_VIDEO_FALLBACK_FORMATS_MAX_VALUE (0x00000007) + +void DisplayPort::makeEdidFallback(Edid & edid, NvU32 fallbackFormatSupported) +{ + const NvU8 *data; + + // fallbackFormatSupported valid values = 4~7 + if (fallbackFormatSupported > SINK_VIDEO_FALLBACK_FORMATS_MAX_VALUE || + fallbackFormatSupported < SINK_VIDEO_FALLBACK_FORMATS_MIN_VALUE) + { + // 4 is default fallback mode. 
(only 640x480) + data = fallbackEdidModes[4]; + } + else + { + data = fallbackEdidModes[fallbackFormatSupported-4]; + } + if (!edid.getBuffer()->resize(EDID_BLOCK_SIZE)) + return; + + dpMemCopy(edid.getBuffer()->getData(), (const NvU8*)data, EDID_BLOCK_SIZE); + DP_ASSERT(edid.verifyCRC()); + edid.setFallbackFlag(true); +} + +/* +Fake EDID for DP2VGA dongle when the EDID of the real monitor is not available + +Established Timings [20 CE 00] + 640 x 480 @ 60Hz + 800 x 600 @ 72Hz + 800 x 600 @ 75Hz + 1024 x 768 @ 60Hz + 1024 x 768 @ 70Hz + 1024 x 768 @ 75Hz + +Standard Timings + Timing [3159] : 640 x 480 @ 85Hz (4:3) + Timing [4559] : 800 x 600 @ 85Hz (4:3) + Timing [6159] : 1024 x 768 @ 85Hz (4:3) + Timing [714F] : 1152 x 864 @ 75Hz (4:3) + +Detailed Timing [DTD] 1280 x 1024 @ 60.02Hz + Pixel Clock : 108.00Mhz + HBlank, HBorder : 408, 0 + HSyncStart, HSyncWidth : 48, 112 + VBlank, VBorder : 42, 0 + VSyncStart, VSyncWidth : 1, 3 + Image size : 376mm x 301mm + DigitalSeparate +/+ +*/ + +void DisplayPort::makeEdidFallbackVGA(Edid & edid) +{ + const NvU8 data[] = { + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x13, 0x01, 0x03, 0x80, 0x26, 0x1E, 0x78, 0xEE, 0xCB, 0x05, 0xA3, 0x58, 0x4C, 0x9B, 0x25, + 0x13, 0x50, 0x54, 0x20, 0xCE, 0x00, 0x31, 0x59, 0x45, 0x59, 0x61, 0x59, 0x71, 0x4F, 0x81, 0x40, + 0x81, 0x80, 0x01, 0x01, 0x01, 0x01, 0x30, 0x2A, 0x00, 0x98, 0x51, 0x00, 0x2A, 0x40, 0x30, 0x70, + 0x13, 0x00, 0x78, 0x2D, 0x11, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x30, 0x55, 0x1F, + 0x52, 0x0E, 0x00, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x4C, + 0x43, 0x44, 0x5F, 0x56, 0x47, 0x41, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD8 + }; + + if (!edid.getBuffer()->resize(sizeof(data))) + return; + + dpMemCopy(edid.getBuffer()->getData(), (const NvU8*)data, sizeof data); + DP_ASSERT(edid.verifyCRC()); + edid.setFallbackFlag(true); +} + +NvU8 DisplayPort::getEDIDBlockChecksum(const Buffer & buffer) +{ + DP_ASSERT(buffer.getLength() == 128); + + unsigned chksum = 0; + for (unsigned i = 0; i < buffer.getLength(); i++) + { + chksum += buffer.data[i]; + } + chksum = chksum & 0xFF; + return (NvU8)chksum; +} + +bool DisplayPort::Edid::isValidHeader() const +{ + NvU8 validHeaderData[8] = { + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0x00}; + + if (buffer.getLength() < 0x8) + return false; + + for (unsigned i = 0; i < 8; i++) + { + if (buffer.data[i] != validHeaderData[i]) + { + DP_PRINTF(DP_WARNING, "DP-EDID> Invalid EDID Header"); + return false; + } + } + + return true; +} \ No newline at end of file diff --git a/src/common/displayport/src/dp_evoadapter.cpp b/src/common/displayport/src/dp_evoadapter.cpp new file mode 100644 index 0000000..b48797f --- /dev/null +++ b/src/common/displayport/src/dp_evoadapter.cpp @@ -0,0 +1,2186 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_evoadapter.cpp * +* Interface for low level access to the aux bus. * +* This is the synchronous version of the interface. * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_evoadapter.h" +#include "dp_auxdefs.h" +#include "dp_qse.h" +#include "dp_tracing.h" +#include "dp_vrr.h" +#include "dp_printf.h" +#include + +#include +#include +#include + +using namespace DisplayPort; + +// +// Evo hardcodes the relationship between stream and head # +// Head#x is always stream x+1 +// +#define STREAM_TO_HEAD_ID(s) ((s) - 1) +#define HEAD_TO_STREAM_ID(s) ((s) + 1) + +// +// Data Base used to store all the regkey values. +// The type is defined in dp_regkeydatabase.h. +// All entries set to 0 before initialized by the first EvoMainLink constructor. +// The first EvoMainLink constructor will populate that data base. +// Later EvoMainLink will use values from that data base. 
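+//
+// An illustrative reading of how such a table-driven fill can work (the real
+// initializeRegkeyDatabase() lives elsewhere; readRegkey() below is a
+// hypothetical accessor, not an existing entry point):
+//
+//     for each row e in DP_REGKEY_TABLE:            // {pName, pValue, valueType}
+//     {
+//         NvU32 v = readRegkey(e.pName);            // hypothetical accessor
+//         if (e.valueType == DP_REG_VAL_BOOL) *(bool *)e.pValue  = (v != 0);
+//         if (e.valueType == DP_REG_VAL_U32)  *(NvU32 *)e.pValue = v;
+//     }
+//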
+// +struct DP_REGKEY_DATABASE dpRegkeyDatabase = {0}; + +enum DP_REG_VAL_TYPE +{ + DP_REG_VAL_BOOL = 0, + DP_REG_VAL_U32 = 1, + DP_REG_VAL_U16 = 2, + DP_REG_VAL_U8 = 3 +}; + +const struct +{ + const char* pName; + void* pValue; + DP_REG_VAL_TYPE valueType; +} DP_REGKEY_TABLE [] = +{ + {NV_DP_REGKEY_DISABLE_QSES, &dpRegkeyDatabase.bQsesDisabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_OVERRIDE_DPCD_REV, &dpRegkeyDatabase.dpcdRevOveride, DP_REG_VAL_U32}, + {NV_DP_REGKEY_DISABLE_SSC, &dpRegkeyDatabase.bSscDisabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_ENABLE_FAST_LINK_TRAINING, &dpRegkeyDatabase.bFastLinkTrainingEnabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_DISABLE_MST, &dpRegkeyDatabase.bMstDisabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_ENABLE_INBAND_STEREO_SIGNALING, &dpRegkeyDatabase.bInbandStereoSignalingEnabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_SKIP_POWEROFF_EDP_IN_HEAD_DETACH, &dpRegkeyDatabase.bPoweroffEdpInHeadDetachSkipped, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_ENABLE_OCA_LOGGING, &dpRegkeyDatabase.bOcaLoggingEnabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_REPORT_DEVICE_LOST_BEFORE_NEW, &dpRegkeyDatabase.bReportDeviceLostBeforeNew, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_APPLY_LINK_BW_OVERRIDE_WAR, &dpRegkeyDatabase.bLinkBwOverrideWarApplied, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_APPLY_MAX_LINK_RATE_OVERRIDES, &dpRegkeyDatabase.applyMaxLinkRateOverrides, DP_REG_VAL_U32}, + {NV_DP_REGKEY_DISABLE_DSC, &dpRegkeyDatabase.bDscDisabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_SKIP_ASSESSLINK_FOR_EDP, &dpRegkeyDatabase.bAssesslinkForEdpSkipped, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_HDCP_AUTH_ONLY_ON_DEMAND, &dpRegkeyDatabase.bHdcpAuthOnlyOnDemand, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_ENABLE_MSA_OVER_MST, &dpRegkeyDatabase.bMsaOverMstEnabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE, &dpRegkeyDatabase.bOptLinkKeptAlive, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE_MST, &dpRegkeyDatabase.bOptLinkKeptAliveMst, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE_SST, &dpRegkeyDatabase.bOptLinkKeptAliveSst, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_FORCE_EDP_ILR, &dpRegkeyDatabase.bBypassEDPRevCheck, DP_REG_VAL_BOOL}, + {NV_DP_DSC_MST_CAP_BUG_3143315, &dpRegkeyDatabase.bDscMstCapBug3143315, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_POWER_DOWN_PHY, &dpRegkeyDatabase.bPowerDownPhyBeforeD3, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_MST_PCON_CAPS_READ_DISABLED, &dpRegkeyDatabase.bMSTPCONCapsReadDisabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_DISABLE_TUNNEL_BW_ALLOCATION, &dpRegkeyDatabase.bForceDisableTunnelBwAllocation, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_DISABLE_DOWNSPREAD, &dpRegkeyDatabase.bDownspreadDisabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_DISABLE_AVOID_HBR3_WAR, &dpRegkeyDatabase.bDisableAvoidHBR3War, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_SKIP_ZERO_OUI_CACHE, &dpRegkeyDatabase.bSkipZeroOuiCache, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_ENABLE_FIX_FOR_5147205, &dpRegkeyDatabase.bEnable5147205Fix, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_FORCE_HEAD_SHUTDOWN, &dpRegkeyDatabase.bForceHeadShutdown, DP_REG_VAL_BOOL} +}; + +EvoMainLink::EvoMainLink(EvoInterface * provider, Timer * timer) : + provider(provider), + timer(timer), + displayId(provider->getDisplayId()), + subdeviceIndex(provider->getSubdeviceIndex()) +{ + // + // Process GPU caps (This needs to be replaced with a control call caps interface) + // + NvU32 code; + + // Initialize shared regkey data base, and apply the overrides + this->initializeRegkeyDatabase(); + this->applyRegkeyOverrides(); + + _isDynamicMuxCapable = false; + _isLTPhyRepeaterSupported = true; + _rmPhyRepeaterCount = 0; + 
dpMemZero(&_DSC, sizeof(_DSC));
+    dpMemZero(&dfpParams, sizeof(dfpParams));
+    dpMemZero(&dpParams, sizeof(dpParams));
+    dpMemZero(&paramsHdcpCtrl, sizeof(paramsHdcpCtrl));
+
+    //
+    // Tell RM to keep its hands off the DisplayPort hardware
+    //
+    NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS setManualParams = {0};
+    setManualParams.subDeviceInstance = subdeviceIndex;
+    code = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT, &setManualParams, sizeof setManualParams);
+    DP_ASSERT (code == NVOS_STATUS_SUCCESS && "Unable to enable library mode");
+
+    //
+    // Get the mask of valid heads
+    //
+    NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS allHeadMaskParams;
+    dpMemZero(&allHeadMaskParams, sizeof allHeadMaskParams);
+    allHeadMaskParams.subDeviceInstance = subdeviceIndex;
+    code = provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK, &allHeadMaskParams, sizeof(allHeadMaskParams));
+
+    if (code != NVOS_STATUS_SUCCESS)
+    {
+        DP_ASSERT(0 && "Unable to get head mask");
+        allHeadMask = 3;
+    }
+    else
+    {
+        allHeadMask = allHeadMaskParams.headMask;
+    }
+}
+
+
+bool EvoMainLink::vrrRunEnablementStage(unsigned stage, NvU32 *status)
+{
+    NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS params = {0};
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = this->displayId;
+
+    switch (stage)
+    {
+        case VRR_ENABLE_STAGE_MONITOR_ENABLE_BEGIN:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _MONITOR_ENABLE_BEGIN);
+            break;
+        case VRR_ENABLE_STAGE_MONITOR_ENABLE_CHALLENGE:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _MONITOR_ENABLE_CHALLENGE);
+            break;
+        case VRR_ENABLE_STAGE_MONITOR_ENABLE_CHECK:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _MONITOR_ENABLE_CHECK);
+            break;
+        case VRR_ENABLE_STAGE_DRIVER_ENABLE_BEGIN:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _DRIVER_ENABLE_BEGIN);
+            break;
+        case VRR_ENABLE_STAGE_DRIVER_ENABLE_CHALLENGE:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _DRIVER_ENABLE_CHALLENGE);
+            break;
+        case VRR_ENABLE_STAGE_DRIVER_ENABLE_CHECK:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _DRIVER_ENABLE_CHECK);
+            break;
+        case VRR_ENABLE_STAGE_RESET_MONITOR:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _RESET_MONITOR);
+            break;
+        case VRR_ENABLE_STAGE_INIT_PUBLIC_INFO:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _INIT_PUBLIC_INFO);
+            break;
+        case VRR_ENABLE_STAGE_GET_PUBLIC_INFO:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _GET_PUBLIC_INFO);
+            break;
+        case VRR_ENABLE_STAGE_STATUS_CHECK:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _STATUS_CHECK);
+            break;
+        default:
+            DP_ASSERT(0 && "Undefined VRR Enablement Stage.");
+            return false;
+    }
+    NvU32 retVal = provider->rmControl0073(NV0073_CTRL_CMD_DP_ENABLE_VRR, &params, sizeof(params));
+    if (status)
+    {
+        *status = params.result;
+    }
+    if (retVal != NVOS_STATUS_SUCCESS)
+    {
+        return false;
+    }
+    return true;
+}
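Illustrative only: one plausible calling sequence for the stage machine above, assuming the VRR_ENABLE_STAGE_* stages are issued in the order the enum suggests and that a false return aborts enablement (the caller and its policy are assumptions, not part of this change):

    static bool enableVrr(EvoMainLink *link)
    {
        static const unsigned stages[] = {
            VRR_ENABLE_STAGE_MONITOR_ENABLE_BEGIN,
            VRR_ENABLE_STAGE_MONITOR_ENABLE_CHALLENGE,
            VRR_ENABLE_STAGE_MONITOR_ENABLE_CHECK,
            VRR_ENABLE_STAGE_DRIVER_ENABLE_BEGIN,
            VRR_ENABLE_STAGE_DRIVER_ENABLE_CHALLENGE,
            VRR_ENABLE_STAGE_DRIVER_ENABLE_CHECK,
        };
        for (unsigned i = 0; i < sizeof(stages) / sizeof(stages[0]); i++)
        {
            NvU32 status = 0;
            if (!link->vrrRunEnablementStage(stages[i], &status))
                return false;   // RM call failed or the stage was rejected
        }
        return true;
    }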
+
+bool EvoMainLink::getEdpPowerData(bool *panelPowerOn, bool *dpcdPowerStateD0)
+{
+    NV0073_CTRL_DP_GET_EDP_DATA_PARAMS params;
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = this->displayId;
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_EDP_DATA, &params, sizeof(params));
+
+    if (code != NVOS_STATUS_SUCCESS)
+    {
+        DP_ASSERT(0 && "Unable to get eDP power data, assuming panel off.");
+        if (panelPowerOn)
+        {
+            *panelPowerOn = false;
+        }
+        if (dpcdPowerStateD0)
+        {
+            *dpcdPowerStateD0 = false;
+        }
+        return false;
+    }
+    else
+    {
+        if (panelPowerOn)
+        {
+            *panelPowerOn = FLD_TEST_DRF(0073_CTRL_DP, _GET_EDP_DATA, _PANEL_POWER, _ON,
+                                         params.data);
+        }
+        if (dpcdPowerStateD0)
+        {
+            *dpcdPowerStateD0 = FLD_TEST_DRF(0073_CTRL_DP, _GET_EDP_DATA, _DPCD_POWER_STATE, _D0,
+                                             params.data);
+        }
+        return true;
+    }
+}
+
+NvU32 EvoMainLink::headToStream(NvU32 head, bool bSidebandMessageSupported,
+                                DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier)
+{
+    NvU32 streamIndex = 0;
+
+    NvU32 maxHeads = allHeadMask;
+    NUMSETBITS_32(maxHeads);
+    streamIndex = DP_MST_HEAD_TO_STREAMID(head, streamIdentifier, maxHeads);
+
+    return streamIndex;
+}
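For the simple case, the macros near the top of this file hardcode head#x to stream x+1; DP_MST_HEAD_TO_STREAMID additionally folds in the per-head pipeline id and head count, and its exact encoding lives in the headers. A quick standalone check of just the offset-by-one relationship:

    #include <cassert>

    #define STREAM_TO_HEAD_ID(s) ((s) - 1)
    #define HEAD_TO_STREAM_ID(s) ((s) + 1)

    int main()
    {
        for (unsigned head = 0; head < 4; head++)
            assert(STREAM_TO_HEAD_ID(HEAD_TO_STREAM_ID(head)) == head);  // round-trips
        return 0;
    }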
+
+bool EvoMainLink::queryGPUCapability()
+{
+    dpMemZero(&dpParams, sizeof(dpParams));
+    dpParams.subDeviceInstance = subdeviceIndex;
+    dpParams.sorIndex = provider->getSorIndex();
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_CAPS, &dpParams, sizeof(dpParams));
+    if (code != NVOS_STATUS_SUCCESS)
+    {
+        DP_ASSERT(0 && "Unable to process GPU caps");
+        return false;
+    }
+    //
+    // Check if the MST feature needs to be disabled by regkey. This is a requirement from a few
+    // OEMs that don't want to support MST on particular SKUs, whenever requested through the INF.
+    //
+    _hasMultistream             = (dpParams.bIsMultistreamSupported == NV_TRUE) && !_isMstDisabledByRegkey;
+    _isStreamCloningEnabled     = (dpParams.bIsSCEnabled == NV_TRUE) ? true : false;
+    _hasIncreasedWatermarkLimits = (dpParams.bHasIncreasedWatermarkLimits == NV_TRUE) ? true : false;
+    _isFECSupported             = (dpParams.bFECSupported == NV_TRUE) ? true : false;
+    _useDfpMaxLinkRateCaps      = (dpParams.bOverrideLinkBw == NV_TRUE) ? true : false;
+    _isLTPhyRepeaterSupported   = (dpParams.bIsTrainPhyRepeater == NV_TRUE) ? true : false;
+    _isDownspreadSupported      = (dpParams.bSupportDPDownSpread == NV_TRUE) ? true : false;
+    _bAvoidHBR3                 = (dpParams.bAvoidHBR3 == NV_TRUE) ? true : false;
+    _bIsDpTunnelingHwBugWarEnabled = (dpParams.bIsDpTunnelingHwBugWarEnabled == NV_TRUE) ? true : false;
+
+    _gpuSupportedDpVersions = dpParams.dpVersionsSupported;
+
+    if (FLD_TEST_DRF(0073, _CTRL_CMD_DP_GET_CAPS, _MAX_LINK_RATE, _1_62, dpParams.maxLinkRate))
+        _maxLinkRateSupportedGpu = dp2LinkRate_1_62Gbps; // in 10Mbps
+    else if (FLD_TEST_DRF(0073, _CTRL_CMD_DP_GET_CAPS, _MAX_LINK_RATE, _2_70, dpParams.maxLinkRate))
+        _maxLinkRateSupportedGpu = dp2LinkRate_2_70Gbps; // in 10Mbps
+    else if (FLD_TEST_DRF(0073, _CTRL_CMD_DP_GET_CAPS, _MAX_LINK_RATE, _5_40, dpParams.maxLinkRate))
+        _maxLinkRateSupportedGpu = dp2LinkRate_5_40Gbps; // in 10Mbps
+    else if (FLD_TEST_DRF(0073, _CTRL_CMD_DP_GET_CAPS, _MAX_LINK_RATE, _8_10, dpParams.maxLinkRate))
+        _maxLinkRateSupportedGpu = dp2LinkRate_8_10Gbps; // in 10Mbps
+    else
+    {
+        DP_ASSERT(0 && "Unable to get max link rate");
+        // Assume that we can at least support RBR.
+        _maxLinkRateSupportedGpu = dp2LinkRate_1_62Gbps;
+    }
+
+    if (!_isDscDisabledByRegkey)
+    {
+        _DSC.isDscSupported          = dpParams.DSC.bDscSupported ? true : false;
+        _DSC.encoderColorFormatMask  = dpParams.DSC.encoderColorFormatMask;
+        _DSC.lineBufferSizeKB        = dpParams.DSC.lineBufferSizeKB;
+        _DSC.rateBufferSizeKB        = dpParams.DSC.rateBufferSizeKB;
+        _DSC.bitsPerPixelPrecision   = dpParams.DSC.bitsPerPixelPrecision;
+        _DSC.maxNumHztSlices         = dpParams.DSC.maxNumHztSlices;
+        _DSC.lineBufferBitDepth      = dpParams.DSC.lineBufferBitDepth;
+    }
+    return true;
+}
+
+void EvoMainLink::triggerACT()
+{
+    NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = this->displayId;
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_SEND_ACT, &params, sizeof params);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "triggerACT failed!");
+    }
+}
+
+void EvoMainLink::configureAndTriggerECF(NvU64 ecf, NvBool bForceClearEcf, NvBool bAddStreamBack)
+{
+    NV0073_CTRL_CMD_DP_SET_ECF_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.sorIndex = provider->getSorIndex();
+    params.ecf = ecf;
+    //
+    // ForceClearECF deletes the DP MST timeslots along with the ECF on GA10X and later.
+    // If Add Stream Back is set, it adds back the same timeslots after clearing the ECF.
+    // bForceClearEcf = TRUE must be set for bAddStreamBack to have any effect.
+    // bForceClearEcf is only set in the Detach Stream / Flush mode case.
+    // bAddStreamBack is only set in the QSES error scenario.
+    // In all other cases these are set to FALSE.
+    //
+    params.bForceClearEcf = bForceClearEcf;
+    params.bAddStreamBack = bAddStreamBack;
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_ECF, &params, sizeof params);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "Set ECF failed!");
+    }
+    triggerACT();
+    // Wait for 1 link frame time for the ECF to take effect, i.e.
+    //   Wait Time = 1024 MTPs * 64 clocks/MTP * (1/162MHz) = 404.5 us.
+    // As the minimum time available for timer->sleep() is 1 ms, use that.
+    timer->sleep(1);
+
+}
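A compile-time restatement of the wait-time arithmetic in the comment above (1024 MTPs of 64 link clocks each, at a 162 MHz link clock, computed in nanoseconds):

    static_assert(1024ULL * 64ULL * 1000000000ULL / 162000000ULL == 404543ULL,
                  "1 link frame ~= 404.5 us, hence the rounded-up 1 ms sleep");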
+
+// TODO: we need to re-architect this code so it can move out of the DP library
+void EvoMainLink::configureHDCPRenegotiate(NvU64 cN, NvU64 cKSV, bool bForceReAuth, bool bRxIDMsgPending)
+{
+    dpMemZero(&paramsHdcpCtrl, sizeof(paramsHdcpCtrl));
+    paramsHdcpCtrl.subDeviceInstance = this->subdeviceIndex;
+    paramsHdcpCtrl.displayId = this->displayId;
+    paramsHdcpCtrl.cN = cN;
+    paramsHdcpCtrl.cKsv = cKSV;
+    if (bForceReAuth)
+    {
+        paramsHdcpCtrl.flags |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _FLAGS_FORCE_REAUTH, _YES);
+    }
+    else
+    {
+        paramsHdcpCtrl.flags |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _FLAGS_FORCE_REAUTH, _NO);
+    }
+
+    if (bRxIDMsgPending)
+    {
+        paramsHdcpCtrl.flags |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _FLAGS_RXIDMSG_PENDING, _YES);
+    }
+    else
+    {
+        paramsHdcpCtrl.flags |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _FLAGS_RXIDMSG_PENDING, _NO);
+    }
+
+    paramsHdcpCtrl.cmd |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _CMD, _RENEGOTIATE);
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_HDCP_CTRL, &paramsHdcpCtrl, sizeof paramsHdcpCtrl);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "configureHDCPRenegotiate failed!");
+    }
+}
+
+void EvoMainLink::configureHDCPDisableAuthentication()
+{
+    dpMemZero(&paramsHdcpCtrl, sizeof(paramsHdcpCtrl));
+    paramsHdcpCtrl.subDeviceInstance = this->subdeviceIndex;
+    paramsHdcpCtrl.displayId = this->displayId;
+
+    paramsHdcpCtrl.cmd |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _CMD, _DISABLE_AUTHENTICATION);
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_HDCP_CTRL, &paramsHdcpCtrl, sizeof paramsHdcpCtrl);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "configureHDCPDisableAuthentication failed!");
+    }
+}
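The DRF_DEF() calls above just OR pre-shifted field values into a request word. A simplified standalone sketch of that bitfield idiom (the field positions here are illustrative, not the real register layout):

    #include <cstdint>

    constexpr uint32_t fieldValue(unsigned lowBit, uint32_t value) { return value << lowBit; }

    // e.g. a 1-bit FORCE_REAUTH flag at bit 0 and a CMD field starting at bit 4
    constexpr uint32_t FLAGS_FORCE_REAUTH_YES = fieldValue(0, 1);
    constexpr uint32_t CMD_RENEGOTIATE        = fieldValue(4, 1);

    static_assert((FLAGS_FORCE_REAUTH_YES | CMD_RENEGOTIATE) == 0x11,
                  "composed request word");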
+
+void EvoMainLink::configureHDCPAbortAuthentication(AbortAuthReason abortAuthReason)
+{
+    dpMemZero(&paramsHdcpCtrl, sizeof(paramsHdcpCtrl));
+    paramsHdcpCtrl.subDeviceInstance = this->subdeviceIndex;
+    paramsHdcpCtrl.displayId = this->displayId;
+    paramsHdcpCtrl.cN = HDCP_DUMMY_CN;
+    paramsHdcpCtrl.cKsv = HDCP_DUMMY_CKSV;
+    switch (abortAuthReason)
+    {
+        case UNTRUST: paramsHdcpCtrl.flags |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _FLAGS_ABORT, _UNTRUST); break; // Abort due to Kp mismatch
+        case UNRELBL: paramsHdcpCtrl.flags |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _FLAGS_ABORT, _UNRELBL); break; // Abort due to repeated link failure
+        case KSV_LEN: paramsHdcpCtrl.flags |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _FLAGS_ABORT, _KSV_LEN); break; // Abort due to KSV length
+        case KSV_SIG: paramsHdcpCtrl.flags |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _FLAGS_ABORT, _KSV_SIG); break; // Abort due to KSV signature
+        case SRM_SIG: paramsHdcpCtrl.flags |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _FLAGS_ABORT, _SRM_SIG); break; // Abort due to SRM signature
+        case SRM_REV: paramsHdcpCtrl.flags |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _FLAGS_ABORT, _SRM_REV); break; // Abort due to SRM revocation
+        case NORDY:   paramsHdcpCtrl.flags |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _FLAGS_ABORT, _NORDY);   break; // Abort due to repeater not ready
+        case KSVTOP:  paramsHdcpCtrl.flags |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _FLAGS_ABORT, _KSVTOP);  break; // Abort due to KSV topology error
+        case BADBKSV: paramsHdcpCtrl.flags |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _FLAGS_ABORT, _BADBKSV); break; // Abort due to invalid Bksv
+        default:      paramsHdcpCtrl.flags |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _FLAGS_ABORT, _NONE);    break; // Default value
+    }
+    paramsHdcpCtrl.cmd |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _CMD, _ABORT_AUTHENTICATION);
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_HDCP_CTRL, &paramsHdcpCtrl, sizeof paramsHdcpCtrl);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "configureHDCPAbortAuthentication failed!");
+    }
+}
+
+void EvoMainLink::configureHDCPValidateLink(HDCPValidateData &hdcpValidateData, NvU64 cN, NvU64 cKsv)
+{
+    dpMemZero(&paramsHdcpCtrl, sizeof(paramsHdcpCtrl));
+    paramsHdcpCtrl.subDeviceInstance = this->subdeviceIndex;
+    paramsHdcpCtrl.displayId = this->displayId;
+    paramsHdcpCtrl.linkCount = 1;
+    paramsHdcpCtrl.cN = cN;
+    paramsHdcpCtrl.cKsv = cKsv;
+    paramsHdcpCtrl.cmd |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _CMD, _VALIDATE_LINK);
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_HDCP_CTRL, &paramsHdcpCtrl, sizeof paramsHdcpCtrl);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "configureHDCPValidateLink(): HDCP_CTRL failed!");
+    }
+
+    for (unsigned i = 0; i < NV0073_CTRL_HDCP_VPRIME_SIZE; i++)
+    {
+        hdcpValidateData.vP[i] = paramsHdcpCtrl.vP[i];
+    }
+
+    hdcpValidateData.aN = paramsHdcpCtrl.aN[0]; // Only primary link An for DP use.
+    hdcpValidateData.mP = paramsHdcpCtrl.mP;
+}
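Design note: the reason-to-flag switch above could equally be a lookup table indexed by the abort reason. A sketch with illustrative flag values (the real values come from the DRF_DEF(..., _FLAGS_ABORT, ...) macros, so this is a shape, not a drop-in replacement):

    #include <cstdint>

    enum AbortReasonSketch { kUntrust, kUnrelbl, kKsvLen, kCount };

    static const uint32_t kAbortFlagSketch[kCount] = {
        0x1,    // kUntrust: Kp mismatch (placeholder value)
        0x2,    // kUnrelbl: repeated link failure (placeholder value)
        0x3,    // kKsvLen : bad KSV length (placeholder value)
    };

    static uint32_t abortFlags(AbortReasonSketch r)
    {
        return (r < kCount) ? kAbortFlagSketch[r] : 0;   // out of range -> _NONE
    }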
+
+void EvoMainLink::configureHDCPGetHDCPState(HDCPState &hdcpState)
+{
+    NV0073_CTRL_SPECIFIC_GET_HDCP_STATE_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = this->displayId;
+
+    // Set CACHED to FALSE; it will cause an hdcpStatusRead, which gates the engine.
+    // params.flags = FLD_SET_DRF(0073_CTRL_SPECIFIC, _HDCP_STATE, _ENCRYPTING_CACHED, _TRUE, 0);
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_GET_HDCP_STATE, &params, sizeof params);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "configureHDCPGetHDCPState(): Get HDCP state failed!");
+    }
+
+    hdcpState.HDCP_State_1X_Capable = FLD_TEST_DRF(0073_CTRL_SPECIFIC,
+                                                   _HDCP_STATE, _RECEIVER_CAPABLE, _YES, params.flags) ? true : false;
+
+    if (FLD_TEST_DRF(0073_CTRL_SPECIFIC, _HDCP_STATE, _REPEATER_CAPABLE,
+                     _YES, params.flags) ||
+        FLD_TEST_DRF(0073_CTRL_SPECIFIC, _HDCP_STATE, _HDCP22_REPEATER_CAPABLE,
+                     _YES, params.flags))
+    {
+        hdcpState.HDCP_State_Repeater_Capable = true;
+    }
+    else
+    {
+        hdcpState.HDCP_State_Repeater_Capable = false;
+    }
+
+    if (FLD_TEST_DRF(0073_CTRL_SPECIFIC, _HDCP_STATE, _HDCP22_RECEIVER_CAPABLE,
+                     _YES, params.flags) ||
+        FLD_TEST_DRF(0073_CTRL_SPECIFIC, _HDCP_STATE, _HDCP22_REPEATER_CAPABLE,
+                     _YES, params.flags))
+    {
+        hdcpState.HDCP_State_22_Capable = true;
+    }
+    else
+    {
+        hdcpState.HDCP_State_22_Capable = false;
+    }
+
+    if (hdcpState.HDCP_State_22_Capable)
+    {
+        if (FLD_TEST_DRF(0073_CTRL_SPECIFIC, _HDCP_STATE, _HDCP22_ENCRYPTING, _YES, params.flags))
+        {
+            hdcpState.HDCP_State_Encryption = true;
+        }
+        else
+        {
+            hdcpState.HDCP_State_Encryption = false;
+        }
+    }
+    else
+    {
+        if (FLD_TEST_DRF(0073_CTRL_SPECIFIC, _HDCP_STATE, _ENCRYPTING, _YES, params.flags))
+        {
+            hdcpState.HDCP_State_Encryption = true;
+        }
+        else
+        {
+            hdcpState.HDCP_State_Encryption = false;
+        }
+    }
+
+    if (FLD_TEST_DRF(0073_CTRL_SPECIFIC, _HDCP_STATE, _AUTHENTICATED,
+                     _YES, params.flags))
+    {
+        hdcpState.HDCP_State_Authenticated = true;
+    }
+    else
+    {
+        hdcpState.HDCP_State_Authenticated = false;
+    }
+}
+
+void EvoMainLink::disableAlternateScramblerReset()
+{
+    NV0073_CTRL_DP_ASSR_CTRL_PARAMS assrParams;
+    dpMemZero(&assrParams, sizeof(assrParams));
+    assrParams.subDeviceInstance = subdeviceIndex;
+    assrParams.displayId = displayId;
+
+    assrParams.cmd = DRF_DEF(0073_CTRL, _DP, _ASSR_CMD, _DISABLE);
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_ASSR_CTRL, &assrParams, sizeof(assrParams));
+
+    if (code != NVOS_STATUS_SUCCESS || assrParams.err)
+    {
+        DP_ASSERT(0 && "Unable to change scrambler reset");
+    }
+}
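The if/else ladders in configureHDCPGetHDCPState reduce to direct boolean assignments. A sketch of the same decode with a small helper (the flag masks are placeholders standing in for the FLD_TEST_DRF tests):

    #include <cstdint>

    static inline bool testFlag(uint32_t flags, uint32_t mask) { return (flags & mask) != 0; }

    struct HdcpStateSketch { bool repeaterCapable, hdcp22Capable; };

    static HdcpStateSketch decode(uint32_t flags, uint32_t repeater1x,
                                  uint32_t repeater22, uint32_t receiver22)
    {
        HdcpStateSketch s;
        s.repeaterCapable = testFlag(flags, repeater1x) || testFlag(flags, repeater22);
        s.hdcp22Capable   = testFlag(flags, receiver22) || testFlag(flags, repeater22);
        return s;
    }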
+
+bool EvoMainLink::setStreamType(unsigned streamIndex, NvU8 streamType, bool * bNeedReNegotiate)
+{
+    dpMemZero(&paramsHdcpCtrl, sizeof(paramsHdcpCtrl));
+    paramsHdcpCtrl.subDeviceInstance = this->subdeviceIndex;
+    paramsHdcpCtrl.displayId = this->displayId;
+    paramsHdcpCtrl.streamIndex = streamIndex;
+    paramsHdcpCtrl.streamType = streamType;
+
+    //
+    // According to the spec, Type 1 content cannot be transmitted to a repeater with an
+    // HDCP 1.x downstream device. RM therefore gives the client the option to force Type 0,
+    // instead of the repeater blanking the output with Type 1.
+    // TODO: check PlayReady/HWDRM behavior with this:
+    // 1. Will it stop engaging HWDRM with this fix?
+    // 2. VPR blanking gets applied and blanks the repeater display as well.
+    //
+    paramsHdcpCtrl.bEnforceType0Hdcp1xDS = (streamType == NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_TYPE_1);
+
+    paramsHdcpCtrl.cmd |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _CMD,
+                                  _SET_TYPE);
+
+    *bNeedReNegotiate = false;
+
+    if (!provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_HDCP_CTRL,
+                                 &paramsHdcpCtrl, sizeof paramsHdcpCtrl))
+    {
+        if (FLD_TEST_DRF(0073_CTRL_SPECIFIC, _HDCP_CTRL_FLAGS, _TYPE_CHANGED,
+                         _YES, paramsHdcpCtrl.flags))
+        {
+            *bNeedReNegotiate = true;
+        }
+
+        return true;
+    }
+    else
+    {
+        DP_PRINTF(DP_ERROR, "DP_EVO> set stream type cmd failed!");
+        return false;
+    }
+}
+
+void EvoMainLink::forwardPendingKsvListReady(NvBool bKsvListReady)
+{
+    dpMemZero(&paramsHdcpCtrl, sizeof(paramsHdcpCtrl));
+    paramsHdcpCtrl.subDeviceInstance = this->subdeviceIndex;
+    paramsHdcpCtrl.displayId = this->displayId;
+    paramsHdcpCtrl.bPendingKsvListReady = bKsvListReady;
+    paramsHdcpCtrl.cmd |= DRF_DEF(0073_CTRL_SPECIFIC, _HDCP_CTRL, _CMD,
+                                  _FORWARD_KSVLIST_READY);
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_HDCP_CTRL, &paramsHdcpCtrl,
+                                        sizeof paramsHdcpCtrl);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "forwardPendingKsvListReady failed!");
+    }
+}
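A hypothetical caller for setStreamType above: when RM reports that the stream type actually changed, the HDCP session has to be renegotiated. The wrapper name and its renegotiation policy are illustrative, not an existing entry point; HDCP_DUMMY_CN/HDCP_DUMMY_CKSV are the placeholder values already used by the abort path:

    static bool switchStreamType(EvoMainLink *link, unsigned streamIndex, NvU8 newType)
    {
        bool bNeedReNegotiate = false;
        if (!link->setStreamType(streamIndex, newType, &bNeedReNegotiate))
            return false;                       // control call rejected the request

        if (bNeedReNegotiate)
            link->configureHDCPRenegotiate(HDCP_DUMMY_CN, HDCP_DUMMY_CKSV,
                                           true /* bForceReAuth */,
                                           false /* bRxIDMsgPending */);
        return true;
    }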
+
+void EvoMainLink::configureSingleStream(NvU32 head,
+                                        NvU32 hBlankSym,
+                                        NvU32 vBlankSym,
+                                        bool bEnhancedFraming,
+                                        NvU32 tuSize,
+                                        NvU32 waterMark,
+                                        DP_COLORFORMAT colorFormat,
+                                        DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamId,
+                                        DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultiStreamMode,
+                                        bool bAudioOverRightPanel,
+                                        bool bEnable2Head1Or)
+{
+    NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.head = head;
+    params.sorIndex = provider->getSorIndex();
+    params.bEnableTwoHeadOneOr = bEnable2Head1Or;
+
+    if (singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)
+    {
+        // In 2-SST mode configure Head-SF on the primary link, so the primary
+        // link configuration gets copied to the secondary link.
+        params.dpLink = streamId;
+    }
+    else
+    {
+        params.dpLink = provider->getLinkIndex();
+    }
+
+    params.bEnableOverride = NV_TRUE;
+    params.bMST = NV_FALSE;
+    params.hBlankSym = hBlankSym;
+    params.vBlankSym = vBlankSym;
+    params.colorFormat = colorFormat;
+
+    params.SST.bEnhancedFraming = bEnhancedFraming;
+    params.SST.tuSize = tuSize;
+    params.SST.waterMark = waterMark;
+    params.SST.bEnableAudioOverRightPanel = bAudioOverRightPanel;
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_CONFIG_STREAM, &params, sizeof params);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "configureSingleStream failed!");
+    }
+}
+
+void EvoMainLink::configureSingleHeadMultiStreamMode(NvU32 displayIDs[],
+                                                     NvU32 numStreams,
+                                                     NvU32 mode,
+                                                     bool bSetConfig,
+                                                     NvU8 vbiosPrimaryDispIdIndex)
+{
+    NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+
+    for (NvU32 pipelineID = 0; pipelineID < numStreams; pipelineID++)
+    {
+        params.displayIDs[pipelineID] = displayIDs[pipelineID];
+    }
+    params.mode = mode;
+    params.bSetConfig = bSetConfig;
+    params.numStreams = numStreams;
+    params.vbiosPrimaryDispIdIndex = vbiosPrimaryDispIdIndex;
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM,
+                                        &params, sizeof params);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "configureSingleHeadMultiStreamMode failed!");
+    }
+}
+
+void EvoMainLink::configureMultiStream(NvU32 head,
+                                       NvU32 hBlankSym,
+                                       NvU32 vBlankSym,
+                                       NvU32 slotStart,
+                                       NvU32 slotEnd,
+                                       NvU32 PBN,
+                                       NvU32 Timeslice,
+                                       DP_COLORFORMAT colorFormat,
+                                       DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier,
+                                       DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultistreamMode,
+                                       bool bAudioOverRightPanel,
+                                       bool bEnable2Head1Or)
+{
+    NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS params = {0};
+    params.head = head;
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.sorIndex = provider->getSorIndex();
+    params.dpLink = provider->getLinkIndex();
+    params.bEnableOverride = NV_TRUE;
+    params.bMST = NV_TRUE;
+    params.hBlankSym = hBlankSym;
+    params.vBlankSym = vBlankSym;
+    params.colorFormat = colorFormat;
+    params.bEnableTwoHeadOneOr = bEnable2Head1Or;
+    params.singleHeadMultistreamMode = singleHeadMultistreamMode;
+
+    params.MST.slotStart = slotStart;
+    params.MST.slotEnd = slotEnd;
+    params.MST.PBN = PBN;
+    params.MST.Timeslice = Timeslice;
+    params.MST.singleHeadMSTPipeline = streamIdentifier;
+    params.MST.bEnableAudioOverRightPanel = bAudioOverRightPanel;
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_CONFIG_STREAM, &params, sizeof params);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "configureMultiStream failed!");
+    }
+}
+
+void EvoMainLink::configureMsScratchRegisters(NvU32 address,
+                                              NvU32 hopCount,
+                                              NvU32 dpMsDevAddrState)
+{
+    NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = this->displayId;
+    params.activeDevAddr = address;
+    params.sorIndex = provider->getSorIndex();
+    params.dpLink = provider->getLinkIndex();
+    params.hopCount = hopCount;
+    params.dpMsDevAddrState = dpMsDevAddrState;
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG, &params, sizeof params);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "configureMsScratchRegisters failed!");
+    }
+}
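+
+// The PBN argument to configureMultiStream() above is the DP MST unit of
+// payload bandwidth: one PBN corresponds to 54/64 MB/s of link capacity.
+// Rough estimate for an RGB 8bpc stream, ignoring the spec's 0.6% margin
+// (sketch only; a real allocator must follow the DP 1.2 PBN formula exactly):
+//
+//     bytes/s = pclk_hz * 3
+//     PBN    ~= ceil(bytes_per_s / 843750)     // 843750 B/s per PBN
+//
+// e.g. a 148.5 MHz (1080p60) RGB stream needs about
+// 148500000 * 3 / 843750 ~= 528 PBN.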
+
+//
+// EvoMainLink::setDpStereoMSAParameters does the DP library stereo override
+// for in-band signaling through the MSA MISC1 field and keeps the rest of the
+// MSA params the same.
+//
+// On GK110 and later, when stereo is enabled, we send the stereo eye
+// information to the sink device through the MSA MISC1 bits 2:1. Certain
+// DP 1.2 non-compliant DP->VGA dongles cannot handle this information, and
+// lose all signal when these bits are non-zero. This WAR uses an RM control
+// to override those MSA bits to zero. It should be called whenever a DP->VGA
+// dongle is in use.
+//
+bool EvoMainLink::setDpStereoMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams)
+{
+    NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = msaparams.displayId;
+    // Club the MSA params passed by DD with the DP library stereo override.
+    params.bStereoPhaseInverse = msaparams.bStereoPhaseInverse;
+    params.featureValues.misc[1] = msaparams.featureValues.misc[1];
+
+    if (bStereoEnable) {
+        params.bEnableMSA = NV_TRUE | msaparams.bEnableMSA;
+        params.featureMask.miscMask[1] = DRF_SHIFTMASK(NV_DP_MSA_PROPERTIES_MISC1_STEREO) | msaparams.featureMask.miscMask[1];
+    } else {
+        params.bEnableMSA = NV_FALSE | msaparams.bEnableMSA;
+        params.featureMask.miscMask[1] |= msaparams.featureMask.miscMask[1];
+    }
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES, &params, sizeof params);
+
+    //
+    // NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES is only implemented on GK110 and
+    // later, but this WAR is unnecessary on other GPUs, so ignore
+    // ERROR_NOT_SUPPORTED.
+    //
+    // XXX This may fail if a future GPU requires this WAR but does not
+    // implement this rmcontrol. To avoid that, this class would need to be
+    // aware of which evo display HAL is in use.
+    //
+    if (ret != NVOS_STATUS_SUCCESS && ret != NVOS_STATUS_ERROR_NOT_SUPPORTED)
+    {
+        DP_ASSERT(!"Enabling MSA stereo override failed!");
+        return false;
+    }
+
+    return true;
+}
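+
+// The featureMask/featureValues pairs above follow an override pattern: for
+// each masked bit the supplied value replaces what would otherwise be sent in
+// the MSA, and unmasked bits pass through untouched. Conceptually (sketch
+// only, inferred from the parameter names, not the RM implementation):
+//
+//     misc1_sent = (misc1_default & ~miscMask[1]) | (misc[1] & miscMask[1]);
+//
+// so masking NV_DP_MSA_PROPERTIES_MISC1_STEREO with a zero value is what
+// forces the stereo eye bits (MISC1 2:1) to zero for the dongle WAR.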
+
+//
+// EvoMainLink::setDpMSAParameters clubs the MSA parameters passed by DD for
+// the YCbCr 4:2:0 format with the DP library stereo override for in-band
+// signaling through the MSA MISC1 field.
+//
+// On GK110 and later, when stereo is enabled, we send the stereo eye
+// information to the sink device through the MSA MISC1 bits 2:1. Certain
+// DP 1.2 non-compliant DP->VGA dongles cannot handle this information, and
+// lose all signal when these bits are non-zero. This WAR uses an RM control
+// to override those MSA bits to zero. It should be called whenever a DP->VGA
+// dongle is in use.
+//
+bool EvoMainLink::setDpMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams)
+{
+    NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = msaparams.displayId;
+    // Club the MSA params passed by DD with the DP library stereo override.
+    params.bStereoPhaseInverse = msaparams.bStereoPhaseInverse;
+    params.bCacheMsaOverrideForNextModeset = true;
+    params.featureValues.misc[0] = msaparams.featureValues.misc[0];
+    params.featureValues.misc[1] = msaparams.featureValues.misc[1];
+    params.featureMask.miscMask[0] = msaparams.featureMask.miscMask[0];
+
+    params.featureValues.rasterTotalHorizontal = msaparams.featureValues.rasterTotalHorizontal;
+    params.featureValues.rasterTotalVertical = msaparams.featureValues.rasterTotalVertical;
+    params.featureValues.activeStartHorizontal = msaparams.featureValues.activeStartHorizontal;
+    params.featureValues.activeStartVertical = msaparams.featureValues.activeStartVertical;
+    params.featureValues.surfaceTotalHorizontal = msaparams.featureValues.surfaceTotalHorizontal;
+    params.featureValues.surfaceTotalVertical = msaparams.featureValues.surfaceTotalVertical;
+    params.featureValues.syncWidthHorizontal = msaparams.featureValues.syncWidthHorizontal;
+    params.featureValues.syncPolarityHorizontal = msaparams.featureValues.syncPolarityHorizontal;
+    params.featureValues.syncHeightVertical = msaparams.featureValues.syncHeightVertical;
+    params.featureValues.syncPolarityVertical = msaparams.featureValues.syncPolarityVertical;
+
+    params.featureMask.bRasterTotalHorizontal = msaparams.featureMask.bRasterTotalHorizontal;
+    params.featureMask.bRasterTotalVertical = msaparams.featureMask.bRasterTotalVertical;
+    params.featureMask.bActiveStartHorizontal = msaparams.featureMask.bActiveStartHorizontal;
+    params.featureMask.bActiveStartVertical = msaparams.featureMask.bActiveStartVertical;
+    params.featureMask.bSurfaceTotalHorizontal = msaparams.featureMask.bSurfaceTotalHorizontal;
+    params.featureMask.bSurfaceTotalVertical = msaparams.featureMask.bSurfaceTotalVertical;
+    params.featureMask.bSyncWidthHorizontal = msaparams.featureMask.bSyncWidthHorizontal;
+    params.featureMask.bSyncPolarityHorizontal = msaparams.featureMask.bSyncPolarityHorizontal;
+    params.featureMask.bSyncHeightVertical = msaparams.featureMask.bSyncHeightVertical;
+    params.featureMask.bSyncPolarityVertical = msaparams.featureMask.bSyncPolarityVertical;
+
+    params.featureValues.reserved[0] = msaparams.featureValues.reserved[0];
+    params.featureValues.reserved[1] = msaparams.featureValues.reserved[1];
+    params.featureValues.reserved[2] = msaparams.featureValues.reserved[2];
+
+    params.pFeatureDebugValues = msaparams.pFeatureDebugValues;
+
+    if (bStereoEnable) {
+        params.bEnableMSA = NV_TRUE | msaparams.bEnableMSA;
+        params.featureMask.miscMask[1] = DRF_SHIFTMASK(NV_DP_MSA_PROPERTIES_MISC1_STEREO) | msaparams.featureMask.miscMask[1];
+    } else {
+        params.bEnableMSA = NV_FALSE | msaparams.bEnableMSA;
+        params.featureMask.miscMask[1] |= msaparams.featureMask.miscMask[1];
+    }
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES, &params, sizeof params);
+
+    //
+    // NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES is only implemented on GK110 and
+    // later, but this WAR is unnecessary on other GPUs, so ignore
+    // ERROR_NOT_SUPPORTED.
+    //
+    // XXX This may fail if a future GPU requires this WAR but does not
+    // implement this rmcontrol. To avoid that, this class would need to be
+    // aware of which evo display HAL is in use.
+    //
+    if (ret != NVOS_STATUS_SUCCESS && ret != NVOS_STATUS_ERROR_NOT_SUPPORTED) {
+        DP_ASSERT(!"Enabling MSA stereo override failed!");
+        return false;
+    }
+
+    return true;
+}
+
+bool EvoMainLink::setFlushMode()
+{
+    NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS params;
+    dpMemZero(&params, sizeof(params));
+
+    params.bFireAndForget = NV_FALSE;
+
+    params.base.subdeviceIndex = subdeviceIndex;
+    params.sorNumber = provider->getSorIndex();
+    params.bEnable = NV_TRUE;
+    params.bForceRgDiv = NV_FALSE;
+    params.bImmediate = NV_FALSE;
+    params.headMask = 0;
+
+    NvU32 ret = provider->rmControl5070(NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE, &params, sizeof params);
+
+    DP_ASSERT((ret == NVOS_STATUS_SUCCESS) && "Enabling flush mode failed!");
+
+    return ret == NVOS_STATUS_SUCCESS;
+}
+
+void EvoMainLink::clearFlushMode(unsigned headMask, bool testMode)
+{
+    NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS params;
+    dpMemZero(&params, sizeof(params));
+
+    params.bFireAndForget = NV_FALSE;
+    params.base.subdeviceIndex = subdeviceIndex;
+    params.sorNumber = provider->getSorIndex();
+    params.bEnable = NV_FALSE;
+    params.bImmediate = NV_FALSE;
+    params.headMask = headMask;
+    params.bForceRgDiv = testMode;
+
+    NvU32 ret = provider->rmControl5070(NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE, &params, sizeof params);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "DP_EVO> Disabling flush mode failed!");
+    }
+}
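+
+// setFlushMode()/clearFlushMode() above bracket link reconfiguration: flush
+// mode parks the SOR so the link can be retrained without underflowing active
+// scanout. Illustrative ordering only (the real call sites live in the
+// connector code, not in this file):
+//
+//     if (main->setFlushMode())
+//     {
+//         main->train(newConfig, ...);            // retrain at the new rate
+//         main->clearFlushMode(headMask, false);  // resume the attached heads
+//     }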
+
+bool EvoMainLink::physicalLayerSetTestPattern(PatternInfo * patternInfo)
+{
+    // Main parameter
+    NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS params;
+
+    // To identify which test pattern to transmit.
+    NV0073_CTRL_DP_TESTPATTERN ctrlPattern;
+
+    dpMemZero(&params, sizeof(params));
+    dpMemZero(&ctrlPattern, sizeof(ctrlPattern));
+
+    switch (patternInfo->lqsPattern)
+    {
+        case LINK_QUAL_DISABLED: ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_NONE; break;
+        case LINK_QUAL_D10_2: ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_D10_2; break;
+        case LINK_QUAL_SYM_ERROR: ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_SERMP; break;
+        case LINK_QUAL_PRBS7: ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_PRBS_7; break;
+        case LINK_QUAL_CP2520PAT3: ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_CP2520PAT3; break;
+        case LINK_QUAL_80BIT_CUST:
+        {
+            ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_CSTM;
+
+            params.cstm.field_31_0 = patternInfo->ctsmLower;
+            params.cstm.field_63_32 = patternInfo->ctsmMiddle;
+            params.cstm.field_95_64 = patternInfo->ctsmUpper;
+            break;
+        }
+#ifdef NV0073_CTRL_DP_TESTPATTERN_DATA_HBR2COMPLIANCE
+        case LINK_QUAL_HBR2_COMPLIANCE_EYE:
+        {
+            ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_HBR2COMPLIANCE;
+            params.cstm.field_31_0 = 0;
+            params.cstm.field_63_32 = 0;
+            params.cstm.field_95_64 = 0;
+            break;
+        }
+#endif
+        default:
+            DP_ASSERT(0 && "Unknown Phy Pattern");
+            return false;
+    }
+
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+    params.testPattern = ctrlPattern;
+
+    //
+    // Set the appropriate laneMask based on the current lane count. The laneMask is used for GF119+ chips
+    // only, so it doesn't matter if we populate it for all chips. It is set to all lanes, since
+    // setting the test pattern on a lane that is off is effectively a nop.
+    // The laneMask allows for setting the pattern on specific lanes to check for cross-talk, which is the
+    // phenomenon of observing the signal crossing over to a different lane where it's not set.
+    //
+    params.laneMask = 0xf;
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_TESTPATTERN, &params, sizeof(params));
+
+    return code == NVOS_STATUS_SUCCESS;
+}
+
+AuxBus::status EvoAuxBus::transaction(Action action, Type type, int address,
+                                      NvU8 * buffer, unsigned sizeRequested,
+                                      unsigned * sizeCompleted,
+                                      unsigned *pNakReason,
+                                      NvU8 offset, NvU8 nWriteTransactions)
+{
+    NV0073_CTRL_DP_AUXCH_CTRL_PARAMS params;
+
+    DP_ASSERT(sizeRequested <= NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE);
+
+    dpMemZero(&params, sizeof(params));
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+
+    params.cmd = 0;
+
+    if (type == native)
+        params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _AUX);
+    else
+        params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _I2C);
+
+    if (type == i2cMot)
+        params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_I2C_MOT, _TRUE);
+    else
+        params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_I2C_MOT, _FALSE);
+
+    if (action == read)
+        params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _READ);
+    else if (action == write)
+    {
+        params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _WRITE);
+        dpMemCopy(params.data, buffer, sizeRequested);
+    }
+    else if (action == writeStatusUpdateRequest)
+        params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _WRITE_STATUS);
+    else
+        DP_ASSERT(0 && "Unknown action");
+
+    params.addr = address;
+
+    //
+    // By definition, an I2C-write-over-AUX request with
+    // zero bytes of data is an "address-only" transaction.
+    //
+    if ((sizeRequested == 0) && (type & (i2cMot | i2c)) && (action == write))
+    {
+        DP_PRINTF(DP_NOTICE, "DP> Client requested address-only transaction");
+        params.bAddrOnly = NV_TRUE;
+    }
+    else if ((sizeRequested == 0) && (type == native))
+    {
+        // Native aux transactions with size requested zero are not allowed.
+        DP_ASSERT(0 && "Native Aux transactions shouldn't have zero size requested");
+        return nack;
+    }
+
+    // The control call takes the size as 0-based.
+    if (sizeRequested == 0)
+    {
+        //
+        // I2C transactions with size requested zero. Decrementing by 1 would
+        // lead to 0xffffff (RM_INVALID_DATA), so keep the size as zero.
+        //
+        params.size = 0;
+    }
+    else
+    {
+        params.size = sizeRequested - 1;
+    }
+
+    NvU32 code = 0;
+    NvU8 retries = 0;
+    do
+    {
+        retries++;
+        params.retryTimeMs = 0;
+        code = provider->rmControl0073(NV0073_CTRL_CMD_DP_AUXCH_CTRL, &params, sizeof(params));
+        // eDP may not be fully powered up yet. Do not access the panel too early.
+        if (params.retryTimeMs > 0)
+        {
+            timer->sleep(params.retryTimeMs);
+        }
+    } while (NVOS_STATUS_SUCCESS != code && params.retryTimeMs && retries < 3);
+
+    if (pNakReason != NULL)
+    {
+        *pNakReason = params.replyType;
+    }
+
+    if (action == writeStatusUpdateRequest && code == NVOS_STATUS_ERROR_NOT_SUPPORTED)
+    {
+        //
+        // On some chips write status requests are generated implicitly by the
+        // hardware. So while the RmControl() will fail with a "not supported"
+        // error, the request still went out on the DPAUX channel as part of
+        // the last I2C-over-AUX write transaction. So the error should be ignored.
+        //
+        DP_PRINTF(DP_NOTICE, "DP> %s: Ignore ERROR_NOT_SUPPORTED for writeStatusUpdateRequest. Returning Success", __FUNCTION__);
+        return AuxBus::success;
+    }
+
+    // In case of a timeout we need to retry a minimum no.
of times + if (code != NVOS_STATUS_SUCCESS && code != NVOS_STATUS_ERROR_TIMEOUT) + { + if (devicePlugged) + { + DP_PRINTF(DP_WARNING, "DP> AuxChCtl Failing, if a device is connected you shouldn't be seeing this"); + } + return nack; + } + else if (code == NVOS_STATUS_ERROR_TIMEOUT) + { + return AuxBus::defer; + } + + *sizeCompleted = params.size; + + // Reset sizeCompleted if transaction failed. + if (params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_DEFER || + params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CDEFER) + *sizeCompleted = 0; + + if (params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_ACK) + { + // if it was read operation copy read data to buffer + if (action == read) + { + // Check the size of data to be copied. Should not be + // more than available buffer + if (params.size > sizeRequested) + { + params.size = sizeRequested; + } + dpMemCopy(buffer, params.data, params.size); + } + + return AuxBus::success; + } + + if (params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_NACK || + params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CNACK || + params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_TIMEOUT) + return AuxBus::nack; + + if (params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_DEFER || + params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CDEFER) + return AuxBus::defer; + + DP_ASSERT(0 && "Unknown reply type"); + return AuxBus::nack; +} + +unsigned EvoAuxBus::transactionSize() +{ + return NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE; +} + +void EvoAuxBus::setDevicePlugged(bool plugged) +{ + devicePlugged = plugged; +} + +void EvoMainLink::preLinkTraining(NvU32 head) +{ + provider->preLinkTraining(head); +} + +void EvoMainLink::postLinkTraining(NvU32 head) +{ + provider->postLinkTraining(head); +} + +void EvoMainLink::initializeRegkeyDatabase() +{ + NvU32 i; + + if (dpRegkeyDatabase.bInitialized) + return; + + for (i = 0; i < sizeof(DP_REGKEY_TABLE)/sizeof(DP_REGKEY_TABLE[0]); i++) + { + NvU32 tempValue = 0; + tempValue = provider->getRegkeyValue(DP_REGKEY_TABLE[i].pName); + switch (DP_REGKEY_TABLE[i].valueType) + { + case DP_REG_VAL_U32: + *(NvU32*)(DP_REGKEY_TABLE[i].pValue) = tempValue; + break; + case DP_REG_VAL_U16: + *(NvU16*)(DP_REGKEY_TABLE[i].pValue) = tempValue & 0xFFFF; + break; + case DP_REG_VAL_U8: + *(NvU8*)(DP_REGKEY_TABLE[i].pValue) = tempValue & 0xFF; + break; + case DP_REG_VAL_BOOL: + *(bool*)(DP_REGKEY_TABLE[i].pValue) = !!tempValue; + break; + } + } + + dpRegkeyDatabase.bInitialized = true; +} + +void EvoMainLink::applyRegkeyOverrides() +{ + if (!dpRegkeyDatabase.bInitialized) + { + DP_ASSERT(0 && "dpRegkeyDatabase is not initialized before calling applyRegkeyOverrides."); + this->initializeRegkeyDatabase(); + } + _isMstDisabledByRegkey = dpRegkeyDatabase.bMstDisabled; + _isDscDisabledByRegkey = dpRegkeyDatabase.bDscDisabled; + _skipPowerdownEDPPanelWhenHeadDetach = dpRegkeyDatabase.bPoweroffEdpInHeadDetachSkipped; + _applyLinkBwOverrideWarRegVal = dpRegkeyDatabase.bLinkBwOverrideWarApplied; + _enableMSAOverrideOverMST = dpRegkeyDatabase.bMsaOverMstEnabled; + _isMSTPCONCapsReadDisabled = dpRegkeyDatabase.bMSTPCONCapsReadDisabled; + _isDownspreadDisabledByRegkey = dpRegkeyDatabase.bDownspreadDisabled; + _bAvoidHBR3DisabledByRegkey = dpRegkeyDatabase.bDisableAvoidHBR3War; +} + +NvU32 EvoMainLink::getRegkeyValue(const char *key) +{ + NvU32 i; + if (!dpRegkeyDatabase.bInitialized) + { + DP_ASSERT(0 && "dpRegkeyDatabase is not initialized before calling getRegkeyValue."); + initializeRegkeyDatabase(); + } + if (key == NULL || key[0] == '\0') + return 0; + + for 
(i = 0; i < sizeof(DP_REGKEY_TABLE)/sizeof(DP_REGKEY_TABLE[0]); i++)
+    {
+        NvU32 j = 0;
+        bool strSame = true;
+        while (key[j] != '\0' && DP_REGKEY_TABLE[i].pName[j] != '\0')
+        {
+            if (key[j] != DP_REGKEY_TABLE[i].pName[j])
+            {
+                strSame = false;
+                break;
+            }
+            ++j;
+        }
+        if (strSame && key[j] == '\0' && DP_REGKEY_TABLE[i].pName[j] == '\0')
+        {
+            switch (DP_REGKEY_TABLE[i].valueType)
+            {
+                case DP_REG_VAL_U32:
+                    return *(NvU32*)(DP_REGKEY_TABLE[i].pValue);
+                case DP_REG_VAL_U16:
+                    return (NvU32)*(NvU16*)(DP_REGKEY_TABLE[i].pValue);
+                case DP_REG_VAL_U8:
+                    return (NvU32)*(NvU8*)(DP_REGKEY_TABLE[i].pValue);
+                case DP_REG_VAL_BOOL:
+                    return (NvU32)*(bool*)(DP_REGKEY_TABLE[i].pValue);
+            }
+        }
+    }
+    DP_ASSERT(0 && "Requested regkey not found in dpRegkeyDatabase.");
+    return 0;
+}
+
+const DP_REGKEY_DATABASE& EvoMainLink::getRegkeyDatabase()
+{
+    return dpRegkeyDatabase;
+}
+
+NvU32 EvoMainLink::getSorIndex()
+{
+    return provider->getSorIndex();
+}
+
+bool EvoMainLink::isInbandStereoSignalingSupported()
+{
+    return provider->isInbandStereoSignalingSupported();
+}
+
+bool EvoMainLink::train(const LinkConfiguration & link, bool force,
+                        LinkTrainingType linkTrainingType,
+                        LinkConfiguration *retLink, bool bSkipLt,
+                        bool isPostLtAdjRequestGranted, unsigned phyRepeaterCount)
+{
+    NvU32 targetIndex;
+    NvU32 ltCounter = retLink->getLTCounter();
+    bool bTrainPhyRepeater =
+        (!link.bDisableLTTPR) && (_isLTPhyRepeaterSupported);
+
+    if (provider->getSorIndex() == DP_INVALID_SOR_INDEX)
+    {
+        // Bail out and skip LT, since no SOR is allocated for this displayID.
+        return false;
+    }
+    NvU32 err = 0;
+
+    NvU32 dpCtrlCmd = DRF_DEF(0073_CTRL, _DP_CMD, _SET_LANE_COUNT, _TRUE) |
+                      DRF_DEF(0073_CTRL, _DP_CMD, _SET_LINK_BW, _TRUE);
+
+    if (link.multistream)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _SET_FORMAT_MODE, _MULTI_STREAM);
+
+    if (link.bEnableFEC)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _ENABLE_FEC, _TRUE);
+
+    if (isPostLtAdjRequestGranted)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _POST_LT_ADJ_REQ_GRANTED, _YES);
+
+    if (link.enhancedFraming)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _SET_ENHANCED_FRAMING, _TRUE);
+    if (bSkipLt)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _SKIP_HW_PROGRAMMING, _YES);
+    if (force)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _FAKE_LINK_TRAINING, _DONOT_TOGGLE_TRANSMISSION);
+
+    if (linkTrainingType == NO_LINK_TRAINING)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _NO_LINK_TRAINING, _YES);
+    else if (linkTrainingType == FAST_LINK_TRAINING)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _FAST_LINK_TRAINING, _YES);
+
+    targetIndex = NV0073_CTRL_DP_DATA_TARGET_SINK;
+    if (bTrainPhyRepeater && (_rmPhyRepeaterCount != phyRepeaterCount))
+    {
+        // If the LTTPR count is out of sync between DPLib and RM, do not link train LTTPRs.
+        bTrainPhyRepeater = false;
+    }
+
+    if (bTrainPhyRepeater)
+    {
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _TRAIN_PHY_REPEATER, _YES);
+        //
+        // Start from the one closest to the GPU. Note this is a 1-based index.
+        //
+        targetIndex = phyRepeaterCount;
+    }
+
+    if (!this->isDownspreadSupported() || link.bDisableDownspread || _isDownspreadDisabledByRegkey)
+    {
+        // If the GPU does not support downspread, or downspread is disabled, force it off.
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _USE_DOWNSPREAD_SETTING, _FORCE);
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _DISABLE_DOWNSPREAD, _TRUE);
+    }
+
+    NV_DPTRACE_INFO(LINK_TRAINING_START, link.multistream, link.peakRate, link.lanes,
+                    phyRepeaterCount, _rmPhyRepeaterCount, bTrainPhyRepeater, targetIndex);
+
+    NvU32 status = 0;
+    NvU8 retries = 0;
+    bool fallback = false;
+
+    //
+    // Limited attempts, to unblock a potential infinite LT loop: a CR failure
+    // restores the high rate, and an EQ failure restores the lanes.
+    //
+    NvU32 crHighRateFallbackCount = 0;
+
+    //
+    // The rate and lane count we send to RM might be different from what the
+    // client sent to us, since fallback might happen.
+    //
+    LinkConfiguration requestRmLC = link;
+    do
+    {
+        NvU32 dpCtrlData = 0;
+        NvU64 linkrate = requestRmLC.peakRate;
+        NvU64 linkBw = 0;
+
+        switch (linkrate)
+        {
+            case dp2LinkRate_1_62Gbps:
+            case dp2LinkRate_2_16Gbps:
+            case dp2LinkRate_2_43Gbps:
+            case dp2LinkRate_2_70Gbps:
+            case dp2LinkRate_3_24Gbps:
+            case dp2LinkRate_4_32Gbps:
+            case dp2LinkRate_5_40Gbps:
+            case dp2LinkRate_8_10Gbps:
+                linkBw = LINK_RATE_10MHZ_TO_270MHZ(linkrate);
+                dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LINK_BW,
+                                             linkBw, dpCtrlData);
+                break;
+            default:
+                if (requestRmLC.lanes != 0)
+                {
+                    DP_ASSERT(0 && "Unknown rate");
+                    return false;
+                }
+                break;
+        }
+
+        dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LANE_COUNT,
+                                     requestRmLC.lanes, dpCtrlData);
+
+        if (requestRmLC.lanes == 0)
+        {
+            // Only need to target the sink when powering down the link.
+            targetIndex = NV0073_CTRL_DP_DATA_TARGET_SINK;
+        }
+
+        dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _TARGET,
+                                     targetIndex, dpCtrlData);
+
+        // Properly wait for eDP to power up before link training.
+        status = 0;
+        retries = 0;
+        fallback = false;
+        dpCtrlCmd = FLD_SET_DRF(0073_CTRL, _DP_CMD, _FALLBACK_CONFIG, _FALSE, dpCtrlCmd);
+        do
+        {
+            NV0073_CTRL_DP_CTRL_PARAMS params;
+
+            dpMemZero(&params, sizeof(params));
+            params.subDeviceInstance = subdeviceIndex;
+            params.displayId = displayId;
+            params.cmd = dpCtrlCmd;
+            params.data = dpCtrlData;
+
+            retries++;
+            params.retryTimeMs = 0;
+            status = provider->rmControl0073(NV0073_CTRL_CMD_DP_CTRL, &params, sizeof(params));
+            ltCounter++;
+            err = params.err;
+
+            if (params.retryTimeMs > 0)
+            {
+                timer->sleep(params.retryTimeMs);
+            }
+
+            if (status == NVOS_STATUS_SUCCESS || bSkipLt)
+            {
+                // If LT failed when bSkipLt was set, there is no point in attempting LT again.
+                break;
+            }
+
+            if (!params.retryTimeMs || retries >= 3)
+            {
+                break;
+            }
+
+        } while (true);
+
+        if (NVOS_STATUS_SUCCESS == status)
+        {
+            if (targetIndex != NV0073_CTRL_DP_DATA_TARGET_SINK)
+            {
+                targetIndex -= 1;
+                continue;
+            }
+            else
+            {
+                // All done, leave the loop.
+                break;
+            }
+        }
+
+        if (requestRmLC.policy.skipFallback() || bSkipLt)
+        {
+            //
+            // If LT failed when bSkipLt was set, there is no point in falling
+            // back, as the issue is not with the LinkConfig.
+            //
+            break;
+        }
+
+        if (FLD_TEST_DRF(0073_CTRL_DP, _ERR, _LINK_STATUS, _DISCONNECTED, err))
+        {
+            // Don't fall back if the device is already gone.
+            break;
+        }
+
+        if (FLD_TEST_DRF(0073_CTRL_DP, _CMD, _TRAIN_PHY_REPEATER, _YES, dpCtrlCmd) &&
+            FLD_TEST_DRF(0073_CTRL_DP, _ERR, _INVALID_PARAMETER, _ERR, err) &&
+            FLD_TEST_DRF(0073_CTRL_DP, _ERR, _TRAIN_PHY_REPEATER, _ERR, err))
+        {
+            //
+            // RM has fewer LTTPRs than DPLib expected.
+            // - Force transparent mode.
+            //
+            targetIndex = NV0073_CTRL_DP_DATA_TARGET_SINK;
+            dpCtrlCmd = FLD_SET_DRF(0073_CTRL, _DP_CMD, _TRAIN_PHY_REPEATER,
+                                    _NO, dpCtrlCmd);
+            continue;
+        }
+
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _FALLBACK_CONFIG, _TRUE);
+
+        if (FLD_TEST_DRF(0073_CTRL_DP, _ERR, _CLOCK_RECOVERY, _ERR, err))
+        {
+            // If CR failed, check whether we need to fall back.
+            if (requestRmLC.peakRate != dp2LinkRate_1_62Gbps)
+            {
+                //
+                // We need to fall back on link rate if the following conditions are met:
+                // 1. The CR or EQ phase failed.
+                // 2. The requested link bandwidth is NOT RBR.
+                //
+                if (!requestRmLC.lowerConfig())
+                {
+                    // If no valid link config could be found, break here.
+                    break;
+                }
+                fallback = true;
+            }
+            else
+            {
+                // Already RBR.
+                // Check how many lanes are done.
+                requestRmLC.lanes = DRF_VAL(0073_CTRL_DP, _ERR, _CR_DONE_LANE, err);
+
+                while (!IS_VALID_LANECOUNT(requestRmLC.lanes))
+                {
+                    requestRmLC.lanes--;
+                }
+
+                if (requestRmLC.lanes == 0)
+                {
+                    // This is to WAR some systems that don't set CR_DONE or EQ_DONE
+                    // at all. In this case, we simply try half of the lanes.
+                    requestRmLC.lanes = DRF_VAL(0073_CTRL, _DP_DATA, _SET_LANE_COUNT, dpCtrlData) / 2;
+                    if (requestRmLC.lanes == 0)
+                    {
+                        // Nothing to try. Bail out.
+                        break;
+                    }
+                }
+                // Set back to the original desired rate.
+                requestRmLC.peakRate = link.peakRate;
+                fallback = true;
+                crHighRateFallbackCount++;
+            }
+        }
+        if (FLD_TEST_DRF(0073_CTRL_DP, _ERR, _CHANNEL_EQUALIZATION, _ERR, err))
+        {
+            //
+            // If channel equalization fails, we need to use the fallback policy
+            // of reducing the lane count vs. link rate, but in the special case
+            // when all lanes have failed CR, we resort to lowering the link rate
+            // instead (this addresses the new Fallback SCR v2.0).
+            //
+            if (FLD_TEST_DRF(0073_CTRL_DP, _ERR, _CR_DONE_LANE, _0_LANE, err))
+            {
+                // Per spec, if the link rate has already been reduced to RBR, exit fallback.
+                if (requestRmLC.peakRate == dp2LinkRate_1_62Gbps || !requestRmLC.lowerConfig())
+                    break;
+            }
+            else
+            {
+                if (!requestRmLC.lowerConfig(true)) // bReduceLaneCnt = true
+                    break;
+            }
+            fallback = true;
+        }
+        if (fallback == false)
+        {
+            // Nothing to fall back to; give up.
+            break;
+        }
+        if ((phyRepeaterCount > 0) && (bTrainPhyRepeater))
+        {
+            // On fallback, we need to start from the beginning.
+            targetIndex = phyRepeaterCount;
+        }
+    } while (crHighRateFallbackCount < NV_DP_RBR_FALLBACK_MAX_TRIES);
+
+    //
+    // The result should be checked only for the control call status. 'err'
+    // doesn't represent failure in LT - some compliance tests such as 700.1.1.2
+    // intentionally test against unexpected sink caps.
+    //
+    bool result = (status == NVOS_STATUS_SUCCESS);
+    retLink->setLaneRate(requestRmLC.peakRate, result ? requestRmLC.lanes : 0);
+    retLink->setLTCounter(ltCounter);
+
+    if (requestRmLC.bEnableFEC && (FLD_TEST_DRF(0073_CTRL_DP, _ERR, _ENABLE_FEC, _ERR, err)))
+    {
+        retLink->bEnableFEC = false;
+        DP_ASSERT(0);
+    }
+
+    NV_DPTRACE_INFO(LINK_TRAINING_DONE, status, requestRmLC.peakRate, requestRmLC.lanes);
+
+    return result;
+}
+
+bool EvoMainLink::retrieveRingBuffer(NvU8 dpRingBuffertype, NvU32 numRecords)
+{
+    return false;
+}
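+
+// The loop above walks a fallback ladder rather than failing outright.
+// Illustrative trace only (the exact order is whatever
+// LinkConfiguration::lowerConfig() implements): an EQ failure at
+// 4 lanes / HBR2 might step to 2 lanes / HBR2, then down through lower rates
+// toward 1 lane / RBR; a CR failure at RBR instead keeps the originally
+// requested rate and halves the lane count, with NV_DP_RBR_FALLBACK_MAX_TRIES
+// bounding that path so the loop cannot spin forever.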
+
+// Return the current mux state. Returns false if the device is not mux capable.
+bool EvoMainLink::getDynamicMuxState(NvU32 *muxState)
+{
+    bool bIsMuxCapable = false;
+    NvU32 ret = 0;
+    NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS muxStatusParams;
+
+    if (!muxState)
+        return false;
+
+    *muxState = 0;
+
+    if (!isDynamicMuxCapable())
+        return false;
+
+    dpMemZero(&muxStatusParams, sizeof(muxStatusParams));
+    muxStatusParams.subDeviceInstance = subdeviceIndex;
+    muxStatusParams.displayId = displayId;
+    muxStatusParams.muxStatus = 0;
+
+    ret = provider->rmControl0073(NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS,
+                                  &muxStatusParams, sizeof(muxStatusParams));
+    if (ret == NV_OK &&
+        DRF_VAL(0073, _CTRL_DFP_DISP_MUX, _STATE, muxStatusParams.muxStatus) != NV0073_CTRL_DFP_DISP_MUX_STATE_INVALID)
+    {
+        bIsMuxCapable = true;
+        *muxState = muxStatusParams.muxStatus;
+    }
+
+    return bIsMuxCapable;
+}
+
+void EvoMainLink::configurePowerState(bool bPowerUp)
+{
+    NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS params;
+
+    dpMemZero(&params, sizeof(params));
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+    params.ctrl = bPowerUp ? NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERUP :
+                             NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERDOWN;
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_MAIN_LINK_CTRL, &params, sizeof(params));
+
+    DP_ASSERT(code == NVOS_STATUS_SUCCESS);
+}
+
+void EvoMainLink::getLinkConfig(unsigned &laneCount, NvU64 & linkRate)
+{
+    NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS params;
+    dpMemZero(&params, sizeof(params));
+
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_LINK_CONFIG, &params, sizeof(params));
+
+    if (code == NVOS_STATUS_SUCCESS)
+    {
+        laneCount = params.laneCount;
+        if (params.linkBW != 0)
+        {
+            linkRate = ((NvU64)params.linkBW) * DP_LINK_BW_FREQ_MULTI_MBPS;
+        }
+        else
+        {
+            // No link rate available.
+            linkRate = 0;
+        }
+    }
+    else
+    {
+        laneCount = 0;
+        linkRate = 0;
+    }
+}
+
+bool EvoMainLink::getMaxLinkConfigFromUefi(NvU8 &linkRate, NvU8 &laneCount)
+{
+    if (provider->getMaxLinkConfigFromUefi(linkRate, laneCount))
+    {
+        if (IS_VALID_LANECOUNT(laneCount) && IS_VALID_LINKBW(linkRate))
+        {
+            return true;
+        }
+    }
+    return false;
+}
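+
+// getLinkConfig() above returns RM's raw link bandwidth code scaled by
+// DP_LINK_BW_FREQ_MULTI_MBPS. Assuming the conventional DPCD encoding of
+// 0.27 Gbps per unit (an assumption of this note, not spelled out by the
+// control), the familiar values decode as:
+//
+//     0x06 -> 1.62 Gbps (RBR)      0x14 -> 5.40 Gbps (HBR2)
+//     0x0A -> 2.70 Gbps (HBR)      0x1E -> 8.10 Gbps (HBR3)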
+
+bool EvoMainLink::queryAndUpdateDfpParams()
+{
+    NvU32 dfpFlags;
+    dpMemZero(&dfpParams, sizeof(dfpParams));
+    dfpParams.subDeviceInstance = subdeviceIndex;
+    dfpParams.displayId = displayId;
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DFP_GET_INFO, &dfpParams, sizeof(dfpParams));
+
+    if (code != NVOS_STATUS_SUCCESS)
+    {
+        DP_ASSERT(0 && "Unable to query DFP params.");
+        return false;
+    }
+
+    dfpFlags = dfpParams.flags;
+    _isEDP = DRF_VAL(0073, _CTRL_DFP_FLAGS, _EMBEDDED_DISPLAYPORT, dfpFlags) ==
+             NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE;
+
+    if (_isLTPhyRepeaterSupported)
+    {
+        _rmPhyRepeaterCount = DRF_VAL(0073_CTRL_DFP, _FLAGS,
+                                      _DP_PHY_REPEATER_COUNT, dfpFlags);
+    }
+
+    _needForceRmEdid = DRF_VAL(0073, _CTRL_DFP_FLAGS, _DP_FORCE_RM_EDID, dfpFlags) ==
+                       NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE;
+
+    _isPC2Disabled = DRF_VAL(0073, _CTRL_DFP_FLAGS, _DP_POST_CURSOR2_DISABLED, dfpFlags) ==
+                     NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE;
+
+    switch (DRF_VAL(0073, _CTRL_DFP_FLAGS, _DP_LINK_BW, dfpFlags))
+    {
+        default:
+            DP_ASSERT(0 && "maxLinkRate is set improperly in dfp object.");
+            // intentionally fall-thru.
+        case NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS:
+            _maxLinkRateSupportedDfp = dp2LinkRate_1_62Gbps;
+            break;
+        case NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS:
+            _maxLinkRateSupportedDfp = dp2LinkRate_2_70Gbps;
+            break;
+        case NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS:
+            _maxLinkRateSupportedDfp = dp2LinkRate_5_40Gbps;
+            break;
+        case NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS:
+            _maxLinkRateSupportedDfp = dp2LinkRate_8_10Gbps;
+            break;
+    }
+
+    _isDynamicMuxCapable = FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _DYNAMIC_MUX_CAPABLE, _TRUE, dfpFlags);
+
+    return true;
+}
+
+bool EvoMainLink::fetchEdidByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize)
+{
+    NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *pEdidParams;
+    pEdidParams = (NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS*) dpMalloc(sizeof(*pEdidParams));
+
+    if (pEdidParams == NULL) {
+        return false;
+    }
+
+    dpMemZero(pEdidParams, sizeof(*pEdidParams));
+    pEdidParams->subDeviceInstance = subdeviceIndex;
+    pEdidParams->displayId = displayId;
+    pEdidParams->flags = 0; // use default settings.
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, pEdidParams, sizeof(*pEdidParams));
+
+    if (code == NVOS_STATUS_SUCCESS)
+    {
+        // Silently dropping part of a too-large output buffer matches the
+        // behavior of the "V1" of this control.
+        // But it may make sense to revisit this behavior now that it's under
+        // control of this client.
+        NvU32 copySize = NV_MIN(pEdidParams->bufferSize, bufferSize);
+        dpMemCopy(edidBuffer, pEdidParams->edidBuffer, copySize);
+    } else {
+        DP_ASSERT(0 && "Unable to read EDID.");
+    }
+
+    dpFree(pEdidParams);
+    return code == NVOS_STATUS_SUCCESS;
+}
+
+bool EvoMainLink::applyEdidOverrideByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize)
+{
+    NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS *pEdidOverrideParams =
+        (NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS *)
+        dpMalloc(sizeof(*pEdidOverrideParams));
+
+    if (pEdidOverrideParams == NULL) {
+        return false;
+    }
+
+    dpMemZero(pEdidOverrideParams, sizeof(*pEdidOverrideParams));
+    pEdidOverrideParams->subDeviceInstance = subdeviceIndex;
+    pEdidOverrideParams->displayId = displayId;
+    if (bufferSize > sizeof(pEdidOverrideParams->edidBuffer)) {
+        DP_ASSERT(0 && "EDID override too large for edidBuffer");
+        dpFree(pEdidOverrideParams);
+        return false;
+    }
+    pEdidOverrideParams->bufferSize = bufferSize;
+    dpMemCopy(&pEdidOverrideParams->edidBuffer, edidBuffer, bufferSize);
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_APPLY_EDID_OVERRIDE_V2,
+                                         pEdidOverrideParams,
+                                         sizeof(*pEdidOverrideParams));
+    if (code != NVOS_STATUS_SUCCESS)
+    {
+        DP_ASSERT(0 && "Unable to apply EDID override.");
+        dpFree(pEdidOverrideParams);
+        return false;
+    }
+
+    DP_ASSERT(pEdidOverrideParams->bufferSize == bufferSize);
+    dpMemCopy(edidBuffer, &pEdidOverrideParams->edidBuffer, bufferSize);
+
+    dpFree(pEdidOverrideParams);
+
+    return true;
+}
+
+bool EvoMainLink::isEDP()
+{
+    return _isEDP;
+}
+
+bool EvoMainLink::supportMSAOverMST()
+{
+    return _enableMSAOverrideOverMST;
+}
+
+bool EvoMainLink::skipPowerdownEdpPanelWhenHeadDetach()
+{
+    return _skipPowerdownEDPPanelWhenHeadDetach;
+}
+
+bool EvoMainLink::isMSTPCONCapsReadDisabled()
+{
+    return _isMSTPCONCapsReadDisabled;
+}
+
+bool EvoMainLink::isActive()
+{
+    NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS params;
+
+    for (int i = 0; i < 32; i++)
+    {
+        //
+        // Skip floorswept heads
+        //
+        if (!(allHeadMask & (1 << i)))
+        {
+            continue;
+        }
+
+        dpMemZero(&params, sizeof params);
+        params.subDeviceInstance = 0;
+        params.head = i;
+
+        NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, &params, sizeof(params));
+
+        if (code != NVOS_STATUS_SUCCESS)
+        {
+            DP_ASSERT(0 && "We can't get active displays, RM bug!");
+        }
+        else if (params.displayId & displayId)
+        {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+bool EvoMainLink::controlRateGoverning(NvU32 head, bool enable, bool updateNow)
+{
+    NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.head = head;
+    params.sorIndex = provider->getSorIndex();
+
+    if (enable)
+    {
+        params.flags |= DRF_DEF(0073_CTRL, _CMD_DP_SET_RATE_GOV_FLAGS, _ENABLE_RG, _ON);
+    }
+    else
+    {
+        params.flags |= DRF_DEF(0073_CTRL, _CMD_DP_SET_RATE_GOV_FLAGS, _ENABLE_RG, _OFF);
+    }
+    if (updateNow)
+    {
+        params.flags |= DRF_DEF(0073_CTRL, _CMD_DP_SET_RATE_GOV_FLAGS, _TRIGGER_MODE, _IMMEDIATE);
+    }
+    else
+    {
+        params.flags |= DRF_DEF(0073_CTRL, _CMD_DP_SET_RATE_GOV_FLAGS, _TRIGGER_MODE, _LOADV);
+    }
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_RATE_GOV, &params, sizeof params);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "controlRateGoverning(): Set RateGov failed!");
+    }
+    return true;
+}
+
+bool EvoMainLink::getDpTestPattern(NV0073_CTRL_DP_TESTPATTERN * testPattern)
+{
+    NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS params = {0};
+
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = this->displayId;
+
+    if (!(provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_TESTPATTERN, &params, sizeof params)))
+    {
+        testPattern->testPattern = params.testPattern.testPattern;
+        return true;
+    }
+    else
+        return false;
+}
+
+bool EvoMainLink::setDpTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, NvBool bIsHBR2, NvBool bSkipLaneDataOverride)
+{
+    NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS params = {0};
+
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = this->displayId;
+    params.testPattern = testPattern;
+    params.laneMask = laneMask;
+    params.cstm = cstm;
+    params.bIsHBR2 = bIsHBR2;
+    params.bSkipLaneDataOverride = bSkipLaneDataOverride;
+
+    if (!(provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_TESTPATTERN, &params, sizeof params)))
+        return true;
+    else
+        return false;
+}
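+
+// Hypothetical compliance-test usage of setDpTestPattern() above (values
+// illustrative): drive PRBS7 on all four lanes with no custom pattern data.
+//
+//     NV0073_CTRL_DP_TESTPATTERN pat = {0};
+//     NV0073_CTRL_DP_CSTM cstm = {0};
+//     pat.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_PRBS_7;
+//     main->setDpTestPattern(pat, 0xF /* laneMask */, cstm,
+//                            NV_FALSE /* bIsHBR2 */,
+//                            NV_FALSE /* bSkipLaneDataOverride */);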
+
+bool EvoMainLink::getDpLaneData(NvU32 *numLanes, NvU32 *data)
+{
+    NV0073_CTRL_DP_LANE_DATA_PARAMS params = {0};
+
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = this->displayId;
+
+    if (!(provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_LANE_DATA, &params, sizeof params)))
+    {
+        *numLanes = params.numLanes;
+        dpMemCopy(data, params.data, NV0073_CTRL_MAX_LANES*4);
+        return true;
+    }
+    else
+        return false;
+}
+
+void EvoMainLink::getLinkConfigWithFEC(unsigned &laneCount, NvU64 & linkRate, bool &bFECEnabled)
+{
+    NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS params;
+    dpMemZero(&params, sizeof(params));
+
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_LINK_CONFIG, &params, sizeof(params));
+
+    if (code == NVOS_STATUS_SUCCESS)
+    {
+        laneCount = params.laneCount;
+        if (params.bFECEnabled)
+        {
+            bFECEnabled = true;
+        }
+
+        if (params.linkBW != 0)
+        {
+            linkRate = ((NvU64)params.linkBW) * DP_LINK_BW_FREQ_MULTI_MBPS;
+        }
+        else
+        {
+            // No link rate available.
+            linkRate = 0;
+        }
+    }
+    else
+    {
+        laneCount = 0;
+        linkRate = 0;
+    }
+}
+
+bool EvoMainLink::setDpLaneData(NvU32 numLanes, NvU32 *data)
+{
+    NV0073_CTRL_DP_LANE_DATA_PARAMS params = {0};
+
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = this->displayId;
+    params.numLanes = numLanes;
+    dpMemCopy(params.data, data, NV0073_CTRL_MAX_LANES*4);
+
+    if (!(provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_LANE_DATA, &params, sizeof params)))
+        return true;
+    else
+        return false;
+}
+
+NvU32 EvoMainLink::monitorDenylistInfo(NvU32 ManufacturerID, NvU32 ProductID, DpMonitorDenylistData *pDenylistData)
+{
+    return provider->monitorDenylistInfo(ManufacturerID, ProductID, pDenylistData);
+}
+
+bool EvoMainLink::rmUpdateDynamicDfpCache(NvU32 headIndex, RmDfpCache* dfpCache, NvBool bResetDfp)
+{
+    NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS params = {0};
+    params.headIndex = headIndex;
+    params.bcaps = dfpCache->bcaps;
+    for (unsigned i=0; i<5; i++)
+        params.bksv[i] = dfpCache->bksv[i];
+
+    params.bHdcpCapable = dfpCache->hdcpCapable;
+    params.subDeviceInstance = subdeviceIndex;
+    params.updateMask = dfpCache->updMask;
+    if (bResetDfp)
+        params.bResetDfp = NV_TRUE;
+
+    if (!(provider->rmControl0073(NV0073_CTRL_CMD_DFP_UPDATE_DYNAMIC_DFP_CACHE, &params, sizeof params)))
+        return true;
+    else
+        return false;
+}
+
+NvU32 EvoMainLink::allocDisplayId()
+{
+    NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS params = {0};
+
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID, &params, sizeof(params));
+    if (ret == NVOS_STATUS_SUCCESS)
+    {
+        return params.displayIdAssigned;
+    }
+
+    return 0;
+}
+
+bool EvoMainLink::freeDisplayId(NvU32 displayId)
+{
+    NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS params = {0};
+
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID, &params, sizeof(params));
+    return ret == NVOS_STATUS_SUCCESS;
+}
+
+void EvoMainLink::configureTriggerSelect(NvU32 head, DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier)
+{
+    NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS params = {0};
+    params.head = head;
+    params.subDeviceInstance = subdeviceIndex;
+    params.sorIndex = provider->getSorIndex();
+    params.singleHeadMSTPipeline = streamIdentifier;
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT, &params, sizeof params);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "configureTriggerSelect(): Set Trigger Select failed!");
+    }
+}
+
+void EvoMainLink::configureTriggerAll(NvU32 head, bool enable)
+{
+    NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS params = {0};
+    params.head = head;
+    params.subDeviceInstance = subdeviceIndex;
+    params.enable = enable;
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL, &params, sizeof params);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "configureTriggerAll(): Set Trigger All failed!");
+    }
+}
+
+MainLink * DisplayPort::MakeEvoMainLink(EvoInterface * provider, Timer * timer)
+{
+    MainLink *main;
+    main = new EvoMainLink(provider, timer);
+    return main;
+}
+
+AuxBus * DisplayPort::MakeEvoAuxBus(EvoInterface * provider, Timer * timer)
+{
+    return new EvoAuxBus(provider, timer);
+}
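+
+// MakeEvoMainLink()/MakeEvoAuxBus() above are the construction points for
+// this backend. Client sketch (ownership and teardown conventions assumed,
+// not shown by this file):
+//
+//     MainLink *main = DisplayPort::MakeEvoMainLink(provider, timer);
+//     AuxBus   *aux  = DisplayPort::MakeEvoAuxBus(provider, timer);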
+
+bool EvoMainLink::dscCrcTransaction(NvBool bEnable, gpuDscCrc *data, NvU16 *headIndex)
+{
+    NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS params;
+    NvU32 code;
+
+    dpMemZero(&params, sizeof(params));
+
+    params.bEnable = bEnable ? NV_TRUE : NV_FALSE;
+    params.subDeviceInstance = subdeviceIndex;
+    params.headIndex = *headIndex;
+
+    // See whether setup or querying needs to be specified.
+    if (data == NULL)
+    {
+        params.cmd = DRF_DEF(0073_CTRL, _DP_CRC_CONTROL, _CMD, _SETUP);
+    }
+    else
+    {
+        params.cmd = DRF_DEF(0073_CTRL, _DP_CRC_CONTROL, _CMD, _QUERY);
+    }
+
+    // GPU part of the call
+    code = provider->rmControl0073(NV0073_CTRL_CMD_DFP_DSC_CRC_CONTROL, &params, sizeof(params));
+    if (code != NVOS_STATUS_SUCCESS)
+    {
+        DP_PRINTF(DP_ERROR, "DP> Crc control failed.");
+        return false;
+    }
+
+    // For a query, copy the CRC values back; a setup request has nothing to return.
+    if (data != NULL)
+    {
+        data->gpuCrc0 = params.gpuCrc0;
+        data->gpuCrc1 = params.gpuCrc1;
+        data->gpuCrc2 = params.gpuCrc2;
+    }
+
+    return true;
+}
+
+//
+// @brief This is to request RM to set up/reset the link rate table, and save
+//        valid link rates for use.
+//
+// @param pLinkRateTable Pointer to the link rate table to configure
+// @param pLinkRates     Pointer to LinkRates to keep the valid link rates
+// @return
+//   true  Link rate table configured with at least one valid link rate
+//   false Otherwise
+//
+bool EvoMainLink::configureLinkRateTable
+(
+    const NvU16 *pLinkRateTable,
+    LinkRates *pLinkRates
+)
+{
+    NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS params;
+    dpMemZero(&params, sizeof(params));
+
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+
+    // Set up the provided link rate table; otherwise it will be reset.
+    if (pLinkRateTable)
+    {
+        for (NvU32 i = 0; i < NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES; i++)
+        {
+            params.linkRateTbl[i] = pLinkRateTable[i];
+        }
+    }
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, &params, sizeof(params));
+
+    if ((pLinkRates != NULL) && (code == NVOS_STATUS_SUCCESS) &&
+        (params.linkBwCount <= NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES))
+    {
+        pLinkRates->clear();
+        for (int i = 0; i < params.linkBwCount; i++)
+        {
+            switch (params.linkBwTbl[i])
+            {
+                case dp2LinkRate_1_62Gbps:
+                case dp2LinkRate_2_16Gbps:
+                case dp2LinkRate_2_43Gbps:
+                case dp2LinkRate_2_70Gbps:
+                case dp2LinkRate_3_24Gbps:
+                case dp2LinkRate_4_32Gbps:
+                case dp2LinkRate_5_40Gbps:
+                case dp2LinkRate_8_10Gbps:
+                    pLinkRates->import((NvU16)params.linkBwTbl[i]);
+                    break;
+                default:
+                    DP_PRINTF(DP_ERROR, "DP_EVO> %s: Unsupported link rate received",
+                              __FUNCTION__);
+                    DP_ASSERT(0);
+                    break;
+            }
+        }
+        return true;
+    }
+    return false;
+}
+
+//
+// @brief This is to request RM to enable/disable FEC.
+//
+// @param bEnableFec Indicates whether enable or disable is requested
+// @return
+//   true  If FEC was configured successfully
+//   false Otherwise
+//
+bool EvoMainLink::configureFec
+(
+    const bool bEnableFec
+)
+{
+    NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS params;
+    dpMemZero(&params, sizeof(params));
+
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+    params.bEnableFec = bEnableFec;
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_CONFIGURE_FEC, &params, sizeof(params));
+
+    if (code == NVOS_STATUS_SUCCESS)
+    {
+        return true;
+    }
+
+    return false;
+}
diff --git a/src/common/displayport/src/dp_groupimpl.cpp b/src/common/displayport/src/dp_groupimpl.cpp
new file mode 100644
index 0000000..c4a0d95
--- /dev/null
+++ b/src/common/displayport/src/dp_groupimpl.cpp
@@ -0,0 +1,745 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_groupimpl.cpp * +* DP device group implementation * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_connector.h" +#include "dp_list.h" +#include "dp_auxdefs.h" +#include "dp_deviceimpl.h" +#include "dp_groupimpl.h" +#include "dp_connectorimpl.h" +#include "dp_printf.h" + +using namespace DisplayPort; + +void GroupImpl::update(Device * dev, bool allocationState) +{ + Address::StringBuffer sb; + Address devAddress = dev->getTopologyAddress(); + DP_USED(sb); + + // Do not map a stream that is not yet turned on in the gpu. An update shall be sent later during NAE. + if (allocationState && !this->isHeadAttached()) + return; + + // + // Do not enable the stream on an unplugged device but take care of + // detached devices. We need to clear PBNs allocated by such devices + // + if (allocationState && !((DeviceImpl *)dev)->plugged) + return; + + // + // Check if Parent's messageManager exist or not. This is required for cases + // where parent branch itself has been unplugged. No message can be sent in this case. + // + if (!parent->messageManager) + return; + + if (timeslot.count == 0 || + ((DeviceImpl *)dev)->payloadAllocated == allocationState) + return; + + if (!dev->getParent() || !((dev->getParent())->isPlugged())) + { + DeviceImpl * parentDev = NULL; + + // + // Send ALLOCATE_PAYLOAD with pbn 0 to parent port of previous branch + // Find first plugged parent branch & send message to it + // + while(devAddress.size() > 2) + { + devAddress.pop(); + parentDev = parent->findDeviceInList(devAddress.parent()); + + if (parentDev && parentDev->isPlugged()) + break; + } + + // If no parent found simply return as we don't have a valid address to send message + if (!parentDev) + return; + } + + NakData nakData; + for (int retries = 0 ; retries < 7; retries++) + { + AllocatePayloadMessage allocate; + unsigned sink = 0; // hardcode the audio sink to 0th in the device. + allocate.set(devAddress.parent(), devAddress.tail(), + dev->isAudioSink() ? 1 : 0, streamIndex, allocationState ? 
timeslot.PBN : 0,
+                     &sink, true);
+
+        // Trigger a refetch of the EPR.
+        ((DeviceImpl *)dev)->bandwidth.enum_path.dataValid = false;
+        DeviceImpl * tail = (DeviceImpl *) dev;
+        while (tail && tail->getParent())
+        {
+            tail->bandwidth.enum_path.dataValid = false;
+            tail = (DeviceImpl *)tail->getParent();
+        }
+
+        if (parent->messageManager->send(&allocate, nakData))
+        {
+            if (allocationState)
+            {
+                DP_PRINTF(DP_NOTICE, "DP-TM> Attached stream:%d to %s", streamIndex, dev->getTopologyAddress().toString(sb));
+            }
+            else
+            {
+                DP_PRINTF(DP_NOTICE, "DP-TM> Detached stream:%d from %s", streamIndex, dev->getTopologyAddress().toString(sb));
+            }
+
+            ((DeviceImpl *)dev)->payloadAllocated = allocationState;
+
+            return;
+        }
+    }
+
+    // Ideally we should not have reached here unless allocate payload failed.
+    if (allocationState)
+    {
+        DP_PRINTF(DP_ERROR, "DP-TM> Allocate_payload: Failed to ATTACH stream:%d to %s", streamIndex, dev->getTopologyAddress().toString(sb));
+        DP_ASSERT(0);
+    }
+    else
+    {
+        DP_PRINTF(DP_ERROR, "DP-TM> Allocate_payload: Failed to DETACH stream:%d from %s", streamIndex, dev->getTopologyAddress().toString(sb));
+        DP_ASSERT(0);
+    }
+}
+
+void GroupImpl::insert(Device * dev)
+{
+    DP_ASSERT(!headInFirmware && "Cannot add or remove from a firmware group. You must perform a modeset away from the device");
+    DeviceImpl * di = (DeviceImpl *)dev;
+
+    if (isHeadAttached())
+    {
+        if (di->activeGroup && di->activeGroup != this)
+        {
+            DP_ASSERT(0 && "Device already in active group, cannot add to another active group!");
+            return;
+        }
+        di->activeGroup = this;
+    }
+
+    members.insertFront(di);
+
+    // Is HDCP on for this group?
+    // If YES, disable HDCP (ECF).
+    this->hdcpPreviousStatus = this->hdcpEnabled;
+    if (this->hdcpEnabled)
+    {
+        NvU64 ecf = 0x0;
+        NvU64 countOnes = 0x0;
+        NvU64 mask = 0x0;
+        // Build the mask over all active groups that have ECF enabled.
+        for (ListElement * i = parent->activeGroups.begin(); i != parent->activeGroups.end(); i = i->next)
+        {
+            GroupImpl * group = (GroupImpl *)i;
+            if (group->hdcpEnabled)
+            {
+                countOnes = (((NvU64)1) << group->timeslot.count) - 1;
+
+                mask = countOnes << group->timeslot.begin;
+                ecf |= mask;
+            }
+        }
+
+        countOnes = (((NvU64)1) << this->timeslot.count) - 1;
+
+        mask = countOnes << this->timeslot.begin;
+        ecf &= ~mask;
+
+        parent->main->configureAndTriggerECF(ecf);
+        this->hdcpEnabled = false;
+    }
+
+    update(dev, true);
+
+    // After adding the stream, turn encryption back on if it was on before.
+    if (this->hdcpPreviousStatus)
+    {
+        hdcpSetEncrypted(true);
+    }
+}
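+
+// The ECF arithmetic above builds a 64-bit timeslot mask per group:
+// countOnes is a run of timeslot.count ones, shifted up to the group's first
+// slot. Worked example (illustrative numbers only): a stream occupying 10
+// timeslots starting at slot 5 gives
+//
+//     countOnes = (1ULL << 10) - 1 = 0x3FF
+//     mask      = 0x3FF << 5       = 0x7FE0   (bits 14:5 set)
+//
+// ORing group masks into ecf enables encryption on those slots; clearing with
+// ecf &= ~mask disables only this group's slots and leaves other encrypted
+// streams untouched.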
+
+void GroupImpl::remove(Device * dev)
+{
+    DP_ASSERT(!headInFirmware && "Cannot add or remove from a firmware group. You must perform a modeset away from the device");
+
+    DeviceImpl * di = (DeviceImpl *)dev;
+
+    if (isHeadAttached())
+    {
+        /*
+         * The device may become lost and freed after removal from the active
+         * group. Therefore, also remove the device from the
+         * 'dscEnabledDevices' list and ensure that its dangling pointer is not
+         * left behind.
+         */
+        if (parent->dscEnabledDevices.contains(dev)) {
+            parent->dscEnabledDevices.remove(dev);
+        }
+
+        di->activeGroup = 0;
+    }
+    members.remove(di);
+
+    update(dev, false);
+
+    updateVbiosScratchRegister(dev);
+}
+
+void GroupImpl::destroy()
+{
+    ConnectorImpl* parent = NULL;
+    for (Device * i = enumDevices(0); i; i = enumDevices(i))
+        remove(i);
+
+    // Cancel any queued auth callbacks.
+    cancelHdcpCallbacks();
+
+    if (streamEncryptionStatusDetection)
+    {
+        delete streamEncryptionStatusDetection;
+        streamEncryptionStatusDetection = 0;
+    }
+    parent = this->parent;
+
+    if (parent)
+    {
+        if (!parent->activeGroups.isEmpty())
+        {
+            for (ListElement * i = parent->activeGroups.begin(); i != parent->activeGroups.end(); i = i->next)
+            {
+                GroupImpl * group = (GroupImpl *)i;
+                if (group == this)
+                {
+                    parent->activeGroups.remove(this);
+                    break;
+                }
+            }
+        }
+
+        if (!parent->inactiveGroups.isEmpty())
+        {
+            for (ListElement * i = parent->inactiveGroups.begin(); i != parent->inactiveGroups.end(); i = i->next)
+            {
+                GroupImpl * group = (GroupImpl *)i;
+                if (group == this)
+                {
+                    parent->inactiveGroups.remove(this);
+                    break;
+                }
+            }
+        }
+
+        if (parent->intransitionGroups.contains(this))
+        {
+            parent->intransitionGroups.remove(this);
+        }
+
+        if (parent->addStreamMSTIntransitionGroups.contains(this))
+        {
+            parent->addStreamMSTIntransitionGroups.remove(this);
+        }
+    }
+
+    delete this;
+}
+
+void GroupImpl::cancelHdcpCallbacks()
+{
+    authRetries = 0;
+
+    parent->timer->cancelCallback(this, &tagHDCPReauthentication);
+    parent->timer->cancelCallback(this, &tagStreamValidation);
+
+    QSESetECFRetries = 0;
+    parent->timer->cancelCallback(this, &tagMSTQSEandSetECF);
+}
+
+Device * GroupImpl::enumDevices(Device * previousDevice)
+{
+    return members.next(previousDevice);
+}
+
+void GroupImpl::expired(const void * tag)
+{
+    if (tag == &tagHDCPReauthentication)
+    {
+        HDCPState hdcpState = {0};
+        parent->main->configureHDCPGetHDCPState(hdcpState);
+
+        if (authRetries < HDCP_AUTHENTICATION_RETRIES)
+        {
+            this->hdcpEnabled = hdcpState.HDCP_State_Encryption;
+            if (hdcpState.HDCP_State_Authenticated)
+            {
+                parent->isHDCPAuthOn = true;
+                authRetries = 0;
+            }
+            else
+            {
+                unsigned authDelay = (hdcpState.HDCP_State_22_Capable ?
+                                      HDCP22_AUTHENTICATION_COOLDOWN : HDCP_AUTHENTICATION_COOLDOWN);
+
+                authRetries++;
+                parent->main->configureHDCPRenegotiate();
+                parent->isHDCPAuthOn = false;
+                parent->timer->queueCallback(this, &tagHDCPReauthentication,
+                                             authDelay);
+            }
+        }
+        else
+        {
+            parent->isHDCPAuthOn = this->hdcpEnabled = false;
+        }
+    }
+    else if (tag == &tagStreamValidation)
+    {
+        if (!(this->streamValidationDone))
+        {
+            // If we get here, we need to debug why we did not receive the final notification from DD.
+            DP_ASSERT(0 && "DP> Didn't get final notification.");
+        }
+    }
+    else if (tag == &tagMSTQSEandSetECF)
+    {
+        if (QSESetECFRetries < HDCP_QSEANDSETECF_RETRIES)
+        {
+            HDCPState hdcpState = {0};
+            parent->main->configureHDCPGetHDCPState(hdcpState);
+            this->hdcpEnabled = parent->isHDCPAuthOn = hdcpState.HDCP_State_Authenticated;
+
+            // Wait until authenticated, then enable QSE and set the ECF.
+            if (parent->isHDCPAuthOn)
+            {
+                QSESetECFRetries = 0;
+                parent->timer->cancelCallback(this, &tagMSTQSEandSetECF);
+                hdcpMSTQSEandSetECF();
+            }
+            else
+            {
+                QSESetECFRetries++;
+                parent->timer->queueCallback(this, &tagMSTQSEandSetECF,
+                                             HDCP_QSEANDSETECF_COOLDOWN);
+            }
+        }
+        else
+        {
+            DP_ASSERT(0 && "MST HDCP not authenticated within timeout and fail to set ECF.");
+        }
+    }
+}
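+
+// expired() above dispatches on the address of a per-purpose tag member,
+// which is this library's pattern for multiplexing one timer sink across
+// several pending events. Sketch of the arm/cancel pairing (delay constants
+// as used elsewhere in this file):
+//
+//     parent->timer->queueCallback(this, &tagHDCPReauthentication,
+//                                  HDCP_AUTHENTICATION_COOLDOWN);
+//     ...
+//     parent->timer->cancelCallback(this, &tagHDCPReauthentication);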
+        Device * d = 0;
+        bool isHdcpCapable = false;
+        for (d = ((Group*)this)->enumDevices(0); d != 0; d = ((Group*)this)->enumDevices(d))
+        {
+            NvU8 Bcaps = (NvU8)(((DeviceImpl*)d)->nvBCaps[0]);
+
+            if ((FLD_TEST_DRF(_DPCD, _HDCP_BCAPS_OFFSET, _HDCP_CAPABLE, _YES, Bcaps)) &&
+                (((DeviceImpl*)d)->isHDCPCap == True))
+            {
+                isHdcpCapable = true;
+                break;
+            }
+        }
+
+        if (isHdcpCapable == false)
+        {
+            DP_PRINTF(DP_ERROR, "DP-GRP: group does not contain an HDCP-capable device.");
+            return false;
+        }
+
+        parent->main->configureHDCPGetHDCPState(hdcpState);
+
+        // Clear dplib authentication state if RM reports not authenticated.
+        if (!hdcpState.HDCP_State_Authenticated)
+        {
+            parent->isHDCPAuthOn = this->hdcpEnabled = false;
+        }
+
+        // Update the stream content type and trigger renegotiation if needed.
+        if ((hdcpState.HDCP_State_22_Capable) &&
+            (false == parent->main->setStreamType(streamIndex, streamType, &bNeedReNegotiate)))
+        {
+            DP_PRINTF(DP_ERROR, "DP-GRP: group set stream type failed.");
+            return false;
+        }
+
+        if (!parent->isHDCPAuthOn || bNeedReNegotiate)
+        {
+            cancelHdcpCallbacks();
+
+            parent->main->configureHDCPRenegotiate();
+            parent->main->configureHDCPGetHDCPState(hdcpState);
+            if (hdcpState.HDCP_State_Encryption)
+            {
+                parent->isHDCPAuthOn = this->hdcpEnabled = true;
+            }
+            else
+            {
+                parent->isHDCPAuthOn = this->hdcpEnabled = false;
+                parent->timer->queueCallback(this, &tagHDCPReauthentication, HDCP_AUTHENTICATION_COOLDOWN);
+            }
+        }
+        else
+        {
+            // For SST and non-128b/132b links, we are done once authenticated.
+            if (!parent->linkUseMultistream())
+                return true;
+        }
+
+        if (parent->linkUseMultistream())
+        {
+            // If already authenticated, assign the ECF now; otherwise wait
+            // until authentication completes before assigning the ECF.
+            if (!parent->isHDCPAuthOn || bNeedReNegotiate)
+            {
+                parent->timer->queueCallback(this, &tagMSTQSEandSetECF, HDCP_QSEANDSETECF_COOLDOWN);
+                return true;
+            }
+            else
+            {
+                parent->timer->cancelCallback(this, &tagMSTQSEandSetECF);
+                hdcpMSTQSEandSetECF();
+            }
+        }
+    }
+    else
+    {
+        if (parent->isHDCPAuthOn)
+        {
+            if (!parent->linkUseMultistream())
+            {
+                parent->main->configureHDCPDisableAuthentication();
+                parent->isHDCPAuthOn = this->hdcpEnabled = false;
+            }
+            else
+            {
+                NvU64 ecf = 0x0;
+                NvU64 countOnes = 0x0;
+                NvU64 mask = 0x0;
+
+                // Build the mask covering all active groups which have ECF enabled.
+                for (ListElement * i = parent->activeGroups.begin(); i != parent->activeGroups.end(); i = i->next)
+                {
+                    GroupImpl * group = (GroupImpl *)i;
+                    if (group->hdcpEnabled)
+                    {
+                        countOnes = (((NvU64)1) << group->timeslot.count) - 1;
+
+                        mask = countOnes << group->timeslot.begin;
+                        ecf |= mask;
+                    }
+                }
+
+                // Just clear the ECF; do not turn off the auth.
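+                //
+                // Worked example of the mask arithmetic (hypothetical values):
+                // a group occupying timeslot.begin = 5 with timeslot.count = 3
+                // gives
+                //     countOnes = (1 << 3) - 1 = 0b111
+                //     mask      = 0b111 << 5   = 0xE0
+                // so 'ecf |= mask' marks that group's timeslots as encrypted,
+                // and 'ecf &= ~mask' clears exactly those slots in the 64-bit
+                // ECF.
+                //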
+                for (ListElement * i = parent->activeGroups.begin(); i != parent->activeGroups.end(); i = i->next)
+                {
+                    GroupImpl * group = (GroupImpl *)i;
+
+                    if (this->headIndex == group->headIndex)
+                    {
+                        countOnes = (((NvU64)1) << group->timeslot.count) - 1;
+
+                        mask = countOnes << group->timeslot.begin;
+                        ecf &= ~mask;
+                    }
+                }
+
+                parent->main->configureAndTriggerECF(ecf, bForceClear, bAddStreamBack);
+
+                for (ListElement * i = parent->activeGroups.begin(); i != parent->activeGroups.end(); i = i->next)
+                {
+                    GroupImpl * group = (GroupImpl *)i;
+                    if (this->headIndex == group->headIndex)
+                    {
+                        group->hdcpEnabled = false;
+                        { // Inform the ConnectorEventSink that we have disabled HDCP on this Device.
+                            Device * d = 0;
+                            for (d = ((Group*)this)->enumDevices(0); d != 0; d = ((Group*)this)->enumDevices(d))
+                            {
+                                if (((DeviceImpl*)d)->isHDCPCap == True)
+                                {
+                                    parent->sink->notifyHDCPCapDone(d, False);
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        else
+            return true;
+    }
+
+    return true;
+}
+//DP_OPTION_HDCP_SUPPORT_ENABLE
+
+bool GroupImpl::hdcpGetEncrypted()
+{
+    //
+    // Returns whether encryption is currently enabled.
+    // After the ECF is set we just set this flag for the group; the default
+    // is false.
+    //
+    if (parent->isHDCPAuthOn)
+    {
+        return this->hdcpEnabled;
+    }
+    else
+    {
+        return false;
+    }
+}
+
+void GroupImpl::hdcpMSTQSEandSetECF()
+{
+
+    //
+    // We become passive and wait for the Stream_Status_Change to come;
+    // otherwise we might not have the chance to get the updated KSVList to
+    // validate. Until the Stream_Status_Change change lands, we simply turn
+    // it on (which can be the option for the non-QSE branch, AKA the
+    // Intel/AMD plan).
+    //
+
+    //
+    // Send the QSES message only when the regkey 'DISABLE_QSES' is set to 0
+    // in DD's path.
+    // This is added to provide a driver for ST and is not to be productized.
+    //
+    if ((parent->bIsEncryptionQseValid) &&
+        (!parent->main->getRegkeyValue(NV_DP_REGKEY_DISABLE_QSES)))
+    {
+        for (ListElement * i = parent->activeGroups.begin();
+             i != parent->activeGroups.end(); i = i->next)
+        {
+            GroupImpl * group = (GroupImpl *)i;
+
+            if (this->headIndex == group->headIndex)
+            {
+                HDCPValidateData hdcpValidateData = {0};
+                parent->main->configureHDCPValidateLink(hdcpValidateData);
+                parent->qseNonceGenerator->clientIdBuilder(hdcpValidateData.aN);
+            }
+        }
+    }
+
+    //
+    // Turn the ECF on according to the groups' active streams.
+    // Set the flag on the group for later status queries.
+    //
+    NvU64 ecf = 0x0;
+    NvU64 countOnes = 0x0;
+    NvU64 mask = 0x0;
+
+    // Build the mask covering all active groups which have ECF enabled.
+    for (ListElement * i = parent->activeGroups.begin();
+         i != parent->activeGroups.end(); i = i->next)
+    {
+        GroupImpl * group = (GroupImpl *)i;
+        if (group->hdcpEnabled)
+        {
+            countOnes = (((NvU64)1) << group->timeslot.count) - 1;
+
+            mask = countOnes << group->timeslot.begin;
+            ecf |= mask;
+        }
+    }
+
+    for (ListElement * i = parent->activeGroups.begin();
+         i != parent->activeGroups.end(); i = i->next)
+    {
+        GroupImpl * group = (GroupImpl *)i;
+
+        if (this->headIndex == group->headIndex)
+        {
+            countOnes = (((NvU64)1) << group->timeslot.count) - 1;
+
+            mask = countOnes << group->timeslot.begin;
+            ecf |= mask;
+        }
+    }
+
+    // Set the ECF including the newly added group.
+    parent->main->configureAndTriggerECF(ecf);
+
+    //
+    // Send the QSES message only when the regkey 'DISABLE_QSES' is set to 0
+    // in DD's path.
+    // This is added to provide a driver for ST and is not to be productized.
+    //
+    if ((parent->bIsEncryptionQseValid) &&
+        (!parent->main->getRegkeyValue(NV_DP_REGKEY_DISABLE_QSES)))
+    {
+        for (ListElement * i = parent->activeGroups.begin();
+             i != parent->activeGroups.end(); i = i->next)
+        {
+            GroupImpl * group = (GroupImpl *)i;
+
+            if (this->headIndex == group->headIndex)
+            {
+                if (NULL == group->streamEncryptionStatusDetection)
+                {
+                    group->streamEncryptionStatusDetection =
+                        new StreamEncryptionStatusDetection(group, parent);
+                }
+                if (group->streamEncryptionStatusDetection)
+                {
+                    parent->bValidQSERequest = true;
+                    group->streamEncryptionStatusDetection->sendQSEMessage(group);
+                    parent->timer->queueCallback(group,
+                                                 &(group->tagStreamValidation),
+                                                 HDCP_STREAM_VALIDATION_REQUEST_COOLDOWN);
+                }
+            }
+        }
+    }
+
+    for (ListElement * i = parent->activeGroups.begin();
+         i != parent->activeGroups.end(); i = i->next)
+    {
+        GroupImpl * group = (GroupImpl *)i;
+
+        if (this->headIndex == group->headIndex)
+        {
+            group->hdcpEnabled = true;
+            { // Inform the ConnectorEventSink that we have enabled HDCP on this Device.
+                Device * d = 0;
+                for (d = ((Group*)this)->enumDevices(0); d != 0; d = ((Group*)this)->enumDevices(d))
+                {
+                    if (((DeviceImpl*)d)->isHDCPCap == True)
+                    {
+                        parent->sink->notifyHDCPCapDone(d, True);
+                    }
+                }
+            }
+        }
+    }
+}
+
+void GroupImpl::updateVbiosScratchRegister(Device * lastDev)
+{
+    if (!parent->bDisableVbiosScratchRegisterUpdate &&
+        parent->lastDeviceSetForVbios == lastDev)
+    {
+        // Take a device which is part of a group.
+        for (ListElement * e = parent->deviceList.begin();
+             e != parent->deviceList.end(); e = e->next)
+        {
+            DeviceImpl * dev = (DeviceImpl *)e;
+
+            if (dev->activeGroup && dev->activeGroup->isHeadAttached())
+            {
+                NvU32 address = 0;
+                NvU32 addrSize = dev->getTopologyAddress().size();
+
+                // Set the MS_SCRATCH_REGISTER for the lighted-up display.
+                for (NvU32 i = addrSize; i; --i)
+                {
+                    address |= ((dev->address[i-1] & 0xF) << ((addrSize - i)*4));
+                }
+
+                parent->main->configureMsScratchRegisters(address, addrSize, 3);
+
+                parent->lastDeviceSetForVbios = (Device *)dev;
+
+                return;
+            }
+        }
+    }
+}
+
+//
+// Helper function for attaching and detaching heads.
+//
+// For attach, we assert if the group already has a head attached but, for
+// some device in the group, the active group does not point to the current
+// group.
+// For detach, we assert if the group does not have a head attached but some
+// device in the group has an active group, OR the head is marked attached
+// but not all devices in the group have the current group as their active
+// group.
+// This also sets or clears dev->activeGroup for each contained
+// device.
+//
+void GroupImpl::setHeadAttached(bool attached)
+{
+    for (Device * i = enumDevices(0); i; i = enumDevices(i))
+    {
+        DeviceImpl *di = (DeviceImpl *)i;
+
+        if (attached)
+        {
+            if (headAttached)
+            {
+                DP_ASSERT(di->activeGroup == this);
+            }
+            di->activeGroup = this;
+        }
+        else
+        {
+            if (!headAttached)
+            {
+                DP_ASSERT(di->activeGroup == NULL);
+            }
+            else
+            {
+                DP_ASSERT(di->activeGroup == this);
+            }
+            di->activeGroup = NULL;
+        }
+    }
+    headAttached = attached;
+}
diff --git a/src/common/displayport/src/dp_guid.cpp b/src/common/displayport/src/dp_guid.cpp
new file mode 100644
index 0000000..271aada
--- /dev/null
+++ b/src/common/displayport/src/dp_guid.cpp
@@ -0,0 +1,81 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort********************************\
+*                                                                           *
+* Module: dp_guid.cpp                                                       *
+*                                                                           *
+\***************************************************************************/
+#include "dp_internal.h"
+#include "dp_guid.h"
+#include "dp_buffer.h"
+
+using namespace DisplayPort;
+
+//
+// Linear congruential random number generator.
+// Multiplier and increment constants taken from Numerical Recipes.
+//
+NvU32 GUIDBuilder::random()
+{
+    previousRandom = static_cast<NvU32>(((NvU64)1664525 * previousRandom + 1013904223) & 0xFFFFFFFF);
+    return previousRandom;
+}
+
+GUIDBuilder::GUIDBuilder(Timer * source, NvU32 salt)
+    : salt(salt), source(source)
+{
+    previousRandom = static_cast<NvU32>(source->getTimeUs() & 0xFFFFFFFF);
+}
+
+void GUIDBuilder::makeGuid(GUID & guid)
+{
+    NvU64 currentTimer = source->getTimeUs();
+    guid.data[0] = static_cast<NvU8>((salt >> 24) & 0xFF);
+    guid.data[1] = static_cast<NvU8>((salt >> 16) & 0xFF);
+    guid.data[2] = static_cast<NvU8>((salt >> 8) & 0xFF);
+    guid.data[3] = static_cast<NvU8>((salt) & 0xFF);
+
+    guid.data[4] = static_cast<NvU8>((currentTimer >> 56) & 0xFF);
+    guid.data[5] = static_cast<NvU8>((currentTimer >> 48) & 0xFF);
+    guid.data[6] = static_cast<NvU8>((currentTimer >> 40) & 0xFF);
+    guid.data[7] = static_cast<NvU8>((currentTimer >> 32) & 0xFF);
+    guid.data[8] = static_cast<NvU8>((currentTimer >> 24) & 0xFF);
+    guid.data[9] = static_cast<NvU8>((currentTimer >> 16) & 0xFF);
+    guid.data[10] = static_cast<NvU8>((currentTimer >> 8) & 0xFF);
+    guid.data[11] = static_cast<NvU8>((currentTimer) & 0xFF);
+
+    unsigned rnd = random();
+    guid.data[12] = static_cast<NvU8>((rnd >> 24) & 0xFF);
+    guid.data[13] = static_cast<NvU8>((rnd >> 16) & 0xFF);
+    guid.data[14] = static_cast<NvU8>((rnd >> 8) & 0xFF);
+    guid.data[15] = static_cast<NvU8>((rnd) & 0xFF);
+
+    //
+    // Spin until we get a new timer counter.
+    // This guarantees a monotonically increasing counter.
+    //
+    while (source->getTimeUs() == currentTimer)
+        ;
+}
diff --git a/src/common/displayport/src/dp_linkconfig.cpp b/src/common/displayport/src/dp_linkconfig.cpp
new file mode 100644
index 0000000..b25caac
--- /dev/null
+++ b/src/common/displayport/src/dp_linkconfig.cpp
@@ -0,0 +1,157 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort********************************\
+*                                                                           *
+* Module: dp_linkconfig.cpp                                                 *
+*    DP LinkConfiguration implementation                                    *
+*                                                                           *
+\***************************************************************************/
+
+#include "dp_linkconfig.h"
+#include "dp_regkeydatabase.h"
+
+using namespace DisplayPort;
+
+bool LinkConfiguration::lowerConfig(bool bReduceLaneCnt)
+{
+    //
+    // TODO: bReduceLaneCnt falls back to 4 lanes at the next lower valid
+    //       link rate. We should instead reset to the maximum lane count
+    //       the sink supports.
+    //
+
+    LinkRate lowerRate = policy.getLinkRates()->getLowerRate(peakRate);
+
+    if (bReduceLaneCnt)
+    {
+        // Reduce laneCount before reducing linkRate.
+        if (lanes == laneCount_1)
+        {
+            if (lowerRate)
+            {
+                lanes = laneCount_4;
+                peakRate = lowerRate;
+            }
+            else
+            {
+                lanes = laneCount_0;
+            }
+        }
+        else
+        {
+            lanes /= 2;
+        }
+    }
+    else
+    {
+        // Reduce the link rate instead of the lane count.
+        if (lowerRate)
+        {
+            peakRate = lowerRate;
+        }
+        else
+        {
+            lanes /= 2;
+        }
+    }
+
+    minRate = linkOverhead(peakRate);
+    return lanes != laneCount_0;
+}
+
+LinkConfiguration::LinkConfiguration(LinkPolicy * p, unsigned lanes, LinkRate peakRate,
+                                     bool enhancedFraming, bool MST, bool disablePostLTRequest,
+                                     bool bEnableFEC, bool bDisableLTTPR, bool bDisableDownspread)
+    : lanes(lanes), peakRatePossible(peakRate), peakRate(peakRate),
+      enhancedFraming(enhancedFraming), multistream(MST),
+      disablePostLTRequest(disablePostLTRequest), bEnableFEC(bEnableFEC),
+      bDisableLTTPR(bDisableLTTPR), bDisableDownspread(bDisableDownspread),
+      linkTrainCounter(0)
+{
+    // Derate for downspread and FEC.
+    minRate = linkOverhead(peakRate);
+    if (p)
+    {
+        policy = *p;
+    }
+}
+
+NvU64 LinkConfiguration::getBytesPerTimeslot()
+{
+    NvU64 bytes_per_timeslot;
+
+    // Spread is already considered during the PBN calculation for the
+    // required mode; no need to consider it here.
+    {
+        // 8b/10b case
+        bytes_per_timeslot = getTotalDataRate() / 64;
+    }
+
+    return bytes_per_timeslot;
+}
+
+NvU32 LinkConfiguration::PBNForSlots(NvU32 slots)
+{
+    NvU64 bytes_per_pbn = 54 * 1000000 / 64;
+    NvU64 bytes_per_timeslot = getBytesPerTimeslot();
+
+    return (NvU32)(bytes_per_timeslot * slots / bytes_per_pbn);    // Rounded down
+}
+
+NvU32 LinkConfiguration::slotsForPBN(NvU32 allocatedPBN, bool usable)
+{
+    NvU64 bytes_per_pbn = 54 * 1000000 / 64;
+    NvU64 bytes_per_timeslot = getBytesPerTimeslot();
+    NvU32 slots;
+
+    if (bytes_per_timeslot == 0)
+        return (NvU32)-1;
+
+    if (usable)
+    {
+        // Round down to find the usable integral slots for a given value of PBN.
+        slots = (NvU32)divide_floor(allocatedPBN * bytes_per_pbn, bytes_per_timeslot);
+        DP_ASSERT(slots <= 64);
+    }
+    else
+    {
+        slots = (NvU32)divide_ceil(allocatedPBN * bytes_per_pbn, bytes_per_timeslot);
+    }
+
+    return slots;
+}
+
+void LinkConfiguration::pbnRequired(const ModesetInfo & modesetInfo, unsigned & base_pbn, unsigned & slots, unsigned & slots_pbn)
+{
+
+    base_pbn = pbnForMode(modesetInfo);
+
+    if (bEnableFEC)
+    {
+        // If FEC is enabled, we need to consider the 2.4% overhead as per the DP 1.4 spec.
+        base_pbn = (NvU32)(divide_ceil(base_pbn * 1000, 976));
+    }
+
+    slots = slotsForPBN(base_pbn);
+    slots_pbn = PBNForSlots(slots);
+}
diff --git a/src/common/displayport/src/dp_list.cpp b/src/common/displayport/src/dp_list.cpp
new file mode 100644
index 0000000..d8b3b86
--- /dev/null
+++ b/src/common/displayport/src/dp_list.cpp
@@ -0,0 +1,159 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* List **************************************\
+*                                                                           *
+* Module: dp_list.cpp                                                       *
+*    Simple doubly linked list                                              *
+*                                                                           *
+\***************************************************************************/
+
+#include "dp_internal.h"
+#include "dp_list.h"
+
+using namespace DisplayPort;
+
+ListElement::ListElement()
+    : next(0),
+      prev(0)
+{
+}
+
+ListElement::~ListElement()
+{
+    if (this->next)
+    {
+        this->prev->next = this->next;
+        this->next->prev = this->prev;
+        this->next = 0;
+    }
+}
+
+List::List()
+{
+    this->next = this;
+    this->prev = this;
+}
+
+void List::clear()
+{
+    while (!isEmpty())
+        delete begin();
+}
+
+List::~List()
+{
+    clear();
+    this->next = this;
+    this->prev = this;
+}
+
+bool List::isEmpty()
+{
+    return this->next == this;
+}
+
+void List::insertFront(ListElement * item)
+{
+    DP_ASSERT(item->next == 0 && "Attempt to insert when it's already in a list");
+    item->prev = this;
+    item->next = this->next;
+    item->prev->next = item;
+    item->next->prev = item;
+}
+
+void List::insertBack(ListElement * item)
+{
+    DP_ASSERT(item->next == 0 && "Attempt to insert when it's already in a list");
+    item->prev = this->prev;
+    item->next = this;
+    item->prev->next = item;
+    item->next->prev = item;
+}
+
+void List::insertBefore(ListElement * insertBeforeThis, ListElement * item)
+{
+    DP_ASSERT(item->next == 0 && "Attempt to insert when it's already in a list");
+    item->next = insertBeforeThis;
+    item->prev = insertBeforeThis->prev;
+    insertBeforeThis->prev->next = item;
+    insertBeforeThis->prev = item;
+}
+
+ListElement* List::front()
+{
+    DP_ASSERT(!isEmpty());
+    return this->next;
+}
+
+ListElement* List::last()
+{
+    DP_ASSERT(!isEmpty());
+    return this->prev;
+}
+
+ListElement * List::remove(ListElement * item)
+{
+    // Skip if it is not in a list.
+    if (!item->next)
+        return item;
+
+    item->prev->next = item->next;
+    item->next->prev = item->prev;
+    item->next = 0;
+    item->prev = 0;
+
+    return item;
+}
+
+bool List::contains(ListElement * item)
+{
+    for (ListElement * i = begin(); i != end(); i = i->next)
+    {
+        if (i == item)
+            return true;
+    }
+    return false;
+}
+
+ListElement * List::replace(ListElement * replacement, ListElement * replacee)
+{
+    if (!(replacement && replacee))
+    {
+        DP_ASSERT(0 && "replacement or replacee is a NULL pointer");
+        return 0;
+    }
+
+    DP_ASSERT(replacement->next && replacement->prev);
+
+    // We assume replacee exists in the list.
+    replacement->next = replacee->next;
+    replacement->prev = replacee->prev;
+
+    if (replacement->next)
+        replacement->next->prev = replacement;
+
+    if (replacement->prev)
+        replacement->prev->next = replacement;
+
+    return replacee;
+}
diff --git a/src/common/displayport/src/dp_merger.cpp b/src/common/displayport/src/dp_merger.cpp
new file mode 100644
index 0000000..fbd207a
--- /dev/null
+++ b/src/common/displayport/src/dp_merger.cpp
@@ -0,0 +1,311 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort********************************\
+*                                                                           *
+* Module: dp_merger.cpp                                                     *
+*    Asynchronous Message merger                                            *
+*                                                                           *
+\***************************************************************************/
+
+#include "dp_internal.h"
+#include "dp_bitstream.h"
+#include "dp_merger.h"
+#include "dp_auxdefs.h"
+#include "dp_crc.h"
+#include "dp_messageheader.h"
+#include "dp_printf.h"
+
+using namespace DisplayPort;
+
+EncodedMessage * MessageTransactionMerger::pushTransaction(MessageHeader * header, Buffer * data)
+{
+    if (freeOnNextCall)
+    {
+        delete freeOnNextCall;
+        freeOnNextCall = 0;
+    }
+
+    IncompleteMessage * imsg = getTransactionRecord(header->address, header->messageNumber);
+
+    if (!imsg)
+    {
+        DP_PRINTF(DP_WARNING, "DP-MM> Ignoring message due to OOM");
+        return 0;
+    }
+
+    if (header->isTransactionStart)
+    {
+        imsg->message.isPathMessage = header->isPathMessage;
+        imsg->message.isBroadcast = header->isBroadcast;
+    }
+    else
+    {
+        if (imsg->message.buffer.length == 0)
+        {
+            DP_PRINTF(DP_NOTICE, "DP-MM> Expected transaction-start, ignoring message transaction");
+            return 0;
+        }
+
+        if (imsg->message.isPathMessage != header->isPathMessage ||
+            imsg->message.isBroadcast != header->isBroadcast)
+        {
+            DP_ASSERT(0 && "Message type changed during transmission");
+        }
+    }
+
+    //
+    // Check for redundant start
+    //
+    if (header->isTransactionStart && imsg->message.buffer.length)
+    {
+        DP_PRINTF(DP_WARNING, "DP-MM> Unexpected repeated transaction-start, resetting message state.");
+
+        // We must have seen a previous incomplete transaction from this
+        // device; they've begun a new packet.
+        // Forget the old one.
+        imsg->message.buffer.reset();
+    }
+
+    //
+    // Kill the buffer if we've got less payload than we should
+    //
+    if (header->payloadBytes > data->length)
+    {
+        freeOnNextCall = imsg;
+        imsg->message.buffer.reset();
+        DP_PRINTF(DP_ERROR, "DP-MM> Received truncated or corrupted message transaction");
+        return 0;
+    }
+
+    //
+    // Verify transaction CRC
+    //
+    BitStreamReader bsr(data, header->headerSizeBits, (header->payloadBytes-1)*8);
+    NvU8 dataCrc = (NvU8)dpCalculateBodyCRC(&bsr);
+
+    DP_ASSERT(header->headerSizeBits % 8 == 0 && "Header must be byte aligned");
+
+    if (dataCrc != data->data[header->headerSizeBits/8 + header->payloadBytes - 1] ||
+        header->payloadBytes == 0)
+    {
+        DP_PRINTF(DP_ERROR, "DP-MM> Received corrupted message transaction");
+        freeOnNextCall = imsg;
+        imsg->message.buffer.reset();
+        return 0;
+    }
+
+    // Discount the processed CRC from the payload count.
+    header->payloadBytes--;
+
+    //
+    // Append to the active buffer
+    //
+    unsigned i = imsg->message.buffer.length;
+    imsg->message.buffer.resize(i + header->payloadBytes);
+    dpMemCopy(&imsg->message.buffer.data[i], &data->data[header->headerSizeBits/8], header->payloadBytes);
+
+    //
+    // Check for end of message transaction
+    //
+    if (header->isTransactionEnd)
+    {
+        freeOnNextCall = imsg;
+
+        return &imsg->message;
+    }
+
+    return 0;
+}
+
+MessageTransactionMerger::IncompleteMessage * MessageTransactionMerger::getTransactionRecord(const Address & address, unsigned messageNumber)
+{
+    IncompleteMessage * msg;
+    NvU64 currentTime = this->timer->getTimeUs();
+
+    //
+    // Search for an existing record
+    //
+    for (ListElement * i = incompleteMessages.begin(); i != incompleteMessages.end();)
+    {
+        msg = (IncompleteMessage *)i;
+        i = i->next;
+        if (msg->message.address == address && msg->message.messageNumber == messageNumber)
+        {
+            goto found;
+        }
+
+        //
+        // Found a stale message in the list
+        //
+        if (msg->lastUpdated + incompleteMessageTimeoutMs < currentTime)
+            delete msg;
+    }
+
+    //
+    // None exists? Add a new one
+    //
+    msg = new IncompleteMessage();
+    msg->message.address = address;
+    msg->message.messageNumber = messageNumber;
+    this->incompleteMessages.insertFront(msg);
+
+found:
+    //
+    // Update the timestamp
+    //
+    msg->lastUpdated = currentTime;
+
+    return msg;
+}
+
+void IncomingTransactionManager::mailboxInterrupt()
+{
+    MessageHeader msg;
+    unsigned totalSize;
+    AuxRetry::status result;
+    unsigned txSize = (unsigned)getTransactionSize();
+
+    //
+    // Size the static aux window
+    //
+    this->localWindow.resize(DP_MAX((unsigned)getTransactionSize(), (unsigned)getMessageBoxSize()));
+    if (this->localWindow.isError())
+        return;
+
+    //
+    // Read one aux-transaction worth of data
+    //
+    result = readMessageBox(0, &this->localWindow.data[0], txSize);
+
+    DP_ASSERT(result != AuxRetry::defer && "Unexpected?!");
+
+    if (result != AuxRetry::ack)
+        return;
+
+    BitStreamReader reader(&this->localWindow, 0, 8*txSize);
+
+    //
+    // Before decoding the header, start with the downstream
+    // port's address prefix.
+    //
+    if (!decodeHeader(&reader, &msg, addressPrefix))
+    {
+        //
+        // It's possible we should be NACKing here. Ignoring for now
+        // to allow the message originator to time out (can take seconds).
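+        // Timing out is presumably the safer failure mode here: a NAK would
+        // itself require a well-formed reply header for a message we could
+        // not parse.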
+        //
+        DP_ASSERT(0 && "Not yet implemented");
+
+        return;
+    }
+
+    //
+    // Let's get the entire sideband message in the localWindow
+    //
+    totalSize = (msg.headerSizeBits / 8) + msg.payloadBytes;
+
+    if (totalSize > txSize)
+    {
+        if (totalSize > DPCD_MESSAGEBOX_SIZE)
+        {
+            //
+            // Corrupt packet - the total packet can't be larger than the window
+            //
+            return;
+        }
+        if (AuxRetry::ack != readMessageBox(txSize, &this->localWindow.data[txSize], totalSize - txSize))
+        {
+            //
+            // Failed to read the second half of the message
+            //
+            return;
+        }
+    }
+
+    clearMessageBoxInterrupt();
+
+    EncodedMessage * em = incompleteMessages.pushTransaction(&msg, &this->localWindow);
+
+    if (em)
+    {
+        this->sink->messagedReceived(this, em);
+    }
+}
+
+IncomingTransactionManager::~IncomingTransactionManager()
+{
+}
+
+IncomingTransactionManager::IncomingTransactionManager(Timer * timer, const Address & addressPrefix, IncomingTransactionManagerEventSink * sink)
+    : incompleteMessages(timer, DP_INCOMPLETE_MESSAGE_TIMEOUT_USEC), addressPrefix(addressPrefix)
+{
+    this->sink = sink;
+    this->timer = timer;
+}
+
+AuxRetry::status DownReplyManager::readMessageBox(NvU32 offset, NvU8 * data, size_t length)
+{
+    return hal->readDownReplyMessageBox(offset, data, length);
+}
+
+size_t DownReplyManager::getMessageBoxSize()
+{
+    return hal->getDownReplyMessageBoxSize();
+}
+
+size_t DownReplyManager::getTransactionSize()
+{
+    return hal->getTransactionSize();
+}
+
+void DownReplyManager::clearMessageBoxInterrupt()
+{
+    hal->clearInterruptDownReplyReady();
+}
+
+AuxRetry::status UpRequestManager::readMessageBox(NvU32 offset, NvU8 * data, size_t length)
+{
+    return hal->readUpRequestMessageBox(offset, data, length);
+}
+
+size_t UpRequestManager::getMessageBoxSize()
+{
+    return hal->getUpRequestMessageBoxSize();
+}
+
+size_t UpRequestManager::getTransactionSize()
+{
+    return hal->getTransactionSize();
+}
+
+void UpRequestManager::clearMessageBoxInterrupt()
+{
+    hal->clearInterruptUpRequestReady();
+}
diff --git a/src/common/displayport/src/dp_messagecodings.cpp b/src/common/displayport/src/dp_messagecodings.cpp
new file mode 100644
index 0000000..8aa2dae
--- /dev/null
+++ b/src/common/displayport/src/dp_messagecodings.cpp
@@ -0,0 +1,749 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_messagecodings.cpp                                             *
+*    Encoding routines for various messages                                 *
+*                                                                           *
+\***************************************************************************/
+#include "dp_internal.h"
+#include "dp_messagecodings.h"
+#include "dp_auxdefs.h"
+
+using namespace DisplayPort;
+
+//
+// LINK_ADDRESS 0x1
+//
+void LinkAddressMessage::set(const Address & target)
+{
+    clear();
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    //
+    // Write request identifier
+    //
+    writer.write(0 /*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    encodedMessage.isPathMessage = false;
+    encodedMessage.isBroadcast = false;
+    encodedMessage.address = target;
+}
+
+ParseResponseStatus LinkAddressMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    DisplayPort::extractGUID(reader, &reply.guid);
+    reader->readOrDefault(4 /*zeroes*/, 0);
+    reply.numberOfPorts = reader->readOrDefault(4 /*Number_Of_Ports*/, 0xF);
+
+    for (unsigned i = 0; i < reply.numberOfPorts; i++)
+    {
+        reply.res[i].isInputPort = !!reader->readOrDefault(1 /*Input_Port*/, 1);
+        reply.res[i].peerDeviceType = (PeerDevice) reader->readOrDefault(3 /*Peer_Device_Type*/, 0x0);
+        reply.res[i].portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+        reply.res[i].hasMessaging = !!reader->readOrDefault(1 /*Messaging_Capability_Status*/, 0x1);
+        reply.res[i].dpPlugged = !!reader->readOrDefault(1 /*DisplayPort_Device_Plug_Status*/, 0x1);
+
+        if (reply.res[i].isInputPort == false)
+        {
+            reply.res[i].legacyPlugged = !!reader->readOrDefault(1 /*Legacy_Device_Plug_Status*/, 0x1);
+
+            reader->readOrDefault(5 /*zeroes*/, 0x0);
+
+            unsigned ver = reader->readOrDefault(8 /*DPCD_Revision*/, 0);
+            reply.res[i].dpcdRevisionMajor = ver >> 4;
+            reply.res[i].dpcdRevisionMinor = ver & 0xF;
+            DisplayPort::extractGUID(reader, &reply.res[i].peerGUID);
+            reply.res[i].SDPStreams = reader->readOrDefault(4 /*Number_SDP_Streams*/, 0xF);
+            reply.res[i].SDPStreamSinks = reader->readOrDefault(4 /*Number_SDP_Stream_Sinks*/, 0xF);
+        }
+        else
+        {
+            reader->readOrDefault(6 /*zeroes*/, 0x0);
+        }
+    }
+
+    return ParseResponseSuccess;
+}
+
+//
+// CONNECTION_STATUS_NOTIFY 0x2
+//
+ConnStatusNotifyMessage::ConnStatusNotifyMessage(MessageReceiverEventSink * sink)
+: MessageReceiver(sink, NV_DP_SBMSG_REQUEST_ID_CONNECTION_STATUS_NOTIFY /*request id*/)
+{
+}
+
+bool ConnStatusNotifyMessage::processByType(EncodedMessage * message, BitStreamReader * reader)
+{
+    // Read the request body.
+    request.port = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+    reader->readOrDefault(4 /*zeroes*/, 0);
+    bool status = DisplayPort::extractGUID(reader /*GUID of the originating branch device*/, &request.guid);
+    reader->readOrDefault(1 /*zero*/, 0);
+    request.legacyPlugged = !!reader->readOrDefault(1 /*Legacy_Device_Plug_Status*/, 0);
+    request.devicePlugged = !!reader->readOrDefault(1 /*DisplayPort_Device_Plug_Status*/, 0);
+    request.messagingCapability = !!reader->readOrDefault(1 /*Messaging_Capability_Status*/, 0);
+    request.isInputPort = !!reader->readOrDefault(1 /*Input_Port*/, 0);
+    request.peerDeviceType = (PeerDevice) reader->readOrDefault(3 /*Peer_Device_Type*/, 0);
+
+    // The action will be implemented by the event sink.
+    this->sink->messageProcessed(this);
+    return status;
+}
+
+//
+// GENERIC_UP_REPLY 0xnn
+//
+void GenericUpReplyMessage::set(const Address & target,
+                                bool bReplyIsNack,
+                                bool bBroadcast,
+                                bool bPath)
+{
+    clear();
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
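+    //
+    // For reference: the first sideband byte written below is Reply_Type
+    // (1 bit, 1 = NACK) followed by the 7-bit request identifier, so an ACK
+    // reply to ALLOCATE_PAYLOAD (0x11) begins with byte 0x11 while a NACK
+    // begins with 0x91 (0x80 | 0x11).
+    //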
+    writer.write(bReplyIsNack ? 1 : 0, 1);
+    writer.write(requestIdentifier, 7);
+
+    encodedMessage.isPathMessage = bPath;
+    encodedMessage.isBroadcast = bBroadcast;
+    encodedMessage.address = target;
+}
+
+GenericUpReplyMessage::GenericUpReplyMessage(unsigned requestId, bool bReplyIsNack, bool bBroadcast, bool bPath)
+: Message(requestId, NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT)
+{
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    //
+    // Write request identifier
+    //
+    writer.write(bReplyIsNack ? 1 : 0, 1);
+    writer.write(requestId, 7);
+
+    encodedMessage.isPathMessage = bPath;
+    encodedMessage.isBroadcast = bBroadcast;
+}
+
+GenericUpReplyMessage::GenericUpReplyMessage(const Address & target, unsigned requestId, bool bReplyIsNack, bool bBroadcast, bool bPath)
+: Message(requestId, NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT)
+{
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    //
+    // Write request identifier
+    //
+    writer.write(bReplyIsNack ? 1 : 0, 1);
+    writer.write(requestId, 7);
+
+    encodedMessage.isPathMessage = bPath;
+    encodedMessage.isBroadcast = bBroadcast;
+    encodedMessage.address = target;
+}
+
+ParseResponseStatus GenericUpReplyMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    //
+    // We are not expecting any replies here. Since the corresponding post for
+    // this kind of message is of reply type, the message manager won't queue
+    // an awaiting down reply for it.
+    //
+    DP_ASSERT(0 && "We shouldn't be here!!");
+    return ParseResponseSuccess;
+}
+
+//
+// CLEAR_PAYLOAD_ID_TABLE 0x14
+//
+ClearPayloadIdTableMessage::ClearPayloadIdTableMessage()
+: Message(NV_DP_SBMSG_REQUEST_ID_CLEAR_PAYLOAD_ID_TABLE /* request id */, NV_DP_SBMSG_PRIORITY_LEVEL_1)
+{
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    // Write request identifier
+    writer.write(0/*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    encodedMessage.isPathMessage = true;
+    encodedMessage.isBroadcast = true;
+    encodedMessage.address = Address();
+}
+
+ParseResponseStatus ClearPayloadIdTableMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    return ParseResponseSuccess;
+}
+
+ParseResponseStatus ClearPayloadIdTableMessage::parseResponse(EncodedMessage * message)
+{
+    sink->messageCompleted(this);
+    return ParseResponseSuccess;
+}
+
+//
+// ENUM_PATH_RESOURCES 0x10
+//
+EnumPathResMessage::EnumPathResMessage(const Address & target, unsigned port, bool point)
+: Message(NV_DP_SBMSG_REQUEST_ID_ENUM_PATH_RESOURCES /* request identifier */,
+          NV_DP_SBMSG_PRIORITY_LEVEL_4)
+{
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    // Write request identifier
+    writer.write(0/*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+    writer.write(port, 4);
+    writer.write(0/*zeroes*/, 4);
+
+    encodedMessage.isPathMessage = !point;
+    encodedMessage.isBroadcast = false;
+    encodedMessage.address = target;
+    sinkPort = port;
+    dpMemZero(&reply, sizeof(reply));
+}
+
+ParseResponseStatus EnumPathResMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+    reply.availableStreams = reader->readOrDefault(3 /*Available_Streams*/, 0);
+    reply.bFECCapability = (reader->readOrDefault(1 /*FEC*/, 0x0) == 1) ? true : false;
+    reply.TotalPBN = reader->readOrDefault(16 /*PBN*/, 0xFFFF);
+    reply.FreePBN = reader->readOrDefault(16 /*PBN*/, 0xFFFF);
+    reply.DFPLinkAvailablePBN = reader->readOrDefault(16 /*PBN*/, 0xFFFF);
+
+    if (this->getSinkPort() != reply.portNumber)
+        return ParseResponseWrong;
+
+    return ParseResponseSuccess;
+}
+
+//
+// ALLOCATE_PAYLOAD 0x11
+//
+void AllocatePayloadMessage::set
+(
+    const Address & target,
+    unsigned port,
+    unsigned nSDPStreams,
+    unsigned vcPayloadId,
+    unsigned PBN,
+    unsigned* SDPStreamSink,
+    bool entirePath
+)
+{
+    clear();
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    // Write request identifier
+    writer.write(0/*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    DP_ASSERT(SDPStreamSink || (!nSDPStreams));
+
+    // Write message request body
+    writer.write(port, 4);
+    writer.write(nSDPStreams, 4);
+    writer.write(0/*zero*/, 1);
+    writer.write(vcPayloadId, 7);
+    writer.write(PBN, 16);
+    for (unsigned i = 0; i < nSDPStreams; i++)
+    {
+        writer.write(SDPStreamSink[i], 4);
+    }
+
+    encodedMessage.isPathMessage = entirePath;
+    encodedMessage.isBroadcast = false;
+    encodedMessage.address = target;
+    sinkPort = port;
+}
+
+ParseResponseStatus AllocatePayloadMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+    reader->readOrDefault(5 /*zeroes*/, 0);
+    reply.virtualChannelPayloadId = reader->readOrDefault(7 /*Virtual_Channel_Payload_Identifier*/, 0x0);
+    reply.PBN = reader->readOrDefault(16 /*PBN*/, 0xFFFF);
+
+    if (this->getSinkPort() != reply.portNumber)
+        return ParseResponseWrong;
+
+    return ParseResponseSuccess;
+}
+//
+// QUERY_PAYLOAD 0x12
+//
+QueryPayloadMessage::QueryPayloadMessage
+(
+    const Address & target,
+    unsigned port,
+    unsigned vcPayloadId
+)
+    : Message(NV_DP_SBMSG_REQUEST_ID_QUERY_PAYLOAD /* request identifier*/,
+              NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT)
+{
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    // Write request identifier
+    writer.write(0 /*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    // Write message request
+    writer.write(port, 4);
+    writer.write(0 /*zeroes*/, 5);
+    writer.write(vcPayloadId, 7);
+
+    encodedMessage.isPathMessage = false;
+    encodedMessage.isBroadcast = false;
+    encodedMessage.address = target;
+    sinkPort = port;
+    dpMemZero(&reply, sizeof(reply));
+}
+
+ParseResponseStatus QueryPayloadMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+    reader->readOrDefault(4 /*zeroes*/, 0);
+    reply.allocatedPBN = reader->readOrDefault(16 /*Allocated_PBN*/, 0xFFFF);
+
+    if (this->getSinkPort() != reply.portNumber)
+        return ParseResponseWrong;
+
+    return ParseResponseSuccess;
+}
+
+//
+// RESOURCE_STATUS_NOTIFY 0x13
+//
+ResStatusNotifyMessage::ResStatusNotifyMessage(MessageReceiverEventSink * sink)
+: MessageReceiver(sink, NV_DP_SBMSG_REQUEST_ID_RESOURCE_STATUS_NOTIFY /*request id*/)
+{
+    dpMemZero(&request, sizeof(request));
+}
+
+bool ResStatusNotifyMessage::processByType(EncodedMessage * message, BitStreamReader * reader)
+{
+    bool status;
+
+    // Read the request body.
+    request.port = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+    request.availableStreams = reader->readOrDefault(3 /*Available_Streams*/, 0);
+    request.bFECCapability = reader->readOrDefault(1 /*FEC Capability*/, 0);
+    status = DisplayPort::extractGUID(reader, &request.guid);
+    request.PBN = reader->readOrDefault(16/*Available_PBN*/, 0);
+
+    // The action will be implemented by the event sink.
+    this->sink->messageProcessed(this);
+    return status;
+}
+
+//
+// REMOTE_DPCD_READ 0x20
+//
+void RemoteDpcdReadMessage::set
+(
+    const Address & target,
+    unsigned port,
+    unsigned dpcdAddress,
+    unsigned nBytesToRead
+)
+{
+    clear();
+
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    // Write request identifier
+    writer.write(0/*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    // Write request data
+    writer.write(port, 4);
+    writer.write(dpcdAddress, 20);
+    writer.write(nBytesToRead, 8);
+
+    encodedMessage.isPathMessage = false;
+    encodedMessage.isBroadcast = false;
+    encodedMessage.address = target;
+    sinkPort = port;
+}
+
+ParseResponseStatus RemoteDpcdReadMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    reader->readOrDefault(4 /*zeroes*/, 0);
+    reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+    reply.numBytesReadDPCD = reader->readOrDefault(8 /*Num_Of_Bytes_Read*/, 0x0);
+    for (unsigned i = 0; i < reply.numBytesReadDPCD; i++)
+    {
+        reply.readData[i] = reader->readOrDefault(8 /*data*/, 0x0);
+    }
+
+    if (this->getSinkPort() != reply.portNumber)
+        return ParseResponseWrong;
+
+    return ParseResponseSuccess;
+}
+
+//
+// REMOTE_DPCD_WRITE 0x21
+//
+void RemoteDpcdWriteMessage::set
+(
+    const Address & target,
+    unsigned port,
+    unsigned dpcdAddress,
+    unsigned nBytesToWrite,
+    const NvU8 * writeData
+)
+{
+    clear();
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    DP_ASSERT(writeData || (!nBytesToWrite));
+
+    // Write request identifier
+    writer.write(0/*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    // Write request data
+    writer.write(port, 4);
+    writer.write(dpcdAddress, 20);
+    writer.write(nBytesToWrite, 8);
+
+    for (unsigned i = 0; i < nBytesToWrite; i++)
+    {
+        writer.write(writeData[i], 8);
+    }
+
+    encodedMessage.isPathMessage = false;
+    encodedMessage.isBroadcast = false;
+    encodedMessage.address = target;
+    sinkPort = port;
+}
+
+ParseResponseStatus RemoteDpcdWriteMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    reader->readOrDefault(4 /*zeroes*/, 0);
+    unsigned portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+
+    DP_ASSERT(portNumber == this->sinkPort);
+    DP_USED(portNumber);
+
+    if (this->getSinkPort() != portNumber)
+        return ParseResponseWrong;
+
+    return ParseResponseSuccess;
+}
+
+//
+// REMOTE_I2C_READ 0x22
+//
+void RemoteI2cReadMessage::set
+(
+    const Address & target,
+    unsigned nWriteTransactions,
+    unsigned port,
+    I2cWriteTransaction* transactions,
+    unsigned readI2cDeviceId,
+    unsigned nBytesToRead
+)
+{
+    clear();
+
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    DP_ASSERT(transactions || (!nWriteTransactions));
+
+    // Write request identifier
+    writer.write(0 /*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    // Write request specific data
+    writer.write(port, 4);
+    writer.write(0/*zeroes*/, 2);
+    writer.write(nWriteTransactions, 2);
+
+    for (unsigned i = 0; i < nWriteTransactions; i++)
+    {
+        writer.write(0/*zero*/, 1);
+        writer.write(transactions[i].WriteI2cDeviceId, 7);
+        writer.write(transactions[i].NumBytes, 8);
+        for (unsigned j = 0; j < transactions[i].NumBytes; j++)
+        {
+            writer.write(transactions[i].I2cData[j], 8);
+        }
+        writer.write(0/*zeroes*/, 3);
+        writer.write(transactions[i].NoStopBit ? 1 : 0, 1);
+        writer.write(transactions[i].I2cTransactionDelay, 4);
+    }
+
+    writer.write(0/*zero*/, 1);
+    writer.write(readI2cDeviceId, 7);
+    writer.write(nBytesToRead, 8);
+
+    encodedMessage.isPathMessage = false;
+    encodedMessage.isBroadcast = false;
+    encodedMessage.address = target;
+    sinkPort = port;
+}
+
+ParseResponseStatus RemoteI2cReadMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    reader->readOrDefault(4 /*zeroes*/, 0);
+    reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+    reply.numBytesReadI2C = reader->readOrDefault(8 /*Num_Of_Bytes_Read*/, 0x0);
+    for (unsigned i = 0; i < reply.numBytesReadI2C; i++)
+    {
+        reply.readData[i] = reader->readOrDefault(8 /*data*/, 0x0);
+    }
+
+    if (this->getSinkPort() != reply.portNumber)
+        return ParseResponseWrong;
+
+    return ParseResponseSuccess;
+}
+
+//
+// REMOTE_I2C_WRITE 0x23
+//
+void RemoteI2cWriteMessage::set
+(
+    const Address & target,
+    unsigned port,
+    unsigned writeI2cDeviceId,
+    unsigned nBytesToWrite,
+    unsigned char* writeData
+)
+{
+    clear();
+
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    DP_ASSERT(writeData || (!nBytesToWrite));
+
+    // Write request identifier
+    writer.write(0 /*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    // Write request data
+    writer.write(port, 4);
+    writer.write(0/*zero*/, 5);
+    writer.write(writeI2cDeviceId, 7);
+    writer.write(nBytesToWrite, 8);
+
+    for (unsigned i = 0; i < nBytesToWrite; i++)
+    {
+        writer.write(writeData[i], 8);
+    }
+
+    encodedMessage.isPathMessage = false;
+    encodedMessage.isBroadcast = false;
+    encodedMessage.address = target;
+    sinkPort = port;
+}
+
+ParseResponseStatus RemoteI2cWriteMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    reader->readOrDefault(4 /*zeroes*/, 0);
+    reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+
+    if (this->getSinkPort() != reply.portNumber)
+        return ParseResponseWrong;
+
+    return ParseResponseSuccess;
+}
+//
+// POWER_UP_PHY 0x24
+//
+void PowerUpPhyMessage::set
+(
+    const Address & target,
+    unsigned port,
+    bool entirePath
+)
+{
+    clear();
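+
+    // For reference: the POWER_UP_PHY request body is just the request-id
+    // byte plus one byte holding Port_Number (4 bits) and zero padding; the
+    // 'entirePath' flag travels in the path-message header bit set below,
+    // not in the request body.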
+
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    // Write request identifier
+    writer.write(0 /*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    // Write request specific data
+    writer.write(port, 4);
+    writer.write(0 /*zero*/, 4);
+
+    encodedMessage.isPathMessage = entirePath;
+    encodedMessage.isBroadcast = false;
+    encodedMessage.address = target;
+    sinkPort = port;
+}
+
+ParseResponseStatus PowerUpPhyMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+    reader->readOrDefault(4 /*zeroes*/, 0);
+
+    if (this->getSinkPort() != reply.portNumber)
+        return ParseResponseWrong;
+
+    return ParseResponseSuccess;
+}
+
+//
+// POWER_DOWN_PHY 0x25
+//
+void PowerDownPhyMessage::set
+(
+    const Address & target,
+    unsigned port,
+    bool entirePath
+)
+{
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    // Write request identifier
+    writer.write(0 /*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    // Write request specific data
+    writer.write(port, 4);
+    writer.write(0/*zeros*/, 4);
+
+    encodedMessage.isPathMessage = entirePath;
+    encodedMessage.isBroadcast = false;
+    encodedMessage.address = target;
+    sinkPort = port;
+}
+
+ParseResponseStatus PowerDownPhyMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+    reader->readOrDefault(4 /*zeroes*/, 0);
+
+    if (this->getSinkPort() != reply.portNumber)
+        return ParseResponseWrong;
+
+    return ParseResponseSuccess;
+}
+
+//
+// SINK_EVENT_NOTIFY 0x30
+//
+
+SinkEventNotifyMessage::SinkEventNotifyMessage(MessageReceiverEventSink * sink, unsigned requestId)
+: MessageReceiver(sink, 0x30 /*request id*/)
+{
+}
+
+bool SinkEventNotifyMessage::processByType(EncodedMessage * message, BitStreamReader * reader)
+{
+    return true;
+}
+
+//
+// QUERY_STREAM_ENCRYPTION_STATUS 0x38
+// Follows the SCR DP1.2 Query Stream Encryption Status Definition v0.4.
+//
+void QueryStreamEncryptionMessage::set
+(
+    const Address & target,
+    unsigned streamId,
+    NvU8* clientId,
+    StreamEvent streamEvent,
+    bool streamEventMask,
+    StreamBehavior streamBehavior,
+    bool streamBehaviorMask
+)
+{
+    clear();
+
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    // Write request identifier
+    writer.write(0/*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    // Write message request body
+    writer.write(streamId, 8);
+    for (unsigned i = 0; i < 7; i++)
+    {
+        writer.write(clientId[i], 8);
+    }
+
+    writer.write(streamEvent, 2);
+    writer.write(streamEventMask ? 1 : 0, 1);
+    writer.write(streamBehavior, 2);
+    writer.write(streamBehaviorMask ? 1 : 0, 1);
+    writer.write(0 /*zeroes*/, 2);
+
+    encodedMessage.isPathMessage = false;
+    encodedMessage.isBroadcast = false;
+    encodedMessage.address = target;
+}
+
+ParseResponseStatus QueryStreamEncryptionMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    reply.streamState = (StreamState)reader->readOrDefault(2 /*Stream_State*/, 0x0);
+    reply.repeaterFuncPresent = !!reader->readOrDefault(1 /*Stream_Repeater_Function*/, 0x0);
+    reply.encryption = !!reader->readOrDefault(1 /*Stream_Encryption*/, 0x0);
+    reply.authentication = !!reader->readOrDefault(1 /*Stream_Authentication*/, 0x0);
+    reader->readOrDefault(3 /*zero*/, 0);
+    reply.sinkType = (OutputSinkType)reader->readOrDefault(3 /*Stream_Output_Sink_Type*/, 0x0);
+    reply.cpType = (OutputCPType)reader->readOrDefault(2 /*Stream_Output_CP_Type*/, 0x0);
+    reader->readOrDefault(2 /*zeroes*/, 0);
+    reply.signedLPrime = !!reader->readOrDefault(1 /*Signed*/, 0x0);
+    reply.streamId = (NvU8)reader->readOrDefault(8/*Stream_ID*/, 0x0);
+
+    return ParseResponseSuccess;
+}
+
+I2cWriteTransaction::I2cWriteTransaction
+(
+    unsigned WriteI2cDeviceId,
+    unsigned NumBytes,
+    unsigned char * buffer,
+    bool NoStopBit,
+    unsigned I2cTransactionDelay
+)
+{
+    this->WriteI2cDeviceId = WriteI2cDeviceId;
+    this->NumBytes = NumBytes;
+    this->NoStopBit = NoStopBit;
+    this->I2cTransactionDelay = I2cTransactionDelay;
+    this->I2cData = buffer;
+}
+
+I2cWriteTransaction::I2cWriteTransaction():
+WriteI2cDeviceId(0), NumBytes(0), I2cData(0), NoStopBit(0), I2cTransactionDelay(0)
+{
+}
+
diff --git a/src/common/displayport/src/dp_messageheader.cpp b/src/common/displayport/src/dp_messageheader.cpp
new file mode 100644
index 0000000..caaa186
--- /dev/null
+++ b/src/common/displayport/src/dp_messageheader.cpp
@@ -0,0 +1,86 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort********************************\
+*                                                                           *
+* Module: dp_messageheader.cpp                                              *
+*    DP message header parser                                               *
+*                                                                           *
+\***************************************************************************/
+
+#include "dp_internal.h"
+#include "dp_bitstream.h"
+#include "dp_crc.h"
+#include "dp_messageheader.h"
+#include "dp_printf.h"
+
+bool DisplayPort::decodeHeader(BitStreamReader * reader, MessageHeader * header, const Address & address)
+{
+    unsigned startOffset = reader->offset();
+    int LCT, i;
+
+    //
+    // Read the RAD
+    //
+    LCT = reader->readOrDefault(4, 0);
+    reader->readOrDefault(4, 0);
+
+    header->address = address;
+
+    for (i = 0; i < LCT - 1; i++)
+    {
+        header->address.append(reader->readOrDefault(4, 0));
+    }
+
+    reader->align(8);
+
+    //
+    // Read flags
+    //
+    header->isBroadcast = !!reader->readOrDefault(1, 0);
+    header->isPathMessage = !!reader->readOrDefault(1, 0);
+    header->payloadBytes = reader->readOrDefault(6, 0);
+
+    header->isTransactionStart = !!reader->readOrDefault(1, 0);
+    header->isTransactionEnd = !!reader->readOrDefault(1, 0);
+    reader->readOrDefault(1, 0);
+    header->messageNumber = reader->readOrDefault(1, 0);
+
+    // Build a bit reader for the slice of header we just processed.
+    BitStreamReader crcReader(reader->buffer(), startOffset, reader->offset());
+
+    if (reader->readOrDefault(4, (NvU32)~0) != dpCalculateHeaderCRC(&crcReader))
+    {
+        // Corrupt packet received
+        char buffer[48*3+1];
+        dpHexDump(&buffer[0], sizeof(buffer), (NvU8*)reader->buffer() + startOffset, reader->offset() - startOffset);
+        DP_PRINTF(DP_ERROR, "DP-MM> Corrupt message transaction. Expected CRC %d. Message = {%s}", dpCalculateHeaderCRC(&crcReader), buffer);
+
+        return false;
+    }
+
+    header->headerSizeBits = reader->offset() - startOffset;
+    return true;
+}
diff --git a/src/common/displayport/src/dp_messages.cpp b/src/common/displayport/src/dp_messages.cpp
new file mode 100644
index 0000000..aed50a5
--- /dev/null
+++ b/src/common/displayport/src/dp_messages.cpp
@@ -0,0 +1,626 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_messages.cpp                                                   *
+*    Encoding for aux common messages.                                      *
+*                                                                           *
+\***************************************************************************/
+#include "dp_internal.h"
+#include "dp_bitstream.h"
+#include "dp_splitter.h"
+#include "dp_messages.h"
+#include "dp_merger.h"
+#include "dp_list.h"
+#include "dp_tracing.h"
+#include "dp_printf.h"
+
+using namespace DisplayPort;
+namespace DisplayPort
+{
+    GenericMessageCompletion::GenericMessageCompletion() :
+        failed(false), completed(false)
+    {
+        // Initialize nakData separately.
+        nakData.reason = NakUndefined;
+        nakData.nak_data = 0;
+        // nakData.guid is initialized in its own constructor.
+    }
+    void GenericMessageCompletion::messageFailed(MessageManager::Message * from, NakData * data)
+    {
+        nakData = *data;
+        failed = true;
+        completed = true;
+    }
+    void GenericMessageCompletion::messageCompleted(MessageManager::Message * from)
+    {
+        failed = false;
+        completed = true;
+    }
+};
+
+//
+// Transmit a message and wait for the response in place.
+//
+bool MessageManager::send(MessageManager::Message * message, NakData & nakData)
+{
+    GenericMessageCompletion completion;
+    Address::StringBuffer sb;
+    DP_USED(sb);
+
+    NvU64 startTime, elapsedTime;
+    message->bBusyWaiting = true;
+    message->setMessagePriority(NV_DP_SBMSG_PRIORITY_LEVEL_1);
+    post(message, &completion);
+    startTime = timer->getTimeUs();
+    do
+    {
+        hal->updateDPCDOffline();
+        if (hal->isDpcdOffline())
+        {
+            DP_PRINTF(DP_WARNING, "DP-MM> Device went offline while waiting for a reply; ignoring message %p (ID = %02X, target = %s)",
+                      message, message->requestIdentifier, ((message->state).target).toString(sb));
+            completion.nakData.reason = NakDpcdFail;
+            nakData = completion.nakData;
+            completion.failed = true;
+            break;
+        }
+
+        hal->notifyIRQ();
+        if (hal->interruptDownReplyReady())
+            IRQDownReply();
+
+        if (completion.completed)
+        {
+            nakData = completion.nakData;
+            break;
+        }
+
+        elapsedTime = timer->getTimeUs() - startTime;
+
+        if (elapsedTime > (DPCD_MESSAGE_REPLY_TIMEOUT * 1000))
+        {
+            message->expired(NULL);
+            nakData.reason = NakTimeout;
+            break;
+        }
+
+        // Sleep while processing timer callbacks.
+        timer->sleep(1);
+    } while (true);
+
+    return !completion.failed;
+}
+
+bool DisplayPort::extractGUID(BitStreamReader * reader, GUID * guid)
+{
+    for (unsigned i = 0; i < 128; i += 8)
+    {
+        unsigned data;
+        if (!reader->read(&data, 8))
+        {
+            return false;
+        }
+
+        guid->data[i/8] = (NvU8)data;
+    }
+
+    return true;
+}
+
+void MessageManager::messagedReceived(IncomingTransactionManager * from, EncodedMessage * message)
+{
+    if (from == &mergerUpRequest)
+    {
+        onUpRequestReceived(true, message);
+    }
+    else
+    {
+        onDownReplyReceived(true, message);
+    }
+}
+
+void MessageManager::Message::splitterFailed(OutgoingTransactionManager * from)
+{
+    //
+    // Message failed
+    //
+    NakData nakData;
+    nakData.reason = NakTimeout;
+    MessageManager * parent = this->parent;
+
+    if (sink)
+        sink->messageFailed(this, &nakData);
+
+    if (from == &parent->splitterDownRequest)
+    {
+        //
+        // Tell the message manager it may begin sending the next message.
+        //
+        parent->transmitAwaitingDownRequests();
+    }
+    else
+    {
+        parent->transmitAwaitingUpReplies();
+    }
+}
+
+void MessageManager::Message::splitterTransmitted(OutgoingTransactionManager * from)
+{
+    bTransmitted = true;
+    MessageManager * parent = this->parent;
+
+    if (from == &parent->splitterDownRequest)
+    {
+        // The client will busy-wait for the message to complete, so we don't need the countdown timer.
+ if (!bBusyWaiting) + { + // Start the countdown timer for the reply + parent->timer->queueCallback(this, "SPLI", DPCD_MESSAGE_REPLY_TIMEOUT); + } + // Tell the message manager he may begin sending the next message + parent->transmitAwaitingDownRequests(); + } + else // UpReply + { + if (sink) + sink->messageCompleted(this); // This is the end for an up reply + + parent->transmitAwaitingUpReplies(); + } + +} + +// Since transmit DPCD_MESSAGE_REPLY_TIMEOUT time has elapsed. +// - Let's assume the message was not replied to +void MessageManager::Message::expired(const void * tag) +{ + Address::StringBuffer sb; + DP_USED(sb); + + DP_PRINTF(DP_WARNING, "DP-MM> Message transmit time expired on message %p (ID = %02X, target = %s)", + (Message*)this, ((Message*)this)->requestIdentifier, (((Message*)this)->state.target).toString(sb)); + + Address::NvU32Buffer addrBuffer; + dpMemZero(addrBuffer, sizeof(addrBuffer)); + (((Message*)this)->state.target).toNvU32Buffer(addrBuffer); + NV_DPTRACE_WARNING(MESSAGE_EXPIRED, ((Message*)this)->requestIdentifier, (((Message*)this)->state.target).size(), + addrBuffer[0], addrBuffer[1], addrBuffer[2], addrBuffer[3]); + + NakData nakData; + nakData.reason = NakTimeout; + + MessageManager * parent = this->parent; + + DP_ASSERT(parent); + if (parent && !parent->isBeingDestroyed) + { + parent->awaitingReplyDownRequest.remove(this); + parent->clearPendingMsg(); + parent->transmitAwaitingDownRequests(); + parent->transmitAwaitingUpReplies(); + } + + if (sink) + sink->messageFailed(this, &nakData); +} + +// +// Enqueue the next message to the splitterDownRequest +// +void MessageManager::transmitAwaitingDownRequests() +{ + for (ListElement * i = notYetSentDownRequest.begin(); i!=notYetSentDownRequest.end(); ) + { + Message * m = (Message *)i; + i = i->next; // Do this first since we may unlink the current node + + if (awaitingReplyDownRequest.isEmpty()) + { + // + // Set the message number, and unlink from the outgoing queue + // + m->encodedMessage.messageNumber = 0; + m->state.messageNumber = 0; + + notYetSentDownRequest.remove(m); + awaitingReplyDownRequest.insertBack(m); + + // + // This call can cause transmitAwaitingDownRequests to be called again + // + bool sent = splitterDownRequest.send(m->encodedMessage, m); + DP_ASSERT(sent); + + return; + } + } +} + +// +// Enqueue the next message to the splitterUpReply +// +void MessageManager::transmitAwaitingUpReplies() +{ + for (ListElement * i = notYetSentUpReply.begin(); i!=notYetSentUpReply.end(); ) + { + Message * m = (Message *)i; + i = i->next; // Do this first since we may unlink the current node + + notYetSentUpReply.remove(m); + + // + // This call can cause transmitAwaitingUpReplies to be called again + // + bool sent = splitterUpReply.send(m->encodedMessage, m); + DP_ASSERT(sent); + } +} + +void MessageManager::postReply(Message * message, Message::MessageEventSink * sink) +{ + post(message, sink, true); +} + +void MessageManager::cancelAllByType(unsigned type) +{ + for (ListElement * i = notYetSentDownRequest.begin(); i!=notYetSentDownRequest.end(); ) + { + Message * m = (Message *)i; + i = i->next; + + if (m->requestIdentifier == type) + notYetSentDownRequest.remove(m); + } + + for (ListElement * i = awaitingReplyDownRequest.begin(); i!=awaitingReplyDownRequest.end(); ) + { + Message * m = (Message *)i; + i = i->next; + + if (m->requestIdentifier == type) + awaitingReplyDownRequest.remove(m); + } +} + +void MessageManager::cancelAll(Message * message) +{ + for (ListElement * i = 
notYetSentDownRequest.begin(); i != notYetSentDownRequest.end(); )
+    {
+        Message * m = (Message *)i;
+        i = i->next;
+
+        if (m == message && m->requestIdentifier == message->requestIdentifier)
+            notYetSentDownRequest.remove(m);
+    }
+
+    for (ListElement * i = awaitingReplyDownRequest.begin(); i != awaitingReplyDownRequest.end(); )
+    {
+        Message * m = (Message *)i;
+        i = i->next;
+
+        if (m == message && m->requestIdentifier == message->requestIdentifier)
+            awaitingReplyDownRequest.remove(m);
+    }
+}
+
+void MessageManager::post(Message * message, Message::MessageEventSink * sink, bool transmitReply)
+{
+    DP_ASSERT(!isBeingDestroyed && "You may not post messages in response to a shutdown");
+
+    if (isPaused)
+        return;
+
+    //
+    // Initialize the fields
+    //
+    message->sink = sink;
+    message->bTransmitted = false;
+
+    //
+    // Queue the message on the outgoing queue.
+    // Later on we'll walk the queue and make sure
+    // we have at most two outstanding messages PER
+    // target address. This is how the message
+    // number is decided.
+    //
+
+    message->parent = this;
+    message->transmitReply = transmitReply;
+    if (message->encodedMessage.isBroadcast)
+    {
+        // If it's a broadcast message, the target is the immediate branch.
+        Address addr;
+        addr.clear();
+        addr.append(0);
+        message->state.target = addr;
+    }
+    else
+        message->state.target = message->encodedMessage.address;
+
+    if ( transmitReply )
+    {
+        notYetSentUpReply.insertBack(message);
+        transmitAwaitingUpReplies();
+    }
+    else
+    {
+        //
+        // If the list is empty or the incoming message has the least priority possible (DEFAULT priority),
+        // then just add the incoming message to the back of the list.
+        // Otherwise, find the right location by traversing the list.
+        //
+        if (message->messagePriority == NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT || notYetSentDownRequest.isEmpty())
+        {
+            notYetSentDownRequest.insertBack(message);
+        }
+        else
+        {
+            ListElement *tmp = notYetSentDownRequest.last();
+            Message *msg = (Message*) notYetSentDownRequest.last();
+            while ((msg->prev != tmp) && (msg->messagePriority < message->messagePriority))
+            {
+                msg = (Message*)msg->prev;
+            }
+            notYetSentDownRequest.insertBefore(msg->next, message);
+        }
+        transmitAwaitingDownRequests();
+    }
+}
+
+void MessageManager::onUpRequestReceived(bool status, EncodedMessage * message)
+{
+    if (!status)
+    {
+        return;
+    }
+
+    //
+    // Broadcast the up-request message to all
+    // the receivers on messageReceivers
+    //
+    for (ListElement * i = messageReceivers.begin(); i != messageReceivers.end(); i = i->next)
+    {
+        MessageReceiver * rcr = (MessageReceiver *)i;
+        if (rcr->process((EncodedMessage *)message))
+        {
+            return;
+        }
+    }
+
+    DP_ASSERT(0 && "Warning: Unknown upstream UP_REQ message");
+}
+
+
+void MessageManager::onDownReplyReceived(bool status, EncodedMessage * message)
+{
+    if (!status)
+    {
+        return;
+    }
+
+    //
+    // Broadcast the down-reply message to all
+    // the receivers on awaitingReplyDownRequest
+    //
+    for (ListElement * i = awaitingReplyDownRequest.begin(); i != awaitingReplyDownRequest.end(); i = i->next)
+    {
+        Message * messageAwaitingReply = (Message *)i;
+
+        if (messageAwaitingReply->state.target == message->address &&
+            messageAwaitingReply->state.messageNumber == message->messageNumber)
+        {
+            awaitingReplyDownRequest.remove(messageAwaitingReply);
+            if (messageAwaitingReply->parseResponse(message) == ParseResponseWrong)
+            {
+                //
+                // parseResponse() returns ParseResponseWrong when the 'Request_Identifier' of the down
+                // request message and the down reply message are mismatched. So insert the message back
+                // into the waiting queue and wait for the correct down reply message.
+                //
+                awaitingReplyDownRequest.insertBack(messageAwaitingReply);
+            }
+
+            goto nextMessage;
+        }
+    }
+
+    DP_PRINTF(DP_WARNING, "DP-MM> Warning: Unmatched reply message");
+nextMessage:
+    transmitAwaitingUpReplies();
+    transmitAwaitingDownRequests();
+}
+
+MessageManager::~MessageManager()
+{
+    // This causes any posts that clients may attempt from here on to fail
+    isBeingDestroyed = true;
+
+    //
+    // The message manager should not be shut down until
+    // all outgoing messages are in the cancelled state
+    //
+    NakData nakUndef;
+    nakUndef.reason = NakUndefined;
+
+    for (ListElement * i = notYetSentDownRequest.begin(); i != notYetSentDownRequest.end(); )
+    {
+        ListElement * next = i->next;
+        if (((Message *)i)->sink)
+            ((Message *)i)->sink->messageFailed(((Message *)i), &nakUndef);
+        i = next;
+    }
+    if (!notYetSentDownRequest.isEmpty())
+    {
+
+        for (ListElement * i = notYetSentDownRequest.begin(); i != notYetSentDownRequest.end(); )
+        {
+            ListElement * next = i->next;
+            DP_PRINTF(DP_WARNING, "Down request message type 0x%x client is not cleaning up.", ((Message *)i)->requestIdentifier);
+            i = next;
+        }
+    }
+
+    for (ListElement * i = notYetSentUpReply.begin(); i != notYetSentUpReply.end(); )
+    {
+        ListElement * next = i->next;
+        if (((Message *)i)->sink)
+            ((Message *)i)->sink->messageFailed(((Message *)i), &nakUndef);
+        i = next;
+    }
+    if (!notYetSentUpReply.isEmpty())
+    {
+
+        for (ListElement * i = notYetSentUpReply.begin(); i != notYetSentUpReply.end(); )
+        {
+            ListElement * next = i->next;
+            DP_PRINTF(DP_WARNING, "Up reply message type 0x%x client is not cleaning up.", ((Message *)i)->requestIdentifier);
+            i = next;
+        }
+    }
+
+    for (ListElement * i = awaitingReplyDownRequest.begin(); i != awaitingReplyDownRequest.end(); )
+    {
+        ListElement * next = i->next;
+        if (((Message *)i)->sink)
+            ((Message *)i)->sink->messageFailed(((Message *)i), &nakUndef);
+        i = next;
+    }
+    if (!awaitingReplyDownRequest.isEmpty())
+    {
+
+        for (ListElement * i = awaitingReplyDownRequest.begin(); i != awaitingReplyDownRequest.end(); )
+        {
+            ListElement * next = i->next;
+            DP_PRINTF(DP_WARNING, "Down request message type 0x%x client is not cleaning up.", ((Message *)i)->requestIdentifier);
+            i = next;
+        }
+    }
+
+    // Do not reclaim the memory of our registered receivers
+    while (!messageReceivers.isEmpty())
+        messageReceivers.remove(messageReceivers.front());
+}
+
+ParseResponseStatus MessageManager::Message::parseResponse(EncodedMessage * message)
+{
+    BitStreamReader reader(&message->buffer, 0, message->buffer.length*8);
+
+    // Read ReplyType
+    bool replyNacked = !!reader.readOrDefault(1, true);
+
+    // Read RequestIdentifier
+    unsigned requestId = reader.readOrDefault(7, 0);
+    if (requestId != requestIdentifier)
+    {
+        DP_PRINTF(DP_NOTICE, "DP-MM> Requested = %x Received = %x", requestIdentifier, requestId);
+        DP_ASSERT(0 && "Request identifier doesn't match");
+        return ParseResponseWrong;
+    }
+
+    if (replyNacked)
+    {
+        NakData nakData;
+
+        // failure handler will parse the NAK response and do the required action
+        if (DisplayPort::extractGUID(&reader, &nakData.guid) == false)
+        {
+            DP_ASSERT(0 && "Invalid GUID in NAK");
+        }
+
+        nakData.reason = (NakReason)reader.readOrDefault(8, 0);
+        nakData.nak_data = reader.readOrDefault(8, 0);
+
+        // call specific handler after parsing.
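+        // (NAK reply layout, as parsed above: 1-bit reply type (1 = NAK),
+        //  7-bit Request_Identifier, 16-byte GUID, 1-byte NAK reason and
+        //  1-byte NAK data.)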
+ parent->timer->cancelCallbacks(this); + + MessageManager * parent = this->parent; + + if (sink) + sink->messageFailed(this, &nakData); + + parent->transmitAwaitingDownRequests(); + + return ParseResponseSuccess; + } + + ParseResponseStatus parseResult = parseResponseAck(message, &reader); + + if (parseResult == ParseResponseSuccess) + { + parent->timer->cancelCallbacks(this); + + if (this->sink) + { + MessageEventSink * msgSink = this->sink; + msgSink->messageCompleted(this); + } + } + + return parseResult; +} + +void MessageManager::Message::MessageEventSink::messageFailed(Message * from, NakData * nakData) +{ + +} + +void MessageManager::registerReceiver(MessageReceiver * receiver) +{ + messageReceivers.insertBack(receiver); +} + + +bool MessageManager::MessageReceiver::process(EncodedMessage * message) +{ + BitStreamReader reader(&message->buffer, 0, message->buffer.length*8); + + // Read RequestIdentifier + reader.readOrDefault(1, 0); + unsigned reqId = reader.readOrDefault(7, 0); + + if (reqId != this->getRequestId()) + { + // + // This receiver is not meant for this message; + // let the next in the queue handle it. + // + return false; + } + + this->address = message->address; + + // processByType should parse the request, create a response and queue it if needed + bool status = processByType(message, &reader); + if (!status) + { + // + // if we are here; we could get a receiver to handle the request + // but something else went wrong. + // + DP_ASSERT(0); + } + + return true; +} diff --git a/src/common/displayport/src/dp_mst_edid.cpp b/src/common/displayport/src/dp_mst_edid.cpp new file mode 100644 index 0000000..161f615 --- /dev/null +++ b/src/common/displayport/src/dp_mst_edid.cpp @@ -0,0 +1,189 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_mst_edid.cpp                                                   *
+*    Implementation of Multi Stream EDID reads                              *
+*                                                                           *
+\***************************************************************************/
+
+#include "dp_internal.h"
+#include "dp_edid.h"
+#include "dp_address.h"
+#include "dp_messagecodings.h"
+#include "dp_messages.h"
+#include "dp_printf.h"
+
+using namespace DisplayPort;
+
+EdidReadMultistream::~EdidReadMultistream()
+{
+    timer->cancelCallbacks(this);
+}
+
+void EdidReadMultistream::startReadingEdid()
+{
+    NvU8 offset = 0;
+    I2cWriteTransaction i2cWriteTransactions[1];
+    Address::StringBuffer buffer;
+    DP_USED(buffer);
+    DP_PRINTF(DP_NOTICE, "%s(): start for %s", __FUNCTION__,
+              topologyAddress.toString(buffer));
+
+    edidReaderManager.reset();
+    edid.resetData();
+
+    DDCAddress = ddcAddrList[ddcIndex];
+
+    // Set the offset within segment 0; no need to select a segment, because we're starting the EDID read.
+    i2cWriteTransactions[0] = I2cWriteTransaction(DDCAddress >> 1,
+                                                  sizeof(offset),
+                                                  &offset,
+                                                  true);
+    NvU8 nWriteTransactions = 1;
+
+    remoteI2cRead.set(topologyAddress.parent(),      // topology Address
+                      nWriteTransactions,            // number of write transactions
+                      topologyAddress.tail(),        // port of Device
+                      i2cWriteTransactions,          // list of write transactions
+                      DDCAddress >> 1,               // right shifted DDC Address (request identifier in spec)
+                      EDID_BLOCK_SIZE);              // requested size
+
+    manager->post(&remoteI2cRead, this);
+}
+
+void EdidReadMultistream::messageCompleted(MessageManager::Message * from)
+{
+    RemoteI2cReadMessage* I2CReadMessage = (RemoteI2cReadMessage*)from;
+    unsigned char * data = 0;
+    unsigned numBytesRead;
+    Address::StringBuffer buffer;
+    DP_USED(buffer);
+
+    NvU8 seg;
+    NvU8 offset;
+    DP_PRINTF(DP_NOTICE, "%s for %s", __FUNCTION__, topologyAddress.toString(buffer));
+
+    DP_ASSERT(DDCAddress && "DDCAddress must not be 0");
+
+    data = I2CReadMessage->replyGetI2CData(&numBytesRead);
+    DP_ASSERT(data);
+
+    // This is not required, but I'd like to keep things simple at first submission.
+    DP_ASSERT(numBytesRead == EDID_BLOCK_SIZE);
+    edidReaderManager.postReply(data, numBytesRead, true);
+
+    if (edidReaderManager.readNextRequest(seg, offset))
+    {
+        readNextBlock(seg, offset);
+    }
+    else // EDID read is finished or failed
+    {
+        edidAttemptDone(edidReaderManager.readIsComplete() && edid.verifyCRC());
+    }
+}
+
+void EdidReadMultistream::edidAttemptDone(bool succeeded)
+{
+    if (succeeded)
+        sink->mstEdidCompleted(this);
+    else if (ddcIndex + 1 < ddcAddrListSize)
+    {
+        ddcIndex++;
+        startReadingEdid();
+    }
+    else
+        sink->mstEdidReadFailed(this);
+}
+
+void EdidReadMultistream::readNextBlock(NvU8 seg, NvU8 offset)
+{
+    I2cWriteTransaction i2cWriteTransactions[2];
+    Address::StringBuffer buffer;
+    DP_USED(buffer);
+
+    // Ensure that the I2cWriteTransaction setup for segment and offset won't break.
+    DP_ASSERT(sizeof(seg) == 1);
+    DP_ASSERT(sizeof(offset) == 1);
+
+    DP_PRINTF(DP_NOTICE, "%s(): for %s (seg/offset) = %d/%d", __FUNCTION__,
+              topologyAddress.toString(buffer),
+              seg, offset);
+
+    unsigned nWriteTransactions = 2;
+    if (seg)
+    {
+        // select segment
+        i2cWriteTransactions[0] = I2cWriteTransaction(EDID_SEG_SELECTOR_OFFSET >> 1,
+                                                      1, &seg, true);
+        // set offset within segment
+        i2cWriteTransactions[1] = I2cWriteTransaction(DDCAddress >> 1,
+                                                      1, &offset, true);
+    }
+    else
+    {
+        // set offset within segment 0
+        i2cWriteTransactions[0] = I2cWriteTransaction(DDCAddress >> 1, 1, &offset, true);
+        nWriteTransactions = 1;
+    }
+
+    remoteI2cRead.set(topologyAddress.parent(),      // topology Address
+                      nWriteTransactions,            // number of write transactions
+                      topologyAddress.tail(),        // port of Device
+                      i2cWriteTransactions,          // list of write transactions
+                      DDCAddress >> 1,               // right shifted DDC Address (request identifier in spec)
+                      EDID_BLOCK_SIZE);              // requested size
+
+    manager->post(&remoteI2cRead, this, false);
+}
+
+void EdidReadMultistream::expired(const void * tag)
+{
+    Address::StringBuffer buffer;
+    DP_USED(buffer);
+    DP_PRINTF(DP_NOTICE, "%s on %s", __FUNCTION__, topologyAddress.toString(buffer));
+    startReadingEdid();
+}
+
+void EdidReadMultistream::messageFailed(MessageManager::Message * from, NakData * nakData)
+{
+    Address::StringBuffer buffer;
+    DP_USED(buffer);
+    DP_PRINTF(DP_NOTICE, "%s on %s", __FUNCTION__, topologyAddress.toString(buffer));
+
+    if (nakData->reason == NakDefer || nakData->reason == NakTimeout)
+    {
+        if (retries < MST_EDID_RETRIES)
+        {
+            ++retries;
+            timer->queueCallback(this, "EDID", MST_EDID_COOLDOWN);
+        }
+        else
+            edidAttemptDone(false /* failed */);
+    }
+    else
+    {
+        edidAttemptDone(false /* failed */);
+    }
+}
diff --git a/src/common/displayport/src/dp_qse.cpp b/src/common/displayport/src/dp_qse.cpp
new file mode 100644
index 0000000..2ae135a
--- /dev/null
+++ b/src/common/displayport/src/dp_qse.cpp
@@ -0,0 +1,293 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_qse.cpp                                                        *
+*    The DP HDCP Query Stream Encryption.                                   *
+*                                                                           *
+\***************************************************************************/
+
+#include "dp_auxdefs.h"
+
+#include "dp_qse.h"
+#include "dp_internal.h"
+#include "dp_deviceimpl.h"
+#include "dp_connectorimpl.h"
+#include "dp_printf.h"
+#include <nvmisc.h>
+
+using namespace DisplayPort;
+
+NvU64
+QSENonceGenerator::random()
+{
+    NvU64 randomNumber;
+
+    previousRandomLSB = static_cast<NvU32>((NvU64)1664525 * previousRandomLSB + 1013904223);
+    previousRandomMSB = static_cast<NvU32>((NvU64)1664525 * previousRandomMSB + 1013904223);
+
+    randomNumber = ((NvU64)previousRandomMSB << 32) | previousRandomLSB;
+
+    return randomNumber;
+}
+
+void
+QSENonceGenerator::clientIdBuilder
+(
+    NvU64 aN
+)
+{
+    previousRandomMSB = (NvU32)(aN >> 32);
+    previousRandomLSB = (NvU32)(aN & 0xFFFFFFFF);
+}
+
+void
+QSENonceGenerator::makeClientId
+(
+    CLIENTID &clientId
+)
+{
+    // Generate a 56-bit nonce
+    NvU64 rnd = random();
+
+    clientId.data[0] = static_cast<NvU8>( rnd        & 0xFF);
+    clientId.data[1] = static_cast<NvU8>((rnd >>  8) & 0xFF);
+    clientId.data[2] = static_cast<NvU8>((rnd >> 16) & 0xFF);
+    clientId.data[3] = static_cast<NvU8>((rnd >> 24) & 0xFF);
+    clientId.data[4] = static_cast<NvU8>((rnd >> 32) & 0xFF);
+    clientId.data[5] = static_cast<NvU8>((rnd >> 40) & 0xFF);
+    clientId.data[6] = static_cast<NvU8>((rnd >> 48) & 0xFF);
+}
+
+StreamEncryptionStatusDetection::~StreamEncryptionStatusDetection()
+{
+    connector->timer->cancelCallbacks(this);
+}
+
+void
+StreamEncryptionStatusDetection::messageFailed
+(
+    MessageManager::Message *from,
+    NakData *nakData
+)
+{
+    if (from == &qseMessage)
+    {
+        connector->messageManager->clearAwaitingQSEReplyDownRequest();
+
+        if ((retriesSendQSEMessage < DPCD_QUERY_STREAM_MESSAGE_RETRIES) &&
+            (nakData->reason == NakDefer || nakData->reason == NakTimeout))
+        {
+            connector->timer->cancelCallback(parent, &(parent->tagStreamValidation));
+            retriesSendQSEMessage++;
+            sendQSEMessage(parent);
+            connector->timer->queueCallback(parent, &(parent->tagStreamValidation), HDCP_STREAM_VALIDATION_REQUEST_COOLDOWN);
+            return;
+        }
+        //
+        // If messageFailed is called after all retries have expired, then
+        // we should disable HDCP.
+        //
+        else
+        {
+            DP_PRINTF(DP_ERROR, "DP-QSE> Downstream failed to handle %s QSES message",
+                      reason == qseReason_Ssc ? "SSC" : "generic");
+            //
+            // A branch that does not support QSE would end up with HDCP off if we honored
+            // the QSES result even without SSC from it. So, to improve compatibility, we
+            // honor the QSES result (turning HDCP off) only if it was fired for SSC.
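+            // (Concretely, in the code below: for qseReason_Ssc the QSES result
+            //  is honored and encryption is disabled on every active group; for
+            //  any other reason we only mark bIsEncryptionQseValid = false.)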
+ // + if (reason == qseReason_Ssc) + { + for (ListElement * i = connector->activeGroups.begin(); i != connector->activeGroups.end(); i = i->next) + { + GroupImpl * group = (GroupImpl *)i; + if (group->hdcpEnabled) + { + // + // In case of MST, time slots will be deleted and add back for clearing ECF + // This will lead to blank screen momentarily + // Similarly for all other QSES errors + // + group->hdcpSetEncrypted(false, NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_TYPE_0, NV_TRUE, NV_TRUE); + } + } + connector->main->configureHDCPAbortAuthentication(KSV_SIG); + connector->main->configureHDCPDisableAuthentication(); + // Clear HDCP cap for groups and connector and devices. + connector->isHDCPAuthOn = false; + } + else + { + connector->bIsEncryptionQseValid = false; + } + + retriesSendQSEMessage = 0; + parent->streamValidationDone = true; + + //Reset the MessageManager pointer state + resetQseMessageState(); + } + } +} + +void +StreamEncryptionStatusDetection::expired( + const void * tag +) +{ + // Not required as of now. +} + +void +StreamEncryptionStatusDetection::handleQSEDownReply() +{ + if ((connector->bValidQSERequest) && (handleQSEReplyValidation())) + { + parent->streamValidationDone = true; + } + else + { + connector->bValidQSERequest = true; + parent->streamValidationDone = true; + } +} + +void +StreamEncryptionStatusDetection::messageCompleted +( + MessageManager::Message *from +) +{ + if (from == &qseMessage) + { + handleQSEDownReply(); + + //Reset the MessageManager pointer state + resetQseMessageState(); + } +} + +void +StreamEncryptionStatusDetection::sendQSEMessage +( + GroupImpl *group, + QSE_REASON reasonId +) +{ + Address address(0); + CLIENTID clientId; + HDCPState hdcpState = {0}; + + // Get hdcp version to see if hdcp22 QSE or not. + connector->main->configureHDCPGetHDCPState(hdcpState); + setHdcp22Qse(hdcpState.HDCP_State_22_Capable); + + // Check whether repeater or not. + bIsRepeater = hdcpState.HDCP_State_Repeater_Capable; + + //Generate the Pseudo Random number + connector->qseNonceGenerator->makeClientId(clientId); + for (unsigned i = 0 ; i < CLIENT_ID_SIZE; i++) + { + group->clientId[i] = clientId.data[i]; + } + this->reason = reasonId; + group->streamValidationDone = false; + qseMessage.set( address, + group->streamIndex, + clientId.data, + CP_IRQ_ON, + STREAM_EVENT_MASK_ON, + Force_Reauth, + STREAM_BEHAVIOUR_MASK_ON); + connector->messageManager->post(&qseMessage, this); +} + +bool +StreamEncryptionStatusDetection::handleQSEReplyValidation() +{ + if (parent->streamIndex != qseMessage.getStreamId()) + { + DP_PRINTF(DP_ERROR, "DP-QSE> Query the active Stream ID %d, but reply Stream ID %d mismatch.", parent->streamIndex, qseMessage.getStreamId()); + parent->hdcpSetEncrypted(false, NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_TYPE_0, NV_TRUE, NV_TRUE); + return false; + } + + NvU16 streamStatus = 0; + streamStatus = qseMessage.getStreamStatus(); + DP_PRINTF(DP_NOTICE, "DP-QSE> Query the active Stream ID %d. 
The reply streamStatus: %d", parent->streamIndex, streamStatus); + + NvU16 streamState = DRF_VAL(_DP, _HDCP, _STREAM_STATE, streamStatus); + if ((streamState == NV_DP_HDCP_STREAM_STATE_NO_EXIST) || + (streamState == NV_DP_HDCP_STREAM_STATE_ERROR)) + { + DP_PRINTF(DP_ERROR, "DP-QSE> Query the active Stream ID %d, but reply as Stream does not exist or Error/Reserved", parent->streamIndex); + parent->hdcpSetEncrypted(false, NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_TYPE_0, NV_TRUE, NV_TRUE); + return false; + } + else if (streamState == NV_DP_HDCP_STREAM_STATE_NOT_ACTIVE) + { + DP_PRINTF(DP_ERROR, "DP-QSE> Query the active Stream ID %d, but reply as Stream not active", parent->streamIndex); + parent->hdcpSetEncrypted(false, NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_TYPE_0, NV_TRUE, NV_TRUE); + return false; + } + + NvU16 streamAuth = DRF_VAL(_DP, _HDCP, _STREAM_AUTHENTICATION, streamStatus); + if (streamAuth == NV_DP_HDCP_STREAM_AUTHENTICATION_OFF) + { + DP_PRINTF(DP_ERROR, "DP-QSE> Query the Stream ID %d, reply as failed authentication all the way down", parent->streamIndex); + + parent->hdcpSetEncrypted(false, NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_TYPE_0, NV_TRUE, NV_TRUE); + return false; + } + + // Watch here for not over reacting encryption policy here. + NvU16 streamEncrypt = DRF_VAL(_DP, _HDCP, _STREAM_ENCRYPTION, streamStatus); + if (streamEncrypt == NV_DP_HDCP_STREAM_ENCRYPTION_OFF) + { + if (parent->hdcpEnabled) + { + DP_PRINTF(DP_ERROR, "DP-QSE> Query the Stream ID %d, reply as not encryption all the way down", parent->streamIndex); + parent->qseEncryptionStatusMismatch = parent->hdcpEnabled; + parent->hdcpSetEncrypted(false, NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_TYPE_0, NV_TRUE, NV_TRUE); + } + else + return false; + } + + return true; +} + +void +StreamEncryptionStatusDetection::resetQseMessageState() +{ + qseMessage.clear(); +} + +void +StreamEncryptionStatusDetection::setHdcp22Qse(bool bHdcp22Qse) +{ + bIsHdcp22Qse = bHdcp22Qse; + qseMessage.setHdcp22Qse(bHdcp22Qse); +} diff --git a/src/common/displayport/src/dp_splitter.cpp b/src/common/displayport/src/dp_splitter.cpp new file mode 100644 index 0000000..cdcc117 --- /dev/null +++ b/src/common/displayport/src/dp_splitter.cpp @@ -0,0 +1,315 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_splitter.cpp * +* Asynchronous Message Splitter * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_bitstream.h" +#include "dp_splitter.h" +#include "dp_auxdefs.h" +#include "dp_crc.h" +#include "dp_configcaps.h" +#include "dp_printf.h" + +using namespace DisplayPort; + +#define DP_MAX_HEADER_SIZE 16 +// timeout after 110ms with a retry recurring every 5ms for 10 times +#define DOWNSTREAM_RETRY_ON_DEFER_TIMEOUT 110 +#define DOWNSTREAM_RETRY_ON_DEFER_PERIOD 5 +#define DOWNSTREAM_RETRY_ON_DEFER_COUNT 10 + +bool MessageTransactionSplitter::get(Buffer & assemblyBuffer) +{ + unsigned i; + unsigned payloadSize; + bool isTransactionStart, isTransactionEnd; + Address address; + unsigned LCT; + unsigned LCR; + unsigned headerSizeBits; + + assemblyBuffer.reset(); + + // + // Done? + // + if (this->messageOutstanding->buffer.length == this->assemblyTransmitted) + { + return false; + } + + address = this->messageOutstanding->address; + if (this->messageOutstanding->isBroadcast) + { + // no RAD + address.clear(); + LCT = 1; + } + else + { + LCT = address.size(); + } + + // Calculate header size + headerSizeBits = 8 + // LCT/LCR + (((4 * (LCT -1)) + 4) &~ 7) + // byte aligned RAD + 16; + + // + // Pick how much data to send. Header+payloadSize <= 48 bytes. + // + payloadSize = DP_MIN(DPCD_MESSAGEBOX_SIZE - (headerSizeBits+7)/8, /*crc*/1 + this->messageOutstanding->buffer.length - this->assemblyTransmitted); + + // + // Is the first or last transaction in the sequence? + // + isTransactionStart = assemblyTransmitted == 0; + isTransactionEnd = (assemblyTransmitted + payloadSize - 1) == messageOutstanding->buffer.length; + + BitStreamWriter writer(&assemblyBuffer, 0); + + // + // Write the header + // + writer.write(LCT, 4); + + LCR = this->messageOutstanding->isBroadcast ? 6 : LCT > 1 ? LCT - 1 : 0; + + writer.write(LCR, 4); + + // port at i=0 is the outport of source/gpu which should not be included in the RAD in outgoing message header + // if this is a broadcast message; LCT would be 1; hence no RAD. + for (i = 1; i < LCT; i++) + writer.write(address[i], 4); + writer.align(8); + + writer.write(this->messageOutstanding->isBroadcast, 1); + writer.write(this->messageOutstanding->isPathMessage, 1); + writer.write(payloadSize, 6); + + writer.write(isTransactionStart, 1); + writer.write(isTransactionEnd, 1); + writer.write(0, 1); + + DP_ASSERT(messageOutstanding->messageNumber == 0 || messageOutstanding->messageNumber == 1); + writer.write(messageOutstanding->messageNumber, 1); + + // + // Generate 4 bit CRC. 
(Nibble-wise CRC of previous values)
+    //
+    BitStreamReader reader(&assemblyBuffer, 0, writer.offset());
+    writer.write(dpCalculateHeaderCRC(&reader), 4);
+
+    DP_ASSERT(writer.offset() == headerSizeBits && "Header size mismatch");
+    DP_ASSERT((writer.offset() & 7) == 0 && "Packet header must end byte aligned");
+
+    //
+    // Generate body CRC
+    //
+    BitStreamReader bodyReader(&this->messageOutstanding->buffer, this->assemblyTransmitted * 8, (payloadSize - 1) * 8);
+    NvU8 bodyCrc = (NvU8)dpCalculateBodyCRC(&bodyReader);
+
+    // Copy in remaining buffer (leaving room for the CRC)
+    for (i = 0; i < payloadSize - 1; ++i)
+        writer.write(this->messageOutstanding->buffer.data[i + this->assemblyTransmitted], 8);
+    writer.write(bodyCrc, 8);
+
+    this->assemblyTransmitted += payloadSize - 1;
+
+    return true;
+}
+
+void OutgoingTransactionManager::expired(const void * tag)
+{
+    writeToWindow(false);
+}
+
+void OutgoingTransactionManager::cancel(OutgoingTransactionManagerEventSink * sink)
+{
+    if (activeMessage && activeMessage->eventSink == sink)
+        activeMessage->eventSink = 0;
+
+    for (ListElement * el = queuedMessages.begin(); el && el != queuedMessages.end(); el = el->next)
+        if (((OutgoingMessage *)el)->eventSink == sink)
+            ((OutgoingMessage *)el)->eventSink = 0;
+}
+
+bool OutgoingTransactionManager::send( EncodedMessage & payload, OutgoingTransactionManagerEventSink * sink)
+{
+    OutgoingMessage * om = new OutgoingMessage();
+
+    if (!om)
+    {
+        return false;
+    }
+
+    om->eventSink = sink;
+    om->message.swap(payload);
+
+    if (!activeMessage)
+    {
+        activeMessage = om;
+        transactionSplitter.set(&om->message);
+        transactionSplitter.get(this->assemblyBuffer);
+        writeToWindow(true);
+    }
+    else
+    {
+        queuedMessages.insertBack(om);
+    }
+
+    return true;
+}
+
+void OutgoingTransactionManager::writeToWindow( bool firstAttempt)
+{
+    AuxRetry::status result;
+
+    if (!activeMessage || !activeMessage->eventSink)
+        goto findNextMessage;
+
+    result = this->writeMessageBox(assemblyBuffer.data, assemblyBuffer.length);
+
+    if (result == AuxRetry::defer)
+    {
+
+        //
+        // If retries are left, queue one.
+        //
+        if (firstAttempt || retriesLeft)
+        {
+            if (firstAttempt)
+            {
+                // initialize retriesLeft
+                retriesLeft = DOWNSTREAM_RETRY_ON_DEFER_COUNT;
+            }
+
+            retriesLeft--;
+            DP_PRINTF(DP_WARNING, "DP-MM> Messagebox write deferred. Queueing a retry.");
+            this->timer->queueCallback(this, "SPDE", DOWNSTREAM_RETRY_ON_DEFER_PERIOD);
+
+            return;
+        }
+
+        //
+        // Notify the message sender of failure. Keep in mind the sender
+        // might turn around immediately with a queued send.
+        //
+        if (activeMessage)
+        {
+            activeMessage->eventSink->splitterFailed(this);
+        }
+
+        goto findNextMessage;
+    }
+    else if (result == AuxRetry::ack)
+    {
+        //
+        // Split off another chunk and transmit
+        //
+        if (transactionSplitter.get(assemblyBuffer))
+        {
+            writeToWindow(true);
+        }
+        else
+        {
+            //
+            // Notify the message sender of success. Keep in mind the sender
+            // might turn around immediately with a queued send.
+            //
+            if (activeMessage)
+            {
+                activeMessage->eventSink->splitterTransmitted(this);
+            }
+
+            goto findNextMessage;
+        }
+
+        return;
+    }
+
+    //
+    // Notify the message sender of failure. Keep in mind the sender
+    // might turn around immediately with a queued send.
+    //
+    if (activeMessage)
+    {
+        activeMessage->eventSink->splitterFailed(this);
+    }
+
+findNextMessage:
+    //
+    // The old transaction is complete.
Free the memory + // + delete activeMessage; + activeMessage = 0; + + // + // Look for the next transaction + // + if (queuedMessages.isEmpty()) + { + return; + } + else + { + activeMessage = (OutgoingMessage *)queuedMessages.begin(); + queuedMessages.remove(activeMessage); + + transactionSplitter.set(&activeMessage->message); + transactionSplitter.get(this->assemblyBuffer); + writeToWindow(true); + } +} + +OutgoingTransactionManager::OutgoingTransactionManager(Timer * timer) + : timer(timer) +{ + this->activeMessage = 0; +} + +AuxRetry::status DownRequestManager::writeMessageBox(NvU8 * data, size_t length) +{ + return hal->writeDownRequestMessageBox(data, length); +} + +size_t DownRequestManager::getMessageBoxSize() +{ + return hal->getDownRequestMessageBoxSize(); +} + +AuxRetry::status UpReplyManager::writeMessageBox(NvU8 * data, size_t length) +{ + return hal->writeUpReplyMessageBox(data, length); +} + +size_t UpReplyManager::getMessageBoxSize() +{ + return hal->getUpReplyMessageBoxSize(); +} diff --git a/src/common/displayport/src/dp_sst_edid.cpp b/src/common/displayport/src/dp_sst_edid.cpp new file mode 100644 index 0000000..94ec105 --- /dev/null +++ b/src/common/displayport/src/dp_sst_edid.cpp @@ -0,0 +1,343 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_sst_edid.cpp                                                   *
+*    Implementation of Single Stream EDID reads                             *
+*                                                                           *
+\***************************************************************************/
+
+#include "dp_buffer.h"
+#include "dp_auxbus.h"
+#include "dp_internal.h"
+#include "dp_edid.h"
+#include "dp_printf.h"
+
+using namespace DisplayPort;
+
+/*
+* seg    -> 256-byte segment of the EDID
+* offset -> offset within the segment
+*/
+static bool readNextBlock(AuxBus * auxBus, NvU8 seg, NvU8 offset, Buffer & buffer, unsigned & totalRead, unsigned DDCAddress, Timer * timer)
+{
+    AuxBus::Type type = AuxBus::i2cMot;
+    AuxBus::status auxStatus;
+
+    unsigned retries = 0;
+    unsigned sizeRequested;
+    unsigned sizeCompleted;
+    unsigned transactionSize = auxBus->transactionSize();
+    totalRead = 0;
+
+    DP_ASSERT(auxBus);
+    DP_ASSERT(transactionSize > 0);
+
+    // Assert if the EDID offset wasn't advanced in block-length increments
+    DP_ASSERT(offset == 0 || offset == EDID_BLOCK_SIZE);
+
+    sizeRequested = transactionSize;
+    if (!buffer.resize(EDID_BLOCK_SIZE))
+    {
+        return false;
+    }
+
+    DP_ASSERT(sizeof(seg) == 1);
+    DP_ASSERT(sizeof(offset) == 1);
+
+    // only set segment if it is required
+    if (seg)
+    {
+        // start EDID read by specifying appropriate Edid segment id
+        for (unsigned retry = 0; retry < EDID_MAX_AUX_RETRIES; retry++)
+        {
+            auxStatus = auxBus->transaction(AuxBus::write, AuxBus::i2cMot, EDID_SEG_SELECTOR_OFFSET >> 1,
+                                            &seg, sizeof(seg), &sizeCompleted);
+            if (auxStatus == AuxBus::success)
+                break;
+
+            // If deferred due to timeout
+            if (auxStatus == AuxBus::defer)
+            {
+                // Wait for some time between retries
+                timer->sleep(EDID_AUX_WAIT_TIME);
+                continue;
+            }
+
+            return false;
+        }
+    }
+
+    auxStatus = AuxBus::nack;
+
+    for (retries = 0; totalRead < EDID_BLOCK_SIZE;)
+    {
+        //
+        // For a retry, rewrite the offset for the internal read pointer,
+        // except when the previous read auxStatus was an Aux::defer,
+        // since in that case the offset was never incremented by the sink.
+        //
+        if ((auxStatus != AuxBus::success) && (auxStatus != AuxBus::defer))
+        {
+            // start from this offset, need to verify with display with multiple edid blocks
+            for (unsigned retry = 0; retry < EDID_MAX_AUX_RETRIES; retry++)
+            {
+                auxStatus = auxBus->transaction(AuxBus::write, AuxBus::i2cMot, DDCAddress >> 1,
+                                                (NvU8*)(&offset), sizeof(offset), &sizeCompleted);
+                if (auxStatus == AuxBus::success)
+                    break;
+                // If deferred due to timeout
+                if (auxStatus == AuxBus::defer)
+                {
+                    // Wait for some time between retries
+                    timer->sleep(EDID_AUX_WAIT_TIME);
+                    continue;
+                }
+
+                return false;
+            }
+            // if retries exceed EDID_MAX_AUX_RETRIES, give up
+            if (auxStatus != AuxBus::success)
+            {
+                return false;
+            }
+        }
+        // need to change to I2C (not MOT) to read just the last part of the EDID block
+        if (totalRead + transactionSize >= EDID_BLOCK_SIZE)
+            type = AuxBus::i2c;
+
+        sizeRequested = DP_MIN(transactionSize, EDID_BLOCK_SIZE - totalRead);
+        auxStatus = auxBus->transaction(AuxBus::read, type, DDCAddress >> 1,
+                                        &(buffer.data[totalRead]), sizeRequested, &sizeCompleted);
+
+        if (AuxBus::success != auxStatus || (sizeRequested && (sizeCompleted == 0)))
+        {
+            if (retries >= EDID_MAX_AUX_RETRIES)
+                return false;
+
+            DP_PRINTF(DP_WARNING, "DisplayPort: %s: Retrying at totalRead 0x%08x (replyType %x, size %x)",
+                      __FUNCTION__, totalRead, auxStatus, sizeRequested);
+
+            // Wait for some time between retries
+            timer->sleep(EDID_AUX_WAIT_TIME);
+            retries++;
+
+            continue;
+        }
+
+        // Assert when size mismatches and it is not
last block + if ((sizeRequested != sizeCompleted) && + (totalRead + transactionSize < EDID_BLOCK_SIZE)) + { + DP_PRINTF(DP_ERROR, "DisplayPort: %s: dpAux returned edid block smaller than expected. Read from totalRead 0x%08x (replyType %x, size %x)", + __FUNCTION__, totalRead, auxStatus, sizeRequested); + DP_ASSERT(0); + } + + retries = 0; // reset the number of retries + totalRead += sizeCompleted; + offset += (NvU8)sizeCompleted; + } + + return true; +} + +/*! +* @return: true => EDID read is success, false => read is failure +*/ +static bool sstReadEdid(AuxBus * auxBus, Edid & edid, unsigned DDCAddr, Timer * timer, bool pendingTestRequestEdidRead) +{ + // + // If there is pending test request for edid read, + // ask edidReaderManager to take whatever posted, + // instead of discarding bytes read by a failed read. + // Because cert devices may need to see the checksum of these bytes, + // even if they seem corrupted. + // + EdidAssembler edidReaderManager(&edid, pendingTestRequestEdidRead); + NvU32 retryCount = 0; + Buffer buffer; + if (!buffer.resize(EDID_BLOCK_SIZE)) + { + return false; + } + + DP_ASSERT(auxBus); + + do + { + NvU8 seg = 0; + NvU8 offset = 0; + unsigned totalRead = 0; + edidReaderManager.reset(); + + // start by reading first EDID block, posting it and analyzing for next request + do + { + bool success = readNextBlock(auxBus, seg, offset, buffer, totalRead, DDCAddr, timer); + edidReaderManager.postReply(buffer, totalRead, success); + } + while (edidReaderManager.readNextRequest(seg, offset)); + if (!edid.isPatchedChecksum()) + break; + } while (retryCount++ < EDID_POLICY_BLOCK_READ_MAX_RETRY_COUNT); + + // + // EDID read is successful when + // 1. read was done to the end (i.e. no corruption, no blocks exceeding retry count) + // 2. EDID CRC is correct + // + return edidReaderManager.readIsComplete(); +} + +EDID_DDC DisplayPort::sstDDCPing(AuxBus & dpAux) +{ + unsigned sizeRequested = 0, sizeCompleted; + AuxBus::status auxStatus = AuxBus::nack; + NvU8 offset = 0; + unsigned ddcAddrIdx; + + for (ddcAddrIdx = 0; ddcAddrIdx < ddcAddrListSize; ddcAddrIdx++) + { + // + // Don't use an I2C write. Some devices erroneously ACK on the write + // + auxStatus = dpAux.transaction(AuxBus::read, AuxBus::i2c, ddcAddrList[ddcAddrIdx] >> 1, + &offset, sizeRequested, &sizeCompleted); + + if (AuxBus::success == auxStatus) + return (EDID_DDC)ddcAddrList[ddcAddrIdx]; + } + + return EDID_DDC_NONE; + +} + +bool DisplayPort::EdidReadSST(Edid & edid, AuxBus * auxBus, Timer* timer, + bool pendingTestRequestEdidRead, bool bBypassAssembler, + MainLink * main) +{ + Edid previousEdid; + Buffer *buffer; + bool status; + bool firstTrial = true; + NvU64 startTime, elapsedTime; + for (unsigned i = 0; i < ddcAddrListSize; i++) + { + startTime = timer->getTimeUs(); + elapsedTime = 0; + do + { + // + // Client asks to use RM control code to fetch EDID. + // + if (bBypassAssembler && main) + { + unsigned blockCnt; + buffer = edid.getBuffer(); + if (!buffer->resize(EDID_BLOCK_SIZE)) + { + return false; + } + status = main->fetchEdidByRmCtrl(buffer->getData(), buffer->getLength()); + + if (status) + { + blockCnt = edid.getBlockCount(); + + // If read successfully, check if there are two or more blocks. + if (blockCnt != 1) + { + if (!buffer->resize(EDID_BLOCK_SIZE * blockCnt)) + { + return false; + } + status = main->fetchEdidByRmCtrl(buffer->getData(), buffer->getLength()); + } + } + if (!status) + { + // + // If fetchEdidByRmCtrl fails for some reasons: + // Try to read again using DPLib read function. 
+                    // One reason for the client to request the read through RM is
+                    // to make sure the EDID is overridden (by regkey or otherwise),
+                    // so call the RM control call to apply the EDID overrides.
+                    //
+                    status = sstReadEdid(auxBus, edid, ddcAddrList[i], timer,
+                                         pendingTestRequestEdidRead);
+                    if (status)
+                    {
+                        main->applyEdidOverrideByRmCtrl(buffer->getData(),
+                                                        buffer->getLength());
+                    }
+                    else
+                    {
+                        DP_PRINTF(DP_ERROR, "EDID> Failed to read EDID from RM and DPLib");
+                    }
+                }
+            }
+            else
+            {
+                //
+                // If there is a pending test request for EDID read, make sure we get the raw bytes without a check,
+                // because cert devices may need to see the checksum of whatever is read for the EDID, even if it seems corrupted.
+                //
+                status = sstReadEdid(auxBus, edid, ddcAddrList[i], timer, pendingTestRequestEdidRead);
+
+            }
+
+            if (status)
+            {
+                if (edid.verifyCRC())
+                {
+                    return true;
+                }
+                else
+                {
+                    if (firstTrial) // first failure?
+                    {
+                        previousEdid.swap(edid);
+                        firstTrial = false;
+                    }
+                    else
+                    {
+                        if (previousEdid == edid)
+                        {
+                            // we got the same invalid checksum again; we will assume it is valid.
+                            edid.setForcedEdidChecksum(true);
+                            return true;
+                        }
+                    }
+                }
+            }
+            elapsedTime = timer->getTimeUs() - startTime;
+            timer->sleep(1);
+        } while (elapsedTime < (EDID_READ_RETRY_TIMEOUT_MS * 1000));
+    }
+
+    DP_PRINTF(DP_ERROR, "EDID> Failed to ping sst DDC addresses");
+
+    return false;
+}
diff --git a/src/common/displayport/src/dp_timer.cpp b/src/common/displayport/src/dp_timer.cpp
new file mode 100644
index 0000000..22c4c62
--- /dev/null
+++ b/src/common/displayport/src/dp_timer.cpp
@@ -0,0 +1,200 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_timer.cpp                                                      *
+*                                                                           *
+\***************************************************************************/
+#include "dp_internal.h"
+#include "dp_timer.h"
+#include "dp_printf.h"
+using namespace DisplayPort;
+
+void Timer::expired()
+{
+    fire(false);
+}
+
+// Take care, this function is re-entrant.
+// Consider that sleep() is effectively a call to fire().
+// Clients may sleep in response to a timer callback.
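+//
+// Example of the re-entrancy (see the restart label below): fire() calls
+// target->expired(), the client sleeps from inside that callback, sleep()
+// pumps fire(true) recursively, and the pending list (and the current time)
+// may have changed by the time the outer iteration resumes.
+//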
+unsigned Timer::fire(bool fromSleep)       // returns min time to next item to be fired
+{
+restart:
+
+    NvU64 now = getTimeUs();
+    NvU64 nearest = (NvU64)-1;
+    for (PendingCallback * i = (PendingCallback*)pending.begin(); i != pending.end(); )
+    {
+        if (fromSleep && !i->executeInSleep) {
+            i = (PendingCallback*)i->next;
+            continue;
+        }
+
+        if (now >= i->timestamp)
+        {
+            const void * context = i->context;
+            TimerCallback * target = i->target;
+            delete i;
+            if (target)
+                target->expired(context);   // Take care, the client may have made
+                                            // a recursive call to fire in here.
+                                            // Easy solution: Restart at front of list.
+                                            // The current time may have also changed
+                                            // drastically from a nested sleep.
+            goto restart;
+        }
+        else
+        {
+            if (i->timestamp < nearest)
+                nearest = i->timestamp;
+            i = (PendingCallback*)i->next;
+        }
+    }
+    unsigned minleft = (unsigned)((nearest - now + 999) / 1000);
+    return minleft;
+}
+
+void Timer::_pump(unsigned milliseconds, bool fromSleep)
+{
+    do
+    {
+        unsigned amt = fire(fromSleep);
+        if (amt >= milliseconds) {
+            raw->sleep(milliseconds);
+            return;
+        }
+        raw->sleep(amt);
+        milliseconds -= amt;
+    } while(milliseconds);
+}
+
+//
+// Queue a timer callback.
+// Callbacks queued with executeInSleep == false are not serviced from within sleep().
+//
+void Timer::queueCallback(Timer::TimerCallback * target, const void * context, unsigned milliseconds, bool executeInSleep)
+{
+    NvU64 now = getTimeUs();
+    PendingCallback * callback = new PendingCallback();
+    if (callback == NULL)
+    {
+        DP_PRINTF(DP_ERROR, "DP> %s: Failed to allocate callback",
+                  __FUNCTION__);
+        return;
+    }
+    callback->target = target;
+    callback->context = context;
+    callback->timestamp = now + milliseconds * 1000;
+    callback->executeInSleep = executeInSleep;
+    pending.insertBack(callback);
+    raw->queueCallback(this, milliseconds);
+}
+
+NvU64 Timer::getTimeUs()
+{
+    return raw->getTimeUs();
+}
+
+// Sleep a number of milliseconds.
+// Timer callbacks will be serviced!
+void Timer::sleep(unsigned milliseconds)
+{
+    _pump(milliseconds, true);
+}
+
+void Timer::cancelCallbacks(Timer::TimerCallback * to)
+{
+    if (!to)
+        return;
+    for (PendingCallback * i = (PendingCallback*)pending.begin(); i != pending.end(); i = (PendingCallback *)i->next)
+        if (i->target == to)
+            i->target = 0;
+}
+
+void Timer::cancelCallback(Timer::TimerCallback * to, const void * context)
+{
+    if (!to)
+        return;
+    for (PendingCallback * i = (PendingCallback *)pending.begin(); i != pending.end(); i = (PendingCallback*)i->next)
+        if (i->target == to && i->context == context)
+            i->target = 0;
+}
+
+// Queue callbacks in order.
+void Timer::queueCallbackInOrder(Timer::TimerCallback * target, const void * context, unsigned milliseconds, bool executeInSleep)
+{
+    NvU64 now = getTimeUs();
+    PendingCallback * callback = new PendingCallback();
+    if (callback == NULL)
+    {
+        DP_PRINTF(DP_ERROR, "DP> %s: Failed to allocate callback",
+                  __FUNCTION__);
+        return;
+    }
+    callback->target = target;
+    callback->context = context;
+    callback->timestamp = now + milliseconds * 1000;
+    callback->executeInSleep = executeInSleep;
+
+    // Figure out where to insert the current callback
+    Timer::PendingCallback* i;
+
+    for (i = (PendingCallback*)pending.begin(); i != pending.end();)
+    {
+        // only for the given context.
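+        // (That is: ordering is only maintained among callbacks that share
+        //  this context; entries with other contexts are skipped and keep
+        //  their own relative order.)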
+ if(i->context == context) + { + if(i->timestamp > callback->timestamp) + break; + } + i = (PendingCallback*) i->next; + } + if (i == pending.end()) + { + pending.insertBack(callback); + } + else + { + pending.insertBefore(i, callback); + } + + raw->queueCallback(this, milliseconds); +} + +void Timer::cancelAllCallbacks() +{ + for (PendingCallback * i = (PendingCallback*)pending.begin(); i!=pending.end(); i = (PendingCallback *)i->next) + i->target = 0; +} + +void Timer::cancelCallbacksWithoutContext(const void * context) +{ + for (PendingCallback * i = (PendingCallback*)pending.begin(); i!=pending.end(); i = (PendingCallback *)i->next) + if(i->context != context) + i->target = 0; +} + +bool Timer::checkCallbacksOfSameContext(const void * context) +{ + for (PendingCallback * i = (PendingCallback*)pending.begin(); i!=pending.end(); i = (PendingCallback *)i->next) + if(i->context == context) + return true; + + return false; +} diff --git a/src/common/displayport/src/dp_vrr.cpp b/src/common/displayport/src/dp_vrr.cpp new file mode 100644 index 0000000..4c4d981 --- /dev/null +++ b/src/common/displayport/src/dp_vrr.cpp @@ -0,0 +1,248 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_vrr.cpp * +* Implementation of VRR enablement * +* * +\***************************************************************************/ +#include "dp_connectorimpl.h" +#include "dp_vrr.h" +#include "dp_printf.h" + +using namespace DisplayPort; + +bool VrrEnablement::start() +{ + bool rc; + + DP_PRINTF(DP_NOTICE, "DPHAL_VRR_ENABLE> **** VRR Enablement Started ****"); + rc = vrrGetPublicInfo(); + if(rc) + { + rc = vrrEnableMonitor(); + if(rc != true) + { + return false; + } + rc = vrrEnableDriver(); + if(rc != true) + { + return false; + } + } + else + { + return false; + } + + DP_PRINTF(DP_NOTICE, "DPHAL_VRR_ENABLE> **** VRR Enablement Ends ****"); + + return true; +} + +bool VrrEnablement::vrrGetPublicInfo() +{ + MainLink *main = this->parent->connector->main; + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_INIT_PUBLIC_INFO, NULL) != true) + { + return false; + } + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_RESET_MONITOR, NULL) != true) + { + return false; + } + else + { + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + } + + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_GET_PUBLIC_INFO, NULL) != true) + { + return false; + } + + return vrrWaitOnEnableStatus(); +} + +bool VrrEnablement::vrrEnableMonitor() +{ + MainLink *main = this->parent->connector->main; + + DP_PRINTF(DP_NOTICE, "DPHAL_VRR_ENABLE> ** VRR_MON_ENABLE starts **"); + + // Always set the enable F/W state m/c to a known state. + if(main->vrrRunEnablementStage(VRR_ENABLE_STAGE_RESET_MONITOR, NULL) != true) + { + return false; + } + + // Wait for VRR to be 'ready'. + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + + if(main->vrrRunEnablementStage(VRR_ENABLE_STAGE_MONITOR_ENABLE_BEGIN, NULL) != true) + { + return false; + } + + // Wait for VRR to be 'ready'. + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + + main->vrrRunEnablementStage(VRR_ENABLE_STAGE_MONITOR_ENABLE_CHALLENGE, NULL); + + // Wait for VRR to be ready. + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + // Compare and enable on successful comparison. + if(main->vrrRunEnablementStage(VRR_ENABLE_STAGE_MONITOR_ENABLE_CHECK, NULL) == true) + { + this->bMonitorEnabled = true; + } + + DP_PRINTF(DP_NOTICE, "DPHAL_VRR_ENABLE> ** VRR_MON_ENABLE ends **"); + + return this->bMonitorEnabled; +} + +bool VrrEnablement::vrrEnableDriver() +{ + NvU32 enableResult; + + MainLink *main = this->parent->connector->main; + + DP_PRINTF(DP_NOTICE, "DPHAL_VRR_ENABLE> ** VRR_DRV_ENABLE starts **"); + + // Always set the enable F/W state m/c to a known state. + if(main->vrrRunEnablementStage(VRR_ENABLE_STAGE_RESET_MONITOR, NULL) != true) + { + return false; + } + + // Wait for VRR to be 'ready'. + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_DRIVER_ENABLE_BEGIN, &enableResult) != true) + { + return false; + } + + if (enableResult == NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING) + { + // Wait for VRR to be ready. + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + } + else if (enableResult == NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK) + { + return true; + } + + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_DRIVER_ENABLE_CHALLENGE, NULL) != true) + { + return false; + } + + // Wait for VRR to be 'ready'. 
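+    // (Sequence note, inferred from the stages used in this file: enablement
+    //  runs RESET_MONITOR -> status wait -> ENABLE_BEGIN -> wait ->
+    //  ENABLE_CHALLENGE -> wait -> ENABLE_CHECK to validate the challenge
+    //  response, for both the monitor and driver paths.)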
+ if (vrrWaitOnEnableStatus() != true) + { + return false; + } + + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_DRIVER_ENABLE_CHECK, NULL) != true) + { + return false; + } + + DP_PRINTF(DP_NOTICE, "DPHAL_VRR_ENABLE> ** VRR_DRV_ENABLE ends **"); + + return true; +} + +bool VrrEnablement::vrrWaitOnEnableStatus(void) +{ + NvU32 timeout = VRR_ENABLE_STATUS_TIMEOUT_THRESHOLD; + NvU32 enableResult; + + MainLink *main = this->parent->connector->main; + ConnectorImpl *connector = this->parent->connector; + do + { + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_STATUS_CHECK, &enableResult) == true) + { + return true; + } + else + { + if (enableResult == NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR) + { + return false; + } + else if (enableResult == NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING) + { + Timeout timeout(connector->timer, VRR_ENABLE_STATUS_TIMEOUT_INTERVAL_MS); + while(timeout.valid()); + continue; + } + else + { + return false; + } + } + }while(--timeout); + + return false; +} + +bool VrrEnablement::isMonitorEnabled(void) +{ + return (this->bMonitorEnabled); +} + +bool VrrEnablement::isDriverEnabled(void) +{ + NvU32 enableResult; + MainLink *main = this->parent->connector->main; + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_DRIVER_ENABLE_CHECK, + &enableResult) == true) + { + return true; + } + return false; +} diff --git a/src/common/displayport/src/dp_wardatabase.cpp b/src/common/displayport/src/dp_wardatabase.cpp new file mode 100644 index 0000000..216b8c4 --- /dev/null +++ b/src/common/displayport/src/dp_wardatabase.cpp @@ -0,0 +1,650 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_wardatabase.cpp                                                *
+*    EDID and OUI based workarounds for panel/TCON issues                   *
+*                                                                           *
+\***************************************************************************/
+#include "dp_wardatabase.h"
+#include "dp_edid.h"
+#include "dp_connectorimpl.h"
+#include "dp_printf.h"
+
+using namespace DisplayPort;
+
+void ConnectorImpl::applyOuiWARs()
+{
+    switch (ouiId)
+    {
+        // Megachips Mystique
+        case 0xE18000:
+            if (((modelName[0] == 'D') && (modelName[1] == 'p') && (modelName[2] == '1') &&
+                 (modelName[3] == '.') && (modelName[4] == '1')))
+            {
+                //
+                // The Mystique based link box for the HTC Vive has the peculiar behaviour
+                // of sending a link retraining pulse if the link is powered down in the absence
+                // of an active stream. Bug# 1793084. Set the flag so that the link is not powered down.
+                //
+                bKeepOptLinkAlive = true;
+            }
+
+            if (((modelName[0] == 'D') && (modelName[1] == 'p') && (modelName[2] == '1') &&
+                 (modelName[3] == '.') && (modelName[4] == '2')))
+            {
+                //
+                // The ASUS monitor sometimes loses the link while the link is being assessed
+                // or trained, so if we retrain the link by lowering the config from HBR2 to HBR
+                // we see a black screen. Set the flag so that we first retry link training with
+                // the same link config before following the link training fallback. Bug #1846925
+                //
+                bNoFallbackInPostLQA = true;
+            }
+            break;
+
+        // Synaptics
+        case 0x24CC90:
+            if ((modelName[0] == 'S') && (modelName[1] == 'Y') && (modelName[2] == 'N') &&
+                (modelName[3] == 'A') && (((modelName[4] == 'S') &&
+                ((modelName[5] == '1') || (modelName[5] == '2') ||
+                 (modelName[5] == '3') || (modelName[5] == '#') ||
+                 (modelName[5] == '\"')))||((modelName[4] == 0x84) &&
+                (modelName[5] == '0'))))
+            {
+                //
+                // Extend the latency from link-train end to the FEC enable pattern
+                // to avoid a lost link or blank screen with the Synaptics branch.
+                //
+                LT2FecLatencyMs = 57;
+
+                if (bDscMstCapBug3143315)
+                {
+                    //
+                    // The Synaptics branch device doesn't support Virtual Peer Devices, so the
+                    // DSC capability of a downstream device should be decided based on the
+                    // device's own and its parent's DSC capability.
+                    //
+                    bDscCapBasedOnParent = true;
+                }
+            }
+            break;
+    }
+}
+
+void Edid::applyEdidWorkArounds(NvU32 warFlag, const DpMonitorDenylistData *pDenylistData)
+{
+    unsigned ManufacturerID = this->getManufId();
+    unsigned ProductID = this->getProductId();
+    unsigned YearWeek = this->getYearWeek();
+
+    //
+    // Work around EDID problems, using manufacturer, product ID, and date of manufacture
+    // to identify each case.
+    //
+    switch (ManufacturerID)
+    {
+        // Apple
+        case 0x1006:
+            if (0x9227 == ProductID)
+            {
+                this->WARFlags.powerOnBeforeLt = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> WAR for Apple thunderbolt J29 panel");
+                DP_PRINTF(DP_NOTICE, "DP-WAR> - Monitor needs to be powered up before LT. Bug 933051");
+            }
+            break;
+
+        // Acer
+        case 0x7204:
+            // Bug 451868: The Acer AL1512 monitor has a wrong extension count:
+            if (0xad15 == ProductID && YearWeek <= 0x0d01)
+            {
+                // clear the extension count
+                buffer.data[0x7E] = 0;
+                this->WARFlags.extensionCountDisabled = true;
+                this->WARFlags.dataForced = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Edid override on Acer AL1512");
+                DP_PRINTF(DP_NOTICE, "DP-WAR> - Disabling extension count. Bug 451868");
+            }
+            break;
+
+        // Westinghouse
+        case 0x855C:
+
+            // Westinghouse 37" 1080p TV. LVM-37w3 (Port DVI1 EDID).
+            // Westinghouse 42" 1080p TV. LVM-42w2 (Port DVI1 EDID).
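+            //
+            // EDID layout assumed by the check below (the base block is 128 bytes):
+            //   data[0x7E] = extension block count
+            //   data[0x80] = tag of the first extension block (0x02 = CEA-861)
+            //   data[0x81] = CEA extension revision
+            //   data[0x83] bit 6 = basic audio support
+            //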
+            if (ProductID == 0x3703 || ProductID == 0x4202)
+            {
+                // Claims HDMI support, but audio causes picture corruption.
+                // Removing the HDMI extension block.
+
+                if (buffer.getLength() > 0x80 &&
+                    buffer.data[0x7E] == 1 &&     // extension block present
+                    buffer.data[0x80] == 0x02 &&  // CEA block
+                    buffer.data[0x81] == 0x03 &&  // revision 3
+                    !(buffer.data[0x83] & 0x40))  // No basic audio, must not be the HDMI port
+                {
+                    // clear the extension count
+                    buffer.data[0x7E] = 0;
+                    this->WARFlags.extensionCountDisabled = true;
+                    this->WARFlags.dataForced = true;
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> Edid override on Westinghouse LVM-<37/42>w<2/3>");
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> - Disabling extension count.");
+                }
+            }
+            break;
+
+        // IBM
+        case 0x4D24:
+            if (ProductID == 0x1A03)
+            {
+                // 2001 Week 50
+                if (YearWeek == 0x0B32)
+                {
+                    // Override IBM T210. The IBM T210 reports 2048x1536x60Hz in the EDID but it's
+                    // actually 2048x1536x40Hz. See bug 76347. This hack was, earlier, in the disp
+                    // driver. Now it's being moved down here to keep all overrides in the same place.
+                    // The hack was also preventing the disp driver from comparing the entire EDID
+                    // when trying to figure out whether or not the EDID for some device has changed.
+                    buffer.data[0x36] = 0x32;
+                    buffer.data[0x37] = 0x3E;
+                    this->WARFlags.dataForced = true;
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> Edid override on IBM T210");
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> 2048x1536x60Hz(misreported) -> 2048x1536x40Hz. Bug 76347");
+                }
+            }
+            break;
+        // GWY (Gateway) or EMA (eMachines)
+        case 0xF91E: // GWY
+        case 0xA115: // EMA
+            // Some Gateway monitors present the eMachines mfg code, so these two cases are combined.
+            // Future fixes may require the two cases to be separated.
+            // Fix for Bug 343870. NOTE: Problem found on G80; fix applied to all GPUs.
+            if ((ProductID >= 0x0776) && (ProductID <= 0x0779)) // Product IDs range from decimal 1910 to 1913
+            {
+                // if detailed pixel clock frequency = 106.50MHz
+                if ((buffer.data[0x36] == 0x9A) &&
+                    (buffer.data[0x37] == 0x29))
+                {
+                    // then change detailed pixel clock frequency to 106.54MHz to fix bug 343870
+                    buffer.data[0x36] = 0x9E;
+                    buffer.data[0x37] = 0x29;
+                    this->WARFlags.dataForced = true;
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> Edid override on GWY/EMA");
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> 106.50MHz(misreported) -> 106.54MHz. Bug 343870");
+                }
+            }
+            break;
+
+        // INX
+        case 0x2C0C:
+            // The INX L15CX monitor has an invalid detailed timing 10x311 @ 78Hz.
+            if (ProductID == 0x1502)
+            {
+                // remove detailed timing #4: zero out the first 3 bytes of the DTD#4 block
+                buffer.data[0x6c] = 0x0;
+                buffer.data[0x6d] = 0x0;
+                buffer.data[0x6e] = 0x0;
+                this->WARFlags.dataForced = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Edid override on INX L15CX");
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Removing invalid detailed timing 10x311 @ 78Hz");
+            }
+            break;
+
+        // AUO
+        case 0xAF06:
+            if ((ProductID == 0x103C) || (ProductID == 0x113C))
+            {
+                //
+                // Acer has faulty AUO eDP panels which have a
+                // wrong HBlank in the EDID. Correcting it here.
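+                // In the 18-byte detailed timing descriptor that starts at offset
+                // 0x36, byte 3 (0x39) holds HBlank[7:0] and byte 9 (0x3F) holds
+                // HSyncPulseWidth[7:0], so the two writes below set an HBlank of
+                // 75 pixels and an HSync pulse of 27 pixels.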
+                //
+                buffer.data[0x39] = 0x4B; // new hblank width: 75
+                buffer.data[0x3F] = 0x1B; // new hsync pulse width: 27
+                this->WARFlags.dataForced = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Edid override on AUO eDP panel");
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Modifying HBlank and HSync pulse width.");
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Bugs 907998, 1001160");
+            }
+            else if (ProductID == 0x109B || ProductID == 0x119B)
+            {
+                this->WARFlags.useLegacyAddress = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> AUO eDP");
+                DP_PRINTF(DP_NOTICE, "implements only Legacy interrupt address range");
+
+                // Bug 1792962 - The panel glitches on a D3 write, so apply this WAR.
+                this->WARFlags.disableDpcdPowerOff = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Disable DPCD Power Off");
+            }
+            break;
+
+        // LPL
+        case 0x0C32:
+            if (ProductID == 0x0000)
+            {
+                //
+                // Patch the EDID for the Quanta - Toshiba LG 1440x900 panel. See Bug 201428.
+                // Must first verify that we have that panel. It has MFG id 32, 0C,
+                // BUT the product ID for this (and other different LG panels) is 0000.
+                // So verify that the last "Custom Timing" area of the EDID has
+                // a "Monitor Description" of type FE = "ASCII Data String" which
+                // has this panel's name = "LP171WX2-A4K5".
+                //
+                if ((buffer.data[0x71] == 0x4C) &&
+                    (buffer.data[0x72] == 0x50) &&
+                    (buffer.data[0x73] == 0x31) &&
+                    (buffer.data[0x74] == 0x37) &&
+                    (buffer.data[0x75] == 0x31) &&
+                    (buffer.data[0x76] == 0x57) &&
+                    (buffer.data[0x77] == 0x58) &&
+                    (buffer.data[0x78] == 0x32) &&
+                    (buffer.data[0x79] == 0x2D) &&
+                    (buffer.data[0x7A] == 0x41) &&
+                    (buffer.data[0x7B] == 0x34) &&
+                    (buffer.data[0x7C] == 0x4B) &&
+                    (buffer.data[0x7D] == 0x35))
+                {
+                    //
+                    // Was 0x95, 0x25 -> 0x2595 = 9621, or 96.21 MHz.
+                    // 96,210,000 / 1760 / 912 = 59.939 Hz
+                    // Want 60 * 1760 * 912 ~= 9631, or 96.31 MHz.
+                    // 9631 = 0x259F -> 0x9F 0x25.
+                    // So, change byte 0x36 from 0x95 to 0x9F.
+                    //
+                    buffer.data[0x36] = 0x9F;
+                    this->WARFlags.dataForced = true;
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> Edid override on Quanta - Toshiba LG 1440x900");
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> Correcting pclk. Bug 201428");
+                }
+            }
+            else if (ProductID == 0xE300)
+            {
+                //
+                // Patch the EDID for the MSI - LG LPL 1280x800 panel. See Bug 359313.
+                // Must first verify that we have that panel. It has MFG id 32, 0C,
+                // BUT the product ID for this (and other different LG panels) is E300.
+                // So verify that the last "Custom Timing" area of the EDID has
+                // a "Monitor Description" of type FE = "ASCII Data String" which
+                // has this panel's name = "LP154WX4-TLC3".
+                //
+                if ((buffer.data[0x71] == 0x4C) &&
+                    (buffer.data[0x72] == 0x50) &&
+                    (buffer.data[0x73] == 0x31) &&
+                    (buffer.data[0x74] == 0x35) &&
+                    (buffer.data[0x75] == 0x34) &&
+                    (buffer.data[0x76] == 0x57) &&
+                    (buffer.data[0x77] == 0x58) &&
+                    (buffer.data[0x78] == 0x34) &&
+                    (buffer.data[0x79] == 0x2D) &&
+                    (buffer.data[0x7A] == 0x54) &&
+                    (buffer.data[0x7B] == 0x4C) &&
+                    (buffer.data[0x7C] == 0x43) &&
+                    (buffer.data[0x7D] == 0x33))
+                {
+                    //
+                    // Was 0xBC, 0x1B -> 0x1BBC = 7100, or 71.00 MHz.
+                    // 71,000,000 / 1488 / 826 = 59.939 Hz
+                    // Want 60 * 1488 * 826 ~= 7111, or 71.11 MHz.
+                    // 7111 = 0x1BC7 -> 0xC7 0x1B.
+                    // So, change byte 0x36 from 0xBC to 0xC7.
+                    //
+                    buffer.data[0x36] = 0xC7;
+                    this->WARFlags.dataForced = true;
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> Edid override on MSI - LG LPL 1280x800");
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> Correcting pclk. Bug 359313");
+                }
+            }
+            break;
+
+        // SKY
+        case 0x794D:
+            if (ProductID == 0x9880)
+            {
+                //
+                // Override for the Haier TV to remove the resolution
+                // 1366x768 from the EDID data.
Refer to bugs 351680 & 327891.
+                // Overriding 18 bytes from offset 0x36.
+                //
+                buffer.data[0x36] = 0x01;
+                buffer.data[0x37] = 0x1D;
+                buffer.data[0x38] = 0x00;
+                buffer.data[0x39] = 0x72;
+                buffer.data[0x3A] = 0x51;
+                buffer.data[0x3B] = 0xD0;
+                buffer.data[0x3C] = 0x1E;
+                buffer.data[0x3D] = 0x20;
+                buffer.data[0x3E] = 0x6E;
+                buffer.data[0x3F] = 0x28;
+                buffer.data[0x40] = 0x55;
+                buffer.data[0x41] = 0x00;
+                buffer.data[0x42] = 0xC4;
+                buffer.data[0x43] = 0x8E;
+                buffer.data[0x44] = 0x21;
+                buffer.data[0x45] = 0x00;
+                buffer.data[0x46] = 0x00;
+                buffer.data[0x47] = 0x1E;
+
+                this->WARFlags.dataForced = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Edid override on Haier TV.");
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Removing 1366x768. Bugs 351680 & 327891");
+
+            }
+            break;
+        // HP
+        case 0xF022:
+            switch (ProductID)
+            {
+                case 0x192F:
+                    //
+                    // WAR for bug 1643712 - Issue specific to the HP Z1 G2 (Zeus) All-In-One.
+                    // Putting the Rx in power save mode before BL_EN is deasserted makes this
+                    // specific sink unhappy. Bug 1559465 will address the right power down
+                    // sequence. We need to revisit this WAR once Bug 1559465 is fixed.
+                    //
+                    this->WARFlags.disableDpcdPowerOff = true;
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> Disable DPCD Power Off");
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> HP Z1 G2 (Zeus) AIO Bug 1643712");
+                    break;
+            }
+            break;
+
+        // Sharp
+        case 0x104d:
+            switch (ProductID)
+            {
+                case 0x141c: // HP Valor QHD+ N15P-Q3 Sharp EDP
+                    //
+                    // The HP Valor QHD+ N15P-Q3 EDP needs a 50 ms delay
+                    // after D3 to avoid black screen issues.
+                    //
+                    this->WARFlags.delayAfterD3 = true;
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> HP Valor QHD+ N15P-Q3 Sharp EDP needs 50 ms after D3");
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> bug 1520011");
+                    break;
+
+                // Sharp EDPs that declare DP1.2 but don't implement the ESI address space
+                case 0x1414:
+                case 0x1430:
+                case 0x1445:
+                case 0x1446:
+                case 0x144C:
+                case 0x1450:
+                case 0x1467:
+                case 0x145e:
+                    //
+                    // Use the Legacy address space for these DP1.2 panels
+                    //
+                    this->WARFlags.useLegacyAddress = true;
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> Sharp EDP implements only Legacy interrupt address range");
+                    break;
+
+                case 0x143B:
+                    //
+                    // Bug 200113041
+                    // We need a unique way to identify this Sharp panel. Besides the
+                    // manufacturer ID and ProductID, we also have to match the model
+                    // name, LQ156D1JW05, in ASCII.
+                    //
+                    if ((buffer.data[0x71] == 0x4C) &&
+                        (buffer.data[0x72] == 0x51) &&
+                        (buffer.data[0x73] == 0x31) &&
+                        (buffer.data[0x74] == 0x35) &&
+                        (buffer.data[0x75] == 0x36) &&
+                        (buffer.data[0x76] == 0x44) &&
+                        (buffer.data[0x77] == 0x31) &&
+                        (buffer.data[0x78] == 0x4A) &&
+                        (buffer.data[0x79] == 0x57) &&
+                        (buffer.data[0x7A] == 0x30) &&
+                        (buffer.data[0x7B] == 0x35) &&
+                        (buffer.data[0x7C] == 0x0A) &&
+                        (buffer.data[0x7D] == 0x20))
+                    {
+                        this->WARFlags.useLegacyAddress = true;
+                        DP_PRINTF(DP_NOTICE, "DP-WAR> Sharp EDP implements only Legacy interrupt address range");
+                    }
+                    break;
+            }
+            break;
+
+        // EIZO
+        case 0xc315:
+            if (ProductID == 0x2227)
+            {
+                //
+                // The EIZO FlexScan SX2762W generates a redundant long HPD
+                // pulse after a modeset, which triggers another modeset on GPUs
+                // without flush mode, triggering an infinite link training
+                // loop.
+                //
+                this->WARFlags.ignoreRedundantHotplug = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> EIZO FlexScan SX2762W generates redundant");
+                DP_PRINTF(DP_NOTICE, "DP-WAR> hotplugs (bug 1048796)");
+                break;
+            }
+            break;
+
+        // MEI-Panasonic
+        case 0xa934:
+            if (ProductID == 0x96a2)
+            {
+                //
+                // Bug 200113041
+                // We need a unique way to identify this MEI-Panasonic panel.
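+                // (The 13 bytes at offsets 0x71..0x7D are the payload of the last
+                // 18-byte display descriptor; on this panel they carry the model
+                // string "VVX17P051J00" terminated by 0x0A.)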
+                // Besides the manufacturer ID and ProductID, we also have to
+                // match the model name, VVX17P051J00, in ASCII.
+                //
+                if ((buffer.data[0x71] == 0x56) &&
+                    (buffer.data[0x72] == 0x56) &&
+                    (buffer.data[0x73] == 0x58) &&
+                    (buffer.data[0x74] == 0x31) &&
+                    (buffer.data[0x75] == 0x37) &&
+                    (buffer.data[0x76] == 0x50) &&
+                    (buffer.data[0x77] == 0x30) &&
+                    (buffer.data[0x78] == 0x35) &&
+                    (buffer.data[0x79] == 0x31) &&
+                    (buffer.data[0x7A] == 0x4A) &&
+                    (buffer.data[0x7B] == 0x30) &&
+                    (buffer.data[0x7C] == 0x30) &&
+                    (buffer.data[0x7D] == 0x0A))
+                {
+                    this->WARFlags.useLegacyAddress = true;
+                    DP_PRINTF(DP_NOTICE, "DP-WAR> MEI-Panasonic EDP");
+                    DP_PRINTF(DP_NOTICE, "implements only Legacy interrupt address range");
+                }
+            }
+            break;
+
+        // LG
+        case 0xE430:
+            if (ProductID == 0x0469)
+            {
+                //
+                // The LG display can't be driven at FHD with 2*RBR.
+                // Force the max link config.
+                //
+                this->WARFlags.forceMaxLinkConfig = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Force maximum link config WAR required on LG panel.");
+                DP_PRINTF(DP_NOTICE, "DP-WAR> bug 1649626");
+                break;
+            }
+            break;
+        case 0x8F34:
+            if (ProductID == 0xAA55)
+            {
+                this->WARFlags.forceMaxLinkConfig = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Force maximum link config WAR required on Sharp-CerebrEx panel.");
+            }
+            break;
+
+        // CMN
+        case 0xAE0D:
+            if (ProductID == 0x1747)
+            {
+                this->WARFlags.useLegacyAddress = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> CMN eDP");
+                DP_PRINTF(DP_NOTICE, "implements only Legacy interrupt address range");
+            }
+            break;
+
+        // BenQ
+        case 0xD109:
+            if ((ProductID == 0x7F2B) || (ProductID == 0x7F2F))
+            {
+                this->WARFlags.ignoreRedundantHotplug = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> BenQ GSync power on/off redundant hotplug");
+            }
+            break;
+
+        // MSI
+        case 0x834C:
+            if (ProductID == 0x4C48)
+            {
+                this->WARFlags.useLegacyAddress = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> MSI eDP");
+                DP_PRINTF(DP_NOTICE, "implements only Legacy interrupt address range");
+            }
+            break;
+
+        // Unigraf
+        case 0xC754:
+        case 0x1863:
+        {
+            DP_PRINTF(DP_NOTICE, "DP-WAR> Unigraf device, keep link alive during detection");
+            this->WARFlags.keepLinkAlive = true;
+        }
+        break;
+
+        // BOE
+        case 0xE509:
+            if ((ProductID == 0x977) || (ProductID == 0x974) || (ProductID == 0x9D9))
+            {
+                this->WARFlags.bIgnoreDscCap = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> BOE panels incorrectly exposing DSC capability. Ignoring it.");
+            }
+            break;
+
+        // NCP
+        case 0x7038:
+            if (ProductID == 0x005F)
+            {
+                this->WARFlags.bIgnoreDscCap = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> NCP panels incorrectly exposing DSC capability. Ignoring it.");
+            }
+            break;
+
+        //
+        // This panel advertises DSC capability, but the panel doesn't actually
+        // support DSC, so we ignore the DSC capability on this panel.
+        //
+        case 0x6F0E:
+            if (ProductID == 0x1609)
+            {
+                this->WARFlags.bIgnoreDscCap = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Ignoring DSC capability on Lenovo CSOT 1609 Panel.");
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Bug 3444252");
+            }
+            break;
+
+        // Asus
+        case 0x6D1E:
+            if (ProductID == 0x7707)
+            {
+                this->WARFlags.bIgnoreDscCap = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Panel incorrectly exposing DSC capability. Ignoring it.");
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Bug 3543158");
+            }
+            else if (ProductID == 0x5B9A)
+            {
+                this->WARFlags.bDisableDscMaxBppLimit = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Disable DSC max BPP limit of 16 for DSC.");
+            }
+            else if (ProductID == 0x5CA7 || ProductID == 0x9E9D || ProductID == 0x9EA0 || ProductID == 0x9EA5 || ProductID == 0x5CB7 ||
+                     ProductID == 0x9EA8 || ProductID == 0x9EAF || ProductID == 0x7846 || ProductID == 0x7849 || ProductID == 0x5CB5)
+            {
+                this->WARFlags.bForceHeadShutdownOnModeTransition = true;
+                DP_PRINTF(DP_NOTICE, "DP-WAR> Force head shutdown on mode transition.");
+            }
+            break;
+        default:
+            break;
+    }
+
+    // Find out if the monitor needs a WAR to be applied.
+    if (warFlag)
+    {
+        if (warFlag & DP_MONITOR_CAPABILITY_DP_SKIP_REDUNDANT_LT)
+        {
+            this->WARFlags.skipRedundantLt = true;
+        }
+
+        if (warFlag & DP_MONITOR_CAPABILITY_DP_SKIP_CABLE_BW_CHECK)
+        {
+            this->WARFlags.skipCableBWCheck = true;
+            this->WARData.maxLaneAtHighRate = pDenylistData->dpSkipCheckLink.maxLaneAtHighRate;
+            this->WARData.maxLaneAtLowRate = pDenylistData->dpSkipCheckLink.maxLaneAtLowRate;
+        }
+
+        if (warFlag & DP_MONITOR_CAPABILITY_DP_WRITE_0x600_BEFORE_LT)
+        {
+            // all HP monitors need to be powered up before link training
+            this->WARFlags.powerOnBeforeLt = true;
+            DP_PRINTF(DP_NOTICE, "DP-WAR> HP monitors need to be powered up before LT");
+        }
+
+        if (warFlag & DP_MONITOR_CAPABILITY_DP_OVERRIDE_OPTIMAL_LINK_CONFIG)
+        {
+            //
+            // Instead of calculating the optimal link config based on the
+            // timing, bpc, etc., just use a default fixed link config for
+            // the monitor for all modes.
+            //
+            this->WARFlags.overrideOptimalLinkCfg = true;
+            // Force the fixed max link config for LT
+            this->WARFlags.forceMaxLinkConfig = true;
+            this->WARData.optimalLinkRate = pDenylistData->dpOverrideOptimalLinkConfig.linkRate;
+            this->WARData.optimalLaneCount = pDenylistData->dpOverrideOptimalLinkConfig.laneCount;
+            DP_PRINTF(DP_NOTICE, "DP-WAR> Overriding optimal link config on Dell U2410.");
+            DP_PRINTF(DP_NOTICE, "DP-WAR> bug 632801");
+        }
+
+        if (warFlag & DP_MONITOR_CAPABILITY_DP_OVERRIDE_MAX_LANE_COUNT)
+        {
+            //
+            // Some monitors claim more lanes than they actually support.
+            // This particular Lenovo monitor has just 2 lanes, but its DPCD says 4.
+            // This WAR is to override the max lane count read from the DPCD.
+            //
+            this->WARFlags.overrideMaxLaneCount = true;
+            this->WARData.maxLaneCount = pDenylistData->dpMaxLaneCountOverride;
+            DP_PRINTF(DP_NOTICE, "DP-WAR> Overriding max lane count on Lenovo L2440x.");
+            DP_PRINTF(DP_NOTICE, "DP-WAR> bug 687952");
+        }
+    }
+
+    if (this->WARFlags.dataForced)
+    {
+        DP_PRINTF(DP_NOTICE, "DP-WAR> EDID was overridden for some data. Patching CRC.");
+        this->patchCrc();
+    }
+}
diff --git a/src/common/displayport/src/dp_watermark.cpp b/src/common/displayport/src/dp_watermark.cpp
new file mode 100644
index 0000000..f4a970f
--- /dev/null
+++ b/src/common/displayport/src/dp_watermark.cpp
@@ -0,0 +1,879 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_watermark.cpp * +* DP watermark IsModePossible calculations * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_watermark.h" +#include "dp_linkconfig.h" +#include "dp_printf.h" +#include "displayport.h" + +#define FEC_TOTAL_SYMBOLS_PER_BLK(lanes) ((NvU32)((lanes == 1) ? 512U : 256U)) +#define FEC_PARITY_SYMBOLS_PER_BLK(lanes) ((NvU32)((lanes == 1) ? 12U : 6U)) +//return max number of FEC parity symbols in x link clock cycles +#define FEC_PARITY_SYM_SST(lanes, x) (DP_MIN((NvU32)(x) % FEC_TOTAL_SYMBOLS_PER_BLK(lanes), FEC_PARITY_SYMBOLS_PER_BLK(lanes)) + (NvU32)(x) / FEC_TOTAL_SYMBOLS_PER_BLK(lanes) * FEC_PARITY_SYMBOLS_PER_BLK(lanes) + FEC_PARITY_SYMBOLS_PER_BLK(lanes) + 1U) +#define FEC_PARITY_SYM_MST(lanes, x) (DP_MIN((NvU32)(x) % FEC_TOTAL_SYMBOLS_PER_BLK(lanes), FEC_PARITY_SYMBOLS_PER_BLK(lanes)) + (NvU32)(x) / FEC_TOTAL_SYMBOLS_PER_BLK(lanes) * FEC_PARITY_SYMBOLS_PER_BLK(lanes) + 1U) + + +bool DisplayPort::isModePossibleMST +( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo +) +{ + // For MST, use downspread 0.6% + NvU64 linkFreq; + linkFreq = LINK_RATE_TO_DATA_RATE_8B_10B(linkConfig.peakRate) * 994 / 1000; + + // This function is for multistream only! + DP_ASSERT(linkConfig.multistream); + + if(!modesetInfo.pixelClockHz || !modesetInfo.depth) + { + DP_ASSERT(0 && "INVALID PIXEL CLOCK and DEPTH sent by the client "); + return false; + } + + // depth is multiplied by 16 in case of DSC enable + unsigned DSC_FACTOR = modesetInfo.bEnableDsc ? 16 : 1; + + // Extra bits that we need to send + //(hActiveDiv4Remainder > 0 ? (4- hActiveDiv4Remainder) : 0) --> + // Number of extra pixels that we need to insert due to mapping pixels + // to the DP lanes. (4 lanes for MS) + // + // 160 --> Extra bits that we need to send during horizontal blanking + // (BS+VBID+MVID+MAUD+BE) => 5*8*num_lanes + // + // 6 * 4 --> Pixel padding worst case + // + NvU32 minHBlank = ( ((modesetInfo.surfaceWidth % 4) > 0) ? 
((4-(modesetInfo.surfaceWidth % 4)) * modesetInfo.depth)/ DSC_FACTOR : 0 ) + (160 + 6 * 4); + + // Rounding to nearest multiple of 32 since we always send 32 bits in one time slice + minHBlank = minHBlank + (32 - minHBlank % 32); + + // bpp - 1 --> Rounding + minHBlank = ((minHBlank * DSC_FACTOR) + modesetInfo.depth - (1 * DSC_FACTOR))/modesetInfo.depth; + + if (minHBlank > modesetInfo.rasterWidth - modesetInfo.surfaceWidth) + { + DP_PRINTF(DP_ERROR, "ERROR: Blanking Width is smaller than minimum permissible value."); + return false; + } + + // Bug 702290 - Active Width should be greater than 60 + if (modesetInfo.surfaceWidth <= 60) + { + DP_PRINTF(DP_ERROR, "ERROR: Minimum Horizontal Active Width <= 60 not supported."); + return false; + } + + NvS32 vblank_symbols; + NvS32 hblank_symbols = (NvS32)(((NvU64)(modesetInfo.rasterWidth - modesetInfo.surfaceWidth - minHBlank) * linkFreq) / modesetInfo.pixelClockHz); + + //reduce HBlank Symbols to account for secondary data packet + hblank_symbols -= 1; //Stuffer latency to send BS + hblank_symbols -= 3; //SPKT latency to send data to stuffer + + hblank_symbols -= linkConfig.lanes == 1 ? 9 : linkConfig.lanes == 2 ? 6 : 3; + + dpInfo->hBlankSym = (hblank_symbols < 0) ? 0 : hblank_symbols; + + + // + // Audio IMP calculations + // Perform the related audio calculation to determine the number of extra symbols needed. + // + NvU32 twoChannelAudio_symbols = 0; + + if (modesetInfo.twoChannelAudioHz != 0) + { + // 1-2 channel case + NvU32 samples = (NvU32)divide_ceil(modesetInfo.twoChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz); + + // Round to the next even sample to account for stuffing (2 ch, 4 lanes) + samples = samples + (2 - samples % 2); + + // Convert sample count to symbols + twoChannelAudio_symbols = 10 * samples + 16; + } + + NvU32 eightChannelAudio_symbols = 0; + if (modesetInfo.eightChannelAudioHz != 0) + { + // 3-8 channel case + NvU32 samples = (NvU32)divide_ceil(modesetInfo.eightChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz); + + // Convert sample count to symbols + eightChannelAudio_symbols = 40 * samples + 16; + } + + if (dpInfo->hBlankSym < DP_MAX(twoChannelAudio_symbols, eightChannelAudio_symbols)) + { + return false; + } + + // Refer to dev_disp.ref for more information. + // # symbols/vblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - SetRasterBlankStart.X - 40) * link_clk / pclk) - Y - 1; + // where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39 + if (modesetInfo.surfaceWidth < 40) + { + vblank_symbols = 0; + } + else + { + vblank_symbols = (NvS32)(((NvU64)(modesetInfo.surfaceWidth - 40) * linkFreq) / modesetInfo.pixelClockHz) - 1; + + vblank_symbols -= linkConfig.lanes == 1 ? 39 : linkConfig.lanes == 2 ? 21 : 12; + } + + dpInfo->vBlankSym = (vblank_symbols < 0) ? 0 : vblank_symbols; + + return true; +} + + +bool DisplayPort::isModePossibleSST +( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo, + bool bUseIncreasedWatermarkLimits +) +{ + NvU64 laneDataRate; + laneDataRate = linkConfig.convertMinRateToDataRate(); + + + // This function is for single stream only! + DP_ASSERT(!linkConfig.multistream); + + unsigned watermarkAdjust = DP_CONFIG_WATERMARK_ADJUST; + unsigned watermarkMinimum = DP_CONFIG_WATERMARK_LIMIT; + // depth is multiplied by 16 in case of DSC enable + unsigned DSC_FACTOR = modesetInfo.bEnableDsc ? 
16 : 1;
+
+    if (bUseIncreasedWatermarkLimits)
+    {
+        watermarkAdjust = DP_CONFIG_INCREASED_WATERMARK_ADJUST;
+        watermarkMinimum = DP_CONFIG_INCREASED_WATERMARK_LIMIT;
+    }
+
+    if (!modesetInfo.pixelClockHz || !modesetInfo.depth)
+    {
+        DP_ASSERT(0 && "INVALID PIXEL CLOCK or DEPTH sent by the client ");
+        return false;
+    }
+    // number of link clocks per line.
+    int vblank_symbols = 0;
+    NvU64 PrecisionFactor, ratioF, watermarkF;
+
+    NvU32 numLanesPerLink = linkConfig.lanes;
+
+    DP_ASSERT(!linkConfig.multistream && "MST!");
+
+    // Check that we have a valid laneCount, as currently we support only up to 4 lanes.
+    if (!IS_VALID_LANECOUNT(linkConfig.lanes))
+    {
+        //
+        // Print a debug message and assert. All calculations assume a max of 8 lanes,
+        // & any increase in lanes should cause these calculations to be updated.
+        //
+        DP_PRINTF(DP_ERROR, "ERROR: LaneCount - %d is not supported for waterMark calculations.", linkConfig.lanes);
+        DP_PRINTF(DP_ERROR, "Current support is only up to 4-Lanes & any change/increase in supported lanes "
+                  "should be reflected in waterMark calculations algorithm. "
+                  "Ex: See calc for minHBlank variable below");
+
+        DP_ASSERT(0);
+        return false;
+    }
+
+    if ((modesetInfo.pixelClockHz * modesetInfo.depth) >= (8 * laneDataRate * linkConfig.lanes * DSC_FACTOR))
+    {
+        return false;
+    }
+
+    //
+    // For DSC, if (pclk * bpp) < (1/64 * orclk * 8 * lanes) then some TU may end up with
+    // 0 active symbols. This may cause a HW hang. Bug 200379426
+    //
+    if ((modesetInfo.bEnableDsc) &&
+        ((modesetInfo.pixelClockHz * modesetInfo.depth) < ((8 * laneDataRate * linkConfig.lanes * DSC_FACTOR) / 64)))
+    {
+        return false;
+    }
+
+    //
+    // Perform the SST calculation.
+    // For auto mode, the watermark calculation does not need to track accumulated error,
+    // and the formulas for manual mode will not work, so the calculation below was
+    // extracted from the DTB.
+    //
+    dpInfo->tuSize = 64;
+    PrecisionFactor = 100000;
+    ratioF = ((NvU64)modesetInfo.pixelClockHz * modesetInfo.depth * PrecisionFactor) / DSC_FACTOR;
+
+    ratioF /= 8 * (NvU64)laneDataRate * linkConfig.lanes;
+
+    if (PrecisionFactor < ratioF) // Bail out if we would end up with a negative number below.
+        return false;
+
+    watermarkF = ratioF * dpInfo->tuSize * (PrecisionFactor - ratioF) / PrecisionFactor;
+    dpInfo->waterMark = (unsigned)(watermarkAdjust + ((2 * (modesetInfo.depth * PrecisionFactor / (8 * numLanesPerLink * DSC_FACTOR)) + watermarkF) / PrecisionFactor));
+
+    // Bounds check the watermark
+    NvU32 numSymbolsPerLine = (modesetInfo.surfaceWidth * modesetInfo.depth) / (8 * linkConfig.lanes * DSC_FACTOR);
+
+    if (dpInfo->waterMark > 39 || dpInfo->waterMark > numSymbolsPerLine)
+    {
+        DP_PRINTF(DP_ERROR, "ERROR: watermark should not be greater than 39 or the number of symbols per line.");
+        return false;
+    }
+
+    // Clamp the low side
+    if (dpInfo->waterMark < watermarkMinimum)
+        dpInfo->waterMark = watermarkMinimum;
+
+    // Bits to send BS/BE/extra symbols due to pixel padding.
+    // Also accounts for enhanced framing.
+    NvU32 BlankingBits = 3*8*numLanesPerLink + (linkConfig.enhancedFraming ? 3*8*numLanesPerLink : 0);
+
+    // VBID/MVID/MAUD are always sent 4 times
+    BlankingBits += 3*8*4;
+
+    NvU32 surfaceWidthPerLink = modesetInfo.surfaceWidth;
+
+    // Extra bits sent due to pixel steering
+    NvU32 PixelSteeringBits = (surfaceWidthPerLink % numLanesPerLink) ?
(((numLanesPerLink - surfaceWidthPerLink % numLanesPerLink) * modesetInfo.depth) / DSC_FACTOR) : 0; + + BlankingBits += PixelSteeringBits; + NvU64 NumBlankingLinkClocks = (NvU64)BlankingBits * PrecisionFactor / (8 * numLanesPerLink); + NvU32 MinHBlank = (NvU32)(NumBlankingLinkClocks * modesetInfo.pixelClockHz/ laneDataRate / PrecisionFactor); + MinHBlank += 12; + + if (MinHBlank > modesetInfo.rasterWidth - modesetInfo.surfaceWidth) + { + DP_PRINTF(DP_ERROR, "ERROR: Blanking Width is smaller than minimum permissible value."); + return false; + } + + // Bug 702290 - Active Width should be greater than 60 + if (modesetInfo.surfaceWidth <= 60) + { + DP_PRINTF(DP_ERROR, "ERROR: Minimum Horizontal Active Width <= 60 not supported."); + return false; + } + + + NvS32 hblank_symbols = (NvS32)(((NvU64)(modesetInfo.rasterWidth - modesetInfo.surfaceWidth - MinHBlank) * laneDataRate) / modesetInfo.pixelClockHz); + + //reduce HBlank Symbols to account for secondary data packet + hblank_symbols -= 1; //Stuffer latency to send BS + hblank_symbols -= 3; //SPKT latency to send data to stuffer + + hblank_symbols -= numLanesPerLink == 1 ? 9 : numLanesPerLink == 2 ? 6 : 3; + + dpInfo->hBlankSym = (hblank_symbols < 0) ? 0 : hblank_symbols; + + // + // Audio IMP calculations + // + + // From dev_disp.ref: + + // The packet generation logic needs to know the length of the hblank period. If there is no room + // in the current hblank for a new packet, it will be delayed until the next blanking period. This + // field should be programmed during the second Supervisor interrupt based on the new raster + // dimensions. + + // ... + + // -------------------------------------- + // The following formulas can be used to calculate the maximum audio sampling rate that can + // be supported by DisplayPort given the current raster dimensions. DisplayPort has much more + // bandwidth during blanking periods than HDMI has, so hblank size is less of an issue. + + // ... + + // Size of a packet for 2ch audio = 20 symbols (up to 2 samples) + // Size of a packet for 8ch audio = 40 symbols + // Size of an audio packet header plus control symbols = 2*#lanes + 8 symbols (assuming < 32 samples per line) + // number of packets/hblank for 2ch audio = Floor ((number of free symbols/hblank - (2*#lanes + 8) / 20) + // number of packets/hblank for 8ch audio = Floor ((number of free symbols/hblank - (2*#lanes + 8) / 40) + + // Maximum audio sample rate possible: + // number of audio samples/line = SetRasterSize.Width * audio_fs / pclk + // number of audio packets needed for 2ch audio = Ceiling(SetRasterSize.Width * audio_fs / (pclk*2)) + // number of audio packets needed for 3-8ch audio = SetRasterSize.Width * audio_fs / pclk + + // If number of audio packets needed > number of packets/hblank, then you cannot support that audio frequency + + // Note that the hBlankSym calculated is per lane. So the number of symbols available for audio is + // (number of lanes * hBlankSym). + // The calculation of audio packets per Hblank needs to account for the following - + // 2 symbols for SS and SE; 8 symbols for header; and additional 2 symbols to account for actual values used by HW. + // -------------------------------------- + + if (modesetInfo.twoChannelAudioHz != 0) + { + if ((dpInfo->hBlankSym * numLanesPerLink) < (2 * numLanesPerLink + 8)) + { + // There aren't enough symbols/hblank available. 
+ return false; + } + + NvU32 twoChannelAudioPacketsPerHBlank = (NvU32)divide_floor(((dpInfo->hBlankSym * numLanesPerLink) - (2 * numLanesPerLink) - 8 - (2 * numLanesPerLink)), 20); + + NvU32 twoChannelAudioPackets = (NvU32)divide_ceil(modesetInfo.twoChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz * 2); + + if (twoChannelAudioPackets > twoChannelAudioPacketsPerHBlank) + { + // There aren't enough packets/hblank available. + return false; + } + } + + if (modesetInfo.eightChannelAudioHz != 0) + { + if ((dpInfo->hBlankSym * numLanesPerLink) < (2 * numLanesPerLink + 8)) + { + // There aren't enough symbols/hblank available. + return false; + } + + NvU32 eightChannelAudioPacketsPerHBlank = (NvU32)divide_floor(((dpInfo->hBlankSym * numLanesPerLink) - (2 * numLanesPerLink) - 8 - (2 * numLanesPerLink)), 40); + + NvU32 eightChannelAudioPackets = (NvU32)divide_ceil(modesetInfo.eightChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz); + + if (eightChannelAudioPackets > eightChannelAudioPacketsPerHBlank) + { + // There aren't enough packets/hblank available. + return false; + } + } + + + // Refer to dev_disp.ref for more information. + // # symbols/vblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - SetRasterBlankStart.X - 40) * link_clk / pclk) - Y - 1; + // where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39 + if (modesetInfo.surfaceWidth < 40) + { + vblank_symbols = 0; + } + else + { + vblank_symbols = (NvS32)(((NvU64)(modesetInfo.surfaceWidth - 40) * laneDataRate) / modesetInfo.pixelClockHz) - 1; + + vblank_symbols -= numLanesPerLink == 1 ? 39 : numLanesPerLink == 2 ? 21 : 12; + } + + dpInfo->vBlankSym = (vblank_symbols < 0) ? 0 : vblank_symbols; + + return true; +} + +bool DisplayPort::isModePossibleSSTWithFEC +( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo, + bool bUseIncreasedWatermarkLimits +) +{ + // + // This function is for single stream only! + // Refer to Bug 200406501 and 200401850 for algorithm + // + DP_ASSERT(!linkConfig.multistream); + + unsigned watermarkAdjust = DP_CONFIG_WATERMARK_ADJUST; + unsigned watermarkMinimum = DP_CONFIG_WATERMARK_LIMIT; + // depth is multiplied by 16 in case of DSC enable + unsigned DSC_FACTOR = modesetInfo.bEnableDsc ? 16 : 1; + + if(bUseIncreasedWatermarkLimits) + { + watermarkAdjust = DP_CONFIG_INCREASED_WATERMARK_ADJUST; + watermarkMinimum = DP_CONFIG_INCREASED_WATERMARK_LIMIT; + } + + NvU64 laneDataRate = linkConfig.convertMinRateToDataRate(); + + if(!modesetInfo.pixelClockHz || !modesetInfo.depth) + { + DP_ASSERT(0 && "INVALID PIXEL CLOCK or DEPTH sent by the client "); + return false; + } + // number of link clocks per line. + int vblank_symbols = 0; + NvU64 PrecisionFactor, ratioF, watermarkF; + NvS32 w0, s; + + NvU32 numLanesPerLink = linkConfig.lanes; + + DP_ASSERT(!linkConfig.multistream && "MST!"); + + // Check if we have a valid laneCount as currently we support only up to 4-lanes + if (!IS_VALID_LANECOUNT(linkConfig.lanes)) + { + // + // Print debug message and Assert. All calculations assume a max of 8 lanes + // & any increase in lanes should cause these calculation to be updated + // + DP_PRINTF(DP_ERROR, "ERROR: LaneCount - %d is not supported for waterMark calculations.", linkConfig.lanes); + DP_PRINTF(DP_ERROR, "Current support is only up to 4-Lanes & any change/increase in supported lanes " + "should be reflected in waterMark calculations algorithm. 
" + "Ex: See calc for minHBlank variable below"); + + DP_ASSERT(0); + return false; + } + + if ((modesetInfo.pixelClockHz * modesetInfo.depth) >= (8 * laneDataRate * linkConfig.lanes * DSC_FACTOR)) + { + return false; + } + + // + // For DSC, if (pclk * bpp) < (1/64 * orclk * 8 * lanes) then some TU may end up with + // 0 active symbols. This may cause HW hang. Bug 200379426 + // + if ((modesetInfo.bEnableDsc) && + ((modesetInfo.pixelClockHz * modesetInfo.depth) < ((8 * laneDataRate * linkConfig.lanes * DSC_FACTOR) / 64))) + { + return false; + } + + // + // Perform the SST calculation. + // For auto mode the watermark calculation does not need to track accumulated error the + // formulas for manual mode will not work. So below calculation was extracted from the DTB. + // + dpInfo->tuSize = 64; + PrecisionFactor = 100000; + ratioF = ((NvU64)modesetInfo.pixelClockHz * modesetInfo.depth * PrecisionFactor) / DSC_FACTOR; + + ratioF /= 8 * (NvU64)laneDataRate * linkConfig.lanes; + + if (PrecisionFactor < ratioF) // Assert if we will end up with a negative number in below + return false; + + watermarkF = (ratioF * dpInfo->tuSize * (PrecisionFactor - ratioF)) / PrecisionFactor; + + w0 = (8 / linkConfig.lanes); + if (linkConfig.bEnableFEC) + { + s = (linkConfig.lanes == 1) ? 15 : 10; + } + else + { + s = 3 - w0; + } + + dpInfo->waterMark = (unsigned)(watermarkAdjust + ((3 * (modesetInfo.depth * PrecisionFactor / (8 * numLanesPerLink * DSC_FACTOR)) + watermarkF) / PrecisionFactor) + w0 + 3); + + s = ((NvS32)ratioF * s); + + dpInfo->waterMark = (unsigned)((NvS32)dpInfo->waterMark + (s / (NvS32)PrecisionFactor)); + + // + // Bounds check the watermark + // + NvU32 numSymbolsPerLine = (modesetInfo.surfaceWidth * modesetInfo.depth) / (8 * linkConfig.lanes * DSC_FACTOR); + + if (dpInfo->waterMark > numSymbolsPerLine) + { + DP_PRINTF(DP_ERROR, "ERROR: watermark = %d should not be greater than numSymbolsPerLine = %d.", dpInfo->waterMark, numSymbolsPerLine); + return false; + } + + // + // Clamp the low side + // + if (dpInfo->waterMark < watermarkMinimum) + dpInfo->waterMark = watermarkMinimum; + + unsigned MinHBlank = 0; + unsigned MinHBlankFEC = 0; + NvU32 BlankingBits = 0; + NvU32 BlankingSymbolsPerLane = 0; + + BlankingBits = (3U * 8U * 4U) + (2U * 8U * numLanesPerLink); + + if (modesetInfo.bEnableDsc) + { + NvU32 sliceCount, sliceWidth, chunkSize; + + sliceCount = (modesetInfo.mode == DSC_DUAL) ? 8U : 4U; + sliceWidth = (NvU32)divide_ceil(modesetInfo.surfaceWidth, sliceCount); + chunkSize = (NvU32)divide_ceil(modesetInfo.depth * sliceWidth, 8U * DSC_FACTOR); + + if(((NvU64)(chunkSize + 1U) * sliceCount * modesetInfo.pixelClockHz) < (NvU64)(laneDataRate * numLanesPerLink * modesetInfo.surfaceWidth)) + { + // BW is plenty, this is common case. + //EOC symbols, when BW enough, only last EOC needs to be considered. + BlankingBits += 8U * numLanesPerLink; //+BlankingBits_DSC_EOC + BlankingBits += (chunkSize * 8U) - (sliceWidth * modesetInfo.depth / DSC_FACTOR); //+BlankingBits_DSC_bytePadding, only need to consider last slice + BlankingBits += (NvU32)(sliceCount * 8U * (divide_ceil(chunkSize, numLanesPerLink) * numLanesPerLink - chunkSize)); //+BlankingBits_DSC_lane_padding + } + else + { // no extra room in link BW + //EOC symbols, EOC will be accumulated until hblank period. 
+ BlankingBits += (sliceCount * 8U * numLanesPerLink); //+BlankingBits_EOC + //padding, can also use simplified but pessimistic version : BlankingBits += SliceNum * (logic_lanes *8-1); + BlankingBits += (NvU32)(sliceCount * ((divide_ceil(chunkSize, numLanesPerLink) * numLanesPerLink * 8U) - (NvU32)(sliceWidth * modesetInfo.depth / DSC_FACTOR))); //+BlankingBits_DSC_padding + } + } + else + { + NvU32 surfaceWidthPerLink = modesetInfo.surfaceWidth; + NvU32 surfaceWidthPerLane = (NvU32)divide_ceil(surfaceWidthPerLink, numLanesPerLink); + + // Padding + BlankingBits += (NvU32)divide_ceil(surfaceWidthPerLane * modesetInfo.depth, 8U) * 8U * numLanesPerLink - (NvU32)(surfaceWidthPerLink * modesetInfo.depth); //+BlankingBits_nonDSC_padding + } + + BlankingSymbolsPerLane = (NvU32)divide_ceil(BlankingBits , (8U * numLanesPerLink)); //in symbols per lane + BlankingSymbolsPerLane += (linkConfig.enhancedFraming ? 3U : 0U); + + if (linkConfig.bEnableFEC) + { + // + // In worst case, FEC symbols fall into a narrow Hblank period, + // we have to consider this in HBlank checker, see bug 200496977 + // but we don't have to consider this in the calculation of hblank_symbols + // + + MinHBlankFEC = FEC_PARITY_SYM_SST(numLanesPerLink, BlankingSymbolsPerLane); //in symbols + BlankingSymbolsPerLane += MinHBlankFEC; + } + + // BlankingSymbolsPerLane is the MinHBlank in link clock cycles, + MinHBlank = (unsigned)(divide_ceil(BlankingSymbolsPerLane * modesetInfo.pixelClockHz, + LINK_RATE_TO_DATA_RATE_8B_10B(linkConfig.peakRate))); //in pclk cycles + MinHBlank += 3U; //add some margin + + NvU32 HBlank = (modesetInfo.rasterWidth - modesetInfo.surfaceWidth); + + if (MinHBlank > HBlank) + { + DP_PRINTF(DP_ERROR, "ERROR: Blanking Width is smaller than minimum permissible value."); + return false; + } + + // Bug 702290 - Active Width should be greater than 60 + if (modesetInfo.surfaceWidth <= 60) + { + DP_PRINTF(DP_ERROR, "ERROR: Minimum Horizontal Active Width <= 60 not supported."); + return false; + } + + NvU32 total_hblank_symbols = (NvS32)divide_ceil((HBlank * LINK_RATE_TO_DATA_RATE_8B_10B(linkConfig.peakRate)), modesetInfo.pixelClockHz); + NvS32 hblank_symbols = (NvS32)(((NvU64)(HBlank - MinHBlank) * LINK_RATE_TO_DATA_RATE_8B_10B(linkConfig.peakRate)) / modesetInfo.pixelClockHz); + + if (linkConfig.bEnableFEC) + { + hblank_symbols -= (FEC_PARITY_SYM_SST(numLanesPerLink, total_hblank_symbols)); + hblank_symbols += MinHBlankFEC; + } + + //reduce HBlank Symbols to account for secondary data packet + hblank_symbols -= 1; //Stuffer latency to send BS + hblank_symbols -= 3; //SPKT latency to send data to stuffer + hblank_symbols -= 3; //add some margin + + dpInfo->hBlankSym = (hblank_symbols < 0) ? 0 : hblank_symbols; + + // + // Audio IMP calculations + // + + // From dev_disp.ref: + + // The packet generation logic needs to know the length of the hblank period. If there is no room + // in the current hblank for a new packet, it will be delayed until the next blanking period. This + // field should be programmed during the second Supervisor interrupt based on the new raster + // dimensions. + + // ... + + // -------------------------------------- + // The following formulas can be used to calculate the maximum audio sampling rate that can + // be supported by DisplayPort given the current raster dimensions. DisplayPort has much more + // bandwidth during blanking periods than HDMI has, so hblank size is less of an issue. + + // ... 
+ + // Size of a packet for 2ch audio = 20 symbols (up to 2 samples) + // Size of a packet for 8ch audio = 40 symbols + // Size of an audio packet header plus control symbols = 2*#lanes + 8 symbols (assuming < 32 samples per line) + // number of packets/hblank for 2ch audio = Floor ((number of free symbols/hblank - (2*#lanes + 8) / 20) + // number of packets/hblank for 8ch audio = Floor ((number of free symbols/hblank - (2*#lanes + 8) / 40) + + // Maximum audio sample rate possible: + // number of audio samples/line = SetRasterSize.Width * audio_fs / pclk + // number of audio packets needed for 2ch audio = Ceiling(SetRasterSize.Width * audio_fs / (pclk*2)) + // number of audio packets needed for 3-8ch audio = SetRasterSize.Width * audio_fs / pclk + + // If number of audio packets needed > number of packets/hblank, then you cannot support that audio frequency + + // Note that the hBlankSym calculated is per lane. So the number of symbols available for audio is + // (number of lanes * hBlankSym). + // The calculation of audio packets per Hblank needs to account for the following - + // 2 symbols for SS and SE; 8 symbols for header; and additional 2 symbols to account for actual values used by HW. + // -------------------------------------- + + if (modesetInfo.twoChannelAudioHz != 0) + { + if ((dpInfo->hBlankSym * numLanesPerLink) < ((2 * numLanesPerLink) + 8)) + { + // There aren't enough symbols/hblank available. + return false; + } + + NvU32 twoChannelAudioPacketsPerHBlank = (NvU32)divide_floor(((dpInfo->hBlankSym * numLanesPerLink) - (2 * numLanesPerLink) - 8 - (2 * numLanesPerLink)), 20); + + NvU32 twoChannelAudioPackets = (NvU32)divide_ceil(modesetInfo.twoChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz * 2); + + if (twoChannelAudioPackets > twoChannelAudioPacketsPerHBlank) + { + // There aren't enough packets/hblank available. + return false; + } + } + + if (modesetInfo.eightChannelAudioHz != 0) + { + if ((dpInfo->hBlankSym * numLanesPerLink) < (2 * numLanesPerLink + 8)) + { + // There aren't enough symbols/hblank available. + return false; + } + + NvU32 eightChannelAudioPacketsPerHBlank = (NvU32)divide_floor(((dpInfo->hBlankSym * numLanesPerLink) - (2 * numLanesPerLink) - 8 - (2 * numLanesPerLink)), 40); + + NvU32 eightChannelAudioPackets = (NvU32)divide_ceil(modesetInfo.eightChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz); + + if (eightChannelAudioPackets > eightChannelAudioPacketsPerHBlank) + { + // There aren't enough packets/hblank available. + return false; + } + } + + // Refer to dev_disp.ref for more information. + // # symbols/vblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - SetRasterBlankStart.X - 40) * link_clk / pclk) - Y - 1; + // where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39 + if (modesetInfo.surfaceWidth < 40) + { + vblank_symbols = 0; + } + else + { + vblank_symbols = (NvS32)(((NvU64)(modesetInfo.surfaceWidth - 3) * LINK_RATE_TO_DATA_RATE_8B_10B(linkConfig.peakRate)) / modesetInfo.pixelClockHz); + + // + // The active region transmission is delayed because of lane fifo storage. + // compare to the negedge of hblank, all the BE will be delayed by watermark/ratio cycles. + // compare to the posedge of hblank(i.e. 
the time of sending out BS symbols in vblank period), + // all the BS after active pixels will be delayed by maximum 1.5 TU cycles, + // the delay of the BS will cause the 1st vblank line shorter than expected, + // but it will squeeze hblank period first, + // if hblank is short, the BS will be in hactive period and impact vblank_symbols. + // + + NvS32 squeezed_symbols = (dpInfo->tuSize * 3 / 2) - hblank_symbols; + squeezed_symbols = DP_MAX(squeezed_symbols, 0); + NvS32 msa_symbols = (36 / numLanesPerLink) + 3; + + // + // MSA can't be in the 1st vblank line, except v_front_porch=0 + // if we know v_front_porch != 0, + // we can use MAX(squeezed_symbols, msa_symbols) instead of squeezed_symbols+msa_symbols + // + vblank_symbols -= (squeezed_symbols + msa_symbols); + + if (linkConfig.bEnableFEC) + { + vblank_symbols -= FEC_PARITY_SYM_SST(numLanesPerLink, vblank_symbols); + } + vblank_symbols -= 3U; //add some margin + } + + dpInfo->vBlankSym = (vblank_symbols < 0) ? 0 : vblank_symbols; + + if (modesetInfo.bEnableDsc) + { + // + // For DSC enabled case, the vblank_symbols must be large enough to accommodate DSC PPS SDP, see bug 2760673 + // For 1 lane, it requires at least 170+13 symbols + // For 2 lane, it requires at least 86+3 symbols + // For 4 lane, it requires at least 44+3 symbols + // normally, no need to check this, except in some small resolution test case. + // + if ((numLanesPerLink == 1U) && (dpInfo->vBlankSym < 183U)) + { + return false; + } + else if ((numLanesPerLink == 2U) && (dpInfo->vBlankSym < 89U)) + { + return false; + } + if ((numLanesPerLink == 4U) && (dpInfo->vBlankSym <47U)) + { + return false; + } + } + + return true; +} + +bool DisplayPort::isModePossibleMSTWithFEC +( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo +) +{ + // + // This function is for multistream only! + // Refer to Bug 200406501 and 200401850 for algorithm + // + DP_ASSERT(linkConfig.multistream); + + if (!modesetInfo.pixelClockHz || !modesetInfo.depth) + { + DP_ASSERT(0 && "INVALID PIXEL CLOCK and DEPTH sent by the client "); + return false; + } + + if (linkConfig.lanes == 0) + { + DP_ASSERT(0 && "No Active link / link train failed "); + return false; + } + + // depth is multiplied by 16 in case of DSC enable + unsigned DSC_FACTOR = modesetInfo.bEnableDsc ? 16 : 1; + dpInfo->tuSize = 64; + + NvU32 BlankingBits, BlankingSymbolsPerLane; + NvU32 numLanesPerLink = 4U; + NvU32 MinHBlank; + + BlankingBits = (3U * 8U * 4U) + (2U * 8U * numLanesPerLink); + + if(modesetInfo.bEnableDsc) + { + NvU32 sliceCount, sliceWidth, chunkSize; + + sliceCount = (modesetInfo.mode == DSC_DUAL) ? 8U : 4U; + sliceWidth = (NvU32)divide_ceil(modesetInfo.surfaceWidth, sliceCount); + chunkSize = (NvU32)divide_ceil(modesetInfo.depth * sliceWidth, 8U * DSC_FACTOR); + + //EOC symbols, EOC will be accumulated until hblank period. 
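+        // For illustration only (hypothetical mode): with sliceCount = 4,
+        // numLanesPerLink = 4 and chunkSize = 961 bytes, the EOC term below
+        // adds 4 * 8 * 4 = 128 bits, and the padding term rounds each chunk
+        // up to ceil(961 / 4) * 4 = 964 bytes before subtracting the exact
+        // slice payload.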
+ BlankingBits += (sliceCount * 8U * numLanesPerLink); //+BlankingBits_EOC + //+BlankingBits_DSC_padding + BlankingBits += (NvU32)(sliceCount * ((divide_ceil(chunkSize, numLanesPerLink) * numLanesPerLink * 8U) - (NvU32)(sliceWidth * modesetInfo.depth / DSC_FACTOR))); + } + else + { + NvU32 surfaceWidthPerLane = (NvU32)divide_ceil(modesetInfo.surfaceWidth, numLanesPerLink); + + //Extra bits sent due to pixel steering + BlankingBits = (NvU32)divide_ceil(surfaceWidthPerLane * modesetInfo.depth, 8U) * 8U * numLanesPerLink - (NvU32)(modesetInfo.surfaceWidth * modesetInfo.depth); //+BlankingBits_nonDSC_padding + } + + BlankingSymbolsPerLane = (NvU32)divide_ceil(BlankingBits, (8U * numLanesPerLink)); //in symbols per lane + + MinHBlank = (NvU32)divide_ceil(BlankingSymbolsPerLane * 8U * numLanesPerLink * DSC_FACTOR, modesetInfo.depth); + MinHBlank += 3U; //add some margin + + NvU32 HBlank = (modesetInfo.rasterWidth - modesetInfo.surfaceWidth); + + if (MinHBlank > HBlank) + { + DP_PRINTF(DP_ERROR, "ERROR: Blanking Width is smaller than minimum permissible value."); + return false; + } + + // Bug 702290 - Active Width should be greater than 60 + if (modesetInfo.surfaceWidth <= 60) + { + DP_PRINTF(DP_ERROR, "ERROR: Minimum Horizontal Active Width <= 60 not supported."); + return false; + } + + // MST can do SDP splitting so all audio configuration are possible. + dpInfo->hBlankSym = 0U; + dpInfo->vBlankSym = 0U; + + return true; +} + +unsigned DisplayPort::pbnForMode(const ModesetInfo & modesetInfo, bool bAccountSpread) +{ + unsigned bpp_factor; + NvU64 pbn_numerator, pbn_denominator; + + if (modesetInfo.bEnableDsc) + { + if (modesetInfo.depth > 512U) + { + bpp_factor = 256U; + } + else + { + // Pre-Blackwell, depth will have bppx16 + bpp_factor = 16U; + } + } + else + { + if (modesetInfo.depth > 36U) + { + // Blackwell and later, depth will have effectiveBppx256 + bpp_factor = 256U; + } + else + { + bpp_factor = 1U; + } + } + + pbn_numerator = modesetInfo.pixelClockHz * modesetInfo.depth * 64 / 8; + pbn_denominator = 54000000ULL * bpp_factor; + + if (bAccountSpread) + { + pbn_numerator *= 1006; + pbn_denominator *= 1000; + } + + return (NvU32)(divide_ceil(pbn_numerator, pbn_denominator)); +} diff --git a/src/common/displayport/src/dptestutil/dp_testmessage.cpp b/src/common/displayport/src/dptestutil/dp_testmessage.cpp new file mode 100644 index 0000000..c49506e --- /dev/null +++ b/src/common/displayport/src/dptestutil/dp_testmessage.cpp @@ -0,0 +1,168 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_testmessage.cpp                                                *
+*    Used for DP Test Utility                                               *
+*                                                                           *
+\***************************************************************************/
+#include "dp_internal.h"
+#include "dp_auxdefs.h"
+#include "dp_messages.h"
+#include "dp_testmessage.h"
+#include "dp_connectorimpl.h"
+using namespace DisplayPort;
+
+// The DP lib status must be set to DONE, indicating there is no pending message.
+void DPTestMessageCompletion::messageFailed(MessageManager::Message * from, NakData * data)
+{
+    parent->testMessageStatus = DP_TESTMESSAGE_REQUEST_STATUS_DONE;
+
+    if (from->getMsgType() == NV_DP_SBMSG_REQUEST_ID_QUERY_STREAM_ENCRYPTION_STATUS)
+    {
+        delete (QueryStreamEncryptionMessage *)from;
+    }
+    else
+    {
+        DP_ASSERT(0 && "unknown msg type when msg failed");
+    }
+}
+
+void DPTestMessageCompletion::messageCompleted(MessageManager::Message * from)
+{
+    parent->testMessageStatus = DP_TESTMESSAGE_REQUEST_STATUS_DONE;
+
+    if (from->getMsgType() == NV_DP_SBMSG_REQUEST_ID_QUERY_STREAM_ENCRYPTION_STATUS)
+    {
+        ((QueryStreamEncryptionMessage *)from)->getReply(&parent->qsesReply);
+        delete (QueryStreamEncryptionMessage *)from;
+    }
+    else
+    {
+        DP_ASSERT(0 && "unknown msg type when msg complete");
+    }
+}
+
+MessageManager * TestMessage::getMessageManager()
+{
+    return pMsgManager;
+}
+
+// pBuffer should point to a DP_TESTMESSAGE_REQUEST_QSES_INPUT structure
+void TestMessage::sendTestMsgQSES(void *pBuffer)
+{
+    // Generate the pseudo-random nonce
+    QSENonceGenerator qseNonceGenerator;
+
+    // For QSES, send to the root branch
+    Address address(0);
+    CLIENTID clientId;
+    QueryStreamEncryptionMessage *pQseMessage = new QueryStreamEncryptionMessage();
+
+    DP_TESTMESSAGE_REQUEST_QSES_INPUT *pQSES =
+                        (DP_TESTMESSAGE_REQUEST_QSES_INPUT *)pBuffer;
+
+    pQseMessage->set(address,
+                     pQSES->streamID,
+                     clientId.data,
+                     CP_IRQ_ON,
+                     STREAM_EVENT_MASK_ON,
+                     Force_Reauth,
+                     STREAM_BEHAVIOUR_MASK_ON);
+
+    pMsgManager->post(pQseMessage, &diagCompl);
+}
+
+//
+// This function requires that the request struct size be checked first, to ensure the
+// right structure is used and no BSOD will happen.
+//
+// For each request type, the DP lib status for that type should be checked to guard against
request conflicts.
+// At any one time, only ONE instance of each request type can be processed.
+//
+DP_TESTMESSAGE_STATUS TestMessage::sendDPTestMessage
+(
+    void    *pBuffer,
+    NvU32    requestSize,
+    NvU32   *pDpStatus
+)
+{
+    DP_ASSERT(pBuffer);
+    DP_TESTMESSAGE_REQUEST_TYPE type;
+
+    // the buffer must contain at least a requestType field
+    if (requestSize < sizeof(DP_TESTMESSAGE_REQUEST_TYPE))
+        return DP_TESTMESSAGE_STATUS_ERROR_INVALID_PARAM;
+
+    type = *(DP_TESTMESSAGE_REQUEST_TYPE *)pBuffer;
+
+    if (!isValidStruct(type, requestSize))
+        return DP_TESTMESSAGE_STATUS_ERROR_INVALID_PARAM;
+
+    switch (type)
+    {
+        case DP_TESTMESSAGE_REQUEST_TYPE_QSES:
+            // new request, try to send the message
+            if (*pDpStatus == DP_TESTMESSAGE_REQUEST_STATUS_NEWREQUEST)
+            {
+                // a request is still being processed; a new one is not allowed now
+                if (testMessageStatus == DP_TESTMESSAGE_REQUEST_STATUS_PENDING)
+                {
+                    *pDpStatus = DP_TESTMESSAGE_REQUEST_STATUS_ERROR;
+                    return DP_TESTMESSAGE_STATUS_ERROR;
+                }
+                else
+                {
+                    sendTestMsgQSES(pBuffer);
+                    // need to change the DP lib status accordingly
+                    *pDpStatus = DP_TESTMESSAGE_REQUEST_STATUS_PENDING;
+                    testMessageStatus = DP_TESTMESSAGE_REQUEST_STATUS_PENDING;
+                }
+            }
+            // old request, check whether the request finished
+            else if (*pDpStatus == DP_TESTMESSAGE_REQUEST_STATUS_PENDING)
+            {
+                // already finished, fill in the data
+                if (testMessageStatus == DP_TESTMESSAGE_REQUEST_STATUS_DONE)
+                {
+                    DP_TESTMESSAGE_REQUEST_QSES_INPUT *p =
+                               (DP_TESTMESSAGE_REQUEST_QSES_INPUT *)pBuffer;
+                    p->reply = *(DP_TESTMESSAGE_REQUEST_QSES_OUTPUT *)&qsesReply;
+                    *pDpStatus = DP_TESTMESSAGE_REQUEST_STATUS_DONE;
+                }
+                // otherwise, just return and ask the user to try again
+            }
+            break;
+        default:
+            *pDpStatus = DP_TESTMESSAGE_REQUEST_STATUS_ERROR;
+            return DP_TESTMESSAGE_STATUS_ERROR;
+    }
+
+    return DP_TESTMESSAGE_STATUS_SUCCESS;
+}
+
diff --git a/src/common/inc/displayport/displayport.h b/src/common/inc/displayport/displayport.h
new file mode 100644
index 0000000..75a5844
--- /dev/null
+++ b/src/common/inc/displayport/displayport.h
@@ -0,0 +1,771 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DISPLAYPORT_H_
+#define _DISPLAYPORT_H_
+#include "nvcfg_sdk.h"
+
+#include "nvmisc.h"
+#include "dpcd.h"
+#include "dpcd14.h"
+#include "dpcd20.h"
+
+/**************** Resource Manager Defines and Structures ******************\
+*                                                                           *
+* Module: DISPLAYPORT.H                                                     *
+*       Defines DISPLAYPORT V1.2                                            *
+*                                                                           *
+\***************************************************************************/
+
+//
+// 4 Legacy Link Rates: RBR, HBR, HBR2, HBR3
+// 4 ILRs: 2.16G, 2.43G, 3.24G, 4.32G
+//
+#define NV_SUPPORTED_DP1X_LINK_RATES__SIZE  8
+
+// Displayport interoperability with HDMI dongle i2c addr
+#define DP2HDMI_DONGLE_I2C_ADDR             0x80
+#define DP2HDMI_DONGLE_DDC_BUFFER_ID_LEN    16
+#define DP2HDMI_DONGLE_CAP_BUFFER_LEN       32
+
+// For 8b/10b link rate to data rate, linkRate * 8/10 * 1/8 * 10M -> (linkRate * 1000000)
+// For 8b/10b data rate to link rate, dataRate * 10/8 * 8 * 1/10M -> (dataRate / 1000000)
+#define LINK_RATE_TO_DATA_RATE_8B_10B(linkRate)     (linkRate * 1000000UL)
+#define DATA_RATE_8B_10B_TO_LINK_RATE(dataRate)     (dataRate / 1000000UL)
+
+// To calculate the effective link rate with channel encoding accounted
+#define OVERHEAD_8B_10B(linkRate)                   ((linkRate * 8) * 1/10)
+
+// Convert data rate to link rate in bps
+#define DATA_RATE_8B_10B_TO_LINK_RATE_BPS(dataRate) (dataRate * 10)
+
+// Convert link rate in bps to data rate
+#define LINK_RATE_BPS_TO_DATA_RATE_8B_10B(linkRate) (linkRate / 10)
+
+// Offset to read the dongle identifier
+#define NV_DP2HDMI_DONGLE_IDENTIFIER                    (0x00000010)
+#define NV_DP2HDMI_DONGLE_IDENTIFIER_ADAPTER_REV        2:0
+#define NV_DP2HDMI_DONGLE_IDENTIFIER_ADAPTER_REV_TYPE2  (0x00000000)
+#define NV_DP2HDMI_DONGLE_IDENTIFIER_ADAPTER_ID         7:4
+#define NV_DP2HDMI_DONGLE_IDENTIFIER_ADAPTER_ID_TYPE2   (0x0000000A)
+
+// Offset to read the dongle TMDS clock rate
+#define NV_DP2HDMI_DONGLE_TMDS_CLOCK_RATE               (0x0000001D)
+
+// HDMI dongle types
+#define DP2HDMI_DONGLE_TYPE_1               0x1
+#define DP2HDMI_DONGLE_TYPE_2               0x2
+
+// HDMI dongle frequency limits
+#define DP2HDMI_DONGLE_TYPE_1_PCLK_LIMIT    165*1000*1000
+#define DP2HDMI_DONGLE_TYPE_2_PCLK_LIMIT    300*1000*1000
+
+#define DPCD_VERSION_12     0x12
+#define DPCD_VERSION_13     0x13
+#define DPCD_VERSION_14     0x14
+
+#define DP_LINKINDEX_0      0x0
+#define DP_LINKINDEX_1      0x1
+
+// Two Head One OR
+#define NV_PRIMARY_HEAD_INDEX_0     0
+#define NV_SECONDARY_HEAD_INDEX_1   1
+#define NV_PRIMARY_HEAD_INDEX_2     2
+#define NV_SECONDARY_HEAD_INDEX_3   3
+
+typedef enum
+{
+    displayPort_Lane0 = 0,
+    displayPort_Lane1 = 1,
+    displayPort_Lane2 = 2,
+    displayPort_Lane3 = 3,
+    displayPort_Lane4 = 4,
+    displayPort_Lane5 = 5,
+    displayPort_Lane6 = 6,
+    displayPort_Lane7 = 7,
+    displayPort_LaneSupported
+} DP_LANE;
+
+typedef enum
+{
+    laneCount_0 = 0x0,
+    laneCount_1 = 0x1,
+    laneCount_2 = 0x2,
+    laneCount_4 = 0x4,
+    laneCount_8 = 0x8,
+    laneCount_Supported
+} DP_LANE_COUNT;
+
+typedef enum
+{
+    // enum value unit = 270M
+    linkBW_1_62Gbps = 0x06,
+    linkBW_2_16Gbps = 0x08,
+    linkBW_2_43Gbps = 0x09,
+    linkBW_2_70Gbps = 0x0A,
+    linkBW_3_24Gbps = 0x0C,
+    linkBW_4_32Gbps = 0x10,
+    linkBW_5_40Gbps = 0x14,
+    linkBW_8_10Gbps = 0x1E,
+    linkBW_Supported
+} DP_LINK_BANDWIDTH;
+typedef enum
+{
+    // enum value unit = 10M
+    dp2LinkRate_1_62Gbps = 0x00A2,    // 162
+    dp2LinkRate_2_16Gbps = 0x00D8,    // 216
+    dp2LinkRate_2_43Gbps = 0x00F3,    // 243
+    dp2LinkRate_2_50Gbps = 0x00FA,    // 250
+    dp2LinkRate_2_70Gbps = 0x010E,    // 270
+    dp2LinkRate_3_24Gbps = 0x0144,    // 324
+    dp2LinkRate_4_32Gbps = 0x01B0,    // 432
+    dp2LinkRate_5_40Gbps = 0x021C,    // 540
+    dp2LinkRate_8_10Gbps = 0x032A,    // 810
+    dp2LinkRate_Supported
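+    //
+    // Worked example (for illustration): HBR3 is encoded as 0x032A = 810,
+    // i.e. 810 x 10 Mbps = 8.1 Gbps per lane. Feeding the same 10M-unit
+    // value into the 8b/10b macro above, LINK_RATE_TO_DATA_RATE_8B_10B(810)
+    // yields 810000000 bytes/sec per lane (8.1 Gbps x 8/10 coding
+    // efficiency, divided by 8 bits per byte).
+    //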
+} DP2X_LINKRATE_10M; + +typedef enum +{ + // Uses 8b/10b channel encoding + // Link Data Rate = link rate * (8 / 10) / 8 + // = link rate * 0.1 + dataRate_1_62Gbps = 162000000, + dataRate_2_16Gbps = 216000000, + dataRate_2_43Gbps = 243000000, + dataRate_2_70Gbps = 270000000, + dataRate_3_24Gbps = 324000000, + dataRate_4_32Gbps = 432000000, + dataRate_5_40Gbps = 540000000, + dataRate_8_10Gbps = 810000000 +} DP_LINK_8B_10B_DATA_RATES; + +#define IS_8B_10B_CODING(dataRate) (((NvU64)(dataRate)== dataRate_1_62Gbps) || \ + ((NvU64)(dataRate)== dataRate_2_16Gbps) || \ + ((NvU64)(dataRate)== dataRate_2_43Gbps) || \ + ((NvU64)(dataRate)== dataRate_2_70Gbps) || \ + ((NvU64)(dataRate)== dataRate_3_24Gbps) || \ + ((NvU64)(dataRate)== dataRate_4_32Gbps) || \ + ((NvU64)(dataRate)== dataRate_5_40Gbps) || \ + ((NvU64)(dataRate)== dataRate_8_10Gbps)) + +typedef enum +{ + linkSpeedId_1_62Gbps = 0x00, + linkSpeedId_2_70Gbps = 0x01, + linkSpeedId_5_40Gbps = 0x02, + linkSpeedId_8_10Gbps = 0x03, + linkSpeedId_2_16Gbps = 0x04, + linkSpeedId_2_43Gbps = 0x05, + linkSpeedId_3_24Gbps = 0x06, + linkSpeedId_4_32Gbps = 0x07, + linkSpeedId_Supported +} DP_LINK_SPEED_INDEX; + +typedef enum +{ + postCursor2_Level0 = 0, + postCursor2_Level1 = 1, + postCursor2_Level2 = 2, + postCursor2_Level3 = 3, + postCursor2_Supported +} DP_POSTCURSOR2; + +typedef enum +{ + preEmphasis_Disabled = 0, + preEmphasis_Level1 = 1, + preEmphasis_Level2 = 2, + preEmphasis_Level3 = 3, + preEmphasis_Supported +} DP_PREEMPHASIS; + +typedef enum +{ + driveCurrent_Level0 = 0, + driveCurrent_Level1 = 1, + driveCurrent_Level2 = 2, + driveCurrent_Level3 = 3, + driveCurrent_Supported +} DP_DRIVECURRENT; + +typedef enum +{ + trainingPattern_Disabled = 0x0, + trainingPattern_1 = 0x1, + trainingPattern_2 = 0x2, + trainingPattern_3 = 0x3, + trainingPattern_4 = 0xB +} DP_TRAININGPATTERN; + +typedef enum +{ + dpOverclock_Percentage_0 = 0, + dpOverclock_Percentage_10 = 10, + dpOverclock_Percentage_20 = 20 +}DP_OVERCLOCKPERCENTAGE; + +typedef enum +{ + dpColorFormat_RGB = 0, + dpColorFormat_YCbCr444 = 0x1, + dpColorFormat_YCbCr422 = 0x2, // this is for simple 422 + dpColorFormat_YCbCr420 = 0x3, + dpColorFormat_YCbCr422_Native = 0x4, + dpColorFormat_Unknown = 0xF +} DP_COLORFORMAT; + +typedef enum +{ + dp_pktType_VideoStreamconfig = 0x7, + dp_pktType_CeaHdrMetaData = 0x21, + dp_pktType_SRInfoFrame = 0x7f, // Self refresh infoframe for eDP enter/exit self refresh, SRS 1698 + dp_pktType_Cea861BInfoFrame = 0x80, + dp_pktType_VendorSpecInfoFrame = 0x81, + dp_pktType_AviInfoFrame = 0x82, + dp_pktType_AudioInfoFrame = 0x84, + dp_pktType_SrcProdDescInfoFrame = 0x83, + dp_pktType_MpegSrcInfoFrame = 0x85, + dp_pktType_DynamicRangeMasteringInfoFrame = 0x87 +} DP_PACKET_TYPE; + +typedef enum +{ + DSC_SLICES_PER_SINK_1 = 1, + DSC_SLICES_PER_SINK_2 = 2, + DSC_SLICES_PER_SINK_4 = 4, + DSC_SLICES_PER_SINK_6 = 6, + DSC_SLICES_PER_SINK_8 = 8, + DSC_SLICES_PER_SINK_10 = 10, + DSC_SLICES_PER_SINK_12 = 12, + DSC_SLICES_PER_SINK_16 = 16, + DSC_SLICES_PER_SINK_20 = 20, + DSC_SLICES_PER_SINK_24 = 24 +} DscSliceCount; + +typedef enum +{ + DSC_BITS_PER_COLOR_MASK_8 = 1, + DSC_BITS_PER_COLOR_MASK_10 = 2, + DSC_BITS_PER_COLOR_MASK_12 = 4 +}DscBitsPerColorMask; + +enum DSC_MODE +{ + DSC_SINGLE, + DSC_DUAL, + DSC_DROP, + DSC_MODE_NONE +}; + +typedef enum +{ + BITS_PER_PIXEL_PRECISION_1_16 = 0, + BITS_PER_PIXEL_PRECISION_1_8 = 1, + BITS_PER_PIXEL_PRECISION_1_4 = 2, + BITS_PER_PIXEL_PRECISION_1_2 = 3, + BITS_PER_PIXEL_PRECISION_1 = 4 +}BITS_PER_PIXEL_INCREMENT; + +typedef enum +{ + 
NV_DP_FEC_UNCORRECTED = 0, + NV_DP_FEC_CORRECTED = 1, + NV_DP_FEC_BIT = 2, + NV_DP_FEC_PARITY_BLOCK = 3, + NV_DP_FEC_PARITY_BIT = 4 +}FEC_ERROR_COUNTER; + +typedef struct DscCaps +{ + NvBool bDSCSupported; + NvBool bDSCDecompressionSupported; + NvBool bDynamicPPSSupported; + NvBool bDynamicDscToggleSupported; + NvBool bDSCPassThroughSupported; + unsigned versionMajor, versionMinor; + unsigned rcBufferBlockSize; + unsigned rcBuffersize; + unsigned maxSlicesPerSink; + unsigned lineBufferBitDepth; + NvBool bDscBlockPredictionSupport; + unsigned maxBitsPerPixelX16; + unsigned sliceCountSupportedMask; + + struct + { + NvBool bRgb; + NvBool bYCbCr444; + NvBool bYCbCrSimple422; + NvBool bYCbCrNative422; + NvBool bYCbCrNative420; + }dscDecoderColorFormatCaps; + + unsigned dscDecoderColorDepthMask; + unsigned dscPeakThroughputMode0; + unsigned dscPeakThroughputMode1; + unsigned dscMaxSliceWidth; + + unsigned branchDSCOverallThroughputMode0; + unsigned branchDSCOverallThroughputMode1; + unsigned branchDSCMaximumLineBufferWidth; + + BITS_PER_PIXEL_INCREMENT dscBitsPerPixelIncrement; +} DscCaps; + +typedef struct GpuDscCrc +{ + NvU16 gpuCrc0; + NvU16 gpuCrc1; + NvU16 gpuCrc2; +} gpuDscCrc; + +typedef struct SinkDscCrc +{ + NvU16 sinkCrc0; + NvU16 sinkCrc1; + NvU16 sinkCrc2; +} sinkDscCrc; + +typedef struct +{ + NvBool bSourceControlModeSupported; + NvBool bConcurrentLTSupported; + NvBool bConv444To420Supported; + NvU32 maxTmdsClkRate; + NvU8 maxBpc; + NvU8 maxHdmiLinkBandwidthGbps; +} PCONCaps; + +typedef enum +{ + PCON_HDMI_LINK_BW_FRL_9GBPS = 0, + PCON_HDMI_LINK_BW_FRL_18GBPS, + PCON_HDMI_LINK_BW_FRL_24GBPS, + PCON_HDMI_LINK_BW_FRL_32GBPS, + PCON_HDMI_LINK_BW_FRL_40GBPS, + PCON_HDMI_LINK_BW_FRL_48GBPS, + PCON_HDMI_LINK_BW_FRL_INVALID +} PCONHdmiLinkBw; + +typedef enum +{ + NV_DP_PCON_CONTROL_STATUS_SUCCESS = 0, + NV_DP_PCON_CONTROL_STATUS_ERROR_TIMEOUT = 0x80000001, + NV_DP_PCON_CONTROL_STATUS_ERROR_FRL_LT_FAILURE = 0x80000002, + NV_DP_PCON_CONTROL_STATUS_ERROR_FRL_NOT_SUPPORTED = 0x80000003, + NV_DP_PCON_CONTROL_STATUS_ERROR_GENERIC = 0x8000000F +} NV_DP_PCON_CONTROL_STATUS; +// +// Poll HDMI-Link Status change and FRL Ready. +// Spec says it should be done in 500ms, we give it 20% extra time: +// 60 times with interval 10ms. +// +#define NV_PCON_SOURCE_CONTROL_MODE_TIMEOUT_THRESHOLD (60) +#define NV_PCON_SOURCE_CONTROL_MODE_TIMEOUT_INTERVAL_MS (10) +// +// Poll HDMI-Link Status change IRQ and Link Status. +// Spec says it should be done in 250ms, we give it 20% extra time: +// 30 times with interval 10ms. +// +#define NV_PCON_FRL_LT_TIMEOUT_THRESHOLD (30) +#define NV_PCON_FRL_LT_TIMEOUT_INTERVAL_MS (10) + +typedef struct _PCONLinkControl +{ + struct + { + // This struct is being passed in for assessPCONLink I/F + NvU32 bAssessLink : 1; + + // Specify if client wants to use src control - set it false DPLib can just do DP LT alone. + // By default it should be true. + NvU32 bSourceControlMode : 1; + + // Default is sequential mode, set this to choose concurrent mode + NvU32 bConcurrentMode : 1; + + // Default is normal link training mode (stop once FRL-LT succeed). + // Set this to link train all requested FRL Bw in allowedFrlBwMask. + NvU32 bExtendedLTMode : 1; + + // Keep PCON links (DP and FRL link) alive + NvU32 bKeepPCONLinkAlive : 1; + + // Default DPLib will fallback to autonomous mode and perform DP assessLink. + NvU32 bSkipFallback : 1; + } flags; + + // Input: Clients use this to specify the FRL BW PCON should try. 
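+    // (Hedged illustration: the mask is indexed by PCONHdmiLinkBw, as
+    // consumed by getMaxFrlBwFromMask() below, so a client willing to train
+    // up to 24 Gbps would pass
+    //     (1 << PCON_HDMI_LINK_BW_FRL_9GBPS)  |
+    //     (1 << PCON_HDMI_LINK_BW_FRL_18GBPS) |
+    //     (1 << PCON_HDMI_LINK_BW_FRL_24GBPS).)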
+ NvU32 frlHdmiBwMask; + + struct + { + NV_DP_PCON_CONTROL_STATUS status; + PCONHdmiLinkBw maxFrlBwTrained; + NvU32 trainedFrlBwMask; + } result; +} PCONLinkControl; + +static NV_INLINE PCONHdmiLinkBw getMaxFrlBwFromMask(NvU32 frlRateMask) +{ + if (frlRateMask == 0) + { + // Nothing is set. Assume TMDS + return PCON_HDMI_LINK_BW_FRL_INVALID; + } + + // find highest set bit (destructive operation) + HIGHESTBITIDX_32(frlRateMask); + + return (PCONHdmiLinkBw)frlRateMask; +} + +/* + EDP VESA PSR defines +*/ + +// PSR state transitions +typedef enum +{ + vesaPsrStatus_Inactive = 0, + vesaPsrStatus_Transition2Active = 1, + vesaPsrStatus_DisplayFromRfb = 2, + vesaPsrStatus_CaptureAndDisplay = 3, + vesaPsrStatus_Transition2Inactive = 4, + vesaPsrStatus_Undefined5 = 5, + vesaPsrStatus_Undefined6 = 6, + vesaPsrStatus_SinkError = 7 +} vesaPsrState; + +typedef struct VesaPsrConfig +{ + NvU8 psrCfgEnable : 1; + NvU8 srcTxEnabledInPsrActive : 1; + NvU8 crcVerifEnabledInPsrActive : 1; + NvU8 frameCaptureSecondActiveFrame : 1; + NvU8 selectiveUpdateOnSecondActiveline : 1; + NvU8 enableHpdIrqOnCrcMismatch : 1; + NvU8 enablePsr2 : 1; + NvU8 reserved : 1; +} vesaPsrConfig; + +typedef struct VesaPsrDebugStatus +{ + NvBool lastSdpPsrState; + NvBool lastSdpUpdateRfb; + NvBool lastSdpCrcValid; + NvBool lastSdpSuValid; + NvBool lastSdpFirstSURcvd; + NvBool lastSdpLastSURcvd; + NvBool lastSdpYCoordValid; + NvU8 maxResyncFrames; + NvU8 actualResyncFrames; +} vesaPsrDebugStatus; + +typedef struct VesaPsrErrorStatus +{ + NvU8 linkCrcError : 1; + NvU8 rfbStoreError : 1; + NvU8 vscSdpError : 1; + NvU8 rsvd : 5; +} vesaPsrErrorStatus; + +typedef struct VesaPsrEventIndicator +{ + NvU8 sinkCapChange : 1; + NvU8 rsvd : 7; +} vesaPsrEventIndicator; + +#pragma pack(1) +typedef struct VesaPsrSinkCaps +{ + NvU8 psrVersion; + NvU8 linkTrainingRequired : 1; + NvU8 psrSetupTime : 3; + NvU8 yCoordinateRequired : 1; + NvU8 psr2UpdateGranularityRequired : 1; + NvU8 reserved : 2; + NvU16 suXGranularity; + NvU8 suYGranularity; +} vesaPsrSinkCaps; +#pragma pack() + +typedef struct PanelReplayCaps +{ + // Indicates if Panel replay is supported or not + NvBool bPanelReplaySupported; +} panelReplayCaps; + +typedef struct PanelReplayConfig +{ + // This field is used to configure Panel replay on sink device + NvBool enablePanelReplay; + + // This field is used to configure CRC with Panel replay on sink device + NvBool bEnableCrcWithPr; + + // Configures sink to Generate an IRQ_HPD when DPCD 02020h[3] = 1. + NvBool bHpdOnAdaptiveSyncSdpMissing; + + // + // Used to configure sink to Generate an IRQ_HPD after finding a VSC SDP + // for PR uncorrectable error. + // + NvBool bHpdOnSdpUncorrectableError; + + // Configures sink to Generate an IRQ_HPD for RFB storage error. + NvBool bHpdOnRfbStorageErrors; + + // + // Configures sink to generate an IRQ_HPD after finding an active video image + // CRC mismatch. 
+ // + NvBool bHpdOnRfbActiveFrameCrcError; +} panelReplayConfig; + +// PR state +typedef enum +{ + PanelReplay_Inactive = 0, + PanelReplay_CaptureAndDisplay = 1, + PanelReplay_DisplayFromRfb = 2, + PanelReplay_Undefined = 7 +} PanelReplayState; + +// PR Sink debug info +typedef struct PanelReplaySinkDebugInfo +{ + NvU8 activeFrameCrcError : 1; + NvU8 rfbStorageError : 1; + NvU8 vscSdpUncorrectableError: 1; + NvU8 adaptiveSyncSdpMissing : 1; + NvU8 sinkPrStatus : 3; + NvU8 sinkFramelocked : 2; + NvU8 sinkFrameLockedValid : 1; + NvU8 currentPrState : 1; + NvU8 crcValid: 1; + NvU8 suCoordinatesValid: 1; +} panelReplaySinkDebugInfo; + +typedef struct +{ + PanelReplayState prState; +} PanelReplayStatus; + +// Multiplier constant to get link frequency in KHZ +// Maximum link rate of Main Link lanes = Value x 270M. +// To get it to KHz unit, we need to multiply 270K. +#define DP_LINK_BW_FREQUENCY_MULTIPLIER_KHZ (270*1000) +#define DP_LINK_BW_FREQUENCY_MULTIPLIER_270MHZ_TO_KHZ DP_LINK_BW_FREQUENCY_MULTIPLIER_KHZ + +// Multiplier constant to get link rate table's in KHZ +#define DP_LINK_RATE_TABLE_MULTIPLIER_KHZ 200 + +// Macro to convert link rate table to 10M convention +#define LINK_RATE_200KHZ_TO_10MHZ(linkRate) (linkRate / 50) + +// +// Get link rate in multiplier of 10MHz from KHz: +// a * 1000(KHz) / 10 * 1000 * 1000(10Mhz) +// +#define LINK_RATE_KHZ_TO_10MHZ(a) ((a) / 10000) +#define LINK_RATE_10MHZ_TO_KHZ(a) ((a) * 10000) +#define LINK_RATE_270MHZ_TO_10MHZ(a) ((a) * 27) +#define LINK_RATE_10MHZ_TO_270MHZ(a) ((a) / 27) + +// +// Multiplier constant to get link frequency (multiplier of 270MHz) in MBps +// a * 270 * 1000 * 1000(270Mhz) * (8 / 10)(8b/10b) / 8(Byte) +// = a * 27000000 +// +#define DP_LINK_BW_FREQ_MULTI_MBPS 27000000 + +// Convert link rate in 10M to its value in bps +#define DP_LINK_RATE_10M_TO_BPS(linkRate) (linkRate * 10000000) + +// Convert link rate in 270M to its value in bps +#define DP_LINK_RATE_270M_TO_BPS(linkRate) (linkRate * 270000000) + +// Convert link rate from bps to Bps +#define DP_LINK_RATE_BITSPS_TO_BYTESPS(linkRate) (linkRate / 8) + +// +// Get link rate in multiplier of 270MHz from KHz: +// a * 1000(KHz) / 270 * 1000 * 1000(270Mhz) +// +#define LINK_RATE_KHZ_TO_MULTP(a) ((a) / 270000) + +// +// Get link rate in MBps from KHz: +// a * 1000 * (8 / 10)(8b/10b) / 8(Byte) +// = a * 100 +// +#define LINK_RATE_KHZ_TO_MBPS(a) ((a) * 100) + +#define DP_MAX_LANES 8 // This defines the maximum number of lanes supported on a chip. +#define DP_MAX_LANES_PER_LINK 4 // This defines the maximum number of lanes per link in a chip. +#define DP_AUX_CHANNEL_MAX_BYTES 16 +#define DP_CLOCK_RECOVERY_TOT_TRIES 10 +#define DP_CLOCK_RECOVERY_MAX_TRIES 5 +#define DP_CH_EQ_MAX_RETRIES 5 +#define DP_LT_MAX_FOR_MST_MAX_RETRIES 3 +#define DP_READ_EDID_MAX_RETRIES 7 +#define DP_AUX_CHANNEL_DEFAULT_DEFER_MAX_TRIES 7 +#define DP_AUX_CHANNEL_TIMEOUT_MAX_TRIES 2 +#define DP_SET_POWER_D0_NORMAL_MAX_TRIES 3 +#define DP_SW_AUTO_READ_REQ_SIZE 6 +#define NV_DP_RBR_FALLBACK_MAX_TRIES 3 + +#define DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_DEFAULT_MS 1 + +#define DP_AUX_CHANNEL_TIMEOUT_WAITIDLE 400 // source is required to wait at least 400us before it considers the AUX transaction to have timed out. 
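+//
+// Hedged sanity-check helper (illustration only; this function name is not
+// part of the original change). It ties the conversion macros above to the
+// DP_LINK_BANDWIDTH encoding: 5.4 Gbps (HBR2) expressed in KHz is 5400000,
+// LINK_RATE_KHZ_TO_10MHZ(5400000) == 540, and LINK_RATE_10MHZ_TO_270MHZ(540)
+// == 20 == 0x14 == linkBW_5_40Gbps.
+//
+static NV_INLINE NvBool dpLinkRateConversionExampleHolds(void)
+{
+    return (NvBool)((LINK_RATE_KHZ_TO_10MHZ(5400000) == 540) &&
+                    (LINK_RATE_10MHZ_TO_270MHZ(540) == (NvU32)linkBW_5_40Gbps));
+}
+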
+#define DP_AUX_CHANNEL_TIMEOUT_VALUE_DEFAULT    400
+#define DP_AUX_CHANNEL_TIMEOUT_VALUE_MAX        3200
+
+#define DP_PHY_REPEATER_INDEX_FOR_SINK          0xFFFFFFFF
+
+#define DP_MESSAGEBOX_SIZE                      48
+#define DP_POST_LT_ADJ_REQ_LIMIT                6
+#define DP_POST_LT_ADJ_REQ_TIMER                200000
+
+#define DP_AUX_HYBRID_TIMEOUT                   600
+#define DP_AUX_SEMA_ACQUIRE_TIMEOUT             20000
+
+#define DP_CONFIG_WATERMARK_ADJUST              2
+#define DP_CONFIG_WATERMARK_LIMIT               20
+#define DP_CONFIG_INCREASED_WATERMARK_ADJUST    8
+#define DP_CONFIG_INCREASED_WATERMARK_LIMIT     22
+
+#define NV_DP_MSA_PROPERTIES_MISC1_STEREO       2:1
+
+#define DP_LANE_STATUS_ARRAY_SIZE        ((displayPort_LaneSupported + 1) / 2)
+#define DP_LANE_STATUS_ARRAY_INDEX(lane) ((lane) < displayPort_LaneSupported ? ((lane) / 2) : 0)
+
+#define IS_VALID_LANECOUNT(val) (((NvU32)(val)==0) || ((NvU32)(val)==1) || \
+                                 ((NvU32)(val)==2) || ((NvU32)(val)==4) || \
+                                 ((NvU32)(val)==8))
+
+#define IS_STANDARD_LINKBW(val) (((NvU32)(val)==linkBW_1_62Gbps) || \
+                                 ((NvU32)(val)==linkBW_2_70Gbps) || \
+                                 ((NvU32)(val)==linkBW_5_40Gbps) || \
+                                 ((NvU32)(val)==linkBW_8_10Gbps))
+
+#define IS_INTERMEDIATE_LINKBW(val) (((NvU32)(val)==linkBW_2_16Gbps) || \
+                                     ((NvU32)(val)==linkBW_2_43Gbps) || \
+                                     ((NvU32)(val)==linkBW_3_24Gbps) || \
+                                     ((NvU32)(val)==linkBW_4_32Gbps))
+
+#define IS_VALID_LINKBW(val) (IS_STANDARD_LINKBW(val) || \
+                              IS_INTERMEDIATE_LINKBW(val))
+
+#define IS_VALID_LINKBW_10M(val)        IS_VALID_LINKBW(LINK_RATE_10MHZ_TO_270MHZ(val))
+#define IS_INTERMEDIATE_LINKBW_10M(val) IS_INTERMEDIATE_LINKBW(LINK_RATE_10MHZ_TO_270MHZ(val))
+#define IS_STANDARD_LINKBW_10M(val)     IS_STANDARD_LINKBW(LINK_RATE_10MHZ_TO_270MHZ(val))
+//
+// The Phy Repeater count read from DPCD offset F0002h is an 8-bit value in
+// which the position of the set bit encodes the total count:
+// 80h = 1 repeater, 40h = 2, 20h = 3 ... 04h = 6.
+// This function maps that encoding to a plain decimal count.
+// Note: From DP2.x, the max LTTPR count is set to 6.
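+// In closed form, a count of n (1 <= n <= 6) is encoded as (0x80 >> (n - 1)),
+// e.g. 0x80 >> 0 = 80h for one repeater and 0x80 >> 5 = 04h for six.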
+// +static NV_INLINE NvU32 mapPhyRepeaterVal(NvU32 value) +{ + switch (value) + { + case NV_DPCD14_PHY_REPEATER_CNT_VAL_0: + return 0; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_1: + return 1; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_2: + return 2; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_3: + return 3; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_4: + return 4; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_5: + return 5; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_6: + return 6; + default: + return 0; + } +} + +// HDCP specific definitions + +#define HDCP22_RTX_SIMPLE_PATTERN 0x12345678 +#define HDCP22_TX_CAPS_PATTERN_BIG_ENDIAN {0x02, 0x00, 0x00} + +#define DP_MST_HEAD_TO_STREAMID(head, pipeId, numHeads) ((head) + 1 + (pipeId) * (numHeads)) +#define DP_MST_STREAMID_TO_HEAD(streamid, pipeId, numHeads) ((streamid) - 1 - ((pipeId) * (numHeads))) +#define DP_MST_STREAMID_TO_PIPE(streamid, head, numHeads) (((streamid) - (head) - 1) / (numHeads)) + +typedef enum +{ + NV_DP_SBMSG_REQUEST_ID_GET_MESSAGE_TRANSACTION_VERSION = 0x00, + NV_DP_SBMSG_REQUEST_ID_LINK_ADDRESS = 0x01, + NV_DP_SBMSG_REQUEST_ID_CONNECTION_STATUS_NOTIFY = 0x02, + + NV_DP_SBMSG_REQUEST_ID_ENUM_PATH_RESOURCES = 0x10, + NV_DP_SBMSG_REQUEST_ID_ALLOCATE_PAYLOAD = 0x11, + NV_DP_SBMSG_REQUEST_ID_QUERY_PAYLOAD = 0x12, + NV_DP_SBMSG_REQUEST_ID_RESOURCE_STATUS_NOTIFY = 0x13, + NV_DP_SBMSG_REQUEST_ID_CLEAR_PAYLOAD_ID_TABLE = 0x14, + + NV_DP_SBMSG_REQUEST_ID_REMOTE_DPCD_READ = 0x20, + NV_DP_SBMSG_REQUEST_ID_REMOTE_DPCD_WRITE = 0x21, + NV_DP_SBMSG_REQUEST_ID_REMOTE_I2C_READ = 0x22, + NV_DP_SBMSG_REQUEST_ID_REMOTE_I2C_WRITE = 0x23, + NV_DP_SBMSG_REQUEST_ID_POWER_UP_PHY = 0x24, + NV_DP_SBMSG_REQUEST_ID_POWER_DOWN_PHY = 0x25, + + NV_DP_SBMSG_REQUEST_ID_SINK_EVENT_NOTIFY = 0x30, + NV_DP_SBMSG_REQUEST_ID_QUERY_STREAM_ENCRYPTION_STATUS = 0x38, + + NV_DP_SBMSG_REQUEST_ID_UNDEFINED = 0xFF, +} NV_DP_SBMSG_REQUEST_ID; + +// FEC + +#define NV_DP_FEC_FLAGS_SELECT_ALL 0x7 +#define NV_DP_ERROR_COUNTERS_PER_LANE 5 +#define NV_DP_MAX_NUM_OF_LANES 4 +#define NV_DP_FEC_ERROR_COUNT_INVALID 0xbadf +#define NV_DP_UNCORRECTED_ERROR NV_DP_FEC_UNCORRECTED : NV_DP_FEC_UNCORRECTED +#define NV_DP_CORRECTED_ERROR NV_DP_FEC_CORRECTED : NV_DP_FEC_CORRECTED +#define NV_DP_BIT_ERROR NV_DP_FEC_BIT : NV_DP_FEC_BIT +#define NV_DP_PARITY_BLOCK_ERROR NV_DP_FEC_PARITY_BLOCK : NV_DP_FEC_PARITY_BLOCK +#define NV_DP_PARITY_BIT_ERROR NV_DP_FEC_PARITY_BIT : NV_DP_FEC_PARITY_BIT +#define NV_DP_UNCORRECTED_ERROR_NO 0 +#define NV_DP_UNCORRECTED_ERROR_YES 1 +#define NV_DP_CORRECTED_ERROR_NO 0 +#define NV_DP_CORRECTED_ERROR_YES 1 +#define NV_DP_BIT_ERROR_NO 0 +#define NV_DP_BIT_ERROR_YES 1 +#define NV_DP_PARITY_BLOCK_ERROR_NO 0 +#define NV_DP_PARITY_BLOCK_ERROR_YES 1 +#define NV_DP_PARITY_BIT_ERROR_NO 0 +#define NV_DP_PARITY_BIT_ERROR_YES 1 + + +#endif // #ifndef _DISPLAYPORT_H_ diff --git a/src/common/inc/displayport/displayport2x.h b/src/common/inc/displayport/displayport2x.h new file mode 100644 index 0000000..bafe4e7 --- /dev/null +++ b/src/common/inc/displayport/displayport2x.h @@ -0,0 +1,185 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DISPLAYPORT2X_H_
+#define _DISPLAYPORT2X_H_
+
+#include "nvmisc.h"
+#include "dpcd.h"
+#include "dpcd14.h"
+#include "dpcd20.h"
+#include "displayport.h"
+
+/**************** Resource Manager Defines and Structures ******************\
+*                                                                           *
+* Module: DISPLAYPORT2x.H                                                   *
+*       Defines DISPLAYPORT V2.x                                            *
+*                                                                           *
+\***************************************************************************/
+
+//
+// 4 Legacy Link Rates: RBR, HBR, HBR2, HBR3
+// 5 ILRs: 2.16G, 2.43G, 3.24G, 4.32G, 6.75G
+// 3 UHBRs: 10G, 13.5G, 20G
+// 2 internal test rates: 2.5G, 5G. Do NOT use these in any other use case.
+//
+#define NV_SUPPORTED_DP2X_LINK_RATES__SIZE 14
+//
+// For 128b/132b link rate to data rate, linkRate * 128/132 * 1/8 * 10M -> ((linkRate * 4 * 10000000) / 33)
+// For 128b/132b data rate to link rate, dataRate * 132/128 * 8 * 1/10M -> ((dataRate * 33) / (4 * 10000000))
+// Data rates used here are in Bytes per second.
+//
+#define LINK_RATE_TO_DATA_RATE_128B_132B(linkRate) ((linkRate * 4 * 10000000UL) / 33)
+#define DATA_RATE_128B_132B_TO_LINK_RATE(dataRate) (NV_UNSIGNED_DIV_CEIL((dataRate * 33ULL), (4 * 10000000ULL)))
+
+// To calculate the effective link rate with channel encoding accounted
+#define OVERHEAD_128B_132B(linkRate)               ((linkRate * 128) / 132)
+
+//
+// 128b/132b precise Data Bandwidth Efficiency.
+// Per Spec 3.5.2.18, effective BW with 128b/132b channel coding is linkRate * 0.9671.
+// This covers Phy logical layer efficiency 52/1584 and link layer efficiency of 4/65540 as well.
+// Also add SSC margin of 0.5%.
+// Additionally add another 0.1% for the source to be slightly more conservative in DSC
+// environments and provide maximum compatibility for the LTTPR CDS LT sequence.
+//
+// (1 - 52/1584) * (1 - 4/65540) * 0.994 = 0.9612
+//
+#define DATA_BW_EFF_128B_132B(linkRate)            ((linkRate * 9612) / 10000)
+
+// For channel equalization, the max loop count is 20 when waiting for CHANNEL_EQ_DONE to be set.
+#define NV_DP2X_MAX_LOOP_COUNT_POLL_CHNL_EQ_DONE   (20U)
+
+typedef enum
+{
+    linkBW_6_75Gbps = 0x19
+} DP2X_LINK_BANDWIDTH_270M;
+
+// The definition here is to match HW register defines for link speed.
+typedef enum +{ + dp2LinkSpeedId_1_62Gbps = 0x00, + dp2LinkSpeedId_2_70Gbps = 0x01, + dp2LinkSpeedId_5_40Gbps = 0x02, + dp2LinkSpeedId_8_10Gbps = 0x03, + dp2LinkSpeedId_2_16Gbps = 0x04, + dp2LinkSpeedId_2_43Gbps = 0x05, + dp2LinkSpeedId_3_24Gbps = 0x06, + dp2LinkSpeedId_4_32Gbps = 0x07, + dp2LinkSpeedId_6_75Gbps = 0x08, + dp2LinkSpeedId_10_0Gbps = 0x12, + dp2LinkSpeedId_13_5Gbps = 0x13, + dp2LinkSpeedId_20_0Gbps = 0x14, + dp2LinkSpeedId_UHBR_1_62Gbps = 0x1C, + dp2LinkSpeedId_UHBR_5_00Gbps = 0x1D, + dp2LinkSpeedId_UHBR_2_70Gbps = 0x1E, + dp2LinkSpeedId_UHBR_2_50Gbps = 0x1F, + dp2LinkSpeedId_Supported +} DP2X_LINK_SPEED_INDEX; + +typedef enum +{ + dp2xTxFFEPresetId_0 = 0, + dp2xTxFFEPresetId_1 = 1, + dp2xTxFFEPresetId_2 = 2, + dp2xTxFFEPresetId_3 = 3, + dp2xTxFFEPresetId_4 = 4, + dp2xTxFFEPresetId_5 = 5, + dp2xTxFFEPresetId_6 = 6, + dp2xTxFFEPresetId_7 = 7, + dp2xTxFFEPresetId_8 = 8, + dp2xTxFFEPresetId_9 = 9, + dp2xTxFFEPresetId_10 = 10, + dp2xTxFFEPresetId_11 = 11, + dp2xTxFFEPresetId_12 = 12, + dp2xTxFFEPresetId_13 = 13, + dp2xTxFFEPresetId_14 = 14, + dp2xTxFFEPresetId_15 = 15, + dp2xTxFFEPresetId_Supported +} DP2X_TXFFE_PRESET_INDEX; + +// Link Training stages for 128b/132b channel coding. +typedef enum +{ + DP2X_LT_Set_ResetLink = 0, + DP2X_LT_Poll_ResetLink = 1, + DP2X_LT_Set_PreLT = 2, + DP2X_LT_Set_ChnlEq = 3, + DP2X_LT_Poll_ChnlEq_Done = 4, + DP2X_LT_Poll_ChnlEq_InterlaneAlign = 5, + DP2X_LT_Set_CDS = 6, + DP2X_LT_Poll_CDS = 7, + DP2X_LT_Set_PostLT = 8, + DP2X_LT_StageSupported +} DP2X_LT_STAGES; + +typedef enum +{ + DP2X_ResetLinkForPreLT, + DP2X_ResetLinkForFallback, + DP2X_ResetLinkForChannelCoding +} DP2X_RESET_LINK_REASON; + +// +// Multiplier constant to get link frequency (multiplier of 10MHz) in MBps with 128b/132b channel coding. +// a * 10 * 1000 * 1000(10Mhz) * (128 / 132)(128b/132b) / 8(Byte) +// +#define DP_LINK_BW_FREQ_MULTI_10M_TO_MBPS (10 * 1000 * 1000 * 128 / (132 * 8)) + +// +// Multiplier constant to get DP2X link frequency in KHZ +// Maximum link rate of Main Link lanes = Value x 10M. +// To get it to KHz unit, we need to multiply 10K. 
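+// Worked example: HBR3 in 10M units is dp2LinkRate_8_10Gbps = 0x032A = 810,
+// and 810 * 10000 = 8,100,000 KHz, i.e. 8.1 Gbps expressed in KHz.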
+//
+#define DP_LINK_BW_FREQUENCY_MULTIPLIER_10MHZ_TO_KHZ    (10*1000)
+
+//
+// Multiplier constant to get link frequency (multiplier of 10MHz) in MBps
+// a * 10 * 1000 * 1000(10Mhz) * (8 / 10)(8b/10b) / 8(Byte)
+// = a * 1000000
+// (Numerically, the same factor also converts 10MHz units to 10Hz units,
+// which is what the macro name refers to.)
+//
+#define DP_LINK_BW_FREQUENCY_MULTIPLIER_10MHZ_TO_10HZ   (1000*1000)
+
+#define IS_STANDARD_DP2_X_LINKBW(val)      (((NvU32)(val)==dp2LinkRate_1_62Gbps) || \
+                                            ((NvU32)(val)==dp2LinkRate_2_70Gbps) || \
+                                            ((NvU32)(val)==dp2LinkRate_5_40Gbps) || \
+                                            ((NvU32)(val)==dp2LinkRate_8_10Gbps))
+
+#define IS_INTERMEDIATE_DP2_X_LINKBW(val)  (((NvU32)(val)==dp2LinkRate_2_16Gbps) || \
+                                            ((NvU32)(val)==dp2LinkRate_2_43Gbps) || \
+                                            ((NvU32)(val)==dp2LinkRate_3_24Gbps) || \
+                                            ((NvU32)(val)==dp2LinkRate_4_32Gbps))
+
+#define IS_DP2_X_UHBR_LINKBW(val)          (0)
+
+#define IS_VALID_DP2_X_LINKBW(val)         (IS_STANDARD_DP2_X_LINKBW(val)     || \
+                                            IS_INTERMEDIATE_DP2_X_LINKBW(val) || \
+                                            IS_DP2_X_UHBR_LINKBW(val))
+
+#define IS_LEGACY_INTERMEDIATE_LINKBW(val) (((NvU32)(val)==linkBW_2_16Gbps) || \
+                                            ((NvU32)(val)==linkBW_2_43Gbps) || \
+                                            ((NvU32)(val)==linkBW_3_24Gbps) || \
+                                            ((NvU32)(val)==linkBW_4_32Gbps) || \
+                                            ((NvU32)(val)==linkBW_6_75Gbps))
+
+#endif // #ifndef _DISPLAYPORT2X_H_
diff --git a/src/common/inc/displayport/dpcd.h b/src/common/inc/displayport/dpcd.h
new file mode 100644
index 0000000..9be2a6c
--- /dev/null
+++ b/src/common/inc/displayport/dpcd.h
@@ -0,0 +1,1528 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _DPCD_H_ +#define _DPCD_H_ + +#define NV_DPCD_CAP_LEGACY_BASE (0x00000000) + +#define NV_DPCD_REV (0x00000000) /* R-XUR */ +#define NV_DPCD_REV_MAJOR 7:4 /* R-XUF */ +#define NV_DPCD_REV_MAJOR_1 (0x00000001) /* R-XUV */ +#define NV_DPCD_REV_MINOR 3:0 /* R-XUF */ +#define NV_DPCD_REV_MINOR_0 (0x00000000) /* R-XUV */ +#define NV_DPCD_REV_MINOR_1 (0x00000001) /* R-XUV */ +#define NV_DPCD_REV_MINOR_2 (0x00000002) /* R-XUV */ +#define NV_DPCD_REV_MINOR_4 (0x00000004) /* R-XUV */ + +#define NV_DPCD_MAX_LINK_BANDWIDTH (0x00000001) /* R-XUR */ +#define NV_DPCD_MAX_LINK_BANDWIDTH_VAL 4:0 /* R-XUF */ +#define NV_DPCD_MAX_LINK_BANDWIDTH_VAL_1_62_GBPS (0x00000006) /* R-XUV */ +#define NV_DPCD_MAX_LINK_BANDWIDTH_VAL_2_70_GBPS (0x0000000a) /* R-XUV */ +#define NV_DPCD_MAX_LINK_BANDWIDTH_VAL_5_40_GBPS (0x00000014) /* R-XUV */ + +#define NV_DPCD_MAX_LANE_COUNT (0x00000002) /* R-XUR */ +#define NV_DPCD_MAX_LANE_COUNT_LANE 4:0 /* R-XUF */ +#define NV_DPCD_MAX_LANE_COUNT_LANE_1 (0x00000001) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_LANE_2 (0x00000002) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_LANE_4 (0x00000004) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_LANE_8 (0x00000008) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT 5:5 /* R-XUF */ +#define NV_DPCD_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_ENHANCED_FRAMING 7:7 /* R-XUF */ +#define NV_DPCD_MAX_LANE_COUNT_ENHANCED_FRAMING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_ENHANCED_FRAMING_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_TPS3_SUPPORTED 6:6 /* R-XUF */ +#define NV_DPCD_MAX_LANE_COUNT_TPS3_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_TPS3_SUPPORTED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_MAX_DOWNSPREAD (0x00000003) /* R-XUR */ +#define NV_DPCD_MAX_DOWNSPREAD_VAL 0:0 /* R-XUF */ +#define NV_DPCD_MAX_DOWNSPREAD_VAL_NONE (0x00000000) /* R-XUV */ +#define NV_DPCD_MAX_DOWNSPREAD_VAL_0_5_PCT (0x00000001) /* R-XUV */ +#define NV_DPCD_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT 6:6 /* R-XUF */ +#define NV_DPCD_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT_FALSE (0x00000000) /* R-XUV */ +#define NV_DPCD_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT_TRUE (0x00000001) /* R-XUV */ + +// NORP = Number of Receiver Ports = Value + 1 +#define NV_DPCD_NORP (0x00000004) /* R-XUR */ +#define NV_DPCD_NORP_VAL 0:0 /* R-XUF */ +#define NV_DPCD_NORP_VAL_ONE (0x00000000) /* R-XUV */ +#define NV_DPCD_NORP_VAL_TWO (0x00000001) /* R-XUV */ +#define NV_DPCD_NORP_VAL_SST_MAX (0x00000001) /* R-XUV */ +#define NV_DPCD_NORP_DP_PWR_CAP_5V 5:5 /* R-XUF */ +#define NV_DPCD_NORP_DP_PWR_CAP_12V 6:6 /* R-XUF */ +#define NV_DPCD_NORP_DP_PWR_CAP_18V 7:7 /* R-XUF */ + +#define NV_DPCD_DOWNSTREAMPORT (0x00000005) /* R-XUR */ +#define NV_DPCD_DOWNSTREAMPORT_PRESENT 0:0 /* R-XUF */ +#define NV_DPCD_DOWNSTREAMPORT_PRESENT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_PRESENT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_TYPE 2:1 /* R-XUF */ +#define NV_DPCD_DOWNSTREAMPORT_TYPE_DISPLAYPORT (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_TYPE_ANALOG (0x00000001) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_TYPE_HDMI_DVI (0x00000002) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_TYPE_OTHERS (0x00000003) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_FORMAT_CONVERSION 3:3 /* R-XUF */ +#define NV_DPCD_DOWNSTREAMPORT_FORMAT_CONVERSION_NO (0x00000000) /* 
R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_FORMAT_CONVERSION_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE 4:4 /* R-XUF */ +#define NV_DPCD_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING (0x00000006) /* R-XUR */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B 0:0 /* R-XUF */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_128B_132B 1:1 /* R-XUF */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_128B_132B_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_128B_132B_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_DOWN_STREAM_PORT (0x00000007) /* R-XUR */ +#define NV_DPCD_DOWN_STREAM_PORT_COUNT 3:0 /* R-XUF */ +#define NV_DPCD_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED 6:6 /* R-XUF */ +#define NV_DPCD_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DOWN_STREAM_PORT_OUI_SUPPORT 7:7 /* R-XUF */ +#define NV_DPCD_DOWN_STREAM_PORT_OUI_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWN_STREAM_PORT_OUI_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_RECEIVE_PORT0_CAP_0 (0x00000008) /* R-XUR */ +#define NV_DPCD_RECEIVE_PORT1_CAP_0 (0x0000000A) /* R-XUR */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_LOCAL_EDID 1:1 /* R-XUF */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_LOCAL_EDID_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_LOCAL_EDID_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT 2:2 /* R-XUF */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_RECEIVE_PORT0_CAP_1 (0x00000009) /* R-XUR */ +#define NV_DPCD_RECEIVE_PORT1_CAP_1 (0x0000000B) /* R-XUR */ +#define NV_DPCD_RECEIVE_PORTX_CAP_1_BUFFER_SIZE 7:0 /* R-XUF */ + +#define NV_DPCD_I2C_CTRL_CAP (0x0000000C) /* R-XUR */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED 7:0 /* R-XUF */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_1K (0x00000001) /* R-XUV */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_5K (0x00000002) /* R-XUV */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_10K (0x00000004) /* R-XUV */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_100K (0x00000008) /* R-XUV */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_400K (0x00000010) /* R-XUV */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_1M (0x00000020) /* R-XUV */ + +#define NV_DPCD_EDP_CONFIG_CAP (0x0000000D) /* R-XUR */ +#define NV_DPCD_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET 0:0 /* R-XUF */ +#define NV_DPCD_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_FRAMING_CHANGE 1:1 /* R-XUF */ +#define NV_DPCD_EDP_CONFIG_CAP_FRAMING_CHANGE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_FRAMING_CHANGE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_INVERTED_TRAINING_BIT 2:2 /* R-XUF */ +#define NV_DPCD_EDP_CONFIG_CAP_INVERTED_TRAINING_BIT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_INVERTED_TRAINING_BIT_YES (0x00000001) /* R-XUV */ +#define 
NV_DPCD_EDP_CONFIG_CAP_DISPLAY_CONTROL_CAPABLE 3:3 /* R-XUF */ +#define NV_DPCD_EDP_CONFIG_CAP_DISPLAY_CONTROL_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_DISPLAY_CONTROL_CAPABLE_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL (0x0000000E) /* R-XUR */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL 6:0 /* R-XUF */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL_DEFAULT (0x00000000) /* R-XUV */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL_4MS (0x00000001) /* R-XUV */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL_8MS (0x00000002) /* R-XUV */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL_12MS (0x00000003) /* R-XUV */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL_16MS (0x00000004) /* R-XUV */ + +#define NV_DPCD_ADAPTER_CAP (0x0000000F) /* R-XUR */ +#define NV_DPCD_ADAPTER_CAP_FORCE_LOAD_SENSE 0:0 /* R-XUF */ +#define NV_DPCD_ADAPTER_CAP_FORCE_LOAD_SENSE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_ADAPTER_CAP_FORCE_LOAD_SENSE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_ADAPTER_CAP_ALT_I2C_PATTERN 1:1 /* R-XUF */ +#define NV_DPCD_ADAPTER_CAP_ALT_I2C_PATTERN_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_ADAPTER_CAP_ALT_I2C_PATTERN_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_SUPPORTED_LINK_RATES(i) (0x00000010+(i)*2) /* R--2A */ +#define NV_DPCD_SUPPORTED_LINK_RATES__SIZE (0x00000008) /* R---S */ + +// 00010h-0001Fh: RESERVED. Reads all 0s + +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS (0x00000020) /* R-XUR */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1024_768 0:0 /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1024_768_NO (0X00000000) /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1024_768_YES (0X00000001) /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1280_720 1:1 /* R-XUV */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1280_720_NO (0X00000000) /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1280_720_YES (0X00000001) /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1920_1080 2:2 /* R-XUV */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1920_1080_NO (0X00000000) /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1920_1080_YES (0X00000001) /* R-XUF */ + +#define NV_DPCD_MSTM (0x00000021) /* R-XUR */ +#define NV_DPCD_MSTM_CAP 0:0 /* R-XUF */ +#define NV_DPCD_MSTM_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MSTM_CAP_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_NUMBER_OF_AUDIO_ENDPOINTS (0x00000022) /* R-XUR */ +#define NV_DPCD_NUMBER_OF_AUDIO_ENDPOINTS_VALUE 7:0 /* R-XUF */ + +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY (0x00000023) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR 3:0 /* R-XUF */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_3MS (0x00000000) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_2MS (0x00000001) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_1MS (0x00000002) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_500US (0x00000003) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_200US (0x00000004) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_100US (0x00000005) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_10US (0x00000006) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_1US (0x00000007) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_DEFAULT NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_2MS +#define 
NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR 7:4 /* R-XUF */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_3MS (0x00000000) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_2MS (0x00000001) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_1MS (0x00000002) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_500US (0x00000003) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_200US (0x00000004) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_100US (0x00000005) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_DEFAULT NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_2MS + +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_DEC_LAT_0 (0x00000024) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_DEC_LAT_1 (0x00000025) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_PP_LAT_0 (0x00000026) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_PP_LAT_1 (0x00000027) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_VID_INTER_LAT (0x00000028) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_VID_PROG_LAT (0x00000029) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_REP_LAT (0x0000002A) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_DEL_INS_0 (0x0000002B) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_DEL_INS_1 (0x0000002C) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_DEL_INS_2 (0x0000002D) /* R-XUR */ + +// 0002Eh - 0002Fh: RESERVED. Reads all 0s + +#define NV_DPCD_GUID (0x00000030) /* R-XUR */ + +// 00040h - 00053h: RESERVED. Reads all 0s + +#define NV_DPCD_RX_GTC_VALUE(i) (0x00000054+(i)) /* R--1A */ +#define NV_DPCD_RX_GTC_VALUE__SIZE 4 /* R---S */ + +#define NV_DPCD_RX_GTC_REQ (0x00000058) /* R-XUR */ +#define NV_DPCD_RX_GTC_REQ_RX_GTC_MSTR_REQ 0:0 /* R-XUF */ +#define NV_DPCD_RX_GTC_REQ_RX_GTC_MSTR_REQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_RX_GTC_REQ_RX_GTC_MSTR_REQ_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_RX_GTC_REQ_TX_GTC_VALUE_PHASE_SKEW_EN 1:1 /* R-XUF */ +#define NV_DPCD_RX_GTC_REQ_TX_GTC_VALUE_PHASE_SKEW_EN_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_RX_GTC_REQ_TX_GTC_VALUE_PHASE_SKEW_EN_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_RX_GTC_FREQ_LOCK (0x00000059) /* R-XUR */ +#define NV_DPCD_RX_GTC_FREQ_LOCK_DONE 0:0 /* R-XUF */ +#define NV_DPCD_RX_GTC_FREQ_LOCK_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_RX_GTC_FREQ_LOCK_DONE_YES (0x00000001) /* R-XUV */ + +// 0005Ah - 0006Fh: RESERVED Read all 0s + +#define NV_DPCD_EDP_PSR_VERSION (0x00000070) /* R-XUR */ + +#define NV_DPCD_EDP_PSR_CAP (0x00000071) /* R-XUR */ +#define NV_DPCD_EDP_PSR_CAP_LT_NEEDED 0:0 /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_LT_NEEDED_YES (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_LT_NEEDED_NO (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME 3:1 /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_330US (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_275US (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_220US (0x00000002) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_165US (0x00000003) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_110US (0x00000004) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_55US (0x00000005) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_0US (0x00000006) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_Y_COORD_NEEDED 4:4 /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_Y_COORD_NEEDED_NO (0x00000000) /* R-XUF */ +#define 
NV_DPCD_EDP_PSR_CAP_Y_COORD_NEEDED_YES                  (0x00000001) /* R-XUF */
+#define NV_DPCD_EDP_PSR_CAP_GRAN_REQUIRED                       5:5 /* R-XUF */
+#define NV_DPCD_EDP_PSR_CAP_GRAN_REQUIRED_NO                    (0x00000000) /* R-XUF */
+#define NV_DPCD_EDP_PSR_CAP_GRAN_REQUIRED_YES                   (0x00000001) /* R-XUF */
+
+#define NV_DPCD_EDP_PSR2_X_GRANULARITY_L                        (0x00000072) /* R-XUR */
+#define NV_DPCD_EDP_PSR2_X_GRANULARITY_H                        (0x00000073) /* R-XUR */
+#define NV_DPCD_EDP_PSR2_Y_GRANULARITY                          (0x00000074) /* R-XUR */
+
+// 00075h - 0007Fh: RESERVED. Read all 0s
+
+/*
+ * When DETAILED_CAP_INFO_AVAILABLE = 0, 1 byte of info per port.
+ * When DETAILED_CAP_INFO_AVAILABLE = 1, 4 bytes of info per port.
+ * DETAILED_CAP_INFO_AVAILABLE is located at 00005h (DOWNSTREAMPORT_PRESENT), bit 4.
+ *
+ * Byte 0 definition.
+*/
+
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT(i)                   (0x00000080+(i)*4) /* R--1A */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT__SIZE                4 /* R---S */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE              2:0 /* R-XUF */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DISPLAYPORT  (0x00000000) /* R-XUV */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_ANALOG       (0x00000001) /* R-XUV */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DVI          (0x00000002) /* R-XUV */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_HDMI         (0x00000003) /* R-XUV */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_OTHERS_NO_EDID (0x00000004) /* R-XUV */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DP_PLUSPLUS  (0x00000005) /* R-XUV */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_HPD                  3:3 /* R-XUF */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_HPD_NOT_AWARE        (0x00000000) /* R-XUV */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_HPD_AWARE            (0x00000001) /* R-XUV */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_ATTR        7:4 /* R-XUF */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_480I_60HZ   (0x00000001) /* R-XUV */ // 720x480i
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_480I_50HZ   (0x00000002) /* R-XUV */ // 720x480i
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_1080I_60HZ  (0x00000003) /* R-XUV */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_1080I_50HZ  (0x00000004) /* R-XUV */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_720P_60HZ   (0x00000005) /* R-XUV */
+#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_720P_50HZ   (0x00000007) /* R-XUV */
+
+/*
+ * Byte 1, Reserved for DisplayPort.
+ */
+
+#define NV_DPCD_DETAILED_CAP_INFO_ONE(i)                            (0x00000081+(i)*4) /* R--1A */
+#define NV_DPCD_DETAILED_CAP_INFO__SIZE                             NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT__SIZE
+#define NV_DPCD_DETAILED_CAP_INFO_ONE__SIZE                         4 /* R---S */
+// For Analog VGA Downstream Port. Maximum Pixel Rate in Mpixels per sec divided by 8
+#define NV_DPCD_DETAILED_CAP_INFO_VGA_MAX_PIXEL_RATE                7:0 /* R-XUF */
+/*
+ * For DVI/HDMI/DP++ Downstream Port, Maximum TMDS clock rate supported in MHz divided by 2.5,
+ * e.g. 66 (0x42) for 165 MHz, 90 (0x5a) for 225 MHz
+ */
+#define NV_DPCD_DETAILED_CAP_INFO_TMDS_MAX_CLOCK_RATE               7:0 /* R-XUF */
+
+// Byte 2, for VGA/DVI/HDMI/DP++ Downstream Port, reserved for DisplayPort.
+#define NV_DPCD_DETAILED_CAP_INFO_TWO(i) (0x00000082+(i)*4) /* R--1A */ +#define NV_DPCD_DETAILED_CAP_INFO_TWO__SIZE 4 /* R---S */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF 1:0 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_8BPC (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_10BPC (0x00000001) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_12BPC (0x00000002) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_16BPC (0x00000003) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT 4:2 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_ZERO (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_9G (0x00000001) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_18G (0x00000002) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_24G (0x00000003) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_32G (0x00000004) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_40G (0x00000005) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_48G (0x00000006) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_SRC_CONTROL_MODE_SUPPORT 5:5 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_SRC_CONTROL_MODE_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_SRC_CONTROL_MODE_SUPPORT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_CONCURRENT_LT_SUPPORT 6:6 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_CONCURRENT_LT_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_CONCURRENT_LT_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_MAX_BPC_8 8 +#define NV_MAX_BPC_10 10 +#define NV_MAX_BPC_12 12 +#define NV_MAX_BPC_16 16 + +// Byte 3, Reserved for DisplayPort and VGA +#define NV_DPCD_DETAILED_CAP_INFO_THREE(i) (0x00000083+(i)*4) /* R--1A */ +#define NV_DPCD_DETAILED_CAP_INFO_THREE__SIZE 4 /* R---S */ +// For DVI + #define NV_DPCD_DETAILED_CAP_INFO_DVI_DUAL_LINK 1:1 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_INFO_DVI_DUAL_LINK_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_INFO_DVI_DUAL_LINK_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_INFO_DVI_HIGH_COLOR_DEPTH 2:2 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_INFO_DVI_HIGH_COLOR_DEPTH_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_INFO_DVI_HIGH_COLOR_DEPTH_YES (0x00000001) /* R-XUV */ +// For HDMI and DP++ + #define NV_DPCD_DETAILED_CAP_INFO_FRAME_SEQ_TO_FRAME_PACK 0:0 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_INFO_FRAME_SEQ_TO_FRAME_PACK_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_INFO_FRAME_SEQ_TO_FRAME_PACK_YES (0x00000001) /* R-XUV */ +// For HDMI-PCon + #define NV_DPCD_DETAILED_CAP_YCBCR422_PASS_THRU_SUPPORTED 1:1 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_YCBCR422_PASS_THRU_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_YCBCR422_PASS_THRU_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_YCBCR420_PASS_THRU_SUPPORTED 2:2 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_YCBCR420_PASS_THRU_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_YCBCR420_PASS_THRU_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR422_SUPPORTED 3:3 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR422_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define 
NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR422_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR420_SUPPORTED 4:4 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR420_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR420_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB601_TO_YCBCR601_SUPPORTED 5:5 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB601_TO_YCBCR601_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB601_TO_YCBCR601_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB709_TO_YCBCR709_SUPPORTED 6:6 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB709_TO_YCBCR709_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB709_TO_YCBCR709_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGBBT2020_TO_YCBCRBT2020_SUPPORTED 7:7 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_CONV_RGBBT2020_TO_YCBCRBT2020_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGBBT2020_TO_YCBCRBT2020_SUPPORTED_YES (0x00000001) /* R-XUV */ + +/* +00090h - 000FFh: RESERVED for supporting up to 127 Downstream devices per Branch device. Read all 0s +Note: When DETAILED_CAP_INFO_AVAILABLE bit is set to 1, the maximum +number of Downstream ports will be limited to 32. +*/ + +#define NV_DPCD_LINK_BANDWIDTH_SET (0x00000100) /* RWXUR */ +#define NV_DPCD_LINK_BANDWIDTH_SET_VAL 7:0 /* RWXUF */ +#define NV_DPCD_LINK_BANDWIDTH_SET_VAL_1_62_GPBS (0x00000006) /* RWXUV */ +#define NV_DPCD_LINK_BANDWIDTH_SET_VAL_2_70_GPBS (0x0000000a) /* RWXUV */ +#define NV_DPCD_LINK_BANDWIDTH_SET_VAL_5_40_GPBS (0x00000014) /* RWXUV */ + +#define NV_DPCD_LANE_COUNT_SET (0x00000101) /* RWXUR */ +#define NV_DPCD_LANE_COUNT_SET_LANE 4:0 /* RWXUF */ +#define NV_DPCD_LANE_COUNT_SET_LANE_1 (0x00000001) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_LANE_2 (0x00000002) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_LANE_4 (0x00000004) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_POST_LT_ADJ_REQ_GRANTED 5:5 /* RWXUF */ +#define NV_DPCD_LANE_COUNT_SET_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_ENHANCEDFRAMING 7:7 /* RWXUF */ +#define NV_DPCD_LANE_COUNT_SET_ENHANCEDFRAMING_FALSE (0x00000000) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_ENHANCEDFRAMING_TRUE (0x00000001) /* RWXUV */ + +#define NV_DPCD_TRAINING_PATTERN_SET (0x00000102) /* RWXUR */ +#define NV_DPCD_TRAINING_PATTERN_SET_TPS 1:0 /* RWXUF */ +#define NV_DPCD_TRAINING_PATTERN_SET_TPS_NONE (0x00000000) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_TPS_TP1 (0x00000001) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_TPS_TP2 (0x00000002) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_TPS_TP3 (0x00000003) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_LQPS 3:2 /* R-XUF */ +#define NV_DPCD_TRAINING_PATTERN_SET_LQPS_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_LQPS_D10_2_TP (0x00000001) /* R-XUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_LQPS_SYM_ERR_RATE_TP (0x00000002) /* R-XUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_LQPS_PRBS7 (0x00000003) /* R-XUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN 4:4 /* RWXUF */ +#define NV_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_YES (0x00000001) /* RWXUV */ 
+#define NV_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED 5:5 /* RWXUF */ +#define NV_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED_FALSE (0x00000000) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_SYM_ERR_SEL 7:6 /* RWXUF */ +#define NV_DPCD_TRAINING_PATTERN_SET_SYM_ERR_SEL_DISPARITY_ILLEGAL_SYMBOL_ERROR (0x00000000) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_SYM_ERR_SEL_DISPARITY_ERROR (0x00000001) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_SYM_ERR_SEL_ILLEGAL_SYMBOL_ERROR (0x00000002) /* RWXUV */ + +#define NV_DPCD_TRAINING_LANE_SET(i) (0x00000103+(i)) /* RW-1A */ +#define NV_DPCD_TRAINING_LANE_SET__SIZE 4 /* RW--S */ +#define NV_DPCD_TRAINING_LANE_SET_VOLTAGE_SWING 1:0 /* RWXUF */ +#define NV_DPCD_TRAINING_LANE_SET_VOLTAGE_SWING_MAX_REACHED 2:2 /* RWXUF */ +#define NV_DPCD_TRAINING_LANE_SET_VOLTAGE_SWING_MAX_REACHED_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD_TRAINING_LANE_SET_PREEMPHASIS 4:3 /* RWXUF */ +#define NV_DPCD_TRAINING_LANE_SET_PREEMPHASIS_MAX_REACHED 5:5 /* RWXUF */ +#define NV_DPCD_TRAINING_LANE_SET_PREEMPHASIS_MAX_REACHED_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD_TRAINING_LANE0_SET (0x00000103) /* RWXUR */ + +#define NV_DPCD_MAX_VOLTAGE_SWING (0x00000003) /* RWXUV */ +#define NV_DPCD_MAX_VOLTAGE_PREEMPHASIS (0x00000003) /* RWXUV */ + +#define NV_DPCD_TRAINING_LANE1_SET (0x00000104) /* RWXUR */ +#define NV_DPCD_TRAINING_LANE2_SET (0x00000105) /* RWXUR */ +#define NV_DPCD_TRAINING_LANE3_SET (0x00000106) /* RWXUR */ +#define NV_DPCD_TRAINING_LANEX_SET_DRIVE_CURRENT 1:0 /* RWXUF */ +#define NV_DPCD_TRAINING_LANEX_SET_DRIVE_CURRENT_MAX_REACHED 2:2 /* RWXUF */ +#define NV_DPCD_TRAINING_LANEX_SET_DRIVE_CURRENT_MAX_REACHED_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD_TRAINING_LANEX_SET_PREEMPHASIS 4:3 /* RWXUF */ +#define NV_DPCD_TRAINING_LANEX_SET_PREEMPHASIS_MAX_REACHED 5:5 /* RWXUF */ +#define NV_DPCD_TRAINING_LANEX_SET_PREEMPHASIS_MAX_REACHED_TRUE (0x00000001) /* RWXUV */ + +#define NV_DPCD_DOWNSPREAD_CTRL (0x00000107) /* RWXUR */ +#define NV_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP 4:4 /* RWXUF */ +#define NV_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP_NONE (0x00000000) /* RWXUV */ +#define NV_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP_LESS_THAN_0_5 (0x00000001) /* RWXUV */ +#define NV_DPCD_DOWNSPREAD_CTRL_MSA_TIMING_PAR_IGNORED 7:7 /* RWXUF */ +#define NV_DPCD_DOWNSPREAD_CTRL_MSA_TIMING_PAR_IGNORED_FALSE (0x00000000) /* RWXUV */ +#define NV_DPCD_DOWNSPREAD_CTRL_MSA_TIMING_PAR_IGNORED_TRUE (0x00000001) /* RWXUV */ + + +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_SET (0x00000108) /* RWXUR */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_SET_ANSI_8B_10B 0:0 /* RWXUF */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_SET_ANSI_8B_10B_FALSE (0x00000000) /* RWXUV */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_SET_ANSI_8B_10B_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_SET_ANSI_128B_132B 1:1 /* RWXUF */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_SET_ANSI_128B_132B_FALSE (0x00000000) /* RWXUV */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_SET_ANSI_128B_132B_TRUE (0x00000001) /* RWXUV */ + +#define NV_DPCD_I2C_CTRL_SET (0x00000109) /* RWXUR */ +#define NV_DPCD_I2C_CTRL_SET_SPEED 7:0 /* RWXUF */ +#define NV_DPCD_I2C_CTRL_SET_SPEED_DEFAULT (0x00000000) /* RWXUV */ +#define NV_DPCD_I2C_CTRL_SET_SPEED_1K (0x00000001) /* RWXUV */ +#define NV_DPCD_I2C_CTRL_SET_SPEED_5K (0x00000002) /* RWXUV */ +#define NV_DPCD_I2C_CTRL_SET_SPEED_10K (0x00000004) /* RWXUV */ +#define NV_DPCD_I2C_CTRL_SET_SPEED_100K 
(0x00000008) /* RWXUV */ +#define NV_DPCD_I2C_CTRL_SET_SPEED_400K (0x00000010) /* RWXUV */ +#define NV_DPCD_I2C_CTRL_SET_SPEED_1M (0x00000020) /* RWXUV */ + +#define NV_DPCD_EDP_CONFIG_SET (0x0000010A) /* RWXUR */ +#define NV_DPCD_EDP_CONFIG_SET_ALTERNATE_SCRAMBLER_RESET 0:0 /* RWXUF */ +#define NV_DPCD_EDP_CONFIG_SET_ALTERNATE_SCRAMBLER_RESET_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_CONFIG_SET_ALTERNATE_SCRAMBLER_RESET_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_CONFIG_SET_FRAMING_CHANGE 1:1 /* RWXUF */ +#define NV_DPCD_EDP_CONFIG_SET_FRAMING_CHANGE_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_CONFIG_SET_FRAMING_CHANGE_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_CONFIG_SET_INVERTED_TRAINING_BIT 2:2 /* RWXUF */ +#define NV_DPCD_EDP_CONFIG_SET_INVERTED_TRAINING_BIT_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_CONFIG_SET_INVERTED_TRAINING_BIT_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_CONFIG_SET_PANEL_SELF_TEST 7:7 /* RWXUF */ +#define NV_DPCD_EDP_CONFIG_SET_PANEL_SELF_TEST_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_CONFIG_SET_PANEL_SELF_TEST_ENABLE (0x00000001) /* RWXUV */ + +#define NV_DPCD_LINK_QUAL_LANE_SET(i) (0x0000010B+(i)) /* RW-1A */ +#define NV_DPCD_LINK_QUAL_LANE_SET__SIZE 4 /* RW--S */ +#define NV_DPCD_LINK_QUAL_LANE_SET_LQS 2:0 /* RWXUF */ +#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_D10_2 (0x00000001) /* RWXUV */ +#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_SYM_ERR_MEASUREMENT_CNT (0x00000002) /* RWXUV */ +#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_PRBS7 (0x00000003) /* RWXUV */ +#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_80_BIT_CUSTOM (0x00000004) /* RWXUV */ +#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_HBR2 (0x00000005) /* RWXUV */ + +#define NV_DPCD_TRAINING_LANE0_1_SET2 (0x0000010F) /* RWXUR */ +#define NV_DPCD_TRAINING_LANE2_3_SET2 (0x00000110) /* RWXUR */ +#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEX_SET2_POST_CURSOR2 1:0 /* RWXUF */ +#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEX_SET2_POST_CURSOR2_MAX_REACHED 2:2 /* RWXUF */ +#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEX_SET2_POST_CURSOR2_MAX_REACHED_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEXPLUS1_SET2_POST_CURSOR2 5:4 /* RWXUF */ +#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEXPLUS1_SET2_POST_CURSOR2_MAX_REACHED 6:6 /* RWXUF */ +#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEXPLUS1_SET2_POST_CURSOR2_MAX_REACHED_TRUE (0x00000001) /* RWXUV */ + +#define NV_DPCD_MSTM_CTRL (0x00000111) /* RWXUR */ +#define NV_DPCD_MSTM_CTRL_EN 0:0 /* RWXUF */ +#define NV_DPCD_MSTM_CTRL_EN_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_MSTM_CTRL_EN_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_MSTM_CTRL_UP_REQ_EN 1:1 /* RWXUF */ +#define NV_DPCD_MSTM_CTRL_UP_REQ_EN_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_MSTM_CTRL_UP_REQ_EN_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC 2:2 /* RWXUF */ +#define NV_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC_YES (0x00000001) /* RWXUV */ + +#define NV_DPCD_AUDIO_DELAY(i) (0x00000112+(i)) /* RW-1A */ +#define NV_DPCD_AUDIO_DELAY__SIZE 3 /* NNNNS */ + +#define NV_DPCD_LINK_RATE_SET (0x00000115) /* RWXUR */ +#define NV_DPCD_LINK_RATE_SET_VAL 2:0 /* RWXUF */ + +// 00116h - 00117h: RESERVED.
Reads all 0s + +#define NV_DPCD_UPSTREAM_DEV_DP_PWR (0x00000118) /* RWXUR */ +#define NV_DPCD_UPSTREAM_DEV_DP_PWR_NOT_NEEDED 0:0 /* RWXUF */ +#define NV_DPCD_UPSTREAM_DEV_DP_PWR_NOT_NEEDED_FALSE (0x00000000) /* RWXUV */ +#define NV_DPCD_UPSTREAM_DEV_DP_PWR_NOT_NEEDED_TRUE (0x00000001) /* RWXUV */ + +#define NV_DPCD_EXTENDED_DPRX_WAKE_TIMEOUT (0x00000119) /* RWXUR */ +#define NV_DPCD_EXTENDED_DPRX_WAKE_TIMEOUT_PERIOD_GRANTED 0:0 /* RWXUF */ +#define NV_DPCD_EXTENDED_DPRX_WAKE_TIMEOUT_PERIOD_GRANTED_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_EXTENDED_DPRX_WAKE_TIMEOUT_PERIOD_GRANTED_YES (0x00000001) /* RWXUV */ + +// 0011Ah - 0011Fh: RESERVED. Reads all 0s +// 00126h - 00153h: RESERVED. Reads all 0s + +#define NV_DPCD_TX_GTC_VALUE(i) (0x00000154+(i)) /* RW-1A */ +#define NV_DPCD_TX_GTC_VALUE__SIZE 4 /* R---S */ + +#define NV_DPCD_RX_GTC_VALUE_PHASE_SKEW (0x00000158) /* RWXUR */ +#define NV_DPCD_RX_GTC_VALUE_PHASE_SKEW_EN 0:0 /* RWXUF */ +#define NV_DPCD_RX_GTC_VALUE_PHASE_SKEW_EN_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_RX_GTC_VALUE_PHASE_SKEW_EN_YES (0x00000001) /* RWXUV */ + +#define NV_DPCD_TX_GTC_FREQ_LOCK (0x00000159) /* RWXUR */ +#define NV_DPCD_TX_GTC_FREQ_LOCK_DONE 0:0 /* RWXUF */ +#define NV_DPCD_TX_GTC_FREQ_LOCK_DONE_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TX_GTC_FREQ_LOCK_DONE_YES (0x00000001) /* RWXUV */ + +// 0015Ah - 0016Fh: RESERVED. Read all 0s + +#define NV_DPCD_EDP_PSR_CONFIG (0x00000170) /* RWXUR */ +#define NV_DPCD_EDP_PSR_CONFIG_SINK_ENABLE 0:0 /* RWXUF */ +#define NV_DPCD_EDP_PSR_CONFIG_SINK_ENABLE_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_SINK_ENABLE_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_SOURCE_LINK_ACTIVE 1:1 /* RWXUF */ +#define NV_DPCD_EDP_PSR_CONFIG_SOURCE_LINK_ACTIVE_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_SOURCE_LINK_ACTIVE_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_CRC_VERIFICATION_ACTIVE 2:2 /* RWXUF */ +#define NV_DPCD_EDP_PSR_CONFIG_CRC_VERIFICATION_ACTIVE_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_CRC_VERIFICATION_ACTIVE_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_FRAME_CAPTURE_INDICATION 3:3 /* RWXUF */ +#define NV_DPCD_EDP_PSR_CONFIG_FRAME_CAPTURE_INDICATION_IMM (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_FRAME_CAPTURE_INDICATION_SECOND (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_SU_LINE_CAPTURE_INDICATION 4:4 /* RWXUF */ +#define NV_DPCD_EDP_PSR_CONFIG_SU_LINE_CAPTURE_INDICATION_IMM (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_SU_LINE_CAPTURE_INDICATION_SECOND (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_HPD_IRQ_ON_CRC_ERROR 5:5 /* RWXUF */ +#define NV_DPCD_EDP_PSR_CONFIG_HPD_IRQ_ON_CRC_ERROR_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_HPD_IRQ_ON_CRC_ERROR_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_ENABLE_PSR2 6:6 /* RWXUF */ +#define NV_DPCD_EDP_PSR_CONFIG_ENABLE_PSR2_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_ENABLE_PSR2_YES (0x00000001) /* RWXUV */ + +// 00171h - 0019Fh: RESERVED. 
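Read all 0s

The PSR configuration fields above map one-to-one onto bit positions in the 0x170 byte. As a minimal sketch (assuming a hypothetical `dpAuxWrite()` AUX-channel helper, which is not part of this header), arming PSR with CRC checking looks like:

```c
#include <stdint.h>

/* Hypothetical AUX write helper; not provided by this header. */
extern int dpAuxWrite(uint32_t addr, const uint8_t *data, uint32_t len);

/*
 * Arm eDP PSR on the sink with CRC verification and an HPD IRQ on CRC
 * mismatch.  The shifts mirror the 0:0, 2:2 and 5:5 field ranges of
 * NV_DPCD_EDP_PSR_CONFIG (0x170) defined above.
 */
static int edpEnablePsr(void)
{
    uint8_t cfg = (uint8_t)((NV_DPCD_EDP_PSR_CONFIG_SINK_ENABLE_YES             << 0) |
                            (NV_DPCD_EDP_PSR_CONFIG_CRC_VERIFICATION_ACTIVE_YES << 2) |
                            (NV_DPCD_EDP_PSR_CONFIG_HPD_IRQ_ON_CRC_ERROR_YES    << 5));
    return dpAuxWrite(NV_DPCD_EDP_PSR_CONFIG, &cfg, 1);
}
```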
+ + +#define NV_DPCD_ADAPTER_CTRL (0x000001A0) /* RWXUR */ +#define NV_DPCD_ADAPTER_CTRL_FORCE_LOAD_SENSE 0:0 /* RWXUF */ +#define NV_DPCD_ADAPTER_CTRL_FORCE_LOAD_SENSE_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_ADAPTER_CTRL_FORCE_LOAD_SENSE_YES (0x00000001) /* RWXUV */ + +#define NV_DPCD_BRANCH_DEV_CTRL (0x000001A1) /* RWXUR */ +#define NV_DPCD_BRANCH_DEV_CTRL_HOTPLUG_EVENT_TYPE 0:0 /* RWXUF */ +#define NV_DPCD_BRANCH_DEV_CTRL_HOTPLUG_EVENT_TYPE_LONGPULSE (0x00000000) /* RWXUV */ +#define NV_DPCD_BRANCH_DEV_CTRL_HOTPLUG_EVENT_TYPE_IRQ_HPD (0x00000001) /* RWXUV */ +#define NV_DPCD_BRANCH_DEV_CTRL_HOTPLUG_EVENT_TYPE_DEFAULT NV_DPCD_BRANCH_DEV_CTRL_HOTPLUG_EVENT_TYPE_LONGPULSE + +// 001A2h - 001BFh: RESERVED. Read all 0s + +#define NV_DPCD_PAYLOAD_ALLOC_SET (0x000001C0) /* RWXUR */ +#define NV_DPCD_PAYLOAD_ALLOC_SET_PAYLOAD_ID 6:0 /* RWXUF */ + +#define NV_DPCD_PAYLOAD_ALLOC_START_TIME_SLOT (0x000001C1) /* RWXUR */ +#define NV_DPCD_PAYLOAD_ALLOC_START_TIME_SLOT_VAL 5:0 /* RWXUF */ + +#define NV_DPCD_PAYLOAD_ALLOC_TIME_SLOT_COUNT (0x000001C2) /* RWXUR */ +#define NV_DPCD_PAYLOAD_ALLOC_TIME_SLOT_COUNT_VAL 5:0 /* RWXUF */ + +// 001C3h - 001FFh: RESERVED. Reads all 0s + +#define NV_DPCD_SINK_COUNT (0x00000200) /* R-XUR */ +// Bits 7 and 5:0 = SINK_COUNT +#define NV_DPCD_SINK_COUNT_VAL_BIT_05_MASK (0x3F) +#define NV_DPCD_SINK_COUNT_VAL_BIT_7 (0x80) +#define NV_DPCD_SINK_COUNT_VAL(x) (((x) & NV_DPCD_SINK_COUNT_VAL_BIT_05_MASK) \ + | (((x) & NV_DPCD_SINK_COUNT_VAL_BIT_7) >> 1)) +#define NV_DPCD_SINK_COUNT_CP_READY 6:6 /* R-XUF */ +#define NV_DPCD_SINK_COUNT_CP_READY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_SINK_COUNT_CP_READY_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR (0x00000201) /* RWXUR */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_REMOTE_CTRL 0:0 /* RWXUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_REMOTE_CTRL_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_REMOTE_CTRL_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_AUTO_TEST 1:1 /* RWXUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_AUTO_TEST_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_AUTO_TEST_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_CP 2:2 /* RWXUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_CP_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_CP_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_MCCS_IRQ 3:3 /* RWXUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_MCCS_IRQ_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_MCCS_IRQ_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_DOWN_REP_MSG_RDY 4:4 /* RWXUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_DOWN_REP_MSG_RDY_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_DOWN_REP_MSG_RDY_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_UP_REQ_MSG_RDY 5:5 /* RWXUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_UP_REQ_MSG_RDY_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_UP_REQ_MSG_RDY_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_SINK_SPECIFIC_IRQ 6:6 /* RWXUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_SINK_SPECIFIC_IRQ_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_SINK_SPECIFIC_IRQ_YES (0x00000001) /* RWXUV */ + +#define NV_DPCD_LANE0_1_STATUS (0x00000202) /* R-XUR */ + +#define NV_DPCD_LANE2_3_STATUS (0x00000203) /* R-XUR */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CR_DONE 0:0 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CR_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CR_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CHN_EQ_DONE 1:1 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CHN_EQ_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CHN_EQ_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_SYMBOL_LOCKED 2:2 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_SYMBOL_LOCKED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_SYMBOL_LOCKED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CR_DONE 4:4 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CR_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CR_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CHN_EQ_DONE 5:5 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CHN_EQ_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CHN_EQ_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_SYMBOL_LOCKED 6:6 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_SYMBOL_LOCKED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_SYMBOL_LOCKED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED (0x00000204) /* R-XUR */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_INTERLANE_ALIGN_DONE 0:0 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_INTERLANE_ALIGN_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_INTERLANE_ALIGN_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_POST_LT_ADJ_REQ_IN_PROGRESS 1:1 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_POST_LT_ADJ_REQ_IN_PROGRESS_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_POST_LT_ADJ_REQ_IN_PROGRESS_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_DOWNSTRM_PORT_STATUS_DONE 6:6 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_DOWNSTRM_PORT_STATUS_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_DOWNSTRM_PORT_STATUS_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_LINK_STATUS_UPDATED 7:7 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_LINK_STATUS_UPDATED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_LINK_STATUS_UPDATED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_SINK_STATUS (0x00000205) /* R-XUR */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS 0:0 /* R-XUF */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS_IN_SYNC_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS_IN_SYNC_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS 1:1 /* R-XUF */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS_IN_SYNC_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS_IN_SYNC_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_LANE0_1_ADJUST_REQ (0x00000206) /* R-XUR */ +#define NV_DPCD_LANE2_3_ADJUST_REQ (0x00000207) /* R-XUR */ +#define NV_DPCD_LANEX_XPLUS1_ADJUST_REQ_LANEX_DRIVE_CURRENT 1:0 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_ADJUST_REQ_LANEX_PREEMPHASIS 3:2 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_ADJUST_REQ_LANEXPLUS1_DRIVE_CURRENT 5:4 /* R-XUF */
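Each status byte above carries two lanes: the LANEX_* ranges describe the low nibble (even lane) and the LANEXPLUS1_* ranges the high nibble (odd lane). A minimal per-lane check, assuming a hypothetical `dpAuxRead()` AUX-channel helper that is not part of this header:

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical AUX read helper; not provided by this header. */
extern int dpAuxRead(uint32_t addr, uint8_t *data, uint32_t len);

/*
 * True once a lane reports clock recovery, channel EQ and symbol lock.
 * Lanes 0/1 live at 0x202 and lanes 2/3 at 0x203; odd lanes use the
 * upper nibble, matching the LANEX/LANEXPLUS1 ranges above.
 */
static bool dpLaneTrained(uint32_t lane)
{
    uint8_t status;

    if (dpAuxRead(NV_DPCD_LANE0_1_STATUS + (lane / 2), &status, 1) != 0)
        return false;

    status >>= (lane & 1) ? 4 : 0;
    return (status & 0x1) &&  /* LANEX_CR_DONE,       bit 0 */
           (status & 0x2) &&  /* LANEX_CHN_EQ_DONE,   bit 1 */
           (status & 0x4);    /* LANEX_SYMBOL_LOCKED, bit 2 */
}
```

+#define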
NV_DPCD_LANEX_XPLUS1_ADJUST_REQ_LANEXPLUS1_PREEMPHASIS 7:6 /* R-XUF */ + +#define NV_DPCD_TRAINING_SCORE_LANE(i) (0x00000208+(i)) /* R--1A */ +#define NV_DPCD_TRAINING_SCORE_LANE__SIZE 4 /* R---S */ + +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2 (0x0000020C) /* R-XUR */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE(i) (i)%4*2+1:(i)%4*2 +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE0 1:0 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE1 3:2 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE2 5:4 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE3 7:6 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE4 1:0 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE5 3:2 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE6 5:4 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE7 7:6 /* R-XUF */ + +#define NV_DPCD_EDP_LINK_CONFIG_STATUS (0x0000020C) /* RWXUR */ +#define NV_DPCD_EDP_LINK_CONFIG_STATUS_SET 0:0 /* R-XUF */ +#define NV_DPCD_EDP_LINK_CONFIG_STATUS_SET_LINK_BW (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_LINK_CONFIG_STATUS_SET_LINK_RATE (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_LINK_CONFIG_STATUS_VALID 1:1 /* R-XUF */ +#define NV_DPCD_EDP_LINK_CONFIG_STATUS_VALID_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_LINK_CONFIG_STATUS_VALID_YES (0x00000001) /* R-XUV */ + +// 0020Fh: RESERVED. Read all 0s + +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE0(i) (0x00000210+(i)*2) /* R--1A */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE0__SIZE 4 /* R---S */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE0_VALUE 7:0 /* R-XUF */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE1(i) (0x00000211+(i)*2) /* R--1A */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE1__SIZE 4 /* R---S */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE1_VALUE 6:0 /* R-XUF */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE1_VALID 7:7 /* R-XUF */ + +#define NV_DPCD_TEST_REQUEST (0x00000218) /* R-XUR */ +#define NV_DPCD_TEST_REQUEST_TEST_LINK_TRAINING 0:0 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_LINK_TRAINING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_LINK_TRAINING_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_PATTERN 1:1 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_PATTERN_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_PATTERN_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_EDID_READ 2:2 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_EDID_READ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_EDID_READ_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_PHY_TEST_PATTERN 3:3 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_PHY_TEST_PATTERN_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_PHY_TEST_PATTERN_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_TEST_LINK_RATE (0x00000219) /* R-XUR */ +#define NV_DPCD_TEST_LINK_RATE_TYPE 7:0 /* R-XUF */ +#define NV_DPCD_TEST_LINK_RATE_TYPE_1_62G (0x00000006) /* R-XUV */ +#define NV_DPCD_TEST_LINK_RATE_TYPE_2_70G (0x0000000A) /* R-XUV */ +#define NV_DPCD_TEST_LINK_RATE_TYPE_5_40G (0x00000014) /* R-XUV */ +#define NV_DPCD_TEST_LINK_RATE_TYPE_8_10G (0x0000001E) /* R-XUV */ +
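The lane-indexed range macro fixed above packs four 2-bit POST_CURSOR2 requests into one byte, with lanes 4..7 wrapping into a second byte with the same layout. Extracting one request from a byte already read over AUX is plain shift-and-mask, as in this sketch:

```c
#include <stdint.h>

/*
 * Pull the 2-bit POST_CURSOR2 adjust request for a lane out of a
 * previously read NV_DPCD_ADJUST_REQ_POST_CURSOR2 byte.  The shift
 * mirrors the (i)%4*2 low bit of the LANE(i) range macro above:
 * lanes 0..3 map to bit pairs 1:0, 3:2, 5:4 and 7:6.
 */
static uint8_t dpPostCursor2Req(uint8_t reg, uint32_t lane)
{
    return (uint8_t)((reg >> ((lane % 4) * 2)) & 0x3);
}
```

+// 0021Ah - 0021Fh: RESERVED.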
Read all 0s + +#define NV_DPCD_TEST_LANE_COUNT (0x00000220) /* R-XUR */ +#define NV_DPCD_TEST_LANE_COUNT_VALUE 4:0 /* R-XUF */ + +#define NV_DPCD_TEST_PATTERN (0x00000221) /* R-XUR */ +#define NV_DPCD_TEST_PATTERN_TYPE 1:0 /* R-XUF */ +#define NV_DPCD_TEST_PATTERN_TYPE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_PATTERN_TYPE_COLOR_RAMPS (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_PATTERN_TYPE_BW_VERTICAL_LINES (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_PATTERN_TYPE_COLOR_SQUARES (0x00000003) /* R-XUV */ + +#define NV_DPCD_TEST_H_TOTAL_HIGH_BYTE (0x00000222) /* R-XUR */ +#define NV_DPCD_TEST_H_TOTAL_LOW_BYTE (0x00000223) /* R-XUR */ + +#define NV_DPCD_TEST_V_TOTAL_HIGH_BYTE (0x00000224) /* R-XUR */ +#define NV_DPCD_TEST_V_TOTAL_LOW_BYTE (0x00000225) /* R-XUR */ + +#define NV_DPCD_TEST_H_START_HIGH_BYTE (0x00000226) /* R-XUR */ +#define NV_DPCD_TEST_H_START_LOW_BYTE (0x00000227) /* R-XUR */ + +#define NV_DPCD_TEST_V_START_HIGH_BYTE (0x00000228) /* R-XUR */ +#define NV_DPCD_TEST_V_START_LOW_BYTE (0x00000229) /* R-XUR */ + +#define NV_DPCD_TEST_HSYNC_HIGH_BYTE (0x0000022A) /* R-XUR */ +#define NV_DPCD_TEST_HSYNC_HIGH_BYTE_VALUE 6:0 /* R-XUF */ +#define NV_DPCD_TEST_HSYNC_HIGH_BYTE_POLARITY 7:7 /* R-XUF */ +#define NV_DPCD_TEST_HSYNC_LOW_BYTE (0x0000022B) /* R-XUR */ + +#define NV_DPCD_TEST_VSYNC_HIGH_BYTE (0x0000022C) /* R-XUR */ +#define NV_DPCD_TEST_VSYNC_HIGH_BYTE_VALUE 6:0 /* R-XUF */ +#define NV_DPCD_TEST_VSYNC_HIGH_BYTE_POLARITY 7:7 /* R-XUF */ +#define NV_DPCD_TEST_VSYNC_LOW_BYTE (0x0000022D) /* R-XUR */ + +#define NV_DPCD_TEST_H_WIDTH_HIGH_BYTE (0x0000022E) /* R-XUR */ +#define NV_DPCD_TEST_H_WIDTH_LOW_BYTE (0x0000022F) /* R-XUR */ + +#define NV_DPCD_TEST_V_HEIGHT_HIGH_BYTE (0x00000230) /* R-XUR */ +#define NV_DPCD_TEST_V_HEIGHT_LOW_BYTE (0x00000231) /* R-XUR */ + +#define NV_DPCD_TEST_MISC0 (0x00000232) /* R-XUR */ +#define NV_DPCD_TEST_MISC0_TEST_SYNC_CLOCK 0:0 /* R-XUF */ +#define NV_DPCD_TEST_MISC0_TEST_COLOR_FORMAT 2:1 /* R-XUF */ +#define NV_DPCD_TEST_MISC0_TEST_COLOR_FORMAT_RGB (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_COLOR_FORMAT_4_2_2 (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_COLOR_FORMAT_4_4_4 (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_COLOR_FORMAT_RESERVED (0x00000003) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_DYNAMIC_RANGE 3:3 /* R-XUF */ +#define NV_DPCD_TEST_MISC0_TEST_YCBCR_COEFF 4:4 /* R-XUF */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH 7:5 /* R-XUF */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH_6BITS (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH_8BITS (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH_10BITS (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH_12BITS (0x00000003) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH_16BITS (0x00000004) /* R-XUV */ + +#define NV_DPCD_TEST_MISC1 (0x00000233) /* R-XUR */ +#define NV_DPCD_TEST_MISC1_TEST_REFRESH_DENOMINATOR 0:0 /* R-XUF */ +#define NV_DPCD_TEST_MISC1_TEST_INTERLACED 1:1 /* R-XUF */ + +#define NV_DPCD_TEST_REFRESH_RATE_NUMERATOR (0x00000234) /* R-XUR */ + +// 00235h - 0023Fh: RESERVED for test automation extensions. 
Reads all 0s + +#define NV_DPCD_TEST_CRC_R_Cr_LOW_BYTE (0x00000240) /* R-XUR */ +#define NV_DPCD_TEST_CRC_R_Cr_HIGH_BYTE (0x00000241) /* R-XUR */ + +#define NV_DPCD_TEST_CRC_G_Y_LOW_BYTE (0x00000242) /* R-XUR */ +#define NV_DPCD_TEST_CRC_G_Y_HIGH_BYTE (0x00000243) /* R-XUR */ + +#define NV_DPCD_TEST_CRC_B_Cb_LOW_BYTE (0x00000244) /* R-XUR */ +#define NV_DPCD_TEST_CRC_B_Cb_HIGH_BYTE (0x00000245) /* R-XUR */ + +#define NV_DPCD_TEST_SINK_MISC (0x00000246) /* R-XUR */ +#define NV_DPCD_TEST_SINK_TEST_CRC_COUNT 3:0 /* R-XUF */ +#define NV_DPCD_TEST_SINK_TEST_CRC_SUPPORTED 5:5 /* R-XUF */ +#define NV_DPCD_TEST_SINK_TEST_CRC_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_SINK_TEST_CRC_SUPPORTED_YES (0x00000001) /* R-XUV */ + +// 00247h: RESERVED for test automation extensions. Reads all 0s + +#define NV_DPCD_PHY_TEST_PATTERN (0x00000248) /* R-XUR */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_DP11 1:0 /* R-XUF */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_DP12 2:0 /* R-XUF */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_D10_2 (0x00000001) /* R-XUV */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_SYM_ERR_MEASUREMENT_CNT (0x00000002) /* R-XUV */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_PRBS7 (0x00000003) /* R-XUV */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_80_BIT_CUSTOM (0x00000004) /* R-XUV */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_HBR2_COMPLIANCE_EYE (0x00000005) /* R-XUV */ + +#define NV_DPCD_HBR2_COMPLIANCE_SCRAMBLER_RESET_LOW_BYTE (0x0000024A) /* R-XUV */ +#define NV_DPCD_HBR2_COMPLIANCE_SCRAMBLER_RESET_HIGH_BYTE (0x0000024B) /* R-XUV */ + +// 0024Ch - 0024Fh: RESERVED for test automation extensions. Reads all 0s + +#define NV_DPCD_TEST_80BIT_CUSTOM_PATTERN(i) (0x00000250+(i)) /* R--1A */ +#define NV_DPCD_TEST_80BIT_CUSTOM_PATTERN__SIZE 10 /* R---S */ + +// 0025Ah - 0025Fh: RESERVED for test automation extensions. Reads all 0s + +#define NV_DPCD_TEST_RESPONSE (0x00000260) /* RWXUR */ +#define NV_DPCD_TEST_RESPONSE_TEST_ACK 0:0 /* RWXUF */ +#define NV_DPCD_TEST_RESPONSE_TEST_ACK_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TEST_RESPONSE_TEST_ACK_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_TEST_RESPONSE_TEST_NACK 1:1 /* RWXUF */ +#define NV_DPCD_TEST_RESPONSE_TEST_NACK_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TEST_RESPONSE_TEST_NACK_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_TEST_RESPONSE_TEST_EDID_CHKSUM_WRITE 2:2 /* RWXUF */ +#define NV_DPCD_TEST_RESPONSE_TEST_EDID_CHKSUM_WRITE_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TEST_RESPONSE_TEST_EDID_CHKSUM_WRITE_YES (0x00000001) /* RWXUV */ + +#define NV_DPCD_TEST_EDID_CHKSUM (0x00000261) /* RWXUR */ + +// 00263h - 0026Fh: RESERVED for test automation extensions. Read all 0s
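The six CRC bytes at 0x240..0x245 form three little-endian 16-bit values, one per color component. A sketch of reading them back after a CRC test, assuming the same hypothetical `dpAuxRead()` helper as above:

```c
#include <stdint.h>

/* Hypothetical AUX read helper; not provided by this header. */
extern int dpAuxRead(uint32_t addr, uint8_t *data, uint32_t len);

/*
 * Read the three 16-bit sink CRCs (R/Cr, G/Y, B/Cb) as one 6-byte
 * burst starting at NV_DPCD_TEST_CRC_R_Cr_LOW_BYTE (0x240).
 */
static int dpReadSinkCrcs(uint16_t crc[3])
{
    uint8_t raw[6];

    if (dpAuxRead(NV_DPCD_TEST_CRC_R_Cr_LOW_BYTE, raw, sizeof(raw)) != 0)
        return -1;

    crc[0] = (uint16_t)(raw[0] | (raw[1] << 8));  /* R / Cr */
    crc[1] = (uint16_t)(raw[2] | (raw[3] << 8));  /* G / Y  */
    crc[2] = (uint16_t)(raw[4] | (raw[5] << 8));  /* B / Cb */
    return 0;
}
```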
+ +#define NV_DPCD_TEST_SINK (0x00000270) /* RWXUR */ +#define NV_DPCD_TEST_SINK_START 0:0 /* RWXUF */ +#define NV_DPCD_TEST_SINK_START_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TEST_SINK_START_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_TEST_SINK_PHY_SINK_TEST_LANE_SEL 5:4 /* RWXUF */ +#define NV_DPCD_TEST_SINK_PHY_SINK_TEST_LANE_EN 7:7 /* RWXUF */ +#define NV_DPCD_TEST_SINK_PHY_SINK_TEST_LANE_EN_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD_TEST_SINK_PHY_SINK_TEST_LANE_EN_ENABLE (0x00000001) /* RWXUV */ + +#define NV_DPCD_TEST_AUDIO_MODE (0x00000271) /* R-XUR */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE 3:0 /* R-XUF */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_32_0KHZ (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_44_1KHZ (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_48_0KHZ (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_88_2KHZ (0x00000003) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_96_0KHZ (0x00000004) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_176_4KHZ (0x00000005) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_192_0KHZ (0x00000006) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT 7:4 /* R-XUF */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_1 (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_2 (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_3 (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_4 (0x00000003) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_5 (0x00000004) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_6 (0x00000005) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_7 (0x00000006) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_8 (0x00000007) /* R-XUV */ + +#define NV_DPCD_TEST_AUDIO_PATTERN (0x00000272) /* R-XUR */ +#define NV_DPCD_TEST_AUDIO_PATTERN_TYPE 7:0 /* R-XUF */ +#define NV_DPCD_TEST_AUDIO_PATTERN_TYPE_OP_DEFINED (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PATTERN_TYPE_SAWTOOTH (0x00000001) /* R-XUV */ + +#define NV_DPCD_TEST_AUDIO_PERIOD_CH(i) (0x00000273+(i)) /* R--1A */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH__SIZE 8 /* R---S */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES 3:0 /* R-XUF */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_UNUSED (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_3 (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_6 (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_12 (0x00000003) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_24 (0x00000004) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_48 (0x00000005) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_96 (0x00000006) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_192 (0x00000007) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_384 (0x00000008) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_768 (0x00000009) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_1536 (0x0000000A) /* R-XUV */ + +// 0027Bh - 0027Fh: RESERVED. 
Read all 0s + +// For DP version 1.3 and above +#define NV_DPCD_FEC_STATUS (0x00000280) /* R-XUR */ +#define NV_DPCD_FEC_STATUS_DECODE_EN 0:0 /* R-XUF */ +#define NV_DPCD_FEC_STATUS_DECODE_EN_NOT_DETECTED (0x00000000) /* R-XUV */ +#define NV_DPCD_FEC_STATUS_DECODE_EN_DETECTED (0x00000001) /* R-XUV */ +#define NV_DPCD_FEC_STATUS_DECODE_DIS 1:1 /* R-XUF */ +#define NV_DPCD_FEC_STATUS_DECODE_DIS_NOT_DETECTED (0x00000000) /* R-XUV */ +#define NV_DPCD_FEC_STATUS_DECODE_DIS_DETECTED (0x00000001) /* R-XUV */ + + +// 00283h - 002BFh: RESERVED. Read all 0s. + +#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS (0x000002C0) /* R-XUR */ +#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_UPDATED 0:0 /* R-XUF */ +#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_UPDATED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_UPDATED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED 1:1 /* R-XUF */ +#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_VC_PAYLOAD_ID_SLOT0_5_0 7:2 /* R-XUF */ + +#define NV_DPCD_VC_PAYLOAD_ID_SLOT1 (0x000002C1) /* R-XUR */ +#define NV_DPCD_VC_PAYLOAD_ID_SLOT1_VC_PAYLOAD_ID_SLOT0_6 7:7 /* R-XUF */ + +#define NV_DPCD_VC_PAYLOAD_ID_SLOT(i) (0x000002C1+(i)) /* R--1A */ +#define NV_DPCD_VC_PAYLOAD_ID_SLOT__SIZE 63 /* R---S */ + +// Source Device-Specific Field, Burst write for 00300h-0030Bh +// 6 hex digits: 0x300~0x302. +#define NV_DPCD_SOURCE_IEEE_OUI (0x00000300) /* RWXUR */ +#define NV_DPCD_OUI_NVIDIA_LITTLE_ENDIAN 0x4B0400 + +// 6 bytes: 0x303~0x308 +#define NV_DPCD_SOURCE_DEV_ID_STRING(i) (0x00000303+(i)) /* RW-1A */ +#define NV_DPCD_SOURCE_DEV_ID_STRING__SIZE 6 /* RW--S */ + +#define NV_DPCD_SOURCE_HARDWARE_REV (0x00000309) /* RWXUR */ +#define NV_DPCD_SOURCE_HARDWARE_REV_MINOR 3:0 /* RWXUF */ +#define NV_DPCD_SOURCE_HARDWARE_REV_MAJOR 7:4 /* RWXUF */ + +#define NV_DPCD_SOURCE_SOFTWARE_REV_MAJOR (0x0000030A) /* RWXUR */ +#define NV_DPCD_SOURCE_SOFTWARE_REV_MINOR (0x0000030B) /* RWXUR */ + +// Sink Device-Specific Field. 
Read Only +// 6 hex digits: 0x400~0x402 +#define NV_DPCD_SINK_IEEE_OUI (0x00000400) /* R-XUR */ + +// 6 bytes: 0x403~0x408 +#define NV_DPCD_SINK_DEV_ID_STRING(i) (0x00000403+(i)) /* R--1A */ +#define NV_DPCD_SINK_DEV_ID_STRING__SIZE 6 /* R---S */ + +#define NV_DPCD_SINK_HARDWARE_REV (0x00000409) /* R-XUR */ +#define NV_DPCD_SINK_HARDWARE_REV_MINOR 3:0 /* R-XUF */ +#define NV_DPCD_SINK_HARDWARE_REV_MAJOR 7:4 /* R-XUF */ + +#define NV_DPCD_SINK_SOFTWARE_REV_MAJOR (0x0000040A) /* R-XUR */ +#define NV_DPCD_SINK_SOFTWARE_REV_MINOR (0x0000040B) /* R-XUR */ + +// Branch Device-Specific Field +// 6 hex digits: 0x500~0x502 + +#define NV_DPCD_BRANCH_IEEE_OUI (0x00000500) /* R-XUR */ + +// 6 bytes: 0x503~0x508 +#define NV_DPCD_BRANCH_DEV_ID_STRING(i) (0x00000503+(i)) /* R--1A */ +#define NV_DPCD_BRANCH_DEV_ID_STRING__SIZE 6 /* R---S */ + +#define NV_DPCD_BRANCH_HARDWARE_REV (0x00000509) /* R-XUR */ +#define NV_DPCD_BRANCH_HARDWARE_REV_MINOR 3:0 /* R-XUF */ +#define NV_DPCD_BRANCH_HARDWARE_REV_MAJOR 7:4 /* R-XUF */ + +#define NV_DPCD_BRANCH_SOFTWARE_REV_MAJOR (0x0000050A) /* R-XUR */ +#define NV_DPCD_BRANCH_SOFTWARE_REV_MINOR (0x0000050B) /* R-XUR */ + +// Sink Control Field +#define NV_DPCD_SET_POWER (0x00000600) /* RWXUR */ +#define NV_DPCD_SET_POWER_VAL 2:0 /* RWXUF */ +#define NV_DPCD_SET_POWER_VAL_RESERVED (0x00000000) /* RWXUV */ +#define NV_DPCD_SET_POWER_VAL_D0_NORMAL (0x00000001) /* RWXUV */ +#define NV_DPCD_SET_POWER_VAL_D3_PWRDWN (0x00000002) /* RWXUV */ +#define NV_DPCD_SET_POWER_VAL_D3_AUX_ON (0x00000005) /* RWXUV */ + +/* + * 00601h - 006FFh: RESERVED. Read all 0s + */ + +// 00700h - 007FFh: RESERVED for eDP, see eDP v1.4 and above +#define NV_DPCD_EDP_REV (0x00000700) /* R-XUR */ +#define NV_DPCD_EDP_REV_VAL 7:0 /* R-XUF */ +#define NV_DPCD_EDP_REV_VAL_1_1_OR_LOWER (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_2 (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_3 (0x00000002) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_4 (0x00000003) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_4A (0x00000004) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_4B (0x00000005) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_5 (0x00000006) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_5A (0x00000006) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1 (0x00000701) /* R-XUR */ +#define NV_DPCD_EDP_GENERAL_CAP1_TCON_BKLGHT_ADJUST_CAP 0:0 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_TCON_BKLGHT_ADJUST_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_TCON_BKLGHT_ADJUST_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_PIN_EN_CAP 1:1 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_PIN_EN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_PIN_EN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_AUX_EN_CAP 2:2 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_AUX_EN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_AUX_EN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_PIN_EN_CAP 3:3 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_PIN_EN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_PIN_EN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_AUX_EN_CAP 4:4 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_AUX_EN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_AUX_EN_CAP_NO (0x00000000) /* R-XUV */
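The *_DEV_ID_STRING(i) macros above (including the branch variant whose missing parameter list is fixed here) index one byte at a time into a 6-byte, non-NUL-terminated identification string. A sketch of fetching the branch string, assuming a hypothetical `dpAuxRead()` helper:

```c
#include <stdint.h>

/* Hypothetical AUX read helper; not provided by this header. */
extern int dpAuxRead(uint32_t addr, uint8_t *data, uint32_t len);

/* Fetch the 6-byte branch device ID string and NUL-terminate it. */
static int dpReadBranchDevId(char out[NV_DPCD_BRANCH_DEV_ID_STRING__SIZE + 1])
{
    uint32_t i;

    for (i = 0; i < NV_DPCD_BRANCH_DEV_ID_STRING__SIZE; i++)
    {
        if (dpAuxRead(NV_DPCD_BRANCH_DEV_ID_STRING(i), (uint8_t *)&out[i], 1) != 0)
            return -1;
    }
    out[i] = '\0';  /* DPCD does not NUL-terminate the string */
    return 0;
}
```

+#define NV_DPCD_EDP_GENERAL_CAP1_FRC_EN_CAP 5:5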
/* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_FRC_EN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_FRC_EN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_COLOR_ENGINE_CAP 6:6 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_COLOR_ENGINE_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_COLOR_ENGINE_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_SET_POWER_CAP 7:7 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_SET_POWER_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_SET_POWER_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP (0x00000702) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_PWM_PIN_CAP 0:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_PWM_PIN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_PWM_PIN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_SET_CAP 1:1 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_SET_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_SET_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_BYTE_CNT 2:2 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_BYTE_CNT_2B (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_BYTE_CNT_1B (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_AUX_PWM_PRODUCT_CAP 3:3 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_AUX_PWM_PRODUCT_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_AUX_PWM_PRODUCT_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_PWM_PIN_PASSTHRU_CAP 4:4 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_PWM_PIN_PASSTHRU_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_PWM_PIN_PASSTHRU_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_AUX_SET_CAP 5:5 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_AUX_SET_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_AUX_SET_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_DYNAMIC_BKLGHT_CAP 6:6 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_DYNAMIC_BKLGHT_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_DYNAMIC_BKLGHT_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_VBLANK_BKLGHT_UPDATE_CAP 7:7 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_VBLANK_BKLGHT_UPDATE_CAP_VBL (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_VBLANK_BKLGHT_UPDATE_CAP_IMM (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2 (0x00000703) /* R-XUR */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_ENGINE_CAP 0:0 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_ENGINE_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_ENGINE_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_BKLGHT_BRIGHT_BIT_ALIGNMENT 2:1 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP2_BKLGHT_BRIGHT_BIT_ALIGNMENT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_BKLGHT_BRIGHT_BIT_ALIGNMENT_MSB (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_BKLGHT_BRIGHT_BIT_ALIGNMENT_LSB (0x00000002) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_CONTROL_CAP 3:3 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_CONTROL_CAP_NO (0x00000000) /* R-XUV */
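Before programming brightness over AUX, a source is expected to consult NV_DPCD_EDP_BKLGHT_ADJUST_CAP (0x702) above for both the capability bit and the value width. A sketch, with the same hypothetical `dpAuxRead()` helper:

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical AUX read helper; not provided by this header. */
extern int dpAuxRead(uint32_t addr, uint8_t *data, uint32_t len);

/*
 * Returns true when brightness can be set over AUX; *twoByte reports
 * whether the panel takes a 2-byte value.  Bits 1 (BRIGHT_AUX_SET_CAP)
 * and 2 (BRIGHT_AUX_BYTE_CNT) follow the field ranges above.
 */
static bool edpAuxBrightnessCaps(bool *twoByte)
{
    uint8_t cap;

    if (dpAuxRead(NV_DPCD_EDP_BKLGHT_ADJUST_CAP, &cap, 1) != 0)
        return false;

    *twoByte = ((cap >> 2) & 0x1) == NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_BYTE_CNT_2B;
    return ((cap >> 1) & 0x1) == NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_SET_CAP_YES;
}
```

+#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_CONTROL_CAP_YES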
(0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_PANEL_LUMINANCE_CONTROL_CAP 4:4 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP2_PANEL_LUMINANCE_CONTROL_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_PANEL_LUMINANCE_CONTROL_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_VARIABLE_BKLGHT_CONTROL_CAP 5:5 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP2_VARIABLE_BKLGHT_CONTROL_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_VARIABLE_BKLGHT_CONTROL_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP3 (0x00000704) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP3_X_REGION_CAP 3:0 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP3_X_REGION_CAP_NOT_SUPPORTED (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP3_Y_REGION_CAP 7:4 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP3_Y_REGION_CAP_NOT_SUPPORTED (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_DISPLAY_CTL (0x00000720) /* RWXUR */ +#define NV_DPCD_EDP_DISPLAY_CTL_BKLGHT_EN 0:0 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_BKLGHT_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_BKLGHT_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_BKLGHT_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_BLACK_VIDEO_EN 1:1 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_BLACK_VIDEO_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_BLACK_VIDEO_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_BLACK_VIDEO_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_FRC_EN 2:2 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_FRC_EN_2BIT (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_COLOR_ENGINE_EN 3:3 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_COLOR_ENGINE_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_COLOR_ENGINE_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_COLOR_ENGINE_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_OVERDRIVE_CTL 5:4 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_OVERDRIVE_CTL_AUTONOMOUS (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_OVERDRIVE_CTL_AUTONOMOUS_1 (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_OVERDRIVE_CTL_DISABLE (0x00000002) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_OVERDRIVE_CTL_ENABLE (0x00000003) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_VARIABLE_BKLGHT_CTRL_DISABLE 6:6 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_VARIABLE_BKLGHT_CTRL_DISABLE_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_VARIABLE_BKLGHT_CTRL_DISABLE_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_VBLANK_BKLGHT_UPDATE_EN 7:7 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_VBLANK_BKLGHT_UPDATE_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_VBLANK_BKLGHT_UPDATE_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET (0x00000721) /* RWXUR */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE 1:0 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE_PWM_PIN (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE_PRESET_LV (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE_AUX (0x00000002) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE_PWM_AND_AUX (0x00000003) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_PWM_PIN_PASSTHRU_EN 2:2 /* RWXUF */ +#define 
NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_PWM_PIN_PASSTHRU_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_PWM_PIN_PASSTHRU_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_PWM_PIN_PASSTHRU_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_AUX_SET_EN 3:3 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_AUX_SET_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_AUX_SET_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_AUX_SET_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_DYNAMIC_BKLGHT_EN 4:4 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_DYNAMIC_BKLGHT_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_DYNAMIC_BKLGHT_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_DYNAMIC_BKLGHT_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_REGIONAL_BKLGHT_EN 5:5 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_REGIONAL_BKLGHT_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_REGIONAL_BKLGHT_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_REGIONAL_BKLGHT_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_UPDATE_REGION_BRIGHTNESS 6:6 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_UPDATE_REGION_BRIGHTNESS_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_UPDATE_REGION_BRIGHTNESS_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_PANEL_LUMINANCE_CONTROL_ENABLE 7:7 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_PANEL_LUMINANCE_CONTROL_ENABLE_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_PANEL_LUMINANCE_CONTROL_ENABLE_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_MSB (0x00000722) /* RWXUR */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_MSB_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_MSB_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_LSB (0x00000723) /* RWXUR */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_LSB_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_LSB_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT (0x00000724) /* RWXUR */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_VAL 4:0 /* RWXUF */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MIN (0x00000725) /* R-XUR */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MIN_VAL 4:0 /* R-XUF */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MIN_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MAX (0x00000726) /* R-XUR */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MAX_VAL 4:0 /* R-XUF */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MAX_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS (0x00000727) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS_FAULT_CONDITION 0:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS_FAULT_CONDITION_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS_FAULT_CONDITION_FAULT (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS_FAULT_CONDITION_NORMAL (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS_VARIABLE_BKLGHT_STATUS 1:1 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS_VARIABLE_BKLGHT_STATUS_DISABLED (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS_VARIABLE_BKLGHT_STATUS_ENABLED (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_SET (0x00000728) /* 
RWXUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_SET_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_SET_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MSB (0x0000072A) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MSB_VAL 7:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MSB_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MID (0x0000072B) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MID_VAL 7:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MID_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_LSB (0x0000072C) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_LSB_VAL 1:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_LSB_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MSB (0x0000072D) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MSB_VAL 7:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MSB_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MID (0x0000072E) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MID_VAL 7:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MID_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_LSB (0x0000072F) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_LSB_VAL 1:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_LSB_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_DBC_MINIMUM_BRIGHTNESS_SET (0x00000732) /* RWXUR */ +#define NV_DPCD_EDP_DBC_MINIMUM_BRIGHTNESS_SET_VAL 4:0 /* RWXUF */ +#define NV_DPCD_EDP_DBC_MINIMUM_BRIGHTNESS_SET_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DBC_MAXIMUM_BRIGHTNESS_SET (0x00000733) /* RWXUR */ +#define NV_DPCD_EDP_DBC_MAXIMUM_BRIGHTNESS_CAP_VAL 4:0 /* RWXUF */ +#define NV_DPCD_EDP_DBC_MAXIMUM_BRIGHTNESS_CAP_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_PANEL_TARGET_LUMINANCE_LSB (0x00000734) /* RWXUR */ +#define NV_DPCD_PANEL_TARGET_LUMINANCE_LSB_VAL 7:0 /* RWXUF */ +#define NV_DPCD_PANEL_TARGET_LUMINANCE_LSB_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_PANEL_TARGET_LUMINANCE_MID (0x00000735) /* RWXUR */ +#define NV_DPCD_PANEL_TARGET_LUMINANCE_MID_VAL 7:0 /* RWXUF */ +#define NV_DPCD_PANEL_TARGET_LUMINANCE_MID_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_PANEL_TARGET_LUMINANCE_MSB (0x00000736) /* RWXUR */ +#define NV_DPCD_PANEL_TARGET_LUMINANCE_MSB_VAL 7:0 /* RWXUF */ +#define NV_DPCD_PANEL_TARGET_LUMINANCE_MSB_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BKLGHT_BASE (0x00000740) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BKLGHT_BASE_INDEX_OFFSET_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BKLGHT_BASE_INDEX_OFFSET_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_0 (0x00000741) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_0_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_0_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_1 (0x00000742) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_1_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_1_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_2 (0x00000743) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_2_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_2_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_3 (0x00000744) /* RWXUR */
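The target luminance fixed above is a 24-bit value spread across LSB/MID/MSB byte registers at 0x734..0x736. A sketch of programming it as one burst, assuming a hypothetical `dpAuxWrite()` helper:

```c
#include <stdint.h>

/* Hypothetical AUX write helper; not provided by this header. */
extern int dpAuxWrite(uint32_t addr, const uint8_t *data, uint32_t len);

/* Split a 24-bit target luminance across 0x734 (LSB) .. 0x736 (MSB). */
static int edpSetTargetLuminance(uint32_t lum24)
{
    uint8_t bytes[3];

    bytes[0] = (uint8_t)(lum24 & 0xFF);          /* ..._LUMINANCE_LSB */
    bytes[1] = (uint8_t)((lum24 >> 8) & 0xFF);   /* ..._LUMINANCE_MID */
    bytes[2] = (uint8_t)((lum24 >> 16) & 0xFF);  /* ..._LUMINANCE_MSB */
    return dpAuxWrite(NV_DPCD_PANEL_TARGET_LUMINANCE_LSB, bytes, sizeof(bytes));
}
```

+#define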
NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_3_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_3_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_4 (0x00000745) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_4_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_4_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_5 (0x00000746) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_5_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_5_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_6 (0x00000747) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_6_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_6_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_7 (0x00000748) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_7_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_7_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_8 (0x00000749) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_8_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_8_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_9 (0x0000074A) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_9_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_9_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_10 (0x0000074B) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_10_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_10_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_11 (0x0000074C) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_11_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_11_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_12 (0x0000074D) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_12_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_12_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_13 (0x0000074E) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_13_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_13_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_14 (0x0000074F) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_14_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_14_VAL_INIT (0x00000000) /* RWXUV */ + +/* + * 00800h - 00FFFh: RESERVED. 
Read all 0s + */ + +// Sideband MSG Buffers +#define NV_DPCD_MBOX_DOWN_REQ (0x00001000) /* RWXUR */ +#define NV_DPCD_MBOX_UP_REP (0x00001200) /* RWXUR */ +#define NV_DPCD_MBOX_DOWN_REP (0x00001400) /* R-XUR */ +#define NV_DPCD_MBOX_UP_REQ (0x00001600) /* R-XUR */ + +// 0x2000 & 0x2001 : RESERVED for USB-over-AUX + +// ESI (Event Status Indicator) Field +#define NV_DPCD_SINK_COUNT_ESI (0x00002002) /* R-XUR */ +#define NV_DPCD_SINK_COUNT_ESI_SINK_COUNT 5:0 /* R-XUF */ +#define NV_DPCD_SINK_COUNT_ESI_CP_READY 6:6 /* R-XUF */ +#define NV_DPCD_SINK_COUNT_ESI_CP_READY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_SINK_COUNT_ESI_CP_READY_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0 (0x00002003) /* R-XUR */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_REMOTE_CTRL 0:0 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_REMOTE_CTRL_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_REMOTE_CTRL_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_AUTO_TEST 1:1 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_AUTO_TEST_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_AUTO_TEST_YES (0x00000001) /* R-XUV */ +// for eDP v1.4 & v1.4a only +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_TOUCH_IRQ 1:1 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_TOUCH_IRQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_TOUCH_IRQ_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_CP 2:2 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_CP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_CP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_MCCS_IRQ 3:3 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_MCCS_IRQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_MCCS_IRQ_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_DOWN_REP_MSG_RDY 4:4 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_DOWN_REP_MSG_RDY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_DOWN_REP_MSG_RDY_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_UP_REQ_MSG_RDY 5:5 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_UP_REQ_MSG_RDY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_UP_REQ_MSG_RDY_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_SINK_SPECIFIC_IRQ 6:6 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_SINK_SPECIFIC_IRQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_SINK_SPECIFIC_IRQ_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1 (0x00002004) /* R-XUR */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1_RX_GTC_MSTR_REQ_STATUS_CHANGE 0:0 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1_RX_GTC_MSTR_REQ_STATUS_CHANGE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1_RX_GTC_MSTR_REQ_STATUS_CHANGE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1_PANEL_REPLAY_ERROR_STATUS 3:3 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1_PANEL_REPLAY_ERROR_STATUS_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1_PANEL_REPLAY_ERROR_STATUS_YES (0x00000001) /* R-XUV */ + + +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0 (0x00002005) /* R-XUR */ +#define 
NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_RX_CAP_CHANGED 0:0 /* R-XUF */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_RX_CAP_CHANGED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_RX_CAP_CHANGED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_LINK_STATUS_CHANGED 1:1 /* R-XUF */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_LINK_STATUS_CHANGED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_LINK_STATUS_CHANGED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_STREAM_STATUS_CHANGED 2:2 /* R-XUF */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_STREAM_STATUS_CHANGED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_STREAM_STATUS_CHANGED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_HDMI_LINK_STATUS_CHANGED 3:3 /* R-XUF */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_HDMI_LINK_STATUS_CHANGED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_HDMI_LINK_STATUS_CHANGED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_CONNECTED_OFF_ENTRY_REQ 4:4 /* R-XUF */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_CONNECTED_OFF_ENTRY_REQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_CONNECTED_OFF_ENTRY_REQ_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS (0x00002006) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_LINK_CRC_ERR 0:0 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_LINK_CRC_ERR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_LINK_CRC_ERR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_RFB_ERR 1:1 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_RFB_ERR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_RFB_ERR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_VSC_SDP_ERR 2:2 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_VSC_SDP_ERR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_VSC_SDP_ERR_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS (0x00002007) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS_CAP_CHANGE 0:0 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS_CAP_CHANGE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS_CAP_CHANGE_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS (0x00002008) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL 2:0 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_INACTIVE (0x00000000) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_TRANSITION_TO_ACTIVE (0x00000001) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_ACTIVE_DISP_FROM_RFB (0x00000002) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_ACTIVE_SINK_DEV_TIMING (0x00000003) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_TRANSITION_TO_INACTIVE (0x00000004) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_RESERVED0 (0x00000005) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_RESERVED1 (0x00000006) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_SINK_DEV_INTERNAL_ERR (0x00000007) /* R-XUV */ + +#define NV_DPCD_PANEL_SELF_REFRESH_DEBUG0 (0x00002009) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_DEBUG0_MAX_RESYNC_FRAME_CNT 3:0 /* R-XUF */ +#define 
NV_DPCD_PANEL_SELF_REFRESH_DEBUG0_LAST_RESYNC_FRAME_CNT 7:4 /* R-XUF */ + +#define NV_DPCD_PANEL_SELF_REFRESH_DEBUG1 (0x0000200A) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP (0x0000200A) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_PSR_STATE_BIT 0:0 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_RFB_BIT 1:1 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_CRC_VALID_BIT 2:2 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_SU_VALID_BIT 3:3 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_SU_FIRST_LINE_RCVD 4:4 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_SU_LAST_LINE_RCVD 5:5 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_Y_CORD_VALID 6:6 /* R-XUF */ + +// 0200Bh: RESERVED. Read all 0s + +#define NV_DPCD_LANE0_1_STATUS_ESI (0x0000200C) /* R-XUR */ +#define NV_DPCD_LANE2_3_STATUS_ESI (0x0000200D) /* R-XUR */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CR_DONE 0:0 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CR_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CR_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CHN_EQ_DONE 1:1 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CHN_EQ_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CHN_EQ_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_SYMBOL_LOCKED 2:2 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_SYMBOL_LOCKED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_SYMBOL_LOCKED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CR_DONE 4:4 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CR_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CR_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CHN_EQ_DONE 5:5 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CHN_EQ_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CHN_EQ_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_SYMBOL_LOCKED 6:6 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_SYMBOL_LOCKED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_SYMBOL_LOCKED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI (0x0000200E) /* R-XUR */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_INTERLANE_ALIGN_DONE 0:0 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_INTERLANE_ALIGN_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_INTERLANE_ALIGN_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_DOWNSTRM_PORT_STATUS_DONE 6:6 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_DOWNSTRM_PORT_STATUS_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_DOWNSTRM_PORT_STATUS_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_LINK_STATUS_UPDATED 7:7 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_LINK_STATUS_UPDATED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_LINK_STATUS_UPDATED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_SINK_STATUS_ESI (0x0000200F) /* R-XUR */ +#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_0_STATUS 0:0 /* R-XUF */ +#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_0_STATUS_IN_SYNC_NO 
(0x00000000) /* R-XUV */
+#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_0_STATUS_IN_SYNC_YES (0x00000001) /* R-XUV */
+#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_1_STATUS 1:1 /* R-XUF */
+#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_1_STATUS_IN_SYNC_NO (0x00000000) /* R-XUV */
+#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_1_STATUS_IN_SYNC_YES (0x00000001) /* R-XUV */
+
+// 0x00002010-0x00002025: RESERVED. Read all 0s
+#define NV_DPCD_OVERDRIVE_STATUS (0x00002026) /* R-XUR */
+#define NV_DPCD_OVERDRIVE_STATUS_OVERDRIVE_ENGINE_STATUS 0:0 /* R-XUF */
+#define NV_DPCD_OVERDRIVE_STATUS_OVERDRIVE_ENGINE_STATUS_NOT_ACTIVE (0x00000000) /* R-XUV */
+#define NV_DPCD_OVERDRIVE_STATUS_OVERDRIVE_ENGINE_STATUS_ACTIVE (0x00000001) /* R-XUV */
+// 0x00002027-0x00067FFF: RESERVED. Read all 0s
+
+#define NV_DPCD_HDCP_BKSV_OFFSET (0x00068000) /* R-XUR */
+#define NV_DPCD_HDCP_RPRIME_OFFSET (0x00068005) /* R-XUR */
+#define NV_DPCD_HDCP_AKSV_OFFSET (0x00068007) /* RWXUR */
+#define NV_DPCD_HDCP_AN_OFFSET (0x0006800C) /* RWXUR */
+#define NV_DPCD_HDCP_BKSV_S_OFFSET (0x00000300) /* RWXUV */
+#define NV_DPCD_HDCP_RPRIME_S_OFFSET (0x00000305) /* RWXUV */
+#define NV_DPCD_HDCP_AKSV_S_OFFSET (0x00000307) /* RWXUV */
+#define NV_DPCD_HDCP_AN_S_OFFSET (0x0000030c) /* RWXUV */
+#define NV_DPCD_HDCP_VPRIME_OFFSET (0x00068014) /* R-XUR */
+#define NV_DPCD_HDCP_BCAPS_OFFSET (0x00068028) /* R-XUR */
+#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE 0:0 /* R-XUF */
+#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER 1:1 /* R-XUF */
+#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER_NO (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER_YES (0x00000001) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_OFFSET (0x00068029) /* R-XUR */
+#define NV_DPCD_HDCP_BSTATUS_REAUTHENTICATION_REQUEST 3:3 /* R-XUF */
+#define NV_DPCD_HDCP_BSTATUS_REAUTHENTICATION_REQUEST_FALSE (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_REAUTHENTICATION_REQUEST_TRUE (0x00000001) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_LINK_INTEGRITY_FAILURE 2:2 /* R-XUF */
+#define NV_DPCD_HDCP_BSTATUS_LINK_INTEGRITY_FAILURE_FALSE (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_LINK_INTEGRITY_FAILURE_TRUE (0x00000001) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_RPRIME_AVAILABLE 1:1 /* R-XUF */
+#define NV_DPCD_HDCP_BSTATUS_RPRIME_AVAILABLE_FALSE (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_RPRIME_AVAILABLE_TRUE (0x00000001) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_READY 0:0 /* R-XUF */
+#define NV_DPCD_HDCP_BSTATUS_READY_FALSE (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_READY_TRUE (0x00000001) /* R-XUV */
+#define NV_DPCD_HDCP_BINFO_OFFSET (0x0006802A) /* R-XUR */
+#define NV_DPCD_HDCP_BINFO_OFFSET_DEVICE_COUNT 6:0 /* R-XUF */
+#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_DEVS_EXCEEDED 7:7 /* R-XUF */
+#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_DEVS_EXCEEDED_FALSE (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_DEVS_EXCEEDED_TRUE (0x00000001) /* R-XUV */
+#define NV_DPCD_HDCP_BINFO_OFFSET_DEPTH 10:8 /* R-XUF */
+#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_CASCADE_EXCEEDED 11:11 /* R-XUF */
+#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_CASCADE_EXCEEDED_FALSE (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_CASCADE_EXCEEDED_TRUE (0x00000001) /* R-XUV */
+
+#define NV_DPCD_HDCP_KSV_FIFO_OFFSET (0x0006802C) /* R-XUR */
+
+#define NV_DPCD_HDCP_AINFO_OFFSET (0x0006803B) /* RWXUR */
+#define NV_DPCD_HDCP_AINFO_OFFSET_REAUTHENTICATION_ENABLE_IRQ_HPD 0:0 /* RWXUF */
+#define NV_DPCD_HDCP_AINFO_OFFSET_REAUTHENTICATION_ENABLE_IRQ_HPD_NO (0x00000000) /* RWXUV */
+#define NV_DPCD_HDCP_AINFO_OFFSET_REAUTHENTICATION_ENABLE_IRQ_HPD_YES (0x00000001) /* RWXUV */
+
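The "h:l" values in these defines are DRF-style bit-field positions (high bit : low bit) meant to be consumed through NVIDIA's ternary-operator token trick, not read as literal expressions. A minimal, illustrative sketch of that convention applied to the HDCP BSTATUS register follows; DPCD_FIELD_SHIFT/MASK/VAL are simplified hypothetical stand-ins (the driver's real helpers live in nvmisc.h), and dpcd_read() is a hypothetical AUX-read wrapper, not a driver API.

#include <stdint.h>

/* "0 ? (h:l)" evaluates to l and "1 ? (h:l)" to h -- the classic DRF trick. */
#define DPCD_FIELD_SHIFT(f)  ((0 ? f) % 32)
#define DPCD_FIELD_MASK(f)   (0xFFFFFFFFU >> (31 - ((1 ? f) % 32) + ((0 ? f) % 32)))
#define DPCD_FIELD_VAL(f, v) (((v) >> DPCD_FIELD_SHIFT(f)) & DPCD_FIELD_MASK(f))

extern uint8_t dpcd_read(uint32_t addr);   /* hypothetical AUX helper */

/* Returns nonzero once the HDCP repeater reports its KSV list is ready. */
static int hdcp_bstatus_ready(void)
{
    uint8_t bstatus = dpcd_read(NV_DPCD_HDCP_BSTATUS_OFFSET);
    return DPCD_FIELD_VAL(NV_DPCD_HDCP_BSTATUS_READY, bstatus) ==
           NV_DPCD_HDCP_BSTATUS_READY_TRUE;
}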
+// Eight-Lane DP Specific DPCD defines
+#define NV_DPCD_SL_TRAINING_LANE0_1_SET2(baseAddr) (baseAddr + 0x0000010E) /* RWXUR */
+#define NV_DPCD_SL_TRAINING_LANE2_3_SET2(baseAddr) (baseAddr + 0x0000010F) /* RWXUR */
+#define NV_DPCD_SL_LANE4_5_STATUS(baseAddr) (baseAddr + 0x00000202) /* R-XUR */
+#define NV_DPCD_SL_LANE6_7_STATUS(baseAddr) (baseAddr + 0x00000203) /* R-XUR */
+#define NV_DPCD_DUAL_DP_CAP (0x000003B0) /* RWXUR */ // Dual DP Capability Register
+#define NV_DPCD_DUAL_DP_CAP_DDC 0:0 /* RWXUF */ // Dual DP Capability
+#define NV_DPCD_DUAL_DP_CAP_DDC_DISABLE (0x00000000) /* RWXUV */
+#define NV_DPCD_DUAL_DP_CAP_DDC_ENABLE (0x00000001) /* RWXUV */
+#define NV_DPCD_DUAL_DP_CAP_DDCIC 1:1 /* RWXUF */ // DDCIC : Dual DP Column Interleave Mode Capability
+#define NV_DPCD_DUAL_DP_CAP_DDCIC_DISABLE (0x00000000) /* RWXUV */
+#define NV_DPCD_DUAL_DP_CAP_DDCIC_ENABLE (0x00000001) /* RWXUV */
+#define NV_DPCD_DUAL_DP_CAP_DDPSBSC 2:2 /* RWXUF */ // DDPSBSC : Dual DP Pixel Side-by-Side Mode Capability
+#define NV_DPCD_DUAL_DP_CAP_DDPSBSC_DISABLE (0x00000000) /* RWXUV */
+#define NV_DPCD_DUAL_DP_CAP_DDPSBSC_ENABLE (0x00000001) /* RWXUV */
+
+#define NV_DPCD_DUAL_DP_BASE_ADDRESS 19:0 /* RWXUF */
+#define NV_DPCD_DUAL_DP_COLUMN_WIDTH 15:0 /* RWXUF */
+#define NV_DPCD_DUAL_DP_MAX_LANECOUNT 4:0 /* RWXUF */
+#define NV_DPCD_DUAL_DP_MAX_LANECOUNT_1H 0x1 /* RWXUV */
+#define NV_DPCD_DUAL_DP_MAX_LANECOUNT_2H 0x2 /* RWXUV */
+#define NV_DPCD_DUAL_DP_MAX_LANECOUNT_4H 0x4 /* RWXUV */
+#define NV_DPCD_DUAL_DP_MAX_LANECOUNT_8H 0x8 /* RWXUV */
+
+#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL(baseAddr) (baseAddr + 0x00000110) /* RWXUR */ // Dual Link Control Register
+#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_PIX_MODE 1:0 /* RWXUF */ // PIX_MODE : Pixel mode select
+#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_PIX_MODE_SIDE_BY_SIDE (0x00000000) /* RWXUV */ // Side by side Mode enabled
+#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_PIX_MODE_COL_INTERLEAVE (0x00000001) /* RWXUV */ // Column Interleave Mode enabled
+#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_DD_ENABLE 7:7 /* RWXUF */ // DD_ENABLE: Enable Dual DP mode.
+#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_DD_ENABLE_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_DD_ENABLE_FALSE (0x00000000) /* RWXUV */ + +#define NV_DPCD_DUAL_DP_PIXEL_OVERLAP(baseAddr) (baseAddr + 0x00000111) /* RWXUR */ // PIXEL_OVERLAP Register +#define NV_DPCD_DUAL_DP_PIXEL_OVERLAP_IGNORE_PIX_COUNT 6:0 /* RWXUF */ // Ignore Pix Count - Number of pixels to ignore + +#define NV_DPCD_HDCP22_BCAPS_OFFSET (0x0006921D) /* R-XUR */ +#define NV_DPCD_HDCP22_BCAPS_SIZE (0x00000003) /* R---S */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_REPEATER 0:0 /* R-XUF */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_REPEATER_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_REPEATER_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_CAPABLE 1:1 /* R-XUF */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_CAPABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_RECEIVER_CAPABILITY_MASK 15:2 /* R-XUF */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_RECEIVER_CAPABILITY_MASK_RESERVED (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_VERSION 23:16 /* R-XUF */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_VERSION_22 (0x00000002) /* R-XUV */ + +#define NV_DPCD_HDCP22_BINFO_OFFSET (0x00069330) /* R-XUR */ +#define NV_DPCD_HDCP22_BINFO_SIZE (0x00000002) /* R---S */ + +#define NV_DPCD_HDCP22_RX_STATUS (0x00069493) /* R-XUR */ +#define NV_DPCD_HDCP22_RX_STATUS_SIZE (0x00000001) /* R---S */ +#define NV_DPCD_HDCP22_RX_STATUS_READY 0:0 /* R-XUF */ +#define NV_DPCD_HDCP22_RX_STATUS_READY_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_READY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_HPRIME_AVAILABLE 1:1 /* R-XUF */ +#define NV_DPCD_HDCP22_RX_STATUS_HPRIME_AVAILABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_HPRIME_AVAILABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_PAIRING_AVAILABLE 2:2 /* R-XUF */ +#define NV_DPCD_HDCP22_RX_STATUS_PAIRING_AVAILABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_PAIRING_AVAILABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_REAUTH_REQUEST 3:3 /* R-XUF */ +#define NV_DPCD_HDCP22_RX_STATUS_REAUTH_REQUEST_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_REAUTH_REQUEST_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_LINK_INTEGRITY_FAILURE 4:4 /* R-XUF */ +#define NV_DPCD_HDCP22_RX_STATUS_LINK_INTEGRITY_FAILURE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_LINK_INTEGRITY_FAILURE_NO (0x00000000) /* R-XUV */ + +#define NV_DPCD_HDCP22_RTX_OFFSET (0x00069000) /* RWXUR */ +#define NV_DPCD_HDCP22_RTX_SIZE (0x00000008) /* R---S */ + +#define NV_DPCD_HDCP22_TXCAPS_OFFSET (0x00069008) /* RWXUR */ +#define NV_DPCD_HDCP22_TXCAPS_SIZE (0x00000003) /* R---S */ + +#define NV_DPCD_HDCP22_CERTRX (0x0006900B) /* R-XUR */ +#define NV_DPCD_HDCP22_CERTRX_SIZE (0x0000020A) /* R---S */ + +#define NV_DPCD_HDCP22_RRX (0x00069215) /* R-XUR */ +#define NV_DPCD_HDCP22_RRX_SIZE (0x00000008) /* R---S */ + +#endif // #ifndef _DPCD_H_ diff --git a/src/common/inc/displayport/dpcd14.h b/src/common/inc/displayport/dpcd14.h new file mode 100644 index 0000000..9a5a766 --- /dev/null +++ b/src/common/inc/displayport/dpcd14.h @@ -0,0 +1,748 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DISPLAYPORT14_H_ +#define _DISPLAYPORT14_H_ + +#define NV_DPCD14_GUID_2 (0x00000040) /* R-XUR */ + +#define NV_DPCD14_EXTEND_CAP_BASE (0x00002200) + +#define NV_DPCD14_MAX_LINK_BANDWIDTH (0x00000001) /* R-XUR */ +#define NV_DPCD14_MAX_LINK_BANDWIDTH_VAL 7:0 /* R-XUF */ +#define NV_DPCD14_MAX_LINK_BANDWIDTH_VAL_8_10_GBPS (0x0000001E) /* R-XUV */ + +#define NV_DPCD14_MAX_DOWNSPREAD (0x00000003) /* R-XUR */ +#define NV_DPCD14_MAX_DOWNSPREAD_TPS4_SUPPORTED 7:7 /* R-XUF */ +#define NV_DPCD14_MAX_DOWNSPREAD_TPS4_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_MAX_DOWNSPREAD_TPS4_SUPPORTED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL (0x0000000E) /* R-XUR */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_EXTENDED_RX_CAP 7:7 /* R-XUF */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_EXTENDED_RX_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_EXTENDED_RX_CAP_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_SUPPORT (0x00000060) /* R-XUR */ +#define NV_DPCD14_DSC_SUPPORT_DECOMPRESSION 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_SUPPORT_DECOMPRESSION_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SUPPORT_DECOMPRESSION_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_ALGORITHM_REVISION (0x00000061) /* R-XUR */ +#define NV_DPCD14_DSC_ALGORITHM_REVISION_MAJOR 3:0 /* R-XUF */ +#define NV_DPCD14_DSC_ALGORITHM_REVISION_MINOR 7:4 /* R-XUF */ + +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK (0x00000062) /* R-XUR */ +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK_SIZE 1:0 /* R-XUF */ +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK_SIZE_1KB (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK_SIZE_4KB (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK_SIZE_16KB (0x00000002) /* R-XUV */ +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK_SIZE_64KB (0x00000003) /* R-XUV */ + +#define NV_DPCD14_DSC_RC_BUFFER (0x00000063) /* R-XUR */ +#define NV_DPCD14_DSC_RC_BUFFER_SIZE 7:0 /* R-XUF */ + +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1 (0x00000064) /* R-XUR */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_1 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_1_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_1_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_2 1:1 /* R-XUF */ +#define 
NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_2_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_2_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_4 3:3 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_4_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_4_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_6 4:4 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_6_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_6_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_8 5:5 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_8_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_8_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_10 6:6 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_10_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_10_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_12 7:7 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_12_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_12_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_LINE_BUFFER (0x00000065) /* R-XUR */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH 3:0 /* R-XUF */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_9 (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_10 (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_11 (0x00000002) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_12 (0x00000003) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_13 (0x00000004) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_14 (0x00000005) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_15 (0x00000006) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_16 (0x00000007) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_8 (0x00000008) /* R-XUV */ + +#define NV_DPCD14_DSC_BLOCK_PREDICTION (0x00000066) /* R-XUR */ +#define NV_DPCD14_DSC_BLOCK_PREDICTION_SUPPORT 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_BLOCK_PREDICTION_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_BLOCK_PREDICTION_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_MAXIMUM_BITS_PER_PIXEL_1 (0x00000067) /* R-XUR */ +#define NV_DPCD14_DSC_MAXIMUM_BITS_PER_PIXEL_1_LSB 7:0 /* R-XUF */ + +#define NV_DPCD14_DSC_MAXIMUM_BITS_PER_PIXEL_2 (0x00000068) /* R-XUR */ +#define NV_DPCD14_DSC_MAXIMUM_BITS_PER_PIXEL_2_MSB 1:0 /* R-XUF */ + +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES (0x00000069) /* R-XUR */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_RGB 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_RGB_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_RGB_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_444 1:1 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_444_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_444_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_SIMPLE_422 2:2 /* R-XUF */ +#define 
NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_SIMPLE_422_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_SIMPLE_422_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_422 3:3 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_422_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_422_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_420 4:4 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_420_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_420_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES (0x0000006A) /* R-XUR */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_8_BITS_PER_COLOR 1:1 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_8_BITS_PER_COLOR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_8_BITS_PER_COLOR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_10_BITS_PER_COLOR 2:2 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_10_BITS_PER_COLOR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_10_BITS_PER_COLOR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_12_BITS_PER_COLOR 3:3 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_12_BITS_PER_COLOR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_12_BITS_PER_COLOR_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_PEAK_THROUGHPUT (0x0000006B) /* R-XUR */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0 3:0 /* R-XUF */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_340 (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_400 (0x00000002) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_450 (0x00000003) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_500 (0x00000004) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_550 (0x00000005) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_600 (0x00000006) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_650 (0x00000007) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_700 (0x00000008) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_750 (0x00000009) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_800 (0x0000000A) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_850 (0x0000000B) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_900 (0x0000000C) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_950 (0x0000000D) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_1000 (0x0000000E) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1 7:4 /* R-XUF */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_340 (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_400 (0x00000002) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_450 (0x00000003) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_500 (0x00000004) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_550 (0x00000005) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_600 (0x00000006) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_650 (0x00000007) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_700 
(0x00000008) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_750 (0x00000009) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_800 (0x0000000A) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_850 (0x0000000B) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_900 (0x0000000C) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_950 (0x0000000D) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_1000 (0x0000000E) /* R-XUV */ + +#define NV_DPCD14_DSC_MAXIMUM_SLICE_WIDTH (0x0000006C) /* R-XUR */ +#define NV_DPCD14_DSC_MAXIMUM_SLICE_WIDTH_MAX 7:0 /* R-XUF */ + +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2 (0x0000006D) /* R-XUR */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_16 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_16_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_16_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_20 1:1 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_20_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_20_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_24 2:2 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_24_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_24_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT (0x0000006F) /* R-XUR */ +#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED 2:0 /* R-XUF */ +#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED_1_16 (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED_1_8 (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED_1_4 (0x00000002) /* R-XUV */ +#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED_1_2 (0x00000003) /* R-XUV */ +#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED_1 (0x00000004) /* R-XUV */ + +#define NV_DPCD14_DSC_ENABLE (0x00000160) /* R-XUR */ +#define NV_DPCD14_DSC_ENABLE_DECOMPRESSION 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_ENABLE_DECOMPRESSION_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_ENABLE_DECOMPRESSION_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_FEC_CAPABILITY (0x00000090) /* R-XUR */ +#define NV_DPCD14_FEC_CAPABILITY_FEC_CAPABLE 0:0 /* R-XUF */ +#define NV_DPCD14_FEC_CAPABILITY_FEC_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_FEC_CAPABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE 1:1 /* R-XUF */ +#define NV_DPCD14_FEC_CAPABILITY_UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_CORRECTED_BLOCK_ERROR_COUNT_CAPABLE 2:2 /* R-XUF */ +#define NV_DPCD14_FEC_CAPABILITY_CORRECTED_BLOCK_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_CORRECTED_BLOCK_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_BIT_ERROR_COUNT_CAPABLE 3:3 /* R-XUF */ +#define NV_DPCD14_FEC_CAPABILITY_BIT_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_BIT_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_PARITY_BLOCK_ERROR_COUNT_CAPABLE 4:4 /* R-XUF */ +#define NV_DPCD14_FEC_CAPABILITY_PARITY_BLOCK_ERROR_COUNT_CAPABLE_NO 
(0x00000000) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_PARITY_BLOCK_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_PARITY_ERROR_COUNT_CAPABLE 5:5 /* R-XUF */
+#define NV_DPCD14_FEC_CAPABILITY_PARITY_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_PARITY_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_FEC_RUNNING_INDICATOR_SUPPORT 6:6 /* R-XUF */
+#define NV_DPCD14_FEC_CAPABILITY_FEC_RUNNING_INDICATOR_SUPPORT_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_FEC_RUNNING_INDICATOR_SUPPORT_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_FEC_ERROR_REPORTING_POLICY_SUPPORTED 7:7 /* R-XUF */
+#define NV_DPCD14_FEC_CAPABILITY_FEC_ERROR_REPORTING_POLICY_SUPPORTED_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_FEC_ERROR_REPORTING_POLICY_SUPPORTED_YES (0x00000001) /* R-XUV */
+
+#define NV_DPCD14_TRAINING_PATTERN_SET (0x00000102) /* RWXUR */
+#define NV_DPCD14_TRAINING_PATTERN_SET_TPS 3:0 /* RWXUF */
+#define NV_DPCD14_TRAINING_PATTERN_SET_TPS_NONE (0x00000000) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_TPS_TP1 (0x00000001) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_TPS_TP2 (0x00000002) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_TPS_TP3 (0x00000003) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_TPS_TP4 (0x00000007) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN 4:4 /* RWXUF */
+#define NV_DPCD14_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_NO (0x00000000) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_YES (0x00000001) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED 5:5 /* RWXUF */
+#define NV_DPCD14_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED_FALSE (0x00000000) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED_TRUE (0x00000001) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_SYM_ERR_SEL 7:6 /* RWXUF */
+#define NV_DPCD14_TRAINING_PATTERN_SET_SYM_ERR_SEL_DISPARITY_ILLEGAL_SYMBOL_ERROR (0x00000000) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_SYM_ERR_SEL_DISPARITY_ERROR (0x00000001) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_SYM_ERR_SEL_ILLEGAL_SYMBOL_ERROR (0x00000002) /* RWXUV */
+
+#define NV_DPCD14_LINK_QUAL_LANE_SET(i) (0x0000010B+(i)) /* RW-1A */
+#define NV_DPCD14_LINK_QUAL_LANE_SET__SIZE 4 /* R---S */
+#define NV_DPCD14_LINK_QUAL_LANE_SET_LQS 2:0 /* RWXUF */
+#define NV_DPCD14_LINK_QUAL_LANE_SET_LQS_CP2520PAT3 (0x00000007) /* RWXUV */
+
+#define NV_DPCD14_FEC_CONFIGURATION (0x00000120) /* RWXUR */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_READY 0:0 /* RWXUF */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_READY_NO (0x00000000) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_READY_YES (0x00000001) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL 3:1 /* RWXUF */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_FEC_ERROR_COUNT_DIS (0x00000000) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_UNCORRECTED_BLOCK_ERROR_COUNT (0x00000001) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_CORRECTED_BLOCK_ERROR_COUNT (0x00000002) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_BIT_ERROR_COUNT (0x00000003) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_PARITY_BLOCK_ERROR_COUNT (0x00000004) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_PARITY_BIT_ERROR_COUNT (0x00000005) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT 5:4 /* RWXUF */
+#define NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT_LANE_0 (0x00000000) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT_LANE_1 (0x00000001) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT_LANE_2 (0x00000002) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT_LANE_3 (0x00000003) /* RWXUV */
+
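DP 1.4 ties these registers together: a source that intends to run FEC sets the FEC_READY bit in FEC_CONFIGURATION before starting link training, so the sink can account for the FEC parity symbols, and only enables FEC on the main link afterwards. Below is a hedged sketch of that readiness step, reusing the hypothetical DPCD_FIELD_VAL and dpcd_read() stand-ins from the HDCP example earlier; dpcd_write() is likewise a hypothetical AUX wrapper, not a driver API.

extern void dpcd_write(uint32_t addr, uint8_t val);  /* hypothetical AUX helper */

/* Returns 1 if the sink is FEC-capable and FEC_READY was set, else 0. */
static int fec_prepare_for_link_training(void)
{
    uint8_t cap = dpcd_read(NV_DPCD14_FEC_CAPABILITY);

    if (DPCD_FIELD_VAL(NV_DPCD14_FEC_CAPABILITY_FEC_CAPABLE, cap) !=
        NV_DPCD14_FEC_CAPABILITY_FEC_CAPABLE_YES)
        return 0;

    /* FEC_READY = YES; leave error-count selection at its disabled default. */
    dpcd_write(NV_DPCD14_FEC_CONFIGURATION,
               NV_DPCD14_FEC_CONFIGURATION_FEC_READY_YES);
    return 1;
}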
+#define NV_DPCD14_PHY_TEST_PATTERN (0x00000248) /* R-XUR */
+#define NV_DPCD14_PHY_TEST_PATTERN_SEL_CP2520PAT3 (0x00000007) /* R-XUV */
+
+#define NV_DPCD14_DSC_CRC_0 (0x00000262) /* R-XUR */
+#define NV_DPCD14_DSC_CRC_0_LOW_BYTE NV_DPCD14_DSC_CRC_0
+#define NV_DPCD14_DSC_CRC_0_HIGH_BYTE (0x00000263) /* R-XUR */
+#define NV_DPCD14_DSC_CRC_1 (0x00000264) /* R-XUR */
+#define NV_DPCD14_DSC_CRC_1_LOW_BYTE NV_DPCD14_DSC_CRC_1
+#define NV_DPCD14_DSC_CRC_1_HIGH_BYTE (0x00000265) /* R-XUR */
+#define NV_DPCD14_DSC_CRC_2 (0x00000266) /* R-XUR */
+#define NV_DPCD14_DSC_CRC_2_LOW_BYTE NV_DPCD14_DSC_CRC_2
+#define NV_DPCD14_DSC_CRC_2_HIGH_BYTE (0x00000267) /* R-XUR */
+
+#define NV_DPCD14_FEC_STATUS (0x00000280) /* R-XUR */
+#define NV_DPCD14_FEC_STATUS_FEC_DECODE_EN_DETECTED 0:0 /* R-XUF */
+#define NV_DPCD14_FEC_STATUS_FEC_DECODE_EN_DETECTED_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_FEC_STATUS_FEC_DECODE_EN_DETECTED_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_FEC_STATUS_FEC_DECODE_DIS_DETECTED 1:1 /* R-XUF */
+#define NV_DPCD14_FEC_STATUS_FEC_DECODE_DIS_DETECTED_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_FEC_STATUS_FEC_DECODE_DIS_DETECTED_YES (0x00000001) /* R-XUV */
+// Bits 7-2: RESERVED.
+#define NV_DPCD14_FEC_STATUS_CLEAR (0x00000001)
+
+#define NV_DPCD14_FEC_ERROR_COUNT (0x00000281) /* R-XUR */
+#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_LOW_BYTE NV_DPCD14_FEC_ERROR_COUNT
+#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_HIGH_BYTE (0x00000282) /* R-XUR */
+#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_VALID 7:7 /* R-XUF */
+#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_VALID_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_VALID_YES (0x00000001) /* R-XUV */
+
+// Field definitions for FW/SW Revision
+#define NV_DPCD14_FW_SW_REVISION_MAJOR (0x0000040A) /* R-XUR */
+#define NV_DPCD14_FW_SW_REVISION_MINOR (0x0000040B) /* R-XUR */
+
+#define NV_DPCD14_EXTENDED_REV (0x00002200) /* R-XUR */
+#define NV_DPCD14_EXTENDED_REV_MAJOR 7:4 /* R-XUF */
+#define NV_DPCD14_EXTENDED_REV_MAJOR_1 (0x00000001) /* R-XUV */
+#define NV_DPCD14_EXTENDED_REV_MINOR 3:0 /* R-XUF */
+#define NV_DPCD14_EXTENDED_REV_MINOR_4 (0x00000004) /* R-XUV */
+
+#define NV_DPCD14_EXTENDED_MAX_LINK_BANDWIDTH (0x00002201) /* R-XUR */
+#define NV_DPCD14_EXTENDED_MAX_LINK_BANDWIDTH_VAL 7:0 /* R-XUF */
+#define NV_DPCD14_EXTENDED_MAX_LINK_BANDWIDTH_VAL_8_10_GBPS (0x0000001E) /* R-XUV */
+
+#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT (0x00002202) /* R-XUR */
+#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_LANE 4:0 /* R-XUF */
+#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_LANE_1 (0x00000001) /* R-XUV */
+#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_LANE_2 (0x00000002) /* R-XUV */
+#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_LANE_4 (0x00000004) /* R-XUV */
+#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT 5:5 /* R-XUF */
+#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_TPS3_SUPPORTED 6:6 /* R-XUF */
+#define 
NV_DPCD14_EXTENDED_MAX_LANE_COUNT_TPS3_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_TPS3_SUPPORTED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_ENHANCED_FRAMING 7:7 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_ENHANCED_FRAMING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_ENHANCED_FRAMING_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD (0x00002203) /* R-XUR */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_VAL 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_VAL_NONE (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_VAL_0_5_PCT (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT 6:6 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT_FALSE (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT_TRUE (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_TPS4_SUPPORTED 7:7 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_TPS4_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_TPS4_SUPPORTED_YES (0x00000001) /* R-XUV */ + +// NORP = Number of Receiver Ports = Value + 1 +#define NV_DPCD14_EXTENDED_NORP (0x00002204) /* R-XUR */ +#define NV_DPCD14_EXTENDED_NORP_VAL 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_NORP_VAL_ONE (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_NORP_VAL_TWO (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_NORP_VAL_SST_MAX (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_NORP_DP_PWR_CAP_5V 5:5 /* R-XUF */ +#define NV_DPCD14_EXTENDED_NORP_DP_PWR_CAP_12V 6:6 /* R-XUF */ +#define NV_DPCD14_EXTENDED_NORP_DP_PWR_CAP_18V 7:7 /* R-XUF */ + +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT (0x00002205) /* R-XUR */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_PRESENT 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_PRESENT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_PRESENT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_TYPE 2:1 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_TYPE_DISPLAYPORT (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_TYPE_ANALOG (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_TYPE_HDMI_DVI (0x00000002) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_TYPE_OTHERS (0x00000003) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_FORMAT_CONVERSION 3:3 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_FORMAT_CONVERSION_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_FORMAT_CONVERSION_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE 4:4 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING (0x00002206) /* R-XUR */ +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT (0x00002207) /* R-XUR */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_COUNT 3:0 /* R-XUF */ +#define 
NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED 6:6 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_OUI_SUPPORT 7:7 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_OUI_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_OUI_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_RECEIVE_PORT0_CAP_0 (0x00002208) /* R-XUR */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORT1_CAP_0 (0x0000220A) /* R-XUR */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_LOCAL_EDID 1:1 /* R-XUF */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_LOCAL_EDID_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_LOCAL_EDID_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT 2:2 /* R-XUF */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_HBLANK_EXPANSION_CAPABLE 3:3 /* R-XUF */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_HBLANK_EXPANSION_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_HBLANK_EXPANSION_CAPABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_UNIT 4:4 /* R-XUF */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_UNIT_PIXEL (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_UNIT_BYTE (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_PER_PORT 5:5 /* R-XUF */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_PER_PORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_PER_PORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_RECEIVE_PORT0_CAP_1 (0x00002209) /* R-XUR */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORT1_CAP_1 (0x0000220B) /* R-XUR */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_1_BUFFER_SIZE 7:0 /* R-XUF */ + +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP (0x0000220C) /* R-XUR */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED 7:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_1K (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_5K (0x00000002) /* R-XUV */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_10K (0x00000004) /* R-XUV */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_100K (0x00000008) /* R-XUV */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_400K (0x00000010) /* R-XUV */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_1M (0x00000020) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_EDP_CONFIG_CAP (0x0000220D) /* R-XUR */ +#define NV_DPCD14_EXTENDED_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL (0x0000220E) /* R-XUR */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL 6:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL_DEFAULT (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL_4MS (0x00000001) /* R-XUV */ +#define 
NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL_8MS (0x00000002) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL_12MS (0x00000003) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL_16MS (0x00000004) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_EXTENDED_RECEIVER_CAP 7:7 /* R-XUF */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_EXTENDED_RECEIVER_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_EXTENDED_RECEIVER_CAP_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_ADAPTER_CAP (0x0000220F) /* R-XUR */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_FORCE_LOAD_SENSE 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_FORCE_LOAD_SENSE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_FORCE_LOAD_SENSE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_ALT_I2C_PATTERN 1:1 /* R-XUF */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_ALT_I2C_PATTERN_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_ALT_I2C_PATTERN_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST (0x00002210) /* R-XUR */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_GTC_CAP 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_GTC_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_GTC_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_AV_SYNC_CAP 2:2 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_AV_SYNC_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_AV_SYNC_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_SDP_EXT_FOR_COLORIMETRY 3:3 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_SDP_EXT_FOR_COLORIMETRY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_SDP_EXT_FOR_COLORIMETRY_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP 4:4 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP_CHAINING 5:5 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP_CHAINING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP_CHAINING_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP 6:6 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP_CHAINING 7:7 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP_CHAINING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP_CHAINING_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST (0x00002211) /* R-XUR */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD 7:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_1MS (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_20MS (0x00000001) /* R-XUV */ +#define 
NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_40MS (0x00000002) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_60MS (0x00000003) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_80MS (0x00000004) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_100MS (0x00000005) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_VSC_EXT_VESA_SDP_MAX_CHAINING (0x00002212) /* R-XUR */ +#define NV_DPCD14_EXTENDED_VSC_EXT_VESA_SDP_MAX_CHAINING_VAL 7:0 /* R-XUF */ + +#define NV_DPCD14_EXTENDED_VSC_EXT_CTA_SDP_MAX_CHAINING (0x00002213) /* R-XUR */ +#define NV_DPCD14_EXTENDED_VSC_EXT_CTA_SDP_MAX_CHAINING_VAL 7:0 /* R-XUF */ + +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST (0x00002214) /* R-XUR */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_ADAPTIVE_SYNC_SDP_SUPPORTED 0:0 /* R-XUF */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_ADAPTIVE_SYNC_SDP_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_ADAPTIVE_SYNC_SDP_SUPPORTED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_VSC_EXT_FRAMEWORK_V1_SUPPORTED 4:4 /* R-XUF */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_VSC_EXT_FRAMEWORK_V1_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_VSC_EXT_FRAMEWORK_V1_SUPPORTED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_PCON_DOWNSTREAM_LINK_ERROR (0x00003030) /* R-XUR */ +#define NV_DPCD14_PCON_DOWNSTREAM_LINK_ERROR_REPORTING 0:0 /* R-XUF */ +#define NV_DPCD14_PCON_DOWNSTREAM_LINK_ERROR_REPORTING_NOT_SUPPORTED (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_DOWNSTREAM_LINK_ERROR_REPORTING_SUPPORTED (0x00000001) /* R-XUV */ +#define NV_DPCD14_PCON_DOWNSTREAM_LINK_ERROR_TMDS_LINK_CLOCK_DATA_NOTIFICATION 7:7 /* R-XUF */ +#define NV_DPCD14_PCON_DOWNSTREAM_LINK_ERROR_TMDS_LINK_CLOCK_DATA_NOTIFICATION_NOT_SUPPORTED (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_DOWNSTREAM_LINK_ERROR_TMDS_LINK_CLOCK_DATA_NOTIFICATION_SUPPORTED (0x00000001) /* R-XUV */ + +#define NV_DPCD14_PCON_DOWNSTREAM_HDMI_ERROR_STATUS_CH(i) (0x00003031+(i)) /* R-XUR */ +#define NV_DPCD14_PCON_DOWNSTREAM_HDMI_ERROR_STATUS_CH_THREE_ERRORS 0:0 /* R-XUF */ +#define NV_DPCD14_PCON_DOWNSTREAM_HDMI_ERROR_STATUS_CH_THREE_ERRORS_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_DOWNSTREAM_HDMI_ERROR_STATUS_CH_THREE_ERRORS_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_PCON_DOWNSTREAM_HDMI_ERROR_STATUS_CH_TEN_ERRORS 1:1 /* R-XUF */ +#define NV_DPCD14_PCON_DOWNSTREAM_HDMI_ERROR_STATUS_CH_TEN_ERRORS_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_DOWNSTREAM_HDMI_ERROR_STATUS_CH_TEN_ERRORS_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_PCON_DOWNSTREAM_HDMI_ERROR_STATUS_CH_HUNDRED_ERRORS 2:2 /* R-XUF */ +#define NV_DPCD14_PCON_DOWNSTREAM_HDMI_ERROR_STATUS_CH_HUNDRED_ERRORS_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_DOWNSTREAM_HDMI_ERROR_STATUS_CH_HUNDRED_ERRORS_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_PCON_DOWNSTREAM_HDMI_ERROR_STATUS_CH_CLOCK_DATA_STATUS 5:4 /* R-XUF */ +#define NV_DPCD14_PCON_DOWNSTREAM_HDMI_ERROR_STATUS_CH_CLOCK_DATA_STATUS_BOTH_LOCKED (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_DOWNSTREAM_HDMI_ERROR_STATUS_CH_CLOCK_DATA_STATUS_CLOCK_LOCKED (0x00000001) /* R-XUV */ +#define NV_DPCD14_PCON_DOWNSTREAM_HDMI_ERROR_STATUS_CH_CLOCK_DATA_STATUS_NONE_LOCKED (0x00000002) /* R-XUV */ + +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS (0x00003036) /* R-XUR */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_MODE 0:0 /* R-XUF */ +#define 
NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_MODE_TMDS (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_MODE_FRL (0x00000001) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RESULT 6:1 /* R-XUF */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_9G 1:1 /* R-XUF */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_9G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_9G_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_18G 2:2 /* R-XUF */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_18G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_18G_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_24G 3:3 /* R-XUF */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_24G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_24G_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_32G 4:4 /* R-XUF */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_32G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_32G_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_40G 5:5 /* R-XUF */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_40G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_40G_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_48G 6:6 /* R-XUF */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_48G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_48G_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_PCON_DOWNSTREAM_LINK_ERROR_LANE(i) (0x00003037+(i)) /* RW-1A */ +#define NV_DPCD14_PCON_DOWNSTREAM_LINK_ERROR_LANE__SIZE 4 /* R---S */ +#define NV_DPCD14_PCON_DOWNSTREAM_LINK_ERROR_LANE_COUNT 3:0 /* R-XUF */ +#define NV_DPCD14_PCON_DOWNSTREAM_LINK_ERROR_LANE_COUNT_ZERO (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_DOWNSTREAM_LINK_ERROR_LANE_COUNT_THREE (0x00000001) /* R-XUV */ +#define NV_DPCD14_PCON_DOWNSTREAM_LINK_ERROR_LANE_COUNT_TEN (0x00000002) /* R-XUV */ +#define NV_DPCD14_PCON_DOWNSTREAM_LINK_ERROR_LANE_COUNT_HUNDRED (0x00000004) /* R-XUV */ + +#define NV_DPCD14_PCON_HDMI_TX_LINK_STATUS (0x0000303B) /* R-XUR */ +#define NV_DPCD14_PCON_HDMI_TX_LINK_STATUS_LINK_ACTIVE 0:0 /* R-XUF */ +#define NV_DPCD14_PCON_HDMI_TX_LINK_STATUS_LINK_ACTIVE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_TX_LINK_STATUS_LINK_ACTIVE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_TX_LINK_STATUS_LINK_READY 1:1 /* R-XUF */ +#define NV_DPCD14_PCON_HDMI_TX_LINK_STATUS_LINK_READY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_PCON_HDMI_TX_LINK_STATUS_LINK_READY_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_PCON_CONTROL_0 (0x00003050) /* RWXUR */ +#define NV_DPCD14_PCON_CONTROL_0_OUTPUT_CONFIG 0:0 /* RWXUF */ +#define NV_DPCD14_PCON_CONTROL_0_OUTPUT_CONFIG_DVI (0x00000000) /* RWXUV */ +#define NV_DPCD14_PCON_CONTROL_0_OUTPUT_CONFIG_HDMI (0x00000001) /* RWXUV */ + +#define NV_DPCD14_PCON_CONTROL_1 (0x00003051) /* RWXUR */ +#define NV_DPCD14_PCON_CONTROL_1_CONVERT_YCBCR420 0:0 /* RWXUF */ +#define NV_DPCD14_PCON_CONTROL_1_CONVERT_YCBCR420_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD14_PCON_CONTROL_1_CONVERT_YCBCR420_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD14_PCON_CONTROL_1_DISABLE_HDMI_EDID_PROCESS 1:1 /* RWXUF */ +#define 
NV_DPCD14_PCON_CONTROL_1_DISABLE_HDMI_EDID_PROCESS_NO (0x00000000) /* RWXUV */
+#define NV_DPCD14_PCON_CONTROL_1_DISABLE_HDMI_EDID_PROCESS_YES (0x00000001) /* RWXUV */
+#define NV_DPCD14_PCON_CONTROL_1_DISABLE_HDMI_AUTO_SCRAMBLING 2:2 /* RWXUF */
+#define NV_DPCD14_PCON_CONTROL_1_DISABLE_HDMI_AUTO_SCRAMBLING_NO (0x00000000) /* RWXUV */
+#define NV_DPCD14_PCON_CONTROL_1_DISABLE_HDMI_AUTO_SCRAMBLING_YES (0x00000001) /* RWXUV */
+#define NV_DPCD14_PCON_CONTROL_1_DISABLE_HDMI_FORCE_SCRAMBLING 3:3 /* RWXUF */
+#define NV_DPCD14_PCON_CONTROL_1_DISABLE_HDMI_FORCE_SCRAMBLING_NO (0x00000000) /* RWXUV */
+#define NV_DPCD14_PCON_CONTROL_1_DISABLE_HDMI_FORCE_SCRAMBLING_YES (0x00000001) /* RWXUV */
+
+#define NV_DPCD14_PCON_CONTROL_2 (0x00003052) /* RWXUR */
+#define NV_DPCD14_PCON_CONTROL_2_CONVERT_YCBCR422 0:0 /* RWXUF */
+#define NV_DPCD14_PCON_CONTROL_2_CONVERT_YCBCR422_DISABLE (0x00000000) /* RWXUV */
+#define NV_DPCD14_PCON_CONTROL_2_CONVERT_YCBCR422_ENABLE (0x00000001) /* RWXUV */
+
+#define NV_DPCD14_PCON_CONTROL_3 (0x00003053) /* RWXUR */
+#define NV_DPCD14_PCON_CONTROL_3_COMPONENT_BIT_DEPTH 1:0 /* RWXUF */
+#define NV_DPCD14_PCON_CONTROL_3_COMPONENT_BIT_DEPTH_SAME_AS_INC (0x00000000) /* RWXUV */
+#define NV_DPCD14_PCON_CONTROL_3_COMPONENT_BIT_DEPTH_8BPC (0x00000001) /* RWXUV */
+#define NV_DPCD14_PCON_CONTROL_3_COMPONENT_BIT_DEPTH_10BPC (0x00000002) /* RWXUV */
+#define NV_DPCD14_PCON_CONTROL_3_COMPONENT_BIT_DEPTH_12BPC (0x00000003) /* RWXUV */
+
+#define NV_DPCD14_OUTPUT_HTOTAL_LOW (0x00003054) /* RWXUR */
+#define NV_DPCD14_OUTPUT_HTOTAL_HIGH (0x00003055) /* RWXUR */
+
+#define NV_DPCD14_OUTPUT_HSTART_LOW (0x00003056) /* RWXUR */
+#define NV_DPCD14_OUTPUT_HSTART_HIGH (0x00003057) /* RWXUR */
+
+#define NV_DPCD14_OUTPUT_HSP_HSW_LOW (0x00003058) /* RWXUR */
+#define NV_DPCD14_OUTPUT_HSP_HSW_HIGH (0x00003059) /* RWXUR */
+#define NV_DPCD14_OUTPUT_HSP_HSW_HIGH_VAL 6:0 /* RWXUF */
+#define NV_DPCD14_OUTPUT_HSP_HSW_HIGH_OUTPUT_HSP 7:7 /* RWXUF */
+#define NV_DPCD14_OUTPUT_HSP_HSW_HIGH_OUTPUT_HSP_POSITIVE (0x00000000) /* RWXUV */
+#define NV_DPCD14_OUTPUT_HSP_HSW_HIGH_OUTPUT_HSP_NEGATIVE (0x00000001) /* RWXUV */
+
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1 (0x0000305A) /* RWXUR */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW 2:0 /* RWXUF */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_ZERO (0x00000000) /* RWXUV */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_9G (0x00000001) /* RWXUV */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_18G (0x00000002) /* RWXUV */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_24G (0x00000003) /* RWXUV */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_32G (0x00000004) /* RWXUV */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_40G (0x00000005) /* RWXUV */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_48G (0x00000006) /* RWXUV */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_SRC_CONTROL_MODE 3:3 /* RWXUF */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_SRC_CONTROL_MODE_DISABLE (0x00000000) /* RWXUV */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_SRC_CONTROL_MODE_ENABLE (0x00000001) /* RWXUV */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_CONCURRENT_LT_MODE 4:4 /* RWXUF */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_CONCURRENT_LT_MODE_DISABLE (0x00000000) /* RWXUV */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_CONCURRENT_LT_MODE_ENABLE (0x00000001) /* RWXUV */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_LINK_FRL_MODE 5:5 /* RWXUF */
+#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_LINK_FRL_MODE_DISABLE (0x00000000) /* RWXUV */
+#define 
NV_DPCD14_PCON_FRL_LINK_CONFIG_1_LINK_FRL_MODE_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_IRQ_LINK_FRL_MODE 6:6 /* RWXUF */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_IRQ_LINK_FRL_MODE_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_IRQ_LINK_FRL_MODE_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_HDMI_LINK 7:7 /* RWXUF */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_HDMI_LINK_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_1_HDMI_LINK_ENABLE (0x00000001) /* RWXUV */ + +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_2 (0x0000305B) /* RWXUR */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK 5:0 /* RWXUF */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_9G (0x00000001) /* RWXUV */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_18G (0x00000002) /* RWXUV */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_24G (0x00000004) /* RWXUV */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_32G (0x00000008) /* RWXUV */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_40G (0x00000010) /* RWXUV */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_48G (0x00000020) /* RWXUV */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_2_FRL_LT_CONTROL 6:6 /* RWXUF */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_2_FRL_LT_CONTROL_NORMAL (0x00000000) /* RWXUV */ +#define NV_DPCD14_PCON_FRL_LINK_CONFIG_2_FRL_LT_CONTROL_EXTENDED (0x00000001) /* RWXUV */ + +// LT Tunable Repeater Related offsets + +#define NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV (0x000F0000) /* R-XUR */ +#define NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV_MINOR 3:0 /* R-XUF */ +#define NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV_MINOR_0 (0x00000000) /* R-XUV */ +#define NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV_MAJOR 7:4 /* R-XUF */ +#define NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV_MAJOR_1 (0x00000001) /* R-XUV */ + +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER (0x000F0001) /* R-XUR */ +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER_VAL 7:0 /* R-XUF */ +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER_VAL_1_62_GBPS (0x00000006) /* R-XUV */ +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER_VAL_2_70_GBPS (0x0000000A) /* R-XUV */ +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER_VAL_5_40_GBPS (0x00000014) /* R-XUV */ +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER_VAL_8_10_GBPS (0x0000001E) /* R-XUV */ + +#define NV_DPCD14_PHY_REPEATER_CNT (0x000F0002) /* R-XUR */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL 7:0 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_0 (0x00000000) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_1 (0x00000080) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_2 (0x00000040) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_3 (0x00000020) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_4 (0x00000010) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_5 (0x00000008) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_6 (0x00000004) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_7 (0x00000002) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_8 (0x00000001) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_MAX 6 + +#define NV_DPCD14_PHY_REPEATER_MODE (0x000F0003) /* R-XUR */ +#define NV_DPCD14_PHY_REPEATER_MODE_VAL_TRANSPARENT (0x00000055) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_MODE_VAL_NON_TRANSPARENT (0x000000AA) /* R-XUV */ + +#define NV_DPCD14_MAX_LANE_COUNT_PHY_REPEATER (0x000F0004) /* R-XUR */ +#define NV_DPCD14_MAX_LANE_COUNT_PHY_REPEATER_VAL 4:0 /* R-XUF */ + +#define NV_DPCD14_PHY_REPEATER_EXTENDED_WAKE_TIMEOUT 
(0x000F0005) /* RWXUR */
+#define NV_DPCD14_PHY_REPEATER_EXTENDED_WAKE_TIMEOUT_REQ 6:0 /* R-XUF */
+#define NV_DPCD14_PHY_REPEATER_EXTENDED_WAKE_TIMEOUT_GRANT 7:7 /* RWXUF */
+
+#define NV_DPCD14_PHY_REPEATER_START(i) (0x000F0010+(i)*0x50) /* RW-1A */
+#define NV_DPCD14_PHY_REPEATER_START__SIZE 8 /* R---S */
+// Following defines are offsets
+#define NV_DPCD14_TRAINING_PATTERN_SET_PHY_REPEATER (0x00000000) /* RWXUV */
+#define NV_DPCD14_TRAINING_LANE0_SET_PHY_REPEATER (0x00000001) /* RWXUV */
+#define NV_DPCD14_TRAINING_LANE1_SET_PHY_REPEATER (0x00000002) /* RWXUV */
+#define NV_DPCD14_TRAINING_LANE2_SET_PHY_REPEATER (0x00000003) /* RWXUV */
+#define NV_DPCD14_TRAINING_LANE3_SET_PHY_REPEATER (0x00000004) /* RWXUV */
+
+#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER (0x00000010) /* R-XUR */
+#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER_VAL 6:0 /* R-XUF */
+#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER_VAL_4MS (0x00000001) /* R-XUV */
+#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER_VAL_8MS (0x00000002) /* R-XUV */
+#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER_VAL_12MS (0x00000003) /* R-XUV */
+#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER_VAL_16MS (0x00000004) /* R-XUV */
+
+#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER (0x00000011) /* R-XUR */
+#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_VOLTAGE_SWING_3 0:0 /* R-XUF */
+#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_VOLTAGE_SWING_3_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_VOLTAGE_SWING_3_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_PRE_EMPHASIS_3 1:1 /* R-XUF */
+#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_PRE_EMPHASIS_3_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_PRE_EMPHASIS_3_YES (0x00000001) /* R-XUV */
+
+#define NV_DPCD14_LANE0_1_STATUS_PHY_REPEATER (0x00000020) /* R-XUR */
+#define NV_DPCD14_LANE2_3_STATUS_PHY_REPEATER (0x00000021) /* R-XUR */
+#define NV_DPCD14_LANE_ALIGN_STATUS_UPDATED_PHY_REPEATER (0x00000022) /* R-XUR */
+#define NV_DPCD14_ADJUST_REQUEST_LANE0_1_PHY_REPEATER (0x00000023) /* R-XUR */
+#define NV_DPCD14_ADJUST_REQUEST_LANE2_3_PHY_REPEATER (0x00000024) /* R-XUR */
+
+#define NV_DPCD14_PHY_REPEATER_FEC__SIZE NV_DPCD14_PHY_REPEATER_CNT_MAX /* R---S */
+#define NV_DPCD14_PHY_REPEATER_FEC_STATUS(i) (0x000F0290+(i)*8) /* R--1A */
+#define NV_DPCD14_PHY_REPEATER_FEC_STATUS_FEC_DECODE_EN_DETECTED 0:0 /* R-XUF */
+#define NV_DPCD14_PHY_REPEATER_FEC_STATUS_FEC_DECODE_EN_DETECTED_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_STATUS_FEC_DECODE_EN_DETECTED_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_STATUS_FEC_DECODE_DIS_DETECTED 1:1 /* R-XUF */
+#define NV_DPCD14_PHY_REPEATER_FEC_STATUS_FEC_DECODE_DIS_DETECTED_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_STATUS_FEC_DECODE_DIS_DETECTED_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_STATUS_FEC_RUNNING_INDICATOR 2:2 /* R-XUF */
+#define NV_DPCD14_PHY_REPEATER_FEC_STATUS_FEC_RUNNING_INDICATOR_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_STATUS_FEC_RUNNING_INDICATOR_YES (0x00000001) /* R-XUV */
+
+#define NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT(i) (0x000F0291+(i)*8) /* R--2A */
+#define NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT_LOW_BYTE(i) (NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT(i))
+#define NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT_HIGH_BYTE(i) ((0x000F0292+(i)*8)) /* R-XUR */
+#define NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT_VALID 7:7 /* R-XUF */
+#define NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT_VALID_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT_VALID_YES (0x00000001) /* R-XUV */
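Each LT-tunable PHY repeater (LTTPR) gets its own 8-byte FEC register block, which is why the macros above take a repeater index i and scale it by an 8-byte stride. A sketch of reading one repeater's 15-bit FEC error count with the VALID gate applied, again using the hypothetical dpcd_read() and DPCD_FIELD_VAL stand-ins from the earlier examples:

/* Returns the FEC error count for repeater i, or 0 if the count is not valid. */
static uint16_t lttpr_fec_err_count(int i)
{
    uint8_t lo = dpcd_read(NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT_LOW_BYTE(i));
    uint8_t hi = dpcd_read(NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT_HIGH_BYTE(i));

    if (DPCD_FIELD_VAL(NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT_VALID, hi) !=
        NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT_VALID_YES)
        return 0;

    /* Bit 7 of the high byte is the VALID flag; mask it off before combining. */
    return (uint16_t)(((uint16_t)(hi & 0x7F) << 8) | lo);
}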
R-XUF */
+#define NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT_VALID_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT_VALID_YES (0x00000001) /* R-XUV */
+
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0(i) (0x000F0294+(i)*8) /* R--1A */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_FEC_CAPABLE 0:0 /* R-XUF */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_FEC_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_FEC_CAPABLE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE 1:1 /* R-XUF */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_CORRECTED_BLOCK_ERROR_COUNT_CAPABLE 2:2 /* R-XUF */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_CORRECTED_BLOCK_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_CORRECTED_BLOCK_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_BIT_ERROR_COUNT_CAPABLE 3:3 /* R-XUF */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_BIT_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_BIT_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_PARITY_BLOCK_ERROR_COUNT_CAPABLE 4:4 /* R-XUF */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_PARITY_BLOCK_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_PARITY_BLOCK_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_PARITY_ERROR_COUNT_CAPABLE 5:5 /* R-XUF */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_PARITY_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_PARITY_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_FEC_RUNNING_INDICATOR_SUPPORT 6:6 /* R-XUF */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_FEC_RUNNING_INDICATOR_SUPPORT_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_FEC_RUNNING_INDICATOR_SUPPORT_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_FEC_ERROR_REPORTING_POLICY_SUPPORTED 7:7 /* R-XUF */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_FEC_ERROR_REPORTING_POLICY_SUPPORTED_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_0_FEC_ERROR_REPORTING_POLICY_SUPPORTED_YES (0x00000001) /* R-XUV */
+
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_1(i) (0x000F0295+(i)*8) /* R--1A */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_1_AGGREGATE_ERR_COUNT_CAPABLE 0:0 /* R-XUF */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_1_AGGREGATE_ERR_COUNT_CAPABLE_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_PHY_REPEATER_FEC_CAP_1_AGGREGATE_ERR_COUNT_CAPABLE_CAPABLE_YES (0x00000001) /* R-XUV */
+
+// BRANCH SPECIFIC DSC CAPS
+#define NV_DPCD14_BRANCH_DSC_OVERALL_THROUGHPUT_MODE_0 (0x000000A0)
+#define NV_DPCD14_BRANCH_DSC_OVERALL_THROUGHPUT_MODE_0_VALUE 7:0
+
+#define NV_DPCD14_BRANCH_DSC_OVERALL_THROUGHPUT_MODE_1 (0x000000A1)
+#define NV_DPCD14_BRANCH_DSC_OVERALL_THROUGHPUT_MODE_1_VALUE 7:0
+
+#define NV_DPCD14_BRANCH_DSC_MAXIMUM_LINE_BUFFER_WIDTH (0x000000A2)
+#define NV_DPCD14_BRANCH_DSC_MAXIMUM_LINE_BUFFER_WIDTH_VALUE 7:0
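+
+// Usage sketch (illustrative, not part of the DPCD address map above): the
+// H:L suffixed defines such as NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT_VALID
+// (7:7) are bit ranges intended for DRF-style shift/mask helpers. Read by
+// hand, assembling the FEC error count looks like this, with dpcdReadByte()
+// standing in for whatever AUX read routine the caller already has:
+//
+//     NvU8 lo = dpcdReadByte(NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT_LOW_BYTE(i));
+//     NvU8 hi = dpcdReadByte(NV_DPCD14_PHY_REPEATER_FEC_ERR_COUNT_HIGH_BYTE(i));
+//     if ((hi >> 7) & 0x1)                           // ..._ERR_COUNT_VALID, bits 7:7
+//     {
+//         NvU16 errCount = (NvU16)(((hi & 0x7F) << 8) | lo);  // count bits 14:0
+//     }
+
+#endif // #ifndef _DISPLAYPORT14_H_
+
diff --git a/src/common/inc/displayport/dpcd20.h b/src/common/inc/displayport/dpcd20.h
new file mode 100644
index 0000000..f7008a0
--- /dev/null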
+++ b/src/common/inc/displayport/dpcd20.h @@ -0,0 +1,198 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _DISPLAYPORT20_H_ +#define _DISPLAYPORT20_H_ +#include "nvcfg_sdk.h" + +// DSC Pass Through related DPCD. New bits in DPCD 0x0060h defined in DPCD2.0. +#define NV_DPCD20_DSC_SUPPORT_PASS_THROUGH 1:1 /* R-XUF */ +#define NV_DPCD20_DSC_SUPPORT_PASS_THROUGH_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DSC_SUPPORT_PASS_THROUGH_YES (0x00000001) /* R-XUV */ +// DSC Pass Through related DPCD. New bits in DPCD 0x0160h defined in DPCD2.0. +#define NV_DPCD20_DSC_ENABLE_PASS_THROUGH 1:1 /* R-XUF */ +#define NV_DPCD20_DSC_ENABLE_PASS_THROUGH_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DSC_ENABLE_PASS_THROUGH_YES (0x00000001) /* R-XUV */ + +// DSC Dynamic PPS related DPCD. New bits in DPCD 0x0060h defined in DPCD2.0. +#define NV_DPCD20_DSC_SUPPORT_DYNAMIC_PPS_COMPRESSED_TO_COMPRESSED 2:2 +#define NV_DPCD20_DSC_SUPPORT_DYNAMIC_PPS_COMPRESSED_TO_COMPRESSED_NO (0x00000000) +#define NV_DPCD20_DSC_SUPPORT_DYNAMIC_PPS_COMPRESSED_TO_COMPRESSED_YES (0x00000001) + +// DSC Dynamic PPS related DPCD. New bits in DPCD 0x0060h defined in DPCD2.0. 
+#define NV_DPCD20_DSC_SUPPORT_DYNAMIC_PPS_UNCOMPRESSED_TO_FROM_COMPRESSED 3:3 +#define NV_DPCD20_DSC_SUPPORT_DYNAMIC_PPS_UNCOMPRESSED_TO_FROM_COMPRESSED_NO (0x00000000) +#define NV_DPCD20_DSC_SUPPORT_DYNAMIC_PPS_UNCOMPRESSED_TO_FROM_COMPRESSED_YES (0x00000001) + +// PANEL REPLAY RELATED DPCD +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY (0x000000B0) /* R-XUR */ +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED 0:0 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION (0x000001B0) /* R-XUR */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE 0:0 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_CRC 1:1 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_CRC_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_CRC_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_HPD_ADAPTIVE_SYNC_SDP_MISSING 2:2 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_HPD_ADAPTIVE_SYNC_SDP_MISSING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_HPD_ADAPTIVE_SYNC_SDP_MISSING_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_HPD_SDP_UNCORRECTABLE_ERROR 3:3 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_HPD_SDP_UNCORRECTABLE_ERROR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_HPD_SDP_UNCORRECTABLE_ERROR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_HPD_RFB_STORAGE_ERRORS 4:4 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_HPD_RFB_STORAGE_ERRORS_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_HPD_RFB_STORAGE_ERRORS_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_HPD_RFB_ACTIVE_FRAME_CRC_ERROR 5:5 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_HPD_RFB_ACTIVE_FRAME_CRC_ERROR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_HPD_RFB_ACTIVE_FRAME_CRC_ERROR_YES (0x00000001) /* R-XUV */ + + +#define NV_DPCD20_PANEL_REPLAY_ERROR_STATUS (0x00002020) /* R-XUR */ +#define NV_DPCD20_PANEL_REPLAY_ERROR_STATUS_ACTIVE_FRAME_CRC_ERROR 0:0 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_ERROR_STATUS_ACTIVE_FRAME_CRC_ERROR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_ERROR_STATUS_ACTIVE_FRAME_CRC_ERROR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_ERROR_STATUS_RFB_STORAGE_ERROR 1:1 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_ERROR_STATUS_RFB_STORAGE_ERROR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_ERROR_STATUS_RFB_STORAGE_ERROR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_ERROR_STATUS_VSC_SDP_UNCORRECTABLE_ERROR 2:2 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_ERROR_STATUS_VSC_SDP_UNCORRECTABLE_ERROR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_ERROR_STATUS_VSC_SDP_UNCORRECTABLE_ERROR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_ERROR_STATUS_ADAPTIVE_SYNC_SDP_MISSING 3:3 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_ERROR_STATUS_ADAPTIVE_SYNC_SDP_MISSING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_ERROR_STATUS_ADAPTIVE_SYNC_SDP_MISSING_YES (0x00000001) /* R-XUV */ + +#define 
NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS (0x00002022) /* R-XUR */ +#define NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_PR_STATUS 2:0 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_PR_STATUS_STATE_0 (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_PR_STATUS_STATE_1 (0x00000001) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_PR_STATUS_STATE_2 (0x00000002) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_PR_STATUS_STATE_ERROR (0x00000007) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_SINK_FRAME_LOCKED 4:3 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_SINK_FRAME_LOCKED_LOCKED (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_SINK_FRAME_LOCKED_COASTING (0x00000001) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_SINK_FRAME_LOCKED_GOVERNING (0x00000002) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_SINK_FRAME_LOCKED_RELOCKING (0x00000003) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_SINK_FRAME_LOCKED_VALID 5:5 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_SINK_FRAME_LOCKED_VALID_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_AND_FRAME_LOCK_STATUS_SINK_FRAME_LOCKED_VALID_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD20_PANEL_REPLAY_DEBUG_LAST_VSC_SDP_CARRYING_PR_INFO (0x00002024) /* R-XUR */ +#define NV_DPCD20_PANEL_REPLAY_DEBUG_LAST_VSC_SDP_CARRYING_PR_INFO_STATE 0:0 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_DEBUG_LAST_VSC_SDP_CARRYING_PR_INFO_STATE_INACTIVE (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_DEBUG_LAST_VSC_SDP_CARRYING_PR_INFO_STATE_ACTIVE (0x00000001) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_DEBUG_LAST_VSC_SDP_CARRYING_PR_INFO_CRC_VALID 2:2 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_DEBUG_LAST_VSC_SDP_CARRYING_PR_INFO_CRC_VALID_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_DEBUG_LAST_VSC_SDP_CARRYING_PR_INFO_CRC_VALID_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_DEBUG_LAST_VSC_SDP_CARRYING_PR_INFO_SU_COORDINATE_VALID 3:3 /* R-XUF */ +#define NV_DPCD20_PANEL_REPLAY_DEBUG_LAST_VSC_SDP_CARRYING_PR_INFO_SU_COORDINATE_VALID_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PANEL_REPLAY_DEBUG_LAST_VSC_SDP_CARRYING_PR_INFO_SU_COORDINATE_VALID_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD20_PHY_REPEATER_ALPM_CAPS (0x000F0009) /* R-XUR */ +#define NV_DPCD20_PHY_REPEATER_ALPM_CAPS_AUX_LESS 0:0 /* R-XUF */ +#define NV_DPCD20_PHY_REPEATER_ALPM_CAPS_AUX_LESS_NOT_SUPPORTED (0x00000000) /* R-XUV */ +#define NV_DPCD20_PHY_REPEATER_ALPM_CAPS_AUX_LESS_SUPPORTED (0x00000001) /* R-XUV */ + +#define NV_DPCD20_PHY_REPEATER_TOTAL_LTTPR_CNT (0x000F000A) /* RWXUR */ +#define NV_DPCD20_PHY_REPEATER_TOTAL_LTTPR_CNT_VAL 7:0 /* R-XUF */ + + +// +// Adding DPCD registers for DP Tunneling feature. 
+// +#define NV_DPCD20_DP_TUNNEL_CAPABILITIES (0x000E000D) /* R-XUR */ +#define NV_DPCD20_DP_TUNNEL_CAPABILITIES_DPTUNNELING_SUPPORT 0:0 /* R-XUF */ +#define NV_DPCD20_DP_TUNNEL_CAPABILITIES_DPTUNNELING_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DP_TUNNEL_CAPABILITIES_DPTUNNELING_SUPPORT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_DP_TUNNEL_CAPABILITIES_PANEL_REPLAY_TUNNELING_OPTIMIZATION_SUPPORT 6:6 /* R-XUF */ +#define NV_DPCD20_DP_TUNNEL_CAPABILITIES_PANEL_REPLAY_TUNNELING_OPTIMIZATION_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DP_TUNNEL_CAPABILITIES_PANEL_REPLAY_TUNNELING_OPTIMIZATION_SUPPORT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_DP_TUNNEL_CAPABILITIES_DPIN_BW_ALLOCATION_MODE_SUPPORT 7:7 /* R-XUF */ +#define NV_DPCD20_DP_TUNNEL_CAPABILITIES_DPIN_BW_ALLOCATION_MODE_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DP_TUNNEL_CAPABILITIES_DPIN_BW_ALLOCATION_MODE_SUPPORT_YES (0x00000001) /* R-XUV */ + +// DPCD Registers for DPRX Event Status Indicator Field +#define NV_DPCD20_LINK_SERVICE_IRQ_VECTOR_ESI0 (0x00002005) /* R-XUR */ +#define NV_DPCD20_LINK_SERVICE_IRQ_VECTOR_ESI0_DP_TUNNELING_IRQ 5:5 /* R-XUF */ +#define NV_DPCD20_LINK_SERVICE_IRQ_VECTOR_ESI0_DP_TUNNELING_IRQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_LINK_SERVICE_IRQ_VECTOR_ESI0_DP_TUNNELING_IRQ_YES (0x00000001) /* R-XUV */ + +// DPCD Registers for DP IN BW Allocation +#define NV_DPCD20_USB4_DRIVER_BW_CAPABILITY (0x000E0020) /* R-XUR */ +#define NV_DPCD20_USB4_DRIVER_BW_ALLOCATION 7:7 /* R-XUF */ +#define NV_DPCD20_USB4_DRIVER_BW_ALLOCATION_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_USB4_DRIVER_BW_ALLOCATION_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD20_DP_TUNNEL_BW_GRANULARITY (0x000E0022) /* R-XUR */ +#define NV_DPCD20_DP_TUNNEL_BW_GRANULARITY_VAL 1:0 /* R-XUF */ +#define NV_DPCD20_DP_TUNNEL_BW_GRANULARITY_VAL_0_25_GBPS (0x00000000) /* R-XUV */ +#define NV_DPCD20_DP_TUNNEL_BW_GRANULARITY_VAL_0_50_GBPS (0x00000001) /* R-XUV */ +#define NV_DPCD20_DP_TUNNEL_BW_GRANULARITY_VAL_1_00_GBPS (0x00000002) /* R-XUV */ + +#define NV_DPCD20_DP_TUNNEL_ESTIMATED_BW (0x000E0023) /* R-XUR */ + +#define NV_DPCD20_DP_TUNNEL_ALLOCATED_BW (0x000E0024) /* R-XUR */ + +#define NV_DPCD20_DP_TUNNEL_REQUESTED_BW (0x000E0031) /* R-XUR */ + +#define NV_DPCD20_DP_TUNNELING_STATUS (0x000E0025) /* R-XUR */ +#define NV_DPCD20_DP_TUNNELING_BW_REQUEST_FAILED 0:0 /* R-XUF */ +#define NV_DPCD20_DP_TUNNELING_BW_REQUEST_FAILED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DP_TUNNELING_BW_REQUEST_FAILED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_DP_TUNNELING_BW_REQUEST_SUCCEEDED 1:1 /* R-XUF */ +#define NV_DPCD20_DP_TUNNELING_BW_REQUEST_SUCCEEDED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DP_TUNNELING_BW_REQUEST_SUCCEEDED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_DP_TUNNELING_ESTIMATED_BW_CHANGED 2:2 /* R-XUF */ +#define NV_DPCD20_DP_TUNNELING_ESTIMATED_BW_CHANGED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DP_TUNNELING_ESTIMATED_BW_CHANGED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_DP_TUNNELING_BW_ALLOCATION_CAPABILITY_CHANGED 3:3 /* R-XUF */ +#define NV_DPCD20_DP_TUNNELING_BW_ALLOCATION_CAPABILITY_CHANGED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DP_TUNNELING_BW_ALLOCATION_CAPABILITY_CHANGED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_DP_TUNNELING_EXIT_DISCOVERY_MODE 4:4 /* R-XUF */ +#define NV_DPCD20_DP_TUNNELING_EXIT_DISCOVERY_MODE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DP_TUNNELING_EXIT_DISCOVERY_MODE_YES (0x00000001) /* R-XUV */ + +#define 
NV_DPCD20_DP_TUNNELING_8B10B_MAX_LINK_RATE (0x000E0028) /* R-XUR */ +#define NV_DPCD20_DP_TUNNELING_8B10B_MAX_LINK_RATE_VAL 7:0 /* R-XUF */ +#define NV_DPCD20_DP_TUNNELING_8B10B_MAX_LINK_RATE_VAL_1_62_GBPS (0x00000006) /* R-XUV */ +#define NV_DPCD20_DP_TUNNELING_8B10B_MAX_LINK_RATE_VAL_2_70_GBPS (0x0000000A) /* R-XUV */ +#define NV_DPCD20_DP_TUNNELING_8B10B_MAX_LINK_RATE_VAL_5_40_GBPS (0x00000014) /* R-XUV */ +#define NV_DPCD20_DP_TUNNELING_8B10B_MAX_LINK_RATE_VAL_8_10_GBPS (0x0000001E) /* R-XUV */ + +#define NV_DPCD20_DP_TUNNELING_MAX_LANE_COUNT (0x000E0029) /* R-XUR */ +#define NV_DPCD20_DP_TUNNELING_MAX_LANE_COUNT_LANE 7:0 /* R-XUF */ +#define NV_DPCD20_DP_TUNNELING_MAX_LANE_COUNT_LANE_ONE (0x00000001) /* R-XUV */ +#define NV_DPCD20_DP_TUNNELING_MAX_LANE_COUNT_LANE_TWO (0x00000002) /* R-XUV */ +#define NV_DPCD20_DP_TUNNELING_MAX_LANE_COUNT_LANE_FOUR (0x00000004) /* R-XUV */ + +#define NV_DPCD20_DPTX_BW_ALLOCATION_MODE_CONTROL (0x000E0030) /* R-XUR */ +#define NV_DPCD20_DPTX_UNMASK_BW_ALLOCATION_IRQ 6:6 /* R-XUF */ +#define NV_DPCD20_DPTX_UNMASK_BW_ALLOCATION_IRQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DPTX_UNMASK_BW_ALLOCATION_IRQ_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_DPTX_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE 7:7 /* R-XUF */ +#define NV_DPCD20_DPTX_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DPTX_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE_YES (0x00000001) /* R-XUV */ + +#endif // #ifndef _DISPLAYPORT20_H_ diff --git a/src/common/inc/gps.h b/src/common/inc/gps.h new file mode 100644 index 0000000..9edba61 --- /dev/null +++ b/src/common/inc/gps.h @@ -0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef GPS_H
+#define GPS_H
+
+#define GPS_REVISION_ID 0x00000100
+#define GPS_2X_REVISION_ID 0x00000200
+
+#define GPS_FUNC_SUPPORT 0x00000000 // Bit list of supported functions
+#define GPS_FUNC_GETOBJBYTYPE 0x00000010 // Fetch any specific Object by Type
+#define GPS_FUNC_GETALLOBJS 0x00000011 // Fetch all Objects
+#define GPS_FUNC_REQUESTDXSTATE 0x00000012 // Request D-Notifier state
+#define GPS_FUNC_GETCALLBACKS 0x00000013 // Get system requested callbacks
+#define GPS_FUNC_PCONTROL 0x0000001C // GPU power control function
+#define GPS_FUNC_PSHARESTATUS 0x00000020 // Get system requested Power Steering settings
+#define GPS_FUNC_GETPSS 0x00000021 // Get _PSS object
+#define GPS_FUNC_SETPPC 0x00000022 // Set _PPC object
+#define GPS_FUNC_GETPPC 0x00000023 // Get _PPC object
+#define GPS_FUNC_GETPPL 0x00000024 // Get CPU package power limits
+#define GPS_FUNC_SETPPL 0x00000025 // Set CPU package power limits
+#define GPS_FUNC_GETTRL 0x00000026 // Get CPU turbo ratio limits
+#define GPS_FUNC_SETTRL 0x00000027 // Set CPU turbo ratio limits
+#define GPS_FUNC_GETPPM 0x00000028 // Get system power modes
+#define GPS_FUNC_SETPPM 0x00000029 // Set system power modes
+#define GPS_FUNC_PSHAREPARAMS 0x0000002A // Get sensor information and capabilities
+#define GPS_FUNC_SETEDPPLIMITINFO 0x0000002B // Send the GPU EDPPeak limit info to platform
+#define GPS_FUNC_GETEDPPLIMIT 0x0000002C // Get EDPPeak limit from platform
+
+#define GPS_EVENT_STATUS_CHANGE 0x000000C0 // when received call GPS_FUNC_PCONTROL,
+ // depends on whether system is GPS enabled.
+
+#endif // GPS_H
+
diff --git a/src/common/inc/hdmi_spec.h b/src/common/inc/hdmi_spec.h
new file mode 100644
index 0000000..9371c1c
--- /dev/null
+++ b/src/common/inc/hdmi_spec.h
@@ -0,0 +1,86 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _HDMI_SPEC_H_
+#define _HDMI_SPEC_H_
+
+/**************** Resource Manager Defines and Structures ******************\
+* *
+* Module: HDMI_SPEC.H *
+* Defines Common HDMI flags *
+* *
+\***************************************************************************/
+
+/*
+* RM will be moving to separate packet types for DP and HDMI
+* since the SDP packet type differs between HDMI and DP. Going forward
+* clients are expected to use the respective packet type.
Once all the +* clients move to the new data types, we can remove the redundant +* PACKET_TYPE definition. +*/ + + +typedef enum +{ + pktType_AudioClkRegeneration = 0x01, + pktType_GeneralControl = 0x03, + pktType_GamutMetadata = 0x0a, + pktType_SRInfoFrame = 0x7f, // Self refresh infoframe for eDP enter/exit self refresh, SRS 1698 + pktType_Cea861BInfoFrame = 0x80, + pktType_VendorSpecInfoFrame = 0x81, + pktType_AviInfoFrame = 0x82, + pktType_AudioInfoFrame = 0x84, + pktType_SrcProdDescInfoFrame = 0x83, + pktType_MpegSrcInfoFrame = 0x85, + pktType_DynamicRangeMasteringInfoFrame = 0x87 +} PACKET_TYPE; + +typedef enum +{ + hdmi_pktType_AudioClkRegeneration = 0x01, + hdmi_pktType_GeneralControl = 0x03, + hdmi_pktType_GamutMetadata = 0x0a, + hdmi_pktType_ExtendedMetadata = 0x7f, + hdmi_pktType_Cea861BInfoFrame = 0x80, + hdmi_pktType_VendorSpecInfoFrame = 0x81, + hdmi_pktType_AviInfoFrame = 0x82, + hdmi_pktType_AudioInfoFrame = 0x84, + hdmi_pktType_SrcProdDescInfoFrame = 0x83, + hdmi_pktType_MpegSrcInfoFrame = 0x85, + hdmi_pktType_DynamicRangeMasteringInfoFrame = 0x87 +} HDMI_PACKET_TYPE; + + +#define HDMI_PKT_HDR_SIZE 3 + +#define HDMI_PKT_AVI_NUM_DBYTES 14 +#define HDMI_PKT_AUDIO_NUM_DBYTES 11 +#define HDMI_PKT_GENCTRL_NUM_DBYTES 7 +#define HDMI_PKT_ACR_NUM_DBYTES 7 +#define HDMI_PKT_GAMUT_METADATA_NUM_DBYTES 28 +#define HDMI_PKT_VS_MAX_NUM_DBYTES 28 + +#define HDMI_GENCTRL_PACKET_MUTE_ENABLE 0x01 +#define HDMI_GENCTRL_PACKET_MUTE_DISABLE 0x10 + +#endif // #ifndef _HDMI_SPEC_H_ diff --git a/src/common/inc/jt.h b/src/common/inc/jt.h new file mode 100644 index 0000000..3e526c8 --- /dev/null +++ b/src/common/inc/jt.h @@ -0,0 +1,115 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef JT_H +#define JT_H + +// +// JT ACPI _DSM method related definitions +// +#define JT_REVISION_ID 0x00000103 // Revision number + +// subfunction 0 is common use: NV_ACPI_ALL_FUNC_SUPPORT +// #define JT_FUNC_SUPPORT 0x00000000 // Function is supported? 
+#define JT_FUNC_CAPS 0x00000001 // Capabilities +#define JT_FUNC_POLICYSELECT 0x00000002 // Query Policy Selector Status (reserved for future use) +#define JT_FUNC_POWERCONTROL 0x00000003 // dGPU Power Control +#define JT_FUNC_PLATPOLICY 0x00000004 // Query the Platform Policies (reserved for future use) +#define JT_FUNC_DISPLAYSTATUS 0x00000005 // Query the Display Hot-Key +#define JT_FUNC_MDTL 0x00000006 // Display Hot-Key Toggle List + +// +// JT_FUNC_CAPS return buffer definitions +// +#define NV_JT_FUNC_CAPS_JT_ENABLED 0:0 +#define NV_JT_FUNC_CAPS_JT_ENABLED_FALSE 0x00000000 +#define NV_JT_FUNC_CAPS_JT_ENABLED_TRUE 0x00000001 +#define NV_JT_FUNC_CAPS_NVSR_ENABLED 2:1 +#define NV_JT_FUNC_CAPS_NVSR_ENABLED_TRUE 0x00000000 +#define NV_JT_FUNC_CAPS_NVSR_ENABLED_FALSE 0x00000001 +#define NV_JT_FUNC_CAPS_PPR 4:3 +#define NV_JT_FUNC_CAPS_PPR_GC6 0x00000000 +#define NV_JT_FUNC_CAPS_PPR_GC6S3SR 0x00000002 +#define NV_JT_FUNC_CAPS_SRPR 5:5 +#define NV_JT_FUNC_CAPS_SRPR_PANEL 0x00000000 +#define NV_JT_FUNC_CAPS_SRPR_SUSPEND 0x00000001 +#define NV_JT_FUNC_CAPS_FBPR 7:6 +#define NV_JT_FUNC_CAPS_FBPR_GC6_ON 0x00000000 +#define NV_JT_FUNC_CAPS_FBPR_GC6_S3 0x00000002 +#define NV_JT_FUNC_CAPS_GPR 9:8 +#define NV_JT_FUNC_CAPS_GPR_COMBINED 0x00000000 +#define NV_JT_FUNC_CAPS_GPR_PERGPU 0x00000001 +#define NV_JT_FUNC_CAPS_GCR 10:10 +#define NV_JT_FUNC_CAPS_GCR_EXTERNAL 0x00000000 +#define NV_JT_FUNC_CAPS_GCR_INTEGRATED 0x00000001 +#define NV_JT_FUNC_CAPS_PTH_ENABLED 11:11 +#define NV_JT_FUNC_CAPS_PTH_ENABLED_YES 0x00000000 +#define NV_JT_FUNC_CAPS_PTH_ENABLED_NO 0x00000001 +#define NV_JT_FUNC_CAPS_NOT 12:12 +#define NV_JT_FUNC_CAPS_NOT_GC6DONE 0x00000000 +#define NV_JT_FUNC_CAPS_NOT_LINKCHANGE 0x00000001 +#define NV_JT_FUNC_CAPS_MSHYB_ENABLED 13:13 +#define NV_JT_FUNC_CAPS_MSHYB_ENABLED_FALSE 0x00000000 +#define NV_JT_FUNC_CAPS_MSHYB_ENABLED_TRUE 0x00000001 +#define NV_JT_FUNC_CAPS_RPC 14:14 +#define NV_JT_FUNC_CAPS_RPC_DEFAULT 0x00000000 +#define NV_JT_FUNC_CAPS_RPC_FINEGRAIN 0x00000001 +#define NV_JT_FUNC_CAPS_GC6V 16:15 +#define NV_JT_FUNC_CAPS_GC6V_GC6E 0x00000000 +#define NV_JT_FUNC_CAPS_GC6V_GC6A 0x00000001 +#define NV_JT_FUNC_CAPS_GC6V_GC6R 0x00000002 +#define NV_JT_FUNC_CAPS_GEI_ENABLED 17:17 +#define NV_JT_FUNC_CAPS_GEI_ENABLED_FALSE 0x00000000 +#define NV_JT_FUNC_CAPS_GEI_ENABLED_TRUE 0x00000001 +#define NV_JT_FUNC_CAPS_GSW_ENABLED 18:18 +#define NV_JT_FUNC_CAPS_GSW_ENABLED_FALSE 0x00000000 +#define NV_JT_FUNC_CAPS_GSW_ENABLED_TRUE 0x00000001 +#define NV_JT_FUNC_CAPS_REVISION_ID 31:20 +#define NV_JT_FUNC_CAPS_REVISION_ID_1_00 0x00000100 +#define NV_JT_FUNC_CAPS_REVISION_ID_1_01 0x00000101 +#define NV_JT_FUNC_CAPS_REVISION_ID_1_03 0x00000103 +#define NV_JT_FUNC_CAPS_REVISION_ID_2_00 0x00000200 + +// +// JT_FUNC_POWERCONTROL argument definitions (Rev 1.0) +// + +// +// GPU Power Control +// +#define NV_JT_FUNC_POWERCONTROL_GPU_POWER_CONTROL 2:0 +#define NV_JT_FUNC_POWERCONTROL_GPU_POWER_CONTROL_GSS 0x00000000 +// +// JT_FUNC_POWERCONTROL return buffer definitions +// +#define NV_JT_FUNC_POWERCONTROL_GPU_GC_STATE 2:0 +#define NV_JT_FUNC_POWERCONTROL_GPU_GC_STATE_TRANSITION 0x00000000 +#define NV_JT_FUNC_POWERCONTROL_GPU_GC_STATE_ON 0x00000001 +#define NV_JT_FUNC_POWERCONTROL_GPU_GC_STATE_OFF 0x00000002 +#define NV_JT_FUNC_POWERCONTROL_GPU_GC_STATE_GC6 0x00000003 +#define NV_JT_FUNC_POWERCONTROL_GPU_POWER_STATE 3:3 +#define NV_JT_FUNC_POWERCONTROL_GPU_POWER_STATE_OFF 0x00000000 +#define NV_JT_FUNC_POWERCONTROL_GPU_POWER_STATE_PWOK 0x00000001 + +#endif // JT_H + diff --git 
a/src/common/inc/nvBinSegment.h b/src/common/inc/nvBinSegment.h new file mode 100644 index 0000000..1a87551 --- /dev/null +++ b/src/common/inc/nvBinSegment.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVBINSEGMENT_H +#define NVBINSEGMENT_H + +#define PUSH_SEGMENTS +#define POP_SEGMENTS +#define CODE_SEGMENT(__seg) +#define DATA_SEGMENT(__seg) +#define BSS_SEGMENT(__seg) +#define CONS_SEGMENT(__seg) +#define PAGE_SEGMENT +#define NONPAGE_SEGMENT + +#endif // NVBINSEGMENT_H diff --git a/src/common/inc/nvBldVer.h b/src/common/inc/nvBldVer.h new file mode 100644 index 0000000..e779b8c --- /dev/null +++ b/src/common/inc/nvBldVer.h @@ -0,0 +1,73 @@ +#ifndef _NVBLDVER_H_ +#define _NVBLDVER_H_ + +#ifndef NVBLDVER_STRINGIZE +#define NVBLDVER_STRINGIZE(t) #t +#endif +#ifndef STRINGIZE +#define STRINGIZE(t) NVBLDVER_STRINGIZE(t) +#endif + +// These variables can be overridden using ENV vars, see nvCommon.nvmk. +// If no env vars are set, then the defaults seen here will be used. +// In DVS builds, the ENV vars are used to control these values. +// Note- the value of NV_BUILD_CL and NV_BUILD_TYPE_NON_BM is only used in +// non-buildmeister builds, see override section below. +// DVS_SW_CHANGELIST has been added to ENV vars in bug 1486673 +#ifndef DVS_SW_CHANGELIST + #define DVS_SW_CHANGELIST 0 +#endif +#ifndef NV_BUILD_CL + #define NV_BUILD_CL (DVS_SW_CHANGELIST) +#endif +#if NV_BUILD_CL == 0 + #define NV_BUILD_CL (DVS_SW_CHANGELIST) +#endif +#ifndef NV_BUILD_TYPE_NON_BM + #define NV_BUILD_TYPE_NON_BM Private +#endif +#ifndef NV_BUILD_AUTHOR + #define NV_BUILD_AUTHOR unknown +#endif +// End ENV var section + + +// The values of the following strings are set via a buildmeister python script, +// and then checked back in. 
You cannot make changes to these sections without
+// corresponding changes to the buildmeister script
+#ifndef NV_BUILD_BRANCH
+ #define NV_BUILD_BRANCH bugfix_main
+#endif
+#ifndef NV_PUBLIC_BRANCH
+ #define NV_PUBLIC_BRANCH bugfix_main
+#endif
+
+#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
+#define NV_BUILD_BRANCH_VERSION "dev/gpu_drv/bugfix_main-16490"
+#define NV_BUILD_CHANGELIST_NUM (32065427)
+#define NV_BUILD_TYPE "Nightly"
+#define NV_BUILD_NAME "dev/gpu_drv/bugfix_main-16490"
+#define NV_LAST_OFFICIAL_CHANGELIST_NUM (30396442)
+
+#else /* Windows builds */
+#define NV_BUILD_BRANCH_VERSION "bugfix_main-17962"
+#define NV_BUILD_CHANGELIST_NUM (32066877)
+#define NV_BUILD_TYPE "Nightly"
+#define NV_BUILD_NAME "bugfix_main-221115"
+#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32066644)
+#define NV_BUILD_BRANCH_BASE_VERSION R530
+#endif
+// End buildmeister python edited section
+
+// A few of the values are defined differently for non-buildmeister builds,
+// this section redefines those defines
+#ifndef NV_BUILDMEISTER_BLD
+ #undef NV_BUILD_TYPE
+ #define NV_BUILD_TYPE STRINGIZE(NV_BUILD_TYPE_NON_BM)
+ #undef NV_BUILD_CHANGELIST_NUM
+ #define NV_BUILD_CHANGELIST_NUM NV_BUILD_CL
+#endif
+
+#define NV_DISPLAY_DRIVER_TITLE NV_BUILD_TYPE " " STRINGIZE(NV_BUILD_BRANCH) " " NV_BUILD_NAME " " STRINGIZE(NV_BUILD_AUTHOR)
+
+#endif
diff --git a/src/common/inc/nvCpuIntrinsics.h b/src/common/inc/nvCpuIntrinsics.h
new file mode 100644
index 0000000..199feea
--- /dev/null
+++ b/src/common/inc/nvCpuIntrinsics.h
@@ -0,0 +1,438 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1998,2015,2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __NV_CPU_INTRINSICS_H_
+#define __NV_CPU_INTRINSICS_H_
+
+#include
+#include "cpuopsys.h"
+#include "nvtypes.h"
+
+/////////////////////////////////////
+// Page size
+/////////////////////////////////////
+
+#if defined(NV_UNIX) && !defined(NV_CPU_INTRINSICS_KERNEL)
+// Page size is dynamic on all Unix systems
+#include <unistd.h>
+#define __NV_MEM_PAGE_SIZE_BYTES getpagesize()
+#else
+// And is static for all other known architectures.
+#define __NV_MEM_PAGE_SIZE_BYTES 4096
+#endif // defined(NV_UNIX)
+
+#define __NV_MEM_PAGE_SIZE_MASK (__NV_MEM_PAGE_SIZE_BYTES - 1)
+
+#define __NV_PAGE_PAD(x) \
+ (((x) + __NV_MEM_PAGE_SIZE_MASK) & ~(__NV_MEM_PAGE_SIZE_MASK))
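+
+// Example (illustrative): __NV_PAGE_PAD() rounds a byte count up to the next
+// page multiple; with a 4 KiB page size:
+//
+//     __NV_PAGE_PAD(4096) == 4096
+//     __NV_PAGE_PAD(5000) == 8192    // (5000 + 4095) & ~4095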
+
+/////////////////////////////////////
+// Cache line size
+/////////////////////////////////////
+
+#if defined(NVCPU_PPC)
+#define __NV_CACHE_LINE_BYTES 32
+#else
+#define __NV_CACHE_LINE_BYTES 64
+#endif
+
+/////////////////////////////////////
+// Spin loop hint
+/////////////////////////////////////
+
+#if defined(NVCPU_X86_64)
+
+// PAUSE (aka REP NOP) opcode is low-power on x86_64
+#if defined(NV_GNU_INLINE_ASM)
+#define NV_SPIN_LOOP_HINT() { \
+ asm(".byte 0xf3\n\t" \
+ ".byte 0x90\n\t"); \
+}
+#else
+#define NV_SPIN_LOOP_HINT() _mm_pause()
+#endif
+
+#elif defined(NVCPU_X86)
+
+// PAUSE (aka REP NOP) opcode is low-power on P4's
+#if defined(NV_GNU_INLINE_ASM)
+#define NV_SPIN_LOOP_HINT() { \
+ asm(".byte 0xf3\n\t" \
+ ".byte 0x90\n\t"); \
+}
+#else
+#define NV_SPIN_LOOP_HINT() _mm_pause()
+#endif
+
+#elif defined(NVCPU_PPC)
+
+#define NV_PPC_CACHE_LINE_SIZE_IN_BYTES 32
+#define NV_PPC_CACHE_LINE_SIZE_IN_U32S 8
+
+// Not implemented yet
+#define NV_SPIN_LOOP_HINT()
+
+#elif defined(NVCPU_FAMILY_ARM) || defined(NVCPU_PPC64LE) || defined(NVCPU_RISCV64)
+
+// Not implemented yet
+#define NV_SPIN_LOOP_HINT()
+
+#else
+#error Unknown CPU type
+#endif
+
+/////////////////////////////////////
+// Atomic operations
+/////////////////////////////////////
+
+#if defined(__GNUC__) || defined(__clang__)
+
+// Include stdbool.h to pick up a definition of false to use with the
+// __atomic_* intrinsics below.
+#if !defined(__cplusplus)
+#include <stdbool.h>
+#endif // !defined(__cplusplus)
+
+// Sets a 32-bit variable to the specified value as an atomic operation.
+// The function returns the initial value of the destination memory location.
+static NV_FORCEINLINE int __NVatomicExchange(volatile int *location, int value)
+{
+ return __sync_lock_test_and_set(location, value);
+}
+
+// Sets a pointer variable to the specified value as an atomic operation.
+// The function returns the initial value of the destination memory location.
+static NV_FORCEINLINE void* __NVatomicExchangePointer(void * volatile *location, void *value)
+{
+ return __sync_lock_test_and_set(location, value);
+}
+
+// Performs an atomic compare-and-exchange operation on the specified values. The function compares two
+// specified 32-bit values and exchanges with another 32-bit value based on the outcome of the comparison.
+// The function returns the initial value of the destination memory location.
+static NV_FORCEINLINE int __NVatomicCompareExchange(int volatile *location, int newValue, int oldValue)
+{
+ return __sync_val_compare_and_swap(location, oldValue, newValue);
+}
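+
+// Usage sketch (illustrative): the canonical compare-and-swap retry loop on
+// top of __NVatomicCompareExchange(). addClamped(), counter and limit are
+// hypothetical names, not part of this header:
+//
+//     static int addClamped(int volatile *counter, int delta, int limit)
+//     {
+//         int oldVal, newVal;
+//         do {
+//             oldVal = *counter;
+//             newVal = (oldVal + delta > limit) ? limit : (oldVal + delta);
+//         } while (__NVatomicCompareExchange(counter, newVal, oldVal) != oldVal);
+//         return newVal;
+//     }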
+
+// Performs an atomic compare-and-exchange operation on the specified values. The function compares two
+// specified 64-bit values and exchanges with another 64-bit value based on the outcome of the comparison.
+// The function returns the initial value of the destination memory location.
+static NV_FORCEINLINE NvS64 __NVatomicCompareExchange64(NvS64 volatile *location, NvS64 newValue, NvS64 oldValue)
+{
+#if NVCPU_IS_ARM && !defined(__clang__)
+ // GCC doesn't provide an ARMv7 64-bit sync-and-swap intrinsic, so define
+ // one using inline assembly.
+ NvU32 oldValLow = NvU64_LO32(oldValue);
+ NvU32 oldValHigh = NvU64_HI32(oldValue);
+ NvU32 newValLow = NvU64_LO32(newValue);
+ NvU32 newValHigh = NvU64_HI32(newValue);
+ NvU32 outValLow;
+ NvU32 outValHigh;
+ NvU32 res;
+
+ // The ldrexd and strexd instructions require use of an adjacent even/odd
+ // pair of registers. GCC supports quad-word register operands and
+ // modifiers to enable assignment of 64-bit values to two suitable 32-bit
+ // registers, but Clang does not. To work around this, explicitly request
+ // some suitable registers in the clobber list and manually shift the
+ // necessary data in/out of them as needed.
+ __asm__ __volatile__ (
+ "1: ldrexd r2, r3, [%[loc]]\n"
+ " mov %[res], #0\n"
+ " mov %[outLo], r2\n"
+ " mov %[outHi], r3\n"
+ " mov r2, %[newLo]\n"
+ " mov r3, %[newHi]\n"
+ " teq %[outLo], %[oldLo]\n"
+ " itt eq\n"
+ " teqeq %[outHi], %[oldHi]\n"
+ " strexdeq %[res], r2, r3, [%[loc]]\n"
+ " teq %[res], #0\n"
+ " bne 1b\n"
+ // Outputs
+ : [res] "=&r" (res),
+ [outLo] "=&r" (outValLow), [outHi] "=&r" (outValHigh),
+ "+Qo" (*location)
+ // Inputs
+ : [loc] "r" (location),
+ [oldLo] "r" (oldValLow), [oldHi] "r" (oldValHigh),
+ [newLo] "r" (newValLow), [newHi] "r" (newValHigh)
+ // Clobbers
+ : "memory", "cc", "r2", "r3");
+
+ __asm__ __volatile__ ("dmb" ::: "memory");
+
+ return (NvS64)(((NvU64)outValHigh << 32llu) | (NvU64)outValLow);
+#else
+ __atomic_compare_exchange_n(location, &oldValue, newValue, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return oldValue;
+#endif
+}
+
+// Performs an atomic compare-and-exchange operation on the specified values. The function compares two
+// specified pointer values and exchanges with another pointer value based on the outcome of the comparison.
+// The function returns the initial value of the destination memory location.
+static NV_FORCEINLINE void* __NVatomicCompareExchangePointer(void * volatile *location, void *newValue, void *oldValue)
+{
+ return __sync_val_compare_and_swap(location, oldValue, newValue);
+}
+
+// Increments (increases by one) the value of the specified 32-bit variable as an atomic operation.
+// The function returns the resulting incremented value.
+static NV_FORCEINLINE int __NVatomicIncrement(int volatile *location)
+{
+ return __sync_add_and_fetch(location, 1);
+}
+
+// Decrements (decreases by one) the value of the specified 32-bit variable as an atomic operation.
+// The function returns the resulting decremented value.
+static NV_FORCEINLINE int __NVatomicDecrement(int volatile *location)
+{
+ return __sync_sub_and_fetch(location, 1);
+}
+
+// Adds the values of the specified 32-bit variables as an atomic operation.
+// The function returns the resulting added value.
+static NV_FORCEINLINE int __NVatomicExchangeAdd(int volatile *location, int value)
+{
+ return __sync_add_and_fetch(location, value);
+}
+
+#ifdef NV_CPU_QUERY_LSE_CAPS
+/*
+ * Embedding hand coded instructions only for the inc/dec calls. These are the ones that
+ * get called very often. The __NVatomicCompareExchange() and other calls, for example,
+ * are called only at init time, and a few times at most. So, keeping this hand-coding
+ * minimal, and to only the most used ones.
+ *
+ * Disassembly for reference:
+ * b820003e ldadd w0, w30, [x1]
+ * 0b1e0000 add w0, w0, w30
+ *
+ * x16, x17, x30 are added to the clobber list since there could be veneers that may be
+ * generated.
+ */
+static NV_FORCEINLINE int __NVatomicIncrement_LSE(int volatile *location)
+{
+ register int w0 asm ("w0") = 1;
+ register volatile int *x1 asm ("x1") = location;
+
+ asm volatile
+ (
+ ".inst 0xb820003e \n"
+ "add w0, w0, w30"
+ : "+r" (w0), "+r" (x1)
+ : "r" (x1)
+ : "x16", "x17", "x30", "memory"
+ );
+
+ return w0;
+}
+
+static NV_FORCEINLINE int __NVatomicDecrement_LSE(int volatile *location)
+{
+ register int w0 asm ("w0") = (int32_t)-1;
+ register volatile int *x1 asm ("x1") = location;
+
+ asm volatile
+ (
+ ".inst 0xb820003e \n"
+ "add w0, w0, w30"
+ : "+r" (w0), "+r" (x1)
+ : "r" (x1)
+ : "x16", "x17", "x30", "memory"
+ );
+
+ return w0;
+}
+#endif
+
+#else
+
+#error undefined architecture
+
+#endif
+
+/////////////////////////////////////
+// Bit scan operations
+/////////////////////////////////////
+
+// __NV_clz(mask) provides a generic count-leading-zeros. If "mask" is 0, then the return value is undefined.
+// __NV_ctz(mask) provides a generic count-trailing-zeros. If "mask" is 0, then the return value is undefined.
+//
+// __NVbsfFirst(), __NVbsfNext(), __NVbsrFirst() and __NVbsrNext() are helper functions to implement
+// generic bit scan operations over a 32 bit mask along the following template:
+//
+// for (__NVbsfFirst(&index, &mask, maskInit); mask; __NVbsfNext(&index, &mask)) { ... }
+//
+// These operations are implemented using gcc/MSVC builtins/intrinsics.
+//
+// The scan process provides the next valid "index". In __NVbsfNext() the bit corresponding to the passed in
+// (1 << index) will be masked out.
+//
+// bsf scans from the lsb to the msb, while bsr scans from the msb to the lsb.
+//
+// The use of inlines and defines below is dictated by insufficiencies of MSVC ...
+
+#if defined (__GNUC__) || defined(__clang__)
+
+static NV_FORCEINLINE int __NV_clz(unsigned int mask) {
+ return __builtin_clz(mask);
+}
+
+static NV_FORCEINLINE int __NV_ctz(unsigned int mask) {
+ return __builtin_ctz(mask);
+}
+
+static NV_FORCEINLINE int __NV_clzll(unsigned long long mask) {
+ return __builtin_clzll(mask);
+}
+
+static NV_FORCEINLINE int __NV_ctzll(unsigned long long mask) {
+ return __builtin_ctzll(mask);
+}
+
+#define __BitScanForward(_pindex, _mask) *((_pindex)) = __NV_ctz((_mask))
+#define __BitScanReverse(_pindex, _mask) *((_pindex)) = 31 - __NV_clz((_mask))
+#define __BitScanForward64(_pindex, _mask) *((_pindex)) = __NV_ctzll((_mask))
+#define __BitScanReverse64(_pindex, _mask) *((_pindex)) = 63 - __NV_clzll((_mask))
+
+#else
+
+#error Unsupported compiler
+
+#endif // MSVC_VER
+
+#ifndef __BitScanForward64
+// Implement bit scan forward for 64 bit using 32 bit instructions
+static NV_FORCEINLINE void _BitScanForward64on32(unsigned int *index, NvU64 mask)
+{
+ const unsigned int lowMask = (unsigned int)(mask & 0xFFFFFFFFULL);
+
+ if (lowMask != 0) {
+ __BitScanForward(index, lowMask);
+ } else {
+ const unsigned int highMask = (unsigned int)(mask >> 32);
+ __BitScanForward(index, highMask);
+ *index += 32;
+ }
+}
+
+#define __BitScanForward64(_pindex, _mask) _BitScanForward64on32((_pindex), (_mask))
+#endif // __BitScanForward64
+
+#ifndef __BitScanReverse64
+// Implement bit scan reverse for 64 bit using 32 bit instructions
+static NV_FORCEINLINE void _BitScanReverse64on32(unsigned int *index, NvU64 mask)
+{
+ const unsigned int highMask = (unsigned int)(mask >> 32);
+
+ if (highMask != 0) {
+ __BitScanReverse(index, highMask);
+ *index += 32;
+ } else {
+ const unsigned int lowMask = (unsigned int)(mask & 0xFFFFFFFFULL);
+ __BitScanReverse(index, lowMask);
+ }
+}
+
+#define __BitScanReverse64(_pindex, _mask) _BitScanReverse64on32((_pindex), (_mask))
+#endif // __BitScanReverse64
+
+static NV_FORCEINLINE void __NVbsfFirst(unsigned int *pindex, unsigned int *pmask, unsigned int maskInit)
+{
+ *pmask = maskInit;
+ __BitScanForward(pindex, maskInit);
+}
+
+static NV_FORCEINLINE void __NVbsfNext(unsigned int *pindex, unsigned int *pmask)
+{
+ unsigned int index, mask;
+
+ index = *pindex;
+ mask = *pmask ^ (1ul << index);
+
+ *pmask = mask;
+ __BitScanForward(pindex, mask);
+}
+
+static NV_FORCEINLINE void __NVbsrFirst(unsigned int *pindex, unsigned int *pmask, unsigned int maskInit)
+{
+ *pmask = maskInit;
+ __BitScanReverse(pindex, maskInit);
+}
+
+static NV_FORCEINLINE void __NVbsrNext(unsigned int *pindex, unsigned int *pmask)
+{
+ unsigned int index, mask;
+
+ index = *pindex;
+ mask = *pmask ^ (1ul << index);
+
+ *pmask = mask;
+ __BitScanReverse(pindex, mask);
+}
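+
+// Example (illustrative): walking the set bits of a mask with the template
+// described above. For maskInit == 0x16 (binary 10110) the loop body sees
+// index 1, then 2, then 4; the __NVbsr* variants visit 4, 2, 1 instead:
+//
+//     unsigned int index, mask;
+//     for (__NVbsfFirst(&index, &mask, 0x16); mask; __NVbsfNext(&index, &mask))
+//     {
+//         /* use index */
+//     }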
+
+// Variations for 64 bit masks
+static NV_FORCEINLINE void __NVbsfFirst64(unsigned int *pindex, NvU64 *pmask, NvU64 maskInit)
+{
+ *pmask = maskInit;
+ __BitScanForward64(pindex, maskInit);
+}
+
+static NV_FORCEINLINE void __NVbsfNext64(unsigned int *pindex, NvU64 *pmask)
+{
+ unsigned int index;
+ NvU64 mask;
+
+ index = *pindex;
+ mask = *pmask ^ (1ULL << index);
+
+ *pmask = mask;
+ __BitScanForward64(pindex, mask);
+}
+
+static NV_FORCEINLINE void __NVbsrFirst64(unsigned int *pindex, NvU64 *pmask, NvU64 maskInit)
+{
+ *pmask = maskInit;
+ __BitScanReverse64(pindex, maskInit);
+}
+
+static NV_FORCEINLINE void __NVbsrNext64(unsigned int *pindex, NvU64 *pmask)
+{
+ unsigned int index;
+ NvU64 mask;
+
+ index = *pindex;
+ mask = *pmask ^ (1ULL << index);
+
+ *pmask = mask;
+ __BitScanReverse64(pindex, mask);
+}
+
+#undef __BitScanForward
+#undef __BitScanReverse
+#undef __BitScanForward64
+#undef __BitScanReverse64
+
+#endif // __NV_CPU_INTRINSICS_H_
diff --git a/src/common/inc/nvCpuUuid.h b/src/common/inc/nvCpuUuid.h
new file mode 100644
index 0000000..0ab546b
--- /dev/null
+++ b/src/common/inc/nvCpuUuid.h
@@ -0,0 +1,44 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_CPU_UUID_H_
+#define _NV_CPU_UUID_H_
+
+#define NV_UUID_LEN 16
+
+typedef struct nv_uuid
+{
+ NvU8 uuid[NV_UUID_LEN];
+
+} NvUuid;
+
+#define NV_UUID_HI(pUuid) (*((NvU64*)((pUuid)->uuid + (NV_UUID_LEN >> 1))))
+#define NV_UUID_LO(pUuid) (*((NvU64*)((pUuid)->uuid + 0)))
+
+typedef NvUuid NvSystemUuid;
+
+typedef NvUuid NvProcessorUuid;
+
+extern const NvProcessorUuid NV_PROCESSOR_UUID_CPU_DEFAULT;
+
+#endif // _NV_CPU_UUID_H_
diff --git a/src/common/inc/nvHdmiFrlCommon.h b/src/common/inc/nvHdmiFrlCommon.h
new file mode 100644
index 0000000..8c4d416
--- /dev/null
+++ b/src/common/inc/nvHdmiFrlCommon.h
@@ -0,0 +1,134 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/** @file nvHdmiFrlCommon.h
+ * @brief This file defines data needed for and returned by HDMI 2.1 spec FRL calculations.
+ * It is meant to be a spec layer within the HDMI lib, without carrying any
+ * driver/hw related information.
+ */
+
+#ifndef _NVHDMIFRLCOMMON_H_
+#define _NVHDMIFRLCOMMON_H_
+
+#include "nvmisc.h"
+
+//******************************************************************************
+// Constants/Structures
+//******************************************************************************
+#define MAX_RECONSTRUCTED_HACTIVE_PIXELS 2720
+
+// HDMI_BPC: Bits per component enums.
+typedef enum tagHDMI_BPC +{ + HDMI_BPC8 = 8, + HDMI_BPC10 = 10, + HDMI_BPC12 = 12, + HDMI_BPC16 = 16 +} HDMI_BPC; + +// HDMI_PIXEL_PACKING: Pixel packing type enums +typedef enum tagHDMI_PIXEL_PACKING +{ + HDMI_PIXEL_PACKING_RGB = 0, + HDMI_PIXEL_PACKING_YCbCr444, + HDMI_PIXEL_PACKING_YCbCr422, + HDMI_PIXEL_PACKING_YCbCr420 +} HDMI_PIXEL_PACKING; + +// HDMI_FRL_DATA_RATE: FRL mode enums +typedef enum tagHDMI_FRL_DATA_RATE +{ + HDMI_FRL_DATA_RATE_NONE, + HDMI_FRL_DATA_RATE_3LANES_3GBPS, + HDMI_FRL_DATA_RATE_3LANES_6GBPS, + HDMI_FRL_DATA_RATE_4LANES_6GBPS, + HDMI_FRL_DATA_RATE_4LANES_8GBPS, + HDMI_FRL_DATA_RATE_4LANES_10GBPS, + HDMI_FRL_DATA_RATE_4LANES_12GBPS, + HDMI_FRL_DATA_RATE_UNSPECIFIED +} HDMI_FRL_DATA_RATE; + +typedef enum tagAUDIO_PKTTYPE +{ + AUDIO_PKTTYPE_LPCM_SAMPLE = 0, + AUDIO_PKTTYPE_ONE_BIT_LPCM_SAMPLE, + AUDIO_PKTTYPE_DST_AUDIO, + AUDIO_PKTTYPE_HBR_AUDIO, + AUDIO_PKTTYPE_MULTI_STREAM_AUDIO, + AUDIO_PKTTYPE_ONE_BIT_MULTI_STREAM_AUDIO, + AUDIO_PKTTYPE_3D_AUDIO, + AUDIO_PKTTYPE_ONE_BIT_3D_AUDIO, + NO_AUDIO +} AUDIO_PKTTYPE; + +typedef struct tagFRL_CAPACITY_COMPUTATION_PARAMS +{ + NvU32 numLanes; + NvU32 frlBitRateGbps; + NvU32 pclk10KHz; + NvU32 hTotal; + NvU32 hActive; + NvU32 bpc; + HDMI_PIXEL_PACKING pixelPacking; + AUDIO_PKTTYPE audioType; + NvU32 numAudioChannels; + NvU32 audioFreqKHz; + + struct + { + NvU32 bppTargetx16; + NvU32 hSlices; + NvU32 sliceWidth; + NvU32 dscTotalChunkKBytes; + } compressionInfo; + +} FRL_CAPACITY_COMPUTATION_PARAMS; + +typedef struct tagFRL_COMPUTATION_RESULT +{ + HDMI_FRL_DATA_RATE frlRate; + NvU32 bppTargetx16; + + NvBool engageCompression; + NvBool isAudioSupported; + NvBool dataFlowDisparityReqMet; + NvBool dataFlowMeteringReqMet; + NvBool isVideoTransportSupported; + NvU32 triBytesBorrowed; // uncompressed mode: num of active Tri-bytes to be transmitted at HBlank + NvU32 hcActiveBytes; // compressed mode: num of FRL character bytes in active region + NvU32 hcActiveTriBytes; // compressed mode: num of FRL tri-bytes in active region + NvU32 hcBlankTriBytes; // compressed mode: num of FRL tri-bytes in blanking region + NvU32 tBlankToTTotalX1k; // compressed mode: ratio of time spent on blanking to the total line time +} FRL_COMPUTATION_RESULT; + +typedef struct tagFRL_PRE_CALC_CONFIG +{ + NvU32 vic; + HDMI_PIXEL_PACKING packing; + HDMI_BPC bpc; + HDMI_FRL_DATA_RATE frlRate; + NvU32 bppX16; + NvBool bCompressedMode; +} FRL_PRE_CALC_CONFIG; + +#endif // _NVHDMIFRLCOMMON_H_ diff --git a/src/common/inc/nvPNPVendorIds.h b/src/common/inc/nvPNPVendorIds.h new file mode 100644 index 0000000..ca93cb7 --- /dev/null +++ b/src/common/inc/nvPNPVendorIds.h @@ -0,0 +1,572 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. +*/ +/* + * This header file contains the 3-character Plug and Play Vendor IDs and + * their translation into Vendor names. + * + * If the includer defines NV_PNP_VENDOR_IDS_USE_TCHAR, then + * PNPVendorID::vendorName will have type const TCHAR*; otherwise, it will have + * type const char*. + * + * References: + * https://uefi.org/pnp_id_list + * + */ + +#ifndef __NV_PNP_VENDOR_IDS_H__ +#define __NV_PNP_VENDOR_IDS_H__ + +#if defined(NV_PNP_VENDOR_IDS_USE_TCHAR) + #define _VENDOR_NAME_TYPE const TCHAR + #define _VENDOR_NAME_ENTRY(x) _T(x) +#else + #define _VENDOR_NAME_TYPE const char + #define _VENDOR_NAME_ENTRY(x) (x) +#endif + +typedef struct tagPNPVendorID +{ + char vendorId[4]; // PNP Vendor ID (example: "SNY") + _VENDOR_NAME_TYPE* vendorName; // Vendor name for display (example: "Sony") +} PNPVendorId; + + +/* + * The PNPVendorIds[] table maps between the 3-character Plug and + * Play Vendor Identifiers and user-friendly vendor names + */ +static const PNPVendorId PNPVendorIds[] = +{ + { "___", _VENDOR_NAME_ENTRY("Targa") }, + { "@@@", _VENDOR_NAME_ENTRY("Sangyo") }, + + { "AAA", _VENDOR_NAME_ENTRY("Avolites Ltd") }, + { "AAC", _VENDOR_NAME_ENTRY("Acer") }, + { "ABC", _VENDOR_NAME_ENTRY("AboCom System Inc") }, + { "ABP", _VENDOR_NAME_ENTRY("Advanced System Products") }, + { "ACE", _VENDOR_NAME_ENTRY("ACME") }, + { "ACC", _VENDOR_NAME_ENTRY("ACCTON") }, + { "ACI", _VENDOR_NAME_ENTRY("Ancor Communications Inc") }, + { "ACK", _VENDOR_NAME_ENTRY("ACKSYS") }, + { "ACN", _VENDOR_NAME_ENTRY("ACON") }, + { "ACR", _VENDOR_NAME_ENTRY("Acer") }, + { "ACS", _VENDOR_NAME_ENTRY("Altos/ACS") }, + { "ACT", _VENDOR_NAME_ENTRY("Actebis/Targa") }, + { "ADI", _VENDOR_NAME_ENTRY("ADI") }, + { "ADP", _VENDOR_NAME_ENTRY("Adaptec") }, + { "ADT", _VENDOR_NAME_ENTRY("ADTEK") }, + { "ADV", _VENDOR_NAME_ENTRY("AMD") }, + { "ADX", _VENDOR_NAME_ENTRY("ADAX") }, + { "AEI", _VENDOR_NAME_ENTRY("AIR") }, + { "AEM", _VENDOR_NAME_ENTRY("AEM") }, + { "AEO", _VENDOR_NAME_ENTRY("UHC") }, + { "AGI", _VENDOR_NAME_ENTRY("Artish Graphics") }, + { "AKB", _VENDOR_NAME_ENTRY("Akebia") }, + { "AIC", _VENDOR_NAME_ENTRY("Arnos Instruments") }, + { "AIR", _VENDOR_NAME_ENTRY("Advanced Integrated Research") }, + { "AKB", _VENDOR_NAME_ENTRY("Akebia") }, + { "ALA", _VENDOR_NAME_ENTRY("Alacron") }, + { "ALR", _VENDOR_NAME_ENTRY("Advanced Logic Research") }, + { "AMC", _VENDOR_NAME_ENTRY("Attachmate") }, + { "AMD", _VENDOR_NAME_ENTRY("Amdek") }, + { "AMI", _VENDOR_NAME_ENTRY("American Megatrends") }, + { "AMP", _VENDOR_NAME_ENTRY("Amptron") }, + { "AMT", _VENDOR_NAME_ENTRY("Amtrans") }, + { "ANC", _VENDOR_NAME_ENTRY("Ancot") }, + { "ANI", _VENDOR_NAME_ENTRY("Anigma") }, + { "AOC", _VENDOR_NAME_ENTRY("AOC") }, + { "APD", _VENDOR_NAME_ENTRY("Applidata") }, + { "API", _VENDOR_NAME_ENTRY("AcerView") }, + { "APP", _VENDOR_NAME_ENTRY("Apple") }, + { "APS", _VENDOR_NAME_ENTRY("Autologic") }, + { "ARC", _VENDOR_NAME_ENTRY("Alta Research") }, + { "ART", _VENDOR_NAME_ENTRY("ArtMedia") }, + { "ASE", _VENDOR_NAME_ENTRY("ASEM") }, + { "ASI", _VENDOR_NAME_ENTRY("Ahead 
Systems") }, + { "AST", _VENDOR_NAME_ENTRY("AST Research") }, + { "ASU", _VENDOR_NAME_ENTRY("ASUS") }, + { "ATI", _VENDOR_NAME_ENTRY("Allied Telesis") }, + { "ATO", _VENDOR_NAME_ENTRY("ASTRO DESIGN, INC.") }, + { "ATT", _VENDOR_NAME_ENTRY("AT&T") }, + { "ATX", _VENDOR_NAME_ENTRY("Athenix") }, + { "AUO", _VENDOR_NAME_ENTRY("AU Optronics Corporation") }, + { "AUS", _VENDOR_NAME_ENTRY("Asustek Computer Inc") }, + { "AVI", _VENDOR_NAME_ENTRY("AIR") }, + { "AVO", _VENDOR_NAME_ENTRY("Avocent Corporation") }, + { "AZU", _VENDOR_NAME_ENTRY("Azura") }, + + { "BAN", _VENDOR_NAME_ENTRY("Banyan") }, + { "BCC", _VENDOR_NAME_ENTRY("Beaver Computer Corporation") }, + { "BCD", _VENDOR_NAME_ENTRY("Dr. Seufert GmbH") }, + { "BEO", _VENDOR_NAME_ENTRY("Bang & Olufsen") }, + { "BGT", _VENDOR_NAME_ENTRY("Budzetron") }, + { "BIG", _VENDOR_NAME_ENTRY("Bigscreen, Inc.") }, + { "BMM", _VENDOR_NAME_ENTRY("MAG Technology") }, + { "BNQ", _VENDOR_NAME_ENTRY("BenQ") }, + { "BOE", _VENDOR_NAME_ENTRY("BOE Technology Group Co., Ltd") }, + { "BRG", _VENDOR_NAME_ENTRY("Bridge") }, + { "BTC", _VENDOR_NAME_ENTRY("Bit 3") }, + { "BTE", _VENDOR_NAME_ENTRY("Brilliant Technology") }, + { "BUS", _VENDOR_NAME_ENTRY("BusTek") }, + + { "CAL", _VENDOR_NAME_ENTRY("Acon") }, + { "CCI", _VENDOR_NAME_ENTRY("Cache") }, + { "CCP", _VENDOR_NAME_ENTRY("Epson") }, + { "CDP", _VENDOR_NAME_ENTRY("CalComp") }, + { "CFG", _VENDOR_NAME_ENTRY("Atlantis") }, + { "CHA", _VENDOR_NAME_ENTRY("Chase Research") }, + { "CIP", _VENDOR_NAME_ENTRY("Ciprico") }, + { "CLO", _VENDOR_NAME_ENTRY("Clone Computers/Analogy") }, + { "CLT", _VENDOR_NAME_ENTRY("automated computer control systems")}, + { "CMD", _VENDOR_NAME_ENTRY("CMD Technology") }, + { "CMN", _VENDOR_NAME_ENTRY("Chimei innolux corp.") }, + { "CMO", _VENDOR_NAME_ENTRY("Chi Mei Optoelectronics corp.") }, + { "CNI", _VENDOR_NAME_ENTRY("Connect International") }, + { "CNT", _VENDOR_NAME_ENTRY("CNet Technology") }, + { "COM", _VENDOR_NAME_ENTRY("Comtrol") }, + { "CPC", _VENDOR_NAME_ENTRY("Ciprico") }, + { "CPD", _VENDOR_NAME_ENTRY("CompuAdd") }, + { "CPG", _VENDOR_NAME_ENTRY("DFI") }, + { "CPI", _VENDOR_NAME_ENTRY("Computer Peripherals") }, + { "CPL", _VENDOR_NAME_ENTRY("Compal") }, + { "CPQ", _VENDOR_NAME_ENTRY("Compaq") }, + { "CPT", _VENDOR_NAME_ENTRY("cPATH") }, + { "CPX", _VENDOR_NAME_ENTRY("Powermatic Data Systems") }, + { "CRD", _VENDOR_NAME_ENTRY("Cardinal Technologies") }, + { "CRN", _VENDOR_NAME_ENTRY("Cornerstone") }, + { "CRS", _VENDOR_NAME_ENTRY("Cisco") }, + { "CSE", _VENDOR_NAME_ENTRY("Compu Shack") }, + { "CSI", _VENDOR_NAME_ENTRY("Cabletron") }, + { "CSO", _VENDOR_NAME_ENTRY("California institute of Technology")}, + { "CSS", _VENDOR_NAME_ENTRY("CSS Laboratories") }, + { "CSW", _VENDOR_NAME_ENTRY("China Star Optoelectronics Technology Co., Ltd") }, + { "CTN", _VENDOR_NAME_ENTRY("Computone") }, + { "CTX", _VENDOR_NAME_ENTRY("Chuntex/CTX") }, + { "CUB", _VENDOR_NAME_ENTRY("Cubix") }, + { "CUI", _VENDOR_NAME_ENTRY("CUI") }, + { "CYB", _VENDOR_NAME_ENTRY("CyberVision") }, + + { "DBI", _VENDOR_NAME_ENTRY("DigiBoard") }, + { "DBL", _VENDOR_NAME_ENTRY("Doble Engineering") }, + { "DCC", _VENDOR_NAME_ENTRY("Dale Computer") }, + { "DCE", _VENDOR_NAME_ENTRY("Mylex") }, + { "DCM", _VENDOR_NAME_ENTRY("DCM Data Products") }, + { "DEC", _VENDOR_NAME_ENTRY("DEC") }, + { "DEI", _VENDOR_NAME_ENTRY("Deico Electronics") }, + { "DEL", _VENDOR_NAME_ENTRY("Dell") }, + { "DFI", _VENDOR_NAME_ENTRY("DFI") }, + { "DGC", _VENDOR_NAME_ENTRY("Data General") }, + { "DGS", _VENDOR_NAME_ENTRY("Diagsoft") }, + { "DIA", 
_VENDOR_NAME_ENTRY("Diadem") }, + { "DIO", _VENDOR_NAME_ENTRY("DIO") }, + { "DIS", _VENDOR_NAME_ENTRY("Diseda") }, + { "DIT", _VENDOR_NAME_ENTRY("Dragon Information Technology") }, + { "DLK", _VENDOR_NAME_ENTRY("D-Link") }, + { "DLO", _VENDOR_NAME_ENTRY("Dlodlo Technologies Co., Ltd") }, + { "DMB", _VENDOR_NAME_ENTRY("Digicom Systems") }, + { "DMS", _VENDOR_NAME_ENTRY("DOME imaging systems") }, + { "DNV", _VENDOR_NAME_ENTRY("NexView") }, + { "DOM", _VENDOR_NAME_ENTRY("Dome Imaging Systems") }, + { "DON", _VENDOR_NAME_ENTRY("DENON, Ltd.") }, + { "DPC", _VENDOR_NAME_ENTRY("Delta") }, + { "DPI", _VENDOR_NAME_ENTRY("DocuPoint") }, + { "DPL", _VENDOR_NAME_ENTRY("Digital Projection Limited") }, + { "DPN", _VENDOR_NAME_ENTRY("Shanghai Lexiang Technology Limited") }, + { "DPT", _VENDOR_NAME_ENTRY("DPT") }, + { "DRT", _VENDOR_NAME_ENTRY("Digital Research") }, + { "DSJ", _VENDOR_NAME_ENTRY("VR Technology Holdings Limited") }, + { "DSM", _VENDOR_NAME_ENTRY("DSM Digial Services") }, + { "DTC", _VENDOR_NAME_ENTRY("Data Technology") }, + { "DTI", _VENDOR_NAME_ENTRY("Diversified Technology") }, + { "DTK", _VENDOR_NAME_ENTRY("DTK Computer") }, + { "DTX", _VENDOR_NAME_ENTRY("Data Translation") }, + { "DVC", _VENDOR_NAME_ENTRY("DecaView") }, + { "DWE", _VENDOR_NAME_ENTRY("Daewoo") }, + + { "ECS", _VENDOR_NAME_ENTRY("EliteGroup/ECS") }, + { "ENC", _VENDOR_NAME_ENTRY("Eizo") }, + { "EGO", _VENDOR_NAME_ENTRY("Ergo Electronics") }, + { "EKC", _VENDOR_NAME_ENTRY("Kodak") }, + { "EHJ", _VENDOR_NAME_ENTRY("Epson") }, + { "EIZ", _VENDOR_NAME_ENTRY("Eizo") }, + { "ELI", _VENDOR_NAME_ENTRY("Edsun") }, + { "ELS", _VENDOR_NAME_ENTRY("ELSA") }, + { "ELX", _VENDOR_NAME_ENTRY("Elonex") }, + { "EMC", _VENDOR_NAME_ENTRY("ProView/EMC") }, + { "ENC", _VENDOR_NAME_ENTRY("Eizo") }, + { "EPI", _VENDOR_NAME_ENTRY("Envision") }, + { "EQX", _VENDOR_NAME_ENTRY("Equinox") }, + { "ERG", _VENDOR_NAME_ENTRY("Ergo") }, + { "ERP", _VENDOR_NAME_ENTRY("EURAPLAN") }, + { "ESI", _VENDOR_NAME_ENTRY("Extended Systems") }, + { "ETT", _VENDOR_NAME_ENTRY("E-Tech Research") }, + { "EVX", _VENDOR_NAME_ENTRY("Everex") }, + { "EXP", _VENDOR_NAME_ENTRY("Data Export") }, + + { "FCB", _VENDOR_NAME_ENTRY("Furukawa Electric") }, + { "FCM", _VENDOR_NAME_ENTRY("Funai") }, + { "FCT", _VENDOR_NAME_ENTRY("Free Computer Technology") }, + { "FDC", _VENDOR_NAME_ENTRY("Future Domain") }, + { "FDX", _VENDOR_NAME_ENTRY("Findex, Inc. 
") }, + { "FGL", _VENDOR_NAME_ENTRY("Fujitsu") }, + { "FIC", _VENDOR_NAME_ENTRY("First International") }, + { "FOR", _VENDOR_NAME_ENTRY("Formac") }, + { "FOV", _VENDOR_NAME_ENTRY("FOVE INC") }, + { "FRC", _VENDOR_NAME_ENTRY("FORCE Computers") }, + { "FRI", _VENDOR_NAME_ENTRY("Fibernet Research") }, + { "FTN", _VENDOR_NAME_ENTRY("Fountain Technologies") }, + { "FUJ", _VENDOR_NAME_ENTRY("Fujitsu") }, + + { "GAG", _VENDOR_NAME_ENTRY("Gage Applied Sciences") }, + { "GCI", _VENDOR_NAME_ENTRY("Gateway Communications") }, + { "GEN", _VENDOR_NAME_ENTRY("Genesys") }, + { "GMX", _VENDOR_NAME_ENTRY("GMX") }, + { "GRA", _VENDOR_NAME_ENTRY("Graphica") }, + { "GSM", _VENDOR_NAME_ENTRY("LG Electronics") }, + { "GVC", _VENDOR_NAME_ENTRY("GVC") }, + { "GWY", _VENDOR_NAME_ENTRY("Gateway") }, + + { "HCL", _VENDOR_NAME_ENTRY("HCL") }, + { "HCP", _VENDOR_NAME_ENTRY("Hitachi") }, + { "HCW", _VENDOR_NAME_ENTRY("Hauppauge") }, + { "HDL", _VENDOR_NAME_ENTRY("Headland") }, + { "HEC", _VENDOR_NAME_ENTRY("Hisense") }, + { "HEI", _VENDOR_NAME_ENTRY("Hyundai") }, + { "HIT", _VENDOR_NAME_ENTRY("Hitachi/HINT") }, + { "HKC", _VENDOR_NAME_ENTRY("HKC Overseas Ltd.") }, + { "HMX", _VENDOR_NAME_ENTRY("HUMAX Co., Ltd.") }, + { "HPN", _VENDOR_NAME_ENTRY("HP Inc.") }, + { "HSD", _VENDOR_NAME_ENTRY("HannStar Display Corp") }, + { "HSL", _VENDOR_NAME_ENTRY("Hansol") }, + { "HTC", _VENDOR_NAME_ENTRY("Hitachi") }, + { "HVR", _VENDOR_NAME_ENTRY("HTC Corporation") }, + { "HWD", _VENDOR_NAME_ENTRY("HighWater Designs") }, + { "HWP", _VENDOR_NAME_ENTRY("HP") }, + { "HYL", _VENDOR_NAME_ENTRY("Hypereal") }, + { "HYP", _VENDOR_NAME_ENTRY("Hyphen Limited") }, + { "HWV", _VENDOR_NAME_ENTRY("Huawei Technologies Co., Ltd") }, + + { "IBC", _VENDOR_NAME_ENTRY("IBS") }, + { "IBM", _VENDOR_NAME_ENTRY("IBM") }, + { "ICC", _VENDOR_NAME_ENTRY("BICC Data Networks") }, + { "ICL", _VENDOR_NAME_ENTRY("Fujitsu/ICL") }, + { "ICN", _VENDOR_NAME_ENTRY("Sanyo/Icon") }, + { "ICU", _VENDOR_NAME_ENTRY("Intel") }, + { "IDS", _VENDOR_NAME_ENTRY("Intellistor") }, + { "IFT", _VENDOR_NAME_ENTRY("Informtech") }, + { "IGM", _VENDOR_NAME_ENTRY("IGM Communications") }, + { "III", _VENDOR_NAME_ENTRY("Intelligent Instrumentation") }, + { "IIN", _VENDOR_NAME_ENTRY("Intel") }, + { "IMA", _VENDOR_NAME_ENTRY("Imagraph") }, + { "IMC", _VENDOR_NAME_ENTRY("IMC Networks") }, + { "IMP", _VENDOR_NAME_ENTRY("Impression") }, + { "IMX", _VENDOR_NAME_ENTRY("Arpara Technology Co., Ltd.") }, + { "INF", _VENDOR_NAME_ENTRY("Inframetrics") }, + { "INL", _VENDOR_NAME_ENTRY("InnoLux Display Corporation") }, + { "INP", _VENDOR_NAME_ENTRY("Interphase") }, + { "INS", _VENDOR_NAME_ENTRY("Ines") }, + { "INT", _VENDOR_NAME_ENTRY("Intel") }, + { "IOD", _VENDOR_NAME_ENTRY("IODATA") }, + { "ISA", _VENDOR_NAME_ENTRY("ISA") }, + { "ISI", _VENDOR_NAME_ENTRY("Interface Solutions") }, + { "ISL", _VENDOR_NAME_ENTRY("Isolation Systems") }, + { "ITA", _VENDOR_NAME_ENTRY("Itausa") }, + { "ITC", _VENDOR_NAME_ENTRY("ITK") }, + { "ITN", _VENDOR_NAME_ENTRY("NTI Group/ASUS") }, + { "ITK", _VENDOR_NAME_ENTRY("NTI Group") }, + { "IVK", _VENDOR_NAME_ENTRY("Iiyama") }, + { "IVM", _VENDOR_NAME_ENTRY("Idek Iiyama") }, + { "IVO", _VENDOR_NAME_ENTRY("InfoVision OptoElectronics Co., Ltd")}, + { "IVR", _VENDOR_NAME_ENTRY("Inlife-Handnet Co., Ltd.") }, + { "IWR", _VENDOR_NAME_ENTRY("Icuiti Corporation") }, + + { "JDI", _VENDOR_NAME_ENTRY("Japan Display Inc") }, + { "JEN", _VENDOR_NAME_ENTRY("Jean") }, + { "JKC", _VENDOR_NAME_ENTRY("JVC Kenwood Corporation") }, + { "JVC", _VENDOR_NAME_ENTRY("JVC") }, + + { "KDS", 
_VENDOR_NAME_ENTRY("Korea Data Systems") }, + { "KDK", _VENDOR_NAME_ENTRY("Kodiak") }, + { "KES", _VENDOR_NAME_ENTRY("Kesa Crop") }, + { "KFC", _VENDOR_NAME_ENTRY("KFC Computek") }, + { "KPC", _VENDOR_NAME_ENTRY("King Phoenix") }, + { "KSC", _VENDOR_NAME_ENTRY("Kinetic Systems") }, + { "KTC", _VENDOR_NAME_ENTRY("Kingston Technology") }, + { "KTG", _VENDOR_NAME_ENTRY("KayserThrede") }, + { "KTR", _VENDOR_NAME_ENTRY("IMRI") }, + { "KYC", _VENDOR_NAME_ENTRY("Kyocera") }, + + { "LAG", _VENDOR_NAME_ENTRY("Laguna Systems") }, + { "LCD", _VENDOR_NAME_ENTRY("Toshiba Matsushita Display Technology Co., Ltd")}, + { "LCS", _VENDOR_NAME_ENTRY("Longshine Electronics") }, + { "LEF", _VENDOR_NAME_ENTRY("Leaf Systems") }, + { "LEN", _VENDOR_NAME_ENTRY("Lenovo Group Limited") }, + { "LGD", _VENDOR_NAME_ENTRY("LG Display") }, + { "LGE", _VENDOR_NAME_ENTRY("LG Electronics") }, + { "LKM", _VENDOR_NAME_ENTRY("Likom/LKM") }, + { "LNK", _VENDOR_NAME_ENTRY("Link Technologies") }, + { "LTI", _VENDOR_NAME_ENTRY("Longshine") }, + { "LTN", _VENDOR_NAME_ENTRY("Lite-On") }, + + { "MAG", _VENDOR_NAME_ENTRY("MAG Technology") }, + { "MAX", _VENDOR_NAME_ENTRY("Maxdata/Belinea") }, + { "MAY", _VENDOR_NAME_ENTRY("Maynard Electronics") }, + { "MBC", _VENDOR_NAME_ENTRY("MBC") }, + { "MCC", _VENDOR_NAME_ENTRY("MCCI") }, + { "MCD", _VENDOR_NAME_ENTRY("McDATA") }, + { "MCI", _VENDOR_NAME_ENTRY("Micronics") }, + { "MCR", _VENDOR_NAME_ENTRY("Marina Communications") }, + { "MCS", _VENDOR_NAME_ENTRY("Micro Computer Systems") }, + { "MCT", _VENDOR_NAME_ENTRY("Microtec") }, + { "MDD", _VENDOR_NAME_ENTRY("Modis") }, + { "MDG", _VENDOR_NAME_ENTRY("Madge Networks") }, + { "MDS", _VENDOR_NAME_ENTRY("Micro Display Systems") }, + { "MDT", _VENDOR_NAME_ENTRY("Magus Data") }, + { "MED", _VENDOR_NAME_ENTRY("Medion") }, + { "MEI", _VENDOR_NAME_ENTRY("Panasonic") }, + { "MEL", _VENDOR_NAME_ENTRY("Mitsubishi") }, + { "MET", _VENDOR_NAME_ENTRY("Metheus") }, + { "MFG", _VENDOR_NAME_ENTRY("Microfield Graphics") }, + { "MGC", _VENDOR_NAME_ENTRY("CompuAdd") }, + { "MGT", _VENDOR_NAME_ENTRY("Megatech") }, + { "MIC", _VENDOR_NAME_ENTRY("Micronics") }, + { "MIR", _VENDOR_NAME_ENTRY("Miro") }, + { "MJI", _VENDOR_NAME_ENTRY("MARANTZ JAPAN, INC.") }, + { "MLX", _VENDOR_NAME_ENTRY("Mylex") }, + { "MMX", _VENDOR_NAME_ENTRY("MAG Technology") }, + { "MOR", _VENDOR_NAME_ENTRY("Morse Technology") }, + { "MSI", _VENDOR_NAME_ENTRY("Microstep") }, + { "MST", _VENDOR_NAME_ENTRY("MS Telematica") }, + { "MSV", _VENDOR_NAME_ENTRY("Mosgi") }, + { "MTC", _VENDOR_NAME_ENTRY("Mitac") }, + { "MTI", _VENDOR_NAME_ENTRY("Morse Technology") }, + { "MTQ", _VENDOR_NAME_ENTRY("Mountain Computer") }, + { "MTS", _VENDOR_NAME_ENTRY("Multi-Tech Systems") }, + { "MTX", _VENDOR_NAME_ENTRY("Matrox") }, + { "MVD", _VENDOR_NAME_ENTRY("Microvitec PLC") }, + { "MVN", _VENDOR_NAME_ENTRY("META COMPANY") }, + { "MWY", _VENDOR_NAME_ENTRY("Microway") }, + { "MYA", _VENDOR_NAME_ENTRY("Monydata") }, + { "MYL", _VENDOR_NAME_ENTRY("Mylex") }, + { "MYX", _VENDOR_NAME_ENTRY("Micronyx") }, + { "MZI", _VENDOR_NAME_ENTRY("Mozo") }, + + { "NAN", _VENDOR_NAME_ENTRY("Nanao") }, + { "NCA", _VENDOR_NAME_ENTRY("Siemens Nixdorf") }, + { "NCD", _VENDOR_NAME_ENTRY("NCD") }, + { "NCS", _VENDOR_NAME_ENTRY("Northgate") }, + { "NDC", _VENDOR_NAME_ENTRY("National DataComm") }, + { "NDS", _VENDOR_NAME_ENTRY("Nokia") }, + { "NEC", _VENDOR_NAME_ENTRY("NEC") }, + { "NIC", _VENDOR_NAME_ENTRY("National Instruments") }, + { "NIT", _VENDOR_NAME_ENTRY("Network Info Technology") }, + { "NOK", _VENDOR_NAME_ENTRY("Nokia") }, + { 
"NPI", _VENDOR_NAME_ENTRY("Network Peripherals") }, + { "NSC", _VENDOR_NAME_ENTRY("National Semiconductor") }, + { "NSS", _VENDOR_NAME_ENTRY("Newport Systems") }, + { "NTI", _VENDOR_NAME_ENTRY("New Tech") }, + { "NVD", _VENDOR_NAME_ENTRY("NVIDIA") }, + { "NVL", _VENDOR_NAME_ENTRY("Novell") }, + { "NXG", _VENDOR_NAME_ENTRY("Nexgen") }, + + { "OAS", _VENDOR_NAME_ENTRY("OAsys") }, + { "OCN", _VENDOR_NAME_ENTRY("Olfan") }, + { "OEC", _VENDOR_NAME_ENTRY("Daytek") }, + { "OLC", _VENDOR_NAME_ENTRY("Olicom") }, + { "OLI", _VENDOR_NAME_ENTRY("Olivetti") }, + { "OKI", _VENDOR_NAME_ENTRY("OKI Electric Industrial Company Ltd") }, + { "ONK", _VENDOR_NAME_ENTRY("ONKYO Corporation") }, + { "OPT", _VENDOR_NAME_ENTRY("OPTi") }, + { "OQI", _VENDOR_NAME_ENTRY("Optiquest") }, + { "OTI", _VENDOR_NAME_ENTRY("Orchid Technology") }, + { "OVR", _VENDOR_NAME_ENTRY("Oculus VR Inc.") }, + { "OZO", _VENDOR_NAME_ENTRY("Zoom Telephonics") }, + + { "PAR", _VENDOR_NAME_ENTRY("Parallan Comp Inc") }, + { "PBE", _VENDOR_NAME_ENTRY("Packard Bell") }, + { "PBI", _VENDOR_NAME_ENTRY("Pitney Bowes") }, + { "PBN", _VENDOR_NAME_ENTRY("Packard Bell") }, + { "PCI", _VENDOR_NAME_ENTRY("Pioneer Computer") }, + { "PCP", _VENDOR_NAME_ENTRY("Procomp") }, + { "PDR", _VENDOR_NAME_ENTRY("Pure Data") }, + { "PEA", _VENDOR_NAME_ENTRY("Peacock") }, + { "PGS", _VENDOR_NAME_ENTRY("Princeton Graphics") }, + { "PHI", _VENDOR_NAME_ENTRY("Phillips") }, + { "PHL", _VENDOR_NAME_ENTRY("Philips") }, + { "PIO", _VENDOR_NAME_ENTRY("Pioneer Electronic Corporation") }, + { "PI0", _VENDOR_NAME_ENTRY("Pioneer") }, + { "PIR", _VENDOR_NAME_ENTRY("Pico Technology Inc") }, + { "PJD", _VENDOR_NAME_ENTRY("Projectiondesign AS") }, + { "PLB", _VENDOR_NAME_ENTRY("PLB") }, + { "PLX", _VENDOR_NAME_ENTRY("Ocean Office Automation") }, + { "PMC", _VENDOR_NAME_ENTRY("PMC Consumer Electronics") }, + { "PMV", _VENDOR_NAME_ENTRY("MAG Technology") }, + { "PNR", _VENDOR_NAME_ENTRY("Planar Systems, Inc.") }, + { "PRO", _VENDOR_NAME_ENTRY("Proteon") }, + { "PSI", _VENDOR_NAME_ENTRY("PSI Perceptive Solutions") }, + { "PTS", _VENDOR_NAME_ENTRY("ProView/EMC/PTS") }, + { "PVR", _VENDOR_NAME_ENTRY("Pimax Tech Co., Ltd") }, + + { "QDI", _VENDOR_NAME_ENTRY("Quantum Data Incorporated") }, + { "QDM", _VENDOR_NAME_ENTRY("Quadram") }, + { "QTD", _VENDOR_NAME_ENTRY("Quantum 3D Inc") }, + { "QTM", _VENDOR_NAME_ENTRY("Quantum") }, + + { "RAC", _VENDOR_NAME_ENTRY("Racore Computer Products") }, + { "RCE", _VENDOR_NAME_ENTRY("RCE") }, + { "RCI", _VENDOR_NAME_ENTRY("RC International") }, + { "REL", _VENDOR_NAME_ENTRY("Relisys") }, + { "REM", _VENDOR_NAME_ENTRY("REM") }, + { "RII", _VENDOR_NAME_ENTRY("Racal Interlan") }, + { "RMP", _VENDOR_NAME_ENTRY("Research Machines") }, + { "ROK", _VENDOR_NAME_ENTRY("Rockwell") }, + { "RTI", _VENDOR_NAME_ENTRY("Rancho Technology") }, + { "RUN", _VENDOR_NAME_ENTRY("RUNCO International") }, + + { "SAM", _VENDOR_NAME_ENTRY("Samsung") }, + { "SAN", _VENDOR_NAME_ENTRY("Sanyo Electric Co.,Ltd.") }, + { "SCC", _VENDOR_NAME_ENTRY("SORD") }, + { "SCD", _VENDOR_NAME_ENTRY("Sanyo") }, + { "SDC", _VENDOR_NAME_ENTRY("Samsung Display Corp.") }, + { "SDI", _VENDOR_NAME_ENTRY("Samtron/Sigma Designs") }, + { "SDT", _VENDOR_NAME_ENTRY("Siemens AG") }, + { "SEA", _VENDOR_NAME_ENTRY("Segate") }, + { "SEC", _VENDOR_NAME_ENTRY("Seiko/Epson") }, + { "SEN", _VENDOR_NAME_ENTRY("Sencore") }, + { "SFL", _VENDOR_NAME_ENTRY("Shiftall Inc") }, + { "SGT", _VENDOR_NAME_ENTRY("Stargate Technology/AT&T") }, + { "SGX", _VENDOR_NAME_ENTRY("SGI") }, + { "SHP", _VENDOR_NAME_ENTRY("Sharp") }, + { 
"SIB", _VENDOR_NAME_ENTRY("Sanyo") }, + { "SIE", _VENDOR_NAME_ENTRY("Siemens Nixdorf") }, + { "SII", _VENDOR_NAME_ENTRY("Silicon Image, Inc.") }, + { "SIS", _VENDOR_NAME_ENTRY("SiS/Modula Tech") }, + { "SIT", _VENDOR_NAME_ENTRY("Sitintel") }, + { "SIX", _VENDOR_NAME_ENTRY("Zuniq Data") }, + { "SKD", _VENDOR_NAME_ENTRY("Schneider & Koch") }, + { "SKW", _VENDOR_NAME_ENTRY("Skyworth") }, + { "SKY", _VENDOR_NAME_ENTRY("SKYDATA S.P.A.") }, + { "SLB", _VENDOR_NAME_ENTRY("Shlumberger Ltd") }, + { "SLT", _VENDOR_NAME_ENTRY("Salt Internatioinal Corp.") }, + { "SLX", _VENDOR_NAME_ENTRY("Specialix") }, + { "SMC", _VENDOR_NAME_ENTRY("Standard Microsystems") }, + { "SMI", _VENDOR_NAME_ENTRY("Smile") }, + { "SML", _VENDOR_NAME_ENTRY("Smile") }, + { "SMN", _VENDOR_NAME_ENTRY("Somnium Space Ltd") }, + { "SMS", _VENDOR_NAME_ENTRY("Silicon Multimedia Systems") }, + { "SNI", _VENDOR_NAME_ENTRY("Siemens Nixdorf") }, + { "SNY", _VENDOR_NAME_ENTRY("Sony") }, + { "SOB", _VENDOR_NAME_ENTRY("Sanyo") }, + { "SPE", _VENDOR_NAME_ENTRY("SPEA") }, + { "SPT", _VENDOR_NAME_ENTRY("Sceptre") }, + { "SRC", _VENDOR_NAME_ENTRY("Shamrock/SunRiver") }, + { "SSS", _VENDOR_NAME_ENTRY("S3") }, + { "STA", _VENDOR_NAME_ENTRY("Stesa") }, + { "STB", _VENDOR_NAME_ENTRY("STB Systems") }, + { "STC", _VENDOR_NAME_ENTRY("Sampo/STAC") }, + { "STP", _VENDOR_NAME_ENTRY("Sceptre") }, + { "STR", _VENDOR_NAME_ENTRY("Starlight Networks") }, + { "SUK", _VENDOR_NAME_ENTRY("Schneider & Koch") }, + { "SUP", _VENDOR_NAME_ENTRY("Supra/Diamond Media") }, + { "SUR", _VENDOR_NAME_ENTRY("Surenam") }, + { "SVR", _VENDOR_NAME_ENTRY("Sensics Inc.") }, + { "SYL", _VENDOR_NAME_ENTRY("Sylvania") }, + { "SYN", _VENDOR_NAME_ENTRY("Synaptics Inc") }, + + { "TAI", _VENDOR_NAME_ENTRY("Toshiba") }, + { "TAT", _VENDOR_NAME_ENTRY("Tatung") }, + { "TAX", _VENDOR_NAME_ENTRY("Taxan") }, + { "TCC", _VENDOR_NAME_ENTRY("Tandon") }, + { "TCI", _VENDOR_NAME_ENTRY("Tulip") }, + { "TCL", _VENDOR_NAME_ENTRY("Tech Concepts") }, + { "TCM", _VENDOR_NAME_ENTRY("Techmedia/3Com") }, + { "TCO", _VENDOR_NAME_ENTRY("Thomas Conrad") }, + { "TCR", _VENDOR_NAME_ENTRY("Thomson Consumer Electronics") }, + { "TCS", _VENDOR_NAME_ENTRY("Tatung") }, + { "TDS", _VENDOR_NAME_ENTRY("Tri Data Systems") }, + { "TDT", _VENDOR_NAME_ENTRY("TDT") }, + { "TDY", _VENDOR_NAME_ENTRY("Tandy") }, + { "TEA", _VENDOR_NAME_ENTRY("Teac") }, + { "TEC", _VENDOR_NAME_ENTRY("Tecmar") }, + { "TEI", _VENDOR_NAME_ENTRY("TECO") }, + { "TGI", _VENDOR_NAME_ENTRY("TriGem") }, + { "TGS", _VENDOR_NAME_ENTRY("Torus") }, + { "TMA", _VENDOR_NAME_ENTRY("Tianma Microelectronics") }, + { "TOS", _VENDOR_NAME_ENTRY("Toshiba") }, + { "TRI", _VENDOR_NAME_ENTRY("Tricord") }, + { "TRM", _VENDOR_NAME_ENTRY("Tekram") }, + { "TRL", _VENDOR_NAME_ENTRY("Royal") }, + { "TRS", _VENDOR_NAME_ENTRY("Torus") }, + { "TRU", _VENDOR_NAME_ENTRY("Aashima/Truevision") }, + { "TSB", _VENDOR_NAME_ENTRY("Toshiba") }, + { "TSC", _VENDOR_NAME_ENTRY("Sanyo") }, + { "TSI", _VENDOR_NAME_ENTRY("TeleVideo") }, + { "TST", _VENDOR_NAME_ENTRY("Transtream Inc") }, + { "TTC", _VENDOR_NAME_ENTRY("Telecommunications Techniques") }, + { "TTK", _VENDOR_NAME_ENTRY("Totoku") }, + { "TTX", _VENDOR_NAME_ENTRY("TTX") }, + { "TVI", _VENDOR_NAME_ENTRY("TeleVideo/Truevision") }, + { "TVM", _VENDOR_NAME_ENTRY("TVM") }, + { "TWA", _VENDOR_NAME_ENTRY("Tidewater") }, + { "TWE", _VENDOR_NAME_ENTRY("Kontron") }, + { "TXN", _VENDOR_NAME_ENTRY("Texas Instruments") }, + { "TYN", _VENDOR_NAME_ENTRY("Tyan Computer") }, + + { "UBI", _VENDOR_NAME_ENTRY("Ungermann Bass") }, + { "UFO", 
_VENDOR_NAME_ENTRY("UFO Systems") }, + { "UNA", _VENDOR_NAME_ENTRY("Unisys") }, + { "UNI", _VENDOR_NAME_ENTRY("Unisys") }, + { "UNM", _VENDOR_NAME_ENTRY("Unisys") }, + { "UNO", _VENDOR_NAME_ENTRY("Unisys") }, + { "UNS", _VENDOR_NAME_ENTRY("Unisys") }, + { "UNT", _VENDOR_NAME_ENTRY("Unisys") }, + { "USC", _VENDOR_NAME_ENTRY("UltraStor") }, + { "USR", _VENDOR_NAME_ENTRY("US Robotics") }, + { "UTB", _VENDOR_NAME_ENTRY("Utobia") }, + + { "VES", _VENDOR_NAME_ENTRY("Vestel") }, + { "VIK", _VENDOR_NAME_ENTRY("Viking") }, + { "VLV", _VENDOR_NAME_ENTRY("Valve Corporation") }, + { "VMI", _VENDOR_NAME_ENTRY("Vermont MicroSystems") }, + { "VOB", _VENDOR_NAME_ENTRY("Vobis") }, + { "VRG", _VENDOR_NAME_ENTRY("VRgineers, Inc. ") }, + { "VRT", _VENDOR_NAME_ENTRY("Varjo Technologies") }, + { "VSC", _VENDOR_NAME_ENTRY("ViewSonic") }, + + { "WAC", _VENDOR_NAME_ENTRY("Wacom Tech") }, + { "WDC", _VENDOR_NAME_ENTRY("Western Digital") }, + { "WDE", _VENDOR_NAME_ENTRY("Westinghouse Digital Electronics") }, + { "WIL", _VENDOR_NAME_ENTRY("WIPRO") }, + { "WTC", _VENDOR_NAME_ENTRY("Wen Technology") }, + { "WYS", _VENDOR_NAME_ENTRY("Wyse Technology") }, + + { "YMH", _VENDOR_NAME_ENTRY("Yamaha Corporation") }, + { "YHQ", _VENDOR_NAME_ENTRY("Yokogawa") }, + + { "ZCM", _VENDOR_NAME_ENTRY("Zenith") }, + { "ZDS", _VENDOR_NAME_ENTRY("Zenith") }, + { "ZYT", _VENDOR_NAME_ENTRY("Zytex") }, +}; + +#endif /* __NV_PNP_VENDOR_IDS_H__ */ + diff --git a/src/common/inc/nvSemaphoreCommon.h b/src/common/inc/nvSemaphoreCommon.h new file mode 100644 index 0000000..b11297c --- /dev/null +++ b/src/common/inc/nvSemaphoreCommon.h @@ -0,0 +1,217 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_SEMAPHORE_H__ +#define __NV_SEMAPHORE_H__ + +#include "nvtypes.h" +#include "nvCpuIntrinsics.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef volatile struct { + NvU32 payload; + NvU32 reportValue; + NvU64 timer; +} NvReportSemaphore32; + +typedef volatile struct { + NvU64 reportValue; + NvU64 timer; +} NvReportSemaphore64; + +typedef volatile union { + NvReportSemaphore32 sema32; + NvReportSemaphore64 sema64; +} NvReportSemaphore; + +/* + * These structures can't change size. They map to the GPU and other driver + * components expect the same size. 
+ */
+ct_assert(sizeof(NvReportSemaphore32) == 16);
+ct_assert(sizeof(NvReportSemaphore64) == 16);
+ct_assert(sizeof(NvReportSemaphore) == 16);
+
+/*
+ * Pre-Volta GPUs can only read/write a 32-bit semaphore. Rather than try to
+ * use multiple semaphore writes to emulate a full 64-bit write, which is prone
+ * to race conditions when the value wraps, derive the full 64-bit value by
+ * comparing the current GPU-accessible value with the last value written by
+ * the CPU or submitted to be written by the GPU, which is stashed in the
+ * timestamp field of the semaphore by the CPU in both these cases.
+ */
+static inline void NvTimeSemFermiSetMaxSubmittedVal(
+    volatile NvU64 *maxSubmittedPtr,
+    const NvU64 value)
+{
+    NvU64 oldValue =
+        (NvU64)__NVatomicCompareExchange64((volatile NvS64 *)maxSubmittedPtr,
+                                           0, 0);
+
+    // Atomically set report->timer to max(value, report->timer).
+    while (oldValue < value) {
+        const NvU64 prevValue =
+            (NvU64)__NVatomicCompareExchange64((volatile NvS64 *)maxSubmittedPtr,
+                                               (NvS64)value,
+                                               (NvS64)oldValue);
+        if (prevValue == oldValue) {
+            // The specified value was set. Done.
+            nvAssert(*maxSubmittedPtr >= value);
+            break;
+        }
+
+        oldValue = prevValue;
+    }
+}
+
+static inline void NvTimeSemFermiSetMaxSubmitted(
+    NvReportSemaphore32 *report,
+    const NvU64 value)
+{
+    NvTimeSemFermiSetMaxSubmittedVal(&report->timer, value);
+}
+
+static inline NvU64 NvTimeSemFermiGetPayloadVal(
+    volatile void *payloadPtr,
+    volatile void *maxSubmittedPtr)
+{
+    // The ordering of the two operations below is critical. Other threads
+    // may be submitting GPU work that modifies the semaphore value, or
+    // modifying it from the CPU themselves. Both of those operations first
+    // set the 64-bit max submitted/timer value, then modify or submit work
+    // to modify the 32-bit payload value. Consider this hypothetical timeline
+    // if the order of operations below is reversed:
+    //
+    //   thread1:
+    //   -SetMaxSubmitted(0x1);
+    //   -report->payload = 0x1;
+    //
+    //   thread2:
+    //   -Reads 0x1 from report->timer
+    //
+    //   thread1:
+    //   -SetMaxSubmitted(0x7fffffff);
+    //   -report->payload = 0x7fffffff;
+    //   -SetMaxSubmitted(0x100000000);
+    //   -report->payload = 0x00000000;
+    //
+    //   thread2:
+    //   -Reads 0x0 from report->payload
+    //
+    // The logic below would see 0 (payload) is less than 1 (max submitted) and
+    // determine a wrap is outstanding, subtract one from the high 32-bits of
+    // the max submitted value (0x00000000 - 0x1), overflow, and return the
+    // current 64-bit value as 0xffffffff00000000 when the correct value is
+    // 0x100000000. To avoid this, we must read the payload prior to reading
+    // the max submitted value from the timer field. The logic can correctly
+    // adjust the max submitted value back down if a wrap occurs between these
+    // two operations, but has no way to bump the max submitted value up if a
+    // wrap occurs with the opposite ordering.
+    NvU64 current = *(volatile NvU32*)payloadPtr;
+    // Use an atomic exchange to ensure the 64-bit read is atomic even on 32-bit
+    // CPUs.
+    NvU64 submitted = (NvU64)
+        __NVatomicCompareExchange64((volatile NvS64 *)maxSubmittedPtr, 0ll, 0ll);
+
+    nvAssert(!(current & 0xFFFFFFFF00000000ull));
+
+    // The values are monotonically increasing and differ by no more than
+    // 2^31 - 1. Hence, if the low word of the submitted value is less
+    // than the low word of the current value, exactly one 32-bit wrap
+    // occurred between the current value and the most recently
+    // submitted value.
Walk back the high word to match the value + // associated with the current GPU-visible value. + if ((submitted & 0xFFFFFFFFull) < current) { + submitted -= 0x100000000ull; + } + + current |= (submitted & 0xFFFFFFFF00000000ull); + + return current; +} + +static inline NvU64 NvTimeSemFermiGetPayload( + NvReportSemaphore32 *report) +{ + return NvTimeSemFermiGetPayloadVal(&report->payload, &report->timer); +} + +static inline void NvTimeSemFermiSetPayload( + NvReportSemaphore32 *report, + const NvU64 payload) +{ + // First save the actual value to the reserved/timer bits + NvTimeSemFermiSetMaxSubmittedVal(&report->timer, payload); + + // Then write the low bits to the GPU-accessible semaphore value. + report->payload = (NvU32)(payload & 0xFFFFFFFFULL); +} + +/* + * Volta and up. + */ + +static inline NvU64 NvTimeSemVoltaGetPayloadVal( + volatile void *payloadPtr) +{ + nvAssert(payloadPtr); + return (NvU64) + __NVatomicCompareExchange64((volatile NvS64 *)payloadPtr, + 0, 0); +} + +static inline NvU64 NvTimeSemVoltaGetPayload( + NvReportSemaphore64 *report) +{ + return NvTimeSemVoltaGetPayloadVal(&report->reportValue); +} + +static inline void NvTimeSemVoltaSetPayload( + NvReportSemaphore64 *report, + const NvU64 payload) +{ + NvU64 oldPayload = 0; + + while (NV_TRUE) { + NvU64 prevPayload = (NvU64) + __NVatomicCompareExchange64((volatile NvS64 *)&report->reportValue, + (NvS64)payload, (NvS64)oldPayload); + + if (prevPayload == oldPayload) { + break; + } + + nvAssert(prevPayload < payload); + + oldPayload = prevPayload; + } +} + +#ifdef __cplusplus +}; +#endif + +#endif /* __NV_SEMAPHORE_H__ */ diff --git a/src/common/inc/nvSha1.h b/src/common/inc/nvSha1.h new file mode 100644 index 0000000..4e3e497 --- /dev/null +++ b/src/common/inc/nvSha1.h @@ -0,0 +1,390 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2012 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! 
+ * Utility header file to generate a one-way hash from an arbitrary
+ * byte array, using the Secure Hashing Algorithm 1 (SHA-1) as defined
+ * in FIPS PUB 180-1 published April 17, 1995:
+ *
+ * https://www.itl.nist.gov/fipspubs/fip180-1.htm
+ *
+ * Some common test cases (see Appendices A and B of the above document):
+ *
+ * SHA1("abc") =
+ *   A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D
+ *
+ * SHA1("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq") =
+ *   84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1
+ */
+
+#ifndef __NV_SHA1_H__
+#define __NV_SHA1_H__
+
+#include "nvtypes.h"
+
+/*!
+ * @brief Structure used by the SHA-1 functions to maintain the state of the
+ * calculations.
+ */
+
+typedef struct
+{
+    NvU32 state[5];
+    NvU32 count;
+    NvU8  buffer[128];
+} Sha1Context;
+
+
+/*!
+ * @brief Pointer to a memory accessor function for use by the SHA-1 hash
+ * function.
+ *
+ * Due to memory constraints in some environments where this code is executed
+ * (e.g., the PMU/DPU), the data that needs to be processed by the SHA-1 hash
+ * function may not be readily available. This function is responsible for
+ * copying the data into a buffer to be used by the SHA-1 function.
+ *
+ * In addition, the SHA-1 library is used by many different clients, so the
+ * memory accessor functions must work in each client's environment.
+ *
+ * @param[out] pBuff   The buffer to copy the new data to.
+ * @param[in]  index   The desired offset to begin copying from.
+ * @param[in]  size    The requested number of bytes to be copied.
+ * @param[in]  pInfo   Pointer to the data passed into sha1Generate() as pData.
+ *
+ * @return The actual number of bytes copied into the buffer.
+ */
+
+typedef NvU32 Sha1CopyFunc(NvU8 *pBuff, NvU32 index, NvU32 size, void *pInfo);
+
+
+/*
+ * The following values are defined by the SHA-1 algorithm for initial values.
+ */
+#define SHA1_INIT_H0 0x67452301 //!< Initial H0 value
+#define SHA1_INIT_H1 0xEFCDAB89 //!< Initial H1 value
+#define SHA1_INIT_H2 0x98BADCFE //!< Initial H2 value
+#define SHA1_INIT_H3 0x10325476 //!< Initial H3 value
+#define SHA1_INIT_H4 0xC3D2E1F0 //!< Initial H4 value
+
+
+/*!
+ * @brief Reverses the byte order of a word; that is, switching the endianness
+ * of the word.
+ *
+ * @param[in] a A 32-bit word
+ *
+ * @returns The 32-bit word with its byte order reversed.
+ */
+
+#define REVERSE_BYTE_ORDER(a) \
+    (((a) >> 24) | ((a) << 24) | (((a) >> 8) & 0xFF00) | (((a) << 8) & 0xFF0000))
+
+
+/*!
+ * @brief Computation step as defined by SHA-1.
+ *
+ * Unlike the 64 byte buffer version outlined in the SHA-1 algorithm, this
+ * function uses a 128 byte buffer to minimize the calculation needed to
+ * index the data.
+ *
+ * @param[in,out] pState
+ *          Pointer to State word array.
+ *
+ * @param[in] pBuffer
+ *          Data to operate on. 128 bytes in length. No length checking is
+ *          done; it is assumed to have been done by the calling function.
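+ *
+ * (Note: with the doubled buffer, p[0] holds W[i-16] of the message schedule
+ * while the fixed offsets p[2], p[8] and p[13] hold W[i-14], W[i-8] and
+ * W[i-3], and the assignment p[0] = p[16] = j keeps the two halves mirrored,
+ * so no second modular index is needed.)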
+ */ + +static void +_sha1Transform +( + NvU32 *pState, + NvU8 *pBuffer +) +{ + NvU32 a = pState[0]; + NvU32 b = pState[1]; + NvU32 c = pState[2]; + NvU32 d = pState[3]; + NvU32 e = pState[4]; + NvU32 *pBuf = (NvU32 *)pBuffer; + NvU32 *p; + NvU32 i; + NvU32 j; + NvU32 k; + + for (i = 0; i < 80; i++) + { + p = &pBuf[i & 0xf]; + j = p[0]; + if (i < 16) + { + j = REVERSE_BYTE_ORDER(j); + } + else + { + j ^= p[2] ^ p[8] ^ p[13]; + j = (j << 1) + (j >> 31); + } + p[0] = p[16] = j; + if (i < 40) + { + if (i < 20) + { + k = 0x5a827999 + ((b & (c ^ d)) ^ d); + } + else + { + k = 0x6ed9eba1 + (b ^ c ^ d); + } + } + else + { + if (i < 60) + { + k = 0x8f1bbcdc + (((b | c) & d) | (b & c)); + } + else + { + k = 0xca62c1d6 + (b ^ c ^ d); + } + } + j += (a << 5) + (a >> 27) + e + k; + e = d; + d = c; + c = (b << 30) + (b >> 2); + b = a; + a = j; + } + pState[0] += a; + pState[1] += b; + pState[2] += c; + pState[3] += d; + pState[4] += e; +} + + +/*! + * Initializes the SHA-1 context. + * + * @param[out] pContext + * Pointer to the context to initialize. + */ + +static void +_sha1Initialize +( + Sha1Context *pContext +) +{ + pContext->count = 0; + pContext->state[0] = SHA1_INIT_H0; + pContext->state[1] = SHA1_INIT_H1; + pContext->state[2] = SHA1_INIT_H2; + pContext->state[3] = SHA1_INIT_H3; + pContext->state[4] = SHA1_INIT_H4; +} + + +/*! + * @brief Divides the input buffer into multiple 64-byte buffers and computes + * the message digest for each. + * + * @param[in] pContext + * Pointer to a Sha1Context. + * + * @param[in] pData + * Pointer to the data array to compute the message digest. + * + * @param[in] len + * Size of the data. + * + * @param[in] copyFunc + * Copy routine to use. + */ + +static void +_sha1Update +( + Sha1Context *pContext, + void *pData, + NvU32 len, + Sha1CopyFunc copyFunc +) +{ + NvU32 buffer_offset = (pContext->count & 63); + NvU32 copy_size; + NvU32 idx = 0; + + pContext->count += len; + while ((buffer_offset + len) > 63) + { + copy_size = 64 - buffer_offset; + copyFunc(&pContext->buffer[buffer_offset], idx, copy_size, pData); + _sha1Transform(pContext->state, pContext->buffer); + buffer_offset = 0; + idx += copy_size; + len -= copy_size; + } + if (len > 0) + { + copyFunc(&pContext->buffer[buffer_offset], idx, len, pData); + } +} + + +/*! + * @brief fill memory with zero; not all environments in which this + * code runs have memset(3). + * + * @param[out] pData + * The memory to be filled with zero + * + * @param[in] nBytes + * The number of bytes of memory to fill with zero + */ + +static NV_INLINE void +_sha1MemZero +( + NvU8 *pData, + NvU32 nBytes +) +{ + NvU32 i; + + for (i = 0; i < nBytes; i++) { + pData[i] = 0; + } +} + + +/*! + * @brief Pads the message as specified by the SHA-1 algorithm and computes + * the message digest on the final message chunk(s). + * + * @param[out] pDigest + * The SHA-1 hash values. + * + * @param[in] pContext + * Pointer to a Sha1Context. 
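+ *
+ * Note that Sha1Context::count is a 32-bit byte counter whose bit count is
+ * stored in a single 32-bit word (the upper half of the standard 64-bit
+ * length field is always zero here), so the digest is only defined for
+ * inputs shorter than 2^29 bytes.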
+ */ + +static void +_sha1Final +( + NvU8 *pDigest, + Sha1Context *pContext +) +{ + NvU32 i; + NvU32 bufferOffset = (pContext->count & 63); + NvU8 *pBuffer = (NvU8*)&pContext->buffer[bufferOffset]; + NvU32 *pCount; + NvU32 *pDig32; + + // append padding pattern to the end of input + *pBuffer++ = 0x80; + if (bufferOffset < 56) + { + _sha1MemZero(pBuffer, 59 - bufferOffset); + } + else + { + // need an extra sha1_transform + if (bufferOffset < 63) + { + _sha1MemZero(pBuffer, 63 - bufferOffset); + } + _sha1Transform(pContext->state, pContext->buffer); + _sha1MemZero(pContext->buffer, 60); + } + + // set final count (this is the number of *bits* not *bytes*) + pCount = (NvU32*)&pContext->buffer[15 << 2]; + *pCount = REVERSE_BYTE_ORDER(pContext->count << 3); + + _sha1Transform(pContext->state, pContext->buffer); + + // output hash with each dword in big endian + if (pDigest) + { + pDig32 = (NvU32*) pDigest; + for (i = 0; i < 5; i++) + { + pDig32[i] = REVERSE_BYTE_ORDER(pContext->state[i]); + } + } +} + + +/*! + * @brief Generates the SHA-1 hash value on the data provided. + * + * The function does not manipulate the source data directly, as it may not + * have direct access to it. Therefore, it relies upon the copy function to + * copy segments of the data into a local buffer before any manipulation takes + * place. + * + * @param[out] pHash + * Pointer to store the hash array. The buffer must be 20 bytes in + * length, and the result is stored in big endian format. + * + * @param[in] pData + * The source data array to transform. The actual values and make-up + * of this parameter are dependent on the copy function. + * + * @param[in] nBytes + * The size, in bytes, of the source data. + * + * @param[in] copyFunc + * The function responsible for copying data from the source + * for use by the sha1 function. It is possible for the data + * to exist outside the current execution environment (e.g., + * the PMU, and the data to hash are in system memory), so + * the function will never directly manipulate the source + * data. + */ + +#define NV_SHA1_BLOCK_LENGTH 64 +#define NV_SHA1_DIGEST_LENGTH 20 + +static void +sha1Generate +( + NvU8 pHash[NV_SHA1_DIGEST_LENGTH], + void *pData, + NvU32 nBytes, + Sha1CopyFunc copyFunc +) +{ + Sha1Context context; + + _sha1Initialize(&context); + _sha1Update(&context, pData, nBytes, copyFunc); + _sha1Final(pHash, &context); +} + + +#endif /* __NV_SHA1_H__ */ diff --git a/src/common/inc/nvUnixVersion.h b/src/common/inc/nvUnixVersion.h new file mode 100644 index 0000000..a5dcee5 --- /dev/null +++ b/src/common/inc/nvUnixVersion.h @@ -0,0 +1,16 @@ +#ifndef __NV_UNIX_VERSION_H__ +#define __NV_UNIX_VERSION_H__ + +#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \ + (defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1) || \ + defined(NV_DCECORE) + +#define NV_VERSION_STRING "580.00" + +#else + +#error "nvUnixVersion.h should only be included in UNIX builds" + +#endif + +#endif /* __NV_UNIX_VERSION_H__ */ diff --git a/src/common/inc/nvVer.h b/src/common/inc/nvVer.h new file mode 100644 index 0000000..4a8b3fd --- /dev/null +++ b/src/common/inc/nvVer.h @@ -0,0 +1,18 @@ +// nvVer.h - Versions of NV drivers + +#define NV_COMPANY_NAME_STRING_SHORT "NVIDIA" +#define NV_COMPANY_NAME_STRING_FULL "NVIDIA Corporation" +#define NV_COMPANY_NAME_STRING NV_COMPANY_NAME_STRING_FULL +#define NV_COPYRIGHT_YEAR "2025" +#define NV_COPYRIGHT "(C) " NV_COPYRIGHT_YEAR " NVIDIA Corporation. 
All rights reserved." // Please do not use the non-ascii copyright symbol for (C). + +#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \ + (defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1) || \ + defined(NV_DCECORE) + +// All Version numbering for Unix builds has moved. (Source should be re-directed to directly include that header.) +#include "nvUnixVersion.h" + +#else + +#endif diff --git a/src/common/inc/nv_list.h b/src/common/inc/nv_list.h new file mode 100644 index 0000000..dbb5189 --- /dev/null +++ b/src/common/inc/nv_list.h @@ -0,0 +1,558 @@ +/* + * Copyright © 2010 Intel Corporation + * Copyright © 2010 Francisco Jerez + * Copyright © 2012 NVIDIA Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +/* + * This file was copied from the X.Org X server source at commit + * 5884e7dedecdd82ddbb037360cf9c85143e094b5 and modified to match NVIDIA's X + * driver code style. + */ + +#ifndef _NV_LIST_H_ +#define _NV_LIST_H_ + +#ifdef __cplusplus +extern "C" { +#endif //__cplusplus + +#include "nvmisc.h" + + #define HAVE_TYPEOF 1 + +/** + * @file Classic doubly-link circular list implementation. + * For real usage examples of the linked list, see the file test/list.c + * + * Example: + * We need to keep a list of struct foo in the parent struct bar, i.e. what + * we want is something like this. + * + * struct bar { + * ... + * struct foo *list_of_foos; -----> struct foo {}, struct foo {}, struct foo{} + * ... + * } + * + * We need one list head in bar and a list element in all list_of_foos (both are of + * data type 'NVListRec'). + * + * struct bar { + * ... + * NVListRec list_of_foos; + * ... + * } + * + * struct foo { + * ... + * NVListRec entry; + * ... + * } + * + * Now we initialize the list head: + * + * struct bar bar; + * ... + * nvListInit(&bar.list_of_foos); + * + * Then we create the first element and add it to this list: + * + * struct foo *foo = malloc(...); + * .... + * nvListAdd(&foo->entry, &bar.list_of_foos); + * + * Repeat the above for each element you want to add to the list. Deleting + * works with the element itself. + * nvListDel(&foo->entry); + * free(foo); + * + * Note: calling nvListDel(&bar.list_of_foos) will set bar.list_of_foos to an empty + * list again. + * + * Looping through the list requires a 'struct foo' as iterator and the + * name of the field the subnodes use. 
+ *
+ * struct foo *iterator;
+ * nvListForEachEntry(iterator, &bar.list_of_foos, entry) {
+ *      if (iterator->something == ...)
+ *             ...
+ * }
+ *
+ * Note: You must not call nvListDel() on the iterator if you continue the
+ * loop. You need to run the safe for-each loop instead:
+ *
+ * struct foo *iterator, *next;
+ * nvListForEachEntry_safe(iterator, next, &bar.list_of_foos, entry) {
+ *      if (...)
+ *              nvListDel(&iterator->entry);
+ * }
+ *
+ */
+
+/**
+ * The linkage struct for list nodes. This struct must be part of your
+ * to-be-linked struct. NVListRec is required for both the head of the
+ * list and for each list node.
+ *
+ * The position and name of the NVListRec field are irrelevant.
+ * There are no requirements that elements of a list are of the same type.
+ * There are no requirements for a list head, any NVListRec can be a list
+ * head.
+ */
+typedef struct NVList {
+    struct NVList *next, *prev;
+} NVListRec, *NVListPtr;
+
+/**
+ * Initialize the list as an empty list.
+ *
+ * Example:
+ * nvListInit(&bar->list_of_foos);
+ *
+ * @param list The list to initialize.
+ */
+static NV_INLINE void
+nvListInit(NVListPtr list)
+{
+    list->next = list->prev = list;
+}
+
+/**
+ * Initialize the list as an empty list.
+ *
+ * This is functionally the same as nvListInit, but can be used for
+ * initialization of global variables.
+ *
+ * Example:
+ * static NVListRec list_of_foos = NV_LIST_INIT(&list_of_foos);
+ *
+ * @param head The list to initialize.
+ */
+#define NV_LIST_INIT(head) { .prev = (head), .next = (head) }
+
+static NV_INLINE void
+__nvListAdd(NVListPtr entry, NVListPtr prev, NVListPtr next)
+{
+    next->prev = entry;
+    entry->next = next;
+    entry->prev = prev;
+    prev->next = entry;
+}
+
+/**
+ * Insert a new element after the given list head. The new element does not
+ * need to be initialised as an empty list.
+ * The list changes from:
+ *      head -> some element -> ...
+ * to
+ *      head -> new element -> older element -> ...
+ *
+ * Example:
+ * struct foo *newfoo = malloc(...);
+ * nvListAdd(&newfoo->entry, &bar->list_of_foos);
+ *
+ * @param entry The new element to prepend to the list.
+ * @param head The existing list.
+ */
+static NV_INLINE void
+nvListAdd(NVListPtr entry, NVListPtr head)
+{
+    __nvListAdd(entry, head, head->next);
+}
+
+/**
+ * Append a new element to the end of the list given with this list head.
+ *
+ * The list changes from:
+ *      head -> some element -> ... -> lastelement
+ * to
+ *      head -> some element -> ... -> lastelement -> new element
+ *
+ * Example:
+ * struct foo *newfoo = malloc(...);
+ * nvListAppend(&newfoo->entry, &bar->list_of_foos);
+ *
+ * @param entry The new element to append to the list.
+ * @param head The existing list.
+ */
+static NV_INLINE void
+nvListAppend(NVListPtr entry, NVListPtr head)
+{
+    __nvListAdd(entry, head->prev, head);
+}
+
+static NV_INLINE void
+__nvListDel(NVListPtr prev, NVListPtr next)
+{
+    next->prev = prev;
+    prev->next = next;
+}
+
+/**
+ * Remove the element from the list it is in. Using this function will reset
+ * the pointers to/from this element so it is removed from the list. It does
+ * NOT free the element itself or manipulate it otherwise.
+ *
+ * Using nvListDel on a pure list head (like in the example at the top of
+ * this file) will NOT remove the first element from
+ * the list but rather reset the list as an empty list.
+ *
+ * Example:
+ * nvListDel(&foo->entry);
+ *
+ * @param entry The element to remove.
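+ *
+ * Because the entry is re-initialized as an empty list after unlinking,
+ * calling nvListDel() twice on the same entry is harmless, and
+ * nvListIsEmpty(&foo->entry) returns true once the element is detached.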
+ */
+static NV_INLINE void
+nvListDel(NVListPtr entry)
+{
+    __nvListDel(entry->prev, entry->next);
+    nvListInit(entry);
+}
+
+/**
+ * Check if the list is empty.
+ *
+ * Example:
+ * nvListIsEmpty(&bar->list_of_foos);
+ *
+ * @return True if the list is empty, False if it contains one or more
+ * elements.
+ */
+static NV_INLINE NvBool
+nvListIsEmpty(const NVListRec *head)
+{
+    return head->next == head;
+}
+
+static NV_INLINE int
+nvListCount(const NVListRec *head)
+{
+    NVListPtr next;
+    int count = 0;
+
+    for (next = head->next; next != head; next = next->next) {
+        count++;
+    }
+
+    return count;
+}
+
+/**
+ * Check if entry is present in the list.
+ *
+ * Example:
+ * nvListPresent(&foo->entry, &bar->list_of_foos);
+ *
+ * @return NV_TRUE if the list contains the specified entry; NV_FALSE
+ * otherwise.
+ */
+static NV_INLINE NvBool
+nvListPresent(const NVListRec *entry, const NVListRec *head)
+{
+    const NVListRec *next;
+
+    for (next = head->next; next != head; next = next->next) {
+        if (next == entry) {
+            return NV_TRUE;
+        }
+    }
+
+    return NV_FALSE;
+}
+
+/**
+ * Returns a pointer to the container of this list element.
+ *
+ * Example:
+ * struct foo* f;
+ * f = nv_container_of(&foo->entry, struct foo, entry);
+ * assert(f == foo);
+ *
+ * @param ptr Pointer to the NVListRec.
+ * @param type Data type of the list element.
+ * @param member Member name of the NVListRec field in the list element.
+ * @return A pointer to the data struct containing the list head.
+ */
+#ifndef nv_container_of
+#define nv_container_of(ptr, type, member) \
+    (type *)((char *)(ptr) - NV_OFFSETOF(type, member))
+#endif
+
+/**
+ * Alias of nv_container_of
+ */
+#define nvListEntry(ptr, type, member) \
+    nv_container_of(ptr, type, member)
+
+/**
+ * Retrieve the first list entry for the given list pointer.
+ *
+ * Example:
+ * struct foo *first;
+ * first = nvListFirstEntry(&bar->list_of_foos, struct foo, list_of_foos);
+ *
+ * @param ptr The list head
+ * @param type Data type of the list element to retrieve
+ * @param member Member name of the NVListRec field in the list element.
+ * @return A pointer to the first list element.
+ */
+#define nvListFirstEntry(ptr, type, member) \
+    nvListEntry((ptr)->next, type, member)
+
+/**
+ * Retrieve the last list entry for the given list pointer.
+ *
+ * Example:
+ * struct foo *first;
+ * first = nvListLastEntry(&bar->list_of_foos, struct foo, list_of_foos);
+ *
+ * @param ptr The list head
+ * @param type Data type of the list element to retrieve
+ * @param member Member name of the NVListRec field in the list element.
+ * @return A pointer to the last list element.
+ */
+#define nvListLastEntry(ptr, type, member) \
+    nvListEntry((ptr)->prev, type, member)
+
+#ifdef HAVE_TYPEOF
+#define __nv_container_of(ptr, sample, member) \
+    nv_container_of(ptr, __typeof__(*sample), member)
+#else
+/* This implementation of __nv_container_of has undefined behavior according
+ * to the C standard, but it works in many cases. If your compiler doesn't
+ * support __typeof__() and fails with this implementation, please try a newer
+ * compiler.
+ */
+#define __nv_container_of(ptr, sample, member) \
+    (void *)((char *)(ptr) \
+             - ((char *)&(sample)->member - (char *)(sample)))
+#endif
+
+/**
+ * Loop through the list given by head and set pos to each struct in the list.
+ *
+ * Example:
+ * struct foo *iterator;
+ * nvListForEachEntry(iterator, &bar->list_of_foos, entry) {
+ *      [modify iterator]
+ * }
+ *
+ * This macro is not safe for node deletion. Use nvListForEachEntry_safe
+ * instead.
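+ *
+ * (Sketch of the behavior: pos is primed with
+ * __nv_container_of((head)->next, pos, member) and advanced through
+ * pos->member.next, so for an empty list, where head->next == head, the
+ * body runs zero times.)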
+ * + * @param pos Iterator variable of the type of the list elements. + * @param head List head + * @param member Member name of the NVListRec in the list elements. + * + */ +#ifdef HAVE_TYPEOF +#define __NV_LIST_SET(x, y) x = y +#else +static NV_INLINE void __nvListSet(void **x, void *y) +{ + *x = y; +} + +#define __NV_LIST_SET(x, y) __nvListSet((void **) &x, (void *) (y)) +#endif + +#define nvListForEachEntry(pos, head, member) \ + for (__NV_LIST_SET(pos, __nv_container_of((head)->next, pos, member)); \ + &pos->member != (head); \ + __NV_LIST_SET(pos, __nv_container_of(pos->member.next, pos, member))) + +/** + * Loop through the list, keeping a backup pointer to the element. This + * macro allows for the deletion of a list element while looping through the + * list. + * + * See nvListForEachEntry for more details. + */ +#define nvListForEachEntry_safe(pos, tmp, head, member) \ + for (__NV_LIST_SET(pos, __nv_container_of((head)->next, pos, member)), \ + __NV_LIST_SET(tmp, __nv_container_of(pos->member.next, pos, member)); \ + &pos->member != (head); \ + __NV_LIST_SET(pos, tmp), \ + __NV_LIST_SET(tmp, __nv_container_of(pos->member.next, tmp, member))) + +/* NULL-Terminated List Interface + * + * The interface below does _not_ use the NVListRec as described above. + * It is mainly for legacy structures that cannot easily be switched to + * NVListRec. + * + * This interface is for structs like + * struct foo { + * [...] + * struct foo *next; + * [...] + * }; + * + * The position and field name of "next" are arbitrary. + */ + +/** + * Init the element as null-terminated list. + * + * Example: + * struct foo *list = malloc(); + * nvNTListInit(list, next); + * + * @param list The list element that will be the start of the list + * @param member Member name of the field pointing to next struct + */ +#define nvNTListInit(_list, _member) \ + (_list)->_member = NULL + +/** + * Returns the next element in the list or NULL on termination. + * + * Example: + * struct foo *element = list; + * while ((element = nvNTListNext(element, next)) { } + * + * This macro is not safe for node deletion. Use nvListForEachEntry_safe + * instead. + * + * @param list The list or current element. + * @param member Member name of the field pointing to next struct. + */ +#define nvNTListNext(_list, _member) \ + (_list)->_member + +/** + * Iterate through each element in the list. + * + * Example: + * struct foo *iterator; + * nvNTListForEachEntry(iterator, list, next) { + * [modify iterator] + * } + * + * @param entry Assigned to the current list element + * @param list The list to iterate through. + * @param member Member name of the field pointing to next struct. + */ +#define nvNTListForEachEntry(_entry, _list, _member) \ + for (_entry = _list; _entry; _entry = (_entry)->_member) + +/** + * Iterate through each element in the list, keeping a backup pointer to the + * element. This macro allows for the deletion of a list element while + * looping through the list. + * + * See nvNTListForEachEntry for more details. + * + * @param entry Assigned to the current list element + * @param tmp The pointer to the next element + * @param list The list to iterate through. + * @param member Member name of the field pointing to next struct. + */ +#define nvNTListForEachEntrySafe(_entry, _tmp, _list, _member) \ + for (_entry = _list, _tmp = (_entry) ? (_entry)->_member : NULL;\ + _entry; \ + _entry = _tmp, _tmp = (_tmp) ? (_tmp)->_member: NULL) + +/** + * Append the element to the end of the list. 
This macro may be used to + * merge two lists. + * + * Example: + * struct foo *elem = malloc(...); + * nvNTListInit(elem, next) + * nvNTListAppend(elem, list, struct foo, next); + * + * Resulting list order: + * list_item_0 -> list_item_1 -> ... -> elem_item_0 -> elem_item_1 ... + * + * @param entry An entry (or list) to append to the list + * @param list The list to append to. This list must be a valid list, not + * NULL. + * @param type The list type + * @param member Member name of the field pointing to next struct + */ +#define nvNTListAppend(_entry, _list, _type, _member) \ + do { \ + _type *__iterator = _list; \ + while (__iterator->_member) { __iterator = __iterator->_member;}\ + __iterator->_member = _entry; \ + } while (0) + +/** + * Insert the element at the next position in the list. This macro may be + * used to insert a list into a list. + * + * struct foo *elem = malloc(...); + * nvNTListInit(elem, next) + * nvNTListInsert(elem, list, struct foo, next); + * + * Resulting list order: + * list_item_0 -> elem_item_0 -> elem_item_1 ... -> list_item_1 -> ... + * + * @param entry An entry (or list) to append to the list + * @param list The list to insert to. This list must be a valid list, not + * NULL. + * @param type The list type + * @param member Member name of the field pointing to next struct + */ +#define nvNTListInsert(_entry, _list, _type, _member) \ + do { \ + nvNTListAppend((_list)->_member, _entry, _type, _member); \ + (_list)->_member = _entry; \ + } while (0) + +/** + * Delete the entry from the list by iterating through the list and + * removing any reference from the list to the entry. + * + * Example: + * struct foo *elem = + * nvNTListDel(elem, list, struct foo, next); + * + * @param entry The entry to delete from the list. entry is always + * re-initialized as a null-terminated list. + * @param list The list containing the entry, set to the new list without + * the removed entry. + * @param type The list type + * @param member Member name of the field pointing to the next entry + */ +#define nvNTListDel(_entry, _list, _type, _member) \ + do { \ + _type *__e = _entry; \ + if (__e == NULL || _list == NULL) break; \ + if ((_list) == __e) { \ + _list = __e->_member; \ + } else { \ + _type *__prev = _list; \ + while (__prev->_member && __prev->_member != __e) \ + __prev = nvNTListNext(__prev, _member); \ + if (__prev->_member) \ + __prev->_member = __e->_member; \ + } \ + nvNTListInit(__e, _member); \ + } while(0) + +#ifdef __cplusplus +} +#endif //__cplusplus + +#endif /* _NV_LIST_H_ */ diff --git a/src/common/inc/nv_mig_types.h b/src/common/inc/nv_mig_types.h new file mode 100644 index 0000000..ae3ea3c --- /dev/null +++ b/src/common/inc/nv_mig_types.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __NV_MIG_TYPES_H__ +#define __NV_MIG_TYPES_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +typedef NvU32 MIGDeviceId; + +#define NO_MIG_DEVICE 0L + +/* Convert a MIGDeviceId into a 0-based per-GPU subdevice index. */ +#define MIG_DEVICE_ID_SUBDEV_MASK 0xf0000000 +#define MIG_DEVICE_ID_SUBDEV_SHIFT 28 + +#define MIG_DEVICE_ID_TO_SUBDEV(migDeviceId) (((migDeviceId) & MIG_DEVICE_ID_SUBDEV_MASK) >> MIG_DEVICE_ID_SUBDEV_SHIFT) + +#ifdef __cplusplus +} +#endif + +#endif /* __NV_MIG_TYPES_H__ */ diff --git a/src/common/inc/nv_smg.h b/src/common/inc/nv_smg.h new file mode 100644 index 0000000..c276465 --- /dev/null +++ b/src/common/inc/nv_smg.h @@ -0,0 +1,100 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __NV_SMG_H__ +#define __NV_SMG_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvlimits.h" +#include "nvgputypes.h" +#include "nvctassert.h" +#include "nvrmcontext.h" +#include "nv_mig_types.h" + +/* + * In the context of SMG a MIG device description is the global identity or + * fingerprint for one MIG device partition that the system has available. + * These are queried through RM and thus they will be the same in + * kernelspace and userspace, and remain immutable and cached for the + * lifetime of the process or kernel module. + * + * For now, the graphics driver does NOT support SMG if the MIG partitions + * change on the fly. RM supports reconfiguring partitions that are not in + * use but, for now, the kernel and userspace graphics drivers expect the + * topology of all physical and MIG devices to remain unchanged throughout + * so that they can agree on the same set of known MIG devices. This is not + * an unreasonable requirement. + * + * Each MIG device description is referred to by a semi-opaque MIGDeviceId. 
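+ *
+ * As an illustrative sketch only (error handling omitted; rmctx is the
+ * caller's nvRMContextPtr, and this assumes the usual RM convention that a
+ * zero return status means success), a client would typically snapshot the
+ * cached table once at startup:
+ *
+ *   nvMIGDeviceDescription *devs;
+ *   NvU32 count;
+ *   if (nvSMGGetDeviceList(rmctx, &devs, &count) == 0) {
+ *       // devs[0..count-1] stays valid and unchanged from here on
+ *   }
+ *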
+ * The device id is actually the 0-based index into the table of MIG device + * descriptions, but with bits flipped so that a null value is an invalid + * device. This makes boolean interpretation work more naturally and makes + * structs from calloc() initialize to an invalid device by default. + */ +typedef struct nvMIGDeviceDescriptionRec { + /* The globally unique MIG device ID */ + MIGDeviceId migDeviceId; + + /* RM sub/device instance of the physical device hosting the MIG device */ + NvU32 deviceInstance; + NvU32 subDeviceInstance; + + /* These three uniquely identify a particular MIG device */ + NvU32 gpuId; + NvU32 gpuInstanceId; + NvU32 computeInstanceId; + + /* Whether this device is accessible to the calling process */ + NvBool smgAccessOk; + /* MIG exec partition UUID string */ + char migUuid[NV_MIG_DEVICE_UUID_STR_LENGTH]; + NvU8 migUuidBin[NV_GPU_UUID_LEN]; +} nvMIGDeviceDescription; + +NvBool nvSMGSubscribeSubDevToPartition(nvRMContextPtr rmctx, + NvU32 subdevHandle, + MIGDeviceId migDevice, + NvU32 gpuInstSubscriptionHdl, + NvU32 computeInstSubscriptionHdl); + +NvU32 nvSMGGetDeviceByUUID(nvRMContextPtr rmctx, + const char *migUuid, + const nvMIGDeviceDescription **uniDev); +NvU32 nvSMGGetDeviceById(nvRMContextPtr rmctx, + MIGDeviceId migDev, + const nvMIGDeviceDescription **uniDev); +NvU32 nvSMGGetDeviceList(nvRMContextPtr rmctx, + nvMIGDeviceDescription **devices, + NvU32 *deviceCount); +NvU32 nvSMGGetDefaultDeviceForDeviceInstance(nvRMContextPtr rmctx, + NvU32 deviceInstance, + const nvMIGDeviceDescription **uniDev); + +#ifdef __cplusplus +} +#endif + +#endif /* __NV_SMG_H__ */ diff --git a/src/common/inc/nv_speculation_barrier.h b/src/common/inc/nv_speculation_barrier.h new file mode 100644 index 0000000..8f9121b --- /dev/null +++ b/src/common/inc/nv_speculation_barrier.h @@ -0,0 +1,219 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * NVIDIA GPZ vulnerability mitigation definitions. + */ + +/* + * There are two copies of this file for legacy reasons: + * + * P4: <$NV_SOURCE/>drivers/common/inc/nv_speculation_barrier.h + * Git: include/nv_speculation_barrier.h + * + * Both files need to be kept in sync if any changes are required.
+ */ + +#ifndef _NV_SPECULATION_BARRIER_H_ +#define _NV_SPECULATION_BARRIER_H_ + +#define NV_SPECULATION_BARRIER_VERSION 2 + +/* + * GNU-C/MSC/clang - x86/x86_64 : x86_64, __i386, __i386__ + * GNU-C - THUMB mode : __GNUC__, __thumb__ + * GNU-C - ARM modes : __GNUC__, __arm__, __aarch64__ + * armclang - THUMB mode : __ARMCC_VERSION, __thumb__ + * armclang - ARM modes : __ARMCC_VERSION, __arm__, __aarch64__ + * GHS - THUMB mode : __ghs__, __THUMB__ + * GHS - ARM modes : __ghs__, __ARM__, __ARM64__ + */ + +#if defined(_M_IX86) || defined(__i386__) || defined(__i386) \ + || defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) + /* All x86 */ + #define NV_SPECULATION_BARRIER_x86 + +#elif defined(macintosh) || defined(__APPLE__) \ + || defined(__powerpc) || defined(__powerpc__) || defined(__powerpc64__) \ + || defined(__POWERPC__) || defined(__ppc) || defined(__ppc__) \ + || defined(__ppc64__) || defined(__PPC__) \ + || defined(__PPC64__) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) + /* All PowerPC */ + #define NV_SPECULATION_BARRIER_PPC + +#elif (defined(__GNUC__) && defined(__thumb__)) \ + || (defined(__ARMCC_VERSION) && defined(__thumb__)) \ + || (defined(__ghs__) && defined(__THUMB__)) + /* ARM-thumb mode(<=ARMv7)/T32 (ARMv8) */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB ".inst.w 0xf3af8014\n" + +#elif (defined(__GNUC__) && defined(__arm__)) \ + || (defined(__ARMCC_VERSION) && defined(__arm__)) \ + || (defined(__ghs__) && defined(__ARM__)) + /* aarch32(ARMv8) / arm(<=ARMv7) mode */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB ".inst 0xe320f014\n" + +#elif (defined(__GNUC__) && defined(__aarch64__)) \ + || (defined(__ARMCC_VERSION) && defined(__aarch64__)) \ + || (defined(__ghs__) && defined(__ARM64__)) + /* aarch64(ARMv8) mode */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB "HINT #20\n" +#elif defined(NVCPU_IS_RISCV64) +# define nv_speculation_barrier() +#else + #error "Unknown compiler/chip family" +#endif + +/* + * nv_speculation_barrier -- General-purpose speculation barrier + * + * This approach provides full protection against variant-1 vulnerability. + * However, the recommended approach is detailed below (See: + * nv_array_index_no_speculate) + * + * Semantics: + * Any memory read that is sequenced after a nv_speculation_barrier(), + * and contained directly within the scope of nv_speculation_barrier() or + * directly within a nested scope, will not speculatively execute until all + * conditions for entering that scope have been architecturally resolved. + * + * Example: + * if (untrusted_index_from_user < bound) { + * ... + * nv_speculation_barrier(); + * ... 
+ * x = array1[untrusted_index_from_user]; + * bit = x & 1; + * y = array2[0x100 * bit]; + * } + */ + +#if defined(NV_SPECULATION_BARRIER_x86) +// Delete after all references are changed to nv_speculation_barrier +#define speculation_barrier() nv_speculation_barrier() + +static inline void nv_speculation_barrier(void) +{ + +#if defined(__GNUC__) || defined(__clang__) + __asm__ __volatile__ ("lfence" : : : "memory"); +#endif + +} + +#elif defined(NV_SPECULATION_BARRIER_PPC) + +static inline void nv_speculation_barrier(void) +{ + asm volatile("ori 31,31,0"); +} + +#elif defined(NV_SPECULATION_BARRIER_ARM_COMMON) + +/* Note: Cortex-A9 GNU-assembler seems to complain about DSB SY */ + #define nv_speculation_barrier() \ + asm volatile \ + ( \ + "DSB sy\n" \ + "ISB\n" \ + : : : "memory" \ + ) +#endif + +/* + * nv_array_index_no_speculate -- Recommended variant-1 mitigation approach + * + * The array-index-no-speculate approach "de-speculates" an array index that + * has already been bounds-checked. + * + * This approach is preferred over nv_speculation_barrier due to the following + * reasons: + * - It is just as effective as the general-purpose speculation barrier. + * - It clearly identifies what array index is being de-speculated and is thus + * self-commenting, whereas the general-purpose speculation barrier requires + * an explanation of what array index is being de-speculated. + * - It performs substantially better than the general-purpose speculation + * barrier on ARM Cortex-A cores (the difference is expected to be tens of + * cycles per invocation). Within tight loops, this difference may become + * noticeable. + * + * Semantics: + * Provided count is non-zero and the caller has already validated or otherwise + * established that index < count, any speculative use of the return value will + * use a speculative value that is less than count. + * + * Example: + * if (untrusted_index_from_user < bound) { + * untrusted_index_from_user = nv_array_index_no_speculate( + * untrusted_index_from_user, bound); + * ... + * x = array1[untrusted_index_from_user]; + * ... + * } + * + * The use of nv_array_index_no_speculate() in the above example ensures that + * subsequent uses of untrusted_index_from_user will not execute speculatively + * (they will wait for the bounds check to complete). + */ + +static inline unsigned long nv_array_index_no_speculate(unsigned long index, + unsigned long count) +{ +#if defined(NV_SPECULATION_BARRIER_x86) && (defined(__GNUC__) || defined(__clang__)) + unsigned long mask; + + __asm__ __volatile__ + ( + "CMP %2, %1 \n" + "SBB %0, %0 \n" + : "=r"(mask) : "r"(index), "r"(count) : "cc" + ); + + return (index & mask); + +#elif defined(NV_SPECULATION_BARRIER_ARM_COMMON) + unsigned long mask; + + asm volatile + ( + "CMP %[ind], %[cnt] \n" + "SBC %[res], %[cnt], %[cnt] \n" + NV_SPEC_BARRIER_CSDB + : [res] "=r" (mask) : [ind] "r" (index), [cnt] "r" (count): "cc" + ); + + return (index & mask); + +/* Fallback to generic speculation barrier for unsupported platforms */ +#else + nv_speculation_barrier(); + + return index; +#endif +} + +#endif //_NV_SPECULATION_BARRIER_H_ diff --git a/src/common/inc/nvctassert.h b/src/common/inc/nvctassert.h new file mode 100644 index 0000000..89a7083 --- /dev/null +++ b/src/common/inc/nvctassert.h @@ -0,0 +1,189 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1997-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __NV_CTASSERT_H +#define __NV_CTASSERT_H + +/*****************************************************************************/ + +/* Compile Time assert + * ------------------- + * Use ct_assert(b) instead of assert(b) whenever the condition 'b' is constant, + * i.e. when 'b' can be determined at compile time. + * + * e.g.: check array size: + * ct_assert(__GL_ARRAYSIZE(arrayName) == constArraySize); + * e.g.: check struct size alignment: + * ct_assert(sizeof(struct xy) % 64 == 0); + * + * When available, standard C or C++ language constructs are used: + * - ISO C++11 defines the static_assert keyword + * - ISO C11 defines the _Static_assert keyword + * + * Note that recent versions of Clang support _Static_assert in all compiler modes + * - not just C11 mode - so we test for that in addition to checking explicitly for + * C11 and C++11 support. + * + * Those new language standards aren't available on all supported platforms; an + * alternate method which involves array declarations is employed in that case, + * described below. + * + * In C, there is a restriction on where ct_assert() can be placed: + * It can be placed wherever a variable declaration can be placed, i.e.: + * - either anywhere at file scope + * - or inside a function at the beginning of any {} block; it may be mixed + * with variable declarations. + * e.g.: + * void function() + * { + * ct_assert(...); <-- ok \ + * int a; | + * ct_assert(...); <-- ok | declaration section + * int b; | + * ct_assert(...); <-- ok / + * + * a = 0; -- first statement + * + * int c; <-- error + * ct_assert(...); <-- error + * + * {ct_assert(...);} <-- ok (uses its own block for ct_assert()) + * } + * + * In C++, there is no such restriction, i.e. it can be placed at file scope + * or anywhere inside a function or namespace or class (i.e., wherever + * a variable declaration may be placed). + * + * For C code, the mechanism of this ct_assert() is to declare a prototype + * of a function (e.g. compile_time_assertion_failed_in_line_555, if current + * line number is 555), which gets an array as argument: + * (1) the size of this array is +1, if b != 0 (ok) + * (2) the size of this array is -1, if b == 0 (error) + * + * In case (2) the compiler throws an error. + * e.g. msvc compiler: + * error C2118: negative subscript or subscript is too large + * e.g.
gcc 2.95.3: + * size of array '_compile_time_assertion_failed_in_line_555' is negative + * + * In case the condition 'b' is not constant, the msvc compiler throws + * an error: + * error C2057: expected constant expression + * In this case the run time assert() must be used. + * + * For C++ code, we use a different technique because the function prototype + * declaration can have function linkage conflicts. If a single compilation + * unit has ct_assert() statements on the same line number in two different + * files, we would have: + * + * compile_time_assertion_failed_in_line_777(...); from xxx.cpp + * compile_time_assertion_failed_in_line_777(...); from xxx.h + * + * That is valid C++. But if either declaration were in an extern "C" block, + * the same function would be declared with two different linkage types and an + * error would ensue. + * + * Instead, ct_assert() for C++ simply declares an array typedef. As in the C + * version, we will get a compilation error if a typedef with a negative size + * is specified. Line numbers are not needed because C++ allows redundant + * typedefs as long as they are all defined the same way. But we tack them on + * anyway in case the typedef name is reported in compiler errors. C does not + * permit redundant typedefs, so this version should not be used in true C + * code. It can be used in extern "C" blocks of C++ code, however. As with + * the C version, MSVC will throw a "negative subscript" or "expected constant + * expression" error if the expression asserted is false or non-constant. + * + * Notes: + * - This ct_assert() does *not* generate any code or variable. + * Therefore there is no need to define it away for RELEASE builds. + * - The integration of the current source line number (__LINE__) ... + * ... would be required in C++ to allow multiple use inside the same + * class/namespace (if we used the C-style expansion), because the id + * must be unique. + * ... is nice to have in C or C++ if the compiler's error message contains + * the id (this is not the case for msvc) + * - Using three nested macros instead of only one is necessary to get the id + * compile_time_assertion_failed_in_line_555 + * instead of + * compile_time_assertion_failed_in_line___LINE__ + */ + +#if defined(__clang__) +# ifndef __has_extension +# define __has_extension __has_feature // Compatibility with Clang pre-3.0 compilers.
+# endif +# define CLANG_C_STATIC_ASSERT __has_extension(c_static_assert) +#else +# define CLANG_C_STATIC_ASSERT 0 +#endif + +// Adding this macro to fix MISRA 2012 rule 20.12 +#define NV_CTASSERT_STRINGIFY_MACRO(b) #b + +#if (defined(__cplusplus) && __cplusplus >= 201103L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L) + // ISO C++11 defines the static_assert keyword +# define ct_assert(b) static_assert((b), "Compile time assertion failed: " NV_CTASSERT_STRINGIFY_MACRO(b)) +# define ct_assert_i(b,line) static_assert((b), "Compile time assertion failed: " NV_CTASSERT_STRINGIFY_MACRO(b)NV_CTASSERT_STRINGIFY_MACRO(line)) +#elif !defined(NVOC) && ((defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || CLANG_C_STATIC_ASSERT) + // ISO C11 defines the _Static_assert keyword +# define ct_assert(b) _Static_assert((b), "Compile time assertion failed: " NV_CTASSERT_STRINGIFY_MACRO(b)) +# define ct_assert_i(b,line) _Static_assert((b), "Compile time assertion failed: " NV_CTASSERT_STRINGIFY_MACRO(b)NV_CTASSERT_STRINGIFY_MACRO(line)) +#else + // For compilers which don't support ISO C11 or C++11, we fall back to an + // array (type) declaration +# define ct_assert(b) ct_assert_i(b,__LINE__) +# define ct_assert_i(b,line) ct_assert_ii(b,line) +# ifdef __cplusplus +# define ct_assert_ii(b,line) typedef char compile_time_assertion_failed_in_line_##line[(b)?1:-1] +# else + /* + * The use of a function prototype 'void compile_time_assertion_failed_in_line_##line(...)' + * above violates MISRA-C 2012 Rule 8.6 since the rule disallows a function + * declaration without a definition. To satisfy the MISRA rule, the C++ style + * 'typedef char compile_time_assertion_failed_in_line_##line' + * is acceptable, but doesn't work for typical C code since there can be duplicate + * line numbers leading to duplicate typedefs, which C doesn't allow. + * + * The following macro uses the predefined macro __COUNTER__ to create unique + * typedefs that fix the MISRA violations. However, not all C compilers support + * that macro, and even for compilers that do support it, the underlying code + * makes use of variably modified identifiers in ct_assert, which makes its use + * unviable. + * + * For now, restrict the use of the macro to: + * i) GCC 4.3.0 and above, which support the __COUNTER__ macro + * ii) Specifically the Falcon port of the compiler, since the use of variably + * modified identifiers has been removed on those projects + * + * TBD: Enabling the macro on MSVC and CLANG is pending + */ +# if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= 40300) && defined(GCC_FALCON) +# define ct_assert_ii(b,line) ct_assert_iii(b,line,__COUNTER__) +# define ct_assert_iii(b,line,cntr) ct_assert_cntr(b,line,cntr) +# define ct_assert_cntr(b,line,cntr) typedef char cnt##cntr##_compile_time_assertion_failed_in_line_##line[(b)?1:-1] __attribute__((unused)) +# else +# define ct_assert_ii(b,line) void compile_time_assertion_failed_in_line_##line(int _compile_time_assertion_failed_in_line_##line[(b) ?
1 : -1]) +# endif +# endif +#endif + +#endif // __NV_CTASSERT_H diff --git a/src/common/inc/nvlog_defs.h b/src/common/inc/nvlog_defs.h new file mode 100644 index 0000000..568d078 --- /dev/null +++ b/src/common/inc/nvlog_defs.h @@ -0,0 +1,564 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2024 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NVLOG_DEFS_H_ +#define _NVLOG_DEFS_H_ + +#include "nvtypes.h" +/******************* Common Debug & Trace Defines ***************************\ +* * +* Module: NVLOG_DEFS.H * +* * +\****************************************************************************/ + +#define NVLOG_MAX_DBG_MODULES 256 + +/********************************/ +/********* Structures *********/ +/********************************/ + +// Forward declaration, so it can be used in the function type definition. + +/** + * @brief Struct representing a buffer in NvLog + * + * All logging (Print, Regtrace, etc) use these buffers. + */ +typedef struct _NVLOG_BUFFER NVLOG_BUFFER; + + +/** + * @brief Type of the 'push' function for NvLog buffers + * + * Function called whenever pushing something to an NvLog buffer + */ +typedef NvBool (*NVLOG_BUFFER_PUSHFUNC) (NVLOG_BUFFER *, NvU8 *, NvU32); + + + +/** + * @brief Fields specific to ring buffers + */ +typedef struct _NVLOG_RING_BUFFER_EXTRA_FIELDS +{ + /** How many times the ring buffer has overflown */ + NvU32 overflow; +} NVLOG_RING_BUFFER_EXTRA_FIELDS; + + +/** + * @brief Struct representing a buffer in NvLog + * + * All logging (Print, Regtrace, etc) use these buffers. + */ +struct _NVLOG_BUFFER +{ + /** Function to call when writing to this buffer */ + union + { + NVLOG_BUFFER_PUSHFUNC fn; + + // Pad this union to prevent struct size from varying between 32/64 bit platforms + NvP64 padding; + } push; + + /** Size of the buffer data section */ + NvU32 size; + /** Buffer tag, for easier identification in a dump */ + NvU32 tag; + /** Flags of the buffer, following NVLOG_BUFFER_FLAGS_* DRF's */ + NvU32 flags; + /** Position of the next available byte in the buffer */ + NvU32 pos; + /** Number of threads currently writing to this buffer */ + volatile NvS32 threadCount; + /** Specific buffer types will define their fields here */ + union + { + NVLOG_RING_BUFFER_EXTRA_FIELDS ring; + } extra; + /** Buffer data. 
*/ + NvU8 data[1]; +}; + +#define NVLOG_MAX_BUFFERS_v11 16 +#define NVLOG_MAX_BUFFERS_v12 256 +#define NVLOG_MAX_BUFFERS_v13 3840 + +#define NVLOG_MAX_BUFFERS NVLOG_MAX_BUFFERS_v13 +#define NVLOG_LOGGER_VERSION 13 // v1.3 + +// Due to this file's peculiar location, NvPort may or may not be includable +typedef struct PORT_SPINLOCK PORT_SPINLOCK; +typedef struct PORT_MUTEX PORT_MUTEX; +typedef struct PORT_RWLOCK PORT_RWLOCK; + +#if PORT_IS_KERNEL_BUILD +#include "nvport/nvport.h" +#endif + +/** + * @brief Information about the entire NvLog system + */ +typedef struct _NVLOG_LOGGER +{ + /** NvLog logger version */ + NvU32 version; + /** Logging buffers */ + NVLOG_BUFFER * pBuffers[NVLOG_MAX_BUFFERS]; + /** Index of the first unallocated buffer */ + NvU32 nextFree; + /** Total number of free buffer slots */ + NvU32 totalFree; + /** Lock for some buffer operations */ + PORT_SPINLOCK* mainLock; + /** Lock for creating/deleting pBuffers and accessing them from RmCtrls */ + PORT_MUTEX* buffersLock; + /** Lock for registering/deregistering flush callbacks */ + PORT_RWLOCK *flushCbsLock; +} NVLOG_LOGGER; +extern NVLOG_LOGGER NvLogLogger; + +/** + * NvLog uses two locks: + * - NVLOG_LOGGER::mainLock is used to protect some accesses to pBuffers, or + * an individual pBuffers entry depending on locking flags. + * - NVLOG_LOGGER::buffersLock is used to protect creating/deleting pBuffers and accessing them + * from certain RmCtrl handlers. + * + * Historically, in most contexts, obtaining the RMAPI lock would suffice, and mainLock would + * optionally be used for certain buffers. Ioctl NV_ESC_RM_LOCKLESS_DIAGNOSTIC cannot touch the + * RMAPI lock and needs to access NvLog. The latter operation might race if called at an + * inopportune time: e.g. if the ioctl is called during RM init when KGSP creates/deletes GSP + * NvLog buffers. Using buffersLock is thus necessary to resolve the potential race. + * + * This leads to an unfortunate sequence where mainLock and buffersLock are nested. The latter lock + * cannot be removed as it is used in IRQ paths. + * + * This should be refactored to use a single RWLock that does conditional acquire in possible IRQ + * paths. + */ + +// +// Buffer flags +// + +// Logging to this buffer is disabled +#define NVLOG_BUFFER_FLAGS_DISABLED 0:0 +#define NVLOG_BUFFER_FLAGS_DISABLED_NO 0 +#define NVLOG_BUFFER_FLAGS_DISABLED_YES 1 + +#define NVLOG_BUFFER_FLAGS_TYPE 2:1 +#define NVLOG_BUFFER_FLAGS_TYPE_RING 0 +#define NVLOG_BUFFER_FLAGS_TYPE_NOWRAP 1 +#define NVLOG_BUFFER_FLAGS_TYPE_SYSTEMLOG 2 + +// Expand buffer when full +#define NVLOG_BUFFER_FLAGS_EXPANDABLE 3:3 +#define NVLOG_BUFFER_FLAGS_EXPANDABLE_NO 0 +#define NVLOG_BUFFER_FLAGS_EXPANDABLE_YES 1 + +// Allocate buffer in non paged memory +#define NVLOG_BUFFER_FLAGS_NONPAGED 4:4 +#define NVLOG_BUFFER_FLAGS_NONPAGED_NO 0 +#define NVLOG_BUFFER_FLAGS_NONPAGED_YES 1 + +// +// Type of buffer locking to use +// NONE - No locking performed, for buffers that are inherently single threaded +// STATE - Lock only during state change, do memory copying unlocked +// Don't use with tiny buffers that overflow every write or two.
+// FULL - Keep everything locked for the full duration of the write +// +#define NVLOG_BUFFER_FLAGS_LOCKING 6:5 +#define NVLOG_BUFFER_FLAGS_LOCKING_NONE 0 +#define NVLOG_BUFFER_FLAGS_LOCKING_STATE 1 +#define NVLOG_BUFFER_FLAGS_LOCKING_FULL 2 + +// Store this buffer in OCA minidumps +#define NVLOG_BUFFER_FLAGS_OCA 7:7 +#define NVLOG_BUFFER_FLAGS_OCA_NO 0 +#define NVLOG_BUFFER_FLAGS_OCA_YES 1 + +// Buffer format (not included in registry key) +#define NVLOG_BUFFER_FLAGS_FORMAT 10:8 +#define NVLOG_BUFFER_FLAGS_FORMAT_PRINTF 0 +#define NVLOG_BUFFER_FLAGS_FORMAT_LIBOS_LOG 1 +#define NVLOG_BUFFER_FLAGS_FORMAT_MEMTRACK 2 + +// Never deallocate this buffer until RM is unloaded +#define NVLOG_BUFFER_FLAGS_PRESERVE 11:11 +#define NVLOG_BUFFER_FLAGS_PRESERVE_NO 0 +#define NVLOG_BUFFER_FLAGS_PRESERVE_YES 1 + +// Buffer GPU index +#define NVLOG_BUFFER_FLAGS_GPU_INSTANCE 31:24 + +typedef NvU32 NVLOG_BUFFER_HANDLE; + +// +// Utility macros +// +#define NVLOG_IS_RING_BUFFER(pBuffer) \ + FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _RING, pBuffer->flags) +#define NVLOG_IS_NOWRAP_BUFFER(pBuffer) \ + FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _NOWRAP, pBuffer->flags) + +#define NVLOG_PRINT_BUFFER_SIZE(pBuffer) ((pBuffer)->size) +#define NVLOG_BUFFER_SIZE(pBuffer) \ + (NV_OFFSETOF(NVLOG_BUFFER, data) + NVLOG_PRINT_BUFFER_SIZE(pBuffer)) + +/********************************/ +/********* Filtering **********/ +/********************************/ +// TODO - Remove all this once tools are updated + +#define NVLOG_FILTER_INVALID (~0) + +#define NVLOG_FILTER_VALUE_SIMPLE_NO 0x0 +#define NVLOG_FILTER_VALUE_SIMPLE_YES 0x1 +#define NVLOG_FILTER_VALUE_EXPLICIT_NO 0x2 +#define NVLOG_FILTER_VALUE_EXPLICIT_YES 0x3 + +#define NVLOG_FILTER_PRINT_LEVEL_REGTRACE 1:0 +#define NVLOG_FILTER_PRINT_LEVEL_INFO 3:2 +#define NVLOG_FILTER_PRINT_LEVEL_NOTICE 5:4 +#define NVLOG_FILTER_PRINT_LEVEL_WARNINGS 7:6 +#define NVLOG_FILTER_PRINT_LEVEL_ERRORS 9:8 +#define NVLOG_FILTER_PRINT_LEVEL_HW_ERROR 11:10 +#define NVLOG_FILTER_PRINT_LEVEL_FATAL 13:12 + +#define NVLOG_FILTER_PRINT_BUFFER 18:14 +#define NVLOG_FILTER_REGTRACE_BUFFER 22:19 + +#define NVLOG_FILTER_REGTRACE_LOG_READ 25:23 +#define NVLOG_FILTER_REGTRACE_LOG_WRITE 27:26 +#define NVLOG_FILTER_REGTRACE_BREAK_READ 29:28 +#define NVLOG_FILTER_REGTRACE_BREAK_WRITE 31:30 + +#define NVLOG_FILTER_VALUE_IS_NO(val) ((val & 0x1) == 0) +#define NVLOG_FILTER_VALUE_IS_YES(val) (val & 0x1) +#define NVLOG_FILTER_PRINT_GET_VALUE(level, num) ((num >> (level*2)) & 0x3) + +/** + * @brief Type representing a value of a given 16bit range. + */ +typedef struct _NVLOG_RANGE_16 +{ + NvU16 low; + NvU16 high; + NvU32 value; +} NVLOG_RANGE_16; + + +/** + * @brief Type representing a value of a given 32bit range. + */ +typedef struct _NVLOG_RANGE_32 +{ + NvU32 low; + NvU32 high; + NvU32 value; +} NVLOG_RANGE_32; + +// +// Maximum number of files that have a filter assigned to them. +// +#define NVLOG_MAX_FILES 1 +// +// Maximum number of line rules (both single line and range) allowed per file +// +#define NVLOG_FILELINE_FILTER_MAX_RANGES 1 + +/** + * @brief Internal type for NVLOG_FILELINE_FILTER. + * + * Contains filtering info for a single file. 
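+ *
+ * For example (illustrative only; someFilterValue is a placeholder, not a
+ * defined constant), a rule that applies one filter value to lines 100-200
+ * of the file would occupy a single entry in 'ranges':
+ *
+ *   NVLOG_RANGE_16 r = { 100, 200, someFilterValue };
+ *
+ * Lines that match no range fall back to 'defaultValue'.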
+ */ +typedef struct _NVLOG_FILELINE_FILTER_FILEHASH +{ + /** ID of the file (24bit MD5) */ + NvU32 fileId; + /** Number of elements in the array 'ranges' */ + NvU32 numElems; + /** Value to use if the given value isn't found in the range array */ + NvU32 defaultValue; + /** Array of ranges representing lines in the file */ + NVLOG_RANGE_16 ranges[NVLOG_FILELINE_FILTER_MAX_RANGES]; +} NVLOG_FILELINE_FILTER_FILEHASH; + +/** + * @brief Filter that contains rules that depend on the file and line number. + */ +typedef struct _NVLOG_FILELINE_FILTER +{ + /** Number of elements in the fileHash array */ + NvU32 numFiles; + /** Value to use if a given file isn't found */ + NvU32 defaultValue; + /** Array of file entries, ordered as a hash table */ + NVLOG_FILELINE_FILTER_FILEHASH fileHash[NVLOG_MAX_FILES]; +} NVLOG_FILELINE_FILTER; + +/********************************/ +/********* Print Logger *********/ +/********************************/ + +#define NVLOG_PRINT_LOGGER_VERSION 11 // v1.1 +// Max buffers cannot be over 32. +#define NVLOG_PRINT_MAX_BUFFERS 8 + +#define NVLOG_PRINT_BUFFER_PRIMARY 1 +#define NVLOG_PRINT_BUFFER_SECONDARY 2 +#define NVLOG_PRINT_BUFFER_SYSTEMLOG 3 + +#define NVLOG_PRINT_DESC1_FILEID 23:0 +#define NVLOG_PRINT_DESC1_GPUID 28:24 // 2^5 = 32 possible +#define NVLOG_PRINT_DESC1_MAGIC 31:29 +#define NVLOG_PRINT_DESC1_MAGIC_VALUE 5 + +#define NVLOG_PRINT_DESC2_LINEID 15:0 +#define NVLOG_PRINT_DESC2_GROUPID 17:16 +#define NVLOG_PRINT_DESC2_GROUPID_RM 0 +#define NVLOG_PRINT_DESC2_GROUPID_PMU 1 +#define NVLOG_PRINT_DESC2_OPT_DATA_COUNT 24:18 // number of dwords +#define NVLOG_PRINT_DESC2_OPT_DATA_COUNT_MAX 0x7F +#define NVLOG_PRINT_DESC2_RESERVED 28:25 +#define NVLOG_PRINT_DESC2_MAGIC 31:29 +#define NVLOG_PRINT_DESC2_MAGIC_VALUE 6 + +#define NVLOG_UNKNOWN_GPU_INSTANCE 0x1f + +#define NVLOG_PRINT_MODULE_FILTER_VALUE 1:0 +#define NVLOG_PRINT_MODULE_FILTER_BUFFER 6:2 +#define NVLOG_PRINT_MODULE_FILTER_ENABLED 7:7 + +// +// Regkey fields - These are copied directly from nvRmReg.h +// A copy is necessary as these might be needed on systems that don't +// have nvRmReg.h, such as DVS builds for NvWatch +// +#ifndef NV_REG_STR_RM_NVLOG +#define NV_REG_STR_RM_NVLOG "RMNvLog" +#define NV_REG_STR_RM_NVLOG_BUFFER_FLAGS 7:0 +#define NV_REG_STR_RM_NVLOG_BUFFER_SIZE 23:8 +#define NV_REG_STR_RM_NVLOG_BUFFER_SIZE_DEFAULT ((NVOS_IS_WINDOWS||NVOS_IS_MACINTOSH)?8:250) +#define NV_REG_STR_RM_NVLOG_BUFFER_SIZE_DISABLE 0 +#define NV_REG_STR_RM_NVLOG_RUNTIME_LEVEL 28:25 +#define NV_REG_STR_RM_NVLOG_TIMESTAMP 30:29 +#define NV_REG_STR_RM_NVLOG_TIMESTAMP_NONE 0 +#define NV_REG_STR_RM_NVLOG_TIMESTAMP_32 1 +#define NV_REG_STR_RM_NVLOG_TIMESTAMP_64 2 +#define NV_REG_STR_RM_NVLOG_TIMESTAMP_32_DIFF 3 +#define NV_REG_STR_RM_NVLOG_INITED 31:31 +#define NV_REG_STR_RM_NVLOG_INITED_NO 0 +#define NV_REG_STR_RM_NVLOG_INITED_YES 1 +#endif // NV_REG_STR_RM_NVLOG + + +// +// Arg types: +// 0: Special meaning. End of argument list. +// 1: d, u, x, X, i, o - Integer type +// 2: lld, llu, llx, llX, lli, llo - Long long integer type +// 3: s - string type (size is 0) +// 4: p - pointer type +// 5: c - char type +// 6: f, g, e, F, G, E - floating point type +// 7-14: Unused at the moment, default value is 0 +// 15: Special meaning. Error value - unsupported type. 
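+//
+// Worked example (illustrative): a format string such as "%s=%d @ %p" would
+// be encoded as the argument-type sequence 3 (string), 1 (int), 4 (pointer),
+// terminated by 0 (end of argument list).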
+// +#define NVLOG_PRINT_MAX_ARG_TYPES 0x10 +#define NVLOG_PRINT_ARG_TYPE_ARGLIST_END 0x0 +#define NVLOG_PRINT_ARG_TYPE_INT 0x1 +#define NVLOG_PRINT_ARG_TYPE_LONGLONG 0x2 +#define NVLOG_PRINT_ARG_TYPE_STRING 0x3 +#define NVLOG_PRINT_ARG_TYPE_POINTER 0x4 +#define NVLOG_PRINT_ARG_TYPE_CHAR 0x5 +#define NVLOG_PRINT_ARG_TYPE_FLOAT 0x6 +#define NVLOG_PRINT_ARG_TYPE_ERROR 0xf + + +/** + * @brief Signature of the database required to decode the print logs + * + * The sig1-sig3 values are generated randomly at compile time. + */ +typedef struct _NVLOG_DB_SIGNATURE +{ + NvU32 timestamp; + NvU32 sig1; + NvU32 sig2; + NvU32 sig3; +} NVLOG_DB_SIGNATURE; + +/** + * @brief Filter that contains all rules used to filter DBG_PRINTF calls + */ +typedef struct _NVLOG_PRINT_FILTER +{ + /** The same file:line filter is shared with the Regtrace system */ + NVLOG_FILELINE_FILTER *pFileLineFilter; + /** Filter based on debug levels. Uses NVLOG_FILTER_PRINT_LEVEL_* DRF's */ + NvU32 runtimePrintLevelFilter; + /** Filter based on debug modules. Uses NVLOG_PRINT_MODULE_FILTER_* DRF's */ + NvU8 runtimePrintModuleFilter[NVLOG_MAX_DBG_MODULES]; +} NVLOG_PRINT_FILTER; + + +/** + * @brief Enum representing all possible argument types to DBG_PRINTF + */ +typedef enum _NVLOG_ARGTYPE +{ + NVLOG_ARGTYPE_NONE, + NVLOG_ARGTYPE_INT, + NVLOG_ARGTYPE_LONG_LONG_INT, + NVLOG_ARGTYPE_STRING, + NVLOG_ARGTYPE_POINTER, + NVLOG_ARGTYPE_FLOAT, + NVLOG_ARGTYPE__COUNT +} NVLOG_ARGTYPE; + +// Default flags for the NvLog registry key, used for the single-buffer option or when the registry read fails +#ifndef NVLOG_DEFAULT_FLAGS +#define NVLOG_DEFAULT_FLAGS \ + ( \ + DRF_NUM(_REG_STR_RM, _NVLOG, _BUFFER_FLAGS, \ + ( \ + DRF_DEF(LOG, _BUFFER_FLAGS, _DISABLED, _NO) | \ + DRF_DEF(LOG, _BUFFER_FLAGS, _TYPE, _RING) | \ + DRF_DEF(LOG, _BUFFER_FLAGS, _EXPANDABLE, _NO) | \ + DRF_DEF(LOG, _BUFFER_FLAGS, _NONPAGED, _YES) | \ + DRF_DEF(LOG, _BUFFER_FLAGS, _LOCKING, _STATE) | \ + DRF_DEF(LOG, _BUFFER_FLAGS, _OCA, _YES) \ + )) | \ + DRF_DEF(_REG_STR_RM, _NVLOG, _BUFFER_SIZE, _DEFAULT) | \ + DRF_NUM(_REG_STR_RM, _NVLOG, _RUNTIME_LEVEL, 0) | \ + DRF_DEF(_REG_STR_RM, _NVLOG, _TIMESTAMP, _64) | \ + DRF_DEF(_REG_STR_RM, _NVLOG, _INITED, _YES) \ + ) +#endif // NVLOG_DEFAULT_FLAGS + +/** + * @brief General info about the NvLog Print system + */ +typedef struct _NVLOG_PRINT_LOGGER +{ + /** NvLog print logger version */ + NvU32 version; + /** Runtime argument sizes (16 different arglist values) */ + NvU8 runtimeSizes[NVLOG_PRINT_MAX_ARG_TYPES]; + /** Database signature for decoding */ + NVLOG_DB_SIGNATURE signature; + /** Filter buffer for print statements */ + NVLOG_PRINT_FILTER filter; + /** Flags for all NvLog print buffers */ + NvU32 flags; + /** Buffer indices for all nvlog buffers. buffers[1] is default.
*/ + NvU32 buffers[NVLOG_PRINT_MAX_BUFFERS]; + /** Initialized flag, set to true after nvlogPrintInit has executed */ + NvBool initialized; + /** Paused flag, set to true while print logging is paused */ + NvBool paused; +} NVLOG_PRINT_LOGGER; +extern NVLOG_PRINT_LOGGER NvLogPrintLogger; + +#define NVLOG_PRINT_BUFFER_TAG(_i) NvU32_BUILD('t','r','p','0' + (_i)) + +/********************************/ +/********** Regtrace **********/ +/********************************/ + +#define NVLOG_REGTRACE_LOGGER_VERSION 10 // v1.0 +#define NVLOG_REGTRACE_MAX_BUFFERS 4 + +#define NVLOG_REGTRACE_READ 0 +#define NVLOG_REGTRACE_WRITE 1 + +#define NVLOG_REGTRACE_DESC1_FILEID NVLOG_PRINT_DESC1_FILEID +#define NVLOG_REGTRACE_DESC1_GPUID NVLOG_PRINT_DESC1_GPUID +#define NVLOG_REGTRACE_DESC1_MAGIC NVLOG_PRINT_DESC1_MAGIC +#define NVLOG_REGTRACE_DESC1_MAGIC_VALUE (NVLOG_PRINT_DESC1_MAGIC_VALUE-1) + +#define NVLOG_REGTRACE_DESC2_LINEID 15:0 +#define NVLOG_REGTRACE_DESC2_READWRITE 16:16 +#define NVLOG_REGTRACE_DESC2_READWRITE_READ NVLOG_REGTRACE_READ +#define NVLOG_REGTRACE_DESC2_READWRITE_WRITE NVLOG_REGTRACE_WRITE +#define NVLOG_REGTRACE_DESC2_REGSIZE 18:17 +#define NVLOG_REGTRACE_DESC2_REGSIZE_8 0 +#define NVLOG_REGTRACE_DESC2_REGSIZE_16 1 +#define NVLOG_REGTRACE_DESC2_REGSIZE_32 2 +#define NVLOG_REGTRACE_DESC2_REGSIZE_64 3 +#define NVLOG_REGTRACE_DESC2_THREADID 28:19 +#define NVLOG_REGTRACE_DESC2_MAGIC 31:29 +#define NVLOG_REGTRACE_DESC2_MAGIC_VALUE 3 + +/** + * @brief Single entry in an NvLog Regtrace buffer. + */ +typedef struct _NVLOG_REGTRACE_RECORD +{ + /** Uses NVLOG_REGTRACE_DESC1_* DRF's */ + NvU32 desc1; + /** Uses NVLOG_REGTRACE_DESC2_* DRF's */ + NvU32 desc2; + /** Address of the register being accessed */ + NvU32 address; + /** Value that was read/written */ + NvU32 value; +} NVLOG_REGTRACE_RECORD; + + + +#define NVLOG_REGTRACE_FILTER_MAX_RANGES 256 + +// Regtrace shares the file:line filter with print + + +/** + * @brief Filter that contains all rules used to filter register access logging + */ +typedef struct _NVLOG_REGTRACE_FILTER +{ + /** Number of elements in the 'ranges' array */ + NvU32 numRanges; + /** File:line based filter. Shared with NvLog print system */ + NVLOG_FILELINE_FILTER *pFileLineFilter; + /** Range array for filtering based on register addresses */ + NVLOG_RANGE_32 ranges[NVLOG_REGTRACE_FILTER_MAX_RANGES]; +} NVLOG_REGTRACE_FILTER; + +/** + * @brief General info about the NvLog Regtrace system + */ +typedef struct _NVLOG_REGTRACE_LOGGER +{ + /** NvLog regtrace logger version */ + NvU32 version; + /** Filter buffer for regtrace statements */ + NVLOG_REGTRACE_FILTER filter; + /** Buffer indices for all NvLog buffers.
First element is default buffer */ + NvU32 buffers[NVLOG_REGTRACE_MAX_BUFFERS]; +} NVLOG_REGTRACE_LOGGER; + +#endif // _NVLOG_DEFS_H_ diff --git a/src/common/inc/nvlog_inc.h b/src/common/inc/nvlog_inc.h new file mode 100644 index 0000000..c40c64f --- /dev/null +++ b/src/common/inc/nvlog_inc.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2016 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +// +// This file must not have include guards; it is supposed to be included +// multiple times - once in a precompiled header, once through noprecomp.h +// + +// WAR for a GCC precompiled headers problem +#if !defined(NV_RM_PRECOMPILED_HEADER) +#include "nvlog_inc2.h" + +// +// If noprecomp is not included, this will not expand and will result in an +// undefined identifier. Hopefully, the meaningful name will hint at the +// underlying problem. +// +#define ___please_include_noprecomp_h___ + +#endif diff --git a/src/common/inc/nvlog_inc2.h b/src/common/inc/nvlog_inc2.h new file mode 100644 index 0000000..7f10150 --- /dev/null +++ b/src/common/inc/nvlog_inc2.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013,2016-2017,2020-2020 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ +#ifndef _NVLOG_INC2_H_ +#define _NVLOG_INC2_H_ +// +// Include the auto-generated g_$(filename)-nvlog.h header. The file contains +// information about the trace statements that was pulled out by the NvLog preprocessor. +// NVLOG_INCLUDE is defined by make at compile time, for every source file. +// +// The four lines of macros below are some trickiness needed to make this work. +// +#if (defined(NVLOG_ENABLED) || defined(NV_MODS)) && defined(NVLOG_INCLUDE) && !defined(NVLOG_PARSING) +#if NVLOG_ENABLED || defined(NV_MODS) + +#ifndef NVLOG_FILEID // Acts as an include guard +#define NVLOG_INCLUDE3(a) #a +#define NVLOG_INCLUDE2(a) NVLOG_INCLUDE3 a +#define NVLOG_INCLUDE1 NVLOG_INCLUDE2((NVLOG_INCLUDE)) +#include NVLOG_INCLUDE1 +#endif // NVLOG_FILEID + +#endif // NVLOG_ENABLED +#endif // defined(NVLOG_ENABLED) && defined(NVLOG_INCLUDE) + + +#endif // _NVLOG_INC2_H_ diff --git a/src/common/inc/nvop.h b/src/common/inc/nvop.h new file mode 100644 index 0000000..98b3679 --- /dev/null +++ b/src/common/inc/nvop.h @@ -0,0 +1,122 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVOP_H +#define NVOP_H + +#define NVOP_REVISION_ID 0x00000100 + +// subfunction 0 is common use: NV_ACPI_ALL_FUNC_SUPPORT +// #define NVOP_FUNC_SUPPORT 0x00000000 // Bit list of supported functions* +#define NVOP_FUNC_DISPLAYSTATUS 0x00000005 // Query the Display Hot-Key** +#define NVOP_FUNC_MDTL 0x00000006 // Query Display Toggle List** +#define NVOP_FUNC_HCSMBADDR 0x00000007 // Get the SBIOS SMBus address for hybrid uController +#define NVOP_FUNC_GETOBJBYTYPE 0x00000010 // Get the Firmware Object* +#define NVOP_FUNC_GETALLOBJS 0x00000011 // Get the directory and all Objects * +#define NVOP_FUNC_OPTIMUSCAPS 0x0000001A // Optimus Capabilities*** +#define NVOP_FUNC_OPTIMUSFLAG 0x0000001B // Update GPU MultiFunction State to SBIOS +// * Required if any other functions are used.
+// ** Required for Optimus-specific display hotkey functionality +// *** Required for Optimus-specific dGPU subsystem power control + +// Function 1A: OPTIMUSCAPS +// Args +#define NVOP_FUNC_OPTIMUSCAPS_FLAGS 0:0 +#define NVOP_FUNC_OPTIMUSCAPS_FLAGS_ABSENT 0x00000000 +#define NVOP_FUNC_OPTIMUSCAPS_FLAGS_PRESENT 0x00000001 +#define NVOP_FUNC_OPTIMUSCAPS_CFG_SPACE_OWNER_TARGET 1:1 +#define NVOP_FUNC_OPTIMUSCAPS_CFG_SPACE_OWNER_TARGET_SBIOS 0x00000000 +#define NVOP_FUNC_OPTIMUSCAPS_CFG_SPACE_OWNER_TARGET_DRIVER 0x00000001 +#define NVOP_FUNC_OPTIMUSCAPS_CFG_SPACE_OWNER_WR_EN 2:2 +#define NVOP_FUNC_OPTIMUSCAPS_CFG_SPACE_OWNER_WR_EN_FALSE 0x00000000 +#define NVOP_FUNC_OPTIMUSCAPS_CFG_SPACE_OWNER_WR_EN_TRUE 0x00000001 +#define NVOP_FUNC_OPTIMUSCAPS_POWER_CONTROL 25:24 +#define NVOP_FUNC_OPTIMUSCAPS_POWER_CONTROL_DONOT_POWER_DOWN_DGPU 0x00000002 +#define NVOP_FUNC_OPTIMUSCAPS_POWER_CONTROL_POWER_DOWN_DGPU 0x00000003 + +// Returns +#define NVOP_FUNC_OPTIMUSCAPS_OPTIMUS_STATE 0:0 +#define NVOP_FUNC_OPTIMUSCAPS_OPTIMUS_STATE_DISABLED 0x00000000 +#define NVOP_FUNC_OPTIMUSCAPS_OPTIMUS_STATE_ENABLED 0x00000001 +#define NVOP_FUNC_OPTIMUSCAPS_DGPU_POWER 4:3 +#define NVOP_FUNC_OPTIMUSCAPS_DGPU_POWER_OFF 0x00000000 +#define NVOP_FUNC_OPTIMUSCAPS_DGPU_POWER_RESERVED1 0x00000001 +#define NVOP_FUNC_OPTIMUSCAPS_DGPU_POWER_RESERVED2 0x00000002 +#define NVOP_FUNC_OPTIMUSCAPS_DGPU_POWER_STABILIZED 0x00000003 +#define NVOP_FUNC_OPTIMUSCAPS_DGPU_HOTPLUG_CAPABILITIES 6:6 +#define NVOP_FUNC_OPTIMUSCAPS_DGPU_HOTPLUG_CAPABILITIES_COCONNECTED 0x00000001 +#define NVOP_FUNC_OPTIMUSCAPS_DGPU_MUXED_DDC 7:7 +#define NVOP_FUNC_OPTIMUSCAPS_DGPU_MUXED_DDC_FALSE 0x00000000 +#define NVOP_FUNC_OPTIMUSCAPS_DGPU_MUXED_DDC_TRUE 0x00000001 +#define NVOP_FUNC_OPTIMUSCAPS_CFG_SPACE_OWNER_ACTUAL 8:8 +#define NVOP_FUNC_OPTIMUSCAPS_CFG_SPACE_OWNER_ACTUAL_SBIOS 0x00000000 +#define NVOP_FUNC_OPTIMUSCAPS_CFG_SPACE_OWNER_ACTUAL_DRIVER 0x00000001 +#define NVOP_FUNC_OPTIMUSCAPS_OPTIMUS_CAPABILITIES 26:24 +#define NVOP_FUNC_OPTIMUSCAPS_OPTIMUS_CAPABILITIES_ABSENT 0x00000000 +#define NVOP_FUNC_OPTIMUSCAPS_OPTIMUS_CAPABILITIES_DYNAMIC_POWER_CONTROL 0x00000001 +#define NVOP_FUNC_OPTIMUSCAPS_OPTIMUS_HDAUDIO_CAPABILITIES 28:27 +#define NVOP_FUNC_OPTIMUSCAPS_OPTIMUS_HDAUDIO_CAPABILITIES_ABSENT 0x00000000 +#define NVOP_FUNC_OPTIMUSCAPS_OPTIMUS_HDAUDIO_CAPABILITIES_DISABLED 0x00000001 +#define NVOP_FUNC_OPTIMUSCAPS_OPTIMUS_HDAUDIO_CAPABILITIES_PRESENT 0x00000002 + +// Function 1B: OPTIMUSFLAG +// Args +#define NVOP_FUNC_OPTIMUSFLAG_AUDIOCODEC_CONTROL 0:0 +#define NVOP_FUNC_OPTIMUSFLAG_AUDIOCODEC_CONTROL_DISABLE 0x00000000 +#define NVOP_FUNC_OPTIMUSFLAG_AUDIOCODEC_CONTROL_ENABLE 0x00000001 + +#define NVOP_FUNC_OPTIMUSFLAG_AUDIOCODEC_STATECHANGE_REQUEST 1:1 +#define NVOP_FUNC_OPTIMUSFLAG_AUDIOCODEC_STATECHANGE_REQUEST_IGNORE 0x00000000 +#define NVOP_FUNC_OPTIMUSFLAG_AUDIOCODEC_STATECHANGE_REQUEST_EXECUTE 0x00000001 + +#define NVOP_FUNC_OPTIMUSFLAG_APPLICATIONS_COUNT 9:2 + +#define NVOP_FUNC_OPTIMUSFLAG_APPLICATIONS_COUNT_CHANGE_REQUEST 10:10 +#define NVOP_FUNC_OPTIMUSFLAG_APPLICATIONS_COUNT_CHANGE_REQUEST_IGNORE 0x00000000 +#define NVOP_FUNC_OPTIMUSFLAG_APPLICATIONS_COUNT_CHANGE_REQUEST_EXECUTE 0x00000001 + + +// Function 1B: OPTIMUSFLAG +// Returns +#define NVOP_RET_OPTIMUSFLAG_AUDIOCODEC_STATE 0:0 +#define NVOP_RET_OPTIMUSFLAG_AUDIOCODEC_STATE_DISABLE 0x00000000 +#define NVOP_RET_OPTIMUSFLAG_AUDIOCODEC_STATE_ENABLE 0x00000001 + +#define NVOP_RET_OPTIMUSFLAG_POLICY 3:2 +#define NVOP_RET_OPTIMUSFLAG_POLICY_GPU_POWEROFF 0x00000000 +#define
NVOP_RET_OPTIMUSFLAG_POLICY_GPU_POWERON 0x00000001 + +#define NVOP_RET_OPTIMUSFLAG_POLICYCHANGE_REQUEST 4:4 +#define NVOP_RET_OPTIMUSFLAG_POLICYCHANGE_REQUEST_IGNORE 0x00000000 +#define NVOP_RET_OPTIMUSFLAG_POLICYCHANGE_REQUEST_EXECUTE 0x00000001 + +#define NVOP_RET_OPTIMUSFLAG_FORCE_GPU_POLICY 6:5 +#define NVOP_RET_OPTIMUSFLAG_FORCE_GPU_POLICY_OPTIMUS 0x00000000 +#define NVOP_RET_OPTIMUSFLAG_FORCE_GPU_POLICY_IGPU 0x00000001 +#define NVOP_RET_OPTIMUSFLAG_FORCE_GPU_POLICY_DGPU 0x00000002 + +#define NVOP_RET_OPTIMUSFLAG_FORCE_GPU_REQUEST 7:7 +#define NVOP_RET_OPTIMUSFLAG_FORCE_GPU_REQUEST_IGNORE 0x00000000 +#define NVOP_RET_OPTIMUSFLAG_FORCE_GPU_REQUEST_EXECUTE 0x00000001 +#endif // NVOP_H + diff --git a/src/common/inc/nvrmcontext.h b/src/common/inc/nvrmcontext.h new file mode 100644 index 0000000..544afae --- /dev/null +++ b/src/common/inc/nvrmcontext.h @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __NVRMCONTEXT_H__ +#define __NVRMCONTEXT_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "rs_access.h" + +/* + * A wrapper structure that makes the RMAPI, as abstracted in one + * location/context, accessible to other locations/contexts in a unified way. + * + * An nvRMContext can be created on the fly or stored preinitialized in + * objects. It has no mutable state; it is just a collection of static + * parameters that make access to RM possible. + * + * This is a C interface for maximal compatibility, and it is intended to be + * used both in the kernel and in userspace. + * + */ +typedef struct nvRMContextRec nvRMContext, *nvRMContextPtr; + +struct nvRMContextRec { + /* + * The RM client of the calling context. In normal circumstances, the + * callee should pass this as the hClient argument. + */ + NvU32 clientHandle; + + /* User data field for the caller: to be freely used. */ + void *owner; + + /* + * RMAPI function wrappers: it's enough to fill in only those functions + * that the caller knows will be needed in each case. For now, we list + * function pointers needed by nv_smg.c -- feel free to add more RMAPI + * functions when necessary.
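+ *
+ * Illustrative sketch of filling in a context (myRmAlloc, myRmControl and
+ * myRmFree are hypothetical environment-specific wrappers, not part of this
+ * interface, and hClient is the caller's existing RM client handle):
+ *
+ *   nvRMContext rmctx = { 0 };
+ *   rmctx.clientHandle = hClient;
+ *   rmctx.alloc = myRmAlloc;
+ *   rmctx.control = myRmControl;
+ *   rmctx.free = myRmFree;
+ */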
+ NvU32 (*allocRoot) (nvRMContextPtr rmctx, NvU32 *phClient); + NvU32 (*alloc) (nvRMContextPtr rmctx, NvU32 hClient, NvU32 hParent, NvU32 hObject, NvU32 hClass, void *pAllocParms); + NvU32 (*free) (nvRMContextPtr rmctx, NvU32 hClient, NvU32 hParent, NvU32 hObject); + NvU32 (*control) (nvRMContextPtr rmctx, NvU32 hClient, NvU32 hObject, NvU32 cmd, void *pParams, NvU32 paramsSize); +}; + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // __NVRMCONTEXT_H__ diff --git a/src/common/inc/pex.h b/src/common/inc/pex.h new file mode 100644 index 0000000..19b3e2e --- /dev/null +++ b/src/common/inc/pex.h @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef PEX_H +#define PEX_H + +#define PEX_REVISION_ID 0x00000002 + +// subfunction 0 is common use: NV_ACPI_ALL_FUNC_SUPPORT +// #define PEX_FUNC_GETSUPPORTFUNCTION 0x00000000 // Get supported function +#define PEX_FUNC_GETSLOTINFO 0x00000001 // Get PCI Express Slot Information +#define PEX_FUNC_GETSLOTNUMBER 0x00000002 // Get PCI Express Slot Number +#define PEX_FUNC_GETVENDORTOKENID 0x00000003 // Get PCI Express Vendor Specific Token ID strings +#define PEX_FUNC_GETPCIBUSCAPS 0x00000004 // Get PCI Express Root Bus Capabilities +#define PEX_FUNC_IGNOREPCIBOOTCONFIG 0x00000005 // Indication to OS that PCI Boot config can be ignored +#define PEX_FUNC_GETLTRLATENCY 0x00000006 // Get PCI Express Latency Tolerance Reporting Info +#define PEX_FUNC_NAMEPCIDEVICE 0x00000007 // Get name of PCI or PCIE device +#define PEX_FUNC_SETLTRLATENCY 0x00000008 // Set PCI Express Latency Tolerance Reporting Values +#define PEX_FUNC_AUXPOWERLIMIT 0x0000000A // Set Aux power limit +#define PEX_FUNC_PEXRST_DELAY 0x0000000B // Set PEX reset delay + + +/* + * 0h: Denied. + * Indicates that the platform cannot support the power requested. + * 1h: Granted. + * Indicates that the device is permitted to draw the requested auxiliary power. + * 2h: Granted. + * Indicates that the platform will not remove main power from the slot + * while the system is in S0. + */ +#define NV_AUX_POWER_REQUEST_STATUS 1:0 +#define NV_AUX_POWER_REQUEST_STATUS_DENIED 0x00 +#define NV_AUX_POWER_REQUEST_STATUS_GRANTED_WITHOUT_12V_POWER 0x01 +#define NV_AUX_POWER_REQUEST_STATUS_GRANTED_WITH_12V_POWER 0x02 + +/* + * Retry, with interval. + * Bit 4 is a status bit.
If set, it indicates that the platform cannot support + * the power requested at this time, but it may be able to do so in the future. + * Bits 3:0 contains the waiting time, in seconds, after which request can be made again. + */ +#define NV_AUX_POWER_REQUEST_STATUS_RETRY_LATER 4:0 +#define NV_AUX_POWER_REQUEST_STATUS_RETRY_LATER_STATUS 4:4 +#define NV_AUX_POWER_REQUEST_STATUS_RETRY_LATER_STATUS_FALSE 0x0 +#define NV_AUX_POWER_REQUEST_STATUS_RETRY_LATER_STATUS_TRUE 0x1 +#define NV_AUX_POWER_REQUEST_STATUS_RETRY_LATER_INTERVAL 3:0 +#define NV_AUX_POWER_REQUEST_STATUS_RETRY_LATER_INTERVAL_MIN 0x1 +#define NV_AUX_POWER_REQUEST_STATUS_RETRY_LATER_INTERVAL_MAX 0xF + +#endif // PEX_H diff --git a/src/common/inc/rmosxfac.h b/src/common/inc/rmosxfac.h new file mode 100644 index 0000000..5ebfdae --- /dev/null +++ b/src/common/inc/rmosxfac.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2003 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _RMOSXFAC_H_ +#define _RMOSXFAC_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: RMOSXFAC.H * +* Declarations for common OS interface functions. * +* * +\***************************************************************************/ + +#ifdef __cplusplus +extern "C" { +#endif +extern NvS32 RmInitRm(void); +extern NvS32 RmDestroyRm(void); + +#ifdef __cplusplus +} +#endif + +#endif // _RMOSXFAC_H_ diff --git a/src/common/inc/swref/common_def_nvlink.h b/src/common/inc/swref/common_def_nvlink.h new file mode 100644 index 0000000..5dabc84 --- /dev/null +++ b/src/common/inc/swref/common_def_nvlink.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef COMMON_DEF_NVLINK_H
+#define COMMON_DEF_NVLINK_H
+
+//
+// Arch CONNECTION defines, replacing forceconfig. See Bugs 1665737,
+// 1665734 and 1734252.
+// This per-link connection state is passed up from chiplib
+// and can be controlled on the command line.
+// The max number of connections is specified in __SIZE_1.
+//
+#define NV_NVLINK_ARCH_CONNECTION 31:0
+#define NV_NVLINK_ARCH_CONNECTION__SIZE_1 32
+#define NV_NVLINK_ARCH_CONNECTION_DISABLED 0x00000000
+#define NV_NVLINK_ARCH_CONNECTION_PEER_MASK 7:0
+#define NV_NVLINK_ARCH_CONNECTION_ENABLED 8:8
+#define NV_NVLINK_ARCH_CONNECTION_PHYSICAL_LINK 21:16
+#define NV_NVLINK_ARCH_CONNECTION_RESERVED 29:20
+#define NV_NVLINK_ARCH_CONNECTION_PEERS_COMPUTE_ONLY 30:30
+#define NV_NVLINK_ARCH_CONNECTION_CPU 31:31
+
+#endif // COMMON_DEF_NVLINK_H
diff --git a/src/common/inc/swref/published/disp/v02_04/dev_disp.h b/src/common/inc/swref/published/disp/v02_04/dev_disp.h
new file mode 100644
index 0000000..c3c75db
--- /dev/null
+++ b/src/common/inc/swref/published/disp/v02_04/dev_disp.h
@@ -0,0 +1,58 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef __v02_04_dev_disp_h__ +#define __v02_04_dev_disp_h__ + +#define NV_PDISP_DSI_RM_INTR_DISPATCH 0x00610088 /* R--4R */ +#define NV_PDISP_DSI_RM_INTR_DISPATCH_HEAD(i) (24+(i)):(24+(i)) /* R--VF */ +#define NV_PDISP_DSI_RM_INTR_DISPATCH_HEAD__SIZE_1 4 /* */ +#define NV_PDISP_DSI_RM_INTR_DISPATCH_HEAD_NOT_PENDING 0x00000000 /* */ +#define NV_PDISP_DSI_RM_INTR_DISPATCH_HEAD_PENDING 0x00000001 /* */ + +#define NV_PDISP_DSI_RM_INTR_HEAD(i) (0x006100BC+(i)*2048) /* RW-4A */ +#define NV_PDISP_DSI_RM_INTR_HEAD__SIZE_1 4 /* */ +#define NV_PDISP_DSI_RM_INTR_HEAD_VBLANK 0:0 /* RWIVF */ +#define NV_PDISP_DSI_RM_INTR_HEAD_VBLANK_INIT 0x00000000 /* R-I-V */ +#define NV_PDISP_DSI_RM_INTR_HEAD_VBLANK_NOT_PENDING 0x00000000 /* R---V */ +#define NV_PDISP_DSI_RM_INTR_HEAD_VBLANK_PENDING 0x00000001 /* R---V */ +#define NV_PDISP_DSI_RM_INTR_HEAD_VBLANK_RESET 0x00000001 /* -W--V */ + +#define NV_PDISP_DSI_RM_INTR_HEAD_PMU_RG_LINE 29:29 /* RWIVF */ +#define NV_PDISP_DSI_RM_INTR_HEAD_PMU_RG_LINE_INIT 0x00000000 /* R-I-V */ +#define NV_PDISP_DSI_RM_INTR_HEAD_PMU_RG_LINE_NOT_PENDING 0x00000000 /* R---V */ +#define NV_PDISP_DSI_RM_INTR_HEAD_PMU_RG_LINE_PENDING 0x00000001 /* R---V */ +#define NV_PDISP_DSI_RM_INTR_HEAD_PMU_RG_LINE_RESET 0x00000001 /* -W--V */ + +#define NV_PDISP_DSI_RM_INTR_HEAD_RM_RG_LINE 31:31 /* RWIVF */ +#define NV_PDISP_DSI_RM_INTR_HEAD_RM_RG_LINE_INIT 0x00000000 /* R-I-V */ +#define NV_PDISP_DSI_RM_INTR_HEAD_RM_RG_LINE_NOT_PENDING 0x00000000 /* R---V */ +#define NV_PDISP_DSI_RM_INTR_HEAD_RM_RG_LINE_PENDING 0x00000001 /* R---V */ +#define NV_PDISP_DSI_RM_INTR_HEAD_RM_RG_LINE_RESET 0x00000001 /* -W--V */ + +#define NV_PDISP_PIPE_IN_LOADV_COUNTER(i) (0x00616118+(i)*2048) /* RW-4A */ +#define NV_PDISP_PIPE_IN_LOADV_COUNTER__SIZE_1 4 /* */ +#define NV_PDISP_PIPE_IN_LOADV_COUNTER_VALUE 31:0 /* RWIUF */ +#define NV_PDISP_PIPE_IN_LOADV_COUNTER_VALUE_INIT 0x00000000 /* RWI-V */ +#define NV_PDISP_PIPE_IN_LOADV_COUNTER_VALUE_ZERO 0x00000000 /* RW--V */ +#endif // __v02_04_dev_disp_h__ diff --git a/src/common/inc/swref/published/disp/v03_00/dev_disp.h b/src/common/inc/swref/published/disp/v03_00/dev_disp.h new file mode 100644 index 0000000..8e71b0a --- /dev/null +++ b/src/common/inc/swref/published/disp/v03_00/dev_disp.h @@ -0,0 +1,281 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __v03_00_dev_disp_h__ +#define __v03_00_dev_disp_h__ +#define NV_PDISP_CHN_NUM_CORE 0 /* */ +#define NV_PDISP_CHN_NUM_WIN(i) (1+(i)) /* */ +#define NV_PDISP_CHN_NUM_WIN__SIZE_1 32 /* */ +#define NV_PDISP_CHN_NUM_WINIM(i) (33+(i)) /* */ +#define NV_PDISP_CHN_NUM_WINIM__SIZE_1 32 /* */ +#define NV_PDISP_CHN_NUM_CURS(i) (73+(i)) /* */ +#define NV_PDISP_CHN_NUM_CURS__SIZE_1 8 /* */ +#define NV_PDISP_FE_HW_SYS_CAP_HEAD_EXISTS(i) (0+(i)):(0+(i)) /* R--VF */ +#define NV_PDISP_FE_HW_SYS_CAP_HEAD_EXISTS__SIZE_1 8 /* */ +#define NV_PDISP_FE_HW_SYS_CAP_HEAD_EXISTS_YES 0x00000001 /* R---V */ +#define NV_PDISP_FE_SW 0x00640FFF:0x00640000 /* RW--D */ +#define NV_PDISP_SF_USER_0 0x006F03FF:0x006F0000 /* RW--D */ +#define NV_UDISP_HASH_BASE 0x00000000 /* */ +#define NV_UDISP_HASH_LIMIT 0x00001FFF /* */ +#define NV_UDISP_OBJ_MEM_BASE 0x00002000 /* */ +#define NV_UDISP_OBJ_MEM_LIMIT 0x0000FFFF /* */ +#define NV_UDISP_HASH_TBL_CLIENT_ID (1*32+13):(1*32+0) /* RWXVF */ +#define NV_UDISP_HASH_TBL_INSTANCE (1*32+24):(1*32+14) /* RWXUF */ +#define NV_UDISP_HASH_TBL_CHN (1*32+31):(1*32+25) /* RWXUF */ +#define NV_DMA_TARGET_NODE (0*32+1):(0*32+0) /* RWXVF */ +#define NV_DMA_TARGET_NODE_PHYSICAL_NVM 0x00000001 /* RW--V */ +#define NV_DMA_TARGET_NODE_PHYSICAL_PCI 0x00000002 /* RW--V */ +#define NV_DMA_TARGET_NODE_PHYSICAL_PCI_COHERENT 0x00000003 /* RW--V */ +#define NV_DMA_ACCESS (0*32+2):(0*32+2) /* RWXVF */ +#define NV_DMA_ACCESS_READ_ONLY 0x00000000 /* RW--V */ +#define NV_DMA_ACCESS_READ_AND_WRITE 0x00000001 /* RW--V */ +#define NV_DMA_KIND (0*32+20):(0*32+20) /* RWXVF */ +#define NV_DMA_KIND_PITCH 0x00000000 /* RW--V */ +#define NV_DMA_KIND_BLOCKLINEAR 0x00000001 /* RW--V */ +#define NV_DMA_ADDRESS_BASE_LO (1*32+31):(1*32+0) /* RWXUF */ +#define NV_DMA_ADDRESS_BASE_HI (2*32+6):(2*32+0) /* RWXUF */ +#define NV_DMA_ADDRESS_LIMIT_LO (3*32+31):(3*32+0) /* RWXUF */ +#define NV_DMA_ADDRESS_LIMIT_HI (4*32+6):(4*32+0) /* RWXUF */ +#define NV_DMA_SIZE 20 /* */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR_CORE 0x00680000 /* */ +#define NV_UDISP_FE_CHN_ARMED_BASEADR_CORE (0x00680000+32768) /* */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR_WIN(i) ((0x00690000+(i)*4096)) /* */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR_WINIM(i) ((0x00690000+((i+32)*4096))) /* */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR_CURS(i) (0x006D8000+(i)*4096) /* RW-4A */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR(i) ((i)>0?(((0x00690000+(i-1)*4096))):0x00680000) /* */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR__SIZE_1 81 /* */ +#define NV_PDISP_RG_DPCA(i) (0x00616330+(i)*2048) /* R--4A */ +#define NV_PDISP_RG_DPCA__SIZE_1 8 /* */ +#define NV_PDISP_RG_DPCA_LINE_CNT 15:0 /* R--UF */ +#define NV_PDISP_RG_DPCA_FRM_CNT 31:16 /* R--UF */ +#define NV_PDISP_FE_FLIPLOCK 0x0061206C /* RW-4R */ +#define NV_PDISP_FE_FLIPLOCK_LSR_MIN_TIME 23:0 /* RWIVF */ + +#define NV_PDISP_FE_PBBASE_WIN__SIZE_1 32 /* */ +#define NV_PDISP_FE_PBBASE_WINIM__SIZE_1 32 /* */ + +#define NV_PDISP_FE_CHNCTL_CORE 0x006104E0 /* RW-4R */ +#define NV_PDISP_FE_CHNCTL_CORE_ALLOCATION 0:0 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_CORE_ALLOCATION_ALLOCATE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CORE_PUTPTR_WRITE 4:4 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_CORE_PUTPTR_WRITE_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CORE_PUTPTR_WRITE_ENABLE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CORE_SKIP_NOTIF 9:9 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_CORE_SKIP_NOTIF_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CORE_SKIP_NOTIF_ENABLE 0x00000001 /* RW--V */ +#define 
NV_PDISP_FE_CHNCTL_CORE_IGNORE_INTERLOCK 11:11 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_CORE_IGNORE_INTERLOCK_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CORE_IGNORE_INTERLOCK_ENABLE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CORE_INTR_DURING_SHTDWN 15:15 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_CORE_INTR_DURING_SHTDWN_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CORE_INTR_DURING_SHTDWN_ENABLE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CORE_TRASH_MODE 14:13 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_CORE_TRASH_MODE_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CORE_TRASH_MODE_TRASH_ONLY 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CORE_TRASH_MODE_TRASH_AND_ABORT 0x00000002 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CURS(i) (0x00610604+(i)*4) /* RW-4A */ +#define NV_PDISP_FE_CHNCTL_CURS__SIZE_1 8 /* */ +#define NV_PDISP_FE_CHNCTL_CURS_ALLOCATION 0:0 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_CURS_ALLOCATION_ALLOCATE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CURS_IGNORE_INTERLOCK 11:11 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_CURS_IGNORE_INTERLOCK_ENABLE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CURS_IGNORE_INTERLOCK_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CURS_LOCK_PIO_FIFO 4:4 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_CURS_LOCK_PIO_FIFO_ENABLE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CURS_LOCK_PIO_FIFO_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CURS_TRASH_MODE 14:13 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_CURS_TRASH_MODE_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CURS_TRASH_MODE_TRASH_ONLY 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_CURS_TRASH_MODE_TRASH_AND_ABORT 0x00000002 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN(i) (0x006104E4+(i)*4) /* RW-4A */ +#define NV_PDISP_FE_CHNCTL_WIN_ALLOCATION 0:0 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_WIN_ALLOCATION_ALLOCATE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_CONNECTION 1:1 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_WIN_CONNECTION_CONNECT 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_CONNECTION_DISCONNECT 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_PUTPTR_WRITE 4:4 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_WIN_PUTPTR_WRITE_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_PUTPTR_WRITE_ENABLE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_SKIP_SYNCPOINT 6:6 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_WIN_SKIP_SYNCPOINT_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_SKIP_SYNCPOINT_ENABLE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_IGNORE_TIMESTAMP 7:7 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_WIN_IGNORE_TIMESTAMP_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_IGNORE_TIMESTAMP_ENABLE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_IGNORE_PI 8:8 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_WIN_IGNORE_PI_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_IGNORE_PI_ENABLE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_SKIP_NOTIF 9:9 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_WIN_SKIP_NOTIF_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_SKIP_NOTIF_ENABLE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_SKIP_SEMA 10:10 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_WIN_SKIP_SEMA_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_SKIP_SEMA_ENABLE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_IGNORE_INTERLOCK 11:11 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_WIN_IGNORE_INTERLOCK_DISABLE 
0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_IGNORE_INTERLOCK_ENABLE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_TRASH_MODE 14:13 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_WIN_TRASH_MODE_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_TRASH_MODE_TRASH_ONLY 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WIN_TRASH_MODE_TRASH_AND_ABORT 0x00000002 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WINIM(i) (0x00610564+(i)*4) /* RW-4A */ +#define NV_PDISP_FE_CHNCTL_WINIM_ALLOCATION 0:0 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_WINIM_ALLOCATION_ALLOCATE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WINIM_PUTPTR_WRITE 4:4 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_WINIM_PUTPTR_WRITE_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WINIM_PUTPTR_WRITE_ENABLE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WINIM_IGNORE_INTERLOCK 11:11 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_WINIM_IGNORE_INTERLOCK_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WINIM_IGNORE_INTERLOCK_ENABLE 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WINIM_TRASH_MODE 14:13 /* RWIVF */ +#define NV_PDISP_FE_CHNCTL_WINIM_TRASH_MODE_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WINIM_TRASH_MODE_TRASH_ONLY 0x00000001 /* RW--V */ +#define NV_PDISP_FE_CHNCTL_WINIM_TRASH_MODE_TRASH_AND_ABORT 0x00000002 /* RW--V */ +#define NV_PDISP_FE_CHNSTATUS_CORE 0x00610630 /* R--4R */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE 20:16 /* R-IVF */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE_DEALLOC 0x00000000 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE_DEALLOC_LIMBO 0x00000001 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE_VBIOS_INIT1 0x00000002 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE_VBIOS_INIT2 0x00000003 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE_VBIOS_OPERATION 0x00000004 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE_EFI_INIT1 0x00000005 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE_EFI_INIT2 0x00000006 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE_EFI_OPERATION 0x00000007 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE_UNCONNECTED 0x00000008 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE_INIT1 0x00000009 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE_INIT2 0x0000000A /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE_IDLE 0x0000000B /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE_BUSY 0x0000000C /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE_SHUTDOWN1 0x0000000D /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CORE_STATE_SHUTDOWN2 0x0000000E /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CURS(i) (0x00610784+(i)*4) /* R--4A */ +#define NV_PDISP_FE_CHNSTATUS_CURS__SIZE_1 8 /* */ +#define NV_PDISP_FE_CHNSTATUS_CURS_STATE 18:16 /* R-IVF */ +#define NV_PDISP_FE_CHNSTATUS_CURS_STATE_DEALLOC 0x00000000 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CURS_STATE_IDLE 0x00000004 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_CURS_STATE_BUSY 0x00000005 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WIN(i) (0x00610664+(i)*4) /* R--4A */ +#define NV_PDISP_FE_CHNSTATUS_WIN__SIZE_1 32 /* */ +#define NV_PDISP_FE_CHNSTATUS_WIN_STATE 19:16 /* R-IVF */ +#define NV_PDISP_FE_CHNSTATUS_WIN_STATE_DEALLOC 0x00000000 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WIN_STATE_INIT1 0x00000002 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WIN_STATE_INIT2 0x00000003 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WIN_STATE_IDLE 0x00000004 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WIN_STATE_BUSY 0x00000005 /* R---V */ +#define 
NV_PDISP_FE_CHNSTATUS_WIN_STATE_SHUTDOWN1 0x00000006 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WIN_STATE_SHUTDOWN2 0x00000007 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WIN_STATE_UNCONNECTED 0x00000001 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WIN_STATUS_METHOD_EXEC 31:31 /* R-IVF */ +#define NV_PDISP_FE_CHNSTATUS_WIN_STATUS_METHOD_EXEC_IDLE 0x00000000 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WINIM(i) (0x006106E4+(i)*4) /* R--4A */ +#define NV_PDISP_FE_CHNSTATUS_WINIM__SIZE_1 32 /* */ +#define NV_PDISP_FE_CHNSTATUS_WINIM_STATE 19:16 /* R-IVF */ +#define NV_PDISP_FE_CHNSTATUS_WINIM_STATE_DEALLOC 0x00000000 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WINIM_STATE_UNCONNECTED 0x00000001 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WINIM_STATE_INIT1 0x00000002 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WINIM_STATE_INIT2 0x00000003 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WINIM_STATE_IDLE 0x00000004 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WINIM_STATE_BUSY 0x00000005 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WINIM_STATE_SHUTDOWN1 0x00000006 /* R---V */ +#define NV_PDISP_FE_CHNSTATUS_WINIM_STATE_SHUTDOWN2 0x00000007 /* R---V */ + +#define NV_PDISP_FE_RM_INTR_DISPATCH 0x00611EC0 /* R--4R */ + +#define NV_PDISP_FE_RM_INTR_DISPATCH_HEAD_TIMING(i) (0+(i)):(0+(i)) /* */ +#define NV_PDISP_FE_RM_INTR_DISPATCH_HEAD_TIMING__SIZE_1 8 /* */ +#define NV_PDISP_FE_RM_INTR_DISPATCH_HEAD_TIMING_NOT_PENDING 0x00000000 /* */ +#define NV_PDISP_FE_RM_INTR_DISPATCH_HEAD_TIMING_PENDING 0x00000001 /* */ + +#define NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING(i) (0x00611C00+(i)*4) /* R--4A */ +#define NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING__SIZE_1 8 /* */ + +#define NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING_LAST_DATA 1:1 /* R-IVF */ +#define NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING_LAST_DATA_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING_LAST_DATA_PENDING 0x00000001 /* R---V */ + +#define NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING_RG_LINE_A 5:5 /* R-IVF */ +#define NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING_RG_LINE_A_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING_RG_LINE_A_PENDING 0x00000001 /* R---V */ + +#define NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING_RG_LINE_B 6:6 /* R-IVF */ +#define NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING_RG_LINE_B_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING_RG_LINE_B_PENDING 0x00000001 /* R---V */ + +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING(i) (0x00611800+(i)*4) /* RW-4A */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING__SIZE_1 8 /* */ + +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_LAST_DATA 1:1 /* RWIVF */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_LAST_DATA_INIT 0x00000000 /* R-I-V */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_LAST_DATA_NOT_PENDING 0x00000000 /* R---V */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_LAST_DATA_PENDING 0x00000001 /* R---V */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_LAST_DATA_RESET 0x00000001 /* -W--V */ + +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_LINE_A 5:5 /* RWIVF */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_LINE_A_INIT 0x00000000 /* R-I-V */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_LINE_A_NOT_PENDING 0x00000000 /* R---V */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_LINE_A_PENDING 0x00000001 /* R---V */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_LINE_A_RESET 0x00000001 /* -W--V */ + +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_LINE_B 6:6 /* RWIVF */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_LINE_B_INIT 0x00000000 /* R-I-V */ +#define 
NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_LINE_B_NOT_PENDING 0x00000000 /* R---V */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_LINE_B_PENDING 0x00000001 /* R---V */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_LINE_B_RESET 0x00000001 /* -W--V */ + +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_VBLANK 2:2 /* RWIVF */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_VBLANK_INIT 0x00000000 /* R-I-V */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_VBLANK_NOT_PENDING 0x00000000 /* R---V */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_VBLANK_PENDING 0x00000001 /* R---V */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_VBLANK_RESET 0x00000001 /* -W--V */ + +#define NV_PDISP_FE_RM_INTR_STAT_CTRL_DISP 0x00611C30 /* R--4R */ +#define NV_PDISP_FE_RM_INTR_STAT_CTRL_DISP_AWAKEN 8:8 /* R-IVF */ +#define NV_PDISP_FE_RM_INTR_STAT_CTRL_DISP_AWAKEN_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_PDISP_FE_RM_INTR_STAT_CTRL_DISP_AWAKEN_PENDING 0x00000001 /* R---V */ + +#define NV_PDISP_FE_EVT_STAT_AWAKEN_WIN 0x00611858 /* RW-4R */ + +#define NV_PDISP_FE_EVT_STAT_AWAKEN_WIN_CH(i) (0+(i)):(0+(i)) /* */ +#define NV_PDISP_FE_EVT_STAT_AWAKEN_WIN_CH__SIZE_1 32 /* */ +#define NV_PDISP_FE_EVT_STAT_AWAKEN_WIN_CH_INIT 0x00000000 /* */ +#define NV_PDISP_FE_EVT_STAT_AWAKEN_WIN_CH_NOT_PENDING 0x00000000 /* */ +#define NV_PDISP_FE_EVT_STAT_AWAKEN_WIN_CH_PENDING 0x00000001 /* */ +#define NV_PDISP_FE_EVT_STAT_AWAKEN_WIN_CH_RESET 0x00000001 /* */ + +#define NV_PDISP_FE_EVT_STAT_AWAKEN_OTHER 0x0061185C /* RW-4R */ + +#define NV_PDISP_FE_EVT_STAT_AWAKEN_OTHER_CORE 0:0 /* RWIVF */ +#define NV_PDISP_FE_EVT_STAT_AWAKEN_OTHER_CORE_INIT 0x00000000 /* R-I-V */ +#define NV_PDISP_FE_EVT_STAT_AWAKEN_OTHER_CORE_NOT_PENDING 0x00000000 /* R---V */ +#define NV_PDISP_FE_EVT_STAT_AWAKEN_OTHER_CORE_PENDING 0x00000001 /* R---V */ +#define NV_PDISP_FE_EVT_STAT_AWAKEN_OTHER_CORE_RESET 0x00000001 /* -W--V */ + +#define NV_PDISP_POSTCOMP_HEAD_LOADV_COUNTER(i) (0x0061611C+(i)*2048) /* RW-4A */ +#define NV_PDISP_POSTCOMP_HEAD_LOADV_COUNTER__SIZE_1 8 /* */ +#define NV_PDISP_POSTCOMP_HEAD_LOADV_COUNTER_VALUE 31:0 /* RWIUF */ +#define NV_PDISP_POSTCOMP_HEAD_LOADV_COUNTER_VALUE_INIT 0x00000000 /* RWI-V */ +#define NV_PDISP_POSTCOMP_HEAD_LOADV_COUNTER_VALUE_ZERO 0x00000000 /* RW--V */ +#define NV_PDISP_POSTCOMP_HEAD_LOADV_COUNTER_VALUE_HW 0x00000000 /* R---V */ +#define NV_PDISP_POSTCOMP_HEAD_LOADV_COUNTER_VALUE_SW 0x00000000 /* -W--V */ + +#endif // __v03_00_dev_disp_h__ diff --git a/src/common/inc/swref/published/disp/v04_01/dev_disp.h b/src/common/inc/swref/published/disp/v04_01/dev_disp.h new file mode 100644 index 0000000..3ea0961 --- /dev/null +++ b/src/common/inc/swref/published/disp/v04_01/dev_disp.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __v04_01_dev_disp_h__ +#define __v04_01_dev_disp_h__ +#define NV_PDISP 0x006F1FFF:0x00610000 /* RW--D */ + +#define NV_PDISP_FE_EVT_DISPATCH 0x00611A00 /* R--4R */ +#define NV_PDISP_FE_EVT_DISPATCH_SEM_WIN 26:26 /* R--VF */ +#define NV_PDISP_FE_EVT_DISPATCH_SEM_WIN_NOT_PENDING 0x00000000 /* R---V */ +#define NV_PDISP_FE_EVT_DISPATCH_SEM_WIN_PENDING 0x00000001 /* R---V */ + +#define NV_PDISP_FE_EVT_STAT_SEM_WIN 0x00611868 /* RW-4R */ +#define NV_PDISP_FE_EVT_STAT_SEM_WIN_CH(i) (0+(i)):(0+(i)) /* */ +#define NV_PDISP_FE_EVT_STAT_SEM_WIN_CH__SIZE_1 32 /* */ +#define NV_PDISP_FE_EVT_STAT_SEM_WIN_CH_INIT 0x00000000 /* */ +#define NV_PDISP_FE_EVT_STAT_SEM_WIN_CH_NOT_PENDING 0x00000000 /* */ +#define NV_PDISP_FE_EVT_STAT_SEM_WIN_CH_PENDING 0x00000001 /* */ +#define NV_PDISP_FE_EVT_STAT_SEM_WIN_CH_RESET 0x00000001 /* */ + +#define NV_PDISP_FE_RM_INTR_STAT_CTRL_DISP 0x00611C30 /* R--4R */ +#define NV_PDISP_FE_RM_INTR_STAT_CTRL_DISP_WIN_SEM 9:9 /* R-IVF */ +#define NV_PDISP_FE_RM_INTR_STAT_CTRL_DISP_WIN_SEM_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_PDISP_FE_RM_INTR_STAT_CTRL_DISP_WIN_SEM_PENDING 0x00000001 /* R---V */ + +#define NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING(i) (0x00611C00+(i)*4) /* R--4A */ +#define NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING__SIZE_1 8 /* */ + +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING(i) (0x00611800+(i)*4) /* RW-4A */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING__SIZE_1 8 /* */ + +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_SEM(i) (16+(i)):(16+(i)) /* */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_SEM__SIZE_1 6 /* */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_SEM_INIT 0x00000000 /* */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_SEM_NOT_PENDING 0x00000000 /* */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_SEM_PENDING 0x00000001 /* */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_RG_SEM_RESET 0x00000001 /* */ + +#endif // __v04_01_dev_disp_h__ diff --git a/src/common/inc/swref/published/disp/v04_02/dev_disp.h b/src/common/inc/swref/published/disp/v04_02/dev_disp.h new file mode 100644 index 0000000..1651e22 --- /dev/null +++ b/src/common/inc/swref/published/disp/v04_02/dev_disp.h @@ -0,0 +1,27 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __v04_02_dev_disp_h__ +#define __v04_02_dev_disp_h__ +#define NV_PDISP 0x006F1FFF:0x00610000 /* RW--D */ +#endif // __v04_02_dev_disp_h__ diff --git a/src/common/inc/swref/published/disp/v05_01/dev_disp.h b/src/common/inc/swref/published/disp/v05_01/dev_disp.h new file mode 100644 index 0000000..66f88ab --- /dev/null +++ b/src/common/inc/swref/published/disp/v05_01/dev_disp.h @@ -0,0 +1,65 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __v05_01_dev_disp_h__ +#define __v05_01_dev_disp_h__ + +#define NV_PDISP_POSTCOMP_LOADV_COUNTER(i) (0x0061A11C+(i)*1024) /* RW-4A */ +#define NV_PDISP_POSTCOMP_LOADV_COUNTER__SIZE_1 8 /* */ +#define NV_PDISP_POSTCOMP_LOADV_COUNTER_VALUE 31:0 /* RWIUF */ +#define NV_PDISP_POSTCOMP_LOADV_COUNTER_VALUE_INIT 0x00000000 /* RWI-V */ +#define NV_PDISP_POSTCOMP_LOADV_COUNTER_VALUE_ZERO 0x00000000 /* RW--V */ +#define NV_PDISP_POSTCOMP_LOADV_COUNTER_VALUE_HW 0x00000000 /* R---V */ +#define NV_PDISP_POSTCOMP_LOADV_COUNTER_VALUE_SW 0x00000000 /* -W--V */ + +#define NV_PDISP_FE_INTR_RETRIGGER(i) (0x00611F30+(i)*4) /* RW-4A */ +#define NV_PDISP_FE_INTR_RETRIGGER_TRIGGER 0:0 /* RWIVF */ +#define NV_PDISP_FE_INTR_RETRIGGER_TRIGGER_NONE 0x00000000 /* RWI-V */ +#define NV_PDISP_FE_INTR_RETRIGGER_TRIGGER_TURE 0x00000001 /* -W--T */ + +#define NV_PDISP_FE_SW_HEAD_CLK_CAP(i) (0x006405E8+(i)*4) /* RW-4A */ +#define NV_PDISP_FE_SW_HEAD_CLK_CAP__SIZE_1 8 /* */ + +#define NV_PDISP_FE_SW_HEAD_CLK_CAP_PCLK_MAX 7:0 /* RWIUF */ +#define NV_PDISP_FE_SW_HEAD_CLK_CAP_PCLK_MAX__PRODCHK 0 /* */ +#define NV_PDISP_FE_SW_HEAD_CLK_CAP_PCLK_MAX_INIT 0x00000077 /* RWI-V */ + +#define NV_PDISP_FE_SW_HEAD_CLK_CAP_PCLK_MIN 15:8 /* RWIUF */ +#define NV_PDISP_FE_SW_HEAD_CLK_CAP_PCLK_MIN__PRODCHK 0 /* */ +#define NV_PDISP_FE_SW_HEAD_CLK_CAP_PCLK_MIN_INIT 0x00000019 /* RWI-V */ + +#define NV_PDISP_RG_IN_LOADV_COUNTER(i) (0x00616320+(i)*2048) /* RW-4A */ +#define NV_PDISP_RG_IN_LOADV_COUNTER__SIZE_1 8 /* */ +#define NV_PDISP_RG_IN_LOADV_COUNTER_VALUE 31:0 /* RWIUF */ +#define NV_PDISP_RG_IN_LOADV_COUNTER_VALUE__PRODCHK 0 /* */ +#define NV_PDISP_RG_IN_LOADV_COUNTER_VALUE_INIT 0x00000000 /* RWI-V */ +#define NV_PDISP_RG_IN_LOADV_COUNTER_VALUE_ZERO 0x00000000 /* RW--V */ + +#define NV_PDISP_RG_CRASHLOCK_COUNTER(i) (0x00616484+(i)*2048) /* RW-4A */ +#define NV_PDISP_RG_CRASHLOCK_COUNTER__SIZE_1 8 /* */ +#define NV_PDISP_RG_CRASHLOCK_COUNTER_V 31:16 /* RWIUF */ +#define NV_PDISP_RG_CRASHLOCK_COUNTER_V__PRODCHK 0 /* */ +#define NV_PDISP_RG_CRASHLOCK_COUNTER_V_INIT 0x00000000 /* RWI-V */ +#define NV_PDISP_RG_CRASHLOCK_COUNTER_V_ZERO 0x00000000 /* RW--V */ + +#endif // __v05_01_dev_disp_h__ diff --git a/src/common/inc/swref/published/disp/v05_02/dev_disp.h b/src/common/inc/swref/published/disp/v05_02/dev_disp.h new file mode 100644 index 0000000..7ec41fb --- /dev/null +++ b/src/common/inc/swref/published/disp/v05_02/dev_disp.h @@ -0,0 +1,33 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __v05_02_dev_disp_h__ +#define __v05_02_dev_disp_h__ + +#define NV_PDISP_FE_RM_INTR_EN1_HEAD_TIMING(i) (0x00611EF0+(i)*4) /* RW-4A */ +#define NV_PDISP_FE_RM_INTR_EN1_HEAD_TIMING_LAST_DATA 1:1 /* RWIVF */ +#define NV_PDISP_FE_RM_INTR_EN1_HEAD_TIMING_LAST_DATA_INIT 0x00000000 /* RWI-V */ +#define NV_PDISP_FE_RM_INTR_EN1_HEAD_TIMING_LAST_DATA_DISABLE 0x00000000 /* RW--V */ +#define NV_PDISP_FE_RM_INTR_EN1_HEAD_TIMING_LAST_DATA_ENABLE 0x00000001 /* RW--V */ + +#endif diff --git a/src/common/inc/swref/published/nv_arch.h b/src/common/inc/swref/published/nv_arch.h new file mode 100644 index 0000000..3933b91 --- /dev/null +++ b/src/common/inc/swref/published/nv_arch.h @@ -0,0 +1,126 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2025 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NV_ARCH_PUBLISHED_H +#define NV_ARCH_PUBLISHED_H + +#include "nvcfg_sdk.h" + +// high byte indicates GPU-SERIES, as defined in Gpus.pm. +#define NVGPU_ARCHITECTURE_SERIES 31:24 +#define NVGPU_ARCHITECTURE_SERIES_CLASSIC 0x00 +#define NVGPU_ARCHITECTURE_SERIES_SIMULATION 0x00 // XXX - really should be distinct from CLASSIC_GPUS +#define NVGPU_ARCHITECTURE_SERIES_TEGRA 0xE0 +#define NVGPU_ARCHITECTURE_ARCH 23:0 + +#define GPU_ARCHITECTURE(series, arch) (DRF_DEF(GPU, _ARCHITECTURE, _SERIES, series) | \ + DRF_NUM(GPU, _ARCHITECTURE, _ARCH, arch)) + +// +// Architecture constants. 
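+// As a worked example (assuming the standard DRF_DEF/DRF_NUM field macros
+// from nvmisc.h): GPU_ARCHITECTURE(_CLASSIC, 0x0170) places SERIES_CLASSIC
+// (0x00) in bits 31:24 and 0x0170 in bits 23:0, so the AMPERE constant
+// below evaluates to 0x00000170; a TEGRA entry such as T23X evaluates to
+// 0xE0000023 (0xE0 in bits 31:24, 0x0023 in bits 23:0).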
+// +#define GPU_ARCHITECTURE_MAXWELL GPU_ARCHITECTURE(_CLASSIC, 0x0110) +#define GPU_ARCHITECTURE_MAXWELL2 GPU_ARCHITECTURE(_CLASSIC, 0x0120) +#define GPU_ARCHITECTURE_PASCAL GPU_ARCHITECTURE(_CLASSIC, 0x0130) +#define GPU_ARCHITECTURE_VOLTA GPU_ARCHITECTURE(_CLASSIC, 0x0140) +#define GPU_ARCHITECTURE_VOLTA2 GPU_ARCHITECTURE(_CLASSIC, 0x0150) +#define GPU_ARCHITECTURE_TURING GPU_ARCHITECTURE(_CLASSIC, 0x0160) +#define GPU_ARCHITECTURE_AMPERE GPU_ARCHITECTURE(_CLASSIC, 0x0170) +#define GPU_ARCHITECTURE_HOPPER GPU_ARCHITECTURE(_CLASSIC, 0x0180) +#define GPU_ARCHITECTURE_ADA GPU_ARCHITECTURE(_CLASSIC, 0x0190) +#define GPU_ARCHITECTURE_BLACKWELL_GB1XX GPU_ARCHITECTURE(_CLASSIC, 0x01A0) + +#define GPU_ARCHITECTURE_T12X GPU_ARCHITECTURE(_TEGRA, 0x0040) +#define GPU_ARCHITECTURE_T13X GPU_ARCHITECTURE(_TEGRA, 0x0013) +#define GPU_ARCHITECTURE_T21X GPU_ARCHITECTURE(_TEGRA, 0x0021) +#define GPU_ARCHITECTURE_T18X GPU_ARCHITECTURE(_TEGRA, 0x0018) +#define GPU_ARCHITECTURE_T19X GPU_ARCHITECTURE(_TEGRA, 0x0019) +#define GPU_ARCHITECTURE_T23X GPU_ARCHITECTURE(_TEGRA, 0x0023) + +#define GPU_ARCHITECTURE_T26X GPU_ARCHITECTURE(_TEGRA, 0x0026) + +#define GPU_ARCHITECTURE_T25X GPU_ARCHITECTURE(_TEGRA, 0x0025) + +#define GPU_ARCHITECTURE_SIMS GPU_ARCHITECTURE(_SIMULATION, 0x01f0) // eg: AMODEL + +// +// Implementation constants. +// These must be unique within a single architecture. +// + +#define GPU_IMPLEMENTATION_GM108 0x08 +#define GPU_IMPLEMENTATION_GM107 0x07 +#define GPU_IMPLEMENTATION_GM200 0x00 +#define GPU_IMPLEMENTATION_GM204 0x04 +#define GPU_IMPLEMENTATION_GM206 0x06 + +#define GPU_IMPLEMENTATION_GP100 0x00 +#define GPU_IMPLEMENTATION_GP102 0x02 +#define GPU_IMPLEMENTATION_GP104 0x04 +#define GPU_IMPLEMENTATION_GP106 0x06 +#define GPU_IMPLEMENTATION_GP107 0x07 +#define GPU_IMPLEMENTATION_GP108 0x08 + +#define GPU_IMPLEMENTATION_GV100 0x00 +#define GPU_IMPLEMENTATION_GV11B 0x0B + +#define GPU_IMPLEMENTATION_TU102 0x02 +#define GPU_IMPLEMENTATION_TU104 0x04 +#define GPU_IMPLEMENTATION_TU106 0x06 +#define GPU_IMPLEMENTATION_TU116 0x08 // TU116 has implementation ID 8 in HW +#define GPU_IMPLEMENTATION_TU117 0x07 + +#define GPU_IMPLEMENTATION_GA100 0x00 +#define GPU_IMPLEMENTATION_GA102 0x02 +#define GPU_IMPLEMENTATION_GA103 0x03 +#define GPU_IMPLEMENTATION_GA104 0x04 +#define GPU_IMPLEMENTATION_GA106 0x06 +#define GPU_IMPLEMENTATION_GA107 0x07 +#define GPU_IMPLEMENTATION_GA102F 0x0F + +#define GPU_IMPLEMENTATION_GH100 0x00 +#define GPU_IMPLEMENTATION_AD102 0x02 +#define GPU_IMPLEMENTATION_AD103 0x03 +#define GPU_IMPLEMENTATION_AD104 0x04 +#define GPU_IMPLEMENTATION_AD106 0x06 +#define GPU_IMPLEMENTATION_AD107 0x07 + +#define GPU_IMPLEMENTATION_GB100 0x00 +#define GPU_IMPLEMENTATION_GB102 0x02 + +#define GPU_IMPLEMENTATION_T124 0x00 +#define GPU_IMPLEMENTATION_T132 0x00 +#define GPU_IMPLEMENTATION_T210 0x00 +#define GPU_IMPLEMENTATION_T186 0x00 +#define GPU_IMPLEMENTATION_T194 0x00 +#define GPU_IMPLEMENTATION_T234 0x04 +#define GPU_IMPLEMENTATION_T234D 0x05 + +#define GPU_IMPLEMENTATION_T264D 0x05 + +#define GPU_IMPLEMENTATION_T256D 0x07 + +/* SIMS gpus */ +#define GPU_IMPLEMENTATION_AMODEL 0x00 + +#endif // NV_ARCH_PUBLISHED_H diff --git a/src/common/inc/swref/published/nv_ref.h b/src/common/inc/swref/published/nv_ref.h new file mode 100644 index 0000000..4d12546 --- /dev/null +++ b/src/common/inc/swref/published/nv_ref.h @@ -0,0 +1,215 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free 
of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// +/***************************************************************************\ +* * +* Hardware Reference Manual extracted defines. * +* - Defines in this file are approved by the HW team for publishing. * +* * +\***************************************************************************/ +#ifndef NV_REF_PUBLISHED_H +#define NV_REF_PUBLISHED_H + + + +#include "nvmisc.h" + +// +// These registers can be accessed by chip-independent code as +// well as chip-dependent code. +// +// NOTE: DO NOT ADD TO THIS FILE. CREATE CHIP SPECIFIC HAL ROUTINES INSTEAD. +// + +/* + * Standard PCI config space header defines. + * The defines here cannot change across generations. + */ + +/* dev_nv_xve.ref */ +/* PBUS field defines converted to NV_CONFIG field defines */ +#define NV_CONFIG_PCI_NV_0 0x00000000 /* R--4R */ +#define NV_CONFIG_PCI_NV_0_VENDOR_ID 15:0 /* C--UF */ +#define NV_CONFIG_PCI_NV_0_VENDOR_ID_NVIDIA 0x000010DE /* C---V */ +#define NV_CONFIG_PCI_NV_0_DEVICE_ID 31:16 /* R--UF */ +#define NV_CONFIG_PCI_NV_1 0x00000004 /* RW-4R */ +#define NV_CONFIG_PCI_NV_1_IO_SPACE 0:0 /* RWIVF */ +#define NV_CONFIG_PCI_NV_1_IO_SPACE_DISABLED 0x00000000 /* RWI-V */ +#define NV_CONFIG_PCI_NV_1_IO_SPACE_ENABLED 0x00000001 /* RW--V */ +#define NV_CONFIG_PCI_NV_1_MEMORY_SPACE 1:1 /* RWIVF */ +#define NV_CONFIG_PCI_NV_1_MEMORY_SPACE_DISABLED 0x00000000 /* RWI-V */ +#define NV_CONFIG_PCI_NV_1_MEMORY_SPACE_ENABLED 0x00000001 /* RW--V */ +#define NV_CONFIG_PCI_NV_1_BUS_MASTER 2:2 /* RWIVF */ +#define NV_CONFIG_PCI_NV_1_BUS_MASTER_DISABLED 0x00000000 /* RWI-V */ +#define NV_CONFIG_PCI_NV_1_BUS_MASTER_ENABLED 0x00000001 /* RW--V */ +#define NV_CONFIG_PCI_NV_2 0x00000008 /* R--4R */ +#define NV_CONFIG_PCI_NV_2_REVISION_ID 7:0 /* C--UF */ +#define NV_CONFIG_PCI_NV_2_CLASS_CODE 31:8 /* C--VF */ +#define NV_CONFIG_PCI_NV_3 0x0000000C /* RW-4R */ +#define NV_CONFIG_PCI_NV_3_LATENCY_TIMER 15:11 /* RWIUF */ +#define NV_CONFIG_PCI_NV_3_LATENCY_TIMER_0_CLOCKS 0x00000000 /* RWI-V */ +#define NV_CONFIG_PCI_NV_3_LATENCY_TIMER_8_CLOCKS 0x00000001 /* RW--V */ +#define NV_CONFIG_PCI_NV_3_LATENCY_TIMER_240_CLOCKS 0x0000001E /* RW--V */ +#define NV_CONFIG_PCI_NV_3_LATENCY_TIMER_248_CLOCKS 0x0000001F /* RW--V */ +#define NV_CONFIG_PCI_NV_4 0x00000010 /* RW-4R */ +#define NV_CONFIG_PCI_NV_5 0x00000014 /* RW-4R */ +#define NV_CONFIG_PCI_NV_5_ADDRESS_TYPE 2:1 /* C--VF */ +#define NV_CONFIG_PCI_NV_5_ADDRESS_TYPE_64_BIT 0x00000002 /* ----V */ +#define NV_CONFIG_PCI_NV_6 0x00000018 /* RW-4R */ +#define 
NV_CONFIG_PCI_NV_7(i) (0x0000001C+(i)*4) /* R--4A */ +#define NV_CONFIG_PCI_NV_11 0x0000002C /* R--4R */ +#define NV_CONFIG_PCI_NV_11_SUBSYSTEM_VENDOR_ID 15:0 /* R--UF */ +#define NV_CONFIG_PCI_NV_11_SUBSYSTEM_VENDOR_ID_NONE 0x00000000 /* R---V */ +#define NV_CONFIG_PCI_NV_11_SUBSYSTEM_ID 31:16 /* R--UF */ +#define NV_CONFIG_PCI_NV_11_SUBSYSTEM_ID_NONE 0x00000000 /* R---V */ +#define NV_CONFIG_PCI_NV_11_SUBSYSTEM_ID_TNT2PRO 0x0000001f +#define NV_CONFIG_PCI_NV_12 0x00000030 /* RW-4R */ +#define NV_CONFIG_PCI_NV_13 0x00000034 /* RW-4R */ +#define NV_CONFIG_PCI_NV_13_CAP_PTR 7:0 /* C--VF */ +#define NV_CONFIG_PCI_NV_14 0x00000038 /* R--4R */ +#define NV_CONFIG_PCI_NV_15 0x0000003C /* RW-4R */ +#define NV_CONFIG_PCI_NV_15_INTR_LINE 7:0 /* RWIVF */ +/* + * These defines are the correct fields to be used to extract the + * NEXT_PTR and CAP_ID from any PCI capability structure, + * but they still have NV_24 in the name because they were from the + * first PCI capability structure in the capability list in older GPUs. + */ +#define NV_CONFIG_PCI_NV_24_NEXT_PTR 15:8 /* R--VF */ +#define NV_CONFIG_PCI_NV_24_CAP_ID 7:0 /* C--VF */ + +/* + * Standard registers present on NVIDIA chips used to ID the chip. + * Very stable across generations. + */ + +/* dev_boot */ +#define NV_PMC_BOOT_0 0x00000000 /* R--4R */ +#define NV_PMC_BOOT_0_MINOR_REVISION 3:0 /* R--VF */ +#define NV_PMC_BOOT_0_MAJOR_REVISION 7:4 /* R--VF */ +#define NV_PMC_BOOT_0_ARCHITECTURE_1 8:8 /* R--VF */ +#define NV_PMC_BOOT_0_IMPLEMENTATION 23:20 /* R--VF */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_0 0x00000000 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_1 0x00000001 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_2 0x00000002 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_3 0x00000003 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_4 0x00000004 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_5 0x00000005 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_6 0x00000006 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_7 0x00000007 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_8 0x00000008 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_9 0x00000009 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_A 0x0000000A /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_B 0x0000000B /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_C 0x0000000C /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_D 0x0000000D /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_E 0x0000000E /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_F 0x0000000F /* R---V */ +#define NV_PMC_BOOT_0_ARCHITECTURE_0 28:24 /* R--VF */ +#define NV_PMC_BOOT_0_ARCHITECTURE_TU100 0x00000016 /* R---V */ +#define NV_PMC_BOOT_0_ARCHITECTURE_TU110 0x00000016 /* R---V */ +#define NV_PMC_BOOT_0_ARCHITECTURE_GA100 0x00000017 /* R---V */ +#define NV_PMC_BOOT_0_ARCHITECTURE_GH100 0x00000018 /* R---V */ +#define NV_PMC_BOOT_0_ARCHITECTURE_AD100 0x00000019 /* R---V */ +#define NV_PMC_BOOT_0_ARCHITECTURE_GB100 0x0000001A /* R---V */ + +#define NV_PMC_BOOT_1 0x00000004 /* R--4R */ +#define NV_PMC_BOOT_1_VGPU8 8:8 /* R--VF */ +#define NV_PMC_BOOT_1_VGPU8_REAL 0x00000000 /* R-I-V */ +#define NV_PMC_BOOT_1_VGPU8_VIRTUAL 0x00000001 /* R---V */ +#define NV_PMC_BOOT_1_VGPU16 16:16 /* R--VF */ +#define NV_PMC_BOOT_1_VGPU16_REAL 0x00000000 /* R-I-V */ +#define NV_PMC_BOOT_1_VGPU16_VIRTUAL 0x00000001 /* R---V */ +#define NV_PMC_BOOT_1_VGPU 17:16 /* C--VF */ +#define NV_PMC_BOOT_1_VGPU_REAL 0x00000000 /* C---V */ +#define NV_PMC_BOOT_1_VGPU_PV 0x00000001 /* ----V */ +#define NV_PMC_BOOT_1_VGPU_VF 
0x00000002 /* ----V */ +#define NV_PMC_BOOT_42 0x00000A00 /* R--4R */ +#define NV_PMC_BOOT_42_MINOR_EXTENDED_REVISION 11:8 /* R-XVF */ +#define NV_PMC_BOOT_42_MINOR_REVISION 15:12 /* R-XVF */ +#define NV_PMC_BOOT_42_MAJOR_REVISION 19:16 /* R-XVF */ +#define NV_PMC_BOOT_42_IMPLEMENTATION 23:20 /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_0 0x00000000 /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_1 0x00000001 /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_2 0x00000002 /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_3 0x00000003 /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_4 0x00000004 /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_5 0x00000005 /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_6 0x00000006 /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_7 0x00000007 /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_8 0x00000008 /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_9 0x00000009 /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_A 0x0000000A /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_B 0x0000000B /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_C 0x0000000C /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_D 0x0000000D /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_E 0x0000000E /* */ +#define NV_PMC_BOOT_42_IMPLEMENTATION_F 0x0000000F /* */ +#define NV_PMC_BOOT_42_ARCHITECTURE 29:24 /* */ +#define NV_PMC_BOOT_42_CHIP_ID 29:20 /* R-XVF */ + +#define NV_PMC_BOOT_42_ARCHITECTURE_GM100 0x00000011 /* */ +#define NV_PMC_BOOT_42_ARCHITECTURE_GM200 0x00000012 /* */ +#define NV_PMC_BOOT_42_ARCHITECTURE_GP100 0x00000013 /* */ +#define NV_PMC_BOOT_42_ARCHITECTURE_GV100 0x00000014 /* */ +#define NV_PMC_BOOT_42_ARCHITECTURE_GV110 0x00000015 /* */ +#define NV_PMC_BOOT_42_ARCHITECTURE_TU100 0x00000016 /* */ +#define NV_PMC_BOOT_42_ARCHITECTURE_GA100 0x00000017 /* */ +#define NV_PMC_BOOT_42_ARCHITECTURE_GH100 0x00000018 /* */ +#define NV_PMC_BOOT_42_ARCHITECTURE_AD100 0x00000019 /* */ +#define NV_PMC_BOOT_42_ARCHITECTURE_GB100 0x0000001A /* */ +#define NV_PMC_BOOT_42_ARCHITECTURE_AMODEL 0x0000001F /* */ + +#define NV_PMC_BOOT_42_CHIP_ID_GA100 0x00000170 /* */ + +/* dev_arapb_misc.h */ +#define NV_PAPB_MISC_GP_HIDREV_CHIPID 15:8 /* ----F */ +#define NV_PAPB_MISC_GP_HIDREV_MAJORREV 7:4 /* ----F */ + + +// +// Helper to return NV_PMC_BOOT_0 architecture, which is split across fields: +// ARCHITECTURE_1 (msb) and ARCHITECTURE_0 (lsb) +// +static inline NvU32 +decodePmcBoot0Architecture(NvU32 pmcBoot0RegVal) +{ + return (DRF_VAL(_PMC, _BOOT_0, _ARCHITECTURE_1, pmcBoot0RegVal) << DRF_SIZE(NV_PMC_BOOT_0_ARCHITECTURE_0)) | + DRF_VAL(_PMC, _BOOT_0, _ARCHITECTURE_0, pmcBoot0RegVal); +} + +// Helpers to return NV_PMC_BOOT_42 architecture and chip ID +static inline NvU32 +decodePmcBoot42Architecture(NvU32 pmcBoot42RegVal) +{ + return DRF_VAL(_PMC, _BOOT_42, _ARCHITECTURE, pmcBoot42RegVal); +} + +static inline NvU32 +decodePmcBoot42ChipId(NvU32 pmcBoot42RegVal) +{ + return DRF_VAL(_PMC, _BOOT_42, _CHIP_ID, pmcBoot42RegVal); +} + +#endif // NV_REF_PUBLISHED_H diff --git a/src/common/inc/swref/published/t23x/t234/dev_fuse.h b/src/common/inc/swref/published/t23x/t234/dev_fuse.h new file mode 100644 index 0000000..4f54831 --- /dev/null +++ b/src/common/inc/swref/published/t23x/t234/dev_fuse.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without 
limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __t234_dev_fuse_h__ +#define __t234_dev_fuse_h__ + +#define NV_FUSE_STATUS_OPT_DISPLAY 0x00820C04 /* R-I4R */ +#define NV_FUSE_STATUS_OPT_DISPLAY_DATA 0:0 /* R-IVF */ +#define NV_FUSE_STATUS_OPT_DISPLAY_DATA_ENABLE 0x00000000 /* R---V */ + +#endif // __t234_dev_fuse_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_mmu.h b/src/common/inc/swref/published/turing/tu102/dev_mmu.h new file mode 100644 index 0000000..0134b3d --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_mmu.h @@ -0,0 +1,119 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __tu102_dev_mmu_h__ +#define __tu102_dev_mmu_h__ +#define NV_MMU_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */ +#define NV_MMU_PDE_SIZE_FULL 0x00000000 /* RW--V */ +#define NV_MMU_PDE_SIZE_HALF 0x00000001 /* RW--V */ +#define NV_MMU_PDE_SIZE_QUARTER 0x00000002 /* RW--V */ +#define NV_MMU_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */ +#define NV_MMU_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PDE__SIZE 8 +#define NV_MMU_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */ +#define NV_MMU_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */ +#define NV_MMU_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */ +#define NV_MMU_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */ +#define NV_MMU_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */ +#define NV_MMU_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */ +#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define 
NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PTE_LOCK (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PTE_LOCK_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_LOCK_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_COMPTAGLINE (1*32+20+11):(1*32+12) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE (1*32+30):(1*32+30) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE (1*32+31):(1*32+31) /* RWXVF */ +#define NV_MMU_PTE_WRITE_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PTE__SIZE 8 +#define NV_MMU_PTE_KIND (1*32+11):(1*32+4) /* RWXVF */ +#define NV_MMU_PTE_KIND_INVALID 0x07 /* R---V */ +#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY 0x06 /* R---V */ +#define NV_MMU_PTE_KIND_Z16 0x01 /* R---V */ +#define NV_MMU_PTE_KIND_S8 0x02 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24 0x03 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8 0x04 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8 0x05 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE 0x08 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC 0x09 /* R---V */ +#define NV_MMU_PTE_KIND_S8_COMPRESSIBLE_DISABLE_PLC 0x0A /* R---V */ +#define NV_MMU_PTE_KIND_Z16_COMPRESSIBLE_DISABLE_PLC 0x0B /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_COMPRESSIBLE_DISABLE_PLC 0x0C /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_COMPRESSIBLE_DISABLE_PLC 0x0D /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC 0x0E /* R---V */ +#define NV_MMU_PTE_KIND_SMSKED_MESSAGE 0x0F /* R---V */ +#define NV_MMU_CLIENT_KIND_Z16 0x1 /* R---V */ +#define NV_MMU_CLIENT_KIND_Z24S8 0x5 /* R---V */ +#define NV_MMU_CLIENT_KIND_INVALID 0x7 /* R---V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_VER2_PTE_COMPTAGLINE (20+35):36 /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#endif // __tu102_dev_mmu_h__ diff --git a/src/common/inc/swref/published/turing/tu102/kind_macros.h b/src/common/inc/swref/published/turing/tu102/kind_macros.h new file mode 100644 index 0000000..1544d88 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/kind_macros.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef TU102_KIND_MACROS_H_INCLUDED
+#define TU102_KIND_MACROS_H_INCLUDED
+
+#define KIND_INVALID(k)             ((k) == NV_MMU_CLIENT_KIND_INVALID)
+#define PTEKIND_PITCH(k)            (((k) == NV_MMU_PTE_KIND_PITCH) || ((k) == NV_MMU_PTE_KIND_SMSKED_MESSAGE))
+#define PTEKIND_COMPRESSIBLE(k)     (((k) >= NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE) && ((k) <= NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC))
+#define PTEKIND_DISALLOWS_PLC(k)    (!((k) == NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE))
+#define PTEKIND_SUPPORTED(k)        (((k) == NV_MMU_PTE_KIND_INVALID) || ((k) == NV_MMU_PTE_KIND_PITCH) || ((k) == NV_MMU_PTE_KIND_GENERIC_MEMORY) || (((k) >= NV_MMU_PTE_KIND_Z16) && ((k) <= NV_MMU_PTE_KIND_Z24S8)) || (((k) >= NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE) && ((k) <= NV_MMU_PTE_KIND_SMSKED_MESSAGE)))
+#define KIND_Z(k)                   (((k) >= NV_MMU_CLIENT_KIND_Z16) && ((k) <= NV_MMU_CLIENT_KIND_Z24S8))
+#define PTEKIND_Z(k)                ((((k) >= NV_MMU_PTE_KIND_Z16) && ((k) <= NV_MMU_PTE_KIND_Z24S8)) || (((k) >= NV_MMU_PTE_KIND_S8_COMPRESSIBLE_DISABLE_PLC) && ((k) <= NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC)))
+#define PTEKIND_GENERIC_MEMORY(k)   (((k) == NV_MMU_PTE_KIND_GENERIC_MEMORY) || (((k) >= NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE) && ((k) <= NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC)))
+
+#endif // TU102_KIND_MACROS_H_INCLUDED
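To make the field defines above concrete, here is a minimal consumer-side sketch. It assumes only the defines just introduced plus NvU32/NvU64 from nvtypes.h; the helper name, the fallback choice, and the hard-coded shift positions are illustrative readings of the (word*32+bit) ranges in dev_mmu.h, not code from this change.

    // Build a minimal valid system-memory PTE (NV_MMU_PTE__SIZE == 8 bytes,
    // i.e. two 32-bit words).  Positions follow dev_mmu.h: VALID is bit 0 of
    // word 0, ADDRESS_SYS is bits 31:4 of word 0, APERTURE is bits 2:1 of
    // word 1, KIND is bits 11:4 of word 1.
    static void example_fill_sysmem_pte(NvU32 pte[2], NvU64 physAddr, NvU32 kind)
    {
        if (!PTEKIND_SUPPORTED(kind))
            kind = NV_MMU_PTE_KIND_PITCH;                            // illustrative fallback

        pte[0]  = NV_MMU_PTE_VALID_TRUE;                                  // 0:0
        pte[0] |= (NvU32)((physAddr >> NV_MMU_PTE_ADDRESS_SHIFT) << 4);   // 31:4
        pte[1]  = NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY << 1;        // 2:1
        pte[1] |= kind << 4;                                              // 11:4
    }

In the real driver these ranges are consumed through DRF-style field macros rather than hand-written shifts; the open-coded form above is only to show what the (word*32+bit) notation encodes.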
diff --git a/src/common/modeset/hdmipacket/nvhdmi_frlInterface.h b/src/common/modeset/hdmipacket/nvhdmi_frlInterface.h
new file mode 100644
index 0000000..ed6bba9
--- /dev/null
+++ b/src/common/modeset/hdmipacket/nvhdmi_frlInterface.h
@@ -0,0 +1,268 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/** @file nvhdmi_frlInterface.h
+ *  @brief This file provides FRL-related interfaces between the client and the HDMI lib
+ */
+
+
+#ifndef _NVHDMI_FRLINTERFACE_H_
+#define _NVHDMI_FRLINTERFACE_H_
+
+#include "nvhdmipkt.h"
+#include "nvHdmiFrlCommon.h"
+
+#include "../timing/nvtiming.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+// DSC encoder color format bitmasks (these match DSC lib & RM ctrl 0073 fields)
+typedef enum tagHDMI_DSC_ENCODER_COLOR_FORMAT
+{
+    HDMI_DSC_ENCODER_COLOR_FORMAT_RGB            = 1,
+    HDMI_DSC_ENCODER_COLOR_FORMAT_YCBCR444       = 2,
+    HDMI_DSC_ENCODER_COLOR_FORMAT_YCBCRNATIVE422 = 4,
+    HDMI_DSC_ENCODER_COLOR_FORMAT_YCBCRNATIVE420 = 8
+} HDMI_DSC_ENCODER_COLOR_FORMAT;
+
+// Options for the QueryFRLConfig interface
+typedef enum tagHDMI_QUERY_FRL_OPTION
+{
+    HDMI_QUERY_FRL_ANY_CONFIG = 0,         // any FRL config that supports the mode
+    HDMI_QUERY_FRL_OPTIMUM_CONFIG,         // find the best-fit config for this mode
+    HDMI_QUERY_FRL_LOWEST_BANDWIDTH,       // min bw
+    HDMI_QUERY_FRL_HIGHEST_PIXEL_QUALITY,  // trade off bandwidth for pixel quality
+    HDMI_QUERY_FRL_HIGHEST_BANDWIDTH
+} HDMI_QUERY_FRL_OPTION;
+
+/*************************************************************************************************
+* HDMI_VIDEO_TRANSPORT_INFO:                                                                     *
+* Video transport format - the combination of timing, bpc and packing that goes on the link.    *
+* The client passes this in; the lib uses it in bandwidth calculations to decide the required   *
+* FRL rate.                                                                                     *
+**************************************************************************************************/
+typedef struct tagHDMI_VIDEO_TRANSPORT_INFO
+{
+    const NVT_TIMING  *pTiming;       // backend timing
+    HDMI_BPC           bpc;
+    HDMI_PIXEL_PACKING packing;
+    NvBool             bDualHeadMode; // 2H1OR
+} HDMI_VIDEO_TRANSPORT_INFO;
+
+/************************************************************************************************
+* HDMI_QUERY_FRL_CLIENT_CONTROL:                                                                *
+* Allows the client to force specific DSC/FRL configurations, for testing or otherwise.        *
+* E.g., the client could query for any fitting FRL config instead of the optimal one, or       *
+* trade off bandwidth for pixel quality.                                                       *
+*************************************************************************************************/
+typedef struct tagHDMI_QUERY_FRL_CLIENT_CONTROL
+{
+    HDMI_QUERY_FRL_OPTION option;
+
+    NvU32 forceFRLRate       : 1;
+    NvU32 forceAudio2Ch48KHz : 1;
+    NvU32 enableDSC          : 1;
+    NvU32 forceSliceCount    : 1;
+    NvU32 forceSliceWidth    : 1;
+    NvU32 forceBppx16        : 1;
+    NvU32 skipGeneratePPS    : 1;
+    NvU32 reserved           : 25;
+
+    // the client can set the params below if the respective force flag is set
+    NvU32 sliceCount;
+    NvU32 sliceWidth;
+    NvU32 bitsPerPixelX16;
+    HDMI_FRL_DATA_RATE frlRate;
+
+} HDMI_QUERY_FRL_CLIENT_CONTROL;
+
+/************************************************************************************************
+* HDMI_SRC_CAPS:                                                                                *
+* Input to HDMI lib.                                                                           *
+*                                                                                              *
+* Client gives info about GPU capabilities - DSC related caps                                  *
+*************************************************************************************************/
+typedef struct tagHDMI_SRC_CAPS
+{
+    struct
+    {
+        NvU32 dscCapable              : 1;
+        NvU32 bppPrecision            : 8;
+        NvU32 encoderColorFormatMask  : 8;
+        NvU32 lineBufferSizeKB        : 8;
+        NvU32 rateBufferSizeKB        : 8;
+        NvU32 maxNumHztSlices         : 8;
+        NvU32 lineBufferBitDepth      : 8;
+        NvU32 dualHeadBppTargetMaxX16 : 16;
+        NvU32 maxWidthPerSlice;
+    } dscCaps;
+
+    HDMI_FRL_DATA_RATE linkMaxFRLRate;
+} HDMI_SRC_CAPS;
+
+/************************************************************************************************
+* HDMI_SINK_CAPS:                                                                              *
+* Input to HDMI lib.
* +* * +* Client gives info from EDID, HDMI lib uses DSC related info to call DSC lib to generate PPS * +* Audio information from CEA861 block is used for bandwidth calculations * +* linkMaxFRLRate and linkMaxFRLRateDSC are max link rates determined from physical link * +* training. * +*************************************************************************************************/ +typedef struct tagHDMI_SINK_CAPS +{ + const NVT_HDMI_FORUM_INFO *pHdmiForumInfo; + NvU32 audioType; + NvU32 maxAudioChannels; + NvU32 maxAudioFreqKHz; + NvBool bHBRAudio; + HDMI_FRL_DATA_RATE linkMaxFRLRate; + HDMI_FRL_DATA_RATE linkMaxFRLRateDSC; +} HDMI_SINK_CAPS; + +/************************************************************************************************ +* HDMI_FRL_CONFIG: * +* Output from HDMI lib. Client uses this info for modeset * +* * +* maxSupportedAudioCh, maxSupportedAudioFreqKHz - max possible audio settings at the chosen * +* FRL rate, though the sink caps may have reported higher caps * +* * +* dscInfo - if current timing requires DSC, lib returns PPS information here * +* * +* bitsPerPixelx16 - optimum bpp value calculated per spec * +* dscHActiveBytes - in compressed video transport mode, number of bytes in 1 line * +* dscHActiveTriBytes - in compressed video transport mode, number of tri-bytes in 1 line * +* dscHBlankTriBytes - in compressed video transport mode, number of tri-bytes to be sent * +* to represent horizontal blanking * +* * +* pps[32] - PPS data. HDMI lib calls DSC lib to fill it in * +*************************************************************************************************/ +#define HDMI_DSC_MAX_PPS_SIZE_DWORD 32 +typedef struct tagHDMI_FRL_CONFIG +{ + HDMI_FRL_DATA_RATE frlRate; + NvU32 maxSupportedAudioCh; + NvU32 maxSupportedAudioFreqKHz; + + // DSC info client will use for core channel modeset + struct + { + NvU32 bEnableDSC : 1; + NvU32 reserved : 31; + + NvU32 bitsPerPixelX16; + NvU32 sliceCount; + NvU32 sliceWidth; + NvU32 pps[HDMI_DSC_MAX_PPS_SIZE_DWORD]; + NvU32 dscHActiveBytes; + NvU32 dscHActiveTriBytes; + NvU32 dscHBlankTriBytes; + NvU32 dscTBlankToTTotalRatioX1k; + } dscInfo; + +} HDMI_FRL_CONFIG; + +/************************************************************************************************ +* NvHdmi_AssessLinkCapabilities: * +* * +* Input parameters: * +* subDevice - Sub Device ID. * +* displayId - Display ID. * +* pSinkEdid - EDID of sink * +* * +* Output parameters: * +* pSrcCaps - src capabilities - DSC caps * +* pSinkCaps - sink capabilities - actual caps calculated from link training * +* * +* Calls RM to get DSC related src side caps. Performs physical link training to determine if * +* sink reported max FRL rate can actually be supported on the physical link * +*************************************************************************************************/ +NVHDMIPKT_RESULT +NvHdmi_AssessLinkCapabilities(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps); + +/************************************************************************************************ +* NvHdmi_QueryFRLConfig: * +* * +* Input parameters: * +* libHandle - Hdmi library handle, provided on initializing the library. * +* pVidTransInfo - information about timing, bpc and packing * +* pClientCtrl - settings client wants to see set. 
HDMI lib tries to honor these                          *
+*   pSinkCaps     - sink capabilities                                                          *
+*                                                                                              *
+*   Output parameters:                                                                         *
+*   pFRLConfig    - chosen FRL rate and DSC configuration                                      *
+*                                                                                              *
+*************************************************************************************************/
+NVHDMIPKT_RESULT
+NvHdmi_QueryFRLConfig(NvHdmiPkt_Handle libHandle,
+                      HDMI_VIDEO_TRANSPORT_INFO     const * const pVidTransInfo,
+                      HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl,
+                      HDMI_SRC_CAPS                 const * const pSrcCaps,
+                      HDMI_SINK_CAPS                const * const pSinkCaps,
+                      HDMI_FRL_CONFIG              *pFRLConfig);
+
+/************************************************************************************************
+* NvHdmi_SetFRLConfig:                                                                         *
+*                                                                                              *
+*   Input parameters:                                                                          *
+*   libHandle     - Hdmi library handle, provided on initializing the library.                 *
+*   subDevice     - Sub Device ID.                                                             *
+*   displayId     - Display ID.                                                                *
+*   bFakeLt       - Indicates that the GPU's link configuration should be forced and that      *
+*                   configuration of the sink device should be skipped.                        *
+*   pFRLConfig    - Link configuration to set.                                                 *
+*                                                                                              *
+************************************************************************************************/
+NVHDMIPKT_RESULT
+NvHdmi_SetFRLConfig(NvHdmiPkt_Handle libHandle,
+                    NvU32            subDevice,
+                    NvU32            displayId,
+                    NvBool           bFakeLt,
+                    HDMI_FRL_CONFIG *pFRLConfig);
+
+/************************************************************************************************
+* NvHdmi_ClearFRLConfig:                                                                       *
+*                                                                                              *
+*   Input parameters:                                                                          *
+*   libHandle     - Hdmi library handle, provided on initializing the library.                 *
+*   subDevice     - Sub Device ID.                                                             *
+*   displayId     - Display ID to change the settings on.                                      *
+*                                                                                              *
+************************************************************************************************/
+NVHDMIPKT_RESULT
+NvHdmi_ClearFRLConfig(NvHdmiPkt_Handle libHandle,
+                      NvU32            subDevice,
+                      NvU32            displayId);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // _NVHDMI_FRLINTERFACE_H_
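For orientation, a sketch of the intended call flow through these interfaces: assess the link, query a config for the mode, then commit it at modeset time. Everything here is illustrative. hLib, subDev, dpyId, pEdidInfo and pTiming are assumed to exist, and HDMI_BPC10 / HDMI_PIXEL_PACKING_RGB are placeholder enumerator names standing in for values from nvHdmiFrlCommon.h, which is not part of this excerpt.

    HDMI_SRC_CAPS                 srcCaps;
    HDMI_SINK_CAPS                sinkCaps;
    HDMI_FRL_CONFIG               frlConfig;
    HDMI_VIDEO_TRANSPORT_INFO     vti  = {0};
    HDMI_QUERY_FRL_CLIENT_CONTROL ctrl = {0};

    // 1) Source + sink caps, including physical link training results
    if (NvHdmi_AssessLinkCapabilities(hLib, subDev, dpyId, pEdidInfo,
                                      &srcCaps, &sinkCaps) != NVHDMIPKT_SUCCESS)
        return;

    // 2) Describe what should go on the link and ask for a fitting FRL/DSC config
    vti.pTiming = pTiming;                 // backend timing of the mode
    vti.bpc     = HDMI_BPC10;              // placeholder enumerator
    vti.packing = HDMI_PIXEL_PACKING_RGB;  // placeholder enumerator
    ctrl.option = HDMI_QUERY_FRL_OPTIMUM_CONFIG;

    if (NvHdmi_QueryFRLConfig(hLib, &vti, &ctrl, &srcCaps, &sinkCaps,
                              &frlConfig) == NVHDMIPKT_SUCCESS)
    {
        // 3) Commit the chosen link configuration (real link training)
        NvHdmi_SetFRLConfig(hLib, subDev, dpyId, NV_FALSE /* bFakeLt */, &frlConfig);
    }

A failed query, e.g. NVHDMIPKT_INSUFFICIENT_BANDWIDTH, would typically send the client back to retry with a different bpc or packing.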
diff --git a/src/common/modeset/hdmipacket/nvhdmipkt.c b/src/common/modeset/hdmipacket/nvhdmipkt.c
new file mode 100644
index 0000000..c6b4044
--- /dev/null
+++ b/src/common/modeset/hdmipacket/nvhdmipkt.c
@@ -0,0 +1,706 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * File: nvhdmipkt.c
+ *
+ * Purpose: Provide initialization functions for HDMI library
+ */
+
+#include
+#include "nvlimits.h"
+#include "nvhdmipkt_common.h"
+#include "nvhdmipkt_class.h"
+#include "nvhdmipkt_internal.h"
+
+#include "../timing/nvt_dsc_pps.h"
+
+#include "class/cl9170.h"
+#include "class/cl917d.h"
+#include "class/cl9270.h"
+#include "class/cl927d.h"
+#include "class/cl9470.h"
+#include "class/cl947d.h"
+#include "class/cl9570.h"
+#include "class/cl957d.h"
+#include "class/clc370.h"
+#include "class/clc37d.h"
+#include "class/clc570.h"
+#include "class/clc57d.h"
+#include "class/clc670.h"
+#include "class/clc67d.h"
+#include "class/clc870.h"
+#include "class/clc87d.h"
+#include "class/clc970.h"
+#include "class/clc97d.h"
+#include "class/clcc70.h"
+#include "class/clcc7d.h"
+
+#include "hdmi_spec.h"
+
+// Class hierarchy structure
+typedef struct tagNVHDMIPKT_CLASS_HIERARCHY
+{
+    NVHDMIPKT_CLASS_ID classId;
+    NVHDMIPKT_CLASS_ID parentClassId;
+    NvBool             isRootClass;
+    void   (*initInterface)(NVHDMIPKT_CLASS*);
+    NvBool (*constructor)  (NVHDMIPKT_CLASS*);
+    void   (*destructor)   (NVHDMIPKT_CLASS*);
+    NvU32  displayClass;
+    NvU32  coreDmaClass;
+} NVHDMIPKT_CLASS_HIERARCHY;
+
+/*************************************************************************************************
+ * The hierarchy structure establishes the relationship between classes.                        *
+ * If isRootClass is NV_TRUE it is a root class; otherwise it is a child of another class.      *
+ * classId also acts as an index, and hence the order of the structure below should be          *
+ * maintained.                                                                                  *
+ *                                                                                              *
+ * ASSUMPTION: There are two huge assumptions while creating the class relationship and         *
+ * while traversing it. 1. That of the Class ID definition (NVHDMIPKT_CLASS_ID), which has      *
+ * to be strictly indexed, that is 0, 1, 2... and so on. And 2. that the structure              *
+ * CLASS_HIERARCHY (above) follows that indexing. That is, NVHDMIPKT_0073_CLASS is value 0 and  *
+ * the first entry in CLASS_HIERARCHY, NVHDMIPKT_9171_CLASS is value 1 and hence the second     *
+ * entry in CLASS_HIERARCHY, so on and so forth.                                                *
+ *                                                                                              *
+ * HOW TO ADD A NEW CLASS?                                                                      *
+ * 1. Add an ID in NVHDMIPKT_CLASS_ID.                                                          *
+ * 2. Add a source file nvhdmipkt_XXXX.c, and include it in the makefiles for Mods,             *
+ *    Windows, and Linux.                                                                       *
+ * 3. Provide initializeHdmiPktInterfaceXXXX, hdmiConstructorXXXX, and hdmiDestructorXXXX.      *
+ * 4. Add functions that need to be overridden in NVHDMIPKT_CLASS.                              *
+ * 5. Add a relationship in the hierarchy[] array. The new class can be a subclass or a root.   *
+ *    In the case of a root, all the interfaces need to be overridden in NVHDMIPKT_CLASS.       *
+ ************************************************************************************************/
+static const NVHDMIPKT_CLASS_HIERARCHY hierarchy[] =
+{
+    [NVHDMIPKT_0073_CLASS] = {// Index 0==NVHDMIPKT_0073_CLASS
+                              NVHDMIPKT_0073_CLASS,               // classId
+                              NVHDMIPKT_0073_CLASS,               // parentClassId
+                              NV_TRUE,                            // isRootClass
+                              initializeHdmiPktInterface0073,     // initInterface
+                              hdmiConstructor0073,                // constructor
+                              hdmiDestructor0073,                 // destructor
+                              0,                                  // displayClass
+                              0                                   // coreDmaClass
+    },
+    [NVHDMIPKT_9171_CLASS] = {// Index 1==NVHDMIPKT_9171_CLASS
+                              NVHDMIPKT_9171_CLASS,               // classId
+                              NVHDMIPKT_9171_CLASS,               // parentClassId
+                              NV_TRUE,                            // isRootClass
+                              initializeHdmiPktInterface9171,     // initInterface
+                              hdmiConstructor9171,                // constructor
+                              hdmiDestructor9171,                 // destructor
+                              NV9170_DISPLAY,                     // displayClass
+                              NV917D_CORE_CHANNEL_DMA             // coreDmaClass
+    },
+    [NVHDMIPKT_9271_CLASS] = {// Index 2==NVHDMIPKT_9271_CLASS
+                              NVHDMIPKT_9271_CLASS,               // classId
+                              NVHDMIPKT_9171_CLASS,               // parentClassId
+                              NV_FALSE,                           // isRootClass
+                              initializeHdmiPktInterface9271,     // initInterface
+                              hdmiConstructor9271,                // constructor
+                              hdmiDestructor9271,                 // destructor
+                              NV9270_DISPLAY,                     // displayClass
+                              NV927D_CORE_CHANNEL_DMA             // coreDmaClass
+    },
+    [NVHDMIPKT_9471_CLASS] = {// Index 3==NVHDMIPKT_9471_CLASS
+                              NVHDMIPKT_9471_CLASS,               // classId
+                              NVHDMIPKT_9171_CLASS,               // parentClassId
+                              NV_FALSE,                           // isRootClass
+                              initializeHdmiPktInterface9471,     // initInterface
+                              hdmiConstructor9471,                // constructor
+                              hdmiDestructor9471,                 // destructor
+                              NV9470_DISPLAY,                     // displayClass
+                              NV947D_CORE_CHANNEL_DMA             // coreDmaClass
+    },
+    [NVHDMIPKT_9571_CLASS] = {// Index 4==NVHDMIPKT_9571_CLASS
+                              NVHDMIPKT_9571_CLASS,               // classId
+                              NVHDMIPKT_9171_CLASS,               // parentClassId
+                              NV_FALSE,                           // isRootClass
+                              initializeHdmiPktInterface9571,     // initInterface
+                              hdmiConstructor9571,                // constructor
+                              hdmiDestructor9571,                 // destructor
+                              NV9570_DISPLAY,                     // displayClass
+                              NV957D_CORE_CHANNEL_DMA             // coreDmaClass
+    },
+    [NVHDMIPKT_C371_CLASS] = {// Index 5==NVHDMIPKT_C371_CLASS
+                              NVHDMIPKT_C371_CLASS,               // classId
+                              NVHDMIPKT_9171_CLASS,               // parentClassId
+                              NV_FALSE,                           // isRootClass
+                              initializeHdmiPktInterfaceC371,     // initInterface
+                              hdmiConstructorC371,                // constructor
+                              hdmiDestructorC371,                 // destructor
+                              NVC370_DISPLAY,                     // displayClass
+                              NVC37D_CORE_CHANNEL_DMA             // coreDmaClass
+    },
+    [NVHDMIPKT_C571_CLASS] = {// Index 6==NVHDMIPKT_C571_CLASS
+                              // Note that Turing (C57x) has a distinct displayClass and coreDmaClass,
+                              // but it inherits the _DISP_SF_USER class from Volta (C37x). We call this
+                              // NVHDMIPKT_C571_CLASS, but reuse initInterface()/constructor()/destructor()
+                              // from C371.
+ NVHDMIPKT_C571_CLASS, + NVHDMIPKT_9171_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterfaceC371, // initInterface + hdmiConstructorC371, // constructor + hdmiDestructorC371, // destructor + NVC570_DISPLAY, // displayClass + NVC57D_CORE_CHANNEL_DMA // coreDmaClass + }, + [NVHDMIPKT_C671_CLASS] = {// Index 7==NVHDMIPKT_C671_CLASS + NVHDMIPKT_C671_CLASS, // classId + NVHDMIPKT_9171_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterfaceC671, // initInterface + hdmiConstructorC671, // constructor + hdmiDestructorC671, // destructor + NVC670_DISPLAY, // displayClass + NVC67D_CORE_CHANNEL_DMA // coreDmaClass + }, + [NVHDMIPKT_C871_CLASS] = {// Index 9==NVHDMIPKT_C871_CLASS + NVHDMIPKT_C871_CLASS, // classId + NVHDMIPKT_C671_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterfaceC871, // initInterface + hdmiConstructorC871, // constructor + hdmiDestructorC871, // destructor + NVC870_DISPLAY, // displayClass + NVC87D_CORE_CHANNEL_DMA // coreDmaClass + }, + [NVHDMIPKT_C971_CLASS] = {// Index 10==NVHDMIPKT_C971_CLASS + NVHDMIPKT_C971_CLASS, // classId + NVHDMIPKT_C871_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterfaceC971, // initInterface + hdmiConstructorC971, // constructor + hdmiDestructorC971, // destructor + NVC970_DISPLAY, // displayClass + NVC97D_CORE_CHANNEL_DMA // coreDmaClass + }, + [NVHDMIPKT_CC71_CLASS] = {// Index 13==NVHDMIPKT_CC71_CLASS + NVHDMIPKT_CC71_CLASS, // classId + NVHDMIPKT_C971_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterfaceCC71, // initInterface + hdmiConstructorC971, // constructor + hdmiDestructorC971, // destructor + NVCC70_DISPLAY, // displayClass + NVCC7D_CORE_CHANNEL_DMA // coreDmaClass + }, +}; + +/********************************** HDMI Library interfaces *************************************/ +/* + * NvHdmiPkt_PacketCtrl + */ +NVHDMIPKT_RESULT +NvHdmiPkt_PacketCtrl(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl) +{ + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + return pClass->hdmiPacketCtrl(pClass, + subDevice, + displayId, + head, + packetType, + transmitControl); +} + +/* + * NvHdmiPkt_PacketWrite + */ +NVHDMIPKT_RESULT +NvHdmiPkt_PacketWrite(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + if ((pPacket == NULL) || (packetLen == 0)) + { + return NVHDMIPKT_INVALID_ARG; + } + + HDMI_PACKET_TYPE infoframeType = pPacket[0]; // header byte 0 is packet type + // Lower bound check. Since actual infoframe size varies depending on the infoframe packet being sent, + // check all supported infoframe types and their expected sizes. 
This is not a strict == check because they may or may not include the
+    // additional checksum byte (library clients take care of adding the checksum byte if needed)
+    if (((infoframeType == hdmi_pktType_GeneralControl)                 && (packetLen < 6))                                              ||
+        ((infoframeType == hdmi_pktType_GamutMetadata)                  && (packetLen < sizeof(NVT_GAMUT_METADATA)))                     ||
+        ((infoframeType == hdmi_pktType_ExtendedMetadata)               && (packetLen < sizeof(NVT_EXTENDED_METADATA_PACKET_INFOFRAME))) ||
+        ((infoframeType == hdmi_pktType_VendorSpecInfoFrame)            && (packetLen < 8))                                              ||
+        ((infoframeType == hdmi_pktType_AviInfoFrame)                   && (packetLen < 13))                                             ||
+        ((infoframeType == hdmi_pktType_SrcProdDescInfoFrame)           && (packetLen < sizeof(NVT_SPD_INFOFRAME)))                      ||
+        ((infoframeType == hdmi_pktType_DynamicRangeMasteringInfoFrame) && (packetLen < sizeof(NVT_HDR_INFOFRAME))))
+        // Unused: hdmi_pktType_AudioClkRegeneration
+        // Unused: hdmi_pktType_MpegSrcInfoFrame
+    {
+        NvHdmiPkt_Print(pClass, "WARNING - packet length too small for infoframe type %d, check payload", infoframeType);
+    }
+
+    return pClass->hdmiPacketWrite(pClass,
+                                   subDevice,
+                                   displayId,
+                                   head,
+                                   packetType,
+                                   transmitControl,
+                                   packetLen,
+                                   pPacket);
+}
+
+/*
+ * NvHdmiPkt_PacketRead
+ */
+NVHDMIPKT_RESULT
+NvHdmiPkt_PacketRead(NvHdmiPkt_Handle libHandle,
+                     NvU32            subDevice,
+                     NvU32            head,
+                     NVHDMIPKT_TYPE   packetReg,
+                     NvU32            bufferLen,
+                     NvU8      *const pOutPktBuffer)
+{
+    if (libHandle == NVHDMIPKT_INVALID_HANDLE)
+    {
+        return NVHDMIPKT_LIBRARY_INIT_FAIL;
+    }
+
+    NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle);
+
+    if ((pOutPktBuffer == NULL) || (bufferLen == 0))
+    {
+        return NVHDMIPKT_INVALID_ARG;
+    }
+
+    return pClass->hdmiPacketRead(pClass,
+                                  subDevice,
+                                  head,
+                                  packetReg,
+                                  bufferLen,
+                                  pOutPktBuffer);
+}
+
+/*
+ * NvHdmiPkt_SetupAdvancedInfoframe
+ */
+NVHDMIPKT_RESULT
+NvHdmiPkt_SetupAdvancedInfoframe(NvHdmiPkt_Handle          libHandle,
+                                 NvU32                     subDevice,
+                                 NvU32                     head,
+                                 NVHDMIPKT_TYPE            packetReg,
+                                 ADVANCED_INFOFRAME const *pInfoframe)
+{
+    if (libHandle == NVHDMIPKT_INVALID_HANDLE)
+    {
+        return NVHDMIPKT_LIBRARY_INIT_FAIL;
+    }
+
+    NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle);
+
+    return pClass->programAdvancedInfoframe(pClass,
+                                            subDevice,
+                                            head,
+                                            packetReg,
+                                            pInfoframe);
+}
+
+NVHDMIPKT_RESULT
+NvHdmi_AssessLinkCapabilities(NvHdmiPkt_Handle      libHandle,
+                              NvU32                 subDevice,
+                              NvU32                 displayId,
+                              NVT_EDID_INFO const * const pSinkEdid,
+                              HDMI_SRC_CAPS        *pSrcCaps,
+                              HDMI_SINK_CAPS       *pSinkCaps)
+{
+    if (libHandle == NVHDMIPKT_INVALID_HANDLE)
+    {
+        return NVHDMIPKT_LIBRARY_INIT_FAIL;
+    }
+
+    if (!pSinkEdid ||
+        !pSrcCaps  ||
+        !pSinkCaps)
+    {
+        return NVHDMIPKT_INVALID_ARG;
+    }
+
+    NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle);
+    return pClass->hdmiAssessLinkCapabilities(pClass,
+                                              subDevice,
+                                              displayId,
+                                              pSinkEdid,
+                                              pSrcCaps,
+                                              pSinkCaps);
+}
+/*
+ * NvHdmi_QueryFRLConfig
+ */
+NVHDMIPKT_RESULT
+NvHdmi_QueryFRLConfig(NvHdmiPkt_Handle libHandle,
+                      HDMI_VIDEO_TRANSPORT_INFO     const * const pVidTransInfo,
+                      HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl,
+                      HDMI_SRC_CAPS                 const * const pSrcCaps,
+                      HDMI_SINK_CAPS                const * const pSinkCaps,
+                      HDMI_FRL_CONFIG              *pFRLConfig)
+{
+    if (libHandle == NVHDMIPKT_INVALID_HANDLE)
+    {
+        return NVHDMIPKT_LIBRARY_INIT_FAIL;
+    }
+
+    if (!pVidTransInfo ||
+        !pClientCtrl   ||
+        !pSrcCaps      ||
+        !pSinkCaps     ||
+        !pFRLConfig)
+    {
+        return NVHDMIPKT_INVALID_ARG;
+    }
+
+    // if there is no FRL capability reported, fail this call
+    if ((pSrcCaps->linkMaxFRLRate  == HDMI_FRL_DATA_RATE_NONE) ||
+        (pSinkCaps->linkMaxFRLRate == HDMI_FRL_DATA_RATE_NONE))
+    {
+        return
NVHDMIPKT_FAIL; + } + + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + return pClass->hdmiQueryFRLConfig(pClass, + pVidTransInfo, + pClientCtrl, + pSrcCaps, + pSinkCaps, + pFRLConfig); +} + +/* + * NvHdmi_SetFRLConfig + */ +NVHDMIPKT_RESULT +NvHdmi_SetFRLConfig(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NvBool bFakeLt, + HDMI_FRL_CONFIG *pFRLConfig) +{ + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + if (!pFRLConfig) + { + return NVHDMIPKT_INVALID_ARG; + } + + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + return pClass->hdmiSetFRLConfig(pClass, + subDevice, + displayId, + bFakeLt, + pFRLConfig); + +} + +/* + * NvHdmi_ClearFRLConfig + */ +NVHDMIPKT_RESULT +NvHdmi_ClearFRLConfig(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId) +{ + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + return pClass->hdmiClearFRLConfig(pClass, + subDevice, + displayId); +} + +/*************************** HDMI Library internal helper functions *****************************/ +/* + * NvHdmiPkt_HwClass2HdmiClass + * internal function; translates display/display-dma class to hdmi class + */ +static NVHDMIPKT_CLASS_ID +NvHdmiPkt_HwClass2HdmiClass(NvU32 const hwClass) +{ + NVHDMIPKT_CLASS_ID hdmiClassId = NVHDMIPKT_9571_CLASS; + NvU32 i = 0; + + for (i = 0; i < NVHDMIPKT_INVALID_CLASS; i++) + { + if ((hierarchy[i].displayClass == hwClass) || + (hierarchy[i].coreDmaClass == hwClass)) + { + hdmiClassId = hierarchy[i].classId; + break; + } + } + + // Assign default class 73 to pre-Kepler families + if (hwClass < NV9170_DISPLAY) + { + hdmiClassId = NVHDMIPKT_0073_CLASS; + } + + return hdmiClassId; +} + +/* + * NvHdmiPkt_InitInterfaces + * internal function; calls class init interface functions + */ +static void +NvHdmiPkt_InitInterfaces(NVHDMIPKT_CLASS_ID const thisClassId, + NVHDMIPKT_CLASS* const pClass) +{ + // Recurse to the root first, and then call each initInterface() method + // from root to child. + if (!hierarchy[thisClassId].isRootClass) + { + NvHdmiPkt_InitInterfaces(hierarchy[thisClassId].parentClassId, pClass); + } + hierarchy[thisClassId].initInterface(pClass); +} + +static void +NvHdmiPkt_CallDestructors(NVHDMIPKT_CLASS_ID const thisClassId, + NVHDMIPKT_CLASS* const pClass) +{ + // Destructor calls are made from this to root class. + hierarchy[thisClassId].destructor(pClass); + if (!hierarchy[thisClassId].isRootClass) + { + NvHdmiPkt_CallDestructors(hierarchy[thisClassId].parentClassId, pClass); + } +} + +/* + * NvHdmiPkt_CallConstructors + * internal function; calls class constructors and returns boolean success/failure + */ +static NvBool +NvHdmiPkt_CallConstructors(NVHDMIPKT_CLASS_ID const thisClassId, + NVHDMIPKT_CLASS* const pClass) +{ + // Recurse to the root first, and then call each constructor + // from root to child. 
+ if (!hierarchy[thisClassId].isRootClass) + { + if (!NvHdmiPkt_CallConstructors(hierarchy[thisClassId].parentClassId, pClass)) + { + return NV_FALSE; + } + } + + if (!hierarchy[thisClassId].constructor(pClass)) + { + if (!hierarchy[thisClassId].isRootClass) + { + // Backtrack on constructor failure + NvHdmiPkt_CallDestructors(hierarchy[thisClassId].parentClassId, pClass); + } + + return NV_FALSE; + } + + return NV_TRUE; +} + +/******************************** HDMI Library Init functions ***********************************/ +/* + * NvHdmiPkt_InitializeLibrary + */ +NvHdmiPkt_Handle +NvHdmiPkt_InitializeLibrary(NvU32 const hwClass, + NvU32 const numSubDevices, + NvHdmiPkt_CBHandle const cbHandle, + const NVHDMIPKT_CALLBACK* const pCallbacks, + NvU32 const sfUserHandle, + const NVHDMIPKT_RM_CLIENT_HANDLES* const pClientHandles) +{ + NVHDMIPKT_CLASS* pClass = 0; + NvU32 i = 0; + NvBool result = NV_FALSE; + NVHDMIPKT_CLASS_ID thisClassId = NVHDMIPKT_INVALID_CLASS; + + // Argument validations + if (pCallbacks == 0 || numSubDevices == 0) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } + + // Validating RM handles/callbacks +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (sfUserHandle == 0 || pClientHandles == 0) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + if (pCallbacks->rmGetMemoryMap == 0 || + pCallbacks->rmFreeMemoryMap == 0 || + pCallbacks->rmDispControl2 == 0) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + + // Mandatory mutex callbacks. + if (pCallbacks->acquireMutex == 0 || pCallbacks->releaseMutex == 0) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } + + // Mandatory memory allocation callbacks. + if (pCallbacks->malloc == 0 || pCallbacks->free == 0) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } + + pClass = pCallbacks->malloc(cbHandle, sizeof(NVHDMIPKT_CLASS)); + if (!pClass) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } + + // 0. 
Get the hdmi class ID + thisClassId = NvHdmiPkt_HwClass2HdmiClass(hwClass); + + // Init data + NVMISC_MEMSET(pClass, 0, sizeof(NVHDMIPKT_CLASS)); + + for (i = 0; i < NV_MAX_SUBDEVICES; i++) + { + pClass->memMap[i].subDevice = NVHDMIPKT_INVALID_SUBDEV; + } + + pClass->numSubDevices = numSubDevices; + pClass->cbHandle = cbHandle; + pClass->thisId = thisClassId; + + // RM handles/callbacks +#if NVHDMIPKT_RM_CALLS_INTERNAL + pClass->isRMCallInternal = NV_TRUE; + pClass->sfUserHandle = sfUserHandle; + pClass->clientHandles.hClient = pClientHandles->hClient; + pClass->clientHandles.hDevice = pClientHandles->hDevice; + pClass->clientHandles.hDisplay = pClientHandles->hDisplay; + + for (i = 0; i < NV_MAX_SUBDEVICES; i++) + { + pClass->clientHandles.hSubDevices[i] = pClientHandles->hSubDevices[i]; + } +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + pClass->isRMCallInternal = NV_FALSE; + pClass->callback.rmGetMemoryMap = pCallbacks->rmGetMemoryMap; + pClass->callback.rmFreeMemoryMap = pCallbacks->rmFreeMemoryMap; + pClass->callback.rmDispControl2 = pCallbacks->rmDispControl2; +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + + pClass->callback.acquireMutex = pCallbacks->acquireMutex; + pClass->callback.releaseMutex = pCallbacks->releaseMutex; + + pClass->callback.malloc = pCallbacks->malloc; + pClass->callback.free = pCallbacks->free; + +#if !defined (NVHDMIPKT_DONT_USE_TIMER) + pClass->callback.setTimeout = pCallbacks->setTimeout; + pClass->callback.checkTimeout = pCallbacks->checkTimeout; +#endif + +#if defined (DEBUG) + pClass->callback.print = pCallbacks->print; + pClass->callback.assert = pCallbacks->assert; +#endif + + // 1. Init interfaces + NvHdmiPkt_InitInterfaces(thisClassId, pClass); + + // 2. Constructor calls + result = NvHdmiPkt_CallConstructors(thisClassId, pClass); + +NvHdmiPkt_InitializeLibrary_exit: + if (result) + { + NvHdmiPkt_Print(pClass, "Initialize Success."); + } + else + { + if (pClass) + { + NvHdmiPkt_Print(pClass, "Initialize Failed."); + } + if (pCallbacks && pCallbacks->free) + { + pCallbacks->free(cbHandle, pClass); + } + } + + return (result == NV_TRUE) ? toHdmiPktHandle(pClass) : NVHDMIPKT_INVALID_HANDLE; +} + +/* + * NvHdmiPkt_DestroyLibrary + */ +void +NvHdmiPkt_DestroyLibrary(NvHdmiPkt_Handle libHandle) +{ + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + NVHDMIPKT_CLASS_ID currClassId = NVHDMIPKT_0073_CLASS; + + if (pClass != 0) + { + NvHdmiPkt_Print(pClass, "Destroy."); + NvHdmiPkt_CBHandle cbHandle = pClass->cbHandle; + void (*freeCb) (NvHdmiPkt_CBHandle handle, + void *pMem) = pClass->callback.free; + + currClassId = pClass->thisId; + NvHdmiPkt_CallDestructors(currClassId, pClass); + + freeCb(cbHandle, pClass); + } +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt.h b/src/common/modeset/hdmipacket/nvhdmipkt.h new file mode 100644 index 0000000..c6ef53d --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt.h @@ -0,0 +1,426 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * File: nvhdmipkt.h
+ *
+ * Purpose: This file is a common header for all HDMI Library clients
+ */
+
+#ifndef _NVHDMIPKT_H_
+#define _NVHDMIPKT_H_
+
+#include
+
+#include "nvmisc.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**************************** HDMI Library defines, enums and structs ***************************/
+/************************************************************************************************
+ * NOTE: The NVHDMIPKT_RM_CALLS_INTERNAL define tells this library to make RM calls (allocate,  *
+ * free, control, etc.) internally and not through callbacks into the client.                   *
+ ************************************************************************************************/
+#if !defined(NVHDMIPKT_RM_CALLS_INTERNAL)
+#    define NVHDMIPKT_RM_CALLS_INTERNAL 1
+#endif
+
+// NVHDMIPKT_RESULT: HDMI library return result enums
+typedef enum
+{
+    NVHDMIPKT_SUCCESS                = 0,
+    NVHDMIPKT_FAIL                   = 1,
+    NVHDMIPKT_LIBRARY_INIT_FAIL      = 2,
+    NVHDMIPKT_INVALID_ARG            = 3,
+    NVHDMIPKT_TIMEOUT                = 4,
+    NVHDMIPKT_ERR_GENERAL            = 5,
+    NVHDMIPKT_INSUFFICIENT_BANDWIDTH = 6,
+    NVHDMIPKT_RETRY                  = 7,
+    NVHDMIPKT_INSUFFICIENT_BUFFER    = 8,
+    NVHDMIPKT_DSC_PPS_ERROR          = 9
+} NVHDMIPKT_RESULT;
+
+// NVHDMIPKT_TYPE: HDMI Packet Enums
+typedef enum _NVHDMIPKT_TYPE
+{
+    NVHDMIPKT_TYPE_UNDEFINED                 = 0,  // Undefined Packet Type
+    NVHDMIPKT_TYPE_GENERIC                   = 1,  // Generic packet, any Generic Packet
+                                                   // (e.g. Gamut Metadata packet)
+    NVHDMIPKT_TYPE_AVI_INFOFRAME             = 2,  // Avi infoframe
+    NVHDMIPKT_TYPE_GENERAL_CONTROL           = 3,  // GCP
+    NVHDMIPKT_TYPE_VENDOR_SPECIFIC_INFOFRAME = 4,  // VSI
+    NVHDMIPKT_TYPE_AUDIO_INFOFRAME           = 5,  // Audio InfoFrame
+    NVHDMIPKT_TYPE_SHARED_GENERIC1           = 6,  // ADA+ specific shared generic infoframe 1~6
+    NVHDMIPKT_TYPE_SHARED_GENERIC2           = 7,
+    NVHDMIPKT_TYPE_SHARED_GENERIC3           = 8,
+    NVHDMIPKT_TYPE_SHARED_GENERIC4           = 9,
+    NVHDMIPKT_TYPE_SHARED_GENERIC5           = 10,
+    NVHDMIPKT_TYPE_SHARED_GENERIC6           = 11,
+    NVHDMIPKT_TYPE_SHARED_GENERIC7           = 12,
+    NVHDMIPKT_TYPE_SHARED_GENERIC8           = 13,
+    NVHDMIPKT_TYPE_SHARED_GENERIC9           = 14,
+    NVHDMIPKT_TYPE_SHARED_GENERIC10          = 15,
+    NVHDMIPKT_INVALID_PKT_TYPE               = 16
+} NVHDMIPKT_TYPE;
+
+// Hdmi packet TransmitControl defines. These reflect the corresponding ctrl and
+// display class defines for infoframes.
+#define NV_HDMI_PKT_TRANSMIT_CTRL_ENABLE                        0:0
+#define NV_HDMI_PKT_TRANSMIT_CTRL_ENABLE_DIS             0x00000000
+#define NV_HDMI_PKT_TRANSMIT_CTRL_ENABLE_EN              0x00000001
+
+#define NV_HDMI_PKT_TRANSMIT_CTRL_OTHER                         1:1
+#define NV_HDMI_PKT_TRANSMIT_CTRL_OTHER_DIS              0x00000000
+#define NV_HDMI_PKT_TRANSMIT_CTRL_OTHER_EN               0x00000001
+
+#define NV_HDMI_PKT_TRANSMIT_CTRL_SINGLE                        2:2
+#define NV_HDMI_PKT_TRANSMIT_CTRL_SINGLE_DIS             0x00000000
+#define NV_HDMI_PKT_TRANSMIT_CTRL_SINGLE_EN              0x00000001
+
+#define NV_HDMI_PKT_TRANSMIT_CTRL_CHKSUM_HW                     3:3
+#define NV_HDMI_PKT_TRANSMIT_CTRL_CHKSUM_HW_DIS          0x00000000
+#define NV_HDMI_PKT_TRANSMIT_CTRL_CHKSUM_HW_EN           0x00000001
+
+#define NV_HDMI_PKT_TRANSMIT_CTRL_HBLANK                        4:4
+#define NV_HDMI_PKT_TRANSMIT_CTRL_HBLANK_DIS             0x00000000
+#define NV_HDMI_PKT_TRANSMIT_CTRL_HBLANK_EN              0x00000001
+
+#define NV_HDMI_PKT_TRANSMIT_CTRL_VIDEO_FMT                     5:5
+#define NV_HDMI_PKT_TRANSMIT_CTRL_VIDEO_FMT_SW_CTRL      0x00000000
+#define NV_HDMI_PKT_TRANSMIT_CTRL_VIDEO_FMT_HW_CTRL      0x00000001
+
+// NVHDMIPKT_TC: HDMI Packet Transmit Control
+// NOTE: Clients should use the enum values below for transmit control and avoid the raw
+//       defines above. Use the raw defines only when fine-grained control is needed; in
+//       that case the composed value has to be explicitly cast to NVHDMIPKT_TC by the client.
+typedef enum _NVHDMIPKT_TC
+{
+    NVHDMIPKT_TRANSMIT_CONTROL_DISABLE =
+        (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE,    _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER,     _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE,    _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _DIS)),
+
+    NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME =
+        (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE,    _EN)  |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER,     _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE,    _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN)),
+
+    NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME_SW_CHECKSUM =
+        (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE,    _EN)  |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER,     _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE,    _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _DIS)),
+
+    NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_SINGLE_FRAME =
+        (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE,    _EN)  |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER,     _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE,    _EN)  |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN)),
+
+    NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_OTHER_FRAME =
+        (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE,    _EN)  |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER,     _EN)  |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE,    _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN)),
+
+    NVHDMIPKT_TRANSMIT_CONTROL_VIDEO_FMT_HW_CTRL =
+        (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _VIDEO_FMT, _HW_CTRL)),
+
+} NVHDMIPKT_TC;
+
+typedef enum _INFOFRAME_CTRL_RUN_MODE
+{
+    INFOFRAME_CTRL_RUN_MODE_ALWAYS = 0,
+    INFOFRAME_CTRL_RUN_MODE_ONCE,
+    INFOFRAME_CTRL_RUN_MODE_FID_ALWAYS,
+    INFOFRAME_CTRL_RUN_MODE_FID_ONCE,
+    INFOFRAME_CTRL_RUN_MODE_FID_TRIGGER
+} INFOFRAME_CTRL_RUN_MODE;
+
+typedef enum _INFOFRAME_CTRL_LOC
+{
+    INFOFRAME_CTRL_LOC_VBLANK = 0,
+    INFOFRAME_CTRL_LOC_VSYNC,
+    INFOFRAME_CTRL_LOC_LINE,
+    INFOFRAME_CTRL_LOC_LOADV
+} INFOFRAME_CTRL_LOC;
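The NOTE above permits composing a transmit control by hand when the canned NVHDMIPKT_TC values do not fit. A minimal sketch of that fine-control path, using the same DRF_DEF macro from nvmisc.h that the enum above already uses; the explicit cast is the part the NOTE requires:

    // One-shot infoframe with a software-computed checksum, composed from the
    // raw field defines instead of a canned NVHDMIPKT_TC enumerator.
    NVHDMIPKT_TC tc = (NVHDMIPKT_TC)
        (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE,    _EN)  |
         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE,    _EN)  |
         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _DIS));

Prefer the named enum values when one matches; hand-composed values trade readability for control.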
+// RM client handles. Used when the client chooses to have the hdmi library make RM calls.
+// NOTE: the NVHDMIPKT_RM_CALLS_INTERNAL macro should be defined to use it.
+typedef struct tagNVHDMIPKT_RM_CLIENT_HANDLES
+{
+    NvU32 hClient;
+    NvU32 hDevice;
+    NvU32 hSubDevices[NV_MAX_SUBDEVICES];
+    NvU32 hDisplay;
+} NVHDMIPKT_RM_CLIENT_HANDLES;
+
+/****************************** HDMI Library callbacks into client ******************************/
+typedef void* NvHdmiPkt_CBHandle;
+
+/************************************************************************************************
+ * [rmGetMemoryMap, rmFreeMemoryMap, rmDispControl,] acquireMutex and releaseMutex are          *
+ * mandatory callbacks, to be implemented by the client. The callbacks in [] are mandatory      *
+ * only for Windows.                                                                            *
+ * Linux clients need not implement those if they use the NVHDMIPKT_RM_CALLS_INTERNAL define.   *
+ *                                                                                              *
+ * rmGetMemoryMap and rmFreeMemoryMap are RM calls to allocate the DISP_SF_USER class.          *
+ * And the mutex callbacks keep hdmi packet operations atomic.                                  *
+ ************************************************************************************************/
+typedef struct _tagNVHDMIPKT_CALLBACK
+{
+    // MANDATORY callbacks.
+    NvBool
+    (*rmGetMemoryMap) (NvHdmiPkt_CBHandle handle,
+                       NvU32              dispSfUserClassId,
+                       NvU32              dispSfUserSize,
+                       NvU32              subDevice,
+                       NvU32*             pMemHandle,
+                       void**             ppBaseMem);
+
+    void
+    (*rmFreeMemoryMap) (NvHdmiPkt_CBHandle handle,
+                        NvU32              subDevice,
+                        NvU32              memHandle,
+                        void*              pMem);
+
+    NvBool
+    (*rmDispControl2) (NvHdmiPkt_CBHandle handle,
+                       NvU32              subDevice,
+                       NvU32              cmd,
+                       void*              pParams,
+                       NvU32              paramSize);
+
+
+    void
+    (*acquireMutex) (NvHdmiPkt_CBHandle handle);
+
+    void
+    (*releaseMutex) (NvHdmiPkt_CBHandle handle);
+
+    // OPTIONAL callbacks
+    /* time in microseconds (us) */
+    NvBool
+    (*setTimeout) (NvHdmiPkt_CBHandle handle,
+                   NvU32              us_timeout);
+
+    /* checkTimeout returns true when the timer times out */
+    NvBool
+    (*checkTimeout) (NvHdmiPkt_CBHandle handle);
+
+    // callbacks to allocate memory on heap to reduce stack usage
+    void*
+    (*malloc) (NvHdmiPkt_CBHandle handle,
+               NvLength           numBytes);
+
+    void
+    (*free) (NvHdmiPkt_CBHandle handle,
+             void              *pMem);
+
+    void
+    (*print) (NvHdmiPkt_CBHandle handle,
+              const char*        fmtstring,
+              ...)
+#if defined(__GNUC__)
+    __attribute__ ((format (printf, 2, 3)))
+#endif
+    ;
+
+    void
+    (*assert) (const char   *expression,
+               const char   *filename,
+               const char   *function,
+               unsigned int  line);
+} NVHDMIPKT_CALLBACK;
+
+
+/************************************************************************************************
+ * runMode  - specify one of the modes of operation                                             *
+ * location - vsync/line/vblank                                                                 *
+ * flipId   - client to provide if FID mode of operation, 0 otherwise                           *
+ * lineNum  - if the infoframe is sent at a line border, specify lineNum. 0 is the default      *
+ *                                                                                              *
+ * Additional infoframe support for the T239 chip                                               *
+ ************************************************************************************************/
+typedef struct tagADVANCED_INFOFRAME
+{
+    INFOFRAME_CTRL_RUN_MODE runMode;
+    INFOFRAME_CTRL_LOC      location;
+
+    NvU32 flipId;
+    NvU32 lineNum;
+    NvU32 numAdditionalInfoframes;
+
+    NvU32       packetLen; // client is expected to fill in 9 DWs for each infoframe, leaving unused bytes 0
+    NvU8 const *pPacket;   // (4 bytes header, 32 bytes max payload per infoframe) For SIZE > 0, pPacket is
+                           // a continuous array of multiple infoframes, each 9 DWs in size
+
+    // flags
+    NvU32 isLargeInfoframe : 1; // set if client wants SIZE > 0. Default 0 = normal infoframe
+    NvU32 lineIdReversed   : 1; // set if client wants line Id reversed. Default 0 = not reversed
+    NvU32 crcOverride      : 1; // set if client uses CRC override.
Default 0 = CRC override not used + NvU32 asSdpOverride : 1; // set if client wants to enable AS SDP override in infoframe HW + NvU32 hwChecksum : 1; // set if client wants HW checksum. Default 0 = SW checksum + NvU32 matchFidMethodArmState : 1; // set if client wants HW to send infoframe when FID method's ARM state matches. Default 0 = send only when FID method Active state + NvU32 winMethodCyaBroadcast : 1; // set if client will set up CYA reg for SF to decode directly. Default 0 = private precalc decode (SF decodes from precalc) + NvU32 highAudioPriority : 1; // set if client wants high priority for audio. Default 0 = audio low priority + NvU32 reserved : 24; +} ADVANCED_INFOFRAME; + +/*********************** HDMI Library interface to write hdmi ctrl/packet ***********************/ +typedef void* NvHdmiPkt_Handle; +#define NVHDMIPKT_INVALID_HANDLE ((NvHdmiPkt_Handle)0) + +/************************************************************************************************ + * NvHdmiPkt_PacketCtrl - Returns HDMI NVHDMIPKT_RESULT. * + * * + * Parameters: * + * libHandle - Hdmi library handle, provided on initializing the library. * + * subDevice - Sub Device ID. * + * displayId - Display ID. * + * head - Head number. * + * packetType - One of the NVHDMIPKT_TYPE types. * + * transmitControl - Packet transmit control setting. * + ************************************************************************************************/ +NVHDMIPKT_RESULT +NvHdmiPkt_PacketCtrl (NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl); + +/************************************************************************************************ + * NvHdmiPkt_PacketWrite - Returns HDMI NVHDMIPKT_RESULT. * + * * + * Parameters: * + * libHandle - Hdmi library handle, provided on initializing the library. * + * subDevice - Sub Device ID. * + * displayId - Display ID. * + * head - Head number. * + * packetType - One of the NVHDMIPKT_TYPE types. * + * transmitControl - Packet transmit control setting. * + * packetLen - Length of the packet in bytes to be transmitted. * + * pPacket - Pointer to packet data. * + ************************************************************************************************/ +NVHDMIPKT_RESULT +NvHdmiPkt_PacketWrite(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket); + + +/************************************************************************************************ + * NvHdmiPkt_PacketRead - Returns HDMI NVHDMIPKT_RESULT. * + * * + * Parameters: * + * libHandle - Hdmi library handle, provided on initializing the library. * + * subDevice - Sub Device ID. * + * head - Head number. * + * packetReg - One of the NVHDMIPKT_TYPE types. * + * bufferLen - Size of the client provided read out buffer * + * pOutPktBuffer - buffer into which read packet data is to be written. * + ************************************************************************************************/ +NVHDMIPKT_RESULT +NvHdmiPkt_PacketRead(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 head, + NVHDMIPKT_TYPE packetReg, + NvU32 bufferLen, + NvU8 *const pOutPktBuffer); + + +/************************************************************************************************ + * NvHdmiPkt_SetupAdvancedInfoframe - Returns HDMI NVHDMIPKT_RESULT. 
*
+ *                                                                                              *
+ * Parameters:                                                                                  *
+ *   libHandle  - Hdmi library handle, provided on initializing the library.                    *
+ *   subDevice  - Sub Device ID.                                                                *
+ *   head       - Head number.                                                                  *
+ *   packetType - One of the NVHDMIPKT_TYPE types.                                              *
+ *   pInfoframe - details about the infoframe to be programmed - run mode/loc/etc and pkt bytes *
+ ************************************************************************************************/
+NVHDMIPKT_RESULT
+NvHdmiPkt_SetupAdvancedInfoframe(NvHdmiPkt_Handle          libHandle,
+                                 NvU32                     subDevice,
+                                 NvU32                     head,
+                                 NVHDMIPKT_TYPE            packetReg,
+                                 ADVANCED_INFOFRAME const *pInfoframe);
+
+
+/***************************** Interface to initialize HDMI Library *****************************/
+
+/************************************************************************************************
+ * NvHdmiPkt_InitializeLibrary - Returns NvHdmiPkt_Handle. This handle is used to call          *
+ *                               library interfaces. If the handle returned is invalid -        *
+ *                               NVHDMIPKT_INVALID_HANDLE - there was a problem in              *
+ *                               initialization and the library won't work.                     *
+ *                                                                                              *
+ * Parameters:                                                                                  *
+ *   hwClass        - Depending on HW, apply display class or display dma class. Either will    *
+ *                    do. E.g. for GK104 - NV9170_DISPLAY or NV917D_CORE_CHANNEL_DMA.           *
+ *   numSubDevices  - Number of sub devices.                                                    *
+ *                                                                                              *
+ *   cbHandle       - Callback handle. Client cookie for callbacks made to the client.          *
+ *   pCallback      - Callbacks. Struct NVHDMIPKT_CALLBACK.                                     *
+ *                                                                                              *
+ * The sfUserHandle and clientHandles parameters below are used only when not providing the     *
+ * rmGetMemoryMap, rmFreeMemoryMap and rmDispControl callbacks. This is meant for Linux,        *
+ * and is controlled by the NVHDMIPKT_RM_CALLS_INTERNAL macro.                                  *
+ * NOTE: Clients not using NVHDMIPKT_RM_CALLS_INTERNAL need to set both sfUserHandle and        *
+ *       clientHandles to 0.                                                                    *
+ *                                                                                              *
+ *   sfUserHandle   - SF_USER handle; this is the base handle. Subsequent subdevice handles     *
+ *                    are derived incrementally from this handle.                               *
+ *   pClientHandles - RM handles for client, device, subdevices and displayCommon.              *
+ *                                                                                              *
+ ************************************************************************************************/
+NvHdmiPkt_Handle
+NvHdmiPkt_InitializeLibrary(NvU32 const               hwClass,
+                            NvU32 const               numSubDevices,
+                            NvHdmiPkt_CBHandle const  cbHandle,
+                            const NVHDMIPKT_CALLBACK* const pCallback,
+                            NvU32 const               sfUserHandle,
+                            const NVHDMIPKT_RM_CLIENT_HANDLES* const pClientHandles);
+
+/************************************************************************************************
+ * NvHdmiPkt_DestroyLibrary                                                                     *
+ *                                                                                              *
+ * When done with the HDMI Library, call NvHdmiPkt_DestroyLibrary. It is like a destructor.     *
+ * This destructor frees up resources acquired during initialize.                               *
+ *                                                                                              *
+ ************************************************************************************************/
+void
+NvHdmiPkt_DestroyLibrary(NvHdmiPkt_Handle libHandle);
+
+#ifdef __cplusplus
+}
+#endif
+#endif // _NVHDMIPKT_H_
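A minimal sketch of the library lifecycle on the NVHDMIPKT_RM_CALLS_INTERNAL path, where the library issues RM calls itself and the client supplies only the mutex and heap callbacks. The callback glue, the stdlib-based allocator, the NVC570_DISPLAY class choice, and the hSfUser/pRmHandles values are illustrative assumptions; a real client fills these from its own RM allocations.

    #include <stdlib.h>   // illustrative user-space allocator for the sketch

    static void  exAcquireMutex(NvHdmiPkt_CBHandle h)             { /* take client lock    */ }
    static void  exReleaseMutex(NvHdmiPkt_CBHandle h)             { /* release client lock */ }
    static void *exMalloc      (NvHdmiPkt_CBHandle h, NvLength n) { return malloc(n); }
    static void  exFree        (NvHdmiPkt_CBHandle h, void *p)    { free(p); }

    static void exampleLifecycle(NvU32 hSfUser,
                                 const NVHDMIPKT_RM_CLIENT_HANDLES *pRmHandles)
    {
        NVHDMIPKT_CALLBACK cb = {0};
        NvHdmiPkt_Handle   hLib;

        cb.acquireMutex = exAcquireMutex;
        cb.releaseMutex = exReleaseMutex;
        cb.malloc       = exMalloc;
        cb.free         = exFree;

        hLib = NvHdmiPkt_InitializeLibrary(NVC570_DISPLAY,  // hwClass (illustrative)
                                           1,               // numSubDevices
                                           NULL,            // cbHandle (client cookie)
                                           &cb,
                                           hSfUser,         // base SF_USER handle
                                           pRmHandles);
        if (hLib == NVHDMIPKT_INVALID_HANDLE)
            return;

        // ... NvHdmiPkt_PacketWrite() / NvHdmi_QueryFRLConfig() calls ...

        NvHdmiPkt_DestroyLibrary(hLib);
    }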
diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_0073.c b/src/common/modeset/hdmipacket/nvhdmipkt_0073.c
new file mode 100644
index 0000000..3928255
--- /dev/null
+++ b/src/common/modeset/hdmipacket/nvhdmipkt_0073.c
@@ -0,0 +1,422 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * File: nvhdmipkt_0073.c
+ *
+ * Purpose: Provides infoframe write functions for HDMI library for pre-Kepler chips
+ */
+
+#include "nvhdmipkt_common.h"
+#include "nvhdmipkt_class.h"
+
+#include "nvhdmipkt_internal.h"
+
+#include "hdmi_spec.h"
+#include "ctrl/ctrl0073/ctrl0073specific.h"
+
+NVHDMIPKT_RESULT
+hdmiPacketCtrl0073(NVHDMIPKT_CLASS* pThis,
+                   NvU32            subDevice,
+                   NvU32            displayId,
+                   NvU32            head,
+                   NVHDMIPKT_TYPE   packetType,
+                   NVHDMIPKT_TC     transmitControl);
+
+NVHDMIPKT_RESULT
+hdmiPacketWrite0073(NVHDMIPKT_CLASS*  pThis,
+                    NvU32             subDevice,
+                    NvU32             displayId,
+                    NvU32             head,
+                    NVHDMIPKT_TYPE    packetType,
+                    NVHDMIPKT_TC      transmitControl,
+                    NvU32             packetLen,
+                    NvU8 const *const pPacket);
+
+/*
+ * hdmiPacketCtrl0073
+ */
+NVHDMIPKT_RESULT
+hdmiPacketCtrl0073(NVHDMIPKT_CLASS* pThis,
+                   NvU32            subDevice,
+                   NvU32            displayId,
+                   NvU32            head,
+                   NVHDMIPKT_TYPE   packetType,
+                   NVHDMIPKT_TC     transmitControl)
+{
+    NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS;
+    NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS params = {0};
+
+    NVMISC_MEMSET(&params, 0, sizeof(params));
+
+    params.subDeviceInstance = subDevice;
+    params.displayId         = displayId;
+    params.type              = pThis->translatePacketType(pThis, packetType);
+    params.transmitControl   = pThis->translateTransmitControl(pThis, transmitControl);
+
+#if NVHDMIPKT_RM_CALLS_INTERNAL
+    if (NvRmControl(pThis->clientHandles.hClient,
+                    pThis->clientHandles.hDisplay,
+                    NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET_CTRL,
+                    &params,
+                    sizeof(params)) != NVOS_STATUS_SUCCESS)
+
+#else // !NVHDMIPKT_RM_CALLS_INTERNAL
+    NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle,
+                                                     params.subDeviceInstance,
+                                                     NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET_CTRL,
+                                                     &params, sizeof(params));
+    if (bSuccess == NV_FALSE)
+#endif // NVHDMIPKT_RM_CALLS_INTERNAL
+    {
+        NvHdmiPkt_Print(pThis, "ERROR - RM call to hdmiPacketCtrl failed.");
+        NvHdmiPkt_Assert(0);
+        result = NVHDMIPKT_FAIL;
+    }
+
+    return result;
+}
+
+/*
+ * hdmiPacketWrite0073
+ */
+NVHDMIPKT_RESULT
+hdmiPacketWrite0073(NVHDMIPKT_CLASS*  pThis,
+                    NvU32             subDevice,
+                    NvU32             displayId,
+                    NvU32             head,
+                    NVHDMIPKT_TYPE    packetType,
+                    NVHDMIPKT_TC      transmitControl,
+                    NvU32             packetLen,
+                    NvU8 const *const pPacket)
+{
+    NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS;
+
+    if (packetLen > NV0073_CTRL_SET_OD_MAX_PACKET_SIZE)
+    {
+        return NVHDMIPKT_INVALID_ARG;
+    }
+
+    NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS params = {0};
+
+    NVMISC_MEMSET(&params, 0, sizeof(params));
+
+/*
+ * hdmiPacketWrite0073
+ */
+NVHDMIPKT_RESULT
+hdmiPacketWrite0073(NVHDMIPKT_CLASS*   pThis,
+                    NvU32              subDevice,
+                    NvU32              displayId,
+                    NvU32              head,
+                    NVHDMIPKT_TYPE     packetType,
+                    NVHDMIPKT_TC       transmitControl,
+                    NvU32              packetLen,
+                    NvU8 const *const  pPacket)
+{
+    NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS;
+
+    if (packetLen > NV0073_CTRL_SET_OD_MAX_PACKET_SIZE)
+    {
+        return NVHDMIPKT_INVALID_ARG;
+    }
+
+    NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS params = {0};
+
+    NVMISC_MEMSET(&params, 0, sizeof(params));
+
+    params.subDeviceInstance = subDevice;
+    params.displayId         = displayId;
+    params.packetSize        = packetLen;
+    params.transmitControl   = pThis->translateTransmitControl(pThis, transmitControl);
+
+    // init the infoframe packet
+    NVMISC_MEMSET(params.aPacket, 0, NV0073_CTRL_SET_OD_MAX_PACKET_SIZE);
+
+    // copy the payload
+    NVMISC_MEMCPY(params.aPacket, pPacket, packetLen);
+
+#if NVHDMIPKT_RM_CALLS_INTERNAL
+    if (NvRmControl(pThis->clientHandles.hClient,
+                    pThis->clientHandles.hDisplay,
+                    NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET,
+                    &params,
+                    sizeof(params)) != NVOS_STATUS_SUCCESS)
+
+#else // !NVHDMIPKT_RM_CALLS_INTERNAL
+    NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle,
+                                                     params.subDeviceInstance,
+                                                     NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET,
+                                                     &params,
+                                                     sizeof(params));
+    if (bSuccess == NV_FALSE)
+#endif // NVHDMIPKT_RM_CALLS_INTERNAL
+    {
+        NvHdmiPkt_Print(pThis, "ERROR - RM call to hdmiPacketWrite failed.");
+        NvHdmiPkt_Assert(0);
+        result = NVHDMIPKT_FAIL;
+    }
+
+    return result;
+}
+
+/*
+ * translatePacketType0073
+ */
+static NvU32
+translatePacketType0073(NVHDMIPKT_CLASS*  pThis,
+                        NVHDMIPKT_TYPE    packetType)
+{
+    NvU32 type0073 = 0;
+
+    switch (packetType)
+    {
+    case NVHDMIPKT_TYPE_AVI_INFOFRAME:
+        type0073 = pktType_AviInfoFrame;
+        break;
+
+    case NVHDMIPKT_TYPE_GENERIC:
+        type0073 = pktType_GamutMetadata;
+        break;
+
+    case NVHDMIPKT_TYPE_GENERAL_CONTROL:
+        type0073 = pktType_GeneralControl;
+        break;
+
+    case NVHDMIPKT_TYPE_VENDOR_SPECIFIC_INFOFRAME:
+        type0073 = pktType_VendorSpecInfoFrame;
+        break;
+
+    case NVHDMIPKT_TYPE_AUDIO_INFOFRAME:
+        type0073 = pktType_AudioInfoFrame;
+        break;
+
+    default:
+        NvHdmiPkt_Print(pThis, "ERROR - translatePacketType wrong packet type: %0x",
+                        packetType);
+        NvHdmiPkt_Assert(0);
+        break;
+    }
+
+    return type0073;
+}
+
+/*
+ * translateTransmitControl0073
+ */
+static NvU32
+translateTransmitControl0073(NVHDMIPKT_CLASS*  pThis,
+                             NVHDMIPKT_TC      transmitControl)
+{
+    NvU32 tc = 0;
+
+    // TODO: tc validation
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE, _EN, transmitControl))
+    {
+        tc = FLD_SET_DRF(0073, _CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL,
+                         _ENABLE, _YES, tc);
+    }
+
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER, _EN, transmitControl))
+    {
+        tc = FLD_SET_DRF(0073, _CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL,
+                         _OTHER_FRAME, _ENABLE, tc);
+    }
+
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE, _EN, transmitControl))
+    {
+        tc = FLD_SET_DRF(0073, _CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL,
+                         _SINGLE_FRAME, _ENABLE, tc);
+    }
+
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _HBLANK, _EN, transmitControl))
+    {
+        tc = FLD_SET_DRF(0073, _CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL,
+                         _ON_HBLANK, _ENABLE, tc);
+    }
+
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _VIDEO_FMT, _HW_CTRL, transmitControl))
+    {
+        tc = FLD_SET_DRF(0073, _CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL,
+                         _VIDEO_FMT, _HW_CONTROLLED, tc);
+    }
+
+    return tc;
+}
+
+// non-HW - class utility/maintenance functions
+/*
+ * hdmiConstructor0073
+ */
+NvBool
+hdmiConstructor0073(NVHDMIPKT_CLASS* pThis)
+{
+    return NV_TRUE;
+}
+
+/*
+ * hdmiDestructor0073
+ */
+void
+hdmiDestructor0073(NVHDMIPKT_CLASS* pThis)
+
+{
+    return;
+}
+
+// Below are dummy functions for the HW functions not needed for a display class
+/*
+ * hdmiWriteDummyPacket
+ */
+void
+hdmiWriteDummyPacket(NVHDMIPKT_CLASS*   pThis,
+                     NvU32*             pBaseReg,
+                     NvU32              head,
+                     NvU32              packetLen,
+                     NvU8 const *const  pPacket)
+{
+    NvHdmiPkt_Print(pThis, "ERROR - Dummy
function hdmiWriteDummyPacket called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return; +} + +/* + * hdmiReadDummyPacketStatus + */ +static NvBool +hdmiReadDummyPacketStatus(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktType0073) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiReadDummyPacketStatus called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NV_TRUE; +} + +/* + * hdmiWriteDummyPacketCtrl + */ +static NVHDMIPKT_RESULT +hdmiWriteDummyPacketCtrl(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktType0073, + NvU32 transmitControl, + NvBool bDisable) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiWriteDummyPacketCtrl called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +NVHDMIPKT_RESULT +hdmiAssessLinkCapabilitiesDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiAssessLinkCapabilitiesDummy called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +NVHDMIPKT_RESULT +hdmiQueryFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiQueryFRLConfigDummy called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +NVHDMIPKT_RESULT +hdmiSetFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NvBool bFakeLt, + HDMI_FRL_CONFIG *pFRLConfig) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiSetFRLConfigDummy called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +NVHDMIPKT_RESULT +hdmiClearFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiClearFRLConfigDummy called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +NVHDMIPKT_RESULT +hdmiPacketReadDummy(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 head, + NVHDMIPKT_TYPE packetReg, + NvU32 bufferLen, + NvU8 *const pOutPktBuffer) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiPacketReadDummy called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +NVHDMIPKT_RESULT +programAdvancedInfoframeDummy(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 head, + NVHDMIPKT_TYPE packetReg, + const ADVANCED_INFOFRAME* pInfoframe) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function programAdvancedInfoframeDummy called. 
" + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +/* + * initializeHdmiPktInterface0073 + */ +void +initializeHdmiPktInterface0073(NVHDMIPKT_CLASS* pClass) +{ + pClass->hdmiPacketCtrl = hdmiPacketCtrl0073; + pClass->hdmiPacketWrite = hdmiPacketWrite0073; + pClass->translatePacketType = translatePacketType0073; + pClass->translateTransmitControl = translateTransmitControl0073; + + // Functions below are mapped to dummy functions, as not needed for HW before GK104 + pClass->hdmiReadPacketStatus = hdmiReadDummyPacketStatus; + pClass->hdmiWritePacketCtrl = hdmiWriteDummyPacketCtrl; + pClass->hdmiWriteAviPacket = hdmiWriteDummyPacket; + pClass->hdmiWriteAudioPacket = hdmiWriteDummyPacket; + pClass->hdmiWriteGenericPacket = hdmiWriteDummyPacket; + pClass->hdmiWriteGeneralCtrlPacket = hdmiWriteDummyPacket; + pClass->hdmiWriteVendorPacket = hdmiWriteDummyPacket; + + // Update SF_USER data + pClass->dispSfUserClassId = 0; + pClass->dispSfUserSize = 0; + + // Functions below are used by HDMI FRL and will be available for Ampere+. + pClass->hdmiAssessLinkCapabilities = hdmiAssessLinkCapabilitiesDummy; + pClass->hdmiQueryFRLConfig = hdmiQueryFRLConfigDummy; + pClass->hdmiSetFRLConfig = hdmiSetFRLConfigDummy; + pClass->hdmiClearFRLConfig = hdmiClearFRLConfigDummy; + + // More (generic) infoframe support on T239+ + pClass->hdmiPacketRead = hdmiPacketReadDummy; + pClass->programAdvancedInfoframe = programAdvancedInfoframeDummy; +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_9171.c b/src/common/modeset/hdmipacket/nvhdmipkt_9171.c new file mode 100644 index 0000000..404e5f4 --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_9171.c @@ -0,0 +1,834 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * File: nvhdmipkt_9171.c + * + * Purpose: Provides packet write functions for HDMI library for KEPLER + chips + */ + +#include "nvlimits.h" +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "hdmi_spec.h" +#include "class/cl9171.h" +#include "ctrl/ctrl0073/ctrl0073specific.h" + +#define NVHDMIPKT_9171_INVALID_PKT_TYPE ((NV9171_SF_HDMI_INFO_IDX_VSI) + 1) +#define NVHDMIPKT_9171_MAX_PKT_BYTES_AVI 17 // 3 bytes header + 14 bytes data + +NVHDMIPKT_RESULT +hdmiPacketWrite9171(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket); + +NVHDMIPKT_RESULT +hdmiPacketCtrl9171(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl); + +void +hdmiWriteAviPacket9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + +/* + * hdmiReadPacketStatus9171 + */ +static NvBool +hdmiReadPacketStatus9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktType9171) +{ + NvBool bResult = NV_FALSE; + NvU32 regOffset = 0; + NvU32 status = 0; + + if (pBaseReg == 0 || head >= NV9171_SF_HDMI_INFO_STATUS__SIZE_1) + { + return bResult; + } + + switch (pktType9171) + { + case NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME: + case NV9171_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME: + case NV9171_SF_HDMI_INFO_IDX_GCP: + case NV9171_SF_HDMI_INFO_IDX_VSI: + regOffset = NV9171_SF_HDMI_INFO_STATUS(head, pktType9171); + status = REG_RD32(pBaseReg, regOffset); + bResult = FLD_TEST_DRF(9171, _SF_HDMI_INFO_STATUS, _SENT, _DONE, status); + break; + + default: + break; + } + + return bResult; +} + +/* + * hdmiWritePacketCtrl9171 + */ +static NVHDMIPKT_RESULT +hdmiWritePacketCtrl9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktType9171, + NvU32 transmitControl, + NvBool bDisable) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_INVALID_ARG; + NvU32 regOffset = 0; + NvU32 hdmiCtrl = 0; + + if (pBaseReg == 0 || head >= NV9171_SF_HDMI_INFO_CTRL__SIZE_1) + { + return result; + } + + switch (pktType9171) + { + case NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME: + case NV9171_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME: + case NV9171_SF_HDMI_INFO_IDX_GCP: + case NV9171_SF_HDMI_INFO_IDX_VSI: + regOffset = NV9171_SF_HDMI_INFO_CTRL(head, pktType9171); + hdmiCtrl = REG_RD32(pBaseReg, regOffset); + hdmiCtrl = (bDisable == NV_TRUE) ? + (FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _ENABLE, _DIS, hdmiCtrl)) : + (transmitControl); + REG_WR32(pBaseReg, regOffset, hdmiCtrl); + + result = NVHDMIPKT_SUCCESS; + break; + + default: + break; + } + + return result; +} + +/* + * hdmiWriteAviPacket9171 + */ +void +hdmiWriteAviPacket9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NvU32 data = 0; + + if (packetLen > NVHDMIPKT_9171_MAX_PKT_BYTES_AVI) + { + NvHdmiPkt_Print(pThis, "WARNING - input AVI packet length incorrect. 
Write will be capped to max allowable bytes"); + } + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_HEADER(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_HEADER, _HB0, pPacket[0], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_HEADER, _HB1, pPacket[1], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_HEADER, _HB2, pPacket[2], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_HEADER(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW, _PB0, pPacket[3], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW, _PB1, pPacket[4], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW, _PB2, pPacket[5], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW, _PB3, pPacket[6], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH, _PB4, pPacket[7], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH, _PB5, pPacket[8], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH, _PB6, pPacket[9], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW, _PB7, pPacket[10], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW, _PB8, pPacket[11], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW, _PB9, pPacket[12], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW, _PB10, pPacket[13], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH, _PB11, pPacket[14], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH, _PB12, pPacket[15], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH, _PB13, pPacket[16], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(head), data); + + return; +} + +/* + * hdmiWriteGenericPacket9171 + */ +static void +hdmiWriteGenericPacket9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NvU32 data = 0; + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_HEADER(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_HEADER, _HB0, pPacket[0], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_HEADER, _HB1, pPacket[1], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_HEADER, _HB2, pPacket[2], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_HEADER(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_LOW, _PB0, pPacket[3], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_LOW, _PB1, pPacket[4], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_LOW, _PB2, pPacket[5], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_LOW, _PB3, pPacket[6], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW(head), data); + + data = REG_RD32(pBaseReg, 
NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_HIGH, _PB4, pPacket[7], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_HIGH, _PB5, pPacket[8], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_HIGH, _PB6, pPacket[9], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_LOW, _PB7, pPacket[10], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_LOW, _PB8, pPacket[11], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_LOW, _PB9, pPacket[12], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_LOW, _PB10, pPacket[13], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_HIGH, _PB11, pPacket[14], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_HIGH, _PB12, pPacket[15], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_HIGH, _PB13, pPacket[16], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_LOW, _PB14, pPacket[17], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_LOW, _PB15, pPacket[18], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_LOW, _PB16, pPacket[19], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_LOW, _PB17, pPacket[20], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_HIGH, _PB18, pPacket[21], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_HIGH, _PB19, pPacket[22], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_HIGH, _PB20, pPacket[23], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_LOW, _PB21, pPacket[24], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_LOW, _PB22, pPacket[25], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_LOW, _PB23, pPacket[26], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_LOW, _PB24, pPacket[27], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_HIGH, _PB25, pPacket[28], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_HIGH, _PB26, pPacket[29], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_HIGH, _PB27, pPacket[30], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH(head), data); + + return; +} + +/* + * hdmiWriteGeneralCtrlPacket9171 + */ +static void +hdmiWriteGeneralCtrlPacket9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NvU32 data = 0; + + // orIndexer info is ignored. 
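+    // Only the three GCP subpacket bytes are programmed here: pPacket[0..2]
+    // hold the 3-byte packet header, so the payload starts at pPacket[3].
+    // Per the HDMI spec, SB0 carries the Set_AVMUTE/Clear_AVMUTE flags, SB1
+    // the color depth (CD) and pixel packing phase (PP) fields, and SB2 the
+    // Default_Phase bit.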
+ data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GCP_SUBPACK(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GCP_SUBPACK, _SB0, pPacket[3], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GCP_SUBPACK, _SB1, pPacket[4], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GCP_SUBPACK, _SB2, pPacket[5], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GCP_SUBPACK(head), data); + + return; +} + +/* + * hdmiWriteVendorPacket9171 + */ +static void +hdmiWriteVendorPacket9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NvU32 data = 0; + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_HEADER(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_HEADER, _HB0, pPacket[0], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_HEADER, _HB1, pPacket[1], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_HEADER, _HB2, pPacket[2], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_HEADER(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK0_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_LOW, _PB0, pPacket[3], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_LOW, _PB1, pPacket[4], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_LOW, _PB2, pPacket[5], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_LOW, _PB3, pPacket[6], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK0_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK0_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_HIGH, _PB4, pPacket[7], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_HIGH, _PB5, pPacket[8], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_HIGH, _PB6, pPacket[9], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK0_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK1_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_LOW, _PB7, pPacket[10], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_LOW, _PB8, pPacket[11], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_LOW, _PB9, pPacket[12], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_LOW, _PB10, pPacket[13], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK1_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK1_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_HIGH, _PB11, pPacket[14], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_HIGH, _PB12, pPacket[15], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_HIGH, _PB13, pPacket[16], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK1_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK2_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_LOW, _PB14, pPacket[17], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_LOW, _PB15, pPacket[18], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_LOW, _PB16, pPacket[19], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_LOW, _PB17, pPacket[20], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK2_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK2_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_HIGH, _PB18, pPacket[21], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_HIGH, _PB19, pPacket[22], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_HIGH, _PB20, pPacket[23], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK2_HIGH(head), data); + + data = 
REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK3_LOW(head));
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_LOW, _PB21, pPacket[24], data);
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_LOW, _PB22, pPacket[25], data);
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_LOW, _PB23, pPacket[26], data);
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_LOW, _PB24, pPacket[27], data);
+    REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK3_LOW(head), data);
+
+    data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK3_HIGH(head));
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_HIGH, _PB25, pPacket[28], data);
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_HIGH, _PB26, pPacket[29], data);
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_HIGH, _PB27, pPacket[30], data);
+    REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK3_HIGH(head), data);
+
+    return;
+}
+
+/*
+ * translatePacketType9171
+ */
+static NvU32
+translatePacketType9171(NVHDMIPKT_CLASS*  pThis,
+                        NVHDMIPKT_TYPE    packetType)
+{
+    NvU32 type9171 = NVHDMIPKT_9171_INVALID_PKT_TYPE;
+
+    switch (packetType)
+    {
+    case NVHDMIPKT_TYPE_AVI_INFOFRAME:
+        type9171 = NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME;
+        break;
+
+    case NVHDMIPKT_TYPE_GENERIC:
+        type9171 = NV9171_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME;
+        break;
+
+    case NVHDMIPKT_TYPE_GENERAL_CONTROL:
+        type9171 = NV9171_SF_HDMI_INFO_IDX_GCP;
+        break;
+
+    case NVHDMIPKT_TYPE_VENDOR_SPECIFIC_INFOFRAME:
+        type9171 = NV9171_SF_HDMI_INFO_IDX_VSI;
+        break;
+
+    case NVHDMIPKT_TYPE_AUDIO_INFOFRAME:
+    default:
+        NvHdmiPkt_Print(pThis, "ERROR - translatePacketType wrong packet type: %0x.",
+                        packetType);
+        NvHdmiPkt_Assert(0);
+        break;
+    }
+
+    return type9171;
+}
+
+/*
+ * translateTransmitControl9171
+ */
+static NvU32
+translateTransmitControl9171(NVHDMIPKT_CLASS*  pThis,
+                             NVHDMIPKT_TC      transmitControl)
+{
+    NvU32 tc = 0;
+
+    // TODO: tc validation
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE, _EN, transmitControl))
+    {
+        tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _ENABLE, _EN, tc);
+    }
+
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER, _EN, transmitControl))
+    {
+        tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _OTHER, _EN, tc);
+    }
+
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE, _EN, transmitControl))
+    {
+        tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _SINGLE, _EN, tc);
+    }
+
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN, transmitControl))
+    {
+        tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _CHKSUM_HW, _EN, tc);
+    }
+
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _HBLANK, _EN, transmitControl))
+    {
+        tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _HBLANK, _EN, tc);
+    }
+
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _VIDEO_FMT, _HW_CTRL, transmitControl))
+    {
+        tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _VIDEO_FMT, _HW_CONTROLLED, tc);
+    }
+
+    return tc;
+}
+
+/*
+ * hdmiPacketCtrl9171
+ */
+NVHDMIPKT_RESULT
+hdmiPacketCtrl9171(NVHDMIPKT_CLASS*  pThis,
+                   NvU32             subDevice,
+                   NvU32             displayId,
+                   NvU32             head,
+                   NVHDMIPKT_TYPE    packetType,
+                   NVHDMIPKT_TC      transmitControl)
+{
+    NvU32* pBaseReg    = (NvU32*)pThis->memMap[subDevice].pMemBase;
+    NvU32  pktType9171 = pThis->translatePacketType(pThis, packetType);
+    NvU32  tc          = pThis->translateTransmitControl(pThis, transmitControl);
+
+    if (pBaseReg == 0 || head >= NV9171_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 ||
+        pktType9171 == NVHDMIPKT_9171_INVALID_PKT_TYPE)
+    {
+        return NVHDMIPKT_INVALID_ARG;
+    }
+
+    return pThis->hdmiWritePacketCtrl(pThis, pBaseReg, head, pktType9171, tc, NV_FALSE);
+}
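+
+/*
+ * Aside on the FLD_*_DRF macros used above: they operate on the
+ * <register>_<field>_<value> names generated from the class headers.
+ * FLD_TEST_DRF checks whether a field within a packed value equals a named
+ * value, and FLD_SET_DRF masks the field out and ORs the named value back in.
+ * translateTransmitControl9171 is therefore a pure bit-level repack of the
+ * library's NVHDMIPKT_TC encoding into the NV9171_SF_HDMI_INFO_CTRL layout.
+ */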
+
+/*
+ * internal utility function
+ * checkPacketStatus
+ */
+static NVHDMIPKT_RESULT
+checkPacketStatus(NVHDMIPKT_CLASS*  pThis,
+                  NvU32*            pBaseReg,
+                  NvU32             head,
+                  NvU32             pktType9171)
+{
+    NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS;
+    NvBool bCheckPacketStatus = NV_TRUE;
+    NvU32  regOffset = 0;
+    NvU32  status    = 0;
+
+    // check to see if timer callbacks are provided
+    if (pThis->callback.setTimeout == 0 || pThis->callback.checkTimeout == 0)
+    {
+        goto checkPacketStatus_exit;
+    }
+
+    // Mark packets that don't need status check
+    switch (pktType9171)
+    {
+    case NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME:
+    case NV9171_SF_HDMI_INFO_IDX_GCP:
+        regOffset = NV9171_SF_HDMI_INFO_STATUS(head, pktType9171);
+        status = REG_RD32(pBaseReg, regOffset);
+        bCheckPacketStatus = FLD_TEST_DRF(9171, _SF_HDMI_INFO_CTRL, _SINGLE, _EN, status);
+        break;
+
+    default:
+        bCheckPacketStatus = NV_FALSE;
+        break;
+    }
+
+    if (bCheckPacketStatus == NV_TRUE)
+    {
+        if (pThis->callback.setTimeout(pThis->cbHandle, NVHDMIPKT_STATUS_READ_TIMEOUT_IN_us)
+            == NV_FALSE)
+        {
+            // Timer set failed
+            goto checkPacketStatus_exit;
+        }
+
+        while (pThis->hdmiReadPacketStatus(pThis, pBaseReg, head, pktType9171) == NV_FALSE)
+        {
+            if (pThis->callback.checkTimeout(pThis->cbHandle) == NV_TRUE)
+            {
+                // status check operation timed out
+                result = NVHDMIPKT_TIMEOUT;
+                goto checkPacketStatus_exit;
+            }
+        }
+    }
+
+checkPacketStatus_exit:
+    return result;
+}
+
+/*
+ * hdmiPacketWrite9171
+ */
+NVHDMIPKT_RESULT
+hdmiPacketWrite9171(NVHDMIPKT_CLASS*   pThis,
+                    NvU32              subDevice,
+                    NvU32              displayId,
+                    NvU32              head,
+                    NVHDMIPKT_TYPE     packetType,
+                    NVHDMIPKT_TC       transmitControl,
+                    NvU32              packetLen,
+                    NvU8 const *const  pPacketIn)
+{
+    NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS;
+    NvU32* pBaseReg    = (NvU32*)pThis->memMap[subDevice].pMemBase;
+    NvU32  pktType9171 = pThis->translatePacketType(pThis, packetType);
+    NvU32  tc          = pThis->translateTransmitControl(pThis, transmitControl);
+    NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS params = {0};
+
+    // pPacketIn can be of varying size. Use a fixed max size buffer for
+    // programming the HW units, to prevent out-of-bounds access.
+    NvU8 pPacket[NVHDMIPKT_CTAIF_MAX_PKT_BYTES] = {0};
+
+    if (pBaseReg == 0 || head >= NV9171_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 ||
+        packetLen == 0 || pPacketIn == 0 || pktType9171 == NVHDMIPKT_9171_INVALID_PKT_TYPE)
+    {
+        result = NVHDMIPKT_INVALID_ARG;
+        NvHdmiPkt_Print(pThis, "Invalid arg");
+        goto hdmiPacketWrite9171_exit;
+    }
+
+    if (packetLen > NVHDMIPKT_CTAIF_MAX_PKT_BYTES)
+    {
+        NvHdmiPkt_Print(pThis, "ERROR - input packet length incorrect (%d). Packet write will be capped to max allowable bytes", packetLen);
+        packetLen = NVHDMIPKT_CTAIF_MAX_PKT_BYTES;
+        NvHdmiPkt_Assert(0);
+    }
+
+    // input packet looks ok to use, copy over the bytes
+    NVMISC_MEMCPY(pPacket, pPacketIn, packetLen);
+
+    // acquire mutex
+    pThis->callback.acquireMutex(pThis->cbHandle);
+
+    // Check whether the last infoframe has been sent out
+    if ((result = checkPacketStatus(pThis, pBaseReg, head, pktType9171)) ==
+        NVHDMIPKT_TIMEOUT)
+    {
+        NvHdmiPkt_Print(pThis, "ERROR - Packet status check timed out.");
+        NvHdmiPkt_Assert(0);
+        goto hdmiPacketWrite9171_release_mutex_exit;
+    }
+
+    // Disable this packet type.
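+    // Disabling first prevents the hardware from transmitting a partially
+    // updated infoframe while the packet registers are rewritten below; the
+    // packet is re-enabled with the caller's transmit control at the end.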
+    pThis->hdmiWritePacketCtrl(pThis, pBaseReg, head, pktType9171, tc, NV_TRUE);
+
+    // write the packet
+    switch (pktType9171)
+    {
+    case NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME:
+        pThis->hdmiWriteAviPacket(pThis, pBaseReg, head, packetLen, pPacket);
+        break;
+
+    case NV9171_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME:
+        pThis->hdmiWriteGenericPacket(pThis, pBaseReg, head, packetLen, pPacket);
+        break;
+
+    case NV9171_SF_HDMI_INFO_IDX_GCP:
+        // Check whether the GCP packet is AVMUTE DISABLE or AVMUTE ENABLE.
+        // Enable HDMI only on GCP unmute, i.e. AVMUTE DISABLE
+        if (pPacket[HDMI_PKT_HDR_SIZE] == HDMI_GENCTRL_PACKET_MUTE_DISABLE)
+        {
+            // Enable HDMI.
+            NVMISC_MEMSET(&params, 0, sizeof(params));
+            params.subDeviceInstance = (NvU8)subDevice;
+            params.displayId         = displayId;
+            params.bEnable           = NV0073_CTRL_SPECIFIC_CTRL_HDMI_ENABLE;
+
+#if NVHDMIPKT_RM_CALLS_INTERNAL
+            if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient,
+                                          pThis->clientHandles.hDisplay,
+                                          NV0073_CTRL_CMD_SPECIFIC_CTRL_HDMI,
+                                          &params,
+                                          sizeof(params)) != NVOS_STATUS_SUCCESS)
+
+#else // !NVHDMIPKT_RM_CALLS_INTERNAL
+            NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle,
+                                                             params.subDeviceInstance,
+                                                             NV0073_CTRL_CMD_SPECIFIC_CTRL_HDMI,
+                                                             &params,
+                                                             sizeof(params));
+            if (bSuccess == NV_FALSE)
+#endif // NVHDMIPKT_RM_CALLS_INTERNAL
+            {
+                NvHdmiPkt_Print(pThis, "ERROR - RM call to enable hdmi ctrl failed.");
+                NvHdmiPkt_Assert(0);
+                result = NVHDMIPKT_FAIL;
+            }
+        }
+        pThis->hdmiWriteGeneralCtrlPacket(pThis, pBaseReg, head, packetLen, pPacket);
+        break;
+
+    case NV9171_SF_HDMI_INFO_IDX_VSI:
+        pThis->hdmiWriteVendorPacket(pThis, pBaseReg, head, packetLen, pPacket);
+        break;
+
+    default:
+        result = NVHDMIPKT_INVALID_ARG;
+        break;
+    }
+
+    // Enable this infoframe.
+    pThis->hdmiWritePacketCtrl(pThis, pBaseReg, head, pktType9171, tc, NV_FALSE);
+
+hdmiPacketWrite9171_release_mutex_exit:
+    // release mutex
+    pThis->callback.releaseMutex(pThis->cbHandle);
+hdmiPacketWrite9171_exit:
+    return result;
+}
+
+// non-HW - class utility/maintenance functions
+/*
+ * hdmiConstructor9171
+ */
+NvBool
+hdmiConstructor9171(NVHDMIPKT_CLASS* pThis)
+{
+    NvU32  i = 0;
+    NvBool result = NV_TRUE;
+
+#if NVHDMIPKT_RM_CALLS_INTERNAL
+    for (i = 0; i < pThis->numSubDevices; i++)
+    {
+        if (CALL_DISP_RM(NvRmAlloc)(pThis->clientHandles.hClient,
+                                    pThis->clientHandles.hSubDevices[i],
+                                    pThis->sfUserHandle + i,
+                                    pThis->dispSfUserClassId,
+                                    (void*)0) != NVOS_STATUS_SUCCESS)
+        {
+            NvHdmiPkt_Print(pThis, "ERROR - Init failed. "
+                                   "Failed to alloc SF_USER handle");
+            NvHdmiPkt_Assert(0);
+            break;
+        }
+
+        pThis->memMap[i].memHandle = pThis->sfUserHandle + i;
+
+        if (CALL_DISP_RM(NvRmMapMemory)(pThis->clientHandles.hClient,
+                                        pThis->clientHandles.hSubDevices[i],
+                                        pThis->memMap[i].memHandle,
+                                        0,
+                                        pThis->dispSfUserSize,
+                                        &pThis->memMap[i].pMemBase,
+                                        0) != NVOS_STATUS_SUCCESS)
+        {
+            NvHdmiPkt_Print(pThis, "ERROR - Init failed. "
+                                   "Failed to map SF_USER memory.");
+            NvHdmiPkt_Assert(0);
+            break;
+        }
+
+        if (pThis->memMap[i].pMemBase == 0)
+        {
+            NvHdmiPkt_Print(pThis, "ERROR - Init failed. "
" + "SF_USER memory returned is NULL."); + NvHdmiPkt_Assert(0); + break; + } + + pThis->memMap[i].subDevice = i; + } + + // coudln't complete the loop above + if (i < pThis->numSubDevices) + { + result = NV_FALSE; + goto hdmiConstructor9171_exit; + } +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + for (i = 0; i < pThis->numSubDevices; i++) + { + result = pThis->callback.rmGetMemoryMap(pThis->cbHandle, + pThis->dispSfUserClassId, + pThis->dispSfUserSize, + i, + &pThis->memMap[i].memHandle, + &pThis->memMap[i].pMemBase); + if (result == NV_TRUE) + { + pThis->memMap[i].subDevice = i; + } + else + { + NvHdmiPkt_Print(pThis, "ERROR - Init failed. " + "Failed to map SF_USER memory."); + NvHdmiPkt_Assert(0); + result = NV_FALSE; + goto hdmiConstructor9171_exit; + } + } +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + +hdmiConstructor9171_exit: + return result; +} + +/* + * hdmiDestructor9171 + */ +void +hdmiDestructor9171(NVHDMIPKT_CLASS* pThis) + +{ + NvU32 i = 0; + +#if NVHDMIPKT_RM_CALLS_INTERNAL + for (i = 0; i < NV_MAX_SUBDEVICES; i++) + { + // free memory + if (pThis->memMap[i].pMemBase) + { + if (CALL_DISP_RM(NvRmUnmapMemory)(pThis->clientHandles.hClient, + pThis->clientHandles.hSubDevices[i], + pThis->memMap[i].memHandle, + pThis->memMap[i].pMemBase, + 0) != NVOS_STATUS_SUCCESS) + { + NvHdmiPkt_Print(pThis, "ERROR - unInit failed. " + "SF_USER memory unMap failed."); + NvHdmiPkt_Assert(0); + } + } + + // free handle + if (pThis->memMap[i].memHandle) + { + if (CALL_DISP_RM(NvRmFree)(pThis->clientHandles.hClient, + pThis->clientHandles.hSubDevices[i], + pThis->memMap[i].memHandle) != NVOS_STATUS_SUCCESS) + { + NvHdmiPkt_Print(pThis, "ERROR - unInit failed. " + "Freeing SF_USER memory handle failed."); + NvHdmiPkt_Assert(0); + } + } + + pThis->memMap[i].subDevice = NVHDMIPKT_INVALID_SUBDEV; + pThis->memMap[i].memHandle = 0; + pThis->memMap[i].pMemBase = 0; + } +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + for (i = 0; i < NV_MAX_SUBDEVICES; i++) + { + if (pThis->memMap[i].memHandle) + { + pThis->callback.rmFreeMemoryMap(pThis->cbHandle, + i, + pThis->memMap[i].memHandle, + pThis->memMap[i].pMemBase); + + pThis->memMap[i].subDevice = NVHDMIPKT_INVALID_SUBDEV; + pThis->memMap[i].memHandle = 0; + pThis->memMap[i].pMemBase = 0; + } + } +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + + return; +} + +/* + * initializeHdmiPktInterface9171 + */ +void +initializeHdmiPktInterface9171(NVHDMIPKT_CLASS* pClass) +{ + pClass->hdmiPacketCtrl = hdmiPacketCtrl9171; + pClass->hdmiPacketWrite = hdmiPacketWrite9171; + pClass->translatePacketType = translatePacketType9171; + pClass->translateTransmitControl = translateTransmitControl9171; + + // HW register write functions + pClass->hdmiReadPacketStatus = hdmiReadPacketStatus9171; + pClass->hdmiWritePacketCtrl = hdmiWritePacketCtrl9171; + pClass->hdmiWriteAviPacket = hdmiWriteAviPacket9171; + pClass->hdmiWriteAudioPacket = hdmiWriteDummyPacket; + pClass->hdmiWriteGenericPacket = hdmiWriteGenericPacket9171; + pClass->hdmiWriteGeneralCtrlPacket = hdmiWriteGeneralCtrlPacket9171; + pClass->hdmiWriteVendorPacket = hdmiWriteVendorPacket9171; + + // Update SF_USER data + pClass->dispSfUserClassId = NV9171_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(Nv9171DispSfUserMap); + + // Functions below are used by HDMI FRL and will be available for Ampere+. 
+ pClass->hdmiAssessLinkCapabilities = hdmiAssessLinkCapabilitiesDummy; + pClass->hdmiQueryFRLConfig = hdmiQueryFRLConfigDummy; + pClass->hdmiSetFRLConfig = hdmiSetFRLConfigDummy; + pClass->hdmiClearFRLConfig = hdmiClearFRLConfigDummy; + + // T239+ + pClass->hdmiPacketRead = hdmiPacketReadDummy; + pClass->programAdvancedInfoframe = programAdvancedInfoframeDummy; +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_9271.c b/src/common/modeset/hdmipacket/nvhdmipkt_9271.c new file mode 100644 index 0000000..eaf65e5 --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_9271.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_9271.c + * + * Purpose: Provides packet write functions for HDMI library for KEPLER + chips + */ + +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "class/cl9271.h" + +/******************************************** NOTE *********************************************** +* This file serves as an example on how to add a new HW SF USER CLASS. Notice that this * +* Class didn't override any functions, as 9171 is identical to 9271. * +*************************************************************************************************/ + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructor9271 + */ +NvBool +hdmiConstructor9271(NVHDMIPKT_CLASS* pThis) +{ + NvBool result = NV_TRUE; + + return result; +} + +/* + * hdmiDestructor9271 + */ +void +hdmiDestructor9271(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +/* + * initializeHdmiPktInterface9271 + */ +void +initializeHdmiPktInterface9271(NVHDMIPKT_CLASS* pClass) +{ + // Update SF_USER data + pClass->dispSfUserClassId = NV9271_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(Nv9271DispSfUserMap); +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_9471.c b/src/common/modeset/hdmipacket/nvhdmipkt_9471.c new file mode 100644 index 0000000..d863c9f --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_9471.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_9471.c + * + * Purpose: Provides packet write functions for HDMI library for Maxwell + chips + */ + +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "class/cl9471.h" + +/******************************************** NOTE *********************************************** +* This file serves as an example on how to add a new HW SF USER CLASS. Notice that this * +* Class didn't override any functions, as 9171 is identical to 9471. * +*************************************************************************************************/ + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructor9471 + */ +NvBool +hdmiConstructor9471(NVHDMIPKT_CLASS* pThis) +{ + NvBool result = NV_TRUE; + + return result; +} + +/* + * hdmiDestructor9471 + */ +void +hdmiDestructor9471(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +/* + * initializeHdmiPktInterface9471 + */ +void +initializeHdmiPktInterface9471(NVHDMIPKT_CLASS* pClass) +{ + // Update SF_USER data + pClass->dispSfUserClassId = NV9471_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(Nv9471DispSfUserMap); +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_9571.c b/src/common/modeset/hdmipacket/nvhdmipkt_9571.c new file mode 100644 index 0000000..85e6b13 --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_9571.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_9571.c + * + * Purpose: Provides packet write functions for HDMI library for Maxwell + chips + */ + +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "class/cl9571.h" + +/******************************************** NOTE *********************************************** +* This file serves as an example on how to add a new HW SF USER CLASS. Notice that this * +* Class didn't override any functions, as 9171 is identical to 9571. * +*************************************************************************************************/ + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructor9571 + */ +NvBool +hdmiConstructor9571(NVHDMIPKT_CLASS* pThis) +{ + NvBool result = NV_TRUE; + + return result; +} + +/* + * hdmiDestructor9571 + */ +void +hdmiDestructor9571(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +/* + * initializeHdmiPktInterface9571 + */ +void +initializeHdmiPktInterface9571(NVHDMIPKT_CLASS* pClass) +{ + // Update SF_USER data + pClass->dispSfUserClassId = NV9571_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(Nv9571DispSfUserMap); +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_C371.c b/src/common/modeset/hdmipacket/nvhdmipkt_C371.c new file mode 100644 index 0000000..fd89eac --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_C371.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_C371.c + * + * Purpose: Provides packet write functions for HDMI library for Volta+ chips + */ + +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "class/clc371.h" + +/******************************************** NOTE *********************************************** +* This file serves as an example on how to add a new HW SF USER CLASS. Notice that this * +* Class didn't override any functions, as 9171 is identical to C371. 
* +*************************************************************************************************/ + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructorC371 + */ +NvBool +hdmiConstructorC371(NVHDMIPKT_CLASS* pThis) +{ + NvBool result = NV_TRUE; + + return result; +} + +/* + * hdmiDestructorC371 + */ +void +hdmiDestructorC371(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +/* + * initializeHdmiPktInterfaceC371 + */ +void +initializeHdmiPktInterfaceC371(NVHDMIPKT_CLASS* pClass) +{ + // Update SF_USER data + pClass->dispSfUserClassId = NVC371_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(NvC371DispSfUserMap); +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_C671.c b/src/common/modeset/hdmipacket/nvhdmipkt_C671.c new file mode 100644 index 0000000..a2d4bd6 --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_C671.c @@ -0,0 +1,1415 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ *
+ * File: nvhdmipkt_C671.c
+ *
+ * Purpose: Provides packet write functions for HDMI library for Ampere+ chips
+ */
+
+#include
+#include "nvhdmipkt_common.h"
+#include "nvhdmipkt_class.h"
+
+#include "nvhdmipkt_internal.h"
+#include "nvHdmiFrlCommon.h"
+
+#include "../timing/nvt_dsc_pps.h"
+#include "ctrl/ctrl0073/ctrl0073system.h"
+
+#include "class/clc671.h"
+#include "ctrl/ctrl0073/ctrl0073dp.h"
+#include "ctrl/ctrl0073/ctrl0073specific.h"
+
+#define MULTIPLIER_1G          1000000000
+#define PCLK_VARIANCE_10MHZ    1000
+
+// In the HDMI case, for the PPS, the HDMI 2.1 spec expects the source to set this field to 13;
+// decoder capability is assumed.
+// Note: in the DP case, the DSC decoder is allowed to report its line buffer depth capability
+// through DPCD registers.
+#define HDMI_DSC_DECODER_LINE_BUFFER_BIT_DEPTH_CAP 13
+#define NVHDMIPKT_C671_INVALID_PKT_TYPE ((NVC671_SF_HDMI_INFO_IDX_VSI) + 1)
+
+extern NVHDMIPKT_RESULT hdmiPacketWrite0073(NVHDMIPKT_CLASS*   pThis,
+                                            NvU32              subDevice,
+                                            NvU32              displayId,
+                                            NvU32              head,
+                                            NVHDMIPKT_TYPE     packetType,
+                                            NVHDMIPKT_TC       transmitControl,
+                                            NvU32              packetLen,
+                                            NvU8 const *const  pPacket);
+
+extern NVHDMIPKT_RESULT hdmiPacketCtrl0073(NVHDMIPKT_CLASS*  pThis,
+                                           NvU32             subDevice,
+                                           NvU32             displayId,
+                                           NvU32             head,
+                                           NVHDMIPKT_TYPE    packetType,
+                                           NVHDMIPKT_TC      transmitControl);
+
+extern NVHDMIPKT_RESULT hdmiPacketWrite9171(NVHDMIPKT_CLASS*   pThis,
+                                            NvU32              subDevice,
+                                            NvU32              displayId,
+                                            NvU32              head,
+                                            NVHDMIPKT_TYPE     packetType,
+                                            NVHDMIPKT_TC       transmitControl,
+                                            NvU32              packetLen,
+                                            NvU8 const *const  pPacket);
+
+static NVHDMIPKT_RESULT hdmiClearFRLConfigC671(NVHDMIPKT_CLASS *pThis,
+                                               NvU32            subDevice,
+                                               NvU32            displayId);
+
+
+// translate FRL rate to RM control param
+static NvU32 translateFRLRateToNv0073SetHdmiFrlConfig(HDMI_FRL_DATA_RATE frlRate)
+{
+    switch(frlRate)
+    {
+        case HDMI_FRL_DATA_RATE_NONE          : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE;
+        case HDMI_FRL_DATA_RATE_3LANES_3GBPS  : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_3G;
+        case HDMI_FRL_DATA_RATE_3LANES_6GBPS  : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_6G;
+        case HDMI_FRL_DATA_RATE_4LANES_6GBPS  : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_6G;
+        case HDMI_FRL_DATA_RATE_4LANES_8GBPS  : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_8G;
+        case HDMI_FRL_DATA_RATE_4LANES_10GBPS : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_10G;
+        case HDMI_FRL_DATA_RATE_4LANES_12GBPS : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_12G;
+        default:
+            break;
+    }
+    return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE;
+}
+
+/*
+ * Both DSC_Max_FRL_Rate and Max_FRL_Rate have the same translation
+ */
+static HDMI_FRL_DATA_RATE translateFRLCapToFRLDataRate(NvU32 sinkFRLcap)
+{
+    switch(sinkFRLcap)
+    {
+        case 6:  return HDMI_FRL_DATA_RATE_4LANES_12GBPS;
+        case 5:  return HDMI_FRL_DATA_RATE_4LANES_10GBPS;
+        case 4:  return HDMI_FRL_DATA_RATE_4LANES_8GBPS;
+        case 3:  return HDMI_FRL_DATA_RATE_4LANES_6GBPS;
+        case 2:  return HDMI_FRL_DATA_RATE_3LANES_6GBPS;
+        case 1:  return HDMI_FRL_DATA_RATE_3LANES_3GBPS;
+        case 0:  // fall through
+        default: break;
+    }
+
+    if (sinkFRLcap > 6 && sinkFRLcap <= 15)
+    {
+        return HDMI_FRL_DATA_RATE_4LANES_12GBPS;
+    }
+
+    return HDMI_FRL_DATA_RATE_NONE;
+}
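+
+// The raw cap value follows the HDMI Forum VSDB Max_FRL_Rate encoding
+// (0 = no FRL, 1..6 = increasing lane/rate combinations up to 4 lanes at
+// 12 Gbps); values 7..15 are reserved and are treated here as the highest
+// known rate for forward compatibility. E.g. a sink advertising
+// Max_FRL_Rate = 4 maps to HDMI_FRL_DATA_RATE_4LANES_8GBPS.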
+
+// If we want to force 2ch/48KHz audio, fill that in as the default; if not,
+// look up the sink's short audio descriptor blocks to find the max supported audio.
+static void populateAudioCaps(NVT_EDID_CEA861_INFO const * const p861ExtBlock,
+                              HDMI_SINK_CAPS                    *pSinkCaps)
+{
+    NvU32 i;
+
+    for (i = 0; i < p861ExtBlock->total_sad; i++)
+    {
+        NvU32 data = p861ExtBlock->audio[i].byte1;
+        data = (data & NVT_CEA861_AUDIO_FORMAT_MASK) >> NVT_CEA861_AUDIO_FORMAT_SHIFT;
+
+        // unsupported
+        if ((data == NVT_CEA861_AUDIO_FORMAT_RSVD) ||
+            (data == NVT_CEA861_AUDIO_FORMAT_RSVD15))
+        {
+            continue;
+        }
+
+        // check for HBR audio support. We don't support any other packet types
+        if ((data == NVT_CEA861_AUDIO_FORMAT_DTS_HD) ||
+            (data == NVT_CEA861_AUDIO_FORMAT_MAT))
+        {
+            pSinkCaps->bHBRAudio = NV_TRUE;
+        }
+
+        // num of channels for this audio format
+        data = p861ExtBlock->audio[i].byte1;
+        NvU32 numChannels = ((data & NVT_CEA861_AUDIO_MAX_CHANNEL_MASK) >> NVT_CEA861_AUDIO_MAX_CHANNEL_SHIFT) + 1;
+        if (pSinkCaps->maxAudioChannels < numChannels)
+        {
+            pSinkCaps->maxAudioChannels = numChannels;
+        }
+
+        // get max sampling frequency
+        data = p861ExtBlock->audio[i].byte2;
+        NvU32 sampleFreq = (data & NVT_CEA861_AUDIO_SAMPLE_RATE_192KHZ) ? 192 :
+                           (data & NVT_CEA861_AUDIO_SAMPLE_RATE_176KHZ) ? 176 :
+                           (data & NVT_CEA861_AUDIO_SAMPLE_RATE_96KHZ)  ?  96 :
+                           (data & NVT_CEA861_AUDIO_SAMPLE_RATE_88KHZ)  ?  88 :
+                           (data & NVT_CEA861_AUDIO_SAMPLE_RATE_48KHZ)  ?  48 :
+                           (data & NVT_CEA861_AUDIO_SAMPLE_RATE_44KHZ)  ?  44 :
+                           (data & NVT_CEA861_AUDIO_SAMPLE_RATE_32KHZ)  ?  32 : 0;
+        if (pSinkCaps->maxAudioFreqKHz < sampleFreq)
+        {
+            pSinkCaps->maxAudioFreqKHz = sampleFreq;
+        }
+    }
+}
+
+/*
+ * hdmiAssessLinkCapabilities
+ *
+ * 1. Try physical link training to determine max link capacity
+ * 2. Calculate max audio capabilities
+ * 3. Limit connector max to what the source can support
+ * AssessLinkCapabilities is expected to be called at hotplug time. Ideally, srcCaps only needs
+ * to be calculated once; in the future, move it out to a better place as the need arises.
+ */
+static NVHDMIPKT_RESULT
+hdmiAssessLinkCapabilitiesC671(NVHDMIPKT_CLASS             *pThis,
+                               NvU32                        subDevice,
+                               NvU32                        displayId,
+                               NVT_EDID_INFO const * const  pSinkEdid,
+                               HDMI_SRC_CAPS               *pSrcCaps,
+                               HDMI_SINK_CAPS              *pSinkCaps)
+{
+
+    // Read DSC caps from RM - gpu caps for DSC are the same across DP and HDMI FRL (HDMI 2.1+).
+    // Hence use the same RM control as in the DP case for reading this cap.
+    NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS params;
+    params.subDeviceInstance = 0;
+    params.sorIndex = 0; // Passing SOR index as 0 since all SORs have the same capability.
+
+#if NVHDMIPKT_RM_CALLS_INTERNAL
+    if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient,
+                                  pThis->clientHandles.hDisplay,
+                                  NV0073_CTRL_CMD_DP_GET_CAPS,
+                                  &params,
+                                  sizeof(params)) != NVOS_STATUS_SUCCESS)
+#else // !NVHDMIPKT_RM_CALLS_INTERNAL
+    NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle,
+                                                     params.subDeviceInstance,
+                                                     NV0073_CTRL_CMD_DP_GET_CAPS,
+                                                     &params,
+                                                     sizeof(params));
+    if (bSuccess == NV_FALSE)
+#endif // NVHDMIPKT_RM_CALLS_INTERNAL
+    {
+        return NVHDMIPKT_FAIL;
+    }
+
+    pSrcCaps->dscCaps.dscCapable              = params.DSC.bDscSupported;
+    pSrcCaps->dscCaps.encoderColorFormatMask  = params.DSC.encoderColorFormatMask;
+    pSrcCaps->dscCaps.dualHeadBppTargetMaxX16 = 256; // Tu10x/GA10x HW DSC module allows max 16bpp in 2H1OR mode.
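+
+    // The GPU reports its own maximum FRL rate through the HDMI GPU caps
+    // control below; this bounds the link rate from the source side,
+    // independent of the sink's EDID. A failed query leaves the caps zeroed,
+    // which translates to HDMI_FRL_DATA_RATE_NONE (no FRL support).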
+ + NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS hdmiGpuCapsParams; + NVMISC_MEMSET(&hdmiGpuCapsParams, 0, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS)); +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS, + &hdmiGpuCapsParams, + sizeof(hdmiGpuCapsParams)) != NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + hdmiGpuCapsParams.subDeviceInstance, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS, + &hdmiGpuCapsParams, + sizeof(hdmiGpuCapsParams)); + if (bSuccess == NV_FALSE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + NVMISC_MEMSET(&hdmiGpuCapsParams, 0, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS)); + } + + pSrcCaps->linkMaxFRLRate = translateFRLCapToFRLDataRate(hdmiGpuCapsParams.caps); + + switch(params.DSC.bitsPerPixelPrecision) + { + case NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16: + pSrcCaps->dscCaps.bppPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_16; break; + case NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8: + pSrcCaps->dscCaps.bppPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_8; break; + case NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4: + pSrcCaps->dscCaps.bppPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_4; break; + case NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2: + pSrcCaps->dscCaps.bppPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_2; break; + case NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1: + pSrcCaps->dscCaps.bppPrecision = DSC_BITS_PER_PIXEL_PRECISION_1; break; + default: break; + } + + pSrcCaps->dscCaps.lineBufferSizeKB = params.DSC.lineBufferSizeKB; + pSrcCaps->dscCaps.rateBufferSizeKB = params.DSC.rateBufferSizeKB; + pSrcCaps->dscCaps.maxNumHztSlices = params.DSC.maxNumHztSlices; + pSrcCaps->dscCaps.lineBufferBitDepth = params.DSC.lineBufferBitDepth; + pSrcCaps->dscCaps.maxWidthPerSlice = 5120; // Max DSC buffer width per head is 5120, this can be chunks of 1/2/4 slices, so keep 5120 as the very max. + + pSinkCaps->pHdmiForumInfo = &pSinkEdid->hdmiForumInfo; + populateAudioCaps(&pSinkEdid->ext861, pSinkCaps); + populateAudioCaps(&pSinkEdid->ext861_2, pSinkCaps); + + NvU32 setFRLRate = pSinkEdid->hdmiForumInfo.max_FRL_Rate; + + pSinkCaps->linkMaxFRLRate = translateFRLCapToFRLDataRate(setFRLRate); + pSinkCaps->linkMaxFRLRateDSC = (pSrcCaps->dscCaps.dscCapable && + (pSinkEdid->hdmiForumInfo.dsc_Max_FRL_Rate > setFRLRate)) ? + pSinkCaps->linkMaxFRLRate : + translateFRLCapToFRLDataRate(pSinkEdid->hdmiForumInfo.dsc_Max_FRL_Rate); + + return NVHDMIPKT_SUCCESS; +} + +// Fill in basic params from Timing info etc +static void populateBaseFRLParams(HDMI_VIDEO_TRANSPORT_INFO const *pVidTransInfo, + HDMI_SINK_CAPS const *pSinkCaps, + NvBool bForce2Ch48KHz, + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS *pFRLParams) +{ + pFRLParams->pclk10KHz = pVidTransInfo->pTiming->pclk; + pFRLParams->hTotal = pVidTransInfo->pTiming->HTotal; + pFRLParams->hActive = pVidTransInfo->pTiming->HVisible; + pFRLParams->bpc = pVidTransInfo->bpc; + pFRLParams->pixelPacking = pVidTransInfo->packing; + + pFRLParams->numAudioChannels = bForce2Ch48KHz ? 2 : pSinkCaps->maxAudioChannels; + pFRLParams->audioFreqKHz = bForce2Ch48KHz ? 48 : pSinkCaps->maxAudioFreqKHz; + pFRLParams->audioType = pSinkCaps->bHBRAudio ? 
AUDIO_PKTTYPE_HBR_AUDIO : + AUDIO_PKTTYPE_LPCM_SAMPLE; + + pFRLParams->compressionInfo.dscTotalChunkKBytes = 1024 * (pSinkCaps->pHdmiForumInfo->dsc_totalChunkKBytes); +} + + +// Get next higher link rate +static HDMI_FRL_DATA_RATE getNextHigherLinkRate(HDMI_FRL_DATA_RATE frlRate) +{ + return (frlRate == HDMI_FRL_DATA_RATE_4LANES_12GBPS) ? HDMI_FRL_DATA_RATE_NONE : (frlRate + 1); +} + +// Fill in GPU and Monitor caps for DSC PPS calculations +static void populateDscCaps(HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + DSC_INFO * pDscInfo) +{ + // populate src caps + pDscInfo->gpuCaps.encoderColorFormatMask = pSrcCaps->dscCaps.encoderColorFormatMask; + pDscInfo->gpuCaps.lineBufferSize = pSrcCaps->dscCaps.lineBufferSizeKB; + pDscInfo->gpuCaps.bitsPerPixelPrecision = pSrcCaps->dscCaps.bppPrecision; + pDscInfo->gpuCaps.maxNumHztSlices = pSrcCaps->dscCaps.maxNumHztSlices; + pDscInfo->gpuCaps.lineBufferBitDepth = pSrcCaps->dscCaps.lineBufferBitDepth; + + // populate sink caps + pDscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_RGB; + pDscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_444; + pDscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_SIMPLE_422; + pDscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422; + + pDscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_16; + if (pSinkCaps->pHdmiForumInfo->dsc_Native_420) + { + pDscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420; + } + + // limited by spec + pDscInfo->sinkCaps.maxSliceWidth = 2720; + + NvU32 sliceCountMask = 0; + NvU32 maxNumHztSlices = pSinkCaps->pHdmiForumInfo->dsc_MaxSlices; + NvU32 peakThroughput = (pSinkCaps->pHdmiForumInfo->dsc_MaxPclkPerSliceMHz == 400) ? + DSC_DECODER_PEAK_THROUGHPUT_MODE0_400 : + DSC_DECODER_PEAK_THROUGHPUT_MODE0_340; + + switch(pSinkCaps->pHdmiForumInfo->dsc_MaxSlices) + { + case 16: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_16; // fall-through + case 12: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_12; // fall-through + case 8: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_8; // fall-through + case 4: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_4; // fall-through + case 2: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_2; // fall-through + case 1: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_1; break; + default: break; + } + + pDscInfo->sinkCaps.sliceCountSupportedMask = sliceCountMask; + pDscInfo->sinkCaps.maxNumHztSlices = maxNumHztSlices; + pDscInfo->sinkCaps.lineBufferBitDepth = HDMI_DSC_DECODER_LINE_BUFFER_BIT_DEPTH_CAP; + + // Color depth supported by DSC decoder of panel + pDscInfo->sinkCaps.decoderColorDepthMask |= pSinkCaps->pHdmiForumInfo->dsc_16bpc ? DSC_DECODER_COLOR_DEPTH_CAPS_16_BITS : 0; + pDscInfo->sinkCaps.decoderColorDepthMask |= pSinkCaps->pHdmiForumInfo->dsc_12bpc ? DSC_DECODER_COLOR_DEPTH_CAPS_12_BITS : 0; + pDscInfo->sinkCaps.decoderColorDepthMask |= pSinkCaps->pHdmiForumInfo->dsc_10bpc ? 
DSC_DECODER_COLOR_DEPTH_CAPS_10_BITS : 0;
+    pDscInfo->sinkCaps.decoderColorDepthMask |= DSC_DECODER_COLOR_DEPTH_CAPS_8_BITS;
+
+    pDscInfo->sinkCaps.bBlockPrediction = 1;
+    pDscInfo->sinkCaps.algorithmRevision.versionMajor = 1;
+    pDscInfo->sinkCaps.algorithmRevision.versionMinor = 2;
+    pDscInfo->sinkCaps.peakThroughputMode0 = peakThroughput;
+
+    // Per the DSC v1.2 spec, native 422/420 per-slice peak throughput is approximately twice the
+    // RGB/444 peak throughput. HDMI reports only a single throughput cap (unlike DP, there is no
+    // separate 422/420 throughput cap), so just double the 444 value here.
+    pDscInfo->sinkCaps.peakThroughputMode1 = (peakThroughput == DSC_DECODER_PEAK_THROUGHPUT_MODE0_340) ?
+                                             DSC_DECODER_PEAK_THROUGHPUT_MODE1_680 : DSC_DECODER_PEAK_THROUGHPUT_MODE1_800;
+}
+
+// Fill in mode-related info for the DSC lib
+static void populateDscModesetInfo(HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo,
+                                   MODESET_INFO                    * pDscModesetInfo)
+{
+    pDscModesetInfo->pixelClockHz = pVidTransInfo->pTiming->pclk * 10000; // Requested pixel clock for the mode
+    pDscModesetInfo->activeWidth  = pVidTransInfo->pTiming->HVisible;     // Active Width
+    pDscModesetInfo->activeHeight = pVidTransInfo->pTiming->VVisible;     // Active Height
+    pDscModesetInfo->bitsPerComponent = (NvU32)pVidTransInfo->bpc;        // BPC value to be used
+    pDscModesetInfo->colorFormat = (pVidTransInfo->packing == HDMI_PIXEL_PACKING_RGB)      ? NVT_COLOR_FORMAT_RGB      :
+                                   (pVidTransInfo->packing == HDMI_PIXEL_PACKING_YCbCr444) ? NVT_COLOR_FORMAT_YCbCr444 :
+                                   (pVidTransInfo->packing == HDMI_PIXEL_PACKING_YCbCr422) ? NVT_COLOR_FORMAT_YCbCr422 :
+                                   (pVidTransInfo->packing == HDMI_PIXEL_PACKING_YCbCr420) ? NVT_COLOR_FORMAT_YCbCr420 : 0;
+    pDscModesetInfo->bDualMode = pVidTransInfo->bDualHeadMode;
+    pDscModesetInfo->bDropMode = NV_FALSE;
+}
+
+// Checks against source and sink caps whether DSC is possible.
+// Tries to determine slice width and slice count (accounting for 2Head1Or) and populates this
+// info into the FRL calculation structure; if this calculation fails, DSC cannot be enabled.
+static NvBool evaluateIsDSCPossible(NVHDMIPKT_CLASS                  *pThis,
+                                    HDMI_SRC_CAPS const              *pSrcCaps,
+                                    HDMI_SINK_CAPS const             *pSinkCaps,
+                                    HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo,
+                                    NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS *pFRLParams)
+{
+    const NvU32 numHeadsDrivingSink = pVidTransInfo->bDualHeadMode ? 2 : 1;
+
+    if (!pSrcCaps->dscCaps.dscCapable)
+    {
+        return NV_FALSE;
+    }
+
+    if (!pSinkCaps->pHdmiForumInfo->dsc_1p2 ||
+        !pSinkCaps->linkMaxFRLRateDSC ||
+        (!pSinkCaps->pHdmiForumInfo->dsc_16bpc && (pFRLParams->bpc == HDMI_BPC16)) ||
+        (!pSinkCaps->pHdmiForumInfo->dsc_12bpc && (pFRLParams->bpc == HDMI_BPC12)) ||
+        (!pSinkCaps->pHdmiForumInfo->dsc_10bpc && (pFRLParams->bpc == HDMI_BPC10)))
+    {
+        return NV_FALSE;
+    }
+
+    // Disallow DSC if the source or sink doesn't support DSC with this mode's color format/packing.
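+    // E.g. a YCbCr422 mode passes only if the encoder mask advertises
+    // DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422, and a YCbCr420 mode additionally requires the
+    // sink's dsc_Native_420 cap from the HDMI Forum VSDB.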
+    switch (pVidTransInfo->packing)
+    {
+    case HDMI_PIXEL_PACKING_RGB:
+        if (!(pSrcCaps->dscCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_RGB))
+        {
+            return NV_FALSE;
+        }
+        break;
+    case HDMI_PIXEL_PACKING_YCbCr444:
+        if (!(pSrcCaps->dscCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444))
+        {
+            return NV_FALSE;
+        }
+        break;
+    case HDMI_PIXEL_PACKING_YCbCr422:
+        if (!(pSrcCaps->dscCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422))
+        {
+            return NV_FALSE;
+        }
+        break;
+    case HDMI_PIXEL_PACKING_YCbCr420:
+        if (!(pSrcCaps->dscCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420) ||
+            !pSinkCaps->pHdmiForumInfo->dsc_Native_420)
+        {
+            return NV_FALSE;
+        }
+        break;
+    }
+
+    NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pGetHdmiFrlCapacityComputationParams = NULL;
+    pGetHdmiFrlCapacityComputationParams = pThis->callback.malloc(pThis->cbHandle, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS));
+    if (pGetHdmiFrlCapacityComputationParams)
+    {
+        NvBool bIsDSCPossible = NV_FALSE;
+        NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams));
+        pGetHdmiFrlCapacityComputationParams->input = *pFRLParams;
+        pGetHdmiFrlCapacityComputationParams->dsc.maxSliceCount = NV_MIN(pSrcCaps->dscCaps.maxNumHztSlices * numHeadsDrivingSink, pSinkCaps->pHdmiForumInfo->dsc_MaxSlices);
+        pGetHdmiFrlCapacityComputationParams->dsc.maxSliceWidth = pSrcCaps->dscCaps.maxWidthPerSlice;
+        pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_IS_FRL_DSC_POSSIBLE;
+#if NVHDMIPKT_RM_CALLS_INTERNAL
+        if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient,
+                                      pThis->clientHandles.hDisplay,
+                                      NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION,
+                                      pGetHdmiFrlCapacityComputationParams,
+                                      sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS)
+#else // !NVHDMIPKT_RM_CALLS_INTERNAL
+        NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle,
+                                                         0,
+                                                         NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION,
+                                                         pGetHdmiFrlCapacityComputationParams,
+                                                         sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS));
+        if (bSuccess == NV_TRUE)
+#endif // NVHDMIPKT_RM_CALLS_INTERNAL
+        {
+            bIsDSCPossible = pGetHdmiFrlCapacityComputationParams->dsc.bIsDSCPossible;
+            *pFRLParams = pGetHdmiFrlCapacityComputationParams->input;
+        }
+
+        pThis->callback.free(pThis->cbHandle, pGetHdmiFrlCapacityComputationParams);
+        return bIsDSCPossible;
+    }
+    return NV_FALSE;
+}
+
+static void translateBitRate(HDMI_FRL_DATA_RATE frlRate, NvU32 *pFrlBitRateGbps, NvU32 *pNumLanes)
+{
+    switch(frlRate)
+    {
+    case HDMI_FRL_DATA_RATE_4LANES_12GBPS : { *pFrlBitRateGbps = 12; *pNumLanes = 4; break; }
+    case HDMI_FRL_DATA_RATE_4LANES_10GBPS : { *pFrlBitRateGbps = 10; *pNumLanes = 4; break; }
+    case HDMI_FRL_DATA_RATE_4LANES_8GBPS  : { *pFrlBitRateGbps =  8; *pNumLanes = 4; break; }
+    case HDMI_FRL_DATA_RATE_4LANES_6GBPS  : { *pFrlBitRateGbps =  6; *pNumLanes = 4; break; }
+    case HDMI_FRL_DATA_RATE_3LANES_6GBPS  : { *pFrlBitRateGbps =  6; *pNumLanes = 3; break; }
+    case HDMI_FRL_DATA_RATE_3LANES_3GBPS  : // fall through
+    default                               : { *pFrlBitRateGbps =  3; *pNumLanes = 3; break; }
+    }
+}
+
+// Determine if video transport is possible at any FRL rate in the specified range
+// Iterate from min rate to max rate
+static NVHDMIPKT_RESULT
+determineUncompressedFRLConfig(NVHDMIPKT_CLASS *pThis, +
NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS *pFRLParams, + HDMI_FRL_DATA_RATE minFRLRate, + HDMI_FRL_DATA_RATE maxFRLRate, + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT *pResults) +{ + NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pGetHdmiFrlCapacityComputationParams = NULL; + HDMI_FRL_DATA_RATE frlRate = minFRLRate; + NVHDMIPKT_RESULT status = NVHDMIPKT_INSUFFICIENT_BANDWIDTH; + + pGetHdmiFrlCapacityComputationParams = pThis->callback.malloc(pThis->cbHandle, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + + while (frlRate != HDMI_FRL_DATA_RATE_NONE) + { + translateBitRate(frlRate, &pFRLParams->frlBitRateGbps, &pFRLParams->numLanes); + + if (pGetHdmiFrlCapacityComputationParams) + { + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->input = *pFRLParams; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_UNCOMPRESSED_VIDEO; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + *pResults = pGetHdmiFrlCapacityComputationParams->result; + } + } + else + { + status = NVHDMIPKT_FAIL; + goto uncompressedQuery_exit; + } + + status = (pResults->isVideoTransportSupported && pResults->isAudioSupported) ? NVHDMIPKT_SUCCESS : status; + + if ((status == NVHDMIPKT_SUCCESS) || + (frlRate == maxFRLRate)) + { + break; + } + + // try again at next link rate + frlRate = getNextHigherLinkRate(frlRate); + } + + pResults->frlRate = frlRate; + +uncompressedQuery_exit: + if (pGetHdmiFrlCapacityComputationParams) + { + pThis->callback.free(pThis->cbHandle, pGetHdmiFrlCapacityComputationParams); + } + return status; +} + +// Determines the absolute min n max Bpp settings we can use with DSC. This is irrespective of FRL rate +static void calcBppMinMax(HDMI_SRC_CAPS const *pSrcCaps, + HDMI_SINK_CAPS const *pSinkCaps, + HDMI_VIDEO_TRANSPORT_INFO const *pVidTransInfo, + NvU32 *pBppMinX16, + NvU32 *pBppMaxX16) +{ + + NvU32 bppMinX16 = 0; + NvU32 bppMaxX16 = 0; + + switch(pVidTransInfo->packing) + { + case HDMI_PIXEL_PACKING_YCbCr420: { bppMinX16 = 6 * 16; bppMaxX16 = (3 * pVidTransInfo->bpc * 8 - 1); break; } + case HDMI_PIXEL_PACKING_YCbCr422: { bppMinX16 = 7 * 16; bppMaxX16 = (2 * pVidTransInfo->bpc * 16 - 1); break; } + case HDMI_PIXEL_PACKING_RGB: + case HDMI_PIXEL_PACKING_YCbCr444: + default: { bppMinX16 = 8 * 16; bppMaxX16 = (3 * pVidTransInfo->bpc * 16 - 1); break; } + } + + // cap to 12 if DSC_All_Bpp is not set + if (!pSinkCaps->pHdmiForumInfo->dsc_All_bpp) + { + bppMaxX16 = (bppMaxX16 > 12*16) ? 12*16 : bppMaxX16; + } + + // DSC_GeneratePPS : Multi-tile configs (NVD 5.0 and later), + // because of architectural limitation we can't use bits_per_pixel more than 16. + if (pSrcCaps->dscCaps.maxNumHztSlices > 4U) + { + bppMaxX16 = (bppMaxX16 > 16*16) ? 
16*16 : bppMaxX16; + } + + if (pVidTransInfo->bDualHeadMode && (bppMaxX16 > pSrcCaps->dscCaps.dualHeadBppTargetMaxX16)) + { + bppMaxX16 = pSrcCaps->dscCaps.dualHeadBppTargetMaxX16; + } + + *pBppMinX16 = bppMinX16; + *pBppMaxX16 = bppMaxX16; +} + + +// Determine minimum FRL rate at which Video Transport is possible at given min bpp +// Once FRL rate is found, determine the max bpp possible at this FRL rate +// To determine Primary Compressed Format using this function caller must pass in the full range of min, max FRL and min, max Bpp +// For any optimizations on top of the Primary Compressed Format, caller must adjust the range of these + +static NVHDMIPKT_RESULT +determineCompressedFRLConfig(NVHDMIPKT_CLASS *pThis, + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS *pFRLParams, + HDMI_FRL_DATA_RATE minFRLRate, + HDMI_FRL_DATA_RATE maxFRLRate, + NvU32 bppMinX16, + NvU32 bppMaxX16, + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT *pResults) +{ + NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pGetHdmiFrlCapacityComputationParams = NULL; + HDMI_FRL_DATA_RATE frlRate = minFRLRate; + NvU32 bppTargetX16 = bppMinX16; + NVHDMIPKT_RESULT status = NVHDMIPKT_INSUFFICIENT_BANDWIDTH; + + pGetHdmiFrlCapacityComputationParams = pThis->callback.malloc(pThis->cbHandle, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + + // Set bppTarget to min and iterate over FRL rates + pFRLParams->compressionInfo.bppTargetx16 = bppMinX16; + while (frlRate != HDMI_FRL_DATA_RATE_NONE) + { + translateBitRate(frlRate, &pFRLParams->frlBitRateGbps, &pFRLParams->numLanes); + if (pGetHdmiFrlCapacityComputationParams) + { + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->input = *pFRLParams; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_COMPRESSED_VIDEO; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + *pResults = pGetHdmiFrlCapacityComputationParams->result; + } + } + else + { + status = NVHDMIPKT_FAIL; + goto compressedQuery_exit; + } + + status = (pResults->isVideoTransportSupported && pResults->isAudioSupported) ? NVHDMIPKT_SUCCESS : status; + + if ((status == NVHDMIPKT_SUCCESS) || + (frlRate == maxFRLRate)) + { + break; + } + + frlRate = getNextHigherLinkRate(frlRate); + } + + if (status != NVHDMIPKT_SUCCESS) + { + goto compressedQuery_exit; + } + + // We now have the base FRL rate. 
Iterate over bppTarget to find the max supported bpp + status = NVHDMIPKT_INSUFFICIENT_BANDWIDTH; + bppTargetX16 = bppMaxX16; + NvU32 stepSize = 16; + + while (status != NVHDMIPKT_SUCCESS) + { + pFRLParams->compressionInfo.bppTargetx16 = bppTargetX16; + if (pGetHdmiFrlCapacityComputationParams) + { + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->input = *pFRLParams; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_COMPRESSED_VIDEO; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + *pResults = pGetHdmiFrlCapacityComputationParams->result; + } + } + else + { + status = NVHDMIPKT_FAIL; + goto compressedQuery_exit; + } + + status = (pResults->isVideoTransportSupported && pResults->isAudioSupported) ? NVHDMIPKT_SUCCESS : status; + + if (status == NVHDMIPKT_SUCCESS) + { + // If this is the maxBpp nothing else to try + if (bppTargetX16 == bppMaxX16) + { + break; + } + + // If we detected a successful bppTarget value, go up a step size, + // and iterate by decrementing bppTarget by 1/16 to reach a finer tuned bpp value + if (stepSize == 16) + { + status = NVHDMIPKT_RETRY; + bppTargetX16 = bppTargetX16 + stepSize - 1; + stepSize = 1; + } + } + else + { + bppTargetX16 = bppTargetX16 - stepSize; + // bppTargetX16 is guaranteed to be >= bppMinX16 + } + } + + pResults->frlRate = frlRate; + pResults->bppTargetx16 = bppTargetX16; + +compressedQuery_exit: + if (pGetHdmiFrlCapacityComputationParams) + { + pThis->callback.free(pThis->cbHandle, pGetHdmiFrlCapacityComputationParams); + } + + return status; +} + +/* + * hdmiQueryFRLConfigC671 + * + * This function uses below logic: + * Verify if force params from client are in expected range + * If client is not asking for optimum config or force enable DSC, try uncompressed first + * For DSC enabled, honor all choices client has made for slice count/width. Determine the primary compressed format (PCF) first. + * For any other items client wants to control do this as optimization on top of the PCF + * Call DSC library for PPS generation unless specified otherwise. 
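+ * Example of the search (illustrative): if the minimum-bpp compressed config first fits at
+ * 4 lanes @ 6 Gbps, the bpp search starts at bppMax and steps down 1 bpp (16 in x16 units) at a
+ * time; on the first passing value it backs up one coarse step and refines downward in 1/16 bpp
+ * steps, yielding the highest bpp that still fits at that FRL rate.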
+ */ +static NVHDMIPKT_RESULT +hdmiQueryFRLConfigC671(NVHDMIPKT_CLASS *pThis, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig) +{ + NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pGetHdmiFrlCapacityComputationParams = NULL; + NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS; + + NVMISC_MEMSET(pFRLConfig, 0, sizeof(HDMI_FRL_CONFIG)); + + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS frlParams; + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT frlComputeResult; + NvU32 bppMinX16, bppMaxX16; + + NVMISC_MEMSET(&frlParams, 0, sizeof(NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS)); + NVMISC_MEMSET(&frlComputeResult, 0, sizeof(NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT)); + + NvU32 vic = NVT_GET_CEA_FORMAT(pVidTransInfo->pTiming->etc.status); + NvBool bTryUncompressedMode, bCanUseDSC; + + populateBaseFRLParams(pVidTransInfo, + pSinkCaps, + pClientCtrl->forceAudio2Ch48KHz ? NV_TRUE : NV_FALSE, + &frlParams); + + calcBppMinMax(pSrcCaps, pSinkCaps, pVidTransInfo, &bppMinX16, &bppMaxX16); + bCanUseDSC = evaluateIsDSCPossible(pThis, pSrcCaps, pSinkCaps, pVidTransInfo, &frlParams); + const NvU32 numHeadsDrivingSink = pVidTransInfo->bDualHeadMode ? 2 : 1; + + // Input validation + // Note, maxNumHztSlices src cap is per head. account for total number of heads driving the sink + if ((pClientCtrl->forceFRLRate && (pClientCtrl->frlRate > pSinkCaps->linkMaxFRLRate)) || + (pClientCtrl->enableDSC && !bCanUseDSC) || + (pClientCtrl->forceSliceCount && (pClientCtrl->sliceCount > + (NvU32)(NV_MIN(pSrcCaps->dscCaps.maxNumHztSlices * numHeadsDrivingSink, + pSinkCaps->pHdmiForumInfo->dsc_MaxSlices)))) || + (pClientCtrl->forceSliceWidth && (pClientCtrl->sliceWidth > NV_MIN(pSrcCaps->dscCaps.maxWidthPerSlice, MAX_RECONSTRUCTED_HACTIVE_PIXELS))) || + (pClientCtrl->forceBppx16 && ((pClientCtrl->bitsPerPixelX16 < bppMinX16) || (pClientCtrl->bitsPerPixelX16 > bppMaxX16))) || + (pClientCtrl->forceBppx16 && !pSinkCaps->pHdmiForumInfo->dsc_All_bpp)) + { + return NVHDMIPKT_INVALID_ARG; + } + + bTryUncompressedMode = (bCanUseDSC && (pClientCtrl->enableDSC || + (pClientCtrl->option == HDMI_QUERY_FRL_LOWEST_BANDWIDTH))) ? 
+ NV_FALSE : NV_TRUE; + + HDMI_FRL_DATA_RATE maxRate = NV_MIN(pSinkCaps->linkMaxFRLRate, pSrcCaps->linkMaxFRLRate); + + pGetHdmiFrlCapacityComputationParams = pThis->callback.malloc(pThis->cbHandle, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + + if (bTryUncompressedMode) + { + HDMI_FRL_DATA_RATE minFRLRate = HDMI_FRL_DATA_RATE_NONE, maxFRLRate = HDMI_FRL_DATA_RATE_NONE; + NvBool bHasPreCalcFRLData = NV_FALSE; + + if (pGetHdmiFrlCapacityComputationParams) + { + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->preCalc.vic = vic; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_HAS_PRECAL_FRL_DATA; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + bHasPreCalcFRLData = pGetHdmiFrlCapacityComputationParams->preCalc.bHasPreCalcFRLData; + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + // We iterate over a range of FRL rates to see if timing is supported in uncompressed manner + // adjust the min and max range here according to what we aim for: if client wants to force a single FRL rate, + // min n max point to just this one rate. If client just wants any config, just try max supported rate. 
+ // For everything else, iterate from lowest to highest FRL rate + if (pClientCtrl->forceFRLRate) + { + minFRLRate = pClientCtrl->frlRate; + maxFRLRate = pClientCtrl->frlRate; + } + else if (pClientCtrl->option == HDMI_QUERY_FRL_HIGHEST_BANDWIDTH) + { + minFRLRate = maxRate; + maxFRLRate = maxRate; + } + else if (bHasPreCalcFRLData) + { + HDMI_FRL_DATA_RATE preCalcFrlRate; + pGetHdmiFrlCapacityComputationParams->preCalc.packing = pVidTransInfo->packing; + pGetHdmiFrlCapacityComputationParams->preCalc.bpc = pVidTransInfo->bpc; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_GET_PRECAL_UNCOMPRESSED_FRL_CONFIG; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + preCalcFrlRate = pGetHdmiFrlCapacityComputationParams->preCalc.frlRate; + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + if (preCalcFrlRate <= maxRate) + { + minFRLRate = preCalcFrlRate; + maxFRLRate = preCalcFrlRate; + } + else if (!bCanUseDSC) + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + } + else if (pClientCtrl->option == HDMI_QUERY_FRL_ANY_CONFIG) + { + minFRLRate = maxRate; + maxFRLRate = maxRate; + } + else // HDMI_QUERY_FRL_OPTIMUM_CONFIG or HDMI_QUERY_FRL_LOWEST_BANDWIDTH + { + minFRLRate = HDMI_FRL_DATA_RATE_3LANES_3GBPS; + maxFRLRate = maxRate; + } + + result = determineUncompressedFRLConfig(pThis, &frlParams, minFRLRate, maxFRLRate, &frlComputeResult); + if (result == NVHDMIPKT_SUCCESS) + { + goto frlQuery_Success; + } + // If we could not find a FRL rate and DSC is not allowed, try using min audio see if it gets us a pass result + else if (!bCanUseDSC) + { + frlParams.numAudioChannels = 2; + frlParams.audioFreqKHz = 48; + frlParams.audioType = AUDIO_PKTTYPE_LPCM_SAMPLE; + result = determineUncompressedFRLConfig(pThis, &frlParams, minFRLRate, maxFRLRate, &frlComputeResult); + // If still not found return failure. 
Nothing more to try + if (result != NVHDMIPKT_SUCCESS) + { + goto frlQuery_fail; + } + } + } + + if (bCanUseDSC) + { + HDMI_FRL_DATA_RATE minFRLRateItr, maxFRLRateItr; + HDMI_FRL_DATA_RATE dscMaxFRLRate = NV_MIN(pSinkCaps->linkMaxFRLRateDSC, pSrcCaps->linkMaxFRLRate); + NvU32 bppMinX16Itr, bppMaxX16Itr; + NvBool bHasPreCalcFRLData = NV_FALSE; + + // DSC_All_bpp = 1: + // Lower the compression ratio better the pixel quality, hence a high bppTarget value will be ideal + // DSC_All_bpp = 1 allows us the flexibility to use a bppTarget setting different from the primary compressed format + // DSC_All_bpp = 0: + // Per spec, this supports only the bppTarget from primary compressed format - {minimum FRL rate, bpp, HCactive, HCblank} + + minFRLRateItr = HDMI_FRL_DATA_RATE_3LANES_3GBPS; + maxFRLRateItr = dscMaxFRLRate; + bppMinX16Itr = bppMinX16; + bppMaxX16Itr = bppMaxX16; + + if (pGetHdmiFrlCapacityComputationParams) + { + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->preCalc.vic = vic; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_HAS_PRECAL_FRL_DATA; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + bHasPreCalcFRLData = pGetHdmiFrlCapacityComputationParams->preCalc.bHasPreCalcFRLData; + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + if (bHasPreCalcFRLData) + { + HDMI_FRL_DATA_RATE preCalcFrlRate; + NvU32 preCalcBppx16; + + if (pGetHdmiFrlCapacityComputationParams) + { + pGetHdmiFrlCapacityComputationParams->preCalc.packing = pVidTransInfo->packing; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_GET_PRECAL_COMPRESSED_FRL_CONFIG; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + preCalcFrlRate = pGetHdmiFrlCapacityComputationParams->preCalc.frlRate; + preCalcBppx16 = pGetHdmiFrlCapacityComputationParams->preCalc.bppX16; + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + + if (preCalcFrlRate != HDMI_FRL_DATA_RATE_UNSPECIFIED) + { + if (preCalcFrlRate > 
dscMaxFRLRate) + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + maxFRLRateItr = minFRLRateItr = preCalcFrlRate; + bppMaxX16Itr = bppMinX16Itr = preCalcBppx16; + } + } + + // force SliceWidth and count if requested + if (pClientCtrl->forceSliceCount) + { + frlParams.compressionInfo.hSlices = pClientCtrl->sliceCount; + frlParams.compressionInfo.sliceWidth = NV_UNSIGNED_DIV_CEIL(pVidTransInfo->pTiming->HVisible, pClientCtrl->sliceCount); + } + else if (pClientCtrl->forceSliceWidth) + { + frlParams.compressionInfo.sliceWidth = pClientCtrl->sliceWidth; + frlParams.compressionInfo.hSlices = NV_UNSIGNED_DIV_CEIL(pVidTransInfo->pTiming->HVisible, pClientCtrl->sliceWidth); + } + + if (pClientCtrl->forceFRLRate) + { + if (pClientCtrl->frlRate > dscMaxFRLRate) + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + + minFRLRateItr = pClientCtrl->frlRate; + maxFRLRateItr = pClientCtrl->frlRate; + } + + if (pClientCtrl->forceBppx16) + { + bppMinX16Itr = pClientCtrl->bitsPerPixelX16; + bppMaxX16Itr = pClientCtrl->bitsPerPixelX16; + } + + // Determine Primary Compressed Format + // First determine the FRL rate at which video transport is possible even at bppMin + // Then iterate over bppTarget - start at max n decrement until we hit bppMin. The max bpp for which + // video transport is possible together with the FRL rate is the primary compressed format + + result = determineCompressedFRLConfig(pThis, &frlParams, + minFRLRateItr, maxFRLRateItr, + bppMinX16Itr, bppMaxX16Itr, + &frlComputeResult); + + + // there are no FRL rates at which video transport is possible even at min bpp + // Could not even determine PCF. Cannot support this mode + if (result != NVHDMIPKT_SUCCESS) + { + goto frlQuery_fail; + } + + // Any other optimizations we want to do over the Primary Compressed Format? + { + NvBool bRedoDSCCalc = NV_FALSE; + + if (pClientCtrl->option == HDMI_QUERY_FRL_HIGHEST_BANDWIDTH) + { + NvBool bHasPreCalcFRLData = NV_TRUE; + + if (bHasPreCalcFRLData) + { + frlComputeResult.frlRate = dscMaxFRLRate; + } + else + { + // Keep bppTgt calculated as Primary Compressed Format and use FRL rate the highest availableLinkBw + // redo DSC calculations to recalculate TBlanktoTTotal ratio and HCblank/active to suit the new rate + // The hw method setting matters and may cause blank screen if not recalculated - see Bug 3458295 #9 + minFRLRateItr = maxFRLRateItr = dscMaxFRLRate; + bppMinX16Itr = bppMaxX16Itr = frlComputeResult.bppTargetx16; + bRedoDSCCalc = NV_TRUE; + } + } + + if (pSinkCaps->pHdmiForumInfo->dsc_All_bpp) + { + if ((pClientCtrl->option == HDMI_QUERY_FRL_HIGHEST_PIXEL_QUALITY) && + (frlComputeResult.frlRate < (NvU32)dscMaxFRLRate)) + { + // Increase FRL rate if possible and iterate over primary compressed format bppTarget to max Bpp + minFRLRateItr = getNextHigherLinkRate(frlComputeResult.frlRate); + bppMinX16Itr = frlComputeResult.bppTargetx16; + bppMaxX16Itr = bppMaxX16; + bRedoDSCCalc = NV_TRUE; + } + + if (pClientCtrl->option == HDMI_QUERY_FRL_LOWEST_BANDWIDTH) + { + // Keep FRL rate as the primary compressed format rate and force Bpp to Min + minFRLRateItr = maxFRLRateItr = frlComputeResult.frlRate; + bppMinX16Itr = bppMaxX16Itr = bppMinX16; + bRedoDSCCalc = NV_TRUE; + } + } + + if (bRedoDSCCalc) + { + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS optQueryParams; + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT optQueryResult; + NVMISC_MEMCPY(&optQueryParams, &frlParams, sizeof(NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS)); + + // If optimization is successful copy over new results. 
If not, there is no need to fail; keep the Primary Compressed Format.
+                if (determineCompressedFRLConfig(pThis, &optQueryParams, minFRLRateItr, maxFRLRateItr,
+                                                 bppMinX16Itr, bppMaxX16Itr,
+                                                 &optQueryResult) == NVHDMIPKT_SUCCESS)
+                {
+                    NVMISC_MEMCPY(&frlParams, &optQueryParams, sizeof(NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS));
+                    NVMISC_MEMCPY(&frlComputeResult, &optQueryResult, sizeof(NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT));
+                }
+            }
+        }
+    }
+
+frlQuery_Success:
+    pFRLConfig->maxSupportedAudioCh      = frlParams.numAudioChannels;
+    pFRLConfig->maxSupportedAudioFreqKHz = frlParams.audioFreqKHz;
+    pFRLConfig->dscInfo.sliceCount       = frlParams.compressionInfo.hSlices;
+    pFRLConfig->dscInfo.sliceWidth       = frlParams.compressionInfo.sliceWidth;
+
+    pFRLConfig->frlRate                           = frlComputeResult.frlRate;
+    pFRLConfig->dscInfo.bEnableDSC                = frlComputeResult.engageCompression;
+    pFRLConfig->dscInfo.bitsPerPixelX16           = frlComputeResult.bppTargetx16;
+    pFRLConfig->dscInfo.dscHActiveBytes           = frlComputeResult.hcActiveBytes;
+    pFRLConfig->dscInfo.dscHActiveTriBytes        = frlComputeResult.hcActiveTriBytes;
+    pFRLConfig->dscInfo.dscHBlankTriBytes         = frlComputeResult.hcBlankTriBytes;
+    pFRLConfig->dscInfo.dscTBlankToTTotalRatioX1k = frlComputeResult.tBlankToTTotalX1k;
+
+    if (pFRLConfig->dscInfo.bEnableDSC && !pClientCtrl->skipGeneratePPS)
+    {
+        DSC_INFO     dscInfo;
+        MODESET_INFO dscModesetInfo;
+        WAR_DATA     warData;
+
+        NVMISC_MEMSET(&dscInfo,        0, sizeof(DSC_INFO));
+        NVMISC_MEMSET(&dscModesetInfo, 0, sizeof(MODESET_INFO));
+        NVMISC_MEMSET(&warData,        0, sizeof(WAR_DATA));
+
+        populateDscCaps(pSrcCaps, pSinkCaps, &dscInfo);
+        populateDscModesetInfo(pVidTransInfo, &dscModesetInfo);
+
+        dscInfo.forcedDscParams.sliceWidth = pFRLConfig->dscInfo.sliceWidth;
+        dscInfo.forcedDscParams.dscRevision.versionMajor = 1;
+        dscInfo.forcedDscParams.dscRevision.versionMinor = 2;
+
+        NvU32 bitsPerPixelX16 = pFRLConfig->dscInfo.bitsPerPixelX16;
+        NvU32 frlBitRateGbps = 0, numLanes = 0;
+        translateBitRate(pFRLConfig->frlRate, &frlBitRateGbps, &numLanes);
+        NvU64 availableLinkBw = (NvU64)(frlBitRateGbps) * (NvU64)(numLanes) * MULTIPLIER_1G;
+        warData.connectorType = DSC_HDMI;
+
+        DSC_GENERATE_PPS_OPAQUE_WORKAREA *pDscScratchBuffer = NULL;
+        pDscScratchBuffer = (DSC_GENERATE_PPS_OPAQUE_WORKAREA*)pThis->callback.malloc(pThis->cbHandle,
+                                                                                      sizeof(DSC_GENERATE_PPS_OPAQUE_WORKAREA));
+        if ((DSC_GeneratePPS(&dscInfo,
+                             &dscModesetInfo,
+                             &warData,
+                             availableLinkBw,
+                             pDscScratchBuffer,
+                             pFRLConfig->dscInfo.pps,
+                             &bitsPerPixelX16)) != NVT_STATUS_SUCCESS)
+        {
+            NvHdmiPkt_Print(pThis, "ERROR - DSC PPS calculation failed.");
+            NvHdmiPkt_Assert(0);
+            result = NVHDMIPKT_DSC_PPS_ERROR;
+        }
+
+        if (pDscScratchBuffer != NULL)
+        {
+            pThis->callback.free(pThis->cbHandle, pDscScratchBuffer);
+            pDscScratchBuffer = NULL;
+        }
+
+        // The DSC lib should honor the bpp setting passed from the client; assert here just in case.
+        NvHdmiPkt_Assert(bitsPerPixelX16 == pFRLConfig->dscInfo.bitsPerPixelX16);
+    }
+
+frlQuery_fail:
+    if (pGetHdmiFrlCapacityComputationParams)
+    {
+        pThis->callback.free(pThis->cbHandle, pGetHdmiFrlCapacityComputationParams);
+    }
+
+    return result;
+}
+
+/*
+ * hdmiSetFRLConfigC671
+ */
+static NVHDMIPKT_RESULT
+hdmiSetFRLConfigC671(NVHDMIPKT_CLASS *pThis,
+                     NvU32            subDevice,
+                     NvU32            displayId,
+                     NvBool           bFakeLt,
+                     HDMI_FRL_CONFIG *pFRLConfig)
+{
+    NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS params = {0};
+    NVMISC_MEMSET(&params, 0, sizeof(params));
+    params.subDeviceInstance = subDevice;
+    params.displayId = displayId;
+    params.data = translateFRLRateToNv0073SetHdmiFrlConfig(pFRLConfig->frlRate);
+    params.bFakeLt = bFakeLt;
+
+#if NVHDMIPKT_RM_CALLS_INTERNAL
+    if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient,
+                                  pThis->clientHandles.hDisplay,
+                                  NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG,
+                                  &params,
+                                  sizeof(params)) != NVOS_STATUS_SUCCESS)
+
+#else // !NVHDMIPKT_RM_CALLS_INTERNAL
+    NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle,
+                                                     params.subDeviceInstance,
+                                                     NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG,
+                                                     &params,
+                                                     sizeof(params));
+    if (bSuccess == NV_FALSE)
+#endif // NVHDMIPKT_RM_CALLS_INTERNAL
+    {
+        NvHdmiPkt_Print(pThis, "ERROR - RM call to set HDMI FRL failed.");
+        NvHdmiPkt_Assert(0);
+
+        return NVHDMIPKT_FAIL;
+    }
+
+    return NVHDMIPKT_SUCCESS;
+}
+
+/*
+ * hdmiClearFRLConfigC671
+ */
+static NVHDMIPKT_RESULT
+hdmiClearFRLConfigC671(NVHDMIPKT_CLASS *pThis,
+                       NvU32            subDevice,
+                       NvU32            displayId)
+{
+    NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS;
+
+    NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS params = {0};
+    NVMISC_MEMSET(&params, 0, sizeof(params));
+    params.subDeviceInstance = subDevice;
+    params.displayId = displayId;
+    params.data = NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE;
+
+#if NVHDMIPKT_RM_CALLS_INTERNAL
+    if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient,
+                                  pThis->clientHandles.hDisplay,
+                                  NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG,
+                                  &params,
+                                  sizeof(params)) != NVOS_STATUS_SUCCESS)
+#else // !NVHDMIPKT_RM_CALLS_INTERNAL
+    NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle,
+                                                     params.subDeviceInstance,
+                                                     NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG,
+                                                     &params,
+                                                     sizeof(params));
+
+    if (bSuccess == NV_FALSE)
+#endif // NVHDMIPKT_RM_CALLS_INTERNAL
+    {
+        NvHdmiPkt_Print(pThis, "WARNING - RM call to reset HDMI FRL failed.");
+        result = NVHDMIPKT_FAIL;
+    }
+    return result;
+}
+
+static NVHDMIPKT_RESULT
+hdmiPacketWriteC671(NVHDMIPKT_CLASS* pThis,
+                    NvU32            subDevice,
+                    NvU32            displayId,
+                    NvU32            head,
+                    NVHDMIPKT_TYPE   packetType,
+                    NVHDMIPKT_TC     transmitControl,
+                    NvU32            packetLen,
+                    NvU8 const *const pPacket)
+{
+    NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS;
+    NvU32 pktTypeC671 = pThis->translatePacketType(pThis, packetType);
+
+    if (head >= NVC671_SF_HDMI_INFO_CTRL__SIZE_1 ||
+        packetLen == 0 ||
+        pPacket == 0 ||
+        pktTypeC671 == NVHDMIPKT_C671_INVALID_PKT_TYPE)
+    {
+        result = NVHDMIPKT_INVALID_ARG;
+        goto hdmiPacketWriteC671_exit;
+    }
+
+    if (pktTypeC671 == NVC671_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME)
+    {
+        // On GA10X, we use the Generic infoframe for the ACR WAR. This RM ctrl is used to check whether the WAR is enabled.
+        NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS dispCapsParams;
+
+        NVMISC_MEMSET(&dispCapsParams, 0, sizeof(dispCapsParams));
+
+#if NVHDMIPKT_RM_CALLS_INTERNAL
+        if (NvRmControl(pThis->clientHandles.hClient,
+                        pThis->clientHandles.hDisplay,
+                        NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2,
+                        &dispCapsParams,
+                        sizeof(dispCapsParams)) != NVOS_STATUS_SUCCESS)
+#else // !NVHDMIPKT_RM_CALLS_INTERNAL
+        NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle,
+                                                         subDevice,
+                                                         NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2,
+                                                         &dispCapsParams, sizeof(dispCapsParams));
+        if (bSuccess == NV_FALSE)
+#endif // NVHDMIPKT_RM_CALLS_INTERNAL
+        {
+            NvHdmiPkt_Print(pThis, "ERROR - RM call to get caps failed.");
+            NvHdmiPkt_Assert(0);
+            result = NVHDMIPKT_FAIL;
+            goto hdmiPacketWriteC671_exit;
+        }
+
+        NvBool bSwAcr = (NV0073_CTRL_SYSTEM_GET_CAP(dispCapsParams.capsTbl, NV0073_CTRL_SYSTEM_CAPS_HDMI21_SW_ACR_BUG_3275257)) ?
NV_TRUE: NV_FALSE; + + if (bSwAcr) + { + // acquire mutex + pThis->callback.acquireMutex(pThis->cbHandle); + + result = hdmiPacketWrite0073(pThis, subDevice, displayId, head, packetType, transmitControl, packetLen, pPacket); + + if (result == NVHDMIPKT_SUCCESS) + { + result = hdmiPacketCtrl0073(pThis, subDevice, displayId, head, packetType, transmitControl); + } + + // release mutex + pThis->callback.releaseMutex(pThis->cbHandle); + } + else + { + result = hdmiPacketWrite9171(pThis, subDevice, displayId, head, packetType, transmitControl, packetLen, pPacket); + } + } + else + { + result = hdmiPacketWrite9171(pThis, subDevice, displayId, head, packetType, transmitControl, packetLen, pPacket); + } + +hdmiPacketWriteC671_exit: + return result; +} + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructorC671 + */ +NvBool +hdmiConstructorC671(NVHDMIPKT_CLASS* pThis) +{ + NvBool result = NV_TRUE; + + return result; +} + +/* + * hdmiDestructorC671 + */ +void +hdmiDestructorC671(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +/* + * initializeHdmiPktInterfaceC671 + */ +void +initializeHdmiPktInterfaceC671(NVHDMIPKT_CLASS* pClass) +{ + pClass->dispSfUserClassId = NVC671_DISP_SF_USER; + pClass->hdmiAssessLinkCapabilities = hdmiAssessLinkCapabilitiesC671; + pClass->hdmiQueryFRLConfig = hdmiQueryFRLConfigC671; + pClass->hdmiSetFRLConfig = hdmiSetFRLConfigC671; + pClass->hdmiClearFRLConfig = hdmiClearFRLConfigC671; + pClass->hdmiPacketWrite = hdmiPacketWriteC671; +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_C871.c b/src/common/modeset/hdmipacket/nvhdmipkt_C871.c new file mode 100644 index 0000000..880f96e --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_C871.c @@ -0,0 +1,641 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * File: nvhdmipkt_C871.c + * + */ + +#include "nvhdmipkt_common.h" +/* + * Purpose: Provides packet write functions for HDMI library for T23+ chips + */ +#include "nvhdmipkt_class.h" +#include "nvhdmipkt_internal.h" +#include "hdmi_spec.h" +#include "class/clc871.h" +#include "ctrl/ctrl0073/ctrl0073specific.h" + +#define NVHDMIPKT_C871_MAX_PKT_BYTES_AVI 21 // 3 bytes header + 18 bytes payload + +extern void hdmiWriteAviPacket9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + +/* + * translatePacketTypeC871 + */ +static NvU32 +translatePacketTypeC871(NVHDMIPKT_CLASS* pThis, + NVHDMIPKT_TYPE packetType) +{ + NvU32 typeC871 = NVHDMIPKT_INVALID_PKT_TYPE; // initialize to an invalid type enum + + switch (packetType) + { + case NVHDMIPKT_TYPE_AVI_INFOFRAME: + typeC871 = NVC871_SF_HDMI_INFO_IDX_AVI_INFOFRAME; + break; + case NVHDMIPKT_TYPE_GENERAL_CONTROL: + typeC871 = NVC871_SF_HDMI_INFO_IDX_GCP; + break; + case NVHDMIPKT_TYPE_AUDIO_INFOFRAME: + default: + NvHdmiPkt_Print(pThis, "ERROR - translatePacketType wrong packet type for class C871: %0x.", + packetType); + NvHdmiPkt_Assert(0); + break; + } + + return typeC871; +} + +/* + * hdmiWriteAviPacketC871 + */ +static void +hdmiWriteAviPacketC871(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NvU32 data = 0; + + if (packetLen > NVHDMIPKT_C871_MAX_PKT_BYTES_AVI) + { + NvHdmiPkt_Print(pThis, "ERROR - input AVI packet length incorrect. Write will be capped to max allowable bytes"); + NvHdmiPkt_Assert(0); + } + + data = REG_RD32(pBaseReg, NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW(head)); + data = FLD_SET_DRF_NUM(C871, _SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW, _PB14, pPacket[17], data); + data = FLD_SET_DRF_NUM(C871, _SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW, _PB15, pPacket[18], data); + data = FLD_SET_DRF_NUM(C871, _SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW, _PB16, pPacket[19], data); + data = FLD_SET_DRF_NUM(C871, _SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW, _PB17, pPacket[20], data); + REG_WR32(pBaseReg, NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW(head), data); + + // the lower 17 bytes remain the same as in 9171 class, call 9171 packet write function to program them + hdmiWriteAviPacket9171(pThis, + pBaseReg, + head, + 17, // HB0-2 and PB0-14 + pPacket); + + return; +} + +/* + * hdmiWriteGeneralCtrlPacketC871 + */ +static void +hdmiWriteGeneralCtrlPacketC871(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NvU32 data = 0; + + // orIndexer info is ignored. 
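+    // The GCP payload follows the 3-byte packet header: SB0 (pPacket[3]) carries
+    // Set_AVMUTE/Clear_AVMUTE, SB1 (pPacket[4]) the color depth (CD) and packing phase (PP)
+    // fields, and SB2 (pPacket[5]) the default phase, per the HDMI GCP definition.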
+    data = REG_RD32(pBaseReg, NVC871_SF_HDMI_GCP_SUBPACK(head));
+    data = FLD_SET_DRF_NUM(C871, _SF_HDMI_GCP_SUBPACK, _SB0, pPacket[3], data);
+    data = FLD_SET_DRF_NUM(C871, _SF_HDMI_GCP_SUBPACK, _SB1, pPacket[4], data);
+    data = FLD_SET_DRF_NUM(C871, _SF_HDMI_GCP_SUBPACK, _SB2, pPacket[5], data);
+    REG_WR32(pBaseReg, NVC871_SF_HDMI_GCP_SUBPACK(head), data);
+
+    return;
+}
+
+/*
+ * hdmiPacketWriteC871
+ */
+static NVHDMIPKT_RESULT
+hdmiPacketWriteC871(NVHDMIPKT_CLASS* pThis,
+                    NvU32            subDevice,
+                    NvU32            displayId,
+                    NvU32            head,
+                    NVHDMIPKT_TYPE   packetType,
+                    NVHDMIPKT_TC     transmitControl,
+                    NvU32            packetLen,
+                    NvU8 const *const pPacketIn)
+{
+    NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS;
+    NvU32* pBaseReg = (NvU32*)pThis->memMap[subDevice].pMemBase;
+    NvU32 pktTypeC871 = pThis->translatePacketType(pThis, packetType);
+    NvU32 tc = pThis->translateTransmitControl(pThis, transmitControl);
+    NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS params = {0};
+
+    // pPacketIn can be of varying size. Use a fixed max-size buffer for programming hw units to prevent out-of-bounds access.
+    NvU8 pPacket[NVHDMIPKT_CTAIF_MAX_PKT_BYTES] = {0};
+
+    if (pBaseReg == 0 || head >= NVC871_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 ||
+        packetLen == 0 || pPacketIn == 0 || pktTypeC871 == NVHDMIPKT_INVALID_PKT_TYPE)
+    {
+        result = NVHDMIPKT_INVALID_ARG;
+        NvHdmiPkt_Print(pThis, "Invalid arg");
+        goto hdmiPacketWriteC871_exit;
+    }
+
+    if (packetLen > NVHDMIPKT_CTAIF_MAX_PKT_BYTES)
+    {
+        NvHdmiPkt_Print(pThis, "ERROR - input packet length incorrect %d Packet write will be capped to max allowable bytes", packetLen);
+        packetLen = NVHDMIPKT_CTAIF_MAX_PKT_BYTES;
+        NvHdmiPkt_Assert(0);
+    }
+
+    // input packet looks ok to use, copy over the bytes
+    NVMISC_MEMCPY(pPacket, pPacketIn, packetLen);
+
+    // acquire mutex
+    pThis->callback.acquireMutex(pThis->cbHandle);
+
+    // Disable this packet type.
+    pThis->hdmiWritePacketCtrl(pThis, pBaseReg, head, pktTypeC871, tc, NV_TRUE);
+
+    // write the packet
+    switch (pktTypeC871)
+    {
+    case NVC871_SF_HDMI_INFO_IDX_AVI_INFOFRAME:
+        pThis->hdmiWriteAviPacket(pThis, pBaseReg, head, packetLen, pPacket);
+        break;
+
+    case NVC871_SF_HDMI_INFO_IDX_GCP:
+        // Check whether the GCP packet is AVMute DISABLE or AVMute ENABLE.
+        // Enable HDMI only on GCP unmute, i.e. AVMUTE DISABLE.
+        if (pPacket[HDMI_PKT_HDR_SIZE] == HDMI_GENCTRL_PACKET_MUTE_DISABLE)
+        {
+            // Enable HDMI.
+            NVMISC_MEMSET(&params, 0, sizeof(params));
+            params.subDeviceInstance = (NvU8)subDevice;
+            params.displayId = displayId;
+            params.bEnable = NV0073_CTRL_SPECIFIC_CTRL_HDMI_ENABLE;
+
+#if NVHDMIPKT_RM_CALLS_INTERNAL
+            if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient,
+                                          pThis->clientHandles.hDisplay,
+                                          NV0073_CTRL_CMD_SPECIFIC_CTRL_HDMI,
+                                          &params,
+                                          sizeof(params)) != NVOS_STATUS_SUCCESS)
+
+#else // !NVHDMIPKT_RM_CALLS_INTERNAL
+            NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle,
+                                                             params.subDeviceInstance,
+                                                             NV0073_CTRL_CMD_SPECIFIC_CTRL_HDMI,
+                                                             &params,
+                                                             sizeof(params));
+            if (bSuccess == NV_FALSE)
+#endif // NVHDMIPKT_RM_CALLS_INTERNAL
+            {
+                NvHdmiPkt_Print(pThis, "ERROR - RM call to enable hdmi ctrl failed.");
+                NvHdmiPkt_Assert(0);
+                result = NVHDMIPKT_FAIL;
+            }
+        }
+        pThis->hdmiWriteGeneralCtrlPacket(pThis, pBaseReg, head, packetLen, pPacket);
+        break;
+
+    default:
+        result = NVHDMIPKT_INVALID_ARG;
+        break;
+    }
+
+    // Enable this infoframe.
+ pThis->hdmiWritePacketCtrl(pThis, pBaseReg, head, pktTypeC871, tc, NV_FALSE); + + // release mutex + pThis->callback.releaseMutex(pThis->cbHandle); +hdmiPacketWriteC871_exit: + return result; +} + + +NvBool +isInfoframeOffsetAvailable(NvU32* pBaseReg, + NvU32 head, + NvU32 requestedInfoframe) +{ + NvU32 regAddr, regData = 0; + NvU32 ifIndex, size; + NvBool bResult = NV_TRUE; + + for (ifIndex = 0; ifIndex < NVC871_SF_GENERIC_INFOFRAME_CTRL__SIZE_2; ifIndex++) + { + regAddr = NVC871_SF_GENERIC_INFOFRAME_CTRL(head, ifIndex); + regData = REG_RD32(pBaseReg, regAddr); + size = DRF_VAL(C871, _SF_GENERIC_INFOFRAME_CTRL, _SIZE, regData); + + // if an infoframe is enabled and it's occupying the offset the requested infoframe would use, + // we cannot allow programming this requested infoframe + if (FLD_TEST_DRF(C871, _SF_GENERIC_INFOFRAME_CTRL, _ENABLE, _YES, regData) && (size > 0)) + { + if ((ifIndex + size) > requestedInfoframe) + { + bResult = NV_FALSE; + break; + } + } + } + + return bResult; +} + +/* + * disableInfoframeC871 + */ + +NVHDMIPKT_RESULT +disableInfoframeC871(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 ifIndex) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_TIMEOUT; + NvU32 regAddr, regData; + + regAddr = NVC871_SF_GENERIC_INFOFRAME_CTRL(head, ifIndex); + regData = REG_RD32(pBaseReg, regAddr); + + // if infoframe is already disabled nothing to do + if (FLD_TEST_DRF(C871, _SF_GENERIC_INFOFRAME_CTRL, _ENABLE, _NO, regData)) + { + return NVHDMIPKT_SUCCESS; + } + + // engage timer callbacks to wait for HW register status change if timer callbacks are provided + NvBool bWaitForIdle = NV_FALSE; + if ((pThis->callback.setTimeout != 0) && (pThis->callback.checkTimeout != 0)) + { + // wait until BUSY _NO if timer could be engaged successfully + bWaitForIdle = (pThis->callback.setTimeout(pThis->cbHandle, NVHDMIPKT_STATUS_READ_TIMEOUT_IN_us) == NV_TRUE); + } + + // write ENABLE_NO + regData = FLD_SET_DRF(C871, _SF_GENERIC_INFOFRAME_CTRL, _ENABLE, _NO, regData); + REG_WR32(pBaseReg, regAddr, regData); + + // if timer callbacks are available poll for disable done + if (bWaitForIdle) + { + regData = REG_RD32(pBaseReg, regAddr); + while(FLD_TEST_DRF(C871, _SF_GENERIC_INFOFRAME_CTRL, _BUSY, _YES, regData)) + { + if (pThis->callback.checkTimeout(pThis->cbHandle) == NV_TRUE) + { + // timeout waiting for infoframe to get disabled + NvHdmiPkt_Print(pThis, "MoreInfoframe: timeout waiting for infoframe to get disabled"); + goto disableInfoframe_exit; + } + regData = REG_RD32(pBaseReg, regAddr); + } + + NvHdmiPkt_Assert(FLD_TEST_DRF(C871, _SF_GENERIC_INFOFRAME_CTRL, _BUSY, _NO, regData)); + } + else + { + NvHdmiPkt_Print(pThis, "MoreInfoframe: Clients must ideally provide timer callbacks to wait for enable/disable infoframes"); + NvHdmiPkt_Assert(0); + } + + result = NVHDMIPKT_SUCCESS; + +disableInfoframe_exit: + return result; +} + +/* + * programAdvancedInfoframeC871 + */ +static NVHDMIPKT_RESULT +programAdvancedInfoframeC871(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 head, + NVHDMIPKT_TYPE packetReg, + const ADVANCED_INFOFRAME *pInfoframe) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS; + NvU32* pBaseReg = (NvU32*)pThis->memMap[subDevice].pMemBase; + + if ((packetReg < NVHDMIPKT_TYPE_SHARED_GENERIC1) || (packetReg >= NVHDMIPKT_INVALID_PKT_TYPE)) + { + return NVHDMIPKT_INVALID_ARG; + } + + NvU32 ifIndex = packetReg - NVHDMIPKT_TYPE_SHARED_GENERIC1; + NvU32 ifNum, dwordNum; + + NvU32 regAddr = 0; + NvU32 regData = 0; + NvU32 numOfInfoframes = pInfoframe->isLargeInfoframe ? 
(pInfoframe->numAdditionalInfoframes + 1) : 1;
+
+    if (NV_FALSE == isInfoframeOffsetAvailable(pBaseReg, head, ifIndex))
+    {
+        NvHdmiPkt_Print(pThis, "MoreInfoframe: Client requested overwriting an active infoframe");
+    }
+
+    // acquire mutex
+    pThis->callback.acquireMutex(pThis->cbHandle);
+
+    // disable and wait for infoframe HW unit to be ready
+    result = disableInfoframeC871(pThis, pBaseReg, head, ifIndex);
+    if (result != NVHDMIPKT_SUCCESS)
+    {
+        // release the mutex acquired above before bailing out
+        pThis->callback.releaseMutex(pThis->cbHandle);
+        return result;
+    }
+
+    // write DATA_CTRL
+    regData = 0;
+    regAddr = NVC871_SF_GENERIC_INFOFRAME_DATA_CTRL(head);
+    regData = FLD_SET_DRF_NUM(C871, _SF_GENERIC_INFOFRAME_DATA_CTRL, _OFFSET, ifIndex, regData);
+    REG_WR32(pBaseReg, regAddr, regData);
+
+    // send header + payload
+    NvHdmiPkt_Assert(pInfoframe->packetLen == (9 * sizeof(NvU32) * numOfInfoframes));
+
+    for (ifNum = 0; ifNum < numOfInfoframes; ifNum++)
+    {
+        const NvU8 *pPayload = pInfoframe->pPacket + (ifNum * 9 * sizeof(NvU32));
+
+        for (dwordNum = 0; dwordNum < 9; dwordNum++) // each infoframe is 9 DWORDs including the header
+        {
+            regData = 0;
+            regData = FLD_SET_DRF_NUM(C871, _SF_GENERIC_INFOFRAME_DATA, _BYTE0, pPayload[4*dwordNum + 0], regData);
+            regData = FLD_SET_DRF_NUM(C871, _SF_GENERIC_INFOFRAME_DATA, _BYTE1, pPayload[4*dwordNum + 1], regData);
+            regData = FLD_SET_DRF_NUM(C871, _SF_GENERIC_INFOFRAME_DATA, _BYTE2, pPayload[4*dwordNum + 2], regData);
+            regData = FLD_SET_DRF_NUM(C871, _SF_GENERIC_INFOFRAME_DATA, _BYTE3, pPayload[4*dwordNum + 3], regData);
+
+            REG_WR32(pBaseReg, NVC871_SF_GENERIC_INFOFRAME_DATA(head), regData);
+        }
+    }
+
+    // write GENERIC_CONFIG
+    regData = 0;
+    regAddr = NVC871_SF_GENERIC_INFOFRAME_CONFIG(head, ifIndex);
+    regData = FLD_SET_DRF_NUM(C871, _SF_GENERIC_INFOFRAME_CONFIG, _FID, pInfoframe->flipId, regData);
+    if (pInfoframe->location == INFOFRAME_CTRL_LOC_LINE)
+    {
+        regData = FLD_SET_DRF_NUM(C871, _SF_GENERIC_INFOFRAME_CONFIG, _LINE_ID, pInfoframe->lineNum, regData);
+        regData = (pInfoframe->lineIdReversed) ?
+                  FLD_SET_DRF(C871, _SF_GENERIC_INFOFRAME_CONFIG, _LINE_ID_REVERSED, _YES, regData) :
+                  FLD_SET_DRF(C871, _SF_GENERIC_INFOFRAME_CONFIG, _LINE_ID_REVERSED, _NO, regData);
+    }
+
+    regData = (pInfoframe->crcOverride) ?
+              FLD_SET_DRF(C871, _SF_GENERIC_INFOFRAME_CONFIG, _CRC_OVERRIDE, _YES, regData) :
+              FLD_SET_DRF(C871, _SF_GENERIC_INFOFRAME_CONFIG, _CRC_OVERRIDE, _NO, regData);
+
+    regData = (pInfoframe->matchFidMethodArmState) ?
+ FLD_SET_DRF(C871, _SF_GENERIC_INFOFRAME_CONFIG, _MTD_STATE_CTRL, _ARM, regData) : // send Infoframe at LOC when matching FID found at channel's FID method's ARM state + FLD_SET_DRF(C871, _SF_GENERIC_INFOFRAME_CONFIG, _MTD_STATE_CTRL, _ACT, regData); // default is when FID method is at ACTIVE state + + // write reg + REG_WR32(pBaseReg, regAddr, regData); + + // ENABLE_YES to GENERIC_CTRL + regData = 0; + regAddr = NVC871_SF_GENERIC_INFOFRAME_CTRL(head, ifIndex); + regData = FLD_SET_DRF_NUM(C871, _SF_GENERIC_INFOFRAME_CTRL, _RUN_MODE, pInfoframe->runMode, regData); + regData = FLD_SET_DRF_NUM(C871, _SF_GENERIC_INFOFRAME_CTRL, _LOC, pInfoframe->location, regData); + regData = FLD_SET_DRF_NUM(C871, _SF_GENERIC_INFOFRAME_CTRL, _OFFSET, ifIndex, regData); + regData = FLD_SET_DRF_NUM(C871, _SF_GENERIC_INFOFRAME_CTRL, _SIZE, pInfoframe->numAdditionalInfoframes, regData); + regData = FLD_SET_DRF (C871, _SF_GENERIC_INFOFRAME_CTRL, _ENABLE, _YES, regData); + + // write reg + REG_WR32(pBaseReg, regAddr, regData); + + NvHdmiPkt_Print(pThis, "MoreInfoframe: Sent infoframe of length %d bytes, transmit ctrl 0x%x at offset %d head=%x subdev=%d", + pInfoframe->packetLen, regData, ifIndex, head, subDevice); + + // setup MSC_CTRL + regData = 0; + regAddr = NVC871_SF_GENERIC_INFOFRAME_MISC_CTRL(head); + regData = pInfoframe->winMethodCyaBroadcast ? + FLD_SET_DRF(C871, _SF_GENERIC_INFOFRAME_MISC_CTRL, _WIN_CHN_SEL, _PUBLIC, regData) : + FLD_SET_DRF(C871, _SF_GENERIC_INFOFRAME_MISC_CTRL, _WIN_CHN_SEL, _PRIVATE, regData) ; + regData = pInfoframe->highAudioPriority ? + FLD_SET_DRF(C871, _SF_GENERIC_INFOFRAME_MISC_CTRL, _AUDIO_PRIORITY, _HIGH, regData) : + FLD_SET_DRF(C871, _SF_GENERIC_INFOFRAME_MISC_CTRL, _AUDIO_PRIORITY, _LOW, regData); + // write reg + REG_WR32(pBaseReg, regAddr, regData); + + + // release mutex + pThis->callback.releaseMutex(pThis->cbHandle); + + return result; +} + +/* + * hdmiWritePacketCtrlC871 + */ +static NVHDMIPKT_RESULT +hdmiWritePacketCtrlLegacyPktsC871(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktTypeC871, + NvU32 transmitControl, + NvBool bDisable) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_INVALID_ARG; + NvU32 regOffset = 0; + NvU32 hdmiCtrl = 0; + + if (pBaseReg == 0 || head >= NVC871_SF_HDMI_INFO_CTRL__SIZE_1) + { + return result; + } + + switch (pktTypeC871) + { + case NVC871_SF_HDMI_INFO_IDX_AVI_INFOFRAME: + case NVC871_SF_HDMI_INFO_IDX_GCP: + regOffset = NVC871_SF_HDMI_INFO_CTRL(head, pktTypeC871); + hdmiCtrl = REG_RD32(pBaseReg, regOffset); + hdmiCtrl = (bDisable == NV_TRUE) ? 
+ (FLD_SET_DRF(C871, _SF_HDMI_INFO_CTRL, _ENABLE, _DIS, hdmiCtrl)) : + (transmitControl); + REG_WR32(pBaseReg, regOffset, hdmiCtrl); + result = NVHDMIPKT_SUCCESS; + break; + default: + NvHdmiPkt_Assert(0 && "Invalid pkt type!"); + break; + } + + return result; +} + +/* + * hdmiPacketReadC871 + */ +static NVHDMIPKT_RESULT +hdmiPacketReadC871(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 head, + NVHDMIPKT_TYPE packetReg, + NvU32 bufferLen, + NvU8 *const pOutPktBuffer) +{ + NvU32 ifIndex, ifNum, dw, regAddr, regData, numOfInfoframes; + NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS; + NvU32* pBaseReg = (NvU32*)pThis->memMap[subDevice].pMemBase; + + if ((packetReg < NVHDMIPKT_TYPE_SHARED_GENERIC1) || (packetReg >= NVHDMIPKT_INVALID_PKT_TYPE)) + { + return NVHDMIPKT_INVALID_ARG; + } + + ifIndex = packetReg - NVHDMIPKT_TYPE_SHARED_GENERIC1; + + // write infoframe Offset to DATA_CTRL + regData = 0; + regAddr = NVC871_SF_GENERIC_INFOFRAME_DATA_CTRL(head); + regData = FLD_SET_DRF_NUM(C871, _SF_GENERIC_INFOFRAME_DATA_CTRL, _OFFSET, ifIndex, regData); + REG_WR32(pBaseReg, regAddr, regData); + + // read size of infoframe programmed at this Offset + regAddr = NVC871_SF_GENERIC_INFOFRAME_CTRL(head, ifIndex); + regData = REG_RD32(pBaseReg, regAddr); + numOfInfoframes = DRF_VAL(C871, _SF_GENERIC_INFOFRAME_CTRL, _SIZE, regData) + 1; // total size is 1 more than SIZE field + + NvU32 remainingBufSize = bufferLen; + NvU8 *pBuffer = pOutPktBuffer; + + for (ifNum = 0; ifNum < numOfInfoframes; ifNum++) + { + if (remainingBufSize == 0) + { + NvHdmiPkt_Assert(0 && "MoreInfoframe: Buffer size insufficient to copy read packet data"); + result = NVHDMIPKT_INSUFFICIENT_BUFFER; + break; + } + + // a temporary buffer to read a 36 byte chunk of this infoframe + NvU8 pktBytes[9 * sizeof(NvU32)]; + + for (dw = 0; dw < 9; dw++) // each infoframe is 9 DWORDs including the header + { + regData = REG_RD32(pBaseReg, NVC871_SF_GENERIC_INFOFRAME_DATA(head)); + + pktBytes[dw*4 + 0] = DRF_VAL(C871, _SF_GENERIC_INFOFRAME_DATA, _BYTE0, regData); + pktBytes[dw*4 + 1] = DRF_VAL(C871, _SF_GENERIC_INFOFRAME_DATA, _BYTE1, regData); + pktBytes[dw*4 + 2] = DRF_VAL(C871, _SF_GENERIC_INFOFRAME_DATA, _BYTE2, regData); + pktBytes[dw*4 + 3] = DRF_VAL(C871, _SF_GENERIC_INFOFRAME_DATA, _BYTE3, regData); + } + + NvU32 bytesCopied = (remainingBufSize > 36) ? 36 : remainingBufSize; + NVMISC_MEMCPY(pBuffer, &pktBytes, bytesCopied); + + // move out buffer ptr by the copied bytes + pBuffer += bytesCopied; + // reduce remaining buffer size by the amount we copied + remainingBufSize = remainingBufSize - bytesCopied; + } + + return result; +} + +/* + * hdmiPacketCtrlC871 + */ +static NVHDMIPKT_RESULT +hdmiPacketCtrlC871(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl) +{ + NvU32* pBaseReg = (NvU32*)pThis->memMap[subDevice].pMemBase; + NvU32 tc = pThis->translateTransmitControl(pThis, transmitControl); + + if ((pBaseReg == 0) || (head >= NVC871_SF_GENERIC_INFOFRAME_CTRL__SIZE_1) || + (packetType >= NVHDMIPKT_INVALID_PKT_TYPE)) + { + return NVHDMIPKT_INVALID_ARG; + } + + if ((packetType == NVHDMIPKT_TYPE_VENDOR_SPECIFIC_INFOFRAME) || + (packetType == NVHDMIPKT_TYPE_GENERIC)) + { + NvHdmiPkt_Print(pThis, "Generic and VSI registers removed in C871 HW. 
Call NvHdmiPkt_SetupAdvancedInfoframe to use one of the generic registers!"); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_INVALID_ARG; + } + + if (packetType >= NVHDMIPKT_TYPE_SHARED_GENERIC1 && packetType <= NVHDMIPKT_TYPE_SHARED_GENERIC10) + { + // client is only expected to call packet ctrl interface to disable the infoframe, no support for other packet ctrl options + // to reprogram/change run mode of a packet, call NvHdmiPkt_SetupAdvancedInfoframe interface + NvHdmiPkt_Assert(FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE, _DIS, transmitControl)); + + return disableInfoframeC871(pThis, pBaseReg, head, (packetType - NVHDMIPKT_TYPE_SHARED_GENERIC1)); + } + + NvU32 pktTypeC871 = pThis->translatePacketType(pThis, packetType); + return pThis->hdmiWritePacketCtrl(pThis, pBaseReg, head, pktTypeC871, tc, NV_FALSE); +} + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructorC871 + */ +NvBool +hdmiConstructorC871(NVHDMIPKT_CLASS* pThis) +{ + NvBool result = NV_TRUE; + + return result; +} + + +/* + * hdmiDestructorC871 + */ +void +hdmiDestructorC871(NVHDMIPKT_CLASS* pThis) +{ + return; +} + +/* + * initializeHdmiPktInterfaceC871 + */ +void +initializeHdmiPktInterfaceC871(NVHDMIPKT_CLASS* pClass) +{ + // Update SF_USER data + pClass->dispSfUserClassId = NVC871_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(NvC871DispSfUserMap); + + pClass->translatePacketType = translatePacketTypeC871; + pClass->hdmiWriteAviPacket = hdmiWriteAviPacketC871; + pClass->hdmiWriteGeneralCtrlPacket = hdmiWriteGeneralCtrlPacketC871; + pClass->hdmiPacketWrite = hdmiPacketWriteC871; + pClass->hdmiPacketCtrl = hdmiPacketCtrlC871; + pClass->hdmiWritePacketCtrl = hdmiWritePacketCtrlLegacyPktsC871; + + // generic infoframe (shareable by DP and HDMI) + pClass->hdmiPacketRead = hdmiPacketReadC871; + pClass->programAdvancedInfoframe = programAdvancedInfoframeC871; +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_C971.c b/src/common/modeset/hdmipacket/nvhdmipkt_C971.c new file mode 100644 index 0000000..00e7e54 --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_C971.c @@ -0,0 +1,204 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ *
+ * File: nvhdmipkt_C971.c
+ *
+ */
+
+#include "nvhdmipkt_common.h"
+/*
+ * Purpose: Provides packet write functions for HDMI library for NVD5.0 chips
+ */
+#include "nvhdmipkt_class.h"
+#include "nvhdmipkt_internal.h"
+#include "class/clc971.h"
+
+/*
+ * programAdvancedInfoframeC971
+ */
+static NVHDMIPKT_RESULT
+programAdvancedInfoframeC971(NVHDMIPKT_CLASS *pThis,
+                             NvU32            subDevice,
+                             NvU32            head,
+                             NVHDMIPKT_TYPE   packetReg,
+                             const ADVANCED_INFOFRAME *pInfoframe)
+{
+    NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS;
+    NvU32* pBaseReg = (NvU32*)pThis->memMap[subDevice].pMemBase;
+
+    if ((packetReg < NVHDMIPKT_TYPE_SHARED_GENERIC1) || (packetReg >= NVHDMIPKT_INVALID_PKT_TYPE))
+    {
+        return NVHDMIPKT_INVALID_ARG;
+    }
+
+    NvU32 ifIndex = packetReg - NVHDMIPKT_TYPE_SHARED_GENERIC1;
+    NvU32 ifNum, dwordNum;
+
+    NvU32 regAddr = 0;
+    NvU32 regData = 0;
+    NvU32 numOfInfoframes = pInfoframe->isLargeInfoframe ? (pInfoframe->numAdditionalInfoframes + 1) : 1;
+
+    if (NV_FALSE == isInfoframeOffsetAvailable(pBaseReg, head, ifIndex))
+    {
+        NvHdmiPkt_Print(pThis, "MoreInfoframe: Client requested overwriting an active infoframe");
+    }
+
+    NvHdmiPkt_Assert((pInfoframe->crcOverride == 0) &&
+                     "CRC Override bit not supported in NVD5.0");
+    NvHdmiPkt_Assert((pInfoframe->winMethodCyaBroadcast == 0) &&
+                     "window channel priv reg control not supported in NVD5.0");
+    NvHdmiPkt_Assert((pInfoframe->location != INFOFRAME_CTRL_LOC_LOADV) &&
+                     "LoadV location not supported in NVD5.0");
+
+    // acquire mutex
+    pThis->callback.acquireMutex(pThis->cbHandle);
+
+    // Disable and wait for the infoframe HW unit to be ready. C971 HW provides a
+    // Clear option for the SENT field, so a disable is not strictly required to
+    // verify that new data goes out; we still disable the infoframe before
+    // reprogramming to avoid corrupting a payload that is actively being sent.
+    result = disableInfoframeC871(pThis, pBaseReg, head, ifIndex);
+    if (result != NVHDMIPKT_SUCCESS)
+    {
+        // release the mutex before bailing out; returning while holding it
+        // would deadlock the next caller
+        pThis->callback.releaseMutex(pThis->cbHandle);
+        return result;
+    }
+
+    // write SENT bit to clear the SENT field
+    regAddr = NVC971_SF_GENERIC_INFOFRAME_CTRL(head, ifIndex);
+    regData = REG_RD32(pBaseReg, regAddr);
+    regData = FLD_SET_DRF(C971, _SF_GENERIC_INFOFRAME_CTRL, _SENT, _CLEAR, regData);
+    REG_WR32(pBaseReg, regAddr, regData);
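+
+    //
+    // Programming sequence from here on (same flow as the C871 path): set the
+    // DATA_CTRL offset, stream the 9-DWORD infoframe chunk(s) into DATA, program
+    // GENERIC_CONFIG, then enable transmission through GENERIC_CTRL.
+    //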
+
+    // write DATA_CTRL
+    regData = 0;
+    regAddr = NVC971_SF_GENERIC_INFOFRAME_DATA_CTRL(head);
+    regData = FLD_SET_DRF_NUM(C971, _SF_GENERIC_INFOFRAME_DATA_CTRL, _OFFSET, ifIndex, regData);
+    REG_WR32(pBaseReg, regAddr, regData);
+
+    // send header + payload
+    NvHdmiPkt_Assert(pInfoframe->packetLen == (9 * sizeof(NvU32) * numOfInfoframes));
+
+    for (ifNum = 0; ifNum < numOfInfoframes; ifNum++)
+    {
+        const NvU8 *pPayload = pInfoframe->pPacket + (ifNum * 9 * sizeof(NvU32));
+
+        for (dwordNum = 0; dwordNum < 9; dwordNum++) // each infoframe is 9 DWORDs including the header
+        {
+            regData = 0;
+            regData = FLD_SET_DRF_NUM(C971, _SF_GENERIC_INFOFRAME_DATA, _BYTE0, pPayload[4*dwordNum + 0], regData);
+            regData = FLD_SET_DRF_NUM(C971, _SF_GENERIC_INFOFRAME_DATA, _BYTE1, pPayload[4*dwordNum + 1], regData);
+            regData = FLD_SET_DRF_NUM(C971, _SF_GENERIC_INFOFRAME_DATA, _BYTE2, pPayload[4*dwordNum + 2], regData);
+            regData = FLD_SET_DRF_NUM(C971, _SF_GENERIC_INFOFRAME_DATA, _BYTE3, pPayload[4*dwordNum + 3], regData);
+
+            REG_WR32(pBaseReg, NVC971_SF_GENERIC_INFOFRAME_DATA(head), regData);
+        }
+    }
+
+    // write GENERIC_CONFIG
+    regData = 0;
+    regAddr = NVC971_SF_GENERIC_INFOFRAME_CONFIG(head, ifIndex);
+    regData = FLD_SET_DRF_NUM(C971, _SF_GENERIC_INFOFRAME_CONFIG, _FID, pInfoframe->flipId, regData);
+    if (pInfoframe->location == INFOFRAME_CTRL_LOC_LINE)
+    {
+        regData = FLD_SET_DRF_NUM(C971, _SF_GENERIC_INFOFRAME_CONFIG, _LINE_ID, pInfoframe->lineNum, regData);
+        regData = (pInfoframe->lineIdReversed) ?
+                  FLD_SET_DRF(C971, _SF_GENERIC_INFOFRAME_CONFIG, _LINE_ID_REVERSED, _YES, regData) :
+                  FLD_SET_DRF(C971, _SF_GENERIC_INFOFRAME_CONFIG, _LINE_ID_REVERSED, _NO, regData);
+    }
+
+    regData = (pInfoframe->asSdpOverride) ?
+              FLD_SET_DRF(C971, _SF_GENERIC_INFOFRAME_CONFIG, _AS_SDP_OVERRIDE_EN, _YES, regData) :
+              FLD_SET_DRF(C971, _SF_GENERIC_INFOFRAME_CONFIG, _AS_SDP_OVERRIDE_EN, _NO, regData);
+
+    regData = (pInfoframe->matchFidMethodArmState) ?
+              FLD_SET_DRF(C971, _SF_GENERIC_INFOFRAME_CONFIG, _MTD_STATE_CTRL, _ARM, regData) : // send Infoframe at LOC when matching FID found at channel's FID method's ARM state
+              FLD_SET_DRF(C971, _SF_GENERIC_INFOFRAME_CONFIG, _MTD_STATE_CTRL, _ACT, regData);  // default is when FID method is at ACTIVE state
+
+    // write reg
+    REG_WR32(pBaseReg, regAddr, regData);
+
+    // ENABLE_YES to GENERIC_CTRL
+    regData = 0;
+    regAddr = NVC971_SF_GENERIC_INFOFRAME_CTRL(head, ifIndex);
+    regData = FLD_SET_DRF_NUM(C971, _SF_GENERIC_INFOFRAME_CTRL, _RUN_MODE, pInfoframe->runMode, regData);
+    regData = FLD_SET_DRF_NUM(C971, _SF_GENERIC_INFOFRAME_CTRL, _LOC, pInfoframe->location, regData);
+    regData = FLD_SET_DRF_NUM(C971, _SF_GENERIC_INFOFRAME_CTRL, _OFFSET, ifIndex, regData);
+    regData = FLD_SET_DRF_NUM(C971, _SF_GENERIC_INFOFRAME_CTRL, _SIZE, pInfoframe->numAdditionalInfoframes, regData);
+    regData = FLD_SET_DRF    (C971, _SF_GENERIC_INFOFRAME_CTRL, _ENABLE, _YES, regData);
+
+    // write reg
+    REG_WR32(pBaseReg, regAddr, regData);
+
+    NvHdmiPkt_Print(pThis, "MoreInfoframe: Sent infoframe of length %d bytes, transmit ctrl 0x%x at offset %d head=%x subdev=%d",
+                    pInfoframe->packetLen, regData, ifIndex, head, subDevice);
+
+    // setup MSC_CTRL
+    regData = 0;
+    regAddr = NVC971_SF_GENERIC_INFOFRAME_MISC_CTRL(head);
+    regData = pInfoframe->highAudioPriority ?
+              FLD_SET_DRF(C971, _SF_GENERIC_INFOFRAME_MISC_CTRL, _AUDIO_PRIORITY, _HIGH, regData) :
+              FLD_SET_DRF(C971, _SF_GENERIC_INFOFRAME_MISC_CTRL, _AUDIO_PRIORITY, _LOW, regData);
+    // write reg
+    REG_WR32(pBaseReg, regAddr, regData);
+
+    // release mutex
+    pThis->callback.releaseMutex(pThis->cbHandle);
+
+    return result;
+}
+
+// non-HW - class utility/maintenance functions
+/*
+ * hdmiConstructorC971
+ */
+NvBool
+hdmiConstructorC971(NVHDMIPKT_CLASS* pThis)
+{
+    NvBool result = NV_TRUE;
+
+    return result;
+}
+
+/*
+ * hdmiDestructorC971
+ */
+void
+hdmiDestructorC971(NVHDMIPKT_CLASS* pThis)
+{
+    return;
+}
+
+/*
+ * initializeHdmiPktInterfaceC971
+ */
+void
+initializeHdmiPktInterfaceC971(NVHDMIPKT_CLASS* pClass)
+{
+    // Update SF_USER data
+    pClass->dispSfUserClassId = NVC971_DISP_SF_USER;
+    pClass->dispSfUserSize    = sizeof(NvC971DispSfUserMap);
+
+    // generic infoframe (shareable by DP and HDMI)
+    pClass->programAdvancedInfoframe = programAdvancedInfoframeC971;
+}
diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_CC71.c b/src/common/modeset/hdmipacket/nvhdmipkt_CC71.c
new file mode 100644
index 0000000..3b8bf7b
--- /dev/null
+++ b/src/common/modeset/hdmipacket/nvhdmipkt_CC71.c
@@ -0,0 +1,46 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * File: nvhdmipkt_CC71.c
+ *
+ */
+
+#include "nvhdmipkt_common.h"
+/*
+ * Purpose: Provides packet write functions for HDMI library for NVD5.0 chips
+ */
+#include "nvhdmipkt_class.h"
+#include "nvhdmipkt_internal.h"
+#include "class/clcc71.h"
+
+// non-HW - class utility/maintenance functions
+
+/*
+ * initializeHdmiPktInterfaceCC71
+ */
+void
+initializeHdmiPktInterfaceCC71(NVHDMIPKT_CLASS* pClass)
+{
+    // Update SF_USER data
+    pClass->dispSfUserClassId = NVCC71_DISP_SF_USER;
+    pClass->dispSfUserSize    = sizeof(NvCC71DispSfUserMap);
+}
diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_class.h b/src/common/modeset/hdmipacket/nvhdmipkt_class.h
new file mode 100644
index 0000000..e240af7
--- /dev/null
+++ b/src/common/modeset/hdmipacket/nvhdmipkt_class.h
@@ -0,0 +1,197 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * File: nvhdmipkt_class.h
+ *
+ * Purpose: This file contains the hdmipkt class definition, which defines the class interfaces.
+ */ + +#ifndef _NVHDMIPKT_CLASS_H_ +#define _NVHDMIPKT_CLASS_H_ + +#include "nvlimits.h" +#include "nvhdmi_frlInterface.h" + +/************************************************************************************************* + * NOTE * This header file to be used only inside this (Hdmi Packet) library. * + ************************************************************************************************/ +// NVHDMIPKT_CLASS_ID: HDMI packet class version +// NOTE: Anytime a new class comes with upgrades, it needs to be added here. +// Consult resman\kernel\inc\classhal.h, before adding a class. +typedef enum +{ + NVHDMIPKT_0073_CLASS = 0, // pre GK104 + NVHDMIPKT_9171_CLASS = 1, // GK104 + NVHDMIPKT_9271_CLASS = 2, // GK110 + NVHDMIPKT_9471_CLASS = 3, // GM10X + NVHDMIPKT_9571_CLASS = 4, // GM20X + NVHDMIPKT_C371_CLASS = 5, // GV100 + NVHDMIPKT_C571_CLASS = 6, // TU102 + NVHDMIPKT_C671_CLASS = 7, // GA102, T234D + NVHDMIPKT_C871_CLASS = 9, // T239 + NVHDMIPKT_C971_CLASS = 10, // NVD5.0 + NVHDMIPKT_CC71_CLASS = 13, + NVHDMIPKT_INVALID_CLASS // Not to be used by client, and always the last entry here. +} NVHDMIPKT_CLASS_ID; + +// Hdmi packet class +struct tagNVHDMIPKT_CLASS +{ + // data + NvU32 dispSfUserClassId; // Id from nvidia/class definition + NvU32 dispSfUserSize; + NvU32 numSubDevices; + NvU32 sfUserHandle; + NVHDMIPKT_RM_CLIENT_HANDLES clientHandles; + NVHDMIPKT_MEM_MAP memMap[NV_MAX_SUBDEVICES]; + NvHdmiPkt_CBHandle cbHandle; + NVHDMIPKT_CALLBACK callback; + NVHDMIPKT_CLASS_ID thisId; + NvBool isRMCallInternal; + + // functions + NVHDMIPKT_RESULT + (*hdmiPacketCtrl) (NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl); + + NVHDMIPKT_RESULT + (*hdmiPacketWrite) (NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket); + + NVHDMIPKT_RESULT + (*hdmiPacketRead) (NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 head, + NVHDMIPKT_TYPE packetReg, + NvU32 bufferLen, + NvU8 *const pOutPktBuffer); + + NVHDMIPKT_RESULT + (*programAdvancedInfoframe) (NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 head, + NVHDMIPKT_TYPE packetReg, + const ADVANCED_INFOFRAME* pInfoframe); + + // HW functions - that read/write registers + NvBool + (*hdmiReadPacketStatus) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktTypeNative); + + NVHDMIPKT_RESULT + (*hdmiWritePacketCtrl) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktTypeNative, + NvU32 transmitControl, + NvBool bDisable); + + void + (*hdmiWriteAviPacket) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + + void + (*hdmiWriteAudioPacket) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + + void + (*hdmiWriteGenericPacket) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + + void + (*hdmiWriteGeneralCtrlPacket)(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + + void + (*hdmiWriteVendorPacket) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + + // utility functions to translate the generic packet type and transmit control + // to corresponding rm ctrl or hw define types. 
+ NvU32 + (*translatePacketType) (NVHDMIPKT_CLASS* pThis, + NVHDMIPKT_TYPE packetType); + + NvU32 + (*translateTransmitControl) (NVHDMIPKT_CLASS* pThis, + NVHDMIPKT_TC transmitControl); + + // + // HDMI FRL functions to enable/disable HDMI FRL and calculate the bandwidth + // capacity required for target timing. + // + NVHDMIPKT_RESULT + (*hdmiAssessLinkCapabilities) (NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps); + NVHDMIPKT_RESULT + (*hdmiQueryFRLConfig) (NVHDMIPKT_CLASS *pThis, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig); + + NVHDMIPKT_RESULT + (*hdmiSetFRLConfig) (NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NvBool bFakeLt, + HDMI_FRL_CONFIG *pFRLConfig); + + NVHDMIPKT_RESULT + (*hdmiClearFRLConfig) (NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId); +}; + +#endif //_NVHDMIPKT_CLASS_H_ diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_common.h b/src/common/modeset/hdmipacket/nvhdmipkt_common.h new file mode 100644 index 0000000..780029c --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_common.h @@ -0,0 +1,132 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_common.h + * + * Purpose: This file contains defines and structures used across hdmipkt library. All the + * common stuff goes here. + */ + +#ifndef _NVHDMIPKT_COMMON_H_ +#define _NVHDMIPKT_COMMON_H_ + +/************************************************************************************************* + * NOTE * This header file to be used only inside this (Hdmi Packet) library. 
* + ************************************************************************************************/ + +#include "nvhdmipkt.h" +#include "nvhdmi_frlInterface.h" +#if NVHDMIPKT_RM_CALLS_INTERNAL +#include "nvRmApi.h" +#define CALL_DISP_RM(x) x + +#endif + +/**************************** HDMI Library defines, enums and structs ***************************/ +// typedefs +typedef struct tagNVHDMIPKT_CLASS NVHDMIPKT_CLASS; +typedef struct tagNVHDMIPKT_MEM_MAP NVHDMIPKT_MEM_MAP; + +// Register read/write defines +#define REG_RD32(reg, offset) (*(((volatile NvU32*)(reg)) + ((offset)/4))) +#define REG_WR32(reg, offset, data) ((*(((volatile NvU32*)(reg)) + ((offset)/4))) = (data)) + +#define NVHDMIPKT_INVALID_SUBDEV (0xFFFFFFFF) +#if !defined (WINNT) && !defined(NVHDMIPKT_NVKMS) +#define NVHDMIPKT_DONT_USE_TIMER +#endif +#define NVHDMIPKT_STATUS_READ_TIMEOUT_IN_us (1*1000*1000) /* us - micro second */ + +// Disp SF User memory map and handle structure +struct tagNVHDMIPKT_MEM_MAP +{ + NvU32 subDevice; + NvU32 memHandle; + void* pMemBase; +}; + +// HDMIPKT print define +#if defined (DEBUG) + #define NvHdmiPkt_Print(_p, ...) \ + do { \ + if ((_p)->callback.print) \ + { \ + (_p)->callback.print((_p)->cbHandle, "HdmiPacketLibrary: " __VA_ARGS__); \ + } \ + } while(0) +#else + #define NvHdmiPkt_Print(_p, ...) /* nothing */ +#endif + + +// HDMIPKT assert define +#if defined (DEBUG) + #define NvHdmiPkt_AssertP(p, expr) ((p)->callback.assert && !(expr) ? \ + (p)->callback.assert(#expr, \ + __FILE__, \ + __FUNCTION__, \ + __LINE__) \ + : 0) + #define NvHdmiPkt_Assert(expr) NvHdmiPkt_AssertP(pThis, expr) +#else + #define NvHdmiPkt_AssertP(p, expr) + #define NvHdmiPkt_Assert(expr) +#endif + + +// Prototypes for common functions shared across implementations. +extern void hdmiWriteDummyPacket(NVHDMIPKT_CLASS*, NvU32*, NvU32, NvU32, NvU8 const *const); +extern NVHDMIPKT_RESULT hdmiAssessLinkCapabilitiesDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps); +extern NVHDMIPKT_RESULT hdmiQueryFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig); +extern NVHDMIPKT_RESULT hdmiSetFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NvBool bFakeLt, + HDMI_FRL_CONFIG *pFRLConfig); +extern NVHDMIPKT_RESULT hdmiClearFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId); + +extern NVHDMIPKT_RESULT hdmiPacketReadDummy(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 head, + NVHDMIPKT_TYPE packetReg, + NvU32 bufferLen, + NvU8 *const pOutPktBuffer); + +extern NVHDMIPKT_RESULT programAdvancedInfoframeDummy(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 head, + NVHDMIPKT_TYPE packetReg, + const ADVANCED_INFOFRAME* pInfoframe); + +#endif //_NVHDMIPKT_COMMON_H_ diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_internal.h b/src/common/modeset/hdmipacket/nvhdmipkt_internal.h new file mode 100644 index 0000000..475d8ea --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_internal.h @@ -0,0 +1,77 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * File: nvhdmipkt_internal.h
+ *
+ * Purpose: This file contains defines to be used by nvhdmipkt.c
+ */
+
+#ifndef _NVHDMIPKT_INTERNAL_H_
+#define _NVHDMIPKT_INTERNAL_H_
+
+/*************************************************************************************************
+ * NOTE * This header file to be used only inside this (Hdmi Packet) library.                   *
+ ************************************************************************************************/
+#define toHdmiPktHandle(p)   ((NvHdmiPkt_Handle)(p))
+#define fromHdmiPktHandle(h) ((NVHDMIPKT_CLASS*)(h))
+
+// CTA infoframe max payload size
+#define NVHDMIPKT_CTAIF_MAX_PKT_BYTES 31 // 3 bytes header + 28 bytes data
+
+extern void initializeHdmiPktInterface0073(NVHDMIPKT_CLASS*);
+extern void initializeHdmiPktInterface9171(NVHDMIPKT_CLASS*);
+extern void initializeHdmiPktInterface9271(NVHDMIPKT_CLASS*);
+extern void initializeHdmiPktInterface9471(NVHDMIPKT_CLASS*);
+extern void initializeHdmiPktInterface9571(NVHDMIPKT_CLASS*);
+extern void initializeHdmiPktInterfaceC371(NVHDMIPKT_CLASS*);
+extern void initializeHdmiPktInterfaceC671(NVHDMIPKT_CLASS*);
+extern void initializeHdmiPktInterfaceC871(NVHDMIPKT_CLASS*);
+extern void initializeHdmiPktInterfaceC971(NVHDMIPKT_CLASS*);
+extern void initializeHdmiPktInterfaceCC71(NVHDMIPKT_CLASS*);
+
+extern NvBool hdmiConstructor0073(NVHDMIPKT_CLASS*);
+extern void   hdmiDestructor0073 (NVHDMIPKT_CLASS*);
+extern NvBool hdmiConstructor9171(NVHDMIPKT_CLASS*);
+extern void   hdmiDestructor9171 (NVHDMIPKT_CLASS*);
+extern NvBool hdmiConstructor9271(NVHDMIPKT_CLASS*);
+extern void   hdmiDestructor9271 (NVHDMIPKT_CLASS*);
+extern NvBool hdmiConstructor9471(NVHDMIPKT_CLASS*);
+extern void   hdmiDestructor9471 (NVHDMIPKT_CLASS*);
+extern NvBool hdmiConstructor9571(NVHDMIPKT_CLASS*);
+extern void   hdmiDestructor9571 (NVHDMIPKT_CLASS*);
+extern NvBool hdmiConstructorC371(NVHDMIPKT_CLASS*);
+extern void   hdmiDestructorC371 (NVHDMIPKT_CLASS*);
+extern NvBool hdmiConstructorC671(NVHDMIPKT_CLASS*);
+extern void   hdmiDestructorC671 (NVHDMIPKT_CLASS*);
+extern NvBool hdmiConstructorC871(NVHDMIPKT_CLASS*);
+extern void   hdmiDestructorC871 (NVHDMIPKT_CLASS*);
+extern NvBool isInfoframeOffsetAvailable(NvU32* pBaseReg,
+                                         NvU32 head,
+                                         NvU32 requestedInfoframe);
+extern NVHDMIPKT_RESULT disableInfoframeC871(NVHDMIPKT_CLASS* pThis,
+                                             NvU32* pBaseReg,
+                                             NvU32 head,
+                                             NvU32 ifIndex);
+extern NvBool
hdmiConstructorC971(NVHDMIPKT_CLASS*); +extern void hdmiDestructorC971 (NVHDMIPKT_CLASS*); + +#endif //_NVHDMIPKT_INTERNAL_H_ diff --git a/src/common/modeset/timing/displayid.h b/src/common/modeset/timing/displayid.h new file mode 100644 index 0000000..987c6fe --- /dev/null +++ b/src/common/modeset/timing/displayid.h @@ -0,0 +1,776 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: displayid.h +// +// Purpose: the template for DisplayID parsing (future replacement for EDID) +// +//***************************************************************************** + + +#ifndef __DISPLAYID_H_ +#define __DISPLAYID_H_ + +#include "nvtiming.h" + +// The structures below must be tightly packed, in order to correctly +// overlay on the EDID DisplayID extension block bytes. Both MSVC and +// gcc support the pack() pragma for this. + +#if defined(__GNUC__) || defined(_MSC_VER) +# define __SUPPORTS_PACK_PRAGMA 1 +#else +# error "unrecognized compiler: displayid structures must be tightly packed" +#endif + +#ifdef __SUPPORTS_PACK_PRAGMA +#pragma pack(1) +#endif + +typedef struct _tagDISPLAYID_SECTION +{ + NvU8 version; // displayid version + NvU8 section_bytes; // length of this displayID section excluding mandatory bytes [0, 251] + + NvU8 product_type; // NVT_DISPLAYID_PROD_X + NvU8 extension_count; + + NvU8 data[NVT_DISPLAYID_SECTION_MAX_SIZE]; // data blocks. Note, the length of this structure may + // exceed valid memory, as DisplayID has variable length + +} DISPLAYID_SECTION; + +#define NVT_DISPLAYID_VER_1_1 0x101 + +#define NVT_DISPLAYID_PROD_EXTENSION 0 // Extension (product type not declared) +#define NVT_DISPLAYID_PROD_TEST 1 // Test Structure/Test Equipment +#define NVT_DISPLAYID_PROD_DISPLAY_PANEL 2 // Display Panel, LCD, or PDP module, etc. 
+#define NVT_DISPLAYID_PROD_STANDALONE_MONITOR  3 // Standalone display device, desktop monitor, TV monitor
+#define NVT_DISPLAYID_PROD_RECEIVER            4 // Television receiver or display product capable of RF signals
+#define NVT_DISPLAYID_PROD_REPEATER            5 // Repeater/translator that is not intended as display device
+#define NVT_DISPLAYID_PROD_DIRECT_DRIVE        6 // Direct Drive monitor
+#define NVT_DISPLAYID_PROD_MAX_NUMBER          6 // max product number
+
+
+typedef struct _tagDISPLAYID_DATA_BLOCK_HEADER
+{
+    NvU8 type;       // identification
+    NvU8 revision;
+    NvU8 data_bytes; // number of payload bytes [0, 248]
+
+} DISPLAYID_DATA_BLOCK_HEADER;
+
+#define NVT_DISPLAYID_BLOCK_TYPE_PRODUCT_IDENTITY   0  // Product Identification block
+#define NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_PARAM      1  // Display Parameters block
+#define NVT_DISPLAYID_BLOCK_TYPE_COLOR_CHAR         2  // Color Characteristics block
+#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_1           3  // Type 1 Detailed Timing block
+#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_2           4  // Type 2 Detailed Timing block
+#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_3           5  // Type 3 Short Timing block
+#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_4           6  // Type 4 DMT ID Timing block
+#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_VESA        7  // VESA Standard Timing block
+#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_CEA         8  // CEA Standard Timing block
+#define NVT_DISPLAYID_BLOCK_TYPE_RANGE_LIMITS       9  // Video Timing Range Limits block
+#define NVT_DISPLAYID_BLOCK_TYPE_SERIAL_NUMBER      10 // Product Serial Number block
+#define NVT_DISPLAYID_BLOCK_TYPE_ASCII_STRING       11 // General Purpose ASCII String block
+#define NVT_DISPLAYID_BLOCK_TYPE_DEVICE_DATA        12 // Display Device Data block
+#define NVT_DISPLAYID_BLOCK_TYPE_INTERFACE_POWER    13 // Interface Power Sequencing block
+#define NVT_DISPLAYID_BLOCK_TYPE_TRANSFER_CHAR      14 // Transfer Characteristics block
+#define NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_INTERFACE  15 // Display Interface Data Block
+#define NVT_DISPLAYID_BLOCK_TYPE_STEREO             16 // Stereo Data Block
+#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_5           17 // Type V Timing Short Descriptor
+#define NVT_DISPLAYID_BLOCK_TYPE_TILEDDISPLAY       18 // Tiled Display Data Block
+#define NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_INTERFACE_FEATURES 0x26 // DisplayID 2.0 Display Interface Features Data Block
+#define NVT_DISPLAYID_BLOCK_TYPE_CTA_DATA           0x81 // Display ID data block
+#define NVT_DISPLAYID_BLOCK_TYPE_VENDOR_SPEC        0x7F // Vendor Specific Data Block
+
+#define NVT_DISPLAYID_PRODUCT_IDENTITY_MIN_LEN 12
+#define NVT_DISPLAYID_PRODUCT_IDENTITY_MAX_STRING_LEN 0xE9
+
+typedef struct _tagDISPLAYID_PROD_IDENTIFICATION_BLOCK
+{
+    DISPLAYID_DATA_BLOCK_HEADER header;
+
+    NvU8  vendor[3];
+    NvU16 product_code;
+    NvU32 serial_number;
+    NvU8  model_tag;
+    NvU8  model_year;
+    NvU8  productid_string_size;
+
+    NvU8  productid_string[NVT_DISPLAYID_PRODUCT_IDENTITY_MAX_STRING_LEN];
+} DISPLAYID_PROD_IDENTIFICATION_BLOCK;
+
+typedef struct _tagDISPLAYID_DISPLAY_PARAM_BLOCK
+{
+    DISPLAYID_DATA_BLOCK_HEADER header;
+    NvU16 horizontal_image_size;
+    NvU16 vertical_image_size;
+    NvU16 horizontal_pixel_count;
+    NvU16 vertical_pixel_count;
+
+    NvU8  feature;
+
+    NvU8  transfer_char_gamma;
+    NvU8  aspect_ratio;
+    NvU8  color_bit_depth;
+} DISPLAYID_DISPLAY_PARAM_BLOCK;
+
+#define NVT_DISPLAYID_DISPLAY_PARAM_BLOCK_LEN 0x0C
+
+#define NVT_DISPLAYID_DISPLAY_PARAM_SUPPORT_AUDIO        7:7
+#define NVT_DISPLAYID_DISPLAY_PARAM_SEPARATE_AUDIO       6:6
+#define NVT_DISPLAYID_DISPLAY_PARAM_AUDIO_INPUT_OVERRIDE 5:5
+#define NVT_DISPLAYID_DISPLAY_PARAM_POWER_MANAGEMENT     4:4
+#define NVT_DISPLAYID_DISPLAY_PARAM_FIXED_TIMING         3:3
+#define NVT_DISPLAYID_DISPLAY_PARAM_FIXED_PIXEL_FORMAT   2:2
+#define NVT_DISPLAYID_DISPLAY_PARAM_DEINTERLACING        0:0
+
+#define NVT_DISPLAYID_DISPLAY_PARAM_DEPTH_OVERALL 7:4
+#define NVT_DISPLAYID_DISPLAY_PARAM_DEPTH_NATIVE  3:0
+
+typedef struct _tagDISPLAYID_COLOR_POINT
+{
+    NvU8 color_x_bits_low;
+    NvU8 color_bits_mid;
+    NvU8 color_y_bits_high;
+} DISPLAYID_COLOR_POINT;
+
+#define NVT_DISPLAYID_COLOR_POINT_Y 7:4
+#define NVT_DISPLAYID_COLOR_POINT_X 3:0
+
+#define NVT_DISPLAYID_COLOR_MAX_POINTS 22
+
+typedef struct _tagDISPLAYID_COLOR_CHAR_BLOCK
+{
+    DISPLAYID_DATA_BLOCK_HEADER header;
+
+    // Color Characteristics Information
+    NvU8 point_info;
+
+    DISPLAYID_COLOR_POINT points[NVT_DISPLAYID_COLOR_MAX_POINTS];
+} DISPLAYID_COLOR_CHAR_BLOCK;
+
+#define NVT_DISPLAYID_COLOR_PRIMARIES    6:4
+#define NVT_DISPLAYID_COLOR_WHITE_POINTS 3:0
+#define NVT_DISPLAYID_COLOR_TEMPORAL     7:7
+
+// the following fields apply to Timing Descriptors 1-3 (not all of them are
+// used per descriptor, but the format is the same)
+#define NVT_DISPLAYID_TIMING_PREFERRED          7:7
+#define NVT_DISPLAYID_TIMING_3D_STEREO          6:5
+#define NVT_DISPLAYID_TIMING_3D_STEREO_MONO     0
+#define NVT_DISPLAYID_TIMING_3D_STEREO_STEREO   1
+#define NVT_DISPLAYID_TIMING_3D_STEREO_EITHER   2
+#define NVT_DISPLAYID_TIMING_INTERLACE          4:4
+#define NVT_DISPLAYID_TIMING_ASPECT_RATIO       2:0
+#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_1_1   0
+#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_5_4   1
+#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_4_3   2
+#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_15_9  3
+#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_16_9  4
+#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_16_10 5
+
+typedef struct _tag_DISPLAYID_TIMING_1_DESCRIPTOR
+{
+    NvU8 pixel_clock_low_minus_0_01MHz;
+    NvU8 pixel_clock_mid;
+    NvU8 pixel_clock_high;
+
+    struct
+    {
+        NvU8 aspect_ratio                  : 3;
+        NvU8 rsvd                          : 1;
+        NvU8 interface_frame_scanning_type : 1;
+        NvU8 stereo_support                : 2;
+        NvU8 is_preferred_detailed_timing  : 1;
+    }options;
+
+    struct
+    {
+        NvU8 active_image_pixels_low_minus_1;
+        NvU8 active_image_pixels_high;
+        NvU8 blank_pixels_low_minus_1;
+        NvU8 blank_pixels_high;
+        NvU8 front_porch_low_minus_1;
+        NvU8 front_porch_high : 7;
+        NvU8 sync_polarity    : 1;
+        NvU8 sync_width_low_minus_1;
+        NvU8 sync_width_high;
+    }horizontal;
+
+    struct
+    {
+        NvU8 active_image_lines_low_minus_1;
+        NvU8 active_image_lines_high;
+        NvU8 blank_lines_low_minus_1;
+        NvU8 blank_lines_high;
+        NvU8 front_porch_lines_low_minus_1;
+        NvU8 front_porch_lines_high : 7;
+        NvU8 sync_polarity          : 1;
+        NvU8 sync_width_lines_low_minus_1;
+        NvU8 sync_width_lines_high;
+    }vertical;
+
+} DISPLAYID_TIMING_1_DESCRIPTOR;
+
+#define NVT_DISPLAYID_TIMING_1_MAX_DESCRIPTORS 12
+
+typedef struct _tagDISPLAYID_TIMING_1_BLOCK
+{
+    DISPLAYID_DATA_BLOCK_HEADER   header;
+    DISPLAYID_TIMING_1_DESCRIPTOR descriptors[NVT_DISPLAYID_TIMING_1_MAX_DESCRIPTORS];
+} DISPLAYID_TIMING_1_BLOCK;
+
+#define NVT_DISPLAYID_TIMING_1_POLARITY_SHIFT 15
+#define NVT_DISPLAYID_CHAR_WIDTH_IN_PIXELS    8
+
+typedef struct _tag_DISPLAYID_TIMING_2_DESCRIPTOR
+{
+    NvU8 pixel_clock_low_minus_0_01MHz;
+    NvU8 pixel_clock_mid;
+    NvU8 pixel_clock_high;
+
+    struct
+    {
+        NvU8 rsvd                          : 2;
+        NvU8 vsync_polarity                : 1;
+        NvU8 hsync_polarity                : 1;
+        NvU8 interface_frame_scanning_type : 1;
+        NvU8 stereo_support                : 2;
+        NvU8 is_preferred_detailed_timing  : 1;
+    }options;
+
+    struct
+    {
+        NvU8 active_image_in_char_minus_1;
+        NvU8 active_image_in_char_high  : 1;
+        NvU8 blank_in_char_minus_1      : 7;
+        NvU8 sync_width_in_char_minus_1 : 4;
+        NvU8
front_porch_in_char_minus_1 : 4; + }horizontal; + + struct + { + NvU8 active_image_lines_low_minus_1; + NvU8 active_image_lines_high : 4; + NvU8 reserved : 4; + NvU8 blank_lines_minus_1; + NvU8 sync_width_lines_minus_1 : 4; + NvU8 front_porch_lines_minus_1 : 4; + }vertical; + +} DISPLAYID_TIMING_2_DESCRIPTOR; + +#define NVT_DISPLAYID_TIMING_2_HORIZ_BLANK_PIXEL 7:1 +#define NVT_DISPLAYID_TIMING_2_HORIZ_ACTIVE_PIXEL_HIGH 0:0 +#define NVT_DISPLAYID_TIMING_2_HORIZ_OFFSET 7:4 +#define NVT_DISPLAYID_TIMING_2_HORIZ_SYNC 3:0 +#define NVT_DISPLAYID_TIMING_2_VERT_ACTIVE_PIXEL_HIGH 3:0 +#define NVT_DISPLAYID_TIMING_2_VERT_OFFSET 7:4 +#define NVT_DISPLAYID_TIMING_2_VERT_SYNC 3:0 + +#define NVT_DISPLAYID_TIMING_2_MAX_DESCRIPTORS 22 + +typedef struct _tagDISPLAYID_TIMING_2_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + DISPLAYID_TIMING_2_DESCRIPTOR descriptors[NVT_DISPLAYID_TIMING_2_MAX_DESCRIPTORS]; +} DISPLAYID_TIMING_2_BLOCK; + +typedef struct _TAG_DISPLAYID_TIMING_3_DESCRIPTOR +{ + NvU8 optns; + NvU8 horizontal_active_pixels; + NvU8 transfer; +} DISPLAYID_TIMING_3_DESCRIPTOR; + +#define NVT_DISPLAYID_TIMING_3_FORMULA 6:4 +#define NVT_DISPLAYID_TIMING_3_FORMULA_STANDARD 0 +#define NVT_DISPLAYID_TIMING_3_FORMULA_REDUCED_BLANKING 1 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO 3:0 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_1_1 0 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_5_4 1 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_4_3 2 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_15_9 3 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_16_9 4 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_16_10 5 +#define NVT_DISPLAYID_TIMING_3_INTERLACE 7:7 +#define NVT_DISPLAYID_TIMING_3_REFRESH_RATE 6:0 + +#define NVT_DISPLAYID_TIMING_3_MAX_DESCRIPTORS 82 + +typedef struct _tagDISPLAYID_TIMING_3_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + DISPLAYID_TIMING_3_DESCRIPTOR descriptors[NVT_DISPLAYID_TIMING_3_MAX_DESCRIPTORS]; +} DISPLAYID_TIMING_3_BLOCK; + +#define NVT_DISPLAYID_TIMING_4_MAX_CODES NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN + +typedef struct _tagDISPLAYID_TIMING_4_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 timing_codes[NVT_DISPLAYID_TIMING_4_MAX_CODES]; +} DISPLAYID_TIMING_4_BLOCK; + +#define NVT_DISPLAYID_TIMING_5_STEREO_SUPPORT_MASK 0x60 +#define NVT_DISPLAYID_TIMING_5_FRACTIONAL_RR_SUPPORT_MASK 0x10 +#define NVT_DISPLAYID_TIMING_5_FORMULA_SUPPORT_MASK 3 + +typedef struct _TAG_DISPLAYID_TIMING_5_DESCRIPTOR +{ + NvU8 optns; + NvU8 rsvd; + NvU8 horizontal_active_pixels_low; + NvU8 horizontal_active_pixels_high; + NvU8 vertical_active_pixels_low; + NvU8 vertical_active_pixels_high; + NvU8 refresh_rate; +} DISPLAYID_TIMING_5_DESCRIPTOR; + +#define NVT_DISPLAYID_TIMING_5_MAX_DESCRIPTORS 53 + +typedef struct _tagDISPLAYID_TIMING_5_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + DISPLAYID_TIMING_5_DESCRIPTOR descriptors[NVT_DISPLAYID_TIMING_5_MAX_DESCRIPTORS]; +} DISPLAYID_TIMING_5_BLOCK; + +#define DISPLAYID_TIMING_VESA_BLOCK_SIZE 0x0A +#define DISPLAYID_TIMING_CEA_BLOCK_SIZE 0x08 + +typedef struct _tagDISPLAYID_TIMING_MODE_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 timing_modes[DISPLAYID_TIMING_VESA_BLOCK_SIZE]; +} DISPLAYID_TIMING_MODE_BLOCK; + + +typedef struct _tagDISPLAYID_RANGE_LIMITS_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 pixel_clock_min[3]; + NvU8 pixel_clock_max[3]; + NvU8 horizontal_frequency_min; + NvU8 horizontal_frequency_max; + NvU16 horizontal_blanking_min; + NvU8 vertical_refresh_rate_min; + NvU8 vertical_refresh_rate_max; + NvU16 vertical_blanking_min; + + NvU8 
optns; +} DISPLAYID_RANGE_LIMITS_BLOCK; + +#define DISPLAYID_RANGE_LIMITS_BLOCK_LEN 0xF + +#define NVT_DISPLAYID_RANGE_LIMITS_INTERLACE 7:7 +#define NVT_DISPLAYID_RANGE_LIMITS_CVT_STANDARD 6:6 +#define NVT_DISPLAYID_RANGE_LIMITS_CVT_REDUCED 5:5 +#define NVT_DISPLAYID_RANGE_LIMITS_DFD 4:4 + +typedef struct _tagDISPLAYID_ASCII_STRING_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 data[NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN]; +} DISPLAYID_ASCII_STRING_BLOCK; + +typedef struct _tagDISPLAYID_DEVICE_DATA_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + + NvU8 technology; + NvU8 operating_mode; + NvU16 horizontal_pixel_count; + NvU16 vertical_pixel_count; + NvU8 aspect_ratio; + NvU8 orientation; + + NvU8 subpixel_info; + NvU8 horizontal_pitch; + NvU8 vertical_pitch; + + NvU8 color_bit_depth; + NvU8 response_time; + +} DISPLAYID_DEVICE_DATA_BLOCK; + +#define DISPLAYID_DEVICE_DATA_BLOCK_LEN 0xD + +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_CRT_MONOCHROME 0x00 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_CRT_STANDARD 0x01 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_CRT_OTHER 0x02 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_PASSIVE_MATRIX_TN 0x10 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_PASSIVE_MATRIX_CHOL_LC 0x11 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_PASSIVE_MATRIX_FERRO_LC 0x12 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_PASSIVE_MATRIX_OTHER 0x13 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_ACTIVE_MATRIX_TN 0x14 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_ACTIVE_MATRIX_IPS 0x15 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_ACTIVE_MATRIX_VA 0x16 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_ACTIVE_MATRIX_OCB 0x17 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_ACTIVE_MATRIX_FERRO 0x18 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_OTHER 0x1F +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_PLASMA_DC 0x20 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_PLASMA_AC 0x21 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ELECTROLUM 0x30 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_INORGANIC_LED 0x40 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ORGANIC_LED 0x50 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_FED 0x60 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ELECTROPHORETIC 0x70 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ELECTROCHROMIC 0x80 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ELECTROMECHANICAL 0x90 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ELECTROWETTING 0xA0 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_OTHER 0xF0 + +// Display Device operating mode info +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE 7:4 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_REFLECTIVE_NO_ILLUM 0x0 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_REFLECTIVE_ILLUM 0x1 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_REFLECTIVE_ILLUM_DEF 0x2 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSMISSIVE_NO_ILLUM 0x3 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSMISSIVE_ILLUM 0x4 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSMISSIVE_ILLUM_DEF 0x5 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_EMISSIVE 0x6 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSFLECTIVE_REF 0x7 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSFLECTIVE_TRANS 0x8 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSPARENT_AMB 0x9 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSPARENT_EMIS 0xA +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_PROJECTION_REF 0xB +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_PROJECTION_TRANS 0xC +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_PROJECTION_EMIS 0xD +#define NVT_DISPLAYID_DEVICE_BACKLIGHT 3:3 +#define NVT_DISPLAYID_DEVICE_INTENSITY 2:2 + +// Display Device aspect 
ratio/orientation info
+#define NVT_DISPLAYID_DEVICE_ORIENTATION               7:6
+#define NVT_DISPLAYID_DEVICE_ORIENTATION_LANDSCAPE     0
+#define NVT_DISPLAYID_DEVICE_ORIENTATION_PORTRAIT      1
+#define NVT_DISPLAYID_DEVICE_ORIENTATION_NOT_FIXED     2
+#define NVT_DISPLAYID_DEVICE_ORIENTATION_UNDEFINED     3
+#define NVT_DISPLAYID_DEVICE_ROTATION                  5:4
+#define NVT_DISPLAYID_DEVICE_ROTATION_NONE             0
+#define NVT_DISPLAYID_DEVICE_ROTATION_CLOCKWISE        1
+#define NVT_DISPLAYID_DEVICE_ROTATION_COUNTERCLOCKWISE 2
+#define NVT_DISPLAYID_DEVICE_ROTATION_BOTH             3
+#define NVT_DISPLAYID_DEVICE_ZERO_PIXEL                3:2
+#define NVT_DISPLAYID_DEVICE_ZERO_PIXEL_UPPER_LEFT     0
+#define NVT_DISPLAYID_DEVICE_ZERO_PIXEL_UPPER_RIGHT    1
+#define NVT_DISPLAYID_DEVICE_ZERO_PIXEL_LOWER_LEFT     2
+#define NVT_DISPLAYID_DEVICE_ZERO_PIXEL_LOWER_RIGHT    3
+#define NVT_DISPLAYID_DEVICE_SCAN                      1:0
+#define NVT_DISPLAYID_DEVICE_SCAN_UNDEFINED            0
+#define NVT_DISPLAYID_DEVICE_SCAN_FAST_LONG            1
+#define NVT_DISPLAYID_DEVICE_SCAN_FAST_SHORT           2
+
+// Display Device Color Depth information
+#define NVT_DISPLAYID_DEVICE_COLOR_DEPTH 3:0
+
+// Display Device Response Time information
+#define NVT_DISPLAYID_DEVICE_WHITE_BLACK   7:7
+#define NVT_DISPLAYID_DEVICE_RESPONSE_TIME 6:0
+
+#define NVT_DISPLAYID_SUBPIXEL_UNDEFINED            0
+#define NVT_DISPLAYID_SUBPIXEL_RGB_VERTICAL         1
+#define NVT_DISPLAYID_SUBPIXEL_RGB_HORIZONTAL       2
+#define NVT_DISPLAYID_SUBPIXEL_VERTICAL_STR         3
+#define NVT_DISPLAYID_SUBPIXEL_HORIZONTAL_STR       4
+#define NVT_DISPLAYID_SUBPIXEL_QUAD_RED_TOP_LEFT    5
+#define NVT_DISPLAYID_SUBPIXEL_QUAD_RED_BOTTOM_LEFT 6
+#define NVT_DISPLAYID_SUBPIXEL_DELTA_RGB            7
+#define NVT_DISPLAYID_SUBPIXEL_MOSAIC               8
+#define NVT_DISPLAYID_SUBPIXEL_QUAD_INC_WHITE       9
+#define NVT_DISPLAYID_SUBPIXEL_FIVE                 10
+#define NVT_DISPLAYID_SUBPIXEL_SIX                  11
+#define NVT_DISPLAYID_SUBPIXEL_PENTILE              12
+
+typedef struct _tagDISPLAYID_INTERFACE_POWER_BLOCK
+{
+    DISPLAYID_DATA_BLOCK_HEADER header;
+    NvU8 power_sequence_T1;
+    NvU8 power_sequence_T2;
+    NvU8 power_sequence_T3;
+    NvU8 power_sequence_T4_min;
+    NvU8 power_sequence_T5_min;
+    NvU8 power_sequence_T6_min;
+} DISPLAYID_INTERFACE_POWER_BLOCK;
+
+#define DISPLAYID_INTERFACE_POWER_BLOCK_LEN 0x6
+
+#define NVT_DISPLAYID_POWER_T1_MIN 7:4
+#define NVT_DISPLAYID_POWER_T1_MAX 3:0
+#define NVT_DISPLAYID_POWER_T2     5:0
+#define NVT_DISPLAYID_POWER_T3     5:0
+#define NVT_DISPLAYID_POWER_T4_MIN 6:0
+#define NVT_DISPLAYID_POWER_T5_MIN 5:0
+#define NVT_DISPLAYID_POWER_T6_MIN 5:0
+
+typedef struct _tagDISPLAYID_TRANSFER_CHAR_BLOCK
+{
+    DISPLAYID_DATA_BLOCK_HEADER header;
+    NvU8 info;
+    NvU8 samples;
+    NvU8 curve_data[NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN - 2];
+} DISPLAYID_TRANSFER_CHAR_BLOCK;
+
+typedef struct _tagDISPLAYID_INTERFACE_DATA_BLOCK
+{
+    DISPLAYID_DATA_BLOCK_HEADER header;
+    NvU8 info;
+
+    NvU8 version;
+    NvU8 color_depth_rgb;
+    NvU8 color_depth_ycbcr444;
+    NvU8 color_depth_ycbcr422;
+    NvU8 content_protection;
+    NvU8 content_protection_version;
+
+    NvU8 spread;
+
+    NvU8 interface_attribute_1;
+    NvU8 interface_attribute_2;
+} DISPLAYID_INTERFACE_DATA_BLOCK;
+
+#define DISPLAYID_INTERFACE_DATA_BLOCK_LEN 0xA
+
+#define NVT_DISPLAYID_INTERFACE_TYPE 7:4
+
+// Interface Codes (note exception for Analog Interface)
+#define NVT_DISPLAYID_INTERFACE_TYPE_ANALOG        0
+#define NVT_DISPLAYID_INTERFACE_TYPE_LVDS          1
+#define NVT_DISPLAYID_INTERFACE_TYPE_TMDS          2
+#define NVT_DISPLAYID_INTERFACE_TYPE_RSDS          3
+#define NVT_DISPLAYID_INTERFACE_TYPE_DVI_D         4
+#define NVT_DISPLAYID_INTERFACE_TYPE_DVI_I_ANALOG  5
+#define NVT_DISPLAYID_INTERFACE_TYPE_DVI_I_DIGITAL 6
+#define
NVT_DISPLAYID_INTERFACE_TYPE_HDMI_A 7 +#define NVT_DISPLAYID_INTERFACE_TYPE_HDMI_B 8 +#define NVT_DISPLAYID_INTERFACE_TYPE_MDDI 9 +#define NVT_DISPLAYID_INTERFACE_TYPE_DISPLAYPORT 10 +#define NVT_DISPLAYID_INTERFACE_TYPE_PROPRIETARY 11 + +// Analog Interface Subtype codes +#define NVT_DISPLAYID_INTERFACE_TYPE_ANALOG_VGA 0 +#define NVT_DISPLAYID_INTERFACE_TYPE_ANALOG_VESA_NAVI_V 1 +#define NVT_DISPLAYID_INTERFACE_TYPE_ANALOG_VESA_NAVI_D 2 + +#define NVT_DISPLAYID_INTERFACE_NUMLINKS 3:0 +#define NVT_DISPLAYID_INTERFACE_CONTENT 2:0 +#define NVT_DISPLAYID_INTERFACE_CONTENT_NONE 0 +#define NVT_DISPLAYID_INTERFACE_CONTENT_HDCP 1 +#define NVT_DISPLAYID_INTERFACE_CONTENT_DTCP 2 +#define NVT_DISPLAYID_INTERFACE_CONTENT_DPCP 3 +#define NVT_DISPLAYID_INTERFACE_SPREAD_TYPE 7:6 +#define NVT_DISPLAYID_INTERFACE_SPREAD_TYPE_NONE 0 +#define NVT_DISPLAYID_INTERFACE_SPREAD_TYPE_DOWN 1 +#define NVT_DISPLAYID_INTERFACE_SPREAD_TYPE_CENTER 2 +#define NVT_DISPLAYID_INTERFACE_SPREAD_PER 3:0 + +#define NVT_DISPLAYID_INTERFACE_RGB16 5:5 +#define NVT_DISPLAYID_INTERFACE_RGB14 4:4 +#define NVT_DISPLAYID_INTERFACE_RGB12 3:3 +#define NVT_DISPLAYID_INTERFACE_RGB10 2:2 +#define NVT_DISPLAYID_INTERFACE_RGB8 1:1 +#define NVT_DISPLAYID_INTERFACE_RGB6 0:0 + +#define NVT_DISPLAYID_INTERFACE_YCBCR444_16 5:5 +#define NVT_DISPLAYID_INTERFACE_YCBCR444_14 4:4 +#define NVT_DISPLAYID_INTERFACE_YCBCR444_12 3:3 +#define NVT_DISPLAYID_INTERFACE_YCBCR444_10 2:2 +#define NVT_DISPLAYID_INTERFACE_YCBCR444_8 1:1 +#define NVT_DISPLAYID_INTERFACE_YCBCR444_6 0:0 + +#define NVT_DISPLAYID_INTERFACE_YCBCR422_16 4:4 +#define NVT_DISPLAYID_INTERFACE_YCBCR422_14 3:3 +#define NVT_DISPLAYID_INTERFACE_YCBCR422_12 2:2 +#define NVT_DISPLAYID_INTERFACE_YCBCR422_10 1:1 +#define NVT_DISPLAYID_INTERFACE_YCBCR422_8 0:0 + +// LVDS specific settings +#define NVT_DISPLAYID_LVDS_COLOR 4:4 +#define NVT_DISPLAYID_LVDS_2_8 3:3 +#define NVT_DISPLAYID_LVDS_12 2:2 +#define NVT_DISPLAYID_LVDS_5 1:1 +#define NVT_DISPLAYID_LVDS_3_3 0:0 + +#define NVT_DISPLAYID_INTERFACE_DE 2:2 +#define NVT_DISPLAYID_INTERFACE_POLARITY 1:1 +#define NVT_DISPLAYID_INTERFACE_STROBE 0:0 + +typedef struct _tagDISPLAYID_STEREO_INTERFACE_METHOD_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 stereo_bytes; + NvU8 stereo_code; + NvU8 timing_sub_block[NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN]; +} DISPLAYID_STEREO_INTERFACE_METHOD_BLOCK; + +#define NVT_DISPLAYID_STEREO_FIELD_SEQUENTIAL 0x0 +#define NVT_DISPLAYID_STEREO_SIDE_BY_SIDE 0x1 +#define NVT_DISPLAYID_STEREO_PIXEL_INTERLEAVED 0x2 +#define NVT_DISPLAYID_STEREO_DUAL_INTERFACE 0x3 +#define NVT_DISPLAYID_STEREO_MULTIVIEW 0x4 +#define NVT_DISPLAYID_STEREO_PROPRIETARY 0xFF + +#define NVT_DISPLAYID_STEREO_MIRRORING 2:1 +#define NVT_DISPLAYID_STEREO_POLARITY 0:0 + +typedef struct _tagDISPLAYID_TILED_DISPLAY_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + struct + { + NvU8 single_tile_behavior:3; // 0x03 + NvU8 multi_tile_behavior:2; // 0x03 + NvU8 rsvd :1; // 0x03 + NvU8 has_bezel_info :1; // 0x03 + NvU8 single_enclosure :1; // 0x03 + } capability; + struct + { + NvU8 row :4; // 0x04 + NvU8 col :4; // 0x04 + } topology_low; + struct + { + NvU8 y :4; // 0x05 + NvU8 x :4; // 0x05 + } location_low; + struct + { + NvU8 y :1; // 0x06 + NvU8 reserved1 :1; // 0x06 + NvU8 x :1; // 0x06 + NvU8 reserved2 :1; // 0x06 + NvU8 row :1; // 0x06 + NvU8 reserved3 :1; // 0x06 + NvU8 col :1; // 0x06 + NvU8 reserved4 :1; // 0x06 + } topo_loc_high; + struct + { + NvU8 width_low; // 0x07 + NvU8 width_high; // 0x08 + NvU8 height_low; // 0x09 + NvU8 height_high; // 0X0A 
+ } native_resolution; + struct + { + NvU8 pixel_density; // 0x0B + NvU8 top; // 0x0C + NvU8 bottom; // 0x0D + NvU8 right; // 0x0E + NvU8 left; // 0x0F + } bezel_info; + struct + { + NvU8 vendor_id[3]; // 0x10 ~ 0x12 + NvU8 product_id[2]; // 0x13 ~ 0x14 + NvU8 serial_number[4]; // 0x15 ~ 0x18 + } topology_id; +} DISPLAYID_TILED_DISPLAY_BLOCK; + +typedef struct _tagDISPLAYID_INTERFACE_FEATURES_DATA_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 supported_color_depth_rgb; + NvU8 supported_color_depth_ycbcr444; + NvU8 supported_color_depth_ycbcr422; + NvU8 supported_color_depth_ycbcr420; + NvU8 minimum_pixel_rate_ycbcr420; + NvU8 supported_audio_capability; + NvU8 supported_colorspace_eotf_combination_1; + NvU8 supported_colorspace_eotf_combination_2; + NvU8 additional_supported_colorspace_eotf_total; + NvU8 additional_supported_colorspace_eotf[NVT_DISPLAYID_DISPLAY_INTERFACE_FEATURES_MAX_ADDITIONAL_SUPPORTED_COLORSPACE_EOTF]; +} DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK; + +#define DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK_MAX_LEN sizeof(DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK) + +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB16 5:5 +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB14 4:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB12 3:3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB10 2:2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB8 1:1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB6 0:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_16 5:5 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_14 4:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_12 3:3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_10 2:2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_8 1:1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_6 0:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR422_16 4:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR422_14 3:3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR422_12 2:2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR422_10 1:1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR422_8 0:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR420_16 4:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR420_14 3:3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR420_12 2:2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR420_10 1:1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR420_8 0:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_AUDIO_SUPPORTED_32KHZ 7:7 +#define NVT_DISPLAYID_INTERFACE_FEATURES_AUDIO_SUPPORTED_44_1KHZ 6:6 +#define NVT_DISPLAYID_INTERFACE_FEATURES_AUDIO_SUPPORTED_48KHZ 5:5 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_BT2020_EOTF_SMPTE_ST2084 6:6 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_BT2020_EOTF_BT2020 5:5 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_DCI_P3_EOTF_DCI_P3 4:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_ADOBE_RGB_EOTF_ADOBE_RGB 3:3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_BT709_EOTF_BT1886 2:2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_BT601_EOTF_BT601 1:1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_SRGB_EOTF_SRGB 0:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_EOTF_TOTAL 2:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE 7:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_NOT_DEFINED 0 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_SRGB 1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_BT601 2 +#define 
NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_BT709 3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_ADOBE_RGB 4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_DCI_P3 5 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_BT2020 6 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_CUSTOM 7 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF 3:0 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_NOT_DEFINED 0 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_SRGB 1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_BT601 2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_BT709 3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_ADOBE_RGB 4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_DCI_P3 5 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_BT2020 6 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_GAMMA 7 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_SMPTE_ST2084 8 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_HYBRID_LOG 9 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_CUSTOM 10 + + +#ifdef __SUPPORTS_PACK_PRAGMA +#pragma pack() +#endif + +#endif // __DISPLAYID_H_ diff --git a/src/common/modeset/timing/displayid20.h b/src/common/modeset/timing/displayid20.h new file mode 100644 index 0000000..03ca269 --- /dev/null +++ b/src/common/modeset/timing/displayid20.h @@ -0,0 +1,797 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: displayid20.h +// +// Purpose: the template for DisplayID 2.0 parsing (future replacement for EDID) +// +//***************************************************************************** + + +#ifndef __DISPLAYID20_H_ +#define __DISPLAYID20_H_ + +#include "nvtiming.h" + +// The structures below must be tightly packed, in order to correctly +// overlay on the DisplayID 2.0 block bytes. Both MSVC and +// gcc support the pack() pragma for this. 
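Why the pragma matters: without pack(1), a compiler is free to insert padding between members, so a structure overlaid on raw DisplayID section bytes would read from the wrong offsets. A minimal stand-alone sketch of the effect (illustrative only, not part of the patch; the type names are hypothetical and the 5-byte figure assumes a 4-byte unsigned int):

    #pragma pack(1)
    typedef struct { unsigned char tag; unsigned int payload; } PackedOverlay;   /* 1 + 4 = 5 bytes */
    #pragma pack()
    typedef struct { unsigned char tag; unsigned int payload; } PaddedOverlay;   /* typically 8 bytes */

    /* C11 compile-time check: the packed layout matches the wire format exactly. */
    _Static_assert(sizeof(PackedOverlay) == 5, "pack(1) must remove interior padding");

With the pragma active, sizeof(DISPLAYID_2_0_SECTION_HEADER) below is exactly 4 and sizeof(DISPLAYID_2_0_DATA_BLOCK_HEADER) exactly 3, which the size macros that follow rely on.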
+
+#if defined(__GNUC__) || defined(_MSC_VER)
+#  define __SUPPORTS_PACK_PRAGMA 1
+#else
+#  error "unrecognized compiler: displayid structures must be tightly packed"
+#endif
+
+#ifdef __SUPPORTS_PACK_PRAGMA
+#pragma pack(1)
+#endif
+
+#define DISPLAYID_2_0_SECTION_SIZE_TOTAL(_pSectionHeader_)  ((_pSectionHeader_).section_bytes + \
+                                                             sizeof(DISPLAYID_2_0_SECTION_HEADER) + \
+                                                             sizeof(NvU8))
+#define DISPLAYID_2_0_DATA_BLOCK_SIZE_TOTAL(_pBlockHeader_) ((_pBlockHeader_)->data_bytes + \
+                                                             sizeof(DISPLAYID_2_0_DATA_BLOCK_HEADER))
+#define DISPLAYID_2_0_SECTION_SIZE_MAX                      256
+#define DISPLAYID_2_0_SECTION_DATA_SIZE_MAX                 (DISPLAYID_2_0_SECTION_SIZE_MAX - \
+                                                             sizeof(DISPLAYID_2_0_SECTION_HEADER))
+
+typedef struct _tagDISPLAYID_2_0_SECTION_HEADER
+{
+    NvU8 revision:4;      // displayID revision
+    NvU8 version:4;       // displayID version
+    NvU8 section_bytes;   // length of this displayID section excluding mandatory bytes [0, 251]
+
+    NvU8 product_type:4;  // Display Product Primary Use Case
+    NvU8 reserved:4;      // RESERVED
+    NvU8 extension_count; // Total extension count.
+} DISPLAYID_2_0_SECTION_HEADER;
+
+typedef struct _tagDISPLAYID_2_0_SECTION
+{
+    DISPLAYID_2_0_SECTION_HEADER header;
+
+    NvU8 data[DISPLAYID_2_0_SECTION_SIZE_MAX]; // data blocks. Note, DisplayID has variable length
+} DISPLAYID_2_0_SECTION;
+
+#define DISPLAYID_2_0_VERSION  2
+#define DISPLAYID_2_0_REVISION 0
+
+#define DISPLAYID_2_0_PROD_EXTENSION                    0 // Extension (same primary use case as base section)
+#define DISPLAYID_2_0_PROD_TEST                         1 // Test Structure/Test Equipment
+#define DISPLAYID_2_0_PROD_GENERIC_DISPLAY              2 // None of the listed primary use cases; generic display
+#define DISPLAYID_2_0_PROD_TELEVISION                   3 // Television (TV) display
+#define DISPLAYID_2_0_PROD_DESKTOP_PRODUCTIVITY_DISPLAY 4 // Desktop productivity display
+#define DISPLAYID_2_0_PROD_DESKTOP_GAMING_DISPLAY       5 // Desktop gaming display
+#define DISPLAYID_2_0_PROD_PRESENTATION_DISPLAY         6 // Presentation display
+#define DISPLAYID_2_0_PROD_HMD_VR                       7 // Head mounted Virtual Reality display
+#define DISPLAYID_2_0_PROD_HMD_AR                       8 // Head mounted Augmented Reality display
+
+typedef struct _tagDISPLAYID_2_0_DATA_BLOCK_HEADER
+{
+    NvU8 type;       // Data block tag
+    NvU8 revision:3; // block revision
+    NvU8 reserved:5;
+    NvU8 data_bytes; // number of payload bytes in Block [0, 248]
+} DISPLAYID_2_0_DATA_BLOCK_HEADER;
+
+#define DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY           0x20
+#define DISPLAYID_2_0_BLOCK_TYPE_DISPLAY_PARAM              0x21
+#define DISPLAYID_2_0_BLOCK_TYPE_TIMING_7                   0x22
+#define DISPLAYID_2_0_BLOCK_TYPE_TIMING_8                   0x23
+#define DISPLAYID_2_0_BLOCK_TYPE_TIMING_9                   0x24
+#define DISPLAYID_2_0_BLOCK_TYPE_RANGE_LIMITS               0x25
+#define DISPLAYID_2_0_BLOCK_TYPE_INTERFACE_FEATURES         0x26
+#define DISPLAYID_2_0_BLOCK_TYPE_STEREO                     0x27
+#define DISPLAYID_2_0_BLOCK_TYPE_TILED_DISPLAY              0x28
+#define DISPLAYID_2_0_BLOCK_TYPE_CONTAINER_ID               0x29
+#define DISPLAYID_2_0_BLOCK_TYPE_TIMING_10                  0x2A
+#define DISPLAYID_2_0_BLOCK_TYPE_ADAPTIVE_SYNC              0x2B
+#define DISPLAYID_2_0_BLOCK_TYPE_ARVR_HMD                   0x2C
+#define DISPLAYID_2_0_BLOCK_TYPE_ARVR_LAYER                 0x2D
+#define DISPLAYID_2_0_BLOCK_TYPE_BRIGHTNESS_LUMINANCE_RANGE 0x2E
+// 0x2F - 0x7D RESERVED for Additional VESA-defined Data Blocks
+#define DISPLAYID_2_0_BLOCK_TYPE_VENDOR_SPEC                0x7E
+// 0x7F - 0x80 RESERVED
+#define DISPLAYID_2_0_BLOCK_TYPE_CTA_DATA                   0x81
+// 0x82 - 0xFF RESERVED for additional data blocks related to external standards organization(s).
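Taken together, the section header, block header, and tag values above are enough to iterate a section's variable-length data-block area. A hedged sketch of such a walk, assuming the definitions above (handleBlock is a hypothetical callback, and a production parser would additionally validate each data_bytes against the bytes remaining in the section):

    // Sketch: walk the data blocks of one DisplayID 2.0 section.
    static void walkDisplayId20Blocks(const DISPLAYID_2_0_SECTION *pSection,
                                      void (*handleBlock)(const DISPLAYID_2_0_DATA_BLOCK_HEADER *))
    {
        NvU32 offset = 0;

        // section_bytes covers the variable-length data area of this section.
        while (offset + sizeof(DISPLAYID_2_0_DATA_BLOCK_HEADER) <= pSection->header.section_bytes)
        {
            const DISPLAYID_2_0_DATA_BLOCK_HEADER *pHdr =
                (const DISPLAYID_2_0_DATA_BLOCK_HEADER *)&pSection->data[offset];

            // A zero tag here is treated as trailing padding; no further blocks follow.
            if (pHdr->type == 0)
                break;

            handleBlock(pHdr);

            // Advance past the 3-byte header plus this block's payload.
            offset += DISPLAYID_2_0_DATA_BLOCK_SIZE_TOTAL(pHdr);
        }
    }

A dispatcher keyed on pHdr->type against the DISPLAYID_2_0_BLOCK_TYPE_* values mirrors the shape of the parseDisplayId20* entry points declared at the end of this header.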
+ +#define DISPLAYID_2_0_PRODUCT_NAME_STRING_MAX_LEN ((0xFB - 0xF) + 1) + +typedef struct _tagDISPLAYID_2_0_PROD_IDENTIFICATION_BLOCK +{ + // Product Identification Data Block (0x20) + // Number of payload bytes 12(0xC) - 248(0xF8) + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + + NvU8 vendor[3]; + NvU8 product_code[2]; + NvU8 serial_number[4]; + NvU8 model_tag; + NvU8 model_year; + NvU8 product_name_string_size; + NvU8 product_name_string[DISPLAYID_2_0_PRODUCT_NAME_STRING_MAX_LEN]; +} DISPLAYID_2_0_PROD_IDENTIFICATION_BLOCK; + +typedef struct _tagDISPLAY_2_0_DISPLAY_PARAM_BLOCK_HEADER +{ + NvU8 type; // Display Parameters Data Block (0x21) + NvU8 revision:3; + NvU8 reserved:4; + NvU8 image_size_multiplier:1; + NvU8 data_bytes; // number of payload bytes 29(0x1D) +} DISPLAY_2_0_DISPLAY_PARAM_BLOCK_HEADER; + +typedef struct _tagDISPLAYID_2_0_COLOR_CHROMATICITY +{ + NvU8 color_x_bits_low; + struct { + NvU8 color_x_bits_high:4; + NvU8 color_y_bits_low:4; + } color_bits_mid; + NvU8 color_y_bits_high; +} DISPLAYID_2_0_COLOR_CHROMATICITY; + +typedef enum _tagDISPLAYID_2_0_NATIVE_COLOR_DEPTH +{ + NATIVE_COLOR_NOT_DEFINED = 0, + NATIVE_COLOR_BPC_6 = 1, + NATIVE_COLOR_BPC_8 = 2, + NATIVE_COLOR_BPC_10 = 3, + NATIVE_COLOR_BPC_12 = 4, + NATIVE_COLOR_BPC_16 = 5, +} DISPLAYID_2_0_NATIVE_COLOR_DEPTH; + +#define DISPLAYID_2_0_DISPLAY_PARAM_BLOCK_PAYLOAD_LENGTH 29 +typedef struct _tagDISPLAYID_2_0_DISPLAY_PARAM_BLOCK +{ + DISPLAY_2_0_DISPLAY_PARAM_BLOCK_HEADER header; + + NvU8 horizontal_image_size[2]; + NvU8 vertical_image_size[2]; + NvU8 horizontal_pixel_count[2]; + NvU8 vertical_pixel_count[2]; + + struct { + NvU8 scan_orientation :3; + NvU8 luminance_information :2; + NvU8 reserved :1; + NvU8 color_information :1; + NvU8 audio_speaker_information :1; + } feature; + + DISPLAYID_2_0_COLOR_CHROMATICITY primary_color_1_chromaticity; + DISPLAYID_2_0_COLOR_CHROMATICITY primary_color_2_chromaticity; + DISPLAYID_2_0_COLOR_CHROMATICITY primary_color_3_chromaticity; + DISPLAYID_2_0_COLOR_CHROMATICITY white_point_chromaticity; + NvU8 max_luminance_full_coverage[2]; + NvU8 max_luminance_10_percent_rectangular_coverage[2]; + NvU8 min_luminance[2]; + + struct { + NvU8 color_depth :3; + NvU8 reserved0 :1; + NvU8 device_technology :3; + NvU8 device_theme_preference :1; + } color_depth_and_device_technology; + + NvU8 gamma_EOTF; +} DISPLAYID_2_0_DISPLAY_PARAM_BLOCK; + +#define DISPLAYID_2_0_SCAN_ORIENTATION_LRTB 0 // Left to right, top to bottom +#define DISPLAYID_2_0_SCAN_ORIENTATION_RLTB 1 // Right to left, top to bottom +#define DISPLAYID_2_0_SCAN_ORIENTATION_TBRL 2 // Top to bottom, right to left +#define DISPLAYID_2_0_SCAN_ORIENTATION_BTRL 3 // Bottom to top, right to left +#define DISPLAYID_2_0_SCAN_ORIENTATION_RLBT 4 // Right to left, bottom to top +#define DISPLAYID_2_0_SCAN_ORIENTATION_LRBT 5 // Left to right, bottom to top +#define DISPLAYID_2_0_SCAN_ORIENTATION_BTLR 6 // Bottom to top, left to right +#define DISPLAYID_2_0_SCAN_ORIENTATION_TBLR 7 // Top to bottom, left to right + +#define DISPLAYID_2_0_COLOR_INFORMATION_1931_CIE 0 +#define DISPLAYID_2_0_color_INFORMATION_1976_CIE 1 + +#define DISPLAYID_2_0_AUDIO_SPEAKER_INTEGRATED 0 +#define DISPLAYID_2_0_AUDIO_SPEAKER_NOT_INTEGRATED 1 + +#define DISPLAYID_2_0_DEVICE_TECHNOLOGY_UNSPECIFIED 0 +#define DISPLAYID_2_0_DEVICE_TECHNOLOGY_LCD 1 +#define DISPLAYID_2_0_DEVICE_TECHNOLOGY_OLED 2 + +#define DISPLAYID_2_0_TYPE7_DSC_PASSTHRU_REVISION 1 +#define DISPLAYID_2_0_TYPE7_YCC420_SUPPORT_REVISION 2 + +// DisplayID_v2.0 E5 - DSC Pass-Through timing +// DisplayID_v2.0 E7 
- YCC420 and > 20 bytes per descriptor supported +typedef struct _tagDISPLAYID_2_0_TIMING_7_BLOCK_HEADER +{ + NvU8 type; // Type VII Timing (0x22) + NvU8 revision :3; + NvU8 dsc_passthrough :1; + NvU8 payload_bytes_len :3; + NvU8 reserved :1; + NvU8 data_bytes; // Values range from 1(0x01) to 248(0xF8) +} DISPLAYID_2_0_TIMING_7_BLOCK_HEADER; + +typedef struct _tag_DISPLAYID_2_0_TIMING_7_DESCRIPTOR +{ + // Range is defined as 0.001 through 16,777.216 MP/s + NvU8 pixel_clock[3]; + + struct + { + NvU8 aspect_ratio : 4; + NvU8 interface_frame_scanning_type : 1; + NvU8 stereo_support : 2; + NvU8 is_preferred_or_ycc420 : 1; + } options; + + struct + { + NvU8 active_image_pixels[2]; + NvU8 blank_pixels[2]; + NvU8 front_porch_pixels_low; + NvU8 front_porch_pixels_high : 7; + NvU8 sync_polarity : 1; + NvU8 sync_width_pixels[2]; + } horizontal; + + struct + { + NvU8 active_image_lines[2]; + NvU8 blank_lines[2]; + NvU8 front_porch_lines_low; + NvU8 front_porch_lines_high : 7; + NvU8 sync_polarity : 1; + NvU8 sync_width_lines[2]; + } vertical; +} DISPLAYID_2_0_TIMING_7_DESCRIPTOR; + +#define DISPLAYID_2_0_TIMING_7_MAX_DESCRIPTORS 12 + +typedef struct _tagDISPLAYID_2_0_TIMING_7_BLOCK +{ + DISPLAYID_2_0_TIMING_7_BLOCK_HEADER header; + DISPLAYID_2_0_TIMING_7_DESCRIPTOR descriptors[DISPLAYID_2_0_TIMING_7_MAX_DESCRIPTORS]; +} DISPLAYID_2_0_TIMING_7_BLOCK; + +#define DISPLAYID_2_0_TIMING_DSC_PASSTHRU_TIMING 1 + +// the following fields apply to Timing 7 Descriptors (Not all of them are +// used per descriptor, but the format is the same +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_1_1 0 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_5_4 1 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_4_3 2 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_15_9 3 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_16_9 4 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_16_10 5 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_64_27 6 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_256_135 7 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_CALCULATE 8 // calculate using Horizontal and Vertical Active Image Pixels + +#define DISPLAYID_2_0_TIMING_PROGRESSIVE_SCAN 0 +#define DISPLAYID_2_0_TIMING_INTERLACED_SCAN 1 + +#define DISPLAYID_2_0_TIMING_3D_STEREO_MONO 0 +#define DISPLAYID_2_0_TIMING_3D_STEREO_STEREO 1 +#define DISPLAYID_2_0_TIMING_3D_STEREO_EITHER 2 + +#define DISPLAYID_2_0_TIMING_SYNC_POLARITY_NEGATIVE 0 +#define DISPLAYID_2_0_TIMING_SYNC_POLARITY_POSITIVE 1 + +typedef struct _tagDISPLAYID_2_0_TIMING_8_BLOCK_HEADER +{ + NvU8 type; // Type VIII Timing (0x23) + NvU8 revision :3; + NvU8 timing_code_size :1; + NvU8 reserved :1; + NvU8 is_support_yuv420 :1; + NvU8 timing_code_type :2; + NvU8 data_bytes; // Values range from 1(0x01) to 248(0xF8) +} DISPLAYID_2_0_TIMING_8_BLOCK_HEADER; + +#define DISPLAYID_2_0_TIMING_8_MAX_CODES 248 + +typedef struct _tagDISPLAYID_2_0_TIMING_8_BLOCK +{ + DISPLAYID_2_0_TIMING_8_BLOCK_HEADER header; + NvU8 timingCode[DISPLAYID_2_0_TIMING_8_MAX_CODES]; +} DISPLAYID_2_0_TIMING_8_BLOCK; + +// the following fields apply to Timing 8 Descriptors +#define DISPLAYID_2_0_TIMING_CODE_DMT 0 +#define DISPLAYID_2_0_TIMING_CODE_CTA_VIC 1 +#define DISPLAYID_2_0_TIMING_CODE_HDMI_VIC 2 +#define DISPLAYID_2_0_TIMING_CODE_RSERVED 3 +#define DISPLAYID_2_0_TIMING_CODE_SIZE_1_BYTE 0 +#define DISPLAYID_2_0_TIMING_CODE_SIZE_2_BYTE 1 + +typedef struct _TAG_DISPLAYID_2_0_TIMING_9_DESCRIPTOR +{ + struct { + NvU8 timing_formula :3; + NvU8 reserved0 :1; + NvU8 rr_1000div1001_support :1; + NvU8 stereo_support :2; + NvU8 reserved1 :1; + } options; + + NvU8 
horizontal_active_pixels[2]; + NvU8 vertical_active_lines[2]; + NvU8 refresh_rate; // 1 Hz to 256 Hz +} DISPLAYID_2_0_TIMING_9_DESCRIPTOR; + +// the following fields apply to Timing 9/10 Descriptors +#define DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD_CRT_BASED 0 +#define DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_1 1 +#define DISPLAYID_2_0_TIMING_FORMULA_CVT_2_1_REDUCED_BLANKING_2 2 +#define DISPLAYID_2_0_TIMING_FORMULA_CVT_2_1_REDUCED_BLANKING_3 3 + +#define DISPLAYID_2_0_TIMING_9_MAX_DESCRIPTORS 10 + +typedef struct _tagDISPLAYID_2_0_TIMING_9_BLOCK +{ + // Type IX Timing (0x24) + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + DISPLAYID_2_0_TIMING_9_DESCRIPTOR descriptors[DISPLAYID_2_0_TIMING_9_MAX_DESCRIPTORS]; +} DISPLAYID_2_0_TIMING_9_BLOCK; + +#define DISPLAYID_2_0_TIMING_10_PAYLOAD_BYTES_6 0 +#define DISPLAYID_2_0_TIMING_10_PAYLOAD_BYTES_7 1 +#define DISPLAYID_2_1_TIMING_10_PAYLOAD_BYTES_8 2 + +typedef struct _tagDISPLAYID_2_0_TIMING_10_BLOCK_HEADER +{ + NvU8 type; // Type X Timing (0x2A) + NvU8 revision :3; + NvU8 reserved0 :1; + NvU8 payload_bytes_len :3; + NvU8 reserved1 :1; + NvU8 payload_bytes; +} DISPLAYID_2_0_TIMING_10_BLOCK_HEADER; + +#define DISPLAYID_2_0_TIMING_10_MAX_6BYTES_DESCRIPTORS 18 +#define DISPLAYID_2_0_TIMING_10_MAX_7BYTES_DESCRIPTORS 16 +#define DISPLAYID_2_1_TIMING_10_MAX_8BYTES_DESCRIPTORS 14 + +typedef struct _DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR +{ + struct { + NvU8 timing_formula :3; + NvU8 early_vsync :1; + NvU8 rr1000div1001_or_hblank :1; + NvU8 stereo_support :2; + NvU8 ycc420_support :1; + } options; + + NvU8 horizontal_active_pixels[2]; + NvU8 vertical_active_lines[2]; + NvU8 refresh_rate; // 1 Hz to 256 Hz +} DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR; + +#define DISPLAYID_2_0_TIMING_10_NOMINAL_MINIMUM_VBLANK 35 +#define DISPLAYID_2_1_TIMING_10_ALTERNATE_MINIMUM_VBLANK 20 + +typedef struct _DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR +{ + DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR descriptor_6_bytes; + NvU8 refresh_rate_high :2; + NvU8 delta_hblank :3; + NvU8 additional_vblank_timing :3; +} DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR; + +typedef struct _DISPLAYID_2_1_TIMING_10_8BYTES_DESCRIPTOR +{ + DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR descriptor_7_bytes; + NvU8 additional_mini_vblank :1; + NvU8 reserved :7; +} DISPLAYID_2_1_TIMING_10_8BYTES_DESCRIPTOR; + +typedef struct _DISPLAYID_2_0_TIMING_10_BLOCK +{ + DISPLAYID_2_0_TIMING_10_BLOCK_HEADER header; + NvU8 descriptors[144]; +} DISPLAYID_2_0_TIMING_10_BLOCK; + +#define DISPLAYID_2_0_RANGE_LIMITS_BLOCK_PAYLOAD_LENGTH 9 +typedef struct _tagDISPLAYID_2_0_RANGE_LIMITS_BLOCK +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + + NvU8 pixel_clock_min[3]; + NvU8 pixel_clock_max[3]; + NvU8 vertical_frequency_min; + NvU8 vertical_frequency_max_7_0; + + struct { + NvU8 vertical_frequency_max_9_8 :2; + NvU8 reserved :5; + NvU8 seamless_dynamic_video_timing_change :1; + } dynamic_video_timing_range_support; +} DISPLAYID_2_0_RANGE_LIMITS_BLOCK; + +#define DISPLAYID_2_0_SEAMLESS_DYNAMIC_VIDEO_TIMING_CHANGE_NOT_SUPPORTED 0 +#define DISPLAYID_2_0_SEAMLESS_DYNAMIC_VIDEO_TIMING_CHANGE_SUPPORTED 1 + +#define DISPLAYID_2_0_INTERFACE_FEATURES_BLOCK_PAYLOAD_LENGTH_MIN 9 +#define DISPLAYID_2_0_MAX_COLOR_SPACE_AND_EOTF 7 +typedef struct _tagDISPLAYID_2_0_INTERFACE_FEATURES_BLOCK +{ + // Display Interface Features Data Block (0x26) + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + + struct { + NvU8 bit_per_primary_6:1; + NvU8 bit_per_primary_8:1; + NvU8 bit_per_primary_10:1; + NvU8 bit_per_primary_12:1; + NvU8 
bit_per_primary_14:1; + NvU8 bit_per_primary_16:1; + NvU8 reserved:2; + } interface_color_depth_rgb; + + struct { + NvU8 bit_per_primary_6:1; + NvU8 bit_per_primary_8:1; + NvU8 bit_per_primary_10:1; + NvU8 bit_per_primary_12:1; + NvU8 bit_per_primary_14:1; + NvU8 bit_per_primary_16:1; + NvU8 reserved:2; + } interface_color_depth_ycbcr444; + + struct { + NvU8 bit_per_primary_8:1; + NvU8 bit_per_primary_10:1; + NvU8 bit_per_primary_12:1; + NvU8 bit_per_primary_14:1; + NvU8 bit_per_primary_16:1; + NvU8 reserved:3; + } interface_color_depth_ycbcr422; + + struct { + NvU8 bit_per_primary_8:1; + NvU8 bit_per_primary_10:1; + NvU8 bit_per_primary_12:1; + NvU8 bit_per_primary_14:1; + NvU8 bit_per_primary_16:1; + NvU8 reserved:3; + } interface_color_depth_ycbcr420; + + NvU8 min_pixel_rate_ycbcr420; // x 74.25MP/s + + struct { + NvU8 reserved:5; + NvU8 sample_rate_48_khz:1; + NvU8 sample_rate_44_1_khz:1; + NvU8 sample_rate_32_khz:1; + } audio_capability; + + struct { + NvU8 color_space_srgb_eotf_srgb:1; + NvU8 color_space_bt601_eotf_bt601:1; + NvU8 color_space_bt709_eotf_bt1886:1; + NvU8 color_space_adobe_rgb_eotf_adobe_rgb:1; + NvU8 color_space_dci_p3_eotf_dci_p3:1; + NvU8 color_space_bt2020_eotf_bt2020:1; + NvU8 color_space_bt2020_eotf_smpte_st2084:1; + NvU8 reserved:1; + } color_space_and_eotf_1; + + struct { + NvU8 reserved; + } color_space_and_eotf_2; + + struct { + NvU8 count:3; + NvU8 reserved:5; + } additional_color_space_and_eotf_count; + + struct { + NvU8 eotf:4; + NvU8 color_space:4; + } additional_color_space_and_eotf[DISPLAYID_2_0_MAX_COLOR_SPACE_AND_EOTF]; +} DISPLAYID_2_0_INTERFACE_FEATURES_BLOCK; + +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_NOT_DEFINED 0 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_SRGB 1 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_BT601 2 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_BT709 3 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_ADOBE_RGB 4 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_DCI_P3 5 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_BT2020 6 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_CUSTOM 7 + +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_NOT_DEFINED 0 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_SRGB 1 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_BT601 2 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_BT709 3 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_ADOBE_RGB 4 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_DCI_P3 5 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_BT2020 6 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_GAMMA 7 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_SMPTE_ST2084 8 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_HYBRID_LOG 9 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_CUSTOM 10 + +typedef struct _tagDISPLAYID_2_0_STEREO_INTERFACE_BLOCK_HEADER +{ + NvU8 type; + NvU8 revision:3; + NvU8 reserved:3; + NvU8 stereo_timing_support:2; +} DISPLAYID_2_0_STEREO_INTERFACE_BLOCK_HEADER; + +typedef struct _tagDISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR +{ + NvU8 supported_timing_code_count:5; + NvU8 reserved:1; + NvU8 timing_code_type:2; + NvU8 timing_code[0x1F]; +} DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_FIELD_SEQUENTIAL_INTERFACE_DESCRIPTOR +{ + NvU8 polarity_descriptor:1; + NvU8 reserved:7; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR 
timing_descriptor; +} DISPLAYID_2_0_STEREO_FIELD_SEQUENTIAL_INTERFACE_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_SIDE_BY_SIDE_INTERFACE_DESCRIPTOR +{ + NvU8 view_identity_descriptor:1; + NvU8 reserved:7; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_SIDE_BY_SIDE_INTERFACE_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_PIXEL_INTERLEAVED_DESCRIPTOR +{ + NvU8 interleaved_pattern_descriptor[8]; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_PIXEL_INTERLEAVED_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_DUAL_INTERFACE_LEFT_AND_RIGHT_SEPARATE_DESCRIPTOR +{ + NvU8 left_and_right_polarity_descriptor:1; + NvU8 mirroring_descriptor:2; + NvU8 reserved:5; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_DUAL_INTERFACE_LEFT_AND_RIGHT_SEPARATE_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_MULTI_VIEW_DESCRIPTOR +{ + NvU8 views_descriptors_count; + NvU8 view_interleaving_method_code_descriptor; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_MULTI_VIEW_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_STACKED_FRAME_DESCRIPTOR +{ + NvU8 view_identity_descriptor:1; + NvU8 reserved:7; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_STACKED_FRAME_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_PROPRIETARY_DESCRIPTOR +{ + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_PROPRIETARY_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_INTERFACE_METHOD_BLOCK +{ + DISPLAYID_2_0_STEREO_INTERFACE_BLOCK_HEADER header; + + NvU8 stereo_bytes; + NvU8 stereo_code; + union { + DISPLAYID_2_0_STEREO_FIELD_SEQUENTIAL_INTERFACE_DESCRIPTOR field_sequential; + DISPLAYID_2_0_STEREO_SIDE_BY_SIDE_INTERFACE_DESCRIPTOR side_by_side; + DISPLAYID_2_0_STEREO_PIXEL_INTERLEAVED_DESCRIPTOR pixel_interleaved; + DISPLAYID_2_0_STEREO_DUAL_INTERFACE_LEFT_AND_RIGHT_SEPARATE_DESCRIPTOR dual_interface; + DISPLAYID_2_0_STEREO_MULTI_VIEW_DESCRIPTOR multi_view; + DISPLAYID_2_0_STEREO_STACKED_FRAME_DESCRIPTOR stacked_frame; + DISPLAYID_2_0_STEREO_PROPRIETARY_DESCRIPTOR proprietary; + }; +} DISPLAYID_2_0_STEREO_INTERFACE_METHOD_BLOCK; + +#define DISPLAYID_2_0_STEREO_CODE_FIELD_SEQUENTIAL 0x0 +#define DISPLAYID_2_0_STEREO_CODE_SIDE_BY_SIDE 0x1 +#define DISPLAYID_2_0_STEREO_CODE_PIXEL_INTERLEAVED 0x2 +#define DISPLAYID_2_0_STEREO_CODE_DUAL_INTERFACE 0x3 +#define DISPLAYID_2_0_STEREO_CODE_MULTIVIEW 0x4 +#define DISPLAYID_2_0_STEREO_CODE_STACKED_FRAME 0x5 +#define DISPLAYID_2_0_STEREO_CODE_PROPRIETARY 0xFF + +#define DISPLAYID_STEREO_MIRRORING 2:1 +#define DISPLAYID_STEREO_POLARITY 0:0 + +#define DISPLAYID_2_0_TILED_DISPLAY_BLOCK_PAYLOAD_LENGTH 22 +typedef struct _tagDISPLAYID_2_0_TILED_DISPLAY_BLOCK +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + struct + { + NvU8 single_tile_behavior:3; // 0x03 + NvU8 multi_tile_behavior:2; // 0x03 + NvU8 rsvd :1; // 0x03 + NvU8 has_bezel_info :1; // 0x03 + NvU8 single_enclosure :1; // 0x03 + } capability; + struct + { + NvU8 row :4; // 0x04 + NvU8 col :4; // 0x04 + } topo_low; + struct + { + NvU8 y :4; // 0x05 + NvU8 x :4; // 0x05 + } loc_low; + struct + { + NvU8 y :2; // 0x06 + NvU8 x :2; // 0x06 + NvU8 row :2; // 0x06 + NvU8 col :2; // 0x06 + } topo_loc_high; + struct + { + NvU8 width_low; // 0x07 + NvU8 width_high; // 0x08 + NvU8 height_low; // 0x09 + NvU8 height_high; // 0X0A + } native_resolution; + struct + { + NvU8 pixel_density; // 0x0B + NvU8 
top; // 0x0C + NvU8 bottom; // 0x0D + NvU8 right; // 0x0E + NvU8 left; // 0x0F + } bezel_info; + struct + { + NvU8 vendor_id[3]; // 0x10 ~ 0x12 + NvU8 product_id[2]; // 0x13 ~ 0x14 + NvU8 serial_number[4]; // 0x15 ~ 0x18 + } topo_id; +} DISPLAYID_2_0_TILED_DISPLAY_BLOCK; + +#define DISPLAYID_2_0_CONTAINERID_BLOCK_PAYLOAD_LENGTH 16 +typedef struct _tagDISPLAYID_2_0_CONTAINERID_BLOCK +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + NvU8 container_id[DISPLAYID_2_0_CONTAINERID_BLOCK_PAYLOAD_LENGTH]; +} DISPLAYID_2_0_CONTAINERID_BLOCK; + +#define DISPLAYID_2_0_ADAPTIVE_SYNC_DETAILED_TIMING_COUNT 4 +typedef struct _tagDISPLAYID_2_0_ADAPTIVE_SYNC_BLOCK_HEADER +{ + NvU8 type; // Adaptive-Sync (0x2B) + NvU8 revision :3; + NvU8 reserved0 :1; + NvU8 payload_bytes_adaptive_sync_len :3; + NvU8 reserved1 :1; + NvU8 payload_bytes; +} DISPLAYID_2_0_ADAPTIVE_SYNC_BLOCK_HEADER; + +typedef struct _tagDISPLAYID_2_0_ADAPTIVE_SYNC_DESCRIPTOR +{ + struct + { + NvU8 range : 1; + NvU8 successive_frame_inc_tolerance : 1; + NvU8 modes : 2; + NvU8 seamless_transition_not_support: 1; + NvU8 successive_frame_dec_tolerance : 1; + NvU8 reserved : 2; + } operation_range_info; + + // 6.2 format (six integer bits and two fractional bits) + // six integer bits == 0 - 63ms + // two fractional bits == 0.00(00), 0.25(01b),0.50(10), 0.75(11b) + NvU8 max_single_frame_inc; + NvU8 min_refresh_rate; + struct + { + NvU8 max_rr_7_0; + NvU8 max_rr_9_8 : 2; + NvU8 reserved : 6; + } max_refresh_rate; + + // same as max_single_frame_inc expression + NvU8 max_single_frame_dec; +} DISPLAYID_2_0_ADAPTIVE_SYNC_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_ADAPTIVE_SYNC_BLOCK +{ + DISPLAYID_2_0_ADAPTIVE_SYNC_BLOCK_HEADER header; + DISPLAYID_2_0_ADAPTIVE_SYNC_DESCRIPTOR descriptors[DISPLAYID_2_0_ADAPTIVE_SYNC_DETAILED_TIMING_COUNT]; +} DISPLAYID_2_0_ADAPTIVE_SYNC_BLOCK; + +// Payload value as defined in DID2.1 spec +#define DISPLAYID_2_0_BRIGHTNESS_LUMINANCE_RANGE_BLOCK_PAYLOAD_LENGTH 6 +typedef struct _tagDISPLAYID_2_0_BRIGHTNESS_LUMINANCE_RANGE_BLOCK +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + NvU16 min_sdr_luminance; // 0x03 ~ 0x04 + NvU16 max_sdr_luminance; // 0x05 ~ 0x06 + NvU16 max_boost_sdr_luminance; // 0x07 ~ 0x08 +} DISPLAYID_2_0_BRIGHTNESS_LUMINANCE_RANGE_BLOCK; + +typedef struct _tagDISPLAYID_2_0_VENDOR_SPECIFIC_BLOCK +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + NvU8 vendor_id[3]; + NvU8 vendor_specific_data[245]; +} DISPLAYID_2_0_VENDOR_SPECIFIC_BLOCK; + +typedef struct _tagDISPLAYID_2_0_CTA_BLOCK +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + NvU8 cta_data[248]; +} DISPLAYID_2_0_CTA_BLOCK; + +#ifdef __SUPPORTS_PACK_PRAGMA +#pragma pack() +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +// Entry point functions both used in DID20 and DID20ext +NVT_STATUS parseDisplayId20DataBlock(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); +NvU8 computeDisplayId20SectionCheckSum(const NvU8 *pSectionBytes, NvU32 length); + +// DisplayID20 as EDID extension entry point function +NVT_STATUS parseDisplayId20EDIDExtSection(DISPLAYID_2_0_SECTION *section, NVT_EDID_INFO *pEdidInfo); + +// DisplayID20 Entry point functions +NVT_STATUS parseDisplayId20BaseSection(const DISPLAYID_2_0_SECTION *pSection, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); +NVT_STATUS parseDisplayId20SectionDataBlocks(const DISPLAYID_2_0_SECTION *pSection, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); +NVT_STATUS parseDisplayId20ExtensionSection(const DISPLAYID_2_0_SECTION *pSection, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); + +// 
DisplayID20 Data Block Tag Allocation
+NVT_STATUS parseDisplayId20ProductIdentity(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);            // 0x20 Product Identification
+NVT_STATUS parseDisplayId20DisplayParam(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);               // 0x21 Display Parameters
+NVT_STATUS parseDisplayId20Timing7(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);                    // 0x22 Type VII Timing - Detailed Timing
+NVT_STATUS parseDisplayId20Timing8(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);                    // 0x23 Type VIII Timing - Enumerated Timing
+NVT_STATUS parseDisplayId20Timing9(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);                    // 0x24 Type IX Timing - Formula-based
+NVT_STATUS parseDisplayId20RangeLimit(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);                 // 0x25 Dynamic Video Timing Range Limits
+NVT_STATUS parseDisplayId20DisplayInterfaceFeatures(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);   // 0x26 Display Interface Features
+NVT_STATUS parseDisplayId20Stereo(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);                     // 0x27 Stereo Display Interface
+NVT_STATUS parseDisplayId20TiledDisplay(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);               // 0x28 Tiled Display Topology
+NVT_STATUS parseDisplayId20ContainerId(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);                // 0x29 ContainerID
+NVT_STATUS parseDisplayId20Timing10(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);                   // 0x2A Type X Timing - Formula-based Timing
+NVT_STATUS parseDisplayId20AdaptiveSync(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);               // 0x2B Adaptive-Sync
+NVT_STATUS parseDisplayId20ARVRHMD(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);                    // 0x2C ARVR HMD
+NVT_STATUS parseDisplayId20ARVRLayer(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);                  // 0x2D ARVR Layer
+NVT_STATUS parseDisplayId20BrightnessLuminanceRange(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);   // 0x2E Brightness Luminance Range
+NVT_STATUS parseDisplayId20VendorSpecific(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);             // 0x7E Vendor-specific
+NVT_STATUS parseDisplayId20CtaData(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo);                    // 0x81 CTA DisplayID
+
+#ifdef __cplusplus
+}
+#endif
+#endif // __DISPLAYID20_H_
+
diff --git a/src/common/modeset/timing/dpsdp.h b/src/common/modeset/timing/dpsdp.h
new file mode 100644
index 0000000..b35b218
--- /dev/null
+++ b/src/common/modeset/timing/dpsdp.h
@@ -0,0 +1,376 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* +=============================================================================== + + dp_sdp.cpp + + Provide definition needed for display port secondary data packet. + +================================================================================ +*/ + +#ifndef __DPSDP_H__ +#define __DPSDP_H__ + +#include "nvtypes.h" + +#define DP_SDP_HEADER_SIZE 4 +#define DP_SDP_DATA_SIZE 28 + +// TODO: needs to wait for RM to provide the enum. Therefore, hardcoded to 7, which is the packet type for VSC SDP +typedef enum tagSDP_PACKET_TYPE +{ + SDP_PACKET_TYPE_VSC = 7, +} SDP_PACKET_TYPE; + +typedef enum tagSDP_VSC_REVNUM +{ + SDP_VSC_REVNUM_DISABLED = 0, + SDP_VSC_REVNUM_STEREO = 1, + SDP_VSC_REVNUM_STEREO_PSR, + SDP_VSC_REVNUM_STEREO_PSR2, + SDP_VSC_REVNUM_PSR2_EXTN, + SDP_VSC_REVNUM_STEREO_PSR2_COLOR, + SDP_VSC_REVNUM_STEREO_PR, + SDP_VSC_REVNUM_STEREO_PR_COLOR, +} SDP_VSC_REVNUM; + +typedef enum tagSDP_VSC_VALID_DATA_BYTES +{ + SDP_VSC_VALID_DATA_BYTES_DISABLED = 0, + SDP_VSC_VALID_DATA_BYTES_STEREO = 1, + SDP_VSC_VALID_DATA_BYTES_STEREO_PSR = 8, + SDP_VSC_VALID_DATA_BYTES_PSR2 = 12, + SDP_VSC_VALID_DATA_BYTES_PSR2_COLOR = 19, + SDP_VSC_VALID_DATA_BYTES_PR = 16, + SDP_VSC_VALID_DATA_BYTES_PR_COLOR = 19, +} SDP_VSC_VALID_DATA_BYTES; + +typedef enum tagSDP_VSC_DYNAMIC_RANGE +{ + SDP_VSC_DYNAMIC_RANGE_VESA, + SDP_VSC_DYNAMIC_RANGE_CEA, +} SDP_VSC_DYNAMIC_RANGE; + +typedef enum tagSDP_VSC_PIX_ENC +{ + SDP_VSC_PIX_ENC_RGB, + SDP_VSC_PIX_ENC_YCBCR444, + SDP_VSC_PIX_ENC_YCBCR422, + SDP_VSC_PIX_ENC_YCBCR420, + SDP_VSC_PIX_ENC_Y, + SDP_VSC_PIX_ENC_RAW, +} SDP_VSC_PIX_ENC; + +typedef enum tagSDP_VSC_BIT_DEPTH_RGB +{ + SDP_VSC_BIT_DEPTH_RGB_6BPC = 0, + SDP_VSC_BIT_DEPTH_RGB_8BPC, + SDP_VSC_BIT_DEPTH_RGB_10BPC, + SDP_VSC_BIT_DEPTH_RGB_12BPC, + SDP_VSC_BIT_DEPTH_RGB_16BPC, + +} SDP_VSC_BIT_DEPTH_RGB; + +typedef enum tagSDP_VSC_BIT_DEPTH_YCBCR +{ + SDP_VSC_BIT_DEPTH_YCBCR_8BPC = 1, + SDP_VSC_BIT_DEPTH_YCBCR_10BPC, + SDP_VSC_BIT_DEPTH_YCBCR_12BPC, + SDP_VSC_BIT_DEPTH_YCBCR_16BPC, + +} SDP_VSC_BIT_DEPTH_YCBCR; + +typedef enum tagSDP_VSC_BIT_DEPTH_RAW +{ + SDP_VSC_BIT_DEPTH_RAW_6BPC = 1, + SDP_VSC_BIT_DEPTH_RAW_7BPC, + SDP_VSC_BIT_DEPTH_RAW_8BPC, + SDP_VSC_BIT_DEPTH_RAW_10BPC, + SDP_VSC_BIT_DEPTH_RAW_12BPC, + SDP_VSC_BIT_DEPTH_RAW_14PC, + SDP_VSC_BIT_DEPTH_RAW_16PC, + +} SDP_VSC_BIT_DEPTH_RAW; + +typedef enum tagSDP_VSC_CONTENT_TYPE +{ + SDP_VSC_CONTENT_TYPE_UNDEFINED = 0, + 
SDP_VSC_CONTENT_TYPE_GRAPHICS, + SDP_VSC_CONTENT_TYPE_PHOTO, + SDP_VSC_CONTENT_TYPE_VIDEO, + SDP_VSC_CONTENT_TYPE_GAMES, + +} SDP_VSC_CONTENT_TYPE; + +typedef enum tagSDP_VSC_COLOR_FMT_RGB_COLORIMETRY +{ + SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_SRGB = 0, + SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_RGB_WIDE_GAMUT_FIXED, + SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_RGB_SCRGB, + SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_ADOBERGB, + SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_DCI_P3, + SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_CUSTOM, + SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_ITU_R_BT2020_RGB, +} SDP_VSC_COLOR_FMT_RGB_COLORIMETRY; + +typedef enum tagSDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY +{ + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT601 = 0, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT709, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_XVYCC601, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_XVYCC709, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_SYCC601, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ADOBEYCC601, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT2020_YCCBCCRC, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT2020_YCBCR, +} SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY; + +typedef enum tagSDP_VSC_COLOR_FMT_RAW_COLORIMETRY +{ + SDP_VSC_COLOR_FMT_RAW_COLORIMETRY_CUSTOM_COLOR_PROFILE = 0, +} SDP_VSC_COLOR_FMT_RAW; + +typedef enum tagSDP_VSC_COLOR_FMT_Y_COLORIMETRY +{ + SDP_VSC_COLOR_FMT_Y_COLORIMETRY_DICOM = 0, +} SDP_VSC_COLOR_FMT_Y; + +// The struct element field hb and db fields are arranged to match the HW registers +// NV_PDISP_SF_DP_GENERIC_INFOFRAME_HEADER* and NV_PDISP_SF_DP_GENERIC_INFOFRAME_SUBPACK0_DB* +typedef struct tagDPSDP_DP_VSC_SDP_DESCRIPTOR +{ + NvU8 dataSize; // the db data size + + // header + struct + { + NvU8 hb0; // DP1.3 spec, the value = 0 + NvU8 hb1; // DP1.3 spec, value = 7 + NvU8 revisionNumber : 5; + NvU8 hb2Reserved : 3; + NvU8 numValidDataBytes : 5; // number of valid data bytes + NvU8 hb3Reserved : 3; + } hb; + + // data content + struct + { + // Stereo field. Note: Needs to be expanded when needed. Refer to DP1.3 spec. + NvU8 stereoInterface; // DB0 + // PSR Field. Note: Needs to be expanded when needed. Refer to DP1.3 spec. + NvU8 psrState : 1; //DB1 + NvU8 psrUpdateRfb : 1; + NvU8 psrCrcValid : 1; + NvU8 psrSuValid : 1; + NvU8 psrSuFirstScanLine : 1; + NvU8 psrSuLastScanLine : 1; + NvU8 psrYCoordinateValid : 1; + NvU8 psrReserved : 1; + NvU8 db2; + NvU8 db3; + NvU8 db4; + NvU8 db5; + NvU8 db6; + NvU8 db7; + // DB8 - DB15 are undefined in DP 1.3 spec. + NvU8 db8; + NvU8 db9; + NvU8 db10; + NvU8 db11; + NvU8 db12; + NvU8 db13; + NvU8 db14; + NvU8 db15; + + // Colorimetry Infoframe Secondary Data Package following DP1.3 spec + NvU8 colorimetryFormat : 4; // DB16 infoframe per DP1.3 spec + NvU8 pixEncoding : 4; // DB16 infoframe per DP1.3 spec + + NvU8 bitDepth : 7; // DB17 infoframe per DP1.3 spec + NvU8 dynamicRange : 1; // DB17 infoframe per DP1.3 spec + + NvU8 contentType : 3; // DB18 infoframe per DP1.3 spec + NvU8 db18Reserved : 5; + + NvU8 db19; + NvU8 db20; + NvU8 db21; + NvU8 db22; + NvU8 db23; + NvU8 db24; + NvU8 db25; + NvU8 db26; + NvU8 db27; + } db; + +} DPSDP_DP_VSC_SDP_DESCRIPTOR; + +typedef struct tagDPSDP_DP_PR_VSC_SDP_DESCRIPTOR +{ + NvU8 dataSize; // the db data size + + // header + struct + { + NvU8 hb0; // DP1.3 spec, the value = 0 + NvU8 hb1; // DP1.3 spec, value = 7 + NvU8 revisionNumber : 5; + NvU8 hb2Reserved : 3; + NvU8 numValidDataBytes : 5; // number of valid data bytes + NvU8 hb3Reserved : 3; + } hb; + + // data content + struct + { + // Stereo field. Note: Needs to be expanded when needed. Refer to DP1.3 spec. 
+ NvU8 stereoInterface; // DB0 + // PSR Field. Note: Needs to be expanded when needed. Refer to DP1.3 spec. + NvU8 prState : 1; // DB1 + NvU8 prReserved : 1; // Always ZERO + NvU8 prCrcValid : 1; + NvU8 prSuValid : 1; + NvU8 prReservedEx : 4; + + NvU8 db2; + NvU8 db3; + NvU8 db4; + NvU8 db5; + NvU8 db6; + NvU8 db7; + // DB8 - DB15 are undefined in DP 1.3 spec. + NvU8 db8; + NvU8 db9; + NvU8 db10; + NvU8 db11; + NvU8 db12; + NvU8 db13; + NvU8 db14; + NvU8 db15; + + // Colorimetry Infoframe Secondary Data Package following DP1.3 spec + NvU8 colorimetryFormat : 4; // DB16 infoframe per DP1.3 spec + NvU8 pixEncoding : 4; // DB16 infoframe per DP1.3 spec + + NvU8 bitDepth : 7; // DB17 infoframe per DP1.3 spec + NvU8 dynamicRange : 1; // DB17 infoframe per DP1.3 spec + + NvU8 contentType : 3; // DB18 infoframe per DP1.3 spec + NvU8 db18Reserved : 5; + + NvU8 db19; + NvU8 db20; + NvU8 db21; + NvU8 db22; + NvU8 db23; + NvU8 db24; + NvU8 db25; + NvU8 db26; + NvU8 db27; + } db; + +} DPSDP_DP_PR_VSC_SDP_DESCRIPTOR; + +typedef struct tagDPSDP_DESCRIPTOR +{ + NvU8 dataSize; + + // header byte + struct + { + NvU8 hb0; + NvU8 hb1; + NvU8 hb2; + NvU8 hb3; + } hb; + + // content byte + struct + { + NvU8 db0; + NvU8 db1; + NvU8 db2; + NvU8 db3; + NvU8 db4; + NvU8 db5; + NvU8 db6; + NvU8 db7; + NvU8 db8; + NvU8 db9; + NvU8 db10; + NvU8 db11; + NvU8 db12; + NvU8 db13; + NvU8 db14; + NvU8 db15; + NvU8 db16; + NvU8 db17; + NvU8 db18; + NvU8 db19; + NvU8 db20; + NvU8 db21; + NvU8 db22; + NvU8 db23; + NvU8 db24; + NvU8 db25; + NvU8 db26; + NvU8 db27; + NvU8 db28; + NvU8 db29; + NvU8 db30; + NvU8 db31; + } db; + +} DPSDP_DESCRIPTOR; + +// The following #defines are for RGB only +#define DP_VSC_SDP_BIT_DEPTH_RGB_6BPC 0 +#define DP_VSC_SDP_BIT_DEPTH_RGB_8BPC 1 +#define DP_VSC_SDP_BIT_DEPTH_RGB_10BPC 2 +#define DP_VSC_SDP_BIT_DEPTH_RGB_12BPC 3 +#define DP_VSC_SDP_BIT_DEPTH_RGB_16BPC 4 + +// The following #defines are for YUV only +#define DP_VSC_SDP_BIT_DEPTH_YUV_8BPC 1 +#define DP_VSC_SDP_BIT_DEPTH_YUV_10BPC 2 +#define DP_VSC_SDP_BIT_DEPTH_YUV_12BPC 3 +#define DP_VSC_SDP_BIT_DEPTH_YUV_16BPC 4 + +// The following #defines are for RAW only +#define DP_VSC_SDP_BIT_DEPTH_RAW_6BPC 1 +#define DP_VSC_SDP_BIT_DEPTH_RAW_7BPC 2 +#define DP_VSC_SDP_BIT_DEPTH_RAW_8BPC 3 +#define DP_VSC_SDP_BIT_DEPTH_RAW_10BPC 4 +#define DP_VSC_SDP_BIT_DEPTH_RAW_12BPC 5 +#define DP_VSC_SDP_BIT_DEPTH_RAW_14BPC 6 +#define DP_VSC_SDP_BIT_DEPTH_RAW_16BPC 7 + +#define DP_INFOFRAME_SDP_V1_3_VERSION 0x13 +#define DP_INFOFRAME_SDP_V1_3_HB3_VERSION_MASK 0xFC +#define DP_INFOFRAME_SDP_V1_3_HB3_VERSION_SHIFT 2 +#define DP_INFOFRAME_SDP_V1_3_HB3_MSB_MASK 0x3 +#define DP_INFOFRAME_SDP_V1_3_NON_AUDIO_SIZE 30 +#endif // __DPSDP_H_ diff --git a/src/common/modeset/timing/edid.h b/src/common/modeset/timing/edid.h new file mode 100644 index 0000000..ac2392c --- /dev/null +++ b/src/common/modeset/timing/edid.h @@ -0,0 +1,352 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// SPDX-License-Identifier: MIT
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+//
+// File: edid.h
+//
+// Purpose: the template for EDID parsing
+//
+//*****************************************************************************
+
+#ifndef __EDID_H_
+#define __EDID_H_
+
+#include "nvtiming_pvt.h"
+#include "displayid.h"
+#include "displayid20.h"
+
+// EDID 1.x detailed timing template
+
+#define NVT_PVT_EDID_LDD_PAYLOAD_SIZE 13
+
+typedef struct _tagEDID_LONG_DISPLAY_DESCRIPTOR
+{
+    // the header
+    NvU8 prefix[2]; // 0x00 ~ 0x01
+    NvU8 rsvd;      // 0x02
+    NvU8 tag;       // 0x03
+    NvU8 rsvd2;     // 0x04
+
+    // the payload
+    NvU8 data[NVT_PVT_EDID_LDD_PAYLOAD_SIZE]; // 0x05~0x11
+}EDID_LONG_DISPLAY_DESCRIPTOR;
+typedef struct _tagEDID_MONITOR_RANGE_GTF2
+{
+    NvU8 reserved;  // byte 0x0B: reserved as 00
+    NvU8 startFreq; // byte 0x0C: start frequency for secondary curve, hor.
freq./2[kHz] + NvU8 C; // byte 0x0D: C*2 0 <= 127 + NvU8 M_LSB; // byte 0x0E-0x0F: M (LSB) 0 <= M <= 65535 + NvU8 M_MSB; + NvU8 K; // byte 0x10: K 0 <= K <= 255 + NvU8 J; // byte 0x11: J*2 0 <= J <= 127 +}EDID_MONITOR_RANGE_GTF2; + +typedef struct _tagEDID_MONITOR_RANGE_CVT +{ + NvU8 version; // byte 0x0B: cvt version + NvU8 pixel_clock; // byte 0x0C: [bits 7:2]pixel clock precision + // [bits 1:0]max active MSB + NvU8 max_active; // byte 0x0D: with byte 12 [bits 1:0], max active pixels per line + NvU8 aspect_supported; // byte 0x0E: supported aspect ratios + NvU8 aspect_preferred_blanking; // byte 0x0F: preferred aspect ratio / blanking style support + NvU8 scaling_support; // byte 0x10: display scaling support + NvU8 preferred_refresh_rate; // byte 0x11: preferred vertical refresh rate +}EDID_MONITOR_RANGE_CVT; + +// cvt support in display range limit block +#define NVT_PVT_EDID_CVT_PIXEL_CLOCK_MASK 0xFC +#define NVT_PVT_EDID_CVT_PIXEL_CLOCK_SHIFT 2 +#define NVT_PVT_EDID_CVT_ACTIVE_MSB_MASK 0x03 +#define NVT_PVT_EDID_CVT_ACTIVE_MSB_SHIFT 8 + +#define NVT_PVT_EDID_CVT_ASPECT_SUPPORTED_MASK 0xF8 +#define NVT_PVT_EDID_CVT_ASPECT_SUPPORTED_SHIFT 3 +#define NVT_PVT_EDID_CVT_RESERVED0_MASK 0x07 +#define NVT_PVT_EDID_CVT_RESERVED0_SHIFT 0 + +#define NVT_PVT_EDID_CVT_ASPECT_PREFERRED_MASK 0xE0 +#define NVT_PVT_EDID_CVT_ASPECT_PREFERRED_SHIFT 5 +#define NVT_PVT_EDID_CVT_BLANKING_MASK 0x18 +#define NVT_PVT_EDID_CVT_BLANKING_SHIFT 3 +#define NVT_PVT_EDID_CVT_RESERVED1_MASK 0x07 +#define NVT_PVT_EDID_CVT_RESERVED1_SHIFT 0 + +#define NVT_PVT_EDID_CVT_SCALING_MASK 0xF0 +#define NVT_PVT_EDID_CVT_SCALING_SHIFT 4 +#define NVT_PVT_EDID_CVT_RESERVED2_MASK 0x0F +#define NVT_PVT_EDID_CVT_RESERVED2_SHIFT 0 + +typedef struct _tagEDID_MONITOR_RANGE_LIMIT +{ + // the header in monitor descriptor data + NvU8 minVRate; // byte 0x05: min vertical rate + NvU8 maxVRate; // byte 0x06: max vertical rate + NvU8 minHRate; // byte 0x07: min horizontal rate + NvU8 maxHRate; // byte 0x08: max horizontal rate + NvU8 maxPClock10M; // byte 0x09: max pixel clock in 10M + NvU8 timing_support; // byte 0x0A: 2nd GTF / CVT timing formula support + union + { + EDID_MONITOR_RANGE_GTF2 gtf2; // bytes 0x0B-0x11 + EDID_MONITOR_RANGE_CVT cvt; // ... + }u; +} EDID_MONITOR_RANGE_LIMIT; + +// timing_support +#define NVT_PVT_EDID_RANGE_OFFSET_VER_MIN 0x01 +#define NVT_PVT_EDID_RANGE_OFFSET_VER_MAX 0x02 +#define NVT_PVT_EDID_RANGE_OFFSET_HOR_MIN 0x04 +#define NVT_PVT_EDID_RANGE_OFFSET_HOR_MAX 0x08 +#define NVT_PVT_EDID_RANGE_OFFSET_AMOUNT 255 + +typedef struct _tagEDID_CVT_3BYTE_BLOCK +{ + NvU8 addressable_lines; // byte 0: 8 lsb of addressable lines + NvU8 lines_ratio; // byte 1 : [bits7:4] 4 msb of addressable lines [bits3:2] aspect ratio + NvU8 refresh_rates; // byte 2 : supported/preferred refresh rates +}EDID_CVT_3BYTE_BLOCK; + +typedef struct _tagEDID_CVT_3BYTE +{ + // the header in monitor descriptor data. 
+ NvU8 version; // byte 0x05 : version code (0x01) + EDID_CVT_3BYTE_BLOCK block[NVT_EDID_DD_MAX_CVT3_PER_DESCRITPOR]; // bytes 0x06-0x11 +}EDID_CVT_3BYTE; + +// CVT 3byte +#define NVT_PVT_EDID_CVT3_LINES_MSB_MASK 0xF0 +#define NVT_PVT_EDID_CVT3_LINES_MSB_SHIFT 4 +#define NVT_PVT_EDID_CVT3_ASPECT_MASK 0x0C +#define NVT_PVT_EDID_CVT3_ASPECT_SHIFT 2 + +#define NVT_PVT_EDID_CVT3_PREFERRED_RATE_MASK 0x60 +#define NVT_PVT_EDID_CVT3_PREFERRED_RATE_SHIFT 5 +#define NVT_PVT_EDID_CVT3_SUPPORTED_RATE_MASK 0x1F +#define NVT_PVT_EDID_CVT3_SUPPORTED_RATE_SHIFT 0 + +typedef struct _tagEDID_COLOR_POINT_DATA +{ + NvU8 wp1_index; // 0x05: white point index number + NvU8 wp1_x_y; // 0x06: [bits3:2] lsb of wp1_x [bits1:0] lsb of wp1_y + NvU8 wp1_x; // 0x07: msb of wp1_x + NvU8 wp1_y; // 0x08: msb of wp1_y + NvU8 wp1_gamma; // 0x09: (gamma x 100) - 100 + NvU8 wp2_index; // 0x0A: ... + NvU8 wp2_x_y; // 0x0B: ... + NvU8 wp2_x; // 0x0C: ... + NvU8 wp2_y; // 0x0D: ... + NvU8 wp2_gamma; // 0x0E: ... + NvU8 line_feed; // 0x0F: reserved for line feed (0x0A) + NvU16 reserved0; // 0x10-0x11: reserved for space (0x2020) +}EDID_COLOR_POINT_DATA; + +#define NVT_PVT_EDID_CPD_WP_X_MASK 0x0C +#define NVT_PVT_EDID_CPD_WP_X_SHIFT 2 +#define NVT_PVT_EDID_CPD_WP_Y_MASK 0x03 +#define NVT_PVT_EDID_CPD_WP_Y_SHIFT 0 + +typedef struct _tagEDID_STANDARD_TIMING_ID +{ + NvU16 std_timing[NVT_EDID_DD_STI_NUM]; //0x05-0x10: 6 standard timings + NvU8 line_feed; //0x11: reserved for line feed (0x0A) +}EDID_STANDARD_TIMING_ID; + +typedef struct _tagEDID_COLOR_MANAGEMENT_DATA +{ + NvU8 version; //0x05: version (0x03) + NvU8 red_a3_lsb; //0x06: Red a3 LSB + NvU8 red_a3_msb; //0x07: Red a3 MSB + NvU8 red_a2_lsb; //0x08 + NvU8 red_a2_msb; //0x09 + NvU8 green_a3_lsb; //0x0A + NvU8 green_a3_msb; //0x0B + NvU8 green_a2_lsb; //0x0C + NvU8 green_a2_msb; //0x0D + NvU8 blue_a3_lsb; //0x0E + NvU8 blue_a3_msb; //0x0F + NvU8 blue_a2_lsb; //0x10 + NvU8 blue_a2_msb; //0x11 +}EDID_COLOR_MANAGEMENT_DATA; + +typedef struct _tagEDID_EST_TIMINGS_III +{ + NvU8 revision; //0x05: revision (0x0A) + NvU8 timing_byte[12]; //0x05-0x11: established timings III +}EDID_EST_TIMINGS_III; + +typedef struct _tagDETAILEDTIMINGDESCRIPTOR +{ + NvU16 wDTPixelClock; // 0x00 + NvU8 bDTHorizontalActive; // 0x02 + NvU8 bDTHorizontalBlanking; // 0x03 + NvU8 bDTHorizActiveBlank; // 0x04 + NvU8 bDTVerticalActive; // 0x05 + NvU8 bDTVerticalBlanking; // 0x06 + NvU8 bDTVertActiveBlank; // 0x07 + NvU8 bDTHorizontalSync; // 0x08 + NvU8 bDTHorizontalSyncWidth; // 0x09 + NvU8 bDTVerticalSync; // 0x0A + NvU8 bDTHorizVertSyncOverFlow; // 0x0B + NvU8 bDTHorizontalImage; // 0x0C + NvU8 bDTVerticalImage; // 0x0D + NvU8 bDTHorizVertImage; // 0x0E + NvU8 bDTHorizontalBorder; // 0x0F + NvU8 bDTVerticalBorder; // 0x10 + NvU8 bDTFlags; // 0x11 +}DETAILEDTIMINGDESCRIPTOR; + +// EDID 1.x basic block template +typedef struct _tagEDIDV1STRUC +{ + NvU8 bHeader[8]; // 0x00-0x07 + NvU16 wIDManufName; // 0x08 + NvU16 wIDProductCode; // 0x0A + NvU32 dwIDSerialNumber; // 0x0C + NvU8 bWeekManuf; // 0x10 + NvU8 bYearManuf; // 0x11 + NvU8 bVersionNumber; // 0x12 + NvU8 bRevisionNumber; // 0x13 + NvU8 bVideoInputDef; // 0x14 + NvU8 bMaxHorizImageSize; // 0x15 + NvU8 bMaxVertImageSize; // 0x16 + NvU8 bDisplayXferChar; // 0x17 + NvU8 bFeatureSupport; // 0x18 + NvU8 Chromaticity[10]; // 0x19-0x22 + NvU8 bEstablishedTimings1; // 0x23 + NvU8 bEstablishedTimings2; // 0x24 + NvU8 bManufReservedTimings; // 0x25 + NvU16 wStandardTimingID[8]; // 0x26 + DETAILEDTIMINGDESCRIPTOR DetailedTimingDesc[4]; // 0x36 + NvU8 
bExtensionFlag; // 0x7E + NvU8 bChecksum; // 0x7F +}EDIDV1STRUC; + +// EDID 2.x basic block template +typedef struct _tagEDIDV2STRUC +{ + NvU8 bHeader; // 0x00 + NvU16 wIDManufName; // 0x01 + NvU16 wIDProductCode; // 0x03 + NvU8 bWeekManuf; // 0x05 + NvU16 wYearManuf; // 0x06 + NvU8 bProductIDString[32]; // 0x08 + NvU8 bSerialNumber[16]; // 0x28 + NvU8 bReserved1[8]; // 0x38 + NvU8 bPhysicalInterfaceType; // 0x40 + NvU8 bVideoInterfaceType; // 0x41 + NvU8 bInterfaceDataFormat[8]; // 0x42 + NvU8 bInterfaceColor[5]; // 0x4A + NvU8 bDisplayTechType; // 0x4F + NvU8 bMajorDisplayChar; // 0x50 + NvU8 bFeaturesSupported[3]; // 0x51 + NvU16 wDisplayResponseTime; // 0x54 + NvU32 dwDisplayXferChar; // 0x56 + NvU32 dwMaxLuminance; // 0x5A + NvU8 bColorimetry[20]; // 0x5E + NvU16 wMaxHorizImageSize; // 0x72 + NvU16 wMaxVertImageSize; // 0x74 + NvU16 wMaxHorizAddressibility; // 0x76 + NvU16 wMaxVertAddressibility; // 0x78 + NvU8 bHorizPixelPitch; // 0x7A + NvU8 bVertPixelPitch; // 0x7B + NvU8 bReserved2; // 0x7C + NvU8 bGTFSupportInfo; // 0x7D + NvU16 wTimingInfoMap; // 0x7E + NvU8 bTableDescriptors[127]; // 0x80 + NvU8 bChecksum; // 0xFF +}EDIDV2STRUC; + +// EDID CEA/EIA-861 extension block template +typedef struct _tagEIA861EXTENSION +{ + NvU8 tag; // 0x00 + NvU8 revision; // 0x01 + NvU8 offset; // 0x02 + NvU8 misc; // 0x03 + NvU8 data[NVT_CEA861_MAX_PAYLOAD]; // 0x04 - 0x7E + NvU8 checksum; // 0x7F +}EIA861EXTENSION; + +typedef struct _tagVTBEXTENSION +{ + NvU8 tag; // 0x00 + NvU8 revision; // 0x01 + NvU8 num_detailed; // 0x02 + NvU8 num_cvt; // 0x03 + NvU8 num_standard; // 0x04 + NvU8 data[NVT_VTB_MAX_PAYLOAD]; // 0x05 - 0x7E + NvU8 checksum; +}VTBEXTENSION; + +// EDID DisplayID extension block template +typedef struct _tagDIDEXTENSION +{ + NvU8 tag; // 0x00 + NvU8 struct_version; // 0x01 + NvU8 length; // 0x02 + NvU8 use_case; // 0x03 + NvU8 ext_count; // 0x04 + NvU8 data[NVT_DID_MAX_EXT_PAYLOAD]; // 0x05 - 0x7E + NvU8 checksum; // 0x7F +}DIDEXTENSION; + +// video signal interface mask +#define NVT_PVT_EDID_INPUT_ISDIGITAL_MASK 0x80 // 0==analog +#define NVT_PVT_EDID_INPUT_ISDIGITAL_SHIFT 7 +#define NVT_PVT_EDID_INPUT_ANALOG_ETC_MASK 0x7F +#define NVT_PVT_EDID_INPUT_ANALOG_ETC_SHIFT 0 + +#define NVT_PVT_EDID_INPUT_INTERFACE_MASK 0x0F +#define NVT_PVT_EDID_INPUT_INTERFACE_SHIFT 0 + +#define NVT_PVT_EDID_INPUT_BPC_MASK 0x70 +#define NVT_PVT_EDID_INPUT_BPC_SHIFT 4 +#define NVT_PVT_EDID_INPUT_BPC_UNDEF 0x00 +#define NVT_PVT_EDID_INPUT_BPC_6 0x01 +#define NVT_PVT_EDID_INPUT_BPC_8 0x02 +#define NVT_PVT_EDID_INPUT_BPC_10 0x03 +#define NVT_PVT_EDID_INPUT_BPC_12 0x04 +#define NVT_PVT_EDID_INPUT_BPC_14 0x05 +#define NVT_PVT_EDID_INPUT_BPC_16 0x06 + +// color characteristic +#define NVT_PVT_EDID_CC_RED_X1_X0_MASK 0xC0 +#define NVT_PVT_EDID_CC_RED_X1_X0_SHIFT 6 +#define NVT_PVT_EDID_CC_RED_Y1_Y0_MASK 0x30 +#define NVT_PVT_EDID_CC_RED_Y1_Y0_SHIFT 4 + +#define NVT_PVT_EDID_CC_GREEN_X1_X0_MASK 0x0C +#define NVT_PVT_EDID_CC_GREEN_X1_X0_SHIFT 2 +#define NVT_PVT_EDID_CC_GREEN_Y1_Y0_MASK 0x03 +#define NVT_PVT_EDID_CC_GREEN_Y1_Y0_SHIFT 0 + +#define NVT_PVT_EDID_CC_BLUE_X1_X0_MASK 0xC0 +#define NVT_PVT_EDID_CC_BLUE_X1_X0_SHIFT 6 +#define NVT_PVT_EDID_CC_BLUE_Y1_Y0_MASK 0x30 +#define NVT_PVT_EDID_CC_BLUE_Y1_Y0_SHIFT 4 + +#define NVT_PVT_EDID_CC_WHITE_X1_X0_MASK 0x0C +#define NVT_PVT_EDID_CC_WHITE_X1_X0_SHIFT 2 +#define NVT_PVT_EDID_CC_WHITE_Y1_Y0_MASK 0x03 +#define NVT_PVT_EDID_CC_WHITE_Y1_Y0_SHIFT 0 + +#endif // __EDID_H_ diff --git a/src/common/modeset/timing/nvt_cvt.c b/src/common/modeset/timing/nvt_cvt.c new file 
mode 100644
index 0000000..9fc97ac
--- /dev/null
+++ b/src/common/modeset/timing/nvt_cvt.c
@@ -0,0 +1,675 @@
+//*****************************************************************************
+//
+// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+//
+// File: nvt_cvt.c
+//
+// Purpose: calculate CVT/CVT-RB timing
+//
+//*****************************************************************************
+
+#include "nvBinSegment.h"
+#include "nvmisc.h"
+
+#include "nvtiming_pvt.h"
+
+PUSH_SEGMENTS
+
+CONS_SEGMENT(PAGE_CONS)
+
+const NvU32 NVT_MAX_NVU32 = (NvU32)(-1);
+
+const NvU32 NVT_CVT_CELL_GRAN = 8;        // Character cell width.
+const NvU32 NVT_CVT_MIN_VSYNCBP = 11;     // minimum Vsync+BP of 550 us, scaled [1000000:550 = 20000:11]
+const NvU32 NVT_CVT_V_PORCH = 3;          // in lines
+const NvU32 NVT_CVT_C_PRIME = 30;         // value of C' (percent)
+const NvU32 NVT_CVT_M_PRIME_D_20 = 15;    // value of (M' / 20)
+const NvU32 NVT_CVT_CLOCK_STEP = 25;      // Pclk step, in 10kHz
+const NvU32 NVT_CVT_H_SYNC_PER = 8;       // HSYNC percentage (8%)
+
+const NvU32 NVT_CVT_RB_HBLANK_CELLS = 20; // 160 fixed hblank for RB
+const NvU32 NVT_CVT_RB_HFPORCH_CELLS = 6; // 48 fixed hfporch for RB
+const NvU32 NVT_CVT_RB_HSYNCW_CELLS = 4;  // 32 fixed hsyncwidth for RB
+const NvU32 NVT_CVT_RB_MIN_VBLANK = 23;   // minimum vblank of 460 us, scaled [1000000:460 = 50000:23]
+const NvU32 NVT_CVT_MIN_V_BPORCH = 6;     // Minimum vertical back porch.
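These constants carry the CVT spec's real-valued parameters as exact integer ratios: the 550 us minimum Vsync+back-porch interval becomes 11 against a 20000 scale, because the estimated line period in NvTiming_CalcCVT below is 50*NUM/DEN microseconds, with NUM = 20000 - 11*rr and DEN = rr*(height + 3). A small stand-alone check of that arithmetic for one illustrative request, 1920x1200 at 60 Hz (a sketch, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned rr = 60, height = 1200;
        unsigned num = 20000 - 11 * rr;    // 19340; encodes (1e6 - 550*rr) us, scaled by 1/50
        unsigned den = rr * (height + 3);  // 72180; rr * (visible lines + min V front porch)

        // Estimated line period: 50*num/den ~= 13.4 us per line.
        // Lines needed to cover the 550 us Vsync+BP minimum, rounded up:
        unsigned vsyncbp = (11 * den) / num + 1;

        printf("vsync + back porch = %u lines\n", vsyncbp); // prints 42
        return 0;
    }

The result matches ceil(550 / 13.4) and stays comfortably above the floor of VSyncWidth (6 lines for a 16:10 mode) plus NVT_CVT_MIN_V_BPORCH (6), which is exactly the clamp applied to dwVSyncBP in the function below.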
+ + +// VESA CVT spec ver1.2: +// +// Page 24 : Table 5-4 : Delta between Original Reduced Blank Timing and Reduced Blanking Timing V2 +#define NVT_CVT_RB2_CLOCK_STEP_KHZ 1 +#define NVT_CVT_RB2_H_BLANK_PIXELS 80 +#define NVT_CVT_RB2_H_SYNC_PIXELS 32 +#define NVT_CVT_RB2_MIN_VBLANK_MICROSEC 460 +#define NVT_CVT_RB2_MIN_ALT_VBLANK_MICROSEC 300 +#define NVT_CVT_RB2_MIN_V_FPORCH 1 +#define NVT_CVT_RB2_MIN_V_BPORCH 6 +// Page 16 : Table 3-2 : Vertical Sync Duration +#define NVT_CVT_RB2_V_SYNC_WIDTH 8 +// Page 22: RB_MIN_VBI = RB_V_FPORCH + V_SYNC_RND + MIN_V_BPORCH +#define NVT_CVT_RB2_MIN_VBI NVT_CVT_RB2_V_SYNC_WIDTH + NVT_CVT_RB2_MIN_V_FPORCH + NVT_CVT_RB2_MIN_V_BPORCH +// Page 15 : The Horizontal Sync Pulse duration will in all cases be 32 pixel clocks in duration, with the position +// set so that the trailing edge of the Horizontal Sync Pulse is located in the center of the Horizontal +// Blanking period.This implies that for a fixed blank of 80 pixel clocks, the Horizontal Back Porch is +// fixed to(80 / 2) 40 pixel clocks and the Horizontal Front Porch is fixed to(80 - 40 - 32) = 8 clock cycles. +#define NVT_CVT_RB2_H_FPORCH 8 +#define NVT_CVT_RB2_H_BPORCH 40 + +// VESA CVT spec ver2.0: +// +// Page 15 : Table 3-2 Constants +#define NVT_CVT_RB3_CLOCK_STEP_KHZ 1000 +#define NVT_CVT_RB3_H_BLANK_PIXELS NVT_CVT_RB2_H_BLANK_PIXELS +#define NVT_CVT_RB3_H_SYNC_PIXELS NVT_CVT_RB2_H_SYNC_PIXELS +#define NVT_CVT_RB3_H_FPORCH NVT_CVT_RB2_H_FPORCH +#define NVT_CVT_RB3_MIN_VBLANK_MICROSEC NVT_CVT_RB2_MIN_VBLANK_MICROSEC +#define NVT_CVT_RB3_MIN_ALT_VBLANK_MICROSEC NVT_CVT_RB2_MIN_ALT_VBLANK_MICROSEC +#define NVT_CVT_RB3_V_FIELD_RATE_PPM_ADJ 350 +#define NVT_CVT_RB3_V_SYNC_WIDTH NVT_CVT_RB2_V_SYNC_WIDTH +#define NVT_CVT_RB3_MIN_V_FPORCH NVT_CVT_RB2_MIN_V_FPORCH +#define NVT_CVT_RB3_MIN_V_BPROCH NVT_CVT_RB2_MIN_V_BPORCH + +#define NVT_CVT_RB3_MIN_VBI NVT_CVT_RB2_MIN_VBI + +CODE_SEGMENT(PAGE_DD_CODE) +static NvU16 getCVTVSync(NvU32 XRes, NvU32 YRes) +{ + // 4:3 modes + if(XRes * 3 == YRes * 4) + return 4; + + // 16:9 modes + //if((XRes * 9 == YRes * 16) || + // (XRes == 848 && YRes == 480) || // 53:30 = 1.76666 + // (XRes == 1064 && YRes == 600) || // 133:75 = 1.77333 + // (XRes == 1360 && YRes == 768) || // 85:48 = 1.77083 + // (XRes == 1704 && YRes == 960) || // 71:40 = 1.775 + // (XRes == 1864 && YRes == 1050) || // 832:525 = 1.77523809 + // (XRes == 2128 && YRes == 1200) || // 133:75 + // (XRes == 2728 && YRes == 1536) || // 341:192 = 1.7760416 + // (XRes == 3408 && YRes == 1920) || // 71:40 + // (XRes == 4264 && YRes == 2400)) // 533:300 = 1.77666 + // return 5; + // NOTE: Because 16:9 modes are really a collection of mode of + // aspect ratio between 16:9 and 53:30, we will include + // all generic mode within this aspect ration range + if((XRes * 9 <= YRes * 16) && (XRes * 30 >= YRes * 53)) + return 5; + + // 16:10 modes + if((XRes * 5 == YRes * 8) || + (XRes == 1224 && YRes == 768) || + (XRes == 2456 && YRes == 1536)) + return 6; + + // Special 1280 modes + if((XRes == 1280 && YRes == 1024) || + (XRes == 1280 && YRes == 768)) + return 7; + + // Failure value, for identification + return 10; +} + + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcCVT(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT) +{ + NvU32 dwXCells, dwVSyncBP, dwHBlankCells, dwPClk, dwHSyncCells, dwVSyncWidth; + + NvU32 dwHPeriodEstimate_NUM, dwHPeroidEstimate_DEN; + NvU32 dwIdealDutyCycle_NUM, dwIdealDutyCycle_DEN; + + // parameter check + if (pT == NULL) + return NVT_STATUS_ERR; + + if (width == 0 || 
height == 0 || rr == 0 ) + return NVT_STATUS_ERR; + + // Check for valid input parameter + if (width < 300 || height < 200 || rr < 10) + return NVT_STATUS_ERR;//return NVT_STATUS_ERR_BACKOFF | NVT_STATUS_ERR_OUTOFRANGE; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + + pT->etc.status = NVT_STATUS_CVT; + + // For 1, 2, 3, 4 in Computation of common parameters + // H_PIXELS_RND = ROUNDDOWN(H_PIXELS / CELL_GRAN_RND,0) * CELL_GRAN_RND + if ((width % NVT_CVT_CELL_GRAN)!=0) + { + width = (width / NVT_CVT_CELL_GRAN) * NVT_CVT_CELL_GRAN; + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_ALIGNMENT); + } + + // Calculate timing + dwXCells = width / NVT_CVT_CELL_GRAN; // Convert to number of cells + dwVSyncWidth = getCVTVSync(dwXCells * NVT_CVT_CELL_GRAN, height); + + dwHPeriodEstimate_NUM = 20000 - NVT_CVT_MIN_VSYNCBP * rr; + dwHPeroidEstimate_DEN = rr * (height + NVT_CVT_V_PORCH); + + dwVSyncBP = NVT_CVT_MIN_VSYNCBP * dwHPeroidEstimate_DEN / dwHPeriodEstimate_NUM +1; + if(dwVSyncBP < dwVSyncWidth + NVT_CVT_MIN_V_BPORCH) + dwVSyncBP = dwVSyncWidth + NVT_CVT_MIN_V_BPORCH; + + // Check for overflow + //DBG_ASSERT(NVT_MAX_NVU32 / NVT_CVT_C_PRIME > dwHPeroidEstimate_DEN); + + dwIdealDutyCycle_DEN = dwHPeroidEstimate_DEN; + dwIdealDutyCycle_NUM = NVT_CVT_C_PRIME * dwHPeroidEstimate_DEN - NVT_CVT_M_PRIME_D_20 * dwHPeriodEstimate_NUM; + + if (dwIdealDutyCycle_NUM < dwIdealDutyCycle_DEN * 20 || + (NVT_CVT_C_PRIME * dwHPeroidEstimate_DEN < NVT_CVT_M_PRIME_D_20 * dwHPeriodEstimate_NUM)) + { + dwIdealDutyCycle_NUM=20; + dwIdealDutyCycle_DEN=1; + } + + // Check for overflow + if (NVT_MAX_NVU32 / dwXCells <= dwIdealDutyCycle_NUM) + { + dwIdealDutyCycle_NUM /= 10; + dwIdealDutyCycle_DEN /= 10; + } + + dwHBlankCells = ((dwXCells * dwIdealDutyCycle_NUM)/(200*dwIdealDutyCycle_DEN - 2*dwIdealDutyCycle_NUM))*2; + + // Check for overflow + //DBG_ASSERT(MAX_NVU32 / dwHPeroidEstimate_DEN > (dwXCells + dwHBlankCells)*CVT_CELL_GRAN); + dwPClk = ((dwXCells + dwHBlankCells) * NVT_CVT_CELL_GRAN * dwHPeroidEstimate_DEN * 2 / dwHPeriodEstimate_NUM / NVT_CVT_CLOCK_STEP) * NVT_CVT_CLOCK_STEP; + + dwHSyncCells = (dwXCells + dwHBlankCells) * NVT_CVT_H_SYNC_PER / 100; + + + pT->HVisible = (NvU16)(dwXCells * NVT_CVT_CELL_GRAN); + pT->VVisible = (NvU16)height; + + pT->HTotal = (NvU16)((dwXCells + dwHBlankCells) * NVT_CVT_CELL_GRAN); + pT->HFrontPorch = (NvU16)((dwHBlankCells/2 - dwHSyncCells) * NVT_CVT_CELL_GRAN); + pT->HSyncWidth = (NvU16)(dwHSyncCells * NVT_CVT_CELL_GRAN); + + pT->VTotal = (NvU16)(height + dwVSyncBP + NVT_CVT_V_PORCH); + pT->VFrontPorch = (NvU16)(NVT_CVT_V_PORCH); + pT->VSyncWidth = getCVTVSync(dwXCells * NVT_CVT_CELL_GRAN, height); + + pT->pclk = dwPClk; + pT->pclk1khz = (dwPClk << 3) + (dwPClk << 1); // *10 + + pT->HSyncPol = NVT_H_SYNC_NEGATIVE; + pT->VSyncPol = NVT_V_SYNC_POSITIVE; + + // Clear unused fields + pT->HBorder = pT->VBorder = 0; + pT->interlaced = NVT_PROGRESSIVE; + + pT->etc.flag = 0; + pT->etc.rr = (NvU16)rr; + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk1khz, (NvU32)1000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal); + pT->etc.aspect = 0; + pT->etc.rep = 0x1; + NVT_SNPRINTF((char *)pT->etc.name, 40, "CVT:%dx%dx%dHz",width, height, rr); + pT->etc.name[39] = '\0'; + + // interlaced adjustment + if ((flag & NVT_PVT_INTERLACED_MASK) != 0) + { + if ((pT->VTotal & 0x1) != 0) + pT->interlaced = NVT_INTERLACED_EXTRA_VBLANK_ON_FIELD2; + else + pT->interlaced = NVT_INTERLACED_NO_EXTRA_VBLANK_ON_FIELD2; + + pT->pclk >>= 1; + pT->pclk1khz >>= 1; + pT->VTotal >>= 1; + pT->VVisible = 
(pT->VVisible + 1) / 2; + } + pT->etc.rgb444.bpc.bpc8 = 1; + + return NVT_STATUS_SUCCESS; +} + +// CVT-RB timing calculation +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcCVT_RB(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT) +{ + NvU32 dwXCells, dwPClk, dwVBILines, dwVSyncWidth; + + // parameter check + if (pT == NULL) + return NVT_STATUS_ERR; + + if (width == 0 || height == 0 || rr == 0 ) + return NVT_STATUS_ERR; + + // Check for valid input parameter + if (width < 300 || height < 200 || rr < 10) + return NVT_STATUS_ERR;//NVT_STATUS_ERR_BACKOFF | NVT_STATUS_ERR_OUTOFRANGE; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + pT->etc.status = NVT_STATUS_CVT_RB; + + // H_PIXELS_RND = ROUNDDOWN(H_PIXELS / CELL_GRAN_RND,0) * CELL_GRAN_RND + if ((width % NVT_CVT_CELL_GRAN)!=0) + { + width = (width / NVT_CVT_CELL_GRAN) * NVT_CVT_CELL_GRAN; + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_ALIGNMENT); + } + + // Calculate timing + dwXCells = width / NVT_CVT_CELL_GRAN; // Convert to number of cells + dwVSyncWidth = getCVTVSync(dwXCells * NVT_CVT_CELL_GRAN, height); + + dwVBILines = (NVT_CVT_RB_MIN_VBLANK * height * rr) / (50000 - NVT_CVT_RB_MIN_VBLANK * rr) + 1; + + if(dwVBILines < NVT_CVT_V_PORCH + dwVSyncWidth + NVT_CVT_MIN_V_BPORCH) + dwVBILines = NVT_CVT_V_PORCH + dwVSyncWidth + NVT_CVT_MIN_V_BPORCH; + + dwPClk = rr * (height + dwVBILines) * (dwXCells + NVT_CVT_RB_HBLANK_CELLS) / (10000 / NVT_CVT_CELL_GRAN) / NVT_CVT_CLOCK_STEP; + dwPClk *= NVT_CVT_CLOCK_STEP; + + pT->HVisible = (NvU16)(dwXCells * NVT_CVT_CELL_GRAN); + pT->VVisible = (NvU16)height; + + pT->HTotal = (NvU16)((dwXCells + NVT_CVT_RB_HBLANK_CELLS) * NVT_CVT_CELL_GRAN); + pT->HFrontPorch = (NvU16)(NVT_CVT_RB_HFPORCH_CELLS * NVT_CVT_CELL_GRAN); + pT->HSyncWidth = (NvU16)(NVT_CVT_RB_HSYNCW_CELLS * NVT_CVT_CELL_GRAN); + + pT->VTotal = (NvU16)(height + dwVBILines); + pT->VFrontPorch = (NvU16)(NVT_CVT_V_PORCH); + pT->VSyncWidth = (NvU16)dwVSyncWidth; + + pT->pclk = dwPClk; + pT->pclk1khz = (dwPClk << 3) + (dwPClk << 1); // *10; + + pT->HSyncPol = NVT_H_SYNC_POSITIVE; + pT->VSyncPol = NVT_V_SYNC_NEGATIVE; + + // Clear unused fields + pT->HBorder = pT->VBorder = 0; + pT->interlaced = 0; + + // fill in the extra timing info + pT->etc.flag = 0; + pT->etc.rr = (NvU16)rr; + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk1khz, (NvU32)1000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal); + pT->etc.aspect = 0; + pT->etc.rep = 0x1; + NVT_SNPRINTF((char *)pT->etc.name, 40, "CVT-RB:%dx%dx%dHz",width, height, rr); + pT->etc.name[39] = '\0'; + + // interlaced adjustment + if ((flag & NVT_PVT_INTERLACED_MASK) != 0) + { + if ((pT->VTotal & 0x1) != 0) + pT->interlaced = NVT_INTERLACED_EXTRA_VBLANK_ON_FIELD2; + else + pT->interlaced = NVT_INTERLACED_NO_EXTRA_VBLANK_ON_FIELD2; + + pT->pclk >>= 1; + pT->pclk1khz >>= 1; + pT->VTotal >>= 1; + pT->VVisible = (pT->VVisible + 1) / 2; + } + + return NVT_STATUS_SUCCESS; +} + +// CVT-RB2 timing calculation +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcCVT_RB2(NvU32 width, NvU32 height, NvU32 rr, NvBool is1000div1001, NvBool isAltMiniVblankTiming, NVT_TIMING *pT) +{ + NvU32 vbi, act_vbi_lines, total_v_lines, total_pixels, act_pixel_freq_khz; + + // parameter check + if (pT == NULL || width == 0 || height == 0 || rr == 0) + return NVT_STATUS_ERR; + + // Check for valid input parameter + if (width < 300 || height < 200 || rr < 10) + return NVT_STATUS_ERR; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + pT->etc.status = NVT_STATUS_CVT_RB_2; + + // CVT spec1.2 - page 21 
: 5.4 Computation of Reduced Blanking Timing Parameters + // 8. Estimate the Horizontal Period (kHz): + // H_PERIOD_EST = ((1000000 / (V_FIELD_RATE_RQD)) - RB_MIN_V_BLANK) / (V_LINES_RND + + // TOP_MARGIN + BOT_MARGIN) + // h_period_est = (1000000 / rr - NVT_CVT_RB2_MIN_VBLANK) / height; + + // 9. Determine the number of lines in the vertical blanking interval : + // VBI_LINES = ROUNDDOWN(RB_MIN_V_BLANK / H_PERIOD_EST, 0) + 1 + // vbi = NVT_CVT_RB2_MIN_VBLANK / h_period_est + 1; + + // combining step 8, 9, + if (isAltMiniVblankTiming) + { + // CVT spec2.1 - page 11 6. VBlank Period + // an alternate minimum VBlank duration of 300 us may be used under conditions defined by the interface specification on which the timing is used + vbi = height * NVT_CVT_RB2_MIN_ALT_VBLANK_MICROSEC * rr / (1000000 - NVT_CVT_RB2_MIN_ALT_VBLANK_MICROSEC * rr) + 1; + } + else + { + vbi = height * NVT_CVT_RB2_MIN_VBLANK_MICROSEC * rr / (1000000 - NVT_CVT_RB2_MIN_VBLANK_MICROSEC * rr) + 1; + } + + // 10. Check Vertical Blanking is Sufficient : + // RB_MIN_VBI = RB_V_FPORCH + V_SYNC_RND + MIN_V_BPORCH + // ACT_VBI_LINES = IF(VBI_LINES < RB_MIN_VBI, RB_MIN_VBI, VBI_LINES) + act_vbi_lines = MAX(vbi, NVT_CVT_RB2_MIN_VBI); + + // 11. Find total number of vertical lines : + // TOTAL_V_LINES = ACT_VBI_LINES + V_LINES_RND + TOP_MARGIN + BOT_MARGIN + // + INTERLACE + total_v_lines = act_vbi_lines + height; //+0.5 if interlaced + + // 12. Find total number of pixel clocks per line : + // TOTAL_PIXELS = RB_H_BLANK + TOTAL_ACTIVE_PIXELS + total_pixels = NVT_CVT_RB2_H_BLANK_PIXELS + width; + + // sanity check just in case of bad edid where the timing value could exceed the limit of NVT_TIMING structure which unfortunately is defined in NvU16 + if (total_pixels > (NvU16)-1 || total_v_lines > (NvU16)-1) + return NVT_STATUS_INVALID_PARAMETER; + + // 13. Calculate Pixel Clock Frequency to nearest CLOCK_STEP MHz : + // ACT_PIXEL_FREQ = CLOCK_STEP * ROUNDDOWN((V_FIELD_RATE_RQD * TOTAL_V_LINES * + // TOTAL_PIXELS / 1000000 * REFRESH_MULTIPLIER) / CLOCK_STEP, 0) + if (is1000div1001) + act_pixel_freq_khz = NVT_CVT_RB2_CLOCK_STEP_KHZ * (rr * total_v_lines * total_pixels / 1001 / NVT_CVT_RB2_CLOCK_STEP_KHZ); + else + act_pixel_freq_khz = NVT_CVT_RB2_CLOCK_STEP_KHZ * (rr * total_v_lines * total_pixels / 1000 / NVT_CVT_RB2_CLOCK_STEP_KHZ); + + // 14. Find actual Horizontal Frequency(kHz) : + // ACT_H_FREQ = 1000 * ACT_PIXEL_FREQ / TOTAL_PIXELS + // 15. Find Actual Field Rate(Hz) : + // ACT_FIELD_RATE = 1000 * ACT_H_FREQ / TOTAL_V_LINES + // 16. Find actual Vertical Refresh Rate(Hz) : + // ACT_FRAME_RATE = IF(INT_RQD ? 
= "y", ACT_FIELD_RATE / 2, ACT_FIELD_RATE + + // fill in the essential timing info for output + pT->HVisible = (NvU16)width; + pT->HTotal = (NvU16)(total_pixels); + pT->HFrontPorch = NVT_CVT_RB2_H_FPORCH; + pT->HSyncWidth = NVT_CVT_RB2_H_SYNC_PIXELS; + pT->VVisible = (NvU16)height; + pT->VTotal = (NvU16)total_v_lines; + pT->VSyncWidth = NVT_CVT_RB2_V_SYNC_WIDTH; + pT->VFrontPorch = (NvU16)(act_vbi_lines - NVT_CVT_RB2_V_SYNC_WIDTH - NVT_CVT_RB2_MIN_V_BPORCH); + pT->pclk = (act_pixel_freq_khz + 5) / 10; //convert to 10Khz + pT->pclk1khz = act_pixel_freq_khz; + pT->HSyncPol = NVT_H_SYNC_POSITIVE; + pT->VSyncPol = NVT_V_SYNC_NEGATIVE; + pT->HBorder = pT->VBorder = 0; // not supported + pT->interlaced = 0; // not supported yet + + // fill in the extra timing info + pT->etc.flag = 0; + pT->etc.rr = (NvU16)rr; + pT->etc.rrx1k = (NvU32)axb_div_c_64((NvU64)act_pixel_freq_khz, (NvU64)1000 * (NvU64)1000, (NvU64)pT->HTotal*(NvU64)pT->VTotal); + pT->etc.aspect = 0; + pT->etc.rep = 0x1; + NVT_SNPRINTF((char *)pT->etc.name, 40, "CVT-RB2:%dx%dx%dHz", width, height, rr); + pT->etc.name[39] = '\0'; + + return NVT_STATUS_SUCCESS; +} + + +// CVT-RB3 timing calculation +// This is intended to work in conjunction with VESA Adaptive-Sync operation (or other similar VRR methodology) +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcCVT_RB3(NvU32 width, + NvU32 height, + NvU32 rr, + NvU32 deltaHBlank, + NvU32 vBlankMicroSec, + NvBool isAltMiniVblankTiming, + NvBool isEarlyVSync, + NVT_TIMING *pT) +{ + NvU32 vbi, act_v_blank_time, act_v_blank_lines, v_back_porch_est, v_back_porch, total_v_lines, total_pixels, adj_rr_x1M, act_pixel_freq_khz; + NvU64 act_pixel_freq_hz = 0xFFFFFFFFFFFFFFFFULL; + + // parameter check + if (pT == NULL) + return NVT_STATUS_ERR; + + if (width == 0 || height == 0 || rr == 0) + return NVT_STATUS_ERR; + + // Check for valid input parameter + if ((height % 8 != 0) || (deltaHBlank % 8 != 0) || deltaHBlank > 120) + return NVT_STATUS_INVALID_PARAMETER; + + if ((isAltMiniVblankTiming && vBlankMicroSec > 140) || (!isAltMiniVblankTiming && vBlankMicroSec > 245)) + return NVT_STATUS_INVALID_PARAMETER; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + pT->etc.status = NVT_STATUS_CVT_RB_3; + + // 1 Calculate the required field refresh rate (Hz): + // V_FIELD_RATE_RQD = I_IP_FREQ_RQD * (1 + C_V_FIELD_RATE_PPM_ADJ / 1000000) + + // Parameters mapping: + // - V_FIELD_RATE_RQD == "adj_rr_x1M" + // - I_IP_FREQ_RQD == "rr" + // - C_V_FIELD_RATE_PPM_ADJ == "NVT_CVT_RB3_V_FIELD_RATE_PPM_ADJ" + adj_rr_x1M = rr * (1000000 + NVT_CVT_RB3_V_FIELD_RATE_PPM_ADJ); + + // 2 Round the desired number of horizontal pixels down to the nearest character cell boundary: + // TOTAL_ACTIVE_PIXELS = ROUNDDOWN(I_H_PIXELS / C_CELL_GRAN_RND, 0) * C_CELL_GRAN_RND + + // Parameters mapping: + // - TOTAL_ACTIVE_PIXELS and I_H_PIXELS == "width" + // - C_CELL_GRAN_RND == "NVT_CVT_CELL_GRAN" + if ((width % NVT_CVT_CELL_GRAN) != 0) + { + // ROUNDDOWN + width = (width / NVT_CVT_CELL_GRAN) * NVT_CVT_CELL_GRAN; + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_ALIGNMENT); + } + + // 3 Round the number of vertical lines down to the nearest integer: + // V_LINES_RND = ROUNDDOWN(I_V_LINES, 0) + + // Parameters mapping: + // - V_LINES_RND == "height" + + // 4 Calculate the minimum allowed VBlank duration: + // MIN_V_BLANK = IF(I_ALTERNATE_VBLANK_MIN?="N", C_RB_MIN_V_BLANK, C_RB_ALT_MIN_V_BLANK) + + // 5 Calculate the estimated Horizontal Period (kHz): + // H_PERIOD_EST = ((1,000,000 / (V_FIELD_RATE_RQD)) – 
MIN_V_BLANK) / V_LINES_RND + + // Parameters mapping: + // - H_PERIOD_EST == "h_period_est" + // - C_RB_MIN_V_BLANK == "NVT_CVT_RB3_MIN_VBLANK_MICROSEC" == 460 + // - C_RB_ALT_MIN_V_BLANK == "NVT_CVT_RB3_MIN_ALT_VBLANK_MICROSEC" == 300 + // h_period_est = ((1000000000000 / adj_rr_x1M) - MIN_V_BLANK) / height + + // 6 Calculate the total VBlank time: + // ACT_V_BLANK_TIME = IF(I_VBLANK < MIN_V_BLANK, MIN_V_BLANK, I_VBLANK) + + // Parameters mapping: + // - ACT_V_BLANK_TIME == "act_v_blank_time" + // - I_VBLANK == "vBlankMicroSec" + if (isAltMiniVblankTiming) + act_v_blank_time = MAX(vBlankMicroSec + NVT_CVT_RB3_MIN_ALT_VBLANK_MICROSEC, NVT_CVT_RB3_MIN_ALT_VBLANK_MICROSEC); + else + act_v_blank_time = MAX(vBlankMicroSec + NVT_CVT_RB3_MIN_VBLANK_MICROSEC, NVT_CVT_RB3_MIN_VBLANK_MICROSEC); + + // 7 Calculate the number of idealized lines in the VBlank interval: + // VBI_LINES = ROUNDUP(ACT_V_BLANK_TIME / H_PERIOD_EST, 0) + + // Parameters mapping: + // - VBI_LINES == "vbi" + // below formula are combining step 4, 5, 6 together. i.e. both numerator and denominator multiple by height and addj_rr_x1M. + vbi = (NvU32)(((NvU64)height * (NvU64)act_v_blank_time * (NvU64)adj_rr_x1M) / ((NvU64)1000000000000 - (NvU64)act_v_blank_time * (NvU64)adj_rr_x1M)); + // ROUNDUP + if (((NvU64)height * (NvU64)act_v_blank_time * (NvU64)adj_rr_x1M) % ((NvU64)1000000000000 - (NvU64)act_v_blank_time * (NvU64)adj_rr_x1M) !=0) + vbi += 1; + + // 8 Determine whether idealized VBlank is sufficient and calculate the actual number of lines in the VBlank period: + // RB_MIN_VBI = C_RB_MIN_V_FPORCH + C_V_SYNC_RND + C_MIN_V_BPORCH + // V_BLANK = IF(VBI_LINES < RB_MIN_VBI, RB_MIN_VBI, VBI_LINES) + + // Parameters mapping: + // - C_RB_MIN_V_FPORCH == 1 + // - C_V_SYNC_RND == 8 + // - C_MIN_V_BPORCH == 6 + // - V_BLANK == "act_v_blank_lines" + // NVT_CVT_RB3_MIN_VBI == 1 + 8 + 6 = 15 + act_v_blank_lines = MAX(vbi, NVT_CVT_RB3_MIN_VBI); + + // 9 Calculate the total number of vertical lines: + // TOTAL_V_LINES = V_BLANK + V_LINES_RND + total_v_lines = act_v_blank_lines + height; + + // 10 Calculate the estimated vertical back porch, determine whether the remaining vertical front porch is greater than the + // minimum vertical front porch (C_RB_MIN_V_FPORCH), and if not, reduce the estimated vertical back porch to reserve VBlank lines for the vertical front porch: + // V_BACK_PORCH_EST = IF(AND(I_RED_BLANK_VER=3,I_EARLY_VSYNC_RQD?="Y"), ROUNDDOWN(VBI_LINES / 2, 0), C_MIN_V_BPORCH) + // V_BACK_PORCH = IF(AND(I_RED_BLANK_VER=3, I_EARLY_VSYNC_RQD?="Y"), + // IF(VBLANK – V_BACK_PORCH_EST – C_V_SYNC_RND < C_RB_MIN_V_FPORCH, V_BLANK – C_V_SYNC_RND – C_RB_MIN_V_FPORCH, V_BACK_PORCH_EST), + // V_BACK_PORCH_EST) + + // Parameters mapping: + // - V_BACK_PORCH_EST == "(VBI_LINES / 2) + // - V_BACK_PORCH == "v_back_porch" + // - I_RED_BLANK_VER == "3" this is for RB3 function so the value is 3 + // - I_EARLY_VSYNC_RQD == "isEarlyVSync" + // - C_V_SYNC_RND == NVT_CVT_RB3_V_SYNC_WIDTH == 8 + // - C_RB_MIN_V_FPORCH == NVT_CVT_RB3_MIN_V_FPORCH == 1 + // - C_MIN_V_BPORCH == NVT_CVT_RB3_MIN_V_BPROCH == 6 + if (isEarlyVSync) + { + v_back_porch_est = vbi / 2; + if ((act_v_blank_lines - v_back_porch_est - NVT_CVT_RB3_V_SYNC_WIDTH) < NVT_CVT_RB3_MIN_V_FPORCH) + v_back_porch = act_v_blank_lines - NVT_CVT_RB3_V_SYNC_WIDTH - NVT_CVT_RB3_MIN_V_FPORCH; + else + v_back_porch = v_back_porch_est; + } + else + v_back_porch = NVT_CVT_RB3_MIN_V_BPROCH; + + // 11 Calculate the vertical front porch: + // V_FRONT_PORCH = V_BLANK – V_BACK_PORCH – C_V_SYNC_RND + // 
pT->VFrontPorch = (NvU16)(act_v_blank_lines - NVT_CVT_RB3_V_SYNC_WIDTH - v_back_porch); + + // 12 Calculate the total number of pixels per line: + // TOTAL_PIXELS = TOTAL_ACTIVE_PIXELS + C_RB_MIN_H_BLANK + IF(I_RED_BLANK_VER=3, I_ADDITIONAL_HBLANK, 0) + + // Parameters mapping: + // - C_RB_MIN_H_BLANK == NVT_CVT_RB3_H_BLANK_PIXELS == 80 + // - I_ADDITIONAL_HBLANK == deltaHBlank scopes are defined in the TypeX in displayId2.1a + // 80 <= HBlank <= 200 is a valid scope + total_pixels = width + NVT_CVT_RB3_H_BLANK_PIXELS + deltaHBlank; + + // 13 Calculate the horizontal back porch: + // H_BACK_PORCH = C_RB_MIN_H_BLANK + IF(I_RED_BLANK_VER=3, I_ADDITIONAL_HBLANK, 0) – C_H_FRONT_PORCH – C_RB_H_SYNC + // NVT_TIMING did not need to store H_BACK_PORCH + + // sanity check just in case of bad edid where the timing value could exceed the limit of NVT_TIMING structure which unfortunately is defined in NvU16 + if (total_pixels > (NvU16)-1 || total_v_lines > (NvU16)-1) + return NVT_STATUS_INVALID_PARAMETER; + + // 14 Calculate the pixel clock frequency to the nearest C_CLOCK_STEP (MHz): + // ACT_PIXEL_FREQ = C_CLOCK_STEP * ROUNDUP((V_FIELD_RATE_RQD * TOTAL_V_LINES * TOTAL_PIXELS / 1000000 * 1) / C_CLOCK_STEP, 0)) + + // Parameters mapping: + // - ACT_PIXEL_FREQ == "act_pixel_freq_hz" + // - C_CLOCK_STEP == "NVT_CVT_RB3_CLOCK_STEP_KHZ" == 1000 + act_pixel_freq_hz = ((NvU64)adj_rr_x1M * (NvU64)total_v_lines * (NvU64)total_pixels / (NvU64)1000000); + + // Here we need to divide extra 1000 because adj_rr_x1M extends to Million, i.e 1Mhz / 1000 = 1kHz + act_pixel_freq_khz = (NvU32)(act_pixel_freq_hz / NVT_CVT_RB3_CLOCK_STEP_KHZ); + + // kHz ROUNDUP + if ((act_pixel_freq_hz % 1000) != 0) + act_pixel_freq_khz += 1; + + pT->HVisible = (NvU16)width; + pT->HTotal = (NvU16)total_pixels; + pT->HFrontPorch = NVT_CVT_RB3_H_FPORCH; + pT->HSyncWidth = NVT_CVT_RB3_H_SYNC_PIXELS; + pT->VVisible = (NvU16)height; + pT->VTotal = (NvU16)total_v_lines; + pT->VSyncWidth = NVT_CVT_RB3_V_SYNC_WIDTH; + pT->VFrontPorch = (NvU16)(act_v_blank_lines - NVT_CVT_RB3_V_SYNC_WIDTH - v_back_porch); + pT->pclk = ((NvU32)act_pixel_freq_khz + 5) / 10; //convert to 10Khz + pT->pclk1khz = act_pixel_freq_khz; + pT->HSyncPol = NVT_H_SYNC_POSITIVE; + pT->VSyncPol = NVT_V_SYNC_NEGATIVE; + pT->HBorder = pT->VBorder = 0; // not supported + pT->interlaced = 0; // not supported yet + + // fill in the extra timing info + pT->etc.flag = 0; + pT->etc.rr = (NvU16)rr; + pT->etc.rrx1k = (NvU32)axb_div_c_64((NvU64)act_pixel_freq_khz, (NvU64)1000 * (NvU64)1000, (NvU64)pT->HTotal*(NvU64)pT->VTotal); + pT->etc.aspect = 0; + pT->etc.rep = 0x1; + NVT_SNPRINTF((char *)pT->etc.name, 40, "CVT-RB3:%dx%dx%dHz", width, height, rr); + pT->etc.name[39] = '\0'; + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvBool NvTiming_IsTimingCVTRB(const NVT_TIMING *pTiming) +{ + // Check from the Timing Type + NvU32 reducedType = 0; + NvU32 hblank = 0; + reducedType = NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status); + + if (reducedType == NVT_TYPE_CVT_RB || reducedType == NVT_TYPE_CVT_RB_2 || reducedType == NVT_TYPE_CVT_RB_3) + { + return NV_TRUE; + } + + hblank = pTiming->HTotal - pTiming->HVisible; + + // Manually Check for RB 1 and 2 + // RB1 - HBlank = 160, and HSync = 32, HFrontPorch = 48, HBackPorch = 80 + if ((hblank == 160) && (pTiming->HSyncWidth == 32) && (pTiming->HFrontPorch == 48)) + { + return NV_TRUE; + } + + // RB2 - HBlank = 80, HSync = 32, HFrontPorch = 8, HBackPorch = 40 + if ((hblank == 80) && (pTiming->HSyncWidth == 32) && (pTiming->HFrontPorch 
== 8)) + { + return NV_TRUE; + } + + // RB3 - HBlank is any integer multiple of 8 [80-200], HSync = 32, HFrontPorch = 8 + if (((hblank > 80) && (hblank <= 200) && (hblank % 8 == 0)) && (pTiming->HSyncWidth == 32) && (pTiming->HFrontPorch == 8)) + { + return NV_TRUE; + } + + return NV_FALSE; +} + +POP_SEGMENTS diff --git a/src/common/modeset/timing/nvt_displayid20.c b/src/common/modeset/timing/nvt_displayid20.c new file mode 100644 index 0000000..75e98a4 --- /dev/null +++ b/src/common/modeset/timing/nvt_displayid20.c @@ -0,0 +1,2012 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+//
+// File: nvt_displayid20.c
+//
+// Purpose: provide DisplayID 2.0 related services
+//
+//*****************************************************************************
+
+#include "nvBinSegment.h"
+#include "nvmisc.h"
+
+#include "nvtiming_pvt.h"
+#include "displayid20.h"
+
+PUSH_SEGMENTS
+
+// Helper functions
+static NVT_STATUS getPrimaryUseCase(NvU8 product_type, NVT_DISPLAYID_PRODUCT_PRIMARY_USE_CASE *primary_use_case);
+static NvU32 greatestCommonDenominator(NvU32 x, NvU32 y);
+static NvU8 getExistedTimingSeqNumber(NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, enum NVT_TIMING_TYPE);
+
+/*
+ * The second-generation version of the VESA DisplayID Standard
+ * DisplayID v2.0
+ *
+ * @brief Parses a DisplayID 2.0 section
+ *
+ * @param pDisplayId     The DisplayID 2.0 section block
+ * @param length         Size of the DisplayID section block
+ * @param pDisplayIdInfo Output structure that receives the parsed raw data
+ *
+ */
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NV_STDCALL
+NvTiming_parseDisplayId20Info(
+    const NvU8 *pDisplayId,
+    NvU32 length,
+    NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo)
+{
+    NVT_STATUS status = NVT_STATUS_SUCCESS;
+    const DISPLAYID_2_0_SECTION *pSection = NULL;
+    NvU32 offset = 0;
+    NvU32 extensionIndex = 0;
+    NvU32 idx = 0;
+
+    // parameter check
+    if ((pDisplayId == NULL) ||
+        (pDisplayIdInfo == NULL))
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    pSection = (const DISPLAYID_2_0_SECTION *)pDisplayId;
+
+    if ((pSection->header.version < DISPLAYID_2_0_VERSION) ||
+        (DISPLAYID_2_0_SECTION_SIZE_TOTAL(pSection->header) > length))
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    NVMISC_MEMSET(pDisplayIdInfo, 0, sizeof(NVT_DISPLAYID_2_0_INFO));
+
+    status = parseDisplayId20BaseSection(pSection, pDisplayIdInfo);
+    if (status != NVT_STATUS_SUCCESS)
+    {
+        return status;
+    }
+
+    pDisplayIdInfo->extension_count = pSection->header.extension_count;
+    for (extensionIndex = 0; extensionIndex < pDisplayIdInfo->extension_count; extensionIndex++)
+    {
+        // Get offset to the next section.
+        offset += DISPLAYID_2_0_SECTION_SIZE_TOTAL(pSection->header);
+
+        // validate the next section buffer is valid
+        pSection = (const DISPLAYID_2_0_SECTION *)(pDisplayId + offset);
+        if ((offset + DISPLAYID_2_0_SECTION_SIZE_TOTAL(pSection->header)) > length)
+        {
+            return NVT_STATUS_ERR;
+        }
+
+        // process the section
+        status = parseDisplayId20ExtensionSection(pSection, pDisplayIdInfo);
+        if (status != NVT_STATUS_SUCCESS)
+        {
+            return status;
+        }
+    }
+
+    for (idx = 0; idx < pDisplayIdInfo->total_timings; idx++)
+    {
+        updateColorFormatForDisplayId20Timings(pDisplayIdInfo, idx);
+    }
+
+    return status;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NvU32
+NvTiming_DisplayID2ValidationMask(
+    NVT_DISPLAYID_2_0_INFO *pDisplayId20Info,
+    NvBool bIsStrongValidation)
+{
+    NvU32 j;
+    NvU32 ret = 0;
+
+    // check the DisplayId2 version and signature
+    if (pDisplayId20Info->version != 0x2)
+    {
+        ret |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_VERSION);
+    }
+
+    if (!pDisplayId20Info->valid_data_blocks.product_id_present)
+    {
+        ret |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_PRODUCT_IDENTIFY);
+    }
+
+    if (pDisplayId20Info->primary_use_case >= PRODUCT_PRIMARY_USE_GENERIC_DISPLAY &&
+        pDisplayId20Info->primary_use_case <= PRODUCT_PRIMARY_USE_HEAD_MOUNT_AUGMENTED_REALITY)
+    {
+        if (!(pDisplayId20Info->valid_data_blocks.parameters_present &&
+              pDisplayId20Info->valid_data_blocks.interface_feature_present &&
+              pDisplayId20Info->valid_data_blocks.type7Timing_present &&
+              pDisplayId20Info->total_timings))
+        {
+            ret |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_NO_DATA_BLOCK);
+        }
+    }
+
+    // Strong validation to follow
+    if (bIsStrongValidation == NV_TRUE)
+    {
+        // TODO : check each Data Block's limitations
+        // Type 7 Timings data block
+        for (j = 0; j < pDisplayId20Info->total_timings; j++)
+        {
+            if (NVT_PREFERRED_TIMING_IS_DISPLAYID(pDisplayId20Info->timing[j].etc.flag) &&
+                (pDisplayId20Info->display_param.h_pixels != 0) &&
+                (pDisplayId20Info->display_param.v_pixels != 0))
+            {
+                if (pDisplayId20Info->timing[j].HVisible != pDisplayId20Info->display_param.h_pixels ||
+                    pDisplayId20Info->timing[j].VVisible != pDisplayId20Info->display_param.v_pixels)
+                {
+                    ret |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_NO_DATA_BLOCK);
+                    break;
+                }
+            }
+        }
+        // TODO : go on to the next data block validation if it exists.
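+        // [Editor's note: a hedged sketch of one possible follow-on check,
+        //  mirroring the sanity test parseDisplayId20RangeLimit() applies
+        //  later in this file; the error bit NVT_DID2_VALIDATION_ERR_RANGE_LIMIT
+        //  is an assumed name, not a confirmed enum value:]
+        //
+        //     if (pDisplayId20Info->valid_data_blocks.dynamic_range_limit_present &&
+        //         (pDisplayId20Info->range_limits.vfreq_min > pDisplayId20Info->range_limits.vfreq_max))
+        //     {
+        //         ret |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_RANGE_LIMIT);
+        //     }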
+ // TODO : validate extension blocks + } + + return ret; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +NvTiming_DisplayID2ValidationDataBlocks( + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, + NvBool bIsStrongValidation) +{ + if (NvTiming_DisplayID2ValidationMask(pDisplayIdInfo, bIsStrongValidation) != 0) + { + return NVT_STATUS_ERR; + } + else + { + return NVT_STATUS_SUCCESS; + } +} + +// DisplayID20 Entry point functions +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20BaseSection( + const DISPLAYID_2_0_SECTION *pSection, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + // validate for section checksum before processing the data block + if (computeDisplayId20SectionCheckSum((const NvU8 *)pSection, DISPLAYID_2_0_SECTION_SIZE_TOTAL(pSection->header)) != 0) + { + status |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_CHECKSUM); + return status; + } + + pDisplayIdInfo->revision = pSection->header.revision; + pDisplayIdInfo->version = pSection->header.version; + + status = getPrimaryUseCase(pSection->header.product_type, + &pDisplayIdInfo->primary_use_case); + if (status != NVT_STATUS_SUCCESS) + { + return status; + } + + status = parseDisplayId20SectionDataBlocks(pSection, pDisplayIdInfo); + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20ExtensionSection( + const DISPLAYID_2_0_SECTION *pSection, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + // validate for section checksum before processing the data block + if (computeDisplayId20SectionCheckSum((const NvU8 *)pSection, DISPLAYID_2_0_SECTION_SIZE_TOTAL(pSection->header)) != 0) + { + status |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_CHECKSUM); + return status; + } + + nvt_assert(pSection->header.version >= DISPLAYID_2_0_VERSION); + nvt_assert(pSection->header.extension_count == 0); + nvt_assert(pSection->header.product_type == DISPLAYID_2_0_PROD_EXTENSION); + + status = parseDisplayId20SectionDataBlocks(pSection, pDisplayIdInfo); + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20SectionDataBlocks( + const DISPLAYID_2_0_SECTION *pSection, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NvU32 i = 0; + NvU32 offset = 0; + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock = NULL; + NVT_STATUS status = NVT_STATUS_SUCCESS; + + while (offset < pSection->header.section_bytes) + { + // Get current block + pDataBlock = (const DISPLAYID_2_0_DATA_BLOCK_HEADER *)(pSection->data + offset); + + // detected zero padding + if (pDataBlock->type == 0) + { + for (i = offset; i < pSection->header.section_bytes; i++) + { + // validate that all paddings are zeros + nvt_assert(pSection->data[i] == 0); + } + break; + } + + // check data block is valid. 
+ if ((offset + DISPLAYID_2_0_DATA_BLOCK_SIZE_TOTAL(pDataBlock)) > pSection->header.section_bytes) + { + return NVT_STATUS_ERR; + } + + // parse the data block + status = parseDisplayId20DataBlock(pDataBlock, pDisplayIdInfo); + if (status != NVT_STATUS_SUCCESS) + { + return status; + } + + switch (pDataBlock->type) + { + case DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY: + pDisplayIdInfo->valid_data_blocks.product_id_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_DISPLAY_PARAM: + pDisplayIdInfo->valid_data_blocks.parameters_present = NV_TRUE; + if (pDisplayIdInfo->display_param.audio_speakers_integrated == AUDIO_SPEAKER_INTEGRATED_SUPPORTED) + { + pDisplayIdInfo->basic_caps |= NVT_DISPLAY_2_0_CAP_BASIC_AUDIO; + } + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_7: + pDisplayIdInfo->valid_data_blocks.type7Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_8: + pDisplayIdInfo->valid_data_blocks.type8Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_9: + pDisplayIdInfo->valid_data_blocks.type9Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_RANGE_LIMITS: + pDisplayIdInfo->valid_data_blocks.dynamic_range_limit_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_INTERFACE_FEATURES: + pDisplayIdInfo->valid_data_blocks.interface_feature_present = NV_TRUE; + + // Supported - Color depth is supported for all supported timings. Supported timing includes all Display-ID exposed timings + // (that is timing exposed using DisplayID timing types and CTA VICs) + if (IS_BPC_SUPPORTED_COLORFORMAT(pDisplayIdInfo->interface_features.yuv444.bpcs)) + { + pDisplayIdInfo->basic_caps |= NVT_DISPLAY_2_0_CAP_YCbCr_444; + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pDisplayIdInfo->interface_features.yuv422.bpcs)) + { + pDisplayIdInfo->basic_caps |= NVT_DISPLAY_2_0_CAP_YCbCr_422; + } + + if (pDisplayIdInfo->interface_features.audio_capability.support_48khz || + pDisplayIdInfo->interface_features.audio_capability.support_44_1khz || + pDisplayIdInfo->interface_features.audio_capability.support_32khz) + { + pDisplayIdInfo->basic_caps |= NVT_DISPLAY_2_0_CAP_BASIC_AUDIO; + } + break; + case DISPLAYID_2_0_BLOCK_TYPE_STEREO: + pDisplayIdInfo->valid_data_blocks.stereo_interface_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TILED_DISPLAY: + pDisplayIdInfo->valid_data_blocks.tiled_display_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_CONTAINER_ID: + pDisplayIdInfo->valid_data_blocks.container_id_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_10: + pDisplayIdInfo->valid_data_blocks.type10Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_ADAPTIVE_SYNC: + pDisplayIdInfo->valid_data_blocks.adaptive_sync_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_ARVR_HMD: + pDisplayIdInfo->valid_data_blocks.arvr_hmd_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_ARVR_LAYER: + pDisplayIdInfo->valid_data_blocks.arvr_layer_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_BRIGHTNESS_LUMINANCE_RANGE: + pDisplayIdInfo->valid_data_blocks.brightness_luminance_range_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_VENDOR_SPEC: + pDisplayIdInfo->valid_data_blocks.vendor_specific_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_CTA_DATA: + pDisplayIdInfo->valid_data_blocks.cta_data_present = NV_TRUE; + break; + default: + status = NVT_STATUS_ERR; + } + + // advance to the next block + offset += DISPLAYID_2_0_DATA_BLOCK_SIZE_TOTAL(pDataBlock); + } + + return 
status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20DataBlock( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + switch (pDataBlock->type) + { + case DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY: + status = parseDisplayId20ProductIdentity(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_DISPLAY_PARAM: + status = parseDisplayId20DisplayParam(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_7: + status = parseDisplayId20Timing7(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_8: + status = parseDisplayId20Timing8(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_9: + status = parseDisplayId20Timing9(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_10: + status = parseDisplayId20Timing10(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_RANGE_LIMITS: + status = parseDisplayId20RangeLimit(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_INTERFACE_FEATURES: + status = parseDisplayId20DisplayInterfaceFeatures(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_STEREO: + status = parseDisplayId20Stereo(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_TILED_DISPLAY: + status = parseDisplayId20TiledDisplay(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_CONTAINER_ID: + status = parseDisplayId20ContainerId(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_ADAPTIVE_SYNC: + status = parseDisplayId20AdaptiveSync(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_ARVR_HMD: + status = parseDisplayId20ARVRHMD(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_ARVR_LAYER: + status = parseDisplayId20ARVRLayer(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_BRIGHTNESS_LUMINANCE_RANGE: + status = parseDisplayId20BrightnessLuminanceRange(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_VENDOR_SPEC: + status = parseDisplayId20VendorSpecific(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_CTA_DATA: + status = parseDisplayId20CtaData(pDataBlock, pDisplayIdInfo); + break; + default: + status = NVT_STATUS_ERR; + } + return status; +} + +// All Data Blocks Parsing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20ProductIdentity( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + NVT_DISPLAYID_PRODUCT_IDENTITY *pProductIdentity = NULL; + const DISPLAYID_2_0_PROD_IDENTIFICATION_BLOCK *pProductIdBlock = NULL; + + pProductIdBlock = (const DISPLAYID_2_0_PROD_IDENTIFICATION_BLOCK *)pDataBlock; + + // add more validation if needed + + if (pDisplayIdInfo == NULL) return status; + + pProductIdentity = &pDisplayIdInfo->product_identity; + + pProductIdentity->vendor_id = (pProductIdBlock->vendor[0] << 16) | + (pProductIdBlock->vendor[1] << 8) | + (pProductIdBlock->vendor[2]); + pProductIdentity->product_id = (pProductIdBlock->product_code[0]) | + (pProductIdBlock->product_code[1] << 8); + pProductIdentity->serial_number = (pProductIdBlock->serial_number[0]) | + (pProductIdBlock->serial_number[1] << 8) | + (pProductIdBlock->serial_number[2] << 16) | + (pProductIdBlock->serial_number[3] << 24); + pProductIdentity->week = (pProductIdBlock->model_tag >= 1 && 
pProductIdBlock->model_tag <= 52) ? + pProductIdBlock->model_tag : 0; + pProductIdentity->year = (pProductIdBlock->model_tag == 0xFF) ? + pProductIdBlock->model_year : + pProductIdBlock->model_year + 2000; + + if (pProductIdBlock->product_name_string_size != 0) + { + NVMISC_STRNCPY((char *)pProductIdentity->product_string, + (const char *)pProductIdBlock->product_name_string, + pProductIdBlock->product_name_string_size); + } + pProductIdentity->product_string[pProductIdBlock->product_name_string_size] = '\0'; + + return status; +} + + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20DisplayParam( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_DISPLAY_PARAM_BLOCK *pDisplayParamBlock = NULL; + NVT_DISPLAYID_DISPLAY_PARAMETERS *pDisplayParam = NULL; + + if (pDataBlock->data_bytes != DISPLAYID_2_0_DISPLAY_PARAM_BLOCK_PAYLOAD_LENGTH) + { + return NVT_STATUS_ERR; + } + + // Add more validation here if needed + + if (pDisplayIdInfo == NULL) return status; + + pDisplayParamBlock = (const DISPLAYID_2_0_DISPLAY_PARAM_BLOCK *)pDataBlock; + pDisplayParam = &pDisplayIdInfo->display_param; + + pDisplayParam->revision = pDisplayParamBlock->header.revision; + pDisplayParam->h_image_size_micro_meter = (pDisplayParamBlock->horizontal_image_size[1] << 8 | + pDisplayParamBlock->horizontal_image_size[0]) * + (pDisplayParamBlock->header.image_size_multiplier ? 1000 : 100); + pDisplayParam->v_image_size_micro_meter = (pDisplayParamBlock->vertical_image_size[1] << 8 | + pDisplayParamBlock->vertical_image_size[0]) * + (pDisplayParamBlock->header.image_size_multiplier ? 1000 : 100); + pDisplayParam->h_pixels = pDisplayParamBlock->horizontal_pixel_count[1] << 8 | + pDisplayParamBlock->horizontal_pixel_count[0]; + pDisplayParam->v_pixels = pDisplayParamBlock->vertical_pixel_count[1] << 8 | + pDisplayParamBlock->vertical_pixel_count[0]; + + pDisplayParam->scan_orientation = pDisplayParamBlock->feature.scan_orientation; + pDisplayParam->audio_speakers_integrated = pDisplayParamBlock->feature.audio_speaker_information ? AUDIO_SPEAKER_INTEGRATED_NOT_SUPPORTED : AUDIO_SPEAKER_INTEGRATED_SUPPORTED; + pDisplayParam->color_map_standard = pDisplayParamBlock->feature.color_information ? 
COLOR_MAP_CIE_1976 : COLOR_MAP_CIE_1931; + + // 12 bits Binary Fraction Representations + pDisplayParam->primaries[0].x = pDisplayParamBlock->primary_color_1_chromaticity.color_bits_mid.color_x_bits_high << 8 | + pDisplayParamBlock->primary_color_1_chromaticity.color_x_bits_low; + pDisplayParam->primaries[0].y = pDisplayParamBlock->primary_color_1_chromaticity.color_y_bits_high << 4 | + pDisplayParamBlock->primary_color_1_chromaticity.color_bits_mid.color_y_bits_low; + pDisplayParam->primaries[1].x = pDisplayParamBlock->primary_color_2_chromaticity.color_bits_mid.color_x_bits_high << 8 | + pDisplayParamBlock->primary_color_2_chromaticity.color_x_bits_low; + pDisplayParam->primaries[1].y = pDisplayParamBlock->primary_color_2_chromaticity.color_y_bits_high << 4 | + pDisplayParamBlock->primary_color_2_chromaticity.color_bits_mid.color_y_bits_low; + pDisplayParam->primaries[2].x = pDisplayParamBlock->primary_color_3_chromaticity.color_bits_mid.color_x_bits_high << 8 | + pDisplayParamBlock->primary_color_3_chromaticity.color_x_bits_low; + pDisplayParam->primaries[2].y = pDisplayParamBlock->primary_color_3_chromaticity.color_y_bits_high << 4 | + pDisplayParamBlock->primary_color_3_chromaticity.color_bits_mid.color_y_bits_low; + pDisplayParam->white.x = pDisplayParamBlock->white_point_chromaticity.color_bits_mid.color_x_bits_high << 8 | + pDisplayParamBlock->white_point_chromaticity.color_x_bits_low; + pDisplayParam->white.y = pDisplayParamBlock->white_point_chromaticity.color_y_bits_high << 4 | + pDisplayParamBlock->white_point_chromaticity.color_bits_mid.color_y_bits_low; + + // IEEE 754 half-precision binary floating-point format + pDisplayParam->native_max_luminance_full_coverage = pDisplayParamBlock->max_luminance_full_coverage[1] << 8 | + pDisplayParamBlock->max_luminance_full_coverage[0]; + pDisplayParam->native_max_luminance_10_percent_rect_coverage = pDisplayParamBlock->max_luminance_10_percent_rectangular_coverage[1] << 8 | + pDisplayParamBlock->max_luminance_10_percent_rectangular_coverage[0]; + pDisplayParam->native_min_luminance = pDisplayParamBlock->min_luminance[1] << 8 | + pDisplayParamBlock->min_luminance[0]; + + if (pDisplayParamBlock->feature.luminance_information == 0) + { + pDisplayParam->native_luminance_info = NATIVE_LUMINANCE_INFO_MIN_GURANTEE_VALUE; + } + else if (pDisplayParamBlock->feature.luminance_information == 1) + { + pDisplayParam->native_luminance_info = NATIVE_LUMINANCE_INFO_SOURCE_DEVICE_GUIDANCE; + } + else + { + return NVT_STATUS_ERR; + } + + UPDATE_BPC_FOR_COLORFORMAT(pDisplayParam->native_color_depth, + pDisplayParamBlock->color_depth_and_device_technology.color_depth == NATIVE_COLOR_BPC_6, + pDisplayParamBlock->color_depth_and_device_technology.color_depth == NATIVE_COLOR_BPC_8, + pDisplayParamBlock->color_depth_and_device_technology.color_depth == NATIVE_COLOR_BPC_10, + pDisplayParamBlock->color_depth_and_device_technology.color_depth == NATIVE_COLOR_BPC_12, + 0, + pDisplayParamBlock->color_depth_and_device_technology.color_depth == NATIVE_COLOR_BPC_16); + + pDisplayParam->device_technology = pDisplayParamBlock->color_depth_and_device_technology.device_technology; + if (pDisplayParam->revision == 1) + { + pDisplayParam->device_theme_Preference = pDisplayParamBlock->color_depth_and_device_technology.device_theme_preference; + } + pDisplayParam->gamma_x100 = (pDisplayParamBlock->gamma_EOTF + 100); + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20Timing7( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + 
NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TIMING_7_BLOCK *pTiming7Block = NULL; + NvU32 descriptorCount = 0; + NvU8 revision = 0; + NvU8 i = 0; + NvU8 startSeqNumber = 0; + + NVT_TIMING newTiming; + + pTiming7Block = (const DISPLAYID_2_0_TIMING_7_BLOCK *)pDataBlock; + + // Based on the DisplayID_2_0_E7 spec: + // the Future descriptor can be defined with more than 20 Byte per descriptor without creating a new timing type + if (pTiming7Block->header.payload_bytes_len == 0) + { + if (pDataBlock->data_bytes % sizeof(DISPLAYID_2_0_TIMING_7_DESCRIPTOR) != 0) + { + return NVT_STATUS_ERR; + } + + revision = pTiming7Block->header.revision; + descriptorCount = pDataBlock->data_bytes / (sizeof(DISPLAYID_2_0_TIMING_7_DESCRIPTOR) + pTiming7Block->header.payload_bytes_len); + + if (descriptorCount < 1 || descriptorCount > DISPLAYID_2_0_TIMING_7_MAX_DESCRIPTORS) + { + return NVT_STATUS_ERR; + } + + if (pDisplayIdInfo != NULL) + { + startSeqNumber = getExistedTimingSeqNumber(pDisplayIdInfo, NVT_TYPE_DISPLAYID_7); + } + + for (i = 0; i < descriptorCount; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + if (parseDisplayId20Timing7Descriptor(&pTiming7Block->descriptors[i], &newTiming, startSeqNumber+i) == NVT_STATUS_SUCCESS) + { + newTiming.etc.flag |= (revision >= DISPLAYID_2_0_TYPE7_DSC_PASSTHRU_REVISION && pTiming7Block->header.dsc_passthrough == 1) ? + NVT_FLAG_DISPLAYID_T7_DSC_PASSTHRU : + 0; + + if (revision >= DISPLAYID_2_0_TYPE7_YCC420_SUPPORT_REVISION) + { + newTiming.etc.flag |= pTiming7Block->descriptors[i].options.is_preferred_or_ycc420 ? NVT_FLAG_DISPLAYID_T7_T8_EXPLICT_YUV420 : 0; + + if (pTiming7Block->descriptors[i].options.is_preferred_or_ycc420) // YCC 420 support + { + UPDATE_BPC_FOR_COLORFORMAT(newTiming.etc.yuv420, 0, 1, 1, 1, 0, 1); + } + } + else + { + newTiming.etc.flag |= pTiming7Block->descriptors[i].options.is_preferred_or_ycc420 ? NVT_FLAG_DISPLAYID_DTD_PREFERRED_TIMING : 0; + } + + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), "DID20-Type7:#%2d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status), + (int)newTiming.HVisible, + (int)((newTiming.interlaced ? 2 : 1)*newTiming.VVisible), + (int)newTiming.etc.rrx1k/1000, + (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? 
"I":"P")); + newTiming.etc.name[sizeof(newTiming.etc.name) - 1] = '\0'; + newTiming.etc.rep = 0x1; + + if (!assignNextAvailableDisplayId20Timing(pDisplayIdInfo, &newTiming)) + { + break; + } + } + else + { + if (pDisplayIdInfo == NULL) return NVT_STATUS_ERR; + } + } + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20Timing8( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TIMING_8_BLOCK *pTiming8Block = NULL; + NVT_TIMING newTiming; + + NvU8 codeCount = 0; + NvU8 startSeqNumber = 0; + NvU8 i; + + pTiming8Block = (const DISPLAYID_2_0_TIMING_8_BLOCK *)pDataBlock; + codeCount = pDataBlock->data_bytes; + + if (codeCount == 0) + { + nvt_assert(0 && "No available byte code!"); + return NVT_STATUS_SUCCESS; + } + + if (codeCount > DISPLAYID_2_0_TIMING_8_MAX_CODES) + { + nvt_assert(0 && "one byte code is out of range!"); + return NVT_STATUS_SUCCESS; + } + + if (pDisplayIdInfo != NULL) + { + startSeqNumber = getExistedTimingSeqNumber(pDisplayIdInfo, NVT_TYPE_DISPLAYID_8); + } + + for (i = 0; i < codeCount; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayId20Timing8Descriptor(&pTiming8Block->timingCode, &newTiming, + pTiming8Block->header.timing_code_type, + pTiming8Block->header.timing_code_size, i, startSeqNumber+i) == NVT_STATUS_SUCCESS) + { + newTiming.etc.flag |= ((pTiming8Block->header.revision == 1) && pTiming8Block->header.is_support_yuv420) ? + NVT_FLAG_DISPLAYID_T7_T8_EXPLICT_YUV420 : + 0; + + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), "DID20-Type8:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status), + (int)newTiming.HVisible, (int)newTiming.VVisible, + (int)newTiming.etc.rrx1k/1000, (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? 
"I":"P")); + newTiming.etc.name[sizeof(newTiming.etc.name) - 1] = '\0'; + newTiming.etc.rep = 0x1; + + if (!assignNextAvailableDisplayId20Timing(pDisplayIdInfo, &newTiming)) + { + break; + } + } + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20Timing9( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TIMING_9_BLOCK *pTiming9Block = NULL; + NVT_TIMING newTiming; + NvU32 descriptorCount = 0; + NvU8 startSeqNumber = 0; + NvU8 i = 0; + + descriptorCount = pDataBlock->data_bytes / sizeof(DISPLAYID_2_0_TIMING_9_DESCRIPTOR); + if (descriptorCount < 1 || descriptorCount > DISPLAYID_2_0_TIMING_9_MAX_DESCRIPTORS) + { + return NVT_STATUS_ERR; + } + + pTiming9Block = (const DISPLAYID_2_0_TIMING_9_BLOCK *)pDataBlock; + + if (pDisplayIdInfo != NULL) + { + startSeqNumber = getExistedTimingSeqNumber(pDisplayIdInfo, NVT_TYPE_DISPLAYID_9); + } + + for (i = 0; i < descriptorCount; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayId20Timing9Descriptor(&pTiming9Block->descriptors[i], &newTiming, startSeqNumber+i) == NVT_STATUS_SUCCESS) + { + if (!assignNextAvailableDisplayId20Timing(pDisplayIdInfo, &newTiming)) + { + break; + } + } + else + { + if (pDisplayIdInfo == NULL) return NVT_STATUS_ERR; + } + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20Timing10( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + NvU32 descriptorCount = 0; + NvU8 startSeqNumber = 0; + NvU8 i = 0; + NvU8 eachOfDescriptorsSize = sizeof(DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR); + + const DISPLAYID_2_0_TIMING_10_BLOCK *pTiming10Block = NULL; + const DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR *p6bytesDescriptor = NULL; + + NVT_TIMING newTiming; + + pTiming10Block = (const DISPLAYID_2_0_TIMING_10_BLOCK *)pDataBlock; + + if (pTiming10Block->header.type != DISPLAYID_2_0_BLOCK_TYPE_TIMING_10) + { + return NVT_STATUS_ERR; + } + + if (pTiming10Block->header.payload_bytes_len == DISPLAYID_2_0_TIMING_10_PAYLOAD_BYTES_6) + { + descriptorCount = pDataBlock->data_bytes / sizeof(DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR); + + if (descriptorCount < 1 || descriptorCount > DISPLAYID_2_0_TIMING_10_MAX_6BYTES_DESCRIPTORS) + { + return NVT_STATUS_ERR; + } + } + else if (pTiming10Block->header.payload_bytes_len == DISPLAYID_2_0_TIMING_10_PAYLOAD_BYTES_7) + { + descriptorCount = pDataBlock->data_bytes / sizeof(DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR); + + if (descriptorCount < 1 || descriptorCount > DISPLAYID_2_0_TIMING_10_MAX_7BYTES_DESCRIPTORS) + { + return NVT_STATUS_ERR; + } + } + else if (pTiming10Block->header.payload_bytes_len == DISPLAYID_2_1_TIMING_10_PAYLOAD_BYTES_8) + { + descriptorCount = pDataBlock->data_bytes / sizeof(DISPLAYID_2_1_TIMING_10_8BYTES_DESCRIPTOR); + + if (descriptorCount < 1 || descriptorCount > DISPLAYID_2_1_TIMING_10_MAX_8BYTES_DESCRIPTORS) + { + return NVT_STATUS_ERR; + } + } + + eachOfDescriptorsSize += pTiming10Block->header.payload_bytes_len; + + if (pDisplayIdInfo != NULL) + { + startSeqNumber = getExistedTimingSeqNumber(pDisplayIdInfo, NVT_TYPE_DISPLAYID_10); + } + + for (i = 0; i < descriptorCount; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + if (NVT_STATUS_SUCCESS == parseDisplayId20Timing10Descriptor(&pTiming10Block->descriptors[i*eachOfDescriptorsSize], + &newTiming, + 
pTiming10Block->header.payload_bytes_len, + startSeqNumber+i)) + { + p6bytesDescriptor = (const DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR *)&pTiming10Block->descriptors[i*eachOfDescriptorsSize]; + + if (p6bytesDescriptor->options.ycc420_support) + { + UPDATE_BPC_FOR_COLORFORMAT(newTiming.etc.yuv420, 0, 1, 1, 1, 0, 1); + } + + if (p6bytesDescriptor->options.timing_formula == DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD_CRT_BASED) + { + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), "DID20-Type10:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status), + (int)newTiming.HVisible, + (int)newTiming.VVisible, + (int)newTiming.etc.rrx1k/1000, + (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? "I":"P")); + } + else + { + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), "DID20-Type10RB%d:#%3d:%dx%dx%3d.%03dHz/%s", + p6bytesDescriptor->options.timing_formula, + (int)NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status), + (int)newTiming.HVisible, + (int)newTiming.VVisible, + (int)newTiming.etc.rrx1k/1000, + (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? "I":"P")); + + } + newTiming.etc.name[sizeof(newTiming.etc.name) - 1] = '\0'; + newTiming.etc.rep = 0x1; + + if (!assignNextAvailableDisplayId20Timing(pDisplayIdInfo, &newTiming)) + { + break; + } + } + else + { + if (pDisplayIdInfo == NULL) + { + return NVT_STATUS_ERR; + } + continue; + } + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20RangeLimit( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_RANGE_LIMITS_BLOCK *pRangeLimitsBlock = NULL; + NVT_DISPLAYID_RANGE_LIMITS rangeLimits = {0}; + + // basic sanity check + if (pDataBlock->data_bytes != DISPLAYID_2_0_RANGE_LIMITS_BLOCK_PAYLOAD_LENGTH) + { + return NVT_STATUS_ERR; + } + + pRangeLimitsBlock = (const DISPLAYID_2_0_RANGE_LIMITS_BLOCK *)pDataBlock; + + rangeLimits.revision = pDataBlock->revision; + + rangeLimits.pclk_min = (pRangeLimitsBlock->pixel_clock_min[2] << 16 | + pRangeLimitsBlock->pixel_clock_min[1] << 8 | + pRangeLimitsBlock->pixel_clock_min[0]) + 1; + rangeLimits.pclk_max = (pRangeLimitsBlock->pixel_clock_max[2] << 16 | + pRangeLimitsBlock->pixel_clock_max[1] << 8 | + pRangeLimitsBlock->pixel_clock_max[0]) + 1; + rangeLimits.vfreq_min = pRangeLimitsBlock->vertical_frequency_min; + if (rangeLimits.revision == 1) + { + rangeLimits.vfreq_max = pRangeLimitsBlock->dynamic_video_timing_range_support.vertical_frequency_max_9_8 << 8 | + pRangeLimitsBlock->vertical_frequency_max_7_0; + } + else + { + rangeLimits.vfreq_max = pRangeLimitsBlock->vertical_frequency_max_7_0; + } + + rangeLimits.seamless_dynamic_video_timing_change = pRangeLimitsBlock->dynamic_video_timing_range_support.seamless_dynamic_video_timing_change; + + if (pDisplayIdInfo == NULL) + { + if (rangeLimits.vfreq_min > rangeLimits.vfreq_max || rangeLimits.pclk_min > rangeLimits.pclk_max || + rangeLimits.vfreq_min == 0 || rangeLimits.vfreq_max == 0) + { + nvt_assert(0 && "wrong range limit"); + status = NVT_STATUS_ERR; + } + return status; + } + + NVMISC_MEMCPY(&pDisplayIdInfo->range_limits, &rangeLimits, sizeof(NVT_DISPLAYID_RANGE_LIMITS)); + + return status; +} + +#define ADD_COLOR_SPACE_EOTF_COMBINATION(_pInterfaceFeatures, _color_space, _eotf) do { \ + (_pInterfaceFeatures)->colorspace_eotf_combination[(_pInterfaceFeatures)->combination_count].color_space = (_color_space); \ + 
(_pInterfaceFeatures)->colorspace_eotf_combination[(_pInterfaceFeatures)->combination_count].eotf = (_eotf); \ + (_pInterfaceFeatures)->combination_count++; \ + } while(0) + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20DisplayInterfaceFeatures( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + NvU32 i = 0; + const DISPLAYID_2_0_INTERFACE_FEATURES_BLOCK *pInterfaceFeaturesBlock = NULL; + NVT_DISPLAYID_INTERFACE_FEATURES *pInterfaceFeatures = NULL; + + if (pDataBlock->data_bytes < DISPLAYID_2_0_INTERFACE_FEATURES_BLOCK_PAYLOAD_LENGTH_MIN) + { + return NVT_STATUS_ERR; + } + + // Add more validation here if needed. + + if (pDisplayIdInfo == NULL) return status; + + pInterfaceFeatures = &pDisplayIdInfo->interface_features; + + pInterfaceFeaturesBlock = (const DISPLAYID_2_0_INTERFACE_FEATURES_BLOCK *)pDataBlock; + pInterfaceFeatures->revision = pDataBlock->revision; + + UPDATE_BPC_FOR_COLORFORMAT(pInterfaceFeatures->rgb444, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_6, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_8, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_10, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_12, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_14, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_16); + UPDATE_BPC_FOR_COLORFORMAT(pInterfaceFeatures->yuv444, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_6, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_8, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_10, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_12, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_14, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_16); + UPDATE_BPC_FOR_COLORFORMAT(pInterfaceFeatures->yuv422, + 0, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr422.bit_per_primary_8, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr422.bit_per_primary_10, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr422.bit_per_primary_12, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr422.bit_per_primary_14, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr422.bit_per_primary_16); + UPDATE_BPC_FOR_COLORFORMAT(pInterfaceFeatures->yuv420, + 0, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr420.bit_per_primary_8, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr420.bit_per_primary_10, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr420.bit_per_primary_12, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr420.bit_per_primary_14, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr420.bit_per_primary_16); + + // * 74.25MP/s + pInterfaceFeatures->yuv420_min_pclk = pInterfaceFeaturesBlock->min_pixel_rate_ycbcr420 * + 7425; + + pInterfaceFeatures->audio_capability.support_48khz = + pInterfaceFeaturesBlock->audio_capability.sample_rate_48_khz; + pInterfaceFeatures->audio_capability.support_44_1khz = + pInterfaceFeaturesBlock->audio_capability.sample_rate_44_1_khz; + pInterfaceFeatures->audio_capability.support_32khz = + pInterfaceFeaturesBlock->audio_capability.sample_rate_32_khz; + + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_srgb_eotf_srgb) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_SRGB, 
+ INTERFACE_EOTF_SRGB); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_bt601_eotf_bt601) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_BT601, + INTERFACE_EOTF_BT601); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_bt709_eotf_bt1886) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_BT709, + INTERFACE_EOTF_BT1886); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_adobe_rgb_eotf_adobe_rgb) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_ADOBE_RGB, + INTERFACE_EOTF_ADOBE_RGB); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_dci_p3_eotf_dci_p3) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_DCI_P3, + INTERFACE_EOTF_DCI_P3); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_bt2020_eotf_bt2020) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_BT2020, + INTERFACE_EOTF_BT2020); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_bt2020_eotf_smpte_st2084) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_BT2020, + INTERFACE_EOTF_SMPTE_ST2084); + } + + for (i = 0; i < pInterfaceFeaturesBlock->additional_color_space_and_eotf_count.count; i++) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + pInterfaceFeaturesBlock->additional_color_space_and_eotf[i].color_space, + pInterfaceFeaturesBlock->additional_color_space_and_eotf[i].eotf); + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20Stereo( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + if (pDisplayIdInfo == NULL) return status; + + // TODO: Implement the parsing here. 
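+    // (A full implementation would mirror the parsers above: validate
+    // pDataBlock->data_bytes against the stereo block payload length, then
+    // copy the stereo interface fields into pDisplayIdInfo.)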
+ + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20TiledDisplay( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TILED_DISPLAY_BLOCK *pTiledDisplayBlock = NULL; + NVT_DISPLAYID_TILED_DISPLAY_TOPOLOGY *pTileTopo = NULL; + + if (pDataBlock->data_bytes != DISPLAYID_2_0_TILED_DISPLAY_BLOCK_PAYLOAD_LENGTH) + { + return NVT_STATUS_ERR; + } + + if (pDisplayIdInfo == NULL) return status; + + pTiledDisplayBlock = (const DISPLAYID_2_0_TILED_DISPLAY_BLOCK *)pDataBlock; + pTileTopo = &pDisplayIdInfo->tile_topo; + + pTileTopo->revision = pDataBlock->revision; + + pTileTopo->capability.bSingleEnclosure = pTiledDisplayBlock->capability.single_enclosure; + pTileTopo->capability.bHasBezelInfo = pTiledDisplayBlock->capability.has_bezel_info; + pTileTopo->capability.multi_tile_behavior = pTiledDisplayBlock->capability.multi_tile_behavior; + pTileTopo->capability.single_tile_behavior = pTiledDisplayBlock->capability.single_tile_behavior; + + pTileTopo->topology.row = ((pTiledDisplayBlock->topo_loc_high.row << 5) | + (pTiledDisplayBlock->topo_low.row)) + 1; + pTileTopo->topology.col = ((pTiledDisplayBlock->topo_loc_high.col << 5) | + (pTiledDisplayBlock->topo_low.col)) + 1; + pTileTopo->location.x = ((pTiledDisplayBlock->topo_loc_high.x << 5) | + (pTiledDisplayBlock->loc_low.x)); + pTileTopo->location.y = ((pTiledDisplayBlock->topo_loc_high.y << 5) | + (pTiledDisplayBlock->loc_low.y)); + + pTileTopo->native_resolution.width = ((pTiledDisplayBlock->native_resolution.width_high << 8) | + pTiledDisplayBlock->native_resolution.width_low) + 1; + pTileTopo->native_resolution.height = ((pTiledDisplayBlock->native_resolution.height_high << 8) | + pTiledDisplayBlock->native_resolution.height_low) + 1; + + pTileTopo->bezel_info.top = (pTiledDisplayBlock->bezel_info.top * + pTiledDisplayBlock->bezel_info.pixel_density) / 10; + pTileTopo->bezel_info.bottom = (pTiledDisplayBlock->bezel_info.bottom * + pTiledDisplayBlock->bezel_info.pixel_density) / 10; + pTileTopo->bezel_info.right = (pTiledDisplayBlock->bezel_info.right * + pTiledDisplayBlock->bezel_info.pixel_density) / 10; + pTileTopo->bezel_info.left = (pTiledDisplayBlock->bezel_info.left * + pTiledDisplayBlock->bezel_info.pixel_density) / 10; + + pTileTopo->tile_topology_id.vendor_id = pTiledDisplayBlock->topo_id.vendor_id[0] << 16 | + pTiledDisplayBlock->topo_id.vendor_id[1] << 8 | + pTiledDisplayBlock->topo_id.vendor_id[2]; + pTileTopo->tile_topology_id.product_id = pTiledDisplayBlock->topo_id.product_id[1] << 8 | + pTiledDisplayBlock->topo_id.product_id[0]; + pTileTopo->tile_topology_id.serial_number = pTiledDisplayBlock->topo_id.serial_number[3] << 24 | + pTiledDisplayBlock->topo_id.serial_number[2] << 16 | + pTiledDisplayBlock->topo_id.serial_number[1] << 8 | + pTiledDisplayBlock->topo_id.serial_number[0]; + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20ContainerId( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_CONTAINERID_BLOCK *pContainerIdBlock = NULL; + NVT_DISPLAYID_CONTAINERID *pContainerId = NULL; + + if (pDataBlock->data_bytes != DISPLAYID_2_0_CONTAINERID_BLOCK_PAYLOAD_LENGTH) + { + return NVT_STATUS_ERR; + } + + if (pDisplayIdInfo == NULL) return status; + + pContainerIdBlock = (const DISPLAYID_2_0_CONTAINERID_BLOCK *)pDataBlock; + pContainerId = 
&pDisplayIdInfo->container_id; + + pContainerId->revision = pDataBlock->revision; + pContainerId->data1 = pContainerIdBlock->container_id[0] << 24 | + pContainerIdBlock->container_id[1] << 16 | + pContainerIdBlock->container_id[2] << 8 | + pContainerIdBlock->container_id[3]; + pContainerId->data2 = pContainerIdBlock->container_id[4] << 8 | + pContainerIdBlock->container_id[5]; + pContainerId->data3 = pContainerIdBlock->container_id[6] << 8 | + pContainerIdBlock->container_id[7]; + pContainerId->data4 = pContainerIdBlock->container_id[8] << 8 | + pContainerIdBlock->container_id[9]; + pContainerId->data5[0] = pContainerIdBlock->container_id[10]; + pContainerId->data5[1] = pContainerIdBlock->container_id[11]; + pContainerId->data5[2] = pContainerIdBlock->container_id[12]; + pContainerId->data5[3] = pContainerIdBlock->container_id[13]; + pContainerId->data5[4] = pContainerIdBlock->container_id[14]; + pContainerId->data5[5] = pContainerIdBlock->container_id[15]; + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20AdaptiveSync( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_ADAPTIVE_SYNC_BLOCK *pAdaptiveSyncBlock = NULL; + NvU32 descriptorCnt = 0; + NvU8 i = 0; + NvU8 minRR = 0; + NvU16 maxRR = 0; + + pAdaptiveSyncBlock = (const DISPLAYID_2_0_ADAPTIVE_SYNC_BLOCK *)pDataBlock; + + if (pAdaptiveSyncBlock->header.payload_bytes_adaptive_sync_len == 0) + { + // Sanity check + if (pDataBlock->data_bytes % sizeof(DISPLAYID_2_0_ADAPTIVE_SYNC_DESCRIPTOR) != 0) + { + status = NVT_STATUS_ERR; + } + + descriptorCnt = pDataBlock->data_bytes / sizeof(DISPLAYID_2_0_ADAPTIVE_SYNC_DESCRIPTOR); + + if (descriptorCnt < 1) return NVT_STATUS_ERR; + + if (pDisplayIdInfo == NULL) + { + for (i = 0; i < descriptorCnt; i++) + { + minRR = pAdaptiveSyncBlock->descriptors[i].min_refresh_rate; + maxRR = (pAdaptiveSyncBlock->descriptors[i].max_refresh_rate.max_rr_9_8 << 8 | + pAdaptiveSyncBlock->descriptors[i].max_refresh_rate.max_rr_7_0) + 1; + if (minRR > (maxRR + 1) || minRR == 0 || maxRR == 0) + { + status = NVT_STATUS_ERR; + } + } + return status; + } + + pDisplayIdInfo->total_adaptive_sync_descriptor = descriptorCnt; + + for (i = 0; i < descriptorCnt; i++) + { + // Byte 0 Adaptive-Sync Operation and Range Information + pDisplayIdInfo->adaptive_sync_descriptor[i].u.information.adaptive_sync_range = pAdaptiveSyncBlock->descriptors[i].operation_range_info.range; + pDisplayIdInfo->adaptive_sync_descriptor[i].u.information.duration_inc_flicker_perf = pAdaptiveSyncBlock->descriptors[i].operation_range_info.successive_frame_inc_tolerance; + pDisplayIdInfo->adaptive_sync_descriptor[i].u.information.modes = pAdaptiveSyncBlock->descriptors[i].operation_range_info.modes; + pDisplayIdInfo->adaptive_sync_descriptor[i].u.information.seamless_not_support = pAdaptiveSyncBlock->descriptors[i].operation_range_info.seamless_transition_not_support; + pDisplayIdInfo->adaptive_sync_descriptor[i].u.information.duration_dec_flicker_perf = pAdaptiveSyncBlock->descriptors[i].operation_range_info.successive_frame_dec_tolerance; + + // 6.2 format (six integer bits and two fractional bits) a value range of 0.00 to 63.75ms + pDisplayIdInfo->adaptive_sync_descriptor[i].max_duration_inc = pAdaptiveSyncBlock->descriptors[i].max_single_frame_inc; + pDisplayIdInfo->adaptive_sync_descriptor[i].min_rr = pAdaptiveSyncBlock->descriptors[i].min_refresh_rate; + pDisplayIdInfo->adaptive_sync_descriptor[i].max_rr = 
(pAdaptiveSyncBlock->descriptors[i].max_refresh_rate.max_rr_9_8 << 8 | + pAdaptiveSyncBlock->descriptors[i].max_refresh_rate.max_rr_7_0) + 1; + // 6.2 format (six integer bits and two fractional bits) a value range of 0.00 to 63.75ms + pDisplayIdInfo->adaptive_sync_descriptor[i].max_duration_dec = pAdaptiveSyncBlock->descriptors[i].max_single_frame_dec; + } + } + else // all other values are RESERVED. + { + if (pDisplayIdInfo == NULL) return NVT_STATUS_ERR; + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20ARVRHMD( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + if (pDisplayIdInfo == NULL) return status; + + // TODO: Implement the parsing here. + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20ARVRLayer( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + if (pDisplayIdInfo == NULL) return status; + + // TODO: Implement the parsing here. + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20BrightnessLuminanceRange( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_BRIGHTNESS_LUMINANCE_RANGE_BLOCK *pBrightnessLuminanceRangeBlock = NULL; + NVT_DISPLAYID_BRIGHTNESS_LUMINANCE_RANGE *pluminanceRanges = NULL; + + if ((pDataBlock == NULL) || pDataBlock->data_bytes != DISPLAYID_2_0_BRIGHTNESS_LUMINANCE_RANGE_BLOCK_PAYLOAD_LENGTH) + { + return NVT_STATUS_ERR; + } + + if (pDisplayIdInfo == NULL) return status; + + pBrightnessLuminanceRangeBlock = (const DISPLAYID_2_0_BRIGHTNESS_LUMINANCE_RANGE_BLOCK *)pDataBlock; + pluminanceRanges = &pDisplayIdInfo->luminance_ranges; + + pluminanceRanges->revision = pDataBlock->revision; + pluminanceRanges->min_sdr_luminance = pBrightnessLuminanceRangeBlock->min_sdr_luminance; + pluminanceRanges->max_sdr_luminance = pBrightnessLuminanceRangeBlock->max_sdr_luminance; + pluminanceRanges->max_boost_sdr_luminance = pBrightnessLuminanceRangeBlock->max_boost_sdr_luminance; + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20VendorSpecific( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_VENDOR_SPECIFIC_BLOCK *block = NULL; + NVT_DISPLAYID_VENDOR_SPECIFIC *pVendorSpecific = NULL; + NvU32 ieee_oui = 0; + + // Add more validation here if needed + + if (pDisplayIdInfo == NULL) return status; + + block = (const DISPLAYID_2_0_VENDOR_SPECIFIC_BLOCK*)pDataBlock; + pVendorSpecific = &pDisplayIdInfo->vendor_specific; + + ieee_oui = (NvU32)((block->vendor_id[0] << 16) | + (block->vendor_id[1] << 8) | + (block->vendor_id[2])); + + switch (ieee_oui) + { + case NVT_VESA_VENDOR_SPECIFIC_IEEE_ID: + // TODO: below parser shall be updated if DID21 changed in the future + if (pDataBlock->data_bytes == NVT_VESA_VENDOR_SPECIFIC_LENGTH) + { + pVendorSpecific->vesaVsdb.data_struct_type.type = + block->vendor_specific_data[0] & NVT_VESA_ORG_VSDB_DATA_TYPE_MASK; + pVendorSpecific->vesaVsdb.data_struct_type.color_space_and_eotf = + (block->vendor_specific_data[0] & NVT_VESA_ORG_VSDB_COLOR_SPACE_AND_EOTF_MASK) >> NVT_VESA_ORG_VSDB_COLOR_SPACE_AND_EOTF_SHIFT; + pVendorSpecific->vesaVsdb.overlapping.pixels_overlapping_count = + block->vendor_specific_data[1] & 
NVT_VESA_ORG_VSDB_PIXELS_OVERLAPPING_MASK;
+            pVendorSpecific->vesaVsdb.overlapping.multi_sst =
+                (block->vendor_specific_data[1] & NVT_VESA_ORG_VSDB_MULTI_SST_MODE_MASK) >> NVT_VESA_ORG_VSDB_MULTI_SST_MODE_SHIFT;
+            pVendorSpecific->vesaVsdb.pass_through_integer.pass_through_integer_dsc =
+                block->vendor_specific_data[2] & NVT_VESA_ORG_VSDB_PASS_THROUGH_INTEGER_MASK;
+            pVendorSpecific->vesaVsdb.pass_through_fractional.pass_through_fraction_dsc =
+                block->vendor_specific_data[3] & NVT_VESA_ORG_VSDB_PASS_THROUGH_FRACTIOINAL_MASK;
+        }
+        else
+        {
+            status = NVT_STATUS_ERR;
+        }
+        break;
+
+    default:
+        break;
+    }
+
+    return status;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS
+parseDisplayId20CtaData(
+    const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock,
+    NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo)
+{
+    NVT_STATUS status = NVT_STATUS_SUCCESS;
+
+    NVT_EDID_CEA861_INFO *p861Info = NULL;
+    const DISPLAYID_2_0_CTA_BLOCK *ctaBlock = NULL;
+    NvU8 *pcta_data = NULL;
+
+    ctaBlock = (const DISPLAYID_2_0_CTA_BLOCK *)pDataBlock;
+
+    // WAR: cast through (size_t) to strip const from the member; this avoids
+    // failures with older Linux compilers in DVS.
+    pcta_data = (NvU8 *)(size_t)ctaBlock->cta_data;
+
+    if (pDisplayIdInfo == NULL)
+    {
+        status = parseCta861DataBlockInfo(pcta_data, pDataBlock->data_bytes, NULL);
+        return status;
+    }
+    else
+    {
+        status = parseCta861DataBlockInfo(pcta_data, pDataBlock->data_bytes, &pDisplayIdInfo->cta.cta861_info);
+    }
+
+    if (status != NVT_STATUS_SUCCESS)
+    {
+        return status;
+    }
+
+    p861Info = &(pDisplayIdInfo->cta.cta861_info);
+
+    parseCta861VsdbBlocks(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK);
+    parseCta861VsvdbBlocks(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK);
+
+    parseCta861HfScdb(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK);
+    // Reuse the CTA-861 parser for the 861 portion of the short timings.
+    parse861bShortTiming(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK);
+    // yuv420-only video
+    parse861bShortYuv420Timing(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK);
+
+    if (pDisplayIdInfo->cta.cta861_info.valid.hdr_static_metadata != 0)
+    {
+        parseCta861HdrStaticMetadataDataBlock(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK);
+    }
+
+    // See CEA-861-F, section 7.5.12, for the VFPDB (Video Format Preference Data Block).
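+    // total_svr is the number of Short Video References carried by the VFPDB;
+    // each SVR names a VIC (or a DTD index) in the sink's order of preference,
+    // which the helper below uses to mark the native/preferred timing among
+    // the timings parsed above.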
+    if (p861Info->total_svr > 0)
+    {
+        parseCta861NativeOrPreferredTiming(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK);
+    }
+
+    return status;
+}
+
+// Helper function: Euclid's algorithm. Despite the name, this computes the
+// greatest common divisor of x and y.
+CODE_SEGMENT(PAGE_DD_CODE)
+static NvU32
+greatestCommonDenominator(
+    NvU32 x,
+    NvU32 y)
+{
+    NvU32 g = 0;
+
+    while (x > 0)
+    {
+        g = x;
+        x = y % x;
+        y = g;
+    }
+    return g;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+static NVT_STATUS
+getPrimaryUseCase(
+    NvU8 product_type,
+    NVT_DISPLAYID_PRODUCT_PRIMARY_USE_CASE *primary_use_case)
+{
+    NVT_STATUS status = NVT_STATUS_SUCCESS;
+
+    switch (product_type)
+    {
+    case DISPLAYID_2_0_PROD_TEST:
+        *primary_use_case = PRODUCT_PRIMARY_USE_TEST_EQUIPMENT;
+        break;
+    case DISPLAYID_2_0_PROD_GENERIC_DISPLAY:
+        *primary_use_case = PRODUCT_PRIMARY_USE_GENERIC_DISPLAY;
+        break;
+    case DISPLAYID_2_0_PROD_TELEVISION:
+        *primary_use_case = PRODUCT_PRIMARY_USE_TELEVISION;
+        break;
+    case DISPLAYID_2_0_PROD_DESKTOP_PRODUCTIVITY_DISPLAY:
+        *primary_use_case = PRODUCT_PRIMARY_USE_DESKTOP_PRODUCTIVITY;
+        break;
+    case DISPLAYID_2_0_PROD_DESKTOP_GAMING_DISPLAY:
+        *primary_use_case = PRODUCT_PRIMARY_USE_DESKTOP_GAMING;
+        break;
+    case DISPLAYID_2_0_PROD_PRESENTATION_DISPLAY:
+        *primary_use_case = PRODUCT_PRIMARY_USE_PRESENTATION;
+        break;
+    case DISPLAYID_2_0_PROD_HMD_VR:
+        *primary_use_case = PRODUCT_PRIMARY_USE_HEAD_MOUNT_VIRTUAL_REALITY;
+        break;
+    case DISPLAYID_2_0_PROD_HMD_AR:
+        *primary_use_case = PRODUCT_PRIMARY_USE_HEAD_MOUNT_AUGMENTED_REALITY;
+        break;
+    case DISPLAYID_2_0_PROD_EXTENSION:
+        status = NVT_STATUS_ERR;
+        break;
+    default:
+        status = NVT_STATUS_ERR;
+    }
+
+    return status;
+}
+
+// used in DID20 and DID20ext
+CODE_SEGMENT(PAGE_DD_CODE)
+NvU8
+computeDisplayId20SectionCheckSum(
+    const NvU8 *pSectionBytes,
+    NvU32 length)
+{
+
+    NvU32 i = 0;
+    NvU32 checkSum = 0;
+
+    // Each DisplayID section is composed of five mandatory bytes:
+    //     DisplayID Structure Version and Revision
+    //     Section Size
+    //     Product Primary Use Case
+    //     Extension Count
+    //     Checksum
+    // The checksum byte is chosen so that all bytes of a valid section,
+    // including the checksum itself, sum to 0 modulo 256.
+    for (i = 0, checkSum = 0; i < length; i++)
+    {
+        checkSum += pSectionBytes[i];
+    }
+
+    return (checkSum & 0xFF);
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NvBool
+assignNextAvailableDisplayId20Timing(
+    NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo,
+    const NVT_TIMING *pTiming)
+{
+    if (pDisplayIdInfo == NULL) return NV_TRUE;
+
+    if (pDisplayIdInfo->total_timings >= COUNT(pDisplayIdInfo->timing))
+    {
+        return NV_FALSE;
+    }
+
+    pDisplayIdInfo->timing[pDisplayIdInfo->total_timings] = *pTiming;
+    pDisplayIdInfo->total_timings++;
+
+    return NV_TRUE;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS
+parseDisplayId20Timing7Descriptor(
+    const void *pVoidDescriptor,
+    NVT_TIMING *pTiming,
+    NvU8 count)
+{
+    NVT_STATUS status = NVT_STATUS_SUCCESS;
+    NvU32 gdc = 0;
+
+    const DISPLAYID_2_0_TIMING_7_DESCRIPTOR *pDescriptor = NULL;
+
+    pDescriptor = (const DISPLAYID_2_0_TIMING_7_DESCRIPTOR *)pVoidDescriptor;
+
+    // pclk is in 1 kHz units; the stored 24-bit field encodes (rate - 1)
+    pTiming->pclk1khz = ((pDescriptor->pixel_clock[2] << 16 |
+                          pDescriptor->pixel_clock[1] << 8 |
+                          pDescriptor->pixel_clock[0]) + 1);
+
+    pTiming->HBorder = 0;
+    pTiming->VBorder = 0;
+
+    pTiming->HVisible = ((pDescriptor->horizontal.active_image_pixels[1] << 8) |
+                         (pDescriptor->horizontal.active_image_pixels[0])) + 1;
+    pTiming->VVisible = ((pDescriptor->vertical.active_image_lines[1] << 8) |
+                         (pDescriptor->vertical.active_image_lines[0])) + 1;
+
+    pTiming->HTotal = (((pDescriptor->horizontal.blank_pixels[1] << 8) |
+                        (pDescriptor->horizontal.blank_pixels[0])) + 1) +
+                      pTiming->HVisible;
+    pTiming->VTotal =
(((pDescriptor->vertical.blank_lines[1] << 8) | + (pDescriptor->vertical.blank_lines[0])) + 1) + + pTiming->VVisible; + + pTiming->HFrontPorch = ((pDescriptor->horizontal.front_porch_pixels_high << 8) | + (pDescriptor->horizontal.front_porch_pixels_low)) + 1; + pTiming->VFrontPorch = ((pDescriptor->vertical.front_porch_lines_high << 8) | + (pDescriptor->vertical.front_porch_lines_low)) + 1; + + pTiming->HSyncWidth = ((pDescriptor->horizontal.sync_width_pixels[1] << 8) | + (pDescriptor->horizontal.sync_width_pixels[0])) + 1; + pTiming->VSyncWidth = ((pDescriptor->vertical.sync_width_lines[1] << 8) | + (pDescriptor->vertical.sync_width_lines[0])) + 1; + + pTiming->HSyncPol = pDescriptor->horizontal.sync_polarity ? NVT_H_SYNC_POSITIVE : + NVT_H_SYNC_NEGATIVE; + pTiming->VSyncPol = pDescriptor->vertical.sync_polarity ? NVT_V_SYNC_POSITIVE : + NVT_V_SYNC_NEGATIVE; + + // EDID used in DP1.4 Compliance test had incorrect HBlank listed, leading to wrong raster sizes being set by driver (bug 2714607) + // Filter incorrect timings here. HTotal must cover sufficient blanking time + if (pTiming->HTotal < (pTiming->HVisible + pTiming->HFrontPorch + pTiming->HSyncWidth)) + { + return NVT_STATUS_ERR; + } + + pTiming->interlaced = pDescriptor->options.interface_frame_scanning_type; + + switch (pDescriptor->options.aspect_ratio) + { + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_1_1: + pTiming->etc.aspect = (1 << 16) | 1; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_5_4: + pTiming->etc.aspect = (5 << 16) | 4; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_4_3: + pTiming->etc.aspect = (4 << 16) | 3; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_15_9: + pTiming->etc.aspect = (15 << 16) | 9; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_16_9: + pTiming->etc.aspect = (16 << 16) | 9; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_16_10: + pTiming->etc.aspect = (16 << 16) | 10; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_64_27: + pTiming->etc.aspect = (64 << 16) | 27; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_256_135: + pTiming->etc.aspect = (256 << 16) | 135; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_CALCULATE: + gdc = greatestCommonDenominator(pTiming->HVisible, pTiming->VVisible); + if (gdc != 0) + { + pTiming->etc.aspect = ((pTiming->HVisible / gdc) << 16) | + (pTiming->VVisible / gdc); + } + else + { + pTiming->etc.aspect = 0; + } + break; + default: + pTiming->etc.aspect = 0; + } + + pTiming->etc.rr = NvTiming_CalcRR(pTiming->pclk1khz, + pTiming->interlaced, + pTiming->HTotal, + pTiming->VTotal); + pTiming->etc.rrx1k = NvTiming_CalcRRx1k(pTiming->pclk1khz, + pTiming->interlaced, + pTiming->HTotal, + pTiming->VTotal); + + // pclk change to 10kHz + pTiming->pclk = (pTiming->pclk1khz + 5) / 10; + + pTiming->etc.status = NVT_STATUS_DISPLAYID_7N(++count); + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20Timing8Descriptor( + const void *pVoidDescriptor, + NVT_TIMING *pTiming, + NvU8 codeType, + NvU8 codeSize, + NvU8 idx, + NvU8 count) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + const NvU8 *pTimingCode = (const NvU8 *)pVoidDescriptor; + const NvU16 *pTiming2ByteCode = (const NvU16 *)pVoidDescriptor; + + if (codeSize == DISPLAYID_2_0_TIMING_CODE_SIZE_1_BYTE) + { + switch (codeType) + { + case DISPLAYID_2_0_TIMING_CODE_DMT: //single-byte DMT ID Codes + status = NvTiming_EnumDMT((NvU32)(pTimingCode[idx]), pTiming); + break; + case DISPLAYID_2_0_TIMING_CODE_CTA_VIC: + status = NvTiming_EnumCEA861bTiming((NvU32)(pTimingCode[idx]), pTiming); + 
break; + case DISPLAYID_2_0_TIMING_CODE_HDMI_VIC: + status = NvTiming_EnumHdmiVsdbExtendedTiming((NvU32)(pTimingCode[idx]), pTiming); + break; + default: + { + nvt_assert(0 && "RESERVED timing code type"); + status = NVT_STATUS_ERR; + } + break; + } + } + else if (codeSize == DISPLAYID_2_0_TIMING_CODE_SIZE_2_BYTE) + { + // Standard two-byte codes + if (codeType == DISPLAYID_2_0_TIMING_CODE_DMT) + { + status = NvTiming_EnumStdTwoBytesCode((NvU16)pTiming2ByteCode[idx], pTiming); + } + } + + if (status == NVT_STATUS_SUCCESS) + { + pTiming->etc.status = NVT_STATUS_DISPLAYID_8N(++count); + return status; + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20Timing9Descriptor( + const void *pVoidDescriptor, + NVT_TIMING *pTiming, + NvU8 count) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TIMING_9_DESCRIPTOR* pDescriptor = NULL; + NvU32 width = 0; + NvU32 height = 0; + NvU32 rr = 0; + + pDescriptor = (const DISPLAYID_2_0_TIMING_9_DESCRIPTOR *)pVoidDescriptor; + + width = (pDescriptor->horizontal_active_pixels[1] << 8 | pDescriptor->horizontal_active_pixels[0]) + 1; + height = (pDescriptor->vertical_active_lines[1] << 8 | pDescriptor->vertical_active_lines[0]) + 1; + rr = pDescriptor->refresh_rate + 1; + + switch (pDescriptor->options.timing_formula) + { + case DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD_CRT_BASED: + status = NvTiming_CalcCVT(width, height, rr, NVT_PROGRESSIVE, pTiming); + break; + case DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_1: + status = NvTiming_CalcCVT_RB(width, height, rr, NVT_PROGRESSIVE, pTiming); + break; + case DISPLAYID_2_0_TIMING_FORMULA_CVT_2_1_REDUCED_BLANKING_2: + status = NvTiming_CalcCVT_RB2(width, height, rr, pDescriptor->options.rr_1000div1001_support, NV_FALSE, pTiming); + break; + default: + { + nvt_assert(0 && "Unknown timing formula"); + status = NVT_STATUS_ERR; + } + break; + } + + if (status == NVT_STATUS_SUCCESS) + { + NVMISC_MEMSET(pTiming->etc.name, 0, sizeof(pTiming->etc.name)); + pTiming->etc.status = NVT_STATUS_DISPLAYID_9N(++count); + + if (pDescriptor->options.timing_formula == DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD_CRT_BASED) + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type9:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? "I":"P")); + } + else + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type9RB%d:#%3d:%dx%dx%3d.%03dHz/%s", + pDescriptor->options.timing_formula, + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? 
"I":"P")); + } + } + pTiming->etc.name[sizeof(pTiming->etc.name) - 1] = '\0'; + pTiming->etc.rep = 0x1; + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20Timing10Descriptor( + const void *pDescriptor, + NVT_TIMING *pTiming, + NvU8 payloadBytes, + NvU8 count) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR* p6bytesDescriptor = NULL; + const DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR* p7bytesDescriptor = NULL; + const DISPLAYID_2_1_TIMING_10_8BYTES_DESCRIPTOR* p8bytesDescriptor = NULL; + NvU32 width = 0; + NvU32 height = 0; + NvU32 rr = 0; + + p6bytesDescriptor = (const DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR *)pDescriptor; + + width = (p6bytesDescriptor->horizontal_active_pixels[1] << 8 | p6bytesDescriptor->horizontal_active_pixels[0]) + 1; + height = (p6bytesDescriptor->vertical_active_lines[1] << 8 | p6bytesDescriptor->vertical_active_lines[0]) + 1; + rr = p6bytesDescriptor->refresh_rate + 1; + + if (payloadBytes == DISPLAYID_2_0_TIMING_10_PAYLOAD_BYTES_7) + { + p7bytesDescriptor = (const DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR *)pDescriptor; + rr = (p7bytesDescriptor->descriptor_6_bytes.refresh_rate | p7bytesDescriptor->refresh_rate_high << 8) + 1; + } + else if (payloadBytes == DISPLAYID_2_1_TIMING_10_PAYLOAD_BYTES_8) + { + p8bytesDescriptor = (const DISPLAYID_2_1_TIMING_10_8BYTES_DESCRIPTOR *)pDescriptor; + rr = (p8bytesDescriptor->descriptor_7_bytes.descriptor_6_bytes.refresh_rate | + p8bytesDescriptor->descriptor_7_bytes.refresh_rate_high << 8) + 1; + } + + switch (p6bytesDescriptor->options.timing_formula) + { + case DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD_CRT_BASED: + status = NvTiming_CalcCVT(width, height, rr, NVT_PROGRESSIVE, pTiming); + break; + case DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_1: + status = NvTiming_CalcCVT_RB(width, height, rr, NVT_PROGRESSIVE, pTiming); + break; + case DISPLAYID_2_0_TIMING_FORMULA_CVT_2_1_REDUCED_BLANKING_2: + if (p8bytesDescriptor != NULL) + { + status = NvTiming_CalcCVT_RB2(width, height, rr, p6bytesDescriptor->options.rr1000div1001_or_hblank, p8bytesDescriptor->additional_mini_vblank, pTiming); + } + else + { + status = NvTiming_CalcCVT_RB2(width, height, rr, p6bytesDescriptor->options.rr1000div1001_or_hblank, NV_FALSE, pTiming); + } + + break; + case DISPLAYID_2_0_TIMING_FORMULA_CVT_2_1_REDUCED_BLANKING_3: + { + NvU32 deltaHBlank = 0; + NvU32 multiplier = DISPLAYID_2_0_TIMING_10_NOMINAL_MINIMUM_VBLANK; + + if (p7bytesDescriptor != NULL) + { + if (p6bytesDescriptor->options.rr1000div1001_or_hblank == 0) // Horizontal Blank in Pixels = [Field Value] * 8 + 80 + { + deltaHBlank = p7bytesDescriptor->delta_hblank * 8; + } + else if (p6bytesDescriptor->options.rr1000div1001_or_hblank == 1) + { + if (p7bytesDescriptor->delta_hblank <= 5) + deltaHBlank = (p7bytesDescriptor->delta_hblank * 8 + 160) - 80; + else // if 5 < Field Value <=7 + deltaHBlank = (160 - ((p7bytesDescriptor->delta_hblank - 5) * 8)) - 80; + } + + status = NvTiming_CalcCVT_RB3(width, + height, + rr, + deltaHBlank, + p7bytesDescriptor->additional_vblank_timing * multiplier, + 0, + p6bytesDescriptor->options.early_vsync, + pTiming); + + } + else if (p8bytesDescriptor != NULL) + { + if (p6bytesDescriptor->options.rr1000div1001_or_hblank == 0) // Horizontal Blank in Pixels = [Field Value] * 8 + 80 + { + deltaHBlank = p8bytesDescriptor->descriptor_7_bytes.delta_hblank * 8; + } + else if (p6bytesDescriptor->options.rr1000div1001_or_hblank == 1) + { + if 
(p8bytesDescriptor->descriptor_7_bytes.delta_hblank <= 5)
+                    deltaHBlank = (p8bytesDescriptor->descriptor_7_bytes.delta_hblank * 8 + 160) - 80;
+                else // if 5 < Field Value <= 7
+                    deltaHBlank = (160 - ((p8bytesDescriptor->descriptor_7_bytes.delta_hblank - 5) * 8)) - 80;
+            }
+
+            if (p8bytesDescriptor->additional_mini_vblank == 1)
+                multiplier = DISPLAYID_2_1_TIMING_10_ALTERNATE_MINIMUM_VBLANK;
+
+            status = NvTiming_CalcCVT_RB3(width,
+                                          height,
+                                          rr,
+                                          deltaHBlank,
+                                          p8bytesDescriptor->descriptor_7_bytes.additional_vblank_timing * multiplier,
+                                          p8bytesDescriptor->additional_mini_vblank,
+                                          p6bytesDescriptor->options.early_vsync,
+                                          pTiming);
+        }
+        else // 6 bytes descriptor
+        {
+            // Only the 80-pixel HBlank delta is assigned here; per the
+            // DisplayID 2.1a spec the actual HBlank is 160 pixels.
+            if (p6bytesDescriptor->options.rr1000div1001_or_hblank == 1)
+                deltaHBlank = 80;
+
+            status = NvTiming_CalcCVT_RB3(width, height, rr, deltaHBlank, 0, 0, p6bytesDescriptor->options.early_vsync, pTiming);
+        }
+    }
+    break;
+    default:
+    {
+        nvt_assert(0 && "Unknown timing formula");
+        status = NVT_STATUS_ERR;
+    }
+    break;
+    }
+
+    if (status == NVT_STATUS_SUCCESS)
+    {
+        pTiming->etc.status = NVT_STATUS_DISPLAYID_10N(++count);
+        return status;
+    }
+
+    return status;
+}
+
+// Get the number of timings of the given type that are already stored; the
+// result is used as the next sequence number.
+CODE_SEGMENT(PAGE_DD_CODE)
+static NvU8
+getExistedTimingSeqNumber(
+    NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo,
+    enum NVT_TIMING_TYPE timingType)
+{
+    NvU8 count = 0;
+    NvU8 i = 0;
+
+    switch (timingType)
+    {
+    case NVT_TYPE_DISPLAYID_7:
+    case NVT_TYPE_DISPLAYID_8:
+    case NVT_TYPE_DISPLAYID_9:
+    case NVT_TYPE_DISPLAYID_10:
+        break;
+    default:
+        return count;
+    }
+
+    for (i = 0; i < pDisplayIdInfo->total_timings; i++)
+    {
+        if (NVT_GET_TIMING_STATUS_TYPE(pDisplayIdInfo->timing[i].etc.status) == timingType)
+        {
+            ++count;
+        }
+    }
+
+    return count;
+}
+
+// Get the DisplayID 2.0 version.
+CODE_SEGMENT(PAGE_DD_CODE)
+NvU32 getDID2Version(NvU8 *pData, NvU32 *pVer)
+{
+    const DISPLAYID_2_0_SECTION *pSection = (const DISPLAYID_2_0_SECTION*)pData;
+
+    *pVer = 0;
+    if (pSection->header.version == 0x2)
+    {
+        *pVer = (((NvU32)pSection->header.version) << 8) + ((NvU32)pSection->header.revision);
+    }
+    else
+        return NVT_STATUS_ERR; // unrecognized DisplayID 2.0 version
+
+    return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+void
+updateColorFormatForDisplayId20Timings(
+    NVT_DISPLAYID_2_0_INFO *pDisplayId20Info,
+    NvU32 timingIdx)
+{
+    // pDisplayId20Info: parsed DisplayID 2.0 info
+    NVT_TIMING *pT = &pDisplayId20Info->timing[timingIdx];
+
+    nvt_assert(timingIdx < COUNT(pDisplayId20Info->timing));
+
+    // rgb444 (always supports 6bpc and 8bpc as per DP spec 5.1.1.1.1 RGB Colorimetry)
+    UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 1,
+                               1,
+                               pDisplayId20Info->interface_features.rgb444.bpc.bpc10,
+                               pDisplayId20Info->interface_features.rgb444.bpc.bpc12,
+                               pDisplayId20Info->interface_features.rgb444.bpc.bpc14,
+                               pDisplayId20Info->interface_features.rgb444.bpc.bpc16);
+
+    // yuv444
+    UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv444, 0, // yuv444 does not support 6bpc
+                               pDisplayId20Info->interface_features.yuv444.bpc.bpc8,
+                               pDisplayId20Info->interface_features.yuv444.bpc.bpc10,
+                               pDisplayId20Info->interface_features.yuv444.bpc.bpc12,
+                               pDisplayId20Info->interface_features.yuv444.bpc.bpc14,
+                               pDisplayId20Info->interface_features.yuv444.bpc.bpc16);
+    // yuv422
+    UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv422, 0, // yuv422 does not support 6bpc
+                               pDisplayId20Info->interface_features.yuv422.bpc.bpc8,
+                               pDisplayId20Info->interface_features.yuv422.bpc.bpc10,
+                               
pDisplayId20Info->interface_features.yuv422.bpc.bpc12, + pDisplayId20Info->interface_features.yuv422.bpc.bpc14, + pDisplayId20Info->interface_features.yuv422.bpc.bpc16); + + if (!NVT_DID20_TIMING_IS_CTA861(pT->etc.flag, pT->etc.status)) + { + // yuv420 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv420, 0, // yuv420 does not support 6bpc + pDisplayId20Info->interface_features.yuv420.bpc.bpc8, + pDisplayId20Info->interface_features.yuv420.bpc.bpc10, + pDisplayId20Info->interface_features.yuv420.bpc.bpc12, + pDisplayId20Info->interface_features.yuv420.bpc.bpc14, + pDisplayId20Info->interface_features.yuv420.bpc.bpc16); + } +} +POP_SEGMENTS + diff --git a/src/common/modeset/timing/nvt_dmt.c b/src/common/modeset/timing/nvt_dmt.c new file mode 100644 index 0000000..ccfbb69 --- /dev/null +++ b/src/common/modeset/timing/nvt_dmt.c @@ -0,0 +1,345 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvt_dmt.c +// +// Purpose: calculate DMT/DMT-RB timing +// +//***************************************************************************** + +#include "nvBinSegment.h" +#include "nvmisc.h" + +#include "nvtiming_pvt.h" + +PUSH_SEGMENTS + +// DMT table 2-1 +// Macro to declare a TIMING initializer for given parameters without border +#define DMT_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rr,pclk,id) \ +{hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',NVT_PROGRESSIVE,pclk,((pclk<<3)+(pclk<<1)),{0,rr,set_rrx1k(pclk,ht,vt),0,0x1,{0},{0},{0},{0},NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT,id),"VESA DMT"}} + +#define DMTRB_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rr,pclk,id) \ +{hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',NVT_PROGRESSIVE,pclk,((pclk<<3)+(pclk<<1)),{0,rr,set_rrx1k(pclk,ht,vt),0,0x1,{0},{0},{0},{0},NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT_RB,id),"VESA DMT/RB"}} + +#define DMTRB_2_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rr,pclk,id) \ +{hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',NVT_PROGRESSIVE,pclk,((pclk<<3)+(pclk<<1)),{0,rr,set_rrx1k(pclk,ht,vt),0,0x1,{0},{0},{0},{0},NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT_RB_2,id),"VESA DMT/RB2"}} + +DATA_SEGMENT(PAGE_DATA) + +static NVT_TIMING DMT[] = +{ + // VESA standard entries (ordered according to VESA DMT ID). 
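+    // For reference, reading one entry against the DMT_TIMING macro above:
+    // the 0x04 entry below, DMT_TIMING(640, 8, 96, 800,'-', 480, 2, 2, 525,'-', 60, 2518, 0x04),
+    // is 640x480@60Hz with HTotal = 800 and VTotal = 525; the pclk field is in
+    // 10 kHz units (2518 -> 25.18 MHz), and the macro derives pclk1khz as
+    // ((pclk<<3)+(pclk<<1)) == pclk*10, i.e. 25180 kHz.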
+ // hv,hfp,hsw, ht,hsp, vv,vfp,vsw, vt,vsp, rr,pclk , id + DMT_TIMING ( 640, 32, 64, 832,'+', 350, 32, 3, 445,'-', 85, 3150, 0x01), + DMT_TIMING ( 640, 32, 64, 832,'-', 400, 1, 3, 445,'+', 85, 3150, 0x02), + DMT_TIMING ( 720, 36, 72, 936,'-', 400, 1, 3, 446,'+', 85, 3550, 0x03), + DMT_TIMING ( 640, 8, 96, 800,'-', 480, 2, 2, 525,'-', 60, 2518, 0x04), + // 640x480x72Hz (VESA) - this entry have borders + {640,8,16,40,832,NVT_H_SYNC_NEGATIVE,480,8,1,3,520,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,3150,31500,{0,72,72000,0,1,{0},{0},{0},{0},NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT,5),"VESA DMT"}}, + DMT_TIMING ( 640, 16, 64, 840,'-', 480, 1, 3, 500,'-', 75, 3150, 0x06), + DMT_TIMING ( 640, 56, 56, 832,'-', 480, 1, 3, 509,'-', 85, 3600, 0x07), + DMT_TIMING ( 800, 24, 72,1024,'+', 600, 1, 2, 625,'+', 56, 3600, 0x08), + DMT_TIMING ( 800, 40,128,1056,'+', 600, 1, 4, 628,'+', 60, 4000, 0x09), + DMT_TIMING ( 800, 56,120,1040,'+', 600, 37, 6, 666,'+', 72, 5000, 0x0A), + DMT_TIMING ( 800, 16, 80,1056,'+', 600, 1, 3, 625,'+', 75, 4950, 0x0B), + DMT_TIMING ( 800, 32, 64,1048,'+', 600, 1, 3, 631,'+', 85, 5625, 0x0C), + DMTRB_TIMING( 800, 48, 32, 960,'+', 600, 3, 4, 636,'-',120, 7325, 0x0D), + DMT_TIMING ( 848, 16,112,1088,'+', 480, 6, 8, 517,'+', 60, 3375, 0x0E), + DMT_TIMING (1024, 8,176,1264,'+', 768, 0, 4, 817,'+', 43, 4490, 0x0F), + DMT_TIMING (1024, 24,136,1344,'-', 768, 3, 6, 806,'-', 60, 6500, 0x10), + DMT_TIMING (1024, 24,136,1328,'-', 768, 3, 6, 806,'-', 70, 7500, 0x11), + DMT_TIMING (1024, 16, 96,1312,'+', 768, 1, 3, 800,'+', 75, 7875, 0x12), + DMT_TIMING (1024, 48, 96,1376,'+', 768, 1, 3, 808,'+', 85, 9450, 0x13), + DMTRB_TIMING(1024, 48, 32,1184,'+', 768, 3, 4, 813,'-',120,11550, 0x14), + DMT_TIMING (1152, 64,128,1600,'+', 864, 1, 3, 900,'+', 75,10800, 0x15), + DMTRB_TIMING(1280, 48, 32,1440,'+', 768, 3, 7, 790,'-', 60, 6825, 0x16), + DMT_TIMING (1280, 64,128,1664,'-', 768, 3, 7, 798,'+', 60, 7950, 0x17), + DMT_TIMING (1280, 80,128,1696,'-', 768, 3, 7, 805,'+', 75,10225, 0x18), + DMT_TIMING (1280, 80,136,1712,'-', 768, 3, 7, 809,'+', 85,11750, 0x19), + DMTRB_TIMING(1280, 48, 32,1440,'+', 768, 3, 7, 813,'-',120,14025, 0x1A), + DMTRB_TIMING(1280, 48, 32,1440,'+', 800, 3, 6, 823,'-', 60, 7100, 0x1B), + DMT_TIMING (1280, 72,128,1680,'-', 800, 3, 6, 831,'+', 60, 8350, 0x1C), + DMT_TIMING (1280, 80,128,1696,'-', 800, 3, 6, 838,'+', 75,10650, 0x1D), + DMT_TIMING (1280, 80,136,1712,'-', 800, 3, 6, 843,'+', 85,12250, 0x1E), + DMTRB_TIMING(1280, 48, 32,1440,'+', 800, 3, 6, 847,'-',120,14625, 0x1F), + DMT_TIMING (1280, 96,112,1800,'+', 960, 1, 3,1000,'+', 60,10800, 0x20), + DMT_TIMING (1280, 64,160,1728,'+', 960, 1, 3,1011,'+', 85,14850, 0x21), + DMTRB_TIMING(1280, 48, 32,1440,'+', 960, 3, 4,1017,'-',120,17550, 0x22), + DMT_TIMING (1280, 48,112,1688,'+',1024, 1, 3,1066,'+', 60,10800, 0x23), + DMT_TIMING (1280, 16,144,1688,'+',1024, 1, 3,1066,'+', 75,13500, 0x24), + DMT_TIMING (1280, 64,160,1728,'+',1024, 1, 3,1072,'+', 85,15750, 0x25), + DMTRB_TIMING(1280, 48, 32,1440,'+',1024, 3, 7,1084,'-',120,18725, 0x26), + DMT_TIMING (1360, 64,112,1792,'+', 768, 3, 6, 795,'+', 60, 8550, 0x27), + DMTRB_TIMING(1360, 48, 32,1520,'+', 768, 3, 5, 813,'-',120,14825, 0x28), + DMTRB_TIMING(1400, 48, 32,1560,'+',1050, 3, 4,1080,'-', 60,10100, 0x29), + DMT_TIMING (1400, 88,144,1864,'-',1050, 3, 4,1089,'+', 60,12175, 0x2A), + DMT_TIMING (1400,104,144,1896,'-',1050, 3, 4,1099,'+', 75,15600, 0x2B), + DMT_TIMING (1400,104,152,1912,'-',1050, 3, 4,1105,'+', 85,17950, 0x2C), + DMTRB_TIMING(1400, 48, 32,1560,'+',1050, 3, 4,1050,'-',120,20800, 
0x2D), + DMTRB_TIMING(1440, 48, 32,1600,'+', 900, 3, 6, 926,'-', 60, 8875, 0x2E), + DMT_TIMING (1440, 80,152,1904,'-', 900, 3, 6, 934,'+', 60,10650, 0x2F), + DMT_TIMING (1440, 96,152,1936,'-', 900, 3, 6, 942,'+', 75,13675, 0x30), + DMT_TIMING (1440,104,152,1952,'-', 900, 3, 6, 948,'+', 85,15700, 0x31), + DMTRB_TIMING(1440, 48, 32,1600,'+', 900, 3, 6, 953,'-',120,18275, 0x32), + DMT_TIMING (1600, 64,192,2160,'+',1200, 1, 3,1250,'+', 60,16200, 0x33), + DMT_TIMING (1600, 64,192,2160,'+',1200, 1, 3,1250,'+', 65,17550, 0x34), + DMT_TIMING (1600, 64,192,2160,'+',1200, 1, 3,1250,'+', 70,18900, 0x35), + DMT_TIMING (1600, 64,192,2160,'+',1200, 1, 3,1250,'+', 75,20250, 0x36), + DMT_TIMING (1600, 64,192,2160,'+',1200, 1, 3,1250,'+', 85,22950, 0x37), + DMTRB_TIMING(1600, 48, 32,1760,'+',1200, 3, 4,1271,'-',120,26825, 0x38), + DMTRB_TIMING(1680, 48, 32,1840,'+',1050, 3, 6,1080,'-', 60,11900, 0x39), + DMT_TIMING (1680,104,176,2240,'-',1050, 3, 6,1089,'+', 60,14625, 0x3A), + DMT_TIMING (1680,120,176,2272,'-',1050, 3, 6,1099,'+', 75,18700, 0x3B), + DMT_TIMING (1680,128,176,2288,'-',1050, 3, 6,1105,'+', 85,21475, 0x3C), + DMTRB_TIMING(1680, 48, 32,1840,'+',1050, 3, 6,1112,'-',120,24550, 0x3D), + DMT_TIMING (1792,128,200,2448,'-',1344, 1, 3,1394,'+', 60,20475, 0x3E), + DMT_TIMING (1792, 96,216,2456,'-',1344, 1, 3,1417,'+', 75,26100, 0x3F), + DMTRB_TIMING(1792, 48, 32,1952,'+',1344, 3, 4,1423,'-',120,33325, 0x40), + DMT_TIMING (1856, 96,224,2528,'-',1392, 1, 3,1439,'+', 60,21825, 0x41), + DMT_TIMING (1856,128,224,2560,'-',1392, 1, 3,1500,'+', 75,28800, 0x42), + DMTRB_TIMING(1856, 48, 32,2016,'+',1392, 3, 4,1474,'-',120,35650, 0x43), + DMTRB_TIMING(1920, 48, 32,2080,'+',1200, 3, 6,1235,'-', 60,15400, 0x44), + DMT_TIMING (1920,136,200,2592,'-',1200, 3, 6,1245,'+', 60,19325, 0x45), + DMT_TIMING (1920,136,208,2608,'-',1200, 3, 6,1255,'+', 75,24525, 0x46), + DMT_TIMING (1920,144,208,2624,'-',1200, 3, 6,1262,'+', 85,28125, 0x47), + DMTRB_TIMING(1920, 48, 32,2080,'+',1200, 3, 6,1271,'-',120,31700, 0x48), + DMT_TIMING (1920,128,208,2600,'-',1440, 1, 3,1500,'+', 60,23400, 0x49), + DMT_TIMING (1920,144,224,2640,'-',1440, 1, 3,1500,'+', 75,29700, 0x4A), + DMTRB_TIMING(1920, 48, 32,2080,'+',1440, 3, 4,1525,'-',120,38050, 0x4B), + DMTRB_TIMING(2560, 48, 32,2720,'+',1600, 3, 6,1646,'-', 60,26850, 0x4C), + DMT_TIMING (2560,192,280,3504,'-',1600, 3, 6,1658,'+', 60,34850, 0x4D), + DMT_TIMING (2560,208,280,3536,'-',1600, 3, 6,1672,'+', 75,44325, 0x4E), + DMT_TIMING (2560,208,280,3536,'-',1600, 3, 6,1682,'+', 85,50525, 0x4F), + DMTRB_TIMING(2560, 48, 32,2720,'+',1600, 3, 6,1694,'-',120,55275, 0x50), + DMT_TIMING (1366, 70,143,1792,'+',768 , 3, 3, 798,'+', 60, 8550, 0x51),//1366 x 768 @60 (non-interlaced) DMT ID: 51h + DMT_TIMING (1920, 88, 44,2200,'+',1080, 4, 5,1125,'+', 60,14850, 0x52),//1920 x 1080 @60 (non-interlaced) DMT ID: 52h + DMTRB_TIMING(1600, 24, 80,1800,'+', 900, 1, 3,1000,'+', 60,10800, 0x53),//1600 x 900 @60 (non-interlaced) DMT ID: 53h + DMTRB_TIMING(2048, 26, 80,2250,'+',1152, 1, 3,1200,'+', 60,16200, 0x54),//2048 x 1152 @60 (non-interlaced) DMT ID: 54h + DMT_TIMING (1280,110, 40,1650,'+', 720, 5, 5, 750,'+', 60, 7425, 0x55),//1280 x 720 @60 (non-interlaced) DMT ID: 55h + DMTRB_TIMING(1366, 14, 56,1500,'+', 768, 1, 3, 800,'+', 60, 7200, 0x56),//1366 x 768 @60 (non-interlaced) DMT ID: 56h + + // Added timing definitions in DMT 1.3 Version 1.0, Rev. 
13 + DMTRB_2_TIMING(4096, 8, 56,4176,'+', 2160, 48, 8, 2222,'-', 60,55674, 0x57),//4096 x 2160 @60 (non-interlaced) DMT ID: 57h + DMTRB_2_TIMING(4096, 8, 56,4176,'+', 2160, 48, 8, 2222,'-', 59,55619, 0x58),//4096 x 2160 @60 (non-interlaced) DMT ID: 58h + + // ******************************** + // Additional non-standard entries. + // ******************************** + + // Settings for 640x400 + // GTF timing for 640x400x60Hz has too low HFreq, this is a + // Specially constructed timing from 640x480, with extra blanking + // on top and bottom of the screen + + DMT_TIMING(640,16,96,800,'-',400,50,2,525,'-',60,2518,0), + DMT_TIMING(640,16,96,800,'+',400,12,2,449,'-',70,2518,0), + + // the end of table + NVT_TIMING_SENTINEL +}; +static NvU32 MAX_DMT_FORMAT = sizeof(DMT)/sizeof(DMT[0]) - 1; + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_EnumDMT(NvU32 dmtId, NVT_TIMING *pT) +{ + if ((pT == NULL) || (dmtId == 0)) + { + return NVT_STATUS_ERR; + } + + // The last entry is not used. + if (dmtId > MAX_DMT_FORMAT) + { + return NVT_STATUS_ERR; + } + + // Make sure that the DMT ID matches according to the array index. + if (NVT_GET_TIMING_STATUS_SEQ(DMT[dmtId - 1].etc.status) == dmtId) + { + *pT = DMT[dmtId - 1]; + + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk1khz, (NvU32)1000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal); + NVT_SNPRINTF((char *)pT->etc.name, 40, "DMT:#%d:%dx%dx%dHz", + dmtId, pT->HVisible, pT->VVisible, pT->etc.rr); + ((char *)pT->etc.name)[39] = '\0'; + + return NVT_STATUS_SUCCESS; + } + + return NVT_STATUS_ERR; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_EnumStdTwoBytesCode(NvU16 std2ByteCode, NVT_TIMING *pT) +{ + NvU32 aspect, width, height, rr; + + if ((pT == NULL) || (std2ByteCode == 0)) + { + return NVT_STATUS_ERR; + } + + // The value in the EDID = (Horizontal active pixels/8) - 31 + width = (std2ByteCode & 0x0FF) + 31; + width <<= 3; + rr = ((std2ByteCode >> 8) & 0x3F) + 60; // bits 5->0 + + // get the height + aspect = ((std2ByteCode >> 8) & 0xC0); // aspect ratio at bit 7:6 + + if (aspect == 0x00) height = width * 5 / 8; // 16:10 + else if (aspect == 0x40) height = width * 3 / 4; // 4:3 + else if (aspect == 0x80) height = width * 4 / 5; // 5:4 + else height = width * 9 / 16; // 16:9 + + // try to get the timing from DMT or DMT_RB + if (NvTiming_CalcDMT(width, height, rr, 0, pT) == NVT_STATUS_SUCCESS) + { + return NVT_STATUS_SUCCESS; + } + // try to get the timing from DMT_RB2 + else if (NvTiming_CalcDMT_RB2(width, height, rr, 0, pT) == NVT_STATUS_SUCCESS) + { + return NVT_STATUS_SUCCESS; + } + + return NVT_STATUS_ERR; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcDMT(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT) +{ + NVT_TIMING *p = (NVT_TIMING *)DMT; + + if (pT == NULL) + return NVT_STATUS_ERR; + + if (width == 0 || height == 0 || rr == 0 ) + return NVT_STATUS_ERR; + + // no interlaced DMT timing + if ((flag & NVT_PVT_INTERLACED_MASK) != 0) + return NVT_STATUS_ERR; + + while (p->HVisible != 0 && p->VVisible != 0) + { + if (NVT_GET_TIMING_STATUS_TYPE(p->etc.status) == NVT_TYPE_DMT) + { + if ((NvU32)p->HVisible == width && + (NvU32)p->VVisible == height && + (NvU32)p->etc.rr == rr) + { + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + *pT = *p; + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk1khz, (NvU32)1000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal); + NVT_SNPRINTF((char *)pT->etc.name, 40, "DMT:%dx%dx%dHz",width, height, rr); + pT->etc.name[39] = '\0'; + pT->etc.rgb444.bpc.bpc8 = 1; + return NVT_STATUS_SUCCESS; + } + } + p ++; 
+    }
+
+    // if we couldn't find a DMT with regular blanking, try the DMT with reduced blanking next
+    return NvTiming_CalcDMT_RB(width, height, rr, flag, pT);
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_CalcDMT_RB(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT)
+{
+    NVT_TIMING *p = (NVT_TIMING *)DMT;
+
+    if (pT == NULL)
+        return NVT_STATUS_ERR;
+
+    if (width == 0 || height == 0 || rr == 0)
+        return NVT_STATUS_ERR;
+
+    // no interlaced DMT timing
+    if ((flag & NVT_PVT_INTERLACED_MASK) != 0)
+        return NVT_STATUS_ERR;
+
+    while (p->HVisible != 0 && p->VVisible != 0)
+    {
+        // select only reduced-blanking timings.
+        if (NVT_GET_TIMING_STATUS_TYPE(p->etc.status) == NVT_TYPE_DMT_RB)
+        {
+            if ((NvU32)p->HVisible == width &&
+                (NvU32)p->VVisible == height &&
+                (NvU32)p->etc.rr == rr)
+            {
+                NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING));
+                *pT = *p;
+                pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk1khz, (NvU32)1000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal);
+                NVT_SNPRINTF((char *)pT->etc.name, 40, "DMT-RB:%dx%dx%dHz", width, height, rr);
+                pT->etc.name[39] = '\0';
+                pT->etc.rgb444.bpc.bpc8 = 1;
+                return NVT_STATUS_SUCCESS;
+            }
+        }
+        p++;
+    }
+    return NVT_STATUS_ERR;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_CalcDMT_RB2(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT)
+{
+    NVT_TIMING *p = (NVT_TIMING *)DMT;
+
+    if (pT == NULL)
+        return NVT_STATUS_ERR;
+
+    if (width == 0 || height == 0 || rr == 0)
+        return NVT_STATUS_ERR;
+
+    // no interlaced DMT timing
+    if ((flag & NVT_PVT_INTERLACED_MASK) != 0)
+        return NVT_STATUS_ERR;
+
+    while (p->HVisible != 0 && p->VVisible != 0)
+    {
+        // select only reduced-blanking (RB2) timings.
+        if (NVT_GET_TIMING_STATUS_TYPE(p->etc.status) == NVT_TYPE_DMT_RB_2)
+        {
+            if ((NvU32)p->HVisible == width &&
+                (NvU32)p->VVisible == height &&
+                (NvU32)p->etc.rr == rr)
+            {
+                NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING));
+                *pT = *p;
+                pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk1khz, (NvU32)1000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal);
+                NVT_SNPRINTF((char *)pT->etc.name, 40, "DMT-RB2:%dx%dx%dHz", width, height, rr);
+                pT->etc.name[39] = '\0';
+                pT->etc.rgb444.bpc.bpc8 = 1;
+                return NVT_STATUS_SUCCESS;
+            }
+        }
+        p++;
+    }
+    return NVT_STATUS_ERR;
+}
+
+POP_SEGMENTS
diff --git a/src/common/modeset/timing/nvt_dsc_pps.c b/src/common/modeset/timing/nvt_dsc_pps.c
new file mode 100644
index 0000000..c0beb2b
--- /dev/null
+++ b/src/common/modeset/timing/nvt_dsc_pps.c
@@ -0,0 +1,2759 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+//=============================================================================
+//
+// Provides functions to calculate the PPS (Picture Parameter Set)
+//
+//
+//=============================================================================
+
+/* ------------------------ Includes --------------------------------------- */
+#include "nvt_dsc_pps.h"
+#include "nvmisc.h"
+#include "displayport/displayport.h"
+#include "nvctassert.h"
+#include <stddef.h>
+
+/* ------------------------ Macros ----------------------------------------- */
+
+#define MIN_CHECK(s,a,b)       { if((a)<(b))  { return (NVT_STATUS_ERR);} }
+#define RANGE_CHECK(s,a,b,c)   { if((((NvS32)(a))<(NvS32)(b))||(((NvS32)(a))>(NvS32)(c))) { return (NVT_STATUS_ERR);} }
+#define ENUM_CHECK(s,a,b)      { if((a)!=(b)) { return (NVT_STATUS_ERR);} }
+#define ENUM2_CHECK(s,a,b,c)   { if(((a)!=(b))&&((a)!=(c))) { return (NVT_STATUS_ERR);} }
+#define ENUM3_CHECK(s,a,b,c,d) { if(((a)!=(b))&&((a)!=(c))&&((a)!=(d))) { return (NVT_STATUS_ERR);} }
+#define MAX(a,b)               (((a)>=(b) || (b == 0xffffffff))?(a):(b))
+#define MIN(a,b)               ((a)>=(b)?(b):(a))
+#define CLAMP(a,b,c)           ((a)<=(b)?(b):((a)>(c)?(c):(a)))
+#define ADJUST_SLICE_NUM(n)    ((n)>4?8:((n)>2?4:(n)))
+#define MSB(a)                 (((a)>>8)&0xFF)
+#define LSB(a)                 ((a)&0xFF)
+
+#define NUM_BUF_RANGES          15
+#define BPP_UNIT                16
+#define OFFSET_FRACTIONAL_BITS  11
+#define PIXELS_PER_GROUP        3
+
+// The max pclk frequency (in kHz) per slice.
+// The DP1.4 spec defines the number of slices needed per display line,
+// based on the pixel rate; it is about 340 MHz per slice.
+#define MAX_PCLK_PER_SLICE_KHZ 340000
+// The max slice_width used in the slice_width calculation.
+// This is not a HW limitation (which is 5120 per head), just a recommendation.
+#define MAX_WIDTH_PER_SLICE 5120
+// The RC algorithm performs better when the slice size is bigger.
+// This requires the slice size to be much greater than rc_model_size (8K bits),
+// but a bigger slice increases the error rate of DSC slices.
+// 256KB is a moderate value (about 1280x200 @ 8bpp).
+#define MIN_SLICE_SIZE (256*1024)
+// Per DP 1.4 spec, sink should support slice width of up to at least 2560 (it is allowed to support more).
+#define SINK_MAX_SLICE_WIDTH_DEFAULT 2560
+// Min bits per pixel supported
+#define MIN_BITS_PER_PIXEL 8
+// Max bits per pixel supported
+#define MAX_BITS_PER_PIXEL 32
+// Max HBlank pixel count
+#define MAX_HBLANK_PIXELS 7680
+#define MHZ_TO_HZ 1000000
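+
+// Note on the fixed-point convention used below: bits_per_pixel values are
+// carried premultiplied by BPP_UNIT (16), i.e. with 1/16-bpp precision. For
+// example, a target of 11.5 bpp is stored as 11.5 * 16 = 184, and the
+// supported range MIN_BITS_PER_PIXEL..MAX_BITS_PER_PIXEL corresponds to raw
+// values 8 * 16 = 128 .. 32 * 16 = 512.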
+
+/* ------------------------ Datatypes -------------------------------------- */
+
+// input parameters to the pps calculation
+typedef struct
+{
+    NvU32 dsc_version_minor;   // DSC minor version (1 - DSC 1.1, 2 - DSC 1.2)
+    NvU32 bits_per_component;  // bits per component of input pixels (8,10,12)
+    NvU32 linebuf_depth;       // bits per component of reconstructed line buffer (8 ~ 13)
+    NvU32 block_pred_enable;   // block prediction enable (0, 1)
+    NvU32 convert_rgb;         // input pixel format (0 - YCbCr, 1 - RGB)
+    NvU32 bits_per_pixel;      // bits per pixel * BPP_UNIT (8.0*BPP_UNIT ~ 32.0*BPP_UNIT)
+    NvU32 pic_height;          // picture height (8 ~ 8192)
+    NvU32 pic_width;           // picture width (single mode: 32 ~ 5120, dual mode: 64 ~ 8192)
+    NvU32 slice_height;        // 0 - auto; otherwise (8 ~ 8192), must satisfy (pic_height % slice_height == 0)
+    NvU32 slice_width;         // maximum slice_width; 0 - default (1280)
+ NvU32 slice_num; // 0 - auto, others: 1,2,4,8 + NvU32 slice_count_mask; // no of slices supported by sink + NvU32 max_slice_num; // slice number cap determined from GPU and sink caps + NvU32 max_slice_width; // slice width cap determined from GPU and sink caps + NvU32 pixel_clkMHz; // pixel clock frequency in MHz, used for slice_width calculation. + NvU32 dual_mode; // 0 - single mode, 1 - dual mode, only for checking pic_width + NvU32 simple_422; // 4:2:2 simple mode + NvU32 native_420; // 420 native mode + NvU32 native_422; // 422 native mode + NvU32 drop_mode; // 0 - normal mode, 1 - drop mode. + NvU32 multi_tile; // 1 = Multi-tile architecture, 0 = dsc single or dual mode without multi-tile + NvU32 peak_throughput_mode0; // peak throughput supported by the sink for 444 and simple 422 modes. + NvU32 peak_throughput_mode1; // peak throughput supported by the sink for native 422 and 420 modes. + NvU32 eDP; // 1 = connector type is eDP, 0 otherwise. +} DSC_INPUT_PARAMS; + +//output pps parameters after calculation +typedef struct +{ + NvU32 dsc_version_major; // DSC major version, always 1 + NvU32 dsc_version_minor; // DSC minor version + NvU32 pps_identifier; // Application-specific identifier, always 0 + NvU32 bits_per_component; // bits per component for input pixels + NvU32 linebuf_depth; // line buffer bit depth + NvU32 block_pred_enable; // enable/disable block prediction + NvU32 convert_rgb; // color space for input pixels + NvU32 simple_422; // 4:2:2 simple mode + NvU32 vbr_enable; // enable VBR mode + NvU32 bits_per_pixel; // (bits per pixel * BPP_UNIT) after compression + NvU32 pic_height; // picture height + NvU32 pic_width; // picture width + NvU32 slice_height; // slice height + NvU32 slice_width; // slice width + NvU32 chunk_size; // the size in bytes of the slice chunks + NvU32 initial_xmit_delay; // initial transmission delay + NvU32 initial_dec_delay; // initial decoding delay + NvU32 initial_scale_value; // initial xcXformScale factor value + NvU32 scale_increment_interval; // number of group times between incrementing the rcXformScale factor + NvU32 scale_decrement_interval; // number of group times between decrementing the rcXformScale factor + NvU32 first_line_bpg_offset; // number of additional bits allocated for each group on the first line in a slice + NvU32 nfl_bpg_offset; // number of bits de-allocated for each group after the first line in a slice + NvU32 slice_bpg_offset; // number of bits de-allocated for each group to enforce the slice constrain + NvU32 initial_offset; // initial value for rcXformOffset + NvU32 final_offset; // maximum end-of-slice value for rcXformOffset + NvU32 flatness_min_qp; // minimum flatness QP + NvU32 flatness_max_qp; // maximum flatness QP + //rc_parameter_set + NvU32 rc_model_size; // number of bits within the "RC model" + NvU32 rc_edge_factor; // edge detection factor + NvU32 rc_quant_incr_limit0; // QP threshold for short-term RC + NvU32 rc_quant_incr_limit1; // QP threshold for short-term RC + NvU32 rc_tgt_offset_hi; // upper end of the target bpg range for short-term RC + NvU32 rc_tgt_offset_lo; // lower end of the target bpg range for short-term RC + NvU32 rc_buf_thresh[NUM_BUF_RANGES-1]; // thresholds in "RC model" + //rc_range_parameters + NvU32 range_min_qp[NUM_BUF_RANGES]; // minimum QP for each of the RC ranges + NvU32 range_max_qp[NUM_BUF_RANGES]; // maximum QP for each of the RC ranges + NvU32 range_bpg_offset[NUM_BUF_RANGES]; // bpg adjustment for each of the RC ranges + //420,422 + NvU32 native_420; // 420 native 
mode + NvU32 native_422; // 422 native mode + NvU32 second_line_bpg_offset; // 2nd line bpg offset to use, native 420 only + NvU32 nsl_bpg_offset; // non-2nd line bpg offset to use, native 420 only + NvU32 second_line_offset_adj; // adjustment to 2nd line bpg offset, native 420 only + + //additional params not in PPS + NvU32 slice_num; + NvU32 groups_per_line; + NvU32 num_extra_mux_bits; + NvU32 flatness_det_thresh; +} DSC_OUTPUT_PARAMS; + +// +// Opaque scratch space is passed by client for DSC calculation usage. +// Use an internal struct to cast the input buffer +// into in/out params for DSC PPS calculation functions to work with +// +typedef struct _DSC_GENERATE_PPS_WORKAREA +{ + DSC_INPUT_PARAMS in; + DSC_OUTPUT_PARAMS out; +} DSC_GENERATE_PPS_WORKAREA; + +// Compile time check to ensure Opaque workarea buffer size always covers required work area. +ct_assert(sizeof(DSC_GENERATE_PPS_OPAQUE_WORKAREA) == sizeof(DSC_GENERATE_PPS_WORKAREA)); + +/* ------------------------ Global Variables ------------------------------- */ + +static const NvU8 minqp444_8b[15][37]={ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0} + ,{ 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0} + ,{ 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{ 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0} + ,{ 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0} + ,{ 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0} + ,{ 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0} + ,{ 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0} + ,{ 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1} + ,{14,14,13,13,12,12,12,12,11,11,10,10,10,10, 9, 9, 9, 8, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3} +}; + +static const NvU8 maxqp444_8b[15][37]={ + { 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 6, 6, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{ 8, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{ 8, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0} + ,{ 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 4, 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0} + ,{ 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 
1, 1, 1, 1} + ,{ 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1} + ,{10,10, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1} + ,{11,11,10,10, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1} + ,{12,11,11,10,10,10, 9, 9, 9, 9, 9, 9, 9, 8, 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1} + ,{12,12,11,11,10,10,10,10,10,10, 9, 9, 9, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1} + ,{12,12,12,11,11,11,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1} + ,{12,12,12,12,11,11,11,11,11,10,10, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1} + ,{13,13,13,13,12,12,11,11,11,11,10,10,10,10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2} + ,{15,15,14,14,13,13,13,13,12,12,11,11,11,11,10,10,10, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4} +}; + +static const NvU8 minqp444_10b[15][49]={ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 7, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 7, 7, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0} + ,{ 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{ 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0} + ,{ 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0} + ,{10, 9, 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0} + ,{10,10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1} + ,{10,10,10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1} + ,{10,10,10,10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1} + ,{12,12,12,12,12,12,12,12,12,12,11,11,11,11,11,11,11,11,11,11,10,10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1} + ,{18,18,17,17,16,16,16,16,15,15,14,14,14,14,13,13,13,12,12,12,11,11,11,11,10,10, 9, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3} +}; + +static const NvU8 maxqp444_10b[15][49]={ + { 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{10,10, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{12,11,11,10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0} + ,{12,12,11,11,10,10,10,10,10,10,10,10, 9, 9, 9, 8, 7, 7, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0} + ,{13,12,12,11,11,11,11,11,11,11,11,11,10,10, 9, 8, 8, 7, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{13,12,12,12,11,11,11,11,11,11,11,11,10,10,10, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0} + ,{13,13,12,12,11,11,11,11,11,11,11,11,11,10,10, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1} + ,{14,14,13,13,12,12,12,12,12,12,12,12,12,11,11,10, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1} + ,{15,15,14,14,13,13,13,13,13,13,12,12,12,11,11,10,10, 9, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1} + ,{16,15,15,14,14,14,13,13,13,13,13,13,13,12,12,11,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1} + ,{16,16,15,15,14,14,14,14,14,14,13,13,13,12,12,11,11,10,10,10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2} + ,{16,16,16,15,15,15,14,14,14,14,13,13,13,13,12,12,12,11,11,11,10,10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2} + ,{16,16,16,16,15,15,15,15,15,14,14,13,13,13,12,12,12,11,11,11,10,10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2} + ,{17,17,17,17,16,16,15,15,15,15,14,14,14,14,13,13,12,12,12,12,11,11,10,10,10,10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2} + ,{19,19,18,18,17,17,17,17,16,16,15,15,15,15,14,14,14,13,13,13,12,12,12,12,11,11,10,10,10,10,10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4} +}; + +static const NvU8 minqp444_12b[15][61]={ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{11,10,10, 9, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{11,11,10,10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{13,12,12,11,11,11,11,11,11,11,11,11,10,10, 9, 9, 9, 8, 7, 7, 7, 7, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{13,12,12,12,11,11,11,11,11,11,11,11,11,11,11,10, 9, 9, 8, 8, 8, 8, 6, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0} + ,{13,13,12,12,11,11,11,11,11,11,11,11,11,11,11,10, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + 
,{13,13,12,12,11,11,11,11,11,11,11,11,11,11,11,11,10,10,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0} + ,{13,13,12,12,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,10,10,10, 9, 9, 8, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0} + ,{14,13,13,12,12,12,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,10,10,10, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 0} + ,{14,14,13,13,13,13,13,13,13,13,13,13,13,13,13,12,12,12,12,12,11,11,11,11,11,11,10,10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1} + ,{14,14,14,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,12,12,11,11,11,11,11,11,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1} + ,{14,14,14,14,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,12,12,12,12,12,12,11,11,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1} + ,{17,17,17,17,16,16,15,15,15,15,15,15,15,15,15,15,15,15,15,15,14,14,13,13,13,13,12,12,11,11,11,11,10,10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 1} + ,{22,22,21,21,20,20,20,20,19,19,18,18,18,18,17,17,17,16,16,16,15,15,15,15,14,14,13,13,13,13,13,12,12,11,11,11,11,11,10,10, 9, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3} +}; + +static const NvU8 maxqp444_12b[15][61]={ + {12,12,12,12,12,12,11,11,11,10, 9, 9, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{14,14,13,13,12,12,12,12,12,12,11,11, 9, 9, 9, 8, 8, 7, 7, 7, 7, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{16,15,15,14,13,13,13,13,13,13,13,13,12,12,12,11,10,10, 9, 9, 9, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{16,16,15,15,14,14,14,14,14,14,14,14,13,13,13,12,11,11,10,10,10, 8, 8, 8, 8, 8, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{17,16,16,15,15,15,15,15,15,15,15,15,14,14,13,12,12,11,10,10,10,10, 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0} + ,{17,16,16,16,15,15,15,15,15,15,15,15,14,14,14,13,12,12,11,11,11,11, 9, 9, 9, 9, 8, 8, 8, 8, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0} + ,{17,17,16,16,15,15,15,15,15,15,15,15,15,14,14,13,12,12,11,11,11,11,11,10,10,10, 9, 9, 9, 8, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0} + ,{18,18,17,17,16,16,16,16,16,16,16,16,16,15,15,14,13,13,12,12,12,12,11,11,11,11,10,10,10, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1} + ,{19,19,18,18,17,17,17,17,17,17,16,16,16,15,15,14,14,13,13,13,13,13,12,12,12,12,11,11,10, 9, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1} + ,{20,19,19,18,18,18,17,17,17,17,17,17,17,16,16,15,14,14,13,13,13,13,12,12,12,12,11,11,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 1} + ,{20,20,19,19,18,18,18,18,18,18,17,17,17,16,16,15,15,14,14,14,13,13,12,12,12,12,11,11,10,10,10,10,10,10,10,10, 9, 9, 9, 8, 
8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2} + ,{20,20,20,19,19,19,18,18,18,18,17,17,17,17,16,16,16,15,15,15,14,14,13,13,13,13,12,12,11,11,11,11,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2} + ,{20,20,20,20,19,19,19,19,19,18,18,17,17,17,16,16,16,15,15,15,14,14,13,13,13,13,12,12,11,11,11,11,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2} + ,{21,21,21,21,20,20,19,19,19,19,18,18,18,18,17,17,16,16,16,16,15,15,14,14,14,14,13,13,12,12,12,12,11,11,10,10,10,10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2} + ,{23,23,22,22,21,21,21,21,20,20,19,19,19,19,18,18,18,17,17,17,16,16,16,16,15,15,14,14,14,14,14,13,13,12,12,12,12,12,11,11,10,10,10,10,10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4} +}; + +static const NvU8 minqp422_8b[15][21] = { + {0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{3 ,3 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0} + ,{3 ,3 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0} + ,{3 ,3 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0} + ,{3 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,0 ,0} + ,{3 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1} + ,{3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1} + ,{5 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,1} + ,{5 ,5 ,5 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,1 ,1} + ,{5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,1 ,1} + ,{8 ,8 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,2} + ,{12,12,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,3 ,3} +}; + +static const NvU8 maxqp422_8b[15][21] = { + {4 ,4 ,3 ,3 ,2 ,2 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{4 ,4 ,4 ,4 ,4 ,3 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{5 ,5 ,5 ,5 ,5 ,4 ,3 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0} + ,{6 ,6 ,6 ,6 ,6 ,5 ,4 ,3 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0} + ,{7 ,7 ,7 ,7 ,7 ,6 ,5 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1} + ,{7 ,7 ,7 ,7 ,7 ,6 ,5 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1} + ,{7 ,7 ,7 ,7 ,7 ,6 ,5 ,4 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1} + ,{8 ,8 ,8 ,8 ,8 ,7 ,6 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,1 ,1} + ,{9 ,9 ,9 ,8 ,8 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2} + ,{10,10,9 ,9 ,9 ,8 ,7 ,6 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2} + ,{10,10,10,9 ,9 ,8 ,7 ,7 ,6 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,2} + ,{11,11,10,10,9 ,9 ,8 ,7 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2} + ,{11,11,11,10,9 ,9 ,8 ,8 ,7 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2} + ,{12,12,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,3} + ,{13,13,12,12,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4} +}; + +static const NvU8 minqp422_10b[15][29] = { + {0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{4 ,4 ,4 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{5 ,5 ,5 ,4 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{6 ,6 ,6 ,6 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{6 ,6 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0} + ,{6 ,6 ,6 ,6 ,6 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,3 ,3 ,3 ,3 ,2 ,1 ,1 ,0 ,0 ,0 ,0 ,0} + ,{6 ,6 ,6 ,6 ,6 ,5 ,5 ,5 ,5 ,5 ,5 ,5 
,5 ,4 ,4 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,1 ,0 ,0 ,0} + ,{7 ,7 ,7 ,7 ,7 ,6 ,6 ,6 ,6 ,6 ,6 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,1 ,1 ,1} + ,{7 ,7 ,7 ,7 ,7 ,6 ,6 ,6 ,6 ,6 ,6 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,4 ,3 ,2 ,2 ,1 ,1 ,1 ,1 ,1} + ,{8 ,8 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,6 ,6 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,1 ,1 ,1 ,1} + ,{9 ,9 ,9 ,8 ,8 ,8 ,8 ,8 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,6 ,5 ,5 ,5 ,5 ,5 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1} + ,{9 ,9 ,9 ,9 ,8 ,8 ,8 ,8 ,8 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,6 ,6 ,6 ,5 ,5 ,4 ,3 ,3 ,2 ,2 ,1 ,1 ,1} + ,{9 ,9 ,9 ,9 ,9 ,9 ,9 ,9 ,9 ,9 ,9 ,8 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,3 ,3 ,3 ,2 ,2 ,1 ,1} + ,{12,12,11,11,11,11,11,11,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,1} + ,{16,16,15,15,14,14,13,13,12,12,11,11,10,10,9 ,9 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,3} +}; + +static const NvU8 maxqp422_10b[15][29] = { + {8 ,8 ,7 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{8 ,8 ,8 ,6 ,6 ,5 ,4 ,4 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{9 ,9 ,9 ,8 ,7 ,6 ,5 ,4 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0} + ,{10,10,10,10,9 ,8 ,7 ,6 ,5 ,5 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1} + ,{11,11,11,11,10,9 ,8 ,6 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1} + ,{11,11,11,11,11,10,9 ,8 ,7 ,6 ,6 ,5 ,5 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,3 ,2 ,2 ,1 ,1 ,1 ,1 ,1} + ,{11,11,11,11,11,10,9 ,8 ,7 ,7 ,7 ,7 ,7 ,6 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1} + ,{12,12,12,12,12,11,10,9 ,8 ,8 ,8 ,7 ,7 ,7 ,7 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,2 ,2 ,2} + ,{13,13,13,12,12,11,10,10,9 ,9 ,9 ,8 ,8 ,7 ,7 ,7 ,6 ,5 ,5 ,5 ,5 ,4 ,3 ,3 ,2 ,2 ,2 ,2 ,2} + ,{14,14,13,13,13,12,11,10,9 ,9 ,9 ,9 ,8 ,8 ,8 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,2 ,2} + ,{14,14,14,13,13,12,11,11,10,10,10,9 ,9 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,6 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2} + ,{15,15,14,14,13,13,12,11,11,11,10,10,9 ,9 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,2} + ,{15,15,15,14,13,13,12,12,11,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2} + ,{16,16,15,15,14,14,13,13,12,12,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,2} + ,{17,17,16,16,15,15,14,14,13,13,12,12,11,11,10,10,9 ,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,6 ,5 ,5 ,4} +}; + +static const NvU8 minqp422_12b[15][37] = { + {0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{4 ,4 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{9 ,9 ,9 ,8 ,7 ,6 ,5 ,5 ,4 ,4 ,4 ,4 ,4 ,4 ,3 ,3 ,2 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{10,10,10,10,8 ,8 ,8 ,7 ,6 ,6 ,6 ,6 ,6 ,5 ,4 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{11,11,11,11,10,9 ,9 ,8 ,7 ,7 ,7 ,7 ,6 ,6 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{11,11,11,11,11,10,10,9 ,9 ,8 ,8 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,3 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0} + ,{11,11,11,11,11,10,10,10,9 ,9 ,9 ,9 ,8 ,7 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,0 ,0 ,0 ,0} + ,{11,11,11,11,11,11,10,10,10,10,10,9 ,8 ,8 ,8 ,7 ,6 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,0 ,0 ,0} + ,{11,11,11,11,11,11,11,11,11,11,11,10,9 ,8 ,8 ,8 ,7 ,6 ,6 ,6 ,6 ,5 ,4 ,4 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,1 ,0 ,0 ,0} + ,{11,11,11,11,11,11,11,11,11,11,11,11,9 ,9 ,9 ,8 ,7 ,7 ,6 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,0 ,0} + ,{13,13,13,13,13,12,12,12,12,12,12,11,11,10,10,10,9 ,9 ,8 ,8 ,8 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,1 ,1 ,1} + 
,{13,13,13,13,13,13,13,13,13,13,12,12,11,11,10,10,10,9 ,9 ,8 ,8 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,1 ,1} + ,{13,13,13,13,13,13,13,13,13,13,13,12,12,11,11,10,10,9 ,9 ,8 ,8 ,7 ,6 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,1 ,1} + ,{16,16,15,15,15,15,15,15,15,15,14,14,13,13,12,12,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2} + ,{20,20,19,19,18,18,17,17,16,16,15,15,14,14,13,13,12,12,12,11,11,10,10,9 ,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,6 ,5 ,5 ,4 ,4} +}; + +static const NvU8 maxqp422_12b[15][37] = { + {12,12,11,9 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{12,12,12,10,9 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{13,13,13,12,10,9 ,8 ,7 ,6 ,6 ,6 ,6 ,6 ,6 ,5 ,5 ,4 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{14,14,14,14,12,11,10,9 ,8 ,8 ,8 ,8 ,8 ,7 ,6 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0} + ,{15,15,15,15,14,13,12,10,9 ,9 ,9 ,9 ,8 ,8 ,7 ,6 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,0 ,0 ,0} + ,{15,15,15,15,15,14,13,12,11,10,10,9 ,8 ,8 ,7 ,7 ,7 ,6 ,6 ,6 ,6 ,5 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1} + ,{15,15,15,15,15,14,13,12,11,11,11,11,10,9 ,9 ,9 ,8 ,8 ,7 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,1 ,1 ,1 ,1} + ,{16,16,16,16,16,15,14,13,12,12,12,11,10,10,10,9 ,8 ,8 ,8 ,7 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,5 ,5 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,1} + ,{17,17,17,16,16,15,14,14,13,13,13,12,11,10,10,10,9 ,8 ,8 ,8 ,8 ,7 ,6 ,6 ,5 ,5 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,1 ,1 ,1} + ,{18,18,17,17,17,16,15,14,13,13,13,13,11,11,11,10,9 ,9 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,1 ,1} + ,{18,18,18,17,17,16,15,15,14,14,14,13,13,12,12,12,11,11,10,10,10,8 ,8 ,7 ,7 ,7 ,6 ,6 ,6 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,2} + ,{19,19,18,18,17,17,16,15,15,15,14,14,13,13,12,12,12,11,11,10,10,9 ,8 ,8 ,7 ,7 ,6 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2} + ,{19,19,19,18,17,17,16,16,15,15,15,14,14,13,13,12,12,11,11,10,10,9 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2} + ,{20,20,19,19,18,18,17,17,16,16,15,15,14,14,13,13,12,12,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,3 ,3} + ,{21,21,20,20,19,19,18,18,17,17,16,16,15,15,14,14,13,13,13,12,12,11,11,10,10,10,9 ,9 ,8 ,8 ,7 ,7 ,7 ,6 ,6 ,5 ,5} +}; + +static const NvU32 rcBufThresh[] = { 896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616, 7744, 7872, 8000, 8064 }; + +/* ------------------------ Static Variables ------------------------------- */ +/* ------------------------ Private Functions Prototype--------------------- */ +static NvU32 +DSC_GetHigherSliceCount +( + NvU32 common_slice_count_mask, + NvU32 desired_slice_num, + NvU32 *new_slice_num +); +static NvU32 DSC_AlignDownForBppPrecision(NvU32 bitsPerPixelX16, NvU32 bitsPerPixelPrecision); + +static NvU32 +DSC_GetPeakThroughputMps(NvU32 peak_throughput); + +static NvU32 +DSC_SliceCountMaskforSliceNum (NvU32 slice_num); + +static NvU32 +DSC_GetSliceCountMask(NvU32 maxSliceNum, NvBool bInclusive); + +static NVT_STATUS +DSC_GetMinSliceCountForMode +( + NvU32 picWidth, + NvU32 pixelClkMhz, + NvU32 maxSliceWidth, + NvU32 peakThroughPutMps, + NvU32 maxSliceCount, + NvU32 commonSliceCountMask, + NvU32 *pMinSliceCount +); + +/* ------------------------ Private Functions ------------------------------ */ + +/* + * @brief Calculate Bits Per Pixel aligned down as per bitsPerPixelPrecision supported + * by Sink + * + * @param[in] bitsPerPixelX16 Bits Per Pixel + * @param[in] bitsPerPixelPrecision Bits Per Pixel Precision Supported by Panel + * + * @returns Aligned down Bits Per Pixel value 
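 *
 * Illustrative example (values assumed, not from the spec): with
 * bitsPerPixelX16 = 181 (11.3125 bpp in 1/16 bpp units) and a sink precision
 * of 1/2 bpp, the alignment unit is 8, so 181 & ~7 = 176, i.e. 11.0 bpp.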
+ */ +static NvU32 +DSC_AlignDownForBppPrecision +( + NvU32 bitsPerPixelX16, + NvU32 bitsPerPixelPrecision +) +{ + NvU32 allignDownForBppPrecision; + + switch (bitsPerPixelPrecision) + { + case DSC_BITS_PER_PIXEL_PRECISION_1_16: + allignDownForBppPrecision = 1; + break; + + case DSC_BITS_PER_PIXEL_PRECISION_1_8: + allignDownForBppPrecision = 2; + break; + + case DSC_BITS_PER_PIXEL_PRECISION_1_4: + allignDownForBppPrecision = 4; + break; + + case DSC_BITS_PER_PIXEL_PRECISION_1_2: + allignDownForBppPrecision = 8; + break; + + case DSC_BITS_PER_PIXEL_PRECISION_1: + allignDownForBppPrecision = 16; + break; + + default: + allignDownForBppPrecision = 16; + } + + return (bitsPerPixelX16 & ~(allignDownForBppPrecision - 1)); +} + +/* + * @brief Calculate chunk size, num_extra_mux_bits + * + * @param[in/out] out DSC output parameter + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NVT_STATUS +DSC_PpsCalcExtraBits +( + DSC_OUTPUT_PARAMS *out +) +{ + NvU32 numSsps = out->native_422 ? 4 : 3; + NvU32 sliceBits; + NvU32 extra_bits; + NvU32 bitsPerComponent = out->bits_per_component; + NvU32 muxWordSize; + + muxWordSize = (bitsPerComponent >= 12) ? 64 : 48; + if (out->convert_rgb) + { + extra_bits = (numSsps * (muxWordSize + (4 * bitsPerComponent + 4) - 2)); + } + else if (!out->native_422) // YCbCr + { + extra_bits = (numSsps * muxWordSize + (4 * bitsPerComponent + 4) + 2 * (4 * bitsPerComponent) - 2); + } + else + { + extra_bits = (numSsps * muxWordSize + (4 * bitsPerComponent + 4) + 3 * (4 * bitsPerComponent) - 2); + } + + sliceBits = 8 * out->chunk_size * out->slice_height; + //while ((extra_bits>0) && ((sliceBits - extra_bits) % muxWordSize)) + // extra_bits--; + sliceBits = (sliceBits - extra_bits) % muxWordSize; + if (sliceBits != 0) + { + extra_bits -= MIN(extra_bits, muxWordSize - sliceBits); + } + + out->num_extra_mux_bits = extra_bits; + return NVT_STATUS_SUCCESS; +} + +/* + * @brief Calculate RC initial value. 
+ * Require: groups_per_line in Dsc_PpsCalcWidth() + * + * @param[in/out] out DSC output parameter + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NVT_STATUS +DSC_PpsCalcRcInitValue +( + DSC_OUTPUT_PARAMS *out +) +{ + NvU32 bitsPerPixel = out->bits_per_pixel; + NvU32 xmit_delay; + out->rc_model_size = 8192; + + if (out->native_422) + { + // =IF(CompressBpp >= 8, 2048, IF(CompressBpp <= 7, 5632, 5632 - ROUND((CompressBpp - 7) * (3584), 0))) + if (bitsPerPixel >= 16 * BPP_UNIT) + out->initial_offset = 2048; + else if (bitsPerPixel >= 14 * BPP_UNIT) + out->initial_offset = 5632 - ((bitsPerPixel - 14 * BPP_UNIT) * 1792 + BPP_UNIT / 2) / BPP_UNIT; + else + out->initial_offset = 5632; + } + else + { + if (bitsPerPixel >= 12 * BPP_UNIT) + out->initial_offset = 2048; + else if (bitsPerPixel >= 10 * BPP_UNIT) + out->initial_offset = 5632 - ((bitsPerPixel - 10 * BPP_UNIT) * 1792 + BPP_UNIT / 2) / BPP_UNIT; + else if (bitsPerPixel >= 8 * BPP_UNIT) + out->initial_offset = 6144 - ((bitsPerPixel - 8 * BPP_UNIT) * 256 + BPP_UNIT / 2) / BPP_UNIT; + else + out->initial_offset = 6144; + } + RANGE_CHECK("initial_offset", out->initial_offset, 0, out->rc_model_size); + + out->initial_scale_value = 8 * out->rc_model_size / (out->rc_model_size - out->initial_offset); + if (out->groups_per_line < out->initial_scale_value - 8) + { + out->initial_scale_value = out->groups_per_line + 8; + } + RANGE_CHECK("initial_scale_value", out->initial_scale_value, 0, 63); + + xmit_delay = (4096*BPP_UNIT + bitsPerPixel/2) / bitsPerPixel; + + if (out->native_420 || out->native_422) + { + NvU32 slicew = (out->native_420 || out->native_422) ? out->slice_width / 2 : out->slice_width; + NvU32 padding_pixels = ((slicew % 3) ? (3 - (slicew % 3)) : 0) * (xmit_delay / slicew); + if (3 * bitsPerPixel >= ((xmit_delay + 2) / 3) * (out->native_422 ? 
4 : 3) * BPP_UNIT && + (((xmit_delay + padding_pixels) % 3) == 1)) + { + xmit_delay++; + } + } + out->initial_xmit_delay = xmit_delay; + RANGE_CHECK("initial_xmit_delay", out->initial_xmit_delay, 0, 1023); + + return NVT_STATUS_SUCCESS; +} + +static NvU32 DSC_PpsCalcComputeOffset(DSC_OUTPUT_PARAMS *out, NvU32 grpcnt) +{ + NvU32 offset = 0; + NvU32 groupsPerLine = out->groups_per_line; + NvU32 grpcnt_id = (out->initial_xmit_delay + PIXELS_PER_GROUP - 1) / PIXELS_PER_GROUP; + + if(grpcnt <= grpcnt_id) + offset = (grpcnt * PIXELS_PER_GROUP * out->bits_per_pixel + BPP_UNIT - 1) / BPP_UNIT; + else + offset = (grpcnt_id * PIXELS_PER_GROUP * out->bits_per_pixel + BPP_UNIT - 1) / BPP_UNIT - (((grpcnt-grpcnt_id) * out->slice_bpg_offset)>>OFFSET_FRACTIONAL_BITS); + + if(grpcnt <= groupsPerLine) + offset += grpcnt * out->first_line_bpg_offset; + else + offset += groupsPerLine * out->first_line_bpg_offset - (((grpcnt - groupsPerLine) * out->nfl_bpg_offset)>>OFFSET_FRACTIONAL_BITS); + + if(out->native_420) + { + if(grpcnt <= groupsPerLine) + offset -= (grpcnt * out->nsl_bpg_offset) >> OFFSET_FRACTIONAL_BITS; + else if(grpcnt <= 2*groupsPerLine) + offset += (grpcnt - groupsPerLine) * out->second_line_bpg_offset - ((groupsPerLine * out->nsl_bpg_offset)>>OFFSET_FRACTIONAL_BITS); + else + offset += (grpcnt - groupsPerLine) * out->second_line_bpg_offset - (((grpcnt - groupsPerLine) * out->nsl_bpg_offset)>>OFFSET_FRACTIONAL_BITS); + } + return(offset); +} + +/* + * @brief Calculate bpg value except slice_bpg_offset + * + * @param[in/out] out DSC output parameter + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static +NvU32 DSC_PpsCalcBpg +( + DSC_OUTPUT_PARAMS *out +) +{ + NvU32 uncompressedBpgRate; + NvU32 ub_BpgOfs; + NvU32 firstLineBpgOfs; + NvU32 secondLineBpgOfs; + NvU32 bitsPerPixel; + NvU32 rbsMin; + NvU32 hrdDelay; + NvU32 groups_total; + + if (out->native_422) + uncompressedBpgRate = PIXELS_PER_GROUP * out->bits_per_component * 4; + else + uncompressedBpgRate = (3 * out->bits_per_component + (out->convert_rgb ? 2 : 0)) * PIXELS_PER_GROUP; + + ub_BpgOfs = (uncompressedBpgRate*BPP_UNIT - PIXELS_PER_GROUP * out->bits_per_pixel) / BPP_UNIT; + + if (out->slice_height >= 8) + firstLineBpgOfs = 12 + MIN(34, out->slice_height - 8) * 9 / 100; + else + firstLineBpgOfs = 2 * (out->slice_height - 1); + + firstLineBpgOfs = CLAMP(firstLineBpgOfs, 0, ub_BpgOfs); + out->first_line_bpg_offset = firstLineBpgOfs; + RANGE_CHECK("first_line_bpg_offset", out->first_line_bpg_offset, 0, 31); + + if (out->slice_height > 1) + out->nfl_bpg_offset = ((out->first_line_bpg_offset << OFFSET_FRACTIONAL_BITS) + out->slice_height - 2) / (out->slice_height - 1); + else + out->nfl_bpg_offset = 0; + + RANGE_CHECK("nfl_bpg_offset", out->nfl_bpg_offset, 0, 65535); + + secondLineBpgOfs = out->native_420 ? 12 : 0; + secondLineBpgOfs = CLAMP(secondLineBpgOfs, 0, ub_BpgOfs); + out->second_line_bpg_offset = secondLineBpgOfs; + RANGE_CHECK("second_line_bpg_offset", out->second_line_bpg_offset, 0, 31); + + if (out->slice_height > 2) + out->nsl_bpg_offset = ((out->second_line_bpg_offset << OFFSET_FRACTIONAL_BITS) + out->slice_height - 2) / (out->slice_height - 1); + else + out->nsl_bpg_offset = 0; + RANGE_CHECK("nsl_bpg_offset", out->nsl_bpg_offset, 0, 65535); + + out->second_line_offset_adj = out->native_420 ? 
512 : 0;
+
+    bitsPerPixel = out->bits_per_pixel;
+    groups_total = out->groups_per_line * out->slice_height;
+    out->slice_bpg_offset = (((out->rc_model_size - out->initial_offset + out->num_extra_mux_bits) << OFFSET_FRACTIONAL_BITS) +
+                             groups_total - 1) / groups_total;
+    RANGE_CHECK("slice_bpg_offset", out->slice_bpg_offset, 0, 65535);
+
+    if ((PIXELS_PER_GROUP * bitsPerPixel << OFFSET_FRACTIONAL_BITS) - (out->slice_bpg_offset + out->nfl_bpg_offset) * BPP_UNIT
+        < (1 + 5 * PIXELS_PER_GROUP) * BPP_UNIT)
+    {
+        // Error! bits_per_pixel too low: the slice and nfl bpg offsets leave too few bits per group
+        return NVT_STATUS_ERR;
+    }
+
+    if (((out->dsc_version_major > 1) || (out->dsc_version_major == 1 && out->dsc_version_minor >= 2)) &&
+        (out->native_420 || out->native_422))
+    {
+        // OPTIMIZED computation of rbsMin:
+        // Compute max by sampling offset at points of inflection
+        // *MODEL NOTE* MN_RBS_MIN
+        NvU32 maxOffset;
+        maxOffset = DSC_PpsCalcComputeOffset(out, (out->initial_xmit_delay + PIXELS_PER_GROUP - 1) / PIXELS_PER_GROUP); // After initial delay
+        maxOffset = MAX(maxOffset, DSC_PpsCalcComputeOffset(out, out->groups_per_line));     // After first line
+        maxOffset = MAX(maxOffset, DSC_PpsCalcComputeOffset(out, 2 * out->groups_per_line));
+        rbsMin = out->rc_model_size - out->initial_offset + maxOffset;
+    }
+    else
+    {   // DSC 1.1 method
+        rbsMin = out->rc_model_size - out->initial_offset +
+                 (out->initial_xmit_delay * bitsPerPixel + BPP_UNIT - 1) / BPP_UNIT +
+                 out->groups_per_line * out->first_line_bpg_offset;
+    }
+    hrdDelay = (rbsMin * BPP_UNIT + bitsPerPixel - 1) / bitsPerPixel;
+    out->initial_dec_delay = hrdDelay - out->initial_xmit_delay;
+    RANGE_CHECK("initial_dec_delay", out->initial_dec_delay, 0, 65535);
+
+    return NVT_STATUS_SUCCESS;
+}
+
+/*
+ * @brief Calculate final_offset and scale_increment_interval,
+ *        scale_decrement_interval
+ *
+ * @param[in/out]  out   DSC output parameter
+ *
+ * @returns NVT_STATUS_SUCCESS if successful;
+ *          NVT_STATUS_ERR if unsuccessful;
+ */
+static NvU32
+DSC_PpsCalcScaleInterval
+(
+    DSC_OUTPUT_PARAMS *out
+)
+{
+    NvU32 final_scale;
+
+    out->final_offset = (out->rc_model_size - (out->initial_xmit_delay * out->bits_per_pixel + 8) /
+                         BPP_UNIT + out->num_extra_mux_bits);
+    RANGE_CHECK("final_offset", out->final_offset, 0, out->rc_model_size - 1); // try increasing initial_xmit_delay
+
+    final_scale = 8 * out->rc_model_size / (out->rc_model_size - out->final_offset);
+    RANGE_CHECK("final_scale", final_scale, 0, 63); // try increasing initial_xmit_delay
+
+    // BEGIN scale_increment_interval fix
+    if (final_scale > 9)
+    {
+        //
+        // Note: the following calculation assumes that the rcXformOffset crosses 0 at some point. If the zero-crossing
+        // doesn't occur in a configuration, we recommend reconfiguring the rc_model_size and thresholds to be smaller
+        // for that configuration.
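+        // Illustrative magnitudes (assumed values, not from the spec, and
+        // assuming OFFSET_FRACTIONAL_BITS is 11): with final_offset = 4336,
+        // final_scale = 17 and the three bpg offsets summing to 3000, the
+        // interval below evaluates to (4336 << 11) / (8 * 3000) = 370 group times.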
+        //
+        out->scale_increment_interval = (out->final_offset << OFFSET_FRACTIONAL_BITS) /
+                                        ((final_scale - 9) * (out->nfl_bpg_offset +
+                                          out->slice_bpg_offset + out->nsl_bpg_offset));
+        RANGE_CHECK("scale_increment_interval", out->scale_increment_interval, 0, 65535);
+    }
+    else
+    {
+        out->scale_increment_interval = 0;
+    }
+
+    // END scale_increment_interval fix
+    if (out->initial_scale_value > 8)
+        out->scale_decrement_interval = out->groups_per_line / (out->initial_scale_value - 8);
+    else
+        out->scale_decrement_interval = 4095;
+    RANGE_CHECK("scale_decrement_interval", out->scale_decrement_interval, 1, 4095);
+    return NVT_STATUS_SUCCESS;
+}
+
+/*
+ * @brief Calculate RC parameters
+ *
+ * @param[in/out]  out   DSC output parameter
+ *
+ * @returns NVT_STATUS_SUCCESS if successful;
+ *          NVT_STATUS_ERR if unsuccessful;
+ */
+static NvU32
+DSC_PpsCalcRcParam
+(
+    DSC_OUTPUT_PARAMS *out
+)
+{
+    NvU32 i, idx;
+    NvU32 bitsPerPixel = out->bits_per_pixel;
+    NvU32 bpcm8 = out->bits_per_component - 8;
+    NvU32 yuv_modifier = out->convert_rgb == 0 && out->dsc_version_minor == 1;
+    NvU32 qp_bpc_modifier = bpcm8 * 2 - yuv_modifier;
+    const int ofs_und6[]  = {  2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 };
+    const int ofs_und7[]  = {  2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 };
+    const int ofs_und10[] = { 10, 8, 6,  4,  2,  0, -2, -4, -6,  -8, -10, -10, -12, -12, -12 };
+
+    out->flatness_min_qp = 3 + qp_bpc_modifier;
+    out->flatness_max_qp = 12 + qp_bpc_modifier;
+    out->flatness_det_thresh = 2 << bpcm8;
+    out->rc_edge_factor = 6;
+    out->rc_quant_incr_limit0 = 11 + qp_bpc_modifier;
+    out->rc_quant_incr_limit1 = 11 + qp_bpc_modifier;
+    out->rc_tgt_offset_hi = 3;
+    out->rc_tgt_offset_lo = 3;
+
+    for (i = 0; i < NUM_BUF_RANGES - 1; i++)
+        out->rc_buf_thresh[i] = rcBufThresh[i] & (0xFF << 6);
+
+    if (out->native_422)
+    {
+        idx = bitsPerPixel/BPP_UNIT - 12;
+        if (bpcm8 == 0)
+        {
+            for (i = 0; i < NUM_BUF_RANGES; ++i)
+            {
+                out->range_min_qp[i] = minqp422_8b[i][idx];
+                out->range_max_qp[i] = maxqp422_8b[i][idx];
+            }
+        }
+        else if (bpcm8 == 2)
+        {
+            for (i = 0; i < NUM_BUF_RANGES; i++)
+            {
+                out->range_min_qp[i] = minqp422_10b[i][idx];
+                out->range_max_qp[i] = maxqp422_10b[i][idx];
+            }
+        }
+        else
+        {
+            for (i = 0; i < NUM_BUF_RANGES; i++)
+            {
+                out->range_min_qp[i] = minqp422_12b[i][idx];
+                out->range_max_qp[i] = maxqp422_12b[i][idx];
+            }
+        }
+
+        for (i = 0; i < NUM_BUF_RANGES; ++i)
+        {
+            if (bitsPerPixel <= 12*BPP_UNIT)
+            {
+                out->range_bpg_offset[i] = ofs_und6[i];
+            }
+            else if (bitsPerPixel <= 14*BPP_UNIT)
+            {
+                out->range_bpg_offset[i] = ofs_und6[i] + ((bitsPerPixel - 12*BPP_UNIT) *
+                                           (ofs_und7[i] - ofs_und6[i]) + BPP_UNIT) / (2*BPP_UNIT);
+            }
+            else if (bitsPerPixel <= 16*BPP_UNIT)
+            {
+                out->range_bpg_offset[i] = ofs_und7[i];
+            }
+            else if (bitsPerPixel <= 20*BPP_UNIT)
+            {
+                out->range_bpg_offset[i] = ofs_und7[i] + ((bitsPerPixel - 16*BPP_UNIT) *
+                                           (ofs_und10[i] - ofs_und7[i]) + 2*BPP_UNIT) / (4*BPP_UNIT);
+            }
+            else
+            {
+                out->range_bpg_offset[i] = ofs_und10[i];
+            }
+        }
+    }
+    else
+    {
+        idx = (2 * (bitsPerPixel - 6 * BPP_UNIT)) / BPP_UNIT;
+
+        if (bpcm8 == 0)
+        {
+            for (i = 0; i < NUM_BUF_RANGES; i++)
+            {
+                const NvU32 min = minqp444_8b[i][idx];
+                const NvU32 max = maxqp444_8b[i][idx];
+
+                out->range_min_qp[i] = MAX(0, min - yuv_modifier);
+                out->range_max_qp[i] = MAX(0, max - yuv_modifier);
+            }
+        }
+        else if (bpcm8 == 2)
+        {
+            for (i = 0; i < NUM_BUF_RANGES; i++)
+            {
+                const NvU32 min = minqp444_10b[i][idx];
+                const NvU32 max = maxqp444_10b[i][idx];
+
+                out->range_min_qp[i] = MAX(0, min - yuv_modifier);
out->range_max_qp[i] = MAX(0, max - yuv_modifier); + } + } + else + { + for (i = 0; i < NUM_BUF_RANGES; i++) + { + const NvU32 min = minqp444_12b[i][idx]; + const NvU32 max = maxqp444_12b[i][idx]; + + out->range_min_qp[i] = MAX(0, min - yuv_modifier); + out->range_max_qp[i] = MAX(0, max - yuv_modifier); + } + } + + for (i = 0; i < NUM_BUF_RANGES; ++i) + { + //if (out->native_420) + //{ + // NvU32 ofs_und4[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 }; + // NvU32 ofs_und5[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; + // NvU32 ofs_und6[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; + // NvU32 ofs_und8[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 }; + // out->range_min_qp[i] = minqp_420[bpcm8 / 2][i][idx]; + // out->range_max_qp[i] = maxqp_420[bpcm8 / 2][i][idx]; + // if (bitsPerPixel <= 8*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und4[i]; + // else if (bitsPerPixel <= 10*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und4[i] + (NvU32)(0.5 * (bitsPerPixel - 8.0) * (ofs_und5[i] - ofs_und4[i]) + 0.5); + // else if (bitsPerPixel <= 12*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und5[i] + (NvU32)(0.5 * (bitsPerPixel - 10.0) * (ofs_und6[i] - ofs_und5[i]) + 0.5); + // else if (bitsPerPixel <= 16*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und6[i] + (NvU32)(0.25 * (bitsPerPixel - 12.0) * (ofs_und8[i] - ofs_und6[i]) + 0.5); + // else + // out->range_bpg_offset[i] = ofs_und8[i]; + //} + //else if (out->native_422) + //{ + // NvU32 ofs_und6[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 }; + // NvU32 ofs_und7[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; + // NvU32 ofs_und10[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 }; + // out->range_min_qp[i] = minqp_422[bpcm8 / 2][i][idx]; + // out->range_max_qp[i] = maxqp_422[bpcm8 / 2][i][idx]; + // if (bitsPerPixel <= 12*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und6[i]; + // else if(bitsPerPixel <= 14*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und6[i] + (NvU32)((bitsPerPixel - 12.0) * (ofs_und7[i] - ofs_und6[i]) / 2.0 + 0.5); + // else if(bitsPerPixel <= 16*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und7[i]; + // else if(bitsPerPixel <= 20*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und7[i] + (NvU32)((bitsPerPixel - 16.0) * (ofs_und10[i] - ofs_und7[i]) / 4.0 + 0.5); + // else + // out->range_bpg_offset[i] = ofs_und10[i]; + //} + //else + { + const NvU32 ofs_und6[] = { 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 }; + const NvU32 ofs_und8[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; + const NvU32 ofs_und12[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; + const NvU32 ofs_und15[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 }; + + if (bitsPerPixel <= 6 * BPP_UNIT) + { + out->range_bpg_offset[i] = ofs_und6[i]; + } + else if (bitsPerPixel <= 8 * BPP_UNIT) + { + out->range_bpg_offset[i] = ofs_und6[i] + ((bitsPerPixel - 6 * BPP_UNIT) * + (ofs_und8[i] - ofs_und6[i]) + BPP_UNIT) / (2 * BPP_UNIT); + } + else if (bitsPerPixel <= 12 * BPP_UNIT) + { + out->range_bpg_offset[i] = ofs_und8[i]; + } + else if (bitsPerPixel <= 15 * BPP_UNIT) + { + out->range_bpg_offset[i] = ofs_und12[i] + ((bitsPerPixel - 12 * BPP_UNIT) * + (ofs_und15[i] - ofs_und12[i]) + 3 * BPP_UNIT / 2) / (3 * BPP_UNIT); + } + else + { + out->range_bpg_offset[i] = ofs_und15[i]; + } + } + } + } + return NVT_STATUS_SUCCESS; +} + +/* + * @brief 
Initialize with basic PPS values based on passed down input params
+ *
+ * @param[in]   in   DSC input parameter
+ * @param[out]  out  DSC output parameter
+ *
+ * @returns NVT_STATUS_SUCCESS if successful;
+ *          NVT_STATUS_ERR if unsuccessful;
+ */
+static NvU32
+DSC_PpsCalcBase
+(
+    const DSC_INPUT_PARAMS *in,
+    DSC_OUTPUT_PARAMS *out
+)
+{
+    out->dsc_version_major = 1;
+    ENUM2_CHECK("dsc_version_minor", in->dsc_version_minor, 1, 2);
+    out->dsc_version_minor = in->dsc_version_minor == 1 ? 1 : 2;
+    out->pps_identifier = 0;
+    ENUM3_CHECK("bits_per_component", in->bits_per_component, 8, 10, 12);
+    out->bits_per_component = in->bits_per_component;
+    out->bits_per_pixel = in->bits_per_pixel;
+    RANGE_CHECK("linebuf_depth", in->linebuf_depth, DSC_DECODER_LINE_BUFFER_BIT_DEPTH_MIN, DSC_DECODER_LINE_BUFFER_BIT_DEPTH_MAX);
+    out->linebuf_depth = in->linebuf_depth;
+    ENUM2_CHECK("block_pred_enable", in->block_pred_enable, 0, 1);
+    out->block_pred_enable = in->block_pred_enable ? 1 : 0;
+    ENUM2_CHECK("convert_rgb", in->convert_rgb, 0, 1);
+    out->convert_rgb = in->convert_rgb ? 1 : 0;
+
+    if (in->multi_tile)
+    {
+        RANGE_CHECK("pic_width", in->pic_width, 64, 16384);
+        RANGE_CHECK("pic_height", in->pic_height, 8, 16384);
+    }
+    else
+    {
+        RANGE_CHECK("pic_height", in->pic_height, 8, 8192);
+        if (in->dual_mode)
+        {
+            RANGE_CHECK("pic_width", in->pic_width, 64, 8192);
+        }
+        else
+        {
+            RANGE_CHECK("pic_width", in->pic_width, 32, 5120);
+        }
+    }
+
+    out->pic_height = in->pic_height;
+    out->pic_width = in->pic_width;
+    out->simple_422 = in->simple_422;
+    out->vbr_enable = 0;
+    out->native_420 = in->native_420;
+    out->native_422 = in->native_422;
+    out->slice_num = in->slice_num;
+    out->slice_width = in->slice_width;
+    out->slice_height = in->slice_height;
+
+    return NVT_STATUS_SUCCESS;
+}
+
+/*
+ * @brief Generate 32bit data array from DSC_OUTPUT_PARAMS.
+ *
+ * @param[in]   in    Calculated DSC output parameters
+ * @param[out]  data  NvU32[DSC_MAX_PPS_SIZE_DWORD] array to return the PPS data.
+ *                    The data can be sent to SetDscPpsData* methods directly.
+ *
+ * @returns void
+ */
+static void
+DSC_PpsConstruct
+(
+    const DSC_OUTPUT_PARAMS *in,
+    NvU32 data[DSC_MAX_PPS_SIZE_DWORD]
+)
+{
+    NvU32 i;
+    NvU32 pps[96];
+
+    if (data == NULL)
+    {
+        return;
+    }
+
+    pps[0]  = (in->dsc_version_major << 4) | (in->dsc_version_minor & 0xF);
+    pps[1]  = in->pps_identifier;
+    pps[2]  = 0;
+    pps[3]  = (in->bits_per_component << 4) | (in->linebuf_depth & 0xF);
+    pps[4]  = (in->block_pred_enable << 5) | (in->convert_rgb << 4) |
+              (in->simple_422 << 3) | (in->vbr_enable << 2) |
+              MSB(in->bits_per_pixel & 0x3FF);
+    pps[5]  = LSB(in->bits_per_pixel);
+    pps[6]  = MSB(in->pic_height);
+    pps[7]  = LSB(in->pic_height);
+    pps[8]  = MSB(in->pic_width);
+    pps[9]  = LSB(in->pic_width);
+    pps[10] = MSB(in->slice_height);
+    pps[11] = LSB(in->slice_height);
+    pps[12] = MSB(in->slice_width);
+    pps[13] = LSB(in->slice_width);
+    pps[14] = MSB(in->chunk_size);
+    pps[15] = LSB(in->chunk_size);
+    pps[16] = MSB(in->initial_xmit_delay & 0x3FF);
+    pps[17] = LSB(in->initial_xmit_delay);
+    pps[18] = MSB(in->initial_dec_delay);
+    pps[19] = LSB(in->initial_dec_delay);
+    pps[20] = 0;
+    pps[21] = in->initial_scale_value & 0x3F;
+    pps[22] = MSB(in->scale_increment_interval);
+    pps[23] = LSB(in->scale_increment_interval);
+    pps[24] = MSB(in->scale_decrement_interval & 0xFFF);
+    pps[25] = LSB(in->scale_decrement_interval);
+    pps[26] = 0;
+    pps[27] = in->first_line_bpg_offset & 0x1F;
+    pps[28] = MSB(in->nfl_bpg_offset);
+    pps[29] = LSB(in->nfl_bpg_offset);
+    pps[30] = MSB(in->slice_bpg_offset);
+    pps[31] = LSB(in->slice_bpg_offset);
+    pps[32] = MSB(in->initial_offset);
+    pps[33] = LSB(in->initial_offset);
+    pps[34] = MSB(in->final_offset);
+    pps[35] = LSB(in->final_offset);
+    pps[36] = in->flatness_min_qp & 0x1F;
+    pps[37] = in->flatness_max_qp & 0x1F;
+
+    pps[38] = MSB(in->rc_model_size);
+    pps[39] = LSB(in->rc_model_size);
+    pps[40] = in->rc_edge_factor & 0xF;
+    pps[41] = in->rc_quant_incr_limit0 & 0x1F;
+    pps[42] = in->rc_quant_incr_limit1 & 0x1F;
+    pps[43] = (in->rc_tgt_offset_hi << 4) | (in->rc_tgt_offset_lo & 0xF);
+    for (i = 0; i < NUM_BUF_RANGES - 1; i++)
+        pps[44 + i] = in->rc_buf_thresh[i] >> 6;
+
+    for (i = 0; i < NUM_BUF_RANGES; i++)
+    {
+        NvU32 x = ((in->range_min_qp[i] & 0x1F) << 11) |
+                  ((in->range_max_qp[i] & 0x1F) << 6)  |
+                  ((in->range_bpg_offset[i] & 0x3F));
+        pps[58 + i * 2] = MSB(x);
+        pps[59 + i * 2] = LSB(x);
+    }
+
+    pps[88] = (in->native_420 << 1) | (in->native_422);
+    pps[89] = in->second_line_bpg_offset & 0x1F;
+    pps[90] = MSB(in->nsl_bpg_offset);
+    pps[91] = LSB(in->nsl_bpg_offset);
+    pps[92] = MSB(in->second_line_offset_adj);
+    pps[93] = LSB(in->second_line_offset_adj);
+    pps[94] = 0;
+    pps[95] = 0;
+
+    for (i = 0; i < 24; i++)
+    {
+        data[i] = ((pps[i * 4 + 0] << 0)  |
+                   (pps[i * 4 + 1] << 8)  |
+                   (pps[i * 4 + 2] << 16) |
+                   (pps[i * 4 + 3] << 24));
+    }
+
+    for (; i < 32; i++)
+        data[i] = 0;
+}
+
+/*
+ * @brief Extract DSC parameters from PPS data
+ *
+ * @param[in]   pps  PPS data array
+ * @param[out]  out  DSC output parameters
+ */
+static void
+DSC_GenerateDataFromPPS(const NvU32 *pps, DSC_OUTPUT_PARAMS *out)
+{
+    NvU32 i;
+
+    out->dsc_version_major = pps[0] >> 4;
+    out->dsc_version_minor = pps[0] & 0xF;
+    out->pps_identifier = pps[1];
+    out->bits_per_component = (pps[3] >> 4);
+    out->linebuf_depth = pps[3] & 0xF;
+    out->block_pred_enable = (pps[4] >> 5) & 0x1;
+    out->convert_rgb = (pps[4] >> 4) & 0x1;
+    out->simple_422 = (pps[4] >> 3) & 0x1;
+    out->vbr_enable = (pps[4] >> 2) & 0x1;
+    out->bits_per_pixel = (pps[4] & 0x3) << 8 | pps[5];
+    out->pic_height = (pps[6] << 8) | pps[7];
+    out->pic_width = (pps[8] << 8) | pps[9];
+    out->slice_height = (pps[10] << 8) | pps[11];
+    out->slice_width = (pps[12] << 8) | pps[13];
+    out->chunk_size = (pps[14] << 8) | pps[15];
+    out->initial_xmit_delay = (pps[16] << 8) | pps[17];
+    out->initial_dec_delay = (pps[18] << 8) | pps[19];
+    out->initial_scale_value = pps[21];
+    out->scale_increment_interval = (pps[22] << 8) | pps[23];
+    out->scale_decrement_interval = (pps[24] << 8) | pps[25];
+    out->first_line_bpg_offset = pps[27];
+    out->nfl_bpg_offset = (pps[28] << 8) | pps[29];
+    out->slice_bpg_offset = (pps[30] << 8) | pps[31];
+    out->initial_offset = (pps[32] << 8) | pps[33];
+    out->final_offset = (pps[34] << 8) | pps[35];
+    out->flatness_min_qp = pps[36];
+    out->flatness_max_qp = pps[37];
+    out->rc_model_size = (pps[38] << 8) | pps[39];
+    out->rc_edge_factor = pps[40];
+    out->rc_quant_incr_limit0 = pps[41];
+    out->rc_quant_incr_limit1 = pps[42];
+    out->rc_tgt_offset_hi = (pps[43] >> 4) & 0xF;
+    out->rc_tgt_offset_lo = pps[43] & 0xF;
+
+    for (i = 0; i < NUM_BUF_RANGES - 1; i++)
+        out->rc_buf_thresh[i] = (pps[44 + i] << 6);
+
+    for (i = 0; i < NUM_BUF_RANGES; i++) {
+        out->range_min_qp[i] = (pps[58 + i * 2] >> 3);
+        out->range_max_qp[i] = (pps[58 + i * 2] & 0x7) << 2 | (pps[59 + i * 2] >> 6);
+        out->range_bpg_offset[i] = pps[59 + i * 2] & 0x3F;
+    }
+
+    out->native_420 = (pps[88] >> 1) & 0x1;
+    out->native_422 = pps[88] & 0x1;
+    out->second_line_bpg_offset = pps[89];
+    out->nsl_bpg_offset = (pps[90] << 8) | pps[91];
+    out->second_line_offset_adj = (pps[92] << 8) | pps[93];
+}
+
+/*
+ * @brief Validate DSC PPS data
+ *
+ * @param[in] pPps  PPS data
+ *
+ * @return NVT_STATUS_SUCCESS if the PPS data is valid;
+ *         NVT_STATUS_ERR otherwise
+ */
+NVT_STATUS
+DSC_ValidatePPSData(DSCPPSDATA *pPps)
+{
+    DSC_OUTPUT_PARAMS outParams;
+    DSC_OUTPUT_PARAMS *out = &outParams;
+    NvU32 i;
+    NvU32 pps[96];
+    NvU32 slice_num; // slice count implied by pic_width/slice_width, used in the chunk size check below
+
+    if (pPps == NULL)
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    /* Extract params from PPS data */
+    for (i = 0; i < 24; i++)
+    {
+        pps[i*4 + 0] = (pPps->pPps[i] >> 0)  & 0xFF;
+        pps[i*4 + 1] = (pPps->pPps[i] >> 8)  & 0xFF;
+        pps[i*4 + 2] = (pPps->pPps[i] >> 16) & 0xFF;
+        pps[i*4 + 3] = (pPps->pPps[i] >> 24) & 0xFF;
+    }
+    DSC_GenerateDataFromPPS(pps, out);
+    slice_num = out->pic_width / out->slice_width;
+    ENUM_CHECK("dsc_version_major", out->dsc_version_major, 1);
+    ENUM2_CHECK("dsc_version_minor", out->dsc_version_minor, 1, 2);
+    ENUM3_CHECK("bits_per_component", out->bits_per_component, 8, 10, 12);
+    RANGE_CHECK("linebuf_depth", out->linebuf_depth, DSC_DECODER_LINE_BUFFER_BIT_DEPTH_MIN, DSC_DECODER_LINE_BUFFER_BIT_DEPTH_MAX);
+    ENUM2_CHECK("block_pred_enable", out->block_pred_enable, 0, 1);
+    ENUM2_CHECK("convert_rgb", out->convert_rgb, 0, 1);
+    RANGE_CHECK("initial_offset", out->initial_offset, 0, out->rc_model_size);
+    RANGE_CHECK("initial_scale_value", out->initial_scale_value, 0, 63);
+    RANGE_CHECK("initial_xmit_delay", out->initial_xmit_delay, 0, 1023);
+    RANGE_CHECK("slice_height", out->slice_height, 8, out->pic_height);
+    RANGE_CHECK("first_line_bpg_offset", out->first_line_bpg_offset, 0, 31);
+    RANGE_CHECK("nfl_bpg_offset", out->nfl_bpg_offset, 0, 65535);
+    RANGE_CHECK("second_line_bpg_offset", out->second_line_bpg_offset, 0, 31);
+    RANGE_CHECK("nsl_bpg_offset", out->nsl_bpg_offset, 0, 65535);
+    RANGE_CHECK("slice_bpg_offset", out->slice_bpg_offset, 0, 65535);
+    RANGE_CHECK("initial_dec_delay", out->initial_dec_delay, 0, 65535);
RANGE_CHECK("final_offset", out->final_offset, 0, out->rc_model_size-1); + RANGE_CHECK("scale_increment_interval", out->scale_increment_interval, 0, 65535); + RANGE_CHECK("scale_decrement_interval", out->scale_decrement_interval, 1, 4095); + + if ((out->chunk_size + 3) / 4 * slice_num > out->pic_width) + { + // Error! bpp too high + return NVT_STATUS_ERR; + } + return NVT_STATUS_SUCCESS; +} + + + +/* + * @brief Generate slice count supported mask with given slice num. + * + * @param[in] slice_num slice num for which mask needs to be generated + * + * @returns out_slice_count_mask if successful + * 0 if not successful + */ +static NvU32 +DSC_SliceCountMaskforSliceNum (NvU32 slice_num) +{ + switch (slice_num) + { + case 1: + return DSC_DECODER_SLICES_PER_SINK_1; + case 2: + return DSC_DECODER_SLICES_PER_SINK_2; + case 4: + return DSC_DECODER_SLICES_PER_SINK_4; + case 6: + return DSC_DECODER_SLICES_PER_SINK_6; + case 8: + return DSC_DECODER_SLICES_PER_SINK_8; + case 10: + return DSC_DECODER_SLICES_PER_SINK_10; + case 12: + return DSC_DECODER_SLICES_PER_SINK_12; + case 16: + return DSC_DECODER_SLICES_PER_SINK_16; + case 20: + return DSC_DECODER_SLICES_PER_SINK_20; + case 24: + return DSC_DECODER_SLICES_PER_SINK_24; + default: + return DSC_DECODER_SLICES_PER_SINK_INVALID; + } +} + +/* + * @brief Convert peak throughput placeholders into numeric values. + * + * @param[in] peak_throughput_mode0 peak throughput sink cap placeholder. + * + * @returns peak_throughput_mps actual throughput in MegaPixels/second. + */ +static NvU32 +DSC_GetPeakThroughputMps(NvU32 peak_throughput) +{ + NvU32 peak_throughput_mps; + switch(peak_throughput) + { + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_340: + peak_throughput_mps = 340; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_400: + peak_throughput_mps = 400; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_450: + peak_throughput_mps = 450; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_500: + peak_throughput_mps = 500; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_550: + peak_throughput_mps = 550; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_600: + peak_throughput_mps = 600; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_650: + peak_throughput_mps = 650; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_700: + peak_throughput_mps = 700; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_750: + peak_throughput_mps = 750; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_800: + peak_throughput_mps = 800; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_850: + peak_throughput_mps = 850; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_900: + peak_throughput_mps = 900; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_950: + peak_throughput_mps = 950; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_1000: + peak_throughput_mps = 1000; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_170: + peak_throughput_mps = 170; + break; + // Custom one, defined for HDMI YUV422/YUV420 modes + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_680: + peak_throughput_mps = 680; + break; + default: + peak_throughput_mps = 0; + } + return peak_throughput_mps; +} + +/* + * @brief Get minimum slice count needed to support the mode. + * + * @param[in] picWidth active width of the mode. + * @param[in] pixelClkMhz pixel clock in Mhz of the mode. + * @param[in] maxSliceWidth Max slice with considering gpu and sink + * @param[in] peakThroughPutMps Max throughput supported by the sink dsc decoder. 
+/*
+ * @brief Get minimum slice count needed to support the mode.
+ *
+ * @param[in]  picWidth              active width of the mode.
+ * @param[in]  pixelClkMhz           pixel clock in MHz of the mode.
+ * @param[in]  maxSliceWidth         max slice width considering GPU and sink caps
+ * @param[in]  peakThroughPutMps     max throughput supported by the sink DSC decoder.
+ * @param[in]  maxSliceCount         max slice count considering GPU and sink caps
+ * @param[in]  commonSliceCountMask  slice count mask to be considered
+ * @param[out] pMinSliceCount        minimum slice count to be used for the mode.
+ *
+ * @returns NVT_STATUS_SUCCESS if a usable minimum slice count was found;
+ *          NVT_STATUS_MIN_SLICE_COUNT_ERROR otherwise.
+ */
+static NVT_STATUS
+DSC_GetMinSliceCountForMode
+(
+    NvU32 picWidth,
+    NvU32 pixelClkMhz,
+    NvU32 maxSliceWidth,
+    NvU32 peakThroughPutMps,
+    NvU32 maxSliceCount,
+    NvU32 commonSliceCountMask,
+    NvU32 *pMinSliceCount
+)
+{
+    NvU32 minSliceCountLocal = 0U;
+    NvU32 minSliceCountPicWidth = (picWidth + maxSliceWidth - 1) / maxSliceWidth;
+    NvU32 minSliceCountThroughput = (pixelClkMhz + peakThroughPutMps - 1) / peakThroughPutMps;
+
+    minSliceCountLocal = MAX(minSliceCountPicWidth, minSliceCountThroughput);
+    if (maxSliceCount < minSliceCountLocal)
+    {
+        return NVT_STATUS_MIN_SLICE_COUNT_ERROR;
+    }
+    if ((DSC_SliceCountMaskforSliceNum(minSliceCountLocal) & commonSliceCountMask) == 0x0)
+    {
+        //
+        // It is possible that the minimum slice count calculated from the pic width and
+        // pixel clock criteria is not a valid slice count supported by both GPU and
+        // sink. In those cases, we need to find the next valid slice count for the
+        // combo.
+        //
+        NvU32 newMinSliceCount = 0U;
+        if (DSC_GetHigherSliceCount(commonSliceCountMask, minSliceCountLocal, &newMinSliceCount) != NVT_STATUS_SUCCESS)
+        {
+            return NVT_STATUS_MIN_SLICE_COUNT_ERROR;
+        }
+        minSliceCountLocal = newMinSliceCount;
+    }
+    *pMinSliceCount = minSliceCountLocal;
+    return NVT_STATUS_SUCCESS;
+}
+
+/*
+ * @brief Get slice count mask up to max slice count.
+ *
+ * @param[in] maxSliceNum  max slice number to be considered while generating mask
+ * @param[in] bInclusive   whether the maximum slice number itself should be included in the mask
+ *
+ * @returns slice count mask of all slice counts up to max slice count
+ */
+static NvU32
+DSC_GetSliceCountMask
+(
+    NvU32 maxSliceNum,
+    NvBool bInclusive
+)
+{
+    // Below are the valid slice counts according to DP2.0 spec.
+    NvU32 validSliceNum[] = {1U,2U,4U,6U,8U,10U,12U,16U,20U,24U};
+    NvU32 sliceCountMask = 0U;
+    NvU32 sliceArrayCount;
+    NvU32 i;
+
+    sliceArrayCount = sizeof(validSliceNum)/sizeof(NvU32);
+
+    if (maxSliceNum == 0U)
+        return 0U;
+
+    for (i = 0U; ((i < sliceArrayCount) && (validSliceNum[i] < maxSliceNum)); i++)
+    {
+        sliceCountMask |= DSC_SliceCountMaskforSliceNum(validSliceNum[i]);
+    }
+
+    if (bInclusive && (i < sliceArrayCount))
+    {
+        sliceCountMask |= DSC_SliceCountMaskforSliceNum(validSliceNum[i]);
+    }
+
+    return sliceCountMask;
+}
+
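+// Illustrative example: DSC_GetSliceCountMask(8, NV_TRUE) sets the mask bits
+// for slice counts 1, 2, 4, 6 and 8; with bInclusive == NV_FALSE the bit for
+// 8 itself is left out.
+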
+/*
+ * @brief Get the next higher valid slice count.
+ *
+ * Note: each bit position in the mask represents the corresponding slice count
+ * as per validSliceNum. The function compares the bit position of each set bit
+ * in the mask against the passed-in current slice count. If it finds a slice
+ * count that is more than the current slice count, that is returned as the
+ * next higher slice count.
+ *
+ * @param[in]  commonSliceCountMask  Includes slice counts supported by both
+ *                                   GPU and sink
+ * @param[in]  currentSliceCount     Current slice count
+ * @param[out] newSliceCount         Higher slice count if one was found.
+ *
+ * @returns NVT_STATUS_SUCCESS if successful;
+ *          NVT_STATUS_PPS_SLICE_COUNT_ERROR if unsuccessful;
+ */
+static NvU32
+DSC_GetHigherSliceCount
+(
+    NvU32 commonSliceCountMask,
+    NvU32 currentSliceCount,
+    NvU32 *newSliceCount
+)
+{
+    NvU32 i = 0U;
+    NvU32 sliceMask = commonSliceCountMask;
+    //
+    // Below are the valid slice counts according to DP2.0 spec.
+    // Refer DPCD 64h & 6Dh. Note validSliceNum[2] is kept 0 to
+    // indicate DPCD 64h[2] which is kept reserved according to spec.
+    //
+    NvU32 validSliceNum[] = {1U,2U,0U,4U,6U,8U,10U,12U,16U,20U,24U};
+    NvU32 sliceArrayCount;
+
+    sliceArrayCount = sizeof(validSliceNum)/sizeof(NvU32);
+
+    //
+    // We need to decode the slice count mask and find out if there is a slice
+    // count in the mask that is higher than the passed in currentSliceCount.
+    //
+    while (sliceMask != 0U && i < sliceArrayCount)
+    {
+        if (sliceMask & 0x1)
+        {
+            if (validSliceNum[i] > currentSliceCount)
+            {
+                *newSliceCount = validSliceNum[i];
+                return NVT_STATUS_SUCCESS;
+            }
+        }
+        sliceMask = sliceMask >> 1;
+        i++;
+    }
+
+    return NVT_STATUS_PPS_SLICE_COUNT_ERROR;
+}
+
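+// Illustrative example (assumed mask): with commonSliceCountMask covering
+// slice counts {1, 2, 4} and currentSliceCount = 3, the walk above returns 4
+// through *newSliceCount.
+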
+
+    common_slice_count_mask = gpu_slice_count_mask & slice_count_mask;
+
+    if (!common_slice_count_mask)
+    {
+        // DSC cannot be supported since there is no common supported slice count
+        return NVT_STATUS_DSC_SLICE_ERROR;
+    }
+
+    peak_throughput_mps = DSC_GetPeakThroughputMps(peak_throughput);
+    if (!peak_throughput_mps)
+    {
+        // Peak throughput cannot be zero
+        return NVT_STATUS_INVALID_PEAK_THROUGHPUT;
+    }
+
+    if (out->slice_num == 0 && out->slice_width == 0)
+    {
+        status = DSC_GetMinSliceCountForMode(out->pic_width, pixel_clkMHz,
+                                             max_slice_width, peak_throughput_mps,
+                                             max_slice_num,
+                                             common_slice_count_mask,
+                                             &min_slice_num);
+        if (status != NVT_STATUS_SUCCESS)
+        {
+            return status;
+        }
+
+        out->slice_num   = min_slice_num;
+        out->slice_width = (out->pic_width + out->slice_num - 1) / out->slice_num;
+    }
+    else if (out->slice_num == 0)
+    {
+        if (out->slice_width > max_slice_width)
+        {
+            // Error! Requested slice width exceeds the max supported slice width
+            return NVT_STATUS_PPS_SLICE_WIDTH_ERROR;
+        }
+
+        out->slice_num = (out->pic_width + out->slice_width - 1) / out->slice_width;
+        if (!(DSC_SliceCountMaskforSliceNum(out->slice_num) & common_slice_count_mask))
+        {
+            // The slice count corresponding to the requested slice_width is not supported
+            return NVT_STATUS_PPS_SLICE_COUNT_ERROR;
+        }
+    }
+    else if (out->slice_width == 0)
+    {
+        if (!(DSC_SliceCountMaskforSliceNum(out->slice_num) & common_slice_count_mask))
+        {
+            // The requested slice count is not supported
+            return NVT_STATUS_PPS_SLICE_COUNT_ERROR;
+        }
+
+        out->slice_width = (out->pic_width + out->slice_num - 1) / out->slice_num;
+
+        if (out->native_420 || out->native_422)
+        {
+            // Round up to an even slice width
+            out->slice_width = (out->slice_width + 1) / 2 * 2;
+        }
+
+        if (out->slice_width > max_slice_width)
+        {
+            // The slice width corresponding to the requested slice count is not supported
+            return NVT_STATUS_PPS_SLICE_WIDTH_ERROR;
+        }
+    }
+    else
+    {
+        if (!(DSC_SliceCountMaskforSliceNum(out->slice_num) & common_slice_count_mask))
+        {
+            // The requested slice count is not supported
+            return NVT_STATUS_PPS_SLICE_COUNT_ERROR;
+        }
+
+        if (out->slice_width > max_slice_width)
+        {
+            // The requested slice width cannot be supported
+            return NVT_STATUS_PPS_SLICE_WIDTH_ERROR;
+        }
+
+        if (out->slice_width != (out->pic_width + out->slice_num - 1) / out->slice_num)
+        {
+            // slice_width must equal CEIL(pic_width/slice_num)
+            return NVT_STATUS_PPS_SLICE_WIDTH_ERROR;
+        }
+    }
+
+    if ((pixel_clkMHz / out->slice_num) > peak_throughput_mps)
+    {
+        // The sink DSC decoder does not support the minimum throughput required for this DSC config
+        return NVT_STATUS_ERR;
+    }
+
+    if (max_slice_width < SINK_MAX_SLICE_WIDTH_DEFAULT)
+    {
+        // The sink has to support a max slice width of at least 2560 as per the DP1.4 spec. Ignoring for now.
+    }
+
+    if (out->slice_width < 32)
+    {
+        // slice_width must be >= 32
+        return NVT_STATUS_PPS_SLICE_WIDTH_ERROR;
+    }
+
+    slicew = out->slice_width >> (out->native_420 || out->native_422); // halved in native 4:2:0/4:2:2 modes
+    out->groups_per_line = (slicew + PIXELS_PER_GROUP - 1) / PIXELS_PER_GROUP;
+    out->chunk_size = (slicew * out->bits_per_pixel + 8 * BPP_UNIT - 1) / (8 * BPP_UNIT); // Number of bytes per chunk
+
+    //
+    // Below is not a constraint of the DSC module; this is an RG limitation:
+    // check that the total data packet per line from DSC to RG won't be larger
+    // than pic_width.
+    //
+    if ((out->chunk_size + 3) / 4 * out->slice_num > out->pic_width)
+    {
+        // Error! bpp too high, RG will overflow; normally this error is also
+        // caused by padding (pic_width % slice_num != 0)
+        return NVT_STATUS_ERR;
+    }
+
+    return NVT_STATUS_SUCCESS;
+}
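+
+//
+// To make the chunk math above concrete (illustrative numbers): with
+// slicew = 1280 at 8 bpp (bits_per_pixel = 8 * BPP_UNIT = 128),
+// chunk_size = (1280 * 128 + 8 * BPP_UNIT - 1) / (8 * BPP_UNIT) = 1280 bytes
+// per slice per line, and the RG check then requires
+// ceil(1280 / 4) * slice_num to stay within pic_width.
+//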
+
+static NVT_STATUS
+Dsc_PpsCalcHeight
+(
+    const DSC_INPUT_PARAMS *in,
+    DSC_OUTPUT_PARAMS *out
+)
+{
+    if (in->multi_tile && in->eDP)
+    {
+        if (out->slice_height == 0U)
+        {
+            // The minimum area of a slice should be 15000 as per the VESA spec
+            out->slice_height = (NvU32)NV_CEIL(15000U, (out->slice_width));
+            while (out->pic_height > out->slice_height)
+            {
+                if (out->pic_height % out->slice_height == 0U)
+                {
+                    if (DSC_PpsCheckSliceHeight(out) == NVT_STATUS_SUCCESS)
+                    {
+                        return NVT_STATUS_SUCCESS;
+                    }
+                    else
+                    {
+                        out->slice_height++;
+                    }
+                }
+                else
+                {
+                    out->slice_height++;
+                }
+
+                if (out->pic_height == out->slice_height)
+                {
+                    if (DSC_PpsCheckSliceHeight(out) == NVT_STATUS_SUCCESS)
+                    {
+                        return NVT_STATUS_SUCCESS;
+                    }
+                    else
+                    {
+                        return NVT_STATUS_PPS_SLICE_HEIGHT_ERROR;
+                    }
+                }
+            }
+        }
+    }
+    else
+    {
+        if (out->slice_height == 0)
+        {
+            NvU32 i;
+            for (i = 1; i <= 16; i++)
+            {
+                out->slice_height = out->pic_height / i;
+                if (out->pic_height != out->slice_height * i)
+                    continue;
+
+                if (DSC_PpsCheckSliceHeight(out) == NVT_STATUS_SUCCESS)
+                    return NVT_STATUS_SUCCESS;
+            }
+            // Error! Can't find a valid slice_height
+            return NVT_STATUS_PPS_SLICE_HEIGHT_ERROR;
+        }
+    }
+
+    RANGE_CHECK("slice_height", out->slice_height, 8, out->pic_height);
+
+    if (out->pic_height % out->slice_height != 0)
+    {
+        // Error! pic_height % slice_height must be 0
+        return NVT_STATUS_PPS_SLICE_HEIGHT_ERROR;
+    }
+
+    if (DSC_PpsCheckSliceHeight(out) != NVT_STATUS_SUCCESS)
+    {
+        // Error! slice_height not valid
+        return NVT_STATUS_PPS_SLICE_HEIGHT_ERROR;
+    }
+
+    return NVT_STATUS_SUCCESS;
+}
+
+/*
+ * @brief Calculate DSC_OUTPUT_PARAMS from DSC_INPUT_PARAMS.
+ *
+ * @param[in]   in   DSC input parameters
+ * @param[out]  out  DSC output parameters
+ *
+ * @returns NVT_STATUS_SUCCESS if successful;
+ *          NVT_STATUS_ERR if unsuccessful.
+ */
+static NVT_STATUS
+DSC_PpsCalc
+(
+    const DSC_INPUT_PARAMS *in,
+    DSC_OUTPUT_PARAMS *out
+)
+{
+    NVT_STATUS ret;
+    NvU32 peak_throughput = 0;
+
+    ret = DSC_PpsCalcBase(in, out);
+    if (ret != NVT_STATUS_SUCCESS)
+        return ret;
+
+    if (in->drop_mode)
+    {
+        // In drop mode, HW requires these params to simplify the design
+        out->bits_per_pixel = 16 * BPP_UNIT;
+        out->slice_num = 2;
+    }
+
+    if (out->native_420 || out->native_422)
+    {
+        peak_throughput = in->peak_throughput_mode1;
+    }
+    else
+    {
+        peak_throughput = in->peak_throughput_mode0;
+    }
+
+    ret = DSC_PpsCalcSliceParams(in->pixel_clkMHz, in->dual_mode,
+                                 in->max_slice_num, in->max_slice_width, in->slice_count_mask,
+                                 peak_throughput, out);
+    if (ret != NVT_STATUS_SUCCESS) return ret;
+
+    ret = DSC_PpsCalcRcInitValue(out);
+    if (ret != NVT_STATUS_SUCCESS) return ret;
+
+    ret = Dsc_PpsCalcHeight(in, out);
+    if (ret != NVT_STATUS_SUCCESS) return ret;
+
+    ret = DSC_PpsCalcRcParam(out);
+
+    return ret;
+}
+
+/*
+ * @brief Calculate DSC_OUTPUT_PARAMS from DSC_INPUT_PARAMS internally,
+ * then pack the PPS parameters into a 32-bit data array.
+ *
+ * @param[in]   in       DSC input parameters
+ * @param[in]   pPpsOut  A preallocated work-area buffer for calculations
+ * @param[out]  out      NvU32[32] array to return the PPS data.
+ *                       The data can be sent to SetDscPpsData* methods directly.
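+ *
+ * Usage sketch (illustrative; the caller-side names are hypothetical):
+ *
+ *     DSC_OUTPUT_PARAMS workArea;               // zeroed inside DSC_PpsDataGen()
+ *     NvU32 ppsDwords[DSC_MAX_PPS_SIZE_DWORD];
+ *
+ *     if (DSC_PpsDataGen(&inParams, &workArea, ppsDwords) == NVT_STATUS_SUCCESS)
+ *     {
+ *         // ppsDwords[] now holds the packed PPS, ready for SetDscPpsData*.
+ *     }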
+ * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NVT_STATUS +DSC_PpsDataGen +( + const DSC_INPUT_PARAMS *in, + DSC_OUTPUT_PARAMS *pPpsOut, + NvU32 out[DSC_MAX_PPS_SIZE_DWORD] +) +{ + NVT_STATUS ret; + + NVMISC_MEMSET(pPpsOut, 0, sizeof(DSC_OUTPUT_PARAMS)); + ret = DSC_PpsCalc(in, pPpsOut); + if (ret != NVT_STATUS_SUCCESS) + { + goto done; + } + + DSC_PpsConstruct(pPpsOut, out); + + /* fall through */ +done: + + return ret; +} + +/* + * @brief Validate input parameter we got from caller of this function + * + * @param[in] pDscInfo Includes Sink and GPU DSC capabilities + * @param[in] pModesetInfo Modeset related information + * @param[in] pWARData Data required for providing WAR for issues + * @param[in] availableBandwidthBitsPerSecond Available bandwidth for video + * transmission(After FEC/Downspread overhead consideration) + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NVT_STATUS +_validateInput +( + const DSC_INFO *pDscInfo, + const MODESET_INFO *pModesetInfo, + const WAR_DATA *pWARData, + NvU64 availableBandwidthBitsPerSecond +) +{ + // Validate DSC Info + if (pDscInfo->sinkCaps.decoderColorFormatMask == 0U) + { + // ERROR - At least one of the color format decoding needs to be supported by Sink. + return NVT_STATUS_INVALID_PARAMETER; + } + + if (!ONEBITSET(pDscInfo->sinkCaps.bitsPerPixelPrecision)) + { + // ERROR - Only one of Bits Per Pixel Precision should be set + return NVT_STATUS_INVALID_PARAMETER; + } + + if ((pDscInfo->sinkCaps.bitsPerPixelPrecision != 1U) && + (pDscInfo->sinkCaps.bitsPerPixelPrecision != 2U) && + (pDscInfo->sinkCaps.bitsPerPixelPrecision != 4U) && + (pDscInfo->sinkCaps.bitsPerPixelPrecision != 8U) && + (pDscInfo->sinkCaps.bitsPerPixelPrecision != 16U)) + { + // ERROR - Bits Per Pixel Precision should be 1/16, 1/8, 1/4, 1/2 or 1 bpp. + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->sinkCaps.maxSliceWidth == 0U) + { + // ERROR - Invalid max slice width supported by sink. + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->sinkCaps.maxNumHztSlices == 0U) + { + // ERROR - Invalid max number of horizontal slices supported by sink. + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->sinkCaps.lineBufferBitDepth == 0U) + { + // ERROR - Invalid line buffer bit depth supported by sink. + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->sinkCaps.algorithmRevision.versionMinor == 0U) + { + // ERROR - Invalid DSC algorithm revision supported by sink. + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->gpuCaps.encoderColorFormatMask == 0U) + { + // ERROR - At least one of the color format encoding needs to be supported by GPU. + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->gpuCaps.lineBufferSize == 0U) + { + // ERROR - Invalid Line buffer size supported by GPU. + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->gpuCaps.maxNumHztSlices == 0U) + { + // ERROR - Invalid max number of horizontal slices supported by GPU. + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->gpuCaps.lineBufferBitDepth == 0U) + { + // ERROR - Invalid line buffer bit depth supported by GPU. + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->forcedDscParams.sliceCount > pDscInfo->sinkCaps.maxNumHztSlices) + { + // ERROR - Client can't specify forced slice count greater than what sink supports. + return NVT_STATUS_DSC_SLICE_ERROR; + } + + if ((pDscInfo->forcedDscParams.sliceCount / (pModesetInfo->bDualMode ? 
2 : 1)) > pDscInfo->gpuCaps.maxNumHztSlices) + { + // ERROR - Client can't specify forced slice count greater than what GPU supports. + return NVT_STATUS_DSC_SLICE_ERROR; + } + + if (pDscInfo->forcedDscParams.sliceWidth > pDscInfo->sinkCaps.maxSliceWidth) + { + // ERROR - Client can't specify forced slice width greater than what sink supports. + return NVT_STATUS_DSC_SLICE_ERROR; + } + + if ((pDscInfo->forcedDscParams.sliceCount > 0U) && + (pDscInfo->forcedDscParams.sliceWidth != 0U)) + { + // ERROR - Client can't specify both forced slice count and slice width. + return NVT_STATUS_DSC_SLICE_ERROR; + } + + if ((pDscInfo->forcedDscParams.sliceCount != 0U) && + (pDscInfo->forcedDscParams.sliceCount != 1U) && + (pDscInfo->forcedDscParams.sliceCount != 2U) && + (pDscInfo->forcedDscParams.sliceCount != 4U) && + (pDscInfo->forcedDscParams.sliceCount != 8U) && + (pDscInfo->forcedDscParams.sliceCount != 10U) && + (pDscInfo->forcedDscParams.sliceCount != 12U) && + (pDscInfo->forcedDscParams.sliceCount != 16U) && + (pDscInfo->forcedDscParams.sliceCount != 20U) && + (pDscInfo->forcedDscParams.sliceCount != 24U)) + { + // ERROR - Forced Slice Count has to be 1/2/4/8/10/12/16/20/24. + return NVT_STATUS_DSC_SLICE_ERROR; + } + + if (pDscInfo->forcedDscParams.sliceWidth > pModesetInfo->activeWidth) + { + // ERROR - Forced Slice Width can't be more than Active Width. + return NVT_STATUS_DSC_SLICE_ERROR; + } + + if (pDscInfo->forcedDscParams.sliceHeight > pModesetInfo->activeHeight) + { + // ERROR - Forced Slice Height can't be more than Active Height. + return NVT_STATUS_DSC_SLICE_ERROR; + } + + if (pDscInfo->forcedDscParams.dscRevision.versionMinor > + pDscInfo->sinkCaps.algorithmRevision.versionMinor) + { + // ERROR - Forced DSC Algorithm Revision is greater than Sink Supported value. + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->forcedDscParams.dscRevision.versionMinor > 2U) + { + // ERROR - Forced DSC Algorithm Revision is greater than 1.2 + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pModesetInfo->pixelClockHz == 0U) + { + // ERROR - Invalid pixel Clock for mode. + return NVT_STATUS_INVALID_PARAMETER; + } + + if ((pDscInfo->branchCaps.overallThroughputMode0 != 0U) && + (pModesetInfo->pixelClockHz > pDscInfo->branchCaps.overallThroughputMode0 * MHZ_TO_HZ)) + { + // ERROR - Pixel clock cannot be greater than Branch DSC Overall Throughput Mode 0 + return NVT_STATUS_OVERALL_THROUGHPUT_ERROR; + } + + if (pModesetInfo->activeWidth == 0U) + { + // ERROR - Invalid active width for mode. + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->branchCaps.maxLineBufferWidth != 0U && + pModesetInfo->activeWidth > pDscInfo->branchCaps.maxLineBufferWidth) + { + // ERROR - Active width cannot be greater than DSC Decompressor max line buffer width + return NVT_STATUS_MAX_LINE_BUFFER_ERROR; + } + + if (pModesetInfo->activeHeight == 0U) + { + // ERROR - Invalid active height for mode. + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pModesetInfo->bitsPerComponent == 0U) + { + // ERROR - Invalid bits per component for mode. + return NVT_STATUS_INVALID_PARAMETER; + } + + if (availableBandwidthBitsPerSecond == 0U) + { + // ERROR - Invalid available bandwidth in Bits Per Second. 
+ return NVT_STATUS_INVALID_PARAMETER; + } + + if (pModesetInfo->colorFormat == NVT_COLOR_FORMAT_YCbCr422) + { + // + // For using YCbCr422 with DSC, either of the following has to be true + // 1> Sink supports Simple422 + // 2> GPU and Sink supports Native 422 + // + if ((!(pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_SIMPLE_422)) && + (!((pDscInfo->gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422) && + (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422)))) + { + // ERROR - Can't enable YCbCr422 with current GPU and Sink DSC config. + return NVT_STATUS_COLOR_FORMAT_NOT_SUPPORTED; + } + } + + if (pModesetInfo->colorFormat == NVT_COLOR_FORMAT_YCbCr420) + { + // + // For using YCbCr420 with DSC, GPU and Sink has to support Native 420 + // + if (!((pDscInfo->gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420) && + (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420))) + { + // ERROR - Can't enable YCbCr420 with current GPU and Sink DSC config. + return NVT_STATUS_COLOR_FORMAT_NOT_SUPPORTED; + } + } + + if ((pDscInfo->sinkCaps.algorithmRevision.versionMajor == 1U) && + (pDscInfo->sinkCaps.algorithmRevision.versionMinor == 1U) && + (pModesetInfo->colorFormat == NVT_COLOR_FORMAT_YCbCr420)) + { + // WARNING: DSC v1.2 or higher is recommended for using YUV444 + // Current version is 1.1 + } + + if (pDscInfo->sinkCaps.maxBitsPerPixelX16 > 1024U) + { + // ERROR - Max bits per pixel can't be greater than 1024 + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->sinkCaps.decoderColorDepthMask) + { + switch (pModesetInfo->bitsPerComponent) + { + case 12: + if (!(pDscInfo->sinkCaps.decoderColorDepthMask & DSC_DECODER_COLOR_DEPTH_CAPS_12_BITS)) + { + // ERROR - Sink DSC Decoder does not support 12 bpc + return NVT_STATUS_INVALID_BPC; + } + break; + case 10: + if (!(pDscInfo->sinkCaps.decoderColorDepthMask & DSC_DECODER_COLOR_DEPTH_CAPS_10_BITS)) + { + // ERROR - Sink DSC Decoder does not support 10 bpc + return NVT_STATUS_INVALID_BPC; + } + break; + case 8: + if (!(pDscInfo->sinkCaps.decoderColorDepthMask & DSC_DECODER_COLOR_DEPTH_CAPS_8_BITS)) + { + // ERROR - Sink DSC Decoder does not support 8 bpc + return NVT_STATUS_INVALID_BPC; + } + break; + + default: + // ERROR - Invalid bits per component specified + return NVT_STATUS_INVALID_PARAMETER; + } + } + else + { + // WARNING - Decoder Color Depth Mask was not provided. Assuming that decoder supports all depths. 
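+        //
+        // For example (illustrative): a sink advertising only 8 bpc and 10 bpc
+        // support would set decoderColorDepthMask to
+        // (DSC_DECODER_COLOR_DEPTH_CAPS_8_BITS | DSC_DECODER_COLOR_DEPTH_CAPS_10_BITS),
+        // and a 12 bpc modeset request is then rejected above with
+        // NVT_STATUS_INVALID_BPC.
+        //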
+ } + + // Validate WAR data + if (pWARData) + { + if ((pWARData->connectorType != DSC_DP) && (pWARData->connectorType != DSC_HDMI)) + { + // WARNING - Incorrect connector info sent with WAR data + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pWARData->connectorType == DSC_DP) + { + if (!IS_VALID_LANECOUNT(pWARData->dpData.laneCount)) + { + // ERROR - Incorrect DP Lane count info sent with WAR data + return NVT_STATUS_INVALID_PARAMETER; + } + + if (!IS_VALID_LINKBW(pWARData->dpData.linkRateHz / DP_LINK_BW_FREQ_MULTI_MBPS)) + { + // ERROR - Incorrect DP Link rate info sent with WAR data + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pWARData->dpData.hBlank > MAX_HBLANK_PIXELS) + { + // ERROR - Incorrect DP HBlank info sent with WAR data + return NVT_STATUS_INVALID_HBLANK; + } + + if ((pWARData->dpData.dpMode != DSC_DP_SST) && (pWARData->dpData.dpMode != DSC_DP_MST)) + { + // ERROR - Incorrect DP Stream mode sent with WAR data + return NVT_STATUS_INVALID_PARAMETER; + } + } + } + + return NVT_STATUS_SUCCESS; +} + +/* ------------------------ Public Functions ------------------------------- */ + +/* + * @brief Calculate PPS parameters and slice count mask based on passed down + * Sink, GPU capability and modeset info + * + * + * @param[in] pDscInfo Includes Sink and GPU DSC capabilities + * @param[in] pModesetInfo Modeset related information + * @param[in] pWARData Data required for providing WAR for issues + * @param[in] availableBandwidthBitsPerSecond Available bandwidth for video + * transmission(After FEC/Downspread overhead consideration) + * @param[out] pps Calculated PPS parameter. + * The data can be sent to SetDscPpsData* methods directly. + * @param[out] pBitsPerPixelX16 Bits per pixel multiplied by 16 + * @param[out] pSliceCountMask Mask of all slice counts supported by the mode. + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_DSC_SLICE_ERROR if no common slice count could be found; + * NVT_STATUS_INVALID_PEAK_THROUGHPUT if peak through put is invalid; + * NVT_STATUS_PPS_SLICE_COUNT_ERROR if there is no slice count possible for the mode. + * In case this returns failure consider that PPS is not possible. + */ +NVT_STATUS +DSC_GeneratePPSWithSliceCountMask +( + const DSC_INFO *pDscInfo, + const MODESET_INFO *pModesetInfo, + const WAR_DATA *pWARData, + NvU64 availableBandwidthBitsPerSecond, + NvU32 pps[DSC_MAX_PPS_SIZE_DWORD], + NvU32 *pBitsPerPixelX16, + NvU32 *pSliceCountMask +) +{ + NvU32 commonSliceCountMask; + NvU32 gpuSliceCountMask; + NvU32 rejectSliceCountMask; + NvU32 possibleSliceCountMask; + NvU32 validSliceCountMask = 0x0; + NvU32 peakThroughPutIndex = 0U; + NvU32 peakThroughPutMps = 0U; + NvU32 maxSliceCount; + NvU32 maxSliceWidth; + NvU32 minSliceCount; + NvU32 sliceArrayCount; + NvU32 i; + DSC_INFO localDscInfo; + NVT_STATUS status; + DSC_GENERATE_PPS_OPAQUE_WORKAREA scratchBuffer; + + // Below are the valid slice counts according to DP2.0 spec. + NvU32 validSliceNum[] = {1U,2U,4U,6U,8U,10U,12U,16U,20U,24U}; + + // if any slice parameters are forced, just return PPS. + if (pDscInfo->forcedDscParams.sliceWidth != 0U || + pDscInfo->forcedDscParams.sliceCount != 0U) + { + return DSC_GeneratePPS(pDscInfo, pModesetInfo, pWARData, + availableBandwidthBitsPerSecond, + &scratchBuffer, pps, pBitsPerPixelX16); + } + + sliceArrayCount = sizeof(validSliceNum)/sizeof(NvU32); + + // For 2Head1OR mode, slice count supported by GPU is always 8. + maxSliceCount = MIN(pDscInfo->sinkCaps.maxNumHztSlices, + pModesetInfo->bDualMode ? 
8U : pDscInfo->gpuCaps.maxNumHztSlices); + + // lineBufferSize is reported in 1024 units by HW, so need to multiply by 1024 to get pixels. + maxSliceWidth = MIN(pDscInfo->sinkCaps.maxSliceWidth, pDscInfo->gpuCaps.lineBufferSize * 1024); + + gpuSliceCountMask = DSC_GetSliceCountMask(maxSliceCount, NV_TRUE /*bInclusive*/); + + if (pModesetInfo->bDualMode) + { + // For DSC_DUAL, slice counts 1 and 6 are invalid. + gpuSliceCountMask &= ~(0x11); + } + + commonSliceCountMask = gpuSliceCountMask & pDscInfo->sinkCaps.sliceCountSupportedMask; + + if (commonSliceCountMask == 0x0) + { + return NVT_STATUS_DSC_SLICE_ERROR; + } + + if ((pModesetInfo->colorFormat == NVT_COLOR_FORMAT_YCbCr422 && + ((pDscInfo->gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422) && + (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422))) || + (pModesetInfo->colorFormat == NVT_COLOR_FORMAT_YCbCr420 && + ((pDscInfo->gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420) && + (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420)))) + { + peakThroughPutIndex = pDscInfo->sinkCaps.peakThroughputMode1; + } + else + { + peakThroughPutIndex = pDscInfo->sinkCaps.peakThroughputMode0; + } + + peakThroughPutMps = DSC_GetPeakThroughputMps(peakThroughPutIndex); + if (peakThroughPutMps == 0U) + { + return NVT_STATUS_INVALID_PEAK_THROUGHPUT; + } + + status = DSC_GetMinSliceCountForMode(pModesetInfo->activeWidth, + (NvU32)(pModesetInfo->pixelClockHz / 1000000L), + maxSliceWidth, + peakThroughPutMps, + maxSliceCount, + commonSliceCountMask, + &minSliceCount); + if (status != NVT_STATUS_SUCCESS) + return status; + + // Find mask of slice counts which are less than min slice count + rejectSliceCountMask = DSC_GetSliceCountMask(minSliceCount, NV_FALSE /*bInclusive*/); + + // Now find mask of slice counts that can be supported by the mode + possibleSliceCountMask = commonSliceCountMask & (~rejectSliceCountMask); + + // + // If we have mask of all possible slice counts, loop to generate PPS with + // each of those slice counts forced. + // + if (possibleSliceCountMask) + { + NvU32 minSliceCountOut = 0; + localDscInfo = *pDscInfo; + + for(i = 0U ; i < sliceArrayCount; i++) + { + if (possibleSliceCountMask & DSC_SliceCountMaskforSliceNum(validSliceNum[i])) + { + // Use the forced bits per pixel, if any + NvU32 bitsPerPixelX16Local = *pBitsPerPixelX16; + localDscInfo.forcedDscParams.sliceCount = validSliceNum[i]; + status = DSC_GeneratePPS(&localDscInfo, pModesetInfo, pWARData, + availableBandwidthBitsPerSecond, &scratchBuffer, + NULL, &bitsPerPixelX16Local); + if (status == NVT_STATUS_SUCCESS) + { + // + // DPlib and PPSlib follows DP spec to set slice count indices + // in slice count mask. This mapping of index to slice count + // is not 1:1. For eg. slice count 8 corresponds to bit + // index 5 as per spec. PPSLib clients are spec agnostic + // and prefer indices to indicate corresponding slice count. + // For eg. slice count = 8 should be set at bit index 7. + // So while passing the mask back to clients, here we set + // corresponding bit index. 
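+                    // For example (illustrative): slice count 8 sits at bit
+                    // index 5 in the DPCD-style commonSliceCountMask, but is
+                    // reported back to clients below as NVBIT32(8 - 1), i.e.
+                    // bit index 7.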
+                    //
+                    validSliceCountMask |= NVBIT32((validSliceNum[i]) - 1U);
+                    if ((minSliceCountOut == 0) || (minSliceCountOut > validSliceNum[i]))
+                    {
+                        minSliceCountOut = validSliceNum[i];
+                    }
+                }
+            }
+        }
+
+        if (minSliceCountOut != 0)
+        {
+            //
+            // We need to return the PPS with the minimum slice count if the
+            // client has not forced any slice count, even though we generate
+            // PPS with all other possible slice counts to validate them.
+            //
+            localDscInfo.forcedDscParams.sliceCount = minSliceCountOut;
+            status = DSC_GeneratePPS(&localDscInfo, pModesetInfo, pWARData,
+                                     availableBandwidthBitsPerSecond, &scratchBuffer,
+                                     pps, pBitsPerPixelX16);
+            if (status != NVT_STATUS_SUCCESS)
+            {
+                return status;
+            }
+        }
+    }
+    else
+    {
+        return NVT_STATUS_PPS_SLICE_COUNT_ERROR;
+    }
+
+    if (validSliceCountMask == 0U)
+    {
+        // The reason for failure with the highest possible slice count will be returned.
+        return status;
+    }
+
+    *pSliceCountMask = validSliceCountMask;
+
+    return NVT_STATUS_SUCCESS;
+}
+
+/*
+ * @brief Calculate PPS parameters based on passed down Sink,
+ *        GPU capability and modeset info
+ *
+ * @param[in]   pDscInfo       Includes Sink and GPU DSC capabilities
+ * @param[in]   pModesetInfo   Modeset related information
+ * @param[in]   pWARData       Data required for providing WAR for issues
+ * @param[in]   availableBandwidthBitsPerSecond
+ *                             Available bandwidth for video transmission
+ *                             (after FEC/downspread overhead consideration)
+ * @param[in]   pOpaqueWorkarea
+ *                             Scratch buffer of sufficient size, pre-allocated
+ *                             by the client for internal use by the DSC PPS
+ *                             calculations
+ * @param[out]  pps            Calculated PPS parameters.
+ *                             The data can be sent to SetDscPpsData* methods directly.
+ * @param[out]  pBitsPerPixelX16
+ *                             Bits per pixel multiplied by 16
+ *
+ * @returns NVT_STATUS_SUCCESS if successful;
+ *          NVT_STATUS_ERR if unsuccessful.
+ *          If this returns failure, consider that PPS is not possible.
+ */
+NVT_STATUS
+DSC_GeneratePPS
+(
+    const DSC_INFO *pDscInfo,
+    const MODESET_INFO *pModesetInfo,
+    const WAR_DATA *pWARData,
+    NvU64 availableBandwidthBitsPerSecond,
+    DSC_GENERATE_PPS_OPAQUE_WORKAREA *pOpaqueWorkarea,
+    NvU32 pps[DSC_MAX_PPS_SIZE_DWORD],
+    NvU32 *pBitsPerPixelX16
+)
+{
+    DSC_INPUT_PARAMS  *in  = NULL;
+    DSC_OUTPUT_PARAMS *out = NULL;
+    DSC_GENERATE_PPS_WORKAREA *pWorkarea = NULL;
+    NVT_STATUS ret = NVT_STATUS_ERR;
+
+    if ((!pDscInfo) || (!pModesetInfo) || (!pOpaqueWorkarea) || (!pBitsPerPixelX16))
+    {
+        ret = NVT_STATUS_INVALID_PARAMETER;
+        goto done;
+    }
+
+    pWorkarea = (DSC_GENERATE_PPS_WORKAREA*)(pOpaqueWorkarea);
+    in  = &pWorkarea->in;
+    out = &pWorkarea->out;
+
+    ret = _validateInput(pDscInfo, pModesetInfo, pWARData, availableBandwidthBitsPerSecond);
+    if (ret != NVT_STATUS_SUCCESS)
+    {
+        goto done;
+    }
+
+    NVMISC_MEMSET(in, 0, sizeof(DSC_INPUT_PARAMS));
+
+    in->bits_per_component = pModesetInfo->bitsPerComponent;
+    in->linebuf_depth = MIN((pDscInfo->sinkCaps.lineBufferBitDepth), (pDscInfo->gpuCaps.lineBufferBitDepth));
+    in->block_pred_enable = pDscInfo->sinkCaps.bBlockPrediction;
+    in->multi_tile = (pDscInfo->gpuCaps.maxNumHztSlices > 4U) ? 1 : 0;
+    in->dsc_version_minor = pDscInfo->forcedDscParams.dscRevision.versionMinor ?
pDscInfo->forcedDscParams.dscRevision.versionMinor : + pDscInfo->sinkCaps.algorithmRevision.versionMinor; + in->pic_width = pModesetInfo->activeWidth; + in->pic_height = pModesetInfo->activeHeight; + in->slice_height = pDscInfo->forcedDscParams.sliceHeight; + in->slice_width = pDscInfo->forcedDscParams.sliceWidth; + in->slice_num = pDscInfo->forcedDscParams.sliceCount; + in->max_slice_num = MIN(pDscInfo->sinkCaps.maxNumHztSlices, + pModesetInfo->bDualMode ? pDscInfo->gpuCaps.maxNumHztSlices * 2 : pDscInfo->gpuCaps.maxNumHztSlices); + // lineBufferSize is reported in 1024 units by HW, so need to multiply by 1024 to get pixels. + in->max_slice_width = MIN(pDscInfo->sinkCaps.maxSliceWidth, pDscInfo->gpuCaps.lineBufferSize * 1024); + in->pixel_clkMHz = (NvU32)(pModesetInfo->pixelClockHz / 1000000L); + in->dual_mode = pModesetInfo->bDualMode; + in->drop_mode = pModesetInfo->bDropMode; + in->slice_count_mask = pDscInfo->sinkCaps.sliceCountSupportedMask; + in->peak_throughput_mode0 = pDscInfo->sinkCaps.peakThroughputMode0; + in->peak_throughput_mode1 = pDscInfo->sinkCaps.peakThroughputMode1; + + switch (pModesetInfo->colorFormat) + { + case NVT_COLOR_FORMAT_RGB: + in->convert_rgb = 1; + break; + + case NVT_COLOR_FORMAT_YCbCr444: + in->convert_rgb = 0; + break; + case NVT_COLOR_FORMAT_YCbCr422: + in->convert_rgb = 0; + + if ((pDscInfo->gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422) && + (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422)) + { + in->native_422 = 1; + } + else if (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_SIMPLE_422) + { + in->simple_422 = 1; + } + else + { + // ERROR - YCbCr422 is not possible with current config. + ret = NVT_STATUS_COLOR_FORMAT_NOT_SUPPORTED; + goto done; + } + break; + case NVT_COLOR_FORMAT_YCbCr420: + in->convert_rgb = 0; + + if ((pDscInfo->gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420) && + (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420)) + { + in->native_420 = 1; + } + else + { + // ERROR - YCbCr420 is not possible with current config. + ret = NVT_STATUS_COLOR_FORMAT_NOT_SUPPORTED; + goto done; + } + break; + + default: + // ERROR - Invalid color Format specified. + ret = NVT_STATUS_COLOR_FORMAT_NOT_SUPPORTED; + goto done; + } + + // calculate max possible bits per pixel allowed by the available bandwidth + in->bits_per_pixel = (NvU32)((availableBandwidthBitsPerSecond * BPP_UNIT) / pModesetInfo->pixelClockHz); + + if (pWARData && (pWARData->connectorType == DSC_DP)) + { + + if(!in->multi_tile || + (in->multi_tile && pWARData->dpData.dpMode == DSC_DP_SST + )) + { + // + // In DP case, being too close to the available bandwidth caused HW to hang. + // 2 is subtracted based on issues seen in DP CTS testing. Refer to bug 200406501, comment 76 + // This limitation is only on DP, not needed for HDMI DSC HW + // + in->bits_per_pixel = (NvU32)((availableBandwidthBitsPerSecond * BPP_UNIT) / pModesetInfo->pixelClockHz) - (BPP_UNIT/8); + + if (pWARData->dpData.laneCount == 1U) + { + // + // SOR lane fifo might get overflown when DP 1 lane, FEC enabled and pclk*bpp > 96%*linkclk*8 i.e. + // DSC stream is consuming more than 96% of the total bandwidth. Use lower bits per pixel. Refer Bug 200561864. 
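+                // Worked example with illustrative numbers: for a 1188 MHz pixel
+                // clock and ~25.92 Gbps of available bandwidth,
+                // (96 * 25.92e9 * 16) / (100 * 1.188e9) is roughly 335 in
+                // 1/16-bpp units; minus BPP_UNIT/8 = 2 this gives 333, i.e.
+                // roughly 20.8 bpp.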
+ // + in->bits_per_pixel = (NvU32)((96U * availableBandwidthBitsPerSecond * BPP_UNIT) / (100U * pModesetInfo->pixelClockHz)) - + (BPP_UNIT / 8U); + } + } + + if ((pWARData->dpData.dpMode == DSC_DP_SST) && (pWARData->dpData.hBlank < 100U)) + { + // + // For short HBlank timing, using bits per pixel value which may have to add DSC padding for each chunk + // may not be possible so use bits per pixel value which won't require DSC padding. Bug 200628516 + // + + NvU32 protocolOverhead; + NvU32 dscOverhead; + NvU32 minSliceCount = (NvU32)NV_CEIL(pModesetInfo->pixelClockHz, (MAX_PCLK_PER_SLICE_KHZ * 1000U)); + NvU32 sliceWidth; + NvU32 i; + NvU64 dataRate; + + if ((minSliceCount > 2U) &&(minSliceCount < 4U)) + { + minSliceCount = 4U; + } + else if (minSliceCount > 4U) + { + minSliceCount = 8U; + } + + sliceWidth = (NvU32)NV_CEIL(pModesetInfo->activeWidth, minSliceCount); + + if (pWARData->dpData.laneCount == 1U) + { + protocolOverhead = 42U; + } + else if (pWARData->dpData.laneCount == 2U) + { + protocolOverhead = 24U; + } + else + { + protocolOverhead = 21U; + } + + dscOverhead = minSliceCount * 2U; + + dataRate = pWARData->dpData.linkRateHz; + if ((pWARData->dpData.hBlank * dataRate / pModesetInfo->pixelClockHz) < + (protocolOverhead + dscOverhead + 3U)) + { + // + // For very short HBlank timing, find out bits per pixel value which will not require additional + // DSC padding. 128 will be used as the lowest bits per pixel value. + // + for (i = in->bits_per_pixel; i >= MIN_BITS_PER_PIXEL * BPP_UNIT; i--) + { + if (((i * sliceWidth) % ( 8U * minSliceCount * pWARData->dpData.laneCount * 16U)) == 0U) + { + break; + } + } + in->bits_per_pixel = i; + } + } + in->eDP = (pWARData->dpData.bIsEdp == NV_TRUE) ? 1 : 0; + } + + // + // bits per pixel upper limit is minimum of 3 times bits per component or 32 + // + if (in->bits_per_pixel > MIN((3 * in->bits_per_component * BPP_UNIT), (MAX_BITS_PER_PIXEL * BPP_UNIT))) + { + in->bits_per_pixel = MIN((3 * in->bits_per_component * BPP_UNIT), (MAX_BITS_PER_PIXEL * BPP_UNIT)); + } + + in->bits_per_pixel = DSC_AlignDownForBppPrecision(in->bits_per_pixel, pDscInfo->sinkCaps.bitsPerPixelPrecision); + + // If user specified bits_per_pixel value to be used check if it is valid one + if (*pBitsPerPixelX16 != 0) + { + *pBitsPerPixelX16 = DSC_AlignDownForBppPrecision(*pBitsPerPixelX16, pDscInfo->sinkCaps.bitsPerPixelPrecision); + + // + // The calculation of in->bits_per_pixel here in PPSlib, which is the maximum bpp that is allowed by available bandwidth, + // which is applicable to DP alone and not to HDMI FRL. + // Before calling PPS lib to generate PPS data, HDMI library has done calculation according to HDMI2.1 spec + // to determine if FRL rate is sufficient for the requested bpp. So restricting the condition to DP alone. + // + if ((pWARData && (pWARData->connectorType == DSC_DP)) && + (*pBitsPerPixelX16 > in->bits_per_pixel)) + { + // ERROR - Invalid bits per pixel value specified. + ret = NVT_STATUS_INVALID_BPP; + goto done; + } + else + { + in->bits_per_pixel = *pBitsPerPixelX16; + } + + // + // For DSC Dual Mode or Multi-tile configs (NVD 5.0 and later), + // because of architectural limitation we can't use bits_per_pixel + // more than 16. 
+ // + if ((pModesetInfo->bDualMode || + (in->multi_tile && (!pWARData || (pWARData && !pWARData->dpData.bDisableDscMaxBppLimit)))) + && (in->bits_per_pixel > 256 /*bits_per_pixel = 16*/)) + { + ret = NVT_STATUS_INVALID_BPP; + goto done; + } + + if ((pDscInfo->sinkCaps.maxBitsPerPixelX16 != 0) && (*pBitsPerPixelX16 > pDscInfo->sinkCaps.maxBitsPerPixelX16)) + { + // ERROR - bits per pixel value specified by user is greater than what DSC decompressor can support. + ret = NVT_STATUS_INVALID_BPP; + goto done; + } + } + else + { + // + // For DSC Dual Mode or for multi-tile configs (NVD 5.0 and later), + // because of architectural limitation we can't use bits_per_pixel more + // than 16. So forcing it to 16. + // + if ((pModesetInfo->bDualMode || + (in->multi_tile && (!pWARData || (pWARData && !pWARData->dpData.bDisableDscMaxBppLimit)))) + && (in->bits_per_pixel > 256 /*bits_per_pixel = 16*/)) + { + // ERROR - DSC Dual Mode, because of architectural limitation we can't use bits_per_pixel more than 16. + // ERROR - Forcing it to 16. + in->bits_per_pixel = 256; + } + + // If calculated bits_per_pixel is 126 or 127, we need to use 128 value. Bug 2686078 + if ((in->bits_per_pixel == 126) || (in->bits_per_pixel == 127)) + { + // WARNING: bits_per_pixel is forced to 128 because calculated value was 126 or 127 + in->bits_per_pixel = 128; + } + + if ((pDscInfo->sinkCaps.maxBitsPerPixelX16 != 0) && (in->bits_per_pixel > pDscInfo->sinkCaps.maxBitsPerPixelX16)) + { + // WARNING - Optimal bits per pixel value calculated is greater than what DSC decompressor can support. Forcing it to max that decompressor can support + in->bits_per_pixel = pDscInfo->sinkCaps.maxBitsPerPixelX16; + } + + } + + if (pModesetInfo->bDualMode && (pDscInfo->gpuCaps.maxNumHztSlices > 4U)) + { + // ERROR - Dual Mode should not be set when GPU can support more than 4 slices per head. + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + + if (in->native_422) + { + // bits_per_pixel in PPS is defined as 5 fractional bits in native422 mode + in->bits_per_pixel *= 2; + + if (in->dsc_version_minor == 1) + { + // Error! DSC1.1 can't support native422! + ret = NVT_STATUS_COLOR_FORMAT_NOT_SUPPORTED; + goto done; + } + //the bpp in native 422 mode is doubled. + if((((NvS32)(in->bits_per_pixel)) < (NvS32)(2*7*BPP_UNIT)) || + (((NvS32)(in->bits_per_pixel)) > (NvS32)(2*2*(in->bits_per_component)*BPP_UNIT-1))) + { + // ERROR - bits_per_pixelx16 outside valid range + ret = NVT_STATUS_INVALID_BPP; + goto done; + } + } + else + { + if ((((NvS32)(in->bits_per_pixel)) < (NvS32)(8*BPP_UNIT)) || + (((NvS32)(in->bits_per_pixel)) > (NvS32)(32*BPP_UNIT))) + { + // ERROR - bits_per_pixelx16 outside valid range + ret = NVT_STATUS_INVALID_BPP; + goto done; + } + } + + ret = DSC_PpsDataGen(in, out, pps); + + *pBitsPerPixelX16 = in->bits_per_pixel; + /* fall through */ +done: + return ret; +} diff --git a/src/common/modeset/timing/nvt_dsc_pps.h b/src/common/modeset/timing/nvt_dsc_pps.h new file mode 100644 index 0000000..43db275 --- /dev/null +++ b/src/common/modeset/timing/nvt_dsc_pps.h @@ -0,0 +1,353 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* +=============================================================================== + + dsc_pps.h + + Provide definition needed for DSC(Display Stream Compression) PPS(Picture Parameter Set) + +================================================================================ +*/ + +#ifndef __DSCPPS_H__ +#define __DSCPPS_H__ + +/* ------------------------ Includes --------------------------------------- */ +#include "nvtypes.h" +#include "nvtiming.h" + +/* ------------------------ Macros ----------------------------------------- */ +#define DSC_MAX_PPS_SIZE_DWORD 32 + +/* ------------------------ Datatypes -------------------------------------- */ +typedef struct +{ + NvU32 versionMajor; + NvU32 versionMinor; +} DSC_ALGORITHM_REV; + +typedef struct +{ + NvU64 pixelClockHz; // Requested pixel clock for the mode + NvU32 activeWidth; // Active Width + NvU32 activeHeight; // Active Height + NvU32 bitsPerComponent; // BPC value to be used + NVT_COLOR_FORMAT colorFormat; // Color format to be used for this modeset + + // + // Whether to enable Dual mode for DSC. + // Dual mode specifies that 2 heads would be generating + // pixels for complete stream. + // + NvBool bDualMode; + + // + // Whether to enable DROP mode for DSC. + // DROP mode specifies that instead of compressing the pixels, OR will drop + // the pixels of the right half frame to reduce the data rate by half. + // This mode is added for testing 2head1OR solution without a DSC panel + // + NvBool bDropMode; +} MODESET_INFO; + +typedef struct +{ + struct SINK_DSC_CAPS + { + // Mask of all color formats for which decoding supported by panel + NvU32 decoderColorFormatMask; +#define DSC_DECODER_COLOR_FORMAT_RGB (0x00000001) +#define DSC_DECODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002) +#define DSC_DECODER_COLOR_FORMAT_Y_CB_CR_SIMPLE_422 (0x00000004) +#define DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000008) +#define DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000010) + + // e.g. 
1/16, 1/8, 1/4, 1/2, 1bpp + NvU32 bitsPerPixelPrecision; +#define DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001) +#define DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002) +#define DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000004) +#define DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000008) +#define DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000010) + + // Maximum slice width supported by panel + NvU32 maxSliceWidth; + + // Maximum number of horizontal slices supported + NvU32 maxNumHztSlices; + + // Slice counts supported by the sink + NvU32 sliceCountSupportedMask; +#define DSC_DECODER_SLICES_PER_SINK_INVALID (0x00000000) +#define DSC_DECODER_SLICES_PER_SINK_1 (0x00000001) +#define DSC_DECODER_SLICES_PER_SINK_2 (0x00000002) +#define DSC_DECODER_SLICES_PER_SINK_4 (0x00000008) +#define DSC_DECODER_SLICES_PER_SINK_6 (0x00000010) +#define DSC_DECODER_SLICES_PER_SINK_8 (0x00000020) +#define DSC_DECODER_SLICES_PER_SINK_10 (0x00000040) +#define DSC_DECODER_SLICES_PER_SINK_12 (0x00000080) +#define DSC_DECODER_SLICES_PER_SINK_16 (0x00000100) +#define DSC_DECODER_SLICES_PER_SINK_20 (0x00000200) +#define DSC_DECODER_SLICES_PER_SINK_24 (0x00000400) + + // + // Bit depth used by the Sink device to store the + // reconstructed pixels within the line buffer + // + NvU32 lineBufferBitDepth; +#define DSC_DECODER_LINE_BUFFER_BIT_DEPTH_MIN (0x00000008) +#define DSC_DECODER_LINE_BUFFER_BIT_DEPTH_MAX (0x0000000D) + + NvU32 decoderColorDepthCaps; // Color depth supported by DSC decoder of panel +#define DSC_DECODER_COLOR_DEPTH_CAPS_8_BITS (0x00000001) +#define DSC_DECODER_COLOR_DEPTH_CAPS_10_BITS (0x00000002) +#define DSC_DECODER_COLOR_DEPTH_CAPS_12_BITS (0x00000004) +#define DSC_DECODER_COLOR_DEPTH_CAPS_16_BITS (0x00000008) + + NvU32 decoderColorDepthMask; + + DSC_ALGORITHM_REV algorithmRevision; // DSC algorithm revision that sink supports + + NvBool bBlockPrediction; // Whether block prediction is supported or not. 
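+
+        //
+        // Note (illustrative): DSC_GeneratePPS() snaps bits-per-pixel values to
+        // the sink's precision via DSC_AlignDownForBppPrecision(). Assuming each
+        // DSC_BITS_PER_PIXEL_PRECISION_* flag value (1, 2, 4, 8, 16) equals the
+        // step size in 1/16-bpp units (an assumption, not spelled out here),
+        // that alignment reduces to:
+        //     alignedX16 = bppX16 - (bppX16 % precisionFlag);
+        // e.g. 349 (x16) at 1/8-bpp precision (flag value 2) aligns down to 348.
+        //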
+ + // Peak throughput supported for 444 and simple 422 modes + NvU32 peakThroughputMode0; +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_INVALID (0x00000000) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_340 (0x00000001) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_400 (0x00000002) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_450 (0x00000003) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_500 (0x00000004) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_550 (0x00000005) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_600 (0x00000006) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_650 (0x00000007) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_700 (0x00000008) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_750 (0x00000009) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_800 (0x0000000A) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_850 (0x0000000B) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_900 (0x0000000C) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_950 (0x0000000D) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_1000 (0x0000000E) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_170 (0x0000000F) +// Custom definition of peak throughput for HDMI YUV422/YUV420 modes since those are not defined in spec +// Starting with 0x100 to provide headroom for DSC spec definitions in future +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_680 (0x00000100) + + // Peak throughput supported for native 422 and 420 modes + NvU32 peakThroughputMode1; +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_INVALID (0x00000000) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_340 (0x00000001) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_400 (0x00000002) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_450 (0x00000003) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_500 (0x00000004) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_550 (0x00000005) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_600 (0x00000006) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_650 (0x00000007) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_700 (0x00000008) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_750 (0x00000009) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_800 (0x0000000A) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_850 (0x0000000B) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_900 (0x0000000C) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_950 (0x0000000D) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_1000 (0x0000000E) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_170 (0x0000000F) +// Custom definition of peak throughput for HDMI YUV modes since those are not defined in spec +// Starting with 0x100 to provide headroom for DSC spec definitions in future +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_680 (0x00000100) + + // Maximum bits_per_pixel supported by the DSC decompressor multiplied by 16 + NvU32 maxBitsPerPixelX16; + }sinkCaps; + + struct BRANCH_DSC_CAPS + { + NvU32 overallThroughputMode0; + NvU32 overallThroughputMode1; + NvU32 maxLineBufferWidth; + }branchCaps; + + struct GPU_DSC_CAPS + { + // Mask of all color formats for which encoding supported by GPU + NvU32 encoderColorFormatMask; +#define DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001) +#define DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002) +#define DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004) +#define DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008) + + // + // Size of line buffer inside DSC. Should be in number of pixels. + // this should be greater than or equal to active width + // + NvU32 lineBufferSize; + + // e.g. 
1/16, 1/8, 1/4, 1/2, 1bpp + NvU32 bitsPerPixelPrecision; + + // Maximum number of horizontal slices supported + NvU32 maxNumHztSlices; + + // + // Bit depth used by the GPU to store the + // reconstructed pixels within the line buffer + // + NvU32 lineBufferBitDepth; + }gpuCaps; + + struct FORCED_DSC_PARAMS + { + // Forced Slice count + NvU32 sliceCount; + + // Forced Slice width + NvU32 sliceWidth; + + // Forced Slice height + NvU32 sliceHeight; + + // Forced DSC Algorithm Revision + DSC_ALGORITHM_REV dscRevision; + }forcedDscParams; +} DSC_INFO; + +typedef struct +{ + NvU32 manufacturerID; + NvU32 productID; + NvU32 yearWeek; +} EDID_INFO; + +typedef enum +{ + DSC_DP, + DSC_HDMI +} DSC_CONNECTOR_TYPE; + +typedef enum +{ + DSC_DP_SST, + DSC_DP_MST +} DSC_DP_MODE; + +typedef struct +{ + DSC_CONNECTOR_TYPE connectorType; + struct DP_DATA + { + NvU64 linkRateHz; + NvU32 laneCount; + DSC_DP_MODE dpMode; + NvU32 hBlank; + NvBool bIsEdp; + NvBool bDisableDscMaxBppLimit; + }dpData; +} WAR_DATA; + +typedef struct { + NvU8 data[500U]; // total size of DSC_IN/OUTPUT_PARAMS +} DSC_GENERATE_PPS_OPAQUE_WORKAREA; + +typedef struct +{ + NvU32 pPps[DSC_MAX_PPS_SIZE_DWORD]; // Out - PPS SDP data +} DSCPPSDATA; + +/* + * Windows testbed compiles are done with warnings as errors + * with the maximum warning level. Here we turn off some + * of the problematic warnings. + */ + +/* ------------------------ Global Variables ------------------------------- */ +/* ------------------------ Static Variables ------------------------------- */ +/* ------------------------ Private Functions ------------------------------ */ +/* ------------------------ Public Functions ------------------------------- */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * @brief Calculate PPS parameters based on passed down Sink, + * GPU capability and modeset info + * + * @param[in] pDscInfo Includes Sink and GPU DSC capabilities + * @param[in] pModesetInfo Modeset related information + * @param[in] pWARData Data required for providing WAR for issues + * @param[in] availableBandwidthBitsPerSecond Available bandwidth for video + * transmission(After FEC/Downspread overhead consideration) + * @param[in] pOpaqueWorkarea Scratch buffer of sufficient size pre-allocated + by client for DSC PPS calculations use + * @param[out] pps Calculated PPS parameter. + * The data can be send to SetDscPpsData* methods directly. + * @param[out] pBitsPerPixelX16 Bits per pixel multiplied by 16 + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +NVT_STATUS DSC_GeneratePPS(const DSC_INFO *pDscInfo, + const MODESET_INFO *pModesetInfo, + const WAR_DATA *pWARData, + NvU64 availableBandwidthBitsPerSecond, + DSC_GENERATE_PPS_OPAQUE_WORKAREA *pOpaqueWorkarea, + NvU32 pps[DSC_MAX_PPS_SIZE_DWORD], + NvU32 *pBitsPerPixelX16); + +/* + * @brief Calculate PPS parameters and slice count mask based on passed down + * Sink, GPU capability and modeset info + * + * + * @param[in] pDscInfo Includes Sink and GPU DSC capabilities + * @param[in] pModesetInfo Modeset related information + * @param[in] pWARData Data required for providing WAR for issues + * @param[in] availableBandwidthBitsPerSecond Available bandwidth for video + * transmission(After FEC/Downspread overhead consideration) + * @param[out] pps Calculated PPS parameter. + * The data can be send to SetDscPpsData* methods directly. + * @param[out] pBitsPerPixelX16 Bits per pixel multiplied by 16 + * @param[out] pSliceCountMask Mask of all slice counts supported by the mode. 
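+ *
+ * Usage sketch (illustrative; the caller-side names are hypothetical):
+ *
+ *     NvU32 pps[DSC_MAX_PPS_SIZE_DWORD];
+ *     NvU32 bppX16 = 0;                  // 0 lets the library pick the value
+ *     NvU32 sliceMask = 0;
+ *
+ *     NVT_STATUS s = DSC_GeneratePPSWithSliceCountMask(&dscInfo, &modesetInfo,
+ *                        NULL,           // pWARData: no WAR needed here
+ *                        linkBpsAfterFec, pps, &bppX16, &sliceMask);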
+ * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + * In case this returns failure consider that PPS is not possible. + */ +NVT_STATUS +DSC_GeneratePPSWithSliceCountMask +( + const DSC_INFO *pDscInfo, + const MODESET_INFO *pModesetInfo, + const WAR_DATA *pWARData, + NvU64 availableBandwidthBitsPerSecond, + NvU32 pps[DSC_MAX_PPS_SIZE_DWORD], + NvU32 *pBitsPerPixelX16, + NvU32 *sliceCountMask +); + +NVT_STATUS DSC_ValidatePPSData(DSCPPSDATA *pPps); + +#ifdef __cplusplus +} +#endif +#endif // __DSCPPS_H__ diff --git a/src/common/modeset/timing/nvt_edid.c b/src/common/modeset/timing/nvt_edid.c new file mode 100644 index 0000000..1adf60e --- /dev/null +++ b/src/common/modeset/timing/nvt_edid.c @@ -0,0 +1,3214 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvt_edid.c +// +// Purpose: the provide edid related services +// +//***************************************************************************** + +#include "nvBinSegment.h" +#include "nvmisc.h" + +#include "edid.h" + + + +PUSH_SEGMENTS + +// Macro to declare a TIMING initializer for given parameters without border +#define EST_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rr,pclk,format) \ +{hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',NVT_PROGRESSIVE,pclk,((pclk<<3)+(pclk<<1)),{0,rr,set_rrx1k(pclk,ht,vt),0,1,{0},{0},{0},{0},format,"VESA Established"}} + +DATA_SEGMENT(PAGE_DATA) +#if !defined(NV_WSA) +CONS_SEGMENT(PAGE_CONS) +#endif // wsa + +// There is a large table of strings that translate 3-character PNP vendor IDs to a more user-friendly name in the following header. +// Mark this constant table as pageable. 
+#include "nvPNPVendorIds.h" + +static const NVT_TIMING EDID_EST[] = +{ + EST_TIMING( 720, 0, 0, 720,'-', 400, 0,0, 400,'-',70, 0,NVT_STATUS_EDID_EST), // 720x400x70Hz (IBM, VGA) + EST_TIMING( 720, 0, 0, 720,'-', 400, 0,0, 400,'-',88, 0,NVT_STATUS_EDID_EST), // 720x400x88Hz (IBM, XGA2) + {640,0,16,96,800,NVT_H_SYNC_NEGATIVE,480,0,10,2,525,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,2518,25175,{0,60,60000,0,1,{0},{0},{0},{0},NVT_STATUS_EDID_EST,"EDID_Established"}}, + + EST_TIMING( 640, 0, 0, 640,'-', 480, 0,0, 480,'-',67, 0,NVT_STATUS_EDID_EST), // 640x480x67Hz (Apple, Mac II) + + // 640x480x72Hz (VESA) - this entry have borders + {640,8,16,40,832,NVT_H_SYNC_NEGATIVE,480,8,1,3,520,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,3150,31500,{0,72,72000,0,1,{0},{0},{0},{0},NVT_STATUS_EDID_EST,"EDID_Established"}}, + EST_TIMING( 640,16, 64, 840,'-', 480, 1,3, 500,'-',75, 3150,NVT_STATUS_EDID_EST), // 640x480x75Hz (VESA) + EST_TIMING( 800,24, 72,1024,'+', 600, 1,2, 625,'+',56, 3600,NVT_STATUS_EDID_EST), // 800x600x56Hz (VESA) + EST_TIMING( 800,40,128,1056,'+', 600, 1,4, 628,'+',60, 4000,NVT_STATUS_EDID_EST), // 800x600x60Hz (VESA) + + EST_TIMING( 800,56,120,1040,'+', 600,37,6, 666,'+',72, 5000,NVT_STATUS_EDID_EST), // 800x600x72Hz (VESA) + EST_TIMING( 800,16, 80,1056,'+', 600, 1,3, 625,'+',75, 4950,NVT_STATUS_EDID_EST), // 800x600x75Hz (VESA) + EST_TIMING( 832, 0, 0, 832,'-', 624, 0,0, 624,'-',75, 0,NVT_STATUS_EDID_EST), // 832x624x75Hz (Apple, Mac II) + EST_TIMING(1024, 0, 0,1024,'-', 768, 0,0, 768,'-',87, 0,NVT_STATUS_EDID_EST), // 1024x768x87Hz (IBM, Interlaced) + + EST_TIMING(1024,24,136,1344,'-', 768, 3,6, 806,'-',60, 6500,NVT_STATUS_EDID_EST), // 1024x768x60Hz (VESA) + EST_TIMING(1024,24,136,1328,'-', 768, 3,6, 806,'-',70, 7500,NVT_STATUS_EDID_EST), // 1024x768x70Hz (VESA) + EST_TIMING(1024,16, 96,1312,'+', 768, 1,3, 800,'+',75, 7875,NVT_STATUS_EDID_EST), // 1024x768x75Hz (VESA) + EST_TIMING(1280,16,144,1688,'+',1024, 1,3,1066,'+',75,13500,NVT_STATUS_EDID_EST), // 1280x1024x75Hz (VESA) + + // the end + NVT_TIMING_SENTINEL +}; + +static NvU32 MAX_EST_FORMAT = sizeof(EDID_EST)/sizeof(EDID_EST[0]) - 1; + +static const NVT_TIMING EDID_ESTIII[] = +{ + EST_TIMING( 640, 32, 64, 832,'+', 350,32,3, 445,'-',85, 3150,NVT_STATUS_EDID_EST), // 640x350x85Hz + EST_TIMING( 640, 32, 64, 832,'-', 400, 1,3, 445,'+',85, 3150,NVT_STATUS_EDID_EST), // 640x400x85Hz + EST_TIMING( 720, 36, 72, 936,'-', 400, 1,3, 446,'+',85, 3550,NVT_STATUS_EDID_EST), // 720x400x85Hz + EST_TIMING( 640, 56, 56, 832,'-', 480, 1,3, 509,'-',85, 3600,NVT_STATUS_EDID_EST), // 640x480x85Hz + EST_TIMING( 848, 16,112,1088,'+', 480, 6,8, 517,'+',60, 3375,NVT_STATUS_EDID_EST), // 848x480x60HZ + EST_TIMING( 800, 32, 64,1048,'+', 600, 1,3, 631,'+',85, 5625,NVT_STATUS_EDID_EST), // 800x600x85Hz + EST_TIMING(1024, 48, 96,1376,'+', 768, 1,3, 808,'+',85, 9450,NVT_STATUS_EDID_EST), // 1024x768x85Hz + EST_TIMING(1152, 64,128,1600,'+', 864, 1,3, 900,'+',75,10800,NVT_STATUS_EDID_EST), // 1152x864x75Hz + + EST_TIMING(1280, 48, 32,1440,'+', 768, 3,7, 790,'-',60, 6825,NVT_STATUS_EDID_EST), // 1280x768x60Hz (RB) + EST_TIMING(1280, 64,128,1664,'-', 768, 3,7, 798,'+',60, 7950,NVT_STATUS_EDID_EST), // 1280x768x60Hz + EST_TIMING(1280, 80,128,1696,'-', 768, 3,7, 805,'+',75,10225,NVT_STATUS_EDID_EST), // 1280x768x75Hz + EST_TIMING(1280, 80,136,1712,'-', 768, 3,7, 809,'+',85,11750,NVT_STATUS_EDID_EST), // 1280x768x85Hz + EST_TIMING(1280, 96,112,1800,'+', 960, 1,3,1000,'+',60,10800,NVT_STATUS_EDID_EST), // 1280x960x60Hz + EST_TIMING(1280, 64,160,1728,'+', 960, 
1,3,1011,'+',85,14850,NVT_STATUS_EDID_EST), // 1280x960x85Hz + EST_TIMING(1280, 48,112,1688,'+',1024, 1,3,1066,'+',60,10800,NVT_STATUS_EDID_EST), // 1280x1024x60Hz + EST_TIMING(1280, 64,160,1728,'+',1024, 1,3,1072,'+',85,15750,NVT_STATUS_EDID_EST), // 1280x1024x85Hz + + EST_TIMING(1360, 64,112,1792,'+', 768, 3,6, 795,'+',60, 8550,NVT_STATUS_EDID_EST), // 1360x768x60Hz + EST_TIMING(1440, 48, 32,1600,'+', 900, 3,6, 926,'-',60, 8875,NVT_STATUS_EDID_EST), // 1440x900x60Hz (RB) + EST_TIMING(1440, 80,152,1904,'-', 900, 3,6, 934,'+',60,10650,NVT_STATUS_EDID_EST), // 1440x900x60Hz + EST_TIMING(1440, 96,152,1936,'-', 900, 3,6, 942,'+',75,13675,NVT_STATUS_EDID_EST), // 1440x900x75Hz + EST_TIMING(1440,104,152,1952,'-', 900, 3,6, 948,'+',85,15700,NVT_STATUS_EDID_EST), // 1440x900x85Hz + EST_TIMING(1400, 48, 32,1560,'+',1050, 3,4,1080,'-',60,10100,NVT_STATUS_EDID_EST), // 1440x1050x60Hz (RB) + EST_TIMING(1400, 88,144,1864,'-',1050, 3,4,1089,'+',60,12175,NVT_STATUS_EDID_EST), // 1440x1050x60Hz + EST_TIMING(1400,104,144,1896,'-',1050, 3,4,1099,'+',75,15600,NVT_STATUS_EDID_EST), // 1440x1050x75Hz + + EST_TIMING(1400,104,152,1912,'-',1050, 3,4,1105,'+',85,17950,NVT_STATUS_EDID_EST), // 1440x1050x85Hz + EST_TIMING(1680, 48, 32,1840,'+',1050, 3,6,1080,'-',60,11900,NVT_STATUS_EDID_EST), // 1680x1050x60Hz (RB) + EST_TIMING(1680,104,176,2240,'-',1050, 3,6,1089,'+',60,14625,NVT_STATUS_EDID_EST), // 1680x1050x60Hz + EST_TIMING(1680,120,176,2272,'-',1050, 3,6,1099,'+',75,18700,NVT_STATUS_EDID_EST), // 1680x1050x75Hz + EST_TIMING(1680,128,176,2288,'-',1050, 3,6,1105,'+',85,21475,NVT_STATUS_EDID_EST), // 1680x1050x85Hz + EST_TIMING(1600, 64,192,2160,'+',1200, 1,3,1250,'+',60,16200,NVT_STATUS_EDID_EST), // 1600x1200x60Hz + EST_TIMING(1600, 64,192,2160,'+',1200, 1,3,1250,'+',65,17550,NVT_STATUS_EDID_EST), // 1600x1200x65Hz + EST_TIMING(1600, 64,192,2160,'+',1200, 1,3,1250,'+',70,18900,NVT_STATUS_EDID_EST), // 1600x1200x70Hz + + EST_TIMING(1600, 64,192,2160,'+',1200, 1,3,1250,'+',75,20250,NVT_STATUS_EDID_EST), // 1600x1200x75Hz + EST_TIMING(1600, 64,192,2160,'+',1200, 1,3,1250,'+',85,22950,NVT_STATUS_EDID_EST), // 1600x1200x85Hz + EST_TIMING(1792,128,200,2448,'-',1344, 1,3,1394,'+',60,20475,NVT_STATUS_EDID_EST), // 1792x1344x60Hz + EST_TIMING(1792, 96,216,2456,'-',1344, 1,3,1417,'+',75,26100,NVT_STATUS_EDID_EST), // 1792x1344x75Hz + EST_TIMING(1856, 96,224,2528,'-',1392, 1,3,1439,'+',60,21825,NVT_STATUS_EDID_EST), // 1856x1392x60Hz + EST_TIMING(1856,128,224,2560,'-',1392, 1,3,1500,'+',75,28800,NVT_STATUS_EDID_EST), // 1856x1392x75Hz + EST_TIMING(1920, 48, 32,2080,'+',1200, 3,6,1235,'-',60,15400,NVT_STATUS_EDID_EST), // 1920x1200x60Hz (RB) + EST_TIMING(1920,136,200,2592,'-',1200, 3,6,1245,'+',60,19325,NVT_STATUS_EDID_EST), // 1920x1200x60Hz + + EST_TIMING(1920,136,208,2608,'-',1200, 3,6,1255,'+',75,24525,NVT_STATUS_EDID_EST), // 1920x1200x75Hz + EST_TIMING(1920,144,208,2624,'-',1200, 3,6,1262,'+',85,28125,NVT_STATUS_EDID_EST), // 1920x1200x85Hz + EST_TIMING(1920,128,208,2600,'-',1440, 1,3,1500,'+',60,23400,NVT_STATUS_EDID_EST), // 1920x1440x60Hz + EST_TIMING(1920,144,224,2640,'-',1440, 1,3,1500,'+',75,29700,NVT_STATUS_EDID_EST), // 1920x1440x75Hz + + NVT_TIMING_SENTINEL, + NVT_TIMING_SENTINEL, + NVT_TIMING_SENTINEL, + NVT_TIMING_SENTINEL +}; + +static NvU32 MAX_ESTIII_FORMAT = sizeof(EDID_ESTIII)/sizeof(EDID_ESTIII[0]) - 1; + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_EnumEST(NvU32 index, NVT_TIMING *pT) +{ + if ((pT == NULL) || (index > MAX_EST_FORMAT)) + { + return NVT_STATUS_ERR; + } + + *pT = EDID_EST[index]; 
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_EnumESTIII(NvU32 index, NVT_TIMING *pT)
+{
+    if ((pT == NULL) || (index > MAX_ESTIII_FORMAT))
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    *pT = EDID_ESTIII[index];
+
+    if (pT->HTotal == 0 || pT->VTotal == 0)
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk1khz,
+                              (NvU32)1000*(NvU32)1000,
+                              (NvU32)pT->HTotal*(NvU32)pT->VTotal);
+
+    return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NvU32 isHdmi3DStereoType(NvU8 StereoStructureType)
+{
+    return ((NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK == StereoStructureType)      ||
+            (NVT_HDMI_VS_BYTE5_HDMI_3DS_FIELD_ALT == StereoStructureType)      ||
+            (NVT_HDMI_VS_BYTE5_HDMI_3DS_LINE_ALT == StereoStructureType)       ||
+            (NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEFULL == StereoStructureType) ||
+            (NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTH == StereoStructureType)         ||
+            (NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTHGFX == StereoStructureType)      ||
+            (NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM == StereoStructureType)      ||
+            (NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF == StereoStructureType));
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NvU32 NvTiming_GetVESADisplayDescriptorVersion(NvU8 *rawData, NvU32 *pVer)
+{
+    return getEdidVersion(rawData, pVer);
+}
+
+// get the EDID version
+CODE_SEGMENT(PAGE_DD_CODE)
+NvU32 getEdidVersion(NvU8 *pEdid, NvU32 *pVer)
+{
+    EDIDV1STRUC *p = (EDIDV1STRUC *) pEdid;
+
+    if (pEdid[0] == 0x00)
+    {
+        // For Version 1.x, the first 8 bytes of the EDID must be 00h, FFh, FFh, FFh, FFh, FFh, FFh, 00h.
+        // Beware of endian-ness and signed-ness.
+        if (p->bHeader[1] != 0xFF || p->bHeader[2] != 0xFF || p->bHeader[3] != 0xFF ||
+            p->bHeader[4] != 0xFF || p->bHeader[5] != 0xFF || p->bHeader[6] != 0xFF ||
+            p->bHeader[7] != 0x00)
+            return NVT_STATUS_ERR;
+
+        *pVer = (((NvU32) p->bVersionNumber) << 8) + ((NvU32) p->bRevisionNumber);
+    }
+    else if ((pEdid[0] & 0xF0) == 0x20)
+        *pVer = (((NvU32) (pEdid[0] & 0XF0) << 4) + (NvU32) (pEdid[0] & 0X0F)); // DisplayID version 2.x
+    else
+        return NVT_STATUS_ERR; // unrecognized EDID version
+
+    return NVT_STATUS_SUCCESS;
+}
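getEdidVersion() packs the version and revision bytes into a single word, so EDID 1.4 comes back as 0x0104 and plain integer comparisons such as version >= NVT_EDID_VER_1_4 do the right thing. A standalone sketch of the packing (the 8-byte signature is the fixed EDID 1.x header from the VESA spec):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t edid[128] = {0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00};
        edid[18] = 1; /* bVersionNumber  */
        edid[19] = 4; /* bRevisionNumber */

        uint32_t ver = ((uint32_t)edid[18] << 8) + edid[19];
        printf("0x%04X\n", ver); /* 0x0104: sorts above 0x0103 (EDID 1.3) */
        return 0;
    }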
+
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidCvt3ByteDescriptor(NvU8 *p, NVT_EDID_INFO *pInfo, NvU32 *vtbCount)
+{
+    NvU32 k;
+    NvU32 width, height, aspect, rr = 0;
+    NVT_EDID_DD_CVT_3BYTE_BLOCK *pTiming = (NVT_EDID_DD_CVT_3BYTE_BLOCK *)p;
+    NVT_TIMING newTiming;
+    NVT_STATUS status;
+
+    if (pTiming->addressable_lines == 0)
+        return;
+
+    height = pTiming->addressable_lines;
+    aspect = pTiming->aspect_ratio;
+
+    if (aspect == NVT_EDID_CVT3_ASPECT_4X3)
+        width = height * 4 / 3;
+    else if (aspect == NVT_EDID_CVT3_ASPECT_16X9)
+        width = height * 16 / 9;
+    else if (aspect == NVT_EDID_CVT3_ASPECT_16X10)
+        width = height * 16 / 10;
+    else // 15:9
+        width = height * 15 / 9;
+
+    width &= 0xFFFFFFF8; // round down to the nearest multiple of 8
+
+    // loop through bits 4:0 of supported_vert_rates so we can add a timing
+    // for each supported rate
+    for (k=1; k<=0x10; k<<=1)
+    {
+        // skip if this bit indicates no support for the rate
+        if ((pTiming->supported_vert_rates & (k)) == 0)
+            continue;
+
+        // find the correct refresh rate for this bit
+        switch (k)
+        {
+            case NVT_EDID_CVT3_SUPPORTED_RATE_60HZ_REDUCED_BLANKING :
+            case NVT_EDID_CVT3_SUPPORTED_RATE_60HZ :
+                rr = 60;
+                break;
+            case NVT_EDID_CVT3_SUPPORTED_RATE_85HZ :
+                rr = 85;
+                break;
+            case NVT_EDID_CVT3_SUPPORTED_RATE_75HZ :
+                rr = 75;
+                break;
+            case NVT_EDID_CVT3_SUPPORTED_RATE_50HZ :
+                rr = 50;
+                break;
+        }
+
+        NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming));
+
+        if ((k) != NVT_EDID_CVT3_SUPPORTED_RATE_60HZ_REDUCED_BLANKING) // standard blanking
+        {
+            status = NvTiming_CalcCVT(width, height, rr,
+                                      NVT_PROGRESSIVE,
+                                      &newTiming);
+        }
+        else // reduced blanking
+        {
+            status = NvTiming_CalcCVT_RB(width, height, rr,
+                                         NVT_PROGRESSIVE,
+                                         &newTiming);
+        }
+
+        if (status == NVT_STATUS_SUCCESS)
+        {
+            // For VTB timings, add additional information
+            if (vtbCount)
+            {
+                (*vtbCount)++;
+                newTiming.etc.status = NVT_STATUS_EDID_VTB_EXT_CVTn(*vtbCount);
+                newTiming.etc.name[39] = '\0';
+            }
+
+            if (!assignNextAvailableTiming(pInfo, &newTiming))
+            {
+                break;
+            }
+        }
+    } // for (k=1; k<=0x10; k<<=1)
+}
+
+// parse the EDID 1.x based cvt timing info
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidCvtTiming(NVT_EDID_INFO *pInfo)
+{
+    NvU32 i, j;
+
+    // find display range limit with cvt, or cvt 3-byte LDDs
+    for (i=0; i<NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++)
+    {
+        if (pInfo->ldd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_CVT)
+        {
+            NVT_EDID_DD_CVT_3BYTE *pCVT = (NVT_EDID_DD_CVT_3BYTE *)&pInfo->ldd[i].u.cvt;
+
+            // loop through the cvt 3-byte blocks
+            for (j=0; j<NVT_EDID_DD_MAX_CVT3_PER_DESCRIPTOR; j++)
+            {
+                parseEdidCvt3ByteDescriptor((NvU8 *)(pCVT->block + j),
+                                            pInfo, NULL);
+            } // for (j=0; j<NVT_EDID_DD_MAX_CVT3_PER_DESCRIPTOR; j++)
+        }
+    }
+}
+
+// parse the EDID 1.x based established timing info
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidEstablishedTiming(NVT_EDID_INFO *pInfo)
+{
+    NvU32 i, j, k;
+    NVT_TIMING newTiming;
+    NvU32 count = 0;
+
+    // scan the established timing bits, MSB first
+    for (i = (NvU32)(1 << (sizeof(pInfo->established_timings_1_2) * 8 - 1)), j = 0; i != 0; i >>= 1, j++)
+    {
+        if ((pInfo->established_timings_1_2 & i) != 0 && EDID_EST[j].pclk1khz != 0)
+        {
+            // count the timing
+            newTiming = EDID_EST[j];
+            newTiming.etc.status = NVT_STATUS_EDID_ESTn(++count);
+            NVT_SNPRINTF((char *)newTiming.etc.name, 40,
+                         "EDID-EST(VESA):%dx%dx%dHz",
+                         (int)newTiming.HVisible,
+                         (int)newTiming.VVisible,
+                         (int)newTiming.etc.rr);
+            newTiming.etc.name[39] = '\0';
+
+            if (!assignNextAvailableTiming(pInfo, &newTiming))
+            {
+                break;
+            }
+        }
+    }
+
+    // ESTIII block in ldd only supported in EDID1.4 and above
+    if (pInfo->version < NVT_EDID_VER_1_4)
+        return;
+
+    for (i=0; i<NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++)
+    {
+        if (pInfo->ldd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_ESTIII)
+        {
+            NVT_EDID_DD_EST_TIMING3* pEST = &pInfo->ldd[i].u.est3;
+
+            for (j=0; j<NVT_EDID_DD_EST_TIMING3_NUM; j++)
+            {
+                for (k=0; k<8; k++)
+                {
+                    if ((pEST->data[j] & (1<<k)) != 0 &&
+                        EDID_ESTIII[(j<<3)+k].pclk1khz != 0)
+                    {
+                        newTiming = EDID_ESTIII[(j<<3)+k];
+                        newTiming.etc.status = NVT_STATUS_EDID_ESTn(++count);
+                        NVT_SNPRINTF((char *)newTiming.etc.name, 40,
+                                     "EDID-EST(VESA):%dx%dx%dHz",
+                                     (int)newTiming.HVisible,
+                                     (int)newTiming.VVisible,
+                                     (int)newTiming.etc.rr);
+                        newTiming.etc.name[39] = '\0';
+
+                        if (!assignNextAvailableTiming(pInfo, &newTiming))
+                        {
+                            return;
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
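The established-timing scan starts at the MSB of the 16-bit field (1 << 15, written with sizeof above) so that bit position j lines up with row j of the EDID_EST table. A standalone sketch of the same walk with a made-up bitmask (table lookups elided):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t est = 0x8021; /* hypothetical established_timings_1_2 value */
        uint32_t i, j;

        for (i = 1u << 15, j = 0; i != 0; i >>= 1, j++)
        {
            if (est & i)
                printf("EDID_EST[%u] advertised\n", (unsigned)j);
        }
        return 0; /* prints rows 0, 10 and 15 for 0x8021 */
    }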
+
+// parse the EDID 1.x based standard timing descriptor
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidStandardTimingDescriptor(NvU16 timing, NVT_EDID_INFO *pInfo, NvU32 count, NVT_TIMING *pT)
+{
+    NvU32 aspect, width, height, rr;
+
+    // get the width
+    width = ((timing & 0x0FF) + 31) << 3; // bits 7:0
+
+    // get the refresh rate
+    rr = ((timing >> 8) & 0x3F) + 60; // bits 5:0
+
+    // get the height
+    aspect = ((timing >> 8) & 0xC0); // aspect ratio at bit 7:6
+    if (aspect == 0x00)
+        height = (pInfo->version < 0x103) ? width : (width * 5 / 8); //16:10 per EDID1.3, and 1:1 with earlier EDIDs
+    else if (aspect == 0x40)
+        height = width * 3 / 4;  //4:3
+    else if (aspect == 0x80)
+        height = width * 4 / 5;  //5:4
+    else
+        height = width * 9 / 16; //16:9
+
+    // try to get the timing from DMT first
+    if (NvTiming_CalcDMT(width, height, rr, 0, pT) == NVT_STATUS_SUCCESS)
+    {
+        pT->etc.status = NVT_STATUS_EDID_STDn(count);
+        NVT_SNPRINTF((char *)pT->etc.name, 40, "EDID-STD(DMT):%dx%dx%dHz", (int)width, (int)height, (int)rr);
+        pT->etc.name[39] = '\0';
+    }
+    // EDID1.4 and above defaults to CVT instead of GTF; GTF is deprecated as of 1.4.
+    else if ((pInfo->version >= NVT_EDID_VER_1_4) && (NvTiming_CalcCVT(width, height, rr, NVT_PROGRESSIVE, pT) == NVT_STATUS_SUCCESS))
+    {
+        pT->etc.status = NVT_STATUS_EDID_STDn(count);
+        NVT_SNPRINTF((char *)pT->etc.name, 40, "EDID-STD(CVT):%dx%dx%dHz", (int)width, (int)height, (int)rr);
+        pT->etc.name[39] = '\0';
+    }
+    else
+    {
+        // if the mode is not found in DMT, use GTF timing
+        if (NvTiming_CalcGTF(width, height, rr, NVT_PROGRESSIVE, pT) == NVT_STATUS_SUCCESS)
+        {
+            NVT_SNPRINTF((char *)pT->etc.name, 40, "EDID-STD(GTF):%dx%dx%dHz", (int)width, (int)height, (int)rr);
+            pT->etc.name[39] = '\0';
+        }
+        pT->etc.status = NVT_STATUS_EDID_STDn(count);
+    }
+}
+
+// parse the EDID 1.x based standard timing info
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidStandardTiming(NVT_EDID_INFO *pInfo)
+{
+    NvU32 i, j;
+    NVT_TIMING newTiming;
+    NvU32 count = 0;
+
+    // now check for standard timings
+    for (i=0; i<NVT_EDID_MAX_STANDARD_TIMINGS; i++)
+    {
+        if (((pInfo->standard_timings[i] & 0x0FF) != 0x1) && //proper indication of an unused field
+            (pInfo->standard_timings[i] != 0x0))             //improper indication (bad edid)
+        {
+            NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming));
+
+            parseEdidStandardTimingDescriptor(pInfo->standard_timings[i],
+                                              pInfo, count, &newTiming);
+
+            if (!assignNextAvailableTiming(pInfo, &newTiming))
+            {
+                break;
+            }
+
+            count++;
+        } //if ((pInfo->standard_timings[i] & 0x0FF) != 0x1)
+    } //for (i=0; i<NVT_EDID_MAX_STANDARD_TIMINGS; i++)
+
+    // STI blocks in ldd are only supported in EDID1.4 and above
+    if (pInfo->version < NVT_EDID_VER_1_4)
+        return;
+
+    // now check for standard timings in long display descriptors
+    for (i=0; i<NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++)
+    {
+        if (pInfo->ldd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_STI)
+        {
+            NVT_EDID_DD_STD_TIMING* pSTI = &pInfo->ldd[i].u.std_timing;
+            for (j=0; j<NVT_EDID_DD_STI_NUM; j++)
+            {
+                if ((pSTI->descriptor[j] & 0x0FF) != 0x00)
+                {
+                    NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming));
+
+                    parseEdidStandardTimingDescriptor(pSTI->descriptor[j],
+                                                      pInfo, count, &newTiming);
+
+                    if (!assignNextAvailableTiming(pInfo, &newTiming))
+                    {
+                        break;
+                    }
+
+                    count++;
+                } // if ((pSTI->descriptor[j] & 0x0FF) != 0x00)
+            } //for (j=0; j<NVT_EDID_DD_STI_NUM; j++)
+        } //if (pInfo->ldd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_STI)
+    } //for (i=0; i<NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++)
+}
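Each 2-byte standard timing descriptor encodes width/8 - 31 in its low byte, and the aspect ratio (bits 7:6) plus refresh - 60 (bits 5:0) in its high byte; the parser above inverts that and then asks DMT/CVT/GTF for the full timing. A standalone decode of a hypothetical 1920x1080@60 descriptor word:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t timing = 0xC0D1; /* low byte 0xD1 = (1920/8)-31, high byte 0xC0 = 16:9, +0 Hz */

        uint32_t width  = ((timing & 0x0FF) + 31) << 3; /* (209+31)*8 = 1920 */
        uint32_t rr     = ((timing >> 8) & 0x3F) + 60;  /* 60 Hz */
        uint32_t aspect = (timing >> 8) & 0xC0;         /* 0xC0 -> 16:9 */
        uint32_t height = (aspect == 0xC0) ? width * 9 / 16 : 0;

        printf("%ux%u@%uHz\n", (unsigned)width, (unsigned)height, (unsigned)rr);
        return 0;
    }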
+
+// parse the EDID detailed timing descriptor
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS parseEdidDetailedTimingDescriptor(NvU8 *p, NVT_TIMING *pT)
+{
+    DETAILEDTIMINGDESCRIPTOR *pDTD = (DETAILEDTIMINGDESCRIPTOR *)p;
+    NvS32 hvisible, vvisible;
+    NvU32 dwTotalPixels;
+
+    // only process the block if it is a legal detailed timing descriptor
+    if ((pDTD->wDTPixelClock != 0 || pDTD->bDTHorizontalActive != 0)
+        && (pDTD->wDTPixelClock != 0x0101 || pDTD->bDTHorizontalActive != 1 ||
+            pDTD->bDTHorizontalBlanking != 1 || pDTD->bDTHorizActiveBlank != 1))
+    {
+        // Note that hvisible and vvisible here correspond to the "Addressable Video" portion of the
+        // "Active Video" defined in the EDID spec (see section 3.12: Note Regarding Borders)
+        hvisible = (pDTD->bDTHorizontalActive + ((pDTD->bDTHorizActiveBlank & 0xF0) << 4)) - 2 * pDTD->bDTHorizontalBorder;
+        vvisible = (pDTD->bDTVerticalActive + ((pDTD->bDTVertActiveBlank & 0xF0) << 4)) - 2 * pDTD->bDTVerticalBorder;
+
+        // Sanity check since we are getting values from the monitor
+        if (hvisible <= 0 || vvisible <= 0 || pDTD->wDTPixelClock == 0)
+        {
+            if (pT)
+                pT->HVisible = 0;
+            return NVT_STATUS_ERR;
+        }
+
+        // if the output timing buffer is not provided, simply return here to indicate a legal descriptor
+        if (pT == NULL)
+            return NVT_STATUS_SUCCESS;
+
+        // horizontal timing parameters
+        pT->HVisible = (NvU16)hvisible;
+        pT->HBorder = (NvU16)pDTD->bDTHorizontalBorder;
+        pT->HTotal = (NvU16)hvisible + (NvU16)(pDTD->bDTHorizontalBlanking + ((pDTD->bDTHorizActiveBlank & 0x0F) << 8)) + pT->HBorder * 2;
+        pT->HFrontPorch = (NvU16)(pDTD->bDTHorizontalSync + ((pDTD->bDTHorizVertSyncOverFlow & 0xC0) << 2));
+        pT->HSyncWidth = (NvU16)(pDTD->bDTHorizontalSyncWidth + ((pDTD->bDTHorizVertSyncOverFlow & 0x30) << 4));
+
+        // vertical timing parameters
+        pT->VVisible = (NvU16)vvisible;
+        pT->VBorder = (NvU16)pDTD->bDTVerticalBorder;
+        pT->VTotal = (NvU16)vvisible + (NvU16)(pDTD->bDTVerticalBlanking + ((pDTD->bDTVertActiveBlank & 0x0F) << 8)) + pT->VBorder * 2;
+        pT->VFrontPorch = (NvU16)(((pDTD->bDTVerticalSync & 0xF0) >> 4) + ((pDTD->bDTHorizVertSyncOverFlow & 0x0C) << 2));
+        pT->VSyncWidth = (NvU16)((pDTD->bDTVerticalSync & 0x0F) + ((pDTD->bDTHorizVertSyncOverFlow & 0x03) << 4));
+
+        // pixel clock
+        pT->pclk = (NvU32)pDTD->wDTPixelClock;
+        pT->pclk1khz = (NvU32)((pDTD->wDTPixelClock << 3) + (pDTD->wDTPixelClock << 1));
+
+        // sync polarities
+        if ((pDTD->bDTFlags & 0x18) == 0x18)
+        {
+            pT->HSyncPol = ((pDTD->bDTFlags & 0x2) != 0) ? NVT_H_SYNC_POSITIVE : NVT_H_SYNC_NEGATIVE;
+            pT->VSyncPol = ((pDTD->bDTFlags & 0x4) != 0) ? NVT_V_SYNC_POSITIVE : NVT_V_SYNC_NEGATIVE;
+        }
+        else if ((pDTD->bDTFlags & 0x18) == 0x10)
+        {
+            pT->HSyncPol = ((pDTD->bDTFlags & 0x2) != 0) ? NVT_H_SYNC_POSITIVE : NVT_H_SYNC_NEGATIVE;
+            pT->VSyncPol = NVT_V_SYNC_POSITIVE;
+        }
+        else
+        {
+            pT->HSyncPol = NVT_H_SYNC_NEGATIVE;
+            pT->VSyncPol = NVT_V_SYNC_POSITIVE;
+        }
+
+        // interlaced
+        if ((pDTD->bDTFlags & 0x80) == 0x80)
+            pT->interlaced = 1;
+        else
+            pT->interlaced = 0;
+
+        // Eizo split EDID case, using the 0th bit to indicate split display capability
+        if (((pDTD->bDTFlags & 1) == 1) && !(((pDTD->bDTFlags & 0x20) == 0x20) || ((pDTD->bDTFlags & 0x40) == 0x40)))
+        {
+            pT->etc.flag |= NVT_FLAG_EDID_DTD_EIZO_SPLIT;
+        }
+
+        if (pT->interlaced)
+        {
+            // Adjust for one extra blank line in every other frame.
+            dwTotalPixels = (((NvU32)pT->HTotal * pT->VTotal) +
+                             ((NvU32)pT->HTotal * (pT->VTotal + 1))) / 2;
+        }
+        else
+        {
+            dwTotalPixels = (NvU32)pT->HTotal * pT->VTotal;
+        }
+
+        pT->etc.rr = (NvU16)(((NvU32)pDTD->wDTPixelClock*10000+dwTotalPixels/2)/dwTotalPixels);
+        // Use the utility call to multiply and divide, to take care of overflow and truncation of large values.
+        // How did we arrive at 10000000? The pixel clock in the EDID is in multiples of 10KHz (= 10000), and the
+        // refresh rate is in 0.001Hz units (60Hz is represented as 60000), which brings in a factor of 1000;
+        // hence 10000 * 1000 = 10000000.
+        pT->etc.rrx1k = axb_div_c(pDTD->wDTPixelClock, 10000000, dwTotalPixels);
+        pT->etc.status = NVT_STATUS_EDID_DTD;
+        NVT_SNPRINTF((char *)pT->etc.name, sizeof(pT->etc.name), "EDID-Detailed:%dx%dx%d.%03dHz%s", (int)pT->HVisible, (int)(pT->interlaced ? 2 : 1)*pT->VVisible, (int)pT->etc.rrx1k/1000, (int)pT->etc.rrx1k%1000, (pT->interlaced ? "/i" : ""));
+        pT->etc.name[sizeof(pT->etc.name) - 1] = '\0';
+
+        // aspect ratio
+        pT->etc.aspect = (pDTD->bDTHorizVertImage & 0xF0) << 20 | pDTD->bDTHorizontalImage << 16 |
+                         (pDTD->bDTHorizVertImage & 0x0F) << 8 | pDTD->bDTVerticalImage;
+
+        pT->etc.rep = 0x1; // Bit mask for no pixel repetition.
+
+        return NVT_STATUS_SUCCESS;
+    }
+
+    return NVT_STATUS_ERR;
+}
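Detailed timing descriptors store each field as an 8-bit low part plus upper bits packed into shared nibble bytes, and the pixel clock in 10 kHz units -- which is why the code above reassembles hvisible from two bytes and multiplies wDTPixelClock by ten with (x << 3) + (x << 1). A standalone sketch of the horizontal-active reassembly for a 1080p descriptor (variable names are illustrative stand-ins for the descriptor bytes):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t  hActiveLow   = 0x80;  /* low 8 bits of 1920 (0x780) */
        uint8_t  hHighNibbles = 0x71;  /* active high nibble 0x7, blanking high nibble 0x1 */
        uint16_t pclk10khz    = 14850; /* 148.50 MHz */

        uint32_t hActive  = hActiveLow + ((hHighNibbles & 0xF0) << 4);               /* 0x780 = 1920 */
        uint32_t pclk1khz = ((uint32_t)pclk10khz << 3) + ((uint32_t)pclk10khz << 1); /* x10 = 148500 */

        printf("hActive=%u pclk=%u kHz\n", (unsigned)hActive, (unsigned)pclk1khz);
        return 0;
    }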
+
+// parse the EDID 1.x based detailed timing info
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidDetailedTiming(NvU8 *pEdid, NVT_EDID_INFO *pInfo)
+{
+    EDIDV1STRUC *p = (EDIDV1STRUC *) pEdid;
+    NVT_TIMING newTiming;
+    NvU32 i;
+    NvBool found = NV_FALSE;
+
+    for (i = 0; i < 4; i++)
+    {
+        NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming));
+
+        if (parseEdidDetailedTimingDescriptor((NvU8 *)&p->DetailedTimingDesc[i],
+                                              &newTiming) == NVT_STATUS_SUCCESS)
+        {
+            newTiming.etc.status = NVT_STATUS_EDID_DTDn(i+1);
+
+            if (!assignNextAvailableTiming(pInfo, &newTiming))
+            {
+                break;
+            }
+
+            found = NV_TRUE;
+        }
+    }
+
+    if (found)
+    {
+        // if edid_ver 1.3, the PTM flag should be set
+        //nvt_assert(pInfo->version > 0x103 || (pInfo->u.feature &
+        //           NVT_EDID_OTHER_FEATURES_PTM_INCLUDE_NATIVE));
+
+        if (pInfo->u.feature & NVT_EDID_OTHER_FEATURES_PTM_INCLUDE_NATIVE)
+        {
+            pInfo->timing[0].etc.flag |= NVT_FLAG_DTD1_PREFERRED_TIMING;
+        }
+    }
+}
+
+
+// parse the EDID 1.x 18-byte long display descriptor
+CODE_SEGMENT(PAGE_DD_CODE)
+static void parseEdidLongDisplayDescriptor(EDID_LONG_DISPLAY_DESCRIPTOR *descriptor, NVT_EDID_18BYTE_DESCRIPTOR *p, NvU32 version)
+{
+    NvU32 i;
+
+    // bypass the input pointer check in this private function
+
+    // return if it's a detailed timing descriptor
+    if (descriptor->prefix[0] != 0 || descriptor->prefix[1] != 0)
+        return;
+
+    // other sanity check for the input data
+    if (descriptor->rsvd != 0)
+        return;
+
+    p->tag = descriptor->tag;
+
+    // now translate the descriptor
+    switch (descriptor->tag)
+    {
+        case NVT_EDID_DISPLAY_DESCRIPTOR_DPSN: // display product serial number
+        case NVT_EDID_DISPLAY_DESCRITPOR_DPN:  // display product name
+        case NVT_EDID_DISPLAY_DESCRIPTOR_ADS:  // alphanumeric data string (ASCII)
+
+            // copy the 13 character payload from the 18-byte descriptor
+            for (i = 0; i < NVT_PVT_EDID_LDD_PAYLOAD_SIZE; i++)
+            {
+                if (descriptor->data[i] == 0x0A)
+                    p->u.serial_number.str[i] = '\0';
+                else
+                    p->u.serial_number.str[i] = descriptor->data[i];
+            }
+            break;
+
+        case NVT_EDID_DISPLAY_DESCRIPTOR_DRL: // display range limit
+        {
+            EDID_MONITOR_RANGE_LIMIT *pRangeLimit = (EDID_MONITOR_RANGE_LIMIT *)&descriptor->data[0];
+
+            p->u.range_limit.min_v_rate = pRangeLimit->minVRate;
+            p->u.range_limit.max_v_rate = pRangeLimit->maxVRate;
+            p->u.range_limit.min_h_rate = pRangeLimit->minHRate;
+            p->u.range_limit.max_h_rate = pRangeLimit->maxHRate;
+            p->u.range_limit.max_pclk_MHz = pRangeLimit->maxPClock10M * 10;
+            p->u.range_limit.timing_support = pRangeLimit->timing_support;
+
+            // add the 255Hz offsets if needed, using descriptor->rsvd2;
+            // to offset the min values, their max MUST be offset as well
+            if (version >= NVT_EDID_VER_1_4)
+            {
+                if (descriptor->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_VER_MAX)
+                {
+                    p->u.range_limit.max_v_rate += NVT_PVT_EDID_RANGE_OFFSET_AMOUNT;
+                    if (descriptor->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_VER_MIN)
+                    {
+                        p->u.range_limit.min_v_rate += NVT_PVT_EDID_RANGE_OFFSET_AMOUNT;
+                    }
+                }
+                if (descriptor->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_HOR_MAX)
+                {
+                    p->u.range_limit.max_h_rate += NVT_PVT_EDID_RANGE_OFFSET_AMOUNT;
+                    if (descriptor->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_HOR_MIN)
+                    {
+                        p->u.range_limit.min_h_rate += NVT_PVT_EDID_RANGE_OFFSET_AMOUNT;
+                    }
+                }
+            }
+
+            if (p->u.range_limit.timing_support == NVT_EDID_RANGE_SUPPORT_GTF2)
+            {
+                // descriptor->data[7]
+                // Start frequency for the secondary curve, hor freq./2[kHz]
+                p->u.range_limit.u.gtf2.C = pRangeLimit->u.gtf2.C / 2;     // 0 <= C <= 127
+                p->u.range_limit.u.gtf2.K = pRangeLimit->u.gtf2.K;         // 0 <= K <= 255
+                p->u.range_limit.u.gtf2.J = pRangeLimit->u.gtf2.J / 2;     // 0 <= J <= 127
+                p->u.range_limit.u.gtf2.M = (pRangeLimit->u.gtf2.M_MSB << 8) +
+                                            pRangeLimit->u.gtf2.M_LSB;     // 0 <= M <= 65535
+            }
+            else if (p->u.range_limit.timing_support == NVT_EDID_RANGE_SUPPORT_CVT)
+            {
+                // the pixel clock adjustment is in cvt.pixel_clock @ bits7:2;
+                // that number is in 0.25MHz, ie the actual max clock is max_pclk_MHz - (0.25 x cvt_pixel_clock);
+                // subtract the whole number part from max_pclk_MHz, and save the remainder
+                p->u.range_limit.max_pclk_MHz -= (pRangeLimit->u.cvt.pixel_clock & NVT_PVT_EDID_CVT_PIXEL_CLOCK_MASK) >> NVT_PVT_EDID_CVT_PIXEL_CLOCK_SHIFT >> 2; // ie divide by 4 to get the whole number
+                p->u.range_limit.u.cvt.pixel_clock_adjustment = ((pRangeLimit->u.cvt.pixel_clock & NVT_PVT_EDID_CVT_PIXEL_CLOCK_MASK) >> NVT_PVT_EDID_CVT_PIXEL_CLOCK_SHIFT) & 0x03; // ie modulus 4
+
+                p->u.range_limit.u.cvt.max_active_pixels_per_line = (pRangeLimit->u.cvt.pixel_clock & NVT_PVT_EDID_CVT_ACTIVE_MSB_MASK) << NVT_PVT_EDID_CVT_ACTIVE_MSB_SHIFT;
+                p->u.range_limit.u.cvt.max_active_pixels_per_line |= pRangeLimit->u.cvt.max_active;
+                p->u.range_limit.u.cvt.max_active_pixels_per_line <<= 3; // ie multiply by 8
+
+                p->u.range_limit.u.cvt.aspect_supported = (pRangeLimit->u.cvt.aspect_supported & NVT_PVT_EDID_CVT_ASPECT_SUPPORTED_MASK) >> NVT_PVT_EDID_CVT_ASPECT_SUPPORTED_SHIFT;
+
+                p->u.range_limit.u.cvt.aspect_preferred = (pRangeLimit->u.cvt.aspect_preferred_blanking & NVT_PVT_EDID_CVT_ASPECT_PREFERRED_MASK) >> NVT_PVT_EDID_CVT_ASPECT_PREFERRED_SHIFT;
+                p->u.range_limit.u.cvt.blanking_support = (pRangeLimit->u.cvt.aspect_preferred_blanking & NVT_PVT_EDID_CVT_BLANKING_MASK) >> NVT_PVT_EDID_CVT_BLANKING_SHIFT;
+
+                p->u.range_limit.u.cvt.scaling_support = (pRangeLimit->u.cvt.scaling_support & NVT_PVT_EDID_CVT_SCALING_MASK) >> NVT_PVT_EDID_CVT_SCALING_SHIFT;
+
+                p->u.range_limit.u.cvt.preferred_refresh_rate = pRangeLimit->u.cvt.preferred_refresh_rate;
+            }
+        }
+        break;
+
+        case NVT_EDID_DISPLAY_DESCRIPTOR_CPD: // color point data
+        {
+            EDID_COLOR_POINT_DATA *pColorPoint = (EDID_COLOR_POINT_DATA *)&descriptor->data[0];
+
+            p->u.color_point.wp1_index = pColorPoint->wp1_index;
+            p->u.color_point.wp1_x = pColorPoint->wp1_x << 2;
+            p->u.color_point.wp1_x |= (pColorPoint->wp1_x_y & NVT_PVT_EDID_CPD_WP_X_MASK) >> NVT_PVT_EDID_CPD_WP_X_SHIFT;
+            p->u.color_point.wp1_y = pColorPoint->wp1_y << 2;
+            p->u.color_point.wp1_y |= (pColorPoint->wp1_x_y & NVT_PVT_EDID_CPD_WP_Y_MASK) >> NVT_PVT_EDID_CPD_WP_Y_SHIFT;
+            p->u.color_point.wp1_gamma = pColorPoint->wp1_gamma + 100;
+
+            p->u.color_point.wp2_index = pColorPoint->wp2_index;
+            p->u.color_point.wp2_x = pColorPoint->wp2_x << 2;
+            p->u.color_point.wp2_x |= (pColorPoint->wp2_x_y & NVT_PVT_EDID_CPD_WP_X_MASK) >> NVT_PVT_EDID_CPD_WP_X_SHIFT;
+            p->u.color_point.wp2_y = pColorPoint->wp2_y << 2;
+            p->u.color_point.wp2_y |= (pColorPoint->wp2_x_y & NVT_PVT_EDID_CPD_WP_Y_MASK) >> NVT_PVT_EDID_CPD_WP_Y_SHIFT;
+            p->u.color_point.wp2_gamma = pColorPoint->wp2_gamma + 100;
+        }
+        break;
+
+        case NVT_EDID_DISPLAY_DESCRIPTOR_STI: // standard timing identification
+        {
+            EDID_STANDARD_TIMING_ID *pStdTiming = (EDID_STANDARD_TIMING_ID *)&descriptor->data[0];
+
+            for (i=0; i<NVT_EDID_DD_STI_NUM; i++)
+            {
+                p->u.std_timing.descriptor[i] = pStdTiming->std_timing[i];
+            }
+        }
+        break;
+
+        case NVT_EDID_DISPLAY_DESCRIPTOR_DCM: // display color management
+        {
+            EDID_COLOR_MANAGEMENT_DATA *pColorMan = (EDID_COLOR_MANAGEMENT_DATA *)&descriptor->data[0];
+
+            p->u.color_man.red_a3 = pColorMan->red_a3_lsb | (pColorMan->red_a3_msb << 8);
+            p->u.color_man.red_a2 = pColorMan->red_a2_lsb | (pColorMan->red_a2_msb << 8);
+
+            p->u.color_man.green_a3 = pColorMan->green_a3_lsb | (pColorMan->green_a3_msb << 8);
+            p->u.color_man.green_a2 = pColorMan->green_a2_lsb | (pColorMan->green_a2_msb << 8);
+
+            p->u.color_man.blue_a3 = pColorMan->blue_a3_lsb | (pColorMan->blue_a3_msb << 8);
+            p->u.color_man.blue_a2 = pColorMan->blue_a2_lsb | (pColorMan->blue_a2_msb << 8);
+        }
+        break;
+
+        case NVT_EDID_DISPLAY_DESCRIPTOR_CVT: // CVT 3-byte timing code
+        {
+            EDID_CVT_3BYTE *pCVT_3byte = (EDID_CVT_3BYTE *)&descriptor->data[0];
+
+            for (i=0; i<NVT_EDID_DD_MAX_CVT3_PER_DESCRIPTOR; i++)
+            {
+                if (pCVT_3byte->block[i].addressable_lines != 0)
+                {
+                    p->u.cvt.block[i].addressable_lines = pCVT_3byte->block[i].addressable_lines;
+                    p->u.cvt.block[i].addressable_lines |= (pCVT_3byte->block[i].lines_ratio & NVT_PVT_EDID_CVT3_LINES_MSB_MASK) << NVT_PVT_EDID_CVT3_LINES_MSB_SHIFT;
+                    p->u.cvt.block[i].addressable_lines += 1;
+                    p->u.cvt.block[i].addressable_lines <<= 1;
+
+                    p->u.cvt.block[i].aspect_ratio = (pCVT_3byte->block[i].lines_ratio & NVT_PVT_EDID_CVT3_ASPECT_MASK) >> NVT_PVT_EDID_CVT3_ASPECT_SHIFT;
+
+                    p->u.cvt.block[i].preferred_vert_rates = (pCVT_3byte->block[i].refresh_rates & NVT_PVT_EDID_CVT3_PREFERRED_RATE_MASK) >> NVT_PVT_EDID_CVT3_PREFERRED_RATE_SHIFT;
+                    p->u.cvt.block[i].supported_vert_rates = (pCVT_3byte->block[i].refresh_rates & NVT_PVT_EDID_CVT3_SUPPORTED_RATE_MASK) >> NVT_PVT_EDID_CVT3_SUPPORTED_RATE_SHIFT;
+                }
+            }
+        }
+        break;
+
+        case NVT_EDID_DISPLAY_DESCRIPTOR_ESTIII: // established timing III
+        {
+            EDID_EST_TIMINGS_III *pEstTiming = (EDID_EST_TIMINGS_III *)&descriptor->data[0];
+
+            for (i=0; i<NVT_EDID_DD_EST_TIMING3_NUM; i++)
+            {
+                p->u.est3.data[i] = pEstTiming->timing_byte[i];
+            }
+        }
+        break;
+
+        case NVT_EDID_DISPLAY_DESCRIPTOR_DUMMY: // dummy descriptor
+        default:
+            // unresolved descriptor yet
+            for (i = 0; i < NVT_PVT_EDID_LDD_PAYLOAD_SIZE; i++)
+            {
+                p->u.dummy.data[i] = descriptor->data[i];
+            }
+            break;
+    }
+}
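EDID 1.4 lets the one-byte range-limit fields reach past 255 by flagging a +255 offset in the descriptor's rsvd2 byte; as the code above shows, a min-rate offset is only meaningful when the matching max-rate offset is set too. A standalone sketch with illustrative flag values standing in for the NVT_PVT_EDID_RANGE_OFFSET_* constants:

    #include <stdint.h>
    #include <stdio.h>

    #define VER_MAX_OFFSET 0x02 /* illustrative stand-ins for the rsvd2 flag bits */
    #define VER_MIN_OFFSET 0x01
    #define OFFSET_AMOUNT  255

    int main(void)
    {
        uint8_t  rsvd2 = VER_MAX_OFFSET; /* only the max rate is offset */
        uint32_t min_v = 48, max_v = 75; /* raw descriptor bytes */

        if (rsvd2 & VER_MAX_OFFSET)
        {
            max_v += OFFSET_AMOUNT;      /* 75 -> 330 Hz */
            if (rsvd2 & VER_MIN_OFFSET)
                min_v += OFFSET_AMOUNT;
        }
        printf("%u..%u Hz\n", (unsigned)min_v, (unsigned)max_v);
        return 0;
    }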
+
+// get generic EDID info
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NV_STDCALL NvTiming_ParseEDIDInfo(NvU8 *pEdid, NvU32 length, NVT_EDID_INFO *pInfo)
+{
+    NvU32 i, j, k, data;
+    EDIDV1STRUC *p;
+    NvU8 *pExt;
+    NVT_EDID_CEA861_INFO *p861Info;
+
+    // parameter check
+    if (pEdid == NULL || length < 128 || pInfo == NULL)
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    NVMISC_MEMSET(pInfo, 0, sizeof(NVT_EDID_INFO));
+
+    // get the EDID version
+    if (getEdidVersion(pEdid, &pInfo->version) == NVT_STATUS_ERR)
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    p = (EDIDV1STRUC *) pEdid;
+
+    // get the IDs
+    pInfo->manuf_id = p->wIDManufName;
+    pInfo->product_id = p->wIDProductCode;
+
+    // translate the ID into the manufacturer's name
+    pInfo->manuf_name[0] = 'A' + (NvU8)((pInfo->manuf_id & 0x007c) >> 2) - 1;
+    pInfo->manuf_name[1] = 'A' + (NvU8)((pInfo->manuf_id & 0x0003) << 3 | (pInfo->manuf_id & 0xe000) >> 13) - 1;
+    pInfo->manuf_name[2] = 'A' + (NvU8)((pInfo->manuf_id & 0x1f00) >> 8) - 1;
+    pInfo->manuf_name[3] = '\0';
+
+    // get the serial number
+    pInfo->serial_number = p->dwIDSerialNumber;
+
+    // get the week and year
+    pInfo->week = p->bWeekManuf;
+    pInfo->year = p->bYearManuf + 1990;
+
+    // get the interface info
+    pInfo->input.isDigital = (p->bVideoInputDef & NVT_PVT_EDID_INPUT_ISDIGITAL_MASK) >> NVT_PVT_EDID_INPUT_ISDIGITAL_SHIFT;
+
+    if (pInfo->input.isDigital && pInfo->version > 0x103) // must be at least EDID1.4 to support the following fields
+    {
+        switch ((p->bVideoInputDef & NVT_PVT_EDID_INPUT_BPC_MASK) >> NVT_PVT_EDID_INPUT_BPC_SHIFT)
+        {
+            case NVT_PVT_EDID_INPUT_BPC_6 :
+                pInfo->input.u.digital.bpc = 6;
+                break;
+            case NVT_PVT_EDID_INPUT_BPC_8 :
+                pInfo->input.u.digital.bpc = 8;
+                break;
+            case NVT_PVT_EDID_INPUT_BPC_10 :
+                pInfo->input.u.digital.bpc = 10;
+                break;
+            case NVT_PVT_EDID_INPUT_BPC_12 :
+                pInfo->input.u.digital.bpc = 12;
+                break;
+            case NVT_PVT_EDID_INPUT_BPC_14 :
+                pInfo->input.u.digital.bpc = 14;
+                break;
+            case NVT_PVT_EDID_INPUT_BPC_16 :
+                pInfo->input.u.digital.bpc = 16;
+                break;
+            default :
+                pInfo->input.u.digital.bpc = 0;
+                break;
+        }
+        pInfo->input.u.digital.video_interface = (p->bVideoInputDef & NVT_PVT_EDID_INPUT_INTERFACE_MASK) >> NVT_PVT_EDID_INPUT_INTERFACE_SHIFT;
+    }
+    else if (!pInfo->input.isDigital)
+    {
+        pInfo->input.u.analog_data = (p->bVideoInputDef & NVT_PVT_EDID_INPUT_ANALOG_ETC_MASK) >> NVT_PVT_EDID_INPUT_ANALOG_ETC_SHIFT;
+    }
+
+    // get the max image size and aspect ratio
+    if (p->bMaxHorizImageSize != 0 && p->bMaxVertImageSize != 0)
+    {
+        pInfo->screen_size_x = p->bMaxHorizImageSize;
+        pInfo->screen_size_y = p->bMaxVertImageSize;
+        pInfo->screen_aspect_x = 0;
+        pInfo->screen_aspect_y = 0;
+    }
+    else if (p->bMaxHorizImageSize != 0 && p->bMaxVertImageSize == 0)
+    {
+        pInfo->screen_size_x = 0;
+        pInfo->screen_size_y = 0;
+        pInfo->screen_aspect_x = 99 + p->bMaxHorizImageSize;
+        pInfo->screen_aspect_y = 100;
+    }
+    else if (p->bMaxHorizImageSize == 0 && p->bMaxVertImageSize != 0)
+    {
+        pInfo->screen_size_x = 0;
+        pInfo->screen_size_y = 0;
+        pInfo->screen_aspect_x = 100;
+        pInfo->screen_aspect_y = 99 + p->bMaxVertImageSize;
+    }
+
+    // get the gamma
+    pInfo->gamma = p->bDisplayXferChar + 100;
+
+    // get the features
+    pInfo->u.feature = p->bFeatureSupport;
+
+    // get the chromaticity coordinates
+    pInfo->cc_red_x = p->Chromaticity[2] << 2;
+    pInfo->cc_red_x |= (p->Chromaticity[0] & NVT_PVT_EDID_CC_RED_X1_X0_MASK) >> NVT_PVT_EDID_CC_RED_X1_X0_SHIFT;
+    pInfo->cc_red_y = p->Chromaticity[3] << 2;
+    pInfo->cc_red_y |= (p->Chromaticity[0] & NVT_PVT_EDID_CC_RED_Y1_Y0_MASK) >> NVT_PVT_EDID_CC_RED_Y1_Y0_SHIFT;
+
+    pInfo->cc_green_x = p->Chromaticity[4] << 2;
+    pInfo->cc_green_x |= (p->Chromaticity[0] & NVT_PVT_EDID_CC_GREEN_X1_X0_MASK) >> NVT_PVT_EDID_CC_GREEN_X1_X0_SHIFT;
+    pInfo->cc_green_y = p->Chromaticity[5] << 2;
+    pInfo->cc_green_y |= (p->Chromaticity[0] & NVT_PVT_EDID_CC_GREEN_Y1_Y0_MASK) >> NVT_PVT_EDID_CC_GREEN_Y1_Y0_SHIFT;
+
+    pInfo->cc_blue_x = p->Chromaticity[6] << 2;
+    pInfo->cc_blue_x |= (p->Chromaticity[1] & NVT_PVT_EDID_CC_BLUE_X1_X0_MASK) >> NVT_PVT_EDID_CC_BLUE_X1_X0_SHIFT;
+    pInfo->cc_blue_y = p->Chromaticity[7] << 2;
+    pInfo->cc_blue_y |= (p->Chromaticity[1] & NVT_PVT_EDID_CC_BLUE_Y1_Y0_MASK) >> NVT_PVT_EDID_CC_BLUE_Y1_Y0_SHIFT;
+
+    pInfo->cc_white_x = p->Chromaticity[8] << 2;
+    pInfo->cc_white_x |= (p->Chromaticity[1] & NVT_PVT_EDID_CC_WHITE_X1_X0_MASK) >> NVT_PVT_EDID_CC_WHITE_X1_X0_SHIFT;
+    pInfo->cc_white_y = p->Chromaticity[9] << 2;
+    pInfo->cc_white_y |= (p->Chromaticity[1] & NVT_PVT_EDID_CC_WHITE_Y1_Y0_MASK) >> NVT_PVT_EDID_CC_WHITE_Y1_Y0_SHIFT;
+
+    // copy established timings
+    pInfo->established_timings_1_2 = (NvU16)p->bEstablishedTimings1 << 8;
+    pInfo->established_timings_1_2 |= (NvU16)p->bEstablishedTimings2;
+
+    // copy manuf reserved timings
+    pInfo->manufReservedTimings = p->bManufReservedTimings;
+
+    // copy standard timings
+    for (i = 0; i < NVT_EDID_MAX_STANDARD_TIMINGS; i++)
+    {
+        pInfo->standard_timings[i] = p->wStandardTimingID[i];
+    }
+
+    // get the number of extensions
+    pInfo->total_extensions = p->bExtensionFlag;
+
+    // check_sum
+    for (i = 0, data = 0; i < length; i++)
+    {
+        data += pEdid[i];
+    }
+    pInfo->checksum_ok = !(data & 0xFF);
+    pInfo->checksum = p->bChecksum;
+
+
+    // now find out the total number of all of the timings in the EDID
+    pInfo->total_timings = 0;
+
+    // now find out the detailed timings
+    parseEdidDetailedTiming(pEdid, pInfo);
+
+    // now parse all 18-byte long display descriptors (not detailed timing)
+    for (i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++)
+    {
+        parseEdidLongDisplayDescriptor((EDID_LONG_DISPLAY_DESCRIPTOR *)&p->DetailedTimingDesc[i], &pInfo->ldd[i], pInfo->version);
+    }
+
+    // now check the number of timings in the extensions
+    for (k = 0, j = 1; j <= pInfo->total_extensions && (j + 1) * sizeof(EDIDV1STRUC) <= length; j++)
+    {
+        pExt = pEdid + sizeof(EDIDV1STRUC) * j;
+
+        // check for an 861 extension first
+        switch (*pExt)
+        {
+            case NVT_EDID_EXTENSION_CTA:
+                p861Info = (k == 0) ? &pInfo->ext861 : &pInfo->ext861_2;
+
+                get861ExtInfo(pExt, sizeof(EDIDV1STRUC), p861Info);
+
+                // HF EEODB applies to edid v1.3 only; v1.4 does not need it. Also, it is
+                // always present in the 1st CTA extension block.
+                if (j == 1 && pInfo->version == NVT_EDID_VER_1_3)
+                {
+                    parseCta861HfEeodb(p861Info, &pInfo->total_extensions);
+                }
+
+                // update pInfo with basic hdmi info;
+                // assumes each edid will only have one such block across multiple cta861 blocks (otherwise it may create a declaration conflict).
+                // In case of multiple such blocks, the last one takes precedence, except for SCDB.
+
+                // parseCta861VsdbBlocks() uses the hfScdb info, so that needs to be parsed first
+                parseCta861HfScdb(p861Info, pInfo, FROM_CTA861_EXTENSION);
+                parseCta861VsdbBlocks(p861Info, pInfo, FROM_CTA861_EXTENSION);
+                parseCta861VsvdbBlocks(p861Info, pInfo, FROM_CTA861_EXTENSION);
+
+                // parse HDR related information from the HDR static metadata data block
+                if (p861Info->valid.hdr_static_metadata != 0)
+                {
+                    parseCta861HdrStaticMetadataDataBlock(p861Info, pInfo, FROM_CTA861_EXTENSION);
+                }
+
+                // Timings are (or shall be) listed in priority order,
+                // so read the SVDs and yuv420 SVDs before reading the detailed timings.
+
+                // add the 861B short video timing descriptors
+                if (p861Info->revision >= NVT_CEA861_REV_B)
+                {
+                    // base video
+                    parse861bShortTiming(p861Info, pInfo, FROM_CTA861_EXTENSION);
+
+                    // yuv420-only video
+                    parse861bShortYuv420Timing(p861Info, pInfo, FROM_CTA861_EXTENSION);
+                }
+
+                // add the detailed timings in the 18-byte long display descriptors
+                parse861ExtDetailedTiming(pExt, p861Info->basic_caps, pInfo);
+
+                if (p861Info->revision >= NVT_CTA861_REV_H)
+                {
+                    if (p861Info->total_vfdb != 0)         parseCta861VideoFormatDataBlock(p861Info, pInfo);
+                    if (p861Info->total_did_type7db != 0)  parseCta861DIDType7VideoTimingDataBlock(p861Info, pInfo);
+                    if (p861Info->total_did_type8db != 0)  parseCta861DIDType8VideoTimingDataBlock(p861Info, pInfo);
+                    if (p861Info->total_did_type10db != 0) parseCta861DIDType10VideoTimingDataBlock(p861Info, pInfo);
+                }
+
+                // see the CEA861-F spec, section 7.5.12, about the VFPDB block
+                if (p861Info->revision >= NVT_CEA861_REV_F && (p861Info->total_svr != 0 || p861Info->valid.NVRDB == 1))
+                {
+                    parseCta861NativeOrPreferredTiming(p861Info, pInfo, FROM_CTA861_EXTENSION);
+                }
+
+                k++;
+                break;
+
+            case NVT_EDID_EXTENSION_VTB:
+                parseVTBExtension(pExt, pInfo);
+                break;
+
+            case NVT_EDID_EXTENSION_DISPLAYID:
+                if ((pExt[1] & 0xF0) == 0x20) // displayID2.x as EDID extension
+                {
+                    if (getDisplayId20EDIDExtInfo(pExt, sizeof(EDIDV1STRUC),
+                                                  pInfo) == NVT_STATUS_SUCCESS)
+                    {
+                        if (pInfo->ext861.total_y420vdb != 0 || pInfo->ext861.total_y420cmdb != 0)
+                        {
+                            pInfo->ext_displayid20.interface_features.yuv420_min_pclk = 0;
+                        }
+
+                        if (pInfo->ext_displayid20.valid_data_blocks.interface_feature_present)
+                        {
+                            pInfo->ext861.basic_caps |= pInfo->ext_displayid20.basic_caps;
+                        }
+                    }
+                }
+                else // displayID13 as EDID extension
+                {
+                    // Do not fail the function based on the return value of getDisplayIdEDIDExtInfo; refer to bug 3247180,
+                    // where some rogue monitors don't provide correct DID13 raw data.
+                    if (getDisplayIdEDIDExtInfo(pExt, sizeof(EDIDV1STRUC),
+                                                pInfo) == NVT_STATUS_SUCCESS)
+                    {
+                        // Check if YCbCr is supported in the base block,
+                        // since it is mandatory if YCbCr is supported on any other display interface, as per 5.1.1.1 Video Colorimetry
+                        if (pInfo->u.feature_ver_1_4_digital.support_ycrcb_444)
+                        {
+                            if (!pInfo->ext_displayid.supported_displayId2_0)
+                            {
+                                pInfo->ext_displayid.u4.display_interface.ycbcr444_depth.support_8b = 1;
+                            }
+                            else
+                            {
+                                pInfo->ext_displayid.u4.display_interface_features.ycbcr444_depth.support_8b = 1;
+                            }
+                        }
+
+                        if (pInfo->u.feature_ver_1_4_digital.support_ycrcb_422)
+                        {
+                            if (!pInfo->ext_displayid.supported_displayId2_0)
+                            {
+                                pInfo->ext_displayid.u4.display_interface.ycbcr422_depth.support_8b = 1;
+                            }
+                            else
+                            {
+                                pInfo->ext_displayid.u4.display_interface_features.ycbcr422_depth.support_8b = 1;
+                            }
+                        }
+                    }
+                }
+                break;
+
+            default:
+                break;
+        }
+    }
+
+    // Copy all the timings (could include type 7/8/9/10) from displayid20->timings[] to pEdidInfo->timings[]
+    for (i = 0; i < pInfo->ext_displayid20.total_timings; i++)
+    {
+        if (!assignNextAvailableTiming(pInfo, &(pInfo->ext_displayid20.timing[i])))
+        {
+            return NVT_STATUS_ERR;
+        }
+    }
+
+    // check for cvt timings - in the display range limits or cvt 3-byte LDDs, only for EDID1.4 and above
+    if (pInfo->version > NVT_EDID_VER_1_3)
+    {
+        parseEdidCvtTiming(pInfo);
+    }
+
+    // now check for standard timings - base EDID and then the LDDs
+    parseEdidStandardTiming(pInfo);
+
+    // find out the total established timings - base EDID and then the LDDs
+    parseEdidEstablishedTiming(pInfo);
+
+    // remove a T8VTDB timing if it co-existed in the standard or established timings
+    if (pInfo->ext861.revision >= NVT_CTA861_REV_H && pInfo->ext861.total_did_type8db != 0 && pInfo->total_timings > 1)
+    {
+        for (i = 0; i < pInfo->total_timings; i++)
+        {
+            if (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_CTA861_DID_T8)
+            {
+                if (isMatchedStandardTiming(pInfo, &pInfo->timing[i]) || isMatchedEstablishedTiming(pInfo, &pInfo->timing[i]))
+                {
+                    for (j = i; j < pInfo->total_timings - 1; j++)
+                    {
+                        // remove the entry by moving the next entry up
+                        pInfo->timing[j] = pInfo->timing[j+1];
+                    }
+                    NVMISC_MEMSET(&pInfo->timing[pInfo->total_timings-1], 0, sizeof(NVT_TIMING));
+                    pInfo->total_timings--; i--;
+                }
+            }
+        }
+    }
+
+    getEdidHDM1_4bVsdbTiming(pInfo);
+
+    // Assert if no timings were found (due to a bad EDID) or if we mistakenly
+    // assigned more timings than we allocated space for (due to bad logic above)
+    nvt_assert(pInfo->total_timings &&
+               (pInfo->total_timings <= COUNT(pInfo->timing)));
+
+    // go through all the timings and update the supported color formats;
+    // consider the supported bpc per color format from the parsed EDID / CTA861 / DisplayId
+    updateColorFormatAndBpcTiming(pInfo);
+
+    return NVT_STATUS_SUCCESS;
+}
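NvTiming_ParseEDIDInfo() is the one-call entry point for all of the above: it validates the header and checksum, decodes the vendor block (including the packed 3x5-bit manufacturer ID), then fans out to the detailed/standard/established and extension parsers and leaves the result in NVT_EDID_INFO. A hedged usage sketch, assuming the nvtiming headers and a raw EDID buffer (rawEdid/rawEdidLength are placeholders):

    NVT_EDID_INFO info;
    NvU32 i;

    if (NvTiming_ParseEDIDInfo(rawEdid, rawEdidLength, &info) == NVT_STATUS_SUCCESS)
    {
        /* info.manuf_name holds the decoded 3-letter ID;                  */
        /* info.version is the packed version word, e.g. 0x0104 for 1.4;   */
        /* info.checksum_ok reports whether all bytes summed to 0 mod 256. */
        for (i = 0; i < info.total_timings; i++)
        {
            /* info.timing[i].etc.name is e.g. "EDID-Detailed:1920x1080x60.000Hz" */
            /* info.timing[i].etc.rrx1k is the refresh rate in 0.001 Hz units     */
        }
    }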
+
+CODE_SEGMENT(PAGE_DD_CODE)
+void updateColorFormatAndBpcTiming(NVT_EDID_INFO *pInfo)
+{
+    NvU32 i, j, data;
+
+    for (i = 0; i < pInfo->total_timings; i++)
+    {
+        data = NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status);
+        switch (data)
+        {
+            case NVT_TYPE_HDMI_STEREO:
+            case NVT_TYPE_HDMI_EXT:
+            // VTB timings use the base EDID (block 0) to determine the color format support
+            case NVT_TYPE_EDID_VTB_EXT:
+            case NVT_TYPE_EDID_VTB_EXT_STD:
+            case NVT_TYPE_EDID_VTB_EXT_DTD:
+            case NVT_TYPE_EDID_VTB_EXT_CVT:
+                // pInfo->u.feature_ver_1_3.color_type provides mono, rgb, rgy, undefined;
+                // assume RGB 8-bpc support only (VTB is a pretty old edid standard)
+                pInfo->timing[i].etc.rgb444.bpc.bpc8 = 1;
+                break;
+            // These are from the CTA block; since there could be multiple CTA blocks,
+            // these are adjusted when the blocks are parsed
+            case NVT_TYPE_EDID_861ST:
+            case NVT_TYPE_EDID_EXT_DTD:
+                if (pInfo->ext_displayid20.as_edid_extension &&
+                    pInfo->ext_displayid20.valid_data_blocks.cta_data_present)
+                {
+                    updateColorFormatForDisplayId20ExtnTimings(pInfo, i);
+                }
+                updateBpcForTiming(pInfo, i);
+                break;
+            default:
+                // * the displayID_v1.3/v2.0 EDID extensions need to follow the EDID bpc definition.
+                // * all others default to the base edid
+                updateBpcForTiming(pInfo, i);
+        }
+
+        // The timing[i] entries need their per-color-format bpc values updated again
+        // if a displayId extension exists with its interface feature data block
+        if (pInfo->ext_displayid.version == 0x12 || pInfo->ext_displayid.version == 0x13)
+        {
+            updateColorFormatForDisplayIdExtnTimings(pInfo, i);
+        }
+        else if (pInfo->ext_displayid20.valid_data_blocks.interface_feature_present)
+        {
+            // the DisplayId2.0 spec has its own way of determining color format support, which includes bpc + color format
+            updateColorFormatForDisplayId20ExtnTimings(pInfo, i);
+        }
+    }
+
+    // Go through all the timings and set the CTA format accordingly. If a timing is a CTA 861b timing, store the
+    // index of this CTA 861b standard in the NVT_TIMING.etc.status field.
+    // However, the parser needs to exclude a DTD timing in the EDID base block that shares the same detailed timing with a VIC/DTD_ext in CTA861
+    for (i = 0; i < pInfo->total_timings; i++)
+    {
+        data = NvTiming_GetCEA861TimingIndex(&pInfo->timing[i]);
+        // a DisplayID block does not belong to the CTA timings, and it owns its deep color block itself
+        if (data && !((NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_1) ||
+                      (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_2) ||
+                      (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_7) ||
+                      (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_8) ||
+                      (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_9) ||
+                      (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_10)))
+        {
+            // CEA timings may be enumerated outside of SVD blocks -- the formats of these timings don't have CEA FORMAT (vic) set;
+            // before marking them CEA, make sure their color formats are updated too
+            if (NVT_GET_CEA_FORMAT(pInfo->timing[i].etc.status) == 0 &&
+                (!NVT_IS_DTD(pInfo->timing[i].etc.status) ||
+                 isMatchedCTA861Timing(pInfo, &pInfo->timing[i])))
+            {
+                for (j = 0; j < pInfo->total_timings; j++)
+                {
+                    // It is assumed that CTA timings repeated by the same CTA block or by different CTA blocks
+                    // announce the same color format for the same CTA timings
+                    if (NVT_GET_CEA_FORMAT(pInfo->timing[j].etc.status) == data)
+                    {
+                        // There could be anomalies between the EDID 1.4 base block color formats vs the CEA861 basic caps;
+                        // in this case we assume the union is supported
+                        pInfo->timing[i].etc.rgb444.bpcs |= pInfo->timing[j].etc.rgb444.bpcs;
+                        pInfo->timing[i].etc.yuv444.bpcs |= pInfo->timing[j].etc.yuv444.bpcs;
+                        pInfo->timing[i].etc.yuv422.bpcs |= pInfo->timing[j].etc.yuv422.bpcs;
+                        pInfo->timing[i].etc.yuv420.bpcs |= pInfo->timing[j].etc.yuv420.bpcs;
+                        break;
+                    }
+                }
+
+                // now update the VIC of this timing
+                NVT_SET_CEA_FORMAT(pInfo->timing[i].etc.status, data);
+            }
+            // set the aspect ratio info if needed
+            if (pInfo->timing[i].etc.aspect == 0)
+            {
+                pInfo->timing[i].etc.aspect = getCEA861TimingAspectRatio(data);
+            }
+        }
+    }
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NvBool isMatchedStandardTiming(NVT_EDID_INFO *pInfo, NVT_TIMING *pT)
+{
+    NvU32 j;
+
+    for (j = 0; j < pInfo->total_timings; j++)
+    {
+        if (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[j].etc.status) == NVT_TYPE_EDID_STD &&
+            NvTiming_IsTimingRelaxedEqual(&pInfo->timing[j], pT))
+        {
+            return NV_TRUE;
+        }
+    }
+    return NV_FALSE;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NvBool isMatchedEstablishedTiming(NVT_EDID_INFO *pInfo, NVT_TIMING *pT)
+{
+    NvU32 j;
+
+    for (j = 0; j < pInfo->total_timings; j++)
+    {
+        if (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[j].etc.status) == NVT_TYPE_EDID_EST &&
+            NvTiming_IsTimingRelaxedEqual(&pInfo->timing[j], pT))
+        {
+            return NV_TRUE;
+        }
+    }
+    return NV_FALSE;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NvBool isMatchedCTA861Timing(NVT_EDID_INFO *pInfo, NVT_TIMING *pT)
+{
+    NvU32 j;
+
+    for (j = 0; j < pInfo->total_timings; j++)
+    {
+        if (NVT_GET_CEA_FORMAT(pInfo->timing[j].etc.status) && NvTiming_IsTimingExactEqual(&pInfo->timing[j], pT))
+        {
+            return NV_TRUE;
+        }
+    }
+    return NV_FALSE;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+void updateBpcForTiming(NVT_EDID_INFO *pInfo, NvU32 index)
+{
+    NVT_EDID_CEA861_INFO *p861Info;
+
+    // assume/prefer data from the 1st CEA block if multiple exist
+    p861Info = &pInfo->ext861;
+
+    pInfo->timing[index].etc.rgb444.bpc.bpc8 = 1;
+
+    if (pInfo->version >= NVT_EDID_VER_1_4 && pInfo->input.isDigital)
+    {
+        if (pInfo->u.feature_ver_1_4_digital.support_ycrcb_444)
+        {
+            pInfo->timing[index].etc.yuv444.bpc.bpc8 = 1;
+        }
+        if (pInfo->u.feature_ver_1_4_digital.support_ycrcb_422)
+        {
+            pInfo->timing[index].etc.yuv422.bpc.bpc8 = 1;
+        }
+        if (pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_DISPLAYPORT_SUPPORTED ||
+            pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_UNDEFINED)
+        {
+            pInfo->timing[index].etc.rgb444.bpc.bpc6 = 1;
+
+            // trust the bpc claim in the edid base block for DP only
+            if (pInfo->input.u.digital.bpc >= NVT_EDID_VIDEOSIGNAL_BPC_10)
+            {
+                pInfo->timing[index].etc.rgb444.bpc.bpc10 = 1;
+                pInfo->timing[index].etc.yuv444.bpc.bpc10 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_444 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_444);
+                pInfo->timing[index].etc.yuv422.bpc.bpc10 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_422 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_422);
+            }
+            if (pInfo->input.u.digital.bpc >= NVT_EDID_VIDEOSIGNAL_BPC_12)
+            {
+                pInfo->timing[index].etc.rgb444.bpc.bpc12 = 1;
+                pInfo->timing[index].etc.yuv444.bpc.bpc12 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_444 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_444);
+                pInfo->timing[index].etc.yuv422.bpc.bpc12 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_422 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_422);
+            }
+            if (pInfo->input.u.digital.bpc >= NVT_EDID_VIDEOSIGNAL_BPC_16)
+            {
+                pInfo->timing[index].etc.rgb444.bpc.bpc16 = 1;
+                pInfo->timing[index].etc.yuv444.bpc.bpc16 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_444 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_444);
+                pInfo->timing[index].etc.yuv422.bpc.bpc16 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_422 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_422);
+            }
+        }
+        else if ((pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_A_SUPPORTED ||
+                  pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_B_SUPPORTED ||
+                  pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_UNDEFINED) &&
+                 p861Info->revision >= NVT_CEA861_REV_A)
+        {
+            updateHDMILLCDeepColorForTiming(pInfo, index);
+        }
+    }
+    else if (p861Info->revision >= NVT_CEA861_REV_A)
+    {
+        updateHDMILLCDeepColorForTiming(pInfo, index);
+    }
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_Get18ByteLongDescriptorIndex(NVT_EDID_INFO *pEdidInfo, NvU8 tag, NvU32 *pDtdIndex)
+{
+    NvU32 dtdIndex;
+
+    if (!pEdidInfo || !pDtdIndex)
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    for (dtdIndex = *pDtdIndex; dtdIndex < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; dtdIndex++)
+    {
+        if (pEdidInfo->ldd[dtdIndex].tag == tag)
+        {
+            *pDtdIndex = dtdIndex;
+            return NVT_STATUS_SUCCESS;
+        }
+    }
+
+    return NVT_STATUS_ERR;
+}
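Because *pDtdIndex is both the starting position and the returned hit, NvTiming_Get18ByteLongDescriptorIndex() supports a resumable scan: bump the index past each hit and call again. A hedged usage sketch (assumes an already-parsed info structure):

    NvU32 idx = 0;

    while (NvTiming_Get18ByteLongDescriptorIndex(&info, NVT_EDID_DISPLAY_DESCRIPTOR_STI, &idx) == NVT_STATUS_SUCCESS)
    {
        /* info.ldd[idx] is a standard-timing-identification descriptor here */
        idx++; /* resume the search after the current hit */
    }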
+
+// get the edid timing
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_GetEdidTimingExWithPclk(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_TIMING *pT, NvU32 rrx1k, NvU32 pclk)
+{
+    NvU8 kth = 0;
+    NvU32 i, j;
+    NvU32 native_cta, preferred_cta, preferred_displayid_dtd, preferred_dtd1, dtd1, map0, map1, map2, map3, map4, ceaIndex, max, cvt;
+    NVT_TIMING *pEdidTiming;
+    NVT_EDID_DD_RANGE_CVT *pCVT = NULL;
+    NVT_TIMING cvtTiming;
+
+    // input check
+    if (pEdidInfo == NULL || pEdidInfo->total_timings == 0 || pT == 0)
+        return NVT_STATUS_ERR;
+
+    if (width == 0 || height == 0 || rr == 0) // rrx1k and pclk are optional, can be 0.
+        return NVT_STATUS_ERR;
+
+    pEdidTiming = pEdidInfo->timing;
+
+    // the timing mapping index:
+    //
+    // native_cta              - the "native resolution of the sink" in CTA861.6. A Source shall override any other native video resolution indicators
+    //                           if the Source supports NVRDB and the NVRDB was found in the E-EDID
+    // preferred_cta           - the "preferred SVD" in CTA-861-F (i.e. a Sink that prefers a Video Format that is not listed as an SVD in the Video Data Block, but is instead listed in the YCBCR 4:2:0 VDB)
+    // preferred_displayid_dtd - the preferred detailed timing of the DisplayID extension
+    // preferred_dtd1          - the first detailed timing, when the PTM flag is enabled
+    // dtd1                    - the first detailed timing
+    // map0                    - the "perfect" match (the timing's H/V-visible and pixel clock (refresh rate) are the same as the requested "width", "height" and "rr")
+    // map1                    - the "closest" match, honoring the interlaced flag
+    // map2                    - the "closest" match, without honoring the interlaced flag
+    // map3                    - the "closest" match to the panel's native timing (i.e. the first DTD timing or the short 861B/C/D timings with the "native" flag)
+    // map4                    - the "closest" match with the same refresh rate
+    // max                     - the timing with the max visible area
+    native_cta = preferred_cta = preferred_displayid_dtd = preferred_dtd1 = dtd1 = map0 = map1 = map2 = map3 = map4 = ceaIndex = pEdidInfo->total_timings;
+    max = cvt = 0;
+
+    if (pEdidInfo->ext861.total_svr > 1)
+    {
+        kth = getHighestPrioritySVRIdx(&pEdidInfo->ext861);
+    }
+
+    for (i = 0; i < pEdidInfo->total_timings; i++)
+    {
+        // if the client prefers a _NATIVE timing, then don't select a custom timing
+        if ((flag & (NVT_FLAG_NATIVE_TIMING | NVT_FLAG_EDID_TIMING)) != 0 && NVT_IS_CUST_ENTRY(pEdidTiming[i].etc.status) != 0)
+        {
+            continue;
+        }
+
+        // find the perfect match if possible
+        if ((flag & NVT_FLAG_MAX_EDID_TIMING) == 0 &&
+            width == pEdidTiming[i].HVisible &&
+            height == frame_height(pEdidTiming[i]) &&
+            rr == pEdidTiming[i].etc.rr &&
+            ((rrx1k == 0) || (rrx1k == pEdidTiming[i].etc.rrx1k)) &&
+            !!(flag & NVT_PVT_INTERLACED_MASK) == !!pEdidTiming[i].interlaced)
+        {
+            if (map0 >= pEdidInfo->total_timings || pEdidTiming[i].pclk == pclk)
+            {
+                // make sure we take the priority as "detailed>standard>established". (The timing[] array always has the detailed
+                // timings in the front, followed by the standard and established ones.)
+                map0 = i;
+            }
+
+            if ((NVT_PREFERRED_TIMING_IS_CTA(pEdidTiming[i].etc.flag)) ||
+                ((0 == (flag & NVT_FLAG_EDID_861_ST)) && NVT_PREFERRED_TIMING_IS_DTD1(pEdidTiming[i].etc.flag, pEdidTiming[i].etc.status)) ||
+                (NVT_PREFERRED_TIMING_IS_DISPLAYID(pEdidTiming[i].etc.flag)) ||
+                (NVT_IS_NATIVE_TIMING(pEdidTiming[i].etc.status)))
+            {
+                *pT = pEdidTiming[i];
+                return NVT_STATUS_SUCCESS;
+            }
+
+            if (NVT_GET_TIMING_STATUS_TYPE(pEdidTiming[i].etc.status) == NVT_TYPE_EDID_861ST)
+            {
+                if (ceaIndex == pEdidInfo->total_timings)
+                {
+                    // Save the first entry found.
+                    ceaIndex = i;
+                }
+                else
+                {
+                    if (((flag & NVT_FLAG_CEA_4X3_TIMING) && (pEdidTiming[i].etc.aspect == 0x40003)) ||
+                        ((flag & NVT_FLAG_CEA_16X9_TIMING) && (pEdidTiming[i].etc.aspect == 0x160009)))
+                    {
+                        // Use the preferred aspect ratio if specified.
+                        ceaIndex = i;
+                    }
+                }
+            }
+        } // if ((flag & NVT_FLAG_MAX_EDID_TIMING) == 0 &&
+
+        // bypass the custom timing from being selected in the mismatch case
+        if (NVT_GET_TIMING_STATUS_TYPE(pEdidTiming[i].etc.status) == NVT_TYPE_CUST ||
+            NVT_IS_CUST_ENTRY(pEdidTiming[i].etc.status) != 0)
+        {
+            if (width != pEdidTiming[i].HVisible || height != frame_height(pEdidTiming[i]) || rr != pEdidTiming[i].etc.rr)
+            {
+                continue;
+            }
+        }
+
+        // find out the preferred timing in case a cea_vfpdb exists
+        if (native_cta == pEdidInfo->total_timings && NVT_NATIVE_TIMING_IS_CTA(pEdidTiming[i].etc.flag))
+        {
+            native_cta = i;
+        }
+
+        if (preferred_cta == pEdidInfo->total_timings && NVT_PREFERRED_TIMING_IS_CTA(pEdidTiming[i].etc.flag))
+        {
+            if (pEdidInfo->ext861.total_svr > 1)
+            {
+                if (kth != 0)
+                {
+                    // svr == vic
+                    if (NVT_IS_CTA861(pEdidTiming[i].etc.status) && (NVT_GET_CEA_FORMAT(pEdidTiming[i].etc.status) == kth))
+                    {
+                        preferred_cta = i;
+                    }
+                    else if (NVT_GET_TIMING_STATUS_SEQ(pEdidTiming[i].etc.status) == kth)
+                    {
+                        preferred_cta = i;
+                    }
+                }
+            }
+            else
+            {
+                preferred_cta = i;
+            }
+        }
+
+        // find out the preferred timing just in case.
+        // The caller will force the rr value to 1 to select the DisplayID preferred timing in pEdidTiming if it exists;
+        // however, we can't assign the intended refresh rate if two or more rr values share the same timing.
+        if (rr != 1)
+        {
+            if (pEdidTiming[i].etc.rr == rr && NVT_PREFERRED_TIMING_IS_DISPLAYID(pEdidTiming[i].etc.flag))
+            {
+                preferred_displayid_dtd = i;
+            }
+        }
+        else if (preferred_displayid_dtd == pEdidInfo->total_timings &&
+                 NVT_PREFERRED_TIMING_IS_DISPLAYID(pEdidTiming[i].etc.flag))
+        {
+            preferred_displayid_dtd = i;
+        }
+
+        if (NVT_PREFERRED_TIMING_IS_DTD1(pEdidTiming[i].etc.flag, pEdidTiming[i].etc.status))
+        {
+            preferred_dtd1 = i;
+        }
+
+        if (NVT_IS_DTD1(pEdidTiming[i].etc.status))
+        {
+            dtd1 = i;
+        }
+
+        // find out the max mode just in case
+        if (pEdidTiming[i].HVisible * pEdidTiming[i].VVisible > pEdidTiming[max].HVisible * pEdidTiming[max].VVisible)
+            max = i;
+
+        // if the requested timing is not in the EDID, try to find an EDID entry with the same progressive/interlaced setting
+        if (map1 >= pEdidInfo->total_timings)
+        {
+            if (!!(flag & NVT_PVT_INTERLACED_MASK) == !!pEdidTiming[i].interlaced &&
+                width <= pEdidTiming[i].HVisible &&
+                height <= frame_height(pEdidTiming[i]))
+            {
+                map1 = i;
+            }
+        }
+        else
+        {
+            if (!!(flag & NVT_PVT_INTERLACED_MASK) == !!pEdidTiming[i].interlaced &&
+                width <= pEdidTiming[i].HVisible &&
+                height <= frame_height(pEdidTiming[i]) &&
+                abs_delta(pEdidTiming[i].HVisible, width) <= abs_delta(pEdidTiming[map1].HVisible, width) &&
+                abs_delta(frame_height(pEdidTiming[i]), height) <= abs_delta(frame_height(pEdidTiming[map1]), height))
+            {
+                // if there are 2 timings with the same visible size, choose the one with the closer refresh rate
+                if (pEdidTiming[i].HVisible == pEdidTiming[map1].HVisible &&
+                    frame_height(pEdidTiming[i]) == frame_height(pEdidTiming[map1]))
+                {
+                    if (abs_delta(pEdidTiming[i].etc.rr, rr) < abs_delta(pEdidTiming[map1].etc.rr, rr))
+                    {
+                        map1 = i;
+                    }
+                }
+                else
+                {
+                    map1 = i;
+                }
+            }
+        }
+
+        // if the requested timing is not in the EDID, try to find an EDID entry regardless of the progressive/interlaced setting
+        if (map2 >= pEdidInfo->total_timings)
+        {
+            if (width <= pEdidTiming[i].HVisible &&
+                height <= frame_height(pEdidTiming[i]))
+            {
+                map2 = i;
+            }
+        }
+        else
+        {
+            if (width <= pEdidTiming[i].HVisible &&
+                height <= frame_height(pEdidTiming[i]) &&
+                abs_delta(pEdidTiming[i].HVisible, width) <= abs_delta(pEdidTiming[map2].HVisible, width) &&
+                abs_delta(frame_height(pEdidTiming[i]), height) <= abs_delta(frame_height(pEdidTiming[map2]), height))
+            {
+                // if there are 2 timings with the same visible size, choose the one with the closer refresh rate
+                if (pEdidTiming[i].HVisible == pEdidTiming[map2].HVisible &&
+                    frame_height(pEdidTiming[i]) == frame_height(pEdidTiming[map2]))
+                {
+                    if (abs_delta(pEdidTiming[i].etc.rr, rr) < abs_delta(pEdidTiming[map2].etc.rr, rr))
+                    {
+                        map2 = i;
+                    }
+                }
+                else
+                {
+                    map2 = i;
+                }
+            }
+        }
+
+        // find out the native timing
+        if (NVT_IS_NATIVE_TIMING(pEdidTiming[i].etc.status) || NVT_IS_DTD1(pEdidTiming[i].etc.status))
+        {
+            if (map3 >= pEdidInfo->total_timings)
+            {
+                if (width <= pEdidTiming[i].HVisible &&
+                    height <= frame_height(pEdidTiming[i]))
+                {
+                    map3 = i;
+                }
+            }
+            else if (abs_delta(pEdidTiming[i].HVisible, width) <= abs_delta(pEdidTiming[map3].HVisible, width) &&
+                     abs_delta(frame_height(pEdidTiming[i]), height) <= abs_delta(frame_height(pEdidTiming[map3]), height) &&
+                     width <= pEdidTiming[i].HVisible &&
+                     height <= frame_height(pEdidTiming[i]))
+            {
+                map3 = i;
+            }
+        }
+
+        // find the edid timing with a matching refresh rate
+        if (map4 >= pEdidInfo->total_timings)
+        {
+            if (width <= pEdidTiming[i].HVisible &&
+                height <= pEdidTiming[i].VVisible &&
+                rr == pEdidTiming[i].etc.rr)
+            {
+                map4 = i;
+            }
+        }
+        else
+        {
+            if (width <= pEdidTiming[i].HVisible &&
+                height <= pEdidTiming[i].HVisible &&
+                rr == pEdidTiming[i].etc.rr &&
+                abs_delta(pEdidTiming[i].HVisible, width) <= abs_delta(pEdidTiming[map4].HVisible, width) &&
+                abs_delta(pEdidTiming[i].VVisible, height) <= abs_delta(pEdidTiming[map4].VVisible, height))
+            {
+                map4 = i;
+            }
+        }
+
+    } //for (i = 0; i < pEdidInfo->total_timings; i++)
+
+    if ((preferred_displayid_dtd == preferred_dtd1) && (preferred_dtd1 == dtd1) &&
+        (dtd1 == map0) &&
+        (map0 == map1) &&
+        (map1 == map2) &&
+        (map2 == map3) &&
+        (map3 == map4) &&
+        (map4 == pEdidInfo->total_timings) &&
+        pEdidInfo->version >= NVT_EDID_VER_1_4 &&
+        pEdidInfo->u.feature_ver_1_4_digital.continuous_frequency &&
+        !(flag & NVT_PVT_INTERLACED_MASK))
+    {
+        // try to find a CVT timing that fits
+        NvU32 maxHeight, minHeight, tempHeight;
+
+        minHeight = ~0;
+        maxHeight = tempHeight = 0;
+
+        // looping through the long display descriptors
+        for (i=0; i<NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++)
+        {
+            if (pEdidInfo->ldd[i].tag != NVT_EDID_DISPLAY_DESCRIPTOR_DRL || pEdidInfo->ldd[i].u.range_limit.timing_support != NVT_EDID_RANGE_SUPPORT_CVT)
+            {
+                continue;
+            }
+
+            pCVT = &pEdidInfo->ldd[i].u.range_limit.u.cvt;
+
+            if (width <= pCVT->max_active_pixels_per_line || (pCVT->scaling_support & NVT_EDID_CVT_SCALING_HOR_SHRINK))
+            {
+                for (j=0; j<5; j++)
+                {
+                    if ((pCVT->aspect_supported & (1<<j)) != 0)
+                    {
+                        // the height each supported aspect ratio yields at the requested width
+                        // (aspect_supported bits, per EDID 1.4: 4:3, 16:9, 16:10, 5:4, 15:9)
+                        if (j == 0)      tempHeight = width * 3 / 4;
+                        else if (j == 1) tempHeight = width * 9 / 16;
+                        else if (j == 2) tempHeight = width * 10 / 16;
+                        else if (j == 3) tempHeight = width * 4 / 5;
+                        else             tempHeight = width * 9 / 15;
+
+                        if (minHeight > tempHeight)
+                        {
+                            minHeight = tempHeight;
+                        }
+                        if (maxHeight < tempHeight)
+                        {
+                            maxHeight = tempHeight;
+                        }
+                    }
+                } //for (j=0; j<5; j++)
+            } //if (width <= pCVT->max_active_pixels_per_line || (pCVT->scaling_support & NVT_EDID_CVT_SCALING_HOR_SHRINK))
+
+            if (((minHeight < height) && (pCVT->scaling_support & NVT_EDID_CVT_SCALING_VER_SHRINK)) ||
+                ((maxHeight > height) && (pCVT->scaling_support & NVT_EDID_CVT_SCALING_VER_STRETCH)))
+            {
+                cvt = 1;
+            }
+
+            if (cvt)
+            {
+                break;
+            }
+        } //for (i=0; i<NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++)
+
+        // validate the CVT timing against the max pixel clock from the range limit
+        if (cvt)
+        {
+            if (pCVT->blanking_support & NVT_EDID_CVT_BLANKING_REDUCED && NvTiming_CalcCVT_RB(width, height, rr, NVT_PROGRESSIVE, &cvtTiming) == NVT_STATUS_SUCCESS)
+            {
+                if (cvtTiming.pclk > (NvU32)((pEdidInfo->ldd[i].u.range_limit.max_pclk_MHz * 100) - (pCVT->pixel_clock_adjustment * 25)))
+                {
+                    cvt = 0;
+                }
+            }
+            else if (pCVT->blanking_support & NVT_EDID_CVT_BLANKING_STANDARD && NvTiming_CalcCVT(width, height, rr, NVT_PROGRESSIVE, &cvtTiming) == NVT_STATUS_SUCCESS)
+            {
+                if (cvtTiming.pclk > (NvU32)((pEdidInfo->ldd[i].u.range_limit.max_pclk_MHz * 100) - (pCVT->pixel_clock_adjustment * 25)))
+                {
+                    cvt = 0;
+                }
+            }
+            else
+            {
+                cvt = 0;
+            }
+        }
+    } //(dtd1 == map0 == map1 == map2 == map3 == pEdidInfo->total_timings) && pEdidInfo->version >= NVT_EDID_VER_1_4 &&
+      // pEdidInfo->feature_ver_1_4_digital.continuous_frequency && !(flag & NVT_PVT_INTERLACED_MASK))
+
+    // now return the mismatched EDID timing
+    if (flag & NVT_FLAG_NV_PREFERRED_TIMING)
+    {
+        *pT = (preferred_displayid_dtd != pEdidInfo->total_timings) ? pEdidTiming[preferred_displayid_dtd] :
+              (native_cta != pEdidInfo->total_timings) ? pEdidTiming[native_cta] :
+              (preferred_cta != pEdidInfo->total_timings) ? pEdidTiming[preferred_cta] :
+              (preferred_dtd1 != pEdidInfo->total_timings) ? pEdidTiming[preferred_dtd1] :
+              pEdidTiming[dtd1];
+        // what if DTD1 itself is filtered out? In such a case the dtd1 index points to an invalid timing[]:
+        // (dtd1 != pEdidInfo->total_timings) ? pEdidTiming[dtd1] : pEdidTiming[0];
+    }
+    else if (flag & NVT_FLAG_DTD1_TIMING)
+    {
+        *pT = pEdidTiming[dtd1];
+    }
+    else if ((flag & NVT_FLAG_MAX_EDID_TIMING) && (0 == (flag & NVT_FLAG_EDID_861_ST)))
+    {
+        *pT = pEdidTiming[max];
+    }
+    else if ((flag & (NVT_FLAG_CEA_4X3_TIMING | NVT_FLAG_CEA_16X9_TIMING | NVT_FLAG_EDID_861_ST)) && ceaIndex < (pEdidInfo->total_timings))
+    {
+        *pT = pEdidTiming[ceaIndex];
+    }
+    else if ((flag & NVT_FLAG_NATIVE_TIMING) != 0 && map3 < pEdidInfo->total_timings)
+    {
+        // Allow the closest refresh rate match when the EDID has a detailed timing for a different RR on the native resolution.
+        if (map0 < pEdidInfo->total_timings &&
+            pEdidTiming[map0].HVisible == pEdidTiming[map3].HVisible &&
+            pEdidTiming[map0].VVisible == pEdidTiming[map3].VVisible)
+        {
+            *pT = pEdidTiming[map0];
+        }
+        else
+        {
+            *pT = pEdidTiming[map3];
+        }
+    }
+    else if (map0 < pEdidInfo->total_timings)
+    {
+        // use the exact mapped timing if possible
+        *pT = pEdidTiming[map0];
+    }
+    else if ((flag & NVT_FLAG_EDID_TIMING_RR_MATCH) && map4 < pEdidInfo->total_timings)
+    {
+        *pT = pEdidTiming[map4];
+    }
+    else if (map1 < pEdidInfo->total_timings)
+    {
+        // use the mapped timing if possible
+        *pT = pEdidTiming[map1];
+    }
+    else if (map2 < pEdidInfo->total_timings)
+    {
+        // use the 2nd mapped timing if possible
+        *pT = pEdidTiming[map2];
+    }
+    else if (dtd1 < pEdidInfo->total_timings && width <= pEdidTiming[dtd1].HVisible && height <= pEdidTiming[dtd1].VVisible)
+    {
+        // use the 1st detailed timing if possible
+        *pT = pEdidTiming[dtd1];
+    }
+    else if (cvt)
+    {
+        // use the cvt timing
+        *pT = cvtTiming;
+    }
+    else
+    {
+        // use the max timing for all other cases
+        *pT = pEdidTiming[max];
+    }
+
+    // set the mismatch status
+    if (pT->HVisible != width || frame_height(*pT) != height)
+    {
+        NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_SIZE);
+    }
+    if (!NvTiming_IsRoundedRREqual(pT->etc.rr, pT->etc.rrx1k, (NvU16)rr))
+    {
+        NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_RR);
+    }
+    if (!!pT->interlaced != !!(flag & NVT_PVT_INTERLACED_MASK))
+    {
+        NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_FORMAT);
+    }
+
+    return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_GetEdidTimingEx(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_TIMING *pT, NvU32 rrx1k)
+{
+    return NvTiming_GetEdidTimingExWithPclk(width, height, rr, flag, pEdidInfo, pT, rrx1k, 0);
+}
+
+// get the edid timing
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_GetEdidTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_TIMING *pT)
+{
+    return NvTiming_GetEdidTimingEx(width, height, rr, flag, pEdidInfo, pT, 0);
+}
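The two wrappers above simply default rrx1k and pclk to 0, so a typical lookup is one call; on return, the status word records whether the result is exact or the closest candidate chosen by the map0..map4 fallback chain. A hedged usage sketch (assumes an already-parsed info structure):

    NVT_TIMING t;

    if (NvTiming_GetEdidTiming(1920, 1080, 60, 0, &info, &t) == NVT_STATUS_SUCCESS)
    {
        if (NVT_GET_TIMING_STATUS_MATCH(t.etc.status) == 0)
        {
            /* exact EDID match: size, refresh rate and scan format all agree */
        }
        else
        {
            /* one or more NVT_STATUS_TIMING_MISMATCH_* bits are set and t is */
            /* the nearest timing selected by the fallback chain above        */
        }
    }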
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_GetHDMIStereoExtTimingFromEDID(NvU32 width, NvU32 height, NvU32 rr, NvU8 StereoStructureType, NvU8 SideBySideHalfDetail, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_EXT_TIMING *pT)
+{
+    NVT_STATUS status = NVT_STATUS_ERR;
+    NvU8 Vic;
+    NvU32 i;
+    NVT_TIMING Timing;
+
+    NVMISC_MEMSET(pT, 0, sizeof(NVT_EXT_TIMING));
+
+    // adjust the flags --
+    // we need an EDID timing with an RR match,
+    // not the max timing
+    flag = flag | NVT_FLAG_EDID_TIMING | NVT_FLAG_EDID_TIMING_RR_MATCH | NVT_FLAG_EDID_861_ST;
+    flag = flag & ~(NVT_FLAG_MAX_EDID_TIMING);
+
+    status = NvTiming_GetEdidTiming(width, height, rr, flag, pEdidInfo, &Timing);
+    if (NVT_STATUS_SUCCESS == status)
+    {
+        status = NVT_STATUS_ERR;
+
+        // is this an exact match?
+        if (0 == NVT_GET_TIMING_STATUS_MATCH(Timing.etc.status))
+        {
+            if (NVT_TYPE_EDID_861ST == NVT_GET_TIMING_STATUS_TYPE(Timing.etc.status))
+            {
+                // look up the vic for this timing in the support map.
+                Vic = (NvU8) NVT_GET_CEA_FORMAT(Timing.etc.status);
+                for (i = 0; i < pEdidInfo->Hdmi3Dsupport.total; ++i)
+                {
+                    if (Vic == pEdidInfo->Hdmi3Dsupport.map[i].Vic)
+                    {
+                        break;
+                    }
+                }
+                if (i < pEdidInfo->Hdmi3Dsupport.total)
+                {
+                    // does this vic support the requested structure type?
+                    if (0 != (NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(StereoStructureType) & pEdidInfo->Hdmi3Dsupport.map[i].StereoStructureMask))
+                    {
+                        // if this is side-by-side(half), the detail needs to match also.
+                        if ((NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF != StereoStructureType) || (SideBySideHalfDetail == pEdidInfo->Hdmi3Dsupport.map[i].SideBySideHalfDetail))
+                        {
+                            // convert the 2D timing to 3D.
+                            NvTiming_GetHDMIStereoTimingFrom2DTiming(&Timing, StereoStructureType, SideBySideHalfDetail, pT);
+                            status = NVT_STATUS_SUCCESS;
+                        }
+                    }
+                }
+            }
+        }
+    }
+    return status;
+}
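Per-VIC 3D support is a bitmask indexed by the HDMI 1.4 3D_Structure codes, so the check above is a single mask-and (plus the extra detail byte for side-by-side-half). A standalone sketch modeled on NVT_HDMI_3D_SUPPORTED_STRUCT_MASK (the structure codes shown are the HDMI byte-5 values):

    #include <stdint.h>
    #include <stdio.h>

    #define HDMI_3DS_FRAMEPACK      0 /* HDMI 1.4 3D_Structure codes */
    #define HDMI_3DS_TOPBOTTOM      6
    #define HDMI_3DS_SIDEBYSIDEHALF 8
    #define STRUCT_MASK(t) (1u << (t))

    int main(void)
    {
        /* hypothetical VIC supporting frame packing and top-and-bottom only */
        uint32_t mask = STRUCT_MASK(HDMI_3DS_FRAMEPACK) | STRUCT_MASK(HDMI_3DS_TOPBOTTOM);

        printf("SBS-half: %s\n", (mask & STRUCT_MASK(HDMI_3DS_SIDEBYSIDEHALF)) ? "yes" : "no");
        return 0; /* prints "SBS-half: no" */
    }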
+    else if ((NVT_GET_TIMING_STATUS_MATCH(pT->etc.status) & NVT_STATUS_TIMING_MISMATCH_SIZE) == 0)
+    {
+        return NVT_STATUS_SUCCESS;
+    }
+
+    // find the Native timing
+    for (i = 0, dwNativeIndex = pEI->total_timings + 1; i < pEI->total_timings; i++)
+    {
+        dwStatus = pEI->timing[i].etc.status;
+
+        if ((NVT_IS_NATIVE_TIMING(dwStatus)) || NVT_IS_DTD1(dwStatus))
+        {
+            dwNativeIndex = i;
+            break;
+        }
+    }
+
+    // we don't want to apply LogicScaling (Letterboxing) to a Wide Mode on a Wide Panel (or a non-Wide Mode on a non-Wide Panel);
+    // also bail out if no native timing was found, so we never index timing[] out of bounds
+    if( dwNativeIndex >= pEI->total_timings ||
+        nvt_is_wideaspect(width, height) == nvt_is_wideaspect(pEI->timing[dwNativeIndex].HVisible, pEI->timing[dwNativeIndex].VVisible) )
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    // Letterbox mode enabled by regkey LogicScalingMode
+    // When we try to set modes not supported in EDID (e.g. DFP over DSub) the display may not fit the screen.
+    // If Logic Scaling is enabled (i.e. why we are here), we need to tweak the timing (for CRT) provided:
+    // 1) the aspect ratio of the native mode and the requested mode differ
+    //    e.g. Native AR    =  5:4,  1280x1024
+    //         Requested AR = 16:10, 1280x800
+    // 2) width and height do not both mismatch; if they do, we fail this call so the
+    //    caller falls back to DMT/GTF timing.
+    if( pT->interlaced == 0 &&
+        dwNativeIndex < pEI->total_timings &&
+        (pEI->timing[dwNativeIndex].HVisible*height != pEI->timing[dwNativeIndex].VVisible*width) &&
+        (width == pT->HVisible || height == pT->VVisible))
+    {
+        pT->HFrontPorch += (pT->HVisible - width) / 2;
+        pT->VFrontPorch += (pT->VVisible - height) / 2;
+        pT->HVisible = width;
+        pT->VVisible = height;
+        if(rr != pT->etc.rr)
+        {
+            pT->etc.rrx1k = rr * 1000;
+            pT->pclk = RRx1kToPclk (pT);
+            pT->pclk1khz = RRx1kToPclk1khz(pT);
+        }
+
+        pT->etc.status = NVT_STATUS_ASPR;
+        return NVT_STATUS_SUCCESS;
+    }
+
+    return NVT_STATUS_ERR;
+}
+
+/**
+ *
+ * @brief check whether the EDID raw data is valid; returns a mask of error flags for any checks that fail
+ * @param pEdid  : pointer to the EDID data
+ * @param length : read length of the EDID
+ * @param bIsStrongValidation : NV_TRUE  - apply the additional checks as well
+ *                              NV_FALSE - only header, checksum and size checks
+ *
+ */
+CODE_SEGMENT(PAGE_DD_CODE)
+NvU32 NvTiming_EDIDValidationMask(NvU8 *pEdid, NvU32 length, NvBool bIsStrongValidation)
+{
+    NvU32 i, j, version, checkSum;
+    EDIDV1STRUC *p = (EDIDV1STRUC *)pEdid;
+    EDID_LONG_DISPLAY_DESCRIPTOR *pLdd;
+    NvU8 *pExt;
+    DETAILEDTIMINGDESCRIPTOR *pDTD;
+    NvU32 ret = 0;
+
+    // check the EDID base size to avoid accessing beyond the EDID buffer; if the
+    // buffer is too small, do not proceed with further validation.
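+    /*
+     * Illustrative sketch (not driver code): the checksum rule that
+     * isChecksumValid() and the extension-block loops below rely on. Every
+     * 128-byte EDID block, the base block and each extension, must sum to 0
+     * modulo 256:
+     *
+     *     static NvBool blockChecksumOk(const NvU8 *pBlock)
+     *     {
+     *         NvU32 i, sum = 0;
+     *         for (i = 0; i < 128; i++)   // 128 == sizeof(EDIDV1STRUC)
+     *         {
+     *             sum += pBlock[i];       // byte 127 is the checksum byte
+     *         }
+     *         return (sum & 0xFF) == 0;
+     *     }
+     */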
+ if (length < sizeof(EDIDV1STRUC)) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_SIZE); + return ret; + } + + // check the EDID version and signature + if (getEdidVersion(pEdid, &version) != NVT_STATUS_SUCCESS) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_VERSION); + return ret; + } + + // check block 0 checksum value + if (!isChecksumValid(pEdid)) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM); + return ret; + } + + // Strong validation to follow + if (bIsStrongValidation == NV_TRUE) + { + // range limit check + for (i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++) + { + pLdd = (EDID_LONG_DISPLAY_DESCRIPTOR *)&p->DetailedTimingDesc[i]; + if (pLdd->tag == NVT_EDID_DISPLAY_DESCRIPTOR_DRL && (version == 0x103 || (version == 0x104 && (p->bFeatureSupport & 1)))) + { + EDID_MONITOR_RANGE_LIMIT *pRangeLimit = (EDID_MONITOR_RANGE_LIMIT *)pLdd->data; + NvU8 max_v_rate_offset, min_v_rate_offset, max_h_rate_offset, min_h_rate_offset; + + // add 255Hz offsets as needed before doing the check, use descriptor->rsvd2 + nvt_assert(!(pLdd->rsvd2 & 0xF0)); + + max_v_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_VER_MAX ? NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + min_v_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_VER_MIN ? NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + max_h_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_HOR_MAX ? NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + min_h_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_HOR_MIN ? NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + + if ((pRangeLimit->minVRate + min_v_rate_offset) > (pRangeLimit->maxVRate + max_v_rate_offset) || + (pRangeLimit->minHRate + min_h_rate_offset) > (pRangeLimit->maxHRate + max_h_rate_offset) || + pRangeLimit->maxVRate == 0 || + pRangeLimit->maxHRate == 0) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_RANGE_LIMIT); + } + break; + } + } + + // extension and size check + if ((NvU32)(p->bExtensionFlag + 1) * sizeof(EDIDV1STRUC) > length) + { + // Do not proceed with further validation if the size is invalid. + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_SIZE); + return ret; + } + + // validate Detailed Timing Descriptors, 4 blocks + for (i = 0; i < 4; i++) + { + if (*((NvU16 *)&p->DetailedTimingDesc[i]) != 0) + { + // This block is not a Display Descriptor. 
+                // It must be a valid timing definition
+                // validate the block by passing NULL as the NVTIMING parameter to parseEdidDetailedTimingDescriptor
+                if (parseEdidDetailedTimingDescriptor((NvU8 *)&p->DetailedTimingDesc[i], NULL) != NVT_STATUS_SUCCESS)
+                {
+                    ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_DTD);
+                }
+            }
+            else
+            {
+                // This block is a display descriptor, validate
+                if (((EDID_LONG_DISPLAY_DESCRIPTOR *)&p->DetailedTimingDesc[i])->rsvd != 0)
+                {
+                    ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_DTD);
+                }
+            }
+        }
+
+        // validate extension blocks
+        for (j = 1; j <= p->bExtensionFlag; j++)
+        {
+            pExt = pEdid + sizeof(EDIDV1STRUC) * j;
+
+            // check for 861 extension
+            switch (*pExt)
+            {
+                case NVT_EDID_EXTENSION_CTA:
+                    // first sanity check on the extension block
+                    if (get861ExtInfo(pExt, sizeof(EIA861EXTENSION), NULL) != NVT_STATUS_SUCCESS)
+                    {
+                        ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT);
+                    }
+
+                    // checksum on the CEA extension block
+                    for (i = 0, checkSum = 0; i < sizeof(EIA861EXTENSION); i ++)
+                    {
+                        checkSum += pExt[i];
+                    }
+
+                    if ((checkSum & 0xFF) != 0)
+                    {
+                        ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM);
+                    }
+
+                    // 0 indicates no DTD in this block
+                    if (((EIA861EXTENSION*)pExt)->offset == 0)
+                    {
+                        continue;
+                    }
+
+                    // validate DTD blocks
+                    pDTD = (DETAILEDTIMINGDESCRIPTOR *)&pExt[((EIA861EXTENSION *)pExt)->offset];
+                    while ((pDTD->wDTPixelClock != 0) &&
+                           (((NvU8 *)pDTD - pExt + sizeof(DETAILEDTIMINGDESCRIPTOR)) < ((NvU8)sizeof(EIA861EXTENSION))))
+                    {
+                        if (parseEdidDetailedTimingDescriptor((NvU8 *)pDTD, NULL) != NVT_STATUS_SUCCESS)
+                        {
+                            ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD);
+                        }
+                        pDTD++;
+                    }
+                    break;
+                case NVT_EDID_EXTENSION_VTB:
+                    // perform a checksum on the VTB block
+                    for (i = 0, checkSum = 0; i < sizeof(VTBEXTENSION); i++)
+                    {
+                        checkSum += pExt[i];
+                    }
+                    if ((checkSum & 0xFF) != 0)
+                    {
+                        ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM);
+                    }
+                    break;
+                case NVT_EDID_EXTENSION_DISPLAYID:
+                    // perform a checksum on the DisplayID block
+                    for (i = 0, checkSum = 0; i < sizeof(EIA861EXTENSION); i++)
+                    {
+                        checkSum += pExt[i];
+                    }
+                    if ((checkSum & 0xFF) != 0)
+                    {
+                        ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM);
+                    }
+                    break;
+                default:
+                    break;
+            }
+        }
+    }
+
+    return ret;
+}
+
+/**
+ *
+ * @brief sanity check the frequently used data blocks of an EDID binary;
+ *        returns a mask of error checkpoint flags for any checks that fail
+ * @param pEdid  : pointer to the EDID raw data
+ * @param length : read length of the EDID
+ *
+ */
+CODE_SEGMENT(PAGE_DD_CODE)
+NvU32 NvTiming_EDIDStrongValidationMask(NvU8 *pEdid, NvU32 length)
+{
+    NvU32 i, j, version, extnCount;
+    EDIDV1STRUC *p = (EDIDV1STRUC *)pEdid;
+    EDID_LONG_DISPLAY_DESCRIPTOR *pLdd;
+    NvU8 *pExt;
+    DETAILEDTIMINGDESCRIPTOR *pDTD;
+    // For CTA861
+    NvU8 ctaDTD_Offset;
+    NvU8 *pData_collection;
+    NvU32 ctaBlockTag, ctaPayload, vic;
+    // For DisplayID
+    DIDEXTENSION *pDisplayid;
+    NvU8 did_section_length = 0x79;
+    NvU8 did2ExtCount = 0;
+    DISPLAYID_2_0_DATA_BLOCK_HEADER *pDID2Header;
+    DISPLAYID_DATA_BLOCK_HEADER *pHeader;
+    NvU8 block_length = 0;
+    NvBool bAllZero = NV_TRUE;
+    NvU32 ret = 0;
+
+    // check the EDID base size to avoid accessing beyond the EDID buffer
+    if (length < sizeof(EDIDV1STRUC) || (length > sizeof(EDIDV1STRUC) && (length % sizeof(EDIDV1STRUC) != 0)))
+        ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_SIZE);
+
+    // check the EDID version and signature
+    if
(getEdidVersion(pEdid, &version) != NVT_STATUS_SUCCESS) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_HEADER); + + // check block 0 checksum value + if (!isChecksumValid(pEdid)) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM); + + if (p->bVersionNumber != 0x01 || p->bRevisionNumber > 0x04) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_VERSION); + } + + // 18bytes in DTD or Display Descriptor check + for (i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++) + { + if (*((NvU16 *)&p->DetailedTimingDesc[i]) != 0) + { + // This block is not a Display Descriptor. + // It must be a valid timing definition + // validate the block by passing NULL as the NVTIMING parameter to parseEdidDetailedTimingDescriptor + if (parseEdidDetailedTimingDescriptor((NvU8 *)&p->DetailedTimingDesc[i], NULL) != NVT_STATUS_SUCCESS) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_DTD); + } + else + { + // check the max image size in monitor and its DTD defines value + if (p->bMaxHorizImageSize != 0 && p->bMaxVertImageSize != 0) + { + DETAILEDTIMINGDESCRIPTOR *pDTD = (DETAILEDTIMINGDESCRIPTOR *)&p->DetailedTimingDesc[i]; + NvU16 hDTDImageSize = (pDTD->bDTHorizVertImage & 0xF0) << 4 | pDTD->bDTHorizontalImage; + NvU16 vDTDImageSize = (pDTD->bDTHorizVertImage & 0x0F) << 8 | pDTD->bDTVerticalImage; + + if ((hDTDImageSize/10) > p->bMaxHorizImageSize || (vDTDImageSize/10) > p->bMaxVertImageSize) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_DTD); + } + } + } + } + else + { + pLdd = (EDID_LONG_DISPLAY_DESCRIPTOR *)&p->DetailedTimingDesc[i]; + + // This block is a display descriptor, validate + if (((EDID_LONG_DISPLAY_DESCRIPTOR *)&p->DetailedTimingDesc[i])->rsvd != 0 || // (00 00 00)h indicates Display Descriptor + (pLdd->tag >= 0x11 && pLdd->tag <= 0xF6)) // Reserved : Do Not Use + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_DESCRIPTOR); + + if (pLdd->tag == NVT_EDID_DISPLAY_DESCRIPTOR_DRL && (version == 0x103 || (version == 0x104 && (p->bFeatureSupport & 1)))) + { + EDID_MONITOR_RANGE_LIMIT *pRangeLimit = (EDID_MONITOR_RANGE_LIMIT *)pLdd->data; + NvU8 max_v_rate_offset, min_v_rate_offset, max_h_rate_offset, min_h_rate_offset; + + // add 255Hz offsets as needed before doing the check, use descriptor->rsvd2 + nvt_assert(!(pLdd->rsvd2 & 0xF0)); + + max_v_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_VER_MAX ? NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + min_v_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_VER_MIN ? NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + max_h_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_HOR_MAX ? NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + min_h_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_HOR_MIN ? 
NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + + if ((pRangeLimit->minVRate + min_v_rate_offset) > (pRangeLimit->maxVRate + max_v_rate_offset) || + (pRangeLimit->minHRate + min_h_rate_offset) > (pRangeLimit->maxHRate + max_h_rate_offset) || + pRangeLimit->maxVRate == 0 || + pRangeLimit->maxHRate == 0) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_RANGE_LIMIT); + } + } + } + } + + // extension and size check + if ((NvU32)(p->bExtensionFlag + 1) * sizeof(EDIDV1STRUC) > length) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXTENSION_COUNT); + } + + // we shall not trust any extension blocks with wrong input EDID size + if (NVT_IS_EDID_VALIDATION_FLAGS(ret, NVT_EDID_VALIDATION_ERR_SIZE) || + NVT_IS_EDID_VALIDATION_FLAGS(ret, NVT_EDID_VALIDATION_ERR_EXTENSION_COUNT)) + return ret; + + // validate extension blocks + for (j = 1; j <= p->bExtensionFlag; j++) + { + pExt = pEdid + sizeof(EDIDV1STRUC) * j; + + // check for 861 extension + switch (*pExt) + { + case NVT_EDID_EXTENSION_CTA: + ctaDTD_Offset = ((EIA861EXTENSION *)pExt)->offset; + // first sanity check on the extension block + if (get861ExtInfo(pExt, sizeof(EIA861EXTENSION), NULL) != NVT_STATUS_SUCCESS || + ((EIA861EXTENSION *)pExt)->revision < NVT_CEA861_REV_B) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_CTA_BASIC); + } + + // 0 indicated there is no DTD and data collection in this block + if (ctaDTD_Offset == 0) + { + if(!isChecksumValid(pExt)) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_CTA_CHECKSUM); + continue; + } + + // validate SVD block + ctaBlockTag = NVT_CEA861_GET_SHORT_DESCRIPTOR_TAG(((EIA861EXTENSION *)pExt)->data[0]); + pData_collection = ((EIA861EXTENSION *)pExt)->data; + + while ((ctaDTD_Offset - 4) > 0 && pData_collection != &pExt[ctaDTD_Offset] && + ctaBlockTag > NVT_CEA861_TAG_RSVD && ctaBlockTag <= NVT_CEA861_TAG_EXTENDED_FLAG) + { + ctaBlockTag = NVT_CEA861_GET_SHORT_DESCRIPTOR_TAG(*pData_collection); + ctaPayload = NVT_CEA861_GET_SHORT_DESCRIPTOR_SIZE(*pData_collection); + + if (parseCta861DataBlockInfo(pData_collection, (NvU32)ctaDTD_Offset - 4, NULL) == NVT_STATUS_SUCCESS) + { + pData_collection++; // go to the next byte. 
skip Tag+Length byte + + if (ctaBlockTag == NVT_CEA861_TAG_VIDEO) + { + for (i=0; i < ctaPayload; i++) + { + vic = NVT_GET_CTA_8BIT_VIC(*pData_collection); + if (vic == 0 || vic > 255 || (vic >= 128 && vic <=192)) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_CTA_SVD); + pData_collection++; + } + } + else if (ctaBlockTag == NVT_CEA861_TAG_EXTENDED_FLAG) + { + if (*pData_collection == NVT_CTA861_EXT_TAG_HF_EEODB) + { + if ((p->bVersionNumber != 0x01) || (p->bRevisionNumber != 0x03)) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_CTA_INVALID_DATA_BLOCK); + pData_collection += ctaPayload; + } + else + { + ret &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXTENSION_COUNT); + extnCount = *(++pData_collection); + // check the EDID extension count value again because EDID extension block count + // value in EEODB override it and source shall ignore extension flag > 1 value + if ((extnCount + 1) != (length / (sizeof(EDIDV1STRUC)))) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXTENSION_COUNT); + pData_collection++; + } + } + else + pData_collection += ctaPayload; + } + else if (ctaBlockTag == NVT_CEA861_TAG_RSVD) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_CTA_TAG); + pData_collection += ctaPayload; + } + else + pData_collection += ctaPayload; + } + else + { + pData_collection++; // go to the next byte. skip Tag+Length byte + + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_CTA_INVALID_DATA_BLOCK); + pData_collection += ctaPayload; + } + } + + // validate DTD blocks + pDTD = (DETAILEDTIMINGDESCRIPTOR *)&pExt[((EIA861EXTENSION *)pExt)->offset]; + while ((pDTD->wDTPixelClock != 0) && + (((NvU8 *)pDTD - pExt + sizeof(DETAILEDTIMINGDESCRIPTOR)) < ((NvU8)sizeof(EIA861EXTENSION)))) + { + if (parseEdidDetailedTimingDescriptor((NvU8 *)pDTD, NULL) != NVT_STATUS_SUCCESS) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD); + else + { + // check the max image size and + if (p->bMaxHorizImageSize != 0 && p->bMaxVertImageSize != 0) + { + NvU16 hDTDImageSize = (pDTD->bDTHorizVertImage & 0xF0) << 4 | pDTD->bDTHorizontalImage; + NvU16 vDTDImageSize = (pDTD->bDTHorizVertImage & 0x0F) << 8 | pDTD->bDTVerticalImage; + + if ((hDTDImageSize/10) > (p->bMaxHorizImageSize) || (vDTDImageSize/10) > p->bMaxVertImageSize) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_CTA_DTD); + } + } + pDTD++; + } + + if(!isChecksumValid(pExt)) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_CTA_CHECKSUM); + break; + case NVT_EDID_EXTENSION_DISPLAYID: + pDisplayid = ((DIDEXTENSION *)pExt); + if (pDisplayid->ext_count != 0) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DID_EXTCOUNT); + + if (pDisplayid->length != 0x79) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DID_SEC_SIZE); + + if (!isChecksumValid(pExt)) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DID_CHECKSUM); + + // check the DID2 data blocks + if ((pDisplayid->struct_version & 0xF0) >> 4 == 2) + { + if ((pDisplayid->struct_version & 0xFF) == 0x21) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DID_VERSION); + + did2ExtCount++; + + if (pDisplayid->use_case == 0 && did2ExtCount == 1) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DID2_USE_CASE); + + // check the DisplayId2 valid timing + pDID2Header = (DISPLAYID_2_0_DATA_BLOCK_HEADER*)pDisplayid->data; + pData_collection = pDisplayid->data; + + // Sanity check every 
data blocks + while (((pDID2Header->type >= DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY && + pDID2Header->type <= DISPLAYID_2_0_BLOCK_TYPE_BRIGHTNESS_LUMINANCE_RANGE) || + pDID2Header->type == DISPLAYID_2_0_BLOCK_TYPE_VENDOR_SPEC || + pDID2Header->type == DISPLAYID_2_0_BLOCK_TYPE_CTA_DATA) && pDID2Header->data_bytes != 0 && + (pData_collection - pExt < (int)sizeof(DIDEXTENSION))) + { + if (parseDisplayId20EDIDExtDataBlocks(pData_collection, did_section_length, &block_length, NULL) == NVT_STATUS_ERR) + { + if (pDID2Header->type == DISPLAYID_2_0_BLOCK_TYPE_TIMING_7) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DID2_TYPE7); + + if (pDID2Header->type == DISPLAYID_2_0_BLOCK_TYPE_RANGE_LIMITS) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_RANGE_LIMIT); + + if (pDID2Header->type == DISPLAYID_2_0_BLOCK_TYPE_ADAPTIVE_SYNC) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DID2_ADAPTIVE_SYNC); + // add more data blocks tag here to evaluate + } + pData_collection += block_length; + pDID2Header = (DISPLAYID_2_0_DATA_BLOCK_HEADER*)pData_collection; + } + + // compare the remain 0 value are correct or not before meet checksum byte + for (i = 0; i < (NvU32)(&pDisplayid->data[NVT_DID_MAX_EXT_PAYLOAD-1] - pData_collection); i++) + { + if (pData_collection[i] != 0) + { + bAllZero = NV_FALSE; + break; + } + } + + // if the first tag failed, ignore all the tags afterward then + if (!bAllZero && + (pDID2Header->type < DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY || + (pDID2Header->type > DISPLAYID_2_0_BLOCK_TYPE_BRIGHTNESS_LUMINANCE_RANGE && + pDID2Header->type != DISPLAYID_2_0_BLOCK_TYPE_VENDOR_SPEC && + pDID2Header->type != DISPLAYID_2_0_BLOCK_TYPE_CTA_DATA)) && + (pData_collection - pExt < (int)sizeof(DIDEXTENSION))) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DID2_TAG); + continue; + } + } + else if ((pDisplayid->struct_version & 0xFF) == 0x12 || (pDisplayid->struct_version & 0xFF) == 0x13) + { + if ((pDisplayid->struct_version & 0xFF) == 0x13) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DID_VERSION); + + pHeader = (DISPLAYID_DATA_BLOCK_HEADER*)pDisplayid->data; + pData_collection = pDisplayid->data; + + // Sanity check every data blocks + while ((pHeader->type <= NVT_DISPLAYID_BLOCK_TYPE_TILEDDISPLAY || + pHeader->type == NVT_DISPLAYID_BLOCK_TYPE_CTA_DATA || + pHeader->type == NVT_DISPLAYID_BLOCK_TYPE_VENDOR_SPEC) && pHeader->data_bytes != 0 && + (pData_collection - pExt < (int)sizeof(DIDEXTENSION))) + { + if (parseDisplayIdBlock(pData_collection, did_section_length, &block_length, NULL) == NVT_STATUS_ERR) + { + if (pHeader->type == NVT_DISPLAYID_BLOCK_TYPE_TIMING_1) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DID13_TYPE1); + + if (pHeader->type == NVT_DISPLAYID_BLOCK_TYPE_RANGE_LIMITS) + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_RANGE_LIMIT); + + // add more data blocks tag here to evaluate + } + pData_collection += block_length; + pHeader = (DISPLAYID_DATA_BLOCK_HEADER*)pData_collection; + } + + // compare the remain 0 value are correct or not before meet checksum byte + for (i = 0; i < (NvU32)(&pDisplayid->data[NVT_DID_MAX_EXT_PAYLOAD-1] - pData_collection); i++) + { + if (pData_collection[i] != 0) + { + bAllZero = NV_FALSE; + break; + } + } + + // if the first tag failed, ignore all the tags afterward then + if (!bAllZero && + pHeader->type > NVT_DISPLAYID_BLOCK_TYPE_TILEDDISPLAY && + pHeader->type != NVT_DISPLAYID_BLOCK_TYPE_CTA_DATA && + pHeader->type != 
NVT_DISPLAYID_BLOCK_TYPE_VENDOR_SPEC &&
+                        (pData_collection - pExt < (int)sizeof(DIDEXTENSION)))
+                    {
+                        ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DID13_TAG);
+                        continue;
+                    }
+                }
+                else
+                    ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DID_VERSION);
+                break;
+            default:
+                // the only useful extensions here are CTA (0x02) and DisplayID (0x70)
+                if ( *pExt != NVT_EDID_EXTENSION_VTB && *pExt != NVT_EDID_EXTENSION_DI &&
+                     *pExt != NVT_EDID_EXTENSION_LS && *pExt != NVT_EDID_EXTENSION_DPVL &&
+                     *pExt != NVT_EDID_EXTENSION_BM && *pExt != NVT_EDID_EXTENSION_OEM )
+                {
+                    ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXTENSION_TAG);
+                }
+                break;
+        }
+    }
+
+    return ret;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_EDIDValidation (NvU8 *pEdid, NvU32 length, NvBool bIsStrongValidation)
+{
+    if (NvTiming_EDIDValidationMask(pEdid, length, bIsStrongValidation) != 0) {
+        return NVT_STATUS_ERR;
+    } else {
+        return NVT_STATUS_SUCCESS;
+    }
+}
+
+// Function Description: Get the first Detailed Timing Descriptor
+//
+// Parameters:
+//     pEdidInfo: IN  - pointer to parsed EDID
+//     pT:        OUT - pointer to where the DTD1 timing will be stored
+//
+// Return:
+//     NVT_STATUS_SUCCESS: DTD1 was found in the parsed EDID, pT is a valid result
+//     NVT_STATUS_INVALID_PARAMETER: one or more parameters were invalid
+//     NVT_STATUS_ERR: DTD1 was not found in the parsed EDID, pT is invalid
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_GetDTD1Timing (NVT_EDID_INFO * pEdidInfo, NVT_TIMING * pT)
+{
+    NvU32 j;
+
+    // check param
+    if (pEdidInfo == NULL || pT == NULL)
+    {
+        return NVT_STATUS_INVALID_PARAMETER;
+    }
+
+    // find the PTM (preferred timing mode) first
+    for (j = 0; j < pEdidInfo->total_timings; j++)
+    {
+        if (NVT_PREFERRED_TIMING_IS_DTD1(pEdidInfo->timing[j].etc.flag, pEdidInfo->timing[j].etc.status))
+        {
+            *pT = pEdidInfo->timing[j];
+            return NVT_STATUS_SUCCESS;
+        }
+    }
+
+    // find the DisplayID preferred timing
+    for (j = 1; j < pEdidInfo->total_timings; j++)
+    {
+        if (NVT_PREFERRED_TIMING_IS_DISPLAYID(pEdidInfo->timing[j].etc.flag))
+        {
+            *pT = pEdidInfo->timing[j];
+            return NVT_STATUS_SUCCESS;
+        }
+    }
+
+    // fall back to any DTD1 in the mode list
+    for (j = 0; j < pEdidInfo->total_timings; j++)
+    {
+        NvU32 data = pEdidInfo->timing[j].etc.status;
+        if (NVT_IS_DTD1(data))
+        {
+            *pT = pEdidInfo->timing[j];
+            return NVT_STATUS_SUCCESS;
+        }
+    }
+
+    // DTD1 should exist, but if it doesn't, return not found
+    return NVT_STATUS_ERR;
+}
+
+// Description: Parses a VTB extension block into its associated timings
+//
+// Parameters:
+//     pEdidExt: IN - pointer to the beginning of the extension block
+//     pInfo:    IN - the original block information, including the
+//                    array of timings.
+//
+// NOTE: this function *really* should be in its own separate file, but a certain DVS test
+// uses cross build makefiles which do not allow the specification of a new file.
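+//
+// Payload layout handled below (illustrative note): data[] packs, in order,
+// num_detailed 18-byte detailed timing descriptors, then num_cvt 3-byte CVT
+// descriptors, then num_standard 2-byte standard timing identifiers:
+//
+//     data: [DTD 0 .. DTD n-1][CVT 0 .. CVT m-1][STD 0 .. STD k-1]
+//
+// which is why the loops below advance 'bytes' by the size of each element type.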
+CODE_SEGMENT(PAGE_DD_CODE) +void parseVTBExtension(NvU8 *pEdidExt, NVT_EDID_INFO *pInfo) +{ + NvU32 i; + VTBEXTENSION *pExt = (VTBEXTENSION *)pEdidExt; + NvU32 count; + NvU32 bytes; + NVT_TIMING newTiming; + + // Null = bad idea + if (pEdidExt == NULL) + { + return; + } + + // Sanity check for VTB extension block + if (pExt->tag != NVT_EDID_EXTENSION_VTB || + pExt->revision == NVT_VTB_REV_NONE) + { + return; + } + + // Sanity check - ensure that the # of descriptor does not exceed + // byte size + count = (NvU32)sizeof(EDID_LONG_DISPLAY_DESCRIPTOR) * pExt->num_detailed + + (NvU32)sizeof(EDID_CVT_3BYTE_BLOCK) * pExt->num_cvt + + (NvU32)sizeof(NvU16) * pExt->num_standard; + if (count > NVT_VTB_MAX_PAYLOAD) + { + return; + } + + count = 0; + bytes = 0; + + // Process Detailed Timings + for (i = 0; i < pExt->num_detailed; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseEdidDetailedTimingDescriptor((NvU8 *)(pExt->data + bytes), + &newTiming) == NVT_STATUS_SUCCESS) + { + newTiming.etc.name[39] = '\0'; + newTiming.etc.status = NVT_STATUS_EDID_VTB_EXT_DTDn(++count); + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + + bytes += (NvU32)(sizeof(EDID_LONG_DISPLAY_DESCRIPTOR)); + } + } + + // Process CVT Timings + for (i = 0; i < pExt->num_cvt; i++) + { + parseEdidCvt3ByteDescriptor((NvU8 *)(pExt->data + bytes), pInfo, &count); + + bytes += (NvU32)sizeof(EDID_CVT_3BYTE_BLOCK); + } + + // Process Standard Timings + for (i = 0; i < pExt->num_standard; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + parseEdidStandardTimingDescriptor(*(NvU16 *)(pExt->data + bytes), + pInfo, count, &newTiming); + newTiming.etc.name[39] = '\0'; + newTiming.etc.status = NVT_STATUS_EDID_VTB_EXT_STDn(++count); + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + + bytes += (NvU32)sizeof(NvU16); + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +static int IsPrintable(NvU8 c) +{ + return ((c >= ' ') && (c <= '~')); +} + +CODE_SEGMENT(PAGE_DD_CODE) +static int IsWhiteSpace(NvU8 c) +{ + // consider anything unprintable or single space (ASCII 32) + // to be whitespace + return (!IsPrintable(c) || (c == ' ')); +} + +CODE_SEGMENT(PAGE_DD_CODE) +static void RemoveTrailingWhiteSpace(NvU8 *str, int len) +{ + int i; + + for (i = len; (i >= 0) && IsWhiteSpace(str[i]); i--) + { + str[i] = '\0'; + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +static void RemoveNonPrintableCharacters(NvU8 *str) +{ + int i; + + // Check that all characters are printable. + // If not, replace them with '?' + for (i = 0; str[i] != '\0'; i++) + { + if (!IsPrintable(str[i])) + { + str[i] = '?'; + } + } +} + +/** + * @brief Assigns this timing to the next available slot in pInfo->timing[] if + * possible. 
+ * @param pInfo EDID struct containing the parsed timings + * @param pTiming New timing to be copied into pInfo->timing[] + */ +CODE_SEGMENT(PAGE_DD_CODE) +NvBool assignNextAvailableTiming(NVT_EDID_INFO *pInfo, + const NVT_TIMING *pTiming) +{ + if (pInfo == NULL) return NV_TRUE; + + // Don't write past the end of + // pInfo->timing[NVT_EDID_MAX_TOTAL_TIMING] + if (pInfo->total_timings >= COUNT(pInfo->timing)) { + return NV_FALSE; + } + + pInfo->timing[pInfo->total_timings++] = *pTiming; + return NV_TRUE; +} + +/** + * @brief Return the nth highest priority index based on the different SVR + * @param svr Short Video Reference + */ +CODE_SEGMENT(PAGE_DD_CODE) +NvU8 getHighestPrioritySVRIdx(const NVT_EDID_CEA861_INFO *pExt861) +{ + // In general, sink shall define the first one timing sequence + NvU8 kth = 1; + NvU8 i = 0; + + for (i = 0; i < pExt861->total_svr; i++) + { + NvU8 svr = pExt861->svr_vfpdb[i]; + + // Reserved + if (svr == 0 || svr == 128 || (svr >= 176 && svr <= 192) || svr == 255) + continue; + + if (svr >= 129 && svr <= 144) return svr - 128; // Interpret as the Kth 18-byte DTD in both base0 and CTA block (for N = 1 to 16) + else if (svr >= 145 && svr <= 160) return svr - 144; // Interpret as the Nth 20-byte DTD or 6- or 7-byte CVT-based descriptor. (for N = 1 to 16) + else if (svr >= 161 && svr <= 175) return svr - 160; // Interpret as the video format indicated by the first VFD of the first VFDB with Frame Rates of Rate Index N (for N = 1 to 15) + else if (svr == 254) return kth; // Interpret as the timing format indicated by the first code of the first T8VTDB (for N = 1) + else // assign corresponding CTA format's timing from pre-defined CE timing table, EIA861B + { + // ( SVR >= 1 and SVR <= 127) and (SVR >= 193 and SVR <= 253) needs to handle it by client + return svr; + } + } + + return 0; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_GetProductName(const NVT_EDID_INFO *pEdidInfo, + NvU8 *pProductName, + const NvU32 productNameLength) +{ + NvU32 i = 0, m = 0, n = 0; + + if( pEdidInfo == NULL || pProductName == NULL ) + { + return NVT_STATUS_INVALID_PARAMETER; + } + + for ( i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++) + { + if (pEdidInfo->ldd[i].tag == NVT_EDID_DISPLAY_DESCRITPOR_DPN) + { + for(n = 0; n < NVT_EDID_LDD_PAYLOAD_SIZE && pEdidInfo->ldd[i].u.product_name.str[n] != 0x0; n++) + { + pProductName[m++] = pEdidInfo->ldd[i].u.product_name.str[n]; + if ((m + 1) >= productNameLength) + { + goto done; + } + } + } + } +done: + pProductName[m] = '\0'; //Ensure a null termination at the end. + + RemoveTrailingWhiteSpace(pProductName, m); + RemoveNonPrintableCharacters(pProductName); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_CalculateEDIDCRC32(NvU8* pEDIDBuffer, NvU32 edidsize) +{ + return calculateCRC32(pEDIDBuffer, edidsize); +} + +//Calculates EDID/DisplayID2 CRC after purging 'Week of Manufacture', 'Year of Manufacture', +//'Product ID String' & 'Serial Number' from EDID +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_CalculateCommonEDIDCRC32(NvU8* pEDIDBuffer, NvU32 edidVersion) +{ + NvU32 commonEDIDBufferSize = 0; + NvU8 CommonEDIDBuffer[256]; + NvU32 edidBufferIndex = 0; + + if(pEDIDBuffer==NULL) + { + return 0; + } + + // Transfer over the original EDID buffer + NVMISC_MEMCPY(CommonEDIDBuffer, pEDIDBuffer, 256); + + if ((pEDIDBuffer[0] & 0xF0) == 0x20) + { + /* + typedef struct DisplayId2Struct + { + NvU8 bVersion; // 0x00 + NvU8 bSectionBytes; // 0x01 - section length, exclusive the five mandatory bytes. 
+        NvU8  bwPrimaryUseCase;          // 0x02
+        NvU8  bExtensionCount;           // 0x03
+        // 0x20 DisplayId2 standalone always carries a Product Identification data block
+        NvU8  bProductIdtag;             // 0x04
+        NvU8  bPIDRevision;              // 0x05
+        NvU8  bPayloadByte;              // 0x06
+        NvU8  bManuId[3];                // 0x07-0x09
+        NvU16 wProductId;                // 0x0A-0x0B
+        NvU32 dwSerialNum;               // 0x0C-0x0F
+        NvU16 wWeekandYear;              // 0x10-0x11
+        NvU8  SizeOfProductNameString;   // 0x12
+    } DISPLAY_ID2_FIXED_FORMAT;
+    */
+
+        // Wipe out the Serial Number, Week of Manufacture, and Year of Manufacture or Model Year
+        NVMISC_MEMSET(CommonEDIDBuffer + 0x0C, 0, 6);
+
+        // Wipe out the checksums
+        CommonEDIDBuffer[CommonEDIDBuffer[1]+5/*mandatory bytes*/-1] = 0;
+        CommonEDIDBuffer[0xFF] = 0;
+
+        // zero out any Product Name in the Product Identification data block
+        if (CommonEDIDBuffer[0x12] != 0)
+        {
+            NVMISC_MEMSET(CommonEDIDBuffer + 0x13, 0, CommonEDIDBuffer[0x12]);
+            CommonEDIDBuffer[0x12] = 0;
+        }
+
+        // DisplayId2 standalone uses 256-byte sections
+        commonEDIDBufferSize = 256;
+    }
+    else
+    {
+        // Wipe out the Serial Number, Week of Manufacture, and Year of Manufacture or Model Year
+        NVMISC_MEMSET(CommonEDIDBuffer + 0x0C, 0, 6);
+
+        // Wipe out the checksums
+        CommonEDIDBuffer[0x7F] = 0;
+        CommonEDIDBuffer[0xFF] = 0;
+
+        // We also need to zero out any "EDID Other Monitor Descriptors" (https://en.wikipedia.org/wiki/Extended_display_identification_data)
+        for (edidBufferIndex = 54; edidBufferIndex <= 108; edidBufferIndex += 18)
+        {
+            if (CommonEDIDBuffer[edidBufferIndex] == 0 && CommonEDIDBuffer[edidBufferIndex+1] == 0)
+            {
+                // Wipe this block out. It contains OEM-specific details such as serial numbers
+                NVMISC_MEMSET(CommonEDIDBuffer + edidBufferIndex, 0, 18);
+            }
+        }
+
+        // a base EDID compares against the 128-byte block size
+        commonEDIDBufferSize = 128;
+    }
+
+    return NvTiming_CalculateEDIDCRC32(CommonEDIDBuffer, commonEDIDBufferSize);
+} // NvTiming_CalculateCommonEDIDCRC32
+
+// Calculate the minimum and maximum v_rate and h_rate, as well as
+// maximum pclk; initialize with the range of values in the EDID mode
+// list, but override with what is in the range limit descriptor section.
+// +// based on drivers/modeset.nxt/CODE/edid.c:EdidGetMonitorLimits() and +// EdidBuildRangeLimits() +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalculateEDIDLimits(NVT_EDID_INFO *pEdidInfo, NVT_EDID_RANGE_LIMIT *pLimit) +{ + NvU32 i, pclk10khz; + + NVMISC_MEMSET(pLimit, 0, sizeof(NVT_EDID_RANGE_LIMIT)); + + // the below currently only supports 1.x EDIDs + if ((pEdidInfo->version & 0xFF00) != 0x100) + { + return NVT_STATUS_ERR; + } + + pLimit->min_v_rate_hzx1k = ~0; + pLimit->max_v_rate_hzx1k = 0; + pLimit->min_h_rate_hz = ~0; + pLimit->max_h_rate_hz = 0; + pLimit->max_pclk_10khz = 0; + + // find the ranges in the EDID mode list + for (i = 0; i < pEdidInfo->total_timings; i++) + { + NVT_TIMING *pTiming = &pEdidInfo->timing[i]; + NvU32 h_rate_hz; + + if (pLimit->min_v_rate_hzx1k > pTiming->etc.rrx1k) + { + pLimit->min_v_rate_hzx1k = pTiming->etc.rrx1k; + } + if (pLimit->max_v_rate_hzx1k < pTiming->etc.rrx1k) + { + pLimit->max_v_rate_hzx1k = pTiming->etc.rrx1k; + } + + h_rate_hz = axb_div_c(pTiming->pclk1khz, 1000, (NvU32)pTiming->HTotal); + + if (pLimit->min_h_rate_hz > h_rate_hz) + { + pLimit->min_h_rate_hz = h_rate_hz; + } + if (pLimit->max_h_rate_hz < h_rate_hz) + { + pLimit->max_h_rate_hz = h_rate_hz; + } + + pclk10khz = (pTiming->pclk1khz + 5 ) / 10; + + if (pLimit->max_pclk_10khz < pclk10khz) + { + pLimit->max_pclk_10khz = pclk10khz; + } + } + + // use the range limit display descriptor, if available: these + // override anything we found in the EDID mode list + for (i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++) + { + if (pEdidInfo->ldd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_DRL) + { + NVT_EDID_DD_RANGE_LIMIT *pRangeLimit = &pEdidInfo->ldd[i].u.range_limit; + NvU32 max_pclk_10khz; + + // {min,max}_v_rate is in hz + if (pRangeLimit->min_v_rate != 0) { + pLimit->min_v_rate_hzx1k = pRangeLimit->min_v_rate * 1000; + } + if (pRangeLimit->max_v_rate != 0) { + pLimit->max_v_rate_hzx1k = pRangeLimit->max_v_rate * 1000; + } + + // {min,max}_h_rate is in khz + if (pRangeLimit->min_h_rate != 0) { + pLimit->min_h_rate_hz = pRangeLimit->min_h_rate * 1000; + } + if (pRangeLimit->max_h_rate != 0) { + pLimit->max_h_rate_hz = pRangeLimit->max_h_rate * 1000; + } + + // EdidGetMonitorLimits() honored the pclk from the + // modelist over what it found in the range limit + // descriptor, so do the same here + max_pclk_10khz = pRangeLimit->max_pclk_MHz * 100; + if (pLimit->max_pclk_10khz < max_pclk_10khz) { + pLimit->max_pclk_10khz = max_pclk_10khz; + } + + break; + } + } + + return NVT_STATUS_SUCCESS; +} + +// Build a user-friendly name: +// +// * get the vendor name: +// * use the 3 character PNP ID from the EDID's manufacturer ID field +// * expand, if possible, the PNP ID using the PNPVendorIds[] table +// * get the product name from the descriptor block(s) +// * prepend the vendor name and the product name, unless the product +// name already contains the vendor name +// * if any characters in the string are outside the printable ASCII +// range, replace them with '?' + +#define tolower(c) (((c) >= 'A' && (c) <= 'Z') ? 
(c) + ('a'-'A') : (c)) + +CODE_SEGMENT(PAGE_DD_CODE) +void NvTiming_GetMonitorName(NVT_EDID_INFO *pEdidInfo, + NvU8 monitor_name[NVT_EDID_MONITOR_NAME_STRING_LENGTH]) +{ + NvU8 product_name[NVT_EDID_MONITOR_NAME_STRING_LENGTH]; + const NvU8 *vendor_name; + NVT_STATUS status; + NvU32 i, j; + NvBool prepend_vendor; + + NVMISC_MEMSET(monitor_name, 0, NVT_EDID_MONITOR_NAME_STRING_LENGTH); + + // get vendor_name: it is either the manufacturer ID or the PNP vendor name + vendor_name = pEdidInfo->manuf_name; + + for (i = 0; i < (sizeof(PNPVendorIds)/sizeof(PNPVendorIds[0])); i++) + { + if ((vendor_name[0] == PNPVendorIds[i].vendorId[0]) && + (vendor_name[1] == PNPVendorIds[i].vendorId[1]) && + (vendor_name[2] == PNPVendorIds[i].vendorId[2])) + { + vendor_name = (const NvU8 *) PNPVendorIds[i].vendorName; + break; + } + } + + // get the product name from the descriptor blocks + status = NvTiming_GetProductName(pEdidInfo, product_name, sizeof(product_name)); + + if (status != NVT_STATUS_SUCCESS) + { + product_name[0] = '\0'; + } + + // determine if the product name already includes the vendor name; + // if so, do not prepend the vendor name to the monitor name + prepend_vendor = NV_TRUE; + + for (i = 0; i < NVT_EDID_MONITOR_NAME_STRING_LENGTH; i++) + { + if (vendor_name[i] == '\0') + { + prepend_vendor = NV_FALSE; + break; + } + + if (tolower(product_name[i]) != tolower(vendor_name[i])) + { + break; + } + } + + j = 0; + + // prepend the vendor name to the monitor name + if (prepend_vendor) + { + for (i = 0; (i < NVT_EDID_MONITOR_NAME_STRING_LENGTH) && (vendor_name[i] != '\0'); i++) + { + monitor_name[j++] = vendor_name[i]; + } + } + + // if we added the vendor name above, add a space between the + // vendor name and the product name + if ((j > 0) && (j < (NVT_EDID_MONITOR_NAME_STRING_LENGTH - 1))) + { + monitor_name[j++] = ' '; + } + + // append the product name to the monitor string + for (i = 0; (i < NVT_EDID_MONITOR_NAME_STRING_LENGTH) && (product_name[i] != '\0'); i++) + { + if (j >= (NVT_EDID_MONITOR_NAME_STRING_LENGTH - 1)) + { + break; + } + monitor_name[j++] = product_name[i]; + } + monitor_name[j] = '\0'; + + RemoveTrailingWhiteSpace(monitor_name, j); + RemoveNonPrintableCharacters(monitor_name); +} + +CODE_SEGMENT(PAGE_DD_CODE) +void updateHDMILLCDeepColorForTiming(NVT_EDID_INFO *pInfo, NvU32 index) +{ + NVT_EDID_CEA861_INFO *p861Info = &pInfo->ext861; + // NOTE: EDID and CEA861 does not have clear statement regarding this. + // To be backward compatible with current Nvidia implementation, if not edid >= 1.4 and CEA block exists, follow color format declaration from CEA block. 
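+    //
+    // Reading of the UPDATE_BPC_FOR_COLORFORMAT() calls below (the macro itself
+    // is defined elsewhere; this mapping is inferred from the arguments): after
+    // the format field, the arguments appear to select 6/8/10/12/14/16 bpc in
+    // order, so e.g. for rgb444: 8bpc is always on, 10bpc follows dc_30_bit,
+    // 12bpc follows dc_36_bit, and 16bpc follows dc_48_bit.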
+    // update supported color space within each bpc
+    // rgb 8bpc is always supported
+
+    UPDATE_BPC_FOR_COLORFORMAT(pInfo->timing[index].etc.rgb444, 0, 1,
+                               pInfo->hdmiLlcInfo.dc_30_bit,
+                               pInfo->hdmiLlcInfo.dc_36_bit,
+                               0, pInfo->hdmiLlcInfo.dc_48_bit);
+
+    if (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_444)
+    {
+        // pHdmiLlc->dc_y444 assumes the basic cap is set; when the base cap is set, 8bpc yuv444 is always supported
+        UPDATE_BPC_FOR_COLORFORMAT(pInfo->timing[index].etc.yuv444, 0, 1,
+                                   pInfo->hdmiLlcInfo.dc_y444 && pInfo->hdmiLlcInfo.dc_30_bit,
+                                   pInfo->hdmiLlcInfo.dc_y444 && pInfo->hdmiLlcInfo.dc_36_bit,
+                                   0, pInfo->hdmiLlcInfo.dc_y444 && pInfo->hdmiLlcInfo.dc_48_bit);
+    }
+    if (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_422)
+    {
+        // pHdmiLlc->dc_y444 assumes the basic cap is set; when the base cap is set, 8bpc yuv422 is always supported
+        // newer CEA861/HDMI specs suggest the base cap should support both or neither (Nvidia puts no limitations here)
+        // HDMI1.4b spec Section 6.2.4 (Color Depth Requirements) states that the YCbCr 4:2:2 format is 36-bit mode,
+        // which means 8, 10 and 12bpc output is supported as long as there is enough bandwidth
+        UPDATE_BPC_FOR_COLORFORMAT(pInfo->timing[index].etc.yuv422, 0, 1, 1, 1, 0, 0);
+    }
+}
+
+POP_SEGMENTS
diff --git a/src/common/modeset/timing/nvt_edidext_861.c b/src/common/modeset/timing/nvt_edidext_861.c
new file mode 100644
index 0000000..e09c95a
--- /dev/null
+++ b/src/common/modeset/timing/nvt_edidext_861.c
@@ -0,0 +1,4015 @@
+//*****************************************************************************
+//
+// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+//
+// File: nvt_edidext_861.c
+//
+// Purpose: provide EDID 861 extension related services
+//
+//*****************************************************************************
+
+#include "nvBinSegment.h"
+#include "nvmisc.h"
+
+#include "edid.h"
+
+
+
+PUSH_SEGMENTS
+
+#define EIA_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rrx1k,ip,aspect,rep,format) \
+    {hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',(ip)=='i' ? NVT_INTERLACED:NVT_PROGRESSIVE,\
+    0,0,{0,((rrx1k)+500)/1000,rrx1k,((1?aspect)<<16)|(0?aspect),rep,{0},{0},{0},{0},NVT_STATUS_EDID_861STn(format),"CEA-861B:#"#format""}}
+
+#define NVT_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rrx1k,ip,aspect,rep,format,name) \
+    {hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',(ip)=='i' ?
NVT_INTERLACED:NVT_PROGRESSIVE,\ + 0,0,{0,((rrx1k)+500)/1000,rrx1k,((1?aspect)<<16)|(0?aspect),rep,{0},{0},{0},{0},NVT_TYPE_NV_PREDEFINEDn(format),name}} + +#define HDMI_EXT_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rrx1k,ip,aspect,rep,format,name) \ + {hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',(ip)=='i' ? NVT_INTERLACED:NVT_PROGRESSIVE,\ + 0,0,{0,((rrx1k)+500)/1000,rrx1k,((1?aspect)<<16)|(0?aspect),rep,{0},{0},{0},{0},NVT_STATUS_HDMI_EXTn(format),name}} + +#define RID_MODE(hv, hsp, vv, vsp, ip, aspect, rid) \ + {hv, (hsp)=='-', vv, (vsp)=='-',(ip)=='i'? NVT_INTERLACED:NVT_PROGRESSIVE,((1?aspect)<<16)|(0?aspect), rid} +DATA_SEGMENT(PAGE_DATA) +CONS_SEGMENT(PAGE_CONS) + +static const NVT_TIMING EIA861B[]= +{ + // all 64 EIA/CEA-861E timings + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 1),//640 x 480p @59.94/60 (Format 1) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-', 59940,'p', 4:3, 0x1, 2),//720 x 480p @59.94/60 (Format 2) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-', 59940,'p',16:9, 0x1, 3),//720 x 480p @59.94/60 (Format 3) + EIA_TIMING(1280, 110, 40,1650,'+', 720, 5,5, 750,'+', 59940,'p',16:9, 0x1, 4),//1280 x 720p @59.94/60 (Format 4) + EIA_TIMING(1920, 88, 44,2200,'+', 540, 2,5, 562,'+', 59940,'i',16:9, 0x1, 5),//1920 x 1080i @59.94/60 (Format 5) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-', 59940,'i', 4:3, 0x2, 6),//720(1440) x 480i @59.94/60 (Format 6) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-', 59940,'i',16:9, 0x2, 7),//720(1440) x 480i @59.94/60 (Format 7) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 263,'-', 59940,'p', 4:3, 0x2, 8),//720(1440) x 240p @59.94/60 (Format 8) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 263,'-', 59940,'p',16:9, 0x2, 9),//720(1440) x 240p @59.94/60 (Format 9) + EIA_TIMING(2880, 76,248,3432,'-', 240, 4,3, 262,'-', 59940,'i', 4:3, 0x3ff,10),//(2880) x 480i @59.94/60 (Format 10) + EIA_TIMING(2880, 76,248,3432,'-', 240, 4,3, 262,'-', 59940,'i',16:9, 0x3ff,11),//(2880) x 480i @59.94/60 (Format 11) + EIA_TIMING(2880, 76,248,3432,'-', 240, 5,3, 263,'-', 59940,'p', 4:3, 0x3ff,12),//(2880) x 480p @59.94/60 (Format 12) + EIA_TIMING(2880, 76,248,3432,'-', 240, 5,3, 263,'-', 59940,'p',16:9, 0x3ff,13),//(2880) x 480p @59.94/60 (Format 13) + EIA_TIMING(1440, 32,124,1716,'-', 480, 9,6, 525,'-', 59940,'p', 4:3, 0x3,14),//1440 x 480p @59.94/60 (Format 14) + EIA_TIMING(1440, 32,124,1716,'-', 480, 9,6, 525,'-', 59940,'p',16:9, 0x3,15),//1440 x 480p @59.94/60 (Format 15) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4,5,1125,'+', 59940,'p',16:9, 0x1,16),//1920 x 1080p @59.94/60 (Format 16) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-', 50000,'p', 4:3, 0x1,17),//720 x 576p @50 (Format 17) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-', 50000,'p',16:9, 0x1,18),//720 x 576p @50 (Format 18) + EIA_TIMING(1280, 440, 40,1980,'+', 720, 5,5, 750,'+', 50000,'p',16:9, 0x1,19),//1280 x 720p @50 (Format 19) + EIA_TIMING(1920, 528, 44,2640,'+', 540, 2,5, 562,'+', 50000,'i',16:9, 0x1,20),//1920 x 1080i @50 (Format 20) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-', 50000,'i', 4:3, 0x2,21),//720(1440) x 576i @50 (Format 21) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-', 50000,'i',16:9, 0x2,22),//720(1440) x 576i @50 (Format 22) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-', 50000,'p', 4:3, 0x2,23),//720(1440) x 288p @50 (Format 23) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-', 50000,'p',16:9, 0x2,24),//720(1440) x 288p @50 (Format 24) + EIA_TIMING(2880, 
48,252,3456,'-', 288, 2,3, 312,'-', 50000,'i', 4:3, 0x3ff,25),//(2880) x 576i @50 (Format 25) + EIA_TIMING(2880, 48,252,3456,'-', 288, 2,3, 312,'-', 50000,'i',16:9, 0x3ff,26),//(2880) x 576i @50 (Format 26) + EIA_TIMING(2880, 48,252,3456,'-', 288, 2,3, 312,'-', 50000,'p', 4:3, 0x3ff,27),//(2880) x 288p @50 (Format 27) + EIA_TIMING(2880, 48,252,3456,'-', 288, 2,3, 312,'-', 50000,'p',16:9, 0x3ff,28),//(2880) x 288p @50 (Format 28) + EIA_TIMING(1440, 24,128,1728,'-', 576, 5,5, 625,'_', 50000,'p', 4:3, 0x3,29),//1440 x 576p @50 (Format 29) + EIA_TIMING(1440, 24,128,1728,'-', 576, 5,5, 625,'_', 50000,'p',16:9, 0x3,30),//1440 x 576p @50 (Format 30) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4,5,1125,'+', 50000,'p',16:9, 0x1,31),//1920 x 1080p @50 (Format 31) + EIA_TIMING(1920, 638, 44,2750,'+',1080, 4,5,1125,'+', 23976,'p',16:9, 0x1,32),//1920 x 1080p @23.97/24 (Format 32) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4,5,1125,'+', 25000,'p',16:9, 0x1,33),//1920 x 1080p @25 (Format 33) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4,5,1125,'+', 29970,'p',16:9, 0x1,34),//1920 x 1080p @29.97/30 (Format 34) + EIA_TIMING(2880, 64,248,3432,'-', 480, 9,6, 525,'-', 59940,'p', 4:3, 0x7,35),//(2880) x 480p @59.94/60 (Format 35) + EIA_TIMING(2880, 64,248,3432,'-', 480, 9,6, 525,'-', 59940,'p',16:9, 0x7,36),//(2880) x 480p @59.94/60 (Format 36) + EIA_TIMING(2880, 48,256,3456,'-', 576, 5,5, 625,'-', 50000,'p', 4:3, 0x7,37),//(2880) x 576p @50 (Format 37) + EIA_TIMING(2880, 48,256,3456,'-', 576, 5,5, 625,'-', 50000,'p',16:9, 0x7,38),//(2880) x 576p @50 (Format 38) + EIA_TIMING(1920, 32,168,2304,'+', 540,23,5, 625,'-', 50000,'i',16:9, 0x1,39),//1920 x 1080i @50 (Format 39) + EIA_TIMING(1920, 528, 44,2640,'+', 540, 2,5, 562,'+',100000,'i',16:9, 0x1,40),//1920 x 1080i @100 (Format 40) + EIA_TIMING(1280, 440, 40,1980,'+', 720, 5,5, 750,'+',100000,'p',16:9, 0x1,41),//1280 x 720p @100 (Format 41) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-',100000,'p', 4:3, 0x1,42),//720 x 576p @100 (Format 42) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-',100000,'p',16:9, 0x1,43),//720 x 576p @100 (Format 43) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-',100000,'i', 4:3, 0x2,44),//720(1440) x 576i @100 (Format 44) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-',100000,'i',16:9, 0x2,45),//720(1440) x 576i @100 (Format 45) + EIA_TIMING(1920, 88, 44,2200,'+', 540, 2,5, 562,'+',119880,'i',16:9, 0x1,46),//1920 x 1080i @119.88/120 (Format 46) + EIA_TIMING(1280, 110, 40,1650,'+', 720, 5,5, 750,'+',119880,'p',16:9, 0x1,47),//1280 x 720p @119.88/120 (Format 47) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-',119880,'p', 4:3, 0x1,48),//720 x 480p @119.88/120 (Format 48) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-',119880,'p',16:9, 0x1,49),//720 x 480p @119.88/120 (Format 49) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-',119880,'i', 4:3, 0x2,50),//720(1440) x 480i @119.88/120 (Format 50) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-',119880,'i',16:9, 0x2,51),//720(1440) x 480i @119.88/120 (Format 51) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-',200000,'p', 4:3, 0x1,52),//720 x 576p @200 (Format 52) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-',200000,'p',16:9, 0x1,53),//720 x 576p @200 (Format 53) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-',200000,'i', 4:3, 0x2,54),//720(1440) x 576i @200 (Format 54) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-',200000,'i',16:9, 0x2,55),//720(1440) x 576i @200 (Format 55) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 
525,'-',239760,'p', 4:3, 0x1,56),//720 x 480p @239.76/240 (Format 56) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-',239760,'p',16:9, 0x1,57),//720 x 480p @239.76/240 (Format 57) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-',239760,'i', 4:3, 0x2,58),//720(1440) x 480i @239.76/240 (Format 58) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-',239760,'i',16:9, 0x2,59),//720(1440) x 480i @239.76/240 (Format 59) + EIA_TIMING(1280,1760, 40,3300,'+', 720, 5,5, 750,'+',23976, 'p',16:9, 0x1,60),//1280 x 720p @23.97/24 (Format 60) + EIA_TIMING(1280,2420, 40,3960,'+', 720, 5,5, 750,'+',25000, 'p',16:9, 0x1,61),//1280 x 720p @25 (Format 61) + EIA_TIMING(1280,1760, 40,3300,'-', 720, 5,5, 750,'+',29970, 'p',16:9, 0x1,62),//1280 x 720p @29.97/30 (Format 62) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4,5,1125,'+',119880,'p',16:9, 0x1,63),//1920 x 1080p @119.88/120 (Format 63) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4,5,1125,'+',100000,'p',16:9, 0x1,64),//1920 x 1080p @100 (Format 64) + // Following modes are from CEA-861F + EIA_TIMING(1280,1760, 40,3300,'+', 720, 5, 5, 750,'+', 23976,'p', 64:27, 0x1, 65),//1280 x 720p @23.98/24 (Format 65) + EIA_TIMING(1280,2420, 40,3960,'+', 720, 5, 5, 750,'+', 25000,'p', 64:27, 0x1, 66),//1280 x 720p @25 (Format 66) + EIA_TIMING(1280,1760, 40,3300,'+', 720, 5, 5, 750,'+', 29970,'p', 64:27, 0x1, 67),//1280 x 720p @29.97/30 (Format 67) + EIA_TIMING(1280, 440, 40,1980,'+', 720, 5, 5, 750,'+', 50000,'p', 64:27, 0x1, 68),//1280 x 720p @50 (Format 68) + EIA_TIMING(1280, 110, 40,1650,'+', 720, 5, 5, 750,'+', 59940,'p', 64:27, 0x1, 69),//1280 x 720p @59.94/60 (Format 69) + EIA_TIMING(1280, 440, 40,1980,'+', 720, 5, 5, 750,'+',100000,'p', 64:27, 0x1, 70),//1280 x 720p @100 (Format 70) + EIA_TIMING(1280, 110, 40,1650,'+', 720, 5, 5, 750,'+',119880,'p', 64:27, 0x1, 71),//1280 x 720p @119.88/120 (Format 71) + EIA_TIMING(1920, 638, 44,2750,'+',1080, 4, 5,1125,'+', 23976,'p', 64:27, 0x1, 72),//1920 x1080p @23.98/24 (Format 72) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4, 5,1125,'+', 25000,'p', 64:27, 0x1, 73),//1920 x1080p @25 (Format 73) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4, 5,1125,'+', 29970,'p', 64:27, 0x1, 74),//1920 x1080p @29.97/30 (Format 74) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4, 5,1125,'+', 50000,'p', 64:27, 0x1, 75),//1920 x1080p @50 (Format 75) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4, 5,1125,'+', 59940,'p', 64:27, 0x1, 76),//1920 x1080p @59.94/60 (Format 76) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4, 5,1125,'+',100000,'p', 64:27, 0x1, 77),//1920 x1080p @100 (Format 77) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4, 5,1125,'+',119880,'p', 64:27, 0x1, 78),//1920 x1080p @119.88/120 (Format 78) + EIA_TIMING(1680,1360, 40,3300,'+', 720, 5, 5, 750,'+', 23976,'p', 64:27, 0x1, 79),//1680 x 720p @23.98/24 (Format 79) + EIA_TIMING(1680,1228, 40,3168,'+', 720, 5, 5, 750,'+', 25000,'p', 64:27, 0x1, 80),//1680 x 720p @25 (Format 80) + EIA_TIMING(1680, 700, 40,2640,'+', 720, 5, 5, 750,'+', 29970,'p', 64:27, 0x1, 81),//1680 x 720p @29.97/30 (Format 81) + EIA_TIMING(1680, 260, 40,2200,'+', 720, 5, 5, 750,'+', 50000,'p', 64:27, 0x1, 82),//1680 x 720p @50 (Format 82) + EIA_TIMING(1680, 260, 40,2200,'+', 720, 5, 5, 750,'+', 59940,'p', 64:27, 0x1, 83),//1680 x 720p @59.94/60 (Format 83) + EIA_TIMING(1680, 60, 40,2000,'+', 720, 5, 5, 825,'+',100000,'p', 64:27, 0x1, 84),//1680 x 720p @100 (Format 84) + EIA_TIMING(1680, 60, 40,2000,'+', 720, 5, 5, 825,'+',119880,'p', 64:27, 0x1, 85),//1680 x 720p @119.88/120 (Format 85) + EIA_TIMING(2560, 998, 44,3750,'+',1080, 
4, 5,1100,'+', 23976,'p', 64:27, 0x1, 86),//2560 x1080p @23.98/24 (Format 86) + EIA_TIMING(2560, 448, 44,3200,'+',1080, 4, 5,1125,'+', 25000,'p', 64:27, 0x1, 87),//2560 x1080p @25 (Format 87) + EIA_TIMING(2560, 768, 44,3520,'+',1080, 4, 5,1125,'+', 29970,'p', 64:27, 0x1, 88),//2560 x1080p @29.97/30 (Format 88) + EIA_TIMING(2560, 548, 44,3300,'+',1080, 4, 5,1125,'+', 50000,'p', 64:27, 0x1, 89),//2560 x1080p @50 (Format 89) + EIA_TIMING(2560, 248, 44,3000,'+',1080, 4, 5,1100,'+', 59940,'p', 64:27, 0x1, 90),//2560 x1080p @59.94/60 (Format 90) + EIA_TIMING(2560, 218, 44,2970,'+',1080, 4, 5,1250,'+',100000,'p', 64:27, 0x1, 91),//2560 x1080p @100 (Format 91) + EIA_TIMING(2560, 548, 44,3300,'+',1080, 4, 5,1250,'+',119880,'p', 64:27, 0x1, 92),//2560 x1080p @119.88/120 (Format 92) + EIA_TIMING(3840,1276, 88,5500,'+',2160, 8,10,2250,'+', 23976,'p', 16:9, 0x1, 93),//3840 x2160p @23.98/24 (Format 93) + EIA_TIMING(3840,1056, 88,5280,'+',2160, 8,10,2250,'+', 25000,'p', 16:9, 0x1, 94),//3840 x2160p @25 (Format 94) + EIA_TIMING(3840, 176, 88,4400,'+',2160, 8,10,2250,'+', 29970,'p', 16:9, 0x1, 95),//3840 x2160p @29.97/30 (Format 95) + EIA_TIMING(3840,1056, 88,5280,'+',2160, 8,10,2250,'+', 50000,'p', 16:9, 0x1, 96),//3840 x2160p @50 (Format 96) + EIA_TIMING(3840, 176, 88,4400,'+',2160, 8,10,2250,'+', 59940,'p', 16:9, 0x1, 97),//3840 x2160p @59.94/60 (Format 97) + EIA_TIMING(4096,1020, 88,5500,'+',2160, 8,10,2250,'+', 23976,'p',256:135, 0x1, 98),//4096 x2160p @23.98/24 (Format 98) + EIA_TIMING(4096, 968, 88,5280,'+',2160, 8,10,2250,'+', 25000,'p',256:135, 0x1, 99),//4096 x2160p @25 (Format 99) + EIA_TIMING(4096, 88, 88,4400,'+',2160, 8,10,2250,'+', 29970,'p',256:135, 0x1,100),//4096 x2160p @29.97/30 (Format 100) + EIA_TIMING(4096, 968, 88,5280,'+',2160, 8,10,2250,'+', 50000,'p',256:135, 0x1,101),//4096 x2160p @50 (Format 101) + EIA_TIMING(4096, 88, 88,4400,'+',2160, 8,10,2250,'+', 59940,'p',256:135, 0x1,102),//4096 x2160p @59.94/60 (Format 102) + EIA_TIMING(3840,1276, 88,5500,'+',2160, 8,10,2250,'+', 23976,'p', 64:27, 0x1,103),//3840 x2160p @23.98/24 (Format 103) + EIA_TIMING(3840,1056, 88,5280,'+',2160, 8,10,2250,'+', 25000,'p', 64:27, 0x1,104),//3840 x2160p @25 (Format 104) + EIA_TIMING(3840, 176, 88,4400,'+',2160, 8,10,2250,'+', 29970,'p', 64:27, 0x1,105),//3840 x2160p @29.97/30 (Format 105) + EIA_TIMING(3840,1056, 88,5280,'+',2160, 8,10,2250,'+', 50000,'p', 64:27, 0x1,106),//3840 x2160p @50 (Format 106) + EIA_TIMING(3840, 176, 88,4400,'+',2160, 8,10,2250,'+', 59940,'p', 64:27, 0x1,107),//3840 x2160p @59.94/60 (Format 107) + // VIC 108-127 timings are from CTA-861-G_FINAL_revised_2018_Errata_2.pdf + EIA_TIMING(1280, 960, 40, 2500,'+', 720, 5, 5, 750,'+', 47950,'p', 16:9, 0x1,108),//1280 x 720p @47.95/48 (Format 108) + EIA_TIMING(1280, 960, 40, 2500,'+', 720, 5, 5, 750,'+', 47950,'p', 64:27, 0x1,109),//1280 x 720p @47.95/48 (Format 109) + EIA_TIMING(1680, 810, 40, 2750,'+', 720, 5, 5, 750,'+', 47950,'p', 64:27, 0x1,110),//1680 x 720p @47.95/48 (Format 110) + EIA_TIMING(1920, 638, 44, 2750,'+',1080, 4, 5,1125,'+', 47950,'p', 16:9, 0x1,111),//1920 x 1080p @47.95/48 (Format 111) + EIA_TIMING(1920, 638, 44, 2750,'+',1080, 4, 5,1125,'+', 47950,'p', 64:27, 0x1,112),//1920 x 1080p @47.95/48 (Format 112) + EIA_TIMING(2560, 998, 44, 3750,'+',1080, 4, 5,1100,'+', 47950,'p', 64:27, 0x1,113),//2560 x 1080p @47.95/48 (Format 113) + EIA_TIMING(3840,1276, 88, 5500,'+',2160, 8,10,2250,'+', 47950,'p', 16:9, 0x1,114),//3840 x 2160p @47.95/48 (Format 114) + EIA_TIMING(4096,1020, 88, 5500,'+',2160, 8,10,2250,'+', 
47950,'p',256:135, 0x1,115),//4096 x 2160p @47.95/48 (Format 115) + EIA_TIMING(3840,1276, 88, 5500,'+',2160, 8,10,2250,'+', 47950,'p', 64:27, 0x1,116),//3840 x 2160p @47.95/48 (Format 116) + EIA_TIMING(3840,1056, 88, 5280,'+',2160, 8,10,2250,'+',100000,'p', 16:9, 0x1,117),//3840 x 2160p @100 (Format 117) + EIA_TIMING(3840, 176, 88, 4400,'+',2160, 8,10,2250,'+',119880,'p', 16:9, 0x1,118),//3840 x 2160p @119.88/120 (Format 118) + EIA_TIMING(3840,1056, 88, 5280,'+',2160, 8,10,2250,'+',100000,'p', 64:27, 0x1,119),//3840 x 2160p @100 (Format 119) + EIA_TIMING(3840, 176, 88, 4400,'+',2160, 8,10,2250,'+',119880,'p', 64:27, 0x1,120),//3840 x 2160p @119.88/120 (Format 120) + EIA_TIMING(5120,1996, 88, 7500,'+',2160, 8,10,2200,'+', 23976,'p', 64:27, 0x1,121),//5120 x 2160p @23.98/24 (Format 121) + EIA_TIMING(5120,1696, 88, 7200,'+',2160, 8,10,2200,'+', 25000,'p', 64:27, 0x1,122),//5120 x 2160p @25 (Format 122) + EIA_TIMING(5120, 664, 88, 6000,'+',2160, 8,10,2200,'+', 29970,'p', 64:27, 0x1,123),//5120 x 2160p @29.97/30 (Format 123) + EIA_TIMING(5120, 746, 88, 6250,'+',2160, 8,10,2475,'+', 47950,'p', 64:27, 0x1,124),//5120 x 2160p @47.95/48 (Format 124) + EIA_TIMING(5120,1096, 88, 6600,'+',2160, 8,10,2250,'+', 50000,'p', 64:27, 0x1,125),//5120 x 2160p @50 (Format 125) + EIA_TIMING(5120, 164, 88, 5500,'+',2160, 8,10,2250,'+', 59940,'p', 64:27, 0x1,126),//5120 x 2160p @59.94/60 (Format 126) + EIA_TIMING(5120,1096, 88, 6600,'+',2160, 8,10,2250,'+',100000,'p', 64:27, 0x1,127),//5120 x 2160p @100 (Format 127) + // VIC 128-192 are Forbidden and should be never used. But to simplify the SVD access, put a default timing here. + // We can remove these after adding a function to access CEA Timings. + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 128) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 129) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 130) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 131) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 132) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 133) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 134) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 135) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 136) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 137) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 138) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 139) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 140) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 141) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 
0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 142) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 143) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 144) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 145) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 146) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 147) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 148) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 149) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 150) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 151) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 152) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 153) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 154) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 155) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 156) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 157) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 158) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 159) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 160) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 161) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 162) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 163) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 164) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 165) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 166) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 167) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 168) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 169) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 
4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 170) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 171) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 172) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 173) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 174) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 175) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 176) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 177) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 178) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 179) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 180) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 181) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 182) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 183) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 184) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 185) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 186) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 187) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 188) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 189) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 190) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 191) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 192) + // VIC 193-219 timings are from CTA-861-G_FINAL_revised_2018_Errata_2.pdf + EIA_TIMING( 5120, 164, 88, 5500,'+',2160, 8,10,2250,'+',120000,'p', 64:27,0x1,193),// 5120 x 2160p @119.88/120 (Format 193) + EIA_TIMING( 7680,2552,176,11000,'+',4320,16,20,4500,'+', 23976,'p', 16:9,0x1,194),// 7680 x 4320p @23.98/24 (Format 194) + EIA_TIMING( 7680,2352,176,10800,'+',4320,16,20,4400,'+', 25000,'p', 16:9,0x1,195),// 7680 x 4320p @25 (Format 195) + EIA_TIMING( 7680, 552,176, 9000,'+',4320,16,20,4400,'+', 29970,'p', 16:9,0x1,196),// 7680 x 4320p @29.97/30 (Format 196) + EIA_TIMING( 7680,2552,176,11000,'+',4320,16,20,4500,'+', 47950,'p', 16:9,0x1,197),// 7680 x 4320p @47.95/48 (Format 197) + 
EIA_TIMING( 7680,2352,176,10800,'+',4320,16,20,4400,'+', 50000,'p', 16:9,0x1,198),// 7680 x 4320p @50 (Format 198) + EIA_TIMING( 7680, 552,176, 9000,'+',4320,16,20,4400,'+', 59940,'p', 16:9,0x1,199),// 7680 x 4320p @59.94/60 (Format 199) + EIA_TIMING( 7680,2112,176,10560,'+',4320,16,20,4500,'+',100000,'p', 16:9,0x1,200),// 7680 x 4320p @100 (Format 200) + EIA_TIMING( 7680, 352,176, 8800,'+',4320,16,20,4500,'+',119880,'p', 16:9,0x1,201),// 7680 x 4320p @119.88/120 (Format 201) + EIA_TIMING( 7680,2552,176,11000,'+',4320,16,20,4500,'+', 23976,'p', 64:27,0x1,202),// 7680 x 4320p @23.98/24 (Format 202) + EIA_TIMING( 7680,2352,176,10800,'+',4320,16,20,4400,'+', 25000,'p', 64:27,0x1,203),// 7680 x 4320p @25 (Format 203) + EIA_TIMING( 7680, 552,176, 9000,'+',4320,16,20,4400,'+', 29970,'p', 64:27,0x1,204),// 7680 x 4320p @29.97/30 (Format 204) + EIA_TIMING( 7680,2552,176,11000,'+',4320,16,20,4500,'+', 47950,'p', 64:27,0x1,205),// 7680 x 4320p @47.95/48 (Format 205) + EIA_TIMING( 7680,2352,176,10800,'+',4320,16,20,4400,'+', 50000,'p', 64:27,0x1,206),// 7680 x 4320p @50 (Format 206) + EIA_TIMING( 7680, 552,176, 9000,'+',4320,16,20,4400,'+', 59940,'p', 64:27,0x1,207),// 7680 x 4320p @59.94/60 (Format 207) + EIA_TIMING( 7680,2112,176,10560,'+',4320,16,20,4500,'+',100000,'p', 64:27,0x1,208),// 7680 x 4320p @100 (Format 208) + EIA_TIMING( 7680, 352,176, 8800,'+',4320,16,20,4500,'+',119880,'p', 64:27,0x1,209),// 7680 x 4320p @119.88/120 (Format 209) + EIA_TIMING(10240,1492,176,12500,'+',4320,16,20,4950,'+', 23976,'p', 64:27,0x1,210),//10240 x 4320p @23.98/24 (Format 210) + EIA_TIMING(10240,2492,176,13500,'+',4320,16,20,4400,'+', 25000,'p', 64:27,0x1,211),//10240 x 4320p @25 (Format 211) + EIA_TIMING(10240, 288,176,11000,'+',4320,16,20,4500,'+', 29970,'p', 64:27,0x1,212),//10240 x 4320p @29.97/30 (Format 212) + EIA_TIMING(10240,1492,176,12500,'+',4320,16,20,4950,'+', 47950,'p', 64:27,0x1,213),//10240 x 4320p @47.95/48 (Format 213) + EIA_TIMING(10240,2492,176,13500,'+',4320,16,20,4400,'+', 50000,'p', 64:27,0x1,214),//10240 x 4320p @50 (Format 214) + EIA_TIMING(10240, 288,176,11000,'+',4320,16,20,4500,'+', 59940,'p', 64:27,0x1,215),//10240 x 4320p @59.94/60 (Format 215) + EIA_TIMING(10240,2192,176,13200,'+',4320,16,20,4500,'+',100000,'p', 64:27,0x1,216),//10240 x 4320p @100 (Format 216) + EIA_TIMING(10240, 288,176,11000,'+',4320,16,20,4500,'+',119880,'p', 64:27,0x1,217),//10240 x 4320p @119.88/120 (Format 217) + EIA_TIMING( 4096, 800, 88, 5280,'+',2160, 8,10,2250,'+',100000,'p',256:135,0x1,218),// 4096 x 2160p @100 (Format 218) + EIA_TIMING( 4096, 88, 88, 4400,'+',2160, 8,10,2250,'+',119880,'p',256:135,0x1,219),// 4096 x 2160p @119.88/120 (Format 219) + // 220-255 Reserved for the Future + // the end + EIA_TIMING(0,0,0,0,'-',0,0,0,0,'-',0,'p',4:3,0,0) +}; +static NvU32 MAX_CEA861B_FORMAT = sizeof(EIA861B)/sizeof(EIA861B[0]) - 1; + +static const NvU32 EIA861B_DUAL_ASPECT_VICS[][2] = +{ + { 2, 3 }, // 720x480p 59.94Hz/60Hz + { 4, 69 }, // 1280x720p 59.94Hz/60Hz + { 6, 7 }, // 720(1440)x480i 59.94Hz/60Hz + { 8, 9 }, // 720(1440)x240p 59.94Hz/60Hz + + { 10, 11 }, // 2880x480i 59.94Hz/60Hz + { 12, 13 }, // 2880x240p 59.94Hz/60Hz + { 14, 15 }, // 1440x480p 59.94Hz/60Hz + { 16, 76 }, // 1920x1080p 59.94Hz/60Hz + { 17, 18 }, // 720x576p 50Hz + { 19, 68 }, // 1280x720p 50Hz + + { 21, 22 }, // 720(1440)x576i 50Hz + { 23, 24 }, // 720(1440)x288p 50Hz + { 25, 26 }, // 2880x576i 50Hz + { 27, 28 }, // 2880x288p 50Hz + { 29, 30 }, // 1440x576p 50Hz + + { 31, 75 }, // 1920x1080p 50Hz + { 32, 72 }, // 1920x1080p 23.98Hz/24Hz 
+ { 33, 73 }, // 1920x1080p 25Hz + { 34, 74 }, // 1920x1080p 29.97Hz/30Hz + { 35, 36 }, // 2880x480p 59.94Hz/60Hz + { 37, 38 }, // 2880x576p 50Hz + + { 41, 70 }, // 1280x720p 100Hz + { 42, 43 }, // 720x576p 100Hz + { 44, 45 }, // 720(1440)x576i 100Hz + { 47, 71 }, // 1280x720p 119.88/120Hz + { 48, 49 }, // 720x480p 119.88/120Hz + + { 50, 51 }, // 720(1440)x480i 119.88/120Hz + { 52, 53 }, // 720x576p 200Hz + { 54, 55 }, // 720(1440)x576i 200Hz + { 56, 57 }, // 720x480p 239.76/240Hz + { 58, 59 }, // 720(1440)x480i 239.76/240Hz + + { 60, 65 }, // 1280x720p 23.98Hz/24Hz + { 61, 66 }, // 1280x720p 25Hz + { 62, 67 }, // 1280x720p 29.97Hz/30Hz + { 63, 78 }, // 1920x1080p 119.88/120Hz + { 64, 77 }, // 1920x1080p 100Hz + + { 93, 103 }, // 3840x2160p 23.98Hz/24Hz + { 94, 104 }, // 3840x2160p 25Hz + { 95, 105 }, // 3840x2160p 29.97Hz/30Hz + { 96, 106 }, // 3840x2160p 50Hz + { 97, 107 }, // 3840x2160p 59.94Hz/60Hz +}; +static NvU32 MAX_EIA861B_DUAL_ASPECT_VICS = sizeof(EIA861B_DUAL_ASPECT_VICS) / sizeof(EIA861B_DUAL_ASPECT_VICS[0]); + +static const NVT_RID_CODES RID[] = +{ + RID_MODE( 0, '+', 0, '+', 'p', 16:9 , 0), // No Resolution Identification Available + RID_MODE( 1280, '+', 720, '+', 'p', 16:9 , 1), // HD, 720p + RID_MODE( 1280, '+', 720, '+', 'p', 64:27, 2), // HD, 720p, 21:9 anamorphic + RID_MODE( 1680, '+', 720, '+', 'p', 64:27, 3), // 21:9 "1.5k" + RID_MODE( 1920, '+', 1080, '+', 'p', 16:9 , 4), // Full HD, 1080p + RID_MODE( 1920, '+', 1080, '+', 'p', 64:27, 5), // Full HD, 1080p, 21:9 anamorphic + RID_MODE( 2560, '+', 1080, '+', 'p', 64:27, 6), // 21:9 "2.5k" + RID_MODE( 3840, '+', 1080, '+', 'p', 32:9 , 7), // 32:9 "4K" + RID_MODE( 2560, '+', 1440, '+', 'p', 16:9 , 8), // QHD, 1440p + RID_MODE( 3440, '+', 1440, '+', 'p', 64:27, 9), // WQHD + RID_MODE( 5120, '+', 1440, '+', 'p', 32:9 ,10), // 32:9 5k + RID_MODE( 3840, '+', 2160, '+', 'p', 16:9 ,11), // HD "4K", 2160p + RID_MODE( 3840, '+', 2160, '+', 'p', 64:27,12), // UHD "4K", 2160p, 21:9 anamorphic + RID_MODE( 5120, '+', 2160, '+', 'p', 64:27,13), // 21:9 "5K" + RID_MODE( 7680, '+', 2160, '+', 'p', 32:9 ,14), // 32:9 "8K" + RID_MODE( 5120, '+', 2880, '+', 'p', 16:9 ,15), // 2880p + RID_MODE( 5120, '+', 2880, '+', 'p', 64:27,16), // 2880p, 21:9 anamorphic + RID_MODE( 6880, '+', 2880, '+', 'p', 64:27,17), // 21:9 "6K" + RID_MODE(10240, '+', 2880, '+', 'p', 32:9 ,18), // 32:9 "10K" + RID_MODE( 7680, '+', 4320, '+', 'p', 16:9 ,19), // UHD "8K", 4320p + RID_MODE( 7680, '+', 4320, '+', 'p', 64:27,20), // UHD "8K", 4320p, 21:9 anamorphic + RID_MODE(10240, '+', 4320, '+', 'p', 64:27,21), // 21:9 "10K" + RID_MODE(15360, '+', 4320, '+', 'p', 32:9 ,22), // 32:9 "15K" + RID_MODE(11520, '+', 6480, '+', 'p', 16:9 ,23), // UHD "12K", 6480p + RID_MODE(11520, '+', 6480, '+', 'p', 64:27,24), // UHD "12K", 6480p, 21:9 anamorphic + RID_MODE(15360, '+', 6480, '+', 'p', 64:27,25), // 21:9 "15K" + RID_MODE(15360, '+', 8640, '+', 'p', 16:9 ,26), // UHD "16K", 8640p + RID_MODE(15360, '+', 8640, '+', 'p', 64:27,27), // UHD "16K", 8640p, 21:9 anamorphic + RID_MODE(20480, '+', 8640, '+', 'p', 64:27,28) // 21:9 "20K" + // 29...63 Reserved for future +}; +static NvU32 MAX_RID_CODES_COUNT = sizeof(RID) / sizeof(RID[0]) - 1; + +// RID to VIC Mapping +static const NvU8 RID_VIC_MAP[][8] = +{ + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 60, 61, 62, 108, 19, 4, 41, 47 }, // RID 01 + { 65, 66, 67, 109, 68, 69, 70, 71 }, // RID 02 + { 79, 80, 81, 110, 82, 83, 84, 85 }, // RID 03 + { 32, 33, 34, 111, 31, 16, 64, 63 }, // RID 04 + { 72, 73, 74, 112, 75, 76, 77, 78 }, // RID 05 + { 86, 87, 88, 113, 89, 90, 91, 92 }, // RID 06 + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 93, 94, 95, 114, 96, 97, 117, 118 }, // RID 11 + { 103, 104, 105, 116, 106, 107, 119, 120 }, // RID 12 + { 121, 122, 123, 124, 125, 126, 127, 193 }, // RID 13 + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 194, 195, 196, 197, 198, 199, 200, 201 }, // RID 19 + { 202, 203, 204, 205, 206, 207, 208, 209 }, // RID 20 + { 210, 211, 212, 213, 214, 215, 216, 217 }, // RID 21 + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0 } +}; + +// All the frame rates supported in VF +static const NvU16 VF_FRAME_RATE[] = +{ + 0, 24, 25, 30, 48, 50, 60, 100, 120, 144, 200, 240, 300, 360, 400, 480 +}; +static NvU8 MAX_VF_FRAME_RATE_COUNT = sizeof(VF_FRAME_RATE) / sizeof(VF_FRAME_RATE[0]) - 1; + +static const NVT_TIMING PSF_TIMING[]= +{ + NVT_TIMING( 1920,600, 88,2750,'+', 540, 2,5,562,'+',47952,'i',16:9, 0x1, 1, "ITU-R BT.709-5:1080i/24Psf"),//1920x1080i @47.952Hz | 24/PsF | ITU-R BT.709-5 + NVT_TIMING( 1920,488, 88,2640,'+', 540, 2,5,562,'+',49950,'i',16:9, 0x1, 2, "ITU-R BT.709-5:1080i/25Psf"),//1920x1080i @49.950Hz | 25/PsF | ITU-R BT.709-5 + + // the end + EIA_TIMING(0,0,0,0,'-',0,0,0,0,'-',0,'p',4:3,0,0) +}; +static NvU32 MAX_PSF_FORMAT = sizeof(PSF_TIMING)/sizeof(PSF_TIMING[0]) - 1; + +static const NVT_TIMING HDMI_EXT_4Kx2K_TIMING[]= +{ + HDMI_EXT_TIMING( 3840, 176, 88,4400,'+', 2160, 8,10,2250,'+',29970,'p',16:9, 0x1, NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx30Hz, "HDMI EXT: 3840x2160x29.97/30hz"),//3840x2160 @29.97/30Hz VIC: 0x01 + HDMI_EXT_TIMING( 3840,1056, 88,5280,'+', 2160, 8,10,2250,'+',25000,'p',16:9, 0x1, NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx25Hz, "HDMI EXT: 3840x2160x25hz"), //3840x2160 @25Hz VIC: 0x02 + HDMI_EXT_TIMING( 3840,1276, 88,5500,'+', 2160, 8,10,2250,'+',23976,'p',16:9, 0x1, NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx24Hz, "HDMI EXT: 3840x2160x23.98/24hz"),//3840x2160 @23.98/24Hz VIC: 0x03 + HDMI_EXT_TIMING( 4096,1020, 88,5500,'+', 2160, 8,10,2250,'+',24000,'p',16:9, 0x1, NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx24Hz_SMPTE, "HDMI EXT: 4096x2160x24hzSmpte"), //4096x2160 @24Hz VIC: 0x04 + + // the end + EIA_TIMING(0,0,0,0,'-',0,0,0,0,'-',0,'p',4:3,0,0) +}; +static NvU32 MAX_HDMI_EXT_4Kx2K_FORMAT = sizeof(HDMI_EXT_4Kx2K_TIMING)/sizeof(HDMI_EXT_4Kx2K_TIMING[0]) - 1; + +// HDMI 1.4a mandatory 3D video formats. +// From HDMI 1.4a specification page 147 of 201, table 8-15, and HDMI 1.4a Compliance test specification page 190.
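+// Illustrative sketch only (not part of this table): one way a sink's
+// mandatory-3D support could be checked against the table below. The helper
+// name isMandatory3DVic is hypothetical, and the first HDMI3DDETAILS member
+// is assumed to hold the VIC.
+//
+//     static NvBool isMandatory3DVic(NvU32 vic)
+//     {
+//         NvU32 k;
+//         for (k = 0; k < MAX_HDMI_MANDATORY_3D_FORMAT; k++)
+//         {
+//             if (HDMI_MANDATORY_3D_FORMATS[k].Vic == vic) // member name assumed
+//                 return NV_TRUE;
+//         }
+//         return NV_FALSE;
+//     }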
+static const HDMI3DDETAILS HDMI_MANDATORY_3D_FORMATS[] = +{ + {32, NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK | NVT_HDMI_3D_SUPPORTED_TOPBOTTOM_MASK, 0}, // 1920 x 1080p @ 24 Hz + { 4, NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK | NVT_HDMI_3D_SUPPORTED_TOPBOTTOM_MASK, 0}, // 1280 x 720p @ 60 Hz + {19, NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK | NVT_HDMI_3D_SUPPORTED_TOPBOTTOM_MASK, 0}, // 1280 x 720p @ 50 Hz + { 5, NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEHALF_MASK, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH}, // 1920 x 1080i @ 60 Hz + {20, NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEHALF_MASK, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH} // 1920 x 1080i @ 50 Hz +}; +static NvU32 MAX_HDMI_MANDATORY_3D_FORMAT = sizeof(HDMI_MANDATORY_3D_FORMATS) / sizeof(HDMI_MANDATORY_3D_FORMATS[0]); +static const NVT_VIDEO_INFOFRAME DEFAULT_VIDEO_INFOFRAME = {/*header*/2,2,13, /*byte1*/0, /*byte2*/0x8, /*byte3*/0, /*byte4*/0, /*byte5*/0, /*byte6~13*/0,0,0,0,0,0,0,0, /*byte14~15*/0,0}; +static const NVT_AUDIO_INFOFRAME DEFAULT_AUDIO_INFOFRAME = {/*header*/4,1,10, /*byte1*/0, /*byte2*/0, /*byte3*/0, /*byte*/0, /*byte5*/0, /*byte6~10*/0,0,0,0,0}; + +CODE_SEGMENT(PAGE_DD_CODE) +static NvU8 +getExistedCTATimingSeqNumber( + NVT_EDID_INFO *pInfo, + enum NVT_TIMING_TYPE timingType) +{ + NvU8 count = 0; + NvU8 i = 0; + + switch (timingType) + { + case NVT_TYPE_CTA861_DID_T7: + case NVT_TYPE_CTA861_DID_T8: + case NVT_TYPE_CTA861_DID_T10: + case NVT_TYPE_EDID_861ST: + break; + default: + return count; + } + + for (i = 0; i< pInfo->total_timings; i++) + { + if (timingType == NVT_TYPE_EDID_861ST) + { + if (NVT_TIMING_IS_OVT(pInfo->timing[i].etc.flag)) + ++count; + } + else if (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == timingType) + { + ++count; + } + } + + return count; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NvBool isVFDRefreshRate(NvU8 vfdSize, NvU8 *vfd, NvU8 rateIdx) +{ + NvU8 rid, factor, i; + NvU16 rr; + NvBool bFR24, bFR48, bBFR50, bBFR60, bFR144, bFRFactor; + + // frame rate factor {0.5x, 1x, 2x, 4x, 6x, 8x} x 2 + const NvU8 frame_rate_factors[6] = { 1, 2, 4, 8, 12, 16 }; + + rr = VF_FRAME_RATE[rateIdx]; + factor = 0; + + rid = ((const VFD_ONE_BYTE*)vfd)->rid; + if (rid == 0) return NV_FALSE; + + bBFR50 = ((const VFD_ONE_BYTE*)vfd)->bfr50; + // frame rate factor + // If Byte 2 is not present in the VFD, flags 0.5X, 1X and BFR60 shall be considered set + bBFR60 = vfdSize > 1 ? ((const VFD_TWO_BYTE*)vfd)->bfr60 : 1; + bFRFactor = vfdSize > 1 ? ((const VFD_TWO_BYTE*)vfd)->frRate : 3; + + // individual frame rate + bFR24 = ((const VFD_ONE_BYTE*)vfd)->fr24; + if (rr == 24) return bFR24; + + // individual frame rate + bFR48 = vfdSize > 2 ? ((const VFD_THREE_BYTE*)vfd)->fr48 : 0; + if (rr == 48) return bFR48; + + // individual frame rate + bFR144 = vfdSize > 1 ? 
((const VFD_TWO_BYTE*)vfd)->fr144 : 0; + if (rr == 144) return bFR144; + + if (rr % (50/2) == 0) + { + if (!bBFR50) return NV_FALSE; + factor = rr / 25; + } + else if (rr % (60/2) == 0) + { + if (!bBFR60) return NV_FALSE; + factor = rr / 30; + } + + for (i = 0; i < COUNT(frame_rate_factors); i++) + { + if (frame_rate_factors[i] == factor) + { + if (bFRFactor & (1 << i)) + return NV_TRUE; + else + break; + } + } + + return NV_FALSE; +} + +// parse the 861 detailed timing info +CODE_SEGMENT(PAGE_DD_CODE) +void parse861ExtDetailedTiming(NvU8 *pEdidExt, + NvU8 basicCaps, + NVT_EDID_INFO *pInfo) +{ + NvU32 count = 0; + EIA861EXTENSION *pEIA861 = (EIA861EXTENSION *) pEdidExt; + DETAILEDTIMINGDESCRIPTOR *pDTD; + NVT_TIMING newTiming; + + // sanity check for CEA ext block + if ((pEIA861->tag != 0x2) || (0 == pEIA861->offset) || (NVT_CEA861_REV_NONE == pEIA861->revision)) + { + // no CEA ext block, return + return; + } + + // Get all detailed timings in CEA ext block + pDTD = (DETAILEDTIMINGDESCRIPTOR *)&pEdidExt[pEIA861->offset]; + + while((NvU8 *)pDTD + sizeof(DETAILEDTIMINGDESCRIPTOR) < (pEdidExt + sizeof(EDIDV1STRUC)) && + pDTD->wDTPixelClock != 0) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseEdidDetailedTimingDescriptor((NvU8 *)pDTD, + &newTiming) == NVT_STATUS_SUCCESS) + { + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), + "CTA-861Long:%5dx%4dx%3d.%03dHz/%s", + (int)newTiming.HVisible, + (int)((newTiming.interlaced ? 2 : 1) * newTiming.VVisible), + (int)newTiming.etc.rrx1k/1000, + (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? "I":"P")); + newTiming.etc.name[sizeof(newTiming.etc.name)-1] = '\0'; + newTiming.etc.status = NVT_STATUS_EDID_EXT_DTDn(++count); + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + pDTD ++; + } +} + +// parse the 861B short timing descriptor +CODE_SEGMENT(PAGE_DD_CODE) +void parse861bShortTiming(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag) +{ + NvU32 i; + NvU32 vic, bytePos, bitPos; + NVT_TIMING newTiming; + NVT_HDMI_FORUM_INFO *pHfvs = NULL; + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + + NvU8 *pVic = pExt861->video; + NvU32 total_svd = pExt861->total_svd; + NvU8 *pYuv420Map = pExt861->valid.y420cmdb ? pExt861->map_y420cmdb : NULL; + NvU8 yuv420MapCount = pExt861->total_y420cmdb; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + pHfvs = &pInfo->hdmiForumInfo; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 = (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + pHfvs = &pDisplayID20->vendor_specific.hfvs; + } + else + { + return; + } + + for (i = 0; i < total_svd; i++) + { + vic = NVT_GET_CTA_8BIT_VIC(pVic[i]); + + if (vic == 0 || vic > MAX_CEA861B_FORMAT) + continue; + + // assign corresponding CEA format's timing from pre-defined CE timing table, EIA861B + newTiming = EIA861B[vic-1]; + newTiming.etc.status = NVT_STATUS_EDID_861STn(vic); + + // set CEA format to location of _CEA_FORMAT. _CEA_FORMAT isn't set in pre-defined CE timing from + // EIA861B table + if (NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status) != + NVT_CEA861_640X480P_59940HZ_4X3) + { + // Although IT 640x480 video timing has a CE id, it is not a CE timing. 
See 3.1 + // "General Video Format Requirements" section in CEA-861-E spec + NVT_SET_CEA_FORMAT(newTiming.etc.status, + NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status)); + } + + // calculate the pixel clock + newTiming.pclk = RRx1kToPclk(&newTiming); + newTiming.pclk1khz = (newTiming.pclk << 3) + (newTiming.pclk << 1); // *10 + + if ((vic <= 64) && (pVic[i] & NVT_CTA861_VIDEO_NATIVE_MASK)) + { + NVT_SET_NATIVE_TIMING_FLAG(newTiming.etc.status); + } + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), + "CTA-861G:#%3d:%5dx%4dx%3d.%03dHz/%s", (int)vic, + (int)newTiming.HVisible, + (int)((newTiming.interlaced ? 2 : 1)*newTiming.VVisible), + (int)newTiming.etc.rrx1k/1000, (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? "I":"P")); + newTiming.etc.name[sizeof(newTiming.etc.name)-1] = '\0'; + + // if yuv420 is supported in the video SVDs, it is indicated by CMDB bitmap + bytePos = i / (8 * sizeof(NvU8)); + if (bytePos < yuv420MapCount) + { + bitPos = 1 << (i % (8 * sizeof(NvU8))); + if (pYuv420Map[bytePos] & bitPos) + { + // pHfvs->dcXXX are only for YCbCr420; when bitPos is set, 8bpc yuv420 always supported + UPDATE_BPC_FOR_COLORFORMAT(newTiming.etc.yuv420, 0, 1, + pHfvs->dc_30bit_420, + pHfvs->dc_36bit_420, 0, + pHfvs->dc_48bit_420); + } + } + + // Y420CMDB with L == 1, implies yuv420MapCount == 0 but all SVDs support 420 + if (pYuv420Map && yuv420MapCount == 0) + { + UPDATE_BPC_FOR_COLORFORMAT(newTiming.etc.yuv420, 0, 1, + pHfvs->dc_30bit_420, + pHfvs->dc_36bit_420, 0, + pHfvs->dc_48bit_420); + } + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + newTiming.etc.flag |= NVT_FLAG_DISPLAYID_2_0_TIMING; + + if (!assignNextAvailableDisplayId20Timing(pDisplayID20, &newTiming)) + { + break; + } + } + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861VideoFormatDataBlock(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo) +{ + NvU8 i = 0; + NvU8 rateIdx = 0; + NvU8 vfdb_idx = 0; + NvU8 startSeqNum = 0; + NvU8 eachOfDescSize = 0; + NvU32 width = 0; + NvU32 height = 0; + + const VFD_ONE_BYTE *pVFDOneByte = 0; + + NVT_TIMING newTiming; + NVT_EDID_INFO *pInfo = (NVT_EDID_INFO *)pRawInfo; + + for (vfdb_idx = 0; vfdb_idx < pExt861->total_vfdb; vfdb_idx++) + { + eachOfDescSize = pExt861->vfdb[vfdb_idx].info.vfd_len + 1; + + if (eachOfDescSize == 0) + { + // Video Format Descriptor length is 0! + return; + } + + for (i = 0; i < pExt861->vfdb[vfdb_idx].total_vfd; i++) + { + // data block value sanity check: + if (eachOfDescSize > 2 && (((const VFD_THREE_BYTE*)&pExt861->vfdb[vfdb_idx].video_format_desc[i*eachOfDescSize])->f31_37 != 0)) + nvt_assert(0 && "F31-F37 bits must be 0 in Byte3!\n"); + + if (eachOfDescSize > 3 && ((const VFD_FOUR_BYTE*)&pExt861->vfdb[vfdb_idx].video_format_desc[i*eachOfDescSize])->f40_47 != 0) + nvt_assert(0 && "It is not supported yet in Byte4!"); + + pVFDOneByte = (const VFD_ONE_BYTE *)&pExt861->vfdb[vfdb_idx].video_format_desc[i*eachOfDescSize]; + + /* + * If any of the following is true, then the RID shall be set to 0: + * 1. if a Video Format not listed in "Table 12 - Resolution Identification (RID)" is sent + * 2. if a Video Format with Frame Rates not listed in "Table 13 - AVI InfoFrame Video Format Frame Rate" is sent + * 3. if a Video Format listed in "Table 14 - RID To VIC Mapping" is sent. + */ + // For 1.
+ if ((pVFDOneByte->rid & NVT_CTA861_VF_RID_MASK) == 0 || pVFDOneByte->rid > MAX_RID_CODES_COUNT) + { + // shall have a non-zero RID value or RID code value larger than 28 + continue; + } + + width = RID[pVFDOneByte->rid].HVisible; + height = RID[pVFDOneByte->rid].VVisible; + + // If the Source is sending a Video Format that can be indicated by RID and FR, + // and is not listed in Table 14 (RID to VIC), then it shall set the RID and FR fields to the proper codes + for (rateIdx = 1; rateIdx <= MAX_VF_FRAME_RATE_COUNT; rateIdx++) + { + // For 2. + if (!isVFDRefreshRate(eachOfDescSize, &pExt861->vfdb[vfdb_idx].video_format_desc[i*eachOfDescSize], rateIdx)) + { + continue; + } + + // For 3. + if (VF_FRAME_RATE[rateIdx] < 144 && RID_VIC_MAP[pVFDOneByte->rid][rateIdx-1]) + { + nvt_assert(0 && "RID not allowed since it maps to VIC!"); + continue; + } + + startSeqNum = getExistedCTATimingSeqNumber(pInfo, NVT_TYPE_EDID_861ST); + + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (NvTiming_CalcOVT(width, height, VF_FRAME_RATE[rateIdx], &newTiming) == NVT_STATUS_SUCCESS) + { + if (pExt861->vfdb[vfdb_idx].info.y420 && newTiming.pclk1khz > NVT_HDMI_YUV_420_PCLK_SUPPORTED_MIN) + { + UPDATE_BPC_FOR_COLORFORMAT(newTiming.etc.yuv420, 0, 1, + pInfo->hdmiForumInfo.dc_30bit_420, + pInfo->hdmiForumInfo.dc_36bit_420, 0, + pInfo->hdmiForumInfo.dc_48bit_420); + } + + newTiming.etc.aspect = RID[pVFDOneByte->rid].aspect; + newTiming.etc.flag |= NVT_FLAG_CTA_OVT_TIMING; + if (pExt861->vfdb[vfdb_idx].info.ntsc) + { + newTiming.etc.flag |= NVT_FLAG_CTA_OVT_FRR_TIMING; + } + + newTiming.etc.status = NVT_STATUS_EDID_861STn(++startSeqNum); + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), "CTA861-OVT%d:#%3d:%dx%dx%3d.%03dHz/%s", + (int)pVFDOneByte->rid, + (int)NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status), + (int)newTiming.HVisible, + (int)newTiming.VVisible, + (int)newTiming.etc.rrx1k/1000, + (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? 
"I":"P")); + newTiming.etc.name[sizeof(newTiming.etc.name) - 1] = '\0'; + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + continue; + } + } + else + { + continue; + } + } + } + } +} + +// parse the 861B short Yuv420 timing descriptor +CODE_SEGMENT(PAGE_DD_CODE) +void parse861bShortYuv420Timing(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag) +{ + NvU32 i; + NvU8 vic; + NVT_TIMING newTiming; + NVT_HDMI_FORUM_INFO *pHfvs = NULL; + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + NvU8 *pYuv420Vic = pExt861->svd_y420vdb; + NvU32 total_y420vdb = pExt861->total_y420vdb; + NvU8 *pVdb = pExt861->video; + NvU32 total_svd = pExt861->total_svd; + NvU32 total_timings = 0; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + pHfvs = &pInfo->hdmiForumInfo; + total_timings = pInfo->total_timings; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 = (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + pHfvs = &pDisplayID20->vendor_specific.hfvs; + total_timings = pDisplayID20->total_timings; + } + else + { + return; + } + + if (total_timings == 0) + { + return; + } + + for (i = 0; i < total_y420vdb; i++) + { + vic = NVT_GET_CTA_8BIT_VIC(pYuv420Vic[i]); + + if (vic == 0 || vic > MAX_CEA861B_FORMAT) + continue; + + // assign corresponding CEA format's timing from pre-defined CE timing table, EIA861B + newTiming = EIA861B[vic-1]; + + // if yuv420 is supported in the video SVDs, it is indicated by yuv420vdb + if(total_svd > 0) + { + NvU8 idx, j; + NvBool bFound = NV_FALSE; + for (idx=0; idx < total_svd; idx++) + { + if (pVdb[idx] == vic) + { + for (j=0; j < total_timings; j++) + { + NVT_TIMING *timing = NULL; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + timing = &pInfo->timing[j]; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + timing = &pDisplayID20->timing[j]; + } + + if (NvTiming_IsTimingExactEqual(timing, &newTiming)) + { + bFound = NV_TRUE; + // we found one in pExt861->video[]. pHfvs->dcXXX are only for YCbCr420, so we can support: + // 1. 8bpc yuv420 always supported. + // 2. only add yuv420 and its deep colour caps into Video Data Block + UPDATE_BPC_FOR_COLORFORMAT(timing->etc.yuv420, 0, 1, + pHfvs->dc_30bit_420, + pHfvs->dc_36bit_420, 0, + pHfvs->dc_48bit_420); + break; + } + } + } + } + if (bFound) continue; + } + + newTiming.etc.status = NVT_STATUS_EDID_861STn(vic); + + // set CEA format to location of _CEA_FORMAT. _CEA_FORMAT isn't set in pre-defined CE timing from + // EIA861B table + if (NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status) != + NVT_CEA861_640X480P_59940HZ_4X3) + { + // Although IT 640x480 video timing has a CE id, it is not a CE timing. See 3.1 + // "General Video Format Requirements" section in CEA-861-E spec + NVT_SET_CEA_FORMAT(newTiming.etc.status, + NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status)); + } + + // calculate the pixel clock + newTiming.pclk = RRx1kToPclk(&newTiming); + newTiming.pclk1khz = RRx1kToPclk1khz(&newTiming); + + // From CTA-861-F: By default, Y420VDB SVDs, when present in the EDID, shall be less preferred than all regular Video Data Block SVDs. + // So it should use normal VIC code without native flag. 
+ //if ((vic <= 64) && (pVic[i] & NVT_CTA861_VIDEO_NATIVE_MASK)) + //{ + // NVT_SET_NATIVE_TIMING_FLAG(newTiming.etc.status); + //} + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), + "CTA-861G:#%3d:%5dx%4dx%3d.%03dHz/%s", (int)vic, + (int)newTiming.HVisible, + (int)((newTiming.interlaced ? 2 : 1)*newTiming.VVisible), + (int)newTiming.etc.rrx1k/1000, (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? "I":"P")); + newTiming.etc.name[sizeof(newTiming.etc.name)-1] = '\0'; + + // update supported color space; any VICs enumerated in the Y420VDB are yuv420 only modes + // update 8bpc supported color space; other bpc updated once VSDB is parsed + + // pHfvs->dcXXX are only for YCbCr420; when Vic enumerated here, 8bpc yuv420 always supported + UPDATE_BPC_FOR_COLORFORMAT(newTiming.etc.yuv420, 0, 1, + pHfvs->dc_30bit_420, + pHfvs->dc_36bit_420, 0, + pHfvs->dc_48bit_420); + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + newTiming.etc.flag |= NVT_FLAG_DISPLAYID_2_0_TIMING; + + if (!assignNextAvailableDisplayId20Timing(pDisplayID20, &newTiming)) + { + break; + } + } + } +} + +// Currently, the SVR is used in both the NVRDB and VFPDB. +// "One particular application is a Sink that prefers a Video Format that is not listed as an SVD in a VDB +// but instead listed in a YCBCR 4:2:0 Video Data Block" +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861NativeOrPreferredTiming(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag) +{ + NvU32 isMatch, i, j = 0; + + NVT_TIMING preferTiming; + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + NvU8 nativeSvr = 0; + NvU8 *pSvr = pExt861->svr_vfpdb; + NvU8 totalSvr = pExt861->total_svr; + NvU8 kth = 0; + NvU8 extKth = 0; + NvU8 DTDCount = 0; + NvU8 extDTDCount = 0; + NvU8 DIDT7Count = 0; + NvU8 DIDT10Count = 0; + NvU8 OVTCount = 0; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 = (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + } + else + { + return; + } + + // find all the DTDs in Base 0 or CTA861 + if (flag == FROM_CTA861_EXTENSION) + { + // get the NVRDB; per the spec this native resolution has higher priority than the others + if (pExt861->valid.NVRDB == 1) + { + nativeSvr = pExt861->native_video_resolution_db.native_svr; + totalSvr = 1; + } + + for (j = 0; j < pInfo->total_timings; j++) + { + if (NVT_IS_DTD(pInfo->timing[j].etc.status)) DTDCount++; + else if (NVT_IS_EXT_DTD(pInfo->timing[j].etc.status)) extDTDCount++; + else if (NVT_IS_CTA861_DID_T7(pInfo->timing[j].etc.status)) DIDT7Count++; + else if (NVT_IS_CTA861_DID_T10(pInfo->timing[j].etc.status)) DIDT10Count++; + else if (NVT_TIMING_IS_OVT(pInfo->timing[j].etc.flag)) OVTCount++; + } + } + + // this only handles single SVR + for (i = 0; i < totalSvr; i++) + { + NvU8 svr = 0; + NvU8 vic = 0; + + if (pExt861->valid.NVRDB == 1) + svr = nativeSvr; + else + svr = pSvr[i]; + + // Reserved + if (svr == 0 || svr == 128 || (svr >= 176 && svr <= 192) || svr == 255) + continue; + + // Interpret as the Kth 18-byte DTD, where K = SVR - 128 (for K = 1 to 16) in both base0 and CTA block + if (svr >= 129 && svr <= 144) + { + kth = svr - 128; + // only base EDID and CTA861 can support 18-byte DTDs + if (flag == FROM_CTA861_EXTENSION) + { + for (j = 0; j <
pInfo->total_timings; j++) + { + if (kth <= DTDCount) + { + if (NVT_IS_DTDn(pInfo->timing[j].etc.status, kth)) + break; + } + else + { + extKth = kth - DTDCount; + if (NVT_IS_EXT_DTDn(pInfo->timing[j].etc.status, extKth)) + break; + } + } + } + } + else if (svr >= 145 && svr <= 160) + { + // Interpret as the Nth 20-byte DTD or 6- or 7-byte CVT-based descriptor + // where N = SVR - 144 (for N = 1 to 16) + kth = svr - 144; + + if (flag == FROM_CTA861_EXTENSION) + { + for (j = 0; j < pInfo->total_timings; j++) + { + if (kth <= DIDT7Count) // pick the Nth 20-byte first + { + if (NVT_IS_CTA861_DID_T7n(pInfo->timing[j].etc.status, kth)) + break; + } + else + { + extKth = kth - DIDT7Count; // pick the T10 CVT-based timing then + if (NVT_IS_CTA861_DID_T10n(pInfo->timing[j].etc.status, extKth)) + break; + } + } + } + } + else if (svr >= 161 && svr <= 175) + { + // Interpret as the video format indicated by the first VFD of the first VFDB with Frame Rates of Rate Index N + // where N = SVR - 160 (for N = 1 to 15) + kth = svr - 160; + if (flag == FROM_CTA861_EXTENSION) + { + for (j = 0; j < pInfo->total_timings; j++) + { + if (kth <= OVTCount) + { + if (NVT_IS_CTA861_OVT_Tn(pInfo->timing[j].etc.flag, pInfo->timing[j].etc.status, kth)) + break; + } + } + } + } + else if (svr == 254) + { + // Interpret as the timing format indicated by the first code of the first T8VTDB + if (flag == FROM_CTA861_EXTENSION) + { + for (j = 0; j < pInfo->total_timings; j++) + { + if (NVT_IS_CTA861_DID_T8_1(pInfo->timing[j].etc.status)) + { + kth = 1; + break; + } + } + } + } + else // assign corresponding CEA format's timing from pre-defined CE timing table, EIA861B + { + // ( SVR >= 1 and SVR <= 127) and (SVR >= 193 and SVR <= 253) + vic = NVT_GET_CTA_8BIT_VIC(svr); + preferTiming = EIA861B[vic-1]; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + for (j = 0; j < pInfo->total_timings; j++) + { + isMatch = NvTiming_IsTimingExactEqual(&pInfo->timing[j], &preferTiming); + if (isMatch && (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[j].etc.status) == NVT_TYPE_EDID_861ST)) + break; + } + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + for (j = 0; j < pDisplayID20->total_timings; j++) + { + isMatch = NvTiming_IsTimingExactEqual(&pDisplayID20->timing[j], &preferTiming); + if (isMatch && (NVT_GET_TIMING_STATUS_TYPE(pDisplayID20->timing[j].etc.status) == NVT_TYPE_EDID_861ST)) + break; + } + } + } + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + if (pExt861->valid.NVRDB == 1) + pInfo->timing[j].etc.flag |= NVT_FLAG_CTA_NATIVE_TIMING; + else if (vic != 0 || kth != 0) + pInfo->timing[j].etc.flag |= NVT_FLAG_CTA_PREFERRED_TIMING; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + if (pExt861->valid.NVRDB == 1) + pDisplayID20->timing[j].etc.flag |= NVT_FLAG_CTA_NATIVE_TIMING | NVT_FLAG_DISPLAYID_2_0_TIMING; + else if (vic !=0 || kth != 0) + pDisplayID20->timing[j].etc.flag |= NVT_FLAG_CTA_PREFERRED_TIMING | NVT_FLAG_DISPLAYID_2_0_TIMING; + } + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861HdrStaticMetadataDataBlock(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag) +{ + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + NVT_HDR_STATIC_METADATA *pHdrInfo = NULL; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + pHdrInfo = &pInfo->hdr_static_metadata_info; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 
= (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + pHdrInfo = &pDisplayID20->cta.hdrInfo; + } + else + { + return; + } + + if (pExt861 == NULL || pHdrInfo == NULL) + { + return; + } + + // Parse the EOTF capability information. It's possible to have multiple EOTF + if (pExt861->hdr_static_metadata.byte1 & NVT_CEA861_EOTF_GAMMA_SDR) + { + pHdrInfo->supported_eotf.trad_gamma_sdr_eotf = 1; + } + if (pExt861->hdr_static_metadata.byte1 & NVT_CEA861_EOTF_GAMMA_HDR) + { + pHdrInfo->supported_eotf.trad_gamma_hdr_eotf = 1; + } + if (pExt861->hdr_static_metadata.byte1 & NVT_CEA861_EOTF_SMPTE_ST2084) + { + pHdrInfo->supported_eotf.smpte_st_2084_eotf = 1; + } + if (pExt861->hdr_static_metadata.byte1 & NVT_CEA861_EOTF_FUTURE) + { + pHdrInfo->supported_eotf.future_eotf = 1; + } + + // Parse the static metadata descriptor + if (pExt861->hdr_static_metadata.byte2) + { + pHdrInfo->static_metadata_type = 1; + } + + pHdrInfo->max_cll = pExt861->hdr_static_metadata.byte3 & NVT_CEA861_MAX_CLL_MASK; + pHdrInfo->max_fall = pExt861->hdr_static_metadata.byte4 & NVT_CEA861_MAX_FALL_MASK; + pHdrInfo->min_cll = pExt861->hdr_static_metadata.byte5 & NVT_CEA861_MIN_CLL_MASK; +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861DvStaticMetadataDataBlock(VSVDB_DATA* pVsvdb, NVT_DV_STATIC_METADATA *pDvInfo) +{ + NvU32 vsvdbVersion = 0; + + NVT_DV_STATIC_METADATA_TYPE0 *pDvType0 = NULL; + NVT_DV_STATIC_METADATA_TYPE1 *pDvType1 = NULL; + NVT_DV_STATIC_METADATA_TYPE1_1 *pvDvType1_1 = NULL; + NVT_DV_STATIC_METADATA_TYPE2 *pDvType2 = NULL; + + if (pVsvdb == NULL || pDvInfo == NULL) + { + return; + } + + if(pVsvdb->ieee_id != NVT_CEA861_DV_IEEE_ID) + { + return; + } + + //init + NVMISC_MEMSET(pDvInfo, 0, sizeof(NVT_DV_STATIC_METADATA)); + + // copy ieee id + pDvInfo->ieee_id = pVsvdb->ieee_id; + + vsvdbVersion = (pVsvdb->vendor_data[0] & NVT_CEA861_VSVDB_VERSION_MASK) >> NVT_CEA861_VSVDB_VERSION_MASK_SHIFT; + + switch (vsvdbVersion) + { + case 0: + if (pVsvdb->vendor_data_size < sizeof(NVT_DV_STATIC_METADATA_TYPE0)) + { + return; + } + pDvType0 = (NVT_DV_STATIC_METADATA_TYPE0 *)(&pVsvdb->vendor_data); + // copy the data + pDvInfo->VSVDB_version = pDvType0->VSVDB_version; + pDvInfo->supports_2160p60hz = pDvType0->supports_2160p60hz; + pDvInfo->supports_YUV422_12bit = pDvType0->supports_YUV422_12bit; + pDvInfo->supports_global_dimming = pDvType0->supports_global_dimming; + pDvInfo->colorimetry = 0; // this field does not exist in type0 + pDvInfo->dm_version = (pDvType0->dm_version_major << 4) | pDvType0->dm_version_minor; + pDvInfo->target_min_luminance = (pDvType0->target_min_pq_11_4 << 4) | pDvType0->target_min_pq_3_0; + pDvInfo->target_max_luminance = (pDvType0->target_max_pq_11_4 << 4) | pDvType0->target_max_pq_3_0; + pDvInfo->cc_red_x = (pDvType0->cc_red_x_11_4 << 4) | pDvType0->cc_red_x_3_0; + pDvInfo->cc_red_y = (pDvType0->cc_red_y_11_4 << 4) | pDvType0->cc_red_y_3_0; + pDvInfo->cc_green_x = (pDvType0->cc_green_x_11_4 << 4) | pDvType0->cc_green_x_3_0; + pDvInfo->cc_green_y = (pDvType0->cc_green_y_11_4 << 4) | pDvType0->cc_green_y_3_0; + pDvInfo->cc_blue_x = (pDvType0->cc_blue_x_11_4 << 4) | pDvType0->cc_blue_x_3_0; + pDvInfo->cc_blue_y = (pDvType0->cc_blue_y_11_4 << 4) | pDvType0->cc_blue_y_3_0; + pDvInfo->cc_white_x = (pDvType0->cc_white_x_11_4 << 4) | pDvType0->cc_white_x_3_0; + pDvInfo->cc_white_y = (pDvType0->cc_white_y_11_4 << 4) | pDvType0->cc_white_y_3_0; + pDvInfo->supports_backlight_control = 0; + pDvInfo->backlt_min_luma = 0; + pDvInfo->interface_supported_by_sink = 0; + pDvInfo->supports_10b_12b_444 = 0; + break; + case 1: + 
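+ // A version-1 VSVDB comes in two layouts that are distinguished purely by
+ // vendor_data_size: the full NVT_DV_STATIC_METADATA_TYPE1 layout, and the
+ // shorter NVT_DV_STATIC_METADATA_TYPE1_1 layout with packed chromaticity
+ // bits; any other size is rejected below.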
if (pVsvdb->vendor_data_size == sizeof(NVT_DV_STATIC_METADATA_TYPE1)) + { + pDvType1 = (NVT_DV_STATIC_METADATA_TYPE1 *)(&pVsvdb->vendor_data); + // copy the data + pDvInfo->VSVDB_version = pDvType1->VSVDB_version; + pDvInfo->supports_2160p60hz = pDvType1->supports_2160p60hz; + pDvInfo->supports_YUV422_12bit = pDvType1->supports_YUV422_12bit; + pDvInfo->dm_version = pDvType1->dm_version; + pDvInfo->supports_global_dimming = pDvType1->supports_global_dimming; + pDvInfo->colorimetry = pDvType1->colorimetry; + pDvInfo->target_min_luminance = pDvType1->target_min_luminance; + pDvInfo->target_max_luminance = pDvType1->target_max_luminance; + pDvInfo->cc_red_x = pDvType1->cc_red_x; + pDvInfo->cc_red_y = pDvType1->cc_red_y; + pDvInfo->cc_green_x = pDvType1->cc_green_x; + pDvInfo->cc_green_y = pDvType1->cc_green_y; + pDvInfo->cc_blue_x = pDvType1->cc_blue_x; + pDvInfo->cc_blue_y = pDvType1->cc_blue_y; + pDvInfo->supports_backlight_control = 0; + pDvInfo->backlt_min_luma = 0; + pDvInfo->interface_supported_by_sink = 0; + pDvInfo->supports_10b_12b_444 = 0; + pDvInfo->cc_white_x = 0; + pDvInfo->cc_white_y = 0; + } + else if (pVsvdb->vendor_data_size == sizeof(NVT_DV_STATIC_METADATA_TYPE1_1)) + { + pvDvType1_1 = (NVT_DV_STATIC_METADATA_TYPE1_1 *)(&pVsvdb->vendor_data); + // copy the data + pDvInfo->VSVDB_version = pvDvType1_1->VSVDB_version; + pDvInfo->supports_2160p60hz = pvDvType1_1->supports_2160p60hz; + pDvInfo->supports_YUV422_12bit = pvDvType1_1->supports_YUV422_12bit; + pDvInfo->dm_version = pvDvType1_1->dm_version; + pDvInfo->supports_global_dimming = pvDvType1_1->supports_global_dimming; + pDvInfo->colorimetry = pvDvType1_1->colorimetry; + pDvInfo->target_min_luminance = pvDvType1_1->target_min_luminance; + pDvInfo->target_max_luminance = pvDvType1_1->target_max_luminance; + pDvInfo->cc_green_x = NVT_DOLBY_CHROMATICITY_MSB_GX | pvDvType1_1->unique_Gx; + pDvInfo->cc_green_y = NVT_DOLBY_CHROMATICITY_MSB_GY | pvDvType1_1->unique_Gy; + pDvInfo->cc_blue_x = NVT_DOLBY_CHROMATICITY_MSB_BX | pvDvType1_1->unique_Bx; + pDvInfo->cc_blue_y = NVT_DOLBY_CHROMATICITY_MSB_BY | pvDvType1_1->unique_By; + pDvInfo->cc_red_x = NVT_DOLBY_CHROMATICITY_MSB_RX | pvDvType1_1->unique_Rx; + pDvInfo->cc_red_y = NVT_DOLBY_CHROMATICITY_MSB_RY | (pvDvType1_1->unique_Ry_bit_0 | (pvDvType1_1->unique_Ry_bit_1 <<1) | (pvDvType1_1->unique_Ry_bit_2_to_4 << 2)); + pDvInfo->supports_backlight_control = 0; + pDvInfo->backlt_min_luma = 0; + pDvInfo->interface_supported_by_sink = pvDvType1_1->interface_supported_by_sink; + pDvInfo->supports_10b_12b_444 = 0; + pDvInfo->cc_white_x = 0; + pDvInfo->cc_white_y = 0; + } + else + { + return; + } + + break; + case 2: + if (pVsvdb->vendor_data_size < sizeof(NVT_DV_STATIC_METADATA_TYPE2)) + { + return; + } + pDvType2 = (NVT_DV_STATIC_METADATA_TYPE2 *)(&pVsvdb->vendor_data); + // copy the data + pDvInfo->VSVDB_version = pDvType2->VSVDB_version; + pDvInfo->supports_backlight_control = pDvType2->supports_backlight_control; + pDvInfo->supports_YUV422_12bit = pDvType2->supports_YUV422_12bit; + pDvInfo->dm_version = pDvType2->dm_version; + pDvInfo->supports_global_dimming = pDvType2->supports_global_dimming; + pDvInfo->target_min_luminance = pDvType2->target_min_luminance; + pDvInfo->interface_supported_by_sink = pDvType2->interface_supported_by_sink; + pDvInfo->parity = pDvType2->parity; + pDvInfo->target_max_luminance = pDvType2->target_max_luminance; + pDvInfo->cc_green_x = NVT_DOLBY_CHROMATICITY_MSB_GX | pDvType2->unique_Gx; + pDvInfo->cc_green_y = NVT_DOLBY_CHROMATICITY_MSB_GY | 
pDvType2->unique_Gy; + pDvInfo->cc_blue_x = NVT_DOLBY_CHROMATICITY_MSB_BX | pDvType2->unique_Bx; + pDvInfo->cc_blue_y = NVT_DOLBY_CHROMATICITY_MSB_BY | pDvType2->unique_By; + pDvInfo->cc_red_x = NVT_DOLBY_CHROMATICITY_MSB_RX | pDvType2->unique_Rx; + pDvInfo->cc_red_y = NVT_DOLBY_CHROMATICITY_MSB_RY | pDvType2->unique_Ry; + pDvInfo->supports_10b_12b_444 = pDvType2->supports_10b_12b_444_bit0 | (pDvType2->supports_10b_12b_444_bit1 << 1); + pDvInfo->colorimetry = 0; + pDvInfo->supports_2160p60hz = 0; + pDvInfo->cc_white_x = 0; + pDvInfo->cc_white_y = 0; + break; + default: + break; + } +} + +// find both hdmi llc and hdmi forum vendor specific data block and return basic hdmi information +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861VsdbBlocks(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag + ) +{ + NvU32 i; + + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + NVT_HDMI_LLC_INFO *pHdmiLlc = NULL; + NVT_HDMI_FORUM_INFO *pHfvs = NULL; + NVDA_VSDB_PARSED_INFO *pNvVsdb = NULL; + MSFT_VSDB_PARSED_INFO *pMsftVsdb = NULL; + + if (pExt861 == NULL || pRawInfo == NULL) + { + return; + } + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + pHdmiLlc = &pInfo->hdmiLlcInfo; + pHfvs = &pInfo->hdmiForumInfo; + pNvVsdb = &pInfo->nvdaVsdbInfo; + pMsftVsdb = &pInfo->msftVsdbInfo; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 = (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + pHdmiLlc = &pDisplayID20->vendor_specific.hdmiLlc; + pHfvs = &pDisplayID20->vendor_specific.hfvs; + pNvVsdb = &pDisplayID20->vendor_specific.nvVsdb; + pMsftVsdb = &pDisplayID20->vendor_specific.msftVsdb; + } + else + { + return; + } + + if (pHdmiLlc == NULL || pHfvs == NULL || pNvVsdb == NULL || pMsftVsdb == NULL || (pExt861->total_vsdb == 0)) + { + return; + } + + for (i = 0; i < pExt861->total_vsdb; i++) + { + // Assumes each vsdb is unique for this CEA block, e.g., no two HDMI_IEEE_ID + switch (pExt861->vsdb[i].ieee_id) + { + case NVT_CEA861_HDMI_IEEE_ID: + // set any 3D timings and HDMI extended timing specified in the VSDB + parseEdidHdmiLlcBasicInfo((VSDB_DATA *)(&pExt861->vsdb[i]), pHdmiLlc); + pExt861->valid.H14B_VSDB = 1; + break; + + case NVT_CEA861_HDMI_FORUM_IEEE_ID: + parseEdidHdmiForumVSDB((VSDB_DATA *)(&pExt861->vsdb[i]), pHfvs); + pExt861->valid.H20_HF_VSDB = 1; + break; + + case NVT_CEA861_NVDA_IEEE_ID: + parseEdidNvidiaVSDBBlock((VSDB_DATA *)(&pExt861->vsdb[i]), pNvVsdb); + pExt861->valid.nvda_vsdb = 1; + break; + + case NVT_CEA861_MSFT_IEEE_ID: + parseEdidMsftVsdbBlock((VSDB_DATA *)(&pExt861->vsdb[i]), pMsftVsdb); + pExt861->valid.msft_vsdb = 1; + break; + + } + } + + // H20_HF_VSDB shall be listed only if H14B_VSDB is also listed + // H20_HF_VSDB should not specify > 600MHz + nvt_assert(!pExt861->valid.H20_HF_VSDB || (pExt861->valid.H14B_VSDB && (pHfvs->max_TMDS_char_rate <= 0x78))); + + // Done with reading CEA VSDB blocks, sanitize them now + if (pExt861->valid.SCDB) + { + pHdmiLlc->effective_tmds_clock = pExt861->hfscdb[1]; + } + else if (pExt861->valid.H14B_VSDB) + { + // HDMI 2.0 Spec - section 10.3.2 + // The maximum Rate = Max_TMDS_Character_Rate * 5 MHz. + // If the Sink does not support TMDS Character Rates > 340 Mcsc, then the Sink shall set this field to 0. + // If the Sink supports TMDS Character Rates > 340 Mcsc, the Sink shall set Max_TMDS_Character_Rate appropriately and non - zero. 
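+ // Worked example (illustrative): per the rule above, the rate fields are in
+ // units of 5 MHz, so 0x44 (68) = 340 MHz and 0x78 (120) = 600 MHz. With an
+ // H20_HF_VSDB present, MAX(max_tmds_clock, max_TMDS_char_rate) can keep an
+ // HF-VSDB rate up to 600 MHz; without one, MIN(max_tmds_clock, 0x44) clamps
+ // the effective rate to at most 340 MHz.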
+ + // Pick updated TMDS clock rate + pHdmiLlc->effective_tmds_clock = (pExt861->valid.H20_HF_VSDB) ? + MAX(pHdmiLlc->max_tmds_clock, pHfvs->max_TMDS_char_rate) : + MIN(pHdmiLlc->max_tmds_clock, 0x44); + } + +} + +// parse vendor specific video data block (VSVDB) information +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861VsvdbBlocks(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag + ) +{ + NvU32 i; + + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + NVT_DV_STATIC_METADATA *pDvInfo = NULL; + NVT_HDR10PLUS_INFO *pHdr10PlusInfo = NULL; + + if (pExt861 == NULL || pRawInfo == NULL) + { + return; + } + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + pDvInfo = &pInfo->dv_static_metadata_info; + pHdr10PlusInfo = &pInfo->hdr10PlusInfo; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 = (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + pDvInfo = &pDisplayID20->cta.dvInfo; + pHdr10PlusInfo = &pDisplayID20->cta.hdr10PlusInfo; + } + else + { + return; + } + + if (pDvInfo == NULL || pHdr10PlusInfo == NULL || (pExt861->total_vsvdb == 0)) + { + return; + } + + for (i = 0; i < pExt861->total_vsvdb; i++) + { + // Assumes each vsvdb is unique for this CEA block, e.g., no two HDMI_IEEE_ID + switch (pExt861->vsvdb[i].ieee_id) + { + case NVT_CEA861_DV_IEEE_ID: + // parse Dolby Vision related information from the DV vendor specific video data block + parseCta861DvStaticMetadataDataBlock(&pExt861->vsvdb[i], pDvInfo); + pExt861->valid.dv_static_metadata = 1; + break; + + case NVT_CEA861_HDR10PLUS_IEEE_ID: + // parse HDR10+ related information from the HDR10+ LLC Vendor Specific Video Data Block + parseCta861Hdr10PlusDataBlock(&pExt861->vsvdb[i], pHdr10PlusInfo); + pExt861->valid.hdr10Plus = 1; + break; + } + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861HfEeodb(NVT_EDID_CEA861_INFO *pExt861, + NvU32 *pTotalEdidExtensions) +{ + // *pTotalEdidExtensions set by the edid extension flag should be >= 1 for HFEEODB to be valid. + if (pTotalEdidExtensions == NULL || pExt861 == NULL || !pExt861->valid.HF_EEODB || *pTotalEdidExtensions == 0) + { + return; + } + + // HDMI 2.1 AmendmentA1 specifies that if EEODB is present sources shall ignore the Extension flag. + // This effectively overrides the extension count from extension flag. + *pTotalEdidExtensions = pExt861->hfeeodb; +} + + +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861HfScdb(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag) +{ + NVT_EDID_INFO *pInfo = (NVT_EDID_INFO *)pRawInfo; + VSDB_DATA vsdbData; + + if (pExt861 == NULL || pRawInfo == NULL) + { + return; + } + + if (!pExt861->valid.SCDB || pExt861->valid.H20_HF_VSDB) + { + return; + } + NVMISC_MEMSET(&vsdbData, 0, sizeof(vsdbData)); + NVMISC_MEMCPY(&vsdbData.vendor_data, pExt861->hfscdb, sizeof(vsdbData.vendor_data)); + + vsdbData.vendor_data_size = pExt861->hfscdbSize; + + parseEdidHdmiForumVSDB(&vsdbData, &pInfo->hdmiForumInfo); +} + + +CODE_SEGMENT(PAGE_DD_CODE) +void getEdidHDM1_4bVsdbTiming(NVT_EDID_INFO *pInfo) +{ + NvU32 i = 0, j = 0; + + for (i = 0; i < 2; ++i) + { + NVT_EDID_CEA861_INFO *pExt861 = (0 == i) ? 
&pInfo->ext861 : &pInfo->ext861_2; + + for (j = 0; j < pExt861->total_vsdb; ++j) + { + switch (pExt861->vsdb[j].ieee_id) + { + case NVT_CEA861_HDMI_IEEE_ID: + { + NvU32 count = 0; + // set any 3D timings and HDMI extended timing specified in the VSDB + parseEdidHDMILLCTiming(pInfo, (VSDB_DATA *)(&pExt861->vsdb[j]), &count, &(pInfo->Hdmi3Dsupport)); + pInfo->HDMI3DSupported = 0 < count; + break; + } + + default: + break; + } + } + } +} + +// get the full EDID 861 extension info +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS get861ExtInfo(NvU8 *p, NvU32 size, NVT_EDID_CEA861_INFO *p861info) +{ + NvU32 dtd_offset; + // sanity check + if (p == NULL || size < sizeof(EDIDV1STRUC)) + { + return NVT_STATUS_ERR; + } + + // make sure we have 861 extension + if (p[0] != 0x2 || p[1] < NVT_CEA861_REV_ORIGINAL) + { + return NVT_STATUS_ERR; + } + + // DTD offset sanity check + if (p[2] >= 1 && p[2] <= 3) + { + return NVT_STATUS_ERR; + } + + // don't do anything further if p861info is NULL + if (p861info == NULL) + { + return NVT_STATUS_SUCCESS; + } + + // init + NVMISC_MEMSET(p861info, 0, sizeof(NVT_EDID_CEA861_INFO)); + + // get the revision number + p861info->revision = p[1]; + + // no extra info for 861-original, returning from here + if (p861info->revision == NVT_CEA861_REV_ORIGINAL) + { + return NVT_STATUS_SUCCESS; + } + + p861info->basic_caps = p[3]; + + // no extra info for 861-A, returning from here + if (p861info->revision == NVT_CEA861_REV_A) + { + return NVT_STATUS_SUCCESS; + } + + dtd_offset = (NvU32)p[2]; + if (dtd_offset == 0 || dtd_offset == 4) + { + return NVT_STATUS_SUCCESS; + } + + // resolve all short descriptors in the reserved block + // reserved block starts from offset 04 to dtd_offset-1 + return parseCta861DataBlockInfo(&p[4], dtd_offset - 4, p861info); +} + +// 1. get the 861 extension tags info +// 2. 
for validation purposes if p861info == NULL +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS parseCta861DataBlockInfo(NvU8 *p, + NvU32 size, + NVT_EDID_CEA861_INFO *p861info) +{ + NvU32 i, j; + NvU32 video_index = 0; + NvU32 audio_index = 0; + NvU32 speaker_index = 0; + NvU32 vendor_index = 0; + NvU32 vsvdb_index = 0; + NvU32 yuv420vdb_index = 0; + NvU32 yuv420cmdb_index = 0; + NvU8 vfd_index = 0; + NvU8 didT7_index = 0; + NvU8 didT8_index = 0; + NvU8 didT10_index = 0; + NvU8 svr_index = 0; + NvU32 ieee_id = 0; + NvU32 tag, ext_tag, payload; + i = 0; + + while (i < size) + { + // get the descriptor's tag and payload size + tag = NVT_CEA861_GET_SHORT_DESCRIPTOR_TAG(p[i]); + payload = NVT_CEA861_GET_SHORT_DESCRIPTOR_SIZE(p[i]); + + /* don't allow total data collection size larger than [127 - 5 (tag, revision, offset, describing native video format, checksum)] */ + if ((i + payload > size) || (i + payload > 122)) + { + break; + } + // move the pointer to the payload section or extended Tag Code + i++; + + // NvTiming_EDIDValidationMask uses the different tag/payload values to make sure each CTA-861 data block is legal + if (p861info == NULL) + { + switch(tag) + { + case NVT_CEA861_TAG_AUDIO: + case NVT_CEA861_TAG_VIDEO: + case NVT_CEA861_TAG_SPEAKER_ALLOC: + case NVT_CEA861_TAG_VESA_DTC: + case NVT_CEA861_TAG_RSVD: + break; + case NVT_CTA861_TAG_VIDEO_FORMAT: + if (payload < 2) return NVT_STATUS_ERR; // no VFD + break; + case NVT_CEA861_TAG_VENDOR: + if (payload < 3) return NVT_STATUS_ERR; + break; + case NVT_CEA861_TAG_EXTENDED_FLAG: + if (payload >= 1) + { + ext_tag = p[i]; + if (ext_tag == NVT_CEA861_EXT_TAG_VIDEO_CAP && (payload != 2)) return NVT_STATUS_ERR; + else if (ext_tag == NVT_CEA861_EXT_TAG_COLORIMETRY && payload != 3) return NVT_STATUS_ERR; + else if (ext_tag == NVT_CEA861_EXT_TAG_VIDEO_FORMAT_PREFERENCE && payload < 2) return NVT_STATUS_ERR; + else if (ext_tag == NVT_CEA861_EXT_TAG_YCBCR420_VIDEO && payload < 2) return NVT_STATUS_ERR; + else if (ext_tag == NVT_CEA861_EXT_TAG_YCBCR420_CAP && payload < 1) return NVT_STATUS_ERR; + else if (ext_tag == NVT_CEA861_EXT_TAG_HDR_STATIC_METADATA && payload < 3) return NVT_STATUS_ERR; + else if (ext_tag == NVT_CEA861_EXT_TAG_VENDOR_SPECIFIC_VIDEO && payload < 4) return NVT_STATUS_ERR; + else if (ext_tag == NVT_CTA861_EXT_TAG_SCDB && payload < 7) return NVT_STATUS_ERR; + else if (ext_tag == NVT_CTA861_EXT_TAG_HF_EEODB && payload != 2) return NVT_STATUS_ERR; + else if (ext_tag == NVT_CTA861_EXT_TAG_DID_TYPE_VII && payload <= 2) return NVT_STATUS_ERR; + else if (ext_tag == NVT_CTA861_EXT_TAG_DID_TYPE_VIII && payload <= 2) return NVT_STATUS_ERR; + else if (ext_tag == NVT_CTA861_EXT_TAG_DID_TYPE_X && payload <= 2) return NVT_STATUS_ERR; + else if (ext_tag == NVT_CTA861_EXT_TAG_NATIVE_VIDEO_RESOLUTION) + { + if (payload != 2 && payload != 3 && payload != 7) return NVT_STATUS_ERR; + } + + if (payload > 2) + { + if (ext_tag == NVT_CTA861_EXT_TAG_DID_TYPE_VII) + { + if ((payload-2) != 20) return NVT_STATUS_ERR; // only 20-byte descriptors are supported + if ((p[i+1] & 0x7) != 2) return NVT_STATUS_ERR; // Block Revision shall be 2 + if ((p[i+1] & 0x70) >> 4 != 0) return NVT_STATUS_ERR; // extra bytes are not allowed + } + else if (ext_tag == NVT_CTA861_EXT_TAG_DID_TYPE_VIII) + { + if ((payload-2) < 1) return NVT_STATUS_ERR; // at least one code must be present + if ((p[i+1] & 0x7) != 1) return NVT_STATUS_ERR; // Block Revision shall be 1 + if ((p[i+1] & 0xC0) >> 6 != 0) return NVT_STATUS_ERR; // timings other than DMT are not allowed + } + else if (ext_tag == NVT_CTA861_EXT_TAG_DID_TYPE_X) + {
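+ // Type X timing descriptors are 6 or 7 bytes each, selected by bits 6:4 of
+ // the byte following the extended tag; after those two header bytes, the
+ // remaining payload must be an exact multiple of the descriptor size.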
if ((p[i+1] & 0x7) != 0) return NVT_STATUS_ERR; // Block Revision shall be 0 + if (((p[i+1] & 0x70) >> 4 == 0) && (payload-2) % 6) return NVT_STATUS_ERR; // supported 6-bytes descriptors + if (((p[i+1] & 0x70) >> 4 == 1) && (payload-2) % 7) return NVT_STATUS_ERR; // supported 7-bytes descriptors + } + } + } + break; + default: + break; + } + return NVT_STATUS_SUCCESS; + } + + // loop through all descriptors + if (tag == NVT_CEA861_TAG_VIDEO) + { + // short video descriptor + for (j = 0; j < payload; j ++, i ++, video_index ++) + { + if (video_index < NVT_CEA861_VIDEO_MAX_DESCRIPTOR) + { + p861info->video[video_index] = p[i]; + } + else + { + break; + } + } + p861info->total_svd = (NvU8)video_index; + } + else if (tag == NVT_CEA861_TAG_AUDIO) + { + // short audio descriptor + for (j = 0; j < payload / 3; j ++, i += 3, audio_index ++) + { + if (audio_index < NVT_CEA861_AUDIO_MAX_DESCRIPTOR) + { + p861info->audio[audio_index].byte1 = p[i]; + p861info->audio[audio_index].byte2 = p[i+1]; + p861info->audio[audio_index].byte3 = p[i+2]; + } + else + { + break; + } + } + p861info->total_sad = (NvU8)audio_index; + } + else if (tag == NVT_CEA861_TAG_SPEAKER_ALLOC) + { + // speaker allocation descriptor + for (j = 0; j < payload / 3; j ++, i += 3, speaker_index ++) + { + if (speaker_index < NVT_CEA861_SPEAKER_MAX_DESCRIPTOR) + { + p861info->speaker[speaker_index].byte1 = p[i]; + p861info->speaker[speaker_index].byte2 = p[i+1]; + p861info->speaker[speaker_index].byte3 = p[i+2]; + } + else + { + break; + } + } + p861info->total_ssd = (NvU8)speaker_index; + } + else if (tag == NVT_CEA861_TAG_VENDOR) + { + if (vendor_index < NVT_CEA861_VSDB_MAX_BLOCKS) + { + if (payload < 3) + { + // This malformed payload will cause a hang below. + return NVT_STATUS_ERR; + } + + p861info->vsdb[vendor_index].ieee_id = p[i]; //IEEE ID low byte + p861info->vsdb[vendor_index].ieee_id |= (p[i+1]) << 8; //IEEE ID middle byte + p861info->vsdb[vendor_index].ieee_id |= (p[i+2]) << 16; //IEEE ID high byte + + p861info->vsdb[vendor_index].vendor_data_size = payload - 3; + + // move the pointer to the vsdb payload + i += 3; + + // get the other vendor specific data + for (j = 0; j < payload - 3; j ++, i ++) + { + if (j < NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH) + { + p861info->vsdb[vendor_index].vendor_data[j] = p[i]; + } + } + vendor_index++; + } + } + else if (tag == NVT_CTA861_TAG_VIDEO_FORMAT) + { + if (payload > 0) + { + p861info->vfdb[vfd_index].info.vfd_len = p[i] & 0x03; + p861info->vfdb[vfd_index].info.ntsc = (p[i] & 0x40) >> 6; + p861info->vfdb[vfd_index].info.y420 = (p[i] & 0x80) >> 7; + p861info->vfdb[vfd_index].total_vfd = (NvU8)(payload - 1) / (p861info->vfdb[vfd_index].info.vfd_len + 1); + + i++; payload--; + + for (j = 0; (j < payload) && (p861info->vfdb[vfd_index].total_vfd != 0); j++, i++) + { + p861info->vfdb[vfd_index].video_format_desc[j] = p[i]; + } + + p861info->total_vfdb = ++vfd_index; + } + } + else if (tag == NVT_CEA861_TAG_EXTENDED_FLAG) + { + if (payload >= 1) + { + ext_tag = p[i]; + if (ext_tag == NVT_CEA861_EXT_TAG_VIDEO_CAP && payload >= 2) + { + p861info->video_capability = p[i + 1] & NVT_CEA861_VIDEO_CAPABILITY_MASK; + p861info->valid.VCDB = 1; + i += payload; + } + else if (ext_tag == NVT_CEA861_EXT_TAG_COLORIMETRY && payload >= 3) + { + p861info->colorimetry.byte1 = p[i + 1] & NVT_CEA861_COLORIMETRY_MASK; + p861info->colorimetry.byte2 = p[i + 2] & NVT_CEA861_GAMUT_METADATA_MASK; + p861info->valid.colorimetry = 1; + i += payload; + } + else if (ext_tag == NVT_CEA861_EXT_TAG_VIDEO_FORMAT_PREFERENCE && 
payload >= 2) + { + // when present, indicates the order of preference for selected Video Formats listed as DTDs and/or SVDs throughout Block 0 and the CTA Extensions of the EDID. + // The order of SVD preferred modes shall take precedence over preferred modes defined elsewhere in the EDID/CEA861 blocks + + // exclude the extended tag + i++; payload--; + + for (j = 0; (j < payload) && (svr_index < NVT_CEA861_VFPDB_MAX_DESCRIPTOR); j++, i++, svr_index++) + { + p861info->svr_vfpdb[svr_index] = p[i]; + } + p861info->total_svr = svr_index; + } + else if (ext_tag == NVT_CEA861_EXT_TAG_YCBCR420_VIDEO && payload >= 2) + { + // when present, lists SVDs that are only supported in YCbCr 4:2:0 + + // exclude the extended tag + i++; payload--; + + for (j = 0; (j < payload) && (yuv420vdb_index < NVT_CEA861_Y420VDB_MAX_DESCRIPTOR); j++, i++, yuv420vdb_index++) + { + p861info->svd_y420vdb[yuv420vdb_index] = p[i]; + } + p861info->total_y420vdb = (NvU8)yuv420vdb_index; + } + else if (ext_tag == NVT_CEA861_EXT_TAG_YCBCR420_CAP && payload >= 1) + { + // when present, provides a bitmap of the video SVDs that also support YCbCr 4:2:0 in addition to RGB, YCbCr 4:4:4, and/or YCbCr 4:2:2 + + // exclude the extended tag + i++; payload--; + + for (j = 0; (j < payload) && (yuv420cmdb_index < NVT_CEA861_Y420CMDB_MAX_DESCRIPTOR); j++, i++, yuv420cmdb_index++) + { + p861info->map_y420cmdb[yuv420cmdb_index] = p[i]; + } + p861info->total_y420cmdb = (NvU8)yuv420cmdb_index; + + p861info->valid.y420cmdb = 1; // total_y420cmdb is not enough as this could be 0. See CEA861-F 7.5.11 + } + else if(ext_tag == NVT_CEA861_EXT_TAG_HDR_STATIC_METADATA && payload >= 3) + { + p861info->hdr_static_metadata.byte1 = p[i + 1] & NVT_CEA861_EOTF_MASK; // This byte has bits which identify which EOTFs are supported by the sink. + p861info->hdr_static_metadata.byte2 = p[i + 2] & NVT_CEA861_STATIC_METADATA_DESCRIPTOR_MASK; // This byte has bits which identify which Static Metadata descriptors are supported by the sink.
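/*
 * Illustrative sketch (not part of the patch): in the HDR static metadata
 * data block handled here, the byte after the extended tag is an EOTF
 * bitmap (bit 0 SDR, bit 1 traditional HDR, bit 2 SMPTE ST 2084/PQ,
 * bit 3 HLG) and the next byte flags the supported static metadata
 * descriptor types; the optional bytes read below carry coded luminance
 * values. Assuming the CTA-861.3 coding, the max-luminance code CV
 * decodes as 50 * 2^(CV/32) cd/m^2:
 */
#include <math.h>

static double hdr_max_luminance_nits(unsigned char cv)
{
    return 50.0 * pow(2.0, (double)cv / 32.0);   /* cv = 96 -> 400 nits */
}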
+ + i += 3; + + if (payload > 3) + { + p861info->hdr_static_metadata.byte3 = p[i]; + i++; + } + + if (payload > 4) + { + p861info->hdr_static_metadata.byte4 = p[i]; + i++; + } + + if (payload > 5) + { + p861info->hdr_static_metadata.byte5 = p[i]; + i++; + } + + p861info->valid.hdr_static_metadata = 1; + } + else if(ext_tag == NVT_CEA861_EXT_TAG_VENDOR_SPECIFIC_VIDEO) + { + if (vsvdb_index < NVT_CEA861_VSVDB_MAX_BLOCKS) + { + ieee_id = p[i + 1]; //IEEE ID low byte + ieee_id |= (p[i + 2]) << 8; //IEEE ID middle byte + ieee_id |= (p[i + 3]) << 16; //IEEE ID high byte + + if ((ieee_id == NVT_CEA861_DV_IEEE_ID) || (ieee_id == NVT_CEA861_HDR10PLUS_IEEE_ID)) + { + // exclude the extended tag + i++; payload--; + + p861info->vsvdb[vsvdb_index].ieee_id = ieee_id; + p861info->vsvdb[vsvdb_index].vendor_data_size = payload - 3; + + // move the pointer to the payload + i += 3; + + // get the other vendor specific video data + for (j = 0; j < payload - 3; j++, i++) + { + if (j < NVT_CEA861_VSVDB_PAYLOAD_MAX_LENGTH) + { + p861info->vsvdb[vsvdb_index].vendor_data[j] = p[i]; + } + } + + vsvdb_index++; + + p861info->total_vsvdb = (NvU8)vsvdb_index; + + } + else + { + // skip the unsupported extended block + i += payload; + } + } + else + { + // skip the extended block that we don't have a room for, + // the NVT_CEA861_VSVDB_MAX_BLOCKS should be incremented for new VSVDB types + nvt_assert(vsvdb_index >= NVT_CEA861_VSVDB_MAX_BLOCKS); + i += payload; + } + } + else if (ext_tag == NVT_CTA861_EXT_TAG_NATIVE_VIDEO_RESOLUTION) + { + if (payload != 2 && payload != 3 && payload != 7) break; + + i++; payload--; + p861info->native_video_resolution_db.native_svr = p[i]; + p861info->valid.NVRDB = 1; + + i++; payload--; + if (payload != 0) + { + p861info->native_video_resolution_db.option.img_size = p[i] & 0x01; + p861info->native_video_resolution_db.option.sz_prec = (p[i] & 0x80) >> 7; + + i++; payload--; + if (p861info->native_video_resolution_db.option.img_size == 1) + { + for (j = 0; j< payload; j++, i++) + { + p861info->native_video_resolution_db.image_size[j] = p[i]; + } + } + } + } + else if (ext_tag == NVT_CTA861_EXT_TAG_DID_TYPE_VII) + { + if( payload != 22) break; + + i++; payload--; + p861info->did_type7_data_block[didT7_index].version.revision = p[i] & 0x07; + p861info->did_type7_data_block[didT7_index].version.dsc_pt = (p[i] & 0x08) >> 3; + p861info->did_type7_data_block[didT7_index].version.t7_m = (p[i] & 0x70) >> 4; + + //do not consider Byte 3 + i++; payload--; + + p861info->did_type7_data_block[didT7_index].total_descriptors = + (NvU8)(payload / (NVT_CTA861_DID_TYPE7_DESCRIPTORS_LENGTH + p861info->did_type7_data_block[didT7_index].version.t7_m)); + + for (j = 0; j < payload; j++, i++) + { + p861info->did_type7_data_block[didT7_index].payload[j] = p[i]; + } + // next type7 data block if it exists + p861info->total_did_type7db = ++didT7_index; + } + else if (ext_tag == NVT_CTA861_EXT_TAG_DID_TYPE_VIII) + { + if (payload <= 2) break; + + i++; payload--; + p861info->did_type8_data_block[didT8_index].version.revision = p[i] & 0x07; + p861info->did_type8_data_block[didT8_index].version.tcs = (p[i] & 0x08) >> 3; + p861info->did_type8_data_block[didT8_index].version.t8y420 = (p[i] & 0x20) >> 5; + p861info->did_type8_data_block[didT8_index].version.code_type = (p[i] & 0xC0) >> 6; + + //do not consider Byte 3 + i++; payload--; + + if (p861info->did_type8_data_block[didT8_index].version.tcs == 0) + p861info->did_type8_data_block[didT8_index].total_descriptors = (NvU8)payload; + else if 
(p861info->did_type8_data_block[didT8_index].version.tcs == 1) + p861info->did_type8_data_block[didT8_index].total_descriptors = (NvU8)(payload / 2); + + for (j = 0; j < payload; j++, i++) + { + p861info->did_type8_data_block[didT8_index].payload[j] = p[i]; + } + // next type7 data block if it exists + p861info->total_did_type8db = ++didT8_index; + } + else if (ext_tag == NVT_CTA861_EXT_TAG_DID_TYPE_X) + { + if (payload < 8 || ((payload-2) % 6 != 0 && (payload-2) % 7 != 0)) break; + + i++; payload--; + p861info->did_type10_data_block[didT10_index].version.revision = p[i] & 0x07; + p861info->did_type10_data_block[didT10_index].version.t10_m = (p[i] & 0x70) >> 4; + + // do not consider Byte 3 + i++; payload--; + + if (p861info->did_type10_data_block[didT10_index].version.t10_m == 0) + p861info->did_type10_data_block[didT10_index].total_descriptors = (NvU8)(payload / 6); + else if (p861info->did_type10_data_block[didT10_index].version.t10_m == 1) + p861info->did_type10_data_block[didT10_index].total_descriptors = (NvU8)(payload / 7); + + for (j = 0; j < payload; j++, i++) + { + p861info->did_type10_data_block[didT10_index].payload[j] = p[i]; + } + // next type10 data block if it exists + p861info->total_did_type10db = ++didT10_index; + } + else if(ext_tag == NVT_CTA861_EXT_TAG_SCDB && payload >= 7) // sizeof(HDMI Forum Sink Capability Data Block) ranges between 7 to 31 bytes + { + // As per HDMI2.1 A1 amendment Sink Capability Data Structure(SCDS) can alternatively be included in HDMI Forum Sink Capability Data Block(HF-SCDB), + // instead of HF-VSDB, to indicate HDMI2.1 capability. + // Sinks will expose HF-SCDB if they do not expose HF-VSDB. + + // move pointer to SCDS + i += 3; + + // Copy SCDS over to p861info->vsdb[vendor_index]. Parsing will later be handled in parseEdidHdmiForumVSDB(). + for (j = 0; (j < payload - 3) && (j < NVT_CTA861_EXT_SCDB_PAYLOAD_MAX_LENGTH); j ++, i ++) + { + p861info->hfscdb[j] = p[i]; + } + p861info->hfscdbSize = MIN(payload - 3, NVT_CTA861_EXT_SCDB_PAYLOAD_MAX_LENGTH); + p861info->valid.SCDB = 1; + } + else if (ext_tag == NVT_CTA861_EXT_TAG_HF_EEODB && payload == 2) + { + // Skip over extended tag + i++; payload--; + + p861info->hfeeodb = p[i]; + p861info->valid.HF_EEODB = 1; + i += payload; + } + else + { + // skip the unrecognized extended block + i += payload; + } + } + } + else + { + // reserved block, just skip here + i += payload; + } + } + + if (p861info) + { + p861info->total_vsdb = (NvU8)vendor_index; + } + + return NVT_STATUS_SUCCESS; +} + +// enum the EIA/CEA 861B predefined timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_EnumCEA861bTiming(NvU32 ceaFormat, NVT_TIMING *pT) +{ + if (pT == NULL || ceaFormat == 0 || ceaFormat > MAX_CEA861B_FORMAT) + { + return NVT_STATUS_ERR; + } + + ceaFormat = NVT_GET_CTA_8BIT_VIC(ceaFormat); + + if (ceaFormat ==0) + return NVT_STATUS_ERR; + + *pT = EIA861B[ceaFormat - 1]; + + // calculate the pixel clock + pT->pclk = RRx1kToPclk (pT); + pT->pclk1khz = (pT->pclk << 3) + (pT->pclk << 1); // *10 + NVT_SET_CEA_FORMAT(pT->etc.status, ceaFormat); + + NVT_SNPRINTF((char *)pT->etc.name, sizeof(pT->etc.name), "CTA-861G:#%3d:%dx%dx%3d.%03dHz/%s", (int)ceaFormat, (int)pT->HVisible, (int)((pT->interlaced ? 2 : 1)*pT->VVisible), (int)pT->etc.rrx1k/1000, (int)pT->etc.rrx1k%1000, (pT->interlaced ? "I":"P")); + pT->etc.name[sizeof(pT->etc.name) - 1] = '\0'; + + return NVT_STATUS_SUCCESS; +} + + +// Check whether the given timing is a CEA 861 timing. 
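/*
 * Illustrative sketch (not part of the patch), ahead of
 * NvTiming_GetCEA861TimingIndex() below: several CTA-861 VIC pairs share
 * identical rasters and differ only in picture aspect ratio (e.g. VIC 2/3,
 * 720x480p at 4:3 vs 16:9), so a raster match alone is ambiguous and the
 * candidate whose aspect ratio is closer to the timing's must win. The
 * helper below is a hypothetical, division-free version of that tie-break.
 */
#include <stdint.h>

static uint64_t absdiff_u64(uint64_t a, uint64_t b) { return a > b ? a - b : b - a; }

/* nonzero when aspect bx:by matches target tx:ty better than ax:ay does */
static int second_aspect_is_closer(uint32_t tx, uint32_t ty,
                                   uint32_t ax, uint32_t ay,
                                   uint32_t bx, uint32_t by)
{
    /* compare |tx/ty - ax/ay| with |tx/ty - bx/by| on a common denominator */
    uint64_t da = absdiff_u64((uint64_t)tx * ay, (uint64_t)ax * ty) * by;
    uint64_t db = absdiff_u64((uint64_t)tx * by, (uint64_t)bx * ty) * ay;
    return db < da;
}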
+CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_GetCEA861TimingIndex (NVT_TIMING *pT) +{ + NvU32 i = 0, j = 0; + NvU32 ceaIndex = 0; + NvU32 aspect_x; + NvU32 aspect_y; + + if (pT == NULL) + { + return ceaIndex; + } + + if (NVT_GET_CEA_FORMAT(pT->etc.status) != 0) + { + // CEA format has been set, done + return NVT_GET_CEA_FORMAT(pT->etc.status); + } + + aspect_x = nvt_aspect_x(pT->etc.aspect); + aspect_y = nvt_aspect_y(pT->etc.aspect); + + // loop through the pre-defined CEA 861 table + // Skip VIC1 - Although IT 640x480 video timing has a CE id, it is not a CE timing. See 3.1 + // "General Video Format Requirements" section in CEA-861-E spec + for (i = 1; i < MAX_CEA861B_FORMAT; i++) + { + if (NvTiming_IsTimingRelaxedEqual(pT, &EIA861B[i])) + { + // The timing matches with a CEA 861 timing. Set CEA format to NVT_TIMING.etc.status. + ceaIndex = NVT_GET_TIMING_STATUS_SEQ(EIA861B[i].etc.status); + + if (!aspect_x || !aspect_y) + { + return ceaIndex; + } + + // for the dual-aspect ratio timings we should further check the aspect ratio matching(16:9 or 4:3) based on the integer rounding error + for (j = 0; j < MAX_EIA861B_DUAL_ASPECT_VICS; j++) + { + if (ceaIndex == EIA861B_DUAL_ASPECT_VICS[j][0]) + { + NvU32 ceaIndex1 = EIA861B_DUAL_ASPECT_VICS[j][1]; + + NvU32 format1 = axb_div_c(aspect_x, nvt_aspect_y(EIA861B[ceaIndex - 1].etc.aspect), aspect_y); + NvU32 format2 = axb_div_c(aspect_x, nvt_aspect_y(EIA861B[ceaIndex1 - 1].etc.aspect), aspect_y); + + NvU32 format_1_diff = abs_delta(format1, nvt_aspect_x(EIA861B[ceaIndex - 1].etc.aspect)); + NvU32 format_2_diff = abs_delta(format2, nvt_aspect_x(EIA861B[ceaIndex1 - 1].etc.aspect)); + + if (format_2_diff < format_1_diff) + { + ceaIndex = ceaIndex1; + } + break; + } + else if (ceaIndex < EIA861B_DUAL_ASPECT_VICS[j][0]) // not a dual-dspect ratio timing + { + break; + } + } + break; + } + } + return ceaIndex; +} + +// calculate 861B based timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcCEA861bTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NvU32 pixelRepeatCount, NVT_TIMING *pT) + +{ + NvU32 i = 0; + NvU16 pixelRepeatMask = 1 << (pixelRepeatCount - 1); + + nvt_assert(pixelRepeatCount > 0 && pixelRepeatCount <= 10); + + if (pT == NULL) + { + return NVT_STATUS_ERR; + } + + // loop through the table + for (i = 0; i < MAX_CEA861B_FORMAT; i ++) + { + if ((EIA861B[i].etc.rep & pixelRepeatMask) == 0) + { + continue; + } + + if (width == (NvU32)NvTiming_MaxFrameWidth(EIA861B[i].HVisible, pixelRepeatMask) && + height == frame_height(EIA861B[i])&& + rr == EIA861B[i].etc.rr && + (!!(flag & NVT_PVT_INTERLACED_MASK)) == (!!EIA861B[i].interlaced)) + { + *pT = EIA861B[i]; + + // calculate the pixel clock + pT->pclk = RRx1kToPclk (pT); + pT->pclk1khz = RRx1kToPclk1khz (pT); + + NVT_SET_CEA_FORMAT(pT->etc.status, NVT_GET_TIMING_STATUS_SEQ(pT->etc.status)); + + NVT_SNPRINTF((char *)pT->etc.name, sizeof(pT->etc.name), "CTA-861G:#%3d:%dx%dx%3d.%03dHz/%s", (int)NVT_GET_TIMING_STATUS_SEQ(pT->etc.status), (int)pT->HVisible, (int)((pT->interlaced ? 2 : 1)*pT->VVisible), (int)pT->etc.rrx1k/1000, (int)pT->etc.rrx1k%1000, (pT->interlaced ? 
"I":"P")); + pT->etc.name[sizeof(pT->etc.name) - 1] = '\0'; + + return NVT_STATUS_SUCCESS; + } + } + + return NVT_STATUS_ERR; + +} + +// Assign fields in NVT_VIDEO_INFOFRAME_CTRL, using NVT_TIMING +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ConstructVideoInfoframeCtrl(const NVT_TIMING *pTiming, NVT_VIDEO_INFOFRAME_CTRL *pCtrl) +{ + // setup VIC code it is not specified + if (pCtrl->video_format_id == NVT_INFOFRAME_CTRL_DONTCARE || + pCtrl->video_format_id == 0 || + pCtrl->video_format_id > NVT_CEA861_1920X1080P_29970HZ_16X9) + { + // setup video format ID + pCtrl->video_format_id = (NvU8)NVT_GET_CEA_FORMAT(pTiming->etc.status); + if (pCtrl->video_format_id < NVT_CEA861_640X480P_59940HZ_4X3 || + pCtrl->video_format_id > NVT_CTA861_4096x2160p_119880HZ_256X135) + { + // Prior RFE 543088 + if (pCtrl->video_format_id == 0 && + !(NVT_FRR_TIMING_IS_OVT(pTiming->etc.flag) || NVT_TIMING_IS_OVT(pTiming->etc.flag)) && + NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status) == NVT_TYPE_EDID_861ST) + { + pCtrl->video_format_id = (NvU8)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status); + } + if (pCtrl->video_format_id == 0 && + pTiming->HVisible == 640 && + pTiming->VVisible == 480 && + pTiming->interlaced == 0 && + pTiming->etc.rr == 60) + { + pCtrl->video_format_id = NVT_CEA861_640X480P_59940HZ_4X3; + } + } + } + + // setup RID code + if (pCtrl->rid == NVT_INFOFRAME_CTRL_DONTCARE) + { + if (NVT_TYPE_EDID_861ST == NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status) && + NVT_TIMING_IS_OVT(pTiming->etc.flag)) + { + NvU8 ridIdx = 0; + + // get the correct rid from the name string = CTA861-OVT'%d':xxx + // %d value shall included two digital or one digital character + if (pTiming->etc.name[11] == ':') + { + ridIdx = pTiming->etc.name[10] - '0'; + } + else + { + ridIdx = 10 * (pTiming->etc.name[10] - '0') + (pTiming->etc.name[11] - '0'); + } + + if (ridIdx >= NVT_CTA861_RID_1280x720p_16x9 && + ridIdx < NVT_CTA861_RID_EXCEED_RESOLUTION) + { + pCtrl->rid = ridIdx; + } + else + { + pCtrl->rid = NVT_INFOFRAME_CTRL_DONTCARE; + } + } + } + + // setup Video Format Frame Rate + if (pCtrl->rid != NVT_INFOFRAME_CTRL_DONTCARE) + { + switch (pTiming->etc.rr) + { + case 24: + pCtrl->frame_rate = NVT_CTA861_FR_2400; + if (NVT_FRR_TIMING_IS_OVT(pTiming->etc.flag)) + { + pCtrl->frame_rate = NVT_CTA861_FR_2398; + } + break; + case 25: + pCtrl->frame_rate = NVT_CTA861_FR_2500; + break; + case 30: + pCtrl->frame_rate = NVT_CTA861_FR_3000; + if (NVT_FRR_TIMING_IS_OVT(pTiming->etc.flag)) + { + pCtrl->frame_rate = NVT_CTA861_FR_2997; + } + break; + case 48: + pCtrl->frame_rate = NVT_CTA861_FR_48000; + if (NVT_FRR_TIMING_IS_OVT(pTiming->etc.flag)) + { + pCtrl->frame_rate = NVT_CTA861_FR_4795; + } + break; + case 50: + pCtrl->frame_rate = NVT_CTA861_FR_5000; + break; + case 60: + pCtrl->frame_rate = NVT_CTA861_FR_6000; + if (NVT_FRR_TIMING_IS_OVT(pTiming->etc.flag)) + { + pCtrl->frame_rate = NVT_CTA861_FR_5994; + } + break; + case 100: + pCtrl->frame_rate = NVT_CTA861_FR_10000; + break; + case 120: + pCtrl->frame_rate = NVT_CTA861_FR_12000; + if (NVT_FRR_TIMING_IS_OVT(pTiming->etc.flag)) + { + pCtrl->frame_rate = NVT_CTA861_FR_11988; + } + break; + case 144: + pCtrl->frame_rate = NVT_CTA861_FR_14400; + if (NVT_FRR_TIMING_IS_OVT(pTiming->etc.flag)) + { + pCtrl->frame_rate = NVT_CTA861_FR_14386; + } + break; + case 200: + pCtrl->frame_rate = NVT_CTA861_FR_20000; + break; + case 240: + pCtrl->frame_rate = NVT_CTA861_FR_24000; + if (NVT_FRR_TIMING_IS_OVT(pTiming->etc.flag)) + { + pCtrl->frame_rate = NVT_CTA861_FR_23976; + } + break; + case 
300: + pCtrl->frame_rate = NVT_CTA861_FR_30000; + break; + case 360: + pCtrl->frame_rate = NVT_CTA861_FR_36000; + if (NVT_FRR_TIMING_IS_OVT(pTiming->etc.flag)) + { + pCtrl->frame_rate = NVT_CTA861_FR_35964; + } + break; + case 400: + pCtrl->frame_rate = NVT_CTA861_FR_40000; + break; + case 480: + pCtrl->frame_rate = NVT_CTA861_FR_48000; + if (NVT_FRR_TIMING_IS_OVT(pTiming->etc.flag)) + { + pCtrl->frame_rate = NVT_CTA861_FR_47952; + } + break; + default: + pCtrl->frame_rate = NVT_INFOFRAME_CTRL_DONTCARE; + break; + } + } + + // for HDMI_EXT timing, AVI VIC should be 0. + if (NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status) == NVT_TYPE_HDMI_EXT) + { + pCtrl->video_format_id = 0; + } + + // set up the aspect ratio if it is not specified + if (pCtrl->pic_aspect_ratio == NVT_INFOFRAME_CTRL_DONTCARE || + pCtrl->pic_aspect_ratio == NVT_VIDEO_INFOFRAME_BYTE2_M1M0_NO_DATA || + pCtrl->pic_aspect_ratio > NVT_VIDEO_INFOFRAME_BYTE2_M1M0_FUTURE) + { + // extract the screen measurements from the DTD aspect ratio. + // (we pack the height & width in a DWORD to form the aspect ratio) + + NvU32 x,y; + x = (pTiming->etc.aspect & 0x0fff); + y = ((pTiming->etc.aspect >> 16) & 0x0fff); + + if (axb_div_c(y,3,x) == 4) + { + pCtrl->pic_aspect_ratio = NVT_VIDEO_INFOFRAME_BYTE2_M1M0_4X3; + } + else if (axb_div_c(y,9,x) == 16) + { + pCtrl->pic_aspect_ratio = NVT_VIDEO_INFOFRAME_BYTE2_M1M0_16X9; + } + else if (pCtrl->video_format_id == NVT_CEA861_640X480P_59940HZ_4X3) + { + pCtrl->pic_aspect_ratio = NVT_VIDEO_INFOFRAME_BYTE2_M1M0_4X3; + } + else + { + // default to no data if there is no match, to cover other non-CTA modes + pCtrl->pic_aspect_ratio = NVT_VIDEO_INFOFRAME_BYTE2_M1M0_NO_DATA; + } + } + + if (pCtrl->it_content == NVT_INFOFRAME_CTRL_DONTCARE) + { + // Initialize ITC flag to NVT_VIDEO_INFOFRAME_BYTE3_ITC_IT_CONTENT + pCtrl->it_content = NVT_VIDEO_INFOFRAME_BYTE3_ITC_IT_CONTENT; + pCtrl->it_content_type = NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_GRAPHICS; + } + + if (pCtrl->pixel_repeat == NVT_INFOFRAME_CTRL_DONTCARE) + { + // Initialize pixel repetitions + NvU32 pixelRepeat = pTiming->etc.rep; + LOWESTBITIDX_32(pixelRepeat); + pCtrl->pixel_repeat = (NvU8)pixelRepeat; + } + + return NVT_STATUS_SUCCESS; +} + + +// construct AVI video infoframe based on the user control and the current context state +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ConstructVideoInfoframe(NVT_EDID_INFO *pEdidInfo, NVT_VIDEO_INFOFRAME_CTRL *pCtrl, NVT_VIDEO_INFOFRAME *pContext, NVT_VIDEO_INFOFRAME *pInfoFrame) +{ + // parameter check + if (pEdidInfo == NULL || pInfoFrame == NULL) + { + return NVT_STATUS_ERR; + } + + // infoframe is only supported on 861A and later + if (pEdidInfo->ext861.revision < NVT_CEA861_REV_A) + { + return NVT_STATUS_ERR; + } + + // if context state is provided, use it to initialize the infoframe buffer + if (pContext != NULL) + { + *pInfoFrame = *pContext; + } + else + { + *pInfoFrame = DEFAULT_VIDEO_INFOFRAME; + } + + // init the header + pInfoFrame->type = NVT_INFOFRAME_TYPE_VIDEO; + + // see 6.4 Format of Version 2, 3, and 4 AVI InfoFrames in CTA861-I + if (pCtrl) + { + if (nvt_get_bits(pInfoFrame->byte1, NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_MASK, NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_SHIFT) <= NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_YCbCr420) // this shall always be 0. + { + if ((pCtrl->rid != NVT_CTA861_RID_NONE) || (pCtrl->frame_rate != NVT_CTA861_FR_NO_DATA)) + { + pInfoFrame->version = NVT_VIDEO_INFOFRAME_VERSION_4; // logic to select the correct version 4; it shall not be used at the current stage.
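/*
 * Illustrative sketch (not part of the patch) of the version selection this
 * branch implements, per the CTA-861 AVI InfoFrame rules cited above:
 * version 4 when a RID/frame-rate or the additional colorimetry extension
 * must be signalled, version 3 when the VIC needs bit 7 (VIC >= 128),
 * version 2 for CTA-861B-and-later sinks, else version 1. The flags below
 * are hypothetical stand-ins for the NVT_* tests used here.
 */
#include <stdint.h>

static uint8_t avi_version(int uses_rid_or_fr, int uses_ace, uint8_t vic, int sink_is_861b)
{
    if (uses_rid_or_fr || uses_ace) return 4; /* RID/FR or ACE fields present */
    if (vic & 0x80)                 return 3; /* VIC7 set: needs version 3   */
    return sink_is_861b ? 2 : 1;
}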
+ pInfoFrame->length = sizeof(NVT_VIDEO_INFOFRAME) - sizeof(NVT_INFOFRAME_HEADER); // Length == 15 + } + else + if ((nvt_get_bits(pInfoFrame->byte2, NVT_VIDEO_INFOFRAME_BYTE2_C1C0_MASK, NVT_VIDEO_INFOFRAME_BYTE2_C1C0_SHIFT) == NVT_VIDEO_INFOFRAME_BYTE2_C1C0_EXT_COLORIMETRY) && + // EC2-0 is based on section 7.5.5 of CTA861-G, which defines whether the DCI-P3 bit is set at byte4 + (nvt_get_bits(pInfoFrame->byte3, NVT_VIDEO_INFOFRAME_BYTE3_EC_MASK, NVT_VIDEO_INFOFRAME_BYTE3_EC_SHIFT) == NVT_VIDEO_INFOFRAME_BYTE3_EC_AdditionalColorExt)) + { + pInfoFrame->version = NVT_VIDEO_INFOFRAME_VERSION_4; // logic to select the correct version 4; it shall not be used at the current stage. + pInfoFrame->length = 14; + } + else + { + pInfoFrame->version = (((pCtrl->video_format_id & NVT_VIDEO_INFOFRAME_BYTE4_VIC7) != 0) ? NVT_VIDEO_INFOFRAME_VERSION_3 : + ((pEdidInfo->ext861.revision >= NVT_CEA861_REV_B) ? NVT_VIDEO_INFOFRAME_VERSION_2 : NVT_VIDEO_INFOFRAME_VERSION_1)); + pInfoFrame->length = 13; + } + } + else // Y=7: the IDO defines the C, EC, and ACE fields. In this case the Source shall set the AVI InfoFrame Version field to no less than 3 + { + pInfoFrame->version = NVT_VIDEO_INFOFRAME_VERSION_4; + pInfoFrame->length = 14; + } + } + else + { + pInfoFrame->version = (pEdidInfo->ext861.revision >= NVT_CEA861_REV_B) ? NVT_VIDEO_INFOFRAME_VERSION_2 : NVT_VIDEO_INFOFRAME_VERSION_1; + pInfoFrame->length = 13; + } + + if (pInfoFrame->version < NVT_VIDEO_INFOFRAME_VERSION_3) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, 0, NVT_VIDEO_INFOFRAME_BYTE1_RESERVED_MASK, NVT_VIDEO_INFOFRAME_BYTE1_RESERVED_SHIFT); + } + + if (pInfoFrame->version == NVT_VIDEO_INFOFRAME_VERSION_2) + { + nvt_nvu8_set_bits(pInfoFrame->byte4, 0, NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V2_MASK, NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V2_SHIFT); + } + else if (pInfoFrame->version == NVT_VIDEO_INFOFRAME_VERSION_1) + { + nvt_nvu8_set_bits(pInfoFrame->byte3, 0, NVT_VIDEO_INFOFRAME_BYTE3_RESERVED_V1_MASK, NVT_VIDEO_INFOFRAME_BYTE3_RESERVED_V1_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->byte4, 0, NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V1_MASK, NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V1_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->byte5, 0, NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V1_MASK, NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V1_SHIFT); + } + + // construct the desired infoframe contents based on the control + if (pCtrl) + { + // byte 1 + if (pCtrl->color_space != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, pCtrl->color_space, NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_MASK, NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_SHIFT); + } + + if (pCtrl->active_format_info_present != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, pCtrl->active_format_info_present, NVT_VIDEO_INFOFRAME_BYTE1_A0_MASK, NVT_VIDEO_INFOFRAME_BYTE1_A0_SHIFT); + } + + if (pCtrl->bar_info != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, pCtrl->bar_info, NVT_VIDEO_INFOFRAME_BYTE1_B1B0_MASK, NVT_VIDEO_INFOFRAME_BYTE1_B1B0_SHIFT); + } + + if (pCtrl->scan_info != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, pCtrl->scan_info, NVT_VIDEO_INFOFRAME_BYTE1_S1S0_MASK, NVT_VIDEO_INFOFRAME_BYTE1_S1S0_SHIFT); + } + + // byte 2 + if (pCtrl->colorimetry != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte2, pCtrl->colorimetry, NVT_VIDEO_INFOFRAME_BYTE2_C1C0_MASK, NVT_VIDEO_INFOFRAME_BYTE2_C1C0_SHIFT); + } + + if (pCtrl->pic_aspect_ratio != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte2, pCtrl->pic_aspect_ratio,
NVT_VIDEO_INFOFRAME_BYTE2_M1M0_MASK, NVT_VIDEO_INFOFRAME_BYTE2_M1M0_SHIFT); + } + + if (pCtrl->active_format_aspect_ratio != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte2, pCtrl->active_format_aspect_ratio, NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_MASK, NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_SHIFT); + } + + // byte 3 + if (pCtrl->it_content != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte3, pCtrl->it_content, NVT_VIDEO_INFOFRAME_BYTE3_ITC_MASK, NVT_VIDEO_INFOFRAME_BYTE3_ITC_SHIFT); + } + + if (pCtrl->extended_colorimetry != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte3, pCtrl->extended_colorimetry, NVT_VIDEO_INFOFRAME_BYTE3_EC_MASK, NVT_VIDEO_INFOFRAME_BYTE3_EC_SHIFT); + } + + if (pCtrl->rgb_quantization_range != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte3, pCtrl->rgb_quantization_range, NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_MASK, NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_SHIFT); + } + + if (pCtrl->nonuniform_scaling != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte3, pCtrl->nonuniform_scaling, NVT_VIDEO_INFOFRAME_BYTE3_SC_MASK, NVT_VIDEO_INFOFRAME_BYTE3_SC_SHIFT); + } + + // byte 4 and byte 5 only supported on InfoFrame 2.0 + if (pInfoFrame->version >= NVT_VIDEO_INFOFRAME_VERSION_2) + { + // byte 4 + if (pCtrl->video_format_id != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte4, pCtrl->video_format_id, NVT_VIDEO_INFOFRAME_BYTE4_VIC_MASK, NVT_VIDEO_INFOFRAME_BYTE4_VIC_SHIFT); + } + + // byte 5 + if (pCtrl->pixel_repeat != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte5, pCtrl->pixel_repeat, NVT_VIDEO_INFOFRAME_BYTE5_PR_MASK, NVT_VIDEO_INFOFRAME_BYTE5_PR_SHIFT); + } + + // byte5 + if (pCtrl->it_content_type != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte5, pCtrl->it_content_type, NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_MASK, NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_SHIFT); + } + } + + // byte 6~13, bar info + if (pCtrl->top_bar != 0xFFFF) + { + pInfoFrame->top_bar_low = (NvU8)(pCtrl->top_bar % 0x100); + pInfoFrame->top_bar_high = (NvU8)(pCtrl->top_bar / 0x100); + } + if (pCtrl->bottom_bar != 0xFFFF) + { + pInfoFrame->bottom_bar_low = (NvU8)(pCtrl->bottom_bar % 0x100); + pInfoFrame->bottom_bar_high = (NvU8)(pCtrl->bottom_bar / 0x100); + } + if (pCtrl->left_bar != 0xFFFF) + { + pInfoFrame->left_bar_low = (NvU8)(pCtrl->left_bar % 0x100); + pInfoFrame->left_bar_high = (NvU8)(pCtrl->left_bar / 0x100); + } + if (pCtrl->right_bar != 0xFFFF) + { + pInfoFrame->right_bar_low = (NvU8)(pCtrl->right_bar % 0x100); + pInfoFrame->right_bar_high = (NvU8)(pCtrl->right_bar / 0x100); + } + + // byte 14-15 + if (pInfoFrame->version >= NVT_VIDEO_INFOFRAME_VERSION_4) + { + if (pCtrl->addition_colorimetry_ext != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte14, pCtrl->addition_colorimetry_ext, NVT_VIDEO_INFOFRAME_BYTE14_ACE0_3_MASK, NVT_VIDEO_INFOFRAME_BYTE14_ACE0_3_SHIFT); + } + + if (pCtrl->frame_rate != NVT_INFOFRAME_CTRL_DONTCARE) + { + // Frame rate + nvt_nvu8_set_bits(pInfoFrame->byte14, pCtrl->frame_rate, NVT_VIDEO_INFOFRAME_BYTE14_FR0_FR3_MASK, NVT_VIDEO_INFOFRAME_BYTE14_FR0_FR3_SHIFT); + pInfoFrame->byte15 &= NVT_VIDEO_INFOFRAME_BYTE15_FR4_MASK^0xFFU; + pInfoFrame->byte15 |= ((pCtrl->frame_rate & NVT_VIDEO_INFOFRAME_BYTE14_FR4_ONE_BIT_MASK) << NVT_VIDEO_INFOFRAME_BYTE15_FR4_SHIFT) & NVT_VIDEO_INFOFRAME_BYTE15_FR4_MASK; + } + + if (pCtrl->rid != NVT_INFOFRAME_CTRL_DONTCARE) + { + // RID + nvt_nvu8_set_bits(pInfoFrame->byte15, 
pCtrl->rid, NVT_VIDEO_INFOFRAME_BYTE15_RID_MASK, NVT_VIDEO_INFOFRAME_BYTE15_RID_SHIFT); + } + } + else // version 2 or 3 + { + pInfoFrame->byte14 = 0; + pInfoFrame->byte15 = 0; + } + } + + return NVT_STATUS_SUCCESS; +} + +// construct AVI audio infoframe based on the user control and the current context state +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ConstructAudioInfoframe(NVT_AUDIO_INFOFRAME_CTRL *pUserCtrl, NVT_AUDIO_INFOFRAME *pContext, NVT_AUDIO_INFOFRAME *pInfoFrame) +{ + NVT_AUDIO_INFOFRAME_CTRL ctrl; + + // parameter check + if (pInfoFrame == NULL) + { + return NVT_STATUS_ERR; + } + + // use the user provided control if possible + if (pUserCtrl) + { + ctrl = *pUserCtrl; + } + else + { + // otherwise use the default control + NVMISC_MEMSET(&ctrl, NVT_INFOFRAME_CTRL_DONTCARE, sizeof(ctrl)); + } + + // if context state is provided, use it to initialize the infoframe buffer + if (pContext != NULL) + { + *pInfoFrame = *pContext; + } + else + { + *pInfoFrame = DEFAULT_AUDIO_INFOFRAME; + + // if the context state is not provide, we should user EDID info to build a default ctrl + //buildDefaultAudioInfoframeCtrl(pEdidInfo, &ctrl); + } + + // init the header + pInfoFrame->type = NVT_INFOFRAME_TYPE_AUDIO; + pInfoFrame->version = NVT_VIDEO_INFOFRAME_VERSION_1; + pInfoFrame->length = sizeof(NVT_AUDIO_INFOFRAME) - sizeof(NVT_INFOFRAME_HEADER); + + // init the reserved fields + nvt_nvu8_set_bits(pInfoFrame->byte1, 0, NVT_AUDIO_INFOFRAME_BYTE1_RESERVED_MASK, NVT_AUDIO_INFOFRAME_BYTE1_RESERVED_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->byte2, 0, NVT_AUDIO_INFOFRAME_BYTE2_RESERVED_MASK, NVT_AUDIO_INFOFRAME_BYTE2_RESERVED_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->byte5, 0, NVT_AUDIO_INFOFRAME_BYTE5_RESERVED_MASK, NVT_AUDIO_INFOFRAME_BYTE5_RESERVED_SHIFT); + pInfoFrame->rsvd_byte6 = 0; + pInfoFrame->rsvd_byte7 = 0; + pInfoFrame->rsvd_byte8 = 0; + pInfoFrame->rsvd_byte9 = 0; + pInfoFrame->rsvd_byte10 = 0; + + // byte 1 + if (ctrl.channel_count != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, ctrl.channel_count, NVT_AUDIO_INFOFRAME_BYTE1_CC_MASK, NVT_AUDIO_INFOFRAME_BYTE1_CC_SHIFT); + } + + if (ctrl.coding_type != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, ctrl.coding_type, NVT_AUDIO_INFOFRAME_BYTE1_CT_MASK, NVT_AUDIO_INFOFRAME_BYTE1_CT_SHIFT); + } + + // byte 2 + if (ctrl.sample_depth != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte2, ctrl.sample_depth, NVT_AUDIO_INFOFRAME_BYTE2_SS_MASK, NVT_AUDIO_INFOFRAME_BYTE2_SS_SHIFT); + } + + if (ctrl.sample_rate != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte2, ctrl.sample_rate, NVT_AUDIO_INFOFRAME_BYTE2_SF_MASK, NVT_AUDIO_INFOFRAME_BYTE2_SF_SHIFT); + } + + // byte 3 + pInfoFrame->byte3 = 0; + + // byte 4 + if (ctrl.speaker_placement != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte4, ctrl.speaker_placement, NVT_AUDIO_INFOFRAME_BYTE4_CA_MASK, NVT_AUDIO_INFOFRAME_BYTE4_CA_SHIFT); + } + + // byte 5 + if (ctrl.level_shift != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte5, ctrl.level_shift, NVT_AUDIO_INFOFRAME_BYTE5_LSV_MASK, NVT_AUDIO_INFOFRAME_BYTE5_LSV_SHIFT); + } + + if (ctrl.down_mix_inhibit != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte5, ctrl.down_mix_inhibit, NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_MASK, NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_SHIFT); + } + + + return NVT_STATUS_SUCCESS; + +} + +// Construct Vendor Specific Infoframe +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS 
NvTiming_ConstructVendorSpecificInfoframe(NVT_EDID_INFO *pEdidInfo, NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL *pCtrl, NVT_VENDOR_SPECIFIC_INFOFRAME *pInfoFrame) +{ + NVT_STATUS RetCode = NVT_STATUS_SUCCESS; + NvU8 optIdx = 0; + NvU8 HDMIFormat; + + // parameter check + if (pEdidInfo == NULL || pInfoFrame == NULL || pCtrl == NULL) + { + return NVT_STATUS_INVALID_PARAMETER; + } + + // infoframe is only supported on 861A and later + if (pEdidInfo->ext861.revision < NVT_CEA861_REV_A) + { + return NVT_STATUS_ERR; + } + + + // initialize the infoframe buffer + nvt_nvu8_set_bits(pInfoFrame->Header.type, NVT_HDMI_VS_HB0_VALUE, NVT_HDMI_VS_HB0_MASK, NVT_HDMI_VS_HB0_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->Header.version, NVT_HDMI_VS_HB1_VALUE, NVT_HDMI_VS_HB1_MASK, NVT_HDMI_VS_HB1_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->Header.length, NVT_HDMI_VS_HB2_VALUE, NVT_HDMI_VS_HB2_MASK, NVT_HDMI_VS_HB2_SHIFT); + + if (pCtrl->VSIFVersion == NVT_VSIF_VERSION_H14B_VSIF) + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte1, NVT_HDMI_VS_BYTE1_OUI_VER_1_4, NVT_HDMI_VS_BYTE1_OUI_MASK, NVT_HDMI_VS_BYTE1_OUI_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->Data.byte2, NVT_HDMI_VS_BYTE2_OUI_VER_1_4, NVT_HDMI_VS_BYTE2_OUI_MASK, NVT_HDMI_VS_BYTE2_OUI_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->Data.byte3, NVT_HDMI_VS_BYTE3_OUI_VER_1_4, NVT_HDMI_VS_BYTE3_OUI_MASK, NVT_HDMI_VS_BYTE3_OUI_SHIFT); + } + else if (pCtrl->VSIFVersion == NVT_VSIF_VERSION_HF_VSIF) + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte1, NVT_HDMI_VS_BYTE1_OUI_VER_2_0, NVT_HDMI_VS_BYTE1_OUI_MASK, NVT_HDMI_VS_BYTE1_OUI_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->Data.byte2, NVT_HDMI_VS_BYTE2_OUI_VER_2_0, NVT_HDMI_VS_BYTE2_OUI_MASK, NVT_HDMI_VS_BYTE2_OUI_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->Data.byte3, NVT_HDMI_VS_BYTE3_OUI_VER_2_0, NVT_HDMI_VS_BYTE3_OUI_MASK, NVT_HDMI_VS_BYTE3_OUI_SHIFT); + } + + // init the header (mostly done in default Infoframe) + pInfoFrame->Header.length = offsetof(NVT_VENDOR_SPECIFIC_INFOFRAME_PAYLOAD, optionalBytes); + + // construct the desired infoframe contents based on the control + + // clear all static reserved fields + nvt_nvu8_set_bits(pInfoFrame->Data.byte4, 0, NVT_HDMI_VS_BYTE4_RSVD_MASK, NVT_HDMI_VS_BYTE4_RSVD_SHIFT); + + // setup the parameters + nvt_nvu8_set_bits(pInfoFrame->Data.byte4, pCtrl->HDMIFormat, NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_MASK, NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_SHIFT); + + // determine what the format is -- if disabled, force the format to NONE. + if (pCtrl->Enable) + { + HDMIFormat = pCtrl->HDMIFormat; + } + else + { + HDMIFormat = NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_NONE; + } + + switch(HDMIFormat) + { + case NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_NONE: + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, 0, NVT_HDMI_VS_BYTENv_RSVD_MASK, NVT_HDMI_VS_BYTENv_RSVD_SHIFT); + break; + } + case NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_EXT: + { + // Note: extended resolution frames are not yet fully supported + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, pCtrl->HDMI_VIC, NVT_HDMI_VS_BYTE5_HDMI_VIC_MASK, NVT_HDMI_VS_BYTE5_HDMI_VIC_SHIFT); + break; + } + case NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_3D: + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, 0, NVT_HDMI_VS_BYTE5_HDMI_RSVD_MASK, NVT_HDMI_VS_BYTE5_HDMI_RSVD_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, pCtrl->ThreeDStruc, NVT_HDMI_VS_BYTE5_HDMI_3DS_MASK, NVT_HDMI_VS_BYTE5_HDMI_3DS_SHIFT); + + // side by side half requires additional format data in the infoframe. 
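/*
 * Illustrative sketch (not part of the patch): the three OUI bytes written
 * near the top of this function carry the organization ID least significant
 * byte first; the HDMI 1.4 (LLC) OUI is 0x000C03 and the HDMI Forum OUI is
 * 0xC45DD8, which the NVT_HDMI_VS_BYTE1/2/3_OUI_VER_* values above are
 * assumed to encode. The helper below is hypothetical.
 */
#include <stdint.h>

static void write_oui(uint8_t payload[3], uint32_t oui)
{
    payload[0] = (uint8_t)(oui & 0xFF);          /* byte1: OUI LSB */
    payload[1] = (uint8_t)((oui >> 8) & 0xFF);   /* byte2          */
    payload[2] = (uint8_t)((oui >> 16) & 0xFF);  /* byte3: OUI MSB */
}
/* write_oui(p, 0x000C03) emits 03 0C 00 on the wire for an H14B VSIF. */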
+ if (NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF == pCtrl->ThreeDStruc) + { + nvt_nvu8_set_bits(pInfoFrame->Data.optionalBytes[optIdx], pCtrl->ThreeDDetail, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_MASK, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SHIFT); + optIdx++; + } + if (pCtrl->MetadataPresent) + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_PRES, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_MASK, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_SHIFT); + + switch(pCtrl->MetadataType) + { + case NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_PARALLAX: + { + if (sizeof(pCtrl->Metadata) >= NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX && + sizeof(pInfoFrame->Data.optionalBytes) - (optIdx + 1) >= NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX) + { + nvt_nvu8_set_bits(pInfoFrame->Data.optionalBytes[optIdx], NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX, NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_MASK, NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->Data.optionalBytes[optIdx], NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_PARALLAX, NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_MASK, NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_SHIFT); + ++optIdx; + + NVMISC_MEMCPY(pCtrl->Metadata, &pInfoFrame->Data.optionalBytes[optIdx], NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX); + optIdx += NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX; + } + else + { + // not enough data in the control struct or not enough room in the infoframe -- BOTH compile time issues!! + // ignore metadata. + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_NOTPRES, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_MASK, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_SHIFT); + } + break; + } + default: + { + // unrecognised metadata, recover the best we can. + // note -- can not copy whatever is there because type implies length. + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_NOTPRES, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_MASK, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_SHIFT); + RetCode = NVT_STATUS_ERR; + } + } + + } + else + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_NOTPRES, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_MASK, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_SHIFT); + } + break; + } + + } + + if (pCtrl->ALLMEnable == 1) + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, NVT_HDMI_VS_BYTE5_ALLM_MODE_EN, NVT_HDMI_VS_BYTE5_ALLM_MODE_MASK, NVT_HDMI_VS_BYTE5_ALLM_MODE_SHIFT); + } + else if (pCtrl->ALLMEnable == 0) + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, NVT_HDMI_VS_BYTE5_ALLM_MODE_DIS, NVT_HDMI_VS_BYTE5_ALLM_MODE_MASK, NVT_HDMI_VS_BYTE5_ALLM_MODE_SHIFT); + } + + // clear last byte of infoframe (reserved per spec). 
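/*
 * Illustrative sketch (not part of the patch): in an H14B VSIF the 3D
 * structure code sits in the high nibble of packet byte 5, and for
 * side-by-side (half) one extra optional byte carries 3D_Ext_Data in its
 * high nibble, which is why optIdx is advanced above. Helper names are
 * hypothetical; field layout per HDMI 1.4b.
 */
#include <stdint.h>

static uint8_t pack_3d_structure(uint8_t structure_code) { return (uint8_t)((structure_code & 0x0F) << 4); }
static uint8_t pack_3d_ext_data(uint8_t ext_data)        { return (uint8_t)((ext_data & 0x0F) << 4); }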
+ pInfoFrame->Header.length += optIdx + 1; + for (; optIdx < sizeof(pInfoFrame->Data.optionalBytes); ++optIdx) + { + nvt_nvu8_set_bits(pInfoFrame->Data.optionalBytes[optIdx], NVT_HDMI_VS_BYTENv_RSVD, NVT_HDMI_VS_BYTENv_RSVD_MASK, NVT_HDMI_VS_BYTENv_RSVD_SHIFT); + } + + return RetCode; +} + +// Construct Extended Metadata Packet Infoframe +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ConstructExtendedMetadataPacketInfoframe( + NVT_EXTENDED_METADATA_PACKET_INFOFRAME_CTRL *pCtrl, + NVT_EXTENDED_METADATA_PACKET_INFOFRAME *pInfoFrame) +{ + NVT_STATUS RetCode = NVT_STATUS_SUCCESS; + if (!pCtrl || !pInfoFrame) + { + return NVT_STATUS_INVALID_PARAMETER; + } + + // Initialize the infoframe + NVMISC_MEMSET(pInfoFrame, 0, sizeof(*pInfoFrame)); + + // Construct an infoframe to enable or disable HDMI 2.1 VRR + pInfoFrame->Header.type = NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET; + pInfoFrame->Header.firstLast = NVT_EMP_HEADER_FIRST_LAST; + pInfoFrame->Header.sequenceIndex = 0x00; + + if (pCtrl->Sync) + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte1, NVT_HDMI_EMP_BYTE1_SYNC_ENABLE, + NVT_HDMI_EMP_BYTE1_SYNC_MASK, + NVT_HDMI_EMP_BYTE1_SYNC_SHIFT); + } + + nvt_nvu8_set_bits(pInfoFrame->Data.byte1, NVT_HDMI_EMP_BYTE1_VFR_ENABLE, + NVT_HDMI_EMP_BYTE1_VFR_MASK, + NVT_HDMI_EMP_BYTE1_VFR_SHIFT); + + nvt_nvu8_set_bits(pInfoFrame->Data.byte1, NVT_HDMI_EMP_BYTE1_NEW_ENABLE, + NVT_HDMI_EMP_BYTE1_NEW_MASK, + NVT_HDMI_EMP_BYTE1_NEW_SHIFT); + + if (!pCtrl->EnableVRR && !pCtrl->EnableQMS) + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte1, NVT_HDMI_EMP_BYTE1_END_ENABLE, + NVT_HDMI_EMP_BYTE1_END_MASK, + NVT_HDMI_EMP_BYTE1_END_SHIFT); + } + + nvt_nvu8_set_bits(pInfoFrame->Data.byte3, + NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_SPEC_DEFINED, + NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_MASK, + NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_SHIFT); + + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, 1, + NVT_HDMI_EMP_BYTE5_DATA_SET_TAG_LSB_MASK, + NVT_HDMI_EMP_BYTE5_DATA_SET_TAG_LSB_SHIFT); + + nvt_nvu8_set_bits(pInfoFrame->Data.byte7, ((pCtrl->EnableVRR || pCtrl->EnableQMS) ? 4 : 0), + NVT_HDMI_EMP_BYTE7_DATA_SET_LENGTH_LSB_MASK, + NVT_HDMI_EMP_BYTE7_DATA_SET_LENGTH_LSB_SHIFT); + + if (pCtrl->EnableVRR) + { + nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[0], + NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_ENABLE, + NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_MASK, + NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_SHIFT); + } + else if (pCtrl->EnableQMS) + { + nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[0], pCtrl->MConst, + NVT_HDMI_EMP_BYTE8_MD0_M_CONST_MASK, + NVT_HDMI_EMP_BYTE8_MD0_M_CONST_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[0], + NVT_HDMI_EMP_BYTE8_MD0_QMS_EN_ENABLE, + NVT_HDMI_EMP_BYTE8_MD0_QMS_EN_MASK, + NVT_HDMI_EMP_BYTE8_MD0_QMS_EN_SHIFT); + } + + if (pCtrl->ITTiming) + { + nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[1], + pCtrl->BaseVFP, + NVT_HDMI_EMP_BYTE8_MD1_BASE_VFRONT_MASK, + NVT_HDMI_EMP_BYTE8_MD1_BASE_VFRONT_SHIFT); + + // In HDMI2.1, MD2 bit 2 is set when RB timing is used. 
+ // In HDMI2.1A, MD2 bit 2 is RSVD as 0 + if (pCtrl->version == NVT_EXTENDED_METADATA_PACKET_INFOFRAME_VER_HDMI21) + { + nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[2], + pCtrl->ReducedBlanking, + NVT_HDMI_EMP_BYTE8_MD2_RB_MASK, + NVT_HDMI_EMP_BYTE8_MD2_RB_SHIFT); + } + + if (pCtrl->version == NVT_EXTENDED_METADATA_PACKET_INFOFRAME_VER_HDMI21A && pCtrl->EnableQMS) + { + nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[2], + pCtrl->NextTFR, + NVT_HDMI_EMP_BYTE8_MD2_NEXT_TFR_MASK, + NVT_HDMI_EMP_BYTE8_MD2_NEXT_TFR_SHIFT); + } + + // MSB for Base Refresh Rate + nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[2], + pCtrl->BaseRefreshRate >> 8, + NVT_HDMI_EMP_BYTE8_MD2_BASE_RR_MSB_MASK, + NVT_HDMI_EMP_BYTE8_MD2_BASE_RR_MSB_SHIFT); + + // LSB for Base Refresh Rate + nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[3], + pCtrl->BaseRefreshRate, + NVT_HDMI_EMP_BYTE8_MD3_BASE_RR_LSB_MASK, + NVT_HDMI_EMP_BYTE8_MD3_BASE_RR_LSB_SHIFT); + } + + return RetCode; +} + +// Enumerate Psf Timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_EnumNvPsfTiming(NvU32 nvPsfFormat, NVT_TIMING *pT) +{ + if (pT == NULL || nvPsfFormat == 0 || nvPsfFormat > MAX_PSF_FORMAT) + { + return NVT_STATUS_ERR; + } + + *pT = PSF_TIMING[nvPsfFormat - 1]; + + // calculate the pixel clock + pT->pclk = RRx1kToPclk (pT); + pT->pclk1khz = (pT->pclk << 3) + (pT->pclk << 1); // *10 + + return NVT_STATUS_SUCCESS; +} + +// Set ActiveSpace for HDMI 3D stereo timing +CODE_SEGMENT(PAGE_DD_CODE) +void SetActiveSpaceForHDMI3DStereo(const NVT_TIMING *pTiming, NVT_EXT_TIMING *pExtTiming) +{ + // Note -- this assumes that the Timng is the 2D instance. + NvU16 VBlank; + + // assume no active space to start. + pExtTiming->HDMI3D.VActiveSpace[0] = 0; + pExtTiming->HDMI3D.VActiveSpace[1] = 0; + + if (NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK == pExtTiming->HDMI3D.StereoStructureType) + { + VBlank = pTiming->VTotal - pTiming->VVisible; + if (pTiming->interlaced) + { + //++++ This need to be revisited, not sure when active space 1 & 2 should be different. + // (fortunately, we are not supporting any interlaced packed frame modes yet). + pExtTiming->HDMI3D.VActiveSpace[0] = VBlank + 1; + pExtTiming->HDMI3D.VActiveSpace[1] = VBlank - 1; + } + else + { + pExtTiming->HDMI3D.VActiveSpace[0] = VBlank; + } + } + return; +} + +// Generate HDMI stereo timing from 2D timing +CODE_SEGMENT(PAGE_DD_CODE) +void NvTiming_GetHDMIStereoTimingFrom2DTiming(const NVT_TIMING *pTiming, NvU8 StereoStructureType, NvU8 SideBySideHalfDetail, NVT_EXT_TIMING *pExtTiming) +{ + NvU16 VBlank; + NvU16 HBlank; + + if ((NULL == pTiming) || (NULL == pExtTiming) || (!isHdmi3DStereoType(StereoStructureType))) + { + return; + } + // init the extended timing + NVMISC_MEMSET(pExtTiming, 0, sizeof(NVT_EXT_TIMING)); + + // copy the 2D timing to the 3D timing. + pExtTiming->timing = *pTiming; + + // init the extension w/in the 3D timing + pExtTiming->HDMI3D.StereoStructureType = StereoStructureType; + pExtTiming->HDMI3D.SideBySideHalfDetail = SideBySideHalfDetail; + + + switch(StereoStructureType) + { + case NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK: + { + // calculate VBlank + VBlank = pTiming->VTotal - pTiming->VVisible; + + // Use the 2D timing to calculate the Active Space + SetActiveSpaceForHDMI3DStereo(pTiming, pExtTiming); + + // Calculate the 3D VVisible size based on the 2D VVisible and the active space. 
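/*
 * Illustrative numbers (not part of the patch) for the calculation below,
 * assuming the frame-packing rule used here: progressive 3D VVisible is
 * 2*VVisible(2D) + VActiveSpace, with VActiveSpace equal to the 2D VBlank.
 * For 1920x1080p24 (VTotal 1125): VBlank = 1125 - 1080 = 45, so
 * VVisible(3D) = 2*1080 + 45 = 2205 and VTotal(3D) = 2205 + 45 = 2250,
 * doubling the pixel clock at the same 24 Hz refresh.
 */
#include <stdint.h>

static uint16_t fp3d_vvisible(uint16_t v2d, uint16_t vblank)
{
    return (uint16_t)(2u * v2d + vblank);  /* e.g. 2*1080 + 45 = 2205 */
}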
+ if (pTiming->interlaced) + { + pExtTiming->timing.VVisible = ((pTiming->VVisible * 4) + (pExtTiming->HDMI3D.VActiveSpace[0]) * 2) + pExtTiming->HDMI3D.VActiveSpace[1]; + } + else + { + pExtTiming->timing.VVisible = (pTiming->VVisible * 2) + pExtTiming->HDMI3D.VActiveSpace[0]; + } + // Calculate the 3D VTotal from the 3D VVisible & the VBlank. + pExtTiming->timing.VTotal = pExtTiming->timing.VVisible + VBlank; + + pExtTiming->timing.etc.status = NVT_SET_TIMING_STATUS_TYPE(pExtTiming->timing.etc.status, NVT_TYPE_HDMI_STEREO); + + break; + } + case NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEFULL: + { + // calculate HBlank before calculating new HVisible + HBlank = pTiming->HTotal - pTiming->HVisible; + + pExtTiming->timing.HVisible = pTiming->HVisible * 2; + + pExtTiming->timing.HTotal = pExtTiming->timing.HVisible + HBlank; + + pExtTiming->timing.etc.status = NVT_SET_TIMING_STATUS_TYPE(pExtTiming->timing.etc.status, NVT_TYPE_HDMI_STEREO); + + break; + } + case NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF: // valid formats with no timing changes. + case NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM: + { + break; + } + case NVT_HDMI_VS_BYTE5_HDMI_3DS_FIELD_ALT: // formats we are not supporting. + case NVT_HDMI_VS_BYTE5_HDMI_3DS_LINE_ALT: + case NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTH: + case NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTHGFX: + { + break; + } + } + // calculate the pixel clock + pExtTiming->timing.pclk = RRx1kToPclk (&(pExtTiming->timing)); + pExtTiming->timing.pclk1khz = (pExtTiming->timing.pclk << 3) + (pExtTiming->timing.pclk << 1); // *10; + + return; +} + +// Add mode to 3D stereo support map +CODE_SEGMENT(PAGE_DD_CODE) +void AddModeToSupportMap(HDMI3DSUPPORTMAP * pMap, NvU8 Vic, NvU8 StereoStructureType, NvU8 SideBySideHalfDetail) +{ + NvU32 i; + + if (0 < Vic) + { + // first check if the vic is already listed. + for (i = 0; i < pMap->total; ++i) + { + if (pMap->map[i].Vic == Vic) + { + break; + } + } + if (i == pMap->total) + { + // vic is not in the map. + // add it. + // note that we can't add the VIC to one of the 1st 16 entries. + // 1st 16 entries in the map are reserved for the vics from the EDID. 
+ // if we add this VIC to the 1st 16, & there are any optional modes listed, + // the optional mode(s) will be improperly applied to this VIC as well + i = MAX(MAX_EDID_ADDRESSABLE_3D_VICS, pMap->total); + if (i < MAX_3D_VICS_SUPPORTED) + { + pMap->map[i].Vic = Vic; + pMap->total = i + 1; + } + } + nvt_assert(pMap->total <= MAX_3D_VICS_SUPPORTED); + if (i < pMap->total) + { + pMap->map[i].StereoStructureMask = pMap->map[i].StereoStructureMask | NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(StereoStructureType); + if (NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF == StereoStructureType) + { + pMap->map[i].SideBySideHalfDetail = SideBySideHalfDetail; + } + } + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseEdidHdmiLlcBasicInfo(VSDB_DATA *pVsdb, NVT_HDMI_LLC_INFO *pHdmiLlc) +{ + NVT_HDMI_LLC_VSDB_PAYLOAD *p; + if (pVsdb == NULL || pHdmiLlc == NULL) + { + return; + } + + p = (NVT_HDMI_LLC_VSDB_PAYLOAD *)(&pVsdb->vendor_data); + + // Minimum vendor_data_size is 2 + pHdmiLlc->addrA = p->A; + pHdmiLlc->addrB = p->B; + pHdmiLlc->addrC = p->C; + pHdmiLlc->addrD = p->D; + + // If more data is provided, we read it as well each field at a time up to video latency + if (pVsdb->vendor_data_size >= 3) + { + pHdmiLlc->supports_AI = p->Supports_AI; + pHdmiLlc->dc_48_bit = p->DC_48bit; + pHdmiLlc->dc_36_bit = p->DC_36bit; + pHdmiLlc->dc_30_bit = p->DC_30bit; + pHdmiLlc->dc_y444 = p->DC_Y444; + pHdmiLlc->dual_dvi = p->DVI_Dual; + + if (pVsdb->vendor_data_size >= 4) + { + pHdmiLlc->max_tmds_clock = p->Max_TMDS_Clock; + + if (pVsdb->vendor_data_size >= 5) + { + pHdmiLlc->latency_field_present = p->Latency_Fields_Present; + pHdmiLlc->i_latency_field_present = p->I_Latency_Fields_Present; + pHdmiLlc->hdmi_video_present = p->HDMI_Video_present; + pHdmiLlc->cnc3 = p->CNC3; + pHdmiLlc->cnc2 = p->CNC2; + pHdmiLlc->cnc1 = p->CNC1; + pHdmiLlc->cnc0 = p->CNC0; + } + } + } + +} + +// get HDMI 1.4 specific timing (3D stereo timings and extended mode timings) +CODE_SEGMENT(PAGE_DD_CODE) +void parseEdidHDMILLCTiming(NVT_EDID_INFO *pInfo, VSDB_DATA *pVsdb, NvU32 *pMapSz, HDMI3DSUPPORTMAP * pM) +{ + NVT_HDMI_LLC_VSDB_PAYLOAD *pHdmiLLC; + NVT_HDMI_VIDEO *pHDMIVideo; + NvU32 DataCnt = 0; + NvU32 DataSz; + NvU16 i, j, k; + NvU16 Supports50Hz; + NvU16 Supports60Hz; + NvU32 vendorDataSize; + + if ((NULL == pInfo) || (NULL == pVsdb) || (NULL == pM)) + { + return; + } + + // init the support map + NVMISC_MEMSET(pM, 0, sizeof(HDMI3DSUPPORTMAP)); + Supports50Hz = 0; + Supports60Hz = 0; + + nvt_assert(pInfo->total_timings <= COUNT(pInfo->timing)); + + for (i = 0; i < pInfo->total_timings; ++i) + { + if (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_EDID_861ST) + { + if (MAX_EDID_ADDRESSABLE_3D_VICS > pM->total) + { + // fill in the VICs from the EDID (up to the 1st 16). These are used for applying any 3D optional modes listed in the LLC + // -- the optional modes are addressed based on their relative location within the EDID. + pM->map[pM->total].Vic = (NvU8) NVT_GET_TIMING_STATUS_SEQ(pInfo->timing[i].etc.status); + ++pM->total; + } + + // since we are spinning through the timing array anyway, + // check to see which refresh rates are supported. 
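/*
 * Illustrative sketch (not part of the patch): the A/B/C/D fields read by
 * parseEdidHdmiLlcBasicInfo() above are the four nibbles of the sink's
 * 16-bit CEC physical address (A.B.C.D), carried in the first two payload
 * bytes of the HDMI LLC VSDB. The helper below is hypothetical.
 */
#include <stdint.h>

static void cec_physical_address(uint8_t b0, uint8_t b1, uint8_t out[4])
{
    out[0] = (uint8_t)(b0 >> 4);   /* A */
    out[1] = (uint8_t)(b0 & 0xF);  /* B */
    out[2] = (uint8_t)(b1 >> 4);   /* C */
    out[3] = (uint8_t)(b1 & 0xF);  /* D */
}
/* bytes 0x10, 0x00 decode to 1.0.0.0: the first input below the root. */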
+ if (50 == pInfo->timing[i].etc.rr) + { + Supports50Hz = 1; + } + else if (60 == pInfo->timing[i].etc.rr) + { + Supports60Hz = 1; + } + } + } + + if (0 == pM->total) + { + if (NULL != pMapSz) + { + *pMapSz = 0; + } + } + + vendorDataSize = pVsdb->vendor_data_size; + if ((NVT_CEA861_HDMI_IEEE_ID == pVsdb->ieee_id) && + (offsetof(NVT_HDMI_LLC_VSDB_PAYLOAD, Data) < vendorDataSize)) + { + pHdmiLLC = (NVT_HDMI_LLC_VSDB_PAYLOAD *)(&pVsdb->vendor_data); + DataSz = (NvU32) MIN(vendorDataSize - offsetof(NVT_HDMI_LLC_VSDB_PAYLOAD, Data), sizeof(pHdmiLLC->Data)); + + if (5 <= vendorDataSize) + { + if (pHdmiLLC->Latency_Fields_Present) + { + DataCnt += (NvU32) sizeof(NVT_CEA861_LATENCY); + + if (pHdmiLLC->I_Latency_Fields_Present) + { + DataCnt += (NvU32) sizeof(NVT_CEA861_LATENCY); + } + } + + if ((pHdmiLLC->HDMI_Video_present) && + (DataSz > DataCnt) && + (DataSz - DataCnt >= sizeof(NVT_HDMI_VIDEO))) + { + pHDMIVideo = (NVT_HDMI_VIDEO *) &pHdmiLLC->Data[DataCnt]; + DataCnt += (NvU32) sizeof(NVT_HDMI_VIDEO); + + // If 3D is present, then add the basic 3D modes 1st. + if (pHDMIVideo->ThreeD_Present) + { + if ((0 != Supports50Hz) || (0 != Supports60Hz)) + { + // 50 and / or 60 Hz is supported, add 1920 x 1080 @ 24Hz 3D modes. + AddModeToSupportMap(pM, 32, NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK, 0); // 1920 x 1080p @ 24 Hz + AddModeToSupportMap(pM, 32, NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM, 0); // 1920 x 1080p @ 24 Hz + + if (0 != Supports50Hz) + { + // add the mandatory modes for 50 Hz + AddModeToSupportMap(pM, 19, NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK, 0); // 1280 x 720p @ 50 Hz + AddModeToSupportMap(pM, 19, NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM, 0); // 1280 x 720p @ 50 Hz + // 1920 x 1080i @ 50 Hz + AddModeToSupportMap(pM, 20, NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH); + } + + if (0 != Supports60Hz) + { + // add the mandatory modes for 60 Hz + AddModeToSupportMap(pM, 4, NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK, 0); // 1280 x 720p @ 60 Hz + AddModeToSupportMap(pM, 4, NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM, 0); // 1280 x 720p @ 60 Hz + // 1920 x 1080i @ 60 Hz + AddModeToSupportMap(pM, 5, NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH); + } + } + } + + if ((DataSz > DataCnt) && + (DataSz - DataCnt >= pHDMIVideo->HDMI_VIC_Len)) + { + // handle HDMI VIC entries to add HDMI 1.4a 4kx2k extended modes + NVT_HDMI_VIC_LIST * pVicList = (NVT_HDMI_VIC_LIST *) &pHdmiLLC->Data[DataCnt]; + + for ( k = 0; k < pHDMIVideo->HDMI_VIC_Len; ++k) + { + NVT_TIMING newTiming; + + // extended mode VIC code from 1 - 4. 
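/*
 * Illustrative mapping (not part of the patch): HDMI 1.4b defines four
 * extended-resolution HDMI_VIC codes, which HDMI_EXT_4Kx2K_TIMING above is
 * assumed to index in order: 1 = 3840x2160p30, 2 = 3840x2160p25,
 * 3 = 3840x2160p24, 4 = 4096x2160p24 (SMPTE).
 */
#include <stdint.h>

static const char *hdmi_vic_name(uint8_t hdmi_vic)
{
    static const char *const names[] =
        { "3840x2160p30", "3840x2160p25", "3840x2160p24", "4096x2160p24" };
    return (hdmi_vic >= 1 && hdmi_vic <= 4) ? names[hdmi_vic - 1] : "unknown";
}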
+ if ((0 < pVicList->HDMI_VIC[k]) && (pVicList->HDMI_VIC[k] <= MAX_HDMI_EXT_4Kx2K_FORMAT)) + { + NVMISC_MEMCPY(&newTiming, + &HDMI_EXT_4Kx2K_TIMING[pVicList->HDMI_VIC[k] - 1], + sizeof(newTiming)); + + // Fill in the pixel clock + newTiming.pclk = RRx1kToPclk(&newTiming); + newTiming.pclk1khz = (newTiming.pclk << 3) + (newTiming.pclk << 1); // *10 + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + } + + DataCnt += pHDMIVideo->HDMI_VIC_Len; + } + + // the following code implements parsing the HDMI 3D additional modes (all modes bitmap & additional vic modes) + // Kepler and above support 3D secondary modes + if ((pHDMIVideo->ThreeD_Present) && + ((1 == pHDMIVideo->ThreeD_Multi_Present) || (2 == pHDMIVideo->ThreeD_Multi_Present)) && + (0 < pHDMIVideo->HDMI_3D_Len) && + (DataSz > (DataCnt + 1)) && //make sure pHdmiLLC->Data[DataCnt + 1] is valid + (DataSz - DataCnt >= pHDMIVideo->HDMI_3D_Len)) + { + NvU16 AllVicStructMask; + NvU16 AllVicIdxMask; + NvU8 AllVicDetail; + + // determine which modes to apply to all VICs. + AllVicStructMask = (pHdmiLLC->Data[DataCnt] << 8) | pHdmiLLC->Data[DataCnt + 1]; + AllVicStructMask = AllVicStructMask & NVT_ALL_HDMI_3D_STRUCT_SUPPORTED_MASK; + DataCnt += 2; + + if ((2 == pHDMIVideo->ThreeD_Multi_Present) && (DataSz > (DataCnt+1))) //make sure pHdmiLLC->Data[DataCnt + 1] is valid + { + AllVicIdxMask = pHdmiLLC->Data[DataCnt] << 8 | pHdmiLLC->Data[DataCnt + 1]; + DataCnt += 2; + } + else + { + AllVicIdxMask = 0xffff; + } + + // determine what the detail should be. + AllVicDetail = 0 != (AllVicStructMask & NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEHALF_MASK) ? NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH : 0; + + // add the modes to the Support map for all the listed VICs. + for (k = 0; k < MIN(MAX_EDID_ADDRESSABLE_3D_VICS, pM->total); ++k) + { + if ((0 != (AllVicIdxMask & (1 << k))) && (0 != pM->map[k].Vic)) + { + pM->map[k].StereoStructureMask = pM->map[k].StereoStructureMask | AllVicStructMask; + pM->map[k].SideBySideHalfDetail = AllVicDetail; + } + } + } + + // handle any additional per vic modes listed in the EDID + while (DataSz > DataCnt) + { + // get a pointer to the entry. + NVT_3D_MULTI_LIST * pMultiListEntry = (NVT_3D_MULTI_LIST *) &pHdmiLLC->Data[DataCnt]; + + // apply the specified structure to the Support Map + pM->map[pMultiListEntry->TwoD_VIC_order].StereoStructureMask = + pM->map[pMultiListEntry->TwoD_VIC_order].StereoStructureMask | NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(pMultiListEntry->ThreeD_Structure); + + // increment the Data count by 2 if this is side by side half, + // or 1 if it is any other structure. + if (NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF <= pMultiListEntry->ThreeD_Structure) + { + pM->map[pMultiListEntry->TwoD_VIC_order].SideBySideHalfDetail = pMultiListEntry->ThreeD_Detail; + DataCnt += 2; + } + else + { + pM->map[pMultiListEntry->TwoD_VIC_order].SideBySideHalfDetail = 0; + DataCnt += 1; + } + } + } + } + } + + + // compress out entries where there is no 3D support. + for (i = 0, j = 0; i < pM->total; ++i) + { + if (0 != pM->map[i].StereoStructureMask) + { + pM->map[j] = pM->map[i]; + ++j; + } + } + + pM->total = j; + + if (NULL != pMapSz) + { + *pMapSz = pM->total; + } +} + +// get HDMI 1.4 3D mandatory stereo format datail base on the input vic. +// If the vic is not in the mandatory format list, return error. 
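/*
 * Illustrative recap (not part of the patch) of the HDMI 1.4b mandatory 3D
 * formats the function below reports, mirroring the AddModeToSupportMap()
 * calls made earlier in parseEdidHDMILLCTiming():
 */
#include <stdint.h>

struct mandatory_3d { uint8_t vic; const char *structures; };
static const struct mandatory_3d k_mandatory_3d[] = {
    { 32, "frame packing, top-and-bottom" },  /* 1920x1080p24 */
    {  4, "frame packing, top-and-bottom" },  /* 1280x720p60  */
    { 19, "frame packing, top-and-bottom" },  /* 1280x720p50  */
    {  5, "side-by-side (half)" },            /* 1920x1080i60 */
    { 20, "side-by-side (half)" },            /* 1920x1080i50 */
};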
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_GetHDMIStereoMandatoryFormatDetail(const NvU8 vic, NvU16 *pStereoStructureMask, NvU8 *pSideBySideHalfDetail)
+{
+    NvU32 i;
+
+    if ((vic < 1) || (vic > MAX_CEA861B_FORMAT))
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    for (i = 0; i < MAX_HDMI_MANDATORY_3D_FORMAT; i++)
+    {
+        if (vic == HDMI_MANDATORY_3D_FORMATS[i].Vic)
+        {
+            if (pStereoStructureMask != NULL)
+            {
+                *pStereoStructureMask = HDMI_MANDATORY_3D_FORMATS[i].StereoStructureMask;
+            }
+
+            if (pSideBySideHalfDetail != NULL)
+            {
+                *pSideBySideHalfDetail = HDMI_MANDATORY_3D_FORMATS[i].SideBySideHalfDetail;
+            }
+
+            return NVT_STATUS_SUCCESS;
+        }
+    }
+
+    return NVT_STATUS_ERR;
+}
+
+// return the aspect ratio of a given CEA/EIA 861 timing
+CODE_SEGMENT(PAGE_DD_CODE)
+NvU32 getCEA861TimingAspectRatio(NvU32 vic)
+{
+    return (vic > 0 && vic < MAX_CEA861B_FORMAT + 1) ? EIA861B[vic-1].etc.aspect : 0;
+}
+
+// expose the HDMI extended video timing defined by the HDMI LLC VSDB
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_EnumHdmiVsdbExtendedTiming(NvU32 hdmi_vic, NVT_TIMING *pT)
+{
+    if (hdmi_vic > MAX_HDMI_EXT_4Kx2K_FORMAT || hdmi_vic == 0 || pT == NULL)
+    {
+        return NVT_STATUS_ERR;
+    }
+    *pT = HDMI_EXT_4Kx2K_TIMING[hdmi_vic - 1];
+    pT->pclk = RRx1kToPclk(pT);
+    pT->pclk1khz = (pT->pclk << 3) + (pT->pclk << 1); // *10
+    return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidNvidiaVSDBBlock(VSDB_DATA *pVsdb, NVDA_VSDB_PARSED_INFO *vsdbInfo)
+{
+    NVT_NVDA_VSDB_PAYLOAD *pNvda;
+
+    if ((pVsdb == NULL) || (vsdbInfo == NULL))
+    {
+        return;
+    }
+
+    if ((NVT_CEA861_NVDA_IEEE_ID == pVsdb->ieee_id) &&
+        (pVsdb->vendor_data_size >= sizeof(NVT_NVDA_VSDB_PAYLOAD)))
+    {
+        pNvda = (NVT_NVDA_VSDB_PAYLOAD *)(&pVsdb->vendor_data);
+
+        if (pNvda->opcode == 0x1 || pNvda->opcode == 0x2)
+        {
+            vsdbInfo->vsdbVersion = pNvda->opcode;
+        }
+
+        switch (vsdbInfo->vsdbVersion)
+        {
+            case 1:
+            case 2:
+                vsdbInfo->valid = NV_TRUE;
+                vsdbInfo->vrrData.v1.supportsVrr = NV_TRUE;
+                vsdbInfo->vrrData.v1.minRefreshRate = pNvda->vrrMinRefreshRate;
+                break;
+            default:
+                break;
+        }
+    }
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidMsftVsdbBlock(VSDB_DATA *pVsdb, MSFT_VSDB_PARSED_INFO *pVsdbInfo)
+{
+    if ((pVsdb == NULL) || (pVsdbInfo == NULL))
+    {
+        return;
+    }
+
+    NVMISC_MEMSET(pVsdbInfo, 0, sizeof(MSFT_VSDB_PARSED_INFO));
+
+    if ((NVT_CEA861_MSFT_IEEE_ID == pVsdb->ieee_id) &&
+        (pVsdb->vendor_data_size >= sizeof(NVT_MSFT_VSDB_PAYLOAD)))
+    {
+        NvU32 i = 0;
+        NVT_MSFT_VSDB_PAYLOAD *pMsftVsdbPayload = (NVT_MSFT_VSDB_PAYLOAD *)(&pVsdb->vendor_data);
+
+        pVsdbInfo->version = pMsftVsdbPayload->version;
+
+        if (pVsdbInfo->version >= 1)
+        {
+            for (i = 0; i < MSFT_VSDB_CONTAINER_ID_SIZE; i++)
+            {
+                pVsdbInfo->containerId[i] = pMsftVsdbPayload->containerId[i];
+            }
+
+            pVsdbInfo->desktopUsage = pMsftVsdbPayload->desktopUsage;
+            pVsdbInfo->thirdPartyUsage = pMsftVsdbPayload->thirdPartyUsage;
+            pVsdbInfo->valid = NV_TRUE;
+        }
+        // Version 3 is the latest version of the MSFT VSDB at the time of writing
+        // this code. Any newer version will be ignored and parsed as Version 3
+        // until explicit handling for it is added here.
+        if (pVsdbInfo->version >= 3)
+        {
+            // Primary use case is valid from Version 3 onward and is ignored in previous versions.
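+            // (The >= comparison assumes newer VSDB versions stay backward
+            // compatible with the Version 3 layout, as noted above.)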
+            pVsdbInfo->primaryUseCase = pMsftVsdbPayload->primaryUseCase;
+        }
+    }
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidHdmiForumVSDB(VSDB_DATA *pVsdb, NVT_HDMI_FORUM_INFO *pHdmiInfo)
+{
+    NVT_HDMI_FORUM_VSDB_PAYLOAD *pHdmiForum;
+    NvU32 remainingSize;
+
+    if ((pVsdb == NULL) || (pHdmiInfo == NULL))
+    {
+        return;
+    }
+
+    pHdmiForum = (NVT_HDMI_FORUM_VSDB_PAYLOAD *)(&pVsdb->vendor_data);
+    switch(pHdmiForum->Version)
+    {
+        case 1:
+            // Per the HDMI spec, the payload data size is 7 to 31 bytes.
+            // In parseCta861DataBlockInfo(), the payload size recorded in pHdmiForum is
+            // subtracted by 3. Thus the expected range here is 4 - 28.
+            // Assert if the vendor_data_size < 4.
+            nvt_assert(pVsdb->vendor_data_size >= 4);
+
+            remainingSize = pVsdb->vendor_data_size;
+
+            // second byte
+            pHdmiInfo->max_TMDS_char_rate = pHdmiForum->Max_TMDS_Character_Rate;
+
+            // third byte
+            pHdmiInfo->threeD_Osd_Disparity = pHdmiForum->ThreeD_Osd_Disparity;
+            pHdmiInfo->dual_view = pHdmiForum->Dual_View;
+            pHdmiInfo->independent_View = pHdmiForum->Independent_View;
+            pHdmiInfo->lte_340Mcsc_scramble = pHdmiForum->Lte_340mcsc_Scramble;
+            pHdmiInfo->ccbpci = pHdmiForum->CCBPCI;
+            pHdmiInfo->cable_status = pHdmiForum->CABLE_STATUS;
+            pHdmiInfo->rr_capable = pHdmiForum->RR_Capable;
+            pHdmiInfo->scdc_present = pHdmiForum->SCDC_Present;
+
+            // fourth byte
+            pHdmiInfo->dc_30bit_420 = pHdmiForum->DC_30bit_420;
+            pHdmiInfo->dc_36bit_420 = pHdmiForum->DC_36bit_420;
+            pHdmiInfo->dc_48bit_420 = pHdmiForum->DC_48bit_420;
+            pHdmiInfo->uhd_vic = pHdmiForum->UHD_VIC;
+            pHdmiInfo->max_FRL_Rate = pHdmiForum->Max_FRL_Rate;
+
+            remainingSize -= 4;
+
+            // fifth byte
+            if (!remainingSize--)
+            {
+                break;
+            }
+            pHdmiInfo->fapa_start_location = pHdmiForum->FAPA_start_location;
+            pHdmiInfo->allm = pHdmiForum->ALLM;
+            pHdmiInfo->fva = pHdmiForum->FVA;
+            pHdmiInfo->cnmvrr = pHdmiForum->CNMVRR;
+            pHdmiInfo->cinemaVrr = pHdmiForum->CinemaVRR;
+            pHdmiInfo->m_delta = pHdmiForum->M_delta;
+            pHdmiInfo->qms = pHdmiForum->QMS;
+            pHdmiInfo->fapa_end_extended = pHdmiForum->FAPA_End_Extended;
+
+            // sixth byte
+            if (!remainingSize--)
+            {
+                break;
+            }
+            pHdmiInfo->vrr_min = pHdmiForum->VRR_min;
+            pHdmiInfo->vrr_max = ((NvU16)pHdmiForum->VRR_max_high) << 8;
+
+            // seventh byte
+            if (!remainingSize--)
+            {
+                break;
+            }
+            pHdmiInfo->vrr_max |= (pHdmiForum->VRR_max_low);
+
+            // eighth byte
+            if (!remainingSize--)
+            {
+                break;
+            }
+            pHdmiInfo->dsc_10bpc = pHdmiForum->DSC_10bpc;
+            pHdmiInfo->dsc_12bpc = pHdmiForum->DSC_12bpc;
+            pHdmiInfo->dsc_16bpc = pHdmiForum->DSC_16bpc;
+            pHdmiInfo->dsc_All_bpp = pHdmiForum->DSC_All_bpp;
+            pHdmiInfo->dsc_Native_420 = pHdmiForum->DSC_Native_420;
+            pHdmiInfo->dsc_1p2 = pHdmiForum->DSC_1p2;
+            pHdmiInfo->qms_tfr_min = pHdmiForum->QMS_TFR_min;
+            pHdmiInfo->qms_tfr_max = pHdmiForum->QMS_TFR_max;
+
+            // ninth byte
+            if (!remainingSize--)
+            {
+                break;
+            }
+            pHdmiInfo->dsc_MaxSlices = 0;
+            pHdmiInfo->dsc_MaxPclkPerSliceMHz = 0;
+            switch(pHdmiForum->DSC_MaxSlices)
+            {
+                case 7: pHdmiInfo->dsc_MaxSlices = 16; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 400; break;
+                case 6: pHdmiInfo->dsc_MaxSlices = 12; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 400; break;
+                case 5: pHdmiInfo->dsc_MaxSlices = 8;  pHdmiInfo->dsc_MaxPclkPerSliceMHz = 400; break;
+                case 4: pHdmiInfo->dsc_MaxSlices = 8;  pHdmiInfo->dsc_MaxPclkPerSliceMHz = 340; break;
+                case 3: pHdmiInfo->dsc_MaxSlices = 4;  pHdmiInfo->dsc_MaxPclkPerSliceMHz = 340; break;
+                case 2: pHdmiInfo->dsc_MaxSlices = 2;  pHdmiInfo->dsc_MaxPclkPerSliceMHz = 340; break;
+                case 1: pHdmiInfo->dsc_MaxSlices = 1;  pHdmiInfo->dsc_MaxPclkPerSliceMHz = 340; break;
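+                // (Case 0 means DSC is not supported; the remaining field values
+                // are, to our understanding, reserved by the HF-VSDB definition.
+                // Both are handled by the default case and leave dsc_MaxSlices at 0.)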
+                default: break;
+            }
+
+            pHdmiInfo->dsc_Max_FRL_Rate = pHdmiForum->DSC_Max_FRL_Rate;
+
+            // tenth byte
+            if (!remainingSize--)
+            {
+                break;
+            }
+
+            // Per spec, the number of bytes has to be computed as 1024 x (1 + DSC_TotalChunkKBytes).
+            // For driver parser purposes, add 1 here so that the field means the max number of KBytes in a link of chunks.
+            pHdmiInfo->dsc_totalChunkKBytes = (pHdmiForum->DSC_totalChunkKBytes == 0) ? 0 : pHdmiForum->DSC_totalChunkKBytes + 1;
+            break;
+
+        default:
+            break;
+    }
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseCta861Hdr10PlusDataBlock(VSVDB_DATA* pVsvdb, NVT_HDR10PLUS_INFO* pHdr10PlusInfo)
+{
+    if (pVsvdb == NULL || pHdr10PlusInfo == NULL)
+        return;
+
+    if (pVsvdb->ieee_id != NVT_CEA861_HDR10PLUS_IEEE_ID)
+        return;
+
+    NVMISC_MEMSET(pHdr10PlusInfo, 0, sizeof(NVT_HDR10PLUS_INFO));
+
+    if (pVsvdb->vendor_data_size < sizeof(NVT_HDR10PLUS_INFO))
+        return;
+
+    NVMISC_MEMCPY(pHdr10PlusInfo, &pVsvdb->vendor_data, sizeof(NVT_HDR10PLUS_INFO));
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseCta861DIDType7VideoTimingDataBlock(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo)
+{
+    NvU8 i = 0;
+    NvU8 t7db_idx = 0;
+    NvU8 startSeqNum = 0;
+
+    NVT_TIMING newTiming;
+    NVT_EDID_INFO *pInfo = (NVT_EDID_INFO *)pRawInfo;
+    const DISPLAYID_2_0_TIMING_7_DESCRIPTOR *pT7Descriptor = NULL;
+    NvU8 eachOfDescSize = sizeof(DISPLAYID_2_0_TIMING_7_DESCRIPTOR);
+
+    for (t7db_idx = 0; t7db_idx < pExt861->total_did_type7db; t7db_idx++)
+    {
+        // 20 bytes
+        eachOfDescSize += pExt861->did_type7_data_block[t7db_idx].version.t7_m;
+
+        if (pExt861->did_type7_data_block[t7db_idx].total_descriptors != NVT_CTA861_DID_TYPE7_DESCRIPTORS_MAX)
+        {
+            // invalid payload descriptor count; a T7VTDB is expected to carry exactly 1 descriptor
+            continue;
+        }
+
+        if (pExt861->did_type7_data_block[t7db_idx].version.revision != 2)
+        {
+            nvt_assert(0 && "The revision supported by CTA-861 is not 2");
+        }
+
+        startSeqNum = getExistedCTATimingSeqNumber(pInfo, NVT_TYPE_CTA861_DID_T7);
+
+        for (i = 0; i < pExt861->did_type7_data_block[t7db_idx].total_descriptors; i++)
+        {
+            NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming));
+            if (NVT_STATUS_SUCCESS == parseDisplayId20Timing7Descriptor(&pExt861->did_type7_data_block[t7db_idx].payload[i*eachOfDescSize],
+                                                                        &newTiming,
+                                                                        startSeqNum+i))
+            {
+                // T7VTDB shall not be used with video timing that can be expressed in an 18-byte DTD
+                if (newTiming.HVisible < 4096 && newTiming.VVisible < 4096 && newTiming.pclk < 65536)
+                {
+                    nvt_assert(0 && "The timing can be expressed in an 18-byte DTD");
+                    continue;
+                }
+
+                pT7Descriptor = (const DISPLAYID_2_0_TIMING_7_DESCRIPTOR *)
+                                &pExt861->did_type7_data_block[t7db_idx].payload[i*eachOfDescSize];
+
+                if (pT7Descriptor->options.is_preferred_or_ycc420 == 1 && newTiming.pclk1khz > NVT_HDMI_YUV_420_PCLK_SUPPORTED_MIN)
+                {
+                    newTiming.etc.yuv420.bpcs = 0;
+                    UPDATE_BPC_FOR_COLORFORMAT(newTiming.etc.yuv420, 0, 1,
+                                               pInfo->hdmiForumInfo.dc_30bit_420,
+                                               pInfo->hdmiForumInfo.dc_36bit_420, 0,
+                                               pInfo->hdmiForumInfo.dc_48bit_420);
+                }
+
+                NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), "CTA861-T7:#%3d:%dx%dx%3d.%03dHz/%s",
+                             (int)NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status),
+                             (int)newTiming.HVisible,
+                             (int)newTiming.VVisible,
+                             (int)newTiming.etc.rrx1k/1000,
+                             (int)newTiming.etc.rrx1k%1000,
+                             (newTiming.interlaced ?
"I":"P")); + newTiming.etc.status = NVT_STATUS_CTA861_DID_T7N(NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status)); + newTiming.etc.name[sizeof(newTiming.etc.name) - 1] = '\0'; + newTiming.etc.rep = 0x1; + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + } + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861DIDType8VideoTimingDataBlock(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo) +{ + NvU8 i = 0; + NvU8 t8db_idx = 0; + NvU8 startSeqNum = 0; + NvU8 codeSize = 0; + NvU8 codeType = 0; + + NVT_TIMING newTiming; + NVT_EDID_INFO *pInfo = (NVT_EDID_INFO *)pRawInfo; + + for (t8db_idx = 0; t8db_idx < pExt861->total_did_type8db; t8db_idx++) + { + codeType = pExt861->did_type8_data_block[t8db_idx].version.code_type; + codeSize = pExt861->did_type8_data_block[t8db_idx].version.tcs; + + if (codeType != 0 /*DMT*/) + { + nvt_assert(0 && "Not DMT code type!"); + continue; + } + + startSeqNum = getExistedCTATimingSeqNumber(pInfo, NVT_TYPE_CTA861_DID_T8); + + for (i=0; i < pExt861->did_type8_data_block[t8db_idx].total_descriptors; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayId20Timing8Descriptor(pExt861->did_type8_data_block[t8db_idx].payload, + &newTiming, codeType, codeSize, i, startSeqNum + i) == NVT_STATUS_SUCCESS) + { + if (pExt861->did_type8_data_block[t8db_idx].version.t8y420 == 1 && newTiming.pclk1khz > NVT_HDMI_YUV_420_PCLK_SUPPORTED_MIN) + { + UPDATE_BPC_FOR_COLORFORMAT(newTiming.etc.yuv420, 0, 1, + pInfo->hdmiForumInfo.dc_30bit_420, + pInfo->hdmiForumInfo.dc_36bit_420, 0, + pInfo->hdmiForumInfo.dc_48bit_420); + } + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), "CTA861-T8:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status), + (int)newTiming.HVisible, (int)newTiming.VVisible, + (int)newTiming.etc.rrx1k/1000, (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? "I":"P")); + newTiming.etc.status = NVT_STATUS_CTA861_DID_T8N(NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status)); + newTiming.etc.name[sizeof(newTiming.etc.name) - 1] = '\0'; + newTiming.etc.rep = 0x1; + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + } + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861DIDType10VideoTimingDataBlock(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo) +{ + NvU8 i = 0; + NvU8 t10db_idx = 0; + NvU8 startSeqNum = 0; + + NVT_TIMING newTiming; + NVT_EDID_INFO *pInfo = (NVT_EDID_INFO *)pRawInfo; + const DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR *p6bytesDescriptor = NULL; + NvU8 eachOfDescriptorsSize = sizeof (DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR); + + for (t10db_idx = 0; t10db_idx < pExt861->total_did_type10db; t10db_idx++) + { + startSeqNum = getExistedCTATimingSeqNumber(pInfo, NVT_TYPE_CTA861_DID_T10); + + // 6 or 7 bytes length + eachOfDescriptorsSize += pExt861->did_type10_data_block[t10db_idx].version.t10_m; + + for (i = 0; i < pExt861->did_type10_data_block[t10db_idx].total_descriptors; i++) + { + if (pExt861->did_type10_data_block[t10db_idx].total_descriptors < NVT_CTA861_DID_TYPE10_DESCRIPTORS_MIN || + pExt861->did_type10_data_block[t10db_idx].total_descriptors > NVT_CTA861_DID_TYPE10_DESCRIPTORS_MAX) + { + nvt_assert(0 && "payload descriptor invalid. 
expect T10VTDB has minimum 1 descriptor, maximum 4 descriptors"); + continue; + } + + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + if (NVT_STATUS_SUCCESS == parseDisplayId20Timing10Descriptor(&pExt861->did_type10_data_block[t10db_idx].payload[i*eachOfDescriptorsSize], + &newTiming, + pExt861->did_type10_data_block[t10db_idx].version.t10_m, startSeqNum+i)) + { + p6bytesDescriptor = (const DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR *) + &pExt861->did_type10_data_block[t10db_idx].payload[i*eachOfDescriptorsSize]; + + if (p6bytesDescriptor->options.ycc420_support && newTiming.pclk1khz > NVT_HDMI_YUV_420_PCLK_SUPPORTED_MIN) + { + UPDATE_BPC_FOR_COLORFORMAT(newTiming.etc.yuv420, 0, 1, + pInfo->hdmiForumInfo.dc_30bit_420, + pInfo->hdmiForumInfo.dc_36bit_420, 0, + pInfo->hdmiForumInfo.dc_48bit_420); + } + + if (p6bytesDescriptor->options.timing_formula == DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD_CRT_BASED) + { + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), "CTA861-T10:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status), + (int)newTiming.HVisible, + (int)newTiming.VVisible, + (int)newTiming.etc.rrx1k/1000, + (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? "I":"P")); + } + else + { + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), "CTA861-T10RB%d:#%3d:%dx%dx%3d.%03dHz/%s", + p6bytesDescriptor->options.timing_formula, + (int)NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status), + (int)newTiming.HVisible, + (int)newTiming.VVisible, + (int)newTiming.etc.rrx1k/1000, + (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? "I":"P")); + } + newTiming.etc.status = NVT_STATUS_CTA861_DID_T10N(NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status)); + newTiming.etc.name[sizeof(newTiming.etc.name) - 1] = '\0'; + newTiming.etc.rep = 0x1; + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + else + { + continue; + } + } + } +} + +POP_SEGMENTS diff --git a/src/common/modeset/timing/nvt_edidext_displayid.c b/src/common/modeset/timing/nvt_edidext_displayid.c new file mode 100644 index 0000000..d9d8314 --- /dev/null +++ b/src/common/modeset/timing/nvt_edidext_displayid.c @@ -0,0 +1,1386 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+//
+// File:      nvt_edidext_displayid.c
+//
+// Purpose:   provide EDID-related services
+//
+//*****************************************************************************
+
+#include "nvBinSegment.h"
+#include "nvmisc.h"
+
+#include "edid.h"
+
+PUSH_SEGMENTS
+
+static NVT_STATUS parseDisplayIdSection(DISPLAYID_SECTION * section,
+                                        NvU32 max_length,
+                                        NVT_EDID_INFO *pEdidInfo);
+
+// Specific blocks that can be parsed based on DisplayID
+static NVT_STATUS parseDisplayIdProdIdentityBlock(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdParam(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdColorChar(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdTiming1(NvU8 * block, NVT_EDID_INFO *pEdidInfo);
+static NVT_STATUS parseDisplayIdTiming2(NvU8 * block, NVT_EDID_INFO *pEdidInfo);
+static NVT_STATUS parseDisplayIdTiming3(NvU8 * block, NVT_EDID_INFO *pEdidInfo);
+static NVT_STATUS parseDisplayIdTiming4(NvU8 * block, NVT_EDID_INFO *pEdidInfo);
+static NVT_STATUS parseDisplayIdTiming5(NvU8 * block, NVT_EDID_INFO *pEdidInfo);
+static NVT_STATUS parseDisplayIdTimingVesa(NvU8 * block, NVT_EDID_INFO *pEdidInfo);
+static NVT_STATUS parseDisplayIdTimingEIA(NvU8 * block, NVT_EDID_INFO *pEdidInfo);
+static NVT_STATUS parseDisplayIdRangeLimits(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdSerialNumber(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdAsciiString(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdDeviceData(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdInterfacePower(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdTransferChar(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdDisplayInterface(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdStereo(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdTiledDisplay(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdCtaData(NvU8 * block, NVT_EDID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdDisplayInterfaceFeatures(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+
+static NVT_STATUS parseDisplayIdTiming1Descriptor(DISPLAYID_TIMING_1_DESCRIPTOR * desc, NVT_TIMING *pT);
+static NVT_STATUS parseDisplayIdTiming2Descriptor(DISPLAYID_TIMING_2_DESCRIPTOR * desc, NVT_TIMING *pT);
+static NVT_STATUS parseDisplayIdTiming3Descriptor(DISPLAYID_TIMING_3_DESCRIPTOR * desc, NVT_TIMING *pT);
+static NVT_STATUS parseDisplayIdTiming5Descriptor(DISPLAYID_TIMING_5_DESCRIPTOR * desc, NVT_TIMING *pT);
+
+/**
+ * @brief Parses a DisplayID Extension block, with the timings and other
+ *        info stored in pEdidInfo
+ * @param p The EDID Extension Block (with a DisplayID in it)
+ * @param size Size of the DisplayID Extension Block
+ * @param pEdidInfo EDID struct containing DisplayID information and
+ *                  the timings
+ */
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS getDisplayIdEDIDExtInfo(NvU8 *p, NvU32 size,
+                                   NVT_EDID_INFO *pEdidInfo)
+{
+    DISPLAYID_SECTION * section;
+
+    if (p == NULL || size < sizeof(EDIDV1STRUC))
+        return NVT_STATUS_ERR;
+    if (p[0] != NVT_EDID_EXTENSION_DISPLAYID)
+        return NVT_STATUS_ERR;
+
+    section = (DISPLAYID_SECTION *)(p + 1);
+    pEdidInfo->ext_displayid.version = section->version;
+    if (section->product_type > NVT_DISPLAYID_PROD_MAX_NUMBER)
+        return NVT_STATUS_ERR;
+
+    return parseDisplayIdSection(section, sizeof(EDIDV1STRUC) - 1, pEdidInfo);
+}
+
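+// Usage sketch (illustrative only; pEdid and extIndex are hypothetical caller
+// variables, not part of this file): a caller walking the 128-byte EDID
+// extension blocks could hand a DisplayID extension to this parser as follows.
+//
+//     NvU8 *pExt = pEdid + sizeof(EDIDV1STRUC) * (1 + extIndex);
+//     if (pExt[0] == NVT_EDID_EXTENSION_DISPLAYID)
+//     {
+//         status = getDisplayIdEDIDExtInfo(pExt, sizeof(EDIDV1STRUC), pEdidInfo);
+//     }
+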
+/** + * @brief updates the color format for each bpc for each timing + * @param pInfo EDID struct containing DisplayID information and + * the timings + * @param timingIdx Index of the first display ID timing in the + * pInfo->timing[] timing array. + */ +CODE_SEGMENT(PAGE_DD_CODE) +void updateColorFormatForDisplayIdExtnTimings(NVT_EDID_INFO *pInfo, + NvU32 timingIdx) +{ + // pDisplayIdInfo is the parsed display ID info + NVT_DISPLAYID_INFO *pDisplayIdInfo = &pInfo->ext_displayid; + NVT_TIMING *pT = &pInfo->timing[timingIdx]; + + nvt_assert((timingIdx) <= COUNT(pInfo->timing)); + + if ((pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_A_SUPPORTED || + pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_B_SUPPORTED || + pInfo->ext861.valid.H14B_VSDB || pInfo->ext861.valid.H20_HF_VSDB) && pInfo->ext861.revision >= NVT_CEA861_REV_A) + { + if (!pInfo->ext_displayid.supported_displayId2_0) + { + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 0, + 1, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_10b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_12b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_14b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_16b); + } + else + { + // rgb444 (always support 6bpc and 8bpc as per DP spec 5.1.1.1.1 RGB Colorimetry) + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 0, + 1, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_10b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_12b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_14b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_16b); + } + } + else // DP + { + if (!pInfo->ext_displayid.supported_displayId2_0) + { + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 1, + 1, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_10b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_12b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_14b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_16b); + } + else + { + // rgb444 (always support 6bpc and 8bpc as per DP spec 5.1.1.1.1 RGB Colorimetry) + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 1, + 1, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_10b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_12b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_14b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_16b); + } + } + + if (!pInfo->ext_displayid.supported_displayId2_0) + { + // yuv444 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv444, 0, /* yuv444 does not support 6bpc */ + pDisplayIdInfo->u4.display_interface.ycbcr444_depth.support_8b, + pDisplayIdInfo->u4.display_interface.ycbcr444_depth.support_10b, + pDisplayIdInfo->u4.display_interface.ycbcr444_depth.support_12b, + pDisplayIdInfo->u4.display_interface.ycbcr444_depth.support_14b, + pDisplayIdInfo->u4.display_interface.ycbcr444_depth.support_16b); + // yuv422 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv422, 0, /* yuv422 does not support 6bpc */ + pDisplayIdInfo->u4.display_interface.ycbcr422_depth.support_8b, + pDisplayIdInfo->u4.display_interface.ycbcr422_depth.support_10b, + pDisplayIdInfo->u4.display_interface.ycbcr422_depth.support_12b, + pDisplayIdInfo->u4.display_interface.ycbcr422_depth.support_14b, + pDisplayIdInfo->u4.display_interface.ycbcr422_depth.support_16b); + } + else + { + // yuv444 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv444, 0, /* yuv444 does 
not support 6bpc */ + pDisplayIdInfo->u4.display_interface_features.ycbcr444_depth.support_8b, + pDisplayIdInfo->u4.display_interface_features.ycbcr444_depth.support_10b, + pDisplayIdInfo->u4.display_interface_features.ycbcr444_depth.support_12b, + pDisplayIdInfo->u4.display_interface_features.ycbcr444_depth.support_14b, + pDisplayIdInfo->u4.display_interface_features.ycbcr444_depth.support_16b); + // yuv422 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv422, 0, /* yuv422 does not support 6bpc */ + pDisplayIdInfo->u4.display_interface_features.ycbcr422_depth.support_8b, + pDisplayIdInfo->u4.display_interface_features.ycbcr422_depth.support_10b, + pDisplayIdInfo->u4.display_interface_features.ycbcr422_depth.support_12b, + pDisplayIdInfo->u4.display_interface_features.ycbcr422_depth.support_14b, + pDisplayIdInfo->u4.display_interface_features.ycbcr422_depth.support_16b); + // yuv420 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv420, 0, /* yuv420 does not support 6bpc */ + pDisplayIdInfo->u4.display_interface_features.ycbcr420_depth.support_8b, + pDisplayIdInfo->u4.display_interface_features.ycbcr420_depth.support_10b, + pDisplayIdInfo->u4.display_interface_features.ycbcr420_depth.support_12b, + pDisplayIdInfo->u4.display_interface_features.ycbcr420_depth.support_14b, + pDisplayIdInfo->u4.display_interface_features.ycbcr420_depth.support_16b); + } +} + +/** + * @brief Parses a displayID Section + * @param section The DisplayID Section to parse + * @param max_length The indicated total length of the displayID as given (or + * sizeof(EDIDV1STRUCT) for an extension block) + * @param pEdidInfo EDID struct containing DisplayID information and + * the timings + */ +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdSection(DISPLAYID_SECTION * section, + NvU32 max_length, + NVT_EDID_INFO *pEdidInfo) +{ + NvU8 block_location = 0; + NvU8 section_length; + NvU8 remaining_length; + + if (section == NULL || max_length <= NVT_DISPLAYID_SECTION_HEADER_LEN) + return NVT_STATUS_ERR; + if (section->section_bytes > max_length - NVT_DISPLAYID_SECTION_HEADER_LEN) + return NVT_STATUS_ERR; + + remaining_length = section->section_bytes; + + while (block_location < section->section_bytes) + { + DISPLAYID_DATA_BLOCK_HEADER * hdr = (DISPLAYID_DATA_BLOCK_HEADER *) (section->data + block_location); + NvBool is_prod_id = remaining_length > 3 && block_location == 0 && hdr->type == 0 && hdr->data_bytes > 0; + NvU8 i; + + // Check the padding. + if (hdr->type == 0 && !is_prod_id) + { + for (i = 1 ; i < remaining_length; i++) + { + // All remaining bytes must all be 0. 
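+                // (A type of 0 is only a valid Product Identification block when
+                // it is the first block in the section; anywhere else, zero bytes
+                // are treated as end-of-section padding and validated here.)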
+                if (section->data[block_location + i] != 0)
+                {
+                    return NVT_STATUS_ERR;
+                }
+            }
+
+            section_length = remaining_length;
+        }
+        else
+        {
+            if (parseDisplayIdBlock((NvU8 *)(section->data + block_location),
+                                    section->section_bytes - block_location,
+                                    &section_length,
+                                    pEdidInfo) != NVT_STATUS_SUCCESS)
+                return NVT_STATUS_ERR;
+        }
+
+        block_location += section_length;
+        remaining_length -= section_length;
+    }
+
+    return NVT_STATUS_SUCCESS;
+}
+
+/**
+ * @brief Parses a DisplayID data block
+ * @param pBlock The DisplayID data block to parse
+ * @param max_length The indicated total length of each data block, used for bounds checking
+ * @param pLength Returns the indicated length of the data block
+ * @param pEdidInfo EDID struct containing DisplayID information and the
+ *                  timings; if NULL, the block is parsed for validation only
+ */
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS parseDisplayIdBlock(NvU8* pBlock,
+                               NvU8 max_length,
+                               NvU8* pLength,
+                               NVT_EDID_INFO *pEdidInfo)
+{
+    DISPLAYID_DATA_BLOCK_HEADER * hdr = (DISPLAYID_DATA_BLOCK_HEADER *) pBlock;
+    NVT_STATUS ret = NVT_STATUS_SUCCESS;
+    NVT_DISPLAYID_INFO *pInfo;
+
+    if (pBlock == NULL || max_length <= NVT_DISPLAYID_DATABLOCK_HEADER_LEN)
+        return NVT_STATUS_ERR;
+
+    if (hdr->data_bytes > max_length - NVT_DISPLAYID_DATABLOCK_HEADER_LEN)
+        return NVT_STATUS_ERR;
+
+    pInfo = pEdidInfo == NULL ? NULL : &pEdidInfo->ext_displayid;
+
+    *pLength = hdr->data_bytes + NVT_DISPLAYID_DATABLOCK_HEADER_LEN;
+
+    switch (hdr->type)
+    {
+        case NVT_DISPLAYID_BLOCK_TYPE_PRODUCT_IDENTITY:
+            ret = parseDisplayIdProdIdentityBlock(pBlock, pInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_PARAM:
+            ret = parseDisplayIdParam(pBlock, pInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_COLOR_CHAR:
+            ret = parseDisplayIdColorChar(pBlock, pInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_TIMING_1:
+            ret = parseDisplayIdTiming1(pBlock, pEdidInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_TIMING_2:
+            ret = parseDisplayIdTiming2(pBlock, pEdidInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_TIMING_3:
+            ret = parseDisplayIdTiming3(pBlock, pEdidInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_TIMING_4:
+            ret = parseDisplayIdTiming4(pBlock, pEdidInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_TIMING_5:
+            ret = parseDisplayIdTiming5(pBlock, pEdidInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_TIMING_VESA:
+            ret = parseDisplayIdTimingVesa(pBlock, pEdidInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_TIMING_CEA:
+            ret = parseDisplayIdTimingEIA(pBlock, pEdidInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_RANGE_LIMITS:
+            ret = parseDisplayIdRangeLimits(pBlock, pInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_SERIAL_NUMBER:
+            ret = parseDisplayIdSerialNumber(pBlock, pInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_ASCII_STRING:
+            ret = parseDisplayIdAsciiString(pBlock, pInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_DEVICE_DATA:
+            ret = parseDisplayIdDeviceData(pBlock, pInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_INTERFACE_POWER:
+            ret = parseDisplayIdInterfacePower(pBlock, pInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_TRANSFER_CHAR:
+            ret = parseDisplayIdTransferChar(pBlock, pInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_INTERFACE:
+            ret = parseDisplayIdDisplayInterface(pBlock, pInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_STEREO:
+            ret = parseDisplayIdStereo(pBlock, pInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_TILEDDISPLAY:
+            ret = parseDisplayIdTiledDisplay(pBlock, pInfo);
+            break;
+        case NVT_DISPLAYID_BLOCK_TYPE_CTA_DATA:
+            ret = parseDisplayIdCtaData(pBlock, pEdidInfo);
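+            // (CTA data blocks embedded in a DisplayID section reuse the same
+            // CTA-861 tag parser as a native CEA extension block; see
+            // parseDisplayIdCtaData below.)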
break; + case NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_INTERFACE_FEATURES: + ret = parseDisplayIdDisplayInterfaceFeatures(pBlock, pInfo); + break; + default: + ret = NVT_STATUS_ERR; + break; + } + + if (pEdidInfo == NULL) return ret; + + return NVT_STATUS_SUCCESS; +} +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdColorChar(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + NvU32 i, j; + NvU16 x_p, y_p; + DISPLAYID_COLOR_CHAR_BLOCK * blk = (DISPLAYID_COLOR_CHAR_BLOCK *)block; + + /** unused flag - uncomment if you wish to use it in the future + NvU8 isTemp = DRF_VAL(T_DISPLAYID, _COLOR, _TEMPORAL, blk->point_info); + */ + NvU8 wp_num = DRF_VAL(T_DISPLAYID, _COLOR, _WHITE_POINTS, blk->point_info); + NvU8 prim_num = DRF_VAL(T_DISPLAYID, _COLOR, _PRIMARIES, blk->point_info); + + if ((prim_num + wp_num) * sizeof(DISPLAYID_COLOR_POINT) + 1 != blk->header.data_bytes) + { + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + for (i = 0; i < prim_num; i++) + { + x_p = (blk->points)[i].color_x_bits_low + + (DRF_VAL(T_DISPLAYID, _COLOR, _POINT_X, (blk->points)[i].color_bits_mid) << 8); + y_p = DRF_VAL(T_DISPLAYID, _COLOR, _POINT_Y, (blk->points)[i].color_bits_mid) + + ((blk->points)[i].color_y_bits_high << 4); + pInfo->primaries[i].x = x_p; + pInfo->primaries[i].y = y_p; + } + + for (j = 0; j < wp_num; j++) + { + x_p = (blk->points)[i].color_x_bits_low + + (DRF_VAL(T_DISPLAYID, _COLOR, _POINT_X, (blk->points)[i].color_bits_mid) << 8); + y_p = DRF_VAL(T_DISPLAYID, _COLOR, _POINT_Y, (blk->points)[i].color_bits_mid) + + ((blk->points)[i].color_y_bits_high << 4); + pInfo->white_points[pInfo->total_primaries + j].x = x_p; + pInfo->white_points[pInfo->total_primaries + j].y = y_p; + + i++; + } + pInfo->total_primaries = prim_num; + pInfo->total_white_points += wp_num; + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdProdIdentityBlock(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_PROD_IDENTIFICATION_BLOCK * blk = (DISPLAYID_PROD_IDENTIFICATION_BLOCK *)block; + if (blk->header.data_bytes - blk->productid_string_size != NVT_DISPLAYID_PRODUCT_IDENTITY_MIN_LEN) + { + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + pInfo->vendor_id = (blk->vendor)[2] | ((blk->vendor)[1] << 8) | ((blk->vendor)[0] << 16); + pInfo->product_id = blk->product_code; + pInfo->serial_number = blk->serial_number; + pInfo->week = blk->model_tag; + pInfo->year = blk->model_year; + + if (blk->productid_string_size != 0) + NVMISC_STRNCPY((char *)pInfo->product_string, (const char *)blk->productid_string, blk->productid_string_size); + pInfo->product_string[blk->productid_string_size] = '\0'; + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdParam(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_DISPLAY_PARAM_BLOCK * blk = (DISPLAYID_DISPLAY_PARAM_BLOCK *)block; + if (blk->header.data_bytes != NVT_DISPLAYID_DISPLAY_PARAM_BLOCK_LEN) + { + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + pInfo->horiz_size = blk->horizontal_image_size; + pInfo->vert_size = blk->vertical_image_size; + pInfo->horiz_pixels = blk->horizontal_pixel_count; + pInfo->vert_pixels = blk->vertical_pixel_count; + + pInfo->support_audio = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _SUPPORT_AUDIO, blk->feature); + pInfo->separate_audio = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _SEPARATE_AUDIO, blk->feature); + pInfo->audio_override = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, 
_AUDIO_INPUT_OVERRIDE, blk->feature); + pInfo->power_management = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _POWER_MANAGEMENT, blk->feature); + pInfo->fixed_timing = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _FIXED_TIMING, blk->feature); + pInfo->fixed_pixel_format = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _FIXED_PIXEL_FORMAT, blk->feature); + pInfo->deinterlace = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _DEINTERLACING, blk->feature); + + pInfo->gamma = (NvU16)(blk->transfer_char_gamma - 1) * 100; + pInfo->aspect_ratio = blk->aspect_ratio; + + pInfo->depth_overall = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _DEPTH_OVERALL, blk->color_bit_depth); + pInfo->depth_native = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _DEPTH_NATIVE, blk->color_bit_depth); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming1(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU16 i; + NVT_TIMING newTiming; + DISPLAYID_TIMING_1_BLOCK * blk = (DISPLAYID_TIMING_1_BLOCK *)block; + if (blk->header.data_bytes % sizeof(DISPLAYID_TIMING_1_DESCRIPTOR) != 0) + { + return NVT_STATUS_ERR; + } + + for (i = 0; i * sizeof(DISPLAYID_TIMING_1_DESCRIPTOR) < blk->header.data_bytes; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayIdTiming1Descriptor(blk->descriptors + i, + &newTiming) == NVT_STATUS_SUCCESS) + { + if (pEdidInfo == NULL) continue; + + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + else + { + if (pEdidInfo == NULL) return NVT_STATUS_ERR; + } + } + return NVT_STATUS_SUCCESS; +} +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming1Descriptor(DISPLAYID_TIMING_1_DESCRIPTOR * type1, NVT_TIMING *pT) +{ + if (type1 == NULL || pT == NULL) + return NVT_STATUS_ERR; + + // the pixel clock + pT->pclk = (NvU32)((type1->pixel_clock_high << 16) + (type1->pixel_clock_mid << 8) + type1->pixel_clock_low_minus_0_01MHz + 1); + pT->pclk1khz = (pT->pclk << 3) + (pT->pclk << 1); + + // the DisplayID spec does not support border + pT->HBorder = pT->VBorder = 0; + + // get horizontal timing parameters + pT->HVisible = (NvU16)((type1->horizontal.active_image_pixels_high << 8) + type1->horizontal.active_image_pixels_low_minus_1 + 1); + pT->HTotal = (NvU16)((type1->horizontal.blank_pixels_high << 8) + type1->horizontal.blank_pixels_low_minus_1 + 1) + pT->HVisible; + pT->HFrontPorch = (NvU16)((type1->horizontal.front_porch_high << 8) + type1->horizontal.front_porch_low_minus_1 + 1); + pT->HSyncWidth = (NvU16)((type1->horizontal.sync_width_high << 8) + type1->horizontal.sync_width_low_minus_1 + 1); + pT->HSyncPol = type1->horizontal.sync_polarity ? NVT_H_SYNC_POSITIVE : NVT_H_SYNC_NEGATIVE; + + // get vertical timings + pT->VVisible = (NvU16)((type1->vertical.active_image_lines_high << 8) + type1->vertical.active_image_lines_low_minus_1 + 1); + pT->VTotal = (NvU16)((type1->vertical.blank_lines_high << 8) + type1->vertical.blank_lines_low_minus_1 + 1) + pT->VVisible; + pT->VFrontPorch = (NvU16)((type1->vertical.front_porch_lines_high << 8) + type1->vertical.front_porch_lines_low_minus_1 + 1); + pT->VSyncWidth = (NvU16)((type1->vertical.sync_width_lines_high << 8) + type1->vertical.sync_width_lines_low_minus_1 + 1); + pT->VSyncPol = type1->vertical.sync_polarity ? NVT_V_SYNC_POSITIVE : NVT_V_SYNC_NEGATIVE; + + // EDID used in DP1.4 Compliance test had incorrect HBlank listed, leading to wrong raster sizes being set by driver (bug 2714607) + // Filter incorrect timings here. 
HTotal must cover sufficient blanking time + if (pT->HTotal < (pT->HVisible + pT->HFrontPorch + pT->HSyncWidth)) + { + return NVT_STATUS_ERR; + } + + // the frame scanning type + pT->interlaced = type1->options.interface_frame_scanning_type; + + // the aspect ratio + switch (type1->options.aspect_ratio) + { + case NVT_DISPLAYID_TIMING_ASPECT_RATIO_1_1: + pT->etc.aspect = (1 << 16) | 1; + break; + case NVT_DISPLAYID_TIMING_ASPECT_RATIO_5_4: + pT->etc.aspect = (5 << 16) | 4; + break; + case NVT_DISPLAYID_TIMING_ASPECT_RATIO_4_3: + pT->etc.aspect = (4 << 16) | 3; + break; + case NVT_DISPLAYID_TIMING_ASPECT_RATIO_15_9: + pT->etc.aspect = (15 << 16) | 9; + break; + case NVT_DISPLAYID_TIMING_ASPECT_RATIO_16_9: + pT->etc.aspect = (16 << 16) | 9; + break; + case NVT_DISPLAYID_TIMING_ASPECT_RATIO_16_10: + pT->etc.aspect = (16 << 16) | 10; + break; + default: + pT->etc.aspect = 0; + break; + } + + // the refresh rate + pT->etc.rr = NvTiming_CalcRR(pT->pclk1khz, pT->interlaced, pT->HTotal, pT->VTotal); + pT->etc.rrx1k = NvTiming_CalcRRx1k(pT->pclk1khz, pT->interlaced, pT->HTotal, pT->VTotal); + pT->etc.name[39] = '\0'; + pT->etc.rep = 0x1; // bit mask for no pixel repetition + + pT->etc.status = NVT_STATUS_DISPLAYID_1; + // Unlike the PTM in EDID base block, DisplayID type I/II preferred timing does not have dependency on sequence + // so we'll just update the preferred flag, not sequence them + //pT->etc.status = NVT_STATUS_DISPLAYID_1N(1); + pT->etc.flag |= type1->options.is_preferred_detailed_timing ? NVT_FLAG_DISPLAYID_DTD_PREFERRED_TIMING : 0; + + /* Fields currently not used. Uncomment them for future use + type1->options.stereo_support; + */ + + // the DisplayID spec covers the timing parameter(Visible/FrontPorch/SyncWidth/Total) range from 1~65536 while our NVT_TIMING structure which is mostly based on NvU16 only covers 0~65535 + nvt_assert(pT->HVisible != 0); + nvt_assert(pT->HFrontPorch != 0); + nvt_assert(pT->HSyncWidth != 0); + nvt_assert(pT->VVisible != 0); + nvt_assert(pT->VFrontPorch != 0); + nvt_assert(pT->VSyncWidth != 0); + + // cover the possible overflow + nvt_assert(pT->HTotal > pT->HVisible); + nvt_assert(pT->VTotal > pT->VVisible); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming2(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU16 i; + DISPLAYID_TIMING_2_BLOCK * blk = (DISPLAYID_TIMING_2_BLOCK *)block; + NVT_TIMING newTiming; + + if (blk->header.data_bytes % sizeof(DISPLAYID_TIMING_2_DESCRIPTOR) != 0) + { + return NVT_STATUS_ERR; + } + + for (i = 0; i * sizeof(DISPLAYID_TIMING_2_DESCRIPTOR) < blk->header.data_bytes; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayIdTiming2Descriptor(blk->descriptors + i, + &newTiming) == NVT_STATUS_SUCCESS) + { + if (pEdidInfo == NULL) continue; + + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + else + { + if (pEdidInfo == NULL) return NVT_STATUS_ERR; + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming2Descriptor(DISPLAYID_TIMING_2_DESCRIPTOR * type2, NVT_TIMING *pT) +{ + if (type2 == NULL || pT == NULL) + return NVT_STATUS_ERR; + + // the pixel clock + pT->pclk = (NvU32)((type2->pixel_clock_high << 16) + (type2->pixel_clock_mid << 8) + type2->pixel_clock_low_minus_0_01MHz + 1); + pT->pclk1khz = (pT->pclk << 3) + (pT->pclk << 1); + + // the DisplayID spec does not support border + pT->HBorder = pT->VBorder = 0; + + // get horizontal timing parameters + pT->HVisible = 
(NvU16)((type2->horizontal.active_image_in_char_high << 8) + type2->horizontal.active_image_in_char_minus_1 + 1) * NVT_DISPLAYID_CHAR_WIDTH_IN_PIXELS;
+    pT->HTotal = (NvU16)(type2->horizontal.blank_in_char_minus_1 + 1) * NVT_DISPLAYID_CHAR_WIDTH_IN_PIXELS + pT->HVisible;
+    pT->HFrontPorch = (NvU16)(type2->horizontal.front_porch_in_char_minus_1 + 1) * NVT_DISPLAYID_CHAR_WIDTH_IN_PIXELS;
+    pT->HSyncWidth = (NvU16)(type2->horizontal.sync_width_in_char_minus_1 + 1) * NVT_DISPLAYID_CHAR_WIDTH_IN_PIXELS;
+    pT->HSyncPol = type2->options.hsync_polarity ? NVT_H_SYNC_POSITIVE : NVT_H_SYNC_NEGATIVE;
+
+    // get vertical timing parameters
+    pT->VVisible = (NvU16)((type2->vertical.active_image_lines_high << 8) + type2->vertical.active_image_lines_low_minus_1 + 1);
+    pT->VTotal = (NvU16)(type2->vertical.blank_lines_minus_1 + 1) + pT->VVisible;
+    pT->VFrontPorch = (NvU16)(type2->vertical.front_porch_lines_minus_1 + 1);
+    pT->VSyncWidth = (NvU16)(type2->vertical.sync_width_lines_minus_1 + 1);
+    pT->VSyncPol = type2->options.vsync_polarity ? NVT_V_SYNC_POSITIVE : NVT_V_SYNC_NEGATIVE;
+
+    // the frame scanning type
+    pT->interlaced = type2->options.interface_frame_scanning_type;
+
+    // the refresh rate
+    pT->etc.rr = NvTiming_CalcRR(pT->pclk1khz, pT->interlaced, pT->HTotal, pT->VTotal);
+    pT->etc.rrx1k = NvTiming_CalcRRx1k(pT->pclk1khz, pT->interlaced, pT->HTotal, pT->VTotal);
+
+    pT->etc.aspect = 0;
+    pT->etc.name[39] = '\0';
+    pT->etc.rep = 0x1; // Bit mask for no pixel repetition
+
+    pT->etc.status = NVT_STATUS_DISPLAYID_2;
+    // Unlike the PTM in EDID base block, DisplayID type I/II preferred timing does not have dependency on sequence
+    // so we'll just update the preferred flag, not sequence them
+    //pT->etc.status = NVT_STATUS_DISPLAYID_1N(1);
+    pT->etc.flag |= type2->options.is_preferred_detailed_timing ? NVT_FLAG_DISPLAYID_DTD_PREFERRED_TIMING : 0;
+
+    /* Fields currently not used. Uncomment them for future use
+    type2->options.stereo_support;
+    */
+
+    // the DisplayID spec covers the timing parameter (Visible/FrontPorch/SyncWidth/Total) range 1~65536, while our NVT_TIMING structure, which is mostly based on NvU16, only covers 0~65535
+    nvt_assert(pT->HVisible != 0);
+    nvt_assert(pT->HFrontPorch != 0);
+    nvt_assert(pT->HSyncWidth != 0);
+    nvt_assert(pT->VVisible != 0);
+    nvt_assert(pT->VFrontPorch != 0);
+    nvt_assert(pT->VSyncWidth != 0);
+
+    // cover the possible overflow
+    nvt_assert(pT->HTotal > pT->HVisible);
+    nvt_assert(pT->VTotal > pT->VVisible);
+
+    return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+static NVT_STATUS parseDisplayIdTiming3Descriptor(DISPLAYID_TIMING_3_DESCRIPTOR * desc,
+                                                  NVT_TIMING *pT)
+{
+    NvU8 formula, aspect;
+    NvU32 horiz, vert, rr;
+    NvU32 interlace;
+    if (desc == NULL || pT == NULL)
+        return NVT_STATUS_ERR;
+
+    formula = DRF_VAL(T_DISPLAYID, _TIMING_3, _FORMULA, desc->optns);
+    /* Fields currently not used, uncomment for use
+    preferred = DRF_VAL(T_DISPLAYID, _TIMING, _PREFERRED, desc->optns);
+    */
+    aspect = DRF_VAL(T_DISPLAYID, _TIMING_3, _ASPECT_RATIO, desc->optns);
+    interlace = DRF_VAL(T_DISPLAYID, _TIMING_3, _INTERLACE, desc->transfer) ?
NVT_INTERLACED : NVT_PROGRESSIVE; + rr = (NvU32)(DRF_VAL(T_DISPLAYID, _TIMING_3, _REFRESH_RATE, desc->transfer) + 1); + + horiz = (NvU32)((desc->horizontal_active_pixels + 1) << 3); + + switch (aspect) + { + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_1_1: + vert = horiz; + break; + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_5_4: + vert = horiz * 4 / 5; + break; + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_4_3: + vert = horiz * 3 / 4; + break; + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_15_9: + vert = horiz * 9 / 15; + break; + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_16_9: + vert = horiz * 9 / 16; + break; + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_16_10: + vert = horiz * 10 / 16; + break; + default: + return NVT_STATUS_ERR; + } + + switch (formula) + { + case NVT_DISPLAYID_TIMING_3_FORMULA_STANDARD: + if (NvTiming_CalcCVT(horiz, vert, rr, interlace, pT) != NVT_STATUS_SUCCESS) + return NVT_STATUS_ERR; + break; + case NVT_DISPLAYID_TIMING_3_FORMULA_REDUCED_BLANKING: + if (NvTiming_CalcCVT_RB(horiz, vert, rr, interlace, pT) != NVT_STATUS_SUCCESS) + return NVT_STATUS_ERR; + break; + default: + return NVT_STATUS_ERR; + } + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming3(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU16 i; + DISPLAYID_TIMING_3_BLOCK * blk = (DISPLAYID_TIMING_3_BLOCK *)block; + NVT_TIMING newTiming; + + if (blk->header.data_bytes % sizeof(DISPLAYID_TIMING_3_DESCRIPTOR) != 0) + { + return NVT_STATUS_ERR; + } + + for (i = 0; i * sizeof(DISPLAYID_TIMING_3_DESCRIPTOR) < blk->header.data_bytes; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayIdTiming3Descriptor(blk->descriptors + i, + &newTiming) == NVT_STATUS_SUCCESS) + { + if (pEdidInfo == NULL) continue; + + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + else + { + if (pEdidInfo == NULL) return NVT_STATUS_ERR; + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming4(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU16 i; + NVT_TIMING newTiming; + DISPLAYID_TIMING_4_BLOCK * blk = (DISPLAYID_TIMING_4_BLOCK *)block; + if (blk->header.data_bytes < 1 || blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + return NVT_STATUS_ERR; + } + + for (i = 0; i < blk->header.data_bytes; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (NvTiming_EnumDMT((NvU32)(blk->timing_codes[i]), + &newTiming) == NVT_STATUS_SUCCESS) + { + if (pEdidInfo == NULL) continue; + + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + else + { + if (pEdidInfo == NULL) return NVT_STATUS_ERR; + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming5Descriptor(DISPLAYID_TIMING_5_DESCRIPTOR * desc, NVT_TIMING *pT) +{ + NvU32 width, height, rr; + NvBool is1000div1001 = NV_FALSE; + + // we don't handle stereo type nor custom reduced blanking yet + //NvU8 stereoType, formula; + //stereoType = (desc->optns & NVT_DISPLAYID_TIMING_5_STEREO_SUPPORT_MASK); + //formula = desc->optns & NVT_DISPLAYID_TIMING_5_FORMULA_SUPPORT_MASK; + + if (desc->optns & NVT_DISPLAYID_TIMING_5_FRACTIONAL_RR_SUPPORT_MASK) + { + is1000div1001 = NV_TRUE; + } + width = ((desc->horizontal_active_pixels_high << 8) | desc->horizontal_active_pixels_low) + 1; + height = ((desc->vertical_active_pixels_high << 8) | desc->vertical_active_pixels_low) + 1; + rr = desc->refresh_rate + 1; + return NvTiming_CalcCVT_RB2(width, height, rr, 
is1000div1001, NV_FALSE, pT); +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming5(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU16 i; + NVT_TIMING newTiming; + DISPLAYID_TIMING_5_BLOCK * blk = (DISPLAYID_TIMING_5_BLOCK *)block; + if (blk->header.data_bytes < 1 || blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + return NVT_STATUS_ERR; + } + for (i = 0; i * sizeof(DISPLAYID_TIMING_5_DESCRIPTOR) < blk->header.data_bytes; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayIdTiming5Descriptor(blk->descriptors + i, &newTiming) == NVT_STATUS_SUCCESS) + { + if (pEdidInfo == NULL) continue; + + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + else + { + if (pEdidInfo == NULL) return NVT_STATUS_ERR; + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTimingVesa(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU8 i, j; + NVT_TIMING newTiming; + DISPLAYID_TIMING_MODE_BLOCK * blk = (DISPLAYID_TIMING_MODE_BLOCK *)block; + if (blk->header.data_bytes != DISPLAYID_TIMING_VESA_BLOCK_SIZE) + { + return NVT_STATUS_ERR; + } + + for (i = 0; i < DISPLAYID_TIMING_VESA_BLOCK_SIZE; i++) + { + for (j = 0; j < 8; j++) + { + if (blk->timing_modes[i] & (1 << j)) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (NvTiming_EnumDMT((NvU32)(i * 8 + j + 1), + &newTiming) == NVT_STATUS_SUCCESS) + { + if (pEdidInfo == NULL) continue; + + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + else + { + if (pEdidInfo == NULL) return NVT_STATUS_ERR; + } + } + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTimingEIA(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU8 i, j; + NVT_TIMING newTiming; + DISPLAYID_TIMING_MODE_BLOCK * blk = (DISPLAYID_TIMING_MODE_BLOCK *)block; + if (blk->header.data_bytes != DISPLAYID_TIMING_CEA_BLOCK_SIZE) + { + return NVT_STATUS_ERR; + } + + for (i = 0; i < DISPLAYID_TIMING_CEA_BLOCK_SIZE; i++) + { + for (j = 0; j < 8; j++) + { + if (blk->timing_modes[i] & (1 << j)) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (NvTiming_EnumCEA861bTiming((NvU32)(i * 8 + j + 1), + &newTiming) == NVT_STATUS_SUCCESS) + { + if (pEdidInfo == NULL) continue; + + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + else + { + if (pEdidInfo == NULL) return NVT_STATUS_ERR; + } + } + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdRangeLimits(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + NVT_DISPLAYID_RANGE_LIMITS * rl; + DISPLAYID_RANGE_LIMITS_BLOCK * blk = (DISPLAYID_RANGE_LIMITS_BLOCK *)block; + NVT_STATUS status = NVT_STATUS_SUCCESS; + NvU32 minPclk = 0; + NvU32 maxPclk = 0; + + if (blk->header.data_bytes != DISPLAYID_RANGE_LIMITS_BLOCK_LEN) + { + return NVT_STATUS_ERR; + } + + minPclk = blk->pixel_clock_min[0] | (blk->pixel_clock_min[1] << 8) | (blk->pixel_clock_min[2] << 16); + maxPclk = blk->pixel_clock_max[0] | (blk->pixel_clock_max[1] << 8) | (blk->pixel_clock_max[2] << 16); + + if (blk->vertical_refresh_rate_min == 0 || blk->vertical_refresh_rate_max == 0 || + blk->vertical_refresh_rate_min > blk->vertical_refresh_rate_max || + minPclk > maxPclk) + { + // wrong range limit + status = NVT_STATUS_ERR; + } + + if (pInfo == NULL) return status; + + if (pInfo->rl_num >= NVT_DISPLAYID_RANGE_LIMITS_MAX_COUNT) + { + return NVT_STATUS_ERR; + } + + rl = pInfo->range_limits + pInfo->rl_num; + 
(pInfo->rl_num)++; + + rl->pclk_min = minPclk; + rl->pclk_max = maxPclk; + + rl->interlaced = DRF_VAL(T_DISPLAYID, _RANGE_LIMITS, _INTERLACE, blk->optns); + rl->cvt = DRF_VAL(T_DISPLAYID, _RANGE_LIMITS, _CVT_STANDARD, blk->optns); + rl->cvt_reduced = DRF_VAL(T_DISPLAYID, _RANGE_LIMITS, _CVT_REDUCED, blk->optns); + rl->dfd = DRF_VAL(T_DISPLAYID, _RANGE_LIMITS, _DFD, blk->optns); + + rl->hfreq_min = blk->horizontal_frequency_min; + rl->hfreq_max = blk->horizontal_frequency_max; + rl->hblank_min = blk->horizontal_blanking_min; + rl->vfreq_min = blk->vertical_refresh_rate_min; + rl->vfreq_max = blk->vertical_refresh_rate_max; + rl->vblank_min = blk->vertical_blanking_min; + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdSerialNumber(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_ASCII_STRING_BLOCK * blk = (DISPLAYID_ASCII_STRING_BLOCK *)block; + if (blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + // Nothing is currently done to store any ASCII Serial Number, if it is + // required. Code here may need to be modified sometime in the future, along + // with NVT_DISPLAYID_INFO struct + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdAsciiString(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_ASCII_STRING_BLOCK * blk = (DISPLAYID_ASCII_STRING_BLOCK *)block; + if (blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + // Nothing is currently done to store any ASCII String Data, if it is + // required. Code here may need to be modified sometime in the future, along + // with NVT_DISPLAYID_INFO struct + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdDeviceData(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_DEVICE_DATA_BLOCK * blk = (DISPLAYID_DEVICE_DATA_BLOCK *)block; + if (blk->header.data_bytes != DISPLAYID_DEVICE_DATA_BLOCK_LEN) + { + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + pInfo->tech_type = blk->technology; + + pInfo->device_op_mode = DRF_VAL(T_DISPLAYID, _DEVICE, _OPERATING_MODE, blk->operating_mode); + pInfo->support_backlight = DRF_VAL(T_DISPLAYID, _DEVICE, _BACKLIGHT, blk->operating_mode); + pInfo->support_intensity = DRF_VAL(T_DISPLAYID, _DEVICE, _INTENSITY, blk->operating_mode); + + pInfo->horiz_pixel_count = blk->horizontal_pixel_count; + pInfo->vert_pixel_count = blk->vertical_pixel_count; + + pInfo->orientation = DRF_VAL(T_DISPLAYID, _DEVICE, _ORIENTATION, blk->orientation); + pInfo->rotation = DRF_VAL(T_DISPLAYID, _DEVICE, _ROTATION, blk->orientation); + pInfo->zero_pixel = DRF_VAL(T_DISPLAYID, _DEVICE, _ZERO_PIXEL, blk->orientation); + pInfo->scan_direction = DRF_VAL(T_DISPLAYID, _DEVICE, _SCAN, blk->orientation); + + pInfo->subpixel_info = blk->subpixel_info; + pInfo->horiz_pitch = blk->horizontal_pitch; + pInfo->vert_pitch = blk->vertical_pitch; + + pInfo->color_bit_depth = DRF_VAL(T_DISPLAYID, _DEVICE, _COLOR_DEPTH, blk->color_bit_depth); + pInfo->white_to_black = DRF_VAL(T_DISPLAYID, _DEVICE, _WHITE_BLACK, blk->response_time); + pInfo->response_time = DRF_VAL(T_DISPLAYID, _DEVICE, _RESPONSE_TIME, blk->response_time); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdInterfacePower(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + 
DISPLAYID_INTERFACE_POWER_BLOCK * blk = (DISPLAYID_INTERFACE_POWER_BLOCK *)block;
+    if (blk->header.data_bytes != DISPLAYID_INTERFACE_POWER_BLOCK_LEN)
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    if (pInfo == NULL) return NVT_STATUS_SUCCESS;
+
+    // Note specifically that the data inside the T1/T2 variables is the exact
+    // interface power data. The millisecond increments are defined by the
+    // DisplayID specification.
+    pInfo->t1_min = DRF_VAL(T_DISPLAYID, _POWER, _T1_MIN, blk->power_sequence_T1);
+    pInfo->t1_max = DRF_VAL(T_DISPLAYID, _POWER, _T1_MAX, blk->power_sequence_T1);
+    pInfo->t2_max = DRF_VAL(T_DISPLAYID, _POWER, _T2, blk->power_sequence_T2);
+    pInfo->t3_max = DRF_VAL(T_DISPLAYID, _POWER, _T3, blk->power_sequence_T3);
+    pInfo->t4_min = DRF_VAL(T_DISPLAYID, _POWER, _T4_MIN, blk->power_sequence_T4_min);
+    pInfo->t5_min = DRF_VAL(T_DISPLAYID, _POWER, _T5_MIN, blk->power_sequence_T5_min);
+    pInfo->t6_min = DRF_VAL(T_DISPLAYID, _POWER, _T6_MIN, blk->power_sequence_T6_min);
+
+    return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+static NVT_STATUS parseDisplayIdTransferChar(NvU8 * block, NVT_DISPLAYID_INFO *pInfo)
+{
+    if (pInfo == NULL) return NVT_STATUS_SUCCESS;
+
+    // Transfer Characteristics are currently not supported. Parsing of this
+    // block should be added in the future, once specifications for monitors
+    // that require this information become available.
+    return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+static NVT_STATUS parseDisplayIdDisplayInterface(NvU8 * block, NVT_DISPLAYID_INFO *pInfo)
+{
+    DISPLAYID_INTERFACE_DATA_BLOCK * blk = (DISPLAYID_INTERFACE_DATA_BLOCK *)block;
+    if (blk->header.data_bytes != DISPLAYID_INTERFACE_DATA_BLOCK_LEN)
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    if (pInfo == NULL) return NVT_STATUS_SUCCESS;
+
+    pInfo->supported_displayId2_0 = 0;
+
+    // Type/Link Info
+    pInfo->u4.display_interface.interface_type = DRF_VAL(T_DISPLAYID, _INTERFACE, _TYPE, blk->info);
+    pInfo->u4.display_interface.u1.digital_num_links = DRF_VAL(T_DISPLAYID, _INTERFACE, _NUMLINKS, blk->info);
+    pInfo->u4.display_interface.interface_version = blk->version;
+
+    // Color Depths
+    pInfo->u4.display_interface.rgb_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB16, blk->color_depth_rgb);
+    pInfo->u4.display_interface.rgb_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB14, blk->color_depth_rgb);
+    pInfo->u4.display_interface.rgb_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB12, blk->color_depth_rgb);
+    pInfo->u4.display_interface.rgb_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB10, blk->color_depth_rgb);
+    pInfo->u4.display_interface.rgb_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB8, blk->color_depth_rgb);
+    pInfo->u4.display_interface.rgb_depth.support_6b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB6, blk->color_depth_rgb);
+    pInfo->u4.display_interface.ycbcr444_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_16, blk->color_depth_ycbcr444);
+    pInfo->u4.display_interface.ycbcr444_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_14, blk->color_depth_ycbcr444);
+    pInfo->u4.display_interface.ycbcr444_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_12, blk->color_depth_ycbcr444);
+    pInfo->u4.display_interface.ycbcr444_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_10, blk->color_depth_ycbcr444);
+    pInfo->u4.display_interface.ycbcr444_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_8, blk->color_depth_ycbcr444);
+
pInfo->u4.display_interface.ycbcr444_depth.support_6b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_6, blk->color_depth_ycbcr444); + pInfo->u4.display_interface.ycbcr422_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR422_16, blk->color_depth_ycbcr422); + pInfo->u4.display_interface.ycbcr422_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR422_14, blk->color_depth_ycbcr422); + pInfo->u4.display_interface.ycbcr422_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR422_12, blk->color_depth_ycbcr422); + pInfo->u4.display_interface.ycbcr422_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR422_10, blk->color_depth_ycbcr422); + pInfo->u4.display_interface.ycbcr422_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR422_8, blk->color_depth_ycbcr422); + + // Content Protection + pInfo->u4.display_interface.content_protection = DRF_VAL(T_DISPLAYID, _INTERFACE, _CONTENT, blk->content_protection); + pInfo->u4.display_interface.content_protection_version = blk->content_protection_version; + + // Spread + pInfo->u4.display_interface.spread_spectrum = DRF_VAL(T_DISPLAYID, _INTERFACE, _SPREAD_TYPE, blk->spread); + pInfo->u4.display_interface.spread_percent = DRF_VAL(T_DISPLAYID, _INTERFACE, _SPREAD_PER, blk->spread); + + // Proprietary Information + switch (pInfo->u4.display_interface.interface_type) + { + case NVT_DISPLAYID_INTERFACE_TYPE_LVDS: + pInfo->u2.lvds.color_map = DRF_VAL(T_DISPLAYID, _LVDS, _COLOR, blk->interface_attribute_1); + pInfo->u2.lvds.support_2_8v = DRF_VAL(T_DISPLAYID, _LVDS, _2_8, blk->interface_attribute_1); + pInfo->u2.lvds.support_12v = DRF_VAL(T_DISPLAYID, _LVDS, _12, blk->interface_attribute_1); + pInfo->u2.lvds.support_5v = DRF_VAL(T_DISPLAYID, _LVDS, _5, blk->interface_attribute_1); + pInfo->u2.lvds.support_3_3v = DRF_VAL(T_DISPLAYID, _LVDS, _3_3, blk->interface_attribute_1); + pInfo->u2.lvds.DE_mode = DRF_VAL(T_DISPLAYID, _INTERFACE, _DE, blk->interface_attribute_2); + pInfo->u2.lvds.polarity = DRF_VAL(T_DISPLAYID, _INTERFACE, _POLARITY, blk->interface_attribute_2); + pInfo->u2.lvds.data_strobe = DRF_VAL(T_DISPLAYID, _INTERFACE, _STROBE, blk->interface_attribute_2); + break; + case NVT_DISPLAYID_INTERFACE_TYPE_PROPRIETARY: + pInfo->u2.proprietary.DE_mode = DRF_VAL(T_DISPLAYID, _INTERFACE, _DE, blk->interface_attribute_1); + pInfo->u2.proprietary.polarity = DRF_VAL(T_DISPLAYID, _INTERFACE, _POLARITY, blk->interface_attribute_1); + pInfo->u2.proprietary.data_strobe = DRF_VAL(T_DISPLAYID, _INTERFACE, _STROBE, blk->interface_attribute_1); + break; + default: + break; + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdStereo(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + NvU8 * sub; + + DISPLAYID_STEREO_INTERFACE_METHOD_BLOCK * blk = (DISPLAYID_STEREO_INTERFACE_METHOD_BLOCK *)block; + if (blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + sub = blk->timing_sub_block; + + pInfo->stereo_code = blk->stereo_code; + switch (blk->stereo_code) + { + case NVT_DISPLAYID_STEREO_FIELD_SEQUENTIAL: + pInfo->u3.field_sequential.stereo_polarity = sub[0]; + break; + case NVT_DISPLAYID_STEREO_SIDE_BY_SIDE: + pInfo->u3.side_by_side.view_identity = sub[0]; + break; + case NVT_DISPLAYID_STEREO_PIXEL_INTERLEAVED: + NVMISC_MEMCPY(pInfo->u3.pixel_interleaved.interleave_pattern, sub, 8); + break; + case NVT_DISPLAYID_STEREO_DUAL_INTERFACE: + pInfo->u3.left_right_separate.mirroring = DRF_VAL(T_DISPLAYID, _STEREO, 
_MIRRORING, sub[0]); + pInfo->u3.left_right_separate.polarity = DRF_VAL(T_DISPLAYID, _STEREO, _POLARITY, sub[0]); + break; + case NVT_DISPLAYID_STEREO_MULTIVIEW: + pInfo->u3.multiview.num_views = sub[0]; + pInfo->u3.multiview.code = sub[1]; + break; + case NVT_DISPLAYID_STEREO_PROPRIETARY: + break; + default: + return NVT_STATUS_ERR; + } + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiledDisplay(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_TILED_DISPLAY_BLOCK * blk = (DISPLAYID_TILED_DISPLAY_BLOCK *)block; + if (blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + // For revision 0, we only allow one tiled display data block. + if (!blk->header.revision && pInfo->tile_topology_id.vendor_id) + return NVT_STATUS_SUCCESS; + + pInfo->tiled_display_revision = blk->header.revision; + + pInfo->tile_capability.bSingleEnclosure = blk->capability.single_enclosure; + pInfo->tile_capability.bHasBezelInfo = blk->capability.has_bezel_info; + pInfo->tile_capability.multi_tile_behavior = blk->capability.multi_tile_behavior; + pInfo->tile_capability.single_tile_behavior = blk->capability.single_tile_behavior; + + pInfo->tile_topology.row = ((blk->topo_loc_high.row << 5) | blk->topology_low.row) + 1; + pInfo->tile_topology.col = ((blk->topo_loc_high.col << 5) | blk->topology_low.col) + 1; + + pInfo->tile_location.x = (blk->topo_loc_high.x << 5) | blk->location_low.x; + pInfo->tile_location.y = (blk->topo_loc_high.y << 5) | blk->location_low.y; + + pInfo->native_resolution.width = ((blk->native_resolution.width_high<<8)|blk->native_resolution.width_low) + 1; + pInfo->native_resolution.height = ((blk->native_resolution.height_high<<8)|blk->native_resolution.height_low) + 1; + + pInfo->bezel_info.pixel_density = blk->bezel_info.pixel_density; + pInfo->bezel_info.top = (blk->bezel_info.top * blk->bezel_info.pixel_density) / 10; + pInfo->bezel_info.bottom = (blk->bezel_info.bottom * blk->bezel_info.pixel_density) / 10; + pInfo->bezel_info.right = (blk->bezel_info.right * blk->bezel_info.pixel_density) / 10; + pInfo->bezel_info.left = (blk->bezel_info.left * blk->bezel_info.pixel_density) / 10; + + pInfo->tile_topology_id.vendor_id = (blk->topology_id.vendor_id[2] << 16) | + (blk->topology_id.vendor_id[1] << 8 ) | + blk->topology_id.vendor_id[0]; + + pInfo->tile_topology_id.product_id = (blk->topology_id.product_id[1] << 8) | blk->topology_id.product_id[0]; + + pInfo->tile_topology_id.serial_number = (blk->topology_id.serial_number[3] << 24) | + (blk->topology_id.serial_number[2] << 16) | + (blk->topology_id.serial_number[1] << 8 ) | + blk->topology_id.serial_number[0]; + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdCtaData(NvU8 * block, NVT_EDID_INFO *pInfo) +{ + DISPLAYID_DATA_BLOCK_HEADER * blk = (DISPLAYID_DATA_BLOCK_HEADER*)block; + NVT_EDID_CEA861_INFO *p861info; + if (blk->data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + p861info = &pInfo->ext861; + + pInfo->ext_displayid.cea_data_block_present = 1; + p861info->revision = blk->revision; + + //parse CEA tags which starts at 3rd byte from block + parseCta861DataBlockInfo(&block[3], blk->data_bytes, p861info); + + // update pInfo with basic hdmi info + // assumes each edid will only have one such block across multiple cta861 blocks (otherwise may create 
declaration conflict) + // in case of multiple such blocks, the last one takes precedence + parseCta861VsdbBlocks(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK); + + parseCta861HfScdb(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK); + + //parse HDR related information from the HDR static metadata data block + if (p861info->valid.hdr_static_metadata != 0) + { + parseCta861HdrStaticMetadataDataBlock(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK); + } + + // base video + parse861bShortTiming(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK); + // yuv420-only video + parse861bShortYuv420Timing(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK); + // CEA861-F at 7.5.12 section about VFPDB block. + if (p861info->total_svr != 0) + { + parseCta861NativeOrPreferredTiming(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK); + } + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdDisplayInterfaceFeatures(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + NvU8 i; + DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK * blk = (DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK *)block; + if (blk->header.data_bytes > DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK_MAX_LEN) + { + + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + pInfo->supported_displayId2_0 = 1; + + // Color Depths + pInfo->u4.display_interface_features.rgb_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB16, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.rgb_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB14, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.rgb_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB12, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.rgb_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB10, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.rgb_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB8, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.rgb_depth.support_6b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB6, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.ycbcr444_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_16, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr444_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_14, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr444_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_12, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr444_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_10, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr444_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_8, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr444_depth.support_6b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_6, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr422_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR422_16, blk->supported_color_depth_ycbcr422); + pInfo->u4.display_interface_features.ycbcr422_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR422_14, blk->supported_color_depth_ycbcr422); + pInfo->u4.display_interface_features.ycbcr422_depth.support_12b = 
DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR422_12, blk->supported_color_depth_ycbcr422); + pInfo->u4.display_interface_features.ycbcr422_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR422_10, blk->supported_color_depth_ycbcr422); + pInfo->u4.display_interface_features.ycbcr422_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR422_8, blk->supported_color_depth_ycbcr422); + pInfo->u4.display_interface_features.ycbcr420_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR420_16, blk->supported_color_depth_ycbcr420); + pInfo->u4.display_interface_features.ycbcr420_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR420_14, blk->supported_color_depth_ycbcr420); + pInfo->u4.display_interface_features.ycbcr420_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR420_12, blk->supported_color_depth_ycbcr420); + pInfo->u4.display_interface_features.ycbcr420_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR420_10, blk->supported_color_depth_ycbcr420); + pInfo->u4.display_interface_features.ycbcr420_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR420_8, blk->supported_color_depth_ycbcr420); + + // Minimum Pixel Rate at Which YCbCr 4:2:0 Encoding Is Supported + pInfo->u4.display_interface_features.minimum_pixel_rate_ycbcr420 = blk->minimum_pixel_rate_ycbcr420; + + // Audio capability + pInfo->u4.display_interface_features.audio_capability.support_32khz = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _AUDIO_SUPPORTED_32KHZ, blk->supported_audio_capability); + pInfo->u4.display_interface_features.audio_capability.support_44_1khz = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _AUDIO_SUPPORTED_44_1KHZ, blk->supported_audio_capability); + pInfo->u4.display_interface_features.audio_capability.support_48khz = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _AUDIO_SUPPORTED_48KHZ, blk->supported_audio_capability); + + // Colorspace and EOTF combination + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_bt2020_eotf_smpte_st2084 = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_BT2020_EOTF_SMPTE_ST2084, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_bt2020_eotf_bt2020 = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_BT2020_EOTF_BT2020, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_dci_p3_eotf_dci_p3 = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_DCI_P3_EOTF_DCI_P3, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_adobe_rgb_eotf_adobe_rgb = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_ADOBE_RGB_EOTF_ADOBE_RGB, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_bt709_eotf_bt1886 = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_BT709_EOTF_BT1886, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_bt601_eotf_bt601 = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_BT601_EOTF_BT601, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_srgb_eotf_srgb = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_SRGB_EOTF_SRGB, 
blk->supported_colorspace_eotf_combination_1); + + // Additional support Colorspace and EOTF + pInfo->u4.display_interface_features.total_additional_colorspace_eotf.total = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _ADDITIONAL_SUPPORTED_COLORSPACE_EOTF_TOTAL, blk->additional_supported_colorspace_eotf_total); + + for (i = 0; i < pInfo->u4.display_interface_features.total_additional_colorspace_eotf.total; i++) + { + pInfo->u4.display_interface_features.additional_colorspace_eotf[i].support_colorspace = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _ADDITIONAL_SUPPORTED_COLORSPACE, blk->additional_supported_colorspace_eotf[i]); + pInfo->u4.display_interface_features.additional_colorspace_eotf[i].support_eotf = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _ADDITIONAL_SUPPORTED_EOTF, blk->additional_supported_colorspace_eotf[i]); + + } + return NVT_STATUS_SUCCESS; +} + +POP_SEGMENTS diff --git a/src/common/modeset/timing/nvt_edidext_displayid20.c b/src/common/modeset/timing/nvt_edidext_displayid20.c new file mode 100644 index 0000000..c6c4104 --- /dev/null +++ b/src/common/modeset/timing/nvt_edidext_displayid20.c @@ -0,0 +1,436 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+// +// File: nvt_edidext_displayid20.c +// +// Purpose: provide DisplayID 2.0 related services +// +//***************************************************************************** + +#include "nvBinSegment.h" +#include "nvmisc.h" + +#include "edid.h" + +PUSH_SEGMENTS + +/** + * + * @brief Parses a displayId20 EDID Extension block; the timings and other + * info are stored in pEdidInfo + * @param p The EDID Extension Block (with a DisplayID in it) + * @param size Size of the displayId Extension Block + * @param pEdidInfo EDID struct containing DisplayID information and + * the timings + */ +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +getDisplayId20EDIDExtInfo( + NvU8 *p, + NvU32 size, + NVT_EDID_INFO *pEdidInfo) +{ + DISPLAYID_2_0_SECTION *extSection = NULL; + + if (p == NULL || + size < sizeof(EDIDV1STRUC) || + size > sizeof(EDIDV1STRUC) || + p[0] != NVT_EDID_EXTENSION_DISPLAYID || + pEdidInfo == NULL) + { + return NVT_STATUS_INVALID_PARAMETER; + } + + // Validate the checksum over the entire DisplayID20 extension + if (computeDisplayId20SectionCheckSum(p, sizeof(EDIDV1STRUC)) != 0) + { +#ifdef DD_UNITTEST + return NVT_STATUS_ERR; +#endif + } + + extSection = (DISPLAYID_2_0_SECTION *)(p + 1); + + return parseDisplayId20EDIDExtSection(extSection, pEdidInfo); +} + +/* + * @brief DisplayId20 as EDID extension block's "Section" entry point function + */ +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20EDIDExtSection( + DISPLAYID_2_0_SECTION * extSection, + NVT_EDID_INFO *pEdidInfo) +{ + NvU8 datablock_location = 0; + NvU8 datablock_length; + NvU8 remaining_length; + + if ((extSection == NULL) || + (extSection->header.section_bytes != 121)) + { + return NVT_STATUS_ERR; + } + + // This is based on the DisplayID v2.0 Errata E7: + // The first DisplayID2.0 section used as an EDID extension shall populate the "Display Product Primary Use Case" byte with a value from 1h-8h based on the intended primary use case of the sink. + // Any subsequent DisplayID2.0 section EDID extension shall set the "Display Product Primary Use Case" byte to 0h.
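+ // Section integrity hinges on a byte-sum checksum; a minimal sketch of what + // computeDisplayId20SectionCheckSum(), used above and again below, is + // assumed to compute over the checked range: + // + //     NvU8 sum = 0; + //     for (i = 0; i < length; i++) sum += bytes[i]; + //     return sum; // a sum of 0 means the section is intact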
+ pEdidInfo->total_did2_extensions++; + + if (extSection->header.version == DISPLAYID_2_0_VERSION) + { + if (((pEdidInfo->total_did2_extensions == 1) && (extSection->header.product_type == DISPLAYID_2_0_PROD_EXTENSION || + extSection->header.product_type > DISPLAYID_2_0_PROD_HMD_AR || + extSection->header.extension_count != DISPLAYID_2_0_PROD_EXTENSION)) || + (pEdidInfo->total_did2_extensions > 1 && extSection->header.product_type != DISPLAYID_2_0_PROD_EXTENSION)) + { +#ifdef DD_UNITTEST + return NVT_STATUS_ERR; +#endif + } + + pEdidInfo->ext_displayid20.version = extSection->header.version; + pEdidInfo->ext_displayid20.revision = extSection->header.revision; + if (pEdidInfo->total_did2_extensions == 1) + { + pEdidInfo->ext_displayid20.primary_use_case = extSection->header.product_type; + } + pEdidInfo->ext_displayid20.as_edid_extension = NV_TRUE; + } + else + { + return NVT_STATUS_INVALID_PARAMETER; + } + + // validate the section checksum before processing the data blocks + if (computeDisplayId20SectionCheckSum((const NvU8*)extSection, DISPLAYID_2_0_SECTION_SIZE_TOTAL(extSection->header)) != 0) + { + return NVT_STATUS_ERR; + } + + remaining_length = extSection->header.section_bytes; + + while (datablock_location < extSection->header.section_bytes) + { + DISPLAYID_2_0_DATA_BLOCK_HEADER * dbHeader = (DISPLAYID_2_0_DATA_BLOCK_HEADER *) (extSection->data + datablock_location); + NvU8 is_reserve = remaining_length > 3 && datablock_location == 0 && dbHeader->type == 0 && dbHeader->data_bytes > 0; + NvU8 i; + + // Check the padding. + if (dbHeader->type == 0 && !is_reserve) + { + for (i = 1 ; i < remaining_length; i++) + { + // All remaining bytes must be 0. + if (extSection->data[datablock_location + i] != 0) + { + return NVT_STATUS_ERR; + } + } + + datablock_length = remaining_length; + } + else + { + if (parseDisplayId20EDIDExtDataBlocks((NvU8 *)(extSection->data + datablock_location), + extSection->header.section_bytes - datablock_location, + &datablock_length, + pEdidInfo) != NVT_STATUS_SUCCESS) + return NVT_STATUS_ERR; + } + + datablock_location += datablock_length; + remaining_length -= datablock_length; + } + + return NVT_STATUS_SUCCESS; +} + +/* + * @brief DisplayId20 as EDID extension block's "Data Block" entry point function. + * For validation-only parsing, pass a NULL pEdidInfo; the client then checks the return value. + */ +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20EDIDExtDataBlocks( + NvU8 *pDataBlock, + NvU8 RemainSectionLength, + NvU8 *pCurrentDBLength, + NVT_EDID_INFO *pEdidInfo) +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER * block_header = (DISPLAYID_2_0_DATA_BLOCK_HEADER *) pDataBlock; + NVT_STATUS status = NVT_STATUS_SUCCESS; + NVT_DISPLAYID_2_0_INFO *pDisplayId20Info = NULL; + + NvU8 i; + + // size sanity checking + if ((pDataBlock == NULL || RemainSectionLength <= NVT_DISPLAYID_DATABLOCK_HEADER_LEN) || + (block_header->data_bytes > RemainSectionLength - NVT_DISPLAYID_DATABLOCK_HEADER_LEN)) + return NVT_STATUS_ERR; + + if (block_header->type < DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY) + { + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pEdidInfo != NULL) + { + pDisplayId20Info = &pEdidInfo->ext_displayid20; + } + + *pCurrentDBLength = block_header->data_bytes + NVT_DISPLAYID_DATABLOCK_HEADER_LEN; + + status = parseDisplayId20DataBlock(block_header, pDisplayId20Info); + + if (pDisplayId20Info == NULL) return status; + + // TODO: all data blocks shall sync their data from the DisplayID2_0 data block to pEdidInfo + if (status == NVT_STATUS_SUCCESS &&
pDisplayId20Info->as_edid_extension == NV_TRUE) + { + switch (block_header->type) + { + case DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY: + pDisplayId20Info->valid_data_blocks.product_id_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_INTERFACE_FEATURES: + pDisplayId20Info->valid_data_blocks.interface_feature_present = NV_TRUE; + + // Supported color depths apply to all supported timings; supported timings include all DisplayID-exposed timings + // (that is, timings exposed using DisplayID timing types and CTA VICs) + if (IS_BPC_SUPPORTED_COLORFORMAT(pDisplayId20Info->interface_features.yuv444.bpcs)) + { + pDisplayId20Info->basic_caps |= NVT_DISPLAY_2_0_CAP_YCbCr_444; + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pDisplayId20Info->interface_features.yuv422.bpcs)) + { + pDisplayId20Info->basic_caps |= NVT_DISPLAY_2_0_CAP_YCbCr_422; + } + + if (pDisplayId20Info->interface_features.audio_capability.support_48khz || + pDisplayId20Info->interface_features.audio_capability.support_44_1khz || + pDisplayId20Info->interface_features.audio_capability.support_32khz) + { + pDisplayId20Info->basic_caps |= NVT_DISPLAY_2_0_CAP_BASIC_AUDIO; + } + + for (i = 0; i < pDisplayId20Info->interface_features.combination_count; i++) + { + if (pDisplayId20Info->interface_features.colorspace_eotf_combination[i].eotf == INTERFACE_EOTF_SMPTE_ST2084 && + pDisplayId20Info->interface_features.colorspace_eotf_combination[i].color_space == INTERFACE_COLOR_SPACE_BT2020) + { + pEdidInfo->hdr_static_metadata_info.static_metadata_type = 1; + pEdidInfo->hdr_static_metadata_info.supported_eotf.smpte_st_2084_eotf = 1; + + pEdidInfo->ext861.hdr_static_metadata.byte1 |= NVT_CEA861_EOTF_SMPTE_ST2084; + pEdidInfo->ext861.colorimetry.byte1 |= NVT_CEA861_COLORIMETRY_BT2020RGB; + + if (IS_BPC_SUPPORTED_COLORFORMAT(pDisplayId20Info->interface_features.yuv444.bpcs) || + IS_BPC_SUPPORTED_COLORFORMAT(pDisplayId20Info->interface_features.yuv422.bpcs)) + { + pEdidInfo->ext861.colorimetry.byte1 |= NVT_CEA861_COLORIMETRY_BT2020YCC; + } + } + } + break; + + // Per DisplayID_v2.0 Errata E5: + // if the embedded CTA block contains a 420 VDB/CMDB, we follow those two blocks only: + // * support for 420 pixel encoding is limited to the restricted set of timings exposed in the CTA data block. + // * the "Min Pixel Rate at YCbCr420" field shall be set to 00h + case DISPLAYID_2_0_BLOCK_TYPE_CTA_DATA: + pDisplayId20Info->valid_data_blocks.cta_data_present = NV_TRUE; + + // copy all the vendor specific data blocks from DisplayId20 to pEdidInfo + // NOTE: a mix of CTA extension blocks and DID2.0 extension blocks is not handled + if (pEdidInfo->ext861.valid.H14B_VSDB == 0 && pEdidInfo->ext861_2.valid.H14B_VSDB == 0 && pDisplayId20Info->cta.cta861_info.valid.H14B_VSDB) + NVMISC_MEMCPY(&pEdidInfo->hdmiLlcInfo, &pDisplayId20Info->vendor_specific.hdmiLlc, sizeof(NVT_HDMI_LLC_INFO)); + if (pEdidInfo->ext861.valid.H20_HF_VSDB == 0 && pEdidInfo->ext861_2.valid.H20_HF_VSDB == 0 && pDisplayId20Info->cta.cta861_info.valid.H20_HF_VSDB) + NVMISC_MEMCPY(&pEdidInfo->hdmiForumInfo, &pDisplayId20Info->vendor_specific.hfvs, sizeof(NVT_HDMI_FORUM_INFO)); + if (pEdidInfo->ext861.valid.nvda_vsdb == 0 && pEdidInfo->ext861_2.valid.nvda_vsdb == 0 && pDisplayId20Info->cta.cta861_info.valid.nvda_vsdb) + NVMISC_MEMCPY(&pEdidInfo->nvdaVsdbInfo, &pDisplayId20Info->vendor_specific.nvVsdb, sizeof(NVDA_VSDB_PARSED_INFO)); + if (pEdidInfo->ext861.valid.msft_vsdb == 0 && pEdidInfo->ext861_2.valid.msft_vsdb == 0 && pDisplayId20Info->cta.cta861_info.valid.msft_vsdb) + NVMISC_MEMCPY(&pEdidInfo->msftVsdbInfo, &pDisplayId20Info->vendor_specific.msftVsdb, sizeof(MSFT_VSDB_PARSED_INFO)); + if (pEdidInfo->ext861.valid.hdr_static_metadata == 0 && pEdidInfo->ext861_2.valid.hdr_static_metadata == 0 && pDisplayId20Info->cta.cta861_info.valid.hdr_static_metadata) + NVMISC_MEMCPY(&pEdidInfo->hdr_static_metadata_info, &pDisplayId20Info->cta.hdrInfo, sizeof(NVT_HDR_STATIC_METADATA)); + if (pEdidInfo->ext861.valid.dv_static_metadata == 0 && pEdidInfo->ext861_2.valid.dv_static_metadata == 0 && pDisplayId20Info->cta.cta861_info.valid.dv_static_metadata) + NVMISC_MEMCPY(&pEdidInfo->dv_static_metadata_info, &pDisplayId20Info->cta.dvInfo, sizeof(NVT_DV_STATIC_METADATA)); + if (pEdidInfo->ext861.valid.hdr10Plus == 0 && pEdidInfo->ext861_2.valid.hdr10Plus == 0 && pDisplayId20Info->cta.cta861_info.valid.hdr10Plus) + NVMISC_MEMCPY(&pEdidInfo->hdr10PlusInfo, &pDisplayId20Info->cta.hdr10PlusInfo, sizeof(NVT_HDR10PLUS_INFO)); + + // If a CTA861 extension already exists, sync its revision/basic_caps to the CTA block embedded in DID20 + if (pEdidInfo->ext861.revision >= NVT_CEA861_REV_B) + { + pDisplayId20Info->cta.cta861_info.revision = pEdidInfo->ext861.revision; + pDisplayId20Info->cta.cta861_info.basic_caps = pEdidInfo->ext861.basic_caps; + pDisplayId20Info->basic_caps = pEdidInfo->ext861.basic_caps; + } + + // this is the DisplayID20 Extension; just copy the needed data block values here: + if (pEdidInfo->ext861.revision == 0) + { + if (pDisplayId20Info->cta.cta861_info.valid.colorimetry) + { + pEdidInfo->ext861.colorimetry.byte1 = pDisplayId20Info->cta.cta861_info.colorimetry.byte1; + pEdidInfo->ext861.colorimetry.byte2 = pDisplayId20Info->cta.cta861_info.colorimetry.byte2; + } + } + else if (pEdidInfo->ext861_2.revision == 0) + { + if (pDisplayId20Info->cta.cta861_info.valid.colorimetry) + { + pEdidInfo->ext861_2.colorimetry.byte1 = pDisplayId20Info->cta.cta861_info.colorimetry.byte1; + pEdidInfo->ext861_2.colorimetry.byte2 = pDisplayId20Info->cta.cta861_info.colorimetry.byte2; + } + } + break; + + case DISPLAYID_2_0_BLOCK_TYPE_DISPLAY_PARAM: + pDisplayId20Info->valid_data_blocks.parameters_present = NV_TRUE; + + // EDID only supports 10-bit chromaticity, matching the OS D3DKMDT_2DOFFSET 10-bit format, so no conversion is needed here.
+ + pEdidInfo->input.u.digital.bpc = NVT_COLORDEPTH_HIGHEST_BPC(pDisplayId20Info->display_param.native_color_depth); + pEdidInfo->gamma = pDisplayId20Info->display_param.gamma_x100; + + if (pDisplayId20Info->display_param.audio_speakers_integrated == AUDIO_SPEAKER_INTEGRATED_SUPPORTED) + { + pDisplayId20Info->basic_caps |= NVT_DISPLAY_2_0_CAP_BASIC_AUDIO; + } + + if (pDisplayId20Info->display_param.gamma_x100 != 0) + { + pEdidInfo->hdr_static_metadata_info.supported_eotf.trad_gamma_sdr_eotf = 1; + } + break; + case DISPLAYID_2_0_BLOCK_TYPE_STEREO: + pDisplayId20Info->valid_data_blocks.stereo_interface_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TILED_DISPLAY: + pDisplayId20Info->valid_data_blocks.tiled_display_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_CONTAINER_ID: + pDisplayId20Info->valid_data_blocks.container_id_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_7: + pDisplayId20Info->valid_data_blocks.type7Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_8: + pDisplayId20Info->valid_data_blocks.type8Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_9: + pDisplayId20Info->valid_data_blocks.type9Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_10: + pDisplayId20Info->valid_data_blocks.type10Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_RANGE_LIMITS: + pDisplayId20Info->valid_data_blocks.dynamic_range_limit_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_ADAPTIVE_SYNC: + pDisplayId20Info->valid_data_blocks.adaptive_sync_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_BRIGHTNESS_LUMINANCE_RANGE: + pDisplayId20Info->valid_data_blocks.brightness_luminance_range_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_VENDOR_SPEC: + pDisplayId20Info->valid_data_blocks.vendor_specific_present = NV_TRUE; + break; + default: + break; + } + } + + return status; +} + +/* @brief Update the correct color format / attribute of timings from interface feature data block + */ +CODE_SEGMENT(PAGE_DD_CODE) +void +updateColorFormatForDisplayId20ExtnTimings( + NVT_EDID_INFO *pInfo, + NvU32 timingIdx) +{ + // pDisplayId20Info parsed displayID20 info + NVT_DISPLAYID_2_0_INFO *pDisplayId20Info = &pInfo->ext_displayid20; + NVT_TIMING *pT= &pInfo->timing[timingIdx]; + + nvt_assert(timingIdx <= COUNT(pInfo->timing)); + + if (pDisplayId20Info->as_edid_extension) + { + if ((pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_A_SUPPORTED || + pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_B_SUPPORTED || + pInfo->ext861.valid.H14B_VSDB || pInfo->ext861.valid.H20_HF_VSDB) && pInfo->ext861.revision >= NVT_CEA861_REV_A) + { + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 0, + 1, + pDisplayId20Info->interface_features.rgb444.bpc.bpc10, + pDisplayId20Info->interface_features.rgb444.bpc.bpc12, + pDisplayId20Info->interface_features.rgb444.bpc.bpc14, + pDisplayId20Info->interface_features.rgb444.bpc.bpc16); + } + else + { + // rgb444 (always support 6bpc and 8bpc as per DP spec 5.1.1.1.1 RGB Colorimetry) + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 1, + 1, + pDisplayId20Info->interface_features.rgb444.bpc.bpc10, + pDisplayId20Info->interface_features.rgb444.bpc.bpc12, + pDisplayId20Info->interface_features.rgb444.bpc.bpc14, + pDisplayId20Info->interface_features.rgb444.bpc.bpc16); + } + + // yuv444 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv444, 0, /* yuv444 does not support 6bpc */ + 
pDisplayId20Info->interface_features.yuv444.bpc.bpc8, + pDisplayId20Info->interface_features.yuv444.bpc.bpc10, + pDisplayId20Info->interface_features.yuv444.bpc.bpc12, + pDisplayId20Info->interface_features.yuv444.bpc.bpc14, + pDisplayId20Info->interface_features.yuv444.bpc.bpc16); + // yuv422 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv422, 0, /* yuv422 does not support 6bpc */ + pDisplayId20Info->interface_features.yuv422.bpc.bpc8, + pDisplayId20Info->interface_features.yuv422.bpc.bpc10, + pDisplayId20Info->interface_features.yuv422.bpc.bpc12, + pDisplayId20Info->interface_features.yuv422.bpc.bpc14, + pDisplayId20Info->interface_features.yuv422.bpc.bpc16); + + if (!NVT_DID20_TIMING_IS_CTA861(pInfo->timing[timingIdx].etc.flag, pInfo->timing[timingIdx].etc.status)) + { + // yuv420 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv420, 0, /* yuv420 does not support 6bpc */ + pDisplayId20Info->interface_features.yuv420.bpc.bpc8, + pDisplayId20Info->interface_features.yuv420.bpc.bpc10, + pDisplayId20Info->interface_features.yuv420.bpc.bpc12, + pDisplayId20Info->interface_features.yuv420.bpc.bpc14, + pDisplayId20Info->interface_features.yuv420.bpc.bpc16); + } + } +} + +POP_SEGMENTS diff --git a/src/common/modeset/timing/nvt_gtf.c b/src/common/modeset/timing/nvt_gtf.c new file mode 100644 index 0000000..405a16c --- /dev/null +++ b/src/common/modeset/timing/nvt_gtf.c @@ -0,0 +1,138 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvt_gtf.c +// +// Purpose: calculate gtf timing +// +//***************************************************************************** + +#include "nvBinSegment.h" +#include "nvmisc.h" + +#include "nvtiming_pvt.h" + +// calculate GTF timing + +PUSH_SEGMENTS + +CONS_SEGMENT(PAGE_CONS) + +const NvU32 NVT_GTF_CELL_GRAN=8; +const NvU32 NVT_GTF_MIN_VSYNCBP=11; // in 550us (!!) 
[1000000:550 = 20000:11] +const NvU32 NVT_GTF_MIN_VPORCH=1; + +const NvU32 NVT_GTF_C_PRIME=30; // (gtf_C-gtf_J)*gtf_K/256+gtf_J; +const NvU32 NVT_GTF_M_PRIME=300; // NVT_GTFK/256*gtf_M; +const NvU32 NVT_GTF_VSYNC_RQD=3; +const NvU32 NVT_GTF_HSYNC_PERCENTAGE=8; // 8% HSync for GTF + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcGTF(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT) +{ + NvU32 dwXCells, dwVSyncBP, dwVTotal, dwIdN, dwIdD, dwHBlank, dwHTCells, dwHSync, dwHFrontPorch, dwRefreshRate; + + // parameter check + if (pT == NULL) + return NVT_STATUS_ERR; + + if (width == 0 || height == 0 || rr == 0 ) + return NVT_STATUS_ERR; + + dwRefreshRate = rr; + dwXCells = a_div_b(width, NVT_GTF_CELL_GRAN); + + if(dwRefreshRate * NVT_GTF_MIN_VSYNCBP >= 20000) + return NVT_STATUS_ERR;//NVT_STATUS_ERR_OUTOFRANGE; // H period estimate less than 0 + + dwVSyncBP = a_div_b((height + NVT_GTF_MIN_VPORCH) * NVT_GTF_MIN_VSYNCBP * dwRefreshRate, + (20000 - NVT_GTF_MIN_VSYNCBP * dwRefreshRate)); + dwVTotal = dwVSyncBP + height + NVT_GTF_MIN_VPORCH; + + // Calculate the numerator and denominator of Ideal Duty Cycle + // NOTE: here dwIdN/dwIdN = IdealDutyCycle/GTF_C_Prime + dwIdD = dwVTotal * dwRefreshRate; + + if(dwIdD <= NVT_GTF_M_PRIME * 1000 / NVT_GTF_C_PRIME) + return NVT_STATUS_ERR;//NVT_STATUS_ERR_OUTOFRANGE; // Ideal duty cycle less than 0 + + dwIdN = dwIdD - NVT_GTF_M_PRIME * 1000 / NVT_GTF_C_PRIME; + + // A proper way to calculate dwXCells*dwIdN/(100*dwIdD/GTF_C_PRIME-dwIdN) + dwHBlank = axb_div_c(dwIdN*3, dwXCells, 2*(300*dwIdD/NVT_GTF_C_PRIME - dwIdN*3)); + dwHBlank = ( dwHBlank ) * 2 * NVT_GTF_CELL_GRAN; + dwHTCells = dwXCells + dwHBlank / NVT_GTF_CELL_GRAN; + dwHSync = a_div_b(dwHTCells * NVT_GTF_HSYNC_PERCENTAGE, 100) * NVT_GTF_CELL_GRAN; + if((dwHSync == 0) || (dwHSync*2 > dwHBlank)) + return NVT_STATUS_ERR;//NVT_STATUS_ERR_OUTOFRANGE; // HSync too small or too big. 
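+ // Worked example (illustrative; assumes a_div_b()/axb_div_c() round to the + // nearest integer): for 1920x1080@60Hz the flow above gives dwVSyncBP = 37, + // dwVTotal = 1118, dwHBlank = 656, dwHSync = 208 and HTotal = 2576, with + // pclk ~= 172.80MHz computed further below - the classic GTF result for + // this mode.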
+ + dwHFrontPorch = dwHBlank/2-dwHSync; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + + pT->HVisible = (NvU16)(dwXCells*NVT_GTF_CELL_GRAN); + pT->VVisible = (NvU16)height; + + pT->HTotal = (NvU16)(dwHTCells*NVT_GTF_CELL_GRAN); + pT->HFrontPorch = (NvU16)dwHFrontPorch; + pT->HSyncWidth = (NvU16)dwHSync; + + pT->VTotal = (NvU16)dwVTotal; + pT->VFrontPorch = (NvU16)NVT_GTF_MIN_VPORCH; + pT->VSyncWidth = (NvU16)NVT_GTF_VSYNC_RQD; + + // A proper way to calculate fixed HTotal*VTotal*Rr/10000 + pT->pclk = axb_div_c(dwHTCells*dwVTotal, dwRefreshRate, 10000/NVT_GTF_CELL_GRAN); + + pT->HSyncPol = NVT_H_SYNC_NEGATIVE; + pT->VSyncPol = NVT_V_SYNC_POSITIVE; + pT->interlaced = 0; + + // fill in the extra timing info + pT->etc.flag = 0; + pT->etc.rr = (NvU16)rr; + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk, (NvU32)10000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal); + pT->etc.aspect = 0; + pT->etc.rep = 0x1; + pT->etc.status = NVT_STATUS_GTF; + NVT_SNPRINTF((char *)pT->etc.name, 40, "GTF:%dx%dx%dHz",width, height, rr); + pT->etc.name[39] = '\0'; + pT->etc.rgb444.bpc.bpc8 = 1; + + // interlaced adjustment + if ((flag & NVT_PVT_INTERLACED_MASK) != 0) + { + if ((pT->VTotal & 0x1) != 0) + pT->interlaced = NVT_INTERLACED_EXTRA_VBLANK_ON_FIELD2; + else + pT->interlaced = NVT_INTERLACED_NO_EXTRA_VBLANK_ON_FIELD2; + + pT->pclk >>= 1; + pT->VTotal >>= 1; + pT->VVisible = (pT->VVisible + 1) / 2; + } + + return NVT_STATUS_SUCCESS; +} + +POP_SEGMENTS diff --git a/src/common/modeset/timing/nvt_ovt.c b/src/common/modeset/timing/nvt_ovt.c new file mode 100644 index 0000000..d6485a1 --- /dev/null +++ b/src/common/modeset/timing/nvt_ovt.c @@ -0,0 +1,295 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+// +// File: nvt_ovt.c +// +// Purpose: calculate Optimized Video Timing (OVT) timing +// +//***************************************************************************** + + +#include "nvBinSegment.h" +#include "nvmisc.h" + +#include "nvtiming_pvt.h" + +PUSH_SEGMENTS + +CONS_SEGMENT(PAGE_CONS) + +const NvU32 NVT_OVT_PIXEL_CLOCK_GRANULARITY = 1000; // Resulting Pixel Clock will be a multiple of this +const NvU32 NVT_OVT_MIN_H_TOTAL_GRANULARITY = 8; // Resulting Htotal value will be a multiple of this +const NvU32 NVT_OVT_MIN_V_BLANK_MICROSEC = 460; // Minimum duration of Vblank (us) +const NvU32 NVT_OVT_MIN_V_SYNC_LEADING_EDGE = 400; // Minimum duration of Vsync + Vback (us) +const NvU32 NVT_OVT_MIN_CLOCK_RATE_420 = 590000000; // interface-specific minimum pixel rate for transport of 4:2:0 samples +const NvU32 NVT_OVT_PIXEL_FACTOR_420 = 2; // Worst case of two pixels per link character for pixel rates of MinClockRate420 or more +const NvU32 NVT_OVT_MIN_H_BLANK_444 = 80; // Minimum Hblank width for pixel rates below MinClockRate420 +const NvU32 NVT_OVT_MIN_H_BLANK_420 = 128; // Minimum Hblank width for pixel rates of MinClockRate420 or more +const NvU32 NVT_OVT_MAX_CHUNK_RATE = 650000000; // Maximum rate of chunks of pixels with a power-of-two size +const NvU32 NVT_OVT_AUDIO_PACKET_RATE = 195000; // 192k sample packets + 3k auxiliary data packets +const NvU32 NVT_OVT_AUDIO_PACKET_SIZE = 32; // each packet carries 8 audio samples +const NvU32 NVT_OVT_LINE_OVERHEAD = 32; // interface-specific overhead: 32 pixels/line + +const NvU32 NVT_OVT_H_SYNC_PIXELS = 32; +const NvU32 NVT_OVT_H_BACK_WIDTH = 32; +const NvU32 NVT_OVT_V_SYNC_WIDTH = 8; + +CODE_SEGMENT(PAGE_DD_CODE) +static NvU32 nvFloorPow2_U32(NvU32 x) +{ + return x & ~(x - 1); +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NvU32 computeGCD(NvU32 a, NvU32 b) +{ + NvU32 temp; + while (b != 0) + { + temp = a % b; + if (temp == 0) return b; + a = b; + b = temp; + } + return a; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NvU32 calculate_aspect_ratio(NVT_TIMING *pT) +{ + NvU32 gcd = computeGCD(pT->HVisible, pT->VVisible); + + if (gcd == 0) + { + pT->etc.aspect = (NvU32)0; + return 0; + } + + return (pT->HVisible / gcd) << 16 | (pT->VVisible / gcd); +} + +/** + * OVT Algorithm Calculations Formula + * + * @brief Sinks can indicate supported video formats with VFD in a VFDB that are not represented by a CTA VIC. + * The timing parameters of those Video Formats are determined by the Optimized Video Timing (OVT) algorithm + * + * @param width : resolution width from RID + * @param height : resolution height from RID + * @param refreshRate : refresh rate x fraction rate + * @param pT : output all the parameters in NVT_TIMING + * + * @return NVT_STATUS_SUCCESS + * + */ +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcOVT(NvU32 width, NvU32 height, NvU32 refreshRate, NVT_TIMING *pT) +{ + NvU32 hTotal = 0; + NvU32 vTotal = 0; + NvU32 maxVRate = refreshRate; + NvU32 vTotalGranularity = 1; + NvU32 resolutionGranularity = 0; + NvU32 minVBlank, minVTotal, minLineRate, minHBlank, minHTotal, vBlank, vSyncPosition; + NvU32 hTotalGranularityChunk, hTotalGranularity, maxAudioPacketsPerLine; + + NvU64 minPixelClockRate = 0LL; + NvU64 pixelClockRate = 0LL; + NvU64 maxActiveTime = 0LL; + NvU64 minLineTime = 0LL; + NvU64 minResolution = 0LL; + NvU32 V = 0; + NvU32 H = 0; + NvU64 R = 0; + + // parameter sanity check + if (width % 8 != 0) + return NVT_STATUS_ERR; + + // ** Preparation ** + // 1. Determine the maximum Vrate of the frame rate group (see Table 13) and the V-Total granularity: + // Currently the client has no customized refresh rate values, as they come only from the VFDB + switch (refreshRate) + { + case 24: case 25: case 30: + maxVRate = 30; + vTotalGranularity = 20; + break; + case 48: case 50: case 60: + maxVRate = 60; + vTotalGranularity = 20; + break; + case 100: case 120: + maxVRate = 120; + vTotalGranularity = 5; + break; + case 144: + maxVRate = 144; + vTotalGranularity = 1; + break; + case 200: case 240: + maxVRate = 240; + vTotalGranularity = 5; + break; + case 300: case 360: + maxVRate = 360; + vTotalGranularity = 5; + break; + case 400: case 480: + maxVRate = 480; + vTotalGranularity = 5; + break; + default: + vTotalGranularity = 1; + maxVRate = refreshRate; + break; + } + + // 2. Minimum Vtotal is found from the highest frame rate of the Vrate group, Vactive and the minimum Vblank time of 460 μSec: + // 2.1 determine the maximum active time. For the sake of precision, it is multiplied by 1,000,000. + maxActiveTime = ((NvU64)1000000000000 / (NvU64)maxVRate) - (NvU64)NVT_OVT_MIN_V_BLANK_MICROSEC * 1000000; + // 2.2 get the minimum line time + minLineTime = maxActiveTime / (NvU64)height; + // 2.3 get the minimum number of VBlank lines. The multiplicand 1000000 is for accuracy, because we multiplied by it at 2.1 + minVBlank = (NvU32)(NV_UNSIGNED_DIV_CEIL((NvU64)NVT_OVT_MIN_V_BLANK_MICROSEC * (NvU64)1000000, (NvU64)minLineTime)); + // 2.4 get the minimum total number of lines + minVTotal = height + minVBlank; + if (minVTotal % vTotalGranularity !=0) + minVTotal += (vTotalGranularity - (minVTotal % vTotalGranularity)); + + // 3. Find the audio packet rate and use it to determine the required audio packets per line: + // 3.1 determine a minimum line rate + minLineRate = maxVRate * minVTotal; // Hz + // 3.2 determine the maximum number of audio packets per line + maxAudioPacketsPerLine = NV_UNSIGNED_DIV_CEIL(NVT_OVT_AUDIO_PACKET_RATE, minLineRate); + + // 4. Find the initial minimum horizontal total size, based on audio requirements (1 pixel = 1 character): + minHBlank = NVT_OVT_LINE_OVERHEAD + NVT_OVT_AUDIO_PACKET_SIZE * maxAudioPacketsPerLine; + // 4.1 determine a minimum Horizontal Total pixel (MinHtotal) + minHTotal = width + NV_MAX(NVT_OVT_MIN_H_BLANK_444, minHBlank); + + // 5. Find hTotal and vTotal so that the pixelClockRate is divisible by the pixelClockGranularity, and + // hTotal is divisible by an appropriate processing chunk size: + minPixelClockRate = (NvU64)maxVRate * (NvU64)minHTotal * (NvU64)minVTotal; // Hz + // 5.1 determine the new granularity, and minHtotal based on it + hTotalGranularityChunk = nvNextPow2_U32((NvU32)NV_UNSIGNED_DIV_CEIL(minPixelClockRate, (NvU64)NVT_OVT_MAX_CHUNK_RATE)); + // 5.2 If this value is greater than 8, it becomes the new horizontal granularity + hTotalGranularity = NV_MAX((NvU64)NVT_OVT_MIN_H_TOTAL_GRANULARITY, hTotalGranularityChunk); + if (minHTotal % hTotalGranularity != 0) + { + minHTotal += (hTotalGranularity - (minHTotal % hTotalGranularity)); + } + // 5.3 optimize by iterating on resolution totals without multiplying by the max refresh rate.
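+ // e.g. for maxVRate == 60: computeGCD(1000, 60) == 20, so + // resolutionGranularity == 50; any hTotal*vTotal product that is a multiple + // of 50 keeps maxVRate * hTotal * vTotal divisible by the 1000 Hz pixel + // clock granularity.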
+ resolutionGranularity = NVT_OVT_PIXEL_CLOCK_GRANULARITY / computeGCD(NVT_OVT_PIXEL_CLOCK_GRANULARITY, maxVRate); + + // ** OVT Timing Search ** + // 5.4 the search repeats until the pixel clock found is greater than the divisible pixel clock found at the previous + // vTotal value; the hTotal and vTotal values of that preceding search are chosen for the video timing + for(;;) + { + minResolution = 0; + V = minVTotal; + + for(;;) + { + H = minHTotal; + R = (NvU64)H * (NvU64)V; + + if (minResolution && R > minResolution) + break; + + while (R % resolutionGranularity || maxVRate * R / nvFloorPow2_U32(H) > NVT_OVT_MAX_CHUNK_RATE) + { + H += hTotalGranularity; + R = (NvU64)H * (NvU64)V; + } + + if (minResolution == 0 || R < minResolution) + { + hTotal = H; + vTotal = V; + minResolution = R; + } + V += vTotalGranularity; + } + + pixelClockRate = maxVRate * minResolution; + + // 6. Check if the timing requires adjustments for 4:2:0: + // 6.a Re-calculate minHTotal, in pixels, adjusted for 4:2:0 requirements. (2 pixels = 1 character): + minHTotal = width + NV_MAX(NVT_OVT_MIN_H_BLANK_420, NVT_OVT_PIXEL_FACTOR_420 * minHBlank); + // 6.b If the resulting PixelClockRate allows for 4:2:0, ensure that the new Hblank requirement is met, or repeat the calculation with the new MinHtotal: + if (pixelClockRate >= NVT_OVT_MIN_CLOCK_RATE_420 && hTotal < minHTotal) + { + continue; + } + break; + } + + // ** post-processing ** + // 7. Adjust Vtotal, in lines, to achieve the (integer) target Vrate: + vTotal = vTotal * maxVRate / refreshRate; + + // 8. Find the Vsync leading edge: + vBlank = vTotal - height; + vSyncPosition = (NvU32)NV_UNSIGNED_DIV_CEIL(((NvU64)NVT_OVT_MIN_V_SYNC_LEADING_EDGE * (NvU64)pixelClockRate), ((NvU64)1000000 * (NvU64)hTotal)); + + // 10. fill in the essential timing info for output + pT->HVisible = (NvU16)width; + pT->HTotal = (NvU16)hTotal; + pT->HFrontPorch = (NvU16)(hTotal - width - NVT_OVT_H_SYNC_PIXELS - NVT_OVT_H_BACK_WIDTH); + pT->HSyncWidth = (NvU16)NVT_OVT_H_SYNC_PIXELS; + pT->VVisible = (NvU16)height; + pT->VTotal = (NvU16)vTotal; + pT->VSyncWidth = (NvU16)NVT_OVT_V_SYNC_WIDTH; + pT->VFrontPorch = (NvU16)(vBlank - vSyncPosition); + pT->pclk = (NvU32)(pixelClockRate /*Hz*/ / 1000 + 5) / 10; //convert to 10Khz + pT->pclk1khz = (NvU32)(pixelClockRate /*Hz*/ / 1000); //convert to 1Khz + pT->HSyncPol = NVT_H_SYNC_POSITIVE; + pT->VSyncPol = NVT_V_SYNC_POSITIVE; + pT->HBorder = pT->VBorder = 0; // not supported + pT->interlaced = 0; // not supported + // fill in the extra timing info + pT->etc.flag = 0; + pT->etc.rr = (NvU16)refreshRate; + pT->etc.rrx1k = (NvU32)axb_div_c_64((NvU64)pT->pclk, (NvU64)10000 * (NvU64)1000, (NvU64)pT->HTotal*(NvU64)pT->VTotal); + pT->etc.aspect = calculate_aspect_ratio(pT); + pT->etc.rep = 0x1; + NVT_SNPRINTF((char *)pT->etc.name, 40, "CTA861-OVT:%dx%dx%dHz", width, height, refreshRate); + pT->etc.name[39] = '\0'; + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvBool NvTiming_IsTimingOVT(const NVT_TIMING *pTiming) +{ + // Check the Timing Type flag + if (pTiming->etc.flag & NVT_FLAG_CTA_OVT_TIMING) + { + return NV_TRUE; + } + return NV_FALSE; +} + +POP_SEGMENTS diff --git a/src/common/modeset/timing/nvt_tv.c b/src/common/modeset/timing/nvt_tv.c new file mode 100644 index 0000000..32b9bf4 --- /dev/null +++ b/src/common/modeset/timing/nvt_tv.c @@ -0,0 +1,192 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvt_tv.c +// +// Purpose: calculate tv based timing timing +// +//***************************************************************************** + +#include "nvBinSegment.h" + +#include "nvtiming_pvt.h" + +PUSH_SEGMENTS + +CONS_SEGMENT(PAGE_CONS) + +static const NVT_TIMING TV_TIMING[] = +{ + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,240, 0,10, 6,262, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1407 , 14070, {0,60,59940,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_NTSC_M, "SDTV:NTSC_M"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,240, 0,10, 6,262, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1407 , 14070, {0,60,59940,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_NTSC_J, "SDTV:NTSC_J"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,288, 0,10, 8,312, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1397 , 13970, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_PAL_M, "SDTV:PAL_M"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,288, 0,10, 8,312, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1397 , 13970, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_PAL_A, "SDTV:PAL_A"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,288, 0,10, 8,312, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1397 , 13970, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_PAL_N, "SDTV:PAL_N"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,288, 0,10, 8,312, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1397 , 13970, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_PAL_NC, "SDTV:PAL_NC"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,240, 0,10, 6,262, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1407 , 14070, {0,60,59940,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_480I, "HDTV(analog):480i"}}, + {720, 0,15,8, 858, NVT_H_SYNC_NEGATIVE,480, 0,10, 4,525, NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE, 2700 , 27000, {0,60,59940,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_480P, "HDTV(analog):480p"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,288, 0,10, 8,312, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1397 , 13970, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_576I, "HDTV(analog):576i"}}, + {720, 0,10,8, 864, NVT_H_SYNC_NEGATIVE,576, 0, 5, 4,625, NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE, 2700 , 27000, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_576P, "HDTV(analog):576p"}}, + {1280,0,70,80, 1650,NVT_H_SYNC_NEGATIVE,720, 0, 5, 5,750, NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE, 7418 , 74180, {0,60,59940,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_720P, "HDTV(analog):720p"}}, + {1920,0,44,88,2200, 
NVT_H_SYNC_NEGATIVE,540, 0, 2, 5,562, NVT_V_SYNC_NEGATIVE,NVT_INTERLACED, 7418 , 74180, {0,60,59940,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_1080I, "HDTV(analog):1080i"}}, + {1920,0,44,88,2200, NVT_H_SYNC_NEGATIVE,1080,0, 4, 5,1125,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE, 14835,148350, {0,60,59940,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_1080P, "HDTV(analog):1080p"}}, + {1280,0,400,80,1980,NVT_H_SYNC_NEGATIVE,720, 0, 5, 5,750, NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE, 7425 , 74250, {0,50,50000,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_720P50, "HDTV(analog):720p50"}}, + {1920,0,594,88,2750,NVT_H_SYNC_NEGATIVE,1080,0, 4, 5,1125,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE, 7425 , 74250, {0,24,24000,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_1080P24,"HDTV(analog):1080p24"}}, + {1920,0,484,88,2640,NVT_H_SYNC_NEGATIVE,540, 0, 4, 5,562, NVT_V_SYNC_NEGATIVE,NVT_INTERLACED, 7425 , 74250, {0,50,50000,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_1080I50,"HDTV(analog):1080i50"}}, + {1920,0,484,88,2640,NVT_H_SYNC_NEGATIVE,1080,0, 4, 5,1125,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,14850 ,148500, {0,50,50000,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_1080P50,"HDTV(analog):1080p50"}}, + {0,0,0,0,0,NVT_H_SYNC_NEGATIVE,0,0,0,0,0,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,0,0,{0,0,0,0,0,{0},{0},{0},{0},0,""}} +}; + +//*********************************************** +//** Wrapper Structure to store Fake EDID data ** +//*********************************************** +typedef struct tagFAKE_TV_EDID +{ + NvU32 EdidType; + NvU32 EdidSize; + const NvU8* FakeEdid; +} FAKE_TV_EDID; + +// calculate the backend TV timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_GetTvTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NvU32 tvFormat, NVT_TIMING *pT) +{ + NvU32 i, j, k; + + // input check + if (pT == NULL) + return NVT_STATUS_ERR; + + if ((width == 0 || height == 0 || rr == 0) && tvFormat >= NVT_MAX_TV_FORMAT) + return NVT_STATUS_ERR; + + // handle double scan + if (height <= NVT_PVT_DOUBLE_SCAN_HEIGHT) + { + width <<= 1; + height <<= 1; + } + + // try the exact match first + if (tvFormat != NVT_AUTO_HDTV_FORMAT) + { + i = 0; + while (TV_TIMING[i].HVisible != 0) + { + if (NVT_GET_TIMING_STATUS_SEQ(TV_TIMING[i].etc.status) == tvFormat) + { + // find the match + *pT = TV_TIMING[i]; + return NVT_STATUS_SUCCESS; + } + + // move to the next entry + i++; + } + + // unknown TV format, return failure here + *pT = TV_TIMING[0]; + return NVT_STATUS_ERR; + } + + // we are doing auto HDTV format binding here + i = 0; + j = k = sizeof(TV_TIMING)/sizeof(TV_TIMING[0]) - 1; + while (TV_TIMING[i].HVisible != 0) + { + // #1: try the exact resolution/refreshrate/interlaced match + if (width == TV_TIMING[i].HVisible && + height == frame_height(TV_TIMING[i])&& + rr == TV_TIMING[i].etc.rr && + !!(flag & NVT_PVT_INTERLACED_MASK) == !!TV_TIMING[i].interlaced && + NVT_GET_TIMING_STATUS_TYPE(TV_TIMING[i].etc.status) == NVT_TYPE_HDTV) + { + // exact match, return from here + *pT = TV_TIMING[i]; + return NVT_STATUS_SUCCESS; + } + + // #2: try to closest match with interlaced check ON + if (!!(flag & NVT_PVT_INTERLACED_MASK) == !!TV_TIMING[i].interlaced && + NVT_GET_TIMING_STATUS_TYPE(TV_TIMING[i].etc.status) == NVT_TYPE_HDTV) + { + if (abs_delta(width, TV_TIMING[i].HVisible) <= abs_delta(width, TV_TIMING[j].HVisible) && + abs_delta(height, frame_height(TV_TIMING[i])) <= abs_delta(height, frame_height(TV_TIMING[j])) && + abs_delta(rr, TV_TIMING[i].etc.rr) <= abs_delta(rr, TV_TIMING[j].etc.rr) && + width <= TV_TIMING[i].HVisible && + height <= 
frame_height(TV_TIMING[i]))
+            {
+                j = i;
+            }
+        }
+
+        // #3: try the closest match with the interlaced check OFF
+        if (NVT_GET_TIMING_STATUS_TYPE(TV_TIMING[i].etc.status) == NVT_TYPE_HDTV)
+        {
+            if (abs_delta(width, TV_TIMING[i].HVisible) <= abs_delta(width, TV_TIMING[k].HVisible) &&
+                abs_delta(height, frame_height(TV_TIMING[i])) <= abs_delta(height, frame_height(TV_TIMING[k])) &&
+                abs_delta(rr, TV_TIMING[i].etc.rr) <= abs_delta(rr, TV_TIMING[k].etc.rr) &&
+                width <= TV_TIMING[i].HVisible &&
+                height <= frame_height(TV_TIMING[i]))
+            {
+                k = i;
+            }
+        }
+
+        // move to the next entry
+        i++;
+    }
+
+    // return the closest matched timing here
+    if (TV_TIMING[j].HVisible != 0)
+    {
+        *pT = TV_TIMING[j];
+    }
+    else if (TV_TIMING[k].HVisible != 0)
+    {
+        *pT = TV_TIMING[k];
+    }
+    else
+    {
+        *pT = TV_TIMING[0];
+    }
+
+    // set the mismatch status
+    if (pT->HVisible != width || frame_height(*pT) != height)
+    {
+        NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_SIZE);
+    }
+    if (pT->etc.rr != rr)
+    {
+        NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_RR);
+    }
+    if (!!pT->interlaced != !!(flag & NVT_PVT_INTERLACED_MASK))
+    {
+        NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_FORMAT);
+    }
+
+    return NVT_STATUS_SUCCESS;
+}
+
+POP_SEGMENTS
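+
+// Usage sketch (editor's illustration, not part of the original file): ask
+// for the closest auto-bound HDTV timing for a 1280x720 progressive mode at
+// 60 Hz. The call fills 'timing' and records any size/rr/format deviation in
+// timing.etc.status:
+//
+//     NVT_TIMING timing;
+//     if (NvTiming_GetTvTiming(1280, 720, 60, 0 /* progressive */,
+//                              NVT_AUTO_HDTV_FORMAT, &timing) == NVT_STATUS_SUCCESS)
+//     {
+//         // NVT_GET_TIMING_STATUS_MISMATCH(timing.etc.status) reports how
+//         // far the chosen entry is from the request, if at all.
+//     }
+//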
diff --git a/src/common/modeset/timing/nvt_util.c b/src/common/modeset/timing/nvt_util.c
new file mode 100644
index 0000000..eccf865
--- /dev/null
+++ b/src/common/modeset/timing/nvt_util.c
@@ -0,0 +1,472 @@
+//*****************************************************************************
+//
+// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+//
+// File: nvt_util.c
+//
+// Purpose: provide the utility functions for the timing library
+//
+//*****************************************************************************
+
+#include "nvBinSegment.h"
+
+#include "nvtiming_pvt.h"
+#include "nvmisc.h" // NV_MAX
+
+PUSH_SEGMENTS
+
+CONS_SEGMENT(PAGE_CONS)
+
+// The following table was generated w/ this program:
+/*
+#include <stdio.h>
+
+#define CRC32_POLYNOMIAL 0xEDB88320
+
+int main(void)
+{
+    unsigned int crc = 0, i = 0, j = 0;
+    unsigned int CRCTable[256];
+
+    for (i = 0; i < 256 ; i++)
+    {
+        crc = i;
+        for (j = 8; j > 0; j--)
+        {
+            if (crc & 1)
+                crc = (crc >> 1) ^ CRC32_POLYNOMIAL;
+            else
+                crc >>= 1;
+        }
+        CRCTable[i] = crc;
+    }
+
+    printf("static const NvU32 s_CRCTable[256] = {");
+    for (i = 0; i < 256; i++)
+    {
+        printf("%s0x%08X%s",
+               ((i % 10 == 0) ? "\n    " : ""),
+               CRCTable[i],
+               ((i != 255) ? ", " : " "));
+    }
+    printf("};\n");
+
+    return 0;
+}
+*/
+static const NvU32 s_CRCTable[256] = {
+    0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4,
+    0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+    0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9,
+    0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+    0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A,
+    0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+    0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F,
+    0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+    0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950,
+    0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+    0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5,
+    0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+    0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6,
+    0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+    0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB,
+    0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+    0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C,
+    0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+    0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31,
+    0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+    0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242,
+    0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B,
0x6FB077E1, 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, + 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, + 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D }; + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 a_div_b(NvU32 a, NvU32 b) +{ + if (b == 0) + return 0xFFFFFFFF; + + return (a + b/2)/b; +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 axb_div_c(NvU32 a, NvU32 b, NvU32 c) +{ + NvU32 AhxBl, AlxBh; + NvU32 AxB_high, AxB_low; + NvU32 AxB_div_C_low; + + if (c==0) + return 0xFFFFFFFF; + + // calculate a*b + AhxBl = (a>>16)*(b&0xFFFF); + AlxBh = (a&0xFFFF)*(b>>16); + + AxB_high = (a>>16) * (b>>16); + AxB_low = (a&0xFFFF) * (b&0xFFFF); + + AxB_high += AlxBh >> 16; + AxB_high += AhxBl >> 16; + + if ((AxB_low + (AlxBh<<16))< AxB_low) + AxB_high ++; + AxB_low += AlxBh << 16; + + if ((AxB_low + (AhxBl<<16)) < AxB_low) + AxB_high ++; + AxB_low += AhxBl << 16; + + AxB_div_C_low = AxB_low/c; + AxB_div_C_low += 0xFFFFFFFF / c * (AxB_high % c); + AxB_div_C_low += ((0xFFFFFFFF % c + 1) * (AxB_high % c) + (AxB_low % c) + c/2) / c; + + + return AxB_div_C_low; +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU64 axb_div_c_64(NvU64 a, NvU64 b, NvU64 c) +{ + // NvU64 arithmetic to keep precision and avoid floats + // a*b/c = (a/c)*b + ((a%c)*b + c/2)/c + return ((a/c)*b + ((a%c)*b + c/2)/c); +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 calculateCRC32(NvU8* pBuf, NvU32 bufsize) +{ + NvU32 crc32 = 0xFFFFFFFF, temp1, temp2, count = bufsize; + + if (bufsize == 0 || pBuf == NULL) + { + return 0; + } + + while (count-- != 0) + { + temp1 = (crc32 >> 8) & 0x00FFFFFF; + temp2 = s_CRCTable[(crc32 ^ *pBuf++) & 0xFF]; + crc32 = temp1 ^ temp2; + } + crc32 ^= 0xFFFFFFFF; + + return crc32; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvBool isChecksumValid(NvU8 *pBuf) +{ + NvU8 i; + NvU8 checksum = 0; + + for (i= 0; i < NVT_EDID_BLOCK_SIZE; i++) + { + checksum += pBuf[i]; + } + + if ((checksum & 0xFF) == 0) + { + return NV_TRUE; + } + + return NV_FALSE; +} + +CODE_SEGMENT(PAGE_DD_CODE) +void patchChecksum(NvU8 *pBuf) +{ + NvU8 i; + NvU8 chksum = 0; + + for (i = 0; i < NVT_EDID_BLOCK_SIZE; i++) + { + chksum += pBuf[i]; + } + chksum &= 0xFF; + + // The 1-byte sum of all 128 bytes in this EDID block shall equal zero + // The Checksum Byte (at address 7Fh) shall contain a value such that a checksum of the entire + // 128-byte BASE EDID equals 00h. + if (chksum) + { + pBuf[127] = 0xFF & (pBuf[127] + (0x100 - chksum)); + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ComposeCustTimingString(NVT_TIMING *pT) +{ + if (pT == NULL) + return NVT_STATUS_ERR; + + NVT_SNPRINTF((char *)pT->etc.name, 40, "CUST:%dx%dx%d.%03dHz%s",pT->HVisible, (pT->interlaced ? 2 : 1)*pT->VVisible , pT->etc.rrx1k/1000, pT->etc.rrx1k%1000, (pT->interlaced ? 
"/i" : "")); + pT->etc.name[39] = '\0'; + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU16 NvTiming_CalcRR(NvU32 pclk1khz, NvU16 interlaced, NvU16 HTotal, NvU16 VTotal) +{ + NvU16 rr = 0; + + if (interlaced) + { + NvU32 totalPixelsIn2Fields = (NvU32)HTotal * ((NvU32)VTotal * 2 + 1); + + if (totalPixelsIn2Fields != 0) + { + rr = (NvU16)axb_div_c_64((NvU64)pclk1khz * 2, (NvU64)1000, (NvU64)totalPixelsIn2Fields); + } + } + else + { + NvU32 totalPixels = (NvU32)HTotal * VTotal; + + if (totalPixels != 0) + { + rr = (NvU16)axb_div_c_64((NvU64)pclk1khz, (NvU64)1000, (NvU64)totalPixels); + } + } + return rr; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_CalcRRx1k(NvU32 pclk1khz, NvU16 interlaced, NvU16 HTotal, NvU16 VTotal) +{ + NvU32 rrx1k = 0; + + if (interlaced) + { + NvU32 totalPixelsIn2Fields = (NvU32)HTotal * ((NvU32)VTotal * 2 + 1); + + if (totalPixelsIn2Fields != 0) + { + rrx1k = (NvU32)axb_div_c_64((NvU64)pclk1khz * 2, (NvU64)1000000, (NvU64)totalPixelsIn2Fields); + } + } + else + { + NvU32 totalPixels = (NvU32)HTotal * VTotal; + + if (totalPixels != 0) + { + rrx1k = (NvU32)axb_div_c_64((NvU64)pclk1khz, (NvU64)1000000, (NvU64)totalPixels); + } + } + + return rrx1k; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_IsRoundedRREqual(NvU16 rr1, NvU32 rr1x1k, NvU16 rr2) +{ + return ((rr1 >= (rr1x1k/1000)) && (rr1 <= (rr1x1k + 500) / 1000) && + (rr2 >= (rr1x1k/1000)) && (rr2 <= (rr1x1k + 500) / 1000)); +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 NvTiming_IsTimingExactEqual(const NVT_TIMING *pT1, const NVT_TIMING *pT2) +{ + if ((pT1 == NULL) || (pT2 == NULL)) + { + return 0; + } + + return (( pT1->HVisible == pT2->HVisible) && + ( pT1->HBorder == pT2->HBorder) && + ( pT1->HFrontPorch == pT2->HFrontPorch) && + ( pT1->HSyncWidth == pT2->HSyncWidth) && + ( pT1->HSyncPol == pT2->HSyncPol) && + ( pT1->HTotal == pT2->HTotal) && + ( pT1->VVisible == pT2->VVisible) && + ( pT1->VBorder == pT2->VBorder) && + ( pT1->VFrontPorch == pT2->VFrontPorch) && + ( pT1->VSyncWidth == pT2->VSyncWidth) && + ( pT1->VSyncPol == pT2->VSyncPol) && + ( pT1->VTotal == pT2->VTotal) && + ( pT1->etc.rr == pT2->etc.rr) && + (!!pT1->interlaced == !!pT2->interlaced)); +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 NvTiming_IsTimingExactEqualEx(const NVT_TIMING *pT1, const NVT_TIMING *pT2) +{ + NvU32 bIsTimingExactEqual = NvTiming_IsTimingExactEqual(pT1, pT2); + return (bIsTimingExactEqual && (pT1->etc.rrx1k == pT2->etc.rrx1k)); +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 NvTiming_IsTimingRelaxedEqual(const NVT_TIMING *pT1, const NVT_TIMING *pT2) +{ + if ((pT1 == NULL) || (pT2 == NULL)) + { + return 0; + } + + return (( pT1->HVisible == pT2->HVisible) && + ( pT1->HBorder == pT2->HBorder) && + ( pT1->HFrontPorch == pT2->HFrontPorch) && + ( pT1->HSyncWidth == pT2->HSyncWidth) && + //( pT1->HSyncPol == pT2->HSyncPol) && // skip the polarity check to tolerate mismatch h/v sync polarities in 18-byte DTD + ( pT1->HTotal == pT2->HTotal) && + ( pT1->VVisible == pT2->VVisible) && + ( pT1->VBorder == pT2->VBorder) && + ( pT1->VFrontPorch == pT2->VFrontPorch) && + ( pT1->VSyncWidth == pT2->VSyncWidth) && + //( pT1->VSyncPol == pT2->VSyncPol) && // skip the polarity check to tolerate mismatch h/v sync polarities in 18-byte DTD + ( pT1->VTotal == pT2->VTotal) && + ( pT1->etc.rr == pT2->etc.rr) && + (!!pT1->interlaced == !!pT2->interlaced)); +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 RRx1kToPclk (NVT_TIMING *pT) +{ + return (NvU32)axb_div_c_64(pT->HTotal * (pT->VTotal + ((pT->interlaced != 0) ? 
(pT->VTotal + 1) : 0)), + pT->etc.rrx1k, + 1000 * ((pT->interlaced != 0) ? 20000 : 10000)); +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 RRx1kToPclk1khz (NVT_TIMING *pT) +{ + return (NvU32)axb_div_c_64((NvU32)pT->HTotal * (NvU32)(pT->VTotal + ((pT->interlaced != 0) ? (pT->VTotal + 1) : 0)), + pT->etc.rrx1k, + 1000 * ((pT->interlaced != 0) ? 2000 : 1000)); +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU16 NvTiming_MaxFrameWidth(NvU16 HVisible, NvU16 repMask) +{ + NvU16 minPixelRepeat; + + if (repMask == 0) + { + return HVisible; + } + + minPixelRepeat = 1; + while ((repMask & 1) == 0) + { + repMask >>= 1; + minPixelRepeat++; + } + + return (HVisible / minPixelRepeat); +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_GetVrrFmin( + const NVT_EDID_INFO *pEdidInfo, + const NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, + NvU32 nominalRefreshRateHz, + NVT_PROTOCOL sinkProtocol) +{ + NvU32 fmin = 0; + + // DP Adaptive Sync + if (sinkProtocol == NVT_PROTOCOL_DP) + { + if (pEdidInfo) + { + if (pEdidInfo->ext_displayid.version) + { + fmin = pEdidInfo->ext_displayid.range_limits[0].vfreq_min; + } + + if (pEdidInfo->ext_displayid20.version && pEdidInfo->ext_displayid20.range_limits.seamless_dynamic_video_timing_change) + { + fmin = pEdidInfo->ext_displayid20.range_limits.vfreq_min; + } + + // DisplayID 2.0 extension + if (pEdidInfo->ext_displayid20.version && pEdidInfo->ext_displayid20.total_adaptive_sync_descriptor != 0) + { + // Go through all the Adaptive Sync Data Blocks and pick the right frequency based on nominalRR + NvU32 i; + for (i = 0; i < pEdidInfo->ext_displayid20.total_adaptive_sync_descriptor; i++) + { + if ((pEdidInfo->ext_displayid20.adaptive_sync_descriptor[i].max_rr == nominalRefreshRateHz) || + (nominalRefreshRateHz == 0)) + { + fmin = pEdidInfo->ext_displayid20.adaptive_sync_descriptor[i].min_rr; + break; + } + } + } + + if (!fmin) + { + NvU32 i; + for (i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++) + { + if (pEdidInfo->ldd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_DRL) + { + fmin = pEdidInfo->ldd[i].u.range_limit.min_v_rate; + } + } + } + + // Gsync + if (pEdidInfo->nvdaVsdbInfo.valid) + { + fmin = NV_MAX(pEdidInfo->nvdaVsdbInfo.vrrData.v1.minRefreshRate, 10); + } + } + + // Display ID 2.0 Standalone + if (pDisplayIdInfo) + { + // Go through all the Adaptive Sync Data Blocks and pick the right frequency based on nominalRR + NvU32 i; + for (i = 0; i < pDisplayIdInfo->total_adaptive_sync_descriptor; i++) + { + if ((pDisplayIdInfo->adaptive_sync_descriptor[i].max_rr == nominalRefreshRateHz) || + (nominalRefreshRateHz == 0)) + { + fmin = pDisplayIdInfo->adaptive_sync_descriptor[i].min_rr; + break; + } + } + // If unable to find the value, choose a fallback from DisplayId + if (!fmin) + { + fmin = pDisplayIdInfo->range_limits.vfreq_min; + } + } + } + + // HDMI 2.1 VRR + else if (sinkProtocol == NVT_PROTOCOL_HDMI) + { + if (pEdidInfo) + { + fmin = pEdidInfo->hdmiForumInfo.vrr_min; + } + } + + return fmin; +} + +POP_SEGMENTS diff --git a/src/common/modeset/timing/nvtiming.h b/src/common/modeset/timing/nvtiming.h new file mode 100644 index 0000000..718f88e --- /dev/null +++ b/src/common/modeset/timing/nvtiming.h @@ -0,0 +1,5943 @@ +//**************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// SPDX-License-Identifier: MIT
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+//
+// File: nvtiming.h
+//
+// Purpose: This file is the common header for all nv timing library clients.
+//
+//*****************************************************************************
+
+#ifndef __NVTIMING_H__
+#define __NVTIMING_H__
+
+#include "nvtypes.h"
+
+
+#define abs_delta(a,b) ((a)>(b)?((a)-(b)):((b)-(a)))
+
+//***********************
+// The Timing Structure
+//***********************
+//
+// Nvidia specific timing extras
+typedef struct tagNVT_HDMIEXT
+{
+    // in the case of stereo, the NVT_TIMING structure will hold the 2D
+    // instance of the timing parameters, and the stereo extension will
+    // contain the variants required to produce the stereo frame.
+ NvU8 StereoStructureType; + NvU8 SideBySideHalfDetail; + NvU16 VActiveSpace[2]; +} NVT_HDMIEXT; +#define NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(x) (1 << (x)) +#define NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK) +#define NVT_HDMI_3D_SUPPORTED_FIELD_ALT_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_FIELD_ALT) +#define NVT_HDMI_3D_SUPPORTED_LINE_ALT_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_LINE_ALT) +#define NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEFULL_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEFULL) +#define NVT_HDMI_3D_SUPPORTED_LDEPTH_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTH) +#define NVT_HDMI_3D_SUPPORTED_LDEPTHGFX_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTHGFX) +#define NVT_HDMI_3D_SUPPORTED_TOPBOTTOM_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM) +#define NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEHALF_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF) +#define NVT_ALL_HDMI_3D_STRUCT_SUPPORTED_MASK (NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK | NVT_HDMI_3D_SUPPORTED_TOPBOTTOM_MASK | NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEHALF_MASK) + +typedef union tagNVT_COLORDEPTH +{ + NvU8 bpcs; + struct + { + NvU8 bpc6 : 1; + NvU8 bpc8 : 1; + NvU8 bpc10 : 1; + NvU8 bpc12 : 1; + NvU8 bpc14 : 1; + NvU8 bpc16 : 1; + NvU8 rsrvd1 : 1; // must be 0 + NvU8 rsrvd2 : 1; // must be 0 + } bpc; +} NVT_COLORDEPTH; + +#define IS_BPC_SUPPORTED_COLORFORMAT(colorDepth) (!!((NvU8)(colorDepth))) +#define UPDATE_BPC_FOR_COLORFORMAT(colorFormat, b6bpc, b8bpc, b10bpc, b12bpc, b14bpc, b16bpc) \ + if ((b6bpc)) ((colorFormat).bpc.bpc6 = 1); \ + if ((b8bpc)) ((colorFormat).bpc.bpc8 = 1); \ + if ((b10bpc)) ((colorFormat).bpc.bpc10 = 1); \ + if ((b12bpc)) ((colorFormat).bpc.bpc12 = 1); \ + if ((b14bpc)) ((colorFormat).bpc.bpc14 = 1); \ + if ((b16bpc)) ((colorFormat).bpc.bpc16 = 1); + +#define SET_BPC_FOR_COLORFORMAT(_colorFormat, _bpc) \ + if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_6) \ + ((_colorFormat).bpc.bpc6 = 1); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_8) \ + ((_colorFormat).bpc.bpc8 = 1); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_10) \ + ((_colorFormat).bpc.bpc10 = 1); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_12) \ + ((_colorFormat).bpc.bpc12 = 1); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_16) ((_colorFormat).bpc.bpc16 = 1); + +#define CLEAR_BPC_FOR_COLORFORMAT(_colorFormat, _bpc) \ + if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_6) \ + ((_colorFormat).bpc.bpc6 = 0); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_8) \ + ((_colorFormat).bpc.bpc8 = 0); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_10) \ + ((_colorFormat).bpc.bpc10 = 0); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_12) \ + ((_colorFormat).bpc.bpc12 = 0); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_16) ((_colorFormat).bpc.bpc16 = 0); + +#define NVT_COLORDEPTH_HIGHEST_BPC(_colorFormat) \ + (_colorFormat).bpc.bpc16 ? NVT_EDID_VIDEOSIGNAL_BPC_16 : \ + (_colorFormat).bpc.bpc12 ? NVT_EDID_VIDEOSIGNAL_BPC_12 : \ + (_colorFormat).bpc.bpc10 ? NVT_EDID_VIDEOSIGNAL_BPC_10 : \ + (_colorFormat).bpc.bpc8 ? NVT_EDID_VIDEOSIGNAL_BPC_8 : \ + (_colorFormat).bpc.bpc6 ? NVT_EDID_VIDEOSIGNAL_BPC_6 : NVT_EDID_VIDEOSIGNAL_BPC_NOT_DEFINED + +#define NVT_COLORDEPTH_LOWEREST_BPC(_colorFormat) \ + (_colorFormat).bpc.bpc6 ? NVT_EDID_VIDEOSIGNAL_BPC_6 : \ + (_colorFormat).bpc.bpc8 ? 
NVT_EDID_VIDEOSIGNAL_BPC_8 : \
+    (_colorFormat).bpc.bpc10 ? NVT_EDID_VIDEOSIGNAL_BPC_10 : \
+    (_colorFormat).bpc.bpc12 ? NVT_EDID_VIDEOSIGNAL_BPC_12 : \
+    (_colorFormat).bpc.bpc16 ? NVT_EDID_VIDEOSIGNAL_BPC_16 : NVT_EDID_VIDEOSIGNAL_BPC_NOT_DEFINED
+
+typedef struct tagNVT_TIMINGEXT
+{
+    NvU32 flag;   // reserved for NV h/w based enhancements, like double-scan.
+    NvU16 rr;     // the logical refresh rate to present
+    NvU32 rrx1k;  // the physical vertical refresh rate in 0.001Hz
+    NvU32 aspect; // the display aspect ratio Hi(aspect):horizontal-aspect, Low(aspect):vertical-aspect
+    //
+    // Bitmask of one-hot encoded possible pixel repetitions:
+    // 0x1: no pixel repetition (i.e., display each pixel once)
+    // 0x2: each pixel is displayed twice horizontally;
+    // 0x3: use either no pixel repetition or display each pixel twice
+    // ...
+    //
+    NvU16 rep;
+    NVT_COLORDEPTH rgb444; // each bit within is set if rgb444 is supported on that bpc
+    NVT_COLORDEPTH yuv444; // each bit within is set if yuv444 is supported on that bpc
+    NVT_COLORDEPTH yuv422; // each bit within is set if yuv422 is supported on that bpc
+    NVT_COLORDEPTH yuv420; // each bit within is set if yuv420 is supported on that bpc
+    NvU32 status;   // the timing standard being used
+    NvU8  name[51]; // the name of the timing
+}NVT_TIMINGEXT;
+//
+//
+//The very basic timing structure based on the VESA standard:
+//
+//            |<----------------------------htotal--------------------------->|
+//             ---------"active" video-------->|<-------blanking------>|<-----
+//            |<-------hvisible-------->|<-hb->|<-hfp->|<-hsw->|<-hbp->|<-hb->|
+// ----------+-------------------------+       |       |       |       |      |
+//   A      A |                        |       |       |       |       |      |
+//   :      : |                        |       |       |       |       |      |
+//   :      : |                        |       |       |       |       |      |
+//   :vertical|    addressable video   |       |       |       |       |      |
+//   :visible |                        |       |       |       |       |      |
+//   :      : |                        |       |       |       |       |      |
+//   :      : |                        |       |       |       |       |      |
+// vertical V |                        |       |       |       |       |      |
+// total  --+-------------------------+        |       |       |       |      |
+//   :        vb border                        |       |       |       |      |
+//   :      -----------------------------------+       |       |       |      |
+//   :        vfp front porch                          |       |       |      |
+//   :      -------------------------------------------+       |       |      |
+//   :        vsw sync width                                   |       |      |
+//   :      ---------------------------------------------------+       |      |
+//   :        vbp back porch                                           |      |
+//   :      -----------------------------------------------------------+      |
+//   V        vb border                                                       |
+// --------------------------------------------------------------------------+
+//
+typedef struct tagNVT_TIMING
+{
+    // VESA scan out timing parameters:
+    NvU16 HVisible;    //horizontal visible
+    NvU16 HBorder;     //horizontal border
+    NvU16 HFrontPorch; //horizontal front porch
+    NvU16 HSyncWidth;  //horizontal sync width
+    NvU16 HTotal;      //horizontal total
+    NvU8  HSyncPol;    //horizontal sync polarity: 1-negative, 0-positive
+
+    NvU16 VVisible;    //vertical visible
+    NvU16 VBorder;     //vertical border
+    NvU16 VFrontPorch; //vertical front porch
+    NvU16 VSyncWidth;  //vertical sync width
+    NvU16 VTotal;      //vertical total
+    NvU8  VSyncPol;    //vertical sync polarity: 1-negative, 0-positive
+
+    NvU16 interlaced;  //1-interlaced, 0-progressive
+    NvU32 pclk;        //pixel clock in 10KHz
+    NvU32 pclk1khz;    //pixel clock in 1kHz for Type7, CVT RB2, CVT RB3
+
+    //other timing related extras
+    NVT_TIMINGEXT etc;
+}NVT_TIMING;
+
+#define NVT_MAX_TOTAL_TIMING 128
+
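+// A quick worked example of how these fields relate (editor's illustration,
+// not part of the library): pclk is stored in 10KHz units and etc.rrx1k in
+// 0.001Hz, so for a progressive timing the physical refresh rate follows
+// from pclk * 10000 * 1000 / (HTotal * VTotal). Using the standard CTA
+// 1080p60 numbers (HTotal 2200, VTotal 1125, pclk 14850 == 148.5 MHz):
+//
+//     NvU32 rrx1k = (NvU32)axb_div_c_64((NvU64)14850 * 10000, 1000,
+//                                       (NvU64)2200 * 1125); // == 60000, i.e. 60.000 Hz
+//
+// For interlaced timings the library counts two fields per frame (see
+// NvTiming_CalcRRx1k in nvt_util.c, which uses HTotal * (VTotal * 2 + 1)).
+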
+//
+// The below VSync/HSync Polarity definitions have been inverted to match the
+// HW Display Class definition.
+// timing related constants:
+#define NVT_H_SYNC_POSITIVE 0
+#define NVT_H_SYNC_NEGATIVE 1
+#define NVT_H_SYNC_DEFAULT  NVT_H_SYNC_NEGATIVE
+//
+#define NVT_V_SYNC_POSITIVE 0
+#define NVT_V_SYNC_NEGATIVE 1
+#define NVT_V_SYNC_DEFAULT  NVT_V_SYNC_POSITIVE
+//
+#define NVT_PROGRESSIVE 0
+#define NVT_INTERLACED  1
+#define NVT_INTERLACED_EXTRA_VBLANK_ON_FIELD2    1
+#define NVT_INTERLACED_NO_EXTRA_VBLANK_ON_FIELD2 2
+
+// timing related macros:
+#define NVT_FRAME_HEIGHT(_vvisible_, _interlaced_) ((_vvisible_) * ((_interlaced_ != 0) ? 2 : 1))
+
+//*************************************
+// The Timing Status encoded in
+//   NVT_TIMING::NVT_TIMINGEXT::status
+//*************************************
+//
+// TIMING_STATUS has the following kinds of info:
+//
+// NVT_TIMING::NVT_TIMINGEXT::status
+//
+// +----+----+---------+----+----+------------------------------+---+---------------+---+----------------+
+// bit31 bit30 bit29...bit22 bit21 bit20...bit16 bit15...bit8 bit7...bit0
+// |native|cust|<-cta format->|Dual|<--------mismatch status-------->|<---timing type--->|<---timing seq#--->|
+//
+// 1. the monitor preferred timing flag and cust EDID entry flag
+//
+#define NVT_STATUS_TIMING_NATIVE_FLAG_MASK  0x80000000
+#define NVT_STATUS_TIMING_NATIVE_FLAG_SHIFT 31
+#define NVT_IS_NATIVE_TIMING(n)             (((n)&NVT_STATUS_TIMING_NATIVE_FLAG_MASK)>>NVT_STATUS_TIMING_NATIVE_FLAG_SHIFT)
+#define NVT_SET_NATIVE_TIMING_FLAG(n)       ((n)|=1U<<NVT_STATUS_TIMING_NATIVE_FLAG_SHIFT)
+#define NVT_PREFERRED_TIMING_MODE_MASK      0x2
+//
+#define NVT_STATUS_TIMING_CUST_ENTRY_MASK   0x40000000
+#define NVT_STATUS_TIMING_CUST_ENTRY_SHIFT  30
+#define NVT_IS_CUST_ENTRY(n)                (((n)&NVT_STATUS_TIMING_CUST_ENTRY_MASK)>>NVT_STATUS_TIMING_CUST_ENTRY_SHIFT)
+#define NVT_SET_CUST_ENTRY_FLAG(n)          ((n)|=1<<NVT_STATUS_TIMING_CUST_ENTRY_SHIFT)
+//
+// 2. the CTA 861 format (bits 29:22) and the CEA/DMT dual-standard flag (bit 21)
+//
+#define NVT_STATUS_TIMING_CEA_FORMAT_MASK   0x3FC00000
+#define NVT_STATUS_TIMING_CEA_FORMAT_SHIFT  22
+#define NVT_GET_CEA_FORMAT(n)               (((n)&NVT_STATUS_TIMING_CEA_FORMAT_MASK)>>NVT_STATUS_TIMING_CEA_FORMAT_SHIFT)
+#define NVT_SET_CEA_FORMAT(n,index)         {(n)&=~NVT_STATUS_TIMING_CEA_FORMAT_MASK;(n)|=(index)<<NVT_STATUS_TIMING_CEA_FORMAT_SHIFT;}
+//
+#define NVT_STATUS_TIMING_CEA_DMT_MASK      0x00200000
+#define NVT_STATUS_TIMING_CEA_DMT_SHIFT     21
+#define NVT_IS_CEA_DMT_DUAL_STANDARD(n)     (((n)&NVT_STATUS_TIMING_CEA_DMT_MASK)>>NVT_STATUS_TIMING_CEA_DMT_SHIFT)
+#define NVT_SET_CEA_DMT_DUAL_STANDARD_FLAG(n) ((n)|=NVT_STATUS_TIMING_CEA_DMT_MASK)
+//
+//
+// 3. the mismatch status
+#define NVT_STATUS_TIMING_MISMATCH_MASK      0x001F0000
+#define NVT_STATUS_TIMING_MISMATCH_SHIFT     16
+#define NVT_STATUS_TIMING_MISMATCH_SIZE      0x1 //visible width and height don't match with the asked width/height
+#define NVT_STATUS_TIMING_MISMATCH_RR        0x2 //the refresh rate doesn't match the requested one
+#define NVT_STATUS_TIMING_MISMATCH_FORMAT    0x4 //other timing info doesn't match (i.e. progressive/interlaced, double, reduced-blanking etc...)
+#define NVT_STATUS_TIMING_MISMATCH_ALIGNMENT 0x8 //the requested alignment doesn't match the spec
+//
+// macros to set/get the timing mismatch status
+#define NVT_SET_TIMING_STATUS_MISMATCH(m,n) ((m)|=(((n)<<NVT_STATUS_TIMING_MISMATCH_SHIFT)&NVT_STATUS_TIMING_MISMATCH_MASK))
+#define NVT_GET_TIMING_STATUS_MISMATCH(m)   (((m)&NVT_STATUS_TIMING_MISMATCH_MASK)>>NVT_STATUS_TIMING_MISMATCH_SHIFT)
+//
+//
+// 4. the timing type
+//
+#define NVT_STATUS_TIMING_TYPE_MASK  0x0000FF00
+#define NVT_STATUS_TIMING_TYPE_SHIFT 8
+//
+typedef enum NVT_TIMING_TYPE
+{
+    NVT_TYPE_DMT = 1,               // DMT
+    NVT_TYPE_GTF,                   // GTF
+    NVT_TYPE_ASPR,                  // wide aspect ratio timing, for legacy support only
+    NVT_TYPE_NTSC_TV,               // NTSC TV timing.
for legacy support only + NVT_TYPE_PAL_TV, // PAL TV timing, legacy support only + NVT_TYPE_CVT, // CVT timing + NVT_TYPE_CVT_RB, // CVT timing with reduced blanking + NVT_TYPE_CUST, // Customized timing + NVT_TYPE_EDID_DTD, // EDID detailed timing + NVT_TYPE_EDID_STD, // = 10 EDID standard timing + NVT_TYPE_EDID_EST, // EDID established timing + NVT_TYPE_EDID_CVT, // EDID defined CVT timing (EDID 1.4) + NVT_TYPE_EDID_861ST, // EDID defined CEA/EIA 861 timing (in the CTA861 extension) + NVT_TYPE_NV_PREDEFINED, // NV pre-defined timings (PsF timings) + NVT_TYPE_DMT_RB, // DMT timing with reduced blanking + NVT_TYPE_EDID_EXT_DTD, // EDID detailed timing in the extension + NVT_TYPE_SDTV, // SDTV timing (including NTSC, PAL etc) + NVT_TYPE_HDTV, // HDTV timing (480p,480i,720p, 1080i etc) + NVT_TYPE_SMPTE, // deprecated ? still used by drivers\unix\nvkms\src\nvkms-dpy.c + NVT_TYPE_EDID_VTB_EXT, // = 20 EDID defined VTB extension timing + NVT_TYPE_EDID_VTB_EXT_STD, // EDID defined VTB extension standard timing + NVT_TYPE_EDID_VTB_EXT_DTD, // EDID defined VTB extension detailed timing + NVT_TYPE_EDID_VTB_EXT_CVT, // EDID defined VTB extension cvt timing + NVT_TYPE_HDMI_STEREO, // EDID defined HDMI stereo timing + NVT_TYPE_DISPLAYID_1, // DisplayID Type 1 timing + NVT_TYPE_DISPLAYID_2, // DisplayID Type 2 timing + NVT_TYPE_HDMI_EXT, // EDID defined HDMI extended resolution timing (UHDTV - 4k, 8k etc.) + NVT_TYPE_CUST_AUTO, // Customized timing generated automatically by NVCPL + NVT_TYPE_CUST_MANUAL, // Customized timing entered manually by user + NVT_TYPE_CVT_RB_2, // = 30 CVT timing with reduced blanking V2 + NVT_TYPE_DMT_RB_2, // DMT timing with reduced blanking V2 + NVT_TYPE_DISPLAYID_7, // DisplayID 2.0 detailed timing - Type VII + NVT_TYPE_DISPLAYID_8, // DisplayID 2.0 enumerated timing - Type VIII + NVT_TYPE_DISPLAYID_9, // DisplayID 2.0 formula-based timing - Type IX + NVT_TYPE_DISPLAYID_10, // DisplayID 2.0 formula-based timing - Type X + NVT_TYPE_CVT_RB_3, // CVT timing with reduced blanking V3 + NVT_TYPE_CTA861_DID_T7, // EDID defined CTA861 DisplayID Type VII timing (in the CTA861 extension) + NVT_TYPE_CTA861_DID_T8, // EDID defined CTA861 DisplayID Type VIII timing (in the CTA861 extension) + NVT_TYPE_CTA861_DID_T10 // EDID defined CTA861 DisplayID Type X timing (in the CTA861 extension) +}NVT_TIMING_TYPE; +// +// 5. 
the timing sequence number like the TV format and EIA861B predefined timing format
+//    **the numbers are chosen to match with the NV h/w format**
+//
+#define NVT_STATUS_TIMING_SEQ_MASK 0x000000FF
+//
+typedef enum NVT_TV_FORMAT
+{
+    NVT_NTSC   = 0,
+    NVT_NTSC_M = 0,
+    NVT_NTSC_J = 1,
+    NVT_PAL    = 2,
+    NVT_PAL_M  = 2,
+    NVT_PAL_A  = 3,
+    NVT_PAL_N  = 4,
+    NVT_PAL_NC = 5,
+    NVT_HD576I = 8,
+    NVT_HD480I,
+    NVT_HD480P,
+    NVT_HD576P,
+    NVT_HD720P,
+    NVT_HD1080I,
+    NVT_HD1080P,
+    NVT_HD720P50,
+    NVT_HD1080P24,
+    NVT_HD1080I50,
+    NVT_HD1080P50,
+    NVT_MAX_TV_FORMAT,
+    NVT_AUTO_SDTV_FORMAT = (NvU32)(-2), // Not supported in NvTiming_GetTvTiming
+    NVT_AUTO_HDTV_FORMAT = (NvU32)(-1),
+}NVT_TV_FORMAT;
+
+#define NVT_DEFAULT_HDTV_FMT NVT_HD1080I
+//
+// macros to set/get the timing type and seq number
+//
+#define NVT_DEF_TIMING_STATUS(type, seq) ((((type)<<NVT_STATUS_TIMING_TYPE_SHIFT)&NVT_STATUS_TIMING_TYPE_MASK)|((seq)&NVT_STATUS_TIMING_SEQ_MASK))
+#define NVT_GET_TIMING_STATUS_TYPE(n)    (((n)&NVT_STATUS_TIMING_TYPE_MASK)>>NVT_STATUS_TIMING_TYPE_SHIFT)
+#define NVT_GET_TIMING_STATUS_SEQ(n)     ((n)&NVT_STATUS_TIMING_SEQ_MASK)
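+//
+// For example (editor's illustration): NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080I)
+// packs type 18 (NVT_TYPE_HDTV) into bits 15:8 and seq 13 (NVT_HD1080I) into
+// bits 7:0, yielding 0x120D; NVT_GET_TIMING_STATUS_SEQ() then recovers the 13.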
+//
+//
+//
+// the timing type definitions
+#define NVT_STATUS_DMT              NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT, 0)      // DMT
+#define NVT_STATUS_GTF              NVT_DEF_TIMING_STATUS(NVT_TYPE_GTF, 0)      // GTF
+#define NVT_STATUS_ASPR             NVT_DEF_TIMING_STATUS(NVT_TYPE_ASPR, 0)     // ASPR
+#define NVT_STATUS_NTSC_TV          NVT_DEF_TIMING_STATUS(NVT_TYPE_NTSC_TV, 0)  // TVN
+#define NVT_STATUS_PAL_TV           NVT_DEF_TIMING_STATUS(NVT_TYPE_PAL_TV, 0)   // TVP
+#define NVT_STATUS_CVT              NVT_DEF_TIMING_STATUS(NVT_TYPE_CVT, 0)      // CVT timing with regular blanking
+#define NVT_STATUS_CVT_RB           NVT_DEF_TIMING_STATUS(NVT_TYPE_CVT_RB, 0)   // CVT_RB timing V1
+#define NVT_STATUS_CVT_RB_2         NVT_DEF_TIMING_STATUS(NVT_TYPE_CVT_RB_2, 0) // CVT_RB timing V2
+#define NVT_STATUS_CVT_RB_3         NVT_DEF_TIMING_STATUS(NVT_TYPE_CVT_RB_3, 0) // CVT_RB timing V3
+#define NVT_STATUS_CUST             NVT_DEF_TIMING_STATUS(NVT_TYPE_CUST, 0)     // Customized timing
+#define NVT_STATUS_EDID_DTD         NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_DTD, 0)
+#define NVT_STATUS_EDID_STD         NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_STD, 0)
+#define NVT_STATUS_EDID_EST         NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_EST, 0)
+#define NVT_STATUS_EDID_CVT         NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_CVT, 0)
+#define NVT_STATUS_EDID_861ST       NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_861ST, 0)
+#define NVT_STATUS_DMT_RB           NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT_RB, 0)
+#define NVT_STATUS_EDID_EXT_DTD     NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_EXT_DTD, 0)
+#define NVT_STATUS_SDTV_NTSC        NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_NTSC)
+#define NVT_STATUS_SDTV_NTSC_M      NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_NTSC)
+#define NVT_STATUS_SDTV_NTSC_J      NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_NTSC_J)
+#define NVT_STATUS_SDTV_PAL         NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_PAL)
+#define NVT_STATUS_SDTV_PAL_M       NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_PAL)
+#define NVT_STATUS_SDTV_PAL_A       NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_PAL_A)
+#define NVT_STATUS_SDTV_PAL_N       NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_PAL_N)
+#define NVT_STATUS_SDTV_PAL_NC      NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_PAL_NC)
+#define NVT_STATUS_HDTV_480I        NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD480I)
+#define NVT_STATUS_HDTV_480P        NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD480P)
+#define NVT_STATUS_HDTV_576I        NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD576I)
+#define NVT_STATUS_HDTV_576P        NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD576P)
+#define NVT_STATUS_HDTV_720P        NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD720P)
+#define NVT_STATUS_HDTV_1080I       NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080I)
+#define NVT_STATUS_HDTV_1080P       NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080P)
+#define NVT_STATUS_HDTV_720P50      NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD720P50)
+#define NVT_STATUS_HDTV_1080P24     NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080P24)
+#define NVT_STATUS_HDTV_1080I50     NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080I50)
+#define NVT_STATUS_HDTV_1080P50     NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080P50)
+#define NVT_STATUS_EDID_VTB_EXT     NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT, 0)
+#define NVT_STATUS_EDID_VTB_EXT_DTD NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_DTD, 0)
+#define NVT_STATUS_EDID_VTB_EXT_CVT NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_CVT, 0)
+#define NVT_STATUS_EDID_VTB_EXT_STD NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_STD, 0)
+#define NVT_STATUS_HDMI_STEREO      NVT_DEF_TIMING_STATUS(NVT_TYPE_HDMI_STEREO, 0)
+#define NVT_STATUS_DISPLAYID_1      NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_1, 0)
+#define NVT_STATUS_DISPLAYID_2      NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_2, 0)
+#define NVT_STATUS_DISPLAYID_7      NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_7, 0)
+#define NVT_STATUS_DISPLAYID_8      NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_8, 0)
+#define NVT_STATUS_DISPLAYID_9      NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_9, 0)
+#define NVT_STATUS_DISPLAYID_10     NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_10, 0)
+#define NVT_STATUS_HDMI_EXT         NVT_DEF_TIMING_STATUS(NVT_TYPE_HDMI_EXT, 0)
+#define NVT_STATUS_CUST_AUTO        NVT_DEF_TIMING_STATUS(NVT_TYPE_CUST_AUTO, 0)
+#define NVT_STATUS_CUST_MANUAL      NVT_DEF_TIMING_STATUS(NVT_TYPE_CUST_MANUAL, 0)
+
+//
+// adding the timing sequence (from the EDID) to the modeset status
+#define NVT_STATUS_DTD1                 NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_DTD, 1)
+#define NVT_STATUS_EDID_DTDn(n)         NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_DTD, n)
+#define NVT_STATUS_EDID_STDn(n)         NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_STD, n)
+#define NVT_STATUS_EDID_ESTn(n)         NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_EST, n)
+#define NVT_STATUS_EDID_CVTn(n)         NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_CVT, n)
+#define NVT_STATUS_EDID_861STn(n)       NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_861ST, n)
+#define NVT_STATUS_EDID_EXT_DTDn(n)     NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_EXT_DTD, n)
+#define NVT_STATUS_CUSTn(n)             NVT_DEF_TIMING_STATUS(NVT_TYPE_CUST, n)
+#define NVT_TYPE_NV_PREDEFINEDn(n)      NVT_DEF_TIMING_STATUS(NVT_TYPE_NV_PREDEFINED, n)
+#define NVT_STATUS_EDID_VTB_EXTn(n)     NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT, n)
+#define NVT_STATUS_EDID_VTB_EXT_DTDn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_DTD, n)
+#define NVT_STATUS_EDID_VTB_EXT_STDn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_STD, n)
+#define NVT_STATUS_EDID_VTB_EXT_CVTn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_CVT, n)
+#define NVT_STATUS_HDMI_STEREO_REQn(n)  NVT_DEF_TIMING_STATUS(NVT_TYPE_HDMI_STEREO_REQ, n)
+#define NVT_STATUS_DISPLAYID_1N(n)      NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_1, n)
+#define NVT_STATUS_DISPLAYID_2N(n)      NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_2, n)
+#define NVT_STATUS_DISPLAYID_7N(n)      NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_7, n)
+#define NVT_STATUS_DISPLAYID_8N(n)      NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_8, n)
+#define NVT_STATUS_DISPLAYID_9N(n)      NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_9, n)
+#define NVT_STATUS_DISPLAYID_10N(n)     NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_10, n)
+#define NVT_STATUS_HDMI_EXTn(n)         NVT_DEF_TIMING_STATUS(NVT_TYPE_HDMI_EXT, n)
+#define NVT_STATUS_CTA861_DID_T7N(n)    NVT_DEF_TIMING_STATUS(NVT_TYPE_CTA861_DID_T7, n)
+#define NVT_STATUS_CTA861_DID_T8N(n)    NVT_DEF_TIMING_STATUS(NVT_TYPE_CTA861_DID_T8, n)
+#define NVT_STATUS_CTA861_DID_T10N(n)   NVT_DEF_TIMING_STATUS(NVT_TYPE_CTA861_DID_T10, n)
+#define
NVT_STATUS_CTA861_OVT_Tn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_CTA861_OVT, n) + +//******************************** +// CEA/EIA 861 related EDID info +//******************************** +#define NVT_CEA861_REV_NONE 0 +#define NVT_CEA861_REV_ORIGINAL 1 +#define NVT_CEA861_REV_A 2 +#define NVT_CEA861_REV_B 3 +#define NVT_CEA861_REV_C 3 +#define NVT_CEA861_REV_D 3 +#define NVT_CEA861_REV_E 3 +#define NVT_CEA861_REV_F 3 +#define NVT_CTA861_REV_G 3 +#define NVT_CTA861_REV_H 3 +// +// max data after misc/basic_caps in EIA861EXTENSION +#define NVT_CEA861_MAX_PAYLOAD 123 +// +// the basic info encoded in byte[3] +#define NVT_CEA861_CAP_UNDERSCAN 0x80 // DTV monitor supports underscan +#define NVT_CEA861_CAP_BASIC_AUDIO 0x40 // DTV monitor supports basic audio +#define NVT_CEA861_CAP_YCbCr_444 0x20 // DTV monitor supports YCbCr4:4:4 +#define NVT_CEA861_CAP_YCbCr_422 0x10 // DTV monitor supports YCbCr4:2:2 +// +#define NVT_CEA861_TOTAL_LT_MASK 0x0F //the max number of 18-byte detailed timing descriptor +// +// +#define NVT_CEA861_SHORT_DESCRIPTOR_SIZE_MASK 0x1F +#define NVT_CEA861_SHORT_DESCRIPTOR_TAG_MASK 0xE0 +#define NVT_CEA861_SHORT_DESCRIPTOR_TAG_SHIFT 5 +// +// the CTA Tag Codes +#define NVT_CEA861_TAG_RSVD 0 // reserved block +#define NVT_CEA861_TAG_NONE 0 // reserved block +#define NVT_CEA861_TAG_AUDIO 1 // Audio Data Block +#define NVT_CEA861_TAG_VIDEO 2 // Video Data Block +#define NVT_CEA861_TAG_VENDOR 3 // Vendor Specific Data Block +#define NVT_CEA861_TAG_SPEAKER_ALLOC 4 // Speaker Allocation Data Block +#define NVT_CEA861_TAG_VESA_DTC 5 // VESA DTC data block +#define NVT_CTA861_TAG_VIDEO_FORMAT 6 // Video Format Data Block in CTA861.6 +#define NVT_CEA861_TAG_EXTENDED_FLAG 7 // use Extended Tag +// +// the extended tag codes when NVT_CEA861_TAG_EXTENDED_FLAG +#define NVT_CEA861_EXT_TAG_VIDEO_CAP 0 // Video Capability Data Block +#define NVT_CEA861_EXT_TAG_VENDOR_SPECIFIC_VIDEO 1 // Vendor-Specific Video Data Block +#define NVT_CEA861_EXT_TAG_VESA_VIDEO_DISPLAY_DEVICE 2 // VESA Video Display Device Information Data Block +#define NVT_CEA861_EXT_TAG_VESA_VIDEO 3 // Reserved for VESA Video Data BLock +#define NVT_CEA861_EXT_TAG_HDMI_VIDEO 4 // Reserved for HDMI Video Data Block +#define NVT_CEA861_EXT_TAG_COLORIMETRY 5 // Colorimetry Data Block +#define NVT_CEA861_EXT_TAG_HDR_STATIC_METADATA 6 // HDR Static Metadata Data Block CEA861.3 HDR extension for HDMI 2.0a +#define NVT_CTA861_EXT_TAG_HDR_DYNAMIC_METADATA 7 // CTA861-H HDR Dynamic Metadata Data Block +#define NVT_CTA861_EXT_TAG_NATIVE_VIDEO_RESOLUTION 8 // CTA861.6 Native Video Resolution Data Block +#define NVT_CTA861_EXT_TAG_VIDEO_RSVD_MIN 9 // 9...12 : Reserved for video-related blocks +#define NVT_CTA861_EXT_TAG_VIDEO_RSVD_MAX 12 +#define NVT_CEA861_EXT_TAG_VIDEO_FORMAT_PREFERENCE 13 // CEA861F Video Format Preference Data Block +#define NVT_CEA861_EXT_TAG_YCBCR420_VIDEO 14 // CEA861F YCBCR 4:2:0 Video Data Block +#define NVT_CEA861_EXT_TAG_YCBCR420_CAP 15 // CEA861F YCBCR 4:2:0 Capability Map Data Block +#define NVT_CEA861_EXT_TAG_MISC_AUDIO 16 // CEA Miscellaneous Audio Fields +#define NVT_CEA861_EXT_TAG_VENDOR_SPECIFIC_AUDIO 17 // Vendor-Specific Audio Data Block +#define NVT_CTA861_EXT_TAG_HDMI_AUDIO 18 // Reserved for HDMI Audio Data Block +#define NVT_CTA861_EXT_TAG_ROOM_CONFIGURATION 19 // CTA861-H Room Configuration Data Block +#define NVT_CTA861_EXT_TAG_SPEACKER_LOCATION 20 // CTA861-H Speaker Location Data Block +#define NVT_CTA861_EXT_TAG_AUDIO_RSVD_MIN 21 // 21...31 : Reserved for audio-related blocks +#define 
NVT_CTA861_EXT_TAG_AUDIO_RSVD_MAX            31
+#define NVT_CEA861_EXT_TAG_INFOFRAME                 32  // Infoframe Data Block
+#define NVT_CTA861_EXT_TAG_RSVD                      33  // Reserved
+#define NVT_CTA861_EXT_TAG_DID_TYPE_VII              34  // DisplayID Type VII Video Timing Data Block
+#define NVT_CTA861_EXT_TAG_DID_TYPE_VIII             35  // DisplayID Type VIII Video Timing Data Block
+#define NVT_CTA861_EXT_TAG_RSVD_MIN_1                36  // 36...41 : Reserved for general
+#define NVT_CTA861_EXT_TAG_RSVD_MAX_1                41
+#define NVT_CTA861_EXT_TAG_DID_TYPE_X                42  // DisplayID Type X Video Timing Data Block
+#define NVT_CTA861_EXT_TAG_RSVD_MIN_2                43  // 43...119 : Reserved for general
+#define NVT_CTA861_EXT_TAG_RSVD_MAX_2                119
+#define NVT_CTA861_EXT_TAG_HF_EEODB                  120 // HDMI Forum Edid Extension Override Data Block
+#define NVT_CTA861_EXT_TAG_SCDB                      121 // 0x79 == Tag for Sink Capability Data Block
+#define NVT_CTA861_EXT_TAG_HDMI_RSVD_MIN             122 // 122...127 : Reserved for HDMI
+#define NVT_CTA861_EXT_TAG_HDMI_RSVD_MAX             127
+#define NVT_CTA861_EXT_TAG_RSVD_MIN_3                128 // 128...255 : Reserved for general
+#define NVT_CTA861_EXT_TAG_RSVD_MAX_3                255
+//
+//the extended tag payload size; the size includes the extended tag code
+#define NVT_CEA861_EXT_VIDEO_CAP_SD_SIZE             2
+#define NVT_CEA861_EXT_COLORIMETRY_SD_SIZE           3
+#define NVT_CTA861_EXT_HDR_STATIC_METADATA_SIZE      6
+#define NVT_CTA861_EXT_SCDB_PAYLOAD_MAX_LENGTH       NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH
+//
+//
+#define NVT_CEA861_GET_SHORT_DESCRIPTOR_TAG(a)  (((a)&NVT_CEA861_SHORT_DESCRIPTOR_TAG_MASK)>>NVT_CEA861_SHORT_DESCRIPTOR_TAG_SHIFT)
+#define NVT_CEA861_GET_SHORT_DESCRIPTOR_SIZE(a) ((NvU32)((a)&NVT_CEA861_SHORT_DESCRIPTOR_SIZE_MASK))
+
+
+//********************************
+// VTB Extension related info
+//********************************
+
+#define NVT_VTB_REV_NONE 0
+#define NVT_VTB_REV_A    1
+
+#define NVT_VTB_MAX_PAYLOAD 122
+
+//*************************
+// short descriptor
+//*************************
+#define NVT_CEA861_SD_HEADER_SIZE  1
+#define NVT_CEA861_SD_PAYLOAD_SIZE 31
+#define NVT_CEA861_SD_TOTAL_SIZE   (NVT_CEA861_SD_HEADER_SIZE + NVT_CEA861_SD_PAYLOAD_SIZE)
+
+//*************************
+// short video descriptor
+//*************************
+#define NVT_CEA861_VIDEO_SD_SIZE 1
+// the max total short video descriptors possible; See CEA-861-E, section 7.5,
+// "It is also possible to have more than one of a specific type of data block if necessary
+// to include all of the descriptors needed to describe the sink's capabilities."
+#define NVT_CEA861_VIDEO_MAX_DESCRIPTOR ((NVT_CEA861_MAX_PAYLOAD / NVT_CEA861_SD_TOTAL_SIZE) * (NVT_CEA861_SD_PAYLOAD_SIZE / NVT_CEA861_VIDEO_SD_SIZE) + \
+                                         (NVT_CEA861_MAX_PAYLOAD % NVT_CEA861_SD_TOTAL_SIZE - NVT_CEA861_SD_HEADER_SIZE) / NVT_CEA861_VIDEO_SD_SIZE)
+#define NVT_CTA861_VIDEO_VIC_MASK      0xFF //the VIC mask of the short video descriptor
+#define NVT_CTA861_7BIT_VIDEO_VIC_MASK 0x7F //the 7-bit VIC mask of the short video descriptor
+#define NVT_CTA861_VIDEO_NATIVE_MASK   0x80 //the Native mask of the short video descriptor
+#define NVT_HDMI_YUV_420_PCLK_SUPPORTED_MIN 590000 //the value shall be equal to or larger than 590MHz to support YCbCr in HDMI2.1, unit of 1khz
+
+// CTA-861G supports more SVDs, with indices over 0x7F.
+// All values below 192 will be treated as 7-bit VICs. Values 128~192 shall be forbidden.
+#define NVT_GET_CTA_8BIT_VIC(vic) (((vic) <= NVT_CTA861_7BIT_8BIT_SEPARATE_VALUE) ? ((vic) & NVT_CTA861_7BIT_VIDEO_VIC_MASK) : ((vic) & NVT_CTA861_VIDEO_VIC_MASK))
+//
+
+// According to CEA-861-E Spec.
+// Note 3.
A video timing with a vertical frequency that is an integer multiple +// of 6.00 Hz (i.e. 24.00, 30.00, 60.00, 120.00 or 240.00 Hz) is considered to +// be the same as a video timing with the equivalent detailed timing +// information but where the vertical frequency is adjusted by a factor of +// 1000/1001 (i.e., 24/1.001, 30/1.001, 60/1.001, 120/1.001 or 240/1.001). +// Excluding ceaIndex 1 640x480 which is a PC Mode. +#define NVT_CTA861_TIMING_FRR(_VID_, _RR_) ((_VID_) > 1 && ((_RR_) % 6) == 0) +#define NVT_CEA861_640X480P_59940HZ_4X3 1 // Video Identification Code: format 1 +#define NVT_CEA861_720X480P_59940HZ_4X3 2 // Video Identification Code: format 2 +#define NVT_CEA861_720X480P_59940HZ_16X9 3 // Video Identification Code: format 3 +#define NVT_CEA861_1280X720P_59940HZ_16X9 4 // ... +#define NVT_CEA861_1920X1080I_59940HZ_16X9 5 // ... +#define NVT_CEA861_1440X480I_59940HZ_4X3 6 // ... +#define NVT_CEA861_1440X480I_59940HZ_16X9 7 // ... +#define NVT_CEA861_1440X240P_59940HZ_4X3 8 // ... +#define NVT_CEA861_1440X240P_59940HZ_16X9 9 // ... +#define NVT_CEA861_2880X480I_59940HZ_4X3 10 // ... +#define NVT_CEA861_2880X480I_59940HZ_16X9 11 // ... +#define NVT_CEA861_2880X240P_59940HZ_4X3 12 // ... +#define NVT_CEA861_2880X240P_59940HZ_16X9 13 // ... +#define NVT_CEA861_1440X480P_59940HZ_4X3 14 // ... +#define NVT_CEA861_1440X480P_59940HZ_16X9 15 // ... +#define NVT_CEA861_1920X1080P_59940HZ_16X9 16 // ... +#define NVT_CEA861_720X576P_50000HZ_4X3 17 // ... +#define NVT_CEA861_720X576P_50000HZ_16X9 18 // ... +#define NVT_CEA861_1280X720P_50000HZ_16X9 19 // ... +#define NVT_CEA861_1920X1080I_50000HZ_16X9 20 // ... +#define NVT_CEA861_1440X576I_50000HZ_4X3 21 // ... +#define NVT_CEA861_1440X576I_50000HZ_16X9 22 // ... +#define NVT_CEA861_1440X288P_50000HZ_4X3 23 // ... +#define NVT_CEA861_1440X288P_50000HZ_16X9 24 // ... +#define NVT_CEA861_2880X576I_50000HZ_4X3 25 // ... +#define NVT_CEA861_2880X576I_50000HZ_16X9 26 // ... +#define NVT_CEA861_2880X288P_50000HZ_4X3 27 // ... +#define NVT_CEA861_2880X288P_50000HZ_16X9 28 // ... +#define NVT_CEA861_1440X576P_50000HZ_4X3 29 // ... +#define NVT_CEA861_1440X576P_50000HZ_16X9 30 // ... +#define NVT_CEA861_1920X1080P_50000HZ_16X9 31 // ... +#define NVT_CEA861_1920X1080P_23976HZ_16X9 32 // ... +#define NVT_CEA861_1920X1080P_25000HZ_16X9 33 // ... +#define NVT_CEA861_1920X1080P_29970HZ_16X9 34 // ... +#define NVT_CEA861_2880X480P_59940HZ_4X3 35 // ... +#define NVT_CEA861_2880X480P_59940HZ_16X9 36 // ... +#define NVT_CEA861_2880X576P_50000HZ_4X3 37 // ... +#define NVT_CEA861_2880X576P_50000HZ_16X9 38 // ... +#define NVT_CEA861_1920X1250I_50000HZ_16X9 39 // ... +#define NVT_CEA861_1920X1080I_100000HZ_16X9 40 // ... +#define NVT_CEA861_1280X720P_100000HZ_16X9 41 // ... +#define NVT_CEA861_720X576P_100000HZ_4X3 42 // ... +#define NVT_CEA861_720X576P_100000HZ_16X9 43 // ... +#define NVT_CEA861_1440X576I_100000HZ_4X3 44 // ... +#define NVT_CEA861_1440X576I_100000HZ_16X9 45 // ... +#define NVT_CEA861_1920X1080I_119880HZ_16X9 46 // ... +#define NVT_CEA861_1280X720P_119880HZ_16X9 47 // ... +#define NVT_CEA861_720X480P_119880HZ_4X3 48 // ... +#define NVT_CEA861_720X480P_119880HZ_16X9 49 // ... +#define NVT_CEA861_1440X480I_119880HZ_4X3 50 // ... +#define NVT_CEA861_1440X480I_119880HZ_16X9 51 // ... +#define NVT_CEA861_720X576P_200000HZ_4X3 52 // ... +#define NVT_CEA861_720X576P_200000HZ_16X9 53 // ... +#define NVT_CEA861_1440X576I_200000HZ_4X3 54 // ... +#define NVT_CEA861_1440X576I_200000HZ_16X9 55 // ... +#define NVT_CEA861_720X480P_239760HZ_4X3 56 // ... 
+#define NVT_CEA861_720X480P_239760HZ_16X9 57 // ... +#define NVT_CEA861_1440X480I_239760HZ_4X3 58 // Video Identification Code: format 58 +#define NVT_CEA861_1440X480I_239760HZ_16X9 59 // Video Identification Code: format 59 +#define NVT_CEA861_1280X720P_23976HZ_16X9 60 // ... +#define NVT_CEA861_1280X720P_25000HZ_16X9 61 // ... +#define NVT_CEA861_1280X720P_29970HZ_16X9 62 // ... +#define NVT_CEA861_1920X1080P_119880HZ_16X9 63 // ... +#define NVT_CEA861_1920X1080P_100000HZ_16X9 64 // ... + +// Following modes are from CEA-861F +#define NVT_CEA861_1280X720P_23980HZ_64X27 65 // Video Identification Code: format 65 +#define NVT_CEA861_1280X720P_25000HZ_64X27 66 // Video Identification Code: format 66 +#define NVT_CEA861_1280X720P_29970HZ_64X27 67 // Video Identification Code: format 67 +#define NVT_CEA861_1280X720P_50000HZ_64X27 68 +#define NVT_CEA861_1280X720P_59940HZ_64X27 69 +#define NVT_CEA861_1280X720P_100000HZ_64X27 70 +#define NVT_CEA861_1280X720P_119880HZ_64X27 71 +#define NVT_CEA861_1920X1080P_23980HZ_64X27 72 +#define NVT_CEA861_1920X1080P_25000HZ_64X27 73 +#define NVT_CEA861_1920X1080P_29970HZ_64X27 74 +#define NVT_CEA861_1920X1080P_50000HZ_64X27 75 +#define NVT_CEA861_1920X1080P_59940HZ_64X27 76 +#define NVT_CEA861_1920X1080P_100000HZ_64X27 77 +#define NVT_CEA861_1920X1080P_119880HZ_64X27 78 +#define NVT_CEA861_1680X720P_23980HZ_64X27 79 +#define NVT_CEA861_1680X720P_25000HZ_64X27 80 +#define NVT_CEA861_1680X720P_29970HZ_64X27 81 +#define NVT_CEA861_1680X720P_50000HZ_64X27 82 +#define NVT_CEA861_1680X720P_59940HZ_64X27 83 +#define NVT_CEA861_1680X720P_100000HZ_64X27 84 +#define NVT_CEA861_1680X720P_119880HZ_64X27 85 +#define NVT_CEA861_2560X1080P_23980HZ_64X27 86 +#define NVT_CEA861_2560X1080P_25000HZ_64X27 87 +#define NVT_CEA861_2560X1080P_29970HZ_64X27 88 +#define NVT_CEA861_2560X1080P_50000HZ_64X27 89 +#define NVT_CEA861_2560X1080P_59940HZ_64X27 90 +#define NVT_CEA861_2560X1080P_100000HZ_64X27 91 +#define NVT_CEA861_2560X1080P_119880HZ_64X27 92 +#define NVT_CEA861_3840X2160P_23980HZ_16X9 93 +#define NVT_CEA861_3840X2160P_25000HZ_16X9 94 +#define NVT_CEA861_3840X2160P_29970HZ_16X9 95 +#define NVT_CEA861_3840X2160P_50000HZ_16X9 96 +#define NVT_CEA861_3840X2160P_59940HZ_16X9 97 +#define NVT_CEA861_4096X2160P_23980HZ_256X135 98 +#define NVT_CEA861_4096X2160P_25000HZ_256X135 99 +#define NVT_CEA861_4096X2160P_29970HZ_256X135 100 +#define NVT_CEA861_4096X2160P_50000HZ_256X135 101 +#define NVT_CEA861_4096X2160P_59940HZ_256X135 102 +#define NVT_CEA861_4096X2160P_23980HZ_64X27 103 +#define NVT_CEA861_4096X2160P_25000HZ_64X27 104 +#define NVT_CEA861_4096X2160P_29970HZ_64X27 105 +#define NVT_CEA861_4096X2160P_50000HZ_64X27 106 +#define NVT_CEA861_4096X2160P_59940HZ_64X27 107 + +// Following modes are from CTA-861G +#define NVT_CTA861_1280X720P_47950HZ_16X9 108 +#define NVT_CTA861_1280X720P_47950HZ_64x27 109 +#define NVT_CTA861_1680X720P_47950HZ_64x27 110 +#define NVT_CTA861_1920X1080P_47950HZ_16X9 111 +#define NVT_CTA861_1920X1080P_47950HZ_64x27 112 +#define NVT_CTA861_2560X1080P_47950HZ_64x27 113 +#define NVT_CTA861_3840X2160P_47950HZ_16X9 114 +#define NVT_CTA861_4096x2160p_47950HZ_256X135 115 +#define NVT_CTA861_3840x2160p_47950HZ_64x276 116 +#define NVT_CTA861_3840x2160p_100000HZ_16X9 117 +#define NVT_CTA861_3840x2160p_119880HZ_16X9 118 +#define NVT_CTA861_3840x2160p_100000HZ_64X276 119 +#define NVT_CTA861_3840x2160p_119880HZ_64X276 120 +#define NVT_CTA861_5120x2160p_23980HZ_64X276 121 +#define NVT_CTA861_5120x2160p_25000HZ_64X276 122 +#define NVT_CTA861_5120x2160p_29970HZ_64X276 123 
+#define NVT_CTA861_5120x2160p_47950Hz_64X276 124 +#define NVT_CTA861_5120x2160p_50000HZ_64X276 125 +#define NVT_CTA861_5120x2160p_59940HZ_64X276 126 +#define NVT_CTA861_5120x2160p_100000HZ_64X276 127 + +#define NVT_CTA861_7BIT_8BIT_SEPARATE_VALUE 192 + +#define NVT_CTA861_5120x2160p_119880HZ_64X276 193 +#define NVT_CTA861_7680x4320p_23980HZ_16X9 194 +#define NVT_CTA861_7680x4320p_25000HZ_16X9 195 +#define NVT_CTA861_7680x4320p_29970HZ_16X9 196 +#define NVT_CTA861_7680x4320p_47950HZ_16X9 197 +#define NVT_CTA861_7680x4320p_50000HZ_16X9 198 +#define NVT_CTA861_7680x4320p_59940HZ_16X9 199 +#define NVT_CTA861_7680x4320p_100000HZ_16X9 200 +#define NVT_CTA861_7680x4320p_119880HZ_16X9 201 +#define NVT_CTA861_7680x4320p_23980HZ_64X276 202 +#define NVT_CTA861_7680x4320p_25000HZ_64X276 203 +#define NVT_CTA861_7680x4320p_29970HZ_64X276 204 +#define NVT_CTA861_7680x4320p_47950HZ_64X276 205 +#define NVT_CTA861_7680x4320p_50000HZ_64X276 206 +#define NVT_CTA861_7680x4320p_59940HZ_64X276 207 +#define NVT_CTA861_7680x4320p_100000HZ_64X276 208 +#define NVT_CTA861_7680x4320p_119880HZ_64X276 209 +#define NVT_CTA861_10240x4320p_23980HZ_64X276 210 +#define NVT_CTA861_10240x4320p_25000HZ_64X276 211 +#define NVT_CTA861_10240x4320p_29970HZ_64X276 212 +#define NVT_CTA861_10240x4320p_47950HZ_64X276 213 +#define NVT_CTA861_10240x4320p_50000HZ_64X276 214 +#define NVT_CTA861_10240x4320p_59940HZ_64X276 215 +#define NVT_CTA861_10240x4320p_100000HZ_64X276 216 +#define NVT_CTA861_10240x4320p_119880HZ_64X276 217 +#define NVT_CTA861_4096x2160p_100000HZ_256X135 218 +#define NVT_CTA861_4096x2160p_119880HZ_256X135 219 + +// When defining new CEA861 format: +// Search code base to update array of certain category of CEA formats, such as 720p, 1080i, etc... +// Ideally, it's better to define these groups in one module. However, they should not reside +// in this .h file, thus updating these groups in other file is still needed. +// example of the group: 720p: NVT_CEA861_1280X720P_59940HZ_16X9, +// NVT_CEA861_1280X720P_100000HZ_16X9, +// NVT_CEA861_1280X720P_119880HZ_16X9 + +// According to CEA-861-I Spec. 
+// Table 11 - Resolution Identification (RID)
+#define NVT_CTA861_OVT_TIMING_FRR(_FLAG_, _RR_) (((_FLAG_) & (NVT_FLAG_CTA_OVT_TIMING)) != 0 && ((_RR_) % 6) == 0 && (_RR_) != 300)
+#define NVT_CTA861_RID_NONE              NVT_INFOFRAME_CTRL_DONTCARE
+#define NVT_CTA861_RID_1280x720p_16x9    1
+#define NVT_CTA861_RID_1280x720p_64x27   2
+#define NVT_CTA861_RID_1680x720p_64x27   3
+#define NVT_CTA861_RID_1920x1080p_16x9   4
+#define NVT_CTA861_RID_1920x1080p_64x27  5
+#define NVT_CTA861_RID_2560x1080p_64x27  6
+#define NVT_CTA861_RID_3840x1080p_32x9   7
+#define NVT_CTA861_RID_2560x1440p_16x9   8
+#define NVT_CTA861_RID_3440x1440p_64x27  9
+#define NVT_CTA861_RID_5120x1440p_32x9   10
+#define NVT_CTA861_RID_3840x2160p_16x9   11
+#define NVT_CTA861_RID_3840x2160p_64x27  12
+#define NVT_CTA861_RID_5120x2160p_64x27  13
+#define NVT_CTA861_RID_7680x2160p_32x9   14
+#define NVT_CTA861_RID_5120x2880p_16x9   15
+#define NVT_CTA861_RID_5120x2880p_64x27  16
+#define NVT_CTA861_RID_6880x2880p_64x27  17
+#define NVT_CTA861_RID_10240x2880p_32x9  18
+#define NVT_CTA861_RID_7680x4320p_16x9   19
+#define NVT_CTA861_RID_7680x4320p_64x27  20
+#define NVT_CTA861_RID_10240x4320p_64x27 21
+#define NVT_CTA861_RID_15360x4320p_32x9  22
+#define NVT_CTA861_RID_11520x6480p_16x9  23
+#define NVT_CTA861_RID_11520x6480p_64x27 24
+#define NVT_CTA861_RID_15360x6480p_64x27 25
+#define NVT_CTA861_RID_15360x8640p_16x9  26
+#define NVT_CTA861_RID_15360x8640p_64x27 27
+#define NVT_CTA861_RID_20480x8640p_64x27 28
+#define NVT_CTA861_RID_EXCEED_RESOLUTION NVT_CTA861_RID_NONE
+
+// Table 12 - AVI InfoFrame Video Format Frame Rate
+#define NVT_CTA861_FR_NO_DATA NVT_INFOFRAME_CTRL_DONTCARE
+#define NVT_CTA861_FR_2398  1
+#define NVT_CTA861_FR_2400  2
+#define NVT_CTA861_FR_2500  3
+#define NVT_CTA861_FR_2997  4
+#define NVT_CTA861_FR_3000  5
+#define NVT_CTA861_FR_4795  6
+#define NVT_CTA861_FR_4800  7
+#define NVT_CTA861_FR_5000  8
+#define NVT_CTA861_FR_5994  9
+#define NVT_CTA861_FR_6000  10
+#define NVT_CTA861_FR_10000 11
+#define NVT_CTA861_FR_11988 12
+#define NVT_CTA861_FR_12000 13
+#define NVT_CTA861_FR_14386 14
+#define NVT_CTA861_FR_14400 15
+#define NVT_CTA861_FR_20000 16
+#define NVT_CTA861_FR_23976 17
+#define NVT_CTA861_FR_24000 18
+#define NVT_CTA861_FR_30000 19
+#define NVT_CTA861_FR_35964 20
+#define NVT_CTA861_FR_36000 21
+#define NVT_CTA861_FR_40000 22
+#define NVT_CTA861_FR_47952 23
+#define NVT_CTA861_FR_48000 24
+
+//*************************
+// short audio descriptor
+//*************************
+#define NVT_CEA861_AUDIO_SD_SIZE sizeof(NVT_3BYTES)
+// the max total short audio descriptors possible; See CEA-861-E, section 7.5 on repeated types
+#define NVT_CEA861_AUDIO_MAX_DESCRIPTOR ((NVT_CEA861_MAX_PAYLOAD / NVT_CEA861_SD_TOTAL_SIZE) * (NVT_CEA861_SD_PAYLOAD_SIZE / NVT_CEA861_AUDIO_SD_SIZE) + \
+                                         (NVT_CEA861_MAX_PAYLOAD % NVT_CEA861_SD_TOTAL_SIZE - NVT_CEA861_SD_HEADER_SIZE) / NVT_CEA861_AUDIO_SD_SIZE)
+//
+// short audio descriptor - byte 1
+#define NVT_CEA861_AUDIO_FORMAT_MASK  0x78 //the audio format mask of the CEA short audio descriptor
+#define NVT_CEA861_AUDIO_FORMAT_SHIFT 3    //the audio format data shift
+//
+#define NVT_CEA861_AUDIO_FORMAT_RSVD       0  // short audio descriptor format - reserved
+#define NVT_CEA861_AUDIO_FORMAT_LINEAR_PCM 1  // short audio descriptor format - Linear PCM (uncompressed)
+#define NVT_CEA861_AUDIO_FORMAT_AC3        2  // short audio descriptor format - AC3
+#define NVT_CEA861_AUDIO_FORMAT_MPEG1      3  // short audio descriptor format - MPEG1 (layers 1 & 2)
+#define NVT_CEA861_AUDIO_FORMAT_MP3        4  // short audio descriptor format - MP3 (MPEG1 layer 3)
+#define
+//
+// short audio descriptor - byte 1
+#define NVT_CEA861_AUDIO_FORMAT_MASK 0x78 // the audio format mask of the CEA short audio descriptor
+#define NVT_CEA861_AUDIO_FORMAT_SHIFT 3 // the audio format data shift
+//
+#define NVT_CEA861_AUDIO_FORMAT_RSVD 0 // short audio descriptor format - reserved
+#define NVT_CEA861_AUDIO_FORMAT_LINEAR_PCM 1 // short audio descriptor format - Linear PCM (uncompressed)
+#define NVT_CEA861_AUDIO_FORMAT_AC3 2 // short audio descriptor format - AC3
+#define NVT_CEA861_AUDIO_FORMAT_MPEG1 3 // short audio descriptor format - MPEG1 (layers 1 & 2)
+#define NVT_CEA861_AUDIO_FORMAT_MP3 4 // short audio descriptor format - MP3 (MPEG1 layer 3)
+#define NVT_CEA861_AUDIO_FORMAT_MPEG2 5 // short audio descriptor format - MPEG2 (multichannel)
+#define NVT_CEA861_AUDIO_FORMAT_AAC 6 // short audio descriptor format - AAC
+#define NVT_CEA861_AUDIO_FORMAT_DTS 7 // short audio descriptor format - DTS
+#define NVT_CEA861_AUDIO_FORMAT_ATRAC 8 // short audio descriptor format - ATRAC
+#define NVT_CEA861_AUDIO_FORMAT_ONE_BIT 9 // short audio descriptor format - One Bit Audio
+#define NVT_CEA861_AUDIO_FORMAT_DDP 10 // short audio descriptor format - DDP (Dolby Digital Plus)
+
+#define NVT_CEA861_AUDIO_FORMAT_DTS_HD 11 // short audio descriptor format - DTS-HD
+#define NVT_CEA861_AUDIO_FORMAT_MAT 12 // short audio descriptor format - MAT (MLP)
+#define NVT_CEA861_AUDIO_FORMAT_DST 13 // short audio descriptor format - DST
+#define NVT_CEA861_AUDIO_FORMAT_WMA_PRO 14 // short audio descriptor format - WMA Pro
+#define NVT_CEA861_AUDIO_FORMAT_RSVD15 15 // short audio descriptor format - reserved
+//
+#define NVT_CEA861_AUDIO_MAX_CHANNEL_MASK 7 // max number of channels - 1
+#define NVT_CEA861_AUDIO_MAX_CHANNEL_SHIFT 0 // the max channel count shift
+//
+// short audio descriptor - byte 2
+#define NVT_CEA861_AUDIO_SAMPLE_RATE_MASK 0x7F // the sample rate mask
+#define NVT_CEA861_AUDIO_SAMPLE_RATE_SHIFT 0 // the sample rate shift
+//
+#define NVT_CEA861_AUDIO_SAMPLE_RATE_32KHZ 0x01 // short audio descriptor - sample rate : 32KHz
+#define NVT_CEA861_AUDIO_SAMPLE_RATE_44KHZ 0x02 // short audio descriptor - sample rate : 44.1KHz
+#define NVT_CEA861_AUDIO_SAMPLE_RATE_48KHZ 0x04 // short audio descriptor - sample rate : 48KHz
+#define NVT_CEA861_AUDIO_SAMPLE_RATE_88KHZ 0x08 // short audio descriptor - sample rate : 88.2KHz
+#define NVT_CEA861_AUDIO_SAMPLE_RATE_96KHZ 0x10 // short audio descriptor - sample rate : 96KHz
+#define NVT_CEA861_AUDIO_SAMPLE_RATE_176KHZ 0x20 // short audio descriptor - sample rate : 176.4KHz
+#define NVT_CEA861_AUDIO_SAMPLE_RATE_192KHZ 0x40 // short audio descriptor - sample rate : 192KHz
+#define NVT_CEA861_AUDIO_SAMPLE_RATE_RSVD 0x80 // short audio descriptor - sample rate : reserved
+//
+// short audio descriptor - byte 3
+#define NVT_CEA861_AUDIO_SAMPLE_DEPTH_MASK 0x07 // the uncompressed audio resolution mask
+#define NVT_CEA861_AUDIO_SAMPLE_DEPTH_SHIFT 0 // the uncompressed audio resolution shift
+//
+#define NVT_CEA861_AUDIO_SAMPLE_SIZE_16BIT 0x01 // uncompressed (Linear PCM) audio A/D resolution - 16bit
+#define NVT_CEA861_AUDIO_SAMPLE_SIZE_20BIT 0x02 // uncompressed (Linear PCM) audio A/D resolution - 20bit
+#define NVT_CEA861_AUDIO_SAMPLE_SIZE_24BIT 0x04 // uncompressed (Linear PCM) audio A/D resolution - 24bit
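+//
+// Illustrative decode sketch (not part of the original header): byte 1 of a short
+// audio descriptor "sad" (an NVT_3BYTES, declared later in this header) packs the
+// coding type and the channel count; the "+ 1" recovers the channel count because
+// the field stores (max number of channels - 1).
+//
+//     NvU8 format   = (sad.byte1 & NVT_CEA861_AUDIO_FORMAT_MASK) >> NVT_CEA861_AUDIO_FORMAT_SHIFT;
+//     NvU8 channels = ((sad.byte1 & NVT_CEA861_AUDIO_MAX_CHANNEL_MASK) >> NVT_CEA861_AUDIO_MAX_CHANNEL_SHIFT) + 1;
+//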
+
+//**************************
+// speaker allocation data
+//**************************
+#define NVT_CEA861_SPEAKER_SD_SIZE sizeof(NVT_3BYTES)
+// the max total short speaker descriptors possible; see CEA-861-E, section 7.5 on repeated types
+#define NVT_CEA861_SPEAKER_MAX_DESCRIPTOR ((NVT_CEA861_MAX_PAYLOAD / NVT_CEA861_SD_TOTAL_SIZE) * (NVT_CEA861_SD_PAYLOAD_SIZE / NVT_CEA861_SPEAKER_SD_SIZE) + \
+                                           (NVT_CEA861_MAX_PAYLOAD % NVT_CEA861_SD_TOTAL_SIZE - NVT_CEA861_SD_HEADER_SIZE) / NVT_CEA861_SPEAKER_SD_SIZE)
+#define NVT_CEA861_SPEAKER_ALLOC_MASK 0x7F // the speaker allocation mask
+#define NVT_CEA861_SPEAKER_ALLOC_SHIFT 0 // the speaker allocation shift
+//
+#define NVT_CEA861_SPEAKER_ALLOC_FL_FR 0x01 // speaker allocation : Front Left + Front Right
+#define NVT_CEA861_SPEAKER_ALLOC_LFE 0x02 // speaker allocation : Low Frequency Effect
+#define NVT_CEA861_SPEAKER_ALLOC_FC 0x04 // speaker allocation : Front Center
+#define NVT_CEA861_SPEAKER_ALLOC_RL_RR 0x08 // speaker allocation : Rear Left + Rear Right
+#define NVT_CEA861_SPEAKER_ALLOC_RC 0x10 // speaker allocation : Rear Center
+#define NVT_CEA861_SPEAKER_ALLOC_FLC_FRC 0x20 // speaker allocation : Front Left Center + Front Right Center
+#define NVT_CEA861_SPEAKER_ALLOC_RLC_RRC 0x40 // speaker allocation : Rear Left Center + Rear Right Center
+
+//***********************
+// vendor specific data block (VSDB)
+//***********************
+#define NVT_CEA861_VSDB_HEADER_SIZE 4
+#define NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH 28 // max allowed vendor specific data block payload (in bytes)
+#define NVT_CEA861_HDMI_IEEE_ID 0x0C03
+#define NVT_CEA861_HDMI_LLC_IEEE_ID NVT_CEA861_HDMI_IEEE_ID
+#define NVT_CEA861_NVDA_IEEE_ID 0x44B
+#define NVT_CEA861_HDMI_FORUM_IEEE_ID 0xC45DD8
+#define NVT_CEA861_MSFT_IEEE_ID 0xCA125C
+
+#define NVT_CEA861_VSDB_MAX_BLOCKS 4 // NOTE: the theoretical maximum number of VSDB blocks is
+                                     // (NVT_CEA861_MAX_PAYLOAD / (NVT_CEA861_VSDB_HEADER_SIZE + 1)) (assuming at least 1 byte of payload).
+                                     // As of Sept 2013 there are only 3 different VSDBs defined in the spec, so allocating
+                                     // space for all 24 is overkill. As a tradeoff, this limit is 4 for now; increase it if required.
+
+typedef struct VSDB_DATA
+{
+    NvU32 ieee_id;
+    NvU32 vendor_data_size; // size of data copied to vendor_data (excludes ieee_id from frame)
+    NvU8 vendor_data[NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH];
+} VSDB_DATA;
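+//
+// Illustrative lookup sketch (not part of the original header): finding the HDMI-LLC
+// block in a parsed NVT_EDID_CEA861_INFO "info" (declared later in this header):
+//
+//     const VSDB_DATA *pHdmiLlc = NULL;
+//     for (i = 0; i < info.total_vsdb; i++)
+//     {
+//         if (info.vsdb[i].ieee_id == NVT_CEA861_HDMI_LLC_IEEE_ID)
+//         {
+//             pHdmiLlc = &info.vsdb[i];
+//             break;
+//         }
+//     }
+//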
+
+//*******************************
+// vendor specific video data block (VSVDB)
+//*******************************
+#define NVT_CEA861_VSVDB_MAX_BLOCKS 2 // Dolby Vision, HDR10+ VSVDBs
+#define NVT_CEA861_DV_IEEE_ID 0x00D046
+#define NVT_CEA861_HDR10PLUS_IEEE_ID 0x90848B
+#define NVT_CEA861_VSVDB_PAYLOAD_MAX_LENGTH 25 // max allowed vendor specific video data block payload (in bytes)
+#define NVT_CEA861_VSVDB_VERSION_MASK 0xE0 // VSVDB version mask
+#define NVT_CEA861_VSVDB_VERSION_MASK_SHIFT 5 // VSVDB version shift
+
+typedef struct VSVDB_DATA
+{
+    NvU32 ieee_id;
+    NvU32 vendor_data_size; // size of data copied to vendor_data (excludes ieee_id from frame)
+    NvU8 vendor_data[NVT_CEA861_VSVDB_PAYLOAD_MAX_LENGTH];
+} VSVDB_DATA;
+
+//*******************************
+// Video Format Data Block (VFDB)
+//*******************************
+
+#define NVT_CTA861_VF_MAX_BLOCKS 4
+#define NVT_CTA861_VF_MAX_DESCRIPTORS 30
+
+#define NVT_CTA861_VF_RID_MASK 0x3F
+
+typedef struct tagNVT_RID_CODES
+{
+    NvU16 HVisible;   // horizontal visible
+    NvU8 HSyncPol;    // horizontal sync polarity: 1-negative, 0-positive
+    NvU16 VVisible;   // vertical visible
+    NvU8 VSyncPol;    // vertical sync polarity: 1-negative, 0-positive
+    NvU16 interlaced; // 1-interlaced, 0-progressive
+    NvU32 aspect;     // the display aspect ratio: Hi(aspect) is the horizontal aspect, Low(aspect) the vertical aspect
+    NvU8 rid;         // Resolution Identification (RID)
+} NVT_RID_CODES;
+
+#pragma pack(1)
+typedef struct tagVFD_ONE_BYTE
+{
+    NvU8 rid : 6;
+    NvU8 fr24 : 1;
+    NvU8 bfr50 : 1;
+} VFD_ONE_BYTE;
+
+typedef struct tagVFD_TWO_BYTE
+{
+    VFD_ONE_BYTE in_onebyte;
+    NvU8 frRate : 6;
+    NvU8 fr144 : 1;
+    NvU8 bfr60 : 1;
+} VFD_TWO_BYTE;
+
+typedef struct tagVFD_THREE_BYTE
+{
+    VFD_TWO_BYTE in_twobyte;
+    NvU8 fr48 : 1;
+    NvU8 f31_37 : 7;
+} VFD_THREE_BYTE;
+
+typedef struct tagVFD_FOUR_BYTE
+{
+    VFD_THREE_BYTE in_threebyte;
+    NvU8 f40_47;
+} VFD_FOUR_BYTE;
+
+typedef struct tagVFDB_DATA
+{
+    struct {
+        NvU8 vfd_len : 2;
+        NvU8 f22_25 : 4;
+        NvU8 ntsc : 1;
+        NvU8 y420 : 1;
+    } info;
+
+    NvU8 total_vfd;
+    NvU8 video_format_desc[NVT_CTA861_VF_MAX_DESCRIPTORS];
+} VFDB_DATA;
+
+typedef struct tagNVT_DV_STATIC_METADATA_TYPE0
+{
+    // first byte
+    NvU8 supports_YUV422_12bit : 1;
+    NvU8 supports_2160p60hz : 1;
+    NvU8 supports_global_dimming : 1;
+    NvU8 reserved_1 : 2;
+    NvU8 VSVDB_version : 3;
+
+    // second to fourth byte
+    NvU8 cc_red_y_3_0 : 4;
+    NvU8 cc_red_x_3_0 : 4;
+    NvU8 cc_red_x_11_4 : 8;
+    NvU8 cc_red_y_11_4 : 8;
+
+    NvU8 cc_green_y_3_0 : 4;
+    NvU8 cc_green_x_3_0 : 4;
+    NvU8 cc_green_x_11_4 : 8;
+    NvU8 cc_green_y_11_4 : 8;
+
+    NvU8 cc_blue_y_3_0 : 4;
+    NvU8 cc_blue_x_3_0 : 4;
+    NvU8 cc_blue_x_11_4 : 8;
+    NvU8 cc_blue_y_11_4 : 8;
+
+    NvU8 cc_white_y_3_0 : 4;
+    NvU8 cc_white_x_3_0 : 4;
+    NvU8 cc_white_x_11_4 : 8;
+    NvU8 cc_white_y_11_4 : 8;
+
+    NvU8 target_max_pq_3_0 : 4;
+    NvU8 target_min_pq_3_0 : 4;
+    NvU8 target_min_pq_11_4 : 8;
+    NvU8 target_max_pq_11_4 : 8;
+
+    NvU8 dm_version_minor : 4;
+    NvU8 dm_version_major : 4;
+
+    NvU8 reserved_2 : 8;
+    NvU8 reserved_3 : 8;
+    NvU8 reserved_4 : 8;
+    NvU8 reserved_5 : 8;
+} NVT_DV_STATIC_METADATA_TYPE0;
+
+typedef struct tagNVT_DV_STATIC_METADATA_TYPE1
+{
+    // first byte
+    NvU8 supports_YUV422_12bit : 1;
+    NvU8 supports_2160p60hz : 1;
+    NvU8 dm_version : 3;
+    NvU8 VSVDB_version : 3;
+
+    // second byte
+    NvU8 supports_global_dimming : 1;
+    NvU8 target_max_luminance : 7;
+
+    // third byte
+    NvU8 colorimetry : 1;
+    NvU8 target_min_luminance : 7;
+
+    // fourth byte
+    NvU8 reserved : 8;
+    // fifth to tenth byte
+    NvU8 cc_red_x : 8;
+    NvU8 cc_red_y : 8;
+    NvU8 cc_green_x : 8;
+    NvU8 cc_green_y : 8;
+    NvU8 cc_blue_x : 8;
+    NvU8 cc_blue_y : 8;
+} NVT_DV_STATIC_METADATA_TYPE1;
+
+typedef struct tagNVT_DV_STATIC_METADATA_TYPE1_1
+{
+    // first byte
+    NvU8 supports_YUV422_12bit : 1;
+    NvU8 supports_2160p60hz : 1;
+    NvU8 dm_version : 3;
+    NvU8 VSVDB_version : 3;
+
+    // second byte
+    NvU8 supports_global_dimming : 1;
+    NvU8 target_max_luminance : 7;
+
+    // third byte
+    NvU8 colorimetry : 1;
+    NvU8 target_min_luminance : 7;
+
+    // fourth byte
+    NvU8 interface_supported_by_sink : 2;
+    NvU8 unique_By : 3;
+    NvU8 unique_Bx : 3;
+
+    // fifth byte
+    NvU8 unique_Ry_bit_0 : 1;
+    NvU8 unique_Gx : 7;
+
+    // sixth byte
+    NvU8 unique_Ry_bit_1 : 1;
+    NvU8 unique_Gy : 7;
+
+    // seventh byte
+    NvU8 unique_Rx : 5;
+    NvU8 unique_Ry_bit_2_to_4 : 3;
+
+} NVT_DV_STATIC_METADATA_TYPE1_1;
+
+typedef struct tagNVT_DV_STATIC_METADATA_TYPE2
+{
+    // first byte
+    NvU8 supports_YUV422_12bit : 1;
+    NvU8 supports_backlight_control : 1;
+    NvU8 dm_version : 3;
+    NvU8 VSVDB_version : 3;
+
+    // second byte
+    NvU8 reserved : 2;
+    NvU8 supports_global_dimming : 1;
+    NvU8 target_min_luminance : 5;
+
+    // third byte
+    NvU8 interface_supported_by_sink : 2;
+    NvU8 parity : 1;
+    NvU8 target_max_luminance : 5;
+
+    // fourth byte
+    NvU8 supports_10b_12b_444_bit1 : 1;
+    NvU8 unique_Gx : 7;
+
+    // fifth byte
+    NvU8 supports_10b_12b_444_bit0 : 1;
+    NvU8 unique_Gy : 7;
+
+    // sixth byte
+    NvU8 unique_Bx : 3;
+    NvU8 unique_Rx : 5;
+
+    // seventh byte
+    NvU8 unique_By : 3;
+    NvU8 unique_Ry : 5;
+
+} NVT_DV_STATIC_METADATA_TYPE2;
+
+typedef struct tagNVT_HDR10PLUS_INFO
+{
+    // first byte
+    NvU8 application_version : 2;
+    NvU8 full_frame_peak_luminance_index : 2;
+    NvU8 peak_luminance_index : 4;
+} NVT_HDR10PLUS_INFO;
+#pragma pack()
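+//
+// Illustrative reassembly sketch (not part of the original header): the type-0
+// chromaticity coordinates above are split 12-bit values, so e.g. the red x
+// coordinate of a NVT_DV_STATIC_METADATA_TYPE0 "*p" is recovered as:
+//
+//     NvU16 cc_red_x = ((NvU16)p->cc_red_x_11_4 << 4) | p->cc_red_x_3_0;
+//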
+
+//***************************
+// colorimetry data block
+//***************************
+//
+// Colorimetry capabilities - byte 3
+#define NVT_CEA861_COLORIMETRY_MASK 0xFF // the colorimetry cap mask
+#define NVT_CEA861_COLORIMETRY_SHIFT 0 // the colorimetry cap shift
+
+#define NVT_CEA861_COLORIMETRY_NO_DATA 0x00
+#define NVT_CEA861_COLORIMETRY_xvYCC_601 0x01 // xvYCC601 capable
+#define NVT_CEA861_COLORIMETRY_xvYCC_709 0x02 // xvYCC709 capable
+#define NVT_CEA861_COLORIMETRY_sYCC_601 0x04 // sYCC601 capable
+#define NVT_CEA861_COLORIMETRY_AdobeYCC_601 0x08 // AdobeYCC601 capable
+#define NVT_CEA861_COLORIMETRY_AdobeRGB 0x10 // AdobeRGB capable
+#define NVT_CEA861_COLORIMETRY_BT2020cYCC 0x20 // BT2020 cYCbCr (constant luminance) capable
+#define NVT_CEA861_COLORIMETRY_BT2020YCC 0x40 // BT2020 Y'CbCr capable
+#define NVT_CEA861_COLORIMETRY_BT2020RGB 0x80 // BT2020 RGB capable
+// Colorimetry capabilities - byte 4
+#define NVT_CEA861_COLORIMETRY_defaultRGB 0x10 // based on the default chromaticity in the Basic Display Parameters and Feature Block
+#define NVT_CEA861_COLORIMETRY_sRGB 0x20 // IEC 61966-2-1
+#define NVT_CEA861_COLORIMETRY_ICtCp 0x40 // ITU-R BT.2100 ICtCp
+#define NVT_CEA861_COLORIMETRY_ST2113RGB 0x80 // SMPTE ST 2113 R'G'B'
+//
+// gamut-related metadata capabilities - byte 4
+#define NVT_CEA861_GAMUT_METADATA_MASK 0x8F // the colorimetry or gamut-related metadata block mask
+#define NVT_CEA861_GAMUT_METADATA_SHIFT 0 // the metadata block shift
+//
+#define NVT_CEA861_GAMUT_METADATA_MD0 0x01 // MD0
+#define NVT_CEA861_GAMUT_METADATA_MD1 0x02 // MD1
+#define NVT_CEA861_GAMUT_METADATA_MD2 0x04 // MD2
+#define NVT_CEA861_GAMUT_METADATA_MD3 0x08 // MD3
+
+//***************************
+// HDR static metadata data block
+//***************************
+//
+typedef struct tagNVT_5BYTES
+{
+    NvU8 byte1;
+    NvU8 byte2;
+    NvU8 byte3;
+    NvU8 byte4;
+    NvU8 byte5;
+} NVT_5BYTES;
+
+// Supported Electro-Optical Transfer Function - byte 3
+#define NVT_CEA861_EOTF_MASK 0x3F // the EOTF cap mask
+#define NVT_CEA861_EOTF_SHIFT 0 // the EOTF cap shift
+//
+#define NVT_CEA861_EOTF_GAMMA_SDR 0x01 // ET_0 Traditional gamma - SDR Luminance Range
+#define NVT_CEA861_EOTF_GAMMA_HDR 0x02 // ET_1 Traditional gamma - HDR Luminance Range
+#define NVT_CEA861_EOTF_SMPTE_ST2084 0x04 // ET_2 SMPTE ST2084 EOTF (a.k.a. PQ - Perceptual Quantizer EOTF)
+#define NVT_CEA861_EOTF_FUTURE 0x08 // ET_3 Future EOTF
+
+//
+// Supported Static Metadata Descriptor - byte 4
+#define NVT_CEA861_STATIC_METADATA_DESCRIPTOR_MASK 0x01 // the supported static metadata descriptor block mask
+#define NVT_CEA861_STATIC_METADATA_SHIFT 0 // the metadata block shift
+//
+#define NVT_CEA861_STATIC_METADATA_SM0 0x00 // Static Metadata Type 1
+
+//
+// Desired Content Max Luminance data - byte 5
+#define NVT_CEA861_MAX_CLL_MASK 0xFF // the desired content max luminance level (MaxCLL) data block mask
+#define NVT_CEA861_MAX_CLL_SHIFT 0 // the metadata block shift
+
+// Desired Content Max Frame-Average Luminance data - byte 6
+#define NVT_CEA861_MAX_FALL_MASK 0xFF // the desired content max frame-average luminance (MaxFALL) data block mask
+#define NVT_CEA861_MAX_FALL_SHIFT 0 // the metadata block shift
+
+// Desired Content Min Luminance data - byte 7
+#define NVT_CEA861_MIN_CLL_MASK 0xFF // the desired content min luminance level (MinCLL) data block mask
+#define NVT_CEA861_MIN_CLL_SHIFT 0 // the metadata block shift
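+//
+// Illustrative decode sketch (not part of the original header; the luminance coding
+// is quoted from CTA-861.3 from memory, so treat it as an assumption): for a parsed
+// NVT_5BYTES "hdr" holding data block bytes 3..7 in byte1..byte5:
+//
+//     NvBool supportsPQ = (hdr.byte1 & NVT_CEA861_EOTF_SMPTE_ST2084) != 0;
+//     NvU8   maxCLL     = hdr.byte3 & NVT_CEA861_MAX_CLL_MASK; // luminance = 50 * 2^(maxCLL/32) cd/m2
+//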
+
+//***************************
+// video capability data block
+//***************************
+//
+#define NVT_CEA861_VIDEO_CAPABILITY_MASK 0x7F // the video capability data block mask
+#define NVT_CEA861_VIDEO_CAPABILITY_SHIFT 0 // the video capability data block shift
+//
+#define NVT_CEA861_VIDEO_CAPABILITY_S_CE0 0x01 // S_CE0
+#define NVT_CEA861_VIDEO_CAPABILITY_S_CE1 0x02 // S_CE1
+#define NVT_CEA861_VIDEO_CAPABILITY_S_IT0 0x04 // S_IT0
+#define NVT_CEA861_VIDEO_CAPABILITY_S_IT1 0x08 // S_IT1
+#define NVT_CEA861_VIDEO_CAPABILITY_S_PT0 0x10 // S_PT0
+#define NVT_CEA861_VIDEO_CAPABILITY_S_PT1 0x20 // S_PT1
+#define NVT_CEA861_VIDEO_CAPABILITY_S_QS 0x40 // S_QS
+
+//**************************
+// EDID 861 Extension Info
+//**************************
+typedef struct tagNVT_3BYTES
+{
+    NvU8 byte1;
+    NvU8 byte2;
+    NvU8 byte3;
+} NVT_3BYTES;
+
+//***********************
+// VCDB specific data
+//***********************
+#define NVT_CEA861_VCDB_QS_MASK 0x40 // quantization range selectable mask
+#define NVT_CEA861_VCDB_QS_SHIFT 6 // quantization range selectable shift
+
+#define NVT_CEA861_VCDB_S_PT_MASK 0x30 // PT over/underscan behavior mask
+#define NVT_CEA861_VCDB_S_PT_SHIFT 4 // PT over/underscan behavior shift
+#define NVT_CEA861_VCDB_S_PT_NO_DATA 0x00
+#define NVT_CEA861_VCDB_S_PT_ALWAYS_OVERSCAN 0x01
+#define NVT_CEA861_VCDB_S_PT_ALWAYS_UNDERSCAN 0x02
+#define NVT_CEA861_VCDB_S_PT_OVER_OR_UNDERSCAN 0x03
+
+#define NVT_CEA861_VCDB_S_IT_MASK 0x0C // IT over/underscan behavior mask
+#define NVT_CEA861_VCDB_S_IT_SHIFT 2 // IT over/underscan behavior shift
+#define NVT_CEA861_VCDB_S_IT_NOT_SUPPORTED 0x00
+#define NVT_CEA861_VCDB_S_IT_ALWAYS_OVERSCAN 0x01
+#define NVT_CEA861_VCDB_S_IT_ALWAYS_UNDERSCAN 0x02
+#define NVT_CEA861_VCDB_S_IT_OVER_OR_UNDERSCAN 0x03
+
+#define NVT_CEA861_VCDB_S_CE_MASK 0x03 // CE over/underscan behavior mask
+#define NVT_CEA861_VCDB_S_CE_SHIFT 0 // CE over/underscan behavior shift
+#define NVT_CEA861_VCDB_S_CE_NOT_SUPPORTED 0x00
+#define NVT_CEA861_VCDB_S_CE_ALWAYS_OVERSCAN 0x01
+#define NVT_CEA861_VCDB_S_CE_ALWAYS_UNDERSCAN 0x02
+#define NVT_CEA861_VCDB_S_CE_OVER_OR_UNDERSCAN 0x03
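+//
+// Illustrative decode sketch (not part of the original header): extracting the IT
+// over/underscan behavior from a VCDB payload byte "vcdbByte":
+//
+//     NvU8 itScan = (vcdbByte & NVT_CEA861_VCDB_S_IT_MASK) >> NVT_CEA861_VCDB_S_IT_SHIFT;
+//     // itScan == NVT_CEA861_VCDB_S_IT_ALWAYS_UNDERSCAN means IT content is always underscanned
+//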
+
+//
+typedef struct tagNVT_2BYTES
+{
+    NvU8 byte1;
+    NvU8 byte2;
+} NVT_2BYTES;
+
+#pragma pack(1)
+#define NVT_CTA861_DID_MAX_DATA_BLOCK 4
+//***********************
+// DisplayID VII Video Timing Data Block (T7VDB)
+//***********************
+#define NVT_CTA861_DID_TYPE7_DESCRIPTORS_MIN 1
+#define NVT_CTA861_DID_TYPE7_DESCRIPTORS_MAX 1
+#define NVT_CTA861_DID_TYPE7_DESCRIPTORS_LENGTH 20
+
+typedef struct tagDID_TYPE7_DATA
+{
+    struct {
+        NvU8 revision : 3;
+        NvU8 dsc_pt : 1;
+        NvU8 t7_m : 3;
+        NvU8 F37 : 1;
+    } version;
+
+    NvU8 total_descriptors;
+    NvU8 payload[29]; // with t7_m = 0 only 20 bytes are used
+} DID_TYPE7_DATA;
+
+//***********************
+// DisplayID VIII Video Timing Data Block (T8VDB)
+//***********************
+#define NVT_CTA861_DID_TYPE8_ONE_BYTE_DESCRIPTOR 1
+#define NVT_CTA861_DID_TYPE8_TWO_BYTE_DESCRIPTOR 2
+#define NVT_CTA861_DID_TYPE8_DESCRIPTORS_MIN 1
+#define NVT_CTA861_DID_TYPE8_ONE_BYTE_DESCRIPTORS_MAX 28
+#define NVT_CTA861_DID_TYPE8_TWO_BYTE_DESCRIPTORS_MAX 14
+
+typedef struct tagDID_TYPE8_DATA
+{
+    struct {
+        NvU8 revision : 3;
+        NvU8 tcs : 1;
+        NvU8 F34 : 1;
+        NvU8 t8y420 : 1;
+        NvU8 code_type : 2;
+    } version;
+
+    NvU8 total_descriptors;
+    NvU8 payload[NVT_CTA861_DID_TYPE8_ONE_BYTE_DESCRIPTORS_MAX]; // sized for the one-byte descriptor length
+} DID_TYPE8_DATA;
+
+//***********************
+// DisplayID X Video Timing Data Block (T10VDB)
+//***********************
+#define NVT_CTA861_DID_TYPE10_DESCRIPTORS_MIN 1
+#define NVT_CTA861_DID_TYPE10_DESCRIPTORS_MAX 4
+
+typedef struct tagDID_TYPE10_DATA
+{
+    struct {
+        NvU8 revision : 3;
+        NvU8 F33 : 1;
+        NvU8 t10_m : 3;
+        NvU8 F37 : 1;
+    } version;
+
+    NvU8 total_descriptors;
+    NvU8 payload[28]; // room for 4 descriptors of up to 7 bytes each
+} DID_TYPE10_DATA;
+
+//***********************
+// Native Video Resolution Data Block (NVRDB)
+//***********************
+typedef struct tagNATIVE_VIDEO_RESOLUTION_DATA
+{
+    NvU8 native_svr;
+
+    struct {
+        NvU8 img_size : 1;
+        NvU8 f41 : 1;
+        NvU8 f42 : 1;
+        NvU8 f43 : 1;
+        NvU8 f44 : 1;
+        NvU8 f45 : 1;
+        NvU8 f46 : 1;
+        NvU8 sz_prec : 1;
+    } option;
+
+    NvU8 image_size[4];
+} NATIVE_VIDEO_RESOLUTION_DATA;
+
+#pragma pack()
+
+// See CEA-861-E, Tables 42 and 43 (Extended Tags); each bit indicates that the corresponding CEA extended data block value is valid,
+// e.g. if colorimetry is set, then NVT_EDID_CEA861_INFO::colorimetry is valid
+typedef struct tagNVT_VALID_EXTENDED_BLOCKS
+{
+    NvU32 VCDB : 1;
+    NvU32 VSVD : 1;
+    NvU32 colorimetry : 1;
+    NvU32 H14B_VSDB : 1;
+    NvU32 H20_HF_VSDB : 1;
+    NvU32 y420cmdb : 1;
+    NvU32 hdr_static_metadata : 1;
+    NvU32 dv_static_metadata : 1;
+    NvU32 hdr10Plus : 1;
+    NvU32 SCDB : 1;
+    NvU32 HF_EEODB : 1;
+    NvU32 nvda_vsdb : 1;
+    NvU32 msft_vsdb : 1;
+    NvU32 NVRDB : 1;
+} NVT_VALID_EXTENDED_BLOCKS;
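+//
+// Illustrative gating sketch (not part of the original header): callers are expected
+// to test the valid bit before consuming the corresponding extended block of a parsed
+// NVT_EDID_CEA861_INFO "info" (declared below):
+//
+//     if (info.valid.hdr_static_metadata)
+//     {
+//         // info.hdr_static_metadata is safe to read
+//     }
+//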
+
+//*************************
+// extended data blocks
+//*************************
+#define NVT_CEA861_SD_EXT_HEADER_SIZE 1
+
+#define NVT_CEA861_Y420VDB_SD_SIZE 1
+
+// Max number of YUV420 VDBs for each VDB block is 30 per CTA-861-G spec sec. 7.5.10
+// Accommodate 2 blocks
+#define NVT_CEA861_Y420VDB_MAX_DESCRIPTOR 60
+
+#define NVT_CEA861_Y420CMDB_SD_SIZE 1
+
+// Max number of YUV420 SVDs for each VDB block is 30 per CTA-861-G spec sec. 7.5.11
+// Accommodate 2 blocks
+#define NVT_CEA861_Y420CMDB_MAX_DESCRIPTOR 60
+#define NVT_CEA861_VFPDB_SD_SIZE 1
+#define NVT_CEA861_VFPDB_MAX_DESCRIPTOR 16 // NOTE: limited to 16 to avoid allocating too much space. The theoretical maximum is:
+                                           // ((NVT_CEA861_MAX_PAYLOAD / NVT_CEA861_SD_TOTAL_SIZE) * (NVT_CEA861_SD_PAYLOAD_SIZE / NVT_CEA861_VFPDB_SD_SIZE) +
+                                           // (NVT_CEA861_MAX_PAYLOAD % NVT_CEA861_SD_TOTAL_SIZE - NVT_CEA861_SD_HEADER_SIZE - NVT_CEA861_SD_EXT_HEADER_SIZE) / NVT_CEA861_VFPDB_SD_SIZE)
+
+typedef enum tagNVT_CTA861_ORIGIN
+{
+    FROM_CTA861_EXTENSION,
+    FROM_DISPLAYID_13_DATA_BLOCK,
+    FROM_DISPLAYID_20_DATA_BLOCK,
+} NVT_CTA861_ORIGIN;
+
+//
+typedef struct tagEDID_CEA861_INFO
+{
+    NvU8 revision;
+    NvU8 basic_caps;
+
+    // short video descriptor
+    NvU8 total_svd;
+    NvU8 video[NVT_CEA861_VIDEO_MAX_DESCRIPTOR];
+
+    // short audio descriptor
+    NvU8 total_sad;
+    NVT_3BYTES audio[NVT_CEA861_AUDIO_MAX_DESCRIPTOR];
+
+    // speaker allocation data
+    NvU8 total_ssd;
+    NVT_3BYTES speaker[NVT_CEA861_SPEAKER_MAX_DESCRIPTOR];
+
+    // vendor specific data
+    NvU8 total_vsdb;
+    VSDB_DATA vsdb[NVT_CEA861_VSDB_MAX_BLOCKS];
+
+    // vendor specific video data
+    NvU8 total_vsvdb;
+    VSVDB_DATA vsvdb[NVT_CEA861_VSVDB_MAX_BLOCKS];
+
+    // video format data
+    NvU8 total_vfdb;
+    VFDB_DATA vfdb[NVT_CTA861_VF_MAX_BLOCKS];
+
+    // indicates which of the extended data blocks below contain valid data (extended blocks that carry their own total count are excluded)
+    NVT_VALID_EXTENDED_BLOCKS valid;
+    // extended data blocks
+    NVT_2BYTES colorimetry; // Colorimetry Data Block
+    NvU8 video_capability; // Video Capability Block
+
+    // HDR Static Metadata Data Block. See CEA-861.3 HDR Static Metadata Extensions, Section 4.2
+    NVT_5BYTES hdr_static_metadata;
+
+    // VFPDB extended block. See CEA861-H, Section 7.5.12 Video Format Preference Data Block
+    NvU8 total_svr;
+    NvU8 svr_vfpdb[NVT_CEA861_VFPDB_MAX_DESCRIPTOR]; // SVRs of the preferred video formats
+
+    // NVRDB extended block. See CTA-861.6, Section 7.5.18 Native Video Resolution Data Block
+    NATIVE_VIDEO_RESOLUTION_DATA native_video_resolution_db;
+
+    // Y420VDB extended block. See CEA861-F, Section 7.5.10 YCBCR 4:2:0 Video Data Block
+    NvU8 total_y420vdb;
+    NvU8 svd_y420vdb[NVT_CEA861_Y420VDB_MAX_DESCRIPTOR]; // SVDs of video formats that only support YCbCr 4:2:0
+
+    // Y420CMDB extended block. See CEA861-F, Section 7.5.11 YCBCR 4:2:0 Capability Map Data Block
+    NvU8 total_y420cmdb;
+    NvU8 map_y420cmdb[NVT_CEA861_Y420CMDB_MAX_DESCRIPTOR]; // bit map marking the SVDs in video[] that also support YCbCr 4:2:0
+
+    // NVT_EDID_CEA861_INFO::valid.SCDB is set to 1 in case an HF-SCDB is exposed by the sink.
+    NvU32 hfscdbSize;
+    NvU8 hfscdb[NVT_CTA861_EXT_SCDB_PAYLOAD_MAX_LENGTH];
+
+    // DID Type VII Video extended block, see 7.5.17.1 in CTA861-H
+    NvU8 total_did_type7db;
+    DID_TYPE7_DATA did_type7_data_block[NVT_CTA861_DID_MAX_DATA_BLOCK];
+
+    // DID Type VIII Video extended block, see 7.5.17.2 in CTA861-H
+    NvU8 total_did_type8db;
+    DID_TYPE8_DATA did_type8_data_block[NVT_CTA861_DID_MAX_DATA_BLOCK];
+
+    // DID Type X Video extended block, see 7.5.17.3 in CTA861-H
+    NvU8 total_did_type10db;
+    DID_TYPE10_DATA did_type10_data_block[NVT_CTA861_DID_MAX_DATA_BLOCK];
+
+    NvU8 hfeeodb; // HDMI Forum EDID Extension Override Data Block
+} NVT_EDID_CEA861_INFO;
+
+
+//*******************
+// Parsed DisplayID Information
+//*******************
+#define NVT_DISPLAYID_SECTION_MAX_SIZE 251
+#define NVT_DISPLAYID_SECTION_HEADER_LEN 5
+#define NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN 248
+#define NVT_DISPLAYID_DATABLOCK_HEADER_LEN 3
+
+#define NVT_DISPLAYID_PRODUCT_STRING_MAX_LEN 233
+#define NVT_DISPLAYID_COLOR_MAX_WHITEPOINTS 5
+#define NVT_DISPLAYID_COLOR_MAX_PRIMARIES 3
+#define NVT_DISPLAYID_RANGE_LIMITS_MAX_COUNT 2
+#define NVT_DISPLAYID_DISPLAY_INTERFACE_FEATURES_MAX_ADDITIONAL_SUPPORTED_COLORSPACE_EOTF 7
+
+typedef enum tagNVT_SINGLE_TILE_BEHAVIOR
+{
+    NVT_SINGLE_TILE_BEHAVIOR_OTHER = 0,
+    NVT_SINGLE_TILE_BEHAVIOR_SOURCE_DRIVEN,
+    NVT_SINGLE_TILE_BEHAVIOR_SCALE,
+    NVT_SINGLE_TILE_BEHAVIOR_CLONE
+} NVT_SINGLE_TILE_BEHAVIOR;
+
+typedef enum tagNVT_MULTI_TILE_BEHAVIOR
+{
+    NVT_MULTI_TILE_BEHAVIOR_OTHER = 0,
+    NVT_MULTI_TILE_BEHAVIOR_SOURCE_DRIVEN
+} NVT_MULTI_TILE_BEHAVIOR;
+
+typedef struct _tagNVT_TILEDDISPLAY_TOPOLOGY_ID
+{
+    NvU32 vendor_id;
+    NvU16 product_id;
+    NvU32 serial_number;
+} NVT_TILEDDISPLAY_TOPOLOGY_ID;
+
+typedef struct _tagNVT_COLOR_POINT
+{
+    NvU16 x;
+    NvU16 y;
+} NVT_COLOR_POINT;
+
+typedef struct _tagNVT_DISPLAYID_RANGE_LIMITS
+{
+    NvU32 revision;
+    NvU32 pclk_min;
+    NvU32 pclk_max;
+    NvU8 hfreq_min;
+    NvU8 hfreq_max;
+    NvU16 hblank_min;
+    NvU8 vfreq_min;
+    NvU16 vfreq_max;
+    NvU16 vblank_min;
+    NvU8 interlaced : 1;
+    NvU8 cvt : 1;
+    NvU8 cvt_reduced : 1;
+    NvU8 dfd : 1;
+    NvU8 seamless_dynamic_video_timing_change : 1;
+} NVT_DISPLAYID_RANGE_LIMITS;
+
+#define NVT_DID_MAX_EXT_PAYLOAD 122
+
+typedef struct _tagNVT_DISPLAYID_INFO
+{
+    // Top Level Header Information
+    NvU8 version;
+    NvU8 product_type;
+
+    // Product Identification (0 or 1 Blocks Allowed)
+    NvU32 vendor_id;
+    NvU16 product_id;
+    NvU32 serial_number;
+    NvU8 week;
+    NvU8 year;
+    NvU8 product_string[NVT_DISPLAYID_PRODUCT_STRING_MAX_LEN + 1];
+
+    // Display Parameters
+    NvU16 horiz_size;
+    NvU16 vert_size;
+    NvU16 horiz_pixels;
+    NvU16 vert_pixels;
+    NvU8 support_audio : 1;
+    NvU8 separate_audio : 1;
+    NvU8 audio_override : 1;
+    NvU8 power_management : 1;
+    NvU8 fixed_timing : 1;
+    NvU8 fixed_pixel_format : 1;
+    NvU8 rsvd4 : 1;
+    NvU8 deinterlace : 1;
+    NvU16 gamma;
+    NvU8 aspect_ratio;
+    NvU8 depth_overall : 4;
+    NvU8 depth_native : 4;
+
+    // Color Characteristics
+    NvU8 total_white_points;
+    NvU8 total_primaries : 3;
+    NvU8 temporal : 1;
+    NVT_COLOR_POINT white_points[NVT_DISPLAYID_COLOR_MAX_WHITEPOINTS];
+    NVT_COLOR_POINT primaries[NVT_DISPLAYID_COLOR_MAX_PRIMARIES];
+
+    // Range Limits
+    NvU8 rl_num;
+    NVT_DISPLAYID_RANGE_LIMITS range_limits[NVT_DISPLAYID_RANGE_LIMITS_MAX_COUNT];
+
+    // Display Data
+    NvU8 tech_type;
+    NvU8 device_op_mode : 4;
+    NvU8 support_backlight : 1;
+    NvU8 support_intensity : 1;
+    NvU8 rsvd1 : 2;
+    NvU16 horiz_pixel_count;
+    NvU16 vert_pixel_count;
+    NvU8 orientation : 2;
+    NvU8 rotation : 2;
+    NvU8 zero_pixel : 2;
+    NvU8 scan_direction : 2;
+    NvU8 subpixel_info;
+    NvU8 horiz_pitch;
+    NvU8 vert_pitch;
+    NvU8 rsvd2 : 4;
+    NvU8 color_bit_depth : 4;
+    NvU8 white_to_black : 1;
+    NvU8 response_time : 7;
+
+    // Power Settings
+    NvU8 t1_min : 4;
+    NvU8 t1_max : 4;
+    NvU8 t2_max;
+    NvU8 t3_max;
+    NvU8 t4_min;
+    NvU8 t5_min;
+    NvU8 t6_min;
+
+    union
+    {
+        struct
+        {
+            NvU8 rsvd : 3;
+            NvU8 color_map : 1;
+            NvU8 support_2_8v : 1;
+            NvU8 support_12v : 1;
+            NvU8 support_5v : 1;
+            NvU8 support_3_3v : 1;
+            NvU8 rsvd2 : 5;
+            NvU8 DE_mode : 1;
+            NvU8 polarity : 1;
+            NvU8 data_strobe : 1;
+        } lvds;
+
+        struct
+        {
+            NvU8 rsvd : 5;
+            NvU8 DE_mode : 1;
+            NvU8 polarity : 1;
+            NvU8 data_strobe : 1;
+        } proprietary;
+    } u2;
+
+    // Stereo Interface
+    NvU8 stereo_code;
+    union
+    {
+        struct
+        {
+            NvU8 stereo_polarity;
+        } field_sequential;
+
+        struct
+        {
+            NvU8 view_identity;
+        } side_by_side;
+
+        struct
+        {
+            NvU8 interleave_pattern[8];
+        } pixel_interleaved;
+
+        struct
+        {
+            NvU8 rsvd : 5;
+            NvU8 mirroring : 2;
+            NvU8 polarity : 1;
+        } left_right_separate;
+
+        struct
+        {
+            NvU8 num_views;
+            NvU8 code;
+        } multiview;
+    } u3;
+
+    NvU32 tiled_display_revision;
+    struct
+    {
+        NvBool bSingleEnclosure;
+        NvBool bHasBezelInfo;
+        NVT_SINGLE_TILE_BEHAVIOR single_tile_behavior;
+        NVT_MULTI_TILE_BEHAVIOR multi_tile_behavior;
+    } tile_capability;
+
+    struct
+    {
+        NvU32 row;
+        NvU32 col;
+    } tile_topology;
+
+    struct
+    {
+        NvU32 x;
+        NvU32 y;
+    } tile_location;
+
+    struct
+    {
+        NvU32 width;
+        NvU32 height;
+    } native_resolution;
+
+    struct
+    {
+        NvU32 pixel_density;
+        NvU32 top;
+        NvU32 bottom;
+        NvU32 right;
+        NvU32 left;
+    } bezel_info;
+
+    NVT_TILEDDISPLAY_TOPOLOGY_ID tile_topology_id;
+    NvU8 cea_data_block_present;
+
+    NvU8 supported_displayId2_0;
+    union
+    {
+        // Display Interface
+        struct
+        {
+            NvU8 interface_type : 4;
+            union
+            {
+                NvU8 analog_subtype : 4;
+                NvU8 digital_num_links : 4;
+            } u1;
+
+            NvU8 interface_version;
+
+            struct
+            {
+                NvU8 rsvd : 2;
+                NvU8 support_16b : 1;
+                NvU8 support_14b : 1;
+                NvU8 support_12b : 1;
+                NvU8 support_10b : 1;
+                NvU8 support_8b : 1;
+                NvU8 support_6b : 1;
+            } rgb_depth;
+
+            struct
+            {
+                NvU8 rsvd : 2;
+                NvU8 support_16b : 1;
+                NvU8 support_14b : 1;
+                NvU8 support_12b : 1;
+                NvU8 support_10b : 1;
+                NvU8 support_8b : 1;
+                NvU8 support_6b : 1;
+            } ycbcr444_depth;
+
+            struct
+            {
+                NvU8 rsvd : 3;
+                NvU8 support_16b : 1;
+                NvU8 support_14b : 1;
+                NvU8 support_12b : 1;
+                NvU8 support_10b : 1;
+                NvU8 support_8b : 1;
+            } ycbcr422_depth;
+
+            NvU8 content_protection;
+            NvU8 content_protection_version;
+            NvU8 spread_spectrum : 2;
+            NvU8 rsvd3 : 2;
+            NvU8 spread_percent : 4;
+
+        } display_interface;
+
+        // display interface features for DID2.0
+        struct
+        {
+            struct
+            {
+                NvU8 rsvd : 2;
+                NvU8 support_16b : 1;
+                NvU8 support_14b : 1;
+                NvU8 support_12b : 1;
+                NvU8 support_10b : 1;
+                NvU8 support_8b : 1;
+                NvU8 support_6b : 1;
+            } rgb_depth;
+
+            struct
+            {
+                NvU8 rsvd : 2;
+                NvU8 support_16b : 1;
+                NvU8 support_14b : 1;
+                NvU8 support_12b : 1;
+                NvU8 support_10b : 1;
+                NvU8 support_8b : 1;
+                NvU8 support_6b : 1;
+            } ycbcr444_depth;
+
+            struct
+            {
+                NvU8 rsvd : 3;
+                NvU8 support_16b : 1;
+                NvU8 support_14b : 1;
+                NvU8 support_12b : 1;
+                NvU8 support_10b : 1;
+                NvU8 support_8b : 1;
+            } ycbcr422_depth;
+
+            struct
+            {
+                NvU8 rsvd : 3;
+                NvU8 support_16b : 1;
+                NvU8 support_14b : 1;
+                NvU8 support_12b : 1;
+                NvU8 support_10b : 1;
+                NvU8 support_8b : 1;
+            } ycbcr420_depth;
+
+            // based on the DID2.0 spec: the minimum pixel rate at which the Sink device shall support YCbCr 4:2:0 encoding
+            NvU8 minimum_pixel_rate_ycbcr420;
+
+            struct
+            {
+                NvU8 support_32khz : 1;
+                NvU8 support_44_1khz : 1;
+                NvU8 support_48khz : 1;
+                NvU8 rsvd : 5;
+            } audio_capability;
+
+            struct
+            {
+                NvU8 rsvd : 1;
+                NvU8 support_colorspace_bt2020_eotf_smpte_st2084 : 1;
+                NvU8 support_colorspace_bt2020_eotf_bt2020 : 1;
+                NvU8 support_colorspace_dci_p3_eotf_dci_p3 : 1;
+                NvU8 support_colorspace_adobe_rgb_eotf_adobe_rgb : 1;
+                NvU8 support_colorspace_bt709_eotf_bt1886 : 1;
+                NvU8 support_colorspace_bt601_eotf_bt601 : 1;
+                NvU8 support_colorspace_srgb_eotf_srgb : 1;
+            } colorspace_eotf_combination_1;
+
+            struct
+            {
+                NvU8 rsvd : 8;
+            } colorspace_eotf_combination_2;
+
+            struct
+            {
+                NvU8 rsvd : 5;
+                NvU8 total : 3;
+            } total_additional_colorspace_eotf;
+
+            struct
+            {
+                NvU8 support_colorspace : 4;
+                NvU8 support_eotf : 4;
+            } additional_colorspace_eotf[NVT_DISPLAYID_DISPLAY_INTERFACE_FEATURES_MAX_ADDITIONAL_SUPPORTED_COLORSPACE_EOTF];
+        } display_interface_features;
+    } u4;
+
+} NVT_DISPLAYID_INFO;
+
+//***********************************
+// EDID 18-byte display descriptors
+//***********************************
+//
+//
+//*** (Tag = 0xFF) ***/
+// Display Product Serial Number
+#define NVT_EDID_LDD_PAYLOAD_SIZE 13
+typedef struct tagNVT_EDID_DD_SERIAL_NUMBER
+{
+    NvU8 str[NVT_EDID_LDD_PAYLOAD_SIZE];
+    NvU8 padding[16 - NVT_EDID_LDD_PAYLOAD_SIZE];
+} NVT_EDID_DD_SERIAL_NUMBER;
+//
+//
+//
+//*** (Tag = 0xFE) ***/
+// Alphanumeric Data String (ASCII)
+typedef struct tagNVT_EDID_DD_DATA_STRING
+{
+    NvU8 str[NVT_EDID_LDD_PAYLOAD_SIZE];
+    NvU8 padding[16 - NVT_EDID_LDD_PAYLOAD_SIZE];
+} NVT_EDID_DD_DATA_STRING;
+//
+//
+//
+//*** (Tag = 0xFD) ***/
+// Display Range Limit
+//
+typedef struct tagNVT_EDID_DD_RANGE_GTF2
+{
+    NvU8 C;
+    NvU8 K;
+    NvU8 J;
+    NvU16 M;
+} NVT_EDID_DD_RANGE_GTF2;
+
+typedef struct tagNVT_EDID_DD_RANGE_CVT
+{
+    NvU16 max_active_pixels_per_line;
+
+    NvU8 pixel_clock_adjustment : 2; // in 0.25MHz steps, to subtract from the max pixel clock;
+                                     // the whole-number part (if any) has already been subtracted
+                                     // from max_pclk_MHz
+    NvU8 aspect_supported : 5;
+
+    NvU8 aspect_preferred : 3;
+    NvU8 blanking_support : 2;
+    NvU8 reserved1 : 3;
+
+    NvU8 scaling_support : 4;
+    NvU8 reserved2 : 4;
+
+    NvU8 preferred_refresh_rate;
+} NVT_EDID_DD_RANGE_CVT;
+
+typedef struct tagNVT_EDID_DD_RANGE_LIMIT
+{
+    NvU16 min_v_rate;
+    NvU16 max_v_rate;
+    NvU16 min_h_rate;
+    NvU16 max_h_rate;
+    NvU16 max_pclk_MHz;
+    NvU8 timing_support; // indicates 2nd GTF / CVT support
+    union
+    {
+        // if timing_support = 0x02
+        NVT_EDID_DD_RANGE_GTF2 gtf2;
+
+        // if timing_support = 0x04
+        NVT_EDID_DD_RANGE_CVT cvt;
+    } u;
+} NVT_EDID_DD_RANGE_LIMIT;
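+//
+// Illustrative selection sketch (not part of the original header): the union above is
+// discriminated by timing_support, using the NVT_EDID_RANGE_SUPPORT_* values defined
+// just below:
+//
+//     if (rl.timing_support == NVT_EDID_RANGE_SUPPORT_CVT)       // rl: NVT_EDID_DD_RANGE_LIMIT
+//     {
+//         // rl.u.cvt is valid
+//     }
+//     else if (rl.timing_support == NVT_EDID_RANGE_SUPPORT_GTF2)
+//     {
+//         // rl.u.gtf2 is valid
+//     }
+//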
+
+typedef struct tagNVT_EDID_RANGE_LIMIT
+{
+    NvU32 min_v_rate_hzx1k;
+    NvU32 max_v_rate_hzx1k;
+    NvU32 min_h_rate_hz;
+    NvU32 max_h_rate_hz;
+    NvU32 max_pclk_10khz;
+} NVT_EDID_RANGE_LIMIT;
+
+// timing support
+#define NVT_EDID_RANGE_SUPPORT_GTF2 0x02
+#define NVT_EDID_RANGE_SUPPORT_CVT 0x04
+
+// supported aspect ratios
+#define NVT_EDID_CVT_ASPECT_SUPPORT_MAX 5
+
+#define NVT_EDID_CVT_ASPECT_SUPPORT_4X3 0x10
+#define NVT_EDID_CVT_ASPECT_SUPPORT_16X9 0x08
+#define NVT_EDID_CVT_ASPECT_SUPPORT_16X10 0x04
+#define NVT_EDID_CVT_ASPECT_SUPPORT_5X4 0x02
+#define NVT_EDID_CVT_ASPECT_SUPPORT_15X9 0x01
+
+// preferred aspect ratios
+#define NVT_EDID_CVT_ASPECT_PREFER_4X3 0x00
+#define NVT_EDID_CVT_ASPECT_PREFER_16X9 0x01
+#define NVT_EDID_CVT_ASPECT_PREFER_16X10 0x02
+#define NVT_EDID_CVT_ASPECT_PREFER_5X4 0x03
+#define NVT_EDID_CVT_ASPECT_PREFER_15X9 0x04
+
+// cvt blanking support
+#define NVT_EDID_CVT_BLANKING_STANDARD 0x01
+#define NVT_EDID_CVT_BLANKING_REDUCED 0x02
+
+// scaling support
+#define NVT_EDID_CVT_SCALING_HOR_SHRINK 0x08
+#define NVT_EDID_CVT_SCALING_HOR_STRETCH 0x04
+#define NVT_EDID_CVT_SCALING_VER_SHRINK 0x02
+#define NVT_EDID_CVT_SCALING_VER_STRETCH 0x01
+
+//
+//
+//
+//*** (Tag = 0xFC) ***/
+// Display Product Name
+typedef struct tagNVT_EDID_DD_PRODUCT_NAME
+{
+    NvU8 str[NVT_EDID_LDD_PAYLOAD_SIZE];
+    NvU8 padding[16 - NVT_EDID_LDD_PAYLOAD_SIZE];
+} NVT_EDID_DD_PRODUCT_NAME;
+//
+//
+//
+//*** (Tag = 0xFB) ***/
+// the 18-byte display descriptors
+// Display Color Point Data
+typedef struct tagNVT_EDID_DD_COLOR_POINT
+{
+    NvU8 wp1_index;
+    NvU16 wp1_x;
+    NvU16 wp1_y;
+    NvU16 wp1_gamma;
+    NvU8 wp2_index;
+    NvU16 wp2_x;
+    NvU16 wp2_y;
+    NvU16 wp2_gamma;
+} NVT_EDID_DD_COLOR_POINT;
+//
+//
+//
+//*** (Tag = 0xFA) ***/
+// Standard Timing Identifications
+#define NVT_EDID_DD_STI_NUM 6
+
+typedef struct tagNVT_EDID_DD_STD_TIMING
+{
+    NvU16 descriptor[NVT_EDID_DD_STI_NUM];
+} NVT_EDID_DD_STD_TIMING;
+//
+//
+//
+//*** (Tag = 0xF9) ***/
+// Display Color Management Data (DCM)
+typedef struct tagNVT_EDID_DD_COLOR_MANAGEMENT_DATA
+{
+    NvU16 red_a3;
+    NvU16 red_a2;
+    NvU16 green_a3;
+    NvU16 green_a2;
+    NvU16 blue_a3;
+    NvU16 blue_a2;
+} NVT_EDID_DD_COLOR_MANAGEMENT_DATA;
+//
+//
+//
+//*** (Tag = 0xF8) ***/
+// CVT 3 Byte Timing Code
+#define NVT_EDID_DD_MAX_CVT3_PER_DESCRITPOR 4
+
+typedef struct tagEDID_DD_CVT_3BYTE_BLOCK
+{
+    NvU16 addressable_lines : 14;
+    NvU8 aspect_ratio : 2;
+    NvU8 reserved0 : 1;
+    NvU8 preferred_vert_rates : 2;
+    NvU8 supported_vert_rates : 5;
+
+} NVT_EDID_DD_CVT_3BYTE_BLOCK;
+
+typedef struct tagNVT_EDID_DD_CVT_3BYTE
+{
+    NVT_EDID_DD_CVT_3BYTE_BLOCK block[NVT_EDID_DD_MAX_CVT3_PER_DESCRITPOR];
+} NVT_EDID_DD_CVT_3BYTE;
+
+#define NVT_EDID_CVT3_ASPECT_4X3 0x00
+#define NVT_EDID_CVT3_ASPECT_16X9 0x01
+#define NVT_EDID_CVT3_ASPECT_16X10 0x02
+#define NVT_EDID_CVT3_ASPECT_15X9 0x03
+
+#define NVT_EDID_CVT3_PREFFERED_RATE_50HZ 0x00
+#define NVT_EDID_CVT3_PREFFERED_RATE_60HZ 0x01
+#define NVT_EDID_CVT3_PREFFERED_RATE_75HZ 0x02
+#define NVT_EDID_CVT3_PREFFERED_RATE_85HZ 0x03
+
+#define NVT_EDID_CVT3_SUPPORTED_RATE_50HZ 0x10
+#define NVT_EDID_CVT3_SUPPORTED_RATE_60HZ 0x08
+#define NVT_EDID_CVT3_SUPPORTED_RATE_75HZ 0x04
+#define NVT_EDID_CVT3_SUPPORTED_RATE_85HZ 0x02
+#define NVT_EDID_CVT3_SUPPORTED_RATE_60HZ_REDUCED_BLANKING 0x01
+//
+//
+//
+//*** (Tag = 0xF7) ***/
+// Established Timings III
+//
+#define NVT_EDID_DD_EST_TIMING3_NUM 6
+
+typedef struct tagNVT_EDID_DD_EST_TIMING3
+{
+    NvU8 revision;
+    NvU8 data[NVT_EDID_DD_EST_TIMING3_NUM];
+} NVT_EDID_DD_EST_TIMING3;
+//
+//
+//
+//*** (Tag = 0x10) ***/
+// Dummy Descriptor Definition
+typedef struct tagNVT_EDID_DD_DUMMY_DESCRIPTOR
+{
+    NvU8 data[13];
+} NVT_EDID_DD_DUMMY_DESCRIPTOR;
+//
+//
+//
+//*** (Tag = 0x00 to 0x0F) ***/
+// Manufacturer Special Data
+typedef struct tagNVT_EDID_DD_MANUF_DATA
+{
+    NvU8 data[13];
+} NVT_EDID_DD_MANUF_DATA;
+//
+//
+//
+// the translated generic 18-byte long descriptor
+typedef struct tagNVT_EDID_18BYTE_DESCRIPTOR
+{
+    NvU8 tag;
+    union
+    {
+        NVT_EDID_DD_SERIAL_NUMBER serial_number;
+        NVT_EDID_DD_DATA_STRING data_str;
+        NVT_EDID_DD_RANGE_LIMIT range_limit;
+        NVT_EDID_DD_PRODUCT_NAME product_name;
+        NVT_EDID_DD_COLOR_POINT color_point;
+        NVT_EDID_DD_STD_TIMING std_timing;
+        NVT_EDID_DD_COLOR_MANAGEMENT_DATA color_man;
+        NVT_EDID_DD_CVT_3BYTE cvt;
+        NVT_EDID_DD_EST_TIMING3 est3;
+        NVT_EDID_DD_DUMMY_DESCRIPTOR dummy;
+        NVT_EDID_DD_MANUF_DATA manuf_data;
+    } u;
+} NVT_EDID_18BYTE_DESCRIPTOR;
+//
+//
+// Display Descriptor Tags
+#define NVT_EDID_DISPLAY_DESCRIPTOR_DPSN 0xFF // display product serial number
+#define NVT_EDID_DISPLAY_DESCRIPTOR_ADS 0xFE // alphanumeric data string (ASCII)
+#define NVT_EDID_DISPLAY_DESCRIPTOR_DRL 0xFD // display range limit
+#define NVT_EDID_DISPLAY_DESCRITPOR_DPN 0xFC // display product name
+#define NVT_EDID_DISPLAY_DESCRIPTOR_CPD 0xFB // color point data
+#define NVT_EDID_DISPLAY_DESCRIPTOR_STI 0xFA // standard timing identification
+#define NVT_EDID_DISPLAY_DESCRIPTOR_DCM 0xF9 // display color management
+#define NVT_EDID_DISPLAY_DESCRIPTOR_CVT 0xF8 // CVT 3-byte timing code
+#define NVT_EDID_DISPLAY_DESCRIPTOR_ESTIII 0xF7 // established timings III
+#define NVT_EDID_DISPLAY_DESCRIPTOR_DUMMY 0x10 // dummy descriptor
+
+//*******************
+// Raw EDID offsets and info
+//*******************
+//
+// Byte 14, video input definition
+//
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_MASK 0x0F // dvi/hdmi/dp
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_MASK 0x70 // bpc support
+#define NVT_EDID_VIDEO_INPUT_DEFINITION_DIGITAL_MASK 0x80 // digital/analog
+//
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_SHIFT 0
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_SHIFT 4
+#define NVT_EDID_VIDEO_INPUT_DEFINITION_DIGITAL_SHIFT 7
+//
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_UNDEFINED 0
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_DVI_SUPPORTED 1
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_A_SUPPORTED 2
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_B_SUPPORTED 3
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_MDDI_SUPPORTED 4
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_DISPLAYPORT_SUPPORTED 5
+//#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_RESERVED 6 - 15
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_UNDEFINED 0
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_6BPC 1
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_8BPC 2
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_10BPC 3
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_12BPC 4
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_14BPC 5
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_16BPC 6
+//#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_RESERVED 7
+#define NVT_EDID_VIDEO_INPUT_DEFINITION_DIGITAL 0x01
+//
+// Byte 18, feature support
+//
+#define NVT_EDID_OTHER_FEATURES_MASK 0x07 // sRGB space, preferred timing, continuous freq.
+#define NVT_EDID_DISPLAY_COLOR_TYPE_MASK 0x18 // for analog, see byte 14, bit 7
+#define NVT_EDID_DISPLAY_COLOR_ENCODING_MASK 0x18 // for digital
+#define NVT_EDID_DISPLAY_POWER_MANAGEMENT_MASK 0xE0 // standby/suspend/active off
+//
+#define NVT_EDID_OTHER_FEATURES_SHIFT 0
+#define NVT_EDID_DISPLAY_COLOR_TYPE_SHIFT 3
+#define NVT_EDID_DISPLAY_COLOR_ENCODING_SHIFT 3
+#define NVT_EDID_DISPLAY_POWER_MANAGEMENT_SHIFT 5
+//
+#define NVT_EDID_OTHER_FEATURES_USES_CONTINUOUS_FREQ (1 << 0)
+#define NVT_EDID_OTHER_FEATURES_PTM_INCLUDE_NATIVE (1 << 1)
+#define NVT_EDID_OTHER_FEATURES_SRGB_DEFAULT_COLORSPACE (1 << 2)
+//
+#define NVT_EDID_DISPLAY_COLOR_TYPE_MONOCHROME 0
+#define NVT_EDID_DISPLAY_COLOR_TYPE_RGB 1
+#define NVT_EDID_DISPLAY_COLOR_TYPE_NON_RGB 2
+#define NVT_EDID_DISPLAY_COLOR_TYPE_UNDEFINED 3
+//
+#define NVT_EDID_DISPLAY_COLOR_ENCODING_YCBCR_444 (1 << 0) // RGB is always supported
+#define NVT_EDID_DISPLAY_COLOR_ENCODING_YCBCR_422 (1 << 1) // RGB is always supported
+//
+#define NVT_EDID_DISPLAY_POWER_MANAGEMENT_SUPPORTS_ACTIVE_OFF (1 << 0)
+#define NVT_EDID_DISPLAY_POWER_MANAGEMENT_SUPPORTS_SUSPENDED_MODE (1 << 1)
+#define NVT_EDID_DISPLAY_POWER_MANAGEMENT_SUPPORTS_STANDBY_MODE (1 << 2)
+//
+// edid offsets
+//
+#define NVT_EDID_VIDEO_INPUT_DEFINITION 0x14
+#define NVT_EDID_FEATURE_SUPPORT 0x18
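+//
+// Illustrative decode sketch (not part of the original header): reading the video
+// input definition byte of a raw EDID buffer "pEdid" with the masks above:
+//
+//     NvU8 b14 = pEdid[NVT_EDID_VIDEO_INPUT_DEFINITION];
+//     if (b14 & NVT_EDID_VIDEO_INPUT_DEFINITION_DIGITAL_MASK)
+//     {
+//         NvU8 bpcCode = (b14 & NVT_EDID_VIDEO_COLOR_BIT_DEPTH_MASK) >> NVT_EDID_VIDEO_COLOR_BIT_DEPTH_SHIFT;
+//         NvU8 iface   = b14 & NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_MASK;
+//     }
+//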
+
+
+//*******************
+// Parsed EDID info
+//*******************
+//
+#define NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR 4
+#define NVT_EDID_MAX_STANDARD_TIMINGS 8
+#define NVT_EDID_MAX_TOTAL_TIMING NVT_MAX_TOTAL_TIMING
+#define NVT_EDID_VER_1_1 0x101
+#define NVT_EDID_VER_1_2 0x102
+#define NVT_EDID_VER_1_3 0x103
+#define NVT_EDID_VER_1_4 0x104
+//
+// byte 0x14, Digital
+// bits 0-3
+#define NVT_EDID_VIDEOSIGNAL_INTERFACE_NOT_DEFINED 0x0
+#define NVT_EDID_VIDEOSIGNAL_INTERFACE_DVI 0x1
+#define NVT_EDID_VIDEOSIGNAL_INTERFACE_HDMI_A 0x2
+#define NVT_EDID_VIDEOSIGNAL_INTERFACE_HDMI_B 0x3
+#define NVT_EDID_VIDEOSIGNAL_INTERFACE_MDDI 0x4
+#define NVT_EDID_VIDEOSIGNAL_INTERFACE_DP 0x5
+// bits 4-6; these are translated values. See NvTiming_ParseEDIDInfo()
+#define NVT_EDID_VIDEOSIGNAL_BPC_NOT_DEFINED 0
+#define NVT_EDID_VIDEOSIGNAL_BPC_6 6
+#define NVT_EDID_VIDEOSIGNAL_BPC_8 8
+#define NVT_EDID_VIDEOSIGNAL_BPC_10 10
+#define NVT_EDID_VIDEOSIGNAL_BPC_12 12
+#define NVT_EDID_VIDEOSIGNAL_BPC_14 14
+#define NVT_EDID_VIDEOSIGNAL_BPC_16 16
+//
+// byte 0x18, edid 1.3
+// bits 3-4
+#define NVT_EDID_FEATURESUPPORT_COLOR_MONOCHROME 0x0 /* Monochrome/grayscale display */
+#define NVT_EDID_FEATURESUPPORT_COLOR_RGB 0x1 /* R/G/B color display */
+#define NVT_EDID_FEATURESUPPORT_COLOR_MULTICOLOR 0x2 /* non R/G/B multicolor displays, e.g. R/G/Y */
+#define NVT_EDID_FEATURESUPPORT_COLOR_UNDEFINED 0x3 /* Undefined */
+//
+// byte 0x18, edid 1.4
+// bits 3-4
+#define NVT_EDID_FEATURESUPPORT_COLOR_ENCODING_RBG 0x0 /* RGB always supported */
+#define NVT_EDID_FEATURESUPPORT_COLOR_ENCODING_YCRCB444 0x1 /* RGB + 444 */
+#define NVT_EDID_FEATURESUPPORT_COLOR_ENCODING_YCRCB422 0x2 /* RGB + 422 */
+#define NVT_EDID_FEATURESUPPORT_COLOR_ENCODING_YCRCB 0x3 /* RGB + 444 + 422 supported */
+//
+//
+// structure used internally to map support for HDMI 3D modes
+#define MAX_EDID_ADDRESSABLE_3D_VICS 16
+#define MAX_3D_VICS_RESERVED_FOR_MANDATORY 8
+#define MAX_3D_VICS_SUPPORTED (MAX_EDID_ADDRESSABLE_3D_VICS + MAX_3D_VICS_RESERVED_FOR_MANDATORY)
+
+// Constants given by Dolby to be appended for chromaticity information
+#define NVT_DOLBY_CHROMATICITY_MSB_BX 0x20
+#define NVT_DOLBY_CHROMATICITY_MSB_BY 0x08
+#define NVT_DOLBY_CHROMATICITY_MSB_GX 0x00
+#define NVT_DOLBY_CHROMATICITY_MSB_GY 0x80
+#define NVT_DOLBY_CHROMATICITY_MSB_RX 0xA0
+#define NVT_DOLBY_CHROMATICITY_MSB_RY 0x40
+
+typedef struct _HDMI3DDetails
+{
+    NvU8 Vic;
+    NvU16 StereoStructureMask;
+    NvU8 SideBySideHalfDetail;
+} HDMI3DDETAILS;
+
+typedef struct _SupportMap
+{
+    HDMI3DDETAILS map[MAX_3D_VICS_SUPPORTED];
+    NvU32 total;
+} HDMI3DSUPPORTMAP;
+
+typedef struct tagNVT_EXT_TIMING
+{
+    NVT_TIMING timing;
+    NVT_HDMIEXT HDMI3D;
+} NVT_EXT_TIMING;
+
+typedef struct _NVDA_VSDB_PARSED_INFO
+{
+    NvBool valid;
+    NvU8 vsdbVersion;
+
+    // these fields are specified in version 1 of the NVDA VSDB
+    union
+    {
+        struct
+        {
+            NvBool supportsVrr;
+            NvU8 minRefreshRate;
+        } v1;
+    } vrrData;
+
+} NVDA_VSDB_PARSED_INFO;
+
+typedef enum _MSFT_VSDB_DESKTOP_USAGE
+{
+    MSFT_VSDB_NOT_USABLE_BY_DESKTOP = 0,
+    MSFT_VSDB_USABLE_BY_DESKTOP = 1
+} MSFT_VSDB_DESKTOP_USAGE;
+
+typedef enum _MSFT_VSDB_THIRD_PARTY_USAGE
+{
+    MSFT_VSDB_NOT_USABLE_BY_THIRD_PARTY = 0,
+    MSFT_VSDB_USABLE_BY_THIRD_PARTY = 1
+} MSFT_VSDB_THIRD_PARTY_USAGE;
+
+typedef enum _MSFT_VSDB_PRIMARY_USE_CASE
+{
+    MSFT_VSDB_FOR_UNDEFINED = 0,
+    MSFT_VSDB_FOR_TEST_EQUIPMENT = 0x1,
+    MSFT_VSDB_FOR_GENERIC_DISPLAY = 0x2,
+    MSFT_VSDB_FOR_TELEVISION_DISPLAY = 0x3,
+    MSFT_VSDB_FOR_DESKTOP_PRODUCTIVITY_DISPLAY = 0x4,
+    MSFT_VSDB_FOR_DESKTOP_GAMING_DISPLAY = 0x5,
+    MSFT_VSDB_FOR_PRESENTATION_DISPLAY = 0x6,
+    MSFT_VSDB_FOR_VIRTUAL_REALITY_HEADSETS = 0x7,
+    MSFT_VSDB_FOR_AUGMENTED_REALITY = 0x8,
+    MSFT_VSDB_FOR_VIDEO_WALL_DISPLAY = 0x10,
+    MSFT_VSDB_FOR_MEDICAL_IMAGING_DISPLAY = 0x11,
+    MSFT_VSDB_FOR_DEDICATED_GAMING_DISPLAY = 0x12,
+    MSFT_VSDB_FOR_DEDICATED_VIDEO_MONITOR_DISPLAY = 0x13,
+    MSFT_VSDB_FOR_ACCESSORY_DISPLAY = 0x14
+} MSFT_VSDB_PRIMARY_USE_CASE;
+
+#define MSFT_VSDB_CONTAINER_ID_SIZE (16)
+#define MSFT_VSDB_MAX_VERSION_SUPPORT (3)
+
+typedef struct _MSFT_VSDB_PARSED_INFO
+{
+    NvBool valid;
+    NvU8 version;
+
+    MSFT_VSDB_DESKTOP_USAGE desktopUsage;
+    MSFT_VSDB_THIRD_PARTY_USAGE thirdPartyUsage;
+    MSFT_VSDB_PRIMARY_USE_CASE primaryUseCase;
+    NvU8 containerId[MSFT_VSDB_CONTAINER_ID_SIZE];
+
+} MSFT_VSDB_PARSED_INFO;
+
+typedef struct tagNVT_HDMI_LLC_INFO
+{
+    // A.B.C.D address
+    NvU8 addrA;
+    NvU8 addrB;
+    NvU8 addrC;
+    NvU8 addrD;
+
+    NvU8 supports_AI : 1;
+    NvU8 dc_48_bit : 1;
+    NvU8 dc_36_bit : 1;
+    NvU8 dc_30_bit : 1;
+    NvU8 dc_y444 : 1;
+    NvU8 dual_dvi : 1;
+    NvU8 max_tmds_clock;
+    NvU8 effective_tmds_clock;
+    NvU8 latency_field_present : 1;
+    NvU8 i_latency_field_present : 1;
+    NvU8 hdmi_video_present : 1;
+    NvU8 cnc3 : 1;
+    NvU8 cnc2 : 1;
+    NvU8 cnc1 : 1;
+    NvU8 cnc0 : 1;
+    NvU8 video_latency;
+    NvU8 audio_latency;
+    NvU8 interlaced_video_latency;
+    NvU8 interlaced_audio_latency;
+    NvU8 threeD_present : 1;
+    NvU8 threeD_multi_present : 2;
+    NvU8 image_size : 2;
+    NvU8 hdmi_vic_len : 3;
+    NvU8 hdmi_3d_len : 5;
+    // for now ignoring the other extensions
+    // ....
+} NVT_HDMI_LLC_INFO;
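+//
+// Illustrative conversion sketch (not part of the original header; the 5 MHz step is
+// the HDMI 1.4 VSDB Max_TMDS_Clock coding, quoted from memory, so treat it as an
+// assumption): for a parsed NVT_HDMI_LLC_INFO "llc":
+//
+//     NvU32 maxTmdsMHz = (NvU32)llc.max_tmds_clock * 5;
+//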
+
+typedef struct tagNVT_HDMI_FORUM_INFO
+{
+    NvU8 max_TMDS_char_rate;
+
+    NvU8 threeD_Osd_Disparity : 1;
+    NvU8 dual_view : 1;
+    NvU8 independent_View : 1;
+    NvU8 lte_340Mcsc_scramble : 1;
+    NvU8 ccbpci : 1;
+    NvU8 cable_status : 1;
+    NvU8 rr_capable : 1;
+    NvU8 scdc_present : 1;
+
+    NvU8 dc_30bit_420 : 1;
+    NvU8 dc_36bit_420 : 1;
+    NvU8 dc_48bit_420 : 1;
+    NvU8 uhd_vic : 1;
+    NvU8 max_FRL_Rate : 4;
+
+    NvU8 fapa_start_location : 1;
+    NvU8 allm : 1;
+    NvU8 fva : 1;
+    NvU8 cnmvrr : 1;
+    NvU8 cinemaVrr : 1;
+    NvU8 m_delta : 1;
+    NvU8 qms : 1;
+    NvU8 fapa_end_extended : 1;
+
+    NvU16 vrr_min : 6;
+    NvU16 vrr_max : 10;
+
+    NvU8 qms_tfr_min : 1;
+    NvU8 qms_tfr_max : 1;
+    NvU16 dsc_MaxSlices : 6;
+    NvU16 dsc_MaxPclkPerSliceMHz : 10;
+
+    NvU8 dsc_10bpc : 1;
+    NvU8 dsc_12bpc : 1;
+    NvU8 dsc_16bpc : 1;
+    NvU8 dsc_All_bpp : 1;
+    NvU8 dsc_Max_FRL_Rate : 4;
+
+    NvU8 dsc_Native_420 : 1;
+    NvU8 dsc_1p2 : 1;
+    NvU8 rsvd_2 : 6;
+
+    NvU8 dsc_totalChunkKBytes : 7; // = 1 + the EDID-reported DSC_TotalChunkKBytes
+    NvU8 rsvd_3 : 1;
+
+} NVT_HDMI_FORUM_INFO;
+
+typedef struct tagNVT_HDR_STATIC_METADATA
+{
+    struct
+    {
+        NvU8 trad_gamma_sdr_eotf : 1;
+        NvU8 trad_gamma_hdr_eotf : 1;
+        NvU8 smpte_st_2084_eotf : 1;
+        NvU8 future_eotf : 1;
+    } supported_eotf;
+
+    NvU8 static_metadata_type; // set to 1 if the sink supports static metadata type 1
+    NvU8 max_cll;  // maximum luminance level value
+    NvU8 max_fall; // maximum frame-average luminance
+    NvU8 min_cll;  // minimum luminance level value
+
+} NVT_HDR_STATIC_METADATA;
+
+typedef struct tagNVT_DV_STATIC_METADATA
+{
+    NvU32 ieee_id : 24;
+    NvU32 VSVDB_version : 3;
+    NvU32 dm_version : 8;
+    NvU32 supports_2160p60hz : 1;
+    NvU32 supports_YUV422_12bit : 1;
+    NvU32 supports_global_dimming : 1;
+    NvU32 colorimetry : 1;
+    NvU32 target_min_luminance : 12;
+    NvU32 target_max_luminance : 12;
+    NvU32 cc_red_x : 12;
+    NvU32 cc_red_y : 12;
+    NvU32 cc_green_x : 12;
+    NvU32 cc_green_y : 12;
+    NvU32 cc_blue_x : 12;
+    NvU32 cc_blue_y : 12;
+    NvU32 cc_white_x : 12;
+    NvU32 cc_white_y : 12;
+    NvU32 supports_backlight_control : 2;
+    NvU32 backlt_min_luma : 2;
+    NvU32 interface_supported_by_sink : 2;
+    NvU32 supports_10b_12b_444 : 2;
+    NvU32 parity : 1;
+} NVT_DV_STATIC_METADATA;
+
+//***********************************
+// parsed DisplayID 2.0 definitions
+//***********************************
+#define NVT_DISPLAYID_2_0_PRODUCT_STRING_MAX_LEN 236
+
+// the basic info encoded in byte[3]
+#define NVT_DISPLAY_2_0_CAP_BASIC_AUDIO 0x40 // DTV monitor supports basic audio
+#define NVT_DISPLAY_2_0_CAP_YCbCr_444 0x20 // DTV monitor supports YCbCr4:4:4
+#define NVT_DISPLAY_2_0_CAP_YCbCr_422 0x10 // DTV monitor supports YCbCr4:2:2
+
+// vendor specific
+#define NVT_VESA_VENDOR_SPECIFIC_IEEE_ID 0x3A0292
+#define NVT_VESA_VENDOR_SPECIFIC_LENGTH 7
+
+#define NVT_VESA_ORG_VSDB_DATA_TYPE_MASK 0x07
+#define NVT_VESA_ORG_VSDB_COLOR_SPACE_AND_EOTF_MASK 0x80
+#define NVT_VESA_ORG_VSDB_COLOR_SPACE_AND_EOTF_SHIFT 7
+#define NVT_VESA_ORG_VSDB_PIXELS_OVERLAPPING_MASK 0x0F
+#define NVT_VESA_ORG_VSDB_MULTI_SST_MODE_MASK 0x60
+#define NVT_VESA_ORG_VSDB_MULTI_SST_MODE_SHIFT 5
+#define NVT_VESA_ORG_VSDB_PASS_THROUGH_INTEGER_MASK 0x3F
+#define NVT_VESA_ORG_VSDB_PASS_THROUGH_FRACTIOINAL_MASK 0x0F
+
+// adaptive-sync
+#define NVT_ADAPTIVE_SYNC_DESCRIPTOR_MAX_COUNT 0x04
+
+typedef enum _tagNVT_DISPLAYID_PRODUCT_PRIMARY_USE_CASE
+{
+    PRODUCT_PRIMARY_USE_TEST_EQUIPMENT = 1,
+    PRODUCT_PRIMARY_USE_GENERIC_DISPLAY = 2,
+    PRODUCT_PRIMARY_USE_TELEVISION = 3,
+    PRODUCT_PRIMARY_USE_DESKTOP_PRODUCTIVITY = 4,
+    PRODUCT_PRIMARY_USE_DESKTOP_GAMING = 5,
+    PRODUCT_PRIMARY_USE_PRESENTATION = 6,
+    PRODUCT_PRIMARY_USE_HEAD_MOUNT_VIRTUAL_REALITY = 7,
+    PRODUCT_PRIMARY_USE_HEAD_MOUNT_AUGMENTED_REALITY = 8,
+} NVT_DISPLAYID_PRODUCT_PRIMARY_USE_CASE;
+
+typedef enum _tagNVT_DISPLAYID_SCAN_ORIENTATION
+{
+    SCAN_ORIENTATION_LRTB = 0,
+    SCAN_ORIENTATION_RLTB = 1,
+    SCAN_ORIENTATION_TBRL = 2,
+    SCAN_ORIENTATION_BTRL = 3,
+    SCAN_ORIENTATION_RLBT = 4,
+    SCAN_ORIENTATION_LRBT = 5,
+    SCAN_ORIENTATION_BTLR = 6,
+    SCAN_ORIENTATION_TBLR = 7,
+} NVT_DISPLAYID_SCAN_ORIENTATION;
+
+typedef enum _tagNVT_DISPLAYID_INTERFACE_EOTF
+{
+    INTERFACE_EOTF_NOT_DEFINED = 0x0,
+    INTERFACE_EOTF_SRGB = 0x1,
+    INTERFACE_EOTF_BT601 = 0x2,
+    INTERFACE_EOTF_BT1886 = 0x3,
+    INTERFACE_EOTF_ADOBE_RGB = 0x4,
+    INTERFACE_EOTF_DCI_P3 = 0x5,
+    INTERFACE_EOTF_BT2020 = 0x6,
+    INTERFACE_EOTF_NATIVE_GAMMA = 0x7,
+    INTERFACE_EOTF_SMPTE_ST2084 = 0x8,
+    INTERFACE_EOTF_HYBRID_LOG = 0x9,
+    INTERFACE_EOTF_CUSTOM = 0x10,
+} NVT_DISPLAYID_INTERFACE_EOTF;
+
+typedef enum _tagNVT_DISPLAYID_INTERFACE_COLOR_SPACE
+{
+    INTERFACE_COLOR_SPACE_NOT_DEFINED = 0x0,
+    INTERFACE_COLOR_SPACE_SRGB = 0x1,
+    INTERFACE_COLOR_SPACE_BT601 = 0x2,
+    INTERFACE_COLOR_SPACE_BT709 = 0x3,
+    INTERFACE_COLOR_SPACE_ADOBE_RGB = 0x4,
+    INTERFACE_COLOR_SPACE_DCI_P3 = 0x5,
+    INTERFACE_COLOR_SPACE_BT2020 = 0x6,
+    INTERFACE_COLOR_SPACE_CUSTOM = 0x7,
+} NVT_DISPLAYID_INTERFACE_COLOR_SPACE;
+
+typedef enum _tagNVT_DISPLAYID_DEVICE_TECHNOLOGY
+{
+    DEVICE_TECHNOLOGY_NOT_SPECIFIED,
+    DEVICE_TECHNOLOGY_LCD,
+    DEVICE_TECHNOLOGY_OLED,
+} NVT_DISPLAYID_DEVICE_TECHNOLOGY;
+
+typedef struct _tagNVT_DISPLAYID_TILED_DISPLAY_TOPOLOGY
+{
+    NvU32 revision;
+
+    struct
+    {
+        NvBool bSingleEnclosure;
+        NvBool bHasBezelInfo;
+        NVT_SINGLE_TILE_BEHAVIOR single_tile_behavior;
+        NVT_MULTI_TILE_BEHAVIOR multi_tile_behavior;
+    } capability;
+
+    struct
+    {
+        NvU32 row;
+        NvU32 col;
+    } topology;
+
+    struct
+    {
+        NvU32 x;
+        NvU32 y;
+    } location;
+
+    struct
+    {
+        NvU32 width;
+        NvU32 height;
+    } native_resolution;
+
+    struct
+    {
+        NvU32 top;    // Top bezel in pixels
+        NvU32 bottom; // Bottom bezel in pixels
+        NvU32 right;  // Right bezel in pixels
+        NvU32 left;   // Left bezel in pixels
+    } bezel_info;
+
+    NVT_TILEDDISPLAY_TOPOLOGY_ID tile_topology_id;
+} NVT_DISPLAYID_TILED_DISPLAY_TOPOLOGY;
+
+typedef struct _tagNVT_DISPLAYID_CONTAINERID
+{
+    NvU32 revision;
+    NvU32 data1;
+    NvU16 data2;
+    NvU16 data3;
+    NvU16 data4;
+    NvU8 data5[6];
+} NVT_DISPLAYID_CONTAINERID;
+
+typedef struct _tagNVT_DISPLAYID_INTERFACE_FEATURES
+{
+    NvU32 revision;
+
+    NVT_COLORDEPTH rgb444; // each bit within is set if rgb444 is supported at that bpc
+    NVT_COLORDEPTH yuv444; // each bit within is set if yuv444 is supported at that bpc
+    NVT_COLORDEPTH yuv422; // each bit within is set if yuv422 is supported at that bpc
+    NVT_COLORDEPTH yuv420; // each bit within is set if yuv420 is supported at that bpc
+
+    NvU32 yuv420_min_pclk;
+
+    struct
+    {
+        NvU8 support_32khz : 1;
+        NvU8 support_44_1khz : 1;
+        NvU8 support_48khz : 1;
+        NvU8 rsvd : 5;
+    } audio_capability;
+
+    NvU32 combination_count;
+    struct
+    {
+        NVT_DISPLAYID_INTERFACE_EOTF eotf;
+        NVT_DISPLAYID_INTERFACE_COLOR_SPACE color_space;
+    } colorspace_eotf_combination[NVT_DISPLAYID_DISPLAY_INTERFACE_FEATURES_MAX_ADDITIONAL_SUPPORTED_COLORSPACE_EOTF + 1];
+
+} NVT_DISPLAYID_INTERFACE_FEATURES;
+
+typedef struct _tagNVT_DISPLAYID_PRODUCT_IDENTITY
+{
+    NvU32 revision;
+    NvU32 vendor_id;
+    NvU16 product_id;
+    NvU32 serial_number;
+    NvU16 week;
+    NvU16 year;
+    NvU8 product_string[NVT_DISPLAYID_2_0_PRODUCT_STRING_MAX_LEN + 1];
+} NVT_DISPLAYID_PRODUCT_IDENTITY;
+
+typedef enum _tagNVT_COLOR_MAP_STANDARD
+{
+    COLOR_MAP_CIE_1931,
+    COLOR_MAP_CIE_1976,
+} NVT_COLOR_MAP_STANDARD;
+
+typedef enum _tagNVT_AUDIO_SPEAKER_INTEGRATED
+{
+    AUDIO_SPEAKER_INTEGRATED_SUPPORTED = 0,
+    AUDIO_SPEAKER_INTEGRATED_NOT_SUPPORTED = 1,
+} NVT_AUDIO_SPEAKER_INTEGRATED;
+
+typedef enum _tagNVT_NATIVE_LUMINANCE_INFO
+{
+    NATIVE_LUMINANCE_INFO_MIN_GURANTEE_VALUE = 0,
+    NATIVE_LUMINANCE_INFO_SOURCE_DEVICE_GUIDANCE = 1,
+} NVT_NATIVE_LUMINANCE_INFO;
+
+typedef struct _tagNVT_DISPLAYID_DISPLAY_PARAMETERS
+{
+    NvU32 revision;
+    NvU32 h_image_size_micro_meter;
+    NvU32 v_image_size_micro_meter;
+    NvU16 h_pixels;
+    NvU16 v_pixels;
+    NVT_DISPLAYID_SCAN_ORIENTATION scan_orientation;
+    NVT_COLOR_MAP_STANDARD color_map_standard;
+    NVT_COLOR_POINT primaries[3];
+    NVT_COLOR_POINT white;
+    NVT_NATIVE_LUMINANCE_INFO native_luminance_info;
+    NvU16 native_max_luminance_full_coverage;
+    NvU16 native_max_luminance_10_percent_rect_coverage;
+    NvU16 native_min_luminance;
+    NVT_COLORDEPTH native_color_depth;
+    NvU16 gamma_x100;
+    NVT_DISPLAYID_DEVICE_TECHNOLOGY device_technology;
+    NvBool device_theme_Preference;
+    NvBool audio_speakers_integrated;
+} NVT_DISPLAYID_DISPLAY_PARAMETERS;
+
+typedef struct _tagNVT_DISPLAYID_ADAPTIVE_SYNC
+{
+    union
+    {
+        NvU8 operation_range_info;
+        struct
+        {
+            NvU8 adaptive_sync_range : 1;
+            NvU8 duration_inc_flicker_perf : 1;
+            NvU8 modes : 2;
+            NvU8 seamless_not_support : 1;
+            NvU8 duration_dec_flicker_perf : 1;
+            NvU8 reserved : 2;
+        } information;
+    } u;
+
+    NvU8 max_duration_inc;
+    NvU8 min_rr;
+    NvU16 max_rr;
+    NvU8 max_duration_dec;
+} NVT_DISPLAYID_ADAPTIVE_SYNC;
+
+typedef struct _tagVESA_VSDB_PARSED_INFO
+{
+    struct
+    {
+        NvU8 type : 3;
+        NvU8 reserved : 4;
+        NvU8 color_space_and_eotf : 1;
+    } data_struct_type;
+
+    struct
+    {
+        NvU8 pixels_overlapping_count : 4;
+        NvU8 reserved_0 : 1;
+        NvU8 multi_sst : 2;
+        NvU8 reserved_1 : 1;
+    } overlapping;
+
+    struct
+    {
+        NvU8 pass_through_integer_dsc : 6;
+        NvU8 reserved : 2;
+    } pass_through_integer;
+
+    struct
+    {
+        NvU8 pass_through_fraction_dsc : 4;
+        NvU8 reserved : 4;
+    } pass_through_fractional;
+} VESA_VSDB_PARSED_INFO;
+
+typedef struct _tagNVT_DISPLAYID_VENDOR_SPECIFIC
+{
+    NVT_HDMI_LLC_INFO hdmiLlc;
+    NVT_HDMI_FORUM_INFO hfvs;
+    NVDA_VSDB_PARSED_INFO nvVsdb;
+    MSFT_VSDB_PARSED_INFO msftVsdb;
+    VESA_VSDB_PARSED_INFO vesaVsdb;
+} NVT_DISPLAYID_VENDOR_SPECIFIC;
+
+typedef struct _tagNVT_DISPLAYID_CTA
+{
+    NVT_EDID_CEA861_INFO cta861_info;
+    NVT_HDR_STATIC_METADATA hdrInfo;
+    NVT_DV_STATIC_METADATA dvInfo;
+    NVT_HDR10PLUS_INFO hdr10PlusInfo;
+} NVT_DISPLAYID_CTA;
+
+typedef struct _tagNVT_DISPLAYID_BRIGHTNESS_LUMINANCE_RANGE
+{
+    NvU32 revision;
+    NvU16 min_sdr_luminance;
+    NvU16 max_sdr_luminance;
+    NvU16 max_boost_sdr_luminance;
+} NVT_DISPLAYID_BRIGHTNESS_LUMINANCE_RANGE;
+
+typedef struct _tagNVT_VALID_DATA_BLOCKS
+{
+    NvBool product_id_present;
+    NvBool parameters_present;
+    NvBool type7Timing_present;
+    NvBool type8Timing_present;
+    NvBool type9Timing_present;
+    NvBool dynamic_range_limit_present;
+    NvBool interface_feature_present;
+    NvBool stereo_interface_present;
+    NvBool tiled_display_present;
+    NvBool container_id_present;
+    NvBool type10Timing_present;
+    NvBool adaptive_sync_present;
+    NvBool arvr_hmd_present;
+    NvBool arvr_layer_present;
+    NvBool brightness_luminance_range_present;
+    NvBool vendor_specific_present;
+    NvBool cta_data_present;
+} NVT_VALID_DATA_BLOCKS;
+
+#define NVT_DISPLAYID_MAX_TOTAL_TIMING NVT_MAX_TOTAL_TIMING
+typedef struct _tagNVT_DISPLAYID_2_0_INFO
+{
+    NvU8 revision;
+    NvU8 version;
+
+    // audio/yuv444/yuv422 support for CTA-861 compatibility
+    NvU8 basic_caps;
+
+    // the count of all extensions that may appear following the base section
+    NvU32 extension_count;
+
+    // whether this DisplayID 2.0 section is an EDID extension
+    NvBool as_edid_extension;
+
+    // which data blocks are present
+    NVT_VALID_DATA_BLOCKS valid_data_blocks;
+
+    NVT_DISPLAYID_PRODUCT_PRIMARY_USE_CASE primary_use_case;
+
+    // Product Identification Data Block (Mandatory)
+    NVT_DISPLAYID_PRODUCT_IDENTITY product_identity;
+
+    // Display Parameter Data Block (Mandatory for Display Use)
+    NVT_DISPLAYID_DISPLAY_PARAMETERS display_param;
+
+    // Detailed Timing Data Block (Mandatory for Display Use)
+    NvU32 total_timings;
+    NVT_TIMING timing[NVT_DISPLAYID_MAX_TOTAL_TIMING];
+
+    // Enumerated Timing Code Data Block (Not Mandatory)
+
+    // Formula-based Timing Data Block (Not Mandatory)
+
+    // Dynamic Video Timing Range Limits Data Block (Not Mandatory)
+    NVT_DISPLAYID_RANGE_LIMITS range_limits;
+
+    // Display Interface Features Data Block (Mandatory)
+    NVT_DISPLAYID_INTERFACE_FEATURES interface_features;
+
+    // Stereo Display Interface Data Block (Not Mandatory)
+
+    // Tiled Display Topology Data Block (Not Mandatory)
+    NVT_DISPLAYID_TILED_DISPLAY_TOPOLOGY tile_topo;
+
+    // ContainerID Data Block (Mandatory for Multi-function Device)
+    NVT_DISPLAYID_CONTAINERID container_id;
+
+    // Adaptive-Sync Data Block (Mandatory for display devices that support Adaptive-Sync)
+    NvU32 total_adaptive_sync_descriptor;
+    NVT_DISPLAYID_ADAPTIVE_SYNC adaptive_sync_descriptor[NVT_ADAPTIVE_SYNC_DESCRIPTOR_MAX_COUNT];
+
+    // Brightness Luminance Range Data Block (Mandatory for display devices that support nits-based brightness control)
+    NVT_DISPLAYID_BRIGHTNESS_LUMINANCE_RANGE luminance_ranges;
+
+    // Vendor-specific Data Block (Not Mandatory)
+    NVT_DISPLAYID_VENDOR_SPECIFIC vendor_specific;
+
+    // CTA DisplayID Data Block (Not Mandatory)
+    NVT_DISPLAYID_CTA cta;
+} NVT_DISPLAYID_2_0_INFO;
+
+#define NVT_EDID_PRIMARY_COLOR_FP2INT_FACTOR 1024 // Per EDID 1.4, a 10-bit color primary is encoded as the binary fraction (bit9/2 + bit8/4 + bit7/8 + ... + bit0/1024)
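+//
+// Worked example (illustrative): a raw 10-bit chromaticity value of 655 decodes to
+// 655 / NVT_EDID_PRIMARY_COLOR_FP2INT_FACTOR = 655/1024 ~= 0.640, a typical red x
+// coordinate.
+//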
To be exact, "Native" is referenced as the native HDTV timing by CEA861 extension block + NvU8 default_colorspace_srgb : 1; + NvU8 color_type : 2; + NvU8 support_active_off : 1; + NvU8 support_suspend : 1; + NvU8 support_standby : 1; + + } feature_ver_1_3; + struct + { + NvU8 continuous_frequency : 1; + NvU8 preferred_timing_is_native : 1; // should be "Preferred_timing_is_dtd1". To be exact, "Native" is referenced as the native HDTV timing by CEA861 extension block + NvU8 default_colorspace_srgb : 1; + NvU8 color_type : 2; + NvU8 support_active_off : 1; + NvU8 support_suspend : 1; + NvU8 support_standby : 1; + } feature_ver_1_4_analog; + struct + { + NvU8 continuous_frequency : 1; + NvU8 preferred_timing_is_native : 1; // should be "Preferred_timing_is_dtd1". To be exact, "Native" is referenced as the native HDTV timing by CEA861 extension block + NvU8 default_colorspace_srgb : 1; + NvU8 support_ycrcb_444 : 1; + NvU8 support_ycrcb_422 : 1; + NvU8 support_active_off : 1; + NvU8 support_suspend : 1; + NvU8 support_standby : 1; + } feature_ver_1_4_digital; + }u; + + // chromaticity coordinates + NvU16 cc_red_x; + NvU16 cc_red_y; + NvU16 cc_green_x; + NvU16 cc_green_y; + NvU16 cc_blue_x; + NvU16 cc_blue_y; + NvU16 cc_white_x; + NvU16 cc_white_y; + + // established timings 1 and 2 + NvU16 established_timings_1_2; + + // Manufacturer reserved timings + NvU16 manufReservedTimings; + + // standard timings + NvU16 standard_timings[NVT_EDID_MAX_STANDARD_TIMINGS]; + + // 18-bytes display descriptor info + NVT_EDID_18BYTE_DESCRIPTOR ldd[NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR]; + + // the parse timing + NVT_TIMING timing[NVT_EDID_MAX_TOTAL_TIMING]; + + // Note: This contains the timing after validation. + NvU32 total_timings; + + // This contains the count timing that were invalidated because they don't meet + // some policies (PClk, etc). + NvU32 total_invalidTimings; + + // indicates support for HDMI 1.4+ 3D stereo modes are present + NvU32 HDMI3DSupported; + + HDMI3DSUPPORTMAP Hdmi3Dsupport; + + // Data parsed from NVDA VSDB - Variable Refresh Rate Monitor capabilities + NVDA_VSDB_PARSED_INFO nvdaVsdbInfo; + + // Data parsed from MSFT VSDB - HMD and Specialized (Direct display) Monitor capabilities + MSFT_VSDB_PARSED_INFO msftVsdbInfo; + + // HDR capability information from the HDR Metadata Data Block + NVT_HDR_STATIC_METADATA hdr_static_metadata_info; + + // DV capability information from the DV Metadata Data Block + NVT_DV_STATIC_METADATA dv_static_metadata_info; + + // HDR10+ capability information from the HDR10+ LLC VSVDB + NVT_HDR10PLUS_INFO hdr10PlusInfo; + + // HDMI LLC info + NVT_HDMI_LLC_INFO hdmiLlcInfo; + + // HDMI 2.0 information + NVT_HDMI_FORUM_INFO hdmiForumInfo; + // deprecating the following, please use hdmiForumInfo; + struct + { + NvU8 max_TMDS_char_rate; + NvU8 lte_340Mcsc_scramble :1; + NvU8 rr_capable :1; + NvU8 SCDC_present :1; + } hdmi_2_0_info; + + // the total edid extension(s) attached to the basic block + NvU32 total_extensions; + // the total displayid2 extension(s) attached to the basic block. + NvU32 total_did2_extensions; + + NvU8 checksum; + NvU8 checksum_ok; + + // extension info + NVT_EDID_CEA861_INFO ext861; + + // for the 2nd CEA/EIA861 extension + // note: "ext861" should really be an array but since it requires massive name change and it's hard + // to find more than one 861 extension in the real world, I made a trade off like this for now. 
+ NVT_EDID_CEA861_INFO ext861_2; + + NVT_DISPLAYID_INFO ext_displayid; + NVT_DISPLAYID_2_0_INFO ext_displayid20; +} NVT_EDID_INFO; + +typedef enum +{ + NVT_PROTOCOL_UNKNOWN = 0, + NVT_PROTOCOL_DP = 1, + NVT_PROTOCOL_HDMI = 2, + NVT_PROTOCOL_DVI = 3, +} NVT_PROTOCOL; + +// the display interface/connector claimed by the EDID +#define NVT_EDID_INPUT_DIGITAL_UNDEFINED 0x00 // undefined digital interface +#define NVT_EDID_INPUT_DVI 0x01 +#define NVT_EDID_INPUT_HDMI_TYPE_A 0x02 +#define NVT_EDID_INPUT_HDMI_TYPE_B 0x03 +#define NVT_EDID_INPUT_MDDI 0x04 +#define NVT_EDID_INPUT_DISPLAY_PORT 0x05 + + +// the EDID extension TAG +#define NVT_EDID_EXTENSION_CTA 0x02 // CTA 861 series extensions +#define NVT_EDID_EXTENSION_VTB 0x10 // video timing block extension +#define NVT_EDID_EXTENSION_DI 0x40 // display information extension +#define NVT_EDID_EXTENSION_LS 0x50 // localized string extension +#define NVT_EDID_EXTENSION_DPVL 0x60 // digital packet video link extension +#define NVT_EDID_EXTENSION_DISPLAYID 0x70 // display id +#define NVT_EDID_EXTENSION_BM 0xF0 // extension block map +#define NVT_EDID_EXTENSION_OEM 0xFF // extension defined by the display manufacturer + +//************************************ +// Audio and Video Infoframe Control +//************************************ +// +// the control info for generating infoframe data +#define NVT_INFOFRAME_CTRL_DONTCARE 0xFF +// +typedef struct tagNVT_VIDEO_INFOFRAME_CTRL +{ + NvU8 color_space; + NvU8 active_format_info_present; + NvU8 bar_info; + NvU8 scan_info; + NvU8 colorimetry; + NvU8 pic_aspect_ratio; + NvU8 active_format_aspect_ratio; + NvU8 it_content; + NvU8 it_content_type; + NvU8 extended_colorimetry; + NvU8 rgb_quantization_range; + NvU8 nonuniform_scaling; + NvU8 video_format_id; + NvU8 pixel_repeat; + NvU16 top_bar; + NvU16 bottom_bar; + NvU16 left_bar; + NvU16 right_bar; + NvU8 addition_colorimetry_ext; + NvU8 frame_rate; + NvU8 rid; +}NVT_VIDEO_INFOFRAME_CTRL; + +// +typedef struct tagNVT_AUDIO_INFOFRAME_CTRL +{ + NvU8 coding_type; + NvU8 channel_count; + NvU8 sample_rate; + NvU8 sample_depth; + NvU8 speaker_placement; + NvU8 level_shift; + NvU8 down_mix_inhibit; +}NVT_AUDIO_INFOFRAME_CTRL; + +typedef struct tagNVT_VENDOR_SPECIFIC_INFOFRAME_CTRL +{ + NvU8 Enable; + NvU8 VSIFVersion; + NvU8 HDMIFormat; + NvU8 HDMI_VIC; + NvBool ALLMEnable; + NvU8 ThreeDStruc; + NvU8 ThreeDDetail; + NvU8 MetadataPresent; + NvU8 MetadataType; + NvU8 Metadata[8]; // type determines length + +} NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL; +#define NVT_3D_METADTATA_TYPE_PARALAX 0x00 +#define NVT_3D_METADTATA_PARALAX_LEN 0x08 +#define NVT_VSIF_VERSION_NONE 0 +#define NVT_VSIF_VERSION_H14B_VSIF 14 +#define NVT_VSIF_VERSION_HF_VSIF 20 + + +#define NVT_EXTENDED_METADATA_PACKET_INFOFRAME_VER_HDMI21 0x0 +#define NVT_EXTENDED_METADATA_PACKET_INFOFRAME_VER_HDMI21A 0x1 +typedef struct tagNVT_EXTENDED_METADATA_PACKET_INFOFRAME_CTRL +{ + NvU32 version; // See #define NVT_EXTENDED_METADATA_PACKET_INFOFRAME_VER + NvU32 EnableVRR; + NvU32 ITTiming; + NvU32 BaseVFP; + NvU32 ReducedBlanking; + NvU32 BaseRefreshRate; + NvU32 EnableQMS; + NvU32 NextTFR; + NvU32 Sync; + NvU32 MConst; +} NVT_EXTENDED_METADATA_PACKET_INFOFRAME_CTRL; + +typedef struct tagNVT_ADAPTIVE_SYNC_SDP_CTRL +{ + NvU32 minVTotal; + NvU32 targetRefreshRate; + NvU32 srCoastingVTotal; + NvBool bFixedVTotal; + NvBool bRefreshRateDivider; +}NVT_ADAPTIVE_SYNC_SDP_CTRL; + +//*********************************** +// the actual Auido/Video Infoframe +//*********************************** +// +// info frame type code 
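+
+// Usage sketch (editorial example, not from the original sources): a caller
+// building an infoframe typically presets every control field to
+// NVT_INFOFRAME_CTRL_DONTCARE and then pins down only the fields it needs;
+// the NVT_EXAMPLE_SKETCHES guard is hypothetical, so the sketch is not built.
+#if defined(NVT_EXAMPLE_SKETCHES)
+static void NvTiming_ExampleVideoInfoFrameCtrl(NVT_VIDEO_INFOFRAME_CTRL *pCtrl)
+{
+    NvU8 *pByte = (NvU8 *)pCtrl;
+    NvU32 i;
+
+    // "don't care" every byte first (the NvU16 bar fields become 0xFFFF)
+    for (i = 0; i < sizeof(*pCtrl); i++)
+    {
+        pByte[i] = NVT_INFOFRAME_CTRL_DONTCARE;
+    }
+
+    // then force just the fields this source wants to control
+    pCtrl->pixel_repeat = 0; // no pixel repetition
+    pCtrl->it_content = 1;   // flag IT content
+}
+#endif // NVT_EXAMPLE_SKETCHES
+
+//***********************************
+// the actual Audio/Video Infoframe
+//***********************************
+//
+// info frame type code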
+#define NVT_INFOFRAME_TYPE_VENDOR_SPECIFIC 1 +#define NVT_INFOFRAME_TYPE_VIDEO 2 +#define NVT_INFOFRAME_TYPE_SOURCE_PRODUCT_DESCRIPTION 3 +#define NVT_INFOFRAME_TYPE_AUDIO 4 +#define NVT_INFOFRAME_TYPE_MPEG_SOURCE 5 +#define NVT_INFOFRAME_TYPE_SELF_REFRESH 6 +#define NVT_INFOFRAME_TYPE_DYNAMIC_RANGE_MASTERING 7 +#define NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET 8 +// +// +typedef struct tagNVT_INFOFRAME_HEADER +{ + NvU8 type; + NvU8 version; + NvU8 length; +}NVT_INFOFRAME_HEADER; + +typedef struct tagNVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER +{ + NvU8 type; + NvU8 firstLast; + NvU8 sequenceIndex; +} NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER; + +#define NVT_EMP_HEADER_FIRST 0x80 +#define NVT_EMP_HEADER_LAST 0x40 +#define NVT_EMP_HEADER_FIRST_LAST 0xC0 + +// SPD Infoframe +typedef struct tagNVT_SPD_INFOFRAME_PAYLOAD +{ + NvU8 vendorBytes[8]; + NvU8 productBytes[16]; + + NvU8 sourceInformation; + +} NVT_SPD_INFOFRAME_PAYLOAD; + +typedef struct tagNVT_SPD_INFOFRAME +{ + NVT_INFOFRAME_HEADER Header; + NVT_SPD_INFOFRAME_PAYLOAD Data; +} NVT_SPD_INFOFRAME; + +// the video infoframe version 1-3 structure +typedef struct tagNVT_VIDEO_INFOFRAME +{ + NvU8 type; + NvU8 version; + NvU8 length; + + // byte 1~5 + NvU8 byte1; + NvU8 byte2; + NvU8 byte3; + NvU8 byte4; + NvU8 byte5; + + // byte 6~13 + NvU8 top_bar_low; + NvU8 top_bar_high; + NvU8 bottom_bar_low; + NvU8 bottom_bar_high; + NvU8 left_bar_low; + NvU8 left_bar_high; + NvU8 right_bar_low; + NvU8 right_bar_high; + + // byte 14~15 + NvU8 byte14; + NvU8 byte15; +}NVT_VIDEO_INFOFRAME; +// +#define NVT_VIDEO_INFOFRAME_VERSION_1 1 +#define NVT_VIDEO_INFOFRAME_VERSION_2 2 +#define NVT_VIDEO_INFOFRAME_VERSION_3 3 +#define NVT_VIDEO_INFOFRAME_VERSION_4 4 +// +#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_MASK 0x03 +#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_SHIFT 0 +#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_NO_DATA 0 +#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_OVERSCANNED 1 +#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_UNDERSCANNED 2 +#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_FUTURE 3 +// +#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_MASK 0x0C +#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_SHIFT 2 +#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_NOT_VALID 0 +#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_VERT_VALID 1 +#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_HORIZ_VALID 2 +#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_H_V_VALID 3 +// +#define NVT_VIDEO_INFOFRAME_BYTE1_A0_MASK 0x10 // active format info present +#define NVT_VIDEO_INFOFRAME_BYTE1_A0_SHIFT 4 // active format info present +#define NVT_VIDEO_INFOFRAME_BYTE1_A0_NO_DATA 0 +#define NVT_VIDEO_INFOFRAME_BYTE1_A0_VALID 1 +// +// CTA-861G new requirement - DD changed this policy +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2_MASK 8 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_MASK 0xE0 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_SHIFT 0x5 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_RGB 0 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_YCbCr422 1 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_YCbCr444 2 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_YCbCr420 3 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_FUTURE 3 // nvlEscape still uses this line 4266 +// CTA-861I new requirement +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_IDODEFINED 7 +// CEA-861-F - Unix still used this one +#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_MASK 0x60 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_SHIFT 0x5 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_RGB 0 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr422 1 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr444 2 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr420 3 +#define 
NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_FUTURE 3 // nvlEscape still uses this line 4266
+//
+#define NVT_VIDEO_INFOFRAME_BYTE1_RESERVED_MASK 0x80 // for Infoframe V1 / V2
+#define NVT_VIDEO_INFOFRAME_BYTE1_RESERVED_SHIFT 7
+//
+#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_MASK 0x0F // active format aspect ratio
+#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_SHIFT 0
+#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_SAME_AS_M1M0 8
+#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_4X3_CENTER 9
+#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_16X9_CENTER 10
+#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_14x9_CENTER 11
+//
+#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_MASK 0x30 // picture aspect ratio
+#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_SHIFT 4 // picture aspect ratio
+#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_NO_DATA 0
+#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_4X3 1
+#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_16X9 2
+#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_FUTURE 3
+//
+#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_MASK 0xC0 // colorimetry
+#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_SHIFT 6
+#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_NO_DATA 0
+#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_SMPTE170M_ITU601 1
+#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_ITU709 2
+#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_EXT_COLORIMETRY 3
+//
+#define NVT_VIDEO_INFOFRAME_BYTE3_SC_MASK 0x03 // non-uniform scaling
+#define NVT_VIDEO_INFOFRAME_BYTE3_SC_SHIFT 0
+#define NVT_VIDEO_INFOFRAME_BYTE3_SC_NONE 0
+#define NVT_VIDEO_INFOFRAME_BYTE3_SC_HORIZ_SCALED 1
+#define NVT_VIDEO_INFOFRAME_BYTE3_SC_VERT_SCALED 2
+#define NVT_VIDEO_INFOFRAME_BYTE3_SC_H_V_SCALED 3
+//
+#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_MASK 0x0C // quantization
+#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_SHIFT 2
+#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_DEFAULT 0
+#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_LIMITED_RANGE 1
+#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_FULL_RANGE 2
+#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_RESERVED 3
+//
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_MASK 0x70 // extended colorimetry
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_SHIFT 4
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_xvYCC_601 0
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_xvYCC_709 1
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_sYCC_601 2
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_AdobeYCC_601 3
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_AdobeRGB 4
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_BT2020cYCC 5 // CEA-861-F defines it as "ITU-R BT.2020 YcCbcCrc" at Table 12
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_BT2020RGBYCC 6 // CEA-861-F defines it as "ITU-R BT.2020 YcCbCr" at Table 12
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_RESERVED7 7 // CEA-861-F defines it as "Reserved" at Table 12
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_AdditionalColorExt 7 // CTA-861-G defines it as "Additional Colorimetry Ext Info Valid" at Table 13
+//
+#define NVT_VIDEO_INFOFRAME_BYTE3_ITC_MASK 0x80 // IT content
+#define NVT_VIDEO_INFOFRAME_BYTE3_ITC_SHIFT 7
+#define NVT_VIDEO_INFOFRAME_BYTE3_ITC_NO_DATA 0
+#define NVT_VIDEO_INFOFRAME_BYTE3_ITC_IT_CONTENT 1
+//
+#define NVT_VIDEO_INFOFRAME_BYTE3_RESERVED_V1_MASK 0x60 // reserved
+#define NVT_VIDEO_INFOFRAME_BYTE3_RESERVED_V1_SHIFT 5
+//
+#define NVT_VIDEO_INFOFRAME_BYTE4_VIC_MASK 0xFF // video identification code
+#define NVT_VIDEO_INFOFRAME_BYTE4_VIC_SHIFT 0
+#define NVT_VIDEO_INFOFRAME_BYTE4_VIC7 0x80
+//
+#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V3_MASK 0x00
+#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V3_SHIFT 0
+#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V2_MASK 0x80
+#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V2_SHIFT 7
+#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V1_MASK 0xFF
+#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V1_SHIFT 0
+//
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_MASK 0x0F // pixel repetitions
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_SHIFT 0
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_NO_PEP 0
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_2X 1
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_3X 2
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_4X 3
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_5X 4
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_6X 5
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_7X 6
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_8X 7
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_9X 8
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_10X 9
+//
+#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_MASK 0x30 // content type
+#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_SHIFT 4
+#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_NODATA 0 // ITC = 0
+#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_GRAPHICS 0 // ITC = 1
+#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_PHOTO 1 // ITC = don't care
+#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_CINEMA 2 // ITC = don't care
+#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_GAME 3 // ITC = don't care
+
+#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_MASK 0xC0 // YCC quantization
+#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_SHIFT 6
+#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_LIMITED_RANGE 1
+#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_FULL_RANGE 2
+#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_RESERVED3 3
+#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_RESERVED4 4
+//
+#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_MASK 0xc0 // YCC quantization
+#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_SHIFT 6
+#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_LIMITED 0
+#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_FULL 1
+#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_RSVD1 2
+#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_RSVD2 3
+//
+#define NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V2_MASK 0x00
+#define NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V2_SHIFT 0
+#define NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V1_MASK 0xFF
+#define NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V1_SHIFT 0
+//
+#define NVT_VIDEO_INFOFRAME_BYTE14_FR0_FR3_MASK 0x0F
+#define NVT_VIDEO_INFOFRAME_BYTE14_FR0_FR3_SHIFT 0
+#define NVT_VIDEO_INFOFRAME_BYTE14_FR0_FR3_NODATA 0
+#define NVT_VIDEO_INFOFRAME_BYTE14_FR4_ONE_BIT_MASK 0x10
+#define NVT_VIDEO_INFOFRAME_BYTE15_FR4_MASK 0x40
+#define NVT_VIDEO_INFOFRAME_BYTE15_FR4_NODATA 0
+#define NVT_VIDEO_INFOFRAME_BYTE15_FR4_SHIFT 2
+//
+#define NVT_VIDEO_INFOFRAME_BYTE15_RID_MASK 0x3F
+#define NVT_VIDEO_INFOFRAME_BYTE15_RID_SHIFT 0
+#define NVT_VIDEO_INFOFRAME_BYTE15_RID_NODATA 0
+//
+#define NVT_VIDEO_INFOFRAME_BYTE14_ACE0_3_MASK 0xF0
+#define NVT_VIDEO_INFOFRAME_BYTE14_ACE0_3_SHIFT 4
+#define NVT_VIDEO_INFOFRAME_BYTE14_ACE0_3_P3D65RGB 0
+#define NVT_VIDEO_INFOFRAME_BYTE14_ACE0_3_P3DCIRGB 1
+#define NVT_VIDEO_INFOFRAME_BYTE14_ACE0_3_BT2100_ICtCp 2
+#define NVT_VIDEO_INFOFRAME_BYTE14_ACE0_3_sRGB 3
+#define NVT_VIDEO_INFOFRAME_BYTE14_ACE0_3_defaultRGB 4
+//
+#define NVT_VIDEO_INFOFRAME_CONTENT_VIDEO 0
+#define NVT_VIDEO_INFOFRAME_CONTENT_GRAPHICS 1
+#define NVT_VIDEO_INFOFRAME_CONTENT_PHOTO 2
+#define NVT_VIDEO_INFOFRAME_CONTENT_CINEMA 3
+#define NVT_VIDEO_INFOFRAME_CONTENT_GAME 4
+#define NVT_VIDEO_INFOFRAME_CONTENT_LAST 4
+
+#pragma pack(1)
+typedef struct
+{
+    // byte 1
+    struct
+    {
+        NvU8 scanInfo : 2;
+        NvU8 barInfo : 2;
+        NvU8 activeFormatInfoPresent : 1;
+        NvU8 colorSpace : 2;
+        NvU8 rsvd_bits_byte1 : 1;
+    } byte1;
+
+    // byte 2
+    struct
+    {
+        NvU8 activeFormatAspectRatio : 4;
+        NvU8 picAspectRatio : 2;
+        NvU8 colorimetry : 2;
+    } byte2;
+
+    // byte 3
+    struct
+    {
+        NvU8 nonuniformScaling : 2;
+        NvU8 rgbQuantizationRange : 2;
+        NvU8 extendedColorimetry : 3;
+        NvU8 itContent : 1;
+    } byte3;
+
+    // byte 4
+    struct
+    {
+        NvU8 vic : 7;
+        NvU8 rsvd_bits_byte4 : 1;
+    } byte4;
+
+    // byte 5
+    struct
+    {
+        NvU8 pixelRepeat : 4;
+        NvU8 contentTypes : 2;
+        NvU8 yccQuantizationRange : 2;
+    } byte5;
+
+    NvU16 topBar;
+    NvU16 bottomBar;
+    NvU16 leftBar;
+    NvU16 rightBar;
+
+    // byte 14~15
+    struct
+    {
+        NvU8 fr_low : 4;
+        NvU8 ace : 4;
+    } byte14;
+
+    struct
+    {
+        NvU8 rid : 6;
+        NvU8 fr_hi : 1;
+        NvU8 rsvd_bits_byte15 : 1;
+    }byte15;
+} NVT_VIDEO_INFOFRAME_OVERRIDE;
+#pragma pack()
+
+typedef struct
+{
+    NvU32 vic : 8;
+    NvU32 pixelRepeat : 5;
+    NvU32 colorSpace : 3;
+    NvU32 colorimetry : 3;
+    NvU32 extendedColorimetry : 4;
+    NvU32 rgbQuantizationRange : 3;
+    NvU32 yccQuantizationRange : 3;
+    NvU32 itContent : 2;
+    NvU32 contentTypes : 3;
+    NvU32 scanInfo : 3;
+    NvU32 activeFormatInfoPresent : 2;
+    NvU32 activeFormatAspectRatio : 5;
+    NvU32 picAspectRatio : 3;
+    NvU32 nonuniformScaling : 3;
+    NvU32 barInfo : 3;
+    NvU32 top_bar : 17;
+    NvU32 bottom_bar : 17;
+    NvU32 left_bar : 17;
+    NvU32 right_bar : 17;
+    NvU32 Future17 : 2;
+    NvU32 Future47 : 2;
+} NVT_INFOFRAME_VIDEO;
+
+
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE1_S1S0_MASK 0x3
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE1_B1B0_MASK 0x3
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE1_A0_MASK 0x1 // active format info present
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE1_Y1Y0_MASK 0x3
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE1_Y2Y1Y0_MASK 0x7
+//
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE2_R3R2R1R0_MASK 0xF // active format aspect ratio
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE2_M1M0_MASK 0x3 // picture aspect ratio
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE2_C1C0_MASK 0x3 // colorimetry
+//
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE3_SC_MASK 0x3 // non-uniform scaling
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE3_Q1Q0_MASK 0x3 // quantization
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE3_EC_MASK 0x7 // extended colorimetry
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE3_ITC_MASK 0x1 // IT content
+//
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE4_VIC_MASK 0x7F // video identification code
+//
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE5_PR_MASK 0xF // pixel repetitions
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE5_CN1CN0_MASK 0x3
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE5_YQ1YQ0_MASK 0x3 // YCC quantization
+//
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE14_FR0FR3_MASK 0xF // Frame rate 0-3 bits in Byte14
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE14_ACE0ACE3_MASK 0xF // Additional Colorimetry Extension
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE15_RID0RID5_MASK 0x3F // Resolution Identification
+#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE15_FR4_MASK 0x1 // Frame rate 4th bit in Byte 15
+
+// audio infoframe structure
+typedef struct tagNVT_AUDIO_INFOFRAME
+{
+    NvU8 type;
+    NvU8 version;
+    NvU8 length;
+
+    // byte 1~5
+    NvU8 byte1;
+    NvU8 byte2;
+    NvU8 byte3;
+    NvU8 byte4;
+    NvU8 byte5;
+
+    // byte 6~10
+    NvU8 rsvd_byte6;
+    NvU8 rsvd_byte7;
+    NvU8 rsvd_byte8;
+    NvU8 rsvd_byte9;
+    NvU8 rsvd_byte10;
+
+}NVT_AUDIO_INFOFRAME;
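+
+// Editorial usage sketch (not from the original sources): pulling fields out
+// of a raw AVI infoframe with the BYTE1/BYTE4 masks defined above; version
+// checks and validation are omitted, and the NVT_EXAMPLE_SKETCHES guard is
+// hypothetical, so the sketches are not built.
+#if defined(NVT_EXAMPLE_SKETCHES)
+static NvU8 NvTiming_ExampleGetAviVic(const NVT_VIDEO_INFOFRAME *pVif)
+{
+    return (NvU8)((pVif->byte4 & NVT_VIDEO_INFOFRAME_BYTE4_VIC_MASK) >>
+                  NVT_VIDEO_INFOFRAME_BYTE4_VIC_SHIFT);
+}
+
+static NvU8 NvTiming_ExampleGetAviColorFormat(const NVT_VIDEO_INFOFRAME *pVif)
+{
+    // Y2Y1Y0 (CTA-861-G and later), e.g. NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_RGB
+    return (NvU8)((pVif->byte1 & NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_MASK) >>
+                  NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_SHIFT);
+}
+#endif // NVT_EXAMPLE_SKETCHES
+
+// self refresh infoframe structure. See SR spec.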
+typedef struct tagNVT_SR_INFOFRAME +{ + NvU8 type; + NvU8 version; + NvU8 length; + + NvU8 data; + +}NVT_SR_INFOFRAME; + +// +#define NVT_AUDIO_INFOFRAME_VERSION_1 1 +// +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_MASK 0x07 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_SHIFT 0 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_REF_HEADER 0 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_2CH 1 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_DO_NOT_USE 2 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_4CH 3 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_5CH 4 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_6CH 5 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_7CH 6 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_8CH 7 +// +#define NVT_AUDIO_INFOFRAME_BYTE1_RESERVED_MASK 0x08 +#define NVT_AUDIO_INFOFRAME_BYTE1_RESERVED_SHIFT 3 +// +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_MASK 0xF0 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_SHIFT 4 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_REF_HEADER 0 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_PCM 1 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_DO_NOT_USE 2 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_MPEG1 3 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_MP3 4 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_MPEG2 5 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_AAC 6 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_DTS 7 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_USE_CODING_EXTENSION_TYPE 15 +// +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_MASK 0x3 +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_SHIFT 0 +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_REF_HEADER 0 +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_16BIT 1 +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_20BIT 2 +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_24BIT 3 +// +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_MASK 0x1C +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_SHIFT 2 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_HEADER 0 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_32KHz 1 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_44KHz 2 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_48KHz 3 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_88KHz 4 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_96KHz 5 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_176KHz 6 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_192KHz 7 +// +#define NVT_AUDIO_INFOFRAME_BYTE2_RESERVED_MASK 0xE0 +#define NVT_AUDIO_INFOFRAME_BYTE2_RESERVED_SHIFT 5 +// +#define NVT_AUDIO_INFOFRAME_BYTE3_CXT_MASK 0x1F +#define NVT_AUDIO_INFOFRAME_BYTE3_CXT_SHIFT 0 +#define NVT_AUDIO_INFOFRAME_BYTE3_CXT_RESERVE31 31 +// +#define NVT_AUDIO_INFOFRAME_BYTE3_RESERVED_MASK 0xE0 +#define NVT_AUDIO_INFOFRAME_BYTE3_RESERVED_SHIFT 5 +// +#define NVT_AUDIO_INFOFRAME_BYTE4_CA_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE4_CA_SHIFT 0 +#define NVT_AUDIO_INFOFRAME_BYTE4_CA_FRW_FLW_RR_RL_FC_LFE_FR_FL 49 +// +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_MASK 0x03 +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_SHIFT 0 +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_NO_DATA 0 +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_0DB 1 +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_PLUS10DB 2 +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_RESERVED03 3 +// +#define NVT_AUDIO_INFOFRAME_BYTE5_RESERVED_MASK 0x4 +#define NVT_AUDIO_INFOFRAME_BYTE5_RESERVED_SHIFT 2 +// +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_MASK 0x78 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_SHIFT 3 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_0dB 0 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_1dB 1 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_2dB 2 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_3dB 3 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_4dB 4 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_5dB 5 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_6dB 6 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_7dB 7 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_8dB 8 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_9dB 9 +#define 
NVT_AUDIO_INFOFRAME_BYTE5_LSV_10dB 10 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_11dB 11 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_12dB 12 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_13dB 13 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_14dB 14 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_15dB 15 +// +#define NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_MASK 0x80 +#define NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_SHIFT 7 +#define NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_PERMITTED 0 +#define NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_PROHIBITED 1 +// +#define NVT_AUDIO_INFOFRAME_BYTE6_RESERVED_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE6_RESERVED_SHIFT 0 +// +// +#define NVT_AUDIO_INFOFRAME_BYTE7_RESERVED_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE7_RESERVED_SHIFT 0 +// +/// +#define NVT_AUDIO_INFOFRAME_BYTE8_RESERVED_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE8_RESERVED_SHIFT 0 +// +// +#define NVT_AUDIO_INFOFRAME_BYTE9_RESERVED_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE9_RESERVED_SHIFT 0 +// +// +#define NVT_AUDIO_INFOFRAME_BYTE10_RESERVED_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE10_RESERVED_SHIFT 0 +// + +typedef struct +{ + // byte 1 + struct + { + NvU8 channelCount : 3; + NvU8 rsvd_bits_byte1 : 1; + NvU8 codingType : 4; + } byte1; + + // byte 2 + struct + { + NvU8 sampleSize : 2; + NvU8 sampleRate : 3; + NvU8 rsvd_bits_byte2 : 3; + } byte2; + + + // byte 3 + struct + { + NvU8 codingExtensionType : 5; + NvU8 rsvd_bits_byte3 : 3; + } byte3; + + // byte 4 + NvU8 speakerPlacement; + + // byte 5 + struct + { + NvU8 lfePlaybackLevel : 2; + NvU8 rsvd_bits_byte5 : 1; + NvU8 levelShift : 4; + NvU8 downmixInhibit : 1; + } byte5; + + // byte 6~10 + NvU8 rsvd_byte6; + NvU8 rsvd_byte7; + NvU8 rsvd_byte8; + NvU8 rsvd_byte9; + NvU8 rsvd_byte10; +} NVT_AUDIO_INFOFRAME_OVERRIDE; + +typedef struct +{ + NvU32 codingType : 5; + NvU32 codingExtensionType : 6; + NvU32 sampleSize : 3; + NvU32 sampleRate : 4; + NvU32 channelCount : 4; + NvU32 speakerPlacement : 9; + NvU32 downmixInhibit : 2; + NvU32 lfePlaybackLevel : 3; + NvU32 levelShift : 5; + NvU32 Future12 : 2; + NvU32 Future2x : 4; + NvU32 Future3x : 4; + NvU32 Future52 : 2; + NvU32 Future6 : 9; + NvU32 Future7 : 9; + NvU32 Future8 : 9; + NvU32 Future9 : 9; + NvU32 Future10 : 9; +} NVT_INFOFRAME_AUDIO; + +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE1_CC_MASK 0x07 +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE1_CT_MASK 0x0F +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE2_SS_MASK 0x03 +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE2_SF_MASK 0x03 +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE3_CXT_MASK 0x1F +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE4_CA_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE5_LFEPBL_MASK 0x03 +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE5_LSV_MASK 0x0F +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE5_DM_INH_MASK 0x01 + +// +// HDMI 1.3a GCP, ColorDepth +// +#define NVT_HDMI_COLOR_DEPTH_DEFAULT 0x0 +#define NVT_HDMI_COLOR_DEPTH_RSVD1 0x1 +#define NVT_HDMI_COLOR_DEPTH_RSVD2 0x2 +#define NVT_HDMI_COLOR_DEPTH_RSVD3 0x3 +#define NVT_HDMI_COLOR_DEPTH_24 0x4 +#define NVT_HDMI_COLOR_DEPTH_30 0x5 +#define NVT_HDMI_COLOR_DEPTH_36 0x6 +#define NVT_HDMI_COLOR_DEPTH_48 0x7 +#define NVT_HDMI_COLOR_DEPTH_RSVD8 0x8 +#define NVT_HDMI_COLOR_DEPTH_RSVD9 0x9 +#define NVT_HDMI_COLOR_DEPTH_RSVD10 0xA +#define NVT_HDMI_COLOR_DEPTH_RSVD11 0xB +#define NVT_HDMI_COLOR_DEPTH_RSVD12 0xC +#define NVT_HDMI_COLOR_DEPTH_RSVD13 0xD +#define NVT_HDMI_COLOR_DEPTH_RSVD14 0xE +#define NVT_HDMI_COLOR_DEPTH_RSVD15 0xF + +// HDMI 1.3a GCP, PixelPacking Phase +#define NVT_HDMI_PIXELPACKING_PHASE4 0x0 +#define NVT_HDMI_PIXELPACKING_PHASE1 0x1 +#define 
NVT_HDMI_PIXELPACKING_PHASE2 0x2 +#define NVT_HDMI_PIXELPACKING_PHASE3 0x3 +#define NVT_HDMI_PIXELPACKING_RSVD4 0x4 +#define NVT_HDMI_PIXELPACKING_RSVD5 0x5 +#define NVT_HDMI_PIXELPACKING_RSVD6 0x6 +#define NVT_HDMI_PIXELPACKING_RSVD7 0x7 +#define NVT_HDMI_PIXELPACKING_RSVD8 0x8 +#define NVT_HDMI_PIXELPACKING_RSVD9 0x9 +#define NVT_HDMI_PIXELPACKING_RSVD10 0xA +#define NVT_HDMI_PIXELPACKING_RSVD11 0xB +#define NVT_HDMI_PIXELPACKING_RSVD12 0xC +#define NVT_HDMI_PIXELPACKING_RSVD13 0xD +#define NVT_HDMI_PIXELPACKING_RSVD14 0xE +#define NVT_HDMI_PIXELPACKING_RSVD15 0xF + +#define NVT_HDMI_RESET_DEFAULT_PIXELPACKING_PHASE 0x0 +#define NVT_HDMI_SET_DEFAULT_PIXELPACKING_PHASE 0x1 + +#define NVT_HDMI_GCP_SB1_CD_SHIFT 0 +#define NVT_HDMI_GCP_SB1_PP_SHIFT 4 + + +// Vendor specific info frame (HDMI 1.4 specific) +typedef struct tagNVT_VENDOR_SPECIFIC_INFOFRAME_PAYLOAD +{ + // byte 1~5 + NvU8 byte1; + NvU8 byte2; + NvU8 byte3; + NvU8 byte4; + NvU8 byte5; + NvU8 optionalBytes[22]; +}NVT_VENDOR_SPECIFIC_INFOFRAME_PAYLOAD; +typedef struct tagNVT_VENDOR_SPECIFIC_INFOFRAME +{ + NVT_INFOFRAME_HEADER Header; + NVT_VENDOR_SPECIFIC_INFOFRAME_PAYLOAD Data; +} NVT_VENDOR_SPECIFIC_INFOFRAME; +// +#define NVT_HDMI_VS_INFOFRAME_VERSION_1 1 + +#define NVT_HDMI_VS_HB0_MASK 0xFF +#define NVT_HDMI_VS_HB0_SHIFT 0x00 +#define NVT_HDMI_VS_HB0_VALUE 0x01 + +#define NVT_HDMI_VS_HB1_MASK 0xFF +#define NVT_HDMI_VS_HB1_SHIFT 0x00 +#define NVT_HDMI_VS_HB1_VALUE 0x01 + +#define NVT_HDMI_VS_HB2_MASK 0xFF +#define NVT_HDMI_VS_HB2_SHIFT 0x00 +#define NVT_HDMI_VS_HB2_VALUE 0x06 + +#define NVT_HDMI_VS_BYTE1_OUI_MASK 0xff +#define NVT_HDMI_VS_BYTE1_OUI_SHIFT 0x00 +#define NVT_HDMI_VS_BYTE1_OUI_VER_1_4 0x03 +#define NVT_HDMI_VS_BYTE1_OUI_VER_2_0 0xD8 + +#define NVT_HDMI_VS_BYTE2_OUI_MASK 0xff +#define NVT_HDMI_VS_BYTE2_OUI_SHIFT 0x00 +#define NVT_HDMI_VS_BYTE2_OUI_VER_1_4 0x0C +#define NVT_HDMI_VS_BYTE2_OUI_VER_2_0 0x5D + +#define NVT_HDMI_VS_BYTE3_OUI_MASK 0xff +#define NVT_HDMI_VS_BYTE3_OUI_SHIFT 0x00 +#define NVT_HDMI_VS_BYTE3_OUI_VER_1_4 0x00 +#define NVT_HDMI_VS_BYTE3_OUI_VER_2_0 0xC4 + +// +#define NVT_HDMI_VS_BYTE4_RSVD_MASK 0x1f +#define NVT_HDMI_VS_BYTE4_RSVD_SHIFT 0x00 +#define NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_MASK 0xe0 +#define NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_SHIFT 0x05 +#define NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_NONE 0x00 +#define NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_EXT 0x01 +#define NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_3D 0x02 +// 0x03-0x07 reserved +// +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_MASK 0xff // HDMI_VID_FMT = HDMI_VID_FMT_EXT +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_SHIFT 0x00 +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_NA 0xfe +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_RSVD 0x00 +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx30Hz 0x01 +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx25Hz 0x02 +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx24Hz 0x03 +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx24Hz_SMPTE 0x04 +// 0x05-0xff reserved +// +#define NVT_HDMI_VS_BYTE5_HDMI_RSVD_MASK 0x07 // HDMI_VID_FMT = HDMI_VID_FMT_3D +#define NVT_HDMI_VS_BYTE5_HDMI_RSVD_SHIFT 0x00 +#define NVT_HDMI_VS_BYTE5_3D_META_PRESENT_MASK 0x01 +#define NVT_HDMI_VS_BYTE5_3D_META_PRESENT_SHIFT 0x03 +#define NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_NOTPRES 0x00 // HDMI Metadata is not present +#define NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_PRES 0x01 // HDMI Metadata is present +#define NVT_HDMI_VS_BYTE5_ALLM_MODE_MASK 0x02 // ALLM is field of length 1 bit at Bit Number 1 +#define NVT_HDMI_VS_BYTE5_ALLM_MODE_DIS 0x00 +#define NVT_HDMI_VS_BYTE5_ALLM_MODE_EN 0x01 +#define NVT_HDMI_VS_BYTE5_ALLM_MODE_SHIFT 0x01 // 
ALLM is byte5 bit position 1, so shift 1 bit
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_MASK 0xf0
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_SHIFT 0x04
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_NA 0xfe
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK 0x00
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_FIELD_ALT 0x01
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_LINE_ALT 0x02
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEFULL 0x03
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTH 0x04
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTHGFX 0x05
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM 0x06
+//0x07 reserved
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF 0x08
+//0x09-0x0f reserved
+//
+// bytes 6-21 are optional depending on the 3D mode & the presence/absence of metadata
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_RSVD_MASK 0x0f // HDMI_VID_FMT = HDMI_VID_FMT_3D
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_RSVD_SHIFT 0x00
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_MASK 0xf0
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SHIFT 0x04
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_NA 0xfe // Extended data is not applicable
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH 0x01 // Horizontal subsampling; 1.4a defines a single subsampling mode vs. the 4 in 1.4.
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_H_OL_OR 0x00 // Horizontal subsampling Odd Left Odd Right
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_H_OL_ER 0x01 // Horizontal subsampling Odd Left Even Right
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_H_EL_OR 0x02 // Horizontal subsampling Even Left Odd Right
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_H_EL_ER 0x03 // Horizontal subsampling Even Left Even Right
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_Q_OL_OR 0x04 // Quincunx matrix Odd Left Odd Right
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_Q_OL_ER 0x05 // Quincunx matrix Odd Left Even Right
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_Q_EL_OR 0x06 // Quincunx matrix Even Left Odd Right
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_Q_EL_ER 0x07 // Quincunx matrix Even Left Even Right
+//0x08-0x0f reserved
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_MASK 0xf0 // HDMI_VID_FMT = HDMI_VID_FMT_3D; HDMI_3D_META_PRESENT = 1
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_SHIFT 0x04 //
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_NONE 0x00 // length when no metadata is present
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX 0x08 // length of parallax data
+
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_MASK 0x0f // HDMI_VID_FMT = HDMI_VID_FMT_3D
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_SHIFT 0x00
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_PARALLAX 0x00 // parallax metadata in the frame
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_NA 0xfe // no metadata in the frame
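+
+// Editorial usage sketch (not from the original sources): the first three
+// payload bytes of an HDMI vendor-specific infoframe carry the IEEE OUI
+// least-significant byte first, so an HDMI 1.4 (LLC, OUI 00-0C-03) packet can
+// be recognized as below; the NVT_EXAMPLE_SKETCHES guard is hypothetical, so
+// the sketch is not built.
+#if defined(NVT_EXAMPLE_SKETCHES)
+static NvBool NvTiming_ExampleIsHdmi14Vsif(const NVT_VENDOR_SPECIFIC_INFOFRAME *pVsif)
+{
+    return (pVsif->Data.byte1 == NVT_HDMI_VS_BYTE1_OUI_VER_1_4) &&
+           (pVsif->Data.byte2 == NVT_HDMI_VS_BYTE2_OUI_VER_1_4) &&
+           (pVsif->Data.byte3 == NVT_HDMI_VS_BYTE3_OUI_VER_1_4);
+}
+#endif // NVT_EXAMPLE_SKETCHES
+
+#define NVT_HDMI_VS_BYTENv_RSVD_MASK 0xff // last byte of the infoframe; its position moves depending on HDMI_VID_FMT, 3D metadata present, and 3D_Metadata type.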
+#define NVT_HDMI_VS_BYTENv_RSVD_SHIFT 0x00 +#define NVT_HDMI_VS_BYTENv_RSVD 0x00 + + +// Extended Metadata Packet (HDMI 2.1 specific) +typedef struct tagNVT_EXTENDED_METADATA_PACKET_INFOFRAME_PAYLOAD +{ + // byte 1~7 + NvU8 byte1; + NvU8 byte2; + NvU8 byte3; + NvU8 byte4; + NvU8 byte5; + NvU8 byte6; + NvU8 byte7; + + NvU8 metadataBytes[21]; +} NVT_EXTENDED_METADATA_PACKET_INFOFRAME_PAYLOAD; + +typedef struct tagNVT_EXTENDED_METADATA_PACKET_INFOFRAME +{ + NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER Header; + NVT_EXTENDED_METADATA_PACKET_INFOFRAME_PAYLOAD Data; +} NVT_EXTENDED_METADATA_PACKET_INFOFRAME; + +#define NVT_HDMI_EMP_BYTE1_RSVD_MASK 0x01 +#define NVT_HDMI_EMP_BYTE1_RSVD_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE1_SYNC_MASK 0x02 +#define NVT_HDMI_EMP_BYTE1_SYNC_SHIFT 1 +#define NVT_HDMI_EMP_BYTE1_SYNC_DISABLE 0 +#define NVT_HDMI_EMP_BYTE1_SYNC_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE1_VFR_MASK 0x04 +#define NVT_HDMI_EMP_BYTE1_VFR_SHIFT 2 +#define NVT_HDMI_EMP_BYTE1_VFR_DISABLE 0 +#define NVT_HDMI_EMP_BYTE1_VFR_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE1_AFR_MASK 0x08 +#define NVT_HDMI_EMP_BYTE1_AFR_SHIFT 3 +#define NVT_HDMI_EMP_BYTE1_AFR_DISABLE 0 +#define NVT_HDMI_EMP_BYTE1_AFR_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_MASK 0x30 +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_SHIFT 4 +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_PERIODIC_PSEUDO_STATIC 0 +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_PERIODIC_DYNAMIC 1 +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_UNIQUE 2 +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_RSVD 3 + +#define NVT_HDMI_EMP_BYTE1_END_MASK 0x40 +#define NVT_HDMI_EMP_BYTE1_END_SHIFT 6 +#define NVT_HDMI_EMP_BYTE1_END_DISABLE 0 +#define NVT_HDMI_EMP_BYTE1_END_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE1_NEW_MASK 0x80 +#define NVT_HDMI_EMP_BYTE1_NEW_SHIFT 7 +#define NVT_HDMI_EMP_BYTE1_NEW_DISABLE 0 +#define NVT_HDMI_EMP_BYTE1_NEW_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE2_RSVD_MASK 0xff +#define NVT_HDMI_EMP_BYTE2_RSVD_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_MASK 0xff +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_SHIFT 0 +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_VENDOR_SPECIFIC 0 +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_SPEC_DEFINED 1 +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_CTA_DEFINED 2 +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_VESA_DEFINED 3 + +#define NVT_HDMI_EMP_BYTE4_DATA_SET_TAG_MSB_MASK 0xff +#define NVT_HDMI_EMP_BYTE4_DATA_SET_TAG_MSB_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE5_DATA_SET_TAG_LSB_MASK 0xff +#define NVT_HDMI_EMP_BYTE5_DATA_SET_TAG_LSB_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE6_DATA_SET_LENGTH_MSB_MASK 0xff +#define NVT_HDMI_EMP_BYTE6_DATA_SET_LENGTH_MSB_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE7_DATA_SET_LENGTH_LSB_MASK 0xff +#define NVT_HDMI_EMP_BYTE7_DATA_SET_LENGTH_LSB_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_MASK 0x01 +#define NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_SHIFT 0 +#define NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_DISABLE 0 +#define NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_ENABLE 1 +#define NVT_HDMI_EMP_BYTE8_MD0_M_CONST_MASK 0x02 +#define NVT_HDMI_EMP_BYTE8_MD0_M_CONST_SHIFT 1 +#define NVT_HDMI_EMP_BYTE8_MD0_QMS_EN_MASK 0x04 +#define NVT_HDMI_EMP_BYTE8_MD0_QMS_EN_SHIFT 2 +#define NVT_HDMI_EMP_BYTE8_MD0_QMS_EN_DISABLE 0 +#define NVT_HDMI_EMP_BYTE8_MD0_QMS_EN_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE8_MD1_BASE_VFRONT_MASK 0xff +#define NVT_HDMI_EMP_BYTE8_MD1_BASE_VFRONT_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE8_MD2_RB_MASK 0x04 +#define NVT_HDMI_EMP_BYTE8_MD2_RB_SHIFT 2 +#define NVT_HDMI_EMP_BYTE8_MD2_RB_DISABLE 0 +#define NVT_HDMI_EMP_BYTE8_MD2_RB_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE8_MD2_NEXT_TFR_MASK 0xf8 +#define 
NVT_HDMI_EMP_BYTE8_MD2_NEXT_TFR_SHIFT 3 + +#define NVT_HDMI_EMP_BYTE8_MD2_BASE_RR_MSB_MASK 0x03 +#define NVT_HDMI_EMP_BYTE8_MD2_BASE_RR_MSB_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE8_MD3_BASE_RR_LSB_MASK 0xff +#define NVT_HDMI_EMP_BYTE8_MD3_BASE_RR_LSB_SHIFT 0 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_PACKET_TYPE 0x22 +#define NVT_DP_ADAPTIVE_SYNC_SDP_VERSION 0x2 +#define NVT_DP_ADAPTIVE_SYNC_SDP_LENGTH 0x9 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_VARIABLE_FRAME_RATE_MASK 0x3 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_VARIABLE_FRAME_RATE_SHIFT 0 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_VARIABLE_FRAME_RATE_AVT_VARIABLE 0 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_VARIABLE_FRAME_RATE_AVT_FIXED 1 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_VARIABLE_FRAME_RATE_FAVT_TARGET_NOT_REACHED 2 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_VARIABLE_FRAME_RATE_FAVT_TARGET_REACHED 3 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_DISABLE_PR_ACTIVE_MASK 0x4 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_DISABLE_PR_ACTIVE_SHIFT 2 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_DISABLE_PR_ACTIVE_SOURCE_SINK_SYNC_ENABLED 0 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_DISABLE_PR_ACTIVE_SOURCE_SINK_SYNC_DISABLED 1 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_DISABLE_PR_ACTIVE_RFB_UPDATE_MASK 0x8 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_DISABLE_PR_ACTIVE_RFB_UPDATE_SHIFT 3 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_DISABLE_PR_ACTIVE_RFB_UPDATE_NO_UPDATE 0 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_DISABLE_PR_ACTIVE_RFB_UPDATE_UPDATE 1 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_RSVD_MASK 0xf0 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB0_RSVD_SHIFT 4 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB1_MIN_VTOTAL_LSB_MASK 0xff +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB1_MIN_VTOTAL_LSB_SHIFT 0 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB2_MIN_VTOTAL_MSB_MASK 0xff +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB2_MIN_VTOTAL_MSB_SHIFT 0 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB3_TARGET_RR_LSB_MASK 0xff +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB3_TARGET_RR_LSB_SHIFT 0 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_TARGET_RR_MSB_MASK 0x03 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_TARGET_RR_MSB_SHIFT 0 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_RSVD_MASK 0x1c +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_RSVD_SHIFT 2 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_TARGET_RR_DIVIDER_MASK 0x20 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_TARGET_RR_DIVIDER_SHIFT 5 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_TARGET_RR_DIVIDER_DISABLE 0 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_TARGET_RR_DIVIDER_ENABLE 1 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_SUCCESSIVE_FRAME_INC_MASK 0x40 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_SUCCESSIVE_FRAME_INC_SHIFT 6 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_SUCCESSIVE_FRAME_INC_DISABLE 0 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_SUCCESSIVE_FRAME_INC_ENABLE 1 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_SUCCESSIVE_FRAME_DEC_MASK 0x80 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_SUCCESSIVE_FRAME_DEC_SHIFT 7 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_SUCCESSIVE_FRAME_DEC_DISABLE 0 +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB4_SUCCESSIVE_FRAME_DEC_ENABLE 1 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB5_DURATION_INCREASE_CONSTRAINT_LSB_MASK 0xff +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB5_DURATION_INCREASE_CONSTRAINT_LSB_SHIFT 0 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB6_DURATION_INCREASE_CONSTRAINT_MSB_MASK 0xff +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB6_DURATION_INCREASE_CONSTRAINT_MSB_SHIFT 0 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB7_PR_COASTING_VTOTAL_LSB_MASK 0xff +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB7_PR_COASTING_VTOTAL_LSB_SHIFT 0 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB8_PR_COASTING_VTOTAL_MSB_MASK 0xff +#define 
NVT_DP_ADAPTIVE_SYNC_SDP_DB8_PR_COASTING_VTOTAL_MSB_SHIFT 0 + +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB1_MIN_VTOTAL_BYTE2_MASK 0xff +#define NVT_DP_ADAPTIVE_SYNC_SDP_DB1_MIN_VTOTAL_BYTE2_SHIFT 0 + +typedef struct tagNVT_ADAPTIVE_SYNC_SDP_HEADER +{ + NvU8 hb0; + NvU8 type; + NvU8 version; + NvU8 length; +}NVT_ADAPTIVE_SYNC_SDP_HEADER; + +typedef struct tagNVT_ADAPTIVE_SYNC_SDP_PAYLOAD +{ + NvU8 db0; // operatingMode + NvU8 db1; // minVTotalLSB + NvU8 db2; // minVTotalMSB + NvU8 db3; // targetRefreshRateLSB + NvU8 db4; // targetRefreshRateMSB, rsvd, targetRRDivider, frameInc/Dec Config + NvU8 db5; // frameDurationIncMs + NvU8 db6; // frameDurationDecreaseMs + NvU8 db7; // coastingVTotalPrLSB + NvU8 db8; // coastingVTotalPrMSB + + NvU8 rsvd[23]; +}NVT_ADAPTIVE_SYNC_SDP_PAYLOAD; + +typedef struct tagADAPTIVE_SYNC_SDP +{ + NVT_ADAPTIVE_SYNC_SDP_HEADER header; + NVT_ADAPTIVE_SYNC_SDP_PAYLOAD payload; +}NVT_ADAPTIVE_SYNC_SDP; + +// the Vendor-Specific-Data-Block header +typedef struct tagNVT_CEA861_VSDB_HEADER +{ + // byte 0 + NvU32 length : 5; + NvU32 vendorSpecificTag : 3; + // byte 1-3 + NvU32 ieee_id : 24; + +} NVT_CEA861_VSDB_HEADER; + +// HDMI LLC Vendor-Specific data block +// from HDMI 1.4 spec (superset of VSDB from HDMI 1.3a spec) +typedef struct tagNVT_CEA861_LATENCY +{ + NvU8 Video_Latency; + NvU8 Audio_Latency; + +} NVT_CEA861_LATENCY; + +typedef struct tagNVT_HDMI_VIDEO +{ + NvU8 Rsvd_1 : 3; + NvU8 ImageSize : 2; + NvU8 ThreeD_Multi_Present : 2; + NvU8 ThreeD_Present : 1; + NvU8 HDMI_3D_Len : 5; + NvU8 HDMI_VIC_Len : 3; +} NVT_HDMI_VIDEO; + +typedef struct tagNVT_HDMI_VIC_LIST +{ + NvU8 HDMI_VIC[1]; // note: list length is actually specified in HDMI_VIC_Len +} NVT_HDMI_VIC_LIST; + +typedef struct tagNVT_3D_STRUCT_ALL +{ + NvU8 ThreeDStructALL0_FramePacking : 1; + NvU8 ThreeDStructALL1_FieldAlt : 1; + NvU8 ThreeDStructALL2_LineAlt : 1; + NvU8 ThreeDStructALL3_SSFull : 1; + NvU8 ThreeDStructALL4_LDepth : 1; + NvU8 ThreeDStructALL5_LDepthGFX : 1; + NvU8 ThreeDStructALL6_TopBottom : 1; + NvU8 ThreeDStructALL7 : 1; + NvU8 ThreeDStructALL8_SSHalf : 1; + NvU8 Rsvd_1 : 7; +} NVT_3D_STRUCT_ALL; + +typedef struct tagNVT_3D_MULTI_LIST +{ + NvU8 ThreeD_Structure : 4; + NvU8 TwoD_VIC_order : 4; + NvU8 Rsvd_2 : 4; + NvU8 ThreeD_Detail : 4; +} NVT_3D_MULTI_LIST; + +#define NVT_3D_DETAILS_ALL 0x00 +#define NVT_3D_DETAILS_ALL_HORIZONTAL 0x01 +#define NVT_3D_DETAILS_HORIZONTAL_ODD_LEFT_ODD_RIGHT 0x02 +#define NVT_3D_DETAILS_HORIZONTAL_ODD_LEFT_EVEN_RIGHT 0x03 +#define NVT_3D_DETAILS_HORIZONTAL_EVEN_LEFT_ODD_RIGHT 0x04 +#define NVT_3D_DETAILS_HORIZONTAL_EVEN_LEFT_EVEN_RIGHT 0x05 +#define NVT_3D_DETAILS_ALL_QUINCUNX 0x06 +#define NVT_3D_DETAILS_QUINCUNX_ODD_LEFT_ODD_RIGHT 0x07 +#define NVT_3D_DETAILS_QUINCUNX_ODD_LEFT_EVEN_RIGHT 0x08 +#define NVT_3D_DETAILS_QUINCUNX_EVEN_LEFT_ODD_RIGHT 0x09 +#define NVT_3D_DETAILS_QUINCUNX_EVEN_LEFT_EVEN_RIGHT 0x0a + +typedef struct tagNVT_HDMI_LLC_VSDB_PAYLOAD +{ + // 1st byte + NvU8 B : 4; + NvU8 A : 4; + // 2nd byte + NvU8 D : 4; + NvU8 C : 4; + // 3rd byte + NvU8 DVI_Dual : 1; + NvU8 Rsvd_3 : 2; + NvU8 DC_Y444 : 1; + NvU8 DC_30bit : 1; + NvU8 DC_36bit : 1; + NvU8 DC_48bit : 1; + NvU8 Supports_AI : 1; + // 4th byte + NvU8 Max_TMDS_Clock; + // 5th byte + NvU8 CNC0 : 1; + NvU8 CNC1 : 1; + NvU8 CNC2 : 1; + NvU8 CNC3 : 1; + NvU8 Rsvd_5 : 1; + NvU8 HDMI_Video_present : 1; + NvU8 I_Latency_Fields_Present : 1; + NvU8 Latency_Fields_Present : 1; + + // the rest of the frame may contain optional data as defined + // in the NVT_CEA861_LATENCY, HDMI_VIDEO, HDMI_VIC, 
NVT_3D_STRUCT_ALL & 3D_MULTI_LIST structures + // and as specified by the corresponding control bits + NvU8 Data[NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH - 5]; + +} NVT_HDMI_LLC_VSDB_PAYLOAD; + +// HDMI LLC Vendor Specific Data Block +typedef struct tagNVT_HDMI_LLC_DATA +{ + NVT_CEA861_VSDB_HEADER header; + NVT_HDMI_LLC_VSDB_PAYLOAD payload; +} NVT_HDMI_LLC_DATA; + +typedef struct tagNVT_NVDA_VSDB_PAYLOAD +{ + NvU8 opcode; // Nvidia specific opcode - please refer to VRR monitor spec v17 + NvU8 vrrMinRefreshRate; // Minimum refresh rate supported by this monitor +} NVT_NVDA_VSDB_PAYLOAD; + +// NVIDIA Vendor Specific Data Block +typedef struct tagNVT_NVDA_VSDB_DATA +{ + NVT_CEA861_VSDB_HEADER header; + NVT_NVDA_VSDB_PAYLOAD payload; +} NVT_NVDA_VSDB_DATA; + +typedef struct _NVT_MSFT_VSDB_PAYLOAD +{ + NvU8 version; + NvU8 primaryUseCase : 5; + NvU8 thirdPartyUsage : 1; + NvU8 desktopUsage : 1; + NvU8 reserved : 1; + NvU8 containerId[MSFT_VSDB_CONTAINER_ID_SIZE]; +} NVT_MSFT_VSDB_PAYLOAD; + +typedef struct _NVT_MSFT_VSDB_DATA +{ + NVT_CEA861_VSDB_HEADER header; + NVT_MSFT_VSDB_PAYLOAD payload; +} NVT_MSFT_VSDB_DATA; + +#define NVT_MSFT_VSDB_BLOCK_SIZE (sizeof(NVT_MSFT_VSDB_DATA)) + +typedef struct tagNVT_HDMI_FORUM_VSDB_PAYLOAD +{ + // first byte + NvU8 Version; + // second byte + NvU8 Max_TMDS_Character_Rate; + // third byte + NvU8 ThreeD_Osd_Disparity : 1; + NvU8 Dual_View : 1; + NvU8 Independent_View : 1; + NvU8 Lte_340mcsc_Scramble : 1; + NvU8 CCBPCI : 1; + NvU8 CABLE_STATUS : 1; + NvU8 RR_Capable : 1; + NvU8 SCDC_Present : 1; + // fourth byte + NvU8 DC_30bit_420 : 1; + NvU8 DC_36bit_420 : 1; + NvU8 DC_48bit_420 : 1; + NvU8 UHD_VIC : 1; + NvU8 Max_FRL_Rate : 4; + // fifth byte + NvU8 FAPA_start_location : 1; + NvU8 ALLM : 1; + NvU8 FVA : 1; + NvU8 CNMVRR : 1; + NvU8 CinemaVRR : 1; + NvU8 M_delta : 1; + NvU8 QMS : 1; + NvU8 FAPA_End_Extended : 1; + + // sixth byte + NvU8 VRR_min : 6; + NvU8 VRR_max_high : 2; + // seventh byte + NvU8 VRR_max_low : 8; + // eighth byte + NvU8 DSC_10bpc : 1; + NvU8 DSC_12bpc : 1; + NvU8 DSC_16bpc : 1; + NvU8 DSC_All_bpp : 1; + NvU8 QMS_TFR_min : 1; + NvU8 QMS_TFR_max : 1; + NvU8 DSC_Native_420 : 1; + NvU8 DSC_1p2 : 1; + // ninth byte + NvU8 DSC_MaxSlices : 4; + NvU8 DSC_Max_FRL_Rate : 4; + // tenth byte + NvU8 DSC_totalChunkKBytes : 6; + NvU8 Rsvd_4 : 2; +} NVT_HDMI_FORUM_VSDB_PAYLOAD; + +// HDMI Forum Vendor Specific Data Block +typedef struct tagNVT_HDMI_FORUM_DATA +{ + NVT_CEA861_VSDB_HEADER header; + NVT_HDMI_FORUM_VSDB_PAYLOAD payload; +} NVT_HDMI_FORUM_DATA; + +// +// +// Video Capability Data Block (VCDB) +typedef struct _NV_ESC_MONITOR_CAPS_VCDB +{ + NvU8 quantizationRangeYcc : 1; + NvU8 quantizationRangeRgb : 1; + NvU8 scanInfoPreferredVideoFormat : 2; + NvU8 scanInfoITVideoFormats : 2; + NvU8 scanInfoCEVideoFormats : 2; +} NVT_HDMI_VCDB_DATA; + +// +// +//*********************************************************** +// Dynamic Range and Mastering Infoframe (HDR) +//*********************************************************** +// +typedef struct tagNVT_HDR_INFOFRAME_MASTERING_DATA +{ + NvU16 displayPrimary_x0; //!< x coordinate of color primary 0 (e.g. Red) of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + NvU16 displayPrimary_y0; //!< y coordinate of color primary 0 (e.g. Red) of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + + NvU16 displayPrimary_x1; //!< x coordinate of color primary 1 (e.g. Green) of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + NvU16 displayPrimary_y1; //!< y coordinate of color primary 1 (e.g. 
Green) of mastering display ([0x0000-0xC350] = [0.0 - 1.0])
+
+    NvU16 displayPrimary_x2; //!< x coordinate of color primary 2 (e.g. Blue) of mastering display ([0x0000-0xC350] = [0.0 - 1.0])
+    NvU16 displayPrimary_y2; //!< y coordinate of color primary 2 (e.g. Blue) of mastering display ([0x0000-0xC350] = [0.0 - 1.0])
+
+    NvU16 displayWhitePoint_x; //!< x coordinate of white point of mastering display ([0x0000-0xC350] = [0.0 - 1.0])
+    NvU16 displayWhitePoint_y; //!< y coordinate of white point of mastering display ([0x0000-0xC350] = [0.0 - 1.0])
+
+    NvU16 max_display_mastering_luminance; //!< Maximum display mastering luminance ([0x0001-0xFFFF] = [1.0 - 65535.0] cd/m^2)
+    NvU16 min_display_mastering_luminance; //!< Minimum display mastering luminance ([0x0001-0xFFFF] = [0.0001 - 6.5535] cd/m^2, coded in units of 0.0001 cd/m^2)
+
+    NvU16 max_content_light_level; //!< Maximum Content Light level (MaxCLL) ([0x0001-0xFFFF] = [1.0 - 65535.0] cd/m^2)
+    NvU16 max_frame_average_light_level; //!< Maximum Frame-Average Light Level (MaxFALL) ([0x0001-0xFFFF] = [1.0 - 65535.0] cd/m^2)
+} NVT_HDR_INFOFRAME_MASTERING_DATA;
+
+#define NVT_CEA861_HDR_INFOFRAME_EOTF_SDR_GAMMA 0 //SDR Luminance Range
+#define NVT_CEA861_HDR_INFOFRAME_EOTF_HDR_GAMMA 1 //HDR Luminance Range
+#define NVT_CEA861_HDR_INFOFRAME_EOTF_ST2084 2
+#define NVT_CEA861_HDR_INFOFRAME_EOTF_Future 3
+#define NVT_CEA861_STATIC_METADATA_TYPE1_PRIMARY_COLOR_NORMALIZE_FACTOR 0xC350 // Per CEA-861.3 spec (50000; e.g. x = 0.3127 is coded as 0.3127 * 50000 = 15635)
+
+typedef struct tagNVT_HDR_INFOFRAME_PAYLOAD
+{
+    //byte 1
+    NvU8 eotf : 3;
+    NvU8 f13_17 : 5; // These bits are reserved for future use
+    //byte 2
+    NvU8 static_metadata_desc_id : 3;
+    NvU8 f23_27 : 5; // These bits are reserved for future use
+
+    NVT_HDR_INFOFRAME_MASTERING_DATA type1;
+} NVT_HDR_INFOFRAME_PAYLOAD;
+
+#pragma pack(1)
+typedef struct tagNVT_HDR_INFOFRAME
+{
+    NVT_INFOFRAME_HEADER header;
+    NVT_HDR_INFOFRAME_PAYLOAD payload;
+} NVT_HDR_INFOFRAME;
+#pragma pack()
+
+//
+//
+//***********************************************************
+// Gamut Metadata Range and Vertices structures
+//***********************************************************
+//
+// GBD structure formats
+//
+#define NVT_GAMUT_FORMAT_VERTICES 0
+#define NVT_GAMUT_FORMAT_RANGE 1
+
+typedef struct tagNVT_GAMUT_HEADER
+{
+    NvU8 type:8;
+
+    // byte 1
+    NvU8 AGSNum:4;
+    NvU8 GBD_profile:3;
+    NvU8 Next_Field:1;
+
+    // byte 2
+    NvU8 CGSNum:4;
+    NvU8 Packet_Seq:2;
+    NvU8 Rsvd:1;
+    NvU8 No_Cmt_GBD:1;
+
+} NVT_GAMUT_HEADER;
+
+typedef struct tagNVT_GAMUT_METADATA_RANGE_8BIT{
+
+    // Header
+    NvU8 GBD_Color_Space:3;
+    NvU8 GBD_Color_Precision:2;
+    NvU8 Rsvd:2;
+    NvU8 Format_Flag:1;
+
+    // Packaged data
+    NvU8 Min_Red_Data:8;
+    NvU8 Max_Red_Data:8;
+    NvU8 Min_Green_Data:8;
+    NvU8 Max_Green_Data:8;
+    NvU8 Min_Blue_Data:8;
+    NvU8 Max_Blue_Data:8;
+} NVT_GAMUT_METADATA_RANGE_8BIT;
+
+typedef struct tagNVT_GAMUT_METADATA_RANGE_10BIT{
+
+    // Header
+    NvU8 GBD_Color_Space:3;
+    NvU8 GBD_Color_Precision:2;
+    NvU8 Rsvd:2;
+    NvU8 Format_Flag:1;
+
+    // Packaged data
+    NvU8 Min_Red_Data_HI:8;
+
+    NvU8 Max_Red_Data_HI:6;
+    NvU8 Min_Red_Data_LO:2;
+
+    NvU8 Min_Green_Data_HI:4;
+    NvU8 Max_Red_Data_LO:4;
+
+    NvU8 Max_Green_Data_HI:2;
+    NvU8 Min_Green_Data_LO:6;
+
+    NvU8 Max_Green_Data_LO:8;
+
+    NvU8 Min_Blue_Data_HI:8;
+
+    NvU8 Max_Blue_Data_HI:6;
+    NvU8 Min_Blue_Data_LO:2;
+
+    NvU8 Data_Rsvd:4;
+    NvU8 Max_Blue_Data_LO:4;
+
+} NVT_GAMUT_METADATA_RANGE_10BIT;
+
+typedef struct tagNVT_GAMUT_METADATA_RANGE_12BIT{
+
+    // Header
+    NvU8 GBD_Color_Space:3;
+    NvU8 GBD_Color_Precision:2;
+    NvU8 Rsvd:2;
+    NvU8 Format_Flag:1;
+
+    // Packaged data
+    NvU8 Min_Red_Data_HI:8;
+
+    NvU8 Max_Red_Data_HI:4;
+    NvU8 Min_Red_Data_LO:4;
+
+    NvU8 Max_Red_Data_LO:8;
+
+    NvU8 Min_Green_Data_HI:8;
+
+    NvU8 Max_Green_Data_HI:4;
+    NvU8 Min_Green_Data_LO:4;
+
+    NvU8 Max_Green_Data_LO:8;
+
+    NvU8 Min_Blue_Data_HI:8;
+
+    NvU8 Max_Blue_Data_HI:4;
+    NvU8 Min_Blue_Data_LO:4;
+
+    NvU8 Max_Blue_Data_LO:8;
+
+} NVT_GAMUT_METADATA_RANGE_12BIT;
+
+typedef struct tagNVT_GAMUT_METADATA_VERTICES_8BIT
+{
+    // Header
+    NvU8 GBD_Color_Space:3;
+    NvU8 GBD_Color_Precision:2;
+    NvU8 Rsvd:1;
+    NvU8 Facet_Mode:1; // Must be set to 0
+    NvU8 Format_Flag:1; // Must be set to 0
+    NvU8 Number_Vertices_H:8; // Must be set to 0
+    NvU8 Number_Vertices_L:8; // Must be set to 4
+
+    // Packaged data
+    NvU8 Black_Y_R;
+    NvU8 Black_Cb_G;
+    NvU8 Black_Cr_B;
+    NvU8 Red_Y_R;
+    NvU8 Red_Cb_G;
+    NvU8 Red_Cr_B;
+    NvU8 Green_Y_R;
+    NvU8 Green_Cb_G;
+    NvU8 Green_Cr_B;
+    NvU8 Blue_Y_R;
+    NvU8 Blue_Cb_G;
+    NvU8 Blue_Cr_B;
+} NVT_GAMUT_METADATA_VERTICES_8BIT;
+
+typedef struct tagNVT_GAMUT_METADATA_VERTICES_10BIT
+{
+    // Header
+    NvU8 GBD_Color_Space:3;
+    NvU8 GBD_Color_Precision:2;
+    NvU8 Rsvd:1;
+    NvU8 Facet_Mode:1; // Must be set to 0
+    NvU8 Format_Flag:1; // Must be set to 0
+    NvU8 Number_Vertices_H:8; // Must be set to 0
+    NvU8 Number_Vertices_L:8; // Must be set to 4
+
+    // Packaged data
+    NvU8 Black_Y_R_HI;
+
+    NvU8 Black_Cb_G_HI:6;
+    NvU8 Black_Y_R_LO:2;
+
+    NvU8 Black_Cr_B_HI:4;
+    NvU8 Black_Cb_G_LO:4;
+
+    NvU8 Red_Y_R_HI:2;
+    NvU8 Black_Cr_B_LO:6;
+
+    NvU8 Red_Y_R_LO;
+
+    NvU8 Red_Cb_G_HI;
+
+    NvU8 Red_Cr_B_HI:6;
+    NvU8 Red_Cb_G_LO:2;
+
+    NvU8 Green_Y_R_HI:4;
+    NvU8 Red_Cr_B_LO:4;
+
+    NvU8 Green_Cb_G_HI:2;
+    NvU8 Green_Y_R_LO:6;
+
+    NvU8 Green_Cb_G_LO;
+
+    NvU8 Green_Cr_B_HI;
+
+    NvU8 Blue_Y_R_HI:6;
+    NvU8 Green_Cr_B_LO:2;
+
+    NvU8 Blue_Cb_G_HI:4;
+    NvU8 Blue_Y_R_LO:4;
+
+    NvU8 Blue_Cr_B_HI:2;
+    NvU8 Blue_Cb_G_LO:6;
+
+    NvU8 Blue_Cr_B_LO;
+} NVT_GAMUT_METADATA_VERTICES_10BIT;
+
+typedef struct tagNVT_GAMUT_METADATA_VERTICES_12BIT
+{
+    // Header
+    NvU8 GBD_Color_Space:3;
+    NvU8 GBD_Color_Precision:2;
+    NvU8 Rsvd:1;
+    NvU8 Facet_Mode:1; // Must be set to 0
+    NvU8 Format_Flag:1; // Must be set to 0
+    NvU8 Number_Vertices_H:8; // Must be set to 0
+    NvU8 Number_Vertices_L:8; // Must be set to 4
+
+    // Packaged data
+    NvU8 Black_Y_R_HI;
+
+    NvU8 Black_Cb_G_HI:4;
+    NvU8 Black_Y_R_LO:4;
+
+    NvU8 Black_Cb_G_LO;
+
+    NvU8 Black_Cr_B_HI;
+
+    NvU8 Red_Y_R_HI:4;
+    NvU8 Black_Cr_B_LO:4;
+
+    NvU8 Red_Y_R_LO;
+
+    NvU8 Red_Cb_G_HI;
+
+    NvU8 Red_Cr_B_HI:4;
+    NvU8 Red_Cb_G_LO:4;
+
+    NvU8 Red_Cr_B_LO;
+
+    NvU8 Green_Y_R_HI;
+
+    NvU8 Green_Cb_G_HI:4;
+    NvU8 Green_Y_R_LO:4;
+
+    NvU8 Green_Cb_G_LO;
+
+    NvU8 Green_Cr_B_HI;
+
+    NvU8 Blue_Y_R_HI:4;
+    NvU8 Green_Cr_B_LO:4;
+
+    NvU8 Blue_Y_R_LO;
+
+    NvU8 Blue_Cb_G_HI;
+
+    NvU8 Blue_Cr_B_HI:4;
+    NvU8 Blue_Cb_G_LO:4;
+
+    NvU8 Blue_Cr_B_LO;
+} NVT_GAMUT_METADATA_VERTICES_12BIT;
+
+typedef struct tagNVT_GAMUT_METADATA
+{
+    NVT_GAMUT_HEADER header;
+
+    union
+    {
+        NVT_GAMUT_METADATA_RANGE_8BIT range8Bit;
+        NVT_GAMUT_METADATA_RANGE_10BIT range10Bit;
+        NVT_GAMUT_METADATA_RANGE_12BIT range12Bit;
+        NVT_GAMUT_METADATA_VERTICES_8BIT vertices8bit;
+        NVT_GAMUT_METADATA_VERTICES_10BIT vertices10bit;
+        NVT_GAMUT_METADATA_VERTICES_12BIT vertices12bit;
+    }payload;
+
+}NVT_GAMUT_METADATA;
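+
+// Editorial usage sketch (not from the original sources): the 10-bit GBD range
+// fields above are split across byte boundaries, so a 10-bit value such as
+// Min_Red is reassembled from its _HI (bits 9:2) and _LO (bits 1:0) parts;
+// the NVT_EXAMPLE_SKETCHES guard is hypothetical, so the sketch is not built.
+#if defined(NVT_EXAMPLE_SKETCHES)
+static NvU16 NvTiming_ExampleGbd10BitMinRed(const NVT_GAMUT_METADATA_RANGE_10BIT *pGbd)
+{
+    return (NvU16)(((NvU16)pGbd->Min_Red_Data_HI << 2) | pGbd->Min_Red_Data_LO);
+}
+#endif // NVT_EXAMPLE_SKETCHES
+//
+//***********************************
+// Display Port Configuration Data
+//***********************************
+//
+// DPCD field offset
+#define NVT_DPCD_ADDRESS_RECEIVER_CAPABILITY_FIELD 0x00000
+#define NVT_DPCD_ADDRESS_LINK_CONFIG_FIELD 0x00100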
NVT_DPCD_ADDRESS_MSTM_CTRL_FIELD 0x00111 //DPMST Control MST <-> ST +#define NVT_DPCD_ADDRESS_MSTM_BRANCH_DEVICE 0x001A1 +#define NVT_DPCD_ADDRESS_LINK_SINK_STATUS_FIELD 0x00200 +#define NVT_DPCD_ADDRESS_VENDOR_SPECIFIC_SOURCE_DEVICE 0x00300 +#define NVT_DPCD_ADDRESS_VENDOR_SPECIFIC_SINK_DEVICE 0x00400 +#define NVT_DPCD_ADDRESS_VENDOR_SPECIFIC_BRANCH_DEVICE 0x00500 +#define NVT_DPCD_ADDRESS_SINK_CTRL_FIELD 0x00600 +#define NVT_DPCD_ADDRESS_DOWN_REQ_BUFFER_FIELD 0x01000 +#define NVT_DPCD_ADDRESS_UP_REP_BUFFER_FIELD 0x01200 +#define NVT_DPCD_ADDRESS_DOWN_REP_BUFFER_FIELD 0x01400 +#define NVT_DPCD_ADDRESS_UP_REQ_BUFFER_FIELD 0x01600 +#define NVT_DPCD_ADDRESS_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x02003 +#define NVT_DPCD_ADDRESS_DP_TUNNELING_DEVICE_IEEE_OUI 0xE0000 +#define NVT_DPCD_ADDRESS_DP_TUNNELING_DEVICE_ID_STRING 0xE0003 +#define NVT_DPCD_ADDRESS_DP_TUNNELING_CAPS_SUPPORT_FIELD 0xE000D +#define NVT_DPCD_ADDRESS_DP_IN_ADAPTER_INFO_FIELD 0xE000E +#define NVT_DPCD_ADDRESS_USB4_DRIVER_ID_FIELD 0xE000F +#define NVT_DPCD_ADDRESS_USB4_ROUTER_TOPOLOGY_ID_FIELD 0xE001B + +// +// Raw DPCD data format - Receiver Capability Field // 00000h - 000FFh +typedef struct tagNVT_DPCD_RECEIVER_CAP +{ + NvU8 rev; // 00000h + NvU8 max_link_rate; // 00001h + NvU8 max_lane_count; // 00002h + NvU8 max_downspread; // 00003h + NvU8 norp; // 00004h + NvU8 downstream_port_present; // 00005h + NvU8 main_link_ch_coding; // 00006h + NvU8 down_stream_port_count; // 00007h + NvU8 receive_port0_cap_0; // 00008h + NvU8 receive_port0_cap_1; // 00009h + NvU8 receive_port1_cap_0; // 0000Ah + NvU8 receive_port1_cap_1; // 0000Bh + NvU8 reserved_0[0x7F - 0xC + 1]; // 0000Ch - 0007Fh + NvU8 down_strm_port0_cap[0x8F - 0x80 + 1]; // 00080h - 0008Fh + //NvU8 reserved_1[0xFF - 0x90 + 1]; // 00090h - 000FFh +}NVT_DPCD_RECEIVER_CAP; + +// +// Raw DPCD data format - Link Configuration Field // 00100h - 001FFh +typedef struct tagNVT_DPCD_LINK_CFG +{ + NvU8 link_bw_set; // 00100h + NvU8 lane_count_set; // 00101h + NvU8 training_pattern_set; // 00102h + NvU8 training_lane0_set; // 00103h + NvU8 training_lane1_set; // 00104h + NvU8 training_lane2_set; // 00105h + NvU8 training_lane3_set; // 00106h + NvU8 downspread_ctrl; // 00107h + NvU8 main_link_ch_coding_set; // 00108h + NvU8 reserved_0[0x110 - 0x109 + 1]; // 00110h - 00109h + NvU8 mstm_ctrl; // 00111h + // NvU8 reserved_0[0x1FF - 0x111 + 1]; +}NVT_DPCD_LINK_CFG; +// +// Raw DPCD data format - Link/Sink Status Field // 00200h - 002FFh +typedef struct tagNVT_DPCD_LINK_SINK_STATUS +{ + NvU8 sink_count; // 00200h + NvU8 device_service_irq_vector; // 00201h + NvU8 lane0_1_status; // 00202h + NvU8 lane2_3_status; // 00203h + NvU8 lane_align_status_update; // 00204h + NvU8 sink_status; // 00205h + NvU8 adjust_req_lane0_1; // 00206h + NvU8 adjust_req_lane2_3; // 00207h + NvU8 training_score_lane0; // 00208h + NvU8 training_score_lane1; // 00209h + NvU8 training_score_lane2; // 0020Ah + NvU8 training_score_lane3; // 0020Bh + NvU8 reserved_0[0x20F - 0x20C + 1]; // 0020Fh - 0020Ch + NvU16 sym_err_count_lane0; // 00210h - 00211h + NvU16 sym_err_count_lane1; // 00212h - 00213h + NvU16 sym_err_count_lane2; // 00214h - 00215h + NvU16 sym_err_count_lane3; // 00217h - 00216h + NvU8 test_req; // 00218h + NvU8 test_link_rate; // 00219h + NvU8 reserved_1[0x21F - 0x21A + 1]; // 0021Fh - 0021Ah + NvU8 test_lane_count; // 00220h + NvU8 test_pattern; // 00221h + NvU16 test_h_total; // 00222h - 00223h + NvU16 test_v_total; // 00224h - 00225h + NvU16 test_h_start; // 00226h - 00227h + NvU16 test_v_start; // 00228h - 
00229h + NvU16 test_hsync; // 0022Ah - 0022Bh + NvU16 test_vsync; // 0022Ch - 0022Dh + NvU16 test_h_width; // 0022Eh - 0022Fh + NvU16 test_v_height; // 00230h - 00231h + NvU16 test_misc; // 00232h - 00233h + NvU8 test_refresh_rate_numerator; // 00234h + NvU8 reserved_2[0x23F - 0x235 + 1]; // 00235h - 0023Fh + NvU16 test_crc_R_Cr; // 00240h - 00241h + NvU16 test_crc_G_Y; // 00242h - 00243h + NvU16 test_crc_B_Cb; // 00244h - 00245h + NvU8 test_sink_misc; // 00246h + NvU8 reserved_3[0x25F - 0x247 + 1]; // 00247h - 0025Fh + NvU8 test_response; // 00260h + NvU8 test_edid_checksum; // 00261h + NvU8 reserved_4[0x26F - 0x262 + 1]; // 00262h - 0026Fh + NvU8 test_sink; // 00270h + //NvU8 reserved_5[0x27F - 0x271 + 1]; // 00271h - 0027Fh + //NvU8 reserved_6[0x2FF - 0x280 + 1]; // 00280h - 002FFh +}NVT_DPCD_LINK_SINK_STATUS; + +#define NV_DPCD_DONGLE_NVIDIA_OUI 0x00044B + +// +// Raw DPCD data format - Vendor-Specific Field for Source Device // 00300h - 003FFh +// Raw DPCD data format - Vendor-Specific Field for Sink Device // 00400h - 004FFh +// Raw DPCD data format - Vendor-Specific Field for Branch Device // 00500h - 005FFh +typedef struct tagNVT_DPCD_VENDOR_SPECIFIC_FIELD +{ + NvU8 ieee_oui7_0; // 00300h + NvU8 ieee_oui15_8; // 00301h + NvU8 ieee_oui23_16; // 00302h + //NvU8 reserved[0x3FF - 0x303 + 1]; // 00303h - 003FFh +}NVT_DPCD_VENDOR_SPECIFIC_FIELD; +// +// Raw DPCD data format - Dongle Specific Field +typedef struct tagNVT_DPCD_DONGLE_SPECIFIC_FIELD +{ + NvU8 vendor_b0; // 00300h + NvU8 vendor_b1; // 00301h + NvU8 vendor_b2; // 00302h + NvU8 model[6]; // 00303h - 00308h + NvU8 chipIDVersion; // 00309h + //NvU8 reserved[0x3FF - 0x30A + 1]; // 0030Ah - 003FFh +}NVT_DPCD_DONGLE_SPECIFIC_FIELD; +// +// Raw DPCD data format - DualDP Specific Field +typedef struct tagNVT_DPCD_DUALDP_SPECIFIC_FIELD +{ + NvU8 vendor_b0; // 00300h + NvU8 vendor_b1; // 00301h + NvU8 vendor_b2; // 00302h + NvU8 model[6]; // 00303h - 00308h + NvU8 chipd_id_version; // 00309h + NvU8 reserved_1[0x3AF - 0x30A + 1]; // 0030Ah - 003AFh + NvU8 dual_dp_cap; // 003B0h + NvU8 dual_dp_base_addr[3]; // 003B1h - 003B3h + //NvU8 reserved_2[0x3FF - 0x3B4 + 1]; // 003B4h - 003FFh +}NVT_DPCD_DUALDP_SPECIFIC_FIELD; + +// +// Raw DPCD data format - Sink Control Field // 00600h - 006FFh +typedef struct tagNVT_DPCD_SINK_CTRL_FIELD +{ + NvU8 set_power; // 00600h + //NvU8 reserved[0x6FF - 0x601 + 1]; // 00601h - 006FFh +}NVT_DPCD_SINK_CTRL_FIELD; +// +// The entire DPCD data block +typedef struct tagNVT_DPCD +{ + NVT_DPCD_RECEIVER_CAP receiver_cap; + NVT_DPCD_LINK_CFG link_cfg; + NVT_DPCD_LINK_SINK_STATUS link_status; + NVT_DPCD_VENDOR_SPECIFIC_FIELD vsp_source_device; + NVT_DPCD_VENDOR_SPECIFIC_FIELD vsp_sink_device; + NVT_DPCD_VENDOR_SPECIFIC_FIELD vsp_branch_device; + NVT_DPCD_SINK_CTRL_FIELD sink_ctrl; +}NVT_DPCD; +// +// +// Parsed DPCD info +// +// +#define NVT_DPCD_REV_10 NVT_DPCD_DPCD_REV_10 // DPCD revision 1.0 +#define NVT_DPCD_REV_11 NVT_DPCD_DPCD_REV_11 // DPCD revision 1.1 +#define NVT_DPCD_REV_12 NVT_DPCD_DPCD_REV_12 // DPCD revision 1.2 +#define NVT_DPCD_RECEIVER_MAX_DOWNSTREAM_PORT 16 // the max downstream port possible per device +#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_DP NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DISPLAYPORT // Display Port +#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_VGA NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_VGA // analog VGA or analog video over DVI-I +#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_DVI NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DVI // DVI +#define 
NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_HDMI NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_HDMI // HDMI +#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_OTHERS NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_NO_EDID // the downstream port type will have no EDID in sink device such as Composite/SVideo. +#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_DP_PP NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DISPLAYPORT_PP // Display Port++ +#define NVT_DPCD_LINK_RATE_1_62_GBPS NVT_DPCD_LINK_BW_SET_LINK_BW_SET_1_62GPBS_PER_LANE // 1.62Gbps per lane +#define NVT_DPCD_LINK_RATE_2_70_GBPS NVT_DPCD_LINK_BW_SET_LINK_BW_SET_2_70GPBS_PER_LANE // 2.70Gbps per lane +#define NVT_DPCD_LINK_RATE_5_40_GBPS NVT_DPCD_LINK_BW_SET_LINK_BW_SET_5_40GPBS_PER_LANE // 5.40Gbps per lane +#define NVT_DPCD_LINK_RATE_8_10_GBPS NVT_DPCD_LINK_BW_SET_LINK_BW_SET_8_10GPBS_PER_LANE // 8.10Gbps per lane +#define NVT_DPCD_LINK_RATE_FACTOR_IN_10KHZ_MBPS 2700 // e.g. NVT_DPCD_LINK_RATE_1_62_GBPS * 0.27Gbps per lane (in 10KHz) +#define NVT_DPCD_LANE_COUNT_1 NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_1_LANE +#define NVT_DPCD_LANE_COUNT_2 NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_2_LANES +#define NVT_DPCD_LANE_COUNT_4 NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_4_LANES +#define NVT_DPCD_LANE_COUNT_8 8 + +// note: the values of NVT_COLOR_FORMAT_* are fixed in order to match the equivalent NV classes +typedef enum _NVT_COLOR_FORMAT +{ + NVT_COLOR_FORMAT_RGB = 0, + NVT_COLOR_FORMAT_YCbCr422 = 1, + NVT_COLOR_FORMAT_YCbCr444 = 2, + NVT_COLOR_FORMAT_YCbCr420 = 3, + NVT_COLOR_FORMAT_Y = 4, + NVT_COLOR_FORMAT_RAW = 5, + NVT_COLOR_FORMAT_INVALID = 0xFF +} NVT_COLOR_FORMAT; + +typedef enum +{ + NVT_COLOR_RANGE_FULL = 0, + NVT_COLOR_RANGE_LIMITED = 1 +} NVT_COLOR_RANGE; + +// note: the values of NVT_COLORIMETRY_* are fixed in order to match the equivalent NV classes +typedef enum +{ + NVT_COLORIMETRY_RGB = 0, + NVT_COLORIMETRY_YUV_601 = 1, + NVT_COLORIMETRY_YUV_709 = 2, + NVT_COLORIMETRY_EXTENDED = 3, + NVT_COLORIMETRY_XVYCC_601 = 4, + NVT_COLORIMETRY_XVYCC_709 = 5, + NVT_COLORIMETRY_ADOBERGB = 6, + NVT_COLORIMETRY_BT2020cYCC = 7, + NVT_COLORIMETRY_BT2020YCC = 8, + NVT_COLORIMETRY_BT2020RGB = 9, + NVT_COLORIMETRY_INVALID = 0xFF +} NVT_COLORIMETRY; + +#define NVT_DPCD_BPC_DEFAULT 0x00 +#define NVT_DPCD_BPC_6 0x01 +#define NVT_DPCD_BPC_8 0x02 +#define NVT_DPCD_BPC_10 0x03 +#define NVT_DPCD_BPC_12 0x04 +#define NVT_DPCD_BPC_16 0x05 + +#define NVT_DPCD_AUTOMATED_TEST 0x02 +#define NVT_DPCD_CP_IRQ 0x04 + +#define NVT_DPCD_LANES_2_3_TRAINED 0x77 +#define NVT_DPCD_LANE_1_TRAINED 0x07 +#define NVT_DPCD_LANE_0_TRAINED 0x07 +#define NVT_DPCD_INTERLANE_ALIGN_DONE 0x1 + +#define NVT_DPCD_LANE_1_STATUS 7:4 +#define NVT_DPCD_LANE_0_STATUS 3:0 +#define NVT_DPCD_ADDRESS_LANE_STATUS 0x00202 + +#define NVT_DPCD_TEST_REQ_LINK_TRAINING 0x01 +#define NVT_DPCD_TEST_REQ_TEST_PATTERN 0x02 +#define NVT_DPCD_TEST_REQ_EDID_READ 0x04 +#define NVT_DPCD_TEST_REQ_PHY_TEST_PATTERN 0x08 + +#define NVT_DPCD_TEST_ACK 0x01 +#define NVT_DPCD_TEST_NAK 0x02 +#define NVT_DPCD_TEST_EDID_CHECKSUM_WRITE 0x04 + +#define NVT_DPCD_TEST_MISC_COLOR_FORMAT 2:1 +#define NVT_DPCD_TEST_MISC_DYNAMIC_RANGE 3:3 +#define NVT_DPCD_TEST_MISC_YCbCr_COEFFICIENT 4:4 +#define NVT_DPCD_TEST_MISC_BIT_DEPTH 7:5 + +#define NVT_DPCD_TEST_EDID_CHECKSUM_ADDRESS 0x261 +#define NVT_DPCD_TEST_RESPONSE_ADDRESS 0x260 +#define NVT_EDID_CHECKSUM_BYTE 127 + +#define NVT_DPCD_POWER_STATE_NORMAL 0x01 +#define NVT_DPCD_POWER_STATE_POWER_DOWN 0x02 + +// ******************* +// ** DPCD 1.1 Spec ** +// ******************* + +// 0x000h 
DPCD_REV +#define NVT_DPCD_DPCD_REV 0x000 +#define NVT_DPCD_DPCD_REV_MINOR_VER 3:0 +#define NVT_DPCD_DPCD_REV_MAJOR_VER 7:4 +#define NVT_DPCD_DPCD_REV_10 0x10 +#define NVT_DPCD_DPCD_REV_11 0x11 +#define NVT_DPCD_DPCD_REV_12 0x12 + +// 0x001h MAX_LINK_RATE +#define NVT_DPCD_MAX_LINK_RATE 0x001 +#define NVT_DPCD_MAX_LINK_RATE_MAX_LINK_RATE 7:0 +#define NVT_DPCD_MAX_LINK_RATE_MAX_LINK_RATE_1_62GPS_PER_LANE 0x06 +#define NVT_DPCD_MAX_LINK_RATE_MAX_LINK_RATE_2_70GPS_PER_LANE 0x0A +#define NVT_DPCD_MAX_LINK_RATE_MAX_LINK_RATE_5_40GPS_PER_LANE 0x14 +#define NVT_DPCD_MAX_LINK_RATE_MAX_LINK_RATE_8_10GPS_PER_LANE 0x1E + +// 0x002h - MAX_LANE_COUNT +#define NVT_DPCD_MAX_LANE_COUNT 0x002 +#define NVT_DPCD_MAX_LANE_COUNT_MAX_LANE_COUNT 4:0 +#define NVT_DPCD_MAX_LANE_COUNT_RSVD 6:5 +#define NVT_DPCD_MAX_LANE_COUNT_ENHANCED_FRAME_CAP 7:7 + +// 0x003h - MAX_DOWNSPREAD +#define NVT_DPCD_MAX_DOWNSPREAD 0x003 +#define NVT_DPCD_MAX_DOWNSPREAD_MAX_DOWNSPREAD 0:0 +#define NVT_DPCD_MAX_DOWNSPREAD_MAX_DOWNSPREAD_NO 0 +#define NVT_DPCD_MAX_DOWNSPREAD_MAX_DOWNSPREAD_YES 1 +#define NVT_DPCD_MAX_DOWNSPREAD_RSVD 5:1 +#define NVT_DPCD_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LINK_TRAINING 6:6 +#define NVT_DPCD_MAX_DOWNSPREAD_RSVD_2 7:7 + +// 0x004h - NORP +#define NVT_DPCD_NORP 0x004 +#define NVT_DPCD_NORP_NUMBER_OF_RECEIVER_PORT_SUBTRACT_ONE 0:0 +#define NVT_DPCD_NORP_RSVD 7:1 + +// 0x005 - DOWNSTREAMPORT_PRESENT +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT 0x005 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_PRESENT 0:0 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_TYPE 2:1 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_TYPE_DISPLAYPORT 0 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_TYPE_VGA 1 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_TYPE_DVI_HDMI 2 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_TYPE_OTHERS 3 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_FORMAT_CONVERSION 3:3 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_RSVD 7:4 + +// 0x006 - MAIN_LINK_CHANNEL_CODING +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING 0x006 +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI8B_10B 0:0 +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING_RSVD 7:1 + +// 0x007 - DOWN_STREAM_PORT_COUNT +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT 0x007 +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT_DWN_STRM_PORT_COUNT 3:0 +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT_RSVD 6:4 +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT_OUI_SUPPORT 7:7 +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT_OUI_SUPPORT_YES 1 +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT_OUI_SUPPORT_NO 0 + +// 0x008h - RECEIVE_PORT0_CAP_0 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0 0x008 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_RSVD 0:0 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_LOCAL_EDID_PRESENT 1:1 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_LOCAL_EDID_PRESENT_YES 1 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_LOCAL_EDID_PRESENT_NO 0 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_ASSOCIATED_TO_PRECEDING_PORT 2:2 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_ASSOCIATED_TO_PRECEDING_PORT_YES 1 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_ASSOCIATED_TO_PRECEDING_PORT_NO 0 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_RSVD_2 7:3 + +// 0x009h - RECEIVE_PORT0_CAP_1 +#define NVT_DPCD_RECEIVE_PORT0_CAP_1 0x009 +#define NVT_DPCD_RECEIVE_PORT0_CAP_1_BUFFER_SIZE 7:0 + +// 0x00Ah - RECEIVE_PORT1_CAP_0 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0 0x00A +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_RSVD 0:0 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_LOCAL_EDID_PRESENT 1:1 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_LOCAL_EDID_PRESENT_YES 1 +#define 
NVT_DPCD_RECEIVE_PORT1_CAP_0_LOCAL_EDID_PRESENT_NO 0 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_ASSOCIATED_TO_PRECEDING_PORT 2:2 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_ASSOCIATED_TO_PRECEDING_PORT_YES 1 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_ASSOCIATED_TO_PRECEDING_PORT_NO 0 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_RSVD_2 7:3 + +// 0x00Bh - RECEIVE_PORT1_CAP_1 +#define NVT_DPCD_RECEIVE_PORT1_CAP_1 0x00B +#define NVT_DPCD_RECEIVE_PORT1_CAP_1_BUFFER_SIZE 7:0 + +// 0x021h - MST_CAP +#define NVT_DPCD_MSTM_CAP 0x021 +#define NVT_DPCD_MSTM_CAP_MST_CAP 0:0 +#define NVT_DPCD_MSTM_CAP_MST_CAP_NO 0 +#define NVT_DPCD_MSTM_CAP_MST_CAP_YES 1 + +// 0x080h ~ 0x08Fh - DWN_STRM_PORT0_CAP +#define NVT_DPCD_DWN_STRM_PORT0_CAP 0x080 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE 2:0 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DISPLAYPORT 0 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_VGA 1 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DVI 2 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_HDMI 3 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_NO_EDID 4 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DISPLAYPORT_PP 5 //Defined in Post DP 1.2 draft +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_HPD 3:3 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_HPD_AWARE_YES 1 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_HPD_AWARE_NO 0 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_RSVD 7:4 + +// 0x100h - LINK_BW_SET +#define NVT_DPCD_LINK_BW_SET 0x100 +#define NVT_DPCD_LINK_BW_SET_LINK_BW_SET 7:0 +#define NVT_DPCD_LINK_BW_SET_LINK_BW_SET_1_62GPBS_PER_LANE 0x06 +#define NVT_DPCD_LINK_BW_SET_LINK_BW_SET_2_70GPBS_PER_LANE 0x0A +#define NVT_DPCD_LINK_BW_SET_LINK_BW_SET_5_40GPBS_PER_LANE 0x14 +#define NVT_DPCD_LINK_BW_SET_LINK_BW_SET_8_10GPBS_PER_LANE 0x1E + +// 0x101h - LANE_COUNT_SET +#define NVT_DPCD_LANE_COUNT_SET 0x101 +#define NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET 4:0 +#define NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_1_LANE 1 +#define NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_2_LANES 2 +#define NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_4_LANES 4 +#define NVT_DPCD_LANE_COUNT_SET_RSVD 6:5 +#define NVT_DPCD_LANE_COUNT_SET_ENHANCED_FRAME_EN 7:7 +#define NVT_DPCD_LANE_COUNT_SET_ENHANCED_FRAME_EN_YES 1 +#define NVT_DPCD_LANE_COUNT_SET_ENHANCED_FRAME_EN_NO 0 + +// 0x102h - TRAINING_PATTERN_SET +#define NVT_DPCD_TRAINING_PATTERN_SET 0x102 +#define NVT_DPCD_TRAINING_PATTERN_SET_TRAINING_PATTERN_SET 1:0 +#define NVT_DPCD_TRAINING_PATTERN_SET_TRAINING_PATTERN_SET_NOT_IN_PROGRESS 0 +#define NVT_DPCD_TRAINING_PATTERN_SET_TRAINING_PATTERN_SET_PATTERN_1 1 +#define NVT_DPCD_TRAINING_PATTERN_SET_TRAINING_PATTERN_SET_PATTERN_2 2 +#define NVT_DPCD_TRAINING_PATTERN_SET_TRAINING_PATTERN_SET_RSVD 3 +#define NVT_DPCD_TRAINING_PATTERN_SET_LINK_QUAL_PATTERN_SET 3:2 +#define NVT_DPCD_TRAINING_PATTERN_SET_LINK_QUAL_PATTERN_SET_NOT_TRANSMITTED 0 +#define NVT_DPCD_TRAINING_PATTERN_SET_LINK_QUAL_PATTERN_SET_D10_2 1 +#define NVT_DPCD_TRAINING_PATTERN_SET_LINK_QUAL_PATTERN_SET_SERMPT 2 +#define NVT_DPCD_TRAINING_PATTERN_SET_LINK_QUAL_PATTERN_SET_PRBS7 3 +#define NVT_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN 4:4 +#define NVT_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_NO 0 +#define NVT_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_YES 1 +#define NVT_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLE 5:5 +#define NVT_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLE_NO 0 +#define NVT_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLE_YES 1 +#define 
NVT_DPCD_TRAINING_PATTERN_SET_SYMBOL_ERROR_COUNT_SEL 7:6 +#define NVT_DPCD_TRAINING_PATTERN_SET_SYMBOL_ERROR_COUNT_SEL_DIS_ERROR 0 +#define NVT_DPCD_TRAINING_PATTERN_SET_SYMBOL_ERROR_COUNT_SEL_D_ERROR 1 +#define NVT_DPCD_TRAINING_PATTERN_SET_SYMBOL_ERROR_COUNT_SEL_IS_ERROR 2 +#define NVT_DPCD_TRAINING_PATTERN_SET_SYMBOL_ERROR_COUNT_SEL_RSVD 3 + +// 0x103h ~ 0x106h - TRAINING_LANE?_SET +#define NVT_DPCD_TRAINING_LANE0_SET 0x103 +#define NVT_DPCD_TRAINING_LANE1_SET 0x104 +#define NVT_DPCD_TRAINING_LANE2_SET 0x105 +#define NVT_DPCD_TRAINING_LANE3_SET 0x106 +#define NVT_DPCD_TRAINING_LANE0_SET_VOLTAGE_SWING_SET 1:0 +#define NVT_DPCD_TRAINING_LANE0_SET_VOLTAGE_SWING_SET_TP1_VS_L0 0 +#define NVT_DPCD_TRAINING_LANE0_SET_VOLTAGE_SWING_SET_TP1_VS_L1 1 +#define NVT_DPCD_TRAINING_LANE0_SET_VOLTAGE_SWING_SET_TP1_VS_L2 2 +#define NVT_DPCD_TRAINING_LANE0_SET_VOLTAGE_SWING_SET_TP1_VS_L3 3 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_SWING_REACHED 2:2 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_SWING_REACHED_NO 0 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_SWING_REACHED_YES 1 +#define NVT_DPCD_TRAINING_LANE0_SET_PRE_EMPHASIS_SET 4:3 +#define NVT_DPCD_TRAINING_LANE0_SET_PRE_EMPHASIS_SET_TP2_PE_NONE 0 +#define NVT_DPCD_TRAINING_LANE0_SET_PRE_EMPHASIS_SET_TP2_PE_L1 1 +#define NVT_DPCD_TRAINING_LANE0_SET_PRE_EMPHASIS_SET_TP2_PE_L2 2 +#define NVT_DPCD_TRAINING_LANE0_SET_PRE_EMPHASIS_SET_TP2_PE_L3 3 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_PRE_EMPHASIS_REACHED 5:5 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_PRE_EMPHASIS_REACHED_NO 0 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_PRE_EMPHASIS_REACHED_YES 1 +#define NVT_DPCD_TRAINING_LANE0_SET_RSVD 7:6 + +// 0x107h - DOWNSPREAD_CTRL +#define NVT_DPCD_DOWNSPREAD_CTRL 0x107 +#define NVT_DPCD_DOWNSPREAD_CTRL_RSVD 3:0 +#define NVT_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP 4:4 +#define NVT_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP_NO 0 +#define NVT_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP_YES 1 +#define NVT_DPCD_DOWNSPREAD_CTRL_RSVD_2 7:5 + +// 0x108h - MAIN_LINK_CHANNEL_CODING_SET +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING_SET 0x108 +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING_SET_SET_ANSI8B10B 0:0 +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING_SET_RSVD 7:1 + +// 0x111h - MSTM_CTRL +#define NVT_DPCD_MSTM_CTRL 0x111 +#define NVT_DPCD_MSTM_CTRL_MST_EN 0:0 +#define NVT_DPCD_MSTM_CTRL_MST_EN_NO 0 +#define NVT_DPCD_MSTM_CTRL_MST_EN_YES 1 +#define NVT_DPCD_MSTM_CTRL_UP_REQ_EN 1:1 +#define NVT_DPCD_MSTM_CTRL_UP_REQ_EN_NO 0 +#define NVT_DPCD_MSTM_CTRL_UP_REQ_EN_YES 1 +#define NVT_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC 2:2 +#define NVT_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC_NO 0 +#define NVT_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC_YES 1 +#define NVT_DPCD_MSTM_CTRL_MST_RSVD 7:3 + +// 0x1A1h - BRANCH_DEVICE_CTRL +#define NVT_DPCD_BRANCH_DEVICE_CTRL 0x1A1 +#define NVT_DPCD_BRANCH_DEVICE_CTRL_HPD_NOTIF_TYPE 0:0 +#define NVT_DPCD_BRANCH_DEVICE_CTRL_HPD_NOTIF_TYPE_LONG_HPD_PULSE 0 +#define NVT_DPCD_BRANCH_DEVICE_CTRL_HPD_NOTIF_TYPE_SHORT_IRQ_PULSE 1 +#define NVT_DPCD_BRANCH_DEVICE_CTRL_RSVD 7:1 + +#define NVT_DPCD_PAYLOAD_ALLOCATE_SET 0x1C0 +#define NVT_DPCD_PAYLOAD_ALLOCATE_SET_VC_ID 6:0 + +#define NVT_DPCD_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1C1 +#define NVT_DPCD_PAYLOAD_ALLOCATE_START_TIME_SLOT_FIELD 5:0 + +#define NVT_DPCD_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1C2 +#define NVT_DPCD_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT_FIELD 5:0 + +// 0x200h - SINK_COUNT +#define NVT_DPCD_SINK_COUNT 0x200 +#define NVT_DPCD_SINK_COUNT_SINK_COUNT 5:0 +#define NVT_DPCD_SINK_COUNT_CP_READY 6:6 +#define NVT_DPCD_SINK_COUNT_RSVD 7:7 + +// 0x201h - DEVICE_SERVICE_IRQ_VECTOR +#define 
NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR 0x201 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_RSVD_REMOTE_CTRL_CMD_PENDING 0:0 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_AUTOMATED_TEST_REQUEST 1:1 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_CP_IRQ 2:2 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_MCCS_IRQ 3:3 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_DOWN_REP_MSG_READY 4:4 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_UP_REQ_MSG_READY 5:5 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_SINK_SPECIFIC_IRQ 6:6 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_RSVD_2 7:7 + +// 0x202h ~ 0x203h - LANE0_1_STATUS +#define NVT_DPCD_LANE0_1_STATUS 0x202 +#define NVT_DPCD_LANE2_3_STATUS 0x203 +#define NVT_DPCD_LANE0_1_STATUS_LANE0_CR_DONE 0:0 +#define NVT_DPCD_LANE0_1_STATUS_LANE0_CHANNEL_EQ_DONE 1:1 +#define NVT_DPCD_LANE0_1_STATUS_LANE0_SYMBOL_LOCKED 2:2 +#define NVT_DPCD_LANE0_1_STATUS_RSVD 3:3 +#define NVT_DPCD_LANE0_1_STATUS_LANE1_CR_DONE 4:4 +#define NVT_DPCD_LANE0_1_STATUS_LANE1_CHANNEL_EQ_DONE 5:5 +#define NVT_DPCD_LANE0_1_STATUS_LANE1_SYMBOL_LOCKED 6:6 +#define NVT_DPCD_LANE0_1_STATUS_RSVD_2 7:7 + +// 0x204h - LANE_ALIGN_STATUS_UPDATED +// Temporary until Linux/Apple change their code. +#define NVT_DPCD_LANE_ALIGN_STAUTS_UPDATED NVT_DPCD_LANE_ALIGN_STATUS_UPDATED +#define NVT_DPCD_LANE_ALIGN_STATUS_UPDATED 0x204 +#define NVT_DPCD_LANE_ALIGN_STATUS_UPDATED_INTERLANE_ALIGN_DONE 0:0 +#define NVT_DPCD_LANE_ALIGN_STATUS_UPDATED_RSVD 5:1 +#define NVT_DPCD_LANE_ALIGN_STATUS_UPDATED_DOWNSTREAM_PORT_STATUS_CHANGED 6:6 +#define NVT_DPCD_LANE_ALIGN_STATUS_UPDATED_LINK_STATUS_UPDATED 7:7 + +// 0x205 - SINK_STATUS +#define NVT_DPCD_SINK_STATUS 0x205 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS 0:0 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS_OUT_OF_SYNC 0 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS_IN_SYNC 1 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS 1:1 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS_OUT_OF_SYNC 0 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS_IN_SYNC 1 +#define NVT_DPCD_SINK_STATUS_RSVD 7:2 + +// 0x206h ~ 0x207h - ADJUST_REQUEST_LANE0_1 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1 0x206 +#define NVT_DPCD_ADJUST_REQUEST_LANE2_3 0x207 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE0 1:0 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE0_LEVEL_0 0 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE0_LEVEL_1 1 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE0_LEVEL_2 2 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE0_LEVEL_3 3 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE0 3:2 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE0_LEVEL_0 0 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE0_LEVEL_1 1 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE0_LEVEL_2 2 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE0_LEVEL_3 3 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE1 5:4 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE1_LEVEL_0 0 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE1_LEVEL_1 1 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE1_LEVEL_2 2 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE1_LEVEL_3 3 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE1 7:6 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE1_LEVEL_0 0 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE1_LEVEL_1 1 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE1_LEVEL_2 2 +#define 
NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE1_LEVEL_3 3 + +// 0x208h ~ 0x20Bh TRAINING_SCORE_LANE0~3 +#define NVT_DPCD_TRAINING_SCORE_LANE0 0x208 +#define NVT_DPCD_TRAINING_SCORE_LANE1 0x209 +#define NVT_DPCD_TRAINING_SCORE_LANE2 0x20A +#define NVT_DPCD_TRAINING_SCORE_LANE3 0x20B + +// 0x210h ~ 0x217h SYMBOL_ERROR_COUNT_LANE0 (16bit) +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE0_LO 0x210 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE0_HI 0x211 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE1_LO 0x212 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE1_HI 0x213 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE2_LO 0x214 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE2_HI 0x215 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE3_LO 0x216 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE3_HI 0x217 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE0_ERROR_COUNT_LO 7:0 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE0_ERROR_COUNT_HI 6:0 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE0_ERROR_COUNT_VALID 7:7 + +// 0x218h TEST_REQUEST +#define NVT_DPCD_TEST_REQUEST 0x218 +#define NVT_DPCD_TEST_REQUEST_TEST_LINK_TRAINING 0:0 +#define NVT_DPCD_TEST_REQUEST_TEST_PATTERN 1:1 +#define NVT_DPCD_TEST_REQUEST_TEST_EDID_READ 2:2 +#define NVT_DPCD_TEST_REQUEST_PHY_TEST_PATTERN 3:3 +#define NVT_DPCD_TEST_REQUEST_RSVD 7:4 + +// 0x219h TEST_LINK_RATE +#define NVT_DPCD_TEST_LINK_RATE 0x219 + +// 0x220h TEST_LANE_COUNT +#define NVT_DPCD_TEST_LANE_COUNT 0x220 +#define NVT_DPCD_TEST_LANE_COUNT_TEST_LANE_COUNT 4:0 +#define NVT_DPCD_TEST_LANE_COUNT_TEST_LANE_COUNT_ONE_LANE 1 +#define NVT_DPCD_TEST_LANE_COUNT_TEST_LANE_COUNT_TWO_LANES 2 +#define NVT_DPCD_TEST_LANE_COUNT_TEST_LANE_COUNT_FOUR_LANES 4 +#define NVT_DPCD_TEST_LANE_COUNT_RSVD 7:5 + +// 0x221h TEST_PATTERN +#define NVT_DPCD_TEST_PATTERN 0x221 +#define NVT_DPCD_TEST_PATTERN_NO_TEST_PATTERN_TRANSMITTED 0 +#define NVT_DPCD_TEST_PATTERN_COLOR_RAMPS 1 +#define NVT_DPCD_TEST_PATTERN_BLACK_AND_WHITE_VERTICAL_LINES 2 +#define NVT_DPCD_TEST_PATTERN_COLOR_SQUARE 3 + +// 0x222h ~ 0x223h TEST_H_TOTAL +#define NVT_DPCD_TEST_H_TOTAL_HI 0x222 +#define NVT_DPCD_TEST_H_TOTAL_LO 0x223 + +// 0x224h ~ 0x225h TEST_V_TOTAL +#define NVT_DPCD_TEST_V_TOTAL_HI 0x224 +#define NVT_DPCD_TEST_V_TOTAL_LO 0x225 + +// 0x226h ~ 0x227h TEST_H_START +#define NVT_DPCD_TEST_H_START_HI 0x226 +#define NVT_DPCD_TEST_H_START_LO 0x227 + +// 0x228h ~ 0x229h TEST_V_START +#define NVT_DPCD_TEST_V_START_HI 0x228 +#define NVT_DPCD_TEST_V_START_LO 0x229 + +// 0x22Ah ~ 0x22Bh TEST_HSYNC +#define NVT_DPCD_TEST_HSYNC_HI 0x22A +#define NVT_DPCD_TEST_HSYNC_LO 0x22B +#define NVT_DPCD_TEST_HSYNC_HI_TEST_HSYNC_WIDTH_14_8 6:0 +#define NVT_DPCD_TEST_HSYNC_HI_TEST_HSYNC_POLARITY 7:7 + +// 0x22Ch ~ 0x22Dh TEST_VSYNC +#define NVT_DPCD_TEST_VSYNC_HI 0x22C +#define NVT_DPCD_TEST_VSYNC_LO 0x22D +#define NVT_DPCD_TEST_VSYNC_HI_TEST_VSYNC_WIDTH_14_8 6:0 +#define NVT_DPCD_TEST_VSYNC_HI_TEST_VSYNC_POLARITY 7:7 + +// 0x22Eh ~ 0x22Fh TEST_H_WIDTH +#define NVT_DPCD_TEST_H_WIDTH_HI 0x22E +#define NVT_DPCD_TEST_H_WIDTH_LO 0x22F + +// 0x230h ~ 0x231h TEST_V_HEIGHT +#define NVT_DPCD_TEST_V_HEIGHT_HI 0x230 +#define NVT_DPCD_TEST_V_HEIGHT_LO 0x231 + +// 0x232h ~ 0x233h TEST_MISC +#define NVT_DPCD_TEST_MISC_LO 0x232 +#define NVT_DPCD_TEST_MISC_LO_TEST_SYNCHRONOUS_CLOCK 0:0 +#define NVT_DPCD_TEST_MISC_LO_TEST_SYNCHRONOUS_CLOCK_ASYNC 0 +#define NVT_DPCD_TEST_MISC_LO_TEST_SYNCHRONOUS_CLOCK_SYNC 1 +#define NVT_DPCD_TEST_MISC_LO_TEST_COLOR_FORMAT 2:1 +#define NVT_DPCD_TEST_MISC_LO_TEST_COLOR_FORMAT_RGB 0 +#define NVT_DPCD_TEST_MISC_LO_TEST_COLOR_FORMAT_YCbCr422 1 +#define 
NVT_DPCD_TEST_MISC_LO_TEST_COLOR_FORMAT_YCbCr444 2 +#define NVT_DPCD_TEST_MISC_LO_TEST_COLOR_FORMAT_RSVD 3 +#define NVT_DPCD_TEST_MISC_LO_TEST_DYNAMIC_RANGE 3:3 +#define NVT_DPCD_TEST_MISC_LO_TEST_DYNAMIC_RANGE_VESA 0 +#define NVT_DPCD_TEST_MISC_LO_TEST_DYNAMIC_RANGE_CEA 1 +#define NVT_DPCD_TEST_MISC_LO_TEST_YCBCR_COEFFICIENTS 4:4 +#define NVT_DPCD_TEST_MISC_LO_TEST_YCBCR_COEFFICIENTS_ITU601 0 +#define NVT_DPCD_TEST_MISC_LO_TEST_YCBCR_COEFFICIENTS_ITU709 1 +#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH 7:5 +#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH_6BPC 0 +#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH_8BPC 1 +#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH_10BPC 2 +#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH_12BPC 3 +#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH_16BPC 4 +#define NVT_DPCD_TEST_MISC_HI 0x233 +#define NVT_DPCD_TEST_MISC_HI_TEST_REFRESH_DENOMINATOR 0:0 +#define NVT_DPCD_TEST_MISC_HI_TEST_REFRESH_DENOMINATOR_1 0 +#define NVT_DPCD_TEST_MISC_HI_TEST_REFRESH_DENOMINATOR_1001 1 +#define NVT_DPCD_TEST_MISC_HI_TEST_INTERLACED 1:1 +#define NVT_DPCD_TEST_MISC_HI_TEST_INTERLACED_NO 0 +#define NVT_DPCD_TEST_MISC_HI_TEST_INTERLACED_YES 1 +#define NVT_DPCD_TEST_MISC_HI_TEST_INTERLACED_RSVD 7:2 + +// 0x234h TEST_REFRESH_RATE_NUMERATOR +#define NVT_DPCD_TEST_REFRESH_RATE_NUMERATOR 0x234 + +// 0x240h ~ 0x241h TEST_CRC_R_Cr +#define NVT_DPCD_TEST_CRC_R_Cr_LO 0x240 +#define NVT_DPCD_TEST_CRC_R_Cr_HI 0x241 + +// 0x242h ~ 0x243h TEST_CRC_G_Y +#define NVT_DPCD_TEST_CRC_G_Y_LO 0x242 +#define NVT_DPCD_TEST_CRC_G_Y_HI 0x243 + +// 0x244h ~ 0x245h TEST_CRC_B_Cb +#define NVT_DPCD_TEST_CRC_B_Cb_LO 0x244 +#define NVT_DPCD_TEST_CRC_B_Cb_HI 0x245 + +// 0x246h TEST_SINK_MISC +#define NVT_DPCD_TEST_SINK_MISC 0x246 +#define NVT_DPCD_TEST_SINK_MISC_TEST_CRC_COUNT 3:0 +#define NVT_DPCD_TEST_SINK_MISC_TEST_CRC_SUPPORTED 5:5 +#define NVT_DPCD_TEST_SINK_MISC_TEST_CRC_SUPPORTED_NO 0 +#define NVT_DPCD_TEST_SINK_MISC_TEST_CRC_SUPPORTED_YES 1 +#define NVT_DPCD_TEST_SINK_MISC_RSVD 7:6 + +// 0x248h PHY_TEST_PATTERN +#define NVT_DPCD_PHY_TEST_PATTERN 0x248 +#define NVT_DPCD_PHY_TEST_PATTERN_PHY_TEST_PATTERN_SEL 1:0 +#define NVT_DPCD_PHY_TEST_PATTERN_PHY_TEST_PATTERN_SEL_NO_TEST_PATTERN 0 +#define NVT_DPCD_PHY_TEST_PATTERN_PHY_TEST_PATTERN_SEL_D10_2 1 +#define NVT_DPCD_PHY_TEST_PATTERN_PHY_TEST_PATTERN_SEL_SEMC 2 +#define NVT_DPCD_PHY_TEST_PATTERN_PHY_TEST_PATTERN_SEL_PRBS7 3 +#define NVT_DPCD_PHY_TEST_PATTERN_RSVD 7:2 + +// 0x260h TEST_RESPONSE +#define NVT_DPCD_TEST_RESPONSE 0x260 +#define NVT_DPCD_TEST_RESPONSE_TEST_ACK 0:0 +#define NVT_DPCD_TEST_RESPONSE_TEST_ACK_KEEP_TEST_REQ 0 +#define NVT_DPCD_TEST_RESPONSE_TEST_ACK_CLEAR_TEST_REQ 1 +#define NVT_DPCD_TEST_RESPONSE_TEST_NAK 1:1 +#define NVT_DPCD_TEST_RESPONSE_TEST_NACK_KEEP_TEST_REQ 0 +#define NVT_DPCD_TEST_RESPONSE_TEST_NACK_CLEAR_TEST_REQ 1 +#define NVT_DPCD_TEST_RESPONSE_TEST_EDID_CHECKSUM_WRITE 2:2 +#define NVT_DPCD_TEST_RESPONSE_TEST_EDID_CHECKSUM_WRITE_NO 0 +#define NVT_DPCD_TEST_RESPONSE_TEST_EDID_CHECKSUM_WRITE_YES 1 +#define NVT_DPCD_TEST_RESPONSE_RSVD 7:3 + +// 0x261h TEST_EDID_CHECKSUM +#define NVT_DPCD_TEST_EDID_CHECKSUM 0x261 + +// 0x270 TEST_SINK +#define NVT_DPCD_TEST_SINK 0x270 +#define NVT_DPCD_TEST_SINK_TEST_SINK_START 0:0 +#define NVT_DPCD_TEST_SINK_TEST_SINK_START_STOP_CALC_CRC 0 +#define NVT_DPCD_TEST_SINK_TEST_SINK_START_START_CALC_CRC 1 +#define NVT_DPCD_TEST_SINK_RSVD 7:1 + +#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS 0x2C0 +#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_TABLE_UPDATED 0:0 +#define 
NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_TABLE_UPDATED_NO 0 +#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_TABLE_UPDATED_YES 1 +#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED 1:1 +#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED_NO 0 +#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED_YES 1 + +// 0x300h ~ 0x302h SOURCE_IEEE_OUT +#define NVT_DPCD_SOURCE_IEEE_OUT_7_0 0x300 +#define NVT_DPCD_SOURCE_IEEE_OUT_15_8 0x301 +#define NVT_DPCD_SOURCE_IEEE_OUT_23_16 0x302 + +// 0x400h ~ 0x402h SINK_IEEE_OUT +#define NVT_DPCD_SINK_IEEE_OUT_7_0 0x400 +#define NVT_DPCD_SINK_IEEE_OUT_15_8 0x401 +#define NVT_DPCD_SINK_IEEE_OUT_23_16 0x402 + +// 0x500h ~ 0x502h BRANCH_IEEE_OUT +#define NVT_DPCD_BRANCH_IEEE_OUT_7_0 0x500 +#define NVT_DPCD_BRANCH_IEEE_OUT_15_8 0x501 +#define NVT_DPCD_BRANCH_IEEE_OUT_23_16 0x502 + +// 0x600 SET_POWER +#define NVT_DPCD_SET_POWER 0x600 +#define NVT_DPCD_SET_POWER_SET_POWER_STATE 1:0 +#define NVT_DPCD_SET_POWER_SET_POWER_STATE_RSVD 0 +#define NVT_DPCD_SET_POWER_SET_POWER_STATE_D0 1 +#define NVT_DPCD_SET_POWER_SET_POWER_STATE_D3 2 +#define NVT_DPCD_SET_POWER_SET_POWER_STATE_RSVD_2 3 +#define NVT_DPCD_SET_POWER_RSVD 7:2 + +//************************************* +// DP 1.2 Main Stream Attribute Fields +//************************************* + +#define NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_MASK 0x01 // MISC0 bit 0 Synchronous Clock +#define NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_SHIFT 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_ASYNC 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_INSYNC 0x1 + +#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_MASK 0xe0 // MISC0 bits 7:5 number of bits per color +#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_SHIFT 0x5 +#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_6 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_8 0x1 +#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_10 0x2 +#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_12 0x3 +#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_16 0x4 + +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_MASK 0x1e // MISC0 bits 4:1 Color Encoding Format and Content Color Gamut +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_SHIFT 0x1 +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_LEGACY 0x0 // RGB unspecified color space (legacy RGB mode) +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_CEA_RGB 0x4 // CEA RGB (sRGB primaries) +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_WIDE_GAMUT_FIXED_POINT 0x3 // RGB wide gamut fixed point (XR8,XR10, XR12) +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_WIDE_GAMUT_FLOAT_POINT 0xb // RGB wide gamut floating point(scRGB) +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_YCBCR_422_ITU601 0x5 +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_YCBCR_422_ITU709 0xd +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_YCBCR_444_ITU601 0x6 +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_YCBCR_444_ITU709 0xe +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_XVYCC_422_ITU601 0x1 +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_XVYCC_422_ITU709 0x9 +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_XVYCC_444_ITU601 0x2 +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_XVYCC_444_ITU709 0xa +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_ADOBE_RGB 0xc + +#define NVT_DP_INFOFRAME_MSA_MISC1_INTERLACED_V_TOTAL_MASK 0x01 // MISC1 bit 0 Interlaced Vertical Total +#define NVT_DP_INFOFRAME_MSA_MISC1_INTERLACED_V_TOTAL_SHIFT 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC1_INTERLACED_V_TOTAL_ODD 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC1_INTERLACED_V_TOTAL_EVEN 0x1 + 
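For orientation: this header mixes two bitfield conventions, the `h:l` range macros (e.g. NVT_DPCD_SET_POWER_SET_POWER_STATE above is literally the token sequence `1:0`) and explicit MASK/SHIFT pairs (the NVT_DP_INFOFRAME_MSA_MISC0/MISC1 defines). The sketch below is illustrative only and is not part of the patch; the NVT_FIELD_* helper names are hypothetical. It shows how a range macro completes a C conditional expression (the same trick the DRF macros in nvmisc.h rely on) and how an MSA MISC0 byte is composed from the MASK/SHIFT pairs.

// Hypothetical helpers (not in the patch): with r defined as "h:l", the
// expansion (0 ? r) becomes the valid ternary (0 ? h : l), i.e. the low bit
// index, and (1 ? r) the high bit index.
#define NVT_FIELD_LO(r)   (0 ? r)   // low bit index of the field
#define NVT_FIELD_HI(r)   (1 ? r)   // high bit index of the field
#define NVT_FIELD_MASK(r) ((0xFFFFFFFFU >> (31 - NVT_FIELD_HI(r))) & ~((1U << NVT_FIELD_LO(r)) - 1U))
// e.g. NVT_FIELD_MASK(NVT_DPCD_SET_POWER_SET_POWER_STATE) == 0x3 (bits 1:0)

// Composing MSA MISC0 from the MASK/SHIFT pairs above: in-sync clock (bit 0),
// CEA RGB color format (bits 4:1) and 8 bpc (bits 7:5), i.e.
// 0x01 | 0x08 | 0x20 == 0x29.
NvU8 misc0 = (NvU8)(((NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_INSYNC << NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_SHIFT) & NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_MASK) |
                    ((NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_CEA_RGB << NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_SHIFT) & NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_MASK) |
                    ((NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_8 << NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_SHIFT) & NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_MASK));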
+#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_MASK 0x06 // MISC1 bits 2:1 stereo video attribute +#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_SHIFT 0x1 +#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_NONE 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_RIGHT_LEFT 0x1 +#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_LEFT_RIGHT 0x3 +#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_RESERVED 0x2 + +#define NVT_DP_INFOFRAME_MSA_MISC1_RESERVED_MASK 0x38 // MISC1 bits 5:3 reserved (DP1.3). Note: DP1.2 MISC 6:3 is reserved and undefined. +#define NVT_DP_INFOFRAME_MSA_MISC1_RESERVED_SHIFT 0x3 +#define NVT_DP_INFOFRAME_MSA_MISC1_RESERVED_DEFAULT 0x0 + +#define NVT_DP_INFOFRAME_MSA_MISC1_VSC_SDP_MASK 0x40 // MISC1 bit 6: VSC SDP in use; the sink is to ignore MISC1 bit 7 and MISC0 bits 7:1. +#define NVT_DP_INFOFRAME_MSA_MISC1_VSC_SDP_SHIFT 0x6 +#define NVT_DP_INFOFRAME_MSA_MISC1_VSC_SDP_DISABLE 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC1_VSC_SDP_ENABLE 0x1 + +#define NVT_DP_INFOFRAME_MSA_MISC1_BITS_PER_COLOR_OR_LUMINANCE_MASK 0x80 // MISC1 bit 7 Y-Only Video +#define NVT_DP_INFOFRAME_MSA_MISC1_BITS_PER_COLOR_OR_LUMINANCE_SHIFT 0x7 +#define NVT_DP_INFOFRAME_MSA_MISC1_BITS_PER_COLOR 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC1_BITS_PER_LUMINANCE 0x1 + +// ************************ +// ** HDCP DPCD 1.0 Spec ** +// ************************ + +// 0x68029 BSTATUS +#define NVT_DPCD_HDCP_BSTATUS 0x68029 +#define NVT_DPCD_HDCP_BSTATUS_LINK_INTEGRITY_FAILURE 0x04 +#define NVT_DPCD_HDCP_BSTATUS_REAUTHENTICATION_REQUEST 0x08 + +#define NVT_DPCD_HDCP_BCAPS_OFFSET 0x00068028 +#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE 0:0 +#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE_NO 0x00000000 +#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE_YES 0x00000001 +#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER 1:1 +#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER_NO 0x00000000 +#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER_YES 0x00000001 + +#define NVT_DPCD_HDCP_BKSV_OFFSET 0x00068000 +#define HDCP_KSV_SIZE 5 + + +// ********************************************* +// ** Vendor DPCD for Apple's mDP->VGA dongle ** +// ********************************************* + +// 0x30F DP2VGA_I2C_SPEED_CONTROL +#define NVT_DPCD_DP2VGA_I2C_SPEED_CONTROL 0x30F + +// 0x50C DP2VGA_GENERAL_STATUS +#define NVT_DPCD_DP2VGA_GENERAL_STATUS 0x50C + +// 0x50D DP2VGA_I2C_SPEED_CAP +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP 0x50D +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_SLOWEST 0xFF +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_1KBPS 0x01 +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_3KBPS 0x02 +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_10KBPS 0x04 +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_100KBPS 0x08 +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_400KBPS 0x10 +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_1MBPS 0x20 + + +// +// HDMI/DP common definitions + +#define NVT_DYNAMIC_RANGE_VESA 0x00 +#define NVT_DYNAMIC_RANGE_CEA 0x01 +#define NVT_DYNAMIC_RANGE_AUTO 0xFF + + +typedef struct tagNVT_PARSED_DPCD_INFO_DOWNSTREAM_PORT +{ + NvU8 type : 3; // the downstream port type + NvU8 isHpdAware : 1; // if it's HPD aware + NvU8 reserved : 4; +}NVT_PARSED_DPCD_INFO_DOWNSTREAM_PORT; +// +typedef struct tagNVT_DPCD_PARSED_RECEIVER_INFO +{ + // receiver info + NvU32 rev; // DPCD version number + NvU32 maxLinkRate; // the max link rate of main link lanes in 10KHz + NvU32 maxLaneCount; // the max number of lanes + NvU32 numOfPorts; // the number of receiver ports + NvU32 p0BufferSizePerLane; // the buffer size per lane (in BYTE) + NvU32 p1BufferSizePerLane; // the buffer size per lane (in BYTE) + + // downstream port info + NvU32 
downstreamPortCount; // the total number of down stream ports + NvU32 downstreamPort0Type; // type of downstream port 0 + NVT_PARSED_DPCD_INFO_DOWNSTREAM_PORT downstreamPort[NVT_DPCD_RECEIVER_MAX_DOWNSTREAM_PORT]; + + // other misc info + NvU32 cap_support0_005DownSpread : 1; + NvU32 cap_supportEnhancedFrame : 1; + NvU32 cap_noAuxHandshakeLinkTraining : 1; + NvU32 cap_downstreamPortHasFormatConvBlk : 1; + NvU32 cap_mainLinkChSupportANSI8B10B : 1; + NvU32 cap_downstreamPortSupportOUI : 1; + NvU32 cap_p0HasEDID : 1; + NvU32 cap_p0AssociatedToPrecedingPort : 1; + NvU32 cap_p1HasEDID : 1; + NvU32 cap_p1AssociatedToPrecedingPort : 1; + + // DP 1.2 fields + NvU32 cap_mstm : 1; + NvU32 cap_reserved : 21; +}NVT_DPCD_PARSED_RECEIVER_INFO; + +#define NVT_DPCD_NUM_TRAINING_LANES 4 + +typedef struct tagNVT_TRAINING_LANE_SETTING +{ + NvU8 voltageSwing; + NvU8 maxSwingReached; + NvU8 preEmphasis; + NvU8 maxPreEmphasisReached; +}NVT_TRAINING_LANE_SETTING; + +// 00100h LINK CONFIGURATION FIELD +typedef struct tagNVT_DPCD_PARSED_LINK_CONFIG +{ + NvU8 linkRate; + NvU8 laneCount; + + NVT_TRAINING_LANE_SETTING trainLaneSetting[NVT_DPCD_NUM_TRAINING_LANES]; + + NvU32 enhancedFrameEnabled : 1; + NvU32 trainingPatternSetting : 2; + NvU32 linkQualityPatternSetting : 2; + NvU32 recoveredClockOutputEnabled : 1; + NvU32 scramblingDisable : 1; + NvU32 symbolErrorCount : 2; + NvU32 spreadAmp : 1; + NvU32 mainLinkCoding8b10b : 1; + NvU32 multiStreamEnabled : 1; + NvU32 reserved : 19; +}NVT_DPCD_PARSED_LINK_CONFIG; + +typedef struct tagNVT_DPCD_INFO +{ + NVT_DPCD_PARSED_RECEIVER_INFO receiver; + NVT_DPCD_PARSED_LINK_CONFIG linkConfig; + NvU32 sourceOUI; + NvU32 sinkOUI; + NvU32 branchOUI; +}NVT_DPCD_INFO; + +typedef struct tagNVT_DPCD_CONFIG +{ + NvU32 dpInfoFlags; +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENABLED 0:0 +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENABLED_FALSE (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENABLED_TRUE (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE 7:4 +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_NONE (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_DP2DVI (0x00000001) // B2: dp2dvi-singlelink +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_DP2HDMI (0x00000002) // dp2hdmi +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_DP2DVI2 (0x00000003) // B3: dp2dvi-duallink +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_DP2VGA (0x00000004) // B4: dp2vga +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_DP2TV (0x00000005) // Composite/SVideo +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LANECOUNT 10:8 // Maximum supported laneCount +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LANECOUNT_1_LANE (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LANECOUNT_2_LANE (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LANECOUNT_4_LANE (0x00000002) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LINKRATE 13:11 // Maximum supported linkRate +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LINKRATE_1_62GBPS (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LINKRATE_2_70GBPS (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LINKRATE_5_40GBPS (0x00000002) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LINKRATE_8_10GBPS (0x00000003) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MULTISTREAM 16:16 // Bit to check MST/SST +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MULTISTREAM_DISABLED (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MULTISTREAM_ENABLED (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENHANCED_FRAMING 17:17 // Bit to check enhanced framing support +#define 
NV_DISPLAYPORT_INFO_FLAGS_DP_ENHANCED_FRAMING_DISABLED (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENHANCED_FRAMING_ENABLED (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_DOWNSPREAD 18:18 // Bit to check downspread support +#define NV_DISPLAYPORT_INFO_FLAGS_DP_DOWNSPREAD_DISABLED (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_DOWNSPREAD_ENABLED (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_SCRAMBLING 19:19 // Bit to check scrambling +#define NV_DISPLAYPORT_INFO_FLAGS_DP_SCRAMBLING_DISABLED (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_SCRAMBLING_ENABLED (0x00000001) + NvU32 linkRate; + NvU32 laneCount; + NvU32 colorFormat; + NvU32 dynamicRange; + NvU32 colorimetry; + NvU32 bpc; + NvU32 bpp; + + // pre-emphasis and drive current level (EFI might need this information) + NvU8 laneData[4]; + // DP max pixelClock supported based on DP max laneCount/linkRate + NvU32 dpMaxPixelClk; + NvU8 maxCapLinkRate; + NvU8 maxCapLaneCount; + + // B4 (DP2VGA) Vendor Specific I2C Speed Control + NvU8 dp2vga_i2cCap; + NvU8 dp2vga_i2cCtrl; + + NvU8 bDpOffline; +}NVT_DPCD_CONFIG; + +typedef struct tagNVT_DPCD_DP_TUNNELING_CAPS +{ + NvU8 dpTunneling : 1; // DP Tunneling through USB4 Support + NvU8 reserved : 5; // Reserved. + NvU8 dpPanelReplayTunnelingOptSupport : 1; // Panel Replay Tunneling Optimization Support + NvU8 dpInBwAllocationModeSupport : 1; // DP IN Bandwidth Allocation Mode Support +}NVT_DPCD_DP_TUNNELING_CAPS; + +typedef struct tagNVT_DPCD_DP_IN_ADAPTER_INFO +{ + NvU8 dpInAdapterNumber : 6; // DP IN Adapter Number + NvU8 reserved : 2; +}NVT_DPCD_DP_IN_ADAPTER_INFO; + +typedef struct tagNVT_DPCD_USB4_DRIVER_ID +{ + NvU8 usb4DriverId : 4; // USB4 Driver ID + NvU8 reserved : 4; +}NVT_DPCD_USB4_DRIVER_ID; + +//****************************** +// Intel EDID Like Data (ELD) +//****************************** +#define NVT_ELD_VER_1 0x1 // ELD version 1, which is an obsolete ELD structure. Treated as reserved +#define NVT_ELD_VER_2 0x2 // ELD version 2, which supports CEA version 861-D or below. Max baseline ELD size of 80 bytes (15 short audio descriptors) +#define NVT_ELD_VER_VIDEO_DRIVER_UNLOAD 0x1F // Indicates an ELD that has been partially populated through implementation-specific means of default programming before an external + // graphics driver is loaded. Only the fields that are called out as "canned" fields will be populated, and the audio driver should + // ignore the non-"canned" fields. +#define NVT_ELD_CONN_TYPE_HDMI 0x0 // indicates an HDMI connection type +#define NVT_ELD_CONN_TYPE_DP 0x1 // indicates a DP connection type + + +//****************************** +// Audio +//****************************** +#define NVT_AUDIO_768KHZ 768000 // HBR Audio +#define NVT_AUDIO_384KHZ 384000 // HBR Audio +#define NVT_AUDIO_192KHZ 192000 +#define NVT_AUDIO_176KHZ 176000 +#define NVT_AUDIO_96KHZ 96000 +#define NVT_AUDIO_88KHZ 88000 +#define NVT_AUDIO_48KHZ 48000 +#define NVT_AUDIO_44KHZ 44000 +#define NVT_AUDIO_32KHZ 32000 + +//Default format for HDTV is NVT_DEFAULT_HDTV_FMT, i.e. 1080i +#define NVT_DEFAULT_HDTV_PREFERRED_TIMING(x, y, z, p) \ + if(((x) == 1920) && ((y) == 1080) && ((z) != D3DDDI_VSSLO_PROGRESSIVE )) p = 1; + +//Default format for non-DDC displays is 10x7 +#define NVT_DEFAULT_NONDCC_PREFERRED_TIMING(x, y, z, p) \ + if(((x) == 1024) && ((y) == 768) && ((z) == 60 )) p = 1; + + +// Length of user-friendly monitor name, derived from the EDID's +// Display Product Name descriptor block, plus the EDID manufacturer PNP +// ID. 
The Display Product Name can be distributed across four 13-byte +// descriptor blocks, and the PNP ID currently decodes to at most 40 +// characters: 4*13 + 40 = 92 (the define rounds up to 96, leaving room +// for a terminating NUL) +#define NVT_EDID_MONITOR_NAME_STRING_LENGTH 96 + +// Compute the actual size of an EDID with a pointer to an NVT_EDID_INFO. +static NV_INLINE NvU32 NVT_EDID_ACTUAL_SIZE(const NVT_EDID_INFO *pInfo) +{ + return (pInfo->total_extensions + 1) * 128; +} + +//****************************** +//****************************** +//** the export functions ** +//****************************** +//****************************** + +// the common timing function return values +typedef enum +{ + NVT_STATUS_SUCCESS = 0, // Success (no status) + NVT_STATUS_ERR = 0x80000000, // generic get timing error + NVT_STATUS_INVALID_PARAMETER, // passed an invalid parameter + NVT_STATUS_NO_MEMORY, // memory allocation failed + NVT_STATUS_COLOR_FORMAT_NOT_SUPPORTED, + NVT_STATUS_INVALID_HBLANK, + NVT_STATUS_INVALID_BPC, + NVT_STATUS_INVALID_BPP, + NVT_STATUS_MAX_LINE_BUFFER_ERROR, + NVT_STATUS_OVERALL_THROUGHPUT_ERROR, + NVT_STATUS_DSC_SLICE_ERROR, + NVT_STATUS_PPS_SLICE_COUNT_ERROR, + NVT_STATUS_PPS_SLICE_HEIGHT_ERROR, + NVT_STATUS_PPS_SLICE_WIDTH_ERROR, + NVT_STATUS_INVALID_PEAK_THROUGHPUT, + NVT_STATUS_MIN_SLICE_COUNT_ERROR, +} NVT_STATUS; + +//************************************* +// The EDID validation Mask +//************************************* +#define NVT_EDID_VALIDATION_MASK 0xFFFFFFFF +#define NVT_IS_EDID_VALIDATION_FLAGS(x, n) ((((x)&NVT_EDID_VALIDATION_MASK)) & NVBIT32(n)) +#define NVT_CLEAR_EDID_VALIDATION_FLAGS(x, n) ((x)&=(~NVBIT32(n))) + +typedef enum +{ + // errors returned as a bitmask by NvTiming_EDIDValidationMask() + NVT_EDID_VALIDATION_ERR_EXT = 0, + NVT_EDID_VALIDATION_ERR_VERSION, + NVT_EDID_VALIDATION_ERR_SIZE, + NVT_EDID_VALIDATION_ERR_CHECKSUM, + NVT_EDID_VALIDATION_ERR_RANGE_LIMIT, + NVT_EDID_VALIDATION_ERR_DTD, + NVT_EDID_VALIDATION_ERR_HEADER, + NVT_EDID_VALIDATION_ERR_EXT_DTD, + NVT_EDID_VALIDATION_ERR_EXTENSION_TAG, + NVT_EDID_VALIDATION_ERR_EXTENSION_COUNT, + NVT_EDID_VALIDATION_ERR_DESCRIPTOR, + NVT_EDID_VALIDATION_ERR_EXT_CTA_BASIC, + NVT_EDID_VALIDATION_ERR_EXT_CTA_DTD, + NVT_EDID_VALIDATION_ERR_EXT_CTA_TAG, + NVT_EDID_VALIDATION_ERR_EXT_CTA_SVD, + NVT_EDID_VALIDATION_ERR_EXT_CTA_INVALID_DATA_BLOCK, + NVT_EDID_VALIDATION_ERR_EXT_CTA_CHECKSUM, + NVT_EDID_VALIDATION_ERR_EXT_DID_VERSION, + NVT_EDID_VALIDATION_ERR_EXT_DID_EXTCOUNT, + NVT_EDID_VALIDATION_ERR_EXT_DID_CHECKSUM, + NVT_EDID_VALIDATION_ERR_EXT_DID_SEC_SIZE, + NVT_EDID_VALIDATION_ERR_EXT_DID13_TAG, + NVT_EDID_VALIDATION_ERR_EXT_DID13_TYPE1, + NVT_EDID_VALIDATION_ERR_EXT_DID2_TAG, + NVT_EDID_VALIDATION_ERR_EXT_DID2_USE_CASE, + NVT_EDID_VALIDATION_ERR_EXT_DID2_MANDATORY_BLOCKS, + NVT_EDID_VALIDATION_ERR_EXT_DID2_TYPE7, + NVT_EDID_VALIDATION_ERR_EXT_DID2_TYPE10, + NVT_EDID_VALIDATION_ERR_EXT_RANGE_LIMIT, + NVT_EDID_VALIDATION_ERR_EXT_DID2_ADAPTIVE_SYNC, +} NVT_EDID_VALIDATION_ERR_STATUS; +#define NVT_EDID_VALIDATION_ERR_MASK(x) NVBIT32(x) + +//************************************* +// The DisplayID2 validation Mask +//************************************* +typedef enum +{ + // errors returned as a bitmask by NvTiming_DisplayID2ValidationMask() + NVT_DID2_VALIDATION_ERR_VERSION = 0, + NVT_DID2_VALIDATION_ERR_SIZE, + NVT_DID2_VALIDATION_ERR_CHECKSUM, + NVT_DID2_VALIDATION_ERR_NO_DATA_BLOCK, + NVT_EDID_VALIDATION_ERR_TAG, + NVT_DID2_VALIDATION_ERR_RANGE_LIMIT, + NVT_DID2_VALIDATION_ERR_NATIVE_DTD, + NVT_DID2_VALIDATION_ERR_MANDATORY_BLOCKS, + 
NVT_DID2_VALIDATION_ERR_PRODUCT_IDENTIFY, + NVT_DID2_VALIDATION_ERR_PARAMETER, + NVT_DID2_VALIDATION_ERR_INTERFACE, + NVT_DID2_VALIDATION_ERR_TYPE7, + NVT_DID2_VALIDATION_ERR_TYPE10, + NVT_DID2_VALIDATION_ERR_ADAPTIVE_SYNC, +} NVT_DID2_VALIDATION_ERR_STATUS; +#define NVT_DID2_VALIDATION_ERR_MASK(x) NVBIT32(x) + +// timing calculation flags: +#define NVT_FLAG_PROGRESSIVE_TIMING 0x00000000 +#define NVT_FLAG_INTERLACED_TIMING NVT_INTERLACED +#define NVT_FLAG_INTERLACED_TIMING2 NVT_INTERLACED_NO_EXTRA_VBLANK_ON_FIELD2 //without extra vblank on field 2 +#define NVT_FLAG_DOUBLE_SCAN_TIMING 0x00000010 +#define NVT_FLAG_REDUCED_BLANKING_TIMING 0x00000020 +#define NVT_FLAG_MAX_EDID_TIMING 0x00000040 +#define NVT_FLAG_NV_DOUBLE_SCAN_TIMING 0x00000080 +#define NVT_FLAG_NATIVE_TIMING 0x00000100 +#define NVT_FLAG_EDID_TIMING 0x00000200 +#define NVT_FLAG_CEA_4X3_TIMING 0x00000400 +#define NVT_FLAG_CEA_16X9_TIMING 0x00000800 +#define NVT_FLAG_OS_ADDED_TIMING 0x00001000 +#define NVT_FLAG_SPECTRUM_SPREAD 0x00002000 // TODO: remove this +#define NVT_FLAG_EDID_TIMING_RR_MATCH 0x00004000 +#define NVT_FLAG_EDID_861_ST 0x00008000 +#define NVT_FLAG_EDID_DTD_EIZO_SPLIT 0x00010000 +#define NVT_FLAG_DTD1_TIMING 0x00020000 +#define NVT_FLAG_NV_PREFERRED_TIMING 0x00040000 +#define NVT_FLAG_DTD1_PREFERRED_TIMING 0x00080000 +#define NVT_FLAG_DISPLAYID_DTD_PREFERRED_TIMING 0x00100000 +#define NVT_FLAG_CTA_PREFERRED_TIMING 0x00200000 +#define NVT_FLAG_DISPLAYID_T7_DSC_PASSTHRU 0x00400000 +#define NVT_FLAG_DISPLAYID_2_0_TIMING 0x00800000 // this one for the CTA861 embedded in DID20 +#define NVT_FLAG_DISPLAYID_T7_T8_EXPLICT_YUV420 0x01000000 // DID2 E7 spec. supported yuv420 indicated +#define NVT_FLAG_CTA_NATIVE_TIMING 0x02000000 // NVRDB defined +#define NVT_FLAG_CTA_OVT_TIMING 0x04000000 // CTA861 CTA OVT Timing +#define NVT_FLAG_CTA_OVT_FRR_TIMING 0x08000000 // CTA861 CTA OVT Timing supported ntsc + +#define NVT_FLAG_INTERLACED_MASK (NVT_FLAG_INTERLACED_TIMING | NVT_FLAG_INTERLACED_TIMING2) + +#ifdef __cplusplus +extern "C" { +#endif + +// Generic timing parameter calculation +NvU16 NvTiming_CalcRR(NvU32 pclk1khz, NvU16 interlaced, NvU16 HTotal, NvU16 VTotal); +NvU32 NvTiming_CalcRRx1k(NvU32 pclk1khz, NvU16 interlaced, NvU16 HTotal, NvU16 VTotal); + +NvU32 NvTiming_IsRoundedRREqual(NvU16 rr1, NvU32 rr1x1k, NvU16 rr2); +NvU32 NvTiming_IsTimingExactEqual(const NVT_TIMING *pT1, const NVT_TIMING *pT2); +NvU32 NvTiming_IsTimingExactEqualEx(const NVT_TIMING *pT1, const NVT_TIMING *pT2); +NvU32 NvTiming_IsTimingRelaxedEqual(const NVT_TIMING *pT1, const NVT_TIMING *pT2); +NvU16 NvTiming_MaxFrameWidth(NvU16 HVisible, NvU16 rep); + +NvU32 NvTiming_GetVrrFmin(const NVT_EDID_INFO *pEdidInfo, const NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, + NvU32 nominalRefreshRateHz, NVT_PROTOCOL sinkProtocol); + +// Establish timing enumeration +NVT_STATUS NvTiming_EnumEST(NvU32 index, NVT_TIMING *pT); +NVT_STATUS NvTiming_EnumESTIII(NvU32 index, NVT_TIMING *pT); + +// GTF timing calculation +NVT_STATUS NvTiming_CalcGTF(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); + +// DMT timing calculation +NVT_STATUS NvTiming_EnumDMT(NvU32 dmtId, NVT_TIMING *pT); +NVT_STATUS NvTiming_EnumStdTwoBytesCode(NvU16 std2ByteCodes, NVT_TIMING *pT); +NVT_STATUS NvTiming_CalcDMT(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); +NVT_STATUS NvTiming_CalcDMT_RB(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); +NVT_STATUS NvTiming_CalcDMT_RB2(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); + +// CVT 
timing calculation +NVT_STATUS NvTiming_CalcCVT(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); +NVT_STATUS NvTiming_CalcCVT_RB(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); +NVT_STATUS NvTiming_CalcCVT_RB2(NvU32 width, NvU32 height, NvU32 rr, NvBool is1000div1001, NvBool isAltMiniVblankTiming, NVT_TIMING *pT); +NVT_STATUS NvTiming_CalcCVT_RB3(NvU32 width, NvU32 height, NvU32 rr, NvU32 deltaHBlank, NvU32 vBlankMicroSec, NvBool isAltMiniVblankTiming, NvBool isEarlyVSync, NVT_TIMING *pT); +NvBool NvTiming_IsTimingCVTRB(const NVT_TIMING *pTiming); + +// OVT timing calculation +NVT_STATUS NvTiming_CalcOVT(NvU32 width, NvU32 height, NvU32 rr, NVT_TIMING *pT); +NvBool NvTiming_IsTimingOVT(const NVT_TIMING *pTiming); + +// CEA/EIA/Psf timing +NVT_STATUS NvTiming_CalcCEA861bTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NvU32 pixelRepeatCount, NVT_TIMING *pT); +NVT_STATUS NvTiming_EnumCEA861bTiming(NvU32 ceaFormat, NVT_TIMING *pT); +NVT_STATUS NvTiming_EnumNvPsfTiming(NvU32 nvPsfFormat, NVT_TIMING *pT); +NvU32 NvTiming_GetCEA861TimingIndex(NVT_TIMING *pT); + +//expose the HDMI extended video timing defined by the HDMI LLC VSDB +NVT_STATUS NvTiming_EnumHdmiVsdbExtendedTiming(NvU32 hdmi_vic, NVT_TIMING *pT); + +// TV(analog) based timing +NVT_STATUS NvTiming_GetTvTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NvU32 tvFormat, NVT_TIMING *pT); + +// Get EDID timing +NVT_STATUS NvTiming_GetEdidTimingExWithPclk(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_TIMING *pT, NvU32 rrx1k, NvU32 pclk); +NVT_STATUS NvTiming_GetEdidTimingEx(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_TIMING *pT, NvU32 rrx1k); +NVT_STATUS NvTiming_GetEdidTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_TIMING *pT); + +// Get EDID based HDMI Stereo timing +NVT_STATUS NvTiming_GetHDMIStereoExtTimingFromEDID(NvU32 width, NvU32 height, NvU32 rr, NvU8 structure, NvU8 detail, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_EXT_TIMING *pT); +void NvTiming_GetHDMIStereoTimingFrom2DTiming(const NVT_TIMING *pTiming, NvU8 StereoStructureType, NvU8 SideBySideHalfDetail, NVT_EXT_TIMING *pExtTiming); +NVT_STATUS NvTiming_GetHDMIStereoMandatoryFormatDetail(const NvU8 vic, NvU16 *pStereoStructureMask, NvU8 *pSideBySideHalfDetail); + +// EDID based AspectRatio Timing +NVT_STATUS NvTiming_GetEDIDBasedASPRTiming(NvU16 width, NvU16 height, NvU16 rr, NVT_EDID_INFO *pEI, NVT_TIMING *ft); + +// EDID or DISPLAYID2 version +NvU32 NvTiming_GetVESADisplayDescriptorVersion(NvU8 *rawData, NvU32 *pVer); + +// EDID entry parse +NVT_STATUS NV_STDCALL NvTiming_ParseEDIDInfo(NvU8 *pEdid, NvU32 length, NVT_EDID_INFO *pEdidInfo); +NvU32 NvTiming_EDIDValidationMask(NvU8 *pEdid, NvU32 length, NvBool bIsStrongValidation); +NvU32 NvTiming_EDIDStrongValidationMask(NvU8 *pEdid, NvU32 length); +NVT_STATUS NvTiming_EDIDValidation(NvU8 *pEdid, NvU32 length, NvBool bIsStrongValidation); + +// DisplayID20 standalone entry parse +NVT_STATUS NV_STDCALL NvTiming_parseDisplayId20Info(const NvU8 *pDisplayId, NvU32 length, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); +NvU32 NvTiming_DisplayID2ValidationMask(NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, NvBool bIsStrongValidation); +NVT_STATUS NvTiming_DisplayID2ValidationDataBlocks(NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, NvBool bIsStrongValidation); + +NVT_STATUS NvTiming_Get18ByteLongDescriptorIndex(NVT_EDID_INFO *pEdidInfo, NvU8 tag, NvU32 *dtdIndex); +NVT_STATUS 
NvTiming_GetProductName(const NVT_EDID_INFO *pEdidInfo, NvU8 *pProductName, const NvU32 productNameLength); +NvU32 NvTiming_CalculateEDIDCRC32(NvU8* pEDIDBuffer, NvU32 edidsize); +NvU32 NvTiming_CalculateCommonEDIDCRC32(NvU8* pEDIDBuffer, NvU32 edidVersion); +NVT_STATUS NvTiming_CalculateEDIDLimits(NVT_EDID_INFO *pEdidInfo, NVT_EDID_RANGE_LIMIT *pLimit); +void NvTiming_GetMonitorName(NVT_EDID_INFO *pEdidInfo, NvU8 monitor_name[NVT_EDID_MONITOR_NAME_STRING_LENGTH]); + +// utility routines +NvU64 axb_div_c_64(NvU64 a, NvU64 b, NvU64 c); +NvU32 axb_div_c(NvU32 a, NvU32 b, NvU32 c); +NvU32 a_div_b(NvU32 a, NvU32 b); +NvU32 calculateCRC32(NvU8* pBuf, NvU32 bufsize); +void patchChecksum(NvU8* pBuf); +NvBool isChecksumValid(NvU8* pBuf); +NvU32 RRx1kToPclk (NVT_TIMING *pT); +NvU32 RRx1kToPclk1khz (NVT_TIMING *pT); + +NVT_STATUS NvTiming_ComposeCustTimingString(NVT_TIMING *pT); + +// Infoframe/SDP composer +NVT_STATUS NvTiming_ConstructVideoInfoframeCtrl(const NVT_TIMING *pTiming, NVT_VIDEO_INFOFRAME_CTRL *pCtrl); +NVT_STATUS NvTiming_ConstructVideoInfoframe(NVT_EDID_INFO *pEdidInfo, NVT_VIDEO_INFOFRAME_CTRL *pCtrl, NVT_VIDEO_INFOFRAME *pContext, NVT_VIDEO_INFOFRAME *p); +NVT_STATUS NvTiming_ConstructAudioInfoframe(NVT_AUDIO_INFOFRAME_CTRL *pCtrl, NVT_AUDIO_INFOFRAME *pContext, NVT_AUDIO_INFOFRAME *p); +NVT_STATUS NvTiming_ConstructVendorSpecificInfoframe(NVT_EDID_INFO *pEdidInfo, NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL *pCtrl, NVT_VENDOR_SPECIFIC_INFOFRAME *p); +NVT_STATUS NvTiming_ConstructExtendedMetadataPacketInfoframe(NVT_EXTENDED_METADATA_PACKET_INFOFRAME_CTRL *pCtrl, NVT_EXTENDED_METADATA_PACKET_INFOFRAME *p); +void NvTiming_ConstructAdaptiveSyncSDP(const NVT_ADAPTIVE_SYNC_SDP_CTRL *pCtrl, NVT_ADAPTIVE_SYNC_SDP *p); + + +// Get specific timing from parsed EDID +NVT_STATUS NvTiming_GetDTD1Timing (NVT_EDID_INFO * pEdidInfo, NVT_TIMING * pT); + +#define NVT_IS_DTD(d) (NVT_GET_TIMING_STATUS_TYPE((d)) == NVT_TYPE_EDID_DTD) +#define NVT_IS_EXT_DTD(d) (NVT_GET_TIMING_STATUS_TYPE((d)) == NVT_TYPE_EDID_EXT_DTD) +#define NVT_IS_CTA861(d) (NVT_GET_TIMING_STATUS_TYPE((d)) == NVT_TYPE_EDID_861ST) +#define NVT_IS_CTA861_DID_T7(d) (NVT_GET_TIMING_STATUS_TYPE((d)) == NVT_TYPE_CTA861_DID_T7) +#define NVT_IS_CTA861_DID_T8(d) (NVT_GET_TIMING_STATUS_TYPE((d)) == NVT_TYPE_CTA861_DID_T8) +#define NVT_IS_CTA861_DID_T10(d) (NVT_GET_TIMING_STATUS_TYPE((d)) == NVT_TYPE_CTA861_DID_T10) + +#define NVT_IS_DTD1(d) ((NVT_IS_DTD((d))) && (NVT_GET_TIMING_STATUS_SEQ((d)) == 1)) +#define NVT_IS_DTDn(d, n) ((NVT_IS_DTD((d))) && (NVT_GET_TIMING_STATUS_SEQ((d)) == n)) +#define NVT_IS_EXT_DTDn(d, n) ((NVT_IS_EXT_DTD((d))) && (NVT_GET_TIMING_STATUS_SEQ((d)) == n)) +#define NVT_IS_CTA861_DID_T7n(d, n) ((NVT_IS_CTA861_DID_T7((d))) && (NVT_GET_TIMING_STATUS_SEQ((d)) == n)) +#define NVT_IS_CTA861_DID_T8_1(d) ((NVT_IS_CTA861_DID_T8((d))) && (NVT_GET_TIMING_STATUS_SEQ((d)) == 1)) +#define NVT_IS_CTA861_DID_T10n(d, n) ((NVT_IS_CTA861_DID_T10((d))) && (NVT_GET_TIMING_STATUS_SEQ((d)) == n)) + +#define NVT_IS_CTA861_OVT_Tn(flag, status, n) ((0 != (NVT_FLAG_CTA_OVT_TIMING & (flag))) && (NVT_GET_TIMING_STATUS_SEQ((status)) == n)) + +#define NVT_DID20_TIMING_IS_CTA861(flag, status) ((NVT_IS_CTA861((status))) && (0 != (NVT_FLAG_DISPLAYID_2_0_TIMING & (flag)))) +#define NVT_PREFERRED_TIMING_IS_DTD1(flag, status) ((NVT_IS_DTD1((status))) && (0 != (NVT_FLAG_DTD1_PREFERRED_TIMING & (flag)))) +#define NVT_PREFERRED_TIMING_IS_DISPLAYID(flag) (0 != (NVT_FLAG_DISPLAYID_DTD_PREFERRED_TIMING & flag)) +#define NVT_PREFERRED_TIMING_IS_CTA(flag) (0 != 
(NVT_FLAG_CTA_PREFERRED_TIMING & flag)) +#define NVT_NATIVE_TIMING_IS_CTA(flag) (0 != (NVT_FLAG_CTA_NATIVE_TIMING & flag)) +#define NVT_TIMING_IS_OVT(flag) (0 != (NVT_FLAG_CTA_OVT_TIMING & flag)) +#define NVT_FRR_TIMING_IS_OVT(flag) (0 != (NVT_FLAG_CTA_OVT_FRR_TIMING & flag)) + +#ifdef __cplusplus +} +#endif + +#endif //__NVTIMING_H__ diff --git a/src/common/modeset/timing/nvtiming_pvt.h b/src/common/modeset/timing/nvtiming_pvt.h new file mode 100644 index 0000000..04aca8c --- /dev/null +++ b/src/common/modeset/timing/nvtiming_pvt.h @@ -0,0 +1,170 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvtiming_pvt.h +// +// Purpose: the private functions/structures which are only used inside +// the nv timing library. 
+// +//***************************************************************************** + +#ifndef __NVTIMING_PVT_H_ +#define __NVTIMING_PVT_H_ + +#include "nvtiming.h" + +#if defined(NVT_USE_NVKMS) + #include "nvidia-modeset-os-interface.h" + #define NVT_SNPRINTF nvkms_snprintf +#else + #include <stdio.h> + #include <stdarg.h> + #define NVT_SNPRINTF snprintf +#endif + +#define nvt_assert(p) ((void)0) + +#ifdef DD_UNITTEST +#undef nvt_assert +#define nvt_assert(p) ((void)0) +#endif // DD_UNITTEST + +#include <stddef.h> // NULL + +#ifdef __cplusplus +extern "C" { +#endif + +// EDID related private functions +NvU32 getEdidVersion(NvU8 *pData, NvU32 *pVer); +NvBool assignNextAvailableTiming(NVT_EDID_INFO *pInfo, const NVT_TIMING *pTiming); +void parseEdidCvtTiming(NVT_EDID_INFO *pInfo); +void parseEdidEstablishedTiming(NVT_EDID_INFO *pInfo); +void parseEdidStandardTiming(NVT_EDID_INFO *pInfo); +void parseEdidDetailedTiming(NvU8 *pEdid, NVT_EDID_INFO *pInfo); +NVT_STATUS parseEdidDetailedTimingDescriptor(NvU8 *pDTD, NVT_TIMING *pT); +void parseEdidCvt3ByteDescriptor(NvU8 *p, NVT_EDID_INFO *pInfo, NvU32 *vtbCount); +void parseEdidStandardTimingDescriptor(NvU16 timing, NVT_EDID_INFO *pInfo, NvU32 count, NVT_TIMING * pT); +void parseVTBExtension(NvU8 *pEdidExt, NVT_EDID_INFO *pInfo); +void updateHDMILLCDeepColorForTiming(NVT_EDID_INFO *pInfo, NvU32 index); +void updateBpcForTiming(NVT_EDID_INFO *pInfo, NvU32 index); +void updateColorFormatAndBpcTiming(NVT_EDID_INFO *pInfo); +// End EDID + +// CTA861 related private functions +NVT_STATUS get861ExtInfo(NvU8 *pEdid, NvU32 edidSize, NVT_EDID_CEA861_INFO *p); +NVT_STATUS parseCta861DataBlockInfo(NvU8 *pEdid, NvU32 size, NVT_EDID_CEA861_INFO *p); +void parse861ExtDetailedTiming(NvU8 *pEdidExt, NvU8 basicCaps, NVT_EDID_INFO *pInfo); +void parse861bShortTiming(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag); +void parse861bShortYuv420Timing(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag); +void parseCta861VideoFormatDataBlock(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo); +void parseCta861NativeOrPreferredTiming(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag); +void parseCta861VsdbBlocks(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag); +void parseCta861VsvdbBlocks(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag); +void parseCta861HfScdb(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag); +void parseCta861HfEeodb(NVT_EDID_CEA861_INFO *pExt861, NvU32 *pTotalEdidExtensions); +void parseEdidMsftVsdbBlock(VSDB_DATA *pVsdb, MSFT_VSDB_PARSED_INFO *vsdbInfo); +void parseEdidHdmiLlcBasicInfo(VSDB_DATA *pVsdb, NVT_HDMI_LLC_INFO *pHdmiLlc); +void parseEdidHdmiForumVSDB(VSDB_DATA *pVsdb, NVT_HDMI_FORUM_INFO *pHdmiInfo); +void getEdidHDM1_4bVsdbTiming(NVT_EDID_INFO *pInfo); +void parseEdidHDMILLCTiming(NVT_EDID_INFO *pInfo, VSDB_DATA *pVsdb, NvU32 *pSupported, HDMI3DSUPPORTMAP * pM); +void parseEdidNvidiaVSDBBlock(VSDB_DATA *pVsdb, NVDA_VSDB_PARSED_INFO *vsdbInfo); +void parseCta861HdrStaticMetadataDataBlock(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag); +void parseCta861DvStaticMetadataDataBlock(VSVDB_DATA* pVsvdb, NVT_DV_STATIC_METADATA* pDvInfo); +void parseCta861Hdr10PlusDataBlock(VSVDB_DATA* pVsvdb, NVT_HDR10PLUS_INFO* pHdr10PlusInfo); +void parseCta861DIDType7VideoTimingDataBlock(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo); +void parseCta861DIDType8VideoTimingDataBlock(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo); +void parseCta861DIDType10VideoTimingDataBlock(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo);
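// Editor's aside -- a minimal sketch (not part of the patch) of the 128-byte
// EDID block checksum rule that the patchChecksum()/isChecksumValid()
// utilities declared in nvtiming.h revolve around: all bytes of a block must
// sum to 0 modulo 256, so the final byte cancels the sum of the first 127.
// The helper name below is hypothetical.
static NvU8 nvtExampleBlockChecksum(const NvU8 *pBlock /* one 128-byte block */)
{
    NvU32 sum = 0;
    NvU32 i;
    for (i = 0; i < 127; i++)
    {
        sum += pBlock[i];
    }
    // writing this value into byte 127 makes the whole block sum to 0 mod 256
    return (NvU8)((0x100 - (sum & 0xFF)) & 0xFF);
}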
+NvBool isMatchedCTA861Timing(NVT_EDID_INFO *pInfo, NVT_TIMING *pT); +NvBool isMatchedStandardTiming(NVT_EDID_INFO *pInfo, NVT_TIMING *pT); +NvBool isMatchedEstablishedTiming(NVT_EDID_INFO *pInfo, NVT_TIMING *pT); +NvU32 isHdmi3DStereoType(NvU8 StereoStructureType); +NvU32 getCEA861TimingAspectRatio(NvU32 vic); +NvU8 getHighestPrioritySVRIdx(const NVT_EDID_CEA861_INFO *pExt861); +void SetActiveSpaceForHDMI3DStereo(const NVT_TIMING *pTiming, NVT_EXT_TIMING *pExtTiming); +void AddModeToSupportMap(HDMI3DSUPPORTMAP * pMap, NvU8 vic, NvU8 structure, NvU8 Detail); +void getMonitorDescriptorString(NvU8 *pEdid, NvU8 tag, char *str, int onceOnly); +// End CTA861 + +// DisplayID base / extension related functions +NvU32 getDID2Version(NvU8 *pData, NvU32 *pVer); +NVT_STATUS getDisplayIdEDIDExtInfo(NvU8* pEdid, NvU32 edidSize, NVT_EDID_INFO* pEdidInfo); +NVT_STATUS parseDisplayIdBlock(NvU8* pBlock, NvU8 max_length, NvU8* pLength, NVT_EDID_INFO* pEdidInfo); +NVT_STATUS getDisplayId20EDIDExtInfo(NvU8* pDisplayid, NvU32 edidSize, NVT_EDID_INFO* pEdidInfo); +NVT_STATUS parseDisplayId20EDIDExtDataBlocks(NvU8* pDataBlock, NvU8 remainSectionLength, NvU8* pCurrentDBLength, NVT_EDID_INFO* pEdidInfo); +NVT_STATUS parseDisplayId20Timing7Descriptor(const void *pDescriptor, NVT_TIMING *pTiming, NvU8 count); +NVT_STATUS parseDisplayId20Timing8Descriptor(const void *pDescriptor, NVT_TIMING *pTiming, NvU8 codeType, NvU8 codeSize, NvU8 index, NvU8 count); +NVT_STATUS parseDisplayId20Timing9Descriptor(const void *pDescriptor, NVT_TIMING *pTiming, NvU8 count); +NVT_STATUS parseDisplayId20Timing10Descriptor(const void *pDescriptor, NVT_TIMING *pTiming, NvU8 payloadBytes, NvU8 count); +void updateColorFormatForDisplayIdExtnTimings(NVT_EDID_INFO* pInfo, NvU32 timingIdx); +void updateColorFormatForDisplayId20ExtnTimings(NVT_EDID_INFO* pInfo, NvU32 timingIdx); +NvBool assignNextAvailableDisplayId20Timing(NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, const NVT_TIMING *pTiming); +void updateColorFormatForDisplayId20Timings(NVT_DISPLAYID_2_0_INFO* pDisplayId2Info, NvU32 timingIdx); +// End DisplayID +#ifdef __cplusplus +} +#endif + +NvU32 axb_div_c_old(NvU32 a, NvU32 b, NvU32 c); + +#define NVT_EDID_BLOCK_SIZE 128 + +#define NVT_PVT_INTERLACED_MASK 0xF +#define NVT_PVT_DOUBLESCAN_MASK 0x10 +#define NVT_PVT_RB_MASK 0x20 + +#define NVT_PVT_DOUBLE_SCAN_HEIGHT 384 +#define NVT_PVT_DOUBLE_SCAN_HEIGHT_VGA 600 +#define NVT_PVT_DOUBLE_SCAN_PCLK_MIN 1200 //in 10KHz + +#define abs(a) ((a)>0?(a):-(a)) +#define set_rrx1k(a,b,c) ((b)*(c)==0?(0):(NvU32)(((NvU64)(a)*10000*1000+(b)*(c)/2)/((b)*(c)))) +#define frame_height(a) ((NvU32)((a).VVisible * ((a).interlaced!=0?2:1))) +#define nvt_is_wideaspect(width,height) ((width)*5 >= (height)*8) + +#ifndef MIN +#define MIN(x, y) ((x)>(y) ? (y) : (x)) +#endif +#ifndef MAX +#define MAX(x,y) ((x) > (y) ? (x) : (y)) +#endif
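// Editor's aside -- set_rrx1k() above derives a refresh rate in 0.001 Hz
// units from a pixel clock in 10 kHz units (the unit NVT_TIMING stores):
// rr_x1k = a * 10^7 / (b * c), rounded to nearest. The same computation as a
// function, under those assumptions (helper name is hypothetical):
static NvU32 nvtExampleRRx1k(NvU32 pclk10KHz, NvU32 hTotal, NvU32 vTotal)
{
    NvU64 total = (NvU64)hTotal * vTotal;
    if (total == 0)
        return 0;
    // e.g. 1080p60: nvtExampleRRx1k(14850, 2200, 1125) == 60000 -> 60.000 Hz
    return (NvU32)(((NvU64)pclk10KHz * 10000 * 1000 + total / 2) / total);
}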
+ + +#ifndef COUNT +#define COUNT(a) (sizeof(a)/sizeof(a[0])) +#endif +#ifndef offsetof +#define offsetof(st, m) ((size_t) ( (char *)&((st *)(0))->m - (char *)0 )) +#endif +#define nvt_nvu8_set_bits(d, s, m, shift) {(d)&=(NvU8)((NvU8)(m)^0xFFU);(d)|=((s)<<(shift))&(m);} +#define nvt_get_bits(d, m, shift) (((d)&(m))>>shift) +#define nvt_lowest_bit(n) ((n)&(~((n)-1))) +#define nvt_aspect_x(n) ((n)>>16) +#define nvt_aspect_y(n) ((n)&0xFFFF) + +// Sentinel values for NVT_TIMING +#define NVT_TIMINGEXT_SENTINEL {0,0,0,0,0,{0},{0},{0},{0},0,""} +#define NVT_TIMING_SENTINEL {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,NVT_TIMINGEXT_SENTINEL} + +#endif //__NVTIMING_PVT_H_ + diff --git a/src/common/sdk/nvidia/inc/alloc/alloc_channel.h b/src/common/sdk/nvidia/inc/alloc/alloc_channel.h new file mode 100644 index 0000000..465efcb --- /dev/null +++ b/src/common/sdk/nvidia/inc/alloc/alloc_channel.h @@ -0,0 +1,345 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: alloc/alloc_channel.finn +// + +#include "nvlimits.h" +#include "nvcfg_sdk.h" + +typedef struct NV_MEMORY_DESC_PARAMS { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 addressSpace; + NvU32 cacheAttrib; +} NV_MEMORY_DESC_PARAMS; + +/* + * NV_CHANNEL_ALLOC_PARAMS.flags values. + * + * These flags may apply to all channel types: PIO, DMA, and GPFIFO. + * They are also designed so that zero is always the correct default. + * + * NVOS04_FLAGS_CHANNEL_TYPE: + * This flag specifies the type of channel to allocate. Legal values + * for this flag include: + * + * NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL: + * This flag specifies that a physical channel is to be allocated. 
+ * + * NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL: + * OBSOLETE - NOT SUPPORTED + * + * NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL: + * OBSOLETE - NOT SUPPORTED + */ + +/* valid NVOS04_FLAGS_CHANNEL_TYPE values */ +#define NVOS04_FLAGS_CHANNEL_TYPE 1:0 +#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000 +#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE +#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE + +/* + * NVOS04_FLAGS_VPR: + * This flag specifies if channel is intended for work with + * Video Protected Regions (VPR) + * + * NVOS04_FLAGS_VPR_TRUE: + * The channel will only write to protected memory regions. + * + * NVOS04_FLAGS_VPR_FALSE: + * The channel will never read from protected memory regions. + */ +#define NVOS04_FLAGS_VPR 2:2 +#define NVOS04_FLAGS_VPR_FALSE 0x00000000 +#define NVOS04_FLAGS_VPR_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_CC_SECURE: + * This flag specifies if channel is intended to be used for + * encryption/decryption of data between SYSMEM <-> VIDMEM. Only CE + * & SEC2 Channels are capable of handling encrypted content and this + * flag will be ignored when CC is disabled or for chips that are not CC + * Capable. + * Reusing VPR index since VPR & CC are mutually exclusive. + * + * NVOS04_FLAGS_CC_SECURE_TRUE: + * The channel will support CC Encryption/Decryption + * + * NVOS04_FLAGS_CC_SECURE_FALSE: + * The channel will not support CC Encryption/Decryption + */ +#define NVOS04_FLAGS_CC_SECURE 2:2 +#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000 +#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001 + + + +/* + * NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING: + * This flag specifies if the channel can skip refcounting of potentially + * accessed mappings on job kickoff. This flag is only meaningful for + * kernel drivers which perform refcounting of memory mappings. + * + * NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE: + * The channel cannot skip refcounting of memory mappings + * + * NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE: + * The channel can skip refcounting of memory mappings + */ +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE: + * This flag specifies which "runqueue" the allocated channel will be + * executed on in a TSG. Channels on different runqueues within a TSG + * may be able to feed methods into the engine simultaneously. + * Non-default values are only supported on GP10x and later and only for + * channels within a TSG. + */ +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001 + +/* + * NVOS04_FLAGS_PRIVILEGED_CHANNEL: + * This flag tells RM whether to give the channel admin privilege. This + * flag will only take effect if the client is GSP-vGPU plugin. It is + * needed so that guest can update page tables in physical mode and do + * scrubbing. + */ +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING: + * This flag tells RM not to schedule a newly created channel within a + * channel group immediately even if channel group is currently scheduled. + * Channel will not be scheduled until NVA06F_CTRL_GPFIFO_SCHEDULE is + * invoked. This is used, e.g., 
for CUDA which needs to do additional + * initialization before starting up a channel. + * Default is FALSE. + */ +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_DENY_PHYSICAL_MODE_CE: + * This flag specifies whether or not to deny access to the physical + * mode of CopyEngine regardless of whether or not the client handle + * is admin. If set to true, this channel allocation will always result + * in an unprivileged channel. If set to false, the privilege of the channel + * will depend on the privilege level of the client handle. + * This is primarily meant for vGPU since all client handles + * granted to guests are admin. + */ +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE + * + * This flag specifies the channel offset in terms of within a page of + * USERD. For example, value 3 means the 4th channel within a USERD page. + * Given the USERD size is 512B, we will have 8 channels total, so 3 bits + * are reserved. + * + * When _USERD_INDEX_FIXED_TRUE is set but INDEX_PAGE_FIXED_FALSE is set, + * it will ask for a new USERD page. + * + */ +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8 + +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE + * + * This flag specifies the channel offset in terms of USERD page. When + * this PAGE_FIXED_TRUE is set, the INDEX_FIXED_FALSE bit should also + * be set, otherwise INVALID_STATE will be returned. + * + * And the field _USERD_INDEX_VALUE will be used to request the specific + * offset within a USERD page. + */ + +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12 + +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_DENY_AUTH_LEVEL_PRIV + * This flag specifies whether or not to deny access to the privileged + * host methods TLB_INVALIDATE and ACCESS_COUNTER_CLR + */ +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER + * + * This flag specifies scrubbing should be skipped for any internal + * allocations made for this channel from PMA using ctx buf pools. + * Only kernel clients are allowed to use this setting. + */ +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO + * + * This flag specifies that the client is expected to map USERD themselves + * and RM need not do so. 
+ */ +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL + */ +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT + * + * This flag specifies whether the channel calling context is from CPU + * VGPU plugin. + */ +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001 + + /* + * NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT + * + * This flag specifies the channel PBDMA ACQUIRE timeout option. + * _FALSE to disable it, _TRUE to enable it. + * When this flag is enabled, if a host semaphore acquire does not + * complete in about 2 sec, it will time out and trigger an RC error. + */ +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_GROUP_CHANNEL_THREAD: + * This flag specifies the thread ID in which an allocated channel + * will be executed in a TSG. The relationship between the thread ID + * in a TSG and the respective definitions is implementation specific. + * Also, not all classes will be supported at thread > 0. + * This field cannot be used on non-TSG channels and must be set to + * the default value (0) in that case. If thread > 0 on a non-TSG + * channel, the allocation will fail. + */ +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002 + +#define NVOS04_FLAGS_MAP_CHANNEL 30:30 +#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001 + +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31 +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000 +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001
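// Editor's aside -- the "high:low" tokens above (e.g. NVOS04_FLAGS_CHANNEL_TYPE
// 1:0) are FINN bit-range notation; callers normally manipulate these fields
// through the DRF-style helpers in nvmisc.h. Spelled out by hand for the 1:0
// field, with a hypothetical helper name:
static NvU32 nvosExampleSetChannelType(NvU32 flags, NvU32 channelType)
{
    const NvU32 mask = 0x3;                        // bits 1:0 of flags
    return (flags & ~mask) | (channelType & mask); // e.g. ..._CHANNEL_TYPE_PHYSICAL
}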
+ + +#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U +#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U + +#define NV_CHANNEL_ALLOC_PARAMS_MESSAGE_ID (0x906fU) + +typedef struct NV_CHANNEL_ALLOC_PARAMS { + + NvHandle hObjectError; // error context DMA + NvHandle hObjectBuffer; // no longer used + NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO + NvU32 gpFifoEntries; // number of GP FIFO entries + + NvU32 flags; + + + NvHandle hContextShare; // context share handle + NvHandle hVASpace; // VASpace for the channel + + // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0 + NvHandle hUserdMemory[NV_MAX_SUBDEVICES]; + + // offset to beginning of UserD within hUserdMemory[x] + NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8); + + // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated + NvU32 engineType; + // Channel identifier that is unique for the duration of an RM session + NvU32 cid; + // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods + NvU32 subDeviceId; + NvHandle hObjectEccError; // ECC error context DMA + + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8); + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8); + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8); + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8); + + NvHandle hPhysChannelGroup; // reserved + NvU32 internalFlags; // reserved + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved + NvU32 ProcessID; // reserved + NvU32 SubProcessID; // reserved + + // IV used for CPU-side encryption / GPU-side decryption. + NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved + // IV used for CPU-side decryption / GPU-side encryption. + NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved + // Nonce used for CPU-side signing / GPU-side signature verification. + NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved + NvU32 tpcConfigID; // TPC Configuration Id as supported by DTD-PG Feature +} NV_CHANNEL_ALLOC_PARAMS; + +typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS; + diff --git a/src/common/sdk/nvidia/inc/cc_drv.h b/src/common/sdk/nvidia/inc/cc_drv.h new file mode 100644 index 0000000..6d7a09a --- /dev/null +++ b/src/common/sdk/nvidia/inc/cc_drv.h @@ -0,0 +1,101 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: cc_drv.finn +// + + + +#include "nvtypes.h" +#include "nvcfg_sdk.h" + +// CLASS NV_CONF_COMPUTE +#define CC_AES_256_GCM_IV_SIZE_BYTES (0xcU) /* finn: Evaluated from "(96 / 8)" */ +#define CC_AES_256_GCM_IV_SIZE_DWORD (0x3U) /* finn: Evaluated from "(CC_AES_256_GCM_IV_SIZE_BYTES / 4)" */ +#define CC_AES_256_GCM_KEY_SIZE_BYTES (0x20U) /* finn: Evaluated from "(256 / 8)" */ +#define CC_AES_256_GCM_KEY_SIZE_DWORD (0x8U) /* finn: Evaluated from "(CC_AES_256_GCM_KEY_SIZE_BYTES / 4)" */ + +#define CC_HMAC_NONCE_SIZE_BYTES (0x20U) /* finn: Evaluated from "(256 / 8)" */ +#define CC_HMAC_NONCE_SIZE_DWORD (0x8U) /* finn: Evaluated from "(CC_HMAC_NONCE_SIZE_BYTES / 4)" */ +#define CC_HMAC_KEY_SIZE_BYTES (0x20U) /* finn: Evaluated from "(256 / 8)" */ +#define CC_HMAC_KEY_SIZE_DWORD (0x8U) /* finn: Evaluated from "(CC_HMAC_KEY_SIZE_BYTES / 4)" */ + + +// Type is shared between CC control calls and RMKeyStore +typedef enum ROTATE_IV_TYPE { + ROTATE_IV_ENCRYPT = 0, // Rotate the IV for encryptBundle + ROTATE_IV_DECRYPT = 1, // Rotate the IV for decryptBundle + ROTATE_IV_HMAC = 2, // Rotate the IV for hmacBundle + ROTATE_IV_ALL_VALID = 3, // Rotate the IV for all valid bundles in the KMB +} ROTATE_IV_TYPE; + +// Status value written into NvNotification.Info16 +typedef enum KEY_ROTATION_STATUS { + KEY_ROTATION_STATUS_IDLE = 0, // Key rotation complete/not in progress + KEY_ROTATION_STATUS_PENDING = 1, // RM is waiting for clients to report their channels are idle for key rotation + KEY_ROTATION_STATUS_IN_PROGRESS = 2, // Key rotation is in progress + KEY_ROTATION_STATUS_FAILED_TIMEOUT = 3, // Key rotation timeout failure, RM will RC non-idle channels + KEY_ROTATION_STATUS_FAILED_THRESHOLD = 4, // Key rotation failed because upper threshold was crossed, RM will RC non-idle channels + KEY_ROTATION_STATUS_FAILED_ROTATION = 5, // Internal RM failure while rotating keys for a certain channel, RM will RC the channel + KEY_ROTATION_STATUS_PENDING_TIMER_SUSPENDED = 6, // Key rotation timer suspended waiting for kernel key rotation to complete + KEY_ROTATION_STATUS_MAX_COUNT = 7, +} KEY_ROTATION_STATUS; + +typedef struct CC_AES_CRYPTOBUNDLE { + NvU32 iv[CC_AES_256_GCM_IV_SIZE_DWORD]; + NvU32 key[CC_AES_256_GCM_KEY_SIZE_DWORD]; + NvU32 ivMask[CC_AES_256_GCM_IV_SIZE_DWORD]; +} CC_AES_CRYPTOBUNDLE; +typedef struct CC_AES_CRYPTOBUNDLE *PCC_AES_CRYPTOBUNDLE; + +typedef struct CC_HMAC_CRYPTOBUNDLE { + NvU32 nonce[CC_HMAC_NONCE_SIZE_DWORD]; + NvU32 key[CC_HMAC_KEY_SIZE_DWORD]; +} CC_HMAC_CRYPTOBUNDLE; +typedef struct CC_HMAC_CRYPTOBUNDLE *PCC_HMAC_CRYPTOBUNDLE; + +typedef struct CC_KMB { + CC_AES_CRYPTOBUNDLE encryptBundle; // Bundle of encryption material + + union { + CC_HMAC_CRYPTOBUNDLE hmacBundle; // HMAC bundle used for method stream authenticity + CC_AES_CRYPTOBUNDLE decryptBundle; // Bundle of decryption material + }; + NvBool bIsWorkLaunch; // False if decryption parameters are valid +} CC_KMB; +typedef struct CC_KMB *PCC_KMB; + +typedef struct CC_CRYPTOBUNDLE_STATS { + NV_DECLARE_ALIGNED(NvU64 numEncryptionsH2D, 8); + NV_DECLARE_ALIGNED(NvU64 numEncryptionsD2H, 8); + NV_DECLARE_ALIGNED(NvU64 bytesEncryptedH2D, 8); + NV_DECLARE_ALIGNED(NvU64 bytesEncryptedD2H, 8); +} CC_CRYPTOBUNDLE_STATS; +typedef struct CC_CRYPTOBUNDLE_STATS *PCC_CRYPTOBUNDLE_STATS; +
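// Editor's aside -- the _DWORD counts above are simply the byte sizes divided
// by 4 (AES-256-GCM: 32-byte key, 12-byte/96-bit IV; HMAC: 32-byte key and
// nonce). A hypothetical compile-time cross-check of that relationship:
typedef char ccExampleIvSizeCheck[
    (CC_AES_256_GCM_IV_SIZE_DWORD * 4 == CC_AES_256_GCM_IV_SIZE_BYTES) ? 1 : -1];
typedef char ccExampleKeySizeCheck[
    (CC_AES_256_GCM_KEY_SIZE_DWORD * 4 == CC_AES_256_GCM_KEY_SIZE_BYTES) ? 1 : -1];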
diff --git a/src/common/sdk/nvidia/inc/class/cl0000.h b/src/common/sdk/nvidia/inc/class/cl0000.h new file mode 100644 index 0000000..b56b157 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0000.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl0000.finn +// + +#include "nvlimits.h" +#include "cl0000_notification.h" + +/* object NV01_NULL_OBJECT */ +#define NV01_NULL_OBJECT (0x0) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */ + +/* obsolete aliases */ +#define NV1_NULL_OBJECT NV01_NULL_OBJECT + +#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */ + +/* NvAlloc parameters */ +#define NV0000_ALLOC_PARAMETERS_MESSAGE_ID (0x0000U) + +typedef struct NV0000_ALLOC_PARAMETERS { + NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */ + NvU32 processID; + char processName[NV_PROC_NAME_MAX_LENGTH]; + NV_DECLARE_ALIGNED(NvP64 pOsPidInfo, 8); +} NV0000_ALLOC_PARAMETERS; + diff --git a/src/common/sdk/nvidia/inc/class/cl0000_notification.h b/src/common/sdk/nvidia/inc/class/cl0000_notification.h new file mode 100644 index 0000000..9c91de3 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0000_notification.h @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0000_notification_h_ +#define _cl0000_notification_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/*Status definitions for NV0000_NOTIFIERS_DISPLAY_CHANGE event*/ + +#define NV0000_NOTIFIERS_STATUS_ACPI_DISPLAY_DEVICE_CYCLE (0) + +//--------------------------------------------------------------------------- + +/* NvNotification[] fields and values */ +#define NV000_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) + + +/* pio method data structure */ +typedef volatile struct _cl0000_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv000Typedef, Nv01Root; + +/* obsolete aliases */ +#define NV000_TYPEDEF Nv01Root +#define Nv1Root Nv01Root +#define nv1Root Nv01Root +#define nv01Root Nv01Root + +/*event values*/ +#define NV0000_NOTIFIERS_ENABLE_CPU_UTIL_CTRL (1) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0000_notification_h_ */ + diff --git a/src/common/sdk/nvidia/inc/class/cl0001.h b/src/common/sdk/nvidia/inc/class/cl0001.h new file mode 100644 index 0000000..10a8af4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0001.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0001_h_ +#define _cl0001_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV01_ROOT_NON_PRIV (0x00000001) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0001_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0002.h b/src/common/sdk/nvidia/inc/class/cl0002.h new file mode 100644 index 0000000..dd1d6c6 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0002.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2001-2001, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0002_h_ +#define _cl0002_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_CONTEXT_DMA_FROM_MEMORY (0x00000002) +/* NvNotification[] fields and values */ +#define NV002_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl0002_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv002Typedef, Nv01ContextDmaFromMemory; +#define NV002_TYPEDEF Nv01ContextDmaFromMemory +/* obsolete stuff */ +#define NV1_CONTEXT_DMA_FROM_MEMORY (0x00000002) +#define NV01_CONTEXT_DMA (0x00000002) +#define Nv1ContextDmaFromMemory Nv01ContextDmaFromMemory +#define nv1ContextDmaFromMemory Nv01ContextDmaFromMemory +#define nv01ContextDmaFromMemory Nv01ContextDmaFromMemory + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0002_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0004.h b/src/common/sdk/nvidia/inc/class/cl0004.h new file mode 100644 index 0000000..32e25e7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0004.h @@ -0,0 +1,50 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl0004_h_ +#define _cl0004_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_TIMER (0x00000004) +/* NvNotification[] elements */ +#define NV004_NOTIFIERS_SET_ALARM_NOTIFY (0) +#define NV004_NOTIFIERS_MAXCOUNT (1) + +/* mapped timer registers */ +typedef volatile struct _Nv01TimerMapTypedef { + NvU32 Reserved00[0x100]; + NvU32 PTimerTime0; /* 0x00009400 */ + NvU32 Reserved01[0x3]; + NvU32 PTimerTime1; /* 0x00009410 */ +} Nv01TimerMap; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0004_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0005.h b/src/common/sdk/nvidia/inc/class/cl0005.h new file mode 100644 index 0000000..f3f5b53 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0005.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl0005.finn +// + +#include "cl0005_notification.h" + +#define NV01_EVENT (0x5U) /* finn: Evaluated from "NV0005_ALLOC_PARAMETERS_MESSAGE_ID" */ + +/* NvRmAlloc() parameters */ +#define NV0005_ALLOC_PARAMETERS_MESSAGE_ID (0x0005U) + +typedef struct NV0005_ALLOC_PARAMETERS { + NvHandle hParentClient; + NvHandle hSrcResource; + + NvV32 hClass; + NvV32 notifyIndex; + NV_DECLARE_ALIGNED(NvP64 data, 8); +} NV0005_ALLOC_PARAMETERS; + + +/* NV0005_ALLOC_PARAMETERS's notifyIndex field is overloaded to contain the + * notifyIndex value itself, plus flags, and optionally a subdevice field if + * flags contains NV01_EVENT_SUBDEVICE_SPECIFIC. Note that NV01_EVENT_* + * contain the full 32-bit flag value that is OR'd into notifyIndex, not the + * contents of the FLAGS field (i.e. NV01_EVENT_* are pre-shifted into place). + */ +#define NV0005_NOTIFY_INDEX_INDEX 15:0 +#define NV0005_NOTIFY_INDEX_SUBDEVICE 23:16 +#define NV0005_NOTIFY_INDEX_FLAGS 31:24
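// Editor's aside -- a sketch of the notifyIndex packing described above
// (index in bits 15:0, subdevice in 23:16, flags in 31:24; the NV01_EVENT_*
// values arrive pre-shifted into the flags byte). Helper name is hypothetical:
static NvV32 nv0005ExamplePackNotifyIndex(NvU16 index, NvU8 subdevice, NvU32 preShiftedFlags)
{
    return (NvV32)((NvU32)index |
                   ((NvU32)subdevice << 16) |
                   (preShiftedFlags & 0xFF000000U));
}
 diff --git a/src/common/sdk/nvidia/inc/class/cl0005_notification.h b/src/common/sdk/nvidia/inc/class/cl0005_notification.h new file mode 100644 index 0000000..5e6b32e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0005_notification.h @@ -0,0 +1,51 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 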
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0005_notification_h_ +#define _cl0005_notification_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* NvNotification[] fields and values */ +#define NV003_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) + +/* pio method data structure */ +typedef volatile struct _cl0005_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv005Typedef, Nv01Event; + +#define NV005_TYPEDEF Nv01Event + +/* obsolete stuff */ +#define NV1_TIMER (0x00000004) +#define Nv1Event Nv01Event +#define nv1Event Nv01Event +#define nv01Event Nv01Event + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0005_notification_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0020.h b/src/common/sdk/nvidia/inc/class/cl0020.h new file mode 100644 index 0000000..87154a9 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0020.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl0020_h_ +#define _cl0020_h_ + +#include "nvtypes.h" + +#define NV0020_GPU_MANAGEMENT (0x00000020) + +#endif /* _cl0020_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl003e.h b/src/common/sdk/nvidia/inc/class/cl003e.h new file mode 100644 index 0000000..d23e642 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl003e.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2001-2001, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl003e_h_ +#define _cl003e_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_CONTEXT_ERROR_TO_MEMORY (0x0000003E) +#define NV01_MEMORY_SYSTEM (0x0000003E) +/* NvNotification[] fields and values */ +#define NV03E_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl003e_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv03eTypedef, Nv01ContextErrorToMemory; +#define NV03E_TYPEDEF Nv01ContextErrorToMemory +/* obsolete stuff */ +#define NV1_CONTEXT_ERROR_TO_MEMORY (0x0000003E) +#define Nv1ContextErrorToMemory Nv01ContextErrorToMemory +#define nv1ContextErrorToMemory Nv01ContextErrorToMemory +#define nv01ContextErrorToMemory Nv01ContextErrorToMemory + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl003e_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0040.h b/src/common/sdk/nvidia/inc/class/cl0040.h new file mode 100644 index 0000000..103ec3a --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0040.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2001 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _cl0040_h_ +#define _cl0040_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_MEMORY_LOCAL_USER (0x00000040) +/* NvNotification[] fields and values */ +#define NV040_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl0040_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv040Typedef, Nv01MemoryLocalUser; +#define NV040_TYPEDEF Nv01MemoryLocalUser +/* obsolete stuff */ +#define NV01_MEMORY_USER (0x00000040) +#define NV1_MEMORY_USER (0x00000040) +#define Nv01MemoryUser Nv01MemoryLocalUser +#define nv01MemoryUser Nv01MemoryLocalUser +#define Nv1MemoryUser Nv01MemoryLocalUser +#define nv1MemoryUser Nv01MemoryLocalUser +#define nv01MemoryLocalUser Nv01MemoryLocalUser + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0040_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0041.h b/src/common/sdk/nvidia/inc/class/cl0041.h new file mode 100644 index 0000000..fa8707a --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0041.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2001-2005, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0041_h_ +#define _cl0041_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV04_MEMORY (0x00000041) +/* NvNotification[] fields and values */ +#define NV041_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl0041_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv041Typedef, Nv04Memory; +#define NV041_TYPEDEF Nv04Memory; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0041_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0070.h b/src/common/sdk/nvidia/inc/class/cl0070.h new file mode 100644 index 0000000..95d173e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0070.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2001-2022, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl0070.finn +// + +#define NV01_MEMORY_VIRTUAL (0x70U) /* finn: Evaluated from "NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS_MESSAGE_ID" */ +#define NV01_MEMORY_SYSTEM_DYNAMIC (0x70U) /* finn: Evaluated from "NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS_MESSAGE_ID" */ + +/* + * NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS + * + * Allocation params for NV01_MEMORY_VIRTUAL. + * + * NV01_MEMORY_SYSTEM_DYNAMIC is an alias for NV01_MEMORY_VIRTUAL. This + * was traditionally allocated with RmAllocMemory64(). The default GPU + * virtual address space is used, and the limit of this address space is + * returned in limit. The NV01_MEMORY_SYSTEM_DYNAMIC handle can be + * passed to RmAllocContextDma2() with an offset/limit. The context dma + * handle can then be used as the hDma handle for RmMapMemoryDma. + * + * This behavior is maintained in the RM compatibility shim. + * + * NV01_MEMORY_VIRTUAL replaces this behavior with a single object. + * + * hVASpace - if hVASpace is NV01_NULL_OBJECT, the default GPU VA space is + * selected. Alternatively a FERMI_VASPACE_A handle may be specified. + * + * The NV_MEMORY_VIRTUAL_SYSMEM_DYNAMIC_HVASPACE is used by the + * compatibility layer to emulate NV01_MEMORY_SYSTEM_DYNAMIC semantics. + * + * offset - An offset into the virtual address space may be specified. This + * will limit the range of the GPU VA returned by RmMapMemoryDma to be + * above offset. + * + * limit - When limit is zero, the maximum limit is used. If a non-zero limit + * is specified then it will be used. The final limit is returned. + */ +#define NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS_MESSAGE_ID (0x0070U) + +typedef struct NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS { + NV_DECLARE_ALIGNED(NvU64 offset, 8); // [IN] - offset into address space + NV_DECLARE_ALIGNED(NvU64 limit, 8); // [IN/OUT] - limit of address space + NvHandle hVASpace; // [IN] - Address space handle +} NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS; + +#define NV_MEMORY_VIRTUAL_SYSMEM_DYNAMIC_HVASPACE 0xffffffffU +
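// Editor's aside -- a minimal sketch (hypothetical helper, illustrative
// values) of filling the params documented above: default VA space, no lower
// VA bound, and limit = 0 on input so the maximum limit is chosen and
// returned by RM:
static void nv0070ExampleFillParams(NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS *p)
{
    p->offset   = 0;                // no lower bound on the mapped GPU VA
    p->limit    = 0;                // in: 0 = use max; out: actual limit
    p->hVASpace = NV01_NULL_OBJECT; // default GPU VA space
}
 diff --git a/src/common/sdk/nvidia/inc/class/cl0071.h b/src/common/sdk/nvidia/inc/class/cl0071.h new file mode 100644 index 0000000..be106d3 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0071.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2001-2001, NVIDIA CORPORATION. All rights reserved. 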
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0071_h_ +#define _cl0071_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_MEMORY_SYSTEM_OS_DESCRIPTOR (0x00000071) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0071_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0073.h b/src/common/sdk/nvidia/inc/class/cl0073.h new file mode 100644 index 0000000..d6a7ec2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0073.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2001-2021, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl0073_h_ +#define _cl0073_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV04_DISPLAY_COMMON (0x00000073) + +/* event values */ +#define NV0073_NOTIFIERS_SW (0) +#define NV0073_NOTIFIERS_LTM_CALC_TIMEOUT (5) +#define NV0073_NOTIFIERS_MAXCOUNT (6) + + +#define NV0073_NOTIFICATION_STATUS_IN_PROGRESS (0x8000) +#define NV0073_NOTIFICATION_STATUS_BAD_ARGUMENT (0x4000) +#define NV0073_NOTIFICATION_STATUS_ERROR_INVALID_STATE (0x2000) +#define NV0073_NOTIFICATION_STATUS_ERROR_STATE_IN_USE (0x1000) +#define NV0073_NOTIFICATION_STATUS_DONE_SUCCESS (0x0000) + +/* pio method data structure */ +typedef volatile struct _cl0073_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv073Typedef, Nv04DisplayCommon; +#define NV073_TYPEDEF Nv04DisplayCommon + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0073_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0076.h b/src/common/sdk/nvidia/inc/class/cl0076.h new file mode 100644 index 0000000..50bd0a9 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0076.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0076_h_ +#define _cl0076_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_MEMORY_FRAMEBUFFER_CONSOLE (0x00000076) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0076_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0080.h b/src/common/sdk/nvidia/inc/class/cl0080.h new file mode 100644 index 0000000..9e9aad2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0080.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl0080.finn +// + +#include "nvlimits.h" +#include "cl0080_notification.h" + +#define NV01_DEVICE_0 (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */ + +/* NvAlloc parameters */ +#define NV0080_MAX_DEVICES NV_MAX_DEVICES + +/** + * @brief Alloc param + * + * @param vaMode mode for virtual address space allocation + * Three modes: + * NV_DEVICE_ALLOCATION_VAMODE_OPTIONAL_MULTIPLE_VASPACES + * NV_DEVICE_ALLOCATION_VAMODE_SINGLE_VASPACE + * NV_DEVICE_ALLOCATION_VAMODE_MULTIPLE_VASPACES + * Detailed description of these modes is in nvos.h + **/ + +#define NV0080_ALLOC_PARAMETERS_MESSAGE_ID (0x0080U) + +typedef struct NV0080_ALLOC_PARAMETERS { + NvU32 deviceId; + NvHandle hClientShare; + NvHandle hTargetClient; + NvHandle hTargetDevice; + NvV32 flags; + NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8); + NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8); + NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8); + NvV32 vaMode; +} NV0080_ALLOC_PARAMETERS; diff --git a/src/common/sdk/nvidia/inc/class/cl0080_notification.h b/src/common/sdk/nvidia/inc/class/cl0080_notification.h new file mode 100644 index 0000000..a976d3f --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0080_notification.h @@ -0,0 +1,45 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _cl0080_notification_h_ +#define _cl0080_notification_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* NvNotification[] fields and values */ +#define NV080_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) + +/* pio method data structure */ +typedef volatile struct _cl0080_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv080Typedef, Nv01Device0; + +#define NV080_TYPEDEF Nv01Device0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0080_notification_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl008f.h b/src/common/sdk/nvidia/inc/class/cl008f.h new file mode 100644 index 0000000..63f59e8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl008f.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl008f.finn +// + +#define KERNEL_WATCHDOG 0x008FU + diff --git a/src/common/sdk/nvidia/inc/class/cl0092.h b/src/common/sdk/nvidia/inc/class/cl0092.h new file mode 100644 index 0000000..3008277 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0092.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl0092.finn +// + +#include "class/cl0092_callback.h" + +/* + * This RgLineCallback class allows RM clients to register/unregister the RG line callback functions. + * + * Must be allocated with kernel access rights. + * + * Allocation params: + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the NV04_DISPLAY_COMMON parent device to which the + * operation should be directed. + * head + * This parameter specifies the head for which the callback is to be registered/unregistered. This value must be + * less than the maximum number of heads supported by the GPU subdevice. + * rgLineNum + * This indicates the RG scanout line number on which the callback will be executed. + * 1/ Client should set the proper RG line number based on the mode in which the display head is running and + * subsequent possible modeset that may affect the line number. + * 2/ Client is expected to clear/set the interrupts around modesets or power-transitions (like s3/hibernation). + * 3/ Client should make sure that this param does not exceed the raster settings. + * pCallbkFn + * Pointer to callback function. Cannot be NULL. + * pCallbkParams + * Pointer to the ctrl call param struct. + */ + +#define NV0092_RG_LINE_CALLBACK (0x92U) /* finn: Evaluated from "NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS_MESSAGE_ID" */ + +#define NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS_MESSAGE_ID (0x0092U) + +typedef struct NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS { + NvU32 subDeviceInstance; + NvU32 head; + NvU32 rgLineNum; + + NV_DECLARE_ALIGNED(NvP64 pCallbkFn, 8); /* A function pointer of NV0092_REGISTER_RG_LINE_CALLBACK_FN */ + + NV_DECLARE_ALIGNED(NvP64 pCallbkParams, 8); /* The param1 in NV0092_REGISTER_RG_LINE_CALLBACK_FN */ +} NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS; + diff --git a/src/common/sdk/nvidia/inc/class/cl0092_callback.h b/src/common/sdk/nvidia/inc/class/cl0092_callback.h new file mode 100644 index 0000000..4cd86ad --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0092_callback.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef SDK_CL0092_CALLBACK_H +#define SDK_CL0092_CALLBACK_H + +typedef void (*NV0092_REGISTER_RG_LINE_CALLBACK_FN)(NvU32 rgIntrLine, NvP64 param1, NvBool bIsIrqlIsr); + +#endif // SDK_CL0092_CALLBACK_H diff --git a/src/common/sdk/nvidia/inc/class/cl00b1.h b/src/common/sdk/nvidia/inc/class/cl00b1.h new file mode 100644 index 0000000..bc6e69f --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00b1.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _CL00B1_H_ +#define _CL00B1_H_ + +#define NV01_MEMORY_HW_RESOURCES 0x00b1 + +#endif // _CL00B1_H_ diff --git a/src/common/sdk/nvidia/inc/class/cl00c1.h b/src/common/sdk/nvidia/inc/class/cl00c1.h new file mode 100644 index 0000000..d95e4fd --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00c1.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl00c1.finn +// + +#include "nvlimits.h" +#define NV_FB_SEGMENT (0xc1U) /* finn: Evaluated from "NV_FB_SEGMENT_ALLOCATION_PARAMS_MESSAGE_ID" */ + +/* + * NV_FB_SEGMENT_ALLOCATION_PARAMS - Allocation params to create FB segment through + * NvRmAlloc.
+ */ +#define NV_FB_SEGMENT_ALLOCATION_PARAMS_MESSAGE_ID (0x00C1U) + +typedef struct NV_FB_SEGMENT_ALLOCATION_PARAMS { + NvHandle hCtxDma; // unused + NvU32 subDeviceIDMask; + NV_DECLARE_ALIGNED(NvU64 dmaOffset, 8); // unused + NV_DECLARE_ALIGNED(NvU64 VidOffset, 8); + NV_DECLARE_ALIGNED(NvU64 Offset, 8); // To be deprecated + NV_DECLARE_ALIGNED(NvU64 pOffset[NV_MAX_SUBDEVICES], 8); + NV_DECLARE_ALIGNED(NvU64 Length, 8); + NV_DECLARE_ALIGNED(NvU64 ValidLength, 8); + NV_DECLARE_ALIGNED(NvP64 pPageArray, 8); + NvU32 startPageIndex; + NvHandle AllocHintHandle; + NvU32 Flags; + NvHandle hMemory; // Not used in NvRmAlloc path; only used in CTRL path + NvHandle hClient; // Not used in NvRmAlloc path; only used in CTRL path + NvHandle hDevice; // Not used in NvRmAlloc path; only used in CTRL path + NV_DECLARE_ALIGNED(NvP64 pCpuAddress, 8); // To be deprecated + NV_DECLARE_ALIGNED(NvP64 ppCpuAddress[NV_MAX_SUBDEVICES], 8); + NV_DECLARE_ALIGNED(NvU64 GpuAddress, 8); // To be deprecated + NV_DECLARE_ALIGNED(NvU64 pGpuAddress[NV_MAX_SUBDEVICES], 8); + NvHandle hAllocHintClient; + NvU32 kind; + NvU32 compTag; +} NV_FB_SEGMENT_ALLOCATION_PARAMS; + diff --git a/src/common/sdk/nvidia/inc/class/cl00c3.h b/src/common/sdk/nvidia/inc/class/cl00c3.h new file mode 100644 index 0000000..eb9e1cb --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00c3.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl00c3.finn +// + +#define NV01_MEMORY_SYNCPOINT (0xc3U) /* finn: Evaluated from "NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS_MESSAGE_ID" */ + +/* + * NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS - Allocation params to create syncpoint + * through NvRmAlloc. + */ +#define NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS_MESSAGE_ID (0x00c3U) + +typedef struct NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS { + NvU32 syncpointId; +} NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS; + diff --git a/src/common/sdk/nvidia/inc/class/cl00da.h b/src/common/sdk/nvidia/inc/class/cl00da.h new file mode 100644 index 0000000..871f913 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00da.h @@ -0,0 +1,85 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl00da.finn +// + + + +/* +* NV_SEMAPHORE_SURFACE +* A semaphore surface object contains a GPU-and-CPU-accessible memory region +* containing semaphores, associated monitored fence values, and any other +* related data necessary to implement a circular 32-bit or monotonic 64-bit +* incrementing semaphore primitive and associated event delivery. +*/ +#define NV_SEMAPHORE_SURFACE (0xdaU) /* finn: Evaluated from "NV_SEMAPHORE_SURFACE_ALLOC_PARAMETERS_MESSAGE_ID" */ + + /* + * NV_SEMAPHORE_SURFACE_WAIT_VALUE + * The notification index used when registering events with the RM event + * subsystem. RM clients should not need to use this value, as they don't + * allocate events against this class themselves. Rather, they specify an + * OS event when registering a CPU waiter, and semaphore surface takes care + * of creating event objects internally as necessary, similar to the event + * buffer record notification OS event mechanism. + */ +#define NV_SEMAPHORE_SURFACE_WAIT_VALUE (0x00000000) + +/* +* NV_SEMAPHORE_SURFACE_ALLOC_PARAMETERS +* +* hSemaphoreMem [IN] +* The handle to the memory used for the semaphore value and, when +* supported, the monitored fence/conditional interrupt value. Must be +* accessible by the GPU corresponding to the parent of the semaphore +* surface, as well as the CPU. +* +* hMaxSubmittedMem [IN] +* The handle to the memory used to track the maximum signal value +* submitted to the GPU for processing for a given semaphore slot in +* hSemaphoreMem. This memory is only accessed by the CPU, but must +* support a CPU mapping that allows the use of 64-bit atomic exchange +* operations. This may be the same memory object as hSemaphoreMem if it is +* possible to create one memory object that supports all the requirements +* for a given GPU and CPU. This handle may be omitted/set to zero on GPUs +* that do not require maximum submitted value tracking. +* +* flags [IN] +* Flags affecting the semaphore surface allocation. Currently, there are +* no valid flags, so this value must be set to zero.
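+*
+* For illustration only (an editor's sketch, not part of the original
+* header): a client holding handles hClient, hSubdevice, hSemMem, and
+* hMaxSubMem (all hypothetical, and the use of the subdevice as parent is
+* an assumption) might allocate a semaphore surface through the legacy RM
+* client API roughly as follows:
+*
+*     NV_SEMAPHORE_SURFACE_ALLOC_PARAMETERS semParams = { 0 };
+*     semParams.hSemaphoreMem    = hSemMem;    // GPU- and CPU-accessible
+*     semParams.hMaxSubmittedMem = hMaxSubMem; // may equal hSemMem if one
+*                                              // object meets both sets of
+*                                              // requirements
+*     semParams.flags            = 0;          // no valid flags yet
+*     status = NvRmAlloc(hClient, hSubdevice, hSemSurface,
+*                        NV_SEMAPHORE_SURFACE, &semParams);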
+*/ +#define NV_SEMAPHORE_SURFACE_ALLOC_PARAMETERS_MESSAGE_ID (0x00DAU) + +typedef struct NV_SEMAPHORE_SURFACE_ALLOC_PARAMETERS { + NvHandle hSemaphoreMem; + NvHandle hMaxSubmittedMem; + NV_DECLARE_ALIGNED(NvU64 flags, 8); +} NV_SEMAPHORE_SURFACE_ALLOC_PARAMETERS; + diff --git a/src/common/sdk/nvidia/inc/class/cl00de.h b/src/common/sdk/nvidia/inc/class/cl00de.h new file mode 100644 index 0000000..80affb1 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00de.h @@ -0,0 +1,436 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl00de_h_ +#define _cl00de_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvmisc.h" +#include "nvfixedtypes.h" +#include "ctrl/ctrl2080/ctrl2080ecc.h" + +#define RM_USER_SHARED_DATA (0x000000de) + +#define RUSD_TIMESTAMP_WRITE_IN_PROGRESS (NV_U64_MAX) +#define RUSD_TIMESTAMP_INVALID 0 + +#define RUSD_SEQ_START (0xFF00000000000000LLU) + +#define RUSD_SEQ_DATA_VALID(x) \ + ((((x) < RUSD_SEQ_START) && ((x) != RUSD_TIMESTAMP_INVALID)) || \ + (((x) >= RUSD_SEQ_START) && (((x) & 0x1LLU) == 0x0LLU))) + +// +// Helper macros to check seq before reading RUSD. +// Not wrapped in do/while because the macros use continue/break. +// +#define RUSD_SEQ_CHECK1(dataField) \ + NvU64 RUSD_SEQ = (dataField)->lastModifiedTimestamp; \ + portAtomicMemoryFenceLoad(); \ + if (!RUSD_SEQ_DATA_VALID(RUSD_SEQ)) \ + continue; + +// Clear lastModifiedTimestamp on failure in case of reaching loop limit +#define RUSD_SEQ_CHECK2(dataField) \ + portAtomicMemoryFenceLoad(); \ + if (RUSD_SEQ == (dataField)->lastModifiedTimestamp) \ + break; \ + +// +// Read RUSD data field `dataField` from NV00DE_SHARED_DATA struct `pSharedData` into destination pointer `pDst` +// `pDst` should be the data struct type matching `dataField` +// Check (pDst)->lastModifiedTimestamp using RUSD_IS_DATA_STALE to verify data validity.
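+
+//
+// Editor's illustrative sketch (not part of the original header): the same
+// snapshot protocol that RUSD_READ_DATA below implements with RM's internal
+// port* primitives, written out for a single hypothetical field so the
+// sequence-check flow is visible. RUSD_EXAMPLE_FIELD and rusdSnapshotExample
+// are made-up names; a real client would copy one of the RUSD_* structs out
+// of the mapped NV00DE_SHARED_DATA page instead.
+//
+#include <stdatomic.h>
+#include <string.h>
+
+typedef struct {
+    volatile NvU64 lastModifiedTimestamp; // seq/timestamp word, as in the RUSD_* structs
+    NvU32 payload;                        // stand-in for the real data members
+} RUSD_EXAMPLE_FIELD;
+
+static int rusdSnapshotExample(const volatile RUSD_EXAMPLE_FIELD *pSrc,
+                               RUSD_EXAMPLE_FIELD *pDst)
+{
+    for (int attempt = 0; attempt < 10; ++attempt)
+    {
+        NvU64 seq = pSrc->lastModifiedTimestamp;      // read seq before the data
+        atomic_thread_fence(memory_order_acquire);
+
+        if (!RUSD_SEQ_DATA_VALID(seq))
+            continue;                                 // write in progress or no data yet
+
+        memcpy(pDst, (const void *)pSrc, sizeof(*pDst));
+
+        atomic_thread_fence(memory_order_acquire);
+        if (seq == pSrc->lastModifiedTimestamp)
+            return 1;                                 // seq unchanged: snapshot is consistent
+    }
+    return 0;                                         // writer kept racing us; treat as invalid
+}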
+// +#define RUSD_READ_DATA(pSharedData,dataField,pDst) \ +do { \ + portMemSet((pDst), 0, sizeof(*pDst)); \ + for (NvU32 RUSD_READ_DATA_ATTEMPTS = 0; RUSD_READ_DATA_ATTEMPTS < 10; ++RUSD_READ_DATA_ATTEMPTS) \ + { \ + RUSD_SEQ_CHECK1(&((pSharedData)->dataField)); \ + portMemCopy((pDst), sizeof(*pDst), &((pSharedData)->dataField), sizeof(*pDst)); \ + RUSD_SEQ_CHECK2(&((pSharedData)->dataField)); \ + (pDst)->lastModifiedTimestamp = RUSD_TIMESTAMP_INVALID; \ + } \ +} while(0); + +// +// Check if RUSD data timestamp is stale. +// For polled data, returns true if data is older than `staleThreshold` +// For non-polled data, returns true if a write is in progress (odd sequence count) +// +#define RUSD_IS_DATA_STALE(timestamp,currentTime,staleThreshold) \ + ((((timestamp) < (RUSD_SEQ_START)) && /* Polled Data */ \ + (((timestamp) == (RUSD_TIMESTAMP_INVALID)) || /* Invalid */ \ + (((currentTime) - (timestamp)) > (staleThreshold)))) || \ + (((timestamp) >= (RUSD_SEQ_START)) && /* Non-Polled Data */ \ + (((timestamp) & (0x1LLU)) == 1LLU))) + +enum { + RUSD_CLK_THROTTLE_REASON_GPU_IDLE = NVBIT(0), + RUSD_CLK_THROTTLE_REASON_APPLICATION_CLOCK_SETTING = NVBIT(1), + RUSD_CLK_THROTTLE_REASON_SW_POWER_CAP = NVBIT(2), + RUSD_CLK_THROTTLE_REASON_HW_SLOWDOWN = NVBIT(3), + RUSD_CLK_THROTTLE_REASON_SYNC_BOOST = NVBIT(4), + RUSD_CLK_THROTTLE_REASON_SW_THERMAL_SLOWDOWN = NVBIT(5), + RUSD_CLK_THROTTLE_REASON_HW_THERMAL_SLOWDOWN = NVBIT(6), + RUSD_CLK_THROTTLE_REASON_HW_POWER_BRAKES_SLOWDOWN = NVBIT(7), + RUSD_CLK_THROTTLE_REASON_DISPLAY_CLOCK_SETTING = NVBIT(8), +}; + +typedef struct RUSD_BAR1_MEMORY_INFO { + volatile NvU64 lastModifiedTimestamp; + // + // Non-polled data, not tied to any specific RM API + // Total size and available memory in Bar1 + // + NvU32 bar1Size; + NvU32 bar1AvailSize; +} RUSD_BAR1_MEMORY_INFO; + +typedef struct RUSD_PMA_MEMORY_INFO { + volatile NvU64 lastModifiedTimestamp; + // + // Non-polled data, not tied to any specific RM API + // Total size and available memory in PMA + // + NvU64 totalPmaMemory; + NvU64 freePmaMemory; +} RUSD_PMA_MEMORY_INFO; + +enum { + RUSD_CLK_PUBLIC_DOMAIN_GRAPHICS = 0, + RUSD_CLK_PUBLIC_DOMAIN_MEMORY, + RUSD_CLK_PUBLIC_DOMAIN_VIDEO, + + // Put at the end.
See bug 1000230: NVML doesn't report SM frequency on Kepler. + RUSD_CLK_PUBLIC_DOMAIN_SM, + RUSD_CLK_PUBLIC_DOMAIN_MAX_TYPE, +}; + +typedef struct RUSD_CLK_PUBLIC_DOMAIN_INFO { + NvU32 targetClkMHz; +} RUSD_CLK_PUBLIC_DOMAIN_INFO; + +typedef struct RUSD_CLK_PUBLIC_DOMAIN_INFOS { + volatile NvU64 lastModifiedTimestamp; + RUSD_CLK_PUBLIC_DOMAIN_INFO info[RUSD_CLK_PUBLIC_DOMAIN_MAX_TYPE]; +} RUSD_CLK_PUBLIC_DOMAIN_INFOS; + +typedef struct RUSD_ENG_UTILIZATION { + NvU32 clkPercentBusy; + NvU32 samplingPeriodUs; +} RUSD_ENG_UTILIZATION; + +#define RUSD_ENG_UTILIZATION_VID_ENG_NVENC 0 +#define RUSD_ENG_UTILIZATION_VID_ENG_NVDEC 1 +#define RUSD_ENG_UTILIZATION_VID_ENG_NVJPG 2 +#define RUSD_ENG_UTILIZATION_VID_ENG_NVOFA 3 +#define RUSD_ENG_UTILIZATION_COUNT 4 + +typedef struct RUSD_PERF_DEVICE_UTILIZATION_INFO { + NvU32 gpuPercentBusy; + NvU32 memoryPercentBusy; + RUSD_ENG_UTILIZATION engUtil[RUSD_ENG_UTILIZATION_COUNT]; +} RUSD_PERF_DEVICE_UTILIZATION_INFO; + +typedef struct RUSD_PERF_DEVICE_UTILIZATION { + volatile NvU64 lastModifiedTimestamp; + RUSD_PERF_DEVICE_UTILIZATION_INFO info; +} RUSD_PERF_DEVICE_UTILIZATION; + +typedef struct RUSD_PERF_CURRENT_PSTATE { + volatile NvU64 lastModifiedTimestamp; + // Provided from NV2080_CTRL_CMD_PERF_GET_CURRENT_PSTATE + NvU32 currentPstate; +} RUSD_PERF_CURRENT_PSTATE; + +#define RUSD_CLK_VIOLATION_NUM 32 + +#define RUSD_PERF_POINT_MAX_CLOCK 0 +#define RUSD_PERF_POINT_TURBO_BOOST 1 +#define RUSD_PERF_POINT_3D_BOOST 2 +#define RUSD_PERF_POINT_RATED_TDP 3 +#define RUSD_PERF_POINT_MAX_CUSTOMER_BOOST 4 +#define RUSD_PERF_POINT_DISPLAY_CLOCK_INTERSECT 5 +#define RUSD_PERF_POINT_NUM 6 + +typedef struct RUSD_CLK_VIOLATION_STATUS { + NvU32 perfPointMask; + NvU64 timeNs[RUSD_PERF_POINT_NUM]; +} RUSD_CLK_VIOLATION_STATUS; + +typedef struct RUSD_CLK_THROTTLE_INFO { + volatile NvU64 lastModifiedTimestamp; + NvU32 reasonMask; // Bitmask of RUSD_CLK_THROTTLE_REASON_* + + NvU64 referenceTimeNs; + NvU32 supportedViolationTimeMask; + RUSD_CLK_VIOLATION_STATUS violation[RUSD_CLK_VIOLATION_NUM]; + RUSD_CLK_VIOLATION_STATUS globalViolation; +} RUSD_CLK_THROTTLE_INFO; + +typedef struct RUSD_CLK_THROTTLE_INFO RUSD_CLK_THROTTLE_REASON; + +typedef struct RUSD_MEM_ERROR_COUNTS { + NvU64 correctedVolatile; + NvU64 correctedAggregate; + NvU64 uncorrectedVolatile; + NvU64 uncorrectedAggregate; +} RUSD_MEM_ERROR_COUNTS; + +#define RUSD_MEMORY_ERROR_TYPE_TOTAL 0 +#define RUSD_MEMORY_ERROR_TYPE_DRAM 1 +#define RUSD_MEMORY_ERROR_TYPE_SRAM 2 +#define RUSD_MEMORY_ERROR_TYPE_COUNT 3 + +typedef struct RUSD_ECC_COUNTS { + NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS volatileCounts; + NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS clientExposedCounts; +} RUSD_ECC_INFO; + +typedef struct RUSD_MEM_ECC { + volatile NvU64 lastModifiedTimestamp; + RUSD_MEM_ERROR_COUNTS count[RUSD_MEMORY_ERROR_TYPE_COUNT]; + // TODO: Because a voteup for an X driver update cannot be obtained with the + // existing RM APIs, bug 5138911 needs to be resolved before fully updating + // RUSD_MEM_ECC and removing the deprecated RUSD_MEM_ERROR_COUNTS.
+ RUSD_ECC_INFO info; +} RUSD_MEM_ECC; + +typedef struct RUSD_POWER_LIMIT_INFO { + NvU32 requestedmW; + NvU32 enforcedmW; +} RUSD_POWER_LIMIT_INFO; + +typedef struct RUSD_POWER_LIMITS { + volatile NvU64 lastModifiedTimestamp; + RUSD_POWER_LIMIT_INFO info; +} RUSD_POWER_LIMITS; + +typedef enum RUSD_TEMPERATURE_SENSOR { + RUSD_TEMPERATURE_SENSOR_GPU, + RUSD_TEMPERATURE_SENSOR_MEMORY, + RUSD_TEMPERATURE_SENSOR_BOARD, + RUSD_TEMPERATURE_SENSOR_POWER_SUPPLY, + + // Should always be last entry + RUSD_TEMPERATURE_SENSOR_MAX +} RUSD_TEMPERATURE_SENSOR; + +typedef enum RUSD_TEMPERATURE_TYPE { + RUSD_TEMPERATURE_TYPE_GPU, + RUSD_TEMPERATURE_TYPE_MEMORY, + RUSD_TEMPERATURE_TYPE_BOARD, + RUSD_TEMPERATURE_TYPE_POWER_SUPPLY, + RUSD_TEMPERATURE_TYPE_HBM, + RUSD_TEMPERATURE_TYPE_MAX +} RUSD_TEMPERATURE_TYPE; + +typedef struct RUSD_TEMPERATURE { + volatile NvU64 lastModifiedTimestamp; + NvTemp temperature; +} RUSD_TEMPERATURE; + +typedef struct RUSD_MEM_ROW_REMAP_INFO { + // Provided from NV2080_CTRL_CMD_FB_GET_ROW_REMAPPER_HISTOGRAM + NvU32 histogramMax; // No remapped row is used. + NvU32 histogramHigh; // One remapped row is used. + NvU32 histogramPartial; // More than one remapped row is used. + NvU32 histogramLow; // One remapped row is available. + NvU32 histogramNone; // All remapped rows are used. + + NvU32 correctableRows; + NvU32 uncorrectableRows; + NvBool isPending; + NvBool hasFailureOccurred; +} RUSD_MEM_ROW_REMAP_INFO; + +typedef struct RUSD_MEM_ROW_REMAP { + volatile NvU64 lastModifiedTimestamp; + RUSD_MEM_ROW_REMAP_INFO info; +} RUSD_MEM_ROW_REMAP; + +typedef struct RUSD_AVG_POWER_INFO { + NvU32 averageGpuPower; // mW + NvU32 averageModulePower; // mW + NvU32 averageMemoryPower; // mW +} RUSD_AVG_POWER_INFO; + +typedef struct RUSD_AVG_POWER_USAGE { + volatile NvU64 lastModifiedTimestamp; + RUSD_AVG_POWER_INFO info; +} RUSD_AVG_POWER_USAGE; + +typedef struct RUSD_INST_POWER_INFO { + NvU32 instGpuPower; // mW + NvU32 instModulePower; // mW + NvU32 instCpuPower; // mW +} RUSD_INST_POWER_INFO; + +typedef struct RUSD_INST_POWER_USAGE { + volatile NvU64 lastModifiedTimestamp; + RUSD_INST_POWER_INFO info; +} RUSD_INST_POWER_USAGE; + +typedef struct RUSD_POWER_POLICY_STATUS_INFO { + NvU32 tgpmW; // Total GPU power in mW +} RUSD_POWER_POLICY_STATUS_INFO; + +typedef struct RUSD_POWER_POLICY_STATUS { + volatile NvU64 lastModifiedTimestamp; + RUSD_POWER_POLICY_STATUS_INFO info; +} RUSD_POWER_POLICY_STATUS; + +#define RUSD_FAN_COOLER_MAX_COOLERS 16U + +typedef struct RUSD_FAN_COOLER_INFO { + NvU32 rpmCurr[RUSD_FAN_COOLER_MAX_COOLERS]; +} RUSD_FAN_COOLER_INFO; + +typedef struct RUSD_FAN_COOLER_STATUS { + volatile NvU64 lastModifiedTimestamp; + RUSD_FAN_COOLER_INFO info; +} RUSD_FAN_COOLER_STATUS; + +typedef struct RUSD_SHADOW_ERR_CONT { + volatile NvU64 lastModifiedTimestamp; + // + // Non-polled data, not tied to any specific RM API + // Shadowed ERR_CONT register value + // + NvU32 shadowErrContVal; +} RUSD_SHADOW_ERR_CONT; + +// Each RUSD_BUS_DATA_* define corresponds to the equivalent NV2080_CTRL_BUS_INFO_INDEX_* +#define RUSD_BUS_DATA_PCIE_GEN_INFO 0 +#define RUSD_BUS_DATA_PCIE_GPU_LINK_LINECODE_ERRORS 1 +#define RUSD_BUS_DATA_PCIE_GPU_LINK_CRC_ERRORS 2 +#define RUSD_BUS_DATA_PCIE_GPU_LINK_NAKS_RECEIVED 3 +#define RUSD_BUS_DATA_PCIE_GPU_LINK_FAILED_L0S_EXITS 4 +#define RUSD_BUS_DATA_PCIE_GPU_LINK_CORRECTABLE_ERRORS 5 +#define RUSD_BUS_DATA_PCIE_GPU_LINK_NONFATAL_ERRORS 6 +#define RUSD_BUS_DATA_PCIE_GPU_LINK_FATAL_ERRORS 7 +#define RUSD_BUS_DATA_PCIE_GPU_LINK_UNSUPPORTED_REQUESTS 8 +#define
RUSD_BUS_DATA_COUNT 9 + +typedef struct RUSD_PCIE_DATA_INFO { + // Provided from NV2080_CTRL_CMD_BUS_GET_INFO_V2 + NvU32 data[RUSD_BUS_DATA_COUNT]; +} RUSD_PCIE_DATA_INFO; + +typedef struct RUSD_PCIE_DATA { + volatile NvU64 lastModifiedTimestamp; + RUSD_PCIE_DATA_INFO info; +} RUSD_PCIE_DATA; + +typedef struct RUSD_GR_INFO +{ + volatile NvU64 lastModifiedTimestamp; + NvBool bCtxswLoggingEnabled; +} RUSD_GR_INFO; + +typedef struct NV00DE_SHARED_DATA { + NV_DECLARE_ALIGNED(RUSD_BAR1_MEMORY_INFO bar1MemoryInfo, 8); + + NV_DECLARE_ALIGNED(RUSD_PMA_MEMORY_INFO pmaMemoryInfo, 8); + + NV_DECLARE_ALIGNED(RUSD_SHADOW_ERR_CONT shadowErrCont, 8); + + NV_DECLARE_ALIGNED(RUSD_GR_INFO grInfo, 8); + + // gpuUpdateUserSharedData is sensitive to these two sections being contiguous + + // + // Polled data section + // All data structs are a volatile NvU64 timestamp followed by data contents. + // Access by reading timestamp, then copying the struct contents, then reading the timestamp again. + // If time0 matches time1, data has not changed during the read, and contents are valid. + // If timestamp is RUSD_TIMESTAMP_WRITE_IN_PROGRESS, data was edited during the read, retry. + // If timestamp is RUSD_TIMESTAMP_INVALID, data is not available or not supported on this platform. + // + + // POLL_CLOCK + NV_DECLARE_ALIGNED(RUSD_CLK_PUBLIC_DOMAIN_INFOS clkPublicDomainInfos, 8); + + // POLL_PERF + NV_DECLARE_ALIGNED(RUSD_CLK_THROTTLE_INFO clkThrottleInfo, 8); + + // POLL_PERF + NV_DECLARE_ALIGNED(RUSD_PERF_DEVICE_UTILIZATION perfDevUtil, 8); + + // POLL_MEMORY + NV_DECLARE_ALIGNED(RUSD_MEM_ECC memEcc, 8); + + // POLL_PERF + NV_DECLARE_ALIGNED(RUSD_PERF_CURRENT_PSTATE perfCurrentPstate, 8); + + // POLL_POWER + // Module Limit is not supported on Ampere/Hopper + NV_DECLARE_ALIGNED(RUSD_POWER_LIMITS powerLimitGpu, 8); + + // POLL_THERMAL + NV_DECLARE_ALIGNED(RUSD_TEMPERATURE temperatures[RUSD_TEMPERATURE_TYPE_MAX], 8); + + // POLL_MEMORY + NV_DECLARE_ALIGNED(RUSD_MEM_ROW_REMAP memRowRemap, 8); + + // POLL_POWER + NV_DECLARE_ALIGNED(RUSD_AVG_POWER_USAGE avgPowerUsage, 8); + + // POLL_POWER + NV_DECLARE_ALIGNED(RUSD_INST_POWER_USAGE instPowerUsage, 8); + + // POLL_POWER + NV_DECLARE_ALIGNED(RUSD_POWER_POLICY_STATUS powerPolicyStatus, 8); + + // POLL_PCI + NV_DECLARE_ALIGNED(RUSD_PCIE_DATA pciBusData, 8); + + // POLL_FAN + NV_DECLARE_ALIGNED(RUSD_FAN_COOLER_STATUS fanCoolerStatus, 8); +} NV00DE_SHARED_DATA; + +// +// Polling mask bits, pass into ALLOC_PARAMETERS or NV00DE_CTRL_REQUEST_DATA_POLL +// to request above polled data to be provided +// +#define NV00DE_RUSD_POLL_CLOCK 0x1 +#define NV00DE_RUSD_POLL_PERF 0x2 +#define NV00DE_RUSD_POLL_MEMORY 0x4 +#define NV00DE_RUSD_POLL_POWER 0x8 +#define NV00DE_RUSD_POLL_THERMAL 0x10 +#define NV00DE_RUSD_POLL_PCI 0x20 +#define NV00DE_RUSD_POLL_FAN 0x40 + +typedef struct NV00DE_ALLOC_PARAMETERS { + NvU64 polledDataMask; // Bitmask of data to request polling at alloc time, 0 if not needed +} NV00DE_ALLOC_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl00de_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl00f2.h b/src/common/sdk/nvidia/inc/class/cl00f2.h new file mode 100644 index 0000000..13b0624 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00f2.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _cl00f2_h_ +#define _cl00f2_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define IO_VASPACE_A (0x000000f2) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl00f2_h + diff --git a/src/common/sdk/nvidia/inc/class/cl00fc.h b/src/common/sdk/nvidia/inc/class/cl00fc.h new file mode 100644 index 0000000..aa50ead --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00fc.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _cl00fc_h_ +#define _cl00fc_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define FABRIC_VASPACE_A (0x000000fc) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl00fc_h + diff --git a/src/common/sdk/nvidia/inc/class/cl00fe.h b/src/common/sdk/nvidia/inc/class/cl00fe.h new file mode 100644 index 0000000..c76d509 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00fe.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl00fe.finn +// + +#define NV_MEMORY_MAPPER (0xfeU) /* finn: Evaluated from "NV_MEMORY_MAPPER_ALLOCATION_PARAMS_MESSAGE_ID" */ + +/* + * NV_MEMORY_MAPPER_ALLOCATION_PARAMS + * + * Allocation params for NV_MEMORY_MAPPER. + * This class provides a paging-operations channel interface to userspace clients. + */ +#define NV_MEMORY_MAPPER_ALLOCATION_PARAMS_MESSAGE_ID (0x00FEU) + +typedef struct NV_MEMORY_MAPPER_ALLOCATION_PARAMS { + NvHandle hSemaphoreSurface; + NvU32 maxQueueSize; + NvHandle hNotificationMemory; + NV_DECLARE_ALIGNED(NvU64 notificationOffset, 8); +} NV_MEMORY_MAPPER_ALLOCATION_PARAMS; + +typedef struct NV_MEMORY_MAPPER_NOTIFICATION { + NvU32 status; // NV_STATUS error code +} NV_MEMORY_MAPPER_NOTIFICATION; + diff --git a/src/common/sdk/nvidia/inc/class/cl0100.h b/src/common/sdk/nvidia/inc/class/cl0100.h new file mode 100644 index 0000000..8795e82 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0100.h @@ -0,0 +1,51 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/cl0100.finn +// + + + +/* + * The lock stress object (LOCK_STRESS_OBJECT) is a test-only object that exports + * multiple RM controls that exercise acquiring different combinations of RM locks in + * order to stress and properly test RM locking specifically. These APIs only exercise + * legal combinations of RM locks that are in use elsewhere in RM, but they are intended + * to be exhaustive in covering all possible combinations of RM locks. The APIs + * themselves don't do much beyond incrementing/decrementing global integers that + * can be validated later on within a test executable to ensure that all accesses to + * these global integers were properly synchronized. + * + * The lock stress object is not allocatable without the "RmEnableLockStress" registry + * key being turned on, which is off by default. The creation/destruction of the object + * does not accomplish anything useful but simply serves as a way to check for the + * registry key's enablement before allowing execution of any of the test APIs this object + * provides. + */ + +#define LOCK_STRESS_OBJECT 0x0100U + diff --git a/src/common/sdk/nvidia/inc/class/cl0101.h b/src/common/sdk/nvidia/inc/class/cl0101.h new file mode 100644 index 0000000..8aba267 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0101.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl0101.finn +// + + + +/* + * LOCK_TEST_RELAXED_DUP_OBJECT + * An object used for testing the relaxed GPU lock for the RMDUP operation. + * Can be allocated only if PDB_PROP_SYS_ENABLE_RM_TEST_ONLY_CODE is set. + */ +#define LOCK_TEST_RELAXED_DUP_OBJECT 0x0101U + diff --git a/src/common/sdk/nvidia/inc/class/cl2080.h b/src/common/sdk/nvidia/inc/class/cl2080.h new file mode 100644 index 0000000..a85a662 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl2080.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl2080.finn +// + +#include "nvlimits.h" +#include "cl2080_notification.h" + +#define NV20_SUBDEVICE_0 (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */ + +/* NvAlloc parameters */ +#define NV2080_MAX_SUBDEVICES NV_MAX_SUBDEVICES + +#define NV2080_ALLOC_PARAMETERS_MESSAGE_ID (0x2080U) + +typedef struct NV2080_ALLOC_PARAMETERS { + NvU32 subDeviceId; +} NV2080_ALLOC_PARAMETERS; + diff --git a/src/common/sdk/nvidia/inc/class/cl2080_notification.h b/src/common/sdk/nvidia/inc/class/cl2080_notification.h new file mode 100644 index 0000000..52729c2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl2080_notification.h @@ -0,0 +1,613 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _cl2080_notification_h_ +#define _cl2080_notification_h_ + +#include "nvcfg_sdk.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvcfg_sdk.h" + +/* event values */ +#define NV2080_NOTIFIERS_SW (0) +#define NV2080_NOTIFIERS_HOTPLUG (1) +#define NV2080_NOTIFIERS_POWER_CONNECTOR (2) +#define NV2080_NOTIFIERS_THERMAL_SW (3) +#define NV2080_NOTIFIERS_THERMAL_HW (4) +#define NV2080_NOTIFIERS_FULL_SCREEN_CHANGE (5) +#define NV2080_NOTIFIERS_EVENTBUFFER (6) +#define NV2080_NOTIFIERS_DP_IRQ (7) +#define NV2080_NOTIFIERS_GR_DEBUG_INTR (8) +#define NV2080_NOTIFIERS_PMU_EVENT (9) +#define NV2080_NOTIFIERS_PMU_COMMAND (10) +#define NV2080_NOTIFIERS_TIMER (11) +#define NV2080_NOTIFIERS_GRAPHICS (12) +#define NV2080_NOTIFIERS_PPP (13) +#define NV2080_NOTIFIERS_VLD (14) // also known as BSP +#define NV2080_NOTIFIERS_NVDEC0 NV2080_NOTIFIERS_VLD +#define NV2080_NOTIFIERS_NVDEC1 (15) +#define NV2080_NOTIFIERS_NVDEC2 (16) +#define NV2080_NOTIFIERS_NVDEC3 (17) +#define NV2080_NOTIFIERS_NVDEC4 (18) +#define NV2080_NOTIFIERS_NVDEC5 (19) +#define NV2080_NOTIFIERS_NVDEC6 (20) +#define NV2080_NOTIFIERS_NVDEC7 (21) +#define NV2080_NOTIFIERS_PDEC (22) // also known as VP +#define NV2080_NOTIFIERS_CE0 (23) +#define NV2080_NOTIFIERS_CE1 (24) +#define NV2080_NOTIFIERS_CE2 (25) +#define NV2080_NOTIFIERS_CE3 (26) +#define NV2080_NOTIFIERS_CE4 (27) +#define NV2080_NOTIFIERS_CE5 (28) +#define NV2080_NOTIFIERS_CE6 (29) +#define NV2080_NOTIFIERS_CE7 (30) +#define NV2080_NOTIFIERS_CE8 (31) +#define NV2080_NOTIFIERS_CE9 (32) +#define NV2080_NOTIFIERS_PSTATE_CHANGE (33) +#define NV2080_NOTIFIERS_HDCP_STATUS_CHANGE (34) +#define NV2080_NOTIFIERS_FIFO_EVENT_MTHD (35) +#define NV2080_NOTIFIERS_PRIV_RING_HANG (36) +#define NV2080_NOTIFIERS_RC_ERROR (37) +#define NV2080_NOTIFIERS_MSENC (38) +#define NV2080_NOTIFIERS_NVENC0 NV2080_NOTIFIERS_MSENC +#define NV2080_NOTIFIERS_NVENC1 (39) +#define NV2080_NOTIFIERS_NVENC2 (40) +#define NV2080_NOTIFIERS_UNUSED_0 (41) // Unused +#define NV2080_NOTIFIERS_ACPI_NOTIFY (42) +#define NV2080_NOTIFIERS_COOLER_DIAG_ZONE (43) +#define NV2080_NOTIFIERS_THERMAL_DIAG_ZONE (44) +#define NV2080_NOTIFIERS_AUDIO_HDCP_REQUEST (45) +#define NV2080_NOTIFIERS_WORKLOAD_MODULATION_CHANGE (46) +#define NV2080_NOTIFIERS_GPIO_0_RISING_INTERRUPT (47) +#define NV2080_NOTIFIERS_GPIO_1_RISING_INTERRUPT (48) +#define NV2080_NOTIFIERS_GPIO_2_RISING_INTERRUPT (49) +#define NV2080_NOTIFIERS_GPIO_3_RISING_INTERRUPT (50) +#define NV2080_NOTIFIERS_GPIO_4_RISING_INTERRUPT (51) +#define NV2080_NOTIFIERS_GPIO_5_RISING_INTERRUPT (52) +#define NV2080_NOTIFIERS_GPIO_6_RISING_INTERRUPT (53) +#define NV2080_NOTIFIERS_GPIO_7_RISING_INTERRUPT (54) +#define NV2080_NOTIFIERS_GPIO_8_RISING_INTERRUPT (55) +#define NV2080_NOTIFIERS_GPIO_9_RISING_INTERRUPT (56) +#define NV2080_NOTIFIERS_GPIO_10_RISING_INTERRUPT (57) +#define NV2080_NOTIFIERS_GPIO_11_RISING_INTERRUPT (58) +#define NV2080_NOTIFIERS_GPIO_12_RISING_INTERRUPT (59) +#define NV2080_NOTIFIERS_GPIO_13_RISING_INTERRUPT (60) +#define NV2080_NOTIFIERS_GPIO_14_RISING_INTERRUPT (61) +#define NV2080_NOTIFIERS_GPIO_15_RISING_INTERRUPT (62) +#define NV2080_NOTIFIERS_GPIO_16_RISING_INTERRUPT (63) +#define NV2080_NOTIFIERS_GPIO_17_RISING_INTERRUPT (64) +#define NV2080_NOTIFIERS_GPIO_18_RISING_INTERRUPT (65) +#define NV2080_NOTIFIERS_GPIO_19_RISING_INTERRUPT (66) +#define NV2080_NOTIFIERS_GPIO_20_RISING_INTERRUPT (67) +#define NV2080_NOTIFIERS_GPIO_21_RISING_INTERRUPT (68) +#define NV2080_NOTIFIERS_GPIO_22_RISING_INTERRUPT (69) +#define 
NV2080_NOTIFIERS_GPIO_23_RISING_INTERRUPT (70) +#define NV2080_NOTIFIERS_GPIO_24_RISING_INTERRUPT (71) +#define NV2080_NOTIFIERS_GPIO_25_RISING_INTERRUPT (72) +#define NV2080_NOTIFIERS_GPIO_26_RISING_INTERRUPT (73) +#define NV2080_NOTIFIERS_GPIO_27_RISING_INTERRUPT (74) +#define NV2080_NOTIFIERS_GPIO_28_RISING_INTERRUPT (75) +#define NV2080_NOTIFIERS_GPIO_29_RISING_INTERRUPT (76) +#define NV2080_NOTIFIERS_GPIO_30_RISING_INTERRUPT (77) +#define NV2080_NOTIFIERS_GPIO_31_RISING_INTERRUPT (78) +#define NV2080_NOTIFIERS_GPIO_0_FALLING_INTERRUPT (79) +#define NV2080_NOTIFIERS_GPIO_1_FALLING_INTERRUPT (80) +#define NV2080_NOTIFIERS_GPIO_2_FALLING_INTERRUPT (81) +#define NV2080_NOTIFIERS_GPIO_3_FALLING_INTERRUPT (82) +#define NV2080_NOTIFIERS_GPIO_4_FALLING_INTERRUPT (83) +#define NV2080_NOTIFIERS_GPIO_5_FALLING_INTERRUPT (84) +#define NV2080_NOTIFIERS_GPIO_6_FALLING_INTERRUPT (85) +#define NV2080_NOTIFIERS_GPIO_7_FALLING_INTERRUPT (86) +#define NV2080_NOTIFIERS_GPIO_8_FALLING_INTERRUPT (87) +#define NV2080_NOTIFIERS_GPIO_9_FALLING_INTERRUPT (88) +#define NV2080_NOTIFIERS_GPIO_10_FALLING_INTERRUPT (89) +#define NV2080_NOTIFIERS_GPIO_11_FALLING_INTERRUPT (90) +#define NV2080_NOTIFIERS_GPIO_12_FALLING_INTERRUPT (91) +#define NV2080_NOTIFIERS_GPIO_13_FALLING_INTERRUPT (92) +#define NV2080_NOTIFIERS_GPIO_14_FALLING_INTERRUPT (93) +#define NV2080_NOTIFIERS_GPIO_15_FALLING_INTERRUPT (94) +#define NV2080_NOTIFIERS_GPIO_16_FALLING_INTERRUPT (95) +#define NV2080_NOTIFIERS_GPIO_17_FALLING_INTERRUPT (96) +#define NV2080_NOTIFIERS_GPIO_18_FALLING_INTERRUPT (97) +#define NV2080_NOTIFIERS_GPIO_19_FALLING_INTERRUPT (98) +#define NV2080_NOTIFIERS_GPIO_20_FALLING_INTERRUPT (99) +#define NV2080_NOTIFIERS_GPIO_21_FALLING_INTERRUPT (100) +#define NV2080_NOTIFIERS_GPIO_22_FALLING_INTERRUPT (101) +#define NV2080_NOTIFIERS_GPIO_23_FALLING_INTERRUPT (102) +#define NV2080_NOTIFIERS_GPIO_24_FALLING_INTERRUPT (103) +#define NV2080_NOTIFIERS_GPIO_25_FALLING_INTERRUPT (104) +#define NV2080_NOTIFIERS_GPIO_26_FALLING_INTERRUPT (105) +#define NV2080_NOTIFIERS_GPIO_27_FALLING_INTERRUPT (106) +#define NV2080_NOTIFIERS_GPIO_28_FALLING_INTERRUPT (107) +#define NV2080_NOTIFIERS_GPIO_29_FALLING_INTERRUPT (108) +#define NV2080_NOTIFIERS_GPIO_30_FALLING_INTERRUPT (109) +#define NV2080_NOTIFIERS_GPIO_31_FALLING_INTERRUPT (110) +#define NV2080_NOTIFIERS_ECC_SBE (111) +#define NV2080_NOTIFIERS_ECC_DBE (112) +#define NV2080_NOTIFIERS_STEREO_EMITTER_DETECTION (113) +#define NV2080_NOTIFIERS_GC5_GPU_READY (114) +#define NV2080_NOTIFIERS_SEC2 (115) +#define NV2080_NOTIFIERS_GC6_REFCOUNT_INC (116) +#define NV2080_NOTIFIERS_GC6_REFCOUNT_DEC (117) +#define NV2080_NOTIFIERS_POWER_EVENT (118) +#define NV2080_NOTIFIERS_CLOCKS_CHANGE (119) +#define NV2080_NOTIFIERS_HOTPLUG_PROCESSING_COMPLETE (120) +#define NV2080_NOTIFIERS_PHYSICAL_PAGE_FAULT (121) +#define NV2080_NOTIFIERS_RESERVED122 (122) +#define NV2080_NOTIFIERS_NVLINK_ERROR_FATAL (123) +#define NV2080_NOTIFIERS_PRIV_REG_ACCESS_FAULT (124) +#define NV2080_NOTIFIERS_NVLINK_ERROR_RECOVERY_REQUIRED (125) +#define NV2080_NOTIFIERS_NVJPG (126) +#define NV2080_NOTIFIERS_NVJPEG0 NV2080_NOTIFIERS_NVJPG +#define NV2080_NOTIFIERS_NVJPEG1 (127) +#define NV2080_NOTIFIERS_NVJPEG2 (128) +#define NV2080_NOTIFIERS_NVJPEG3 (129) +#define NV2080_NOTIFIERS_NVJPEG4 (130) +#define NV2080_NOTIFIERS_NVJPEG5 (131) +#define NV2080_NOTIFIERS_NVJPEG6 (132) +#define NV2080_NOTIFIERS_NVJPEG7 (133) +#define NV2080_NOTIFIERS_RUNLIST_AND_ENG_IDLE (134) +#define NV2080_NOTIFIERS_RUNLIST_ACQUIRE (135) +#define 
NV2080_NOTIFIERS_RUNLIST_ACQUIRE_AND_ENG_IDLE (136) +#define NV2080_NOTIFIERS_RUNLIST_IDLE (137) +#define NV2080_NOTIFIERS_TSG_PREEMPT_COMPLETE (138) +#define NV2080_NOTIFIERS_RUNLIST_PREEMPT_COMPLETE (139) +#define NV2080_NOTIFIERS_CTXSW_TIMEOUT (140) +#define NV2080_NOTIFIERS_INFOROM_ECC_OBJECT_UPDATED (141) +#define NV2080_NOTIFIERS_NVTELEMETRY_REPORT_EVENT (142) +#define NV2080_NOTIFIERS_DSTATE_XUSB_PPC (143) +#define NV2080_NOTIFIERS_FECS_CTX_SWITCH (144) +#define NV2080_NOTIFIERS_XUSB_PPC_CONNECTED (145) +#define NV2080_NOTIFIERS_GR0 NV2080_NOTIFIERS_GRAPHICS +#define NV2080_NOTIFIERS_GR1 (146) +#define NV2080_NOTIFIERS_GR2 (147) +#define NV2080_NOTIFIERS_GR3 (148) +#define NV2080_NOTIFIERS_GR4 (149) +#define NV2080_NOTIFIERS_GR5 (150) +#define NV2080_NOTIFIERS_GR6 (151) +#define NV2080_NOTIFIERS_GR7 (152) +#define NV2080_NOTIFIERS_OFA (153) +#define NV2080_NOTIFIERS_OFA0 NV2080_NOTIFIERS_OFA +#define NV2080_NOTIFIERS_DSTATE_HDA (154) +#define NV2080_NOTIFIERS_POISON_ERROR_NON_FATAL (155) +#define NV2080_NOTIFIERS_POISON_ERROR_FATAL (156) +#define NV2080_NOTIFIERS_UCODE_RESET (157) +#define NV2080_NOTIFIERS_PLATFORM_POWER_MODE_CHANGE (158) +#define NV2080_NOTIFIERS_SMC_CONFIG_UPDATE (159) +#define NV2080_NOTIFIERS_INFOROM_RRL_OBJECT_UPDATED (160) +#define NV2080_NOTIFIERS_INFOROM_PBL_OBJECT_UPDATED (161) +#define NV2080_NOTIFIERS_LPWR_DIFR_PREFETCH_REQUEST (162) +#define NV2080_NOTIFIERS_SEC_FAULT_ERROR (163) +#define NV2080_NOTIFIERS_UNUSED_1 (164) // Unused +#define NV2080_NOTIFIERS_NVLINK_INFO_LINK_UP (165) +#define NV2080_NOTIFIERS_CE10 (166) +#define NV2080_NOTIFIERS_CE11 (167) +#define NV2080_NOTIFIERS_CE12 (168) +#define NV2080_NOTIFIERS_CE13 (169) +#define NV2080_NOTIFIERS_CE14 (170) +#define NV2080_NOTIFIERS_CE15 (171) +#define NV2080_NOTIFIERS_CE16 (172) +#define NV2080_NOTIFIERS_CE17 (173) +#define NV2080_NOTIFIERS_CE18 (174) +#define NV2080_NOTIFIERS_CE19 (175) +#define NV2080_NOTIFIERS_NVLINK_INFO_LINK_DOWN (176) +#define NV2080_NOTIFIERS_NVPCF_EVENTS (177) +#define NV2080_NOTIFIERS_HDMI_FRL_RETRAINING_REQUEST (178) +#define NV2080_NOTIFIERS_VRR_SET_TIMEOUT (179) +#define NV2080_NOTIFIERS_OFA1 (180) +#define NV2080_NOTIFIERS_AUX_POWER_EVENT (181) +#define NV2080_NOTIFIERS_AUX_POWER_STATE_CHANGE (182) +#define NV2080_NOTIFIERS_NVENC3 (183) +#define NV2080_NOTIFIERS_GSP_PERF_TRACE (184) +#define NV2080_NOTIFIERS_INBAND_RESPONSE (185) +#define NV2080_NOTIFIERS_RESERVED_186 (186) // Unused +#define NV2080_NOTIFIERS_ECC_SBE_STORM (187) +#define NV2080_NOTIFIERS_DRAM_RETIREMENT_EVENT (188) +#define NV2080_NOTIFIERS_DRAM_RETIREMENT_FAILURE (189) +#define NV2080_NOTIFIERS_NVLINK_UNCONTAINED_ERROR (190) +#define NV2080_NOTIFIERS_GPU_UNAVAILABLE (191) +#define NV2080_NOTIFIERS_GPU_RECOVERY_ACTION (192) +#define NV2080_NOTIFIERS_POWER_SUSPEND (193) +#define NV2080_NOTIFIERS_POWER_RESUME (194) +#define NV2080_NOTIFIERS_CTXSW_UCODE_ERROR (195) +#define NV2080_NOTIFIERS_USE_GC6_REDUCED_THRESHOLD (196) +#define NV2080_NOTIFIERS_GPU_RC_RESET (197) +#define NV2080_NOTIFIERS_MAXCOUNT (198) + +// Indexed GR notifier reference +#define NV2080_NOTIFIERS_GR(x) ((x == 0) ? (NV2080_NOTIFIERS_GR0) : (NV2080_NOTIFIERS_GR1 + (x - 1))) +#define NV2080_NOTIFIERS_GR_IDX(x) ((x) - NV2080_NOTIFIERS_GR0) +#define NV2080_NOTIFIER_TYPE_IS_GR(x) (((x) == NV2080_NOTIFIERS_GR0) || (((x) >= NV2080_NOTIFIERS_GR1) && ((x) <= NV2080_NOTIFIERS_GR7))) + +// Indexed CE notifier reference +#define NV2080_NOTIFIERS_CE(x) (((x) < 10) ? 
(NV2080_NOTIFIERS_CE0 + (x)) : (NV2080_NOTIFIERS_CE10 + (x) - 10)) +#define NV2080_NOTIFIERS_CE_IDX(x) (((x) <= NV2080_NOTIFIERS_CE9) ? ((x) - NV2080_NOTIFIERS_CE0) : ((x) - NV2080_NOTIFIERS_CE10 + 10)) +#define NV2080_NOTIFIER_TYPE_IS_CE(x) ((((x) >= NV2080_NOTIFIERS_CE0) && ((x) <= NV2080_NOTIFIERS_CE9)) || \ + (((x) >= NV2080_NOTIFIERS_CE10) && ((x) <= NV2080_NOTIFIERS_CE19))) + +// Indexed MSENC notifier reference +#define NV2080_NOTIFIERS_NVENC(x) (((x) < 3) ? (NV2080_NOTIFIERS_NVENC0 + (x)) : (NV2080_NOTIFIERS_NVENC3 + (x) - 3)) +#define NV2080_NOTIFIERS_NVENC_IDX(x) (((x) <= NV2080_NOTIFIERS_NVENC2) ? ((x) - NV2080_NOTIFIERS_NVENC0) : ((x) - NV2080_NOTIFIERS_NVENC3 + 3)) +#define NV2080_NOTIFIER_TYPE_IS_NVENC(x) ((((x) >= NV2080_NOTIFIERS_NVENC0) && ((x) <= NV2080_NOTIFIERS_NVENC2)) || \ + (((x) == NV2080_NOTIFIERS_NVENC3))) +// Indexed NVDEC notifier reference +#define NV2080_NOTIFIERS_NVDEC(x) (NV2080_NOTIFIERS_NVDEC0 + (x)) +#define NV2080_NOTIFIERS_NVDEC_IDX(x) ((x) - NV2080_NOTIFIERS_NVDEC0) +#define NV2080_NOTIFIER_TYPE_IS_NVDEC(x) (((x) >= NV2080_NOTIFIERS_NVDEC0) && ((x) <= NV2080_NOTIFIERS_NVDEC7)) +// Indexed NVJPEG notifier reference +#define NV2080_NOTIFIERS_NVJPEG(x) (NV2080_NOTIFIERS_NVJPEG0 + (x)) +#define NV2080_NOTIFIERS_NVJPEG_IDX(x) ((x) - NV2080_NOTIFIERS_NVJPEG0) +#define NV2080_NOTIFIER_TYPE_IS_NVJPEG(x) (((x) >= NV2080_NOTIFIERS_NVJPEG0) && ((x) <= NV2080_NOTIFIERS_NVJPEG7)) + +// Indexed OFA notifier reference +#define NV2080_NOTIFIERS_OFAn(x) ((x == 0) ? (NV2080_NOTIFIERS_OFA0) : (NV2080_NOTIFIERS_OFA1)) +#define NV2080_NOTIFIERS_OFA_IDX(x) ((x == NV2080_NOTIFIERS_OFA0) ? ((x) - NV2080_NOTIFIERS_OFA0) : ((x) - NV2080_NOTIFIERS_OFA1 + 1)) +#define NV2080_NOTIFIER_TYPE_IS_OFA(x) (((x) == NV2080_NOTIFIERS_OFA0) || ((x) == NV2080_NOTIFIERS_OFA1)) + +#define NV2080_NOTIFIERS_GPIO_RISING_INTERRUPT(pin) (NV2080_NOTIFIERS_GPIO_0_RISING_INTERRUPT + (pin)) +#define NV2080_NOTIFIERS_GPIO_FALLING_INTERRUPT(pin) (NV2080_NOTIFIERS_GPIO_0_FALLING_INTERRUPT + (pin)) + +#define NV2080_SUBDEVICE_NOTIFICATION_STATUS_IN_PROGRESS (0x8000) +#define NV2080_SUBDEVICE_NOTIFICATION_STATUS_BAD_ARGUMENT (0x4000) +#define NV2080_SUBDEVICE_NOTIFICATION_STATUS_ERROR_INVALID_STATE (0x2000) +#define NV2080_SUBDEVICE_NOTIFICATION_STATUS_ERROR_STATE_IN_USE (0x1000) +#define NV2080_SUBDEVICE_NOTIFICATION_STATUS_DONE_SUCCESS (0x0000) + +/* exported engine defines */ +#define NV2080_ENGINE_TYPE_NULL (0x00000000) +#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001) +#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS +#define NV2080_ENGINE_TYPE_GR1 (0x00000002) +#define NV2080_ENGINE_TYPE_GR2 (0x00000003) +#define NV2080_ENGINE_TYPE_GR3 (0x00000004) +#define NV2080_ENGINE_TYPE_GR4 (0x00000005) +#define NV2080_ENGINE_TYPE_GR5 (0x00000006) +#define NV2080_ENGINE_TYPE_GR6 (0x00000007) +#define NV2080_ENGINE_TYPE_GR7 (0x00000008) +#define NV2080_ENGINE_TYPE_COPY0 (0x00000009) +#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a) +#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b) +#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c) +#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d) +#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e) +#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f) +#define NV2080_ENGINE_TYPE_COPY7 (0x00000010) +#define NV2080_ENGINE_TYPE_COPY8 (0x00000011) +#define NV2080_ENGINE_TYPE_COPY9 (0x00000012) +#define NV2080_ENGINE_TYPE_BSP (0x00000013) +#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP +#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014) +#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015) +#define 
NV2080_ENGINE_TYPE_NVDEC3 (0x00000016) +#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017) +#define NV2080_ENGINE_TYPE_NVDEC5 (0x00000018) +#define NV2080_ENGINE_TYPE_NVDEC6 (0x00000019) +#define NV2080_ENGINE_TYPE_NVDEC7 (0x0000001a) +#define NV2080_ENGINE_TYPE_MSENC (0x0000001b) +#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */ +#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c) +#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d) +#define NV2080_ENGINE_TYPE_VP (0x0000001e) +#define NV2080_ENGINE_TYPE_ME (0x0000001f) +#define NV2080_ENGINE_TYPE_PPP (0x00000020) +#define NV2080_ENGINE_TYPE_MPEG (0x00000021) +#define NV2080_ENGINE_TYPE_SW (0x00000022) +#define NV2080_ENGINE_TYPE_CIPHER (0x00000023) +#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER +#define NV2080_ENGINE_TYPE_VIC (0x00000024) +#define NV2080_ENGINE_TYPE_MP (0x00000025) +#define NV2080_ENGINE_TYPE_SEC2 (0x00000026) +#define NV2080_ENGINE_TYPE_HOST (0x00000027) +#define NV2080_ENGINE_TYPE_DPU (0x00000028) +#define NV2080_ENGINE_TYPE_PMU (0x00000029) +#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a) +#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b) +#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG +#define NV2080_ENGINE_TYPE_NVJPEG1 (0x0000002c) +#define NV2080_ENGINE_TYPE_NVJPEG2 (0x0000002d) +#define NV2080_ENGINE_TYPE_NVJPEG3 (0x0000002e) +#define NV2080_ENGINE_TYPE_NVJPEG4 (0x0000002f) +#define NV2080_ENGINE_TYPE_NVJPEG5 (0x00000030) +#define NV2080_ENGINE_TYPE_NVJPEG6 (0x00000031) +#define NV2080_ENGINE_TYPE_NVJPEG7 (0x00000032) +#define NV2080_ENGINE_TYPE_OFA (0x00000033) +#define NV2080_ENGINE_TYPE_OFA0 NV2080_ENGINE_TYPE_OFA +// Update the TYPE_COMP_DECOMP_COPYN defines as well when you update COPYN defines +#define NV2080_ENGINE_TYPE_COPY10 (0x00000034) +#define NV2080_ENGINE_TYPE_COPY11 (0x00000035) +#define NV2080_ENGINE_TYPE_COPY12 (0x00000036) +#define NV2080_ENGINE_TYPE_COPY13 (0x00000037) +#define NV2080_ENGINE_TYPE_COPY14 (0x00000038) +#define NV2080_ENGINE_TYPE_COPY15 (0x00000039) +#define NV2080_ENGINE_TYPE_COPY16 (0x0000003a) +#define NV2080_ENGINE_TYPE_COPY17 (0x0000003b) +#define NV2080_ENGINE_TYPE_COPY18 (0x0000003c) +#define NV2080_ENGINE_TYPE_COPY19 (0x0000003d) +#define NV2080_ENGINE_TYPE_OFA1 (0x0000003e) +#define NV2080_ENGINE_TYPE_NVENC3 (0x0000003f) +// See TBD documentation for how these defines work with existing ENGINE_TYPE_COPYN defines +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY0 (0x00000040) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY1 (0x00000041) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY2 (0x00000042) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY3 (0x00000043) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY4 (0x00000044) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY5 (0x00000045) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY6 (0x00000046) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY7 (0x00000047) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY8 (0x00000048) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY9 (0x00000049) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY10 (0x0000004a) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY11 (0x0000004b) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY12 (0x0000004c) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY13 (0x0000004d) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY14 (0x0000004e) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY15 (0x0000004f) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY16 (0x00000050) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY17 (0x00000051) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY18 (0x00000052) 
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY19 (0x00000053) +#define NV2080_ENGINE_TYPE_LAST (0x00000054) +#define NV2080_ENGINE_TYPE_ALLENGINES (0xffffffff) + +// +// NV2080_ENGINE_TYPE_COPY_SIZE is now defined as the maximum possible CE size. +// It does not reflect the max supported NV2080_ENGINE_TYPE_COPY counts. Bug 3713687 #90. +// Clients must use NV2080_ENGINE_TYPE_IS_COPY() to check whether a CE is valid when +// enumerating all NV2080_ENGINE_TYPE_COPYs. +// +#define NV2080_ENGINE_TYPE_COPY_SIZE 64 + +#define NV2080_ENGINE_TYPE_NVENC_SIZE 3 +#define NV2080_ENGINE_TYPE_NVJPEG_SIZE 8 +#define NV2080_ENGINE_TYPE_NVDEC_SIZE 8 +#define NV2080_ENGINE_TYPE_GR_SIZE 8 +#define NV2080_ENGINE_TYPE_OFA_SIZE 2 + +// Indexed engines +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY(i) (NV2080_ENGINE_TYPE_COMP_DECOMP_COPY0 + (i)) +#define NV2080_ENGINE_TYPE_IS_COMP_DECOMP_COPY(i) (((i) >= NV2080_ENGINE_TYPE_COMP_DECOMP_COPY0) && ((i) <= NV2080_ENGINE_TYPE_COMP_DECOMP_COPY19)) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY_IDX(i) ((i) - NV2080_ENGINE_TYPE_COMP_DECOMP_COPY0) + +#define NV2080_ENGINE_TYPE_COPY(i) (((i) < 10) ? (NV2080_ENGINE_TYPE_COPY0 + (i)) : (NV2080_ENGINE_TYPE_COPY10 + (i) - 10)) +#define NV2080_ENGINE_TYPE_IS_COPY(i) ((((i) >= NV2080_ENGINE_TYPE_COPY0) && ((i) <= NV2080_ENGINE_TYPE_COPY9)) || \ + (((i) >= NV2080_ENGINE_TYPE_COPY10) && ((i) <= NV2080_ENGINE_TYPE_COPY19))) +#define NV2080_ENGINE_TYPE_COPY_IDX(i) (((i) <= NV2080_ENGINE_TYPE_COPY9) ? \ + ((i) - NV2080_ENGINE_TYPE_COPY0) : ((i) - NV2080_ENGINE_TYPE_COPY10 + 10)) + +#define NV2080_ENGINE_TYPE_NVENC(i) (((i) < 3) ? (NV2080_ENGINE_TYPE_NVENC0 + (i)) : (NV2080_ENGINE_TYPE_NVENC3 + (i) - 3)) +#define NV2080_ENGINE_TYPE_IS_NVENC(i) ((((i) >= NV2080_ENGINE_TYPE_NVENC0) && ((i) <= NV2080_ENGINE_TYPE_NVENC2)) || \ + (((i) == NV2080_ENGINE_TYPE_NVENC3))) +#define NV2080_ENGINE_TYPE_NVENC_IDX(i) (((i) <= NV2080_ENGINE_TYPE_NVENC2) ? \ + ((i)-NV2080_ENGINE_TYPE_NVENC0) : ((i)-NV2080_ENGINE_TYPE_NVENC3 + 3)) + +#define NV2080_ENGINE_TYPE_NVDEC(i) (NV2080_ENGINE_TYPE_NVDEC0+(i)) +#define NV2080_ENGINE_TYPE_IS_NVDEC(i) (((i) >= NV2080_ENGINE_TYPE_NVDEC0) && ((i) < NV2080_ENGINE_TYPE_NVDEC(NV2080_ENGINE_TYPE_NVDEC_SIZE))) +#define NV2080_ENGINE_TYPE_NVDEC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVDEC0) + +#define NV2080_ENGINE_TYPE_NVJPEG(i) (NV2080_ENGINE_TYPE_NVJPEG0+(i)) +#define NV2080_ENGINE_TYPE_IS_NVJPEG(i) (((i) >= NV2080_ENGINE_TYPE_NVJPEG0) && ((i) < NV2080_ENGINE_TYPE_NVJPEG(NV2080_ENGINE_TYPE_NVJPEG_SIZE))) +#define NV2080_ENGINE_TYPE_NVJPEG_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVJPEG0) + +#define NV2080_ENGINE_TYPE_GR(i) (NV2080_ENGINE_TYPE_GR0 + (i)) +#define NV2080_ENGINE_TYPE_IS_GR(i) (((i) >= NV2080_ENGINE_TYPE_GR0) && ((i) < NV2080_ENGINE_TYPE_GR(NV2080_ENGINE_TYPE_GR_SIZE))) +#define NV2080_ENGINE_TYPE_GR_IDX(i) ((i) - NV2080_ENGINE_TYPE_GR0) + +#define NV2080_ENGINE_TYPE_OFAn(i) ((i == 0) ? (NV2080_ENGINE_TYPE_OFA0) : (NV2080_ENGINE_TYPE_OFA1)) +#define NV2080_ENGINE_TYPE_IS_OFA(i) (((i) == NV2080_ENGINE_TYPE_OFA0) || ((i) == NV2080_ENGINE_TYPE_OFA1)) +#define NV2080_ENGINE_TYPE_OFA_IDX(i) ((i == NV2080_ENGINE_TYPE_OFA0) ? ((i) - NV2080_ENGINE_TYPE_OFA0) : ((i) - NV2080_ENGINE_TYPE_OFA1 + 1)) + +#define NV2080_ENGINE_TYPE_IS_VALID(i) (((i) > (NV2080_ENGINE_TYPE_NULL)) && ((i) < (NV2080_ENGINE_TYPE_LAST))) +
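Because the copy-engine IDs above are split across two non-contiguous ranges (COPY0..COPY9 at 0x09..0x12, COPY10..COPY19 at 0x34..0x3d), a logical CE index cannot be converted to an engine type by plain addition; that is exactly what the NV2080_ENGINE_TYPE_COPY()/NV2080_ENGINE_TYPE_COPY_IDX() pair above encodes. A minimal, self-contained sketch of the round trip, with the defines it needs reproduced from this header (the assert harness is illustrative only):

#include <assert.h>

/* Reproduced from the defines above. */
#define NV2080_ENGINE_TYPE_COPY0  (0x00000009)
#define NV2080_ENGINE_TYPE_COPY9  (0x00000012)
#define NV2080_ENGINE_TYPE_COPY10 (0x00000034)
#define NV2080_ENGINE_TYPE_COPY(i)     (((i) < 10) ? (NV2080_ENGINE_TYPE_COPY0 + (i)) : (NV2080_ENGINE_TYPE_COPY10 + (i) - 10))
#define NV2080_ENGINE_TYPE_COPY_IDX(i) (((i) <= NV2080_ENGINE_TYPE_COPY9) ? ((i) - NV2080_ENGINE_TYPE_COPY0) : ((i) - NV2080_ENGINE_TYPE_COPY10 + 10))

int main(void)
{
    /* Logical CE 12 lands in the second block: 0x34 + (12 - 10) = 0x36. */
    assert(NV2080_ENGINE_TYPE_COPY(12) == 0x00000036);
    /* The inverse macro recovers the logical index from the engine type. */
    assert(NV2080_ENGINE_TYPE_COPY_IDX(0x00000036) == 12);
    return 0;
}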
+/* exported client defines */ +#define NV2080_CLIENT_TYPE_TEX (0x00000001) +#define NV2080_CLIENT_TYPE_COLOR (0x00000002) +#define NV2080_CLIENT_TYPE_DEPTH (0x00000003) +#define NV2080_CLIENT_TYPE_DA (0x00000004) +#define NV2080_CLIENT_TYPE_FE (0x00000005) +#define NV2080_CLIENT_TYPE_SCC (0x00000006) +#define NV2080_CLIENT_TYPE_WID (0x00000007) +#define NV2080_CLIENT_TYPE_MSVLD (0x00000008) +#define NV2080_CLIENT_TYPE_MSPDEC (0x00000009) +#define NV2080_CLIENT_TYPE_MSPPP (0x0000000a) +#define NV2080_CLIENT_TYPE_VIC (0x0000000b) +#define NV2080_CLIENT_TYPE_ALLCLIENTS (0xffffffff) + +/* GC5 Gpu Ready event defines */ +#define NV2080_GC5_EXIT_COMPLETE (0x00000001) +#define NV2080_GC5_ENTRY_ABORTED (0x00000002) + +/* Platform Power Mode event defines */ +#define NV2080_PLATFORM_POWER_MODE_CHANGE_COMPLETION (0x00000000) +#define NV2080_PLATFORM_POWER_MODE_CHANGE_ACPI_NOTIFICATION (0x00000001) + +/* NvNotification[] fields and values */ +#define NV2080_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl2080_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv2080Typedef, Nv20Subdevice0; +#define NV2080_TYPEDEF Nv20Subdevice0 + +/* HDCP Status change notification information */ +typedef struct Nv2080HdcpStatusChangeNotificationRec { + NvU32 displayId; + NvU32 hdcpStatusChangeNotif; +} Nv2080HdcpStatusChangeNotification; + +/* Pstate change notification information */ +typedef struct Nv2080PStateChangeNotificationRec { + struct { + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvU32 NewPstate; +} Nv2080PStateChangeNotification; + +/* Clocks change notification information */ +typedef struct Nv2080ClocksChangeNotificationRec { + struct { + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ +} Nv2080ClocksChangeNotification; + +/* WorkLoad Modulation state change notification information*/ +typedef struct Nv2080WorkloadModulationChangeNotificationRec { + struct { + NvU32 nanoseconds[2]; /* nanoseconds since Jan.
1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvBool WorkloadModulationEnabled; +} Nv2080WorkloadModulationChangeNotification; + +/* Hotplug notification information */ +typedef struct { + NvU32 plugDisplayMask; + NvU32 unplugDisplayMask; +} Nv2080HotplugNotification; + +/* Power state changing notification information */ +typedef struct { + NvBool bSwitchToAC; + NvBool bGPUCapabilityChanged; + NvU32 displayMaskAffected; +} Nv2080PowerEventNotification; + +/* DP IRQ notification information */ +typedef struct Nv2080DpIrqNotificationRec { + NvU32 displayId; +} Nv2080DpIrqNotification; + +/* XUSB/PPC D-State change notification information */ +typedef struct Nv2080DstateXusbPpcNotificationRec { + NvU32 dstateXusb; + NvU32 dstatePpc; +} Nv2080DstateXusbPpcNotification; + +/* XUSB/PPC Connection status notification information */ +typedef struct Nv2080XusbPpcConnectStateNotificationRec { + NvBool bConnected; +} Nv2080XusbPpcConnectStateNotification; + +/* ACPI event notification information */ +typedef struct Nv2080ACPIEvent { + NvU32 event; +} Nv2080ACPIEvent; + +/* Cooler Zone notification information */ +typedef struct _NV2080_COOLER_DIAG_ZONE_NOTIFICATION_REC { + NvU32 currentZone; +} NV2080_COOLER_DIAG_ZONE_NOTIFICATION_REC; + +/* Thermal Zone notification information */ +typedef struct _NV2080_THERM_DIAG_ZONE_NOTIFICATION_REC { + NvU32 currentZone; +} NV2080_THERM_DIAG_ZONE_NOTIFICATION_REC; + +/* HDCP ref count change notification information */ +typedef struct Nv2080AudioHdcpRequestRec { + NvU32 displayId; + NvU32 requestedState; +} Nv2080AudioHdcpRequest; + +/* Gpu ready event information */ +typedef struct Nv2080GC5GpuReadyParams { + NvU32 event; + NvU32 sciIntr0; + NvU32 sciIntr1; +} Nv2080GC5GpuReadyParams; + +/* Priv reg access fault notification information */ +typedef struct { + NvU32 errAddr; +} Nv2080PrivRegAccessFaultNotification; + +/* HDA D-State change notification information + * See @HDACODEC_DSTATE for definitions + */ +typedef struct Nv2080DstateHdaCodecNotificationRec { + NvU32 dstateHdaCodec; +} Nv2080DstateHdaCodecNotification; + +/* HDMI FRL retraining request notification information */ +typedef struct Nv2080HdmiFrlRequestNotificationRec { + NvU32 displayId; +} Nv2080HdmiFrlRequestNotification; + +/* + * Platform Power Mode event information + */ +typedef struct _NV2080_PLATFORM_POWER_MODE_CHANGE_STATUS { + NvU8 platformPowerModeIndex; + NvU8 platformPowerModeMask; + NvU8 eventReason; +} NV2080_PLATFORM_POWER_MODE_CHANGE_STATUS; + +/* + * Workload type update event information. 
Workload type is enumerated as per the + * NV2080_CTRL_PERF_PERF_CF_CONTROLLER_DLCC_WORKLOAD_TYPE_ENUM + */ +typedef struct _NV2080_NOTIFIERS_USE_GC6_REDUCED_THRESHOLD_UPDATE { + NvU8 workloadType; +} NV2080_NOTIFIERS_USE_GC6_REDUCED_THRESHOLD_UPDATE; + +#define NV2080_PLATFORM_POWER_MODE_CHANGE_INFO_INDEX 7:0 +#define NV2080_PLATFORM_POWER_MODE_CHANGE_INFO_MASK 15:8 +#define NV2080_PLATFORM_POWER_MODE_CHANGE_INFO_REASON 23:16 + +/* + * ENGINE_INFO_TYPE_NV2080 of the engine for which the QOS interrupt has been raised + */ +typedef struct { + NvU32 engineType; +} Nv2080QosIntrNotification; + +typedef struct { + NvU64 physAddress NV_ALIGN_BYTES(8); +} Nv2080EccDbeNotification; + +/* + * LPWR DIFR Prefetch Request - Size of L2 Cache + */ +typedef struct { + NvU32 l2CacheSize; +} Nv2080LpwrDifrPrefetchNotification; + +/* + * Nvlink Link status change Notification + */ +typedef struct { + NvU32 GpuId; + NvU32 linkId; +} Nv2080NvlinkLnkChangeNotification; + +typedef struct { + NvU32 head; +} Nv2080VrrSetTimeoutNotification; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl2080_notification_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl2081.h b/src/common/sdk/nvidia/inc/class/cl2081.h new file mode 100644 index 0000000..2d1cede --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl2081.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl2081.finn +// + +#define NV2081_BINAPI (0x2081U) /* finn: Evaluated from "NV2081_ALLOC_PARAMETERS_MESSAGE_ID" */ + +#define NV2081_ALLOC_PARAMETERS_MESSAGE_ID (0x2081U) + +typedef struct NV2081_ALLOC_PARAMETERS { + NvU32 reserved; +} NV2081_ALLOC_PARAMETERS; +
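NV2081_ALLOC_PARAMETERS carries only a reserved word, so allocating the class is essentially just a matter of naming NV2081_BINAPI. A hedged sketch of what a client-side allocation could look like; NvRmAlloc is the RM entry point referred to later in this patch (cl30f1.h, cl84a0.h), but its exact signature, the parent object, and the handle plumbing here are assumptions for illustration:

/* Hypothetical helper; a real client obtains its handles from earlier RM calls. */
static NvU32 alloc_binapi(NvHandle hClient, NvHandle hSubdevice, NvHandle hNew)
{
    NV2081_ALLOC_PARAMETERS params = { 0 };   /* 'reserved' must stay zero */

    /* Assumed NvRmAlloc(hClient, hParent, hNewObject, hClass, pParams). */
    return NvRmAlloc(hClient, hSubdevice, hNew, NV2081_BINAPI, &params);
}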
diff --git a/src/common/sdk/nvidia/inc/class/cl2082.h b/src/common/sdk/nvidia/inc/class/cl2082.h new file mode 100644 index 0000000..0846209 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl2082.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl2082.finn +// + +#define NV2082_BINAPI_PRIVILEGED (0x2082U) /* finn: Evaluated from "NV2082_ALLOC_PARAMETERS_MESSAGE_ID" */ + +#define NV2082_ALLOC_PARAMETERS_MESSAGE_ID (0x2082U) + +typedef struct NV2082_ALLOC_PARAMETERS { + NvU32 reserved; +} NV2082_ALLOC_PARAMETERS; + diff --git a/src/common/sdk/nvidia/inc/class/cl30f1.h b/src/common/sdk/nvidia/inc/class/cl30f1.h new file mode 100644 index 0000000..ffba5e1 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl30f1.h @@ -0,0 +1,56 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/cl30f1.finn +// + +#include "cl30f1_notification.h" + +/* class NV30_GSYNC */ +#define NV30_GSYNC (0x30f1U) /* finn: Evaluated from "NV30F1_ALLOC_PARAMETERS_MESSAGE_ID" */ + +#define NV30F1_GSYNC_CONNECTOR_ONE (0) +#define NV30F1_GSYNC_CONNECTOR_TWO (1) +#define NV30F1_GSYNC_CONNECTOR_THREE (2) +#define NV30F1_GSYNC_CONNECTOR_FOUR (3) + +#define NV30F1_GSYNC_CONNECTOR_PRIMARY NV30F1_GSYNC_CONNECTOR_ONE +#define NV30F1_GSYNC_CONNECTOR_SECONDARY NV30F1_GSYNC_CONNECTOR_TWO + +#define NV30F1_GSYNC_CONNECTOR_COUNT (4) + +/* NvRmAlloc parameters */ +#define NV30F1_MAX_GSYNCS (0x0000004) + +#define NV30F1_ALLOC_PARAMETERS_MESSAGE_ID (0x30f1U) + +typedef struct NV30F1_ALLOC_PARAMETERS { + NvU32 gsyncInstance; +} NV30F1_ALLOC_PARAMETERS; + diff --git a/src/common/sdk/nvidia/inc/class/cl30f1_notification.h b/src/common/sdk/nvidia/inc/class/cl30f1_notification.h new file mode 100644 index 0000000..290eb5c --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl30f1_notification.h @@ -0,0 +1,74 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl30f1_notification_h_ +#define _cl30f1_notification_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * A client should use NV01_EVENT_OS_EVENT as hClass and NV30F1_GSYNC_NOTIFIERS_* as + * notify index when allocating an event, if separate event notifications are needed for + * separate event types. + * + * A client should use NV01_EVENT_KERNEL_CALLBACK as hClass and + * NV30F1_GSYNC_NOTIFIERS_ALL as notify index, if a single event is required. + * In this case RM would set the event data to a pointer to an NvNotification structure. + * The info32 field of the NvNotification structure would be equal to a bitmask of + * NV30F1_GSYNC_NOTIFIERS_* values. + */ + +/* NvNotification[] fields and values */ + +/* Framelock sync gain and loss events. These are connector-specific events. */ +#define NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(c) (0x00+(c)) +#define NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(c) (0x04+(c)) + +/* Framelock stereo gain and loss events. These are connector-specific events. */ +#define NV30F1_GSYNC_NOTIFIERS_STEREO_LOSS(c) (0x08+(c)) +#define NV30F1_GSYNC_NOTIFIERS_STEREO_GAIN(c) (0x0C+(c)) + +/* House cable gain(plug in) and loss(plug out) events.
*/ +#define NV30F1_GSYNC_NOTIFIERS_HOUSE_GAIN (0x10) +#define NV30F1_GSYNC_NOTIFIERS_HOUSE_LOSS (0x11) + +/* RJ45 cable gain(plug in) and loss(plug out) events. */ +#define NV30F1_GSYNC_NOTIFIERS_RJ45_GAIN (0x12) +#define NV30F1_GSYNC_NOTIFIERS_RJ45_LOSS (0x13) + +#define NV30F1_GSYNC_NOTIFIERS_MAXCOUNT (0x14) + +/* + * For handling all event types. + * Note that Windows only handles NV01_EVENT_KERNEL_CALLBACK_EX; NV01_EVENT_OS_EVENT can only + * signal an event but cannot hand over any information. + */ +#define NV30F1_GSYNC_NOTIFIERS_ALL NV30F1_GSYNC_NOTIFIERS_MAXCOUNT + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl30f1_notification_h_ */
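Per the comment at the top of this file, a client that registers a single NV30F1_GSYNC_NOTIFIERS_ALL callback receives the event type as a bitmask in NvNotification's info32 field, one bit per NV30F1_GSYNC_NOTIFIERS_* index. A small sketch of decoding such a mask; NVBIT mirrors the bit macro from nvmisc.h, and the printf reporting is illustrative:

#include <stdio.h>

/* Values reproduced from the defines above; NVBIT as in nvmisc.h. */
#define NV30F1_GSYNC_CONNECTOR_ONE          (0)
#define NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(c) (0x00+(c))
#define NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(c) (0x04+(c))
#define NV30F1_GSYNC_NOTIFIERS_HOUSE_GAIN   (0x10)
#define NVBIT(b)                            (1U << (b))

static void decode_gsync_events(unsigned int info32)
{
    if (info32 & NVBIT(NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(NV30F1_GSYNC_CONNECTOR_ONE)))
        printf("framelock sync gained on connector one\n");
    if (info32 & NVBIT(NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(NV30F1_GSYNC_CONNECTOR_ONE)))
        printf("framelock sync lost on connector one\n");
    if (info32 & NVBIT(NV30F1_GSYNC_NOTIFIERS_HOUSE_GAIN))
        printf("house sync cable plugged in\n");
}

int main(void)
{
    /* A mask as RM might deliver it: SYNC_GAIN(0) plus HOUSE_GAIN. */
    decode_gsync_events(NVBIT(0x04) | NVBIT(0x10));
    return 0;
}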
diff --git a/src/common/sdk/nvidia/inc/class/cl402c.h b/src/common/sdk/nvidia/inc/class/cl402c.h new file mode 100644 index 0000000..9ba7c3b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl402c.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl402c_h_ +#define _cl402c_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* I2C operations */ +#define NV40_I2C (0x0000402c) + +typedef volatile struct _cl402c_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv402cTypedef, Nv40I2c; +#define NV402C_TYPEDEF Nv40I2c + + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl402c_h_ */ + diff --git a/src/common/sdk/nvidia/inc/class/cl5070.h b/src/common/sdk/nvidia/inc/class/cl5070.h new file mode 100644 index 0000000..9d16514 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl5070.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl5070.finn +// + +#include "cl5070_notification.h" + +#define NV50_DISPLAY (0x5070U) /* finn: Evaluated from "NV5070_ALLOCATION_PARAMETERS_MESSAGE_ID" */ + +#define NV5070_ALLOCATION_PARAMETERS_MESSAGE_ID (0x5070U) + +typedef struct NV5070_ALLOCATION_PARAMETERS { + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numDacs; // Number of DACs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NV5070_ALLOCATION_PARAMETERS; diff --git a/src/common/sdk/nvidia/inc/class/cl5070_notification.h b/src/common/sdk/nvidia/inc/class/cl5070_notification.h new file mode 100644 index 0000000..a23107d --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl5070_notification.h @@ -0,0 +1,45 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl5070_notification_h_ +#define _cl5070_notification_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* event values */ +#define NV5070_NOTIFIERS_SW (0) +#define NV5070_NOTIFIERS_MAXCOUNT (1) + +#define NV5070_NOTIFICATION_STATUS_IN_PROGRESS (0x8000) +#define NV5070_NOTIFICATION_STATUS_BAD_ARGUMENT (0x4000) +#define NV5070_NOTIFICATION_STATUS_ERROR_INVALID_STATE (0x2000) +#define NV5070_NOTIFICATION_STATUS_ERROR_STATE_IN_USE (0x1000) +#define NV5070_NOTIFICATION_STATUS_DONE_SUCCESS (0x0000) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl5070_notification_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl50a0.h b/src/common/sdk/nvidia/inc/class/cl50a0.h new file mode 100644 index 0000000..5fec57e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl50a0.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2005, NVIDIA CORPORATION. All rights reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl50a0_h_ +#define _cl50a0_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV50_MEMORY_VIRTUAL (0x000050a0) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl50a0_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl84a0.h b/src/common/sdk/nvidia/inc/class/cl84a0.h new file mode 100644 index 0000000..a1f6d90 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl84a0.h @@ -0,0 +1,110 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl84a0.finn +// + +#include "cl84a0_deprecated.h" + +/* + * Class definitions for creating a memory descriptor from a list of page numbers + * in RmAllocMemory. No memory is allocated: only a memory descriptor and + * memory object are created for later use in other calls. These classes + * are used by vGPU to create references to memory assigned to a guest VM. + * In all cases, the list is passed as reference, in the pAddress argument + * of RmAllocMemory, to a Nv01MemoryList structure (cast to a void **).
+ */ + +/* List of system memory physical page numbers */ +#define NV01_MEMORY_LIST_SYSTEM (0x00000081) +/* List of frame buffer physical page numbers */ +#define NV01_MEMORY_LIST_FBMEM (0x00000082) +/* List of page numbers relative to the start of the specified object */ +#define NV01_MEMORY_LIST_OBJECT (0x00000083) + +/* + * List structure of NV01_MEMORY_LIST_* classes + * + * The pageNumber array is variable in length, with pageCount elements, + * so the allocated size of the structure must reflect that. + * + * FBMEM items apply only to NV01_MEMORY_LIST_FBMEM and to + * NV01_MEMORY_LIST_OBJECT when the underlying object is + * FBMEM (must be zero for other cases) + * + * NV_MEMORY_LIST_ALLOCATION_PARAMS - Allocation params to create memory list + * through NvRmAlloc. + */ +#define NV_MEMORY_LIST_ALLOCATION_PARAMS_MESSAGE_ID (0x84a0U) + +typedef struct NV_MEMORY_LIST_ALLOCATION_PARAMS { + NvHandle hClient; /* client to which object belongs + * (may differ from client creating the mapping). + * May be NV01_NULL_OBJECT, in which case client + * handle is used */ + NvHandle hParent; /* device with which object is associated. + * Must be NV01_NULL_OBJECT if hClient is NV01_NULL_OBJECT. + * Must not be NV01_NULL_OBJECT if hClient is + * not NV01_NULL_OBJECT. */ + NvHandle hObject; /* object to which pages are relative + * (NV01_NULL_OBJECT for NV01_MEMORY_LIST_SYSTEM + * and NV01_MEMORY_LIST_FBMEM) */ + NvHandle hHwResClient; /* client associated with the backdoor vnc surface*/ + NvHandle hHwResDevice; /* device associated with the backdoor vnc surface*/ + NvHandle hHwResHandle; /* handle to hardware resources allocated to + * backdoor vnc surface*/ + NvU32 pteAdjust; /* offset of data in first page */ + NvU32 reserved_0; /* reserved: must be 0 */ + NvU32 type; /* FBMEM: NVOS32_TYPE_* */ + NvU32 flags; /* FBMEM: NVOS32_ALLOC_FLAGS_* */ + NvU32 attr; /* FBMEM: NVOS32_ATTR_* */ + NvU32 attr2; /* FBMEM: NVOS32_ATTR2_* */ + NvU32 height; /* FBMEM: height in pixels */ + NvU32 width; /* FBMEM: width in pixels */ + NvU32 format; /* FBMEM: memory kind */ + NvU32 comprcovg; /* FBMEM: compression coverage */ + NvU32 zcullcovg; /* FBMEM: Z-cull coverage */ + NvU32 pageCount; /* count of elements in pageNumber array */ + NvU32 heapOwner; /* heap owner information from client */ + + NV_DECLARE_ALIGNED(NvU64 guestId, 8); + /* ID of the guest VM. e.g., domain ID in case of Xen */ + NV_DECLARE_ALIGNED(NvU64 rangeBegin, 8); + /* preferred VA range start address */ + NV_DECLARE_ALIGNED(NvU64 rangeEnd, 8); + /* preferred VA range end address */ + NvU32 pitch; + NvU32 ctagOffset; + NV_DECLARE_ALIGNED(NvU64 size, 8); + NV_DECLARE_ALIGNED(NvU64 align, 8); + NV_DECLARE_ALIGNED(NvP64 pageNumberList, 8); + NV_DECLARE_ALIGNED(NvU64 limit, 8); + NvU32 flagsOs02; +} NV_MEMORY_LIST_ALLOCATION_PARAMS; +
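For a plain system-memory page list, most of the FBMEM-only fields above stay zero and the interesting members are the handles, the page array, and the size. A hedged sketch of filling the structure for NV01_MEMORY_LIST_SYSTEM; the 4 KiB page size and the cast through NvUPtr are assumptions for illustration, and nvtypes.h plus this header are assumed included:

#include <string.h>

static void fill_sysmem_list(NV_MEMORY_LIST_ALLOCATION_PARAMS *p,
                             NvU64 *pages, NvU32 n)
{
    memset(p, 0, sizeof(*p));                 /* FBMEM-only fields stay zero */
    p->hClient        = NV01_NULL_OBJECT;     /* fall back to the allocating client */
    p->hParent        = NV01_NULL_OBJECT;     /* must be null when hClient is null */
    p->hObject        = NV01_NULL_OBJECT;     /* pages are absolute for _LIST_SYSTEM */
    p->pteAdjust      = 0;                    /* data starts at offset 0 of first page */
    p->pageCount      = n;
    p->pageNumberList = (NvP64)(NvUPtr)pages; /* NvP64 cast convention per nvtypes.h */
    p->size           = (NvU64)n * 4096;      /* assuming 4 KiB pages */
    /* p is then handed to the RM allocation path named above with
     * class NV01_MEMORY_LIST_SYSTEM. */
}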
diff --git a/src/common/sdk/nvidia/inc/class/cl84a0_deprecated.h b/src/common/sdk/nvidia/inc/class/cl84a0_deprecated.h new file mode 100644 index 0000000..f1e7f0c --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl84a0_deprecated.h @@ -0,0 +1,90 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl84a0_deprecated_h_ +#define _cl84a0_deprecated_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * List structure of NV01_MEMORY_LIST_* classes + * + * The pageNumber array is variable in length, with pageCount elements, + * so the allocated size of the structure must reflect that. + * + * FBMEM items apply only to NV01_MEMORY_LIST_FBMEM and to + * NV01_MEMORY_LIST_OBJECT when the underlying object is + * FBMEM (must be zero for other cases) + * + * Nv01MemoryList is deprecated. NV_MEMORY_LIST_ALLOCATION_PARAMS should be used + * instead. + */ +typedef struct Nv01MemoryListRec { + NvHandle hClient; /* client to which object belongs + * (may differ from client creating the mapping). + * May be NV01_NULL_OBJECT, in which case client + * handle is used */ + NvHandle hParent; /* device with which object is associated. + * Must be NV01_NULL_OBJECT if hClient is NV01_NULL_OBJECT. + * Must not be NV01_NULL_OBJECT if hClient is + * not NV01_NULL_OBJECT. */ + NvHandle hObject; /* object to which pages are relative + * (NV01_NULL_OBJECT for NV01_MEMORY_LIST_SYSTEM + * and NV01_MEMORY_LIST_FBMEM) */ + NvHandle hHwResClient;/* client associated with the backdoor vnc surface*/ + NvHandle hHwResDevice;/* device associated with the backdoor vnc surface*/ + NvHandle hHwResHandle;/* handle to hardware resources allocated to + * backdoor vnc surface*/ + NvU32 pteAdjust; /* offset of data in first page */ + NvU32 type; /* FBMEM: NVOS32_TYPE_* */ + NvU32 flags; /* FBMEM: NVOS32_ALLOC_FLAGS_* */ + NvU32 attr; /* FBMEM: NVOS32_ATTR_* */ + NvU32 attr2; /* FBMEM: NVOS32_ATTR2_* */ + NvU32 height; /* FBMEM: height in pixels */ + NvU32 width; /* FBMEM: width in pixels */ + NvU32 format; /* FBMEM: memory kind */ + NvU32 comprcovg; /* FBMEM: compression coverage */ + NvU32 zcullcovg; /* FBMEM: Z-cull coverage */ + NvU32 pageCount; /* count of elements in pageNumber array */ + NvU32 heapOwner; /* heap owner information from client */ + NvU32 reserved_1; /* reserved: must be 0 */ + NvU64 NV_DECLARE_ALIGNED(guestId,8); + /* ID of the guest VM. e.g., domain ID in case of Xen */ + NvU64 NV_DECLARE_ALIGNED(rangeBegin,8); + /* preferred VA range start address */ + NvU64 NV_DECLARE_ALIGNED(rangeEnd,8); + /* preferred VA range end address */ + NvU32 pitch; + NvU32 ctagOffset; + NvU64 size; + NvU64 align; + NvU64 pageNumber[1]; /* variable length array of page numbers */ +} Nv01MemoryList; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl84a0_deprecated_h_ */
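As the comment above says, pageNumber is variable in length, and since it is declared with the pre-C99 one-element-array idiom, the allocated size of Nv01MemoryList must account for pageCount - 1 additional NvU64 entries. A sketch, assuming this header and nvtypes.h are included and pageCount >= 1:

#include <stdlib.h>

static Nv01MemoryList *alloc_memory_list(NvU32 pageCount)
{
    /* sizeof already covers pageNumber[0]; add the remaining entries. */
    size_t bytes = sizeof(Nv01MemoryList) + ((size_t)pageCount - 1) * sizeof(NvU64);
    Nv01MemoryList *list = calloc(1, bytes);

    if (list != NULL)
        list->pageCount = pageCount;
    return list;
}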
diff --git a/src/common/sdk/nvidia/inc/class/cl900e.h b/src/common/sdk/nvidia/inc/class/cl900e.h new file mode 100644 index 0000000..6684889 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl900e.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl900e_h_ +#define _cl900e_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define MPS_COMPUTE (0x0000900E) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl900e_h_ */ + diff --git a/src/common/sdk/nvidia/inc/class/cl9010.h b/src/common/sdk/nvidia/inc/class/cl9010.h new file mode 100644 index 0000000..ad901c7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9010.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/cl9010.finn +// + +#include "class/cl9010_callback.h" + +#define NV9010_VBLANK_CALLBACK (0x9010U) /* finn: Evaluated from "NV_VBLANK_CALLBACK_ALLOCATION_PARAMETERS_MESSAGE_ID" */ + +#define NV_VBLANK_CALLBACK_ALLOCATION_PARAMETERS_MESSAGE_ID (0x9010U) + +typedef struct NV_VBLANK_CALLBACK_ALLOCATION_PARAMETERS { + NV_DECLARE_ALIGNED(NvP64 pProc, 8); // Routine to call at vblank time + // A function pointer of OSVBLANKCALLBACKPROC + NvV32 LogicalHead; // Logical Head + NV_DECLARE_ALIGNED(NvP64 pParm1, 8); // pParm1 + NV_DECLARE_ALIGNED(NvP64 pParm2, 8); // pParm2 +} NV_VBLANK_CALLBACK_ALLOCATION_PARAMETERS; + diff --git a/src/common/sdk/nvidia/inc/class/cl9010_callback.h b/src/common/sdk/nvidia/inc/class/cl9010_callback.h new file mode 100644 index 0000000..1945b68 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9010_callback.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef SDK_CL9010_CALLBACK_H +#define SDK_CL9010_CALLBACK_H + +typedef void (*OSVBLANKCALLBACKPROC)(NvP64 pParm1, NvP64 pParm2); + +#endif // SDK_CL9010_CALLBACK_H diff --git a/src/common/sdk/nvidia/inc/class/cl902d.h b/src/common/sdk/nvidia/inc/class/cl902d.h new file mode 100644 index 0000000..38bd5cf --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl902d.h @@ -0,0 +1,1092 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl_fermi_twod_a_h_ +#define _cl_fermi_twod_a_h_ + +#define FERMI_TWOD_A 0x902D + +typedef volatile struct fermi_twod_a_struct { + NvU32 SetObject; + NvU32 Reserved_0x04[0x3F]; + NvU32 NoOperation; + NvU32 SetNotifyA; + NvU32 SetNotifyB; + NvU32 Notify; + NvU32 WaitForIdle; + NvU32 LoadMmeInstructionRamPointer; + NvU32 LoadMmeInstructionRam; + NvU32 LoadMmeStartAddressRamPointer; + NvU32 LoadMmeStartAddressRam; + NvU32 SetMmeShadowRamControl; + NvU32 Reserved_0x128[0x2]; + NvU32 SetGlobalRenderEnableA; + NvU32 SetGlobalRenderEnableB; + NvU32 SetGlobalRenderEnableC; + NvU32 SendGoIdle; + NvU32 PmTrigger; + NvU32 Reserved_0x144[0x3]; + NvU32 SetInstrumentationMethodHeader; + NvU32 SetInstrumentationMethodData; + NvU32 Reserved_0x158[0x25]; + NvU32 SetMmeSwitchState; + NvU32 Reserved_0x1F0[0x4]; + NvU32 SetDstFormat; + NvU32 SetDstMemoryLayout; + NvU32 SetDstBlockSize; + NvU32 SetDstDepth; + NvU32 SetDstLayer; + NvU32 SetDstPitch; + NvU32 SetDstWidth; + NvU32 SetDstHeight; + NvU32 SetDstOffsetUpper; + NvU32 SetDstOffsetLower; + NvU32 FlushAndInvalidateRopMiniCache; + NvU32 SetSpareNoop06; + NvU32 SetSrcFormat; + NvU32 SetSrcMemoryLayout; + NvU32 SetSrcBlockSize; + NvU32 SetSrcDepth; + NvU32 TwodInvalidateTextureDataCache; + NvU32 SetSrcPitch; + NvU32 SetSrcWidth; + NvU32 SetSrcHeight; + NvU32 SetSrcOffsetUpper; + NvU32 SetSrcOffsetLower; + NvU32 SetPixelsFromMemorySectorPromotion; + NvU32 SetSpareNoop12; + NvU32 SetNumProcessingClusters; + NvU32 SetRenderEnableA; + NvU32 SetRenderEnableB; + NvU32 SetRenderEnableC; + NvU32 SetSpareNoop08; + NvU32 SetSpareNoop01; + NvU32 SetSpareNoop11; + NvU32 SetSpareNoop07; + NvU32 SetClipX0; + NvU32 SetClipY0; + NvU32 SetClipWidth; + NvU32 SetClipHeight; + NvU32 SetClipEnable; + NvU32 SetColorKeyFormat; + NvU32 SetColorKey; + NvU32 SetColorKeyEnable; + NvU32 SetRop; + NvU32 SetBeta1; + NvU32 SetBeta4; + NvU32 SetOperation; + NvU32 SetPatternOffset; + NvU32 SetPatternSelect; + NvU32 SetDstColorRenderToZetaSurface; + NvU32 SetSpareNoop04; + NvU32 SetSpareNoop15; + NvU32 SetSpareNoop13; + NvU32 SetSpareNoop03; + NvU32 SetSpareNoop14; + NvU32 SetSpareNoop02; + NvU32 SetCompression; + NvU32 SetSpareNoop09; + NvU32 SetRenderEnableOverride; + NvU32 SetPixelsFromMemoryDirection; + NvU32 SetSpareNoop10; + NvU32 SetMonochromePatternColorFormat; + NvU32 SetMonochromePatternFormat; + NvU32 SetMonochromePatternColor0; + NvU32 SetMonochromePatternColor1; + NvU32 SetMonochromePattern0; + NvU32 SetMonochromePattern1; + NvU32 ColorPatternX8R8G8B8[0x40]; + NvU32 ColorPatternR5G6B5[0x20]; + NvU32 ColorPatternX1R5G5B5[0x20]; + NvU32 ColorPatternY8[0x10]; + NvU32 SetRenderSolidPrimColor0; + NvU32 SetRenderSolidPrimColor1; + NvU32 SetRenderSolidPrimColor2; + NvU32 SetRenderSolidPrimColor3; + NvU32 SetMmeMemAddressA; + NvU32 SetMmeMemAddressB; + NvU32 SetMmeDataRamAddress; + NvU32 MmeDmaRead; + NvU32 MmeDmaReadFifoed; + NvU32 MmeDmaWrite; + NvU32 MmeDmaReduction; + NvU32 MmeDmaSysmembar; + NvU32 MmeDmaSync; + NvU32 SetMmeDataFifoConfig; + NvU32 Reserved_0x578[0x2]; + NvU32 RenderSolidPrimMode; + NvU32 SetRenderSolidPrimColorFormat; + NvU32 SetRenderSolidPrimColor; + NvU32 SetRenderSolidLineTieBreakBits; + NvU32 Reserved_0x590[0x14]; + NvU32 RenderSolidPrimPointXY; + NvU32 Reserved_0x5E4[0x7]; + 
struct { + NvU32 SetX; + NvU32 Y; + } RenderSolidPrimPoint[0x40]; + NvU32 SetPixelsFromCpuDataType; + NvU32 SetPixelsFromCpuColorFormat; + NvU32 SetPixelsFromCpuIndexFormat; + NvU32 SetPixelsFromCpuMonoFormat; + NvU32 SetPixelsFromCpuWrap; + NvU32 SetPixelsFromCpuColor0; + NvU32 SetPixelsFromCpuColor1; + NvU32 SetPixelsFromCpuMonoOpacity; + NvU32 Reserved_0x820[0x6]; + NvU32 SetPixelsFromCpuSrcWidth; + NvU32 SetPixelsFromCpuSrcHeight; + NvU32 SetPixelsFromCpuDxDuFrac; + NvU32 SetPixelsFromCpuDxDuInt; + NvU32 SetPixelsFromCpuDyDvFrac; + NvU32 SetPixelsFromCpuDyDvInt; + NvU32 SetPixelsFromCpuDstX0Frac; + NvU32 SetPixelsFromCpuDstX0Int; + NvU32 SetPixelsFromCpuDstY0Frac; + NvU32 SetPixelsFromCpuDstY0Int; + NvU32 PixelsFromCpuData; + NvU32 Reserved_0x864[0x3]; + NvU32 SetBigEndianControl; + NvU32 Reserved_0x874[0x3]; + NvU32 SetPixelsFromMemoryBlockShape; + NvU32 SetPixelsFromMemoryCorralSize; + NvU32 SetPixelsFromMemorySafeOverlap; + NvU32 SetPixelsFromMemorySampleMode; + NvU32 Reserved_0x890[0x8]; + NvU32 SetPixelsFromMemoryDstX0; + NvU32 SetPixelsFromMemoryDstY0; + NvU32 SetPixelsFromMemoryDstWidth; + NvU32 SetPixelsFromMemoryDstHeight; + NvU32 SetPixelsFromMemoryDuDxFrac; + NvU32 SetPixelsFromMemoryDuDxInt; + NvU32 SetPixelsFromMemoryDvDyFrac; + NvU32 SetPixelsFromMemoryDvDyInt; + NvU32 SetPixelsFromMemorySrcX0Frac; + NvU32 SetPixelsFromMemorySrcX0Int; + NvU32 SetPixelsFromMemorySrcY0Frac; + NvU32 PixelsFromMemorySrcY0Int; + NvU32 SetFalcon00; + NvU32 SetFalcon01; + NvU32 SetFalcon02; + NvU32 SetFalcon03; + NvU32 SetFalcon04; + NvU32 SetFalcon05; + NvU32 SetFalcon06; + NvU32 SetFalcon07; + NvU32 SetFalcon08; + NvU32 SetFalcon09; + NvU32 SetFalcon10; + NvU32 SetFalcon11; + NvU32 SetFalcon12; + NvU32 SetFalcon13; + NvU32 SetFalcon14; + NvU32 SetFalcon15; + NvU32 SetFalcon16; + NvU32 SetFalcon17; + NvU32 SetFalcon18; + NvU32 SetFalcon19; + NvU32 SetFalcon20; + NvU32 SetFalcon21; + NvU32 SetFalcon22; + NvU32 SetFalcon23; + NvU32 SetFalcon24; + NvU32 SetFalcon25; + NvU32 SetFalcon26; + NvU32 SetFalcon27; + NvU32 SetFalcon28; + NvU32 SetFalcon29; + NvU32 SetFalcon30; + NvU32 SetFalcon31; + NvU32 Reserved_0x960[0x123]; + NvU32 MmeDmaWriteMethodBarrier; + NvU32 Reserved_0xDF0[0x984]; + NvU32 SetMmeShadowScratch[0x100]; + struct { + NvU32 Macro; + NvU32 Data; + } CallMme[0xE0]; +} fermi_twod_a_t; + + +#define NV902D_SET_OBJECT 0x0000 +#define NV902D_SET_OBJECT_CLASS_ID 15:0 +#define NV902D_SET_OBJECT_ENGINE_ID 20:16 + +#define NV902D_NO_OPERATION 0x0100 +#define NV902D_NO_OPERATION_V 31:0 + +#define NV902D_SET_NOTIFY_A 0x0104 +#define NV902D_SET_NOTIFY_A_ADDRESS_UPPER 24:0 + +#define NV902D_SET_NOTIFY_B 0x0108 +#define NV902D_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NV902D_NOTIFY 0x010c +#define NV902D_NOTIFY_TYPE 31:0 +#define NV902D_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NV902D_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NV902D_WAIT_FOR_IDLE 0x0110 +#define NV902D_WAIT_FOR_IDLE_V 31:0 + +#define NV902D_LOAD_MME_INSTRUCTION_RAM_POINTER 0x0114 +#define NV902D_LOAD_MME_INSTRUCTION_RAM_POINTER_V 31:0 + +#define NV902D_LOAD_MME_INSTRUCTION_RAM 0x0118 +#define NV902D_LOAD_MME_INSTRUCTION_RAM_V 31:0 + +#define NV902D_LOAD_MME_START_ADDRESS_RAM_POINTER 0x011c +#define NV902D_LOAD_MME_START_ADDRESS_RAM_POINTER_V 31:0 + +#define NV902D_LOAD_MME_START_ADDRESS_RAM 0x0120 +#define NV902D_LOAD_MME_START_ADDRESS_RAM_V 31:0 + +#define NV902D_SET_MME_SHADOW_RAM_CONTROL 0x0124 +#define NV902D_SET_MME_SHADOW_RAM_CONTROL_MODE 1:0 +#define NV902D_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK 0x00000000 +#define NV902D_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK_WITH_FILTER 0x00000001 +#define NV902D_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH 0x00000002 +#define NV902D_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_REPLAY 0x00000003
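Throughout these method definitions, a value such as NV902D_SET_OBJECT_ENGINE_ID 20:16 is not a constant but a high:low bit-field position within the 32-bit method data; the driver consumes these pairs with the DRF_* macro family from nvmisc.h. A self-contained illustration of the convention against the NV902D_SET_OBJECT fields above (field_val() is an editorial stand-in for DRF_VAL, and the payload value is hypothetical):

#include <stdint.h>
#include <assert.h>

/* Extract bits hi..lo of a 32-bit method value, as the "hi:lo" fields imply. */
static uint32_t field_val(uint32_t v, unsigned hi, unsigned lo)
{
    return (v >> lo) & ((1u << (hi - lo + 1)) - 1u);
}

int main(void)
{
    uint32_t data = 0x0001902Du;               /* hypothetical SET_OBJECT payload */
    assert(field_val(data, 15, 0) == 0x902D);  /* CLASS_ID,  bits 15:0  */
    assert(field_val(data, 20, 16) == 0x01);   /* ENGINE_ID, bits 20:16 */
    return 0;
}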
+#define NV902D_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NV902D_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NV902D_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NV902D_SEND_GO_IDLE 0x013c +#define NV902D_SEND_GO_IDLE_V 31:0 + +#define NV902D_PM_TRIGGER 0x0140 +#define NV902D_PM_TRIGGER_V 31:0 + +#define NV902D_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NV902D_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NV902D_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NV902D_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NV902D_SET_MME_SWITCH_STATE 0x01ec +#define NV902D_SET_MME_SWITCH_STATE_VALID 0:0 +#define NV902D_SET_MME_SWITCH_STATE_VALID_FALSE 0x00000000 +#define NV902D_SET_MME_SWITCH_STATE_VALID_TRUE 0x00000001 +#define NV902D_SET_MME_SWITCH_STATE_SAVE_MACRO 11:4 +#define NV902D_SET_MME_SWITCH_STATE_RESTORE_MACRO 19:12 + +#define NV902D_SET_DST_FORMAT 0x0200 +#define NV902D_SET_DST_FORMAT_V 7:0 +#define NV902D_SET_DST_FORMAT_V_A8R8G8B8 0x000000CF +#define NV902D_SET_DST_FORMAT_V_A8RL8GL8BL8 0x000000D0 +#define NV902D_SET_DST_FORMAT_V_A2R10G10B10 0x000000DF +#define NV902D_SET_DST_FORMAT_V_A8B8G8R8 0x000000D5 +#define NV902D_SET_DST_FORMAT_V_A8BL8GL8RL8 0x000000D6 +#define NV902D_SET_DST_FORMAT_V_A2B10G10R10 0x000000D1 +#define NV902D_SET_DST_FORMAT_V_X8R8G8B8 0x000000E6 +#define NV902D_SET_DST_FORMAT_V_X8RL8GL8BL8 0x000000E7 +#define NV902D_SET_DST_FORMAT_V_X8B8G8R8 0x000000F9 +#define NV902D_SET_DST_FORMAT_V_X8BL8GL8RL8 0x000000FA +#define NV902D_SET_DST_FORMAT_V_R5G6B5 0x000000E8 +#define NV902D_SET_DST_FORMAT_V_A1R5G5B5 0x000000E9 +#define NV902D_SET_DST_FORMAT_V_X1R5G5B5 0x000000F8 +#define NV902D_SET_DST_FORMAT_V_Y8 0x000000F3 +#define NV902D_SET_DST_FORMAT_V_Y16 0x000000EE +#define NV902D_SET_DST_FORMAT_V_Y32 0x000000FF +#define NV902D_SET_DST_FORMAT_V_Z1R5G5B5 0x000000FB +#define NV902D_SET_DST_FORMAT_V_O1R5G5B5 0x000000FC +#define NV902D_SET_DST_FORMAT_V_Z8R8G8B8 0x000000FD +#define NV902D_SET_DST_FORMAT_V_O8R8G8B8 0x000000FE +#define NV902D_SET_DST_FORMAT_V_Y1_8X8 0x0000001C +#define NV902D_SET_DST_FORMAT_V_RF16 0x000000F2 +#define NV902D_SET_DST_FORMAT_V_RF32 0x000000E5 +#define NV902D_SET_DST_FORMAT_V_RF32_GF32 0x000000CB +#define NV902D_SET_DST_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA +#define NV902D_SET_DST_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE +#define NV902D_SET_DST_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0 +#define NV902D_SET_DST_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3 +#define NV902D_SET_DST_FORMAT_V_R16_G16_B16_A16 0x000000C6 +#define NV902D_SET_DST_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7 +#define NV902D_SET_DST_FORMAT_V_BF10GF11RF11 0x000000E0 +#define NV902D_SET_DST_FORMAT_V_AN8BN8GN8RN8 0x000000D7 +#define NV902D_SET_DST_FORMAT_V_RF16_GF16 0x000000DE +#define NV902D_SET_DST_FORMAT_V_R16_G16 0x000000DA
+#define NV902D_SET_DST_FORMAT_V_RN16_GN16 0x000000DB +#define NV902D_SET_DST_FORMAT_V_G8R8 0x000000EA +#define NV902D_SET_DST_FORMAT_V_GN8RN8 0x000000EB +#define NV902D_SET_DST_FORMAT_V_RN16 0x000000EF +#define NV902D_SET_DST_FORMAT_V_RN8 0x000000F4 +#define NV902D_SET_DST_FORMAT_V_A8 0x000000F7 + +#define NV902D_SET_DST_MEMORY_LAYOUT 0x0204 +#define NV902D_SET_DST_MEMORY_LAYOUT_V 0:0 +#define NV902D_SET_DST_MEMORY_LAYOUT_V_BLOCKLINEAR 0x00000000 +#define NV902D_SET_DST_MEMORY_LAYOUT_V_PITCH 0x00000001 + +#define NV902D_SET_DST_BLOCK_SIZE 0x0208 +#define NV902D_SET_DST_BLOCK_SIZE_HEIGHT 6:4 +#define NV902D_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NV902D_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NV902D_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NV902D_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NV902D_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NV902D_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NV902D_SET_DST_BLOCK_SIZE_DEPTH 10:8 +#define NV902D_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NV902D_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NV902D_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NV902D_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NV902D_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NV902D_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NV902D_SET_DST_DEPTH 0x020c +#define NV902D_SET_DST_DEPTH_V 31:0 + +#define NV902D_SET_DST_LAYER 0x0210 +#define NV902D_SET_DST_LAYER_V 31:0 + +#define NV902D_SET_DST_PITCH 0x0214 +#define NV902D_SET_DST_PITCH_V 31:0 + +#define NV902D_SET_DST_WIDTH 0x0218 +#define NV902D_SET_DST_WIDTH_V 31:0 + +#define NV902D_SET_DST_HEIGHT 0x021c +#define NV902D_SET_DST_HEIGHT_V 31:0 + +#define NV902D_SET_DST_OFFSET_UPPER 0x0220 +#define NV902D_SET_DST_OFFSET_UPPER_V 7:0 + +#define NV902D_SET_DST_OFFSET_LOWER 0x0224 +#define NV902D_SET_DST_OFFSET_LOWER_V 31:0 + +#define NV902D_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE 0x0228 +#define NV902D_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE_V 0:0 + +#define NV902D_SET_SPARE_NOOP06 0x022c +#define NV902D_SET_SPARE_NOOP06_V 31:0 + +#define NV902D_SET_SRC_FORMAT 0x0230 +#define NV902D_SET_SRC_FORMAT_V 7:0 +#define NV902D_SET_SRC_FORMAT_V_A8R8G8B8 0x000000CF +#define NV902D_SET_SRC_FORMAT_V_A8RL8GL8BL8 0x000000D0 +#define NV902D_SET_SRC_FORMAT_V_A2R10G10B10 0x000000DF +#define NV902D_SET_SRC_FORMAT_V_A8B8G8R8 0x000000D5 +#define NV902D_SET_SRC_FORMAT_V_A8BL8GL8RL8 0x000000D6 +#define NV902D_SET_SRC_FORMAT_V_A2B10G10R10 0x000000D1 +#define NV902D_SET_SRC_FORMAT_V_X8R8G8B8 0x000000E6 +#define NV902D_SET_SRC_FORMAT_V_X8RL8GL8BL8 0x000000E7 +#define NV902D_SET_SRC_FORMAT_V_X8B8G8R8 0x000000F9 +#define NV902D_SET_SRC_FORMAT_V_X8BL8GL8RL8 0x000000FA +#define NV902D_SET_SRC_FORMAT_V_R5G6B5 0x000000E8 +#define NV902D_SET_SRC_FORMAT_V_A1R5G5B5 0x000000E9 +#define NV902D_SET_SRC_FORMAT_V_X1R5G5B5 0x000000F8 +#define NV902D_SET_SRC_FORMAT_V_Y8 0x000000F3 +#define NV902D_SET_SRC_FORMAT_V_AY8 0x0000001D +#define NV902D_SET_SRC_FORMAT_V_Y16 0x000000EE +#define NV902D_SET_SRC_FORMAT_V_Y32 0x000000FF +#define NV902D_SET_SRC_FORMAT_V_Z1R5G5B5 0x000000FB +#define NV902D_SET_SRC_FORMAT_V_O1R5G5B5 0x000000FC +#define NV902D_SET_SRC_FORMAT_V_Z8R8G8B8 0x000000FD +#define NV902D_SET_SRC_FORMAT_V_O8R8G8B8 0x000000FE +#define NV902D_SET_SRC_FORMAT_V_Y1_8X8 0x0000001C +#define NV902D_SET_SRC_FORMAT_V_RF16 0x000000F2 +#define NV902D_SET_SRC_FORMAT_V_RF32 0x000000E5 +#define NV902D_SET_SRC_FORMAT_V_RF32_GF32 
0x000000CB +#define NV902D_SET_SRC_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA +#define NV902D_SET_SRC_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE +#define NV902D_SET_SRC_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0 +#define NV902D_SET_SRC_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3 +#define NV902D_SET_SRC_FORMAT_V_R16_G16_B16_A16 0x000000C6 +#define NV902D_SET_SRC_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7 +#define NV902D_SET_SRC_FORMAT_V_BF10GF11RF11 0x000000E0 +#define NV902D_SET_SRC_FORMAT_V_AN8BN8GN8RN8 0x000000D7 +#define NV902D_SET_SRC_FORMAT_V_RF16_GF16 0x000000DE +#define NV902D_SET_SRC_FORMAT_V_R16_G16 0x000000DA +#define NV902D_SET_SRC_FORMAT_V_RN16_GN16 0x000000DB +#define NV902D_SET_SRC_FORMAT_V_G8R8 0x000000EA +#define NV902D_SET_SRC_FORMAT_V_GN8RN8 0x000000EB +#define NV902D_SET_SRC_FORMAT_V_RN16 0x000000EF +#define NV902D_SET_SRC_FORMAT_V_RN8 0x000000F4 +#define NV902D_SET_SRC_FORMAT_V_A8 0x000000F7 + +#define NV902D_SET_SRC_MEMORY_LAYOUT 0x0234 +#define NV902D_SET_SRC_MEMORY_LAYOUT_V 0:0 +#define NV902D_SET_SRC_MEMORY_LAYOUT_V_BLOCKLINEAR 0x00000000 +#define NV902D_SET_SRC_MEMORY_LAYOUT_V_PITCH 0x00000001 + +#define NV902D_SET_SRC_BLOCK_SIZE 0x0238 +#define NV902D_SET_SRC_BLOCK_SIZE_HEIGHT 6:4 +#define NV902D_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NV902D_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NV902D_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NV902D_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NV902D_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NV902D_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NV902D_SET_SRC_BLOCK_SIZE_DEPTH 10:8 +#define NV902D_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NV902D_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NV902D_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NV902D_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NV902D_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NV902D_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NV902D_SET_SRC_DEPTH 0x023c +#define NV902D_SET_SRC_DEPTH_V 31:0 + +#define NV902D_TWOD_INVALIDATE_TEXTURE_DATA_CACHE 0x0240 +#define NV902D_TWOD_INVALIDATE_TEXTURE_DATA_CACHE_V 1:0 +#define NV902D_TWOD_INVALIDATE_TEXTURE_DATA_CACHE_V_L1_ONLY 0x00000000 +#define NV902D_TWOD_INVALIDATE_TEXTURE_DATA_CACHE_V_L2_ONLY 0x00000001 +#define NV902D_TWOD_INVALIDATE_TEXTURE_DATA_CACHE_V_L1_AND_L2 0x00000002 + +#define NV902D_SET_SRC_PITCH 0x0244 +#define NV902D_SET_SRC_PITCH_V 31:0 + +#define NV902D_SET_SRC_WIDTH 0x0248 +#define NV902D_SET_SRC_WIDTH_V 31:0 + +#define NV902D_SET_SRC_HEIGHT 0x024c +#define NV902D_SET_SRC_HEIGHT_V 31:0 + +#define NV902D_SET_SRC_OFFSET_UPPER 0x0250 +#define NV902D_SET_SRC_OFFSET_UPPER_V 7:0 + +#define NV902D_SET_SRC_OFFSET_LOWER 0x0254 +#define NV902D_SET_SRC_OFFSET_LOWER_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_SECTOR_PROMOTION 0x0258 +#define NV902D_SET_PIXELS_FROM_MEMORY_SECTOR_PROMOTION_V 1:0 +#define NV902D_SET_PIXELS_FROM_MEMORY_SECTOR_PROMOTION_V_NO_PROMOTION 0x00000000 +#define NV902D_SET_PIXELS_FROM_MEMORY_SECTOR_PROMOTION_V_PROMOTE_TO_2_V 0x00000001 +#define NV902D_SET_PIXELS_FROM_MEMORY_SECTOR_PROMOTION_V_PROMOTE_TO_2_H 0x00000002 +#define NV902D_SET_PIXELS_FROM_MEMORY_SECTOR_PROMOTION_V_PROMOTE_TO_4 0x00000003 + +#define NV902D_SET_SPARE_NOOP12 0x025c +#define NV902D_SET_SPARE_NOOP12_V 31:0 + +#define NV902D_SET_NUM_PROCESSING_CLUSTERS 0x0260 +#define NV902D_SET_NUM_PROCESSING_CLUSTERS_V 0:0 +#define NV902D_SET_NUM_PROCESSING_CLUSTERS_V_ALL 0x00000000 +#define 
NV902D_SET_NUM_PROCESSING_CLUSTERS_V_ONE 0x00000001 + +#define NV902D_SET_RENDER_ENABLE_A 0x0264 +#define NV902D_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NV902D_SET_RENDER_ENABLE_B 0x0268 +#define NV902D_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NV902D_SET_RENDER_ENABLE_C 0x026c +#define NV902D_SET_RENDER_ENABLE_C_MODE 2:0 +#define NV902D_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NV902D_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NV902D_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NV902D_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NV902D_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NV902D_SET_SPARE_NOOP08 0x0270 +#define NV902D_SET_SPARE_NOOP08_V 31:0 + +#define NV902D_SET_SPARE_NOOP01 0x0274 +#define NV902D_SET_SPARE_NOOP01_V 31:0 + +#define NV902D_SET_SPARE_NOOP11 0x0278 +#define NV902D_SET_SPARE_NOOP11_V 31:0 + +#define NV902D_SET_SPARE_NOOP07 0x027c +#define NV902D_SET_SPARE_NOOP07_V 31:0 + +#define NV902D_SET_CLIP_X0 0x0280 +#define NV902D_SET_CLIP_X0_V 31:0 + +#define NV902D_SET_CLIP_Y0 0x0284 +#define NV902D_SET_CLIP_Y0_V 31:0 + +#define NV902D_SET_CLIP_WIDTH 0x0288 +#define NV902D_SET_CLIP_WIDTH_V 31:0 + +#define NV902D_SET_CLIP_HEIGHT 0x028c +#define NV902D_SET_CLIP_HEIGHT_V 31:0 + +#define NV902D_SET_CLIP_ENABLE 0x0290 +#define NV902D_SET_CLIP_ENABLE_V 0:0 +#define NV902D_SET_CLIP_ENABLE_V_FALSE 0x00000000 +#define NV902D_SET_CLIP_ENABLE_V_TRUE 0x00000001 + +#define NV902D_SET_COLOR_KEY_FORMAT 0x0294 +#define NV902D_SET_COLOR_KEY_FORMAT_V 2:0 +#define NV902D_SET_COLOR_KEY_FORMAT_V_A16R5G6B5 0x00000000 +#define NV902D_SET_COLOR_KEY_FORMAT_V_A1R5G5B5 0x00000001 +#define NV902D_SET_COLOR_KEY_FORMAT_V_A8R8G8B8 0x00000002 +#define NV902D_SET_COLOR_KEY_FORMAT_V_A2R10G10B10 0x00000003 +#define NV902D_SET_COLOR_KEY_FORMAT_V_Y8 0x00000004 +#define NV902D_SET_COLOR_KEY_FORMAT_V_Y16 0x00000005 +#define NV902D_SET_COLOR_KEY_FORMAT_V_Y32 0x00000006 + +#define NV902D_SET_COLOR_KEY 0x0298 +#define NV902D_SET_COLOR_KEY_V 31:0 + +#define NV902D_SET_COLOR_KEY_ENABLE 0x029c +#define NV902D_SET_COLOR_KEY_ENABLE_V 0:0 +#define NV902D_SET_COLOR_KEY_ENABLE_V_FALSE 0x00000000 +#define NV902D_SET_COLOR_KEY_ENABLE_V_TRUE 0x00000001 + +#define NV902D_SET_ROP 0x02a0 +#define NV902D_SET_ROP_V 7:0 + +#define NV902D_SET_BETA1 0x02a4 +#define NV902D_SET_BETA1_V 31:0 + +#define NV902D_SET_BETA4 0x02a8 +#define NV902D_SET_BETA4_B 7:0 +#define NV902D_SET_BETA4_G 15:8 +#define NV902D_SET_BETA4_R 23:16 +#define NV902D_SET_BETA4_A 31:24 + +#define NV902D_SET_OPERATION 0x02ac +#define NV902D_SET_OPERATION_V 2:0 +#define NV902D_SET_OPERATION_V_SRCCOPY_AND 0x00000000 +#define NV902D_SET_OPERATION_V_ROP_AND 0x00000001 +#define NV902D_SET_OPERATION_V_BLEND_AND 0x00000002 +#define NV902D_SET_OPERATION_V_SRCCOPY 0x00000003 +#define NV902D_SET_OPERATION_V_ROP 0x00000004 +#define NV902D_SET_OPERATION_V_SRCCOPY_PREMULT 0x00000005 +#define NV902D_SET_OPERATION_V_BLEND_PREMULT 0x00000006 + +#define NV902D_SET_PATTERN_OFFSET 0x02b0 +#define NV902D_SET_PATTERN_OFFSET_X 5:0 +#define NV902D_SET_PATTERN_OFFSET_Y 13:8 + +#define NV902D_SET_PATTERN_SELECT 0x02b4 +#define NV902D_SET_PATTERN_SELECT_V 1:0 +#define NV902D_SET_PATTERN_SELECT_V_MONOCHROME_8x8 0x00000000 +#define NV902D_SET_PATTERN_SELECT_V_MONOCHROME_64x1 0x00000001 +#define NV902D_SET_PATTERN_SELECT_V_MONOCHROME_1x64 0x00000002 +#define NV902D_SET_PATTERN_SELECT_V_COLOR 0x00000003 + +#define NV902D_SET_DST_COLOR_RENDER_TO_ZETA_SURFACE 0x02b8 +#define NV902D_SET_DST_COLOR_RENDER_TO_ZETA_SURFACE_V 
0:0 +#define NV902D_SET_DST_COLOR_RENDER_TO_ZETA_SURFACE_V_FALSE 0x00000000 +#define NV902D_SET_DST_COLOR_RENDER_TO_ZETA_SURFACE_V_TRUE 0x00000001 + +#define NV902D_SET_SPARE_NOOP04 0x02bc +#define NV902D_SET_SPARE_NOOP04_V 31:0 + +#define NV902D_SET_SPARE_NOOP15 0x02c0 +#define NV902D_SET_SPARE_NOOP15_V 31:0 + +#define NV902D_SET_SPARE_NOOP13 0x02c4 +#define NV902D_SET_SPARE_NOOP13_V 31:0 + +#define NV902D_SET_SPARE_NOOP03 0x02c8 +#define NV902D_SET_SPARE_NOOP03_V 31:0 + +#define NV902D_SET_SPARE_NOOP14 0x02cc +#define NV902D_SET_SPARE_NOOP14_V 31:0 + +#define NV902D_SET_SPARE_NOOP02 0x02d0 +#define NV902D_SET_SPARE_NOOP02_V 31:0 + +#define NV902D_SET_COMPRESSION 0x02d4 +#define NV902D_SET_COMPRESSION_ENABLE 0:0 +#define NV902D_SET_COMPRESSION_ENABLE_FALSE 0x00000000 +#define NV902D_SET_COMPRESSION_ENABLE_TRUE 0x00000001 + +#define NV902D_SET_SPARE_NOOP09 0x02d8 +#define NV902D_SET_SPARE_NOOP09_V 31:0 + +#define NV902D_SET_RENDER_ENABLE_OVERRIDE 0x02dc +#define NV902D_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define NV902D_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define NV902D_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define NV902D_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION 0x02e0 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_HORIZONTAL 1:0 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_HORIZONTAL_HW_DECIDES 0x00000000 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_HORIZONTAL_LEFT_TO_RIGHT 0x00000001 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_HORIZONTAL_RIGHT_TO_LEFT 0x00000002 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_VERTICAL 5:4 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_VERTICAL_HW_DECIDES 0x00000000 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_VERTICAL_TOP_TO_BOTTOM 0x00000001 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_VERTICAL_BOTTOM_TO_TOP 0x00000002 + +#define NV902D_SET_SPARE_NOOP10 0x02e4 +#define NV902D_SET_SPARE_NOOP10_V 31:0 + +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT 0x02e8 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V 2:0 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8X8R5G6B5 0x00000000 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A1R5G5B5 0x00000001 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8R8G8B8 0x00000002 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8Y8 0x00000003 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8X8Y16 0x00000004 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_Y32 0x00000005 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_BYTE_EXPAND 0x00000006 + +#define NV902D_SET_MONOCHROME_PATTERN_FORMAT 0x02ec +#define NV902D_SET_MONOCHROME_PATTERN_FORMAT_V 0:0 +#define NV902D_SET_MONOCHROME_PATTERN_FORMAT_V_CGA6_M1 0x00000000 +#define NV902D_SET_MONOCHROME_PATTERN_FORMAT_V_LE_M1 0x00000001 + +#define NV902D_SET_MONOCHROME_PATTERN_COLOR0 0x02f0 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR0_V 31:0 + +#define NV902D_SET_MONOCHROME_PATTERN_COLOR1 0x02f4 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR1_V 31:0 + +#define NV902D_SET_MONOCHROME_PATTERN0 0x02f8 +#define NV902D_SET_MONOCHROME_PATTERN0_V 31:0 + +#define NV902D_SET_MONOCHROME_PATTERN1 0x02fc +#define NV902D_SET_MONOCHROME_PATTERN1_V 31:0 + +#define NV902D_COLOR_PATTERN_X8R8G8B8(i) (0x0300+(i)*4) +#define NV902D_COLOR_PATTERN_X8R8G8B8_B0 7:0 +#define NV902D_COLOR_PATTERN_X8R8G8B8_G0 15:8 +#define NV902D_COLOR_PATTERN_X8R8G8B8_R0 23:16 +#define NV902D_COLOR_PATTERN_X8R8G8B8_IGNORE0 31:24 + +#define 
NV902D_COLOR_PATTERN_R5G6B5(i) (0x0400+(i)*4) +#define NV902D_COLOR_PATTERN_R5G6B5_B0 4:0 +#define NV902D_COLOR_PATTERN_R5G6B5_G0 10:5 +#define NV902D_COLOR_PATTERN_R5G6B5_R0 15:11 +#define NV902D_COLOR_PATTERN_R5G6B5_B1 20:16 +#define NV902D_COLOR_PATTERN_R5G6B5_G1 26:21 +#define NV902D_COLOR_PATTERN_R5G6B5_R1 31:27 + +#define NV902D_COLOR_PATTERN_X1R5G5B5(i) (0x0480+(i)*4) +#define NV902D_COLOR_PATTERN_X1R5G5B5_B0 4:0 +#define NV902D_COLOR_PATTERN_X1R5G5B5_G0 9:5 +#define NV902D_COLOR_PATTERN_X1R5G5B5_R0 14:10 +#define NV902D_COLOR_PATTERN_X1R5G5B5_IGNORE0 15:15 +#define NV902D_COLOR_PATTERN_X1R5G5B5_B1 20:16 +#define NV902D_COLOR_PATTERN_X1R5G5B5_G1 25:21 +#define NV902D_COLOR_PATTERN_X1R5G5B5_R1 30:26 +#define NV902D_COLOR_PATTERN_X1R5G5B5_IGNORE1 31:31 + +#define NV902D_COLOR_PATTERN_Y8(i) (0x0500+(i)*4) +#define NV902D_COLOR_PATTERN_Y8_Y0 7:0 +#define NV902D_COLOR_PATTERN_Y8_Y1 15:8 +#define NV902D_COLOR_PATTERN_Y8_Y2 23:16 +#define NV902D_COLOR_PATTERN_Y8_Y3 31:24 + +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR0 0x0540 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR0_V 31:0 + +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR1 0x0544 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR1_V 31:0 + +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR2 0x0548 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR2_V 31:0 + +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR3 0x054c +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR3_V 31:0 + +#define NV902D_SET_MME_MEM_ADDRESS_A 0x0550 +#define NV902D_SET_MME_MEM_ADDRESS_A_UPPER 24:0 + +#define NV902D_SET_MME_MEM_ADDRESS_B 0x0554 +#define NV902D_SET_MME_MEM_ADDRESS_B_LOWER 31:0 + +#define NV902D_SET_MME_DATA_RAM_ADDRESS 0x0558 +#define NV902D_SET_MME_DATA_RAM_ADDRESS_WORD 31:0 + +#define NV902D_MME_DMA_READ 0x055c +#define NV902D_MME_DMA_READ_LENGTH 31:0 + +#define NV902D_MME_DMA_READ_FIFOED 0x0560 +#define NV902D_MME_DMA_READ_FIFOED_LENGTH 31:0 + +#define NV902D_MME_DMA_WRITE 0x0564 +#define NV902D_MME_DMA_WRITE_LENGTH 31:0 + +#define NV902D_MME_DMA_REDUCTION 0x0568 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP 2:0 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_ADD 0x00000000 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_MIN 0x00000001 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_MAX 0x00000002 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_INC 0x00000003 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_DEC 0x00000004 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_AND 0x00000005 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_OR 0x00000006 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_XOR 0x00000007 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_FORMAT 5:4 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_FORMAT_UNSIGNED 0x00000000 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_FORMAT_SIGNED 0x00000001 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_SIZE 8:8 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_SIZE_FOUR_BYTES 0x00000000 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_SIZE_EIGHT_BYTES 0x00000001 + +#define NV902D_MME_DMA_SYSMEMBAR 0x056c +#define NV902D_MME_DMA_SYSMEMBAR_V 0:0 + +#define NV902D_MME_DMA_SYNC 0x0570 +#define NV902D_MME_DMA_SYNC_VALUE 31:0 + +#define NV902D_SET_MME_DATA_FIFO_CONFIG 0x0574 +#define NV902D_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE 2:0 +#define NV902D_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_0KB 0x00000000 +#define NV902D_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_4KB 0x00000001 +#define NV902D_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_8KB 0x00000002 +#define NV902D_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_12KB 0x00000003 +#define 
NV902D_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_16KB 0x00000004 + +#define NV902D_RENDER_SOLID_PRIM_MODE 0x0580 +#define NV902D_RENDER_SOLID_PRIM_MODE_V 2:0 +#define NV902D_RENDER_SOLID_PRIM_MODE_V_POINTS 0x00000000 +#define NV902D_RENDER_SOLID_PRIM_MODE_V_LINES 0x00000001 +#define NV902D_RENDER_SOLID_PRIM_MODE_V_POLYLINE 0x00000002 +#define NV902D_RENDER_SOLID_PRIM_MODE_V_TRIANGLES 0x00000003 +#define NV902D_RENDER_SOLID_PRIM_MODE_V_RECTS 0x00000004 + +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT 0x0584 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V 7:0 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_RF32_GF32 0x000000CB +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A8R8G8B8 0x000000CF +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A2R10G10B10 0x000000DF +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A8B8G8R8 0x000000D5 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A2B10G10R10 0x000000D1 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X8R8G8B8 0x000000E6 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X8B8G8R8 0x000000F9 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_R5G6B5 0x000000E8 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A1R5G5B5 0x000000E9 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X1R5G5B5 0x000000F8 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y8 0x000000F3 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y16 0x000000EE +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y32 0x000000FF +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Z1R5G5B5 0x000000FB +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_O1R5G5B5 0x000000FC +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Z8R8G8B8 0x000000FD +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_O8R8G8B8 0x000000FE + +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR 0x0588 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_V 31:0 + +#define NV902D_SET_RENDER_SOLID_LINE_TIE_BREAK_BITS 0x058c +#define NV902D_SET_RENDER_SOLID_LINE_TIE_BREAK_BITS_XMAJ__XINC__YINC 0:0 +#define NV902D_SET_RENDER_SOLID_LINE_TIE_BREAK_BITS_XMAJ__XDEC__YINC 4:4 +#define NV902D_SET_RENDER_SOLID_LINE_TIE_BREAK_BITS_YMAJ__XINC__YINC 8:8 +#define NV902D_SET_RENDER_SOLID_LINE_TIE_BREAK_BITS_YMAJ__XDEC__YINC 12:12 + +#define NV902D_RENDER_SOLID_PRIM_POINT_X_Y 0x05e0 +#define NV902D_RENDER_SOLID_PRIM_POINT_X_Y_X 15:0 +#define NV902D_RENDER_SOLID_PRIM_POINT_X_Y_Y 31:16 + +#define NV902D_RENDER_SOLID_PRIM_POINT_SET_X(j) (0x0600+(j)*8) +#define NV902D_RENDER_SOLID_PRIM_POINT_SET_X_V 31:0 + +#define NV902D_RENDER_SOLID_PRIM_POINT_Y(j) (0x0604+(j)*8) +#define NV902D_RENDER_SOLID_PRIM_POINT_Y_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE 0x0800 +#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE_V 0:0 +#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE_V_COLOR 0x00000000 +#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE_V_INDEX 0x00000001 + +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT 0x0804 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V 7:0 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A8R8G8B8 0x000000CF +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A2R10G10B10 0x000000DF +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A8B8G8R8 0x000000D5 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A2B10G10R10 0x000000D1 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X8R8G8B8 0x000000E6 +#define 
NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X8B8G8R8 0x000000F9 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_R5G6B5 0x000000E8 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A1R5G5B5 0x000000E9 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X1R5G5B5 0x000000F8 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y8 0x000000F3 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y16 0x000000EE +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y32 0x000000FF +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Z1R5G5B5 0x000000FB +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_O1R5G5B5 0x000000FC +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Z8R8G8B8 0x000000FD +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_O8R8G8B8 0x000000FE + +#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT 0x0808 +#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V 1:0 +#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I1 0x00000000 +#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I4 0x00000001 +#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I8 0x00000002 + +#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT 0x080c +#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V 0:0 +#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V_CGA6_M1 0x00000000 +#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V_LE_M1 0x00000001 + +#define NV902D_SET_PIXELS_FROM_CPU_WRAP 0x0810 +#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V 1:0 +#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_PIXEL 0x00000000 +#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_BYTE 0x00000001 +#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_DWORD 0x00000002 + +#define NV902D_SET_PIXELS_FROM_CPU_COLOR0 0x0814 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR0_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_COLOR1 0x0818 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR1_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY 0x081c +#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V 0:0 +#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V_TRANSPARENT 0x00000000 +#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V_OPAQUE 0x00000001 + +#define NV902D_SET_PIXELS_FROM_CPU_SRC_WIDTH 0x0838 +#define NV902D_SET_PIXELS_FROM_CPU_SRC_WIDTH_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_SRC_HEIGHT 0x083c +#define NV902D_SET_PIXELS_FROM_CPU_SRC_HEIGHT_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_FRAC 0x0840 +#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_FRAC_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_INT 0x0844 +#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_INT_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_FRAC 0x0848 +#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_FRAC_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_INT 0x084c +#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_INT_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_FRAC 0x0850 +#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_FRAC_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_INT 0x0854 +#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_INT_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_FRAC 0x0858 +#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_FRAC_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_INT 0x085c +#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_INT_V 31:0 + +#define NV902D_PIXELS_FROM_CPU_DATA 0x0860 +#define NV902D_PIXELS_FROM_CPU_DATA_V 31:0 + +#define NV902D_SET_BIG_ENDIAN_CONTROL 0x0870 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X32_SWAP_1 0:0 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X32_SWAP_4 1:1 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X32_SWAP_8 2:2 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X32_SWAP_16 3:3 +#define 
NV902D_SET_BIG_ENDIAN_CONTROL_X16_SWAP_1 4:4 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X16_SWAP_4 5:5 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X16_SWAP_8 6:6 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X16_SWAP_16 7:7 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X8_SWAP_1 8:8 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X8_SWAP_4 9:9 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X8_SWAP_8 10:10 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X8_SWAP_16 11:11 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_CGA6_SWAP_1 12:12 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_CGA6_SWAP_4 13:13 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_CGA6_SWAP_8 14:14 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_CGA6_SWAP_16 15:15 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_LE_SWAP_1 16:16 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_LE_SWAP_4 17:17 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_LE_SWAP_8 18:18 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_LE_SWAP_16 19:19 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I4_SWAP_1 20:20 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I4_SWAP_4 21:21 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I4_SWAP_8 22:22 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I4_SWAP_16 23:23 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I8_SWAP_1 24:24 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I8_SWAP_4 25:25 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I8_SWAP_8 26:26 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I8_SWAP_16 27:27 +#define NV902D_SET_BIG_ENDIAN_CONTROL_OVERRIDE 28:28 + +#define NV902D_SET_PIXELS_FROM_MEMORY_BLOCK_SHAPE 0x0880 +#define NV902D_SET_PIXELS_FROM_MEMORY_BLOCK_SHAPE_V 2:0 +#define NV902D_SET_PIXELS_FROM_MEMORY_BLOCK_SHAPE_V_AUTO 0x00000000 +#define NV902D_SET_PIXELS_FROM_MEMORY_BLOCK_SHAPE_V_SHAPE_8X8 0x00000001 +#define NV902D_SET_PIXELS_FROM_MEMORY_BLOCK_SHAPE_V_SHAPE_16X4 0x00000002 + +#define NV902D_SET_PIXELS_FROM_MEMORY_CORRAL_SIZE 0x0884 +#define NV902D_SET_PIXELS_FROM_MEMORY_CORRAL_SIZE_V 9:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP 0x0888 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V 0:0 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V_FALSE 0x00000000 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V_TRUE 0x00000001 + +#define NV902D_SET_PIXELS_FROM_MEMORY_SAMPLE_MODE 0x088c +#define NV902D_SET_PIXELS_FROM_MEMORY_SAMPLE_MODE_ORIGIN 0:0 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAMPLE_MODE_ORIGIN_CENTER 0x00000000 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAMPLE_MODE_ORIGIN_CORNER 0x00000001 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAMPLE_MODE_FILTER 4:4 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAMPLE_MODE_FILTER_POINT 0x00000000 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAMPLE_MODE_FILTER_BILINEAR 0x00000001 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_X0 0x08b0 +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_X0_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_Y0 0x08b4 +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_Y0_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_WIDTH 0x08b8 +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_WIDTH_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_HEIGHT 0x08bc +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_HEIGHT_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_FRAC 0x08c0 +#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_FRAC_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_INT 0x08c4 +#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_INT_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_FRAC 0x08c8 +#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_FRAC_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_INT 0x08cc +#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_INT_V 31:0 + +#define 
NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC 0x08d0 +#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_INT 0x08d4 +#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_INT_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC 0x08d8 +#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC_V 31:0 + +#define NV902D_PIXELS_FROM_MEMORY_SRC_Y0_INT 0x08dc +#define NV902D_PIXELS_FROM_MEMORY_SRC_Y0_INT_V 31:0 + +#define NV902D_SET_FALCON00 0x08e0 +#define NV902D_SET_FALCON00_V 31:0 + +#define NV902D_SET_FALCON01 0x08e4 +#define NV902D_SET_FALCON01_V 31:0 + +#define NV902D_SET_FALCON02 0x08e8 +#define NV902D_SET_FALCON02_V 31:0 + +#define NV902D_SET_FALCON03 0x08ec +#define NV902D_SET_FALCON03_V 31:0 + +#define NV902D_SET_FALCON04 0x08f0 +#define NV902D_SET_FALCON04_V 31:0 + +#define NV902D_SET_FALCON05 0x08f4 +#define NV902D_SET_FALCON05_V 31:0 + +#define NV902D_SET_FALCON06 0x08f8 +#define NV902D_SET_FALCON06_V 31:0 + +#define NV902D_SET_FALCON07 0x08fc +#define NV902D_SET_FALCON07_V 31:0 + +#define NV902D_SET_FALCON08 0x0900 +#define NV902D_SET_FALCON08_V 31:0 + +#define NV902D_SET_FALCON09 0x0904 +#define NV902D_SET_FALCON09_V 31:0 + +#define NV902D_SET_FALCON10 0x0908 +#define NV902D_SET_FALCON10_V 31:0 + +#define NV902D_SET_FALCON11 0x090c +#define NV902D_SET_FALCON11_V 31:0 + +#define NV902D_SET_FALCON12 0x0910 +#define NV902D_SET_FALCON12_V 31:0 + +#define NV902D_SET_FALCON13 0x0914 +#define NV902D_SET_FALCON13_V 31:0 + +#define NV902D_SET_FALCON14 0x0918 +#define NV902D_SET_FALCON14_V 31:0 + +#define NV902D_SET_FALCON15 0x091c +#define NV902D_SET_FALCON15_V 31:0 + +#define NV902D_SET_FALCON16 0x0920 +#define NV902D_SET_FALCON16_V 31:0 + +#define NV902D_SET_FALCON17 0x0924 +#define NV902D_SET_FALCON17_V 31:0 + +#define NV902D_SET_FALCON18 0x0928 +#define NV902D_SET_FALCON18_V 31:0 + +#define NV902D_SET_FALCON19 0x092c +#define NV902D_SET_FALCON19_V 31:0 + +#define NV902D_SET_FALCON20 0x0930 +#define NV902D_SET_FALCON20_V 31:0 + +#define NV902D_SET_FALCON21 0x0934 +#define NV902D_SET_FALCON21_V 31:0 + +#define NV902D_SET_FALCON22 0x0938 +#define NV902D_SET_FALCON22_V 31:0 + +#define NV902D_SET_FALCON23 0x093c +#define NV902D_SET_FALCON23_V 31:0 + +#define NV902D_SET_FALCON24 0x0940 +#define NV902D_SET_FALCON24_V 31:0 + +#define NV902D_SET_FALCON25 0x0944 +#define NV902D_SET_FALCON25_V 31:0 + +#define NV902D_SET_FALCON26 0x0948 +#define NV902D_SET_FALCON26_V 31:0 + +#define NV902D_SET_FALCON27 0x094c +#define NV902D_SET_FALCON27_V 31:0 + +#define NV902D_SET_FALCON28 0x0950 +#define NV902D_SET_FALCON28_V 31:0 + +#define NV902D_SET_FALCON29 0x0954 +#define NV902D_SET_FALCON29_V 31:0 + +#define NV902D_SET_FALCON30 0x0958 +#define NV902D_SET_FALCON30_V 31:0 + +#define NV902D_SET_FALCON31 0x095c +#define NV902D_SET_FALCON31_V 31:0 + +#define NV902D_MME_DMA_WRITE_METHOD_BARRIER 0x0dec +#define NV902D_MME_DMA_WRITE_METHOD_BARRIER_V 0:0 + +#define NV902D_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NV902D_SET_MME_SHADOW_SCRATCH_V 31:0 + +#define NV902D_CALL_MME_MACRO(j) (0x3800+(j)*8) +#define NV902D_CALL_MME_MACRO_V 31:0 + +#define NV902D_CALL_MME_DATA(j) (0x3804+(j)*8) +#define NV902D_CALL_MME_DATA_V 31:0 + +#endif /* _cl_fermi_twod_a_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl907dswspare.h b/src/common/sdk/nvidia/inc/class/cl907dswspare.h new file mode 100644 index 0000000..ee9ddaa --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl907dswspare.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2010-2014, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl907d_sw_spare_h_ +#define _cl907d_sw_spare_h_ + +/* This file is *not* auto-generated. */ + +#define NV907D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF 1:0 +#define NV907D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF_NO_PREF (0x00000000) +#define NV907D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF_GSYNC (0x00000001) + +#define NV907D_PIOR_SET_SW_SPARE_A_CODE_FOR_LOCK_SIGNAL_PROPAGATION_ONLY 1:0 +#define NV907D_PIOR_SET_SW_SPARE_A_CODE_FOR_LOCK_SIGNAL_PROPAGATION_ONLY_FALSE (0x00000000) +#define NV907D_PIOR_SET_SW_SPARE_A_CODE_FOR_LOCK_SIGNAL_PROPAGATION_ONLY_TRUE (0x00000001) + +#endif // _cl907d_sw_spare_h_ + diff --git a/src/common/sdk/nvidia/inc/class/cl9097.h b/src/common/sdk/nvidia/inc/class/cl9097.h new file mode 100644 index 0000000..8bc7792 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9097.h @@ -0,0 +1,3815 @@ +/******************************************************************************* + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef _cl_fermi_a_h_ +#define _cl_fermi_a_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../class/bin/sw_header.pl fermi_a */ + +#include "nvtypes.h" + +#define FERMI_A 0x9097 + +#define NV9097_SET_OBJECT 0x0000 +#define NV9097_SET_OBJECT_CLASS_ID 15:0 +#define NV9097_SET_OBJECT_ENGINE_ID 20:16 + +#define NV9097_NO_OPERATION 0x0100 +#define NV9097_NO_OPERATION_V 31:0 + +#define NV9097_SET_NOTIFY_A 0x0104 +#define NV9097_SET_NOTIFY_A_ADDRESS_UPPER 7:0 + +#define NV9097_SET_NOTIFY_B 0x0108 +#define NV9097_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NV9097_NOTIFY 0x010c +#define NV9097_NOTIFY_TYPE 31:0 +#define NV9097_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NV9097_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NV9097_WAIT_FOR_IDLE 0x0110 +#define NV9097_WAIT_FOR_IDLE_V 31:0 + +#define NV9097_LOAD_MME_INSTRUCTION_RAM_POINTER 0x0114 +#define NV9097_LOAD_MME_INSTRUCTION_RAM_POINTER_V 31:0 + +#define NV9097_LOAD_MME_INSTRUCTION_RAM 0x0118 +#define NV9097_LOAD_MME_INSTRUCTION_RAM_V 31:0 + +#define NV9097_LOAD_MME_START_ADDRESS_RAM_POINTER 0x011c +#define NV9097_LOAD_MME_START_ADDRESS_RAM_POINTER_V 31:0 + +#define NV9097_LOAD_MME_START_ADDRESS_RAM 0x0120 +#define NV9097_LOAD_MME_START_ADDRESS_RAM_V 31:0 + +#define NV9097_SET_MME_SHADOW_RAM_CONTROL 0x0124 +#define NV9097_SET_MME_SHADOW_RAM_CONTROL_MODE 1:0 +#define NV9097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK 0x00000000 +#define NV9097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK_WITH_FILTER 0x00000001 +#define NV9097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH 0x00000002 +#define NV9097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_REPLAY 0x00000003 + +#define NV9097_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER 0x0128 +#define NV9097_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER_V 7:0 + +#define NV9097_PEER_SEMAPHORE_RELEASE_OFFSET 0x012c +#define NV9097_PEER_SEMAPHORE_RELEASE_OFFSET_V 31:0 + +#define NV9097_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NV9097_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NV9097_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NV9097_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NV9097_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NV9097_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NV9097_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NV9097_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NV9097_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NV9097_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NV9097_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NV9097_SEND_GO_IDLE 0x013c +#define NV9097_SEND_GO_IDLE_V 31:0 + +#define NV9097_PM_TRIGGER 0x0140 +#define NV9097_PM_TRIGGER_V 31:0 + +#define NV9097_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NV9097_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NV9097_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NV9097_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NV9097_RUN_DS_NOW 0x0200 +#define NV9097_RUN_DS_NOW_V 31:0 + +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS 0x0204 +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD 4:0 +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_INSTANTANEOUS 0x00000000 +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16 0x00000001 +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32 0x00000002 
+#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__64 0x00000003 +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__128 0x00000004 +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__256 0x00000005 +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__512 0x00000006 +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1024 0x00000007 +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2048 0x00000008 +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4096 0x00000009 +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__8192 0x0000000A +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16384 0x0000000B +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32768 0x0000000C +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__65536 0x0000000D +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__131072 0x0000000E +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__262144 0x0000000F +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__524288 0x00000010 +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1048576 0x00000011 +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2097152 0x00000012 +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4194304 0x00000013 +#define NV9097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_LATEZ_ALWAYS 0x0000001F + +#define NV9097_SET_RASTER_PIPE_SYNC_CONTROL 0x0208 +#define NV9097_SET_RASTER_PIPE_SYNC_CONTROL_PRIM_AREA_THRESHOLD 21:0 +#define NV9097_SET_RASTER_PIPE_SYNC_CONTROL_ENABLE 24:24 +#define NV9097_SET_RASTER_PIPE_SYNC_CONTROL_ENABLE_FALSE 0x00000000 +#define NV9097_SET_RASTER_PIPE_SYNC_CONTROL_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_ALIASED_LINE_WIDTH_ENABLE 0x020c +#define NV9097_SET_ALIASED_LINE_WIDTH_ENABLE_V 0:0 +#define NV9097_SET_ALIASED_LINE_WIDTH_ENABLE_V_FALSE 0x00000000 +#define NV9097_SET_ALIASED_LINE_WIDTH_ENABLE_V_TRUE 0x00000001 + +#define NV9097_SET_API_MANDATED_EARLY_Z 0x0210 +#define NV9097_SET_API_MANDATED_EARLY_Z_ENABLE 0:0 +#define NV9097_SET_API_MANDATED_EARLY_Z_ENABLE_FALSE 0x00000000 +#define NV9097_SET_API_MANDATED_EARLY_Z_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_GS_DM_FIFO 0x0214 +#define NV9097_SET_GS_DM_FIFO_SIZE_RASTER_ON 12:0 +#define NV9097_SET_GS_DM_FIFO_SIZE_RASTER_OFF 28:16 +#define NV9097_SET_GS_DM_FIFO_SPILL_ENABLED 31:31 +#define NV9097_SET_GS_DM_FIFO_SPILL_ENABLED_FALSE 0x00000000 +#define NV9097_SET_GS_DM_FIFO_SPILL_ENABLED_TRUE 0x00000001 + +#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS 0x0218 +#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY 5:4 +#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NV9097_INVALIDATE_SHADER_CACHES 0x021c +#define NV9097_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define 
NV9097_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NV9097_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NV9097_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NV9097_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NV9097_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NV9097_INVALIDATE_SHADER_CACHES_UNIFORM 8:8 +#define NV9097_INVALIDATE_SHADER_CACHES_UNIFORM_FALSE 0x00000000 +#define NV9097_INVALIDATE_SHADER_CACHES_UNIFORM_TRUE 0x00000001 +#define NV9097_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NV9097_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NV9097_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NV9097_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NV9097_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NV9097_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NV9097_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NV9097_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NV9097_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NV9097_SET_VAB_VERTEX3F(i) (0x0220+(i)*4) +#define NV9097_SET_VAB_VERTEX3F_V 31:0 + +#define NV9097_SET_VAB_VERTEX4F(i) (0x0230+(i)*4) +#define NV9097_SET_VAB_VERTEX4F_V 31:0 + +#define NV9097_SET_VAB_NORMAL3F(i) (0x0240+(i)*4) +#define NV9097_SET_VAB_NORMAL3F_V 31:0 + +#define NV9097_SET_VAB_COLOR3F(i) (0x0250+(i)*4) +#define NV9097_SET_VAB_COLOR3F_V 31:0 + +#define NV9097_SET_VAB_COLOR4F(i) (0x0260+(i)*4) +#define NV9097_SET_VAB_COLOR4F_V 31:0 + +#define NV9097_SET_VAB_COLOR4UB(i) (0x0270+(i)*4) +#define NV9097_SET_VAB_COLOR4UB_V 31:0 + +#define NV9097_SET_VAB_TEX_COORD1F(i) (0x0280+(i)*4) +#define NV9097_SET_VAB_TEX_COORD1F_V 31:0 + +#define NV9097_SET_VAB_TEX_COORD2F(i) (0x0290+(i)*4) +#define NV9097_SET_VAB_TEX_COORD2F_V 31:0 + +#define NV9097_SET_VAB_TEX_COORD3F(i) (0x02a0+(i)*4) +#define NV9097_SET_VAB_TEX_COORD3F_V 31:0 + +#define NV9097_SET_VAB_TEX_COORD4F(i) (0x02b0+(i)*4) +#define NV9097_SET_VAB_TEX_COORD4F_V 31:0 + +#define NV9097_SET_GA_TO_VA_MAPPING_MODE 0x02c4 +#define NV9097_SET_GA_TO_VA_MAPPING_MODE_V 0:0 +#define NV9097_SET_GA_TO_VA_MAPPING_MODE_V_DISABLE 0x00000000 +#define NV9097_SET_GA_TO_VA_MAPPING_MODE_V_ENABLE 0x00000001 + +#define NV9097_LOAD_GA_TO_VA_MAPPING_ENTRY 0x02c8 +#define NV9097_LOAD_GA_TO_VA_MAPPING_ENTRY_VIRTUAL_ADDRESS_UPPER 7:0 +#define NV9097_LOAD_GA_TO_VA_MAPPING_ENTRY_GENERIC_ADDRESS_UPPER 23:16 +#define NV9097_LOAD_GA_TO_VA_MAPPING_ENTRY_READ_ENABLE 30:30 +#define NV9097_LOAD_GA_TO_VA_MAPPING_ENTRY_READ_ENABLE_FALSE 0x00000000 +#define NV9097_LOAD_GA_TO_VA_MAPPING_ENTRY_READ_ENABLE_TRUE 0x00000001 +#define NV9097_LOAD_GA_TO_VA_MAPPING_ENTRY_WRITE_ENABLE 31:31 +#define NV9097_LOAD_GA_TO_VA_MAPPING_ENTRY_WRITE_ENABLE_FALSE 0x00000000 +#define NV9097_LOAD_GA_TO_VA_MAPPING_ENTRY_WRITE_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_TASK_CIRCULAR_BUFFER_THROTTLE 0x02cc +#define NV9097_SET_TASK_CIRCULAR_BUFFER_THROTTLE_TASK_COUNT 21:0 + +#define NV9097_SET_PRIM_CIRCULAR_BUFFER_THROTTLE 0x02d0 +#define NV9097_SET_PRIM_CIRCULAR_BUFFER_THROTTLE_PRIM_AREA 21:0 + +#define NV9097_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE 0x02d4 +#define NV9097_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE_V 0:0 + +#define NV9097_SET_SURFACE_CLIP_ID_BLOCK_SIZE 0x02d8 +#define NV9097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH 3:0 +#define NV9097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NV9097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT 7:4 +#define NV9097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define 
NV9097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NV9097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NV9097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NV9097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NV9097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NV9097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH 11:8 +#define NV9097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 + +#define NV9097_SET_RASTER_BOUNDING_BOX 0x02ec +#define NV9097_SET_RASTER_BOUNDING_BOX_MODE 0:0 +#define NV9097_SET_RASTER_BOUNDING_BOX_MODE_BOUNDING_BOX 0x00000000 +#define NV9097_SET_RASTER_BOUNDING_BOX_MODE_FULL_VIEWPORT 0x00000001 +#define NV9097_SET_RASTER_BOUNDING_BOX_PAD 11:4 + +#define NV9097_PEER_SEMAPHORE_RELEASE 0x02f0 +#define NV9097_PEER_SEMAPHORE_RELEASE_V 31:0 + +#define NV9097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE 0x0300 +#define NV9097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE 0:0 +#define NV9097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_FALSE 0x00000000 +#define NV9097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_TRUE 0x00000001 +#define NV9097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE 1:1 +#define NV9097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000 +#define NV9097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001 + +#define NV9097_DRAW_ZERO_INDEX 0x0304 +#define NV9097_DRAW_ZERO_INDEX_COUNT 31:0 + +#define NV9097_SET_L1_CONFIGURATION 0x0308 +#define NV9097_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY 2:0 +#define NV9097_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB 0x00000003 + +#define NV9097_SET_RENDER_ENABLE_CONTROL 0x030c +#define NV9097_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER 0:0 +#define NV9097_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_FALSE 0x00000000 +#define NV9097_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_TRUE 0x00000001 + +#define NV9097_X_X_X_SET_CT_ENABLE 0x0310 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET0 0:0 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET0_FALSE 0x00000000 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET0_TRUE 0x00000001 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET1 1:1 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET1_FALSE 0x00000000 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET1_TRUE 0x00000001 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET2 2:2 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET2_FALSE 0x00000000 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET2_TRUE 0x00000001 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET3 3:3 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET3_FALSE 0x00000000 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET3_TRUE 0x00000001 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET4 4:4 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET4_FALSE 0x00000000 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET4_TRUE 0x00000001 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET5 5:5 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET5_FALSE 0x00000000 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET5_TRUE 0x00000001 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET6 6:6 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET6_FALSE 0x00000000 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET6_TRUE 0x00000001 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET7 7:7 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET7_FALSE 0x00000000 +#define NV9097_X_X_X_SET_CT_ENABLE_TARGET7_TRUE 0x00000001 + +#define NV9097_SET_IEEE_CLEAN_UPDATE 0x0314 +#define NV9097_SET_IEEE_CLEAN_UPDATE_ENABLE 0:0 +#define 
NV9097_SET_IEEE_CLEAN_UPDATE_ENABLE_FALSE 0x00000000 +#define NV9097_SET_IEEE_CLEAN_UPDATE_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_SNAP_GRID_LINE 0x0318 +#define NV9097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL 3:0 +#define NV9097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001 +#define NV9097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002 +#define NV9097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003 +#define NV9097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004 +#define NV9097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005 +#define NV9097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006 +#define NV9097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007 +#define NV9097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008 +#define NV9097_SET_SNAP_GRID_LINE_ROUNDING_MODE 8:8 +#define NV9097_SET_SNAP_GRID_LINE_ROUNDING_MODE_RTNE 0x00000000 +#define NV9097_SET_SNAP_GRID_LINE_ROUNDING_MODE_TESLA 0x00000001 + +#define NV9097_SET_SNAP_GRID_NON_LINE 0x031c +#define NV9097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL 3:0 +#define NV9097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001 +#define NV9097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002 +#define NV9097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003 +#define NV9097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004 +#define NV9097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005 +#define NV9097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006 +#define NV9097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007 +#define NV9097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008 +#define NV9097_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE 8:8 +#define NV9097_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_RTNE 0x00000000 +#define NV9097_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_TESLA 0x00000001 + +#define NV9097_SET_TESSELLATION_PARAMETERS 0x0320 +#define NV9097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE 1:0 +#define NV9097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_ISOLINE 0x00000000 +#define NV9097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_TRIANGLE 0x00000001 +#define NV9097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_QUAD 0x00000002 +#define NV9097_SET_TESSELLATION_PARAMETERS_SPACING 5:4 +#define NV9097_SET_TESSELLATION_PARAMETERS_SPACING_INTEGER 0x00000000 +#define NV9097_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_ODD 0x00000001 +#define NV9097_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_EVEN 0x00000002 +#define NV9097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES 9:8 +#define NV9097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_POINTS 0x00000000 +#define NV9097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_LINES 0x00000001 +#define NV9097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CW 0x00000002 +#define NV9097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CCW 0x00000003 + +#define NV9097_SET_TESSELLATION_LOD_U0_OR_DENSITY 0x0324 +#define NV9097_SET_TESSELLATION_LOD_U0_OR_DENSITY_V 31:0 + +#define NV9097_SET_TESSELLATION_LOD_V0_OR_DETAIL 0x0328 +#define NV9097_SET_TESSELLATION_LOD_V0_OR_DETAIL_V 31:0 + +#define NV9097_SET_TESSELLATION_LOD_U1_OR_W0 0x032c +#define NV9097_SET_TESSELLATION_LOD_U1_OR_W0_V 31:0 + +#define NV9097_SET_TESSELLATION_LOD_V1 0x0330 +#define NV9097_SET_TESSELLATION_LOD_V1_V 31:0 + +#define NV9097_SET_TG_LOD_INTERIOR_U 0x0334 +#define NV9097_SET_TG_LOD_INTERIOR_U_V 31:0 + +#define NV9097_SET_TG_LOD_INTERIOR_V 0x0338 +#define NV9097_SET_TG_LOD_INTERIOR_V_V 31:0 + +#define 
NV9097_RESERVED_TG07 0x033c +#define NV9097_RESERVED_TG07_V 0:0 + +#define NV9097_RESERVED_TG08 0x0340 +#define NV9097_RESERVED_TG08_V 0:0 + +#define NV9097_RESERVED_TG09 0x0344 +#define NV9097_RESERVED_TG09_V 0:0 + +#define NV9097_RESERVED_TG10 0x0348 +#define NV9097_RESERVED_TG10_V 0:0 + +#define NV9097_RESERVED_TG11 0x034c +#define NV9097_RESERVED_TG11_V 0:0 + +#define NV9097_RESERVED_TG12 0x0350 +#define NV9097_RESERVED_TG12_V 0:0 + +#define NV9097_RESERVED_TG13 0x0354 +#define NV9097_RESERVED_TG13_V 0:0 + +#define NV9097_RESERVED_TG14 0x0358 +#define NV9097_RESERVED_TG14_V 0:0 + +#define NV9097_RESERVED_TG15 0x035c +#define NV9097_RESERVED_TG15_V 0:0 + +#define NV9097_SET_SUBTILING_PERF_KNOB_A 0x0360 +#define NV9097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_REGISTER_FILE_PER_SUBTILE 7:0 +#define NV9097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_PIXEL_OUTPUT_BUFFER_PER_SUBTILE 15:8 +#define NV9097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_TRIANGLE_RAM_PER_SUBTILE 23:16 +#define NV9097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_MAX_QUADS_PER_SUBTILE 31:24 + +#define NV9097_SET_SUBTILING_PERF_KNOB_B 0x0364 +#define NV9097_SET_SUBTILING_PERF_KNOB_B_FRACTION_OF_MAX_PRIMITIVES_PER_SUBTILE 7:0 + +#define NV9097_SET_SUBTILING_PERF_KNOB_C 0x0368 +#define NV9097_SET_SUBTILING_PERF_KNOB_C_RESERVED 0:0 + +#define NV9097_SET_RASTER_ENABLE 0x037c +#define NV9097_SET_RASTER_ENABLE_V 0:0 +#define NV9097_SET_RASTER_ENABLE_V_FALSE 0x00000000 +#define NV9097_SET_RASTER_ENABLE_V_TRUE 0x00000001 + +#define NV9097_SET_STREAM_OUT_BUFFER_ENABLE(j) (0x0380+(j)*32) +#define NV9097_SET_STREAM_OUT_BUFFER_ENABLE_V 0:0 +#define NV9097_SET_STREAM_OUT_BUFFER_ENABLE_V_FALSE 0x00000000 +#define NV9097_SET_STREAM_OUT_BUFFER_ENABLE_V_TRUE 0x00000001 + +#define NV9097_SET_STREAM_OUT_BUFFER_ADDRESS_A(j) (0x0384+(j)*32) +#define NV9097_SET_STREAM_OUT_BUFFER_ADDRESS_A_UPPER 7:0 + +#define NV9097_SET_STREAM_OUT_BUFFER_ADDRESS_B(j) (0x0388+(j)*32) +#define NV9097_SET_STREAM_OUT_BUFFER_ADDRESS_B_LOWER 31:0 + +#define NV9097_SET_STREAM_OUT_BUFFER_SIZE(j) (0x038c+(j)*32) +#define NV9097_SET_STREAM_OUT_BUFFER_SIZE_BYTES 31:0 + +#define NV9097_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER(j) (0x0390+(j)*32) +#define NV9097_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER_START_OFFSET 31:0 + +#define NV9097_SET_VAB_DATA_TYPELESS(i) (0x0400+(i)*4) +#define NV9097_SET_VAB_DATA_TYPELESS_V 31:0 + +#define NV9097_SET_STREAM_OUT_CONTROL_STREAM(j) (0x0700+(j)*16) +#define NV9097_SET_STREAM_OUT_CONTROL_STREAM_SELECT 1:0 + +#define NV9097_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT(j) (0x0704+(j)*16) +#define NV9097_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT_MAX 7:0 + +#define NV9097_SET_STREAM_OUT_CONTROL_STRIDE(j) (0x0708+(j)*16) +#define NV9097_SET_STREAM_OUT_CONTROL_STRIDE_BYTES 31:0 + +#define NV9097_SET_RASTER_INPUT 0x0740 +#define NV9097_SET_RASTER_INPUT_STREAM_SELECT 1:0 + +#define NV9097_SET_STREAM_OUTPUT 0x0744 +#define NV9097_SET_STREAM_OUTPUT_ENABLE 0:0 +#define NV9097_SET_STREAM_OUTPUT_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STREAM_OUTPUT_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE 0x0748 +#define NV9097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE 0:0 +#define NV9097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_FALSE 0x00000000 +#define NV9097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_ALPHA_FRACTION 0x074c +#define NV9097_SET_ALPHA_FRACTION_V 7:0 + +#define NV9097_SET_HYBRID_ANTI_ALIAS_CONTROL 0x0754 +#define NV9097_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES 3:0 
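+/*
+ * Note on the field notation (illustration only, not generated content):
+ * a field define such as NV9097_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES
+ * expands to the bit-range token "3:0", i.e. high:low bit positions.
+ * Consumers in the style of the DRF helpers in nvmisc.h split the pair
+ * with C's ternary operator: "1?3:0" evaluates to 3, "0?3:0" to 0.
+ * A minimal sketch, with FLD_* as hypothetical names:
+ *
+ *     #define FLD_LO(f)      ((0 ? f) % 32)
+ *     #define FLD_HI(f)      ((1 ? f) % 32)
+ *     #define FLD_MASK(f)    (0xFFFFFFFFU >> (31 - FLD_HI(f) + FLD_LO(f)))
+ *     #define FLD_NUM(f, n)  (((n) & FLD_MASK(f)) << FLD_LO(f))
+ *
+ *     NvU32 aa = FLD_NUM(NV9097_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES, 4);
+ */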
+#define NV9097_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID 4:4 +#define NV9097_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_FRAGMENT 0x00000000 +#define NV9097_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_PASS 0x00000001 + +#define NV9097_SET_MAX_TI_WARPS_PER_BATCH 0x075c +#define NV9097_SET_MAX_TI_WARPS_PER_BATCH_V 5:0 + +#define NV9097_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c +#define NV9097_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0 + +#define NV9097_SET_SHADER_LOCAL_MEMORY_A 0x0790 +#define NV9097_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0 + +#define NV9097_SET_SHADER_LOCAL_MEMORY_B 0x0794 +#define NV9097_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0 + +#define NV9097_SET_SHADER_LOCAL_MEMORY_C 0x0798 +#define NV9097_SET_SHADER_LOCAL_MEMORY_C_SIZE_UPPER 5:0 + +#define NV9097_SET_SHADER_LOCAL_MEMORY_D 0x079c +#define NV9097_SET_SHADER_LOCAL_MEMORY_D_SIZE_LOWER 31:0 + +#define NV9097_SET_SHADER_LOCAL_MEMORY_E 0x07a0 +#define NV9097_SET_SHADER_LOCAL_MEMORY_E_DEFAULT_SIZE_PER_WARP 25:0 + +#define NV9097_SET_VAB_VERTEX2F(i) (0x07b0+(i)*4) +#define NV9097_SET_VAB_VERTEX2F_V 31:0 + +#define NV9097_SET_ZCULL_REGION_SIZE_A 0x07c0 +#define NV9097_SET_ZCULL_REGION_SIZE_A_WIDTH 15:0 + +#define NV9097_SET_ZCULL_REGION_SIZE_B 0x07c4 +#define NV9097_SET_ZCULL_REGION_SIZE_B_HEIGHT 15:0 + +#define NV9097_SET_ZCULL_REGION_SIZE_C 0x07c8 +#define NV9097_SET_ZCULL_REGION_SIZE_C_DEPTH 15:0 + +#define NV9097_SET_ZCULL_REGION_PIXEL_OFFSET_C 0x07cc +#define NV9097_SET_ZCULL_REGION_PIXEL_OFFSET_C_DEPTH 15:0 + +#define NV9097_SET_CULL_BEFORE_FETCH 0x07dc +#define NV9097_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE 0:0 +#define NV9097_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_FALSE 0x00000000 +#define NV9097_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_TRUE 0x00000001 + +#define NV9097_SET_ZCULL_REGION_LOCATION 0x07e0 +#define NV9097_SET_ZCULL_REGION_LOCATION_START_ALIQUOT 15:0 +#define NV9097_SET_ZCULL_REGION_LOCATION_ALIQUOT_COUNT 31:16 + +#define NV9097_SET_ZCULL_REGION_ALIQUOTS 0x07e4 +#define NV9097_SET_ZCULL_REGION_ALIQUOTS_PER_LAYER 15:0 + +#define NV9097_SET_ZCULL_STORAGE_A 0x07e8 +#define NV9097_SET_ZCULL_STORAGE_A_ADDRESS_UPPER 7:0 + +#define NV9097_SET_ZCULL_STORAGE_B 0x07ec +#define NV9097_SET_ZCULL_STORAGE_B_ADDRESS_LOWER 31:0 + +#define NV9097_SET_ZCULL_STORAGE_C 0x07f0 +#define NV9097_SET_ZCULL_STORAGE_C_LIMIT_ADDRESS_UPPER 7:0 + +#define NV9097_SET_ZCULL_STORAGE_D 0x07f4 +#define NV9097_SET_ZCULL_STORAGE_D_LIMIT_ADDRESS_LOWER 31:0 + +#define NV9097_SET_ZT_READ_ONLY 0x07f8 +#define NV9097_SET_ZT_READ_ONLY_ENABLE_Z 0:0 +#define NV9097_SET_ZT_READ_ONLY_ENABLE_Z_FALSE 0x00000000 +#define NV9097_SET_ZT_READ_ONLY_ENABLE_Z_TRUE 0x00000001 +#define NV9097_SET_ZT_READ_ONLY_ENABLE_STENCIL 4:4 +#define NV9097_SET_ZT_READ_ONLY_ENABLE_STENCIL_FALSE 0x00000000 +#define NV9097_SET_ZT_READ_ONLY_ENABLE_STENCIL_TRUE 0x00000001 + +#define NV9097_SET_COLOR_TARGET_A(j) (0x0800+(j)*64) +#define NV9097_SET_COLOR_TARGET_A_OFFSET_UPPER 7:0 + +#define NV9097_SET_COLOR_TARGET_B(j) (0x0804+(j)*64) +#define NV9097_SET_COLOR_TARGET_B_OFFSET_LOWER 31:0 + +#define NV9097_SET_COLOR_TARGET_WIDTH(j) (0x0808+(j)*64) +#define NV9097_SET_COLOR_TARGET_WIDTH_V 27:0 + +#define NV9097_SET_COLOR_TARGET_HEIGHT(j) (0x080c+(j)*64) +#define NV9097_SET_COLOR_TARGET_HEIGHT_V 16:0 + +#define NV9097_SET_COLOR_TARGET_FORMAT(j) (0x0810+(j)*64) +#define NV9097_SET_COLOR_TARGET_FORMAT_V 7:0 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_DISABLED 0x00000000 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0 +#define 
NV9097_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_AS32 0x000000C1 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_AU32 0x000000C2 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_X32 0x000000C4 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_X32 0x000000C5 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_R16_G16_B16_A16 0x000000C6 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RS16_GS16_BS16_AS16 0x000000C8 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RU16_GU16_BU16_AU16 0x000000C9 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RF32_GF32 0x000000CB +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RS32_GS32 0x000000CC +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RU32_GU32 0x000000CD +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE +#define NV9097_SET_COLOR_TARGET_FORMAT_V_A8R8G8B8 0x000000CF +#define NV9097_SET_COLOR_TARGET_FORMAT_V_A8RL8GL8BL8 0x000000D0 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_A2B10G10R10 0x000000D1 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_AU2BU10GU10RU10 0x000000D2 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_A8B8G8R8 0x000000D5 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_A8BL8GL8RL8 0x000000D6 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_AN8BN8GN8RN8 0x000000D7 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_AS8BS8GS8RS8 0x000000D8 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_AU8BU8GU8RU8 0x000000D9 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_R16_G16 0x000000DA +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RN16_GN16 0x000000DB +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RS16_GS16 0x000000DC +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RU16_GU16 0x000000DD +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RF16_GF16 0x000000DE +#define NV9097_SET_COLOR_TARGET_FORMAT_V_A2R10G10B10 0x000000DF +#define NV9097_SET_COLOR_TARGET_FORMAT_V_BF10GF11RF11 0x000000E0 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RS32 0x000000E3 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RU32 0x000000E4 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RF32 0x000000E5 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_X8R8G8B8 0x000000E6 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_X8RL8GL8BL8 0x000000E7 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_R5G6B5 0x000000E8 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_A1R5G5B5 0x000000E9 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_G8R8 0x000000EA +#define NV9097_SET_COLOR_TARGET_FORMAT_V_GN8RN8 0x000000EB +#define NV9097_SET_COLOR_TARGET_FORMAT_V_GS8RS8 0x000000EC +#define NV9097_SET_COLOR_TARGET_FORMAT_V_GU8RU8 0x000000ED +#define NV9097_SET_COLOR_TARGET_FORMAT_V_R16 0x000000EE +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RN16 0x000000EF +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RS16 0x000000F0 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RU16 0x000000F1 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RF16 0x000000F2 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_R8 0x000000F3 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RN8 0x000000F4 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RS8 0x000000F5 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RU8 0x000000F6 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_A8 0x000000F7 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_X1R5G5B5 0x000000F8 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_X8B8G8R8 0x000000F9 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_X8BL8GL8RL8 0x000000FA +#define NV9097_SET_COLOR_TARGET_FORMAT_V_Z1R5G5B5 0x000000FB +#define NV9097_SET_COLOR_TARGET_FORMAT_V_O1R5G5B5 0x000000FC 
+#define NV9097_SET_COLOR_TARGET_FORMAT_V_Z8R8G8B8 0x000000FD +#define NV9097_SET_COLOR_TARGET_FORMAT_V_O8R8G8B8 0x000000FE +#define NV9097_SET_COLOR_TARGET_FORMAT_V_R32 0x000000FF +#define NV9097_SET_COLOR_TARGET_FORMAT_V_A16 0x00000040 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_AF16 0x00000041 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_AF32 0x00000042 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_A8R8 0x00000043 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_R16_A16 0x00000044 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RF16_AF16 0x00000045 +#define NV9097_SET_COLOR_TARGET_FORMAT_V_RF32_AF32 0x00000046 + +#define NV9097_SET_COLOR_TARGET_MEMORY(j) (0x0814+(j)*64) +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH 3:0 +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH_ONE_GOB 0x00000000 +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT 7:4 +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_ONE_GOB 0x00000000 +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_TWO_GOBS 0x00000001 +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_FOUR_GOBS 0x00000002 +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003 +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH 11:8 +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_ONE_GOB 0x00000000 +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_TWO_GOBS 0x00000001 +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_FOUR_GOBS 0x00000002 +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_EIGHT_GOBS 0x00000003 +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NV9097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005 +#define NV9097_SET_COLOR_TARGET_MEMORY_LAYOUT 12:12 +#define NV9097_SET_COLOR_TARGET_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NV9097_SET_COLOR_TARGET_MEMORY_LAYOUT_PITCH 0x00000001 +#define NV9097_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL 16:16 +#define NV9097_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000 +#define NV9097_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_DEPTH_SIZE 0x00000001 + +#define NV9097_SET_COLOR_TARGET_THIRD_DIMENSION(j) (0x0818+(j)*64) +#define NV9097_SET_COLOR_TARGET_THIRD_DIMENSION_V 27:0 + +#define NV9097_SET_COLOR_TARGET_ARRAY_PITCH(j) (0x081c+(j)*64) +#define NV9097_SET_COLOR_TARGET_ARRAY_PITCH_V 31:0 + +#define NV9097_SET_COLOR_TARGET_LAYER(j) (0x0820+(j)*64) +#define NV9097_SET_COLOR_TARGET_LAYER_OFFSET 15:0 + +#define NV9097_SET_COLOR_TARGET_MARK(j) (0x0824+(j)*64) +#define NV9097_SET_COLOR_TARGET_MARK_IEEE_CLEAN 0:0 +#define NV9097_SET_COLOR_TARGET_MARK_IEEE_CLEAN_FALSE 0x00000000 +#define NV9097_SET_COLOR_TARGET_MARK_IEEE_CLEAN_TRUE 0x00000001 + +#define NV9097_SET_VIEWPORT_SCALE_X(j) (0x0a00+(j)*32) +#define NV9097_SET_VIEWPORT_SCALE_X_V 31:0 + +#define NV9097_SET_VIEWPORT_SCALE_Y(j) (0x0a04+(j)*32) +#define NV9097_SET_VIEWPORT_SCALE_Y_V 31:0 + +#define NV9097_SET_VIEWPORT_SCALE_Z(j) (0x0a08+(j)*32) +#define NV9097_SET_VIEWPORT_SCALE_Z_V 31:0 + +#define NV9097_SET_VIEWPORT_OFFSET_X(j) (0x0a0c+(j)*32) +#define NV9097_SET_VIEWPORT_OFFSET_X_V 31:0 + +#define NV9097_SET_VIEWPORT_OFFSET_Y(j) (0x0a10+(j)*32) +#define NV9097_SET_VIEWPORT_OFFSET_Y_V 31:0 + +#define NV9097_SET_VIEWPORT_OFFSET_Z(j) (0x0a14+(j)*32) +#define NV9097_SET_VIEWPORT_OFFSET_Z_V 31:0 + +#define NV9097_SET_VIEWPORT_CLIP_HORIZONTAL(j) (0x0c00+(j)*16) 
+#define NV9097_SET_VIEWPORT_CLIP_HORIZONTAL_X0 15:0 +#define NV9097_SET_VIEWPORT_CLIP_HORIZONTAL_WIDTH 31:16 + +#define NV9097_SET_VIEWPORT_CLIP_VERTICAL(j) (0x0c04+(j)*16) +#define NV9097_SET_VIEWPORT_CLIP_VERTICAL_Y0 15:0 +#define NV9097_SET_VIEWPORT_CLIP_VERTICAL_HEIGHT 31:16 + +#define NV9097_SET_VIEWPORT_CLIP_MIN_Z(j) (0x0c08+(j)*16) +#define NV9097_SET_VIEWPORT_CLIP_MIN_Z_V 31:0 + +#define NV9097_SET_VIEWPORT_CLIP_MAX_Z(j) (0x0c0c+(j)*16) +#define NV9097_SET_VIEWPORT_CLIP_MAX_Z_V 31:0 + +#define NV9097_SET_WINDOW_CLIP_HORIZONTAL(j) (0x0d00+(j)*8) +#define NV9097_SET_WINDOW_CLIP_HORIZONTAL_XMIN 15:0 +#define NV9097_SET_WINDOW_CLIP_HORIZONTAL_XMAX 31:16 + +#define NV9097_SET_WINDOW_CLIP_VERTICAL(j) (0x0d04+(j)*8) +#define NV9097_SET_WINDOW_CLIP_VERTICAL_YMIN 15:0 +#define NV9097_SET_WINDOW_CLIP_VERTICAL_YMAX 31:16 + +#define NV9097_SET_CLIP_ID_EXTENT_X(j) (0x0d40+(j)*8) +#define NV9097_SET_CLIP_ID_EXTENT_X_MINX 15:0 +#define NV9097_SET_CLIP_ID_EXTENT_X_WIDTH 31:16 + +#define NV9097_SET_CLIP_ID_EXTENT_Y(j) (0x0d44+(j)*8) +#define NV9097_SET_CLIP_ID_EXTENT_Y_MINY 15:0 +#define NV9097_SET_CLIP_ID_EXTENT_Y_HEIGHT 31:16 + +#define NV9097_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK 0x0d60 +#define NV9097_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK_V 10:0 + +#define NV9097_SET_API_VISIBLE_CALL_LIMIT 0x0d64 +#define NV9097_SET_API_VISIBLE_CALL_LIMIT_V 3:0 +#define NV9097_SET_API_VISIBLE_CALL_LIMIT_V__0 0x00000000 +#define NV9097_SET_API_VISIBLE_CALL_LIMIT_V__1 0x00000001 +#define NV9097_SET_API_VISIBLE_CALL_LIMIT_V__2 0x00000002 +#define NV9097_SET_API_VISIBLE_CALL_LIMIT_V__4 0x00000003 +#define NV9097_SET_API_VISIBLE_CALL_LIMIT_V__8 0x00000004 +#define NV9097_SET_API_VISIBLE_CALL_LIMIT_V__16 0x00000005 +#define NV9097_SET_API_VISIBLE_CALL_LIMIT_V__32 0x00000006 +#define NV9097_SET_API_VISIBLE_CALL_LIMIT_V__64 0x00000007 +#define NV9097_SET_API_VISIBLE_CALL_LIMIT_V__128 0x00000008 +#define NV9097_SET_API_VISIBLE_CALL_LIMIT_V_NO_CHECK 0x0000000F + +#define NV9097_SET_STATISTICS_COUNTER 0x0d68 +#define NV9097_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE 0:0 +#define NV9097_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_TRUE 0x00000001 +#define NV9097_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE 1:1 +#define NV9097_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001 +#define NV9097_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE 2:2 +#define NV9097_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NV9097_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE 3:3 +#define NV9097_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NV9097_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE 4:4 +#define NV9097_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001 +#define NV9097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE 5:5 +#define NV9097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_TRUE 0x00000001 +#define NV9097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE 6:6 +#define 
NV9097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_TRUE 0x00000001 +#define NV9097_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE 7:7 +#define NV9097_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NV9097_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE 8:8 +#define NV9097_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001 +#define NV9097_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE 9:9 +#define NV9097_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NV9097_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE 11:11 +#define NV9097_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NV9097_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE 12:12 +#define NV9097_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NV9097_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE 13:13 +#define NV9097_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001 +#define NV9097_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE 14:14 +#define NV9097_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_TRUE 0x00000001 +#define NV9097_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE 10:10 +#define NV9097_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_TRUE 0x00000001 +#define NV9097_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE 15:15 +#define NV9097_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_FALSE 0x00000000 +#define NV9097_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_CLEAR_RECT_HORIZONTAL 0x0d6c +#define NV9097_SET_CLEAR_RECT_HORIZONTAL_XMIN 15:0 +#define NV9097_SET_CLEAR_RECT_HORIZONTAL_XMAX 31:16 + +#define NV9097_SET_CLEAR_RECT_VERTICAL 0x0d70 +#define NV9097_SET_CLEAR_RECT_VERTICAL_YMIN 15:0 +#define NV9097_SET_CLEAR_RECT_VERTICAL_YMAX 31:16 + +#define NV9097_SET_VERTEX_ARRAY_START 0x0d74 +#define NV9097_SET_VERTEX_ARRAY_START_V 31:0 + +#define NV9097_DRAW_VERTEX_ARRAY 0x0d78 +#define NV9097_DRAW_VERTEX_ARRAY_COUNT 31:0 + +#define NV9097_SET_VIEWPORT_Z_CLIP 0x0d7c +#define NV9097_SET_VIEWPORT_Z_CLIP_RANGE 0:0 +#define NV9097_SET_VIEWPORT_Z_CLIP_RANGE_NEGATIVE_W_TO_POSITIVE_W 0x00000000 +#define NV9097_SET_VIEWPORT_Z_CLIP_RANGE_ZERO_TO_POSITIVE_W 0x00000001 + +#define NV9097_SET_COLOR_CLEAR_VALUE(i) (0x0d80+(i)*4) +#define NV9097_SET_COLOR_CLEAR_VALUE_V 31:0 + +#define NV9097_SET_Z_CLEAR_VALUE 0x0d90 +#define NV9097_SET_Z_CLEAR_VALUE_V 31:0 + +#define NV9097_SET_SHADER_CACHE_CONTROL 0x0d94 +#define NV9097_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0 +#define NV9097_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000 +#define NV9097_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001 + 
+#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE 0x0d9c +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V 0:0 +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_FALSE 0x00000000 +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_TRUE 0x00000001 + +#define NV9097_SET_STENCIL_CLEAR_VALUE 0x0da0 +#define NV9097_SET_STENCIL_CLEAR_VALUE_V 7:0 + +#define NV9097_SET_FRONT_POLYGON_MODE 0x0dac +#define NV9097_SET_FRONT_POLYGON_MODE_V 31:0 +#define NV9097_SET_FRONT_POLYGON_MODE_V_POINT 0x00001B00 +#define NV9097_SET_FRONT_POLYGON_MODE_V_LINE 0x00001B01 +#define NV9097_SET_FRONT_POLYGON_MODE_V_FILL 0x00001B02 + +#define NV9097_SET_BACK_POLYGON_MODE 0x0db0 +#define NV9097_SET_BACK_POLYGON_MODE_V 31:0 +#define NV9097_SET_BACK_POLYGON_MODE_V_POINT 0x00001B00 +#define NV9097_SET_BACK_POLYGON_MODE_V_LINE 0x00001B01 +#define NV9097_SET_BACK_POLYGON_MODE_V_FILL 0x00001B02 + +#define NV9097_SET_POLY_SMOOTH 0x0db4 +#define NV9097_SET_POLY_SMOOTH_ENABLE 0:0 +#define NV9097_SET_POLY_SMOOTH_ENABLE_FALSE 0x00000000 +#define NV9097_SET_POLY_SMOOTH_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_ZT_MARK 0x0db8 +#define NV9097_SET_ZT_MARK_IEEE_CLEAN 0:0 +#define NV9097_SET_ZT_MARK_IEEE_CLEAN_FALSE 0x00000000 +#define NV9097_SET_ZT_MARK_IEEE_CLEAN_TRUE 0x00000001 + +#define NV9097_SET_ZCULL_DIR_FORMAT 0x0dbc +#define NV9097_SET_ZCULL_DIR_FORMAT_ZDIR 15:0 +#define NV9097_SET_ZCULL_DIR_FORMAT_ZDIR_LESS 0x00000000 +#define NV9097_SET_ZCULL_DIR_FORMAT_ZDIR_GREATER 0x00000001 +#define NV9097_SET_ZCULL_DIR_FORMAT_ZFORMAT 31:16 +#define NV9097_SET_ZCULL_DIR_FORMAT_ZFORMAT_MSB 0x00000000 +#define NV9097_SET_ZCULL_DIR_FORMAT_ZFORMAT_FP 0x00000001 +#define NV9097_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZTRICK 0x00000002 +#define NV9097_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZF32_1 0x00000003 + +#define NV9097_SET_POLY_OFFSET_POINT 0x0dc0 +#define NV9097_SET_POLY_OFFSET_POINT_ENABLE 0:0 +#define NV9097_SET_POLY_OFFSET_POINT_ENABLE_FALSE 0x00000000 +#define NV9097_SET_POLY_OFFSET_POINT_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_POLY_OFFSET_LINE 0x0dc4 +#define NV9097_SET_POLY_OFFSET_LINE_ENABLE 0:0 +#define NV9097_SET_POLY_OFFSET_LINE_ENABLE_FALSE 0x00000000 +#define NV9097_SET_POLY_OFFSET_LINE_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_POLY_OFFSET_FILL 0x0dc8 +#define NV9097_SET_POLY_OFFSET_FILL_ENABLE 0:0 +#define NV9097_SET_POLY_OFFSET_FILL_ENABLE_FALSE 0x00000000 +#define NV9097_SET_POLY_OFFSET_FILL_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_PATCH 0x0dcc +#define NV9097_SET_PATCH_SIZE 7:0 + +#define NV9097_SET_ZCULL_CRITERION 0x0dd8 +#define NV9097_SET_ZCULL_CRITERION_SFUNC 7:0 +#define NV9097_SET_ZCULL_CRITERION_SFUNC_NEVER 0x00000000 +#define NV9097_SET_ZCULL_CRITERION_SFUNC_LESS 0x00000001 +#define NV9097_SET_ZCULL_CRITERION_SFUNC_EQUAL 0x00000002 +#define NV9097_SET_ZCULL_CRITERION_SFUNC_LEQUAL 0x00000003 +#define NV9097_SET_ZCULL_CRITERION_SFUNC_GREATER 0x00000004 +#define NV9097_SET_ZCULL_CRITERION_SFUNC_NOTEQUAL 0x00000005 +#define NV9097_SET_ZCULL_CRITERION_SFUNC_GEQUAL 0x00000006 +#define NV9097_SET_ZCULL_CRITERION_SFUNC_ALWAYS 0x00000007 +#define NV9097_SET_ZCULL_CRITERION_NO_INVALIDATE 8:8 +#define NV9097_SET_ZCULL_CRITERION_NO_INVALIDATE_FALSE 0x00000000 +#define NV9097_SET_ZCULL_CRITERION_NO_INVALIDATE_TRUE 0x00000001 +#define NV9097_SET_ZCULL_CRITERION_FORCE_MATCH 9:9 +#define NV9097_SET_ZCULL_CRITERION_FORCE_MATCH_FALSE 0x00000000 +#define NV9097_SET_ZCULL_CRITERION_FORCE_MATCH_TRUE 0x00000001 +#define NV9097_SET_ZCULL_CRITERION_SREF 23:16 +#define NV9097_SET_ZCULL_CRITERION_SMASK 31:24 + +#define 
NV9097_X_X_X_SET_DA_ATTRIBUTE_CACHE_LINE 0x0ddc +#define NV9097_X_X_X_SET_DA_ATTRIBUTE_CACHE_LINE_V 1:0 +#define NV9097_X_X_X_SET_DA_ATTRIBUTE_CACHE_LINE_V_SIZE128 0x00000000 +#define NV9097_X_X_X_SET_DA_ATTRIBUTE_CACHE_LINE_V_SIZE64 0x00000001 +#define NV9097_X_X_X_SET_DA_ATTRIBUTE_CACHE_LINE_V_SIZE32 0x00000002 + +#define NV9097_SET_SM_TIMEOUT_INTERVAL 0x0de4 +#define NV9097_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0 + +#define NV9097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY 0x0de8 +#define NV9097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE 0:0 +#define NV9097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_FALSE 0x00000000 +#define NV9097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_DRAW_INLINE_VERTEX_VAB_UPDATE 0x0dec +#define NV9097_SET_DRAW_INLINE_VERTEX_VAB_UPDATE_ENABLE 0:0 +#define NV9097_SET_DRAW_INLINE_VERTEX_VAB_UPDATE_ENABLE_FALSE 0x00000000 +#define NV9097_SET_DRAW_INLINE_VERTEX_VAB_UPDATE_ENABLE_TRUE 0x00000001 + +#define NV9097_X_X_X_SET_REDUCE_COLOR 0x0df4 +#define NV9097_X_X_X_SET_REDUCE_COLOR_U8_THRESHOLD 7:0 +#define NV9097_X_X_X_SET_REDUCE_COLOR_FP16_THRESHOLD 23:8 + +#define NV9097_SET_WINDOW_OFFSET_X 0x0df8 +#define NV9097_SET_WINDOW_OFFSET_X_V 16:0 + +#define NV9097_SET_WINDOW_OFFSET_Y 0x0dfc +#define NV9097_SET_WINDOW_OFFSET_Y_V 17:0 + +#define NV9097_SET_SCISSOR_ENABLE(j) (0x0e00+(j)*16) +#define NV9097_SET_SCISSOR_ENABLE_V 0:0 +#define NV9097_SET_SCISSOR_ENABLE_V_FALSE 0x00000000 +#define NV9097_SET_SCISSOR_ENABLE_V_TRUE 0x00000001 + +#define NV9097_SET_SCISSOR_HORIZONTAL(j) (0x0e04+(j)*16) +#define NV9097_SET_SCISSOR_HORIZONTAL_XMIN 15:0 +#define NV9097_SET_SCISSOR_HORIZONTAL_XMAX 31:16 + +#define NV9097_SET_SCISSOR_VERTICAL(j) (0x0e08+(j)*16) +#define NV9097_SET_SCISSOR_VERTICAL_YMIN 15:0 +#define NV9097_SET_SCISSOR_VERTICAL_YMAX 31:16 + +#define NV9097_SET_VAB_NORMAL3S(i) (0x0f00+(i)*4) +#define NV9097_SET_VAB_NORMAL3S_V 31:0 + +#define NV9097_SET_BACK_STENCIL_FUNC_REF 0x0f54 +#define NV9097_SET_BACK_STENCIL_FUNC_REF_V 7:0 + +#define NV9097_SET_BACK_STENCIL_MASK 0x0f58 +#define NV9097_SET_BACK_STENCIL_MASK_V 7:0 + +#define NV9097_SET_BACK_STENCIL_FUNC_MASK 0x0f5c +#define NV9097_SET_BACK_STENCIL_FUNC_MASK_V 7:0 + +#define NV9097_SET_VERTEX_STREAM_SUBSTITUTE_A 0x0f84 +#define NV9097_SET_VERTEX_STREAM_SUBSTITUTE_A_ADDRESS_UPPER 7:0 + +#define NV9097_SET_VERTEX_STREAM_SUBSTITUTE_B 0x0f88 +#define NV9097_SET_VERTEX_STREAM_SUBSTITUTE_B_ADDRESS_LOWER 31:0 + +#define NV9097_SET_LINE_MODE_POLYGON_CLIP 0x0f8c +#define NV9097_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE 0:0 +#define NV9097_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DRAW_LINE 0x00000000 +#define NV9097_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DO_NOT_DRAW_LINE 0x00000001 + +#define NV9097_SET_SINGLE_CT_WRITE_CONTROL 0x0f90 +#define NV9097_SET_SINGLE_CT_WRITE_CONTROL_ENABLE 0:0 +#define NV9097_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_FALSE 0x00000000 +#define NV9097_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_VTG_WARP_WATERMARKS 0x0f98 +#define NV9097_SET_VTG_WARP_WATERMARKS_LOW 15:0 +#define NV9097_SET_VTG_WARP_WATERMARKS_HIGH 31:16 + +#define NV9097_SET_DEPTH_BOUNDS_MIN 0x0f9c +#define NV9097_SET_DEPTH_BOUNDS_MIN_V 31:0 + +#define NV9097_SET_DEPTH_BOUNDS_MAX 0x0fa0 +#define NV9097_SET_DEPTH_BOUNDS_MAX_V 31:0 + +#define NV9097_SET_CT_MRT_ENABLE 0x0fac +#define NV9097_SET_CT_MRT_ENABLE_V 0:0 +#define NV9097_SET_CT_MRT_ENABLE_V_FALSE 0x00000000 +#define NV9097_SET_CT_MRT_ENABLE_V_TRUE 0x00000001 + +#define NV9097_SET_NONMULTISAMPLED_Z 0x0fb0 +#define 
NV9097_SET_NONMULTISAMPLED_Z_V 0:0 +#define NV9097_SET_NONMULTISAMPLED_Z_V_PER_SAMPLE 0x00000000 +#define NV9097_SET_NONMULTISAMPLED_Z_V_AT_PIXEL_CENTER 0x00000001 + +#define NV9097_SET_SAMPLE_MASK_X0_Y0 0x0fbc +#define NV9097_SET_SAMPLE_MASK_X0_Y0_V 15:0 + +#define NV9097_SET_SAMPLE_MASK_X1_Y0 0x0fc0 +#define NV9097_SET_SAMPLE_MASK_X1_Y0_V 15:0 + +#define NV9097_SET_SAMPLE_MASK_X0_Y1 0x0fc4 +#define NV9097_SET_SAMPLE_MASK_X0_Y1_V 15:0 + +#define NV9097_SET_SAMPLE_MASK_X1_Y1 0x0fc8 +#define NV9097_SET_SAMPLE_MASK_X1_Y1_V 15:0 + +#define NV9097_SET_SURFACE_CLIP_ID_MEMORY_A 0x0fcc +#define NV9097_SET_SURFACE_CLIP_ID_MEMORY_A_OFFSET_UPPER 7:0 + +#define NV9097_SET_SURFACE_CLIP_ID_MEMORY_B 0x0fd0 +#define NV9097_SET_SURFACE_CLIP_ID_MEMORY_B_OFFSET_LOWER 31:0 + +#define NV9097_SET_BLEND_OPT_CONTROL 0x0fdc +#define NV9097_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS 0:0 +#define NV9097_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_FALSE 0x00000000 +#define NV9097_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_TRUE 0x00000001 + +#define NV9097_SET_ZT_A 0x0fe0 +#define NV9097_SET_ZT_A_OFFSET_UPPER 7:0 + +#define NV9097_SET_ZT_B 0x0fe4 +#define NV9097_SET_ZT_B_OFFSET_LOWER 31:0 + +#define NV9097_SET_ZT_FORMAT 0x0fe8 +#define NV9097_SET_ZT_FORMAT_V 4:0 +#define NV9097_SET_ZT_FORMAT_V_Z16 0x00000013 +#define NV9097_SET_ZT_FORMAT_V_Z24S8 0x00000014 +#define NV9097_SET_ZT_FORMAT_V_X8Z24 0x00000015 +#define NV9097_SET_ZT_FORMAT_V_S8Z24 0x00000016 +#define NV9097_SET_ZT_FORMAT_V_V8Z24 0x00000018 +#define NV9097_SET_ZT_FORMAT_V_ZF32 0x0000000A +#define NV9097_SET_ZT_FORMAT_V_ZF32_X24S8 0x00000019 +#define NV9097_SET_ZT_FORMAT_V_X8Z24_X16V8S8 0x0000001D +#define NV9097_SET_ZT_FORMAT_V_ZF32_X16V8X8 0x0000001E +#define NV9097_SET_ZT_FORMAT_V_ZF32_X16V8S8 0x0000001F + +#define NV9097_SET_ZT_BLOCK_SIZE 0x0fec +#define NV9097_SET_ZT_BLOCK_SIZE_WIDTH 3:0 +#define NV9097_SET_ZT_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NV9097_SET_ZT_BLOCK_SIZE_HEIGHT 7:4 +#define NV9097_SET_ZT_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NV9097_SET_ZT_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NV9097_SET_ZT_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NV9097_SET_ZT_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NV9097_SET_ZT_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NV9097_SET_ZT_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NV9097_SET_ZT_BLOCK_SIZE_DEPTH 11:8 +#define NV9097_SET_ZT_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 + +#define NV9097_SET_ZT_ARRAY_PITCH 0x0ff0 +#define NV9097_SET_ZT_ARRAY_PITCH_V 31:0 + +#define NV9097_SET_SURFACE_CLIP_HORIZONTAL 0x0ff4 +#define NV9097_SET_SURFACE_CLIP_HORIZONTAL_X 15:0 +#define NV9097_SET_SURFACE_CLIP_HORIZONTAL_WIDTH 31:16 + +#define NV9097_SET_SURFACE_CLIP_VERTICAL 0x0ff8 +#define NV9097_SET_SURFACE_CLIP_VERTICAL_Y 15:0 +#define NV9097_SET_SURFACE_CLIP_VERTICAL_HEIGHT 31:16 + +#define NV9097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS 0x1000 +#define NV9097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE 0:0 +#define NV9097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_FALSE 0x00000000 +#define NV9097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_TRUE 0x00000001 +#define NV9097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY 5:4 +#define NV9097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NV9097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NV9097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NV9097_SET_FORCE_ONE_TEXTURE_UNIT 0x1004 
+#define NV9097_SET_FORCE_ONE_TEXTURE_UNIT_ENABLE 0:0 +#define NV9097_SET_FORCE_ONE_TEXTURE_UNIT_ENABLE_FALSE 0x00000000 +#define NV9097_SET_FORCE_ONE_TEXTURE_UNIT_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_TESSELLATION_CUT_HEIGHT 0x1008 +#define NV9097_SET_TESSELLATION_CUT_HEIGHT_V 4:0 + +#define NV9097_SET_MAX_GS_INSTANCES_PER_TASK 0x100c +#define NV9097_SET_MAX_GS_INSTANCES_PER_TASK_V 10:0 + +#define NV9097_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK 0x1010 +#define NV9097_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK_V 15:0 + +#define NV9097_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER 0x1018 +#define NV9097_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0 + +#define NV9097_SET_BETA_CB_STORAGE_CONSTRAINT 0x101c +#define NV9097_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE 0:0 +#define NV9097_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000 +#define NV9097_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER 0x1020 +#define NV9097_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0 + +#define NV9097_SET_ALPHA_CB_STORAGE_CONSTRAINT 0x1024 +#define NV9097_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE 0:0 +#define NV9097_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000 +#define NV9097_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_SPARE_NOOP00 0x1040 +#define NV9097_SET_SPARE_NOOP00_V 31:0 + +#define NV9097_SET_SPARE_NOOP01 0x1044 +#define NV9097_SET_SPARE_NOOP01_V 31:0 + +#define NV9097_SET_SPARE_NOOP02 0x1048 +#define NV9097_SET_SPARE_NOOP02_V 31:0 + +#define NV9097_SET_SPARE_NOOP03 0x104c +#define NV9097_SET_SPARE_NOOP03_V 31:0 + +#define NV9097_SET_SPARE_NOOP04 0x1050 +#define NV9097_SET_SPARE_NOOP04_V 31:0 + +#define NV9097_SET_SPARE_NOOP05 0x1054 +#define NV9097_SET_SPARE_NOOP05_V 31:0 + +#define NV9097_SET_SPARE_NOOP06 0x1058 +#define NV9097_SET_SPARE_NOOP06_V 31:0 + +#define NV9097_SET_SPARE_NOOP07 0x105c +#define NV9097_SET_SPARE_NOOP07_V 31:0 + +#define NV9097_SET_SPARE_NOOP08 0x1060 +#define NV9097_SET_SPARE_NOOP08_V 31:0 + +#define NV9097_SET_SPARE_NOOP09 0x1064 +#define NV9097_SET_SPARE_NOOP09_V 31:0 + +#define NV9097_SET_SPARE_NOOP10 0x1068 +#define NV9097_SET_SPARE_NOOP10_V 31:0 + +#define NV9097_SET_SPARE_NOOP11 0x106c +#define NV9097_SET_SPARE_NOOP11_V 31:0 + +#define NV9097_SET_SPARE_NOOP12 0x1070 +#define NV9097_SET_SPARE_NOOP12_V 31:0 + +#define NV9097_SET_SPARE_NOOP13 0x1074 +#define NV9097_SET_SPARE_NOOP13_V 31:0 + +#define NV9097_SET_SPARE_NOOP14 0x1078 +#define NV9097_SET_SPARE_NOOP14_V 31:0 + +#define NV9097_SET_SPARE_NOOP15 0x107c +#define NV9097_SET_SPARE_NOOP15_V 31:0 + +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_UNORM8 0x10cc +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED_ALL_HIT_ONCE 7:0 +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED 23:16 + +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_UNORM10 0x10e0 +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED_ALL_HIT_ONCE 7:0 +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED 23:16 + +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_UNORM16 0x10e4 +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED_ALL_HIT_ONCE 7:0 +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED 23:16 + +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_FP11 0x10e8 +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED_ALL_HIT_ONCE 5:0 +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED 21:16 + +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_FP16 0x10ec +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED_ALL_HIT_ONCE 7:0 
+#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED 23:16 + +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_SRGB8 0x10f0 +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED_ALL_HIT_ONCE 7:0 +#define NV9097_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED 23:16 + +#define NV9097_UNBIND_ALL 0x10f4 +#define NV9097_UNBIND_ALL_TEXTURE_HEADERS 0:0 +#define NV9097_UNBIND_ALL_TEXTURE_HEADERS_FALSE 0x00000000 +#define NV9097_UNBIND_ALL_TEXTURE_HEADERS_TRUE 0x00000001 +#define NV9097_UNBIND_ALL_TEXTURE_SAMPLERS 4:4 +#define NV9097_UNBIND_ALL_TEXTURE_SAMPLERS_FALSE 0x00000000 +#define NV9097_UNBIND_ALL_TEXTURE_SAMPLERS_TRUE 0x00000001 +#define NV9097_UNBIND_ALL_CONSTANT_BUFFERS 8:8 +#define NV9097_UNBIND_ALL_CONSTANT_BUFFERS_FALSE 0x00000000 +#define NV9097_UNBIND_ALL_CONSTANT_BUFFERS_TRUE 0x00000001 + +#define NV9097_SET_CLEAR_SURFACE_CONTROL 0x10f8 +#define NV9097_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK 0:0 +#define NV9097_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_FALSE 0x00000000 +#define NV9097_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_TRUE 0x00000001 +#define NV9097_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT 4:4 +#define NV9097_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_FALSE 0x00000000 +#define NV9097_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_TRUE 0x00000001 +#define NV9097_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0 8:8 +#define NV9097_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_FALSE 0x00000000 +#define NV9097_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_TRUE 0x00000001 +#define NV9097_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0 12:12 +#define NV9097_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_FALSE 0x00000000 +#define NV9097_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_TRUE 0x00000001 + +#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS 0x10fc +#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY 5:4 +#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NV9097_NO_OPERATION_DATA_HI 0x110c +#define NV9097_NO_OPERATION_DATA_HI_V 31:0 + +#define NV9097_SET_DEPTH_BIAS_CONTROL 0x1110 +#define NV9097_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT 0:0 +#define NV9097_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_FALSE 0x00000000 +#define NV9097_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_TRUE 0x00000001 + +#define NV9097_PM_TRIGGER_END 0x1114 +#define NV9097_PM_TRIGGER_END_V 31:0 + +#define NV9097_SET_VERTEX_ID_BASE 0x1118 +#define NV9097_SET_VERTEX_ID_BASE_V 31:0 + +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A(i) (0x1120+(i)*4) +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0 0:0 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1 1:1 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2 2:2 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_TRUE 0x00000001 +#define 
NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3 3:3 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0 4:4 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1 5:5 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2 6:6 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3 7:7 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0 8:8 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1 9:9 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2 10:10 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3 11:11 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0 12:12 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1 13:13 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2 14:14 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3 15:15 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0 16:16 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1 17:17 +#define 
NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2 18:18 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3 19:19 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0 20:20 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1 21:21 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2 22:22 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3 23:23 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0 24:24 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1 25:25 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2 26:26 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3 27:27 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0 28:28 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1 29:29 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2 30:30 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3 31:31 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_FALSE 0x00000000 +#define 
NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_TRUE 0x00000001 + +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B(i) (0x1128+(i)*4) +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0 0:0 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1 1:1 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2 2:2 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3 3:3 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0 4:4 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1 5:5 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2 6:6 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3 7:7 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0 8:8 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1 9:9 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2 10:10 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3 11:11 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0 12:12 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1 13:13 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_FALSE 0x00000000 +#define 
NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2 14:14 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3 15:15 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0 16:16 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1 17:17 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2 18:18 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3 19:19 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0 20:20 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1 21:21 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2 22:22 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3 23:23 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0 24:24 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1 25:25 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2 26:26 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3 27:27 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_TRUE 0x00000001 +#define 
NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0 28:28 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1 29:29 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2 30:30 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_TRUE 0x00000001 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3 31:31 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_FALSE 0x00000000 +#define NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_TRUE 0x00000001 + +#define NV9097_SET_BLEND_PER_FORMAT_ENABLE 0x1140 +#define NV9097_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16 4:4 +#define NV9097_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_FALSE 0x00000000 +#define NV9097_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_TRUE 0x00000001 + +#define NV9097_FLUSH_PENDING_WRITES 0x1144 +#define NV9097_FLUSH_PENDING_WRITES_SM_DOES_GLOBAL_STORE 0:0 + +#define NV9097_SET_VAB_DATA_CONTROL 0x114c +#define NV9097_SET_VAB_DATA_CONTROL_VAB_INDEX 7:0 +#define NV9097_SET_VAB_DATA_CONTROL_COMPONENT_COUNT 10:8 +#define NV9097_SET_VAB_DATA_CONTROL_COMPONENT_BYTE_WIDTH 14:12 +#define NV9097_SET_VAB_DATA_CONTROL_FORMAT 18:16 +#define NV9097_SET_VAB_DATA_CONTROL_FORMAT_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define NV9097_SET_VAB_DATA_CONTROL_FORMAT_NUM_SNORM 0x00000001 +#define NV9097_SET_VAB_DATA_CONTROL_FORMAT_NUM_UNORM 0x00000002 +#define NV9097_SET_VAB_DATA_CONTROL_FORMAT_NUM_SINT 0x00000003 +#define NV9097_SET_VAB_DATA_CONTROL_FORMAT_NUM_UINT 0x00000004 +#define NV9097_SET_VAB_DATA_CONTROL_FORMAT_NUM_USCALED 0x00000005 +#define NV9097_SET_VAB_DATA_CONTROL_FORMAT_NUM_SSCALED 0x00000006 +#define NV9097_SET_VAB_DATA_CONTROL_FORMAT_NUM_FLOAT 0x00000007 + +#define NV9097_SET_VAB_DATA(i) (0x1150+(i)*4) +#define NV9097_SET_VAB_DATA_V 31:0 + +#define NV9097_SET_VERTEX_ATTRIBUTE_A(i) (0x1160+(i)*4) +#define NV9097_SET_VERTEX_ATTRIBUTE_A_STREAM 4:0 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_SOURCE 6:6 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_SOURCE_ACTIVE 0x00000000 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_SOURCE_INACTIVE 0x00000001 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_OFFSET 20:7 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS 26:21 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031 +#define 
NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NV9097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8 0x00000034 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE 29:27 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SNORM 0x00000001 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UNORM 0x00000002 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SINT 0x00000003 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UINT 0x00000004 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_USCALED 0x00000005 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SSCALED 0x00000006 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT 0x00000007 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B 31:31 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_FALSE 0x00000000 +#define NV9097_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_TRUE 0x00000001 + +#define NV9097_SET_VERTEX_ATTRIBUTE_B(i) (0x11a0+(i)*4) +#define NV9097_SET_VERTEX_ATTRIBUTE_B_STREAM 4:0 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_SOURCE 6:6 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_SOURCE_ACTIVE 0x00000000 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_SOURCE_INACTIVE 0x00000001 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_OFFSET 20:7 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS 26:21 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NV9097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8 0x00000034 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE 29:27 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define 
NV9097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SNORM 0x00000001 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UNORM 0x00000002 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SINT 0x00000003 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UINT 0x00000004 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_USCALED 0x00000005 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SSCALED 0x00000006 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_FLOAT 0x00000007 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B 31:31 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_FALSE 0x00000000 +#define NV9097_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_TRUE 0x00000001 + +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST 0x1214 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX 15:0 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT 27:16 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT 0x1218 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_START_INDEX 15:0 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 +#define 
NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NV9097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NV9097_SET_CT_SELECT 0x121c +#define NV9097_SET_CT_SELECT_TARGET_COUNT 3:0 +#define NV9097_SET_CT_SELECT_TARGET0 6:4 +#define NV9097_SET_CT_SELECT_TARGET1 9:7 +#define NV9097_SET_CT_SELECT_TARGET2 12:10 +#define NV9097_SET_CT_SELECT_TARGET3 15:13 +#define NV9097_SET_CT_SELECT_TARGET4 18:16 +#define NV9097_SET_CT_SELECT_TARGET5 21:19 +#define NV9097_SET_CT_SELECT_TARGET6 24:22 +#define NV9097_SET_CT_SELECT_TARGET7 27:25 + +#define NV9097_SET_COMPRESSION_THRESHOLD 0x1220 +#define NV9097_SET_COMPRESSION_THRESHOLD_SAMPLES 3:0 +#define NV9097_SET_COMPRESSION_THRESHOLD_SAMPLES__0 0x00000000 +#define NV9097_SET_COMPRESSION_THRESHOLD_SAMPLES__1 0x00000001 +#define NV9097_SET_COMPRESSION_THRESHOLD_SAMPLES__2 0x00000002 +#define NV9097_SET_COMPRESSION_THRESHOLD_SAMPLES__4 0x00000003 +#define NV9097_SET_COMPRESSION_THRESHOLD_SAMPLES__8 0x00000004 +#define NV9097_SET_COMPRESSION_THRESHOLD_SAMPLES__16 0x00000005 +#define NV9097_SET_COMPRESSION_THRESHOLD_SAMPLES__32 0x00000006 +#define NV9097_SET_COMPRESSION_THRESHOLD_SAMPLES__64 0x00000007 +#define NV9097_SET_COMPRESSION_THRESHOLD_SAMPLES__128 0x00000008 +#define NV9097_SET_COMPRESSION_THRESHOLD_SAMPLES__256 0x00000009 +#define NV9097_SET_COMPRESSION_THRESHOLD_SAMPLES__512 0x0000000A +#define NV9097_SET_COMPRESSION_THRESHOLD_SAMPLES__1024 0x0000000B +#define NV9097_SET_COMPRESSION_THRESHOLD_SAMPLES__2048 0x0000000C + +#define NV9097_SET_ZT_SIZE_A 0x1228 +#define NV9097_SET_ZT_SIZE_A_WIDTH 27:0 + +#define NV9097_SET_ZT_SIZE_B 0x122c +#define NV9097_SET_ZT_SIZE_B_HEIGHT 16:0 + +#define NV9097_SET_ZT_SIZE_C 0x1230 +#define NV9097_SET_ZT_SIZE_C_THIRD_DIMENSION 15:0 +#define NV9097_SET_ZT_SIZE_C_CONTROL 16:16 +#define NV9097_SET_ZT_SIZE_C_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000 +#define NV9097_SET_ZT_SIZE_C_CONTROL_ARRAY_SIZE_IS_ONE 0x00000001 + +#define NV9097_SET_SAMPLER_BINDING 0x1234 +#define NV9097_SET_SAMPLER_BINDING_V 0:0 +#define NV9097_SET_SAMPLER_BINDING_V_INDEPENDENTLY 0x00000000 +#define NV9097_SET_SAMPLER_BINDING_V_VIA_HEADER_BINDING 0x00000001 + +#define NV9097_DRAW_AUTO 0x123c +#define NV9097_DRAW_AUTO_BYTE_COUNT 31:0 + +#define NV9097_SET_CIRCULAR_BUFFER_SIZE 0x1280 +#define NV9097_SET_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 9:0 + +#define NV9097_SET_VTG_REGISTER_WATERMARKS 0x1284 +#define NV9097_SET_VTG_REGISTER_WATERMARKS_LOW 15:0 +#define NV9097_SET_VTG_REGISTER_WATERMARKS_HIGH 31:16 + +#define NV9097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NV9097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NV9097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NV9097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NV9097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS 0x1290 +#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY 5:4 +#define 
+#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NV9097_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE 0x12a4
+#define NV9097_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE_V 31:0
+
+#define NV9097_SET_SHADER_SCHEDULING 0x12ac
+#define NV9097_SET_SHADER_SCHEDULING_MODE 0:0
+#define NV9097_SET_SHADER_SCHEDULING_MODE_OLDEST_THREAD_FIRST 0x00000000
+#define NV9097_SET_SHADER_SCHEDULING_MODE_ROUND_ROBIN 0x00000001
+
+#define NV9097_CLEAR_ZCULL_REGION 0x12c8
+#define NV9097_CLEAR_ZCULL_REGION_Z_ENABLE 0:0
+#define NV9097_CLEAR_ZCULL_REGION_Z_ENABLE_FALSE 0x00000000
+#define NV9097_CLEAR_ZCULL_REGION_Z_ENABLE_TRUE 0x00000001
+#define NV9097_CLEAR_ZCULL_REGION_STENCIL_ENABLE 4:4
+#define NV9097_CLEAR_ZCULL_REGION_STENCIL_ENABLE_FALSE 0x00000000
+#define NV9097_CLEAR_ZCULL_REGION_STENCIL_ENABLE_TRUE 0x00000001
+#define NV9097_CLEAR_ZCULL_REGION_USE_CLEAR_RECT 1:1
+#define NV9097_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_FALSE 0x00000000
+#define NV9097_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_TRUE 0x00000001
+#define NV9097_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX 2:2
+#define NV9097_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_FALSE 0x00000000
+#define NV9097_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_TRUE 0x00000001
+#define NV9097_CLEAR_ZCULL_REGION_RT_ARRAY_INDEX 20:5
+#define NV9097_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE 3:3
+#define NV9097_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_FALSE 0x00000000
+#define NV9097_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_TRUE 0x00000001
+
+#define NV9097_SET_DEPTH_TEST 0x12cc
+#define NV9097_SET_DEPTH_TEST_ENABLE 0:0
+#define NV9097_SET_DEPTH_TEST_ENABLE_FALSE 0x00000000
+#define NV9097_SET_DEPTH_TEST_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_FILL_MODE 0x12d0
+#define NV9097_SET_FILL_MODE_V 31:0
+#define NV9097_SET_FILL_MODE_V_POINT 0x00000001
+#define NV9097_SET_FILL_MODE_V_WIREFRAME 0x00000002
+#define NV9097_SET_FILL_MODE_V_SOLID 0x00000003
+
+#define NV9097_SET_SHADE_MODE 0x12d4
+#define NV9097_SET_SHADE_MODE_V 31:0
+#define NV9097_SET_SHADE_MODE_V_FLAT 0x00000001
+#define NV9097_SET_SHADE_MODE_V_GOURAUD 0x00000002
+#define NV9097_SET_SHADE_MODE_V_OGL_FLAT 0x00001D00
+#define NV9097_SET_SHADE_MODE_V_OGL_SMOOTH 0x00001D01
+
+#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS 0x12d8
+#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY 5:4
+#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS 0x12dc
+#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY 5:4
+#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NV9097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NV9097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL 0x12e0
+#define NV9097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT 3:0
+#define NV9097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1 0x00000000
+#define NV9097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_2X2 0x00000001
+#define NV9097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1_VIRTUAL_SAMPLES 0x00000002
+
+#define NV9097_SET_BLEND_STATE_PER_TARGET 0x12e4
+#define NV9097_SET_BLEND_STATE_PER_TARGET_ENABLE 0:0
+#define NV9097_SET_BLEND_STATE_PER_TARGET_ENABLE_FALSE 0x00000000
+#define NV9097_SET_BLEND_STATE_PER_TARGET_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_DEPTH_WRITE 0x12e8
+#define NV9097_SET_DEPTH_WRITE_ENABLE 0:0
+#define NV9097_SET_DEPTH_WRITE_ENABLE_FALSE 0x00000000
+#define NV9097_SET_DEPTH_WRITE_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_ALPHA_TEST 0x12ec
+#define NV9097_SET_ALPHA_TEST_ENABLE 0:0
+#define NV9097_SET_ALPHA_TEST_ENABLE_FALSE 0x00000000
+#define NV9097_SET_ALPHA_TEST_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_INLINE_INDEX4X8_ALIGN 0x1300
+#define NV9097_SET_INLINE_INDEX4X8_ALIGN_COUNT 29:0
+#define NV9097_SET_INLINE_INDEX4X8_ALIGN_START 31:30
+
+#define NV9097_DRAW_INLINE_INDEX4X8 0x1304
+#define NV9097_DRAW_INLINE_INDEX4X8_INDEX0 7:0
+#define NV9097_DRAW_INLINE_INDEX4X8_INDEX1 15:8
+#define NV9097_DRAW_INLINE_INDEX4X8_INDEX2 23:16
+#define NV9097_DRAW_INLINE_INDEX4X8_INDEX3 31:24
+
+#define NV9097_D3D_SET_CULL_MODE 0x1308
+#define NV9097_D3D_SET_CULL_MODE_V 31:0
+#define NV9097_D3D_SET_CULL_MODE_V_NONE 0x00000001
+#define NV9097_D3D_SET_CULL_MODE_V_CW 0x00000002
+#define NV9097_D3D_SET_CULL_MODE_V_CCW 0x00000003
+
+#define NV9097_SET_DEPTH_FUNC 0x130c
+#define NV9097_SET_DEPTH_FUNC_V 31:0
+#define NV9097_SET_DEPTH_FUNC_V_OGL_NEVER 0x00000200
+#define NV9097_SET_DEPTH_FUNC_V_OGL_LESS 0x00000201
+#define NV9097_SET_DEPTH_FUNC_V_OGL_EQUAL 0x00000202
+#define NV9097_SET_DEPTH_FUNC_V_OGL_LEQUAL 0x00000203
+#define NV9097_SET_DEPTH_FUNC_V_OGL_GREATER 0x00000204
+#define NV9097_SET_DEPTH_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NV9097_SET_DEPTH_FUNC_V_OGL_GEQUAL 0x00000206
+#define NV9097_SET_DEPTH_FUNC_V_OGL_ALWAYS 0x00000207
+#define NV9097_SET_DEPTH_FUNC_V_D3D_NEVER 0x00000001
+#define NV9097_SET_DEPTH_FUNC_V_D3D_LESS 0x00000002
+#define NV9097_SET_DEPTH_FUNC_V_D3D_EQUAL 0x00000003
+#define NV9097_SET_DEPTH_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NV9097_SET_DEPTH_FUNC_V_D3D_GREATER 0x00000005
+#define NV9097_SET_DEPTH_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NV9097_SET_DEPTH_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NV9097_SET_DEPTH_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NV9097_SET_ALPHA_REF 0x1310
+#define NV9097_SET_ALPHA_REF_V 31:0
+
+#define NV9097_SET_ALPHA_FUNC 0x1314
+#define NV9097_SET_ALPHA_FUNC_V 31:0
+#define NV9097_SET_ALPHA_FUNC_V_OGL_NEVER 0x00000200
+#define NV9097_SET_ALPHA_FUNC_V_OGL_LESS 0x00000201
+#define NV9097_SET_ALPHA_FUNC_V_OGL_EQUAL 0x00000202
+#define NV9097_SET_ALPHA_FUNC_V_OGL_LEQUAL 0x00000203
+#define NV9097_SET_ALPHA_FUNC_V_OGL_GREATER 0x00000204
+#define NV9097_SET_ALPHA_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NV9097_SET_ALPHA_FUNC_V_OGL_GEQUAL 0x00000206
+#define NV9097_SET_ALPHA_FUNC_V_OGL_ALWAYS 0x00000207
+#define NV9097_SET_ALPHA_FUNC_V_D3D_NEVER 0x00000001
+#define NV9097_SET_ALPHA_FUNC_V_D3D_LESS 0x00000002
+#define NV9097_SET_ALPHA_FUNC_V_D3D_EQUAL 0x00000003
+#define NV9097_SET_ALPHA_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NV9097_SET_ALPHA_FUNC_V_D3D_GREATER 0x00000005
+#define NV9097_SET_ALPHA_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NV9097_SET_ALPHA_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NV9097_SET_ALPHA_FUNC_V_D3D_ALWAYS 0x00000008
+
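Throughout this header a pair of decimal numbers of the form HI:LO names a bitfield spanning bits LO through HI of the 32-bit method data word, and the hex constants listed beneath a field are the values it accepts; NV9097_SET_DEPTH_FUNC_V and NV9097_SET_ALPHA_FUNC_V, for example, take either the OpenGL comparison enums (0x0200..0x0207, the GL_NEVER..GL_ALWAYS values) or the D3D encoding (1..8) in the same 31:0 field. A minimal C sketch of packing such a field under that reading, using hypothetical nvfield helpers that are not part of this header:

    /* Hypothetical helpers, illustrative only -- not defined in this header. */
    static inline unsigned int nvfield_mask(unsigned int hi, unsigned int lo)
    {
        /* All-ones mask covering bits lo..hi of a 32-bit word. */
        return (0xFFFFFFFFu >> (31u - hi)) & ~((1u << lo) - 1u);
    }

    static inline unsigned int nvfield_set(unsigned int word, unsigned int hi,
                                           unsigned int lo, unsigned int value)
    {
        unsigned int mask = nvfield_mask(hi, lo);
        return (word & ~mask) | ((value << lo) & mask);
    }

    /* Pack the OGL LESS compare (0x0201) into SET_DEPTH_FUNC's 31:0 field. */
    unsigned int depth_func = nvfield_set(0u, 31u, 0u, 0x00000201u);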
+#define NV9097_SET_DRAW_AUTO_STRIDE 0x1318
+#define NV9097_SET_DRAW_AUTO_STRIDE_V 11:0
+
+#define NV9097_SET_BLEND_CONST_RED 0x131c
+#define NV9097_SET_BLEND_CONST_RED_V 31:0
+
+#define NV9097_SET_BLEND_CONST_GREEN 0x1320
+#define NV9097_SET_BLEND_CONST_GREEN_V 31:0
+
+#define NV9097_SET_BLEND_CONST_BLUE 0x1324
+#define NV9097_SET_BLEND_CONST_BLUE_V 31:0
+
+#define NV9097_SET_BLEND_CONST_ALPHA 0x1328
+#define NV9097_SET_BLEND_CONST_ALPHA_V 31:0
+
+#define NV9097_INVALIDATE_SAMPLER_CACHE 0x1330
+#define NV9097_INVALIDATE_SAMPLER_CACHE_LINES 0:0
+#define NV9097_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000
+#define NV9097_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001
+#define NV9097_INVALIDATE_SAMPLER_CACHE_TAG 25:4
+
+#define NV9097_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334
+#define NV9097_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0
+#define NV9097_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000
+#define NV9097_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001
+#define NV9097_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4
+
+#define NV9097_INVALIDATE_TEXTURE_DATA_CACHE 0x1338
+#define NV9097_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0
+#define NV9097_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000
+#define NV9097_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001
+#define NV9097_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4
+#define NV9097_INVALIDATE_TEXTURE_DATA_CACHE_LEVELS 2:1
+#define NV9097_INVALIDATE_TEXTURE_DATA_CACHE_LEVELS_L1_ONLY 0x00000000
+
+#define NV9097_SET_BLEND_SEPARATE_FOR_ALPHA 0x133c
+#define NV9097_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE 0:0
+#define NV9097_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000
+#define NV9097_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_BLEND_COLOR_OP 0x1340
+#define NV9097_SET_BLEND_COLOR_OP_V 31:0
+#define NV9097_SET_BLEND_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NV9097_SET_BLEND_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NV9097_SET_BLEND_COLOR_OP_V_OGL_FUNC_ADD 0x00008006
+#define NV9097_SET_BLEND_COLOR_OP_V_OGL_MIN 0x00008007
+#define NV9097_SET_BLEND_COLOR_OP_V_OGL_MAX 0x00008008
+#define NV9097_SET_BLEND_COLOR_OP_V_D3D_ADD 0x00000001
+#define NV9097_SET_BLEND_COLOR_OP_V_D3D_SUBTRACT 0x00000002
+#define NV9097_SET_BLEND_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NV9097_SET_BLEND_COLOR_OP_V_D3D_MIN 0x00000004
+#define NV9097_SET_BLEND_COLOR_OP_V_D3D_MAX 0x00000005
+
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF 0x1344
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V 31:0
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF 0x1348
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V 31:0
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NV9097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NV9097_SET_BLEND_ALPHA_OP 0x134c
+#define NV9097_SET_BLEND_ALPHA_OP_V 31:0
+#define NV9097_SET_BLEND_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NV9097_SET_BLEND_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NV9097_SET_BLEND_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006
+#define NV9097_SET_BLEND_ALPHA_OP_V_OGL_MIN 0x00008007
+#define NV9097_SET_BLEND_ALPHA_OP_V_OGL_MAX 0x00008008
+#define NV9097_SET_BLEND_ALPHA_OP_V_D3D_ADD 0x00000001
+#define NV9097_SET_BLEND_ALPHA_OP_V_D3D_SUBTRACT 0x00000002
+#define NV9097_SET_BLEND_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NV9097_SET_BLEND_ALPHA_OP_V_D3D_MIN 0x00000004
+#define NV9097_SET_BLEND_ALPHA_OP_V_D3D_MAX 0x00000005
+
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF 0x1350
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V 31:0
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NV9097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NV9097_SET_GLOBAL_COLOR_KEY 0x1354
+#define NV9097_SET_GLOBAL_COLOR_KEY_ENABLE 0:0
+#define NV9097_SET_GLOBAL_COLOR_KEY_ENABLE_FALSE 0x00000000
+#define NV9097_SET_GLOBAL_COLOR_KEY_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF 0x1358
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V 31:0
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NV9097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NV9097_SET_SINGLE_ROP_CONTROL 0x135c
+#define NV9097_SET_SINGLE_ROP_CONTROL_ENABLE 0:0
+#define NV9097_SET_SINGLE_ROP_CONTROL_ENABLE_FALSE 0x00000000
+#define NV9097_SET_SINGLE_ROP_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_BLEND(i) (0x1360+(i)*4)
+#define NV9097_SET_BLEND_ENABLE 0:0
+#define NV9097_SET_BLEND_ENABLE_FALSE 0x00000000
+#define NV9097_SET_BLEND_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_STENCIL_TEST 0x1380
+#define NV9097_SET_STENCIL_TEST_ENABLE 0:0
+#define NV9097_SET_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NV9097_SET_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_STENCIL_OP_FAIL 0x1384
+#define NV9097_SET_STENCIL_OP_FAIL_V 31:0
+#define NV9097_SET_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NV9097_SET_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NV9097_SET_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NV9097_SET_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NV9097_SET_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NV9097_SET_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NV9097_SET_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NV9097_SET_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NV9097_SET_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NV9097_SET_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NV9097_SET_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NV9097_SET_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NV9097_SET_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NV9097_SET_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NV9097_SET_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NV9097_SET_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NV9097_SET_STENCIL_OP_ZFAIL 0x1388
+#define NV9097_SET_STENCIL_OP_ZFAIL_V 31:0
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NV9097_SET_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NV9097_SET_STENCIL_OP_ZPASS 0x138c
+#define NV9097_SET_STENCIL_OP_ZPASS_V 31:0
+#define NV9097_SET_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NV9097_SET_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NV9097_SET_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NV9097_SET_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NV9097_SET_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NV9097_SET_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NV9097_SET_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NV9097_SET_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NV9097_SET_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NV9097_SET_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NV9097_SET_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NV9097_SET_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NV9097_SET_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NV9097_SET_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NV9097_SET_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NV9097_SET_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NV9097_SET_STENCIL_FUNC 0x1390
+#define NV9097_SET_STENCIL_FUNC_V 31:0
+#define NV9097_SET_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NV9097_SET_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NV9097_SET_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NV9097_SET_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NV9097_SET_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NV9097_SET_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NV9097_SET_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NV9097_SET_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NV9097_SET_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NV9097_SET_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NV9097_SET_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NV9097_SET_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NV9097_SET_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NV9097_SET_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NV9097_SET_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NV9097_SET_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NV9097_SET_STENCIL_FUNC_REF 0x1394
+#define NV9097_SET_STENCIL_FUNC_REF_V 7:0
+
+#define NV9097_SET_STENCIL_FUNC_MASK 0x1398
+#define NV9097_SET_STENCIL_FUNC_MASK_V 7:0
+
+#define NV9097_SET_STENCIL_MASK 0x139c
+#define NV9097_SET_STENCIL_MASK_V 7:0
+
+#define NV9097_SET_DRAW_AUTO_START 0x13a4
+#define NV9097_SET_DRAW_AUTO_START_BYTE_COUNT 31:0
+
+#define NV9097_SET_PS_SATURATE 0x13a8
+#define NV9097_SET_PS_SATURATE_OUTPUT0 0:0
+#define NV9097_SET_PS_SATURATE_OUTPUT0_FALSE 0x00000000
+#define NV9097_SET_PS_SATURATE_OUTPUT0_TRUE 0x00000001
+#define NV9097_SET_PS_SATURATE_OUTPUT1 4:4
+#define NV9097_SET_PS_SATURATE_OUTPUT1_FALSE 0x00000000
+#define NV9097_SET_PS_SATURATE_OUTPUT1_TRUE 0x00000001
+#define NV9097_SET_PS_SATURATE_OUTPUT2 8:8
+#define NV9097_SET_PS_SATURATE_OUTPUT2_FALSE 0x00000000
+#define NV9097_SET_PS_SATURATE_OUTPUT2_TRUE 0x00000001
+#define NV9097_SET_PS_SATURATE_OUTPUT3 12:12
+#define NV9097_SET_PS_SATURATE_OUTPUT3_FALSE 0x00000000
+#define NV9097_SET_PS_SATURATE_OUTPUT3_TRUE 0x00000001
+#define NV9097_SET_PS_SATURATE_OUTPUT4 16:16
+#define NV9097_SET_PS_SATURATE_OUTPUT4_FALSE 0x00000000
+#define NV9097_SET_PS_SATURATE_OUTPUT4_TRUE 0x00000001
+#define NV9097_SET_PS_SATURATE_OUTPUT5 20:20
+#define NV9097_SET_PS_SATURATE_OUTPUT5_FALSE 0x00000000
+#define NV9097_SET_PS_SATURATE_OUTPUT5_TRUE 0x00000001
+#define NV9097_SET_PS_SATURATE_OUTPUT6 24:24
+#define NV9097_SET_PS_SATURATE_OUTPUT6_FALSE 0x00000000
+#define NV9097_SET_PS_SATURATE_OUTPUT6_TRUE 0x00000001
+#define NV9097_SET_PS_SATURATE_OUTPUT7 28:28
+#define NV9097_SET_PS_SATURATE_OUTPUT7_FALSE 0x00000000
+#define NV9097_SET_PS_SATURATE_OUTPUT7_TRUE 0x00000001
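NV9097_SET_PS_SATURATE above gives each pixel-shader color output its own single-bit enable, placed at bit 4*i for output i (fields 0:0, 4:4, ... 28:28). A short, purely illustrative sketch that builds the method data word from a per-output bitmask; the helper name and its argument convention are assumptions, not part of this header:

    /* Build a SET_PS_SATURATE data word: bit i of 'targets' requests
     * OUTPUTi_TRUE, which lives at bit position 4*i of the method word. */
    static unsigned int ps_saturate_word(unsigned int targets)
    {
        unsigned int word = 0u, i;
        for (i = 0u; i < 8u; i++)
            if (targets & (1u << i))
                word |= 1u << (4u * i);
        return word;
    }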
+
+#define NV9097_SET_WINDOW_ORIGIN 0x13ac
+#define NV9097_SET_WINDOW_ORIGIN_MODE 0:0
+#define NV9097_SET_WINDOW_ORIGIN_MODE_UPPER_LEFT 0x00000000
+#define NV9097_SET_WINDOW_ORIGIN_MODE_LOWER_LEFT 0x00000001
+#define NV9097_SET_WINDOW_ORIGIN_FLIP_Y 4:4
+#define NV9097_SET_WINDOW_ORIGIN_FLIP_Y_FALSE 0x00000000
+#define NV9097_SET_WINDOW_ORIGIN_FLIP_Y_TRUE 0x00000001
+
+#define NV9097_SET_LINE_WIDTH_FLOAT 0x13b0
+#define NV9097_SET_LINE_WIDTH_FLOAT_V 31:0
+
+#define NV9097_SET_ALIASED_LINE_WIDTH_FLOAT 0x13b4
+#define NV9097_SET_ALIASED_LINE_WIDTH_FLOAT_V 31:0
+
+#define NV9097_SET_LINE_MULTISAMPLE_OVERRIDE 0x1418
+#define NV9097_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE 0:0
+#define NV9097_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_FALSE 0x00000000
+#define NV9097_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_ALPHA_HYSTERESIS 0x1420
+#define NV9097_SET_ALPHA_HYSTERESIS_ROUNDS_OF_ALPHA 7:0
+
+#define NV9097_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424
+#define NV9097_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0
+#define NV9097_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NV9097_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NV9097_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4
+
+#define NV9097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x1428
+#define NV9097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0
+#define NV9097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NV9097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NV9097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4
+
+#define NV9097_INVALIDATE_DA_DMA_CACHE 0x142c
+#define NV9097_INVALIDATE_DA_DMA_CACHE_V 0:0
+
+#define NV9097_X_X_X_SET_REDUCE_DST_COLOR 0x1430
+#define NV9097_X_X_X_SET_REDUCE_DST_COLOR_UNORM_ENABLE 4:4
+#define NV9097_X_X_X_SET_REDUCE_DST_COLOR_UNORM_ENABLE_FALSE 0x00000000
+#define NV9097_X_X_X_SET_REDUCE_DST_COLOR_UNORM_ENABLE_TRUE 0x00000001
+#define NV9097_X_X_X_SET_REDUCE_DST_COLOR_SRGB_ENABLE 8:8
+#define NV9097_X_X_X_SET_REDUCE_DST_COLOR_SRGB_ENABLE_FALSE 0x00000000
+#define NV9097_X_X_X_SET_REDUCE_DST_COLOR_SRGB_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_GLOBAL_BASE_VERTEX_INDEX 0x1434
+#define NV9097_SET_GLOBAL_BASE_VERTEX_INDEX_V 31:0
+
+#define NV9097_SET_GLOBAL_BASE_INSTANCE_INDEX 0x1438
+#define NV9097_SET_GLOBAL_BASE_INSTANCE_INDEX_V 31:0
+
+#define NV9097_X_X_X_SET_CLEAR_CONTROL 0x143c
+#define NV9097_X_X_X_SET_CLEAR_CONTROL_RESPECT_STENCIL_MASK 0:0
+#define NV9097_X_X_X_SET_CLEAR_CONTROL_RESPECT_STENCIL_MASK_FALSE 0x00000000
+#define NV9097_X_X_X_SET_CLEAR_CONTROL_RESPECT_STENCIL_MASK_TRUE 0x00000001
+#define NV9097_X_X_X_SET_CLEAR_CONTROL_USE_CLEAR_RECT 4:4
+#define NV9097_X_X_X_SET_CLEAR_CONTROL_USE_CLEAR_RECT_FALSE 0x00000000
+#define NV9097_X_X_X_SET_CLEAR_CONTROL_USE_CLEAR_RECT_TRUE 0x00000001
+
+#define NV9097_SET_PS_WARP_WATERMARKS 0x1450
+#define NV9097_SET_PS_WARP_WATERMARKS_LOW 15:0
+#define NV9097_SET_PS_WARP_WATERMARKS_HIGH 31:16
+
+#define NV9097_SET_PS_REGISTER_WATERMARKS 0x1454
+#define NV9097_SET_PS_REGISTER_WATERMARKS_LOW 15:0
+#define NV9097_SET_PS_REGISTER_WATERMARKS_HIGH 31:16
+
+#define NV9097_STORE_ZCULL 0x1464
+#define NV9097_STORE_ZCULL_V 0:0
+
+#define NV9097_LOAD_ZCULL 0x1500
+#define NV9097_LOAD_ZCULL_V 0:0
+
+#define NV9097_SET_SURFACE_CLIP_ID_HEIGHT 0x1504
+#define NV9097_SET_SURFACE_CLIP_ID_HEIGHT_V 31:0
+
+#define NV9097_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL 0x1508
+#define NV9097_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NV9097_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NV9097_SET_CLIP_ID_CLEAR_RECT_VERTICAL 0x150c
+#define NV9097_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NV9097_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NV9097_SET_USER_CLIP_ENABLE 0x1510
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE0 0:0
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE0_FALSE 0x00000000
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE0_TRUE 0x00000001
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE1 1:1
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE1_FALSE 0x00000000
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE1_TRUE 0x00000001
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE2 2:2
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE2_FALSE 0x00000000
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE2_TRUE 0x00000001
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE3 3:3
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE3_FALSE 0x00000000
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE3_TRUE 0x00000001
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE4 4:4
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE4_FALSE 0x00000000
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE4_TRUE 0x00000001
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE5 5:5
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE5_FALSE 0x00000000
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE5_TRUE 0x00000001
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE6 6:6
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE6_FALSE 0x00000000
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE6_TRUE 0x00000001
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE7 7:7
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE7_FALSE 0x00000000
+#define NV9097_SET_USER_CLIP_ENABLE_PLANE7_TRUE 0x00000001
+
+#define NV9097_SET_ZPASS_PIXEL_COUNT 0x1514
+#define NV9097_SET_ZPASS_PIXEL_COUNT_ENABLE 0:0
+#define NV9097_SET_ZPASS_PIXEL_COUNT_ENABLE_FALSE 0x00000000
+#define NV9097_SET_ZPASS_PIXEL_COUNT_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_POINT_SIZE 0x1518
+#define NV9097_SET_POINT_SIZE_V 31:0
+
+#define NV9097_SET_ZCULL_STATS 0x151c
+#define NV9097_SET_ZCULL_STATS_ENABLE 0:0
+#define NV9097_SET_ZCULL_STATS_ENABLE_FALSE 0x00000000
+#define NV9097_SET_ZCULL_STATS_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_POINT_SPRITE 0x1520
+#define NV9097_SET_POINT_SPRITE_ENABLE 0:0
+#define NV9097_SET_POINT_SPRITE_ENABLE_FALSE 0x00000000
+#define NV9097_SET_POINT_SPRITE_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_SHADER_EXCEPTIONS 0x1528
+#define NV9097_SET_SHADER_EXCEPTIONS_ENABLE 0:0
+#define NV9097_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000
+#define NV9097_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001
+
+#define NV9097_CLEAR_REPORT_VALUE 0x1530
+#define NV9097_CLEAR_REPORT_VALUE_TYPE 4:0
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_DA_VERTICES_GENERATED 0x00000012
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_DA_PRIMITIVES_GENERATED 0x00000013
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_VS_INVOCATIONS 0x00000015
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_TI_INVOCATIONS 0x00000016
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_TS_INVOCATIONS 0x00000017
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_TS_PRIMITIVES_GENERATED 0x00000018
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_GS_INVOCATIONS 0x0000001A
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_GS_PRIMITIVES_GENERATED 0x0000001B
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_VTG_PRIMITIVES_OUT 0x0000001F
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_SUCCEEDED 0x00000010
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_NEEDED 0x00000011
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000003
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_CLIPPER_INVOCATIONS 0x0000001C
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_CLIPPER_PRIMITIVES_GENERATED 0x0000001D
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_ZCULL_STATS 0x00000002
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_PS_INVOCATIONS 0x0000001E
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_ZPASS_PIXEL_CNT 0x00000001
+#define NV9097_CLEAR_REPORT_VALUE_TYPE_ALPHA_BETA_CLOCKS 0x00000004
+
+#define NV9097_SET_ANTI_ALIAS_ENABLE 0x1534
+#define NV9097_SET_ANTI_ALIAS_ENABLE_V 0:0
+#define NV9097_SET_ANTI_ALIAS_ENABLE_V_FALSE 0x00000000
+#define NV9097_SET_ANTI_ALIAS_ENABLE_V_TRUE 0x00000001
+
+#define NV9097_SET_ZT_SELECT 0x1538
+#define NV9097_SET_ZT_SELECT_TARGET_COUNT 0:0
+
+#define NV9097_SET_ANTI_ALIAS_ALPHA_CONTROL 0x153c
+#define NV9097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE 0:0
+#define NV9097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_DISABLE 0x00000000
+#define NV9097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_ENABLE 0x00000001
+#define NV9097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE 4:4
+#define NV9097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_DISABLE 0x00000000
+#define NV9097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_ENABLE 0x00000001
+
+#define NV9097_SET_RENDER_ENABLE_A 0x1550
+#define NV9097_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0
+
+#define NV9097_SET_RENDER_ENABLE_B 0x1554
+#define NV9097_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0
+
+#define NV9097_SET_RENDER_ENABLE_C 0x1558
+#define NV9097_SET_RENDER_ENABLE_C_MODE 2:0
+#define NV9097_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000
+#define NV9097_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001
+#define NV9097_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002
+#define NV9097_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003
+#define NV9097_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NV9097_SET_TEX_SAMPLER_POOL_A 0x155c
+#define NV9097_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0
+
+#define NV9097_SET_TEX_SAMPLER_POOL_B 0x1560
+#define NV9097_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0
+
+#define NV9097_SET_TEX_SAMPLER_POOL_C 0x1564
+#define NV9097_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0
+
+#define NV9097_SET_SLOPE_SCALE_DEPTH_BIAS 0x156c
+#define NV9097_SET_SLOPE_SCALE_DEPTH_BIAS_V 31:0
+
+#define NV9097_SET_ANTI_ALIASED_LINE 0x1570
+#define NV9097_SET_ANTI_ALIASED_LINE_ENABLE 0:0
+#define NV9097_SET_ANTI_ALIASED_LINE_ENABLE_FALSE 0x00000000
+#define NV9097_SET_ANTI_ALIASED_LINE_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_TEX_HEADER_POOL_A 0x1574
+#define NV9097_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0
+
+#define NV9097_SET_TEX_HEADER_POOL_B 0x1578
+#define NV9097_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0
+
+#define NV9097_SET_TEX_HEADER_POOL_C 0x157c
+#define NV9097_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0
+
+#define NV9097_SET_ACTIVE_ZCULL_REGION 0x1590
+#define NV9097_SET_ACTIVE_ZCULL_REGION_ID 5:0
+
+#define NV9097_SET_TWO_SIDED_STENCIL_TEST 0x1594
+#define NV9097_SET_TWO_SIDED_STENCIL_TEST_ENABLE 0:0
+#define NV9097_SET_TWO_SIDED_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NV9097_SET_TWO_SIDED_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_BACK_STENCIL_OP_FAIL 0x1598
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V 31:0
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NV9097_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL 0x159c
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V 31:0
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NV9097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS 0x15a0
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V 31:0
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NV9097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NV9097_SET_BACK_STENCIL_FUNC 0x15a4
+#define NV9097_SET_BACK_STENCIL_FUNC_V 31:0
+#define NV9097_SET_BACK_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NV9097_SET_BACK_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NV9097_SET_BACK_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NV9097_SET_BACK_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NV9097_SET_BACK_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NV9097_SET_BACK_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NV9097_SET_BACK_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NV9097_SET_BACK_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NV9097_SET_BACK_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NV9097_SET_BACK_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NV9097_SET_BACK_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NV9097_SET_BACK_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NV9097_SET_BACK_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NV9097_SET_BACK_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NV9097_SET_BACK_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NV9097_SET_BACK_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NV9097_SET_SRGB_WRITE 0x15b8
+#define NV9097_SET_SRGB_WRITE_ENABLE 0:0
+#define NV9097_SET_SRGB_WRITE_ENABLE_FALSE 0x00000000
+#define NV9097_SET_SRGB_WRITE_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_DEPTH_BIAS 0x15bc
+#define NV9097_SET_DEPTH_BIAS_V 31:0
+
+#define NV9097_SET_ZCULL_REGION_FORMAT 0x15c8
+#define NV9097_SET_ZCULL_REGION_FORMAT_TYPE 1:0
+#define NV9097_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X4 0x00000000
+#define NV9097_SET_ZCULL_REGION_FORMAT_TYPE_ZS_4X4 0x00000001
+#define NV9097_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X2 0x00000002
+#define NV9097_SET_ZCULL_REGION_FORMAT_TYPE_Z_2X4 0x00000003
+
+#define NV9097_SET_RT_LAYER 0x15cc
+#define NV9097_SET_RT_LAYER_V 15:0
+#define NV9097_SET_RT_LAYER_CONTROL 16:16
+#define NV9097_SET_RT_LAYER_CONTROL_V_SELECTS_LAYER 0x00000000
+#define NV9097_SET_RT_LAYER_CONTROL_GEOMETRY_SHADER_SELECTS_LAYER 0x00000001
+
+#define NV9097_SET_ANTI_ALIAS 0x15d0
+#define NV9097_SET_ANTI_ALIAS_SAMPLES 3:0
+#define NV9097_SET_ANTI_ALIAS_SAMPLES_MODE_1X1 0x00000000
+#define NV9097_SET_ANTI_ALIAS_SAMPLES_MODE_2X1 0x00000001
+#define NV9097_SET_ANTI_ALIAS_SAMPLES_MODE_2X2 0x00000002
+#define NV9097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2 0x00000003
+#define NV9097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NV9097_SET_ANTI_ALIAS_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NV9097_SET_ANTI_ALIAS_SAMPLES_MODE_4X4 0x00000006
+#define NV9097_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_4 0x00000008
+#define NV9097_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_12 0x00000009
+#define NV9097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_8 0x0000000A
+#define NV9097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_24 0x0000000B
+
+#define NV9097_SET_EDGE_FLAG 0x15e4
+#define NV9097_SET_EDGE_FLAG_V 0:0
+#define NV9097_SET_EDGE_FLAG_V_FALSE 0x00000000
+#define NV9097_SET_EDGE_FLAG_V_TRUE 0x00000001
+
+#define NV9097_DRAW_INLINE_INDEX 0x15e8
+#define NV9097_DRAW_INLINE_INDEX_V 31:0
+
+#define NV9097_SET_INLINE_INDEX2X16_ALIGN 0x15ec
+#define NV9097_SET_INLINE_INDEX2X16_ALIGN_COUNT 30:0
+#define NV9097_SET_INLINE_INDEX2X16_ALIGN_START_ODD 31:31
+#define NV9097_SET_INLINE_INDEX2X16_ALIGN_START_ODD_FALSE 0x00000000
+#define NV9097_SET_INLINE_INDEX2X16_ALIGN_START_ODD_TRUE 0x00000001
+
+#define NV9097_DRAW_INLINE_INDEX2X16 0x15f0
+#define NV9097_DRAW_INLINE_INDEX2X16_EVEN 15:0
+#define NV9097_DRAW_INLINE_INDEX2X16_ODD 31:16
+
+#define NV9097_SET_VERTEX_GLOBAL_BASE_OFFSET_A 0x15f4
+#define NV9097_SET_VERTEX_GLOBAL_BASE_OFFSET_A_UPPER 7:0
+
+#define NV9097_SET_VERTEX_GLOBAL_BASE_OFFSET_B 0x15f8
+#define NV9097_SET_VERTEX_GLOBAL_BASE_OFFSET_B_LOWER 31:0
+
+#define NV9097_SET_ZCULL_REGION_PIXEL_OFFSET_A 0x15fc
+#define NV9097_SET_ZCULL_REGION_PIXEL_OFFSET_A_WIDTH 15:0
+
+#define NV9097_SET_ZCULL_REGION_PIXEL_OFFSET_B 0x1600
+#define NV9097_SET_ZCULL_REGION_PIXEL_OFFSET_B_HEIGHT 15:0
+
+#define NV9097_SET_POINT_SPRITE_SELECT 0x1604
+#define NV9097_SET_POINT_SPRITE_SELECT_RMODE 1:0
+#define NV9097_SET_POINT_SPRITE_SELECT_RMODE_ZERO 0x00000000
+#define NV9097_SET_POINT_SPRITE_SELECT_RMODE_FROM_R 0x00000001
+#define NV9097_SET_POINT_SPRITE_SELECT_RMODE_FROM_S 0x00000002
+#define NV9097_SET_POINT_SPRITE_SELECT_ORIGIN 2:2
+#define NV9097_SET_POINT_SPRITE_SELECT_ORIGIN_BOTTOM 0x00000000
+#define NV9097_SET_POINT_SPRITE_SELECT_ORIGIN_TOP 0x00000001
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE0 3:3
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE0_PASSTHROUGH 0x00000000
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE0_GENERATE 0x00000001
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE1 4:4
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE1_PASSTHROUGH 0x00000000
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE1_GENERATE 0x00000001
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE2 5:5
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE2_PASSTHROUGH 0x00000000
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE2_GENERATE 0x00000001
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE3 6:6
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE3_PASSTHROUGH 0x00000000
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE3_GENERATE 0x00000001
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE4 7:7
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE4_PASSTHROUGH 0x00000000
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE4_GENERATE 0x00000001
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE5 8:8
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE5_PASSTHROUGH 0x00000000
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE5_GENERATE 0x00000001
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE6 9:9
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE6_PASSTHROUGH 0x00000000
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE6_GENERATE 0x00000001
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE7 10:10
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE7_PASSTHROUGH 0x00000000
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE7_GENERATE 0x00000001
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE8 11:11
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE8_PASSTHROUGH 0x00000000
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE8_GENERATE 0x00000001
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE9 12:12
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE9_PASSTHROUGH 0x00000000
+#define NV9097_SET_POINT_SPRITE_SELECT_TEXTURE9_GENERATE 0x00000001
+
+#define NV9097_SET_PROGRAM_REGION_A 0x1608
+#define NV9097_SET_PROGRAM_REGION_A_ADDRESS_UPPER 7:0
+
+#define NV9097_SET_PROGRAM_REGION_B 0x160c
+#define NV9097_SET_PROGRAM_REGION_B_ADDRESS_LOWER 31:0
+
+#define NV9097_SET_ATTRIBUTE_DEFAULT 0x1610
+#define NV9097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE 0:0
+#define NV9097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_0001 0x00000000
+#define NV9097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_1111 0x00000001
+#define NV9097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR 1:1
+#define NV9097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0000 0x00000000
+#define NV9097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0001 0x00000001
+#define NV9097_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR 2:2
+#define NV9097_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0000 0x00000000
+#define NV9097_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0001 0x00000001
+#define NV9097_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE 3:3
+#define NV9097_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0000 0x00000000
+#define NV9097_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0001 0x00000001
+#define NV9097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0 4:4
+#define NV9097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_0001 0x00000000
+#define NV9097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_1111 0x00000001
+#define NV9097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15 5:5
+#define NV9097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0000 0x00000000
+#define NV9097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0001 0x00000001
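NV9097_SET_ATTRIBUTE_DEFAULT packs one selector bit per attribute class; each bit picks which constant vector (for instance (0,0,0,1) versus (1,1,1,1)) is substituted when that attribute is not sourced. A sketch of composing it, again purely illustrative:

    /* Request the (1,1,1,1) default for DX9 COLOR0 (field 4:4, value 1);
     * every other selector keeps its 0x00000000 encoding. */
    unsigned int attrib_default = 1u << 4;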
+
+#define NV9097_END 0x1614
+#define NV9097_END_V 0:0
+
+#define NV9097_BEGIN 0x1618
+#define NV9097_BEGIN_OP 15:0
+#define NV9097_BEGIN_OP_POINTS 0x00000000
+#define NV9097_BEGIN_OP_LINES 0x00000001
+#define NV9097_BEGIN_OP_LINE_LOOP 0x00000002
+#define NV9097_BEGIN_OP_LINE_STRIP 0x00000003
+#define NV9097_BEGIN_OP_TRIANGLES 0x00000004
+#define NV9097_BEGIN_OP_TRIANGLE_STRIP 0x00000005
+#define NV9097_BEGIN_OP_TRIANGLE_FAN 0x00000006
+#define NV9097_BEGIN_OP_QUADS 0x00000007
+#define NV9097_BEGIN_OP_QUAD_STRIP 0x00000008
+#define NV9097_BEGIN_OP_POLYGON 0x00000009
+#define NV9097_BEGIN_OP_LINELIST_ADJCY 0x0000000A
+#define NV9097_BEGIN_OP_LINESTRIP_ADJCY 0x0000000B
+#define NV9097_BEGIN_OP_TRIANGLELIST_ADJCY 0x0000000C
+#define NV9097_BEGIN_OP_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NV9097_BEGIN_OP_PATCH 0x0000000E
+#define NV9097_BEGIN_PRIMITIVE_ID 24:24
+#define NV9097_BEGIN_PRIMITIVE_ID_FIRST 0x00000000
+#define NV9097_BEGIN_PRIMITIVE_ID_UNCHANGED 0x00000001
+#define NV9097_BEGIN_INSTANCE_ID 27:26
+#define NV9097_BEGIN_INSTANCE_ID_FIRST 0x00000000
+#define NV9097_BEGIN_INSTANCE_ID_SUBSEQUENT 0x00000001
+#define NV9097_BEGIN_INSTANCE_ID_UNCHANGED 0x00000002
+#define NV9097_BEGIN_SPLIT_MODE 30:29
+#define NV9097_BEGIN_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000
+#define NV9097_BEGIN_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001
+#define NV9097_BEGIN_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002
+#define NV9097_BEGIN_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003
+
+#define NV9097_SET_VERTEX_ID_COPY 0x161c
+#define NV9097_SET_VERTEX_ID_COPY_ENABLE 0:0
+#define NV9097_SET_VERTEX_ID_COPY_ENABLE_FALSE 0x00000000
+#define NV9097_SET_VERTEX_ID_COPY_ENABLE_TRUE 0x00000001
+#define NV9097_SET_VERTEX_ID_COPY_ATTRIBUTE_SLOT 11:4
+
+#define NV9097_ADD_TO_PRIMITIVE_ID 0x1620
+#define NV9097_ADD_TO_PRIMITIVE_ID_V 31:0
+
+#define NV9097_LOAD_PRIMITIVE_ID 0x1624
+#define NV9097_LOAD_PRIMITIVE_ID_V 31:0
+
+#define NV9097_SET_SHADER_BASED_CULL 0x162c
+#define NV9097_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE 1:1
+#define NV9097_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_FALSE 0x00000000
+#define NV9097_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_TRUE 0x00000001
+#define NV9097_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE 0:0
+#define NV9097_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_FALSE 0x00000000
+#define NV9097_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_SHADER_ISA_VERSION 0x1634
+#define NV9097_SET_SHADER_ISA_VERSION_MINOR_SUB 7:0
+#define NV9097_SET_SHADER_ISA_VERSION_MINOR 15:8
+#define NV9097_SET_SHADER_ISA_VERSION_MAJOR 23:16
+
+#define NV9097_SET_FERMI_CLASS_VERSION 0x1638
+#define NV9097_SET_FERMI_CLASS_VERSION_CURRENT 15:0
+#define NV9097_SET_FERMI_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NV9097_SET_VAB_PAGE 0x163c
+#define NV9097_SET_VAB_PAGE_READ_SELECT 0:0
+#define NV9097_SET_VAB_PAGE_READ_SELECT_PAGES_0_AND_1 0x00000000
+#define NV9097_SET_VAB_PAGE_READ_SELECT_PAGES_0_AND_2 0x00000001
+
+#define NV9097_DRAW_INLINE_VERTEX 0x1640
+#define NV9097_DRAW_INLINE_VERTEX_V 31:0
+
+#define NV9097_SET_DA_PRIMITIVE_RESTART 0x1644
+#define NV9097_SET_DA_PRIMITIVE_RESTART_ENABLE 0:0
+#define NV9097_SET_DA_PRIMITIVE_RESTART_ENABLE_FALSE 0x00000000
+#define NV9097_SET_DA_PRIMITIVE_RESTART_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_DA_PRIMITIVE_RESTART_INDEX 0x1648
+#define NV9097_SET_DA_PRIMITIVE_RESTART_INDEX_V 31:0
+
+#define NV9097_SET_DA_OUTPUT 0x164c
+#define NV9097_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START 12:12
+#define NV9097_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_FALSE 0x00000000
+#define NV9097_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_TRUE 0x00000001
+
+#define NV9097_SET_ANTI_ALIASED_POINT 0x1658
+#define NV9097_SET_ANTI_ALIASED_POINT_ENABLE 0:0
+#define NV9097_SET_ANTI_ALIASED_POINT_ENABLE_FALSE 0x00000000
+#define NV9097_SET_ANTI_ALIASED_POINT_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_POINT_CENTER_MODE 0x165c
+#define NV9097_SET_POINT_CENTER_MODE_V 31:0
+#define NV9097_SET_POINT_CENTER_MODE_V_OGL 0x00000000
+#define NV9097_SET_POINT_CENTER_MODE_V_D3D 0x00000001
+
+#define NV9097_SET_CUBEMAP_INTER_FACE_FILTERING 0x1664
+#define NV9097_SET_CUBEMAP_INTER_FACE_FILTERING_MODE 2:1
+#define NV9097_SET_CUBEMAP_INTER_FACE_FILTERING_MODE_USE_WRAP 0x00000000
+#define NV9097_SET_CUBEMAP_INTER_FACE_FILTERING_MODE_OVERRIDE_WRAP 0x00000001
+#define NV9097_SET_CUBEMAP_INTER_FACE_FILTERING_MODE_AUTO_SPAN_SEAM 0x00000002
+#define NV9097_SET_CUBEMAP_INTER_FACE_FILTERING_MODE_AUTO_CROSS_SEAM 0x00000003
+
+#define NV9097_SET_LINE_SMOOTH_PARAMETERS 0x1668
+#define NV9097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF 31:0
+#define NV9097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_00 0x00000000
+#define NV9097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_33 0x00000001
+#define NV9097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_60 0x00000002
+
+#define NV9097_SET_LINE_STIPPLE 0x166c
+#define NV9097_SET_LINE_STIPPLE_ENABLE 0:0
+#define NV9097_SET_LINE_STIPPLE_ENABLE_FALSE 0x00000000
+#define NV9097_SET_LINE_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_LINE_SMOOTH_EDGE_TABLE(i) (0x1670+(i)*4)
+#define NV9097_SET_LINE_SMOOTH_EDGE_TABLE_V0 7:0
+#define NV9097_SET_LINE_SMOOTH_EDGE_TABLE_V1 15:8
+#define NV9097_SET_LINE_SMOOTH_EDGE_TABLE_V2 23:16
+#define NV9097_SET_LINE_SMOOTH_EDGE_TABLE_V3 31:24
+
+#define NV9097_SET_LINE_STIPPLE_PARAMETERS 0x1680
+#define NV9097_SET_LINE_STIPPLE_PARAMETERS_FACTOR 7:0
+#define NV9097_SET_LINE_STIPPLE_PARAMETERS_PATTERN 23:8
+
+#define NV9097_SET_PROVOKING_VERTEX 0x1684
+#define NV9097_SET_PROVOKING_VERTEX_V 0:0
+#define NV9097_SET_PROVOKING_VERTEX_V_FIRST 0x00000000
+#define NV9097_SET_PROVOKING_VERTEX_V_LAST 0x00000001
+
+#define NV9097_SET_TWO_SIDED_LIGHT 0x1688
+#define NV9097_SET_TWO_SIDED_LIGHT_ENABLE 0:0
+#define NV9097_SET_TWO_SIDED_LIGHT_ENABLE_FALSE 0x00000000
+#define NV9097_SET_TWO_SIDED_LIGHT_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_POLYGON_STIPPLE 0x168c
+#define NV9097_SET_POLYGON_STIPPLE_ENABLE 0:0
+#define NV9097_SET_POLYGON_STIPPLE_ENABLE_FALSE 0x00000000
+#define NV9097_SET_POLYGON_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_SHADER_CONTROL 0x1690
+#define NV9097_SET_SHADER_CONTROL_DEFAULT_PARTIAL 0:0
+#define NV9097_SET_SHADER_CONTROL_DEFAULT_PARTIAL_ZERO 0x00000000
+#define NV9097_SET_SHADER_CONTROL_DEFAULT_PARTIAL_INFINITY 0x00000001
+#define NV9097_SET_SHADER_CONTROL_ZERO_TIMES_ANYTHING_IS_ZERO 16:16
+#define NV9097_SET_SHADER_CONTROL_ZERO_TIMES_ANYTHING_IS_ZERO_FALSE 0x00000000
+#define NV9097_SET_SHADER_CONTROL_ZERO_TIMES_ANYTHING_IS_ZERO_TRUE 0x00000001
+
+#define NV9097_LAUNCH_VERTEX 0x169c
+#define NV9097_LAUNCH_VERTEX_V 0:0
+
+#define NV9097_CHECK_FERMI_CLASS_VERSION 0x16a0
+#define NV9097_CHECK_FERMI_CLASS_VERSION_CURRENT 15:0
+#define NV9097_CHECK_FERMI_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NV9097_SET_SPH_VERSION 0x16a4
+#define NV9097_SET_SPH_VERSION_CURRENT 15:0
+#define NV9097_SET_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NV9097_CHECK_SPH_VERSION 0x16a8
+#define NV9097_CHECK_SPH_VERSION_CURRENT 15:0
+#define NV9097_CHECK_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NV9097_SET_ALPHA_TO_COVERAGE_OVERRIDE 0x16b4
+#define NV9097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE 0:0
+#define NV9097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000
+#define NV9097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001
+#define NV9097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT 1:1
+#define NV9097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_DISABLE 0x00000000
+#define NV9097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_ENABLE 0x00000001
+
+#define NV9097_SET_POLYGON_STIPPLE_PATTERN(i) (0x1700+(i)*4)
+#define NV9097_SET_POLYGON_STIPPLE_PATTERN_V 31:0
+
+#define NV9097_SET_AAM_VERSION 0x1790
+#define NV9097_SET_AAM_VERSION_CURRENT 15:0
+#define NV9097_SET_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NV9097_CHECK_AAM_VERSION 0x1794
+#define NV9097_CHECK_AAM_VERSION_CURRENT 15:0
+#define NV9097_CHECK_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NV9097_SET_ZT_LAYER 0x179c
+#define NV9097_SET_ZT_LAYER_OFFSET 15:0
+
+#define NV9097_SET_VAB_MEMORY_AREA_A 0x17bc
+#define NV9097_SET_VAB_MEMORY_AREA_A_OFFSET_UPPER 7:0
+
+#define NV9097_SET_VAB_MEMORY_AREA_B 0x17c0
+#define NV9097_SET_VAB_MEMORY_AREA_B_OFFSET_LOWER 31:0
+
+#define NV9097_SET_VAB_MEMORY_AREA_C 0x17c4
+#define NV9097_SET_VAB_MEMORY_AREA_C_SIZE 1:0
+#define NV9097_SET_VAB_MEMORY_AREA_C_SIZE_BYTES_64K 0x00000001
+#define NV9097_SET_VAB_MEMORY_AREA_C_SIZE_BYTES_128K 0x00000002
+#define NV9097_SET_VAB_MEMORY_AREA_C_SIZE_BYTES_256K 0x00000003
+
+#define NV9097_SET_INDEX_BUFFER_A 0x17c8
+#define NV9097_SET_INDEX_BUFFER_A_ADDRESS_UPPER 7:0
+
+#define NV9097_SET_INDEX_BUFFER_B 0x17cc
+#define NV9097_SET_INDEX_BUFFER_B_ADDRESS_LOWER 31:0
+
+#define NV9097_SET_INDEX_BUFFER_C 0x17d0
+#define NV9097_SET_INDEX_BUFFER_C_LIMIT_ADDRESS_UPPER 7:0
+
+#define NV9097_SET_INDEX_BUFFER_D 0x17d4
+#define NV9097_SET_INDEX_BUFFER_D_LIMIT_ADDRESS_LOWER 31:0
+
+#define NV9097_SET_INDEX_BUFFER_E 0x17d8
+#define NV9097_SET_INDEX_BUFFER_E_INDEX_SIZE 1:0
+#define NV9097_SET_INDEX_BUFFER_E_INDEX_SIZE_ONE_BYTE 0x00000000
+#define NV9097_SET_INDEX_BUFFER_E_INDEX_SIZE_TWO_BYTES 0x00000001
+#define NV9097_SET_INDEX_BUFFER_E_INDEX_SIZE_FOUR_BYTES 0x00000002
+
+#define NV9097_SET_INDEX_BUFFER_F 0x17dc
+#define NV9097_SET_INDEX_BUFFER_F_FIRST 31:0
+
+#define NV9097_DRAW_INDEX_BUFFER 0x17e0
+#define NV9097_DRAW_INDEX_BUFFER_COUNT 31:0
+
+#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST 0x17e4
+#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST 0x17e8 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_FIRST 15:0 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_COUNT 27:16 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST 0x17ec +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_FIRST 15:0 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_COUNT 27:16 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define 
NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f0 +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0 +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003 +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004 +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NV9097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f4 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define 
NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NV9097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f8 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NV9097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NV9097_SET_DEPTH_BIAS_CLAMP 0x187c +#define NV9097_SET_DEPTH_BIAS_CLAMP_V 31:0 + +#define NV9097_SET_VERTEX_STREAM_INSTANCE_A(i) (0x1880+(i)*4) +#define NV9097_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED 0:0 +#define NV9097_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_FALSE 0x00000000 +#define NV9097_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_TRUE 0x00000001 + +#define NV9097_SET_VERTEX_STREAM_INSTANCE_B(i) (0x18c0+(i)*4) +#define NV9097_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED 0:0 +#define NV9097_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_FALSE 0x00000000 +#define NV9097_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_TRUE 0x00000001 + +#define NV9097_SET_ATTRIBUTE_POINT_SIZE 0x1910 +#define NV9097_SET_ATTRIBUTE_POINT_SIZE_ENABLE 0:0 +#define NV9097_SET_ATTRIBUTE_POINT_SIZE_ENABLE_FALSE 0x00000000 +#define NV9097_SET_ATTRIBUTE_POINT_SIZE_ENABLE_TRUE 0x00000001 +#define NV9097_SET_ATTRIBUTE_POINT_SIZE_SLOT 11:4 + +#define NV9097_OGL_SET_CULL 0x1918 +#define NV9097_OGL_SET_CULL_ENABLE 0:0 +#define NV9097_OGL_SET_CULL_ENABLE_FALSE 0x00000000 +#define NV9097_OGL_SET_CULL_ENABLE_TRUE 0x00000001 + +#define NV9097_OGL_SET_FRONT_FACE 0x191c +#define NV9097_OGL_SET_FRONT_FACE_V 31:0 +#define NV9097_OGL_SET_FRONT_FACE_V_CW 0x00000900 +#define NV9097_OGL_SET_FRONT_FACE_V_CCW 0x00000901 + +#define NV9097_OGL_SET_CULL_FACE 0x1920 +#define NV9097_OGL_SET_CULL_FACE_V 31:0 +#define 
NV9097_OGL_SET_CULL_FACE_V_FRONT 0x00000404 +#define NV9097_OGL_SET_CULL_FACE_V_BACK 0x00000405 +#define NV9097_OGL_SET_CULL_FACE_V_FRONT_AND_BACK 0x00000408 + +#define NV9097_SET_VIEWPORT_PIXEL 0x1924 +#define NV9097_SET_VIEWPORT_PIXEL_CENTER 0:0 +#define NV9097_SET_VIEWPORT_PIXEL_CENTER_AT_HALF_INTEGERS 0x00000000 +#define NV9097_SET_VIEWPORT_PIXEL_CENTER_AT_INTEGERS 0x00000001 + +#define NV9097_SET_VIEWPORT_SCALE_OFFSET 0x192c +#define NV9097_SET_VIEWPORT_SCALE_OFFSET_ENABLE 0:0 +#define NV9097_SET_VIEWPORT_SCALE_OFFSET_ENABLE_FALSE 0x00000000 +#define NV9097_SET_VIEWPORT_SCALE_OFFSET_ENABLE_TRUE 0x00000001 + +#define NV9097_INVALIDATE_CONSTANT_BUFFER_CACHE 0x1930 +#define NV9097_INVALIDATE_CONSTANT_BUFFER_CACHE_THRU_L2 0:0 +#define NV9097_INVALIDATE_CONSTANT_BUFFER_CACHE_THRU_L2_FALSE 0x00000000 +#define NV9097_INVALIDATE_CONSTANT_BUFFER_CACHE_THRU_L2_TRUE 0x00000001 + +#define NV9097_SET_VIEWPORT_CLIP_CONTROL 0x193c +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE 0:0 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_FALSE 0x00000000 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_TRUE 0x00000001 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z 3:3 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLIP 0x00000000 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLAMP 0x00000001 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z 4:4 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLIP 0x00000000 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLAMP 0x00000001 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND 7:7 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_256 0x00000000 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_1 0x00000001 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND 10:10 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_256 0x00000000 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_1 0x00000001 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP 13:11 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP 0x00000000 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_PASSTHRU 0x00000001 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XY_CLIP 0x00000002 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XYZ_CLIP 0x00000003 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP_NO_Z_CULL 0x00000004 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_Z_CLIP 0x00000005 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z 2:1 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SAME_AS_XY_GUARDBAND 0x00000000 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_256 0x00000001 +#define NV9097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_1 0x00000002 + +#define NV9097_SET_USER_CLIP_OP 0x1940 +#define NV9097_SET_USER_CLIP_OP_PLANE0 0:0 +#define NV9097_SET_USER_CLIP_OP_PLANE0_CLIP 0x00000000 +#define NV9097_SET_USER_CLIP_OP_PLANE0_CULL 0x00000001 +#define NV9097_SET_USER_CLIP_OP_PLANE1 4:4 +#define NV9097_SET_USER_CLIP_OP_PLANE1_CLIP 0x00000000 +#define NV9097_SET_USER_CLIP_OP_PLANE1_CULL 0x00000001 +#define NV9097_SET_USER_CLIP_OP_PLANE2 8:8 +#define NV9097_SET_USER_CLIP_OP_PLANE2_CLIP 0x00000000 +#define NV9097_SET_USER_CLIP_OP_PLANE2_CULL 0x00000001 +#define NV9097_SET_USER_CLIP_OP_PLANE3 12:12 +#define NV9097_SET_USER_CLIP_OP_PLANE3_CLIP 0x00000000 +#define 
NV9097_SET_USER_CLIP_OP_PLANE3_CULL 0x00000001 +#define NV9097_SET_USER_CLIP_OP_PLANE4 16:16 +#define NV9097_SET_USER_CLIP_OP_PLANE4_CLIP 0x00000000 +#define NV9097_SET_USER_CLIP_OP_PLANE4_CULL 0x00000001 +#define NV9097_SET_USER_CLIP_OP_PLANE5 20:20 +#define NV9097_SET_USER_CLIP_OP_PLANE5_CLIP 0x00000000 +#define NV9097_SET_USER_CLIP_OP_PLANE5_CULL 0x00000001 +#define NV9097_SET_USER_CLIP_OP_PLANE6 24:24 +#define NV9097_SET_USER_CLIP_OP_PLANE6_CLIP 0x00000000 +#define NV9097_SET_USER_CLIP_OP_PLANE6_CULL 0x00000001 +#define NV9097_SET_USER_CLIP_OP_PLANE7 28:28 +#define NV9097_SET_USER_CLIP_OP_PLANE7_CLIP 0x00000000 +#define NV9097_SET_USER_CLIP_OP_PLANE7_CULL 0x00000001 + +#define NV9097_SET_RENDER_ENABLE_OVERRIDE 0x1944 +#define NV9097_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define NV9097_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define NV9097_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define NV9097_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + +#define NV9097_SET_PRIMITIVE_TOPOLOGY_CONTROL 0x1948 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE 0:0 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_TOPOLOGY_IN_BEGIN_METHODS 0x00000000 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_SEPARATE_TOPOLOGY_STATE 0x00000001 + +#define NV9097_SET_WINDOW_CLIP_ENABLE 0x194c +#define NV9097_SET_WINDOW_CLIP_ENABLE_V 0:0 +#define NV9097_SET_WINDOW_CLIP_ENABLE_V_FALSE 0x00000000 +#define NV9097_SET_WINDOW_CLIP_ENABLE_V_TRUE 0x00000001 + +#define NV9097_SET_WINDOW_CLIP_TYPE 0x1950 +#define NV9097_SET_WINDOW_CLIP_TYPE_V 1:0 +#define NV9097_SET_WINDOW_CLIP_TYPE_V_INCLUSIVE 0x00000000 +#define NV9097_SET_WINDOW_CLIP_TYPE_V_EXCLUSIVE 0x00000001 +#define NV9097_SET_WINDOW_CLIP_TYPE_V_CLIPALL 0x00000002 + +#define NV9097_INVALIDATE_ZCULL 0x1958 +#define NV9097_INVALIDATE_ZCULL_V 31:0 +#define NV9097_INVALIDATE_ZCULL_V_INVALIDATE 0x00000000 + +#define NV9097_SET_ZCULL 0x1968 +#define NV9097_SET_ZCULL_Z_ENABLE 0:0 +#define NV9097_SET_ZCULL_Z_ENABLE_FALSE 0x00000000 +#define NV9097_SET_ZCULL_Z_ENABLE_TRUE 0x00000001 +#define NV9097_SET_ZCULL_STENCIL_ENABLE 4:4 +#define NV9097_SET_ZCULL_STENCIL_ENABLE_FALSE 0x00000000 +#define NV9097_SET_ZCULL_STENCIL_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_ZCULL_BOUNDS 0x196c +#define NV9097_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE 0:0 +#define NV9097_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_FALSE 0x00000000 +#define NV9097_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_TRUE 0x00000001 +#define NV9097_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE 4:4 +#define NV9097_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_FALSE 0x00000000 +#define NV9097_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_PRIMITIVE_TOPOLOGY 0x1970 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V 15:0 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_POINTLIST 0x00000001 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LINELIST 0x00000002 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP 0x00000003 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST 0x00000004 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP 0x00000005 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LINELIST_ADJCY 0x0000000A +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP_ADJCY 0x0000000B +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST_ADJCY 0x0000000C +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP_ADJCY 0x0000000D +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_PATCHLIST 0x0000000E +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_POINTS 0x00001001 +#define 
NV9097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST 0x00001002 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST 0x00001003 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST 0x0000100F +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINESTRIP 0x00001010 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINESTRIP 0x00001011 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLELIST 0x00001012 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLESTRIP 0x00001013 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLESTRIP 0x00001014 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN 0x00001015 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLEFAN 0x00001016 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN_IMM 0x00001017 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST_IMM 0x00001018 +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST2 0x0000101A +#define NV9097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST2 0x0000101B + +#define NV9097_ZCULL_SYNC 0x1978 +#define NV9097_ZCULL_SYNC_V 31:0 + +#define NV9097_SET_CLIP_ID_TEST 0x197c +#define NV9097_SET_CLIP_ID_TEST_ENABLE 0:0 +#define NV9097_SET_CLIP_ID_TEST_ENABLE_FALSE 0x00000000 +#define NV9097_SET_CLIP_ID_TEST_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_SURFACE_CLIP_ID_WIDTH 0x1980 +#define NV9097_SET_SURFACE_CLIP_ID_WIDTH_V 31:0 + +#define NV9097_SET_CLIP_ID 0x1984 +#define NV9097_SET_CLIP_ID_V 31:0 + +#define NV9097_SET_DEPTH_BOUNDS_TEST 0x19bc +#define NV9097_SET_DEPTH_BOUNDS_TEST_ENABLE 0:0 +#define NV9097_SET_DEPTH_BOUNDS_TEST_ENABLE_FALSE 0x00000000 +#define NV9097_SET_DEPTH_BOUNDS_TEST_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_BLEND_FLOAT_OPTION 0x19c0 +#define NV9097_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO 0:0 +#define NV9097_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_FALSE 0x00000000 +#define NV9097_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_TRUE 0x00000001 + +#define NV9097_SET_LOGIC_OP 0x19c4 +#define NV9097_SET_LOGIC_OP_ENABLE 0:0 +#define NV9097_SET_LOGIC_OP_ENABLE_FALSE 0x00000000 +#define NV9097_SET_LOGIC_OP_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_LOGIC_OP_FUNC 0x19c8 +#define NV9097_SET_LOGIC_OP_FUNC_V 31:0 +#define NV9097_SET_LOGIC_OP_FUNC_V_CLEAR 0x00001500 +#define NV9097_SET_LOGIC_OP_FUNC_V_AND 0x00001501 +#define NV9097_SET_LOGIC_OP_FUNC_V_AND_REVERSE 0x00001502 +#define NV9097_SET_LOGIC_OP_FUNC_V_COPY 0x00001503 +#define NV9097_SET_LOGIC_OP_FUNC_V_AND_INVERTED 0x00001504 +#define NV9097_SET_LOGIC_OP_FUNC_V_NOOP 0x00001505 +#define NV9097_SET_LOGIC_OP_FUNC_V_XOR 0x00001506 +#define NV9097_SET_LOGIC_OP_FUNC_V_OR 0x00001507 +#define NV9097_SET_LOGIC_OP_FUNC_V_NOR 0x00001508 +#define NV9097_SET_LOGIC_OP_FUNC_V_EQUIV 0x00001509 +#define NV9097_SET_LOGIC_OP_FUNC_V_INVERT 0x0000150A +#define NV9097_SET_LOGIC_OP_FUNC_V_OR_REVERSE 0x0000150B +#define NV9097_SET_LOGIC_OP_FUNC_V_COPY_INVERTED 0x0000150C +#define NV9097_SET_LOGIC_OP_FUNC_V_OR_INVERTED 0x0000150D +#define NV9097_SET_LOGIC_OP_FUNC_V_NAND 0x0000150E +#define NV9097_SET_LOGIC_OP_FUNC_V_SET 0x0000150F + +#define NV9097_SET_Z_COMPRESSION 0x19cc +#define NV9097_SET_Z_COMPRESSION_ENABLE 0:0 +#define NV9097_SET_Z_COMPRESSION_ENABLE_FALSE 0x00000000 +#define NV9097_SET_Z_COMPRESSION_ENABLE_TRUE 0x00000001 + +#define NV9097_CLEAR_SURFACE 0x19d0 +#define NV9097_CLEAR_SURFACE_Z_ENABLE 0:0 +#define NV9097_CLEAR_SURFACE_Z_ENABLE_FALSE 0x00000000 +#define NV9097_CLEAR_SURFACE_Z_ENABLE_TRUE 0x00000001 +#define 
NV9097_CLEAR_SURFACE_STENCIL_ENABLE 1:1 +#define NV9097_CLEAR_SURFACE_STENCIL_ENABLE_FALSE 0x00000000 +#define NV9097_CLEAR_SURFACE_STENCIL_ENABLE_TRUE 0x00000001 +#define NV9097_CLEAR_SURFACE_R_ENABLE 2:2 +#define NV9097_CLEAR_SURFACE_R_ENABLE_FALSE 0x00000000 +#define NV9097_CLEAR_SURFACE_R_ENABLE_TRUE 0x00000001 +#define NV9097_CLEAR_SURFACE_G_ENABLE 3:3 +#define NV9097_CLEAR_SURFACE_G_ENABLE_FALSE 0x00000000 +#define NV9097_CLEAR_SURFACE_G_ENABLE_TRUE 0x00000001 +#define NV9097_CLEAR_SURFACE_B_ENABLE 4:4 +#define NV9097_CLEAR_SURFACE_B_ENABLE_FALSE 0x00000000 +#define NV9097_CLEAR_SURFACE_B_ENABLE_TRUE 0x00000001 +#define NV9097_CLEAR_SURFACE_A_ENABLE 5:5 +#define NV9097_CLEAR_SURFACE_A_ENABLE_FALSE 0x00000000 +#define NV9097_CLEAR_SURFACE_A_ENABLE_TRUE 0x00000001 +#define NV9097_CLEAR_SURFACE_MRT_SELECT 9:6 +#define NV9097_CLEAR_SURFACE_RT_ARRAY_INDEX 25:10 + +#define NV9097_CLEAR_CLIP_ID_SURFACE 0x19d4 +#define NV9097_CLEAR_CLIP_ID_SURFACE_V 31:0 + +#define NV9097_SET_COLOR_COMPRESSION(i) (0x19e0+(i)*4) +#define NV9097_SET_COLOR_COMPRESSION_ENABLE 0:0 +#define NV9097_SET_COLOR_COMPRESSION_ENABLE_FALSE 0x00000000 +#define NV9097_SET_COLOR_COMPRESSION_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_CT_WRITE(i) (0x1a00+(i)*4) +#define NV9097_SET_CT_WRITE_R_ENABLE 0:0 +#define NV9097_SET_CT_WRITE_R_ENABLE_FALSE 0x00000000 +#define NV9097_SET_CT_WRITE_R_ENABLE_TRUE 0x00000001 +#define NV9097_SET_CT_WRITE_G_ENABLE 4:4 +#define NV9097_SET_CT_WRITE_G_ENABLE_FALSE 0x00000000 +#define NV9097_SET_CT_WRITE_G_ENABLE_TRUE 0x00000001 +#define NV9097_SET_CT_WRITE_B_ENABLE 8:8 +#define NV9097_SET_CT_WRITE_B_ENABLE_FALSE 0x00000000 +#define NV9097_SET_CT_WRITE_B_ENABLE_TRUE 0x00000001 +#define NV9097_SET_CT_WRITE_A_ENABLE 12:12 +#define NV9097_SET_CT_WRITE_A_ENABLE_FALSE 0x00000000 +#define NV9097_SET_CT_WRITE_A_ENABLE_TRUE 0x00000001 + +#define NV9097_PIPE_NOP 0x1a2c +#define NV9097_PIPE_NOP_V 31:0 + +#define NV9097_SET_SPARE00 0x1a30 +#define NV9097_SET_SPARE00_V 31:0 + +#define NV9097_SET_SPARE01 0x1a34 +#define NV9097_SET_SPARE01_V 31:0 + +#define NV9097_SET_SPARE02 0x1a38 +#define NV9097_SET_SPARE02_V 31:0 + +#define NV9097_SET_SPARE03 0x1a3c +#define NV9097_SET_SPARE03_V 31:0 + +#define NV9097_SET_REPORT_SEMAPHORE_A 0x1b00 +#define NV9097_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NV9097_SET_REPORT_SEMAPHORE_B 0x1b04 +#define NV9097_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NV9097_SET_REPORT_SEMAPHORE_C 0x1b08 +#define NV9097_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0 + +#define NV9097_SET_REPORT_SEMAPHORE_D 0x1b0c +#define NV9097_SET_REPORT_SEMAPHORE_D_OPERATION 1:0 +#define NV9097_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000 +#define NV9097_SET_REPORT_SEMAPHORE_D_OPERATION_ACQUIRE 0x00000001 +#define NV9097_SET_REPORT_SEMAPHORE_D_OPERATION_REPORT_ONLY 0x00000002 +#define NV9097_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003 +#define NV9097_SET_REPORT_SEMAPHORE_D_RELEASE 4:4 +#define NV9097_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_READS_COMPLETE 0x00000000 +#define NV9097_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_WRITES_COMPLETE 0x00000001 +#define NV9097_SET_REPORT_SEMAPHORE_D_ACQUIRE 8:8 +#define NV9097_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_WRITES_START 0x00000000 +#define NV9097_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_READS_START 0x00000001 +#define NV9097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION 15:12 +#define NV9097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_NONE 0x00000000 +#define 
NV9097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DATA_ASSEMBLER 0x00000001 +#define NV9097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VERTEX_SHADER 0x00000002 +#define NV9097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_INIT_SHADER 0x00000008 +#define NV9097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_SHADER 0x00000009 +#define NV9097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_GEOMETRY_SHADER 0x00000006 +#define NV9097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_STREAMING_OUTPUT 0x00000005 +#define NV9097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VPC 0x00000004 +#define NV9097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ZCULL 0x00000007 +#define NV9097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_PIXEL_SHADER 0x0000000A +#define NV9097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DEPTH_TEST 0x0000000C +#define NV9097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ALL 0x0000000F +#define NV9097_SET_REPORT_SEMAPHORE_D_COMPARISON 16:16 +#define NV9097_SET_REPORT_SEMAPHORE_D_COMPARISON_EQ 0x00000000 +#define NV9097_SET_REPORT_SEMAPHORE_D_COMPARISON_GE 0x00000001 +#define NV9097_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20 +#define NV9097_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000 +#define NV9097_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT 27:23 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_NONE 0x00000000 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_DA_VERTICES_GENERATED 0x00000001 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_DA_PRIMITIVES_GENERATED 0x00000003 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_VS_INVOCATIONS 0x00000005 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_TI_INVOCATIONS 0x0000001B +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_TS_INVOCATIONS 0x0000001D +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_TS_PRIMITIVES_GENERATED 0x0000001F +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_GS_INVOCATIONS 0x00000007 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_GS_PRIMITIVES_GENERATED 0x00000009 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_ALPHA_BETA_CLOCKS 0x00000004 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_VTG_PRIMITIVES_OUT 0x00000012 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x0000001E +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_SUCCEEDED 0x0000000B +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED 0x0000000D +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000006 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_BYTE_COUNT 0x0000001A +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_INVOCATIONS 0x0000000F +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_PRIMITIVES_GENERATED 0x00000011 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS0 0x0000000A +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS1 0x0000000C +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS2 0x0000000E +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS3 0x00000010 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_PS_INVOCATIONS 0x00000013 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT 0x00000002 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT64 0x00000015 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_COLOR_TARGET 0x00000018 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_ZETA_TARGET 0x00000019 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_BOUNDING_RECTANGLE 0x0000001C +#define NV9097_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 
28:28 +#define NV9097_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NV9097_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NV9097_SET_REPORT_SEMAPHORE_D_SUB_REPORT 7:5 +#define NV9097_SET_REPORT_SEMAPHORE_D_REPORT_DWORD_NUMBER 21:21 +#define NV9097_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2 +#define NV9097_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000 +#define NV9097_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001 + +#define NV9097_SET_VERTEX_STREAM_A_FORMAT(j) (0x1c00+(j)*16) +#define NV9097_SET_VERTEX_STREAM_A_FORMAT_STRIDE 11:0 +#define NV9097_SET_VERTEX_STREAM_A_FORMAT_ENABLE 12:12 +#define NV9097_SET_VERTEX_STREAM_A_FORMAT_ENABLE_FALSE 0x00000000 +#define NV9097_SET_VERTEX_STREAM_A_FORMAT_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_VERTEX_STREAM_A_LOCATION_A(j) (0x1c04+(j)*16) +#define NV9097_SET_VERTEX_STREAM_A_LOCATION_A_OFFSET_UPPER 7:0 + +#define NV9097_SET_VERTEX_STREAM_A_LOCATION_B(j) (0x1c08+(j)*16) +#define NV9097_SET_VERTEX_STREAM_A_LOCATION_B_OFFSET_LOWER 31:0 + +#define NV9097_SET_VERTEX_STREAM_A_FREQUENCY(j) (0x1c0c+(j)*16) +#define NV9097_SET_VERTEX_STREAM_A_FREQUENCY_V 31:0 + +#define NV9097_SET_VERTEX_STREAM_B_FORMAT(j) (0x1d00+(j)*16) +#define NV9097_SET_VERTEX_STREAM_B_FORMAT_STRIDE 11:0 +#define NV9097_SET_VERTEX_STREAM_B_FORMAT_ENABLE 12:12 +#define NV9097_SET_VERTEX_STREAM_B_FORMAT_ENABLE_FALSE 0x00000000 +#define NV9097_SET_VERTEX_STREAM_B_FORMAT_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_VERTEX_STREAM_B_LOCATION_A(j) (0x1d04+(j)*16) +#define NV9097_SET_VERTEX_STREAM_B_LOCATION_A_OFFSET_UPPER 7:0 + +#define NV9097_SET_VERTEX_STREAM_B_LOCATION_B(j) (0x1d08+(j)*16) +#define NV9097_SET_VERTEX_STREAM_B_LOCATION_B_OFFSET_LOWER 31:0 + +#define NV9097_SET_VERTEX_STREAM_B_FREQUENCY(j) (0x1d0c+(j)*16) +#define NV9097_SET_VERTEX_STREAM_B_FREQUENCY_V 31:0 + +#define NV9097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA(j) (0x1e00+(j)*32) +#define NV9097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE 0:0 +#define NV9097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000 +#define NV9097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001 + +#define NV9097_SET_BLEND_PER_TARGET_COLOR_OP(j) (0x1e04+(j)*32) +#define NV9097_SET_BLEND_PER_TARGET_COLOR_OP_V 31:0 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NV9097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NV9097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_ADD 0x00008006 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MIN 0x00008007 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MAX 0x00008008 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_ADD 0x00000001 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_SUBTRACT 0x00000002 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MIN 0x00000004 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MAX 0x00000005 + +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF(j) (0x1e08+(j)*32) +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V 31:0 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 
+#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF(j) (0x1e0c+(j)*32) +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V 31:0 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 
0x00004304 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NV9097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_OP(j) (0x1e10+(j)*32) +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_OP_V 31:0 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MIN 0x00008007 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MAX 0x00008008 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_ADD 0x00000001 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_SUBTRACT 0x00000002 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MIN 0x00000004 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MAX 0x00000005 + +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF(j) (0x1e14+(j)*32) +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V 31:0 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define 
NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF(j) (0x1e18+(j)*32) +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V 31:0 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define 
NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NV9097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NV9097_SET_VERTEX_STREAM_LIMIT_A_A(j) (0x1f00+(j)*8) +#define NV9097_SET_VERTEX_STREAM_LIMIT_A_A_UPPER 7:0 + +#define NV9097_SET_VERTEX_STREAM_LIMIT_A_B(j) (0x1f04+(j)*8) +#define NV9097_SET_VERTEX_STREAM_LIMIT_A_B_LOWER 31:0 + +#define NV9097_SET_VERTEX_STREAM_LIMIT_B_A(j) (0x1f80+(j)*8) +#define NV9097_SET_VERTEX_STREAM_LIMIT_B_A_UPPER 7:0 + +#define NV9097_SET_VERTEX_STREAM_LIMIT_B_B(j) (0x1f84+(j)*8) +#define NV9097_SET_VERTEX_STREAM_LIMIT_B_B_LOWER 31:0 + +#define NV9097_SET_PIPELINE_SHADER(j) (0x2000+(j)*64) +#define NV9097_SET_PIPELINE_SHADER_ENABLE 0:0 +#define NV9097_SET_PIPELINE_SHADER_ENABLE_FALSE 0x00000000 +#define NV9097_SET_PIPELINE_SHADER_ENABLE_TRUE 0x00000001 +#define NV9097_SET_PIPELINE_SHADER_TYPE 7:4 +#define 
NV9097_SET_PIPELINE_SHADER_TYPE_VERTEX_CULL_BEFORE_FETCH 0x00000000 +#define NV9097_SET_PIPELINE_SHADER_TYPE_VERTEX 0x00000001 +#define NV9097_SET_PIPELINE_SHADER_TYPE_TESSELLATION_INIT 0x00000002 +#define NV9097_SET_PIPELINE_SHADER_TYPE_TESSELLATION 0x00000003 +#define NV9097_SET_PIPELINE_SHADER_TYPE_GEOMETRY 0x00000004 +#define NV9097_SET_PIPELINE_SHADER_TYPE_PIXEL 0x00000005 + +#define NV9097_SET_PIPELINE_PROGRAM(j) (0x2004+(j)*64) +#define NV9097_SET_PIPELINE_PROGRAM_OFFSET 31:0 + +#define NV9097_SET_PIPELINE_RESERVED_A(j) (0x2008+(j)*64) +#define NV9097_SET_PIPELINE_RESERVED_A_V 0:0 + +#define NV9097_SET_PIPELINE_REGISTER_COUNT(j) (0x200c+(j)*64) +#define NV9097_SET_PIPELINE_REGISTER_COUNT_V 7:0 + +#define NV9097_SET_PIPELINE_BINDING(j) (0x2010+(j)*64) +#define NV9097_SET_PIPELINE_BINDING_GROUP 2:0 + +#define NV9097_SET_PIPELINE_RESERVED_B(j) (0x2014+(j)*64) +#define NV9097_SET_PIPELINE_RESERVED_B_V 0:0 + +#define NV9097_SET_PIPELINE_RESERVED_C(j) (0x2018+(j)*64) +#define NV9097_SET_PIPELINE_RESERVED_C_V 0:0 + +#define NV9097_SET_PIPELINE_RESERVED_D(j) (0x201c+(j)*64) +#define NV9097_SET_PIPELINE_RESERVED_D_V 0:0 + +#define NV9097_SET_PIPELINE_RESERVED_E(j) (0x2020+(j)*64) +#define NV9097_SET_PIPELINE_RESERVED_E_V 0:0 + +#define NV9097_SET_BINDING_CONTROL_TEXTURE(j) (0x2200+(j)*16) +#define NV9097_SET_BINDING_CONTROL_TEXTURE_MAX_ACTIVE_SAMPLERS 3:0 +#define NV9097_SET_BINDING_CONTROL_TEXTURE_MAX_ACTIVE_SAMPLERS__1 0x00000000 +#define NV9097_SET_BINDING_CONTROL_TEXTURE_MAX_ACTIVE_SAMPLERS__2 0x00000001 +#define NV9097_SET_BINDING_CONTROL_TEXTURE_MAX_ACTIVE_SAMPLERS__4 0x00000002 +#define NV9097_SET_BINDING_CONTROL_TEXTURE_MAX_ACTIVE_SAMPLERS__8 0x00000003 +#define NV9097_SET_BINDING_CONTROL_TEXTURE_MAX_ACTIVE_SAMPLERS__16 0x00000004 +#define NV9097_SET_BINDING_CONTROL_TEXTURE_MAX_ACTIVE_HEADERS 7:4 +#define NV9097_SET_BINDING_CONTROL_TEXTURE_MAX_ACTIVE_HEADERS__1 0x00000000 +#define NV9097_SET_BINDING_CONTROL_TEXTURE_MAX_ACTIVE_HEADERS__2 0x00000001 +#define NV9097_SET_BINDING_CONTROL_TEXTURE_MAX_ACTIVE_HEADERS__4 0x00000002 +#define NV9097_SET_BINDING_CONTROL_TEXTURE_MAX_ACTIVE_HEADERS__8 0x00000003 +#define NV9097_SET_BINDING_CONTROL_TEXTURE_MAX_ACTIVE_HEADERS__16 0x00000004 +#define NV9097_SET_BINDING_CONTROL_TEXTURE_MAX_ACTIVE_HEADERS__32 0x00000005 +#define NV9097_SET_BINDING_CONTROL_TEXTURE_MAX_ACTIVE_HEADERS__64 0x00000006 +#define NV9097_SET_BINDING_CONTROL_TEXTURE_MAX_ACTIVE_HEADERS__128 0x00000007 + +#define NV9097_SET_BINDING_CONTROL_RESERVED_A(j) (0x2204+(j)*16) +#define NV9097_SET_BINDING_CONTROL_RESERVED_A_V 0:0 + +#define NV9097_SET_BINDING_CONTROL_RESERVED_B(j) (0x2208+(j)*16) +#define NV9097_SET_BINDING_CONTROL_RESERVED_B_V 0:0 + +#define NV9097_SET_FALCON00 0x2300 +#define NV9097_SET_FALCON00_V 31:0 + +#define NV9097_SET_FALCON01 0x2304 +#define NV9097_SET_FALCON01_V 31:0 + +#define NV9097_SET_FALCON02 0x2308 +#define NV9097_SET_FALCON02_V 31:0 + +#define NV9097_SET_FALCON03 0x230c +#define NV9097_SET_FALCON03_V 31:0 + +#define NV9097_SET_FALCON04 0x2310 +#define NV9097_SET_FALCON04_V 31:0 + +#define NV9097_SET_FALCON05 0x2314 +#define NV9097_SET_FALCON05_V 31:0 + +#define NV9097_SET_FALCON06 0x2318 +#define NV9097_SET_FALCON06_V 31:0 + +#define NV9097_SET_FALCON07 0x231c +#define NV9097_SET_FALCON07_V 31:0 + +#define NV9097_SET_FALCON08 0x2320 +#define NV9097_SET_FALCON08_V 31:0 + +#define NV9097_SET_FALCON09 0x2324 +#define NV9097_SET_FALCON09_V 31:0 + +#define NV9097_SET_FALCON10 0x2328 +#define NV9097_SET_FALCON10_V 31:0 + +#define NV9097_SET_FALCON11 0x232c 
+#define NV9097_SET_FALCON11_V 31:0
+
+#define NV9097_SET_FALCON12 0x2330
+#define NV9097_SET_FALCON12_V 31:0
+
+#define NV9097_SET_FALCON13 0x2334
+#define NV9097_SET_FALCON13_V 31:0
+
+#define NV9097_SET_FALCON14 0x2338
+#define NV9097_SET_FALCON14_V 31:0
+
+#define NV9097_SET_FALCON15 0x233c
+#define NV9097_SET_FALCON15_V 31:0
+
+#define NV9097_SET_FALCON16 0x2340
+#define NV9097_SET_FALCON16_V 31:0
+
+#define NV9097_SET_FALCON17 0x2344
+#define NV9097_SET_FALCON17_V 31:0
+
+#define NV9097_SET_FALCON18 0x2348
+#define NV9097_SET_FALCON18_V 31:0
+
+#define NV9097_SET_FALCON19 0x234c
+#define NV9097_SET_FALCON19_V 31:0
+
+#define NV9097_SET_FALCON20 0x2350
+#define NV9097_SET_FALCON20_V 31:0
+
+#define NV9097_SET_FALCON21 0x2354
+#define NV9097_SET_FALCON21_V 31:0
+
+#define NV9097_SET_FALCON22 0x2358
+#define NV9097_SET_FALCON22_V 31:0
+
+#define NV9097_SET_FALCON23 0x235c
+#define NV9097_SET_FALCON23_V 31:0
+
+#define NV9097_SET_FALCON24 0x2360
+#define NV9097_SET_FALCON24_V 31:0
+
+#define NV9097_SET_FALCON25 0x2364
+#define NV9097_SET_FALCON25_V 31:0
+
+#define NV9097_SET_FALCON26 0x2368
+#define NV9097_SET_FALCON26_V 31:0
+
+#define NV9097_SET_FALCON27 0x236c
+#define NV9097_SET_FALCON27_V 31:0
+
+#define NV9097_SET_FALCON28 0x2370
+#define NV9097_SET_FALCON28_V 31:0
+
+#define NV9097_SET_FALCON29 0x2374
+#define NV9097_SET_FALCON29_V 31:0
+
+#define NV9097_SET_FALCON30 0x2378
+#define NV9097_SET_FALCON30_V 31:0
+
+#define NV9097_SET_FALCON31 0x237c
+#define NV9097_SET_FALCON31_V 31:0
+
+#define NV9097_SET_CONSTANT_BUFFER_SELECTOR_A 0x2380
+#define NV9097_SET_CONSTANT_BUFFER_SELECTOR_A_SIZE 16:0
+
+#define NV9097_SET_CONSTANT_BUFFER_SELECTOR_B 0x2384
+#define NV9097_SET_CONSTANT_BUFFER_SELECTOR_B_ADDRESS_UPPER 7:0
+
+#define NV9097_SET_CONSTANT_BUFFER_SELECTOR_C 0x2388
+#define NV9097_SET_CONSTANT_BUFFER_SELECTOR_C_ADDRESS_LOWER 31:0
+
+#define NV9097_LOAD_CONSTANT_BUFFER_OFFSET 0x238c
+#define NV9097_LOAD_CONSTANT_BUFFER_OFFSET_V 15:0
+
+#define NV9097_LOAD_CONSTANT_BUFFER(i) (0x2390+(i)*4)
+#define NV9097_LOAD_CONSTANT_BUFFER_V 31:0
+
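The constant-buffer methods just above stream inline updates into a bound buffer: SELECTOR_B and SELECTOR_C split a 40-bit GPU virtual address into its upper (bits 39:32) and lower (bits 31:0) words, SELECTOR_A gives the size, LOAD_CONSTANT_BUFFER_OFFSET positions a write cursor, and the LOAD_CONSTANT_BUFFER(i) slots carry the payload words. A minimal sketch of the sequence in C, assuming a hypothetical push_method() helper and channel type that are not part of this header:

/*
 * Hypothetical sketch only: push_method() and struct nv_chan are assumed
 * stand-ins for a driver's pushbuffer write helper; they are not defined
 * by this header or this patch.
 */
#include <stdint.h>

struct nv_chan;
void push_method(struct nv_chan *ch, uint32_t mthd, uint32_t data);

static void load_constants(struct nv_chan *ch, uint64_t cb_va, uint32_t cb_size,
                           uint32_t byte_offset, const uint32_t *words, unsigned n)
{
    push_method(ch, 0x2380, cb_size);                 /* SET_CONSTANT_BUFFER_SELECTOR_A: SIZE (16:0)   */
    push_method(ch, 0x2384, (uint32_t)(cb_va >> 32)); /* SET_CONSTANT_BUFFER_SELECTOR_B: ADDRESS_UPPER */
    push_method(ch, 0x2388, (uint32_t)cb_va);         /* SET_CONSTANT_BUFFER_SELECTOR_C: ADDRESS_LOWER */
    push_method(ch, 0x238c, byte_offset);             /* LOAD_CONSTANT_BUFFER_OFFSET (15:0)            */
    for (unsigned i = 0; i < n; i++)                  /* n assumed small enough to fit the (i) slots   */
        push_method(ch, 0x2390 + 4 * i, words[i]);    /* LOAD_CONSTANT_BUFFER(i): one payload word     */
}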
+#define NV9097_BIND_GROUP_TEXTURE_SAMPLER(j) (0x2400+(j)*32)
+#define NV9097_BIND_GROUP_TEXTURE_SAMPLER_VALID 0:0
+#define NV9097_BIND_GROUP_TEXTURE_SAMPLER_VALID_FALSE 0x00000000
+#define NV9097_BIND_GROUP_TEXTURE_SAMPLER_VALID_TRUE 0x00000001
+#define NV9097_BIND_GROUP_TEXTURE_SAMPLER_SAMPLER_SLOT 11:4
+#define NV9097_BIND_GROUP_TEXTURE_SAMPLER_INDEX 24:12
+
+#define NV9097_BIND_GROUP_TEXTURE_HEADER(j) (0x2404+(j)*32)
+#define NV9097_BIND_GROUP_TEXTURE_HEADER_VALID 0:0
+#define NV9097_BIND_GROUP_TEXTURE_HEADER_VALID_FALSE 0x00000000
+#define NV9097_BIND_GROUP_TEXTURE_HEADER_VALID_TRUE 0x00000001
+#define NV9097_BIND_GROUP_TEXTURE_HEADER_TEXTURE_SLOT 8:1
+#define NV9097_BIND_GROUP_TEXTURE_HEADER_INDEX 30:9
+
+#define NV9097_BIND_GROUP_EXTRA_TEXTURE_SAMPLER(j) (0x2408+(j)*32)
+#define NV9097_BIND_GROUP_EXTRA_TEXTURE_SAMPLER_VALID 0:0
+#define NV9097_BIND_GROUP_EXTRA_TEXTURE_SAMPLER_VALID_FALSE 0x00000000
+#define NV9097_BIND_GROUP_EXTRA_TEXTURE_SAMPLER_VALID_TRUE 0x00000001
+#define NV9097_BIND_GROUP_EXTRA_TEXTURE_SAMPLER_SAMPLER_SLOT 11:4
+#define NV9097_BIND_GROUP_EXTRA_TEXTURE_SAMPLER_INDEX 24:12
+
+#define NV9097_BIND_GROUP_EXTRA_TEXTURE_HEADER(j) (0x240c+(j)*32)
+#define NV9097_BIND_GROUP_EXTRA_TEXTURE_HEADER_VALID 0:0
+#define NV9097_BIND_GROUP_EXTRA_TEXTURE_HEADER_VALID_FALSE 0x00000000
+#define NV9097_BIND_GROUP_EXTRA_TEXTURE_HEADER_VALID_TRUE 0x00000001
+#define NV9097_BIND_GROUP_EXTRA_TEXTURE_HEADER_TEXTURE_SLOT 8:1
+#define NV9097_BIND_GROUP_EXTRA_TEXTURE_HEADER_INDEX 30:9
+
+#define NV9097_BIND_GROUP_CONSTANT_BUFFER(j) (0x2410+(j)*32)
+#define NV9097_BIND_GROUP_CONSTANT_BUFFER_VALID 0:0
+#define NV9097_BIND_GROUP_CONSTANT_BUFFER_VALID_FALSE 0x00000000
+#define NV9097_BIND_GROUP_CONSTANT_BUFFER_VALID_TRUE 0x00000001
+#define NV9097_BIND_GROUP_CONSTANT_BUFFER_SHADER_SLOT 8:4
+
+#define NV9097_RESERVED_GROUP_B_RESERVED_A(j) (0x2500+(j)*32)
+#define NV9097_RESERVED_GROUP_B_RESERVED_A_V 0:0
+
+#define NV9097_RESERVED_GROUP_B_RESERVED_B(j) (0x2504+(j)*32)
+#define NV9097_RESERVED_GROUP_B_RESERVED_B_V 0:0
+
+#define NV9097_RESERVED_GROUP_B_RESERVED_C(j) (0x2508+(j)*32)
+#define NV9097_RESERVED_GROUP_B_RESERVED_C_V 0:0
+
+#define NV9097_RESERVED_GROUP_B_RESERVED_D(j) (0x250c+(j)*32)
+#define NV9097_RESERVED_GROUP_B_RESERVED_D_V 0:0
+
+#define NV9097_RESERVED_GROUP_B_RESERVED_E(j) (0x2510+(j)*32)
+#define NV9097_RESERVED_GROUP_B_RESERVED_E_V 0:0
+
+#define NV9097_SET_COLOR_CLAMP 0x2600
+#define NV9097_SET_COLOR_CLAMP_ENABLE 0:0
+#define NV9097_SET_COLOR_CLAMP_ENABLE_FALSE 0x00000000
+#define NV9097_SET_COLOR_CLAMP_ENABLE_TRUE 0x00000001
+
+#define NV9097_SET_SU_LD_ST_TARGET_A(j) (0x2700+(j)*32)
+#define NV9097_SET_SU_LD_ST_TARGET_A_OFFSET_UPPER 7:0
+
+#define NV9097_SET_SU_LD_ST_TARGET_B(j) (0x2704+(j)*32)
+#define NV9097_SET_SU_LD_ST_TARGET_B_OFFSET_LOWER 31:0
+
+#define NV9097_SET_SU_LD_ST_TARGET_C(j) (0x2708+(j)*32)
+#define NV9097_SET_SU_LD_ST_TARGET_C_WIDTH 31:0
+
+#define NV9097_SET_SU_LD_ST_TARGET_D(j) (0x270c+(j)*32)
+#define NV9097_SET_SU_LD_ST_TARGET_D_HEIGHT 16:0
+#define NV9097_SET_SU_LD_ST_TARGET_D_LAYOUT_IN_MEMORY 20:20
+#define NV9097_SET_SU_LD_ST_TARGET_D_LAYOUT_IN_MEMORY_BLOCKLINEAR 0x00000000
+#define NV9097_SET_SU_LD_ST_TARGET_D_LAYOUT_IN_MEMORY_PITCH 0x00000001
+
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT(j) (0x2710+(j)*32)
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_TYPE 0:0
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_TYPE_COLOR 0x00000000
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_TYPE_ZETA 0x00000001
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR 11:4
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_DISABLED 0x00000000
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RF32_GF32_BF32_AF32 0x000000C0
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RS32_GS32_BS32_AS32 0x000000C1
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RU32_GU32_BU32_AU32 0x000000C2
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RF32_GF32_BF32_X32 0x000000C3
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RS32_GS32_BS32_X32 0x000000C4
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RU32_GU32_BU32_X32 0x000000C5
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_R16_G16_B16_A16 0x000000C6
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RN16_GN16_BN16_AN16 0x000000C7
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RS16_GS16_BS16_AS16 0x000000C8
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RU16_GU16_BU16_AU16 0x000000C9
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RF16_GF16_BF16_AF16 0x000000CA
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RF32_GF32 0x000000CB
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RS32_GS32 0x000000CC
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RU32_GU32 0x000000CD
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RF16_GF16_BF16_X16 0x000000CE
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_A8R8G8B8 0x000000CF
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_A8RL8GL8BL8 0x000000D0
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_A2B10G10R10 0x000000D1
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_AU2BU10GU10RU10 0x000000D2
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_A8B8G8R8 0x000000D5
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_A8BL8GL8RL8 0x000000D6
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_AN8BN8GN8RN8 0x000000D7
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_AS8BS8GS8RS8 0x000000D8
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_AU8BU8GU8RU8 0x000000D9
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_R16_G16 0x000000DA
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RN16_GN16 0x000000DB
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RS16_GS16 0x000000DC
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RU16_GU16 0x000000DD
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RF16_GF16 0x000000DE
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_A2R10G10B10 0x000000DF
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_BF10GF11RF11 0x000000E0
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RS32 0x000000E3
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RU32 0x000000E4
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RF32 0x000000E5
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_X8R8G8B8 0x000000E6
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_X8RL8GL8BL8 0x000000E7
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_R5G6B5 0x000000E8
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_A1R5G5B5 0x000000E9
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_G8R8 0x000000EA
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_GN8RN8 0x000000EB
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_GS8RS8 0x000000EC
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_GU8RU8 0x000000ED
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_R16 0x000000EE
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RN16 0x000000EF
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RS16 0x000000F0
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RU16 0x000000F1
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RF16 0x000000F2
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_R8 0x000000F3
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RN8 0x000000F4
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RS8 0x000000F5
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RU8 0x000000F6
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_A8 0x000000F7
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_X1R5G5B5 0x000000F8
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_X8B8G8R8 0x000000F9
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_X8BL8GL8RL8 0x000000FA
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_Z1R5G5B5 0x000000FB
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_O1R5G5B5 0x000000FC
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_Z8R8G8B8 0x000000FD
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_O8R8G8B8 0x000000FE
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_R32 0x000000FF
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_A16 0x00000040
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_AF16 0x00000041
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_AF32 0x00000042
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_A8R8 0x00000043
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_R16_A16 0x00000044
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RF16_AF16 0x00000045
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_COLOR_RF32_AF32 0x00000046
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA 16:12
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_Z16 0x00000013
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_Z24S8 0x00000014
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_X8Z24 0x00000015
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_S8Z24 0x00000016
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_V8Z24 0x00000018
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_ZF32 0x0000000A
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_ZF32_X24S8 0x00000019
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_X8Z24_X16V8S8 0x0000001D
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_ZF32_X16V8X8 0x0000001E
+#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_ZF32_X16V8S8 0x0000001F
+
+#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE(j) (0x2714+(j)*32)
+#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_WIDTH 3:0
+#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_HEIGHT 7:4
+#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+
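Taken together, the SET_SU_LD_ST_TARGET_A..D and _FORMAT methods describe one surface load/store target: a 40-bit offset split across A (bits 39:32) and B (bits 31:0), a width and height in C and D, a block-linear/pitch layout bit, and a color or zeta format code. A hedged sketch, reusing the same assumed push_method() helper as in the constant-buffer example earlier (it is not part of this header):

/*
 * Hypothetical sketch only: programs target j as a pitch-layout A8R8G8B8
 * color surface. push_method() and struct nv_chan are assumed helpers.
 */
#include <stdint.h>

struct nv_chan;
void push_method(struct nv_chan *ch, uint32_t mthd, uint32_t data);

static void set_ldst_target(struct nv_chan *ch, unsigned j, uint64_t offset,
                            uint32_t width, uint32_t height)
{
    uint32_t base = 0x2700 + j * 32;                        /* SET_SU_LD_ST_TARGET_A(j)            */
    push_method(ch, base + 0x00, (uint32_t)(offset >> 32)); /* _A: OFFSET_UPPER (7:0)              */
    push_method(ch, base + 0x04, (uint32_t)offset);         /* _B: OFFSET_LOWER (31:0)             */
    push_method(ch, base + 0x08, width);                    /* _C: WIDTH                           */
    push_method(ch, base + 0x0c, height | (1u << 20));      /* _D: HEIGHT (16:0), LAYOUT=PITCH     */
    push_method(ch, base + 0x10, 0xCFu << 4);               /* _FORMAT: TYPE=COLOR, A8R8G8B8 (11:4)*/
}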
0x00000015 +#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_S8Z24 0x00000016 +#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_V8Z24 0x00000018 +#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_ZF32 0x0000000A +#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_ZF32_X24S8 0x00000019 +#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_X8Z24_X16V8S8 0x0000001D +#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_ZF32_X16V8X8 0x0000001E +#define NV9097_SET_SU_LD_ST_TARGET_FORMAT_ZETA_ZF32_X16V8S8 0x0000001F + +#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE(j) (0x2714+(j)*32) +#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_WIDTH 3:0 +#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_HEIGHT 7:4 +#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NV9097_SET_SU_LD_ST_TARGET_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 + +#define NV9097_SET_STREAM_OUT_LAYOUT_SELECT(i,j) (0x2800+(i)*128+(j)*4) +#define NV9097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER00 7:0 +#define NV9097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER01 15:8 +#define NV9097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER02 23:16 +#define NV9097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER03 31:24 + +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 2:0 +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 6:4 +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 10:8 +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 14:12 +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 18:16 +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 22:20 +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 26:24 +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 30:28 + +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4) +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0 +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4 + +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc +#define NV9097_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0 + +#define NV9097_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NV9097_SET_MME_SHADOW_SCRATCH_V 31:0 + +#define NV9097_CALL_MME_MACRO(j) (0x3800+(j)*8) +#define NV9097_CALL_MME_MACRO_V 31:0 + +#define NV9097_CALL_MME_DATA(j) (0x3804+(j)*8) +#define NV9097_CALL_MME_DATA_V 31:0 + +#endif /* _cl_fermi_a_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl90cd.h b/src/common/sdk/nvidia/inc/class/cl90cd.h new file mode 100644 index 0000000..4c732eb --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl90cd.h @@ -0,0 +1,237 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * NV_EVENT_BUFFER
+ * An event buffer is shared between the user (RO) and the kernel (RW).
+ * It holds debug/profile event data provided by the kernel.
+ *
+ */
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/cl90cd.finn
+//
+
+#define NV_EVENT_BUFFER (0x90cdU) /* finn: Evaluated from "NV_EVENT_BUFFER_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+/*
+ * NV_EVENT_BUFFER_HEADER
+ * This structure holds the get and put values used to index/consume the event
+ * buffer, along with other RO data shared with the user.
+ *
+ * recordGet/Put: These "pointers" work in the traditional sense:
+ * - when GET==PUT, the fifo is empty
+ * - when GET==PUT+1, the fifo is full
+ * This implies a full fifo always has one "wasted" element.
+ *
+ * recordCount: This is the total number of records added to the buffer by the kernel.
+ * This information is filled out when the buffer is set up to keep the newest records.
+ * recordCount = number of records currently in the buffer + overflow count.
+ *
+ * recordDropcount: This is the number of event records that are dropped because the
+ * buffer is full.
+ * This information is filled out when the buffer is set up to keep the oldest records.
+ *
+ * vardataDropcount: The event buffer provides a dual stream of data, where a record can
+ * contain an optional offset into a variable-length data buffer.
+ * This is the number of variable data records that are dropped because the
+ * buffer is full.
+ * This information is filled out when the buffer is set up to keep the oldest records.
+ */
+typedef struct NV_EVENT_BUFFER_HEADER {
+    NvU32 recordGet;
+    NvU32 recordPut;
+    NV_DECLARE_ALIGNED(NvU64 recordCount, 8);
+    NV_DECLARE_ALIGNED(NvU64 recordDropcount, 8);
+    NV_DECLARE_ALIGNED(NvU64 vardataDropcount, 8);
+} NV_EVENT_BUFFER_HEADER;
+
+/*
+ * NV_EVENT_BUFFER_RECORD_HEADER
+ * This is the header added to each event record.
+ * It identifies the event type and any variable-length data associated with the record.
+ */
+typedef struct NV_EVENT_BUFFER_RECORD_HEADER {
+    NvU16 type;
+    NvU16 subtype;
+    NvU32 varData; // [31: 5] = (varDataOffset >> 5); 0 < vardataOffset <= vardataBufferSize
+                   // [ 4: 1] = reserved for future use
+                   // [ 0: 0] = isVardataStartOffsetZero
+} NV_EVENT_BUFFER_RECORD_HEADER;
+
+/*
+ * NV_EVENT_BUFFER_RECORD
+ * This structure defines a generic event record.
+ * The size of this record is fixed for a given event buffer.
+ * It is configured by the user during allocation.
+ */
+typedef struct NV_EVENT_BUFFER_RECORD {
+    NV_EVENT_BUFFER_RECORD_HEADER recordHeader;
+    NV_DECLARE_ALIGNED(NvU64 inlinePayload[1], 8); // 1st element of the payload/data
+    // Do not add more elements here; inlinePayload can contain more than one element
+} NV_EVENT_BUFFER_RECORD;
+
+#define NV_EVENT_VARDATA_GRANULARITY 32
+#define NV_EVENT_VARDATA_OFFSET_MASK (~(NV_EVENT_VARDATA_GRANULARITY - 1))
+#define NV_EVENT_VARDATA_START_OFFSET_ZERO 0x01
+
+/*
+ * NV_EVENT_BUFFER_ALLOC_PARAMETERS
+ *
+ * bufferHeader [OUT]
+ *     This is the user VA offset pointing to the base of NV_EVENT_BUFFER_HEADER.
+ *
+ * recordBuffer [OUT]
+ *     This is the user VA offset pointing to the base of the event record buffer.
+ *     This buffer will contain NV_EVENT_BUFFER_RECORDs added by the kernel.
+ *
+ * recordSize [IN]
+ *     This is the size of the NV_EVENT_BUFFER_RECORD used by this buffer.
+ *
+ * recordCount [IN]
+ *     This is the number of records that recordBuffer can hold.
+ *
+ * vardataBuffer [OUT]
+ *     This is the user VA offset pointing to the base of the variable data buffer.
+ *
+ * vardataBufferSize [IN]
+ *     Size of the variable data buffer in bytes.
+ *
+ * recordsFreeThreshold [IN]
+ *     This is the notification threshold for the event record buffer.
+ *     This field specifies the number of records that the buffer can
+ *     still hold before it gets full.
+ *
+ * vardataFreeThreshold [IN]
+ *     This is the notification threshold for the vardata buffer.
+ *     This field specifies the number of bytes that the buffer can
+ *     still hold before it gets full.
+ *
+ * notificationHandle [IN]
+ *     When recordsFreeThreshold or vardataFreeThreshold is met, the kernel will
+ *     notify the user on this handle. If notificationHandle is NULL, event
+ *     notification is disabled. This is an OS-specific notification handle:
+ *     a Windows event handle, or an fd pointer on Linux.
+ *
+ * hSubDevice [IN]
+ *     An event buffer can either hold sub-device related events or system events.
+ *     This handle specifies the sub-device to associate this buffer with.
+ *     If this parameter is NULL, then the buffer is tied to the client instead.
+ *
+ * flags [IN]
+ *     Set to 0 by default.
+ *     This field can hold any future flags to configure the buffer if needed.
+ *
+ * hBufferHeader [IN]
+ *     The backing memory object for the buffer header. Must be an NV01_MEMORY_DEVICELESS object.
+ *     On Windows platforms, a buffer will be internally generated if hBufferHeader is 0.
+ *
+ * hRecordBuffer [IN]
+ *     The backing memory object for the record buffer. Must be an NV01_MEMORY_DEVICELESS object.
+ *     On Windows platforms, a buffer will be internally generated if hRecordBuffer is 0.
+ *
+ * hVardataBuffer [IN]
+ *     The backing memory object for the vardata buffer. Must be an NV01_MEMORY_DEVICELESS object.
+ *     On Windows platforms, a buffer will be internally generated if hVardataBuffer is 0.
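+ *
+ * Consumer sketch (editorial illustration, not SDK code): given a filled-in
+ * NV_EVENT_BUFFER_ALLOC_PARAMETERS "alloc", a user-space reader could drain
+ * the record fifo as below. How the updated GET value is published back to
+ * the kernel (an RM control call) is outside the scope of this header, and
+ * any required read barriers are omitted:
+ *
+ *     NV_EVENT_BUFFER_HEADER *hdr = (NV_EVENT_BUFFER_HEADER *)(NvUPtr)alloc.bufferHeader;
+ *     NvU8 *recs = (NvU8 *)(NvUPtr)alloc.recordBuffer;
+ *     NvU32 get  = hdr->recordGet;
+ *     while (get != hdr->recordPut)            // GET == PUT means the fifo is empty
+ *     {
+ *         NV_EVENT_BUFFER_RECORD *r =
+ *             (NV_EVENT_BUFFER_RECORD *)(recs + (get * alloc.recordSize));
+ *         NvU32 ofs = r->recordHeader.varData & NV_EVENT_VARDATA_OFFSET_MASK;
+ *         if (ofs != 0 || (r->recordHeader.varData & NV_EVENT_VARDATA_START_OFFSET_ZERO))
+ *         {
+ *             // variable-length payload starts at vardataBuffer + ofs
+ *         }
+ *         get = (get + 1) % alloc.recordCount; // wrap; one slot always stays "wasted"
+ *     }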
+ *
+ */
+#define NV_EVENT_BUFFER_ALLOC_PARAMETERS_MESSAGE_ID (0x90cdU)
+
+typedef struct NV_EVENT_BUFFER_ALLOC_PARAMETERS {
+    NV_DECLARE_ALIGNED(NvP64 bufferHeader, 8);
+    NV_DECLARE_ALIGNED(NvP64 recordBuffer, 8);
+    NvU32 recordSize;
+    NvU32 recordCount;
+    NV_DECLARE_ALIGNED(NvP64 vardataBuffer, 8);
+    NvU32 vardataBufferSize;
+    NvU32 recordsFreeThreshold;
+    NV_DECLARE_ALIGNED(NvU64 notificationHandle, 8);
+    NvU32 vardataFreeThreshold;
+    NvHandle hSubDevice;
+    NvU32 flags;
+
+    NvHandle hBufferHeader;
+    NvHandle hRecordBuffer;
+    NvHandle hVardataBuffer;
+} NV_EVENT_BUFFER_ALLOC_PARAMETERS;
+
+/*
+ * NV_EVENT_BUFFER_BIND
+ * This class is used to allocate an Event Type object bound to a given event buffer.
+ * This allocation call associates an event type with an event buffer.
+ * Multiple event types can be associated with the same buffer as long as they belong to
+ * the same category, i.e. either sub-device or system.
+ * When the event buffer is enabled, if an event bound to this buffer occurs,
+ * the relevant data is added to the buffer.
+ * cl2080.h has a list of sub-device events that can be associated with a buffer;
+ * cl0000.h has a list of system events that can be associated with a buffer.
+ * These defines are also used in class NV01_EVENT_OS_EVENT (0x79) to get event notification
+ * and class NV01_EVENT_KERNEL_CALLBACK_EX (0x7E) to get kernel callbacks.
+ * This class extends that support to additionally deliver the relevant data in an event buffer.
+ *
+ */
+#define NV_EVENT_BUFFER_BIND (0x0000007F)
+
+/*
+ * NV_EVENT_BUFFER_BIND_PARAMETERS
+ *
+ * bufferHandle [IN]
+ *     Event buffer handle used to bind the given event type.
+ *
+ * eventType [IN]
+ *     This is one of the eventTypeIDs from cl2080.h/cl0000.h,
+ *     e.g. NV2080_NOTIFIERS_PSTATE_CHANGE.
+ *
+ * eventSubtype [IN]
+ *     Event subtype for a given type of event.
+ *     This field is optional, depending on whether the event type has a subtype.
+ *
+ * hClientTarget [IN]
+ *     Handle of the target client whose events are to be bound to the given buffer,
+ *     e.g. context switch events can be tracked for a given client.
+ *     This field is optional depending on the event type,
+ *     e.g. pstate change events are per GPU but do not depend on a client.
+ *
+ * hSrcResource [IN]
+ *     Source resource handle for the event type,
+ *     e.g. a channel handle: RC/context switch can be tracked for a given channel.
+ *     This field is optional depending on the event type,
+ *     e.g. pstate change events are per GPU and cannot be sub-categorized.
+ *
+ * KernelCallbackdata [IN]
+ *     This field is reserved for KERNEL ONLY clients.
+ *
+ */
+typedef struct NV_EVENT_BUFFER_BIND_PARAMETERS {
+    NvHandle bufferHandle;
+    NvU16 eventType;
+    NvU16 eventSubtype;
+    NvHandle hClientTarget;
+    NvHandle hSrcResource;
+    NV_DECLARE_ALIGNED(NvP64 KernelCallbackdata, 8);
+} NV_EVENT_BUFFER_BIND_PARAMETERS;
+
diff --git a/src/common/sdk/nvidia/inc/class/cl90cdtrace.h b/src/common/sdk/nvidia/inc/class/cl90cdtrace.h
new file mode 100644
index 0000000..5460a9b
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cl90cdtrace.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl90cdtrace_h_
+#define _cl90cdtrace_h_
+
+#include <nvtypes.h>
+
+/* This file defines parameters for RATS/RM tracing */
+
+typedef struct NV_RATS_GSP_TRACE_RECORD_V1
+{
+    NvU16 seqNo;
+    NvU16 gspSeqNo;
+    NvU32 threadId;
+    NvU64 info;
+    NvU64 timeStamp;
+    NvU64 recordType;
+} NV_RATS_GSP_TRACE_RECORD_V1;
+
+typedef NV_RATS_GSP_TRACE_RECORD_V1 NV_RATS_GSP_TRACE_RECORD;
+
+#define VGPU_TRACING_BUFFER_KEEP_OLDEST 0
+#define VGPU_TRACING_BUFFER_KEEP_NEWEST 1
+
+typedef struct NV_RATS_VGPU_GSP_TRACING_BUFFER_V1 {
+    NvU8 policy;
+
+    NvBool bGuestNotifInProgress;
+
+    NvU16 seqNo;
+    NvU32 bufferSize;
+    NvU32 bufferWatermark;
+    NvU32 recordCount;
+
+    NvU64 tracepointMask;
+    NvU32 read;
+    NvU32 write;
+
+    NvU64 lastReadTimestamp;
+
+    NV_RATS_GSP_TRACE_RECORD *buffer;
+} NV_RATS_VGPU_GSP_TRACING_BUFFER_V1;
+
+typedef NV_RATS_VGPU_GSP_TRACING_BUFFER_V1 NV_RATS_VGPU_GSP_TRACING_BUFFER;
+
+#endif
diff --git a/src/common/sdk/nvidia/inc/class/cl90e7.h b/src/common/sdk/nvidia/inc/class/cl90e7.h
new file mode 100644
index 0000000..3219f13
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cl90e7.h
@@ -0,0 +1,38 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl90e7_h_
+#define _cl90e7_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define GF100_SUBDEVICE_INFOROM (0x000090e7)
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif // _cl90e7_h
+
diff --git a/src/common/sdk/nvidia/inc/class/cl90ec.h b/src/common/sdk/nvidia/inc/class/cl90ec.h
new file mode 100644
index 0000000..182640a
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cl90ec.h
@@ -0,0 +1,46 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl90ec_h_
+#define _cl90ec_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvtypes.h"
+
+/* Class within the subdevice used for communicating with HDACODEC */
+#define GF100_HDACODEC (0x000090EC)
+
+ /* pio method data structure */
+typedef volatile struct _cl90ec_tag0 {
+    NvV32 Reserved00[0x7c0];
+} Nv90ECTypedef, GF100Hdacodec;
+#define NV90EC_TYPEDEF GF100Hdacodec
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif /* _cl90ec_h_ */
diff --git a/src/common/sdk/nvidia/inc/class/cl90f1.h b/src/common/sdk/nvidia/inc/class/cl90f1.h
new file mode 100644
index 0000000..7523ef1
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cl90f1.h
@@ -0,0 +1,39 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvtypes.h"
+
+#ifndef _cl90f1_h_
+#define _cl90f1_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define FERMI_VASPACE_A (0x000090f1)
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _cl90f1_h
+
diff --git a/src/common/sdk/nvidia/inc/class/cl9170.h b/src/common/sdk/nvidia/inc/class/cl9170.h
new file mode 100644
index 0000000..64e520c
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cl9170.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/cl9170.finn
+//
+
+#define NV9170_DISPLAY (0x9170U) /* finn: Evaluated from "NV9170_ALLOCATION_PARAMETERS_MESSAGE_ID" */
+
+#define NV9170_ALLOCATION_PARAMETERS_MESSAGE_ID (0x9170U)
+
+typedef struct NV9170_ALLOCATION_PARAMETERS {
+    NvU32 numHeads;  // Number of HEADs in this chip/display
+    NvU32 numDacs;   // Number of DACs in this chip/display
+    NvU32 numSors;   // Number of SORs in this chip/display
+    NvU32 numPiors;  // Number of PIORs in this chip/display
+} NV9170_ALLOCATION_PARAMETERS;
diff --git a/src/common/sdk/nvidia/inc/class/cl9171.h b/src/common/sdk/nvidia/inc/class/cl9171.h
new file mode 100644
index 0000000..731157a
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cl9171.h
@@ -0,0 +1,317 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl9171_h_ +#define _cl9171_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV9171_DISP_SF_USER 0x9171 + +typedef volatile struct _cl9171_tag0 { + NvU32 dispSfUserOffset[0x400]; /* NV_PDISP_SF_USER 0x00690FFF:0x00690000 */ +} _Nv9171DispSfUser, Nv9171DispSfUserMap; + +#define NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME 0x00000000 /* */ +#define NV9171_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */ +#define NV9171_SF_HDMI_INFO_IDX_GCP 0x00000003 /* */ +#define NV9171_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */ +#define NV9171_SF_HDMI_INFO_CTRL(i,j) (0x00690000-0x00690000+(i)*1024+(j)*64) /* RWX4A */ +#define NV9171_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */ +#define NV9171_SF_HDMI_INFO_CTRL_ENABLE 0:0 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_OTHER 4:4 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_CTRL_OTHER_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_SINGLE 8:8 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW_DIS 0x00000000 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_CTRL_HBLANK 12:12 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_HBLANK_DIS 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_CTRL_HBLANK_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_VIDEO_FMT 16:16 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_VIDEO_FMT_SW_CONTROLLED 0x00000000 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_VIDEO_FMT_HW_CONTROLLED 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_VIDEO_FMT_INIT 0x00000001 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_STATUS(i,j) (0x00690004-0x00690000+(i)*1024+(j)*64) /* R--4A */ +#define NV9171_SF_HDMI_INFO_STATUS__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_INFO_STATUS__SIZE_2 5 /* */ +#define NV9171_SF_HDMI_INFO_STATUS_SENT 0:0 /* R--VF */ +#define NV9171_SF_HDMI_INFO_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NV9171_SF_HDMI_INFO_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NV9171_SF_HDMI_INFO_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x00690000-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE 0:0 /* RWIVF */ +#define 
NV9171_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER 4:4 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE 8:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_STATUS(i) (0x00690004-0x00690000+(i)*1024) /* R--4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_STATUS__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_STATUS_SENT 0:0 /* R--VF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x00690008-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x0069000C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x00690010-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define 
NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x00690014-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x00690018-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_HEADER(i) (0x00690048-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_HEADER__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW(i) (0x0069004C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH(i) (0x00690050-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW(i) (0x00690054-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7_INIT 
0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH(i) (0x00690058-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW(i) (0x0069005C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH(i) (0x00690060-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW(i) (0x00690064-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH(i) (0x00690068-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26_INIT 
0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GCP_SUBPACK(i) (0x006900CC-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GCP_SUBPACK__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB0_INIT 0x00000001 /* RWI-V */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE 0x00000010 /* RW--V */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_HEADER(i) (0x00690108-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_HEADER__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_HEADER_HB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_HEADER_HB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_HEADER_HB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW(i) (0x0069010C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH(i) (0x00690110-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW(i) (0x00690114-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH(i) (0x00690118-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ 
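+
+// Editorial aside: the "high:low" values in this file (e.g. 15:8 above) follow
+// NVIDIA's DRF bit-range convention and are meant to be consumed with helper
+// macros such as those in nvmisc.h. A minimal sketch of the idiom, using
+// illustrative EX_* names rather than the real SDK macros (the trick is that
+// "(0 ? 15:8)" evaluates to 8 and "(1 ? 15:8)" to 15):
+//
+//     #define EX_SHIFT(r)  ((0 ? r) % 32)
+//     #define EX_MASK(r)   (0xFFFFFFFFU >> (31 - ((1 ? r) % 32) + ((0 ? r) % 32)))
+//     #define EX_NUM(r, n) (((n) & EX_MASK(r)) << EX_SHIFT(r))
+//     #define EX_VAL(r, v) (((v) >> EX_SHIFT(r)) & EX_MASK(r))
+//
+// e.g. packing VSI subpack bytes PB7..PB10 into one 32-bit register write:
+//     NvU32 w = EX_NUM(7:0, pb7) | EX_NUM(15:8, pb8) |
+//               EX_NUM(23:16, pb9) | EX_NUM(31:24, pb10);
+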
+#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW(i) (0x0069011C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH(i) (0x00690120-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW(i) (0x00690124-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH(i) (0x00690128-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _cl9171_h_ diff --git a/src/common/sdk/nvidia/inc/class/cl917a.h b/src/common/sdk/nvidia/inc/class/cl917a.h new file mode 100644 index 0000000..2a7ffc5 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl917a.h @@ -0,0 +1,57 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _cl917a_h_ +#define _cl917a_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV917A_CURSOR_CHANNEL_PIO (0x0000917A) + +typedef volatile struct { + NvV32 Reserved00[0x2]; + NvV32 Free; // 0x00000008 - 0x0000000B + NvV32 Reserved01[0x1D]; + NvV32 Update; // 0x00000080 - 0x00000083 + NvV32 SetCursorHotSpotPointsOut[2]; // 0x00000084 - 0x0000008B + NvV32 Reserved02[0x3DD]; +} GK104DispCursorControlPio; + +#define NV917A_FREE (0x00000008) +#define NV917A_FREE_COUNT 5:0 +#define NV917A_UPDATE (0x00000080) +#define NV917A_UPDATE_INTERLOCK_WITH_CORE 0:0 +#define NV917A_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NV917A_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NV917A_SET_CURSOR_HOT_SPOT_POINTS_OUT(b) (0x00000084 + (b)*0x00000004) +#define NV917A_SET_CURSOR_HOT_SPOT_POINTS_OUT_X 15:0 +#define NV917A_SET_CURSOR_HOT_SPOT_POINTS_OUT_Y 31:16 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl917a_h + diff --git a/src/common/sdk/nvidia/inc/class/cl917b.h b/src/common/sdk/nvidia/inc/class/cl917b.h new file mode 100644 index 0000000..78b39c4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl917b.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _cl917b_h_ +#define _cl917b_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV917B_OVERLAY_IMM_CHANNEL_PIO (0x0000917B) + +typedef volatile struct { + NvV32 Reserved00[0x2]; + NvV32 Free; // 0x00000008 - 0x0000000B + NvV32 Reserved01[0x1D]; + NvV32 Update; // 0x00000080 - 0x00000083 + NvV32 SetPointsOut[2]; // 0x00000084 - 0x0000008B + NvV32 Reserved02[0x1]; + NvV32 AwakenOnceFlippedTo; // 0x00000090 - 0x00000093 + NvV32 Reserved03[0x3DB]; +} GK104DispOverlayImmControlPio; + +#define NV917B_FREE (0x00000008) +#define NV917B_FREE_COUNT 5:0 +#define NV917B_UPDATE (0x00000080) +#define NV917B_UPDATE_INTERLOCK_WITH_CORE 0:0 +#define NV917B_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NV917B_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NV917B_SET_POINTS_OUT(b) (0x00000084 + (b)*0x00000004) +#define NV917B_SET_POINTS_OUT_X 15:0 +#define NV917B_SET_POINTS_OUT_Y 31:16 +#define NV917B_AWAKEN_ONCE_FLIPPED_TO (0x00000090) +#define NV917B_AWAKEN_ONCE_FLIPPED_TO_AWAKEN_COUNT 11:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl917b_h + diff --git a/src/common/sdk/nvidia/inc/class/cl917c.h b/src/common/sdk/nvidia/inc/class/cl917c.h new file mode 100644 index 0000000..2b7c6a2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl917c.h @@ -0,0 +1,298 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _cl917c_h_ +#define _cl917c_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV917C_BASE_CHANNEL_DMA (0x0000917C) + +#define NV_DISP_BASE_NOTIFIER_1 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1_SIZEOF 0x00000004 +#define NV_DISP_BASE_NOTIFIER_1__0 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1__0_PRESENTATION_COUNT 15:0 +#define NV_DISP_BASE_NOTIFIER_1__0_TIMESTAMP 29:16 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS 31:30 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_FINISHED 0x00000002 + + +#define NV_DISP_NOTIFICATION_2 0x00000000 +#define NV_DISP_NOTIFICATION_2_SIZEOF 0x00000010 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0 0x00000000 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0_NANOSECONDS0 31:0 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1 0x00000001 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1_NANOSECONDS1 31:0 +#define NV_DISP_NOTIFICATION_2_INFO32_2 0x00000002 +#define NV_DISP_NOTIFICATION_2_INFO32_2_R0 31:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3 0x00000003 +#define NV_DISP_NOTIFICATION_2_INFO16_3_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3_FIELD 8:8 +#define NV_DISP_NOTIFICATION_2_INFO16_3_R1 15:9 +#define NV_DISP_NOTIFICATION_2__3_STATUS 31:16 +#define NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED 0x00000000 + + +#define NV_DISP_NOTIFICATION_INFO16 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_INFO16__0 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_INFO16__0_FIELD 8:8 +#define NV_DISP_NOTIFICATION_INFO16__0_R1 15:9 + + +#define NV_DISP_NOTIFICATION_STATUS 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_STATUS__0 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS 15:0 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_FINISHED 0x00000000 + + +// dma opcode instructions +#define NV917C_DMA 0x00000000 +#define NV917C_DMA_OPCODE 31:29 +#define NV917C_DMA_OPCODE_METHOD 0x00000000 +#define NV917C_DMA_OPCODE_JUMP 0x00000001 +#define NV917C_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV917C_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV917C_DMA_OPCODE 31:29 +#define NV917C_DMA_OPCODE_METHOD 0x00000000 +#define NV917C_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV917C_DMA_METHOD_COUNT 27:18 +#define NV917C_DMA_METHOD_OFFSET 11:2 +#define NV917C_DMA_DATA 31:0 +#define NV917C_DMA_DATA_NOP 0x00000000 +#define NV917C_DMA_OPCODE 31:29 +#define NV917C_DMA_OPCODE_JUMP 0x00000001 +#define NV917C_DMA_JUMP_OFFSET 11:2 +#define NV917C_DMA_OPCODE 31:29 +#define NV917C_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV917C_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV917C_PUT (0x00000000) +#define NV917C_PUT_PTR 11:2 +#define NV917C_GET (0x00000004) +#define NV917C_GET_PTR 11:2 +#define NV917C_GET_SCANLINE (0x00000010) +#define NV917C_GET_SCANLINE_LINE 15:0 +#define NV917C_UPDATE (0x00000080) +#define NV917C_UPDATE_INTERLOCK_WITH_CORE 0:0 +#define NV917C_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NV917C_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NV917C_UPDATE_SPECIAL_HANDLING 25:24 +#define 
NV917C_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV917C_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV917C_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV917C_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV917C_SET_PRESENT_CONTROL (0x00000084) +#define NV917C_SET_PRESENT_CONTROL_BEGIN_MODE 9:8 +#define NV917C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NV917C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NV917C_SET_PRESENT_CONTROL_BEGIN_MODE_ON_LINE (0x00000002) +#define NV917C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE 3:3 +#define NV917C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_PAIR_FLIP (0x00000000) +#define NV917C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_AT_ANY_FRAME (0x00000001) +#define NV917C_SET_PRESENT_CONTROL_TIMESTAMP_MODE 2:2 +#define NV917C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NV917C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NV917C_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 7:4 +#define NV917C_SET_PRESENT_CONTROL_BEGIN_LINE 30:16 +#define NV917C_SET_PRESENT_CONTROL_ON_LINE_MARGIN 15:10 +#define NV917C_SET_PRESENT_CONTROL_MODE 1:0 +#define NV917C_SET_PRESENT_CONTROL_MODE_MONO (0x00000000) +#define NV917C_SET_PRESENT_CONTROL_MODE_STEREO (0x00000001) +#define NV917C_SET_PRESENT_CONTROL_MODE_SPEC_FLIP (0x00000002) +#define NV917C_SET_SEMAPHORE_CONTROL (0x00000088) +#define NV917C_SET_SEMAPHORE_CONTROL_OFFSET 11:2 +#define NV917C_SET_SEMAPHORE_CONTROL_DELAY 26:26 +#define NV917C_SET_SEMAPHORE_CONTROL_DELAY_DISABLE (0x00000000) +#define NV917C_SET_SEMAPHORE_CONTROL_DELAY_ENABLE (0x00000001) +#define NV917C_SET_SEMAPHORE_CONTROL_FORMAT 28:28 +#define NV917C_SET_SEMAPHORE_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV917C_SET_SEMAPHORE_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV917C_SET_SEMAPHORE_ACQUIRE (0x0000008C) +#define NV917C_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NV917C_SET_SEMAPHORE_RELEASE (0x00000090) +#define NV917C_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NV917C_SET_CONTEXT_DMA_SEMAPHORE (0x00000094) +#define NV917C_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0 +#define NV917C_SET_NOTIFIER_CONTROL (0x000000A0) +#define NV917C_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV917C_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV917C_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV917C_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV917C_SET_NOTIFIER_CONTROL_DELAY 26:26 +#define NV917C_SET_NOTIFIER_CONTROL_DELAY_DISABLE (0x00000000) +#define NV917C_SET_NOTIFIER_CONTROL_DELAY_ENABLE (0x00000001) +#define NV917C_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV917C_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV917C_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV917C_SET_CONTEXT_DMA_NOTIFIER (0x000000A4) +#define NV917C_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV917C_SET_CONTEXT_DMAS_ISO(b) (0x000000C0 + (b)*0x00000004) +#define NV917C_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV917C_SET_BASE_LUT_LO (0x000000E0) +#define NV917C_SET_BASE_LUT_LO_ENABLE 31:30 +#define NV917C_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV917C_SET_BASE_LUT_LO_ENABLE_USE_CORE_LUT (0x00000001) +#define NV917C_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000002) +#define NV917C_SET_BASE_LUT_LO_MODE 27:24 +#define NV917C_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV917C_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV917C_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV917C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE 
(0x00000004) +#define NV917C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV917C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV917C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV917C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV917C_SET_BASE_LUT_HI (0x000000E4) +#define NV917C_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV917C_SET_OUTPUT_LUT_LO (0x000000E8) +#define NV917C_SET_OUTPUT_LUT_LO_ENABLE 31:30 +#define NV917C_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV917C_SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT (0x00000001) +#define NV917C_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000002) +#define NV917C_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV917C_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV917C_SET_OUTPUT_LUT_HI (0x000000EC) +#define NV917C_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define NV917C_SET_CONTEXT_DMA_LUT (0x000000FC) +#define NV917C_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV917C_SET_PROCESSING (0x00000110) +#define NV917C_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV917C_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV917C_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV917C_SET_CONVERSION_RED (0x00000114) +#define NV917C_SET_CONVERSION_RED_GAIN 15:0 +#define NV917C_SET_CONVERSION_RED_OFS 31:16 +#define NV917C_SET_CONVERSION_GRN (0x00000118) +#define NV917C_SET_CONVERSION_GRN_GAIN 15:0 +#define NV917C_SET_CONVERSION_GRN_OFS 31:16 +#define NV917C_SET_CONVERSION_BLU (0x0000011C) +#define NV917C_SET_CONVERSION_BLU_GAIN 15:0 +#define NV917C_SET_CONVERSION_BLU_OFS 31:16 +#define NV917C_SET_TIMESTAMP_ORIGIN_LO (0x00000130) +#define NV917C_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NV917C_SET_TIMESTAMP_ORIGIN_HI (0x00000134) +#define NV917C_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NV917C_SET_UPDATE_TIMESTAMP_LO (0x00000138) +#define NV917C_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NV917C_SET_UPDATE_TIMESTAMP_HI (0x0000013C) +#define NV917C_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NV917C_SET_CSC_RED2RED (0x00000140) +#define NV917C_SET_CSC_RED2RED_OWNER 31:31 +#define NV917C_SET_CSC_RED2RED_OWNER_CORE (0x00000000) +#define NV917C_SET_CSC_RED2RED_OWNER_BASE (0x00000001) +#define NV917C_SET_CSC_RED2RED_COEFF 18:0 +#define NV917C_SET_CSC_GRN2RED (0x00000144) +#define NV917C_SET_CSC_GRN2RED_COEFF 18:0 +#define NV917C_SET_CSC_BLU2RED (0x00000148) +#define NV917C_SET_CSC_BLU2RED_COEFF 18:0 +#define NV917C_SET_CSC_CONSTANT2RED (0x0000014C) +#define NV917C_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV917C_SET_CSC_RED2GRN (0x00000150) +#define NV917C_SET_CSC_RED2GRN_COEFF 18:0 +#define NV917C_SET_CSC_GRN2GRN (0x00000154) +#define NV917C_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV917C_SET_CSC_BLU2GRN (0x00000158) +#define NV917C_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV917C_SET_CSC_CONSTANT2GRN (0x0000015C) +#define NV917C_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV917C_SET_CSC_RED2BLU (0x00000160) +#define 
NV917C_SET_CSC_RED2BLU_COEFF 18:0 +#define NV917C_SET_CSC_GRN2BLU (0x00000164) +#define NV917C_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV917C_SET_CSC_BLU2BLU (0x00000168) +#define NV917C_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV917C_SET_CSC_CONSTANT2BLU (0x0000016C) +#define NV917C_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV917C_SET_SPARE (0x000003BC) +#define NV917C_SET_SPARE_UNUSED 31:0 +#define NV917C_SET_SPARE_NOOP(b) (0x000003C0 + (b)*0x00000004) +#define NV917C_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV917C_SURFACE_SET_OFFSET(a,b) (0x00000400 + (a)*0x00000020 + (b)*0x00000004) +#define NV917C_SURFACE_SET_OFFSET_ORIGIN 31:0 +#define NV917C_SURFACE_SET_SIZE(a) (0x00000408 + (a)*0x00000020) +#define NV917C_SURFACE_SET_SIZE_WIDTH 15:0 +#define NV917C_SURFACE_SET_SIZE_HEIGHT 31:16 +#define NV917C_SURFACE_SET_STORAGE(a) (0x0000040C + (a)*0x00000020) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV917C_SURFACE_SET_STORAGE_PITCH 20:8 +#define NV917C_SURFACE_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV917C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV917C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV917C_SURFACE_SET_PARAMS(a) (0x00000410 + (a)*0x00000020) +#define NV917C_SURFACE_SET_PARAMS_FORMAT 15:8 +#define NV917C_SURFACE_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV917C_SURFACE_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV917C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV917C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV917C_SURFACE_SET_PARAMS_GAMMA 2:2 +#define NV917C_SURFACE_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV917C_SURFACE_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV917C_SURFACE_SET_PARAMS_LAYOUT 5:4 +#define NV917C_SURFACE_SET_PARAMS_LAYOUT_FRM (0x00000000) +#define NV917C_SURFACE_SET_PARAMS_LAYOUT_FLD1 (0x00000001) +#define NV917C_SURFACE_SET_PARAMS_LAYOUT_FLD2 (0x00000002) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl917c_h diff --git a/src/common/sdk/nvidia/inc/class/cl917cswspare.h b/src/common/sdk/nvidia/inc/class/cl917cswspare.h new file mode 100644 index 0000000..39e88c0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl917cswspare.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2010-2014, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl917c_sw_spare_h_ +#define _cl917c_sw_spare_h_ + +/* This file is *not* auto-generated. */ + +/* NV917C_SET_SPARE_PRE_UPDATE_TRAP is an alias of NV917C_SET_SPARE_NOOP(0) */ +#define NV917C_SET_SPARE_PRE_UPDATE_TRAP (0x000003C0) +#define NV917C_SET_SPARE_PRE_UPDATE_TRAP_UNUSED 31:0 + +/* NV917C_SET_SPARE_POST_UPDATE_TRAP is an alias of NV917C_SET_SPARE_NOOP(1) */ +#define NV917C_SET_SPARE_POST_UPDATE_TRAP (0x000003C4) +#define NV917C_SET_SPARE_POST_UPDATE_TRAP_UNUSED 31:0 + + +#endif /* _cl917c_sw_spare_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl917d.h b/src/common/sdk/nvidia/inc/class/cl917d.h new file mode 100644 index 0000000..4f70d81 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl917d.h @@ -0,0 +1,1551 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _cl917d_h_ +#define _cl917d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV917D_CORE_CHANNEL_DMA (0x0000917D) + +#define NV917D_CORE_NOTIFIER_3 0x00000000 +#define NV917D_CORE_NOTIFIER_3_SIZEOF 0x00000150 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16 +#define NV917D_CORE_NOTIFIER_3__1 0x00000001 +#define NV917D_CORE_NOTIFIER_3__1_R1 31:0 +#define NV917D_CORE_NOTIFIER_3__2 0x00000002 +#define NV917D_CORE_NOTIFIER_3__2_R2 31:0 +#define NV917D_CORE_NOTIFIER_3__3 0x00000003 +#define NV917D_CORE_NOTIFIER_3__3_R3 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 
0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_11 0x0000000B +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000 
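+/*
+ * Illustrative sketch (editor's addition, not part of the generated
+ * header): every field in these class headers is spelled as "HIGH:LOW"
+ * bit positions within a 32-bit dword -- e.g. _SINGLE_TMDS_A above is
+ * bit 8:8 and _DP_CLK_MAX is bits 7:0.  Driver code normally consumes
+ * them through the DRF_* macro family in nvmisc.h; the minimal
+ * stand-ins below show the underlying trick: "0 ? 8:8" and "1 ? 8:8"
+ * parse as ternary expressions selecting the low and high bit
+ * respectively.
+ */
+#define FLD_LOW(f)     (0 ? f)   /* low bit index of an "H:L" field  */
+#define FLD_HIGH(f)    (1 ? f)   /* high bit index of an "H:L" field */
+#define FLD_MASK(f)    (0xFFFFFFFFu >> (31 - FLD_HIGH(f) + FLD_LOW(f)))
+#define FLD_VAL(f, v)  (((v) >> FLD_LOW(f)) & FLD_MASK(f))
+/*
+ * Usage, with 'dw' holding dword 0x16 (CAP_SOR1_22) of the notifier:
+ *
+ *   int singleLvds18 =
+ *     FLD_VAL(NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18, dw)
+ *     == NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE;
+ */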
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 0x00000000 
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000 
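+/*
+ * Illustrative sketch (editor's addition): the per-SOR capability data
+ * repeats at a fixed stride -- SOR0 occupies dwords 0x14..0x15, SOR1
+ * dwords 0x16..0x17, SOR2 0x18..0x19, and so on -- so generic code can
+ * index any SOR's first capability dword from the SOR0 define.
+ * 'notifier' is a hypothetical pointer to the CPU mapping of the core
+ * notifier, viewed as 32-bit dwords (NvU32 per nvtypes.h).
+ */
+static inline NvU32 nv917dSorCapDword0(const volatile NvU32 *notifier,
+                                       unsigned int sor /* 0..7 */)
+{
+    /* 0x14 == CAP_SOR0_20; two capability dwords per SOR. */
+    return notifier[NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 +
+                    2u * sor];
+}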
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9 
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10 
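+/*
+ * Illustrative sketch (editor's addition): the lock-pin usage table in
+ * CAPABILITIES_5/_6 above packs one 4-bit usage code per pin -- pins
+ * 0..7 in dword 5, pins 8..15 ("8", "9", "A".."F") in dword 6 -- so a
+ * single helper can decode any pin.  The return value is one of the
+ * _UNAVAILABLE/_SCAN_LOCK/_FLIP_LOCK/_STEREO codes defined above.
+ * 'notifier' is again a hypothetical CPU mapping of the core notifier.
+ */
+static inline NvU32 nv917dLockPinUsage(const volatile NvU32 *notifier,
+                                       unsigned int pin /* 0..15 */)
+{
+    const NvU32 dw = notifier[NV917D_CORE_NOTIFIER_3_CAPABILITIES_5 +
+                              (pin >> 3)];
+    return (dw >> ((pin & 7u) * 4u)) & 0xFu;
+}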
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001 
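+/*
+ * Illustrative sketch (editor's addition): a common handshake with this
+ * notifier is to clear COMPLETION_0_DONE, kick the core channel, and
+ * then poll (or sleep on an awaken event; cf. the base channel's
+ * NV917C_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN earlier in this patch)
+ * until hardware writes DONE_TRUE.  A bare polling loop, with
+ * 'notifier' a CPU mapping of the notifier surface:
+ */
+static inline void nv917dWaitForCompletion(const volatile NvU32 *notifier)
+{
+    /* DONE is bit 0:0 of dword NV917D_CORE_NOTIFIER_3_COMPLETION_0. */
+    while ((notifier[NV917D_CORE_NOTIFIER_3_COMPLETION_0] & 0x1u) !=
+           NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE)
+        ;   /* production code would bound this loop with a timeout */
+}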
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:7 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:7 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:7
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:7
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 0x0000002B
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44 0x0000002C
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45 0x0000002D
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45_R1 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46 0x0000002E
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47 0x0000002F
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47_R1 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48 0x00000030
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49 0x00000031
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49_R1 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50 0x00000032
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51 0x00000033
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51_R1 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0
+
+
+// dma opcode instructions
+#define NV917D_DMA 0x00000000
+#define NV917D_DMA_OPCODE 31:29
+#define NV917D_DMA_OPCODE_METHOD 0x00000000
+#define NV917D_DMA_OPCODE_JUMP 0x00000001
+#define NV917D_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NV917D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NV917D_DMA_OPCODE 31:29
+#define NV917D_DMA_OPCODE_METHOD 0x00000000
+#define NV917D_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NV917D_DMA_METHOD_COUNT 27:18
+#define NV917D_DMA_METHOD_OFFSET 11:2
+#define NV917D_DMA_DATA 31:0
+#define NV917D_DMA_DATA_NOP 0x00000000
+#define NV917D_DMA_OPCODE 31:29
+#define NV917D_DMA_OPCODE_JUMP 0x00000001
+#define NV917D_DMA_JUMP_OFFSET 11:2
+#define NV917D_DMA_OPCODE 31:29
+#define NV917D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NV917D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NV917D_PUT (0x00000000)
+#define NV917D_PUT_PTR 11:2
+#define NV917D_GET (0x00000004)
+#define NV917D_GET_PTR 11:2
+#define NV917D_UPDATE (0x00000080)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE0 1:1
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE1 5:5
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE2 9:9
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE3 13:13
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001)
+#define NV917D_UPDATE_SPECIAL_HANDLING 25:24
+#define NV917D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NV917D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NV917D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NV917D_UPDATE_SPECIAL_HANDLING_REASON 23:16
+#define NV917D_UPDATE_NOT_DRIVER_FRIENDLY 31:31
+#define NV917D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000)
+#define NV917D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001)
+#define NV917D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30
+#define NV917D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000)
+#define NV917D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001)
+#define NV917D_UPDATE_INHIBIT_INTERRUPTS 29:29
+#define NV917D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NV917D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NV917D_SET_NOTIFIER_CONTROL (0x00000084)
+#define NV917D_SET_NOTIFIER_CONTROL_MODE 30:30
+#define NV917D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NV917D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NV917D_SET_NOTIFIER_CONTROL_OFFSET 11:2
+#define NV917D_SET_NOTIFIER_CONTROL_NOTIFY 31:31
+#define NV917D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000)
+#define NV917D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001)
+#define NV917D_SET_NOTIFIER_CONTROL_FORMAT 28:28
+#define NV917D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000)
+#define NV917D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001)
+#define NV917D_SET_CONTEXT_DMA_NOTIFIER (0x00000088)
+#define NV917D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NV917D_GET_CAPABILITIES (0x0000008C)
+#define NV917D_GET_CAPABILITIES_DUMMY 31:0
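As an aside on the NV917D_DMA_* fields above: they describe the layout of each 32-bit pushbuffer entry for the core channel, using the header's "high:low" bit-range notation. The sketch below shows one way a client might pack an incrementing-method header from those ranges; nv_field() and nv917d_method_header() are hypothetical helpers written for this illustration, not macros from these headers, and the assumption that METHOD_OFFSET carries bits 11:2 of the method's byte offset is a convention, not something these defines state.

#include <stdint.h>

/* Hypothetical helper: place 'val' into bits [hi:lo] of a 32-bit word,
 * mirroring the "hi:lo" field notation used by these headers. */
static uint32_t nv_field(uint32_t val, unsigned hi, unsigned lo)
{
    return (val & ((1u << (hi - lo + 1u)) - 1u)) << lo;
}

/* Incrementing-method header per the NV917D_DMA_* layout above:
 * OPCODE (31:29) = METHOD, METHOD_COUNT (27:18) = data-word count,
 * METHOD_OFFSET (11:2) = bits 11:2 of the method's byte offset. */
static uint32_t nv917d_method_header(uint32_t method, uint32_t count)
{
    return nv_field(0x0 /* OPCODE_METHOD */, 31, 29) |
           nv_field(count, 27, 18) |
           nv_field(method >> 2, 11, 2);
}

/* e.g. push[0] = nv917d_method_header(NV917D_UPDATE, 1);
 *      push[1] = 0;   // UPDATE data word with no interlock bits set */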
+#define NV917D_SET_SPARE (0x0000016C)
+#define NV917D_SET_SPARE_UNUSED 31:0
+#define NV917D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004)
+#define NV917D_SET_SPARE_NOOP_UNUSED 31:0
+
+#define NV917D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020)
+#define NV917D_DAC_SET_CONTROL_OWNER_MASK 3:0
+#define NV917D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV917D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV917D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV917D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV917D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV917D_DAC_SET_CONTROL_PROTOCOL 12:8
+#define NV917D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000)
+#define NV917D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013)
+#define NV917D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020)
+#define NV917D_DAC_SET_SW_SPARE_A_CODE 31:0
+#define NV917D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020)
+#define NV917D_DAC_SET_SW_SPARE_B_CODE 31:0
+#define NV917D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020)
+#define NV917D_DAC_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV917D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NV917D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV917D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV917D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+#define NV917D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020)
+#define NV917D_SOR_SET_SW_SPARE_A_CODE 31:0
+#define NV917D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020)
+#define NV917D_SOR_SET_SW_SPARE_B_CODE 31:0
+#define NV917D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020)
+#define NV917D_SOR_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV917D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NV917D_PIOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV917D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV917D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV917D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV917D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV917D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV917D_PIOR_SET_CONTROL_PROTOCOL 11:8
+#define NV917D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000)
+#define NV917D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001)
+#define NV917D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV917D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV917D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV917D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020)
+#define NV917D_PIOR_SET_SW_SPARE_A_CODE 31:0
+#define NV917D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020)
+#define NV917D_PIOR_SET_SW_SPARE_B_CODE 31:0
+#define NV917D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020)
+#define NV917D_PIOR_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV917D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300)
+#define NV917D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0
+#define NV917D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8
+#define NV917D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009)
+#define NV917D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTROL_STRUCTURE 0:0
+#define NV917D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK 1:1
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN 29:25
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30
+#define NV917D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31
+#define NV917D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300)
+#define NV917D_HEAD_SET_OVERSCAN_COLOR_RED 9:0
+#define NV917D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10
+#define NV917D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20
+#define NV917D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300)
+#define NV917D_HEAD_SET_RASTER_SIZE_WIDTH 14:0
+#define NV917D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16
+#define NV917D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300)
+#define NV917D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NV917D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NV917D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300)
+#define NV917D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NV917D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NV917D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300)
+#define NV917D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NV917D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NV917D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300)
+#define NV917D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0
+#define NV917D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16
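The raster timing methods just above each take an X/Y or start/end pair packed into one word (14:0 and 30:16). As a sketch, reusing the hypothetical nv_field() helper from the earlier example, with example CEA-861 1080p totals that are not drawn from these headers:

/* Pack a total raster of 2200x1125 into NV917D_HEAD_SET_RASTER_SIZE's
 * WIDTH (14:0) and HEIGHT (30:16) fields. */
uint32_t raster_size = nv_field(2200, 14, 0) |    /* htotal */
                       nv_field(1125, 30, 16);    /* vtotal */
/* method = NV917D_HEAD_SET_RASTER_SIZE(head); data = raster_size; */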
+#define NV917D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300)
+#define NV917D_HEAD_SET_LOCK_CHAIN_POSITION 27:24
+#define NV917D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300)
+#define NV917D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0
+#define NV917D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10
+#define NV917D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20
+#define NV917D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0
+#define NV917D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000)
+#define NV917D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001)
+#define NV917D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002)
+#define NV917D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2
+#define NV917D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NV917D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NV917D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3
+#define NV917D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000)
+#define NV917D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001)
+#define NV917D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4
+#define NV917D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000)
+#define NV917D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF)
+#define NV917D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5
+#define NV917D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6
+#define NV917D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
+#define NV917D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300)
+#define NV917D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31
+#define NV917D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE 27:24
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV917D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV917D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300)
+#define NV917D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300)
+#define NV917D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV917D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0
+#define NV917D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300)
+#define NV917D_HEAD_SET_OFFSET_ORIGIN 31:0
+#define NV917D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300)
+#define NV917D_HEAD_SET_SIZE_WIDTH 15:0
+#define NV917D_HEAD_SET_SIZE_HEIGHT 31:16
+#define NV917D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300)
+#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV917D_HEAD_SET_STORAGE_PITCH 20:8
+#define NV917D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24
+#define NV917D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV917D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV917D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300)
+#define NV917D_HEAD_SET_PARAMS_FORMAT 15:8
+#define NV917D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NV917D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0
+#define NV917D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV917D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV917D_HEAD_SET_PARAMS_GAMMA 2:2
+#define NV917D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000)
+#define NV917D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001)
+#define NV917D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0
+#define NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300)
+#define NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0
+#define NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000)
+#define NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001)
+#define NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NV917D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24
+#define NV917D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8
+#define NV917D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV917D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004)
+#define NV917D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0
+#define NV917D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004)
+#define NV917D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300)
+#define NV917D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NV917D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2
+#define NV917D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000)
+#define NV917D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001)
+#define NV917D_HEAD_SET_PROCAMP_SAT_COS 19:8
+#define NV917D_HEAD_SET_PROCAMP_SAT_SINE 31:20
+#define NV917D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5
+#define NV917D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NV917D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6
+#define NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
+#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS 2:1
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000)
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001)
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE 6:3
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NV917D_HEAD_SET_DITHER_CONTROL_PHASE 8:7
+#define NV917D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300)
+#define NV917D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NV917D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300)
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NV917D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300)
+#define NV917D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0
+#define NV917D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300)
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300)
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300)
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002)
+#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300)
+#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0
+#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003)
+#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005)
+#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12
+#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000)
+#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001)
+#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002)
+#define NV917D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300)
+#define NV917D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0
+#define NV917D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONVERSION_RED_GAIN 15:0
+#define NV917D_HEAD_SET_CONVERSION_RED_OFS 31:16
+#define NV917D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONVERSION_GRN_GAIN 15:0
+#define NV917D_HEAD_SET_CONVERSION_GRN_OFS 31:16
+#define NV917D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONVERSION_BLU_GAIN 15:0
+#define NV917D_HEAD_SET_CONVERSION_BLU_OFS 31:16
+#define NV917D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31
+#define NV917D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_CSC_RED2RED_COEFF 18:0
+#define NV917D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CSC_GRN2RED_COEFF 18:0
+#define NV917D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CSC_BLU2RED_COEFF 18:0
+#define NV917D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300)
+#define NV917D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0
+#define NV917D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CSC_RED2GRN_COEFF 18:0
+#define NV917D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0
+#define NV917D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0
+#define NV917D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300)
+#define NV917D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0
+#define NV917D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CSC_RED2BLU_COEFF 18:0
+#define NV917D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0
+#define NV917D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0
+#define NV917D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300)
+#define NV917D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0
+#define NV917D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300)
+#define NV917D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0
+#define NV917D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000)
+#define NV917D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001)
+#define NV917D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002)
+#define NV917D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4
+#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12
+#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000)
+#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001)
+#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002)
+#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003)
+#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004)
+#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005)
+#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006)
+#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008)
+#define NV917D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300)
+#define NV917D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0
+#define NV917D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10
+#define NV917D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20
+#define NV917D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004)
+#define NV917D_HEAD_SET_DISPLAY_ID_CODE 31:0
+#define NV917D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300)
+#define NV917D_HEAD_SET_SW_SPARE_A_CODE 31:0
+#define NV917D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300)
+#define NV917D_HEAD_SET_SW_SPARE_B_CODE 31:0
+#define NV917D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300)
+#define NV917D_HEAD_SET_SW_SPARE_C_CODE 31:0
+#define NV917D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300)
+#define NV917D_HEAD_SET_SW_SPARE_D_CODE 31:0
+#define NV917D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300)
+#define NV917D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0
+#define NV917D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000)
+#define NV917D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1
+#define NV917D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000)
+#define NV917D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300)
+#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0
+#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300)
+#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0
+#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300)
+#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0
+#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300)
+#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0
+#define NV917D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300)
+#define NV917D_HEAD_SET_SPARE_UNUSED 31:0
+#define NV917D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004)
+#define NV917D_HEAD_SET_SPARE_NOOP_UNUSED 31:0
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _cl917d_h
diff --git a/src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h b/src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h
new file mode 100644
index 0000000..dbcfb1c
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h
@@ -0,0 +1,45 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2003-2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __cl917dcrcnotif_h__
+#define __cl917dcrcnotif_h__
+/* This file is autogenerated. Do not edit */
+
+#define NV917D_NOTIFIER_CRC_1_STATUS_0 0x00000000
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_DONE 0:0
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_FALSE 0x00000000
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_TRUE 0x00000001
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_COMPOSITOR_OVERFLOW 3:3
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_COMPOSITOR_OVERFLOW_FALSE 0x00000000
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_COMPOSITOR_OVERFLOW_TRUE 0x00000001
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_PRIMARY_OUTPUT_OVERFLOW 4:4
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_PRIMARY_OUTPUT_OVERFLOW_FALSE 0x00000000
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_PRIMARY_OUTPUT_OVERFLOW_TRUE 0x00000001
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_COUNT 31:24
+#define NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_3 0x00000003
+#define NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_3_COMPOSITOR_CRC 31:0
+#define NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4 0x00000004
+#define NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4_PRIMARY_OUTPUT_CRC 31:0
+#define NV917D_NOTIFIER_CRC_1_CRC_ENTRY1_8 0x00000008
+
+#endif // __cl917dcrcnotif_h__
diff --git a/src/common/sdk/nvidia/inc/class/cl917e.h b/src/common/sdk/nvidia/inc/class/cl917e.h
new file mode 100644
index 0000000..6586bda
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cl917e.h
@@ -0,0 +1,265 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl917e_h_
+#define _cl917e_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NV917E_OVERLAY_CHANNEL_DMA (0x0000917E)
+
+#define NV_DISP_NOTIFICATION_2 0x00000000
+#define NV_DISP_NOTIFICATION_2_SIZEOF 0x00000010
+#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0 0x00000000
+#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0_NANOSECONDS0 31:0
+#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1 0x00000001
+#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1_NANOSECONDS1 31:0
+#define NV_DISP_NOTIFICATION_2_INFO32_2 0x00000002
+#define NV_DISP_NOTIFICATION_2_INFO32_2_R0 31:0
+#define NV_DISP_NOTIFICATION_2_INFO16_3 0x00000003
+#define NV_DISP_NOTIFICATION_2_INFO16_3_PRESENT_COUNT 7:0
+#define NV_DISP_NOTIFICATION_2_INFO16_3_FIELD 8:8
+#define NV_DISP_NOTIFICATION_2_INFO16_3_R1 15:9
+#define NV_DISP_NOTIFICATION_2__3_STATUS 31:16
+#define NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN 0x00008000
+#define NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN 0x0000FFFF
+#define NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED 0x00000000
+
+
+#define NV_DISP_NOTIFICATION_INFO16 0x00000000
+#define NV_DISP_NOTIFICATION_INFO16_SIZEOF 0x00000002
+#define NV_DISP_NOTIFICATION_INFO16__0 0x00000000
+#define NV_DISP_NOTIFICATION_INFO16__0_PRESENT_COUNT 7:0
+#define NV_DISP_NOTIFICATION_INFO16__0_FIELD 8:8
+#define NV_DISP_NOTIFICATION_INFO16__0_R1 15:9
+
+
+#define NV_DISP_NOTIFICATION_STATUS 0x00000000
+#define NV_DISP_NOTIFICATION_STATUS_SIZEOF 0x00000002
+#define NV_DISP_NOTIFICATION_STATUS__0 0x00000000
+#define NV_DISP_NOTIFICATION_STATUS__0_STATUS 15:0
+#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_NOT_BEGUN 0x00008000
+#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_BEGUN 0x0000FFFF
+#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_FINISHED 0x00000000
+
+
+// dma opcode instructions
+#define NV917E_DMA 0x00000000
+#define NV917E_DMA_OPCODE 31:29
+#define NV917E_DMA_OPCODE_METHOD 0x00000000
+#define NV917E_DMA_OPCODE_JUMP 0x00000001
+#define NV917E_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NV917E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NV917E_DMA_OPCODE 31:29
+#define NV917E_DMA_OPCODE_METHOD 0x00000000
+#define NV917E_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NV917E_DMA_METHOD_COUNT 27:18
+#define NV917E_DMA_METHOD_OFFSET 11:2
+#define NV917E_DMA_DATA 31:0
+#define NV917E_DMA_DATA_NOP 0x00000000
+#define NV917E_DMA_OPCODE 31:29
+#define NV917E_DMA_OPCODE_JUMP 0x00000001
+#define NV917E_DMA_JUMP_OFFSET 11:2
+#define NV917E_DMA_OPCODE 31:29
+#define NV917E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NV917E_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NV917E_PUT (0x00000000)
+#define NV917E_PUT_PTR 11:2
+#define NV917E_GET (0x00000004)
+#define NV917E_GET_PTR 11:2
+#define NV917E_UPDATE (0x00000080)
+#define NV917E_UPDATE_INTERLOCK_WITH_CORE 0:0
+#define NV917E_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NV917E_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NV917E_UPDATE_SPECIAL_HANDLING 25:24
+#define NV917E_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NV917E_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NV917E_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NV917E_UPDATE_SPECIAL_HANDLING_REASON 23:16
+#define NV917E_SET_PRESENT_CONTROL (0x00000084)
+#define NV917E_SET_PRESENT_CONTROL_BEGIN_MODE 1:0
+#define NV917E_SET_PRESENT_CONTROL_BEGIN_MODE_ASAP (0x00000000)
+#define NV917E_SET_PRESENT_CONTROL_BEGIN_MODE_TIMESTAMP (0x00000003)
+#define NV917E_SET_PRESENT_CONTROL_STEREO_FLIP_MODE 3:3
+#define NV917E_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_PAIR_FLIP (0x00000000)
+#define NV917E_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_AT_ANY_FRAME (0x00000001)
+#define NV917E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 7:4
+#define NV917E_SET_PRESENT_CONTROL_MODE 11:10
+#define NV917E_SET_PRESENT_CONTROL_MODE_MONO (0x00000000)
+#define NV917E_SET_PRESENT_CONTROL_MODE_STEREO (0x00000001)
+#define NV917E_SET_PRESENT_CONTROL_MODE_SPEC_FLIP (0x00000002)
+#define NV917E_SET_SEMAPHORE_ACQUIRE (0x00000088)
+#define NV917E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0
+#define NV917E_SET_SEMAPHORE_RELEASE (0x0000008C)
+#define NV917E_SET_SEMAPHORE_RELEASE_VALUE 31:0
+#define NV917E_SET_SEMAPHORE_CONTROL (0x00000090)
+#define NV917E_SET_SEMAPHORE_CONTROL_OFFSET 11:2
+#define NV917E_SET_SEMAPHORE_CONTROL_FORMAT 28:28
+#define NV917E_SET_SEMAPHORE_CONTROL_FORMAT_LEGACY (0x00000000)
+#define NV917E_SET_SEMAPHORE_CONTROL_FORMAT_FOUR_WORD (0x00000001)
+#define NV917E_SET_CONTEXT_DMA_SEMAPHORE (0x00000094)
+#define NV917E_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0
+#define NV917E_SET_NOTIFIER_CONTROL (0x000000A0)
+#define NV917E_SET_NOTIFIER_CONTROL_MODE 30:30
+#define NV917E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NV917E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NV917E_SET_NOTIFIER_CONTROL_OFFSET 11:2
+#define NV917E_SET_NOTIFIER_CONTROL_FORMAT 28:28
+#define NV917E_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000)
+#define NV917E_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001)
+#define NV917E_SET_CONTEXT_DMA_NOTIFIER (0x000000A4)
+#define NV917E_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NV917E_SET_CONTEXT_DMA_LUT (0x000000B0)
+#define NV917E_SET_CONTEXT_DMA_LUT_HANDLE 31:0
+#define NV917E_SET_OVERLAY_LUT_LO (0x000000B4)
+#define NV917E_SET_OVERLAY_LUT_LO_ENABLE 31:31
+#define NV917E_SET_OVERLAY_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV917E_SET_OVERLAY_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV917E_SET_OVERLAY_LUT_LO_MODE 27:24
+#define NV917E_SET_OVERLAY_LUT_LO_MODE_LORES (0x00000000)
+#define NV917E_SET_OVERLAY_LUT_LO_MODE_HIRES (0x00000001)
+#define NV917E_SET_OVERLAY_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV917E_SET_OVERLAY_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV917E_SET_OVERLAY_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV917E_SET_OVERLAY_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define
NV917E_SET_OVERLAY_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV917E_SET_OVERLAY_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV917E_SET_OVERLAY_LUT_HI (0x000000B8) +#define NV917E_SET_OVERLAY_LUT_HI_ORIGIN 31:0 +#define NV917E_SET_CONTEXT_DMAS_ISO(b) (0x000000C0 + (b)*0x00000004) +#define NV917E_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV917E_SET_POINT_IN (0x000000E0) +#define NV917E_SET_POINT_IN_X 14:0 +#define NV917E_SET_POINT_IN_Y 30:16 +#define NV917E_SET_SIZE_IN (0x000000E4) +#define NV917E_SET_SIZE_IN_WIDTH 14:0 +#define NV917E_SET_SIZE_IN_HEIGHT 30:16 +#define NV917E_SET_SIZE_OUT (0x000000E8) +#define NV917E_SET_SIZE_OUT_WIDTH 14:0 +#define NV917E_SET_COMPOSITION_CONTROL (0x00000100) +#define NV917E_SET_COMPOSITION_CONTROL_MODE 3:0 +#define NV917E_SET_COMPOSITION_CONTROL_MODE_SOURCE_COLOR_VALUE_KEYING (0x00000000) +#define NV917E_SET_COMPOSITION_CONTROL_MODE_DESTINATION_COLOR_VALUE_KEYING (0x00000001) +#define NV917E_SET_COMPOSITION_CONTROL_MODE_OPAQUE (0x00000002) +#define NV917E_SET_KEY_COLOR_LO (0x00000104) +#define NV917E_SET_KEY_COLOR_LO_COLOR 31:0 +#define NV917E_SET_KEY_COLOR_HI (0x00000108) +#define NV917E_SET_KEY_COLOR_HI_COLOR 31:0 +#define NV917E_SET_KEY_MASK_LO (0x0000010C) +#define NV917E_SET_KEY_MASK_LO_MASK 31:0 +#define NV917E_SET_KEY_MASK_HI (0x00000110) +#define NV917E_SET_KEY_MASK_HI_MASK 31:0 +#define NV917E_SET_PROCESSING (0x00000118) +#define NV917E_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV917E_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV917E_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV917E_SET_CONVERSION_RED (0x0000011C) +#define NV917E_SET_CONVERSION_RED_GAIN 15:0 +#define NV917E_SET_CONVERSION_RED_OFS 31:16 +#define NV917E_SET_CONVERSION_GRN (0x00000120) +#define NV917E_SET_CONVERSION_GRN_GAIN 15:0 +#define NV917E_SET_CONVERSION_GRN_OFS 31:16 +#define NV917E_SET_CONVERSION_BLU (0x00000124) +#define NV917E_SET_CONVERSION_BLU_GAIN 15:0 +#define NV917E_SET_CONVERSION_BLU_OFS 31:16 +#define NV917E_SET_TIMESTAMP_ORIGIN_LO (0x00000130) +#define NV917E_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NV917E_SET_TIMESTAMP_ORIGIN_HI (0x00000134) +#define NV917E_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NV917E_SET_UPDATE_TIMESTAMP_LO (0x00000138) +#define NV917E_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NV917E_SET_UPDATE_TIMESTAMP_HI (0x0000013C) +#define NV917E_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NV917E_SET_CSC_RED2RED (0x00000140) +#define NV917E_SET_CSC_RED2RED_COEFF 18:0 +#define NV917E_SET_CSC_GRN2RED (0x00000144) +#define NV917E_SET_CSC_GRN2RED_COEFF 18:0 +#define NV917E_SET_CSC_BLU2RED (0x00000148) +#define NV917E_SET_CSC_BLU2RED_COEFF 18:0 +#define NV917E_SET_CSC_CONSTANT2RED (0x0000014C) +#define NV917E_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV917E_SET_CSC_RED2GRN (0x00000150) +#define NV917E_SET_CSC_RED2GRN_COEFF 18:0 +#define NV917E_SET_CSC_GRN2GRN (0x00000154) +#define NV917E_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV917E_SET_CSC_BLU2GRN (0x00000158) +#define NV917E_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV917E_SET_CSC_CONSTANT2GRN (0x0000015C) +#define NV917E_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV917E_SET_CSC_RED2BLU (0x00000160) +#define NV917E_SET_CSC_RED2BLU_COEFF 18:0 +#define NV917E_SET_CSC_GRN2BLU (0x00000164) +#define NV917E_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV917E_SET_CSC_BLU2BLU (0x00000168) +#define NV917E_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV917E_SET_CSC_CONSTANT2BLU (0x0000016C) +#define NV917E_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define 
NV917E_SET_SPARE (0x000003BC) +#define NV917E_SET_SPARE_UNUSED 31:0 +#define NV917E_SET_SPARE_NOOP(b) (0x000003C0 + (b)*0x00000004) +#define NV917E_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV917E_SURFACE_SET_OFFSET(b) (0x00000400 + (b)*0x00000004) +#define NV917E_SURFACE_SET_OFFSET_ORIGIN 31:0 +#define NV917E_SURFACE_SET_SIZE (0x00000408) +#define NV917E_SURFACE_SET_SIZE_WIDTH 15:0 +#define NV917E_SURFACE_SET_SIZE_HEIGHT 31:16 +#define NV917E_SURFACE_SET_STORAGE (0x0000040C) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV917E_SURFACE_SET_STORAGE_PITCH 20:8 +#define NV917E_SURFACE_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV917E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV917E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV917E_SURFACE_SET_PARAMS (0x00000410) +#define NV917E_SURFACE_SET_PARAMS_FORMAT 15:8 +#define NV917E_SURFACE_SET_PARAMS_FORMAT_VE8YO8UE8YE8 (0x00000028) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_YO8VE8YE8UE8 (0x00000029) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV917E_SURFACE_SET_PARAMS_COLOR_SPACE 1:0 +#define NV917E_SURFACE_SET_PARAMS_COLOR_SPACE_RGB (0x00000000) +#define NV917E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_601 (0x00000001) +#define NV917E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_709 (0x00000002) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl917e_h diff --git a/src/common/sdk/nvidia/inc/class/cl9270.h b/src/common/sdk/nvidia/inc/class/cl9270.h new file mode 100644 index 0000000..2cb750f --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9270.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl9270.finn +// + +#define NV9270_DISPLAY (0x9270U) /* finn: Evaluated from "NV9270_ALLOCATION_PARAMETERS_MESSAGE_ID" */ + +#define NV9270_ALLOCATION_PARAMETERS_MESSAGE_ID (0x9270U) + +typedef struct NV9270_ALLOCATION_PARAMETERS { + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numDacs; // Number of DACs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NV9270_ALLOCATION_PARAMETERS; diff --git a/src/common/sdk/nvidia/inc/class/cl9271.h b/src/common/sdk/nvidia/inc/class/cl9271.h new file mode 100644 index 0000000..bbe74b0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9271.h @@ -0,0 +1,317 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl9271_h_ +#define _cl9271_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV9271_DISP_SF_USER 0x9271 + +typedef volatile struct _cl9271_tag0 { + NvU32 dispSfUserOffset[0x400]; /* NV_PDISP_SF_USER 0x00690FFF:0x00690000 */ +} _Nv9271DispSfUser, Nv9271DispSfUserMap; + +#define NV9271_SF_HDMI_INFO_IDX_AVI_INFOFRAME 0x00000000 /* */ +#define NV9271_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */ +#define NV9271_SF_HDMI_INFO_IDX_GCP 0x00000003 /* */ +#define NV9271_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */ +#define NV9271_SF_HDMI_INFO_CTRL(i,j) (0x00690000-0x00690000+(i)*1024+(j)*64) /* RWX4A */ +#define NV9271_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */ +#define NV9271_SF_HDMI_INFO_CTRL_ENABLE 0:0 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_OTHER 4:4 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_CTRL_OTHER_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_SINGLE 8:8 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW_DIS 0x00000000 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_CTRL_HBLANK 12:12 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_HBLANK_DIS 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_CTRL_HBLANK_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_VIDEO_FMT 16:16 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_VIDEO_FMT_SW_CONTROLLED 0x00000000 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_VIDEO_FMT_HW_CONTROLLED 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_VIDEO_FMT_INIT 0x00000001 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_STATUS(i,j) (0x00690004-0x00690000+(i)*1024+(j)*64) /* R--4A */ +#define NV9271_SF_HDMI_INFO_STATUS__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_INFO_STATUS__SIZE_2 5 /* */ +#define NV9271_SF_HDMI_INFO_STATUS_SENT 0:0 /* R--VF */ +#define NV9271_SF_HDMI_INFO_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NV9271_SF_HDMI_INFO_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NV9271_SF_HDMI_INFO_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x00690000-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE 0:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER 4:4 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_EN 0x00000001 /* RW--V */ 
+#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE 8:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_STATUS(i) (0x00690004-0x00690000+(i)*1024) /* R--4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_STATUS__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_STATUS_SENT 0:0 /* R--VF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x00690008-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x0069000C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x00690010-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x00690014-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ 
+#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x00690018-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_HEADER(i) (0x00690048-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_HEADER__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW(i) (0x0069004C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH(i) (0x00690050-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW(i) (0x00690054-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH(i) (0x00690058-0x00690000+(i)*1024) /* 
RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW(i) (0x0069005C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH(i) (0x00690060-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW(i) (0x00690064-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH(i) (0x00690068-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GCP_SUBPACK(i) (0x006900CC-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GCP_SUBPACK__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB0_INIT 0x00000001 /* RWI-V */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE 0x00000001 /* RW--V */ +#define 
NV9271_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE 0x00000010 /* RW--V */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_HEADER(i) (0x00690108-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_HEADER__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_HEADER_HB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_HEADER_HB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_HEADER_HB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW(i) (0x0069010C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH(i) (0x00690110-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW(i) (0x00690114-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH(i) (0x00690118-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW(i) (0x0069011C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define 
NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH(i) (0x00690120-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW(i) (0x00690124-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH(i) (0x00690128-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _cl9271_h_ diff --git a/src/common/sdk/nvidia/inc/class/cl927c.h b/src/common/sdk/nvidia/inc/class/cl927c.h new file mode 100644 index 0000000..1b78305 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl927c.h @@ -0,0 +1,299 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _cl927c_h_ +#define _cl927c_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV927C_BASE_CHANNEL_DMA (0x0000927C) + +#define NV_DISP_BASE_NOTIFIER_1 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1_SIZEOF 0x00000004 +#define NV_DISP_BASE_NOTIFIER_1__0 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1__0_PRESENTATION_COUNT 15:0 +#define NV_DISP_BASE_NOTIFIER_1__0_TIMESTAMP 29:16 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS 31:30 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_FINISHED 0x00000002 + + +#define NV_DISP_NOTIFICATION_2 0x00000000 +#define NV_DISP_NOTIFICATION_2_SIZEOF 0x00000010 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0 0x00000000 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0_NANOSECONDS0 31:0 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1 0x00000001 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1_NANOSECONDS1 31:0 +#define NV_DISP_NOTIFICATION_2_INFO32_2 0x00000002 +#define NV_DISP_NOTIFICATION_2_INFO32_2_R0 31:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3 0x00000003 +#define NV_DISP_NOTIFICATION_2_INFO16_3_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3_FIELD 8:8 +#define NV_DISP_NOTIFICATION_2_INFO16_3_R1 15:9 +#define NV_DISP_NOTIFICATION_2__3_STATUS 31:16 +#define NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED 0x00000000 + + +#define NV_DISP_NOTIFICATION_INFO16 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_INFO16__0 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_INFO16__0_FIELD 8:8 +#define NV_DISP_NOTIFICATION_INFO16__0_R1 15:9 + + +#define NV_DISP_NOTIFICATION_STATUS 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_STATUS__0 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS 15:0 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_FINISHED 0x00000000 + + +// dma opcode instructions +#define NV927C_DMA 0x00000000 +#define NV927C_DMA_OPCODE 31:29 +#define NV927C_DMA_OPCODE_METHOD 0x00000000 +#define NV927C_DMA_OPCODE_JUMP 0x00000001 +#define NV927C_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV927C_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV927C_DMA_OPCODE 31:29 +#define NV927C_DMA_OPCODE_METHOD 0x00000000 +#define NV927C_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV927C_DMA_METHOD_COUNT 27:18 +#define NV927C_DMA_METHOD_OFFSET 11:2 +#define NV927C_DMA_DATA 31:0 +#define NV927C_DMA_DATA_NOP 0x00000000 +#define NV927C_DMA_OPCODE 31:29 +#define NV927C_DMA_OPCODE_JUMP 0x00000001 +#define NV927C_DMA_JUMP_OFFSET 11:2 +#define NV927C_DMA_OPCODE 31:29 +#define NV927C_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV927C_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 
+ +// class methods +#define NV927C_PUT (0x00000000) +#define NV927C_PUT_PTR 11:2 +#define NV927C_GET (0x00000004) +#define NV927C_GET_PTR 11:2 +#define NV927C_GET_SCANLINE (0x00000010) +#define NV927C_GET_SCANLINE_LINE 15:0 +#define NV927C_UPDATE (0x00000080) +#define NV927C_UPDATE_INTERLOCK_WITH_CORE 0:0 +#define NV927C_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NV927C_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NV927C_UPDATE_SPECIAL_HANDLING 25:24 +#define NV927C_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV927C_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV927C_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV927C_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV927C_SET_PRESENT_CONTROL (0x00000084) +#define NV927C_SET_PRESENT_CONTROL_BEGIN_MODE 9:8 +#define NV927C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NV927C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NV927C_SET_PRESENT_CONTROL_BEGIN_MODE_ON_LINE (0x00000002) +#define NV927C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE 3:3 +#define NV927C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_PAIR_FLIP (0x00000000) +#define NV927C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_AT_ANY_FRAME (0x00000001) +#define NV927C_SET_PRESENT_CONTROL_TIMESTAMP_MODE 2:2 +#define NV927C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NV927C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NV927C_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 7:4 +#define NV927C_SET_PRESENT_CONTROL_BEGIN_LINE 30:16 +#define NV927C_SET_PRESENT_CONTROL_ON_LINE_MARGIN 15:10 +#define NV927C_SET_PRESENT_CONTROL_MODE 1:0 +#define NV927C_SET_PRESENT_CONTROL_MODE_MONO (0x00000000) +#define NV927C_SET_PRESENT_CONTROL_MODE_STEREO (0x00000001) +#define NV927C_SET_PRESENT_CONTROL_MODE_SPEC_FLIP (0x00000002) +#define NV927C_SET_SEMAPHORE_CONTROL (0x00000088) +#define NV927C_SET_SEMAPHORE_CONTROL_OFFSET 11:2 +#define NV927C_SET_SEMAPHORE_CONTROL_DELAY 26:26 +#define NV927C_SET_SEMAPHORE_CONTROL_DELAY_DISABLE (0x00000000) +#define NV927C_SET_SEMAPHORE_CONTROL_DELAY_ENABLE (0x00000001) +#define NV927C_SET_SEMAPHORE_CONTROL_FORMAT 28:28 +#define NV927C_SET_SEMAPHORE_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV927C_SET_SEMAPHORE_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV927C_SET_SEMAPHORE_ACQUIRE (0x0000008C) +#define NV927C_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NV927C_SET_SEMAPHORE_RELEASE (0x00000090) +#define NV927C_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NV927C_SET_CONTEXT_DMA_SEMAPHORE (0x00000094) +#define NV927C_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0 +#define NV927C_SET_NOTIFIER_CONTROL (0x000000A0) +#define NV927C_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV927C_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV927C_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV927C_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV927C_SET_NOTIFIER_CONTROL_DELAY 26:26 +#define NV927C_SET_NOTIFIER_CONTROL_DELAY_DISABLE (0x00000000) +#define NV927C_SET_NOTIFIER_CONTROL_DELAY_ENABLE (0x00000001) +#define NV927C_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV927C_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV927C_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV927C_SET_CONTEXT_DMA_NOTIFIER (0x000000A4) +#define NV927C_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV927C_SET_CONTEXT_DMAS_ISO(b) (0x000000C0 + (b)*0x00000004) +#define NV927C_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV927C_SET_BASE_LUT_LO (0x000000E0) +#define 
NV927C_SET_BASE_LUT_LO_ENABLE 31:30 +#define NV927C_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV927C_SET_BASE_LUT_LO_ENABLE_USE_CORE_LUT (0x00000001) +#define NV927C_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000002) +#define NV927C_SET_BASE_LUT_LO_MODE 27:24 +#define NV927C_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV927C_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV927C_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV927C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV927C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV927C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV927C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV927C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV927C_SET_BASE_LUT_HI (0x000000E4) +#define NV927C_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV927C_SET_OUTPUT_LUT_LO (0x000000E8) +#define NV927C_SET_OUTPUT_LUT_LO_ENABLE 31:30 +#define NV927C_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV927C_SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT (0x00000001) +#define NV927C_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000002) +#define NV927C_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV927C_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV927C_SET_OUTPUT_LUT_HI (0x000000EC) +#define NV927C_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define NV927C_SET_CONTEXT_DMA_LUT (0x000000FC) +#define NV927C_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV927C_SET_PROCESSING (0x00000110) +#define NV927C_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV927C_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV927C_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV927C_SET_CONVERSION_RED (0x00000114) +#define NV927C_SET_CONVERSION_RED_GAIN 15:0 +#define NV927C_SET_CONVERSION_RED_OFS 31:16 +#define NV927C_SET_CONVERSION_GRN (0x00000118) +#define NV927C_SET_CONVERSION_GRN_GAIN 15:0 +#define NV927C_SET_CONVERSION_GRN_OFS 31:16 +#define NV927C_SET_CONVERSION_BLU (0x0000011C) +#define NV927C_SET_CONVERSION_BLU_GAIN 15:0 +#define NV927C_SET_CONVERSION_BLU_OFS 31:16 +#define NV927C_SET_TIMESTAMP_ORIGIN_LO (0x00000130) +#define NV927C_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NV927C_SET_TIMESTAMP_ORIGIN_HI (0x00000134) +#define NV927C_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NV927C_SET_UPDATE_TIMESTAMP_LO (0x00000138) +#define NV927C_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NV927C_SET_UPDATE_TIMESTAMP_HI (0x0000013C) +#define NV927C_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NV927C_SET_CSC_RED2RED (0x00000140) +#define NV927C_SET_CSC_RED2RED_OWNER 31:31 +#define NV927C_SET_CSC_RED2RED_OWNER_CORE (0x00000000) +#define NV927C_SET_CSC_RED2RED_OWNER_BASE (0x00000001) +#define NV927C_SET_CSC_RED2RED_COEFF 18:0 +#define NV927C_SET_CSC_GRN2RED (0x00000144) +#define NV927C_SET_CSC_GRN2RED_COEFF 18:0 +#define NV927C_SET_CSC_BLU2RED (0x00000148) +#define NV927C_SET_CSC_BLU2RED_COEFF 18:0 +#define 
NV927C_SET_CSC_CONSTANT2RED (0x0000014C) +#define NV927C_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV927C_SET_CSC_RED2GRN (0x00000150) +#define NV927C_SET_CSC_RED2GRN_COEFF 18:0 +#define NV927C_SET_CSC_GRN2GRN (0x00000154) +#define NV927C_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV927C_SET_CSC_BLU2GRN (0x00000158) +#define NV927C_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV927C_SET_CSC_CONSTANT2GRN (0x0000015C) +#define NV927C_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV927C_SET_CSC_RED2BLU (0x00000160) +#define NV927C_SET_CSC_RED2BLU_COEFF 18:0 +#define NV927C_SET_CSC_GRN2BLU (0x00000164) +#define NV927C_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV927C_SET_CSC_BLU2BLU (0x00000168) +#define NV927C_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV927C_SET_CSC_CONSTANT2BLU (0x0000016C) +#define NV927C_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV927C_SET_SPARE (0x000003BC) +#define NV927C_SET_SPARE_UNUSED 31:0 +#define NV927C_SET_SPARE_NOOP(b) (0x000003C0 + (b)*0x00000004) +#define NV927C_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV927C_SURFACE_SET_OFFSET(a,b) (0x00000400 + (a)*0x00000020 + (b)*0x00000004) +#define NV927C_SURFACE_SET_OFFSET_ORIGIN 31:0 +#define NV927C_SURFACE_SET_SIZE(a) (0x00000408 + (a)*0x00000020) +#define NV927C_SURFACE_SET_SIZE_WIDTH 15:0 +#define NV927C_SURFACE_SET_SIZE_HEIGHT 31:16 +#define NV927C_SURFACE_SET_STORAGE(a) (0x0000040C + (a)*0x00000020) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV927C_SURFACE_SET_STORAGE_PITCH 20:8 +#define NV927C_SURFACE_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV927C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV927C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV927C_SURFACE_SET_PARAMS(a) (0x00000410 + (a)*0x00000020) +#define NV927C_SURFACE_SET_PARAMS_FORMAT 15:8 +#define NV927C_SURFACE_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV927C_SURFACE_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV927C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV927C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV927C_SURFACE_SET_PARAMS_GAMMA 2:2 +#define NV927C_SURFACE_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV927C_SURFACE_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV927C_SURFACE_SET_PARAMS_LAYOUT 5:4 
+#define NV927C_SURFACE_SET_PARAMS_LAYOUT_FRM (0x00000000) +#define NV927C_SURFACE_SET_PARAMS_LAYOUT_FLD1 (0x00000001) +#define NV927C_SURFACE_SET_PARAMS_LAYOUT_FLD2 (0x00000002) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl927c_h diff --git a/src/common/sdk/nvidia/inc/class/cl927d.h b/src/common/sdk/nvidia/inc/class/cl927d.h new file mode 100644 index 0000000..df45fec --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl927d.h @@ -0,0 +1,1556 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _cl927d_h_ +#define _cl927d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV927D_CORE_CHANNEL_DMA (0x0000927D) + +#define NV927D_CORE_NOTIFIER_3 0x00000000 +#define NV927D_CORE_NOTIFIER_3_SIZEOF 0x00000150 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16 +#define NV927D_CORE_NOTIFIER_3__1 0x00000001 +#define NV927D_CORE_NOTIFIER_3__1_R1 31:0 +#define NV927D_CORE_NOTIFIER_3__2 0x00000002 +#define NV927D_CORE_NOTIFIER_3__2_R2 31:0 +#define NV927D_CORE_NOTIFIER_3__3 0x00000003 +#define NV927D_CORE_NOTIFIER_3__3_R3 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_11 0x0000000B +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_LVDS_CLK_MAX 23:16 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1 
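+
+/*
+ * Illustrative sketch, not part of the generated class definition: each
+ * CAPABILITIES name above pairs a 32-bit dword index into the notifier
+ * (e.g. _CAP_PIOR0_36 == 0x00000024) with "high:low" bit-range fields.
+ * Assuming the DRF_VAL()/FLD_TEST_DRF() field helpers (as in nvmisc.h)
+ * and a mapped core notifier, a client could test PIOR0's external TMDS
+ * capability roughly like this:
+ *
+ *     const volatile NvU32 *notif;   // base of NV927D_CORE_NOTIFIER_3
+ *     NvU32 pior0 = notif[NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36];
+ *     if (FLD_TEST_DRF(927D, _CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36,
+ *                      _EXT_TMDS_ENC, _TRUE, pior0)) {
+ *         // PIOR0 can drive an external TMDS encoder
+ *     }
+ */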
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:7 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:7 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:7 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 
1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:7 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 0x0000002B +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44 0x0000002C +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45 0x0000002D +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45_R1 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46 0x0000002E +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47 0x0000002F +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47_R1 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48 0x00000030 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49 0x00000031 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49_R1 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50 0x00000032 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51 0x00000033 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51_R1 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0 + + +// dma opcode instructions +#define NV927D_DMA 0x00000000 +#define NV927D_DMA_OPCODE 31:29 +#define NV927D_DMA_OPCODE_METHOD 0x00000000 +#define NV927D_DMA_OPCODE_JUMP 0x00000001 +#define NV927D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV927D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV927D_DMA_OPCODE 31:29 +#define NV927D_DMA_OPCODE_METHOD 0x00000000 +#define NV927D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV927D_DMA_METHOD_COUNT 27:18 +#define NV927D_DMA_METHOD_OFFSET 11:2 +#define NV927D_DMA_DATA 31:0 +#define NV927D_DMA_DATA_NOP 0x00000000 +#define NV927D_DMA_OPCODE 31:29 +#define NV927D_DMA_OPCODE_JUMP 0x00000001 +#define NV927D_DMA_JUMP_OFFSET 11:2 +#define NV927D_DMA_OPCODE 31:29 +#define NV927D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV927D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV927D_PUT (0x00000000) +#define NV927D_PUT_PTR 11:2 +#define NV927D_GET (0x00000004) +#define NV927D_GET_PTR 11:2 +#define NV927D_UPDATE (0x00000080) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4 +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0 +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) 
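+
+/*
+ * Illustrative sketch, not part of the generated class definition: the
+ * "dma opcode instructions" above describe how pushbuffer words for this
+ * channel are encoded (opcode in bits 31:29, method count in 27:18,
+ * method byte offset in 11:2).  Assuming the DRF_DEF()/DRF_NUM() field
+ * helpers (as in nvmisc.h), a one-word METHOD header that sends the
+ * UPDATE method could be built roughly like this:
+ *
+ *     NvU32 hdr  = DRF_DEF(927D, _DMA, _OPCODE, _METHOD)
+ *                | DRF_NUM(927D, _DMA, _METHOD_COUNT, 1)
+ *                | DRF_NUM(927D, _DMA, _METHOD_OFFSET, NV927D_UPDATE >> 2);
+ *     NvU32 data = DRF_DEF(927D, _UPDATE, _INTERLOCK_WITH_CURSOR0, _DISABLE);
+ *     // write hdr then data into the pushbuffer, then advance the put
+ *     // pointer (NV927D_PUT_PTR) past both words to submit
+ */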
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4 +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8 +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12 +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4 +#define NV927D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE0 1:1 +#define NV927D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE1 5:5 +#define NV927D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE2 9:9 +#define NV927D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE3 13:13 +#define NV927D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15 +#define 
NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001) +#define NV927D_UPDATE_SPECIAL_HANDLING 25:24 +#define NV927D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV927D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV927D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV927D_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV927D_UPDATE_NOT_DRIVER_FRIENDLY 31:31 +#define NV927D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000) +#define NV927D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001) +#define NV927D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30 +#define NV927D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000) +#define NV927D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001) +#define NV927D_UPDATE_INHIBIT_INTERRUPTS 29:29 +#define NV927D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NV927D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NV927D_SET_NOTIFIER_CONTROL (0x00000084) +#define NV927D_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV927D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV927D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV927D_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV927D_SET_NOTIFIER_CONTROL_NOTIFY 31:31 +#define NV927D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NV927D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NV927D_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV927D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV927D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV927D_SET_CONTEXT_DMA_NOTIFIER (0x00000088) +#define NV927D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV927D_GET_CAPABILITIES (0x0000008C) +#define NV927D_GET_CAPABILITIES_DUMMY 31:0 +#define NV927D_SET_SPARE (0x0000016C) +#define NV927D_SET_SPARE_UNUSED 31:0 +#define NV927D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004) +#define NV927D_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV927D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020) +#define NV927D_DAC_SET_CONTROL_OWNER_MASK 3:0 +#define NV927D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV927D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV927D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV927D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV927D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV927D_DAC_SET_CONTROL_PROTOCOL 12:8 +#define NV927D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000) +#define NV927D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013) +#define NV927D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020) +#define NV927D_DAC_SET_SW_SPARE_A_CODE 31:0 +#define NV927D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020) +#define NV927D_DAC_SET_SW_SPARE_B_CODE 31:0 +#define NV927D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020) +#define NV927D_DAC_SET_CUSTOM_REASON_CODE 31:0 + +#define NV927D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020) +#define NV927D_SOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV927D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV927D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV927D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV927D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV927D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV927D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NV927D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NV927D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NV927D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B 
(0x00000002) +#define NV927D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NV927D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NV927D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NV927D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NV927D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV927D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV927D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV927D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NV927D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NV927D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NV927D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NV927D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020) +#define NV927D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NV927D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020) +#define NV927D_SOR_SET_SW_SPARE_B_CODE 31:0 +#define NV927D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020) +#define NV927D_SOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV927D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NV927D_PIOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV927D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV927D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV927D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV927D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV927D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV927D_PIOR_SET_CONTROL_PROTOCOL 11:8 +#define NV927D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000) +#define NV927D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001) +#define NV927D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV927D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV927D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV927D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020) +#define NV927D_PIOR_SET_SW_SPARE_A_CODE 31:0 +#define NV927D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020) +#define NV927D_PIOR_SET_SW_SPARE_B_CODE 31:0 +#define NV927D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020) +#define NV927D_PIOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV927D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300) +#define NV927D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NV927D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8 +#define NV927D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NV927D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NV927D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE 
(0x00000001) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 12:12 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 20:13 +#define NV927D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONTROL_STRUCTURE 0:0 +#define NV927D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2 +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15 +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define 
NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4 +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8 +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10 +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK 1:1 +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20 +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define 
NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN 29:25 +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define 
NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV927D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NV927D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NV927D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300) +#define NV927D_HEAD_SET_OVERSCAN_COLOR_RED 9:0 +#define NV927D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10 +#define NV927D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20 +#define NV927D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300) +#define NV927D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NV927D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NV927D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300) +#define NV927D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NV927D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NV927D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300) +#define NV927D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NV927D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NV927D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300) +#define NV927D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NV927D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NV927D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300) +#define NV927D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0 +#define NV927D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16 +#define NV927D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300) +#define NV927D_HEAD_SET_LOCK_CHAIN_POSITION 27:24 +#define NV927D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300) +#define NV927D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0 +#define NV927D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10 +#define NV927D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20 +#define NV927D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300) +#define NV927D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0 +#define NV927D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000) +#define NV927D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001) +#define NV927D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002) +#define NV927D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2 +#define NV927D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NV927D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NV927D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3 +#define NV927D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000) +#define NV927D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001) +#define NV927D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4 +#define NV927D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000) +#define NV927D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8 +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define 
NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4 +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4 +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8 +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4 +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8 +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20 +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4 +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4 +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA) +#define 
NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8 +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4 +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8 +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF) +#define NV927D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5 +#define NV927D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6 +#define NV927D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NV927D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300) +#define NV927D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31 +#define NV927D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE 27:24 +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV927D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define 
NV927D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300) +#define NV927D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV927D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31 +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300) +#define NV927D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300) +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20 +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24 +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25 +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26 +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300) +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NV927D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300) +#define NV927D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define 
NV927D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300) +#define NV927D_HEAD_SET_OFFSET_ORIGIN 31:0 +#define NV927D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300) +#define NV927D_HEAD_SET_SIZE_WIDTH 15:0 +#define NV927D_HEAD_SET_SIZE_HEIGHT 31:16 +#define NV927D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300) +#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV927D_HEAD_SET_STORAGE_PITCH 20:8 +#define NV927D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV927D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV927D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV927D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300) +#define NV927D_HEAD_SET_PARAMS_FORMAT 15:8 +#define NV927D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV927D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV927D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV927D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV927D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV927D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV927D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV927D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define NV927D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV927D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV927D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV927D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV927D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV927D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV927D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV927D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV927D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV927D_HEAD_SET_PARAMS_GAMMA 2:2 +#define NV927D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV927D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV927D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV927D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300) +#define NV927D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0 +#define NV927D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NV927D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NV927D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002) +#define NV927D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NV927D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24 +#define NV927D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000) +#define NV927D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26 +#define NV927D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NV927D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) 
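The NNN:MMM tokens above are bit ranges in NVIDIA's DRF convention (high bit : low bit), and the (a) parameter is the head index, with per-head method blocks spaced 0x300 bytes apart. A minimal sketch of how such defines are typically consumed, assuming the DRF_DEF helper from nvmisc.h; the variable and usage site are illustrative, not part of this patch:

/*
 * Sketch (illustrative only): compose the 32-bit payload for
 * NV927D_HEAD_SET_CONTROL_CURSOR(head). DRF_DEF pastes the device/register/
 * field/value tokens back together and uses the 0?hi:lo / 1?hi:lo ternary
 * trick on the bit-range define to derive the field's shift and mask.
 */
NvU32 data = DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _ENABLE, _ENABLE) |   /* bit 31    */
             DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _FORMAT, _A8R8G8B8) | /* bits 25:24 */
             DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W64_H64);     /* bits 27:26 */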
+#define NV927D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NV927D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8 +#define NV927D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16 +#define NV927D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28 +#define NV927D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000) +#define NV927D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001) +#define NV927D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002) +#define NV927D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004) +#define NV927D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0 +#define NV927D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004) +#define NV927D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001) +#define NV927D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300) +#define NV927D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NV927D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NV927D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NV927D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NV927D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2 +#define NV927D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000) +#define NV927D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001) +#define NV927D_HEAD_SET_PROCAMP_SAT_COS 19:8 +#define NV927D_HEAD_SET_PROCAMP_SAT_SINE 31:20 +#define NV927D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5 +#define NV927D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NV927D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NV927D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6 +#define NV927D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000) +#define NV927D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001) +#define NV927D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300) +#define NV927D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NV927D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_DITHER_CONTROL_BITS 2:1 +#define NV927D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000) +#define NV927D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001) +#define NV927D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002) +#define NV927D_HEAD_SET_DITHER_CONTROL_MODE 6:3 +#define 
NV927D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NV927D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NV927D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NV927D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NV927D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NV927D_HEAD_SET_DITHER_CONTROL_PHASE 8:7 +#define NV927D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300) +#define NV927D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NV927D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300) +#define NV927D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NV927D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300) +#define NV927D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NV927D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300) +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300) +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300) +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16 +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0 +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12 +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16 +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20 +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0 +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define 
NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12 +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002) +#define NV927D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300) +#define NV927D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV927D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV927D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONVERSION_RED_GAIN 15:0 +#define NV927D_HEAD_SET_CONVERSION_RED_OFS 31:16 +#define NV927D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONVERSION_GRN_GAIN 15:0 +#define NV927D_HEAD_SET_CONVERSION_GRN_OFS 31:16 +#define NV927D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300) +#define NV927D_HEAD_SET_CONVERSION_BLU_GAIN 15:0 +#define NV927D_HEAD_SET_CONVERSION_BLU_OFS 31:16 +#define NV927D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31 +#define NV927D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CSC_RED2RED_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_GRN2RED_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_BLU2RED_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_RED2GRN_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_RED2BLU_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV927D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300) +#define NV927D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NV927D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NV927D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NV927D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NV927D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12 +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000) +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001) +#define 
NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002) +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003) +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004) +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005) +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006) +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008) +#define NV927D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300) +#define NV927D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0 +#define NV927D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10 +#define NV927D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20 +#define NV927D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004) +#define NV927D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NV927D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NV927D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NV927D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NV927D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NV927D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300) +#define NV927D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NV927D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NV927D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NV927D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NV927D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NV927D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0 +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0 +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0 +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0 +#define NV927D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300) +#define NV927D_HEAD_SET_SPARE_UNUSED 31:0 +#define NV927D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004) +#define NV927D_HEAD_SET_SPARE_NOOP_UNUSED 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl927d_h diff --git a/src/common/sdk/nvidia/inc/class/cl9470.h b/src/common/sdk/nvidia/inc/class/cl9470.h new file mode 100644 index 0000000..6b0c829 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9470.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl9470.finn +// + +#define NV9470_DISPLAY (0x9470U) /* finn: Evaluated from "NV9470_ALLOCATION_PARAMETERS_MESSAGE_ID" */ + +#define NV9470_ALLOCATION_PARAMETERS_MESSAGE_ID (0x9470U) + +typedef struct NV9470_ALLOCATION_PARAMETERS { + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numDacs; // Number of DACs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NV9470_ALLOCATION_PARAMETERS; diff --git a/src/common/sdk/nvidia/inc/class/cl9471.h b/src/common/sdk/nvidia/inc/class/cl9471.h new file mode 100644 index 0000000..14086ca --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9471.h @@ -0,0 +1,317 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _cl9471_h_ +#define _cl9471_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV9471_DISP_SF_USER 0x9471 + +typedef volatile struct _cl9471_tag0 { + NvU32 dispSfUserOffset[0x400]; /* NV_PDISP_SF_USER 0x00690FFF:0x00690000 */ +} _Nv9471DispSfUser, Nv9471DispSfUserMap; + +#define NV9471_SF_HDMI_INFO_IDX_AVI_INFOFRAME 0x00000000 /* */ +#define NV9471_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */ +#define NV9471_SF_HDMI_INFO_IDX_GCP 0x00000003 /* */ +#define NV9471_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */ +#define NV9471_SF_HDMI_INFO_CTRL(i,j) (0x00690000-0x00690000+(i)*1024+(j)*64) /* RWX4A */ +#define NV9471_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */ +#define NV9471_SF_HDMI_INFO_CTRL_ENABLE 0:0 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_OTHER 4:4 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_CTRL_OTHER_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_SINGLE 8:8 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW_DIS 0x00000000 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_CTRL_HBLANK 12:12 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_HBLANK_DIS 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_CTRL_HBLANK_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_VIDEO_FMT 16:16 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_VIDEO_FMT_SW_CONTROLLED 0x00000000 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_VIDEO_FMT_HW_CONTROLLED 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_VIDEO_FMT_INIT 0x00000001 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_STATUS(i,j) (0x00690004-0x00690000+(i)*1024+(j)*64) /* R--4A */ +#define NV9471_SF_HDMI_INFO_STATUS__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_INFO_STATUS__SIZE_2 5 /* */ +#define NV9471_SF_HDMI_INFO_STATUS_SENT 0:0 /* R--VF */ +#define NV9471_SF_HDMI_INFO_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NV9471_SF_HDMI_INFO_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NV9471_SF_HDMI_INFO_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x00690000-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE 0:0 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER 4:4 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_EN 0x00000001 /* RW--V */ 
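The (i,j) macros above evaluate to byte offsets within the NV_PDISP_SF_USER aperture: i selects one of four SF instances (1024-byte stride, __SIZE_1) and j selects one of five infoframe slots (64-byte stride, __SIZE_2). A worked example of the offset arithmetic, not part of the patch:

/*
 * NV9471_SF_HDMI_INFO_CTRL(1, NV9471_SF_HDMI_INFO_IDX_GCP)
 *   = 0x00690000 - 0x00690000 + 1*1024 + 3*64
 *   = 0x400 + 0xC0
 *   = 0x4C0   -- SF instance 1, GCP control register, relative to the aperture base
 */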
+#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE 8:8 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_STATUS(i) (0x00690004-0x00690000+(i)*1024) /* R--4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_STATUS__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_STATUS_SENT 0:0 /* R--VF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x00690008-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x0069000C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x00690010-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x00690014-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ 
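Since Nv9471DispSfUserMap exposes the same 4 KiB aperture as an array of 32-bit registers, a byte offset from the macros above indexes it after division by sizeof(NvU32). A minimal sketch under that assumption; the helper function and the regs pointer are hypothetical, and DRF_DEF is the field helper from nvmisc.h:

/* Hypothetical helper, not part of this patch: arm the AVI infoframe on SF
 * instance "sf" with the hardware-computed checksum. The struct is declared
 * volatile above, so the assignment compiles to a direct MMIO store. */
static inline void sfEnableAviInfoframe(Nv9471DispSfUserMap *regs, NvU32 sf)
{
    NvU32 ctrl = DRF_DEF(9471, _SF_HDMI_AVI_INFOFRAME_CTRL, _ENABLE, _YES) |
                 DRF_DEF(9471, _SF_HDMI_AVI_INFOFRAME_CTRL, _CHKSUM_HW, _ENABLE);
    regs->dispSfUserOffset[NV9471_SF_HDMI_AVI_INFOFRAME_CTRL(sf) / sizeof(NvU32)] = ctrl;
}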
+#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */
+#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x00690018-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_HEADER(i) (0x00690048-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_GENERIC_HEADER__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_GENERIC_HEADER_HB0 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_HEADER_HB0_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_HEADER_HB1 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_HEADER_HB1_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_HEADER_HB2 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_HEADER_HB2_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW(i) (0x0069004C-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3 31:24 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH(i) (0x00690050-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW(i) (0x00690054-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10 31:24 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH(i) (0x00690058-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW(i) (0x0069005C-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17 31:24 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH(i) (0x00690060-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW(i) (0x00690064-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24 31:24 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH(i) (0x00690068-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GCP_SUBPACK(i) (0x006900CC-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_GCP_SUBPACK__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_GCP_SUBPACK_SB0 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_GCP_SUBPACK_SB0_INIT 0x00000001 /* RWI-V */
+#define NV9471_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE 0x00000001 /* RW--V */
+#define NV9471_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE 0x00000010 /* RW--V */
+#define NV9471_SF_HDMI_GCP_SUBPACK_SB1 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_GCP_SUBPACK_SB1_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_GCP_SUBPACK_SB2 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_GCP_SUBPACK_SB2_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_HEADER(i) (0x00690108-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_VSI_HEADER__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_VSI_HEADER_HB0 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_HEADER_HB0_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_HEADER_HB1 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_HEADER_HB1_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_HEADER_HB2 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_HEADER_HB2_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW(i) (0x0069010C-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB0 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB1 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB2 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB3 31:24 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH(i) (0x00690110-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW(i) (0x00690114-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB7 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB8 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB9 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB10 31:24 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH(i) (0x00690118-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW(i) (0x0069011C-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB14 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB15 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB16 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB17 31:24 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH(i) (0x00690120-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW(i) (0x00690124-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB21 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB22 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB23 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB24 31:24 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH(i) (0x00690128-0x00690000+(i)*1024) /* RWX4A */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH__SIZE_1 4 /* */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */
+#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif // _cl9471_h_
diff --git a/src/common/sdk/nvidia/inc/class/cl947d.h b/src/common/sdk/nvidia/inc/class/cl947d.h
new file mode 100644
index 0000000..6842406
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cl947d.h
@@ -0,0 +1,1606 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl947d_h_
+#define _cl947d_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NV947D_CORE_CHANNEL_DMA (0x0000947D)
+
+#define NV947D_CORE_NOTIFIER_3 0x00000000
+#define NV947D_CORE_NOTIFIER_3_SIZEOF 0x00000150
+#define NV947D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000
+#define NV947D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0
+#define NV947D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1
+#define NV947D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16
+#define NV947D_CORE_NOTIFIER_3__1 0x00000001
+#define NV947D_CORE_NOTIFIER_3__1_R1 31:0
+#define NV947D_CORE_NOTIFIER_3__2 0x00000002
+#define NV947D_CORE_NOTIFIER_3__2_R2 31:0
+#define NV947D_CORE_NOTIFIER_3__3 0x00000003
+#define NV947D_CORE_NOTIFIER_3__3_R3 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_11 0x0000000B
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 12:12
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 15:14
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:28
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_LVDS_CLK_MAX 23:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R8 31:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 12:12
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 15:14
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:28
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_LVDS_CLK_MAX 23:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R8 31:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 12:12
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 15:14
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:28
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_LVDS_CLK_MAX 23:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R8 31:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 12:12
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 15:14
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:28
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_LVDS_CLK_MAX 23:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R8 31:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 12:12
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 15:14
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:28
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_LVDS_CLK_MAX 23:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R8 31:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 12:12
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 15:14
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:28
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_LVDS_CLK_MAX 23:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R8 31:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 12:12
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 15:14
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:28
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_LVDS_CLK_MAX 23:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R8 31:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 12:12
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 15:14
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:28
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_LVDS_CLK_MAX 23:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R8 31:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:7
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:7
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:7
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:7
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 0x0000002B
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44 0x0000002C
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44_R0 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45 0x0000002D
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45_R1 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46 0x0000002E
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46_R0 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47 0x0000002F
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47_R1 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48 0x00000030
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48_R0 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49 0x00000031
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49_R1 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50 0x00000032
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50_R0 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51 0x00000033
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51_R1 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_REORDER_BANK_WIDTH_SIZE_MAX 13:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:14
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_REORDER_BANK_WIDTH_SIZE_MAX 13:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:14
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_REORDER_BANK_WIDTH_SIZE_MAX 13:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:14
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_REORDER_BANK_WIDTH_SIZE_MAX 13:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:14
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0
+
+
+// dma opcode instructions
+#define NV947D_DMA 0x00000000
+#define NV947D_DMA_OPCODE 31:29
+#define NV947D_DMA_OPCODE_METHOD 0x00000000
+#define NV947D_DMA_OPCODE_JUMP 0x00000001
+#define NV947D_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NV947D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NV947D_DMA_METHOD_COUNT 27:18
+#define NV947D_DMA_METHOD_OFFSET 11:2
+#define NV947D_DMA_DATA 31:0
+#define NV947D_DMA_DATA_NOP 0x00000000
+#define NV947D_DMA_JUMP_OFFSET 11:2
+#define NV947D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NV947D_PUT (0x00000000)
+#define NV947D_PUT_PTR 11:2
+#define NV947D_GET (0x00000004)
+#define NV947D_GET_PTR 11:2
+#define NV947D_UPDATE (0x00000080)
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4)
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4)
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE0 1:1
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE1 5:5
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE2 9:9
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE3 13:13
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000)
+#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001)
+#define NV947D_UPDATE_SPECIAL_HANDLING 25:24
+#define NV947D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NV947D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NV947D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NV947D_UPDATE_SPECIAL_HANDLING_REASON 23:16
+#define NV947D_UPDATE_NOT_DRIVER_FRIENDLY 31:31
+#define NV947D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000)
+#define NV947D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001)
+#define NV947D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30
+#define NV947D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000)
+#define NV947D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001)
+#define NV947D_UPDATE_INHIBIT_INTERRUPTS 29:29
+#define NV947D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NV947D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NV947D_SET_NOTIFIER_CONTROL (0x00000084)
+#define NV947D_SET_NOTIFIER_CONTROL_MODE 30:30
+#define NV947D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NV947D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NV947D_SET_NOTIFIER_CONTROL_OFFSET 11:2
+#define
NV947D_SET_NOTIFIER_CONTROL_NOTIFY 31:31 +#define NV947D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NV947D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NV947D_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV947D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV947D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV947D_SET_CONTEXT_DMA_NOTIFIER (0x00000088) +#define NV947D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV947D_GET_CAPABILITIES (0x0000008C) +#define NV947D_GET_CAPABILITIES_DUMMY 31:0 +#define NV947D_SET_SPARE (0x0000016C) +#define NV947D_SET_SPARE_UNUSED 31:0 +#define NV947D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004) +#define NV947D_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV947D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020) +#define NV947D_DAC_SET_CONTROL_OWNER_MASK 3:0 +#define NV947D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV947D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV947D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV947D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV947D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV947D_DAC_SET_CONTROL_PROTOCOL 12:8 +#define NV947D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000) +#define NV947D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013) +#define NV947D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020) +#define NV947D_DAC_SET_SW_SPARE_A_CODE 31:0 +#define NV947D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020) +#define NV947D_DAC_SET_SW_SPARE_B_CODE 31:0 +#define NV947D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020) +#define NV947D_DAC_SET_CUSTOM_REASON_CODE 31:0 + +#define NV947D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020) +#define NV947D_SOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV947D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV947D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV947D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV947D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV947D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV947D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NV947D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NV947D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV947D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV947D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV947D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NV947D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NV947D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NV947D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NV947D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020) +#define NV947D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NV947D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020) +#define NV947D_SOR_SET_SW_SPARE_B_CODE 31:0 +#define NV947D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020) +#define NV947D_SOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV947D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK 3:0 +#define 
NV947D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV947D_PIOR_SET_CONTROL_PROTOCOL 11:8 +#define NV947D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000) +#define NV947D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001) +#define NV947D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV947D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV947D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV947D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020) +#define NV947D_PIOR_SET_SW_SPARE_A_CODE 31:0 +#define NV947D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020) +#define NV947D_PIOR_SET_SW_SPARE_B_CODE 31:0 +#define NV947D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020) +#define NV947D_PIOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV947D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300) +#define NV947D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NV947D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8 +#define NV947D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NV947D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NV947D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 12:12 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define 
NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 20:13 +#define NV947D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_STRUCTURE 0:0 +#define NV947D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2 +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15 +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define 
NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK 1:1 +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20 +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010) +#define 
NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN 29:25 +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV947D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NV947D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NV947D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300) +#define NV947D_HEAD_SET_OVERSCAN_COLOR_RED 9:0 +#define NV947D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10 +#define 
NV947D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20 +#define NV947D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300) +#define NV947D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NV947D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NV947D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300) +#define NV947D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NV947D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NV947D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300) +#define NV947D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NV947D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NV947D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300) +#define NV947D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NV947D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NV947D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300) +#define NV947D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0 +#define NV947D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16 +#define NV947D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300) +#define NV947D_HEAD_SET_LOCK_CHAIN_POSITION 27:24 +#define NV947D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300) +#define NV947D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0 +#define NV947D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10 +#define NV947D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20 +#define NV947D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300) +#define NV947D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0 +#define NV947D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001) +#define NV947D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002) +#define NV947D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2 +#define NV947D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NV947D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3 +#define NV947D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001) +#define NV947D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4 +#define NV947D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F) +#define 
NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 
(0x00000FAF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF) +#define NV947D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5 +#define NV947D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6 +#define NV947D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NV947D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300) +#define NV947D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31 +#define NV947D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE 27:24 +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV947D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV947D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300) +#define NV947D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV947D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31 +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define 
NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300) +#define NV947D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300) +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20 +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24 +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25 +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26 +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300) +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NV947D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV947D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300) +#define NV947D_HEAD_SET_OFFSET_ORIGIN 31:0 +#define NV947D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300) +#define NV947D_HEAD_SET_SIZE_WIDTH 15:0 +#define NV947D_HEAD_SET_SIZE_HEIGHT 31:16 +#define NV947D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV947D_HEAD_SET_STORAGE_PITCH 20:8 +#define NV947D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV947D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV947D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV947D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300) +#define NV947D_HEAD_SET_PARAMS_FORMAT 15:8 +#define 
NV947D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV947D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV947D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV947D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV947D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV947D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV947D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV947D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define NV947D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV947D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV947D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV947D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV947D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV947D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV947D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV947D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV947D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV947D_HEAD_SET_PARAMS_GAMMA 2:2 +#define NV947D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV947D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV947D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV947D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300) +#define NV947D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0 +#define NV947D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NV947D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NV947D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002) +#define NV947D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NV947D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24 +#define NV947D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26 +#define NV947D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8 +#define NV947D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16 +#define NV947D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28 +#define NV947D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000) +#define NV947D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001) +#define NV947D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002) +#define NV947D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004) +#define NV947D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0 +#define NV947D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004) +#define NV947D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define 
NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001) +#define NV947D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300) +#define NV947D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NV947D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NV947D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NV947D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NV947D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2 +#define NV947D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000) +#define NV947D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001) +#define NV947D_HEAD_SET_PROCAMP_SAT_COS 19:8 +#define NV947D_HEAD_SET_PROCAMP_SAT_SINE 31:20 +#define NV947D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5 +#define NV947D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NV947D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NV947D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6 +#define NV947D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000) +#define NV947D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001) +#define NV947D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300) +#define NV947D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NV947D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_DITHER_CONTROL_BITS 2:1 +#define NV947D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000) +#define NV947D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001) +#define NV947D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002) +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE 6:3 +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NV947D_HEAD_SET_DITHER_CONTROL_PHASE 8:7 +#define NV947D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NV947D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NV947D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NV947D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define 
NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0 +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12 +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002) +#define NV947D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300) +#define NV947D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV947D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV947D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONVERSION_RED_GAIN 15:0 +#define NV947D_HEAD_SET_CONVERSION_RED_OFS 31:16 +#define NV947D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONVERSION_GRN_GAIN 15:0 +#define NV947D_HEAD_SET_CONVERSION_GRN_OFS 31:16 +#define NV947D_HEAD_SET_CONVERSION_BLU(a) 
(0x000004EC + (a)*0x00000300) +#define NV947D_HEAD_SET_CONVERSION_BLU_GAIN 15:0 +#define NV947D_HEAD_SET_CONVERSION_BLU_OFS 31:16 +#define NV947D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31 +#define NV947D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CSC_RED2RED_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_GRN2RED_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_BLU2RED_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_RED2GRN_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_RED2BLU_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV947D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300) +#define NV947D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NV947D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NV947D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NV947D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NV947D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12 +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008) +#define NV947D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300) +#define NV947D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0 +#define NV947D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10 +#define NV947D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20 +#define NV947D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00000528 + (a)*0x00000300) +#define NV947D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NV947D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004) +#define NV947D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NV947D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define 
NV947D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NV947D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NV947D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NV947D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300) +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_COMPRESSION(a) (0x00000560 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_ENABLE 0:0 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_CHUNK_BANDWIDTH 12:1 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LAST_BANDWIDTH 24:13 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LA(a) (0x00000564 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY1 7:4 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY2 11:8 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY3 15:12 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LA_CHUNK_SIZE 23:16 +#define NV947D_HEAD_SET_STALL_LOCK(a) (0x00000568 + (a)*0x00000300) +#define NV947D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NV947D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NV947D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NV947D_HEAD_SET_STALL_LOCK_MODE 1:1 +#define NV947D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NV947D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN 6:2 +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 
(0x00000018) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV947D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 7:7 +#define NV947D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000) +#define NV947D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001) +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0 +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0 +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0 +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0 +#define NV947D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300) +#define NV947D_HEAD_SET_SPARE_UNUSED 31:0 +#define NV947D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004) +#define NV947D_HEAD_SET_SPARE_NOOP_UNUSED 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl947d_h diff --git a/src/common/sdk/nvidia/inc/class/cl9570.h b/src/common/sdk/nvidia/inc/class/cl9570.h new file mode 100644 index 0000000..3197fd8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9570.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: class/cl9570.finn
+//
+
+#define NV9570_DISPLAY (0x9570U) /* finn: Evaluated from "NV9570_ALLOCATION_PARAMETERS_MESSAGE_ID" */
+
+#define NV9570_ALLOCATION_PARAMETERS_MESSAGE_ID (0x9570U)
+
+typedef struct NV9570_ALLOCATION_PARAMETERS {
+    NvU32 numHeads;  // Number of HEADs in this chip/display
+    NvU32 numDacs;   // Number of DACs in this chip/display
+    NvU32 numSors;   // Number of SORs in this chip/display
+    NvU32 numPiors;  // Number of PIORs in this chip/display
+} NV9570_ALLOCATION_PARAMETERS;
diff --git a/src/common/sdk/nvidia/inc/class/cl9571.h b/src/common/sdk/nvidia/inc/class/cl9571.h
new file mode 100644
index 0000000..75ac7c8
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cl9571.h
@@ -0,0 +1,317 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl9571_h_
+#define _cl9571_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NV9571_DISP_SF_USER 0x9571
+
+typedef volatile struct _cl9571_tag0 {
+    NvU32 dispSfUserOffset[0x400]; /* NV_PDISP_SF_USER 0x00690FFF:0x00690000 */
+} _Nv9571DispSfUser, Nv9571DispSfUserMap;
+
+#define NV9571_SF_HDMI_INFO_IDX_AVI_INFOFRAME 0x00000000 /* */
+#define NV9571_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */
+#define NV9571_SF_HDMI_INFO_IDX_GCP 0x00000003 /* */
+#define NV9571_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */
+#define NV9571_SF_HDMI_INFO_CTRL(i,j) (0x00690000-0x00690000+(i)*1024+(j)*64) /* RWX4A */
+#define NV9571_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */
+#define NV9571_SF_HDMI_INFO_CTRL_ENABLE 0:0 /* RWIVF */
+#define NV9571_SF_HDMI_INFO_CTRL_ENABLE_NO 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_INFO_CTRL_ENABLE_YES 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_ENABLE_DIS 0x00000000 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_ENABLE_EN 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_OTHER 4:4 /* RWIVF */
+#define NV9571_SF_HDMI_INFO_CTRL_OTHER_DIS 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_INFO_CTRL_OTHER_EN 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_SINGLE 8:8 /* RWIVF */
+#define NV9571_SF_HDMI_INFO_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_INFO_CTRL_SINGLE_EN 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW 9:9 /* RWIVF */
+#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW_EN 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW_DIS 0x00000000 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */
+#define NV9571_SF_HDMI_INFO_CTRL_HBLANK 12:12 /* RWIVF */
+#define NV9571_SF_HDMI_INFO_CTRL_HBLANK_DIS 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_INFO_CTRL_HBLANK_EN 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_VIDEO_FMT 16:16 /* RWIVF */
+#define NV9571_SF_HDMI_INFO_CTRL_VIDEO_FMT_SW_CONTROLLED 0x00000000 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_VIDEO_FMT_HW_CONTROLLED 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_VIDEO_FMT_INIT 0x00000001 /* RWI-V */
+#define NV9571_SF_HDMI_INFO_STATUS(i,j) (0x00690004-0x00690000+(i)*1024+(j)*64) /* R--4A */
+#define NV9571_SF_HDMI_INFO_STATUS__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_INFO_STATUS__SIZE_2 5 /* */
+#define NV9571_SF_HDMI_INFO_STATUS_SENT 0:0 /* R--VF */
+#define NV9571_SF_HDMI_INFO_STATUS_SENT_DONE 0x00000001 /* R---V */
+#define NV9571_SF_HDMI_INFO_STATUS_SENT_WAITING 0x00000000 /* R---V */
+#define NV9571_SF_HDMI_INFO_STATUS_SENT_INIT 0x00000000 /* R-I-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x00690000-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE 0:0 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_NO 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_YES 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_DIS 0x00000000 /* RW--V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_EN 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER 4:4 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_DIS 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_EN 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE 8:8 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_EN 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW 9:9 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_STATUS(i) (0x00690004-0x00690000+(i)*1024) /* R--4A */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_STATUS__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_STATUS_SENT 0:0 /* R--VF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_DONE 0x00000001 /* R---V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_WAITING 0x00000000 /* R---V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_INIT 0x00000000 /* R-I-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x00690008-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x0069000C-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x00690010-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x00690014-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x00690018-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_HEADER(i) (0x00690048-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_GENERIC_HEADER__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_GENERIC_HEADER_HB0 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_HEADER_HB0_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_HEADER_HB1 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_HEADER_HB1_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_HEADER_HB2 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_HEADER_HB2_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW(i) (0x0069004C-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3 31:24 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH(i) (0x00690050-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW(i) (0x00690054-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10 31:24 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH(i) (0x00690058-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW(i) (0x0069005C-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17 31:24 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH(i) (0x00690060-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW(i) (0x00690064-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24 31:24 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH(i) (0x00690068-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GCP_SUBPACK(i) (0x006900CC-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_GCP_SUBPACK__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_GCP_SUBPACK_SB0 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_GCP_SUBPACK_SB0_INIT 0x00000001 /* RWI-V */
+#define NV9571_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE 0x00000010 /* RW--V */
+#define NV9571_SF_HDMI_GCP_SUBPACK_SB1 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_GCP_SUBPACK_SB1_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_GCP_SUBPACK_SB2 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_GCP_SUBPACK_SB2_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_HEADER(i) (0x00690108-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_VSI_HEADER__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_VSI_HEADER_HB0 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_HEADER_HB0_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_HEADER_HB1 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_HEADER_HB1_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_HEADER_HB2 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_HEADER_HB2_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW(i) (0x0069010C-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB0 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB1 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB2 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB3 31:24 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH(i) (0x00690110-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW(i) (0x00690114-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB7 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB8 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB9 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB10 31:24 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH(i) (0x00690118-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW(i) (0x0069011C-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB14 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB15 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB16 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB17 31:24 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH(i) (0x00690120-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW(i) (0x00690124-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB21 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB22 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB23 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB24 31:24 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH(i) (0x00690128-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif // _cl9571_h_
diff --git a/src/common/sdk/nvidia/inc/class/cl957d.h b/src/common/sdk/nvidia/inc/class/cl957d.h
new file mode 100644
index 0000000..ea10694
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cl957d.h
@@ -0,0 +1,1602 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl957d_h_
+#define _cl957d_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NV957D_CORE_CHANNEL_DMA (0x0000957D)
+
+#define NV957D_CORE_NOTIFIER_3 0x00000000
+#define NV957D_CORE_NOTIFIER_3_SIZEOF 0x00000150
+#define NV957D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000
+#define NV957D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0
+#define NV957D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1
+#define NV957D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16
+#define NV957D_CORE_NOTIFIER_3__1 0x00000001
+#define NV957D_CORE_NOTIFIER_3__1_R1 31:0
+#define NV957D_CORE_NOTIFIER_3__2 0x00000002
+#define NV957D_CORE_NOTIFIER_3__2_R2 31:0
+#define NV957D_CORE_NOTIFIER_3__3 0x00000003
+#define NV957D_CORE_NOTIFIER_3__3_R3 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_11 0x0000000B
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 13:12
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 16:14
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:28
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_CLK_MAX 23:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_LVDS_CLK_MAX 31:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 13:12
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 16:14
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:28
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_CLK_MAX 23:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_LVDS_CLK_MAX 31:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 13:12
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 16:14
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:28
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_CLK_MAX 23:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_LVDS_CLK_MAX 31:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 13:12
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 16:14
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:28
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_CLK_MAX 23:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_LVDS_CLK_MAX 31:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 13:12
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 16:14
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:28
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_CLK_MAX 23:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_LVDS_CLK_MAX 31:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8 +#define 
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 13:12
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 16:14
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:28
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_CLK_MAX 23:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_LVDS_CLK_MAX 31:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 13:12
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 16:14
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:28
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_CLK_MAX 23:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_LVDS_CLK_MAX 31:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:7
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:7
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:7
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:7
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 0x0000002B
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44 0x0000002C
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44_R0 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45 0x0000002D
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45_R1 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46 0x0000002E
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46_R0 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47 0x0000002F
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47_R1 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48 0x00000030
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48_R0 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49 0x00000031
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49_R1 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50 0x00000032
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50_R0 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51 0x00000033
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51_R1 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:14
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:14
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:14
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:14
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0
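// Editorial sketch (not part of the patch): each CAP_* value above appears to be
// the NvU32 word index of that capability word within the CAPABILITIES notifier,
// and the bare HI:LO tokens name bit ranges inside that word. Assuming a mapped
// notifier pointer "caps" (hypothetical name), SOR4's DisplayPort-A support bit
// (DP_A, bit 24 of word 0x1C) could be tested roughly like this:
//
//   volatile const NvU32 *caps = /* mapped CAPABILITIES notifier */;
//   NvU32 sor4 = caps[NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28];
//   NvBool hasDpA = ((sor4 >> 24) & 0x1) ==
//                   NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE;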
+
+
+// dma opcode instructions
+#define NV957D_DMA 0x00000000
+#define NV957D_DMA_OPCODE 31:29
+#define NV957D_DMA_OPCODE_METHOD 0x00000000
+#define NV957D_DMA_OPCODE_JUMP 0x00000001
+#define NV957D_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NV957D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NV957D_DMA_METHOD_COUNT 27:18
+#define NV957D_DMA_METHOD_OFFSET 11:2
+#define NV957D_DMA_DATA 31:0
+#define NV957D_DMA_DATA_NOP 0x00000000
+#define NV957D_DMA_JUMP_OFFSET 11:2
+#define NV957D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NV957D_PUT (0x00000000)
+#define NV957D_PUT_PTR 11:2
+#define NV957D_GET (0x00000004)
+#define NV957D_GET_PTR 11:2
+#define NV957D_UPDATE (0x00000080)
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4)
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4)
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE0 1:1
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE1 5:5
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE2 9:9
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE3 13:13
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000)
+#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001)
+#define NV957D_UPDATE_SPECIAL_HANDLING 25:24
+#define NV957D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NV957D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NV957D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NV957D_UPDATE_SPECIAL_HANDLING_REASON 23:16
+#define NV957D_UPDATE_NOT_DRIVER_FRIENDLY 31:31
+#define NV957D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000)
+#define NV957D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001)
+#define NV957D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30
+#define NV957D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000)
+#define NV957D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001)
+#define NV957D_UPDATE_INHIBIT_INTERRUPTS 29:29
+#define NV957D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NV957D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NV957D_SET_NOTIFIER_CONTROL (0x00000084)
+#define NV957D_SET_NOTIFIER_CONTROL_MODE 30:30
+#define NV957D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NV957D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NV957D_SET_NOTIFIER_CONTROL_OFFSET 11:2
+#define NV957D_SET_NOTIFIER_CONTROL_NOTIFY 31:31
+#define NV957D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000)
+#define NV957D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001)
+#define NV957D_SET_NOTIFIER_CONTROL_FORMAT 28:28
+#define NV957D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000)
+#define NV957D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001)
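// Editorial sketch (not part of the patch): a bare HI:LO value in these headers
// names a bit range inside a 32-bit method word. DRF-style helpers (the driver's
// real ones live in nvmisc.h) turn the range into a shift and mask with ternary
// arithmetic, since (0?31:29) evaluates to 29 and (1?31:29) to 31. A minimal,
// hypothetical version:
//
//   #define SKETCH_SHIFT(f)  ((0 ? f) % 32)                                  /* low bit */
//   #define SKETCH_MASK(f)   (0xFFFFFFFFu >> (31 - ((1 ? f) % 32) + SKETCH_SHIFT(f)))
//   #define SKETCH_NUM(f, n) (((n) & SKETCH_MASK(f)) << SKETCH_SHIFT(f))
//
// With it, a push-buffer header for this class could be packed from the
// NV957D_DMA fields defined above, e.g. one UPDATE method with one data word:
//
//   NvU32 hdr = SKETCH_NUM(NV957D_DMA_OPCODE, NV957D_DMA_OPCODE_METHOD)
//             | SKETCH_NUM(NV957D_DMA_METHOD_COUNT, 1)
//             | SKETCH_NUM(NV957D_DMA_METHOD_OFFSET, NV957D_UPDATE >> 2);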
+#define NV957D_SET_CONTEXT_DMA_NOTIFIER (0x00000088)
+#define NV957D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NV957D_GET_CAPABILITIES (0x0000008C)
+#define NV957D_GET_CAPABILITIES_DUMMY 31:0
+#define NV957D_SET_SPARE (0x0000016C)
+#define NV957D_SET_SPARE_UNUSED 31:0
+#define NV957D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004)
+#define NV957D_SET_SPARE_NOOP_UNUSED 31:0
+
+#define NV957D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020)
+#define NV957D_DAC_SET_CONTROL_OWNER_MASK 3:0
+#define NV957D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV957D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV957D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV957D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV957D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV957D_DAC_SET_CONTROL_PROTOCOL 12:8
+#define NV957D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000)
+#define NV957D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013)
+#define NV957D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020)
+#define NV957D_DAC_SET_SW_SPARE_A_CODE 31:0
+#define NV957D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020)
+#define NV957D_DAC_SET_SW_SPARE_B_CODE 31:0
+#define NV957D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020)
+#define NV957D_DAC_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV957D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020)
+#define NV957D_SOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV957D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV957D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV957D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV957D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV957D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV957D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NV957D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NV957D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NV957D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NV957D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NV957D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NV957D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NV957D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NV957D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV957D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV957D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV957D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NV957D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NV957D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NV957D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+#define NV957D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020)
+#define NV957D_SOR_SET_SW_SPARE_A_CODE 31:0
+#define NV957D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020)
+#define NV957D_SOR_SET_SW_SPARE_B_CODE 31:0
+#define NV957D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020)
+#define NV957D_SOR_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV957D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NV957D_PIOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV957D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV957D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV957D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV957D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV957D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV957D_PIOR_SET_CONTROL_PROTOCOL 11:8
+#define NV957D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000)
+#define NV957D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001)
+#define NV957D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV957D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV957D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV957D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020)
+#define NV957D_PIOR_SET_SW_SPARE_A_CODE 31:0
+#define NV957D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020)
+#define NV957D_PIOR_SET_SW_SPARE_B_CODE 31:0
+#define NV957D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020)
+#define NV957D_PIOR_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV957D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300)
+#define NV957D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0
+#define NV957D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8
+#define NV957D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000)
+#define NV957D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001)
+#define NV957D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 12:12
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 20:13
+#define NV957D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300)
+#define NV957D_HEAD_SET_CONTROL_STRUCTURE 0:0
+#define NV957D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK 1:1
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN 29:25
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30
+#define NV957D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31
+#define NV957D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NV957D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300)
+#define NV957D_HEAD_SET_OVERSCAN_COLOR_RED 9:0
+#define NV957D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10
+#define NV957D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20
+#define NV957D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300)
+#define NV957D_HEAD_SET_RASTER_SIZE_WIDTH 14:0
+#define NV957D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16
+#define NV957D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300)
+#define NV957D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NV957D_HEAD_SET_RASTER_SYNC_END_Y 30:16
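// Editorial sketch (not part of the patch): HEAD_SET_* methods take the head
// index as the (a) parameter and repeat every 0x300 bytes, so one set of field
// definitions serves all heads. Using the hypothetical SKETCH_NUM helper
// sketched earlier, programming head 2's raster size might look like:
//
//   NvU32 method = NV957D_HEAD_SET_RASTER_SIZE(2);       /* 0x414 + 2*0x300 */
//   NvU32 data   = SKETCH_NUM(NV957D_HEAD_SET_RASTER_SIZE_WIDTH,  1920)
//                | SKETCH_NUM(NV957D_HEAD_SET_RASTER_SIZE_HEIGHT, 1080);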
+#define NV957D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300)
+#define NV957D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NV957D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NV957D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300)
+#define NV957D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NV957D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NV957D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300)
+#define NV957D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0
+#define NV957D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16
+#define NV957D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300)
+#define NV957D_HEAD_SET_LOCK_CHAIN_POSITION 27:24
+#define NV957D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300)
+#define NV957D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0
+#define NV957D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10
+#define NV957D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20
+#define NV957D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300)
+#define NV957D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0
+#define NV957D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000)
+#define NV957D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001)
+#define NV957D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002)
+#define NV957D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2
+#define NV957D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NV957D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NV957D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3
+#define NV957D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000)
+#define NV957D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001)
+#define NV957D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4
+#define NV957D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000)
+#define NV957D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF)
+#define NV957D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5
+#define NV957D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NV957D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#define NV957D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6
+#define NV957D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000)
+#define NV957D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001)
+#define NV957D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300)
+#define NV957D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
+#define NV957D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300)
+#define NV957D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31
+#define NV957D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV957D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV957D_HEAD_SET_BASE_LUT_LO_MODE 27:24
+#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000)
+#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001)
+#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV957D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV957D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV957D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV957D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300)
+#define NV957D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300)
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000)
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001)
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV957D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV957D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300)
+#define NV957D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0
+#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300)
+#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300)
+#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20
+#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000)
+#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001)
+#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002)
+#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24
+#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000)
+#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001)
+#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25
+#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000)
+#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001)
+#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26
+#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000)
+#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001)
+#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300)
+#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV957D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300)
+#define NV957D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0
+#define NV957D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300)
+#define NV957D_HEAD_SET_OFFSET_ORIGIN 31:0
+#define NV957D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300)
+#define NV957D_HEAD_SET_SIZE_WIDTH 15:0
+#define NV957D_HEAD_SET_SIZE_HEIGHT 31:16
+#define NV957D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300)
+#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV957D_HEAD_SET_STORAGE_PITCH 20:8
+#define NV957D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24
+#define NV957D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV957D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV957D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300)
+#define NV957D_HEAD_SET_PARAMS_FORMAT 15:8
+#define NV957D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NV957D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F)
+#define NV957D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E)
+#define NV957D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV957D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV957D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV957D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NV957D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024)
+#define NV957D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NV957D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NV957D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV957D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NV957D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NV957D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NV957D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0
+#define NV957D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV957D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV957D_HEAD_SET_PARAMS_GAMMA 2:2
+#define NV957D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000)
+#define NV957D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001)
+#define NV957D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300)
+#define NV957D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0
+#define NV957D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300)
+#define NV957D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0
+#define NV957D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000)
+#define NV957D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001)
+#define NV957D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002)
+#define NV957D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300)
+#define NV957D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NV957D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24
+#define NV957D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26
+#define NV957D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NV957D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NV957D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8
+#define NV957D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16
+#define NV957D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28
+#define NV957D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV957D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004)
+#define NV957D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0
+#define NV957D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004)
+#define NV957D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3
+#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000)
NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001) +#define NV957D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300) +#define NV957D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NV957D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NV957D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NV957D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NV957D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2 +#define NV957D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000) +#define NV957D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001) +#define NV957D_HEAD_SET_PROCAMP_SAT_COS 19:8 +#define NV957D_HEAD_SET_PROCAMP_SAT_SINE 31:20 +#define NV957D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5 +#define NV957D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NV957D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NV957D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6 +#define NV957D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000) +#define NV957D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001) +#define NV957D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300) +#define NV957D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NV957D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_DITHER_CONTROL_BITS 2:1 +#define NV957D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000) +#define NV957D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001) +#define NV957D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002) +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE 6:3 +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NV957D_HEAD_SET_DITHER_CONTROL_PHASE 8:7 +#define NV957D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NV957D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NV957D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NV957D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0 +#define 
NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0 +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12 +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002) +#define NV957D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300) +#define NV957D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV957D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV957D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONVERSION_RED_GAIN 15:0 +#define NV957D_HEAD_SET_CONVERSION_RED_OFS 31:16 +#define NV957D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONVERSION_GRN_GAIN 15:0 +#define NV957D_HEAD_SET_CONVERSION_GRN_OFS 31:16 +#define NV957D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300) +#define NV957D_HEAD_SET_CONVERSION_BLU_GAIN 15:0 +#define NV957D_HEAD_SET_CONVERSION_BLU_OFS 31:16 +#define NV957D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31 +#define NV957D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define 
NV957D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CSC_RED2RED_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_GRN2RED_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_BLU2RED_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_RED2GRN_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_RED2BLU_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV957D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300) +#define NV957D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NV957D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NV957D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NV957D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NV957D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12 +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008) +#define NV957D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300) +#define NV957D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0 +#define NV957D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10 +#define NV957D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20 +#define NV957D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00000528 + (a)*0x00000300) +#define NV957D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NV957D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004) +#define NV957D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NV957D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NV957D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NV957D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NV957D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define 
NV957D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300) +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_COMPRESSION(a) (0x00000560 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_ENABLE 0:0 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_CHUNK_BANDWIDTH 12:1 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LAST_BANDWIDTH 24:13 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LA(a) (0x00000564 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY1 7:4 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY2 11:8 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY3 15:12 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LA_CHUNK_SIZE 23:16 +#define NV957D_HEAD_SET_STALL_LOCK(a) (0x00000568 + (a)*0x00000300) +#define NV957D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NV957D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NV957D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NV957D_HEAD_SET_STALL_LOCK_MODE 1:1 +#define NV957D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NV957D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN 6:2 +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define 
NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV957D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 7:7 +#define NV957D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000) +#define NV957D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001) +#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0 +#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0 +#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0 +#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0 +#define NV957D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300) +#define NV957D_HEAD_SET_SPARE_UNUSED 31:0 +#define NV957D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004) +#define NV957D_HEAD_SET_SPARE_NOOP_UNUSED 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl957d_h diff --git a/src/common/sdk/nvidia/inc/class/cl9770.h b/src/common/sdk/nvidia/inc/class/cl9770.h new file mode 100644 index 0000000..9f14281 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9770.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
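A note on reading these generated headers: each field macro above expands to a bare HIGH:LOW bit-range token (for example, NV957D_HEAD_SET_SIZE_WIDTH is 15:0), and driver code turns such a range into a shift/mask pair via ternary-based helpers, as the DRF macros in nvmisc.h do. The following is a minimal, self-contained sketch of that scheme; the FIELD_* and DEMO_* names are illustrative stand-ins, not the driver's own macros:

    /* Sketch of DRF-style field access: "0 ? hi:lo" picks the low bit and
     * "1 ? hi:lo" the high bit of a HIGH:LOW range, at compile time. */
    #include <stdio.h>

    #define FIELD_LO(f)     (0 ? f)   /* low bit of a HI:LO range  */
    #define FIELD_HI(f)     (1 ? f)   /* high bit of a HI:LO range */
    #define FIELD_SHIFT(f)  (FIELD_LO(f) % 32)
    #define FIELD_MASK(f)   (0xFFFFFFFFU >> (31 - (FIELD_HI(f) % 32) + (FIELD_LO(f) % 32)))
    #define FIELD_SET(f, n) (((n) & FIELD_MASK(f)) << FIELD_SHIFT(f))

    #define DEMO_WIDTH  15:0    /* shaped like NV957D_HEAD_SET_SIZE_WIDTH  */
    #define DEMO_HEIGHT 31:16   /* shaped like NV957D_HEAD_SET_SIZE_HEIGHT */

    int main(void)
    {
        /* Pack a 1920x1080 size the way a SET_SIZE method word is built. */
        unsigned int v = FIELD_SET(DEMO_WIDTH, 1920) | FIELD_SET(DEMO_HEIGHT, 1080);
        printf("0x%08X\n", v);   /* prints 0x04380780 */
        return 0;
    }

The trick works because a range like 15:0, once substituted into a conditional expression, is an ordinary ternary: the low and high bounds fall out at compile time with no string processing.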
diff --git a/src/common/sdk/nvidia/inc/class/cl9770.h b/src/common/sdk/nvidia/inc/class/cl9770.h
new file mode 100644
index 0000000..9f14281
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cl9770.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/cl9770.finn
+//
+
+#define NV9770_DISPLAY (0x9770U) /* finn: Evaluated from "NV9770_ALLOCATION_PARAMETERS_MESSAGE_ID" */
+
+#define NV9770_ALLOCATION_PARAMETERS_MESSAGE_ID (0x9770U)
+
+typedef struct NV9770_ALLOCATION_PARAMETERS {
+    NvU32 numHeads;  // Number of HEADs in this chip/display
+    NvU32 numDacs;   // Number of DACs in this chip/display
+    NvU32 numSors;   // Number of SORs in this chip/display
+    NvU32 numPiors;  // Number of PIORs in this chip/display
+} NV9770_ALLOCATION_PARAMETERS;
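NV9770_ALLOCATION_PARAMETERS above is the parameter block supplied when allocating the NV9770_DISPLAY class; the four counts describe the display resources (scanout heads and output encoders) involved. A minimal sketch of how a client might fill it, assuming the definition above is in scope; the helper name and the counts are illustrative only, not taken from the driver:

    /* Hypothetical helper: populate NV9770_DISPLAY allocation parameters. */
    #include <string.h>

    static void demoFillDispAllocParams(NV9770_ALLOCATION_PARAMETERS *pParams)
    {
        memset(pParams, 0, sizeof(*pParams));
        pParams->numHeads = 4;  /* HEADs: scanout pipelines              */
        pParams->numDacs  = 1;  /* DACs: analog outputs                  */
        pParams->numSors  = 4;  /* SORs: serial outputs (TMDS/LVDS/DP)   */
        pParams->numPiors = 1;  /* PIORs: parallel I/O (external encoders) */
    }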
diff --git a/src/common/sdk/nvidia/inc/class/cl977d.h b/src/common/sdk/nvidia/inc/class/cl977d.h
new file mode 100644
index 0000000..adf59a2
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cl977d.h
@@ -0,0 +1,1587 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl977d_h_
+#define _cl977d_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NV977D_CORE_CHANNEL_DMA (0x0000977D)
+
+#define NV977D_CORE_NOTIFIER_3 0x00000000
+#define NV977D_CORE_NOTIFIER_3_SIZEOF 0x00000150
+#define NV977D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000
+#define NV977D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0
+#define NV977D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1
+#define NV977D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16
+#define NV977D_CORE_NOTIFIER_3__1 0x00000001
+#define NV977D_CORE_NOTIFIER_3__1_R1 31:0
+#define NV977D_CORE_NOTIFIER_3__2 0x00000002
+#define NV977D_CORE_NOTIFIER_3__2_R2 31:0
+#define NV977D_CORE_NOTIFIER_3__3 0x00000003
+#define NV977D_CORE_NOTIFIER_3__3_R3 31:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_11 0x0000000B
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 13:12
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 16:14
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:28
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_CLK_MAX 23:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_LVDS_CLK_MAX 31:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 13:12
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 16:14
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:28
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_CLK_MAX 23:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_LVDS_CLK_MAX 31:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 13:12
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 16:14
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:28
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_CLK_MAX 23:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_LVDS_CLK_MAX 31:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 13:12
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 16:14
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:28
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_CLK_MAX 23:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_LVDS_CLK_MAX 31:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 13:12
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 16:14
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:28
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_CLK_MAX 23:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_LVDS_CLK_MAX 31:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 13:12
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 16:14
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:28
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_CLK_MAX 23:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_LVDS_CLK_MAX 31:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 13:12
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 16:14
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:28
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_CLK_MAX 23:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_LVDS_CLK_MAX 31:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 13:12
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 16:14
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:28
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_CLK_MAX 23:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_LVDS_CLK_MAX 31:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:7
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:7
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:7 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:7 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 0x0000002B +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31 +#define 
NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15 +#define 
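[Editor's note, not part of the patch: each _CAP_ name above is a dword index into the capabilities notifier, and each field is a HIGH:LOW bit range within that dword. A minimal reader sketch in C; the helper name and the notifier pointer are illustrative assumptions, since the HIGH:LOW macros do not expand to usable C expressions on their own.]

#include <stdint.h>

/* Extract bits high..low of the capabilities dword at `index`.
 * Field positions are passed by hand, copied from the defines. */
static inline uint32_t cap_field(const uint32_t *caps, uint32_t index,
                                 unsigned high, unsigned low)
{
    return (caps[index] >> low) &
           (uint32_t)((1ull << (high - low + 1)) - 1);
}

/* Example: HEAD1 max pixel clock lives in dword 0x40 (CAP_HEAD1_64),
 * bits 7:0:
 *   uint32_t pclk = cap_field(caps, 0x00000040, 7, 0);
 */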
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:14
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:14
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0
+
+
+// dma opcode instructions
+#define NV977D_DMA 0x00000000
+#define NV977D_DMA_OPCODE 31:29
+#define NV977D_DMA_OPCODE_METHOD 0x00000000
+#define NV977D_DMA_OPCODE_JUMP 0x00000001
+#define NV977D_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NV977D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NV977D_DMA_METHOD_COUNT 27:18
+#define NV977D_DMA_METHOD_OFFSET 11:2
+#define NV977D_DMA_DATA 31:0
+#define NV977D_DMA_DATA_NOP 0x00000000
+#define NV977D_DMA_JUMP_OFFSET 11:2
+#define NV977D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
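[Editor's note, not part of the patch: the NV977D_DMA_* defines describe the pushbuffer word layout for this class: the opcode sits in bits 31:29, the method count in 27:18, and the method offset in dwords (byte offset >> 2) in 11:2. A hedged packing sketch in C; the helper name is hypothetical.]

#include <stdint.h>

/* Build a pushbuffer METHOD header: opcode in 31:29, count in 27:18,
 * and the method offset in 11:2. Masking the byte offset with 0xffc
 * is equivalent to placing (offset >> 2) into the 11:2 field. */
static inline uint32_t nv977d_method_header(uint32_t opcode, uint32_t count,
                                            uint32_t method_byte_offset)
{
    return (opcode << 29) |
           ((count & 0x3ffu) << 18) |
           (method_byte_offset & 0xffcu);
}

/* e.g. one data word for NV977D_UPDATE (method 0x0080), where 0x0 is
 * NV977D_DMA_OPCODE_METHOD:
 *   push[0] = nv977d_method_header(0x0, 1, 0x0080);
 *   push[1] = update_value;
 */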
+
+// class methods
+#define NV977D_PUT (0x00000000)
+#define NV977D_PUT_PTR 11:2
+#define NV977D_GET (0x00000004)
+#define NV977D_GET_PTR 11:2
+#define NV977D_UPDATE (0x00000080)
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4)
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4)
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE0 1:1
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE1 5:5
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE2 9:9
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE3 13:13
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000)
+#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001)
+#define NV977D_UPDATE_SPECIAL_HANDLING 25:24
+#define NV977D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NV977D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NV977D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NV977D_UPDATE_SPECIAL_HANDLING_REASON 23:16
+#define NV977D_UPDATE_NOT_DRIVER_FRIENDLY 31:31
+#define NV977D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000)
+#define NV977D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001)
+#define NV977D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30
+#define NV977D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000)
+#define NV977D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001)
+#define NV977D_UPDATE_INHIBIT_INTERRUPTS 29:29
+#define NV977D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NV977D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NV977D_SET_NOTIFIER_CONTROL (0x00000084)
+#define NV977D_SET_NOTIFIER_CONTROL_MODE 30:30
+#define NV977D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NV977D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NV977D_SET_NOTIFIER_CONTROL_OFFSET 11:2
+#define NV977D_SET_NOTIFIER_CONTROL_NOTIFY 31:31
+#define NV977D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000)
+#define NV977D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001)
+#define NV977D_SET_NOTIFIER_CONTROL_FORMAT 28:28
+#define NV977D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000)
+#define NV977D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001)
+#define NV977D_SET_CONTEXT_DMA_NOTIFIER (0x00000088)
+#define NV977D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NV977D_GET_CAPABILITIES (0x0000008C)
+#define NV977D_GET_CAPABILITIES_DUMMY 31:0
+#define NV977D_SET_SPARE (0x0000016C)
+#define NV977D_SET_SPARE_UNUSED 31:0
+#define NV977D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004)
+#define NV977D_SET_SPARE_NOOP_UNUSED 31:0
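[Editor's note, not part of the patch: the (i)-indexed interlock macros place one enable bit per head in the UPDATE data word (cursor at bit 0+i*4, base at 1+i*4, overlay at 2+i*4, overlay-imm at 3+i*4), and SET_NOTIFIER_CONTROL packs notify/mode/offset into a single word. A sketch with hypothetical helpers; the unit of the OFFSET field (bits 11:2) is an assumption here, as the defines alone do not fix it.]

#include <stdint.h>

/* UPDATE data word: one interlock bit per head, per the (i)-indexed
 * field macros above. */
static inline uint32_t nv977d_update_word(unsigned head, int cursor, int base)
{
    uint32_t v = 0;
    if (cursor) v |= 1u << (0 + head * 4); /* INTERLOCK_WITH_CURSOR(i) */
    if (base)   v |= 1u << (1 + head * 4); /* INTERLOCK_WITH_BASE(i)   */
    return v;
}

/* SET_NOTIFIER_CONTROL data word: NOTIFY_ENABLE (bit 31), WRITE_AWAKEN
 * mode (bit 30), offset in the field at bits 11:2 (assumed to be a
 * dword index purely for illustration). */
static inline uint32_t nv977d_notifier_ctrl(uint32_t offset)
{
    return (1u << 31) | (1u << 30) | ((offset & 0x3ffu) << 2);
}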
+
+#define NV977D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020)
+#define NV977D_DAC_SET_CONTROL_OWNER_MASK 3:0
+#define NV977D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV977D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV977D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV977D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV977D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV977D_DAC_SET_CONTROL_PROTOCOL 12:8
+#define NV977D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000)
+#define NV977D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013)
+#define NV977D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020)
+#define NV977D_DAC_SET_SW_SPARE_A_CODE 31:0
+#define NV977D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020)
+#define NV977D_DAC_SET_SW_SPARE_B_CODE 31:0
+#define NV977D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020)
+#define NV977D_DAC_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV977D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020)
+#define NV977D_SOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV977D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV977D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV977D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV977D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV977D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV977D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NV977D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NV977D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NV977D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NV977D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NV977D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NV977D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NV977D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NV977D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV977D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV977D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV977D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NV977D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NV977D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NV977D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+#define NV977D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020)
+#define NV977D_SOR_SET_SW_SPARE_A_CODE 31:0
+#define NV977D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020)
+#define NV977D_SOR_SET_SW_SPARE_B_CODE 31:0
+#define NV977D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020)
+#define NV977D_SOR_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV977D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NV977D_PIOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV977D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV977D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV977D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV977D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV977D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV977D_PIOR_SET_CONTROL_PROTOCOL 11:8
+#define NV977D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000)
+#define NV977D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001)
+#define NV977D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV977D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV977D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV977D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020)
+#define NV977D_PIOR_SET_SW_SPARE_A_CODE 31:0
+#define NV977D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020)
+#define NV977D_PIOR_SET_SW_SPARE_B_CODE 31:0
+#define NV977D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020)
+#define NV977D_PIOR_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV977D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300)
+#define NV977D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0
+#define NV977D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8
+#define NV977D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 12:12
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 24:13
+#define NV977D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONTROL_STRUCTURE 0:0
+#define NV977D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK 1:1
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN 29:25
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30
+#define NV977D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31
+#define NV977D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300)
+#define NV977D_HEAD_SET_OVERSCAN_COLOR_RED 9:0
+#define NV977D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10
+#define NV977D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20
+#define NV977D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300)
+#define NV977D_HEAD_SET_RASTER_SIZE_WIDTH 14:0
+#define NV977D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16
+#define NV977D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300)
+#define NV977D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NV977D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NV977D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300)
+#define NV977D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NV977D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NV977D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300)
+#define NV977D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NV977D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NV977D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300)
+#define NV977D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0
+#define NV977D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16
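[Editor's note, not part of the patch: each NV977D_HEAD_SET_RASTER_* method carries an X (bits 14:0) / Y (bits 30:16) pair, so a mode's raster timings are a handful of such words: total size, sync end, blank end, blank start, plus the interlaced-only vert blank2. A packing sketch with a hypothetical helper:]

#include <stdint.h>

/* Pack an X/Y pair as used by the NV977D_HEAD_SET_RASTER_* methods:
 * X in bits 14:0, Y in bits 30:16. */
static inline uint32_t nv977d_raster_xy(uint32_t x, uint32_t y)
{
    return (x & 0x7fffu) | ((y & 0x7fffu) << 16);
}

/* e.g. SET_RASTER_SIZE for head h is method 0x414 + h*0x300, with
 * data nv977d_raster_xy(h_total, v_total); how the method gets pushed
 * (header + data words) follows the DMA opcode sketch earlier. */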
+#define NV977D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300)
+#define NV977D_HEAD_SET_LOCK_CHAIN_POSITION 27:24
+#define NV977D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300)
+#define NV977D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0
+#define NV977D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10
+#define NV977D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20
+#define NV977D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0
+#define NV977D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000)
+#define NV977D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001)
+#define NV977D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002)
+#define NV977D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2
+#define NV977D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NV977D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NV977D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3
+#define NV977D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000)
+#define NV977D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001)
+#define NV977D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4
+#define NV977D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000)
+#define NV977D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF)
+#define NV977D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5
+#define NV977D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6
+#define NV977D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
+#define NV977D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300)
+#define NV977D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31
+#define NV977D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE 27:24
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV977D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV977D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300)
+#define NV977D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300)
+#define NV977D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
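[Editor's note, not part of the patch: the pixel clock methods take a frequency in hertz (bits 30:0) plus an ADJ1000DIV1001 flag (bit 31), letting the NTSC-family rates be requested as the exact integer rate scaled by 1000/1001 rather than a rounded hertz value. A packing sketch:]

#include <stdint.h>

/* NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY data word: HERTZ in 30:0,
 * ADJ1000DIV1001 in bit 31. */
static inline uint32_t nv977d_pclk_word(uint32_t hertz, int adj_1000_1001)
{
    return (hertz & 0x7fffffffu) |
           ((uint32_t)(adj_1000_1001 != 0) << 31);
}

/* e.g. 74.25 MHz with the 1000/1001 adjust requests ~74.176 MHz,
 * the 59.94 Hz variant of a 60 Hz HD mode:
 *   uint32_t w = nv977d_pclk_word(74250000u, 1);
 */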
+#define NV977D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0
+#define NV977D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300)
+#define NV977D_HEAD_SET_OFFSET_ORIGIN 31:0
+#define NV977D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300)
+#define NV977D_HEAD_SET_SIZE_WIDTH 15:0
+#define NV977D_HEAD_SET_SIZE_HEIGHT 31:16
+#define NV977D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300)
+#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV977D_HEAD_SET_STORAGE_PITCH 20:8
+#define NV977D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24
+#define NV977D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV977D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV977D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300)
+#define NV977D_HEAD_SET_PARAMS_FORMAT 15:8
+#define NV977D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NV977D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0
+#define NV977D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV977D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV977D_HEAD_SET_PARAMS_GAMMA 2:2
+#define NV977D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000)
+#define NV977D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001)
+#define NV977D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0
+#define NV977D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300)
+#define NV977D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0
+#define NV977D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000)
+#define NV977D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001)
+#define NV977D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002)
+#define NV977D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NV977D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24
+#define NV977D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26
+#define NV977D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8
+#define NV977D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16
+#define NV977D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28
+#define NV977D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV977D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004)
+#define NV977D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0
+#define NV977D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004)
+#define NV977D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300)
+#define NV977D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003)
+#define NV977D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2
+#define NV977D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000)
+#define NV977D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001)
+#define NV977D_HEAD_SET_PROCAMP_SAT_COS 19:8
+#define NV977D_HEAD_SET_PROCAMP_SAT_SINE 31:20
+#define NV977D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5
+#define NV977D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NV977D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NV977D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6
+#define NV977D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001)
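[Editor's note, not part of the patch: SAT_COS (19:8) and SAT_SINE (31:20) suggest the usual saturation/hue control expressed as a scaled cosine/sine pair. The defines fix only the field positions, so the fixed-point scale below (1024 = unity) is purely an assumption for illustration.]

#include <math.h>
#include <stdint.h>

/* Compose the two NV977D_HEAD_SET_PROCAMP saturation fields from a
 * saturation gain and hue angle. The 1024 == 1.0 scale is assumed. */
static inline uint32_t nv977d_procamp_sat(double sat, double hue_rad)
{
    int32_t c = (int32_t)lrint(sat * cos(hue_rad) * 1024.0);
    int32_t s = (int32_t)lrint(sat * sin(hue_rad) * 1024.0);
    return (((uint32_t)c & 0xfffu) << 8) |   /* SAT_COS  19:8  */
           (((uint32_t)s & 0xfffu) << 20);   /* SAT_SINE 31:20 */
}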
+#define NV977D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
+#define NV977D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NV977D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_DITHER_CONTROL_BITS 2:1
+#define NV977D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000)
+#define NV977D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001)
+#define NV977D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002)
+#define NV977D_HEAD_SET_DITHER_CONTROL_MODE 6:3
+#define NV977D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NV977D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NV977D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NV977D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NV977D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NV977D_HEAD_SET_DITHER_CONTROL_PHASE 8:7
+#define NV977D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300)
+#define NV977D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NV977D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300)
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NV977D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300)
+#define NV977D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0
+#define NV977D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300)
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300)
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300)
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002)
+#define NV977D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300)
+#define NV977D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0
+#define NV977D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONVERSION_RED_GAIN 15:0
+#define NV977D_HEAD_SET_CONVERSION_RED_OFS 31:16
+#define NV977D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONVERSION_GRN_GAIN 15:0
+#define NV977D_HEAD_SET_CONVERSION_GRN_OFS 31:16
+#define NV977D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONVERSION_BLU_GAIN 15:0
+#define NV977D_HEAD_SET_CONVERSION_BLU_OFS 31:16
+#define NV977D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31
+#define NV977D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_CSC_RED2RED_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_GRN2RED_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_BLU2RED_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_RED2GRN_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_RED2BLU_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0
(a)*0x00000300) +#define NV977D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NV977D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NV977D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NV977D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NV977D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12 +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000) +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001) +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002) +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003) +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004) +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005) +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006) +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008) +#define NV977D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300) +#define NV977D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0 +#define NV977D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10 +#define NV977D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20 +#define NV977D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00000528 + (a)*0x00000300) +#define NV977D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NV977D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004) +#define NV977D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NV977D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300) +#define NV977D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NV977D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300) +#define NV977D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NV977D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300) +#define NV977D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NV977D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300) +#define NV977D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NV977D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300) +#define NV977D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NV977D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NV977D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NV977D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NV977D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NV977D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) +#define NV977D_HEAD_SET_CONTROL_COMPRESSION(a) (0x00000560 + (a)*0x00000300) +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_ENABLE 0:0 +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_CHUNK_BANDWIDTH 12:1 +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LAST_BANDWIDTH 24:13 +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LA(a) (0x00000564 + (a)*0x00000300) +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY1 7:4 +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY2 11:8 +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY3 15:12 +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LA_CHUNK_SIZE 23:16 +#define NV977D_HEAD_SET_STALL_LOCK(a) (0x00000568 + (a)*0x00000300) +#define NV977D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NV977D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NV977D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NV977D_HEAD_SET_STALL_LOCK_MODE 1:1 +#define 
NV977D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000)
+#define NV977D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN 6:2
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV977D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 7:7
+#define NV977D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000)
+#define NV977D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001)
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300)
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300)
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300)
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300)
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0
+#define NV977D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300)
+#define NV977D_HEAD_SET_SPARE_UNUSED 31:0
+#define NV977D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004)
+#define NV977D_HEAD_SET_SPARE_NOOP_UNUSED 31:0
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _cl977d_h
diff --git a/src/common/sdk/nvidia/inc/class/cl9870.h b/src/common/sdk/nvidia/inc/class/cl9870.h
new file mode 100644
index 0000000..3bf5482
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cl9870.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/cl9870.finn
+//
+
+#define NV9870_DISPLAY (0x9870U) /* finn: Evaluated from "NV9870_ALLOCATION_PARAMETERS_MESSAGE_ID" */
+
+#define NV9870_ALLOCATION_PARAMETERS_MESSAGE_ID (0x9870U)
+
+typedef struct NV9870_ALLOCATION_PARAMETERS {
+    NvU32 numHeads; // Number of HEADs in this chip/display
+    NvU32 numDacs;  // Number of DACs in this chip/display
+    NvU32 numSors;  // Number of SORs in this chip/display
+    NvU32 numPiors; // Number of PIORs in this chip/display
+} NV9870_ALLOCATION_PARAMETERS;
diff --git a/src/common/sdk/nvidia/inc/class/cl987d.h b/src/common/sdk/nvidia/inc/class/cl987d.h
new file mode 100644
index 0000000..ab01f62
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cl987d.h
@@ -0,0 +1,1590 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + + +#ifndef _cl987d_h_ +#define _cl987d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV987D_CORE_CHANNEL_DMA (0x0000987D) + +#define NV987D_CORE_NOTIFIER_3 0x00000000 +#define NV987D_CORE_NOTIFIER_3_SIZEOF 0x00000150 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16 +#define NV987D_CORE_NOTIFIER_3__1 0x00000001 +#define NV987D_CORE_NOTIFIER_3__1_R1 31:0 +#define NV987D_CORE_NOTIFIER_3__2 0x00000002 +#define NV987D_CORE_NOTIFIER_3__2_R2 31:0 +#define NV987D_CORE_NOTIFIER_3__3 0x00000003 +#define NV987D_CORE_NOTIFIER_3__3_R3 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001 +#define 
NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 
0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_11 0x0000000B +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0 +#define 
NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 +#define 
NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000 
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 
0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3 +#define 
NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4 +#define 
NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001 
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 
0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000 +#define 
NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:10 
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 0x0000002B +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31 +#define 
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:14
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:14
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:14
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0
+
+
+// dma opcode instructions
+#define NV987D_DMA 0x00000000
+#define NV987D_DMA_OPCODE 31:29
+#define NV987D_DMA_OPCODE_METHOD 0x00000000
+#define NV987D_DMA_OPCODE_JUMP 0x00000001
+#define NV987D_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NV987D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NV987D_DMA_METHOD_COUNT 27:18
+#define NV987D_DMA_METHOD_OFFSET 11:2
+#define NV987D_DMA_DATA 31:0
+#define NV987D_DMA_DATA_NOP 0x00000000
+#define NV987D_DMA_JUMP_OFFSET 11:2
+#define NV987D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
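+/*
+ * Editor's sketch -- not part of the original header. A pushbuffer entry
+ * for this class begins with a header dword assembled from the NV987D_DMA_*
+ * fields above: the opcode in bits 31:29, the method count in 27:18, and
+ * the method offset (in dwords) in 11:2. The helper below is hypothetical
+ * and reuses the SKETCH_* macros from the earlier sketch.
+ */
+static inline NvU32 sketchDmaMethodHeader(NvU32 method, NvU32 count)
+{
+    return SKETCH_NUM(NV987D_DMA_OPCODE, NV987D_DMA_OPCODE_METHOD) |
+           SKETCH_NUM(NV987D_DMA_METHOD_COUNT, count) |
+           SKETCH_NUM(NV987D_DMA_METHOD_OFFSET, method >> 2);
+}
+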
+// class methods
+#define NV987D_PUT (0x00000000)
+#define NV987D_PUT_PTR 11:2
+#define NV987D_GET (0x00000004)
+#define NV987D_GET_PTR 11:2
+#define NV987D_UPDATE (0x00000080)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE0 1:1
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE1 5:5
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE2 9:9
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE3 13:13
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001)
+#define NV987D_UPDATE_SPECIAL_HANDLING 25:24
+#define NV987D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NV987D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NV987D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NV987D_UPDATE_SPECIAL_HANDLING_REASON 23:16
+#define NV987D_UPDATE_NOT_DRIVER_FRIENDLY 31:31
+#define NV987D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000)
+#define NV987D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001)
+#define NV987D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30
+#define NV987D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000)
+#define NV987D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001)
+#define NV987D_UPDATE_INHIBIT_INTERRUPTS 29:29
+#define NV987D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NV987D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NV987D_SET_NOTIFIER_CONTROL (0x00000084)
+#define NV987D_SET_NOTIFIER_CONTROL_MODE 30:30
+#define NV987D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NV987D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NV987D_SET_NOTIFIER_CONTROL_OFFSET 11:2
+#define NV987D_SET_NOTIFIER_CONTROL_NOTIFY 31:31
+#define NV987D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000)
+#define NV987D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001)
+#define NV987D_SET_NOTIFIER_CONTROL_FORMAT 28:28
+#define NV987D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000)
+#define NV987D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001)
+#define NV987D_SET_NOTIFIER_CONTROL_NO_WAIT_ACTIVE 0:0
+#define NV987D_SET_NOTIFIER_CONTROL_NO_WAIT_ACTIVE_FALSE (0x00000000)
+#define NV987D_SET_NOTIFIER_CONTROL_NO_WAIT_ACTIVE_TRUE (0x00000001)
+#define NV987D_SET_CONTEXT_DMA_NOTIFIER (0x00000088)
+#define NV987D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NV987D_GET_CAPABILITIES (0x0000008C)
+#define NV987D_GET_CAPABILITIES_DUMMY 31:0
+#define NV987D_SET_SPARE (0x0000016C)
+#define NV987D_SET_SPARE_UNUSED 31:0
+#define NV987D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004)
+#define NV987D_SET_SPARE_NOOP_UNUSED 31:0
+
+#define NV987D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020)
+#define NV987D_DAC_SET_CONTROL_OWNER_MASK 3:0
+#define NV987D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV987D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV987D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV987D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV987D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV987D_DAC_SET_CONTROL_PROTOCOL 12:8
+#define NV987D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000)
+#define NV987D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013)
+#define NV987D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020)
+#define NV987D_DAC_SET_SW_SPARE_A_CODE 31:0
+#define NV987D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020)
+#define NV987D_DAC_SET_SW_SPARE_B_CODE 31:0
+#define NV987D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020)
+#define NV987D_DAC_SET_CUSTOM_REASON_CODE 31:0
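+
+/*
+ * Editor's sketch -- not part of the original header. Method data is built
+ * the same way: shift each field's value into its bit range and OR the
+ * results together. Assigning head 0 to a DAC with the RGB CRT protocol
+ * might look like this (SKETCH_* are the hypothetical helpers introduced
+ * above; production code would use the DRF family from nvmisc.h).
+ */
+static inline NvU32 sketchDacControlHead0RgbCrt(void)
+{
+    return SKETCH_NUM(NV987D_DAC_SET_CONTROL_OWNER_MASK,
+                      NV987D_DAC_SET_CONTROL_OWNER_MASK_HEAD0) |
+           SKETCH_NUM(NV987D_DAC_SET_CONTROL_PROTOCOL,
+                      NV987D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT);
+}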
+
+#define NV987D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020)
+#define NV987D_SOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV987D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV987D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV987D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV987D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV987D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV987D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NV987D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NV987D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NV987D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NV987D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NV987D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NV987D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NV987D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NV987D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV987D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV987D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV987D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NV987D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NV987D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NV987D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+#define NV987D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020)
+#define NV987D_SOR_SET_SW_SPARE_A_CODE 31:0
+#define NV987D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020)
+#define NV987D_SOR_SET_SW_SPARE_B_CODE 31:0
+#define NV987D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020)
+#define NV987D_SOR_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV987D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NV987D_PIOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV987D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV987D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV987D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV987D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV987D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV987D_PIOR_SET_CONTROL_PROTOCOL 11:8
+#define NV987D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000)
+#define NV987D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001)
+#define NV987D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV987D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV987D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV987D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020)
+#define NV987D_PIOR_SET_SW_SPARE_A_CODE 31:0
+#define NV987D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020)
+#define NV987D_PIOR_SET_SW_SPARE_B_CODE 31:0
+#define NV987D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020)
+#define NV987D_PIOR_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV987D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300)
+#define NV987D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0
+#define NV987D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8
+#define NV987D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 12:12
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 24:13
+#define NV987D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTROL_STRUCTURE 0:0
+#define NV987D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK 1:1
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN 29:25
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30
+#define NV987D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31
+#define NV987D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300)
+#define NV987D_HEAD_SET_OVERSCAN_COLOR_RED 9:0
+#define NV987D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10
+#define NV987D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20
+#define NV987D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300)
+#define NV987D_HEAD_SET_RASTER_SIZE_WIDTH 14:0
+#define NV987D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16
+#define NV987D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300)
+#define NV987D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NV987D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NV987D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300)
+#define NV987D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NV987D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NV987D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300)
+#define NV987D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NV987D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NV987D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300)
+#define NV987D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0
+#define NV987D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16
+#define NV987D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300)
+#define NV987D_HEAD_SET_LOCK_CHAIN_POSITION 27:24
+#define NV987D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300)
+#define NV987D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0
+#define NV987D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10
+#define NV987D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20
+#define NV987D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0
+#define NV987D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000)
+#define NV987D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001)
+#define NV987D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002)
+#define NV987D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2
+#define NV987D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NV987D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NV987D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3
+#define NV987D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000)
+#define NV987D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001)
+#define NV987D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4
+#define NV987D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000)
+#define NV987D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF)
+#define NV987D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5
+#define NV987D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6
+#define NV987D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
+#define NV987D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300)
+#define NV987D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31
+#define NV987D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE 27:24
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV987D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV987D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300)
+#define NV987D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300)
+#define NV987D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV987D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0
+#define NV987D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300)
+#define NV987D_HEAD_SET_OFFSET_ORIGIN 31:0
+#define NV987D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300)
+#define NV987D_HEAD_SET_SIZE_WIDTH 15:0
+#define NV987D_HEAD_SET_SIZE_HEIGHT 31:16
+#define NV987D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300)
+#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV987D_HEAD_SET_STORAGE_PITCH 20:8
+#define NV987D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24
+#define NV987D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV987D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV987D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300)
+#define NV987D_HEAD_SET_PARAMS_FORMAT 15:8
+#define NV987D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NV987D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0
+#define NV987D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV987D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV987D_HEAD_SET_PARAMS_GAMMA 2:2
+#define NV987D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000)
+#define NV987D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001)
+#define NV987D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0
+#define NV987D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300)
+#define NV987D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0
+#define NV987D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000)
+#define NV987D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001)
+#define NV987D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002)
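+
+/*
+ * Editor's sketch -- not part of the original header. Per-head methods take
+ * the head index as an argument and are spaced 0x300 bytes apart, so
+ * NV987D_HEAD_SET_OFFSET(0) is 0x0460 and NV987D_HEAD_SET_OFFSET(1) is
+ * 0x0760. Combined with the DMA header sketch above, pointing head 1 at a
+ * new surface origin might be queued like this (hypothetical helper).
+ */
+static inline void sketchQueueHead1Offset(NvU32 *push, NvU32 surfaceOffset)
+{
+    /* One data dword follows the header: the new surface origin. */
+    push[0] = sketchDmaMethodHeader(NV987D_HEAD_SET_OFFSET(1), 1);
+    push[1] = surfaceOffset; /* NV987D_HEAD_SET_OFFSET_ORIGIN, bits 31:0 */
+}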
+#define NV987D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NV987D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24
+#define NV987D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26
+#define NV987D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8
+#define NV987D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16
+#define NV987D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28
+#define NV987D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV987D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004)
+#define NV987D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0
+#define NV987D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004)
+#define NV987D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300)
+#define NV987D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NV987D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NV987D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NV987D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NV987D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003)
+#define NV987D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2
+#define NV987D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000)
+#define NV987D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001)
+#define NV987D_HEAD_SET_PROCAMP_SAT_COS 19:8
+#define NV987D_HEAD_SET_PROCAMP_SAT_SINE 31:20
+#define NV987D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5
+#define NV987D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NV987D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NV987D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6
+#define NV987D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
+#define NV987D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NV987D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_DITHER_CONTROL_BITS 2:1
+#define NV987D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000)
+#define NV987D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001)
+#define NV987D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002)
+#define NV987D_HEAD_SET_DITHER_CONTROL_MODE 6:3
+#define NV987D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NV987D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NV987D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NV987D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NV987D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NV987D_HEAD_SET_DITHER_CONTROL_PHASE 8:7
+#define NV987D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300)
+#define NV987D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NV987D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300)
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NV987D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300)
+#define NV987D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0
+#define NV987D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300)
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300)
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300)
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300)
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000)
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003)
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005)
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000)
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001)
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002)
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000)
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001)
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002)
+#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300)
+#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0
+#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003)
+#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005)
+#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12
+#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000)
+#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001)
+#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002)
+#define NV987D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300)
+#define NV987D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0
+#define NV987D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONVERSION_RED_GAIN 15:0
+#define NV987D_HEAD_SET_CONVERSION_RED_OFS 31:16
+#define NV987D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONVERSION_GRN_GAIN 15:0
+#define NV987D_HEAD_SET_CONVERSION_GRN_OFS 31:16
+#define NV987D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONVERSION_BLU_GAIN 15:0
+#define NV987D_HEAD_SET_CONVERSION_BLU_OFS 31:16
+#define NV987D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31
+#define NV987D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CSC_RED2RED_COEFF 18:0
+#define NV987D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CSC_GRN2RED_COEFF 18:0
+#define NV987D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CSC_BLU2RED_COEFF 18:0
+#define NV987D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300)
+#define NV987D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0
+#define NV987D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CSC_RED2GRN_COEFF 18:0
+#define NV987D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0
+#define NV987D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0
+#define NV987D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300)
+#define NV987D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0
+#define NV987D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CSC_RED2BLU_COEFF 18:0
+#define NV987D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0
+#define NV987D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0
+#define NV987D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300)
+#define NV987D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0
+#define NV987D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300)
+#define NV987D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0
+#define NV987D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000)
+#define NV987D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001)
+#define NV987D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002)
+#define NV987D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4
+#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12
+#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000)
+#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001)
+#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002)
+#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003)
+#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004)
+#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005)
+#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006)
+#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008)
+#define NV987D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300)
+#define NV987D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0
+#define NV987D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10
+#define NV987D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20
+#define NV987D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00000528 + (a)*0x00000300)
+#define NV987D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0
+#define NV987D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004)
+#define NV987D_HEAD_SET_DISPLAY_ID_CODE 31:0
+#define NV987D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300)
+#define NV987D_HEAD_SET_SW_SPARE_A_CODE 31:0
+#define NV987D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300)
+#define NV987D_HEAD_SET_SW_SPARE_B_CODE 31:0
+#define NV987D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300)
+#define NV987D_HEAD_SET_SW_SPARE_C_CODE 31:0
+#define NV987D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300)
+#define NV987D_HEAD_SET_SW_SPARE_D_CODE 31:0
+#define NV987D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300)
+#define NV987D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0
+#define NV987D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000)
+#define NV987D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1
+#define NV987D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000)
+#define NV987D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_COMPRESSION(a) (0x00000560 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTROL_COMPRESSION_ENABLE 0:0
+#define NV987D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_COMPRESSION_CHUNK_BANDWIDTH 12:1
+#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LAST_BANDWIDTH 24:13
+#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LA(a) (0x00000564 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY1 7:4
+#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY2 11:8
+#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY3 15:12
+#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LA_CHUNK_SIZE 23:16
+#define NV987D_HEAD_SET_STALL_LOCK(a) (0x00000568 + (a)*0x00000300)
NV987D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NV987D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NV987D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NV987D_HEAD_SET_STALL_LOCK_MODE 1:1 +#define NV987D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NV987D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN 6:2 +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV987D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 7:7 +#define NV987D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000) +#define NV987D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001) +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0 +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0 +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0 +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0 +#define NV987D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300) +#define NV987D_HEAD_SET_SPARE_UNUSED 31:0 +#define NV987D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004) +#define NV987D_HEAD_SET_SPARE_NOOP_UNUSED 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif 
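Throughout these headers a define such as NV987D_HEAD_SET_STALL_LOCK_MODE 1:1 names a high:low bit range within a 32-bit method data word, and the (a) parameter of a method macro selects one per-head 0x300-byte method window. As a minimal sketch of how such defines are typically consumed — the FLD_* helpers below are illustrative stand-ins, in the same ternary style as NVIDIA's DRF_* machinery, and are not part of this patch:

    #include <stdint.h>

    /* The "1 ? hi:lo" / "0 ? hi:lo" ternary trick extracts the endpoints of
     * a HI:LO field token. */
    #define FLD_HI(f)     (1 ? f)   /* high bit index of the field */
    #define FLD_LO(f)     (0 ? f)   /* low bit index of the field  */
    #define FLD_MASK(f)   ((0xFFFFFFFFu >> (31 - FLD_HI(f) + FLD_LO(f))) << FLD_LO(f))
    #define FLD_NUM(f, n) (((uint32_t)(n) << FLD_LO(f)) & FLD_MASK(f))

    /* Compose the data word for NV987D_HEAD_SET_STALL_LOCK on head 0:
     * enabled, one-shot mode, line-lock unstall.  The method offset itself
     * comes from the indexed macro: NV987D_HEAD_SET_STALL_LOCK(0) == 0x0568. */
    static uint32_t stall_lock_data(void)
    {
        return FLD_NUM(NV987D_HEAD_SET_STALL_LOCK_ENABLE,
                       NV987D_HEAD_SET_STALL_LOCK_ENABLE_TRUE)
             | FLD_NUM(NV987D_HEAD_SET_STALL_LOCK_MODE,
                       NV987D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT)
             | FLD_NUM(NV987D_HEAD_SET_STALL_LOCK_UNSTALL_MODE,
                       NV987D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK);
    }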
+#endif // _cl987d_h
diff --git a/src/common/sdk/nvidia/inc/class/cla06f.h b/src/common/sdk/nvidia/inc/class/cla06f.h
new file mode 100644
index 0000000..336cab5
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cla06f.h
@@ -0,0 +1,240 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clA06f_h_
+#define _clA06f_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvtypes.h"
+
+/* class KEPLER_CHANNEL_GPFIFO */
+/*
+ * Documentation for KEPLER_CHANNEL_GPFIFO can be found in dev_pbdma.ref,
+ * chapter "User Control Registers". It is documented as device NV_UDMA.
+ * The GPFIFO format itself is also documented in dev_pbdma.ref,
+ * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref,
+ * chapter "FIFO DMA RAM", NV_FIFO_DMA_*.
+ *
+ */
+#define KEPLER_CHANNEL_GPFIFO_A (0x0000A06F)
+
+
+/* pio method data structure */
+typedef volatile struct _cla06f_tag0 {
+    NvV32 Reserved00[0x7c0];
+} NvA06FTypedef, KEPLER_ChannelGPFifo;
+#define NVA06F_TYPEDEF KEPLER_CHANNELChannelGPFifo
+/* dma flow control data structure */
+typedef volatile struct _cla06f_tag1 {
+    NvU32 Ignored00[0x010];   /*                                  0000-003f*/
+    NvU32 Put;                /* put offset, read/write           0040-0043*/
+    NvU32 Get;                /* get offset, read only            0044-0047*/
+    NvU32 Reference;          /* reference value, read only       0048-004b*/
+    NvU32 PutHi;              /* high order put offset bits       004c-004f*/
+    NvU32 Ignored01[0x002];   /*                                  0050-0057*/
+    NvU32 TopLevelGet;        /* top level get offset, read only  0058-005b*/
+    NvU32 TopLevelGetHi;      /* high order top level get bits    005c-005f*/
+    NvU32 GetHi;              /* high order get offset bits       0060-0063*/
+    NvU32 Ignored02[0x007];   /*                                  0064-007f*/
+    NvU32 Ignored03;          /* used to be engine yield          0080-0083*/
+    NvU32 Ignored04[0x001];   /*                                  0084-0087*/
+    NvU32 GPGet;              /* GP FIFO get offset, read only    0088-008b*/
+    NvU32 GPPut;              /* GP FIFO put offset               008c-008f*/
+    NvU32 Ignored05[0x5c];
+} NvA06FControl, KeplerAControlGPFifo;
+/* fields and values */
+#define NVA06F_NUMBER_OF_SUBCHANNELS (8)
+#define NVA06F_SET_OBJECT (0x00000000)
+#define NVA06F_SET_OBJECT_NVCLASS 15:0
+#define NVA06F_SET_OBJECT_ENGINE 20:16
+#define NVA06F_SET_OBJECT_ENGINE_SW 0x0000001f
+#define NVA06F_ILLEGAL (0x00000004)
+#define NVA06F_ILLEGAL_HANDLE 31:0
+#define NVA06F_NOP (0x00000008)
+#define NVA06F_NOP_HANDLE 31:0
+#define NVA06F_SEMAPHOREA (0x00000010)
+#define NVA06F_SEMAPHOREA_OFFSET_UPPER 7:0
+#define NVA06F_SEMAPHOREB (0x00000014)
+#define NVA06F_SEMAPHOREB_OFFSET_LOWER 31:2
+#define NVA06F_SEMAPHOREC (0x00000018)
+#define NVA06F_SEMAPHOREC_PAYLOAD 31:0
+#define NVA06F_SEMAPHORED (0x0000001C)
+#define NVA06F_SEMAPHORED_OPERATION 3:0
+#define NVA06F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001
+#define NVA06F_SEMAPHORED_OPERATION_RELEASE 0x00000002
+#define NVA06F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004
+#define NVA06F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008
+#define NVA06F_SEMAPHORED_ACQUIRE_SWITCH 12:12
+#define NVA06F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000
+#define NVA06F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001
+#define NVA06F_SEMAPHORED_RELEASE_WFI 20:20
+#define NVA06F_SEMAPHORED_RELEASE_WFI_EN 0x00000000
+#define NVA06F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001
+#define NVA06F_SEMAPHORED_RELEASE_SIZE 24:24
+#define NVA06F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000
+#define NVA06F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001
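As a sketch of how a host-semaphore release is typically encoded with the four methods above (nv_push() is a hypothetical helper that appends one method/data pair using the incrementing-method encoding defined later in this file):

    #include <stdint.h>

    void nv_push(uint32_t method, uint32_t data);   /* hypothetical pushbuffer helper */

    /* Release a 4-byte semaphore at gpu_va with the given payload. */
    static void emit_semaphore_release(uint64_t gpu_va, uint32_t payload)
    {
        nv_push(NVA06F_SEMAPHOREA, (uint32_t)(gpu_va >> 32) & 0xff); /* OFFSET_UPPER 7:0   */
        nv_push(NVA06F_SEMAPHOREB, (uint32_t)gpu_va & ~0x3u);        /* OFFSET_LOWER 31:2  */
        nv_push(NVA06F_SEMAPHOREC, payload);                         /* PAYLOAD 31:0       */
        nv_push(NVA06F_SEMAPHORED,                                   /* OPERATION 3:0      */
                (NVA06F_SEMAPHORED_OPERATION_RELEASE << 0) |
                (NVA06F_SEMAPHORED_RELEASE_SIZE_4BYTE << 24));       /* RELEASE_SIZE 24:24 */
    }

Since SEMAPHOREA through SEMAPHORED occupy consecutive method slots (0x0010-0x001C), a real implementation would emit all four data words under a single incrementing-method header with a count of four.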
+#define NVA06F_NON_STALL_INTERRUPT (0x00000020)
+#define NVA06F_NON_STALL_INTERRUPT_HANDLE 31:0
+#define NVA06F_FB_FLUSH (0x00000024)
+#define NVA06F_FB_FLUSH_HANDLE 31:0
+#define NVA06F_MEM_OP_A (0x00000028)
+#define NVA06F_MEM_OP_A_OPERAND_LOW 31:2
+#define NVA06F_MEM_OP_A_TLB_INVALIDATE_ADDR 29:2
+#define NVA06F_MEM_OP_A_TLB_INVALIDATE_TARGET 31:30
+#define NVA06F_MEM_OP_A_TLB_INVALIDATE_TARGET_VID_MEM 0x00000000
+#define NVA06F_MEM_OP_A_TLB_INVALIDATE_TARGET_SYS_MEM_COHERENT 0x00000002
+#define NVA06F_MEM_OP_A_TLB_INVALIDATE_TARGET_SYS_MEM_NONCOHERENT 0x00000003
+#define NVA06F_MEM_OP_B (0x0000002c)
+#define NVA06F_MEM_OP_B_OPERAND_HIGH 7:0
+#define NVA06F_MEM_OP_B_OPERATION 31:27
+#define NVA06F_MEM_OP_B_OPERATION_SYSMEMBAR_FLUSH 0x00000005
+#define NVA06F_MEM_OP_B_OPERATION_SOFT_FLUSH 0x00000006
+#define NVA06F_MEM_OP_B_OPERATION_MMU_TLB_INVALIDATE 0x00000009
+#define NVA06F_MEM_OP_B_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d
+#define NVA06F_MEM_OP_B_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e
+#define NVA06F_MEM_OP_B_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f
+#define NVA06F_MEM_OP_B_OPERATION_L2_FLUSH_DIRTY 0x00000010
+#define NVA06F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB 0:0
+#define NVA06F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB_ONE 0x00000000
+#define NVA06F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB_ALL 0x00000001
+#define NVA06F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC 1:1
+#define NVA06F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC_ENABLE 0x00000000
+#define NVA06F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC_DISABLE 0x00000001
+#define NVA06F_SET_REFERENCE (0x00000050)
+#define NVA06F_SET_REFERENCE_COUNT 31:0
+#define NVA06F_CRC_CHECK (0x0000007c)
+#define NVA06F_CRC_CHECK_VALUE 31:0
+#define NVA06F_YIELD (0x00000080)
+#define NVA06F_YIELD_OP 1:0
+#define NVA06F_YIELD_OP_NOP 0x00000000
+
+
+/* GPFIFO entry format */
+#define NVA06F_GP_ENTRY__SIZE 8
+#define NVA06F_GP_ENTRY0_FETCH 0:0
+#define NVA06F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000
+#define NVA06F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001
+#define NVA06F_GP_ENTRY0_GET 31:2
+#define NVA06F_GP_ENTRY0_OPERAND 31:0
+#define NVA06F_GP_ENTRY1_GET_HI 7:0
+#define NVA06F_GP_ENTRY1_PRIV 8:8
+#define NVA06F_GP_ENTRY1_PRIV_USER 0x00000000
+#define NVA06F_GP_ENTRY1_PRIV_KERNEL 0x00000001
+#define NVA06F_GP_ENTRY1_LEVEL 9:9
+#define NVA06F_GP_ENTRY1_LEVEL_MAIN 0x00000000
+#define NVA06F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001
+#define NVA06F_GP_ENTRY1_LENGTH 30:10
+#define NVA06F_GP_ENTRY1_SYNC 31:31
+#define NVA06F_GP_ENTRY1_SYNC_PROCEED 0x00000000
+#define NVA06F_GP_ENTRY1_SYNC_WAIT 0x00000001
+#define NVA06F_GP_ENTRY1_OPCODE 7:0
+#define NVA06F_GP_ENTRY1_OPCODE_NOP 0x00000000
+#define NVA06F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001
+#define NVA06F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002
+#define NVA06F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003
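Reading the entry defines directly, a sketch of packing one 8-byte GPFIFO entry that points at a pushbuffer segment (field placement follows NVA06F_GP_ENTRY0/1 above; the function name is illustrative):

    #include <stdint.h>

    static void gp_entry_pack(uint32_t entry[2], uint64_t pb_gpu_va, uint32_t length_words)
    {
        /* ENTRY0: GET 31:2 holds pushbuffer address bits 31:2 in place;
         * FETCH 0:0 stays UNCONDITIONAL (0). */
        entry[0] = (uint32_t)pb_gpu_va & ~0x3u;
        /* ENTRY1: GET_HI 7:0 carries address bits 39:32, LENGTH 30:10 the
         * segment size in words; PRIV/LEVEL/SYNC are left at their zero
         * defaults (USER, MAIN, PROCEED). */
        entry[1] = ((uint32_t)(pb_gpu_va >> 32) & 0xff) | (length_words << 10);
    }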
+
+/* dma method formats */
+#define NVA06F_DMA_METHOD_ADDRESS_OLD 12:2
+#define NVA06F_DMA_METHOD_ADDRESS 11:0
+#define NVA06F_DMA_SUBDEVICE_MASK 15:4
+#define NVA06F_DMA_METHOD_SUBCHANNEL 15:13
+#define NVA06F_DMA_TERT_OP 17:16
+#define NVA06F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000)
+#define NVA06F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001)
+#define NVA06F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002)
+#define NVA06F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003)
+#define NVA06F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000)
+#define NVA06F_DMA_METHOD_COUNT_OLD 28:18
+#define NVA06F_DMA_METHOD_COUNT 28:16
+#define NVA06F_DMA_IMMD_DATA 28:16
+#define NVA06F_DMA_SEC_OP 31:29
+#define NVA06F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000)
+#define NVA06F_DMA_SEC_OP_INC_METHOD (0x00000001)
+#define NVA06F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002)
+#define NVA06F_DMA_SEC_OP_NON_INC_METHOD (0x00000003)
+#define NVA06F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004)
+#define NVA06F_DMA_SEC_OP_ONE_INC (0x00000005)
+#define NVA06F_DMA_SEC_OP_RESERVED6 (0x00000006)
+#define NVA06F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007)
+/* dma incrementing method format */
+#define NVA06F_DMA_INCR_ADDRESS 11:0
+#define NVA06F_DMA_INCR_SUBCHANNEL 15:13
+#define NVA06F_DMA_INCR_COUNT 28:16
+#define NVA06F_DMA_INCR_OPCODE 31:29
+#define NVA06F_DMA_INCR_OPCODE_VALUE (0x00000001)
+#define NVA06F_DMA_INCR_DATA 31:0
+/* dma non-incrementing method format */
+#define NVA06F_DMA_NONINCR_ADDRESS 11:0
+#define NVA06F_DMA_NONINCR_SUBCHANNEL 15:13
+#define NVA06F_DMA_NONINCR_COUNT 28:16
+#define NVA06F_DMA_NONINCR_OPCODE 31:29
+#define NVA06F_DMA_NONINCR_OPCODE_VALUE (0x00000003)
+#define NVA06F_DMA_NONINCR_DATA 31:0
+/* dma increment-once method format */
+#define NVA06F_DMA_ONEINCR_ADDRESS 11:0
+#define NVA06F_DMA_ONEINCR_SUBCHANNEL 15:13
+#define NVA06F_DMA_ONEINCR_COUNT 28:16
+#define NVA06F_DMA_ONEINCR_OPCODE 31:29
+#define NVA06F_DMA_ONEINCR_OPCODE_VALUE (0x00000005)
+#define NVA06F_DMA_ONEINCR_DATA 31:0
+/* dma no-operation format */
+#define NVA06F_DMA_NOP (0x00000000)
+/* dma immediate-data format */
+#define NVA06F_DMA_IMMD_ADDRESS 11:0
+#define NVA06F_DMA_IMMD_SUBCHANNEL 15:13
+#define NVA06F_DMA_IMMD_DATA 28:16
+#define NVA06F_DMA_IMMD_OPCODE 31:29
+#define NVA06F_DMA_IMMD_OPCODE_VALUE (0x00000004)
+/* dma set sub-device mask format */
+#define NVA06F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4
+#define NVA06F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16
+#define NVA06F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001)
+/* dma store sub-device mask format */
+#define NVA06F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4
+#define NVA06F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16
+#define NVA06F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002)
+/* dma use sub-device mask format */
+#define NVA06F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16
+#define NVA06F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003)
+/* dma end-segment format */
+#define NVA06F_DMA_ENDSEG_OPCODE 31:29
+#define NVA06F_DMA_ENDSEG_OPCODE_VALUE (0x00000007)
+/* dma legacy incrementing/non-incrementing formats */
+#define NVA06F_DMA_ADDRESS 12:2
+#define NVA06F_DMA_SUBCH 15:13
+#define NVA06F_DMA_OPCODE3 17:16
+#define NVA06F_DMA_OPCODE3_NONE (0x00000000)
+#define NVA06F_DMA_COUNT 28:18
+#define NVA06F_DMA_OPCODE 31:29
+#define NVA06F_DMA_OPCODE_METHOD (0x00000000)
+#define NVA06F_DMA_OPCODE_NONINC_METHOD (0x00000002)
+#define NVA06F_DMA_DATA 31:0
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif /* _clA06F_h_ */
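A sketch of the incrementing-method header word implied by the "dma method formats" block above — method offsets in these headers are byte offsets while the ADDRESS field is in words, hence the shift by 2 (the helper name is illustrative):

    #include <stdint.h>

    static uint32_t inc_method_header(uint32_t subch, uint32_t method, uint32_t count)
    {
        return ((uint32_t)NVA06F_DMA_SEC_OP_INC_METHOD << 29) /* SEC_OP 31:29            */
             | (count << 16)                                  /* METHOD_COUNT 28:16      */
             | (subch << 13)                                  /* METHOD_SUBCHANNEL 15:13 */
             | (method >> 2);                                 /* METHOD_ADDRESS 11:0     */
    }

A NON_INC_METHOD header differs only in the SEC_OP value and makes all `count` data words land on the same method, which is how data streams are fed to a single method slot.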
diff --git a/src/common/sdk/nvidia/inc/class/cla06fsubch.h b/src/common/sdk/nvidia/inc/class/cla06fsubch.h
new file mode 100644
index 0000000..330a681
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cla06fsubch.h
@@ -0,0 +1,33 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cla06fsubch_h_
+#define _cla06fsubch_h_
+
+#define NVA06F_SUBCHANNEL_2D 3
+#define NVA06F_SUBCHANNEL_3D 0
+#define NVA06F_SUBCHANNEL_COMPUTE 1
+#define NVA06F_SUBCHANNEL_COPY_ENGINE 4
+#define NVA06F_SUBCHANNEL_I2M 2
+
+#endif // _cla06fsubch_h_
diff --git a/src/common/sdk/nvidia/inc/class/cla097.h b/src/common/sdk/nvidia/inc/class/cla097.h
new file mode 100644
index 0000000..9ebbcfd
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/cla097.h
@@ -0,0 +1,3817 @@
+/*******************************************************************************
+    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a
+    copy of this software and associated documentation files (the "Software"),
+    to deal in the Software without restriction, including without limitation
+    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+    and/or sell copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+
+#ifndef _cl_kepler_a_h_
+#define _cl_kepler_a_h_
+
+/* AUTO GENERATED FILE -- DO NOT EDIT */
+/* Command: ../../class/bin/sw_header.pl kepler_a */
+
+#include "nvtypes.h"
+
+#define KEPLER_A 0xA097
+
+#define NVA097_SET_OBJECT 0x0000
+#define NVA097_SET_OBJECT_CLASS_ID 15:0
+#define NVA097_SET_OBJECT_ENGINE_ID 20:16
+
+#define NVA097_NO_OPERATION 0x0100
+#define NVA097_NO_OPERATION_V 31:0
+
+#define NVA097_SET_NOTIFY_A 0x0104
+#define NVA097_SET_NOTIFY_A_ADDRESS_UPPER 7:0
+
+#define NVA097_SET_NOTIFY_B 0x0108
+#define NVA097_SET_NOTIFY_B_ADDRESS_LOWER 31:0
+
+#define NVA097_NOTIFY 0x010c
+#define NVA097_NOTIFY_TYPE 31:0
+#define NVA097_NOTIFY_TYPE_WRITE_ONLY 0x00000000
+#define NVA097_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001
+
+#define NVA097_WAIT_FOR_IDLE 0x0110
+#define NVA097_WAIT_FOR_IDLE_V 31:0
+
+#define NVA097_LOAD_MME_INSTRUCTION_RAM_POINTER 0x0114
+#define NVA097_LOAD_MME_INSTRUCTION_RAM_POINTER_V 31:0
+
+#define NVA097_LOAD_MME_INSTRUCTION_RAM 0x0118
+#define NVA097_LOAD_MME_INSTRUCTION_RAM_V 31:0
+
+#define NVA097_LOAD_MME_START_ADDRESS_RAM_POINTER 0x011c
+#define NVA097_LOAD_MME_START_ADDRESS_RAM_POINTER_V 31:0
+
+#define NVA097_LOAD_MME_START_ADDRESS_RAM 0x0120
+#define NVA097_LOAD_MME_START_ADDRESS_RAM_V 31:0
+
+#define NVA097_SET_MME_SHADOW_RAM_CONTROL 0x0124
+#define NVA097_SET_MME_SHADOW_RAM_CONTROL_MODE 1:0
+#define NVA097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK 0x00000000
+#define NVA097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK_WITH_FILTER 0x00000001
+#define NVA097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH 0x00000002
+#define NVA097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_REPLAY 0x00000003
+
+#define NVA097_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER 0x0128
+#define NVA097_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER_V 7:0
+
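Tying the two files together, a sketch of binding KEPLER_A to the conventional 3D subchannel: the first method sent on a subchannel is SET_OBJECT with the class in its low bits (inc_method_header() is the illustrative encoder from the earlier sketch):

    #include <stdint.h>

    static void bind_3d_class(uint32_t pb[2])
    {
        pb[0] = inc_method_header(NVA06F_SUBCHANNEL_3D, NVA06F_SET_OBJECT, 1);
        pb[1] = KEPLER_A;   /* NVA06F_SET_OBJECT_NVCLASS 15:0 */
    }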
+#define NVA097_PEER_SEMAPHORE_RELEASE_OFFSET 0x012c
+#define NVA097_PEER_SEMAPHORE_RELEASE_OFFSET_V 31:0
+
+#define NVA097_SET_GLOBAL_RENDER_ENABLE_A 0x0130
+#define NVA097_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0
+
+#define NVA097_SET_GLOBAL_RENDER_ENABLE_B 0x0134
+#define NVA097_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0
+
+#define NVA097_SET_GLOBAL_RENDER_ENABLE_C 0x0138
+#define NVA097_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0
+#define NVA097_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000
+#define NVA097_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001
+#define NVA097_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002
+#define NVA097_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003
+#define NVA097_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NVA097_SEND_GO_IDLE 0x013c
+#define NVA097_SEND_GO_IDLE_V 31:0
+
+#define NVA097_PM_TRIGGER 0x0140
+#define NVA097_PM_TRIGGER_V 31:0
+
+#define NVA097_PM_TRIGGER_WFI 0x0144
+#define NVA097_PM_TRIGGER_WFI_V 31:0
+
+#define NVA097_SET_INSTRUMENTATION_METHOD_HEADER 0x0150
+#define NVA097_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0
+
+#define NVA097_SET_INSTRUMENTATION_METHOD_DATA 0x0154
+#define NVA097_SET_INSTRUMENTATION_METHOD_DATA_V 31:0
+
+#define NVA097_LINE_LENGTH_IN 0x0180
+#define NVA097_LINE_LENGTH_IN_VALUE 31:0
+
+#define NVA097_LINE_COUNT 0x0184
+#define NVA097_LINE_COUNT_VALUE 31:0
+
+#define NVA097_OFFSET_OUT_UPPER 0x0188
+#define NVA097_OFFSET_OUT_UPPER_VALUE 7:0
+
+#define NVA097_OFFSET_OUT 0x018c
+#define NVA097_OFFSET_OUT_VALUE 31:0
+
+#define NVA097_PITCH_OUT 0x0190
+#define NVA097_PITCH_OUT_VALUE 31:0
+
+#define NVA097_SET_DST_BLOCK_SIZE 0x0194
+#define NVA097_SET_DST_BLOCK_SIZE_WIDTH 3:0
+#define NVA097_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVA097_SET_DST_BLOCK_SIZE_HEIGHT 7:4
+#define NVA097_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVA097_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVA097_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVA097_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVA097_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVA097_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVA097_SET_DST_BLOCK_SIZE_DEPTH 11:8
+#define NVA097_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+#define NVA097_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001
+#define NVA097_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002
+#define NVA097_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003
+#define NVA097_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVA097_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005
+
+#define NVA097_SET_DST_WIDTH 0x0198
+#define NVA097_SET_DST_WIDTH_V 31:0
+
+#define NVA097_SET_DST_HEIGHT 0x019c
+#define NVA097_SET_DST_HEIGHT_V 31:0
+
+#define NVA097_SET_DST_DEPTH 0x01a0
+#define NVA097_SET_DST_DEPTH_V 31:0
+
+#define NVA097_SET_DST_LAYER 0x01a4
+#define NVA097_SET_DST_LAYER_V 31:0
+
+#define NVA097_SET_DST_ORIGIN_BYTES_X 0x01a8
+#define NVA097_SET_DST_ORIGIN_BYTES_X_V 19:0
+
+#define NVA097_SET_DST_ORIGIN_SAMPLES_Y 0x01ac
+#define NVA097_SET_DST_ORIGIN_SAMPLES_Y_V 15:0
+
+#define NVA097_LAUNCH_DMA 0x01b0
+#define NVA097_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0
+#define NVA097_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NVA097_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001
+#define NVA097_LAUNCH_DMA_COMPLETION_TYPE 5:4
+#define NVA097_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000
+#define NVA097_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001
+#define NVA097_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002
+#define NVA097_LAUNCH_DMA_INTERRUPT_TYPE 9:8
+#define NVA097_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000
+#define NVA097_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001
+#define NVA097_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12
+#define NVA097_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000
+#define NVA097_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001
+#define NVA097_LAUNCH_DMA_REDUCTION_ENABLE 1:1
+#define NVA097_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000
+#define NVA097_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001
+#define NVA097_LAUNCH_DMA_REDUCTION_OP 15:13
+#define NVA097_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000
+#define NVA097_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001
+#define NVA097_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002
+#define NVA097_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003
+#define NVA097_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004
+#define NVA097_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005
+#define NVA097_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006
+#define NVA097_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007
+#define NVA097_LAUNCH_DMA_REDUCTION_FORMAT 3:2
+#define NVA097_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000
+#define NVA097_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001
+#define NVA097_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6
+#define NVA097_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000
+#define NVA097_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001
+
+#define NVA097_LOAD_INLINE_DATA 0x01b4
+#define NVA097_LOAD_INLINE_DATA_V 31:0
+
+#define NVA097_SET_I2M_SEMAPHORE_A 0x01dc
+#define NVA097_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0
+
+#define NVA097_SET_I2M_SEMAPHORE_B 0x01e0
+#define NVA097_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0
+
+#define NVA097_SET_I2M_SEMAPHORE_C 0x01e4
+#define NVA097_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0
+
+#define NVA097_SET_I2M_SPARE_NOOP00 0x01f0
+#define NVA097_SET_I2M_SPARE_NOOP00_V 31:0
+
+#define NVA097_SET_I2M_SPARE_NOOP01 0x01f4
+#define NVA097_SET_I2M_SPARE_NOOP01_V 31:0
+
+#define NVA097_SET_I2M_SPARE_NOOP02 0x01f8
+#define NVA097_SET_I2M_SPARE_NOOP02_V 31:0
+
+#define NVA097_SET_I2M_SPARE_NOOP03 0x01fc
+#define NVA097_SET_I2M_SPARE_NOOP03_V 31:0
+
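The methods from LINE_LENGTH_IN through LOAD_INLINE_DATA form the inline-to-memory (I2M) upload path: program the destination and extent, fire LAUNCH_DMA, then stream the payload through LOAD_INLINE_DATA. A sketch of the typical sequence for a pitch-linear destination, reusing the hypothetical nv_push() and FLD_NUM helpers from the earlier sketches:

    #include <stdint.h>

    void nv_push(uint32_t method, uint32_t data);   /* hypothetical helper */

    static void i2m_upload_pitch(uint64_t dst_va, uint32_t pitch,
                                 uint32_t width_bytes, uint32_t lines,
                                 const uint32_t *payload, uint32_t n_words)
    {
        uint32_t i;
        nv_push(NVA097_LINE_LENGTH_IN, width_bytes);
        nv_push(NVA097_LINE_COUNT, lines);
        nv_push(NVA097_OFFSET_OUT_UPPER, (uint32_t)(dst_va >> 32) & 0xff);
        nv_push(NVA097_OFFSET_OUT, (uint32_t)dst_va);
        nv_push(NVA097_PITCH_OUT, pitch);
        nv_push(NVA097_LAUNCH_DMA,
                FLD_NUM(NVA097_LAUNCH_DMA_DST_MEMORY_LAYOUT,
                        NVA097_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH) |
                FLD_NUM(NVA097_LAUNCH_DMA_COMPLETION_TYPE,
                        NVA097_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY));
        /* Payload words would normally go under one non-incrementing header. */
        for (i = 0; i < n_words; i++)
            nv_push(NVA097_LOAD_INLINE_DATA, payload[i]);
    }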
+#define NVA097_RUN_DS_NOW 0x0200
+#define NVA097_RUN_DS_NOW_V 31:0
+
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS 0x0204
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD 4:0
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_INSTANTANEOUS 0x00000000
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16 0x00000001
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32 0x00000002
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__64 0x00000003
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__128 0x00000004
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__256 0x00000005
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__512 0x00000006
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1024 0x00000007
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2048 0x00000008
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4096 0x00000009
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__8192 0x0000000A
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16384 0x0000000B
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32768 0x0000000C
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__65536 0x0000000D
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__131072 0x0000000E
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__262144 0x0000000F
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__524288 0x00000010
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1048576 0x00000011
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2097152 0x00000012
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4194304 0x00000013
+#define NVA097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_LATEZ_ALWAYS 0x0000001F
+
+#define NVA097_SET_RASTER_PIPE_SYNC_CONTROL 0x0208
+#define NVA097_SET_RASTER_PIPE_SYNC_CONTROL_PRIM_AREA_THRESHOLD 21:0
+#define NVA097_SET_RASTER_PIPE_SYNC_CONTROL_ENABLE 24:24
+#define NVA097_SET_RASTER_PIPE_SYNC_CONTROL_ENABLE_FALSE 0x00000000
+#define NVA097_SET_RASTER_PIPE_SYNC_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_ALIASED_LINE_WIDTH_ENABLE 0x020c
+#define NVA097_SET_ALIASED_LINE_WIDTH_ENABLE_V 0:0
+#define NVA097_SET_ALIASED_LINE_WIDTH_ENABLE_V_FALSE 0x00000000
+#define NVA097_SET_ALIASED_LINE_WIDTH_ENABLE_V_TRUE 0x00000001
+
+#define NVA097_SET_API_MANDATED_EARLY_Z 0x0210
+#define NVA097_SET_API_MANDATED_EARLY_Z_ENABLE 0:0
+#define NVA097_SET_API_MANDATED_EARLY_Z_ENABLE_FALSE 0x00000000
+#define NVA097_SET_API_MANDATED_EARLY_Z_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_GS_DM_FIFO 0x0214
+#define NVA097_SET_GS_DM_FIFO_SIZE_RASTER_ON 12:0
+#define NVA097_SET_GS_DM_FIFO_SIZE_RASTER_OFF 28:16
+#define NVA097_SET_GS_DM_FIFO_SPILL_ENABLED 31:31
+#define NVA097_SET_GS_DM_FIFO_SPILL_ENABLED_FALSE 0x00000000
+#define NVA097_SET_GS_DM_FIFO_SPILL_ENABLED_TRUE 0x00000001
+
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS 0x0218
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY 5:4
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVA097_INVALIDATE_SHADER_CACHES 0x021c
+#define NVA097_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0
+#define NVA097_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000
+#define NVA097_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001
+#define NVA097_INVALIDATE_SHADER_CACHES_DATA 4:4
+#define NVA097_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000
+#define NVA097_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001
+#define NVA097_INVALIDATE_SHADER_CACHES_CONSTANT 12:12
+#define NVA097_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000
+#define NVA097_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001
+#define NVA097_INVALIDATE_SHADER_CACHES_LOCKS 1:1
+#define NVA097_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000
+#define NVA097_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001
+#define NVA097_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2
+#define NVA097_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000
+#define NVA097_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001
+
+#define NVA097_SET_VAB_VERTEX3F(i) (0x0220+(i)*4)
+#define NVA097_SET_VAB_VERTEX3F_V 31:0
+
+#define NVA097_SET_VAB_VERTEX4F(i) (0x0230+(i)*4)
+#define NVA097_SET_VAB_VERTEX4F_V 31:0
+
+#define NVA097_SET_VAB_NORMAL3F(i) (0x0240+(i)*4)
+#define NVA097_SET_VAB_NORMAL3F_V 31:0
+
+#define NVA097_SET_VAB_COLOR3F(i) (0x0250+(i)*4)
+#define NVA097_SET_VAB_COLOR3F_V 31:0
+
+#define NVA097_SET_VAB_COLOR4F(i) (0x0260+(i)*4)
+#define NVA097_SET_VAB_COLOR4F_V 31:0
+
+#define NVA097_SET_VAB_COLOR4UB(i) (0x0270+(i)*4)
+#define NVA097_SET_VAB_COLOR4UB_V 31:0
+
+#define NVA097_SET_VAB_TEX_COORD1F(i) (0x0280+(i)*4)
+#define NVA097_SET_VAB_TEX_COORD1F_V 31:0
+
+#define NVA097_SET_VAB_TEX_COORD2F(i) (0x0290+(i)*4)
+#define NVA097_SET_VAB_TEX_COORD2F_V 31:0
+
+#define NVA097_SET_VAB_TEX_COORD3F(i) (0x02a0+(i)*4)
+#define NVA097_SET_VAB_TEX_COORD3F_V 31:0
+
+#define NVA097_SET_VAB_TEX_COORD4F(i) (0x02b0+(i)*4)
+#define NVA097_SET_VAB_TEX_COORD4F_V 31:0
+
+#define NVA097_SET_TASK_CIRCULAR_BUFFER_THROTTLE 0x02cc
+#define NVA097_SET_TASK_CIRCULAR_BUFFER_THROTTLE_TASK_COUNT 21:0
+
+#define NVA097_SET_PRIM_CIRCULAR_BUFFER_THROTTLE 0x02d0
+#define NVA097_SET_PRIM_CIRCULAR_BUFFER_THROTTLE_PRIM_AREA 21:0
+
+#define NVA097_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE 0x02d4
+#define NVA097_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE_V 0:0
+
+#define NVA097_SET_SURFACE_CLIP_ID_BLOCK_SIZE 0x02d8
+#define NVA097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH 3:0
+#define NVA097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVA097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT 7:4
+#define NVA097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVA097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVA097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVA097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVA097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVA097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVA097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH 11:8
+#define NVA097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+
+#define NVA097_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dc
+#define NVA097_SET_ALPHA_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 9:0
+
+#define NVA097_SET_ZCULL_ROP_BYPASS 0x02e4
+#define NVA097_SET_ZCULL_ROP_BYPASS_ENABLE 0:0
+#define NVA097_SET_ZCULL_ROP_BYPASS_ENABLE_FALSE 0x00000000
+#define NVA097_SET_ZCULL_ROP_BYPASS_ENABLE_TRUE 0x00000001
+#define NVA097_SET_ZCULL_ROP_BYPASS_NO_STALL 4:4
+#define NVA097_SET_ZCULL_ROP_BYPASS_NO_STALL_FALSE 0x00000000
+#define NVA097_SET_ZCULL_ROP_BYPASS_NO_STALL_TRUE 0x00000001
+#define NVA097_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING 8:8
+#define NVA097_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_FALSE 0x00000000
+#define NVA097_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_TRUE 0x00000001
+#define NVA097_SET_ZCULL_ROP_BYPASS_THRESHOLD 15:12
+
+#define NVA097_SET_ZCULL_SUBREGION 0x02e8
+#define NVA097_SET_ZCULL_SUBREGION_ENABLE 0:0
+#define NVA097_SET_ZCULL_SUBREGION_ENABLE_FALSE 0x00000000
+#define NVA097_SET_ZCULL_SUBREGION_ENABLE_TRUE 0x00000001
+#define NVA097_SET_ZCULL_SUBREGION_NORMALIZED_ALIQUOTS 27:4
+
+#define NVA097_SET_RASTER_BOUNDING_BOX 0x02ec
+#define NVA097_SET_RASTER_BOUNDING_BOX_MODE 0:0
+#define NVA097_SET_RASTER_BOUNDING_BOX_MODE_BOUNDING_BOX 0x00000000
+#define NVA097_SET_RASTER_BOUNDING_BOX_MODE_FULL_VIEWPORT 0x00000001
+#define NVA097_SET_RASTER_BOUNDING_BOX_PAD 11:4
+
+#define NVA097_PEER_SEMAPHORE_RELEASE 0x02f0
+#define NVA097_PEER_SEMAPHORE_RELEASE_V 31:0
+
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION 0x02f8
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_SUBREGION_ID 7:0
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_ALIQUOTS 23:8
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT 27:24
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16X2_4X4 0x00000000
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X16_4X4 0x00000001
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X2 0x00000002
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_2X4 0x00000003
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X8_4X4 0x00000004
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_4X2 0x00000005
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_2X4 0x00000006
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X8 0x00000007
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_2X2 0x00000008
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_4X2 0x00000009
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_2X4 0x0000000A
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_8X8_2X2 0x0000000B
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_1X1 0x0000000C
+#define NVA097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_NONE 0x0000000F
+
+#define NVA097_ASSIGN_ZCULL_SUBREGIONS 0x02fc
+#define NVA097_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM 1:0
+#define NVA097_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Static 0x00000000
+#define NVA097_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Adaptive 0x00000001
+
+#define NVA097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE 0x0300
+#define NVA097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE 0:0
+#define NVA097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_FALSE 0x00000000
+#define NVA097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_TRUE 0x00000001
+#define NVA097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE 1:1
+#define NVA097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000
+#define NVA097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001
+
+#define NVA097_DRAW_ZERO_INDEX 0x0304
+#define NVA097_DRAW_ZERO_INDEX_COUNT 31:0
+
+#define NVA097_SET_L1_CONFIGURATION 0x0308
+#define NVA097_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY 2:0
+#define NVA097_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_16KB 0x00000001
+#define NVA097_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB 0x00000003
+
+#define NVA097_SET_RENDER_ENABLE_CONTROL 0x030c
+#define NVA097_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER 0:0
+#define NVA097_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_FALSE 0x00000000
+#define NVA097_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_TRUE 0x00000001
+
+#define NVA097_SET_SPA_VERSION 0x0310
+#define NVA097_SET_SPA_VERSION_MINOR 7:0
+#define NVA097_SET_SPA_VERSION_MAJOR 15:8
+
+#define NVA097_SET_IEEE_CLEAN_UPDATE 0x0314
+#define NVA097_SET_IEEE_CLEAN_UPDATE_ENABLE 0:0
+#define NVA097_SET_IEEE_CLEAN_UPDATE_ENABLE_FALSE 0x00000000
+#define NVA097_SET_IEEE_CLEAN_UPDATE_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_SNAP_GRID_LINE 0x0318
+#define NVA097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL 3:0
+#define NVA097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001
+#define NVA097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002
+#define NVA097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003
+#define NVA097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004
+#define NVA097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005
+#define NVA097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006
+#define NVA097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007
+#define NVA097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008
+#define NVA097_SET_SNAP_GRID_LINE_ROUNDING_MODE 8:8
+#define NVA097_SET_SNAP_GRID_LINE_ROUNDING_MODE_RTNE 0x00000000
+#define NVA097_SET_SNAP_GRID_LINE_ROUNDING_MODE_TESLA 0x00000001
+
+#define NVA097_SET_SNAP_GRID_NON_LINE 0x031c
+#define NVA097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL 3:0
+#define NVA097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001
+#define NVA097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002
+#define NVA097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003
+#define NVA097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004
+#define NVA097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005
+#define NVA097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006
+#define NVA097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007
+#define NVA097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008
+#define NVA097_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE 8:8
+#define NVA097_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_RTNE 0x00000000
+#define NVA097_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_TESLA 0x00000001
+
+#define NVA097_SET_TESSELLATION_PARAMETERS 0x0320
+#define NVA097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE 1:0
+#define NVA097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_ISOLINE 0x00000000
+#define NVA097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_TRIANGLE 0x00000001
+#define NVA097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_QUAD 0x00000002
+#define NVA097_SET_TESSELLATION_PARAMETERS_SPACING 5:4
+#define NVA097_SET_TESSELLATION_PARAMETERS_SPACING_INTEGER 0x00000000
+#define NVA097_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_ODD 0x00000001
+#define NVA097_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_EVEN 0x00000002
+#define NVA097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES 9:8
+#define NVA097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_POINTS 0x00000000
+#define NVA097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_LINES 0x00000001
+#define NVA097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CW 0x00000002
+#define NVA097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CCW 0x00000003
+
+#define NVA097_SET_TESSELLATION_LOD_U0_OR_DENSITY 0x0324
+#define NVA097_SET_TESSELLATION_LOD_U0_OR_DENSITY_V 31:0
+
+#define NVA097_SET_TESSELLATION_LOD_V0_OR_DETAIL 0x0328
+#define NVA097_SET_TESSELLATION_LOD_V0_OR_DETAIL_V 31:0
+
+#define NVA097_SET_TESSELLATION_LOD_U1_OR_W0 0x032c
+#define NVA097_SET_TESSELLATION_LOD_U1_OR_W0_V 31:0
+
+#define NVA097_SET_TESSELLATION_LOD_V1 0x0330
+#define NVA097_SET_TESSELLATION_LOD_V1_V 31:0
+
+#define NVA097_SET_TG_LOD_INTERIOR_U 0x0334
+#define NVA097_SET_TG_LOD_INTERIOR_U_V 31:0
+
+#define NVA097_SET_TG_LOD_INTERIOR_V 0x0338
+#define NVA097_SET_TG_LOD_INTERIOR_V_V 31:0
+
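For instance, composing SET_TESSELLATION_PARAMETERS for a quad domain with fractional-even spacing emitting CCW triangles — FLD_NUM and nv_push() are the illustrative helpers from the earlier sketches, not part of this patch:

    static void set_quad_tessellation(void)
    {
        nv_push(NVA097_SET_TESSELLATION_PARAMETERS,
                FLD_NUM(NVA097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE,
                        NVA097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_QUAD) |
                FLD_NUM(NVA097_SET_TESSELLATION_PARAMETERS_SPACING,
                        NVA097_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_EVEN) |
                FLD_NUM(NVA097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES,
                        NVA097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CCW));
    }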
+#define NVA097_RESERVED_TG07 0x033c
+#define NVA097_RESERVED_TG07_V 0:0
+
+#define NVA097_RESERVED_TG08 0x0340
+#define NVA097_RESERVED_TG08_V 0:0
+
+#define NVA097_RESERVED_TG09 0x0344
+#define NVA097_RESERVED_TG09_V 0:0
+
+#define NVA097_RESERVED_TG10 0x0348
+#define NVA097_RESERVED_TG10_V 0:0
+
+#define NVA097_RESERVED_TG11 0x034c
+#define NVA097_RESERVED_TG11_V 0:0
+
+#define NVA097_RESERVED_TG12 0x0350
+#define NVA097_RESERVED_TG12_V 0:0
+
+#define NVA097_RESERVED_TG13 0x0354
+#define NVA097_RESERVED_TG13_V 0:0
+
+#define NVA097_RESERVED_TG14 0x0358
+#define NVA097_RESERVED_TG14_V 0:0
+
+#define NVA097_RESERVED_TG15 0x035c
+#define NVA097_RESERVED_TG15_V 0:0
+
+#define NVA097_SET_SUBTILING_PERF_KNOB_A 0x0360
+#define NVA097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_REGISTER_FILE_PER_SUBTILE 7:0
+#define NVA097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_PIXEL_OUTPUT_BUFFER_PER_SUBTILE 15:8
+#define NVA097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_TRIANGLE_RAM_PER_SUBTILE 23:16
+#define NVA097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_MAX_QUADS_PER_SUBTILE 31:24
+
+#define NVA097_SET_SUBTILING_PERF_KNOB_B 0x0364
+#define NVA097_SET_SUBTILING_PERF_KNOB_B_FRACTION_OF_MAX_PRIMITIVES_PER_SUBTILE 7:0
+
+#define NVA097_SET_SUBTILING_PERF_KNOB_C 0x0368
+#define NVA097_SET_SUBTILING_PERF_KNOB_C_RESERVED 0:0
+
+#define NVA097_SET_ZCULL_SUBREGION_TO_REPORT 0x036c
+#define NVA097_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE 0:0
+#define NVA097_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_FALSE 0x00000000
+#define NVA097_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_TRUE 0x00000001
+#define NVA097_SET_ZCULL_SUBREGION_TO_REPORT_SUBREGION_ID 11:4
+
+#define NVA097_SET_ZCULL_SUBREGION_REPORT_TYPE 0x0370
+#define NVA097_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE 0:0
+#define NVA097_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_FALSE 0x00000000
+#define NVA097_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_TRUE 0x00000001
+#define NVA097_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE 6:4
+#define NVA097_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST 0x00000000
+#define NVA097_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_NO_ACCEPT 0x00000001
+#define NVA097_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_LATE_Z 0x00000002
+#define NVA097_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_STENCIL_TEST 0x00000003
+
+#define NVA097_SET_BALANCED_PRIMITIVE_WORKLOAD 0x0374
+#define NVA097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE 0:0
+#define NVA097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_FALSE 0x00000000
+#define NVA097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_TRUE 0x00000001
+#define NVA097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE 4:4
+#define NVA097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_FALSE 0x00000000
+#define NVA097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_TRUE 0x00000001
+
+#define NVA097_SET_MAX_PATCHES_PER_BATCH 0x0378
+#define NVA097_SET_MAX_PATCHES_PER_BATCH_V 5:0
+
+#define NVA097_SET_RASTER_ENABLE 0x037c
+#define NVA097_SET_RASTER_ENABLE_V 0:0
+#define NVA097_SET_RASTER_ENABLE_V_FALSE 0x00000000
+#define NVA097_SET_RASTER_ENABLE_V_TRUE 0x00000001
+
+#define NVA097_SET_STREAM_OUT_BUFFER_ENABLE(j) (0x0380+(j)*32)
+#define NVA097_SET_STREAM_OUT_BUFFER_ENABLE_V 0:0
+#define NVA097_SET_STREAM_OUT_BUFFER_ENABLE_V_FALSE 0x00000000
+#define NVA097_SET_STREAM_OUT_BUFFER_ENABLE_V_TRUE 0x00000001
+
+#define NVA097_SET_STREAM_OUT_BUFFER_ADDRESS_A(j) (0x0384+(j)*32)
+#define NVA097_SET_STREAM_OUT_BUFFER_ADDRESS_A_UPPER 7:0
+
+#define NVA097_SET_STREAM_OUT_BUFFER_ADDRESS_B(j) (0x0388+(j)*32)
+#define NVA097_SET_STREAM_OUT_BUFFER_ADDRESS_B_LOWER 31:0
+
+#define NVA097_SET_STREAM_OUT_BUFFER_SIZE(j) (0x038c+(j)*32)
+#define NVA097_SET_STREAM_OUT_BUFFER_SIZE_BYTES 31:0
+
+#define NVA097_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER(j) (0x0390+(j)*32)
+#define NVA097_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER_START_OFFSET 31:0
+
+#define NVA097_SET_VAB_DATA_TYPELESS(i) (0x0400+(i)*4)
+#define NVA097_SET_VAB_DATA_TYPELESS_V 31:0
+
+#define NVA097_SET_STREAM_OUT_CONTROL_STREAM(j) (0x0700+(j)*16)
+#define NVA097_SET_STREAM_OUT_CONTROL_STREAM_SELECT 1:0
+
+#define NVA097_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT(j) (0x0704+(j)*16)
+#define NVA097_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT_MAX 7:0
+
+#define NVA097_SET_STREAM_OUT_CONTROL_STRIDE(j) (0x0708+(j)*16)
+#define NVA097_SET_STREAM_OUT_CONTROL_STRIDE_BYTES 31:0
+
+#define NVA097_SET_RASTER_INPUT 0x0740
+#define NVA097_SET_RASTER_INPUT_STREAM_SELECT 1:0
+
+#define NVA097_SET_STREAM_OUTPUT 0x0744
+#define NVA097_SET_STREAM_OUTPUT_ENABLE 0:0
+#define NVA097_SET_STREAM_OUTPUT_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STREAM_OUTPUT_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE 0x0748
+#define NVA097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE 0:0
+#define NVA097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_FALSE 0x00000000
+#define NVA097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_ALPHA_FRACTION 0x074c
+#define NVA097_SET_ALPHA_FRACTION_V 7:0
+
+#define NVA097_SET_HYBRID_ANTI_ALIAS_CONTROL 0x0754
+#define NVA097_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES 3:0
+#define NVA097_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID 4:4
+#define NVA097_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_FRAGMENT 0x00000000
+#define NVA097_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_PASS 0x00000001
+
+#define NVA097_SET_MAX_TI_WARPS_PER_BATCH 0x075c
+#define NVA097_SET_MAX_TI_WARPS_PER_BATCH_V 5:0
+
+#define NVA097_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c
+#define NVA097_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0
+
+#define NVA097_SET_SHADER_LOCAL_MEMORY_A 0x0790
+#define NVA097_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0
+
+#define NVA097_SET_SHADER_LOCAL_MEMORY_B 0x0794
+#define NVA097_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0
+
+#define NVA097_SET_SHADER_LOCAL_MEMORY_C 0x0798
+#define NVA097_SET_SHADER_LOCAL_MEMORY_C_SIZE_UPPER 5:0
+
+#define NVA097_SET_SHADER_LOCAL_MEMORY_D 0x079c
+#define NVA097_SET_SHADER_LOCAL_MEMORY_D_SIZE_LOWER 31:0
+
+#define NVA097_SET_SHADER_LOCAL_MEMORY_E 0x07a0
+#define NVA097_SET_SHADER_LOCAL_MEMORY_E_DEFAULT_SIZE_PER_WARP 25:0
+
+#define NVA097_SET_COLOR_ZERO_BANDWIDTH_CLEAR 0x07a4
+#define NVA097_SET_COLOR_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVA097_SET_Z_ZERO_BANDWIDTH_CLEAR 0x07a8
+#define NVA097_SET_Z_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVA097_SET_ISBE_SAVE_RESTORE_PROGRAM 0x07ac
+#define NVA097_SET_ISBE_SAVE_RESTORE_PROGRAM_OFFSET 31:0
+
+#define NVA097_SET_VAB_VERTEX2F(i) (0x07b0+(i)*4)
+#define NVA097_SET_VAB_VERTEX2F_V 31:0
+
+#define NVA097_SET_ZCULL_REGION_SIZE_A 0x07c0
+#define NVA097_SET_ZCULL_REGION_SIZE_A_WIDTH 15:0
+
+#define NVA097_SET_ZCULL_REGION_SIZE_B 0x07c4
+#define NVA097_SET_ZCULL_REGION_SIZE_B_HEIGHT 15:0
+
+#define NVA097_SET_ZCULL_REGION_SIZE_C 0x07c8
+#define NVA097_SET_ZCULL_REGION_SIZE_C_DEPTH 15:0
+
+#define NVA097_SET_ZCULL_REGION_PIXEL_OFFSET_C 0x07cc
+#define NVA097_SET_ZCULL_REGION_PIXEL_OFFSET_C_DEPTH 15:0
+
+#define NVA097_SET_CULL_BEFORE_FETCH 0x07dc
+#define NVA097_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE 0:0
+#define NVA097_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_FALSE 0x00000000
+#define NVA097_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_TRUE 0x00000001
+
+#define NVA097_SET_ZCULL_REGION_LOCATION 0x07e0
+#define NVA097_SET_ZCULL_REGION_LOCATION_START_ALIQUOT 15:0
+#define NVA097_SET_ZCULL_REGION_LOCATION_ALIQUOT_COUNT 31:16
+
+#define NVA097_SET_ZCULL_REGION_ALIQUOTS 0x07e4
+#define NVA097_SET_ZCULL_REGION_ALIQUOTS_PER_LAYER 15:0
+
+#define NVA097_SET_ZCULL_STORAGE_A 0x07e8
+#define NVA097_SET_ZCULL_STORAGE_A_ADDRESS_UPPER 7:0
+
+#define NVA097_SET_ZCULL_STORAGE_B 0x07ec
+#define NVA097_SET_ZCULL_STORAGE_B_ADDRESS_LOWER 31:0
+
+#define NVA097_SET_ZCULL_STORAGE_C 0x07f0
+#define NVA097_SET_ZCULL_STORAGE_C_LIMIT_ADDRESS_UPPER 7:0
+
+#define NVA097_SET_ZCULL_STORAGE_D 0x07f4
+#define NVA097_SET_ZCULL_STORAGE_D_LIMIT_ADDRESS_LOWER 31:0
+
+#define NVA097_SET_ZT_READ_ONLY 0x07f8
+#define NVA097_SET_ZT_READ_ONLY_ENABLE_Z 0:0
+#define NVA097_SET_ZT_READ_ONLY_ENABLE_Z_FALSE 0x00000000
+#define NVA097_SET_ZT_READ_ONLY_ENABLE_Z_TRUE 0x00000001
+#define NVA097_SET_ZT_READ_ONLY_ENABLE_STENCIL 4:4
+#define NVA097_SET_ZT_READ_ONLY_ENABLE_STENCIL_FALSE 0x00000000
+#define NVA097_SET_ZT_READ_ONLY_ENABLE_STENCIL_TRUE 0x00000001
+
+#define NVA097_SET_TEXTURE_INSTRUCTION_OPERAND 0x07fc
+#define NVA097_SET_TEXTURE_INSTRUCTION_OPERAND_ORDERING 0:0
+#define NVA097_SET_TEXTURE_INSTRUCTION_OPERAND_ORDERING_FERMI_ORDER 0x00000000
+#define NVA097_SET_TEXTURE_INSTRUCTION_OPERAND_ORDERING_KEPLER_ORDER 0x00000001
+
+#define NVA097_SET_COLOR_TARGET_A(j) (0x0800+(j)*64)
+#define NVA097_SET_COLOR_TARGET_A_OFFSET_UPPER 7:0
+
+#define NVA097_SET_COLOR_TARGET_B(j) (0x0804+(j)*64)
+#define NVA097_SET_COLOR_TARGET_B_OFFSET_LOWER 31:0
+
+#define NVA097_SET_COLOR_TARGET_WIDTH(j) (0x0808+(j)*64)
+#define NVA097_SET_COLOR_TARGET_WIDTH_V 27:0
+
+#define NVA097_SET_COLOR_TARGET_HEIGHT(j) (0x080c+(j)*64)
+#define NVA097_SET_COLOR_TARGET_HEIGHT_V 16:0
+
+#define NVA097_SET_COLOR_TARGET_FORMAT(j) (0x0810+(j)*64)
+#define NVA097_SET_COLOR_TARGET_FORMAT_V 7:0
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_DISABLED 0x00000000
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_AS32 0x000000C1
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_AU32 0x000000C2
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_X32 0x000000C4
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_X32 0x000000C5
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_R16_G16_B16_A16 0x000000C6
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RS16_GS16_BS16_AS16 0x000000C8
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RU16_GU16_BU16_AU16 0x000000C9
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RF32_GF32 0x000000CB
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RS32_GS32 0x000000CC
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RU32_GU32 0x000000CD
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_A8R8G8B8 0x000000CF
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_A8RL8GL8BL8 0x000000D0
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_A2B10G10R10 0x000000D1
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_AU2BU10GU10RU10 0x000000D2
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_A8B8G8R8 0x000000D5
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_A8BL8GL8RL8 0x000000D6
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_AN8BN8GN8RN8 0x000000D7
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_AS8BS8GS8RS8 0x000000D8
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_AU8BU8GU8RU8 0x000000D9
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_R16_G16 0x000000DA
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RN16_GN16 0x000000DB
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RS16_GS16 0x000000DC
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RU16_GU16 0x000000DD
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RF16_GF16 0x000000DE
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_A2R10G10B10 0x000000DF
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_BF10GF11RF11 0x000000E0
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RS32 0x000000E3
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RU32 0x000000E4
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RF32 0x000000E5
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_X8R8G8B8 0x000000E6
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_X8RL8GL8BL8 0x000000E7
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_R5G6B5 0x000000E8
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_A1R5G5B5 0x000000E9
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_G8R8 0x000000EA
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_GN8RN8 0x000000EB
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_GS8RS8 0x000000EC
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_GU8RU8 0x000000ED
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_R16 0x000000EE
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RN16 0x000000EF
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RS16 0x000000F0
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RU16 0x000000F1
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RF16 0x000000F2
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_R8 0x000000F3
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RN8 0x000000F4
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RS8 0x000000F5
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RU8 0x000000F6
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_A8 0x000000F7
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_X1R5G5B5 0x000000F8
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_X8B8G8R8 0x000000F9
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_X8BL8GL8RL8 0x000000FA
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_Z1R5G5B5 0x000000FB
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_O1R5G5B5 0x000000FC
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_Z8R8G8B8 0x000000FD
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_O8R8G8B8 0x000000FE
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_R32 0x000000FF
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_A16 0x00000040
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_AF16 0x00000041
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_AF32 0x00000042
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_A8R8 0x00000043
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_R16_A16 0x00000044
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RF16_AF16 0x00000045
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_RF32_AF32 0x00000046
+#define NVA097_SET_COLOR_TARGET_FORMAT_V_B8G8R8A8 0x00000047
+
+#define NVA097_SET_COLOR_TARGET_MEMORY(j) (0x0814+(j)*64)
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH 3:0
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH_ONE_GOB 0x00000000
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT 7:4
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_ONE_GOB 0x00000000
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_TWO_GOBS 0x00000001
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_FOUR_GOBS 0x00000002
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH 11:8
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_ONE_GOB 0x00000000
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_TWO_GOBS 0x00000001
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_FOUR_GOBS 0x00000002
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_EIGHT_GOBS 0x00000003
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVA097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005
+#define NVA097_SET_COLOR_TARGET_MEMORY_LAYOUT 12:12
+#define NVA097_SET_COLOR_TARGET_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NVA097_SET_COLOR_TARGET_MEMORY_LAYOUT_PITCH 0x00000001
+#define NVA097_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL 16:16
+#define NVA097_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000
+#define NVA097_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_DEPTH_SIZE 0x00000001
+
+#define NVA097_SET_COLOR_TARGET_THIRD_DIMENSION(j) (0x0818+(j)*64)
+#define NVA097_SET_COLOR_TARGET_THIRD_DIMENSION_V 27:0
+
+#define NVA097_SET_COLOR_TARGET_ARRAY_PITCH(j) (0x081c+(j)*64)
+#define NVA097_SET_COLOR_TARGET_ARRAY_PITCH_V 31:0
+
+#define NVA097_SET_COLOR_TARGET_LAYER(j) (0x0820+(j)*64)
+#define NVA097_SET_COLOR_TARGET_LAYER_OFFSET 15:0
+
+#define NVA097_SET_COLOR_TARGET_MARK(j) (0x0824+(j)*64)
+#define NVA097_SET_COLOR_TARGET_MARK_IEEE_CLEAN 0:0
+#define NVA097_SET_COLOR_TARGET_MARK_IEEE_CLEAN_FALSE 0x00000000
+#define NVA097_SET_COLOR_TARGET_MARK_IEEE_CLEAN_TRUE 0x00000001
+
+#define NVA097_SET_VIEWPORT_SCALE_X(j) (0x0a00+(j)*32)
+#define NVA097_SET_VIEWPORT_SCALE_X_V 31:0
+
+#define NVA097_SET_VIEWPORT_SCALE_Y(j) (0x0a04+(j)*32)
+#define NVA097_SET_VIEWPORT_SCALE_Y_V 31:0
+
+#define NVA097_SET_VIEWPORT_SCALE_Z(j) (0x0a08+(j)*32)
+#define NVA097_SET_VIEWPORT_SCALE_Z_V 31:0
+
+#define NVA097_SET_VIEWPORT_OFFSET_X(j) (0x0a0c+(j)*32)
+#define NVA097_SET_VIEWPORT_OFFSET_X_V 31:0
+
+#define NVA097_SET_VIEWPORT_OFFSET_Y(j) (0x0a10+(j)*32)
+#define NVA097_SET_VIEWPORT_OFFSET_Y_V 31:0
+
+#define NVA097_SET_VIEWPORT_OFFSET_Z(j) (0x0a14+(j)*32)
+#define NVA097_SET_VIEWPORT_OFFSET_Z_V 31:0
+
+#define NVA097_SET_VIEWPORT_CLIP_HORIZONTAL(j) (0x0c00+(j)*16)
+#define NVA097_SET_VIEWPORT_CLIP_HORIZONTAL_X0 15:0
+#define NVA097_SET_VIEWPORT_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVA097_SET_VIEWPORT_CLIP_VERTICAL(j) (0x0c04+(j)*16)
+#define NVA097_SET_VIEWPORT_CLIP_VERTICAL_Y0 15:0
+#define NVA097_SET_VIEWPORT_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVA097_SET_VIEWPORT_CLIP_MIN_Z(j) (0x0c08+(j)*16)
+#define NVA097_SET_VIEWPORT_CLIP_MIN_Z_V 31:0
+
+#define NVA097_SET_VIEWPORT_CLIP_MAX_Z(j) (0x0c0c+(j)*16)
+#define NVA097_SET_VIEWPORT_CLIP_MAX_Z_V 31:0
+
+#define NVA097_SET_WINDOW_CLIP_HORIZONTAL(j) (0x0d00+(j)*8)
+#define NVA097_SET_WINDOW_CLIP_HORIZONTAL_XMIN 15:0
+#define NVA097_SET_WINDOW_CLIP_HORIZONTAL_XMAX 31:16
+
+#define NVA097_SET_WINDOW_CLIP_VERTICAL(j) (0x0d04+(j)*8)
+#define NVA097_SET_WINDOW_CLIP_VERTICAL_YMIN 15:0
+#define NVA097_SET_WINDOW_CLIP_VERTICAL_YMAX 31:16
+
+#define NVA097_SET_CLIP_ID_EXTENT_X(j) (0x0d40+(j)*8)
+#define NVA097_SET_CLIP_ID_EXTENT_X_MINX 15:0
+#define NVA097_SET_CLIP_ID_EXTENT_X_WIDTH 31:16
+
+#define NVA097_SET_CLIP_ID_EXTENT_Y(j) (0x0d44+(j)*8)
+#define NVA097_SET_CLIP_ID_EXTENT_Y_MINY 15:0
+#define NVA097_SET_CLIP_ID_EXTENT_Y_HEIGHT 31:16
+
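The scale/offset pairs above implement the usual viewport transform, x_w = x_ndc * scale_x + offset_x (likewise for y and z), so a full W x H viewport anchored at the origin uses scale = offset = (W/2, H/2) in x and y. A sketch, assuming — as is conventional for these 3D classes, though not stated in this header — that the 31:0 V fields carry raw IEEE-754 single-precision bit patterns:

    #include <stdint.h>
    #include <string.h>

    void nv_push(uint32_t method, uint32_t data);   /* hypothetical helper */

    static uint32_t f32_bits(float f)
    {
        uint32_t u;
        memcpy(&u, &f, sizeof u);   /* well-defined type pun */
        return u;
    }

    static void set_viewport0(float w, float h)
    {
        nv_push(NVA097_SET_VIEWPORT_SCALE_X(0),  f32_bits(w * 0.5f));
        nv_push(NVA097_SET_VIEWPORT_SCALE_Y(0),  f32_bits(h * 0.5f));
        nv_push(NVA097_SET_VIEWPORT_OFFSET_X(0), f32_bits(w * 0.5f));
        nv_push(NVA097_SET_VIEWPORT_OFFSET_Y(0), f32_bits(h * 0.5f));
        /* SCALE_Z/OFFSET_Z map NDC z into the depth range selected by
         * SET_VIEWPORT_Z_CLIP, defined later in this file. */
    }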
+
+#define NVA097_SET_API_VISIBLE_CALL_LIMIT 0x0d64
+#define NVA097_SET_API_VISIBLE_CALL_LIMIT_V 3:0
+#define NVA097_SET_API_VISIBLE_CALL_LIMIT_V__0 0x00000000
+#define NVA097_SET_API_VISIBLE_CALL_LIMIT_V__1 0x00000001
+#define NVA097_SET_API_VISIBLE_CALL_LIMIT_V__2 0x00000002
+#define NVA097_SET_API_VISIBLE_CALL_LIMIT_V__4 0x00000003
+#define NVA097_SET_API_VISIBLE_CALL_LIMIT_V__8 0x00000004
+#define NVA097_SET_API_VISIBLE_CALL_LIMIT_V__16 0x00000005
+#define NVA097_SET_API_VISIBLE_CALL_LIMIT_V__32 0x00000006
+#define NVA097_SET_API_VISIBLE_CALL_LIMIT_V__64 0x00000007
+#define NVA097_SET_API_VISIBLE_CALL_LIMIT_V__128 0x00000008
+#define NVA097_SET_API_VISIBLE_CALL_LIMIT_V_NO_CHECK 0x0000000F
+
+#define NVA097_SET_STATISTICS_COUNTER 0x0d68
+#define NVA097_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE 0:0
+#define NVA097_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVA097_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE 1:1
+#define NVA097_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVA097_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE 2:2
+#define NVA097_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVA097_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE 3:3
+#define NVA097_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVA097_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE 4:4
+#define NVA097_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVA097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE 5:5
+#define NVA097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVA097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE 6:6
+#define NVA097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_TRUE 0x00000001
+#define NVA097_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE 7:7
+#define NVA097_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVA097_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE 8:8
+#define NVA097_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVA097_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE 9:9
+#define NVA097_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVA097_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE 11:11
+#define NVA097_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVA097_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE 12:12
+#define NVA097_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVA097_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE 13:13
+#define NVA097_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVA097_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE 14:14
+#define NVA097_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVA097_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE 10:10
+#define NVA097_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_TRUE 0x00000001
+#define NVA097_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE 15:15
+#define NVA097_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_CLEAR_RECT_HORIZONTAL 0x0d6c
+#define NVA097_SET_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVA097_SET_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVA097_SET_CLEAR_RECT_VERTICAL 0x0d70
+#define NVA097_SET_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVA097_SET_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVA097_SET_VERTEX_ARRAY_START 0x0d74
+#define NVA097_SET_VERTEX_ARRAY_START_V 31:0
+
+#define NVA097_DRAW_VERTEX_ARRAY 0x0d78
+#define NVA097_DRAW_VERTEX_ARRAY_COUNT 31:0
+
+#define NVA097_SET_VIEWPORT_Z_CLIP 0x0d7c
+#define NVA097_SET_VIEWPORT_Z_CLIP_RANGE 0:0
+#define NVA097_SET_VIEWPORT_Z_CLIP_RANGE_NEGATIVE_W_TO_POSITIVE_W 0x00000000
+#define NVA097_SET_VIEWPORT_Z_CLIP_RANGE_ZERO_TO_POSITIVE_W 0x00000001
+
+#define NVA097_SET_COLOR_CLEAR_VALUE(i) (0x0d80+(i)*4)
+#define NVA097_SET_COLOR_CLEAR_VALUE_V 31:0
+
+#define NVA097_SET_Z_CLEAR_VALUE 0x0d90
+#define NVA097_SET_Z_CLEAR_VALUE_V 31:0
+
+#define NVA097_SET_SHADER_CACHE_CONTROL 0x0d94
+#define NVA097_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0
+#define NVA097_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000
+#define NVA097_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001
+
+#define NVA097_FORCE_TRANSITION_TO_BETA 0x0d98
+#define NVA097_FORCE_TRANSITION_TO_BETA_V 0:0
+
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE 0x0d9c
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V 0:0
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_FALSE 0x00000000
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_TRUE 0x00000001
+
+#define NVA097_SET_STENCIL_CLEAR_VALUE 0x0da0
+#define NVA097_SET_STENCIL_CLEAR_VALUE_V 7:0
+
+#define NVA097_INVALIDATE_SHADER_CACHES_NO_WFI 0x0da4
+#define NVA097_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0
+#define NVA097_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000
+#define NVA097_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001
+#define NVA097_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4
+#define NVA097_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000
+#define NVA097_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001
+#define NVA097_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12
+#define NVA097_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000
+#define NVA097_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001
+
+#define NVA097_SET_FRONT_POLYGON_MODE 0x0dac
+#define NVA097_SET_FRONT_POLYGON_MODE_V 31:0
+#define NVA097_SET_FRONT_POLYGON_MODE_V_POINT 0x00001B00
+#define NVA097_SET_FRONT_POLYGON_MODE_V_LINE 0x00001B01
+#define NVA097_SET_FRONT_POLYGON_MODE_V_FILL 0x00001B02
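+
+/*
+ * Editorial note (illustrative comment, not part of the generated header):
+ * field defines such as NVA097_SET_CLEAR_RECT_HORIZONTAL_XMAX expand to a
+ * textual "high:low" bit range (here 31:16), which is meant to be consumed
+ * by DRF-style helper macros.  A minimal self-contained sketch of that
+ * trick, with hypothetical names (the DRF_* macros in nvmisc.h follow the
+ * same pattern):
+ *
+ *   #define FIELD_LO(f)     ((0 ? f) % 32)   // (0 ? 31:16) evaluates to 16
+ *   #define FIELD_HI(f)     ((1 ? f) % 32)   // (1 ? 31:16) evaluates to 31
+ *   #define FIELD_MASK(f)   (0xFFFFFFFFU >> (31 - FIELD_HI(f) + FIELD_LO(f)))
+ *   #define FIELD_NUM(f, n) (((NvU32)(n) & FIELD_MASK(f)) << FIELD_LO(f))
+ *
+ *   // Pack a clear rect covering x in [x0, x1]  (NvU32 per nvtypes.h):
+ *   NvU32 rect = FIELD_NUM(NVA097_SET_CLEAR_RECT_HORIZONTAL_XMIN, x0) |
+ *                FIELD_NUM(NVA097_SET_CLEAR_RECT_HORIZONTAL_XMAX, x1);
+ */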
+
+#define NVA097_SET_BACK_POLYGON_MODE 0x0db0
+#define NVA097_SET_BACK_POLYGON_MODE_V 31:0
+#define NVA097_SET_BACK_POLYGON_MODE_V_POINT 0x00001B00
+#define NVA097_SET_BACK_POLYGON_MODE_V_LINE 0x00001B01
+#define NVA097_SET_BACK_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVA097_SET_POLY_SMOOTH 0x0db4
+#define NVA097_SET_POLY_SMOOTH_ENABLE 0:0
+#define NVA097_SET_POLY_SMOOTH_ENABLE_FALSE 0x00000000
+#define NVA097_SET_POLY_SMOOTH_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_ZT_MARK 0x0db8
+#define NVA097_SET_ZT_MARK_IEEE_CLEAN 0:0
+#define NVA097_SET_ZT_MARK_IEEE_CLEAN_FALSE 0x00000000
+#define NVA097_SET_ZT_MARK_IEEE_CLEAN_TRUE 0x00000001
+
+#define NVA097_SET_ZCULL_DIR_FORMAT 0x0dbc
+#define NVA097_SET_ZCULL_DIR_FORMAT_ZDIR 15:0
+#define NVA097_SET_ZCULL_DIR_FORMAT_ZDIR_LESS 0x00000000
+#define NVA097_SET_ZCULL_DIR_FORMAT_ZDIR_GREATER 0x00000001
+#define NVA097_SET_ZCULL_DIR_FORMAT_ZFORMAT 31:16
+#define NVA097_SET_ZCULL_DIR_FORMAT_ZFORMAT_MSB 0x00000000
+#define NVA097_SET_ZCULL_DIR_FORMAT_ZFORMAT_FP 0x00000001
+#define NVA097_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZTRICK 0x00000002
+#define NVA097_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZF32_1 0x00000003
+
+#define NVA097_SET_POLY_OFFSET_POINT 0x0dc0
+#define NVA097_SET_POLY_OFFSET_POINT_ENABLE 0:0
+#define NVA097_SET_POLY_OFFSET_POINT_ENABLE_FALSE 0x00000000
+#define NVA097_SET_POLY_OFFSET_POINT_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_POLY_OFFSET_LINE 0x0dc4
+#define NVA097_SET_POLY_OFFSET_LINE_ENABLE 0:0
+#define NVA097_SET_POLY_OFFSET_LINE_ENABLE_FALSE 0x00000000
+#define NVA097_SET_POLY_OFFSET_LINE_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_POLY_OFFSET_FILL 0x0dc8
+#define NVA097_SET_POLY_OFFSET_FILL_ENABLE 0:0
+#define NVA097_SET_POLY_OFFSET_FILL_ENABLE_FALSE 0x00000000
+#define NVA097_SET_POLY_OFFSET_FILL_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_PATCH 0x0dcc
+#define NVA097_SET_PATCH_SIZE 7:0
+
+#define NVA097_SET_ZCULL_CRITERION 0x0dd8
+#define NVA097_SET_ZCULL_CRITERION_SFUNC 7:0
+#define NVA097_SET_ZCULL_CRITERION_SFUNC_NEVER 0x00000000
+#define NVA097_SET_ZCULL_CRITERION_SFUNC_LESS 0x00000001
+#define NVA097_SET_ZCULL_CRITERION_SFUNC_EQUAL 0x00000002
+#define NVA097_SET_ZCULL_CRITERION_SFUNC_LEQUAL 0x00000003
+#define NVA097_SET_ZCULL_CRITERION_SFUNC_GREATER 0x00000004
+#define NVA097_SET_ZCULL_CRITERION_SFUNC_NOTEQUAL 0x00000005
+#define NVA097_SET_ZCULL_CRITERION_SFUNC_GEQUAL 0x00000006
+#define NVA097_SET_ZCULL_CRITERION_SFUNC_ALWAYS 0x00000007
+#define NVA097_SET_ZCULL_CRITERION_NO_INVALIDATE 8:8
+#define NVA097_SET_ZCULL_CRITERION_NO_INVALIDATE_FALSE 0x00000000
+#define NVA097_SET_ZCULL_CRITERION_NO_INVALIDATE_TRUE 0x00000001
+#define NVA097_SET_ZCULL_CRITERION_FORCE_MATCH 9:9
+#define NVA097_SET_ZCULL_CRITERION_FORCE_MATCH_FALSE 0x00000000
+#define NVA097_SET_ZCULL_CRITERION_FORCE_MATCH_TRUE 0x00000001
+#define NVA097_SET_ZCULL_CRITERION_SREF 23:16
+#define NVA097_SET_ZCULL_CRITERION_SMASK 31:24
+
+#define NVA097_SET_SM_TIMEOUT_INTERVAL 0x0de4
+#define NVA097_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0
+
+#define NVA097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY 0x0de8
+#define NVA097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE 0:0
+#define NVA097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_FALSE 0x00000000
+#define NVA097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_DRAW_INLINE_VERTEX_VAB_UPDATE 0x0dec
+#define NVA097_SET_DRAW_INLINE_VERTEX_VAB_UPDATE_ENABLE 0:0
+#define NVA097_SET_DRAW_INLINE_VERTEX_VAB_UPDATE_ENABLE_FALSE 0x00000000
+#define NVA097_SET_DRAW_INLINE_VERTEX_VAB_UPDATE_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_WINDOW_OFFSET_X 0x0df8
+#define NVA097_SET_WINDOW_OFFSET_X_V 16:0
+
+#define NVA097_SET_WINDOW_OFFSET_Y 0x0dfc
+#define NVA097_SET_WINDOW_OFFSET_Y_V 17:0
+
+#define NVA097_SET_SCISSOR_ENABLE(j) (0x0e00+(j)*16)
+#define NVA097_SET_SCISSOR_ENABLE_V 0:0
+#define NVA097_SET_SCISSOR_ENABLE_V_FALSE 0x00000000
+#define NVA097_SET_SCISSOR_ENABLE_V_TRUE 0x00000001
+
+#define NVA097_SET_SCISSOR_HORIZONTAL(j) (0x0e04+(j)*16)
+#define NVA097_SET_SCISSOR_HORIZONTAL_XMIN 15:0
+#define NVA097_SET_SCISSOR_HORIZONTAL_XMAX 31:16
+
+#define NVA097_SET_SCISSOR_VERTICAL(j) (0x0e08+(j)*16)
+#define NVA097_SET_SCISSOR_VERTICAL_YMIN 15:0
+#define NVA097_SET_SCISSOR_VERTICAL_YMAX 31:16
+
+#define NVA097_SET_VAB_NORMAL3S(i) (0x0f00+(i)*4)
+#define NVA097_SET_VAB_NORMAL3S_V 31:0
+
+#define NVA097_SET_BACK_STENCIL_FUNC_REF 0x0f54
+#define NVA097_SET_BACK_STENCIL_FUNC_REF_V 7:0
+
+#define NVA097_SET_BACK_STENCIL_MASK 0x0f58
+#define NVA097_SET_BACK_STENCIL_MASK_V 7:0
+
+#define NVA097_SET_BACK_STENCIL_FUNC_MASK 0x0f5c
+#define NVA097_SET_BACK_STENCIL_FUNC_MASK_V 7:0
+
+#define NVA097_SET_VERTEX_STREAM_SUBSTITUTE_A 0x0f84
+#define NVA097_SET_VERTEX_STREAM_SUBSTITUTE_A_ADDRESS_UPPER 7:0
+
+#define NVA097_SET_VERTEX_STREAM_SUBSTITUTE_B 0x0f88
+#define NVA097_SET_VERTEX_STREAM_SUBSTITUTE_B_ADDRESS_LOWER 31:0
+
+#define NVA097_SET_LINE_MODE_POLYGON_CLIP 0x0f8c
+#define NVA097_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE 0:0
+#define NVA097_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DRAW_LINE 0x00000000
+#define NVA097_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DO_NOT_DRAW_LINE 0x00000001
+
+#define NVA097_SET_SINGLE_CT_WRITE_CONTROL 0x0f90
+#define NVA097_SET_SINGLE_CT_WRITE_CONTROL_ENABLE 0:0
+#define NVA097_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_FALSE 0x00000000
+#define NVA097_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_VTG_WARP_WATERMARKS 0x0f98
+#define NVA097_SET_VTG_WARP_WATERMARKS_LOW 15:0
+#define NVA097_SET_VTG_WARP_WATERMARKS_HIGH 31:16
+
+#define NVA097_SET_DEPTH_BOUNDS_MIN 0x0f9c
+#define NVA097_SET_DEPTH_BOUNDS_MIN_V 31:0
+
+#define NVA097_SET_DEPTH_BOUNDS_MAX 0x0fa0
+#define NVA097_SET_DEPTH_BOUNDS_MAX_V 31:0
+
+#define NVA097_SET_CT_MRT_ENABLE 0x0fac
+#define NVA097_SET_CT_MRT_ENABLE_V 0:0
+#define NVA097_SET_CT_MRT_ENABLE_V_FALSE 0x00000000
+#define NVA097_SET_CT_MRT_ENABLE_V_TRUE 0x00000001
+
+#define NVA097_SET_NONMULTISAMPLED_Z 0x0fb0
+#define NVA097_SET_NONMULTISAMPLED_Z_V 0:0
+#define NVA097_SET_NONMULTISAMPLED_Z_V_PER_SAMPLE 0x00000000
+#define NVA097_SET_NONMULTISAMPLED_Z_V_AT_PIXEL_CENTER 0x00000001
+
+#define NVA097_SET_SAMPLE_MASK_X0_Y0 0x0fbc
+#define NVA097_SET_SAMPLE_MASK_X0_Y0_V 15:0
+
+#define NVA097_SET_SAMPLE_MASK_X1_Y0 0x0fc0
+#define NVA097_SET_SAMPLE_MASK_X1_Y0_V 15:0
+
+#define NVA097_SET_SAMPLE_MASK_X0_Y1 0x0fc4
+#define NVA097_SET_SAMPLE_MASK_X0_Y1_V 15:0
+
+#define NVA097_SET_SAMPLE_MASK_X1_Y1 0x0fc8
+#define NVA097_SET_SAMPLE_MASK_X1_Y1_V 15:0
+
+#define NVA097_SET_SURFACE_CLIP_ID_MEMORY_A 0x0fcc
+#define NVA097_SET_SURFACE_CLIP_ID_MEMORY_A_OFFSET_UPPER 7:0
+
+#define NVA097_SET_SURFACE_CLIP_ID_MEMORY_B 0x0fd0
+#define NVA097_SET_SURFACE_CLIP_ID_MEMORY_B_OFFSET_LOWER 31:0
+
+#define NVA097_SET_BLEND_OPT_CONTROL 0x0fdc
+#define NVA097_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS 0:0
+#define NVA097_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_FALSE 0x00000000
+#define NVA097_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_TRUE 0x00000001
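+
+/*
+ * Editorial note (illustrative comment, not part of the generated header):
+ * the ..._A/..._B method pairs above split a 40-bit GPU virtual address
+ * into an 8-bit upper word (7:0) and a 32-bit lower word (31:0).  A
+ * minimal sketch of the split (NvU64/NvU32 per nvtypes.h):
+ *
+ *   NvU32 hi = (NvU32)(va >> 32) & 0xFF;     // SET_VERTEX_STREAM_SUBSTITUTE_A_ADDRESS_UPPER
+ *   NvU32 lo = (NvU32)(va & 0xFFFFFFFFULL);  // SET_VERTEX_STREAM_SUBSTITUTE_B_ADDRESS_LOWER
+ */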
+
+#define NVA097_SET_ZT_A 0x0fe0
+#define NVA097_SET_ZT_A_OFFSET_UPPER 7:0
+
+#define NVA097_SET_ZT_B 0x0fe4
+#define NVA097_SET_ZT_B_OFFSET_LOWER 31:0
+
+#define NVA097_SET_ZT_FORMAT 0x0fe8
+#define NVA097_SET_ZT_FORMAT_V 4:0
+#define NVA097_SET_ZT_FORMAT_V_Z16 0x00000013
+#define NVA097_SET_ZT_FORMAT_V_Z24S8 0x00000014
+#define NVA097_SET_ZT_FORMAT_V_X8Z24 0x00000015
+#define NVA097_SET_ZT_FORMAT_V_S8Z24 0x00000016
+#define NVA097_SET_ZT_FORMAT_V_V8Z24 0x00000018
+#define NVA097_SET_ZT_FORMAT_V_ZF32 0x0000000A
+#define NVA097_SET_ZT_FORMAT_V_ZF32_X24S8 0x00000019
+#define NVA097_SET_ZT_FORMAT_V_X8Z24_X16V8S8 0x0000001D
+#define NVA097_SET_ZT_FORMAT_V_ZF32_X16V8X8 0x0000001E
+#define NVA097_SET_ZT_FORMAT_V_ZF32_X16V8S8 0x0000001F
+
+#define NVA097_SET_ZT_BLOCK_SIZE 0x0fec
+#define NVA097_SET_ZT_BLOCK_SIZE_WIDTH 3:0
+#define NVA097_SET_ZT_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVA097_SET_ZT_BLOCK_SIZE_HEIGHT 7:4
+#define NVA097_SET_ZT_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVA097_SET_ZT_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVA097_SET_ZT_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVA097_SET_ZT_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVA097_SET_ZT_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVA097_SET_ZT_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVA097_SET_ZT_BLOCK_SIZE_DEPTH 11:8
+#define NVA097_SET_ZT_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+
+#define NVA097_SET_ZT_ARRAY_PITCH 0x0ff0
+#define NVA097_SET_ZT_ARRAY_PITCH_V 31:0
+
+#define NVA097_SET_SURFACE_CLIP_HORIZONTAL 0x0ff4
+#define NVA097_SET_SURFACE_CLIP_HORIZONTAL_X 15:0
+#define NVA097_SET_SURFACE_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVA097_SET_SURFACE_CLIP_VERTICAL 0x0ff8
+#define NVA097_SET_SURFACE_CLIP_VERTICAL_Y 15:0
+#define NVA097_SET_SURFACE_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS 0x1000
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE 0:0
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_FALSE 0x00000000
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_TRUE 0x00000001
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY 5:4
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVA097_SET_TESSELLATION_CUT_HEIGHT 0x1008
+#define NVA097_SET_TESSELLATION_CUT_HEIGHT_V 4:0
+
+#define NVA097_SET_MAX_GS_INSTANCES_PER_TASK 0x100c
+#define NVA097_SET_MAX_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVA097_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK 0x1010
+#define NVA097_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK_V 15:0
+
+#define NVA097_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER 0x1018
+#define NVA097_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVA097_SET_BETA_CB_STORAGE_CONSTRAINT 0x101c
+#define NVA097_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVA097_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVA097_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER 0x1020
+#define NVA097_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVA097_SET_ALPHA_CB_STORAGE_CONSTRAINT 0x1024
+#define NVA097_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVA097_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVA097_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_SPARE_NOOP00 0x1040
+#define NVA097_SET_SPARE_NOOP00_V 31:0
+
+#define NVA097_SET_SPARE_NOOP01 0x1044
+#define NVA097_SET_SPARE_NOOP01_V 31:0
+
+#define NVA097_SET_SPARE_NOOP02 0x1048
+#define NVA097_SET_SPARE_NOOP02_V 31:0
+
+#define NVA097_SET_SPARE_NOOP03 0x104c
+#define NVA097_SET_SPARE_NOOP03_V 31:0
+
+#define NVA097_SET_SPARE_NOOP04 0x1050
+#define NVA097_SET_SPARE_NOOP04_V 31:0
+
+#define NVA097_SET_SPARE_NOOP05 0x1054
+#define NVA097_SET_SPARE_NOOP05_V 31:0
+
+#define NVA097_SET_SPARE_NOOP06 0x1058
+#define NVA097_SET_SPARE_NOOP06_V 31:0
+
+#define NVA097_SET_SPARE_NOOP07 0x105c
+#define NVA097_SET_SPARE_NOOP07_V 31:0
+
+#define NVA097_SET_SPARE_NOOP08 0x1060
+#define NVA097_SET_SPARE_NOOP08_V 31:0
+
+#define NVA097_SET_SPARE_NOOP09 0x1064
+#define NVA097_SET_SPARE_NOOP09_V 31:0
+
+#define NVA097_SET_SPARE_NOOP10 0x1068
+#define NVA097_SET_SPARE_NOOP10_V 31:0
+
+#define NVA097_SET_SPARE_NOOP11 0x106c
+#define NVA097_SET_SPARE_NOOP11_V 31:0
+
+#define NVA097_SET_SPARE_NOOP12 0x1070
+#define NVA097_SET_SPARE_NOOP12_V 31:0
+
+#define NVA097_SET_SPARE_NOOP13 0x1074
+#define NVA097_SET_SPARE_NOOP13_V 31:0
+
+#define NVA097_SET_SPARE_NOOP14 0x1078
+#define NVA097_SET_SPARE_NOOP14_V 31:0
+
+#define NVA097_SET_SPARE_NOOP15 0x107c
+#define NVA097_SET_SPARE_NOOP15_V 31:0
+
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_UNORM8 0x10cc
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED 23:16
+
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_UNORM10 0x10e0
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED 23:16
+
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_UNORM16 0x10e4
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED 23:16
+
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_FP11 0x10e8
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED_ALL_HIT_ONCE 5:0
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED 21:16
+
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_FP16 0x10ec
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED 23:16
+
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_SRGB8 0x10f0
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVA097_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED 23:16
+
+#define NVA097_UNBIND_ALL 0x10f4
+#define NVA097_UNBIND_ALL_CONSTANT_BUFFERS 8:8
+#define NVA097_UNBIND_ALL_CONSTANT_BUFFERS_FALSE 0x00000000
+#define NVA097_UNBIND_ALL_CONSTANT_BUFFERS_TRUE 0x00000001
+
+#define NVA097_SET_CLEAR_SURFACE_CONTROL 0x10f8
+#define NVA097_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK 0:0
+#define NVA097_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_FALSE 0x00000000
+#define NVA097_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_TRUE 0x00000001
+#define NVA097_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT 4:4
+#define NVA097_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_FALSE 0x00000000
+#define NVA097_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_TRUE 0x00000001
+#define NVA097_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0 8:8
+#define NVA097_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_FALSE 0x00000000
+#define NVA097_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_TRUE 0x00000001
+#define NVA097_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0 12:12
+#define NVA097_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_FALSE 0x00000000
+#define NVA097_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_TRUE 0x00000001
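+
+/*
+ * Editorial note (illustrative comment, not part of the generated header):
+ * block-linear layout is expressed in GOBs per block.  Reusing the
+ * hypothetical FIELD_NUM sketch from earlier, a depth target whose blocks
+ * are one GOB wide, four GOBs tall and one GOB deep could be described as:
+ *
+ *   NvU32 zt_block =
+ *       FIELD_NUM(NVA097_SET_ZT_BLOCK_SIZE_WIDTH,  NVA097_SET_ZT_BLOCK_SIZE_WIDTH_ONE_GOB)    |
+ *       FIELD_NUM(NVA097_SET_ZT_BLOCK_SIZE_HEIGHT, NVA097_SET_ZT_BLOCK_SIZE_HEIGHT_FOUR_GOBS) |
+ *       FIELD_NUM(NVA097_SET_ZT_BLOCK_SIZE_DEPTH,  NVA097_SET_ZT_BLOCK_SIZE_DEPTH_ONE_GOB);
+ */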
+
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS 0x10fc
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY 5:4
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVA097_NO_OPERATION_DATA_HI 0x110c
+#define NVA097_NO_OPERATION_DATA_HI_V 31:0
+
+#define NVA097_SET_DEPTH_BIAS_CONTROL 0x1110
+#define NVA097_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT 0:0
+#define NVA097_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_FALSE 0x00000000
+#define NVA097_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_TRUE 0x00000001
+
+#define NVA097_PM_TRIGGER_END 0x1114
+#define NVA097_PM_TRIGGER_END_V 31:0
+
+#define NVA097_SET_VERTEX_ID_BASE 0x1118
+#define NVA097_SET_VERTEX_ID_BASE_V 31:0
+
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A(i) (0x1120+(i)*4)
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0 0:0
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1 1:1
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2 2:2
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3 3:3
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0 4:4
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1 5:5
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2 6:6
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3 7:7
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0 8:8
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1 9:9
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2 10:10
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3 11:11
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0 12:12
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1 13:13
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2 14:14
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3 15:15
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0 16:16
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1 17:17
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2 18:18
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3 19:19
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0 20:20
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1 21:21
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2 22:22
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3 23:23
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0 24:24
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1 25:25
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2 26:26
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3 27:27
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0 28:28
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1 29:29
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2 30:30
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3 31:31
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_TRUE 0x00000001
+
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B(i) (0x1128+(i)*4)
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0 0:0
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1 1:1
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2 2:2
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3 3:3
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0 4:4
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1 5:5
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2 6:6
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3 7:7
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0 8:8
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1 9:9
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2 10:10
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3 11:11
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0 12:12
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1 13:13
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2 14:14
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3 15:15
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0 16:16
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1 17:17
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2 18:18
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3 19:19
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0 20:20
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1 21:21
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2 22:22
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3 23:23
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0 24:24
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1 25:25
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2 26:26
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3 27:27
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0 28:28
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1 29:29
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2 30:30
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_TRUE 0x00000001
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3 31:31
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_TRUE 0x00000001
+
+#define NVA097_SET_BLEND_PER_FORMAT_ENABLE 0x1140
+#define NVA097_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16 4:4
+#define NVA097_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_FALSE 0x00000000
+#define NVA097_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_TRUE 0x00000001
+
+#define NVA097_FLUSH_PENDING_WRITES 0x1144
+#define NVA097_FLUSH_PENDING_WRITES_SM_DOES_GLOBAL_STORE 0:0
+
+#define NVA097_SET_VAB_DATA_CONTROL 0x114c
+#define NVA097_SET_VAB_DATA_CONTROL_VAB_INDEX 7:0
+#define NVA097_SET_VAB_DATA_CONTROL_COMPONENT_COUNT 10:8
+#define NVA097_SET_VAB_DATA_CONTROL_COMPONENT_BYTE_WIDTH 14:12
+#define NVA097_SET_VAB_DATA_CONTROL_FORMAT 18:16
+#define NVA097_SET_VAB_DATA_CONTROL_FORMAT_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000
+#define NVA097_SET_VAB_DATA_CONTROL_FORMAT_NUM_SNORM 0x00000001
+#define NVA097_SET_VAB_DATA_CONTROL_FORMAT_NUM_UNORM 0x00000002
+#define NVA097_SET_VAB_DATA_CONTROL_FORMAT_NUM_SINT 0x00000003
+#define NVA097_SET_VAB_DATA_CONTROL_FORMAT_NUM_UINT 0x00000004
+#define NVA097_SET_VAB_DATA_CONTROL_FORMAT_NUM_USCALED 0x00000005
+#define NVA097_SET_VAB_DATA_CONTROL_FORMAT_NUM_SSCALED 0x00000006
+#define NVA097_SET_VAB_DATA_CONTROL_FORMAT_NUM_FLOAT 0x00000007
+
+#define NVA097_SET_VAB_DATA(i) (0x1150+(i)*4)
+#define NVA097_SET_VAB_DATA_V 31:0
+
+#define NVA097_SET_VERTEX_ATTRIBUTE_A(i) (0x1160+(i)*4)
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_STREAM 4:0
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_SOURCE 6:6
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_SOURCE_ACTIVE 0x00000000
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_SOURCE_INACTIVE 0x00000001
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_OFFSET 20:7
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS 26:21
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32 0x00000012
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_G8R8 0x00000032
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16 0x0000001B
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8 0x0000001D
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8 0x00000034
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE 29:27
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SNORM 0x00000001
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UNORM 0x00000002
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SINT 0x00000003
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UINT 0x00000004
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_USCALED 0x00000005
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SSCALED 0x00000006
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT 0x00000007
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B 31:31
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_FALSE 0x00000000
+#define NVA097_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_TRUE 0x00000001
+
+#define NVA097_SET_VERTEX_ATTRIBUTE_B(i) (0x11a0+(i)*4)
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_STREAM 4:0
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_SOURCE 6:6
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_SOURCE_ACTIVE 0x00000000
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_SOURCE_INACTIVE 0x00000001
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_OFFSET 20:7
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS 26:21
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32 0x00000012
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_G8R8 0x00000032
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16 0x0000001B
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8 0x0000001D
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8 0x00000034
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE 29:27
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SNORM 0x00000001
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UNORM 0x00000002
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SINT 0x00000003
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UINT 0x00000004
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_USCALED 0x00000005
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SSCALED 0x00000006
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_FLOAT 0x00000007
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B 31:31
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_FALSE 0x00000000
+#define NVA097_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_TRUE 0x00000001
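+
+/*
+ * Editorial note (illustrative comment, not part of the generated header):
+ * each SET_VERTEX_ATTRIBUTE_A/B word is a packed attribute descriptor.
+ * With the hypothetical FIELD_NUM sketch from earlier, a two-component
+ * float attribute fetched from stream 0 at byte offset 0 could be:
+ *
+ *   NvU32 attr =
+ *       FIELD_NUM(NVA097_SET_VERTEX_ATTRIBUTE_A_STREAM, 0) |
+ *       FIELD_NUM(NVA097_SET_VERTEX_ATTRIBUTE_A_SOURCE,
+ *                 NVA097_SET_VERTEX_ATTRIBUTE_A_SOURCE_ACTIVE) |
+ *       FIELD_NUM(NVA097_SET_VERTEX_ATTRIBUTE_A_OFFSET, 0) |
+ *       FIELD_NUM(NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS,
+ *                 NVA097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32) |
+ *       FIELD_NUM(NVA097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE,
+ *                 NVA097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT);
+ */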
+
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST 0x1214
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX 15:0
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT 0x1218
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_START_INDEX 15:0
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVA097_SET_CT_SELECT 0x121c
+#define NVA097_SET_CT_SELECT_TARGET_COUNT 3:0
+#define NVA097_SET_CT_SELECT_TARGET0 6:4
+#define NVA097_SET_CT_SELECT_TARGET1 9:7
+#define NVA097_SET_CT_SELECT_TARGET2 12:10
+#define NVA097_SET_CT_SELECT_TARGET3 15:13
+#define NVA097_SET_CT_SELECT_TARGET4 18:16
+#define NVA097_SET_CT_SELECT_TARGET5 21:19
+#define NVA097_SET_CT_SELECT_TARGET6 24:22
+#define NVA097_SET_CT_SELECT_TARGET7 27:25
+
+#define NVA097_SET_COMPRESSION_THRESHOLD 0x1220
+#define NVA097_SET_COMPRESSION_THRESHOLD_SAMPLES 3:0
+#define NVA097_SET_COMPRESSION_THRESHOLD_SAMPLES__0 0x00000000
+#define NVA097_SET_COMPRESSION_THRESHOLD_SAMPLES__1 0x00000001
+#define NVA097_SET_COMPRESSION_THRESHOLD_SAMPLES__2 0x00000002
+#define NVA097_SET_COMPRESSION_THRESHOLD_SAMPLES__4 0x00000003
+#define NVA097_SET_COMPRESSION_THRESHOLD_SAMPLES__8 0x00000004
+#define NVA097_SET_COMPRESSION_THRESHOLD_SAMPLES__16 0x00000005
+#define NVA097_SET_COMPRESSION_THRESHOLD_SAMPLES__32 0x00000006
+#define NVA097_SET_COMPRESSION_THRESHOLD_SAMPLES__64 0x00000007
+#define NVA097_SET_COMPRESSION_THRESHOLD_SAMPLES__128 0x00000008
+#define NVA097_SET_COMPRESSION_THRESHOLD_SAMPLES__256 0x00000009
+#define NVA097_SET_COMPRESSION_THRESHOLD_SAMPLES__512 0x0000000A
+#define NVA097_SET_COMPRESSION_THRESHOLD_SAMPLES__1024 0x0000000B
+#define NVA097_SET_COMPRESSION_THRESHOLD_SAMPLES__2048 0x0000000C
+
+#define NVA097_SET_ZT_SIZE_A 0x1228
+#define NVA097_SET_ZT_SIZE_A_WIDTH 27:0
+
+#define NVA097_SET_ZT_SIZE_B 0x122c
+#define NVA097_SET_ZT_SIZE_B_HEIGHT 16:0
+
+#define NVA097_SET_ZT_SIZE_C 0x1230
+#define NVA097_SET_ZT_SIZE_C_THIRD_DIMENSION 15:0
+#define NVA097_SET_ZT_SIZE_C_CONTROL 16:16
+#define NVA097_SET_ZT_SIZE_C_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000
+#define NVA097_SET_ZT_SIZE_C_CONTROL_ARRAY_SIZE_IS_ONE 0x00000001
+
+#define NVA097_SET_SAMPLER_BINDING 0x1234
+#define NVA097_SET_SAMPLER_BINDING_V 0:0
+#define NVA097_SET_SAMPLER_BINDING_V_INDEPENDENTLY 0x00000000
+#define NVA097_SET_SAMPLER_BINDING_V_VIA_HEADER_BINDING 0x00000001
+
+#define NVA097_DRAW_AUTO 0x123c
+#define NVA097_DRAW_AUTO_BYTE_COUNT 31:0
+
+#define NVA097_SET_CIRCULAR_BUFFER_SIZE 0x1280
+#define NVA097_SET_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 9:0
+
+#define NVA097_SET_VTG_REGISTER_WATERMARKS 0x1284
+#define NVA097_SET_VTG_REGISTER_WATERMARKS_LOW 15:0
+#define NVA097_SET_VTG_REGISTER_WATERMARKS_HIGH 31:16
+
+#define NVA097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288
+#define NVA097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0
+#define NVA097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVA097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVA097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4
+
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS 0x1290
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY 5:4
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVA097_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE 0x12a4
+#define NVA097_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE_V 31:0
+
+#define NVA097_SET_SHADER_SCHEDULING 0x12ac
+#define NVA097_SET_SHADER_SCHEDULING_MODE 0:0
+#define NVA097_SET_SHADER_SCHEDULING_MODE_OLDEST_THREAD_FIRST 0x00000000
+#define NVA097_SET_SHADER_SCHEDULING_MODE_ROUND_ROBIN 0x00000001
+
+#define NVA097_CLEAR_ZCULL_REGION 0x12c8
+#define NVA097_CLEAR_ZCULL_REGION_Z_ENABLE 0:0
+#define NVA097_CLEAR_ZCULL_REGION_Z_ENABLE_FALSE 0x00000000
+#define NVA097_CLEAR_ZCULL_REGION_Z_ENABLE_TRUE 0x00000001
+#define NVA097_CLEAR_ZCULL_REGION_STENCIL_ENABLE 4:4
+#define NVA097_CLEAR_ZCULL_REGION_STENCIL_ENABLE_FALSE 0x00000000
+#define NVA097_CLEAR_ZCULL_REGION_STENCIL_ENABLE_TRUE 0x00000001
+#define NVA097_CLEAR_ZCULL_REGION_USE_CLEAR_RECT 1:1
+#define NVA097_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_FALSE 0x00000000
+#define NVA097_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_TRUE 0x00000001
+#define NVA097_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX 2:2
+#define NVA097_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_FALSE 0x00000000
+#define NVA097_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_TRUE 0x00000001
+#define NVA097_CLEAR_ZCULL_REGION_RT_ARRAY_INDEX 20:5
+#define NVA097_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE 3:3
+#define NVA097_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_FALSE 0x00000000
+#define NVA097_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_TRUE 0x00000001
+
+#define NVA097_SET_DEPTH_TEST 0x12cc
+#define NVA097_SET_DEPTH_TEST_ENABLE 0:0
+#define NVA097_SET_DEPTH_TEST_ENABLE_FALSE 0x00000000
+#define NVA097_SET_DEPTH_TEST_ENABLE_TRUE 0x00000001
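+
+/*
+ * Editorial note (illustrative comment, not part of the generated header):
+ * DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST packs the start index (15:0),
+ * a count limited to 12 bits (27:16) and the topology (31:28) into a
+ * single method word.  Sketch, again with the hypothetical FIELD_NUM:
+ *
+ *   NvU32 draw =
+ *       FIELD_NUM(NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX, first) |
+ *       FIELD_NUM(NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT, count) |
+ *       FIELD_NUM(NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY,
+ *                 NVA097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES);
+ */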
+#define NVA097_SET_FILL_MODE_V_POINT 0x00000001 +#define NVA097_SET_FILL_MODE_V_WIREFRAME 0x00000002 +#define NVA097_SET_FILL_MODE_V_SOLID 0x00000003 + +#define NVA097_SET_SHADE_MODE 0x12d4 +#define NVA097_SET_SHADE_MODE_V 31:0 +#define NVA097_SET_SHADE_MODE_V_FLAT 0x00000001 +#define NVA097_SET_SHADE_MODE_V_GOURAUD 0x00000002 +#define NVA097_SET_SHADE_MODE_V_OGL_FLAT 0x00001D00 +#define NVA097_SET_SHADE_MODE_V_OGL_SMOOTH 0x00001D01 + +#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS 0x12d8 +#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY 5:4 +#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS 0x12dc +#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY 5:4 +#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVA097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVA097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL 0x12e0 +#define NVA097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT 3:0 +#define NVA097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1 0x00000000 +#define NVA097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_2X2 0x00000001 +#define NVA097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1_VIRTUAL_SAMPLES 0x00000002 + +#define NVA097_SET_BLEND_STATE_PER_TARGET 0x12e4 +#define NVA097_SET_BLEND_STATE_PER_TARGET_ENABLE 0:0 +#define NVA097_SET_BLEND_STATE_PER_TARGET_ENABLE_FALSE 0x00000000 +#define NVA097_SET_BLEND_STATE_PER_TARGET_ENABLE_TRUE 0x00000001 + +#define NVA097_SET_DEPTH_WRITE 0x12e8 +#define NVA097_SET_DEPTH_WRITE_ENABLE 0:0 +#define NVA097_SET_DEPTH_WRITE_ENABLE_FALSE 0x00000000 +#define NVA097_SET_DEPTH_WRITE_ENABLE_TRUE 0x00000001 + +#define NVA097_SET_ALPHA_TEST 0x12ec +#define NVA097_SET_ALPHA_TEST_ENABLE 0:0 +#define NVA097_SET_ALPHA_TEST_ENABLE_FALSE 0x00000000 +#define NVA097_SET_ALPHA_TEST_ENABLE_TRUE 0x00000001 + +#define NVA097_SET_INLINE_INDEX4X8_ALIGN 0x1300 +#define NVA097_SET_INLINE_INDEX4X8_ALIGN_COUNT 29:0 +#define NVA097_SET_INLINE_INDEX4X8_ALIGN_START 31:30 + +#define NVA097_DRAW_INLINE_INDEX4X8 0x1304 +#define NVA097_DRAW_INLINE_INDEX4X8_INDEX0 7:0 +#define NVA097_DRAW_INLINE_INDEX4X8_INDEX1 15:8 +#define NVA097_DRAW_INLINE_INDEX4X8_INDEX2 23:16 +#define NVA097_DRAW_INLINE_INDEX4X8_INDEX3 31:24 + +#define NVA097_D3D_SET_CULL_MODE 0x1308 +#define NVA097_D3D_SET_CULL_MODE_V 31:0 +#define NVA097_D3D_SET_CULL_MODE_V_NONE 0x00000001 +#define NVA097_D3D_SET_CULL_MODE_V_CW 0x00000002 +#define NVA097_D3D_SET_CULL_MODE_V_CCW 0x00000003 + +#define NVA097_SET_DEPTH_FUNC 0x130c +#define NVA097_SET_DEPTH_FUNC_V 31:0 +#define NVA097_SET_DEPTH_FUNC_V_OGL_NEVER 0x00000200 +#define NVA097_SET_DEPTH_FUNC_V_OGL_LESS 0x00000201 +#define NVA097_SET_DEPTH_FUNC_V_OGL_EQUAL 0x00000202 +#define NVA097_SET_DEPTH_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVA097_SET_DEPTH_FUNC_V_OGL_GREATER 0x00000204 +#define NVA097_SET_DEPTH_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVA097_SET_DEPTH_FUNC_V_OGL_GEQUAL 0x00000206 +#define 
+#define NVA097_SET_INLINE_INDEX4X8_ALIGN 0x1300
+#define NVA097_SET_INLINE_INDEX4X8_ALIGN_COUNT 29:0
+#define NVA097_SET_INLINE_INDEX4X8_ALIGN_START 31:30
+
+#define NVA097_DRAW_INLINE_INDEX4X8 0x1304
+#define NVA097_DRAW_INLINE_INDEX4X8_INDEX0 7:0
+#define NVA097_DRAW_INLINE_INDEX4X8_INDEX1 15:8
+#define NVA097_DRAW_INLINE_INDEX4X8_INDEX2 23:16
+#define NVA097_DRAW_INLINE_INDEX4X8_INDEX3 31:24
+
+#define NVA097_D3D_SET_CULL_MODE 0x1308
+#define NVA097_D3D_SET_CULL_MODE_V 31:0
+#define NVA097_D3D_SET_CULL_MODE_V_NONE 0x00000001
+#define NVA097_D3D_SET_CULL_MODE_V_CW 0x00000002
+#define NVA097_D3D_SET_CULL_MODE_V_CCW 0x00000003
+
+#define NVA097_SET_DEPTH_FUNC 0x130c
+#define NVA097_SET_DEPTH_FUNC_V 31:0
+#define NVA097_SET_DEPTH_FUNC_V_OGL_NEVER 0x00000200
+#define NVA097_SET_DEPTH_FUNC_V_OGL_LESS 0x00000201
+#define NVA097_SET_DEPTH_FUNC_V_OGL_EQUAL 0x00000202
+#define NVA097_SET_DEPTH_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVA097_SET_DEPTH_FUNC_V_OGL_GREATER 0x00000204
+#define NVA097_SET_DEPTH_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVA097_SET_DEPTH_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVA097_SET_DEPTH_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVA097_SET_DEPTH_FUNC_V_D3D_NEVER 0x00000001
+#define NVA097_SET_DEPTH_FUNC_V_D3D_LESS 0x00000002
+#define NVA097_SET_DEPTH_FUNC_V_D3D_EQUAL 0x00000003
+#define NVA097_SET_DEPTH_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVA097_SET_DEPTH_FUNC_V_D3D_GREATER 0x00000005
+#define NVA097_SET_DEPTH_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVA097_SET_DEPTH_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVA097_SET_DEPTH_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVA097_SET_ALPHA_REF 0x1310
+#define NVA097_SET_ALPHA_REF_V 31:0
+
+#define NVA097_SET_ALPHA_FUNC 0x1314
+#define NVA097_SET_ALPHA_FUNC_V 31:0
+#define NVA097_SET_ALPHA_FUNC_V_OGL_NEVER 0x00000200
+#define NVA097_SET_ALPHA_FUNC_V_OGL_LESS 0x00000201
+#define NVA097_SET_ALPHA_FUNC_V_OGL_EQUAL 0x00000202
+#define NVA097_SET_ALPHA_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVA097_SET_ALPHA_FUNC_V_OGL_GREATER 0x00000204
+#define NVA097_SET_ALPHA_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVA097_SET_ALPHA_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVA097_SET_ALPHA_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVA097_SET_ALPHA_FUNC_V_D3D_NEVER 0x00000001
+#define NVA097_SET_ALPHA_FUNC_V_D3D_LESS 0x00000002
+#define NVA097_SET_ALPHA_FUNC_V_D3D_EQUAL 0x00000003
+#define NVA097_SET_ALPHA_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVA097_SET_ALPHA_FUNC_V_D3D_GREATER 0x00000005
+#define NVA097_SET_ALPHA_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVA097_SET_ALPHA_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVA097_SET_ALPHA_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVA097_SET_DRAW_AUTO_STRIDE 0x1318
+#define NVA097_SET_DRAW_AUTO_STRIDE_V 11:0
+
+#define NVA097_SET_BLEND_CONST_RED 0x131c
+#define NVA097_SET_BLEND_CONST_RED_V 31:0
+
+#define NVA097_SET_BLEND_CONST_GREEN 0x1320
+#define NVA097_SET_BLEND_CONST_GREEN_V 31:0
+
+#define NVA097_SET_BLEND_CONST_BLUE 0x1324
+#define NVA097_SET_BLEND_CONST_BLUE_V 31:0
+
+#define NVA097_SET_BLEND_CONST_ALPHA 0x1328
+#define NVA097_SET_BLEND_CONST_ALPHA_V 31:0
+
+#define NVA097_INVALIDATE_SAMPLER_CACHE 0x1330
+#define NVA097_INVALIDATE_SAMPLER_CACHE_LINES 0:0
+#define NVA097_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000
+#define NVA097_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001
+#define NVA097_INVALIDATE_SAMPLER_CACHE_TAG 25:4
+
+#define NVA097_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334
+#define NVA097_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0
+#define NVA097_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000
+#define NVA097_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001
+#define NVA097_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4
+
+#define NVA097_INVALIDATE_TEXTURE_DATA_CACHE 0x1338
+#define NVA097_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0
+#define NVA097_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000
+#define NVA097_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001
+#define NVA097_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4
+
+#define NVA097_SET_BLEND_SEPARATE_FOR_ALPHA 0x133c
+#define NVA097_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE 0:0
+#define NVA097_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000
+#define NVA097_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_BLEND_COLOR_OP 0x1340
+#define NVA097_SET_BLEND_COLOR_OP_V 31:0
+#define NVA097_SET_BLEND_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVA097_SET_BLEND_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVA097_SET_BLEND_COLOR_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVA097_SET_BLEND_COLOR_OP_V_OGL_MIN 0x00008007
+#define NVA097_SET_BLEND_COLOR_OP_V_OGL_MAX 0x00008008
+#define NVA097_SET_BLEND_COLOR_OP_V_D3D_ADD 0x00000001
+#define NVA097_SET_BLEND_COLOR_OP_V_D3D_SUBTRACT 0x00000002
+#define NVA097_SET_BLEND_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVA097_SET_BLEND_COLOR_OP_V_D3D_MIN 0x00000004
+#define NVA097_SET_BLEND_COLOR_OP_V_D3D_MAX 0x00000005
+
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF 0x1344
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V 31:0
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF 0x1348
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V 31:0
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVA097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVA097_SET_BLEND_ALPHA_OP 0x134c
+#define NVA097_SET_BLEND_ALPHA_OP_V 31:0
+#define NVA097_SET_BLEND_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVA097_SET_BLEND_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVA097_SET_BLEND_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVA097_SET_BLEND_ALPHA_OP_V_OGL_MIN 0x00008007
+#define NVA097_SET_BLEND_ALPHA_OP_V_OGL_MAX 0x00008008
+#define NVA097_SET_BLEND_ALPHA_OP_V_D3D_ADD 0x00000001
+#define NVA097_SET_BLEND_ALPHA_OP_V_D3D_SUBTRACT 0x00000002
+#define NVA097_SET_BLEND_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVA097_SET_BLEND_ALPHA_OP_V_D3D_MIN 0x00000004
+#define NVA097_SET_BLEND_ALPHA_OP_V_D3D_MAX 0x00000005
+
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF 0x1350
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V 31:0
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVA097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVA097_SET_GLOBAL_COLOR_KEY 0x1354
+#define NVA097_SET_GLOBAL_COLOR_KEY_ENABLE 0:0
+#define NVA097_SET_GLOBAL_COLOR_KEY_ENABLE_FALSE 0x00000000
+#define NVA097_SET_GLOBAL_COLOR_KEY_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF 0x1358
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V 31:0
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVA097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVA097_SET_SINGLE_ROP_CONTROL 0x135c
+#define NVA097_SET_SINGLE_ROP_CONTROL_ENABLE 0:0
+#define NVA097_SET_SINGLE_ROP_CONTROL_ENABLE_FALSE 0x00000000
+#define NVA097_SET_SINGLE_ROP_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_BLEND(i) (0x1360+(i)*4)
+#define NVA097_SET_BLEND_ENABLE 0:0
+#define NVA097_SET_BLEND_ENABLE_FALSE 0x00000000
+#define NVA097_SET_BLEND_ENABLE_TRUE 0x00000001
+
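The blend state above splits into an op plus source/destination coefficients, each accepting either the OGL or the D3D enum encoding, with a per-target enable at SET_BLEND(i). A minimal sketch of classic post-multiplied alpha blending on render target 0, assuming a hypothetical push_method() pushbuffer helper (not part of this header):

extern void push_method(unsigned int method, unsigned int data);  /* hypothetical */

static void set_standard_alpha_blend(void)
{
    /* result = src * srcAlpha + dst * (1 - srcAlpha), OGL encodings */
    push_method(NVA097_SET_BLEND_COLOR_OP, NVA097_SET_BLEND_COLOR_OP_V_OGL_FUNC_ADD);
    push_method(NVA097_SET_BLEND_COLOR_SOURCE_COEFF, NVA097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA);
    push_method(NVA097_SET_BLEND_COLOR_DEST_COEFF, NVA097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA);
    push_method(NVA097_SET_BLEND(0), NVA097_SET_BLEND_ENABLE_TRUE);  /* target 0 */
}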
+#define NVA097_SET_STENCIL_TEST 0x1380
+#define NVA097_SET_STENCIL_TEST_ENABLE 0:0
+#define NVA097_SET_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NVA097_SET_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_STENCIL_OP_FAIL 0x1384
+#define NVA097_SET_STENCIL_OP_FAIL_V 31:0
+#define NVA097_SET_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NVA097_SET_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NVA097_SET_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NVA097_SET_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NVA097_SET_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NVA097_SET_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NVA097_SET_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NVA097_SET_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NVA097_SET_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NVA097_SET_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NVA097_SET_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NVA097_SET_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NVA097_SET_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NVA097_SET_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NVA097_SET_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NVA097_SET_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVA097_SET_STENCIL_OP_ZFAIL 0x1388
+#define NVA097_SET_STENCIL_OP_ZFAIL_V 31:0
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVA097_SET_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVA097_SET_STENCIL_OP_ZPASS 0x138c
+#define NVA097_SET_STENCIL_OP_ZPASS_V 31:0
+#define NVA097_SET_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVA097_SET_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVA097_SET_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVA097_SET_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVA097_SET_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NVA097_SET_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NVA097_SET_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NVA097_SET_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NVA097_SET_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NVA097_SET_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NVA097_SET_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NVA097_SET_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NVA097_SET_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NVA097_SET_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NVA097_SET_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NVA097_SET_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NVA097_SET_STENCIL_FUNC 0x1390
+#define NVA097_SET_STENCIL_FUNC_V 31:0
+#define NVA097_SET_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NVA097_SET_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NVA097_SET_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NVA097_SET_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVA097_SET_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NVA097_SET_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVA097_SET_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVA097_SET_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVA097_SET_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NVA097_SET_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NVA097_SET_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NVA097_SET_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVA097_SET_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVA097_SET_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVA097_SET_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVA097_SET_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVA097_SET_STENCIL_FUNC_REF 0x1394
+#define NVA097_SET_STENCIL_FUNC_REF_V 7:0
+
+#define NVA097_SET_STENCIL_FUNC_MASK 0x1398
+#define NVA097_SET_STENCIL_FUNC_MASK_V 7:0
+
+#define NVA097_SET_STENCIL_MASK 0x139c
+#define NVA097_SET_STENCIL_MASK_V 7:0
+
+#define NVA097_SET_DRAW_AUTO_START 0x13a4
+#define NVA097_SET_DRAW_AUTO_START_BYTE_COUNT 31:0
+
+#define NVA097_SET_PS_SATURATE 0x13a8
+#define NVA097_SET_PS_SATURATE_OUTPUT0 0:0
+#define NVA097_SET_PS_SATURATE_OUTPUT0_FALSE 0x00000000
+#define NVA097_SET_PS_SATURATE_OUTPUT0_TRUE 0x00000001
+#define NVA097_SET_PS_SATURATE_OUTPUT1 4:4
+#define NVA097_SET_PS_SATURATE_OUTPUT1_FALSE 0x00000000
+#define NVA097_SET_PS_SATURATE_OUTPUT1_TRUE 0x00000001
+#define NVA097_SET_PS_SATURATE_OUTPUT2 8:8
+#define NVA097_SET_PS_SATURATE_OUTPUT2_FALSE 0x00000000
+#define NVA097_SET_PS_SATURATE_OUTPUT2_TRUE 0x00000001
+#define NVA097_SET_PS_SATURATE_OUTPUT3 12:12
+#define NVA097_SET_PS_SATURATE_OUTPUT3_FALSE 0x00000000
+#define NVA097_SET_PS_SATURATE_OUTPUT3_TRUE 0x00000001
+#define NVA097_SET_PS_SATURATE_OUTPUT4 16:16
+#define NVA097_SET_PS_SATURATE_OUTPUT4_FALSE 0x00000000
+#define NVA097_SET_PS_SATURATE_OUTPUT4_TRUE 0x00000001
+#define NVA097_SET_PS_SATURATE_OUTPUT5 20:20
+#define NVA097_SET_PS_SATURATE_OUTPUT5_FALSE 0x00000000
+#define NVA097_SET_PS_SATURATE_OUTPUT5_TRUE 0x00000001
+#define NVA097_SET_PS_SATURATE_OUTPUT6 24:24
+#define NVA097_SET_PS_SATURATE_OUTPUT6_FALSE 0x00000000
+#define NVA097_SET_PS_SATURATE_OUTPUT6_TRUE 0x00000001
+#define NVA097_SET_PS_SATURATE_OUTPUT7 28:28
+#define NVA097_SET_PS_SATURATE_OUTPUT7_FALSE 0x00000000
+#define NVA097_SET_PS_SATURATE_OUTPUT7_TRUE 0x00000001
+
+#define NVA097_SET_WINDOW_ORIGIN 0x13ac
+#define NVA097_SET_WINDOW_ORIGIN_MODE 0:0
+#define NVA097_SET_WINDOW_ORIGIN_MODE_UPPER_LEFT 0x00000000
+#define NVA097_SET_WINDOW_ORIGIN_MODE_LOWER_LEFT 0x00000001
+#define NVA097_SET_WINDOW_ORIGIN_FLIP_Y 4:4
+#define NVA097_SET_WINDOW_ORIGIN_FLIP_Y_FALSE 0x00000000
+#define NVA097_SET_WINDOW_ORIGIN_FLIP_Y_TRUE 0x00000001
+
+#define NVA097_SET_LINE_WIDTH_FLOAT 0x13b0
+#define NVA097_SET_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVA097_SET_ALIASED_LINE_WIDTH_FLOAT 0x13b4
+#define NVA097_SET_ALIASED_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVA097_SET_LINE_MULTISAMPLE_OVERRIDE 0x1418
+#define NVA097_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE 0:0
+#define NVA097_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_FALSE 0x00000000
+#define NVA097_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_ALPHA_HYSTERESIS 0x1420
+#define NVA097_SET_ALPHA_HYSTERESIS_ROUNDS_OF_ALPHA 7:0
+
+#define NVA097_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424
+#define NVA097_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0
+#define NVA097_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVA097_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVA097_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4
+
+#define NVA097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x1428
+#define NVA097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0
+#define NVA097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVA097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVA097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4
+
+#define NVA097_INVALIDATE_DA_DMA_CACHE 0x142c
+#define NVA097_INVALIDATE_DA_DMA_CACHE_V 0:0
+
+#define NVA097_SET_GLOBAL_BASE_VERTEX_INDEX 0x1434
+#define NVA097_SET_GLOBAL_BASE_VERTEX_INDEX_V 31:0
+
+#define NVA097_SET_GLOBAL_BASE_INSTANCE_INDEX 0x1438
+#define NVA097_SET_GLOBAL_BASE_INSTANCE_INDEX_V 31:0
+
+#define NVA097_SET_PS_WARP_WATERMARKS 0x1450
+#define NVA097_SET_PS_WARP_WATERMARKS_LOW 15:0
+#define NVA097_SET_PS_WARP_WATERMARKS_HIGH 31:16
+
+#define NVA097_SET_PS_REGISTER_WATERMARKS 0x1454
+#define NVA097_SET_PS_REGISTER_WATERMARKS_LOW 15:0
+#define NVA097_SET_PS_REGISTER_WATERMARKS_HIGH 31:16
+
+#define NVA097_STORE_ZCULL 0x1464
+#define NVA097_STORE_ZCULL_V 0:0
+
+#define NVA097_LOAD_ZCULL 0x1500
+#define NVA097_LOAD_ZCULL_V 0:0
+
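All NVA097_* method values above are byte offsets into the class's method space; on the GPFIFO-style channels this class is used with, the method-address field of a pushbuffer command header conventionally carries the offset in 32-bit words. A sketch under that assumption (the exact header layout is channel and architecture specific and not defined by this file):

#include <stdint.h>

/* Convert a class-header byte offset to a pushbuffer method address in
 * dwords, e.g. NVA097_LOAD_ZCULL (0x1500) -> 0x540. */
static inline uint32_t method_addr_dwords(uint32_t nva097_byte_offset)
{
    return nva097_byte_offset >> 2;
}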
+#define NVA097_SET_SURFACE_CLIP_ID_HEIGHT 0x1504
+#define NVA097_SET_SURFACE_CLIP_ID_HEIGHT_V 31:0
+
+#define NVA097_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL 0x1508
+#define NVA097_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVA097_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVA097_SET_CLIP_ID_CLEAR_RECT_VERTICAL 0x150c
+#define NVA097_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVA097_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVA097_SET_USER_CLIP_ENABLE 0x1510
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE0 0:0
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE0_FALSE 0x00000000
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE0_TRUE 0x00000001
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE1 1:1
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE1_FALSE 0x00000000
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE1_TRUE 0x00000001
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE2 2:2
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE2_FALSE 0x00000000
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE2_TRUE 0x00000001
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE3 3:3
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE3_FALSE 0x00000000
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE3_TRUE 0x00000001
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE4 4:4
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE4_FALSE 0x00000000
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE4_TRUE 0x00000001
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE5 5:5
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE5_FALSE 0x00000000
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE5_TRUE 0x00000001
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE6 6:6
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE6_FALSE 0x00000000
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE6_TRUE 0x00000001
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE7 7:7
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE7_FALSE 0x00000000
+#define NVA097_SET_USER_CLIP_ENABLE_PLANE7_TRUE 0x00000001
+
+#define NVA097_SET_ZPASS_PIXEL_COUNT 0x1514
+#define NVA097_SET_ZPASS_PIXEL_COUNT_ENABLE 0:0
+#define NVA097_SET_ZPASS_PIXEL_COUNT_ENABLE_FALSE 0x00000000
+#define NVA097_SET_ZPASS_PIXEL_COUNT_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_POINT_SIZE 0x1518
+#define NVA097_SET_POINT_SIZE_V 31:0
+
+#define NVA097_SET_ZCULL_STATS 0x151c
+#define NVA097_SET_ZCULL_STATS_ENABLE 0:0
+#define NVA097_SET_ZCULL_STATS_ENABLE_FALSE 0x00000000
+#define NVA097_SET_ZCULL_STATS_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_POINT_SPRITE 0x1520
+#define NVA097_SET_POINT_SPRITE_ENABLE 0:0
+#define NVA097_SET_POINT_SPRITE_ENABLE_FALSE 0x00000000
+#define NVA097_SET_POINT_SPRITE_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_SHADER_EXCEPTIONS 0x1528
+#define NVA097_SET_SHADER_EXCEPTIONS_ENABLE 0:0
+#define NVA097_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000
+#define NVA097_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001
+
+#define NVA097_CLEAR_REPORT_VALUE 0x1530
+#define NVA097_CLEAR_REPORT_VALUE_TYPE 4:0
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_DA_VERTICES_GENERATED 0x00000012
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_DA_PRIMITIVES_GENERATED 0x00000013
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_VS_INVOCATIONS 0x00000015
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_TI_INVOCATIONS 0x00000016
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_TS_INVOCATIONS 0x00000017
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_TS_PRIMITIVES_GENERATED 0x00000018
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_GS_INVOCATIONS 0x0000001A
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_GS_PRIMITIVES_GENERATED 0x0000001B
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_VTG_PRIMITIVES_OUT 0x0000001F
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_SUCCEEDED 0x00000010
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_NEEDED 0x00000011
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000003
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_CLIPPER_INVOCATIONS 0x0000001C
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_CLIPPER_PRIMITIVES_GENERATED 0x0000001D
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_ZCULL_STATS 0x00000002
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_PS_INVOCATIONS 0x0000001E
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_ZPASS_PIXEL_CNT 0x00000001
+#define NVA097_CLEAR_REPORT_VALUE_TYPE_ALPHA_BETA_CLOCKS 0x00000004
+
+#define NVA097_SET_ANTI_ALIAS_ENABLE 0x1534
+#define NVA097_SET_ANTI_ALIAS_ENABLE_V 0:0
+#define NVA097_SET_ANTI_ALIAS_ENABLE_V_FALSE 0x00000000
+#define NVA097_SET_ANTI_ALIAS_ENABLE_V_TRUE 0x00000001
+
+#define NVA097_SET_ZT_SELECT 0x1538
+#define NVA097_SET_ZT_SELECT_TARGET_COUNT 0:0
+
+#define NVA097_SET_ANTI_ALIAS_ALPHA_CONTROL 0x153c
+#define NVA097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE 0:0
+#define NVA097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_DISABLE 0x00000000
+#define NVA097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_ENABLE 0x00000001
+#define NVA097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE 4:4
+#define NVA097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_DISABLE 0x00000000
+#define NVA097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_ENABLE 0x00000001
+
+#define NVA097_SET_RENDER_ENABLE_A 0x1550
+#define NVA097_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0
+
+#define NVA097_SET_RENDER_ENABLE_B 0x1554
+#define NVA097_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0
+
+#define NVA097_SET_RENDER_ENABLE_C 0x1558
+#define NVA097_SET_RENDER_ENABLE_C_MODE 2:0
+#define NVA097_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000
+#define NVA097_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001
+#define NVA097_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002
+#define NVA097_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003
+#define NVA097_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NVA097_SET_TEX_SAMPLER_POOL_A 0x155c
+#define NVA097_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVA097_SET_TEX_SAMPLER_POOL_B 0x1560
+#define NVA097_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVA097_SET_TEX_SAMPLER_POOL_C 0x1564
+#define NVA097_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0
+
+#define NVA097_SET_SLOPE_SCALE_DEPTH_BIAS 0x156c
+#define NVA097_SET_SLOPE_SCALE_DEPTH_BIAS_V 31:0
+
+#define NVA097_SET_ANTI_ALIASED_LINE 0x1570
+#define NVA097_SET_ANTI_ALIASED_LINE_ENABLE 0:0
+#define NVA097_SET_ANTI_ALIASED_LINE_ENABLE_FALSE 0x00000000
+#define NVA097_SET_ANTI_ALIASED_LINE_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_TEX_HEADER_POOL_A 0x1574
+#define NVA097_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVA097_SET_TEX_HEADER_POOL_B 0x1578
+#define NVA097_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVA097_SET_TEX_HEADER_POOL_C 0x157c
+#define NVA097_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0
+
+#define NVA097_SET_ACTIVE_ZCULL_REGION 0x1590
+#define NVA097_SET_ACTIVE_ZCULL_REGION_ID 5:0
+
+#define NVA097_SET_TWO_SIDED_STENCIL_TEST 0x1594
+#define NVA097_SET_TWO_SIDED_STENCIL_TEST_ENABLE 0:0
+#define NVA097_SET_TWO_SIDED_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NVA097_SET_TWO_SIDED_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_BACK_STENCIL_OP_FAIL 0x1598
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V 31:0
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NVA097_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL 0x159c
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V 31:0
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVA097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS 0x15a0
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V 31:0
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NVA097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NVA097_SET_BACK_STENCIL_FUNC 0x15a4
+#define NVA097_SET_BACK_STENCIL_FUNC_V 31:0
+#define NVA097_SET_BACK_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NVA097_SET_BACK_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NVA097_SET_BACK_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NVA097_SET_BACK_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVA097_SET_BACK_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NVA097_SET_BACK_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVA097_SET_BACK_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVA097_SET_BACK_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVA097_SET_BACK_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NVA097_SET_BACK_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NVA097_SET_BACK_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NVA097_SET_BACK_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVA097_SET_BACK_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVA097_SET_BACK_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVA097_SET_BACK_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVA097_SET_BACK_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVA097_SET_SRGB_WRITE 0x15b8
+#define NVA097_SET_SRGB_WRITE_ENABLE 0:0
+#define NVA097_SET_SRGB_WRITE_ENABLE_FALSE 0x00000000
+#define NVA097_SET_SRGB_WRITE_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_DEPTH_BIAS 0x15bc
+#define NVA097_SET_DEPTH_BIAS_V 31:0
+
+#define NVA097_SET_ZCULL_REGION_FORMAT 0x15c8
+#define NVA097_SET_ZCULL_REGION_FORMAT_TYPE 3:0
+#define NVA097_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X4 0x00000000
+#define NVA097_SET_ZCULL_REGION_FORMAT_TYPE_ZS_4X4 0x00000001
+#define NVA097_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X2 0x00000002
+#define NVA097_SET_ZCULL_REGION_FORMAT_TYPE_Z_2X4 0x00000003
+#define NVA097_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X8_4X4 0x00000004
+#define NVA097_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_4X2 0x00000005
+#define NVA097_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_2X4 0x00000006
+#define NVA097_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X16_4X8 0x00000007
+#define NVA097_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_2X2 0x00000008
+#define NVA097_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_4X2 0x00000009
+#define NVA097_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_2X4 0x0000000A
+#define NVA097_SET_ZCULL_REGION_FORMAT_TYPE_ZS_8X8_2X2 0x0000000B
+#define NVA097_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_1X1 0x0000000C
+
+#define NVA097_SET_RT_LAYER 0x15cc
+#define NVA097_SET_RT_LAYER_V 15:0
+#define NVA097_SET_RT_LAYER_CONTROL 16:16
+#define NVA097_SET_RT_LAYER_CONTROL_V_SELECTS_LAYER 0x00000000
+#define NVA097_SET_RT_LAYER_CONTROL_GEOMETRY_SHADER_SELECTS_LAYER 0x00000001
+
+#define NVA097_SET_ANTI_ALIAS 0x15d0
+#define NVA097_SET_ANTI_ALIAS_SAMPLES 3:0
+#define NVA097_SET_ANTI_ALIAS_SAMPLES_MODE_1X1 0x00000000
+#define NVA097_SET_ANTI_ALIAS_SAMPLES_MODE_2X1 0x00000001
+#define NVA097_SET_ANTI_ALIAS_SAMPLES_MODE_2X2 0x00000002
+#define NVA097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2 0x00000003
+#define NVA097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVA097_SET_ANTI_ALIAS_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVA097_SET_ANTI_ALIAS_SAMPLES_MODE_4X4 0x00000006
+#define NVA097_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_4 0x00000008
+#define NVA097_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_12 0x00000009
+#define NVA097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_8 0x0000000A
+#define NVA097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_24 0x0000000B
+
+#define NVA097_SET_EDGE_FLAG 0x15e4
+#define NVA097_SET_EDGE_FLAG_V 0:0
+#define NVA097_SET_EDGE_FLAG_V_FALSE 0x00000000
+#define NVA097_SET_EDGE_FLAG_V_TRUE 0x00000001
+
+#define NVA097_DRAW_INLINE_INDEX 0x15e8
+#define NVA097_DRAW_INLINE_INDEX_V 31:0
+
+#define NVA097_SET_INLINE_INDEX2X16_ALIGN 0x15ec
+#define NVA097_SET_INLINE_INDEX2X16_ALIGN_COUNT 30:0
+#define NVA097_SET_INLINE_INDEX2X16_ALIGN_START_ODD 31:31
+#define NVA097_SET_INLINE_INDEX2X16_ALIGN_START_ODD_FALSE 0x00000000
+#define NVA097_SET_INLINE_INDEX2X16_ALIGN_START_ODD_TRUE 0x00000001
+
+#define NVA097_DRAW_INLINE_INDEX2X16 0x15f0
+#define NVA097_DRAW_INLINE_INDEX2X16_EVEN 15:0
+#define NVA097_DRAW_INLINE_INDEX2X16_ODD 31:16
+
+#define NVA097_SET_VERTEX_GLOBAL_BASE_OFFSET_A 0x15f4
+#define NVA097_SET_VERTEX_GLOBAL_BASE_OFFSET_A_UPPER 7:0
+
+#define NVA097_SET_VERTEX_GLOBAL_BASE_OFFSET_B 0x15f8
+#define NVA097_SET_VERTEX_GLOBAL_BASE_OFFSET_B_LOWER 31:0
+
+#define NVA097_SET_ZCULL_REGION_PIXEL_OFFSET_A 0x15fc
+#define NVA097_SET_ZCULL_REGION_PIXEL_OFFSET_A_WIDTH 15:0
+
+#define NVA097_SET_ZCULL_REGION_PIXEL_OFFSET_B 0x1600
+#define NVA097_SET_ZCULL_REGION_PIXEL_OFFSET_B_HEIGHT 15:0
+
+#define NVA097_SET_POINT_SPRITE_SELECT 0x1604
+#define NVA097_SET_POINT_SPRITE_SELECT_RMODE 1:0
+#define NVA097_SET_POINT_SPRITE_SELECT_RMODE_ZERO 0x00000000
+#define NVA097_SET_POINT_SPRITE_SELECT_RMODE_FROM_R 0x00000001
+#define NVA097_SET_POINT_SPRITE_SELECT_RMODE_FROM_S 0x00000002
+#define NVA097_SET_POINT_SPRITE_SELECT_ORIGIN 2:2
+#define NVA097_SET_POINT_SPRITE_SELECT_ORIGIN_BOTTOM 0x00000000
+#define NVA097_SET_POINT_SPRITE_SELECT_ORIGIN_TOP 0x00000001
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE0 3:3
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE0_PASSTHROUGH 0x00000000
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE0_GENERATE 0x00000001
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE1 4:4
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE1_PASSTHROUGH 0x00000000
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE1_GENERATE 0x00000001
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE2 5:5
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE2_PASSTHROUGH 0x00000000
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE2_GENERATE 0x00000001
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE3 6:6
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE3_PASSTHROUGH 0x00000000
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE3_GENERATE 0x00000001
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE4 7:7
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE4_PASSTHROUGH 0x00000000
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE4_GENERATE 0x00000001
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE5 8:8
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE5_PASSTHROUGH 0x00000000
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE5_GENERATE 0x00000001
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE6 9:9
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE6_PASSTHROUGH 0x00000000
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE6_GENERATE 0x00000001
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE7 10:10
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE7_PASSTHROUGH 0x00000000
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE7_GENERATE 0x00000001
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE8 11:11
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE8_PASSTHROUGH 0x00000000
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE8_GENERATE 0x00000001
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE9 12:12
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE9_PASSTHROUGH 0x00000000
+#define NVA097_SET_POINT_SPRITE_SELECT_TEXTURE9_GENERATE 0x00000001
+
+#define NVA097_SET_PROGRAM_REGION_A 0x1608
+#define NVA097_SET_PROGRAM_REGION_A_ADDRESS_UPPER 7:0
+
+#define NVA097_SET_PROGRAM_REGION_B 0x160c
+#define NVA097_SET_PROGRAM_REGION_B_ADDRESS_LOWER 31:0
+
+#define NVA097_SET_ATTRIBUTE_DEFAULT 0x1610
+#define NVA097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE 0:0
+#define NVA097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_0001 0x00000000
+#define NVA097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_1111 0x00000001
+#define NVA097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR 1:1
+#define NVA097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0000 0x00000000
+#define NVA097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0001 0x00000001
+#define NVA097_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR 2:2
+#define NVA097_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0000 0x00000000
+#define NVA097_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0001 0x00000001
+#define NVA097_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE 3:3
+#define NVA097_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0000 0x00000000
+#define NVA097_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0001 0x00000001
+#define NVA097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0 4:4
+#define NVA097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_0001 0x00000000
+#define NVA097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_1111 0x00000001
+#define NVA097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15 5:5
+#define NVA097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0000 0x00000000
+#define NVA097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0001 0x00000001
+
+#define NVA097_END 0x1614
+#define NVA097_END_V 0:0
+
+#define NVA097_BEGIN 0x1618
+#define NVA097_BEGIN_OP 15:0
+#define NVA097_BEGIN_OP_POINTS 0x00000000
+#define NVA097_BEGIN_OP_LINES 0x00000001
+#define NVA097_BEGIN_OP_LINE_LOOP 0x00000002
+#define NVA097_BEGIN_OP_LINE_STRIP 0x00000003
+#define NVA097_BEGIN_OP_TRIANGLES 0x00000004
+#define NVA097_BEGIN_OP_TRIANGLE_STRIP 0x00000005
+#define NVA097_BEGIN_OP_TRIANGLE_FAN 0x00000006
+#define NVA097_BEGIN_OP_QUADS 0x00000007
+#define NVA097_BEGIN_OP_QUAD_STRIP 0x00000008
+#define NVA097_BEGIN_OP_POLYGON 0x00000009
+#define NVA097_BEGIN_OP_LINELIST_ADJCY 0x0000000A
+#define NVA097_BEGIN_OP_LINESTRIP_ADJCY 0x0000000B
+#define NVA097_BEGIN_OP_TRIANGLELIST_ADJCY 0x0000000C
+#define NVA097_BEGIN_OP_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVA097_BEGIN_OP_PATCH 0x0000000E
+#define NVA097_BEGIN_PRIMITIVE_ID 24:24
+#define NVA097_BEGIN_PRIMITIVE_ID_FIRST 0x00000000
+#define NVA097_BEGIN_PRIMITIVE_ID_UNCHANGED 0x00000001
+#define NVA097_BEGIN_INSTANCE_ID 27:26
+#define NVA097_BEGIN_INSTANCE_ID_FIRST 0x00000000
+#define NVA097_BEGIN_INSTANCE_ID_SUBSEQUENT 0x00000001
+#define NVA097_BEGIN_INSTANCE_ID_UNCHANGED 0x00000002
+#define NVA097_BEGIN_SPLIT_MODE 30:29
+#define NVA097_BEGIN_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000
+#define NVA097_BEGIN_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001
+#define NVA097_BEGIN_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002
+#define NVA097_BEGIN_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003
+
+#define NVA097_SET_VERTEX_ID_COPY 0x161c
+#define NVA097_SET_VERTEX_ID_COPY_ENABLE 0:0
+#define NVA097_SET_VERTEX_ID_COPY_ENABLE_FALSE 0x00000000
+#define NVA097_SET_VERTEX_ID_COPY_ENABLE_TRUE 0x00000001
+#define NVA097_SET_VERTEX_ID_COPY_ATTRIBUTE_SLOT 11:4
+
+#define NVA097_ADD_TO_PRIMITIVE_ID 0x1620
+#define NVA097_ADD_TO_PRIMITIVE_ID_V 31:0
+
+#define NVA097_LOAD_PRIMITIVE_ID 0x1624
+#define NVA097_LOAD_PRIMITIVE_ID_V 31:0
+
+#define NVA097_SET_SHADER_BASED_CULL 0x162c
+#define NVA097_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE 1:1
+#define NVA097_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_FALSE 0x00000000
+#define NVA097_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_TRUE 0x00000001
+#define NVA097_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE 0:0
+#define NVA097_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_FALSE 0x00000000
+#define NVA097_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_CLASS_VERSION 0x1638
+#define NVA097_SET_CLASS_VERSION_CURRENT 15:0
+#define NVA097_SET_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
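NVA097_BEGIN packs the topology into OP (15:0) together with the primitive-ID, instance-ID, and split-mode policies, and a draw is bracketed by BEGIN and END. A minimal sketch of an inline triangle draw, again assuming the hypothetical push_method() helper; leaving the upper fields zero selects the ..._FIRST and ..._NORMAL_BEGIN_NORMAL_END defaults:

extern void push_method(unsigned int method, unsigned int data);  /* hypothetical */

static void draw_inline_triangles(void)
{
    push_method(NVA097_BEGIN, NVA097_BEGIN_OP_TRIANGLES);  /* OP in bits 15:0 */
    /* ... vertex/index data via DRAW_INLINE_VERTEX or DRAW_INLINE_INDEX ... */
    push_method(NVA097_END, 0);
}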
+#define NVA097_SET_VAB_PAGE 0x163c
+#define NVA097_SET_VAB_PAGE_READ_SELECT 0:0
+#define NVA097_SET_VAB_PAGE_READ_SELECT_PAGES_0_AND_1 0x00000000
+#define NVA097_SET_VAB_PAGE_READ_SELECT_PAGES_0_AND_2 0x00000001
+
+#define NVA097_DRAW_INLINE_VERTEX 0x1640
+#define NVA097_DRAW_INLINE_VERTEX_V 31:0
+
+#define NVA097_SET_DA_PRIMITIVE_RESTART 0x1644
+#define NVA097_SET_DA_PRIMITIVE_RESTART_ENABLE 0:0
+#define NVA097_SET_DA_PRIMITIVE_RESTART_ENABLE_FALSE 0x00000000
+#define NVA097_SET_DA_PRIMITIVE_RESTART_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_DA_PRIMITIVE_RESTART_INDEX 0x1648
+#define NVA097_SET_DA_PRIMITIVE_RESTART_INDEX_V 31:0
+
+#define NVA097_SET_DA_OUTPUT 0x164c
+#define NVA097_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START 12:12
+#define NVA097_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_FALSE 0x00000000
+#define NVA097_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_TRUE 0x00000001
+
+#define NVA097_SET_ANTI_ALIASED_POINT 0x1658
+#define NVA097_SET_ANTI_ALIASED_POINT_ENABLE 0:0
+#define NVA097_SET_ANTI_ALIASED_POINT_ENABLE_FALSE 0x00000000
+#define NVA097_SET_ANTI_ALIASED_POINT_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_POINT_CENTER_MODE 0x165c
+#define NVA097_SET_POINT_CENTER_MODE_V 31:0
+#define NVA097_SET_POINT_CENTER_MODE_V_OGL 0x00000000
+#define NVA097_SET_POINT_CENTER_MODE_V_D3D 0x00000001
+
+#define NVA097_SET_LINE_SMOOTH_PARAMETERS 0x1668
+#define NVA097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF 31:0
+#define NVA097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_00 0x00000000
+#define NVA097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_33 0x00000001
+#define NVA097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_60 0x00000002
+
+#define NVA097_SET_LINE_STIPPLE 0x166c
+#define NVA097_SET_LINE_STIPPLE_ENABLE 0:0
+#define NVA097_SET_LINE_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVA097_SET_LINE_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_LINE_SMOOTH_EDGE_TABLE(i) (0x1670+(i)*4)
+#define NVA097_SET_LINE_SMOOTH_EDGE_TABLE_V0 7:0
+#define NVA097_SET_LINE_SMOOTH_EDGE_TABLE_V1 15:8
+#define NVA097_SET_LINE_SMOOTH_EDGE_TABLE_V2 23:16
+#define NVA097_SET_LINE_SMOOTH_EDGE_TABLE_V3 31:24
+
+#define NVA097_SET_LINE_STIPPLE_PARAMETERS 0x1680
+#define NVA097_SET_LINE_STIPPLE_PARAMETERS_FACTOR 7:0
+#define NVA097_SET_LINE_STIPPLE_PARAMETERS_PATTERN 23:8
+
+#define NVA097_SET_PROVOKING_VERTEX 0x1684
+#define NVA097_SET_PROVOKING_VERTEX_V 0:0
+#define NVA097_SET_PROVOKING_VERTEX_V_FIRST 0x00000000
+#define NVA097_SET_PROVOKING_VERTEX_V_LAST 0x00000001
+
+#define NVA097_SET_TWO_SIDED_LIGHT 0x1688
+#define NVA097_SET_TWO_SIDED_LIGHT_ENABLE 0:0
+#define NVA097_SET_TWO_SIDED_LIGHT_ENABLE_FALSE 0x00000000
+#define NVA097_SET_TWO_SIDED_LIGHT_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_POLYGON_STIPPLE 0x168c
+#define NVA097_SET_POLYGON_STIPPLE_ENABLE 0:0
+#define NVA097_SET_POLYGON_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVA097_SET_POLYGON_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVA097_SET_SHADER_CONTROL 0x1690
+#define NVA097_SET_SHADER_CONTROL_DEFAULT_PARTIAL 0:0
+#define NVA097_SET_SHADER_CONTROL_DEFAULT_PARTIAL_ZERO 0x00000000
+#define NVA097_SET_SHADER_CONTROL_DEFAULT_PARTIAL_INFINITY 0x00000001
+#define NVA097_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR 1:1
+#define NVA097_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_LEGACY 0x00000000
+#define NVA097_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_FP64_COMPATIBLE 0x00000001
+#define NVA097_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR 2:2
+#define NVA097_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_ZERO 0x00000000
+#define NVA097_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_INDEFINITE 0x00000001
+
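SET_LINE_STIPPLE_PARAMETERS above packs a repeat factor (7:0) and a 16-bit pattern (23:8) into one data word, mirroring glLineStipple(factor, pattern). A small packing helper, derived directly from those field definitions:

/* Pack the SET_LINE_STIPPLE_PARAMETERS data word: factor in bits 7:0,
 * pattern in bits 23:8. */
static inline unsigned int line_stipple_params(unsigned int factor, unsigned int pattern)
{
    return (factor & 0xFFU) | ((pattern & 0xFFFFU) << 8);
}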
+#define NVA097_LAUNCH_VERTEX 0x169c
+#define NVA097_LAUNCH_VERTEX_V 0:0
+
+#define NVA097_CHECK_CLASS_VERSION 0x16a0
+#define NVA097_CHECK_CLASS_VERSION_CURRENT 15:0
+#define NVA097_CHECK_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVA097_SET_SPH_VERSION 0x16a4
+#define NVA097_SET_SPH_VERSION_CURRENT 15:0
+#define NVA097_SET_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVA097_CHECK_SPH_VERSION 0x16a8
+#define NVA097_CHECK_SPH_VERSION_CURRENT 15:0
+#define NVA097_CHECK_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVA097_SET_ALPHA_TO_COVERAGE_OVERRIDE 0x16b4
+#define NVA097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE 0:0
+#define NVA097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000
+#define NVA097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001
+#define NVA097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT 1:1
+#define NVA097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_DISABLE 0x00000000
+#define NVA097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_ENABLE 0x00000001
+
+#define NVA097_SET_POLYGON_STIPPLE_PATTERN(i) (0x1700+(i)*4)
+#define NVA097_SET_POLYGON_STIPPLE_PATTERN_V 31:0
+
+#define NVA097_SET_AAM_VERSION 0x1790
+#define NVA097_SET_AAM_VERSION_CURRENT 15:0
+#define NVA097_SET_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVA097_CHECK_AAM_VERSION 0x1794
+#define NVA097_CHECK_AAM_VERSION_CURRENT 15:0
+#define NVA097_CHECK_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVA097_SET_ZT_LAYER 0x179c
+#define NVA097_SET_ZT_LAYER_OFFSET 15:0
+
+#define NVA097_SET_VAB_MEMORY_AREA_A 0x17bc
+#define NVA097_SET_VAB_MEMORY_AREA_A_OFFSET_UPPER 7:0
+
+#define NVA097_SET_VAB_MEMORY_AREA_B 0x17c0
+#define NVA097_SET_VAB_MEMORY_AREA_B_OFFSET_LOWER 31:0
+
+#define NVA097_SET_VAB_MEMORY_AREA_C 0x17c4
+#define NVA097_SET_VAB_MEMORY_AREA_C_SIZE 1:0
+#define NVA097_SET_VAB_MEMORY_AREA_C_SIZE_BYTES_64K 0x00000001
+#define NVA097_SET_VAB_MEMORY_AREA_C_SIZE_BYTES_128K 0x00000002
+#define NVA097_SET_VAB_MEMORY_AREA_C_SIZE_BYTES_256K 0x00000003
+
+#define NVA097_SET_INDEX_BUFFER_A 0x17c8
+#define NVA097_SET_INDEX_BUFFER_A_ADDRESS_UPPER 7:0
+
+#define NVA097_SET_INDEX_BUFFER_B 0x17cc
+#define NVA097_SET_INDEX_BUFFER_B_ADDRESS_LOWER 31:0
+
+#define NVA097_SET_INDEX_BUFFER_C 0x17d0
+#define NVA097_SET_INDEX_BUFFER_C_LIMIT_ADDRESS_UPPER 7:0
+
+#define NVA097_SET_INDEX_BUFFER_D 0x17d4
+#define NVA097_SET_INDEX_BUFFER_D_LIMIT_ADDRESS_LOWER 31:0
+
+#define NVA097_SET_INDEX_BUFFER_E 0x17d8
+#define NVA097_SET_INDEX_BUFFER_E_INDEX_SIZE 1:0
+#define NVA097_SET_INDEX_BUFFER_E_INDEX_SIZE_ONE_BYTE 0x00000000
+#define NVA097_SET_INDEX_BUFFER_E_INDEX_SIZE_TWO_BYTES 0x00000001
+#define NVA097_SET_INDEX_BUFFER_E_INDEX_SIZE_FOUR_BYTES 0x00000002
+
+#define NVA097_SET_INDEX_BUFFER_F 0x17dc
+#define NVA097_SET_INDEX_BUFFER_F_FIRST 31:0
+
+#define NVA097_DRAW_INDEX_BUFFER 0x17e0
+#define NVA097_DRAW_INDEX_BUFFER_COUNT 31:0
+
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST 0x17e4
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST 0x17e8
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST 0x17ec
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f0
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVA097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f4
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVA097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f8
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVA097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVA097_SET_DEPTH_BIAS_CLAMP 0x187c
+#define NVA097_SET_DEPTH_BIAS_CLAMP_V 31:0
+
+#define NVA097_SET_VERTEX_STREAM_INSTANCE_A(i) (0x1880+(i)*4)
+#define NVA097_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED 0:0
+#define NVA097_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_FALSE 0x00000000
+#define NVA097_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_TRUE 0x00000001
+
+#define NVA097_SET_VERTEX_STREAM_INSTANCE_B(i) (0x18c0+(i)*4)
+#define NVA097_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED 0:0
+#define NVA097_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_FALSE 0x00000000
+#define NVA097_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_TRUE 0x00000001
+
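The DRAW_INDEX_BUFFER*_BEGIN_END_INSTANCE_* variants above fold a whole small draw into one data word: starting index in 15:0, index count in 27:16, topology in 31:28. A packing helper derived from those fields; note the 12-bit COUNT caps such draws at 4095 indices, beyond which the separate SET_INDEX_BUFFER_F / DRAW_INDEX_BUFFER path is presumably required:

/* Pack first/count/topology for the one-dword instanced draw methods.
 * first: 16 bits, count: 12 bits, topology: 4 bits (e.g. the
 * ..._TOPOLOGY_TRIANGLES encoding, 0x4). */
static inline unsigned int packed_indexed_draw(unsigned int first, unsigned int count, unsigned int topology)
{
    return (first & 0xFFFFU) | ((count & 0xFFFU) << 16) | ((topology & 0xFU) << 28);
}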
NVA097_SET_ATTRIBUTE_POINT_SIZE_ENABLE_TRUE 0x00000001 +#define NVA097_SET_ATTRIBUTE_POINT_SIZE_SLOT 11:4 + +#define NVA097_OGL_SET_CULL 0x1918 +#define NVA097_OGL_SET_CULL_ENABLE 0:0 +#define NVA097_OGL_SET_CULL_ENABLE_FALSE 0x00000000 +#define NVA097_OGL_SET_CULL_ENABLE_TRUE 0x00000001 + +#define NVA097_OGL_SET_FRONT_FACE 0x191c +#define NVA097_OGL_SET_FRONT_FACE_V 31:0 +#define NVA097_OGL_SET_FRONT_FACE_V_CW 0x00000900 +#define NVA097_OGL_SET_FRONT_FACE_V_CCW 0x00000901 + +#define NVA097_OGL_SET_CULL_FACE 0x1920 +#define NVA097_OGL_SET_CULL_FACE_V 31:0 +#define NVA097_OGL_SET_CULL_FACE_V_FRONT 0x00000404 +#define NVA097_OGL_SET_CULL_FACE_V_BACK 0x00000405 +#define NVA097_OGL_SET_CULL_FACE_V_FRONT_AND_BACK 0x00000408 + +#define NVA097_SET_VIEWPORT_PIXEL 0x1924 +#define NVA097_SET_VIEWPORT_PIXEL_CENTER 0:0 +#define NVA097_SET_VIEWPORT_PIXEL_CENTER_AT_HALF_INTEGERS 0x00000000 +#define NVA097_SET_VIEWPORT_PIXEL_CENTER_AT_INTEGERS 0x00000001 + +#define NVA097_SET_VIEWPORT_SCALE_OFFSET 0x192c +#define NVA097_SET_VIEWPORT_SCALE_OFFSET_ENABLE 0:0 +#define NVA097_SET_VIEWPORT_SCALE_OFFSET_ENABLE_FALSE 0x00000000 +#define NVA097_SET_VIEWPORT_SCALE_OFFSET_ENABLE_TRUE 0x00000001 + +#define NVA097_INVALIDATE_CONSTANT_BUFFER_CACHE 0x1930 +#define NVA097_INVALIDATE_CONSTANT_BUFFER_CACHE_THRU_L2 0:0 +#define NVA097_INVALIDATE_CONSTANT_BUFFER_CACHE_THRU_L2_FALSE 0x00000000 +#define NVA097_INVALIDATE_CONSTANT_BUFFER_CACHE_THRU_L2_TRUE 0x00000001 + +#define NVA097_SET_VIEWPORT_CLIP_CONTROL 0x193c +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE 0:0 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_FALSE 0x00000000 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_TRUE 0x00000001 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z 3:3 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLIP 0x00000000 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLAMP 0x00000001 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z 4:4 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLIP 0x00000000 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLAMP 0x00000001 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND 7:7 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_256 0x00000000 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_1 0x00000001 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND 10:10 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_256 0x00000000 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_1 0x00000001 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP 13:11 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP 0x00000000 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_PASSTHRU 0x00000001 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XY_CLIP 0x00000002 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XYZ_CLIP 0x00000003 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP_NO_Z_CULL 0x00000004 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_Z_CLIP 0x00000005 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z 2:1 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SAME_AS_XY_GUARDBAND 0x00000000 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_256 0x00000001 +#define NVA097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_1 0x00000002 + +#define NVA097_SET_USER_CLIP_OP 0x1940 +#define 
NVA097_SET_USER_CLIP_OP_PLANE0 0:0 +#define NVA097_SET_USER_CLIP_OP_PLANE0_CLIP 0x00000000 +#define NVA097_SET_USER_CLIP_OP_PLANE0_CULL 0x00000001 +#define NVA097_SET_USER_CLIP_OP_PLANE1 4:4 +#define NVA097_SET_USER_CLIP_OP_PLANE1_CLIP 0x00000000 +#define NVA097_SET_USER_CLIP_OP_PLANE1_CULL 0x00000001 +#define NVA097_SET_USER_CLIP_OP_PLANE2 8:8 +#define NVA097_SET_USER_CLIP_OP_PLANE2_CLIP 0x00000000 +#define NVA097_SET_USER_CLIP_OP_PLANE2_CULL 0x00000001 +#define NVA097_SET_USER_CLIP_OP_PLANE3 12:12 +#define NVA097_SET_USER_CLIP_OP_PLANE3_CLIP 0x00000000 +#define NVA097_SET_USER_CLIP_OP_PLANE3_CULL 0x00000001 +#define NVA097_SET_USER_CLIP_OP_PLANE4 16:16 +#define NVA097_SET_USER_CLIP_OP_PLANE4_CLIP 0x00000000 +#define NVA097_SET_USER_CLIP_OP_PLANE4_CULL 0x00000001 +#define NVA097_SET_USER_CLIP_OP_PLANE5 20:20 +#define NVA097_SET_USER_CLIP_OP_PLANE5_CLIP 0x00000000 +#define NVA097_SET_USER_CLIP_OP_PLANE5_CULL 0x00000001 +#define NVA097_SET_USER_CLIP_OP_PLANE6 24:24 +#define NVA097_SET_USER_CLIP_OP_PLANE6_CLIP 0x00000000 +#define NVA097_SET_USER_CLIP_OP_PLANE6_CULL 0x00000001 +#define NVA097_SET_USER_CLIP_OP_PLANE7 28:28 +#define NVA097_SET_USER_CLIP_OP_PLANE7_CLIP 0x00000000 +#define NVA097_SET_USER_CLIP_OP_PLANE7_CULL 0x00000001 + +#define NVA097_SET_RENDER_ENABLE_OVERRIDE 0x1944 +#define NVA097_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define NVA097_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define NVA097_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define NVA097_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + +#define NVA097_SET_PRIMITIVE_TOPOLOGY_CONTROL 0x1948 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE 0:0 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_TOPOLOGY_IN_BEGIN_METHODS 0x00000000 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_SEPARATE_TOPOLOGY_STATE 0x00000001 + +#define NVA097_SET_WINDOW_CLIP_ENABLE 0x194c +#define NVA097_SET_WINDOW_CLIP_ENABLE_V 0:0 +#define NVA097_SET_WINDOW_CLIP_ENABLE_V_FALSE 0x00000000 +#define NVA097_SET_WINDOW_CLIP_ENABLE_V_TRUE 0x00000001 + +#define NVA097_SET_WINDOW_CLIP_TYPE 0x1950 +#define NVA097_SET_WINDOW_CLIP_TYPE_V 1:0 +#define NVA097_SET_WINDOW_CLIP_TYPE_V_INCLUSIVE 0x00000000 +#define NVA097_SET_WINDOW_CLIP_TYPE_V_EXCLUSIVE 0x00000001 +#define NVA097_SET_WINDOW_CLIP_TYPE_V_CLIPALL 0x00000002 + +#define NVA097_INVALIDATE_ZCULL 0x1958 +#define NVA097_INVALIDATE_ZCULL_V 31:0 +#define NVA097_INVALIDATE_ZCULL_V_INVALIDATE 0x00000000 + +#define NVA097_SET_ZCULL 0x1968 +#define NVA097_SET_ZCULL_Z_ENABLE 0:0 +#define NVA097_SET_ZCULL_Z_ENABLE_FALSE 0x00000000 +#define NVA097_SET_ZCULL_Z_ENABLE_TRUE 0x00000001 +#define NVA097_SET_ZCULL_STENCIL_ENABLE 4:4 +#define NVA097_SET_ZCULL_STENCIL_ENABLE_FALSE 0x00000000 +#define NVA097_SET_ZCULL_STENCIL_ENABLE_TRUE 0x00000001 + +#define NVA097_SET_ZCULL_BOUNDS 0x196c +#define NVA097_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE 0:0 +#define NVA097_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_FALSE 0x00000000 +#define NVA097_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_TRUE 0x00000001 +#define NVA097_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE 4:4 +#define NVA097_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_FALSE 0x00000000 +#define NVA097_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_TRUE 0x00000001 + +#define NVA097_SET_PRIMITIVE_TOPOLOGY 0x1970 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V 15:0 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_POINTLIST 0x00000001 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LINELIST 0x00000002 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP 
0x00000003 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST 0x00000004 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP 0x00000005 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LINELIST_ADJCY 0x0000000A +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP_ADJCY 0x0000000B +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST_ADJCY 0x0000000C +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_PATCHLIST 0x0000000E +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_POINTS 0x00001001 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST 0x00001002 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST 0x00001003 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST 0x0000100F +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINESTRIP 0x00001010 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINESTRIP 0x00001011 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLELIST 0x00001012 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLESTRIP 0x00001013 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLESTRIP 0x00001014 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN 0x00001015 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLEFAN 0x00001016 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN_IMM 0x00001017 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST_IMM 0x00001018 +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST2 0x0000101A +#define NVA097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST2 0x0000101B + +#define NVA097_ZCULL_SYNC 0x1978 +#define NVA097_ZCULL_SYNC_V 31:0 + +#define NVA097_SET_CLIP_ID_TEST 0x197c +#define NVA097_SET_CLIP_ID_TEST_ENABLE 0:0 +#define NVA097_SET_CLIP_ID_TEST_ENABLE_FALSE 0x00000000 +#define NVA097_SET_CLIP_ID_TEST_ENABLE_TRUE 0x00000001 + +#define NVA097_SET_SURFACE_CLIP_ID_WIDTH 0x1980 +#define NVA097_SET_SURFACE_CLIP_ID_WIDTH_V 31:0 + +#define NVA097_SET_CLIP_ID 0x1984 +#define NVA097_SET_CLIP_ID_V 31:0 + +#define NVA097_SET_DEPTH_BOUNDS_TEST 0x19bc +#define NVA097_SET_DEPTH_BOUNDS_TEST_ENABLE 0:0 +#define NVA097_SET_DEPTH_BOUNDS_TEST_ENABLE_FALSE 0x00000000 +#define NVA097_SET_DEPTH_BOUNDS_TEST_ENABLE_TRUE 0x00000001 + +#define NVA097_SET_BLEND_FLOAT_OPTION 0x19c0 +#define NVA097_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO 0:0 +#define NVA097_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_FALSE 0x00000000 +#define NVA097_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_TRUE 0x00000001 + +#define NVA097_SET_LOGIC_OP 0x19c4 +#define NVA097_SET_LOGIC_OP_ENABLE 0:0 +#define NVA097_SET_LOGIC_OP_ENABLE_FALSE 0x00000000 +#define NVA097_SET_LOGIC_OP_ENABLE_TRUE 0x00000001 + +#define NVA097_SET_LOGIC_OP_FUNC 0x19c8 +#define NVA097_SET_LOGIC_OP_FUNC_V 31:0 +#define NVA097_SET_LOGIC_OP_FUNC_V_CLEAR 0x00001500 +#define NVA097_SET_LOGIC_OP_FUNC_V_AND 0x00001501 +#define NVA097_SET_LOGIC_OP_FUNC_V_AND_REVERSE 0x00001502 +#define NVA097_SET_LOGIC_OP_FUNC_V_COPY 0x00001503 +#define NVA097_SET_LOGIC_OP_FUNC_V_AND_INVERTED 0x00001504 +#define NVA097_SET_LOGIC_OP_FUNC_V_NOOP 0x00001505 +#define NVA097_SET_LOGIC_OP_FUNC_V_XOR 0x00001506 +#define NVA097_SET_LOGIC_OP_FUNC_V_OR 0x00001507 +#define NVA097_SET_LOGIC_OP_FUNC_V_NOR 0x00001508 +#define NVA097_SET_LOGIC_OP_FUNC_V_EQUIV 0x00001509 +#define NVA097_SET_LOGIC_OP_FUNC_V_INVERT 0x0000150A +#define NVA097_SET_LOGIC_OP_FUNC_V_OR_REVERSE 0x0000150B +#define NVA097_SET_LOGIC_OP_FUNC_V_COPY_INVERTED 0x0000150C +#define 
NVA097_SET_LOGIC_OP_FUNC_V_OR_INVERTED 0x0000150D +#define NVA097_SET_LOGIC_OP_FUNC_V_NAND 0x0000150E +#define NVA097_SET_LOGIC_OP_FUNC_V_SET 0x0000150F + +#define NVA097_SET_Z_COMPRESSION 0x19cc +#define NVA097_SET_Z_COMPRESSION_ENABLE 0:0 +#define NVA097_SET_Z_COMPRESSION_ENABLE_FALSE 0x00000000 +#define NVA097_SET_Z_COMPRESSION_ENABLE_TRUE 0x00000001 + +#define NVA097_CLEAR_SURFACE 0x19d0 +#define NVA097_CLEAR_SURFACE_Z_ENABLE 0:0 +#define NVA097_CLEAR_SURFACE_Z_ENABLE_FALSE 0x00000000 +#define NVA097_CLEAR_SURFACE_Z_ENABLE_TRUE 0x00000001 +#define NVA097_CLEAR_SURFACE_STENCIL_ENABLE 1:1 +#define NVA097_CLEAR_SURFACE_STENCIL_ENABLE_FALSE 0x00000000 +#define NVA097_CLEAR_SURFACE_STENCIL_ENABLE_TRUE 0x00000001 +#define NVA097_CLEAR_SURFACE_R_ENABLE 2:2 +#define NVA097_CLEAR_SURFACE_R_ENABLE_FALSE 0x00000000 +#define NVA097_CLEAR_SURFACE_R_ENABLE_TRUE 0x00000001 +#define NVA097_CLEAR_SURFACE_G_ENABLE 3:3 +#define NVA097_CLEAR_SURFACE_G_ENABLE_FALSE 0x00000000 +#define NVA097_CLEAR_SURFACE_G_ENABLE_TRUE 0x00000001 +#define NVA097_CLEAR_SURFACE_B_ENABLE 4:4 +#define NVA097_CLEAR_SURFACE_B_ENABLE_FALSE 0x00000000 +#define NVA097_CLEAR_SURFACE_B_ENABLE_TRUE 0x00000001 +#define NVA097_CLEAR_SURFACE_A_ENABLE 5:5 +#define NVA097_CLEAR_SURFACE_A_ENABLE_FALSE 0x00000000 +#define NVA097_CLEAR_SURFACE_A_ENABLE_TRUE 0x00000001 +#define NVA097_CLEAR_SURFACE_MRT_SELECT 9:6 +#define NVA097_CLEAR_SURFACE_RT_ARRAY_INDEX 25:10 + +#define NVA097_CLEAR_CLIP_ID_SURFACE 0x19d4 +#define NVA097_CLEAR_CLIP_ID_SURFACE_V 31:0 + +#define NVA097_SET_COLOR_COMPRESSION(i) (0x19e0+(i)*4) +#define NVA097_SET_COLOR_COMPRESSION_ENABLE 0:0 +#define NVA097_SET_COLOR_COMPRESSION_ENABLE_FALSE 0x00000000 +#define NVA097_SET_COLOR_COMPRESSION_ENABLE_TRUE 0x00000001 + +#define NVA097_SET_CT_WRITE(i) (0x1a00+(i)*4) +#define NVA097_SET_CT_WRITE_R_ENABLE 0:0 +#define NVA097_SET_CT_WRITE_R_ENABLE_FALSE 0x00000000 +#define NVA097_SET_CT_WRITE_R_ENABLE_TRUE 0x00000001 +#define NVA097_SET_CT_WRITE_G_ENABLE 4:4 +#define NVA097_SET_CT_WRITE_G_ENABLE_FALSE 0x00000000 +#define NVA097_SET_CT_WRITE_G_ENABLE_TRUE 0x00000001 +#define NVA097_SET_CT_WRITE_B_ENABLE 8:8 +#define NVA097_SET_CT_WRITE_B_ENABLE_FALSE 0x00000000 +#define NVA097_SET_CT_WRITE_B_ENABLE_TRUE 0x00000001 +#define NVA097_SET_CT_WRITE_A_ENABLE 12:12 +#define NVA097_SET_CT_WRITE_A_ENABLE_FALSE 0x00000000 +#define NVA097_SET_CT_WRITE_A_ENABLE_TRUE 0x00000001 + +#define NVA097_PIPE_NOP 0x1a2c +#define NVA097_PIPE_NOP_V 31:0 + +#define NVA097_SET_SPARE00 0x1a30 +#define NVA097_SET_SPARE00_V 31:0 + +#define NVA097_SET_SPARE01 0x1a34 +#define NVA097_SET_SPARE01_V 31:0 + +#define NVA097_SET_SPARE02 0x1a38 +#define NVA097_SET_SPARE02_V 31:0 + +#define NVA097_SET_SPARE03 0x1a3c +#define NVA097_SET_SPARE03_V 31:0 + +#define NVA097_SET_REPORT_SEMAPHORE_A 0x1b00 +#define NVA097_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVA097_SET_REPORT_SEMAPHORE_B 0x1b04 +#define NVA097_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVA097_SET_REPORT_SEMAPHORE_C 0x1b08 +#define NVA097_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVA097_SET_REPORT_SEMAPHORE_D 0x1b0c +#define NVA097_SET_REPORT_SEMAPHORE_D_OPERATION 1:0 +#define NVA097_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000 +#define NVA097_SET_REPORT_SEMAPHORE_D_OPERATION_ACQUIRE 0x00000001 +#define NVA097_SET_REPORT_SEMAPHORE_D_OPERATION_REPORT_ONLY 0x00000002 +#define NVA097_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003 +#define NVA097_SET_REPORT_SEMAPHORE_D_RELEASE 4:4 +#define 
NVA097_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_READS_COMPLETE 0x00000000 +#define NVA097_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_WRITES_COMPLETE 0x00000001 +#define NVA097_SET_REPORT_SEMAPHORE_D_ACQUIRE 8:8 +#define NVA097_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_WRITES_START 0x00000000 +#define NVA097_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_READS_START 0x00000001 +#define NVA097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION 15:12 +#define NVA097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_NONE 0x00000000 +#define NVA097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DATA_ASSEMBLER 0x00000001 +#define NVA097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VERTEX_SHADER 0x00000002 +#define NVA097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_INIT_SHADER 0x00000008 +#define NVA097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_SHADER 0x00000009 +#define NVA097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_GEOMETRY_SHADER 0x00000006 +#define NVA097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_STREAMING_OUTPUT 0x00000005 +#define NVA097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VPC 0x00000004 +#define NVA097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ZCULL 0x00000007 +#define NVA097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_PIXEL_SHADER 0x0000000A +#define NVA097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DEPTH_TEST 0x0000000C +#define NVA097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ALL 0x0000000F +#define NVA097_SET_REPORT_SEMAPHORE_D_COMPARISON 16:16 +#define NVA097_SET_REPORT_SEMAPHORE_D_COMPARISON_EQ 0x00000000 +#define NVA097_SET_REPORT_SEMAPHORE_D_COMPARISON_GE 0x00000001 +#define NVA097_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20 +#define NVA097_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVA097_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT 27:23 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_NONE 0x00000000 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_DA_VERTICES_GENERATED 0x00000001 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_DA_PRIMITIVES_GENERATED 0x00000003 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_VS_INVOCATIONS 0x00000005 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_TI_INVOCATIONS 0x0000001B +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_TS_INVOCATIONS 0x0000001D +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_TS_PRIMITIVES_GENERATED 0x0000001F +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_GS_INVOCATIONS 0x00000007 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_GS_PRIMITIVES_GENERATED 0x00000009 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_ALPHA_BETA_CLOCKS 0x00000004 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_VTG_PRIMITIVES_OUT 0x00000012 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x0000001E +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_SUCCEEDED 0x0000000B +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED 0x0000000D +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000006 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_BYTE_COUNT 0x0000001A +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_INVOCATIONS 0x0000000F +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_PRIMITIVES_GENERATED 0x00000011 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS0 0x0000000A +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS1 0x0000000C +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS2 0x0000000E +#define 
NVA097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS3 0x00000010 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_PS_INVOCATIONS 0x00000013 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT 0x00000002 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT64 0x00000015 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_COLOR_TARGET 0x00000018 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_ZETA_TARGET 0x00000019 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_BOUNDING_RECTANGLE 0x0000001C +#define NVA097_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28 +#define NVA097_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVA097_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVA097_SET_REPORT_SEMAPHORE_D_SUB_REPORT 7:5 +#define NVA097_SET_REPORT_SEMAPHORE_D_REPORT_DWORD_NUMBER 21:21 +#define NVA097_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2 +#define NVA097_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000 +#define NVA097_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001 +#define NVA097_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3 +#define NVA097_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVA097_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVA097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9 +#define NVA097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000 +#define NVA097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001 +#define NVA097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002 +#define NVA097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003 +#define NVA097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004 +#define NVA097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005 +#define NVA097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006 +#define NVA097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007 +#define NVA097_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17 +#define NVA097_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVA097_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001 + +#define NVA097_SET_VERTEX_STREAM_A_FORMAT(j) (0x1c00+(j)*16) +#define NVA097_SET_VERTEX_STREAM_A_FORMAT_STRIDE 11:0 +#define NVA097_SET_VERTEX_STREAM_A_FORMAT_ENABLE 12:12 +#define NVA097_SET_VERTEX_STREAM_A_FORMAT_ENABLE_FALSE 0x00000000 +#define NVA097_SET_VERTEX_STREAM_A_FORMAT_ENABLE_TRUE 0x00000001 + +#define NVA097_SET_VERTEX_STREAM_A_LOCATION_A(j) (0x1c04+(j)*16) +#define NVA097_SET_VERTEX_STREAM_A_LOCATION_A_OFFSET_UPPER 7:0 + +#define NVA097_SET_VERTEX_STREAM_A_LOCATION_B(j) (0x1c08+(j)*16) +#define NVA097_SET_VERTEX_STREAM_A_LOCATION_B_OFFSET_LOWER 31:0 + +#define NVA097_SET_VERTEX_STREAM_A_FREQUENCY(j) (0x1c0c+(j)*16) +#define NVA097_SET_VERTEX_STREAM_A_FREQUENCY_V 31:0 + +#define NVA097_SET_VERTEX_STREAM_B_FORMAT(j) (0x1d00+(j)*16) +#define NVA097_SET_VERTEX_STREAM_B_FORMAT_STRIDE 11:0 +#define NVA097_SET_VERTEX_STREAM_B_FORMAT_ENABLE 12:12 +#define NVA097_SET_VERTEX_STREAM_B_FORMAT_ENABLE_FALSE 0x00000000 +#define NVA097_SET_VERTEX_STREAM_B_FORMAT_ENABLE_TRUE 0x00000001 + +#define NVA097_SET_VERTEX_STREAM_B_LOCATION_A(j) (0x1d04+(j)*16) +#define NVA097_SET_VERTEX_STREAM_B_LOCATION_A_OFFSET_UPPER 7:0 + +#define NVA097_SET_VERTEX_STREAM_B_LOCATION_B(j) (0x1d08+(j)*16) +#define NVA097_SET_VERTEX_STREAM_B_LOCATION_B_OFFSET_LOWER 31:0 + +#define NVA097_SET_VERTEX_STREAM_B_FREQUENCY(j) (0x1d0c+(j)*16) +#define NVA097_SET_VERTEX_STREAM_B_FREQUENCY_V 31:0 + +#define NVA097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA(j) (0x1e00+(j)*32) 
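+/*
+ * Editor's note -- illustrative sketch, not part of the original header:
+ * every field in these class headers is published as a HI:LO bit-range
+ * pair (e.g. COUNT 27:16).  The driver consumes such pairs through its
+ * DRF helpers (see nvmisc.h); the EXAMPLE_* macros below are hypothetical
+ * stand-ins showing the idiom, which relies on the C ternary expressions
+ * (1?27:16) == 27 and (0?27:16) == 16.
+ */
+#if 0 /* example only */
+#define EXAMPLE_FIELD_LO(f)      (0?f)  /* low bit of a "hi:lo" pair  */
+#define EXAMPLE_FIELD_HI(f)      (1?f)  /* high bit of a "hi:lo" pair */
+#define EXAMPLE_FIELD_MASK(f)    ((0xFFFFFFFFu >> (31 - EXAMPLE_FIELD_HI(f) + EXAMPLE_FIELD_LO(f))) << EXAMPLE_FIELD_LO(f))
+#define EXAMPLE_FIELD_NUM(f, n)  (((NvU32)(n) << EXAMPLE_FIELD_LO(f)) & EXAMPLE_FIELD_MASK(f))
+/*
+ * e.g. EXAMPLE_FIELD_NUM(NVA097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION,
+ * NVA097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_PIXEL_SHADER) yields
+ * 0xA000, i.e. the value 0xA placed into bits 15:12.
+ */
+#endif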
+#define NVA097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE 0:0 +#define NVA097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000 +#define NVA097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001 + +#define NVA097_SET_BLEND_PER_TARGET_COLOR_OP(j) (0x1e04+(j)*32) +#define NVA097_SET_BLEND_PER_TARGET_COLOR_OP_V 31:0 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVA097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVA097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MIN 0x00008007 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MAX 0x00008008 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_ADD 0x00000001 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_SUBTRACT 0x00000002 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MIN 0x00000004 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MAX 0x00000005 + +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF(j) (0x1e08+(j)*32) +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V 31:0 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define 
NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF(j) (0x1e0c+(j)*32) +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V 31:0 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define 
NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVA097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_OP(j) (0x1e10+(j)*32) +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_OP_V 31:0 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MIN 0x00008007 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MAX 0x00008008 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_ADD 0x00000001 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_SUBTRACT 0x00000002 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MIN 0x00000004 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MAX 0x00000005 + +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF(j) (0x1e14+(j)*32) +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V 31:0 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define 
NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF(j) (0x1e18+(j)*32) +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V 31:0 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define 
NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVA097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVA097_SET_VERTEX_STREAM_LIMIT_A_A(j) (0x1f00+(j)*8) +#define NVA097_SET_VERTEX_STREAM_LIMIT_A_A_UPPER 7:0 + +#define NVA097_SET_VERTEX_STREAM_LIMIT_A_B(j) (0x1f04+(j)*8) +#define NVA097_SET_VERTEX_STREAM_LIMIT_A_B_LOWER 31:0 + +#define NVA097_SET_VERTEX_STREAM_LIMIT_B_A(j) (0x1f80+(j)*8) +#define NVA097_SET_VERTEX_STREAM_LIMIT_B_A_UPPER 7:0 + +#define NVA097_SET_VERTEX_STREAM_LIMIT_B_B(j) (0x1f84+(j)*8) +#define NVA097_SET_VERTEX_STREAM_LIMIT_B_B_LOWER 31:0 + +#define NVA097_SET_PIPELINE_SHADER(j) (0x2000+(j)*64) +#define NVA097_SET_PIPELINE_SHADER_ENABLE 0:0 +#define NVA097_SET_PIPELINE_SHADER_ENABLE_FALSE 0x00000000 +#define NVA097_SET_PIPELINE_SHADER_ENABLE_TRUE 0x00000001 +#define NVA097_SET_PIPELINE_SHADER_TYPE 7:4 +#define NVA097_SET_PIPELINE_SHADER_TYPE_VERTEX_CULL_BEFORE_FETCH 0x00000000 +#define NVA097_SET_PIPELINE_SHADER_TYPE_VERTEX 0x00000001 +#define NVA097_SET_PIPELINE_SHADER_TYPE_TESSELLATION_INIT 0x00000002 +#define NVA097_SET_PIPELINE_SHADER_TYPE_TESSELLATION 0x00000003 +#define NVA097_SET_PIPELINE_SHADER_TYPE_GEOMETRY 0x00000004 +#define NVA097_SET_PIPELINE_SHADER_TYPE_PIXEL 0x00000005 + +#define NVA097_SET_PIPELINE_PROGRAM(j) (0x2004+(j)*64) +#define NVA097_SET_PIPELINE_PROGRAM_OFFSET 31:0 + +#define NVA097_SET_PIPELINE_RESERVED_A(j) (0x2008+(j)*64) +#define NVA097_SET_PIPELINE_RESERVED_A_V 0:0 + +#define NVA097_SET_PIPELINE_REGISTER_COUNT(j) (0x200c+(j)*64) +#define NVA097_SET_PIPELINE_REGISTER_COUNT_V 7:0 + +#define NVA097_SET_PIPELINE_BINDING(j) (0x2010+(j)*64) +#define NVA097_SET_PIPELINE_BINDING_GROUP 2:0 + +#define NVA097_SET_PIPELINE_RESERVED_B(j) (0x2014+(j)*64) +#define NVA097_SET_PIPELINE_RESERVED_B_V 0:0 + +#define NVA097_SET_PIPELINE_RESERVED_C(j) (0x2018+(j)*64) +#define NVA097_SET_PIPELINE_RESERVED_C_V 0:0 + +#define NVA097_SET_PIPELINE_RESERVED_D(j) (0x201c+(j)*64) +#define NVA097_SET_PIPELINE_RESERVED_D_V 0:0 + +#define NVA097_SET_PIPELINE_RESERVED_E(j) (0x2020+(j)*64) +#define NVA097_SET_PIPELINE_RESERVED_E_V 0:0 + +#define NVA097_SET_FALCON00 0x2300 +#define NVA097_SET_FALCON00_V 31:0 + +#define NVA097_SET_FALCON01 0x2304 +#define NVA097_SET_FALCON01_V 31:0 + +#define NVA097_SET_FALCON02 0x2308 +#define NVA097_SET_FALCON02_V 31:0 + +#define NVA097_SET_FALCON03 0x230c +#define NVA097_SET_FALCON03_V 31:0 + +#define NVA097_SET_FALCON04 0x2310 +#define NVA097_SET_FALCON04_V 31:0 + +#define NVA097_SET_FALCON05 0x2314 +#define NVA097_SET_FALCON05_V 31:0 + +#define NVA097_SET_FALCON06 0x2318 +#define NVA097_SET_FALCON06_V 31:0 + +#define NVA097_SET_FALCON07 0x231c +#define NVA097_SET_FALCON07_V 31:0 + +#define NVA097_SET_FALCON08 0x2320 +#define 
NVA097_SET_FALCON08_V 31:0
+
+#define NVA097_SET_FALCON09 0x2324
+#define NVA097_SET_FALCON09_V 31:0
+
+#define NVA097_SET_FALCON10 0x2328
+#define NVA097_SET_FALCON10_V 31:0
+
+#define NVA097_SET_FALCON11 0x232c
+#define NVA097_SET_FALCON11_V 31:0
+
+#define NVA097_SET_FALCON12 0x2330
+#define NVA097_SET_FALCON12_V 31:0
+
+#define NVA097_SET_FALCON13 0x2334
+#define NVA097_SET_FALCON13_V 31:0
+
+#define NVA097_SET_FALCON14 0x2338
+#define NVA097_SET_FALCON14_V 31:0
+
+#define NVA097_SET_FALCON15 0x233c
+#define NVA097_SET_FALCON15_V 31:0
+
+#define NVA097_SET_FALCON16 0x2340
+#define NVA097_SET_FALCON16_V 31:0
+
+#define NVA097_SET_FALCON17 0x2344
+#define NVA097_SET_FALCON17_V 31:0
+
+#define NVA097_SET_FALCON18 0x2348
+#define NVA097_SET_FALCON18_V 31:0
+
+#define NVA097_SET_FALCON19 0x234c
+#define NVA097_SET_FALCON19_V 31:0
+
+#define NVA097_SET_FALCON20 0x2350
+#define NVA097_SET_FALCON20_V 31:0
+
+#define NVA097_SET_FALCON21 0x2354
+#define NVA097_SET_FALCON21_V 31:0
+
+#define NVA097_SET_FALCON22 0x2358
+#define NVA097_SET_FALCON22_V 31:0
+
+#define NVA097_SET_FALCON23 0x235c
+#define NVA097_SET_FALCON23_V 31:0
+
+#define NVA097_SET_FALCON24 0x2360
+#define NVA097_SET_FALCON24_V 31:0
+
+#define NVA097_SET_FALCON25 0x2364
+#define NVA097_SET_FALCON25_V 31:0
+
+#define NVA097_SET_FALCON26 0x2368
+#define NVA097_SET_FALCON26_V 31:0
+
+#define NVA097_SET_FALCON27 0x236c
+#define NVA097_SET_FALCON27_V 31:0
+
+#define NVA097_SET_FALCON28 0x2370
+#define NVA097_SET_FALCON28_V 31:0
+
+#define NVA097_SET_FALCON29 0x2374
+#define NVA097_SET_FALCON29_V 31:0
+
+#define NVA097_SET_FALCON30 0x2378
+#define NVA097_SET_FALCON30_V 31:0
+
+#define NVA097_SET_FALCON31 0x237c
+#define NVA097_SET_FALCON31_V 31:0
+
+#define NVA097_SET_CONSTANT_BUFFER_SELECTOR_A 0x2380
+#define NVA097_SET_CONSTANT_BUFFER_SELECTOR_A_SIZE 16:0
+
+#define NVA097_SET_CONSTANT_BUFFER_SELECTOR_B 0x2384
+#define NVA097_SET_CONSTANT_BUFFER_SELECTOR_B_ADDRESS_UPPER 7:0
+
+#define NVA097_SET_CONSTANT_BUFFER_SELECTOR_C 0x2388
+#define NVA097_SET_CONSTANT_BUFFER_SELECTOR_C_ADDRESS_LOWER 31:0
+
+#define NVA097_LOAD_CONSTANT_BUFFER_OFFSET 0x238c
+#define NVA097_LOAD_CONSTANT_BUFFER_OFFSET_V 15:0
+
+#define NVA097_LOAD_CONSTANT_BUFFER(i) (0x2390+(i)*4)
+#define NVA097_LOAD_CONSTANT_BUFFER_V 31:0
+
+#define NVA097_BIND_GROUP_RESERVED_A(j) (0x2400+(j)*32)
+#define NVA097_BIND_GROUP_RESERVED_A_V 0:0
+
+#define NVA097_BIND_GROUP_RESERVED_B(j) (0x2404+(j)*32)
+#define NVA097_BIND_GROUP_RESERVED_B_V 0:0
+
+#define NVA097_BIND_GROUP_RESERVED_C(j) (0x2408+(j)*32)
+#define NVA097_BIND_GROUP_RESERVED_C_V 0:0
+
+#define NVA097_BIND_GROUP_RESERVED_D(j) (0x240c+(j)*32)
+#define NVA097_BIND_GROUP_RESERVED_D_V 0:0
+
+#define NVA097_BIND_GROUP_CONSTANT_BUFFER(j) (0x2410+(j)*32)
+#define NVA097_BIND_GROUP_CONSTANT_BUFFER_VALID 0:0
+#define NVA097_BIND_GROUP_CONSTANT_BUFFER_VALID_FALSE 0x00000000
+#define NVA097_BIND_GROUP_CONSTANT_BUFFER_VALID_TRUE 0x00000001
+#define NVA097_BIND_GROUP_CONSTANT_BUFFER_SHADER_SLOT 8:4
+
+#define NVA097_SET_COLOR_CLAMP 0x2600
+#define NVA097_SET_COLOR_CLAMP_ENABLE 0:0
+#define NVA097_SET_COLOR_CLAMP_ENABLE_FALSE 0x00000000
+#define NVA097_SET_COLOR_CLAMP_ENABLE_TRUE 0x00000001
+
+#define NVA097_NOOP_X_X_X_SET_VALVE 0x2604
+#define NVA097_NOOP_X_X_X_SET_VALVE_HIGHER_PRIORITY 0:0
+#define NVA097_NOOP_X_X_X_SET_VALVE_HIGHER_PRIORITY_COMPUTE 0x00000000
+#define NVA097_NOOP_X_X_X_SET_VALVE_HIGHER_PRIORITY_GRAPHICS 0x00000001
+
+#define NVA097_SET_BINDLESS_TEXTURE 0x2608
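+/*
+ * Editor's note -- hypothetical usage sketch, not part of the original
+ * header: inline constant-buffer updates select a buffer with the
+ * SELECTOR triple above, set a byte offset, then stream data words.
+ * The push_method() writer and the n <= 16 word limit are assumptions
+ * made for illustration only.
+ */
+#if 0 /* example only */
+static void example_load_constants(NvU64 cb_va, NvU32 cb_size,
+                                   const NvU32 *data, NvU32 n)
+{
+    push_method(NVA097_SET_CONSTANT_BUFFER_SELECTOR_A, cb_size);              /* SIZE 16:0          */
+    push_method(NVA097_SET_CONSTANT_BUFFER_SELECTOR_B, (NvU32)(cb_va >> 32)); /* ADDRESS_UPPER 7:0  */
+    push_method(NVA097_SET_CONSTANT_BUFFER_SELECTOR_C, (NvU32)cb_va);         /* ADDRESS_LOWER 31:0 */
+    push_method(NVA097_LOAD_CONSTANT_BUFFER_OFFSET, 0);                       /* start at byte 0    */
+    for (NvU32 i = 0; i < n; i++)                                             /* one word per method */
+        push_method(NVA097_LOAD_CONSTANT_BUFFER(i), data[i]);
+}
+#endif
+#define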
NVA097_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 4:0 + +#define NVA097_SET_TRAP_HANDLER 0x260c +#define NVA097_SET_TRAP_HANDLER_OFFSET 31:0 + +#define NVA097_SET_STREAM_OUT_LAYOUT_SELECT(i,j) (0x2800+(i)*128+(j)*4) +#define NVA097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER00 7:0 +#define NVA097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER01 15:8 +#define NVA097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER02 23:16 +#define NVA097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER03 31:24 + +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22 +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25 +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27 +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30 + +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4) +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0 +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1 +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3 +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4 + +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc +#define NVA097_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0 + +#define NVA097_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NVA097_SET_MME_SHADOW_SCRATCH_V 31:0 + +#define NVA097_CALL_MME_MACRO(j) (0x3800+(j)*8) +#define NVA097_CALL_MME_MACRO_V 31:0 + +#define NVA097_CALL_MME_DATA(j) (0x3804+(j)*8) +#define NVA097_CALL_MME_DATA_V 31:0 + +#endif /* _cl_kepler_a_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cla0b5.h b/src/common/sdk/nvidia/inc/class/cla0b5.h new file mode 100644 index 0000000..92076c6 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla0b5.h @@ -0,0 +1,262 @@ +/******************************************************************************* + Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "nvtypes.h" + +#ifndef _cla0b5_h_ +#define _cla0b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define KEPLER_DMA_COPY_A (0x0000A0B5) + +#define NVA0B5_NOP (0x00000100) +#define NVA0B5_NOP_PARAMETER 31:0 +#define NVA0B5_PM_TRIGGER (0x00000140) +#define NVA0B5_PM_TRIGGER_V 31:0 +#define NVA0B5_SET_SEMAPHORE_A (0x00000240) +#define NVA0B5_SET_SEMAPHORE_A_UPPER 7:0 +#define NVA0B5_SET_SEMAPHORE_B (0x00000244) +#define NVA0B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVA0B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVA0B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVA0B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVA0B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVA0B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVA0B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVA0B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVA0B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVA0B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define NVA0B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVA0B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVA0B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVA0B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVA0B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVA0B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVA0B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVA0B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVA0B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVA0B5_SET_DST_PHYS_MODE (0x00000264) +#define NVA0B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVA0B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVA0B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVA0B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVA0B5_LAUNCH_DMA (0x00000300) +#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVA0B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVA0B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVA0B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVA0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define NVA0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVA0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVA0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define 
NVA0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVA0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVA0B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVA0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVA0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVA0B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVA0B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVA0B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define NVA0B5_LAUNCH_DMA_BYPASS_L2 11:11 +#define NVA0B5_LAUNCH_DMA_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVA0B5_LAUNCH_DMA_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVA0B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVA0B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) +#define NVA0B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVA0B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVA0B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVA0B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMIN (0x0000000B) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMAX (0x0000000C) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMUL (0x0000000D) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMUL (0x0000000E) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVA0B5_OFFSET_IN_UPPER (0x00000400) +#define NVA0B5_OFFSET_IN_UPPER_UPPER 7:0 +#define NVA0B5_OFFSET_IN_LOWER (0x00000404) +#define NVA0B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVA0B5_OFFSET_OUT_UPPER (0x00000408) +#define NVA0B5_OFFSET_OUT_UPPER_UPPER 7:0 +#define NVA0B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVA0B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVA0B5_PITCH_IN (0x00000410) +#define NVA0B5_PITCH_IN_VALUE 31:0 +#define NVA0B5_PITCH_OUT (0x00000414) +#define NVA0B5_PITCH_OUT_VALUE 31:0 +#define NVA0B5_LINE_LENGTH_IN (0x00000418) +#define NVA0B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVA0B5_LINE_COUNT (0x0000041C) +#define NVA0B5_LINE_COUNT_VALUE 31:0 +#define NVA0B5_SET_REMAP_CONST_A (0x00000700) +#define NVA0B5_SET_REMAP_CONST_A_V 31:0 +#define NVA0B5_SET_REMAP_CONST_B (0x00000704) +#define NVA0B5_SET_REMAP_CONST_B_V 31:0 +#define NVA0B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_X 2:0 +#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define 
NVA0B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) +#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) +#define NVA0B5_SET_DST_BLOCK_SIZE (0x0000070C) +#define NVA0B5_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVA0B5_SET_DST_BLOCK_SIZE_WIDTH_QUARTER_GOB (0x0000000E) +#define NVA0B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVA0B5_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVA0B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVA0B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVA0B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVA0B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVA0B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVA0B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVA0B5_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVA0B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVA0B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVA0B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define 
NVA0B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVA0B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVA0B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVA0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVA0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_TESLA_4 (0x00000000) +#define NVA0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVA0B5_SET_DST_WIDTH (0x00000710) +#define NVA0B5_SET_DST_WIDTH_V 31:0 +#define NVA0B5_SET_DST_HEIGHT (0x00000714) +#define NVA0B5_SET_DST_HEIGHT_V 31:0 +#define NVA0B5_SET_DST_DEPTH (0x00000718) +#define NVA0B5_SET_DST_DEPTH_V 31:0 +#define NVA0B5_SET_DST_LAYER (0x0000071C) +#define NVA0B5_SET_DST_LAYER_V 31:0 +#define NVA0B5_SET_DST_ORIGIN (0x00000720) +#define NVA0B5_SET_DST_ORIGIN_X 15:0 +#define NVA0B5_SET_DST_ORIGIN_Y 31:16 +#define NVA0B5_SET_SRC_BLOCK_SIZE (0x00000728) +#define NVA0B5_SET_SRC_BLOCK_SIZE_WIDTH 3:0 +#define NVA0B5_SET_SRC_BLOCK_SIZE_WIDTH_QUARTER_GOB (0x0000000E) +#define NVA0B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVA0B5_SET_SRC_BLOCK_SIZE_HEIGHT 7:4 +#define NVA0B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVA0B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVA0B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVA0B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVA0B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVA0B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVA0B5_SET_SRC_BLOCK_SIZE_DEPTH 11:8 +#define NVA0B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVA0B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVA0B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVA0B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVA0B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVA0B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVA0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVA0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_TESLA_4 (0x00000000) +#define NVA0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVA0B5_SET_SRC_WIDTH (0x0000072C) +#define NVA0B5_SET_SRC_WIDTH_V 31:0 +#define NVA0B5_SET_SRC_HEIGHT (0x00000730) +#define NVA0B5_SET_SRC_HEIGHT_V 31:0 +#define NVA0B5_SET_SRC_DEPTH (0x00000734) +#define NVA0B5_SET_SRC_DEPTH_V 31:0 +#define NVA0B5_SET_SRC_LAYER (0x00000738) +#define NVA0B5_SET_SRC_LAYER_V 31:0 +#define NVA0B5_SET_SRC_ORIGIN (0x0000073C) +#define NVA0B5_SET_SRC_ORIGIN_X 15:0 +#define NVA0B5_SET_SRC_ORIGIN_Y 31:16 +#define NVA0B5_PM_TRIGGER_END (0x00001114) +#define NVA0B5_PM_TRIGGER_END_V 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cla0b5_h + diff --git a/src/common/sdk/nvidia/inc/class/cla16f.h b/src/common/sdk/nvidia/inc/class/cla16f.h new file mode 100644 index 0000000..66e7594 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla16f.h @@ -0,0 +1,254 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cla16f_h_ +#define _cla16f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class KEPLER_CHANNEL_GPFIFO */ +/* + * Documentation for KEPLER_CHANNEL_GPFIFO can be found in dev_pbdma.ref, + * chapter "User Control Registers". It is documented as device NV_UDMA. + * The GPFIFO format itself is also documented in dev_pbdma.ref, + * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref, + * chapter "FIFO DMA RAM", NV_FIFO_DMA_*. + * + */ +#define KEPLER_CHANNEL_GPFIFO_B (0x0000A16F) + +/* pio method data structure */ +typedef volatile struct _cla16f_tag0 { + NvV32 Reserved00[0x7c0]; +} NvA16FTypedef, KEPLER_ChannelGPFifoB; +#define NVA16F_TYPEDEF KEPLER_CHANNELChannelGPFifo +/* dma flow control data structure */ +typedef volatile struct _cla16f_tag1 { + NvU32 Ignored00[0x010]; /* 0000-003f*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 Ignored01[0x002]; /* 0050-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x5c]; +} NvA16FControl, KeplerBControlGPFifo; +/* fields and values */ +#define NVA16F_NUMBER_OF_SUBCHANNELS (8) +#define NVA16F_SET_OBJECT (0x00000000) +#define NVA16F_SET_OBJECT_NVCLASS 15:0 +#define NVA16F_SET_OBJECT_ENGINE 20:16 +#define NVA16F_SET_OBJECT_ENGINE_SW 0x0000001f +#define NVA16F_ILLEGAL (0x00000004) +#define NVA16F_ILLEGAL_HANDLE 31:0 +#define NVA16F_NOP (0x00000008) +#define NVA16F_NOP_HANDLE 31:0 +#define NVA16F_SEMAPHOREA (0x00000010) +#define NVA16F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVA16F_SEMAPHOREB (0x00000014) +#define NVA16F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVA16F_SEMAPHOREC (0x00000018) +#define NVA16F_SEMAPHOREC_PAYLOAD 31:0 +#define NVA16F_SEMAPHORED (0x0000001C) +#define NVA16F_SEMAPHORED_OPERATION 4:0 +#define NVA16F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define 
NVA16F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVA16F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVA16F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NVA16F_SEMAPHORED_OPERATION_REDUCTION 0x00000010 +#define NVA16F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVA16F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVA16F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVA16F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVA16F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVA16F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVA16F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVA16F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVA16F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVA16F_SEMAPHORED_REDUCTION 30:27 +#define NVA16F_SEMAPHORED_REDUCTION_MIN 0x00000000 +#define NVA16F_SEMAPHORED_REDUCTION_MAX 0x00000001 +#define NVA16F_SEMAPHORED_REDUCTION_XOR 0x00000002 +#define NVA16F_SEMAPHORED_REDUCTION_AND 0x00000003 +#define NVA16F_SEMAPHORED_REDUCTION_OR 0x00000004 +#define NVA16F_SEMAPHORED_REDUCTION_ADD 0x00000005 +#define NVA16F_SEMAPHORED_REDUCTION_INC 0x00000006 +#define NVA16F_SEMAPHORED_REDUCTION_DEC 0x00000007 +#define NVA16F_SEMAPHORED_FORMAT 31:31 +#define NVA16F_SEMAPHORED_FORMAT_SIGNED 0x00000000 +#define NVA16F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001 +#define NVA16F_NON_STALL_INTERRUPT (0x00000020) +#define NVA16F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVA16F_FB_FLUSH (0x00000024) +#define NVA16F_FB_FLUSH_HANDLE 31:0 +#define NVA16F_MEM_OP_A (0x00000028) +#define NVA16F_MEM_OP_A_OPERAND_LOW 31:2 +#define NVA16F_MEM_OP_A_TLB_INVALIDATE_ADDR 29:2 +#define NVA16F_MEM_OP_A_TLB_INVALIDATE_TARGET 31:30 +#define NVA16F_MEM_OP_A_TLB_INVALIDATE_TARGET_VID_MEM 0x00000000 +#define NVA16F_MEM_OP_A_TLB_INVALIDATE_TARGET_SYS_MEM_COHERENT 0x00000002 +#define NVA16F_MEM_OP_A_TLB_INVALIDATE_TARGET_SYS_MEM_NONCOHERENT 0x00000003 +#define NVA16F_MEM_OP_B (0x0000002c) +#define NVA16F_MEM_OP_B_OPERAND_HIGH 7:0 +#define NVA16F_MEM_OP_B_OPERATION 31:27 +#define NVA16F_MEM_OP_B_OPERATION_SYSMEMBAR_FLUSH 0x00000005 +#define NVA16F_MEM_OP_B_OPERATION_SOFT_FLUSH 0x00000006 +#define NVA16F_MEM_OP_B_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVA16F_MEM_OP_B_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVA16F_MEM_OP_B_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +#define NVA16F_MEM_OP_B_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVA16F_MEM_OP_B_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB 0:0 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB_ALL 0x00000001 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC 1:1 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVA16F_SET_REFERENCE (0x00000050) +#define NVA16F_SET_REFERENCE_COUNT 31:0 +#define NVA16F_WFI (0x00000078) +#define NVA16F_WFI_HANDLE 31:0 +#define NVA16F_CRC_CHECK (0x0000007c) +#define NVA16F_CRC_CHECK_VALUE 31:0 +#define NVA16F_YIELD (0x00000080) +#define NVA16F_YIELD_OP 1:0 +#define NVA16F_YIELD_OP_NOP 0x00000000 + + +/* GPFIFO entry format */ +#define NVA16F_GP_ENTRY__SIZE 8 +#define NVA16F_GP_ENTRY0_FETCH 0:0 +#define NVA16F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVA16F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVA16F_GP_ENTRY0_GET 31:2 +#define NVA16F_GP_ENTRY0_OPERAND 31:0 +#define NVA16F_GP_ENTRY1_GET_HI 7:0 +#define NVA16F_GP_ENTRY1_PRIV 8:8 +#define NVA16F_GP_ENTRY1_PRIV_USER 0x00000000 +#define 
NVA16F_GP_ENTRY1_PRIV_KERNEL 0x00000001 +#define NVA16F_GP_ENTRY1_LEVEL 9:9 +#define NVA16F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVA16F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVA16F_GP_ENTRY1_LENGTH 30:10 +#define NVA16F_GP_ENTRY1_SYNC 31:31 +#define NVA16F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVA16F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVA16F_GP_ENTRY1_OPCODE 7:0 +#define NVA16F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVA16F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVA16F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVA16F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVA16F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVA16F_DMA_METHOD_ADDRESS 11:0 +#define NVA16F_DMA_SUBDEVICE_MASK 15:4 +#define NVA16F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVA16F_DMA_TERT_OP 17:16 +#define NVA16F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVA16F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NVA16F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVA16F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVA16F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVA16F_DMA_METHOD_COUNT_OLD 28:18 +#define NVA16F_DMA_METHOD_COUNT 28:16 +#define NVA16F_DMA_IMMD_DATA 28:16 +#define NVA16F_DMA_SEC_OP 31:29 +#define NVA16F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVA16F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVA16F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NVA16F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) +#define NVA16F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NVA16F_DMA_SEC_OP_ONE_INC (0x00000005) +#define NVA16F_DMA_SEC_OP_RESERVED6 (0x00000006) +#define NVA16F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NVA16F_DMA_INCR_ADDRESS 11:0 +#define NVA16F_DMA_INCR_SUBCHANNEL 15:13 +#define NVA16F_DMA_INCR_COUNT 28:16 +#define NVA16F_DMA_INCR_OPCODE 31:29 +#define NVA16F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVA16F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVA16F_DMA_NONINCR_ADDRESS 11:0 +#define NVA16F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVA16F_DMA_NONINCR_COUNT 28:16 +#define NVA16F_DMA_NONINCR_OPCODE 31:29 +#define NVA16F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVA16F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define NVA16F_DMA_ONEINCR_ADDRESS 11:0 +#define NVA16F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVA16F_DMA_ONEINCR_COUNT 28:16 +#define NVA16F_DMA_ONEINCR_OPCODE 31:29 +#define NVA16F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVA16F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVA16F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVA16F_DMA_IMMD_ADDRESS 11:0 +#define NVA16F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVA16F_DMA_IMMD_DATA 28:16 +#define NVA16F_DMA_IMMD_OPCODE 31:29 +#define NVA16F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NVA16F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NVA16F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NVA16F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NVA16F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NVA16F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVA16F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NVA16F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVA16F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NVA16F_DMA_ENDSEG_OPCODE 31:29 +#define 
NVA16F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NVA16F_DMA_ADDRESS 12:2 +#define NVA16F_DMA_SUBCH 15:13 +#define NVA16F_DMA_OPCODE3 17:16 +#define NVA16F_DMA_OPCODE3_NONE (0x00000000) +#define NVA16F_DMA_COUNT 28:18 +#define NVA16F_DMA_OPCODE 31:29 +#define NVA16F_DMA_OPCODE_METHOD (0x00000000) +#define NVA16F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NVA16F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cla16f_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cla26f.h b/src/common/sdk/nvidia/inc/class/cla26f.h new file mode 100644 index 0000000..807c5cf --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla26f.h @@ -0,0 +1,254 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cla26f_h_ +#define _cla26f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class KEPLER_CHANNEL_GPFIFO */ +/* + * Documentation for KEPLER_CHANNEL_GPFIFO can be found in dev_pbdma.ref, + * chapter "User Control Registers". It is documented as device NV_UDMA. + * The GPFIFO format itself is also documented in dev_pbdma.ref, + * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref, + * chapter "FIFO DMA RAM", NV_FIFO_DMA_*.
+ * + */ +#define KEPLER_CHANNEL_GPFIFO_C (0x0000A26F) + +/* pio method data structure */ +typedef volatile struct _cla26f_tag0 { + NvV32 Reserved00[0x7c0]; +} NvA26FTypedef, KEPLER_ChannelGPFifoC; +#define NVA26F_TYPEDEF KEPLER_CHANNELChannelGPFifo +/* dma flow control data structure */ +typedef volatile struct _cla26f_tag1 { + NvU32 Ignored00[0x010]; /* 0000-003f*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 Ignored01[0x002]; /* 0050-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x5c]; +} NvA26FControl, KeplerCControlGPFifo; +/* fields and values */ +#define NVA26F_NUMBER_OF_SUBCHANNELS (8) +#define NVA26F_SET_OBJECT (0x00000000) +#define NVA26F_SET_OBJECT_NVCLASS 15:0 +#define NVA26F_SET_OBJECT_ENGINE 20:16 +#define NVA26F_SET_OBJECT_ENGINE_SW 0x0000001f +#define NVA26F_ILLEGAL (0x00000004) +#define NVA26F_ILLEGAL_HANDLE 31:0 +#define NVA26F_NOP (0x00000008) +#define NVA26F_NOP_HANDLE 31:0 +#define NVA26F_SEMAPHOREA (0x00000010) +#define NVA26F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVA26F_SEMAPHOREB (0x00000014) +#define NVA26F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVA26F_SEMAPHOREC (0x00000018) +#define NVA26F_SEMAPHOREC_PAYLOAD 31:0 +#define NVA26F_SEMAPHORED (0x0000001C) +#define NVA26F_SEMAPHORED_OPERATION 3:0 +#define NVA26F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NVA26F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVA26F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVA26F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NVA26F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVA26F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVA26F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVA26F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVA26F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVA26F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVA26F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVA26F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVA26F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVA26F_NON_STALL_INTERRUPT (0x00000020) +#define NVA26F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVA26F_FB_FLUSH (0x00000024) +#define NVA26F_FB_FLUSH_HANDLE 31:0 +#define NVA26F_MEM_OP_A (0x00000028) +#define NVA26F_MEM_OP_A_OPERAND_LOW 31:2 +#define NVA26F_MEM_OP_A_TLB_INVALIDATE_ADDR 29:2 +#define NVA26F_MEM_OP_A_TLB_INVALIDATE_TARGET 31:30 +#define NVA26F_MEM_OP_A_TLB_INVALIDATE_TARGET_VID_MEM 0x00000000 +#define NVA26F_MEM_OP_A_TLB_INVALIDATE_TARGET_SYS_MEM_COHERENT 0x00000002 +#define NVA26F_MEM_OP_A_TLB_INVALIDATE_TARGET_SYS_MEM_NONCOHERENT 0x00000003 +#define NVA26F_MEM_OP_B (0x0000002c) +#define NVA26F_MEM_OP_B_OPERAND_HIGH 7:0 +#define NVA26F_MEM_OP_B_OPERATION 31:27 +#define NVA26F_MEM_OP_B_OPERATION_SYSMEMBAR_FLUSH 0x00000005 +#define NVA26F_MEM_OP_B_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVA26F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e +#define NVA26F_MEM_OP_B_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define 
NVA26F_MEM_OP_B_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVA26F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB 0:0 +#define NVA26F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVA26F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB_ALL 0x00000001 +#define NVA26F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC 1:1 +#define NVA26F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVA26F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVA26F_SET_REFERENCE (0x00000050) +#define NVA26F_SET_REFERENCE_COUNT 31:0 +#define NVA26F_SYNCPOINTA (0x00000070) +#define NVA26F_SYNCPOINTA_PAYLOAD 31:0 +#define NVA26F_SYNCPOINTB (0x00000074) +#define NVA26F_SYNCPOINTB_OPERATION 1:0 +#define NVA26F_SYNCPOINTB_OPERATION_WAIT 0x00000000 +#define NVA26F_SYNCPOINTB_OPERATION_INCR 0x00000001 +#define NVA26F_SYNCPOINTB_OPERATION_BASE_ADD 0x00000002 +#define NVA26F_SYNCPOINTB_OPERATION_BASE_WRITE 0x00000003 +#define NVA26F_SYNCPOINTB_WAIT_SWITCH 4:4 +#define NVA26F_SYNCPOINTB_WAIT_SWITCH_DIS 0x00000000 +#define NVA26F_SYNCPOINTB_WAIT_SWITCH_EN 0x00000001 +#define NVA26F_SYNCPOINTB_BASE 5:5 +#define NVA26F_SYNCPOINTB_BASE_DIS 0x00000000 +#define NVA26F_SYNCPOINTB_BASE_EN 0x00000001 +#define NVA26F_SYNCPOINTB_SYNCPT_INDEX 15:8 +#define NVA26F_SYNCPOINTB_BASE_INDEX 25:20 +#define NVA26F_WFI (0x00000078) +#define NVA26F_WFI_HANDLE 31:0 +#define NVA26F_CRC_CHECK (0x0000007c) +#define NVA26F_CRC_CHECK_VALUE 31:0 +#define NVA26F_YIELD (0x00000080) +#define NVA26F_YIELD_OP 1:0 +#define NVA26F_YIELD_OP_NOP 0x00000000 + +/* GPFIFO entry format */ +#define NVA26F_GP_ENTRY__SIZE 8 +#define NVA26F_GP_ENTRY0_FETCH 0:0 +#define NVA26F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVA26F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVA26F_GP_ENTRY0_GET 31:2 +#define NVA26F_GP_ENTRY0_OPERAND 31:0 +#define NVA26F_GP_ENTRY1_GET_HI 7:0 +#define NVA26F_GP_ENTRY1_PRIV 8:8 +#define NVA26F_GP_ENTRY1_PRIV_USER 0x00000000 +#define NVA26F_GP_ENTRY1_PRIV_KERNEL 0x00000001 +#define NVA26F_GP_ENTRY1_LEVEL 9:9 +#define NVA26F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVA26F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVA26F_GP_ENTRY1_LENGTH 30:10 +#define NVA26F_GP_ENTRY1_SYNC 31:31 +#define NVA26F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVA26F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVA26F_GP_ENTRY1_OPCODE 7:0 +#define NVA26F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVA26F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVA26F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVA26F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVA26F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVA26F_DMA_METHOD_ADDRESS 11:0 +#define NVA26F_DMA_SUBDEVICE_MASK 15:4 +#define NVA26F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVA26F_DMA_TERT_OP 17:16 +#define NVA26F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVA26F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NVA26F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVA26F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVA26F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVA26F_DMA_METHOD_COUNT_OLD 28:18 +#define NVA26F_DMA_METHOD_COUNT 28:16 +#define NVA26F_DMA_IMMD_DATA 28:16 +#define NVA26F_DMA_SEC_OP 31:29 +#define NVA26F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVA26F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVA26F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NVA26F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) +#define NVA26F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NVA26F_DMA_SEC_OP_ONE_INC (0x00000005) +#define NVA26F_DMA_SEC_OP_RESERVED6 
(0x00000006) +#define NVA26F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NVA26F_DMA_INCR_ADDRESS 11:0 +#define NVA26F_DMA_INCR_SUBCHANNEL 15:13 +#define NVA26F_DMA_INCR_COUNT 28:16 +#define NVA26F_DMA_INCR_OPCODE 31:29 +#define NVA26F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVA26F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVA26F_DMA_NONINCR_ADDRESS 11:0 +#define NVA26F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVA26F_DMA_NONINCR_COUNT 28:16 +#define NVA26F_DMA_NONINCR_OPCODE 31:29 +#define NVA26F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVA26F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define NVA26F_DMA_ONEINCR_ADDRESS 11:0 +#define NVA26F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVA26F_DMA_ONEINCR_COUNT 28:16 +#define NVA26F_DMA_ONEINCR_OPCODE 31:29 +#define NVA26F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVA26F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVA26F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVA26F_DMA_IMMD_ADDRESS 11:0 +#define NVA26F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVA26F_DMA_IMMD_DATA 28:16 +#define NVA26F_DMA_IMMD_OPCODE 31:29 +#define NVA26F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NVA26F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NVA26F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NVA26F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NVA26F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NVA26F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVA26F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NVA26F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVA26F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NVA26F_DMA_ENDSEG_OPCODE 31:29 +#define NVA26F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NVA26F_DMA_ADDRESS 12:2 +#define NVA26F_DMA_SUBCH 15:13 +#define NVA26F_DMA_OPCODE3 17:16 +#define NVA26F_DMA_OPCODE3_NONE (0x00000000) +#define NVA26F_DMA_COUNT 28:18 +#define NVA26F_DMA_OPCODE 31:29 +#define NVA26F_DMA_OPCODE_METHOD (0x00000000) +#define NVA26F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NVA26F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cla26f_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clb06f.h b/src/common/sdk/nvidia/inc/class/clb06f.h new file mode 100644 index 0000000..b2b51e0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb06f.h @@ -0,0 +1,260 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clb06f_h_ +#define _clb06f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class MAXWELL_CHANNEL_GPFIFO */ +/* + * Documentation for MAXWELL_CHANNEL_GPFIFO can be found in dev_pbdma.ref, + * chapter "User Control Registers". It is documented as device NV_UDMA. + * The GPFIFO format itself is also documented in dev_pbdma.ref, + * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref, + * chapter "FIFO DMA RAM", NV_FIFO_DMA_*. + * + */ +#define MAXWELL_CHANNEL_GPFIFO_A (0x0000B06F) + +#define NVB06F_TYPEDEF MAXWELL_CHANNELChannelGPFifoA + +/* dma flow control data structure */ +typedef volatile struct _clb06f_tag0 { + NvU32 Ignored00[0x010]; /* 0000-003f*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 Ignored01[0x002]; /* 0050-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x5c]; +} Nvb06FControl, MaxwellAControlGPFifo; + +/* fields and values */ +#define NVB06F_NUMBER_OF_SUBCHANNELS (8) +#define NVB06F_SET_OBJECT (0x00000000) +#define NVB06F_SET_OBJECT_NVCLASS 15:0 +#define NVB06F_SET_OBJECT_ENGINE 20:16 +#define NVB06F_SET_OBJECT_ENGINE_SW 0x0000001f +#define NVB06F_ILLEGAL (0x00000004) +#define NVB06F_ILLEGAL_HANDLE 31:0 +#define NVB06F_NOP (0x00000008) +#define NVB06F_NOP_HANDLE 31:0 +#define NVB06F_SEMAPHOREA (0x00000010) +#define NVB06F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVB06F_SEMAPHOREB (0x00000014) +#define NVB06F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVB06F_SEMAPHOREC (0x00000018) +#define NVB06F_SEMAPHOREC_PAYLOAD 31:0 +#define NVB06F_SEMAPHORED (0x0000001C) +#define NVB06F_SEMAPHORED_OPERATION 4:0 +#define NVB06F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NVB06F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVB06F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVB06F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NVB06F_SEMAPHORED_OPERATION_REDUCTION 0x00000010 +#define NVB06F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVB06F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVB06F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVB06F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVB06F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVB06F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVB06F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVB06F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVB06F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVB06F_SEMAPHORED_REDUCTION 30:27 +#define NVB06F_SEMAPHORED_REDUCTION_MIN 0x00000000 
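[Illustrative sketch, not part of the generated header] Each field above is written as a HIGH:LOW bit pair (e.g. NVB06F_SEMAPHORED_OPERATION 4:0); the DRF_DEF()/DRF_NUM() helpers in nvmisc.h turn such a pair into a shift and mask via the ternary trick ((1?4:0) yields the high bit, (0?4:0) the low bit). Assuming those helpers, a host driver could build the SEMAPHORED data word for a 4-byte semaphore release like this (pb_semaphored_release is a hypothetical name):

    #include "nvtypes.h"
    #include "nvmisc.h"   /* DRF_DEF() */
    #include "clb06f.h"

    /* NVB06F_SEMAPHORED data word: release a 4-byte semaphore without
     * forcing a wait-for-idle first. */
    static NvU32 pb_semaphored_release(void)
    {
        return DRF_DEF(B06F, _SEMAPHORED, _OPERATION,    _RELEASE) |
               DRF_DEF(B06F, _SEMAPHORED, _RELEASE_WFI,  _DIS)     |
               DRF_DEF(B06F, _SEMAPHORED, _RELEASE_SIZE, _4BYTE);
    }

The word is sent as the data of method NVB06F_SEMAPHORED (0x1C), after SEMAPHOREA/B carry the GPU VA and SEMAPHOREC the payload.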
+#define NVB06F_SEMAPHORED_REDUCTION_MAX 0x00000001 +#define NVB06F_SEMAPHORED_REDUCTION_XOR 0x00000002 +#define NVB06F_SEMAPHORED_REDUCTION_AND 0x00000003 +#define NVB06F_SEMAPHORED_REDUCTION_OR 0x00000004 +#define NVB06F_SEMAPHORED_REDUCTION_ADD 0x00000005 +#define NVB06F_SEMAPHORED_REDUCTION_INC 0x00000006 +#define NVB06F_SEMAPHORED_REDUCTION_DEC 0x00000007 +#define NVB06F_SEMAPHORED_FORMAT 31:31 +#define NVB06F_SEMAPHORED_FORMAT_SIGNED 0x00000000 +#define NVB06F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001 +#define NVB06F_NON_STALL_INTERRUPT (0x00000020) +#define NVB06F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVB06F_FB_FLUSH (0x00000024) +#define NVB06F_FB_FLUSH_HANDLE 31:0 +// NOTE - MEM_OP_A and MEM_OP_B have been removed for gm20x to make room for +// possible future MEM_OP features. MEM_OP_C/D have identical functionality +// to the previous MEM_OP_A/B methods. +#define NVB06F_MEM_OP_C (0x00000030) +#define NVB06F_MEM_OP_C_OPERAND_LOW 31:2 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET 11:10 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET_VID_MEM 0x00000000 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET_SYS_MEM_COHERENT 0x00000002 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET_SYS_MEM_NONCOHERENT 0x00000003 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_ADDR_LO 31:12 +#define NVB06F_MEM_OP_D (0x00000034) +#define NVB06F_MEM_OP_D_OPERAND_HIGH 7:0 +#define NVB06F_MEM_OP_D_OPERATION 31:27 +#define NVB06F_MEM_OP_D_OPERATION_MEMBAR 0x00000005 +#define NVB06F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVB06F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVB06F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +#define NVB06F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVB06F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVB06F_MEM_OP_D_TLB_INVALIDATE_ADDR_HI 7:0 +#define NVB06F_SET_REFERENCE (0x00000050) +#define NVB06F_SET_REFERENCE_COUNT 31:0 +#define NVB06F_WFI (0x00000078) +#define NVB06F_WFI_SCOPE 0:0 +#define NVB06F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000 +#define NVB06F_WFI_SCOPE_ALL 0x00000001 +#define NVB06F_CRC_CHECK (0x0000007c) +#define NVB06F_CRC_CHECK_VALUE 31:0 +#define NVB06F_YIELD (0x00000080) +#define NVB06F_YIELD_OP 1:0 +#define NVB06F_YIELD_OP_NOP 0x00000000 +#define NVB06F_YIELD_OP_PBDMA_TIMESLICE 0x00000001 +#define NVB06F_YIELD_OP_RUNLIST_TIMESLICE 0x00000002 +#define NVB06F_YIELD_OP_TSG 0x00000003 + + +/* GPFIFO entry format */ +#define NVB06F_GP_ENTRY__SIZE 8 +#define NVB06F_GP_ENTRY0_FETCH 0:0 +#define NVB06F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVB06F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVB06F_GP_ENTRY0_GET 31:2 +#define NVB06F_GP_ENTRY0_OPERAND 31:0 +#define NVB06F_GP_ENTRY1_GET_HI 7:0 +#define NVB06F_GP_ENTRY1_PRIV 8:8 +#define NVB06F_GP_ENTRY1_PRIV_USER 0x00000000 +#define NVB06F_GP_ENTRY1_PRIV_KERNEL 0x00000001 +#define NVB06F_GP_ENTRY1_LEVEL 9:9 +#define NVB06F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVB06F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVB06F_GP_ENTRY1_LENGTH 30:10 +#define NVB06F_GP_ENTRY1_SYNC 31:31 +#define NVB06F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVB06F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVB06F_GP_ENTRY1_OPCODE 7:0 +#define 
NVB06F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVB06F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVB06F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVB06F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVB06F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVB06F_DMA_METHOD_ADDRESS 11:0 +#define NVB06F_DMA_SUBDEVICE_MASK 15:4 +#define NVB06F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVB06F_DMA_TERT_OP 17:16 +#define NVB06F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVB06F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NVB06F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVB06F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVB06F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVB06F_DMA_METHOD_COUNT_OLD 28:18 +#define NVB06F_DMA_METHOD_COUNT 28:16 +#define NVB06F_DMA_IMMD_DATA 28:16 +#define NVB06F_DMA_SEC_OP 31:29 +#define NVB06F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVB06F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVB06F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NVB06F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) +#define NVB06F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NVB06F_DMA_SEC_OP_ONE_INC (0x00000005) +#define NVB06F_DMA_SEC_OP_RESERVED6 (0x00000006) +#define NVB06F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NVB06F_DMA_INCR_ADDRESS 11:0 +#define NVB06F_DMA_INCR_SUBCHANNEL 15:13 +#define NVB06F_DMA_INCR_COUNT 28:16 +#define NVB06F_DMA_INCR_OPCODE 31:29 +#define NVB06F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVB06F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVB06F_DMA_NONINCR_ADDRESS 11:0 +#define NVB06F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVB06F_DMA_NONINCR_COUNT 28:16 +#define NVB06F_DMA_NONINCR_OPCODE 31:29 +#define NVB06F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVB06F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define NVB06F_DMA_ONEINCR_ADDRESS 11:0 +#define NVB06F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVB06F_DMA_ONEINCR_COUNT 28:16 +#define NVB06F_DMA_ONEINCR_OPCODE 31:29 +#define NVB06F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVB06F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVB06F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVB06F_DMA_IMMD_ADDRESS 11:0 +#define NVB06F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVB06F_DMA_IMMD_DATA 28:16 +#define NVB06F_DMA_IMMD_OPCODE 31:29 +#define NVB06F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NVB06F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NVB06F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NVB06F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NVB06F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NVB06F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVB06F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NVB06F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVB06F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NVB06F_DMA_ENDSEG_OPCODE 31:29 +#define NVB06F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NVB06F_DMA_ADDRESS 12:2 +#define NVB06F_DMA_SUBCH 15:13 +#define NVB06F_DMA_OPCODE3 17:16 +#define NVB06F_DMA_OPCODE3_NONE (0x00000000) +#define NVB06F_DMA_COUNT 28:18 +#define NVB06F_DMA_OPCODE 31:29 +#define NVB06F_DMA_OPCODE_METHOD (0x00000000) +#define NVB06F_DMA_OPCODE_NONINC_METHOD (0x00000002) 
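[Illustrative sketch, not part of the generated header] The "dma method formats" and "GPFIFO entry format" fields above compose as follows; the helper names are hypothetical, and the DRF_*/NvU64_* macros are assumed to come from nvmisc.h:

    #include "nvtypes.h"
    #include "nvmisc.h"   /* DRF_DEF(), DRF_NUM(), NvU64_HI32(), NvU64_LO32() */
    #include "clb06f.h"

    /* One incrementing-method header: 'count' data words follow, targeting
     * consecutive methods starting at byte offset 'method' on subchannel
     * 'subch'. METHOD_ADDRESS is dword-addressed, hence the >> 2. */
    static NvU32 pb_incr_header(NvU32 method, NvU32 subch, NvU32 count)
    {
        return DRF_DEF(B06F, _DMA, _SEC_OP, _INC_METHOD)            |
               DRF_NUM(B06F, _DMA, _METHOD_COUNT,      count)       |
               DRF_NUM(B06F, _DMA, _METHOD_SUBCHANNEL, subch)       |
               DRF_NUM(B06F, _DMA, _METHOD_ADDRESS,    method >> 2);
    }

    /* One GPFIFO entry describing a pushbuffer segment of 'lengthDwords'
     * 32-bit words at GPU VA 'pbGpuVa' (4-byte aligned). */
    static void gp_entry_write(NvU32 entry[2], NvU64 pbGpuVa, NvU32 lengthDwords)
    {
        entry[0] = DRF_NUM(B06F, _GP_ENTRY0, _GET, NvU64_LO32(pbGpuVa) >> 2);
        entry[1] = DRF_NUM(B06F, _GP_ENTRY1, _GET_HI, NvU64_HI32(pbGpuVa)) |
                   DRF_NUM(B06F, _GP_ENTRY1, _LENGTH, lengthDwords)        |
                   DRF_DEF(B06F, _GP_ENTRY1, _PRIV,   _USER)               |
                   DRF_DEF(B06F, _GP_ENTRY1, _LEVEL,  _MAIN)               |
                   DRF_DEF(B06F, _GP_ENTRY1, _SYNC,   _PROCEED);
    }

After new entries are written, the submitter advances GPPut in the MaxwellAControlGPFifo page (the struct near the top of this file) so the PBDMA fetches them.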
+#define NVB06F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clb06f_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clb097.h b/src/common/sdk/nvidia/inc/class/clb097.h new file mode 100644 index 0000000..52ee310 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb097.h @@ -0,0 +1,3966 @@ +/******************************************************************************* + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef _cl_maxwell_a_h_ +#define _cl_maxwell_a_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../../../class/bin/sw_header.pl maxwell_a */ + +#include "nvtypes.h" + +#define MAXWELL_A 0xB097 + +#define NVB097_SET_OBJECT 0x0000 +#define NVB097_SET_OBJECT_CLASS_ID 15:0 +#define NVB097_SET_OBJECT_ENGINE_ID 20:16 + +#define NVB097_NO_OPERATION 0x0100 +#define NVB097_NO_OPERATION_V 31:0 + +#define NVB097_SET_NOTIFY_A 0x0104 +#define NVB097_SET_NOTIFY_A_ADDRESS_UPPER 7:0 + +#define NVB097_SET_NOTIFY_B 0x0108 +#define NVB097_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NVB097_NOTIFY 0x010c +#define NVB097_NOTIFY_TYPE 31:0 +#define NVB097_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NVB097_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NVB097_WAIT_FOR_IDLE 0x0110 +#define NVB097_WAIT_FOR_IDLE_V 31:0 + +#define NVB097_LOAD_MME_INSTRUCTION_RAM_POINTER 0x0114 +#define NVB097_LOAD_MME_INSTRUCTION_RAM_POINTER_V 31:0 + +#define NVB097_LOAD_MME_INSTRUCTION_RAM 0x0118 +#define NVB097_LOAD_MME_INSTRUCTION_RAM_V 31:0 + +#define NVB097_LOAD_MME_START_ADDRESS_RAM_POINTER 0x011c +#define NVB097_LOAD_MME_START_ADDRESS_RAM_POINTER_V 31:0 + +#define NVB097_LOAD_MME_START_ADDRESS_RAM 0x0120 +#define NVB097_LOAD_MME_START_ADDRESS_RAM_V 31:0 + +#define NVB097_SET_MME_SHADOW_RAM_CONTROL 0x0124 +#define NVB097_SET_MME_SHADOW_RAM_CONTROL_MODE 1:0 +#define NVB097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK 0x00000000 +#define NVB097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK_WITH_FILTER 0x00000001 +#define NVB097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH 0x00000002 +#define NVB097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_REPLAY 0x00000003 + +#define NVB097_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER 0x0128 +#define NVB097_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER_V 7:0 + +#define NVB097_PEER_SEMAPHORE_RELEASE_OFFSET 0x012c +#define NVB097_PEER_SEMAPHORE_RELEASE_OFFSET_V 31:0 + +#define NVB097_SET_GLOBAL_RENDER_ENABLE_A 
0x0130 +#define NVB097_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVB097_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NVB097_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVB097_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NVB097_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NVB097_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVB097_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVB097_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVB097_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVB097_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVB097_SEND_GO_IDLE 0x013c +#define NVB097_SEND_GO_IDLE_V 31:0 + +#define NVB097_PM_TRIGGER 0x0140 +#define NVB097_PM_TRIGGER_V 31:0 + +#define NVB097_PM_TRIGGER_WFI 0x0144 +#define NVB097_PM_TRIGGER_WFI_V 31:0 + +#define NVB097_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NVB097_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NVB097_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NVB097_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NVB097_LINE_LENGTH_IN 0x0180 +#define NVB097_LINE_LENGTH_IN_VALUE 31:0 + +#define NVB097_LINE_COUNT 0x0184 +#define NVB097_LINE_COUNT_VALUE 31:0 + +#define NVB097_OFFSET_OUT_UPPER 0x0188 +#define NVB097_OFFSET_OUT_UPPER_VALUE 7:0 + +#define NVB097_OFFSET_OUT 0x018c +#define NVB097_OFFSET_OUT_VALUE 31:0 + +#define NVB097_PITCH_OUT 0x0190 +#define NVB097_PITCH_OUT_VALUE 31:0 + +#define NVB097_SET_DST_BLOCK_SIZE 0x0194 +#define NVB097_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVB097_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVB097_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVB097_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVB097_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVB097_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVB097_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVB097_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVB097_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVB097_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVB097_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVB097_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVB097_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVB097_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NVB097_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVB097_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVB097_SET_DST_WIDTH 0x0198 +#define NVB097_SET_DST_WIDTH_V 31:0 + +#define NVB097_SET_DST_HEIGHT 0x019c +#define NVB097_SET_DST_HEIGHT_V 31:0 + +#define NVB097_SET_DST_DEPTH 0x01a0 +#define NVB097_SET_DST_DEPTH_V 31:0 + +#define NVB097_SET_DST_LAYER 0x01a4 +#define NVB097_SET_DST_LAYER_V 31:0 + +#define NVB097_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVB097_SET_DST_ORIGIN_BYTES_X_V 19:0 + +#define NVB097_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVB097_SET_DST_ORIGIN_SAMPLES_Y_V 15:0 + +#define NVB097_LAUNCH_DMA 0x01b0 +#define NVB097_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVB097_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVB097_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVB097_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVB097_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVB097_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 +#define NVB097_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVB097_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVB097_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 
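[Illustrative sketch, not part of the generated header] LINE_LENGTH_IN, LINE_COUNT, OFFSET_OUT*, PITCH_OUT and LAUNCH_DMA form this class's inline-to-memory (I2M) path. A pitch-linear upload could be programmed roughly as below; 'push' is a hypothetical emitter for one method/data pair, and the DRF_*/NvU64_* macros are assumed to come from nvmisc.h:

    #include "nvtypes.h"
    #include "nvmisc.h"   /* DRF_DEF(), DRF_NUM(), NvU64_HI32(), NvU64_LO32() */
    #include "clb097.h"

    typedef void (*push_fn)(NvU32 method, NvU32 data);

    /* Set up an inline upload of 'lines' rows of 'widthBytes' bytes each
     * into a pitch-linear destination at GPU VA 'dstVa'. */
    static void i2m_begin_pitch_upload(push_fn push, NvU64 dstVa,
                                       NvU32 widthBytes, NvU32 lines,
                                       NvU32 pitchBytes)
    {
        push(NVB097_LINE_LENGTH_IN, widthBytes);
        push(NVB097_LINE_COUNT, lines);
        push(NVB097_OFFSET_OUT_UPPER,
             DRF_NUM(B097, _OFFSET_OUT_UPPER, _VALUE, NvU64_HI32(dstVa)));
        push(NVB097_OFFSET_OUT, NvU64_LO32(dstVa));
        push(NVB097_PITCH_OUT, pitchBytes);
        push(NVB097_LAUNCH_DMA,
             DRF_DEF(B097, _LAUNCH_DMA, _DST_MEMORY_LAYOUT, _PITCH)      |
             DRF_DEF(B097, _LAUNCH_DMA, _COMPLETION_TYPE,   _FLUSH_ONLY) |
             DRF_DEF(B097, _LAUNCH_DMA, _INTERRUPT_TYPE,    _NONE));
        /* The payload then follows as (widthBytes * lines + 3) / 4 data
         * words written to NVB097_LOAD_INLINE_DATA (defined just below),
         * typically under one non-incrementing method header. */
    }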
+#define NVB097_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVB097_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVB097_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVB097_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define NVB097_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVB097_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVB097_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVB097_LAUNCH_DMA_REDUCTION_OP 15:13 +#define NVB097_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVB097_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVB097_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVB097_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVB097_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 +#define NVB097_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVB097_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVB097_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVB097_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVB097_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVB097_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVB097_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVB097_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVB097_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVB097_LOAD_INLINE_DATA 0x01b4 +#define NVB097_LOAD_INLINE_DATA_V 31:0 + +#define NVB097_SET_I2M_SEMAPHORE_A 0x01dc +#define NVB097_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVB097_SET_I2M_SEMAPHORE_B 0x01e0 +#define NVB097_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVB097_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVB097_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVB097_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVB097_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVB097_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVB097_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVB097_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVB097_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVB097_SET_I2M_SPARE_NOOP03 0x01fc +#define NVB097_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVB097_RUN_DS_NOW 0x0200 +#define NVB097_RUN_DS_NOW_V 31:0 + +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS 0x0204 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD 4:0 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_INSTANTANEOUS 0x00000000 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16 0x00000001 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32 0x00000002 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__64 0x00000003 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__128 0x00000004 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__256 0x00000005 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__512 0x00000006 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1024 0x00000007 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2048 0x00000008 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4096 0x00000009 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__8192 0x0000000A +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16384 0x0000000B +#define 
NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32768 0x0000000C +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__65536 0x0000000D +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__131072 0x0000000E +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__262144 0x0000000F +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__524288 0x00000010 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1048576 0x00000011 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2097152 0x00000012 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4194304 0x00000013 +#define NVB097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_LATEZ_ALWAYS 0x0000001F + +#define NVB097_SET_ALIASED_LINE_WIDTH_ENABLE 0x020c +#define NVB097_SET_ALIASED_LINE_WIDTH_ENABLE_V 0:0 +#define NVB097_SET_ALIASED_LINE_WIDTH_ENABLE_V_FALSE 0x00000000 +#define NVB097_SET_ALIASED_LINE_WIDTH_ENABLE_V_TRUE 0x00000001 + +#define NVB097_SET_API_MANDATED_EARLY_Z 0x0210 +#define NVB097_SET_API_MANDATED_EARLY_Z_ENABLE 0:0 +#define NVB097_SET_API_MANDATED_EARLY_Z_ENABLE_FALSE 0x00000000 +#define NVB097_SET_API_MANDATED_EARLY_Z_ENABLE_TRUE 0x00000001 + +#define NVB097_SET_GS_DM_FIFO 0x0214 +#define NVB097_SET_GS_DM_FIFO_SIZE_RASTER_ON 12:0 +#define NVB097_SET_GS_DM_FIFO_SIZE_RASTER_OFF 28:16 +#define NVB097_SET_GS_DM_FIFO_SPILL_ENABLED 31:31 +#define NVB097_SET_GS_DM_FIFO_SPILL_ENABLED_FALSE 0x00000000 +#define NVB097_SET_GS_DM_FIFO_SPILL_ENABLED_TRUE 0x00000001 + +#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS 0x0218 +#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY 5:4 +#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVB097_INVALIDATE_SHADER_CACHES 0x021c +#define NVB097_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVB097_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVB097_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVB097_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVB097_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVB097_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVB097_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVB097_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVB097_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVB097_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVB097_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVB097_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVB097_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVB097_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVB097_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVB097_INCREMENT_SYNC_POINT 0x02c8 +#define NVB097_INCREMENT_SYNC_POINT_INDEX 11:0 +#define NVB097_INCREMENT_SYNC_POINT_CLEAN_L2 16:16 +#define NVB097_INCREMENT_SYNC_POINT_CLEAN_L2_FALSE 0x00000000 +#define NVB097_INCREMENT_SYNC_POINT_CLEAN_L2_TRUE 0x00000001 +#define NVB097_INCREMENT_SYNC_POINT_CONDITION 20:20 +#define NVB097_INCREMENT_SYNC_POINT_CONDITION_STREAM_OUT_WRITES_DONE 0x00000000 
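[Illustrative sketch, not part of the generated header] The INVALIDATE_SHADER_CACHES bits above are independent selects, so one data word can combine several cache operations; fields left at zero default to FALSE. Assuming the DRF_DEF() helper from nvmisc.h:

    #include "nvtypes.h"
    #include "nvmisc.h"   /* DRF_DEF() */
    #include "clb097.h"

    /* Data word for NVB097_INVALIDATE_SHADER_CACHES requesting an
     * instruction- and constant-cache invalidate plus a data flush. */
    static NvU32 invalidate_shader_caches_word(void)
    {
        return DRF_DEF(B097, _INVALIDATE_SHADER_CACHES, _INSTRUCTION, _TRUE) |
               DRF_DEF(B097, _INVALIDATE_SHADER_CACHES, _CONSTANT,    _TRUE) |
               DRF_DEF(B097, _INVALIDATE_SHADER_CACHES, _FLUSH_DATA,  _TRUE);
    }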
+#define NVB097_INCREMENT_SYNC_POINT_CONDITION_ROP_WRITES_DONE 0x00000001 + +#define NVB097_SET_PRIM_CIRCULAR_BUFFER_THROTTLE 0x02d0 +#define NVB097_SET_PRIM_CIRCULAR_BUFFER_THROTTLE_PRIM_AREA 21:0 + +#define NVB097_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE 0x02d4 +#define NVB097_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE_V 0:0 + +#define NVB097_SET_SURFACE_CLIP_ID_BLOCK_SIZE 0x02d8 +#define NVB097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH 3:0 +#define NVB097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVB097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT 7:4 +#define NVB097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVB097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVB097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVB097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVB097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVB097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVB097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH 11:8 +#define NVB097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 + +#define NVB097_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dc +#define NVB097_SET_ALPHA_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 13:0 + +#define NVB097_DECOMPRESS_SURFACE 0x02e0 +#define NVB097_DECOMPRESS_SURFACE_MRT_SELECT 2:0 +#define NVB097_DECOMPRESS_SURFACE_RT_ARRAY_INDEX 19:4 + +#define NVB097_SET_ZCULL_ROP_BYPASS 0x02e4 +#define NVB097_SET_ZCULL_ROP_BYPASS_ENABLE 0:0 +#define NVB097_SET_ZCULL_ROP_BYPASS_ENABLE_FALSE 0x00000000 +#define NVB097_SET_ZCULL_ROP_BYPASS_ENABLE_TRUE 0x00000001 +#define NVB097_SET_ZCULL_ROP_BYPASS_NO_STALL 4:4 +#define NVB097_SET_ZCULL_ROP_BYPASS_NO_STALL_FALSE 0x00000000 +#define NVB097_SET_ZCULL_ROP_BYPASS_NO_STALL_TRUE 0x00000001 +#define NVB097_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING 8:8 +#define NVB097_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_FALSE 0x00000000 +#define NVB097_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_TRUE 0x00000001 +#define NVB097_SET_ZCULL_ROP_BYPASS_THRESHOLD 15:12 + +#define NVB097_SET_ZCULL_SUBREGION 0x02e8 +#define NVB097_SET_ZCULL_SUBREGION_ENABLE 0:0 +#define NVB097_SET_ZCULL_SUBREGION_ENABLE_FALSE 0x00000000 +#define NVB097_SET_ZCULL_SUBREGION_ENABLE_TRUE 0x00000001 +#define NVB097_SET_ZCULL_SUBREGION_NORMALIZED_ALIQUOTS 27:4 + +#define NVB097_SET_RASTER_BOUNDING_BOX 0x02ec +#define NVB097_SET_RASTER_BOUNDING_BOX_MODE 0:0 +#define NVB097_SET_RASTER_BOUNDING_BOX_MODE_BOUNDING_BOX 0x00000000 +#define NVB097_SET_RASTER_BOUNDING_BOX_MODE_FULL_VIEWPORT 0x00000001 +#define NVB097_SET_RASTER_BOUNDING_BOX_PAD 11:4 + +#define NVB097_PEER_SEMAPHORE_RELEASE 0x02f0 +#define NVB097_PEER_SEMAPHORE_RELEASE_V 31:0 + +#define NVB097_SET_ITERATED_BLEND_OPTIMIZATION 0x02f4 +#define NVB097_SET_ITERATED_BLEND_OPTIMIZATION_NOOP 1:0 +#define NVB097_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_NEVER 0x00000000 +#define NVB097_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0000 0x00000001 +#define NVB097_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_ALPHA_0 0x00000002 +#define NVB097_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0001 0x00000003 + +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION 0x02f8 +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_SUBREGION_ID 7:0 +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_ALIQUOTS 23:8 +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT 27:24 +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16X2_4X4 0x00000000 +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X16_4X4 0x00000001 +#define 
NVB097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X2 0x00000002 +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_2X4 0x00000003 +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X8_4X4 0x00000004 +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_4X2 0x00000005 +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_2X4 0x00000006 +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X8 0x00000007 +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_2X2 0x00000008 +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_4X2 0x00000009 +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_2X4 0x0000000A +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_8X8_2X2 0x0000000B +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_1X1 0x0000000C +#define NVB097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_NONE 0x0000000F + +#define NVB097_ASSIGN_ZCULL_SUBREGIONS 0x02fc +#define NVB097_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM 1:0 +#define NVB097_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Static 0x00000000 +#define NVB097_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Adaptive 0x00000001 + +#define NVB097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE 0x0300 +#define NVB097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE 0:0 +#define NVB097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_FALSE 0x00000000 +#define NVB097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_TRUE 0x00000001 +#define NVB097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE 1:1 +#define NVB097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000 +#define NVB097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001 + +#define NVB097_DRAW_ZERO_INDEX 0x0304 +#define NVB097_DRAW_ZERO_INDEX_COUNT 31:0 + +#define NVB097_SET_L1_CONFIGURATION 0x0308 +#define NVB097_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY 2:0 +#define NVB097_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_16KB 0x00000001 +#define NVB097_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB 0x00000003 + +#define NVB097_SET_RENDER_ENABLE_CONTROL 0x030c +#define NVB097_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER 0:0 +#define NVB097_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_FALSE 0x00000000 +#define NVB097_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_TRUE 0x00000001 + +#define NVB097_SET_SPA_VERSION 0x0310 +#define NVB097_SET_SPA_VERSION_MINOR 7:0 +#define NVB097_SET_SPA_VERSION_MAJOR 15:8 + +#define NVB097_SET_IEEE_CLEAN_UPDATE 0x0314 +#define NVB097_SET_IEEE_CLEAN_UPDATE_ENABLE 0:0 +#define NVB097_SET_IEEE_CLEAN_UPDATE_ENABLE_FALSE 0x00000000 +#define NVB097_SET_IEEE_CLEAN_UPDATE_ENABLE_TRUE 0x00000001 + +#define NVB097_SET_SNAP_GRID_LINE 0x0318 +#define NVB097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL 3:0 +#define NVB097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001 +#define NVB097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002 +#define NVB097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003 +#define NVB097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004 +#define NVB097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005 +#define NVB097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006 +#define NVB097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007 +#define NVB097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008 +#define NVB097_SET_SNAP_GRID_LINE_ROUNDING_MODE 8:8 +#define NVB097_SET_SNAP_GRID_LINE_ROUNDING_MODE_RTNE 0x00000000 +#define 
NVB097_SET_SNAP_GRID_LINE_ROUNDING_MODE_TESLA 0x00000001 + +#define NVB097_SET_SNAP_GRID_NON_LINE 0x031c +#define NVB097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL 3:0 +#define NVB097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001 +#define NVB097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002 +#define NVB097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003 +#define NVB097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004 +#define NVB097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005 +#define NVB097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006 +#define NVB097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007 +#define NVB097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008 +#define NVB097_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE 8:8 +#define NVB097_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_RTNE 0x00000000 +#define NVB097_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_TESLA 0x00000001 + +#define NVB097_SET_TESSELLATION_PARAMETERS 0x0320 +#define NVB097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE 1:0 +#define NVB097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_ISOLINE 0x00000000 +#define NVB097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_TRIANGLE 0x00000001 +#define NVB097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_QUAD 0x00000002 +#define NVB097_SET_TESSELLATION_PARAMETERS_SPACING 5:4 +#define NVB097_SET_TESSELLATION_PARAMETERS_SPACING_INTEGER 0x00000000 +#define NVB097_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_ODD 0x00000001 +#define NVB097_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_EVEN 0x00000002 +#define NVB097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES 9:8 +#define NVB097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_POINTS 0x00000000 +#define NVB097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_LINES 0x00000001 +#define NVB097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CW 0x00000002 +#define NVB097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CCW 0x00000003 + +#define NVB097_SET_TESSELLATION_LOD_U0_OR_DENSITY 0x0324 +#define NVB097_SET_TESSELLATION_LOD_U0_OR_DENSITY_V 31:0 + +#define NVB097_SET_TESSELLATION_LOD_V0_OR_DETAIL 0x0328 +#define NVB097_SET_TESSELLATION_LOD_V0_OR_DETAIL_V 31:0 + +#define NVB097_SET_TESSELLATION_LOD_U1_OR_W0 0x032c +#define NVB097_SET_TESSELLATION_LOD_U1_OR_W0_V 31:0 + +#define NVB097_SET_TESSELLATION_LOD_V1 0x0330 +#define NVB097_SET_TESSELLATION_LOD_V1_V 31:0 + +#define NVB097_SET_TG_LOD_INTERIOR_U 0x0334 +#define NVB097_SET_TG_LOD_INTERIOR_U_V 31:0 + +#define NVB097_SET_TG_LOD_INTERIOR_V 0x0338 +#define NVB097_SET_TG_LOD_INTERIOR_V_V 31:0 + +#define NVB097_RESERVED_TG07 0x033c +#define NVB097_RESERVED_TG07_V 0:0 + +#define NVB097_RESERVED_TG08 0x0340 +#define NVB097_RESERVED_TG08_V 0:0 + +#define NVB097_RESERVED_TG09 0x0344 +#define NVB097_RESERVED_TG09_V 0:0 + +#define NVB097_RESERVED_TG10 0x0348 +#define NVB097_RESERVED_TG10_V 0:0 + +#define NVB097_RESERVED_TG11 0x034c +#define NVB097_RESERVED_TG11_V 0:0 + +#define NVB097_RESERVED_TG12 0x0350 +#define NVB097_RESERVED_TG12_V 0:0 + +#define NVB097_RESERVED_TG13 0x0354 +#define NVB097_RESERVED_TG13_V 0:0 + +#define NVB097_RESERVED_TG14 0x0358 +#define NVB097_RESERVED_TG14_V 0:0 + +#define NVB097_RESERVED_TG15 0x035c +#define NVB097_RESERVED_TG15_V 0:0 + +#define NVB097_SET_SUBTILING_PERF_KNOB_A 0x0360 +#define NVB097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_REGISTER_FILE_PER_SUBTILE 7:0 +#define NVB097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_PIXEL_OUTPUT_BUFFER_PER_SUBTILE 15:8 +#define 
+
+#define NVB097_SET_TESSELLATION_LOD_U0_OR_DENSITY 0x0324
+#define NVB097_SET_TESSELLATION_LOD_U0_OR_DENSITY_V 31:0
+
+#define NVB097_SET_TESSELLATION_LOD_V0_OR_DETAIL 0x0328
+#define NVB097_SET_TESSELLATION_LOD_V0_OR_DETAIL_V 31:0
+
+#define NVB097_SET_TESSELLATION_LOD_U1_OR_W0 0x032c
+#define NVB097_SET_TESSELLATION_LOD_U1_OR_W0_V 31:0
+
+#define NVB097_SET_TESSELLATION_LOD_V1 0x0330
+#define NVB097_SET_TESSELLATION_LOD_V1_V 31:0
+
+#define NVB097_SET_TG_LOD_INTERIOR_U 0x0334
+#define NVB097_SET_TG_LOD_INTERIOR_U_V 31:0
+
+#define NVB097_SET_TG_LOD_INTERIOR_V 0x0338
+#define NVB097_SET_TG_LOD_INTERIOR_V_V 31:0
+
+#define NVB097_RESERVED_TG07 0x033c
+#define NVB097_RESERVED_TG07_V 0:0
+
+#define NVB097_RESERVED_TG08 0x0340
+#define NVB097_RESERVED_TG08_V 0:0
+
+#define NVB097_RESERVED_TG09 0x0344
+#define NVB097_RESERVED_TG09_V 0:0
+
+#define NVB097_RESERVED_TG10 0x0348
+#define NVB097_RESERVED_TG10_V 0:0
+
+#define NVB097_RESERVED_TG11 0x034c
+#define NVB097_RESERVED_TG11_V 0:0
+
+#define NVB097_RESERVED_TG12 0x0350
+#define NVB097_RESERVED_TG12_V 0:0
+
+#define NVB097_RESERVED_TG13 0x0354
+#define NVB097_RESERVED_TG13_V 0:0
+
+#define NVB097_RESERVED_TG14 0x0358
+#define NVB097_RESERVED_TG14_V 0:0
+
+#define NVB097_RESERVED_TG15 0x035c
+#define NVB097_RESERVED_TG15_V 0:0
+
+#define NVB097_SET_SUBTILING_PERF_KNOB_A 0x0360
+#define NVB097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_REGISTER_FILE_PER_SUBTILE 7:0
+#define NVB097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_PIXEL_OUTPUT_BUFFER_PER_SUBTILE 15:8
+#define NVB097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_TRIANGLE_RAM_PER_SUBTILE 23:16
+#define NVB097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_MAX_QUADS_PER_SUBTILE 31:24
+
+#define NVB097_SET_SUBTILING_PERF_KNOB_B 0x0364
+#define NVB097_SET_SUBTILING_PERF_KNOB_B_FRACTION_OF_MAX_PRIMITIVES_PER_SUBTILE 7:0
+
+#define NVB097_SET_SUBTILING_PERF_KNOB_C 0x0368
+#define NVB097_SET_SUBTILING_PERF_KNOB_C_RESERVED 0:0
+
+#define NVB097_SET_ZCULL_SUBREGION_TO_REPORT 0x036c
+#define NVB097_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE 0:0
+#define NVB097_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_TRUE 0x00000001
+#define NVB097_SET_ZCULL_SUBREGION_TO_REPORT_SUBREGION_ID 11:4
+
+#define NVB097_SET_ZCULL_SUBREGION_REPORT_TYPE 0x0370
+#define NVB097_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE 0:0
+#define NVB097_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_TRUE 0x00000001
+#define NVB097_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE 6:4
+#define NVB097_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST 0x00000000
+#define NVB097_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_NO_ACCEPT 0x00000001
+#define NVB097_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_LATE_Z 0x00000002
+#define NVB097_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_STENCIL_TEST 0x00000003
+
+#define NVB097_SET_BALANCED_PRIMITIVE_WORKLOAD 0x0374
+#define NVB097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE 0:0
+#define NVB097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_FALSE 0x00000000
+#define NVB097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_TRUE 0x00000001
+#define NVB097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE 4:4
+#define NVB097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_FALSE 0x00000000
+#define NVB097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_TRUE 0x00000001
+
+#define NVB097_SET_MAX_PATCHES_PER_BATCH 0x0378
+#define NVB097_SET_MAX_PATCHES_PER_BATCH_V 5:0
+
+#define NVB097_SET_RASTER_ENABLE 0x037c
+#define NVB097_SET_RASTER_ENABLE_V 0:0
+#define NVB097_SET_RASTER_ENABLE_V_FALSE 0x00000000
+#define NVB097_SET_RASTER_ENABLE_V_TRUE 0x00000001
+
+#define NVB097_SET_STREAM_OUT_BUFFER_ENABLE(j) (0x0380+(j)*32)
+#define NVB097_SET_STREAM_OUT_BUFFER_ENABLE_V 0:0
+#define NVB097_SET_STREAM_OUT_BUFFER_ENABLE_V_FALSE 0x00000000
+#define NVB097_SET_STREAM_OUT_BUFFER_ENABLE_V_TRUE 0x00000001
+
+#define NVB097_SET_STREAM_OUT_BUFFER_ADDRESS_A(j) (0x0384+(j)*32)
+#define NVB097_SET_STREAM_OUT_BUFFER_ADDRESS_A_UPPER 7:0
+
+#define NVB097_SET_STREAM_OUT_BUFFER_ADDRESS_B(j) (0x0388+(j)*32)
+#define NVB097_SET_STREAM_OUT_BUFFER_ADDRESS_B_LOWER 31:0
+
+#define NVB097_SET_STREAM_OUT_BUFFER_SIZE(j) (0x038c+(j)*32)
+#define NVB097_SET_STREAM_OUT_BUFFER_SIZE_BYTES 31:0
+
+#define NVB097_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER(j) (0x0390+(j)*32)
+#define NVB097_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER_START_OFFSET 31:0
+
+#define NVB097_SET_STREAM_OUT_CONTROL_STREAM(j) (0x0700+(j)*16)
+#define NVB097_SET_STREAM_OUT_CONTROL_STREAM_SELECT 1:0
+
+#define NVB097_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT(j) (0x0704+(j)*16)
+#define NVB097_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT_MAX 7:0
+
+#define NVB097_SET_STREAM_OUT_CONTROL_STRIDE(j) (0x0708+(j)*16)
+#define NVB097_SET_STREAM_OUT_CONTROL_STRIDE_BYTES 31:0
+
+#define NVB097_SET_RASTER_INPUT 0x0740
+#define NVB097_SET_RASTER_INPUT_STREAM_SELECT 1:0
+
+#define NVB097_SET_STREAM_OUTPUT 0x0744
+#define NVB097_SET_STREAM_OUTPUT_ENABLE 0:0
+#define NVB097_SET_STREAM_OUTPUT_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STREAM_OUTPUT_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE 0x0748
+#define NVB097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE 0:0
+#define NVB097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_FALSE 0x00000000
+#define NVB097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_ALPHA_FRACTION 0x074c
+#define NVB097_SET_ALPHA_FRACTION_V 7:0
+
+#define NVB097_SET_HYBRID_ANTI_ALIAS_CONTROL 0x0754
+#define NVB097_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES 3:0
+#define NVB097_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID 4:4
+#define NVB097_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_FRAGMENT 0x00000000
+#define NVB097_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_PASS 0x00000001
+
+#define NVB097_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c
+#define NVB097_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0
+
+#define NVB097_SET_SHADER_LOCAL_MEMORY_A 0x0790
+#define NVB097_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0
+
+#define NVB097_SET_SHADER_LOCAL_MEMORY_B 0x0794
+#define NVB097_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0
+
+#define NVB097_SET_SHADER_LOCAL_MEMORY_C 0x0798
+#define NVB097_SET_SHADER_LOCAL_MEMORY_C_SIZE_UPPER 5:0
+
+#define NVB097_SET_SHADER_LOCAL_MEMORY_D 0x079c
+#define NVB097_SET_SHADER_LOCAL_MEMORY_D_SIZE_LOWER 31:0
+
+#define NVB097_SET_SHADER_LOCAL_MEMORY_E 0x07a0
+#define NVB097_SET_SHADER_LOCAL_MEMORY_E_DEFAULT_SIZE_PER_WARP 25:0
+
+#define NVB097_SET_COLOR_ZERO_BANDWIDTH_CLEAR 0x07a4
+#define NVB097_SET_COLOR_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVB097_SET_Z_ZERO_BANDWIDTH_CLEAR 0x07a8
+#define NVB097_SET_Z_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVB097_SET_ISBE_SAVE_RESTORE_PROGRAM 0x07ac
+#define NVB097_SET_ISBE_SAVE_RESTORE_PROGRAM_OFFSET 31:0
+
+#define NVB097_SET_ZCULL_REGION_SIZE_A 0x07c0
+#define NVB097_SET_ZCULL_REGION_SIZE_A_WIDTH 15:0
+
+#define NVB097_SET_ZCULL_REGION_SIZE_B 0x07c4
+#define NVB097_SET_ZCULL_REGION_SIZE_B_HEIGHT 15:0
+
+#define NVB097_SET_ZCULL_REGION_SIZE_C 0x07c8
+#define NVB097_SET_ZCULL_REGION_SIZE_C_DEPTH 15:0
+
+#define NVB097_SET_ZCULL_REGION_PIXEL_OFFSET_C 0x07cc
+#define NVB097_SET_ZCULL_REGION_PIXEL_OFFSET_C_DEPTH 15:0
+
+#define NVB097_SET_CULL_BEFORE_FETCH 0x07dc
+#define NVB097_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE 0:0
+#define NVB097_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_FALSE 0x00000000
+#define NVB097_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_TRUE 0x00000001
+
+#define NVB097_SET_ZCULL_REGION_LOCATION 0x07e0
+#define NVB097_SET_ZCULL_REGION_LOCATION_START_ALIQUOT 15:0
+#define NVB097_SET_ZCULL_REGION_LOCATION_ALIQUOT_COUNT 31:16
+
+#define NVB097_SET_ZCULL_REGION_ALIQUOTS 0x07e4
+#define NVB097_SET_ZCULL_REGION_ALIQUOTS_PER_LAYER 15:0
+
+#define NVB097_SET_ZCULL_STORAGE_A 0x07e8
+#define NVB097_SET_ZCULL_STORAGE_A_ADDRESS_UPPER 7:0
+
+#define NVB097_SET_ZCULL_STORAGE_B 0x07ec
+#define NVB097_SET_ZCULL_STORAGE_B_ADDRESS_LOWER 31:0
+
+#define NVB097_SET_ZCULL_STORAGE_C 0x07f0
+#define NVB097_SET_ZCULL_STORAGE_C_LIMIT_ADDRESS_UPPER 7:0
+
+#define NVB097_SET_ZCULL_STORAGE_D 0x07f4
+#define NVB097_SET_ZCULL_STORAGE_D_LIMIT_ADDRESS_LOWER 31:0
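Several of the surfaces above take a 40-bit GPU virtual address as an _A/_B method pair: the 8-bit _ADDRESS_UPPER field holds address bits 39:32 and _ADDRESS_LOWER holds bits 31:0. A hedged sketch of the split; push_method() is a placeholder for whatever pushbuffer writer the caller uses, not something defined by this header:

#include <stdio.h>
#include <stdint.h>

#define NVB097_SET_SHADER_LOCAL_MEMORY_A 0x0790 /* from above */
#define NVB097_SET_SHADER_LOCAL_MEMORY_B 0x0794

/* Placeholder pushbuffer writer for the sketch. */
static void push_method(uint32_t method, uint32_t data)
{
    printf("method 0x%04x <- 0x%08x\n", method, data);
}

static void set_shader_local_memory(uint64_t gpu_va)
{
    push_method(NVB097_SET_SHADER_LOCAL_MEMORY_A,
                (uint32_t)(gpu_va >> 32));         /* _ADDRESS_UPPER, bits 39:32 */
    push_method(NVB097_SET_SHADER_LOCAL_MEMORY_B,
                (uint32_t)(gpu_va & 0xFFFFFFFFu)); /* _ADDRESS_LOWER, bits 31:0 */
}

int main(void)
{
    set_shader_local_memory(0x123456789ULL);
    return 0;
}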
+
+#define NVB097_SET_ZT_READ_ONLY 0x07f8
+#define NVB097_SET_ZT_READ_ONLY_ENABLE_Z 0:0
+#define NVB097_SET_ZT_READ_ONLY_ENABLE_Z_FALSE 0x00000000
+#define NVB097_SET_ZT_READ_ONLY_ENABLE_Z_TRUE 0x00000001
+#define NVB097_SET_ZT_READ_ONLY_ENABLE_STENCIL 4:4
+#define NVB097_SET_ZT_READ_ONLY_ENABLE_STENCIL_FALSE 0x00000000
+#define NVB097_SET_ZT_READ_ONLY_ENABLE_STENCIL_TRUE 0x00000001
+
+#define NVB097_SET_COLOR_TARGET_A(j) (0x0800+(j)*64)
+#define NVB097_SET_COLOR_TARGET_A_OFFSET_UPPER 7:0
+
+#define NVB097_SET_COLOR_TARGET_B(j) (0x0804+(j)*64)
+#define NVB097_SET_COLOR_TARGET_B_OFFSET_LOWER 31:0
+
+#define NVB097_SET_COLOR_TARGET_WIDTH(j) (0x0808+(j)*64)
+#define NVB097_SET_COLOR_TARGET_WIDTH_V 27:0
+
+#define NVB097_SET_COLOR_TARGET_HEIGHT(j) (0x080c+(j)*64)
+#define NVB097_SET_COLOR_TARGET_HEIGHT_V 16:0
+
+#define NVB097_SET_COLOR_TARGET_FORMAT(j) (0x0810+(j)*64)
+#define NVB097_SET_COLOR_TARGET_FORMAT_V 7:0
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_DISABLED 0x00000000
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_AS32 0x000000C1
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_AU32 0x000000C2
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_X32 0x000000C4
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_X32 0x000000C5
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_R16_G16_B16_A16 0x000000C6
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RS16_GS16_BS16_AS16 0x000000C8
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RU16_GU16_BU16_AU16 0x000000C9
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RF32_GF32 0x000000CB
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RS32_GS32 0x000000CC
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RU32_GU32 0x000000CD
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_A8R8G8B8 0x000000CF
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_A8RL8GL8BL8 0x000000D0
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_A2B10G10R10 0x000000D1
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_AU2BU10GU10RU10 0x000000D2
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_A8B8G8R8 0x000000D5
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_A8BL8GL8RL8 0x000000D6
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_AN8BN8GN8RN8 0x000000D7
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_AS8BS8GS8RS8 0x000000D8
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_AU8BU8GU8RU8 0x000000D9
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_R16_G16 0x000000DA
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RN16_GN16 0x000000DB
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RS16_GS16 0x000000DC
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RU16_GU16 0x000000DD
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RF16_GF16 0x000000DE
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_A2R10G10B10 0x000000DF
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_BF10GF11RF11 0x000000E0
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RS32 0x000000E3
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RU32 0x000000E4
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RF32 0x000000E5
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_X8R8G8B8 0x000000E6
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_X8RL8GL8BL8 0x000000E7
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_R5G6B5 0x000000E8
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_A1R5G5B5 0x000000E9
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_G8R8 0x000000EA
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_GN8RN8 0x000000EB
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_GS8RS8 0x000000EC
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_GU8RU8 0x000000ED
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_R16 0x000000EE
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RN16 0x000000EF
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RS16 0x000000F0
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RU16 0x000000F1
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RF16 0x000000F2
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_R8 0x000000F3
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RN8 0x000000F4
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RS8 0x000000F5
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RU8 0x000000F6
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_A8 0x000000F7
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_X1R5G5B5 0x000000F8
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_X8B8G8R8 0x000000F9
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_X8BL8GL8RL8 0x000000FA
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_Z1R5G5B5 0x000000FB
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_O1R5G5B5 0x000000FC
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_Z8R8G8B8 0x000000FD
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_O8R8G8B8 0x000000FE
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_R32 0x000000FF
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_A16 0x00000040
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_AF16 0x00000041
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_AF32 0x00000042
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_A8R8 0x00000043
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_R16_A16 0x00000044
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RF16_AF16 0x00000045
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_RF32_AF32 0x00000046
+#define NVB097_SET_COLOR_TARGET_FORMAT_V_B8G8R8A8 0x00000047
+
+#define NVB097_SET_COLOR_TARGET_MEMORY(j) (0x0814+(j)*64)
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH 3:0
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH_ONE_GOB 0x00000000
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT 7:4
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_ONE_GOB 0x00000000
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_TWO_GOBS 0x00000001
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_FOUR_GOBS 0x00000002
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH 11:8
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_ONE_GOB 0x00000000
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_TWO_GOBS 0x00000001
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_FOUR_GOBS 0x00000002
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_EIGHT_GOBS 0x00000003
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVB097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005
+#define NVB097_SET_COLOR_TARGET_MEMORY_LAYOUT 12:12
+#define NVB097_SET_COLOR_TARGET_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NVB097_SET_COLOR_TARGET_MEMORY_LAYOUT_PITCH 0x00000001
+#define NVB097_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL 16:16
+#define NVB097_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000
+#define NVB097_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_DEPTH_SIZE 0x00000001
+
+#define NVB097_SET_COLOR_TARGET_THIRD_DIMENSION(j) (0x0818+(j)*64)
+#define NVB097_SET_COLOR_TARGET_THIRD_DIMENSION_V 27:0
+
+#define NVB097_SET_COLOR_TARGET_ARRAY_PITCH(j) (0x081c+(j)*64)
+#define NVB097_SET_COLOR_TARGET_ARRAY_PITCH_V 31:0
+
+#define NVB097_SET_COLOR_TARGET_LAYER(j) (0x0820+(j)*64)
+#define NVB097_SET_COLOR_TARGET_LAYER_OFFSET 15:0
+
+#define NVB097_SET_COLOR_TARGET_MARK(j) (0x0824+(j)*64)
+#define NVB097_SET_COLOR_TARGET_MARK_IEEE_CLEAN 0:0
+#define NVB097_SET_COLOR_TARGET_MARK_IEEE_CLEAN_FALSE 0x00000000
+#define NVB097_SET_COLOR_TARGET_MARK_IEEE_CLEAN_TRUE 0x00000001
+
+#define NVB097_SET_VIEWPORT_SCALE_X(j) (0x0a00+(j)*32)
+#define NVB097_SET_VIEWPORT_SCALE_X_V 31:0
+
+#define NVB097_SET_VIEWPORT_SCALE_Y(j) (0x0a04+(j)*32)
+#define NVB097_SET_VIEWPORT_SCALE_Y_V 31:0
+
+#define NVB097_SET_VIEWPORT_SCALE_Z(j) (0x0a08+(j)*32)
+#define NVB097_SET_VIEWPORT_SCALE_Z_V 31:0
+
+#define NVB097_SET_VIEWPORT_OFFSET_X(j) (0x0a0c+(j)*32)
+#define NVB097_SET_VIEWPORT_OFFSET_X_V 31:0
+
+#define NVB097_SET_VIEWPORT_OFFSET_Y(j) (0x0a10+(j)*32)
+#define NVB097_SET_VIEWPORT_OFFSET_Y_V 31:0
+
+#define NVB097_SET_VIEWPORT_OFFSET_Z(j) (0x0a14+(j)*32)
+#define NVB097_SET_VIEWPORT_OFFSET_Z_V 31:0
+
+#define NVB097_SET_VIEWPORT_CLIP_HORIZONTAL(j) (0x0c00+(j)*16)
+#define NVB097_SET_VIEWPORT_CLIP_HORIZONTAL_X0 15:0
+#define NVB097_SET_VIEWPORT_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVB097_SET_VIEWPORT_CLIP_VERTICAL(j) (0x0c04+(j)*16)
+#define NVB097_SET_VIEWPORT_CLIP_VERTICAL_Y0 15:0
+#define NVB097_SET_VIEWPORT_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVB097_SET_VIEWPORT_CLIP_MIN_Z(j) (0x0c08+(j)*16)
+#define NVB097_SET_VIEWPORT_CLIP_MIN_Z_V 31:0
+
+#define NVB097_SET_VIEWPORT_CLIP_MAX_Z(j) (0x0c0c+(j)*16)
+#define NVB097_SET_VIEWPORT_CLIP_MAX_Z_V 31:0
+
+#define NVB097_SET_WINDOW_CLIP_HORIZONTAL(j) (0x0d00+(j)*8)
+#define NVB097_SET_WINDOW_CLIP_HORIZONTAL_XMIN 15:0
+#define NVB097_SET_WINDOW_CLIP_HORIZONTAL_XMAX 31:16
+
+#define NVB097_SET_WINDOW_CLIP_VERTICAL(j) (0x0d04+(j)*8)
+#define NVB097_SET_WINDOW_CLIP_VERTICAL_YMIN 15:0
+#define NVB097_SET_WINDOW_CLIP_VERTICAL_YMAX 31:16
+
+#define NVB097_SET_CLIP_ID_EXTENT_X(j) (0x0d40+(j)*8)
+#define NVB097_SET_CLIP_ID_EXTENT_X_MINX 15:0
+#define NVB097_SET_CLIP_ID_EXTENT_X_WIDTH 31:16
+
+#define NVB097_SET_CLIP_ID_EXTENT_Y(j) (0x0d44+(j)*8)
+#define NVB097_SET_CLIP_ID_EXTENT_Y_MINY 15:0
+#define NVB097_SET_CLIP_ID_EXTENT_Y_HEIGHT 31:16
+
+#define NVB097_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK 0x0d60
+#define NVB097_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVB097_SET_API_VISIBLE_CALL_LIMIT 0x0d64
+#define NVB097_SET_API_VISIBLE_CALL_LIMIT_V 3:0
+#define NVB097_SET_API_VISIBLE_CALL_LIMIT_V__0 0x00000000
+#define NVB097_SET_API_VISIBLE_CALL_LIMIT_V__1 0x00000001
+#define NVB097_SET_API_VISIBLE_CALL_LIMIT_V__2 0x00000002
+#define NVB097_SET_API_VISIBLE_CALL_LIMIT_V__4 0x00000003
+#define NVB097_SET_API_VISIBLE_CALL_LIMIT_V__8 0x00000004
+#define NVB097_SET_API_VISIBLE_CALL_LIMIT_V__16 0x00000005
+#define NVB097_SET_API_VISIBLE_CALL_LIMIT_V__32 0x00000006
+#define NVB097_SET_API_VISIBLE_CALL_LIMIT_V__64 0x00000007
+#define NVB097_SET_API_VISIBLE_CALL_LIMIT_V__128 0x00000008
+#define NVB097_SET_API_VISIBLE_CALL_LIMIT_V_NO_CHECK 0x0000000F
+
+#define NVB097_SET_STATISTICS_COUNTER 0x0d68
+#define NVB097_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE 0:0
+#define NVB097_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVB097_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE 1:1
+#define NVB097_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVB097_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE 2:2
+#define NVB097_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVB097_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE 3:3
+#define NVB097_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVB097_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE 4:4
+#define NVB097_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVB097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE 5:5
+#define NVB097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVB097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE 6:6
+#define NVB097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_TRUE 0x00000001
+#define NVB097_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE 7:7
+#define NVB097_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVB097_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE 8:8
+#define NVB097_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVB097_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE 9:9
+#define NVB097_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVB097_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE 11:11
+#define NVB097_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVB097_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE 12:12
+#define NVB097_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVB097_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE 13:13
+#define NVB097_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVB097_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE 14:14
+#define NVB097_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVB097_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE 10:10
+#define NVB097_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_TRUE 0x00000001
+#define NVB097_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE 15:15
+#define NVB097_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_CLEAR_RECT_HORIZONTAL 0x0d6c
+#define NVB097_SET_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVB097_SET_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVB097_SET_CLEAR_RECT_VERTICAL 0x0d70
+#define NVB097_SET_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVB097_SET_CLEAR_RECT_VERTICAL_YMAX 31:16
+
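Two recurring patterns close out this block: methods parameterized by (j) are plain C macros, so per-index offsets fall out of ordinary arithmetic, and paired 15:0/31:16 fields pack a min/max (or origin/extent) into a single data word. A small self-contained check of both, using values from the defines above; pack_min_max() is an illustrative helper, not part of the header:

#include <assert.h>
#include <stdint.h>

#define NVB097_SET_VIEWPORT_CLIP_HORIZONTAL(j) (0x0c00+(j)*16) /* from above */

/* Illustrative: XMIN in 15:0, XMAX in 31:16 (clear rect, window clip, scissor). */
static uint32_t pack_min_max(uint16_t min, uint16_t max)
{
    return (uint32_t)min | ((uint32_t)max << 16);
}

int main(void)
{
    assert(NVB097_SET_VIEWPORT_CLIP_HORIZONTAL(3) == 0x0c30); /* 0x0c00 + 3*16 */
    assert(pack_min_max(0, 1920) == 0x07800000u);
    return 0;
}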
+#define NVB097_SET_VERTEX_ARRAY_START 0x0d74
+#define NVB097_SET_VERTEX_ARRAY_START_V 31:0
+
+#define NVB097_DRAW_VERTEX_ARRAY 0x0d78
+#define NVB097_DRAW_VERTEX_ARRAY_COUNT 31:0
+
+#define NVB097_SET_VIEWPORT_Z_CLIP 0x0d7c
+#define NVB097_SET_VIEWPORT_Z_CLIP_RANGE 0:0
+#define NVB097_SET_VIEWPORT_Z_CLIP_RANGE_NEGATIVE_W_TO_POSITIVE_W 0x00000000
+#define NVB097_SET_VIEWPORT_Z_CLIP_RANGE_ZERO_TO_POSITIVE_W 0x00000001
+
+#define NVB097_SET_COLOR_CLEAR_VALUE(i) (0x0d80+(i)*4)
+#define NVB097_SET_COLOR_CLEAR_VALUE_V 31:0
+
+#define NVB097_SET_Z_CLEAR_VALUE 0x0d90
+#define NVB097_SET_Z_CLEAR_VALUE_V 31:0
+
+#define NVB097_SET_SHADER_CACHE_CONTROL 0x0d94
+#define NVB097_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0
+#define NVB097_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000
+#define NVB097_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001
+
+#define NVB097_FORCE_TRANSITION_TO_BETA 0x0d98
+#define NVB097_FORCE_TRANSITION_TO_BETA_V 0:0
+
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE 0x0d9c
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V 0:0
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_FALSE 0x00000000
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_TRUE 0x00000001
+
+#define NVB097_SET_STENCIL_CLEAR_VALUE 0x0da0
+#define NVB097_SET_STENCIL_CLEAR_VALUE_V 7:0
+
+#define NVB097_INVALIDATE_SHADER_CACHES_NO_WFI 0x0da4
+#define NVB097_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0
+#define NVB097_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000
+#define NVB097_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001
+#define NVB097_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4
+#define NVB097_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000
+#define NVB097_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001
+#define NVB097_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12
+#define NVB097_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000
+#define NVB097_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001
+
+#define NVB097_SET_ZCULL_SERIALIZATION 0x0da8
+#define NVB097_SET_ZCULL_SERIALIZATION_ENABLE 0:0
+#define NVB097_SET_ZCULL_SERIALIZATION_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ZCULL_SERIALIZATION_ENABLE_TRUE 0x00000001
+#define NVB097_SET_ZCULL_SERIALIZATION_APPLIED 5:4
+#define NVB097_SET_ZCULL_SERIALIZATION_APPLIED_ALWAYS 0x00000000
+#define NVB097_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z 0x00000001
+#define NVB097_SET_ZCULL_SERIALIZATION_APPLIED_OUT_OF_GAMUT_Z 0x00000002
+#define NVB097_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z_OR_OUT_OF_GAMUT_Z 0x00000003
+
+#define NVB097_SET_FRONT_POLYGON_MODE 0x0dac
+#define NVB097_SET_FRONT_POLYGON_MODE_V 31:0
+#define NVB097_SET_FRONT_POLYGON_MODE_V_POINT 0x00001B00
+#define NVB097_SET_FRONT_POLYGON_MODE_V_LINE 0x00001B01
+#define NVB097_SET_FRONT_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVB097_SET_BACK_POLYGON_MODE 0x0db0
+#define NVB097_SET_BACK_POLYGON_MODE_V 31:0
+#define NVB097_SET_BACK_POLYGON_MODE_V_POINT 0x00001B00
+#define NVB097_SET_BACK_POLYGON_MODE_V_LINE 0x00001B01
+#define NVB097_SET_BACK_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVB097_SET_POLY_SMOOTH 0x0db4
+#define NVB097_SET_POLY_SMOOTH_ENABLE 0:0
+#define NVB097_SET_POLY_SMOOTH_ENABLE_FALSE 0x00000000
+#define NVB097_SET_POLY_SMOOTH_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_ZT_MARK 0x0db8
+#define NVB097_SET_ZT_MARK_IEEE_CLEAN 0:0
+#define NVB097_SET_ZT_MARK_IEEE_CLEAN_FALSE 0x00000000
+#define NVB097_SET_ZT_MARK_IEEE_CLEAN_TRUE 0x00000001
+
+#define NVB097_SET_ZCULL_DIR_FORMAT 0x0dbc
+#define NVB097_SET_ZCULL_DIR_FORMAT_ZDIR 15:0
+#define NVB097_SET_ZCULL_DIR_FORMAT_ZDIR_LESS 0x00000000
+#define NVB097_SET_ZCULL_DIR_FORMAT_ZDIR_GREATER 0x00000001
+#define NVB097_SET_ZCULL_DIR_FORMAT_ZFORMAT 31:16
+#define NVB097_SET_ZCULL_DIR_FORMAT_ZFORMAT_MSB 0x00000000
+#define NVB097_SET_ZCULL_DIR_FORMAT_ZFORMAT_FP 0x00000001
+#define NVB097_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZTRICK 0x00000002
+#define NVB097_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZF32_1 0x00000003
+
+#define NVB097_SET_POLY_OFFSET_POINT 0x0dc0
+#define NVB097_SET_POLY_OFFSET_POINT_ENABLE 0:0
+#define NVB097_SET_POLY_OFFSET_POINT_ENABLE_FALSE 0x00000000
+#define NVB097_SET_POLY_OFFSET_POINT_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_POLY_OFFSET_LINE 0x0dc4
+#define NVB097_SET_POLY_OFFSET_LINE_ENABLE 0:0
+#define NVB097_SET_POLY_OFFSET_LINE_ENABLE_FALSE 0x00000000
+#define NVB097_SET_POLY_OFFSET_LINE_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_POLY_OFFSET_FILL 0x0dc8
+#define NVB097_SET_POLY_OFFSET_FILL_ENABLE 0:0
+#define NVB097_SET_POLY_OFFSET_FILL_ENABLE_FALSE 0x00000000
+#define NVB097_SET_POLY_OFFSET_FILL_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_PATCH 0x0dcc
+#define NVB097_SET_PATCH_SIZE 7:0
+
+#define NVB097_SET_ITERATED_BLEND 0x0dd0
+#define NVB097_SET_ITERATED_BLEND_ENABLE 0:0
+#define NVB097_SET_ITERATED_BLEND_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ITERATED_BLEND_ENABLE_TRUE 0x00000001
+#define NVB097_SET_ITERATED_BLEND_ALPHA_ENABLE 1:1
+#define NVB097_SET_ITERATED_BLEND_ALPHA_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ITERATED_BLEND_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_ITERATED_BLEND_PASS 0x0dd4
+#define NVB097_SET_ITERATED_BLEND_PASS_COUNT 7:0
+
+#define NVB097_SET_ZCULL_CRITERION 0x0dd8
+#define NVB097_SET_ZCULL_CRITERION_SFUNC 7:0
+#define NVB097_SET_ZCULL_CRITERION_SFUNC_NEVER 0x00000000
+#define NVB097_SET_ZCULL_CRITERION_SFUNC_LESS 0x00000001
+#define NVB097_SET_ZCULL_CRITERION_SFUNC_EQUAL 0x00000002
+#define NVB097_SET_ZCULL_CRITERION_SFUNC_LEQUAL 0x00000003
+#define NVB097_SET_ZCULL_CRITERION_SFUNC_GREATER 0x00000004
+#define NVB097_SET_ZCULL_CRITERION_SFUNC_NOTEQUAL 0x00000005
+#define NVB097_SET_ZCULL_CRITERION_SFUNC_GEQUAL 0x00000006
+#define NVB097_SET_ZCULL_CRITERION_SFUNC_ALWAYS 0x00000007
+#define NVB097_SET_ZCULL_CRITERION_NO_INVALIDATE 8:8
+#define NVB097_SET_ZCULL_CRITERION_NO_INVALIDATE_FALSE 0x00000000
+#define NVB097_SET_ZCULL_CRITERION_NO_INVALIDATE_TRUE 0x00000001
+#define NVB097_SET_ZCULL_CRITERION_FORCE_MATCH 9:9
+#define NVB097_SET_ZCULL_CRITERION_FORCE_MATCH_FALSE 0x00000000
+#define NVB097_SET_ZCULL_CRITERION_FORCE_MATCH_TRUE 0x00000001
+#define NVB097_SET_ZCULL_CRITERION_SREF 23:16
+#define NVB097_SET_ZCULL_CRITERION_SMASK 31:24
+
+#define NVB097_PIXEL_SHADER_BARRIER 0x0de0
+#define NVB097_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE 0:0
+#define NVB097_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_FALSE 0x00000000
+#define NVB097_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_SM_TIMEOUT_INTERVAL 0x0de4
+#define NVB097_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0
+
+#define NVB097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY 0x0de8
+#define NVB097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE 0:0
+#define NVB097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_FALSE 0x00000000
+#define NVB097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_TRUE 0x00000001
+
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER 0x0df0
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER_V 7:0
+
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION 0x0df4
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC 2:0
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_FALSE 0x00000000
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_TRUE 0x00000001
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_EQ 0x00000002
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_NE 0x00000003
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LT 0x00000004
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LE 0x00000005
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GT 0x00000006
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GE 0x00000007
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION 5:3
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD_PRODUCTS 0x00000000
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUB_PRODUCTS 0x00000001
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MIN 0x00000002
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MAX 0x00000003
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_RCP 0x00000004
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD 0x00000005
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUBTRACT 0x00000006
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT 8:6
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT0 0x00000000
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT1 0x00000001
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT2 0x00000002
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT3 0x00000003
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT4 0x00000004
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT5 0x00000005
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT6 0x00000006
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT7 0x00000007
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT 11:9
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_RGB 0x00000000
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_RGB 0x00000001
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_AAA 0x00000002
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_AAA 0x00000003
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP0_RGB 0x00000004
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP1_RGB 0x00000005
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP2_RGB 0x00000006
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_PBR_RGB 0x00000007
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT 15:12
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO 0x00000000
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE 0x00000001
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_RGB 0x00000002
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_AAA 0x00000003
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_RGB 0x00000005
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_AAA 0x00000006
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP0_RGB 0x00000009
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP1_RGB 0x0000000A
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP2_RGB 0x0000000B
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_PBR_RGB 0x0000000C
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_CONSTANT_RGB 0x0000000D
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO_A_TIMES_B 0x0000000E
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT 18:16
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_RGB 0x00000000
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_RGB 0x00000001
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_AAA 0x00000002
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_AAA 0x00000003
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP0_RGB 0x00000004
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP1_RGB 0x00000005
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP2_RGB 0x00000006
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_PBR_RGB 0x00000007
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT 22:19
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO 0x00000000
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE 0x00000001
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_RGB 0x00000002
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_AAA 0x00000003
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_RGB 0x00000005
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_AAA 0x00000006
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP0_RGB 0x00000009
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP1_RGB 0x0000000A
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP2_RGB 0x0000000B
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_PBR_RGB 0x0000000C
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_CONSTANT_RGB 0x0000000D
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO_C_TIMES_D 0x0000000E
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE 25:23
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RGB 0x00000000
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GBR 0x00000001
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RRR 0x00000002
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GGG 0x00000003
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_BBB 0x00000004
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_R_TO_A 0x00000005
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK 27:26
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_RGB 0x00000000
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_R_ONLY 0x00000001
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_G_ONLY 0x00000002
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_B_ONLY 0x00000003
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT 29:28
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP0 0x00000000
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP1 0x00000001
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP2 0x00000002
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_NONE 0x00000003
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC 31:31
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_FALSE 0x00000000
+#define NVB097_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_TRUE 0x00000001
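Each LOAD_ITERATED_BLEND_INSTRUCTION data word packs all of the fields above into 32 bits. Reading the operand names as an A*B (+/-) C*D datapath, a classic source-over step, src.rgb*src.a + dst.rgb*(1 - src.a), would encode as below; the shift amounts are the low bits of each HIGH:LOW field, and the datapath semantics are inferred from the field names rather than taken from driver code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t instr = 0;
    instr |= 0x1u << 0;  /* TEST_CC 2:0            = TRUE (unconditional)   */
    instr |= 0x0u << 3;  /* OPERATION 5:3          = ADD_PRODUCTS (A*B+C*D) */
    instr |= 0x0u << 9;  /* OPERAND_A_SELECT 11:9  = SRC_RGB                */
    instr |= 0x3u << 12; /* OPERAND_B_SELECT 15:12 = SRC_AAA                */
    instr |= 0x1u << 16; /* OPERAND_C_SELECT 18:16 = DEST_RGB               */
    instr |= 0x4u << 19; /* OPERAND_D_SELECT 22:19 = ONE_MINUS_SRC_AAA      */
    instr |= 0x0u << 28; /* PASS_OUTPUT 29:28      = TEMP0                  */
    printf("blend instruction = 0x%08x\n", instr); /* 0x00213001 */
    return 0;
}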
+
+#define NVB097_SET_WINDOW_OFFSET_X 0x0df8
+#define NVB097_SET_WINDOW_OFFSET_X_V 16:0
+
+#define NVB097_SET_WINDOW_OFFSET_Y 0x0dfc
+#define NVB097_SET_WINDOW_OFFSET_Y_V 17:0
+
+#define NVB097_SET_SCISSOR_ENABLE(j) (0x0e00+(j)*16)
+#define NVB097_SET_SCISSOR_ENABLE_V 0:0
+#define NVB097_SET_SCISSOR_ENABLE_V_FALSE 0x00000000
+#define NVB097_SET_SCISSOR_ENABLE_V_TRUE 0x00000001
+
+#define NVB097_SET_SCISSOR_HORIZONTAL(j) (0x0e04+(j)*16)
+#define NVB097_SET_SCISSOR_HORIZONTAL_XMIN 15:0
+#define NVB097_SET_SCISSOR_HORIZONTAL_XMAX 31:16
+
+#define NVB097_SET_SCISSOR_VERTICAL(j) (0x0e08+(j)*16)
+#define NVB097_SET_SCISSOR_VERTICAL_YMIN 15:0
+#define NVB097_SET_SCISSOR_VERTICAL_YMAX 31:16
+
+#define NVB097_SET_SELECT_MAXWELL_TEXTURE_HEADERS 0x0f10
+#define NVB097_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V 0:0
+#define NVB097_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_FALSE 0x00000000
+#define NVB097_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_TRUE 0x00000001
+
+#define NVB097_SET_VPC_PERF_KNOB 0x0f14
+#define NVB097_SET_VPC_PERF_KNOB_CULLED_SMALL_LINES 7:0
+#define NVB097_SET_VPC_PERF_KNOB_CULLED_SMALL_TRIANGLES 15:8
+#define NVB097_SET_VPC_PERF_KNOB_NONCULLED_LINES_AND_POINTS 23:16
+#define NVB097_SET_VPC_PERF_KNOB_NONCULLED_TRIANGLES 31:24
+
+#define NVB097_PM_LOCAL_TRIGGER 0x0f18
+#define NVB097_PM_LOCAL_TRIGGER_BOOKMARK 15:0
+
+#define NVB097_SET_CONSTANT_COLOR_RENDERING 0x0f40
+#define NVB097_SET_CONSTANT_COLOR_RENDERING_ENABLE 0:0
+#define NVB097_SET_CONSTANT_COLOR_RENDERING_ENABLE_FALSE 0x00000000
+#define NVB097_SET_CONSTANT_COLOR_RENDERING_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_CONSTANT_COLOR_RENDERING_RED 0x0f44
+#define NVB097_SET_CONSTANT_COLOR_RENDERING_RED_V 31:0
+
+#define NVB097_SET_CONSTANT_COLOR_RENDERING_GREEN 0x0f48
+#define NVB097_SET_CONSTANT_COLOR_RENDERING_GREEN_V 31:0
+
+#define NVB097_SET_CONSTANT_COLOR_RENDERING_BLUE 0x0f4c
+#define NVB097_SET_CONSTANT_COLOR_RENDERING_BLUE_V 31:0
+
+#define NVB097_SET_CONSTANT_COLOR_RENDERING_ALPHA 0x0f50
+#define NVB097_SET_CONSTANT_COLOR_RENDERING_ALPHA_V 31:0
+
+#define NVB097_SET_BACK_STENCIL_FUNC_REF 0x0f54
+#define NVB097_SET_BACK_STENCIL_FUNC_REF_V 7:0
+
+#define NVB097_SET_BACK_STENCIL_MASK 0x0f58
+#define NVB097_SET_BACK_STENCIL_MASK_V 7:0
+
+#define NVB097_SET_BACK_STENCIL_FUNC_MASK 0x0f5c
+#define NVB097_SET_BACK_STENCIL_FUNC_MASK_V 7:0
+
+#define NVB097_SET_VERTEX_STREAM_SUBSTITUTE_A 0x0f84
+#define NVB097_SET_VERTEX_STREAM_SUBSTITUTE_A_ADDRESS_UPPER 7:0
+
+#define NVB097_SET_VERTEX_STREAM_SUBSTITUTE_B 0x0f88
+#define NVB097_SET_VERTEX_STREAM_SUBSTITUTE_B_ADDRESS_LOWER 31:0
+
+#define NVB097_SET_LINE_MODE_POLYGON_CLIP 0x0f8c
+#define NVB097_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE 0:0
+#define NVB097_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DRAW_LINE 0x00000000
+#define NVB097_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DO_NOT_DRAW_LINE 0x00000001
+
+#define NVB097_SET_SINGLE_CT_WRITE_CONTROL 0x0f90
+#define NVB097_SET_SINGLE_CT_WRITE_CONTROL_ENABLE 0:0
+#define NVB097_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_FALSE 0x00000000
+#define NVB097_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_VTG_WARP_WATERMARKS 0x0f98
+#define NVB097_SET_VTG_WARP_WATERMARKS_LOW 15:0
+#define NVB097_SET_VTG_WARP_WATERMARKS_HIGH 31:16
+
+#define NVB097_SET_DEPTH_BOUNDS_MIN 0x0f9c
+#define NVB097_SET_DEPTH_BOUNDS_MIN_V 31:0
+
+#define NVB097_SET_DEPTH_BOUNDS_MAX 0x0fa0
+#define NVB097_SET_DEPTH_BOUNDS_MAX_V 31:0
+
+#define NVB097_SET_CT_MRT_ENABLE 0x0fac
+#define NVB097_SET_CT_MRT_ENABLE_V 0:0
+#define NVB097_SET_CT_MRT_ENABLE_V_FALSE 0x00000000
+#define NVB097_SET_CT_MRT_ENABLE_V_TRUE 0x00000001
+
+#define NVB097_SET_NONMULTISAMPLED_Z 0x0fb0
+#define NVB097_SET_NONMULTISAMPLED_Z_V 0:0
+#define NVB097_SET_NONMULTISAMPLED_Z_V_PER_SAMPLE 0x00000000
+#define NVB097_SET_NONMULTISAMPLED_Z_V_AT_PIXEL_CENTER 0x00000001
+
+#define NVB097_SET_SAMPLE_MASK_X0_Y0 0x0fbc
+#define NVB097_SET_SAMPLE_MASK_X0_Y0_V 15:0
+
+#define NVB097_SET_SAMPLE_MASK_X1_Y0 0x0fc0
+#define NVB097_SET_SAMPLE_MASK_X1_Y0_V 15:0
+
+#define NVB097_SET_SAMPLE_MASK_X0_Y1 0x0fc4
+#define NVB097_SET_SAMPLE_MASK_X0_Y1_V 15:0
+
+#define NVB097_SET_SAMPLE_MASK_X1_Y1 0x0fc8
+#define NVB097_SET_SAMPLE_MASK_X1_Y1_V 15:0
+
+#define NVB097_SET_SURFACE_CLIP_ID_MEMORY_A 0x0fcc
+#define NVB097_SET_SURFACE_CLIP_ID_MEMORY_A_OFFSET_UPPER 7:0
+
+#define NVB097_SET_SURFACE_CLIP_ID_MEMORY_B 0x0fd0
+#define NVB097_SET_SURFACE_CLIP_ID_MEMORY_B_OFFSET_LOWER 31:0
+
+#define NVB097_SET_BLEND_OPT_CONTROL 0x0fdc
+#define NVB097_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS 0:0
+#define NVB097_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_FALSE 0x00000000
+#define NVB097_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_TRUE 0x00000001
+
+#define NVB097_SET_ZT_A 0x0fe0
+#define NVB097_SET_ZT_A_OFFSET_UPPER 7:0
+
+#define NVB097_SET_ZT_B 0x0fe4
+#define NVB097_SET_ZT_B_OFFSET_LOWER 31:0
+
+#define NVB097_SET_ZT_FORMAT 0x0fe8
+#define NVB097_SET_ZT_FORMAT_V 4:0
+#define NVB097_SET_ZT_FORMAT_V_Z16 0x00000013
+#define NVB097_SET_ZT_FORMAT_V_Z24S8 0x00000014
+#define NVB097_SET_ZT_FORMAT_V_X8Z24 0x00000015
+#define NVB097_SET_ZT_FORMAT_V_S8Z24 0x00000016
+#define NVB097_SET_ZT_FORMAT_V_V8Z24 0x00000018
+#define NVB097_SET_ZT_FORMAT_V_ZF32 0x0000000A
+#define NVB097_SET_ZT_FORMAT_V_ZF32_X24S8 0x00000019
+#define NVB097_SET_ZT_FORMAT_V_X8Z24_X16V8S8 0x0000001D
+#define NVB097_SET_ZT_FORMAT_V_ZF32_X16V8X8 0x0000001E
+#define NVB097_SET_ZT_FORMAT_V_ZF32_X16V8S8 0x0000001F
+
+#define NVB097_SET_ZT_BLOCK_SIZE 0x0fec
+#define NVB097_SET_ZT_BLOCK_SIZE_WIDTH 3:0
+#define NVB097_SET_ZT_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVB097_SET_ZT_BLOCK_SIZE_HEIGHT 7:4
+#define NVB097_SET_ZT_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVB097_SET_ZT_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVB097_SET_ZT_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVB097_SET_ZT_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVB097_SET_ZT_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVB097_SET_ZT_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVB097_SET_ZT_BLOCK_SIZE_DEPTH 11:8
+#define NVB097_SET_ZT_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+
+#define NVB097_SET_ZT_ARRAY_PITCH 0x0ff0
+#define NVB097_SET_ZT_ARRAY_PITCH_V 31:0
+
+#define NVB097_SET_SURFACE_CLIP_HORIZONTAL 0x0ff4
+#define NVB097_SET_SURFACE_CLIP_HORIZONTAL_X 15:0
+#define NVB097_SET_SURFACE_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVB097_SET_SURFACE_CLIP_VERTICAL 0x0ff8
+#define NVB097_SET_SURFACE_CLIP_VERTICAL_Y 15:0
+#define NVB097_SET_SURFACE_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVB097_SET_TILED_CACHE_BUNDLE_CONTROL 0x0ffc
+#define NVB097_SET_TILED_CACHE_BUNDLE_CONTROL_TREAT_HEAVYWEIGHT_AS_LIGHTWEIGHT 0:0
+#define NVB097_SET_TILED_CACHE_BUNDLE_CONTROL_TREAT_HEAVYWEIGHT_AS_LIGHTWEIGHT_FALSE 0x00000000
+#define NVB097_SET_TILED_CACHE_BUNDLE_CONTROL_TREAT_HEAVYWEIGHT_AS_LIGHTWEIGHT_TRUE 0x00000001
+
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS 0x1000
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE 0:0
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_FALSE 0x00000000
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_TRUE 0x00000001
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY 5:4
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVB097_SET_TESSELLATION_CUT_HEIGHT 0x1008
+#define NVB097_SET_TESSELLATION_CUT_HEIGHT_V 4:0
+
+#define NVB097_SET_MAX_GS_INSTANCES_PER_TASK 0x100c
+#define NVB097_SET_MAX_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVB097_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK 0x1010
+#define NVB097_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK_V 15:0
+
+#define NVB097_SET_RESERVED_SW_METHOD00 0x1014
+#define NVB097_SET_RESERVED_SW_METHOD00_V 31:0
+
+#define NVB097_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER 0x1018
+#define NVB097_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVB097_SET_BETA_CB_STORAGE_CONSTRAINT 0x101c
+#define NVB097_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVB097_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVB097_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER 0x1020
+#define NVB097_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVB097_SET_ALPHA_CB_STORAGE_CONSTRAINT 0x1024
+#define NVB097_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVB097_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_RESERVED_SW_METHOD01 0x1028
+#define NVB097_SET_RESERVED_SW_METHOD01_V 31:0
+
+#define NVB097_SET_RESERVED_SW_METHOD02 0x102c
+#define NVB097_SET_RESERVED_SW_METHOD02_V 31:0
+
+#define NVB097_SET_SPARE_NOOP01 0x1044
+#define NVB097_SET_SPARE_NOOP01_V 31:0
+
+#define NVB097_SET_SPARE_NOOP02 0x1048
+#define NVB097_SET_SPARE_NOOP02_V 31:0
+
+#define NVB097_SET_SPARE_NOOP03 0x104c
+#define NVB097_SET_SPARE_NOOP03_V 31:0
+
+#define NVB097_SET_SPARE_NOOP04 0x1050
+#define NVB097_SET_SPARE_NOOP04_V 31:0
+
+#define NVB097_SET_SPARE_NOOP05 0x1054
+#define NVB097_SET_SPARE_NOOP05_V 31:0
+
+#define NVB097_SET_SPARE_NOOP06 0x1058
+#define NVB097_SET_SPARE_NOOP06_V 31:0
+
+#define NVB097_SET_SPARE_NOOP07 0x105c
+#define NVB097_SET_SPARE_NOOP07_V 31:0
+
+#define NVB097_SET_SPARE_NOOP08 0x1060
+#define NVB097_SET_SPARE_NOOP08_V 31:0
+
+#define NVB097_SET_SPARE_NOOP09 0x1064
+#define NVB097_SET_SPARE_NOOP09_V 31:0
+
+#define NVB097_SET_SPARE_NOOP10 0x1068
+#define NVB097_SET_SPARE_NOOP10_V 31:0
+
+#define NVB097_SET_SPARE_NOOP11 0x106c
+#define NVB097_SET_SPARE_NOOP11_V 31:0
+
+#define NVB097_SET_SPARE_NOOP12 0x1070
+#define NVB097_SET_SPARE_NOOP12_V 31:0
+
+#define NVB097_SET_SPARE_NOOP13 0x1074
+#define NVB097_SET_SPARE_NOOP13_V 31:0
+
+#define NVB097_SET_SPARE_NOOP14 0x1078
+#define NVB097_SET_SPARE_NOOP14_V 31:0
+
+#define NVB097_SET_SPARE_NOOP15 0x107c
+#define NVB097_SET_SPARE_NOOP15_V 31:0
+
+#define NVB097_SET_RESERVED_SW_METHOD03 0x10b0
+#define NVB097_SET_RESERVED_SW_METHOD03_V 31:0
+
+#define NVB097_SET_RESERVED_SW_METHOD04 0x10b4
+#define NVB097_SET_RESERVED_SW_METHOD04_V 31:0
+
+#define NVB097_SET_RESERVED_SW_METHOD05 0x10b8
+#define NVB097_SET_RESERVED_SW_METHOD05_V 31:0
+
+#define NVB097_SET_RESERVED_SW_METHOD06 0x10bc
+#define NVB097_SET_RESERVED_SW_METHOD06_V 31:0
+
+#define NVB097_SET_RESERVED_SW_METHOD07 0x10c0
+#define NVB097_SET_RESERVED_SW_METHOD07_V 31:0
+
+#define NVB097_SET_RESERVED_SW_METHOD08 0x10c4
+#define NVB097_SET_RESERVED_SW_METHOD08_V 31:0
+
+#define NVB097_SET_RESERVED_SW_METHOD09 0x10c8
+#define NVB097_SET_RESERVED_SW_METHOD09_V 31:0
+
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_UNORM8 0x10cc
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED 23:16
+
+#define NVB097_SET_RESERVED_SW_METHOD10 0x10d0
+#define NVB097_SET_RESERVED_SW_METHOD10_V 31:0
+
+#define NVB097_SET_RESERVED_SW_METHOD11 0x10d4
+#define NVB097_SET_RESERVED_SW_METHOD11_V 31:0
+
+#define NVB097_SET_RESERVED_SW_METHOD12 0x10d8
+#define NVB097_SET_RESERVED_SW_METHOD12_V 31:0
+
+#define NVB097_SET_RESERVED_SW_METHOD13 0x10dc
+#define NVB097_SET_RESERVED_SW_METHOD13_V 31:0
+
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_UNORM10 0x10e0
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED 23:16
+
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_UNORM16 0x10e4
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED 23:16
+
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_FP11 0x10e8
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED_ALL_HIT_ONCE 5:0
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED 21:16
+
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_FP16 0x10ec
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED 23:16
+
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_SRGB8 0x10f0
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVB097_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED 23:16
+
+#define NVB097_UNBIND_ALL 0x10f4
+#define NVB097_UNBIND_ALL_CONSTANT_BUFFERS 8:8
+#define NVB097_UNBIND_ALL_CONSTANT_BUFFERS_FALSE 0x00000000
+#define NVB097_UNBIND_ALL_CONSTANT_BUFFERS_TRUE 0x00000001
+
+#define NVB097_SET_CLEAR_SURFACE_CONTROL 0x10f8
+#define NVB097_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK 0:0
+#define NVB097_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_FALSE 0x00000000
+#define NVB097_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_TRUE 0x00000001
+#define NVB097_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT 4:4
+#define NVB097_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_FALSE 0x00000000
+#define NVB097_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_TRUE 0x00000001
+#define NVB097_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0 8:8
+#define NVB097_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_FALSE 0x00000000
+#define NVB097_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_TRUE 0x00000001
+#define NVB097_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0 12:12
+#define NVB097_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_FALSE 0x00000000
+#define NVB097_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_TRUE 0x00000001
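SET_CLEAR_SURFACE_CONTROL is four independent single-bit flags at bits 0, 4, 8 and 12. A sketch of composing a clear that honors the clear rect and scissor 0 while ignoring the stencil mask and viewport clip:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t ctl = 0;
    ctl |= 0u << 0;  /* RESPECT_STENCIL_MASK 0:0 = FALSE */
    ctl |= 1u << 4;  /* USE_CLEAR_RECT 4:4       = TRUE  */
    ctl |= 1u << 8;  /* USE_SCISSOR0 8:8         = TRUE  */
    ctl |= 0u << 12; /* USE_VIEWPORT_CLIP0 12:12 = FALSE */
    assert(ctl == 0x00000110u);
    return 0;
}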
+
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS 0x10fc
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY 5:4
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVB097_SET_RESERVED_SW_METHOD14 0x1100
+#define NVB097_SET_RESERVED_SW_METHOD14_V 31:0
+
+#define NVB097_SET_RESERVED_SW_METHOD15 0x1104
+#define NVB097_SET_RESERVED_SW_METHOD15_V 31:0
+
+#define NVB097_NO_OPERATION_DATA_HI 0x110c
+#define NVB097_NO_OPERATION_DATA_HI_V 31:0
+
+#define NVB097_SET_DEPTH_BIAS_CONTROL 0x1110
+#define NVB097_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT 0:0
+#define NVB097_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_FALSE 0x00000000
+#define NVB097_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_TRUE 0x00000001
+
+#define NVB097_PM_TRIGGER_END 0x1114
+#define NVB097_PM_TRIGGER_END_V 31:0
+
+#define NVB097_SET_VERTEX_ID_BASE 0x1118
+#define NVB097_SET_VERTEX_ID_BASE_V 31:0
+
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A(i) (0x1120+(i)*4)
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0 0:0
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1 1:1
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2 2:2
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3 3:3
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0 4:4
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1 5:5
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2 6:6
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3 7:7
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0 8:8
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1 9:9
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2 10:10
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3 11:11
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0 12:12
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1 13:13
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2 14:14
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3 15:15
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0 16:16
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1 17:17
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2 18:18
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3 19:19
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0 20:20
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1 21:21
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2 22:22
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3 23:23
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0 24:24
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1 25:25
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2 26:26
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3 27:27
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0 28:28
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1 29:29
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2 30:30
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3 31:31
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_TRUE 0x00000001
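Within each 32-bit skip-mask word, every attribute owns four consecutive component bits, so component c of attribute a lands at bit a*4 + c (the (i) index selects which word of the mask). A sketch with an illustrative helper that is not part of the header:

#include <assert.h>
#include <stdint.h>

/* Illustrative: bit position for attribute a, component c within one word. */
static uint32_t skip_bit(unsigned attribute, unsigned component)
{
    return 1u << (attribute * 4u + component);
}

int main(void)
{
    /* Skip all of attribute 5 plus the w component of attribute 1. */
    uint32_t mask = skip_bit(5, 0) | skip_bit(5, 1) | skip_bit(5, 2) |
                    skip_bit(5, 3) | skip_bit(1, 3);
    assert(mask == 0x00F00080u);
    return 0;
}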
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2 6:6
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3 7:7
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0 8:8
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1 9:9
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2 10:10
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3 11:11
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0 12:12
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1 13:13
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2 14:14
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3 15:15
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0 16:16
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1 17:17
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2 18:18
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3 19:19
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0 20:20
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1 21:21
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2 22:22
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3 23:23
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0 24:24
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1 25:25
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2 26:26
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3 27:27
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0 28:28
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1 29:29
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2 30:30
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_TRUE 0x00000001
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3 31:31
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_TRUE 0x00000001
+
+#define NVB097_SET_BLEND_PER_FORMAT_ENABLE 0x1140
+#define NVB097_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16 4:4
+#define NVB097_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_FALSE 0x00000000
+#define NVB097_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_TRUE 0x00000001
+
+#define NVB097_FLUSH_PENDING_WRITES 0x1144
+#define NVB097_FLUSH_PENDING_WRITES_SM_DOES_GLOBAL_STORE 0:0
+
+#define NVB097_SET_VERTEX_ATTRIBUTE_A(i) (0x1160+(i)*4)
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_STREAM 4:0
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_SOURCE 6:6
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_SOURCE_ACTIVE 0x00000000
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_SOURCE_INACTIVE 0x00000001
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_OFFSET 20:7
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS 26:21
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32 0x00000012
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_G8R8 0x00000032
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16 0x0000001B
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8 0x0000001D
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8 0x00000034
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE 29:27
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SNORM 0x00000001
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UNORM 0x00000002
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SINT 0x00000003
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UINT 0x00000004
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_USCALED 0x00000005
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SSCALED 0x00000006
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B 31:31
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_FALSE 0x00000000
+#define NVB097_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_TRUE 0x00000001
+
+#define NVB097_SET_VERTEX_ATTRIBUTE_B(i) (0x11a0+(i)*4)
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_STREAM 4:0
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_SOURCE 6:6
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_SOURCE_ACTIVE 0x00000000
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_SOURCE_INACTIVE 0x00000001
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_OFFSET 20:7
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS 26:21
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32 0x00000012
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_G8R8 0x00000032
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16 0x0000001B
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8 0x0000001D
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8 0x00000034
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE 29:27
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SNORM 0x00000001
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UNORM 0x00000002
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SINT 0x00000003
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UINT 0x00000004
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_USCALED 0x00000005
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SSCALED 0x00000006
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B 31:31
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_FALSE 0x00000000
+#define NVB097_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_TRUE 0x00000001
+
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST 0x1214
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX 15:0
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT 0x1218
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_START_INDEX 15:0
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVB097_SET_CT_SELECT 0x121c
+#define NVB097_SET_CT_SELECT_TARGET_COUNT 3:0
+#define NVB097_SET_CT_SELECT_TARGET0 6:4
+#define NVB097_SET_CT_SELECT_TARGET1 9:7
+#define NVB097_SET_CT_SELECT_TARGET2 12:10
+#define NVB097_SET_CT_SELECT_TARGET3 15:13
+#define NVB097_SET_CT_SELECT_TARGET4 18:16
+#define NVB097_SET_CT_SELECT_TARGET5 21:19
+#define NVB097_SET_CT_SELECT_TARGET6 24:22
+#define NVB097_SET_CT_SELECT_TARGET7 27:25
+
+#define NVB097_SET_COMPRESSION_THRESHOLD 0x1220
+#define NVB097_SET_COMPRESSION_THRESHOLD_SAMPLES 3:0
+#define NVB097_SET_COMPRESSION_THRESHOLD_SAMPLES__0 0x00000000
+#define NVB097_SET_COMPRESSION_THRESHOLD_SAMPLES__1 0x00000001
+#define NVB097_SET_COMPRESSION_THRESHOLD_SAMPLES__2 0x00000002
+#define NVB097_SET_COMPRESSION_THRESHOLD_SAMPLES__4 0x00000003
+#define NVB097_SET_COMPRESSION_THRESHOLD_SAMPLES__8 0x00000004
+#define NVB097_SET_COMPRESSION_THRESHOLD_SAMPLES__16 0x00000005
+#define NVB097_SET_COMPRESSION_THRESHOLD_SAMPLES__32 0x00000006
+#define NVB097_SET_COMPRESSION_THRESHOLD_SAMPLES__64 0x00000007
+#define NVB097_SET_COMPRESSION_THRESHOLD_SAMPLES__128 0x00000008
+#define NVB097_SET_COMPRESSION_THRESHOLD_SAMPLES__256 0x00000009
+#define NVB097_SET_COMPRESSION_THRESHOLD_SAMPLES__512 0x0000000A
+#define NVB097_SET_COMPRESSION_THRESHOLD_SAMPLES__1024 0x0000000B
+#define NVB097_SET_COMPRESSION_THRESHOLD_SAMPLES__2048 0x0000000C
+
+#define NVB097_SET_ZT_SIZE_A 0x1228
+#define NVB097_SET_ZT_SIZE_A_WIDTH 27:0
+
+#define NVB097_SET_ZT_SIZE_B 0x122c
+#define NVB097_SET_ZT_SIZE_B_HEIGHT 16:0
+
+#define NVB097_SET_ZT_SIZE_C 0x1230
+#define NVB097_SET_ZT_SIZE_C_THIRD_DIMENSION 15:0
+#define NVB097_SET_ZT_SIZE_C_CONTROL 16:16
+#define NVB097_SET_ZT_SIZE_C_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000
+#define NVB097_SET_ZT_SIZE_C_CONTROL_ARRAY_SIZE_IS_ONE 0x00000001
+
+#define NVB097_SET_SAMPLER_BINDING 0x1234
+#define NVB097_SET_SAMPLER_BINDING_V 0:0
+#define NVB097_SET_SAMPLER_BINDING_V_INDEPENDENTLY 0x00000000
+#define NVB097_SET_SAMPLER_BINDING_V_VIA_HEADER_BINDING 0x00000001
+
+#define NVB097_DRAW_AUTO 0x123c
+#define NVB097_DRAW_AUTO_BYTE_COUNT 31:0
+
+#define NVB097_SET_CIRCULAR_BUFFER_SIZE 0x1280
+#define NVB097_SET_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 13:0
+
+#define NVB097_SET_VTG_REGISTER_WATERMARKS 0x1284
+#define NVB097_SET_VTG_REGISTER_WATERMARKS_LOW 15:0
+#define NVB097_SET_VTG_REGISTER_WATERMARKS_HIGH 31:16
+
+#define NVB097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288
+#define NVB097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0
+#define NVB097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVB097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVB097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4
+
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS 0x1290
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY 5:4
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVB097_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE 0x12a4
+#define NVB097_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE_V 31:0
+
+#define NVB097_CLEAR_ZCULL_REGION 0x12c8
+#define NVB097_CLEAR_ZCULL_REGION_Z_ENABLE 0:0
+#define NVB097_CLEAR_ZCULL_REGION_Z_ENABLE_FALSE 0x00000000
+#define NVB097_CLEAR_ZCULL_REGION_Z_ENABLE_TRUE 0x00000001
+#define NVB097_CLEAR_ZCULL_REGION_STENCIL_ENABLE 4:4
+#define NVB097_CLEAR_ZCULL_REGION_STENCIL_ENABLE_FALSE 0x00000000
+#define NVB097_CLEAR_ZCULL_REGION_STENCIL_ENABLE_TRUE 0x00000001
+#define NVB097_CLEAR_ZCULL_REGION_USE_CLEAR_RECT 1:1
+#define NVB097_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_FALSE 0x00000000
+#define NVB097_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_TRUE 0x00000001
+#define NVB097_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX 2:2
+#define NVB097_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_FALSE 0x00000000
+#define NVB097_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_TRUE 0x00000001
+#define NVB097_CLEAR_ZCULL_REGION_RT_ARRAY_INDEX 20:5
+#define NVB097_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE 3:3
+#define NVB097_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_FALSE 0x00000000
+#define NVB097_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_TRUE 0x00000001
+
+#define NVB097_SET_DEPTH_TEST 0x12cc
+#define NVB097_SET_DEPTH_TEST_ENABLE 0:0
+#define NVB097_SET_DEPTH_TEST_ENABLE_FALSE 0x00000000
+#define NVB097_SET_DEPTH_TEST_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_FILL_MODE 0x12d0
+#define NVB097_SET_FILL_MODE_V 31:0
+#define NVB097_SET_FILL_MODE_V_POINT 0x00000001
+#define NVB097_SET_FILL_MODE_V_WIREFRAME 0x00000002
+#define NVB097_SET_FILL_MODE_V_SOLID 0x00000003
+
+#define NVB097_SET_SHADE_MODE 0x12d4
+#define NVB097_SET_SHADE_MODE_V 31:0
+#define NVB097_SET_SHADE_MODE_V_FLAT 0x00000001
+#define NVB097_SET_SHADE_MODE_V_GOURAUD 0x00000002
+#define NVB097_SET_SHADE_MODE_V_OGL_FLAT 0x00001D00
+#define NVB097_SET_SHADE_MODE_V_OGL_SMOOTH 0x00001D01
+
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS 0x12d8
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY 5:4
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS 0x12dc
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY 5:4
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVB097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVB097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL 0x12e0
+#define NVB097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT 3:0
+#define NVB097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1 0x00000000
+#define NVB097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_2X2 0x00000001
+#define NVB097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1_VIRTUAL_SAMPLES 0x00000002
+
+#define NVB097_SET_BLEND_STATE_PER_TARGET 0x12e4
+#define NVB097_SET_BLEND_STATE_PER_TARGET_ENABLE 0:0
+#define NVB097_SET_BLEND_STATE_PER_TARGET_ENABLE_FALSE 0x00000000
+#define NVB097_SET_BLEND_STATE_PER_TARGET_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_DEPTH_WRITE 0x12e8
+#define NVB097_SET_DEPTH_WRITE_ENABLE 0:0
+#define NVB097_SET_DEPTH_WRITE_ENABLE_FALSE 0x00000000
+#define NVB097_SET_DEPTH_WRITE_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_ALPHA_TEST 0x12ec
+#define NVB097_SET_ALPHA_TEST_ENABLE 0:0
+#define NVB097_SET_ALPHA_TEST_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ALPHA_TEST_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_INLINE_INDEX4X8_ALIGN 0x1300
+#define NVB097_SET_INLINE_INDEX4X8_ALIGN_COUNT 29:0
+#define NVB097_SET_INLINE_INDEX4X8_ALIGN_START 31:30
+
+#define NVB097_DRAW_INLINE_INDEX4X8 0x1304
+#define NVB097_DRAW_INLINE_INDEX4X8_INDEX0 7:0
+#define NVB097_DRAW_INLINE_INDEX4X8_INDEX1 15:8
+#define NVB097_DRAW_INLINE_INDEX4X8_INDEX2 23:16
+#define NVB097_DRAW_INLINE_INDEX4X8_INDEX3 31:24
+
+#define NVB097_D3D_SET_CULL_MODE 0x1308
+#define NVB097_D3D_SET_CULL_MODE_V 31:0
+#define NVB097_D3D_SET_CULL_MODE_V_NONE 0x00000001
+#define NVB097_D3D_SET_CULL_MODE_V_CW 0x00000002
+#define NVB097_D3D_SET_CULL_MODE_V_CCW 0x00000003
+
+#define NVB097_SET_DEPTH_FUNC 0x130c
+#define NVB097_SET_DEPTH_FUNC_V 31:0
+#define NVB097_SET_DEPTH_FUNC_V_OGL_NEVER 0x00000200
+#define NVB097_SET_DEPTH_FUNC_V_OGL_LESS 0x00000201
+#define NVB097_SET_DEPTH_FUNC_V_OGL_EQUAL 0x00000202
+#define NVB097_SET_DEPTH_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVB097_SET_DEPTH_FUNC_V_OGL_GREATER 0x00000204
+#define NVB097_SET_DEPTH_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVB097_SET_DEPTH_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVB097_SET_DEPTH_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVB097_SET_DEPTH_FUNC_V_D3D_NEVER 0x00000001
+#define NVB097_SET_DEPTH_FUNC_V_D3D_LESS 0x00000002
+#define NVB097_SET_DEPTH_FUNC_V_D3D_EQUAL 0x00000003
+#define NVB097_SET_DEPTH_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVB097_SET_DEPTH_FUNC_V_D3D_GREATER 0x00000005
+#define NVB097_SET_DEPTH_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVB097_SET_DEPTH_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVB097_SET_DEPTH_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVB097_SET_ALPHA_REF 0x1310
+#define NVB097_SET_ALPHA_REF_V 31:0
+
+#define NVB097_SET_ALPHA_FUNC 0x1314
+#define NVB097_SET_ALPHA_FUNC_V 31:0
+#define NVB097_SET_ALPHA_FUNC_V_OGL_NEVER 0x00000200
+#define NVB097_SET_ALPHA_FUNC_V_OGL_LESS 0x00000201
+#define NVB097_SET_ALPHA_FUNC_V_OGL_EQUAL 0x00000202
+#define NVB097_SET_ALPHA_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVB097_SET_ALPHA_FUNC_V_OGL_GREATER 0x00000204
+#define NVB097_SET_ALPHA_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVB097_SET_ALPHA_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVB097_SET_ALPHA_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVB097_SET_ALPHA_FUNC_V_D3D_NEVER 0x00000001
+#define NVB097_SET_ALPHA_FUNC_V_D3D_LESS 0x00000002
+#define NVB097_SET_ALPHA_FUNC_V_D3D_EQUAL 0x00000003
+#define NVB097_SET_ALPHA_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVB097_SET_ALPHA_FUNC_V_D3D_GREATER 0x00000005
+#define NVB097_SET_ALPHA_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVB097_SET_ALPHA_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVB097_SET_ALPHA_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVB097_SET_DRAW_AUTO_STRIDE 0x1318
+#define NVB097_SET_DRAW_AUTO_STRIDE_V 11:0
+
+#define NVB097_SET_BLEND_CONST_RED 0x131c
+#define NVB097_SET_BLEND_CONST_RED_V 31:0
+
+#define NVB097_SET_BLEND_CONST_GREEN 0x1320
+#define NVB097_SET_BLEND_CONST_GREEN_V 31:0
+
+#define NVB097_SET_BLEND_CONST_BLUE 0x1324
+#define NVB097_SET_BLEND_CONST_BLUE_V 31:0
+
+#define NVB097_SET_BLEND_CONST_ALPHA 0x1328
+#define NVB097_SET_BLEND_CONST_ALPHA_V 31:0
+
+#define NVB097_INVALIDATE_SAMPLER_CACHE 0x1330
+#define NVB097_INVALIDATE_SAMPLER_CACHE_LINES 0:0
+#define NVB097_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000
+#define NVB097_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001
+#define NVB097_INVALIDATE_SAMPLER_CACHE_TAG 25:4
+
+#define NVB097_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334
+#define NVB097_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0
+#define NVB097_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000
+#define NVB097_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001
+#define NVB097_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4
+
+#define NVB097_INVALIDATE_TEXTURE_DATA_CACHE 0x1338
+#define NVB097_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0
+#define NVB097_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000
+#define NVB097_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001
+#define NVB097_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4
+
+#define NVB097_SET_BLEND_SEPARATE_FOR_ALPHA 0x133c
+#define NVB097_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE 0:0
+#define NVB097_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000
+#define NVB097_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_BLEND_COLOR_OP 0x1340
+#define NVB097_SET_BLEND_COLOR_OP_V 31:0
+#define NVB097_SET_BLEND_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVB097_SET_BLEND_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVB097_SET_BLEND_COLOR_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVB097_SET_BLEND_COLOR_OP_V_OGL_MIN 0x00008007
+#define NVB097_SET_BLEND_COLOR_OP_V_OGL_MAX 0x00008008
+#define NVB097_SET_BLEND_COLOR_OP_V_D3D_ADD 0x00000001
+#define NVB097_SET_BLEND_COLOR_OP_V_D3D_SUBTRACT 0x00000002
+#define NVB097_SET_BLEND_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVB097_SET_BLEND_COLOR_OP_V_D3D_MIN 0x00000004
+#define NVB097_SET_BLEND_COLOR_OP_V_D3D_MAX 0x00000005
+
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF 0x1344
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V 31:0
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVB097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF 0x1348
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V 31:0
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVB097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVB097_SET_BLEND_ALPHA_OP 0x134c
+#define NVB097_SET_BLEND_ALPHA_OP_V 31:0
+#define NVB097_SET_BLEND_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVB097_SET_BLEND_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVB097_SET_BLEND_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVB097_SET_BLEND_ALPHA_OP_V_OGL_MIN 0x00008007
+#define NVB097_SET_BLEND_ALPHA_OP_V_OGL_MAX 0x00008008
+#define NVB097_SET_BLEND_ALPHA_OP_V_D3D_ADD 0x00000001
+#define NVB097_SET_BLEND_ALPHA_OP_V_D3D_SUBTRACT 0x00000002
+#define NVB097_SET_BLEND_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVB097_SET_BLEND_ALPHA_OP_V_D3D_MIN 0x00000004
+#define NVB097_SET_BLEND_ALPHA_OP_V_D3D_MAX 0x00000005
+
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF 0x1350
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V 31:0
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVB097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVB097_SET_GLOBAL_COLOR_KEY 0x1354
+#define NVB097_SET_GLOBAL_COLOR_KEY_ENABLE 0:0
+#define NVB097_SET_GLOBAL_COLOR_KEY_ENABLE_FALSE 0x00000000
+#define NVB097_SET_GLOBAL_COLOR_KEY_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF 0x1358
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V 31:0
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVB097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVB097_SET_SINGLE_ROP_CONTROL 0x135c
+#define NVB097_SET_SINGLE_ROP_CONTROL_ENABLE 0:0
+#define NVB097_SET_SINGLE_ROP_CONTROL_ENABLE_FALSE 0x00000000
+#define NVB097_SET_SINGLE_ROP_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_BLEND(i) (0x1360+(i)*4)
+#define NVB097_SET_BLEND_ENABLE 0:0
+#define NVB097_SET_BLEND_ENABLE_FALSE 0x00000000
+#define NVB097_SET_BLEND_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_STENCIL_TEST 0x1380
+#define NVB097_SET_STENCIL_TEST_ENABLE 0:0
+#define NVB097_SET_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NVB097_SET_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_STENCIL_OP_FAIL 0x1384
+#define NVB097_SET_STENCIL_OP_FAIL_V 31:0
+#define NVB097_SET_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NVB097_SET_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NVB097_SET_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NVB097_SET_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NVB097_SET_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NVB097_SET_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NVB097_SET_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NVB097_SET_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NVB097_SET_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NVB097_SET_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NVB097_SET_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NVB097_SET_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NVB097_SET_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NVB097_SET_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NVB097_SET_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NVB097_SET_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVB097_SET_STENCIL_OP_ZFAIL 0x1388
+#define NVB097_SET_STENCIL_OP_ZFAIL_V 31:0
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVB097_SET_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVB097_SET_STENCIL_OP_ZPASS 0x138c
+#define NVB097_SET_STENCIL_OP_ZPASS_V 31:0
+#define NVB097_SET_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVB097_SET_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVB097_SET_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVB097_SET_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVB097_SET_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NVB097_SET_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NVB097_SET_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NVB097_SET_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NVB097_SET_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NVB097_SET_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NVB097_SET_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NVB097_SET_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NVB097_SET_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NVB097_SET_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NVB097_SET_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NVB097_SET_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NVB097_SET_STENCIL_FUNC 0x1390
+#define NVB097_SET_STENCIL_FUNC_V 31:0
+#define NVB097_SET_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NVB097_SET_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NVB097_SET_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NVB097_SET_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVB097_SET_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NVB097_SET_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVB097_SET_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVB097_SET_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVB097_SET_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NVB097_SET_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NVB097_SET_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NVB097_SET_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVB097_SET_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVB097_SET_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVB097_SET_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVB097_SET_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVB097_SET_STENCIL_FUNC_REF 0x1394
+#define NVB097_SET_STENCIL_FUNC_REF_V 7:0
+
+#define NVB097_SET_STENCIL_FUNC_MASK 0x1398
+#define NVB097_SET_STENCIL_FUNC_MASK_V 7:0
+
+#define NVB097_SET_STENCIL_MASK 0x139c
+#define NVB097_SET_STENCIL_MASK_V 7:0
+
+#define NVB097_SET_DRAW_AUTO_START 0x13a4
+#define NVB097_SET_DRAW_AUTO_START_BYTE_COUNT 31:0
+
+#define NVB097_SET_PS_SATURATE 0x13a8
+#define NVB097_SET_PS_SATURATE_OUTPUT0 0:0
+#define NVB097_SET_PS_SATURATE_OUTPUT0_FALSE 0x00000000
+#define NVB097_SET_PS_SATURATE_OUTPUT0_TRUE 0x00000001
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE0 1:1
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE0_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE0_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVB097_SET_PS_SATURATE_OUTPUT1 4:4
+#define NVB097_SET_PS_SATURATE_OUTPUT1_FALSE 0x00000000
+#define NVB097_SET_PS_SATURATE_OUTPUT1_TRUE 0x00000001
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE1 5:5
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE1_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE1_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVB097_SET_PS_SATURATE_OUTPUT2 8:8
+#define NVB097_SET_PS_SATURATE_OUTPUT2_FALSE 0x00000000
+#define NVB097_SET_PS_SATURATE_OUTPUT2_TRUE 0x00000001
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE2 9:9
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE2_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE2_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVB097_SET_PS_SATURATE_OUTPUT3 12:12
+#define NVB097_SET_PS_SATURATE_OUTPUT3_FALSE 0x00000000
+#define NVB097_SET_PS_SATURATE_OUTPUT3_TRUE 0x00000001
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE3 13:13
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE3_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE3_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVB097_SET_PS_SATURATE_OUTPUT4 16:16
+#define NVB097_SET_PS_SATURATE_OUTPUT4_FALSE 0x00000000
+#define NVB097_SET_PS_SATURATE_OUTPUT4_TRUE 0x00000001
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE4 17:17
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE4_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE4_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVB097_SET_PS_SATURATE_OUTPUT5 20:20
+#define NVB097_SET_PS_SATURATE_OUTPUT5_FALSE 0x00000000
+#define NVB097_SET_PS_SATURATE_OUTPUT5_TRUE 0x00000001
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE5 21:21
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE5_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE5_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVB097_SET_PS_SATURATE_OUTPUT6 24:24
+#define NVB097_SET_PS_SATURATE_OUTPUT6_FALSE 0x00000000
+#define NVB097_SET_PS_SATURATE_OUTPUT6_TRUE 0x00000001
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE6 25:25
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE6_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE6_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVB097_SET_PS_SATURATE_OUTPUT7 28:28
+#define NVB097_SET_PS_SATURATE_OUTPUT7_FALSE 0x00000000
+#define NVB097_SET_PS_SATURATE_OUTPUT7_TRUE 0x00000001
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE7 29:29
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE7_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB097_SET_PS_SATURATE_CLAMP_RANGE7_MINUS_ONE_TO_PLUS_ONE 0x00000001
+
+#define NVB097_SET_WINDOW_ORIGIN 0x13ac
+#define NVB097_SET_WINDOW_ORIGIN_MODE 0:0
+#define NVB097_SET_WINDOW_ORIGIN_MODE_UPPER_LEFT 0x00000000
+#define NVB097_SET_WINDOW_ORIGIN_MODE_LOWER_LEFT 0x00000001
+#define NVB097_SET_WINDOW_ORIGIN_FLIP_Y 4:4
+#define NVB097_SET_WINDOW_ORIGIN_FLIP_Y_FALSE 0x00000000
+#define NVB097_SET_WINDOW_ORIGIN_FLIP_Y_TRUE 0x00000001
+
+#define NVB097_SET_LINE_WIDTH_FLOAT 0x13b0
+#define NVB097_SET_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVB097_SET_ALIASED_LINE_WIDTH_FLOAT 0x13b4
+#define NVB097_SET_ALIASED_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVB097_SET_LINE_MULTISAMPLE_OVERRIDE 0x1418
+#define NVB097_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE 0:0
+#define NVB097_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_FALSE 0x00000000
+#define NVB097_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_ALPHA_HYSTERESIS 0x1420
+#define NVB097_SET_ALPHA_HYSTERESIS_ROUNDS_OF_ALPHA 7:0
+
+#define NVB097_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424
+#define NVB097_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0
+#define NVB097_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVB097_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVB097_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4
+
+#define NVB097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x1428
+#define NVB097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0
+#define NVB097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVB097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVB097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4
+
+#define NVB097_SET_GLOBAL_BASE_VERTEX_INDEX 0x1434
+#define NVB097_SET_GLOBAL_BASE_VERTEX_INDEX_V 31:0
+
+#define NVB097_SET_GLOBAL_BASE_INSTANCE_INDEX 0x1438
+#define NVB097_SET_GLOBAL_BASE_INSTANCE_INDEX_V 31:0
+
+#define NVB097_SET_PS_WARP_WATERMARKS 0x1450
+#define NVB097_SET_PS_WARP_WATERMARKS_LOW 15:0
+#define NVB097_SET_PS_WARP_WATERMARKS_HIGH 31:16
+
+#define NVB097_SET_PS_REGISTER_WATERMARKS 0x1454
+#define NVB097_SET_PS_REGISTER_WATERMARKS_LOW 15:0
+#define NVB097_SET_PS_REGISTER_WATERMARKS_HIGH 31:16
+
+#define NVB097_STORE_ZCULL 0x1464
+#define NVB097_STORE_ZCULL_V 0:0
+
+#define NVB097_SET_ITERATED_BLEND_CONSTANT_RED(j) (0x1480+(j)*16)
+#define NVB097_SET_ITERATED_BLEND_CONSTANT_RED_V 15:0
+
+#define NVB097_SET_ITERATED_BLEND_CONSTANT_GREEN(j) (0x1484+(j)*16)
+#define NVB097_SET_ITERATED_BLEND_CONSTANT_GREEN_V 15:0
+
+#define NVB097_SET_ITERATED_BLEND_CONSTANT_BLUE(j) (0x1488+(j)*16)
+#define NVB097_SET_ITERATED_BLEND_CONSTANT_BLUE_V 15:0
+
+#define NVB097_LOAD_ZCULL 0x1500
+#define NVB097_LOAD_ZCULL_V 0:0
+
+#define NVB097_SET_SURFACE_CLIP_ID_HEIGHT 0x1504
+#define NVB097_SET_SURFACE_CLIP_ID_HEIGHT_V 31:0
+
+#define NVB097_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL 0x1508
+#define NVB097_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVB097_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVB097_SET_CLIP_ID_CLEAR_RECT_VERTICAL 0x150c
+#define NVB097_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVB097_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVB097_SET_USER_CLIP_ENABLE 0x1510
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE0 0:0
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE0_FALSE 0x00000000
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE0_TRUE 0x00000001
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE1 1:1
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE1_FALSE 0x00000000
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE1_TRUE 0x00000001
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE2 2:2
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE2_FALSE 0x00000000
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE2_TRUE 0x00000001
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE3 3:3
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE3_FALSE 0x00000000
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE3_TRUE 0x00000001
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE4 4:4
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE4_FALSE 0x00000000
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE4_TRUE 0x00000001
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE5 5:5
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE5_FALSE 0x00000000
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE5_TRUE 0x00000001
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE6 6:6
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE6_FALSE 0x00000000
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE6_TRUE 0x00000001
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE7 7:7
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE7_FALSE 0x00000000
+#define NVB097_SET_USER_CLIP_ENABLE_PLANE7_TRUE 0x00000001
+
+#define NVB097_SET_ZPASS_PIXEL_COUNT 0x1514
+#define NVB097_SET_ZPASS_PIXEL_COUNT_ENABLE 0:0
+#define NVB097_SET_ZPASS_PIXEL_COUNT_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ZPASS_PIXEL_COUNT_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_POINT_SIZE 0x1518
+#define NVB097_SET_POINT_SIZE_V 31:0
+
+#define NVB097_SET_ZCULL_STATS 0x151c
+#define NVB097_SET_ZCULL_STATS_ENABLE 0:0
+#define NVB097_SET_ZCULL_STATS_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ZCULL_STATS_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_POINT_SPRITE 0x1520
+#define NVB097_SET_POINT_SPRITE_ENABLE 0:0
+#define NVB097_SET_POINT_SPRITE_ENABLE_FALSE 0x00000000
+#define NVB097_SET_POINT_SPRITE_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_SHADER_EXCEPTIONS 0x1528
+#define NVB097_SET_SHADER_EXCEPTIONS_ENABLE 0:0
+#define NVB097_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000
+#define NVB097_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001
+
+#define NVB097_CLEAR_REPORT_VALUE 0x1530
+#define NVB097_CLEAR_REPORT_VALUE_TYPE 4:0
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_DA_VERTICES_GENERATED 0x00000012
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_DA_PRIMITIVES_GENERATED 0x00000013
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_VS_INVOCATIONS 0x00000015
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_TI_INVOCATIONS 0x00000016
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_TS_INVOCATIONS 0x00000017
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_TS_PRIMITIVES_GENERATED 0x00000018
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_GS_INVOCATIONS 0x0000001A
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_GS_PRIMITIVES_GENERATED 0x0000001B
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_VTG_PRIMITIVES_OUT 0x0000001F
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_SUCCEEDED 0x00000010
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_NEEDED 0x00000011
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000003
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_CLIPPER_INVOCATIONS 0x0000001C
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_CLIPPER_PRIMITIVES_GENERATED 0x0000001D
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_ZCULL_STATS 0x00000002
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_PS_INVOCATIONS 0x0000001E
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_ZPASS_PIXEL_CNT 0x00000001
+#define NVB097_CLEAR_REPORT_VALUE_TYPE_ALPHA_BETA_CLOCKS 0x00000004
+
+#define NVB097_SET_ANTI_ALIAS_ENABLE 0x1534
+#define NVB097_SET_ANTI_ALIAS_ENABLE_V 0:0
+#define NVB097_SET_ANTI_ALIAS_ENABLE_V_FALSE 0x00000000
+#define NVB097_SET_ANTI_ALIAS_ENABLE_V_TRUE 0x00000001
+
+#define NVB097_SET_ZT_SELECT 0x1538
+#define NVB097_SET_ZT_SELECT_TARGET_COUNT 0:0
+
+#define NVB097_SET_ANTI_ALIAS_ALPHA_CONTROL 0x153c
+#define NVB097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE 0:0
+#define NVB097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_DISABLE 0x00000000
+#define NVB097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_ENABLE 0x00000001
+#define NVB097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE 4:4
+#define NVB097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_DISABLE 0x00000000
+#define NVB097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_ENABLE 0x00000001
+
+#define NVB097_SET_RENDER_ENABLE_A 0x1550
+#define NVB097_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0
+
+#define NVB097_SET_RENDER_ENABLE_B 0x1554
+#define NVB097_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0
+
+#define NVB097_SET_RENDER_ENABLE_C 0x1558
+#define NVB097_SET_RENDER_ENABLE_C_MODE 2:0
+#define NVB097_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000
+#define NVB097_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001
+#define NVB097_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002
+#define NVB097_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003
+#define NVB097_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NVB097_SET_TEX_SAMPLER_POOL_A 0x155c
+#define NVB097_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVB097_SET_TEX_SAMPLER_POOL_B 0x1560
+#define NVB097_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVB097_SET_TEX_SAMPLER_POOL_C 0x1564
+#define NVB097_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0
+
+#define NVB097_SET_SLOPE_SCALE_DEPTH_BIAS 0x156c
+#define NVB097_SET_SLOPE_SCALE_DEPTH_BIAS_V 31:0
+
+#define NVB097_SET_ANTI_ALIASED_LINE 0x1570
+#define NVB097_SET_ANTI_ALIASED_LINE_ENABLE 0:0
+#define NVB097_SET_ANTI_ALIASED_LINE_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ANTI_ALIASED_LINE_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_TEX_HEADER_POOL_A 0x1574
+#define NVB097_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVB097_SET_TEX_HEADER_POOL_B 0x1578
+#define NVB097_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVB097_SET_TEX_HEADER_POOL_C 0x157c
+#define NVB097_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0
+
+#define NVB097_SET_ACTIVE_ZCULL_REGION 0x1590
+#define NVB097_SET_ACTIVE_ZCULL_REGION_ID 5:0
+
+#define NVB097_SET_TWO_SIDED_STENCIL_TEST 0x1594
+#define NVB097_SET_TWO_SIDED_STENCIL_TEST_ENABLE 0:0
+#define NVB097_SET_TWO_SIDED_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NVB097_SET_TWO_SIDED_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_BACK_STENCIL_OP_FAIL 0x1598
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V 31:0
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NVB097_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL 0x159c
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V 31:0
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVB097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVB097_SET_BACK_STENCIL_OP_ZPASS 0x15a0
+#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V 31:0
+#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
NVB097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A +#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507 +#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508 +#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001 +#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002 +#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003 +#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004 +#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005 +#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006 +#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007 +#define NVB097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008 + +#define NVB097_SET_BACK_STENCIL_FUNC 0x15a4 +#define NVB097_SET_BACK_STENCIL_FUNC_V 31:0 +#define NVB097_SET_BACK_STENCIL_FUNC_V_OGL_NEVER 0x00000200 +#define NVB097_SET_BACK_STENCIL_FUNC_V_OGL_LESS 0x00000201 +#define NVB097_SET_BACK_STENCIL_FUNC_V_OGL_EQUAL 0x00000202 +#define NVB097_SET_BACK_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVB097_SET_BACK_STENCIL_FUNC_V_OGL_GREATER 0x00000204 +#define NVB097_SET_BACK_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVB097_SET_BACK_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVB097_SET_BACK_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVB097_SET_BACK_STENCIL_FUNC_V_D3D_NEVER 0x00000001 +#define NVB097_SET_BACK_STENCIL_FUNC_V_D3D_LESS 0x00000002 +#define NVB097_SET_BACK_STENCIL_FUNC_V_D3D_EQUAL 0x00000003 +#define NVB097_SET_BACK_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVB097_SET_BACK_STENCIL_FUNC_V_D3D_GREATER 0x00000005 +#define NVB097_SET_BACK_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVB097_SET_BACK_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVB097_SET_BACK_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVB097_SET_SRGB_WRITE 0x15b8 +#define NVB097_SET_SRGB_WRITE_ENABLE 0:0 +#define NVB097_SET_SRGB_WRITE_ENABLE_FALSE 0x00000000 +#define NVB097_SET_SRGB_WRITE_ENABLE_TRUE 0x00000001 + +#define NVB097_SET_DEPTH_BIAS 0x15bc +#define NVB097_SET_DEPTH_BIAS_V 31:0 + +#define NVB097_SET_ZCULL_REGION_FORMAT 0x15c8 +#define NVB097_SET_ZCULL_REGION_FORMAT_TYPE 3:0 +#define NVB097_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X4 0x00000000 +#define NVB097_SET_ZCULL_REGION_FORMAT_TYPE_ZS_4X4 0x00000001 +#define NVB097_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X2 0x00000002 +#define NVB097_SET_ZCULL_REGION_FORMAT_TYPE_Z_2X4 0x00000003 +#define NVB097_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X8_4X4 0x00000004 +#define NVB097_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_4X2 0x00000005 +#define NVB097_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_2X4 0x00000006 +#define NVB097_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X16_4X8 0x00000007 +#define NVB097_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_2X2 0x00000008 +#define NVB097_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_4X2 0x00000009 +#define NVB097_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_2X4 0x0000000A +#define NVB097_SET_ZCULL_REGION_FORMAT_TYPE_ZS_8X8_2X2 0x0000000B +#define NVB097_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_1X1 0x0000000C + +#define NVB097_SET_RT_LAYER 0x15cc +#define NVB097_SET_RT_LAYER_V 15:0 +#define NVB097_SET_RT_LAYER_CONTROL 16:16 +#define NVB097_SET_RT_LAYER_CONTROL_V_SELECTS_LAYER 0x00000000 +#define NVB097_SET_RT_LAYER_CONTROL_GEOMETRY_SHADER_SELECTS_LAYER 0x00000001 + +#define NVB097_SET_ANTI_ALIAS 0x15d0 +#define NVB097_SET_ANTI_ALIAS_SAMPLES 3:0 +#define NVB097_SET_ANTI_ALIAS_SAMPLES_MODE_1X1 0x00000000 +#define NVB097_SET_ANTI_ALIAS_SAMPLES_MODE_2X1 0x00000001 +#define NVB097_SET_ANTI_ALIAS_SAMPLES_MODE_2X2 0x00000002 +#define 
+
+#define NVB097_SET_ANTI_ALIAS 0x15d0
+#define NVB097_SET_ANTI_ALIAS_SAMPLES 3:0
+#define NVB097_SET_ANTI_ALIAS_SAMPLES_MODE_1X1 0x00000000
+#define NVB097_SET_ANTI_ALIAS_SAMPLES_MODE_2X1 0x00000001
+#define NVB097_SET_ANTI_ALIAS_SAMPLES_MODE_2X2 0x00000002
+#define NVB097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2 0x00000003
+#define NVB097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVB097_SET_ANTI_ALIAS_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVB097_SET_ANTI_ALIAS_SAMPLES_MODE_4X4 0x00000006
+#define NVB097_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_4 0x00000008
+#define NVB097_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_12 0x00000009
+#define NVB097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_8 0x0000000A
+#define NVB097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_24 0x0000000B
+
+#define NVB097_SET_EDGE_FLAG 0x15e4
+#define NVB097_SET_EDGE_FLAG_V 0:0
+#define NVB097_SET_EDGE_FLAG_V_FALSE 0x00000000
+#define NVB097_SET_EDGE_FLAG_V_TRUE 0x00000001
+
+#define NVB097_DRAW_INLINE_INDEX 0x15e8
+#define NVB097_DRAW_INLINE_INDEX_V 31:0
+
+#define NVB097_SET_INLINE_INDEX2X16_ALIGN 0x15ec
+#define NVB097_SET_INLINE_INDEX2X16_ALIGN_COUNT 30:0
+#define NVB097_SET_INLINE_INDEX2X16_ALIGN_START_ODD 31:31
+#define NVB097_SET_INLINE_INDEX2X16_ALIGN_START_ODD_FALSE 0x00000000
+#define NVB097_SET_INLINE_INDEX2X16_ALIGN_START_ODD_TRUE 0x00000001
+
+#define NVB097_DRAW_INLINE_INDEX2X16 0x15f0
+#define NVB097_DRAW_INLINE_INDEX2X16_EVEN 15:0
+#define NVB097_DRAW_INLINE_INDEX2X16_ODD 31:16
+
+#define NVB097_SET_VERTEX_GLOBAL_BASE_OFFSET_A 0x15f4
+#define NVB097_SET_VERTEX_GLOBAL_BASE_OFFSET_A_UPPER 7:0
+
+#define NVB097_SET_VERTEX_GLOBAL_BASE_OFFSET_B 0x15f8
+#define NVB097_SET_VERTEX_GLOBAL_BASE_OFFSET_B_LOWER 31:0
+
+#define NVB097_SET_ZCULL_REGION_PIXEL_OFFSET_A 0x15fc
+#define NVB097_SET_ZCULL_REGION_PIXEL_OFFSET_A_WIDTH 15:0
+
+#define NVB097_SET_ZCULL_REGION_PIXEL_OFFSET_B 0x1600
+#define NVB097_SET_ZCULL_REGION_PIXEL_OFFSET_B_HEIGHT 15:0
+
+#define NVB097_SET_POINT_SPRITE_SELECT 0x1604
+#define NVB097_SET_POINT_SPRITE_SELECT_RMODE 1:0
+#define NVB097_SET_POINT_SPRITE_SELECT_RMODE_ZERO 0x00000000
+#define NVB097_SET_POINT_SPRITE_SELECT_RMODE_FROM_R 0x00000001
+#define NVB097_SET_POINT_SPRITE_SELECT_RMODE_FROM_S 0x00000002
+#define NVB097_SET_POINT_SPRITE_SELECT_ORIGIN 2:2
+#define NVB097_SET_POINT_SPRITE_SELECT_ORIGIN_BOTTOM 0x00000000
+#define NVB097_SET_POINT_SPRITE_SELECT_ORIGIN_TOP 0x00000001
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE0 3:3
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE0_PASSTHROUGH 0x00000000
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE0_GENERATE 0x00000001
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE1 4:4
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE1_PASSTHROUGH 0x00000000
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE1_GENERATE 0x00000001
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE2 5:5
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE2_PASSTHROUGH 0x00000000
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE2_GENERATE 0x00000001
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE3 6:6
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE3_PASSTHROUGH 0x00000000
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE3_GENERATE 0x00000001
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE4 7:7
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE4_PASSTHROUGH 0x00000000
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE4_GENERATE 0x00000001
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE5 8:8
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE5_PASSTHROUGH 0x00000000
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE5_GENERATE 0x00000001
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE6 9:9
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE6_PASSTHROUGH 0x00000000
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE6_GENERATE 0x00000001
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE7 10:10
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE7_PASSTHROUGH 0x00000000
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE7_GENERATE 0x00000001
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE8 11:11
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE8_PASSTHROUGH 0x00000000
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE8_GENERATE 0x00000001
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE9 12:12
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE9_PASSTHROUGH 0x00000000
+#define NVB097_SET_POINT_SPRITE_SELECT_TEXTURE9_GENERATE 0x00000001
+
+#define NVB097_SET_PROGRAM_REGION_A 0x1608
+#define NVB097_SET_PROGRAM_REGION_A_ADDRESS_UPPER 7:0
+
+#define NVB097_SET_PROGRAM_REGION_B 0x160c
+#define NVB097_SET_PROGRAM_REGION_B_ADDRESS_LOWER 31:0
+
+#define NVB097_SET_ATTRIBUTE_DEFAULT 0x1610
+#define NVB097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE 0:0
+#define NVB097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_0001 0x00000000
+#define NVB097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_1111 0x00000001
+#define NVB097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR 1:1
+#define NVB097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0000 0x00000000
+#define NVB097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0001 0x00000001
+#define NVB097_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR 2:2
+#define NVB097_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0000 0x00000000
+#define NVB097_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0001 0x00000001
+#define NVB097_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE 3:3
+#define NVB097_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0000 0x00000000
+#define NVB097_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0001 0x00000001
+#define NVB097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0 4:4
+#define NVB097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_0001 0x00000000
+#define NVB097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_1111 0x00000001
+#define NVB097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15 5:5
+#define NVB097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0000 0x00000000
+#define NVB097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0001 0x00000001
+
+#define NVB097_END 0x1614
+#define NVB097_END_V 0:0
+
+#define NVB097_BEGIN 0x1618
+#define NVB097_BEGIN_OP 15:0
+#define NVB097_BEGIN_OP_POINTS 0x00000000
+#define NVB097_BEGIN_OP_LINES 0x00000001
+#define NVB097_BEGIN_OP_LINE_LOOP 0x00000002
+#define NVB097_BEGIN_OP_LINE_STRIP 0x00000003
+#define NVB097_BEGIN_OP_TRIANGLES 0x00000004
+#define NVB097_BEGIN_OP_TRIANGLE_STRIP 0x00000005
+#define NVB097_BEGIN_OP_TRIANGLE_FAN 0x00000006
+#define NVB097_BEGIN_OP_QUADS 0x00000007
+#define NVB097_BEGIN_OP_QUAD_STRIP 0x00000008
+#define NVB097_BEGIN_OP_POLYGON 0x00000009
+#define NVB097_BEGIN_OP_LINELIST_ADJCY 0x0000000A
+#define NVB097_BEGIN_OP_LINESTRIP_ADJCY 0x0000000B
+#define NVB097_BEGIN_OP_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB097_BEGIN_OP_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB097_BEGIN_OP_PATCH 0x0000000E
+#define NVB097_BEGIN_PRIMITIVE_ID 24:24
+#define NVB097_BEGIN_PRIMITIVE_ID_FIRST 0x00000000
+#define NVB097_BEGIN_PRIMITIVE_ID_UNCHANGED 0x00000001
+#define NVB097_BEGIN_INSTANCE_ID 27:26
+#define NVB097_BEGIN_INSTANCE_ID_FIRST 0x00000000
+#define NVB097_BEGIN_INSTANCE_ID_SUBSEQUENT 0x00000001
+#define NVB097_BEGIN_INSTANCE_ID_UNCHANGED 0x00000002
+#define NVB097_BEGIN_SPLIT_MODE 30:29
+#define NVB097_BEGIN_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000
+#define NVB097_BEGIN_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001
+#define NVB097_BEGIN_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002
+#define NVB097_BEGIN_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003
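/*
 * [Editorial sketch] A non-indexed draw is bracketed by BEGIN and END; the
 * BEGIN data word carries the primitive topology plus the primitive- and
 * instance-ID policy.  Reusing the FIELD_* stand-ins above, the word for the
 * first instance of a triangle-list draw would be:
 */
static const unsigned int beginTriangles =
    FIELD_NUM(NVB097_BEGIN_OP, NVB097_BEGIN_OP_TRIANGLES) |
    FIELD_NUM(NVB097_BEGIN_PRIMITIVE_ID, NVB097_BEGIN_PRIMITIVE_ID_FIRST) |
    FIELD_NUM(NVB097_BEGIN_INSTANCE_ID, NVB097_BEGIN_INSTANCE_ID_FIRST);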
+
+#define NVB097_SET_VERTEX_ID_COPY 0x161c
+#define NVB097_SET_VERTEX_ID_COPY_ENABLE 0:0
+#define NVB097_SET_VERTEX_ID_COPY_ENABLE_FALSE 0x00000000
+#define NVB097_SET_VERTEX_ID_COPY_ENABLE_TRUE 0x00000001
+#define NVB097_SET_VERTEX_ID_COPY_ATTRIBUTE_SLOT 11:4
+
+#define NVB097_ADD_TO_PRIMITIVE_ID 0x1620
+#define NVB097_ADD_TO_PRIMITIVE_ID_V 31:0
+
+#define NVB097_LOAD_PRIMITIVE_ID 0x1624
+#define NVB097_LOAD_PRIMITIVE_ID_V 31:0
+
+#define NVB097_SET_SHADER_BASED_CULL 0x162c
+#define NVB097_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE 1:1
+#define NVB097_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_FALSE 0x00000000
+#define NVB097_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_TRUE 0x00000001
+#define NVB097_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE 0:0
+#define NVB097_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_FALSE 0x00000000
+#define NVB097_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_CLASS_VERSION 0x1638
+#define NVB097_SET_CLASS_VERSION_CURRENT 15:0
+#define NVB097_SET_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVB097_SET_DA_PRIMITIVE_RESTART 0x1644
+#define NVB097_SET_DA_PRIMITIVE_RESTART_ENABLE 0:0
+#define NVB097_SET_DA_PRIMITIVE_RESTART_ENABLE_FALSE 0x00000000
+#define NVB097_SET_DA_PRIMITIVE_RESTART_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_DA_PRIMITIVE_RESTART_INDEX 0x1648
+#define NVB097_SET_DA_PRIMITIVE_RESTART_INDEX_V 31:0
+
+#define NVB097_SET_DA_OUTPUT 0x164c
+#define NVB097_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START 12:12
+#define NVB097_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_FALSE 0x00000000
+#define NVB097_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_TRUE 0x00000001
+
+#define NVB097_SET_ANTI_ALIASED_POINT 0x1658
+#define NVB097_SET_ANTI_ALIASED_POINT_ENABLE 0:0
+#define NVB097_SET_ANTI_ALIASED_POINT_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ANTI_ALIASED_POINT_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_POINT_CENTER_MODE 0x165c
+#define NVB097_SET_POINT_CENTER_MODE_V 31:0
+#define NVB097_SET_POINT_CENTER_MODE_V_OGL 0x00000000
+#define NVB097_SET_POINT_CENTER_MODE_V_D3D 0x00000001
+
+#define NVB097_SET_LINE_SMOOTH_PARAMETERS 0x1668
+#define NVB097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF 31:0
+#define NVB097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_00 0x00000000
+#define NVB097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_33 0x00000001
+#define NVB097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_60 0x00000002
+
+#define NVB097_SET_LINE_STIPPLE 0x166c
+#define NVB097_SET_LINE_STIPPLE_ENABLE 0:0
+#define NVB097_SET_LINE_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVB097_SET_LINE_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_LINE_SMOOTH_EDGE_TABLE(i) (0x1670+(i)*4)
+#define NVB097_SET_LINE_SMOOTH_EDGE_TABLE_V0 7:0
+#define NVB097_SET_LINE_SMOOTH_EDGE_TABLE_V1 15:8
+#define NVB097_SET_LINE_SMOOTH_EDGE_TABLE_V2 23:16
+#define NVB097_SET_LINE_SMOOTH_EDGE_TABLE_V3 31:24
+
+#define NVB097_SET_LINE_STIPPLE_PARAMETERS 0x1680
+#define NVB097_SET_LINE_STIPPLE_PARAMETERS_FACTOR 7:0
+#define NVB097_SET_LINE_STIPPLE_PARAMETERS_PATTERN 23:8
+
+#define NVB097_SET_PROVOKING_VERTEX 0x1684
+#define NVB097_SET_PROVOKING_VERTEX_V 0:0
+#define NVB097_SET_PROVOKING_VERTEX_V_FIRST 0x00000000
+#define NVB097_SET_PROVOKING_VERTEX_V_LAST 0x00000001
+
+#define NVB097_SET_TWO_SIDED_LIGHT 0x1688
+#define NVB097_SET_TWO_SIDED_LIGHT_ENABLE 0:0
+#define NVB097_SET_TWO_SIDED_LIGHT_ENABLE_FALSE 0x00000000
+#define NVB097_SET_TWO_SIDED_LIGHT_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_POLYGON_STIPPLE 0x168c
+#define NVB097_SET_POLYGON_STIPPLE_ENABLE 0:0
+#define NVB097_SET_POLYGON_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVB097_SET_POLYGON_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_SHADER_CONTROL 0x1690
+#define NVB097_SET_SHADER_CONTROL_DEFAULT_PARTIAL 0:0
+#define NVB097_SET_SHADER_CONTROL_DEFAULT_PARTIAL_ZERO 0x00000000
+#define NVB097_SET_SHADER_CONTROL_DEFAULT_PARTIAL_INFINITY 0x00000001
+#define NVB097_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR 1:1
+#define NVB097_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_LEGACY 0x00000000
+#define NVB097_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_FP64_COMPATIBLE 0x00000001
+#define NVB097_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR 2:2
+#define NVB097_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_ZERO 0x00000000
+#define NVB097_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_INDEFINITE 0x00000001
+
+#define NVB097_CHECK_CLASS_VERSION 0x16a0
+#define NVB097_CHECK_CLASS_VERSION_CURRENT 15:0
+#define NVB097_CHECK_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVB097_SET_SPH_VERSION 0x16a4
+#define NVB097_SET_SPH_VERSION_CURRENT 15:0
+#define NVB097_SET_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVB097_CHECK_SPH_VERSION 0x16a8
+#define NVB097_CHECK_SPH_VERSION_CURRENT 15:0
+#define NVB097_CHECK_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVB097_SET_ALPHA_TO_COVERAGE_OVERRIDE 0x16b4
+#define NVB097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE 0:0
+#define NVB097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000
+#define NVB097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001
+#define NVB097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT 1:1
+#define NVB097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_DISABLE 0x00000000
+#define NVB097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_ENABLE 0x00000001
+
+#define NVB097_SET_POLYGON_STIPPLE_PATTERN(i) (0x1700+(i)*4)
+#define NVB097_SET_POLYGON_STIPPLE_PATTERN_V 31:0
+
+#define NVB097_SET_AAM_VERSION 0x1790
+#define NVB097_SET_AAM_VERSION_CURRENT 15:0
+#define NVB097_SET_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVB097_CHECK_AAM_VERSION 0x1794
+#define NVB097_CHECK_AAM_VERSION_CURRENT 15:0
+#define NVB097_CHECK_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVB097_SET_ZT_LAYER 0x179c
+#define NVB097_SET_ZT_LAYER_OFFSET 15:0
+
+#define NVB097_SET_INDEX_BUFFER_A 0x17c8
+#define NVB097_SET_INDEX_BUFFER_A_ADDRESS_UPPER 7:0
+
+#define NVB097_SET_INDEX_BUFFER_B 0x17cc
+#define NVB097_SET_INDEX_BUFFER_B_ADDRESS_LOWER 31:0
+
+#define NVB097_SET_INDEX_BUFFER_C 0x17d0
+#define NVB097_SET_INDEX_BUFFER_C_LIMIT_ADDRESS_UPPER 7:0
+
+#define NVB097_SET_INDEX_BUFFER_D 0x17d4
+#define NVB097_SET_INDEX_BUFFER_D_LIMIT_ADDRESS_LOWER 31:0
+
+#define NVB097_SET_INDEX_BUFFER_E 0x17d8
+#define NVB097_SET_INDEX_BUFFER_E_INDEX_SIZE 1:0
+#define NVB097_SET_INDEX_BUFFER_E_INDEX_SIZE_ONE_BYTE 0x00000000
+#define NVB097_SET_INDEX_BUFFER_E_INDEX_SIZE_TWO_BYTES 0x00000001
+#define NVB097_SET_INDEX_BUFFER_E_INDEX_SIZE_FOUR_BYTES 0x00000002
+
+#define NVB097_SET_INDEX_BUFFER_F 0x17dc
+#define NVB097_SET_INDEX_BUFFER_F_FIRST 31:0
+
+#define NVB097_DRAW_INDEX_BUFFER 0x17e0
+#define NVB097_DRAW_INDEX_BUFFER_COUNT 31:0
+
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST 0x17e4
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST 0x17e8
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST 0x17ec
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f0
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f4
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f8
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
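/*
 * [Editorial sketch] The DRAW_INDEX_BUFFER{32,16,8}_BEGIN_END_INSTANCE_*
 * methods fold an entire small draw into one data word: the index size is
 * implied by the method offset, and FIRST/COUNT/TOPOLOGY are bitfields
 * (COUNT is limited to 12 bits).  A 36-index triangle draw with 16-bit
 * indices, using the FIELD_* stand-ins above:
 */
static const unsigned int smallIndexedDraw =
    FIELD_NUM(NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_FIRST, 0) |
    FIELD_NUM(NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_COUNT, 36) |
    FIELD_NUM(NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY,
              NVB097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES);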
+
+#define NVB097_SET_DEPTH_BIAS_CLAMP 0x187c
+#define NVB097_SET_DEPTH_BIAS_CLAMP_V 31:0
+
+#define NVB097_SET_VERTEX_STREAM_INSTANCE_A(i) (0x1880+(i)*4)
+#define NVB097_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED 0:0
+#define NVB097_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_FALSE 0x00000000
+#define NVB097_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_TRUE 0x00000001
+
+#define NVB097_SET_VERTEX_STREAM_INSTANCE_B(i) (0x18c0+(i)*4)
+#define NVB097_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED 0:0
+#define NVB097_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_FALSE 0x00000000
+#define NVB097_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_TRUE 0x00000001
+
+#define NVB097_SET_ATTRIBUTE_POINT_SIZE 0x1910
+#define NVB097_SET_ATTRIBUTE_POINT_SIZE_ENABLE 0:0
+#define NVB097_SET_ATTRIBUTE_POINT_SIZE_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ATTRIBUTE_POINT_SIZE_ENABLE_TRUE 0x00000001
+#define NVB097_SET_ATTRIBUTE_POINT_SIZE_SLOT 11:4
+
+#define NVB097_OGL_SET_CULL 0x1918
+#define NVB097_OGL_SET_CULL_ENABLE 0:0
+#define NVB097_OGL_SET_CULL_ENABLE_FALSE 0x00000000
+#define NVB097_OGL_SET_CULL_ENABLE_TRUE 0x00000001
+
+#define NVB097_OGL_SET_FRONT_FACE 0x191c
+#define NVB097_OGL_SET_FRONT_FACE_V 31:0
+#define NVB097_OGL_SET_FRONT_FACE_V_CW 0x00000900
+#define NVB097_OGL_SET_FRONT_FACE_V_CCW 0x00000901
+
+#define NVB097_OGL_SET_CULL_FACE 0x1920
+#define NVB097_OGL_SET_CULL_FACE_V 31:0
+#define NVB097_OGL_SET_CULL_FACE_V_FRONT 0x00000404
+#define NVB097_OGL_SET_CULL_FACE_V_BACK 0x00000405
+#define NVB097_OGL_SET_CULL_FACE_V_FRONT_AND_BACK 0x00000408
+
+#define NVB097_SET_VIEWPORT_PIXEL 0x1924
+#define NVB097_SET_VIEWPORT_PIXEL_CENTER 0:0
+#define NVB097_SET_VIEWPORT_PIXEL_CENTER_AT_HALF_INTEGERS 0x00000000
+#define NVB097_SET_VIEWPORT_PIXEL_CENTER_AT_INTEGERS 0x00000001
+
+#define NVB097_SET_VIEWPORT_SCALE_OFFSET 0x192c
+#define NVB097_SET_VIEWPORT_SCALE_OFFSET_ENABLE 0:0
+#define NVB097_SET_VIEWPORT_SCALE_OFFSET_ENABLE_FALSE 0x00000000
+#define NVB097_SET_VIEWPORT_SCALE_OFFSET_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL 0x193c
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE 0:0
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_FALSE 0x00000000
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_TRUE 0x00000001
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z 3:3
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLIP 0x00000000
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLAMP 0x00000001
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z 4:4
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLIP 0x00000000
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLAMP 0x00000001
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND 7:7
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_256 0x00000000
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_1 0x00000001
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND 10:10
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_256 0x00000000
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_1 0x00000001
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP 13:11
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP 0x00000000
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_PASSTHRU 0x00000001
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XY_CLIP 0x00000002
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XYZ_CLIP 0x00000003
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP_NO_Z_CULL 0x00000004
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_Z_CLIP 0x00000005
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z 2:1
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SAME_AS_XY_GUARDBAND 0x00000000
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_256 0x00000001
+#define NVB097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_1 0x00000002
+
+#define NVB097_SET_USER_CLIP_OP 0x1940
+#define NVB097_SET_USER_CLIP_OP_PLANE0 0:0
+#define NVB097_SET_USER_CLIP_OP_PLANE0_CLIP 0x00000000
+#define NVB097_SET_USER_CLIP_OP_PLANE0_CULL 0x00000001
+#define NVB097_SET_USER_CLIP_OP_PLANE1 4:4
+#define NVB097_SET_USER_CLIP_OP_PLANE1_CLIP 0x00000000
+#define NVB097_SET_USER_CLIP_OP_PLANE1_CULL 0x00000001
+#define NVB097_SET_USER_CLIP_OP_PLANE2 8:8
+#define NVB097_SET_USER_CLIP_OP_PLANE2_CLIP 0x00000000
+#define NVB097_SET_USER_CLIP_OP_PLANE2_CULL 0x00000001
+#define NVB097_SET_USER_CLIP_OP_PLANE3 12:12
+#define NVB097_SET_USER_CLIP_OP_PLANE3_CLIP 0x00000000
+#define NVB097_SET_USER_CLIP_OP_PLANE3_CULL 0x00000001
+#define NVB097_SET_USER_CLIP_OP_PLANE4 16:16
+#define NVB097_SET_USER_CLIP_OP_PLANE4_CLIP 0x00000000
+#define NVB097_SET_USER_CLIP_OP_PLANE4_CULL 0x00000001
+#define NVB097_SET_USER_CLIP_OP_PLANE5 20:20
+#define NVB097_SET_USER_CLIP_OP_PLANE5_CLIP 0x00000000
+#define NVB097_SET_USER_CLIP_OP_PLANE5_CULL 0x00000001
+#define NVB097_SET_USER_CLIP_OP_PLANE6 24:24
+#define NVB097_SET_USER_CLIP_OP_PLANE6_CLIP 0x00000000
+#define NVB097_SET_USER_CLIP_OP_PLANE6_CULL 0x00000001
+#define NVB097_SET_USER_CLIP_OP_PLANE7 28:28
+#define NVB097_SET_USER_CLIP_OP_PLANE7_CLIP 0x00000000
+#define NVB097_SET_USER_CLIP_OP_PLANE7_CULL 0x00000001
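/*
 * [Editorial sketch] Each user clip plane's CLIP/CULL op occupies one bit at
 * position 4*i (0, 4, 8, ... 28), so the whole word can be built in a loop.
 * Selecting CULL for planes 0-2 and CLIP (the zero default) elsewhere:
 */
static unsigned int UserClipOpWord(void)
{
    unsigned int word = 0;
    unsigned int plane;
    for (plane = 0; plane < 3; plane++)
        word |= NVB097_SET_USER_CLIP_OP_PLANE0_CULL << (4 * plane);
    return word;
}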
+
+#define NVB097_SET_RENDER_ENABLE_OVERRIDE 0x1944
+#define NVB097_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0
+#define NVB097_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000
+#define NVB097_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001
+#define NVB097_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002
+
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_CONTROL 0x1948
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE 0:0
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_TOPOLOGY_IN_BEGIN_METHODS 0x00000000
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_SEPARATE_TOPOLOGY_STATE 0x00000001
+
+#define NVB097_SET_WINDOW_CLIP_ENABLE 0x194c
+#define NVB097_SET_WINDOW_CLIP_ENABLE_V 0:0
+#define NVB097_SET_WINDOW_CLIP_ENABLE_V_FALSE 0x00000000
+#define NVB097_SET_WINDOW_CLIP_ENABLE_V_TRUE 0x00000001
+
+#define NVB097_SET_WINDOW_CLIP_TYPE 0x1950
+#define NVB097_SET_WINDOW_CLIP_TYPE_V 1:0
+#define NVB097_SET_WINDOW_CLIP_TYPE_V_INCLUSIVE 0x00000000
+#define NVB097_SET_WINDOW_CLIP_TYPE_V_EXCLUSIVE 0x00000001
+#define NVB097_SET_WINDOW_CLIP_TYPE_V_CLIPALL 0x00000002
+
+#define NVB097_INVALIDATE_ZCULL 0x1958
+#define NVB097_INVALIDATE_ZCULL_V 31:0
+#define NVB097_INVALIDATE_ZCULL_V_INVALIDATE 0x00000000
+
+#define NVB097_SET_ZCULL 0x1968
+#define NVB097_SET_ZCULL_Z_ENABLE 0:0
+#define NVB097_SET_ZCULL_Z_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ZCULL_Z_ENABLE_TRUE 0x00000001
+#define NVB097_SET_ZCULL_STENCIL_ENABLE 4:4
+#define NVB097_SET_ZCULL_STENCIL_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ZCULL_STENCIL_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_ZCULL_BOUNDS 0x196c
+#define NVB097_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE 0:0
+#define NVB097_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_TRUE 0x00000001
+#define NVB097_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE 4:4
+#define NVB097_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVB097_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_PRIMITIVE_TOPOLOGY 0x1970
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V 15:0
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_POINTLIST 0x00000001
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LINELIST 0x00000002
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP 0x00000003
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST 0x00000004
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP 0x00000005
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LINELIST_ADJCY 0x0000000A
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP_ADJCY 0x0000000B
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_PATCHLIST 0x0000000E
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_POINTS 0x00001001
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST 0x00001002
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST 0x00001003
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST 0x0000100F
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINESTRIP 0x00001010
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINESTRIP 0x00001011
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLELIST 0x00001012
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLESTRIP 0x00001013
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLESTRIP 0x00001014
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN 0x00001015
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLEFAN 0x00001016
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN_IMM 0x00001017
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST_IMM 0x00001018
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST2 0x0000101A
+#define NVB097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST2 0x0000101B
+
+#define NVB097_ZCULL_SYNC 0x1978
+#define NVB097_ZCULL_SYNC_V 31:0
+
+#define NVB097_SET_CLIP_ID_TEST 0x197c
+#define NVB097_SET_CLIP_ID_TEST_ENABLE 0:0
+#define NVB097_SET_CLIP_ID_TEST_ENABLE_FALSE 0x00000000
+#define NVB097_SET_CLIP_ID_TEST_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_SURFACE_CLIP_ID_WIDTH 0x1980
+#define NVB097_SET_SURFACE_CLIP_ID_WIDTH_V 31:0
+
+#define NVB097_SET_CLIP_ID 0x1984
+#define NVB097_SET_CLIP_ID_V 31:0
+
+#define NVB097_SET_DEPTH_BOUNDS_TEST 0x19bc
+#define NVB097_SET_DEPTH_BOUNDS_TEST_ENABLE 0:0
+#define NVB097_SET_DEPTH_BOUNDS_TEST_ENABLE_FALSE 0x00000000
+#define NVB097_SET_DEPTH_BOUNDS_TEST_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_BLEND_FLOAT_OPTION 0x19c0
+#define NVB097_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO 0:0
+#define NVB097_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_FALSE 0x00000000
+#define NVB097_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_TRUE 0x00000001
+
+#define NVB097_SET_LOGIC_OP 0x19c4
+#define NVB097_SET_LOGIC_OP_ENABLE 0:0
+#define NVB097_SET_LOGIC_OP_ENABLE_FALSE 0x00000000
+#define NVB097_SET_LOGIC_OP_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_LOGIC_OP_FUNC 0x19c8
+#define NVB097_SET_LOGIC_OP_FUNC_V 31:0
+#define NVB097_SET_LOGIC_OP_FUNC_V_CLEAR 0x00001500
+#define NVB097_SET_LOGIC_OP_FUNC_V_AND 0x00001501
+#define NVB097_SET_LOGIC_OP_FUNC_V_AND_REVERSE 0x00001502
+#define NVB097_SET_LOGIC_OP_FUNC_V_COPY 0x00001503
+#define NVB097_SET_LOGIC_OP_FUNC_V_AND_INVERTED 0x00001504
+#define NVB097_SET_LOGIC_OP_FUNC_V_NOOP 0x00001505
+#define NVB097_SET_LOGIC_OP_FUNC_V_XOR 0x00001506
+#define NVB097_SET_LOGIC_OP_FUNC_V_OR 0x00001507
+#define NVB097_SET_LOGIC_OP_FUNC_V_NOR 0x00001508
+#define NVB097_SET_LOGIC_OP_FUNC_V_EQUIV 0x00001509
+#define NVB097_SET_LOGIC_OP_FUNC_V_INVERT 0x0000150A
+#define NVB097_SET_LOGIC_OP_FUNC_V_OR_REVERSE 0x0000150B
+#define NVB097_SET_LOGIC_OP_FUNC_V_COPY_INVERTED 0x0000150C
+#define NVB097_SET_LOGIC_OP_FUNC_V_OR_INVERTED 0x0000150D
+#define NVB097_SET_LOGIC_OP_FUNC_V_NAND 0x0000150E
+#define NVB097_SET_LOGIC_OP_FUNC_V_SET 0x0000150F
+
+#define NVB097_SET_Z_COMPRESSION 0x19cc
+#define NVB097_SET_Z_COMPRESSION_ENABLE 0:0
+#define NVB097_SET_Z_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVB097_SET_Z_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVB097_CLEAR_SURFACE 0x19d0
+#define NVB097_CLEAR_SURFACE_Z_ENABLE 0:0
+#define NVB097_CLEAR_SURFACE_Z_ENABLE_FALSE 0x00000000
+#define NVB097_CLEAR_SURFACE_Z_ENABLE_TRUE 0x00000001
+#define NVB097_CLEAR_SURFACE_STENCIL_ENABLE 1:1
+#define NVB097_CLEAR_SURFACE_STENCIL_ENABLE_FALSE 0x00000000
+#define NVB097_CLEAR_SURFACE_STENCIL_ENABLE_TRUE 0x00000001
+#define NVB097_CLEAR_SURFACE_R_ENABLE 2:2
+#define NVB097_CLEAR_SURFACE_R_ENABLE_FALSE 0x00000000
+#define NVB097_CLEAR_SURFACE_R_ENABLE_TRUE 0x00000001
+#define NVB097_CLEAR_SURFACE_G_ENABLE 3:3
+#define NVB097_CLEAR_SURFACE_G_ENABLE_FALSE 0x00000000
+#define NVB097_CLEAR_SURFACE_G_ENABLE_TRUE 0x00000001
+#define NVB097_CLEAR_SURFACE_B_ENABLE 4:4
+#define NVB097_CLEAR_SURFACE_B_ENABLE_FALSE 0x00000000
+#define NVB097_CLEAR_SURFACE_B_ENABLE_TRUE 0x00000001
+#define NVB097_CLEAR_SURFACE_A_ENABLE 5:5
+#define NVB097_CLEAR_SURFACE_A_ENABLE_FALSE 0x00000000
+#define NVB097_CLEAR_SURFACE_A_ENABLE_TRUE 0x00000001
+#define NVB097_CLEAR_SURFACE_MRT_SELECT 9:6
+#define NVB097_CLEAR_SURFACE_RT_ARRAY_INDEX 25:10
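/*
 * [Editorial sketch] CLEAR_SURFACE packs the per-channel clear enables, the
 * MRT index and the layer into a single data word.  Clearing RGBA plus depth
 * on render target 0, layer 0, with the FIELD_* stand-ins above:
 */
static const unsigned int clearColorAndZ =
    FIELD_NUM(NVB097_CLEAR_SURFACE_Z_ENABLE, NVB097_CLEAR_SURFACE_Z_ENABLE_TRUE) |
    FIELD_NUM(NVB097_CLEAR_SURFACE_R_ENABLE, NVB097_CLEAR_SURFACE_R_ENABLE_TRUE) |
    FIELD_NUM(NVB097_CLEAR_SURFACE_G_ENABLE, NVB097_CLEAR_SURFACE_G_ENABLE_TRUE) |
    FIELD_NUM(NVB097_CLEAR_SURFACE_B_ENABLE, NVB097_CLEAR_SURFACE_B_ENABLE_TRUE) |
    FIELD_NUM(NVB097_CLEAR_SURFACE_A_ENABLE, NVB097_CLEAR_SURFACE_A_ENABLE_TRUE) |
    FIELD_NUM(NVB097_CLEAR_SURFACE_MRT_SELECT, 0) |
    FIELD_NUM(NVB097_CLEAR_SURFACE_RT_ARRAY_INDEX, 0);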
+
+#define NVB097_CLEAR_CLIP_ID_SURFACE 0x19d4
+#define NVB097_CLEAR_CLIP_ID_SURFACE_V 31:0
+
+#define NVB097_SET_COLOR_COMPRESSION(i) (0x19e0+(i)*4)
+#define NVB097_SET_COLOR_COMPRESSION_ENABLE 0:0
+#define NVB097_SET_COLOR_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVB097_SET_COLOR_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_CT_WRITE(i) (0x1a00+(i)*4)
+#define NVB097_SET_CT_WRITE_R_ENABLE 0:0
+#define NVB097_SET_CT_WRITE_R_ENABLE_FALSE 0x00000000
+#define NVB097_SET_CT_WRITE_R_ENABLE_TRUE 0x00000001
+#define NVB097_SET_CT_WRITE_G_ENABLE 4:4
+#define NVB097_SET_CT_WRITE_G_ENABLE_FALSE 0x00000000
+#define NVB097_SET_CT_WRITE_G_ENABLE_TRUE 0x00000001
+#define NVB097_SET_CT_WRITE_B_ENABLE 8:8
+#define NVB097_SET_CT_WRITE_B_ENABLE_FALSE 0x00000000
+#define NVB097_SET_CT_WRITE_B_ENABLE_TRUE 0x00000001
+#define NVB097_SET_CT_WRITE_A_ENABLE 12:12
+#define NVB097_SET_CT_WRITE_A_ENABLE_FALSE 0x00000000
+#define NVB097_SET_CT_WRITE_A_ENABLE_TRUE 0x00000001
+
+#define NVB097_PIPE_NOP 0x1a2c
+#define NVB097_PIPE_NOP_V 31:0
+
+#define NVB097_SET_SPARE00 0x1a30
+#define NVB097_SET_SPARE00_V 31:0
+
+#define NVB097_SET_SPARE01 0x1a34
+#define NVB097_SET_SPARE01_V 31:0
+
+#define NVB097_SET_SPARE02 0x1a38
+#define NVB097_SET_SPARE02_V 31:0
+
+#define NVB097_SET_SPARE03 0x1a3c
+#define NVB097_SET_SPARE03_V 31:0
+
+#define NVB097_SET_REPORT_SEMAPHORE_A 0x1b00
+#define NVB097_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0
+
+#define NVB097_SET_REPORT_SEMAPHORE_B 0x1b04
+#define NVB097_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0
+
+#define NVB097_SET_REPORT_SEMAPHORE_C 0x1b08
+#define NVB097_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0
+
+#define NVB097_SET_REPORT_SEMAPHORE_D 0x1b0c
+#define NVB097_SET_REPORT_SEMAPHORE_D_OPERATION 1:0
+#define NVB097_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000
+#define NVB097_SET_REPORT_SEMAPHORE_D_OPERATION_ACQUIRE 0x00000001
+#define NVB097_SET_REPORT_SEMAPHORE_D_OPERATION_REPORT_ONLY 0x00000002
+#define NVB097_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003
+#define NVB097_SET_REPORT_SEMAPHORE_D_RELEASE 4:4
+#define NVB097_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_READS_COMPLETE 0x00000000
+#define NVB097_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_WRITES_COMPLETE 0x00000001
+#define NVB097_SET_REPORT_SEMAPHORE_D_ACQUIRE 8:8
+#define NVB097_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_WRITES_START 0x00000000
+#define NVB097_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_READS_START 0x00000001
+#define NVB097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION 15:12
+#define NVB097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_NONE 0x00000000
+#define NVB097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DATA_ASSEMBLER 0x00000001
+#define NVB097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VERTEX_SHADER 0x00000002
+#define NVB097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_INIT_SHADER 0x00000008
+#define NVB097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_SHADER 0x00000009
+#define NVB097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_GEOMETRY_SHADER 0x00000006
+#define NVB097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_STREAMING_OUTPUT 0x00000005
+#define NVB097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VPC 0x00000004
+#define NVB097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ZCULL 0x00000007
+#define NVB097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_PIXEL_SHADER 0x0000000A
+#define NVB097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DEPTH_TEST 0x0000000C
+#define NVB097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ALL 0x0000000F
+#define NVB097_SET_REPORT_SEMAPHORE_D_COMPARISON 16:16
+#define NVB097_SET_REPORT_SEMAPHORE_D_COMPARISON_EQ 0x00000000
+#define NVB097_SET_REPORT_SEMAPHORE_D_COMPARISON_GE 0x00000001
+#define NVB097_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20
+#define NVB097_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000
+#define NVB097_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT 27:23
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_NONE 0x00000000
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_DA_VERTICES_GENERATED 0x00000001
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_DA_PRIMITIVES_GENERATED 0x00000003
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_VS_INVOCATIONS 0x00000005
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_TI_INVOCATIONS 0x0000001B
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_TS_INVOCATIONS 0x0000001D
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_TS_PRIMITIVES_GENERATED 0x0000001F
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_GS_INVOCATIONS 0x00000007
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_GS_PRIMITIVES_GENERATED 0x00000009
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_ALPHA_BETA_CLOCKS 0x00000004
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_VTG_PRIMITIVES_OUT 0x00000012
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x0000001E
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_SUCCEEDED 0x0000000B
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED 0x0000000D
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000006
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_BYTE_COUNT 0x0000001A
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_INVOCATIONS 0x0000000F
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_PRIMITIVES_GENERATED 0x00000011
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS0 0x0000000A
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS1 0x0000000C
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS2 0x0000000E
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS3 0x00000010
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_PS_INVOCATIONS 0x00000013
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT 0x00000002
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT64 0x00000015
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_COLOR_TARGET 0x00000018
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_ZETA_TARGET 0x00000019
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_BOUNDING_RECTANGLE 0x0000001C
+#define NVB097_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28
+#define NVB097_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000
+#define NVB097_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001
+#define NVB097_SET_REPORT_SEMAPHORE_D_SUB_REPORT 7:5
+#define NVB097_SET_REPORT_SEMAPHORE_D_REPORT_DWORD_NUMBER 21:21
+#define NVB097_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2
+#define NVB097_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000
+#define NVB097_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001
+#define NVB097_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3
+#define NVB097_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000
+#define NVB097_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001
+#define NVB097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9
+#define NVB097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000
+#define NVB097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001
+#define NVB097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002
+#define NVB097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003
+#define NVB097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004
+#define NVB097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005
+#define NVB097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006
+#define NVB097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007
+#define NVB097_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17
+#define NVB097_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000
+#define NVB097_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001
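/*
 * [Editorial sketch] A semaphore report is programmed as four back-to-back
 * methods: A/B carry the upper 8 and lower 32 bits of the GPU VA, C the
 * payload, and D the operation word.  A release that waits for preceding
 * writes and stores a single word (PushMethod and FIELD_NUM as sketched
 * earlier):
 */
static void ReportSemaphoreRelease(unsigned long long gpuVa, unsigned int payload)
{
    PushMethod(NVB097_SET_REPORT_SEMAPHORE_A,
               FIELD_NUM(NVB097_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER,
                         (unsigned int)(gpuVa >> 32)));
    PushMethod(NVB097_SET_REPORT_SEMAPHORE_B, (unsigned int)gpuVa);
    PushMethod(NVB097_SET_REPORT_SEMAPHORE_C, payload);
    PushMethod(NVB097_SET_REPORT_SEMAPHORE_D,
               FIELD_NUM(NVB097_SET_REPORT_SEMAPHORE_D_OPERATION,
                         NVB097_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE) |
               FIELD_NUM(NVB097_SET_REPORT_SEMAPHORE_D_RELEASE,
                         NVB097_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_WRITES_COMPLETE) |
               FIELD_NUM(NVB097_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE,
                         NVB097_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD));
}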
+
+#define NVB097_SET_VERTEX_STREAM_A_FORMAT(j) (0x1c00+(j)*16)
+#define NVB097_SET_VERTEX_STREAM_A_FORMAT_STRIDE 11:0
+#define NVB097_SET_VERTEX_STREAM_A_FORMAT_ENABLE 12:12
+#define NVB097_SET_VERTEX_STREAM_A_FORMAT_ENABLE_FALSE 0x00000000
+#define NVB097_SET_VERTEX_STREAM_A_FORMAT_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_VERTEX_STREAM_A_LOCATION_A(j) (0x1c04+(j)*16)
+#define NVB097_SET_VERTEX_STREAM_A_LOCATION_A_OFFSET_UPPER 7:0
+
+#define NVB097_SET_VERTEX_STREAM_A_LOCATION_B(j) (0x1c08+(j)*16)
+#define NVB097_SET_VERTEX_STREAM_A_LOCATION_B_OFFSET_LOWER 31:0
+
+#define NVB097_SET_VERTEX_STREAM_A_FREQUENCY(j) (0x1c0c+(j)*16)
+#define NVB097_SET_VERTEX_STREAM_A_FREQUENCY_V 31:0
+
+#define NVB097_SET_VERTEX_STREAM_B_FORMAT(j) (0x1d00+(j)*16)
+#define NVB097_SET_VERTEX_STREAM_B_FORMAT_STRIDE 11:0
+#define NVB097_SET_VERTEX_STREAM_B_FORMAT_ENABLE 12:12
+#define NVB097_SET_VERTEX_STREAM_B_FORMAT_ENABLE_FALSE 0x00000000
+#define NVB097_SET_VERTEX_STREAM_B_FORMAT_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_VERTEX_STREAM_B_LOCATION_A(j) (0x1d04+(j)*16)
+#define NVB097_SET_VERTEX_STREAM_B_LOCATION_A_OFFSET_UPPER 7:0
+
+#define NVB097_SET_VERTEX_STREAM_B_LOCATION_B(j) (0x1d08+(j)*16)
+#define NVB097_SET_VERTEX_STREAM_B_LOCATION_B_OFFSET_LOWER 31:0
+
+#define NVB097_SET_VERTEX_STREAM_B_FREQUENCY(j) (0x1d0c+(j)*16)
+#define NVB097_SET_VERTEX_STREAM_B_FREQUENCY_V 31:0
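/*
 * [Editorial sketch] Array methods pair an index with a stride: vertex
 * stream j's four registers start at 0x1c00 + j*16.  Enabling stream 2 with
 * a 24-byte vertex stride (PushMethod and FIELD_NUM as sketched earlier):
 */
static void EnableVertexStream2(void)
{
    PushMethod(NVB097_SET_VERTEX_STREAM_A_FORMAT(2),      /* 0x1c00 + 2*16 */
               FIELD_NUM(NVB097_SET_VERTEX_STREAM_A_FORMAT_STRIDE, 24) |
               FIELD_NUM(NVB097_SET_VERTEX_STREAM_A_FORMAT_ENABLE,
                         NVB097_SET_VERTEX_STREAM_A_FORMAT_ENABLE_TRUE));
}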
+
+#define NVB097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA(j) (0x1e00+(j)*32)
+#define NVB097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE 0:0
+#define NVB097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000
+#define NVB097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_OP(j) (0x1e04+(j)*32)
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_OP_V 31:0
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MIN 0x00008007
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MAX 0x00008008
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_ADD 0x00000001
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_SUBTRACT 0x00000002
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MIN 0x00000004
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MAX 0x00000005
+
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF(j) (0x1e08+(j)*32)
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V 31:0
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF(j) (0x1e0c+(j)*32)
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V 31:0
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVB097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_OP(j) (0x1e10+(j)*32)
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_OP_V 31:0
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MIN 0x00008007
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MAX 0x00008008
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_ADD 0x00000001
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_SUBTRACT 0x00000002
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MIN 0x00000004
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MAX 0x00000005
+
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF(j) (0x1e14+(j)*32)
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V 31:0
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF(j) (0x1e18+(j)*32)
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V 31:0
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVB097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVB097_SET_VERTEX_STREAM_LIMIT_A_A(j) (0x1f00+(j)*8) +#define NVB097_SET_VERTEX_STREAM_LIMIT_A_A_UPPER 7:0 + +#define NVB097_SET_VERTEX_STREAM_LIMIT_A_B(j) (0x1f04+(j)*8) +#define NVB097_SET_VERTEX_STREAM_LIMIT_A_B_LOWER 31:0 + +#define NVB097_SET_VERTEX_STREAM_LIMIT_B_A(j) (0x1f80+(j)*8) +#define NVB097_SET_VERTEX_STREAM_LIMIT_B_A_UPPER 7:0 + +#define NVB097_SET_VERTEX_STREAM_LIMIT_B_B(j) (0x1f84+(j)*8) +#define NVB097_SET_VERTEX_STREAM_LIMIT_B_B_LOWER 31:0 + +#define NVB097_SET_PIPELINE_SHADER(j) (0x2000+(j)*64) +#define NVB097_SET_PIPELINE_SHADER_ENABLE 0:0 +#define NVB097_SET_PIPELINE_SHADER_ENABLE_FALSE 0x00000000 +#define NVB097_SET_PIPELINE_SHADER_ENABLE_TRUE 0x00000001 +#define NVB097_SET_PIPELINE_SHADER_TYPE 7:4 +#define NVB097_SET_PIPELINE_SHADER_TYPE_VERTEX_CULL_BEFORE_FETCH 0x00000000 +#define NVB097_SET_PIPELINE_SHADER_TYPE_VERTEX 0x00000001 +#define NVB097_SET_PIPELINE_SHADER_TYPE_TESSELLATION_INIT 0x00000002 +#define NVB097_SET_PIPELINE_SHADER_TYPE_TESSELLATION 0x00000003 +#define NVB097_SET_PIPELINE_SHADER_TYPE_GEOMETRY 0x00000004 +#define NVB097_SET_PIPELINE_SHADER_TYPE_PIXEL 0x00000005 + +#define NVB097_SET_PIPELINE_PROGRAM(j) (0x2004+(j)*64) +#define NVB097_SET_PIPELINE_PROGRAM_OFFSET 31:0 + +#define NVB097_SET_PIPELINE_RESERVED_A(j) (0x2008+(j)*64) +#define NVB097_SET_PIPELINE_RESERVED_A_V 0:0 + +#define NVB097_SET_PIPELINE_REGISTER_COUNT(j) (0x200c+(j)*64) +#define NVB097_SET_PIPELINE_REGISTER_COUNT_V 7:0 + +#define NVB097_SET_PIPELINE_BINDING(j) (0x2010+(j)*64) +#define NVB097_SET_PIPELINE_BINDING_GROUP 2:0 + +#define NVB097_SET_PIPELINE_RESERVED_B(j) (0x2014+(j)*64) +#define NVB097_SET_PIPELINE_RESERVED_B_V 0:0 + +#define NVB097_SET_PIPELINE_RESERVED_C(j) (0x2018+(j)*64) +#define NVB097_SET_PIPELINE_RESERVED_C_V 0:0 + +#define NVB097_SET_PIPELINE_RESERVED_D(j) (0x201c+(j)*64) +#define NVB097_SET_PIPELINE_RESERVED_D_V 0:0 + +#define NVB097_SET_PIPELINE_RESERVED_E(j) (0x2020+(j)*64) +#define NVB097_SET_PIPELINE_RESERVED_E_V 0:0 + +#define NVB097_SET_FALCON00 0x2300 +#define NVB097_SET_FALCON00_V 31:0 + +#define NVB097_SET_FALCON01 0x2304 +#define NVB097_SET_FALCON01_V 31:0 + +#define NVB097_SET_FALCON02 0x2308 +#define NVB097_SET_FALCON02_V 31:0 + +#define NVB097_SET_FALCON03 0x230c +#define NVB097_SET_FALCON03_V 31:0 + +#define NVB097_SET_FALCON04 0x2310 +#define NVB097_SET_FALCON04_V 31:0 + +#define NVB097_SET_FALCON05 0x2314 +#define NVB097_SET_FALCON05_V 31:0 + +#define NVB097_SET_FALCON06 0x2318 +#define NVB097_SET_FALCON06_V 31:0 + +#define NVB097_SET_FALCON07 0x231c +#define NVB097_SET_FALCON07_V 31:0 + +#define NVB097_SET_FALCON08 0x2320 +#define NVB097_SET_FALCON08_V 31:0 + +#define NVB097_SET_FALCON09 0x2324 +#define NVB097_SET_FALCON09_V 31:0 + +#define NVB097_SET_FALCON10 0x2328 +#define NVB097_SET_FALCON10_V 31:0 + 
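The indexed methods above follow fixed strides: each NVB097_SET_BLEND_PER_TARGET_* group advances 32 bytes (room for eight 32-bit methods) per render target, and each NVB097_SET_PIPELINE_* group advances 64 bytes per pipeline slot. A minimal sketch of how the byte offsets fall out of these macros; the helper name nvb097_method_addr and the offset/4 dword convention are illustrative assumptions, not part of this header:

    #include <assert.h>
    #include <stdint.h>

    /* Copied from the tables above: per-slot pipeline methods stride
     * 64 bytes, per-render-target blend methods stride 32 bytes. */
    #define NVB097_SET_PIPELINE_SHADER(j)           (0x2000+(j)*64)
    #define NVB097_SET_BLEND_PER_TARGET_ALPHA_OP(j) (0x1e10+(j)*32)

    /* Illustrative assumption: methods are 32-bit, so a dword-granular
     * method address is the byte offset divided by 4. */
    static uint32_t nvb097_method_addr(uint32_t byte_offset)
    {
        return byte_offset / 4;
    }

    int main(void)
    {
        assert(NVB097_SET_PIPELINE_SHADER(1) == 0x2040);           /* 0x2000 + 1*64 */
        assert(NVB097_SET_BLEND_PER_TARGET_ALPHA_OP(2) == 0x1e50); /* 0x1e10 + 2*32 */
        assert(nvb097_method_addr(NVB097_SET_PIPELINE_SHADER(0)) == 0x800);
        return 0;
    }
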
+#define NVB097_SET_FALCON11 0x232c +#define NVB097_SET_FALCON11_V 31:0 + +#define NVB097_SET_FALCON12 0x2330 +#define NVB097_SET_FALCON12_V 31:0 + +#define NVB097_SET_FALCON13 0x2334 +#define NVB097_SET_FALCON13_V 31:0 + +#define NVB097_SET_FALCON14 0x2338 +#define NVB097_SET_FALCON14_V 31:0 + +#define NVB097_SET_FALCON15 0x233c +#define NVB097_SET_FALCON15_V 31:0 + +#define NVB097_SET_FALCON16 0x2340 +#define NVB097_SET_FALCON16_V 31:0 + +#define NVB097_SET_FALCON17 0x2344 +#define NVB097_SET_FALCON17_V 31:0 + +#define NVB097_SET_FALCON18 0x2348 +#define NVB097_SET_FALCON18_V 31:0 + +#define NVB097_SET_FALCON19 0x234c +#define NVB097_SET_FALCON19_V 31:0 + +#define NVB097_SET_FALCON20 0x2350 +#define NVB097_SET_FALCON20_V 31:0 + +#define NVB097_SET_FALCON21 0x2354 +#define NVB097_SET_FALCON21_V 31:0 + +#define NVB097_SET_FALCON22 0x2358 +#define NVB097_SET_FALCON22_V 31:0 + +#define NVB097_SET_FALCON23 0x235c +#define NVB097_SET_FALCON23_V 31:0 + +#define NVB097_SET_FALCON24 0x2360 +#define NVB097_SET_FALCON24_V 31:0 + +#define NVB097_SET_FALCON25 0x2364 +#define NVB097_SET_FALCON25_V 31:0 + +#define NVB097_SET_FALCON26 0x2368 +#define NVB097_SET_FALCON26_V 31:0 + +#define NVB097_SET_FALCON27 0x236c +#define NVB097_SET_FALCON27_V 31:0 + +#define NVB097_SET_FALCON28 0x2370 +#define NVB097_SET_FALCON28_V 31:0 + +#define NVB097_SET_FALCON29 0x2374 +#define NVB097_SET_FALCON29_V 31:0 + +#define NVB097_SET_FALCON30 0x2378 +#define NVB097_SET_FALCON30_V 31:0 + +#define NVB097_SET_FALCON31 0x237c +#define NVB097_SET_FALCON31_V 31:0 + +#define NVB097_SET_CONSTANT_BUFFER_SELECTOR_A 0x2380 +#define NVB097_SET_CONSTANT_BUFFER_SELECTOR_A_SIZE 16:0 + +#define NVB097_SET_CONSTANT_BUFFER_SELECTOR_B 0x2384 +#define NVB097_SET_CONSTANT_BUFFER_SELECTOR_B_ADDRESS_UPPER 7:0 + +#define NVB097_SET_CONSTANT_BUFFER_SELECTOR_C 0x2388 +#define NVB097_SET_CONSTANT_BUFFER_SELECTOR_C_ADDRESS_LOWER 31:0 + +#define NVB097_LOAD_CONSTANT_BUFFER_OFFSET 0x238c +#define NVB097_LOAD_CONSTANT_BUFFER_OFFSET_V 15:0 + +#define NVB097_LOAD_CONSTANT_BUFFER(i) (0x2390+(i)*4) +#define NVB097_LOAD_CONSTANT_BUFFER_V 31:0 + +#define NVB097_BIND_GROUP_RESERVED_A(j) (0x2400+(j)*32) +#define NVB097_BIND_GROUP_RESERVED_A_V 0:0 + +#define NVB097_BIND_GROUP_RESERVED_B(j) (0x2404+(j)*32) +#define NVB097_BIND_GROUP_RESERVED_B_V 0:0 + +#define NVB097_BIND_GROUP_RESERVED_C(j) (0x2408+(j)*32) +#define NVB097_BIND_GROUP_RESERVED_C_V 0:0 + +#define NVB097_BIND_GROUP_RESERVED_D(j) (0x240c+(j)*32) +#define NVB097_BIND_GROUP_RESERVED_D_V 0:0 + +#define NVB097_BIND_GROUP_CONSTANT_BUFFER(j) (0x2410+(j)*32) +#define NVB097_BIND_GROUP_CONSTANT_BUFFER_VALID 0:0 +#define NVB097_BIND_GROUP_CONSTANT_BUFFER_VALID_FALSE 0x00000000 +#define NVB097_BIND_GROUP_CONSTANT_BUFFER_VALID_TRUE 0x00000001 +#define NVB097_BIND_GROUP_CONSTANT_BUFFER_SHADER_SLOT 8:4 + +#define NVB097_SET_COLOR_CLAMP 0x2600 +#define NVB097_SET_COLOR_CLAMP_ENABLE 0:0 +#define NVB097_SET_COLOR_CLAMP_ENABLE_FALSE 0x00000000 +#define NVB097_SET_COLOR_CLAMP_ENABLE_TRUE 0x00000001 + +#define NVB097_SET_BINDLESS_TEXTURE 0x2608 +#define NVB097_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 4:0 + +#define NVB097_SET_TRAP_HANDLER 0x260c +#define NVB097_SET_TRAP_HANDLER_OFFSET 31:0 + +#define NVB097_SET_STREAM_OUT_LAYOUT_SELECT(i,j) (0x2800+(i)*128+(j)*4) +#define NVB097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER00 7:0 +#define NVB097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER01 15:8 +#define NVB097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER02 23:16 +#define 
NVB097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER03 31:24
+
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4)
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0
+
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4)
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0
+
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4)
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0
+
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4)
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30
+
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4)
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4
+
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc
+#define NVB097_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0
+
+#define NVB097_START_SHADER_PERFORMANCE_COUNTER 0x33e0
+#define NVB097_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVB097_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4
+#define NVB097_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVB097_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4)
+#define NVB097_SET_MME_SHADOW_SCRATCH_V 31:0
+
+#define NVB097_CALL_MME_MACRO(j) (0x3800+(j)*8)
+#define NVB097_CALL_MME_MACRO_V 31:0
+
+#define NVB097_CALL_MME_DATA(j) (0x3804+(j)*8)
+#define NVB097_CALL_MME_DATA_V 31:0
+
+#endif /* _cl_maxwell_a_h_ */
diff --git a/src/common/sdk/nvidia/inc/class/clb097tex.h b/src/common/sdk/nvidia/inc/class/clb097tex.h
new file mode 100644
index 0000000..e5543e3
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clb097tex.h
@@ -0,0 +1,2050 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/* AUTO GENERATED FILE -- DO NOT EDIT */
+
+#ifndef __CLB097TEX_H__
+#define __CLB097TEX_H__
+
+/*
+** Texture Header State
+ */
+
+#define NVB097_TEXHEAD0_COMPONENT_SIZES 5:0
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_R32_G32_B32_A32 0x00000001
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_R32_G32_B32 0x00000002
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_R16_G16_B16_A16 0x00000003
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_R32_G32 0x00000004
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_R32_B24G8 0x00000005
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_X8B8G8R8 0x00000007
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_A8B8G8R8 0x00000008
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_A2B10G10R10 0x00000009
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_R16_G16 0x0000000c
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_G8R24 0x0000000d
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_G24R8 0x0000000e
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_R32 0x0000000f
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_A4B4G4R4 0x00000012
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_A5B5G5R1 0x00000013
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_A1B5G5R5 0x00000014
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_B5G6R5 0x00000015
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_B6G5R5 0x00000016
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_G8R8 0x00000018
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_R16 0x0000001b
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_Y8_VIDEO 0x0000001c
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_R8 0x0000001d
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_G4R4 0x0000001e
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_R1 0x0000001f
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_BF10GF11RF11 0x00000021
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_G8B8G8R8 0x00000022
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_B8G8R8G8 0x00000023
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_DXT1 0x00000024
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_DXT23 0x00000025
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_DXT45 0x00000026
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_DXN1 0x00000027
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_DXN2 0x00000028
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_BC6H_SF16 0x00000010
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_BC6H_UF16 0x00000011
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_BC7U 0x00000017
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_ETC2_RGB 0x00000006
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_ETC2_RGB_PTA 0x0000000a
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_ETC2_RGBA 0x0000000b
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_EAC 0x00000019
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_EACX2 0x0000001a
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_Z24S8 0x00000029
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_X8Z24 0x0000002a
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_S8Z24 0x0000002b
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_X4V4Z24__COV4R4V 0x0000002c
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_X4V4Z24__COV8R8V 0x0000002d
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_V8Z24__COV4R12V 0x0000002e
+#define NVB097_TEXHEAD0_COMPONENT_SIZES_ZF32 0x0000002f
+#define
NVB097_TEXHEAD0_COMPONENT_SIZES_ZF32_X24S8 0x00000030 +#define NVB097_TEXHEAD0_COMPONENT_SIZES_X8Z24_X20V4S8__COV4R4V 0x00000031 +#define NVB097_TEXHEAD0_COMPONENT_SIZES_X8Z24_X20V4S8__COV8R8V 0x00000032 +#define NVB097_TEXHEAD0_COMPONENT_SIZES_ZF32_X20V4X8__COV4R4V 0x00000033 +#define NVB097_TEXHEAD0_COMPONENT_SIZES_ZF32_X20V4X8__COV8R8V 0x00000034 +#define NVB097_TEXHEAD0_COMPONENT_SIZES_ZF32_X20V4S8__COV4R4V 0x00000035 +#define NVB097_TEXHEAD0_COMPONENT_SIZES_ZF32_X20V4S8__COV8R8V 0x00000036 +#define NVB097_TEXHEAD0_COMPONENT_SIZES_X8Z24_X16V8S8__COV4R12V 0x00000037 +#define NVB097_TEXHEAD0_COMPONENT_SIZES_ZF32_X16V8X8__COV4R12V 0x00000038 +#define NVB097_TEXHEAD0_COMPONENT_SIZES_ZF32_X16V8S8__COV4R12V 0x00000039 +#define NVB097_TEXHEAD0_COMPONENT_SIZES_Z16 0x0000003a +#define NVB097_TEXHEAD0_COMPONENT_SIZES_V8Z24__COV8R24V 0x0000003b +#define NVB097_TEXHEAD0_COMPONENT_SIZES_X8Z24_X16V8S8__COV8R24V 0x0000003c +#define NVB097_TEXHEAD0_COMPONENT_SIZES_ZF32_X16V8X8__COV8R24V 0x0000003d +#define NVB097_TEXHEAD0_COMPONENT_SIZES_ZF32_X16V8S8__COV8R24V 0x0000003e +#define NVB097_TEXHEAD0_COMPONENT_SIZES_CS_BITFIELD_SIZE 0x0000003f +#define NVB097_TEXHEAD0_R_DATA_TYPE 8:6 +#define NVB097_TEXHEAD0_R_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVB097_TEXHEAD0_R_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVB097_TEXHEAD0_R_DATA_TYPE_NUM_SINT 0x00000003 +#define NVB097_TEXHEAD0_R_DATA_TYPE_NUM_UINT 0x00000004 +#define NVB097_TEXHEAD0_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVB097_TEXHEAD0_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVB097_TEXHEAD0_R_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVB097_TEXHEAD0_G_DATA_TYPE 11:9 +#define NVB097_TEXHEAD0_G_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVB097_TEXHEAD0_G_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVB097_TEXHEAD0_G_DATA_TYPE_NUM_SINT 0x00000003 +#define NVB097_TEXHEAD0_G_DATA_TYPE_NUM_UINT 0x00000004 +#define NVB097_TEXHEAD0_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVB097_TEXHEAD0_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVB097_TEXHEAD0_G_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVB097_TEXHEAD0_B_DATA_TYPE 14:12 +#define NVB097_TEXHEAD0_B_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVB097_TEXHEAD0_B_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVB097_TEXHEAD0_B_DATA_TYPE_NUM_SINT 0x00000003 +#define NVB097_TEXHEAD0_B_DATA_TYPE_NUM_UINT 0x00000004 +#define NVB097_TEXHEAD0_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVB097_TEXHEAD0_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVB097_TEXHEAD0_B_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVB097_TEXHEAD0_A_DATA_TYPE 17:15 +#define NVB097_TEXHEAD0_A_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVB097_TEXHEAD0_A_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVB097_TEXHEAD0_A_DATA_TYPE_NUM_SINT 0x00000003 +#define NVB097_TEXHEAD0_A_DATA_TYPE_NUM_UINT 0x00000004 +#define NVB097_TEXHEAD0_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVB097_TEXHEAD0_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVB097_TEXHEAD0_A_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVB097_TEXHEAD0_X_SOURCE 20:18 +#define NVB097_TEXHEAD0_X_SOURCE_IN_ZERO 0x00000000 +#define NVB097_TEXHEAD0_X_SOURCE_IN_R 0x00000002 +#define NVB097_TEXHEAD0_X_SOURCE_IN_G 0x00000003 +#define NVB097_TEXHEAD0_X_SOURCE_IN_B 0x00000004 +#define NVB097_TEXHEAD0_X_SOURCE_IN_A 0x00000005 +#define NVB097_TEXHEAD0_X_SOURCE_IN_ONE_INT 0x00000006 +#define NVB097_TEXHEAD0_X_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVB097_TEXHEAD0_Y_SOURCE 23:21 +#define NVB097_TEXHEAD0_Y_SOURCE_IN_ZERO 0x00000000 +#define 
NVB097_TEXHEAD0_Y_SOURCE_IN_R 0x00000002 +#define NVB097_TEXHEAD0_Y_SOURCE_IN_G 0x00000003 +#define NVB097_TEXHEAD0_Y_SOURCE_IN_B 0x00000004 +#define NVB097_TEXHEAD0_Y_SOURCE_IN_A 0x00000005 +#define NVB097_TEXHEAD0_Y_SOURCE_IN_ONE_INT 0x00000006 +#define NVB097_TEXHEAD0_Y_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVB097_TEXHEAD0_Z_SOURCE 26:24 +#define NVB097_TEXHEAD0_Z_SOURCE_IN_ZERO 0x00000000 +#define NVB097_TEXHEAD0_Z_SOURCE_IN_R 0x00000002 +#define NVB097_TEXHEAD0_Z_SOURCE_IN_G 0x00000003 +#define NVB097_TEXHEAD0_Z_SOURCE_IN_B 0x00000004 +#define NVB097_TEXHEAD0_Z_SOURCE_IN_A 0x00000005 +#define NVB097_TEXHEAD0_Z_SOURCE_IN_ONE_INT 0x00000006 +#define NVB097_TEXHEAD0_Z_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVB097_TEXHEAD0_W_SOURCE 29:27 +#define NVB097_TEXHEAD0_W_SOURCE_IN_ZERO 0x00000000 +#define NVB097_TEXHEAD0_W_SOURCE_IN_R 0x00000002 +#define NVB097_TEXHEAD0_W_SOURCE_IN_G 0x00000003 +#define NVB097_TEXHEAD0_W_SOURCE_IN_B 0x00000004 +#define NVB097_TEXHEAD0_W_SOURCE_IN_A 0x00000005 +#define NVB097_TEXHEAD0_W_SOURCE_IN_ONE_INT 0x00000006 +#define NVB097_TEXHEAD0_W_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVB097_TEXHEAD0_PACK_COMPONENTS 30:30 +#define NVB097_TEXHEAD0_USE_COMPONENT_SIZES_EXTENDED 31:31 +#define NVB097_TEXHEAD1_OFFSET_LOWER 31:0 +#define NVB097_TEXHEAD2_OFFSET_UPPER 7:0 +#define NVB097_TEXHEAD2_ANISO_SPREAD_MAX_LOG2_L_S_B 9:8 +#define NVB097_TEXHEAD2_S_R_G_B_CONVERSION 10:10 +#define NVB097_TEXHEAD2_ANISO_SPREAD_MAX_LOG2_M_S_B 11:11 +#define NVB097_TEXHEAD2_LOD_ANISO_QUALITY2 12:12 +#define NVB097_TEXHEAD2_COLOR_KEY_OP 13:13 +#define NVB097_TEXHEAD2_TEXTURE_TYPE 17:14 +#define NVB097_TEXHEAD2_TEXTURE_TYPE_ONE_D 0x00000000 +#define NVB097_TEXHEAD2_TEXTURE_TYPE_TWO_D 0x00000001 +#define NVB097_TEXHEAD2_TEXTURE_TYPE_THREE_D 0x00000002 +#define NVB097_TEXHEAD2_TEXTURE_TYPE_CUBEMAP 0x00000003 +#define NVB097_TEXHEAD2_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004 +#define NVB097_TEXHEAD2_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005 +#define NVB097_TEXHEAD2_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006 +#define NVB097_TEXHEAD2_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007 +#define NVB097_TEXHEAD2_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008 +#define NVB097_TEXHEAD2_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f +#define NVB097_TEXHEAD2_MEMORY_LAYOUT 18:18 +#define NVB097_TEXHEAD2_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVB097_TEXHEAD2_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_WIDTH 21:19 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_WIDTH_ONE_GOB 0x00000000 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_HEIGHT 24:22 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_HEIGHT_ONE_GOB 0x00000000 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_HEIGHT_TWO_GOBS 0x00000001 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_HEIGHT_FOUR_GOBS 0x00000002 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_DEPTH 27:25 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_DEPTH_ONE_GOB 0x00000000 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_DEPTH_TWO_GOBS 0x00000001 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_DEPTH_FOUR_GOBS 0x00000002 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_DEPTH_EIGHT_GOBS 0x00000003 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVB097_TEXHEAD2_GOBS_PER_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005 +#define NVB097_TEXHEAD2_SECTOR_PROMOTION 29:28 +#define NVB097_TEXHEAD2_SECTOR_PROMOTION_NO_PROMOTION 0x00000000 +#define 
NVB097_TEXHEAD2_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001 +#define NVB097_TEXHEAD2_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002 +#define NVB097_TEXHEAD2_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003 +#define NVB097_TEXHEAD2_BORDER_SOURCE 30:30 +#define NVB097_TEXHEAD2_BORDER_SOURCE_BORDER_TEXTURE 0x00000000 +#define NVB097_TEXHEAD2_BORDER_SOURCE_BORDER_COLOR 0x00000001 +#define NVB097_TEXHEAD2_NORMALIZED_COORDS 31:31 +#define NVB097_TEXHEAD3_PITCH 19:0 +#define NVB097_TEXHEAD3_LOD_ANISO_QUALITY 20:20 +#define NVB097_TEXHEAD3_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000 +#define NVB097_TEXHEAD3_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001 +#define NVB097_TEXHEAD3_LOD_ISO_QUALITY 21:21 +#define NVB097_TEXHEAD3_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000 +#define NVB097_TEXHEAD3_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001 +#define NVB097_TEXHEAD3_ANISO_COARSE_SPREAD_MODIFIER 23:22 +#define NVB097_TEXHEAD3_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000 +#define NVB097_TEXHEAD3_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001 +#define NVB097_TEXHEAD3_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002 +#define NVB097_TEXHEAD3_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003 +#define NVB097_TEXHEAD3_ANISO_SPREAD_SCALE 28:24 +#define NVB097_TEXHEAD3_USE_HEADER_OPT_CONTROL 29:29 +#define NVB097_TEXHEAD3_RESERVED3A 30:30 +#define NVB097_TEXHEAD3_RESERVED3B 31:31 +#define NVB097_TEXHEAD4_WIDTH 29:0 +#define NVB097_TEXHEAD4_DEPTH_TEXTURE 30:30 +#define NVB097_TEXHEAD4_USE_TEXTURE_HEADER_VERSION2 31:31 +#define NVB097_TEXHEAD5_HEIGHT 15:0 +#define NVB097_TEXHEAD5_DEPTH 27:16 +#define NVB097_TEXHEAD5_MAX_MIP_LEVEL 31:28 +#define NVB097_TEXHEAD6_TRILIN_OPT 4:0 +#define NVB097_TEXHEAD6_MIP_LOD_BIAS 17:5 +#define NVB097_TEXHEAD6_RESERVED6A 18:18 +#define NVB097_TEXHEAD6_ANISO_BIAS 22:19 +#define NVB097_TEXHEAD6_ANISO_FINE_SPREAD_FUNC 24:23 +#define NVB097_TEXHEAD6_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000 +#define NVB097_TEXHEAD6_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001 +#define NVB097_TEXHEAD6_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002 +#define NVB097_TEXHEAD6_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003 +#define NVB097_TEXHEAD6_ANISO_COARSE_SPREAD_FUNC 26:25 +#define NVB097_TEXHEAD6_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000 +#define NVB097_TEXHEAD6_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001 +#define NVB097_TEXHEAD6_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002 +#define NVB097_TEXHEAD6_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003 +#define NVB097_TEXHEAD6_MAX_ANISOTROPY 29:27 +#define NVB097_TEXHEAD6_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000 +#define NVB097_TEXHEAD6_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001 +#define NVB097_TEXHEAD6_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002 +#define NVB097_TEXHEAD6_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003 +#define NVB097_TEXHEAD6_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004 +#define NVB097_TEXHEAD6_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005 +#define NVB097_TEXHEAD6_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006 +#define NVB097_TEXHEAD6_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007 +#define NVB097_TEXHEAD6_ANISO_FINE_SPREAD_MODIFIER 31:30 +#define NVB097_TEXHEAD6_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000 +#define NVB097_TEXHEAD6_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001 +#define NVB097_TEXHEAD6_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002 +#define NVB097_TEXHEAD6_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003 +#define 
NVB097_TEXHEAD7_COLOR_KEY_VALUE 31:0 + + +/* +** Texture Header State Blocklinear + */ + +#define NVB097_TEXHEAD_BL_COMPONENTS MW(6:0) +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_R32_G32_B32 0x00000002 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_R32_G32 0x00000004 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_R32_B24G8 0x00000005 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_X8B8G8R8 0x00000007 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_A8B8G8R8 0x00000008 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_A2B10G10R10 0x00000009 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_R16_G16 0x0000000c +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_G8R24 0x0000000d +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_G24R8 0x0000000e +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_R32 0x0000000f +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_A4B4G4R4 0x00000012 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_A5B5G5R1 0x00000013 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_A1B5G5R5 0x00000014 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_B5G6R5 0x00000015 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_B6G5R5 0x00000016 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_G8R8 0x00000018 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_R16 0x0000001b +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_R8 0x0000001d +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_G4R4 0x0000001e +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_R1 0x0000001f +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_BF10GF11RF11 0x00000021 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_G8B8G8R8 0x00000022 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_B8G8R8G8 0x00000023 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_DXT1 0x00000024 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_DXT23 0x00000025 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_DXT45 0x00000026 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_DXN1 0x00000027 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_DXN2 0x00000028 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_BC6H_SF16 0x00000010 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_BC6H_UF16 0x00000011 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_BC7U 0x00000017 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ETC2_RGB 0x00000006 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_EAC 0x00000019 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_EACX2 0x0000001a +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_Z24S8 0x00000029 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_X8Z24 0x0000002a +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_S8Z24 0x0000002b +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_X4V4Z24__COV4R4V 0x0000002c +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_X4V4Z24__COV8R8V 0x0000002d +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_V8Z24__COV4R12V 0x0000002e +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32 0x0000002f +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X24S8 0x00000030 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_X8Z24_X20V4S8__COV4R4V 0x00000031 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_X8Z24_X20V4S8__COV8R8V 0x00000032 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X20V4X8__COV4R4V 0x00000033 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X20V4X8__COV8R8V 0x00000034 +#define 
NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X20V4S8__COV4R4V 0x00000035 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X20V4S8__COV8R8V 0x00000036 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_X8Z24_X16V8S8__COV4R12V 0x00000037 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X16V8X8__COV4R12V 0x00000038 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X16V8S8__COV4R12V 0x00000039 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_Z16 0x0000003a +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_V8Z24__COV8R24V 0x0000003b +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_X8Z24_X16V8S8__COV8R24V 0x0000003c +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X16V8X8__COV8R24V 0x0000003d +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X16V8S8__COV8R24V 0x0000003e +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046 +#define NVB097_TEXHEAD_BL_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f +#define NVB097_TEXHEAD_BL_R_DATA_TYPE MW(9:7) +#define NVB097_TEXHEAD_BL_R_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVB097_TEXHEAD_BL_R_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVB097_TEXHEAD_BL_R_DATA_TYPE_NUM_SINT 0x00000003 +#define NVB097_TEXHEAD_BL_R_DATA_TYPE_NUM_UINT 0x00000004 +#define NVB097_TEXHEAD_BL_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVB097_TEXHEAD_BL_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVB097_TEXHEAD_BL_R_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVB097_TEXHEAD_BL_G_DATA_TYPE MW(12:10) +#define NVB097_TEXHEAD_BL_G_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVB097_TEXHEAD_BL_G_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVB097_TEXHEAD_BL_G_DATA_TYPE_NUM_SINT 0x00000003 +#define NVB097_TEXHEAD_BL_G_DATA_TYPE_NUM_UINT 0x00000004 +#define NVB097_TEXHEAD_BL_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVB097_TEXHEAD_BL_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVB097_TEXHEAD_BL_G_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVB097_TEXHEAD_BL_B_DATA_TYPE MW(15:13) +#define NVB097_TEXHEAD_BL_B_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVB097_TEXHEAD_BL_B_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVB097_TEXHEAD_BL_B_DATA_TYPE_NUM_SINT 0x00000003 +#define NVB097_TEXHEAD_BL_B_DATA_TYPE_NUM_UINT 0x00000004 +#define NVB097_TEXHEAD_BL_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVB097_TEXHEAD_BL_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVB097_TEXHEAD_BL_B_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVB097_TEXHEAD_BL_A_DATA_TYPE MW(18:16) +#define NVB097_TEXHEAD_BL_A_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVB097_TEXHEAD_BL_A_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVB097_TEXHEAD_BL_A_DATA_TYPE_NUM_SINT 0x00000003 +#define NVB097_TEXHEAD_BL_A_DATA_TYPE_NUM_UINT 0x00000004 +#define 
NVB097_TEXHEAD_BL_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVB097_TEXHEAD_BL_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVB097_TEXHEAD_BL_A_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVB097_TEXHEAD_BL_X_SOURCE MW(21:19) +#define NVB097_TEXHEAD_BL_X_SOURCE_IN_ZERO 0x00000000 +#define NVB097_TEXHEAD_BL_X_SOURCE_IN_R 0x00000002 +#define NVB097_TEXHEAD_BL_X_SOURCE_IN_G 0x00000003 +#define NVB097_TEXHEAD_BL_X_SOURCE_IN_B 0x00000004 +#define NVB097_TEXHEAD_BL_X_SOURCE_IN_A 0x00000005 +#define NVB097_TEXHEAD_BL_X_SOURCE_IN_ONE_INT 0x00000006 +#define NVB097_TEXHEAD_BL_X_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVB097_TEXHEAD_BL_Y_SOURCE MW(24:22) +#define NVB097_TEXHEAD_BL_Y_SOURCE_IN_ZERO 0x00000000 +#define NVB097_TEXHEAD_BL_Y_SOURCE_IN_R 0x00000002 +#define NVB097_TEXHEAD_BL_Y_SOURCE_IN_G 0x00000003 +#define NVB097_TEXHEAD_BL_Y_SOURCE_IN_B 0x00000004 +#define NVB097_TEXHEAD_BL_Y_SOURCE_IN_A 0x00000005 +#define NVB097_TEXHEAD_BL_Y_SOURCE_IN_ONE_INT 0x00000006 +#define NVB097_TEXHEAD_BL_Y_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVB097_TEXHEAD_BL_Z_SOURCE MW(27:25) +#define NVB097_TEXHEAD_BL_Z_SOURCE_IN_ZERO 0x00000000 +#define NVB097_TEXHEAD_BL_Z_SOURCE_IN_R 0x00000002 +#define NVB097_TEXHEAD_BL_Z_SOURCE_IN_G 0x00000003 +#define NVB097_TEXHEAD_BL_Z_SOURCE_IN_B 0x00000004 +#define NVB097_TEXHEAD_BL_Z_SOURCE_IN_A 0x00000005 +#define NVB097_TEXHEAD_BL_Z_SOURCE_IN_ONE_INT 0x00000006 +#define NVB097_TEXHEAD_BL_Z_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVB097_TEXHEAD_BL_W_SOURCE MW(30:28) +#define NVB097_TEXHEAD_BL_W_SOURCE_IN_ZERO 0x00000000 +#define NVB097_TEXHEAD_BL_W_SOURCE_IN_R 0x00000002 +#define NVB097_TEXHEAD_BL_W_SOURCE_IN_G 0x00000003 +#define NVB097_TEXHEAD_BL_W_SOURCE_IN_B 0x00000004 +#define NVB097_TEXHEAD_BL_W_SOURCE_IN_A 0x00000005 +#define NVB097_TEXHEAD_BL_W_SOURCE_IN_ONE_INT 0x00000006 +#define NVB097_TEXHEAD_BL_W_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVB097_TEXHEAD_BL_PACK_COMPONENTS MW(31:31) +#define NVB097_TEXHEAD_BL_RESERVED1Y MW(36:32) +#define NVB097_TEXHEAD_BL_GOB_DEPTH_OFFSET MW(38:37) +#define NVB097_TEXHEAD_BL_RESERVED1X MW(40:39) +#define NVB097_TEXHEAD_BL_ADDRESS_BITS31TO9 MW(63:41) +#define NVB097_TEXHEAD_BL_ADDRESS_BITS47TO32 MW(79:64) +#define NVB097_TEXHEAD_BL_RESERVED_ADDRESS MW(84:80) +#define NVB097_TEXHEAD_BL_HEADER_VERSION MW(87:85) +#define NVB097_TEXHEAD_BL_HEADER_VERSION_SELECT_ONE_D_BUFFER 0x00000000 +#define NVB097_TEXHEAD_BL_HEADER_VERSION_SELECT_PITCH_COLOR_KEY 0x00000001 +#define NVB097_TEXHEAD_BL_HEADER_VERSION_SELECT_PITCH 0x00000002 +#define NVB097_TEXHEAD_BL_HEADER_VERSION_SELECT_BLOCKLINEAR 0x00000003 +#define NVB097_TEXHEAD_BL_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY 0x00000004 +#define NVB097_TEXHEAD_BL_RESERVED_HEADER_VERSION MW(88:88) +#define NVB097_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH MW(92:89) +#define NVB097_TEXHEAD_BL_RESERVED2A MW(95:93) +#define NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_WIDTH MW(98:96) +#define NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_WIDTH_ONE_GOB 0x00000000 +#define NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT MW(101:99) +#define NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_ONE_GOB 0x00000000 +#define NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_TWO_GOBS 0x00000001 +#define NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_FOUR_GOBS 0x00000002 +#define NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH MW(104:102) +#define 
NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_ONE_GOB 0x00000000 +#define NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_TWO_GOBS 0x00000001 +#define NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_FOUR_GOBS 0x00000002 +#define NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_EIGHT_GOBS 0x00000003 +#define NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVB097_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005 +#define NVB097_TEXHEAD_BL_SPARSE_ENABLE MW(105:105) +#define NVB097_TEXHEAD_BL_TILE_WIDTH_IN_GOBS MW(108:106) +#define NVB097_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_ONE_GOB 0x00000000 +#define NVB097_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_TWO_GOBS 0x00000001 +#define NVB097_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_FOUR_GOBS 0x00000002 +#define NVB097_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_EIGHT_GOBS 0x00000003 +#define NVB097_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_SIXTEEN_GOBS 0x00000004 +#define NVB097_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_THIRTYTWO_GOBS 0x00000005 +#define NVB097_TEXHEAD_BL_GOB3D MW(109:109) +#define NVB097_TEXHEAD_BL_USE_ARRAY_TILE_ALIGNMENT MW(110:110) +#define NVB097_TEXHEAD_BL_RESERVED3Z MW(111:111) +#define NVB097_TEXHEAD_BL_LOD_ANISO_QUALITY2 MW(112:112) +#define NVB097_TEXHEAD_BL_LOD_ANISO_QUALITY MW(113:113) +#define NVB097_TEXHEAD_BL_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000 +#define NVB097_TEXHEAD_BL_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001 +#define NVB097_TEXHEAD_BL_LOD_ISO_QUALITY MW(114:114) +#define NVB097_TEXHEAD_BL_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000 +#define NVB097_TEXHEAD_BL_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001 +#define NVB097_TEXHEAD_BL_ANISO_COARSE_SPREAD_MODIFIER MW(116:115) +#define NVB097_TEXHEAD_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000 +#define NVB097_TEXHEAD_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001 +#define NVB097_TEXHEAD_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002 +#define NVB097_TEXHEAD_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003 +#define NVB097_TEXHEAD_BL_ANISO_SPREAD_SCALE MW(121:117) +#define NVB097_TEXHEAD_BL_USE_HEADER_OPT_CONTROL MW(122:122) +#define NVB097_TEXHEAD_BL_DEPTH_TEXTURE MW(123:123) +#define NVB097_TEXHEAD_BL_MAX_MIP_LEVEL MW(127:124) +#define NVB097_TEXHEAD_BL_WIDTH_MINUS_ONE MW(143:128) +#define NVB097_TEXHEAD_BL_RESERVED4A MW(146:144) +#define NVB097_TEXHEAD_BL_ANISO_SPREAD_MAX_LOG2 MW(149:147) +#define NVB097_TEXHEAD_BL_S_R_G_B_CONVERSION MW(150:150) +#define NVB097_TEXHEAD_BL_TEXTURE_TYPE MW(154:151) +#define NVB097_TEXHEAD_BL_TEXTURE_TYPE_ONE_D 0x00000000 +#define NVB097_TEXHEAD_BL_TEXTURE_TYPE_TWO_D 0x00000001 +#define NVB097_TEXHEAD_BL_TEXTURE_TYPE_THREE_D 0x00000002 +#define NVB097_TEXHEAD_BL_TEXTURE_TYPE_CUBEMAP 0x00000003 +#define NVB097_TEXHEAD_BL_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004 +#define NVB097_TEXHEAD_BL_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005 +#define NVB097_TEXHEAD_BL_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006 +#define NVB097_TEXHEAD_BL_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007 +#define NVB097_TEXHEAD_BL_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008 +#define NVB097_TEXHEAD_BL_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f +#define NVB097_TEXHEAD_BL_SECTOR_PROMOTION MW(156:155) +#define NVB097_TEXHEAD_BL_SECTOR_PROMOTION_NO_PROMOTION 0x00000000 +#define NVB097_TEXHEAD_BL_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001 +#define NVB097_TEXHEAD_BL_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002 +#define NVB097_TEXHEAD_BL_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003 +#define NVB097_TEXHEAD_BL_BORDER_SIZE MW(159:157) +#define NVB097_TEXHEAD_BL_BORDER_SIZE_BORDER_SIZE_ONE 0x00000000 
+#define NVB097_TEXHEAD_BL_BORDER_SIZE_BORDER_SIZE_TWO 0x00000001 +#define NVB097_TEXHEAD_BL_BORDER_SIZE_BORDER_SIZE_FOUR 0x00000002 +#define NVB097_TEXHEAD_BL_BORDER_SIZE_BORDER_SIZE_EIGHT 0x00000003 +#define NVB097_TEXHEAD_BL_BORDER_SIZE_BORDER_SAMPLER_COLOR 0x00000007 +#define NVB097_TEXHEAD_BL_HEIGHT_MINUS_ONE MW(175:160) +#define NVB097_TEXHEAD_BL_DEPTH_MINUS_ONE MW(189:176) +#define NVB097_TEXHEAD_BL_RESERVED5A MW(190:190) +#define NVB097_TEXHEAD_BL_NORMALIZED_COORDS MW(191:191) +#define NVB097_TEXHEAD_BL_RESERVED6Y MW(192:192) +#define NVB097_TEXHEAD_BL_TRILIN_OPT MW(197:193) +#define NVB097_TEXHEAD_BL_MIP_LOD_BIAS MW(210:198) +#define NVB097_TEXHEAD_BL_ANISO_BIAS MW(214:211) +#define NVB097_TEXHEAD_BL_ANISO_FINE_SPREAD_FUNC MW(216:215) +#define NVB097_TEXHEAD_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000 +#define NVB097_TEXHEAD_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001 +#define NVB097_TEXHEAD_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002 +#define NVB097_TEXHEAD_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003 +#define NVB097_TEXHEAD_BL_ANISO_COARSE_SPREAD_FUNC MW(218:217) +#define NVB097_TEXHEAD_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000 +#define NVB097_TEXHEAD_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001 +#define NVB097_TEXHEAD_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002 +#define NVB097_TEXHEAD_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003 +#define NVB097_TEXHEAD_BL_MAX_ANISOTROPY MW(221:219) +#define NVB097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000 +#define NVB097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001 +#define NVB097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002 +#define NVB097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003 +#define NVB097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004 +#define NVB097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005 +#define NVB097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006 +#define NVB097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007 +#define NVB097_TEXHEAD_BL_ANISO_FINE_SPREAD_MODIFIER MW(223:222) +#define NVB097_TEXHEAD_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000 +#define NVB097_TEXHEAD_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001 +#define NVB097_TEXHEAD_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002 +#define NVB097_TEXHEAD_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003 +#define NVB097_TEXHEAD_BL_RES_VIEW_MIN_MIP_LEVEL MW(227:224) +#define NVB097_TEXHEAD_BL_RES_VIEW_MAX_MIP_LEVEL MW(231:228) +#define NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT MW(235:232) +#define NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_1X1 0x00000000 +#define NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X1 0x00000001 +#define NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X2 0x00000002 +#define NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X2 0x00000003 +#define NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X2_D3D 0x00000004 +#define NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X1_D3D 0x00000005 +#define NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X4 0x00000006 +#define NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X2_VC_4 0x00000008 +#define NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X2_VC_12 0x00000009 +#define NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X2_VC_8 0x0000000a +#define NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X2_VC_24 0x0000000b +#define NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X1_CENTER 0x0000000c +#define NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X2_CENTER 0x0000000d +#define 
NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X2_CENTER 0x0000000e +#define NVB097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X4_REGULAR 0x0000000f +#define NVB097_TEXHEAD_BL_MIN_LOD_CLAMP MW(247:236) +#define NVB097_TEXHEAD_BL_RESERVED7Y MW(255:248) + + +/* +** Texture Header State Blocklinear Color Key + */ + +#define NVB097_TEXHEAD_BLCK_COMPONENTS MW(6:0) +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_R32_G32_B32 0x00000002 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_R32_G32 0x00000004 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_R32_B24G8 0x00000005 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_X8B8G8R8 0x00000007 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_A8B8G8R8 0x00000008 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_A2B10G10R10 0x00000009 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_R16_G16 0x0000000c +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_G8R24 0x0000000d +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_G24R8 0x0000000e +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_R32 0x0000000f +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_A4B4G4R4 0x00000012 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_A5B5G5R1 0x00000013 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_A1B5G5R5 0x00000014 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_B5G6R5 0x00000015 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_B6G5R5 0x00000016 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_G8R8 0x00000018 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_R16 0x0000001b +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_R8 0x0000001d +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_G4R4 0x0000001e +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_R1 0x0000001f +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_BF10GF11RF11 0x00000021 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_G8B8G8R8 0x00000022 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_B8G8R8G8 0x00000023 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_DXT1 0x00000024 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_DXT23 0x00000025 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_DXT45 0x00000026 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_DXN1 0x00000027 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_DXN2 0x00000028 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_BC6H_SF16 0x00000010 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_BC6H_UF16 0x00000011 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_BC7U 0x00000017 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ETC2_RGB 0x00000006 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_EAC 0x00000019 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_EACX2 0x0000001a +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_Z24S8 0x00000029 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_X8Z24 0x0000002a +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_S8Z24 0x0000002b +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_X4V4Z24__COV4R4V 0x0000002c +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_X4V4Z24__COV8R8V 0x0000002d +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_V8Z24__COV4R12V 0x0000002e +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32 0x0000002f +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X24S8 0x00000030 +#define 
NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_X8Z24_X20V4S8__COV4R4V 0x00000031 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_X8Z24_X20V4S8__COV8R8V 0x00000032 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X20V4X8__COV4R4V 0x00000033 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X20V4X8__COV8R8V 0x00000034 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X20V4S8__COV4R4V 0x00000035 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X20V4S8__COV8R8V 0x00000036 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_X8Z24_X16V8S8__COV4R12V 0x00000037 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X16V8X8__COV4R12V 0x00000038 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X16V8S8__COV4R12V 0x00000039 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_Z16 0x0000003a +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_V8Z24__COV8R24V 0x0000003b +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_X8Z24_X16V8S8__COV8R24V 0x0000003c +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X16V8X8__COV8R24V 0x0000003d +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X16V8S8__COV8R24V 0x0000003e +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046 +#define NVB097_TEXHEAD_BLCK_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f +#define NVB097_TEXHEAD_BLCK_R_DATA_TYPE MW(9:7) +#define NVB097_TEXHEAD_BLCK_R_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVB097_TEXHEAD_BLCK_R_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVB097_TEXHEAD_BLCK_R_DATA_TYPE_NUM_SINT 0x00000003 +#define NVB097_TEXHEAD_BLCK_R_DATA_TYPE_NUM_UINT 0x00000004 +#define NVB097_TEXHEAD_BLCK_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVB097_TEXHEAD_BLCK_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVB097_TEXHEAD_BLCK_R_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVB097_TEXHEAD_BLCK_G_DATA_TYPE MW(12:10) +#define NVB097_TEXHEAD_BLCK_G_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVB097_TEXHEAD_BLCK_G_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVB097_TEXHEAD_BLCK_G_DATA_TYPE_NUM_SINT 0x00000003 +#define NVB097_TEXHEAD_BLCK_G_DATA_TYPE_NUM_UINT 0x00000004 +#define NVB097_TEXHEAD_BLCK_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVB097_TEXHEAD_BLCK_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVB097_TEXHEAD_BLCK_G_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVB097_TEXHEAD_BLCK_B_DATA_TYPE MW(15:13) +#define NVB097_TEXHEAD_BLCK_B_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVB097_TEXHEAD_BLCK_B_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVB097_TEXHEAD_BLCK_B_DATA_TYPE_NUM_SINT 0x00000003 +#define NVB097_TEXHEAD_BLCK_B_DATA_TYPE_NUM_UINT 0x00000004 +#define NVB097_TEXHEAD_BLCK_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define 
NVB097_TEXHEAD_BLCK_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEAD_BLCK_B_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEAD_BLCK_A_DATA_TYPE MW(18:16)
+#define NVB097_TEXHEAD_BLCK_A_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEAD_BLCK_A_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEAD_BLCK_A_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEAD_BLCK_A_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEAD_BLCK_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEAD_BLCK_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEAD_BLCK_A_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEAD_BLCK_X_SOURCE MW(21:19)
+#define NVB097_TEXHEAD_BLCK_X_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_BLCK_X_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_BLCK_X_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_BLCK_X_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_BLCK_X_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_BLCK_X_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_BLCK_X_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_BLCK_Y_SOURCE MW(24:22)
+#define NVB097_TEXHEAD_BLCK_Y_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_BLCK_Y_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_BLCK_Y_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_BLCK_Y_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_BLCK_Y_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_BLCK_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_BLCK_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_BLCK_Z_SOURCE MW(27:25)
+#define NVB097_TEXHEAD_BLCK_Z_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_BLCK_Z_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_BLCK_Z_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_BLCK_Z_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_BLCK_Z_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_BLCK_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_BLCK_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_BLCK_W_SOURCE MW(30:28)
+#define NVB097_TEXHEAD_BLCK_W_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_BLCK_W_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_BLCK_W_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_BLCK_W_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_BLCK_W_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_BLCK_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_BLCK_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_BLCK_PACK_COMPONENTS MW(31:31)
+#define NVB097_TEXHEAD_BLCK_RESERVED1Y MW(36:32)
+#define NVB097_TEXHEAD_BLCK_GOB_DEPTH_OFFSET MW(38:37)
+#define NVB097_TEXHEAD_BLCK_RESERVED1X MW(40:39)
+#define NVB097_TEXHEAD_BLCK_ADDRESS_BITS31TO9 MW(63:41)
+#define NVB097_TEXHEAD_BLCK_ADDRESS_BITS47TO32 MW(79:64)
+#define NVB097_TEXHEAD_BLCK_RESERVED_ADDRESS MW(84:80)
+#define NVB097_TEXHEAD_BLCK_HEADER_VERSION MW(87:85)
+#define NVB097_TEXHEAD_BLCK_HEADER_VERSION_SELECT_ONE_D_BUFFER 0x00000000
+#define NVB097_TEXHEAD_BLCK_HEADER_VERSION_SELECT_PITCH_COLOR_KEY 0x00000001
+#define NVB097_TEXHEAD_BLCK_HEADER_VERSION_SELECT_PITCH 0x00000002
+#define NVB097_TEXHEAD_BLCK_HEADER_VERSION_SELECT_BLOCKLINEAR 0x00000003
+#define NVB097_TEXHEAD_BLCK_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY 0x00000004
+#define NVB097_TEXHEAD_BLCK_RESERVED_HEADER_VERSION MW(88:88)
+#define NVB097_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH MW(92:89)
+#define NVB097_TEXHEAD_BLCK_RESERVED2A MW(95:93)
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_WIDTH MW(98:96)
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_WIDTH_ONE_GOB 0x00000000
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT MW(101:99)
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_ONE_GOB 0x00000000
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_TWO_GOBS 0x00000001
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_FOUR_GOBS 0x00000002
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH MW(104:102)
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_ONE_GOB 0x00000000
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_TWO_GOBS 0x00000001
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_FOUR_GOBS 0x00000002
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_EIGHT_GOBS 0x00000003
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVB097_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005
+#define NVB097_TEXHEAD_BLCK_SPARSE_ENABLE MW(105:105)
+#define NVB097_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS MW(108:106)
+#define NVB097_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_ONE_GOB 0x00000000
+#define NVB097_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_TWO_GOBS 0x00000001
+#define NVB097_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_FOUR_GOBS 0x00000002
+#define NVB097_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_EIGHT_GOBS 0x00000003
+#define NVB097_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_SIXTEEN_GOBS 0x00000004
+#define NVB097_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_THIRTYTWO_GOBS 0x00000005
+#define NVB097_TEXHEAD_BLCK_GOB3D MW(109:109)
+#define NVB097_TEXHEAD_BLCK_USE_ARRAY_TILE_ALIGNMENT MW(110:110)
+#define NVB097_TEXHEAD_BLCK_RESERVED3Z MW(111:111)
+#define NVB097_TEXHEAD_BLCK_LOD_ANISO_QUALITY2 MW(112:112)
+#define NVB097_TEXHEAD_BLCK_LOD_ANISO_QUALITY MW(113:113)
+#define NVB097_TEXHEAD_BLCK_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVB097_TEXHEAD_BLCK_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVB097_TEXHEAD_BLCK_LOD_ISO_QUALITY MW(114:114)
+#define NVB097_TEXHEAD_BLCK_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVB097_TEXHEAD_BLCK_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVB097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_MODIFIER MW(116:115)
+#define NVB097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVB097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVB097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVB097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVB097_TEXHEAD_BLCK_ANISO_SPREAD_SCALE MW(121:117)
+#define NVB097_TEXHEAD_BLCK_USE_HEADER_OPT_CONTROL MW(122:122)
+#define NVB097_TEXHEAD_BLCK_DEPTH_TEXTURE MW(123:123)
+#define NVB097_TEXHEAD_BLCK_MAX_MIP_LEVEL MW(127:124)
+#define NVB097_TEXHEAD_BLCK_WIDTH_MINUS_ONE MW(143:128)
+#define NVB097_TEXHEAD_BLCK_RESERVED4A MW(146:144)
+#define NVB097_TEXHEAD_BLCK_ANISO_SPREAD_MAX_LOG2 MW(149:147)
+#define NVB097_TEXHEAD_BLCK_S_R_G_B_CONVERSION MW(150:150)
+#define NVB097_TEXHEAD_BLCK_TEXTURE_TYPE MW(154:151)
+#define NVB097_TEXHEAD_BLCK_TEXTURE_TYPE_ONE_D 0x00000000
+#define NVB097_TEXHEAD_BLCK_TEXTURE_TYPE_TWO_D 0x00000001
+#define NVB097_TEXHEAD_BLCK_TEXTURE_TYPE_THREE_D 0x00000002
+#define NVB097_TEXHEAD_BLCK_TEXTURE_TYPE_CUBEMAP 0x00000003
+#define NVB097_TEXHEAD_BLCK_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004
+#define NVB097_TEXHEAD_BLCK_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005
+#define NVB097_TEXHEAD_BLCK_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006
+#define NVB097_TEXHEAD_BLCK_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007
+#define NVB097_TEXHEAD_BLCK_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008
+#define NVB097_TEXHEAD_BLCK_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f
+#define NVB097_TEXHEAD_BLCK_SECTOR_PROMOTION MW(156:155)
+#define NVB097_TEXHEAD_BLCK_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVB097_TEXHEAD_BLCK_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVB097_TEXHEAD_BLCK_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVB097_TEXHEAD_BLCK_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVB097_TEXHEAD_BLCK_BORDER_SIZE MW(159:157)
+#define NVB097_TEXHEAD_BLCK_BORDER_SIZE_BORDER_SIZE_ONE 0x00000000
+#define NVB097_TEXHEAD_BLCK_BORDER_SIZE_BORDER_SIZE_TWO 0x00000001
+#define NVB097_TEXHEAD_BLCK_BORDER_SIZE_BORDER_SIZE_FOUR 0x00000002
+#define NVB097_TEXHEAD_BLCK_BORDER_SIZE_BORDER_SIZE_EIGHT 0x00000003
+#define NVB097_TEXHEAD_BLCK_BORDER_SIZE_BORDER_SAMPLER_COLOR 0x00000007
+#define NVB097_TEXHEAD_BLCK_HEIGHT_MINUS_ONE MW(175:160)
+#define NVB097_TEXHEAD_BLCK_DEPTH_MINUS_ONE MW(189:176)
+#define NVB097_TEXHEAD_BLCK_RESERVED5A MW(190:190)
+#define NVB097_TEXHEAD_BLCK_NORMALIZED_COORDS MW(191:191)
+#define NVB097_TEXHEAD_BLCK_COLOR_KEY_OP MW(192:192)
+#define NVB097_TEXHEAD_BLCK_TRILIN_OPT MW(197:193)
+#define NVB097_TEXHEAD_BLCK_MIP_LOD_BIAS MW(210:198)
+#define NVB097_TEXHEAD_BLCK_ANISO_BIAS MW(214:211)
+#define NVB097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_FUNC MW(216:215)
+#define NVB097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVB097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVB097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVB097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVB097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_FUNC MW(218:217)
+#define NVB097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVB097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVB097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVB097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVB097_TEXHEAD_BLCK_MAX_ANISOTROPY MW(221:219)
+#define NVB097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000
+#define NVB097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001
+#define NVB097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002
+#define NVB097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003
+#define NVB097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004
+#define NVB097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005
+#define NVB097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006
+#define NVB097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007
+#define NVB097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_MODIFIER MW(223:222)
+#define NVB097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVB097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVB097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVB097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVB097_TEXHEAD_BLCK_COLOR_KEY_VALUE MW(255:224)
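[Editor's note: the MW(msb:lsb) ranges above index bits within the full 256-bit texture header (eight 32-bit words), not within a single register. The following is a minimal sketch only, assuming a little-endian word layout with word 0 holding bits 31:0; the function name texhead_get_field and the stdint types are illustrative assumptions, not part of this header.]

#include <stdint.h>

/* Sketch: extract bits hi..lo (inclusive) from a 256-bit texture
 * header stored as eight little-endian 32-bit words. */
static inline uint64_t texhead_get_field(const uint32_t hdr[8],
                                         unsigned hi, unsigned lo)
{
    uint64_t v = 0;
    unsigned bit;

    for (bit = lo; bit <= hi; bit++)
        v |= (uint64_t)((hdr[bit / 32] >> (bit % 32)) & 1u) << (bit - lo);
    return v;
}

/* e.g. WIDTH_MINUS_ONE sits at MW(143:128):
 *   uint32_t width = (uint32_t)texhead_get_field(hdr, 143, 128) + 1; */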
+
+
+/*
+** Texture Header State One-D Buffer
+ */
+
+#define NVB097_TEXHEAD_1D_COMPONENTS MW(6:0)
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_R32_G32_B32 0x00000002
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_R32_G32 0x00000004
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_R32_B24G8 0x00000005
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_X8B8G8R8 0x00000007
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_A8B8G8R8 0x00000008
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_A2B10G10R10 0x00000009
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_R16_G16 0x0000000c
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_G8R24 0x0000000d
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_G24R8 0x0000000e
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_R32 0x0000000f
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_A4B4G4R4 0x00000012
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_A5B5G5R1 0x00000013
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_A1B5G5R5 0x00000014
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_B5G6R5 0x00000015
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_B6G5R5 0x00000016
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_G8R8 0x00000018
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_R16 0x0000001b
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_R8 0x0000001d
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_G4R4 0x0000001e
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_R1 0x0000001f
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_BF10GF11RF11 0x00000021
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_G8B8G8R8 0x00000022
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_B8G8R8G8 0x00000023
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_DXT1 0x00000024
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_DXT23 0x00000025
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_DXT45 0x00000026
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_DXN1 0x00000027
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_DXN2 0x00000028
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_BC6H_SF16 0x00000010
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_BC6H_UF16 0x00000011
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_BC7U 0x00000017
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ETC2_RGB 0x00000006
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_EAC 0x00000019
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_EACX2 0x0000001a
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_Z24S8 0x00000029
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_X8Z24 0x0000002a
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_S8Z24 0x0000002b
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_X4V4Z24__COV4R4V 0x0000002c
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_X4V4Z24__COV8R8V 0x0000002d
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_V8Z24__COV4R12V 0x0000002e
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32 0x0000002f
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X24S8 0x00000030
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_X8Z24_X20V4S8__COV4R4V 0x00000031
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_X8Z24_X20V4S8__COV8R8V 0x00000032
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X20V4X8__COV4R4V 0x00000033
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X20V4X8__COV8R8V 0x00000034
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X20V4S8__COV4R4V 0x00000035
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X20V4S8__COV8R8V 0x00000036
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_X8Z24_X16V8S8__COV4R12V 0x00000037
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X16V8X8__COV4R12V 0x00000038
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X16V8S8__COV4R12V 0x00000039
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_Z16 0x0000003a
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_V8Z24__COV8R24V 0x0000003b
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_X8Z24_X16V8S8__COV8R24V 0x0000003c
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X16V8X8__COV8R24V 0x0000003d
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X16V8S8__COV8R24V 0x0000003e
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046
+#define NVB097_TEXHEAD_1D_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f
+#define NVB097_TEXHEAD_1D_R_DATA_TYPE MW(9:7)
+#define NVB097_TEXHEAD_1D_R_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEAD_1D_R_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEAD_1D_R_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEAD_1D_R_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEAD_1D_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEAD_1D_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEAD_1D_R_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEAD_1D_G_DATA_TYPE MW(12:10)
+#define NVB097_TEXHEAD_1D_G_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEAD_1D_G_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEAD_1D_G_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEAD_1D_G_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEAD_1D_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEAD_1D_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEAD_1D_G_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEAD_1D_B_DATA_TYPE MW(15:13)
+#define NVB097_TEXHEAD_1D_B_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEAD_1D_B_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEAD_1D_B_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEAD_1D_B_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEAD_1D_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEAD_1D_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEAD_1D_B_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEAD_1D_A_DATA_TYPE MW(18:16)
+#define NVB097_TEXHEAD_1D_A_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEAD_1D_A_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEAD_1D_A_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEAD_1D_A_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEAD_1D_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEAD_1D_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEAD_1D_A_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEAD_1D_X_SOURCE MW(21:19)
+#define NVB097_TEXHEAD_1D_X_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_1D_X_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_1D_X_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_1D_X_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_1D_X_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_1D_X_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_1D_X_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_1D_Y_SOURCE MW(24:22)
+#define NVB097_TEXHEAD_1D_Y_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_1D_Y_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_1D_Y_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_1D_Y_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_1D_Y_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_1D_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_1D_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_1D_Z_SOURCE MW(27:25)
+#define NVB097_TEXHEAD_1D_Z_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_1D_Z_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_1D_Z_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_1D_Z_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_1D_Z_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_1D_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_1D_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_1D_W_SOURCE MW(30:28)
+#define NVB097_TEXHEAD_1D_W_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_1D_W_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_1D_W_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_1D_W_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_1D_W_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_1D_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_1D_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_1D_PACK_COMPONENTS MW(31:31)
+#define NVB097_TEXHEAD_1D_ADDRESS_BITS31TO0 MW(63:32)
+#define NVB097_TEXHEAD_1D_ADDRESS_BITS47TO32 MW(79:64)
+#define NVB097_TEXHEAD_1D_RESERVED_ADDRESS MW(84:80)
+#define NVB097_TEXHEAD_1D_HEADER_VERSION MW(87:85)
+#define NVB097_TEXHEAD_1D_HEADER_VERSION_SELECT_ONE_D_BUFFER 0x00000000
+#define NVB097_TEXHEAD_1D_HEADER_VERSION_SELECT_PITCH_COLOR_KEY 0x00000001
+#define NVB097_TEXHEAD_1D_HEADER_VERSION_SELECT_PITCH 0x00000002
+#define NVB097_TEXHEAD_1D_HEADER_VERSION_SELECT_BLOCKLINEAR 0x00000003
+#define NVB097_TEXHEAD_1D_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY 0x00000004
+#define NVB097_TEXHEAD_1D_RESERVED_HEADER_VERSION MW(88:88)
+#define NVB097_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH MW(92:89)
+#define NVB097_TEXHEAD_1D_RESERVED2A MW(95:93)
+#define NVB097_TEXHEAD_1D_WIDTH_MINUS_ONE_BITS31TO16 MW(111:96)
+#define NVB097_TEXHEAD_1D_RESERVED3X MW(127:112)
+#define NVB097_TEXHEAD_1D_WIDTH_MINUS_ONE_BITS15TO0 MW(143:128)
+#define NVB097_TEXHEAD_1D_RESERVED4A MW(146:144)
+#define NVB097_TEXHEAD_1D_RESERVED4X MW(149:147)
+#define NVB097_TEXHEAD_1D_S_R_G_B_CONVERSION MW(150:150)
+#define NVB097_TEXHEAD_1D_TEXTURE_TYPE MW(154:151)
+#define NVB097_TEXHEAD_1D_TEXTURE_TYPE_ONE_D 0x00000000
+#define NVB097_TEXHEAD_1D_TEXTURE_TYPE_TWO_D 0x00000001
+#define NVB097_TEXHEAD_1D_TEXTURE_TYPE_THREE_D 0x00000002
+#define NVB097_TEXHEAD_1D_TEXTURE_TYPE_CUBEMAP 0x00000003
+#define NVB097_TEXHEAD_1D_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004
+#define NVB097_TEXHEAD_1D_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005
+#define NVB097_TEXHEAD_1D_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006
+#define NVB097_TEXHEAD_1D_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007
+#define NVB097_TEXHEAD_1D_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008
+#define NVB097_TEXHEAD_1D_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f
+#define NVB097_TEXHEAD_1D_SECTOR_PROMOTION MW(156:155)
+#define NVB097_TEXHEAD_1D_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVB097_TEXHEAD_1D_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVB097_TEXHEAD_1D_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVB097_TEXHEAD_1D_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVB097_TEXHEAD_1D_RESERVED4Y MW(159:157)
+#define NVB097_TEXHEAD_1D_RESERVED5X MW(189:160)
+#define NVB097_TEXHEAD_1D_RESERVED5A MW(190:190)
+#define NVB097_TEXHEAD_1D_RESERVED5Y MW(191:191)
+#define NVB097_TEXHEAD_1D_RESERVED6X MW(223:192)
+#define NVB097_TEXHEAD_1D_RESERVED7X MW(255:224)
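[Editor's note: one detail worth calling out in the one-D-buffer layout above: the 32-bit width is stored minus one and split across two 16-bit fields, WIDTH_MINUS_ONE_BITS31TO16 at MW(111:96) and WIDTH_MINUS_ONE_BITS15TO0 at MW(143:128). A minimal sketch of stitching it back together; it reuses the hypothetical texhead_get_field() helper from the earlier note, and the function name here is likewise an assumption.]

#include <stdint.h>

uint64_t texhead_get_field(const uint32_t hdr[8], unsigned hi, unsigned lo);

static inline uint32_t texhead_1d_width(const uint32_t hdr[8])
{
    uint32_t lo = (uint32_t)texhead_get_field(hdr, 143, 128); /* bits 15:0  */
    uint32_t hi = (uint32_t)texhead_get_field(hdr, 111,  96); /* bits 31:16 */

    return ((hi << 16) | lo) + 1; /* stored value is width - 1 */
}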
+
+
+/*
+** Texture Header State Pitch
+ */
+
+#define NVB097_TEXHEAD_PITCH_COMPONENTS MW(6:0)
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_R32_G32_B32 0x00000002
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_R32_G32 0x00000004
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_R32_B24G8 0x00000005
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_X8B8G8R8 0x00000007
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_A8B8G8R8 0x00000008
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_A2B10G10R10 0x00000009
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_R16_G16 0x0000000c
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_G8R24 0x0000000d
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_G24R8 0x0000000e
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_R32 0x0000000f
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_A4B4G4R4 0x00000012
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_A5B5G5R1 0x00000013
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_A1B5G5R5 0x00000014
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_B5G6R5 0x00000015
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_B6G5R5 0x00000016
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_G8R8 0x00000018
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_R16 0x0000001b
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_R8 0x0000001d
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_G4R4 0x0000001e
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_R1 0x0000001f
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_BF10GF11RF11 0x00000021
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_G8B8G8R8 0x00000022
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_B8G8R8G8 0x00000023
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_DXT1 0x00000024
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_DXT23 0x00000025
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_DXT45 0x00000026
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_DXN1 0x00000027
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_DXN2 0x00000028
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_BC6H_SF16 0x00000010
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_BC6H_UF16 0x00000011
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_BC7U 0x00000017
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ETC2_RGB 0x00000006
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_EAC 0x00000019
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_EACX2 0x0000001a
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_Z24S8 0x00000029
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_X8Z24 0x0000002a
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_S8Z24 0x0000002b
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_X4V4Z24__COV4R4V 0x0000002c
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_X4V4Z24__COV8R8V 0x0000002d
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_V8Z24__COV4R12V 0x0000002e
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32 0x0000002f
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X24S8 0x00000030
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_X8Z24_X20V4S8__COV4R4V 0x00000031
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_X8Z24_X20V4S8__COV8R8V 0x00000032
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X20V4X8__COV4R4V 0x00000033
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X20V4X8__COV8R8V 0x00000034
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X20V4S8__COV4R4V 0x00000035
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X20V4S8__COV8R8V 0x00000036
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_X8Z24_X16V8S8__COV4R12V 0x00000037
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X16V8X8__COV4R12V 0x00000038
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X16V8S8__COV4R12V 0x00000039
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_Z16 0x0000003a
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_V8Z24__COV8R24V 0x0000003b
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_X8Z24_X16V8S8__COV8R24V 0x0000003c
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X16V8X8__COV8R24V 0x0000003d
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X16V8S8__COV8R24V 0x0000003e
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046
+#define NVB097_TEXHEAD_PITCH_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f
+#define NVB097_TEXHEAD_PITCH_R_DATA_TYPE MW(9:7)
+#define NVB097_TEXHEAD_PITCH_R_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEAD_PITCH_R_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEAD_PITCH_R_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEAD_PITCH_R_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEAD_PITCH_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEAD_PITCH_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEAD_PITCH_R_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCH_G_DATA_TYPE MW(12:10)
+#define NVB097_TEXHEAD_PITCH_G_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEAD_PITCH_G_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEAD_PITCH_G_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEAD_PITCH_G_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEAD_PITCH_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEAD_PITCH_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEAD_PITCH_G_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCH_B_DATA_TYPE MW(15:13)
+#define NVB097_TEXHEAD_PITCH_B_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEAD_PITCH_B_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEAD_PITCH_B_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEAD_PITCH_B_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEAD_PITCH_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEAD_PITCH_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEAD_PITCH_B_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCH_A_DATA_TYPE MW(18:16)
+#define NVB097_TEXHEAD_PITCH_A_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEAD_PITCH_A_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEAD_PITCH_A_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEAD_PITCH_A_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEAD_PITCH_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEAD_PITCH_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEAD_PITCH_A_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCH_X_SOURCE MW(21:19)
+#define NVB097_TEXHEAD_PITCH_X_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_PITCH_X_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_PITCH_X_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_PITCH_X_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_PITCH_X_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_PITCH_X_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_PITCH_X_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCH_Y_SOURCE MW(24:22)
+#define NVB097_TEXHEAD_PITCH_Y_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_PITCH_Y_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_PITCH_Y_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_PITCH_Y_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_PITCH_Y_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_PITCH_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_PITCH_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCH_Z_SOURCE MW(27:25)
+#define NVB097_TEXHEAD_PITCH_Z_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_PITCH_Z_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_PITCH_Z_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_PITCH_Z_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_PITCH_Z_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_PITCH_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_PITCH_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCH_W_SOURCE MW(30:28)
+#define NVB097_TEXHEAD_PITCH_W_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_PITCH_W_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_PITCH_W_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_PITCH_W_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_PITCH_W_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_PITCH_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_PITCH_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCH_PACK_COMPONENTS MW(31:31)
+#define NVB097_TEXHEAD_PITCH_RESERVED1A MW(36:32)
+#define NVB097_TEXHEAD_PITCH_ADDRESS_BITS31TO5 MW(63:37)
+#define NVB097_TEXHEAD_PITCH_ADDRESS_BITS47TO32 MW(79:64)
+#define NVB097_TEXHEAD_PITCH_RESERVED_ADDRESS MW(84:80)
+#define NVB097_TEXHEAD_PITCH_HEADER_VERSION MW(87:85)
+#define NVB097_TEXHEAD_PITCH_HEADER_VERSION_SELECT_ONE_D_BUFFER 0x00000000
+#define NVB097_TEXHEAD_PITCH_HEADER_VERSION_SELECT_PITCH_COLOR_KEY 0x00000001
+#define NVB097_TEXHEAD_PITCH_HEADER_VERSION_SELECT_PITCH 0x00000002
+#define NVB097_TEXHEAD_PITCH_HEADER_VERSION_SELECT_BLOCKLINEAR 0x00000003
+#define NVB097_TEXHEAD_PITCH_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY 0x00000004
+#define NVB097_TEXHEAD_PITCH_RESERVED_HEADER_VERSION MW(88:88)
+#define NVB097_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH MW(92:89)
+#define NVB097_TEXHEAD_PITCH_RESERVED2A MW(95:93)
+#define NVB097_TEXHEAD_PITCH_PITCH_BITS20TO5 MW(111:96)
+#define NVB097_TEXHEAD_PITCH_LOD_ANISO_QUALITY2 MW(112:112)
+#define NVB097_TEXHEAD_PITCH_LOD_ANISO_QUALITY MW(113:113)
+#define NVB097_TEXHEAD_PITCH_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVB097_TEXHEAD_PITCH_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVB097_TEXHEAD_PITCH_LOD_ISO_QUALITY MW(114:114)
+#define NVB097_TEXHEAD_PITCH_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVB097_TEXHEAD_PITCH_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVB097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_MODIFIER MW(116:115)
+#define NVB097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVB097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVB097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVB097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVB097_TEXHEAD_PITCH_ANISO_SPREAD_SCALE MW(121:117)
+#define NVB097_TEXHEAD_PITCH_USE_HEADER_OPT_CONTROL MW(122:122)
+#define NVB097_TEXHEAD_PITCH_DEPTH_TEXTURE MW(123:123)
+#define NVB097_TEXHEAD_PITCH_MAX_MIP_LEVEL MW(127:124)
+#define NVB097_TEXHEAD_PITCH_WIDTH_MINUS_ONE MW(143:128)
+#define NVB097_TEXHEAD_PITCH_RESERVED4A MW(146:144)
+#define NVB097_TEXHEAD_PITCH_ANISO_SPREAD_MAX_LOG2 MW(149:147)
+#define NVB097_TEXHEAD_PITCH_S_R_G_B_CONVERSION MW(150:150)
+#define NVB097_TEXHEAD_PITCH_TEXTURE_TYPE MW(154:151)
+#define NVB097_TEXHEAD_PITCH_TEXTURE_TYPE_ONE_D 0x00000000
+#define NVB097_TEXHEAD_PITCH_TEXTURE_TYPE_TWO_D 0x00000001
+#define NVB097_TEXHEAD_PITCH_TEXTURE_TYPE_THREE_D 0x00000002
+#define NVB097_TEXHEAD_PITCH_TEXTURE_TYPE_CUBEMAP 0x00000003
+#define NVB097_TEXHEAD_PITCH_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004
+#define NVB097_TEXHEAD_PITCH_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005
+#define NVB097_TEXHEAD_PITCH_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006
+#define NVB097_TEXHEAD_PITCH_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007
+#define NVB097_TEXHEAD_PITCH_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008
+#define NVB097_TEXHEAD_PITCH_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f
+#define NVB097_TEXHEAD_PITCH_SECTOR_PROMOTION MW(156:155)
+#define NVB097_TEXHEAD_PITCH_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVB097_TEXHEAD_PITCH_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVB097_TEXHEAD_PITCH_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVB097_TEXHEAD_PITCH_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVB097_TEXHEAD_PITCH_BORDER_SIZE MW(159:157)
+#define NVB097_TEXHEAD_PITCH_BORDER_SIZE_BORDER_SIZE_ONE 0x00000000
+#define NVB097_TEXHEAD_PITCH_BORDER_SIZE_BORDER_SIZE_TWO 0x00000001
+#define NVB097_TEXHEAD_PITCH_BORDER_SIZE_BORDER_SIZE_FOUR 0x00000002
+#define NVB097_TEXHEAD_PITCH_BORDER_SIZE_BORDER_SIZE_EIGHT 0x00000003
+#define NVB097_TEXHEAD_PITCH_BORDER_SIZE_BORDER_SAMPLER_COLOR 0x00000007
+#define NVB097_TEXHEAD_PITCH_HEIGHT_MINUS_ONE MW(175:160)
+#define NVB097_TEXHEAD_PITCH_DEPTH_MINUS_ONE MW(189:176)
+#define NVB097_TEXHEAD_PITCH_RESERVED5A MW(190:190)
+#define NVB097_TEXHEAD_PITCH_NORMALIZED_COORDS MW(191:191)
+#define NVB097_TEXHEAD_PITCH_RESERVED6Y MW(192:192)
+#define NVB097_TEXHEAD_PITCH_TRILIN_OPT MW(197:193)
+#define NVB097_TEXHEAD_PITCH_MIP_LOD_BIAS MW(210:198)
+#define NVB097_TEXHEAD_PITCH_ANISO_BIAS MW(214:211)
+#define NVB097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_FUNC MW(216:215)
+#define NVB097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVB097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVB097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVB097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVB097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_FUNC MW(218:217)
+#define NVB097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVB097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVB097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVB097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVB097_TEXHEAD_PITCH_MAX_ANISOTROPY MW(221:219)
+#define NVB097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000
+#define NVB097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001
+#define NVB097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002
+#define NVB097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003
+#define NVB097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004
+#define NVB097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005
+#define NVB097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006
+#define NVB097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007
+#define NVB097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_MODIFIER MW(223:222)
+#define NVB097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVB097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVB097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVB097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVB097_TEXHEAD_PITCH_RES_VIEW_MIN_MIP_LEVEL MW(227:224)
+#define NVB097_TEXHEAD_PITCH_RES_VIEW_MAX_MIP_LEVEL MW(231:228)
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT MW(235:232)
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_1X1 0x00000000
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X1 0x00000001
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X2 0x00000002
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2 0x00000003
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2_D3D 0x00000004
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X1_D3D 0x00000005
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X4 0x00000006
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X2_VC_4 0x00000008
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X2_VC_12 0x00000009
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2_VC_8 0x0000000a
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2_VC_24 0x0000000b
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X1_CENTER 0x0000000c
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X2_CENTER 0x0000000d
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2_CENTER 0x0000000e
+#define NVB097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X4_REGULAR 0x0000000f
+#define NVB097_TEXHEAD_PITCH_MIN_LOD_CLAMP MW(247:236)
+#define NVB097_TEXHEAD_PITCH_RESERVED7Y MW(255:248)
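[Editor's note: the pitch layout keeps only bits 20:5 of the byte pitch (PITCH_BITS20TO5 at MW(111:96)), which implies the row pitch must be a multiple of 32 bytes. A one-line sketch of recovering it, under the same assumptions as the earlier helper; both function names are illustrative, not from this header.]

#include <stdint.h>

uint64_t texhead_get_field(const uint32_t hdr[8], unsigned hi, unsigned lo);

static inline uint32_t texhead_pitch_bytes(const uint32_t hdr[8])
{
    /* stored as pitch >> 5, so shift the 16-bit field back up */
    return (uint32_t)texhead_get_field(hdr, 111, 96) << 5;
}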
+
+
+/*
+** Texture Header State Pitch Color Key
+ */
+
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS MW(6:0)
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R32_G32_B32 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R32_G32 0x00000004
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R32_B24G8 0x00000005
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X8B8G8R8 0x00000007
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_A8B8G8R8 0x00000008
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_A2B10G10R10 0x00000009
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R16_G16 0x0000000c
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_G8R24 0x0000000d
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_G24R8 0x0000000e
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R32 0x0000000f
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_A4B4G4R4 0x00000012
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_A5B5G5R1 0x00000013
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_A1B5G5R5 0x00000014
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_B5G6R5 0x00000015
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_B6G5R5 0x00000016
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_G8R8 0x00000018
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R16 0x0000001b
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R8 0x0000001d
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_G4R4 0x0000001e
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R1 0x0000001f
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_BF10GF11RF11 0x00000021
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_G8B8G8R8 0x00000022
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_B8G8R8G8 0x00000023
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_DXT1 0x00000024
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_DXT23 0x00000025
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_DXT45 0x00000026
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_DXN1 0x00000027
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_DXN2 0x00000028
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_BC6H_SF16 0x00000010
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_BC6H_UF16 0x00000011
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_BC7U 0x00000017
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ETC2_RGB 0x00000006
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_EAC 0x00000019
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_EACX2 0x0000001a
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_Z24S8 0x00000029
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X8Z24 0x0000002a
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_S8Z24 0x0000002b
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X4V4Z24__COV4R4V 0x0000002c
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X4V4Z24__COV8R8V 0x0000002d
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_V8Z24__COV4R12V 0x0000002e
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32 0x0000002f
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X24S8 0x00000030
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X8Z24_X20V4S8__COV4R4V 0x00000031
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X8Z24_X20V4S8__COV8R8V 0x00000032
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X20V4X8__COV4R4V 0x00000033
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X20V4X8__COV8R8V 0x00000034
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X20V4S8__COV4R4V 0x00000035
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X20V4S8__COV8R8V 0x00000036
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X8Z24_X16V8S8__COV4R12V 0x00000037
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X16V8X8__COV4R12V 0x00000038
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X16V8S8__COV4R12V 0x00000039
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_Z16 0x0000003a
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_V8Z24__COV8R24V 0x0000003b
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X8Z24_X16V8S8__COV8R24V 0x0000003c
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X16V8X8__COV8R24V 0x0000003d
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X16V8S8__COV8R24V 0x0000003e
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046
+#define NVB097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f
+#define NVB097_TEXHEAD_PITCHCK_R_DATA_TYPE MW(9:7)
+#define NVB097_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCHCK_G_DATA_TYPE MW(12:10)
+#define NVB097_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCHCK_B_DATA_TYPE MW(15:13)
+#define NVB097_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCHCK_A_DATA_TYPE MW(18:16)
+#define NVB097_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCHCK_X_SOURCE MW(21:19)
+#define NVB097_TEXHEAD_PITCHCK_X_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_PITCHCK_X_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_X_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_X_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_PITCHCK_X_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_PITCHCK_X_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_PITCHCK_X_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCHCK_Y_SOURCE MW(24:22)
+#define NVB097_TEXHEAD_PITCHCK_Y_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_PITCHCK_Y_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_Y_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_Y_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_PITCHCK_Y_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_PITCHCK_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_PITCHCK_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCHCK_Z_SOURCE MW(27:25)
+#define NVB097_TEXHEAD_PITCHCK_Z_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_PITCHCK_Z_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_Z_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_Z_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_PITCHCK_Z_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_PITCHCK_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_PITCHCK_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCHCK_W_SOURCE MW(30:28)
+#define NVB097_TEXHEAD_PITCHCK_W_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEAD_PITCHCK_W_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_W_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_W_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEAD_PITCHCK_W_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEAD_PITCHCK_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEAD_PITCHCK_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEAD_PITCHCK_PACK_COMPONENTS MW(31:31)
+#define NVB097_TEXHEAD_PITCHCK_RESERVED1A MW(36:32)
+#define NVB097_TEXHEAD_PITCHCK_ADDRESS_BITS31TO5 MW(63:37)
+#define NVB097_TEXHEAD_PITCHCK_ADDRESS_BITS47TO32 MW(79:64)
+#define NVB097_TEXHEAD_PITCHCK_RESERVED_ADDRESS MW(84:80)
+#define NVB097_TEXHEAD_PITCHCK_HEADER_VERSION MW(87:85)
+#define NVB097_TEXHEAD_PITCHCK_HEADER_VERSION_SELECT_ONE_D_BUFFER 0x00000000
+#define NVB097_TEXHEAD_PITCHCK_HEADER_VERSION_SELECT_PITCH_COLOR_KEY 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_HEADER_VERSION_SELECT_PITCH 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_HEADER_VERSION_SELECT_BLOCKLINEAR 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY 0x00000004
+#define NVB097_TEXHEAD_PITCHCK_RESERVED_HEADER_VERSION MW(88:88)
+#define NVB097_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH MW(92:89)
+#define NVB097_TEXHEAD_PITCHCK_RESERVED2A MW(95:93)
+#define NVB097_TEXHEAD_PITCHCK_PITCH_BITS20TO5 MW(111:96)
+#define NVB097_TEXHEAD_PITCHCK_LOD_ANISO_QUALITY2 MW(112:112)
+#define NVB097_TEXHEAD_PITCHCK_LOD_ANISO_QUALITY MW(113:113)
+#define NVB097_TEXHEAD_PITCHCK_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVB097_TEXHEAD_PITCHCK_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_LOD_ISO_QUALITY MW(114:114)
+#define NVB097_TEXHEAD_PITCHCK_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVB097_TEXHEAD_PITCHCK_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER MW(116:115)
+#define NVB097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVB097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_ANISO_SPREAD_SCALE MW(121:117)
+#define NVB097_TEXHEAD_PITCHCK_USE_HEADER_OPT_CONTROL MW(122:122)
+#define NVB097_TEXHEAD_PITCHCK_DEPTH_TEXTURE MW(123:123)
+#define NVB097_TEXHEAD_PITCHCK_MAX_MIP_LEVEL MW(127:124)
+#define NVB097_TEXHEAD_PITCHCK_WIDTH_MINUS_ONE MW(143:128)
+#define NVB097_TEXHEAD_PITCHCK_RESERVED4A MW(146:144)
+#define NVB097_TEXHEAD_PITCHCK_ANISO_SPREAD_MAX_LOG2 MW(149:147)
+#define NVB097_TEXHEAD_PITCHCK_S_R_G_B_CONVERSION MW(150:150)
+#define NVB097_TEXHEAD_PITCHCK_TEXTURE_TYPE MW(154:151)
+#define NVB097_TEXHEAD_PITCHCK_TEXTURE_TYPE_ONE_D 0x00000000
+#define NVB097_TEXHEAD_PITCHCK_TEXTURE_TYPE_TWO_D 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_TEXTURE_TYPE_THREE_D 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_TEXTURE_TYPE_CUBEMAP 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004
+#define NVB097_TEXHEAD_PITCHCK_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005
+#define NVB097_TEXHEAD_PITCHCK_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006
+#define NVB097_TEXHEAD_PITCHCK_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007
+#define NVB097_TEXHEAD_PITCHCK_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008
+#define NVB097_TEXHEAD_PITCHCK_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f
+#define NVB097_TEXHEAD_PITCHCK_SECTOR_PROMOTION MW(156:155)
+#define NVB097_TEXHEAD_PITCHCK_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVB097_TEXHEAD_PITCHCK_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_BORDER_SIZE MW(159:157)
+#define NVB097_TEXHEAD_PITCHCK_BORDER_SIZE_BORDER_SIZE_ONE 0x00000000
+#define NVB097_TEXHEAD_PITCHCK_BORDER_SIZE_BORDER_SIZE_TWO 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_BORDER_SIZE_BORDER_SIZE_FOUR 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_BORDER_SIZE_BORDER_SIZE_EIGHT 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_BORDER_SIZE_BORDER_SAMPLER_COLOR 0x00000007
+#define NVB097_TEXHEAD_PITCHCK_HEIGHT_MINUS_ONE MW(175:160)
+#define NVB097_TEXHEAD_PITCHCK_DEPTH_MINUS_ONE MW(189:176)
+#define NVB097_TEXHEAD_PITCHCK_RESERVED5A MW(190:190)
+#define NVB097_TEXHEAD_PITCHCK_NORMALIZED_COORDS MW(191:191)
+#define NVB097_TEXHEAD_PITCHCK_COLOR_KEY_OP MW(192:192)
+#define NVB097_TEXHEAD_PITCHCK_TRILIN_OPT MW(197:193)
+#define NVB097_TEXHEAD_PITCHCK_MIP_LOD_BIAS MW(210:198)
+#define NVB097_TEXHEAD_PITCHCK_ANISO_BIAS MW(214:211)
+#define NVB097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_FUNC MW(216:215)
+#define NVB097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVB097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_FUNC MW(218:217)
+#define NVB097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVB097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_MAX_ANISOTROPY MW(221:219)
+#define NVB097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000
+#define NVB097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004
+#define NVB097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005
+#define NVB097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006
+#define NVB097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007
+#define NVB097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_MODIFIER MW(223:222)
+#define NVB097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVB097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVB097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVB097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVB097_TEXHEAD_PITCHCK_COLOR_KEY_VALUE MW(255:224)
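[Editor's note: as with the plain pitch layout, the pitch/color-key header stores its base address as two pieces: bits 31:5 in ADDRESS_BITS31TO5 (MW(63:37), implying a 32-byte-aligned base) and bits 47:32 in ADDRESS_BITS47TO32 (MW(79:64)). A hedged sketch of rebuilding the 48-bit address; the helper and function names are assumptions, not from this header.]

#include <stdint.h>

uint64_t texhead_get_field(const uint32_t hdr[8], unsigned hi, unsigned lo);

static inline uint64_t texhead_pitchck_address(const uint32_t hdr[8])
{
    uint64_t lo = texhead_get_field(hdr, 63, 37) << 5;  /* address bits 31:5  */
    uint64_t hi = texhead_get_field(hdr, 79, 64) << 32; /* address bits 47:32 */

    return hi | lo;
}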
+
+
+/*
+** Texture Header State, Version 2
+ */
+
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES 5:0
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_R32_G32_B32_A32 0x00000001
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_R32_G32_B32 0x00000002
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_R16_G16_B16_A16 0x00000003
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_R32_G32 0x00000004
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_R32_B24G8 0x00000005
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_X8B8G8R8 0x00000007
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_A8B8G8R8 0x00000008
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_A2B10G10R10 0x00000009
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_R16_G16 0x0000000c
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_G8R24 0x0000000d
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_G24R8 0x0000000e
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_R32 0x0000000f
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_A4B4G4R4 0x00000012
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_A5B5G5R1 0x00000013
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_A1B5G5R5 0x00000014
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_B5G6R5 0x00000015
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_B6G5R5 0x00000016
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_G8R8 0x00000018
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_R16 0x0000001b
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_Y8_VIDEO 0x0000001c
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_R8 0x0000001d
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_G4R4 0x0000001e
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_R1 0x0000001f
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_BF10GF11RF11 0x00000021
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_G8B8G8R8 0x00000022
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_B8G8R8G8 0x00000023
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_DXT1 0x00000024
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_DXT23 0x00000025
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_DXT45 0x00000026
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_DXN1 0x00000027
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_DXN2 0x00000028
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_BC6H_SF16 0x00000010
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_BC6H_UF16 0x00000011
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_BC7U 0x00000017
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_ETC2_RGB 0x00000006
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_ETC2_RGB_PTA 0x0000000a
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_ETC2_RGBA 0x0000000b
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_EAC 0x00000019
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_EACX2 0x0000001a
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_Z24S8 0x00000029
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_X8Z24 0x0000002a
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_S8Z24 0x0000002b
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_X4V4Z24__COV4R4V 0x0000002c
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_X4V4Z24__COV8R8V 0x0000002d
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_V8Z24__COV4R12V 0x0000002e
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_ZF32 0x0000002f
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_ZF32_X24S8 0x00000030
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_X8Z24_X20V4S8__COV4R4V 0x00000031
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_X8Z24_X20V4S8__COV8R8V 0x00000032
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_ZF32_X20V4X8__COV4R4V 0x00000033
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_ZF32_X20V4X8__COV8R8V 0x00000034
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_ZF32_X20V4S8__COV4R4V 0x00000035
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_ZF32_X20V4S8__COV8R8V 0x00000036
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_X8Z24_X16V8S8__COV4R12V 0x00000037
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_ZF32_X16V8X8__COV4R12V 0x00000038
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_ZF32_X16V8S8__COV4R12V 0x00000039
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_Z16 0x0000003a
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_V8Z24__COV8R24V 0x0000003b
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_X8Z24_X16V8S8__COV8R24V 0x0000003c
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_ZF32_X16V8X8__COV8R24V 0x0000003d
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_ZF32_X16V8S8__COV8R24V 0x0000003e
+#define NVB097_TEXHEADV2_0_COMPONENT_SIZES_CS_BITFIELD_SIZE 0x0000003f
+#define NVB097_TEXHEADV2_0_R_DATA_TYPE 8:6
+#define NVB097_TEXHEADV2_0_R_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEADV2_0_R_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEADV2_0_R_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEADV2_0_R_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEADV2_0_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEADV2_0_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEADV2_0_R_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEADV2_0_G_DATA_TYPE 11:9
+#define NVB097_TEXHEADV2_0_G_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEADV2_0_G_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEADV2_0_G_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEADV2_0_G_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEADV2_0_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEADV2_0_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEADV2_0_G_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEADV2_0_B_DATA_TYPE 14:12
+#define NVB097_TEXHEADV2_0_B_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEADV2_0_B_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEADV2_0_B_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEADV2_0_B_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEADV2_0_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEADV2_0_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEADV2_0_B_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEADV2_0_A_DATA_TYPE 17:15
+#define NVB097_TEXHEADV2_0_A_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVB097_TEXHEADV2_0_A_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVB097_TEXHEADV2_0_A_DATA_TYPE_NUM_SINT 0x00000003
+#define NVB097_TEXHEADV2_0_A_DATA_TYPE_NUM_UINT 0x00000004
+#define NVB097_TEXHEADV2_0_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVB097_TEXHEADV2_0_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVB097_TEXHEADV2_0_A_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVB097_TEXHEADV2_0_X_SOURCE 20:18
+#define NVB097_TEXHEADV2_0_X_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEADV2_0_X_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEADV2_0_X_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEADV2_0_X_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEADV2_0_X_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEADV2_0_X_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEADV2_0_X_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEADV2_0_Y_SOURCE 23:21
+#define NVB097_TEXHEADV2_0_Y_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEADV2_0_Y_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEADV2_0_Y_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEADV2_0_Y_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEADV2_0_Y_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEADV2_0_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEADV2_0_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEADV2_0_Z_SOURCE 26:24
+#define NVB097_TEXHEADV2_0_Z_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEADV2_0_Z_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEADV2_0_Z_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEADV2_0_Z_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEADV2_0_Z_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEADV2_0_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEADV2_0_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEADV2_0_W_SOURCE 29:27
+#define NVB097_TEXHEADV2_0_W_SOURCE_IN_ZERO 0x00000000
+#define NVB097_TEXHEADV2_0_W_SOURCE_IN_R 0x00000002
+#define NVB097_TEXHEADV2_0_W_SOURCE_IN_G 0x00000003
+#define NVB097_TEXHEADV2_0_W_SOURCE_IN_B 0x00000004
+#define NVB097_TEXHEADV2_0_W_SOURCE_IN_A 0x00000005
+#define NVB097_TEXHEADV2_0_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVB097_TEXHEADV2_0_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVB097_TEXHEADV2_0_PACK_COMPONENTS 30:30
+#define NVB097_TEXHEADV2_0_USE_COMPONENT_SIZES_EXTENDED 31:31
+#define NVB097_TEXHEADV2_1_OFFSET_LOWER 31:0
+#define NVB097_TEXHEADV2_2_OFFSET_UPPER 7:0
+#define NVB097_TEXHEADV2_2_ANISO_SPREAD_MAX_LOG2_L_S_B 9:8
+#define NVB097_TEXHEADV2_2_S_R_G_B_CONVERSION 10:10
+#define NVB097_TEXHEADV2_2_ANISO_SPREAD_MAX_LOG2_M_S_B 11:11
+#define NVB097_TEXHEADV2_2_LOD_ANISO_QUALITY2 12:12
+#define NVB097_TEXHEADV2_2_COLOR_KEY_OP 13:13
+#define NVB097_TEXHEADV2_2_TEXTURE_TYPE 17:14
+#define NVB097_TEXHEADV2_2_TEXTURE_TYPE_ONE_D 0x00000000
+#define NVB097_TEXHEADV2_2_TEXTURE_TYPE_TWO_D 0x00000001
+#define NVB097_TEXHEADV2_2_TEXTURE_TYPE_THREE_D 0x00000002
+#define NVB097_TEXHEADV2_2_TEXTURE_TYPE_CUBEMAP 0x00000003
+#define NVB097_TEXHEADV2_2_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004
+#define NVB097_TEXHEADV2_2_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005
+#define NVB097_TEXHEADV2_2_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006
+#define NVB097_TEXHEADV2_2_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007
+#define NVB097_TEXHEADV2_2_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008
+#define NVB097_TEXHEADV2_2_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f
+#define NVB097_TEXHEADV2_2_MEMORY_LAYOUT 18:18
+#define NVB097_TEXHEADV2_2_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NVB097_TEXHEADV2_2_MEMORY_LAYOUT_PITCH 0x00000001
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_WIDTH 21:19
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_WIDTH_ONE_GOB 0x00000000
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_HEIGHT 24:22
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_HEIGHT_ONE_GOB 0x00000000
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_HEIGHT_TWO_GOBS 0x00000001
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_HEIGHT_FOUR_GOBS 0x00000002
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_DEPTH 27:25
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_DEPTH_ONE_GOB 0x00000000
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_DEPTH_TWO_GOBS 0x00000001
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_DEPTH_FOUR_GOBS 0x00000002
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_DEPTH_EIGHT_GOBS 0x00000003
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVB097_TEXHEADV2_2_GOBS_PER_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005
+#define NVB097_TEXHEADV2_2_SECTOR_PROMOTION 29:28
+#define NVB097_TEXHEADV2_2_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVB097_TEXHEADV2_2_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVB097_TEXHEADV2_2_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVB097_TEXHEADV2_2_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVB097_TEXHEADV2_2_BORDER_SOURCE 30:30
+#define NVB097_TEXHEADV2_2_BORDER_SOURCE_BORDER_TEXTURE 0x00000000
+#define NVB097_TEXHEADV2_2_BORDER_SOURCE_BORDER_COLOR 0x00000001
+#define NVB097_TEXHEADV2_2_NORMALIZED_COORDS 31:31
+#define NVB097_TEXHEADV2_3_PITCH 19:0
+#define NVB097_TEXHEADV2_3_LOD_ANISO_QUALITY 20:20
+#define NVB097_TEXHEADV2_3_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVB097_TEXHEADV2_3_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVB097_TEXHEADV2_3_LOD_ISO_QUALITY 21:21
+#define NVB097_TEXHEADV2_3_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVB097_TEXHEADV2_3_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVB097_TEXHEADV2_3_ANISO_COARSE_SPREAD_MODIFIER 23:22
+#define NVB097_TEXHEADV2_3_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVB097_TEXHEADV2_3_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVB097_TEXHEADV2_3_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVB097_TEXHEADV2_3_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVB097_TEXHEADV2_3_ANISO_SPREAD_SCALE 28:24
+#define NVB097_TEXHEADV2_3_USE_HEADER_OPT_CONTROL 29:29
+#define NVB097_TEXHEADV2_3_RESERVED3A 30:30
+#define NVB097_TEXHEADV2_3_RESERVED3B 31:31
+#define NVB097_TEXHEADV2_4_WIDTH 29:0
+#define NVB097_TEXHEADV2_4_DEPTH_TEXTURE 30:30
+#define NVB097_TEXHEADV2_4_USE_TEXTURE_HEADER_VERSION2 31:31
+#define NVB097_TEXHEADV2_5_HEIGHT 15:0
+#define NVB097_TEXHEADV2_5_DEPTH 27:16
+#define NVB097_TEXHEADV2_5_MAX_MIP_LEVEL 31:28
+#define NVB097_TEXHEADV2_6_TRILIN_OPT 4:0
+#define NVB097_TEXHEADV2_6_MIP_LOD_BIAS 17:5
+#define NVB097_TEXHEADV2_6_RESERVED6A 18:18
+#define NVB097_TEXHEADV2_6_ANISO_BIAS 22:19
+#define NVB097_TEXHEADV2_6_ANISO_FINE_SPREAD_FUNC 24:23
+#define NVB097_TEXHEADV2_6_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVB097_TEXHEADV2_6_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVB097_TEXHEADV2_6_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVB097_TEXHEADV2_6_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVB097_TEXHEADV2_6_ANISO_COARSE_SPREAD_FUNC 26:25
+#define NVB097_TEXHEADV2_6_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVB097_TEXHEADV2_6_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVB097_TEXHEADV2_6_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVB097_TEXHEADV2_6_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVB097_TEXHEADV2_6_MAX_ANISOTROPY 29:27
+#define NVB097_TEXHEADV2_6_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000
+#define NVB097_TEXHEADV2_6_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001
+#define NVB097_TEXHEADV2_6_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002
+#define NVB097_TEXHEADV2_6_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003
+#define NVB097_TEXHEADV2_6_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004
+#define NVB097_TEXHEADV2_6_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005
+#define NVB097_TEXHEADV2_6_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006
+#define NVB097_TEXHEADV2_6_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007
+#define NVB097_TEXHEADV2_6_ANISO_FINE_SPREAD_MODIFIER 31:30
+#define NVB097_TEXHEADV2_6_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVB097_TEXHEADV2_6_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVB097_TEXHEADV2_6_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVB097_TEXHEADV2_6_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVB097_TEXHEADV2_7_RES_VIEW_MIN_MIP_LEVEL 3:0
+#define NVB097_TEXHEADV2_7_RES_VIEW_MAX_MIP_LEVEL 7:4
+#define NVB097_TEXHEADV2_7_HEIGHT_MSB 8:8
+#define NVB097_TEXHEADV2_7_HEIGHT_MSB_RESERVED 11:9
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT 15:12
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT_MODE_1X1 0x00000000
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT_MODE_2X1 0x00000001
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT_MODE_2X2 0x00000002
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT_MODE_4X2 0x00000003
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT_MODE_4X2_D3D 0x00000004
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT_MODE_2X1_D3D 0x00000005
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT_MODE_4X4 0x00000006
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT_MODE_2X2_VC_4 0x00000008
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT_MODE_2X2_VC_12 0x00000009
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT_MODE_4X2_VC_8 0x0000000a
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT_MODE_4X2_VC_24 0x0000000b
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT_MODE_2X1_CENTER 0x0000000c
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT_MODE_2X2_CENTER 0x0000000d
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT_MODE_4X2_CENTER 0x0000000e
+#define NVB097_TEXHEADV2_7_MULTI_SAMPLE_COUNT_MODE_4X4_REGULAR 0x0000000f
+#define NVB097_TEXHEADV2_7_MIN_LOD_CLAMP 27:16
+#define NVB097_TEXHEADV2_7_DEPTH_MSB 30:28
+#define NVB097_TEXHEADV2_7_RESERVED7A 31:31
NVB097_TEXHEADV3_0_COMPONENT_SIZES_ASTC_2D_10X6 0x00000017 +#define NVB097_TEXHEADV3_0_COMPONENT_SIZES_ASTC_2D_10X8 0x00000013 +#define NVB097_TEXHEADV3_0_COMPONENT_SIZES_ASTC_2D_10X10 0x00000005 +#define NVB097_TEXHEADV3_0_COMPONENT_SIZES_ASTC_2D_12X10 0x00000014 +#define NVB097_TEXHEADV3_0_COMPONENT_SIZES_ASTC_2D_12X12 0x00000006 +#define NVB097_TEXHEADV3_0_R_DATA_TYPE 8:6 +#define NVB097_TEXHEADV3_0_R_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVB097_TEXHEADV3_0_R_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVB097_TEXHEADV3_0_R_DATA_TYPE_NUM_SINT 0x00000003 +#define NVB097_TEXHEADV3_0_R_DATA_TYPE_NUM_UINT 0x00000004 +#define NVB097_TEXHEADV3_0_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVB097_TEXHEADV3_0_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVB097_TEXHEADV3_0_R_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVB097_TEXHEADV3_0_G_DATA_TYPE 11:9 +#define NVB097_TEXHEADV3_0_G_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVB097_TEXHEADV3_0_G_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVB097_TEXHEADV3_0_G_DATA_TYPE_NUM_SINT 0x00000003 +#define NVB097_TEXHEADV3_0_G_DATA_TYPE_NUM_UINT 0x00000004 +#define NVB097_TEXHEADV3_0_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVB097_TEXHEADV3_0_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVB097_TEXHEADV3_0_G_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVB097_TEXHEADV3_0_B_DATA_TYPE 14:12 +#define NVB097_TEXHEADV3_0_B_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVB097_TEXHEADV3_0_B_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVB097_TEXHEADV3_0_B_DATA_TYPE_NUM_SINT 0x00000003 +#define NVB097_TEXHEADV3_0_B_DATA_TYPE_NUM_UINT 0x00000004 +#define NVB097_TEXHEADV3_0_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVB097_TEXHEADV3_0_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVB097_TEXHEADV3_0_B_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVB097_TEXHEADV3_0_A_DATA_TYPE 17:15 +#define NVB097_TEXHEADV3_0_A_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVB097_TEXHEADV3_0_A_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVB097_TEXHEADV3_0_A_DATA_TYPE_NUM_SINT 0x00000003 +#define NVB097_TEXHEADV3_0_A_DATA_TYPE_NUM_UINT 0x00000004 +#define NVB097_TEXHEADV3_0_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVB097_TEXHEADV3_0_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVB097_TEXHEADV3_0_A_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVB097_TEXHEADV3_0_X_SOURCE 20:18 +#define NVB097_TEXHEADV3_0_X_SOURCE_IN_ZERO 0x00000000 +#define NVB097_TEXHEADV3_0_X_SOURCE_IN_R 0x00000002 +#define NVB097_TEXHEADV3_0_X_SOURCE_IN_G 0x00000003 +#define NVB097_TEXHEADV3_0_X_SOURCE_IN_B 0x00000004 +#define NVB097_TEXHEADV3_0_X_SOURCE_IN_A 0x00000005 +#define NVB097_TEXHEADV3_0_X_SOURCE_IN_ONE_INT 0x00000006 +#define NVB097_TEXHEADV3_0_X_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVB097_TEXHEADV3_0_Y_SOURCE 23:21 +#define NVB097_TEXHEADV3_0_Y_SOURCE_IN_ZERO 0x00000000 +#define NVB097_TEXHEADV3_0_Y_SOURCE_IN_R 0x00000002 +#define NVB097_TEXHEADV3_0_Y_SOURCE_IN_G 0x00000003 +#define NVB097_TEXHEADV3_0_Y_SOURCE_IN_B 0x00000004 +#define NVB097_TEXHEADV3_0_Y_SOURCE_IN_A 0x00000005 +#define NVB097_TEXHEADV3_0_Y_SOURCE_IN_ONE_INT 0x00000006 +#define NVB097_TEXHEADV3_0_Y_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVB097_TEXHEADV3_0_Z_SOURCE 26:24 +#define NVB097_TEXHEADV3_0_Z_SOURCE_IN_ZERO 0x00000000 +#define NVB097_TEXHEADV3_0_Z_SOURCE_IN_R 0x00000002 +#define NVB097_TEXHEADV3_0_Z_SOURCE_IN_G 0x00000003 +#define NVB097_TEXHEADV3_0_Z_SOURCE_IN_B 0x00000004 +#define NVB097_TEXHEADV3_0_Z_SOURCE_IN_A 0x00000005 +#define NVB097_TEXHEADV3_0_Z_SOURCE_IN_ONE_INT 0x00000006 +#define 
NVB097_TEXHEADV3_0_Z_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVB097_TEXHEADV3_0_W_SOURCE 29:27 +#define NVB097_TEXHEADV3_0_W_SOURCE_IN_ZERO 0x00000000 +#define NVB097_TEXHEADV3_0_W_SOURCE_IN_R 0x00000002 +#define NVB097_TEXHEADV3_0_W_SOURCE_IN_G 0x00000003 +#define NVB097_TEXHEADV3_0_W_SOURCE_IN_B 0x00000004 +#define NVB097_TEXHEADV3_0_W_SOURCE_IN_A 0x00000005 +#define NVB097_TEXHEADV3_0_W_SOURCE_IN_ONE_INT 0x00000006 +#define NVB097_TEXHEADV3_0_W_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVB097_TEXHEADV3_0_PACK_COMPONENTS 30:30 +#define NVB097_TEXHEADV3_0_USE_COMPONENT_SIZES_EXTENDED 31:31 +#define NVB097_TEXHEADV3_1_OFFSET_LOWER 31:0 +#define NVB097_TEXHEADV3_2_OFFSET_UPPER 7:0 +#define NVB097_TEXHEADV3_2_ANISO_SPREAD_MAX_LOG2_L_S_B 9:8 +#define NVB097_TEXHEADV3_2_S_R_G_B_CONVERSION 10:10 +#define NVB097_TEXHEADV3_2_ANISO_SPREAD_MAX_LOG2_M_S_B 11:11 +#define NVB097_TEXHEADV3_2_LOD_ANISO_QUALITY2 12:12 +#define NVB097_TEXHEADV3_2_COLOR_KEY_OP 13:13 +#define NVB097_TEXHEADV3_2_TEXTURE_TYPE 17:14 +#define NVB097_TEXHEADV3_2_TEXTURE_TYPE_ONE_D 0x00000000 +#define NVB097_TEXHEADV3_2_TEXTURE_TYPE_TWO_D 0x00000001 +#define NVB097_TEXHEADV3_2_TEXTURE_TYPE_THREE_D 0x00000002 +#define NVB097_TEXHEADV3_2_TEXTURE_TYPE_CUBEMAP 0x00000003 +#define NVB097_TEXHEADV3_2_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004 +#define NVB097_TEXHEADV3_2_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005 +#define NVB097_TEXHEADV3_2_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006 +#define NVB097_TEXHEADV3_2_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007 +#define NVB097_TEXHEADV3_2_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008 +#define NVB097_TEXHEADV3_2_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f +#define NVB097_TEXHEADV3_2_MEMORY_LAYOUT 18:18 +#define NVB097_TEXHEADV3_2_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVB097_TEXHEADV3_2_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_WIDTH 21:19 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_WIDTH_ONE_GOB 0x00000000 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_HEIGHT 24:22 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_HEIGHT_ONE_GOB 0x00000000 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_HEIGHT_TWO_GOBS 0x00000001 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_HEIGHT_FOUR_GOBS 0x00000002 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_DEPTH 27:25 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_DEPTH_ONE_GOB 0x00000000 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_DEPTH_TWO_GOBS 0x00000001 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_DEPTH_FOUR_GOBS 0x00000002 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_DEPTH_EIGHT_GOBS 0x00000003 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVB097_TEXHEADV3_2_GOBS_PER_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005 +#define NVB097_TEXHEADV3_2_SECTOR_PROMOTION 29:28 +#define NVB097_TEXHEADV3_2_SECTOR_PROMOTION_NO_PROMOTION 0x00000000 +#define NVB097_TEXHEADV3_2_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001 +#define NVB097_TEXHEADV3_2_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002 +#define NVB097_TEXHEADV3_2_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003 +#define NVB097_TEXHEADV3_2_BORDER_SOURCE 30:30 +#define NVB097_TEXHEADV3_2_BORDER_SOURCE_BORDER_TEXTURE 0x00000000 +#define NVB097_TEXHEADV3_2_BORDER_SOURCE_BORDER_COLOR 0x00000001 +#define NVB097_TEXHEADV3_2_NORMALIZED_COORDS 31:31 +#define NVB097_TEXHEADV3_3_PITCH 19:0 +#define 
NVB097_TEXHEADV3_3_LOD_ANISO_QUALITY 20:20 +#define NVB097_TEXHEADV3_3_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000 +#define NVB097_TEXHEADV3_3_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001 +#define NVB097_TEXHEADV3_3_LOD_ISO_QUALITY 21:21 +#define NVB097_TEXHEADV3_3_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000 +#define NVB097_TEXHEADV3_3_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001 +#define NVB097_TEXHEADV3_3_ANISO_COARSE_SPREAD_MODIFIER 23:22 +#define NVB097_TEXHEADV3_3_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000 +#define NVB097_TEXHEADV3_3_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001 +#define NVB097_TEXHEADV3_3_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002 +#define NVB097_TEXHEADV3_3_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003 +#define NVB097_TEXHEADV3_3_ANISO_SPREAD_SCALE 28:24 +#define NVB097_TEXHEADV3_3_USE_HEADER_OPT_CONTROL 29:29 +#define NVB097_TEXHEADV3_3_RESERVED3A 30:30 +#define NVB097_TEXHEADV3_3_RESERVED3B 31:31 +#define NVB097_TEXHEADV3_4_WIDTH 29:0 +#define NVB097_TEXHEADV3_4_DEPTH_TEXTURE 30:30 +#define NVB097_TEXHEADV3_4_USE_TEXTURE_HEADER_VERSION2 31:31 +#define NVB097_TEXHEADV3_5_HEIGHT 15:0 +#define NVB097_TEXHEADV3_5_DEPTH 27:16 +#define NVB097_TEXHEADV3_5_MAX_MIP_LEVEL 31:28 +#define NVB097_TEXHEADV3_6_TRILIN_OPT 4:0 +#define NVB097_TEXHEADV3_6_MIP_LOD_BIAS 17:5 +#define NVB097_TEXHEADV3_6_RESERVED6A 18:18 +#define NVB097_TEXHEADV3_6_ANISO_BIAS 22:19 +#define NVB097_TEXHEADV3_6_ANISO_FINE_SPREAD_FUNC 24:23 +#define NVB097_TEXHEADV3_6_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000 +#define NVB097_TEXHEADV3_6_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001 +#define NVB097_TEXHEADV3_6_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002 +#define NVB097_TEXHEADV3_6_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003 +#define NVB097_TEXHEADV3_6_ANISO_COARSE_SPREAD_FUNC 26:25 +#define NVB097_TEXHEADV3_6_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000 +#define NVB097_TEXHEADV3_6_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001 +#define NVB097_TEXHEADV3_6_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002 +#define NVB097_TEXHEADV3_6_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003 +#define NVB097_TEXHEADV3_6_MAX_ANISOTROPY 29:27 +#define NVB097_TEXHEADV3_6_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000 +#define NVB097_TEXHEADV3_6_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001 +#define NVB097_TEXHEADV3_6_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002 +#define NVB097_TEXHEADV3_6_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003 +#define NVB097_TEXHEADV3_6_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004 +#define NVB097_TEXHEADV3_6_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005 +#define NVB097_TEXHEADV3_6_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006 +#define NVB097_TEXHEADV3_6_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007 +#define NVB097_TEXHEADV3_6_ANISO_FINE_SPREAD_MODIFIER 31:30 +#define NVB097_TEXHEADV3_6_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000 +#define NVB097_TEXHEADV3_6_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001 +#define NVB097_TEXHEADV3_6_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002 +#define NVB097_TEXHEADV3_6_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003 +#define NVB097_TEXHEADV3_7_RES_VIEW_MIN_MIP_LEVEL 3:0 +#define NVB097_TEXHEADV3_7_RES_VIEW_MAX_MIP_LEVEL 7:4 +#define NVB097_TEXHEADV3_7_HEIGHT_MSB 8:8 +#define NVB097_TEXHEADV3_7_HEIGHT_MSB_RESERVED 11:9 +#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT 15:12 +#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT_MODE_1X1 0x00000000 
+#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT_MODE_2X1 0x00000001 +#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT_MODE_2X2 0x00000002 +#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT_MODE_4X2 0x00000003 +#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT_MODE_4X2_D3D 0x00000004 +#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT_MODE_2X1_D3D 0x00000005 +#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT_MODE_4X4 0x00000006 +#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT_MODE_2X2_VC_4 0x00000008 +#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT_MODE_2X2_VC_12 0x00000009 +#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT_MODE_4X2_VC_8 0x0000000a +#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT_MODE_4X2_VC_24 0x0000000b +#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT_MODE_2X1_CENTER 0x0000000c +#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT_MODE_2X2_CENTER 0x0000000d +#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT_MODE_4X2_CENTER 0x0000000e +#define NVB097_TEXHEADV3_7_MULTI_SAMPLE_COUNT_MODE_4X4_REGULAR 0x0000000f +#define NVB097_TEXHEADV3_7_MIN_LOD_CLAMP 27:16 +#define NVB097_TEXHEADV3_7_DEPTH_MSB 30:28 +#define NVB097_TEXHEADV3_7_RESERVED7A 31:31 + + +/* +** Texture Sampler State + */ + +#define NVB097_TEXSAMP0_ADDRESS_U 2:0 +#define NVB097_TEXSAMP0_ADDRESS_U_WRAP 0x00000000 +#define NVB097_TEXSAMP0_ADDRESS_U_MIRROR 0x00000001 +#define NVB097_TEXSAMP0_ADDRESS_U_CLAMP_TO_EDGE 0x00000002 +#define NVB097_TEXSAMP0_ADDRESS_U_BORDER 0x00000003 +#define NVB097_TEXSAMP0_ADDRESS_U_CLAMP_OGL 0x00000004 +#define NVB097_TEXSAMP0_ADDRESS_U_MIRROR_ONCE_CLAMP_TO_EDGE 0x00000005 +#define NVB097_TEXSAMP0_ADDRESS_U_MIRROR_ONCE_BORDER 0x00000006 +#define NVB097_TEXSAMP0_ADDRESS_U_MIRROR_ONCE_CLAMP_OGL 0x00000007 +#define NVB097_TEXSAMP0_ADDRESS_V 5:3 +#define NVB097_TEXSAMP0_ADDRESS_V_WRAP 0x00000000 +#define NVB097_TEXSAMP0_ADDRESS_V_MIRROR 0x00000001 +#define NVB097_TEXSAMP0_ADDRESS_V_CLAMP_TO_EDGE 0x00000002 +#define NVB097_TEXSAMP0_ADDRESS_V_BORDER 0x00000003 +#define NVB097_TEXSAMP0_ADDRESS_V_CLAMP_OGL 0x00000004 +#define NVB097_TEXSAMP0_ADDRESS_V_MIRROR_ONCE_CLAMP_TO_EDGE 0x00000005 +#define NVB097_TEXSAMP0_ADDRESS_V_MIRROR_ONCE_BORDER 0x00000006 +#define NVB097_TEXSAMP0_ADDRESS_V_MIRROR_ONCE_CLAMP_OGL 0x00000007 +#define NVB097_TEXSAMP0_ADDRESS_P 8:6 +#define NVB097_TEXSAMP0_ADDRESS_P_WRAP 0x00000000 +#define NVB097_TEXSAMP0_ADDRESS_P_MIRROR 0x00000001 +#define NVB097_TEXSAMP0_ADDRESS_P_CLAMP_TO_EDGE 0x00000002 +#define NVB097_TEXSAMP0_ADDRESS_P_BORDER 0x00000003 +#define NVB097_TEXSAMP0_ADDRESS_P_CLAMP_OGL 0x00000004 +#define NVB097_TEXSAMP0_ADDRESS_P_MIRROR_ONCE_CLAMP_TO_EDGE 0x00000005 +#define NVB097_TEXSAMP0_ADDRESS_P_MIRROR_ONCE_BORDER 0x00000006 +#define NVB097_TEXSAMP0_ADDRESS_P_MIRROR_ONCE_CLAMP_OGL 0x00000007 +#define NVB097_TEXSAMP0_DEPTH_COMPARE 9:9 +#define NVB097_TEXSAMP0_DEPTH_COMPARE_FUNC 12:10 +#define NVB097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_NEVER 0x00000000 +#define NVB097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_LESS 0x00000001 +#define NVB097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_EQUAL 0x00000002 +#define NVB097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_LEQUAL 0x00000003 +#define NVB097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_GREATER 0x00000004 +#define NVB097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_NOTEQUAL 0x00000005 +#define NVB097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_GEQUAL 0x00000006 +#define NVB097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_ALWAYS 0x00000007 +#define NVB097_TEXSAMP0_S_R_G_B_CONVERSION 13:13 +#define NVB097_TEXSAMP0_RESERVED0A 16:14 +#define NVB097_TEXSAMP0_RESERVED0B 19:17 +#define NVB097_TEXSAMP0_MAX_ANISOTROPY 22:20 +#define 
NVB097_TEXSAMP0_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000 +#define NVB097_TEXSAMP0_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001 +#define NVB097_TEXSAMP0_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002 +#define NVB097_TEXSAMP0_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003 +#define NVB097_TEXSAMP0_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004 +#define NVB097_TEXSAMP0_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005 +#define NVB097_TEXSAMP0_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006 +#define NVB097_TEXSAMP0_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007 +#define NVB097_TEXSAMP1_MAG_FILTER 2:0 +#define NVB097_TEXSAMP1_MAG_FILTER_MAG_POINT 0x00000001 +#define NVB097_TEXSAMP1_MAG_FILTER_MAG_LINEAR 0x00000002 +#define NVB097_TEXSAMP1_MAG_FILTER_VCAA_4_TAP 0x00000003 +#define NVB097_TEXSAMP1_MAG_FILTER_VCAA_8_TAP 0x00000004 +#define NVB097_TEXSAMP1_MIN_LOD_CLAMP_BEHAVIOR_FOR_NEAREST_MIP 3:3 +#define NVB097_TEXSAMP1_MIN_LOD_CLAMP_BEHAVIOR_FOR_NEAREST_MIP_INTEGER_AND_FRACTION 0x00000000 +#define NVB097_TEXSAMP1_MIN_LOD_CLAMP_BEHAVIOR_FOR_NEAREST_MIP_INTEGER_ONLY 0x00000001 +#define NVB097_TEXSAMP1_MIN_FILTER 5:4 +#define NVB097_TEXSAMP1_MIN_FILTER_MIN_POINT 0x00000001 +#define NVB097_TEXSAMP1_MIN_FILTER_MIN_LINEAR 0x00000002 +#define NVB097_TEXSAMP1_MIN_FILTER_MIN_ANISO 0x00000003 +#define NVB097_TEXSAMP1_MIP_FILTER 7:6 +#define NVB097_TEXSAMP1_MIP_FILTER_MIP_NONE 0x00000001 +#define NVB097_TEXSAMP1_MIP_FILTER_MIP_POINT 0x00000002 +#define NVB097_TEXSAMP1_MIP_FILTER_MIP_LINEAR 0x00000003 +#define NVB097_TEXSAMP1_CUBEMAP_INTERFACE_FILTERING 9:8 +#define NVB097_TEXSAMP1_CUBEMAP_INTERFACE_FILTERING_USE_WRAP 0x00000000 +#define NVB097_TEXSAMP1_CUBEMAP_INTERFACE_FILTERING_OVERRIDE_WRAP 0x00000001 +#define NVB097_TEXSAMP1_CUBEMAP_INTERFACE_FILTERING_AUTO_SPAN_SEAM 0x00000002 +#define NVB097_TEXSAMP1_CUBEMAP_INTERFACE_FILTERING_AUTO_CROSS_SEAM 0x00000003 +#define NVB097_TEXSAMP1_MIP_LOD_BIAS 24:12 +#define NVB097_TEXSAMP1_FLOAT_COORD_NORMALIZATION 25:25 +#define NVB097_TEXSAMP1_FLOAT_COORD_NORMALIZATION_USE_HEADER_SETTING 0x00000000 +#define NVB097_TEXSAMP1_FLOAT_COORD_NORMALIZATION_FORCE_UNNORMALIZED_COORDS 0x00000001 +#define NVB097_TEXSAMP1_TRILIN_OPT 30:26 +#define NVB097_TEXSAMP2_MIN_LOD_CLAMP 11:0 +#define NVB097_TEXSAMP2_MAX_LOD_CLAMP 23:12 +#define NVB097_TEXSAMP2_S_R_G_B_BORDER_COLOR_R 31:24 +#define NVB097_TEXSAMP3_RESERVED12 11:0 +#define NVB097_TEXSAMP3_S_R_G_B_BORDER_COLOR_G 19:12 +#define NVB097_TEXSAMP3_S_R_G_B_BORDER_COLOR_B 27:20 +#define NVB097_TEXSAMP4_BORDER_COLOR_R 31:0 +#define NVB097_TEXSAMP5_BORDER_COLOR_G 31:0 +#define NVB097_TEXSAMP6_BORDER_COLOR_B 31:0 +#define NVB097_TEXSAMP7_BORDER_COLOR_A 31:0 + + + +#endif // #ifndef __CLB097TEX_H__ diff --git a/src/common/sdk/nvidia/inc/class/clb0b5sw.h b/src/common/sdk/nvidia/inc/class/clb0b5sw.h new file mode 100644 index 0000000..06fba11 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb0b5sw.h @@ -0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
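The MSB:LSB pairs in the texture header and sampler defines above are bit ranges within each 32-bit state word. A minimal sketch of packing texture sampler word 0, assuming hypothetical FIELD_MASK/FIELD_SET helpers; in-tree code would typically use the DRF_* macros from nvmisc.h for the same purpose:

```c
#include "nvtypes.h"

/* Hypothetical helpers (not from the patch): build a mask for an
 * MSB:LSB field and shift a value into it. */
#define FIELD_MASK(msb, lsb)   ((0xFFFFFFFFU >> (31 - (msb) + (lsb))) << (lsb))
#define FIELD_SET(msb, lsb, v) (((NvU32)(v) << (lsb)) & FIELD_MASK(msb, lsb))

/* Texture sampler word 0: clamp-to-edge on U/V/P and 16:1 max
 * anisotropy, using the field positions and values declared above. */
static NvU32 texsamp0_example(void)
{
    NvU32 w0 = 0;
    w0 |= FIELD_SET(2, 0, 0x00000002);   /* ADDRESS_U = CLAMP_TO_EDGE      */
    w0 |= FIELD_SET(5, 3, 0x00000002);   /* ADDRESS_V = CLAMP_TO_EDGE      */
    w0 |= FIELD_SET(8, 6, 0x00000002);   /* ADDRESS_P = CLAMP_TO_EDGE      */
    w0 |= FIELD_SET(22, 20, 0x00000007); /* MAX_ANISOTROPY = ANISO_16_TO_1 */
    return w0;
}
```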
diff --git a/src/common/sdk/nvidia/inc/class/clb0b5sw.h b/src/common/sdk/nvidia/inc/class/clb0b5sw.h
new file mode 100644
index 0000000..06fba11
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clb0b5sw.h
@@ -0,0 +1,54 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2012-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/clb0b5sw.finn
+//
+
+
+
+//
+// Using VERSION_0 will cause the API to interpret
+// engineType as a CE engine instance. This allows
+// for backward compatibility with 85B5sw and 90B5sw.
+//
+#define NVB0B5_ALLOCATION_PARAMETERS_VERSION_0 0
+
+//
+// Using VERSION_1 will cause the API to interpret
+// engineType as an NV2080_ENGINE_TYPE ordinal.
+//
+#define NVB0B5_ALLOCATION_PARAMETERS_VERSION_1 1
+
+#define NVB0B5_ALLOCATION_PARAMETERS_MESSAGE_ID (0xb0b5U)
+
+typedef struct NVB0B5_ALLOCATION_PARAMETERS {
+    NvU32 version;
+    NvU32 engineType;
+} NVB0B5_ALLOCATION_PARAMETERS;
+
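The two comments in the header above spell out how engineType is interpreted under each version. A sketch of filling the struct for a VERSION_1 copy-engine allocation; NV2080_ENGINE_TYPE_COPY0 is assumed to come from cl2080.h and is shown only as an illustration:

```c
#include "nvtypes.h"

/* Assumes clb0b5sw.h (above) and cl2080.h for NV2080_ENGINE_TYPE_COPY0. */
static const NVB0B5_ALLOCATION_PARAMETERS ceAllocParams = {
    .version    = NVB0B5_ALLOCATION_PARAMETERS_VERSION_1, /* engineType is an NV2080_ENGINE_TYPE ordinal */
    .engineType = NV2080_ENGINE_TYPE_COPY0,               /* first copy engine */
};
```

With VERSION_0 the same engineType field would instead carry a raw CE instance number, kept for 85B5sw/90B5sw compatibility.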
diff --git a/src/common/sdk/nvidia/inc/class/clb197.h b/src/common/sdk/nvidia/inc/class/clb197.h
new file mode 100644
index 0000000..f8c976c
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clb197.h
@@ -0,0 +1,4160 @@
+/*******************************************************************************
+    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a
+    copy of this software and associated documentation files (the "Software"),
+    to deal in the Software without restriction, including without limitation
+    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+    and/or sell copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+
+#ifndef _cl_maxwell_b_h_
+#define _cl_maxwell_b_h_
+
+/* AUTO GENERATED FILE -- DO NOT EDIT */
+/* Command: ../../../../class/bin/sw_header.pl maxwell_b */
+
+#include "nvtypes.h"
+
+#define MAXWELL_B 0xB197
+
+#define NVB197_SET_OBJECT 0x0000
+#define NVB197_SET_OBJECT_CLASS_ID 15:0
+#define NVB197_SET_OBJECT_ENGINE_ID 20:16
+
+#define NVB197_NO_OPERATION 0x0100
+#define NVB197_NO_OPERATION_V 31:0
+
+#define NVB197_SET_NOTIFY_A 0x0104
+#define NVB197_SET_NOTIFY_A_ADDRESS_UPPER 7:0
+
+#define NVB197_SET_NOTIFY_B 0x0108
+#define NVB197_SET_NOTIFY_B_ADDRESS_LOWER 31:0
+
+#define NVB197_NOTIFY 0x010c
+#define NVB197_NOTIFY_TYPE 31:0
+#define NVB197_NOTIFY_TYPE_WRITE_ONLY 0x00000000
+#define NVB197_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001
+
+#define NVB197_WAIT_FOR_IDLE 0x0110
+#define NVB197_WAIT_FOR_IDLE_V 31:0
+
+#define NVB197_LOAD_MME_INSTRUCTION_RAM_POINTER 0x0114
+#define NVB197_LOAD_MME_INSTRUCTION_RAM_POINTER_V 31:0
+
+#define NVB197_LOAD_MME_INSTRUCTION_RAM 0x0118
+#define NVB197_LOAD_MME_INSTRUCTION_RAM_V 31:0
+
+#define NVB197_LOAD_MME_START_ADDRESS_RAM_POINTER 0x011c
+#define NVB197_LOAD_MME_START_ADDRESS_RAM_POINTER_V 31:0
+
+#define NVB197_LOAD_MME_START_ADDRESS_RAM 0x0120
+#define NVB197_LOAD_MME_START_ADDRESS_RAM_V 31:0
+
+#define NVB197_SET_MME_SHADOW_RAM_CONTROL 0x0124
+#define NVB197_SET_MME_SHADOW_RAM_CONTROL_MODE 1:0
+#define NVB197_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK 0x00000000
+#define NVB197_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK_WITH_FILTER 0x00000001
+#define NVB197_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH 0x00000002
+#define NVB197_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_REPLAY 0x00000003
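The LOAD_MME_* methods above suggest a pointer-then-data upload sequence for Method Macro Expander (MME) programs. A sketch under stated assumptions: push() is a hypothetical stand-in for whatever emits one (method, data) pair into the channel's pushbuffer, and the instruction-RAM write pointer is assumed to auto-advance after each data write, which is how these methods are commonly driven:

```c
#include "nvtypes.h"

extern void push(NvU32 method, NvU32 data); /* hypothetical pushbuffer helper */

/* Upload an MME macro body to instruction RAM at ramBase and bind its
 * entry point to start-address RAM slot 'slot'. */
static void upload_mme_macro(NvU32 slot, NvU32 ramBase,
                             const NvU32 *insns, NvU32 count)
{
    NvU32 i;

    /* Point the instruction-RAM write cursor at ramBase... */
    push(NVB197_LOAD_MME_INSTRUCTION_RAM_POINTER, ramBase);
    /* ...then stream the macro body (assumed auto-advancing pointer). */
    for (i = 0; i < count; i++)
        push(NVB197_LOAD_MME_INSTRUCTION_RAM, insns[i]);

    /* Record the macro's entry point in start-address RAM slot 'slot'. */
    push(NVB197_LOAD_MME_START_ADDRESS_RAM_POINTER, slot);
    push(NVB197_LOAD_MME_START_ADDRESS_RAM, ramBase);
}
```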
+
+#define NVB197_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER 0x0128
+#define NVB197_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER_V 7:0
+
+#define NVB197_PEER_SEMAPHORE_RELEASE_OFFSET 0x012c
+#define NVB197_PEER_SEMAPHORE_RELEASE_OFFSET_V 31:0
+
+#define NVB197_SET_GLOBAL_RENDER_ENABLE_A 0x0130
+#define NVB197_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0
+
+#define NVB197_SET_GLOBAL_RENDER_ENABLE_B 0x0134
+#define NVB197_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0
+
+#define NVB197_SET_GLOBAL_RENDER_ENABLE_C 0x0138
+#define NVB197_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0
+#define NVB197_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000
+#define NVB197_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001
+#define NVB197_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002
+#define NVB197_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003
+#define NVB197_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NVB197_SEND_GO_IDLE 0x013c
+#define NVB197_SEND_GO_IDLE_V 31:0
+
+#define NVB197_PM_TRIGGER 0x0140
+#define NVB197_PM_TRIGGER_V 31:0
+
+#define NVB197_PM_TRIGGER_WFI 0x0144
+#define NVB197_PM_TRIGGER_WFI_V 31:0
+
+#define NVB197_SET_INSTRUMENTATION_METHOD_HEADER 0x0150
+#define NVB197_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0
+
+#define NVB197_SET_INSTRUMENTATION_METHOD_DATA 0x0154
+#define NVB197_SET_INSTRUMENTATION_METHOD_DATA_V 31:0
+
+#define NVB197_LINE_LENGTH_IN 0x0180
+#define NVB197_LINE_LENGTH_IN_VALUE 31:0
+
+#define NVB197_LINE_COUNT 0x0184
+#define NVB197_LINE_COUNT_VALUE 31:0
+
+#define NVB197_OFFSET_OUT_UPPER 0x0188
+#define NVB197_OFFSET_OUT_UPPER_VALUE 7:0
+
+#define NVB197_OFFSET_OUT 0x018c
+#define NVB197_OFFSET_OUT_VALUE 31:0
+
+#define NVB197_PITCH_OUT 0x0190
+#define NVB197_PITCH_OUT_VALUE 31:0
+
+#define NVB197_SET_DST_BLOCK_SIZE 0x0194
+#define NVB197_SET_DST_BLOCK_SIZE_WIDTH 3:0
+#define NVB197_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVB197_SET_DST_BLOCK_SIZE_HEIGHT 7:4
+#define NVB197_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVB197_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVB197_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVB197_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVB197_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVB197_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVB197_SET_DST_BLOCK_SIZE_DEPTH 11:8
+#define NVB197_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+#define NVB197_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001
+#define NVB197_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002
+#define NVB197_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003
+#define NVB197_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVB197_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005
+
+#define NVB197_SET_DST_WIDTH 0x0198
+#define NVB197_SET_DST_WIDTH_V 31:0
+
+#define NVB197_SET_DST_HEIGHT 0x019c
+#define NVB197_SET_DST_HEIGHT_V 31:0
+
+#define NVB197_SET_DST_DEPTH 0x01a0
+#define NVB197_SET_DST_DEPTH_V 31:0
+
+#define NVB197_SET_DST_LAYER 0x01a4
+#define NVB197_SET_DST_LAYER_V 31:0
+
+#define NVB197_SET_DST_ORIGIN_BYTES_X 0x01a8
+#define NVB197_SET_DST_ORIGIN_BYTES_X_V 19:0
+
+#define NVB197_SET_DST_ORIGIN_SAMPLES_Y 0x01ac
+#define NVB197_SET_DST_ORIGIN_SAMPLES_Y_V 15:0
+
+#define NVB197_LAUNCH_DMA 0x01b0
+#define NVB197_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0
+#define NVB197_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NVB197_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001
+#define NVB197_LAUNCH_DMA_COMPLETION_TYPE 5:4
+#define NVB197_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000
+#define NVB197_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001
+#define NVB197_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002
+#define NVB197_LAUNCH_DMA_INTERRUPT_TYPE 9:8
+#define NVB197_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000
+#define NVB197_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001
+#define NVB197_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12
+#define NVB197_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000
+#define NVB197_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001
+#define NVB197_LAUNCH_DMA_REDUCTION_ENABLE 1:1
+#define NVB197_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000
+#define NVB197_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001
+#define NVB197_LAUNCH_DMA_REDUCTION_OP 15:13
+#define NVB197_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000
+#define NVB197_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001
+#define NVB197_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002
+#define NVB197_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003
+#define NVB197_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004
+#define NVB197_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005
+#define NVB197_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006
+#define NVB197_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007
+#define NVB197_LAUNCH_DMA_REDUCTION_FORMAT 3:2
+#define NVB197_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000
+#define NVB197_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001
+#define NVB197_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6
+#define NVB197_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000
+#define NVB197_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001
+
+#define NVB197_LOAD_INLINE_DATA 0x01b4
+#define NVB197_LOAD_INLINE_DATA_V 31:0
+
+#define NVB197_SET_I2M_SEMAPHORE_A 0x01dc
+#define NVB197_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0
+
+#define NVB197_SET_I2M_SEMAPHORE_B 0x01e0
+#define NVB197_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0
+
+#define NVB197_SET_I2M_SEMAPHORE_C 0x01e4
+#define NVB197_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0
+
+#define NVB197_SET_I2M_SPARE_NOOP00 0x01f0
+#define NVB197_SET_I2M_SPARE_NOOP00_V 31:0
+
+#define NVB197_SET_I2M_SPARE_NOOP01 0x01f4
+#define NVB197_SET_I2M_SPARE_NOOP01_V 31:0
+
+#define NVB197_SET_I2M_SPARE_NOOP02 0x01f8
+#define NVB197_SET_I2M_SPARE_NOOP02_V 31:0
+
+#define NVB197_SET_I2M_SPARE_NOOP03 0x01fc
+#define NVB197_SET_I2M_SPARE_NOOP03_V 31:0
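The LINE_LENGTH_IN/LINE_COUNT/OFFSET_OUT/PITCH_OUT/LAUNCH_DMA/LOAD_INLINE_DATA group above forms the class's inline-to-memory (I2M) path. A sketch of a pitch-linear upload, reusing the hypothetical push() helper; the method ordering is an assumption read off the defines, not a documented recipe:

```c
#include "nvtypes.h"

extern void push(NvU32 method, NvU32 data); /* hypothetical pushbuffer helper */

/* Write byteCount bytes (caller pads 'words' to a whole number of
 * 32-bit words) to GPU virtual address dstVa as a single line. */
static void i2m_upload(NvU64 dstVa, const NvU32 *words, NvU32 byteCount)
{
    NvU32 i;

    push(NVB197_LINE_LENGTH_IN, byteCount);              /* one line, byteCount bytes */
    push(NVB197_LINE_COUNT, 1);
    push(NVB197_OFFSET_OUT_UPPER, (NvU32)(dstVa >> 32)); /* only bits 7:0 are used */
    push(NVB197_OFFSET_OUT, (NvU32)dstVa);
    push(NVB197_PITCH_OUT, byteCount);

    /* LAUNCH_DMA word: DST_MEMORY_LAYOUT_PITCH (bit 0) = 1 and
     * COMPLETION_TYPE_FLUSH_ONLY (field 5:4) = 1, i.e. 0x11. */
    push(NVB197_LAUNCH_DMA, (1U << 0) | (1U << 4));

    for (i = 0; i < (byteCount + 3) / 4; i++)
        push(NVB197_LOAD_INLINE_DATA, words[i]);
}
```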
NVB197_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVB197_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVB197_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVB197_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVB197_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVB197_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVB197_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVB197_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVB197_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVB197_SET_I2M_SPARE_NOOP03 0x01fc +#define NVB197_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVB197_RUN_DS_NOW 0x0200 +#define NVB197_RUN_DS_NOW_V 31:0 + +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS 0x0204 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD 4:0 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_INSTANTANEOUS 0x00000000 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16 0x00000001 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32 0x00000002 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__64 0x00000003 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__128 0x00000004 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__256 0x00000005 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__512 0x00000006 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1024 0x00000007 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2048 0x00000008 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4096 0x00000009 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__8192 0x0000000A +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16384 0x0000000B +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32768 0x0000000C +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__65536 0x0000000D +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__131072 0x0000000E +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__262144 0x0000000F +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__524288 0x00000010 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1048576 0x00000011 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2097152 0x00000012 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4194304 0x00000013 +#define NVB197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_LATEZ_ALWAYS 0x0000001F + +#define NVB197_SET_ALIASED_LINE_WIDTH_ENABLE 0x020c +#define NVB197_SET_ALIASED_LINE_WIDTH_ENABLE_V 0:0 +#define NVB197_SET_ALIASED_LINE_WIDTH_ENABLE_V_FALSE 0x00000000 +#define NVB197_SET_ALIASED_LINE_WIDTH_ENABLE_V_TRUE 0x00000001 + +#define NVB197_SET_API_MANDATED_EARLY_Z 0x0210 +#define NVB197_SET_API_MANDATED_EARLY_Z_ENABLE 0:0 +#define NVB197_SET_API_MANDATED_EARLY_Z_ENABLE_FALSE 0x00000000 +#define NVB197_SET_API_MANDATED_EARLY_Z_ENABLE_TRUE 0x00000001 + +#define NVB197_SET_GS_DM_FIFO 0x0214 +#define NVB197_SET_GS_DM_FIFO_SIZE_RASTER_ON 12:0 +#define NVB197_SET_GS_DM_FIFO_SIZE_RASTER_OFF 28:16 +#define NVB197_SET_GS_DM_FIFO_SPILL_ENABLED 31:31 
+#define NVB197_SET_GS_DM_FIFO_SPILL_ENABLED_FALSE 0x00000000 +#define NVB197_SET_GS_DM_FIFO_SPILL_ENABLED_TRUE 0x00000001 + +#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS 0x0218 +#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY 5:4 +#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVB197_INVALIDATE_SHADER_CACHES 0x021c +#define NVB197_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVB197_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVB197_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVB197_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVB197_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVB197_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVB197_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVB197_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVB197_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVB197_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVB197_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVB197_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVB197_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVB197_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVB197_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVB197_INCREMENT_SYNC_POINT 0x02c8 +#define NVB197_INCREMENT_SYNC_POINT_INDEX 11:0 +#define NVB197_INCREMENT_SYNC_POINT_CLEAN_L2 16:16 +#define NVB197_INCREMENT_SYNC_POINT_CLEAN_L2_FALSE 0x00000000 +#define NVB197_INCREMENT_SYNC_POINT_CLEAN_L2_TRUE 0x00000001 +#define NVB197_INCREMENT_SYNC_POINT_CONDITION 20:20 +#define NVB197_INCREMENT_SYNC_POINT_CONDITION_STREAM_OUT_WRITES_DONE 0x00000000 +#define NVB197_INCREMENT_SYNC_POINT_CONDITION_ROP_WRITES_DONE 0x00000001 + +#define NVB197_SET_PRIM_CIRCULAR_BUFFER_THROTTLE 0x02d0 +#define NVB197_SET_PRIM_CIRCULAR_BUFFER_THROTTLE_PRIM_AREA 21:0 + +#define NVB197_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE 0x02d4 +#define NVB197_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE_V 0:0 + +#define NVB197_SET_SURFACE_CLIP_ID_BLOCK_SIZE 0x02d8 +#define NVB197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH 3:0 +#define NVB197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVB197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT 7:4 +#define NVB197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVB197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVB197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVB197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVB197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVB197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVB197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH 11:8 +#define NVB197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 + +#define NVB197_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dc +#define NVB197_SET_ALPHA_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 13:0 + +#define NVB197_DECOMPRESS_SURFACE 0x02e0 +#define NVB197_DECOMPRESS_SURFACE_MRT_SELECT 2:0 +#define NVB197_DECOMPRESS_SURFACE_RT_ARRAY_INDEX 19:4 + +#define NVB197_SET_ZCULL_ROP_BYPASS 0x02e4 +#define NVB197_SET_ZCULL_ROP_BYPASS_ENABLE 0:0 +#define NVB197_SET_ZCULL_ROP_BYPASS_ENABLE_FALSE 0x00000000 +#define 
NVB197_SET_ZCULL_ROP_BYPASS_ENABLE_TRUE 0x00000001 +#define NVB197_SET_ZCULL_ROP_BYPASS_NO_STALL 4:4 +#define NVB197_SET_ZCULL_ROP_BYPASS_NO_STALL_FALSE 0x00000000 +#define NVB197_SET_ZCULL_ROP_BYPASS_NO_STALL_TRUE 0x00000001 +#define NVB197_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING 8:8 +#define NVB197_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_FALSE 0x00000000 +#define NVB197_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_TRUE 0x00000001 +#define NVB197_SET_ZCULL_ROP_BYPASS_THRESHOLD 15:12 + +#define NVB197_SET_ZCULL_SUBREGION 0x02e8 +#define NVB197_SET_ZCULL_SUBREGION_ENABLE 0:0 +#define NVB197_SET_ZCULL_SUBREGION_ENABLE_FALSE 0x00000000 +#define NVB197_SET_ZCULL_SUBREGION_ENABLE_TRUE 0x00000001 +#define NVB197_SET_ZCULL_SUBREGION_NORMALIZED_ALIQUOTS 27:4 + +#define NVB197_SET_RASTER_BOUNDING_BOX 0x02ec +#define NVB197_SET_RASTER_BOUNDING_BOX_MODE 0:0 +#define NVB197_SET_RASTER_BOUNDING_BOX_MODE_BOUNDING_BOX 0x00000000 +#define NVB197_SET_RASTER_BOUNDING_BOX_MODE_FULL_VIEWPORT 0x00000001 +#define NVB197_SET_RASTER_BOUNDING_BOX_PAD 11:4 + +#define NVB197_PEER_SEMAPHORE_RELEASE 0x02f0 +#define NVB197_PEER_SEMAPHORE_RELEASE_V 31:0 + +#define NVB197_SET_ITERATED_BLEND_OPTIMIZATION 0x02f4 +#define NVB197_SET_ITERATED_BLEND_OPTIMIZATION_NOOP 1:0 +#define NVB197_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_NEVER 0x00000000 +#define NVB197_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0000 0x00000001 +#define NVB197_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_ALPHA_0 0x00000002 +#define NVB197_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0001 0x00000003 + +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION 0x02f8 +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_SUBREGION_ID 7:0 +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_ALIQUOTS 23:8 +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT 27:24 +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16X2_4X4 0x00000000 +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X16_4X4 0x00000001 +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X2 0x00000002 +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_2X4 0x00000003 +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X8_4X4 0x00000004 +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_4X2 0x00000005 +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_2X4 0x00000006 +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X8 0x00000007 +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_2X2 0x00000008 +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_4X2 0x00000009 +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_2X4 0x0000000A +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_8X8_2X2 0x0000000B +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_1X1 0x0000000C +#define NVB197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_NONE 0x0000000F + +#define NVB197_ASSIGN_ZCULL_SUBREGIONS 0x02fc +#define NVB197_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM 1:0 +#define NVB197_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Static 0x00000000 +#define NVB197_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Adaptive 0x00000001 + +#define NVB197_SET_PS_OUTPUT_SAMPLE_MASK_USAGE 0x0300 +#define NVB197_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE 0:0 +#define NVB197_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_FALSE 0x00000000 +#define NVB197_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_TRUE 0x00000001 +#define NVB197_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE 1:1 +#define NVB197_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000 +#define 
NVB197_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001 + +#define NVB197_DRAW_ZERO_INDEX 0x0304 +#define NVB197_DRAW_ZERO_INDEX_COUNT 31:0 + +#define NVB197_SET_L1_CONFIGURATION 0x0308 +#define NVB197_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY 2:0 +#define NVB197_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_16KB 0x00000001 +#define NVB197_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB 0x00000003 + +#define NVB197_SET_RENDER_ENABLE_CONTROL 0x030c +#define NVB197_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER 0:0 +#define NVB197_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_FALSE 0x00000000 +#define NVB197_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_TRUE 0x00000001 + +#define NVB197_SET_SPA_VERSION 0x0310 +#define NVB197_SET_SPA_VERSION_MINOR 7:0 +#define NVB197_SET_SPA_VERSION_MAJOR 15:8 + +#define NVB197_SET_IEEE_CLEAN_UPDATE 0x0314 +#define NVB197_SET_IEEE_CLEAN_UPDATE_ENABLE 0:0 +#define NVB197_SET_IEEE_CLEAN_UPDATE_ENABLE_FALSE 0x00000000 +#define NVB197_SET_IEEE_CLEAN_UPDATE_ENABLE_TRUE 0x00000001 + +#define NVB197_SET_SNAP_GRID_LINE 0x0318 +#define NVB197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL 3:0 +#define NVB197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001 +#define NVB197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002 +#define NVB197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003 +#define NVB197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004 +#define NVB197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005 +#define NVB197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006 +#define NVB197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007 +#define NVB197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008 +#define NVB197_SET_SNAP_GRID_LINE_ROUNDING_MODE 8:8 +#define NVB197_SET_SNAP_GRID_LINE_ROUNDING_MODE_RTNE 0x00000000 +#define NVB197_SET_SNAP_GRID_LINE_ROUNDING_MODE_TESLA 0x00000001 + +#define NVB197_SET_SNAP_GRID_NON_LINE 0x031c +#define NVB197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL 3:0 +#define NVB197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001 +#define NVB197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002 +#define NVB197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003 +#define NVB197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004 +#define NVB197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005 +#define NVB197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006 +#define NVB197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007 +#define NVB197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008 +#define NVB197_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE 8:8 +#define NVB197_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_RTNE 0x00000000 +#define NVB197_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_TESLA 0x00000001 + +#define NVB197_SET_TESSELLATION_PARAMETERS 0x0320 +#define NVB197_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE 1:0 +#define NVB197_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_ISOLINE 0x00000000 +#define NVB197_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_TRIANGLE 0x00000001 +#define NVB197_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_QUAD 0x00000002 +#define NVB197_SET_TESSELLATION_PARAMETERS_SPACING 5:4 +#define NVB197_SET_TESSELLATION_PARAMETERS_SPACING_INTEGER 0x00000000 +#define NVB197_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_ODD 0x00000001 +#define NVB197_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_EVEN 0x00000002 +#define 
NVB197_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES 9:8 +#define NVB197_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_POINTS 0x00000000 +#define NVB197_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_LINES 0x00000001 +#define NVB197_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CW 0x00000002 +#define NVB197_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CCW 0x00000003 + +#define NVB197_SET_TESSELLATION_LOD_U0_OR_DENSITY 0x0324 +#define NVB197_SET_TESSELLATION_LOD_U0_OR_DENSITY_V 31:0 + +#define NVB197_SET_TESSELLATION_LOD_V0_OR_DETAIL 0x0328 +#define NVB197_SET_TESSELLATION_LOD_V0_OR_DETAIL_V 31:0 + +#define NVB197_SET_TESSELLATION_LOD_U1_OR_W0 0x032c +#define NVB197_SET_TESSELLATION_LOD_U1_OR_W0_V 31:0 + +#define NVB197_SET_TESSELLATION_LOD_V1 0x0330 +#define NVB197_SET_TESSELLATION_LOD_V1_V 31:0 + +#define NVB197_SET_TG_LOD_INTERIOR_U 0x0334 +#define NVB197_SET_TG_LOD_INTERIOR_U_V 31:0 + +#define NVB197_SET_TG_LOD_INTERIOR_V 0x0338 +#define NVB197_SET_TG_LOD_INTERIOR_V_V 31:0 + +#define NVB197_RESERVED_TG07 0x033c +#define NVB197_RESERVED_TG07_V 0:0 + +#define NVB197_RESERVED_TG08 0x0340 +#define NVB197_RESERVED_TG08_V 0:0 + +#define NVB197_RESERVED_TG09 0x0344 +#define NVB197_RESERVED_TG09_V 0:0 + +#define NVB197_RESERVED_TG10 0x0348 +#define NVB197_RESERVED_TG10_V 0:0 + +#define NVB197_RESERVED_TG11 0x034c +#define NVB197_RESERVED_TG11_V 0:0 + +#define NVB197_RESERVED_TG12 0x0350 +#define NVB197_RESERVED_TG12_V 0:0 + +#define NVB197_RESERVED_TG13 0x0354 +#define NVB197_RESERVED_TG13_V 0:0 + +#define NVB197_RESERVED_TG14 0x0358 +#define NVB197_RESERVED_TG14_V 0:0 + +#define NVB197_RESERVED_TG15 0x035c +#define NVB197_RESERVED_TG15_V 0:0 + +#define NVB197_SET_SUBTILING_PERF_KNOB_A 0x0360 +#define NVB197_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_REGISTER_FILE_PER_SUBTILE 7:0 +#define NVB197_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_PIXEL_OUTPUT_BUFFER_PER_SUBTILE 15:8 +#define NVB197_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_TRIANGLE_RAM_PER_SUBTILE 23:16 +#define NVB197_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_MAX_QUADS_PER_SUBTILE 31:24 + +#define NVB197_SET_SUBTILING_PERF_KNOB_B 0x0364 +#define NVB197_SET_SUBTILING_PERF_KNOB_B_FRACTION_OF_MAX_PRIMITIVES_PER_SUBTILE 7:0 + +#define NVB197_SET_SUBTILING_PERF_KNOB_C 0x0368 +#define NVB197_SET_SUBTILING_PERF_KNOB_C_RESERVED 0:0 + +#define NVB197_SET_ZCULL_SUBREGION_TO_REPORT 0x036c +#define NVB197_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE 0:0 +#define NVB197_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_FALSE 0x00000000 +#define NVB197_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_TRUE 0x00000001 +#define NVB197_SET_ZCULL_SUBREGION_TO_REPORT_SUBREGION_ID 11:4 + +#define NVB197_SET_ZCULL_SUBREGION_REPORT_TYPE 0x0370 +#define NVB197_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE 0:0 +#define NVB197_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_FALSE 0x00000000 +#define NVB197_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_TRUE 0x00000001 +#define NVB197_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE 6:4 +#define NVB197_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST 0x00000000 +#define NVB197_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_NO_ACCEPT 0x00000001 +#define NVB197_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_LATE_Z 0x00000002 +#define NVB197_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_STENCIL_TEST 0x00000003 + +#define NVB197_SET_BALANCED_PRIMITIVE_WORKLOAD 0x0374 +#define NVB197_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE 0:0 +#define NVB197_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_FALSE 0x00000000 +#define 
NVB197_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_TRUE 0x00000001 +#define NVB197_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE 4:4 +#define NVB197_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_FALSE 0x00000000 +#define NVB197_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_TRUE 0x00000001 + +#define NVB197_SET_MAX_PATCHES_PER_BATCH 0x0378 +#define NVB197_SET_MAX_PATCHES_PER_BATCH_V 5:0 + +#define NVB197_SET_RASTER_ENABLE 0x037c +#define NVB197_SET_RASTER_ENABLE_V 0:0 +#define NVB197_SET_RASTER_ENABLE_V_FALSE 0x00000000 +#define NVB197_SET_RASTER_ENABLE_V_TRUE 0x00000001 + +#define NVB197_SET_STREAM_OUT_BUFFER_ENABLE(j) (0x0380+(j)*32) +#define NVB197_SET_STREAM_OUT_BUFFER_ENABLE_V 0:0 +#define NVB197_SET_STREAM_OUT_BUFFER_ENABLE_V_FALSE 0x00000000 +#define NVB197_SET_STREAM_OUT_BUFFER_ENABLE_V_TRUE 0x00000001 + +#define NVB197_SET_STREAM_OUT_BUFFER_ADDRESS_A(j) (0x0384+(j)*32) +#define NVB197_SET_STREAM_OUT_BUFFER_ADDRESS_A_UPPER 7:0 + +#define NVB197_SET_STREAM_OUT_BUFFER_ADDRESS_B(j) (0x0388+(j)*32) +#define NVB197_SET_STREAM_OUT_BUFFER_ADDRESS_B_LOWER 31:0 + +#define NVB197_SET_STREAM_OUT_BUFFER_SIZE(j) (0x038c+(j)*32) +#define NVB197_SET_STREAM_OUT_BUFFER_SIZE_BYTES 31:0 + +#define NVB197_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER(j) (0x0390+(j)*32) +#define NVB197_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER_START_OFFSET 31:0 + +#define NVB197_SET_STREAM_OUT_CONTROL_STREAM(j) (0x0700+(j)*16) +#define NVB197_SET_STREAM_OUT_CONTROL_STREAM_SELECT 1:0 + +#define NVB197_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT(j) (0x0704+(j)*16) +#define NVB197_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT_MAX 7:0 + +#define NVB197_SET_STREAM_OUT_CONTROL_STRIDE(j) (0x0708+(j)*16) +#define NVB197_SET_STREAM_OUT_CONTROL_STRIDE_BYTES 31:0 + +#define NVB197_SET_RASTER_INPUT 0x0740 +#define NVB197_SET_RASTER_INPUT_STREAM_SELECT 1:0 + +#define NVB197_SET_STREAM_OUTPUT 0x0744 +#define NVB197_SET_STREAM_OUTPUT_ENABLE 0:0 +#define NVB197_SET_STREAM_OUTPUT_ENABLE_FALSE 0x00000000 +#define NVB197_SET_STREAM_OUTPUT_ENABLE_TRUE 0x00000001 + +#define NVB197_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE 0x0748 +#define NVB197_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE 0:0 +#define NVB197_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_FALSE 0x00000000 +#define NVB197_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_TRUE 0x00000001 + +#define NVB197_SET_ALPHA_FRACTION 0x074c +#define NVB197_SET_ALPHA_FRACTION_V 7:0 + +#define NVB197_SET_HYBRID_ANTI_ALIAS_CONTROL 0x0754 +#define NVB197_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES 3:0 +#define NVB197_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID 4:4 +#define NVB197_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_FRAGMENT 0x00000000 +#define NVB197_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_PASS 0x00000001 +#define NVB197_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES_EXTENDED 5:5 + +#define NVB197_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c +#define NVB197_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0 + +#define NVB197_SET_SHADER_LOCAL_MEMORY_A 0x0790 +#define NVB197_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0 + +#define NVB197_SET_SHADER_LOCAL_MEMORY_B 0x0794 +#define NVB197_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0 + +#define NVB197_SET_SHADER_LOCAL_MEMORY_C 0x0798 +#define NVB197_SET_SHADER_LOCAL_MEMORY_C_SIZE_UPPER 5:0 + +#define NVB197_SET_SHADER_LOCAL_MEMORY_D 0x079c +#define NVB197_SET_SHADER_LOCAL_MEMORY_D_SIZE_LOWER 31:0 + +#define NVB197_SET_SHADER_LOCAL_MEMORY_E 0x07a0 +#define NVB197_SET_SHADER_LOCAL_MEMORY_E_DEFAULT_SIZE_PER_WARP 25:0 + +#define 
NVB197_SET_COLOR_ZERO_BANDWIDTH_CLEAR 0x07a4 +#define NVB197_SET_COLOR_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0 + +#define NVB197_SET_Z_ZERO_BANDWIDTH_CLEAR 0x07a8 +#define NVB197_SET_Z_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0 + +#define NVB197_SET_ISBE_SAVE_RESTORE_PROGRAM 0x07ac +#define NVB197_SET_ISBE_SAVE_RESTORE_PROGRAM_OFFSET 31:0 + +#define NVB197_SET_ZCULL_REGION_SIZE_A 0x07c0 +#define NVB197_SET_ZCULL_REGION_SIZE_A_WIDTH 15:0 + +#define NVB197_SET_ZCULL_REGION_SIZE_B 0x07c4 +#define NVB197_SET_ZCULL_REGION_SIZE_B_HEIGHT 15:0 + +#define NVB197_SET_ZCULL_REGION_SIZE_C 0x07c8 +#define NVB197_SET_ZCULL_REGION_SIZE_C_DEPTH 15:0 + +#define NVB197_SET_ZCULL_REGION_PIXEL_OFFSET_C 0x07cc +#define NVB197_SET_ZCULL_REGION_PIXEL_OFFSET_C_DEPTH 15:0 + +#define NVB197_SET_CULL_BEFORE_FETCH 0x07dc +#define NVB197_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE 0:0 +#define NVB197_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_FALSE 0x00000000 +#define NVB197_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_TRUE 0x00000001 + +#define NVB197_SET_ZCULL_REGION_LOCATION 0x07e0 +#define NVB197_SET_ZCULL_REGION_LOCATION_START_ALIQUOT 15:0 +#define NVB197_SET_ZCULL_REGION_LOCATION_ALIQUOT_COUNT 31:16 + +#define NVB197_SET_ZCULL_REGION_ALIQUOTS 0x07e4 +#define NVB197_SET_ZCULL_REGION_ALIQUOTS_PER_LAYER 15:0 + +#define NVB197_SET_ZCULL_STORAGE_A 0x07e8 +#define NVB197_SET_ZCULL_STORAGE_A_ADDRESS_UPPER 7:0 + +#define NVB197_SET_ZCULL_STORAGE_B 0x07ec +#define NVB197_SET_ZCULL_STORAGE_B_ADDRESS_LOWER 31:0 + +#define NVB197_SET_ZCULL_STORAGE_C 0x07f0 +#define NVB197_SET_ZCULL_STORAGE_C_LIMIT_ADDRESS_UPPER 7:0 + +#define NVB197_SET_ZCULL_STORAGE_D 0x07f4 +#define NVB197_SET_ZCULL_STORAGE_D_LIMIT_ADDRESS_LOWER 31:0 + +#define NVB197_SET_ZT_READ_ONLY 0x07f8 +#define NVB197_SET_ZT_READ_ONLY_ENABLE_Z 0:0 +#define NVB197_SET_ZT_READ_ONLY_ENABLE_Z_FALSE 0x00000000 +#define NVB197_SET_ZT_READ_ONLY_ENABLE_Z_TRUE 0x00000001 +#define NVB197_SET_ZT_READ_ONLY_ENABLE_STENCIL 4:4 +#define NVB197_SET_ZT_READ_ONLY_ENABLE_STENCIL_FALSE 0x00000000 +#define NVB197_SET_ZT_READ_ONLY_ENABLE_STENCIL_TRUE 0x00000001 + +#define NVB197_SET_COLOR_TARGET_A(j) (0x0800+(j)*64) +#define NVB197_SET_COLOR_TARGET_A_OFFSET_UPPER 7:0 + +#define NVB197_SET_COLOR_TARGET_B(j) (0x0804+(j)*64) +#define NVB197_SET_COLOR_TARGET_B_OFFSET_LOWER 31:0 + +#define NVB197_SET_COLOR_TARGET_WIDTH(j) (0x0808+(j)*64) +#define NVB197_SET_COLOR_TARGET_WIDTH_V 27:0 + +#define NVB197_SET_COLOR_TARGET_HEIGHT(j) (0x080c+(j)*64) +#define NVB197_SET_COLOR_TARGET_HEIGHT_V 16:0 + +#define NVB197_SET_COLOR_TARGET_FORMAT(j) (0x0810+(j)*64) +#define NVB197_SET_COLOR_TARGET_FORMAT_V 7:0 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_DISABLED 0x00000000 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_AS32 0x000000C1 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_AU32 0x000000C2 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_X32 0x000000C4 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_X32 0x000000C5 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_R16_G16_B16_A16 0x000000C6 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RS16_GS16_BS16_AS16 0x000000C8 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RU16_GU16_BU16_AU16 0x000000C9 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RF32_GF32 0x000000CB 
+#define NVB197_SET_COLOR_TARGET_FORMAT_V_RS32_GS32 0x000000CC +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RU32_GU32 0x000000CD +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE +#define NVB197_SET_COLOR_TARGET_FORMAT_V_A8R8G8B8 0x000000CF +#define NVB197_SET_COLOR_TARGET_FORMAT_V_A8RL8GL8BL8 0x000000D0 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_A2B10G10R10 0x000000D1 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_AU2BU10GU10RU10 0x000000D2 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_A8B8G8R8 0x000000D5 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_A8BL8GL8RL8 0x000000D6 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_AN8BN8GN8RN8 0x000000D7 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_AS8BS8GS8RS8 0x000000D8 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_AU8BU8GU8RU8 0x000000D9 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_R16_G16 0x000000DA +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RN16_GN16 0x000000DB +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RS16_GS16 0x000000DC +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RU16_GU16 0x000000DD +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RF16_GF16 0x000000DE +#define NVB197_SET_COLOR_TARGET_FORMAT_V_A2R10G10B10 0x000000DF +#define NVB197_SET_COLOR_TARGET_FORMAT_V_BF10GF11RF11 0x000000E0 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RS32 0x000000E3 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RU32 0x000000E4 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RF32 0x000000E5 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_X8R8G8B8 0x000000E6 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_X8RL8GL8BL8 0x000000E7 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_R5G6B5 0x000000E8 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_A1R5G5B5 0x000000E9 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_G8R8 0x000000EA +#define NVB197_SET_COLOR_TARGET_FORMAT_V_GN8RN8 0x000000EB +#define NVB197_SET_COLOR_TARGET_FORMAT_V_GS8RS8 0x000000EC +#define NVB197_SET_COLOR_TARGET_FORMAT_V_GU8RU8 0x000000ED +#define NVB197_SET_COLOR_TARGET_FORMAT_V_R16 0x000000EE +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RN16 0x000000EF +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RS16 0x000000F0 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RU16 0x000000F1 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RF16 0x000000F2 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_R8 0x000000F3 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RN8 0x000000F4 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RS8 0x000000F5 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RU8 0x000000F6 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_A8 0x000000F7 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_X1R5G5B5 0x000000F8 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_X8B8G8R8 0x000000F9 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_X8BL8GL8RL8 0x000000FA +#define NVB197_SET_COLOR_TARGET_FORMAT_V_Z1R5G5B5 0x000000FB +#define NVB197_SET_COLOR_TARGET_FORMAT_V_O1R5G5B5 0x000000FC +#define NVB197_SET_COLOR_TARGET_FORMAT_V_Z8R8G8B8 0x000000FD +#define NVB197_SET_COLOR_TARGET_FORMAT_V_O8R8G8B8 0x000000FE +#define NVB197_SET_COLOR_TARGET_FORMAT_V_R32 0x000000FF +#define NVB197_SET_COLOR_TARGET_FORMAT_V_A16 0x00000040 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_AF16 0x00000041 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_AF32 0x00000042 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_A8R8 0x00000043 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_R16_A16 0x00000044 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RF16_AF16 0x00000045 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_RF32_AF32 0x00000046 +#define NVB197_SET_COLOR_TARGET_FORMAT_V_B8G8R8A8 0x00000047 + +#define NVB197_SET_COLOR_TARGET_MEMORY(j) (0x0814+(j)*64) +#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH 3:0 
+#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH_ONE_GOB 0x00000000
+#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT 7:4
+#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_ONE_GOB 0x00000000
+#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_TWO_GOBS 0x00000001
+#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_FOUR_GOBS 0x00000002
+#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH 11:8
+#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_ONE_GOB 0x00000000
+#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_TWO_GOBS 0x00000001
+#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_FOUR_GOBS 0x00000002
+#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_EIGHT_GOBS 0x00000003
+#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVB197_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005
+#define NVB197_SET_COLOR_TARGET_MEMORY_LAYOUT 12:12
+#define NVB197_SET_COLOR_TARGET_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NVB197_SET_COLOR_TARGET_MEMORY_LAYOUT_PITCH 0x00000001
+#define NVB197_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL 16:16
+#define NVB197_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000
+#define NVB197_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_DEPTH_SIZE 0x00000001
+
+#define NVB197_SET_COLOR_TARGET_THIRD_DIMENSION(j) (0x0818+(j)*64)
+#define NVB197_SET_COLOR_TARGET_THIRD_DIMENSION_V 27:0
+
+#define NVB197_SET_COLOR_TARGET_ARRAY_PITCH(j) (0x081c+(j)*64)
+#define NVB197_SET_COLOR_TARGET_ARRAY_PITCH_V 31:0
+
+#define NVB197_SET_COLOR_TARGET_LAYER(j) (0x0820+(j)*64)
+#define NVB197_SET_COLOR_TARGET_LAYER_OFFSET 15:0
+
+#define NVB197_SET_COLOR_TARGET_MARK(j) (0x0824+(j)*64)
+#define NVB197_SET_COLOR_TARGET_MARK_IEEE_CLEAN 0:0
+#define NVB197_SET_COLOR_TARGET_MARK_IEEE_CLEAN_FALSE 0x00000000
+#define NVB197_SET_COLOR_TARGET_MARK_IEEE_CLEAN_TRUE 0x00000001
+
+#define NVB197_SET_VIEWPORT_SCALE_X(j) (0x0a00+(j)*32)
+#define NVB197_SET_VIEWPORT_SCALE_X_V 31:0
+
+#define NVB197_SET_VIEWPORT_SCALE_Y(j) (0x0a04+(j)*32)
+#define NVB197_SET_VIEWPORT_SCALE_Y_V 31:0
+
+#define NVB197_SET_VIEWPORT_SCALE_Z(j) (0x0a08+(j)*32)
+#define NVB197_SET_VIEWPORT_SCALE_Z_V 31:0
+
+#define NVB197_SET_VIEWPORT_OFFSET_X(j) (0x0a0c+(j)*32)
+#define NVB197_SET_VIEWPORT_OFFSET_X_V 31:0
+
+#define NVB197_SET_VIEWPORT_OFFSET_Y(j) (0x0a10+(j)*32)
+#define NVB197_SET_VIEWPORT_OFFSET_Y_V 31:0
+
+#define NVB197_SET_VIEWPORT_OFFSET_Z(j) (0x0a14+(j)*32)
+#define NVB197_SET_VIEWPORT_OFFSET_Z_V 31:0
+
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE(j) (0x0a18+(j)*32)
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_X 2:0
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_X 0x00000000
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_X 0x00000001
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Y 0x00000002
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Y 0x00000003
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Z 0x00000004
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Z 0x00000005
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_W 0x00000006
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_W 0x00000007
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y 6:4
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_X 0x00000000
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_X 0x00000001
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Y 0x00000002
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Y 0x00000003
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Z 0x00000004
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Z 0x00000005
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_W 0x00000006
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_W 0x00000007
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z 10:8
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_X 0x00000000
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_X 0x00000001
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Y 0x00000002
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Y 0x00000003
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Z 0x00000004
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Z 0x00000005
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_W 0x00000006
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_W 0x00000007
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_W 14:12
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_X 0x00000000
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_X 0x00000001
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Y 0x00000002
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Y 0x00000003
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Z 0x00000004
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Z 0x00000005
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_W 0x00000006
+#define NVB197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_W 0x00000007
+
+#define NVB197_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION(j) (0x0a1c+(j)*32)
+#define NVB197_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_X_BITS 4:0
+#define NVB197_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_Y_BITS 12:8
+
+#define NVB197_SET_VIEWPORT_CLIP_HORIZONTAL(j) (0x0c00+(j)*16)
+#define NVB197_SET_VIEWPORT_CLIP_HORIZONTAL_X0 15:0
+#define NVB197_SET_VIEWPORT_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVB197_SET_VIEWPORT_CLIP_VERTICAL(j) (0x0c04+(j)*16)
+#define NVB197_SET_VIEWPORT_CLIP_VERTICAL_Y0 15:0
+#define NVB197_SET_VIEWPORT_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVB197_SET_VIEWPORT_CLIP_MIN_Z(j) (0x0c08+(j)*16)
+#define NVB197_SET_VIEWPORT_CLIP_MIN_Z_V 31:0
+
+#define NVB197_SET_VIEWPORT_CLIP_MAX_Z(j) (0x0c0c+(j)*16)
+#define NVB197_SET_VIEWPORT_CLIP_MAX_Z_V 31:0
+
+#define NVB197_SET_WINDOW_CLIP_HORIZONTAL(j) (0x0d00+(j)*8)
+#define NVB197_SET_WINDOW_CLIP_HORIZONTAL_XMIN 15:0
+#define NVB197_SET_WINDOW_CLIP_HORIZONTAL_XMAX 31:16
+
+#define NVB197_SET_WINDOW_CLIP_VERTICAL(j) (0x0d04+(j)*8)
+#define NVB197_SET_WINDOW_CLIP_VERTICAL_YMIN 15:0
+#define NVB197_SET_WINDOW_CLIP_VERTICAL_YMAX 31:16
+
+#define NVB197_SET_CLIP_ID_EXTENT_X(j) (0x0d40+(j)*8)
+#define NVB197_SET_CLIP_ID_EXTENT_X_MINX 15:0
+#define NVB197_SET_CLIP_ID_EXTENT_X_WIDTH 31:16
+
+#define NVB197_SET_CLIP_ID_EXTENT_Y(j) (0x0d44+(j)*8)
+#define NVB197_SET_CLIP_ID_EXTENT_Y_MINY 15:0
+#define NVB197_SET_CLIP_ID_EXTENT_Y_HEIGHT 31:16
+
+#define NVB197_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK 0x0d60
+#define NVB197_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVB197_SET_API_VISIBLE_CALL_LIMIT 0x0d64
+#define NVB197_SET_API_VISIBLE_CALL_LIMIT_V 3:0
+#define NVB197_SET_API_VISIBLE_CALL_LIMIT_V__0 0x00000000
+#define NVB197_SET_API_VISIBLE_CALL_LIMIT_V__1 0x00000001
+#define NVB197_SET_API_VISIBLE_CALL_LIMIT_V__2 0x00000002
+#define NVB197_SET_API_VISIBLE_CALL_LIMIT_V__4 0x00000003
+#define NVB197_SET_API_VISIBLE_CALL_LIMIT_V__8 0x00000004
+#define NVB197_SET_API_VISIBLE_CALL_LIMIT_V__16 0x00000005
+#define NVB197_SET_API_VISIBLE_CALL_LIMIT_V__32 0x00000006
+#define NVB197_SET_API_VISIBLE_CALL_LIMIT_V__64 0x00000007
+#define NVB197_SET_API_VISIBLE_CALL_LIMIT_V__128 0x00000008
+#define NVB197_SET_API_VISIBLE_CALL_LIMIT_V_NO_CHECK 0x0000000F
+
+#define NVB197_SET_STATISTICS_COUNTER 0x0d68
+#define NVB197_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE 0:0
+#define NVB197_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVB197_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE 1:1
+#define NVB197_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVB197_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE 2:2
+#define NVB197_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVB197_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE 3:3
+#define NVB197_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVB197_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE 4:4
+#define NVB197_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVB197_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE 5:5
+#define NVB197_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVB197_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE 6:6
+#define NVB197_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_TRUE 0x00000001
+#define NVB197_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE 7:7
+#define NVB197_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVB197_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE 8:8
+#define NVB197_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVB197_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE 9:9
+#define NVB197_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVB197_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE 11:11
+#define NVB197_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVB197_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE 12:12
+#define NVB197_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVB197_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE 13:13
+#define NVB197_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVB197_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE 14:14
+#define NVB197_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVB197_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE 10:10
+#define NVB197_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_TRUE 0x00000001
+#define NVB197_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE 15:15
+#define NVB197_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_CLEAR_RECT_HORIZONTAL 0x0d6c
+#define NVB197_SET_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVB197_SET_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVB197_SET_CLEAR_RECT_VERTICAL 0x0d70
+#define NVB197_SET_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVB197_SET_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVB197_SET_VERTEX_ARRAY_START 0x0d74
+#define NVB197_SET_VERTEX_ARRAY_START_V 31:0
+
+#define NVB197_DRAW_VERTEX_ARRAY 0x0d78
+#define NVB197_DRAW_VERTEX_ARRAY_COUNT 31:0
+
+#define NVB197_SET_VIEWPORT_Z_CLIP 0x0d7c
+#define NVB197_SET_VIEWPORT_Z_CLIP_RANGE 0:0
+#define NVB197_SET_VIEWPORT_Z_CLIP_RANGE_NEGATIVE_W_TO_POSITIVE_W 0x00000000
+#define NVB197_SET_VIEWPORT_Z_CLIP_RANGE_ZERO_TO_POSITIVE_W 0x00000001
+
+#define NVB197_SET_COLOR_CLEAR_VALUE(i) (0x0d80+(i)*4)
+#define NVB197_SET_COLOR_CLEAR_VALUE_V 31:0
+
+#define NVB197_SET_Z_CLEAR_VALUE 0x0d90
+#define NVB197_SET_Z_CLEAR_VALUE_V 31:0
+
+#define NVB197_SET_SHADER_CACHE_CONTROL 0x0d94
+#define NVB197_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0
+#define NVB197_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000
+#define NVB197_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001
+
+#define NVB197_FORCE_TRANSITION_TO_BETA 0x0d98
+#define NVB197_FORCE_TRANSITION_TO_BETA_V 0:0
+
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_ENABLE 0x0d9c
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V 0:0
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_FALSE 0x00000000
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_TRUE 0x00000001
+
+#define NVB197_SET_STENCIL_CLEAR_VALUE 0x0da0
+#define NVB197_SET_STENCIL_CLEAR_VALUE_V 7:0
+
+#define NVB197_INVALIDATE_SHADER_CACHES_NO_WFI 0x0da4
+#define NVB197_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0
+#define NVB197_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000
+#define NVB197_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001
+#define NVB197_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4
+#define NVB197_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000
+#define NVB197_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001
+#define NVB197_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12
+#define NVB197_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000
+#define NVB197_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001
+
+#define NVB197_SET_ZCULL_SERIALIZATION 0x0da8
+#define NVB197_SET_ZCULL_SERIALIZATION_ENABLE 0:0
+#define NVB197_SET_ZCULL_SERIALIZATION_ENABLE_FALSE 0x00000000
+#define NVB197_SET_ZCULL_SERIALIZATION_ENABLE_TRUE 0x00000001
+#define NVB197_SET_ZCULL_SERIALIZATION_APPLIED 5:4
+#define NVB197_SET_ZCULL_SERIALIZATION_APPLIED_ALWAYS 0x00000000
+#define NVB197_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z 0x00000001
+#define NVB197_SET_ZCULL_SERIALIZATION_APPLIED_OUT_OF_GAMUT_Z 0x00000002
+#define NVB197_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z_OR_OUT_OF_GAMUT_Z 0x00000003
+
+#define NVB197_SET_FRONT_POLYGON_MODE 0x0dac
+#define NVB197_SET_FRONT_POLYGON_MODE_V 31:0
+#define NVB197_SET_FRONT_POLYGON_MODE_V_POINT 0x00001B00
+#define NVB197_SET_FRONT_POLYGON_MODE_V_LINE 0x00001B01
+#define NVB197_SET_FRONT_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVB197_SET_BACK_POLYGON_MODE 0x0db0
+#define NVB197_SET_BACK_POLYGON_MODE_V 31:0
+#define NVB197_SET_BACK_POLYGON_MODE_V_POINT 0x00001B00
+#define NVB197_SET_BACK_POLYGON_MODE_V_LINE 0x00001B01
+#define NVB197_SET_BACK_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVB197_SET_POLY_SMOOTH 0x0db4
+#define NVB197_SET_POLY_SMOOTH_ENABLE 0:0
+#define NVB197_SET_POLY_SMOOTH_ENABLE_FALSE 0x00000000
+#define NVB197_SET_POLY_SMOOTH_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_ZT_MARK 0x0db8
+#define NVB197_SET_ZT_MARK_IEEE_CLEAN 0:0
+#define NVB197_SET_ZT_MARK_IEEE_CLEAN_FALSE 0x00000000
+#define NVB197_SET_ZT_MARK_IEEE_CLEAN_TRUE 0x00000001
+
+#define NVB197_SET_ZCULL_DIR_FORMAT 0x0dbc
+#define NVB197_SET_ZCULL_DIR_FORMAT_ZDIR 15:0
+#define NVB197_SET_ZCULL_DIR_FORMAT_ZDIR_LESS 0x00000000
+#define NVB197_SET_ZCULL_DIR_FORMAT_ZDIR_GREATER 0x00000001
+#define NVB197_SET_ZCULL_DIR_FORMAT_ZFORMAT 31:16
+#define NVB197_SET_ZCULL_DIR_FORMAT_ZFORMAT_MSB 0x00000000
+#define NVB197_SET_ZCULL_DIR_FORMAT_ZFORMAT_FP 0x00000001
+#define NVB197_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZTRICK 0x00000002
+#define NVB197_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZF32_1 0x00000003
+
+#define NVB197_SET_POLY_OFFSET_POINT 0x0dc0
+#define NVB197_SET_POLY_OFFSET_POINT_ENABLE 0:0
+#define NVB197_SET_POLY_OFFSET_POINT_ENABLE_FALSE 0x00000000
+#define NVB197_SET_POLY_OFFSET_POINT_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_POLY_OFFSET_LINE 0x0dc4
+#define NVB197_SET_POLY_OFFSET_LINE_ENABLE 0:0
+#define NVB197_SET_POLY_OFFSET_LINE_ENABLE_FALSE 0x00000000
+#define NVB197_SET_POLY_OFFSET_LINE_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_POLY_OFFSET_FILL 0x0dc8
+#define NVB197_SET_POLY_OFFSET_FILL_ENABLE 0:0
+#define NVB197_SET_POLY_OFFSET_FILL_ENABLE_FALSE 0x00000000
+#define NVB197_SET_POLY_OFFSET_FILL_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_PATCH 0x0dcc
+#define NVB197_SET_PATCH_SIZE 7:0
+
+#define NVB197_SET_ITERATED_BLEND 0x0dd0
+#define NVB197_SET_ITERATED_BLEND_ENABLE 0:0
+#define NVB197_SET_ITERATED_BLEND_ENABLE_FALSE 0x00000000
+#define NVB197_SET_ITERATED_BLEND_ENABLE_TRUE 0x00000001
+#define NVB197_SET_ITERATED_BLEND_ALPHA_ENABLE 1:1
+#define NVB197_SET_ITERATED_BLEND_ALPHA_ENABLE_FALSE 0x00000000
+#define NVB197_SET_ITERATED_BLEND_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_ITERATED_BLEND_PASS 0x0dd4
+#define NVB197_SET_ITERATED_BLEND_PASS_COUNT 7:0
+
+#define NVB197_SET_ZCULL_CRITERION 0x0dd8
+#define NVB197_SET_ZCULL_CRITERION_SFUNC 7:0
+#define NVB197_SET_ZCULL_CRITERION_SFUNC_NEVER 0x00000000
+#define NVB197_SET_ZCULL_CRITERION_SFUNC_LESS 0x00000001
+#define NVB197_SET_ZCULL_CRITERION_SFUNC_EQUAL 0x00000002
+#define NVB197_SET_ZCULL_CRITERION_SFUNC_LEQUAL 0x00000003
+#define NVB197_SET_ZCULL_CRITERION_SFUNC_GREATER 0x00000004
+#define NVB197_SET_ZCULL_CRITERION_SFUNC_NOTEQUAL 0x00000005
+#define NVB197_SET_ZCULL_CRITERION_SFUNC_GEQUAL 0x00000006
+#define NVB197_SET_ZCULL_CRITERION_SFUNC_ALWAYS 0x00000007
+#define NVB197_SET_ZCULL_CRITERION_NO_INVALIDATE 8:8
+#define NVB197_SET_ZCULL_CRITERION_NO_INVALIDATE_FALSE 0x00000000
+#define NVB197_SET_ZCULL_CRITERION_NO_INVALIDATE_TRUE 0x00000001
+#define NVB197_SET_ZCULL_CRITERION_FORCE_MATCH 9:9
+#define NVB197_SET_ZCULL_CRITERION_FORCE_MATCH_FALSE 0x00000000
+#define NVB197_SET_ZCULL_CRITERION_FORCE_MATCH_TRUE 0x00000001
+#define NVB197_SET_ZCULL_CRITERION_SREF 23:16
+#define NVB197_SET_ZCULL_CRITERION_SMASK 31:24
+
+#define NVB197_PIXEL_SHADER_BARRIER 0x0de0
+#define NVB197_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE 0:0
+#define NVB197_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_FALSE 0x00000000
+#define NVB197_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_SM_TIMEOUT_INTERVAL 0x0de4
+#define NVB197_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0
+
+#define NVB197_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY 0x0de8
+#define NVB197_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE 0:0
+#define NVB197_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_FALSE 0x00000000
+#define NVB197_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_TRUE 0x00000001
+
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER 0x0df0
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER_V 7:0
+
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION 0x0df4
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC 2:0
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_FALSE 0x00000000
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_TRUE 0x00000001
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_EQ 0x00000002
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_NE 0x00000003
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LT 0x00000004
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LE 0x00000005
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GT 0x00000006
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GE 0x00000007
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION 5:3
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD_PRODUCTS 0x00000000
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUB_PRODUCTS 0x00000001
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MIN 0x00000002
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MAX 0x00000003
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_RCP 0x00000004
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD 0x00000005
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUBTRACT 0x00000006
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT 8:6
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT0 0x00000000
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT1 0x00000001
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT2 0x00000002
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT3 0x00000003
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT4 0x00000004
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT5 0x00000005
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT6 0x00000006
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT7 0x00000007
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT 11:9
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_RGB 0x00000000
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_RGB 0x00000001
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_AAA 0x00000002
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_AAA 0x00000003
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP0_RGB 0x00000004
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP1_RGB 0x00000005
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP2_RGB 0x00000006
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_PBR_RGB 0x00000007
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT 15:12
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO 0x00000000
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE 0x00000001
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_RGB 0x00000002
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_AAA 0x00000003
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_RGB 0x00000005
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_AAA 0x00000006
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP0_RGB 0x00000009
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP1_RGB 0x0000000A
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP2_RGB 0x0000000B
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_PBR_RGB 0x0000000C
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_CONSTANT_RGB 0x0000000D
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO_A_TIMES_B 0x0000000E
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT 18:16
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_RGB 0x00000000
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_RGB 0x00000001
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_AAA 0x00000002
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_AAA 0x00000003
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP0_RGB 0x00000004
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP1_RGB 0x00000005
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP2_RGB 0x00000006
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_PBR_RGB 0x00000007
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT 22:19
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO 0x00000000
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE 0x00000001
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_RGB 0x00000002
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_AAA 0x00000003
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_RGB 0x00000005
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_AAA 0x00000006
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP0_RGB 0x00000009
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP1_RGB 0x0000000A
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP2_RGB 0x0000000B
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_PBR_RGB 0x0000000C
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_CONSTANT_RGB 0x0000000D
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO_C_TIMES_D 0x0000000E
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE 25:23
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RGB 0x00000000
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GBR 0x00000001
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RRR 0x00000002
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GGG 0x00000003
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_BBB 0x00000004
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_R_TO_A 0x00000005
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK 27:26
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_RGB 0x00000000
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_R_ONLY 0x00000001
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_G_ONLY 0x00000002
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_B_ONLY 0x00000003
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT 29:28
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP0 0x00000000
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP1 0x00000001
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP2 0x00000002
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_NONE 0x00000003
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC 31:31
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_FALSE 0x00000000
+#define NVB197_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_TRUE 0x00000001
+
+#define NVB197_SET_WINDOW_OFFSET_X 0x0df8
+#define NVB197_SET_WINDOW_OFFSET_X_V 16:0
+
+#define NVB197_SET_WINDOW_OFFSET_Y 0x0dfc
+#define NVB197_SET_WINDOW_OFFSET_Y_V 17:0
+
+#define NVB197_SET_SCISSOR_ENABLE(j) (0x0e00+(j)*16)
+#define NVB197_SET_SCISSOR_ENABLE_V 0:0
+#define NVB197_SET_SCISSOR_ENABLE_V_FALSE 0x00000000
+#define NVB197_SET_SCISSOR_ENABLE_V_TRUE 0x00000001
+
+#define NVB197_SET_SCISSOR_HORIZONTAL(j) (0x0e04+(j)*16)
+#define NVB197_SET_SCISSOR_HORIZONTAL_XMIN 15:0
+#define NVB197_SET_SCISSOR_HORIZONTAL_XMAX 31:16
+
+#define NVB197_SET_SCISSOR_VERTICAL(j) (0x0e08+(j)*16)
+#define NVB197_SET_SCISSOR_VERTICAL_YMIN 15:0
+#define NVB197_SET_SCISSOR_VERTICAL_YMAX 31:16
+
+#define NVB197_SET_SELECT_MAXWELL_TEXTURE_HEADERS 0x0f10
+#define NVB197_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V 0:0
+#define NVB197_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_FALSE 0x00000000
+#define NVB197_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_TRUE 0x00000001
+
+#define NVB197_SET_VPC_PERF_KNOB 0x0f14
+#define NVB197_SET_VPC_PERF_KNOB_CULLED_SMALL_LINES 7:0
+#define NVB197_SET_VPC_PERF_KNOB_CULLED_SMALL_TRIANGLES 15:8
+#define NVB197_SET_VPC_PERF_KNOB_NONCULLED_LINES_AND_POINTS 23:16
+#define NVB197_SET_VPC_PERF_KNOB_NONCULLED_TRIANGLES 31:24
+
+#define NVB197_PM_LOCAL_TRIGGER 0x0f18
+#define NVB197_PM_LOCAL_TRIGGER_BOOKMARK 15:0
+
+#define NVB197_SET_POST_Z_PS_IMASK 0x0f1c
+#define NVB197_SET_POST_Z_PS_IMASK_ENABLE 0:0
+#define NVB197_SET_POST_Z_PS_IMASK_ENABLE_FALSE 0x00000000
+#define NVB197_SET_POST_Z_PS_IMASK_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_CONSTANT_COLOR_RENDERING 0x0f40
+#define NVB197_SET_CONSTANT_COLOR_RENDERING_ENABLE 0:0
+#define NVB197_SET_CONSTANT_COLOR_RENDERING_ENABLE_FALSE 0x00000000
+#define NVB197_SET_CONSTANT_COLOR_RENDERING_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_CONSTANT_COLOR_RENDERING_RED 0x0f44
+#define NVB197_SET_CONSTANT_COLOR_RENDERING_RED_V 31:0
+
+#define NVB197_SET_CONSTANT_COLOR_RENDERING_GREEN 0x0f48
+#define NVB197_SET_CONSTANT_COLOR_RENDERING_GREEN_V 31:0
+
+#define NVB197_SET_CONSTANT_COLOR_RENDERING_BLUE 0x0f4c
+#define NVB197_SET_CONSTANT_COLOR_RENDERING_BLUE_V 31:0
+
+#define NVB197_SET_CONSTANT_COLOR_RENDERING_ALPHA 0x0f50
+#define NVB197_SET_CONSTANT_COLOR_RENDERING_ALPHA_V 31:0
+
+#define NVB197_SET_BACK_STENCIL_FUNC_REF 0x0f54
+#define NVB197_SET_BACK_STENCIL_FUNC_REF_V 7:0
+
+#define NVB197_SET_BACK_STENCIL_MASK 0x0f58
+#define NVB197_SET_BACK_STENCIL_MASK_V 7:0
+
+#define NVB197_SET_BACK_STENCIL_FUNC_MASK 0x0f5c
+#define NVB197_SET_BACK_STENCIL_FUNC_MASK_V 7:0
+
+#define NVB197_SET_VERTEX_STREAM_SUBSTITUTE_A 0x0f84
+#define NVB197_SET_VERTEX_STREAM_SUBSTITUTE_A_ADDRESS_UPPER 7:0
+
+#define NVB197_SET_VERTEX_STREAM_SUBSTITUTE_B 0x0f88
+#define NVB197_SET_VERTEX_STREAM_SUBSTITUTE_B_ADDRESS_LOWER 31:0
+
+#define NVB197_SET_LINE_MODE_POLYGON_CLIP 0x0f8c
+#define NVB197_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE 0:0
+#define NVB197_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DRAW_LINE 0x00000000
+#define NVB197_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DO_NOT_DRAW_LINE 0x00000001
+
+#define NVB197_SET_SINGLE_CT_WRITE_CONTROL 0x0f90
+#define NVB197_SET_SINGLE_CT_WRITE_CONTROL_ENABLE 0:0
+#define NVB197_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_FALSE 0x00000000
+#define NVB197_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_VTG_WARP_WATERMARKS 0x0f98
+#define NVB197_SET_VTG_WARP_WATERMARKS_LOW 15:0
+#define NVB197_SET_VTG_WARP_WATERMARKS_HIGH 31:16
+
+#define NVB197_SET_DEPTH_BOUNDS_MIN 0x0f9c
+#define NVB197_SET_DEPTH_BOUNDS_MIN_V 31:0
+
+#define NVB197_SET_DEPTH_BOUNDS_MAX 0x0fa0
+#define NVB197_SET_DEPTH_BOUNDS_MAX_V 31:0
+
+#define NVB197_SET_SAMPLE_MASK 0x0fa4
+#define NVB197_SET_SAMPLE_MASK_RASTER_OUT_ENABLE 0:0
+#define NVB197_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_FALSE 0x00000000
+#define NVB197_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_TRUE 0x00000001
+#define NVB197_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE 4:4
+#define NVB197_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_FALSE 0x00000000
+#define NVB197_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_COLOR_TARGET_SAMPLE_MASK 0x0fa8
+#define NVB197_SET_COLOR_TARGET_SAMPLE_MASK_V 15:0
+
+#define NVB197_SET_CT_MRT_ENABLE 0x0fac
+#define NVB197_SET_CT_MRT_ENABLE_V 0:0
+#define NVB197_SET_CT_MRT_ENABLE_V_FALSE 0x00000000
+#define NVB197_SET_CT_MRT_ENABLE_V_TRUE 0x00000001
+
+#define NVB197_SET_NONMULTISAMPLED_Z 0x0fb0
+#define NVB197_SET_NONMULTISAMPLED_Z_V 0:0
+#define NVB197_SET_NONMULTISAMPLED_Z_V_PER_SAMPLE 0x00000000
+#define NVB197_SET_NONMULTISAMPLED_Z_V_AT_PIXEL_CENTER 0x00000001
+
+#define NVB197_SET_TIR 0x0fb4
+#define NVB197_SET_TIR_MODE 1:0
+#define NVB197_SET_TIR_MODE_DISABLED 0x00000000
+#define NVB197_SET_TIR_MODE_RASTER_N_TARGET_M 0x00000001
+
+#define NVB197_SET_ANTI_ALIAS_RASTER 0x0fb8
+#define NVB197_SET_ANTI_ALIAS_RASTER_SAMPLES 2:0
+#define NVB197_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_1X1 0x00000000
+#define NVB197_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X2 0x00000002
+#define NVB197_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVB197_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVB197_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X4 0x00000006
+
+#define NVB197_SET_SAMPLE_MASK_X0_Y0 0x0fbc
+#define NVB197_SET_SAMPLE_MASK_X0_Y0_V 15:0
+
+#define NVB197_SET_SAMPLE_MASK_X1_Y0 0x0fc0
+#define NVB197_SET_SAMPLE_MASK_X1_Y0_V 15:0
+
+#define NVB197_SET_SAMPLE_MASK_X0_Y1 0x0fc4
+#define NVB197_SET_SAMPLE_MASK_X0_Y1_V 15:0
+
+#define NVB197_SET_SAMPLE_MASK_X1_Y1 0x0fc8
+#define NVB197_SET_SAMPLE_MASK_X1_Y1_V 15:0
+
+#define NVB197_SET_SURFACE_CLIP_ID_MEMORY_A 0x0fcc
+#define NVB197_SET_SURFACE_CLIP_ID_MEMORY_A_OFFSET_UPPER 7:0
+
+#define NVB197_SET_SURFACE_CLIP_ID_MEMORY_B 0x0fd0
+#define NVB197_SET_SURFACE_CLIP_ID_MEMORY_B_OFFSET_LOWER 31:0
+
+#define NVB197_SET_TIR_MODULATION 0x0fd4
+#define NVB197_SET_TIR_MODULATION_COMPONENT_SELECT 1:0
+#define NVB197_SET_TIR_MODULATION_COMPONENT_SELECT_NO_MODULATION 0x00000000
+#define NVB197_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGB 0x00000001
+#define NVB197_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_ALPHA_ONLY 0x00000002
+#define NVB197_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGBA 0x00000003
+
+#define NVB197_SET_TIR_MODULATION_FUNCTION 0x0fd8
+#define NVB197_SET_TIR_MODULATION_FUNCTION_SELECT 0:0
+#define NVB197_SET_TIR_MODULATION_FUNCTION_SELECT_LINEAR 0x00000000
+#define NVB197_SET_TIR_MODULATION_FUNCTION_SELECT_TABLE 0x00000001
+
+#define NVB197_SET_BLEND_OPT_CONTROL 0x0fdc
+#define NVB197_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS 0:0
+#define NVB197_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_FALSE 0x00000000
+#define NVB197_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_TRUE 0x00000001
+
+#define NVB197_SET_ZT_A 0x0fe0
+#define NVB197_SET_ZT_A_OFFSET_UPPER 7:0
+
+#define NVB197_SET_ZT_B 0x0fe4
+#define NVB197_SET_ZT_B_OFFSET_LOWER 31:0
+
+#define NVB197_SET_ZT_FORMAT 0x0fe8
+#define NVB197_SET_ZT_FORMAT_V 4:0
+#define NVB197_SET_ZT_FORMAT_V_Z16 0x00000013
+#define NVB197_SET_ZT_FORMAT_V_Z24S8 0x00000014
+#define NVB197_SET_ZT_FORMAT_V_X8Z24 0x00000015
+#define NVB197_SET_ZT_FORMAT_V_S8Z24 0x00000016
+#define NVB197_SET_ZT_FORMAT_V_S8 0x00000017
+#define NVB197_SET_ZT_FORMAT_V_V8Z24 0x00000018
+#define NVB197_SET_ZT_FORMAT_V_ZF32 0x0000000A
+#define NVB197_SET_ZT_FORMAT_V_ZF32_X24S8 0x00000019
+#define NVB197_SET_ZT_FORMAT_V_X8Z24_X16V8S8 0x0000001D
+#define NVB197_SET_ZT_FORMAT_V_ZF32_X16V8X8 0x0000001E
+#define NVB197_SET_ZT_FORMAT_V_ZF32_X16V8S8 0x0000001F
+
+#define NVB197_SET_ZT_BLOCK_SIZE 0x0fec
+#define NVB197_SET_ZT_BLOCK_SIZE_WIDTH 3:0
+#define NVB197_SET_ZT_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVB197_SET_ZT_BLOCK_SIZE_HEIGHT 7:4
+#define NVB197_SET_ZT_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVB197_SET_ZT_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVB197_SET_ZT_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVB197_SET_ZT_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVB197_SET_ZT_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVB197_SET_ZT_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVB197_SET_ZT_BLOCK_SIZE_DEPTH 11:8
+#define NVB197_SET_ZT_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+
+#define NVB197_SET_ZT_ARRAY_PITCH 0x0ff0
+#define NVB197_SET_ZT_ARRAY_PITCH_V 31:0
+
+#define NVB197_SET_SURFACE_CLIP_HORIZONTAL 0x0ff4
+#define NVB197_SET_SURFACE_CLIP_HORIZONTAL_X 15:0
+#define NVB197_SET_SURFACE_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVB197_SET_SURFACE_CLIP_VERTICAL 0x0ff8
+#define NVB197_SET_SURFACE_CLIP_VERTICAL_Y 15:0
+#define NVB197_SET_SURFACE_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVB197_SET_TILED_CACHE_BUNDLE_CONTROL 0x0ffc
+#define NVB197_SET_TILED_CACHE_BUNDLE_CONTROL_TREAT_HEAVYWEIGHT_AS_LIGHTWEIGHT 0:0
+#define NVB197_SET_TILED_CACHE_BUNDLE_CONTROL_TREAT_HEAVYWEIGHT_AS_LIGHTWEIGHT_FALSE 0x00000000
+#define NVB197_SET_TILED_CACHE_BUNDLE_CONTROL_TREAT_HEAVYWEIGHT_AS_LIGHTWEIGHT_TRUE 0x00000001
+
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS 0x1000
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE 0:0
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_FALSE 0x00000000
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_TRUE 0x00000001
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY 5:4
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVB197_SET_VIEWPORT_MULTICAST 0x1004
+#define NVB197_SET_VIEWPORT_MULTICAST_ORDER 0:0
+#define NVB197_SET_VIEWPORT_MULTICAST_ORDER_VIEWPORT_ORDER 0x00000000
+#define NVB197_SET_VIEWPORT_MULTICAST_ORDER_PRIMITIVE_ORDER 0x00000001
+
+#define NVB197_SET_TESSELLATION_CUT_HEIGHT 0x1008
+#define NVB197_SET_TESSELLATION_CUT_HEIGHT_V 4:0
+
+#define NVB197_SET_MAX_GS_INSTANCES_PER_TASK 0x100c
+#define NVB197_SET_MAX_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVB197_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK 0x1010
+#define NVB197_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK_V 15:0
+
+#define NVB197_SET_RESERVED_SW_METHOD00 0x1014
+#define NVB197_SET_RESERVED_SW_METHOD00_V 31:0
+
+#define NVB197_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER 0x1018
+#define NVB197_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVB197_SET_BETA_CB_STORAGE_CONSTRAINT 0x101c
+#define NVB197_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVB197_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVB197_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER 0x1020
+#define NVB197_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVB197_SET_ALPHA_CB_STORAGE_CONSTRAINT 0x1024
+#define NVB197_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVB197_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVB197_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_RESERVED_SW_METHOD01 0x1028
+#define NVB197_SET_RESERVED_SW_METHOD01_V 31:0
+
+#define NVB197_SET_RESERVED_SW_METHOD02 0x102c
+#define NVB197_SET_RESERVED_SW_METHOD02_V 31:0
+
+#define NVB197_SET_TIR_MODULATION_COEFFICIENT_TABLE(i) (0x1030+(i)*4)
+#define NVB197_SET_TIR_MODULATION_COEFFICIENT_TABLE_V0 7:0
+#define NVB197_SET_TIR_MODULATION_COEFFICIENT_TABLE_V1 15:8
+#define NVB197_SET_TIR_MODULATION_COEFFICIENT_TABLE_V2 23:16
+#define NVB197_SET_TIR_MODULATION_COEFFICIENT_TABLE_V3 31:24
+
+#define NVB197_SET_SPARE_NOOP01 0x1044
+#define NVB197_SET_SPARE_NOOP01_V 31:0
+
+#define NVB197_SET_SPARE_NOOP02 0x1048
+#define NVB197_SET_SPARE_NOOP02_V 31:0
+
+#define NVB197_SET_SPARE_NOOP03 0x104c
+#define NVB197_SET_SPARE_NOOP03_V 31:0
+
+#define NVB197_SET_SPARE_NOOP04 0x1050
+#define NVB197_SET_SPARE_NOOP04_V 31:0
+
+#define NVB197_SET_SPARE_NOOP05 0x1054
+#define NVB197_SET_SPARE_NOOP05_V 31:0
+
+#define NVB197_SET_SPARE_NOOP06 0x1058
+#define NVB197_SET_SPARE_NOOP06_V 31:0
+
+#define NVB197_SET_SPARE_NOOP07 0x105c
+#define NVB197_SET_SPARE_NOOP07_V 31:0
+
+#define NVB197_SET_SPARE_NOOP08 0x1060
+#define NVB197_SET_SPARE_NOOP08_V 31:0
+
+#define NVB197_SET_SPARE_NOOP09 0x1064
+#define NVB197_SET_SPARE_NOOP09_V 31:0
+
+#define NVB197_SET_SPARE_NOOP10 0x1068
+#define NVB197_SET_SPARE_NOOP10_V 31:0
+
+#define NVB197_SET_SPARE_NOOP11 0x106c
+#define NVB197_SET_SPARE_NOOP11_V 31:0
+
+#define NVB197_SET_SPARE_NOOP12 0x1070
+#define NVB197_SET_SPARE_NOOP12_V 31:0
+
+#define NVB197_SET_SPARE_NOOP13 0x1074
+#define NVB197_SET_SPARE_NOOP13_V 31:0
+
+#define NVB197_SET_SPARE_NOOP14 0x1078
+#define NVB197_SET_SPARE_NOOP14_V 31:0
+
+#define NVB197_SET_SPARE_NOOP15 0x107c
+#define NVB197_SET_SPARE_NOOP15_V 31:0
+
+#define NVB197_SET_RESERVED_SW_METHOD03 0x10b0
+#define NVB197_SET_RESERVED_SW_METHOD03_V 31:0
+
+#define NVB197_SET_RESERVED_SW_METHOD04 0x10b4
+#define NVB197_SET_RESERVED_SW_METHOD04_V 31:0
+
+#define NVB197_SET_RESERVED_SW_METHOD05 0x10b8
+#define NVB197_SET_RESERVED_SW_METHOD05_V 31:0
+
+#define NVB197_SET_RESERVED_SW_METHOD06 0x10bc
+#define NVB197_SET_RESERVED_SW_METHOD06_V 31:0
+
+#define NVB197_SET_RESERVED_SW_METHOD07 0x10c0
+#define NVB197_SET_RESERVED_SW_METHOD07_V 31:0
+
+#define NVB197_SET_RESERVED_SW_METHOD08 0x10c4
+#define NVB197_SET_RESERVED_SW_METHOD08_V 31:0
+
+#define NVB197_SET_RESERVED_SW_METHOD09 0x10c8
+#define NVB197_SET_RESERVED_SW_METHOD09_V 31:0
+
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_UNORM8 0x10cc
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED 23:16
+
+#define NVB197_SET_RESERVED_SW_METHOD10 0x10d0
+#define NVB197_SET_RESERVED_SW_METHOD10_V 31:0
+
+#define NVB197_SET_RESERVED_SW_METHOD11 0x10d4
+#define NVB197_SET_RESERVED_SW_METHOD11_V 31:0
+
+#define NVB197_SET_RESERVED_SW_METHOD12 0x10d8
+#define NVB197_SET_RESERVED_SW_METHOD12_V 31:0
+
+#define NVB197_SET_RESERVED_SW_METHOD13 0x10dc
+#define NVB197_SET_RESERVED_SW_METHOD13_V 31:0
+
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_UNORM10 0x10e0
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED 23:16
+
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_UNORM16 0x10e4
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED 23:16
+
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_FP11 0x10e8
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED_ALL_HIT_ONCE 5:0
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED 21:16
+
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_FP16 0x10ec
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED 23:16
+
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_SRGB8 0x10f0
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVB197_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED 23:16
+
+#define NVB197_UNBIND_ALL 0x10f4
+#define NVB197_UNBIND_ALL_CONSTANT_BUFFERS 8:8
+#define NVB197_UNBIND_ALL_CONSTANT_BUFFERS_FALSE 0x00000000
+#define NVB197_UNBIND_ALL_CONSTANT_BUFFERS_TRUE 0x00000001
+
+#define NVB197_SET_CLEAR_SURFACE_CONTROL 0x10f8
+#define NVB197_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK 0:0
+#define NVB197_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_FALSE 0x00000000
+#define NVB197_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_TRUE 0x00000001
+#define NVB197_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT 4:4
+#define NVB197_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_FALSE 0x00000000
+#define NVB197_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_TRUE 0x00000001
+#define NVB197_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0 8:8
+#define NVB197_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_FALSE 0x00000000
+#define NVB197_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_TRUE 0x00000001
+#define NVB197_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0 12:12
+#define NVB197_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_FALSE 0x00000000
+#define NVB197_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_TRUE 0x00000001
+
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS 0x10fc
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY 5:4
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVB197_SET_RESERVED_SW_METHOD14 0x1100
+#define NVB197_SET_RESERVED_SW_METHOD14_V 31:0
+
+#define NVB197_SET_RESERVED_SW_METHOD15 0x1104
+#define NVB197_SET_RESERVED_SW_METHOD15_V 31:0
+
+#define NVB197_NO_OPERATION_DATA_HI 0x110c
+#define NVB197_NO_OPERATION_DATA_HI_V 31:0
+
+#define NVB197_SET_DEPTH_BIAS_CONTROL 0x1110
+#define NVB197_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT 0:0
+#define NVB197_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_FALSE 0x00000000
+#define NVB197_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_TRUE 0x00000001
+
+#define NVB197_PM_TRIGGER_END 0x1114
+#define NVB197_PM_TRIGGER_END_V 31:0
+
+#define NVB197_SET_VERTEX_ID_BASE 0x1118
+#define NVB197_SET_VERTEX_ID_BASE_V 31:0
+
+#define NVB197_SET_STENCIL_COMPRESSION 0x111c
+#define NVB197_SET_STENCIL_COMPRESSION_ENABLE 0:0
+#define NVB197_SET_STENCIL_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STENCIL_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A(i) (0x1120+(i)*4)
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0 0:0
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1 1:1
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2 2:2
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3 3:3
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0 4:4
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1 5:5
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2 6:6
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3 7:7
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0 8:8
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1 9:9
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2 10:10
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3 11:11
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0 12:12
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1 13:13
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2 14:14
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3 15:15
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0 16:16
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1 17:17
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2 18:18
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3 19:19
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0 20:20
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1 21:21
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2 22:22
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3 23:23
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0 24:24
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1 25:25
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2 26:26
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3 27:27
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0 28:28
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1 29:29
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2 30:30
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3 31:31
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_TRUE 0x00000001
+
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B(i) (0x1128+(i)*4)
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0 0:0
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1 1:1
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2 2:2
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3 3:3
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0 4:4
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1 5:5
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2 6:6
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3 7:7
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0 8:8
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1 9:9
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2 10:10
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3 11:11
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0 12:12
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1 13:13
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2 14:14
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3 15:15
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0 16:16
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1 17:17
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2 18:18
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3 19:19
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0 20:20
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1 21:21
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2 22:22
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3 23:23
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0 24:24
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1 25:25
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2 26:26
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3 27:27
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0 28:28
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1 29:29
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2 30:30
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_TRUE 0x00000001
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3 31:31
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_TRUE 0x00000001
+
+#define NVB197_SET_TIR_CONTROL 0x1130
+#define NVB197_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES 0:0
+#define NVB197_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_DISABLE 0x00000000
0x00000001 +#define NVB197_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES 4:4 +#define NVB197_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_DISABLE 0x00000000 +#define NVB197_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_ENABLE 0x00000001 +#define NVB197_SET_TIR_CONTROL_REDUCE_COVERAGE 1:1 +#define NVB197_SET_TIR_CONTROL_REDUCE_COVERAGE_DISABLE 0x00000000 +#define NVB197_SET_TIR_CONTROL_REDUCE_COVERAGE_ENABLE 0x00000001 + +#define NVB197_SET_MUTABLE_METHOD_CONTROL 0x1134 +#define NVB197_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT 0:0 +#define NVB197_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_FALSE 0x00000000 +#define NVB197_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_TRUE 0x00000001 + +#define NVB197_SET_POST_PS_INITIAL_COVERAGE 0x1138 +#define NVB197_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE 0:0 +#define NVB197_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_FALSE 0x00000000 +#define NVB197_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_TRUE 0x00000001 + +#define NVB197_SET_FILL_VIA_TRIANGLE 0x113c +#define NVB197_SET_FILL_VIA_TRIANGLE_MODE 1:0 +#define NVB197_SET_FILL_VIA_TRIANGLE_MODE_DISABLED 0x00000000 +#define NVB197_SET_FILL_VIA_TRIANGLE_MODE_FILL_ALL 0x00000001 +#define NVB197_SET_FILL_VIA_TRIANGLE_MODE_FILL_BBOX 0x00000002 + +#define NVB197_SET_BLEND_PER_FORMAT_ENABLE 0x1140 +#define NVB197_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16 4:4 +#define NVB197_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_FALSE 0x00000000 +#define NVB197_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_TRUE 0x00000001 + +#define NVB197_FLUSH_PENDING_WRITES 0x1144 +#define NVB197_FLUSH_PENDING_WRITES_SM_DOES_GLOBAL_STORE 0:0 + +#define NVB197_SET_VERTEX_ATTRIBUTE_A(i) (0x1160+(i)*4) +#define NVB197_SET_VERTEX_ATTRIBUTE_A_STREAM 4:0 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_SOURCE 6:6 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_SOURCE_ACTIVE 0x00000000 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_SOURCE_INACTIVE 0x00000001 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_OFFSET 20:7 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS 26:21 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8 
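The `0:0`/`4:4`-style tokens above are `HIGH:LOW` bit ranges meant to be consumed by ternary-expansion field macros, comparable to the DRF_* helpers the driver carries in nvmisc.h. A minimal, self-contained sketch of that packing idiom, assuming nothing beyond the two defines copied in; the FIELD_* helper names are illustrative, not part of the header:

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the header above. */
    #define NVB197_SET_TIR_CONTROL 0x1130
    #define NVB197_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES 0:0
    #define NVB197_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_ENABLE 0x00000001
    #define NVB197_SET_TIR_CONTROL_REDUCE_COVERAGE 1:1
    #define NVB197_SET_TIR_CONTROL_REDUCE_COVERAGE_DISABLE 0x00000000

    /* (0 ? hi:lo) evaluates to lo, (1 ? hi:lo) to hi: the range token
       becomes the two branches of a ternary expression. */
    #define FIELD_LO(range) (0 ? range)
    #define FIELD_HI(range) (1 ? range)
    #define FIELD_NUM(range, v) \
        ((uint32_t)((((uint64_t)(v)) << FIELD_LO(range)) & ((2ull << FIELD_HI(range)) - 1ull)))

    int main(void)
    {
        uint32_t payload =
            FIELD_NUM(NVB197_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES,
                      NVB197_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_ENABLE) |
            FIELD_NUM(NVB197_SET_TIR_CONTROL_REDUCE_COVERAGE,
                      NVB197_SET_TIR_CONTROL_REDUCE_COVERAGE_DISABLE);
        printf("mthd 0x%04x data 0x%08x\n", NVB197_SET_TIR_CONTROL, payload);
        return 0;
    }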
+
+#define NVB197_SET_VERTEX_ATTRIBUTE_A(i) (0x1160+(i)*4)
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_STREAM 4:0
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_SOURCE 6:6
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_SOURCE_ACTIVE 0x00000000
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_SOURCE_INACTIVE 0x00000001
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_OFFSET 20:7
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS 26:21
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32 0x00000012
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_G8R8 0x00000032
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16 0x0000001B
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8 0x0000001D
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8 0x00000034
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE 29:27
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SNORM 0x00000001
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UNORM 0x00000002
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SINT 0x00000003
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UINT 0x00000004
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_USCALED 0x00000005
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SSCALED 0x00000006
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT 0x00000007
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B 31:31
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_FALSE 0x00000000
+#define NVB197_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_TRUE 0x00000001
+
+#define NVB197_SET_VERTEX_ATTRIBUTE_B(i) (0x11a0+(i)*4)
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_STREAM 4:0
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_SOURCE 6:6
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_SOURCE_ACTIVE 0x00000000
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_SOURCE_INACTIVE 0x00000001
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_OFFSET 20:7
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS 26:21
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32 0x00000012
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_G8R8 0x00000032
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16 0x0000001B
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8 0x0000001D
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8 0x00000034
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE 29:27
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SNORM 0x00000001
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UNORM 0x00000002
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SINT 0x00000003
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UINT 0x00000004
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_USCALED 0x00000005
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SSCALED 0x00000006
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_FLOAT 0x00000007
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B 31:31
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_FALSE 0x00000000
+#define NVB197_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_TRUE 0x00000001
+
+#define NVB197_SET_ANTI_ALIAS_SAMPLE_POSITIONS(i) (0x11e0+(i)*4)
+#define NVB197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X0 3:0
+#define NVB197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y0 7:4
+#define NVB197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X1 11:8
+#define NVB197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y1 15:12
+#define NVB197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X2 19:16
+#define NVB197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y2 23:20
+#define NVB197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X3 27:24
+#define NVB197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y3 31:28
+
+#define NVB197_SET_OFFSET_RENDER_TARGET_INDEX 0x11f0
+#define NVB197_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX 0:0
+#define NVB197_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_FALSE 0x00000000
+#define NVB197_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_TRUE 0x00000001
+
+#define NVB197_FORCE_HEAVYWEIGHT_METHOD_SYNC 0x11f4
+#define NVB197_FORCE_HEAVYWEIGHT_METHOD_SYNC_V 31:0
+
+#define NVB197_SET_COVERAGE_TO_COLOR 0x11f8
+#define NVB197_SET_COVERAGE_TO_COLOR_ENABLE 0:0
+#define NVB197_SET_COVERAGE_TO_COLOR_ENABLE_FALSE 0x00000000
+#define NVB197_SET_COVERAGE_TO_COLOR_ENABLE_TRUE 0x00000001
+#define NVB197_SET_COVERAGE_TO_COLOR_CT_SELECT 6:4
+
+#define NVB197_DECOMPRESS_ZETA_SURFACE 0x11fc
+#define NVB197_DECOMPRESS_ZETA_SURFACE_Z_ENABLE 0:0
+#define NVB197_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_FALSE 0x00000000
+#define NVB197_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_TRUE 0x00000001
+#define NVB197_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE 4:4
+#define NVB197_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_FALSE 0x00000000
+#define NVB197_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_ZT_SPARSE 0x1208
+#define NVB197_SET_ZT_SPARSE_ENABLE 0:0
+#define NVB197_SET_ZT_SPARSE_ENABLE_FALSE 0x00000000
+#define NVB197_SET_ZT_SPARSE_ENABLE_TRUE 0x00000001
+#define NVB197_SET_ZT_SPARSE_UNMAPPED_COMPARE 1:1
+#define NVB197_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_UNMAPPED_0 0x00000000
+#define NVB197_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_FAIL_ALWAYS 0x00000001
+
+#define NVB197_INVALIDATE_SAMPLER_CACHE_ALL 0x120c
+#define NVB197_INVALIDATE_SAMPLER_CACHE_ALL_V 0:0
+
+#define NVB197_INVALIDATE_TEXTURE_HEADER_CACHE_ALL 0x1210
+#define NVB197_INVALIDATE_TEXTURE_HEADER_CACHE_ALL_V 0:0
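Each SET_VERTEX_ATTRIBUTE_A(i) payload is a packed descriptor for one vertex attribute: source stream, byte offset, component widths, and numerical type. A sketch of packing a two-component FP32 attribute, reusing the illustrative FIELD_* helpers from the earlier sketch (only the defines copied in are from the header):

    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_LO(range) (0 ? range)
    #define FIELD_HI(range) (1 ? range)
    #define FIELD_NUM(range, v) \
        ((uint32_t)((((uint64_t)(v)) << FIELD_LO(range)) & ((2ull << FIELD_HI(range)) - 1ull)))

    /* Copied from the header above. */
    #define NVB197_SET_VERTEX_ATTRIBUTE_A(i) (0x1160+(i)*4)
    #define NVB197_SET_VERTEX_ATTRIBUTE_A_STREAM 4:0
    #define NVB197_SET_VERTEX_ATTRIBUTE_A_OFFSET 20:7
    #define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS 26:21
    #define NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004
    #define NVB197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE 29:27
    #define NVB197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT 0x00000007

    int main(void)
    {
        /* Attribute 3: two 32-bit floats at byte offset 16 of stream 0
           (SOURCE is left at 0, i.e. ACTIVE). */
        uint32_t desc = FIELD_NUM(NVB197_SET_VERTEX_ATTRIBUTE_A_STREAM, 0)
                      | FIELD_NUM(NVB197_SET_VERTEX_ATTRIBUTE_A_OFFSET, 16)
                      | FIELD_NUM(NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS,
                                  NVB197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32)
                      | FIELD_NUM(NVB197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE,
                                  NVB197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT);
        printf("mthd 0x%04x data 0x%08x\n", NVB197_SET_VERTEX_ATTRIBUTE_A(3), desc);
        return 0;
    }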
+
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST 0x1214
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX 15:0
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT 0x1218
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_START_INDEX 15:0
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVB197_SET_CT_SELECT 0x121c
+#define NVB197_SET_CT_SELECT_TARGET_COUNT 3:0
+#define NVB197_SET_CT_SELECT_TARGET0 6:4
+#define NVB197_SET_CT_SELECT_TARGET1 9:7
+#define NVB197_SET_CT_SELECT_TARGET2 12:10
+#define NVB197_SET_CT_SELECT_TARGET3 15:13
+#define NVB197_SET_CT_SELECT_TARGET4 18:16
+#define NVB197_SET_CT_SELECT_TARGET5 21:19
+#define NVB197_SET_CT_SELECT_TARGET6 24:22
+#define NVB197_SET_CT_SELECT_TARGET7 27:25
+
+#define NVB197_SET_COMPRESSION_THRESHOLD 0x1220
+#define NVB197_SET_COMPRESSION_THRESHOLD_SAMPLES 3:0
+#define NVB197_SET_COMPRESSION_THRESHOLD_SAMPLES__0 0x00000000
+#define NVB197_SET_COMPRESSION_THRESHOLD_SAMPLES__1 0x00000001
+#define NVB197_SET_COMPRESSION_THRESHOLD_SAMPLES__2 0x00000002
+#define NVB197_SET_COMPRESSION_THRESHOLD_SAMPLES__4 0x00000003
+#define NVB197_SET_COMPRESSION_THRESHOLD_SAMPLES__8 0x00000004
+#define NVB197_SET_COMPRESSION_THRESHOLD_SAMPLES__16 0x00000005
+#define NVB197_SET_COMPRESSION_THRESHOLD_SAMPLES__32 0x00000006
+#define NVB197_SET_COMPRESSION_THRESHOLD_SAMPLES__64 0x00000007
+#define NVB197_SET_COMPRESSION_THRESHOLD_SAMPLES__128 0x00000008
+#define NVB197_SET_COMPRESSION_THRESHOLD_SAMPLES__256 0x00000009
+#define NVB197_SET_COMPRESSION_THRESHOLD_SAMPLES__512 0x0000000A
+#define NVB197_SET_COMPRESSION_THRESHOLD_SAMPLES__1024 0x0000000B
+#define NVB197_SET_COMPRESSION_THRESHOLD_SAMPLES__2048 0x0000000C
+
+#define NVB197_SET_PIXEL_SHADER_INTERLOCK_CONTROL 0x1224
+#define NVB197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE 1:0
+#define NVB197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_NO_CONFLICT_DETECT 0x00000000
+#define NVB197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_SAMPLE 0x00000001
+#define NVB197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_PIXEL 0x00000002
+#define NVB197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE 2:2
+#define NVB197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_16X16 0x00000000
+#define NVB197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_8X8 0x00000001
+#define NVB197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER 3:3
+#define NVB197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_ORDERED 0x00000000
+#define NVB197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_UNORDERED 0x00000001
+
+#define NVB197_SET_ZT_SIZE_A 0x1228
+#define NVB197_SET_ZT_SIZE_A_WIDTH 27:0
+
+#define NVB197_SET_ZT_SIZE_B 0x122c
+#define NVB197_SET_ZT_SIZE_B_HEIGHT 16:0
+
+#define NVB197_SET_ZT_SIZE_C 0x1230
+#define NVB197_SET_ZT_SIZE_C_THIRD_DIMENSION 15:0
+#define NVB197_SET_ZT_SIZE_C_CONTROL 16:16
+#define NVB197_SET_ZT_SIZE_C_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000
+#define NVB197_SET_ZT_SIZE_C_CONTROL_ARRAY_SIZE_IS_ONE 0x00000001
+
+#define NVB197_SET_SAMPLER_BINDING 0x1234
+#define NVB197_SET_SAMPLER_BINDING_V 0:0
+#define NVB197_SET_SAMPLER_BINDING_V_INDEPENDENTLY 0x00000000
+#define NVB197_SET_SAMPLER_BINDING_V_VIA_HEADER_BINDING 0x00000001
+
+#define NVB197_DRAW_AUTO 0x123c
+#define NVB197_DRAW_AUTO_BYTE_COUNT 31:0
+
+#define NVB197_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK(i) (0x1240+(i)*4)
+#define NVB197_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK_V 31:0
+
+#define NVB197_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE 0x1260
+#define NVB197_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_INDEX 7:0
+#define NVB197_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_VALUE 23:8
+
+#define NVB197_SET_CIRCULAR_BUFFER_SIZE 0x1280
+#define NVB197_SET_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 13:0
+
+#define NVB197_SET_VTG_REGISTER_WATERMARKS 0x1284
+#define NVB197_SET_VTG_REGISTER_WATERMARKS_LOW 15:0
+#define NVB197_SET_VTG_REGISTER_WATERMARKS_HIGH 31:16
+
+#define NVB197_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288
+#define NVB197_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0
+#define NVB197_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVB197_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVB197_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4
+
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS 0x1290
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY 5:4
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVB197_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE 0x12a4
+#define NVB197_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE_V 31:0
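A single write to DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST encodes the whole non-indexed draw: a 16-bit start index, a 12-bit vertex count, and a 4-bit topology. A sketch under those field definitions (FIELD_* helpers are the same illustrative ones as before):

    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_LO(range) (0 ? range)
    #define FIELD_HI(range) (1 ? range)
    #define FIELD_NUM(range, v) \
        ((uint32_t)((((uint64_t)(v)) << FIELD_LO(range)) & ((2ull << FIELD_HI(range)) - 1ull)))

    /* Copied from the header above. */
    #define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST 0x1214
    #define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX 15:0
    #define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
    #define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
    #define NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004

    int main(void)
    {
        /* One triangle: 3 vertices from index 0. COUNT is only 12 bits,
           so larger draws presumably need multiple packets. */
        uint32_t draw =
            FIELD_NUM(NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX, 0) |
            FIELD_NUM(NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT, 3) |
            FIELD_NUM(NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY,
                      NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES);
        printf("mthd 0x%04x data 0x%08x\n",
               NVB197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST, draw);
        return 0;
    }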
+
+#define NVB197_CLEAR_ZCULL_REGION 0x12c8
+#define NVB197_CLEAR_ZCULL_REGION_Z_ENABLE 0:0
+#define NVB197_CLEAR_ZCULL_REGION_Z_ENABLE_FALSE 0x00000000
+#define NVB197_CLEAR_ZCULL_REGION_Z_ENABLE_TRUE 0x00000001
+#define NVB197_CLEAR_ZCULL_REGION_STENCIL_ENABLE 4:4
+#define NVB197_CLEAR_ZCULL_REGION_STENCIL_ENABLE_FALSE 0x00000000
+#define NVB197_CLEAR_ZCULL_REGION_STENCIL_ENABLE_TRUE 0x00000001
+#define NVB197_CLEAR_ZCULL_REGION_USE_CLEAR_RECT 1:1
+#define NVB197_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_FALSE 0x00000000
+#define NVB197_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_TRUE 0x00000001
+#define NVB197_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX 2:2
+#define NVB197_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_FALSE 0x00000000
+#define NVB197_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_TRUE 0x00000001
+#define NVB197_CLEAR_ZCULL_REGION_RT_ARRAY_INDEX 20:5
+#define NVB197_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE 3:3
+#define NVB197_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_FALSE 0x00000000
+#define NVB197_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_TRUE 0x00000001
+
+#define NVB197_SET_DEPTH_TEST 0x12cc
+#define NVB197_SET_DEPTH_TEST_ENABLE 0:0
+#define NVB197_SET_DEPTH_TEST_ENABLE_FALSE 0x00000000
+#define NVB197_SET_DEPTH_TEST_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_FILL_MODE 0x12d0
+#define NVB197_SET_FILL_MODE_V 31:0
+#define NVB197_SET_FILL_MODE_V_POINT 0x00000001
+#define NVB197_SET_FILL_MODE_V_WIREFRAME 0x00000002
+#define NVB197_SET_FILL_MODE_V_SOLID 0x00000003
+
+#define NVB197_SET_SHADE_MODE 0x12d4
+#define NVB197_SET_SHADE_MODE_V 31:0
+#define NVB197_SET_SHADE_MODE_V_FLAT 0x00000001
+#define NVB197_SET_SHADE_MODE_V_GOURAUD 0x00000002
+#define NVB197_SET_SHADE_MODE_V_OGL_FLAT 0x00001D00
+#define NVB197_SET_SHADE_MODE_V_OGL_SMOOTH 0x00001D01
+
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS 0x12d8
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY 5:4
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS 0x12dc
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY 5:4
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVB197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVB197_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL 0x12e0
+#define NVB197_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT 3:0
+#define NVB197_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1 0x00000000
+#define NVB197_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_2X2 0x00000001
+#define NVB197_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1_VIRTUAL_SAMPLES 0x00000002
+
+#define NVB197_SET_BLEND_STATE_PER_TARGET 0x12e4
+#define NVB197_SET_BLEND_STATE_PER_TARGET_ENABLE 0:0
+#define NVB197_SET_BLEND_STATE_PER_TARGET_ENABLE_FALSE 0x00000000
+#define NVB197_SET_BLEND_STATE_PER_TARGET_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_DEPTH_WRITE 0x12e8
+#define NVB197_SET_DEPTH_WRITE_ENABLE 0:0
+#define NVB197_SET_DEPTH_WRITE_ENABLE_FALSE 0x00000000
+#define NVB197_SET_DEPTH_WRITE_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_ALPHA_TEST 0x12ec
+#define NVB197_SET_ALPHA_TEST_ENABLE 0:0
+#define NVB197_SET_ALPHA_TEST_ENABLE_FALSE 0x00000000
+#define NVB197_SET_ALPHA_TEST_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_INLINE_INDEX4X8_ALIGN 0x1300
+#define NVB197_SET_INLINE_INDEX4X8_ALIGN_COUNT 29:0
+#define NVB197_SET_INLINE_INDEX4X8_ALIGN_START 31:30
+
+#define NVB197_DRAW_INLINE_INDEX4X8 0x1304
+#define NVB197_DRAW_INLINE_INDEX4X8_INDEX0 7:0
+#define NVB197_DRAW_INLINE_INDEX4X8_INDEX1 15:8
+#define NVB197_DRAW_INLINE_INDEX4X8_INDEX2 23:16
+#define NVB197_DRAW_INLINE_INDEX4X8_INDEX3 31:24
+
+#define NVB197_D3D_SET_CULL_MODE 0x1308
+#define NVB197_D3D_SET_CULL_MODE_V 31:0
+#define NVB197_D3D_SET_CULL_MODE_V_NONE 0x00000001
+#define NVB197_D3D_SET_CULL_MODE_V_CW 0x00000002
+#define NVB197_D3D_SET_CULL_MODE_V_CCW 0x00000003
+
+#define NVB197_SET_DEPTH_FUNC 0x130c
+#define NVB197_SET_DEPTH_FUNC_V 31:0
+#define NVB197_SET_DEPTH_FUNC_V_OGL_NEVER 0x00000200
+#define NVB197_SET_DEPTH_FUNC_V_OGL_LESS 0x00000201
+#define NVB197_SET_DEPTH_FUNC_V_OGL_EQUAL 0x00000202
+#define NVB197_SET_DEPTH_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVB197_SET_DEPTH_FUNC_V_OGL_GREATER 0x00000204
+#define NVB197_SET_DEPTH_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVB197_SET_DEPTH_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVB197_SET_DEPTH_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVB197_SET_DEPTH_FUNC_V_D3D_NEVER 0x00000001
+#define NVB197_SET_DEPTH_FUNC_V_D3D_LESS 0x00000002
+#define NVB197_SET_DEPTH_FUNC_V_D3D_EQUAL 0x00000003
+#define NVB197_SET_DEPTH_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVB197_SET_DEPTH_FUNC_V_D3D_GREATER 0x00000005
+#define NVB197_SET_DEPTH_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVB197_SET_DEPTH_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVB197_SET_DEPTH_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVB197_SET_ALPHA_REF 0x1310
+#define NVB197_SET_ALPHA_REF_V 31:0
+
+#define NVB197_SET_ALPHA_FUNC 0x1314
+#define NVB197_SET_ALPHA_FUNC_V 31:0
+#define NVB197_SET_ALPHA_FUNC_V_OGL_NEVER 0x00000200
+#define NVB197_SET_ALPHA_FUNC_V_OGL_LESS 0x00000201
+#define NVB197_SET_ALPHA_FUNC_V_OGL_EQUAL 0x00000202
+#define NVB197_SET_ALPHA_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVB197_SET_ALPHA_FUNC_V_OGL_GREATER 0x00000204
+#define NVB197_SET_ALPHA_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVB197_SET_ALPHA_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVB197_SET_ALPHA_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVB197_SET_ALPHA_FUNC_V_D3D_NEVER 0x00000001
+#define NVB197_SET_ALPHA_FUNC_V_D3D_LESS 0x00000002
+#define NVB197_SET_ALPHA_FUNC_V_D3D_EQUAL 0x00000003
+#define NVB197_SET_ALPHA_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVB197_SET_ALPHA_FUNC_V_D3D_GREATER 0x00000005
+#define NVB197_SET_ALPHA_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVB197_SET_ALPHA_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVB197_SET_ALPHA_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVB197_SET_DRAW_AUTO_STRIDE 0x1318
+#define NVB197_SET_DRAW_AUTO_STRIDE_V 11:0
+
+#define NVB197_SET_BLEND_CONST_RED 0x131c
+#define NVB197_SET_BLEND_CONST_RED_V 31:0
+
+#define NVB197_SET_BLEND_CONST_GREEN 0x1320
+#define NVB197_SET_BLEND_CONST_GREEN_V 31:0
+
+#define NVB197_SET_BLEND_CONST_BLUE 0x1324
+#define NVB197_SET_BLEND_CONST_BLUE_V 31:0
+
+#define NVB197_SET_BLEND_CONST_ALPHA 0x1328
+#define NVB197_SET_BLEND_CONST_ALPHA_V 31:0
+
+#define NVB197_INVALIDATE_SAMPLER_CACHE 0x1330
+#define NVB197_INVALIDATE_SAMPLER_CACHE_LINES 0:0
+#define NVB197_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000
+#define NVB197_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001
+#define NVB197_INVALIDATE_SAMPLER_CACHE_TAG 25:4
+
+#define NVB197_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334
+#define NVB197_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0
+#define NVB197_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000
+#define NVB197_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001
+#define NVB197_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4
+
+#define NVB197_INVALIDATE_TEXTURE_DATA_CACHE 0x1338
+#define NVB197_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0
+#define NVB197_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000
+#define NVB197_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001
+#define NVB197_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4
+
+#define NVB197_SET_BLEND_SEPARATE_FOR_ALPHA 0x133c
+#define NVB197_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE 0:0
+#define NVB197_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000
+#define NVB197_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_BLEND_COLOR_OP 0x1340
+#define NVB197_SET_BLEND_COLOR_OP_V 31:0
+#define NVB197_SET_BLEND_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVB197_SET_BLEND_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVB197_SET_BLEND_COLOR_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVB197_SET_BLEND_COLOR_OP_V_OGL_MIN 0x00008007
+#define NVB197_SET_BLEND_COLOR_OP_V_OGL_MAX 0x00008008
+#define NVB197_SET_BLEND_COLOR_OP_V_D3D_ADD 0x00000001
+#define NVB197_SET_BLEND_COLOR_OP_V_D3D_SUBTRACT 0x00000002
+#define NVB197_SET_BLEND_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVB197_SET_BLEND_COLOR_OP_V_D3D_MIN 0x00000004
+#define NVB197_SET_BLEND_COLOR_OP_V_D3D_MAX 0x00000005
+
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF 0x1344
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V 31:0
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF 0x1348
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V 31:0
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
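The blend methods accept either OpenGL-style encodings (0x4000/0x8000/0xC000-based, mirroring GL enum values) or D3D-style small integers in the same 32-bit V field. A sketch of programming classic src-alpha/one-minus-src-alpha blending as (method, payload) pairs; push() is a hypothetical stand-in for whatever channel/pushbuffer write a real client uses:

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the header above. */
    #define NVB197_SET_BLEND_COLOR_OP 0x1340
    #define NVB197_SET_BLEND_COLOR_OP_V_OGL_FUNC_ADD 0x00008006
    #define NVB197_SET_BLEND_COLOR_SOURCE_COEFF 0x1344
    #define NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
    #define NVB197_SET_BLEND_COLOR_DEST_COEFF 0x1348
    #define NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303

    /* Hypothetical method-emit stand-in: just prints the pair. */
    static void push(uint16_t method, uint32_t payload)
    {
        printf("mthd 0x%04x data 0x%08x\n", method, payload);
    }

    int main(void)
    {
        push(NVB197_SET_BLEND_COLOR_OP,
             NVB197_SET_BLEND_COLOR_OP_V_OGL_FUNC_ADD);
        push(NVB197_SET_BLEND_COLOR_SOURCE_COEFF,
             NVB197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA);
        push(NVB197_SET_BLEND_COLOR_DEST_COEFF,
             NVB197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA);
        return 0;
    }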
+
+#define NVB197_SET_BLEND_ALPHA_OP 0x134c
+#define NVB197_SET_BLEND_ALPHA_OP_V 31:0
+#define NVB197_SET_BLEND_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVB197_SET_BLEND_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVB197_SET_BLEND_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVB197_SET_BLEND_ALPHA_OP_V_OGL_MIN 0x00008007
+#define NVB197_SET_BLEND_ALPHA_OP_V_OGL_MAX 0x00008008
+#define NVB197_SET_BLEND_ALPHA_OP_V_D3D_ADD 0x00000001
+#define NVB197_SET_BLEND_ALPHA_OP_V_D3D_SUBTRACT 0x00000002
+#define NVB197_SET_BLEND_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVB197_SET_BLEND_ALPHA_OP_V_D3D_MIN 0x00000004
+#define NVB197_SET_BLEND_ALPHA_OP_V_D3D_MAX 0x00000005
+
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF 0x1350
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V 31:0
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVB197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVB197_SET_GLOBAL_COLOR_KEY 0x1354
+#define NVB197_SET_GLOBAL_COLOR_KEY_ENABLE 0:0
+#define NVB197_SET_GLOBAL_COLOR_KEY_ENABLE_FALSE 0x00000000
+#define NVB197_SET_GLOBAL_COLOR_KEY_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF 0x1358
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V 31:0
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVB197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVB197_SET_SINGLE_ROP_CONTROL 0x135c
+#define NVB197_SET_SINGLE_ROP_CONTROL_ENABLE 0:0
+#define NVB197_SET_SINGLE_ROP_CONTROL_ENABLE_FALSE 0x00000000
+#define NVB197_SET_SINGLE_ROP_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_BLEND(i) (0x1360+(i)*4)
+#define NVB197_SET_BLEND_ENABLE 0:0
+#define NVB197_SET_BLEND_ENABLE_FALSE 0x00000000
+#define NVB197_SET_BLEND_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_STENCIL_TEST 0x1380
+#define NVB197_SET_STENCIL_TEST_ENABLE 0:0
+#define NVB197_SET_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NVB197_SET_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_STENCIL_OP_FAIL 0x1384
+#define NVB197_SET_STENCIL_OP_FAIL_V 31:0
+#define NVB197_SET_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NVB197_SET_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NVB197_SET_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NVB197_SET_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NVB197_SET_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NVB197_SET_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NVB197_SET_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NVB197_SET_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NVB197_SET_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NVB197_SET_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NVB197_SET_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NVB197_SET_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NVB197_SET_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NVB197_SET_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NVB197_SET_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NVB197_SET_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVB197_SET_STENCIL_OP_ZFAIL 0x1388
+#define NVB197_SET_STENCIL_OP_ZFAIL_V 31:0
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVB197_SET_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVB197_SET_STENCIL_OP_ZPASS 0x138c
+#define NVB197_SET_STENCIL_OP_ZPASS_V 31:0
+#define NVB197_SET_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVB197_SET_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVB197_SET_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVB197_SET_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVB197_SET_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NVB197_SET_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NVB197_SET_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NVB197_SET_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NVB197_SET_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NVB197_SET_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NVB197_SET_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NVB197_SET_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NVB197_SET_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NVB197_SET_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NVB197_SET_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NVB197_SET_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NVB197_SET_STENCIL_FUNC 0x1390
+#define NVB197_SET_STENCIL_FUNC_V 31:0
+#define NVB197_SET_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NVB197_SET_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NVB197_SET_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NVB197_SET_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVB197_SET_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NVB197_SET_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVB197_SET_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVB197_SET_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVB197_SET_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NVB197_SET_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NVB197_SET_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NVB197_SET_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVB197_SET_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVB197_SET_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVB197_SET_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVB197_SET_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVB197_SET_STENCIL_FUNC_REF 0x1394
+#define NVB197_SET_STENCIL_FUNC_REF_V 7:0
+
+#define NVB197_SET_STENCIL_FUNC_MASK 0x1398
+#define NVB197_SET_STENCIL_FUNC_MASK_V 7:0
+
+#define NVB197_SET_STENCIL_MASK 0x139c
+#define NVB197_SET_STENCIL_MASK_V 7:0
+
+#define NVB197_SET_DRAW_AUTO_START 0x13a4
+#define NVB197_SET_DRAW_AUTO_START_BYTE_COUNT 31:0
+
+#define NVB197_SET_PS_SATURATE 0x13a8
+#define NVB197_SET_PS_SATURATE_OUTPUT0 0:0
+#define NVB197_SET_PS_SATURATE_OUTPUT0_FALSE 0x00000000
+#define NVB197_SET_PS_SATURATE_OUTPUT0_TRUE 0x00000001
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE0 1:1
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE0_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE0_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVB197_SET_PS_SATURATE_OUTPUT1 4:4
+#define NVB197_SET_PS_SATURATE_OUTPUT1_FALSE 0x00000000
+#define NVB197_SET_PS_SATURATE_OUTPUT1_TRUE 0x00000001
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE1 5:5
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE1_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE1_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVB197_SET_PS_SATURATE_OUTPUT2 8:8
+#define NVB197_SET_PS_SATURATE_OUTPUT2_FALSE 0x00000000
+#define NVB197_SET_PS_SATURATE_OUTPUT2_TRUE 0x00000001
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE2 9:9
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE2_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE2_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVB197_SET_PS_SATURATE_OUTPUT3 12:12
+#define NVB197_SET_PS_SATURATE_OUTPUT3_FALSE 0x00000000
+#define NVB197_SET_PS_SATURATE_OUTPUT3_TRUE 0x00000001
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE3 13:13
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE3_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE3_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVB197_SET_PS_SATURATE_OUTPUT4 16:16
+#define NVB197_SET_PS_SATURATE_OUTPUT4_FALSE 0x00000000
+#define NVB197_SET_PS_SATURATE_OUTPUT4_TRUE 0x00000001
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE4 17:17
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE4_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE4_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVB197_SET_PS_SATURATE_OUTPUT5 20:20
+#define NVB197_SET_PS_SATURATE_OUTPUT5_FALSE 0x00000000
+#define NVB197_SET_PS_SATURATE_OUTPUT5_TRUE 0x00000001
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE5 21:21
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE5_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE5_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVB197_SET_PS_SATURATE_OUTPUT6 24:24
+#define NVB197_SET_PS_SATURATE_OUTPUT6_FALSE 0x00000000
+#define NVB197_SET_PS_SATURATE_OUTPUT6_TRUE 0x00000001
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE6 25:25
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE6_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE6_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVB197_SET_PS_SATURATE_OUTPUT7 28:28
+#define NVB197_SET_PS_SATURATE_OUTPUT7_FALSE 0x00000000
+#define NVB197_SET_PS_SATURATE_OUTPUT7_TRUE 0x00000001
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE7 29:29
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE7_ZERO_TO_PLUS_ONE 0x00000000
+#define NVB197_SET_PS_SATURATE_CLAMP_RANGE7_MINUS_ONE_TO_PLUS_ONE 0x00000001
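Per the field positions just listed, SET_PS_SATURATE packs one OUTPUTn bit at position 4*n and one CLAMP_RANGEn bit at 4*n+1 for render targets 0..7, so the word can be built in a loop rather than field by field. A sketch under that observation (array contents are arbitrary example data):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NVB197_SET_PS_SATURATE 0x13a8 /* copied from the header above */

    int main(void)
    {
        /* Saturate outputs 0 and 1; all targets clamp to [0, +1]. */
        bool saturate[8]   = { true, true, false, false, false, false, false, false };
        bool full_range[8] = { false };
        uint32_t v = 0;

        for (unsigned n = 0; n < 8; n++) {
            v |= (uint32_t)(saturate[n] ? 1u : 0u) << (4 * n);       /* OUTPUTn      */
            v |= (uint32_t)(full_range[n] ? 1u : 0u) << (4 * n + 1); /* CLAMP_RANGEn */
        }
        printf("mthd 0x%04x data 0x%08x\n", NVB197_SET_PS_SATURATE, v);
        return 0;
    }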
+
+#define NVB197_SET_WINDOW_ORIGIN 0x13ac
+#define NVB197_SET_WINDOW_ORIGIN_MODE 0:0
+#define NVB197_SET_WINDOW_ORIGIN_MODE_UPPER_LEFT 0x00000000
+#define NVB197_SET_WINDOW_ORIGIN_MODE_LOWER_LEFT 0x00000001
+#define NVB197_SET_WINDOW_ORIGIN_FLIP_Y 4:4
+#define NVB197_SET_WINDOW_ORIGIN_FLIP_Y_FALSE 0x00000000
+#define NVB197_SET_WINDOW_ORIGIN_FLIP_Y_TRUE 0x00000001
+
+#define NVB197_SET_LINE_WIDTH_FLOAT 0x13b0
+#define NVB197_SET_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVB197_SET_ALIASED_LINE_WIDTH_FLOAT 0x13b4
+#define NVB197_SET_ALIASED_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVB197_SET_LINE_MULTISAMPLE_OVERRIDE 0x1418
+#define NVB197_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE 0:0
+#define NVB197_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_FALSE 0x00000000
+#define NVB197_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_ALPHA_HYSTERESIS 0x1420
+#define NVB197_SET_ALPHA_HYSTERESIS_ROUNDS_OF_ALPHA 7:0
+
+#define NVB197_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424
+#define NVB197_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0
+#define NVB197_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVB197_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVB197_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4
+
+#define NVB197_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x1428
+#define NVB197_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0
+#define NVB197_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVB197_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVB197_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4
+
+#define NVB197_SET_GLOBAL_BASE_VERTEX_INDEX 0x1434
+#define NVB197_SET_GLOBAL_BASE_VERTEX_INDEX_V 31:0
+
+#define NVB197_SET_GLOBAL_BASE_INSTANCE_INDEX 0x1438
+#define NVB197_SET_GLOBAL_BASE_INSTANCE_INDEX_V 31:0
+
+#define NVB197_SET_PS_WARP_WATERMARKS 0x1450
+#define NVB197_SET_PS_WARP_WATERMARKS_LOW 15:0
+#define NVB197_SET_PS_WARP_WATERMARKS_HIGH 31:16
+
+#define NVB197_SET_PS_REGISTER_WATERMARKS 0x1454
+#define NVB197_SET_PS_REGISTER_WATERMARKS_LOW 15:0
+#define NVB197_SET_PS_REGISTER_WATERMARKS_HIGH 31:16
+
+#define NVB197_STORE_ZCULL 0x1464
+#define NVB197_STORE_ZCULL_V 0:0
+
+#define NVB197_SET_ITERATED_BLEND_CONSTANT_RED(j) (0x1480+(j)*16)
+#define NVB197_SET_ITERATED_BLEND_CONSTANT_RED_V 15:0
+
+#define NVB197_SET_ITERATED_BLEND_CONSTANT_GREEN(j) (0x1484+(j)*16)
+#define NVB197_SET_ITERATED_BLEND_CONSTANT_GREEN_V 15:0
+
+#define NVB197_SET_ITERATED_BLEND_CONSTANT_BLUE(j) (0x1488+(j)*16)
+#define NVB197_SET_ITERATED_BLEND_CONSTANT_BLUE_V 15:0
+
+#define NVB197_LOAD_ZCULL 0x1500
+#define NVB197_LOAD_ZCULL_V 0:0
+
+#define NVB197_SET_SURFACE_CLIP_ID_HEIGHT 0x1504
+#define NVB197_SET_SURFACE_CLIP_ID_HEIGHT_V 31:0
+
+#define NVB197_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL 0x1508
+#define NVB197_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVB197_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVB197_SET_CLIP_ID_CLEAR_RECT_VERTICAL 0x150c
+#define NVB197_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVB197_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVB197_SET_USER_CLIP_ENABLE 0x1510
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE0 0:0
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE0_FALSE 0x00000000
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE0_TRUE 0x00000001
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE1 1:1
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE1_FALSE 0x00000000
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE1_TRUE 0x00000001
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE2 2:2
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE2_FALSE 0x00000000
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE2_TRUE 0x00000001
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE3 3:3
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE3_FALSE 0x00000000
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE3_TRUE 0x00000001
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE4 4:4
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE4_FALSE 0x00000000
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE4_TRUE 0x00000001
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE5 5:5
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE5_FALSE 0x00000000
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE5_TRUE 0x00000001
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE6 6:6
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE6_FALSE 0x00000000
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE6_TRUE 0x00000001
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE7 7:7
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE7_FALSE 0x00000000
+#define NVB197_SET_USER_CLIP_ENABLE_PLANE7_TRUE 0x00000001
+
+#define NVB197_SET_ZPASS_PIXEL_COUNT 0x1514
+#define NVB197_SET_ZPASS_PIXEL_COUNT_ENABLE 0:0
+#define NVB197_SET_ZPASS_PIXEL_COUNT_ENABLE_FALSE 0x00000000
+#define NVB197_SET_ZPASS_PIXEL_COUNT_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_POINT_SIZE 0x1518
+#define NVB197_SET_POINT_SIZE_V 31:0
+
+#define NVB197_SET_ZCULL_STATS 0x151c
+#define NVB197_SET_ZCULL_STATS_ENABLE 0:0
+#define NVB197_SET_ZCULL_STATS_ENABLE_FALSE 0x00000000
+#define NVB197_SET_ZCULL_STATS_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_POINT_SPRITE 0x1520
+#define NVB197_SET_POINT_SPRITE_ENABLE 0:0
+#define NVB197_SET_POINT_SPRITE_ENABLE_FALSE 0x00000000
+#define NVB197_SET_POINT_SPRITE_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_SHADER_EXCEPTIONS 0x1528
+#define NVB197_SET_SHADER_EXCEPTIONS_ENABLE 0:0
+#define NVB197_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000
+#define NVB197_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001
+
+#define NVB197_CLEAR_REPORT_VALUE 0x1530
+#define NVB197_CLEAR_REPORT_VALUE_TYPE 4:0
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_DA_VERTICES_GENERATED 0x00000012
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_DA_PRIMITIVES_GENERATED 0x00000013
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_VS_INVOCATIONS 0x00000015
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_TI_INVOCATIONS 0x00000016
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_TS_INVOCATIONS 0x00000017
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_TS_PRIMITIVES_GENERATED 0x00000018
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_GS_INVOCATIONS 0x0000001A
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_GS_PRIMITIVES_GENERATED 0x0000001B
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_VTG_PRIMITIVES_OUT 0x0000001F
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_SUCCEEDED 0x00000010
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_NEEDED 0x00000011
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000003
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_CLIPPER_INVOCATIONS 0x0000001C
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_CLIPPER_PRIMITIVES_GENERATED 0x0000001D
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_ZCULL_STATS 0x00000002
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_PS_INVOCATIONS 0x0000001E
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_ZPASS_PIXEL_CNT 0x00000001
+#define NVB197_CLEAR_REPORT_VALUE_TYPE_ALPHA_BETA_CLOCKS 0x00000004
+
+#define NVB197_SET_ANTI_ALIAS_ENABLE 0x1534
+#define NVB197_SET_ANTI_ALIAS_ENABLE_V 0:0
+#define NVB197_SET_ANTI_ALIAS_ENABLE_V_FALSE 0x00000000
+#define NVB197_SET_ANTI_ALIAS_ENABLE_V_TRUE 0x00000001
+
+#define NVB197_SET_ZT_SELECT 0x1538
+#define NVB197_SET_ZT_SELECT_TARGET_COUNT 0:0
+
+#define NVB197_SET_ANTI_ALIAS_ALPHA_CONTROL 0x153c
+#define NVB197_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE 0:0
+#define NVB197_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_DISABLE 0x00000000
+#define NVB197_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_ENABLE 0x00000001
+#define NVB197_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE 4:4
+#define NVB197_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_DISABLE 0x00000000
+#define NVB197_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_ENABLE 0x00000001
+
+#define NVB197_SET_RENDER_ENABLE_A 0x1550
+#define NVB197_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0
+
+#define NVB197_SET_RENDER_ENABLE_B 0x1554
+#define NVB197_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0
+
+#define NVB197_SET_RENDER_ENABLE_C 0x1558
+#define NVB197_SET_RENDER_ENABLE_C_MODE 2:0
+#define NVB197_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000
+#define NVB197_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001
+#define NVB197_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002
+#define NVB197_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003
+#define NVB197_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NVB197_SET_TEX_SAMPLER_POOL_A 0x155c
+#define NVB197_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVB197_SET_TEX_SAMPLER_POOL_B 0x1560
+#define NVB197_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVB197_SET_TEX_SAMPLER_POOL_C 0x1564
+#define NVB197_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0
+
+#define NVB197_SET_SLOPE_SCALE_DEPTH_BIAS 0x156c
+#define NVB197_SET_SLOPE_SCALE_DEPTH_BIAS_V 31:0
+
+#define NVB197_SET_ANTI_ALIASED_LINE 0x1570
+#define NVB197_SET_ANTI_ALIASED_LINE_ENABLE 0:0
+#define NVB197_SET_ANTI_ALIASED_LINE_ENABLE_FALSE 0x00000000
+#define NVB197_SET_ANTI_ALIASED_LINE_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_TEX_HEADER_POOL_A 0x1574
+#define NVB197_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVB197_SET_TEX_HEADER_POOL_B 0x1578
+#define NVB197_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVB197_SET_TEX_HEADER_POOL_C 0x157c
+#define NVB197_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0
+
+#define NVB197_SET_ACTIVE_ZCULL_REGION 0x1590
+#define NVB197_SET_ACTIVE_ZCULL_REGION_ID 5:0
+
+#define NVB197_SET_TWO_SIDED_STENCIL_TEST 0x1594
+#define NVB197_SET_TWO_SIDED_STENCIL_TEST_ENABLE 0:0
+#define NVB197_SET_TWO_SIDED_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NVB197_SET_TWO_SIDED_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_BACK_STENCIL_OP_FAIL 0x1598
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V 31:0
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NVB197_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL 0x159c
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V 31:0
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVB197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS 0x15a0
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V 31:0
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NVB197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NVB197_SET_BACK_STENCIL_FUNC 0x15a4
+#define NVB197_SET_BACK_STENCIL_FUNC_V 31:0
+#define NVB197_SET_BACK_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NVB197_SET_BACK_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NVB197_SET_BACK_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NVB197_SET_BACK_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVB197_SET_BACK_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NVB197_SET_BACK_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVB197_SET_BACK_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVB197_SET_BACK_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVB197_SET_BACK_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NVB197_SET_BACK_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NVB197_SET_BACK_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NVB197_SET_BACK_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVB197_SET_BACK_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVB197_SET_BACK_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVB197_SET_BACK_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVB197_SET_BACK_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
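The _A/_B method pairs here (SET_RENDER_ENABLE, SET_TEX_SAMPLER_POOL, SET_TEX_HEADER_POOL) split one GPU virtual address across two writes: _A carries an 8-bit upper slice, _B the low 32 bits, which presumably yields a 40-bit address with the upper slice at bits 39:32. A sketch of that split; push() is again a hypothetical emit stand-in:

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the header above. */
    #define NVB197_SET_RENDER_ENABLE_A 0x1550 /* OFFSET_UPPER 7:0  */
    #define NVB197_SET_RENDER_ENABLE_B 0x1554 /* OFFSET_LOWER 31:0 */

    /* Hypothetical method-emit stand-in. */
    static void push(uint16_t method, uint32_t payload)
    {
        printf("mthd 0x%04x data 0x%08x\n", method, payload);
    }

    int main(void)
    {
        uint64_t gpu_va = 0x12345678ull << 8; /* example 40-bit address */
        push(NVB197_SET_RENDER_ENABLE_A, (uint32_t)(gpu_va >> 32) & 0xffu);
        push(NVB197_SET_RENDER_ENABLE_B, (uint32_t)(gpu_va & 0xffffffffu));
        return 0;
    }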
NVB197_SET_BACK_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVB197_SET_BACK_STENCIL_FUNC_V_D3D_GREATER 0x00000005 +#define NVB197_SET_BACK_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVB197_SET_BACK_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVB197_SET_BACK_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVB197_SET_SRGB_WRITE 0x15b8 +#define NVB197_SET_SRGB_WRITE_ENABLE 0:0 +#define NVB197_SET_SRGB_WRITE_ENABLE_FALSE 0x00000000 +#define NVB197_SET_SRGB_WRITE_ENABLE_TRUE 0x00000001 + +#define NVB197_SET_DEPTH_BIAS 0x15bc +#define NVB197_SET_DEPTH_BIAS_V 31:0 + +#define NVB197_SET_ZCULL_REGION_FORMAT 0x15c8 +#define NVB197_SET_ZCULL_REGION_FORMAT_TYPE 3:0 +#define NVB197_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X4 0x00000000 +#define NVB197_SET_ZCULL_REGION_FORMAT_TYPE_ZS_4X4 0x00000001 +#define NVB197_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X2 0x00000002 +#define NVB197_SET_ZCULL_REGION_FORMAT_TYPE_Z_2X4 0x00000003 +#define NVB197_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X8_4X4 0x00000004 +#define NVB197_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_4X2 0x00000005 +#define NVB197_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_2X4 0x00000006 +#define NVB197_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X16_4X8 0x00000007 +#define NVB197_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_2X2 0x00000008 +#define NVB197_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_4X2 0x00000009 +#define NVB197_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_2X4 0x0000000A +#define NVB197_SET_ZCULL_REGION_FORMAT_TYPE_ZS_8X8_2X2 0x0000000B +#define NVB197_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_1X1 0x0000000C + +#define NVB197_SET_RT_LAYER 0x15cc +#define NVB197_SET_RT_LAYER_V 15:0 +#define NVB197_SET_RT_LAYER_CONTROL 16:16 +#define NVB197_SET_RT_LAYER_CONTROL_V_SELECTS_LAYER 0x00000000 +#define NVB197_SET_RT_LAYER_CONTROL_GEOMETRY_SHADER_SELECTS_LAYER 0x00000001 + +#define NVB197_SET_ANTI_ALIAS 0x15d0 +#define NVB197_SET_ANTI_ALIAS_SAMPLES 3:0 +#define NVB197_SET_ANTI_ALIAS_SAMPLES_MODE_1X1 0x00000000 +#define NVB197_SET_ANTI_ALIAS_SAMPLES_MODE_2X1 0x00000001 +#define NVB197_SET_ANTI_ALIAS_SAMPLES_MODE_2X2 0x00000002 +#define NVB197_SET_ANTI_ALIAS_SAMPLES_MODE_4X2 0x00000003 +#define NVB197_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_D3D 0x00000004 +#define NVB197_SET_ANTI_ALIAS_SAMPLES_MODE_2X1_D3D 0x00000005 +#define NVB197_SET_ANTI_ALIAS_SAMPLES_MODE_4X4 0x00000006 +#define NVB197_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_4 0x00000008 +#define NVB197_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_12 0x00000009 +#define NVB197_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_8 0x0000000A +#define NVB197_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_24 0x0000000B + +#define NVB197_SET_EDGE_FLAG 0x15e4 +#define NVB197_SET_EDGE_FLAG_V 0:0 +#define NVB197_SET_EDGE_FLAG_V_FALSE 0x00000000 +#define NVB197_SET_EDGE_FLAG_V_TRUE 0x00000001 + +#define NVB197_DRAW_INLINE_INDEX 0x15e8 +#define NVB197_DRAW_INLINE_INDEX_V 31:0 + +#define NVB197_SET_INLINE_INDEX2X16_ALIGN 0x15ec +#define NVB197_SET_INLINE_INDEX2X16_ALIGN_COUNT 30:0 +#define NVB197_SET_INLINE_INDEX2X16_ALIGN_START_ODD 31:31 +#define NVB197_SET_INLINE_INDEX2X16_ALIGN_START_ODD_FALSE 0x00000000 +#define NVB197_SET_INLINE_INDEX2X16_ALIGN_START_ODD_TRUE 0x00000001 + +#define NVB197_DRAW_INLINE_INDEX2X16 0x15f0 +#define NVB197_DRAW_INLINE_INDEX2X16_EVEN 15:0 +#define NVB197_DRAW_INLINE_INDEX2X16_ODD 31:16 + +#define NVB197_SET_VERTEX_GLOBAL_BASE_OFFSET_A 0x15f4 +#define NVB197_SET_VERTEX_GLOBAL_BASE_OFFSET_A_UPPER 7:0 + +#define NVB197_SET_VERTEX_GLOBAL_BASE_OFFSET_B 0x15f8 +#define NVB197_SET_VERTEX_GLOBAL_BASE_OFFSET_B_LOWER 31:0 + +#define NVB197_SET_ZCULL_REGION_PIXEL_OFFSET_A 0x15fc +#define 
NVB197_SET_ZCULL_REGION_PIXEL_OFFSET_A_WIDTH 15:0 + +#define NVB197_SET_ZCULL_REGION_PIXEL_OFFSET_B 0x1600 +#define NVB197_SET_ZCULL_REGION_PIXEL_OFFSET_B_HEIGHT 15:0 + +#define NVB197_SET_POINT_SPRITE_SELECT 0x1604 +#define NVB197_SET_POINT_SPRITE_SELECT_RMODE 1:0 +#define NVB197_SET_POINT_SPRITE_SELECT_RMODE_ZERO 0x00000000 +#define NVB197_SET_POINT_SPRITE_SELECT_RMODE_FROM_R 0x00000001 +#define NVB197_SET_POINT_SPRITE_SELECT_RMODE_FROM_S 0x00000002 +#define NVB197_SET_POINT_SPRITE_SELECT_ORIGIN 2:2 +#define NVB197_SET_POINT_SPRITE_SELECT_ORIGIN_BOTTOM 0x00000000 +#define NVB197_SET_POINT_SPRITE_SELECT_ORIGIN_TOP 0x00000001 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE0 3:3 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE0_PASSTHROUGH 0x00000000 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE0_GENERATE 0x00000001 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE1 4:4 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE1_PASSTHROUGH 0x00000000 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE1_GENERATE 0x00000001 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE2 5:5 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE2_PASSTHROUGH 0x00000000 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE2_GENERATE 0x00000001 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE3 6:6 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE3_PASSTHROUGH 0x00000000 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE3_GENERATE 0x00000001 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE4 7:7 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE4_PASSTHROUGH 0x00000000 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE4_GENERATE 0x00000001 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE5 8:8 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE5_PASSTHROUGH 0x00000000 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE5_GENERATE 0x00000001 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE6 9:9 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE6_PASSTHROUGH 0x00000000 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE6_GENERATE 0x00000001 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE7 10:10 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE7_PASSTHROUGH 0x00000000 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE7_GENERATE 0x00000001 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE8 11:11 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE8_PASSTHROUGH 0x00000000 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE8_GENERATE 0x00000001 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE9 12:12 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE9_PASSTHROUGH 0x00000000 +#define NVB197_SET_POINT_SPRITE_SELECT_TEXTURE9_GENERATE 0x00000001 + +#define NVB197_SET_PROGRAM_REGION_A 0x1608 +#define NVB197_SET_PROGRAM_REGION_A_ADDRESS_UPPER 7:0 + +#define NVB197_SET_PROGRAM_REGION_B 0x160c +#define NVB197_SET_PROGRAM_REGION_B_ADDRESS_LOWER 31:0 + +#define NVB197_SET_ATTRIBUTE_DEFAULT 0x1610 +#define NVB197_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE 0:0 +#define NVB197_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_0001 0x00000000 +#define NVB197_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_1111 0x00000001 +#define NVB197_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR 1:1 +#define NVB197_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0000 0x00000000 +#define NVB197_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0001 0x00000001 +#define NVB197_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR 2:2 +#define NVB197_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0000 0x00000000 +#define NVB197_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0001 0x00000001 +#define NVB197_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE 3:3 
+#define NVB197_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0000 0x00000000 +#define NVB197_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0001 0x00000001 +#define NVB197_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0 4:4 +#define NVB197_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_0001 0x00000000 +#define NVB197_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_1111 0x00000001 +#define NVB197_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15 5:5 +#define NVB197_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0000 0x00000000 +#define NVB197_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0001 0x00000001 + +#define NVB197_END 0x1614 +#define NVB197_END_V 0:0 + +#define NVB197_BEGIN 0x1618 +#define NVB197_BEGIN_OP 15:0 +#define NVB197_BEGIN_OP_POINTS 0x00000000 +#define NVB197_BEGIN_OP_LINES 0x00000001 +#define NVB197_BEGIN_OP_LINE_LOOP 0x00000002 +#define NVB197_BEGIN_OP_LINE_STRIP 0x00000003 +#define NVB197_BEGIN_OP_TRIANGLES 0x00000004 +#define NVB197_BEGIN_OP_TRIANGLE_STRIP 0x00000005 +#define NVB197_BEGIN_OP_TRIANGLE_FAN 0x00000006 +#define NVB197_BEGIN_OP_QUADS 0x00000007 +#define NVB197_BEGIN_OP_QUAD_STRIP 0x00000008 +#define NVB197_BEGIN_OP_POLYGON 0x00000009 +#define NVB197_BEGIN_OP_LINELIST_ADJCY 0x0000000A +#define NVB197_BEGIN_OP_LINESTRIP_ADJCY 0x0000000B +#define NVB197_BEGIN_OP_TRIANGLELIST_ADJCY 0x0000000C +#define NVB197_BEGIN_OP_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVB197_BEGIN_OP_PATCH 0x0000000E +#define NVB197_BEGIN_PRIMITIVE_ID 24:24 +#define NVB197_BEGIN_PRIMITIVE_ID_FIRST 0x00000000 +#define NVB197_BEGIN_PRIMITIVE_ID_UNCHANGED 0x00000001 +#define NVB197_BEGIN_INSTANCE_ID 27:26 +#define NVB197_BEGIN_INSTANCE_ID_FIRST 0x00000000 +#define NVB197_BEGIN_INSTANCE_ID_SUBSEQUENT 0x00000001 +#define NVB197_BEGIN_INSTANCE_ID_UNCHANGED 0x00000002 +#define NVB197_BEGIN_SPLIT_MODE 30:29 +#define NVB197_BEGIN_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000 +#define NVB197_BEGIN_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001 +#define NVB197_BEGIN_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002 +#define NVB197_BEGIN_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003 + +#define NVB197_SET_VERTEX_ID_COPY 0x161c +#define NVB197_SET_VERTEX_ID_COPY_ENABLE 0:0 +#define NVB197_SET_VERTEX_ID_COPY_ENABLE_FALSE 0x00000000 +#define NVB197_SET_VERTEX_ID_COPY_ENABLE_TRUE 0x00000001 +#define NVB197_SET_VERTEX_ID_COPY_ATTRIBUTE_SLOT 11:4 + +#define NVB197_ADD_TO_PRIMITIVE_ID 0x1620 +#define NVB197_ADD_TO_PRIMITIVE_ID_V 31:0 + +#define NVB197_LOAD_PRIMITIVE_ID 0x1624 +#define NVB197_LOAD_PRIMITIVE_ID_V 31:0 + +#define NVB197_SET_SHADER_BASED_CULL 0x162c +#define NVB197_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE 1:1 +#define NVB197_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_FALSE 0x00000000 +#define NVB197_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_TRUE 0x00000001 +#define NVB197_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE 0:0 +#define NVB197_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_FALSE 0x00000000 +#define NVB197_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_TRUE 0x00000001 + +#define NVB197_SET_CLASS_VERSION 0x1638 +#define NVB197_SET_CLASS_VERSION_CURRENT 15:0 +#define NVB197_SET_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVB197_SET_DA_PRIMITIVE_RESTART 0x1644 +#define NVB197_SET_DA_PRIMITIVE_RESTART_ENABLE 0:0 +#define NVB197_SET_DA_PRIMITIVE_RESTART_ENABLE_FALSE 0x00000000 +#define NVB197_SET_DA_PRIMITIVE_RESTART_ENABLE_TRUE 0x00000001 + +#define NVB197_SET_DA_PRIMITIVE_RESTART_INDEX 0x1648 +#define NVB197_SET_DA_PRIMITIVE_RESTART_INDEX_V 31:0 + +#define NVB197_SET_DA_OUTPUT 0x164c +#define NVB197_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START 12:12 
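+
+/*
+ * Illustrative note (not part of the generated method list): geometry is
+ * bracketed by the BEGIN/END methods above, with the primitive type packed
+ * into the BEGIN_OP field (bits 15:0).  Assuming the DRF field helpers from
+ * nvmisc.h, a BEGIN payload for a triangle list might be assembled roughly
+ * as:
+ *
+ *   NvU32 begin = DRF_DEF(B197, _BEGIN, _OP, _TRIANGLES) |
+ *                 DRF_DEF(B197, _BEGIN, _INSTANCE_ID, _FIRST);
+ *
+ * The surrounding push-buffer write sequence is driver-specific; this only
+ * sketches how the MSB:LSB field encodings compose.
+ */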
+
+#define NVB197_SET_VERTEX_ID_COPY 0x161c
+#define NVB197_SET_VERTEX_ID_COPY_ENABLE 0:0
+#define NVB197_SET_VERTEX_ID_COPY_ENABLE_FALSE 0x00000000
+#define NVB197_SET_VERTEX_ID_COPY_ENABLE_TRUE 0x00000001
+#define NVB197_SET_VERTEX_ID_COPY_ATTRIBUTE_SLOT 11:4
+
+#define NVB197_ADD_TO_PRIMITIVE_ID 0x1620
+#define NVB197_ADD_TO_PRIMITIVE_ID_V 31:0
+
+#define NVB197_LOAD_PRIMITIVE_ID 0x1624
+#define NVB197_LOAD_PRIMITIVE_ID_V 31:0
+
+#define NVB197_SET_SHADER_BASED_CULL 0x162c
+#define NVB197_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE 1:1
+#define NVB197_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_FALSE 0x00000000
+#define NVB197_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_TRUE 0x00000001
+#define NVB197_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE 0:0
+#define NVB197_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_FALSE 0x00000000
+#define NVB197_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_CLASS_VERSION 0x1638
+#define NVB197_SET_CLASS_VERSION_CURRENT 15:0
+#define NVB197_SET_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVB197_SET_DA_PRIMITIVE_RESTART 0x1644
+#define NVB197_SET_DA_PRIMITIVE_RESTART_ENABLE 0:0
+#define NVB197_SET_DA_PRIMITIVE_RESTART_ENABLE_FALSE 0x00000000
+#define NVB197_SET_DA_PRIMITIVE_RESTART_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_DA_PRIMITIVE_RESTART_INDEX 0x1648
+#define NVB197_SET_DA_PRIMITIVE_RESTART_INDEX_V 31:0
+
+#define NVB197_SET_DA_OUTPUT 0x164c
+#define NVB197_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START 12:12
+#define NVB197_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_FALSE 0x00000000
+#define NVB197_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_TRUE 0x00000001
+
+#define NVB197_SET_ANTI_ALIASED_POINT 0x1658
+#define NVB197_SET_ANTI_ALIASED_POINT_ENABLE 0:0
+#define NVB197_SET_ANTI_ALIASED_POINT_ENABLE_FALSE 0x00000000
+#define NVB197_SET_ANTI_ALIASED_POINT_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_POINT_CENTER_MODE 0x165c
+#define NVB197_SET_POINT_CENTER_MODE_V 31:0
+#define NVB197_SET_POINT_CENTER_MODE_V_OGL 0x00000000
+#define NVB197_SET_POINT_CENTER_MODE_V_D3D 0x00000001
+
+#define NVB197_SET_LINE_SMOOTH_PARAMETERS 0x1668
+#define NVB197_SET_LINE_SMOOTH_PARAMETERS_FALLOFF 31:0
+#define NVB197_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_00 0x00000000
+#define NVB197_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_33 0x00000001
+#define NVB197_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_60 0x00000002
+
+#define NVB197_SET_LINE_STIPPLE 0x166c
+#define NVB197_SET_LINE_STIPPLE_ENABLE 0:0
+#define NVB197_SET_LINE_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVB197_SET_LINE_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_LINE_SMOOTH_EDGE_TABLE(i) (0x1670+(i)*4)
+#define NVB197_SET_LINE_SMOOTH_EDGE_TABLE_V0 7:0
+#define NVB197_SET_LINE_SMOOTH_EDGE_TABLE_V1 15:8
+#define NVB197_SET_LINE_SMOOTH_EDGE_TABLE_V2 23:16
+#define NVB197_SET_LINE_SMOOTH_EDGE_TABLE_V3 31:24
+
+#define NVB197_SET_LINE_STIPPLE_PARAMETERS 0x1680
+#define NVB197_SET_LINE_STIPPLE_PARAMETERS_FACTOR 7:0
+#define NVB197_SET_LINE_STIPPLE_PARAMETERS_PATTERN 23:8
+
+#define NVB197_SET_PROVOKING_VERTEX 0x1684
+#define NVB197_SET_PROVOKING_VERTEX_V 0:0
+#define NVB197_SET_PROVOKING_VERTEX_V_FIRST 0x00000000
+#define NVB197_SET_PROVOKING_VERTEX_V_LAST 0x00000001
+
+#define NVB197_SET_TWO_SIDED_LIGHT 0x1688
+#define NVB197_SET_TWO_SIDED_LIGHT_ENABLE 0:0
+#define NVB197_SET_TWO_SIDED_LIGHT_ENABLE_FALSE 0x00000000
+#define NVB197_SET_TWO_SIDED_LIGHT_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_POLYGON_STIPPLE 0x168c
+#define NVB197_SET_POLYGON_STIPPLE_ENABLE 0:0
+#define NVB197_SET_POLYGON_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVB197_SET_POLYGON_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_SHADER_CONTROL 0x1690
+#define NVB197_SET_SHADER_CONTROL_DEFAULT_PARTIAL 0:0
+#define NVB197_SET_SHADER_CONTROL_DEFAULT_PARTIAL_ZERO 0x00000000
+#define NVB197_SET_SHADER_CONTROL_DEFAULT_PARTIAL_INFINITY 0x00000001
+#define NVB197_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR 1:1
+#define NVB197_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_LEGACY 0x00000000
+#define NVB197_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_FP64_COMPATIBLE 0x00000001
+#define NVB197_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR 2:2
+#define NVB197_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_ZERO 0x00000000
+#define NVB197_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_INDEFINITE 0x00000001
+
+#define NVB197_CHECK_CLASS_VERSION 0x16a0
+#define NVB197_CHECK_CLASS_VERSION_CURRENT 15:0
+#define NVB197_CHECK_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVB197_SET_SPH_VERSION 0x16a4
+#define NVB197_SET_SPH_VERSION_CURRENT 15:0
+#define NVB197_SET_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVB197_CHECK_SPH_VERSION 0x16a8
+#define NVB197_CHECK_SPH_VERSION_CURRENT 15:0
+#define NVB197_CHECK_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVB197_SET_ALPHA_TO_COVERAGE_OVERRIDE 0x16b4
+#define NVB197_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE 0:0
+#define NVB197_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000
+#define NVB197_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001
+#define NVB197_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT 1:1
+#define NVB197_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_DISABLE 0x00000000
+#define NVB197_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_ENABLE 0x00000001
+
+#define NVB197_SET_POLYGON_STIPPLE_PATTERN(i) (0x1700+(i)*4)
+#define NVB197_SET_POLYGON_STIPPLE_PATTERN_V 31:0
+
+#define NVB197_SET_AAM_VERSION 0x1790
+#define NVB197_SET_AAM_VERSION_CURRENT 15:0
+#define NVB197_SET_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVB197_CHECK_AAM_VERSION 0x1794
+#define NVB197_CHECK_AAM_VERSION_CURRENT 15:0
+#define NVB197_CHECK_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVB197_SET_ZT_LAYER 0x179c
+#define NVB197_SET_ZT_LAYER_OFFSET 15:0
+
+#define NVB197_SET_INDEX_BUFFER_A 0x17c8
+#define NVB197_SET_INDEX_BUFFER_A_ADDRESS_UPPER 7:0
+
+#define NVB197_SET_INDEX_BUFFER_B 0x17cc
+#define NVB197_SET_INDEX_BUFFER_B_ADDRESS_LOWER 31:0
+
+#define NVB197_SET_INDEX_BUFFER_C 0x17d0
+#define NVB197_SET_INDEX_BUFFER_C_LIMIT_ADDRESS_UPPER 7:0
+
+#define NVB197_SET_INDEX_BUFFER_D 0x17d4
+#define NVB197_SET_INDEX_BUFFER_D_LIMIT_ADDRESS_LOWER 31:0
+
+#define NVB197_SET_INDEX_BUFFER_E 0x17d8
+#define NVB197_SET_INDEX_BUFFER_E_INDEX_SIZE 1:0
+#define NVB197_SET_INDEX_BUFFER_E_INDEX_SIZE_ONE_BYTE 0x00000000
+#define NVB197_SET_INDEX_BUFFER_E_INDEX_SIZE_TWO_BYTES 0x00000001
+#define NVB197_SET_INDEX_BUFFER_E_INDEX_SIZE_FOUR_BYTES 0x00000002
+
+#define NVB197_SET_INDEX_BUFFER_F 0x17dc
+#define NVB197_SET_INDEX_BUFFER_F_FIRST 31:0
+
+#define NVB197_DRAW_INDEX_BUFFER 0x17e0
+#define NVB197_DRAW_INDEX_BUFFER_COUNT 31:0
+
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST 0x17e4
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST 0x17e8
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST 0x17ec
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f0
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f4
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f8
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
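+
+/*
+ * Illustrative note (not part of the generated method list): the
+ * DRAW_INDEX_BUFFER{8,16,32}_BEGIN_END_INSTANCE_* methods above fold a
+ * complete indexed draw into a single 32-bit word: start index in 15:0,
+ * index count in 27:16, and topology in 31:28.  Assuming the DRF helpers
+ * from nvmisc.h, one triangle could be encoded roughly as:
+ *
+ *   NvU32 draw =
+ *       DRF_NUM(B197, _DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST, _FIRST, 0) |
+ *       DRF_NUM(B197, _DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST, _COUNT, 3) |
+ *       DRF_DEF(B197, _DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST, _TOPOLOGY, _TRIANGLES);
+ */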
+
+#define NVB197_SET_DEPTH_BIAS_CLAMP 0x187c
+#define NVB197_SET_DEPTH_BIAS_CLAMP_V 31:0
+
+#define NVB197_SET_VERTEX_STREAM_INSTANCE_A(i) (0x1880+(i)*4)
+#define NVB197_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED 0:0
+#define NVB197_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_FALSE 0x00000000
+#define NVB197_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_TRUE 0x00000001
+
+#define NVB197_SET_VERTEX_STREAM_INSTANCE_B(i) (0x18c0+(i)*4)
+#define NVB197_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED 0:0
+#define NVB197_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_FALSE 0x00000000
+#define NVB197_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_TRUE 0x00000001
+
+#define NVB197_SET_ATTRIBUTE_POINT_SIZE 0x1910
+#define NVB197_SET_ATTRIBUTE_POINT_SIZE_ENABLE 0:0
+#define NVB197_SET_ATTRIBUTE_POINT_SIZE_ENABLE_FALSE 0x00000000
+#define NVB197_SET_ATTRIBUTE_POINT_SIZE_ENABLE_TRUE 0x00000001
+#define NVB197_SET_ATTRIBUTE_POINT_SIZE_SLOT 11:4
+
+#define NVB197_OGL_SET_CULL 0x1918
+#define NVB197_OGL_SET_CULL_ENABLE 0:0
+#define NVB197_OGL_SET_CULL_ENABLE_FALSE 0x00000000
+#define NVB197_OGL_SET_CULL_ENABLE_TRUE 0x00000001
+
+#define NVB197_OGL_SET_FRONT_FACE 0x191c
+#define NVB197_OGL_SET_FRONT_FACE_V 31:0
+#define NVB197_OGL_SET_FRONT_FACE_V_CW 0x00000900
+#define NVB197_OGL_SET_FRONT_FACE_V_CCW 0x00000901
+
+#define NVB197_OGL_SET_CULL_FACE 0x1920
+#define NVB197_OGL_SET_CULL_FACE_V 31:0
+#define NVB197_OGL_SET_CULL_FACE_V_FRONT 0x00000404
+#define NVB197_OGL_SET_CULL_FACE_V_BACK 0x00000405
+#define NVB197_OGL_SET_CULL_FACE_V_FRONT_AND_BACK 0x00000408
+
+#define NVB197_SET_VIEWPORT_PIXEL 0x1924
+#define NVB197_SET_VIEWPORT_PIXEL_CENTER 0:0
+#define NVB197_SET_VIEWPORT_PIXEL_CENTER_AT_HALF_INTEGERS 0x00000000
+#define NVB197_SET_VIEWPORT_PIXEL_CENTER_AT_INTEGERS 0x00000001
+
+#define NVB197_SET_VIEWPORT_SCALE_OFFSET 0x192c
+#define NVB197_SET_VIEWPORT_SCALE_OFFSET_ENABLE 0:0
+#define NVB197_SET_VIEWPORT_SCALE_OFFSET_ENABLE_FALSE 0x00000000
+#define NVB197_SET_VIEWPORT_SCALE_OFFSET_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL 0x193c
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE 0:0
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_FALSE 0x00000000
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_TRUE 0x00000001
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z 3:3
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLIP 0x00000000
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLAMP 0x00000001
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z 4:4
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLIP 0x00000000
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLAMP 0x00000001
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND 7:7
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_256 0x00000000
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_1 0x00000001
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND 10:10
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_256 0x00000000
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_1 0x00000001
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP 13:11
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP 0x00000000
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_PASSTHRU 0x00000001
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XY_CLIP 0x00000002
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XYZ_CLIP 0x00000003
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP_NO_Z_CULL 0x00000004
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_Z_CLIP 0x00000005
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_TRI_FILL_OR_CLIP 0x00000006
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z 2:1
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SAME_AS_XY_GUARDBAND 0x00000000
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_256 0x00000001
+#define NVB197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_1 0x00000002
+
+#define NVB197_SET_USER_CLIP_OP 0x1940
+#define NVB197_SET_USER_CLIP_OP_PLANE0 0:0
+#define NVB197_SET_USER_CLIP_OP_PLANE0_CLIP 0x00000000
+#define NVB197_SET_USER_CLIP_OP_PLANE0_CULL 0x00000001
+#define NVB197_SET_USER_CLIP_OP_PLANE1 4:4
+#define NVB197_SET_USER_CLIP_OP_PLANE1_CLIP 0x00000000
+#define NVB197_SET_USER_CLIP_OP_PLANE1_CULL 0x00000001
+#define NVB197_SET_USER_CLIP_OP_PLANE2 8:8
+#define NVB197_SET_USER_CLIP_OP_PLANE2_CLIP 0x00000000
+#define NVB197_SET_USER_CLIP_OP_PLANE2_CULL 0x00000001
+#define NVB197_SET_USER_CLIP_OP_PLANE3 12:12
+#define NVB197_SET_USER_CLIP_OP_PLANE3_CLIP 0x00000000
+#define NVB197_SET_USER_CLIP_OP_PLANE3_CULL 0x00000001
+#define NVB197_SET_USER_CLIP_OP_PLANE4 16:16
+#define NVB197_SET_USER_CLIP_OP_PLANE4_CLIP 0x00000000
+#define NVB197_SET_USER_CLIP_OP_PLANE4_CULL 0x00000001
+#define NVB197_SET_USER_CLIP_OP_PLANE5 20:20
+#define NVB197_SET_USER_CLIP_OP_PLANE5_CLIP 0x00000000
+#define NVB197_SET_USER_CLIP_OP_PLANE5_CULL 0x00000001
+#define NVB197_SET_USER_CLIP_OP_PLANE6 24:24
+#define NVB197_SET_USER_CLIP_OP_PLANE6_CLIP 0x00000000
+#define NVB197_SET_USER_CLIP_OP_PLANE6_CULL 0x00000001
+#define NVB197_SET_USER_CLIP_OP_PLANE7 28:28
+#define NVB197_SET_USER_CLIP_OP_PLANE7_CLIP 0x00000000
+#define NVB197_SET_USER_CLIP_OP_PLANE7_CULL 0x00000001
+
+#define NVB197_SET_RENDER_ENABLE_OVERRIDE 0x1944
+#define NVB197_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0
+#define NVB197_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000
+#define NVB197_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001
+#define NVB197_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002
+
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_CONTROL 0x1948
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE 0:0
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_TOPOLOGY_IN_BEGIN_METHODS 0x00000000
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_SEPARATE_TOPOLOGY_STATE 0x00000001
+
+#define NVB197_SET_WINDOW_CLIP_ENABLE 0x194c
+#define NVB197_SET_WINDOW_CLIP_ENABLE_V 0:0
+#define NVB197_SET_WINDOW_CLIP_ENABLE_V_FALSE 0x00000000
+#define NVB197_SET_WINDOW_CLIP_ENABLE_V_TRUE 0x00000001
+
+#define NVB197_SET_WINDOW_CLIP_TYPE 0x1950
+#define NVB197_SET_WINDOW_CLIP_TYPE_V 1:0
+#define NVB197_SET_WINDOW_CLIP_TYPE_V_INCLUSIVE 0x00000000
+#define NVB197_SET_WINDOW_CLIP_TYPE_V_EXCLUSIVE 0x00000001
+#define NVB197_SET_WINDOW_CLIP_TYPE_V_CLIPALL 0x00000002
+
+#define NVB197_INVALIDATE_ZCULL 0x1958
+#define NVB197_INVALIDATE_ZCULL_V 31:0
+#define NVB197_INVALIDATE_ZCULL_V_INVALIDATE 0x00000000
+
+#define NVB197_SET_ZCULL 0x1968
+#define NVB197_SET_ZCULL_Z_ENABLE 0:0
+#define NVB197_SET_ZCULL_Z_ENABLE_FALSE 0x00000000
+#define NVB197_SET_ZCULL_Z_ENABLE_TRUE 0x00000001
+#define NVB197_SET_ZCULL_STENCIL_ENABLE 4:4
+#define NVB197_SET_ZCULL_STENCIL_ENABLE_FALSE 0x00000000
+#define NVB197_SET_ZCULL_STENCIL_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_ZCULL_BOUNDS 0x196c
+#define NVB197_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE 0:0
+#define NVB197_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVB197_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_TRUE 0x00000001
+#define NVB197_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE 4:4
+#define NVB197_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVB197_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_PRIMITIVE_TOPOLOGY 0x1970
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V 15:0
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_POINTLIST 0x00000001
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LINELIST 0x00000002
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP 0x00000003
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST 0x00000004
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP 0x00000005
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LINELIST_ADJCY 0x0000000A
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP_ADJCY 0x0000000B
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST_ADJCY 0x0000000C
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_PATCHLIST 0x0000000E
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_POINTS 0x00001001
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST 0x00001002
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST 0x00001003
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST 0x0000100F
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINESTRIP 0x00001010
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINESTRIP 0x00001011
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLELIST 0x00001012
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLESTRIP 0x00001013
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLESTRIP 0x00001014
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN 0x00001015
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLEFAN 0x00001016
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN_IMM 0x00001017
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST_IMM 0x00001018
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST2 0x0000101A
+#define NVB197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST2 0x0000101B
+
+#define NVB197_ZCULL_SYNC 0x1978
+#define NVB197_ZCULL_SYNC_V 31:0
+
+#define NVB197_SET_CLIP_ID_TEST 0x197c
+#define NVB197_SET_CLIP_ID_TEST_ENABLE 0:0
+#define NVB197_SET_CLIP_ID_TEST_ENABLE_FALSE 0x00000000
+#define NVB197_SET_CLIP_ID_TEST_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_SURFACE_CLIP_ID_WIDTH 0x1980
+#define NVB197_SET_SURFACE_CLIP_ID_WIDTH_V 31:0
+
+#define NVB197_SET_CLIP_ID 0x1984
+#define NVB197_SET_CLIP_ID_V 31:0
+
+#define NVB197_SET_DEPTH_BOUNDS_TEST 0x19bc
+#define NVB197_SET_DEPTH_BOUNDS_TEST_ENABLE 0:0
+#define NVB197_SET_DEPTH_BOUNDS_TEST_ENABLE_FALSE 0x00000000
+#define NVB197_SET_DEPTH_BOUNDS_TEST_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_BLEND_FLOAT_OPTION 0x19c0
+#define NVB197_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO 0:0
+#define NVB197_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_FALSE 0x00000000
+#define NVB197_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_TRUE 0x00000001
+
+#define NVB197_SET_LOGIC_OP 0x19c4
+#define NVB197_SET_LOGIC_OP_ENABLE 0:0
+#define NVB197_SET_LOGIC_OP_ENABLE_FALSE 0x00000000
+#define NVB197_SET_LOGIC_OP_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_LOGIC_OP_FUNC 0x19c8
+#define NVB197_SET_LOGIC_OP_FUNC_V 31:0
+#define NVB197_SET_LOGIC_OP_FUNC_V_CLEAR 0x00001500
+#define NVB197_SET_LOGIC_OP_FUNC_V_AND 0x00001501
+#define NVB197_SET_LOGIC_OP_FUNC_V_AND_REVERSE 0x00001502
+#define NVB197_SET_LOGIC_OP_FUNC_V_COPY 0x00001503
+#define NVB197_SET_LOGIC_OP_FUNC_V_AND_INVERTED 0x00001504
+#define NVB197_SET_LOGIC_OP_FUNC_V_NOOP 0x00001505
+#define NVB197_SET_LOGIC_OP_FUNC_V_XOR 0x00001506
+#define NVB197_SET_LOGIC_OP_FUNC_V_OR 0x00001507
+#define NVB197_SET_LOGIC_OP_FUNC_V_NOR 0x00001508
+#define NVB197_SET_LOGIC_OP_FUNC_V_EQUIV 0x00001509
+#define NVB197_SET_LOGIC_OP_FUNC_V_INVERT 0x0000150A
+#define NVB197_SET_LOGIC_OP_FUNC_V_OR_REVERSE 0x0000150B
+#define NVB197_SET_LOGIC_OP_FUNC_V_COPY_INVERTED 0x0000150C
+#define NVB197_SET_LOGIC_OP_FUNC_V_OR_INVERTED 0x0000150D
+#define NVB197_SET_LOGIC_OP_FUNC_V_NAND 0x0000150E
+#define NVB197_SET_LOGIC_OP_FUNC_V_SET 0x0000150F
+
+#define NVB197_SET_Z_COMPRESSION 0x19cc
+#define NVB197_SET_Z_COMPRESSION_ENABLE 0:0
+#define NVB197_SET_Z_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVB197_SET_Z_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVB197_CLEAR_SURFACE 0x19d0
+#define NVB197_CLEAR_SURFACE_Z_ENABLE 0:0
+#define NVB197_CLEAR_SURFACE_Z_ENABLE_FALSE 0x00000000
+#define NVB197_CLEAR_SURFACE_Z_ENABLE_TRUE 0x00000001
+#define NVB197_CLEAR_SURFACE_STENCIL_ENABLE 1:1
+#define NVB197_CLEAR_SURFACE_STENCIL_ENABLE_FALSE 0x00000000
+#define NVB197_CLEAR_SURFACE_STENCIL_ENABLE_TRUE 0x00000001
+#define NVB197_CLEAR_SURFACE_R_ENABLE 2:2
+#define NVB197_CLEAR_SURFACE_R_ENABLE_FALSE 0x00000000
+#define NVB197_CLEAR_SURFACE_R_ENABLE_TRUE 0x00000001
+#define NVB197_CLEAR_SURFACE_G_ENABLE 3:3
+#define NVB197_CLEAR_SURFACE_G_ENABLE_FALSE 0x00000000
+#define NVB197_CLEAR_SURFACE_G_ENABLE_TRUE 0x00000001
+#define NVB197_CLEAR_SURFACE_B_ENABLE 4:4
+#define NVB197_CLEAR_SURFACE_B_ENABLE_FALSE 0x00000000
+#define NVB197_CLEAR_SURFACE_B_ENABLE_TRUE 0x00000001
+#define NVB197_CLEAR_SURFACE_A_ENABLE 5:5
+#define NVB197_CLEAR_SURFACE_A_ENABLE_FALSE 0x00000000
+#define NVB197_CLEAR_SURFACE_A_ENABLE_TRUE 0x00000001
+#define NVB197_CLEAR_SURFACE_MRT_SELECT 9:6
+#define NVB197_CLEAR_SURFACE_RT_ARRAY_INDEX 25:10
+
+#define NVB197_CLEAR_CLIP_ID_SURFACE 0x19d4
+#define NVB197_CLEAR_CLIP_ID_SURFACE_V 31:0
+
+#define NVB197_SET_COLOR_COMPRESSION(i) (0x19e0+(i)*4)
+#define NVB197_SET_COLOR_COMPRESSION_ENABLE 0:0
+#define NVB197_SET_COLOR_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVB197_SET_COLOR_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_CT_WRITE(i) (0x1a00+(i)*4)
+#define NVB197_SET_CT_WRITE_R_ENABLE 0:0
+#define NVB197_SET_CT_WRITE_R_ENABLE_FALSE 0x00000000
+#define NVB197_SET_CT_WRITE_R_ENABLE_TRUE 0x00000001
+#define NVB197_SET_CT_WRITE_G_ENABLE 4:4
+#define NVB197_SET_CT_WRITE_G_ENABLE_FALSE 0x00000000
+#define NVB197_SET_CT_WRITE_G_ENABLE_TRUE 0x00000001
+#define NVB197_SET_CT_WRITE_B_ENABLE 8:8
+#define NVB197_SET_CT_WRITE_B_ENABLE_FALSE 0x00000000
+#define NVB197_SET_CT_WRITE_B_ENABLE_TRUE 0x00000001
+#define NVB197_SET_CT_WRITE_A_ENABLE 12:12
+#define NVB197_SET_CT_WRITE_A_ENABLE_FALSE 0x00000000
+#define NVB197_SET_CT_WRITE_A_ENABLE_TRUE 0x00000001
+
+#define NVB197_PIPE_NOP 0x1a2c
+#define NVB197_PIPE_NOP_V 31:0
+
+#define NVB197_SET_SPARE00 0x1a30
+#define NVB197_SET_SPARE00_V 31:0
+
+#define NVB197_SET_SPARE01 0x1a34
+#define NVB197_SET_SPARE01_V 31:0
+
+#define NVB197_SET_SPARE02 0x1a38
+#define NVB197_SET_SPARE02_V 31:0
+
+#define NVB197_SET_SPARE03 0x1a3c
+#define NVB197_SET_SPARE03_V 31:0
+
+#define NVB197_SET_REPORT_SEMAPHORE_A 0x1b00
+#define NVB197_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0
+
+#define NVB197_SET_REPORT_SEMAPHORE_B 0x1b04
+#define NVB197_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0
+
+#define NVB197_SET_REPORT_SEMAPHORE_C 0x1b08
+#define NVB197_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0
+
+#define NVB197_SET_REPORT_SEMAPHORE_D 0x1b0c
+#define NVB197_SET_REPORT_SEMAPHORE_D_OPERATION 1:0
+#define NVB197_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000
+#define NVB197_SET_REPORT_SEMAPHORE_D_OPERATION_ACQUIRE 0x00000001
+#define NVB197_SET_REPORT_SEMAPHORE_D_OPERATION_REPORT_ONLY 0x00000002
+#define NVB197_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003
+#define NVB197_SET_REPORT_SEMAPHORE_D_RELEASE 4:4
+#define NVB197_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_READS_COMPLETE 0x00000000
+#define NVB197_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_WRITES_COMPLETE 0x00000001
+#define NVB197_SET_REPORT_SEMAPHORE_D_ACQUIRE 8:8
+#define NVB197_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_WRITES_START 0x00000000
+#define NVB197_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_READS_START 0x00000001
+#define NVB197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION 15:12
+#define NVB197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_NONE 0x00000000
+#define NVB197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DATA_ASSEMBLER 0x00000001
+#define NVB197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VERTEX_SHADER 0x00000002
+#define NVB197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_INIT_SHADER 0x00000008
+#define NVB197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_SHADER 0x00000009
+#define NVB197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_GEOMETRY_SHADER 0x00000006
+#define NVB197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_STREAMING_OUTPUT 0x00000005
+#define NVB197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VPC 0x00000004
+#define NVB197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ZCULL 0x00000007
+#define NVB197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_PIXEL_SHADER 0x0000000A
+#define NVB197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DEPTH_TEST 0x0000000C
+#define NVB197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ALL 0x0000000F
+#define NVB197_SET_REPORT_SEMAPHORE_D_COMPARISON 16:16
+#define NVB197_SET_REPORT_SEMAPHORE_D_COMPARISON_EQ 0x00000000
+#define NVB197_SET_REPORT_SEMAPHORE_D_COMPARISON_GE 0x00000001
+#define NVB197_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20
+#define NVB197_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000
+#define NVB197_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT 27:23
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_NONE 0x00000000
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_DA_VERTICES_GENERATED 0x00000001
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_DA_PRIMITIVES_GENERATED 0x00000003
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_VS_INVOCATIONS 0x00000005
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_TI_INVOCATIONS 0x0000001B
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_TS_INVOCATIONS 0x0000001D
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_TS_PRIMITIVES_GENERATED 0x0000001F
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_GS_INVOCATIONS 0x00000007
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_GS_PRIMITIVES_GENERATED 0x00000009
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_ALPHA_BETA_CLOCKS 0x00000004
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_VTG_PRIMITIVES_OUT 0x00000012
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x0000001E
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_SUCCEEDED 0x0000000B
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED 0x0000000D
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000006
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_BYTE_COUNT 0x0000001A
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_INVOCATIONS 0x0000000F
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_PRIMITIVES_GENERATED 0x00000011
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS0 0x0000000A
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS1 0x0000000C
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS2 0x0000000E
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS3 0x00000010
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_PS_INVOCATIONS 0x00000013
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT 0x00000002
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT64 0x00000015
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_COLOR_TARGET 0x00000018
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_ZETA_TARGET 0x00000019
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_BOUNDING_RECTANGLE 0x0000001C
+#define NVB197_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28
+#define NVB197_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000
+#define NVB197_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001
+#define NVB197_SET_REPORT_SEMAPHORE_D_SUB_REPORT 7:5
+#define NVB197_SET_REPORT_SEMAPHORE_D_REPORT_DWORD_NUMBER 21:21
+#define NVB197_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2
+#define NVB197_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000
+#define NVB197_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001
+#define NVB197_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3
+#define NVB197_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000
+#define NVB197_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001
+#define NVB197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9
+#define NVB197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000
+#define NVB197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001
+#define NVB197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002
+#define NVB197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003
+#define NVB197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004
+#define NVB197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005
+#define NVB197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006
+#define NVB197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007
+#define NVB197_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17
+#define NVB197_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000
+#define NVB197_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001
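+
+/*
+ * Illustrative note (not part of the generated method list): a semaphore
+ * report is programmed as the four-method sequence above -- A/B carry the
+ * upper/lower bits of the GPU virtual address, C the payload, and D the
+ * control word.  Assuming the DRF helpers from nvmisc.h, a release from
+ * the bottom of the pipe with CPU awaken could be encoded roughly as:
+ *
+ *   NvU32 ctrl = DRF_DEF(B197, _SET_REPORT_SEMAPHORE_D, _OPERATION, _RELEASE) |
+ *                DRF_DEF(B197, _SET_REPORT_SEMAPHORE_D, _PIPELINE_LOCATION, _ALL) |
+ *                DRF_DEF(B197, _SET_REPORT_SEMAPHORE_D, _AWAKEN_ENABLE, _TRUE) |
+ *                DRF_DEF(B197, _SET_REPORT_SEMAPHORE_D, _STRUCTURE_SIZE, _ONE_WORD);
+ */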
+
+#define NVB197_SET_VERTEX_STREAM_A_FORMAT(j) (0x1c00+(j)*16)
+#define NVB197_SET_VERTEX_STREAM_A_FORMAT_STRIDE 11:0
+#define NVB197_SET_VERTEX_STREAM_A_FORMAT_ENABLE 12:12
+#define NVB197_SET_VERTEX_STREAM_A_FORMAT_ENABLE_FALSE 0x00000000
+#define NVB197_SET_VERTEX_STREAM_A_FORMAT_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_VERTEX_STREAM_A_LOCATION_A(j) (0x1c04+(j)*16)
+#define NVB197_SET_VERTEX_STREAM_A_LOCATION_A_OFFSET_UPPER 7:0
+
+#define NVB197_SET_VERTEX_STREAM_A_LOCATION_B(j) (0x1c08+(j)*16)
+#define NVB197_SET_VERTEX_STREAM_A_LOCATION_B_OFFSET_LOWER 31:0
+
+#define NVB197_SET_VERTEX_STREAM_A_FREQUENCY(j) (0x1c0c+(j)*16)
+#define NVB197_SET_VERTEX_STREAM_A_FREQUENCY_V 31:0
+
+#define NVB197_SET_VERTEX_STREAM_B_FORMAT(j) (0x1d00+(j)*16)
+#define NVB197_SET_VERTEX_STREAM_B_FORMAT_STRIDE 11:0
+#define NVB197_SET_VERTEX_STREAM_B_FORMAT_ENABLE 12:12
+#define NVB197_SET_VERTEX_STREAM_B_FORMAT_ENABLE_FALSE 0x00000000
+#define NVB197_SET_VERTEX_STREAM_B_FORMAT_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_VERTEX_STREAM_B_LOCATION_A(j) (0x1d04+(j)*16)
+#define NVB197_SET_VERTEX_STREAM_B_LOCATION_A_OFFSET_UPPER 7:0
+
+#define NVB197_SET_VERTEX_STREAM_B_LOCATION_B(j) (0x1d08+(j)*16)
+#define NVB197_SET_VERTEX_STREAM_B_LOCATION_B_OFFSET_LOWER 31:0
+
+#define NVB197_SET_VERTEX_STREAM_B_FREQUENCY(j) (0x1d0c+(j)*16)
+#define NVB197_SET_VERTEX_STREAM_B_FREQUENCY_V 31:0
+
+#define NVB197_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA(j) (0x1e00+(j)*32)
+#define NVB197_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE 0:0
+#define NVB197_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000
+#define NVB197_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_OP(j) (0x1e04+(j)*32)
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_OP_V 31:0
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MIN 0x00008007
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MAX 0x00008008
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_ADD 0x00000001
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_SUBTRACT 0x00000002
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MIN 0x00000004
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MAX 0x00000005
+
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF(j) (0x1e08+(j)*32)
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V 31:0
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF(j) (0x1e0c+(j)*32)
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V 31:0
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVB197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_OP(j) (0x1e10+(j)*32)
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_OP_V 31:0
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MIN 0x00008007
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MAX 0x00008008
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_ADD 0x00000001
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_SUBTRACT 0x00000002
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MIN 0x00000004
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MAX 0x00000005
+
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF(j) (0x1e14+(j)*32)
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V 31:0
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF(j) (0x1e18+(j)*32)
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V 31:0
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVB197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVB197_SET_VERTEX_STREAM_LIMIT_A_A(j) (0x1f00+(j)*8)
+#define NVB197_SET_VERTEX_STREAM_LIMIT_A_A_UPPER 7:0
+
+#define NVB197_SET_VERTEX_STREAM_LIMIT_A_B(j) (0x1f04+(j)*8)
+#define NVB197_SET_VERTEX_STREAM_LIMIT_A_B_LOWER 31:0
+
+#define NVB197_SET_VERTEX_STREAM_LIMIT_B_A(j) (0x1f80+(j)*8)
+#define NVB197_SET_VERTEX_STREAM_LIMIT_B_A_UPPER 7:0
+
+#define NVB197_SET_VERTEX_STREAM_LIMIT_B_B(j) (0x1f84+(j)*8)
+#define NVB197_SET_VERTEX_STREAM_LIMIT_B_B_LOWER 31:0
+
+#define NVB197_SET_PIPELINE_SHADER(j) (0x2000+(j)*64)
+#define NVB197_SET_PIPELINE_SHADER_ENABLE 0:0
+#define NVB197_SET_PIPELINE_SHADER_ENABLE_FALSE
0x00000000 +#define NVB197_SET_PIPELINE_SHADER_ENABLE_TRUE 0x00000001 +#define NVB197_SET_PIPELINE_SHADER_TYPE 7:4 +#define NVB197_SET_PIPELINE_SHADER_TYPE_VERTEX_CULL_BEFORE_FETCH 0x00000000 +#define NVB197_SET_PIPELINE_SHADER_TYPE_VERTEX 0x00000001 +#define NVB197_SET_PIPELINE_SHADER_TYPE_TESSELLATION_INIT 0x00000002 +#define NVB197_SET_PIPELINE_SHADER_TYPE_TESSELLATION 0x00000003 +#define NVB197_SET_PIPELINE_SHADER_TYPE_GEOMETRY 0x00000004 +#define NVB197_SET_PIPELINE_SHADER_TYPE_PIXEL 0x00000005 + +#define NVB197_SET_PIPELINE_PROGRAM(j) (0x2004+(j)*64) +#define NVB197_SET_PIPELINE_PROGRAM_OFFSET 31:0 + +#define NVB197_SET_PIPELINE_RESERVED_A(j) (0x2008+(j)*64) +#define NVB197_SET_PIPELINE_RESERVED_A_V 0:0 + +#define NVB197_SET_PIPELINE_REGISTER_COUNT(j) (0x200c+(j)*64) +#define NVB197_SET_PIPELINE_REGISTER_COUNT_V 7:0 + +#define NVB197_SET_PIPELINE_BINDING(j) (0x2010+(j)*64) +#define NVB197_SET_PIPELINE_BINDING_GROUP 2:0 + +#define NVB197_SET_PIPELINE_RESERVED_B(j) (0x2014+(j)*64) +#define NVB197_SET_PIPELINE_RESERVED_B_V 0:0 + +#define NVB197_SET_PIPELINE_RESERVED_C(j) (0x2018+(j)*64) +#define NVB197_SET_PIPELINE_RESERVED_C_V 0:0 + +#define NVB197_SET_PIPELINE_RESERVED_D(j) (0x201c+(j)*64) +#define NVB197_SET_PIPELINE_RESERVED_D_V 0:0 + +#define NVB197_SET_PIPELINE_RESERVED_E(j) (0x2020+(j)*64) +#define NVB197_SET_PIPELINE_RESERVED_E_V 0:0 + +#define NVB197_SET_FALCON00 0x2300 +#define NVB197_SET_FALCON00_V 31:0 + +#define NVB197_SET_FALCON01 0x2304 +#define NVB197_SET_FALCON01_V 31:0 + +#define NVB197_SET_FALCON02 0x2308 +#define NVB197_SET_FALCON02_V 31:0 + +#define NVB197_SET_FALCON03 0x230c +#define NVB197_SET_FALCON03_V 31:0 + +#define NVB197_SET_FALCON04 0x2310 +#define NVB197_SET_FALCON04_V 31:0 + +#define NVB197_SET_FALCON05 0x2314 +#define NVB197_SET_FALCON05_V 31:0 + +#define NVB197_SET_FALCON06 0x2318 +#define NVB197_SET_FALCON06_V 31:0 + +#define NVB197_SET_FALCON07 0x231c +#define NVB197_SET_FALCON07_V 31:0 + +#define NVB197_SET_FALCON08 0x2320 +#define NVB197_SET_FALCON08_V 31:0 + +#define NVB197_SET_FALCON09 0x2324 +#define NVB197_SET_FALCON09_V 31:0 + +#define NVB197_SET_FALCON10 0x2328 +#define NVB197_SET_FALCON10_V 31:0 + +#define NVB197_SET_FALCON11 0x232c +#define NVB197_SET_FALCON11_V 31:0 + +#define NVB197_SET_FALCON12 0x2330 +#define NVB197_SET_FALCON12_V 31:0 + +#define NVB197_SET_FALCON13 0x2334 +#define NVB197_SET_FALCON13_V 31:0 + +#define NVB197_SET_FALCON14 0x2338 +#define NVB197_SET_FALCON14_V 31:0 + +#define NVB197_SET_FALCON15 0x233c +#define NVB197_SET_FALCON15_V 31:0 + +#define NVB197_SET_FALCON16 0x2340 +#define NVB197_SET_FALCON16_V 31:0 + +#define NVB197_SET_FALCON17 0x2344 +#define NVB197_SET_FALCON17_V 31:0 + +#define NVB197_SET_FALCON18 0x2348 +#define NVB197_SET_FALCON18_V 31:0 + +#define NVB197_SET_FALCON19 0x234c +#define NVB197_SET_FALCON19_V 31:0 + +#define NVB197_SET_FALCON20 0x2350 +#define NVB197_SET_FALCON20_V 31:0 + +#define NVB197_SET_FALCON21 0x2354 +#define NVB197_SET_FALCON21_V 31:0 + +#define NVB197_SET_FALCON22 0x2358 +#define NVB197_SET_FALCON22_V 31:0 + +#define NVB197_SET_FALCON23 0x235c +#define NVB197_SET_FALCON23_V 31:0 + +#define NVB197_SET_FALCON24 0x2360 +#define NVB197_SET_FALCON24_V 31:0 + +#define NVB197_SET_FALCON25 0x2364 +#define NVB197_SET_FALCON25_V 31:0 + +#define NVB197_SET_FALCON26 0x2368 +#define NVB197_SET_FALCON26_V 31:0 + +#define NVB197_SET_FALCON27 0x236c +#define NVB197_SET_FALCON27_V 31:0 + +#define NVB197_SET_FALCON28 0x2370 +#define NVB197_SET_FALCON28_V 31:0 + +#define NVB197_SET_FALCON29 0x2374 
+#define NVB197_SET_FALCON29_V 31:0 + +#define NVB197_SET_FALCON30 0x2378 +#define NVB197_SET_FALCON30_V 31:0 + +#define NVB197_SET_FALCON31 0x237c +#define NVB197_SET_FALCON31_V 31:0 + +#define NVB197_SET_CONSTANT_BUFFER_SELECTOR_A 0x2380 +#define NVB197_SET_CONSTANT_BUFFER_SELECTOR_A_SIZE 16:0 + +#define NVB197_SET_CONSTANT_BUFFER_SELECTOR_B 0x2384 +#define NVB197_SET_CONSTANT_BUFFER_SELECTOR_B_ADDRESS_UPPER 7:0 + +#define NVB197_SET_CONSTANT_BUFFER_SELECTOR_C 0x2388 +#define NVB197_SET_CONSTANT_BUFFER_SELECTOR_C_ADDRESS_LOWER 31:0 + +#define NVB197_LOAD_CONSTANT_BUFFER_OFFSET 0x238c +#define NVB197_LOAD_CONSTANT_BUFFER_OFFSET_V 15:0 + +#define NVB197_LOAD_CONSTANT_BUFFER(i) (0x2390+(i)*4) +#define NVB197_LOAD_CONSTANT_BUFFER_V 31:0 + +#define NVB197_BIND_GROUP_RESERVED_A(j) (0x2400+(j)*32) +#define NVB197_BIND_GROUP_RESERVED_A_V 0:0 + +#define NVB197_BIND_GROUP_RESERVED_B(j) (0x2404+(j)*32) +#define NVB197_BIND_GROUP_RESERVED_B_V 0:0 + +#define NVB197_BIND_GROUP_RESERVED_C(j) (0x2408+(j)*32) +#define NVB197_BIND_GROUP_RESERVED_C_V 0:0 + +#define NVB197_BIND_GROUP_RESERVED_D(j) (0x240c+(j)*32) +#define NVB197_BIND_GROUP_RESERVED_D_V 0:0 + +#define NVB197_BIND_GROUP_CONSTANT_BUFFER(j) (0x2410+(j)*32) +#define NVB197_BIND_GROUP_CONSTANT_BUFFER_VALID 0:0 +#define NVB197_BIND_GROUP_CONSTANT_BUFFER_VALID_FALSE 0x00000000 +#define NVB197_BIND_GROUP_CONSTANT_BUFFER_VALID_TRUE 0x00000001 +#define NVB197_BIND_GROUP_CONSTANT_BUFFER_SHADER_SLOT 8:4 + +#define NVB197_SET_COLOR_CLAMP 0x2600 +#define NVB197_SET_COLOR_CLAMP_ENABLE 0:0 +#define NVB197_SET_COLOR_CLAMP_ENABLE_FALSE 0x00000000 +#define NVB197_SET_COLOR_CLAMP_ENABLE_TRUE 0x00000001 + +#define NVB197_SET_BINDLESS_TEXTURE 0x2608 +#define NVB197_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 4:0 + +#define NVB197_SET_TRAP_HANDLER 0x260c +#define NVB197_SET_TRAP_HANDLER_OFFSET 31:0 + +#define NVB197_SET_STREAM_OUT_LAYOUT_SELECT(i,j) (0x2800+(i)*128+(j)*4) +#define NVB197_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER00 7:0 +#define NVB197_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER01 15:8 +#define NVB197_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER02 23:16 +#define NVB197_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER03 31:24 + +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4) +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0 + +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22 +#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25 +#define 
NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27
+#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30
+
+#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4)
+#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0
+#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1
+#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3
+#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4
+
+#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc
+#define NVB197_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0
+
+#define NVB197_START_SHADER_PERFORMANCE_COUNTER 0x33e0
+#define NVB197_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVB197_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4
+#define NVB197_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVB197_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4)
+#define NVB197_SET_MME_SHADOW_SCRATCH_V 31:0
+
+#define NVB197_CALL_MME_MACRO(j) (0x3800+(j)*8)
+#define NVB197_CALL_MME_MACRO_V 31:0
+
+#define NVB197_CALL_MME_DATA(j) (0x3804+(j)*8)
+#define NVB197_CALL_MME_DATA_V 31:0
+
+#endif /* _cl_maxwell_b_h_ */
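The CALL_MME_MACRO/CALL_MME_DATA pair above is the channel-facing entry point to the Macro Method Expander: writing CALL_MME_MACRO(j) starts macro j with the written word as its first argument, and further arguments are streamed through CALL_MME_DATA(j). A minimal sketch of that calling sequence, assuming a hypothetical push_method() pushbuffer helper (not part of this patch):

/* Illustrative sketch only; push_method() is an assumed helper. */
#include "nvtypes.h"

extern void push_method(NvU32 method, NvU32 data); /* hypothetical */

static void call_mme_macro(NvU32 j, NvU32 arg0, NvU32 arg1)
{
    push_method(NVB197_CALL_MME_MACRO(j), arg0); /* start macro j, first argument */
    push_method(NVB197_CALL_MME_DATA(j), arg1);  /* stream a further argument */
}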
diff --git a/src/common/sdk/nvidia/inc/class/clc06f.h b/src/common/sdk/nvidia/inc/class/clc06f.h
new file mode 100644
index 0000000..d8b8607
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc06f.h
@@ -0,0 +1,312 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc06f_h_
+#define _clc06f_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvtypes.h"
+
+/* class PASCAL_CHANNEL_GPFIFO */
+/*
+ * Documentation for PASCAL_CHANNEL_GPFIFO can be found in dev_pbdma.ref,
+ * chapter "User Control Registers". It is documented as device NV_UDMA.
+ * The GPFIFO format itself is also documented in dev_pbdma.ref,
+ * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref,
+ * chapter "FIFO DMA RAM", NV_FIFO_DMA_*.
+ *
+ * Note there is no .mfs file for this class.
+ */
+#define PASCAL_CHANNEL_GPFIFO_A (0x0000C06F)
+
+#define NVC06F_TYPEDEF PASCAL_CHANNELChannelGPFifoA
+
+/* dma flow control data structure */
+typedef volatile struct Nvc06fControl_struct {
+    NvU32 Ignored00[0x010];     /*                                  0000-003f*/
+    NvU32 Put;                  /* put offset, read/write           0040-0043*/
+    NvU32 Get;                  /* get offset, read only            0044-0047*/
+    NvU32 Reference;            /* reference value, read only       0048-004b*/
+    NvU32 PutHi;                /* high order put offset bits       004c-004f*/
+    NvU32 Ignored01[0x002];     /*                                  0050-0057*/
+    NvU32 TopLevelGet;          /* top level get offset, read only  0058-005b*/
+    NvU32 TopLevelGetHi;        /* high order top level get bits    005c-005f*/
+    NvU32 GetHi;                /* high order get offset bits       0060-0063*/
+    NvU32 Ignored02[0x007];     /*                                  0064-007f*/
+    NvU32 Ignored03;            /* used to be engine yield          0080-0083*/
+    NvU32 Ignored04[0x001];     /*                                  0084-0087*/
+    NvU32 GPGet;                /* GP FIFO get offset, read only    0088-008b*/
+    NvU32 GPPut;                /* GP FIFO put offset               008c-008f*/
+    NvU32 Ignored05[0x5c];
+} Nvc06fControl, PascalAControlGPFifo;
+
+/* fields and values */
+#define NVC06F_NUMBER_OF_SUBCHANNELS (8)
+#define NVC06F_SET_OBJECT (0x00000000)
+#define NVC06F_SET_OBJECT_NVCLASS 15:0
+#define NVC06F_SET_OBJECT_ENGINE 20:16
+#define NVC06F_SET_OBJECT_ENGINE_SW 0x0000001f
+#define NVC06F_ILLEGAL (0x00000004)
+#define NVC06F_ILLEGAL_HANDLE 31:0
+#define NVC06F_NOP (0x00000008)
+#define NVC06F_NOP_HANDLE 31:0
+#define NVC06F_SEMAPHOREA (0x00000010)
+#define NVC06F_SEMAPHOREA_OFFSET_UPPER 7:0
+#define NVC06F_SEMAPHOREB (0x00000014)
+#define NVC06F_SEMAPHOREB_OFFSET_LOWER 31:2
+#define NVC06F_SEMAPHOREC (0x00000018)
+#define NVC06F_SEMAPHOREC_PAYLOAD 31:0
+#define NVC06F_SEMAPHORED (0x0000001C)
+#define NVC06F_SEMAPHORED_OPERATION 4:0
+#define NVC06F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001
+#define NVC06F_SEMAPHORED_OPERATION_RELEASE 0x00000002
+#define NVC06F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004
+#define NVC06F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008
+#define NVC06F_SEMAPHORED_OPERATION_REDUCTION 0x00000010
+#define NVC06F_SEMAPHORED_ACQUIRE_SWITCH 12:12
+#define NVC06F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000
+#define NVC06F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001
+#define NVC06F_SEMAPHORED_RELEASE_WFI 20:20
+#define NVC06F_SEMAPHORED_RELEASE_WFI_EN 0x00000000
+#define NVC06F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001
+#define NVC06F_SEMAPHORED_RELEASE_SIZE 24:24
+#define NVC06F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000
+#define NVC06F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001
+#define NVC06F_SEMAPHORED_REDUCTION 30:27
+#define NVC06F_SEMAPHORED_REDUCTION_MIN 0x00000000
+#define NVC06F_SEMAPHORED_REDUCTION_MAX 0x00000001
+#define NVC06F_SEMAPHORED_REDUCTION_XOR 0x00000002
+#define NVC06F_SEMAPHORED_REDUCTION_AND 0x00000003
+#define NVC06F_SEMAPHORED_REDUCTION_OR 0x00000004
+#define NVC06F_SEMAPHORED_REDUCTION_ADD 0x00000005
+#define NVC06F_SEMAPHORED_REDUCTION_INC 0x00000006
+#define NVC06F_SEMAPHORED_REDUCTION_DEC 0x00000007
+#define NVC06F_SEMAPHORED_FORMAT 31:31
+#define NVC06F_SEMAPHORED_FORMAT_SIGNED 0x00000000
+#define NVC06F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001
+#define NVC06F_NON_STALL_INTERRUPT (0x00000020)
+#define NVC06F_NON_STALL_INTERRUPT_HANDLE 31:0
+#define NVC06F_FB_FLUSH (0x00000024) // Deprecated - use MEMBAR TYPE SYS_MEMBAR
+#define NVC06F_FB_FLUSH_HANDLE 31:0
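The SEMAPHOREA..D quartet above packs a 40-bit GPU virtual address, a payload, and an operation word. A minimal release sketch, using the DRF field accessors from nvmisc.h (which consume the `high:low` field notation used throughout this header) and the same hypothetical push_method() helper as before:

/* Illustrative sketch only; push_method() is an assumed helper. */
#include "nvmisc.h"

static void release_semaphore(NvU64 sem_gpu_va, NvU32 payload)
{
    /* SEMAPHOREB_OFFSET_LOWER is bits 31:2, so the VA must be 4-byte aligned. */
    push_method(NVC06F_SEMAPHOREA,
                DRF_NUM(C06F, _SEMAPHOREA, _OFFSET_UPPER, (NvU32)(sem_gpu_va >> 32)));
    push_method(NVC06F_SEMAPHOREB,
                DRF_NUM(C06F, _SEMAPHOREB, _OFFSET_LOWER, ((NvU32)sem_gpu_va) >> 2));
    push_method(NVC06F_SEMAPHOREC,
                DRF_NUM(C06F, _SEMAPHOREC, _PAYLOAD, payload));
    push_method(NVC06F_SEMAPHORED,
                DRF_DEF(C06F, _SEMAPHORED, _OPERATION, _RELEASE) |
                DRF_DEF(C06F, _SEMAPHORED, _RELEASE_SIZE, _4BYTE));
}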
+// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for
+// specifying the page address for a targeted TLB invalidate and the uTLB for
+// a targeted REPLAY_CANCEL for UVM.
+// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly
+// rearranged fields.
+#define NVC06F_MEM_OP_A (0x00000028)
+#define NVC06F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED
+#define NVC06F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED
+#define NVC06F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11
+#define NVC06F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001
+#define NVC06F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000
+#define NVC06F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12
+#define NVC06F_MEM_OP_B (0x0000002c)
+#define NVC06F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0
+#define NVC06F_MEM_OP_C (0x00000030)
+#define NVC06F_MEM_OP_C_MEMBAR_TYPE 2:0
+#define NVC06F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000
+#define NVC06F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 0x00000004
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003
+#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE
+// MEM_OP_D MUST be preceded by MEM_OPs A-C.
+#define NVC06F_MEM_OP_D (0x00000034)
+#define NVC06F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE
+#define NVC06F_MEM_OP_D_OPERATION 31:27
+#define NVC06F_MEM_OP_D_OPERATION_MEMBAR 0x00000005
+#define NVC06F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009
+#define NVC06F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a
+#define NVC06F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d
+#define NVC06F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e
+// CLEAN_LINES is an alias for Tegra/GPU IP usage
+#define NVC06F_MEM_OP_D_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e
+// This B alias is confusing but it was missed as part of the update. Left here
+// for compatibility.
+#define NVC06F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e
+#define NVC06F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f
+#define NVC06F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010
+#define NVC06F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015
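As the comments above note, MEM_OP_D carries the opcode and must be preceded by MEM_OPs A-C even when A and B hold no payload for the chosen operation. A minimal sketch of a system-scope memory barrier (the replacement for the deprecated FB_FLUSH), again under the hypothetical push_method() helper:

/* Illustrative sketch only; push_method() is an assumed helper. */
static void push_sys_membar(void)
{
    push_method(NVC06F_MEM_OP_A, 0); /* no payload for MEMBAR */
    push_method(NVC06F_MEM_OP_B, 0); /* no payload for MEMBAR */
    push_method(NVC06F_MEM_OP_C,
                DRF_DEF(C06F, _MEM_OP_C, _MEMBAR_TYPE, _SYS_MEMBAR));
    push_method(NVC06F_MEM_OP_D,
                DRF_DEF(C06F, _MEM_OP_D, _OPERATION, _MEMBAR));
}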
+#define NVC06F_SET_REFERENCE (0x00000050)
+#define NVC06F_SET_REFERENCE_COUNT 31:0
+// Syncpoint methods are only available on Tegra parts. Attempting to use
+// them on discrete GPUs will result in Host raising NV_PPBDMA_INTR_0_METHOD.
+#define NVC06F_SYNCPOINTA (0x00000070)
+#define NVC06F_SYNCPOINTA_PAYLOAD 31:0
+#define NVC06F_SYNCPOINTB (0x00000074)
+#define NVC06F_SYNCPOINTB_OPERATION 0:0
+#define NVC06F_SYNCPOINTB_OPERATION_WAIT 0x00000000
+#define NVC06F_SYNCPOINTB_OPERATION_INCR 0x00000001
+#define NVC06F_SYNCPOINTB_WAIT_SWITCH 4:4
+#define NVC06F_SYNCPOINTB_WAIT_SWITCH_DIS 0x00000000
+#define NVC06F_SYNCPOINTB_WAIT_SWITCH_EN 0x00000001
+#define NVC06F_SYNCPOINTB_SYNCPT_INDEX 19:8
+#define NVC06F_WFI (0x00000078)
+#define NVC06F_WFI_SCOPE 0:0
+#define NVC06F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000
+#define NVC06F_WFI_SCOPE_ALL 0x00000001
+#define NVC06F_CRC_CHECK (0x0000007c)
+#define NVC06F_CRC_CHECK_VALUE 31:0
+#define NVC06F_YIELD (0x00000080)
+#define NVC06F_YIELD_OP 1:0
+#define NVC06F_YIELD_OP_NOP 0x00000000
+#define NVC06F_YIELD_OP_PBDMA_TIMESLICE 0x00000001
+#define NVC06F_YIELD_OP_RUNLIST_TIMESLICE 0x00000002
+#define NVC06F_YIELD_OP_TSG 0x00000003
+
+
+/* GPFIFO entry format */
+#define NVC06F_GP_ENTRY__SIZE 8
+#define NVC06F_GP_ENTRY0_FETCH 0:0
+#define NVC06F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000
+#define NVC06F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001
+#define NVC06F_GP_ENTRY0_GET 31:2
+#define NVC06F_GP_ENTRY0_OPERAND 31:0
+#define NVC06F_GP_ENTRY1_GET_HI 7:0
+#define NVC06F_GP_ENTRY1_PRIV 8:8
+#define NVC06F_GP_ENTRY1_PRIV_USER 0x00000000
+#define NVC06F_GP_ENTRY1_PRIV_KERNEL 0x00000001
+#define NVC06F_GP_ENTRY1_LEVEL 9:9
+#define NVC06F_GP_ENTRY1_LEVEL_MAIN 0x00000000
+#define NVC06F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001
+#define NVC06F_GP_ENTRY1_LENGTH 30:10
+#define NVC06F_GP_ENTRY1_SYNC 31:31
+#define NVC06F_GP_ENTRY1_SYNC_PROCEED 0x00000000
+#define NVC06F_GP_ENTRY1_SYNC_WAIT 0x00000001
+#define NVC06F_GP_ENTRY1_OPCODE 7:0
+#define NVC06F_GP_ENTRY1_OPCODE_NOP 0x00000000
+#define NVC06F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001
+#define NVC06F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002
+#define NVC06F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003
+
+/* dma method formats */
+#define NVC06F_DMA_METHOD_ADDRESS_OLD 12:2
+#define NVC06F_DMA_METHOD_ADDRESS 11:0
+#define NVC06F_DMA_SUBDEVICE_MASK 15:4
+#define NVC06F_DMA_METHOD_SUBCHANNEL 15:13
+#define NVC06F_DMA_TERT_OP 17:16
+#define NVC06F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000)
+#define NVC06F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001)
+#define NVC06F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002)
+#define NVC06F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003)
+#define NVC06F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000)
+#define NVC06F_DMA_METHOD_COUNT_OLD 28:18
+#define NVC06F_DMA_METHOD_COUNT 28:16
+#define NVC06F_DMA_IMMD_DATA 28:16
+#define NVC06F_DMA_SEC_OP 31:29
+#define NVC06F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000)
+#define NVC06F_DMA_SEC_OP_INC_METHOD (0x00000001)
+#define NVC06F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002)
+#define NVC06F_DMA_SEC_OP_NON_INC_METHOD (0x00000003)
+#define NVC06F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004)
+#define NVC06F_DMA_SEC_OP_ONE_INC (0x00000005)
+#define NVC06F_DMA_SEC_OP_RESERVED6 (0x00000006)
+#define NVC06F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007)
+/* dma incrementing method format */
+#define NVC06F_DMA_INCR_ADDRESS 11:0
+#define NVC06F_DMA_INCR_SUBCHANNEL 15:13
+#define NVC06F_DMA_INCR_COUNT 28:16
+#define NVC06F_DMA_INCR_OPCODE 31:29
+#define NVC06F_DMA_INCR_OPCODE_VALUE (0x00000001)
+#define NVC06F_DMA_INCR_DATA 31:0
+/* dma non-incrementing method format */
+#define NVC06F_DMA_NONINCR_ADDRESS 11:0
+#define NVC06F_DMA_NONINCR_SUBCHANNEL 15:13
+#define NVC06F_DMA_NONINCR_COUNT 28:16
+#define NVC06F_DMA_NONINCR_OPCODE 31:29
+#define NVC06F_DMA_NONINCR_OPCODE_VALUE (0x00000003)
+#define NVC06F_DMA_NONINCR_DATA 31:0
+/* dma increment-once method format */
+#define NVC06F_DMA_ONEINCR_ADDRESS 11:0
+#define NVC06F_DMA_ONEINCR_SUBCHANNEL 15:13
+#define NVC06F_DMA_ONEINCR_COUNT 28:16
+#define NVC06F_DMA_ONEINCR_OPCODE 31:29
+#define NVC06F_DMA_ONEINCR_OPCODE_VALUE (0x00000005)
+#define NVC06F_DMA_ONEINCR_DATA 31:0
+/* dma no-operation format */
+#define NVC06F_DMA_NOP (0x00000000)
+/* dma immediate-data format */
+#define NVC06F_DMA_IMMD_ADDRESS 11:0
+#define NVC06F_DMA_IMMD_SUBCHANNEL 15:13
+#define NVC06F_DMA_IMMD_DATA 28:16
+#define NVC06F_DMA_IMMD_OPCODE 31:29
+#define NVC06F_DMA_IMMD_OPCODE_VALUE (0x00000004)
+/* dma set sub-device mask format */
+#define NVC06F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4
+#define NVC06F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16
+#define NVC06F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001)
+/* dma store sub-device mask format */
+#define NVC06F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4
+#define NVC06F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16
+#define NVC06F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002)
+/* dma use sub-device mask format */
+#define NVC06F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16
+#define NVC06F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003)
+/* dma end-segment format */
+#define NVC06F_DMA_ENDSEG_OPCODE 31:29
+#define NVC06F_DMA_ENDSEG_OPCODE_VALUE (0x00000007)
+/* dma legacy incrementing/non-incrementing formats */
+#define NVC06F_DMA_ADDRESS 12:2
+#define NVC06F_DMA_SUBCH 15:13
+#define NVC06F_DMA_OPCODE3 17:16
+#define NVC06F_DMA_OPCODE3_NONE (0x00000000)
+#define NVC06F_DMA_COUNT 28:18
+#define NVC06F_DMA_OPCODE 31:29
+#define NVC06F_DMA_OPCODE_METHOD (0x00000000)
+#define NVC06F_DMA_OPCODE_NONINC_METHOD (0x00000002)
+#define NVC06F_DMA_DATA 31:0
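The two layouts above are the heart of work submission: a pushbuffer segment is a stream of method headers packed per the dma method formats, and each GPFIFO entry is two words locating such a segment. A sketch of both packings under these field definitions (DRF_NUM/DRF_DEF from nvmisc.h; the method-address field takes the byte offset divided by 4):

/* Illustrative sketch only, built on the field defines above. */
static NvU32 incr_method_header(NvU32 subch, NvU32 method, NvU32 count)
{
    /* SEC_OP INC_METHOD: `count` data words follow, method auto-increments. */
    return DRF_DEF(C06F, _DMA, _SEC_OP, _INC_METHOD) |
           DRF_NUM(C06F, _DMA, _METHOD_COUNT, count) |
           DRF_NUM(C06F, _DMA, _METHOD_SUBCHANNEL, subch) |
           DRF_NUM(C06F, _DMA, _METHOD_ADDRESS, method >> 2);
}

static void make_gp_entry(NvU64 pb_gpu_va, NvU32 length_dwords, NvU32 entry[2])
{
    /* GP_ENTRY0_GET holds VA bits 31:2; GP_ENTRY1_GET_HI holds bits 39:32. */
    entry[0] = DRF_NUM(C06F, _GP_ENTRY0, _GET, ((NvU32)pb_gpu_va) >> 2);
    entry[1] = DRF_NUM(C06F, _GP_ENTRY1, _GET_HI, (NvU32)(pb_gpu_va >> 32)) |
               DRF_NUM(C06F, _GP_ENTRY1, _LENGTH, length_dwords);
}

After copying the entry into the GPFIFO ring, the submitter typically advances GPPut in the Nvc06fControl structure defined earlier to make the new work visible to Host.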
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif /* _clc06f_h_ */
diff --git a/src/common/sdk/nvidia/inc/class/clc097.h b/src/common/sdk/nvidia/inc/class/clc097.h
new file mode 100644
index 0000000..129056b
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc097.h
@@ -0,0 +1,4191 @@
+/*******************************************************************************
+    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef _cl_pascal_a_h_ +#define _cl_pascal_a_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../../../class/bin/sw_header.pl pascal_a */ + +#include "nvtypes.h" + +#define PASCAL_A 0xC097 + +#define NVC097_SET_OBJECT 0x0000 +#define NVC097_SET_OBJECT_CLASS_ID 15:0 +#define NVC097_SET_OBJECT_ENGINE_ID 20:16 + +#define NVC097_NO_OPERATION 0x0100 +#define NVC097_NO_OPERATION_V 31:0 + +#define NVC097_SET_NOTIFY_A 0x0104 +#define NVC097_SET_NOTIFY_A_ADDRESS_UPPER 7:0 + +#define NVC097_SET_NOTIFY_B 0x0108 +#define NVC097_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NVC097_NOTIFY 0x010c +#define NVC097_NOTIFY_TYPE 31:0 +#define NVC097_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NVC097_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NVC097_WAIT_FOR_IDLE 0x0110 +#define NVC097_WAIT_FOR_IDLE_V 31:0 + +#define NVC097_LOAD_MME_INSTRUCTION_RAM_POINTER 0x0114 +#define NVC097_LOAD_MME_INSTRUCTION_RAM_POINTER_V 31:0 + +#define NVC097_LOAD_MME_INSTRUCTION_RAM 0x0118 +#define NVC097_LOAD_MME_INSTRUCTION_RAM_V 31:0 + +#define NVC097_LOAD_MME_START_ADDRESS_RAM_POINTER 0x011c +#define NVC097_LOAD_MME_START_ADDRESS_RAM_POINTER_V 31:0 + +#define NVC097_LOAD_MME_START_ADDRESS_RAM 0x0120 +#define NVC097_LOAD_MME_START_ADDRESS_RAM_V 31:0 + +#define NVC097_SET_MME_SHADOW_RAM_CONTROL 0x0124 +#define NVC097_SET_MME_SHADOW_RAM_CONTROL_MODE 1:0 +#define NVC097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK 0x00000000 +#define NVC097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK_WITH_FILTER 0x00000001 +#define NVC097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH 0x00000002 +#define NVC097_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_REPLAY 0x00000003 + +#define NVC097_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER 0x0128 +#define NVC097_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER_V 7:0 + +#define NVC097_PEER_SEMAPHORE_RELEASE_OFFSET 0x012c +#define NVC097_PEER_SEMAPHORE_RELEASE_OFFSET_V 31:0 + +#define NVC097_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NVC097_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC097_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NVC097_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC097_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NVC097_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NVC097_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC097_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC097_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 
0x00000002 +#define NVC097_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC097_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC097_SEND_GO_IDLE 0x013c +#define NVC097_SEND_GO_IDLE_V 31:0 + +#define NVC097_PM_TRIGGER 0x0140 +#define NVC097_PM_TRIGGER_V 31:0 + +#define NVC097_PM_TRIGGER_WFI 0x0144 +#define NVC097_PM_TRIGGER_WFI_V 31:0 + +#define NVC097_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NVC097_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NVC097_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NVC097_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NVC097_LINE_LENGTH_IN 0x0180 +#define NVC097_LINE_LENGTH_IN_VALUE 31:0 + +#define NVC097_LINE_COUNT 0x0184 +#define NVC097_LINE_COUNT_VALUE 31:0 + +#define NVC097_OFFSET_OUT_UPPER 0x0188 +#define NVC097_OFFSET_OUT_UPPER_VALUE 7:0 + +#define NVC097_OFFSET_OUT 0x018c +#define NVC097_OFFSET_OUT_VALUE 31:0 + +#define NVC097_PITCH_OUT 0x0190 +#define NVC097_PITCH_OUT_VALUE 31:0 + +#define NVC097_SET_DST_BLOCK_SIZE 0x0194 +#define NVC097_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC097_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVC097_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC097_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC097_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC097_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC097_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC097_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC097_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC097_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC097_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVC097_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVC097_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVC097_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NVC097_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVC097_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVC097_SET_DST_WIDTH 0x0198 +#define NVC097_SET_DST_WIDTH_V 31:0 + +#define NVC097_SET_DST_HEIGHT 0x019c +#define NVC097_SET_DST_HEIGHT_V 31:0 + +#define NVC097_SET_DST_DEPTH 0x01a0 +#define NVC097_SET_DST_DEPTH_V 31:0 + +#define NVC097_SET_DST_LAYER 0x01a4 +#define NVC097_SET_DST_LAYER_V 31:0 + +#define NVC097_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVC097_SET_DST_ORIGIN_BYTES_X_V 20:0 + +#define NVC097_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVC097_SET_DST_ORIGIN_SAMPLES_Y_V 16:0 + +#define NVC097_LAUNCH_DMA 0x01b0 +#define NVC097_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVC097_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVC097_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVC097_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVC097_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVC097_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 +#define NVC097_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVC097_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVC097_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 +#define NVC097_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVC097_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVC097_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVC097_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define NVC097_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVC097_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC097_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC097_LAUNCH_DMA_REDUCTION_OP 15:13 
+#define NVC097_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC097_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC097_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC097_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVC097_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC097_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVC097_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVC097_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC097_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVC097_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC097_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVC097_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVC097_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVC097_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVC097_LOAD_INLINE_DATA 0x01b4 +#define NVC097_LOAD_INLINE_DATA_V 31:0 + +#define NVC097_SET_I2M_SEMAPHORE_A 0x01dc +#define NVC097_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC097_SET_I2M_SEMAPHORE_B 0x01e0 +#define NVC097_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC097_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVC097_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC097_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVC097_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVC097_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVC097_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVC097_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVC097_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVC097_SET_I2M_SPARE_NOOP03 0x01fc +#define NVC097_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVC097_RUN_DS_NOW 0x0200 +#define NVC097_RUN_DS_NOW_V 31:0 + +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS 0x0204 +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD 4:0 +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_INSTANTANEOUS 0x00000000 +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16 0x00000001 +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32 0x00000002 +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__64 0x00000003 +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__128 0x00000004 +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__256 0x00000005 +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__512 0x00000006 +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1024 0x00000007 +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2048 0x00000008 +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4096 0x00000009 +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__8192 0x0000000A +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16384 0x0000000B +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32768 0x0000000C +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__65536 0x0000000D +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__131072 0x0000000E +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__262144 0x0000000F +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__524288 0x00000010 +#define 
NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1048576 0x00000011 +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2097152 0x00000012 +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4194304 0x00000013 +#define NVC097_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_LATEZ_ALWAYS 0x0000001F + +#define NVC097_SET_ALIASED_LINE_WIDTH_ENABLE 0x020c +#define NVC097_SET_ALIASED_LINE_WIDTH_ENABLE_V 0:0 +#define NVC097_SET_ALIASED_LINE_WIDTH_ENABLE_V_FALSE 0x00000000 +#define NVC097_SET_ALIASED_LINE_WIDTH_ENABLE_V_TRUE 0x00000001 + +#define NVC097_SET_API_MANDATED_EARLY_Z 0x0210 +#define NVC097_SET_API_MANDATED_EARLY_Z_ENABLE 0:0 +#define NVC097_SET_API_MANDATED_EARLY_Z_ENABLE_FALSE 0x00000000 +#define NVC097_SET_API_MANDATED_EARLY_Z_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_GS_DM_FIFO 0x0214 +#define NVC097_SET_GS_DM_FIFO_SIZE_RASTER_ON 12:0 +#define NVC097_SET_GS_DM_FIFO_SIZE_RASTER_OFF 28:16 +#define NVC097_SET_GS_DM_FIFO_SPILL_ENABLED 31:31 +#define NVC097_SET_GS_DM_FIFO_SPILL_ENABLED_FALSE 0x00000000 +#define NVC097_SET_GS_DM_FIFO_SPILL_ENABLED_TRUE 0x00000001 + +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS 0x0218 +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY 5:4 +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC097_INVALIDATE_SHADER_CACHES 0x021c +#define NVC097_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVC097_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVC097_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVC097_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVC097_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVC097_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVC097_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVC097_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVC097_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVC097_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVC097_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVC097_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVC097_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVC097_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVC097_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVC097_INCREMENT_SYNC_POINT 0x02c8 +#define NVC097_INCREMENT_SYNC_POINT_INDEX 11:0 +#define NVC097_INCREMENT_SYNC_POINT_CLEAN_L2 16:16 +#define NVC097_INCREMENT_SYNC_POINT_CLEAN_L2_FALSE 0x00000000 +#define NVC097_INCREMENT_SYNC_POINT_CLEAN_L2_TRUE 0x00000001 +#define NVC097_INCREMENT_SYNC_POINT_CONDITION 20:20 +#define NVC097_INCREMENT_SYNC_POINT_CONDITION_STREAM_OUT_WRITES_DONE 0x00000000 +#define NVC097_INCREMENT_SYNC_POINT_CONDITION_ROP_WRITES_DONE 0x00000001 + +#define NVC097_SET_PRIM_CIRCULAR_BUFFER_THROTTLE 0x02d0 +#define NVC097_SET_PRIM_CIRCULAR_BUFFER_THROTTLE_PRIM_AREA 21:0 + +#define NVC097_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE 0x02d4 +#define NVC097_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE_V 0:0 + +#define NVC097_SET_SURFACE_CLIP_ID_BLOCK_SIZE 0x02d8 +#define NVC097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH 3:0 +#define NVC097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define 
NVC097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT 7:4 +#define NVC097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH 11:8 +#define NVC097_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 + +#define NVC097_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dc +#define NVC097_SET_ALPHA_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 21:0 + +#define NVC097_DECOMPRESS_SURFACE 0x02e0 +#define NVC097_DECOMPRESS_SURFACE_MRT_SELECT 2:0 +#define NVC097_DECOMPRESS_SURFACE_RT_ARRAY_INDEX 19:4 + +#define NVC097_SET_ZCULL_ROP_BYPASS 0x02e4 +#define NVC097_SET_ZCULL_ROP_BYPASS_ENABLE 0:0 +#define NVC097_SET_ZCULL_ROP_BYPASS_ENABLE_FALSE 0x00000000 +#define NVC097_SET_ZCULL_ROP_BYPASS_ENABLE_TRUE 0x00000001 +#define NVC097_SET_ZCULL_ROP_BYPASS_NO_STALL 4:4 +#define NVC097_SET_ZCULL_ROP_BYPASS_NO_STALL_FALSE 0x00000000 +#define NVC097_SET_ZCULL_ROP_BYPASS_NO_STALL_TRUE 0x00000001 +#define NVC097_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING 8:8 +#define NVC097_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_FALSE 0x00000000 +#define NVC097_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_TRUE 0x00000001 +#define NVC097_SET_ZCULL_ROP_BYPASS_THRESHOLD 15:12 + +#define NVC097_SET_ZCULL_SUBREGION 0x02e8 +#define NVC097_SET_ZCULL_SUBREGION_ENABLE 0:0 +#define NVC097_SET_ZCULL_SUBREGION_ENABLE_FALSE 0x00000000 +#define NVC097_SET_ZCULL_SUBREGION_ENABLE_TRUE 0x00000001 +#define NVC097_SET_ZCULL_SUBREGION_NORMALIZED_ALIQUOTS 27:4 + +#define NVC097_SET_RASTER_BOUNDING_BOX 0x02ec +#define NVC097_SET_RASTER_BOUNDING_BOX_MODE 0:0 +#define NVC097_SET_RASTER_BOUNDING_BOX_MODE_BOUNDING_BOX 0x00000000 +#define NVC097_SET_RASTER_BOUNDING_BOX_MODE_FULL_VIEWPORT 0x00000001 +#define NVC097_SET_RASTER_BOUNDING_BOX_PAD 11:4 + +#define NVC097_PEER_SEMAPHORE_RELEASE 0x02f0 +#define NVC097_PEER_SEMAPHORE_RELEASE_V 31:0 + +#define NVC097_SET_ITERATED_BLEND_OPTIMIZATION 0x02f4 +#define NVC097_SET_ITERATED_BLEND_OPTIMIZATION_NOOP 1:0 +#define NVC097_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_NEVER 0x00000000 +#define NVC097_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0000 0x00000001 +#define NVC097_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_ALPHA_0 0x00000002 +#define NVC097_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0001 0x00000003 + +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION 0x02f8 +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_SUBREGION_ID 7:0 +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_ALIQUOTS 23:8 +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT 27:24 +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16X2_4X4 0x00000000 +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X16_4X4 0x00000001 +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X2 0x00000002 +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_2X4 0x00000003 +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X8_4X4 0x00000004 +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_4X2 0x00000005 +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_2X4 0x00000006 +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X8 0x00000007 +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_2X2 
0x00000008 +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_4X2 0x00000009 +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_2X4 0x0000000A +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_8X8_2X2 0x0000000B +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_1X1 0x0000000C +#define NVC097_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_NONE 0x0000000F + +#define NVC097_ASSIGN_ZCULL_SUBREGIONS 0x02fc +#define NVC097_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM 1:0 +#define NVC097_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Static 0x00000000 +#define NVC097_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Adaptive 0x00000001 + +#define NVC097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE 0x0300 +#define NVC097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE 0:0 +#define NVC097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_FALSE 0x00000000 +#define NVC097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_TRUE 0x00000001 +#define NVC097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE 1:1 +#define NVC097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000 +#define NVC097_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001 + +#define NVC097_DRAW_ZERO_INDEX 0x0304 +#define NVC097_DRAW_ZERO_INDEX_COUNT 31:0 + +#define NVC097_SET_L1_CONFIGURATION 0x0308 +#define NVC097_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY 2:0 +#define NVC097_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_16KB 0x00000001 +#define NVC097_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB 0x00000003 + +#define NVC097_SET_RENDER_ENABLE_CONTROL 0x030c +#define NVC097_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER 0:0 +#define NVC097_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_FALSE 0x00000000 +#define NVC097_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_TRUE 0x00000001 + +#define NVC097_SET_SPA_VERSION 0x0310 +#define NVC097_SET_SPA_VERSION_MINOR 7:0 +#define NVC097_SET_SPA_VERSION_MAJOR 15:8 + +#define NVC097_SET_IEEE_CLEAN_UPDATE 0x0314 +#define NVC097_SET_IEEE_CLEAN_UPDATE_ENABLE 0:0 +#define NVC097_SET_IEEE_CLEAN_UPDATE_ENABLE_FALSE 0x00000000 +#define NVC097_SET_IEEE_CLEAN_UPDATE_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_SNAP_GRID_LINE 0x0318 +#define NVC097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL 3:0 +#define NVC097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001 +#define NVC097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002 +#define NVC097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003 +#define NVC097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004 +#define NVC097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005 +#define NVC097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006 +#define NVC097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007 +#define NVC097_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008 +#define NVC097_SET_SNAP_GRID_LINE_ROUNDING_MODE 8:8 +#define NVC097_SET_SNAP_GRID_LINE_ROUNDING_MODE_RTNE 0x00000000 +#define NVC097_SET_SNAP_GRID_LINE_ROUNDING_MODE_TESLA 0x00000001 + +#define NVC097_SET_SNAP_GRID_NON_LINE 0x031c +#define NVC097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL 3:0 +#define NVC097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001 +#define NVC097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002 +#define NVC097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003 +#define NVC097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004 +#define NVC097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005 +#define 
NVC097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006 +#define NVC097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007 +#define NVC097_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008 +#define NVC097_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE 8:8 +#define NVC097_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_RTNE 0x00000000 +#define NVC097_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_TESLA 0x00000001 + +#define NVC097_SET_TESSELLATION_PARAMETERS 0x0320 +#define NVC097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE 1:0 +#define NVC097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_ISOLINE 0x00000000 +#define NVC097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_TRIANGLE 0x00000001 +#define NVC097_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_QUAD 0x00000002 +#define NVC097_SET_TESSELLATION_PARAMETERS_SPACING 5:4 +#define NVC097_SET_TESSELLATION_PARAMETERS_SPACING_INTEGER 0x00000000 +#define NVC097_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_ODD 0x00000001 +#define NVC097_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_EVEN 0x00000002 +#define NVC097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES 9:8 +#define NVC097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_POINTS 0x00000000 +#define NVC097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_LINES 0x00000001 +#define NVC097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CW 0x00000002 +#define NVC097_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CCW 0x00000003 + +#define NVC097_SET_TESSELLATION_LOD_U0_OR_DENSITY 0x0324 +#define NVC097_SET_TESSELLATION_LOD_U0_OR_DENSITY_V 31:0 + +#define NVC097_SET_TESSELLATION_LOD_V0_OR_DETAIL 0x0328 +#define NVC097_SET_TESSELLATION_LOD_V0_OR_DETAIL_V 31:0 + +#define NVC097_SET_TESSELLATION_LOD_U1_OR_W0 0x032c +#define NVC097_SET_TESSELLATION_LOD_U1_OR_W0_V 31:0 + +#define NVC097_SET_TESSELLATION_LOD_V1 0x0330 +#define NVC097_SET_TESSELLATION_LOD_V1_V 31:0 + +#define NVC097_SET_TG_LOD_INTERIOR_U 0x0334 +#define NVC097_SET_TG_LOD_INTERIOR_U_V 31:0 + +#define NVC097_SET_TG_LOD_INTERIOR_V 0x0338 +#define NVC097_SET_TG_LOD_INTERIOR_V_V 31:0 + +#define NVC097_RESERVED_TG07 0x033c +#define NVC097_RESERVED_TG07_V 0:0 + +#define NVC097_RESERVED_TG08 0x0340 +#define NVC097_RESERVED_TG08_V 0:0 + +#define NVC097_RESERVED_TG09 0x0344 +#define NVC097_RESERVED_TG09_V 0:0 + +#define NVC097_RESERVED_TG10 0x0348 +#define NVC097_RESERVED_TG10_V 0:0 + +#define NVC097_RESERVED_TG11 0x034c +#define NVC097_RESERVED_TG11_V 0:0 + +#define NVC097_RESERVED_TG12 0x0350 +#define NVC097_RESERVED_TG12_V 0:0 + +#define NVC097_RESERVED_TG13 0x0354 +#define NVC097_RESERVED_TG13_V 0:0 + +#define NVC097_RESERVED_TG14 0x0358 +#define NVC097_RESERVED_TG14_V 0:0 + +#define NVC097_RESERVED_TG15 0x035c +#define NVC097_RESERVED_TG15_V 0:0 + +#define NVC097_SET_SUBTILING_PERF_KNOB_A 0x0360 +#define NVC097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_REGISTER_FILE_PER_SUBTILE 7:0 +#define NVC097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_PIXEL_OUTPUT_BUFFER_PER_SUBTILE 15:8 +#define NVC097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_TRIANGLE_RAM_PER_SUBTILE 23:16 +#define NVC097_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_MAX_QUADS_PER_SUBTILE 31:24 + +#define NVC097_SET_SUBTILING_PERF_KNOB_B 0x0364 +#define NVC097_SET_SUBTILING_PERF_KNOB_B_FRACTION_OF_MAX_PRIMITIVES_PER_SUBTILE 7:0 + +#define NVC097_SET_SUBTILING_PERF_KNOB_C 0x0368 +#define NVC097_SET_SUBTILING_PERF_KNOB_C_RESERVED 0:0 + +#define NVC097_SET_ZCULL_SUBREGION_TO_REPORT 0x036c +#define NVC097_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE 0:0 +#define NVC097_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_FALSE 
0x00000000 +#define NVC097_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_TRUE 0x00000001 +#define NVC097_SET_ZCULL_SUBREGION_TO_REPORT_SUBREGION_ID 11:4 + +#define NVC097_SET_ZCULL_SUBREGION_REPORT_TYPE 0x0370 +#define NVC097_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE 0:0 +#define NVC097_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_FALSE 0x00000000 +#define NVC097_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_TRUE 0x00000001 +#define NVC097_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE 6:4 +#define NVC097_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST 0x00000000 +#define NVC097_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_NO_ACCEPT 0x00000001 +#define NVC097_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_LATE_Z 0x00000002 +#define NVC097_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_STENCIL_TEST 0x00000003 + +#define NVC097_SET_BALANCED_PRIMITIVE_WORKLOAD 0x0374 +#define NVC097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE 0:0 +#define NVC097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_FALSE 0x00000000 +#define NVC097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_TRUE 0x00000001 +#define NVC097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE 4:4 +#define NVC097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_FALSE 0x00000000 +#define NVC097_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_TRUE 0x00000001 + +#define NVC097_SET_MAX_PATCHES_PER_BATCH 0x0378 +#define NVC097_SET_MAX_PATCHES_PER_BATCH_V 5:0 + +#define NVC097_SET_RASTER_ENABLE 0x037c +#define NVC097_SET_RASTER_ENABLE_V 0:0 +#define NVC097_SET_RASTER_ENABLE_V_FALSE 0x00000000 +#define NVC097_SET_RASTER_ENABLE_V_TRUE 0x00000001 + +#define NVC097_SET_STREAM_OUT_BUFFER_ENABLE(j) (0x0380+(j)*32) +#define NVC097_SET_STREAM_OUT_BUFFER_ENABLE_V 0:0 +#define NVC097_SET_STREAM_OUT_BUFFER_ENABLE_V_FALSE 0x00000000 +#define NVC097_SET_STREAM_OUT_BUFFER_ENABLE_V_TRUE 0x00000001 + +#define NVC097_SET_STREAM_OUT_BUFFER_ADDRESS_A(j) (0x0384+(j)*32) +#define NVC097_SET_STREAM_OUT_BUFFER_ADDRESS_A_UPPER 7:0 + +#define NVC097_SET_STREAM_OUT_BUFFER_ADDRESS_B(j) (0x0388+(j)*32) +#define NVC097_SET_STREAM_OUT_BUFFER_ADDRESS_B_LOWER 31:0 + +#define NVC097_SET_STREAM_OUT_BUFFER_SIZE(j) (0x038c+(j)*32) +#define NVC097_SET_STREAM_OUT_BUFFER_SIZE_BYTES 31:0 + +#define NVC097_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER(j) (0x0390+(j)*32) +#define NVC097_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER_START_OFFSET 31:0 + +#define NVC097_SET_STREAM_OUT_CONTROL_STREAM(j) (0x0700+(j)*16) +#define NVC097_SET_STREAM_OUT_CONTROL_STREAM_SELECT 1:0 + +#define NVC097_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT(j) (0x0704+(j)*16) +#define NVC097_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT_MAX 7:0 + +#define NVC097_SET_STREAM_OUT_CONTROL_STRIDE(j) (0x0708+(j)*16) +#define NVC097_SET_STREAM_OUT_CONTROL_STRIDE_BYTES 31:0 + +#define NVC097_SET_RASTER_INPUT 0x0740 +#define NVC097_SET_RASTER_INPUT_STREAM_SELECT 1:0 + +#define NVC097_SET_STREAM_OUTPUT 0x0744 +#define NVC097_SET_STREAM_OUTPUT_ENABLE 0:0 +#define NVC097_SET_STREAM_OUTPUT_ENABLE_FALSE 0x00000000 +#define NVC097_SET_STREAM_OUTPUT_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE 0x0748 +#define NVC097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE 0:0 +#define NVC097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_FALSE 0x00000000 +#define NVC097_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_ALPHA_FRACTION 0x074c +#define NVC097_SET_ALPHA_FRACTION_V 7:0 + +#define NVC097_SET_HYBRID_ANTI_ALIAS_CONTROL 0x0754 +#define NVC097_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES 3:0 
+#define NVC097_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID 4:4
+#define NVC097_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_FRAGMENT 0x00000000
+#define NVC097_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_PASS 0x00000001
+#define NVC097_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES_EXTENDED 5:5
+
+#define NVC097_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c
+#define NVC097_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0
+
+#define NVC097_SET_SHADER_LOCAL_MEMORY_A 0x0790
+#define NVC097_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0
+
+#define NVC097_SET_SHADER_LOCAL_MEMORY_B 0x0794
+#define NVC097_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0
+
+#define NVC097_SET_SHADER_LOCAL_MEMORY_C 0x0798
+#define NVC097_SET_SHADER_LOCAL_MEMORY_C_SIZE_UPPER 5:0
+
+#define NVC097_SET_SHADER_LOCAL_MEMORY_D 0x079c
+#define NVC097_SET_SHADER_LOCAL_MEMORY_D_SIZE_LOWER 31:0
+
+#define NVC097_SET_SHADER_LOCAL_MEMORY_E 0x07a0
+#define NVC097_SET_SHADER_LOCAL_MEMORY_E_DEFAULT_SIZE_PER_WARP 25:0
+
+#define NVC097_SET_COLOR_ZERO_BANDWIDTH_CLEAR 0x07a4
+#define NVC097_SET_COLOR_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVC097_SET_Z_ZERO_BANDWIDTH_CLEAR 0x07a8
+#define NVC097_SET_Z_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVC097_SET_ISBE_SAVE_RESTORE_PROGRAM 0x07ac
+#define NVC097_SET_ISBE_SAVE_RESTORE_PROGRAM_OFFSET 31:0
+
+#define NVC097_SET_ZCULL_REGION_SIZE_A 0x07c0
+#define NVC097_SET_ZCULL_REGION_SIZE_A_WIDTH 15:0
+
+#define NVC097_SET_ZCULL_REGION_SIZE_B 0x07c4
+#define NVC097_SET_ZCULL_REGION_SIZE_B_HEIGHT 15:0
+
+#define NVC097_SET_ZCULL_REGION_SIZE_C 0x07c8
+#define NVC097_SET_ZCULL_REGION_SIZE_C_DEPTH 15:0
+
+#define NVC097_SET_ZCULL_REGION_PIXEL_OFFSET_C 0x07cc
+#define NVC097_SET_ZCULL_REGION_PIXEL_OFFSET_C_DEPTH 15:0
+
+#define NVC097_SET_CULL_BEFORE_FETCH 0x07dc
+#define NVC097_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE 0:0
+#define NVC097_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_FALSE 0x00000000
+#define NVC097_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_TRUE 0x00000001
+
+#define NVC097_SET_ZCULL_REGION_LOCATION 0x07e0
+#define NVC097_SET_ZCULL_REGION_LOCATION_START_ALIQUOT 15:0
+#define NVC097_SET_ZCULL_REGION_LOCATION_ALIQUOT_COUNT 31:16
+
+#define NVC097_SET_ZCULL_REGION_ALIQUOTS 0x07e4
+#define NVC097_SET_ZCULL_REGION_ALIQUOTS_PER_LAYER 15:0
+
+#define NVC097_SET_ZCULL_STORAGE_A 0x07e8
+#define NVC097_SET_ZCULL_STORAGE_A_ADDRESS_UPPER 7:0
+
+#define NVC097_SET_ZCULL_STORAGE_B 0x07ec
+#define NVC097_SET_ZCULL_STORAGE_B_ADDRESS_LOWER 31:0
+
+#define NVC097_SET_ZCULL_STORAGE_C 0x07f0
+#define NVC097_SET_ZCULL_STORAGE_C_LIMIT_ADDRESS_UPPER 7:0
+
+#define NVC097_SET_ZCULL_STORAGE_D 0x07f4
+#define NVC097_SET_ZCULL_STORAGE_D_LIMIT_ADDRESS_LOWER 31:0
+
+#define NVC097_SET_ZT_READ_ONLY 0x07f8
+#define NVC097_SET_ZT_READ_ONLY_ENABLE_Z 0:0
+#define NVC097_SET_ZT_READ_ONLY_ENABLE_Z_FALSE 0x00000000
+#define NVC097_SET_ZT_READ_ONLY_ENABLE_Z_TRUE 0x00000001
+#define NVC097_SET_ZT_READ_ONLY_ENABLE_STENCIL 4:4
+#define NVC097_SET_ZT_READ_ONLY_ENABLE_STENCIL_FALSE 0x00000000
+#define NVC097_SET_ZT_READ_ONLY_ENABLE_STENCIL_TRUE 0x00000001
+
+#define NVC097_SET_COLOR_TARGET_A(j) (0x0800+(j)*64)
+#define NVC097_SET_COLOR_TARGET_A_OFFSET_UPPER 7:0
+
+#define NVC097_SET_COLOR_TARGET_B(j) (0x0804+(j)*64)
+#define NVC097_SET_COLOR_TARGET_B_OFFSET_LOWER 31:0
+
+#define NVC097_SET_COLOR_TARGET_WIDTH(j) (0x0808+(j)*64)
+#define NVC097_SET_COLOR_TARGET_WIDTH_V 27:0
+
+#define NVC097_SET_COLOR_TARGET_HEIGHT(j) (0x080c+(j)*64)
+#define NVC097_SET_COLOR_TARGET_HEIGHT_V 16:0
+
+#define NVC097_SET_COLOR_TARGET_FORMAT(j) (0x0810+(j)*64)
+#define NVC097_SET_COLOR_TARGET_FORMAT_V 7:0
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_DISABLED 0x00000000
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_AS32 0x000000C1
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_AU32 0x000000C2
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_X32 0x000000C4
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_X32 0x000000C5
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_R16_G16_B16_A16 0x000000C6
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RS16_GS16_BS16_AS16 0x000000C8
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RU16_GU16_BU16_AU16 0x000000C9
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RF32_GF32 0x000000CB
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RS32_GS32 0x000000CC
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RU32_GU32 0x000000CD
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_A8R8G8B8 0x000000CF
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_A8RL8GL8BL8 0x000000D0
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_A2B10G10R10 0x000000D1
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_AU2BU10GU10RU10 0x000000D2
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_A8B8G8R8 0x000000D5
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_A8BL8GL8RL8 0x000000D6
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_AN8BN8GN8RN8 0x000000D7
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_AS8BS8GS8RS8 0x000000D8
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_AU8BU8GU8RU8 0x000000D9
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_R16_G16 0x000000DA
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RN16_GN16 0x000000DB
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RS16_GS16 0x000000DC
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RU16_GU16 0x000000DD
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RF16_GF16 0x000000DE
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_A2R10G10B10 0x000000DF
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_BF10GF11RF11 0x000000E0
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RS32 0x000000E3
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RU32 0x000000E4
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RF32 0x000000E5
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_X8R8G8B8 0x000000E6
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_X8RL8GL8BL8 0x000000E7
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_R5G6B5 0x000000E8
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_A1R5G5B5 0x000000E9
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_G8R8 0x000000EA
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_GN8RN8 0x000000EB
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_GS8RS8 0x000000EC
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_GU8RU8 0x000000ED
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_R16 0x000000EE
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RN16 0x000000EF
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RS16 0x000000F0
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RU16 0x000000F1
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RF16 0x000000F2
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_R8 0x000000F3
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RN8 0x000000F4
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RS8 0x000000F5
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RU8 0x000000F6
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_A8 0x000000F7
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_X1R5G5B5 0x000000F8
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_X8B8G8R8 0x000000F9
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_X8BL8GL8RL8 0x000000FA
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_Z1R5G5B5 0x000000FB
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_O1R5G5B5 0x000000FC
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_Z8R8G8B8 0x000000FD
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_O8R8G8B8 0x000000FE
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_R32 0x000000FF
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_A16 0x00000040
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_AF16 0x00000041
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_AF32 0x00000042
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_A8R8 0x00000043
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_R16_A16 0x00000044
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RF16_AF16 0x00000045
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_RF32_AF32 0x00000046
+#define NVC097_SET_COLOR_TARGET_FORMAT_V_B8G8R8A8 0x00000047
+
+#define NVC097_SET_COLOR_TARGET_MEMORY(j) (0x0814+(j)*64)
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH 3:0
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH_ONE_GOB 0x00000000
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT 7:4
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_ONE_GOB 0x00000000
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_TWO_GOBS 0x00000001
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH 11:8
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_ONE_GOB 0x00000000
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_TWO_GOBS 0x00000001
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_FOUR_GOBS 0x00000002
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_EIGHT_GOBS 0x00000003
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVC097_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005
+#define NVC097_SET_COLOR_TARGET_MEMORY_LAYOUT 12:12
+#define NVC097_SET_COLOR_TARGET_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NVC097_SET_COLOR_TARGET_MEMORY_LAYOUT_PITCH 0x00000001
+#define NVC097_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL 16:16
+#define NVC097_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000
+#define NVC097_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_DEPTH_SIZE 0x00000001
+
+#define NVC097_SET_COLOR_TARGET_THIRD_DIMENSION(j) (0x0818+(j)*64)
+#define NVC097_SET_COLOR_TARGET_THIRD_DIMENSION_V 27:0
+
+#define NVC097_SET_COLOR_TARGET_ARRAY_PITCH(j) (0x081c+(j)*64)
+#define NVC097_SET_COLOR_TARGET_ARRAY_PITCH_V 31:0
+
+#define NVC097_SET_COLOR_TARGET_LAYER(j) (0x0820+(j)*64)
+#define NVC097_SET_COLOR_TARGET_LAYER_OFFSET 15:0
+
+#define NVC097_SET_COLOR_TARGET_MARK(j) (0x0824+(j)*64)
+#define NVC097_SET_COLOR_TARGET_MARK_IEEE_CLEAN 0:0
+#define NVC097_SET_COLOR_TARGET_MARK_IEEE_CLEAN_FALSE 0x00000000
+#define NVC097_SET_COLOR_TARGET_MARK_IEEE_CLEAN_TRUE 0x00000001
+
+#define NVC097_SET_VIEWPORT_SCALE_X(j) (0x0a00+(j)*32)
+#define NVC097_SET_VIEWPORT_SCALE_X_V 31:0
+
+#define NVC097_SET_VIEWPORT_SCALE_Y(j) (0x0a04+(j)*32)
+#define NVC097_SET_VIEWPORT_SCALE_Y_V 31:0
+
+#define NVC097_SET_VIEWPORT_SCALE_Z(j) (0x0a08+(j)*32)
+#define NVC097_SET_VIEWPORT_SCALE_Z_V 31:0
+
+#define NVC097_SET_VIEWPORT_OFFSET_X(j) (0x0a0c+(j)*32)
+#define NVC097_SET_VIEWPORT_OFFSET_X_V 31:0
+
+#define NVC097_SET_VIEWPORT_OFFSET_Y(j) (0x0a10+(j)*32)
+#define NVC097_SET_VIEWPORT_OFFSET_Y_V 31:0
+
+#define NVC097_SET_VIEWPORT_OFFSET_Z(j) (0x0a14+(j)*32)
+#define NVC097_SET_VIEWPORT_OFFSET_Z_V 31:0
+
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE(j) (0x0a18+(j)*32)
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_X 2:0
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_X 0x00000000
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_X 0x00000001
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Y 0x00000002
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Y 0x00000003
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Z 0x00000004
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Z 0x00000005
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_W 0x00000006
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_W 0x00000007
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Y 6:4
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_X 0x00000000
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_X 0x00000001
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Y 0x00000002
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Y 0x00000003
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Z 0x00000004
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Z 0x00000005
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_W 0x00000006
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_W 0x00000007
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Z 10:8
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_X 0x00000000
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_X 0x00000001
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Y 0x00000002
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Y 0x00000003
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Z 0x00000004
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Z 0x00000005
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_W 0x00000006
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_W 0x00000007
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_W 14:12
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_X 0x00000000
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_X 0x00000001
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Y 0x00000002
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Y 0x00000003
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Z 0x00000004
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Z 0x00000005
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_W 0x00000006
+#define NVC097_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_W 0x00000007
+
+#define NVC097_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION(j) (0x0a1c+(j)*32)
+#define NVC097_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_X_BITS 4:0
+#define NVC097_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_Y_BITS 12:8
+
+#define NVC097_SET_VIEWPORT_CLIP_HORIZONTAL(j) (0x0c00+(j)*16)
+#define NVC097_SET_VIEWPORT_CLIP_HORIZONTAL_X0 15:0
+#define NVC097_SET_VIEWPORT_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVC097_SET_VIEWPORT_CLIP_VERTICAL(j) (0x0c04+(j)*16)
+#define NVC097_SET_VIEWPORT_CLIP_VERTICAL_Y0 15:0
+#define NVC097_SET_VIEWPORT_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVC097_SET_VIEWPORT_CLIP_MIN_Z(j) (0x0c08+(j)*16)
+#define NVC097_SET_VIEWPORT_CLIP_MIN_Z_V 31:0
+
+#define NVC097_SET_VIEWPORT_CLIP_MAX_Z(j) (0x0c0c+(j)*16)
+#define NVC097_SET_VIEWPORT_CLIP_MAX_Z_V 31:0
+
+#define NVC097_SET_WINDOW_CLIP_HORIZONTAL(j) (0x0d00+(j)*8)
+#define NVC097_SET_WINDOW_CLIP_HORIZONTAL_XMIN 15:0
+#define NVC097_SET_WINDOW_CLIP_HORIZONTAL_XMAX 31:16
+
+#define NVC097_SET_WINDOW_CLIP_VERTICAL(j) (0x0d04+(j)*8)
+#define NVC097_SET_WINDOW_CLIP_VERTICAL_YMIN 15:0
+#define NVC097_SET_WINDOW_CLIP_VERTICAL_YMAX 31:16
+
+#define NVC097_SET_CLIP_ID_EXTENT_X(j) (0x0d40+(j)*8)
+#define NVC097_SET_CLIP_ID_EXTENT_X_MINX 15:0
+#define NVC097_SET_CLIP_ID_EXTENT_X_WIDTH 31:16
+
+#define NVC097_SET_CLIP_ID_EXTENT_Y(j) (0x0d44+(j)*8)
+#define NVC097_SET_CLIP_ID_EXTENT_Y_MINY 15:0
+#define NVC097_SET_CLIP_ID_EXTENT_Y_HEIGHT 31:16
+
+#define NVC097_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK 0x0d60
+#define NVC097_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVC097_SET_API_VISIBLE_CALL_LIMIT 0x0d64
+#define NVC097_SET_API_VISIBLE_CALL_LIMIT_V 3:0
+#define NVC097_SET_API_VISIBLE_CALL_LIMIT_V__0 0x00000000
+#define NVC097_SET_API_VISIBLE_CALL_LIMIT_V__1 0x00000001
+#define NVC097_SET_API_VISIBLE_CALL_LIMIT_V__2 0x00000002
+#define NVC097_SET_API_VISIBLE_CALL_LIMIT_V__4 0x00000003
+#define NVC097_SET_API_VISIBLE_CALL_LIMIT_V__8 0x00000004
+#define NVC097_SET_API_VISIBLE_CALL_LIMIT_V__16 0x00000005
+#define NVC097_SET_API_VISIBLE_CALL_LIMIT_V__32 0x00000006
+#define NVC097_SET_API_VISIBLE_CALL_LIMIT_V__64 0x00000007
+#define NVC097_SET_API_VISIBLE_CALL_LIMIT_V__128 0x00000008
+#define NVC097_SET_API_VISIBLE_CALL_LIMIT_V_NO_CHECK 0x0000000F
+
+#define NVC097_SET_STATISTICS_COUNTER 0x0d68
+#define NVC097_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE 0:0
+#define NVC097_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC097_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE 1:1
+#define NVC097_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC097_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE 2:2
+#define NVC097_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC097_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE 3:3
+#define NVC097_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC097_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE 4:4
+#define NVC097_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE 5:5
+#define NVC097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVC097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE 6:6
+#define NVC097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_TRUE 0x00000001
+#define NVC097_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE 7:7
+#define NVC097_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC097_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE 8:8
+#define NVC097_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC097_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE 9:9
+#define NVC097_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC097_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE 11:11
+#define NVC097_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC097_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE 12:12
+#define NVC097_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC097_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE 13:13
+#define NVC097_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC097_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE 14:14
+#define NVC097_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVC097_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE 10:10
+#define NVC097_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_TRUE 0x00000001
+#define NVC097_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE 15:15
+#define NVC097_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_CLEAR_RECT_HORIZONTAL 0x0d6c
+#define NVC097_SET_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVC097_SET_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVC097_SET_CLEAR_RECT_VERTICAL 0x0d70
+#define NVC097_SET_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVC097_SET_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVC097_SET_VERTEX_ARRAY_START 0x0d74
+#define NVC097_SET_VERTEX_ARRAY_START_V 31:0
+
+#define NVC097_DRAW_VERTEX_ARRAY 0x0d78
+#define NVC097_DRAW_VERTEX_ARRAY_COUNT 31:0
+
+#define NVC097_SET_VIEWPORT_Z_CLIP 0x0d7c
+#define NVC097_SET_VIEWPORT_Z_CLIP_RANGE 0:0
+#define NVC097_SET_VIEWPORT_Z_CLIP_RANGE_NEGATIVE_W_TO_POSITIVE_W 0x00000000
+#define NVC097_SET_VIEWPORT_Z_CLIP_RANGE_ZERO_TO_POSITIVE_W 0x00000001
+
+#define NVC097_SET_COLOR_CLEAR_VALUE(i) (0x0d80+(i)*4)
+#define NVC097_SET_COLOR_CLEAR_VALUE_V 31:0
+
+#define NVC097_SET_Z_CLEAR_VALUE 0x0d90
+#define NVC097_SET_Z_CLEAR_VALUE_V 31:0
+
+#define NVC097_SET_SHADER_CACHE_CONTROL 0x0d94
+#define NVC097_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0
+#define NVC097_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000
+#define NVC097_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001
+
+#define NVC097_FORCE_TRANSITION_TO_BETA 0x0d98
+#define NVC097_FORCE_TRANSITION_TO_BETA_V 0:0
+
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE 0x0d9c
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V 0:0
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_FALSE 0x00000000
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_TRUE 0x00000001
+
+#define NVC097_SET_STENCIL_CLEAR_VALUE 0x0da0
+#define NVC097_SET_STENCIL_CLEAR_VALUE_V 7:0
+
+#define NVC097_INVALIDATE_SHADER_CACHES_NO_WFI 0x0da4
+#define NVC097_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0
+#define NVC097_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000
+#define NVC097_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001
+#define NVC097_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4
+#define NVC097_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000
+#define NVC097_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001
+#define NVC097_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12
+#define NVC097_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000
+#define NVC097_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001
+
+#define NVC097_SET_ZCULL_SERIALIZATION 0x0da8
+#define NVC097_SET_ZCULL_SERIALIZATION_ENABLE 0:0
+#define NVC097_SET_ZCULL_SERIALIZATION_ENABLE_FALSE 0x00000000
+#define NVC097_SET_ZCULL_SERIALIZATION_ENABLE_TRUE 0x00000001
+#define NVC097_SET_ZCULL_SERIALIZATION_APPLIED 5:4
+#define NVC097_SET_ZCULL_SERIALIZATION_APPLIED_ALWAYS 0x00000000
+#define NVC097_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z 0x00000001
+#define NVC097_SET_ZCULL_SERIALIZATION_APPLIED_OUT_OF_GAMUT_Z 0x00000002
+#define NVC097_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z_OR_OUT_OF_GAMUT_Z 0x00000003
+
+#define NVC097_SET_FRONT_POLYGON_MODE 0x0dac
+#define NVC097_SET_FRONT_POLYGON_MODE_V 31:0
+#define NVC097_SET_FRONT_POLYGON_MODE_V_POINT 0x00001B00
+#define NVC097_SET_FRONT_POLYGON_MODE_V_LINE 0x00001B01
+#define NVC097_SET_FRONT_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVC097_SET_BACK_POLYGON_MODE 0x0db0
+#define NVC097_SET_BACK_POLYGON_MODE_V 31:0
+#define NVC097_SET_BACK_POLYGON_MODE_V_POINT 0x00001B00
+#define NVC097_SET_BACK_POLYGON_MODE_V_LINE 0x00001B01
+#define NVC097_SET_BACK_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVC097_SET_POLY_SMOOTH 0x0db4
+#define NVC097_SET_POLY_SMOOTH_ENABLE 0:0
+#define NVC097_SET_POLY_SMOOTH_ENABLE_FALSE 0x00000000
+#define NVC097_SET_POLY_SMOOTH_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_ZT_MARK 0x0db8
+#define NVC097_SET_ZT_MARK_IEEE_CLEAN 0:0
+#define NVC097_SET_ZT_MARK_IEEE_CLEAN_FALSE 0x00000000
+#define NVC097_SET_ZT_MARK_IEEE_CLEAN_TRUE 0x00000001
+
+#define NVC097_SET_ZCULL_DIR_FORMAT 0x0dbc
+#define NVC097_SET_ZCULL_DIR_FORMAT_ZDIR 15:0
+#define NVC097_SET_ZCULL_DIR_FORMAT_ZDIR_LESS 0x00000000
+#define NVC097_SET_ZCULL_DIR_FORMAT_ZDIR_GREATER 0x00000001
+#define NVC097_SET_ZCULL_DIR_FORMAT_ZFORMAT 31:16
+#define NVC097_SET_ZCULL_DIR_FORMAT_ZFORMAT_MSB 0x00000000
+#define NVC097_SET_ZCULL_DIR_FORMAT_ZFORMAT_FP 0x00000001
+#define NVC097_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZTRICK 0x00000002
+#define NVC097_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZF32_1 0x00000003
+
+#define NVC097_SET_POLY_OFFSET_POINT 0x0dc0
+#define NVC097_SET_POLY_OFFSET_POINT_ENABLE 0:0
+#define NVC097_SET_POLY_OFFSET_POINT_ENABLE_FALSE 0x00000000
+#define NVC097_SET_POLY_OFFSET_POINT_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_POLY_OFFSET_LINE 0x0dc4
+#define NVC097_SET_POLY_OFFSET_LINE_ENABLE 0:0
+#define NVC097_SET_POLY_OFFSET_LINE_ENABLE_FALSE 0x00000000
+#define NVC097_SET_POLY_OFFSET_LINE_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_POLY_OFFSET_FILL 0x0dc8
+#define NVC097_SET_POLY_OFFSET_FILL_ENABLE 0:0
+#define NVC097_SET_POLY_OFFSET_FILL_ENABLE_FALSE 0x00000000
+#define NVC097_SET_POLY_OFFSET_FILL_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_PATCH 0x0dcc
+#define NVC097_SET_PATCH_SIZE 7:0
+
+#define NVC097_SET_ITERATED_BLEND 0x0dd0
+#define NVC097_SET_ITERATED_BLEND_ENABLE 0:0
+#define NVC097_SET_ITERATED_BLEND_ENABLE_FALSE 0x00000000
+#define NVC097_SET_ITERATED_BLEND_ENABLE_TRUE 0x00000001
+#define NVC097_SET_ITERATED_BLEND_ALPHA_ENABLE 1:1
+#define NVC097_SET_ITERATED_BLEND_ALPHA_ENABLE_FALSE 0x00000000
+#define NVC097_SET_ITERATED_BLEND_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_ITERATED_BLEND_PASS 0x0dd4
+#define NVC097_SET_ITERATED_BLEND_PASS_COUNT 7:0
+
+#define NVC097_SET_ZCULL_CRITERION 0x0dd8
+#define NVC097_SET_ZCULL_CRITERION_SFUNC 7:0
+#define NVC097_SET_ZCULL_CRITERION_SFUNC_NEVER 0x00000000
+#define NVC097_SET_ZCULL_CRITERION_SFUNC_LESS 0x00000001
+#define NVC097_SET_ZCULL_CRITERION_SFUNC_EQUAL 0x00000002
+#define NVC097_SET_ZCULL_CRITERION_SFUNC_LEQUAL 0x00000003
+#define NVC097_SET_ZCULL_CRITERION_SFUNC_GREATER 0x00000004
+#define NVC097_SET_ZCULL_CRITERION_SFUNC_NOTEQUAL 0x00000005
+#define NVC097_SET_ZCULL_CRITERION_SFUNC_GEQUAL 0x00000006
+#define NVC097_SET_ZCULL_CRITERION_SFUNC_ALWAYS 0x00000007
+#define NVC097_SET_ZCULL_CRITERION_NO_INVALIDATE 8:8
+#define NVC097_SET_ZCULL_CRITERION_NO_INVALIDATE_FALSE 0x00000000
+#define NVC097_SET_ZCULL_CRITERION_NO_INVALIDATE_TRUE 0x00000001
+#define NVC097_SET_ZCULL_CRITERION_FORCE_MATCH 9:9
+#define NVC097_SET_ZCULL_CRITERION_FORCE_MATCH_FALSE 0x00000000
+#define NVC097_SET_ZCULL_CRITERION_FORCE_MATCH_TRUE 0x00000001
+#define NVC097_SET_ZCULL_CRITERION_SREF 23:16
+#define NVC097_SET_ZCULL_CRITERION_SMASK 31:24
+
+#define NVC097_PIXEL_SHADER_BARRIER 0x0de0
+#define NVC097_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE 0:0
+#define NVC097_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_FALSE 0x00000000
+#define NVC097_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_SM_TIMEOUT_INTERVAL 0x0de4
+#define NVC097_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0
+
+#define NVC097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY 0x0de8
+#define NVC097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE 0:0
+#define NVC097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_FALSE 0x00000000
+#define NVC097_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_TRUE 0x00000001
+
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER 0x0df0
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER_V 7:0
+
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION 0x0df4
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC 2:0
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_FALSE 0x00000000
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_TRUE 0x00000001
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_EQ 0x00000002
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_NE 0x00000003
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LT 0x00000004
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LE 0x00000005
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GT 0x00000006
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GE 0x00000007
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION 5:3
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD_PRODUCTS 0x00000000
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUB_PRODUCTS 0x00000001
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MIN 0x00000002
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MAX 0x00000003
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_RCP 0x00000004
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD 0x00000005
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUBTRACT 0x00000006
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT 8:6
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT0 0x00000000
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT1 0x00000001
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT2 0x00000002
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT3 0x00000003
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT4 0x00000004
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT5 0x00000005
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT6 0x00000006
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT7 0x00000007
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT 11:9
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_RGB 0x00000000
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_RGB 0x00000001
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_AAA 0x00000002
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_AAA 0x00000003
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP0_RGB 0x00000004
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP1_RGB 0x00000005
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP2_RGB 0x00000006
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_PBR_RGB 0x00000007
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT 15:12
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO 0x00000000
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE 0x00000001
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_RGB 0x00000002
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_AAA 0x00000003
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_RGB 0x00000005
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_AAA 0x00000006
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP0_RGB 0x00000009
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP1_RGB 0x0000000A
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP2_RGB 0x0000000B
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_PBR_RGB 0x0000000C
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_CONSTANT_RGB 0x0000000D
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO_A_TIMES_B 0x0000000E
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT 18:16
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_RGB 0x00000000
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_RGB 0x00000001
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_AAA 0x00000002
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_AAA 0x00000003
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP0_RGB 0x00000004
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP1_RGB 0x00000005
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP2_RGB 0x00000006
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_PBR_RGB 0x00000007
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT 22:19
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO 0x00000000
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE 0x00000001
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_RGB 0x00000002
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_AAA 0x00000003
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_RGB 0x00000005
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_AAA 0x00000006
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP0_RGB 0x00000009
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP1_RGB 0x0000000A
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP2_RGB 0x0000000B
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_PBR_RGB 0x0000000C
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_CONSTANT_RGB 0x0000000D
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO_C_TIMES_D 0x0000000E
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE 25:23
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RGB 0x00000000
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GBR 0x00000001
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RRR 0x00000002
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GGG 0x00000003
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_BBB 0x00000004
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_R_TO_A 0x00000005
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK 27:26
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_RGB 0x00000000
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_R_ONLY 0x00000001
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_G_ONLY 0x00000002
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_B_ONLY 0x00000003
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT 29:28
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP0 0x00000000
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP1 0x00000001
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP2 0x00000002
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_NONE 0x00000003
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC 31:31
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_FALSE 0x00000000
+#define NVC097_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_TRUE 0x00000001
+
+#define NVC097_SET_WINDOW_OFFSET_X 0x0df8
+#define NVC097_SET_WINDOW_OFFSET_X_V 16:0
+
+#define NVC097_SET_WINDOW_OFFSET_Y 0x0dfc
+#define NVC097_SET_WINDOW_OFFSET_Y_V 17:0
+
+#define NVC097_SET_SCISSOR_ENABLE(j) (0x0e00+(j)*16)
+#define NVC097_SET_SCISSOR_ENABLE_V 0:0
+#define NVC097_SET_SCISSOR_ENABLE_V_FALSE 0x00000000
+#define NVC097_SET_SCISSOR_ENABLE_V_TRUE 0x00000001
+
+#define NVC097_SET_SCISSOR_HORIZONTAL(j) (0x0e04+(j)*16)
+#define NVC097_SET_SCISSOR_HORIZONTAL_XMIN 15:0
+#define NVC097_SET_SCISSOR_HORIZONTAL_XMAX 31:16
+
+#define NVC097_SET_SCISSOR_VERTICAL(j) (0x0e08+(j)*16)
+#define NVC097_SET_SCISSOR_VERTICAL_YMIN 15:0
+#define NVC097_SET_SCISSOR_VERTICAL_YMAX 31:16
+
+#define NVC097_SET_SELECT_MAXWELL_TEXTURE_HEADERS 0x0f10
+#define NVC097_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V 0:0
+#define NVC097_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_FALSE 0x00000000
+#define NVC097_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_TRUE 0x00000001
+
+#define NVC097_SET_VPC_PERF_KNOB 0x0f14
+#define NVC097_SET_VPC_PERF_KNOB_CULLED_SMALL_LINES 7:0
+#define NVC097_SET_VPC_PERF_KNOB_CULLED_SMALL_TRIANGLES 15:8
+#define NVC097_SET_VPC_PERF_KNOB_NONCULLED_LINES_AND_POINTS 23:16
+#define NVC097_SET_VPC_PERF_KNOB_NONCULLED_TRIANGLES 31:24
+
+#define NVC097_PM_LOCAL_TRIGGER 0x0f18
+#define NVC097_PM_LOCAL_TRIGGER_BOOKMARK 15:0
+
+#define NVC097_SET_POST_Z_PS_IMASK 0x0f1c
+#define NVC097_SET_POST_Z_PS_IMASK_ENABLE 0:0
+#define NVC097_SET_POST_Z_PS_IMASK_ENABLE_FALSE 0x00000000
+#define NVC097_SET_POST_Z_PS_IMASK_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_CONSTANT_COLOR_RENDERING 0x0f40
+#define NVC097_SET_CONSTANT_COLOR_RENDERING_ENABLE 0:0
+#define NVC097_SET_CONSTANT_COLOR_RENDERING_ENABLE_FALSE 0x00000000
+#define NVC097_SET_CONSTANT_COLOR_RENDERING_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_CONSTANT_COLOR_RENDERING_RED 0x0f44
+#define NVC097_SET_CONSTANT_COLOR_RENDERING_RED_V 31:0
+
+#define NVC097_SET_CONSTANT_COLOR_RENDERING_GREEN 0x0f48
+#define NVC097_SET_CONSTANT_COLOR_RENDERING_GREEN_V 31:0
+
+#define NVC097_SET_CONSTANT_COLOR_RENDERING_BLUE 0x0f4c
+#define NVC097_SET_CONSTANT_COLOR_RENDERING_BLUE_V 31:0
+
+#define NVC097_SET_CONSTANT_COLOR_RENDERING_ALPHA 0x0f50
+#define NVC097_SET_CONSTANT_COLOR_RENDERING_ALPHA_V 31:0
+
+#define NVC097_SET_BACK_STENCIL_FUNC_REF 0x0f54
+#define NVC097_SET_BACK_STENCIL_FUNC_REF_V 7:0
+
+#define NVC097_SET_BACK_STENCIL_MASK 0x0f58
+#define NVC097_SET_BACK_STENCIL_MASK_V 7:0
+
+#define NVC097_SET_BACK_STENCIL_FUNC_MASK 0x0f5c
+#define NVC097_SET_BACK_STENCIL_FUNC_MASK_V 7:0
+
+#define NVC097_SET_VERTEX_STREAM_SUBSTITUTE_A 0x0f84
+#define NVC097_SET_VERTEX_STREAM_SUBSTITUTE_A_ADDRESS_UPPER 7:0
+
+#define NVC097_SET_VERTEX_STREAM_SUBSTITUTE_B 0x0f88
+#define NVC097_SET_VERTEX_STREAM_SUBSTITUTE_B_ADDRESS_LOWER 31:0
+
+#define NVC097_SET_LINE_MODE_POLYGON_CLIP 0x0f8c
+#define NVC097_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE 0:0
+#define NVC097_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DRAW_LINE 0x00000000
+#define NVC097_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DO_NOT_DRAW_LINE 0x00000001
+
+#define NVC097_SET_SINGLE_CT_WRITE_CONTROL 0x0f90
+#define NVC097_SET_SINGLE_CT_WRITE_CONTROL_ENABLE 0:0
+#define NVC097_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_FALSE 0x00000000
+#define NVC097_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_VTG_WARP_WATERMARKS 0x0f98
+#define NVC097_SET_VTG_WARP_WATERMARKS_LOW 15:0
+#define NVC097_SET_VTG_WARP_WATERMARKS_HIGH 31:16
+
+#define NVC097_SET_DEPTH_BOUNDS_MIN 0x0f9c
+#define NVC097_SET_DEPTH_BOUNDS_MIN_V 31:0
+
+#define NVC097_SET_DEPTH_BOUNDS_MAX 0x0fa0
+#define NVC097_SET_DEPTH_BOUNDS_MAX_V 31:0
+
+#define NVC097_SET_SAMPLE_MASK 0x0fa4
+#define NVC097_SET_SAMPLE_MASK_RASTER_OUT_ENABLE 0:0
+#define NVC097_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_FALSE 0x00000000
+#define NVC097_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_TRUE 0x00000001
+#define NVC097_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE 4:4
+#define NVC097_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_FALSE 0x00000000
+#define NVC097_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_COLOR_TARGET_SAMPLE_MASK 0x0fa8
+#define NVC097_SET_COLOR_TARGET_SAMPLE_MASK_V 15:0
+
+#define NVC097_SET_CT_MRT_ENABLE 0x0fac
+#define NVC097_SET_CT_MRT_ENABLE_V 0:0
+#define NVC097_SET_CT_MRT_ENABLE_V_FALSE 0x00000000
+#define NVC097_SET_CT_MRT_ENABLE_V_TRUE 0x00000001
+
+#define NVC097_SET_NONMULTISAMPLED_Z 0x0fb0
+#define NVC097_SET_NONMULTISAMPLED_Z_V 0:0
+#define NVC097_SET_NONMULTISAMPLED_Z_V_PER_SAMPLE 0x00000000
+#define NVC097_SET_NONMULTISAMPLED_Z_V_AT_PIXEL_CENTER 0x00000001
+
+#define NVC097_SET_TIR 0x0fb4
+#define NVC097_SET_TIR_MODE 1:0
+#define NVC097_SET_TIR_MODE_DISABLED 0x00000000
+#define NVC097_SET_TIR_MODE_RASTER_N_TARGET_M 0x00000001
+
+#define NVC097_SET_ANTI_ALIAS_RASTER 0x0fb8
+#define NVC097_SET_ANTI_ALIAS_RASTER_SAMPLES 2:0
+#define NVC097_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_1X1 0x00000000
+#define NVC097_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X2 0x00000002
+#define NVC097_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVC097_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVC097_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X4 0x00000006
+
+#define NVC097_SET_SAMPLE_MASK_X0_Y0 0x0fbc
+#define NVC097_SET_SAMPLE_MASK_X0_Y0_V 15:0
+
+#define NVC097_SET_SAMPLE_MASK_X1_Y0 0x0fc0
+#define NVC097_SET_SAMPLE_MASK_X1_Y0_V 15:0
+
+#define NVC097_SET_SAMPLE_MASK_X0_Y1 0x0fc4
+#define NVC097_SET_SAMPLE_MASK_X0_Y1_V 15:0
+
+#define NVC097_SET_SAMPLE_MASK_X1_Y1 0x0fc8
+#define NVC097_SET_SAMPLE_MASK_X1_Y1_V 15:0
+
+#define NVC097_SET_SURFACE_CLIP_ID_MEMORY_A 0x0fcc
+#define NVC097_SET_SURFACE_CLIP_ID_MEMORY_A_OFFSET_UPPER 7:0
+
+#define NVC097_SET_SURFACE_CLIP_ID_MEMORY_B 0x0fd0
+#define NVC097_SET_SURFACE_CLIP_ID_MEMORY_B_OFFSET_LOWER 31:0
+
+#define NVC097_SET_TIR_MODULATION 0x0fd4
+#define NVC097_SET_TIR_MODULATION_COMPONENT_SELECT 1:0
+#define NVC097_SET_TIR_MODULATION_COMPONENT_SELECT_NO_MODULATION 0x00000000
+#define NVC097_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGB 0x00000001
+#define NVC097_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_ALPHA_ONLY 0x00000002
+#define NVC097_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGBA 0x00000003
+
+#define NVC097_SET_TIR_MODULATION_FUNCTION 0x0fd8
+#define NVC097_SET_TIR_MODULATION_FUNCTION_SELECT 0:0
+#define NVC097_SET_TIR_MODULATION_FUNCTION_SELECT_LINEAR 0x00000000
+#define NVC097_SET_TIR_MODULATION_FUNCTION_SELECT_TABLE 0x00000001
+
+#define NVC097_SET_BLEND_OPT_CONTROL 0x0fdc
+#define NVC097_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS 0:0
+#define NVC097_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_FALSE 0x00000000
+#define NVC097_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_TRUE 0x00000001
+
+#define NVC097_SET_ZT_A 0x0fe0
+#define NVC097_SET_ZT_A_OFFSET_UPPER 7:0
+
+#define NVC097_SET_ZT_B 0x0fe4
+#define NVC097_SET_ZT_B_OFFSET_LOWER 31:0
+
+#define NVC097_SET_ZT_FORMAT 0x0fe8
+#define NVC097_SET_ZT_FORMAT_V 4:0
+#define NVC097_SET_ZT_FORMAT_V_Z16 0x00000013
+#define NVC097_SET_ZT_FORMAT_V_Z24S8 0x00000014
+#define NVC097_SET_ZT_FORMAT_V_X8Z24 0x00000015
+#define NVC097_SET_ZT_FORMAT_V_S8Z24 0x00000016
+#define NVC097_SET_ZT_FORMAT_V_S8 0x00000017
+#define NVC097_SET_ZT_FORMAT_V_V8Z24 0x00000018
+#define NVC097_SET_ZT_FORMAT_V_ZF32 0x0000000A
+#define NVC097_SET_ZT_FORMAT_V_ZF32_X24S8 0x00000019
+#define NVC097_SET_ZT_FORMAT_V_X8Z24_X16V8S8 0x0000001D
+#define NVC097_SET_ZT_FORMAT_V_ZF32_X16V8X8 0x0000001E
+#define NVC097_SET_ZT_FORMAT_V_ZF32_X16V8S8 0x0000001F
+
+#define NVC097_SET_ZT_BLOCK_SIZE 0x0fec
+#define NVC097_SET_ZT_BLOCK_SIZE_WIDTH 3:0
+#define NVC097_SET_ZT_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVC097_SET_ZT_BLOCK_SIZE_HEIGHT 7:4
+#define NVC097_SET_ZT_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVC097_SET_ZT_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVC097_SET_ZT_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC097_SET_ZT_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC097_SET_ZT_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC097_SET_ZT_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC097_SET_ZT_BLOCK_SIZE_DEPTH 11:8
+#define NVC097_SET_ZT_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+
+#define NVC097_SET_ZT_ARRAY_PITCH 0x0ff0
+#define NVC097_SET_ZT_ARRAY_PITCH_V 31:0
+
+#define NVC097_SET_SURFACE_CLIP_HORIZONTAL 0x0ff4
+#define NVC097_SET_SURFACE_CLIP_HORIZONTAL_X 15:0
+#define NVC097_SET_SURFACE_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVC097_SET_SURFACE_CLIP_VERTICAL 0x0ff8
+#define NVC097_SET_SURFACE_CLIP_VERTICAL_Y 15:0
+#define NVC097_SET_SURFACE_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVC097_SET_TILED_CACHE_BUNDLE_CONTROL 0x0ffc
+#define NVC097_SET_TILED_CACHE_BUNDLE_CONTROL_TREAT_HEAVYWEIGHT_AS_LIGHTWEIGHT 0:0
+#define NVC097_SET_TILED_CACHE_BUNDLE_CONTROL_TREAT_HEAVYWEIGHT_AS_LIGHTWEIGHT_FALSE 0x00000000
+#define NVC097_SET_TILED_CACHE_BUNDLE_CONTROL_TREAT_HEAVYWEIGHT_AS_LIGHTWEIGHT_TRUE 0x00000001
+
+#define NVC097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS 0x1000
+#define NVC097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE 0:0
+#define NVC097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_FALSE 0x00000000
+#define NVC097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_TRUE 0x00000001
+#define NVC097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY 5:4
+#define NVC097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC097_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC097_SET_VIEWPORT_MULTICAST 0x1004
+#define NVC097_SET_VIEWPORT_MULTICAST_ORDER 0:0
+#define NVC097_SET_VIEWPORT_MULTICAST_ORDER_VIEWPORT_ORDER 0x00000000
+#define NVC097_SET_VIEWPORT_MULTICAST_ORDER_PRIMITIVE_ORDER 0x00000001
+
+#define NVC097_SET_TESSELLATION_CUT_HEIGHT 0x1008
+#define NVC097_SET_TESSELLATION_CUT_HEIGHT_V 4:0
+
+#define NVC097_SET_MAX_GS_INSTANCES_PER_TASK 0x100c
+#define NVC097_SET_MAX_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVC097_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK 0x1010
+#define NVC097_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK_V 15:0
+
+#define NVC097_SET_RESERVED_SW_METHOD00 0x1014
+#define NVC097_SET_RESERVED_SW_METHOD00_V 31:0
+
+#define NVC097_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER 0x1018
+#define NVC097_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVC097_SET_BETA_CB_STORAGE_CONSTRAINT 0x101c
+#define NVC097_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVC097_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVC097_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER 0x1020
+#define NVC097_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVC097_SET_ALPHA_CB_STORAGE_CONSTRAINT 0x1024
+#define NVC097_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVC097_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVC097_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_RESERVED_SW_METHOD01 0x1028
+#define NVC097_SET_RESERVED_SW_METHOD01_V 31:0
+
+#define NVC097_SET_RESERVED_SW_METHOD02 0x102c
+#define NVC097_SET_RESERVED_SW_METHOD02_V 31:0
+
+#define NVC097_SET_TIR_MODULATION_COEFFICIENT_TABLE(i) (0x1030+(i)*4)
+#define NVC097_SET_TIR_MODULATION_COEFFICIENT_TABLE_V0 7:0
+#define NVC097_SET_TIR_MODULATION_COEFFICIENT_TABLE_V1 15:8
+#define NVC097_SET_TIR_MODULATION_COEFFICIENT_TABLE_V2 23:16
+#define NVC097_SET_TIR_MODULATION_COEFFICIENT_TABLE_V3 31:24
+
+#define NVC097_SET_SPARE_NOOP01 0x1044
+#define NVC097_SET_SPARE_NOOP01_V 31:0
+
+#define NVC097_SET_SPARE_NOOP02 0x1048
+#define NVC097_SET_SPARE_NOOP02_V 31:0
+
+#define NVC097_SET_SPARE_NOOP03 0x104c
+#define NVC097_SET_SPARE_NOOP03_V 31:0
+
+#define NVC097_SET_SPARE_NOOP04 0x1050
+#define NVC097_SET_SPARE_NOOP04_V 31:0
+
+#define NVC097_SET_SPARE_NOOP05 0x1054
+#define NVC097_SET_SPARE_NOOP05_V 31:0
+
+#define NVC097_SET_SPARE_NOOP06 0x1058
+#define NVC097_SET_SPARE_NOOP06_V 31:0
+
+#define NVC097_SET_SPARE_NOOP07 0x105c
+#define NVC097_SET_SPARE_NOOP07_V 31:0
+
+#define NVC097_SET_SPARE_NOOP08 0x1060
+#define NVC097_SET_SPARE_NOOP08_V 31:0
+
+#define NVC097_SET_SPARE_NOOP09 0x1064
+#define NVC097_SET_SPARE_NOOP09_V 31:0
+
+#define NVC097_SET_SPARE_NOOP10 0x1068
+#define NVC097_SET_SPARE_NOOP10_V 31:0
+
+#define NVC097_SET_SPARE_NOOP11 0x106c
+#define NVC097_SET_SPARE_NOOP11_V 31:0
+
+#define NVC097_SET_SPARE_NOOP12 0x1070
+#define NVC097_SET_SPARE_NOOP12_V 31:0
+
+#define NVC097_SET_SPARE_NOOP13 0x1074
+#define NVC097_SET_SPARE_NOOP13_V 31:0
+
+#define NVC097_SET_SPARE_NOOP14 0x1078
+#define NVC097_SET_SPARE_NOOP14_V 31:0
+
+#define NVC097_SET_SPARE_NOOP15 0x107c
+#define NVC097_SET_SPARE_NOOP15_V 31:0
+
+#define NVC097_SET_RESERVED_SW_METHOD03 0x10b0
+#define NVC097_SET_RESERVED_SW_METHOD03_V 31:0
+
+#define NVC097_SET_RESERVED_SW_METHOD04 0x10b4
+#define NVC097_SET_RESERVED_SW_METHOD04_V 31:0
+
+#define NVC097_SET_RESERVED_SW_METHOD05 0x10b8
+#define NVC097_SET_RESERVED_SW_METHOD05_V 31:0
+
+#define NVC097_SET_RESERVED_SW_METHOD06 0x10bc
+#define NVC097_SET_RESERVED_SW_METHOD06_V 31:0
+
+#define NVC097_SET_RESERVED_SW_METHOD07 0x10c0
+#define NVC097_SET_RESERVED_SW_METHOD07_V 31:0
+
+#define NVC097_SET_RESERVED_SW_METHOD08 0x10c4
+#define NVC097_SET_RESERVED_SW_METHOD08_V 31:0
+
+#define NVC097_SET_RESERVED_SW_METHOD09 0x10c8
+#define NVC097_SET_RESERVED_SW_METHOD09_V 31:0
+
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_UNORM8 0x10cc
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED 23:16
+
+#define NVC097_SET_RESERVED_SW_METHOD10 0x10d0
+#define NVC097_SET_RESERVED_SW_METHOD10_V 31:0
+
+#define NVC097_SET_RESERVED_SW_METHOD11 0x10d4
+#define NVC097_SET_RESERVED_SW_METHOD11_V 31:0
+
+#define NVC097_SET_RESERVED_SW_METHOD12 0x10d8
+#define NVC097_SET_RESERVED_SW_METHOD12_V 31:0
+
+#define NVC097_SET_RESERVED_SW_METHOD13 0x10dc
+#define NVC097_SET_RESERVED_SW_METHOD13_V 31:0
+
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_UNORM10 0x10e0
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED 23:16
+
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_UNORM16 0x10e4
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED 23:16
+
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_FP11 0x10e8
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED_ALL_HIT_ONCE 5:0
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED 21:16
+
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_FP16 0x10ec
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED 23:16
+
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_SRGB8 0x10f0
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC097_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED 23:16
+
+#define NVC097_UNBIND_ALL 0x10f4
+#define NVC097_UNBIND_ALL_CONSTANT_BUFFERS 8:8
+#define NVC097_UNBIND_ALL_CONSTANT_BUFFERS_FALSE 0x00000000
+#define NVC097_UNBIND_ALL_CONSTANT_BUFFERS_TRUE 0x00000001
+
+#define NVC097_SET_CLEAR_SURFACE_CONTROL 0x10f8
+#define NVC097_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK 0:0
+#define NVC097_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_FALSE 0x00000000
+#define NVC097_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_TRUE 0x00000001
+#define NVC097_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT 4:4
+#define NVC097_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_FALSE 0x00000000
+#define NVC097_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_TRUE 0x00000001
+#define NVC097_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0 8:8
+#define NVC097_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_FALSE 0x00000000
+#define NVC097_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_TRUE 0x00000001
+#define NVC097_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0 12:12
+#define NVC097_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_FALSE 0x00000000
+#define NVC097_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_TRUE 0x00000001
+
+#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS 0x10fc
+#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY 5:4
+#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC097_SET_RESERVED_SW_METHOD14 0x1100
+#define NVC097_SET_RESERVED_SW_METHOD14_V 31:0
+
+#define NVC097_SET_RESERVED_SW_METHOD15 0x1104
+#define NVC097_SET_RESERVED_SW_METHOD15_V 31:0
+
+#define NVC097_NO_OPERATION_DATA_HI 0x110c
+#define NVC097_NO_OPERATION_DATA_HI_V 31:0
+
+#define NVC097_SET_DEPTH_BIAS_CONTROL 0x1110
+#define NVC097_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT 0:0
+#define NVC097_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_FALSE 0x00000000
+#define NVC097_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_TRUE 0x00000001
+
+#define NVC097_PM_TRIGGER_END 0x1114
+#define NVC097_PM_TRIGGER_END_V 31:0
+
+#define NVC097_SET_VERTEX_ID_BASE 0x1118
+#define NVC097_SET_VERTEX_ID_BASE_V 31:0
+
+#define NVC097_SET_STENCIL_COMPRESSION 0x111c
+#define NVC097_SET_STENCIL_COMPRESSION_ENABLE 0:0
+#define NVC097_SET_STENCIL_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVC097_SET_STENCIL_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A(i) (0x1120+(i)*4)
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0 0:0
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1 1:1
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2 2:2
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3 3:3
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0 4:4
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1 5:5
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2 6:6
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3 7:7
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0 8:8
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1 9:9
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2 10:10
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3 11:11
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0 12:12
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1 13:13
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2 14:14
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3 15:15
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0 16:16
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1 17:17
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2 18:18
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3 19:19
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0 20:20
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1 21:21
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2 22:22
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3 23:23
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0 24:24
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1 25:25
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2 26:26
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3 27:27
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0 28:28
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1 29:29
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2 30:30
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3 31:31
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_TRUE 0x00000001
+
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B(i) (0x1128+(i)*4)
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0 0:0
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1 1:1
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2 2:2
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3 3:3
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0 4:4
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1 5:5
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2 6:6
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3 7:7
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0 8:8
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1 9:9
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2 10:10
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3 11:11
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0 12:12
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1 13:13
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2 14:14
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3 15:15
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0 16:16
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1 17:17
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2 18:18
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3 19:19
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0 20:20
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1 21:21
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2 22:22
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3 23:23
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0 24:24
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1 25:25
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2 26:26
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3 27:27
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0 28:28
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1 29:29
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2 30:30
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_TRUE 0x00000001
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3 31:31
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_TRUE 0x00000001
+
+#define NVC097_SET_TIR_CONTROL 0x1130
+#define NVC097_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES 0:0
+#define NVC097_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_DISABLE 0x00000000
+#define NVC097_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_ENABLE 0x00000001
+#define NVC097_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES 4:4
+#define NVC097_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_DISABLE 0x00000000
+#define NVC097_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_ENABLE 0x00000001
+#define NVC097_SET_TIR_CONTROL_REDUCE_COVERAGE 1:1
+#define NVC097_SET_TIR_CONTROL_REDUCE_COVERAGE_DISABLE 0x00000000
+#define NVC097_SET_TIR_CONTROL_REDUCE_COVERAGE_ENABLE 0x00000001
+
+#define NVC097_SET_MUTABLE_METHOD_CONTROL 0x1134
+#define NVC097_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT 0:0
+#define NVC097_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_FALSE 0x00000000
+#define NVC097_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_TRUE 0x00000001
+
+#define NVC097_SET_POST_PS_INITIAL_COVERAGE 0x1138
+#define NVC097_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE 0:0
+#define NVC097_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_FALSE 0x00000000
+#define NVC097_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_TRUE 0x00000001
+
+#define NVC097_SET_FILL_VIA_TRIANGLE 0x113c
+#define NVC097_SET_FILL_VIA_TRIANGLE_MODE 1:0
+#define NVC097_SET_FILL_VIA_TRIANGLE_MODE_DISABLED 0x00000000
+#define NVC097_SET_FILL_VIA_TRIANGLE_MODE_FILL_ALL 0x00000001
+#define NVC097_SET_FILL_VIA_TRIANGLE_MODE_FILL_BBOX 0x00000002
+
+#define NVC097_SET_BLEND_PER_FORMAT_ENABLE 0x1140
+#define NVC097_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16 4:4
+#define NVC097_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_FALSE 0x00000000
+#define NVC097_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_TRUE 0x00000001
+
+#define NVC097_FLUSH_PENDING_WRITES 0x1144
+#define NVC097_FLUSH_PENDING_WRITES_SM_DOES_GLOBAL_STORE 0:0
+
+#define NVC097_SET_VERTEX_ATTRIBUTE_A(i) (0x1160+(i)*4)
+#define NVC097_SET_VERTEX_ATTRIBUTE_A_STREAM 4:0
+#define NVC097_SET_VERTEX_ATTRIBUTE_A_SOURCE 6:6
+#define NVC097_SET_VERTEX_ATTRIBUTE_A_SOURCE_ACTIVE 0x00000000
+#define NVC097_SET_VERTEX_ATTRIBUTE_A_SOURCE_INACTIVE 0x00000001
+#define NVC097_SET_VERTEX_ATTRIBUTE_A_OFFSET 20:7
+#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS 26:21
+#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001
+#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002
+#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003
+#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004
+#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005
+#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8B8G8R8
0x0000002F +#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NVC097_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8 0x00000034 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE 29:27 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SNORM 0x00000001 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UNORM 0x00000002 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SINT 0x00000003 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UINT 0x00000004 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_USCALED 0x00000005 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SSCALED 0x00000006 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT 0x00000007 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B 31:31 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_FALSE 0x00000000 +#define NVC097_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_TRUE 0x00000001 + +#define NVC097_SET_VERTEX_ATTRIBUTE_B(i) (0x11a0+(i)*4) +#define NVC097_SET_VERTEX_ATTRIBUTE_B_STREAM 4:0 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_SOURCE 6:6 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_SOURCE_ACTIVE 0x00000000 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_SOURCE_INACTIVE 0x00000001 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_OFFSET 20:7 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS 26:21 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define 
NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NVC097_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8 0x00000034 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE 29:27 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SNORM 0x00000001 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UNORM 0x00000002 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SINT 0x00000003 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UINT 0x00000004 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_USCALED 0x00000005 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SSCALED 0x00000006 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_FLOAT 0x00000007 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B 31:31 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_FALSE 0x00000000 +#define NVC097_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_TRUE 0x00000001 + +#define NVC097_SET_ANTI_ALIAS_SAMPLE_POSITIONS(i) (0x11e0+(i)*4) +#define NVC097_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X0 3:0 +#define NVC097_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y0 7:4 +#define NVC097_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X1 11:8 +#define NVC097_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y1 15:12 +#define NVC097_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X2 19:16 +#define NVC097_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y2 23:20 +#define NVC097_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X3 27:24 +#define NVC097_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y3 31:28 + +#define NVC097_SET_OFFSET_RENDER_TARGET_INDEX 0x11f0 +#define NVC097_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX 0:0 +#define NVC097_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_FALSE 0x00000000 +#define NVC097_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_TRUE 0x00000001 + +#define NVC097_FORCE_HEAVYWEIGHT_METHOD_SYNC 0x11f4 +#define NVC097_FORCE_HEAVYWEIGHT_METHOD_SYNC_V 31:0 + +#define NVC097_SET_COVERAGE_TO_COLOR 0x11f8 +#define NVC097_SET_COVERAGE_TO_COLOR_ENABLE 0:0 +#define NVC097_SET_COVERAGE_TO_COLOR_ENABLE_FALSE 0x00000000 +#define NVC097_SET_COVERAGE_TO_COLOR_ENABLE_TRUE 0x00000001 +#define NVC097_SET_COVERAGE_TO_COLOR_CT_SELECT 6:4 + +#define NVC097_DECOMPRESS_ZETA_SURFACE 0x11fc +#define NVC097_DECOMPRESS_ZETA_SURFACE_Z_ENABLE 0:0 +#define NVC097_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_FALSE 0x00000000 +#define NVC097_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_TRUE 0x00000001 +#define NVC097_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE 4:4 +#define NVC097_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_FALSE 0x00000000 +#define NVC097_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_SCREEN_STATE_MASK 0x1204 +#define NVC097_SET_SCREEN_STATE_MASK_MASK 3:0 + +#define NVC097_SET_ZT_SPARSE 0x1208 +#define NVC097_SET_ZT_SPARSE_ENABLE 0:0 +#define NVC097_SET_ZT_SPARSE_ENABLE_FALSE 0x00000000 +#define NVC097_SET_ZT_SPARSE_ENABLE_TRUE 0x00000001 +#define NVC097_SET_ZT_SPARSE_UNMAPPED_COMPARE 1:1 +#define NVC097_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_UNMAPPED_0 0x00000000 +#define NVC097_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_FAIL_ALWAYS 0x00000001 + +#define NVC097_INVALIDATE_SAMPLER_CACHE_ALL 0x120c +#define NVC097_INVALIDATE_SAMPLER_CACHE_ALL_V 0:0 + +#define NVC097_INVALIDATE_TEXTURE_HEADER_CACHE_ALL 0x1210 +#define NVC097_INVALIDATE_TEXTURE_HEADER_CACHE_ALL_V 0:0 + +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST 0x1214 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX 15:0 +#define 
NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT 27:16 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT 0x1218 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_START_INDEX 15:0 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC097_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NVC097_SET_CT_SELECT 0x121c +#define NVC097_SET_CT_SELECT_TARGET_COUNT 3:0 +#define NVC097_SET_CT_SELECT_TARGET0 6:4 +#define NVC097_SET_CT_SELECT_TARGET1 9:7 +#define NVC097_SET_CT_SELECT_TARGET2 12:10 +#define NVC097_SET_CT_SELECT_TARGET3 15:13 +#define NVC097_SET_CT_SELECT_TARGET4 18:16 +#define 
NVC097_SET_CT_SELECT_TARGET5 21:19 +#define NVC097_SET_CT_SELECT_TARGET6 24:22 +#define NVC097_SET_CT_SELECT_TARGET7 27:25 + +#define NVC097_SET_COMPRESSION_THRESHOLD 0x1220 +#define NVC097_SET_COMPRESSION_THRESHOLD_SAMPLES 3:0 +#define NVC097_SET_COMPRESSION_THRESHOLD_SAMPLES__0 0x00000000 +#define NVC097_SET_COMPRESSION_THRESHOLD_SAMPLES__1 0x00000001 +#define NVC097_SET_COMPRESSION_THRESHOLD_SAMPLES__2 0x00000002 +#define NVC097_SET_COMPRESSION_THRESHOLD_SAMPLES__4 0x00000003 +#define NVC097_SET_COMPRESSION_THRESHOLD_SAMPLES__8 0x00000004 +#define NVC097_SET_COMPRESSION_THRESHOLD_SAMPLES__16 0x00000005 +#define NVC097_SET_COMPRESSION_THRESHOLD_SAMPLES__32 0x00000006 +#define NVC097_SET_COMPRESSION_THRESHOLD_SAMPLES__64 0x00000007 +#define NVC097_SET_COMPRESSION_THRESHOLD_SAMPLES__128 0x00000008 +#define NVC097_SET_COMPRESSION_THRESHOLD_SAMPLES__256 0x00000009 +#define NVC097_SET_COMPRESSION_THRESHOLD_SAMPLES__512 0x0000000A +#define NVC097_SET_COMPRESSION_THRESHOLD_SAMPLES__1024 0x0000000B +#define NVC097_SET_COMPRESSION_THRESHOLD_SAMPLES__2048 0x0000000C + +#define NVC097_SET_PIXEL_SHADER_INTERLOCK_CONTROL 0x1224 +#define NVC097_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE 1:0 +#define NVC097_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_NO_CONFLICT_DETECT 0x00000000 +#define NVC097_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_SAMPLE 0x00000001 +#define NVC097_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_PIXEL 0x00000002 +#define NVC097_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE 2:2 +#define NVC097_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_16X16 0x00000000 +#define NVC097_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_8X8 0x00000001 +#define NVC097_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER 3:3 +#define NVC097_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_ORDERED 0x00000000 +#define NVC097_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_UNORDERED 0x00000001 + +#define NVC097_SET_ZT_SIZE_A 0x1228 +#define NVC097_SET_ZT_SIZE_A_WIDTH 27:0 + +#define NVC097_SET_ZT_SIZE_B 0x122c +#define NVC097_SET_ZT_SIZE_B_HEIGHT 17:0 + +#define NVC097_SET_ZT_SIZE_C 0x1230 +#define NVC097_SET_ZT_SIZE_C_THIRD_DIMENSION 15:0 +#define NVC097_SET_ZT_SIZE_C_CONTROL 16:16 +#define NVC097_SET_ZT_SIZE_C_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000 +#define NVC097_SET_ZT_SIZE_C_CONTROL_ARRAY_SIZE_IS_ONE 0x00000001 + +#define NVC097_SET_SAMPLER_BINDING 0x1234 +#define NVC097_SET_SAMPLER_BINDING_V 0:0 +#define NVC097_SET_SAMPLER_BINDING_V_INDEPENDENTLY 0x00000000 +#define NVC097_SET_SAMPLER_BINDING_V_VIA_HEADER_BINDING 0x00000001 + +#define NVC097_DRAW_AUTO 0x123c +#define NVC097_DRAW_AUTO_BYTE_COUNT 31:0 + +#define NVC097_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK(i) (0x1240+(i)*4) +#define NVC097_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK_V 31:0 + +#define NVC097_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE 0x1260 +#define NVC097_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_INDEX 7:0 +#define NVC097_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_VALUE 23:8 + +#define NVC097_SET_BACK_END_COPY_A 0x1264 +#define NVC097_SET_BACK_END_COPY_A_DWORDS 7:0 +#define NVC097_SET_BACK_END_COPY_A_SATURATE32_ENABLE 8:8 +#define NVC097_SET_BACK_END_COPY_A_SATURATE32_ENABLE_FALSE 0x00000000 +#define NVC097_SET_BACK_END_COPY_A_SATURATE32_ENABLE_TRUE 0x00000001 +#define 
NVC097_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE 12:12 +#define NVC097_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_FALSE 0x00000000 +#define NVC097_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_BACK_END_COPY_B 0x1268 +#define NVC097_SET_BACK_END_COPY_B_SRC_ADDRESS_UPPER 7:0 + +#define NVC097_SET_BACK_END_COPY_C 0x126c +#define NVC097_SET_BACK_END_COPY_C_SRC_ADDRESS_LOWER 31:0 + +#define NVC097_SET_BACK_END_COPY_D 0x1270 +#define NVC097_SET_BACK_END_COPY_D_DEST_ADDRESS_UPPER 7:0 + +#define NVC097_SET_BACK_END_COPY_E 0x1274 +#define NVC097_SET_BACK_END_COPY_E_DEST_ADDRESS_LOWER 31:0 + +#define NVC097_SET_CIRCULAR_BUFFER_SIZE 0x1280 +#define NVC097_SET_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 21:0 + +#define NVC097_SET_VTG_REGISTER_WATERMARKS 0x1284 +#define NVC097_SET_VTG_REGISTER_WATERMARKS_LOW 15:0 +#define NVC097_SET_VTG_REGISTER_WATERMARKS_HIGH 31:16 + +#define NVC097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVC097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NVC097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC097_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS 0x1290 +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY 5:4 +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC097_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE 0x12a4 +#define NVC097_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE_V 31:0 + +#define NVC097_CLEAR_ZCULL_REGION 0x12c8 +#define NVC097_CLEAR_ZCULL_REGION_Z_ENABLE 0:0 +#define NVC097_CLEAR_ZCULL_REGION_Z_ENABLE_FALSE 0x00000000 +#define NVC097_CLEAR_ZCULL_REGION_Z_ENABLE_TRUE 0x00000001 +#define NVC097_CLEAR_ZCULL_REGION_STENCIL_ENABLE 4:4 +#define NVC097_CLEAR_ZCULL_REGION_STENCIL_ENABLE_FALSE 0x00000000 +#define NVC097_CLEAR_ZCULL_REGION_STENCIL_ENABLE_TRUE 0x00000001 +#define NVC097_CLEAR_ZCULL_REGION_USE_CLEAR_RECT 1:1 +#define NVC097_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_FALSE 0x00000000 +#define NVC097_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_TRUE 0x00000001 +#define NVC097_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX 2:2 +#define NVC097_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_FALSE 0x00000000 +#define NVC097_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_TRUE 0x00000001 +#define NVC097_CLEAR_ZCULL_REGION_RT_ARRAY_INDEX 20:5 +#define NVC097_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE 3:3 +#define NVC097_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_FALSE 0x00000000 +#define NVC097_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_TRUE 0x00000001 + +#define NVC097_SET_DEPTH_TEST 0x12cc +#define NVC097_SET_DEPTH_TEST_ENABLE 0:0 +#define NVC097_SET_DEPTH_TEST_ENABLE_FALSE 0x00000000 +#define NVC097_SET_DEPTH_TEST_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_FILL_MODE 0x12d0 +#define NVC097_SET_FILL_MODE_V 31:0 +#define NVC097_SET_FILL_MODE_V_POINT 0x00000001 +#define NVC097_SET_FILL_MODE_V_WIREFRAME 0x00000002 +#define NVC097_SET_FILL_MODE_V_SOLID 0x00000003 + +#define NVC097_SET_SHADE_MODE 0x12d4 +#define NVC097_SET_SHADE_MODE_V 31:0 +#define NVC097_SET_SHADE_MODE_V_FLAT 0x00000001 +#define NVC097_SET_SHADE_MODE_V_GOURAUD 0x00000002 +#define NVC097_SET_SHADE_MODE_V_OGL_FLAT 0x00001D00 +#define NVC097_SET_SHADE_MODE_V_OGL_SMOOTH 
0x00001D01 + +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS 0x12d8 +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY 5:4 +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS 0x12dc +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY 5:4 +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC097_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL 0x12e0 +#define NVC097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT 3:0 +#define NVC097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1 0x00000000 +#define NVC097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_2X2 0x00000001 +#define NVC097_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1_VIRTUAL_SAMPLES 0x00000002 + +#define NVC097_SET_BLEND_STATE_PER_TARGET 0x12e4 +#define NVC097_SET_BLEND_STATE_PER_TARGET_ENABLE 0:0 +#define NVC097_SET_BLEND_STATE_PER_TARGET_ENABLE_FALSE 0x00000000 +#define NVC097_SET_BLEND_STATE_PER_TARGET_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_DEPTH_WRITE 0x12e8 +#define NVC097_SET_DEPTH_WRITE_ENABLE 0:0 +#define NVC097_SET_DEPTH_WRITE_ENABLE_FALSE 0x00000000 +#define NVC097_SET_DEPTH_WRITE_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_ALPHA_TEST 0x12ec +#define NVC097_SET_ALPHA_TEST_ENABLE 0:0 +#define NVC097_SET_ALPHA_TEST_ENABLE_FALSE 0x00000000 +#define NVC097_SET_ALPHA_TEST_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_INLINE_INDEX4X8_ALIGN 0x1300 +#define NVC097_SET_INLINE_INDEX4X8_ALIGN_COUNT 29:0 +#define NVC097_SET_INLINE_INDEX4X8_ALIGN_START 31:30 + +#define NVC097_DRAW_INLINE_INDEX4X8 0x1304 +#define NVC097_DRAW_INLINE_INDEX4X8_INDEX0 7:0 +#define NVC097_DRAW_INLINE_INDEX4X8_INDEX1 15:8 +#define NVC097_DRAW_INLINE_INDEX4X8_INDEX2 23:16 +#define NVC097_DRAW_INLINE_INDEX4X8_INDEX3 31:24 + +#define NVC097_D3D_SET_CULL_MODE 0x1308 +#define NVC097_D3D_SET_CULL_MODE_V 31:0 +#define NVC097_D3D_SET_CULL_MODE_V_NONE 0x00000001 +#define NVC097_D3D_SET_CULL_MODE_V_CW 0x00000002 +#define NVC097_D3D_SET_CULL_MODE_V_CCW 0x00000003 + +#define NVC097_SET_DEPTH_FUNC 0x130c +#define NVC097_SET_DEPTH_FUNC_V 31:0 +#define NVC097_SET_DEPTH_FUNC_V_OGL_NEVER 0x00000200 +#define NVC097_SET_DEPTH_FUNC_V_OGL_LESS 0x00000201 +#define NVC097_SET_DEPTH_FUNC_V_OGL_EQUAL 0x00000202 +#define NVC097_SET_DEPTH_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVC097_SET_DEPTH_FUNC_V_OGL_GREATER 0x00000204 +#define NVC097_SET_DEPTH_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVC097_SET_DEPTH_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVC097_SET_DEPTH_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVC097_SET_DEPTH_FUNC_V_D3D_NEVER 0x00000001 +#define NVC097_SET_DEPTH_FUNC_V_D3D_LESS 0x00000002 +#define NVC097_SET_DEPTH_FUNC_V_D3D_EQUAL 0x00000003 +#define NVC097_SET_DEPTH_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVC097_SET_DEPTH_FUNC_V_D3D_GREATER 0x00000005 +#define NVC097_SET_DEPTH_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define 
NVC097_SET_DEPTH_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVC097_SET_DEPTH_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVC097_SET_ALPHA_REF 0x1310 +#define NVC097_SET_ALPHA_REF_V 31:0 + +#define NVC097_SET_ALPHA_FUNC 0x1314 +#define NVC097_SET_ALPHA_FUNC_V 31:0 +#define NVC097_SET_ALPHA_FUNC_V_OGL_NEVER 0x00000200 +#define NVC097_SET_ALPHA_FUNC_V_OGL_LESS 0x00000201 +#define NVC097_SET_ALPHA_FUNC_V_OGL_EQUAL 0x00000202 +#define NVC097_SET_ALPHA_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVC097_SET_ALPHA_FUNC_V_OGL_GREATER 0x00000204 +#define NVC097_SET_ALPHA_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVC097_SET_ALPHA_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVC097_SET_ALPHA_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVC097_SET_ALPHA_FUNC_V_D3D_NEVER 0x00000001 +#define NVC097_SET_ALPHA_FUNC_V_D3D_LESS 0x00000002 +#define NVC097_SET_ALPHA_FUNC_V_D3D_EQUAL 0x00000003 +#define NVC097_SET_ALPHA_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVC097_SET_ALPHA_FUNC_V_D3D_GREATER 0x00000005 +#define NVC097_SET_ALPHA_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVC097_SET_ALPHA_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVC097_SET_ALPHA_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVC097_SET_DRAW_AUTO_STRIDE 0x1318 +#define NVC097_SET_DRAW_AUTO_STRIDE_V 11:0 + +#define NVC097_SET_BLEND_CONST_RED 0x131c +#define NVC097_SET_BLEND_CONST_RED_V 31:0 + +#define NVC097_SET_BLEND_CONST_GREEN 0x1320 +#define NVC097_SET_BLEND_CONST_GREEN_V 31:0 + +#define NVC097_SET_BLEND_CONST_BLUE 0x1324 +#define NVC097_SET_BLEND_CONST_BLUE_V 31:0 + +#define NVC097_SET_BLEND_CONST_ALPHA 0x1328 +#define NVC097_SET_BLEND_CONST_ALPHA_V 31:0 + +#define NVC097_INVALIDATE_SAMPLER_CACHE 0x1330 +#define NVC097_INVALIDATE_SAMPLER_CACHE_LINES 0:0 +#define NVC097_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000 +#define NVC097_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001 +#define NVC097_INVALIDATE_SAMPLER_CACHE_TAG 25:4 + +#define NVC097_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334 +#define NVC097_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0 +#define NVC097_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000 +#define NVC097_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001 +#define NVC097_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4 + +#define NVC097_INVALIDATE_TEXTURE_DATA_CACHE 0x1338 +#define NVC097_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0 +#define NVC097_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000 +#define NVC097_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001 +#define NVC097_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4 + +#define NVC097_SET_BLEND_SEPARATE_FOR_ALPHA 0x133c +#define NVC097_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE 0:0 +#define NVC097_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000 +#define NVC097_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_BLEND_COLOR_OP 0x1340 +#define NVC097_SET_BLEND_COLOR_OP_V 31:0 +#define NVC097_SET_BLEND_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC097_SET_BLEND_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC097_SET_BLEND_COLOR_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC097_SET_BLEND_COLOR_OP_V_OGL_MIN 0x00008007 +#define NVC097_SET_BLEND_COLOR_OP_V_OGL_MAX 0x00008008 +#define NVC097_SET_BLEND_COLOR_OP_V_D3D_ADD 0x00000001 +#define NVC097_SET_BLEND_COLOR_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC097_SET_BLEND_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVC097_SET_BLEND_COLOR_OP_V_D3D_MIN 0x00000004 +#define NVC097_SET_BLEND_COLOR_OP_V_D3D_MAX 0x00000005 + +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF 0x1344 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V 31:0 +#define 
NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC097_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC097_SET_BLEND_COLOR_DEST_COEFF 0x1348 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V 31:0 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define 
NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC097_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC097_SET_BLEND_ALPHA_OP 0x134c +#define NVC097_SET_BLEND_ALPHA_OP_V 31:0 +#define NVC097_SET_BLEND_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC097_SET_BLEND_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC097_SET_BLEND_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC097_SET_BLEND_ALPHA_OP_V_OGL_MIN 0x00008007 +#define NVC097_SET_BLEND_ALPHA_OP_V_OGL_MAX 0x00008008 +#define NVC097_SET_BLEND_ALPHA_OP_V_D3D_ADD 0x00000001 +#define NVC097_SET_BLEND_ALPHA_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC097_SET_BLEND_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVC097_SET_BLEND_ALPHA_OP_V_D3D_MIN 0x00000004 +#define NVC097_SET_BLEND_ALPHA_OP_V_D3D_MAX 0x00000005 + +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF 0x1350 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V 31:0 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define 
NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC097_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC097_SET_GLOBAL_COLOR_KEY 0x1354 +#define NVC097_SET_GLOBAL_COLOR_KEY_ENABLE 0:0 +#define NVC097_SET_GLOBAL_COLOR_KEY_ENABLE_FALSE 0x00000000 +#define NVC097_SET_GLOBAL_COLOR_KEY_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF 0x1358 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V 31:0 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define 
NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC097_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC097_SET_SINGLE_ROP_CONTROL 0x135c +#define NVC097_SET_SINGLE_ROP_CONTROL_ENABLE 0:0 +#define NVC097_SET_SINGLE_ROP_CONTROL_ENABLE_FALSE 0x00000000 +#define NVC097_SET_SINGLE_ROP_CONTROL_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_BLEND(i) (0x1360+(i)*4) +#define NVC097_SET_BLEND_ENABLE 0:0 +#define NVC097_SET_BLEND_ENABLE_FALSE 0x00000000 +#define NVC097_SET_BLEND_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_STENCIL_TEST 0x1380 +#define NVC097_SET_STENCIL_TEST_ENABLE 0:0 +#define NVC097_SET_STENCIL_TEST_ENABLE_FALSE 0x00000000 +#define NVC097_SET_STENCIL_TEST_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_STENCIL_OP_FAIL 0x1384 +#define NVC097_SET_STENCIL_OP_FAIL_V 31:0 +#define NVC097_SET_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00 +#define NVC097_SET_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000 +#define NVC097_SET_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01 +#define NVC097_SET_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02 +#define NVC097_SET_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03 +#define NVC097_SET_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A +#define NVC097_SET_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507 +#define NVC097_SET_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508 +#define NVC097_SET_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001 +#define NVC097_SET_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002 +#define NVC097_SET_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003 +#define NVC097_SET_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004 +#define NVC097_SET_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005 +#define NVC097_SET_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006 +#define NVC097_SET_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007 +#define NVC097_SET_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008 + +#define NVC097_SET_STENCIL_OP_ZFAIL 0x1388 +#define NVC097_SET_STENCIL_OP_ZFAIL_V 31:0 +#define NVC097_SET_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00 +#define NVC097_SET_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000 +#define NVC097_SET_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01 +#define NVC097_SET_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02 +#define 
NVC097_SET_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03 +#define NVC097_SET_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A +#define NVC097_SET_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507 +#define NVC097_SET_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508 +#define NVC097_SET_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001 +#define NVC097_SET_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002 +#define NVC097_SET_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003 +#define NVC097_SET_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004 +#define NVC097_SET_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005 +#define NVC097_SET_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006 +#define NVC097_SET_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007 +#define NVC097_SET_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008 + +#define NVC097_SET_STENCIL_OP_ZPASS 0x138c +#define NVC097_SET_STENCIL_OP_ZPASS_V 31:0 +#define NVC097_SET_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00 +#define NVC097_SET_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000 +#define NVC097_SET_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01 +#define NVC097_SET_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02 +#define NVC097_SET_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03 +#define NVC097_SET_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A +#define NVC097_SET_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507 +#define NVC097_SET_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508 +#define NVC097_SET_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001 +#define NVC097_SET_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002 +#define NVC097_SET_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003 +#define NVC097_SET_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004 +#define NVC097_SET_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005 +#define NVC097_SET_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006 +#define NVC097_SET_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007 +#define NVC097_SET_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008 + +#define NVC097_SET_STENCIL_FUNC 0x1390 +#define NVC097_SET_STENCIL_FUNC_V 31:0 +#define NVC097_SET_STENCIL_FUNC_V_OGL_NEVER 0x00000200 +#define NVC097_SET_STENCIL_FUNC_V_OGL_LESS 0x00000201 +#define NVC097_SET_STENCIL_FUNC_V_OGL_EQUAL 0x00000202 +#define NVC097_SET_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVC097_SET_STENCIL_FUNC_V_OGL_GREATER 0x00000204 +#define NVC097_SET_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVC097_SET_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVC097_SET_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVC097_SET_STENCIL_FUNC_V_D3D_NEVER 0x00000001 +#define NVC097_SET_STENCIL_FUNC_V_D3D_LESS 0x00000002 +#define NVC097_SET_STENCIL_FUNC_V_D3D_EQUAL 0x00000003 +#define NVC097_SET_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVC097_SET_STENCIL_FUNC_V_D3D_GREATER 0x00000005 +#define NVC097_SET_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVC097_SET_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVC097_SET_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVC097_SET_STENCIL_FUNC_REF 0x1394 +#define NVC097_SET_STENCIL_FUNC_REF_V 7:0 + +#define NVC097_SET_STENCIL_FUNC_MASK 0x1398 +#define NVC097_SET_STENCIL_FUNC_MASK_V 7:0 + +#define NVC097_SET_STENCIL_MASK 0x139c +#define NVC097_SET_STENCIL_MASK_V 7:0 + +#define NVC097_SET_DRAW_AUTO_START 0x13a4 +#define NVC097_SET_DRAW_AUTO_START_BYTE_COUNT 31:0 + +#define NVC097_SET_PS_SATURATE 0x13a8 +#define NVC097_SET_PS_SATURATE_OUTPUT0 0:0 +#define NVC097_SET_PS_SATURATE_OUTPUT0_FALSE 0x00000000 +#define NVC097_SET_PS_SATURATE_OUTPUT0_TRUE 0x00000001 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE0 1:1 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE0_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE0_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define 
NVC097_SET_PS_SATURATE_OUTPUT1 4:4 +#define NVC097_SET_PS_SATURATE_OUTPUT1_FALSE 0x00000000 +#define NVC097_SET_PS_SATURATE_OUTPUT1_TRUE 0x00000001 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE1 5:5 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE1_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE1_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC097_SET_PS_SATURATE_OUTPUT2 8:8 +#define NVC097_SET_PS_SATURATE_OUTPUT2_FALSE 0x00000000 +#define NVC097_SET_PS_SATURATE_OUTPUT2_TRUE 0x00000001 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE2 9:9 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE2_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE2_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC097_SET_PS_SATURATE_OUTPUT3 12:12 +#define NVC097_SET_PS_SATURATE_OUTPUT3_FALSE 0x00000000 +#define NVC097_SET_PS_SATURATE_OUTPUT3_TRUE 0x00000001 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE3 13:13 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE3_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE3_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC097_SET_PS_SATURATE_OUTPUT4 16:16 +#define NVC097_SET_PS_SATURATE_OUTPUT4_FALSE 0x00000000 +#define NVC097_SET_PS_SATURATE_OUTPUT4_TRUE 0x00000001 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE4 17:17 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE4_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE4_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC097_SET_PS_SATURATE_OUTPUT5 20:20 +#define NVC097_SET_PS_SATURATE_OUTPUT5_FALSE 0x00000000 +#define NVC097_SET_PS_SATURATE_OUTPUT5_TRUE 0x00000001 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE5 21:21 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE5_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE5_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC097_SET_PS_SATURATE_OUTPUT6 24:24 +#define NVC097_SET_PS_SATURATE_OUTPUT6_FALSE 0x00000000 +#define NVC097_SET_PS_SATURATE_OUTPUT6_TRUE 0x00000001 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE6 25:25 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE6_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE6_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC097_SET_PS_SATURATE_OUTPUT7 28:28 +#define NVC097_SET_PS_SATURATE_OUTPUT7_FALSE 0x00000000 +#define NVC097_SET_PS_SATURATE_OUTPUT7_TRUE 0x00000001 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE7 29:29 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE7_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC097_SET_PS_SATURATE_CLAMP_RANGE7_MINUS_ONE_TO_PLUS_ONE 0x00000001 + +#define NVC097_SET_WINDOW_ORIGIN 0x13ac +#define NVC097_SET_WINDOW_ORIGIN_MODE 0:0 +#define NVC097_SET_WINDOW_ORIGIN_MODE_UPPER_LEFT 0x00000000 +#define NVC097_SET_WINDOW_ORIGIN_MODE_LOWER_LEFT 0x00000001 +#define NVC097_SET_WINDOW_ORIGIN_FLIP_Y 4:4 +#define NVC097_SET_WINDOW_ORIGIN_FLIP_Y_FALSE 0x00000000 +#define NVC097_SET_WINDOW_ORIGIN_FLIP_Y_TRUE 0x00000001 + +#define NVC097_SET_LINE_WIDTH_FLOAT 0x13b0 +#define NVC097_SET_LINE_WIDTH_FLOAT_V 31:0 + +#define NVC097_SET_ALIASED_LINE_WIDTH_FLOAT 0x13b4 +#define NVC097_SET_ALIASED_LINE_WIDTH_FLOAT_V 31:0 + +#define NVC097_SET_LINE_MULTISAMPLE_OVERRIDE 0x1418 +#define NVC097_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE 0:0 +#define NVC097_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_FALSE 0x00000000 +#define NVC097_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_ALPHA_HYSTERESIS 0x1420 +#define NVC097_SET_ALPHA_HYSTERESIS_ROUNDS_OF_ALPHA 7:0 + +#define NVC097_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424 +#define NVC097_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0 +#define 
NVC097_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC097_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC097_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4 + +#define NVC097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x1428 +#define NVC097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0 +#define NVC097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC097_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4 + +#define NVC097_SET_GLOBAL_BASE_VERTEX_INDEX 0x1434 +#define NVC097_SET_GLOBAL_BASE_VERTEX_INDEX_V 31:0 + +#define NVC097_SET_GLOBAL_BASE_INSTANCE_INDEX 0x1438 +#define NVC097_SET_GLOBAL_BASE_INSTANCE_INDEX_V 31:0 + +#define NVC097_SET_PS_WARP_WATERMARKS 0x1450 +#define NVC097_SET_PS_WARP_WATERMARKS_LOW 15:0 +#define NVC097_SET_PS_WARP_WATERMARKS_HIGH 31:16 + +#define NVC097_SET_PS_REGISTER_WATERMARKS 0x1454 +#define NVC097_SET_PS_REGISTER_WATERMARKS_LOW 15:0 +#define NVC097_SET_PS_REGISTER_WATERMARKS_HIGH 31:16 + +#define NVC097_STORE_ZCULL 0x1464 +#define NVC097_STORE_ZCULL_V 0:0 + +#define NVC097_SET_ITERATED_BLEND_CONSTANT_RED(j) (0x1480+(j)*16) +#define NVC097_SET_ITERATED_BLEND_CONSTANT_RED_V 15:0 + +#define NVC097_SET_ITERATED_BLEND_CONSTANT_GREEN(j) (0x1484+(j)*16) +#define NVC097_SET_ITERATED_BLEND_CONSTANT_GREEN_V 15:0 + +#define NVC097_SET_ITERATED_BLEND_CONSTANT_BLUE(j) (0x1488+(j)*16) +#define NVC097_SET_ITERATED_BLEND_CONSTANT_BLUE_V 15:0 + +#define NVC097_LOAD_ZCULL 0x1500 +#define NVC097_LOAD_ZCULL_V 0:0 + +#define NVC097_SET_SURFACE_CLIP_ID_HEIGHT 0x1504 +#define NVC097_SET_SURFACE_CLIP_ID_HEIGHT_V 31:0 + +#define NVC097_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL 0x1508 +#define NVC097_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMIN 15:0 +#define NVC097_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMAX 31:16 + +#define NVC097_SET_CLIP_ID_CLEAR_RECT_VERTICAL 0x150c +#define NVC097_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMIN 15:0 +#define NVC097_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMAX 31:16 + +#define NVC097_SET_USER_CLIP_ENABLE 0x1510 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE0 0:0 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE0_FALSE 0x00000000 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE0_TRUE 0x00000001 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE1 1:1 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE1_FALSE 0x00000000 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE1_TRUE 0x00000001 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE2 2:2 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE2_FALSE 0x00000000 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE2_TRUE 0x00000001 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE3 3:3 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE3_FALSE 0x00000000 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE3_TRUE 0x00000001 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE4 4:4 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE4_FALSE 0x00000000 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE4_TRUE 0x00000001 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE5 5:5 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE5_FALSE 0x00000000 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE5_TRUE 0x00000001 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE6 6:6 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE6_FALSE 0x00000000 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE6_TRUE 0x00000001 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE7 7:7 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE7_FALSE 0x00000000 +#define NVC097_SET_USER_CLIP_ENABLE_PLANE7_TRUE 0x00000001 + +#define NVC097_SET_ZPASS_PIXEL_COUNT 0x1514 +#define NVC097_SET_ZPASS_PIXEL_COUNT_ENABLE 0:0 +#define 
NVC097_SET_ZPASS_PIXEL_COUNT_ENABLE_FALSE 0x00000000 +#define NVC097_SET_ZPASS_PIXEL_COUNT_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_POINT_SIZE 0x1518 +#define NVC097_SET_POINT_SIZE_V 31:0 + +#define NVC097_SET_ZCULL_STATS 0x151c +#define NVC097_SET_ZCULL_STATS_ENABLE 0:0 +#define NVC097_SET_ZCULL_STATS_ENABLE_FALSE 0x00000000 +#define NVC097_SET_ZCULL_STATS_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_POINT_SPRITE 0x1520 +#define NVC097_SET_POINT_SPRITE_ENABLE 0:0 +#define NVC097_SET_POINT_SPRITE_ENABLE_FALSE 0x00000000 +#define NVC097_SET_POINT_SPRITE_ENABLE_TRUE 0x00000001 + +#define NVC097_SET_SHADER_EXCEPTIONS 0x1528 +#define NVC097_SET_SHADER_EXCEPTIONS_ENABLE 0:0 +#define NVC097_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000 +#define NVC097_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001 + +#define NVC097_CLEAR_REPORT_VALUE 0x1530 +#define NVC097_CLEAR_REPORT_VALUE_TYPE 4:0 +#define NVC097_CLEAR_REPORT_VALUE_TYPE_DA_VERTICES_GENERATED 0x00000012 +#define NVC097_CLEAR_REPORT_VALUE_TYPE_DA_PRIMITIVES_GENERATED 0x00000013 +#define NVC097_CLEAR_REPORT_VALUE_TYPE_VS_INVOCATIONS 0x00000015 +#define NVC097_CLEAR_REPORT_VALUE_TYPE_TI_INVOCATIONS 0x00000016 +#define NVC097_CLEAR_REPORT_VALUE_TYPE_TS_INVOCATIONS 0x00000017 +#define NVC097_CLEAR_REPORT_VALUE_TYPE_TS_PRIMITIVES_GENERATED 0x00000018 +#define NVC097_CLEAR_REPORT_VALUE_TYPE_GS_INVOCATIONS 0x0000001A +#define NVC097_CLEAR_REPORT_VALUE_TYPE_GS_PRIMITIVES_GENERATED 0x0000001B +#define NVC097_CLEAR_REPORT_VALUE_TYPE_VTG_PRIMITIVES_OUT 0x0000001F +#define NVC097_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_SUCCEEDED 0x00000010 +#define NVC097_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_NEEDED 0x00000011 +#define NVC097_CLEAR_REPORT_VALUE_TYPE_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000003 +#define NVC097_CLEAR_REPORT_VALUE_TYPE_CLIPPER_INVOCATIONS 0x0000001C +#define NVC097_CLEAR_REPORT_VALUE_TYPE_CLIPPER_PRIMITIVES_GENERATED 0x0000001D +#define NVC097_CLEAR_REPORT_VALUE_TYPE_ZCULL_STATS 0x00000002 +#define NVC097_CLEAR_REPORT_VALUE_TYPE_PS_INVOCATIONS 0x0000001E +#define NVC097_CLEAR_REPORT_VALUE_TYPE_ZPASS_PIXEL_CNT 0x00000001 +#define NVC097_CLEAR_REPORT_VALUE_TYPE_ALPHA_BETA_CLOCKS 0x00000004 + +#define NVC097_SET_ANTI_ALIAS_ENABLE 0x1534 +#define NVC097_SET_ANTI_ALIAS_ENABLE_V 0:0 +#define NVC097_SET_ANTI_ALIAS_ENABLE_V_FALSE 0x00000000 +#define NVC097_SET_ANTI_ALIAS_ENABLE_V_TRUE 0x00000001 + +#define NVC097_SET_ZT_SELECT 0x1538 +#define NVC097_SET_ZT_SELECT_TARGET_COUNT 0:0 + +#define NVC097_SET_ANTI_ALIAS_ALPHA_CONTROL 0x153c +#define NVC097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE 0:0 +#define NVC097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_DISABLE 0x00000000 +#define NVC097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_ENABLE 0x00000001 +#define NVC097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE 4:4 +#define NVC097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_DISABLE 0x00000000 +#define NVC097_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_ENABLE 0x00000001 + +#define NVC097_SET_RENDER_ENABLE_A 0x1550 +#define NVC097_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC097_SET_RENDER_ENABLE_B 0x1554 +#define NVC097_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC097_SET_RENDER_ENABLE_C 0x1558 +#define NVC097_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC097_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC097_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC097_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC097_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define 
+#define NVC097_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NVC097_SET_TEX_SAMPLER_POOL_A 0x155c
+#define NVC097_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVC097_SET_TEX_SAMPLER_POOL_B 0x1560
+#define NVC097_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVC097_SET_TEX_SAMPLER_POOL_C 0x1564
+#define NVC097_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0
+
+#define NVC097_SET_SLOPE_SCALE_DEPTH_BIAS 0x156c
+#define NVC097_SET_SLOPE_SCALE_DEPTH_BIAS_V 31:0
+
+#define NVC097_SET_ANTI_ALIASED_LINE 0x1570
+#define NVC097_SET_ANTI_ALIASED_LINE_ENABLE 0:0
+#define NVC097_SET_ANTI_ALIASED_LINE_ENABLE_FALSE 0x00000000
+#define NVC097_SET_ANTI_ALIASED_LINE_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_TEX_HEADER_POOL_A 0x1574
+#define NVC097_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVC097_SET_TEX_HEADER_POOL_B 0x1578
+#define NVC097_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVC097_SET_TEX_HEADER_POOL_C 0x157c
+#define NVC097_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0
+
+#define NVC097_SET_ACTIVE_ZCULL_REGION 0x1590
+#define NVC097_SET_ACTIVE_ZCULL_REGION_ID 5:0
+
+#define NVC097_SET_TWO_SIDED_STENCIL_TEST 0x1594
+#define NVC097_SET_TWO_SIDED_STENCIL_TEST_ENABLE 0:0
+#define NVC097_SET_TWO_SIDED_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NVC097_SET_TWO_SIDED_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_BACK_STENCIL_OP_FAIL 0x1598
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V 31:0
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NVC097_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL 0x159c
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V 31:0
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVC097_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS 0x15a0
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V 31:0
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NVC097_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NVC097_SET_BACK_STENCIL_FUNC 0x15a4
+#define NVC097_SET_BACK_STENCIL_FUNC_V 31:0
+#define NVC097_SET_BACK_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NVC097_SET_BACK_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NVC097_SET_BACK_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NVC097_SET_BACK_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVC097_SET_BACK_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NVC097_SET_BACK_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVC097_SET_BACK_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVC097_SET_BACK_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVC097_SET_BACK_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NVC097_SET_BACK_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NVC097_SET_BACK_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NVC097_SET_BACK_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVC097_SET_BACK_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVC097_SET_BACK_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVC097_SET_BACK_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVC097_SET_BACK_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVC097_SET_SRGB_WRITE 0x15b8
+#define NVC097_SET_SRGB_WRITE_ENABLE 0:0
+#define NVC097_SET_SRGB_WRITE_ENABLE_FALSE 0x00000000
+#define NVC097_SET_SRGB_WRITE_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_DEPTH_BIAS 0x15bc
+#define NVC097_SET_DEPTH_BIAS_V 31:0
+
+#define NVC097_SET_ZCULL_REGION_FORMAT 0x15c8
+#define NVC097_SET_ZCULL_REGION_FORMAT_TYPE 3:0
+#define NVC097_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X4 0x00000000
+#define NVC097_SET_ZCULL_REGION_FORMAT_TYPE_ZS_4X4 0x00000001
+#define NVC097_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X2 0x00000002
+#define NVC097_SET_ZCULL_REGION_FORMAT_TYPE_Z_2X4 0x00000003
+#define NVC097_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X8_4X4 0x00000004
+#define NVC097_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_4X2 0x00000005
+#define NVC097_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_2X4 0x00000006
+#define NVC097_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X16_4X8 0x00000007
+#define NVC097_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_2X2 0x00000008
+#define NVC097_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_4X2 0x00000009
+#define NVC097_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_2X4 0x0000000A
+#define NVC097_SET_ZCULL_REGION_FORMAT_TYPE_ZS_8X8_2X2 0x0000000B
+#define NVC097_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_1X1 0x0000000C
+
+#define NVC097_SET_RT_LAYER 0x15cc
+#define NVC097_SET_RT_LAYER_V 15:0
+#define NVC097_SET_RT_LAYER_CONTROL 16:16
+#define NVC097_SET_RT_LAYER_CONTROL_V_SELECTS_LAYER 0x00000000
+#define NVC097_SET_RT_LAYER_CONTROL_GEOMETRY_SHADER_SELECTS_LAYER 0x00000001
+
+#define NVC097_SET_ANTI_ALIAS 0x15d0
+#define NVC097_SET_ANTI_ALIAS_SAMPLES 3:0
+#define NVC097_SET_ANTI_ALIAS_SAMPLES_MODE_1X1 0x00000000
+#define NVC097_SET_ANTI_ALIAS_SAMPLES_MODE_2X1 0x00000001
+#define NVC097_SET_ANTI_ALIAS_SAMPLES_MODE_2X2 0x00000002
+#define NVC097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2 0x00000003
+#define NVC097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVC097_SET_ANTI_ALIAS_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVC097_SET_ANTI_ALIAS_SAMPLES_MODE_4X4 0x00000006
+#define NVC097_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_4 0x00000008
+#define NVC097_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_12 0x00000009
+#define NVC097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_8 0x0000000A
+#define NVC097_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_24 0x0000000B
+
+#define NVC097_SET_EDGE_FLAG 0x15e4
+#define NVC097_SET_EDGE_FLAG_V 0:0
+#define NVC097_SET_EDGE_FLAG_V_FALSE 0x00000000
+#define NVC097_SET_EDGE_FLAG_V_TRUE 0x00000001
+
+#define NVC097_DRAW_INLINE_INDEX 0x15e8
+#define NVC097_DRAW_INLINE_INDEX_V 31:0
+
+#define NVC097_SET_INLINE_INDEX2X16_ALIGN 0x15ec
+#define NVC097_SET_INLINE_INDEX2X16_ALIGN_COUNT 30:0
+#define NVC097_SET_INLINE_INDEX2X16_ALIGN_START_ODD 31:31
+#define NVC097_SET_INLINE_INDEX2X16_ALIGN_START_ODD_FALSE 0x00000000
+#define NVC097_SET_INLINE_INDEX2X16_ALIGN_START_ODD_TRUE 0x00000001
+
+#define NVC097_DRAW_INLINE_INDEX2X16 0x15f0
+#define NVC097_DRAW_INLINE_INDEX2X16_EVEN 15:0
+#define NVC097_DRAW_INLINE_INDEX2X16_ODD 31:16
+
+#define NVC097_SET_VERTEX_GLOBAL_BASE_OFFSET_A 0x15f4
+#define NVC097_SET_VERTEX_GLOBAL_BASE_OFFSET_A_UPPER 7:0
+
+#define NVC097_SET_VERTEX_GLOBAL_BASE_OFFSET_B 0x15f8
+#define NVC097_SET_VERTEX_GLOBAL_BASE_OFFSET_B_LOWER 31:0
+
+#define NVC097_SET_ZCULL_REGION_PIXEL_OFFSET_A 0x15fc
+#define NVC097_SET_ZCULL_REGION_PIXEL_OFFSET_A_WIDTH 15:0
+
+#define NVC097_SET_ZCULL_REGION_PIXEL_OFFSET_B 0x1600
+#define NVC097_SET_ZCULL_REGION_PIXEL_OFFSET_B_HEIGHT 15:0
+
+#define NVC097_SET_POINT_SPRITE_SELECT 0x1604
+#define NVC097_SET_POINT_SPRITE_SELECT_RMODE 1:0
+#define NVC097_SET_POINT_SPRITE_SELECT_RMODE_ZERO 0x00000000
+#define NVC097_SET_POINT_SPRITE_SELECT_RMODE_FROM_R 0x00000001
+#define NVC097_SET_POINT_SPRITE_SELECT_RMODE_FROM_S 0x00000002
+#define NVC097_SET_POINT_SPRITE_SELECT_ORIGIN 2:2
+#define NVC097_SET_POINT_SPRITE_SELECT_ORIGIN_BOTTOM 0x00000000
+#define NVC097_SET_POINT_SPRITE_SELECT_ORIGIN_TOP 0x00000001
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE0 3:3
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE0_PASSTHROUGH 0x00000000
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE0_GENERATE 0x00000001
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE1 4:4
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE1_PASSTHROUGH 0x00000000
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE1_GENERATE 0x00000001
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE2 5:5
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE2_PASSTHROUGH 0x00000000
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE2_GENERATE 0x00000001
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE3 6:6
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE3_PASSTHROUGH 0x00000000
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE3_GENERATE 0x00000001
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE4 7:7
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE4_PASSTHROUGH 0x00000000
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE4_GENERATE 0x00000001
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE5 8:8
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE5_PASSTHROUGH 0x00000000
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE5_GENERATE 0x00000001
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE6 9:9
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE6_PASSTHROUGH 0x00000000
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE6_GENERATE 0x00000001
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE7 10:10
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE7_PASSTHROUGH 0x00000000
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE7_GENERATE 0x00000001
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE8 11:11
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE8_PASSTHROUGH 0x00000000
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE8_GENERATE 0x00000001
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE9 12:12
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE9_PASSTHROUGH 0x00000000
+#define NVC097_SET_POINT_SPRITE_SELECT_TEXTURE9_GENERATE 0x00000001
+
+#define NVC097_SET_PROGRAM_REGION_A 0x1608
+#define NVC097_SET_PROGRAM_REGION_A_ADDRESS_UPPER 7:0
+
+#define NVC097_SET_PROGRAM_REGION_B 0x160c
+#define NVC097_SET_PROGRAM_REGION_B_ADDRESS_LOWER 31:0
+
+#define NVC097_SET_ATTRIBUTE_DEFAULT 0x1610
+#define NVC097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE 0:0
+#define NVC097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_0001 0x00000000
+#define NVC097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_1111 0x00000001
+#define NVC097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR 1:1
+#define NVC097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0000 0x00000000
+#define NVC097_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0001 0x00000001
+#define NVC097_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR 2:2
+#define NVC097_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0000 0x00000000
+#define NVC097_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0001 0x00000001
+#define NVC097_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE 3:3
+#define NVC097_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0000 0x00000000
+#define NVC097_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0001 0x00000001
+#define NVC097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0 4:4
+#define NVC097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_0001 0x00000000
+#define NVC097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_1111 0x00000001
+#define NVC097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15 5:5
+#define NVC097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0000 0x00000000
+#define NVC097_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0001 0x00000001
+
+#define NVC097_END 0x1614
+#define NVC097_END_V 0:0
+
+#define NVC097_BEGIN 0x1618
+#define NVC097_BEGIN_OP 15:0
+#define NVC097_BEGIN_OP_POINTS 0x00000000
+#define NVC097_BEGIN_OP_LINES 0x00000001
+#define NVC097_BEGIN_OP_LINE_LOOP 0x00000002
+#define NVC097_BEGIN_OP_LINE_STRIP 0x00000003
+#define NVC097_BEGIN_OP_TRIANGLES 0x00000004
+#define NVC097_BEGIN_OP_TRIANGLE_STRIP 0x00000005
+#define NVC097_BEGIN_OP_TRIANGLE_FAN 0x00000006
+#define NVC097_BEGIN_OP_QUADS 0x00000007
+#define NVC097_BEGIN_OP_QUAD_STRIP 0x00000008
+#define NVC097_BEGIN_OP_POLYGON 0x00000009
+#define NVC097_BEGIN_OP_LINELIST_ADJCY 0x0000000A
+#define NVC097_BEGIN_OP_LINESTRIP_ADJCY 0x0000000B
+#define NVC097_BEGIN_OP_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC097_BEGIN_OP_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC097_BEGIN_OP_PATCH 0x0000000E
+#define NVC097_BEGIN_PRIMITIVE_ID 24:24
+#define NVC097_BEGIN_PRIMITIVE_ID_FIRST 0x00000000
+#define NVC097_BEGIN_PRIMITIVE_ID_UNCHANGED 0x00000001
+#define NVC097_BEGIN_INSTANCE_ID 27:26
+#define NVC097_BEGIN_INSTANCE_ID_FIRST 0x00000000
+#define NVC097_BEGIN_INSTANCE_ID_SUBSEQUENT 0x00000001
+#define NVC097_BEGIN_INSTANCE_ID_UNCHANGED 0x00000002
+#define NVC097_BEGIN_SPLIT_MODE 30:29
+#define NVC097_BEGIN_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000
+#define NVC097_BEGIN_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001
+#define NVC097_BEGIN_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002
+#define NVC097_BEGIN_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003
+
+#define NVC097_SET_VERTEX_ID_COPY 0x161c
+#define NVC097_SET_VERTEX_ID_COPY_ENABLE 0:0
+#define NVC097_SET_VERTEX_ID_COPY_ENABLE_FALSE 0x00000000
+#define NVC097_SET_VERTEX_ID_COPY_ENABLE_TRUE 0x00000001
+#define NVC097_SET_VERTEX_ID_COPY_ATTRIBUTE_SLOT 11:4
+
+#define NVC097_ADD_TO_PRIMITIVE_ID 0x1620
+#define NVC097_ADD_TO_PRIMITIVE_ID_V 31:0
+
+#define NVC097_LOAD_PRIMITIVE_ID 0x1624
+#define NVC097_LOAD_PRIMITIVE_ID_V 31:0
+
+#define NVC097_SET_SHADER_BASED_CULL 0x162c
+#define NVC097_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE 1:1
+#define NVC097_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_FALSE 0x00000000
+#define NVC097_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_TRUE 0x00000001
+#define NVC097_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE 0:0
+#define NVC097_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_FALSE 0x00000000
+#define NVC097_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_CLASS_VERSION 0x1638
+#define NVC097_SET_CLASS_VERSION_CURRENT 15:0
+#define NVC097_SET_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC097_SET_DA_PRIMITIVE_RESTART 0x1644
+#define NVC097_SET_DA_PRIMITIVE_RESTART_ENABLE 0:0
+#define NVC097_SET_DA_PRIMITIVE_RESTART_ENABLE_FALSE 0x00000000
+#define NVC097_SET_DA_PRIMITIVE_RESTART_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_DA_PRIMITIVE_RESTART_INDEX 0x1648
+#define NVC097_SET_DA_PRIMITIVE_RESTART_INDEX_V 31:0
+
+#define NVC097_SET_DA_OUTPUT 0x164c
+#define NVC097_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START 12:12
+#define NVC097_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_FALSE 0x00000000
+#define NVC097_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_TRUE 0x00000001
+
+#define NVC097_SET_ANTI_ALIASED_POINT 0x1658
+#define NVC097_SET_ANTI_ALIASED_POINT_ENABLE 0:0
+#define NVC097_SET_ANTI_ALIASED_POINT_ENABLE_FALSE 0x00000000
+#define NVC097_SET_ANTI_ALIASED_POINT_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_POINT_CENTER_MODE 0x165c
+#define NVC097_SET_POINT_CENTER_MODE_V 31:0
+#define NVC097_SET_POINT_CENTER_MODE_V_OGL 0x00000000
+#define NVC097_SET_POINT_CENTER_MODE_V_D3D 0x00000001
+
+#define NVC097_SET_LINE_SMOOTH_PARAMETERS 0x1668
+#define NVC097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF 31:0
+#define NVC097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_00 0x00000000
+#define NVC097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_33 0x00000001
+#define NVC097_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_60 0x00000002
+
+#define NVC097_SET_LINE_STIPPLE 0x166c
+#define NVC097_SET_LINE_STIPPLE_ENABLE 0:0
+#define NVC097_SET_LINE_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVC097_SET_LINE_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_LINE_SMOOTH_EDGE_TABLE(i) (0x1670+(i)*4)
+#define NVC097_SET_LINE_SMOOTH_EDGE_TABLE_V0 7:0
+#define NVC097_SET_LINE_SMOOTH_EDGE_TABLE_V1 15:8
+#define NVC097_SET_LINE_SMOOTH_EDGE_TABLE_V2 23:16
+#define NVC097_SET_LINE_SMOOTH_EDGE_TABLE_V3 31:24
+
+#define NVC097_SET_LINE_STIPPLE_PARAMETERS 0x1680
+#define NVC097_SET_LINE_STIPPLE_PARAMETERS_FACTOR 7:0
+#define NVC097_SET_LINE_STIPPLE_PARAMETERS_PATTERN 23:8
+
+#define NVC097_SET_PROVOKING_VERTEX 0x1684
+#define NVC097_SET_PROVOKING_VERTEX_V 0:0
+#define NVC097_SET_PROVOKING_VERTEX_V_FIRST 0x00000000
+#define NVC097_SET_PROVOKING_VERTEX_V_LAST 0x00000001
+
+#define NVC097_SET_TWO_SIDED_LIGHT 0x1688
+#define NVC097_SET_TWO_SIDED_LIGHT_ENABLE 0:0
+#define NVC097_SET_TWO_SIDED_LIGHT_ENABLE_FALSE 0x00000000
+#define NVC097_SET_TWO_SIDED_LIGHT_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_POLYGON_STIPPLE 0x168c
+#define NVC097_SET_POLYGON_STIPPLE_ENABLE 0:0
+#define NVC097_SET_POLYGON_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVC097_SET_POLYGON_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_SHADER_CONTROL 0x1690
+#define NVC097_SET_SHADER_CONTROL_DEFAULT_PARTIAL 0:0
+#define NVC097_SET_SHADER_CONTROL_DEFAULT_PARTIAL_ZERO 0x00000000
+#define NVC097_SET_SHADER_CONTROL_DEFAULT_PARTIAL_INFINITY 0x00000001
+#define NVC097_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR 1:1
+#define NVC097_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_LEGACY 0x00000000
+#define NVC097_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_FP64_COMPATIBLE 0x00000001
+#define NVC097_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR 2:2
+#define NVC097_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_ZERO 0x00000000
+#define NVC097_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_INDEFINITE 0x00000001
+
+#define NVC097_CHECK_CLASS_VERSION 0x16a0
+#define NVC097_CHECK_CLASS_VERSION_CURRENT 15:0
+#define NVC097_CHECK_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC097_SET_SPH_VERSION 0x16a4
+#define NVC097_SET_SPH_VERSION_CURRENT 15:0
+#define NVC097_SET_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC097_CHECK_SPH_VERSION 0x16a8
+#define NVC097_CHECK_SPH_VERSION_CURRENT 15:0
+#define NVC097_CHECK_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC097_SET_ALPHA_TO_COVERAGE_OVERRIDE 0x16b4
+#define NVC097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE 0:0
+#define NVC097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000
+#define NVC097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001
+#define NVC097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT 1:1
+#define NVC097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_DISABLE 0x00000000
+#define NVC097_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_ENABLE 0x00000001
+
+#define NVC097_SET_POLYGON_STIPPLE_PATTERN(i) (0x1700+(i)*4)
+#define NVC097_SET_POLYGON_STIPPLE_PATTERN_V 31:0
+
+#define NVC097_SET_AAM_VERSION 0x1790
+#define NVC097_SET_AAM_VERSION_CURRENT 15:0
+#define NVC097_SET_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC097_CHECK_AAM_VERSION 0x1794
+#define NVC097_CHECK_AAM_VERSION_CURRENT 15:0
+#define NVC097_CHECK_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC097_SET_ZT_LAYER 0x179c
+#define NVC097_SET_ZT_LAYER_OFFSET 15:0
+
+#define NVC097_SET_INDEX_BUFFER_A 0x17c8
+#define NVC097_SET_INDEX_BUFFER_A_ADDRESS_UPPER 7:0
+
+#define NVC097_SET_INDEX_BUFFER_B 0x17cc
+#define NVC097_SET_INDEX_BUFFER_B_ADDRESS_LOWER 31:0
+
+#define NVC097_SET_INDEX_BUFFER_C 0x17d0
+#define NVC097_SET_INDEX_BUFFER_C_LIMIT_ADDRESS_UPPER 7:0
+
+#define NVC097_SET_INDEX_BUFFER_D 0x17d4
+#define NVC097_SET_INDEX_BUFFER_D_LIMIT_ADDRESS_LOWER 31:0
+
+#define NVC097_SET_INDEX_BUFFER_E 0x17d8
+#define NVC097_SET_INDEX_BUFFER_E_INDEX_SIZE 1:0
+#define NVC097_SET_INDEX_BUFFER_E_INDEX_SIZE_ONE_BYTE 0x00000000
+#define NVC097_SET_INDEX_BUFFER_E_INDEX_SIZE_TWO_BYTES 0x00000001
+#define NVC097_SET_INDEX_BUFFER_E_INDEX_SIZE_FOUR_BYTES 0x00000002
+
+#define NVC097_SET_INDEX_BUFFER_F 0x17dc
+#define NVC097_SET_INDEX_BUFFER_F_FIRST 31:0
+
+#define NVC097_DRAW_INDEX_BUFFER 0x17e0
+#define NVC097_DRAW_INDEX_BUFFER_COUNT 31:0
+
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST 0x17e4
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST 0x17e8
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST 0x17ec
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f0
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC097_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f4
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC097_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f8
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC097_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC097_SET_DEPTH_BIAS_CLAMP 0x187c
+#define NVC097_SET_DEPTH_BIAS_CLAMP_V 31:0
+
+#define NVC097_SET_VERTEX_STREAM_INSTANCE_A(i) (0x1880+(i)*4)
+#define NVC097_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED 0:0
+#define NVC097_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_FALSE 0x00000000
+#define NVC097_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_TRUE 0x00000001
+
+#define NVC097_SET_VERTEX_STREAM_INSTANCE_B(i) (0x18c0+(i)*4)
+#define NVC097_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED 0:0
+#define NVC097_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_FALSE 0x00000000
+#define NVC097_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_TRUE 0x00000001
+
+#define NVC097_SET_ATTRIBUTE_POINT_SIZE 0x1910
+#define NVC097_SET_ATTRIBUTE_POINT_SIZE_ENABLE 0:0
+#define NVC097_SET_ATTRIBUTE_POINT_SIZE_ENABLE_FALSE 0x00000000
+#define NVC097_SET_ATTRIBUTE_POINT_SIZE_ENABLE_TRUE 0x00000001
+#define NVC097_SET_ATTRIBUTE_POINT_SIZE_SLOT 11:4
+
+#define NVC097_OGL_SET_CULL 0x1918
+#define NVC097_OGL_SET_CULL_ENABLE 0:0
+#define NVC097_OGL_SET_CULL_ENABLE_FALSE 0x00000000
+#define NVC097_OGL_SET_CULL_ENABLE_TRUE 0x00000001
+
+#define NVC097_OGL_SET_FRONT_FACE 0x191c
+#define NVC097_OGL_SET_FRONT_FACE_V 31:0
+#define NVC097_OGL_SET_FRONT_FACE_V_CW 0x00000900
+#define NVC097_OGL_SET_FRONT_FACE_V_CCW 0x00000901
+
+#define NVC097_OGL_SET_CULL_FACE 0x1920
+#define NVC097_OGL_SET_CULL_FACE_V 31:0
+#define NVC097_OGL_SET_CULL_FACE_V_FRONT 0x00000404
+#define NVC097_OGL_SET_CULL_FACE_V_BACK 0x00000405
+#define NVC097_OGL_SET_CULL_FACE_V_FRONT_AND_BACK 0x00000408
+
+#define NVC097_SET_VIEWPORT_PIXEL 0x1924
+#define NVC097_SET_VIEWPORT_PIXEL_CENTER 0:0
+#define NVC097_SET_VIEWPORT_PIXEL_CENTER_AT_HALF_INTEGERS 0x00000000
+#define NVC097_SET_VIEWPORT_PIXEL_CENTER_AT_INTEGERS 0x00000001
+
+#define NVC097_SET_VIEWPORT_SCALE_OFFSET 0x192c
+#define NVC097_SET_VIEWPORT_SCALE_OFFSET_ENABLE 0:0
+#define NVC097_SET_VIEWPORT_SCALE_OFFSET_ENABLE_FALSE 0x00000000
+#define NVC097_SET_VIEWPORT_SCALE_OFFSET_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL 0x193c
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE 0:0
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_FALSE 0x00000000
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_TRUE 0x00000001
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z 3:3
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLIP 0x00000000
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLAMP 0x00000001
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z 4:4
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLIP 0x00000000
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLAMP 0x00000001
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND 7:7
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_256 0x00000000
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_1 0x00000001
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND 10:10
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_256 0x00000000
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_1 0x00000001
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP 13:11
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP 0x00000000
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_PASSTHRU 0x00000001
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XY_CLIP 0x00000002
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XYZ_CLIP 0x00000003
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP_NO_Z_CULL 0x00000004
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_Z_CLIP 0x00000005
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_TRI_FILL_OR_CLIP 0x00000006
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z 2:1
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SAME_AS_XY_GUARDBAND 0x00000000
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_256 0x00000001
+#define NVC097_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_1 0x00000002
+
+#define NVC097_SET_USER_CLIP_OP 0x1940
+#define NVC097_SET_USER_CLIP_OP_PLANE0 0:0
+#define NVC097_SET_USER_CLIP_OP_PLANE0_CLIP 0x00000000
+#define NVC097_SET_USER_CLIP_OP_PLANE0_CULL 0x00000001
+#define NVC097_SET_USER_CLIP_OP_PLANE1 4:4
+#define NVC097_SET_USER_CLIP_OP_PLANE1_CLIP 0x00000000
+#define NVC097_SET_USER_CLIP_OP_PLANE1_CULL 0x00000001
+#define NVC097_SET_USER_CLIP_OP_PLANE2 8:8
+#define NVC097_SET_USER_CLIP_OP_PLANE2_CLIP 0x00000000
+#define NVC097_SET_USER_CLIP_OP_PLANE2_CULL 0x00000001
+#define NVC097_SET_USER_CLIP_OP_PLANE3 12:12
+#define NVC097_SET_USER_CLIP_OP_PLANE3_CLIP 0x00000000
+#define NVC097_SET_USER_CLIP_OP_PLANE3_CULL 0x00000001
+#define NVC097_SET_USER_CLIP_OP_PLANE4 16:16
+#define NVC097_SET_USER_CLIP_OP_PLANE4_CLIP 0x00000000
+#define NVC097_SET_USER_CLIP_OP_PLANE4_CULL 0x00000001
+#define NVC097_SET_USER_CLIP_OP_PLANE5 20:20
+#define NVC097_SET_USER_CLIP_OP_PLANE5_CLIP 0x00000000
+#define NVC097_SET_USER_CLIP_OP_PLANE5_CULL 0x00000001
+#define NVC097_SET_USER_CLIP_OP_PLANE6 24:24
+#define NVC097_SET_USER_CLIP_OP_PLANE6_CLIP 0x00000000
+#define NVC097_SET_USER_CLIP_OP_PLANE6_CULL 0x00000001
+#define NVC097_SET_USER_CLIP_OP_PLANE7 28:28
+#define NVC097_SET_USER_CLIP_OP_PLANE7_CLIP 0x00000000
+#define NVC097_SET_USER_CLIP_OP_PLANE7_CULL 0x00000001
+
+#define NVC097_SET_RENDER_ENABLE_OVERRIDE 0x1944
+#define NVC097_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0
+#define NVC097_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000
+#define NVC097_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001
+#define NVC097_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002
+
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_CONTROL 0x1948
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE 0:0
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_TOPOLOGY_IN_BEGIN_METHODS 0x00000000
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_SEPARATE_TOPOLOGY_STATE 0x00000001
+
+#define NVC097_SET_WINDOW_CLIP_ENABLE 0x194c
+#define NVC097_SET_WINDOW_CLIP_ENABLE_V 0:0
+#define NVC097_SET_WINDOW_CLIP_ENABLE_V_FALSE 0x00000000
+#define NVC097_SET_WINDOW_CLIP_ENABLE_V_TRUE 0x00000001
+
+#define NVC097_SET_WINDOW_CLIP_TYPE 0x1950
+#define NVC097_SET_WINDOW_CLIP_TYPE_V 1:0
+#define NVC097_SET_WINDOW_CLIP_TYPE_V_INCLUSIVE 0x00000000
+#define NVC097_SET_WINDOW_CLIP_TYPE_V_EXCLUSIVE 0x00000001
+#define NVC097_SET_WINDOW_CLIP_TYPE_V_CLIPALL 0x00000002
+
+#define NVC097_INVALIDATE_ZCULL 0x1958
+#define NVC097_INVALIDATE_ZCULL_V 31:0
+#define NVC097_INVALIDATE_ZCULL_V_INVALIDATE 0x00000000
+
+#define NVC097_SET_ZCULL 0x1968
+#define NVC097_SET_ZCULL_Z_ENABLE 0:0
+#define NVC097_SET_ZCULL_Z_ENABLE_FALSE 0x00000000
+#define NVC097_SET_ZCULL_Z_ENABLE_TRUE 0x00000001
+#define NVC097_SET_ZCULL_STENCIL_ENABLE 4:4
+#define NVC097_SET_ZCULL_STENCIL_ENABLE_FALSE 0x00000000
+#define NVC097_SET_ZCULL_STENCIL_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_ZCULL_BOUNDS 0x196c
+#define NVC097_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE 0:0
+#define NVC097_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVC097_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_TRUE 0x00000001
+#define NVC097_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE 4:4
+#define NVC097_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVC097_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_PRIMITIVE_TOPOLOGY 0x1970
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V 15:0
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_POINTLIST 0x00000001
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LINELIST 0x00000002
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP 0x00000003
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST 0x00000004
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP 0x00000005
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LINELIST_ADJCY 0x0000000A
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP_ADJCY 0x0000000B
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_PATCHLIST 0x0000000E
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_POINTS 0x00001001
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST 0x00001002
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST 0x00001003
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST 0x0000100F
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINESTRIP 0x00001010
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINESTRIP 0x00001011
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLELIST 0x00001012
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLESTRIP 0x00001013
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLESTRIP 0x00001014
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN 0x00001015
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLEFAN 0x00001016
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN_IMM 0x00001017
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST_IMM 0x00001018
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST2 0x0000101A
+#define NVC097_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST2 0x0000101B
+
+#define NVC097_ZCULL_SYNC 0x1978
+#define NVC097_ZCULL_SYNC_V 31:0
+
+#define NVC097_SET_CLIP_ID_TEST 0x197c
+#define NVC097_SET_CLIP_ID_TEST_ENABLE 0:0
+#define NVC097_SET_CLIP_ID_TEST_ENABLE_FALSE 0x00000000
+#define NVC097_SET_CLIP_ID_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_SURFACE_CLIP_ID_WIDTH 0x1980
+#define NVC097_SET_SURFACE_CLIP_ID_WIDTH_V 31:0
+
+#define NVC097_SET_CLIP_ID 0x1984
+#define NVC097_SET_CLIP_ID_V 31:0
+
+#define NVC097_SET_DEPTH_BOUNDS_TEST 0x19bc
+#define NVC097_SET_DEPTH_BOUNDS_TEST_ENABLE 0:0
+#define NVC097_SET_DEPTH_BOUNDS_TEST_ENABLE_FALSE 0x00000000
+#define NVC097_SET_DEPTH_BOUNDS_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_BLEND_FLOAT_OPTION 0x19c0
+#define NVC097_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO 0:0
+#define NVC097_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_FALSE 0x00000000
+#define NVC097_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_TRUE 0x00000001
+
+#define NVC097_SET_LOGIC_OP 0x19c4
+#define NVC097_SET_LOGIC_OP_ENABLE 0:0
+#define NVC097_SET_LOGIC_OP_ENABLE_FALSE 0x00000000
+#define NVC097_SET_LOGIC_OP_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_LOGIC_OP_FUNC 0x19c8
+#define NVC097_SET_LOGIC_OP_FUNC_V 31:0
+#define NVC097_SET_LOGIC_OP_FUNC_V_CLEAR 0x00001500
+#define NVC097_SET_LOGIC_OP_FUNC_V_AND 0x00001501
+#define NVC097_SET_LOGIC_OP_FUNC_V_AND_REVERSE 0x00001502
+#define NVC097_SET_LOGIC_OP_FUNC_V_COPY 0x00001503
+#define NVC097_SET_LOGIC_OP_FUNC_V_AND_INVERTED 0x00001504
+#define NVC097_SET_LOGIC_OP_FUNC_V_NOOP 0x00001505
+#define NVC097_SET_LOGIC_OP_FUNC_V_XOR 0x00001506
+#define NVC097_SET_LOGIC_OP_FUNC_V_OR 0x00001507
+#define NVC097_SET_LOGIC_OP_FUNC_V_NOR 0x00001508
+#define NVC097_SET_LOGIC_OP_FUNC_V_EQUIV 0x00001509
+#define NVC097_SET_LOGIC_OP_FUNC_V_INVERT 0x0000150A
+#define NVC097_SET_LOGIC_OP_FUNC_V_OR_REVERSE 0x0000150B
+#define NVC097_SET_LOGIC_OP_FUNC_V_COPY_INVERTED 0x0000150C
+#define NVC097_SET_LOGIC_OP_FUNC_V_OR_INVERTED 0x0000150D
+#define NVC097_SET_LOGIC_OP_FUNC_V_NAND 0x0000150E
+#define NVC097_SET_LOGIC_OP_FUNC_V_SET 0x0000150F
+
+#define NVC097_SET_Z_COMPRESSION 0x19cc
+#define NVC097_SET_Z_COMPRESSION_ENABLE 0:0
+#define NVC097_SET_Z_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVC097_SET_Z_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVC097_CLEAR_SURFACE 0x19d0
+#define NVC097_CLEAR_SURFACE_Z_ENABLE 0:0
+#define NVC097_CLEAR_SURFACE_Z_ENABLE_FALSE 0x00000000
+#define NVC097_CLEAR_SURFACE_Z_ENABLE_TRUE 0x00000001
+#define NVC097_CLEAR_SURFACE_STENCIL_ENABLE 1:1
+#define NVC097_CLEAR_SURFACE_STENCIL_ENABLE_FALSE 0x00000000
+#define NVC097_CLEAR_SURFACE_STENCIL_ENABLE_TRUE 0x00000001
+#define NVC097_CLEAR_SURFACE_R_ENABLE 2:2
+#define NVC097_CLEAR_SURFACE_R_ENABLE_FALSE 0x00000000
+#define NVC097_CLEAR_SURFACE_R_ENABLE_TRUE 0x00000001
+#define NVC097_CLEAR_SURFACE_G_ENABLE 3:3
+#define NVC097_CLEAR_SURFACE_G_ENABLE_FALSE 0x00000000
+#define NVC097_CLEAR_SURFACE_G_ENABLE_TRUE 0x00000001
+#define NVC097_CLEAR_SURFACE_B_ENABLE 4:4
+#define NVC097_CLEAR_SURFACE_B_ENABLE_FALSE 0x00000000
+#define NVC097_CLEAR_SURFACE_B_ENABLE_TRUE 0x00000001
+#define NVC097_CLEAR_SURFACE_A_ENABLE 5:5
+#define NVC097_CLEAR_SURFACE_A_ENABLE_FALSE 0x00000000
+#define NVC097_CLEAR_SURFACE_A_ENABLE_TRUE 0x00000001
+#define NVC097_CLEAR_SURFACE_MRT_SELECT 9:6
+#define NVC097_CLEAR_SURFACE_RT_ARRAY_INDEX 25:10
+
+#define NVC097_CLEAR_CLIP_ID_SURFACE 0x19d4
+#define NVC097_CLEAR_CLIP_ID_SURFACE_V 31:0
+
+#define NVC097_SET_COLOR_COMPRESSION(i) (0x19e0+(i)*4)
+#define NVC097_SET_COLOR_COMPRESSION_ENABLE 0:0
+#define NVC097_SET_COLOR_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVC097_SET_COLOR_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_CT_WRITE(i) (0x1a00+(i)*4)
+#define NVC097_SET_CT_WRITE_R_ENABLE 0:0
+#define NVC097_SET_CT_WRITE_R_ENABLE_FALSE 0x00000000
+#define NVC097_SET_CT_WRITE_R_ENABLE_TRUE 0x00000001
+#define NVC097_SET_CT_WRITE_G_ENABLE 4:4
+#define NVC097_SET_CT_WRITE_G_ENABLE_FALSE 0x00000000
+#define NVC097_SET_CT_WRITE_G_ENABLE_TRUE 0x00000001
+#define NVC097_SET_CT_WRITE_B_ENABLE 8:8
+#define NVC097_SET_CT_WRITE_B_ENABLE_FALSE 0x00000000
+#define NVC097_SET_CT_WRITE_B_ENABLE_TRUE 0x00000001
+#define NVC097_SET_CT_WRITE_A_ENABLE 12:12
+#define NVC097_SET_CT_WRITE_A_ENABLE_FALSE 0x00000000
+#define NVC097_SET_CT_WRITE_A_ENABLE_TRUE 0x00000001
+
+#define NVC097_PIPE_NOP 0x1a2c
+#define NVC097_PIPE_NOP_V 31:0
+
+#define NVC097_SET_SPARE00 0x1a30
+#define NVC097_SET_SPARE00_V 31:0
+
+#define NVC097_SET_SPARE01 0x1a34
+#define NVC097_SET_SPARE01_V 31:0
+
+#define NVC097_SET_SPARE02 0x1a38
+#define NVC097_SET_SPARE02_V 31:0
+
+#define NVC097_SET_SPARE03 0x1a3c
+#define NVC097_SET_SPARE03_V 31:0
+
+#define NVC097_SET_REPORT_SEMAPHORE_A 0x1b00
+#define NVC097_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0
+
+#define NVC097_SET_REPORT_SEMAPHORE_B 0x1b04
+#define NVC097_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0
+
+#define NVC097_SET_REPORT_SEMAPHORE_C 0x1b08
+#define NVC097_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0
+
+#define NVC097_SET_REPORT_SEMAPHORE_D 0x1b0c
+#define NVC097_SET_REPORT_SEMAPHORE_D_OPERATION 1:0
+#define NVC097_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000
+#define NVC097_SET_REPORT_SEMAPHORE_D_OPERATION_ACQUIRE 0x00000001
+#define NVC097_SET_REPORT_SEMAPHORE_D_OPERATION_REPORT_ONLY 0x00000002
+#define NVC097_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003
+#define NVC097_SET_REPORT_SEMAPHORE_D_RELEASE 4:4
+#define NVC097_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_READS_COMPLETE 0x00000000
+#define NVC097_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_WRITES_COMPLETE 0x00000001
+#define NVC097_SET_REPORT_SEMAPHORE_D_ACQUIRE 8:8
+#define NVC097_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_WRITES_START 0x00000000
+#define NVC097_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_READS_START 0x00000001
+#define NVC097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION 15:12
+#define NVC097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_NONE 0x00000000
+#define NVC097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DATA_ASSEMBLER 0x00000001
+#define NVC097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VERTEX_SHADER 0x00000002
+#define NVC097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_INIT_SHADER 0x00000008
+#define NVC097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_SHADER 0x00000009
+#define NVC097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_GEOMETRY_SHADER 0x00000006
+#define NVC097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_STREAMING_OUTPUT 0x00000005
+#define NVC097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VPC 0x00000004
+#define NVC097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ZCULL 0x00000007
+#define NVC097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_PIXEL_SHADER 0x0000000A
+#define NVC097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DEPTH_TEST 0x0000000C
+#define NVC097_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ALL 0x0000000F
+#define NVC097_SET_REPORT_SEMAPHORE_D_COMPARISON 16:16
+#define NVC097_SET_REPORT_SEMAPHORE_D_COMPARISON_EQ 0x00000000
+#define NVC097_SET_REPORT_SEMAPHORE_D_COMPARISON_GE 0x00000001
+#define NVC097_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20
+#define NVC097_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000
+#define NVC097_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT 27:23
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_NONE 0x00000000
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_DA_VERTICES_GENERATED 0x00000001
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_DA_PRIMITIVES_GENERATED 0x00000003
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_VS_INVOCATIONS 0x00000005
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_TI_INVOCATIONS 0x0000001B
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_TS_INVOCATIONS 0x0000001D
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_TS_PRIMITIVES_GENERATED 0x0000001F
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_GS_INVOCATIONS 0x00000007
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_GS_PRIMITIVES_GENERATED 0x00000009
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_ALPHA_BETA_CLOCKS 0x00000004
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_VTG_PRIMITIVES_OUT 0x00000012
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x0000001E
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_SUCCEEDED 0x0000000B
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED 0x0000000D
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000006
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_BYTE_COUNT 0x0000001A
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_INVOCATIONS 0x0000000F
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_PRIMITIVES_GENERATED 0x00000011
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS0 0x0000000A
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS1 0x0000000C
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS2 0x0000000E
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS3 0x00000010
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_PS_INVOCATIONS 0x00000013
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT 0x00000002
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT64 0x00000015
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_TILED_ZPASS_PIXEL_CNT64 0x00000017
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_COLOR_TARGET 0x00000018
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_ZETA_TARGET 0x00000019
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_BOUNDING_RECTANGLE 0x0000001C
+#define NVC097_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28
+#define NVC097_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000
+#define NVC097_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001
+#define NVC097_SET_REPORT_SEMAPHORE_D_SUB_REPORT 7:5
+#define NVC097_SET_REPORT_SEMAPHORE_D_REPORT_DWORD_NUMBER 21:21
+#define NVC097_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2
+#define NVC097_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000
+#define NVC097_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001
+#define NVC097_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3
+#define NVC097_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000
+#define NVC097_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001
+#define NVC097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9
+#define NVC097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000
+#define NVC097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001
+#define NVC097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002
+#define NVC097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003
+#define NVC097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004
+#define NVC097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005
+#define NVC097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006
+#define NVC097_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007
+#define NVC097_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17
+#define NVC097_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000
+#define NVC097_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001
+
+#define NVC097_SET_VERTEX_STREAM_A_FORMAT(j) (0x1c00+(j)*16)
+#define NVC097_SET_VERTEX_STREAM_A_FORMAT_STRIDE 11:0
+#define NVC097_SET_VERTEX_STREAM_A_FORMAT_ENABLE 12:12
+#define NVC097_SET_VERTEX_STREAM_A_FORMAT_ENABLE_FALSE 0x00000000
+#define NVC097_SET_VERTEX_STREAM_A_FORMAT_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_VERTEX_STREAM_A_LOCATION_A(j) (0x1c04+(j)*16)
+#define NVC097_SET_VERTEX_STREAM_A_LOCATION_A_OFFSET_UPPER 7:0
+
+#define NVC097_SET_VERTEX_STREAM_A_LOCATION_B(j) (0x1c08+(j)*16)
+#define NVC097_SET_VERTEX_STREAM_A_LOCATION_B_OFFSET_LOWER 31:0
+
+#define NVC097_SET_VERTEX_STREAM_A_FREQUENCY(j) (0x1c0c+(j)*16)
+#define NVC097_SET_VERTEX_STREAM_A_FREQUENCY_V 31:0
+
+#define NVC097_SET_VERTEX_STREAM_B_FORMAT(j) (0x1d00+(j)*16)
+#define NVC097_SET_VERTEX_STREAM_B_FORMAT_STRIDE 11:0
+#define NVC097_SET_VERTEX_STREAM_B_FORMAT_ENABLE 12:12
+#define NVC097_SET_VERTEX_STREAM_B_FORMAT_ENABLE_FALSE 0x00000000
+#define NVC097_SET_VERTEX_STREAM_B_FORMAT_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_VERTEX_STREAM_B_LOCATION_A(j) (0x1d04+(j)*16)
+#define NVC097_SET_VERTEX_STREAM_B_LOCATION_A_OFFSET_UPPER 7:0
+
+#define NVC097_SET_VERTEX_STREAM_B_LOCATION_B(j) (0x1d08+(j)*16)
+#define NVC097_SET_VERTEX_STREAM_B_LOCATION_B_OFFSET_LOWER 31:0
+
+#define NVC097_SET_VERTEX_STREAM_B_FREQUENCY(j) (0x1d0c+(j)*16)
+#define NVC097_SET_VERTEX_STREAM_B_FREQUENCY_V 31:0
+
+#define NVC097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA(j) (0x1e00+(j)*32)
+#define NVC097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE 0:0
+#define NVC097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000
+#define NVC097_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_OP(j) (0x1e04+(j)*32)
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_OP_V 31:0
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MIN 0x00008007
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MAX 0x00008008
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_ADD 0x00000001
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_SUBTRACT 0x00000002
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MIN 0x00000004
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MAX 0x00000005
+
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF(j) (0x1e08+(j)*32)
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V 31:0
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF(j) (0x1e0c+(j)*32)
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V 31:0
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC097_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_OP(j) (0x1e10+(j)*32) +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_OP_V 31:0 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MIN 0x00008007 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MAX 0x00008008 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_ADD 0x00000001 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MIN 0x00000004 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MAX 0x00000005 + +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF(j) (0x1e14+(j)*32) +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V 31:0 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define 
NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF(j) (0x1e18+(j)*32) +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V 31:0 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define 
NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC097_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC097_SET_VERTEX_STREAM_LIMIT_A_A(j) (0x1f00+(j)*8) +#define NVC097_SET_VERTEX_STREAM_LIMIT_A_A_UPPER 7:0 + +#define NVC097_SET_VERTEX_STREAM_LIMIT_A_B(j) (0x1f04+(j)*8) +#define NVC097_SET_VERTEX_STREAM_LIMIT_A_B_LOWER 31:0 + +#define NVC097_SET_VERTEX_STREAM_LIMIT_B_A(j) (0x1f80+(j)*8) +#define NVC097_SET_VERTEX_STREAM_LIMIT_B_A_UPPER 7:0 + +#define NVC097_SET_VERTEX_STREAM_LIMIT_B_B(j) (0x1f84+(j)*8) +#define NVC097_SET_VERTEX_STREAM_LIMIT_B_B_LOWER 31:0 + +#define NVC097_SET_PIPELINE_SHADER(j) (0x2000+(j)*64) +#define NVC097_SET_PIPELINE_SHADER_ENABLE 0:0 +#define NVC097_SET_PIPELINE_SHADER_ENABLE_FALSE 0x00000000 +#define NVC097_SET_PIPELINE_SHADER_ENABLE_TRUE 0x00000001 +#define NVC097_SET_PIPELINE_SHADER_TYPE 7:4 +#define NVC097_SET_PIPELINE_SHADER_TYPE_VERTEX_CULL_BEFORE_FETCH 0x00000000 +#define NVC097_SET_PIPELINE_SHADER_TYPE_VERTEX 0x00000001 +#define NVC097_SET_PIPELINE_SHADER_TYPE_TESSELLATION_INIT 0x00000002 +#define NVC097_SET_PIPELINE_SHADER_TYPE_TESSELLATION 0x00000003 +#define NVC097_SET_PIPELINE_SHADER_TYPE_GEOMETRY 0x00000004 +#define NVC097_SET_PIPELINE_SHADER_TYPE_PIXEL 0x00000005 + +#define NVC097_SET_PIPELINE_PROGRAM(j) (0x2004+(j)*64) +#define NVC097_SET_PIPELINE_PROGRAM_OFFSET 31:0 + +#define NVC097_SET_PIPELINE_RESERVED_A(j) (0x2008+(j)*64) +#define NVC097_SET_PIPELINE_RESERVED_A_V 0:0 + +#define NVC097_SET_PIPELINE_REGISTER_COUNT(j) (0x200c+(j)*64) +#define NVC097_SET_PIPELINE_REGISTER_COUNT_V 7:0 + +#define NVC097_SET_PIPELINE_BINDING(j) (0x2010+(j)*64) +#define NVC097_SET_PIPELINE_BINDING_GROUP 2:0 + +#define NVC097_SET_PIPELINE_RESERVED_B(j) (0x2014+(j)*64) +#define NVC097_SET_PIPELINE_RESERVED_B_V 0:0 + +#define NVC097_SET_PIPELINE_RESERVED_C(j) (0x2018+(j)*64) +#define NVC097_SET_PIPELINE_RESERVED_C_V 0:0 + +#define NVC097_SET_PIPELINE_RESERVED_D(j) (0x201c+(j)*64) +#define NVC097_SET_PIPELINE_RESERVED_D_V 0:0 + +#define NVC097_SET_PIPELINE_RESERVED_E(j) (0x2020+(j)*64) +#define NVC097_SET_PIPELINE_RESERVED_E_V 0:0 + +#define NVC097_SET_FALCON00 0x2300 +#define NVC097_SET_FALCON00_V 31:0 + +#define 
NVC097_SET_FALCON01 0x2304 +#define NVC097_SET_FALCON01_V 31:0 + +#define NVC097_SET_FALCON02 0x2308 +#define NVC097_SET_FALCON02_V 31:0 + +#define NVC097_SET_FALCON03 0x230c +#define NVC097_SET_FALCON03_V 31:0 + +#define NVC097_SET_FALCON04 0x2310 +#define NVC097_SET_FALCON04_V 31:0 + +#define NVC097_SET_FALCON05 0x2314 +#define NVC097_SET_FALCON05_V 31:0 + +#define NVC097_SET_FALCON06 0x2318 +#define NVC097_SET_FALCON06_V 31:0 + +#define NVC097_SET_FALCON07 0x231c +#define NVC097_SET_FALCON07_V 31:0 + +#define NVC097_SET_FALCON08 0x2320 +#define NVC097_SET_FALCON08_V 31:0 + +#define NVC097_SET_FALCON09 0x2324 +#define NVC097_SET_FALCON09_V 31:0 + +#define NVC097_SET_FALCON10 0x2328 +#define NVC097_SET_FALCON10_V 31:0 + +#define NVC097_SET_FALCON11 0x232c +#define NVC097_SET_FALCON11_V 31:0 + +#define NVC097_SET_FALCON12 0x2330 +#define NVC097_SET_FALCON12_V 31:0 + +#define NVC097_SET_FALCON13 0x2334 +#define NVC097_SET_FALCON13_V 31:0 + +#define NVC097_SET_FALCON14 0x2338 +#define NVC097_SET_FALCON14_V 31:0 + +#define NVC097_SET_FALCON15 0x233c +#define NVC097_SET_FALCON15_V 31:0 + +#define NVC097_SET_FALCON16 0x2340 +#define NVC097_SET_FALCON16_V 31:0 + +#define NVC097_SET_FALCON17 0x2344 +#define NVC097_SET_FALCON17_V 31:0 + +#define NVC097_SET_FALCON18 0x2348 +#define NVC097_SET_FALCON18_V 31:0 + +#define NVC097_SET_FALCON19 0x234c +#define NVC097_SET_FALCON19_V 31:0 + +#define NVC097_SET_FALCON20 0x2350 +#define NVC097_SET_FALCON20_V 31:0 + +#define NVC097_SET_FALCON21 0x2354 +#define NVC097_SET_FALCON21_V 31:0 + +#define NVC097_SET_FALCON22 0x2358 +#define NVC097_SET_FALCON22_V 31:0 + +#define NVC097_SET_FALCON23 0x235c +#define NVC097_SET_FALCON23_V 31:0 + +#define NVC097_SET_FALCON24 0x2360 +#define NVC097_SET_FALCON24_V 31:0 + +#define NVC097_SET_FALCON25 0x2364 +#define NVC097_SET_FALCON25_V 31:0 + +#define NVC097_SET_FALCON26 0x2368 +#define NVC097_SET_FALCON26_V 31:0 + +#define NVC097_SET_FALCON27 0x236c +#define NVC097_SET_FALCON27_V 31:0 + +#define NVC097_SET_FALCON28 0x2370 +#define NVC097_SET_FALCON28_V 31:0 + +#define NVC097_SET_FALCON29 0x2374 +#define NVC097_SET_FALCON29_V 31:0 + +#define NVC097_SET_FALCON30 0x2378 +#define NVC097_SET_FALCON30_V 31:0 + +#define NVC097_SET_FALCON31 0x237c +#define NVC097_SET_FALCON31_V 31:0 + +#define NVC097_SET_CONSTANT_BUFFER_SELECTOR_A 0x2380 +#define NVC097_SET_CONSTANT_BUFFER_SELECTOR_A_SIZE 16:0 + +#define NVC097_SET_CONSTANT_BUFFER_SELECTOR_B 0x2384 +#define NVC097_SET_CONSTANT_BUFFER_SELECTOR_B_ADDRESS_UPPER 7:0 + +#define NVC097_SET_CONSTANT_BUFFER_SELECTOR_C 0x2388 +#define NVC097_SET_CONSTANT_BUFFER_SELECTOR_C_ADDRESS_LOWER 31:0 + +#define NVC097_LOAD_CONSTANT_BUFFER_OFFSET 0x238c +#define NVC097_LOAD_CONSTANT_BUFFER_OFFSET_V 15:0 + +#define NVC097_LOAD_CONSTANT_BUFFER(i) (0x2390+(i)*4) +#define NVC097_LOAD_CONSTANT_BUFFER_V 31:0 + +#define NVC097_BIND_GROUP_RESERVED_A(j) (0x2400+(j)*32) +#define NVC097_BIND_GROUP_RESERVED_A_V 0:0 + +#define NVC097_BIND_GROUP_RESERVED_B(j) (0x2404+(j)*32) +#define NVC097_BIND_GROUP_RESERVED_B_V 0:0 + +#define NVC097_BIND_GROUP_RESERVED_C(j) (0x2408+(j)*32) +#define NVC097_BIND_GROUP_RESERVED_C_V 0:0 + +#define NVC097_BIND_GROUP_RESERVED_D(j) (0x240c+(j)*32) +#define NVC097_BIND_GROUP_RESERVED_D_V 0:0 + +#define NVC097_BIND_GROUP_CONSTANT_BUFFER(j) (0x2410+(j)*32) +#define NVC097_BIND_GROUP_CONSTANT_BUFFER_VALID 0:0 +#define NVC097_BIND_GROUP_CONSTANT_BUFFER_VALID_FALSE 0x00000000 +#define NVC097_BIND_GROUP_CONSTANT_BUFFER_VALID_TRUE 0x00000001 +#define 
NVC097_BIND_GROUP_CONSTANT_BUFFER_SHADER_SLOT 8:4
+
+#define NVC097_SET_COLOR_CLAMP 0x2600
+#define NVC097_SET_COLOR_CLAMP_ENABLE 0:0
+#define NVC097_SET_COLOR_CLAMP_ENABLE_FALSE 0x00000000
+#define NVC097_SET_COLOR_CLAMP_ENABLE_TRUE 0x00000001
+
+#define NVC097_SET_BINDLESS_TEXTURE 0x2608
+#define NVC097_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 4:0
+
+#define NVC097_SET_TRAP_HANDLER 0x260c
+#define NVC097_SET_TRAP_HANDLER_OFFSET 31:0
+
+#define NVC097_SET_STREAM_OUT_LAYOUT_SELECT(i,j) (0x2800+(i)*128+(j)*4)
+#define NVC097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER00 7:0
+#define NVC097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER01 15:8
+#define NVC097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER02 23:16
+#define NVC097_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER03 31:24
+
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4)
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0
+
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4)
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0
+
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4)
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0
+
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4)
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30
+
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4)
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4
+
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0
+
+#define NVC097_START_SHADER_PERFORMANCE_COUNTER 0x33e0
+#define NVC097_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVC097_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4
+#define NVC097_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER 0x33e8
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER_V 31:0
+
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER 0x33ec
+#define NVC097_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER_V 31:0
+
+#define NVC097_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4)
+#define NVC097_SET_MME_SHADOW_SCRATCH_V 31:0
+
+#define NVC097_CALL_MME_MACRO(j) (0x3800+(j)*8)
+#define NVC097_CALL_MME_MACRO_V 31:0
+
+#define NVC097_CALL_MME_DATA(j) (0x3804+(j)*8)
+#define NVC097_CALL_MME_DATA_V 31:0
+
+#endif /* _cl_pascal_a_h_ */
diff --git a/src/common/sdk/nvidia/inc/class/clc097tex.h b/src/common/sdk/nvidia/inc/class/clc097tex.h
new file mode 100644
index 0000000..7c5a822
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc097tex.h
@@ -0,0 +1,1353 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/* AUTO GENERATED FILE -- DO NOT EDIT */
+
+#ifndef __CLC097TEX_H__
+#define __CLC097TEX_H__
+
+/*
+** Texture Header State Blocklinear
+ */
+
+#define NVC097_TEXHEAD_BL_COMPONENTS MW(6:0)
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_R32_G32_B32 0x00000002
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_R32_G32 0x00000004
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_R32_B24G8 0x00000005
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_X8B8G8R8 0x00000007
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_A8B8G8R8 0x00000008
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_A2B10G10R10 0x00000009
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_R16_G16 0x0000000c
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_G8R24 0x0000000d
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_G24R8 0x0000000e
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_R32 0x0000000f
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_A4B4G4R4 0x00000012
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_A5B5G5R1 0x00000013
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_A1B5G5R5 0x00000014
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_B5G6R5 0x00000015
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_B6G5R5 0x00000016
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_G8R8 0x00000018
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_R16 0x0000001b
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_R8 0x0000001d
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_G4R4 0x0000001e
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_R1 0x0000001f
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_BF10GF11RF11 0x00000021
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_G8B8G8R8 0x00000022
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_B8G8R8G8 0x00000023
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_DXT1 0x00000024
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_DXT23 0x00000025
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_DXT45 0x00000026
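The clc097.h methods earlier in this patch pair a 32-bit method offset (arrayed methods fold the index into the offset, e.g. NVC097_CALL_MME_MACRO(j) is 0x3800+(j)*8) with hi:lo bit ranges inside the method's 32-bit data word, while the clc097tex.h fields use MW(hi:lo) ranges that address a whole 256-bit texture header as one flat bit vector across eight 32-bit words (MW(144:128), for instance, lands in bits 16:0 of word 4). A minimal C sketch of helpers for both conventions follows; the helper names and the uint32_t[8] header representation are assumptions for illustration only, not part of these headers:

    #include <stdint.h>

    /* Single-dword fields (clc097.h style): turn a hi:lo pair into a
     * mask and a shifted value for the 32-bit method data word. */
    #define FIELD_MASK(hi, lo)   ((uint32_t)((((uint64_t)1 << ((hi) - (lo) + 1)) - 1) << (lo)))
    #define FIELD_NUM(hi, lo, n) (((uint32_t)(n) << (lo)) & FIELD_MASK(hi, lo))

    /* e.g. for NVC097_SET_PIPELINE_SHADER(j): ENABLE is 0:0 and TYPE is
     * 7:4, so enabling slot j as a vertex shader would write
     * FIELD_NUM(0, 0, 1) | FIELD_NUM(7, 4, 1) to method 0x2000+(j)*64. */

    /* Multi-word fields (clc097tex.h style): bit N of an MW(hi:lo)
     * range lives in word N/32, bit N%32 of the 8-word header image. */
    static void texhead_set_field(uint32_t th[8], unsigned hi, unsigned lo,
                                  uint64_t value)
    {
        unsigned bit;
        for (bit = lo; bit <= hi; bit++) {
            uint32_t b = (uint32_t)((value >> (bit - lo)) & 1u);
            th[bit / 32] = (th[bit / 32] & ~(1u << (bit % 32))) | (b << (bit % 32));
        }
    }

    /* e.g. NVC097_TEXHEAD_BL_WIDTH_MINUS_ONE is MW(144:128), i.e.
     * bits 16:0 of word 4: texhead_set_field(th, 144, 128, width - 1); */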
+#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_DXN1 0x00000027 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_DXN2 0x00000028 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_BC6H_SF16 0x00000010 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_BC6H_UF16 0x00000011 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_BC7U 0x00000017 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ETC2_RGB 0x00000006 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_EAC 0x00000019 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_EACX2 0x0000001a +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_Z24S8 0x00000029 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_X8Z24 0x0000002a +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_S8Z24 0x0000002b +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_X4V4Z24__COV4R4V 0x0000002c +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_X4V4Z24__COV8R8V 0x0000002d +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_V8Z24__COV4R12V 0x0000002e +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32 0x0000002f +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X24S8 0x00000030 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_X8Z24_X20V4S8__COV4R4V 0x00000031 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_X8Z24_X20V4S8__COV8R8V 0x00000032 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X20V4X8__COV4R4V 0x00000033 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X20V4X8__COV8R8V 0x00000034 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X20V4S8__COV4R4V 0x00000035 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X20V4S8__COV8R8V 0x00000036 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_X8Z24_X16V8S8__COV4R12V 0x00000037 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X16V8X8__COV4R12V 0x00000038 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X16V8S8__COV4R12V 0x00000039 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_Z16 0x0000003a +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_V8Z24__COV8R24V 0x0000003b +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_X8Z24_X16V8S8__COV8R24V 0x0000003c +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X16V8X8__COV8R24V 0x0000003d +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X16V8S8__COV8R24V 0x0000003e +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046 +#define NVC097_TEXHEAD_BL_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f +#define NVC097_TEXHEAD_BL_R_DATA_TYPE MW(9:7) +#define NVC097_TEXHEAD_BL_R_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVC097_TEXHEAD_BL_R_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVC097_TEXHEAD_BL_R_DATA_TYPE_NUM_SINT 0x00000003 +#define NVC097_TEXHEAD_BL_R_DATA_TYPE_NUM_UINT 0x00000004 +#define 
NVC097_TEXHEAD_BL_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVC097_TEXHEAD_BL_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVC097_TEXHEAD_BL_R_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVC097_TEXHEAD_BL_G_DATA_TYPE MW(12:10) +#define NVC097_TEXHEAD_BL_G_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVC097_TEXHEAD_BL_G_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVC097_TEXHEAD_BL_G_DATA_TYPE_NUM_SINT 0x00000003 +#define NVC097_TEXHEAD_BL_G_DATA_TYPE_NUM_UINT 0x00000004 +#define NVC097_TEXHEAD_BL_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVC097_TEXHEAD_BL_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVC097_TEXHEAD_BL_G_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVC097_TEXHEAD_BL_B_DATA_TYPE MW(15:13) +#define NVC097_TEXHEAD_BL_B_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVC097_TEXHEAD_BL_B_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVC097_TEXHEAD_BL_B_DATA_TYPE_NUM_SINT 0x00000003 +#define NVC097_TEXHEAD_BL_B_DATA_TYPE_NUM_UINT 0x00000004 +#define NVC097_TEXHEAD_BL_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVC097_TEXHEAD_BL_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVC097_TEXHEAD_BL_B_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVC097_TEXHEAD_BL_A_DATA_TYPE MW(18:16) +#define NVC097_TEXHEAD_BL_A_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVC097_TEXHEAD_BL_A_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVC097_TEXHEAD_BL_A_DATA_TYPE_NUM_SINT 0x00000003 +#define NVC097_TEXHEAD_BL_A_DATA_TYPE_NUM_UINT 0x00000004 +#define NVC097_TEXHEAD_BL_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVC097_TEXHEAD_BL_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVC097_TEXHEAD_BL_A_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVC097_TEXHEAD_BL_X_SOURCE MW(21:19) +#define NVC097_TEXHEAD_BL_X_SOURCE_IN_ZERO 0x00000000 +#define NVC097_TEXHEAD_BL_X_SOURCE_IN_R 0x00000002 +#define NVC097_TEXHEAD_BL_X_SOURCE_IN_G 0x00000003 +#define NVC097_TEXHEAD_BL_X_SOURCE_IN_B 0x00000004 +#define NVC097_TEXHEAD_BL_X_SOURCE_IN_A 0x00000005 +#define NVC097_TEXHEAD_BL_X_SOURCE_IN_ONE_INT 0x00000006 +#define NVC097_TEXHEAD_BL_X_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVC097_TEXHEAD_BL_Y_SOURCE MW(24:22) +#define NVC097_TEXHEAD_BL_Y_SOURCE_IN_ZERO 0x00000000 +#define NVC097_TEXHEAD_BL_Y_SOURCE_IN_R 0x00000002 +#define NVC097_TEXHEAD_BL_Y_SOURCE_IN_G 0x00000003 +#define NVC097_TEXHEAD_BL_Y_SOURCE_IN_B 0x00000004 +#define NVC097_TEXHEAD_BL_Y_SOURCE_IN_A 0x00000005 +#define NVC097_TEXHEAD_BL_Y_SOURCE_IN_ONE_INT 0x00000006 +#define NVC097_TEXHEAD_BL_Y_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVC097_TEXHEAD_BL_Z_SOURCE MW(27:25) +#define NVC097_TEXHEAD_BL_Z_SOURCE_IN_ZERO 0x00000000 +#define NVC097_TEXHEAD_BL_Z_SOURCE_IN_R 0x00000002 +#define NVC097_TEXHEAD_BL_Z_SOURCE_IN_G 0x00000003 +#define NVC097_TEXHEAD_BL_Z_SOURCE_IN_B 0x00000004 +#define NVC097_TEXHEAD_BL_Z_SOURCE_IN_A 0x00000005 +#define NVC097_TEXHEAD_BL_Z_SOURCE_IN_ONE_INT 0x00000006 +#define NVC097_TEXHEAD_BL_Z_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVC097_TEXHEAD_BL_W_SOURCE MW(30:28) +#define NVC097_TEXHEAD_BL_W_SOURCE_IN_ZERO 0x00000000 +#define NVC097_TEXHEAD_BL_W_SOURCE_IN_R 0x00000002 +#define NVC097_TEXHEAD_BL_W_SOURCE_IN_G 0x00000003 +#define NVC097_TEXHEAD_BL_W_SOURCE_IN_B 0x00000004 +#define NVC097_TEXHEAD_BL_W_SOURCE_IN_A 0x00000005 +#define NVC097_TEXHEAD_BL_W_SOURCE_IN_ONE_INT 0x00000006 +#define NVC097_TEXHEAD_BL_W_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVC097_TEXHEAD_BL_PACK_COMPONENTS MW(31:31) +#define NVC097_TEXHEAD_BL_RESERVED1Y MW(36:32) +#define NVC097_TEXHEAD_BL_GOB_DEPTH_OFFSET MW(38:37) +#define 
NVC097_TEXHEAD_BL_RESERVED1X MW(40:39) +#define NVC097_TEXHEAD_BL_ADDRESS_BITS31TO9 MW(63:41) +#define NVC097_TEXHEAD_BL_ADDRESS_BITS48TO32 MW(80:64) +#define NVC097_TEXHEAD_BL_RESERVED_ADDRESS MW(84:81) +#define NVC097_TEXHEAD_BL_HEADER_VERSION MW(87:85) +#define NVC097_TEXHEAD_BL_HEADER_VERSION_SELECT_ONE_D_BUFFER 0x00000000 +#define NVC097_TEXHEAD_BL_HEADER_VERSION_SELECT_PITCH_COLOR_KEY 0x00000001 +#define NVC097_TEXHEAD_BL_HEADER_VERSION_SELECT_PITCH 0x00000002 +#define NVC097_TEXHEAD_BL_HEADER_VERSION_SELECT_BLOCKLINEAR 0x00000003 +#define NVC097_TEXHEAD_BL_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY 0x00000004 +#define NVC097_TEXHEAD_BL_RESERVED_HEADER_VERSION MW(88:88) +#define NVC097_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH MW(92:89) +#define NVC097_TEXHEAD_BL_RESERVED2A MW(95:93) +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_WIDTH MW(98:96) +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_WIDTH_ONE_GOB 0x00000000 +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT MW(101:99) +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_ONE_GOB 0x00000000 +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_TWO_GOBS 0x00000001 +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH MW(104:102) +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_ONE_GOB 0x00000000 +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_TWO_GOBS 0x00000001 +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_FOUR_GOBS 0x00000002 +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_EIGHT_GOBS 0x00000003 +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005 +#define NVC097_TEXHEAD_BL_RESERVED3Y MW(105:105) +#define NVC097_TEXHEAD_BL_TILE_WIDTH_IN_GOBS MW(108:106) +#define NVC097_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_ONE_GOB 0x00000000 +#define NVC097_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_TWO_GOBS 0x00000001 +#define NVC097_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_FOUR_GOBS 0x00000002 +#define NVC097_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_EIGHT_GOBS 0x00000003 +#define NVC097_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_SIXTEEN_GOBS 0x00000004 +#define NVC097_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_THIRTYTWO_GOBS 0x00000005 +#define NVC097_TEXHEAD_BL_GOB3D MW(109:109) +#define NVC097_TEXHEAD_BL_RESERVED3Z MW(111:110) +#define NVC097_TEXHEAD_BL_LOD_ANISO_QUALITY2 MW(112:112) +#define NVC097_TEXHEAD_BL_LOD_ANISO_QUALITY MW(113:113) +#define NVC097_TEXHEAD_BL_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000 +#define NVC097_TEXHEAD_BL_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001 +#define NVC097_TEXHEAD_BL_LOD_ISO_QUALITY MW(114:114) +#define NVC097_TEXHEAD_BL_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000 +#define NVC097_TEXHEAD_BL_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001 +#define NVC097_TEXHEAD_BL_ANISO_COARSE_SPREAD_MODIFIER MW(116:115) +#define NVC097_TEXHEAD_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000 +#define NVC097_TEXHEAD_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001 +#define NVC097_TEXHEAD_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002 +#define NVC097_TEXHEAD_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003 +#define NVC097_TEXHEAD_BL_ANISO_SPREAD_SCALE MW(121:117) +#define NVC097_TEXHEAD_BL_USE_HEADER_OPT_CONTROL MW(122:122) +#define NVC097_TEXHEAD_BL_DEPTH_TEXTURE 
MW(123:123) +#define NVC097_TEXHEAD_BL_MAX_MIP_LEVEL MW(127:124) +#define NVC097_TEXHEAD_BL_WIDTH_MINUS_ONE MW(144:128) +#define NVC097_TEXHEAD_BL_DEPTH_MINUS_ONE_BIT14 MW(145:145) +#define NVC097_TEXHEAD_BL_HEIGHT_MINUS_ONE_BIT16 MW(146:146) +#define NVC097_TEXHEAD_BL_ANISO_SPREAD_MAX_LOG2 MW(149:147) +#define NVC097_TEXHEAD_BL_S_R_G_B_CONVERSION MW(150:150) +#define NVC097_TEXHEAD_BL_TEXTURE_TYPE MW(154:151) +#define NVC097_TEXHEAD_BL_TEXTURE_TYPE_ONE_D 0x00000000 +#define NVC097_TEXHEAD_BL_TEXTURE_TYPE_TWO_D 0x00000001 +#define NVC097_TEXHEAD_BL_TEXTURE_TYPE_THREE_D 0x00000002 +#define NVC097_TEXHEAD_BL_TEXTURE_TYPE_CUBEMAP 0x00000003 +#define NVC097_TEXHEAD_BL_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004 +#define NVC097_TEXHEAD_BL_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005 +#define NVC097_TEXHEAD_BL_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006 +#define NVC097_TEXHEAD_BL_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007 +#define NVC097_TEXHEAD_BL_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008 +#define NVC097_TEXHEAD_BL_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f +#define NVC097_TEXHEAD_BL_SECTOR_PROMOTION MW(156:155) +#define NVC097_TEXHEAD_BL_SECTOR_PROMOTION_NO_PROMOTION 0x00000000 +#define NVC097_TEXHEAD_BL_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001 +#define NVC097_TEXHEAD_BL_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002 +#define NVC097_TEXHEAD_BL_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003 +#define NVC097_TEXHEAD_BL_BORDER_SIZE MW(159:157) +#define NVC097_TEXHEAD_BL_BORDER_SIZE_BORDER_SIZE_ONE 0x00000000 +#define NVC097_TEXHEAD_BL_BORDER_SIZE_BORDER_SIZE_TWO 0x00000001 +#define NVC097_TEXHEAD_BL_BORDER_SIZE_BORDER_SIZE_FOUR 0x00000002 +#define NVC097_TEXHEAD_BL_BORDER_SIZE_BORDER_SIZE_EIGHT 0x00000003 +#define NVC097_TEXHEAD_BL_BORDER_SIZE_BORDER_SAMPLER_COLOR 0x00000007 +#define NVC097_TEXHEAD_BL_HEIGHT_MINUS_ONE MW(175:160) +#define NVC097_TEXHEAD_BL_DEPTH_MINUS_ONE MW(189:176) +#define NVC097_TEXHEAD_BL_RESERVED5A MW(190:190) +#define NVC097_TEXHEAD_BL_NORMALIZED_COORDS MW(191:191) +#define NVC097_TEXHEAD_BL_RESERVED6Y MW(192:192) +#define NVC097_TEXHEAD_BL_TRILIN_OPT MW(197:193) +#define NVC097_TEXHEAD_BL_MIP_LOD_BIAS MW(210:198) +#define NVC097_TEXHEAD_BL_ANISO_BIAS MW(214:211) +#define NVC097_TEXHEAD_BL_ANISO_FINE_SPREAD_FUNC MW(216:215) +#define NVC097_TEXHEAD_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000 +#define NVC097_TEXHEAD_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001 +#define NVC097_TEXHEAD_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002 +#define NVC097_TEXHEAD_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003 +#define NVC097_TEXHEAD_BL_ANISO_COARSE_SPREAD_FUNC MW(218:217) +#define NVC097_TEXHEAD_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000 +#define NVC097_TEXHEAD_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001 +#define NVC097_TEXHEAD_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002 +#define NVC097_TEXHEAD_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003 +#define NVC097_TEXHEAD_BL_MAX_ANISOTROPY MW(221:219) +#define NVC097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000 +#define NVC097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001 +#define NVC097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002 +#define NVC097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003 +#define NVC097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004 +#define NVC097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005 +#define NVC097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006 +#define NVC097_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007 +#define 
NVC097_TEXHEAD_BL_ANISO_FINE_SPREAD_MODIFIER MW(223:222) +#define NVC097_TEXHEAD_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000 +#define NVC097_TEXHEAD_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001 +#define NVC097_TEXHEAD_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002 +#define NVC097_TEXHEAD_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003 +#define NVC097_TEXHEAD_BL_RES_VIEW_MIN_MIP_LEVEL MW(227:224) +#define NVC097_TEXHEAD_BL_RES_VIEW_MAX_MIP_LEVEL MW(231:228) +#define NVC097_TEXHEAD_BL_MULTI_SAMPLE_COUNT MW(235:232) +#define NVC097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_1X1 0x00000000 +#define NVC097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X1 0x00000001 +#define NVC097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X2 0x00000002 +#define NVC097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X2 0x00000003 +#define NVC097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X2_D3D 0x00000004 +#define NVC097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X1_D3D 0x00000005 +#define NVC097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X4 0x00000006 +#define NVC097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X2_VC_4 0x00000008 +#define NVC097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X2_VC_12 0x00000009 +#define NVC097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X2_VC_8 0x0000000a +#define NVC097_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X2_VC_24 0x0000000b +#define NVC097_TEXHEAD_BL_MIN_LOD_CLAMP MW(247:236) +#define NVC097_TEXHEAD_BL_RESERVED7Y MW(255:248) + + +/* +** Texture Header State Blocklinear Color Key + */ + +#define NVC097_TEXHEAD_BLCK_COMPONENTS MW(6:0) +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_R32_G32_B32 0x00000002 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_R32_G32 0x00000004 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_R32_B24G8 0x00000005 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_X8B8G8R8 0x00000007 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_A8B8G8R8 0x00000008 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_A2B10G10R10 0x00000009 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_R16_G16 0x0000000c +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_G8R24 0x0000000d +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_G24R8 0x0000000e +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_R32 0x0000000f +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_A4B4G4R4 0x00000012 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_A5B5G5R1 0x00000013 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_A1B5G5R5 0x00000014 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_B5G6R5 0x00000015 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_B6G5R5 0x00000016 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_G8R8 0x00000018 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_R16 0x0000001b +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_R8 0x0000001d +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_G4R4 0x0000001e +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_R1 0x0000001f +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_BF10GF11RF11 0x00000021 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_G8B8G8R8 0x00000022 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_B8G8R8G8 0x00000023 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_DXT1 0x00000024 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_DXT23 0x00000025 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_DXT45 0x00000026 +#define 
NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_DXN1 0x00000027 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_DXN2 0x00000028 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_BC6H_SF16 0x00000010 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_BC6H_UF16 0x00000011 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_BC7U 0x00000017 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ETC2_RGB 0x00000006 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_EAC 0x00000019 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_EACX2 0x0000001a +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_Z24S8 0x00000029 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_X8Z24 0x0000002a +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_S8Z24 0x0000002b +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_X4V4Z24__COV4R4V 0x0000002c +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_X4V4Z24__COV8R8V 0x0000002d +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_V8Z24__COV4R12V 0x0000002e +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32 0x0000002f +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X24S8 0x00000030 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_X8Z24_X20V4S8__COV4R4V 0x00000031 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_X8Z24_X20V4S8__COV8R8V 0x00000032 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X20V4X8__COV4R4V 0x00000033 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X20V4X8__COV8R8V 0x00000034 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X20V4S8__COV4R4V 0x00000035 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X20V4S8__COV8R8V 0x00000036 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_X8Z24_X16V8S8__COV4R12V 0x00000037 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X16V8X8__COV4R12V 0x00000038 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X16V8S8__COV4R12V 0x00000039 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_Z16 0x0000003a +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_V8Z24__COV8R24V 0x0000003b +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_X8Z24_X16V8S8__COV8R24V 0x0000003c +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X16V8X8__COV8R24V 0x0000003d +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X16V8S8__COV8R24V 0x0000003e +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046 +#define NVC097_TEXHEAD_BLCK_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f +#define NVC097_TEXHEAD_BLCK_R_DATA_TYPE MW(9:7) +#define NVC097_TEXHEAD_BLCK_R_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVC097_TEXHEAD_BLCK_R_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVC097_TEXHEAD_BLCK_R_DATA_TYPE_NUM_SINT 0x00000003 
+#define NVC097_TEXHEAD_BLCK_R_DATA_TYPE_NUM_UINT 0x00000004 +#define NVC097_TEXHEAD_BLCK_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVC097_TEXHEAD_BLCK_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVC097_TEXHEAD_BLCK_R_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVC097_TEXHEAD_BLCK_G_DATA_TYPE MW(12:10) +#define NVC097_TEXHEAD_BLCK_G_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVC097_TEXHEAD_BLCK_G_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVC097_TEXHEAD_BLCK_G_DATA_TYPE_NUM_SINT 0x00000003 +#define NVC097_TEXHEAD_BLCK_G_DATA_TYPE_NUM_UINT 0x00000004 +#define NVC097_TEXHEAD_BLCK_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVC097_TEXHEAD_BLCK_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVC097_TEXHEAD_BLCK_G_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVC097_TEXHEAD_BLCK_B_DATA_TYPE MW(15:13) +#define NVC097_TEXHEAD_BLCK_B_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVC097_TEXHEAD_BLCK_B_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVC097_TEXHEAD_BLCK_B_DATA_TYPE_NUM_SINT 0x00000003 +#define NVC097_TEXHEAD_BLCK_B_DATA_TYPE_NUM_UINT 0x00000004 +#define NVC097_TEXHEAD_BLCK_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVC097_TEXHEAD_BLCK_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVC097_TEXHEAD_BLCK_B_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVC097_TEXHEAD_BLCK_A_DATA_TYPE MW(18:16) +#define NVC097_TEXHEAD_BLCK_A_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVC097_TEXHEAD_BLCK_A_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVC097_TEXHEAD_BLCK_A_DATA_TYPE_NUM_SINT 0x00000003 +#define NVC097_TEXHEAD_BLCK_A_DATA_TYPE_NUM_UINT 0x00000004 +#define NVC097_TEXHEAD_BLCK_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVC097_TEXHEAD_BLCK_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVC097_TEXHEAD_BLCK_A_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVC097_TEXHEAD_BLCK_X_SOURCE MW(21:19) +#define NVC097_TEXHEAD_BLCK_X_SOURCE_IN_ZERO 0x00000000 +#define NVC097_TEXHEAD_BLCK_X_SOURCE_IN_R 0x00000002 +#define NVC097_TEXHEAD_BLCK_X_SOURCE_IN_G 0x00000003 +#define NVC097_TEXHEAD_BLCK_X_SOURCE_IN_B 0x00000004 +#define NVC097_TEXHEAD_BLCK_X_SOURCE_IN_A 0x00000005 +#define NVC097_TEXHEAD_BLCK_X_SOURCE_IN_ONE_INT 0x00000006 +#define NVC097_TEXHEAD_BLCK_X_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVC097_TEXHEAD_BLCK_Y_SOURCE MW(24:22) +#define NVC097_TEXHEAD_BLCK_Y_SOURCE_IN_ZERO 0x00000000 +#define NVC097_TEXHEAD_BLCK_Y_SOURCE_IN_R 0x00000002 +#define NVC097_TEXHEAD_BLCK_Y_SOURCE_IN_G 0x00000003 +#define NVC097_TEXHEAD_BLCK_Y_SOURCE_IN_B 0x00000004 +#define NVC097_TEXHEAD_BLCK_Y_SOURCE_IN_A 0x00000005 +#define NVC097_TEXHEAD_BLCK_Y_SOURCE_IN_ONE_INT 0x00000006 +#define NVC097_TEXHEAD_BLCK_Y_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVC097_TEXHEAD_BLCK_Z_SOURCE MW(27:25) +#define NVC097_TEXHEAD_BLCK_Z_SOURCE_IN_ZERO 0x00000000 +#define NVC097_TEXHEAD_BLCK_Z_SOURCE_IN_R 0x00000002 +#define NVC097_TEXHEAD_BLCK_Z_SOURCE_IN_G 0x00000003 +#define NVC097_TEXHEAD_BLCK_Z_SOURCE_IN_B 0x00000004 +#define NVC097_TEXHEAD_BLCK_Z_SOURCE_IN_A 0x00000005 +#define NVC097_TEXHEAD_BLCK_Z_SOURCE_IN_ONE_INT 0x00000006 +#define NVC097_TEXHEAD_BLCK_Z_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVC097_TEXHEAD_BLCK_W_SOURCE MW(30:28) +#define NVC097_TEXHEAD_BLCK_W_SOURCE_IN_ZERO 0x00000000 +#define NVC097_TEXHEAD_BLCK_W_SOURCE_IN_R 0x00000002 +#define NVC097_TEXHEAD_BLCK_W_SOURCE_IN_G 0x00000003 +#define NVC097_TEXHEAD_BLCK_W_SOURCE_IN_B 0x00000004 +#define NVC097_TEXHEAD_BLCK_W_SOURCE_IN_A 0x00000005 +#define NVC097_TEXHEAD_BLCK_W_SOURCE_IN_ONE_INT 0x00000006 +#define NVC097_TEXHEAD_BLCK_W_SOURCE_IN_ONE_FLOAT 
0x00000007 +#define NVC097_TEXHEAD_BLCK_PACK_COMPONENTS MW(31:31) +#define NVC097_TEXHEAD_BLCK_RESERVED1Y MW(36:32) +#define NVC097_TEXHEAD_BLCK_GOB_DEPTH_OFFSET MW(38:37) +#define NVC097_TEXHEAD_BLCK_RESERVED1X MW(40:39) +#define NVC097_TEXHEAD_BLCK_ADDRESS_BITS31TO9 MW(63:41) +#define NVC097_TEXHEAD_BLCK_ADDRESS_BITS48TO32 MW(80:64) +#define NVC097_TEXHEAD_BLCK_RESERVED_ADDRESS MW(84:81) +#define NVC097_TEXHEAD_BLCK_HEADER_VERSION MW(87:85) +#define NVC097_TEXHEAD_BLCK_HEADER_VERSION_SELECT_ONE_D_BUFFER 0x00000000 +#define NVC097_TEXHEAD_BLCK_HEADER_VERSION_SELECT_PITCH_COLOR_KEY 0x00000001 +#define NVC097_TEXHEAD_BLCK_HEADER_VERSION_SELECT_PITCH 0x00000002 +#define NVC097_TEXHEAD_BLCK_HEADER_VERSION_SELECT_BLOCKLINEAR 0x00000003 +#define NVC097_TEXHEAD_BLCK_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY 0x00000004 +#define NVC097_TEXHEAD_BLCK_RESERVED_HEADER_VERSION MW(88:88) +#define NVC097_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH MW(92:89) +#define NVC097_TEXHEAD_BLCK_RESERVED2A MW(95:93) +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_WIDTH MW(98:96) +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_WIDTH_ONE_GOB 0x00000000 +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT MW(101:99) +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_ONE_GOB 0x00000000 +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_TWO_GOBS 0x00000001 +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH MW(104:102) +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_ONE_GOB 0x00000000 +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_TWO_GOBS 0x00000001 +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_FOUR_GOBS 0x00000002 +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_EIGHT_GOBS 0x00000003 +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVC097_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005 +#define NVC097_TEXHEAD_BLCK_RESERVED3Y MW(105:105) +#define NVC097_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS MW(108:106) +#define NVC097_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_ONE_GOB 0x00000000 +#define NVC097_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_TWO_GOBS 0x00000001 +#define NVC097_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_FOUR_GOBS 0x00000002 +#define NVC097_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_EIGHT_GOBS 0x00000003 +#define NVC097_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_SIXTEEN_GOBS 0x00000004 +#define NVC097_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_THIRTYTWO_GOBS 0x00000005 +#define NVC097_TEXHEAD_BLCK_GOB3D MW(109:109) +#define NVC097_TEXHEAD_BLCK_RESERVED3Z MW(111:110) +#define NVC097_TEXHEAD_BLCK_LOD_ANISO_QUALITY2 MW(112:112) +#define NVC097_TEXHEAD_BLCK_LOD_ANISO_QUALITY MW(113:113) +#define NVC097_TEXHEAD_BLCK_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000 +#define NVC097_TEXHEAD_BLCK_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001 +#define NVC097_TEXHEAD_BLCK_LOD_ISO_QUALITY MW(114:114) +#define NVC097_TEXHEAD_BLCK_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000 +#define NVC097_TEXHEAD_BLCK_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001 +#define NVC097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_MODIFIER MW(116:115) +#define NVC097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000 +#define NVC097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001 +#define 
NVC097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002 +#define NVC097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003 +#define NVC097_TEXHEAD_BLCK_ANISO_SPREAD_SCALE MW(121:117) +#define NVC097_TEXHEAD_BLCK_USE_HEADER_OPT_CONTROL MW(122:122) +#define NVC097_TEXHEAD_BLCK_DEPTH_TEXTURE MW(123:123) +#define NVC097_TEXHEAD_BLCK_MAX_MIP_LEVEL MW(127:124) +#define NVC097_TEXHEAD_BLCK_WIDTH_MINUS_ONE MW(144:128) +#define NVC097_TEXHEAD_BLCK_DEPTH_MINUS_ONE_BIT14 MW(145:145) +#define NVC097_TEXHEAD_BLCK_HEIGHT_MINUS_ONE_BIT16 MW(146:146) +#define NVC097_TEXHEAD_BLCK_ANISO_SPREAD_MAX_LOG2 MW(149:147) +#define NVC097_TEXHEAD_BLCK_S_R_G_B_CONVERSION MW(150:150) +#define NVC097_TEXHEAD_BLCK_TEXTURE_TYPE MW(154:151) +#define NVC097_TEXHEAD_BLCK_TEXTURE_TYPE_ONE_D 0x00000000 +#define NVC097_TEXHEAD_BLCK_TEXTURE_TYPE_TWO_D 0x00000001 +#define NVC097_TEXHEAD_BLCK_TEXTURE_TYPE_THREE_D 0x00000002 +#define NVC097_TEXHEAD_BLCK_TEXTURE_TYPE_CUBEMAP 0x00000003 +#define NVC097_TEXHEAD_BLCK_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004 +#define NVC097_TEXHEAD_BLCK_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005 +#define NVC097_TEXHEAD_BLCK_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006 +#define NVC097_TEXHEAD_BLCK_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007 +#define NVC097_TEXHEAD_BLCK_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008 +#define NVC097_TEXHEAD_BLCK_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f +#define NVC097_TEXHEAD_BLCK_SECTOR_PROMOTION MW(156:155) +#define NVC097_TEXHEAD_BLCK_SECTOR_PROMOTION_NO_PROMOTION 0x00000000 +#define NVC097_TEXHEAD_BLCK_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001 +#define NVC097_TEXHEAD_BLCK_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002 +#define NVC097_TEXHEAD_BLCK_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003 +#define NVC097_TEXHEAD_BLCK_BORDER_SIZE MW(159:157) +#define NVC097_TEXHEAD_BLCK_BORDER_SIZE_BORDER_SIZE_ONE 0x00000000 +#define NVC097_TEXHEAD_BLCK_BORDER_SIZE_BORDER_SIZE_TWO 0x00000001 +#define NVC097_TEXHEAD_BLCK_BORDER_SIZE_BORDER_SIZE_FOUR 0x00000002 +#define NVC097_TEXHEAD_BLCK_BORDER_SIZE_BORDER_SIZE_EIGHT 0x00000003 +#define NVC097_TEXHEAD_BLCK_BORDER_SIZE_BORDER_SAMPLER_COLOR 0x00000007 +#define NVC097_TEXHEAD_BLCK_HEIGHT_MINUS_ONE MW(175:160) +#define NVC097_TEXHEAD_BLCK_DEPTH_MINUS_ONE MW(189:176) +#define NVC097_TEXHEAD_BLCK_RESERVED5A MW(190:190) +#define NVC097_TEXHEAD_BLCK_NORMALIZED_COORDS MW(191:191) +#define NVC097_TEXHEAD_BLCK_COLOR_KEY_OP MW(192:192) +#define NVC097_TEXHEAD_BLCK_TRILIN_OPT MW(197:193) +#define NVC097_TEXHEAD_BLCK_MIP_LOD_BIAS MW(210:198) +#define NVC097_TEXHEAD_BLCK_ANISO_BIAS MW(214:211) +#define NVC097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_FUNC MW(216:215) +#define NVC097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000 +#define NVC097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001 +#define NVC097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002 +#define NVC097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003 +#define NVC097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_FUNC MW(218:217) +#define NVC097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000 +#define NVC097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001 +#define NVC097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002 +#define NVC097_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003 +#define NVC097_TEXHEAD_BLCK_MAX_ANISOTROPY MW(221:219) +#define NVC097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000 +#define NVC097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_2_TO_1 
0x00000001
+#define NVC097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002
+#define NVC097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003
+#define NVC097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004
+#define NVC097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005
+#define NVC097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006
+#define NVC097_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007
+#define NVC097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_MODIFIER MW(223:222)
+#define NVC097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVC097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVC097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVC097_TEXHEAD_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVC097_TEXHEAD_BLCK_COLOR_KEY_VALUE MW(255:224)
+
+
+/*
+** Texture Header State One-D Buffer
+ */
+
+#define NVC097_TEXHEAD_1D_COMPONENTS MW(6:0)
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_R32_G32_B32 0x00000002
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_R32_G32 0x00000004
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_R32_B24G8 0x00000005
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_X8B8G8R8 0x00000007
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_A8B8G8R8 0x00000008
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_A2B10G10R10 0x00000009
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_R16_G16 0x0000000c
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_G8R24 0x0000000d
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_G24R8 0x0000000e
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_R32 0x0000000f
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_A4B4G4R4 0x00000012
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_A5B5G5R1 0x00000013
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_A1B5G5R5 0x00000014
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_B5G6R5 0x00000015
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_B6G5R5 0x00000016
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_G8R8 0x00000018
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_R16 0x0000001b
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_R8 0x0000001d
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_G4R4 0x0000001e
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_R1 0x0000001f
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_BF10GF11RF11 0x00000021
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_G8B8G8R8 0x00000022
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_B8G8R8G8 0x00000023
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_DXT1 0x00000024
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_DXT23 0x00000025
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_DXT45 0x00000026
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_DXN1 0x00000027
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_DXN2 0x00000028
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_BC6H_SF16 0x00000010
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_BC6H_UF16 0x00000011
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_BC7U 0x00000017
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ETC2_RGB 0x00000006
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_EAC 0x00000019
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_EACX2 0x0000001a
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_Z24S8 0x00000029
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_X8Z24 0x0000002a
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_S8Z24 0x0000002b
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_X4V4Z24__COV4R4V 0x0000002c
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_X4V4Z24__COV8R8V 0x0000002d
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_V8Z24__COV4R12V 0x0000002e
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32 0x0000002f
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X24S8 0x00000030
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_X8Z24_X20V4S8__COV4R4V 0x00000031
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_X8Z24_X20V4S8__COV8R8V 0x00000032
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X20V4X8__COV4R4V 0x00000033
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X20V4X8__COV8R8V 0x00000034
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X20V4S8__COV4R4V 0x00000035
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X20V4S8__COV8R8V 0x00000036
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_X8Z24_X16V8S8__COV4R12V 0x00000037
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X16V8X8__COV4R12V 0x00000038
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X16V8S8__COV4R12V 0x00000039
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_Z16 0x0000003a
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_V8Z24__COV8R24V 0x0000003b
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_X8Z24_X16V8S8__COV8R24V 0x0000003c
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X16V8X8__COV8R24V 0x0000003d
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X16V8S8__COV8R24V 0x0000003e
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046
+#define NVC097_TEXHEAD_1D_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f
+#define NVC097_TEXHEAD_1D_R_DATA_TYPE MW(9:7)
+#define NVC097_TEXHEAD_1D_R_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVC097_TEXHEAD_1D_R_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVC097_TEXHEAD_1D_R_DATA_TYPE_NUM_SINT 0x00000003
+#define NVC097_TEXHEAD_1D_R_DATA_TYPE_NUM_UINT 0x00000004
+#define NVC097_TEXHEAD_1D_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVC097_TEXHEAD_1D_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVC097_TEXHEAD_1D_R_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVC097_TEXHEAD_1D_G_DATA_TYPE MW(12:10)
+#define NVC097_TEXHEAD_1D_G_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVC097_TEXHEAD_1D_G_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVC097_TEXHEAD_1D_G_DATA_TYPE_NUM_SINT 0x00000003
+#define NVC097_TEXHEAD_1D_G_DATA_TYPE_NUM_UINT 0x00000004
+#define NVC097_TEXHEAD_1D_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVC097_TEXHEAD_1D_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVC097_TEXHEAD_1D_G_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVC097_TEXHEAD_1D_B_DATA_TYPE MW(15:13)
+#define NVC097_TEXHEAD_1D_B_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVC097_TEXHEAD_1D_B_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVC097_TEXHEAD_1D_B_DATA_TYPE_NUM_SINT 0x00000003
+#define NVC097_TEXHEAD_1D_B_DATA_TYPE_NUM_UINT 0x00000004
+#define NVC097_TEXHEAD_1D_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVC097_TEXHEAD_1D_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVC097_TEXHEAD_1D_B_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVC097_TEXHEAD_1D_A_DATA_TYPE MW(18:16)
+#define NVC097_TEXHEAD_1D_A_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVC097_TEXHEAD_1D_A_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVC097_TEXHEAD_1D_A_DATA_TYPE_NUM_SINT 0x00000003
+#define NVC097_TEXHEAD_1D_A_DATA_TYPE_NUM_UINT 0x00000004
+#define NVC097_TEXHEAD_1D_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVC097_TEXHEAD_1D_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVC097_TEXHEAD_1D_A_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVC097_TEXHEAD_1D_X_SOURCE MW(21:19)
+#define NVC097_TEXHEAD_1D_X_SOURCE_IN_ZERO 0x00000000
+#define NVC097_TEXHEAD_1D_X_SOURCE_IN_R 0x00000002
+#define NVC097_TEXHEAD_1D_X_SOURCE_IN_G 0x00000003
+#define NVC097_TEXHEAD_1D_X_SOURCE_IN_B 0x00000004
+#define NVC097_TEXHEAD_1D_X_SOURCE_IN_A 0x00000005
+#define NVC097_TEXHEAD_1D_X_SOURCE_IN_ONE_INT 0x00000006
+#define NVC097_TEXHEAD_1D_X_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVC097_TEXHEAD_1D_Y_SOURCE MW(24:22)
+#define NVC097_TEXHEAD_1D_Y_SOURCE_IN_ZERO 0x00000000
+#define NVC097_TEXHEAD_1D_Y_SOURCE_IN_R 0x00000002
+#define NVC097_TEXHEAD_1D_Y_SOURCE_IN_G 0x00000003
+#define NVC097_TEXHEAD_1D_Y_SOURCE_IN_B 0x00000004
+#define NVC097_TEXHEAD_1D_Y_SOURCE_IN_A 0x00000005
+#define NVC097_TEXHEAD_1D_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVC097_TEXHEAD_1D_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVC097_TEXHEAD_1D_Z_SOURCE MW(27:25)
+#define NVC097_TEXHEAD_1D_Z_SOURCE_IN_ZERO 0x00000000
+#define NVC097_TEXHEAD_1D_Z_SOURCE_IN_R 0x00000002
+#define NVC097_TEXHEAD_1D_Z_SOURCE_IN_G 0x00000003
+#define NVC097_TEXHEAD_1D_Z_SOURCE_IN_B 0x00000004
+#define NVC097_TEXHEAD_1D_Z_SOURCE_IN_A 0x00000005
+#define NVC097_TEXHEAD_1D_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVC097_TEXHEAD_1D_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVC097_TEXHEAD_1D_W_SOURCE MW(30:28)
+#define NVC097_TEXHEAD_1D_W_SOURCE_IN_ZERO 0x00000000
+#define NVC097_TEXHEAD_1D_W_SOURCE_IN_R 0x00000002
+#define NVC097_TEXHEAD_1D_W_SOURCE_IN_G 0x00000003
+#define NVC097_TEXHEAD_1D_W_SOURCE_IN_B 0x00000004
+#define NVC097_TEXHEAD_1D_W_SOURCE_IN_A 0x00000005
+#define NVC097_TEXHEAD_1D_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVC097_TEXHEAD_1D_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVC097_TEXHEAD_1D_PACK_COMPONENTS MW(31:31)
+#define NVC097_TEXHEAD_1D_ADDRESS_BITS31TO0 MW(63:32)
+#define NVC097_TEXHEAD_1D_ADDRESS_BITS48TO32 MW(80:64)
+#define NVC097_TEXHEAD_1D_RESERVED_ADDRESS MW(84:81)
+#define NVC097_TEXHEAD_1D_HEADER_VERSION MW(87:85)
+#define NVC097_TEXHEAD_1D_HEADER_VERSION_SELECT_ONE_D_BUFFER 0x00000000
+#define NVC097_TEXHEAD_1D_HEADER_VERSION_SELECT_PITCH_COLOR_KEY 0x00000001
+#define NVC097_TEXHEAD_1D_HEADER_VERSION_SELECT_PITCH 0x00000002
+#define NVC097_TEXHEAD_1D_HEADER_VERSION_SELECT_BLOCKLINEAR 0x00000003
+#define NVC097_TEXHEAD_1D_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY 0x00000004
+#define NVC097_TEXHEAD_1D_RESERVED_HEADER_VERSION MW(88:88)
+#define NVC097_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH MW(92:89)
+#define NVC097_TEXHEAD_1D_RESERVED2A MW(95:93)
+#define NVC097_TEXHEAD_1D_WIDTH_MINUS_ONE_BITS31TO16 MW(111:96)
+#define NVC097_TEXHEAD_1D_RESERVED3X MW(127:112)
+#define NVC097_TEXHEAD_1D_WIDTH_MINUS_ONE_BITS15TO0 MW(143:128)
+#define NVC097_TEXHEAD_1D_RESERVED4X MW(149:144)
+#define NVC097_TEXHEAD_1D_S_R_G_B_CONVERSION MW(150:150)
+#define NVC097_TEXHEAD_1D_TEXTURE_TYPE MW(154:151)
+#define NVC097_TEXHEAD_1D_TEXTURE_TYPE_ONE_D 0x00000000
+#define NVC097_TEXHEAD_1D_TEXTURE_TYPE_TWO_D 0x00000001
+#define NVC097_TEXHEAD_1D_TEXTURE_TYPE_THREE_D 0x00000002
+#define NVC097_TEXHEAD_1D_TEXTURE_TYPE_CUBEMAP 0x00000003
+#define NVC097_TEXHEAD_1D_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004
+#define NVC097_TEXHEAD_1D_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005
+#define NVC097_TEXHEAD_1D_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006
+#define NVC097_TEXHEAD_1D_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007
+#define NVC097_TEXHEAD_1D_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008
+#define NVC097_TEXHEAD_1D_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f
+#define NVC097_TEXHEAD_1D_SECTOR_PROMOTION MW(156:155)
+#define NVC097_TEXHEAD_1D_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVC097_TEXHEAD_1D_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVC097_TEXHEAD_1D_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVC097_TEXHEAD_1D_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVC097_TEXHEAD_1D_RESERVED4Y MW(159:157)
+#define NVC097_TEXHEAD_1D_RESERVED5X MW(189:160)
+#define NVC097_TEXHEAD_1D_RESERVED5A MW(190:190)
+#define NVC097_TEXHEAD_1D_RESERVED5Y MW(191:191)
+#define NVC097_TEXHEAD_1D_RESERVED6X MW(223:192)
+#define NVC097_TEXHEAD_1D_RESERVED7X MW(255:224)
+
+
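The TEXHEAD_1D fields above describe a single 256-bit texture header, i.e. eight consecutive 32-bit words. As a minimal sketch (not part of the generated header), assuming each MW(hi:lo) pair gives absolute bit positions within that 256-bit block and NvU32 comes from nvtypes.h, a field can be extracted generically:

    /* Illustrative sketch only, assuming MW(hi:lo) = absolute bit range
     * within the 256-bit (8 x 32-bit word) texture header. */
    static NvU32 texhead_get_field(const NvU32 words[8], unsigned hi, unsigned lo)
    {
        NvU32 value = 0;
        unsigned bit;

        /* Extract bit by bit; fields such as ADDRESS_BITS31TO0 (MW(63:32))
         * happen to sit on word boundaries, but nothing in the layout
         * forbids a field from straddling one, so stay generic. */
        for (bit = lo; bit <= hi; bit++)
            value |= ((words[bit / 32u] >> (bit % 32u)) & 1u) << (bit - lo);
        return value;
    }

    /* e.g. TEXTURE_TYPE is MW(154:151): texhead_get_field(th, 154, 151) */
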
+/*
+** Texture Header State Pitch
+ */
+
+#define NVC097_TEXHEAD_PITCH_COMPONENTS MW(6:0)
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_R32_G32_B32 0x00000002
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_R32_G32 0x00000004
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_R32_B24G8 0x00000005
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_X8B8G8R8 0x00000007
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_A8B8G8R8 0x00000008
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_A2B10G10R10 0x00000009
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_R16_G16 0x0000000c
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_G8R24 0x0000000d
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_G24R8 0x0000000e
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_R32 0x0000000f
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_A4B4G4R4 0x00000012
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_A5B5G5R1 0x00000013
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_A1B5G5R5 0x00000014
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_B5G6R5 0x00000015
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_B6G5R5 0x00000016
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_G8R8 0x00000018
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_R16 0x0000001b
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_R8 0x0000001d
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_G4R4 0x0000001e
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_R1 0x0000001f
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_BF10GF11RF11 0x00000021
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_G8B8G8R8 0x00000022
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_B8G8R8G8 0x00000023
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_DXT1 0x00000024
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_DXT23 0x00000025
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_DXT45 0x00000026
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_DXN1 0x00000027
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_DXN2 0x00000028
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_BC6H_SF16 0x00000010
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_BC6H_UF16 0x00000011
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_BC7U 0x00000017
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ETC2_RGB 0x00000006
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_EAC 0x00000019
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_EACX2 0x0000001a
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_Z24S8 0x00000029
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_X8Z24 0x0000002a
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_S8Z24 0x0000002b
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_X4V4Z24__COV4R4V 0x0000002c
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_X4V4Z24__COV8R8V 0x0000002d
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_V8Z24__COV4R12V 0x0000002e
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32 0x0000002f
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X24S8 0x00000030
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_X8Z24_X20V4S8__COV4R4V 0x00000031
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_X8Z24_X20V4S8__COV8R8V 0x00000032
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X20V4X8__COV4R4V 0x00000033
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X20V4X8__COV8R8V 0x00000034
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X20V4S8__COV4R4V 0x00000035
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X20V4S8__COV8R8V 0x00000036
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_X8Z24_X16V8S8__COV4R12V 0x00000037
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X16V8X8__COV4R12V 0x00000038
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X16V8S8__COV4R12V 0x00000039
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_Z16 0x0000003a
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_V8Z24__COV8R24V 0x0000003b
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_X8Z24_X16V8S8__COV8R24V 0x0000003c
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X16V8X8__COV8R24V 0x0000003d
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X16V8S8__COV8R24V 0x0000003e
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046
+#define NVC097_TEXHEAD_PITCH_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f
+#define NVC097_TEXHEAD_PITCH_R_DATA_TYPE MW(9:7)
+#define NVC097_TEXHEAD_PITCH_R_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVC097_TEXHEAD_PITCH_R_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVC097_TEXHEAD_PITCH_R_DATA_TYPE_NUM_SINT 0x00000003
+#define NVC097_TEXHEAD_PITCH_R_DATA_TYPE_NUM_UINT 0x00000004
+#define NVC097_TEXHEAD_PITCH_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVC097_TEXHEAD_PITCH_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVC097_TEXHEAD_PITCH_R_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCH_G_DATA_TYPE MW(12:10)
+#define NVC097_TEXHEAD_PITCH_G_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVC097_TEXHEAD_PITCH_G_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVC097_TEXHEAD_PITCH_G_DATA_TYPE_NUM_SINT 0x00000003
+#define NVC097_TEXHEAD_PITCH_G_DATA_TYPE_NUM_UINT 0x00000004
+#define NVC097_TEXHEAD_PITCH_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVC097_TEXHEAD_PITCH_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVC097_TEXHEAD_PITCH_G_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCH_B_DATA_TYPE MW(15:13)
+#define NVC097_TEXHEAD_PITCH_B_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVC097_TEXHEAD_PITCH_B_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVC097_TEXHEAD_PITCH_B_DATA_TYPE_NUM_SINT 0x00000003
+#define NVC097_TEXHEAD_PITCH_B_DATA_TYPE_NUM_UINT 0x00000004
+#define NVC097_TEXHEAD_PITCH_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVC097_TEXHEAD_PITCH_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVC097_TEXHEAD_PITCH_B_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCH_A_DATA_TYPE MW(18:16)
+#define NVC097_TEXHEAD_PITCH_A_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVC097_TEXHEAD_PITCH_A_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVC097_TEXHEAD_PITCH_A_DATA_TYPE_NUM_SINT 0x00000003
+#define NVC097_TEXHEAD_PITCH_A_DATA_TYPE_NUM_UINT 0x00000004
+#define NVC097_TEXHEAD_PITCH_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVC097_TEXHEAD_PITCH_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVC097_TEXHEAD_PITCH_A_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCH_X_SOURCE MW(21:19)
+#define NVC097_TEXHEAD_PITCH_X_SOURCE_IN_ZERO 0x00000000
+#define NVC097_TEXHEAD_PITCH_X_SOURCE_IN_R 0x00000002
+#define NVC097_TEXHEAD_PITCH_X_SOURCE_IN_G 0x00000003
+#define NVC097_TEXHEAD_PITCH_X_SOURCE_IN_B 0x00000004
+#define NVC097_TEXHEAD_PITCH_X_SOURCE_IN_A 0x00000005
+#define NVC097_TEXHEAD_PITCH_X_SOURCE_IN_ONE_INT 0x00000006
+#define NVC097_TEXHEAD_PITCH_X_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCH_Y_SOURCE MW(24:22)
+#define NVC097_TEXHEAD_PITCH_Y_SOURCE_IN_ZERO 0x00000000
+#define NVC097_TEXHEAD_PITCH_Y_SOURCE_IN_R 0x00000002
+#define NVC097_TEXHEAD_PITCH_Y_SOURCE_IN_G 0x00000003
+#define NVC097_TEXHEAD_PITCH_Y_SOURCE_IN_B 0x00000004
+#define NVC097_TEXHEAD_PITCH_Y_SOURCE_IN_A 0x00000005
+#define NVC097_TEXHEAD_PITCH_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVC097_TEXHEAD_PITCH_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCH_Z_SOURCE MW(27:25)
+#define NVC097_TEXHEAD_PITCH_Z_SOURCE_IN_ZERO 0x00000000
+#define NVC097_TEXHEAD_PITCH_Z_SOURCE_IN_R 0x00000002
+#define NVC097_TEXHEAD_PITCH_Z_SOURCE_IN_G 0x00000003
+#define NVC097_TEXHEAD_PITCH_Z_SOURCE_IN_B 0x00000004
+#define NVC097_TEXHEAD_PITCH_Z_SOURCE_IN_A 0x00000005
+#define NVC097_TEXHEAD_PITCH_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVC097_TEXHEAD_PITCH_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCH_W_SOURCE MW(30:28)
+#define NVC097_TEXHEAD_PITCH_W_SOURCE_IN_ZERO 0x00000000
+#define NVC097_TEXHEAD_PITCH_W_SOURCE_IN_R 0x00000002
+#define NVC097_TEXHEAD_PITCH_W_SOURCE_IN_G 0x00000003
+#define NVC097_TEXHEAD_PITCH_W_SOURCE_IN_B 0x00000004
+#define NVC097_TEXHEAD_PITCH_W_SOURCE_IN_A 0x00000005
+#define NVC097_TEXHEAD_PITCH_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVC097_TEXHEAD_PITCH_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCH_PACK_COMPONENTS MW(31:31)
+#define NVC097_TEXHEAD_PITCH_RESERVED1A MW(36:32)
+#define NVC097_TEXHEAD_PITCH_ADDRESS_BITS31TO5 MW(63:37)
+#define NVC097_TEXHEAD_PITCH_ADDRESS_BITS48TO32 MW(80:64)
+#define NVC097_TEXHEAD_PITCH_RESERVED_ADDRESS MW(84:81)
+#define NVC097_TEXHEAD_PITCH_HEADER_VERSION MW(87:85)
+#define NVC097_TEXHEAD_PITCH_HEADER_VERSION_SELECT_ONE_D_BUFFER 0x00000000
+#define NVC097_TEXHEAD_PITCH_HEADER_VERSION_SELECT_PITCH_COLOR_KEY 0x00000001
+#define NVC097_TEXHEAD_PITCH_HEADER_VERSION_SELECT_PITCH 0x00000002
+#define NVC097_TEXHEAD_PITCH_HEADER_VERSION_SELECT_BLOCKLINEAR 0x00000003
+#define NVC097_TEXHEAD_PITCH_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY 0x00000004
+#define NVC097_TEXHEAD_PITCH_RESERVED_HEADER_VERSION MW(88:88)
+#define NVC097_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH MW(92:89)
+#define NVC097_TEXHEAD_PITCH_RESERVED2A MW(95:93)
+#define NVC097_TEXHEAD_PITCH_PITCH_BITS20TO5 MW(111:96)
+#define NVC097_TEXHEAD_PITCH_LOD_ANISO_QUALITY2 MW(112:112)
+#define NVC097_TEXHEAD_PITCH_LOD_ANISO_QUALITY MW(113:113)
+#define NVC097_TEXHEAD_PITCH_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVC097_TEXHEAD_PITCH_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVC097_TEXHEAD_PITCH_LOD_ISO_QUALITY MW(114:114)
+#define NVC097_TEXHEAD_PITCH_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVC097_TEXHEAD_PITCH_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVC097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_MODIFIER MW(116:115)
+#define NVC097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVC097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVC097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVC097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVC097_TEXHEAD_PITCH_ANISO_SPREAD_SCALE MW(121:117)
+#define NVC097_TEXHEAD_PITCH_USE_HEADER_OPT_CONTROL MW(122:122)
+#define NVC097_TEXHEAD_PITCH_DEPTH_TEXTURE MW(123:123)
+#define NVC097_TEXHEAD_PITCH_MAX_MIP_LEVEL MW(127:124)
+#define NVC097_TEXHEAD_PITCH_WIDTH_MINUS_ONE MW(144:128)
+#define NVC097_TEXHEAD_PITCH_PITCH_BIT21 MW(145:145)
+#define NVC097_TEXHEAD_PITCH_HEIGHT_MINUS_ONE_BIT16 MW(146:146)
+#define NVC097_TEXHEAD_PITCH_ANISO_SPREAD_MAX_LOG2 MW(149:147)
+#define NVC097_TEXHEAD_PITCH_S_R_G_B_CONVERSION MW(150:150)
+#define NVC097_TEXHEAD_PITCH_TEXTURE_TYPE MW(154:151)
+#define NVC097_TEXHEAD_PITCH_TEXTURE_TYPE_ONE_D 0x00000000
+#define NVC097_TEXHEAD_PITCH_TEXTURE_TYPE_TWO_D 0x00000001
+#define NVC097_TEXHEAD_PITCH_TEXTURE_TYPE_THREE_D 0x00000002
+#define NVC097_TEXHEAD_PITCH_TEXTURE_TYPE_CUBEMAP 0x00000003
+#define NVC097_TEXHEAD_PITCH_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004
+#define NVC097_TEXHEAD_PITCH_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005
+#define NVC097_TEXHEAD_PITCH_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006
+#define NVC097_TEXHEAD_PITCH_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007
+#define NVC097_TEXHEAD_PITCH_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008
+#define NVC097_TEXHEAD_PITCH_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f
+#define NVC097_TEXHEAD_PITCH_SECTOR_PROMOTION MW(156:155)
+#define NVC097_TEXHEAD_PITCH_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVC097_TEXHEAD_PITCH_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVC097_TEXHEAD_PITCH_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVC097_TEXHEAD_PITCH_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVC097_TEXHEAD_PITCH_BORDER_SIZE MW(159:157)
+#define NVC097_TEXHEAD_PITCH_BORDER_SIZE_BORDER_SIZE_ONE 0x00000000
+#define NVC097_TEXHEAD_PITCH_BORDER_SIZE_BORDER_SIZE_TWO 0x00000001
+#define NVC097_TEXHEAD_PITCH_BORDER_SIZE_BORDER_SIZE_FOUR 0x00000002
+#define NVC097_TEXHEAD_PITCH_BORDER_SIZE_BORDER_SIZE_EIGHT 0x00000003
+#define NVC097_TEXHEAD_PITCH_BORDER_SIZE_BORDER_SAMPLER_COLOR 0x00000007
+#define NVC097_TEXHEAD_PITCH_HEIGHT_MINUS_ONE MW(175:160)
+#define NVC097_TEXHEAD_PITCH_DEPTH_MINUS_ONE MW(189:176)
+#define NVC097_TEXHEAD_PITCH_RESERVED5A MW(190:190)
+#define NVC097_TEXHEAD_PITCH_NORMALIZED_COORDS MW(191:191)
+#define NVC097_TEXHEAD_PITCH_RESERVED6Y MW(192:192)
+#define NVC097_TEXHEAD_PITCH_TRILIN_OPT MW(197:193)
+#define NVC097_TEXHEAD_PITCH_MIP_LOD_BIAS MW(210:198)
+#define NVC097_TEXHEAD_PITCH_ANISO_BIAS MW(214:211)
+#define NVC097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_FUNC MW(216:215)
+#define NVC097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVC097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVC097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVC097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVC097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_FUNC MW(218:217)
+#define NVC097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVC097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVC097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVC097_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVC097_TEXHEAD_PITCH_MAX_ANISOTROPY MW(221:219)
+#define NVC097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000
+#define NVC097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001
+#define NVC097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002
+#define NVC097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003
+#define NVC097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004
+#define NVC097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005
+#define NVC097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006
+#define NVC097_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007
+#define NVC097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_MODIFIER MW(223:222)
+#define NVC097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVC097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVC097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVC097_TEXHEAD_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVC097_TEXHEAD_PITCH_RES_VIEW_MIN_MIP_LEVEL MW(227:224)
+#define NVC097_TEXHEAD_PITCH_RES_VIEW_MAX_MIP_LEVEL MW(231:228)
+#define NVC097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT MW(235:232)
+#define NVC097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_1X1 0x00000000
+#define NVC097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X1 0x00000001
+#define NVC097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X2 0x00000002
+#define NVC097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2 0x00000003
+#define NVC097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2_D3D 0x00000004
+#define NVC097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X1_D3D 0x00000005
+#define NVC097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X4 0x00000006
+#define NVC097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X2_VC_4 0x00000008
+#define NVC097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X2_VC_12 0x00000009
+#define NVC097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2_VC_8 0x0000000a
+#define NVC097_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2_VC_24 0x0000000b
+#define NVC097_TEXHEAD_PITCH_MIN_LOD_CLAMP MW(247:236)
+#define NVC097_TEXHEAD_PITCH_RESERVED7Y MW(255:248)
+
+
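The PITCH header stores its base address as ADDRESS_BITS31TO5/ADDRESS_BITS48TO32 and its row stride as PITCH_BITS20TO5 (plus PITCH_BIT21), i.e. with the low five bits implied zero, so both values must be 32-byte aligned. A minimal sketch of packing them (the helper name and error convention are this example's own, not from the header):

    static int texhead_pitch_pack(NvU32 words[8], NvU64 gpu_va, NvU32 pitch)
    {
        if ((gpu_va & 0x1f) || (pitch & 0x1f))
            return -1;                 /* low 5 bits are not representable */

        /* ADDRESS_BITS31TO5 is MW(63:37): word 1, bits 31:5 (bits 4:0 are
         * RESERVED1A, MW(36:32)). */
        words[1] = (words[1] & 0x1fu) | ((NvU32)gpu_va & ~0x1fu);
        /* ADDRESS_BITS48TO32 is MW(80:64): word 2, bits 16:0. */
        words[2] = (words[2] & ~0x1ffffu) | (NvU32)((gpu_va >> 32) & 0x1ffff);
        /* PITCH_BITS20TO5 is MW(111:96): word 3, bits 15:0 (PITCH_BIT21,
         * MW(145:145), omitted here for brevity). */
        words[3] = (words[3] & ~0xffffu) | ((pitch >> 5) & 0xffffu);
        return 0;
    }
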
+/*
+** Texture Header State Pitch Color Key
+ */
+
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS MW(6:0)
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R32_G32_B32 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R32_G32 0x00000004
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R32_B24G8 0x00000005
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X8B8G8R8 0x00000007
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_A8B8G8R8 0x00000008
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_A2B10G10R10 0x00000009
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R16_G16 0x0000000c
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_G8R24 0x0000000d
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_G24R8 0x0000000e
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R32 0x0000000f
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_A4B4G4R4 0x00000012
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_A5B5G5R1 0x00000013
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_A1B5G5R5 0x00000014
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_B5G6R5 0x00000015
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_B6G5R5 0x00000016
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_G8R8 0x00000018
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R16 0x0000001b
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R8 0x0000001d
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_G4R4 0x0000001e
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R1 0x0000001f
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_BF10GF11RF11 0x00000021
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_G8B8G8R8 0x00000022
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_B8G8R8G8 0x00000023
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_DXT1 0x00000024
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_DXT23 0x00000025
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_DXT45 0x00000026
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_DXN1 0x00000027
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_DXN2 0x00000028
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_BC6H_SF16 0x00000010
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_BC6H_UF16 0x00000011
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_BC7U 0x00000017
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ETC2_RGB 0x00000006
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_EAC 0x00000019
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_EACX2 0x0000001a
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_Z24S8 0x00000029
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X8Z24 0x0000002a
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_S8Z24 0x0000002b
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X4V4Z24__COV4R4V 0x0000002c
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X4V4Z24__COV8R8V 0x0000002d
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_V8Z24__COV4R12V 0x0000002e
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32 0x0000002f
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X24S8 0x00000030
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X8Z24_X20V4S8__COV4R4V 0x00000031
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X8Z24_X20V4S8__COV8R8V 0x00000032
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X20V4X8__COV4R4V 0x00000033
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X20V4X8__COV8R8V 0x00000034
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X20V4S8__COV4R4V 0x00000035
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X20V4S8__COV8R8V 0x00000036
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X8Z24_X16V8S8__COV4R12V 0x00000037
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X16V8X8__COV4R12V 0x00000038
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X16V8S8__COV4R12V 0x00000039
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_Z16 0x0000003a
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_V8Z24__COV8R24V 0x0000003b
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X8Z24_X16V8S8__COV8R24V 0x0000003c
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X16V8X8__COV8R24V 0x0000003d
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X16V8S8__COV8R24V 0x0000003e
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046
+#define NVC097_TEXHEAD_PITCHCK_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f
+#define NVC097_TEXHEAD_PITCHCK_R_DATA_TYPE MW(9:7)
+#define NVC097_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_SINT 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_UINT 0x00000004
+#define NVC097_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVC097_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVC097_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCHCK_G_DATA_TYPE MW(12:10)
+#define NVC097_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_SINT 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_UINT 0x00000004
+#define NVC097_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVC097_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVC097_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCHCK_B_DATA_TYPE MW(15:13)
+#define NVC097_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_SINT 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_UINT 0x00000004
+#define NVC097_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVC097_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVC097_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCHCK_A_DATA_TYPE MW(18:16)
+#define NVC097_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_SINT 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_UINT 0x00000004
+#define NVC097_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVC097_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVC097_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCHCK_X_SOURCE MW(21:19)
+#define NVC097_TEXHEAD_PITCHCK_X_SOURCE_IN_ZERO 0x00000000
+#define NVC097_TEXHEAD_PITCHCK_X_SOURCE_IN_R 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_X_SOURCE_IN_G 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_X_SOURCE_IN_B 0x00000004
+#define NVC097_TEXHEAD_PITCHCK_X_SOURCE_IN_A 0x00000005
+#define NVC097_TEXHEAD_PITCHCK_X_SOURCE_IN_ONE_INT 0x00000006
+#define NVC097_TEXHEAD_PITCHCK_X_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCHCK_Y_SOURCE MW(24:22)
+#define NVC097_TEXHEAD_PITCHCK_Y_SOURCE_IN_ZERO 0x00000000
+#define NVC097_TEXHEAD_PITCHCK_Y_SOURCE_IN_R 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_Y_SOURCE_IN_G 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_Y_SOURCE_IN_B 0x00000004
+#define NVC097_TEXHEAD_PITCHCK_Y_SOURCE_IN_A 0x00000005
+#define NVC097_TEXHEAD_PITCHCK_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVC097_TEXHEAD_PITCHCK_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCHCK_Z_SOURCE MW(27:25)
+#define NVC097_TEXHEAD_PITCHCK_Z_SOURCE_IN_ZERO 0x00000000
+#define NVC097_TEXHEAD_PITCHCK_Z_SOURCE_IN_R 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_Z_SOURCE_IN_G 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_Z_SOURCE_IN_B 0x00000004
+#define NVC097_TEXHEAD_PITCHCK_Z_SOURCE_IN_A 0x00000005
+#define NVC097_TEXHEAD_PITCHCK_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVC097_TEXHEAD_PITCHCK_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCHCK_W_SOURCE MW(30:28)
+#define NVC097_TEXHEAD_PITCHCK_W_SOURCE_IN_ZERO 0x00000000
+#define NVC097_TEXHEAD_PITCHCK_W_SOURCE_IN_R 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_W_SOURCE_IN_G 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_W_SOURCE_IN_B 0x00000004
+#define NVC097_TEXHEAD_PITCHCK_W_SOURCE_IN_A 0x00000005
+#define NVC097_TEXHEAD_PITCHCK_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVC097_TEXHEAD_PITCHCK_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVC097_TEXHEAD_PITCHCK_PACK_COMPONENTS MW(31:31)
+#define NVC097_TEXHEAD_PITCHCK_RESERVED1A MW(36:32)
+#define NVC097_TEXHEAD_PITCHCK_ADDRESS_BITS31TO5 MW(63:37)
+#define NVC097_TEXHEAD_PITCHCK_ADDRESS_BITS48TO32 MW(80:64)
+#define NVC097_TEXHEAD_PITCHCK_RESERVED_ADDRESS MW(84:81)
+#define NVC097_TEXHEAD_PITCHCK_HEADER_VERSION MW(87:85)
+#define NVC097_TEXHEAD_PITCHCK_HEADER_VERSION_SELECT_ONE_D_BUFFER 0x00000000
+#define NVC097_TEXHEAD_PITCHCK_HEADER_VERSION_SELECT_PITCH_COLOR_KEY 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_HEADER_VERSION_SELECT_PITCH 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_HEADER_VERSION_SELECT_BLOCKLINEAR 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY 0x00000004
+#define NVC097_TEXHEAD_PITCHCK_RESERVED_HEADER_VERSION MW(88:88)
+#define NVC097_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH MW(92:89)
+#define NVC097_TEXHEAD_PITCHCK_RESERVED2A MW(95:93)
+#define NVC097_TEXHEAD_PITCHCK_PITCH_BITS20TO5 MW(111:96)
+#define NVC097_TEXHEAD_PITCHCK_LOD_ANISO_QUALITY2 MW(112:112)
+#define NVC097_TEXHEAD_PITCHCK_LOD_ANISO_QUALITY MW(113:113)
+#define NVC097_TEXHEAD_PITCHCK_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVC097_TEXHEAD_PITCHCK_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_LOD_ISO_QUALITY MW(114:114)
+#define NVC097_TEXHEAD_PITCHCK_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVC097_TEXHEAD_PITCHCK_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER MW(116:115)
+#define NVC097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVC097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_ANISO_SPREAD_SCALE MW(121:117)
+#define NVC097_TEXHEAD_PITCHCK_USE_HEADER_OPT_CONTROL MW(122:122)
+#define NVC097_TEXHEAD_PITCHCK_DEPTH_TEXTURE MW(123:123)
+#define NVC097_TEXHEAD_PITCHCK_MAX_MIP_LEVEL MW(127:124)
+#define NVC097_TEXHEAD_PITCHCK_WIDTH_MINUS_ONE MW(144:128)
+#define NVC097_TEXHEAD_PITCHCK_PITCH_BIT21 MW(145:145)
+#define NVC097_TEXHEAD_PITCHCK_HEIGHT_MINUS_ONE_BIT16 MW(146:146)
+#define NVC097_TEXHEAD_PITCHCK_ANISO_SPREAD_MAX_LOG2 MW(149:147)
+#define NVC097_TEXHEAD_PITCHCK_S_R_G_B_CONVERSION MW(150:150)
+#define NVC097_TEXHEAD_PITCHCK_TEXTURE_TYPE MW(154:151)
+#define NVC097_TEXHEAD_PITCHCK_TEXTURE_TYPE_ONE_D 0x00000000
+#define NVC097_TEXHEAD_PITCHCK_TEXTURE_TYPE_TWO_D 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_TEXTURE_TYPE_THREE_D 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_TEXTURE_TYPE_CUBEMAP 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004
+#define NVC097_TEXHEAD_PITCHCK_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005
+#define NVC097_TEXHEAD_PITCHCK_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006
+#define NVC097_TEXHEAD_PITCHCK_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007
+#define NVC097_TEXHEAD_PITCHCK_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008
+#define NVC097_TEXHEAD_PITCHCK_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f
+#define NVC097_TEXHEAD_PITCHCK_SECTOR_PROMOTION MW(156:155)
+#define NVC097_TEXHEAD_PITCHCK_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVC097_TEXHEAD_PITCHCK_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_BORDER_SIZE MW(159:157)
+#define NVC097_TEXHEAD_PITCHCK_BORDER_SIZE_BORDER_SIZE_ONE 0x00000000
+#define NVC097_TEXHEAD_PITCHCK_BORDER_SIZE_BORDER_SIZE_TWO 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_BORDER_SIZE_BORDER_SIZE_FOUR 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_BORDER_SIZE_BORDER_SIZE_EIGHT 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_BORDER_SIZE_BORDER_SAMPLER_COLOR 0x00000007
+#define NVC097_TEXHEAD_PITCHCK_HEIGHT_MINUS_ONE MW(175:160)
+#define NVC097_TEXHEAD_PITCHCK_DEPTH_MINUS_ONE MW(189:176)
+#define NVC097_TEXHEAD_PITCHCK_RESERVED5A MW(190:190)
+#define NVC097_TEXHEAD_PITCHCK_NORMALIZED_COORDS MW(191:191)
+#define NVC097_TEXHEAD_PITCHCK_COLOR_KEY_OP MW(192:192)
+#define NVC097_TEXHEAD_PITCHCK_TRILIN_OPT MW(197:193)
+#define NVC097_TEXHEAD_PITCHCK_MIP_LOD_BIAS MW(210:198)
+#define NVC097_TEXHEAD_PITCHCK_ANISO_BIAS MW(214:211)
+#define NVC097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_FUNC MW(216:215)
+#define NVC097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVC097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_FUNC MW(218:217)
+#define NVC097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVC097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_MAX_ANISOTROPY MW(221:219)
+#define NVC097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000
+#define NVC097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004
+#define NVC097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005
+#define NVC097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006
+#define NVC097_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007
+#define NVC097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_MODIFIER MW(223:222)
+#define NVC097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVC097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVC097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVC097_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVC097_TEXHEAD_PITCHCK_COLOR_KEY_VALUE MW(255:224)
+
+
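Relative to the PITCH layout above, PITCHCK repurposes two spots: bit 192 (RESERVED6Y in PITCH) becomes COLOR_KEY_OP, and word 7 (bits 255:224, the RES_VIEW/MULTI_SAMPLE_COUNT/MIN_LOD_CLAMP controls in PITCH) becomes COLOR_KEY_VALUE, selected via HEADER_VERSION. A hedged sketch of switching an already-built PITCH header over to color keying (helper name is illustrative, not from the header):

    static void texhead_enable_color_key(NvU32 words[8], NvU32 key)
    {
        /* HEADER_VERSION is MW(87:85): word 2, bits 23:21. */
        words[2] = (words[2] & ~(7u << 21)) | (0x1u << 21); /* ..._SELECT_PITCH_COLOR_KEY   */
        words[6] |= 1u;                                     /* COLOR_KEY_OP, MW(192:192)    */
        words[7] = key;                                     /* COLOR_KEY_VALUE, MW(255:224) */
    }
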
+/*
+** Texture Sampler State
+ */
+
+#define NVC097_TEXSAMP0_ADDRESS_U 2:0
+#define NVC097_TEXSAMP0_ADDRESS_U_WRAP 0x00000000
+#define NVC097_TEXSAMP0_ADDRESS_U_MIRROR 0x00000001
+#define NVC097_TEXSAMP0_ADDRESS_U_CLAMP_TO_EDGE 0x00000002
+#define NVC097_TEXSAMP0_ADDRESS_U_BORDER 0x00000003
+#define NVC097_TEXSAMP0_ADDRESS_U_CLAMP_OGL 0x00000004
+#define NVC097_TEXSAMP0_ADDRESS_U_MIRROR_ONCE_CLAMP_TO_EDGE 0x00000005
+#define NVC097_TEXSAMP0_ADDRESS_U_MIRROR_ONCE_BORDER 0x00000006
+#define NVC097_TEXSAMP0_ADDRESS_U_MIRROR_ONCE_CLAMP_OGL 0x00000007
+#define NVC097_TEXSAMP0_ADDRESS_V 5:3
+#define NVC097_TEXSAMP0_ADDRESS_V_WRAP 0x00000000
+#define NVC097_TEXSAMP0_ADDRESS_V_MIRROR 0x00000001
+#define NVC097_TEXSAMP0_ADDRESS_V_CLAMP_TO_EDGE 0x00000002
+#define NVC097_TEXSAMP0_ADDRESS_V_BORDER 0x00000003
+#define NVC097_TEXSAMP0_ADDRESS_V_CLAMP_OGL 0x00000004
+#define NVC097_TEXSAMP0_ADDRESS_V_MIRROR_ONCE_CLAMP_TO_EDGE 0x00000005
+#define NVC097_TEXSAMP0_ADDRESS_V_MIRROR_ONCE_BORDER 0x00000006
+#define NVC097_TEXSAMP0_ADDRESS_V_MIRROR_ONCE_CLAMP_OGL 0x00000007
+#define NVC097_TEXSAMP0_ADDRESS_P 8:6
+#define NVC097_TEXSAMP0_ADDRESS_P_WRAP 0x00000000
+#define NVC097_TEXSAMP0_ADDRESS_P_MIRROR 0x00000001
+#define NVC097_TEXSAMP0_ADDRESS_P_CLAMP_TO_EDGE 0x00000002
+#define NVC097_TEXSAMP0_ADDRESS_P_BORDER 0x00000003
+#define NVC097_TEXSAMP0_ADDRESS_P_CLAMP_OGL 0x00000004
+#define NVC097_TEXSAMP0_ADDRESS_P_MIRROR_ONCE_CLAMP_TO_EDGE 0x00000005
+#define NVC097_TEXSAMP0_ADDRESS_P_MIRROR_ONCE_BORDER 0x00000006
+#define NVC097_TEXSAMP0_ADDRESS_P_MIRROR_ONCE_CLAMP_OGL 0x00000007
+#define NVC097_TEXSAMP0_DEPTH_COMPARE 9:9
+#define NVC097_TEXSAMP0_DEPTH_COMPARE_FUNC 12:10
+#define NVC097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_NEVER 0x00000000
+#define NVC097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_LESS 0x00000001
+#define NVC097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_EQUAL 0x00000002
+#define NVC097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_LEQUAL 0x00000003
+#define NVC097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_GREATER 0x00000004
+#define NVC097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_NOTEQUAL 0x00000005
+#define NVC097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_GEQUAL 0x00000006
+#define NVC097_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_ALWAYS 0x00000007
+#define NVC097_TEXSAMP0_S_R_G_B_CONVERSION 13:13
+#define NVC097_TEXSAMP0_RESERVED0A 16:14
+#define NVC097_TEXSAMP0_RESERVED0B 19:17
+#define NVC097_TEXSAMP0_MAX_ANISOTROPY 22:20
+#define NVC097_TEXSAMP0_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000
+#define NVC097_TEXSAMP0_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001
+#define NVC097_TEXSAMP0_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002
+#define NVC097_TEXSAMP0_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003
+#define NVC097_TEXSAMP0_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004
+#define NVC097_TEXSAMP0_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005
+#define NVC097_TEXSAMP0_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006
+#define NVC097_TEXSAMP0_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007
+#define NVC097_TEXSAMP1_MAG_FILTER 2:0
+#define NVC097_TEXSAMP1_MAG_FILTER_MAG_POINT 0x00000001
+#define NVC097_TEXSAMP1_MAG_FILTER_MAG_LINEAR 0x00000002
+#define NVC097_TEXSAMP1_MAG_FILTER_VCAA_4_TAP 0x00000003
+#define NVC097_TEXSAMP1_MAG_FILTER_VCAA_8_TAP 0x00000004
+#define NVC097_TEXSAMP1_MIN_LOD_CLAMP_BEHAVIOR_FOR_NEAREST_MIP 3:3
+#define NVC097_TEXSAMP1_MIN_LOD_CLAMP_BEHAVIOR_FOR_NEAREST_MIP_INTEGER_AND_FRACTION 0x00000000
+#define NVC097_TEXSAMP1_MIN_LOD_CLAMP_BEHAVIOR_FOR_NEAREST_MIP_INTEGER_ONLY 0x00000001
+#define NVC097_TEXSAMP1_MIN_FILTER 5:4
+#define NVC097_TEXSAMP1_MIN_FILTER_MIN_POINT 0x00000001
+#define NVC097_TEXSAMP1_MIN_FILTER_MIN_LINEAR 0x00000002
+#define NVC097_TEXSAMP1_MIN_FILTER_MIN_ANISO 0x00000003
+#define NVC097_TEXSAMP1_MIP_FILTER 7:6
+#define NVC097_TEXSAMP1_MIP_FILTER_MIP_NONE 0x00000001
+#define NVC097_TEXSAMP1_MIP_FILTER_MIP_POINT 0x00000002
+#define NVC097_TEXSAMP1_MIP_FILTER_MIP_LINEAR 0x00000003
+#define NVC097_TEXSAMP1_CUBEMAP_INTERFACE_FILTERING 9:8
+#define NVC097_TEXSAMP1_CUBEMAP_INTERFACE_FILTERING_USE_WRAP 0x00000000
+#define NVC097_TEXSAMP1_CUBEMAP_INTERFACE_FILTERING_OVERRIDE_WRAP 0x00000001
+#define NVC097_TEXSAMP1_CUBEMAP_INTERFACE_FILTERING_AUTO_SPAN_SEAM 0x00000002
+#define NVC097_TEXSAMP1_CUBEMAP_INTERFACE_FILTERING_AUTO_CROSS_SEAM 0x00000003
+#define NVC097_TEXSAMP1_REDUCTION_FILTER 11:10
+#define NVC097_TEXSAMP1_REDUCTION_FILTER_RED_NONE 0x00000000
+#define NVC097_TEXSAMP1_REDUCTION_FILTER_RED_MINIMUM 0x00000001
+#define NVC097_TEXSAMP1_REDUCTION_FILTER_RED_MAXIMUM 0x00000002
+#define NVC097_TEXSAMP1_MIP_LOD_BIAS 24:12
+#define NVC097_TEXSAMP1_FLOAT_COORD_NORMALIZATION 25:25
+#define NVC097_TEXSAMP1_FLOAT_COORD_NORMALIZATION_USE_HEADER_SETTING 0x00000000
+#define NVC097_TEXSAMP1_FLOAT_COORD_NORMALIZATION_FORCE_UNNORMALIZED_COORDS 0x00000001
+#define NVC097_TEXSAMP1_TRILIN_OPT 30:26
+#define NVC097_TEXSAMP2_MIN_LOD_CLAMP 11:0
+#define NVC097_TEXSAMP2_MAX_LOD_CLAMP 23:12
+#define NVC097_TEXSAMP2_S_R_G_B_BORDER_COLOR_R 31:24
+#define NVC097_TEXSAMP3_RESERVED12 11:0
+#define NVC097_TEXSAMP3_S_R_G_B_BORDER_COLOR_G 19:12
+#define NVC097_TEXSAMP3_S_R_G_B_BORDER_COLOR_B 27:20
+#define NVC097_TEXSAMP4_BORDER_COLOR_R 31:0
+#define NVC097_TEXSAMP5_BORDER_COLOR_G 31:0
+#define NVC097_TEXSAMP6_BORDER_COLOR_B 31:0
+#define NVC097_TEXSAMP7_BORDER_COLOR_A 31:0
+
+
+
+#endif // #ifndef __CLC097TEX_H__
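Unlike the texture-header fields, the sampler fields above use plain hi:lo ranges within eight consecutive 32-bit words (TEXSAMP0..TEXSAMP7). A minimal sketch of building a trilinear, wrap-addressed sampler under that reading (the FIELD32 macro and the chosen values are this example's own):

    #define FIELD32(v, hi, lo) (((NvU32)(v) & ((1u << ((hi) - (lo) + 1)) - 1u)) << (lo))
    /* Note: not safe as-is for full 31:0 fields such as TEXSAMP4_BORDER_COLOR_R. */

    static void sampler_init_trilinear_wrap(NvU32 samp[8])
    {
        samp[0] = FIELD32(0, 2, 0)   /* ADDRESS_U_WRAP */
                | FIELD32(0, 5, 3)   /* ADDRESS_V_WRAP */
                | FIELD32(0, 8, 6);  /* ADDRESS_P_WRAP */
        samp[1] = FIELD32(2, 2, 0)   /* MAG_FILTER_MAG_LINEAR */
                | FIELD32(2, 5, 4)   /* MIN_FILTER_MIN_LINEAR */
                | FIELD32(3, 7, 6);  /* MIP_FILTER_MIP_LINEAR */
        samp[2] = FIELD32(0xfff, 23, 12); /* MAX_LOD_CLAMP left wide open (12-bit field) */
        samp[3] = samp[4] = samp[5] = samp[6] = samp[7] = 0;
    }
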
diff --git a/src/common/sdk/nvidia/inc/class/clc197.h b/src/common/sdk/nvidia/inc/class/clc197.h
new file mode 100644
index 0000000..b03d735
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc197.h
@@ -0,0 +1,4242 @@
+/*******************************************************************************
+ Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+
+#ifndef _cl_pascal_b_h_
+#define _cl_pascal_b_h_
+
+/* AUTO GENERATED FILE -- DO NOT EDIT */
+/* Command: ../../../../class/bin/sw_header.pl pascal_b */
+
+#include "nvtypes.h"
+
+#define PASCAL_B 0xC197
+
+#define NVC197_SET_OBJECT 0x0000
+#define NVC197_SET_OBJECT_CLASS_ID 15:0
+#define NVC197_SET_OBJECT_ENGINE_ID 20:16
+
+#define NVC197_NO_OPERATION 0x0100
+#define NVC197_NO_OPERATION_V 31:0
+
+#define NVC197_SET_NOTIFY_A 0x0104
+#define NVC197_SET_NOTIFY_A_ADDRESS_UPPER 7:0
+
+#define NVC197_SET_NOTIFY_B 0x0108
+#define NVC197_SET_NOTIFY_B_ADDRESS_LOWER 31:0
+
+#define NVC197_NOTIFY 0x010c
+#define NVC197_NOTIFY_TYPE 31:0
+#define NVC197_NOTIFY_TYPE_WRITE_ONLY 0x00000000
+#define NVC197_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001
+
+#define NVC197_WAIT_FOR_IDLE 0x0110
+#define NVC197_WAIT_FOR_IDLE_V 31:0
+
+#define NVC197_LOAD_MME_INSTRUCTION_RAM_POINTER 0x0114
+#define NVC197_LOAD_MME_INSTRUCTION_RAM_POINTER_V 31:0
+
+#define NVC197_LOAD_MME_INSTRUCTION_RAM 0x0118
+#define NVC197_LOAD_MME_INSTRUCTION_RAM_V 31:0
+
+#define NVC197_LOAD_MME_START_ADDRESS_RAM_POINTER 0x011c
+#define NVC197_LOAD_MME_START_ADDRESS_RAM_POINTER_V 31:0
+
+#define NVC197_LOAD_MME_START_ADDRESS_RAM 0x0120
+#define NVC197_LOAD_MME_START_ADDRESS_RAM_V 31:0
+
+#define NVC197_SET_MME_SHADOW_RAM_CONTROL 0x0124
+#define NVC197_SET_MME_SHADOW_RAM_CONTROL_MODE 1:0
+#define NVC197_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK 0x00000000
+#define NVC197_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK_WITH_FILTER 0x00000001
+#define NVC197_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH 0x00000002
+#define NVC197_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_REPLAY 0x00000003
+
+#define NVC197_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER 0x0128
+#define NVC197_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER_V 7:0
+
+#define NVC197_PEER_SEMAPHORE_RELEASE_OFFSET 0x012c
+#define NVC197_PEER_SEMAPHORE_RELEASE_OFFSET_V 31:0
+
+#define NVC197_SET_GLOBAL_RENDER_ENABLE_A 0x0130
+#define NVC197_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0
+
+#define NVC197_SET_GLOBAL_RENDER_ENABLE_B 0x0134
+#define NVC197_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0
+
+#define NVC197_SET_GLOBAL_RENDER_ENABLE_C 0x0138
+#define NVC197_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0
+#define NVC197_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000
+#define NVC197_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001
+#define NVC197_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002
+#define NVC197_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003
+#define NVC197_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NVC197_SEND_GO_IDLE 0x013c
+#define NVC197_SEND_GO_IDLE_V 31:0
+
+#define NVC197_PM_TRIGGER 0x0140
+#define NVC197_PM_TRIGGER_V 31:0
+
+#define NVC197_PM_TRIGGER_WFI 0x0144
+#define NVC197_PM_TRIGGER_WFI_V 31:0
+
+#define NVC197_FE_ATOMIC_SEQUENCE_BEGIN 0x0148
+#define NVC197_FE_ATOMIC_SEQUENCE_BEGIN_V 31:0
+
+#define NVC197_FE_ATOMIC_SEQUENCE_END 0x014c
+#define NVC197_FE_ATOMIC_SEQUENCE_END_V 31:0
+
+#define NVC197_SET_INSTRUMENTATION_METHOD_HEADER 0x0150
+#define NVC197_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0
+
+#define NVC197_SET_INSTRUMENTATION_METHOD_DATA 0x0154
+#define NVC197_SET_INSTRUMENTATION_METHOD_DATA_V 31:0
+
+#define NVC197_LINE_LENGTH_IN 0x0180
+#define NVC197_LINE_LENGTH_IN_VALUE 31:0
+
+#define NVC197_LINE_COUNT 0x0184
+#define NVC197_LINE_COUNT_VALUE 31:0
+
+#define NVC197_OFFSET_OUT_UPPER 0x0188
+#define NVC197_OFFSET_OUT_UPPER_VALUE 7:0
+
+#define NVC197_OFFSET_OUT 0x018c
+#define NVC197_OFFSET_OUT_VALUE 31:0
+
+#define NVC197_PITCH_OUT 0x0190
+#define NVC197_PITCH_OUT_VALUE 31:0
+
+#define NVC197_SET_DST_BLOCK_SIZE 0x0194
+#define NVC197_SET_DST_BLOCK_SIZE_WIDTH 3:0
+#define NVC197_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVC197_SET_DST_BLOCK_SIZE_HEIGHT 7:4
+#define NVC197_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVC197_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVC197_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC197_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC197_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC197_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC197_SET_DST_BLOCK_SIZE_DEPTH 11:8
+#define NVC197_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+#define NVC197_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001
+#define NVC197_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002
+#define NVC197_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003
+#define NVC197_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVC197_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005
+
+#define NVC197_SET_DST_WIDTH 0x0198
+#define NVC197_SET_DST_WIDTH_V 31:0
+
+#define NVC197_SET_DST_HEIGHT 0x019c
+#define NVC197_SET_DST_HEIGHT_V 31:0
+
+#define NVC197_SET_DST_DEPTH 0x01a0
+#define NVC197_SET_DST_DEPTH_V 31:0
+
+#define NVC197_SET_DST_LAYER 0x01a4
+#define NVC197_SET_DST_LAYER_V 31:0
+
+#define NVC197_SET_DST_ORIGIN_BYTES_X 0x01a8
+#define NVC197_SET_DST_ORIGIN_BYTES_X_V 20:0
+
+#define NVC197_SET_DST_ORIGIN_SAMPLES_Y 0x01ac
+#define NVC197_SET_DST_ORIGIN_SAMPLES_Y_V 16:0
+
+#define NVC197_LAUNCH_DMA 0x01b0
+#define NVC197_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0
+#define NVC197_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NVC197_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001
+#define NVC197_LAUNCH_DMA_COMPLETION_TYPE 5:4
+#define NVC197_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000
+#define NVC197_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001
+#define NVC197_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002
+#define NVC197_LAUNCH_DMA_INTERRUPT_TYPE 9:8
+#define NVC197_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000
+#define NVC197_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001
+#define NVC197_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12
+#define NVC197_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000
+#define NVC197_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001
+#define NVC197_LAUNCH_DMA_REDUCTION_ENABLE 1:1
+#define NVC197_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000
+#define NVC197_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001
+#define NVC197_LAUNCH_DMA_REDUCTION_OP 15:13
+#define NVC197_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000
+#define NVC197_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001
+#define NVC197_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002
+#define NVC197_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003
+#define NVC197_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004
+#define NVC197_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005
+#define NVC197_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006
+#define NVC197_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007
+#define NVC197_LAUNCH_DMA_REDUCTION_FORMAT 3:2
+#define NVC197_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000
+#define NVC197_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001
+#define NVC197_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6
+#define NVC197_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000
+#define NVC197_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001
+
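The LAUNCH_DMA data word is assembled from the independent bit ranges above. A hedged sketch (the NVC197_FLD helper is this example's own, not from the header) composing "pitch destination, flush-only completion, no interrupt, reduction off":

    #define NVC197_FLD(v, hi, lo) (((NvU32)(v) & ((1u << ((hi) - (lo) + 1)) - 1u)) << (lo))

    static NvU32 launch_dma_pitch_flush(void)
    {
        return NVC197_FLD(0x1, 0, 0)   /* DST_MEMORY_LAYOUT_PITCH    */
             | NVC197_FLD(0x1, 5, 4)   /* COMPLETION_TYPE_FLUSH_ONLY */
             | NVC197_FLD(0x0, 9, 8)   /* INTERRUPT_TYPE_NONE        */
             | NVC197_FLD(0x0, 1, 1)   /* REDUCTION_ENABLE_FALSE     */
             | NVC197_FLD(0x0, 6, 6);  /* SYSMEMBAR_DISABLE_FALSE    */
    }
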
NVC197_LOAD_INLINE_DATA_V 31:0 + +#define NVC197_SET_I2M_SEMAPHORE_A 0x01dc +#define NVC197_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC197_SET_I2M_SEMAPHORE_B 0x01e0 +#define NVC197_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC197_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVC197_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC197_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVC197_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVC197_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVC197_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVC197_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVC197_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVC197_SET_I2M_SPARE_NOOP03 0x01fc +#define NVC197_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVC197_RUN_DS_NOW 0x0200 +#define NVC197_RUN_DS_NOW_V 31:0 + +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS 0x0204 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD 4:0 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_INSTANTANEOUS 0x00000000 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16 0x00000001 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32 0x00000002 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__64 0x00000003 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__128 0x00000004 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__256 0x00000005 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__512 0x00000006 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1024 0x00000007 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2048 0x00000008 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4096 0x00000009 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__8192 0x0000000A +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16384 0x0000000B +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32768 0x0000000C +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__65536 0x0000000D +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__131072 0x0000000E +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__262144 0x0000000F +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__524288 0x00000010 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1048576 0x00000011 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2097152 0x00000012 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4194304 0x00000013 +#define NVC197_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_LATEZ_ALWAYS 0x0000001F + +#define NVC197_SET_GS_MODE 0x0208 +#define NVC197_SET_GS_MODE_TYPE 0:0 +#define NVC197_SET_GS_MODE_TYPE_ANY 0x00000000 +#define NVC197_SET_GS_MODE_TYPE_FAST_GS 0x00000001 + +#define NVC197_SET_ALIASED_LINE_WIDTH_ENABLE 0x020c +#define NVC197_SET_ALIASED_LINE_WIDTH_ENABLE_V 0:0 +#define NVC197_SET_ALIASED_LINE_WIDTH_ENABLE_V_FALSE 0x00000000 +#define NVC197_SET_ALIASED_LINE_WIDTH_ENABLE_V_TRUE 0x00000001 + +#define NVC197_SET_API_MANDATED_EARLY_Z 0x0210 +#define 
NVC197_SET_API_MANDATED_EARLY_Z_ENABLE 0:0 +#define NVC197_SET_API_MANDATED_EARLY_Z_ENABLE_FALSE 0x00000000 +#define NVC197_SET_API_MANDATED_EARLY_Z_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_GS_DM_FIFO 0x0214 +#define NVC197_SET_GS_DM_FIFO_SIZE_RASTER_ON 12:0 +#define NVC197_SET_GS_DM_FIFO_SIZE_RASTER_OFF 28:16 +#define NVC197_SET_GS_DM_FIFO_SPILL_ENABLED 31:31 +#define NVC197_SET_GS_DM_FIFO_SPILL_ENABLED_FALSE 0x00000000 +#define NVC197_SET_GS_DM_FIFO_SPILL_ENABLED_TRUE 0x00000001 + +#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS 0x0218 +#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY 5:4 +#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC197_INVALIDATE_SHADER_CACHES 0x021c +#define NVC197_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVC197_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVC197_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVC197_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVC197_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVC197_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVC197_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVC197_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVC197_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVC197_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVC197_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVC197_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVC197_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVC197_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVC197_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVC197_SET_INSTANCE_COUNT 0x0220 +#define NVC197_SET_INSTANCE_COUNT_V 31:0 + +#define NVC197_SET_POSITION_W_SCALED_OFFSET_ENABLE 0x0224 +#define NVC197_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE 0:0 +#define NVC197_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE_FALSE 0x00000000 +#define NVC197_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_GO_IDLE_TIMEOUT 0x022c +#define NVC197_SET_GO_IDLE_TIMEOUT_V 31:0 + +#define NVC197_INCREMENT_SYNC_POINT 0x02c8 +#define NVC197_INCREMENT_SYNC_POINT_INDEX 11:0 +#define NVC197_INCREMENT_SYNC_POINT_CLEAN_L2 16:16 +#define NVC197_INCREMENT_SYNC_POINT_CLEAN_L2_FALSE 0x00000000 +#define NVC197_INCREMENT_SYNC_POINT_CLEAN_L2_TRUE 0x00000001 +#define NVC197_INCREMENT_SYNC_POINT_CONDITION 20:20 +#define NVC197_INCREMENT_SYNC_POINT_CONDITION_STREAM_OUT_WRITES_DONE 0x00000000 +#define NVC197_INCREMENT_SYNC_POINT_CONDITION_ROP_WRITES_DONE 0x00000001 + +#define NVC197_SET_PRIM_CIRCULAR_BUFFER_THROTTLE 0x02d0 +#define NVC197_SET_PRIM_CIRCULAR_BUFFER_THROTTLE_PRIM_AREA 21:0 + +#define NVC197_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE 0x02d4 +#define NVC197_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE_V 0:0 + +#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE 0x02d8 +#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH 3:0 +#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT 7:4 +#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_FOUR_GOBS 
+#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE 0x02d8
+#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH 3:0
+#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT 7:4
+#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH 11:8
+#define NVC197_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+
+#define NVC197_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dc
+#define NVC197_SET_ALPHA_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 13:0
+
+#define NVC197_DECOMPRESS_SURFACE 0x02e0
+#define NVC197_DECOMPRESS_SURFACE_MRT_SELECT 2:0
+#define NVC197_DECOMPRESS_SURFACE_RT_ARRAY_INDEX 19:4
+
+#define NVC197_SET_ZCULL_ROP_BYPASS 0x02e4
+#define NVC197_SET_ZCULL_ROP_BYPASS_ENABLE 0:0
+#define NVC197_SET_ZCULL_ROP_BYPASS_ENABLE_FALSE 0x00000000
+#define NVC197_SET_ZCULL_ROP_BYPASS_ENABLE_TRUE 0x00000001
+#define NVC197_SET_ZCULL_ROP_BYPASS_NO_STALL 4:4
+#define NVC197_SET_ZCULL_ROP_BYPASS_NO_STALL_FALSE 0x00000000
+#define NVC197_SET_ZCULL_ROP_BYPASS_NO_STALL_TRUE 0x00000001
+#define NVC197_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING 8:8
+#define NVC197_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_FALSE 0x00000000
+#define NVC197_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_TRUE 0x00000001
+#define NVC197_SET_ZCULL_ROP_BYPASS_THRESHOLD 15:12
+
+#define NVC197_SET_ZCULL_SUBREGION 0x02e8
+#define NVC197_SET_ZCULL_SUBREGION_ENABLE 0:0
+#define NVC197_SET_ZCULL_SUBREGION_ENABLE_FALSE 0x00000000
+#define NVC197_SET_ZCULL_SUBREGION_ENABLE_TRUE 0x00000001
+#define NVC197_SET_ZCULL_SUBREGION_NORMALIZED_ALIQUOTS 27:4
+
+#define NVC197_SET_RASTER_BOUNDING_BOX 0x02ec
+#define NVC197_SET_RASTER_BOUNDING_BOX_MODE 0:0
+#define NVC197_SET_RASTER_BOUNDING_BOX_MODE_BOUNDING_BOX 0x00000000
+#define NVC197_SET_RASTER_BOUNDING_BOX_MODE_FULL_VIEWPORT 0x00000001
+#define NVC197_SET_RASTER_BOUNDING_BOX_PAD 11:4
+
+#define NVC197_PEER_SEMAPHORE_RELEASE 0x02f0
+#define NVC197_PEER_SEMAPHORE_RELEASE_V 31:0
+
+#define NVC197_SET_ITERATED_BLEND_OPTIMIZATION 0x02f4
+#define NVC197_SET_ITERATED_BLEND_OPTIMIZATION_NOOP 1:0
+#define NVC197_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_NEVER 0x00000000
+#define NVC197_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0000 0x00000001
+#define NVC197_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_ALPHA_0 0x00000002
+#define NVC197_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0001 0x00000003
+
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION 0x02f8
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_SUBREGION_ID 7:0
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_ALIQUOTS 23:8
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT 27:24
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16X2_4X4 0x00000000
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X16_4X4 0x00000001
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X2 0x00000002
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_2X4 0x00000003
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X8_4X4 0x00000004
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_4X2 0x00000005
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_2X4 0x00000006
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X8 0x00000007
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_2X2 0x00000008
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_4X2 0x00000009
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_2X4 0x0000000A
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_8X8_2X2 0x0000000B
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_1X1 0x0000000C
+#define NVC197_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_NONE 0x0000000F
+
+#define NVC197_ASSIGN_ZCULL_SUBREGIONS 0x02fc
+#define NVC197_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM 1:0
+#define NVC197_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Static 0x00000000
+#define NVC197_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Adaptive 0x00000001
+
+#define NVC197_SET_PS_OUTPUT_SAMPLE_MASK_USAGE 0x0300
+#define NVC197_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE 0:0
+#define NVC197_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_FALSE 0x00000000
+#define NVC197_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_TRUE 0x00000001
+#define NVC197_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE 1:1
+#define NVC197_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000
+#define NVC197_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001
+
+#define NVC197_DRAW_ZERO_INDEX 0x0304
+#define NVC197_DRAW_ZERO_INDEX_COUNT 31:0
+
+#define NVC197_SET_L1_CONFIGURATION 0x0308
+#define NVC197_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY 2:0
+#define NVC197_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_16KB 0x00000001
+#define NVC197_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB 0x00000003
+
+#define NVC197_SET_RENDER_ENABLE_CONTROL 0x030c
+#define NVC197_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER 0:0
+#define NVC197_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_FALSE 0x00000000
+#define NVC197_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_TRUE 0x00000001
+
+#define NVC197_SET_SPA_VERSION 0x0310
+#define NVC197_SET_SPA_VERSION_MINOR 7:0
+#define NVC197_SET_SPA_VERSION_MAJOR 15:8
+
+#define NVC197_SET_IEEE_CLEAN_UPDATE 0x0314
+#define NVC197_SET_IEEE_CLEAN_UPDATE_ENABLE 0:0
+#define NVC197_SET_IEEE_CLEAN_UPDATE_ENABLE_FALSE 0x00000000
+#define NVC197_SET_IEEE_CLEAN_UPDATE_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_SNAP_GRID_LINE 0x0318
+#define NVC197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL 3:0
+#define NVC197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001
+#define NVC197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002
+#define NVC197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003
+#define NVC197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004
+#define NVC197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005
+#define NVC197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006
+#define NVC197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007
+#define NVC197_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008
+#define NVC197_SET_SNAP_GRID_LINE_ROUNDING_MODE 8:8
+#define NVC197_SET_SNAP_GRID_LINE_ROUNDING_MODE_RTNE 0x00000000
+#define NVC197_SET_SNAP_GRID_LINE_ROUNDING_MODE_TESLA 0x00000001
+
+#define NVC197_SET_SNAP_GRID_NON_LINE 0x031c
+#define NVC197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL 3:0
+#define NVC197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001
+#define NVC197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002
+#define NVC197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003
+#define NVC197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004
+#define NVC197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005
+#define NVC197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006
+#define NVC197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007
+#define NVC197_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008
+#define NVC197_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE 8:8
+#define NVC197_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_RTNE 0x00000000
+#define NVC197_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_TESLA 0x00000001
+
+#define NVC197_SET_TESSELLATION_PARAMETERS 0x0320
+#define NVC197_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE 1:0
+#define NVC197_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_ISOLINE 0x00000000
+#define NVC197_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_TRIANGLE 0x00000001
+#define NVC197_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_QUAD 0x00000002
+#define NVC197_SET_TESSELLATION_PARAMETERS_SPACING 5:4
+#define NVC197_SET_TESSELLATION_PARAMETERS_SPACING_INTEGER 0x00000000
+#define NVC197_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_ODD 0x00000001
+#define NVC197_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_EVEN 0x00000002
+#define NVC197_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES 9:8
+#define NVC197_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_POINTS 0x00000000
+#define NVC197_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_LINES 0x00000001
+#define NVC197_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CW 0x00000002
+#define NVC197_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CCW 0x00000003
+
+#define NVC197_SET_TESSELLATION_LOD_U0_OR_DENSITY 0x0324
+#define NVC197_SET_TESSELLATION_LOD_U0_OR_DENSITY_V 31:0
+
+#define NVC197_SET_TESSELLATION_LOD_V0_OR_DETAIL 0x0328
+#define NVC197_SET_TESSELLATION_LOD_V0_OR_DETAIL_V 31:0
+
+#define NVC197_SET_TESSELLATION_LOD_U1_OR_W0 0x032c
+#define NVC197_SET_TESSELLATION_LOD_U1_OR_W0_V 31:0
+
+#define NVC197_SET_TESSELLATION_LOD_V1 0x0330
+#define NVC197_SET_TESSELLATION_LOD_V1_V 31:0
+
+#define NVC197_SET_TG_LOD_INTERIOR_U 0x0334
+#define NVC197_SET_TG_LOD_INTERIOR_U_V 31:0
+
+#define NVC197_SET_TG_LOD_INTERIOR_V 0x0338
+#define NVC197_SET_TG_LOD_INTERIOR_V_V 31:0
+
+#define NVC197_RESERVED_TG07 0x033c
+#define NVC197_RESERVED_TG07_V 0:0
+
+#define NVC197_RESERVED_TG08 0x0340
+#define NVC197_RESERVED_TG08_V 0:0
+
+#define NVC197_RESERVED_TG09 0x0344
+#define NVC197_RESERVED_TG09_V 0:0
+
+#define NVC197_RESERVED_TG10 0x0348
+#define NVC197_RESERVED_TG10_V 0:0
+
+#define NVC197_RESERVED_TG11 0x034c
+#define NVC197_RESERVED_TG11_V 0:0
+
+#define NVC197_RESERVED_TG12 0x0350
+#define NVC197_RESERVED_TG12_V 0:0
+
+#define NVC197_RESERVED_TG13 0x0354
+#define NVC197_RESERVED_TG13_V 0:0
+
+#define NVC197_RESERVED_TG14 0x0358
+#define NVC197_RESERVED_TG14_V 0:0
+
+#define NVC197_RESERVED_TG15 0x035c
+#define NVC197_RESERVED_TG15_V 0:0
+
+#define NVC197_SET_SUBTILING_PERF_KNOB_A 0x0360
+#define NVC197_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_REGISTER_FILE_PER_SUBTILE 7:0
+#define NVC197_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_PIXEL_OUTPUT_BUFFER_PER_SUBTILE 15:8
+#define NVC197_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_TRIANGLE_RAM_PER_SUBTILE 23:16
+#define NVC197_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_MAX_QUADS_PER_SUBTILE 31:24
+
+#define NVC197_SET_SUBTILING_PERF_KNOB_B 0x0364
+#define NVC197_SET_SUBTILING_PERF_KNOB_B_FRACTION_OF_MAX_PRIMITIVES_PER_SUBTILE 7:0
+
+#define NVC197_SET_SUBTILING_PERF_KNOB_C 0x0368
+#define NVC197_SET_SUBTILING_PERF_KNOB_C_RESERVED 0:0
+
+#define NVC197_SET_ZCULL_SUBREGION_TO_REPORT 0x036c
+#define NVC197_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE 0:0
+#define NVC197_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_FALSE 0x00000000
+#define NVC197_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_TRUE 0x00000001
+#define NVC197_SET_ZCULL_SUBREGION_TO_REPORT_SUBREGION_ID 11:4
+
+#define NVC197_SET_ZCULL_SUBREGION_REPORT_TYPE 0x0370
+#define NVC197_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE 0:0
+#define NVC197_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_FALSE 0x00000000
+#define NVC197_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_TRUE 0x00000001
+#define NVC197_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE 6:4
+#define NVC197_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST 0x00000000
+#define NVC197_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_NO_ACCEPT 0x00000001
+#define NVC197_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_LATE_Z 0x00000002
+#define NVC197_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_STENCIL_TEST 0x00000003
+
+#define NVC197_SET_BALANCED_PRIMITIVE_WORKLOAD 0x0374
+#define NVC197_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE 0:0
+#define NVC197_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_FALSE 0x00000000
+#define NVC197_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_TRUE 0x00000001
+#define NVC197_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE 4:4
+#define NVC197_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_FALSE 0x00000000
+#define NVC197_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_TRUE 0x00000001
+#define NVC197_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE 8:8
+#define NVC197_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE_FALSE 0x00000000
+#define NVC197_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE_TRUE 0x00000001
+#define NVC197_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE 9:9
+#define NVC197_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE_FALSE 0x00000000
+#define NVC197_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE_TRUE 0x00000001
+
+#define NVC197_SET_MAX_PATCHES_PER_BATCH 0x0378
+#define NVC197_SET_MAX_PATCHES_PER_BATCH_V 5:0
+
+#define NVC197_SET_RASTER_ENABLE 0x037c
+#define NVC197_SET_RASTER_ENABLE_V 0:0
+#define NVC197_SET_RASTER_ENABLE_V_FALSE 0x00000000
+#define NVC197_SET_RASTER_ENABLE_V_TRUE 0x00000001
+
+#define NVC197_SET_STREAM_OUT_BUFFER_ENABLE(j) (0x0380+(j)*32)
+#define NVC197_SET_STREAM_OUT_BUFFER_ENABLE_V 0:0
+#define NVC197_SET_STREAM_OUT_BUFFER_ENABLE_V_FALSE 0x00000000
+#define NVC197_SET_STREAM_OUT_BUFFER_ENABLE_V_TRUE 0x00000001
+
+#define NVC197_SET_STREAM_OUT_BUFFER_ADDRESS_A(j) (0x0384+(j)*32)
+#define NVC197_SET_STREAM_OUT_BUFFER_ADDRESS_A_UPPER 7:0
+
+#define NVC197_SET_STREAM_OUT_BUFFER_ADDRESS_B(j) (0x0388+(j)*32)
+#define NVC197_SET_STREAM_OUT_BUFFER_ADDRESS_B_LOWER 31:0
+
+#define NVC197_SET_STREAM_OUT_BUFFER_SIZE(j) (0x038c+(j)*32)
+#define NVC197_SET_STREAM_OUT_BUFFER_SIZE_BYTES 31:0
+
+#define NVC197_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER(j) (0x0390+(j)*32)
+#define NVC197_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER_START_OFFSET 31:0
+
+#define NVC197_SET_POSITION_W_SCALED_OFFSET_SCALE_A(j) (0x0400+(j)*16)
+#define NVC197_SET_POSITION_W_SCALED_OFFSET_SCALE_A_V 31:0
+
+#define NVC197_SET_POSITION_W_SCALED_OFFSET_SCALE_B(j) (0x0404+(j)*16)
+#define NVC197_SET_POSITION_W_SCALED_OFFSET_SCALE_B_V 31:0
+
+#define NVC197_SET_POSITION_W_SCALED_OFFSET_RESERVED_A(j) (0x0408+(j)*16)
+#define NVC197_SET_POSITION_W_SCALED_OFFSET_RESERVED_A_V 31:0
+
+#define NVC197_SET_POSITION_W_SCALED_OFFSET_RESERVED_B(j) (0x040c+(j)*16)
+#define NVC197_SET_POSITION_W_SCALED_OFFSET_RESERVED_B_V 31:0
+
+#define NVC197_SET_STREAM_OUT_CONTROL_STREAM(j) (0x0700+(j)*16)
+#define NVC197_SET_STREAM_OUT_CONTROL_STREAM_SELECT 1:0
+
+#define NVC197_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT(j) (0x0704+(j)*16)
+#define NVC197_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT_MAX 7:0
+
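The (j)-parameterized methods above are register arrays with a fixed byte stride per index (32 bytes per stream-out buffer here), and buffer addresses are split into an 8-bit _UPPER word (address bits 39:32) and a 32-bit _LOWER word. A sketch, where push_method() stands in for whatever pushbuffer writer the caller uses (it is hypothetical, not part of this patch):

    #include <stdint.h>

    extern void push_method(uint32_t method, uint32_t data);  /* hypothetical writer */

    static void bind_so_buffer(int j, uint64_t gpu_va, uint32_t size_bytes)
    {
        push_method(NVC197_SET_STREAM_OUT_BUFFER_ADDRESS_A(j),
                    (uint32_t)(gpu_va >> 32) & 0xFF);  /* address bits 39:32 */
        push_method(NVC197_SET_STREAM_OUT_BUFFER_ADDRESS_B(j),
                    (uint32_t)gpu_va);                 /* address bits 31:0  */
        push_method(NVC197_SET_STREAM_OUT_BUFFER_SIZE(j), size_bytes);
    }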
+#define NVC197_SET_STREAM_OUT_CONTROL_STRIDE(j) (0x0708+(j)*16)
+#define NVC197_SET_STREAM_OUT_CONTROL_STRIDE_BYTES 31:0
+
+#define NVC197_SET_RASTER_INPUT 0x0740
+#define NVC197_SET_RASTER_INPUT_STREAM_SELECT 1:0
+
+#define NVC197_SET_STREAM_OUTPUT 0x0744
+#define NVC197_SET_STREAM_OUTPUT_ENABLE 0:0
+#define NVC197_SET_STREAM_OUTPUT_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STREAM_OUTPUT_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE 0x0748
+#define NVC197_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE 0:0
+#define NVC197_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_FALSE 0x00000000
+#define NVC197_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_ALPHA_FRACTION 0x074c
+#define NVC197_SET_ALPHA_FRACTION_V 7:0
+
+#define NVC197_SET_HYBRID_ANTI_ALIAS_CONTROL 0x0754
+#define NVC197_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES 3:0
+#define NVC197_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID 4:4
+#define NVC197_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_FRAGMENT 0x00000000
+#define NVC197_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_PASS 0x00000001
+#define NVC197_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES_EXTENDED 5:5
+
+#define NVC197_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c
+#define NVC197_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0
+
+#define NVC197_SET_SHADER_LOCAL_MEMORY_A 0x0790
+#define NVC197_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0
+
+#define NVC197_SET_SHADER_LOCAL_MEMORY_B 0x0794
+#define NVC197_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0
+
+#define NVC197_SET_SHADER_LOCAL_MEMORY_C 0x0798
+#define NVC197_SET_SHADER_LOCAL_MEMORY_C_SIZE_UPPER 5:0
+
+#define NVC197_SET_SHADER_LOCAL_MEMORY_D 0x079c
+#define NVC197_SET_SHADER_LOCAL_MEMORY_D_SIZE_LOWER 31:0
+
+#define NVC197_SET_SHADER_LOCAL_MEMORY_E 0x07a0
+#define NVC197_SET_SHADER_LOCAL_MEMORY_E_DEFAULT_SIZE_PER_WARP 25:0
+
+#define NVC197_SET_COLOR_ZERO_BANDWIDTH_CLEAR 0x07a4
+#define NVC197_SET_COLOR_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVC197_SET_Z_ZERO_BANDWIDTH_CLEAR 0x07a8
+#define NVC197_SET_Z_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVC197_SET_ISBE_SAVE_RESTORE_PROGRAM 0x07ac
+#define NVC197_SET_ISBE_SAVE_RESTORE_PROGRAM_OFFSET 31:0
+
+#define NVC197_SET_STENCIL_ZERO_BANDWIDTH_CLEAR 0x07b0
+#define NVC197_SET_STENCIL_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVC197_SET_ZCULL_REGION_SIZE_A 0x07c0
+#define NVC197_SET_ZCULL_REGION_SIZE_A_WIDTH 15:0
+
+#define NVC197_SET_ZCULL_REGION_SIZE_B 0x07c4
+#define NVC197_SET_ZCULL_REGION_SIZE_B_HEIGHT 15:0
+
+#define NVC197_SET_ZCULL_REGION_SIZE_C 0x07c8
+#define NVC197_SET_ZCULL_REGION_SIZE_C_DEPTH 15:0
+
+#define NVC197_SET_ZCULL_REGION_PIXEL_OFFSET_C 0x07cc
+#define NVC197_SET_ZCULL_REGION_PIXEL_OFFSET_C_DEPTH 15:0
+
+#define NVC197_SET_CULL_BEFORE_FETCH 0x07dc
+#define NVC197_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE 0:0
+#define NVC197_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_FALSE 0x00000000
+#define NVC197_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_TRUE 0x00000001
+
+#define NVC197_SET_ZCULL_REGION_LOCATION 0x07e0
+#define NVC197_SET_ZCULL_REGION_LOCATION_START_ALIQUOT 15:0
+#define NVC197_SET_ZCULL_REGION_LOCATION_ALIQUOT_COUNT 31:16
+
+#define NVC197_SET_ZCULL_REGION_ALIQUOTS 0x07e4
+#define NVC197_SET_ZCULL_REGION_ALIQUOTS_PER_LAYER 15:0
+
+#define NVC197_SET_ZCULL_STORAGE_A 0x07e8
+#define NVC197_SET_ZCULL_STORAGE_A_ADDRESS_UPPER 7:0
+
+#define NVC197_SET_ZCULL_STORAGE_B 0x07ec
+#define NVC197_SET_ZCULL_STORAGE_B_ADDRESS_LOWER 31:0
+
+#define NVC197_SET_ZCULL_STORAGE_C 0x07f0
+#define NVC197_SET_ZCULL_STORAGE_C_LIMIT_ADDRESS_UPPER 7:0
+
+#define NVC197_SET_ZCULL_STORAGE_D 0x07f4
+#define NVC197_SET_ZCULL_STORAGE_D_LIMIT_ADDRESS_LOWER 31:0
+
+#define NVC197_SET_ZT_READ_ONLY 0x07f8
+#define NVC197_SET_ZT_READ_ONLY_ENABLE_Z 0:0
+#define NVC197_SET_ZT_READ_ONLY_ENABLE_Z_FALSE 0x00000000
+#define NVC197_SET_ZT_READ_ONLY_ENABLE_Z_TRUE 0x00000001
+#define NVC197_SET_ZT_READ_ONLY_ENABLE_STENCIL 4:4
+#define NVC197_SET_ZT_READ_ONLY_ENABLE_STENCIL_FALSE 0x00000000
+#define NVC197_SET_ZT_READ_ONLY_ENABLE_STENCIL_TRUE 0x00000001
+
+#define NVC197_SET_COLOR_TARGET_A(j) (0x0800+(j)*64)
+#define NVC197_SET_COLOR_TARGET_A_OFFSET_UPPER 7:0
+
+#define NVC197_SET_COLOR_TARGET_B(j) (0x0804+(j)*64)
+#define NVC197_SET_COLOR_TARGET_B_OFFSET_LOWER 31:0
+
+#define NVC197_SET_COLOR_TARGET_WIDTH(j) (0x0808+(j)*64)
+#define NVC197_SET_COLOR_TARGET_WIDTH_V 27:0
+
+#define NVC197_SET_COLOR_TARGET_HEIGHT(j) (0x080c+(j)*64)
+#define NVC197_SET_COLOR_TARGET_HEIGHT_V 16:0
+
+#define NVC197_SET_COLOR_TARGET_FORMAT(j) (0x0810+(j)*64)
+#define NVC197_SET_COLOR_TARGET_FORMAT_V 7:0
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_DISABLED 0x00000000
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_AS32 0x000000C1
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_AU32 0x000000C2
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_X32 0x000000C4
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_X32 0x000000C5
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_R16_G16_B16_A16 0x000000C6
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RS16_GS16_BS16_AS16 0x000000C8
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RU16_GU16_BU16_AU16 0x000000C9
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RF32_GF32 0x000000CB
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RS32_GS32 0x000000CC
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RU32_GU32 0x000000CD
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_A8R8G8B8 0x000000CF
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_A8RL8GL8BL8 0x000000D0
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_A2B10G10R10 0x000000D1
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_AU2BU10GU10RU10 0x000000D2
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_A8B8G8R8 0x000000D5
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_A8BL8GL8RL8 0x000000D6
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_AN8BN8GN8RN8 0x000000D7
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_AS8BS8GS8RS8 0x000000D8
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_AU8BU8GU8RU8 0x000000D9
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_R16_G16 0x000000DA
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RN16_GN16 0x000000DB
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RS16_GS16 0x000000DC
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RU16_GU16 0x000000DD
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RF16_GF16 0x000000DE
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_A2R10G10B10 0x000000DF
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_BF10GF11RF11 0x000000E0
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RS32 0x000000E3
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RU32 0x000000E4
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RF32 0x000000E5
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_X8R8G8B8 0x000000E6
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_X8RL8GL8BL8 0x000000E7
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_R5G6B5 0x000000E8
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_A1R5G5B5 0x000000E9
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_G8R8 0x000000EA
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_GN8RN8 0x000000EB
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_GS8RS8 0x000000EC
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_GU8RU8 0x000000ED
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_R16 0x000000EE
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RN16 0x000000EF
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RS16 0x000000F0
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RU16 0x000000F1
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RF16 0x000000F2
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_R8 0x000000F3
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RN8 0x000000F4
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RS8 0x000000F5
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RU8 0x000000F6
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_A8 0x000000F7
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_X1R5G5B5 0x000000F8
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_X8B8G8R8 0x000000F9
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_X8BL8GL8RL8 0x000000FA
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_Z1R5G5B5 0x000000FB
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_O1R5G5B5 0x000000FC
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_Z8R8G8B8 0x000000FD
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_O8R8G8B8 0x000000FE
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_R32 0x000000FF
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_A16 0x00000040
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_AF16 0x00000041
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_AF32 0x00000042
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_A8R8 0x00000043
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_R16_A16 0x00000044
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RF16_AF16 0x00000045
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_RF32_AF32 0x00000046
+#define NVC197_SET_COLOR_TARGET_FORMAT_V_B8G8R8A8 0x00000047
+
+#define NVC197_SET_COLOR_TARGET_MEMORY(j) (0x0814+(j)*64)
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH 3:0
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH_ONE_GOB 0x00000000
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT 7:4
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_ONE_GOB 0x00000000
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_TWO_GOBS 0x00000001
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH 11:8
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_ONE_GOB 0x00000000
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_TWO_GOBS 0x00000001
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_FOUR_GOBS 0x00000002
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_EIGHT_GOBS 0x00000003
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVC197_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005
+#define NVC197_SET_COLOR_TARGET_MEMORY_LAYOUT 12:12
+#define NVC197_SET_COLOR_TARGET_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NVC197_SET_COLOR_TARGET_MEMORY_LAYOUT_PITCH 0x00000001
+#define NVC197_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL 16:16
+#define NVC197_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000
+#define NVC197_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_DEPTH_SIZE 0x00000001
+
+#define NVC197_SET_COLOR_TARGET_THIRD_DIMENSION(j) (0x0818+(j)*64)
+#define NVC197_SET_COLOR_TARGET_THIRD_DIMENSION_V 27:0
+
+#define NVC197_SET_COLOR_TARGET_ARRAY_PITCH(j) (0x081c+(j)*64)
+#define NVC197_SET_COLOR_TARGET_ARRAY_PITCH_V 31:0
+
+#define NVC197_SET_COLOR_TARGET_LAYER(j) (0x0820+(j)*64)
+#define NVC197_SET_COLOR_TARGET_LAYER_OFFSET 15:0
+
+#define NVC197_SET_COLOR_TARGET_MARK(j) (0x0824+(j)*64)
+#define NVC197_SET_COLOR_TARGET_MARK_IEEE_CLEAN 0:0
+#define NVC197_SET_COLOR_TARGET_MARK_IEEE_CLEAN_FALSE 0x00000000
+#define NVC197_SET_COLOR_TARGET_MARK_IEEE_CLEAN_TRUE 0x00000001
+
+#define NVC197_SET_VIEWPORT_SCALE_X(j) (0x0a00+(j)*32)
+#define NVC197_SET_VIEWPORT_SCALE_X_V 31:0
+
+#define NVC197_SET_VIEWPORT_SCALE_Y(j) (0x0a04+(j)*32)
+#define NVC197_SET_VIEWPORT_SCALE_Y_V 31:0
+
+#define NVC197_SET_VIEWPORT_SCALE_Z(j) (0x0a08+(j)*32)
+#define NVC197_SET_VIEWPORT_SCALE_Z_V 31:0
+
+#define NVC197_SET_VIEWPORT_OFFSET_X(j) (0x0a0c+(j)*32)
+#define NVC197_SET_VIEWPORT_OFFSET_X_V 31:0
+
+#define NVC197_SET_VIEWPORT_OFFSET_Y(j) (0x0a10+(j)*32)
+#define NVC197_SET_VIEWPORT_OFFSET_Y_V 31:0
+
+#define NVC197_SET_VIEWPORT_OFFSET_Z(j) (0x0a14+(j)*32)
+#define NVC197_SET_VIEWPORT_OFFSET_Z_V 31:0
+
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE(j) (0x0a18+(j)*32)
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_X 2:0
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_X 0x00000000
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_X 0x00000001
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Y 0x00000002
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Y 0x00000003
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Z 0x00000004
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Z 0x00000005
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_W 0x00000006
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_W 0x00000007
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y 6:4
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_X 0x00000000
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_X 0x00000001
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Y 0x00000002
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Y 0x00000003
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Z 0x00000004
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Z 0x00000005
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_W 0x00000006
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_W 0x00000007
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z 10:8
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_X 0x00000000
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_X 0x00000001
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Y 0x00000002
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Y 0x00000003
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Z 0x00000004
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Z 0x00000005
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_W 0x00000006
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_W 0x00000007
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_W 14:12
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_X 0x00000000
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_X 0x00000001
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Y 0x00000002
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Y 0x00000003
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Z 0x00000004
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Z 0x00000005
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_W 0x00000006
+#define NVC197_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_W 0x00000007
+
+#define NVC197_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION(j) (0x0a1c+(j)*32)
+#define NVC197_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_X_BITS 4:0
+#define NVC197_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_Y_BITS 12:8
+
+#define NVC197_SET_VIEWPORT_CLIP_HORIZONTAL(j) (0x0c00+(j)*16)
+#define NVC197_SET_VIEWPORT_CLIP_HORIZONTAL_X0 15:0
+#define NVC197_SET_VIEWPORT_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVC197_SET_VIEWPORT_CLIP_VERTICAL(j) (0x0c04+(j)*16)
+#define NVC197_SET_VIEWPORT_CLIP_VERTICAL_Y0 15:0
+#define NVC197_SET_VIEWPORT_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVC197_SET_VIEWPORT_CLIP_MIN_Z(j) (0x0c08+(j)*16)
+#define NVC197_SET_VIEWPORT_CLIP_MIN_Z_V 31:0
+
+#define NVC197_SET_VIEWPORT_CLIP_MAX_Z(j) (0x0c0c+(j)*16)
+#define NVC197_SET_VIEWPORT_CLIP_MAX_Z_V 31:0
+
+#define NVC197_SET_WINDOW_CLIP_HORIZONTAL(j) (0x0d00+(j)*8)
+#define NVC197_SET_WINDOW_CLIP_HORIZONTAL_XMIN 15:0
+#define NVC197_SET_WINDOW_CLIP_HORIZONTAL_XMAX 31:16
+
+#define NVC197_SET_WINDOW_CLIP_VERTICAL(j) (0x0d04+(j)*8)
+#define NVC197_SET_WINDOW_CLIP_VERTICAL_YMIN 15:0
+#define NVC197_SET_WINDOW_CLIP_VERTICAL_YMAX 31:16
+
+#define NVC197_SET_CLIP_ID_EXTENT_X(j) (0x0d40+(j)*8)
+#define NVC197_SET_CLIP_ID_EXTENT_X_MINX 15:0
+#define NVC197_SET_CLIP_ID_EXTENT_X_WIDTH 31:16
+
+#define NVC197_SET_CLIP_ID_EXTENT_Y(j) (0x0d44+(j)*8)
+#define NVC197_SET_CLIP_ID_EXTENT_Y_MINY 15:0
+#define NVC197_SET_CLIP_ID_EXTENT_Y_HEIGHT 31:16
+
+#define NVC197_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK 0x0d60
+#define NVC197_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVC197_SET_API_VISIBLE_CALL_LIMIT 0x0d64
+#define NVC197_SET_API_VISIBLE_CALL_LIMIT_V 3:0
+#define NVC197_SET_API_VISIBLE_CALL_LIMIT_V__0 0x00000000
+#define NVC197_SET_API_VISIBLE_CALL_LIMIT_V__1 0x00000001
+#define NVC197_SET_API_VISIBLE_CALL_LIMIT_V__2 0x00000002
+#define NVC197_SET_API_VISIBLE_CALL_LIMIT_V__4 0x00000003
+#define NVC197_SET_API_VISIBLE_CALL_LIMIT_V__8 0x00000004
+#define NVC197_SET_API_VISIBLE_CALL_LIMIT_V__16 0x00000005
+#define NVC197_SET_API_VISIBLE_CALL_LIMIT_V__32 0x00000006
+#define NVC197_SET_API_VISIBLE_CALL_LIMIT_V__64 0x00000007
+#define NVC197_SET_API_VISIBLE_CALL_LIMIT_V__128 0x00000008
+#define NVC197_SET_API_VISIBLE_CALL_LIMIT_V_NO_CHECK 0x0000000F
+
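The SET_API_VISIBLE_CALL_LIMIT values appear to encode the limit logarithmically: 0 maps to __0 and each power of two 2^k maps to k+1, with _NO_CHECK (0xF) disabling the check entirely. A sketch of that inferred mapping (the rounding-up behavior for non-power-of-two inputs is an assumption of this note, not stated by the header):

    #include <stdint.h>

    static uint32_t call_limit_encoding(uint32_t limit)
    {
        if (limit == 0)
            return 0;                              /* ..._V__0 */
        uint32_t enc;
        for (enc = 1; (1u << (enc - 1)) < limit && enc < 8; enc++)
            ;                                      /* rounds up; clamps at __128 */
        return enc;                                /* ..._V__1 through ..._V__128 */
    }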
+#define NVC197_SET_STATISTICS_COUNTER 0x0d68
+#define NVC197_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE 0:0
+#define NVC197_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE 1:1
+#define NVC197_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE 2:2
+#define NVC197_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE 3:3
+#define NVC197_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE 4:4
+#define NVC197_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE 5:5
+#define NVC197_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE 6:6
+#define NVC197_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE 7:7
+#define NVC197_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE 8:8
+#define NVC197_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE 9:9
+#define NVC197_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE 11:11
+#define NVC197_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE 12:12
+#define NVC197_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE 13:13
+#define NVC197_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE 14:14
+#define NVC197_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE 10:10
+#define NVC197_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE 15:15
+#define NVC197_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_TRUE 0x00000001
+#define NVC197_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE 16:16
+#define NVC197_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_CLEAR_RECT_HORIZONTAL 0x0d6c
+#define NVC197_SET_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVC197_SET_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVC197_SET_CLEAR_RECT_VERTICAL 0x0d70
+#define NVC197_SET_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVC197_SET_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVC197_SET_VERTEX_ARRAY_START 0x0d74
+#define NVC197_SET_VERTEX_ARRAY_START_V 31:0
+
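Each statistics counter above has an independent one-bit enable in the same SET_STATISTICS_COUNTER word, so enabling a set of counters is a matter of OR-ing TRUE values into place. A sketch reusing the illustrative FLD_VAL helper from the first note:

    /* Enable the vertex-, geometry- and pixel-shader invocation counters. */
    uint32_t stats = FLD_VAL(NVC197_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE,
                             NVC197_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_TRUE)
                   | FLD_VAL(NVC197_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE,
                             NVC197_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_TRUE)
                   | FLD_VAL(NVC197_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE,
                             NVC197_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_TRUE);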
+#define NVC197_DRAW_VERTEX_ARRAY 0x0d78
+#define NVC197_DRAW_VERTEX_ARRAY_COUNT 31:0
+
+#define NVC197_SET_VIEWPORT_Z_CLIP 0x0d7c
+#define NVC197_SET_VIEWPORT_Z_CLIP_RANGE 0:0
+#define NVC197_SET_VIEWPORT_Z_CLIP_RANGE_NEGATIVE_W_TO_POSITIVE_W 0x00000000
+#define NVC197_SET_VIEWPORT_Z_CLIP_RANGE_ZERO_TO_POSITIVE_W 0x00000001
+
+#define NVC197_SET_COLOR_CLEAR_VALUE(i) (0x0d80+(i)*4)
+#define NVC197_SET_COLOR_CLEAR_VALUE_V 31:0
+
+#define NVC197_SET_Z_CLEAR_VALUE 0x0d90
+#define NVC197_SET_Z_CLEAR_VALUE_V 31:0
+
+#define NVC197_SET_SHADER_CACHE_CONTROL 0x0d94
+#define NVC197_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0
+#define NVC197_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000
+#define NVC197_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001
+
+#define NVC197_FORCE_TRANSITION_TO_BETA 0x0d98
+#define NVC197_FORCE_TRANSITION_TO_BETA_V 0:0
+
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_ENABLE 0x0d9c
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V 0:0
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_FALSE 0x00000000
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_TRUE 0x00000001
+
+#define NVC197_SET_STENCIL_CLEAR_VALUE 0x0da0
+#define NVC197_SET_STENCIL_CLEAR_VALUE_V 7:0
+
+#define NVC197_INVALIDATE_SHADER_CACHES_NO_WFI 0x0da4
+#define NVC197_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0
+#define NVC197_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000
+#define NVC197_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001
+#define NVC197_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4
+#define NVC197_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000
+#define NVC197_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001
+#define NVC197_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12
+#define NVC197_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000
+#define NVC197_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001
+
+#define NVC197_SET_ZCULL_SERIALIZATION 0x0da8
+#define NVC197_SET_ZCULL_SERIALIZATION_ENABLE 0:0
+#define NVC197_SET_ZCULL_SERIALIZATION_ENABLE_FALSE 0x00000000
+#define NVC197_SET_ZCULL_SERIALIZATION_ENABLE_TRUE 0x00000001
+#define NVC197_SET_ZCULL_SERIALIZATION_APPLIED 5:4
+#define NVC197_SET_ZCULL_SERIALIZATION_APPLIED_ALWAYS 0x00000000
+#define NVC197_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z 0x00000001
+#define NVC197_SET_ZCULL_SERIALIZATION_APPLIED_OUT_OF_GAMUT_Z 0x00000002
+#define NVC197_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z_OR_OUT_OF_GAMUT_Z 0x00000003
+
+#define NVC197_SET_FRONT_POLYGON_MODE 0x0dac
+#define NVC197_SET_FRONT_POLYGON_MODE_V 31:0
+#define NVC197_SET_FRONT_POLYGON_MODE_V_POINT 0x00001B00
+#define NVC197_SET_FRONT_POLYGON_MODE_V_LINE 0x00001B01
+#define NVC197_SET_FRONT_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVC197_SET_BACK_POLYGON_MODE 0x0db0
+#define NVC197_SET_BACK_POLYGON_MODE_V 31:0
+#define NVC197_SET_BACK_POLYGON_MODE_V_POINT 0x00001B00
+#define NVC197_SET_BACK_POLYGON_MODE_V_LINE 0x00001B01
+#define NVC197_SET_BACK_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVC197_SET_POLY_SMOOTH 0x0db4
+#define NVC197_SET_POLY_SMOOTH_ENABLE 0:0
+#define NVC197_SET_POLY_SMOOTH_ENABLE_FALSE 0x00000000
+#define NVC197_SET_POLY_SMOOTH_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_ZT_MARK 0x0db8
+#define NVC197_SET_ZT_MARK_IEEE_CLEAN 0:0
+#define NVC197_SET_ZT_MARK_IEEE_CLEAN_FALSE 0x00000000
+#define NVC197_SET_ZT_MARK_IEEE_CLEAN_TRUE 0x00000001
+
+#define NVC197_SET_ZCULL_DIR_FORMAT 0x0dbc
+#define NVC197_SET_ZCULL_DIR_FORMAT_ZDIR 15:0
+#define NVC197_SET_ZCULL_DIR_FORMAT_ZDIR_LESS 0x00000000
+#define NVC197_SET_ZCULL_DIR_FORMAT_ZDIR_GREATER 0x00000001
+#define NVC197_SET_ZCULL_DIR_FORMAT_ZFORMAT 31:16
+#define NVC197_SET_ZCULL_DIR_FORMAT_ZFORMAT_MSB 0x00000000
+#define NVC197_SET_ZCULL_DIR_FORMAT_ZFORMAT_FP 0x00000001
+#define NVC197_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZTRICK 0x00000002
+#define NVC197_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZF32_1 0x00000003
+
+#define NVC197_SET_POLY_OFFSET_POINT 0x0dc0
+#define NVC197_SET_POLY_OFFSET_POINT_ENABLE 0:0
+#define NVC197_SET_POLY_OFFSET_POINT_ENABLE_FALSE 0x00000000
+#define NVC197_SET_POLY_OFFSET_POINT_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_POLY_OFFSET_LINE 0x0dc4
+#define NVC197_SET_POLY_OFFSET_LINE_ENABLE 0:0
+#define NVC197_SET_POLY_OFFSET_LINE_ENABLE_FALSE 0x00000000
+#define NVC197_SET_POLY_OFFSET_LINE_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_POLY_OFFSET_FILL 0x0dc8
+#define NVC197_SET_POLY_OFFSET_FILL_ENABLE 0:0
+#define NVC197_SET_POLY_OFFSET_FILL_ENABLE_FALSE 0x00000000
+#define NVC197_SET_POLY_OFFSET_FILL_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_PATCH 0x0dcc
+#define NVC197_SET_PATCH_SIZE 7:0
+
+#define NVC197_SET_ITERATED_BLEND 0x0dd0
+#define NVC197_SET_ITERATED_BLEND_ENABLE 0:0
+#define NVC197_SET_ITERATED_BLEND_ENABLE_FALSE 0x00000000
+#define NVC197_SET_ITERATED_BLEND_ENABLE_TRUE 0x00000001
+#define NVC197_SET_ITERATED_BLEND_ALPHA_ENABLE 1:1
+#define NVC197_SET_ITERATED_BLEND_ALPHA_ENABLE_FALSE 0x00000000
+#define NVC197_SET_ITERATED_BLEND_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_ITERATED_BLEND_PASS 0x0dd4
+#define NVC197_SET_ITERATED_BLEND_PASS_COUNT 7:0
+
+#define NVC197_SET_ZCULL_CRITERION 0x0dd8
+#define NVC197_SET_ZCULL_CRITERION_SFUNC 7:0
+#define NVC197_SET_ZCULL_CRITERION_SFUNC_NEVER 0x00000000
+#define NVC197_SET_ZCULL_CRITERION_SFUNC_LESS 0x00000001
+#define NVC197_SET_ZCULL_CRITERION_SFUNC_EQUAL 0x00000002
+#define NVC197_SET_ZCULL_CRITERION_SFUNC_LEQUAL 0x00000003
+#define NVC197_SET_ZCULL_CRITERION_SFUNC_GREATER 0x00000004
+#define NVC197_SET_ZCULL_CRITERION_SFUNC_NOTEQUAL 0x00000005
+#define NVC197_SET_ZCULL_CRITERION_SFUNC_GEQUAL 0x00000006
+#define NVC197_SET_ZCULL_CRITERION_SFUNC_ALWAYS 0x00000007
+#define NVC197_SET_ZCULL_CRITERION_NO_INVALIDATE 8:8
+#define NVC197_SET_ZCULL_CRITERION_NO_INVALIDATE_FALSE 0x00000000
+#define NVC197_SET_ZCULL_CRITERION_NO_INVALIDATE_TRUE 0x00000001
+#define NVC197_SET_ZCULL_CRITERION_FORCE_MATCH 9:9
+#define NVC197_SET_ZCULL_CRITERION_FORCE_MATCH_FALSE 0x00000000
+#define NVC197_SET_ZCULL_CRITERION_FORCE_MATCH_TRUE 0x00000001
+#define NVC197_SET_ZCULL_CRITERION_SREF 23:16
+#define NVC197_SET_ZCULL_CRITERION_SMASK 31:24
+
+#define NVC197_PIXEL_SHADER_BARRIER 0x0de0
+#define NVC197_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE 0:0
+#define NVC197_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_FALSE 0x00000000
+#define NVC197_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_SM_TIMEOUT_INTERVAL 0x0de4
+#define NVC197_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0
+
+#define NVC197_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY 0x0de8
+#define NVC197_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE 0:0
+#define NVC197_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_FALSE 0x00000000
+#define NVC197_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_TRUE 0x00000001
+
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER 0x0df0
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER_V 7:0
+
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION 0x0df4
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC 2:0
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_FALSE 0x00000000
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_TRUE 0x00000001
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_EQ 0x00000002
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_NE 0x00000003
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LT 0x00000004
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LE 0x00000005
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GT 0x00000006
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GE 0x00000007
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION 5:3
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD_PRODUCTS 0x00000000
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUB_PRODUCTS 0x00000001
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MIN 0x00000002
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MAX 0x00000003
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_RCP 0x00000004
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD 0x00000005
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUBTRACT 0x00000006
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT 8:6
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT0 0x00000000
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT1 0x00000001
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT2 0x00000002
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT3 0x00000003
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT4 0x00000004
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT5 0x00000005
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT6 0x00000006
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT7 0x00000007
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT 11:9
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_RGB 0x00000000
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_RGB 0x00000001
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_AAA 0x00000002
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_AAA 0x00000003
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP0_RGB 0x00000004
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP1_RGB 0x00000005
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP2_RGB 0x00000006
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_PBR_RGB 0x00000007
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT 15:12
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO 0x00000000
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE 0x00000001
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_RGB 0x00000002
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_AAA 0x00000003
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_RGB 0x00000005
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_AAA 0x00000006
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP0_RGB 0x00000009
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP1_RGB 0x0000000A
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP2_RGB 0x0000000B
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_PBR_RGB 0x0000000C
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_CONSTANT_RGB 0x0000000D
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO_A_TIMES_B 0x0000000E
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT 18:16
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_RGB 0x00000000
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_RGB 0x00000001
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_AAA 0x00000002
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_AAA 0x00000003
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP0_RGB 0x00000004
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP1_RGB 0x00000005
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP2_RGB 0x00000006
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_PBR_RGB 0x00000007
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT 22:19
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO 0x00000000
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE 0x00000001
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_RGB 0x00000002
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_AAA 0x00000003
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_RGB 0x00000005
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_AAA 0x00000006
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP0_RGB 0x00000009
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP1_RGB 0x0000000A
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP2_RGB 0x0000000B
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_PBR_RGB 0x0000000C
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_CONSTANT_RGB 0x0000000D
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO_C_TIMES_D 0x0000000E
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE 25:23
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RGB 0x00000000
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GBR 0x00000001
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RRR 0x00000002
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GGG 0x00000003
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_BBB 0x00000004
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_R_TO_A 0x00000005
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK 27:26
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_RGB 0x00000000
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_R_ONLY 0x00000001
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_G_ONLY 0x00000002
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_B_ONLY 0x00000003
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT 29:28
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP0 0x00000000
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP1 0x00000001
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP2 0x00000002
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_NONE 0x00000003
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC 31:31
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_FALSE 0x00000000
+#define NVC197_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_TRUE 0x00000001
+
+#define NVC197_SET_WINDOW_OFFSET_X 0x0df8
+#define NVC197_SET_WINDOW_OFFSET_X_V 16:0
+
+#define NVC197_SET_WINDOW_OFFSET_Y 0x0dfc
+#define NVC197_SET_WINDOW_OFFSET_Y_V 17:0
+
+#define NVC197_SET_SCISSOR_ENABLE(j) (0x0e00+(j)*16)
+#define NVC197_SET_SCISSOR_ENABLE_V 0:0
+#define NVC197_SET_SCISSOR_ENABLE_V_FALSE 0x00000000
+#define NVC197_SET_SCISSOR_ENABLE_V_TRUE 0x00000001
+
+#define NVC197_SET_SCISSOR_HORIZONTAL(j) (0x0e04+(j)*16)
+#define NVC197_SET_SCISSOR_HORIZONTAL_XMIN 15:0
+#define NVC197_SET_SCISSOR_HORIZONTAL_XMAX 31:16
+
+#define NVC197_SET_SCISSOR_VERTICAL(j) (0x0e08+(j)*16)
+#define NVC197_SET_SCISSOR_VERTICAL_YMIN 15:0
+#define NVC197_SET_SCISSOR_VERTICAL_YMAX 31:16
+
+#define NVC197_SET_SELECT_MAXWELL_TEXTURE_HEADERS 0x0f10
+#define NVC197_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V 0:0
+#define NVC197_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_FALSE 0x00000000
+#define NVC197_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_TRUE 0x00000001
+
+#define NVC197_SET_VPC_PERF_KNOB 0x0f14
+#define NVC197_SET_VPC_PERF_KNOB_CULLED_SMALL_LINES 7:0
+#define NVC197_SET_VPC_PERF_KNOB_CULLED_SMALL_TRIANGLES 15:8
+#define NVC197_SET_VPC_PERF_KNOB_NONCULLED_LINES_AND_POINTS 23:16
+#define NVC197_SET_VPC_PERF_KNOB_NONCULLED_TRIANGLES 31:24
+
+#define NVC197_PM_LOCAL_TRIGGER 0x0f18
+#define NVC197_PM_LOCAL_TRIGGER_BOOKMARK 15:0
+
+#define NVC197_SET_POST_Z_PS_IMASK 0x0f1c
+#define NVC197_SET_POST_Z_PS_IMASK_ENABLE 0:0
+#define NVC197_SET_POST_Z_PS_IMASK_ENABLE_FALSE 0x00000000
+#define NVC197_SET_POST_Z_PS_IMASK_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_CONSTANT_COLOR_RENDERING 0x0f40
+#define NVC197_SET_CONSTANT_COLOR_RENDERING_ENABLE 0:0
+#define NVC197_SET_CONSTANT_COLOR_RENDERING_ENABLE_FALSE 0x00000000
+#define NVC197_SET_CONSTANT_COLOR_RENDERING_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_CONSTANT_COLOR_RENDERING_RED 0x0f44
+#define NVC197_SET_CONSTANT_COLOR_RENDERING_RED_V 31:0
+
+#define NVC197_SET_CONSTANT_COLOR_RENDERING_GREEN 0x0f48
+#define NVC197_SET_CONSTANT_COLOR_RENDERING_GREEN_V 31:0
+
+#define NVC197_SET_CONSTANT_COLOR_RENDERING_BLUE 0x0f4c
+#define NVC197_SET_CONSTANT_COLOR_RENDERING_BLUE_V 31:0
+
+#define NVC197_SET_CONSTANT_COLOR_RENDERING_ALPHA 0x0f50
+#define NVC197_SET_CONSTANT_COLOR_RENDERING_ALPHA_V 31:0
+
+#define NVC197_SET_BACK_STENCIL_FUNC_REF 0x0f54
+#define NVC197_SET_BACK_STENCIL_FUNC_REF_V 7:0
+
+#define NVC197_SET_BACK_STENCIL_MASK 0x0f58
+#define NVC197_SET_BACK_STENCIL_MASK_V 7:0
+
+#define NVC197_SET_BACK_STENCIL_FUNC_MASK 0x0f5c
+#define NVC197_SET_BACK_STENCIL_FUNC_MASK_V 7:0
+
+#define NVC197_SET_VERTEX_STREAM_SUBSTITUTE_A 0x0f84
+#define NVC197_SET_VERTEX_STREAM_SUBSTITUTE_A_ADDRESS_UPPER 7:0
+
+#define NVC197_SET_VERTEX_STREAM_SUBSTITUTE_B 0x0f88
+#define NVC197_SET_VERTEX_STREAM_SUBSTITUTE_B_ADDRESS_LOWER 31:0
+
+#define NVC197_SET_LINE_MODE_POLYGON_CLIP 0x0f8c
+#define NVC197_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE 0:0
+#define NVC197_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DRAW_LINE 0x00000000
+#define NVC197_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DO_NOT_DRAW_LINE 0x00000001
+
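Scissor rectangles pack both edges into one 32-bit payload per axis (min in bits 15:0, max in 31:16). A sketch using the hypothetical push_method() writer declared in the stream-out note above:

    #include <stdint.h>

    static void set_scissor(int j, uint16_t xmin, uint16_t xmax,
                                   uint16_t ymin, uint16_t ymax)
    {
        push_method(NVC197_SET_SCISSOR_ENABLE(j),
                    NVC197_SET_SCISSOR_ENABLE_V_TRUE);
        push_method(NVC197_SET_SCISSOR_HORIZONTAL(j),
                    (uint32_t)xmin | ((uint32_t)xmax << 16));
        push_method(NVC197_SET_SCISSOR_VERTICAL(j),
                    (uint32_t)ymin | ((uint32_t)ymax << 16));
    }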
+#define NVC197_SET_SINGLE_CT_WRITE_CONTROL 0x0f90
+#define NVC197_SET_SINGLE_CT_WRITE_CONTROL_ENABLE 0:0
+#define NVC197_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_FALSE 0x00000000
+#define NVC197_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_VTG_WARP_WATERMARKS 0x0f98
+#define NVC197_SET_VTG_WARP_WATERMARKS_LOW 15:0
+#define NVC197_SET_VTG_WARP_WATERMARKS_HIGH 31:16
+
+#define NVC197_SET_DEPTH_BOUNDS_MIN 0x0f9c
+#define NVC197_SET_DEPTH_BOUNDS_MIN_V 31:0
+
+#define NVC197_SET_DEPTH_BOUNDS_MAX 0x0fa0
+#define NVC197_SET_DEPTH_BOUNDS_MAX_V 31:0
+
+#define NVC197_SET_SAMPLE_MASK 0x0fa4
+#define NVC197_SET_SAMPLE_MASK_RASTER_OUT_ENABLE 0:0
+#define NVC197_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_FALSE 0x00000000
+#define NVC197_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_TRUE 0x00000001
+#define NVC197_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE 4:4
+#define NVC197_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_FALSE 0x00000000
+#define NVC197_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_COLOR_TARGET_SAMPLE_MASK 0x0fa8
+#define NVC197_SET_COLOR_TARGET_SAMPLE_MASK_V 15:0
+
+#define NVC197_SET_CT_MRT_ENABLE 0x0fac
+#define NVC197_SET_CT_MRT_ENABLE_V 0:0
+#define NVC197_SET_CT_MRT_ENABLE_V_FALSE 0x00000000
+#define NVC197_SET_CT_MRT_ENABLE_V_TRUE 0x00000001
+
+#define NVC197_SET_NONMULTISAMPLED_Z 0x0fb0
+#define NVC197_SET_NONMULTISAMPLED_Z_V 0:0
+#define NVC197_SET_NONMULTISAMPLED_Z_V_PER_SAMPLE 0x00000000
+#define NVC197_SET_NONMULTISAMPLED_Z_V_AT_PIXEL_CENTER 0x00000001
+
+#define NVC197_SET_TIR 0x0fb4
+#define NVC197_SET_TIR_MODE 1:0
+#define NVC197_SET_TIR_MODE_DISABLED 0x00000000
+#define NVC197_SET_TIR_MODE_RASTER_N_TARGET_M 0x00000001
+
+#define NVC197_SET_ANTI_ALIAS_RASTER 0x0fb8
+#define NVC197_SET_ANTI_ALIAS_RASTER_SAMPLES 2:0
+#define NVC197_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_1X1 0x00000000
+#define NVC197_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X2 0x00000002
+#define NVC197_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVC197_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVC197_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X4 0x00000006
+
+#define NVC197_SET_SAMPLE_MASK_X0_Y0 0x0fbc
+#define NVC197_SET_SAMPLE_MASK_X0_Y0_V 15:0
+
+#define NVC197_SET_SAMPLE_MASK_X1_Y0 0x0fc0
+#define NVC197_SET_SAMPLE_MASK_X1_Y0_V 15:0
+
+#define NVC197_SET_SAMPLE_MASK_X0_Y1 0x0fc4
+#define NVC197_SET_SAMPLE_MASK_X0_Y1_V 15:0
+
+#define NVC197_SET_SAMPLE_MASK_X1_Y1 0x0fc8
+#define NVC197_SET_SAMPLE_MASK_X1_Y1_V 15:0
+
+#define NVC197_SET_SURFACE_CLIP_ID_MEMORY_A 0x0fcc
+#define NVC197_SET_SURFACE_CLIP_ID_MEMORY_A_OFFSET_UPPER 7:0
+
+#define NVC197_SET_SURFACE_CLIP_ID_MEMORY_B 0x0fd0
+#define NVC197_SET_SURFACE_CLIP_ID_MEMORY_B_OFFSET_LOWER 31:0
+
+#define NVC197_SET_TIR_MODULATION 0x0fd4
+#define NVC197_SET_TIR_MODULATION_COMPONENT_SELECT 1:0
+#define NVC197_SET_TIR_MODULATION_COMPONENT_SELECT_NO_MODULATION 0x00000000
+#define NVC197_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGB 0x00000001
+#define NVC197_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_ALPHA_ONLY 0x00000002
+#define NVC197_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGBA 0x00000003
+
+#define NVC197_SET_TIR_MODULATION_FUNCTION 0x0fd8
+#define NVC197_SET_TIR_MODULATION_FUNCTION_SELECT 0:0
+#define NVC197_SET_TIR_MODULATION_FUNCTION_SELECT_LINEAR 0x00000000
+#define NVC197_SET_TIR_MODULATION_FUNCTION_SELECT_TABLE 0x00000001
+
+#define NVC197_SET_BLEND_OPT_CONTROL 0x0fdc
+#define NVC197_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS 0:0
+#define NVC197_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_FALSE 0x00000000
+#define NVC197_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_TRUE 0x00000001
+
+#define NVC197_SET_ZT_A 0x0fe0
+#define NVC197_SET_ZT_A_OFFSET_UPPER 7:0
+
+#define NVC197_SET_ZT_B 0x0fe4
+#define NVC197_SET_ZT_B_OFFSET_LOWER 31:0
+
+#define NVC197_SET_ZT_FORMAT 0x0fe8
+#define NVC197_SET_ZT_FORMAT_V 4:0
+#define NVC197_SET_ZT_FORMAT_V_Z16 0x00000013
+#define NVC197_SET_ZT_FORMAT_V_Z24S8 0x00000014
+#define NVC197_SET_ZT_FORMAT_V_X8Z24 0x00000015
+#define NVC197_SET_ZT_FORMAT_V_S8Z24 0x00000016
+#define NVC197_SET_ZT_FORMAT_V_S8 0x00000017
+#define NVC197_SET_ZT_FORMAT_V_V8Z24 0x00000018
+#define NVC197_SET_ZT_FORMAT_V_ZF32 0x0000000A
+#define NVC197_SET_ZT_FORMAT_V_ZF32_X24S8 0x00000019
+#define NVC197_SET_ZT_FORMAT_V_X8Z24_X16V8S8 0x0000001D
+#define NVC197_SET_ZT_FORMAT_V_ZF32_X16V8X8 0x0000001E
+#define NVC197_SET_ZT_FORMAT_V_ZF32_X16V8S8 0x0000001F
+
+#define NVC197_SET_ZT_BLOCK_SIZE 0x0fec
+#define NVC197_SET_ZT_BLOCK_SIZE_WIDTH 3:0
+#define NVC197_SET_ZT_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVC197_SET_ZT_BLOCK_SIZE_HEIGHT 7:4
+#define NVC197_SET_ZT_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVC197_SET_ZT_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVC197_SET_ZT_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC197_SET_ZT_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC197_SET_ZT_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC197_SET_ZT_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC197_SET_ZT_BLOCK_SIZE_DEPTH 11:8
+#define NVC197_SET_ZT_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+
+#define NVC197_SET_ZT_ARRAY_PITCH 0x0ff0
+#define NVC197_SET_ZT_ARRAY_PITCH_V 31:0
+
+#define NVC197_SET_SURFACE_CLIP_HORIZONTAL 0x0ff4
+#define NVC197_SET_SURFACE_CLIP_HORIZONTAL_X 15:0
+#define NVC197_SET_SURFACE_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVC197_SET_SURFACE_CLIP_VERTICAL 0x0ff8
+#define NVC197_SET_SURFACE_CLIP_VERTICAL_Y 15:0
+#define NVC197_SET_SURFACE_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVC197_SET_TILED_CACHE_BUNDLE_CONTROL 0x0ffc
+#define NVC197_SET_TILED_CACHE_BUNDLE_CONTROL_TREAT_HEAVYWEIGHT_AS_LIGHTWEIGHT 0:0
+#define NVC197_SET_TILED_CACHE_BUNDLE_CONTROL_TREAT_HEAVYWEIGHT_AS_LIGHTWEIGHT_FALSE 0x00000000
+#define NVC197_SET_TILED_CACHE_BUNDLE_CONTROL_TREAT_HEAVYWEIGHT_AS_LIGHTWEIGHT_TRUE 0x00000001
+
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS 0x1000
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE 0:0
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_FALSE 0x00000000
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_TRUE 0x00000001
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY 5:4
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC197_SET_VIEWPORT_MULTICAST 0x1004
+#define NVC197_SET_VIEWPORT_MULTICAST_ORDER 0:0
+#define NVC197_SET_VIEWPORT_MULTICAST_ORDER_VIEWPORT_ORDER 0x00000000
+#define NVC197_SET_VIEWPORT_MULTICAST_ORDER_PRIMITIVE_ORDER 0x00000001
+
+#define NVC197_SET_TESSELLATION_CUT_HEIGHT 0x1008
+#define NVC197_SET_TESSELLATION_CUT_HEIGHT_V 4:0
+
+#define NVC197_SET_MAX_GS_INSTANCES_PER_TASK 0x100c
+#define NVC197_SET_MAX_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVC197_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK 0x1010
+#define NVC197_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK_V 15:0
+
+#define NVC197_SET_RESERVED_SW_METHOD00 0x1014
+#define NVC197_SET_RESERVED_SW_METHOD00_V 31:0
+
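Binding a depth/stencil target follows the same upper/lower address split as the color targets, followed by a format selection. A minimal sketch (the Z24S8 format choice is an arbitrary example; push_method() is the hypothetical writer from the earlier sketches):

    #include <stdint.h>

    static void bind_zt(uint64_t gpu_va)
    {
        push_method(NVC197_SET_ZT_A, (uint32_t)(gpu_va >> 32) & 0xFF);  /* bits 39:32 */
        push_method(NVC197_SET_ZT_B, (uint32_t)gpu_va);                 /* bits 31:0  */
        push_method(NVC197_SET_ZT_FORMAT, NVC197_SET_ZT_FORMAT_V_Z24S8);
    }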
+#define NVC197_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER 0x1018
+#define NVC197_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVC197_SET_BETA_CB_STORAGE_CONSTRAINT 0x101c
+#define NVC197_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVC197_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVC197_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER 0x1020
+#define NVC197_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVC197_SET_ALPHA_CB_STORAGE_CONSTRAINT 0x1024
+#define NVC197_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVC197_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVC197_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_RESERVED_SW_METHOD01 0x1028
+#define NVC197_SET_RESERVED_SW_METHOD01_V 31:0
+
+#define NVC197_SET_RESERVED_SW_METHOD02 0x102c
+#define NVC197_SET_RESERVED_SW_METHOD02_V 31:0
+
+#define NVC197_SET_TIR_MODULATION_COEFFICIENT_TABLE(i) (0x1030+(i)*4)
+#define NVC197_SET_TIR_MODULATION_COEFFICIENT_TABLE_V0 7:0
+#define NVC197_SET_TIR_MODULATION_COEFFICIENT_TABLE_V1 15:8
+#define NVC197_SET_TIR_MODULATION_COEFFICIENT_TABLE_V2 23:16
+#define NVC197_SET_TIR_MODULATION_COEFFICIENT_TABLE_V3 31:24
+
+#define NVC197_SET_SPARE_NOOP01 0x1044
+#define NVC197_SET_SPARE_NOOP01_V 31:0
+
+#define NVC197_SET_SPARE_NOOP02 0x1048
+#define NVC197_SET_SPARE_NOOP02_V 31:0
+
+#define NVC197_SET_SPARE_NOOP03 0x104c
+#define NVC197_SET_SPARE_NOOP03_V 31:0
+
+#define NVC197_SET_SPARE_NOOP04 0x1050
+#define NVC197_SET_SPARE_NOOP04_V 31:0
+
+#define NVC197_SET_SPARE_NOOP05 0x1054
+#define NVC197_SET_SPARE_NOOP05_V 31:0
+
+#define NVC197_SET_SPARE_NOOP06 0x1058
+#define NVC197_SET_SPARE_NOOP06_V 31:0
+
+#define NVC197_SET_SPARE_NOOP07 0x105c
+#define NVC197_SET_SPARE_NOOP07_V 31:0
+
+#define NVC197_SET_SPARE_NOOP08 0x1060
+#define NVC197_SET_SPARE_NOOP08_V 31:0
+
+#define NVC197_SET_SPARE_NOOP09 0x1064
+#define NVC197_SET_SPARE_NOOP09_V 31:0
+
+#define NVC197_SET_SPARE_NOOP10 0x1068
+#define NVC197_SET_SPARE_NOOP10_V 31:0
+
+#define NVC197_SET_SPARE_NOOP11 0x106c
+#define NVC197_SET_SPARE_NOOP11_V 31:0
+
+#define NVC197_SET_SPARE_NOOP12 0x1070
+#define NVC197_SET_SPARE_NOOP12_V 31:0
+
+#define NVC197_SET_SPARE_NOOP13 0x1074
+#define NVC197_SET_SPARE_NOOP13_V 31:0
+
+#define NVC197_SET_SPARE_NOOP14 0x1078
+#define NVC197_SET_SPARE_NOOP14_V 31:0
+
+#define NVC197_SET_SPARE_NOOP15 0x107c
+#define NVC197_SET_SPARE_NOOP15_V 31:0
+
+#define NVC197_SET_RESERVED_SW_METHOD03 0x10b0
+#define NVC197_SET_RESERVED_SW_METHOD03_V 31:0
+
+#define NVC197_SET_RESERVED_SW_METHOD04 0x10b4
+#define NVC197_SET_RESERVED_SW_METHOD04_V 31:0
+
+#define NVC197_SET_RESERVED_SW_METHOD05 0x10b8
+#define NVC197_SET_RESERVED_SW_METHOD05_V 31:0
+
+#define NVC197_SET_RESERVED_SW_METHOD06 0x10bc
+#define NVC197_SET_RESERVED_SW_METHOD06_V 31:0
+
+#define NVC197_SET_RESERVED_SW_METHOD07 0x10c0
+#define NVC197_SET_RESERVED_SW_METHOD07_V 31:0
+
+#define NVC197_SET_RESERVED_SW_METHOD08 0x10c4
+#define NVC197_SET_RESERVED_SW_METHOD08_V 31:0
+
+#define NVC197_SET_RESERVED_SW_METHOD09 0x10c8
+#define NVC197_SET_RESERVED_SW_METHOD09_V 31:0
+
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_UNORM8 0x10cc
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED 23:16
+
+#define NVC197_SET_RESERVED_SW_METHOD10 0x10d0
+#define NVC197_SET_RESERVED_SW_METHOD10_V 31:0
+
+#define NVC197_SET_RESERVED_SW_METHOD11 0x10d4
+#define NVC197_SET_RESERVED_SW_METHOD11_V 31:0
+
+#define NVC197_SET_RESERVED_SW_METHOD12 0x10d8
+#define NVC197_SET_RESERVED_SW_METHOD12_V 31:0
+
+#define NVC197_SET_RESERVED_SW_METHOD13 0x10dc
+#define NVC197_SET_RESERVED_SW_METHOD13_V 31:0
+
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_UNORM10 0x10e0
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED 23:16
+
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_UNORM16 0x10e4
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED 23:16
+
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_FP11 0x10e8
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED_ALL_HIT_ONCE 5:0
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED 21:16
+
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_FP16 0x10ec
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED 23:16
+
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_SRGB8 0x10f0
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC197_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED 23:16
+
+#define NVC197_UNBIND_ALL 0x10f4
+#define NVC197_UNBIND_ALL_CONSTANT_BUFFERS 8:8
+#define NVC197_UNBIND_ALL_CONSTANT_BUFFERS_FALSE 0x00000000
+#define NVC197_UNBIND_ALL_CONSTANT_BUFFERS_TRUE 0x00000001
+
+#define NVC197_SET_CLEAR_SURFACE_CONTROL 0x10f8
+#define NVC197_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK 0:0
+#define NVC197_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_FALSE 0x00000000
+#define NVC197_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_TRUE 0x00000001
+#define NVC197_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT 4:4
+#define NVC197_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_FALSE 0x00000000
+#define NVC197_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_TRUE 0x00000001
+#define NVC197_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0 8:8
+#define NVC197_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_FALSE 0x00000000
+#define NVC197_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_TRUE 0x00000001
+#define NVC197_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0 12:12
+#define NVC197_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_FALSE 0x00000000
+#define NVC197_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_TRUE 0x00000001
+
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS 0x10fc
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY 5:4
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC197_SET_RESERVED_SW_METHOD14 0x1100
+#define NVC197_SET_RESERVED_SW_METHOD14_V 31:0
+
+#define NVC197_SET_RESERVED_SW_METHOD15 0x1104
+#define NVC197_SET_RESERVED_SW_METHOD15_V 31:0
+
+#define NVC197_NO_OPERATION_DATA_HI 0x110c
+#define NVC197_NO_OPERATION_DATA_HI_V 31:0
+
+#define NVC197_SET_DEPTH_BIAS_CONTROL 0x1110
+#define NVC197_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT 0:0
+#define NVC197_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_FALSE 0x00000000
+#define NVC197_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_TRUE 0x00000001
+
+#define NVC197_PM_TRIGGER_END 0x1114
+#define NVC197_PM_TRIGGER_END_V 31:0
+
+#define NVC197_SET_VERTEX_ID_BASE 0x1118
+#define NVC197_SET_VERTEX_ID_BASE_V 31:0
+
+#define NVC197_SET_STENCIL_COMPRESSION 0x111c
+#define NVC197_SET_STENCIL_COMPRESSION_ENABLE 0:0
+#define NVC197_SET_STENCIL_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVC197_SET_STENCIL_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A(i) (0x1120+(i)*4)
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0 0:0
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1 1:1
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2 2:2
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3 3:3
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0 4:4
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1 5:5
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2 6:6
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3 7:7
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0 8:8
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1 9:9
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2 10:10
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3 11:11
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0 12:12
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1 13:13
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2 14:14
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3 15:15
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0 16:16
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1 17:17
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2 18:18
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3 19:19
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0 20:20
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1 21:21
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2 22:22
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3 23:23
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0 24:24
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1 25:25
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2 26:26
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3 27:27
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0 28:28
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1 29:29
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2 30:30
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3 31:31
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_TRUE 0x00000001
+
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B(i) (0x1128+(i)*4)
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0 0:0
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1 1:1
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2 2:2
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3 3:3
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0 4:4
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1 5:5
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2 6:6
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3 7:7
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0 8:8
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1 9:9
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2 10:10
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3 11:11
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0 12:12
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1 13:13
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2 14:14
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3 15:15
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0 16:16
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1 17:17
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2 18:18
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3 19:19
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0 20:20
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1 21:21
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2 22:22
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3 23:23
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0 24:24
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1 25:25
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2 26:26
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3 27:27
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0 28:28
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1 29:29
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2 30:30
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_TRUE 0x00000001
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3 31:31
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_FALSE 0x00000000
+#define NVC197_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_TRUE 0x00000001
+
+#define NVC197_SET_TIR_CONTROL 0x1130
+#define NVC197_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES 0:0
+#define NVC197_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_DISABLE 0x00000000
+#define NVC197_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_ENABLE 0x00000001
+#define NVC197_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES 4:4
+#define NVC197_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_DISABLE 0x00000000
+#define NVC197_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_ENABLE 0x00000001
+#define NVC197_SET_TIR_CONTROL_REDUCE_COVERAGE 1:1
+#define NVC197_SET_TIR_CONTROL_REDUCE_COVERAGE_DISABLE 0x00000000
+#define NVC197_SET_TIR_CONTROL_REDUCE_COVERAGE_ENABLE 0x00000001
+
+#define NVC197_SET_MUTABLE_METHOD_CONTROL 0x1134
+#define NVC197_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT 0:0
+#define NVC197_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_FALSE 0x00000000
+#define NVC197_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_TRUE 0x00000001
+
+#define NVC197_SET_POST_PS_INITIAL_COVERAGE 0x1138
+#define NVC197_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE 0:0
+#define NVC197_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_FALSE 0x00000000
+#define NVC197_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_TRUE 0x00000001
+
+#define NVC197_SET_FILL_VIA_TRIANGLE 0x113c
+#define NVC197_SET_FILL_VIA_TRIANGLE_MODE 1:0
+#define NVC197_SET_FILL_VIA_TRIANGLE_MODE_DISABLED 0x00000000
+#define NVC197_SET_FILL_VIA_TRIANGLE_MODE_FILL_ALL 0x00000001
0x00000002 + +#define NVC197_SET_BLEND_PER_FORMAT_ENABLE 0x1140 +#define NVC197_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16 4:4 +#define NVC197_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_FALSE 0x00000000 +#define NVC197_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_TRUE 0x00000001 + +#define NVC197_FLUSH_PENDING_WRITES 0x1144 +#define NVC197_FLUSH_PENDING_WRITES_SM_DOES_GLOBAL_STORE 0:0 + +#define NVC197_SET_VERTEX_ATTRIBUTE_A(i) (0x1160+(i)*4) +#define NVC197_SET_VERTEX_ATTRIBUTE_A_STREAM 4:0 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_SOURCE 6:6 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_SOURCE_ACTIVE 0x00000000 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_SOURCE_INACTIVE 0x00000001 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_OFFSET 20:7 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS 26:21 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NVC197_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8 0x00000034 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE 29:27 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SNORM 0x00000001 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UNORM 0x00000002 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SINT 0x00000003 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UINT 0x00000004 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_USCALED 0x00000005 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SSCALED 0x00000006 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT 0x00000007 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B 31:31 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_FALSE 0x00000000 +#define NVC197_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_TRUE 0x00000001 + +#define NVC197_SET_VERTEX_ATTRIBUTE_B(i) (0x11a0+(i)*4) +#define NVC197_SET_VERTEX_ATTRIBUTE_B_STREAM 4:0 +#define NVC197_SET_VERTEX_ATTRIBUTE_B_SOURCE 6:6 +#define NVC197_SET_VERTEX_ATTRIBUTE_B_SOURCE_ACTIVE 0x00000000 +#define NVC197_SET_VERTEX_ATTRIBUTE_B_SOURCE_INACTIVE 0x00000001 +#define NVC197_SET_VERTEX_ATTRIBUTE_B_OFFSET 20:7 +#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS 26:21 
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32 0x00000012
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_G8R8 0x00000032
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16 0x0000001B
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8 0x0000001D
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8 0x00000034
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE 29:27
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SNORM 0x00000001
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UNORM 0x00000002
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SINT 0x00000003
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UINT 0x00000004
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_USCALED 0x00000005
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SSCALED 0x00000006
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_FLOAT 0x00000007
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B 31:31
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_FALSE 0x00000000
+#define NVC197_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_TRUE 0x00000001
+
+#define NVC197_SET_ANTI_ALIAS_SAMPLE_POSITIONS(i) (0x11e0+(i)*4)
+#define NVC197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X0 3:0
+#define NVC197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y0 7:4
+#define NVC197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X1 11:8
+#define NVC197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y1 15:12
+#define NVC197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X2 19:16
+#define NVC197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y2 23:20
+#define NVC197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X3 27:24
+#define NVC197_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y3 31:28
+
+#define NVC197_SET_OFFSET_RENDER_TARGET_INDEX 0x11f0
+#define NVC197_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX 0:0
+#define NVC197_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_FALSE 0x00000000
+#define NVC197_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_TRUE 0x00000001
+
+#define NVC197_FORCE_HEAVYWEIGHT_METHOD_SYNC 0x11f4
+#define NVC197_FORCE_HEAVYWEIGHT_METHOD_SYNC_V 31:0
+
+#define NVC197_SET_COVERAGE_TO_COLOR 0x11f8
+#define NVC197_SET_COVERAGE_TO_COLOR_ENABLE 0:0
+#define NVC197_SET_COVERAGE_TO_COLOR_ENABLE_FALSE 0x00000000
+#define NVC197_SET_COVERAGE_TO_COLOR_ENABLE_TRUE 0x00000001
+#define NVC197_SET_COVERAGE_TO_COLOR_CT_SELECT 6:4
+
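/*
 * Methods declared with an (i) argument, such as
 * NVC197_SET_VERTEX_ATTRIBUTE_A(i) or NVC197_SET_ANTI_ALIAS_SAMPLE_POSITIONS(i)
 * above, are indexed method arrays with a 4-byte stride: the macro expands to
 * the method offset of element i. A sketch of describing vertex attribute i,
 * reusing the illustrative FLD_NUM helper from the earlier note (this function
 * is an editor's assumption, not the driver's API):
 */
static inline void describe_vertex_attrib(unsigned int *method, unsigned int *data,
                                          unsigned int i, unsigned int stream,
                                          unsigned int byte_offset)
{
    *method = NVC197_SET_VERTEX_ATTRIBUTE_A(i);   /* 0x1160 + 4*i */
    *data   = FLD_NUM(NVC197_SET_VERTEX_ATTRIBUTE_A_STREAM, stream)
            | FLD_NUM(NVC197_SET_VERTEX_ATTRIBUTE_A_OFFSET, byte_offset)
            | FLD_NUM(NVC197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE,
                      NVC197_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT);
}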
+#define NVC197_DECOMPRESS_ZETA_SURFACE 0x11fc
+#define NVC197_DECOMPRESS_ZETA_SURFACE_Z_ENABLE 0:0
+#define NVC197_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_FALSE 0x00000000
+#define NVC197_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_TRUE 0x00000001
+#define NVC197_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE 4:4
+#define NVC197_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_FALSE 0x00000000
+#define NVC197_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_SCREEN_STATE_MASK 0x1204
+#define NVC197_SET_SCREEN_STATE_MASK_MASK 3:0
+
+#define NVC197_SET_ZT_SPARSE 0x1208
+#define NVC197_SET_ZT_SPARSE_ENABLE 0:0
+#define NVC197_SET_ZT_SPARSE_ENABLE_FALSE 0x00000000
+#define NVC197_SET_ZT_SPARSE_ENABLE_TRUE 0x00000001
+#define NVC197_SET_ZT_SPARSE_UNMAPPED_COMPARE 1:1
+#define NVC197_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_UNMAPPED_0 0x00000000
+#define NVC197_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_FAIL_ALWAYS 0x00000001
+
+#define NVC197_INVALIDATE_SAMPLER_CACHE_ALL 0x120c
+#define NVC197_INVALIDATE_SAMPLER_CACHE_ALL_V 0:0
+
+#define NVC197_INVALIDATE_TEXTURE_HEADER_CACHE_ALL 0x1210
+#define NVC197_INVALIDATE_TEXTURE_HEADER_CACHE_ALL_V 0:0
+
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST 0x1214
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX 15:0
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
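/*
 * The begin/end-instance draw methods pack a whole non-indexed draw into a
 * single 32-bit payload: start index in bits 15:0, vertex count in 27:16,
 * topology in 31:28. The *_SUBSEQUENT variant that follows uses the same
 * layout for the later instances of an instanced draw. A sketch, again with
 * the illustrative FLD_NUM helper from the earlier note (not the driver's
 * own code):
 */
static inline unsigned int draw_first_instance(unsigned int start,
                                               unsigned int count)
{
    /* count is limited to 12 bits (27:16) in this encoding. */
    return FLD_NUM(NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX, start)
         | FLD_NUM(NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT, count)
         | FLD_NUM(NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY,
                   NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES);
}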
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT 0x1218
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_START_INDEX 15:0
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC197_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC197_SET_CT_SELECT 0x121c
+#define NVC197_SET_CT_SELECT_TARGET_COUNT 3:0
+#define NVC197_SET_CT_SELECT_TARGET0 6:4
+#define NVC197_SET_CT_SELECT_TARGET1 9:7
+#define NVC197_SET_CT_SELECT_TARGET2 12:10
+#define NVC197_SET_CT_SELECT_TARGET3 15:13
+#define NVC197_SET_CT_SELECT_TARGET4 18:16
+#define NVC197_SET_CT_SELECT_TARGET5 21:19
+#define NVC197_SET_CT_SELECT_TARGET6 24:22
+#define NVC197_SET_CT_SELECT_TARGET7 27:25
+
+#define NVC197_SET_COMPRESSION_THRESHOLD 0x1220
+#define NVC197_SET_COMPRESSION_THRESHOLD_SAMPLES 3:0
+#define NVC197_SET_COMPRESSION_THRESHOLD_SAMPLES__0 0x00000000
+#define NVC197_SET_COMPRESSION_THRESHOLD_SAMPLES__1 0x00000001
+#define NVC197_SET_COMPRESSION_THRESHOLD_SAMPLES__2 0x00000002
+#define NVC197_SET_COMPRESSION_THRESHOLD_SAMPLES__4 0x00000003
+#define NVC197_SET_COMPRESSION_THRESHOLD_SAMPLES__8 0x00000004
+#define NVC197_SET_COMPRESSION_THRESHOLD_SAMPLES__16 0x00000005
+#define NVC197_SET_COMPRESSION_THRESHOLD_SAMPLES__32 0x00000006
+#define NVC197_SET_COMPRESSION_THRESHOLD_SAMPLES__64 0x00000007
+#define NVC197_SET_COMPRESSION_THRESHOLD_SAMPLES__128 0x00000008
+#define NVC197_SET_COMPRESSION_THRESHOLD_SAMPLES__256 0x00000009
+#define NVC197_SET_COMPRESSION_THRESHOLD_SAMPLES__512 0x0000000A
+#define NVC197_SET_COMPRESSION_THRESHOLD_SAMPLES__1024 0x0000000B
+#define NVC197_SET_COMPRESSION_THRESHOLD_SAMPLES__2048 0x0000000C
+
+#define NVC197_SET_PIXEL_SHADER_INTERLOCK_CONTROL 0x1224
+#define NVC197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE 1:0
+#define NVC197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_NO_CONFLICT_DETECT 0x00000000
+#define NVC197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_SAMPLE 0x00000001
+#define NVC197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_PIXEL 0x00000002
+#define NVC197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE 2:2
+#define NVC197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_16X16 0x00000000
+#define NVC197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_8X8 0x00000001
+#define NVC197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER 3:3
+#define NVC197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_ORDERED 0x00000000
+#define NVC197_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_UNORDERED 0x00000001
+
+#define NVC197_SET_ZT_SIZE_A 0x1228
+#define NVC197_SET_ZT_SIZE_A_WIDTH 27:0
+
+#define NVC197_SET_ZT_SIZE_B 0x122c
+#define NVC197_SET_ZT_SIZE_B_HEIGHT 17:0
+
+#define NVC197_SET_ZT_SIZE_C 0x1230
+#define NVC197_SET_ZT_SIZE_C_THIRD_DIMENSION 15:0
+#define NVC197_SET_ZT_SIZE_C_CONTROL 16:16
+#define NVC197_SET_ZT_SIZE_C_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000
+#define NVC197_SET_ZT_SIZE_C_CONTROL_ARRAY_SIZE_IS_ONE 0x00000001
+
+#define NVC197_SET_SAMPLER_BINDING 0x1234
+#define NVC197_SET_SAMPLER_BINDING_V 0:0
+#define NVC197_SET_SAMPLER_BINDING_V_INDEPENDENTLY 0x00000000
+#define NVC197_SET_SAMPLER_BINDING_V_VIA_HEADER_BINDING 0x00000001
+
+#define NVC197_DRAW_AUTO 0x123c
+#define NVC197_DRAW_AUTO_BYTE_COUNT 31:0
+
+#define NVC197_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK(i) (0x1240+(i)*4)
+#define NVC197_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK_V 31:0
+
+#define NVC197_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE 0x1260
+#define NVC197_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_INDEX 7:0
+#define NVC197_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_VALUE 23:8
+
+#define NVC197_SET_BACK_END_COPY_A 0x1264
+#define NVC197_SET_BACK_END_COPY_A_DWORDS 7:0
+#define NVC197_SET_BACK_END_COPY_A_SATURATE32_ENABLE 8:8
+#define NVC197_SET_BACK_END_COPY_A_SATURATE32_ENABLE_FALSE 0x00000000
+#define NVC197_SET_BACK_END_COPY_A_SATURATE32_ENABLE_TRUE 0x00000001
+#define NVC197_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE 12:12
+#define NVC197_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_FALSE 0x00000000
+#define NVC197_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_BACK_END_COPY_B 0x1268
+#define NVC197_SET_BACK_END_COPY_B_SRC_ADDRESS_UPPER 7:0
+
+#define NVC197_SET_BACK_END_COPY_C 0x126c
+#define NVC197_SET_BACK_END_COPY_C_SRC_ADDRESS_LOWER 31:0
+
+#define NVC197_SET_BACK_END_COPY_D 0x1270
+#define NVC197_SET_BACK_END_COPY_D_DEST_ADDRESS_UPPER 7:0
+
+#define NVC197_SET_BACK_END_COPY_E 0x1274
+#define NVC197_SET_BACK_END_COPY_E_DEST_ADDRESS_LOWER 31:0
+
+#define NVC197_SET_CIRCULAR_BUFFER_SIZE 0x1280
+#define NVC197_SET_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 19:0
+
+#define NVC197_SET_VTG_REGISTER_WATERMARKS 0x1284
+#define NVC197_SET_VTG_REGISTER_WATERMARKS_LOW 15:0
+#define NVC197_SET_VTG_REGISTER_WATERMARKS_HIGH 31:16
+
+#define NVC197_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288
+#define NVC197_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0
+#define NVC197_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVC197_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVC197_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4
+
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS 0x1290
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY 5:4
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC197_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE 0x12a4
+#define NVC197_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE_V 31:0
+
+#define NVC197_CLEAR_ZCULL_REGION 0x12c8
+#define NVC197_CLEAR_ZCULL_REGION_Z_ENABLE 0:0
+#define NVC197_CLEAR_ZCULL_REGION_Z_ENABLE_FALSE 0x00000000
+#define NVC197_CLEAR_ZCULL_REGION_Z_ENABLE_TRUE 0x00000001
+#define NVC197_CLEAR_ZCULL_REGION_STENCIL_ENABLE 4:4
+#define NVC197_CLEAR_ZCULL_REGION_STENCIL_ENABLE_FALSE 0x00000000
+#define NVC197_CLEAR_ZCULL_REGION_STENCIL_ENABLE_TRUE 0x00000001
+#define NVC197_CLEAR_ZCULL_REGION_USE_CLEAR_RECT 1:1
+#define NVC197_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_FALSE 0x00000000
+#define NVC197_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_TRUE 0x00000001
+#define NVC197_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX 2:2
+#define NVC197_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_FALSE 0x00000000
+#define NVC197_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_TRUE 0x00000001
+#define NVC197_CLEAR_ZCULL_REGION_RT_ARRAY_INDEX 20:5
+#define NVC197_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE 3:3
+#define NVC197_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_FALSE 0x00000000
+#define NVC197_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_TRUE 0x00000001
+
+#define NVC197_SET_DEPTH_TEST 0x12cc
+#define NVC197_SET_DEPTH_TEST_ENABLE 0:0
+#define NVC197_SET_DEPTH_TEST_ENABLE_FALSE 0x00000000
+#define NVC197_SET_DEPTH_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_FILL_MODE 0x12d0
+#define NVC197_SET_FILL_MODE_V 31:0
+#define NVC197_SET_FILL_MODE_V_POINT 0x00000001
+#define NVC197_SET_FILL_MODE_V_WIREFRAME 0x00000002
+#define NVC197_SET_FILL_MODE_V_SOLID 0x00000003
+
+#define NVC197_SET_SHADE_MODE 0x12d4
+#define NVC197_SET_SHADE_MODE_V 31:0
+#define NVC197_SET_SHADE_MODE_V_FLAT 0x00000001
+#define NVC197_SET_SHADE_MODE_V_GOURAUD 0x00000002
+#define NVC197_SET_SHADE_MODE_V_OGL_FLAT 0x00001D00
+#define NVC197_SET_SHADE_MODE_V_OGL_SMOOTH 0x00001D01
+
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS 0x12d8
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY 5:4
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS 0x12dc
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY 5:4
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC197_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC197_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL 0x12e0
+#define NVC197_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT 3:0
+#define NVC197_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1 0x00000000
+#define NVC197_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_2X2 0x00000001
+#define NVC197_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1_VIRTUAL_SAMPLES 0x00000002
+
+#define NVC197_SET_BLEND_STATE_PER_TARGET 0x12e4
+#define NVC197_SET_BLEND_STATE_PER_TARGET_ENABLE 0:0
+#define NVC197_SET_BLEND_STATE_PER_TARGET_ENABLE_FALSE 0x00000000
+#define NVC197_SET_BLEND_STATE_PER_TARGET_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_DEPTH_WRITE 0x12e8
+#define NVC197_SET_DEPTH_WRITE_ENABLE 0:0
+#define NVC197_SET_DEPTH_WRITE_ENABLE_FALSE 0x00000000
+#define NVC197_SET_DEPTH_WRITE_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_ALPHA_TEST 0x12ec
+#define NVC197_SET_ALPHA_TEST_ENABLE 0:0
+#define NVC197_SET_ALPHA_TEST_ENABLE_FALSE 0x00000000
+#define NVC197_SET_ALPHA_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_INLINE_INDEX4X8_ALIGN 0x1300
+#define NVC197_SET_INLINE_INDEX4X8_ALIGN_COUNT 29:0
+#define NVC197_SET_INLINE_INDEX4X8_ALIGN_START 31:30
+
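/*
 * The inline-index path streams indices through the method data itself:
 * each NVC197_DRAW_INLINE_INDEX4X8 write (next group) carries four 8-bit
 * indices, while SET_INLINE_INDEX4X8_ALIGN above gives the total count and
 * the starting slot of the first valid index. A packing sketch (an editor's
 * illustration, not the driver's helper):
 */
static inline unsigned int pack_index4x8(unsigned char i0, unsigned char i1,
                                         unsigned char i2, unsigned char i3)
{
    /* INDEX0 in 7:0, INDEX1 in 15:8, INDEX2 in 23:16, INDEX3 in 31:24. */
    return ((unsigned int)i0)
         | ((unsigned int)i1 << 8)
         | ((unsigned int)i2 << 16)
         | ((unsigned int)i3 << 24);
}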
+#define NVC197_DRAW_INLINE_INDEX4X8 0x1304
+#define NVC197_DRAW_INLINE_INDEX4X8_INDEX0 7:0
+#define NVC197_DRAW_INLINE_INDEX4X8_INDEX1 15:8
+#define NVC197_DRAW_INLINE_INDEX4X8_INDEX2 23:16
+#define NVC197_DRAW_INLINE_INDEX4X8_INDEX3 31:24
+
+#define NVC197_D3D_SET_CULL_MODE 0x1308
+#define NVC197_D3D_SET_CULL_MODE_V 31:0
+#define NVC197_D3D_SET_CULL_MODE_V_NONE 0x00000001
+#define NVC197_D3D_SET_CULL_MODE_V_CW 0x00000002
+#define NVC197_D3D_SET_CULL_MODE_V_CCW 0x00000003
+
+#define NVC197_SET_DEPTH_FUNC 0x130c
+#define NVC197_SET_DEPTH_FUNC_V 31:0
+#define NVC197_SET_DEPTH_FUNC_V_OGL_NEVER 0x00000200
+#define NVC197_SET_DEPTH_FUNC_V_OGL_LESS 0x00000201
+#define NVC197_SET_DEPTH_FUNC_V_OGL_EQUAL 0x00000202
+#define NVC197_SET_DEPTH_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVC197_SET_DEPTH_FUNC_V_OGL_GREATER 0x00000204
+#define NVC197_SET_DEPTH_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVC197_SET_DEPTH_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVC197_SET_DEPTH_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVC197_SET_DEPTH_FUNC_V_D3D_NEVER 0x00000001
+#define NVC197_SET_DEPTH_FUNC_V_D3D_LESS 0x00000002
+#define NVC197_SET_DEPTH_FUNC_V_D3D_EQUAL 0x00000003
+#define NVC197_SET_DEPTH_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVC197_SET_DEPTH_FUNC_V_D3D_GREATER 0x00000005
+#define NVC197_SET_DEPTH_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVC197_SET_DEPTH_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVC197_SET_DEPTH_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVC197_SET_ALPHA_REF 0x1310
+#define NVC197_SET_ALPHA_REF_V 31:0
+
+#define NVC197_SET_ALPHA_FUNC 0x1314
+#define NVC197_SET_ALPHA_FUNC_V 31:0
+#define NVC197_SET_ALPHA_FUNC_V_OGL_NEVER 0x00000200
+#define NVC197_SET_ALPHA_FUNC_V_OGL_LESS 0x00000201
+#define NVC197_SET_ALPHA_FUNC_V_OGL_EQUAL 0x00000202
+#define NVC197_SET_ALPHA_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVC197_SET_ALPHA_FUNC_V_OGL_GREATER 0x00000204
+#define NVC197_SET_ALPHA_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVC197_SET_ALPHA_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVC197_SET_ALPHA_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVC197_SET_ALPHA_FUNC_V_D3D_NEVER 0x00000001
+#define NVC197_SET_ALPHA_FUNC_V_D3D_LESS 0x00000002
+#define NVC197_SET_ALPHA_FUNC_V_D3D_EQUAL 0x00000003
+#define NVC197_SET_ALPHA_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVC197_SET_ALPHA_FUNC_V_D3D_GREATER 0x00000005
+#define NVC197_SET_ALPHA_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVC197_SET_ALPHA_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVC197_SET_ALPHA_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVC197_SET_DRAW_AUTO_STRIDE 0x1318
+#define NVC197_SET_DRAW_AUTO_STRIDE_V 11:0
+
+#define NVC197_SET_BLEND_CONST_RED 0x131c
+#define NVC197_SET_BLEND_CONST_RED_V 31:0
+
+#define NVC197_SET_BLEND_CONST_GREEN 0x1320
+#define NVC197_SET_BLEND_CONST_GREEN_V 31:0
+
+#define NVC197_SET_BLEND_CONST_BLUE 0x1324
+#define NVC197_SET_BLEND_CONST_BLUE_V 31:0
+
+#define NVC197_SET_BLEND_CONST_ALPHA 0x1328
+#define NVC197_SET_BLEND_CONST_ALPHA_V 31:0
+
+#define NVC197_INVALIDATE_SAMPLER_CACHE 0x1330
+#define NVC197_INVALIDATE_SAMPLER_CACHE_LINES 0:0
+#define NVC197_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000
+#define NVC197_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001
+#define NVC197_INVALIDATE_SAMPLER_CACHE_TAG 25:4
+
+#define NVC197_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334
+#define NVC197_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0
+#define NVC197_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000
+#define NVC197_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001
+#define NVC197_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4
+
+#define NVC197_INVALIDATE_TEXTURE_DATA_CACHE 0x1338
+#define NVC197_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0
+#define NVC197_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000
+#define NVC197_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001
+#define NVC197_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4
+
+#define NVC197_SET_BLEND_SEPARATE_FOR_ALPHA 0x133c
+#define NVC197_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE 0:0
+#define NVC197_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000
+#define NVC197_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVC197_SET_BLEND_COLOR_OP 0x1340
+#define NVC197_SET_BLEND_COLOR_OP_V 31:0
+#define NVC197_SET_BLEND_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVC197_SET_BLEND_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVC197_SET_BLEND_COLOR_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVC197_SET_BLEND_COLOR_OP_V_OGL_MIN 0x00008007
+#define NVC197_SET_BLEND_COLOR_OP_V_OGL_MAX 0x00008008
+#define NVC197_SET_BLEND_COLOR_OP_V_D3D_ADD 0x00000001
+#define NVC197_SET_BLEND_COLOR_OP_V_D3D_SUBTRACT 0x00000002
+#define NVC197_SET_BLEND_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVC197_SET_BLEND_COLOR_OP_V_D3D_MIN 0x00000004
+#define NVC197_SET_BLEND_COLOR_OP_V_D3D_MAX 0x00000005
+
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF 0x1344
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V 31:0
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF 0x1348
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V 31:0
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC197_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC197_SET_BLEND_ALPHA_OP 0x134c
+#define NVC197_SET_BLEND_ALPHA_OP_V 31:0
+#define NVC197_SET_BLEND_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVC197_SET_BLEND_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVC197_SET_BLEND_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVC197_SET_BLEND_ALPHA_OP_V_OGL_MIN 0x00008007
+#define NVC197_SET_BLEND_ALPHA_OP_V_OGL_MAX 0x00008008
+#define NVC197_SET_BLEND_ALPHA_OP_V_D3D_ADD 0x00000001
+#define NVC197_SET_BLEND_ALPHA_OP_V_D3D_SUBTRACT 0x00000002
+#define NVC197_SET_BLEND_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVC197_SET_BLEND_ALPHA_OP_V_D3D_MIN 0x00000004
+#define NVC197_SET_BLEND_ALPHA_OP_V_D3D_MAX 0x00000005
+
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF 0x1350
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V 31:0
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC197_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
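/*
 * Like SET_DEPTH_FUNC, SET_ALPHA_FUNC, and the stencil ops, the blend
 * coefficient methods accept two value families in the same 32-bit payload:
 * OpenGL-flavored enums (the OGL_* values) and Direct3D-flavored small
 * integers (the D3D_* values). The two numeric ranges do not overlap, which
 * is presumably how the class distinguishes them; a driver simply emits the
 * family matching its client API. An illustrative selection (editor's
 * sketch, not driver code):
 */
enum client_api { CLIENT_OGL, CLIENT_D3D };

static inline unsigned int blend_src_coeff_one(enum client_api api)
{
    /* The same semantic coefficient (ONE), in its two encodings. */
    return (api == CLIENT_OGL)
        ? NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE   /* 0x00004001 */
        : NVC197_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ONE;  /* 0x00000002 */
}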
0x00000000 +#define NVC197_SET_GLOBAL_COLOR_KEY_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF 0x1358 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V 31:0 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC197_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC197_SET_SINGLE_ROP_CONTROL 0x135c +#define NVC197_SET_SINGLE_ROP_CONTROL_ENABLE 0:0 +#define NVC197_SET_SINGLE_ROP_CONTROL_ENABLE_FALSE 0x00000000 +#define NVC197_SET_SINGLE_ROP_CONTROL_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_BLEND(i) (0x1360+(i)*4) +#define NVC197_SET_BLEND_ENABLE 0:0 +#define NVC197_SET_BLEND_ENABLE_FALSE 0x00000000 +#define NVC197_SET_BLEND_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_STENCIL_TEST 0x1380 +#define NVC197_SET_STENCIL_TEST_ENABLE 0:0 +#define NVC197_SET_STENCIL_TEST_ENABLE_FALSE 0x00000000 +#define NVC197_SET_STENCIL_TEST_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_STENCIL_OP_FAIL 0x1384 +#define NVC197_SET_STENCIL_OP_FAIL_V 31:0 +#define NVC197_SET_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00 +#define 
NVC197_SET_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000 +#define NVC197_SET_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01 +#define NVC197_SET_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02 +#define NVC197_SET_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03 +#define NVC197_SET_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A +#define NVC197_SET_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507 +#define NVC197_SET_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508 +#define NVC197_SET_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001 +#define NVC197_SET_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002 +#define NVC197_SET_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003 +#define NVC197_SET_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004 +#define NVC197_SET_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005 +#define NVC197_SET_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006 +#define NVC197_SET_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007 +#define NVC197_SET_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008 + +#define NVC197_SET_STENCIL_OP_ZFAIL 0x1388 +#define NVC197_SET_STENCIL_OP_ZFAIL_V 31:0 +#define NVC197_SET_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00 +#define NVC197_SET_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000 +#define NVC197_SET_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01 +#define NVC197_SET_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02 +#define NVC197_SET_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03 +#define NVC197_SET_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A +#define NVC197_SET_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507 +#define NVC197_SET_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508 +#define NVC197_SET_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001 +#define NVC197_SET_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002 +#define NVC197_SET_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003 +#define NVC197_SET_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004 +#define NVC197_SET_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005 +#define NVC197_SET_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006 +#define NVC197_SET_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007 +#define NVC197_SET_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008 + +#define NVC197_SET_STENCIL_OP_ZPASS 0x138c +#define NVC197_SET_STENCIL_OP_ZPASS_V 31:0 +#define NVC197_SET_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00 +#define NVC197_SET_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000 +#define NVC197_SET_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01 +#define NVC197_SET_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02 +#define NVC197_SET_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03 +#define NVC197_SET_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A +#define NVC197_SET_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507 +#define NVC197_SET_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508 +#define NVC197_SET_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001 +#define NVC197_SET_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002 +#define NVC197_SET_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003 +#define NVC197_SET_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004 +#define NVC197_SET_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005 +#define NVC197_SET_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006 +#define NVC197_SET_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007 +#define NVC197_SET_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008 + +#define NVC197_SET_STENCIL_FUNC 0x1390 +#define NVC197_SET_STENCIL_FUNC_V 31:0 +#define NVC197_SET_STENCIL_FUNC_V_OGL_NEVER 0x00000200 +#define NVC197_SET_STENCIL_FUNC_V_OGL_LESS 0x00000201 +#define NVC197_SET_STENCIL_FUNC_V_OGL_EQUAL 0x00000202 +#define NVC197_SET_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVC197_SET_STENCIL_FUNC_V_OGL_GREATER 0x00000204 +#define NVC197_SET_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVC197_SET_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVC197_SET_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207 +#define 
NVC197_SET_STENCIL_FUNC_V_D3D_NEVER 0x00000001 +#define NVC197_SET_STENCIL_FUNC_V_D3D_LESS 0x00000002 +#define NVC197_SET_STENCIL_FUNC_V_D3D_EQUAL 0x00000003 +#define NVC197_SET_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVC197_SET_STENCIL_FUNC_V_D3D_GREATER 0x00000005 +#define NVC197_SET_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVC197_SET_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVC197_SET_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVC197_SET_STENCIL_FUNC_REF 0x1394 +#define NVC197_SET_STENCIL_FUNC_REF_V 7:0 + +#define NVC197_SET_STENCIL_FUNC_MASK 0x1398 +#define NVC197_SET_STENCIL_FUNC_MASK_V 7:0 + +#define NVC197_SET_STENCIL_MASK 0x139c +#define NVC197_SET_STENCIL_MASK_V 7:0 + +#define NVC197_SET_DRAW_AUTO_START 0x13a4 +#define NVC197_SET_DRAW_AUTO_START_BYTE_COUNT 31:0 + +#define NVC197_SET_PS_SATURATE 0x13a8 +#define NVC197_SET_PS_SATURATE_OUTPUT0 0:0 +#define NVC197_SET_PS_SATURATE_OUTPUT0_FALSE 0x00000000 +#define NVC197_SET_PS_SATURATE_OUTPUT0_TRUE 0x00000001 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE0 1:1 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE0_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE0_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC197_SET_PS_SATURATE_OUTPUT1 4:4 +#define NVC197_SET_PS_SATURATE_OUTPUT1_FALSE 0x00000000 +#define NVC197_SET_PS_SATURATE_OUTPUT1_TRUE 0x00000001 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE1 5:5 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE1_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE1_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC197_SET_PS_SATURATE_OUTPUT2 8:8 +#define NVC197_SET_PS_SATURATE_OUTPUT2_FALSE 0x00000000 +#define NVC197_SET_PS_SATURATE_OUTPUT2_TRUE 0x00000001 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE2 9:9 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE2_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE2_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC197_SET_PS_SATURATE_OUTPUT3 12:12 +#define NVC197_SET_PS_SATURATE_OUTPUT3_FALSE 0x00000000 +#define NVC197_SET_PS_SATURATE_OUTPUT3_TRUE 0x00000001 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE3 13:13 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE3_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE3_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC197_SET_PS_SATURATE_OUTPUT4 16:16 +#define NVC197_SET_PS_SATURATE_OUTPUT4_FALSE 0x00000000 +#define NVC197_SET_PS_SATURATE_OUTPUT4_TRUE 0x00000001 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE4 17:17 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE4_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE4_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC197_SET_PS_SATURATE_OUTPUT5 20:20 +#define NVC197_SET_PS_SATURATE_OUTPUT5_FALSE 0x00000000 +#define NVC197_SET_PS_SATURATE_OUTPUT5_TRUE 0x00000001 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE5 21:21 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE5_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE5_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC197_SET_PS_SATURATE_OUTPUT6 24:24 +#define NVC197_SET_PS_SATURATE_OUTPUT6_FALSE 0x00000000 +#define NVC197_SET_PS_SATURATE_OUTPUT6_TRUE 0x00000001 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE6 25:25 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE6_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE6_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC197_SET_PS_SATURATE_OUTPUT7 28:28 +#define NVC197_SET_PS_SATURATE_OUTPUT7_FALSE 0x00000000 +#define NVC197_SET_PS_SATURATE_OUTPUT7_TRUE 0x00000001 +#define 
NVC197_SET_PS_SATURATE_CLAMP_RANGE7 29:29 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE7_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC197_SET_PS_SATURATE_CLAMP_RANGE7_MINUS_ONE_TO_PLUS_ONE 0x00000001 + +#define NVC197_SET_WINDOW_ORIGIN 0x13ac +#define NVC197_SET_WINDOW_ORIGIN_MODE 0:0 +#define NVC197_SET_WINDOW_ORIGIN_MODE_UPPER_LEFT 0x00000000 +#define NVC197_SET_WINDOW_ORIGIN_MODE_LOWER_LEFT 0x00000001 +#define NVC197_SET_WINDOW_ORIGIN_FLIP_Y 4:4 +#define NVC197_SET_WINDOW_ORIGIN_FLIP_Y_FALSE 0x00000000 +#define NVC197_SET_WINDOW_ORIGIN_FLIP_Y_TRUE 0x00000001 + +#define NVC197_SET_LINE_WIDTH_FLOAT 0x13b0 +#define NVC197_SET_LINE_WIDTH_FLOAT_V 31:0 + +#define NVC197_SET_ALIASED_LINE_WIDTH_FLOAT 0x13b4 +#define NVC197_SET_ALIASED_LINE_WIDTH_FLOAT_V 31:0 + +#define NVC197_SET_LINE_MULTISAMPLE_OVERRIDE 0x1418 +#define NVC197_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE 0:0 +#define NVC197_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_FALSE 0x00000000 +#define NVC197_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_ALPHA_HYSTERESIS 0x1420 +#define NVC197_SET_ALPHA_HYSTERESIS_ROUNDS_OF_ALPHA 7:0 + +#define NVC197_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424 +#define NVC197_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0 +#define NVC197_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC197_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC197_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4 + +#define NVC197_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x1428 +#define NVC197_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0 +#define NVC197_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC197_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC197_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4 + +#define NVC197_SET_GLOBAL_BASE_VERTEX_INDEX 0x1434 +#define NVC197_SET_GLOBAL_BASE_VERTEX_INDEX_V 31:0 + +#define NVC197_SET_GLOBAL_BASE_INSTANCE_INDEX 0x1438 +#define NVC197_SET_GLOBAL_BASE_INSTANCE_INDEX_V 31:0 + +#define NVC197_SET_PS_WARP_WATERMARKS 0x1450 +#define NVC197_SET_PS_WARP_WATERMARKS_LOW 15:0 +#define NVC197_SET_PS_WARP_WATERMARKS_HIGH 31:16 + +#define NVC197_SET_PS_REGISTER_WATERMARKS 0x1454 +#define NVC197_SET_PS_REGISTER_WATERMARKS_LOW 15:0 +#define NVC197_SET_PS_REGISTER_WATERMARKS_HIGH 31:16 + +#define NVC197_STORE_ZCULL 0x1464 +#define NVC197_STORE_ZCULL_V 0:0 + +#define NVC197_SET_ITERATED_BLEND_CONSTANT_RED(j) (0x1480+(j)*16) +#define NVC197_SET_ITERATED_BLEND_CONSTANT_RED_V 15:0 + +#define NVC197_SET_ITERATED_BLEND_CONSTANT_GREEN(j) (0x1484+(j)*16) +#define NVC197_SET_ITERATED_BLEND_CONSTANT_GREEN_V 15:0 + +#define NVC197_SET_ITERATED_BLEND_CONSTANT_BLUE(j) (0x1488+(j)*16) +#define NVC197_SET_ITERATED_BLEND_CONSTANT_BLUE_V 15:0 + +#define NVC197_LOAD_ZCULL 0x1500 +#define NVC197_LOAD_ZCULL_V 0:0 + +#define NVC197_SET_SURFACE_CLIP_ID_HEIGHT 0x1504 +#define NVC197_SET_SURFACE_CLIP_ID_HEIGHT_V 31:0 + +#define NVC197_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL 0x1508 +#define NVC197_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMIN 15:0 +#define NVC197_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMAX 31:16 + +#define NVC197_SET_CLIP_ID_CLEAR_RECT_VERTICAL 0x150c +#define NVC197_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMIN 15:0 +#define NVC197_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMAX 31:16 + +#define NVC197_SET_USER_CLIP_ENABLE 0x1510 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE0 0:0 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE0_FALSE 0x00000000 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE0_TRUE 0x00000001 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE1 1:1 +#define 
NVC197_SET_USER_CLIP_ENABLE_PLANE1_FALSE 0x00000000 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE1_TRUE 0x00000001 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE2 2:2 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE2_FALSE 0x00000000 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE2_TRUE 0x00000001 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE3 3:3 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE3_FALSE 0x00000000 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE3_TRUE 0x00000001 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE4 4:4 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE4_FALSE 0x00000000 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE4_TRUE 0x00000001 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE5 5:5 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE5_FALSE 0x00000000 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE5_TRUE 0x00000001 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE6 6:6 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE6_FALSE 0x00000000 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE6_TRUE 0x00000001 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE7 7:7 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE7_FALSE 0x00000000 +#define NVC197_SET_USER_CLIP_ENABLE_PLANE7_TRUE 0x00000001 + +#define NVC197_SET_ZPASS_PIXEL_COUNT 0x1514 +#define NVC197_SET_ZPASS_PIXEL_COUNT_ENABLE 0:0 +#define NVC197_SET_ZPASS_PIXEL_COUNT_ENABLE_FALSE 0x00000000 +#define NVC197_SET_ZPASS_PIXEL_COUNT_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_POINT_SIZE 0x1518 +#define NVC197_SET_POINT_SIZE_V 31:0 + +#define NVC197_SET_ZCULL_STATS 0x151c +#define NVC197_SET_ZCULL_STATS_ENABLE 0:0 +#define NVC197_SET_ZCULL_STATS_ENABLE_FALSE 0x00000000 +#define NVC197_SET_ZCULL_STATS_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_POINT_SPRITE 0x1520 +#define NVC197_SET_POINT_SPRITE_ENABLE 0:0 +#define NVC197_SET_POINT_SPRITE_ENABLE_FALSE 0x00000000 +#define NVC197_SET_POINT_SPRITE_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_SHADER_EXCEPTIONS 0x1528 +#define NVC197_SET_SHADER_EXCEPTIONS_ENABLE 0:0 +#define NVC197_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000 +#define NVC197_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001 + +#define NVC197_CLEAR_REPORT_VALUE 0x1530 +#define NVC197_CLEAR_REPORT_VALUE_TYPE 4:0 +#define NVC197_CLEAR_REPORT_VALUE_TYPE_DA_VERTICES_GENERATED 0x00000012 +#define NVC197_CLEAR_REPORT_VALUE_TYPE_DA_PRIMITIVES_GENERATED 0x00000013 +#define NVC197_CLEAR_REPORT_VALUE_TYPE_VS_INVOCATIONS 0x00000015 +#define NVC197_CLEAR_REPORT_VALUE_TYPE_TI_INVOCATIONS 0x00000016 +#define NVC197_CLEAR_REPORT_VALUE_TYPE_TS_INVOCATIONS 0x00000017 +#define NVC197_CLEAR_REPORT_VALUE_TYPE_TS_PRIMITIVES_GENERATED 0x00000018 +#define NVC197_CLEAR_REPORT_VALUE_TYPE_GS_INVOCATIONS 0x0000001A +#define NVC197_CLEAR_REPORT_VALUE_TYPE_GS_PRIMITIVES_GENERATED 0x0000001B +#define NVC197_CLEAR_REPORT_VALUE_TYPE_VTG_PRIMITIVES_OUT 0x0000001F +#define NVC197_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_SUCCEEDED 0x00000010 +#define NVC197_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_NEEDED 0x00000011 +#define NVC197_CLEAR_REPORT_VALUE_TYPE_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000003 +#define NVC197_CLEAR_REPORT_VALUE_TYPE_CLIPPER_INVOCATIONS 0x0000001C +#define NVC197_CLEAR_REPORT_VALUE_TYPE_CLIPPER_PRIMITIVES_GENERATED 0x0000001D +#define NVC197_CLEAR_REPORT_VALUE_TYPE_ZCULL_STATS 0x00000002 +#define NVC197_CLEAR_REPORT_VALUE_TYPE_PS_INVOCATIONS 0x0000001E +#define NVC197_CLEAR_REPORT_VALUE_TYPE_ZPASS_PIXEL_CNT 0x00000001 +#define NVC197_CLEAR_REPORT_VALUE_TYPE_ALPHA_BETA_CLOCKS 0x00000004 +#define NVC197_CLEAR_REPORT_VALUE_TYPE_SCG_CLOCKS 0x00000009 + +#define NVC197_SET_ANTI_ALIAS_ENABLE 0x1534 +#define 
NVC197_SET_ANTI_ALIAS_ENABLE_V 0:0 +#define NVC197_SET_ANTI_ALIAS_ENABLE_V_FALSE 0x00000000 +#define NVC197_SET_ANTI_ALIAS_ENABLE_V_TRUE 0x00000001 + +#define NVC197_SET_ZT_SELECT 0x1538 +#define NVC197_SET_ZT_SELECT_TARGET_COUNT 0:0 + +#define NVC197_SET_ANTI_ALIAS_ALPHA_CONTROL 0x153c +#define NVC197_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE 0:0 +#define NVC197_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_DISABLE 0x00000000 +#define NVC197_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_ENABLE 0x00000001 +#define NVC197_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE 4:4 +#define NVC197_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_DISABLE 0x00000000 +#define NVC197_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_ENABLE 0x00000001 + +#define NVC197_SET_RENDER_ENABLE_A 0x1550 +#define NVC197_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC197_SET_RENDER_ENABLE_B 0x1554 +#define NVC197_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC197_SET_RENDER_ENABLE_C 0x1558 +#define NVC197_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC197_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC197_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC197_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC197_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC197_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC197_SET_TEX_SAMPLER_POOL_A 0x155c +#define NVC197_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0 + +#define NVC197_SET_TEX_SAMPLER_POOL_B 0x1560 +#define NVC197_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC197_SET_TEX_SAMPLER_POOL_C 0x1564 +#define NVC197_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0 + +#define NVC197_SET_SLOPE_SCALE_DEPTH_BIAS 0x156c +#define NVC197_SET_SLOPE_SCALE_DEPTH_BIAS_V 31:0 + +#define NVC197_SET_ANTI_ALIASED_LINE 0x1570 +#define NVC197_SET_ANTI_ALIASED_LINE_ENABLE 0:0 +#define NVC197_SET_ANTI_ALIASED_LINE_ENABLE_FALSE 0x00000000 +#define NVC197_SET_ANTI_ALIASED_LINE_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_TEX_HEADER_POOL_A 0x1574 +#define NVC197_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0 + +#define NVC197_SET_TEX_HEADER_POOL_B 0x1578 +#define NVC197_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC197_SET_TEX_HEADER_POOL_C 0x157c +#define NVC197_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0 + +#define NVC197_SET_ACTIVE_ZCULL_REGION 0x1590 +#define NVC197_SET_ACTIVE_ZCULL_REGION_ID 5:0 + +#define NVC197_SET_TWO_SIDED_STENCIL_TEST 0x1594 +#define NVC197_SET_TWO_SIDED_STENCIL_TEST_ENABLE 0:0 +#define NVC197_SET_TWO_SIDED_STENCIL_TEST_ENABLE_FALSE 0x00000000 +#define NVC197_SET_TWO_SIDED_STENCIL_TEST_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_BACK_STENCIL_OP_FAIL 0x1598 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V 31:0 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECRSAT 
0x00000005 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007 +#define NVC197_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008 + +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL 0x159c +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V 31:0 +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00 +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000 +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01 +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02 +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03 +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507 +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508 +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001 +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002 +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003 +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004 +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005 +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006 +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007 +#define NVC197_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008 + +#define NVC197_SET_BACK_STENCIL_OP_ZPASS 0x15a0 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V 31:0 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007 +#define NVC197_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008 + +#define NVC197_SET_BACK_STENCIL_FUNC 0x15a4 +#define NVC197_SET_BACK_STENCIL_FUNC_V 31:0 +#define NVC197_SET_BACK_STENCIL_FUNC_V_OGL_NEVER 0x00000200 +#define NVC197_SET_BACK_STENCIL_FUNC_V_OGL_LESS 0x00000201 +#define NVC197_SET_BACK_STENCIL_FUNC_V_OGL_EQUAL 0x00000202 +#define NVC197_SET_BACK_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVC197_SET_BACK_STENCIL_FUNC_V_OGL_GREATER 0x00000204 +#define NVC197_SET_BACK_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVC197_SET_BACK_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVC197_SET_BACK_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVC197_SET_BACK_STENCIL_FUNC_V_D3D_NEVER 0x00000001 +#define NVC197_SET_BACK_STENCIL_FUNC_V_D3D_LESS 0x00000002 +#define NVC197_SET_BACK_STENCIL_FUNC_V_D3D_EQUAL 0x00000003 +#define NVC197_SET_BACK_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVC197_SET_BACK_STENCIL_FUNC_V_D3D_GREATER 0x00000005 +#define NVC197_SET_BACK_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVC197_SET_BACK_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define 
NVC197_SET_BACK_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVC197_SET_SRGB_WRITE 0x15b8 +#define NVC197_SET_SRGB_WRITE_ENABLE 0:0 +#define NVC197_SET_SRGB_WRITE_ENABLE_FALSE 0x00000000 +#define NVC197_SET_SRGB_WRITE_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_DEPTH_BIAS 0x15bc +#define NVC197_SET_DEPTH_BIAS_V 31:0 + +#define NVC197_SET_ZCULL_REGION_FORMAT 0x15c8 +#define NVC197_SET_ZCULL_REGION_FORMAT_TYPE 3:0 +#define NVC197_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X4 0x00000000 +#define NVC197_SET_ZCULL_REGION_FORMAT_TYPE_ZS_4X4 0x00000001 +#define NVC197_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X2 0x00000002 +#define NVC197_SET_ZCULL_REGION_FORMAT_TYPE_Z_2X4 0x00000003 +#define NVC197_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X8_4X4 0x00000004 +#define NVC197_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_4X2 0x00000005 +#define NVC197_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_2X4 0x00000006 +#define NVC197_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X16_4X8 0x00000007 +#define NVC197_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_2X2 0x00000008 +#define NVC197_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_4X2 0x00000009 +#define NVC197_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_2X4 0x0000000A +#define NVC197_SET_ZCULL_REGION_FORMAT_TYPE_ZS_8X8_2X2 0x0000000B +#define NVC197_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_1X1 0x0000000C + +#define NVC197_SET_RT_LAYER 0x15cc +#define NVC197_SET_RT_LAYER_V 15:0 +#define NVC197_SET_RT_LAYER_CONTROL 16:16 +#define NVC197_SET_RT_LAYER_CONTROL_V_SELECTS_LAYER 0x00000000 +#define NVC197_SET_RT_LAYER_CONTROL_GEOMETRY_SHADER_SELECTS_LAYER 0x00000001 + +#define NVC197_SET_ANTI_ALIAS 0x15d0 +#define NVC197_SET_ANTI_ALIAS_SAMPLES 3:0 +#define NVC197_SET_ANTI_ALIAS_SAMPLES_MODE_1X1 0x00000000 +#define NVC197_SET_ANTI_ALIAS_SAMPLES_MODE_2X1 0x00000001 +#define NVC197_SET_ANTI_ALIAS_SAMPLES_MODE_2X2 0x00000002 +#define NVC197_SET_ANTI_ALIAS_SAMPLES_MODE_4X2 0x00000003 +#define NVC197_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_D3D 0x00000004 +#define NVC197_SET_ANTI_ALIAS_SAMPLES_MODE_2X1_D3D 0x00000005 +#define NVC197_SET_ANTI_ALIAS_SAMPLES_MODE_4X4 0x00000006 +#define NVC197_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_4 0x00000008 +#define NVC197_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_12 0x00000009 +#define NVC197_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_8 0x0000000A +#define NVC197_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_24 0x0000000B + +#define NVC197_SET_EDGE_FLAG 0x15e4 +#define NVC197_SET_EDGE_FLAG_V 0:0 +#define NVC197_SET_EDGE_FLAG_V_FALSE 0x00000000 +#define NVC197_SET_EDGE_FLAG_V_TRUE 0x00000001 + +#define NVC197_DRAW_INLINE_INDEX 0x15e8 +#define NVC197_DRAW_INLINE_INDEX_V 31:0 + +#define NVC197_SET_INLINE_INDEX2X16_ALIGN 0x15ec +#define NVC197_SET_INLINE_INDEX2X16_ALIGN_COUNT 30:0 +#define NVC197_SET_INLINE_INDEX2X16_ALIGN_START_ODD 31:31 +#define NVC197_SET_INLINE_INDEX2X16_ALIGN_START_ODD_FALSE 0x00000000 +#define NVC197_SET_INLINE_INDEX2X16_ALIGN_START_ODD_TRUE 0x00000001 + +#define NVC197_DRAW_INLINE_INDEX2X16 0x15f0 +#define NVC197_DRAW_INLINE_INDEX2X16_EVEN 15:0 +#define NVC197_DRAW_INLINE_INDEX2X16_ODD 31:16 + +#define NVC197_SET_VERTEX_GLOBAL_BASE_OFFSET_A 0x15f4 +#define NVC197_SET_VERTEX_GLOBAL_BASE_OFFSET_A_UPPER 7:0 + +#define NVC197_SET_VERTEX_GLOBAL_BASE_OFFSET_B 0x15f8 +#define NVC197_SET_VERTEX_GLOBAL_BASE_OFFSET_B_LOWER 31:0 + +#define NVC197_SET_ZCULL_REGION_PIXEL_OFFSET_A 0x15fc +#define NVC197_SET_ZCULL_REGION_PIXEL_OFFSET_A_WIDTH 15:0 + +#define NVC197_SET_ZCULL_REGION_PIXEL_OFFSET_B 0x1600 +#define NVC197_SET_ZCULL_REGION_PIXEL_OFFSET_B_HEIGHT 15:0 + +#define NVC197_SET_POINT_SPRITE_SELECT 0x1604 +#define NVC197_SET_POINT_SPRITE_SELECT_RMODE 
1:0 +#define NVC197_SET_POINT_SPRITE_SELECT_RMODE_ZERO 0x00000000 +#define NVC197_SET_POINT_SPRITE_SELECT_RMODE_FROM_R 0x00000001 +#define NVC197_SET_POINT_SPRITE_SELECT_RMODE_FROM_S 0x00000002 +#define NVC197_SET_POINT_SPRITE_SELECT_ORIGIN 2:2 +#define NVC197_SET_POINT_SPRITE_SELECT_ORIGIN_BOTTOM 0x00000000 +#define NVC197_SET_POINT_SPRITE_SELECT_ORIGIN_TOP 0x00000001 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE0 3:3 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE0_PASSTHROUGH 0x00000000 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE0_GENERATE 0x00000001 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE1 4:4 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE1_PASSTHROUGH 0x00000000 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE1_GENERATE 0x00000001 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE2 5:5 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE2_PASSTHROUGH 0x00000000 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE2_GENERATE 0x00000001 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE3 6:6 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE3_PASSTHROUGH 0x00000000 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE3_GENERATE 0x00000001 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE4 7:7 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE4_PASSTHROUGH 0x00000000 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE4_GENERATE 0x00000001 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE5 8:8 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE5_PASSTHROUGH 0x00000000 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE5_GENERATE 0x00000001 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE6 9:9 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE6_PASSTHROUGH 0x00000000 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE6_GENERATE 0x00000001 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE7 10:10 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE7_PASSTHROUGH 0x00000000 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE7_GENERATE 0x00000001 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE8 11:11 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE8_PASSTHROUGH 0x00000000 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE8_GENERATE 0x00000001 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE9 12:12 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE9_PASSTHROUGH 0x00000000 +#define NVC197_SET_POINT_SPRITE_SELECT_TEXTURE9_GENERATE 0x00000001 + +#define NVC197_SET_PROGRAM_REGION_A 0x1608 +#define NVC197_SET_PROGRAM_REGION_A_ADDRESS_UPPER 7:0 + +#define NVC197_SET_PROGRAM_REGION_B 0x160c +#define NVC197_SET_PROGRAM_REGION_B_ADDRESS_LOWER 31:0 + +#define NVC197_SET_ATTRIBUTE_DEFAULT 0x1610 +#define NVC197_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE 0:0 +#define NVC197_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_0001 0x00000000 +#define NVC197_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_1111 0x00000001 +#define NVC197_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR 1:1 +#define NVC197_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0000 0x00000000 +#define NVC197_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0001 0x00000001 +#define NVC197_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR 2:2 +#define NVC197_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0000 0x00000000 +#define NVC197_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0001 0x00000001 +#define NVC197_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE 3:3 +#define NVC197_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0000 0x00000000 +#define NVC197_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0001 0x00000001 +#define NVC197_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0 4:4 +#define 
NVC197_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_0001 0x00000000 +#define NVC197_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_1111 0x00000001 +#define NVC197_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15 5:5 +#define NVC197_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0000 0x00000000 +#define NVC197_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0001 0x00000001 + +#define NVC197_END 0x1614 +#define NVC197_END_V 0:0 + +#define NVC197_BEGIN 0x1618 +#define NVC197_BEGIN_OP 15:0 +#define NVC197_BEGIN_OP_POINTS 0x00000000 +#define NVC197_BEGIN_OP_LINES 0x00000001 +#define NVC197_BEGIN_OP_LINE_LOOP 0x00000002 +#define NVC197_BEGIN_OP_LINE_STRIP 0x00000003 +#define NVC197_BEGIN_OP_TRIANGLES 0x00000004 +#define NVC197_BEGIN_OP_TRIANGLE_STRIP 0x00000005 +#define NVC197_BEGIN_OP_TRIANGLE_FAN 0x00000006 +#define NVC197_BEGIN_OP_QUADS 0x00000007 +#define NVC197_BEGIN_OP_QUAD_STRIP 0x00000008 +#define NVC197_BEGIN_OP_POLYGON 0x00000009 +#define NVC197_BEGIN_OP_LINELIST_ADJCY 0x0000000A +#define NVC197_BEGIN_OP_LINESTRIP_ADJCY 0x0000000B +#define NVC197_BEGIN_OP_TRIANGLELIST_ADJCY 0x0000000C +#define NVC197_BEGIN_OP_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC197_BEGIN_OP_PATCH 0x0000000E +#define NVC197_BEGIN_PRIMITIVE_ID 24:24 +#define NVC197_BEGIN_PRIMITIVE_ID_FIRST 0x00000000 +#define NVC197_BEGIN_PRIMITIVE_ID_UNCHANGED 0x00000001 +#define NVC197_BEGIN_INSTANCE_ID 27:26 +#define NVC197_BEGIN_INSTANCE_ID_FIRST 0x00000000 +#define NVC197_BEGIN_INSTANCE_ID_SUBSEQUENT 0x00000001 +#define NVC197_BEGIN_INSTANCE_ID_UNCHANGED 0x00000002 +#define NVC197_BEGIN_SPLIT_MODE 30:29 +#define NVC197_BEGIN_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000 +#define NVC197_BEGIN_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001 +#define NVC197_BEGIN_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002 +#define NVC197_BEGIN_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003 +#define NVC197_BEGIN_INSTANCE_ITERATE_ENABLE 31:31 +#define NVC197_BEGIN_INSTANCE_ITERATE_ENABLE_FALSE 0x00000000 +#define NVC197_BEGIN_INSTANCE_ITERATE_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_VERTEX_ID_COPY 0x161c +#define NVC197_SET_VERTEX_ID_COPY_ENABLE 0:0 +#define NVC197_SET_VERTEX_ID_COPY_ENABLE_FALSE 0x00000000 +#define NVC197_SET_VERTEX_ID_COPY_ENABLE_TRUE 0x00000001 +#define NVC197_SET_VERTEX_ID_COPY_ATTRIBUTE_SLOT 11:4 + +#define NVC197_ADD_TO_PRIMITIVE_ID 0x1620 +#define NVC197_ADD_TO_PRIMITIVE_ID_V 31:0 + +#define NVC197_LOAD_PRIMITIVE_ID 0x1624 +#define NVC197_LOAD_PRIMITIVE_ID_V 31:0 + +#define NVC197_SET_SHADER_BASED_CULL 0x162c +#define NVC197_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE 1:1 +#define NVC197_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_FALSE 0x00000000 +#define NVC197_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_TRUE 0x00000001 +#define NVC197_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE 0:0 +#define NVC197_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_FALSE 0x00000000 +#define NVC197_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_CLASS_VERSION 0x1638 +#define NVC197_SET_CLASS_VERSION_CURRENT 15:0 +#define NVC197_SET_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC197_SET_DA_PRIMITIVE_RESTART 0x1644 +#define NVC197_SET_DA_PRIMITIVE_RESTART_ENABLE 0:0 +#define NVC197_SET_DA_PRIMITIVE_RESTART_ENABLE_FALSE 0x00000000 +#define NVC197_SET_DA_PRIMITIVE_RESTART_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_DA_PRIMITIVE_RESTART_INDEX 0x1648 +#define NVC197_SET_DA_PRIMITIVE_RESTART_INDEX_V 31:0 + +#define NVC197_SET_DA_OUTPUT 0x164c +#define NVC197_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START 12:12 +#define 
NVC197_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_FALSE 0x00000000 +#define NVC197_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_TRUE 0x00000001 + +#define NVC197_SET_ANTI_ALIASED_POINT 0x1658 +#define NVC197_SET_ANTI_ALIASED_POINT_ENABLE 0:0 +#define NVC197_SET_ANTI_ALIASED_POINT_ENABLE_FALSE 0x00000000 +#define NVC197_SET_ANTI_ALIASED_POINT_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_POINT_CENTER_MODE 0x165c +#define NVC197_SET_POINT_CENTER_MODE_V 31:0 +#define NVC197_SET_POINT_CENTER_MODE_V_OGL 0x00000000 +#define NVC197_SET_POINT_CENTER_MODE_V_D3D 0x00000001 + +#define NVC197_SET_LINE_SMOOTH_PARAMETERS 0x1668 +#define NVC197_SET_LINE_SMOOTH_PARAMETERS_FALLOFF 31:0 +#define NVC197_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_00 0x00000000 +#define NVC197_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_33 0x00000001 +#define NVC197_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_60 0x00000002 + +#define NVC197_SET_LINE_STIPPLE 0x166c +#define NVC197_SET_LINE_STIPPLE_ENABLE 0:0 +#define NVC197_SET_LINE_STIPPLE_ENABLE_FALSE 0x00000000 +#define NVC197_SET_LINE_STIPPLE_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_LINE_SMOOTH_EDGE_TABLE(i) (0x1670+(i)*4) +#define NVC197_SET_LINE_SMOOTH_EDGE_TABLE_V0 7:0 +#define NVC197_SET_LINE_SMOOTH_EDGE_TABLE_V1 15:8 +#define NVC197_SET_LINE_SMOOTH_EDGE_TABLE_V2 23:16 +#define NVC197_SET_LINE_SMOOTH_EDGE_TABLE_V3 31:24 + +#define NVC197_SET_LINE_STIPPLE_PARAMETERS 0x1680 +#define NVC197_SET_LINE_STIPPLE_PARAMETERS_FACTOR 7:0 +#define NVC197_SET_LINE_STIPPLE_PARAMETERS_PATTERN 23:8 + +#define NVC197_SET_PROVOKING_VERTEX 0x1684 +#define NVC197_SET_PROVOKING_VERTEX_V 0:0 +#define NVC197_SET_PROVOKING_VERTEX_V_FIRST 0x00000000 +#define NVC197_SET_PROVOKING_VERTEX_V_LAST 0x00000001 + +#define NVC197_SET_TWO_SIDED_LIGHT 0x1688 +#define NVC197_SET_TWO_SIDED_LIGHT_ENABLE 0:0 +#define NVC197_SET_TWO_SIDED_LIGHT_ENABLE_FALSE 0x00000000 +#define NVC197_SET_TWO_SIDED_LIGHT_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_POLYGON_STIPPLE 0x168c +#define NVC197_SET_POLYGON_STIPPLE_ENABLE 0:0 +#define NVC197_SET_POLYGON_STIPPLE_ENABLE_FALSE 0x00000000 +#define NVC197_SET_POLYGON_STIPPLE_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_SHADER_CONTROL 0x1690 +#define NVC197_SET_SHADER_CONTROL_DEFAULT_PARTIAL 0:0 +#define NVC197_SET_SHADER_CONTROL_DEFAULT_PARTIAL_ZERO 0x00000000 +#define NVC197_SET_SHADER_CONTROL_DEFAULT_PARTIAL_INFINITY 0x00000001 +#define NVC197_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR 1:1 +#define NVC197_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_LEGACY 0x00000000 +#define NVC197_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_FP64_COMPATIBLE 0x00000001 +#define NVC197_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR 2:2 +#define NVC197_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_ZERO 0x00000000 +#define NVC197_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_INDEFINITE 0x00000001 + +#define NVC197_CHECK_CLASS_VERSION 0x16a0 +#define NVC197_CHECK_CLASS_VERSION_CURRENT 15:0 +#define NVC197_CHECK_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC197_SET_SPH_VERSION 0x16a4 +#define NVC197_SET_SPH_VERSION_CURRENT 15:0 +#define NVC197_SET_SPH_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC197_CHECK_SPH_VERSION 0x16a8 +#define NVC197_CHECK_SPH_VERSION_CURRENT 15:0 +#define NVC197_CHECK_SPH_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC197_SET_ALPHA_TO_COVERAGE_OVERRIDE 0x16b4 +#define NVC197_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE 0:0 +#define NVC197_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000 +#define NVC197_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 
0x00000001 +#define NVC197_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT 1:1 +#define NVC197_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_DISABLE 0x00000000 +#define NVC197_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_ENABLE 0x00000001 + +#define NVC197_SET_POLYGON_STIPPLE_PATTERN(i) (0x1700+(i)*4) +#define NVC197_SET_POLYGON_STIPPLE_PATTERN_V 31:0 + +#define NVC197_SET_AAM_VERSION 0x1790 +#define NVC197_SET_AAM_VERSION_CURRENT 15:0 +#define NVC197_SET_AAM_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC197_CHECK_AAM_VERSION 0x1794 +#define NVC197_CHECK_AAM_VERSION_CURRENT 15:0 +#define NVC197_CHECK_AAM_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC197_SET_ZT_LAYER 0x179c +#define NVC197_SET_ZT_LAYER_OFFSET 15:0 + +#define NVC197_SET_INDEX_BUFFER_A 0x17c8 +#define NVC197_SET_INDEX_BUFFER_A_ADDRESS_UPPER 7:0 + +#define NVC197_SET_INDEX_BUFFER_B 0x17cc +#define NVC197_SET_INDEX_BUFFER_B_ADDRESS_LOWER 31:0 + +#define NVC197_SET_INDEX_BUFFER_C 0x17d0 +#define NVC197_SET_INDEX_BUFFER_C_LIMIT_ADDRESS_UPPER 7:0 + +#define NVC197_SET_INDEX_BUFFER_D 0x17d4 +#define NVC197_SET_INDEX_BUFFER_D_LIMIT_ADDRESS_LOWER 31:0 + +#define NVC197_SET_INDEX_BUFFER_E 0x17d8 +#define NVC197_SET_INDEX_BUFFER_E_INDEX_SIZE 1:0 +#define NVC197_SET_INDEX_BUFFER_E_INDEX_SIZE_ONE_BYTE 0x00000000 +#define NVC197_SET_INDEX_BUFFER_E_INDEX_SIZE_TWO_BYTES 0x00000001 +#define NVC197_SET_INDEX_BUFFER_E_INDEX_SIZE_FOUR_BYTES 0x00000002 + +#define NVC197_SET_INDEX_BUFFER_F 0x17dc +#define NVC197_SET_INDEX_BUFFER_F_FIRST 31:0 + +#define NVC197_DRAW_INDEX_BUFFER 0x17e0 +#define NVC197_DRAW_INDEX_BUFFER_COUNT 31:0 + +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST 0x17e4 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_FIRST 15:0 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_COUNT 27:16 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST 0x17e8 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_FIRST 15:0 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_COUNT 27:16 +#define 
NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST 0x17ec +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_FIRST 15:0 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_COUNT 27:16 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f0 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define 
NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC197_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f4 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC197_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f8 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define 
NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC197_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NVC197_SET_DEPTH_BIAS_CLAMP 0x187c +#define NVC197_SET_DEPTH_BIAS_CLAMP_V 31:0 + +#define NVC197_SET_VERTEX_STREAM_INSTANCE_A(i) (0x1880+(i)*4) +#define NVC197_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED 0:0 +#define NVC197_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_FALSE 0x00000000 +#define NVC197_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_TRUE 0x00000001 + +#define NVC197_SET_VERTEX_STREAM_INSTANCE_B(i) (0x18c0+(i)*4) +#define NVC197_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED 0:0 +#define NVC197_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_FALSE 0x00000000 +#define NVC197_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_TRUE 0x00000001 + +#define NVC197_SET_ATTRIBUTE_POINT_SIZE 0x1910 +#define NVC197_SET_ATTRIBUTE_POINT_SIZE_ENABLE 0:0 +#define NVC197_SET_ATTRIBUTE_POINT_SIZE_ENABLE_FALSE 0x00000000 +#define NVC197_SET_ATTRIBUTE_POINT_SIZE_ENABLE_TRUE 0x00000001 +#define NVC197_SET_ATTRIBUTE_POINT_SIZE_SLOT 11:4 + +#define NVC197_OGL_SET_CULL 0x1918 +#define NVC197_OGL_SET_CULL_ENABLE 0:0 +#define NVC197_OGL_SET_CULL_ENABLE_FALSE 0x00000000 +#define NVC197_OGL_SET_CULL_ENABLE_TRUE 0x00000001 + +#define NVC197_OGL_SET_FRONT_FACE 0x191c +#define NVC197_OGL_SET_FRONT_FACE_V 31:0 +#define NVC197_OGL_SET_FRONT_FACE_V_CW 0x00000900 +#define NVC197_OGL_SET_FRONT_FACE_V_CCW 0x00000901 + +#define NVC197_OGL_SET_CULL_FACE 0x1920 +#define NVC197_OGL_SET_CULL_FACE_V 31:0 +#define NVC197_OGL_SET_CULL_FACE_V_FRONT 0x00000404 +#define NVC197_OGL_SET_CULL_FACE_V_BACK 0x00000405 +#define NVC197_OGL_SET_CULL_FACE_V_FRONT_AND_BACK 0x00000408 + +#define NVC197_SET_VIEWPORT_PIXEL 0x1924 +#define NVC197_SET_VIEWPORT_PIXEL_CENTER 0:0 +#define NVC197_SET_VIEWPORT_PIXEL_CENTER_AT_HALF_INTEGERS 0x00000000 +#define NVC197_SET_VIEWPORT_PIXEL_CENTER_AT_INTEGERS 0x00000001 + +#define NVC197_SET_VIEWPORT_SCALE_OFFSET 0x192c +#define NVC197_SET_VIEWPORT_SCALE_OFFSET_ENABLE 0:0 +#define NVC197_SET_VIEWPORT_SCALE_OFFSET_ENABLE_FALSE 0x00000000 +#define NVC197_SET_VIEWPORT_SCALE_OFFSET_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_VIEWPORT_CLIP_CONTROL 0x193c +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE 0:0 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_FALSE 0x00000000 
+#define NVC197_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_TRUE 0x00000001 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z 3:3 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLIP 0x00000000 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLAMP 0x00000001 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z 4:4 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLIP 0x00000000 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLAMP 0x00000001 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND 7:7 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_256 0x00000000 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_1 0x00000001 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND 10:10 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_256 0x00000000 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_1 0x00000001 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP 13:11 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP 0x00000000 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_PASSTHRU 0x00000001 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XY_CLIP 0x00000002 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XYZ_CLIP 0x00000003 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP_NO_Z_CULL 0x00000004 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_Z_CLIP 0x00000005 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_TRI_FILL_OR_CLIP 0x00000006 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z 2:1 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SAME_AS_XY_GUARDBAND 0x00000000 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_256 0x00000001 +#define NVC197_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_1 0x00000002 + +#define NVC197_SET_USER_CLIP_OP 0x1940 +#define NVC197_SET_USER_CLIP_OP_PLANE0 0:0 +#define NVC197_SET_USER_CLIP_OP_PLANE0_CLIP 0x00000000 +#define NVC197_SET_USER_CLIP_OP_PLANE0_CULL 0x00000001 +#define NVC197_SET_USER_CLIP_OP_PLANE1 4:4 +#define NVC197_SET_USER_CLIP_OP_PLANE1_CLIP 0x00000000 +#define NVC197_SET_USER_CLIP_OP_PLANE1_CULL 0x00000001 +#define NVC197_SET_USER_CLIP_OP_PLANE2 8:8 +#define NVC197_SET_USER_CLIP_OP_PLANE2_CLIP 0x00000000 +#define NVC197_SET_USER_CLIP_OP_PLANE2_CULL 0x00000001 +#define NVC197_SET_USER_CLIP_OP_PLANE3 12:12 +#define NVC197_SET_USER_CLIP_OP_PLANE3_CLIP 0x00000000 +#define NVC197_SET_USER_CLIP_OP_PLANE3_CULL 0x00000001 +#define NVC197_SET_USER_CLIP_OP_PLANE4 16:16 +#define NVC197_SET_USER_CLIP_OP_PLANE4_CLIP 0x00000000 +#define NVC197_SET_USER_CLIP_OP_PLANE4_CULL 0x00000001 +#define NVC197_SET_USER_CLIP_OP_PLANE5 20:20 +#define NVC197_SET_USER_CLIP_OP_PLANE5_CLIP 0x00000000 +#define NVC197_SET_USER_CLIP_OP_PLANE5_CULL 0x00000001 +#define NVC197_SET_USER_CLIP_OP_PLANE6 24:24 +#define NVC197_SET_USER_CLIP_OP_PLANE6_CLIP 0x00000000 +#define NVC197_SET_USER_CLIP_OP_PLANE6_CULL 0x00000001 +#define NVC197_SET_USER_CLIP_OP_PLANE7 28:28 +#define NVC197_SET_USER_CLIP_OP_PLANE7_CLIP 0x00000000 +#define NVC197_SET_USER_CLIP_OP_PLANE7_CULL 0x00000001 + +#define NVC197_SET_RENDER_ENABLE_OVERRIDE 0x1944 +#define NVC197_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define NVC197_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define NVC197_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define NVC197_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + 
+#define NVC197_SET_PRIMITIVE_TOPOLOGY_CONTROL 0x1948 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE 0:0 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_TOPOLOGY_IN_BEGIN_METHODS 0x00000000 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_SEPARATE_TOPOLOGY_STATE 0x00000001 + +#define NVC197_SET_WINDOW_CLIP_ENABLE 0x194c +#define NVC197_SET_WINDOW_CLIP_ENABLE_V 0:0 +#define NVC197_SET_WINDOW_CLIP_ENABLE_V_FALSE 0x00000000 +#define NVC197_SET_WINDOW_CLIP_ENABLE_V_TRUE 0x00000001 + +#define NVC197_SET_WINDOW_CLIP_TYPE 0x1950 +#define NVC197_SET_WINDOW_CLIP_TYPE_V 1:0 +#define NVC197_SET_WINDOW_CLIP_TYPE_V_INCLUSIVE 0x00000000 +#define NVC197_SET_WINDOW_CLIP_TYPE_V_EXCLUSIVE 0x00000001 +#define NVC197_SET_WINDOW_CLIP_TYPE_V_CLIPALL 0x00000002 + +#define NVC197_INVALIDATE_ZCULL 0x1958 +#define NVC197_INVALIDATE_ZCULL_V 31:0 +#define NVC197_INVALIDATE_ZCULL_V_INVALIDATE 0x00000000 + +#define NVC197_SET_ZCULL 0x1968 +#define NVC197_SET_ZCULL_Z_ENABLE 0:0 +#define NVC197_SET_ZCULL_Z_ENABLE_FALSE 0x00000000 +#define NVC197_SET_ZCULL_Z_ENABLE_TRUE 0x00000001 +#define NVC197_SET_ZCULL_STENCIL_ENABLE 4:4 +#define NVC197_SET_ZCULL_STENCIL_ENABLE_FALSE 0x00000000 +#define NVC197_SET_ZCULL_STENCIL_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_ZCULL_BOUNDS 0x196c +#define NVC197_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE 0:0 +#define NVC197_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_FALSE 0x00000000 +#define NVC197_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_TRUE 0x00000001 +#define NVC197_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE 4:4 +#define NVC197_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_FALSE 0x00000000 +#define NVC197_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_PRIMITIVE_TOPOLOGY 0x1970 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V 15:0 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_POINTLIST 0x00000001 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LINELIST 0x00000002 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP 0x00000003 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST 0x00000004 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP 0x00000005 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LINELIST_ADJCY 0x0000000A +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP_ADJCY 0x0000000B +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST_ADJCY 0x0000000C +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_PATCHLIST 0x0000000E +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_POINTS 0x00001001 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST 0x00001002 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST 0x00001003 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST 0x0000100F +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINESTRIP 0x00001010 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINESTRIP 0x00001011 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLELIST 0x00001012 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLESTRIP 0x00001013 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLESTRIP 0x00001014 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN 0x00001015 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLEFAN 0x00001016 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN_IMM 0x00001017 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST_IMM 0x00001018 +#define NVC197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST2 0x0000101A +#define 
NVC197_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST2 0x0000101B + +#define NVC197_ZCULL_SYNC 0x1978 +#define NVC197_ZCULL_SYNC_V 31:0 + +#define NVC197_SET_CLIP_ID_TEST 0x197c +#define NVC197_SET_CLIP_ID_TEST_ENABLE 0:0 +#define NVC197_SET_CLIP_ID_TEST_ENABLE_FALSE 0x00000000 +#define NVC197_SET_CLIP_ID_TEST_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_SURFACE_CLIP_ID_WIDTH 0x1980 +#define NVC197_SET_SURFACE_CLIP_ID_WIDTH_V 31:0 + +#define NVC197_SET_CLIP_ID 0x1984 +#define NVC197_SET_CLIP_ID_V 31:0 + +#define NVC197_SET_DEPTH_BOUNDS_TEST 0x19bc +#define NVC197_SET_DEPTH_BOUNDS_TEST_ENABLE 0:0 +#define NVC197_SET_DEPTH_BOUNDS_TEST_ENABLE_FALSE 0x00000000 +#define NVC197_SET_DEPTH_BOUNDS_TEST_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_BLEND_FLOAT_OPTION 0x19c0 +#define NVC197_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO 0:0 +#define NVC197_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_FALSE 0x00000000 +#define NVC197_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_TRUE 0x00000001 + +#define NVC197_SET_LOGIC_OP 0x19c4 +#define NVC197_SET_LOGIC_OP_ENABLE 0:0 +#define NVC197_SET_LOGIC_OP_ENABLE_FALSE 0x00000000 +#define NVC197_SET_LOGIC_OP_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_LOGIC_OP_FUNC 0x19c8 +#define NVC197_SET_LOGIC_OP_FUNC_V 31:0 +#define NVC197_SET_LOGIC_OP_FUNC_V_CLEAR 0x00001500 +#define NVC197_SET_LOGIC_OP_FUNC_V_AND 0x00001501 +#define NVC197_SET_LOGIC_OP_FUNC_V_AND_REVERSE 0x00001502 +#define NVC197_SET_LOGIC_OP_FUNC_V_COPY 0x00001503 +#define NVC197_SET_LOGIC_OP_FUNC_V_AND_INVERTED 0x00001504 +#define NVC197_SET_LOGIC_OP_FUNC_V_NOOP 0x00001505 +#define NVC197_SET_LOGIC_OP_FUNC_V_XOR 0x00001506 +#define NVC197_SET_LOGIC_OP_FUNC_V_OR 0x00001507 +#define NVC197_SET_LOGIC_OP_FUNC_V_NOR 0x00001508 +#define NVC197_SET_LOGIC_OP_FUNC_V_EQUIV 0x00001509 +#define NVC197_SET_LOGIC_OP_FUNC_V_INVERT 0x0000150A +#define NVC197_SET_LOGIC_OP_FUNC_V_OR_REVERSE 0x0000150B +#define NVC197_SET_LOGIC_OP_FUNC_V_COPY_INVERTED 0x0000150C +#define NVC197_SET_LOGIC_OP_FUNC_V_OR_INVERTED 0x0000150D +#define NVC197_SET_LOGIC_OP_FUNC_V_NAND 0x0000150E +#define NVC197_SET_LOGIC_OP_FUNC_V_SET 0x0000150F + +#define NVC197_SET_Z_COMPRESSION 0x19cc +#define NVC197_SET_Z_COMPRESSION_ENABLE 0:0 +#define NVC197_SET_Z_COMPRESSION_ENABLE_FALSE 0x00000000 +#define NVC197_SET_Z_COMPRESSION_ENABLE_TRUE 0x00000001 + +#define NVC197_CLEAR_SURFACE 0x19d0 +#define NVC197_CLEAR_SURFACE_Z_ENABLE 0:0 +#define NVC197_CLEAR_SURFACE_Z_ENABLE_FALSE 0x00000000 +#define NVC197_CLEAR_SURFACE_Z_ENABLE_TRUE 0x00000001 +#define NVC197_CLEAR_SURFACE_STENCIL_ENABLE 1:1 +#define NVC197_CLEAR_SURFACE_STENCIL_ENABLE_FALSE 0x00000000 +#define NVC197_CLEAR_SURFACE_STENCIL_ENABLE_TRUE 0x00000001 +#define NVC197_CLEAR_SURFACE_R_ENABLE 2:2 +#define NVC197_CLEAR_SURFACE_R_ENABLE_FALSE 0x00000000 +#define NVC197_CLEAR_SURFACE_R_ENABLE_TRUE 0x00000001 +#define NVC197_CLEAR_SURFACE_G_ENABLE 3:3 +#define NVC197_CLEAR_SURFACE_G_ENABLE_FALSE 0x00000000 +#define NVC197_CLEAR_SURFACE_G_ENABLE_TRUE 0x00000001 +#define NVC197_CLEAR_SURFACE_B_ENABLE 4:4 +#define NVC197_CLEAR_SURFACE_B_ENABLE_FALSE 0x00000000 +#define NVC197_CLEAR_SURFACE_B_ENABLE_TRUE 0x00000001 +#define NVC197_CLEAR_SURFACE_A_ENABLE 5:5 +#define NVC197_CLEAR_SURFACE_A_ENABLE_FALSE 0x00000000 +#define NVC197_CLEAR_SURFACE_A_ENABLE_TRUE 0x00000001 +#define NVC197_CLEAR_SURFACE_MRT_SELECT 9:6 +#define NVC197_CLEAR_SURFACE_RT_ARRAY_INDEX 25:10 + +#define NVC197_CLEAR_CLIP_ID_SURFACE 0x19d4 +#define NVC197_CLEAR_CLIP_ID_SURFACE_V 31:0 + +#define 
NVC197_SET_COLOR_COMPRESSION(i) (0x19e0+(i)*4) +#define NVC197_SET_COLOR_COMPRESSION_ENABLE 0:0 +#define NVC197_SET_COLOR_COMPRESSION_ENABLE_FALSE 0x00000000 +#define NVC197_SET_COLOR_COMPRESSION_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_CT_WRITE(i) (0x1a00+(i)*4) +#define NVC197_SET_CT_WRITE_R_ENABLE 0:0 +#define NVC197_SET_CT_WRITE_R_ENABLE_FALSE 0x00000000 +#define NVC197_SET_CT_WRITE_R_ENABLE_TRUE 0x00000001 +#define NVC197_SET_CT_WRITE_G_ENABLE 4:4 +#define NVC197_SET_CT_WRITE_G_ENABLE_FALSE 0x00000000 +#define NVC197_SET_CT_WRITE_G_ENABLE_TRUE 0x00000001 +#define NVC197_SET_CT_WRITE_B_ENABLE 8:8 +#define NVC197_SET_CT_WRITE_B_ENABLE_FALSE 0x00000000 +#define NVC197_SET_CT_WRITE_B_ENABLE_TRUE 0x00000001 +#define NVC197_SET_CT_WRITE_A_ENABLE 12:12 +#define NVC197_SET_CT_WRITE_A_ENABLE_FALSE 0x00000000 +#define NVC197_SET_CT_WRITE_A_ENABLE_TRUE 0x00000001 + +#define NVC197_PIPE_NOP 0x1a2c +#define NVC197_PIPE_NOP_V 31:0 + +#define NVC197_SET_SPARE00 0x1a30 +#define NVC197_SET_SPARE00_V 31:0 + +#define NVC197_SET_SPARE01 0x1a34 +#define NVC197_SET_SPARE01_V 31:0 + +#define NVC197_SET_SPARE02 0x1a38 +#define NVC197_SET_SPARE02_V 31:0 + +#define NVC197_SET_SPARE03 0x1a3c +#define NVC197_SET_SPARE03_V 31:0 + +#define NVC197_SET_REPORT_SEMAPHORE_A 0x1b00 +#define NVC197_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC197_SET_REPORT_SEMAPHORE_B 0x1b04 +#define NVC197_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC197_SET_REPORT_SEMAPHORE_C 0x1b08 +#define NVC197_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC197_SET_REPORT_SEMAPHORE_D 0x1b0c +#define NVC197_SET_REPORT_SEMAPHORE_D_OPERATION 1:0 +#define NVC197_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000 +#define NVC197_SET_REPORT_SEMAPHORE_D_OPERATION_ACQUIRE 0x00000001 +#define NVC197_SET_REPORT_SEMAPHORE_D_OPERATION_REPORT_ONLY 0x00000002 +#define NVC197_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003 +#define NVC197_SET_REPORT_SEMAPHORE_D_RELEASE 4:4 +#define NVC197_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_READS_COMPLETE 0x00000000 +#define NVC197_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_WRITES_COMPLETE 0x00000001 +#define NVC197_SET_REPORT_SEMAPHORE_D_ACQUIRE 8:8 +#define NVC197_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_WRITES_START 0x00000000 +#define NVC197_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_READS_START 0x00000001 +#define NVC197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION 15:12 +#define NVC197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_NONE 0x00000000 +#define NVC197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DATA_ASSEMBLER 0x00000001 +#define NVC197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VERTEX_SHADER 0x00000002 +#define NVC197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_INIT_SHADER 0x00000008 +#define NVC197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_SHADER 0x00000009 +#define NVC197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_GEOMETRY_SHADER 0x00000006 +#define NVC197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_STREAMING_OUTPUT 0x00000005 +#define NVC197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VPC 0x00000004 +#define NVC197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ZCULL 0x00000007 +#define NVC197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_PIXEL_SHADER 0x0000000A +#define NVC197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DEPTH_TEST 0x0000000C +#define NVC197_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ALL 0x0000000F +#define NVC197_SET_REPORT_SEMAPHORE_D_COMPARISON 16:16 +#define NVC197_SET_REPORT_SEMAPHORE_D_COMPARISON_EQ 0x00000000 +#define 
NVC197_SET_REPORT_SEMAPHORE_D_COMPARISON_GE 0x00000001 +#define NVC197_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20 +#define NVC197_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVC197_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT 27:23 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_NONE 0x00000000 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_DA_VERTICES_GENERATED 0x00000001 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_DA_PRIMITIVES_GENERATED 0x00000003 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_VS_INVOCATIONS 0x00000005 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_TI_INVOCATIONS 0x0000001B +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_TS_INVOCATIONS 0x0000001D +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_TS_PRIMITIVES_GENERATED 0x0000001F +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_GS_INVOCATIONS 0x00000007 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_GS_PRIMITIVES_GENERATED 0x00000009 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_ALPHA_BETA_CLOCKS 0x00000004 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_SCG_CLOCKS 0x00000008 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_VTG_PRIMITIVES_OUT 0x00000012 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x0000001E +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_SUCCEEDED 0x0000000B +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED 0x0000000D +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000006 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_BYTE_COUNT 0x0000001A +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_INVOCATIONS 0x0000000F +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_PRIMITIVES_GENERATED 0x00000011 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS0 0x0000000A +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS1 0x0000000C +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS2 0x0000000E +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS3 0x00000010 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_PS_INVOCATIONS 0x00000013 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT 0x00000002 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT64 0x00000015 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_TILED_ZPASS_PIXEL_CNT64 0x00000017 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_COLOR_TARGET 0x00000018 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_ZETA_TARGET 0x00000019 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_BOUNDING_RECTANGLE 0x0000001C +#define NVC197_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28 +#define NVC197_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVC197_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVC197_SET_REPORT_SEMAPHORE_D_SUB_REPORT 7:5 +#define NVC197_SET_REPORT_SEMAPHORE_D_REPORT_DWORD_NUMBER 21:21 +#define NVC197_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2 +#define NVC197_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000 +#define NVC197_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001 +#define NVC197_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3 +#define NVC197_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC197_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9 +#define NVC197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 
0x00000001 +#define NVC197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003 +#define NVC197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005 +#define NVC197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006 +#define NVC197_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC197_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17 +#define NVC197_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC197_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001 + +#define NVC197_SET_VERTEX_STREAM_A_FORMAT(j) (0x1c00+(j)*16) +#define NVC197_SET_VERTEX_STREAM_A_FORMAT_STRIDE 11:0 +#define NVC197_SET_VERTEX_STREAM_A_FORMAT_ENABLE 12:12 +#define NVC197_SET_VERTEX_STREAM_A_FORMAT_ENABLE_FALSE 0x00000000 +#define NVC197_SET_VERTEX_STREAM_A_FORMAT_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_VERTEX_STREAM_A_LOCATION_A(j) (0x1c04+(j)*16) +#define NVC197_SET_VERTEX_STREAM_A_LOCATION_A_OFFSET_UPPER 7:0 + +#define NVC197_SET_VERTEX_STREAM_A_LOCATION_B(j) (0x1c08+(j)*16) +#define NVC197_SET_VERTEX_STREAM_A_LOCATION_B_OFFSET_LOWER 31:0 + +#define NVC197_SET_VERTEX_STREAM_A_FREQUENCY(j) (0x1c0c+(j)*16) +#define NVC197_SET_VERTEX_STREAM_A_FREQUENCY_V 31:0 + +#define NVC197_SET_VERTEX_STREAM_B_FORMAT(j) (0x1d00+(j)*16) +#define NVC197_SET_VERTEX_STREAM_B_FORMAT_STRIDE 11:0 +#define NVC197_SET_VERTEX_STREAM_B_FORMAT_ENABLE 12:12 +#define NVC197_SET_VERTEX_STREAM_B_FORMAT_ENABLE_FALSE 0x00000000 +#define NVC197_SET_VERTEX_STREAM_B_FORMAT_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_VERTEX_STREAM_B_LOCATION_A(j) (0x1d04+(j)*16) +#define NVC197_SET_VERTEX_STREAM_B_LOCATION_A_OFFSET_UPPER 7:0 + +#define NVC197_SET_VERTEX_STREAM_B_LOCATION_B(j) (0x1d08+(j)*16) +#define NVC197_SET_VERTEX_STREAM_B_LOCATION_B_OFFSET_LOWER 31:0 + +#define NVC197_SET_VERTEX_STREAM_B_FREQUENCY(j) (0x1d0c+(j)*16) +#define NVC197_SET_VERTEX_STREAM_B_FREQUENCY_V 31:0 + +#define NVC197_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA(j) (0x1e00+(j)*32) +#define NVC197_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE 0:0 +#define NVC197_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000 +#define NVC197_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_BLEND_PER_TARGET_COLOR_OP(j) (0x1e04+(j)*32) +#define NVC197_SET_BLEND_PER_TARGET_COLOR_OP_V 31:0 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC197_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC197_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MIN 0x00008007 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MAX 0x00008008 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_ADD 0x00000001 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MIN 0x00000004 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MAX 0x00000005 + +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF(j) (0x1e08+(j)*32) +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V 31:0 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 
0x00004300 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF(j) (0x1e0c+(j)*32) +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V 31:0 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define 
NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC197_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_OP(j) (0x1e10+(j)*32) +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_OP_V 31:0 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MIN 0x00008007 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MAX 0x00008008 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_ADD 0x00000001 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MIN 0x00000004 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MAX 0x00000005 + +#define 
NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF(j) (0x1e14+(j)*32) +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V 31:0 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF(j) (0x1e18+(j)*32) +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V 31:0 +#define 
NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC197_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC197_SET_VERTEX_STREAM_LIMIT_A_A(j) (0x1f00+(j)*8) +#define NVC197_SET_VERTEX_STREAM_LIMIT_A_A_UPPER 7:0 + +#define NVC197_SET_VERTEX_STREAM_LIMIT_A_B(j) (0x1f04+(j)*8) +#define NVC197_SET_VERTEX_STREAM_LIMIT_A_B_LOWER 31:0 + +#define NVC197_SET_VERTEX_STREAM_LIMIT_B_A(j) (0x1f80+(j)*8) +#define NVC197_SET_VERTEX_STREAM_LIMIT_B_A_UPPER 7:0 + +#define NVC197_SET_VERTEX_STREAM_LIMIT_B_B(j) (0x1f84+(j)*8) +#define NVC197_SET_VERTEX_STREAM_LIMIT_B_B_LOWER 31:0 + +#define NVC197_SET_PIPELINE_SHADER(j) (0x2000+(j)*64) +#define 
NVC197_SET_PIPELINE_SHADER_ENABLE 0:0 +#define NVC197_SET_PIPELINE_SHADER_ENABLE_FALSE 0x00000000 +#define NVC197_SET_PIPELINE_SHADER_ENABLE_TRUE 0x00000001 +#define NVC197_SET_PIPELINE_SHADER_TYPE 7:4 +#define NVC197_SET_PIPELINE_SHADER_TYPE_VERTEX_CULL_BEFORE_FETCH 0x00000000 +#define NVC197_SET_PIPELINE_SHADER_TYPE_VERTEX 0x00000001 +#define NVC197_SET_PIPELINE_SHADER_TYPE_TESSELLATION_INIT 0x00000002 +#define NVC197_SET_PIPELINE_SHADER_TYPE_TESSELLATION 0x00000003 +#define NVC197_SET_PIPELINE_SHADER_TYPE_GEOMETRY 0x00000004 +#define NVC197_SET_PIPELINE_SHADER_TYPE_PIXEL 0x00000005 + +#define NVC197_SET_PIPELINE_PROGRAM(j) (0x2004+(j)*64) +#define NVC197_SET_PIPELINE_PROGRAM_OFFSET 31:0 + +#define NVC197_SET_PIPELINE_RESERVED_A(j) (0x2008+(j)*64) +#define NVC197_SET_PIPELINE_RESERVED_A_V 0:0 + +#define NVC197_SET_PIPELINE_REGISTER_COUNT(j) (0x200c+(j)*64) +#define NVC197_SET_PIPELINE_REGISTER_COUNT_V 7:0 + +#define NVC197_SET_PIPELINE_BINDING(j) (0x2010+(j)*64) +#define NVC197_SET_PIPELINE_BINDING_GROUP 2:0 + +#define NVC197_SET_PIPELINE_RESERVED_B(j) (0x2014+(j)*64) +#define NVC197_SET_PIPELINE_RESERVED_B_V 0:0 + +#define NVC197_SET_PIPELINE_RESERVED_C(j) (0x2018+(j)*64) +#define NVC197_SET_PIPELINE_RESERVED_C_V 0:0 + +#define NVC197_SET_PIPELINE_RESERVED_D(j) (0x201c+(j)*64) +#define NVC197_SET_PIPELINE_RESERVED_D_V 0:0 + +#define NVC197_SET_PIPELINE_RESERVED_E(j) (0x2020+(j)*64) +#define NVC197_SET_PIPELINE_RESERVED_E_V 0:0 + +#define NVC197_SET_FALCON00 0x2300 +#define NVC197_SET_FALCON00_V 31:0 + +#define NVC197_SET_FALCON01 0x2304 +#define NVC197_SET_FALCON01_V 31:0 + +#define NVC197_SET_FALCON02 0x2308 +#define NVC197_SET_FALCON02_V 31:0 + +#define NVC197_SET_FALCON03 0x230c +#define NVC197_SET_FALCON03_V 31:0 + +#define NVC197_SET_FALCON04 0x2310 +#define NVC197_SET_FALCON04_V 31:0 + +#define NVC197_SET_FALCON05 0x2314 +#define NVC197_SET_FALCON05_V 31:0 + +#define NVC197_SET_FALCON06 0x2318 +#define NVC197_SET_FALCON06_V 31:0 + +#define NVC197_SET_FALCON07 0x231c +#define NVC197_SET_FALCON07_V 31:0 + +#define NVC197_SET_FALCON08 0x2320 +#define NVC197_SET_FALCON08_V 31:0 + +#define NVC197_SET_FALCON09 0x2324 +#define NVC197_SET_FALCON09_V 31:0 + +#define NVC197_SET_FALCON10 0x2328 +#define NVC197_SET_FALCON10_V 31:0 + +#define NVC197_SET_FALCON11 0x232c +#define NVC197_SET_FALCON11_V 31:0 + +#define NVC197_SET_FALCON12 0x2330 +#define NVC197_SET_FALCON12_V 31:0 + +#define NVC197_SET_FALCON13 0x2334 +#define NVC197_SET_FALCON13_V 31:0 + +#define NVC197_SET_FALCON14 0x2338 +#define NVC197_SET_FALCON14_V 31:0 + +#define NVC197_SET_FALCON15 0x233c +#define NVC197_SET_FALCON15_V 31:0 + +#define NVC197_SET_FALCON16 0x2340 +#define NVC197_SET_FALCON16_V 31:0 + +#define NVC197_SET_FALCON17 0x2344 +#define NVC197_SET_FALCON17_V 31:0 + +#define NVC197_SET_FALCON18 0x2348 +#define NVC197_SET_FALCON18_V 31:0 + +#define NVC197_SET_FALCON19 0x234c +#define NVC197_SET_FALCON19_V 31:0 + +#define NVC197_SET_FALCON20 0x2350 +#define NVC197_SET_FALCON20_V 31:0 + +#define NVC197_SET_FALCON21 0x2354 +#define NVC197_SET_FALCON21_V 31:0 + +#define NVC197_SET_FALCON22 0x2358 +#define NVC197_SET_FALCON22_V 31:0 + +#define NVC197_SET_FALCON23 0x235c +#define NVC197_SET_FALCON23_V 31:0 + +#define NVC197_SET_FALCON24 0x2360 +#define NVC197_SET_FALCON24_V 31:0 + +#define NVC197_SET_FALCON25 0x2364 +#define NVC197_SET_FALCON25_V 31:0 + +#define NVC197_SET_FALCON26 0x2368 +#define NVC197_SET_FALCON26_V 31:0 + +#define NVC197_SET_FALCON27 0x236c +#define NVC197_SET_FALCON27_V 31:0 + +#define 
NVC197_SET_FALCON28 0x2370 +#define NVC197_SET_FALCON28_V 31:0 + +#define NVC197_SET_FALCON29 0x2374 +#define NVC197_SET_FALCON29_V 31:0 + +#define NVC197_SET_FALCON30 0x2378 +#define NVC197_SET_FALCON30_V 31:0 + +#define NVC197_SET_FALCON31 0x237c +#define NVC197_SET_FALCON31_V 31:0 + +#define NVC197_SET_CONSTANT_BUFFER_SELECTOR_A 0x2380 +#define NVC197_SET_CONSTANT_BUFFER_SELECTOR_A_SIZE 16:0 + +#define NVC197_SET_CONSTANT_BUFFER_SELECTOR_B 0x2384 +#define NVC197_SET_CONSTANT_BUFFER_SELECTOR_B_ADDRESS_UPPER 7:0 + +#define NVC197_SET_CONSTANT_BUFFER_SELECTOR_C 0x2388 +#define NVC197_SET_CONSTANT_BUFFER_SELECTOR_C_ADDRESS_LOWER 31:0 + +#define NVC197_LOAD_CONSTANT_BUFFER_OFFSET 0x238c +#define NVC197_LOAD_CONSTANT_BUFFER_OFFSET_V 15:0 + +#define NVC197_LOAD_CONSTANT_BUFFER(i) (0x2390+(i)*4) +#define NVC197_LOAD_CONSTANT_BUFFER_V 31:0 + +#define NVC197_BIND_GROUP_RESERVED_A(j) (0x2400+(j)*32) +#define NVC197_BIND_GROUP_RESERVED_A_V 0:0 + +#define NVC197_BIND_GROUP_RESERVED_B(j) (0x2404+(j)*32) +#define NVC197_BIND_GROUP_RESERVED_B_V 0:0 + +#define NVC197_BIND_GROUP_RESERVED_C(j) (0x2408+(j)*32) +#define NVC197_BIND_GROUP_RESERVED_C_V 0:0 + +#define NVC197_BIND_GROUP_RESERVED_D(j) (0x240c+(j)*32) +#define NVC197_BIND_GROUP_RESERVED_D_V 0:0 + +#define NVC197_BIND_GROUP_CONSTANT_BUFFER(j) (0x2410+(j)*32) +#define NVC197_BIND_GROUP_CONSTANT_BUFFER_VALID 0:0 +#define NVC197_BIND_GROUP_CONSTANT_BUFFER_VALID_FALSE 0x00000000 +#define NVC197_BIND_GROUP_CONSTANT_BUFFER_VALID_TRUE 0x00000001 +#define NVC197_BIND_GROUP_CONSTANT_BUFFER_SHADER_SLOT 8:4 + +#define NVC197_SET_COLOR_CLAMP 0x2600 +#define NVC197_SET_COLOR_CLAMP_ENABLE 0:0 +#define NVC197_SET_COLOR_CLAMP_ENABLE_FALSE 0x00000000 +#define NVC197_SET_COLOR_CLAMP_ENABLE_TRUE 0x00000001 + +#define NVC197_SET_BINDLESS_TEXTURE 0x2608 +#define NVC197_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 4:0 + +#define NVC197_SET_TRAP_HANDLER 0x260c +#define NVC197_SET_TRAP_HANDLER_OFFSET 31:0 + +#define NVC197_SET_STREAM_OUT_LAYOUT_SELECT(i,j) (0x2800+(i)*128+(j)*4) +#define NVC197_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER00 7:0 +#define NVC197_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER01 15:8 +#define NVC197_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER02 23:16 +#define NVC197_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER03 31:24 + +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4) +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22 +#define 
NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25 +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27 +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30 + +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4) +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0 +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1 +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3 +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4 + +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0 + +#define NVC197_START_SHADER_PERFORMANCE_COUNTER 0x33e0 +#define NVC197_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC197_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4 +#define NVC197_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER 0x33e8 +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER_V 31:0 + +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER 0x33ec +#define NVC197_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER_V 31:0 + +#define NVC197_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NVC197_SET_MME_SHADOW_SCRATCH_V 31:0 + +#define NVC197_CALL_MME_MACRO(j) (0x3800+(j)*8) +#define NVC197_CALL_MME_MACRO_V 31:0 + +#define NVC197_CALL_MME_DATA(j) (0x3804+(j)*8) +#define NVC197_CALL_MME_DATA_V 31:0 + +#endif /* _cl_pascal_b_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc361.h b/src/common/sdk/nvidia/inc/class/clc361.h new file mode 100644 index 0000000..daa8841 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc361.h @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clc361_h_ +#define _clc361_h_ + +#define VOLTA_USERMODE_A (0xc361) + +#define NVC361 0x0081ffff:0x00810000 +#define NVC361_NV_USERMODE__SIZE 65536 +#define NVC361_TIME_0 0x00000080 +#define NVC361_TIME_1 0x00000084 +#define NVC361_NOTIFY_CHANNEL_PENDING 0x00000090 + +#endif // _clc361_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc36f.h b/src/common/sdk/nvidia/inc/class/clc36f.h new file mode 100644 index 0000000..cae1c68 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc36f.h @@ -0,0 +1,366 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc36f_h_ +#define _clc36f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class VOLTA_CHANNEL_GPFIFO */ +/* + * Documentation for VOLTA_CHANNEL_GPFIFO can be found in dev_pbdma.ref, + * chapter "User Control Registers". It is documented as device NV_UDMA. + * The GPFIFO format itself is also documented in dev_pbdma.ref, + * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref, + * chapter "FIFO DMA RAM", NV_FIFO_DMA_*. + * + * Note there is no .mfs file for this class. 
+ */ +#define VOLTA_CHANNEL_GPFIFO_A (0x0000C36F) + +#define NVC36F_TYPEDEF VOLTA_CHANNELChannelGPFifoA + +/* dma flow control data structure */ +typedef volatile struct Nvc36fControl_struct { + NvU32 Ignored00[0x010]; /* 0000-003f*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 Ignored01[0x002]; /* 0050-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x5c]; +} Nvc36fControl, VoltaAControlGPFifo; + +/* fields and values */ +#define NVC36F_NUMBER_OF_SUBCHANNELS (8) +#define NVC36F_SET_OBJECT (0x00000000) +#define NVC36F_SET_OBJECT_NVCLASS 15:0 +#define NVC36F_SET_OBJECT_ENGINE 20:16 +#define NVC36F_SET_OBJECT_ENGINE_SW 0x0000001f +#define NVC36F_ILLEGAL (0x00000004) +#define NVC36F_ILLEGAL_HANDLE 31:0 +#define NVC36F_NOP (0x00000008) +#define NVC36F_NOP_HANDLE 31:0 +#define NVC36F_SEMAPHOREA (0x00000010) +#define NVC36F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVC36F_SEMAPHOREB (0x00000014) +#define NVC36F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVC36F_SEMAPHOREC (0x00000018) +#define NVC36F_SEMAPHOREC_PAYLOAD 31:0 +#define NVC36F_SEMAPHORED (0x0000001C) +#define NVC36F_SEMAPHORED_OPERATION 4:0 +#define NVC36F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NVC36F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVC36F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVC36F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NVC36F_SEMAPHORED_OPERATION_REDUCTION 0x00000010 +#define NVC36F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVC36F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVC36F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVC36F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVC36F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVC36F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVC36F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVC36F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVC36F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVC36F_SEMAPHORED_REDUCTION 30:27 +#define NVC36F_SEMAPHORED_REDUCTION_MIN 0x00000000 +#define NVC36F_SEMAPHORED_REDUCTION_MAX 0x00000001 +#define NVC36F_SEMAPHORED_REDUCTION_XOR 0x00000002 +#define NVC36F_SEMAPHORED_REDUCTION_AND 0x00000003 +#define NVC36F_SEMAPHORED_REDUCTION_OR 0x00000004 +#define NVC36F_SEMAPHORED_REDUCTION_ADD 0x00000005 +#define NVC36F_SEMAPHORED_REDUCTION_INC 0x00000006 +#define NVC36F_SEMAPHORED_REDUCTION_DEC 0x00000007 +#define NVC36F_SEMAPHORED_FORMAT 31:31 +#define NVC36F_SEMAPHORED_FORMAT_SIGNED 0x00000000 +#define NVC36F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001 +#define NVC36F_NON_STALL_INTERRUPT (0x00000020) +#define NVC36F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVC36F_FB_FLUSH (0x00000024) // Deprecated - use MEMBAR TYPE SYS_MEMBAR +#define NVC36F_FB_FLUSH_HANDLE 31:0 +// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for +// specifying the page address for a targeted TLB invalidate and the uTLB for +// a targeted REPLAY_CANCEL for UVM. 
+// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly +// rearranged fields. +#define NVC36F_MEM_OP_A (0x00000028) +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_INVALIDATION_SIZE 5:0 // Used to specify size of invalidate, used for invalidates which are not of the REPLAY_CANCEL_TARGETED type +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID 6:0 // only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11 +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001 +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000 +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12 +#define NVC36F_MEM_OP_B (0x0000002c) +#define NVC36F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0 +#define NVC36F_MEM_OP_C (0x00000030) +#define NVC36F_MEM_OP_C_MEMBAR_TYPE 2:0 +#define NVC36F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000 +#define NVC36F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE 9:7 //only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_READ 0 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE 1 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_STRONG 2 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_RSVRVD 3 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_WEAK 4 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_ALL 5 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE_AND_ATOMIC 6 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ALL 7 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 
0x00000004 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE +#define NVC36F_MEM_OP_C_ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG 19:0 +// MEM_OP_D MUST be preceded by MEM_OPs A-C. +#define NVC36F_MEM_OP_D (0x00000034) +#define NVC36F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE +#define NVC36F_MEM_OP_D_OPERATION 31:27 +#define NVC36F_MEM_OP_D_OPERATION_MEMBAR 0x00000005 +#define NVC36F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVC36F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a +#define NVC36F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVC36F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +// CLEAN_LINES is an alias for Tegra/GPU IP usage +#define NVC36F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e +#define NVC36F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVC36F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVC36F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015 +#define NVC36F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR 0x00000016 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE 1:0 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC 0x00000000 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC 0x00000001 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_ALL 0x00000002 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_TARGETED 0x00000003 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE 2:2 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC 0x00000000 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC 0x00000001 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_BANK 6:3 +#define NVC36F_SET_REFERENCE (0x00000050) +#define NVC36F_SET_REFERENCE_COUNT 31:0 +#define NVC36F_SEM_ADDR_LO (0x0000005c) +#define NVC36F_SEM_ADDR_LO_OFFSET 31:2 +#define NVC36F_SEM_ADDR_HI (0x00000060) +#define NVC36F_SEM_ADDR_HI_OFFSET 7:0 +#define NVC36F_SEM_PAYLOAD_LO (0x00000064) +#define NVC36F_SEM_PAYLOAD_LO_PAYLOAD 31:0 +#define NVC36F_SEM_PAYLOAD_HI (0x00000068) +#define NVC36F_SEM_PAYLOAD_HI_PAYLOAD 31:0 +#define NVC36F_SEM_EXECUTE (0x0000006c) +#define NVC36F_SEM_EXECUTE_OPERATION 2:0 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000 +#define NVC36F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005 +#define NVC36F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006 +#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12 +#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000 +#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001 +#define NVC36F_SEM_EXECUTE_RELEASE_WFI 20:20 +#define NVC36F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000 +#define NVC36F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001 +#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE 24:24 +#define 
NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000 +#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001 +#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25 +#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000 +#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001 +#define NVC36F_SEM_EXECUTE_REDUCTION 30:27 +#define NVC36F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000 +#define NVC36F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001 +#define NVC36F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002 +#define NVC36F_SEM_EXECUTE_REDUCTION_IAND 0x00000003 +#define NVC36F_SEM_EXECUTE_REDUCTION_IOR 0x00000004 +#define NVC36F_SEM_EXECUTE_REDUCTION_IADD 0x00000005 +#define NVC36F_SEM_EXECUTE_REDUCTION_INC 0x00000006 +#define NVC36F_SEM_EXECUTE_REDUCTION_DEC 0x00000007 +#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT 31:31 +#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000 +#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001 +#define NVC36F_WFI (0x00000078) +#define NVC36F_WFI_SCOPE 0:0 +#define NVC36F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000 +#define NVC36F_WFI_SCOPE_CURRENT_VEID 0x00000000 +#define NVC36F_WFI_SCOPE_ALL 0x00000001 +#define NVC36F_CRC_CHECK (0x0000007c) +#define NVC36F_CRC_CHECK_VALUE 31:0 +#define NVC36F_YIELD (0x00000080) +#define NVC36F_YIELD_OP 1:0 +#define NVC36F_YIELD_OP_NOP 0x00000000 +#define NVC36F_YIELD_OP_RUNLIST_TIMESLICE 0x00000002 +#define NVC36F_YIELD_OP_TSG 0x00000003 +#define NVC36F_CLEAR_FAULTED (0x00000084) +#define NVC36F_CLEAR_FAULTED_CHID 11:0 +#define NVC36F_CLEAR_FAULTED_TYPE 31:31 +#define NVC36F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED 0x00000000 +#define NVC36F_CLEAR_FAULTED_TYPE_ENG_FAULTED 0x00000001 + + +/* GPFIFO entry format */ +#define NVC36F_GP_ENTRY__SIZE 8 +#define NVC36F_GP_ENTRY0_FETCH 0:0 +#define NVC36F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVC36F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVC36F_GP_ENTRY0_GET 31:2 +#define NVC36F_GP_ENTRY0_OPERAND 31:0 +#define NVC36F_GP_ENTRY1_GET_HI 7:0 +#define NVC36F_GP_ENTRY1_PRIV 8:8 +#define NVC36F_GP_ENTRY1_PRIV_USER 0x00000000 +#define NVC36F_GP_ENTRY1_PRIV_KERNEL 0x00000001 +#define NVC36F_GP_ENTRY1_LEVEL 9:9 +#define NVC36F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVC36F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVC36F_GP_ENTRY1_LENGTH 30:10 +#define NVC36F_GP_ENTRY1_SYNC 31:31 +#define NVC36F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVC36F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVC36F_GP_ENTRY1_OPCODE 7:0 +#define NVC36F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVC36F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVC36F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVC36F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVC36F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVC36F_DMA_METHOD_ADDRESS 11:0 +#define NVC36F_DMA_SUBDEVICE_MASK 15:4 +#define NVC36F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVC36F_DMA_TERT_OP 17:16 +#define NVC36F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVC36F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NVC36F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVC36F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVC36F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVC36F_DMA_METHOD_COUNT_OLD 28:18 +#define NVC36F_DMA_METHOD_COUNT 28:16 +#define NVC36F_DMA_IMMD_DATA 28:16 +#define NVC36F_DMA_SEC_OP 31:29 +#define NVC36F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVC36F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVC36F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NVC36F_DMA_SEC_OP_NON_INC_METHOD 
(0x00000003) +#define NVC36F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NVC36F_DMA_SEC_OP_ONE_INC (0x00000005) +#define NVC36F_DMA_SEC_OP_RESERVED6 (0x00000006) +#define NVC36F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NVC36F_DMA_INCR_ADDRESS 11:0 +#define NVC36F_DMA_INCR_SUBCHANNEL 15:13 +#define NVC36F_DMA_INCR_COUNT 28:16 +#define NVC36F_DMA_INCR_OPCODE 31:29 +#define NVC36F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVC36F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVC36F_DMA_NONINCR_ADDRESS 11:0 +#define NVC36F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVC36F_DMA_NONINCR_COUNT 28:16 +#define NVC36F_DMA_NONINCR_OPCODE 31:29 +#define NVC36F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVC36F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define NVC36F_DMA_ONEINCR_ADDRESS 11:0 +#define NVC36F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVC36F_DMA_ONEINCR_COUNT 28:16 +#define NVC36F_DMA_ONEINCR_OPCODE 31:29 +#define NVC36F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVC36F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVC36F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVC36F_DMA_IMMD_ADDRESS 11:0 +#define NVC36F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVC36F_DMA_IMMD_DATA 28:16 +#define NVC36F_DMA_IMMD_OPCODE 31:29 +#define NVC36F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NVC36F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NVC36F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC36F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NVC36F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NVC36F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC36F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NVC36F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC36F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NVC36F_DMA_ENDSEG_OPCODE 31:29 +#define NVC36F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NVC36F_DMA_ADDRESS 12:2 +#define NVC36F_DMA_SUBCH 15:13 +#define NVC36F_DMA_OPCODE3 17:16 +#define NVC36F_DMA_OPCODE3_NONE (0x00000000) +#define NVC36F_DMA_COUNT 28:18 +#define NVC36F_DMA_OPCODE 31:29 +#define NVC36F_DMA_OPCODE_METHOD (0x00000000) +#define NVC36F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NVC36F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc36f_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc370.h b/src/common/sdk/nvidia/inc/class/clc370.h new file mode 100644 index 0000000..762ecf0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc370.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/clc370.finn
+//
+
+#include "clc370_notification.h"
+
+#define NVC370_DISPLAY (0xc370U) /* finn: Evaluated from "NVC370_ALLOCATION_PARAMETERS_MESSAGE_ID" */
+
+#define NVC370_ALLOCATION_PARAMETERS_MESSAGE_ID (0xc370U)
+
+typedef struct NVC370_ALLOCATION_PARAMETERS {
+    NvU32 numHeads;  // Number of HEADs in this chip/display
+    NvU32 numSors;   // Number of SORs in this chip/display
+    NvU32 numPiors;  // Number of PIORs in this chip/display
+} NVC370_ALLOCATION_PARAMETERS;
+
diff --git a/src/common/sdk/nvidia/inc/class/clc370_notification.h b/src/common/sdk/nvidia/inc/class/clc370_notification.h
new file mode 100644
index 0000000..c94556e
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc370_notification.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc370_notification_h_
+#define _clc370_notification_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "class/cl5070.h"
+
+/* event values */
+#define NVC370_NOTIFIERS_SW NV5070_NOTIFIERS_SW
+#define NVC370_NOTIFIERS_BEGIN NV5070_NOTIFIERS_MAXCOUNT
+#define NVC370_NOTIFIERS_RG_SEM_NOTIFICATION NVC370_NOTIFIERS_BEGIN + (0)
+#define NVC370_NOTIFIERS_WIN_SEM_NOTIFICATION NVC370_NOTIFIERS_RG_SEM_NOTIFICATION + (1)
+#define NVC370_NOTIFIERS_MAXCOUNT NVC370_NOTIFIERS_WIN_SEM_NOTIFICATION + (1)
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif /* _clc370_notification_h_ */
+
diff --git a/src/common/sdk/nvidia/inc/class/clc371.h b/src/common/sdk/nvidia/inc/class/clc371.h
new file mode 100644
index 0000000..e8f9977
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc371.h
@@ -0,0 +1,109 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc371_h_
+#define _clc371_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC371_DISP_SF_USER (0x000C371)
+
+typedef volatile struct _clc371_tag0 {
+    NvU32 dispSfUserOffset[0x400];
+} _NvC371DispSfUser, NvC371DispSfUserMap;
+
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x00690000-0x00690000+(i)*1024) /* RWX4A */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 4 /* */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE 0:0 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_NO 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_YES 0x00000001 /* RW--V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_DIS 0x00000000 /* RW--V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_EN 0x00000001 /* RW--V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER 4:4 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_DIS 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_EN 0x00000001 /* RW--V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE 8:8 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_EN 0x00000001 /* RW--V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW 9:9 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_STATUS(i) (0x00690004-0x00690000+(i)*1024) /* R--4A */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_STATUS__SIZE_1 4 /* */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_STATUS_SENT 0:0 /* R--VF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_DONE 0x00000001 /* R---V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_WAITING 0x00000000 /* R---V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_INIT 0x00000000 /* R-I-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x00690008-0x00690000+(i)*1024) /* RWX4A */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 4 /* */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */
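
Before the remaining header-byte fields continue below, a quick orientation: the NVC371_SF_HDMI_AVI_INFOFRAME_* addresses are byte offsets relative to the 0x00690000 aperture base, with a 1024-byte stride per instance (four instances, per the __SIZE_1 defines), and the two-number "high:low" values are bit ranges within one 32-bit register. A minimal sketch of packing the standard CTA-861 AVI header bytes into HEADER(i), in plain C; the regs pointer, the helper names, and the hand-coded shifts standing in for NVIDIA's DRF field macros are illustrative assumptions, not driver API:

    #include <stdint.h>

    /* Illustrative only: dword index of NVC371_SF_HDMI_AVI_INFOFRAME_HEADER(i)
     * within a mapped NvC371DispSfUserMap aperture (byte offset 0x8 + i*1024). */
    #define C371_AVI_HEADER_IDX(i) ((0x8u + (i) * 1024u) / 4u)

    /* HB0 lives in bits 7:0, HB1 in bits 15:8, HB2 in bits 23:16, per the defines above. */
    static void c371_write_avi_header(volatile uint32_t *regs, unsigned instance)
    {
        uint32_t hb0 = 0x82; /* CTA-861 InfoFrame type: AVI */
        uint32_t hb1 = 0x02; /* version */
        uint32_t hb2 = 0x0D; /* payload length */
        regs[C371_AVI_HEADER_IDX(instance)] = (hb2 << 16) | (hb1 << 8) | hb0;
    }
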
+#define NVC371_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x0069000C-0x00690000+(i)*1024) /* RWX4A */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 4 /* */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x00690010-0x00690000+(i)*1024) /* RWX4A */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 4 /* */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x00690014-0x00690000+(i)*1024) /* RWX4A */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 4 /* */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x00690018-0x00690000+(i)*1024) /* RWX4A */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 4 /* */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */
+#define NVC371_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif // _clc371_h_
diff --git a/src/common/sdk/nvidia/inc/class/clc372sw.h b/src/common/sdk/nvidia/inc/class/clc372sw.h
new file mode 100644
index 0000000..552ea08
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc372sw.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc372sw_h_
+#define _clc372sw_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC372_DISPLAY_SW (0x0000C372)
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif /* _clc372sw_h_ */
diff --git a/src/common/sdk/nvidia/inc/class/clc373.h b/src/common/sdk/nvidia/inc/class/clc373.h
new file mode 100644
index 0000000..c707022
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc373.h
@@ -0,0 +1,350 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _clc373_h_ +#define _clc373_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC373_DISP_CAPABILITIES 0xC373 + +typedef volatile struct _clc373_tag0 { + NvU32 dispCapabilities[0x400]; +} _NvC373DispCapabilities,NvC373DispCapabilities_Map ; + + +#define NVC373_SYS_CAP 0x0 /* RW-4R */ +#define NVC373_SYS_CAP_HEAD0_EXISTS 0:0 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD0_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD1_EXISTS 1:1 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD1_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD2_EXISTS 2:2 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD2_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD3_EXISTS 3:3 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD3_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD4_EXISTS 4:4 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD4_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD5_EXISTS 5:5 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD5_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD6_EXISTS 6:6 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD6_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD7_EXISTS 7:7 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD7_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVC373_SYS_CAP_HEAD_EXISTS__SIZE_1 8 /* */ +#define NVC373_SYS_CAP_HEAD_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR0_EXISTS 8:8 /* RWIVF */ +#define NVC373_SYS_CAP_SOR0_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR1_EXISTS 9:9 /* RWIVF */ +#define NVC373_SYS_CAP_SOR1_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR2_EXISTS 10:10 /* RWIVF */ +#define NVC373_SYS_CAP_SOR2_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR3_EXISTS 11:11 /* RWIVF */ +#define NVC373_SYS_CAP_SOR3_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR3_EXISTS_YES 0x00000001 /* RW--V */ +#define 
NVC373_SYS_CAP_SOR4_EXISTS 12:12 /* RWIVF */ +#define NVC373_SYS_CAP_SOR4_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR5_EXISTS 13:13 /* RWIVF */ +#define NVC373_SYS_CAP_SOR5_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR6_EXISTS 14:14 /* RWIVF */ +#define NVC373_SYS_CAP_SOR6_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR7_EXISTS 15:15 /* RWIVF */ +#define NVC373_SYS_CAP_SOR7_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR_EXISTS(i) (8+(i)):(8+(i)) /* RWIVF */ +#define NVC373_SYS_CAP_SOR_EXISTS__SIZE_1 8 /* */ +#define NVC373_SYS_CAP_SOR_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB 0x4 /* RW-4R */ +#define NVC373_SYS_CAPB_WINDOW0_EXISTS 0:0 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW0_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW1_EXISTS 1:1 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW1_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW2_EXISTS 2:2 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW2_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW3_EXISTS 3:3 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW3_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW4_EXISTS 4:4 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW4_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW5_EXISTS 5:5 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW5_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW6_EXISTS 6:6 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW6_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW7_EXISTS 7:7 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW7_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW8_EXISTS 8:8 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW8_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW8_EXISTS_NO 0x00000000 /* RW--V */ 
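
Before the remaining WINDOW*_EXISTS values continue below, a note on reading these capability bits: the _EXISTS fields are single-bit flags in the same high:low range notation, normally consumed through the driver's DRF field macros. Hand-expanded, head i is bit (0+i) and SOR i is bit (8+i) of SYS_CAP at offset 0x0, and window i is bit (0+i) of SYS_CAPB at offset 0x4. A minimal probing sketch in plain C, assuming caps points at a dword-mapped NvC373DispCapabilities_Map aperture (the helper names are illustrative, not driver API):

    #include <stdbool.h>
    #include <stdint.h>

    static bool c373_head_exists(const volatile uint32_t *caps, unsigned i)
    {
        return (caps[0x0 / 4] >> i) & 1u;        /* NVC373_SYS_CAP_HEAD_EXISTS(i)    */
    }

    static bool c373_sor_exists(const volatile uint32_t *caps, unsigned i)
    {
        return (caps[0x0 / 4] >> (8u + i)) & 1u; /* NVC373_SYS_CAP_SOR_EXISTS(i)     */
    }

    static bool c373_window_exists(const volatile uint32_t *caps, unsigned i)
    {
        return (caps[0x4 / 4] >> i) & 1u;        /* NVC373_SYS_CAPB_WINDOW_EXISTS(i) */
    }
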
+#define NVC373_SYS_CAPB_WINDOW8_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW9_EXISTS 9:9 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW9_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW9_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW9_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW10_EXISTS 10:10 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW10_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW10_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW10_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW11_EXISTS 11:11 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW11_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW11_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW11_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW12_EXISTS 12:12 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW12_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW12_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW12_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW13_EXISTS 13:13 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW13_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW13_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW13_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW14_EXISTS 14:14 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW14_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW14_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW14_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW15_EXISTS 15:15 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW15_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW15_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW15_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW16_EXISTS 16:16 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW16_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW16_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW16_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW17_EXISTS 17:17 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW17_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW17_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW17_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW18_EXISTS 18:18 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW18_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW18_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW18_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW19_EXISTS 19:19 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW19_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW19_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW19_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW20_EXISTS 20:20 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW20_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW20_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW20_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW21_EXISTS 21:21 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW21_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW21_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW21_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW22_EXISTS 22:22 /* RWIVF */ +#define 
NVC373_SYS_CAPB_WINDOW22_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW22_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW22_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW23_EXISTS 23:23 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW23_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW23_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW23_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW24_EXISTS 24:24 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW24_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW24_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW24_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW25_EXISTS 25:25 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW25_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW25_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW25_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW26_EXISTS 26:26 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW26_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW26_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW26_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW27_EXISTS 27:27 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW27_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW27_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW27_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW28_EXISTS 28:28 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW28_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW28_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW28_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW29_EXISTS 29:29 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW29_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW29_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW29_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW30_EXISTS 30:30 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW30_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW30_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW30_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW31_EXISTS 31:31 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW31_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW31_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW31_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW_EXISTS__SIZE_1 32 /* */ +#define NVC373_SYS_CAPB_WINDOW_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA(i) (0x30+(i)*32) /* RW-4A */ +#define NVC373_HEAD_CAPA__SIZE_1 8 /* */ +#define NVC373_HEAD_CAPA_SCALER 0:0 /* RWIVF */ +#define NVC373_HEAD_CAPA_SCALER_TRUE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_SCALER_FALSE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_SCALER_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_SCALER_HAS_YUV422 1:1 /* RWIVF */ +#define NVC373_HEAD_CAPA_SCALER_HAS_YUV422_TRUE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_SCALER_HAS_YUV422_FALSE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_SCALER_HAS_YUV422_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_HSAT 2:2 /* RWIVF */ +#define 
NVC373_HEAD_CAPA_HSAT_TRUE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_HSAT_FALSE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_HSAT_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_OCSC 3:3 /* RWIVF */ +#define NVC373_HEAD_CAPA_OCSC_TRUE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_OCSC_FALSE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_OCSC_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_YUV422 4:4 /* RWIVF */ +#define NVC373_HEAD_CAPA_YUV422_TRUE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_YUV422_FALSE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_YUV422_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_LUT_TYPE 6:5 /* RWIVF */ +#define NVC373_HEAD_CAPA_LUT_TYPE_NONE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_LUT_TYPE_257 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_LUT_TYPE_1025 0x00000002 /* RW--V */ +#define NVC373_HEAD_CAPA_LUT_TYPE_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_LUT_LOCATION 7:7 /* RWIVF */ +#define NVC373_HEAD_CAPA_LUT_LOCATION_EARLY 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_LUT_LOCATION_LATE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_LUT_LOCATION_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPD(i) (0x3c+(i)*32) /* RW-4A */ +#define NVC373_HEAD_CAPD__SIZE_1 8 /* */ +#define NVC373_HEAD_CAPD_MAX_PIXELS_2TAP422 15:0 /* RWIUF */ +#define NVC373_HEAD_CAPD_MAX_PIXELS_2TAP422_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPD_MAX_PIXELS_2TAP444 31:16 /* RWIUF */ +#define NVC373_HEAD_CAPD_MAX_PIXELS_2TAP444_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP(i) (0x144+(i)*8) /* RW-4A */ +#define NVC373_SOR_CAP__SIZE_1 8 /* */ +#define NVC373_SOR_CAP_SINGLE_LVDS_18 0:0 /* RWIVF */ +#define NVC373_SOR_CAP_SINGLE_LVDS_18_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_SINGLE_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_LVDS_24 1:1 /* RWIVF */ +#define NVC373_SOR_CAP_SINGLE_LVDS_24_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_SINGLE_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_LVDS_18 2:2 /* RWIVF */ +#define NVC373_SOR_CAP_DUAL_LVDS_18_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DUAL_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_LVDS_24 3:3 /* RWIVF */ +#define NVC373_SOR_CAP_DUAL_LVDS_24_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DUAL_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_A 8:8 /* RWIVF */ +#define NVC373_SOR_CAP_SINGLE_TMDS_A_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_A_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_A_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_B 9:9 /* RWIVF */ +#define NVC373_SOR_CAP_SINGLE_TMDS_B_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_B_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_B_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_TMDS 11:11 /* RWIVF */ +#define NVC373_SOR_CAP_DUAL_TMDS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DUAL_TMDS_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_TMDS_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DISPLAY_OVER_PCIE 13:13 /* RWIVF */ +#define NVC373_SOR_CAP_DISPLAY_OVER_PCIE_INIT 0x00000000 /* RWI-V */ +#define 
NVC373_SOR_CAP_DISPLAY_OVER_PCIE_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DISPLAY_OVER_PCIE_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_SDI 16:16 /* RWIVF */ +#define NVC373_SOR_CAP_SDI_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_SDI_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_SDI_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DP_A 24:24 /* RWIVF */ +#define NVC373_SOR_CAP_DP_A_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DP_A_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DP_A_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DP_B 25:25 /* RWIVF */ +#define NVC373_SOR_CAP_DP_B_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DP_B_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DP_B_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DP_INTERLACE 26:26 /* RWIVF */ +#define NVC373_SOR_CAP_DP_INTERLACE_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DP_INTERLACE_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DP_INTERLACE_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DP_8_LANES 27:27 /* RWIVF */ +#define NVC373_SOR_CAP_DP_8_LANES_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DP_8_LANES_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DP_8_LANES_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CLK_CAP(i) (0x608+(i)*4) /* RW-4A */ +#define NVC373_SOR_CLK_CAP__SIZE_1 8 /* */ +#define NVC373_SOR_CLK_CAP_DP_MAX 7:0 /* RWIUF */ +#define NVC373_SOR_CLK_CAP_DP_MAX_INIT 0x00000036 /* RWI-V */ +#define NVC373_SOR_CLK_CAP_TMDS_MAX 23:16 /* RWIUF */ +#define NVC373_SOR_CLK_CAP_TMDS_MAX_INIT 0x0000003C /* RWI-V */ +#define NVC373_SOR_CLK_CAP_LVDS_MAX 31:24 /* RWIUF */ +#define NVC373_SOR_CLK_CAP_LVDS_MAX_INIT 0x00000000 /* RWI-V */ + +#ifdef __cplusplus +}; +#endif /* extern C */ +#endif //_clc373_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc37a.h b/src/common/sdk/nvidia/inc/class/clc37a.h new file mode 100644 index 0000000..37fa7ce --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc37a.h @@ -0,0 +1,214 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _clc37a__h_ +#define _clc37a__h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC37A_CURSOR_IMM_CHANNEL_PIO (0x0000C37A) + +typedef volatile struct _clc37a_tag0 { + NvV32 Reserved00[0x2]; + NvV32 Free; // 0x00000008 - 0x0000000B + NvV32 Reserved01[0x7D]; + NvV32 Update; // 0x00000200 - 0x00000203 + NvV32 SetInterlockFlags; // 0x00000204 - 0x00000207 + NvV32 SetCursorHotSpotPointOut[2]; // 0x00000208 - 0x0000020F + NvV32 SetWindowInterlockFlags; // 0x00000210 - 0x00000213 + NvV32 Reserved02[0x37B]; +} NVC37ADispCursorImmControlPio; + +#define NVC37A_FREE (0x00000008) +#define NVC37A_FREE_COUNT 5:0 +#define NVC37A_UPDATE (0x00000200) +#define NVC37A_UPDATE_RELEASE_ELV 0:0 +#define NVC37A_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC37A_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC37A_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37A_SET_INTERLOCK_FLAGS (0x00000204) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define 
NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC37A_SET_CURSOR_HOT_SPOT_POINT_OUT(b) (0x00000208 + (b)*0x00000004) +#define NVC37A_SET_CURSOR_HOT_SPOT_POINT_OUT_X 15:0 +#define NVC37A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y 31:16 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS (0x00000210) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define 
NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define 
NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc37a_h + diff --git a/src/common/sdk/nvidia/inc/class/clc37b.h b/src/common/sdk/nvidia/inc/class/clc37b.h new file mode 100644 index 0000000..05f47a2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc37b.h @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _clC37b_h_
+#define _clC37b_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC37B_WINDOW_IMM_CHANNEL_DMA (0x0000C37B)
+
+// dma opcode instructions
+#define NVC37B_DMA
+#define NVC37B_DMA_OPCODE 31:29
+#define NVC37B_DMA_OPCODE_METHOD 0x00000000
+#define NVC37B_DMA_OPCODE_JUMP 0x00000001
+#define NVC37B_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC37B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC37B_DMA_METHOD_COUNT 27:18
+#define NVC37B_DMA_METHOD_OFFSET 13:2
+#define NVC37B_DMA_DATA 31:0
+#define NVC37B_DMA_DATA_NOP 0x00000000
+#define NVC37B_DMA_JUMP_OFFSET 11:2
+#define NVC37B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NVC37B_PUT (0x00000000)
+#define NVC37B_PUT_PTR 9:0
+#define NVC37B_GET (0x00000004)
+#define NVC37B_GET_PTR 9:0
+#define NVC37B_UPDATE (0x00000200)
+#define NVC37B_UPDATE_RELEASE_ELV 0:0
+#define NVC37B_UPDATE_RELEASE_ELV_FALSE (0x00000000)
+#define NVC37B_UPDATE_RELEASE_ELV_TRUE (0x00000001)
+#define NVC37B_UPDATE_INTERLOCK_WITH_WINDOW 1:1
+#define NVC37B_UPDATE_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVC37B_UPDATE_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVC37B_SET_POINT_OUT(b) (0x00000208 + (b)*0x00000004)
+#define NVC37B_SET_POINT_OUT_X 15:0
+#define NVC37B_SET_POINT_OUT_Y 31:16
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clC37b_h
diff --git a/src/common/sdk/nvidia/inc/class/clc37d.h b/src/common/sdk/nvidia/inc/class/clc37d.h
new file mode 100644
index 0000000..9ac7050
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc37d.h
@@ -0,0 +1,953 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + + +#ifndef _clC37d_h_ +#define _clC37d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC37D_CORE_CHANNEL_DMA (0x0000C37D) + +#define NV_DISP_NOTIFIER 0x00000000 +#define NV_DISP_NOTIFIER_SIZEOF 0x00000010 +#define NV_DISP_NOTIFIER__0 0x00000000 +#define NV_DISP_NOTIFIER__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFIER__0_FIELD 8:8 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE 9:9 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_NON_TEARING 0x00000000 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_IMMEDIATE 0x00000001 +#define NV_DISP_NOTIFIER__0_R1 15:10 +#define NV_DISP_NOTIFIER__0_R2 23:16 +#define NV_DISP_NOTIFIER__0_R3 29:24 +#define NV_DISP_NOTIFIER__0_STATUS 31:30 +#define NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_NOTIFIER__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_NOTIFIER__0_STATUS_FINISHED 0x00000002 +#define NV_DISP_NOTIFIER__1 0x00000001 +#define NV_DISP_NOTIFIER__1_R4 31:0 +#define NV_DISP_NOTIFIER__2 0x00000002 +#define NV_DISP_NOTIFIER__2_TIMESTAMP_LO 31:0 +#define NV_DISP_NOTIFIER__3 0x00000003 +#define NV_DISP_NOTIFIER__3_TIMESTAMP_HI 31:0 + + +// dma opcode instructions +#define NVC37D_DMA +#define NVC37D_DMA_OPCODE 31:29 +#define NVC37D_DMA_OPCODE_METHOD 0x00000000 +#define NVC37D_DMA_OPCODE_JUMP 0x00000001 +#define NVC37D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC37D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC37D_DMA_METHOD_COUNT 27:18 +#define NVC37D_DMA_METHOD_OFFSET 13:2 +#define NVC37D_DMA_DATA 31:0 +#define NVC37D_DMA_DATA_NOP 0x00000000 +#define NVC37D_DMA_JUMP_OFFSET 11:2 +#define NVC37D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// if cap SUPPORT_FLEXIBLE_WIN_MAPPING is FALSE, this define can be used to obtain which head a window is mapped to +#define NVC37D_WINDOW_MAPPED_TO_HEAD(w) ((w)>>1) +#define NVC37D_GET_VALID_WINDOWMASK_FOR_HEAD(h) ((1<<((h)*2)) | (1<<((h)*2+1))) + +// class methods +#define NVC37D_PUT (0x00000000) +#define NVC37D_PUT_PTR 9:0 +#define NVC37D_GET (0x00000004) +#define NVC37D_GET_PTR 9:0 +#define NVC37D_UPDATE (0x00000200) +#define NVC37D_UPDATE_SPECIAL_HANDLING 21:20 +#define NVC37D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NVC37D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NVC37D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NVC37D_UPDATE_SPECIAL_HANDLING_REASON 19:12 +#define NVC37D_UPDATE_INHIBIT_INTERRUPTS 24:24 +#define NVC37D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NVC37D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NVC37D_UPDATE_RELEASE_ELV 0:0 +#define NVC37D_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC37D_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC37D_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define 
NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37D_SET_CONTEXT_DMA_NOTIFIER (0x00000208) +#define NVC37D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC37D_SET_NOTIFIER_CONTROL (0x0000020C) +#define NVC37D_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC37D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC37D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC37D_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC37D_SET_NOTIFIER_CONTROL_NOTIFY 12:12 +#define NVC37D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NVC37D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NVC37D_SET_CONTROL (0x00000210) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN(i) ((i)+0):((i)+0) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN__SIZE_1 4 +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN_DISABLE (0x00000000) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN_ENABLE (0x00000001) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN0 0:0 +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN0_DISABLE (0x00000000) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN0_ENABLE (0x00000001) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN1 1:1 +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN1_DISABLE (0x00000000) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN1_ENABLE (0x00000001) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN2 2:2 +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN2_DISABLE (0x00000000) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN2_ENABLE (0x00000001) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN3 3:3 +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN3_DISABLE (0x00000000) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN3_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS (0x00000218) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define 
NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define 
NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define 
NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC37D_GET_RG_SCAN_LINE(b) (0x00000220 + (b)*0x00000004) +#define NVC37D_GET_RG_SCAN_LINE_LINE 15:0 +#define NVC37D_GET_RG_SCAN_LINE_VBLANK 16:16 +#define NVC37D_GET_RG_SCAN_LINE_VBLANK_FALSE (0x00000000) +#define NVC37D_GET_RG_SCAN_LINE_VBLANK_TRUE (0x00000001) +#define NVC37D_SET_GET_BLANKING_CTRL(b) (0x00000240 + (b)*0x00000004) +#define NVC37D_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NVC37D_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NVC37D_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NVC37D_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NVC37D_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NVC37D_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) + +#define NVC37D_PIOR_SET_CONTROL(a) 
(0x00000280 + (a)*0x00000020) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK 7:0 +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL 11:8 +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_EXT_SDI_SD_ENC (0x00000001) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_EXT_SDI_HD_ENC (0x00000002) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_DIST_RENDER_OUT (0x00000004) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_DIST_RENDER_IN (0x00000005) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_DIST_RENDER_INOUT (0x00000006) +#define NVC37D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 16:16 +#define NVC37D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC37D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC37D_PIOR_SET_CUSTOM_REASON(a) (0x00000284 + (a)*0x00000020) +#define NVC37D_PIOR_SET_CUSTOM_REASON_CODE 31:0 +#define NVC37D_PIOR_SET_SW_SPARE_A(a) (0x00000288 + (a)*0x00000020) +#define NVC37D_PIOR_SET_SW_SPARE_A_CODE 31:0 +#define NVC37D_PIOR_SET_SW_SPARE_B(a) (0x0000028C + (a)*0x00000020) +#define NVC37D_PIOR_SET_SW_SPARE_B_CODE 31:0 + +#define NVC37D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK 7:0 +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DSI (0x0000000A) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NVC37D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16 +#define NVC37D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC37D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NVC37D_SOR_SET_CUSTOM_REASON(a) (0x00000304 + (a)*0x00000020) +#define NVC37D_SOR_SET_CUSTOM_REASON_CODE 31:0 +#define NVC37D_SOR_SET_SW_SPARE_A(a) (0x00000308 + (a)*0x00000020) 
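[Editor's note: the NVC37D_DMA_* fields near the top of this header give the pushbuffer command-word layout: OPCODE in bits 31:29, METHOD_COUNT in 27:18 and METHOD_OFFSET in 13:2. A minimal sketch of packing one command word, assuming the usual convention that the offset field holds the method's byte offset divided by four; nvc37d_method_header is a hypothetical helper, not part of this header.]

    /* Pack one pushbuffer command word from the NVC37D_DMA_* bit ranges. */
    static inline unsigned int
    nvc37d_method_header(unsigned int opcode, unsigned int count, unsigned int method)
    {
        return ((opcode & 0x7)   << 29)        /* NVC37D_DMA_OPCODE        31:29 */
             | ((count  & 0x3ff) << 18)        /* NVC37D_DMA_METHOD_COUNT  27:18 */
             | (((method >> 2) & 0xfff) << 2); /* NVC37D_DMA_METHOD_OFFSET 13:2  */
    }

For example, a single incrementing method write to NVC37D_UPDATE (0x200) would emit nvc37d_method_header(NVC37D_DMA_OPCODE_METHOD, 1, NVC37D_UPDATE) followed by one data word.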
+#define NVC37D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NVC37D_SOR_SET_SW_SPARE_B(a) (0x0000030C + (a)*0x00000020) +#define NVC37D_SOR_SET_SW_SPARE_B_CODE 31:0 + +#define NVC37D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080) +#define NVC37D_WINDOW_SET_CONTROL_OWNER 3:0 +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i)) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8 +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define 
NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define 
NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(a) (0x0000100C + (a)*0x00000080) +#define NVC37D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVC37D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080) 
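[Editor's note: tying this back to the SUPPORT_FLEXIBLE_WIN_MAPPING comment earlier in this header: without flexible mapping, each head owns a fixed pair of windows, which the two mapping macros encode directly. The values in the comments below follow from the macro arithmetic alone.]

    unsigned int head = NVC37D_WINDOW_MAPPED_TO_HEAD(5);         /* 5 >> 1 == 2 */
    unsigned int mask = NVC37D_GET_VALID_WINDOWMASK_FOR_HEAD(2); /* windows 4 and 5:
                                                                    (1<<4)|(1<<5) == 0x30 */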
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0 +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT 17:16 +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT_USAGE_NONE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT_USAGE_257 (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT_USAGE_1025 (0x00000002) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20 +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24 +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) + +#define NVC37D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000400) +#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003) +#define NVC37D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3 +#define NVC37D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PROCAMP_SAT_COS 15:4 +#define NVC37D_HEAD_SET_PROCAMP_SAT_SINE 27:16 +#define NVC37D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28 +#define NVC37D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NVC37D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 29:29 +#define NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL 31:30 +#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL_AUTO (0x00000000) +#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL_VIDEO (0x00000001) +#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL_GRAPHICS (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004) +#define 
NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12 +#define NVC37D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_STRUCTURE 1:0 +#define NVC37D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2 +#define NVC37D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 11:10 +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 8:4 +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 
(0x0000001B) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 15:12 +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 23:22 +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 20:16 +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN 28:24 +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define 
NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NVC37D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NVC37D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000400) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NVC37D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00002010 + (a)*0x00000400) +#define NVC37D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00002014 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define 
NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC37D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000400) +#define NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS 5:4 +#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000) +#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001) +#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002) +#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003) +#define NVC37D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2 +#define NVC37D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE 10:8 +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NVC37D_HEAD_SET_DITHER_CONTROL_PHASE 13:12 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x0000201C + (a)*0x00000400) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 0:0 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING 4:4 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 9:8 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NVC37D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000400 + (b)*0x00000004) +#define NVC37D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000400) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NVC37D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(a) (0x0000202C + (a)*0x00000400) +#define NVC37D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVC37D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000400) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0 +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT 5:4 +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) 
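[Editor's note: throughout the NVC37D_HEAD_* methods, the (a) argument selects a head: per-head method blocks start at 0x2000 and are spaced 0x00000400 bytes apart. A short sketch using a method defined above; nvPushMethod and push are hypothetical, standing in for whatever pushbuffer emit helper a client uses.]

    /* Program a 148.5 MHz pixel clock on head 0. HERTZ occupies bits 30:0
     * of the data word; ADJ1000DIV1001 (bit 31) is left FALSE. */
    unsigned int method = NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(0); /* 0x200C + 0*0x400 */
    nvPushMethod(push, method, 148500000u);                         /* hypothetical helper */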
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8 +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) +#define NVC37D_HEAD_SET_STALL_LOCK(a) (0x00002034 + (a)*0x00000400) +#define NVC37D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NVC37D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NVC37D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NVC37D_HEAD_SET_STALL_LOCK_MODE 2:2 +#define NVC37D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NVC37D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN 8:4 +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 12:12 +#define NVC37D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000) +#define NVC37D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001) +#define NVC37D_HEAD_SET_STALL_LOCK_TEPOLARITY 14:14 +#define 
NVC37D_HEAD_SET_STALL_LOCK_TEPOLARITY_POSITIVE_TRUE (0x00000000) +#define NVC37D_HEAD_SET_STALL_LOCK_TEPOLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC37D_HEAD_SET_LOCK_CHAIN(a) (0x00002044 + (a)*0x00000400) +#define NVC37D_HEAD_SET_LOCK_CHAIN_POSITION 3:0 +#define NVC37D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x00002048 + (a)*0x00000400) +#define NVC37D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NVC37D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000400) +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000400) +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NVC37D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x0000205C + (a)*0x00000400) +#define NVC37D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NVC37D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NVC37D_HEAD_SET_DESKTOP_COLOR(a) (0x00002060 + (a)*0x00000400) +#define NVC37D_HEAD_SET_DESKTOP_COLOR_ALPHA 7:0 +#define NVC37D_HEAD_SET_DESKTOP_COLOR_RED 15:8 +#define NVC37D_HEAD_SET_DESKTOP_COLOR_GREEN 23:16 +#define NVC37D_HEAD_SET_DESKTOP_COLOR_BLUE 31:24 +#define NVC37D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000400) +#define NVC37D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NVC37D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NVC37D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000400) +#define NVC37D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NVC37D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NVC37D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000400) +#define NVC37D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NVC37D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NVC37D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400) +#define NVC37D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NVC37D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NVC37D_HEAD_SET_OVERSCAN_COLOR(a) (0x00002078 + (a)*0x00000400) +#define NVC37D_HEAD_SET_OVERSCAN_COLOR_RED_CR 9:0 +#define NVC37D_HEAD_SET_OVERSCAN_COLOR_GREEN_Y 19:10 +#define NVC37D_HEAD_SET_OVERSCAN_COLOR_BLUE_CB 29:20 +#define NVC37D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(a) (0x0000207C + (a)*0x00000400) +#define NVC37D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_RED_CR 9:0 +#define NVC37D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_GREEN_Y 19:10 +#define NVC37D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_BLUE_CB 29:20 +#define NVC37D_HEAD_SET_HDMI_CTRL(a) (0x00002080 + (a)*0x00000400) +#define NVC37D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NVC37D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NVC37D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NVC37D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NVC37D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NVC37D_HEAD_SET_CONTEXT_DMA_CURSOR(a,b) (0x00002088 + (a)*0x00000400 + (b)*0x00000004) +#define NVC37D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0 +#define NVC37D_HEAD_SET_OFFSET_CURSOR(a,b) (0x00002090 + (a)*0x00000400 + (b)*0x00000004) +#define NVC37D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0 +#define NVC37D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x00002098 + (a)*0x00000400) +#define NVC37D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 0:0 +#define NVC37D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NVC37D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE 
31:31 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA 29:28 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_NONE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_SRGB (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_YUV8_10 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_YUV12 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT(a) (0x000020A4 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE 1:0 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_257 (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_1025 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE 5:4 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_UNITY (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_XRBIAS (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_XVYCC (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE 9:8 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE_INDEX (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE_INTERPOLATE (0x00000001) +#define NVC37D_HEAD_SET_OFFSET_OUTPUT_LUT(a) (0x000020A8 + (a)*0x00000400) +#define NVC37D_HEAD_SET_OFFSET_OUTPUT_LUT_ORIGIN 31:0 +#define NVC37D_HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(a) (0x000020AC + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTEXT_DMA_OUTPUT_LUT_HANDLE 31:0 +#define NVC37D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00002180 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NVC37D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 4:0 +#define NVC37D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8 +#define NVC37D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) 
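[Editor's note: as a worked example of the cursor-control packing above, an enabled 64x64 A8R8G8B8 cursor with its hot spot at (0,0) reduces to a single data word; plain shifts are shown rather than any particular field-packing macro.]

    unsigned int ctl =
          ((unsigned int)NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE << 31) /* ENABLE 31:31 */
        | (NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 << 8)                 /* SIZE    9:8  */
        |  NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8;                   /* FORMAT  7:0  */
    /* == 0x800001CF; HOT_SPOT_X (19:12) and HOT_SPOT_Y (27:20) stay zero. */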
+#define NVC37D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12 +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8 +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR(i) (0x00000060 +(i)) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR__SIZE_1 4 +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR0 (0x00000060) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR1 (0x00000061) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR2 (0x00000062) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR3 (0x00000063) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20 +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8 +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR(i) (0x00000060 +(i)) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR__SIZE_1 4 +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR0 (0x00000060) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR1 (0x00000061) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR2 (0x00000062) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR3 (0x00000063) +#define NVC37D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9 +#define NVC37D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PRESENT_CONTROL(a) (0x0000218C + (a)*0x00000400) +#define NVC37D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 0:0 +#define NVC37D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NVC37D_HEAD_SET_SW_SPARE_A(a) (0x00002194 + (a)*0x00000400) +#define NVC37D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NVC37D_HEAD_SET_SW_SPARE_B(a) (0x00002198 + (a)*0x00000400) +#define NVC37D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NVC37D_HEAD_SET_SW_SPARE_C(a) (0x0000219C + (a)*0x00000400) +#define NVC37D_HEAD_SET_SW_SPARE_C_CODE 
31:0 +#define NVC37D_HEAD_SET_SW_SPARE_D(a) (0x000021A0 + (a)*0x00000400) +#define NVC37D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NVC37D_HEAD_SET_DISPLAY_RATE(a) (0x000021A8 + (a)*0x00000400) +#define NVC37D_HEAD_SET_DISPLAY_RATE_RUN_MODE 0:0 +#define NVC37D_HEAD_SET_DISPLAY_RATE_RUN_MODE_CONTINUOUS (0x00000000) +#define NVC37D_HEAD_SET_DISPLAY_RATE_RUN_MODE_ONE_SHOT (0x00000001) +#define NVC37D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL 25:4 +#define NVC37D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH 2:2 +#define NVC37D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_MIN_FRAME_IDLE(a) (0x00002218 + (a)*0x00000400) +#define NVC37D_HEAD_SET_MIN_FRAME_IDLE_LEADING_RASTER_LINES 14:0 +#define NVC37D_HEAD_SET_MIN_FRAME_IDLE_TRAILING_RASTER_LINES 30:16 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC37d_h diff --git a/src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h b/src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h new file mode 100644 index 0000000..bf0e9a2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h @@ -0,0 +1,50 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __clc37dcrcnotif_h__ +#define __clc37dcrcnotif_h__ +/* This file is autogenerated. 
+/* This file is autogenerated.  Do not edit */
+
+#define NVC37D_NOTIFIER_CRC_STATUS_0 0x00000000
+#define NVC37D_NOTIFIER_CRC_STATUS_0_DONE 0:0
+#define NVC37D_NOTIFIER_CRC_STATUS_0_DONE_FALSE 0x00000000
+#define NVC37D_NOTIFIER_CRC_STATUS_0_DONE_TRUE 0x00000001
+#define NVC37D_NOTIFIER_CRC_STATUS_0_COMPOSITOR_OVERFLOW 3:3
+#define NVC37D_NOTIFIER_CRC_STATUS_0_COMPOSITOR_OVERFLOW_FALSE 0x00000000
+#define NVC37D_NOTIFIER_CRC_STATUS_0_COMPOSITOR_OVERFLOW_TRUE 0x00000001
+#define NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW 4:4
+#define NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW_FALSE 0x00000000
+#define NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW_TRUE 0x00000001
+#define NVC37D_NOTIFIER_CRC_STATUS_0_PRIMARY_OUTPUT_OVERFLOW 5:5
+#define NVC37D_NOTIFIER_CRC_STATUS_0_PRIMARY_OUTPUT_OVERFLOW_FALSE 0x00000000
+#define NVC37D_NOTIFIER_CRC_STATUS_0_PRIMARY_OUTPUT_OVERFLOW_TRUE 0x00000001
+#define NVC37D_NOTIFIER_CRC_STATUS_0_COUNT 27:16
+#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_11 0x0000000B
+#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_11_COMPOSITOR_CRC 31:0
+#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12 0x0000000C
+#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12_RG_CRC 31:0
+#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13 0x0000000D
+#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13_PRIMARY_OUTPUT_CRC 31:0
+#define NVC37D_NOTIFIER_CRC_CRC_ENTRY1_21 0x00000015
+
+#endif // __clc37dcrcnotif_h__
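A sketch of how a client might poll this notifier once CRC generation has been requested, reusing the FIELD_* helpers from the earlier sketch. Treating the notifier as an array of 32-bit words, with the STATUS/ENTRY defines as word indices, is an assumption of this example:

#define FIELD_VAL(f, v) (((v) >> FIELD_LOW(f)) & FIELD_MASK(f))

static int crc_notifier_rg_crc(const volatile uint32_t *notif, uint32_t *rgCrc)
{
    uint32_t status = notif[NVC37D_NOTIFIER_CRC_STATUS_0];

    if (FIELD_VAL(NVC37D_NOTIFIER_CRC_STATUS_0_DONE, status) !=
        NVC37D_NOTIFIER_CRC_STATUS_0_DONE_TRUE)
        return -1;  /* hardware has not finished writing the notifier */

    if (FIELD_VAL(NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW, status) ==
        NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW_TRUE)
        return -1;  /* CRC entries were dropped; COUNT (27:16) says how many were kept */

    *rgCrc = notif[NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12];  /* RG CRC of entry 0 */
    return 0;
}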
diff --git a/src/common/sdk/nvidia/inc/class/clc37dswspare.h b/src/common/sdk/nvidia/inc/class/clc37dswspare.h
new file mode 100644
index 0000000..44b21a8
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc37dswspare.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc37d_sw_spare_h_
+#define _clc37d_sw_spare_h_
+
+/* This file is *not* auto-generated. */
+
+#define NVC37D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF 1:0
+#define NVC37D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF_NO_PREF (0x00000000)
+#define NVC37D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF_QSYNC (0x00000001)
+
+#define NVC37D_HEAD_SET_SW_SPARE_A_DISABLE_MID_FRAME_AND_DWCF_WATERMARK 2:2
+#define NVC37D_HEAD_SET_SW_SPARE_A_DISABLE_MID_FRAME_AND_DWCF_WATERMARK_FALSE (0x00000000)
+#define NVC37D_HEAD_SET_SW_SPARE_A_DISABLE_MID_FRAME_AND_DWCF_WATERMARK_TRUE (0x00000001)
+
+#endif // _clc37d_sw_spare_h_
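These software-defined bits live inside the HEAD_SET_SW_SPARE_A scratch method from clc37d.h. A tiny sketch (not part of the patch, reusing the FIELD_NUM helper from earlier) of requesting the QSYNC VPLL reference:

static inline uint32_t head_sw_spare_a_qsync(void)
{
    return FIELD_NUM(NVC37D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF,
                     NVC37D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF_QSYNC);
}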
diff --git a/src/common/sdk/nvidia/inc/class/clc37e.h b/src/common/sdk/nvidia/inc/class/clc37e.h
new file mode 100644
index 0000000..f46929a
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc37e.h
@@ -0,0 +1,498 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _clC37e_h_
+#define _clC37e_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC37E_WINDOW_CHANNEL_DMA (0x0000C37E)
+
+// dma opcode instructions
+#define NVC37E_DMA
+#define NVC37E_DMA_OPCODE 31:29
+#define NVC37E_DMA_OPCODE_METHOD 0x00000000
+#define NVC37E_DMA_OPCODE_JUMP 0x00000001
+#define NVC37E_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC37E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC37E_DMA_METHOD_COUNT 27:18
+#define NVC37E_DMA_METHOD_OFFSET 13:2
+#define NVC37E_DMA_DATA 31:0
+#define NVC37E_DMA_DATA_NOP 0x00000000
+#define NVC37E_DMA_JUMP_OFFSET 11:2
+#define NVC37E_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NVC37E_PUT (0x00000000)
+#define NVC37E_PUT_PTR 9:0
+#define NVC37E_GET (0x00000004)
+#define NVC37E_GET_PTR 9:0
+#define NVC37E_UPDATE (0x00000200)
+#define NVC37E_UPDATE_RELEASE_ELV 0:0
+#define NVC37E_UPDATE_RELEASE_ELV_FALSE (0x00000000)
+#define NVC37E_UPDATE_RELEASE_ELV_TRUE (0x00000001)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN 8:4
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC37E_UPDATE_INTERLOCK_WITH_WIN_IMM 12:12
+#define NVC37E_UPDATE_INTERLOCK_WITH_WIN_IMM_DISABLE (0x00000000)
+#define NVC37E_UPDATE_INTERLOCK_WITH_WIN_IMM_ENABLE (0x00000001)
+#define NVC37E_GET_LINE (0x00000208)
+#define NVC37E_GET_LINE_LINE 15:0
+#define NVC37E_SET_SEMAPHORE_CONTROL (0x0000020C)
+#define NVC37E_SET_SEMAPHORE_CONTROL_OFFSET 7:0
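The NVC37E_DMA_* fields above describe the pushbuffer encoding for this window channel: a method header carries an opcode in 31:29, a dword count in 27:18, and the method offset in 13:2, followed by the payload words. A hedged sketch (not part of the patch, reusing the FIELD_NUM helper from earlier; assuming METHOD_OFFSET holds the method's offset in dwords):

static inline uint32_t nvc37e_method_header(uint32_t methodByteOffset,
                                            uint32_t nDwords)
{
    return FIELD_NUM(NVC37E_DMA_OPCODE, NVC37E_DMA_OPCODE_METHOD) |
           FIELD_NUM(NVC37E_DMA_METHOD_COUNT, nDwords) |
           FIELD_NUM(NVC37E_DMA_METHOD_OFFSET, methodByteOffset >> 2);
}

A header built this way would be followed by nDwords of payload; NVC37E_DMA_DATA_NOP words can pad the stream.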
+#define NVC37E_SET_SEMAPHORE_ACQUIRE (0x00000210)
+#define NVC37E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0
+#define NVC37E_SET_SEMAPHORE_RELEASE (0x00000214)
+#define NVC37E_SET_SEMAPHORE_RELEASE_VALUE 31:0
+#define NVC37E_SET_CONTEXT_DMA_SEMAPHORE (0x00000218)
+#define NVC37E_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0
+#define NVC37E_SET_CONTEXT_DMA_NOTIFIER (0x0000021C)
+#define NVC37E_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NVC37E_SET_NOTIFIER_CONTROL (0x00000220)
+#define NVC37E_SET_NOTIFIER_CONTROL_MODE 0:0
+#define NVC37E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NVC37E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVC37E_SET_NOTIFIER_CONTROL_OFFSET 11:4
+#define NVC37E_SET_SIZE (0x00000224)
+#define NVC37E_SET_SIZE_WIDTH 15:0
+#define NVC37E_SET_SIZE_HEIGHT 31:16
+#define NVC37E_SET_STORAGE (0x00000228)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NVC37E_SET_STORAGE_MEMORY_LAYOUT 4:4
+#define NVC37E_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NVC37E_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NVC37E_SET_PARAMS (0x0000022C)
+#define NVC37E_SET_PARAMS_FORMAT 7:0
+#define NVC37E_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NVC37E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F)
+#define NVC37E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NVC37E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVC37E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E)
+#define NVC37E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVC37E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6)
+#define NVC37E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NVC37E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9)
+#define NVC37E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NVC37E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NVC37E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NVC37E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024)
+#define NVC37E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NVC37E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NVC37E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NVC37E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028)
+#define NVC37E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___U8V8_N422R (0x00000037)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___U8___V8_N444 (0x0000003A)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___U8___V8_N420 (0x0000003B)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___U10V10_N422R (0x00000057)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___U10___V10_N444 (0x0000005A)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___U10___V10_N420 (0x0000005B)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___U12V12_N422R (0x00000077)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___U12___V12_N444 (0x0000007A)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___U12___V12_N420 (0x0000007B)
+#define NVC37E_SET_PARAMS_COLOR_SPACE 9:8
+#define NVC37E_SET_PARAMS_COLOR_SPACE_RGB (0x00000000)
+#define NVC37E_SET_PARAMS_COLOR_SPACE_YUV_601 (0x00000001)
+#define NVC37E_SET_PARAMS_COLOR_SPACE_YUV_709 (0x00000002)
+#define NVC37E_SET_PARAMS_COLOR_SPACE_YUV_2020 (0x00000003)
+#define NVC37E_SET_PARAMS_INPUT_RANGE 13:12
+#define NVC37E_SET_PARAMS_INPUT_RANGE_BYPASS (0x00000000)
+#define NVC37E_SET_PARAMS_INPUT_RANGE_LIMITED (0x00000001)
+#define NVC37E_SET_PARAMS_INPUT_RANGE_FULL (0x00000002)
+#define NVC37E_SET_PARAMS_UNDERREPLICATE 16:16
+#define NVC37E_SET_PARAMS_UNDERREPLICATE_DISABLE (0x00000000)
+#define NVC37E_SET_PARAMS_UNDERREPLICATE_ENABLE (0x00000001)
+#define NVC37E_SET_PARAMS_DE_GAMMA 21:20
+#define NVC37E_SET_PARAMS_DE_GAMMA_NONE (0x00000000)
+#define NVC37E_SET_PARAMS_DE_GAMMA_SRGB (0x00000001)
+#define NVC37E_SET_PARAMS_DE_GAMMA_YUV8_10 (0x00000002)
+#define NVC37E_SET_PARAMS_DE_GAMMA_YUV12 (0x00000003)
+#define NVC37E_SET_PARAMS_CSC 17:17
+#define NVC37E_SET_PARAMS_CSC_DISABLE (0x00000000)
+#define NVC37E_SET_PARAMS_CSC_ENABLE (0x00000001)
+#define NVC37E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18
+#define NVC37E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000)
+#define NVC37E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001)
+#define NVC37E_SET_PARAMS_SWAP_UV 19:19
+#define NVC37E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000)
+#define NVC37E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001)
+#define NVC37E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004)
+#define NVC37E_SET_PLANAR_STORAGE_PITCH 12:0
+#define NVC37E_SET_CONTEXT_DMA_ISO(b) (0x00000240 + (b)*0x00000004)
+#define NVC37E_SET_CONTEXT_DMA_ISO_HANDLE 31:0
+#define NVC37E_SET_OFFSET(b) (0x00000260 + (b)*0x00000004)
+#define NVC37E_SET_OFFSET_ORIGIN 31:0
+#define NVC37E_SET_PROCESSING (0x00000280)
+#define NVC37E_SET_PROCESSING_USE_GAIN_OFFSETS 0:0
+#define NVC37E_SET_PROCESSING_USE_GAIN_OFFSETS_DISABLE (0x00000000)
+#define NVC37E_SET_PROCESSING_USE_GAIN_OFFSETS_ENABLE (0x00000001)
+#define NVC37E_SET_CONVERSION_RED (0x00000284)
+#define NVC37E_SET_CONVERSION_RED_GAIN 15:0
+#define NVC37E_SET_CONVERSION_RED_OFFSET 31:16
+#define NVC37E_SET_CONVERSION_GREEN (0x00000288)
+#define NVC37E_SET_CONVERSION_GREEN_GAIN 15:0
+#define NVC37E_SET_CONVERSION_GREEN_OFFSET 31:16
+#define NVC37E_SET_CONVERSION_BLUE (0x0000028C)
+#define NVC37E_SET_CONVERSION_BLUE_GAIN 15:0
+#define NVC37E_SET_CONVERSION_BLUE_OFFSET 31:16
+#define NVC37E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004)
+#define NVC37E_SET_POINT_IN_X 15:0
+#define NVC37E_SET_POINT_IN_Y 31:16
+#define NVC37E_SET_SIZE_IN (0x00000298)
+#define NVC37E_SET_SIZE_IN_WIDTH 14:0
+#define NVC37E_SET_SIZE_IN_HEIGHT 30:16
+#define NVC37E_SET_SIZE_OUT (0x000002A4)
+#define NVC37E_SET_SIZE_OUT_WIDTH 14:0
+#define NVC37E_SET_SIZE_OUT_HEIGHT 30:16
+#define NVC37E_SET_CONTROL_INPUT_SCALER (0x000002A8)
+#define NVC37E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS 2:0
+#define NVC37E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NVC37E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NVC37E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS 6:4
+#define NVC37E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NVC37E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004)
+#define NVC37E_SET_INPUT_SCALER_COEFF_VALUE (0x000002AC)
+#define NVC37E_SET_INPUT_SCALER_COEFF_VALUE_DATA 9:0
+#define NVC37E_SET_INPUT_SCALER_COEFF_VALUE_INDEX 19:12
+#define NVC37E_SET_CONTROL_INPUT_LUT (0x000002B0)
+#define NVC37E_SET_CONTROL_INPUT_LUT_SIZE 1:0
+#define NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_257 (0x00000000)
+#define NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_1025 (0x00000002)
+#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE 5:4
+#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE_UNITY (0x00000000)
+#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE_XRBIAS (0x00000001)
+#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE_XVYCC (0x00000002)
+#define NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE 9:8
+#define NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INDEX (0x00000000)
+#define NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INTERPOLATE (0x00000001)
+#define NVC37E_SET_OFFSET_INPUT_LUT (0x000002B4)
+#define NVC37E_SET_OFFSET_INPUT_LUT_ORIGIN 31:0
+#define NVC37E_SET_CONTEXT_DMA_INPUT_LUT (0x000002B8)
+#define NVC37E_SET_CONTEXT_DMA_INPUT_LUT_HANDLE 31:0
+#define NVC37E_SET_CSC_RED2RED (0x000002BC)
+#define NVC37E_SET_CSC_RED2RED_COEFF 18:0
+#define NVC37E_SET_CSC_GREEN2RED (0x000002C0)
+#define NVC37E_SET_CSC_GREEN2RED_COEFF 18:0
+#define NVC37E_SET_CSC_BLUE2RED (0x000002C4)
+#define NVC37E_SET_CSC_BLUE2RED_COEFF 18:0
+#define NVC37E_SET_CSC_CONSTANT2RED (0x000002C8)
+#define NVC37E_SET_CSC_CONSTANT2RED_COEFF 18:0
+#define NVC37E_SET_CSC_RED2GREEN (0x000002CC)
+#define NVC37E_SET_CSC_RED2GREEN_COEFF 18:0
+#define NVC37E_SET_CSC_GREEN2GREEN (0x000002D0)
+#define NVC37E_SET_CSC_GREEN2GREEN_COEFF 18:0
+#define NVC37E_SET_CSC_BLUE2GREEN (0x000002D4)
+#define NVC37E_SET_CSC_BLUE2GREEN_COEFF 18:0
+#define NVC37E_SET_CSC_CONSTANT2GREEN (0x000002D8)
+#define NVC37E_SET_CSC_CONSTANT2GREEN_COEFF 18:0
+#define NVC37E_SET_CSC_RED2BLUE (0x000002DC)
+#define NVC37E_SET_CSC_RED2BLUE_COEFF 18:0
+#define NVC37E_SET_CSC_GREEN2BLUE (0x000002E0)
+#define NVC37E_SET_CSC_GREEN2BLUE_COEFF 18:0
+#define NVC37E_SET_CSC_BLUE2BLUE (0x000002E4)
+#define NVC37E_SET_CSC_BLUE2BLUE_COEFF 18:0
+#define NVC37E_SET_CSC_CONSTANT2BLUE (0x000002E8)
+#define NVC37E_SET_CSC_CONSTANT2BLUE_COEFF 18:0
+#define NVC37E_SET_COMPOSITION_CONTROL (0x000002EC)
+#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT 1:0
+#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE (0x00000000)
+#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC (0x00000001)
+#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST (0x00000002)
+#define NVC37E_SET_COMPOSITION_CONTROL_DEPTH 11:4
+#define NVC37E_SET_COMPOSITION_CONSTANT_ALPHA (0x000002F0)
+#define NVC37E_SET_COMPOSITION_CONSTANT_ALPHA_K1 7:0
+#define NVC37E_SET_COMPOSITION_CONSTANT_ALPHA_K2 15:8
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT (0x000002F4)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT 3:0
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT 7:4
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT 11:8
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K2 (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1 (0x00000004)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT 15:12
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K2 (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1 (0x00000004)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT 19:16
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K1 (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT 23:20
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K1 (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT 27:24
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ONE (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT 31:28
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ONE (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC37E_SET_KEY_ALPHA (0x000002F8)
+#define NVC37E_SET_KEY_ALPHA_MIN 15:0
+#define NVC37E_SET_KEY_ALPHA_MAX 31:16
+#define NVC37E_SET_KEY_RED_CR (0x000002FC)
+#define NVC37E_SET_KEY_RED_CR_MIN 15:0
+#define NVC37E_SET_KEY_RED_CR_MAX 31:16
+#define NVC37E_SET_KEY_GREEN_Y (0x00000300)
+#define NVC37E_SET_KEY_GREEN_Y_MIN 15:0
+#define NVC37E_SET_KEY_GREEN_Y_MAX 31:16
+#define NVC37E_SET_KEY_BLUE_CB (0x00000304)
+#define NVC37E_SET_KEY_BLUE_CB_MIN 15:0
+#define NVC37E_SET_KEY_BLUE_CB_MAX 31:16
+#define NVC37E_SET_PRESENT_CONTROL (0x00000308)
+#define NVC37E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0
+#define NVC37E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4
+#define NVC37E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000)
+#define NVC37E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001)
+#define NVC37E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8
+#define NVC37E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000)
+#define NVC37E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001)
+#define NVC37E_SET_PRESENT_CONTROL_STEREO_MODE 13:12
+#define NVC37E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000)
+#define NVC37E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001)
+#define NVC37E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002)
+#define NVC37E_SET_TIMESTAMP_ORIGIN_LO (0x00000340)
+#define NVC37E_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0
+#define NVC37E_SET_TIMESTAMP_ORIGIN_HI (0x00000344)
+#define NVC37E_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0
+#define NVC37E_SET_UPDATE_TIMESTAMP_LO (0x00000348)
+#define NVC37E_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0
+#define NVC37E_SET_UPDATE_TIMESTAMP_HI (0x0000034C)
+#define NVC37E_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0
+#define NVC37E_SET_INTERLOCK_FLAGS (0x00000370)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 0:0
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+1):((i)+1)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 1:1
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 2:2
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 3:3
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 4:4
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 5:5
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 6:6
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 7:7
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 8:8
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS (0x00000374)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clC37e_h
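The per-window fields above are one bit each, and the (i)-parameterized form selects them generically. A sketch (not part of the patch, reusing the FIELD_NUM helper from earlier) of building the SET_WINDOW_INTERLOCK_FLAGS payload from a caller-supplied window bitmask, which is an assumption of this example:

static inline uint32_t nvc37e_window_interlock_flags(uint32_t winMask)
{
    uint32_t flags = 0;
    unsigned i;

    for (i = 0; i < NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1; i++) {
        if (winMask & (1u << i))
            flags |= FIELD_NUM(NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i),
                               NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE);
    }
    return flags;
}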
diff --git a/src/common/sdk/nvidia/inc/class/clc397.h b/src/common/sdk/nvidia/inc/class/clc397.h
new file mode 100644
index 0000000..2122c75
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc397.h
@@ -0,0 +1,4219 @@
+/*******************************************************************************
+ Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+
+#ifndef _cl_volta_a_h_
+#define _cl_volta_a_h_
+
+/* AUTO GENERATED FILE -- DO NOT EDIT */
+/* Command: ../../../../class/bin/sw_header.pl volta_a */
+
+#include "nvtypes.h"
+
+#define VOLTA_A 0xC397
+
+#define NVC397_SET_OBJECT 0x0000
+#define NVC397_SET_OBJECT_CLASS_ID 15:0
+#define NVC397_SET_OBJECT_ENGINE_ID 20:16
+
+#define NVC397_NO_OPERATION 0x0100
+#define NVC397_NO_OPERATION_V 31:0
+
+#define NVC397_SET_NOTIFY_A 0x0104
+#define NVC397_SET_NOTIFY_A_ADDRESS_UPPER 7:0
+
+#define NVC397_SET_NOTIFY_B 0x0108
+#define NVC397_SET_NOTIFY_B_ADDRESS_LOWER 31:0
+
+#define NVC397_NOTIFY 0x010c
+#define NVC397_NOTIFY_TYPE 31:0
+#define NVC397_NOTIFY_TYPE_WRITE_ONLY 0x00000000
+#define NVC397_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001
+
+#define NVC397_WAIT_FOR_IDLE 0x0110
+#define NVC397_WAIT_FOR_IDLE_V 31:0
+
+#define NVC397_LOAD_MME_INSTRUCTION_RAM_POINTER 0x0114
+#define NVC397_LOAD_MME_INSTRUCTION_RAM_POINTER_V 31:0
+
+#define NVC397_LOAD_MME_INSTRUCTION_RAM 0x0118
+#define NVC397_LOAD_MME_INSTRUCTION_RAM_V 31:0
+
+#define NVC397_LOAD_MME_START_ADDRESS_RAM_POINTER 0x011c
+#define NVC397_LOAD_MME_START_ADDRESS_RAM_POINTER_V 31:0
+
+#define NVC397_LOAD_MME_START_ADDRESS_RAM 0x0120
+#define NVC397_LOAD_MME_START_ADDRESS_RAM_V 31:0
+
+#define NVC397_SET_MME_SHADOW_RAM_CONTROL 0x0124
+#define NVC397_SET_MME_SHADOW_RAM_CONTROL_MODE 1:0
+#define NVC397_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK 0x00000000
+#define NVC397_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK_WITH_FILTER 0x00000001
+#define NVC397_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH 0x00000002
+#define NVC397_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_REPLAY 0x00000003
+
+#define NVC397_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER 0x0128
+#define NVC397_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER_V 7:0
+
+#define NVC397_PEER_SEMAPHORE_RELEASE_OFFSET 0x012c
+#define NVC397_PEER_SEMAPHORE_RELEASE_OFFSET_V 31:0
+
+#define NVC397_SET_GLOBAL_RENDER_ENABLE_A 0x0130
+#define NVC397_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0
+
+#define NVC397_SET_GLOBAL_RENDER_ENABLE_B 0x0134
+#define NVC397_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0
+
+#define NVC397_SET_GLOBAL_RENDER_ENABLE_C 0x0138
+#define NVC397_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0
+#define NVC397_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000
+#define NVC397_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001
+#define NVC397_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002
+#define NVC397_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003
+#define NVC397_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NVC397_SEND_GO_IDLE 0x013c
+#define NVC397_SEND_GO_IDLE_V 31:0
+
+#define NVC397_PM_TRIGGER 0x0140
+#define NVC397_PM_TRIGGER_V 31:0
+
+#define NVC397_PM_TRIGGER_WFI 0x0144
+#define NVC397_PM_TRIGGER_WFI_V 31:0
+
+#define NVC397_FE_ATOMIC_SEQUENCE_BEGIN 0x0148
+#define NVC397_FE_ATOMIC_SEQUENCE_BEGIN_V 31:0
+
+#define NVC397_FE_ATOMIC_SEQUENCE_END 0x014c
+#define NVC397_FE_ATOMIC_SEQUENCE_END_V 31:0
+
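Binding this class to a subchannel starts with SET_OBJECT, whose payload carries the class ID and engine ID. A sketch (not part of the patch, reusing the FIELD_NUM helper from earlier; engine 0 is an assumption of the example):

static inline uint32_t nvc397_set_object_payload(void)
{
    return FIELD_NUM(NVC397_SET_OBJECT_CLASS_ID, VOLTA_A) |
           FIELD_NUM(NVC397_SET_OBJECT_ENGINE_ID, 0);  /* engine 0: assumed */
}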
+#define NVC397_SET_INSTRUMENTATION_METHOD_HEADER 0x0150
+#define NVC397_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0
+
+#define NVC397_SET_INSTRUMENTATION_METHOD_DATA 0x0154
+#define NVC397_SET_INSTRUMENTATION_METHOD_DATA_V 31:0
+
+#define NVC397_LINE_LENGTH_IN 0x0180
+#define NVC397_LINE_LENGTH_IN_VALUE 31:0
+
+#define NVC397_LINE_COUNT 0x0184
+#define NVC397_LINE_COUNT_VALUE 31:0
+
+#define NVC397_OFFSET_OUT_UPPER 0x0188
+#define NVC397_OFFSET_OUT_UPPER_VALUE 7:0
+
+#define NVC397_OFFSET_OUT 0x018c
+#define NVC397_OFFSET_OUT_VALUE 31:0
+
+#define NVC397_PITCH_OUT 0x0190
+#define NVC397_PITCH_OUT_VALUE 31:0
+
+#define NVC397_SET_DST_BLOCK_SIZE 0x0194
+#define NVC397_SET_DST_BLOCK_SIZE_WIDTH 3:0
+#define NVC397_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVC397_SET_DST_BLOCK_SIZE_HEIGHT 7:4
+#define NVC397_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVC397_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVC397_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC397_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC397_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC397_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC397_SET_DST_BLOCK_SIZE_DEPTH 11:8
+#define NVC397_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+#define NVC397_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001
+#define NVC397_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002
+#define NVC397_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003
+#define NVC397_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVC397_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005
+
+#define NVC397_SET_DST_WIDTH 0x0198
+#define NVC397_SET_DST_WIDTH_V 31:0
+
+#define NVC397_SET_DST_HEIGHT 0x019c
+#define NVC397_SET_DST_HEIGHT_V 31:0
+
+#define NVC397_SET_DST_DEPTH 0x01a0
+#define NVC397_SET_DST_DEPTH_V 31:0
+
+#define NVC397_SET_DST_LAYER 0x01a4
+#define NVC397_SET_DST_LAYER_V 31:0
+
+#define NVC397_SET_DST_ORIGIN_BYTES_X 0x01a8
+#define NVC397_SET_DST_ORIGIN_BYTES_X_V 20:0
+
+#define NVC397_SET_DST_ORIGIN_SAMPLES_Y 0x01ac
+#define NVC397_SET_DST_ORIGIN_SAMPLES_Y_V 16:0
+
+#define NVC397_LAUNCH_DMA 0x01b0
+#define NVC397_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0
+#define NVC397_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NVC397_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001
+#define NVC397_LAUNCH_DMA_COMPLETION_TYPE 5:4
+#define NVC397_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000
+#define NVC397_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001
+#define NVC397_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002
+#define NVC397_LAUNCH_DMA_INTERRUPT_TYPE 9:8
+#define NVC397_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000
+#define NVC397_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001
+#define NVC397_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12
+#define NVC397_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000
+#define NVC397_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001
+#define NVC397_LAUNCH_DMA_REDUCTION_ENABLE 1:1
+#define NVC397_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000
+#define NVC397_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001
+#define NVC397_LAUNCH_DMA_REDUCTION_OP 15:13
+#define NVC397_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000
+#define NVC397_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001
+#define NVC397_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002
+#define NVC397_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003
+#define NVC397_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004
+#define NVC397_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005
+#define NVC397_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006
+#define NVC397_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007
+#define NVC397_LAUNCH_DMA_REDUCTION_FORMAT 3:2
+#define NVC397_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000
+#define NVC397_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001
+#define NVC397_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6
+#define NVC397_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000
+#define NVC397_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001
+
+#define NVC397_LOAD_INLINE_DATA 0x01b4
+#define NVC397_LOAD_INLINE_DATA_V 31:0
+
+#define NVC397_SET_I2M_SEMAPHORE_A 0x01dc
+#define NVC397_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0
+
+#define NVC397_SET_I2M_SEMAPHORE_B 0x01e0
+#define NVC397_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0
+
+#define NVC397_SET_I2M_SEMAPHORE_C 0x01e4
+#define NVC397_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0
+
+#define NVC397_SET_I2M_SPARE_NOOP00 0x01f0
+#define NVC397_SET_I2M_SPARE_NOOP00_V 31:0
+
+#define NVC397_SET_I2M_SPARE_NOOP01 0x01f4
+#define NVC397_SET_I2M_SPARE_NOOP01_V 31:0
+
+#define NVC397_SET_I2M_SPARE_NOOP02 0x01f8
+#define NVC397_SET_I2M_SPARE_NOOP02_V 31:0
+
+#define NVC397_SET_I2M_SPARE_NOOP03 0x01fc
+#define NVC397_SET_I2M_SPARE_NOOP03_V 31:0
+
+#define NVC397_RUN_DS_NOW 0x0200
+#define NVC397_RUN_DS_NOW_V 31:0
+
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS 0x0204
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD 4:0
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_INSTANTANEOUS 0x00000000
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16 0x00000001
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32 0x00000002
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__64 0x00000003
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__128 0x00000004
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__256 0x00000005
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__512 0x00000006
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1024 0x00000007
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2048 0x00000008
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4096 0x00000009
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__8192 0x0000000A
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16384 0x0000000B
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32768 0x0000000C
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__65536 0x0000000D
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__131072 0x0000000E
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__262144 0x0000000F
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__524288 0x00000010
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1048576 0x00000011
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2097152 0x00000012
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4194304 0x00000013
+#define NVC397_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_LATEZ_ALWAYS 0x0000001F
+
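The LINE_LENGTH_IN / LINE_COUNT / OFFSET_OUT / LAUNCH_DMA / LOAD_INLINE_DATA methods above form the inline-to-memory (I2M) path: the destination and geometry are programmed first, LAUNCH_DMA arms the transfer, and the payload is then streamed through LOAD_INLINE_DATA. A hedged sketch, not part of the patch; push_method stands in for whatever pushbuffer writer the driver uses and is an assumption here, as is the single-line pitch-layout setup:

extern void push_method(uint32_t method, uint32_t data);  /* hypothetical */

static void i2m_upload(uint64_t dstGpuVa, const uint32_t *data, uint32_t bytes)
{
    push_method(NVC397_OFFSET_OUT_UPPER, (uint32_t)(dstGpuVa >> 32));
    push_method(NVC397_OFFSET_OUT, (uint32_t)dstGpuVa);
    push_method(NVC397_LINE_LENGTH_IN, bytes);
    push_method(NVC397_LINE_COUNT, 1);
    push_method(NVC397_LAUNCH_DMA,
                FIELD_NUM(NVC397_LAUNCH_DMA_DST_MEMORY_LAYOUT,
                          NVC397_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH) |
                FIELD_NUM(NVC397_LAUNCH_DMA_COMPLETION_TYPE,
                          NVC397_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY));
    for (uint32_t i = 0; i < bytes / 4; i++)
        push_method(NVC397_LOAD_INLINE_DATA, data[i]);
}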
+#define NVC397_SET_GS_MODE 0x0208
+#define NVC397_SET_GS_MODE_TYPE 0:0
+#define NVC397_SET_GS_MODE_TYPE_ANY 0x00000000
+#define NVC397_SET_GS_MODE_TYPE_FAST_GS 0x00000001
+
+#define NVC397_SET_ALIASED_LINE_WIDTH_ENABLE 0x020c
+#define NVC397_SET_ALIASED_LINE_WIDTH_ENABLE_V 0:0
+#define NVC397_SET_ALIASED_LINE_WIDTH_ENABLE_V_FALSE 0x00000000
+#define NVC397_SET_ALIASED_LINE_WIDTH_ENABLE_V_TRUE 0x00000001
+
+#define NVC397_SET_API_MANDATED_EARLY_Z 0x0210
+#define NVC397_SET_API_MANDATED_EARLY_Z_ENABLE 0:0
+#define NVC397_SET_API_MANDATED_EARLY_Z_ENABLE_FALSE 0x00000000
+#define NVC397_SET_API_MANDATED_EARLY_Z_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_GS_DM_FIFO 0x0214
+#define NVC397_SET_GS_DM_FIFO_SIZE_RASTER_ON 12:0
+#define NVC397_SET_GS_DM_FIFO_SIZE_RASTER_OFF 28:16
+#define NVC397_SET_GS_DM_FIFO_SPILL_ENABLED 31:31
+#define NVC397_SET_GS_DM_FIFO_SPILL_ENABLED_FALSE 0x00000000
+#define NVC397_SET_GS_DM_FIFO_SPILL_ENABLED_TRUE 0x00000001
+
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS 0x0218
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY 5:4
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC397_INVALIDATE_SHADER_CACHES 0x021c
+#define NVC397_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0
+#define NVC397_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000
+#define NVC397_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001
+#define NVC397_INVALIDATE_SHADER_CACHES_DATA 4:4
+#define NVC397_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000
+#define NVC397_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001
+#define NVC397_INVALIDATE_SHADER_CACHES_CONSTANT 12:12
+#define NVC397_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000
+#define NVC397_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001
+#define NVC397_INVALIDATE_SHADER_CACHES_LOCKS 1:1
+#define NVC397_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000
+#define NVC397_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001
+#define NVC397_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2
+#define NVC397_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000
+#define NVC397_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001
+
+#define NVC397_SET_INSTANCE_COUNT 0x0220
+#define NVC397_SET_INSTANCE_COUNT_V 31:0
+
+#define NVC397_SET_POSITION_W_SCALED_OFFSET_ENABLE 0x0224
+#define NVC397_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE 0:0
+#define NVC397_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE_FALSE 0x00000000
+#define NVC397_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_GO_IDLE_TIMEOUT 0x022c
+#define NVC397_SET_GO_IDLE_TIMEOUT_V 31:0
+
+#define NVC397_SET_MME_VERSION 0x0234
+#define NVC397_SET_MME_VERSION_MAJOR 7:0
+
+#define NVC397_INCREMENT_SYNC_POINT 0x02c8
+#define NVC397_INCREMENT_SYNC_POINT_INDEX 11:0
+#define NVC397_INCREMENT_SYNC_POINT_CLEAN_L2 16:16
+#define NVC397_INCREMENT_SYNC_POINT_CLEAN_L2_FALSE 0x00000000
+#define NVC397_INCREMENT_SYNC_POINT_CLEAN_L2_TRUE 0x00000001
+#define NVC397_INCREMENT_SYNC_POINT_CONDITION 20:20
+#define NVC397_INCREMENT_SYNC_POINT_CONDITION_STREAM_OUT_WRITES_DONE 0x00000000
+#define NVC397_INCREMENT_SYNC_POINT_CONDITION_ROP_WRITES_DONE 0x00000001
+
+#define NVC397_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE 0x02d4
+#define NVC397_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE_V 0:0
+
+#define NVC397_SET_SURFACE_CLIP_ID_BLOCK_SIZE 0x02d8
+#define NVC397_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH 3:0
+#define NVC397_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVC397_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT 7:4
+#define NVC397_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVC397_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVC397_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC397_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC397_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC397_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC397_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH 11:8
+#define NVC397_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+
+#define NVC397_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dc
+#define NVC397_SET_ALPHA_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 13:0
+
+#define NVC397_DECOMPRESS_SURFACE 0x02e0
+#define NVC397_DECOMPRESS_SURFACE_MRT_SELECT 2:0
+#define NVC397_DECOMPRESS_SURFACE_RT_ARRAY_INDEX 19:4
+
+#define NVC397_SET_ZCULL_ROP_BYPASS 0x02e4
+#define NVC397_SET_ZCULL_ROP_BYPASS_ENABLE 0:0
+#define NVC397_SET_ZCULL_ROP_BYPASS_ENABLE_FALSE 0x00000000
+#define NVC397_SET_ZCULL_ROP_BYPASS_ENABLE_TRUE 0x00000001
+#define NVC397_SET_ZCULL_ROP_BYPASS_NO_STALL 4:4
+#define NVC397_SET_ZCULL_ROP_BYPASS_NO_STALL_FALSE 0x00000000
+#define NVC397_SET_ZCULL_ROP_BYPASS_NO_STALL_TRUE 0x00000001
+#define NVC397_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING 8:8
+#define NVC397_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_FALSE 0x00000000
+#define NVC397_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_TRUE 0x00000001
+#define NVC397_SET_ZCULL_ROP_BYPASS_THRESHOLD 15:12
+
+#define NVC397_SET_ZCULL_SUBREGION 0x02e8
+#define NVC397_SET_ZCULL_SUBREGION_ENABLE 0:0
+#define NVC397_SET_ZCULL_SUBREGION_ENABLE_FALSE 0x00000000
+#define NVC397_SET_ZCULL_SUBREGION_ENABLE_TRUE 0x00000001
+#define NVC397_SET_ZCULL_SUBREGION_NORMALIZED_ALIQUOTS 27:4
+
+#define NVC397_SET_RASTER_BOUNDING_BOX 0x02ec
+#define NVC397_SET_RASTER_BOUNDING_BOX_MODE 0:0
+#define NVC397_SET_RASTER_BOUNDING_BOX_MODE_BOUNDING_BOX 0x00000000
+#define NVC397_SET_RASTER_BOUNDING_BOX_MODE_FULL_VIEWPORT 0x00000001
+#define NVC397_SET_RASTER_BOUNDING_BOX_PAD 11:4
+
+#define NVC397_PEER_SEMAPHORE_RELEASE 0x02f0
+#define NVC397_PEER_SEMAPHORE_RELEASE_V 31:0
+
+#define NVC397_SET_ITERATED_BLEND_OPTIMIZATION 0x02f4
+#define NVC397_SET_ITERATED_BLEND_OPTIMIZATION_NOOP 1:0
+#define NVC397_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_NEVER 0x00000000
+#define NVC397_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0000 0x00000001
+#define NVC397_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_ALPHA_0 0x00000002
+#define NVC397_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0001 0x00000003
+
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION 0x02f8
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_SUBREGION_ID 7:0
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_ALIQUOTS 23:8
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT 27:24
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16X2_4X4 0x00000000
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X16_4X4 0x00000001
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X2 0x00000002
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_2X4 0x00000003
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X8_4X4 0x00000004
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_4X2 0x00000005
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_2X4 0x00000006
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X8 0x00000007
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_2X2 0x00000008
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_4X2 0x00000009
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_2X4 0x0000000A
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_8X8_2X2 0x0000000B
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_1X1 0x0000000C
+#define NVC397_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_NONE 0x0000000F
+
+#define NVC397_ASSIGN_ZCULL_SUBREGIONS 0x02fc
+#define NVC397_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM 1:0
+#define NVC397_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Static 0x00000000
+#define NVC397_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Adaptive 0x00000001
+
+#define NVC397_SET_PS_OUTPUT_SAMPLE_MASK_USAGE 0x0300
+#define NVC397_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE 0:0
+#define NVC397_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_FALSE 0x00000000
+#define NVC397_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_TRUE 0x00000001
+#define NVC397_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE 1:1
+#define NVC397_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000
+#define NVC397_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001
+
+#define NVC397_DRAW_ZERO_INDEX 0x0304
+#define NVC397_DRAW_ZERO_INDEX_COUNT 31:0
+
+#define NVC397_SET_L1_CONFIGURATION 0x0308
+#define NVC397_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY 2:0
+#define NVC397_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_16KB 0x00000001
+#define NVC397_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB 0x00000003
+
+#define NVC397_SET_RENDER_ENABLE_CONTROL 0x030c
+#define NVC397_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER 0:0
+#define NVC397_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_FALSE 0x00000000
+#define NVC397_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_TRUE 0x00000001
+
+#define NVC397_SET_SPA_VERSION 0x0310
+#define NVC397_SET_SPA_VERSION_MINOR 7:0
+#define NVC397_SET_SPA_VERSION_MAJOR 15:8
+
+#define NVC397_SET_SNAP_GRID_LINE 0x0318
+#define NVC397_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL 3:0
+#define NVC397_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001
+#define NVC397_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002
+#define NVC397_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003
+#define NVC397_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004
+#define NVC397_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005
+#define NVC397_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006
+#define NVC397_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007
+#define NVC397_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008
+#define NVC397_SET_SNAP_GRID_LINE_ROUNDING_MODE 8:8
+#define NVC397_SET_SNAP_GRID_LINE_ROUNDING_MODE_RTNE 0x00000000
+#define NVC397_SET_SNAP_GRID_LINE_ROUNDING_MODE_TESLA 0x00000001
+
+#define NVC397_SET_SNAP_GRID_NON_LINE 0x031c
+#define NVC397_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL 3:0
+#define NVC397_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001
+#define NVC397_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002
+#define NVC397_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003
+#define NVC397_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004
+#define NVC397_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005
NVC397_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006 +#define NVC397_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007 +#define NVC397_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008 +#define NVC397_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE 8:8 +#define NVC397_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_RTNE 0x00000000 +#define NVC397_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_TESLA 0x00000001 + +#define NVC397_SET_TESSELLATION_PARAMETERS 0x0320 +#define NVC397_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE 1:0 +#define NVC397_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_ISOLINE 0x00000000 +#define NVC397_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_TRIANGLE 0x00000001 +#define NVC397_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_QUAD 0x00000002 +#define NVC397_SET_TESSELLATION_PARAMETERS_SPACING 5:4 +#define NVC397_SET_TESSELLATION_PARAMETERS_SPACING_INTEGER 0x00000000 +#define NVC397_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_ODD 0x00000001 +#define NVC397_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_EVEN 0x00000002 +#define NVC397_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES 9:8 +#define NVC397_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_POINTS 0x00000000 +#define NVC397_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_LINES 0x00000001 +#define NVC397_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CW 0x00000002 +#define NVC397_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CCW 0x00000003 + +#define NVC397_SET_TESSELLATION_LOD_U0_OR_DENSITY 0x0324 +#define NVC397_SET_TESSELLATION_LOD_U0_OR_DENSITY_V 31:0 + +#define NVC397_SET_TESSELLATION_LOD_V0_OR_DETAIL 0x0328 +#define NVC397_SET_TESSELLATION_LOD_V0_OR_DETAIL_V 31:0 + +#define NVC397_SET_TESSELLATION_LOD_U1_OR_W0 0x032c +#define NVC397_SET_TESSELLATION_LOD_U1_OR_W0_V 31:0 + +#define NVC397_SET_TESSELLATION_LOD_V1 0x0330 +#define NVC397_SET_TESSELLATION_LOD_V1_V 31:0 + +#define NVC397_SET_TG_LOD_INTERIOR_U 0x0334 +#define NVC397_SET_TG_LOD_INTERIOR_U_V 31:0 + +#define NVC397_SET_TG_LOD_INTERIOR_V 0x0338 +#define NVC397_SET_TG_LOD_INTERIOR_V_V 31:0 + +#define NVC397_RESERVED_TG07 0x033c +#define NVC397_RESERVED_TG07_V 0:0 + +#define NVC397_RESERVED_TG08 0x0340 +#define NVC397_RESERVED_TG08_V 0:0 + +#define NVC397_RESERVED_TG09 0x0344 +#define NVC397_RESERVED_TG09_V 0:0 + +#define NVC397_RESERVED_TG10 0x0348 +#define NVC397_RESERVED_TG10_V 0:0 + +#define NVC397_RESERVED_TG11 0x034c +#define NVC397_RESERVED_TG11_V 0:0 + +#define NVC397_RESERVED_TG12 0x0350 +#define NVC397_RESERVED_TG12_V 0:0 + +#define NVC397_RESERVED_TG13 0x0354 +#define NVC397_RESERVED_TG13_V 0:0 + +#define NVC397_RESERVED_TG14 0x0358 +#define NVC397_RESERVED_TG14_V 0:0 + +#define NVC397_RESERVED_TG15 0x035c +#define NVC397_RESERVED_TG15_V 0:0 + +#define NVC397_SET_SUBTILING_PERF_KNOB_A 0x0360 +#define NVC397_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_REGISTER_FILE_PER_SUBTILE 7:0 +#define NVC397_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_PIXEL_OUTPUT_BUFFER_PER_SUBTILE 15:8 +#define NVC397_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_TRIANGLE_RAM_PER_SUBTILE 23:16 +#define NVC397_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_MAX_QUADS_PER_SUBTILE 31:24 + +#define NVC397_SET_SUBTILING_PERF_KNOB_B 0x0364 +#define NVC397_SET_SUBTILING_PERF_KNOB_B_FRACTION_OF_MAX_PRIMITIVES_PER_SUBTILE 7:0 + +#define NVC397_SET_SUBTILING_PERF_KNOB_C 0x0368 +#define NVC397_SET_SUBTILING_PERF_KNOB_C_RESERVED 0:0 + +#define NVC397_SET_ZCULL_SUBREGION_TO_REPORT 0x036c +#define NVC397_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE 0:0 +#define NVC397_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_FALSE 
0x00000000 +#define NVC397_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_TRUE 0x00000001 +#define NVC397_SET_ZCULL_SUBREGION_TO_REPORT_SUBREGION_ID 11:4 + +#define NVC397_SET_ZCULL_SUBREGION_REPORT_TYPE 0x0370 +#define NVC397_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE 0:0 +#define NVC397_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_FALSE 0x00000000 +#define NVC397_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_TRUE 0x00000001 +#define NVC397_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE 6:4 +#define NVC397_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST 0x00000000 +#define NVC397_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_NO_ACCEPT 0x00000001 +#define NVC397_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_LATE_Z 0x00000002 +#define NVC397_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_STENCIL_TEST 0x00000003 + +#define NVC397_SET_BALANCED_PRIMITIVE_WORKLOAD 0x0374 +#define NVC397_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE 0:0 +#define NVC397_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_FALSE 0x00000000 +#define NVC397_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_TRUE 0x00000001 +#define NVC397_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE 4:4 +#define NVC397_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_FALSE 0x00000000 +#define NVC397_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_TRUE 0x00000001 +#define NVC397_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE 8:8 +#define NVC397_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE_FALSE 0x00000000 +#define NVC397_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE_TRUE 0x00000001 +#define NVC397_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE 9:9 +#define NVC397_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE_FALSE 0x00000000 +#define NVC397_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE_TRUE 0x00000001 + +#define NVC397_SET_MAX_PATCHES_PER_BATCH 0x0378 +#define NVC397_SET_MAX_PATCHES_PER_BATCH_V 5:0 + +#define NVC397_SET_RASTER_ENABLE 0x037c +#define NVC397_SET_RASTER_ENABLE_V 0:0 +#define NVC397_SET_RASTER_ENABLE_V_FALSE 0x00000000 +#define NVC397_SET_RASTER_ENABLE_V_TRUE 0x00000001 + +#define NVC397_SET_STREAM_OUT_BUFFER_ENABLE(j) (0x0380+(j)*32) +#define NVC397_SET_STREAM_OUT_BUFFER_ENABLE_V 0:0 +#define NVC397_SET_STREAM_OUT_BUFFER_ENABLE_V_FALSE 0x00000000 +#define NVC397_SET_STREAM_OUT_BUFFER_ENABLE_V_TRUE 0x00000001 + +#define NVC397_SET_STREAM_OUT_BUFFER_ADDRESS_A(j) (0x0384+(j)*32) +#define NVC397_SET_STREAM_OUT_BUFFER_ADDRESS_A_UPPER 7:0 + +#define NVC397_SET_STREAM_OUT_BUFFER_ADDRESS_B(j) (0x0388+(j)*32) +#define NVC397_SET_STREAM_OUT_BUFFER_ADDRESS_B_LOWER 31:0 + +#define NVC397_SET_STREAM_OUT_BUFFER_SIZE(j) (0x038c+(j)*32) +#define NVC397_SET_STREAM_OUT_BUFFER_SIZE_BYTES 31:0 + +#define NVC397_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER(j) (0x0390+(j)*32) +#define NVC397_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER_START_OFFSET 31:0 + +#define NVC397_SET_POSITION_W_SCALED_OFFSET_SCALE_A(j) (0x0400+(j)*16) +#define NVC397_SET_POSITION_W_SCALED_OFFSET_SCALE_A_V 31:0 + +#define NVC397_SET_POSITION_W_SCALED_OFFSET_SCALE_B(j) (0x0404+(j)*16) +#define NVC397_SET_POSITION_W_SCALED_OFFSET_SCALE_B_V 31:0 + +#define NVC397_SET_POSITION_W_SCALED_OFFSET_RESERVED_A(j) (0x0408+(j)*16) +#define NVC397_SET_POSITION_W_SCALED_OFFSET_RESERVED_A_V 31:0 + +#define NVC397_SET_POSITION_W_SCALED_OFFSET_RESERVED_B(j) (0x040c+(j)*16) +#define NVC397_SET_POSITION_W_SCALED_OFFSET_RESERVED_B_V 31:0 + +#define NVC397_SET_STREAM_OUT_CONTROL_STREAM(j) (0x0700+(j)*16) +#define 
NVC397_SET_STREAM_OUT_CONTROL_STREAM_SELECT 1:0 + +#define NVC397_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT(j) (0x0704+(j)*16) +#define NVC397_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT_MAX 7:0 + +#define NVC397_SET_STREAM_OUT_CONTROL_STRIDE(j) (0x0708+(j)*16) +#define NVC397_SET_STREAM_OUT_CONTROL_STRIDE_BYTES 31:0 + +#define NVC397_SET_RASTER_INPUT 0x0740 +#define NVC397_SET_RASTER_INPUT_STREAM_SELECT 1:0 + +#define NVC397_SET_STREAM_OUTPUT 0x0744 +#define NVC397_SET_STREAM_OUTPUT_ENABLE 0:0 +#define NVC397_SET_STREAM_OUTPUT_ENABLE_FALSE 0x00000000 +#define NVC397_SET_STREAM_OUTPUT_ENABLE_TRUE 0x00000001 + +#define NVC397_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE 0x0748 +#define NVC397_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE 0:0 +#define NVC397_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_FALSE 0x00000000 +#define NVC397_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_TRUE 0x00000001 + +#define NVC397_SET_HYBRID_ANTI_ALIAS_CONTROL 0x0754 +#define NVC397_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES 3:0 +#define NVC397_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID 4:4 +#define NVC397_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_FRAGMENT 0x00000000 +#define NVC397_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_PASS 0x00000001 +#define NVC397_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES_EXTENDED 5:5 + +#define NVC397_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c +#define NVC397_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0 + +#define NVC397_SET_SHADER_LOCAL_MEMORY_A 0x0790 +#define NVC397_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0 + +#define NVC397_SET_SHADER_LOCAL_MEMORY_B 0x0794 +#define NVC397_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0 + +#define NVC397_SET_SHADER_LOCAL_MEMORY_C 0x0798 +#define NVC397_SET_SHADER_LOCAL_MEMORY_C_SIZE_UPPER 5:0 + +#define NVC397_SET_SHADER_LOCAL_MEMORY_D 0x079c +#define NVC397_SET_SHADER_LOCAL_MEMORY_D_SIZE_LOWER 31:0 + +#define NVC397_SET_SHADER_LOCAL_MEMORY_E 0x07a0 +#define NVC397_SET_SHADER_LOCAL_MEMORY_E_DEFAULT_SIZE_PER_WARP 25:0 + +#define NVC397_SET_COLOR_ZERO_BANDWIDTH_CLEAR 0x07a4 +#define NVC397_SET_COLOR_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0 + +#define NVC397_SET_Z_ZERO_BANDWIDTH_CLEAR 0x07a8 +#define NVC397_SET_Z_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0 + +#define NVC397_SET_STENCIL_ZERO_BANDWIDTH_CLEAR 0x07b0 +#define NVC397_SET_STENCIL_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0 + +#define NVC397_SET_ZCULL_REGION_SIZE_A 0x07c0 +#define NVC397_SET_ZCULL_REGION_SIZE_A_WIDTH 15:0 + +#define NVC397_SET_ZCULL_REGION_SIZE_B 0x07c4 +#define NVC397_SET_ZCULL_REGION_SIZE_B_HEIGHT 15:0 + +#define NVC397_SET_ZCULL_REGION_SIZE_C 0x07c8 +#define NVC397_SET_ZCULL_REGION_SIZE_C_DEPTH 15:0 + +#define NVC397_SET_ZCULL_REGION_PIXEL_OFFSET_C 0x07cc +#define NVC397_SET_ZCULL_REGION_PIXEL_OFFSET_C_DEPTH 15:0 + +#define NVC397_SET_CULL_BEFORE_FETCH 0x07dc +#define NVC397_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE 0:0 +#define NVC397_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_FALSE 0x00000000 +#define NVC397_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_TRUE 0x00000001 + +#define NVC397_SET_ZCULL_REGION_LOCATION 0x07e0 +#define NVC397_SET_ZCULL_REGION_LOCATION_START_ALIQUOT 15:0 +#define NVC397_SET_ZCULL_REGION_LOCATION_ALIQUOT_COUNT 31:16 + +#define NVC397_SET_ZCULL_REGION_ALIQUOTS 0x07e4 +#define NVC397_SET_ZCULL_REGION_ALIQUOTS_PER_LAYER 15:0 + +#define NVC397_SET_ZCULL_STORAGE_A 0x07e8 +#define NVC397_SET_ZCULL_STORAGE_A_ADDRESS_UPPER 7:0 + +#define NVC397_SET_ZCULL_STORAGE_B 0x07ec +#define NVC397_SET_ZCULL_STORAGE_B_ADDRESS_LOWER 31:0 + +#define NVC397_SET_ZCULL_STORAGE_C 0x07f0 +#define 
NVC397_SET_ZCULL_STORAGE_C_LIMIT_ADDRESS_UPPER 7:0 + +#define NVC397_SET_ZCULL_STORAGE_D 0x07f4 +#define NVC397_SET_ZCULL_STORAGE_D_LIMIT_ADDRESS_LOWER 31:0 + +#define NVC397_SET_ZT_READ_ONLY 0x07f8 +#define NVC397_SET_ZT_READ_ONLY_ENABLE_Z 0:0 +#define NVC397_SET_ZT_READ_ONLY_ENABLE_Z_FALSE 0x00000000 +#define NVC397_SET_ZT_READ_ONLY_ENABLE_Z_TRUE 0x00000001 +#define NVC397_SET_ZT_READ_ONLY_ENABLE_STENCIL 4:4 +#define NVC397_SET_ZT_READ_ONLY_ENABLE_STENCIL_FALSE 0x00000000 +#define NVC397_SET_ZT_READ_ONLY_ENABLE_STENCIL_TRUE 0x00000001 + +#define NVC397_SET_COLOR_TARGET_A(j) (0x0800+(j)*64) +#define NVC397_SET_COLOR_TARGET_A_OFFSET_UPPER 7:0 + +#define NVC397_SET_COLOR_TARGET_B(j) (0x0804+(j)*64) +#define NVC397_SET_COLOR_TARGET_B_OFFSET_LOWER 31:0 + +#define NVC397_SET_COLOR_TARGET_WIDTH(j) (0x0808+(j)*64) +#define NVC397_SET_COLOR_TARGET_WIDTH_V 27:0 + +#define NVC397_SET_COLOR_TARGET_HEIGHT(j) (0x080c+(j)*64) +#define NVC397_SET_COLOR_TARGET_HEIGHT_V 16:0 + +#define NVC397_SET_COLOR_TARGET_FORMAT(j) (0x0810+(j)*64) +#define NVC397_SET_COLOR_TARGET_FORMAT_V 7:0 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_DISABLED 0x00000000 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_AS32 0x000000C1 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_AU32 0x000000C2 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_X32 0x000000C4 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_X32 0x000000C5 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_R16_G16_B16_A16 0x000000C6 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RS16_GS16_BS16_AS16 0x000000C8 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RU16_GU16_BU16_AU16 0x000000C9 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RF32_GF32 0x000000CB +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RS32_GS32 0x000000CC +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RU32_GU32 0x000000CD +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE +#define NVC397_SET_COLOR_TARGET_FORMAT_V_A8R8G8B8 0x000000CF +#define NVC397_SET_COLOR_TARGET_FORMAT_V_A8RL8GL8BL8 0x000000D0 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_A2B10G10R10 0x000000D1 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_AU2BU10GU10RU10 0x000000D2 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_A8B8G8R8 0x000000D5 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_A8BL8GL8RL8 0x000000D6 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_AN8BN8GN8RN8 0x000000D7 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_AS8BS8GS8RS8 0x000000D8 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_AU8BU8GU8RU8 0x000000D9 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_R16_G16 0x000000DA +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RN16_GN16 0x000000DB +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RS16_GS16 0x000000DC +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RU16_GU16 0x000000DD +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RF16_GF16 0x000000DE +#define NVC397_SET_COLOR_TARGET_FORMAT_V_A2R10G10B10 0x000000DF +#define NVC397_SET_COLOR_TARGET_FORMAT_V_BF10GF11RF11 0x000000E0 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RS32 0x000000E3 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RU32 0x000000E4 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RF32 0x000000E5 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_X8R8G8B8 0x000000E6 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_X8RL8GL8BL8 0x000000E7 +#define 
NVC397_SET_COLOR_TARGET_FORMAT_V_R5G6B5 0x000000E8 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_A1R5G5B5 0x000000E9 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_G8R8 0x000000EA +#define NVC397_SET_COLOR_TARGET_FORMAT_V_GN8RN8 0x000000EB +#define NVC397_SET_COLOR_TARGET_FORMAT_V_GS8RS8 0x000000EC +#define NVC397_SET_COLOR_TARGET_FORMAT_V_GU8RU8 0x000000ED +#define NVC397_SET_COLOR_TARGET_FORMAT_V_R16 0x000000EE +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RN16 0x000000EF +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RS16 0x000000F0 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RU16 0x000000F1 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RF16 0x000000F2 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_R8 0x000000F3 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RN8 0x000000F4 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RS8 0x000000F5 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RU8 0x000000F6 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_A8 0x000000F7 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_X1R5G5B5 0x000000F8 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_X8B8G8R8 0x000000F9 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_X8BL8GL8RL8 0x000000FA +#define NVC397_SET_COLOR_TARGET_FORMAT_V_Z1R5G5B5 0x000000FB +#define NVC397_SET_COLOR_TARGET_FORMAT_V_O1R5G5B5 0x000000FC +#define NVC397_SET_COLOR_TARGET_FORMAT_V_Z8R8G8B8 0x000000FD +#define NVC397_SET_COLOR_TARGET_FORMAT_V_O8R8G8B8 0x000000FE +#define NVC397_SET_COLOR_TARGET_FORMAT_V_R32 0x000000FF +#define NVC397_SET_COLOR_TARGET_FORMAT_V_A16 0x00000040 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_AF16 0x00000041 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_AF32 0x00000042 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_A8R8 0x00000043 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_R16_A16 0x00000044 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RF16_AF16 0x00000045 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_RF32_AF32 0x00000046 +#define NVC397_SET_COLOR_TARGET_FORMAT_V_B8G8R8A8 0x00000047 + +#define NVC397_SET_COLOR_TARGET_MEMORY(j) (0x0814+(j)*64) +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH 3:0 +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH_ONE_GOB 0x00000000 +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT 7:4 +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_ONE_GOB 0x00000000 +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_TWO_GOBS 0x00000001 +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH 11:8 +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_ONE_GOB 0x00000000 +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_TWO_GOBS 0x00000001 +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_FOUR_GOBS 0x00000002 +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_EIGHT_GOBS 0x00000003 +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVC397_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005 +#define NVC397_SET_COLOR_TARGET_MEMORY_LAYOUT 12:12 +#define NVC397_SET_COLOR_TARGET_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVC397_SET_COLOR_TARGET_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVC397_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL 16:16 +#define NVC397_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000 +#define 
NVC397_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_DEPTH_SIZE 0x00000001 + +#define NVC397_SET_COLOR_TARGET_THIRD_DIMENSION(j) (0x0818+(j)*64) +#define NVC397_SET_COLOR_TARGET_THIRD_DIMENSION_V 27:0 + +#define NVC397_SET_COLOR_TARGET_ARRAY_PITCH(j) (0x081c+(j)*64) +#define NVC397_SET_COLOR_TARGET_ARRAY_PITCH_V 31:0 + +#define NVC397_SET_COLOR_TARGET_LAYER(j) (0x0820+(j)*64) +#define NVC397_SET_COLOR_TARGET_LAYER_OFFSET 15:0 + +#define NVC397_SET_VIEWPORT_SCALE_X(j) (0x0a00+(j)*32) +#define NVC397_SET_VIEWPORT_SCALE_X_V 31:0 + +#define NVC397_SET_VIEWPORT_SCALE_Y(j) (0x0a04+(j)*32) +#define NVC397_SET_VIEWPORT_SCALE_Y_V 31:0 + +#define NVC397_SET_VIEWPORT_SCALE_Z(j) (0x0a08+(j)*32) +#define NVC397_SET_VIEWPORT_SCALE_Z_V 31:0 + +#define NVC397_SET_VIEWPORT_OFFSET_X(j) (0x0a0c+(j)*32) +#define NVC397_SET_VIEWPORT_OFFSET_X_V 31:0 + +#define NVC397_SET_VIEWPORT_OFFSET_Y(j) (0x0a10+(j)*32) +#define NVC397_SET_VIEWPORT_OFFSET_Y_V 31:0 + +#define NVC397_SET_VIEWPORT_OFFSET_Z(j) (0x0a14+(j)*32) +#define NVC397_SET_VIEWPORT_OFFSET_Z_V 31:0 + +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE(j) (0x0a18+(j)*32) +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_X 2:0 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_X 0x00000000 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_X 0x00000001 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Y 0x00000002 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Y 0x00000003 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Z 0x00000004 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Z 0x00000005 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_W 0x00000006 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_W 0x00000007 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Y 6:4 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_X 0x00000000 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_X 0x00000001 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Y 0x00000002 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Y 0x00000003 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Z 0x00000004 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Z 0x00000005 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_W 0x00000006 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_W 0x00000007 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Z 10:8 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_X 0x00000000 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_X 0x00000001 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Y 0x00000002 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Y 0x00000003 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Z 0x00000004 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Z 0x00000005 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_W 0x00000006 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_W 0x00000007 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_W 14:12 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_X 0x00000000 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_X 0x00000001 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Y 0x00000002 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Y 0x00000003 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Z 0x00000004 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Z 0x00000005 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_W 0x00000006 +#define NVC397_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_W 0x00000007 + +#define 
NVC397_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION(j) (0x0a1c+(j)*32) +#define NVC397_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_X_BITS 4:0 +#define NVC397_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_Y_BITS 12:8 + +#define NVC397_SET_VIEWPORT_CLIP_HORIZONTAL(j) (0x0c00+(j)*16) +#define NVC397_SET_VIEWPORT_CLIP_HORIZONTAL_X0 15:0 +#define NVC397_SET_VIEWPORT_CLIP_HORIZONTAL_WIDTH 31:16 + +#define NVC397_SET_VIEWPORT_CLIP_VERTICAL(j) (0x0c04+(j)*16) +#define NVC397_SET_VIEWPORT_CLIP_VERTICAL_Y0 15:0 +#define NVC397_SET_VIEWPORT_CLIP_VERTICAL_HEIGHT 31:16 + +#define NVC397_SET_VIEWPORT_CLIP_MIN_Z(j) (0x0c08+(j)*16) +#define NVC397_SET_VIEWPORT_CLIP_MIN_Z_V 31:0 + +#define NVC397_SET_VIEWPORT_CLIP_MAX_Z(j) (0x0c0c+(j)*16) +#define NVC397_SET_VIEWPORT_CLIP_MAX_Z_V 31:0 + +#define NVC397_SET_WINDOW_CLIP_HORIZONTAL(j) (0x0d00+(j)*8) +#define NVC397_SET_WINDOW_CLIP_HORIZONTAL_XMIN 15:0 +#define NVC397_SET_WINDOW_CLIP_HORIZONTAL_XMAX 31:16 + +#define NVC397_SET_WINDOW_CLIP_VERTICAL(j) (0x0d04+(j)*8) +#define NVC397_SET_WINDOW_CLIP_VERTICAL_YMIN 15:0 +#define NVC397_SET_WINDOW_CLIP_VERTICAL_YMAX 31:16 + +#define NVC397_SET_CLIP_ID_EXTENT_X(j) (0x0d40+(j)*8) +#define NVC397_SET_CLIP_ID_EXTENT_X_MINX 15:0 +#define NVC397_SET_CLIP_ID_EXTENT_X_WIDTH 31:16 + +#define NVC397_SET_CLIP_ID_EXTENT_Y(j) (0x0d44+(j)*8) +#define NVC397_SET_CLIP_ID_EXTENT_Y_MINY 15:0 +#define NVC397_SET_CLIP_ID_EXTENT_Y_HEIGHT 31:16 + +#define NVC397_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK 0x0d60 +#define NVC397_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK_V 10:0 + +#define NVC397_SET_API_VISIBLE_CALL_LIMIT 0x0d64 +#define NVC397_SET_API_VISIBLE_CALL_LIMIT_V 3:0 +#define NVC397_SET_API_VISIBLE_CALL_LIMIT_V__0 0x00000000 +#define NVC397_SET_API_VISIBLE_CALL_LIMIT_V__1 0x00000001 +#define NVC397_SET_API_VISIBLE_CALL_LIMIT_V__2 0x00000002 +#define NVC397_SET_API_VISIBLE_CALL_LIMIT_V__4 0x00000003 +#define NVC397_SET_API_VISIBLE_CALL_LIMIT_V__8 0x00000004 +#define NVC397_SET_API_VISIBLE_CALL_LIMIT_V__16 0x00000005 +#define NVC397_SET_API_VISIBLE_CALL_LIMIT_V__32 0x00000006 +#define NVC397_SET_API_VISIBLE_CALL_LIMIT_V__64 0x00000007 +#define NVC397_SET_API_VISIBLE_CALL_LIMIT_V__128 0x00000008 +#define NVC397_SET_API_VISIBLE_CALL_LIMIT_V_NO_CHECK 0x0000000F + +#define NVC397_SET_STATISTICS_COUNTER 0x0d68 +#define NVC397_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE 0:0 +#define NVC397_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_FALSE 0x00000000 +#define NVC397_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_TRUE 0x00000001 +#define NVC397_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE 1:1 +#define NVC397_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000 +#define NVC397_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001 +#define NVC397_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE 2:2 +#define NVC397_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NVC397_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NVC397_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE 3:3 +#define NVC397_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NVC397_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NVC397_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE 4:4 +#define NVC397_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000 +#define NVC397_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001 +#define NVC397_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE 5:5 
+#define NVC397_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVC397_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVC397_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE 6:6
+#define NVC397_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_FALSE 0x00000000
+#define NVC397_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_TRUE 0x00000001
+#define NVC397_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE 7:7
+#define NVC397_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC397_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC397_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE 8:8
+#define NVC397_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC397_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC397_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE 9:9
+#define NVC397_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC397_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC397_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE 11:11
+#define NVC397_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC397_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC397_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE 12:12
+#define NVC397_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC397_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC397_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE 13:13
+#define NVC397_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC397_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC397_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE 14:14
+#define NVC397_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVC397_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVC397_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE 10:10
+#define NVC397_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_FALSE 0x00000000
+#define NVC397_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_TRUE 0x00000001
+#define NVC397_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE 15:15
+#define NVC397_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_FALSE 0x00000000
+#define NVC397_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_TRUE 0x00000001
+#define NVC397_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE 16:16
+#define NVC397_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE_FALSE 0x00000000
+#define NVC397_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_CLEAR_RECT_HORIZONTAL 0x0d6c
+#define NVC397_SET_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVC397_SET_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVC397_SET_CLEAR_RECT_VERTICAL 0x0d70
+#define NVC397_SET_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVC397_SET_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVC397_SET_VERTEX_ARRAY_START 0x0d74
+#define NVC397_SET_VERTEX_ARRAY_START_V 31:0
+
+#define NVC397_DRAW_VERTEX_ARRAY 0x0d78
+#define NVC397_DRAW_VERTEX_ARRAY_COUNT 31:0
+
+#define NVC397_SET_VIEWPORT_Z_CLIP 0x0d7c
+#define NVC397_SET_VIEWPORT_Z_CLIP_RANGE 0:0
+#define NVC397_SET_VIEWPORT_Z_CLIP_RANGE_NEGATIVE_W_TO_POSITIVE_W 0x00000000
+#define NVC397_SET_VIEWPORT_Z_CLIP_RANGE_ZERO_TO_POSITIVE_W 0x00000001
+
+#define NVC397_SET_COLOR_CLEAR_VALUE(i) (0x0d80+(i)*4)
+#define NVC397_SET_COLOR_CLEAR_VALUE_V 31:0
+
+#define NVC397_SET_Z_CLEAR_VALUE 0x0d90
+#define NVC397_SET_Z_CLEAR_VALUE_V 31:0
+
+#define NVC397_SET_SHADER_CACHE_CONTROL 0x0d94
+#define NVC397_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0
+#define NVC397_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000
+#define NVC397_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001
+
+#define NVC397_FORCE_TRANSITION_TO_BETA 0x0d98
+#define NVC397_FORCE_TRANSITION_TO_BETA_V 0:0
+
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_ENABLE 0x0d9c
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V 0:0
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_FALSE 0x00000000
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_TRUE 0x00000001
+
+#define NVC397_SET_STENCIL_CLEAR_VALUE 0x0da0
+#define NVC397_SET_STENCIL_CLEAR_VALUE_V 7:0
+
+#define NVC397_INVALIDATE_SHADER_CACHES_NO_WFI 0x0da4
+#define NVC397_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0
+#define NVC397_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000
+#define NVC397_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001
+#define NVC397_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4
+#define NVC397_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000
+#define NVC397_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001
+#define NVC397_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12
+#define NVC397_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000
+#define NVC397_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001
+
+#define NVC397_SET_ZCULL_SERIALIZATION 0x0da8
+#define NVC397_SET_ZCULL_SERIALIZATION_ENABLE 0:0
+#define NVC397_SET_ZCULL_SERIALIZATION_ENABLE_FALSE 0x00000000
+#define NVC397_SET_ZCULL_SERIALIZATION_ENABLE_TRUE 0x00000001
+#define NVC397_SET_ZCULL_SERIALIZATION_APPLIED 5:4
+#define NVC397_SET_ZCULL_SERIALIZATION_APPLIED_ALWAYS 0x00000000
+#define NVC397_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z 0x00000001
+#define NVC397_SET_ZCULL_SERIALIZATION_APPLIED_OUT_OF_GAMUT_Z 0x00000002
+#define NVC397_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z_OR_OUT_OF_GAMUT_Z 0x00000003
+
+#define NVC397_SET_FRONT_POLYGON_MODE 0x0dac
+#define NVC397_SET_FRONT_POLYGON_MODE_V 31:0
+#define NVC397_SET_FRONT_POLYGON_MODE_V_POINT 0x00001B00
+#define NVC397_SET_FRONT_POLYGON_MODE_V_LINE 0x00001B01
+#define NVC397_SET_FRONT_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVC397_SET_BACK_POLYGON_MODE 0x0db0
+#define NVC397_SET_BACK_POLYGON_MODE_V 31:0
+#define NVC397_SET_BACK_POLYGON_MODE_V_POINT 0x00001B00
+#define NVC397_SET_BACK_POLYGON_MODE_V_LINE 0x00001B01
+#define NVC397_SET_BACK_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVC397_SET_POLY_SMOOTH 0x0db4
+#define NVC397_SET_POLY_SMOOTH_ENABLE 0:0
+#define NVC397_SET_POLY_SMOOTH_ENABLE_FALSE 0x00000000
+#define NVC397_SET_POLY_SMOOTH_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_ZCULL_DIR_FORMAT 0x0dbc
+#define NVC397_SET_ZCULL_DIR_FORMAT_ZDIR 15:0
+#define NVC397_SET_ZCULL_DIR_FORMAT_ZDIR_LESS 0x00000000
+#define NVC397_SET_ZCULL_DIR_FORMAT_ZDIR_GREATER 0x00000001
+#define NVC397_SET_ZCULL_DIR_FORMAT_ZFORMAT 31:16
+#define NVC397_SET_ZCULL_DIR_FORMAT_ZFORMAT_MSB 0x00000000
+#define NVC397_SET_ZCULL_DIR_FORMAT_ZFORMAT_FP 0x00000001
+#define NVC397_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZTRICK 0x00000002
+#define NVC397_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZF32_1 0x00000003
+
+#define NVC397_SET_POLY_OFFSET_POINT 0x0dc0
+#define NVC397_SET_POLY_OFFSET_POINT_ENABLE 0:0
+#define NVC397_SET_POLY_OFFSET_POINT_ENABLE_FALSE 0x00000000
+#define NVC397_SET_POLY_OFFSET_POINT_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_POLY_OFFSET_LINE 0x0dc4
+#define NVC397_SET_POLY_OFFSET_LINE_ENABLE 0:0
+#define NVC397_SET_POLY_OFFSET_LINE_ENABLE_FALSE 0x00000000
+#define NVC397_SET_POLY_OFFSET_LINE_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_POLY_OFFSET_FILL 0x0dc8
+#define NVC397_SET_POLY_OFFSET_FILL_ENABLE 0:0
+#define NVC397_SET_POLY_OFFSET_FILL_ENABLE_FALSE 0x00000000
+#define NVC397_SET_POLY_OFFSET_FILL_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_PATCH 0x0dcc
+#define NVC397_SET_PATCH_SIZE 7:0
+
+#define NVC397_SET_ITERATED_BLEND 0x0dd0
+#define NVC397_SET_ITERATED_BLEND_ENABLE 0:0
+#define NVC397_SET_ITERATED_BLEND_ENABLE_FALSE 0x00000000
+#define NVC397_SET_ITERATED_BLEND_ENABLE_TRUE 0x00000001
+#define NVC397_SET_ITERATED_BLEND_ALPHA_ENABLE 1:1
+#define NVC397_SET_ITERATED_BLEND_ALPHA_ENABLE_FALSE 0x00000000
+#define NVC397_SET_ITERATED_BLEND_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_ITERATED_BLEND_PASS 0x0dd4
+#define NVC397_SET_ITERATED_BLEND_PASS_COUNT 7:0
+
+#define NVC397_SET_ZCULL_CRITERION 0x0dd8
+#define NVC397_SET_ZCULL_CRITERION_SFUNC 7:0
+#define NVC397_SET_ZCULL_CRITERION_SFUNC_NEVER 0x00000000
+#define NVC397_SET_ZCULL_CRITERION_SFUNC_LESS 0x00000001
+#define NVC397_SET_ZCULL_CRITERION_SFUNC_EQUAL 0x00000002
+#define NVC397_SET_ZCULL_CRITERION_SFUNC_LEQUAL 0x00000003
+#define NVC397_SET_ZCULL_CRITERION_SFUNC_GREATER 0x00000004
+#define NVC397_SET_ZCULL_CRITERION_SFUNC_NOTEQUAL 0x00000005
+#define NVC397_SET_ZCULL_CRITERION_SFUNC_GEQUAL 0x00000006
+#define NVC397_SET_ZCULL_CRITERION_SFUNC_ALWAYS 0x00000007
+#define NVC397_SET_ZCULL_CRITERION_NO_INVALIDATE 8:8
+#define NVC397_SET_ZCULL_CRITERION_NO_INVALIDATE_FALSE 0x00000000
+#define NVC397_SET_ZCULL_CRITERION_NO_INVALIDATE_TRUE 0x00000001
+#define NVC397_SET_ZCULL_CRITERION_FORCE_MATCH 9:9
+#define NVC397_SET_ZCULL_CRITERION_FORCE_MATCH_FALSE 0x00000000
+#define NVC397_SET_ZCULL_CRITERION_FORCE_MATCH_TRUE 0x00000001
+#define NVC397_SET_ZCULL_CRITERION_SREF 23:16
+#define NVC397_SET_ZCULL_CRITERION_SMASK 31:24
+
+#define NVC397_PIXEL_SHADER_BARRIER 0x0de0
+#define NVC397_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE 0:0
+#define NVC397_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_FALSE 0x00000000
+#define NVC397_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_SM_TIMEOUT_INTERVAL 0x0de4
+#define NVC397_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0
+
+#define NVC397_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY 0x0de8
+#define NVC397_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE 0:0
+#define NVC397_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_FALSE 0x00000000
+#define NVC397_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_TRUE 0x00000001
+
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER 0x0df0
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER_V 7:0
+
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION 0x0df4
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC 2:0
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_FALSE 0x00000000
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_TRUE 0x00000001
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_EQ 0x00000002
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_NE 0x00000003
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LT 0x00000004
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LE 0x00000005
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GT 0x00000006
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GE 0x00000007
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION 5:3
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD_PRODUCTS 0x00000000
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUB_PRODUCTS 0x00000001
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MIN 0x00000002
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MAX 0x00000003
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_RCP 0x00000004
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD 0x00000005
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUBTRACT 0x00000006
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT 8:6
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT0 0x00000000
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT1 0x00000001
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT2 0x00000002
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT3 0x00000003
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT4 0x00000004
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT5 0x00000005
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT6 0x00000006
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT7 0x00000007
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT 11:9
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_RGB 0x00000000
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_RGB 0x00000001
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_AAA 0x00000002
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_AAA 0x00000003
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP0_RGB 0x00000004
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP1_RGB 0x00000005
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP2_RGB 0x00000006
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_PBR_RGB 0x00000007
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT 15:12
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO 0x00000000
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE 0x00000001
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_RGB 0x00000002
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_AAA 0x00000003
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_RGB 0x00000005
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_AAA 0x00000006
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP0_RGB 0x00000009
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP1_RGB 0x0000000A
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP2_RGB 0x0000000B
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_PBR_RGB 0x0000000C
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_CONSTANT_RGB 0x0000000D
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO_A_TIMES_B 0x0000000E
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT 18:16
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_RGB 0x00000000
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_RGB 0x00000001
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_AAA 0x00000002
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_AAA 0x00000003
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP0_RGB 0x00000004
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP1_RGB 0x00000005
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP2_RGB 0x00000006
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_PBR_RGB 0x00000007
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT 22:19
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO 0x00000000
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE 0x00000001
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_RGB 0x00000002
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_AAA 0x00000003
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_RGB 0x00000005
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_AAA 0x00000006
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP0_RGB 0x00000009
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP1_RGB 0x0000000A
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP2_RGB 0x0000000B
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_PBR_RGB 0x0000000C
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_CONSTANT_RGB 0x0000000D
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO_C_TIMES_D 0x0000000E
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE 25:23
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RGB 0x00000000
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GBR 0x00000001
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RRR 0x00000002
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GGG 0x00000003
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_BBB 0x00000004
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_R_TO_A 0x00000005
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK 27:26
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_RGB 0x00000000
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_R_ONLY 0x00000001
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_G_ONLY 0x00000002
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_B_ONLY 0x00000003
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT 29:28
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP0 0x00000000
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP1 0x00000001
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP2 0x00000002
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_NONE 0x00000003
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC 31:31
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_FALSE 0x00000000
+#define NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_TRUE 0x00000001
+
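The field defines above use the usual MSB:LSB device-reference notation of NVIDIA's class headers: a field macro expands to a bit-range pair such as 29:28, and sibling macros name its legal values. Below is a minimal sketch, in C, of how one such method word could be packed; the FIELD_* helpers are hypothetical stand-ins for the driver's DRF-style macros (they use the same (1?msb:lsb)/(0?msb:lsb) ternary trick), and the example assumes OPERATION_MIN takes the minimum of the two operand products.

#include <stdint.h>

/* Hypothetical DRF-style helpers (illustrative only, not part of this header).
 * A field macro like NVC397_..._PASS_OUTPUT expands to "29:28"; the ternary
 * trick extracts the MSB and LSB halves of that pair. */
#define FIELD_MSB(f)    (1 ? f)
#define FIELD_LSB(f)    (0 ? f)
#define FIELD_MASK(f)   ((uint32_t)((2ULL << FIELD_MSB(f)) - 1) & ~((1U << FIELD_LSB(f)) - 1))
#define FIELD_NUM(f, n) (((uint32_t)(n) << FIELD_LSB(f)) & FIELD_MASK(f))

/* Pack one word for LOAD_ITERATED_BLEND_INSTRUCTION (method 0x0df4):
 * temp0 = min(srcRGB * 1, dstRGB * 1), assuming OPERATION_MIN computes
 * min(A*B, C*D). Requires the defines above to be in scope. */
static inline uint32_t nvc397_blend_min_src_dst(void)
{
    return FIELD_NUM(NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION,
                     NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MIN)
         | FIELD_NUM(NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT,
                     NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_RGB)
         | FIELD_NUM(NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT,
                     NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE)
         | FIELD_NUM(NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT,
                     NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_RGB)
         | FIELD_NUM(NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT,
                     NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE)
         | FIELD_NUM(NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT,
                     NVC397_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP0);
}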
+#define NVC397_SET_WINDOW_OFFSET_X 0x0df8
+#define NVC397_SET_WINDOW_OFFSET_X_V 16:0
+
+#define NVC397_SET_WINDOW_OFFSET_Y 0x0dfc
+#define NVC397_SET_WINDOW_OFFSET_Y_V 17:0
+
+#define NVC397_SET_SCISSOR_ENABLE(j) (0x0e00+(j)*16)
+#define NVC397_SET_SCISSOR_ENABLE_V 0:0
+#define NVC397_SET_SCISSOR_ENABLE_V_FALSE 0x00000000
+#define NVC397_SET_SCISSOR_ENABLE_V_TRUE 0x00000001
+
+#define NVC397_SET_SCISSOR_HORIZONTAL(j) (0x0e04+(j)*16)
+#define NVC397_SET_SCISSOR_HORIZONTAL_XMIN 15:0
+#define NVC397_SET_SCISSOR_HORIZONTAL_XMAX 31:16
+
+#define NVC397_SET_SCISSOR_VERTICAL(j) (0x0e08+(j)*16)
+#define NVC397_SET_SCISSOR_VERTICAL_YMIN 15:0
+#define NVC397_SET_SCISSOR_VERTICAL_YMAX 31:16
+
+#define NVC397_SET_VPC_PERF_KNOB 0x0f14
+#define NVC397_SET_VPC_PERF_KNOB_CULLED_SMALL_LINES 7:0
+#define NVC397_SET_VPC_PERF_KNOB_CULLED_SMALL_TRIANGLES 15:8
+#define NVC397_SET_VPC_PERF_KNOB_NONCULLED_LINES_AND_POINTS 23:16
+#define NVC397_SET_VPC_PERF_KNOB_NONCULLED_TRIANGLES 31:24
+
+#define NVC397_PM_LOCAL_TRIGGER 0x0f18
+#define NVC397_PM_LOCAL_TRIGGER_BOOKMARK 15:0
+
+#define NVC397_SET_POST_Z_PS_IMASK 0x0f1c
+#define NVC397_SET_POST_Z_PS_IMASK_ENABLE 0:0
+#define NVC397_SET_POST_Z_PS_IMASK_ENABLE_FALSE 0x00000000
+#define NVC397_SET_POST_Z_PS_IMASK_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_CONSTANT_COLOR_RENDERING 0x0f40
+#define NVC397_SET_CONSTANT_COLOR_RENDERING_ENABLE 0:0
+#define NVC397_SET_CONSTANT_COLOR_RENDERING_ENABLE_FALSE 0x00000000
+#define NVC397_SET_CONSTANT_COLOR_RENDERING_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_CONSTANT_COLOR_RENDERING_RED 0x0f44
+#define NVC397_SET_CONSTANT_COLOR_RENDERING_RED_V 31:0
+
+#define NVC397_SET_CONSTANT_COLOR_RENDERING_GREEN 0x0f48
+#define NVC397_SET_CONSTANT_COLOR_RENDERING_GREEN_V 31:0
+
+#define NVC397_SET_CONSTANT_COLOR_RENDERING_BLUE 0x0f4c
+#define NVC397_SET_CONSTANT_COLOR_RENDERING_BLUE_V 31:0
+
+#define NVC397_SET_CONSTANT_COLOR_RENDERING_ALPHA 0x0f50
+#define NVC397_SET_CONSTANT_COLOR_RENDERING_ALPHA_V 31:0
+
+#define NVC397_SET_BACK_STENCIL_FUNC_REF 0x0f54
+#define NVC397_SET_BACK_STENCIL_FUNC_REF_V 7:0
+
+#define NVC397_SET_BACK_STENCIL_MASK 0x0f58
+#define NVC397_SET_BACK_STENCIL_MASK_V 7:0
+
+#define NVC397_SET_BACK_STENCIL_FUNC_MASK 0x0f5c
+#define NVC397_SET_BACK_STENCIL_FUNC_MASK_V 7:0
+
+#define NVC397_SET_VERTEX_STREAM_SUBSTITUTE_A 0x0f84
+#define NVC397_SET_VERTEX_STREAM_SUBSTITUTE_A_ADDRESS_UPPER 7:0
+
+#define NVC397_SET_VERTEX_STREAM_SUBSTITUTE_B 0x0f88
+#define NVC397_SET_VERTEX_STREAM_SUBSTITUTE_B_ADDRESS_LOWER 31:0
+
+#define NVC397_SET_LINE_MODE_POLYGON_CLIP 0x0f8c
+#define NVC397_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE 0:0
+#define NVC397_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DRAW_LINE 0x00000000
+#define NVC397_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DO_NOT_DRAW_LINE 0x00000001
+
+#define NVC397_SET_SINGLE_CT_WRITE_CONTROL 0x0f90
+#define NVC397_SET_SINGLE_CT_WRITE_CONTROL_ENABLE 0:0
+#define NVC397_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_FALSE 0x00000000
+#define NVC397_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_VTG_WARP_WATERMARKS 0x0f98
+#define NVC397_SET_VTG_WARP_WATERMARKS_LOW 15:0
+#define NVC397_SET_VTG_WARP_WATERMARKS_HIGH 31:16
+
+#define NVC397_SET_DEPTH_BOUNDS_MIN 0x0f9c
+#define NVC397_SET_DEPTH_BOUNDS_MIN_V 31:0
+
+#define NVC397_SET_DEPTH_BOUNDS_MAX 0x0fa0
+#define NVC397_SET_DEPTH_BOUNDS_MAX_V 31:0
+
+#define NVC397_SET_SAMPLE_MASK 0x0fa4
+#define NVC397_SET_SAMPLE_MASK_RASTER_OUT_ENABLE 0:0
+#define NVC397_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_FALSE 0x00000000
+#define NVC397_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_TRUE 0x00000001
+#define NVC397_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE 4:4
+#define NVC397_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_FALSE 0x00000000
+#define NVC397_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_COLOR_TARGET_SAMPLE_MASK 0x0fa8
+#define NVC397_SET_COLOR_TARGET_SAMPLE_MASK_V 15:0
+
+#define NVC397_SET_CT_MRT_ENABLE 0x0fac
+#define NVC397_SET_CT_MRT_ENABLE_V 0:0
+#define NVC397_SET_CT_MRT_ENABLE_V_FALSE 0x00000000
+#define NVC397_SET_CT_MRT_ENABLE_V_TRUE 0x00000001
+
+#define NVC397_SET_NONMULTISAMPLED_Z 0x0fb0
+#define NVC397_SET_NONMULTISAMPLED_Z_V 0:0
+#define NVC397_SET_NONMULTISAMPLED_Z_V_PER_SAMPLE 0x00000000
+#define NVC397_SET_NONMULTISAMPLED_Z_V_AT_PIXEL_CENTER 0x00000001
+
+#define NVC397_SET_TIR 0x0fb4
+#define NVC397_SET_TIR_MODE 1:0
+#define NVC397_SET_TIR_MODE_DISABLED 0x00000000
+#define NVC397_SET_TIR_MODE_RASTER_N_TARGET_M 0x00000001
+
+#define NVC397_SET_ANTI_ALIAS_RASTER 0x0fb8
+#define NVC397_SET_ANTI_ALIAS_RASTER_SAMPLES 2:0
+#define NVC397_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_1X1 0x00000000
+#define NVC397_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X2 0x00000002
+#define NVC397_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVC397_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVC397_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X4 0x00000006
+
+#define NVC397_SET_SAMPLE_MASK_X0_Y0 0x0fbc
+#define NVC397_SET_SAMPLE_MASK_X0_Y0_V 15:0
+
+#define NVC397_SET_SAMPLE_MASK_X1_Y0 0x0fc0
+#define NVC397_SET_SAMPLE_MASK_X1_Y0_V 15:0
+
+#define NVC397_SET_SAMPLE_MASK_X0_Y1 0x0fc4
+#define NVC397_SET_SAMPLE_MASK_X0_Y1_V 15:0
+
+#define NVC397_SET_SAMPLE_MASK_X1_Y1 0x0fc8
+#define NVC397_SET_SAMPLE_MASK_X1_Y1_V 15:0
+
+#define NVC397_SET_SURFACE_CLIP_ID_MEMORY_A 0x0fcc
+#define NVC397_SET_SURFACE_CLIP_ID_MEMORY_A_OFFSET_UPPER 7:0
+
+#define NVC397_SET_SURFACE_CLIP_ID_MEMORY_B 0x0fd0
+#define NVC397_SET_SURFACE_CLIP_ID_MEMORY_B_OFFSET_LOWER 31:0
+
+#define NVC397_SET_TIR_MODULATION 0x0fd4
+#define NVC397_SET_TIR_MODULATION_COMPONENT_SELECT 1:0
+#define NVC397_SET_TIR_MODULATION_COMPONENT_SELECT_NO_MODULATION 0x00000000
+#define NVC397_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGB 0x00000001
+#define NVC397_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_ALPHA_ONLY 0x00000002
+#define NVC397_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGBA 0x00000003
+
+#define NVC397_SET_TIR_MODULATION_FUNCTION 0x0fd8
+#define NVC397_SET_TIR_MODULATION_FUNCTION_SELECT 0:0
+#define NVC397_SET_TIR_MODULATION_FUNCTION_SELECT_LINEAR 0x00000000
+#define NVC397_SET_TIR_MODULATION_FUNCTION_SELECT_TABLE 0x00000001
+
+#define NVC397_SET_BLEND_OPT_CONTROL 0x0fdc
+#define NVC397_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS 0:0
+#define NVC397_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_FALSE 0x00000000
+#define NVC397_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_TRUE 0x00000001
+
+#define NVC397_SET_ZT_A 0x0fe0
+#define NVC397_SET_ZT_A_OFFSET_UPPER 7:0
+
+#define NVC397_SET_ZT_B 0x0fe4
+#define NVC397_SET_ZT_B_OFFSET_LOWER 31:0
+
+#define NVC397_SET_ZT_FORMAT 0x0fe8
+#define NVC397_SET_ZT_FORMAT_V 4:0
+#define NVC397_SET_ZT_FORMAT_V_Z16 0x00000013
+#define NVC397_SET_ZT_FORMAT_V_Z24S8 0x00000014
+#define NVC397_SET_ZT_FORMAT_V_X8Z24 0x00000015
+#define NVC397_SET_ZT_FORMAT_V_S8Z24 0x00000016
+#define NVC397_SET_ZT_FORMAT_V_S8 0x00000017
+#define NVC397_SET_ZT_FORMAT_V_V8Z24 0x00000018
+#define NVC397_SET_ZT_FORMAT_V_ZF32 0x0000000A
+#define NVC397_SET_ZT_FORMAT_V_ZF32_X24S8 0x00000019
+#define NVC397_SET_ZT_FORMAT_V_X8Z24_X16V8S8 0x0000001D
+#define NVC397_SET_ZT_FORMAT_V_ZF32_X16V8X8 0x0000001E
+#define NVC397_SET_ZT_FORMAT_V_ZF32_X16V8S8 0x0000001F
+
+#define NVC397_SET_ZT_BLOCK_SIZE 0x0fec
+#define NVC397_SET_ZT_BLOCK_SIZE_WIDTH 3:0
+#define NVC397_SET_ZT_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVC397_SET_ZT_BLOCK_SIZE_HEIGHT 7:4
+#define NVC397_SET_ZT_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVC397_SET_ZT_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVC397_SET_ZT_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC397_SET_ZT_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC397_SET_ZT_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC397_SET_ZT_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC397_SET_ZT_BLOCK_SIZE_DEPTH 11:8
+#define NVC397_SET_ZT_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+
+#define NVC397_SET_ZT_ARRAY_PITCH 0x0ff0
+#define NVC397_SET_ZT_ARRAY_PITCH_V 31:0
+
+#define NVC397_SET_SURFACE_CLIP_HORIZONTAL 0x0ff4
+#define NVC397_SET_SURFACE_CLIP_HORIZONTAL_X 15:0
+#define NVC397_SET_SURFACE_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVC397_SET_SURFACE_CLIP_VERTICAL 0x0ff8
+#define NVC397_SET_SURFACE_CLIP_VERTICAL_Y 15:0
+#define NVC397_SET_SURFACE_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS 0x1000
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE 0:0
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_FALSE 0x00000000
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_TRUE 0x00000001
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY 5:4
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC397_SET_VIEWPORT_MULTICAST 0x1004
+#define NVC397_SET_VIEWPORT_MULTICAST_ORDER 0:0
+#define NVC397_SET_VIEWPORT_MULTICAST_ORDER_VIEWPORT_ORDER 0x00000000
+#define NVC397_SET_VIEWPORT_MULTICAST_ORDER_PRIMITIVE_ORDER 0x00000001
+
+#define NVC397_SET_TESSELLATION_CUT_HEIGHT 0x1008
+#define NVC397_SET_TESSELLATION_CUT_HEIGHT_V 4:0
+
+#define NVC397_SET_MAX_GS_INSTANCES_PER_TASK 0x100c
+#define NVC397_SET_MAX_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVC397_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK 0x1010
+#define NVC397_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK_V 15:0
+
+#define NVC397_SET_RESERVED_SW_METHOD00 0x1014
+#define NVC397_SET_RESERVED_SW_METHOD00_V 31:0
+
+#define NVC397_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER 0x1018
+#define NVC397_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVC397_SET_BETA_CB_STORAGE_CONSTRAINT 0x101c
+#define NVC397_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVC397_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVC397_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER 0x1020
+#define NVC397_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVC397_SET_ALPHA_CB_STORAGE_CONSTRAINT 0x1024
+#define NVC397_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVC397_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVC397_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_RESERVED_SW_METHOD01 0x1028
+#define NVC397_SET_RESERVED_SW_METHOD01_V 31:0
+
+#define NVC397_SET_RESERVED_SW_METHOD02 0x102c
+#define NVC397_SET_RESERVED_SW_METHOD02_V 31:0
+
+#define NVC397_SET_TIR_MODULATION_COEFFICIENT_TABLE(i) (0x1030+(i)*4)
+#define NVC397_SET_TIR_MODULATION_COEFFICIENT_TABLE_V0 7:0
+#define NVC397_SET_TIR_MODULATION_COEFFICIENT_TABLE_V1 15:8
+#define NVC397_SET_TIR_MODULATION_COEFFICIENT_TABLE_V2 23:16
+#define NVC397_SET_TIR_MODULATION_COEFFICIENT_TABLE_V3 31:24
+
+#define NVC397_SET_SPARE_NOOP01 0x1044
+#define NVC397_SET_SPARE_NOOP01_V 31:0
+
+#define NVC397_SET_SPARE_NOOP02 0x1048
+#define NVC397_SET_SPARE_NOOP02_V 31:0
+
+#define NVC397_SET_SPARE_NOOP03 0x104c
+#define NVC397_SET_SPARE_NOOP03_V 31:0
+
+#define NVC397_SET_SPARE_NOOP04 0x1050
+#define NVC397_SET_SPARE_NOOP04_V 31:0
+
+#define NVC397_SET_SPARE_NOOP05 0x1054
+#define NVC397_SET_SPARE_NOOP05_V 31:0
+
+#define NVC397_SET_SPARE_NOOP06 0x1058
+#define NVC397_SET_SPARE_NOOP06_V 31:0
+
+#define NVC397_SET_SPARE_NOOP07 0x105c
+#define NVC397_SET_SPARE_NOOP07_V 31:0
+
+#define NVC397_SET_SPARE_NOOP08 0x1060
+#define NVC397_SET_SPARE_NOOP08_V 31:0
+
+#define NVC397_SET_SPARE_NOOP09 0x1064
+#define NVC397_SET_SPARE_NOOP09_V 31:0
+
+#define NVC397_SET_SPARE_NOOP10 0x1068
+#define NVC397_SET_SPARE_NOOP10_V 31:0
+
+#define NVC397_SET_SPARE_NOOP11 0x106c
+#define NVC397_SET_SPARE_NOOP11_V 31:0
+
+#define NVC397_SET_SPARE_NOOP12 0x1070
+#define NVC397_SET_SPARE_NOOP12_V 31:0
+
+#define NVC397_SET_SPARE_NOOP13 0x1074
+#define NVC397_SET_SPARE_NOOP13_V 31:0
+
+#define NVC397_SET_SPARE_NOOP14 0x1078
+#define NVC397_SET_SPARE_NOOP14_V 31:0
+
+#define NVC397_SET_SPARE_NOOP15 0x107c
+#define NVC397_SET_SPARE_NOOP15_V 31:0
+
+#define NVC397_SET_RESERVED_SW_METHOD03 0x10b0
+#define NVC397_SET_RESERVED_SW_METHOD03_V 31:0
+
+#define NVC397_SET_RESERVED_SW_METHOD04 0x10b4
+#define NVC397_SET_RESERVED_SW_METHOD04_V 31:0
+
+#define NVC397_SET_RESERVED_SW_METHOD05 0x10b8
+#define NVC397_SET_RESERVED_SW_METHOD05_V 31:0
+
+#define NVC397_SET_RESERVED_SW_METHOD06 0x10bc
+#define NVC397_SET_RESERVED_SW_METHOD06_V 31:0
+
+#define NVC397_SET_RESERVED_SW_METHOD07 0x10c0
+#define NVC397_SET_RESERVED_SW_METHOD07_V 31:0
+
+#define NVC397_SET_RESERVED_SW_METHOD08 0x10c4
+#define NVC397_SET_RESERVED_SW_METHOD08_V 31:0
+
+#define NVC397_SET_RESERVED_SW_METHOD09 0x10c8
+#define NVC397_SET_RESERVED_SW_METHOD09_V 31:0
+
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_UNORM8 0x10cc
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED 23:16
+
+#define NVC397_SET_RESERVED_SW_METHOD10 0x10d0
+#define NVC397_SET_RESERVED_SW_METHOD10_V 31:0
+
+#define NVC397_SET_RESERVED_SW_METHOD11 0x10d4
+#define NVC397_SET_RESERVED_SW_METHOD11_V 31:0
+
+#define NVC397_SET_RESERVED_SW_METHOD12 0x10d8
+#define NVC397_SET_RESERVED_SW_METHOD12_V 31:0
+
+#define NVC397_SET_RESERVED_SW_METHOD13 0x10dc
+#define NVC397_SET_RESERVED_SW_METHOD13_V 31:0
+
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_UNORM10 0x10e0
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED 23:16
+
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_UNORM16 0x10e4
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED 23:16
+
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_FP11 0x10e8
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED_ALL_HIT_ONCE 5:0
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED 21:16
+
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_FP16 0x10ec
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED 23:16
+
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_SRGB8 0x10f0
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC397_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED 23:16
+
+#define NVC397_UNBIND_ALL 0x10f4
+#define NVC397_UNBIND_ALL_CONSTANT_BUFFERS 8:8
+#define NVC397_UNBIND_ALL_CONSTANT_BUFFERS_FALSE 0x00000000
+#define NVC397_UNBIND_ALL_CONSTANT_BUFFERS_TRUE 0x00000001
+
+#define NVC397_SET_CLEAR_SURFACE_CONTROL 0x10f8
+#define NVC397_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK 0:0
+#define NVC397_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_FALSE 0x00000000
+#define NVC397_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_TRUE 0x00000001
+#define NVC397_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT 4:4
+#define NVC397_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_FALSE 0x00000000
+#define NVC397_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_TRUE 0x00000001
+#define NVC397_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0 8:8
+#define NVC397_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_FALSE 0x00000000
+#define NVC397_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_TRUE 0x00000001
+#define NVC397_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0 12:12
+#define NVC397_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_FALSE 0x00000000
+#define NVC397_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_TRUE 0x00000001
+
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS 0x10fc
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY 5:4
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC397_SET_RESERVED_SW_METHOD14 0x1100
+#define NVC397_SET_RESERVED_SW_METHOD14_V 31:0
+
+#define NVC397_SET_RESERVED_SW_METHOD15 0x1104
+#define NVC397_SET_RESERVED_SW_METHOD15_V 31:0
+
+#define NVC397_NO_OPERATION_DATA_HI 0x110c
+#define NVC397_NO_OPERATION_DATA_HI_V 31:0
+
+#define NVC397_SET_DEPTH_BIAS_CONTROL 0x1110
+#define NVC397_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT 0:0
+#define NVC397_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_FALSE 0x00000000
+#define NVC397_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_TRUE 0x00000001
+
+#define NVC397_PM_TRIGGER_END 0x1114
+#define NVC397_PM_TRIGGER_END_V 31:0
+
+#define NVC397_SET_VERTEX_ID_BASE 0x1118
+#define NVC397_SET_VERTEX_ID_BASE_V 31:0
+
+#define NVC397_SET_STENCIL_COMPRESSION 0x111c
+#define NVC397_SET_STENCIL_COMPRESSION_ENABLE 0:0
+#define NVC397_SET_STENCIL_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVC397_SET_STENCIL_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A(i) (0x1120+(i)*4)
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0 0:0
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1 1:1
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2 2:2
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3 3:3
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0 4:4
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1 5:5
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2 6:6
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3 7:7
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0 8:8
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1 9:9
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2 10:10
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3 11:11
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0 12:12
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1 13:13
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2 14:14
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3 15:15
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0 16:16
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1 17:17
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2 18:18
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3 19:19
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0 20:20
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1 21:21
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2 22:22
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3 23:23
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0 24:24
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1 25:25
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2 26:26
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3 27:27
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0 28:28
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1 29:29
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_TRUE 0x00000001
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2 30:30
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_TRUE 0x00000001
+#define
NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3 31:31 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_TRUE 0x00000001 + +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B(i) (0x1128+(i)*4) +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0 0:0 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1 1:1 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2 2:2 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3 3:3 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0 4:4 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1 5:5 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2 6:6 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3 7:7 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0 8:8 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1 9:9 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2 10:10 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3 11:11 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0 12:12 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_TRUE 0x00000001 +#define 
NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1 13:13 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2 14:14 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3 15:15 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0 16:16 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1 17:17 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2 18:18 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3 19:19 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0 20:20 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1 21:21 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2 22:22 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3 23:23 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0 24:24 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1 25:25 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2 26:26 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3 27:27 +#define 
NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0 28:28 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1 29:29 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2 30:30 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_TRUE 0x00000001 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3 31:31 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_FALSE 0x00000000 +#define NVC397_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_TRUE 0x00000001 + +#define NVC397_SET_TIR_CONTROL 0x1130 +#define NVC397_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES 0:0 +#define NVC397_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_DISABLE 0x00000000 +#define NVC397_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_ENABLE 0x00000001 +#define NVC397_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES 4:4 +#define NVC397_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_DISABLE 0x00000000 +#define NVC397_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_ENABLE 0x00000001 +#define NVC397_SET_TIR_CONTROL_REDUCE_COVERAGE 1:1 +#define NVC397_SET_TIR_CONTROL_REDUCE_COVERAGE_DISABLE 0x00000000 +#define NVC397_SET_TIR_CONTROL_REDUCE_COVERAGE_ENABLE 0x00000001 + +#define NVC397_SET_MUTABLE_METHOD_CONTROL 0x1134 +#define NVC397_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT 0:0 +#define NVC397_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_FALSE 0x00000000 +#define NVC397_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_TRUE 0x00000001 + +#define NVC397_SET_POST_PS_INITIAL_COVERAGE 0x1138 +#define NVC397_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE 0:0 +#define NVC397_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_FALSE 0x00000000 +#define NVC397_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_TRUE 0x00000001 + +#define NVC397_SET_FILL_VIA_TRIANGLE 0x113c +#define NVC397_SET_FILL_VIA_TRIANGLE_MODE 1:0 +#define NVC397_SET_FILL_VIA_TRIANGLE_MODE_DISABLED 0x00000000 +#define NVC397_SET_FILL_VIA_TRIANGLE_MODE_FILL_ALL 0x00000001 +#define NVC397_SET_FILL_VIA_TRIANGLE_MODE_FILL_BBOX 0x00000002 + +#define NVC397_SET_BLEND_PER_FORMAT_ENABLE 0x1140 +#define NVC397_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16 4:4 +#define NVC397_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_FALSE 0x00000000 +#define NVC397_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_TRUE 0x00000001 + +#define NVC397_FLUSH_PENDING_WRITES 0x1144 +#define NVC397_FLUSH_PENDING_WRITES_SM_DOES_GLOBAL_STORE 0:0 + +#define NVC397_SET_VERTEX_ATTRIBUTE_A(i) (0x1160+(i)*4) +#define NVC397_SET_VERTEX_ATTRIBUTE_A_STREAM 4:0 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_SOURCE 6:6 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_SOURCE_ACTIVE 0x00000000 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_SOURCE_INACTIVE 0x00000001 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_OFFSET 20:7 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS 26:21 
+#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NVC397_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8 0x00000034 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE 29:27 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SNORM 0x00000001 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UNORM 0x00000002 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SINT 0x00000003 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UINT 0x00000004 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_USCALED 0x00000005 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SSCALED 0x00000006 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT 0x00000007 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B 31:31 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_FALSE 0x00000000 +#define NVC397_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_TRUE 0x00000001 + +#define NVC397_SET_VERTEX_ATTRIBUTE_B(i) (0x11a0+(i)*4) +#define NVC397_SET_VERTEX_ATTRIBUTE_B_STREAM 4:0 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_SOURCE 6:6 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_SOURCE_ACTIVE 0x00000000 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_SOURCE_INACTIVE 0x00000001 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_OFFSET 20:7 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS 26:21 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_B10G11R11 
0x00000031 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NVC397_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8 0x00000034 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE 29:27 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SNORM 0x00000001 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UNORM 0x00000002 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SINT 0x00000003 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UINT 0x00000004 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_USCALED 0x00000005 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SSCALED 0x00000006 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_FLOAT 0x00000007 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B 31:31 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_FALSE 0x00000000 +#define NVC397_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_TRUE 0x00000001 + +#define NVC397_SET_ANTI_ALIAS_SAMPLE_POSITIONS(i) (0x11e0+(i)*4) +#define NVC397_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X0 3:0 +#define NVC397_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y0 7:4 +#define NVC397_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X1 11:8 +#define NVC397_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y1 15:12 +#define NVC397_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X2 19:16 +#define NVC397_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y2 23:20 +#define NVC397_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X3 27:24 +#define NVC397_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y3 31:28 + +#define NVC397_SET_OFFSET_RENDER_TARGET_INDEX 0x11f0 +#define NVC397_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX 0:0 +#define NVC397_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_FALSE 0x00000000 +#define NVC397_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_TRUE 0x00000001 + +#define NVC397_FORCE_HEAVYWEIGHT_METHOD_SYNC 0x11f4 +#define NVC397_FORCE_HEAVYWEIGHT_METHOD_SYNC_V 31:0 + +#define NVC397_SET_COVERAGE_TO_COLOR 0x11f8 +#define NVC397_SET_COVERAGE_TO_COLOR_ENABLE 0:0 +#define NVC397_SET_COVERAGE_TO_COLOR_ENABLE_FALSE 0x00000000 +#define NVC397_SET_COVERAGE_TO_COLOR_ENABLE_TRUE 0x00000001 +#define NVC397_SET_COVERAGE_TO_COLOR_CT_SELECT 6:4 + +#define NVC397_DECOMPRESS_ZETA_SURFACE 0x11fc +#define NVC397_DECOMPRESS_ZETA_SURFACE_Z_ENABLE 0:0 +#define NVC397_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_FALSE 0x00000000 +#define NVC397_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_TRUE 0x00000001 +#define NVC397_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE 4:4 +#define NVC397_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_FALSE 0x00000000 +#define NVC397_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_TRUE 0x00000001 + +#define NVC397_SET_SCREEN_STATE_MASK 0x1204 +#define NVC397_SET_SCREEN_STATE_MASK_MASK 3:0 + +#define NVC397_SET_ZT_SPARSE 0x1208 +#define NVC397_SET_ZT_SPARSE_ENABLE 0:0 +#define NVC397_SET_ZT_SPARSE_ENABLE_FALSE 0x00000000 +#define NVC397_SET_ZT_SPARSE_ENABLE_TRUE 0x00000001 +#define NVC397_SET_ZT_SPARSE_UNMAPPED_COMPARE 1:1 +#define NVC397_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_UNMAPPED_0 0x00000000 
+#define NVC397_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_FAIL_ALWAYS 0x00000001 + +#define NVC397_INVALIDATE_SAMPLER_CACHE_ALL 0x120c +#define NVC397_INVALIDATE_SAMPLER_CACHE_ALL_V 0:0 + +#define NVC397_INVALIDATE_TEXTURE_HEADER_CACHE_ALL 0x1210 +#define NVC397_INVALIDATE_TEXTURE_HEADER_CACHE_ALL_V 0:0 + +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST 0x1214 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX 15:0 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT 27:16 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT 0x1218 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_START_INDEX 15:0 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define 
NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC397_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NVC397_SET_CT_SELECT 0x121c +#define NVC397_SET_CT_SELECT_TARGET_COUNT 3:0 +#define NVC397_SET_CT_SELECT_TARGET0 6:4 +#define NVC397_SET_CT_SELECT_TARGET1 9:7 +#define NVC397_SET_CT_SELECT_TARGET2 12:10 +#define NVC397_SET_CT_SELECT_TARGET3 15:13 +#define NVC397_SET_CT_SELECT_TARGET4 18:16 +#define NVC397_SET_CT_SELECT_TARGET5 21:19 +#define NVC397_SET_CT_SELECT_TARGET6 24:22 +#define NVC397_SET_CT_SELECT_TARGET7 27:25 + +#define NVC397_SET_COMPRESSION_THRESHOLD 0x1220 +#define NVC397_SET_COMPRESSION_THRESHOLD_SAMPLES 3:0 +#define NVC397_SET_COMPRESSION_THRESHOLD_SAMPLES__0 0x00000000 +#define NVC397_SET_COMPRESSION_THRESHOLD_SAMPLES__1 0x00000001 +#define NVC397_SET_COMPRESSION_THRESHOLD_SAMPLES__2 0x00000002 +#define NVC397_SET_COMPRESSION_THRESHOLD_SAMPLES__4 0x00000003 +#define NVC397_SET_COMPRESSION_THRESHOLD_SAMPLES__8 0x00000004 +#define NVC397_SET_COMPRESSION_THRESHOLD_SAMPLES__16 0x00000005 +#define NVC397_SET_COMPRESSION_THRESHOLD_SAMPLES__32 0x00000006 +#define NVC397_SET_COMPRESSION_THRESHOLD_SAMPLES__64 0x00000007 +#define NVC397_SET_COMPRESSION_THRESHOLD_SAMPLES__128 0x00000008 +#define NVC397_SET_COMPRESSION_THRESHOLD_SAMPLES__256 0x00000009 +#define NVC397_SET_COMPRESSION_THRESHOLD_SAMPLES__512 0x0000000A +#define NVC397_SET_COMPRESSION_THRESHOLD_SAMPLES__1024 0x0000000B +#define NVC397_SET_COMPRESSION_THRESHOLD_SAMPLES__2048 0x0000000C + +#define NVC397_SET_PIXEL_SHADER_INTERLOCK_CONTROL 0x1224 +#define NVC397_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE 1:0 +#define NVC397_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_NO_CONFLICT_DETECT 0x00000000 +#define NVC397_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_SAMPLE 0x00000001 +#define NVC397_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_PIXEL 0x00000002 +#define NVC397_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE 2:2 +#define NVC397_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_16X16 0x00000000 +#define NVC397_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_8X8 0x00000001 +#define NVC397_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER 3:3 +#define NVC397_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_ORDERED 0x00000000 +#define NVC397_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_UNORDERED 0x00000001 + +#define NVC397_SET_ZT_SIZE_A 0x1228 +#define NVC397_SET_ZT_SIZE_A_WIDTH 27:0 + +#define NVC397_SET_ZT_SIZE_B 0x122c +#define NVC397_SET_ZT_SIZE_B_HEIGHT 17:0 + +#define NVC397_SET_ZT_SIZE_C 0x1230 +#define NVC397_SET_ZT_SIZE_C_THIRD_DIMENSION 15:0 +#define NVC397_SET_ZT_SIZE_C_CONTROL 16:16 +#define NVC397_SET_ZT_SIZE_C_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000 +#define NVC397_SET_ZT_SIZE_C_CONTROL_ARRAY_SIZE_IS_ONE 0x00000001 + +#define NVC397_SET_SAMPLER_BINDING 0x1234 +#define NVC397_SET_SAMPLER_BINDING_V 0:0 +#define NVC397_SET_SAMPLER_BINDING_V_INDEPENDENTLY 0x00000000 +#define NVC397_SET_SAMPLER_BINDING_V_VIA_HEADER_BINDING 0x00000001 + +#define NVC397_DRAW_AUTO 0x123c +#define NVC397_DRAW_AUTO_BYTE_COUNT 31:0 + +#define NVC397_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK(i) (0x1240+(i)*4) +#define NVC397_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK_V 31:0 + +#define NVC397_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE 
0x1260 +#define NVC397_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_INDEX 7:0 +#define NVC397_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_VALUE 23:8 + +#define NVC397_SET_BACK_END_COPY_A 0x1264 +#define NVC397_SET_BACK_END_COPY_A_DWORDS 7:0 +#define NVC397_SET_BACK_END_COPY_A_SATURATE32_ENABLE 8:8 +#define NVC397_SET_BACK_END_COPY_A_SATURATE32_ENABLE_FALSE 0x00000000 +#define NVC397_SET_BACK_END_COPY_A_SATURATE32_ENABLE_TRUE 0x00000001 +#define NVC397_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE 12:12 +#define NVC397_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_FALSE 0x00000000 +#define NVC397_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_TRUE 0x00000001 + +#define NVC397_SET_BACK_END_COPY_B 0x1268 +#define NVC397_SET_BACK_END_COPY_B_SRC_ADDRESS_UPPER 7:0 + +#define NVC397_SET_BACK_END_COPY_C 0x126c +#define NVC397_SET_BACK_END_COPY_C_SRC_ADDRESS_LOWER 31:0 + +#define NVC397_SET_BACK_END_COPY_D 0x1270 +#define NVC397_SET_BACK_END_COPY_D_DEST_ADDRESS_UPPER 7:0 + +#define NVC397_SET_BACK_END_COPY_E 0x1274 +#define NVC397_SET_BACK_END_COPY_E_DEST_ADDRESS_LOWER 31:0 + +#define NVC397_SET_CIRCULAR_BUFFER_SIZE 0x1280 +#define NVC397_SET_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 19:0 + +#define NVC397_SET_VTG_REGISTER_WATERMARKS 0x1284 +#define NVC397_SET_VTG_REGISTER_WATERMARKS_LOW 15:0 +#define NVC397_SET_VTG_REGISTER_WATERMARKS_HIGH 31:16 + +#define NVC397_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVC397_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NVC397_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC397_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC397_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS 0x1290 +#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY 5:4 +#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC397_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE 0x12a4 +#define NVC397_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE_V 31:0 + +#define NVC397_CLEAR_ZCULL_REGION 0x12c8 +#define NVC397_CLEAR_ZCULL_REGION_Z_ENABLE 0:0 +#define NVC397_CLEAR_ZCULL_REGION_Z_ENABLE_FALSE 0x00000000 +#define NVC397_CLEAR_ZCULL_REGION_Z_ENABLE_TRUE 0x00000001 +#define NVC397_CLEAR_ZCULL_REGION_STENCIL_ENABLE 4:4 +#define NVC397_CLEAR_ZCULL_REGION_STENCIL_ENABLE_FALSE 0x00000000 +#define NVC397_CLEAR_ZCULL_REGION_STENCIL_ENABLE_TRUE 0x00000001 +#define NVC397_CLEAR_ZCULL_REGION_USE_CLEAR_RECT 1:1 +#define NVC397_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_FALSE 0x00000000 +#define NVC397_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_TRUE 0x00000001 +#define NVC397_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX 2:2 +#define NVC397_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_FALSE 0x00000000 +#define NVC397_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_TRUE 0x00000001 +#define NVC397_CLEAR_ZCULL_REGION_RT_ARRAY_INDEX 20:5 +#define NVC397_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE 3:3 +#define NVC397_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_FALSE 0x00000000 +#define NVC397_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_TRUE 0x00000001 + +#define NVC397_SET_DEPTH_TEST 0x12cc +#define NVC397_SET_DEPTH_TEST_ENABLE 0:0 +#define NVC397_SET_DEPTH_TEST_ENABLE_FALSE 0x00000000 +#define NVC397_SET_DEPTH_TEST_ENABLE_TRUE 0x00000001 + +#define 
NVC397_SET_FILL_MODE 0x12d0 +#define NVC397_SET_FILL_MODE_V 31:0 +#define NVC397_SET_FILL_MODE_V_POINT 0x00000001 +#define NVC397_SET_FILL_MODE_V_WIREFRAME 0x00000002 +#define NVC397_SET_FILL_MODE_V_SOLID 0x00000003 + +#define NVC397_SET_SHADE_MODE 0x12d4 +#define NVC397_SET_SHADE_MODE_V 31:0 +#define NVC397_SET_SHADE_MODE_V_FLAT 0x00000001 +#define NVC397_SET_SHADE_MODE_V_GOURAUD 0x00000002 +#define NVC397_SET_SHADE_MODE_V_OGL_FLAT 0x00001D00 +#define NVC397_SET_SHADE_MODE_V_OGL_SMOOTH 0x00001D01 + +#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS 0x12d8 +#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY 5:4 +#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS 0x12dc +#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY 5:4 +#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC397_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC397_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL 0x12e0 +#define NVC397_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT 3:0 +#define NVC397_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1 0x00000000 +#define NVC397_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_2X2 0x00000001 +#define NVC397_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1_VIRTUAL_SAMPLES 0x00000002 + +#define NVC397_SET_BLEND_STATE_PER_TARGET 0x12e4 +#define NVC397_SET_BLEND_STATE_PER_TARGET_ENABLE 0:0 +#define NVC397_SET_BLEND_STATE_PER_TARGET_ENABLE_FALSE 0x00000000 +#define NVC397_SET_BLEND_STATE_PER_TARGET_ENABLE_TRUE 0x00000001 + +#define NVC397_SET_DEPTH_WRITE 0x12e8 +#define NVC397_SET_DEPTH_WRITE_ENABLE 0:0 +#define NVC397_SET_DEPTH_WRITE_ENABLE_FALSE 0x00000000 +#define NVC397_SET_DEPTH_WRITE_ENABLE_TRUE 0x00000001 + +#define NVC397_SET_ALPHA_TEST 0x12ec +#define NVC397_SET_ALPHA_TEST_ENABLE 0:0 +#define NVC397_SET_ALPHA_TEST_ENABLE_FALSE 0x00000000 +#define NVC397_SET_ALPHA_TEST_ENABLE_TRUE 0x00000001 + +#define NVC397_SET_INLINE_INDEX4X8_ALIGN 0x1300 +#define NVC397_SET_INLINE_INDEX4X8_ALIGN_COUNT 29:0 +#define NVC397_SET_INLINE_INDEX4X8_ALIGN_START 31:30 + +#define NVC397_DRAW_INLINE_INDEX4X8 0x1304 +#define NVC397_DRAW_INLINE_INDEX4X8_INDEX0 7:0 +#define NVC397_DRAW_INLINE_INDEX4X8_INDEX1 15:8 +#define NVC397_DRAW_INLINE_INDEX4X8_INDEX2 23:16 +#define NVC397_DRAW_INLINE_INDEX4X8_INDEX3 31:24 + +#define NVC397_D3D_SET_CULL_MODE 0x1308 +#define NVC397_D3D_SET_CULL_MODE_V 31:0 +#define NVC397_D3D_SET_CULL_MODE_V_NONE 0x00000001 +#define NVC397_D3D_SET_CULL_MODE_V_CW 0x00000002 +#define NVC397_D3D_SET_CULL_MODE_V_CCW 0x00000003 + +#define NVC397_SET_DEPTH_FUNC 0x130c +#define NVC397_SET_DEPTH_FUNC_V 31:0 +#define NVC397_SET_DEPTH_FUNC_V_OGL_NEVER 0x00000200 +#define NVC397_SET_DEPTH_FUNC_V_OGL_LESS 0x00000201 +#define NVC397_SET_DEPTH_FUNC_V_OGL_EQUAL 0x00000202 +#define NVC397_SET_DEPTH_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVC397_SET_DEPTH_FUNC_V_OGL_GREATER 0x00000204 +#define NVC397_SET_DEPTH_FUNC_V_OGL_NOTEQUAL 
0x00000205 +#define NVC397_SET_DEPTH_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVC397_SET_DEPTH_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVC397_SET_DEPTH_FUNC_V_D3D_NEVER 0x00000001 +#define NVC397_SET_DEPTH_FUNC_V_D3D_LESS 0x00000002 +#define NVC397_SET_DEPTH_FUNC_V_D3D_EQUAL 0x00000003 +#define NVC397_SET_DEPTH_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVC397_SET_DEPTH_FUNC_V_D3D_GREATER 0x00000005 +#define NVC397_SET_DEPTH_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVC397_SET_DEPTH_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVC397_SET_DEPTH_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVC397_SET_ALPHA_REF 0x1310 +#define NVC397_SET_ALPHA_REF_V 31:0 + +#define NVC397_SET_ALPHA_FUNC 0x1314 +#define NVC397_SET_ALPHA_FUNC_V 31:0 +#define NVC397_SET_ALPHA_FUNC_V_OGL_NEVER 0x00000200 +#define NVC397_SET_ALPHA_FUNC_V_OGL_LESS 0x00000201 +#define NVC397_SET_ALPHA_FUNC_V_OGL_EQUAL 0x00000202 +#define NVC397_SET_ALPHA_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVC397_SET_ALPHA_FUNC_V_OGL_GREATER 0x00000204 +#define NVC397_SET_ALPHA_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVC397_SET_ALPHA_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVC397_SET_ALPHA_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVC397_SET_ALPHA_FUNC_V_D3D_NEVER 0x00000001 +#define NVC397_SET_ALPHA_FUNC_V_D3D_LESS 0x00000002 +#define NVC397_SET_ALPHA_FUNC_V_D3D_EQUAL 0x00000003 +#define NVC397_SET_ALPHA_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVC397_SET_ALPHA_FUNC_V_D3D_GREATER 0x00000005 +#define NVC397_SET_ALPHA_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVC397_SET_ALPHA_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVC397_SET_ALPHA_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVC397_SET_DRAW_AUTO_STRIDE 0x1318 +#define NVC397_SET_DRAW_AUTO_STRIDE_V 11:0 + +#define NVC397_SET_BLEND_CONST_RED 0x131c +#define NVC397_SET_BLEND_CONST_RED_V 31:0 + +#define NVC397_SET_BLEND_CONST_GREEN 0x1320 +#define NVC397_SET_BLEND_CONST_GREEN_V 31:0 + +#define NVC397_SET_BLEND_CONST_BLUE 0x1324 +#define NVC397_SET_BLEND_CONST_BLUE_V 31:0 + +#define NVC397_SET_BLEND_CONST_ALPHA 0x1328 +#define NVC397_SET_BLEND_CONST_ALPHA_V 31:0 + +#define NVC397_INVALIDATE_SAMPLER_CACHE 0x1330 +#define NVC397_INVALIDATE_SAMPLER_CACHE_LINES 0:0 +#define NVC397_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000 +#define NVC397_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001 +#define NVC397_INVALIDATE_SAMPLER_CACHE_TAG 25:4 + +#define NVC397_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334 +#define NVC397_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0 +#define NVC397_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000 +#define NVC397_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001 +#define NVC397_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4 + +#define NVC397_INVALIDATE_TEXTURE_DATA_CACHE 0x1338 +#define NVC397_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0 +#define NVC397_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000 +#define NVC397_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001 +#define NVC397_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4 + +#define NVC397_SET_BLEND_SEPARATE_FOR_ALPHA 0x133c +#define NVC397_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE 0:0 +#define NVC397_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000 +#define NVC397_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001 + +#define NVC397_SET_BLEND_COLOR_OP 0x1340 +#define NVC397_SET_BLEND_COLOR_OP_V 31:0 +#define NVC397_SET_BLEND_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC397_SET_BLEND_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC397_SET_BLEND_COLOR_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC397_SET_BLEND_COLOR_OP_V_OGL_MIN 0x00008007 +#define 
NVC397_SET_BLEND_COLOR_OP_V_OGL_MAX 0x00008008 +#define NVC397_SET_BLEND_COLOR_OP_V_D3D_ADD 0x00000001 +#define NVC397_SET_BLEND_COLOR_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC397_SET_BLEND_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVC397_SET_BLEND_COLOR_OP_V_D3D_MIN 0x00000004 +#define NVC397_SET_BLEND_COLOR_OP_V_D3D_MAX 0x00000005 + +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF 0x1344 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V 31:0 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC397_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC397_SET_BLEND_COLOR_DEST_COEFF 0x1348 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V 31:0 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001 +#define 
NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC397_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC397_SET_BLEND_ALPHA_OP 0x134c +#define NVC397_SET_BLEND_ALPHA_OP_V 31:0 +#define NVC397_SET_BLEND_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC397_SET_BLEND_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC397_SET_BLEND_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC397_SET_BLEND_ALPHA_OP_V_OGL_MIN 0x00008007 +#define NVC397_SET_BLEND_ALPHA_OP_V_OGL_MAX 0x00008008 +#define NVC397_SET_BLEND_ALPHA_OP_V_D3D_ADD 0x00000001 +#define NVC397_SET_BLEND_ALPHA_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC397_SET_BLEND_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVC397_SET_BLEND_ALPHA_OP_V_D3D_MIN 0x00000004 +#define NVC397_SET_BLEND_ALPHA_OP_V_D3D_MAX 0x00000005 + +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF 0x1350 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V 31:0 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 
0x00004301 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC397_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC397_SET_GLOBAL_COLOR_KEY 0x1354 +#define NVC397_SET_GLOBAL_COLOR_KEY_ENABLE 0:0 +#define NVC397_SET_GLOBAL_COLOR_KEY_ENABLE_FALSE 0x00000000 +#define NVC397_SET_GLOBAL_COLOR_KEY_ENABLE_TRUE 0x00000001 + +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF 0x1358 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V 31:0 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define 
NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC397_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC397_SET_SINGLE_ROP_CONTROL 0x135c +#define NVC397_SET_SINGLE_ROP_CONTROL_ENABLE 0:0 +#define NVC397_SET_SINGLE_ROP_CONTROL_ENABLE_FALSE 0x00000000 +#define NVC397_SET_SINGLE_ROP_CONTROL_ENABLE_TRUE 0x00000001 + +#define NVC397_SET_BLEND(i) (0x1360+(i)*4) +#define NVC397_SET_BLEND_ENABLE 0:0 +#define NVC397_SET_BLEND_ENABLE_FALSE 0x00000000 +#define NVC397_SET_BLEND_ENABLE_TRUE 0x00000001 + +#define NVC397_SET_STENCIL_TEST 0x1380 +#define NVC397_SET_STENCIL_TEST_ENABLE 0:0 +#define NVC397_SET_STENCIL_TEST_ENABLE_FALSE 0x00000000 +#define NVC397_SET_STENCIL_TEST_ENABLE_TRUE 0x00000001 + +#define NVC397_SET_STENCIL_OP_FAIL 0x1384 +#define NVC397_SET_STENCIL_OP_FAIL_V 31:0 +#define NVC397_SET_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00 +#define NVC397_SET_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000 +#define NVC397_SET_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01 +#define NVC397_SET_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02 +#define NVC397_SET_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03 +#define NVC397_SET_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A +#define NVC397_SET_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507 +#define NVC397_SET_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508 +#define NVC397_SET_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001 +#define NVC397_SET_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002 +#define NVC397_SET_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003 +#define NVC397_SET_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004 +#define NVC397_SET_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005 +#define NVC397_SET_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006 +#define NVC397_SET_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007 +#define 
NVC397_SET_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVC397_SET_STENCIL_OP_ZFAIL 0x1388
+#define NVC397_SET_STENCIL_OP_ZFAIL_V 31:0
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVC397_SET_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVC397_SET_STENCIL_OP_ZPASS 0x138c
+#define NVC397_SET_STENCIL_OP_ZPASS_V 31:0
+#define NVC397_SET_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVC397_SET_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVC397_SET_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVC397_SET_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVC397_SET_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NVC397_SET_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NVC397_SET_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NVC397_SET_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NVC397_SET_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NVC397_SET_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NVC397_SET_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NVC397_SET_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NVC397_SET_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NVC397_SET_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NVC397_SET_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NVC397_SET_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NVC397_SET_STENCIL_FUNC 0x1390
+#define NVC397_SET_STENCIL_FUNC_V 31:0
+#define NVC397_SET_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NVC397_SET_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NVC397_SET_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NVC397_SET_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVC397_SET_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NVC397_SET_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVC397_SET_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVC397_SET_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVC397_SET_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NVC397_SET_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NVC397_SET_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NVC397_SET_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVC397_SET_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVC397_SET_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVC397_SET_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVC397_SET_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVC397_SET_STENCIL_FUNC_REF 0x1394
+#define NVC397_SET_STENCIL_FUNC_REF_V 7:0
+
+#define NVC397_SET_STENCIL_FUNC_MASK 0x1398
+#define NVC397_SET_STENCIL_FUNC_MASK_V 7:0
+
+#define NVC397_SET_STENCIL_MASK 0x139c
+#define NVC397_SET_STENCIL_MASK_V 7:0
+
+#define NVC397_SET_DRAW_AUTO_START 0x13a4
+#define NVC397_SET_DRAW_AUTO_START_BYTE_COUNT 31:0
+
+#define NVC397_SET_PS_SATURATE 0x13a8
+#define NVC397_SET_PS_SATURATE_OUTPUT0 0:0
+#define NVC397_SET_PS_SATURATE_OUTPUT0_FALSE 0x00000000
+#define NVC397_SET_PS_SATURATE_OUTPUT0_TRUE 0x00000001
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE0 1:1
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE0_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE0_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC397_SET_PS_SATURATE_OUTPUT1 4:4
+#define NVC397_SET_PS_SATURATE_OUTPUT1_FALSE 0x00000000
+#define NVC397_SET_PS_SATURATE_OUTPUT1_TRUE 0x00000001
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE1 5:5
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE1_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE1_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC397_SET_PS_SATURATE_OUTPUT2 8:8
+#define NVC397_SET_PS_SATURATE_OUTPUT2_FALSE 0x00000000
+#define NVC397_SET_PS_SATURATE_OUTPUT2_TRUE 0x00000001
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE2 9:9
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE2_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE2_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC397_SET_PS_SATURATE_OUTPUT3 12:12
+#define NVC397_SET_PS_SATURATE_OUTPUT3_FALSE 0x00000000
+#define NVC397_SET_PS_SATURATE_OUTPUT3_TRUE 0x00000001
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE3 13:13
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE3_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE3_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC397_SET_PS_SATURATE_OUTPUT4 16:16
+#define NVC397_SET_PS_SATURATE_OUTPUT4_FALSE 0x00000000
+#define NVC397_SET_PS_SATURATE_OUTPUT4_TRUE 0x00000001
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE4 17:17
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE4_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE4_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC397_SET_PS_SATURATE_OUTPUT5 20:20
+#define NVC397_SET_PS_SATURATE_OUTPUT5_FALSE 0x00000000
+#define NVC397_SET_PS_SATURATE_OUTPUT5_TRUE 0x00000001
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE5 21:21
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE5_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE5_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC397_SET_PS_SATURATE_OUTPUT6 24:24
+#define NVC397_SET_PS_SATURATE_OUTPUT6_FALSE 0x00000000
+#define NVC397_SET_PS_SATURATE_OUTPUT6_TRUE 0x00000001
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE6 25:25
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE6_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE6_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC397_SET_PS_SATURATE_OUTPUT7 28:28
+#define NVC397_SET_PS_SATURATE_OUTPUT7_FALSE 0x00000000
+#define NVC397_SET_PS_SATURATE_OUTPUT7_TRUE 0x00000001
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE7 29:29
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE7_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC397_SET_PS_SATURATE_CLAMP_RANGE7_MINUS_ONE_TO_PLUS_ONE 0x00000001
+
+#define NVC397_SET_WINDOW_ORIGIN 0x13ac
+#define NVC397_SET_WINDOW_ORIGIN_MODE 0:0
+#define NVC397_SET_WINDOW_ORIGIN_MODE_UPPER_LEFT 0x00000000
+#define NVC397_SET_WINDOW_ORIGIN_MODE_LOWER_LEFT 0x00000001
+#define NVC397_SET_WINDOW_ORIGIN_FLIP_Y 4:4
+#define NVC397_SET_WINDOW_ORIGIN_FLIP_Y_FALSE 0x00000000
+#define NVC397_SET_WINDOW_ORIGIN_FLIP_Y_TRUE 0x00000001
+
+#define NVC397_SET_LINE_WIDTH_FLOAT 0x13b0
+#define NVC397_SET_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVC397_SET_ALIASED_LINE_WIDTH_FLOAT 0x13b4
+#define NVC397_SET_ALIASED_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVC397_SET_LINE_MULTISAMPLE_OVERRIDE 0x1418
+#define NVC397_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE 0:0
+#define NVC397_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_FALSE 0x00000000
+#define NVC397_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_TRUE 0x00000001
+
+#define NVC397_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424
+#define NVC397_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0
+#define NVC397_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVC397_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVC397_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4
+
+#define NVC397_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x1428
+#define NVC397_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0
+#define NVC397_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVC397_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVC397_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4
+
+#define NVC397_SET_GLOBAL_BASE_VERTEX_INDEX 0x1434
+#define NVC397_SET_GLOBAL_BASE_VERTEX_INDEX_V 31:0
+
+#define NVC397_SET_GLOBAL_BASE_INSTANCE_INDEX 0x1438
+#define NVC397_SET_GLOBAL_BASE_INSTANCE_INDEX_V 31:0
+
+#define NVC397_SET_PS_WARP_WATERMARKS 0x1450
+#define NVC397_SET_PS_WARP_WATERMARKS_LOW 15:0
+#define NVC397_SET_PS_WARP_WATERMARKS_HIGH 31:16
+
+#define NVC397_SET_PS_REGISTER_WATERMARKS 0x1454
+#define NVC397_SET_PS_REGISTER_WATERMARKS_LOW 15:0
+#define NVC397_SET_PS_REGISTER_WATERMARKS_HIGH 31:16
+
+#define NVC397_STORE_ZCULL 0x1464
+#define NVC397_STORE_ZCULL_V 0:0
+
+#define NVC397_SET_ITERATED_BLEND_CONSTANT_RED(j) (0x1480+(j)*16)
+#define NVC397_SET_ITERATED_BLEND_CONSTANT_RED_V 15:0
+
+#define NVC397_SET_ITERATED_BLEND_CONSTANT_GREEN(j) (0x1484+(j)*16)
+#define NVC397_SET_ITERATED_BLEND_CONSTANT_GREEN_V 15:0
+
+#define NVC397_SET_ITERATED_BLEND_CONSTANT_BLUE(j) (0x1488+(j)*16)
+#define NVC397_SET_ITERATED_BLEND_CONSTANT_BLUE_V 15:0
+
+#define NVC397_LOAD_ZCULL 0x1500
+#define NVC397_LOAD_ZCULL_V 0:0
+
+#define NVC397_SET_SURFACE_CLIP_ID_HEIGHT 0x1504
+#define NVC397_SET_SURFACE_CLIP_ID_HEIGHT_V 31:0
+
+#define NVC397_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL 0x1508
+#define NVC397_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVC397_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVC397_SET_CLIP_ID_CLEAR_RECT_VERTICAL 0x150c
+#define NVC397_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVC397_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVC397_SET_USER_CLIP_ENABLE 0x1510
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE0 0:0
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE0_FALSE 0x00000000
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE0_TRUE 0x00000001
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE1 1:1
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE1_FALSE 0x00000000
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE1_TRUE 0x00000001
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE2 2:2
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE2_FALSE 0x00000000
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE2_TRUE 0x00000001
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE3 3:3
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE3_FALSE 0x00000000
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE3_TRUE 0x00000001
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE4 4:4
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE4_FALSE 0x00000000
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE4_TRUE 0x00000001
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE5 5:5
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE5_FALSE 0x00000000
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE5_TRUE 0x00000001
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE6 6:6
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE6_FALSE 0x00000000
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE6_TRUE 0x00000001
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE7 7:7
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE7_FALSE 0x00000000
+#define NVC397_SET_USER_CLIP_ENABLE_PLANE7_TRUE 0x00000001
+
+#define NVC397_SET_ZPASS_PIXEL_COUNT 0x1514
+#define NVC397_SET_ZPASS_PIXEL_COUNT_ENABLE 0:0
+#define NVC397_SET_ZPASS_PIXEL_COUNT_ENABLE_FALSE 0x00000000
+#define NVC397_SET_ZPASS_PIXEL_COUNT_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_POINT_SIZE 0x1518
+#define NVC397_SET_POINT_SIZE_V 31:0
+
+#define NVC397_SET_ZCULL_STATS 0x151c
+#define NVC397_SET_ZCULL_STATS_ENABLE 0:0
+#define NVC397_SET_ZCULL_STATS_ENABLE_FALSE 0x00000000
+#define NVC397_SET_ZCULL_STATS_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_POINT_SPRITE 0x1520
+#define NVC397_SET_POINT_SPRITE_ENABLE 0:0
+#define NVC397_SET_POINT_SPRITE_ENABLE_FALSE 0x00000000
+#define NVC397_SET_POINT_SPRITE_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_SHADER_EXCEPTIONS 0x1528
+#define NVC397_SET_SHADER_EXCEPTIONS_ENABLE 0:0
+#define NVC397_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000
+#define NVC397_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001
+
+#define NVC397_CLEAR_REPORT_VALUE 0x1530
+#define NVC397_CLEAR_REPORT_VALUE_TYPE 4:0
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_DA_VERTICES_GENERATED 0x00000012
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_DA_PRIMITIVES_GENERATED 0x00000013
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_VS_INVOCATIONS 0x00000015
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_TI_INVOCATIONS 0x00000016
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_TS_INVOCATIONS 0x00000017
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_TS_PRIMITIVES_GENERATED 0x00000018
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_GS_INVOCATIONS 0x0000001A
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_GS_PRIMITIVES_GENERATED 0x0000001B
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_VTG_PRIMITIVES_OUT 0x0000001F
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_SUCCEEDED 0x00000010
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_NEEDED 0x00000011
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000003
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_CLIPPER_INVOCATIONS 0x0000001C
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_CLIPPER_PRIMITIVES_GENERATED 0x0000001D
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_ZCULL_STATS 0x00000002
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_PS_INVOCATIONS 0x0000001E
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_ZPASS_PIXEL_CNT 0x00000001
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_ALPHA_BETA_CLOCKS 0x00000004
+#define NVC397_CLEAR_REPORT_VALUE_TYPE_SCG_CLOCKS 0x00000009
+
+#define NVC397_SET_ANTI_ALIAS_ENABLE 0x1534
+#define NVC397_SET_ANTI_ALIAS_ENABLE_V 0:0
+#define NVC397_SET_ANTI_ALIAS_ENABLE_V_FALSE 0x00000000
+#define NVC397_SET_ANTI_ALIAS_ENABLE_V_TRUE 0x00000001
+
+#define NVC397_SET_ZT_SELECT 0x1538
+#define NVC397_SET_ZT_SELECT_TARGET_COUNT 0:0
+
+#define NVC397_SET_ANTI_ALIAS_ALPHA_CONTROL 0x153c
+#define NVC397_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE 0:0
+#define NVC397_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_DISABLE 0x00000000
+#define NVC397_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_ENABLE 0x00000001
+#define NVC397_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE 4:4
+#define NVC397_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_DISABLE 0x00000000
+#define NVC397_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_ENABLE 0x00000001
+
+#define NVC397_SET_RENDER_ENABLE_A 0x1550
+#define NVC397_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0
+
+#define NVC397_SET_RENDER_ENABLE_B 0x1554
+#define NVC397_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0
+
+#define NVC397_SET_RENDER_ENABLE_C 0x1558
+#define NVC397_SET_RENDER_ENABLE_C_MODE 2:0
+#define NVC397_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000
+#define NVC397_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001
+#define NVC397_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002
+#define NVC397_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003
+#define NVC397_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NVC397_SET_TEX_SAMPLER_POOL_A 0x155c
+#define NVC397_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVC397_SET_TEX_SAMPLER_POOL_B 0x1560
+#define NVC397_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVC397_SET_TEX_SAMPLER_POOL_C 0x1564
+#define NVC397_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0
+
+#define NVC397_SET_SLOPE_SCALE_DEPTH_BIAS 0x156c
+#define NVC397_SET_SLOPE_SCALE_DEPTH_BIAS_V 31:0
+
+#define NVC397_SET_ANTI_ALIASED_LINE 0x1570
+#define NVC397_SET_ANTI_ALIASED_LINE_ENABLE 0:0
+#define NVC397_SET_ANTI_ALIASED_LINE_ENABLE_FALSE 0x00000000
+#define NVC397_SET_ANTI_ALIASED_LINE_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_TEX_HEADER_POOL_A 0x1574
+#define NVC397_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVC397_SET_TEX_HEADER_POOL_B 0x1578
+#define NVC397_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVC397_SET_TEX_HEADER_POOL_C 0x157c
+#define NVC397_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0
+
+#define NVC397_SET_ACTIVE_ZCULL_REGION 0x1590
+#define NVC397_SET_ACTIVE_ZCULL_REGION_ID 5:0
+
+#define NVC397_SET_TWO_SIDED_STENCIL_TEST 0x1594
+#define NVC397_SET_TWO_SIDED_STENCIL_TEST_ENABLE 0:0
+#define NVC397_SET_TWO_SIDED_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NVC397_SET_TWO_SIDED_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_BACK_STENCIL_OP_FAIL 0x1598
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V 31:0
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NVC397_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL 0x159c
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V 31:0
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVC397_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS 0x15a0
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V 31:0
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NVC397_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NVC397_SET_BACK_STENCIL_FUNC 0x15a4
+#define NVC397_SET_BACK_STENCIL_FUNC_V 31:0
+#define NVC397_SET_BACK_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NVC397_SET_BACK_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NVC397_SET_BACK_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NVC397_SET_BACK_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVC397_SET_BACK_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NVC397_SET_BACK_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVC397_SET_BACK_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVC397_SET_BACK_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVC397_SET_BACK_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NVC397_SET_BACK_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NVC397_SET_BACK_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NVC397_SET_BACK_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVC397_SET_BACK_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVC397_SET_BACK_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVC397_SET_BACK_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVC397_SET_BACK_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVC397_SET_SRGB_WRITE 0x15b8
+#define NVC397_SET_SRGB_WRITE_ENABLE 0:0
+#define NVC397_SET_SRGB_WRITE_ENABLE_FALSE 0x00000000
+#define NVC397_SET_SRGB_WRITE_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_DEPTH_BIAS 0x15bc
+#define NVC397_SET_DEPTH_BIAS_V 31:0
+
+#define NVC397_SET_ZCULL_REGION_FORMAT 0x15c8
+#define NVC397_SET_ZCULL_REGION_FORMAT_TYPE 3:0
+#define NVC397_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X4 0x00000000
+#define NVC397_SET_ZCULL_REGION_FORMAT_TYPE_ZS_4X4 0x00000001
+#define NVC397_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X2 0x00000002
+#define NVC397_SET_ZCULL_REGION_FORMAT_TYPE_Z_2X4 0x00000003
+#define NVC397_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X8_4X4 0x00000004
+#define NVC397_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_4X2 0x00000005
+#define NVC397_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_2X4 0x00000006
+#define NVC397_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X16_4X8 0x00000007
+#define NVC397_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_2X2 0x00000008
+#define NVC397_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_4X2 0x00000009
+#define NVC397_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_2X4 0x0000000A
+#define NVC397_SET_ZCULL_REGION_FORMAT_TYPE_ZS_8X8_2X2 0x0000000B
+#define NVC397_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_1X1 0x0000000C
+
+#define NVC397_SET_RT_LAYER 0x15cc
+#define NVC397_SET_RT_LAYER_V 15:0
+#define NVC397_SET_RT_LAYER_CONTROL 16:16
+#define NVC397_SET_RT_LAYER_CONTROL_V_SELECTS_LAYER 0x00000000
+#define NVC397_SET_RT_LAYER_CONTROL_GEOMETRY_SHADER_SELECTS_LAYER 0x00000001
+
+#define NVC397_SET_ANTI_ALIAS 0x15d0
+#define NVC397_SET_ANTI_ALIAS_SAMPLES 3:0
+#define NVC397_SET_ANTI_ALIAS_SAMPLES_MODE_1X1 0x00000000
+#define NVC397_SET_ANTI_ALIAS_SAMPLES_MODE_2X1 0x00000001
+#define NVC397_SET_ANTI_ALIAS_SAMPLES_MODE_2X2 0x00000002
+#define NVC397_SET_ANTI_ALIAS_SAMPLES_MODE_4X2 0x00000003
+#define NVC397_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVC397_SET_ANTI_ALIAS_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVC397_SET_ANTI_ALIAS_SAMPLES_MODE_4X4 0x00000006
+#define NVC397_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_4 0x00000008
+#define NVC397_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_12 0x00000009
+#define NVC397_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_8 0x0000000A
+#define NVC397_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_24 0x0000000B
+
+#define NVC397_SET_EDGE_FLAG 0x15e4
+#define NVC397_SET_EDGE_FLAG_V 0:0
+#define NVC397_SET_EDGE_FLAG_V_FALSE 0x00000000
+#define NVC397_SET_EDGE_FLAG_V_TRUE 0x00000001
+
+#define NVC397_DRAW_INLINE_INDEX 0x15e8
+#define NVC397_DRAW_INLINE_INDEX_V 31:0
+
+#define NVC397_SET_INLINE_INDEX2X16_ALIGN 0x15ec
+#define NVC397_SET_INLINE_INDEX2X16_ALIGN_COUNT 30:0
+#define NVC397_SET_INLINE_INDEX2X16_ALIGN_START_ODD 31:31
+#define NVC397_SET_INLINE_INDEX2X16_ALIGN_START_ODD_FALSE 0x00000000
+#define NVC397_SET_INLINE_INDEX2X16_ALIGN_START_ODD_TRUE 0x00000001
+
+#define NVC397_DRAW_INLINE_INDEX2X16 0x15f0
+#define NVC397_DRAW_INLINE_INDEX2X16_EVEN 15:0
+#define NVC397_DRAW_INLINE_INDEX2X16_ODD 31:16
+
+#define NVC397_SET_VERTEX_GLOBAL_BASE_OFFSET_A 0x15f4
+#define NVC397_SET_VERTEX_GLOBAL_BASE_OFFSET_A_UPPER 7:0
+
+#define NVC397_SET_VERTEX_GLOBAL_BASE_OFFSET_B 0x15f8
+#define NVC397_SET_VERTEX_GLOBAL_BASE_OFFSET_B_LOWER 31:0
+
+#define NVC397_SET_ZCULL_REGION_PIXEL_OFFSET_A 0x15fc
+#define NVC397_SET_ZCULL_REGION_PIXEL_OFFSET_A_WIDTH 15:0
+
+#define NVC397_SET_ZCULL_REGION_PIXEL_OFFSET_B 0x1600
+#define NVC397_SET_ZCULL_REGION_PIXEL_OFFSET_B_HEIGHT 15:0
+
+#define NVC397_SET_POINT_SPRITE_SELECT 0x1604
+#define NVC397_SET_POINT_SPRITE_SELECT_RMODE 1:0
+#define NVC397_SET_POINT_SPRITE_SELECT_RMODE_ZERO 0x00000000
+#define NVC397_SET_POINT_SPRITE_SELECT_RMODE_FROM_R 0x00000001
+#define NVC397_SET_POINT_SPRITE_SELECT_RMODE_FROM_S 0x00000002
+#define NVC397_SET_POINT_SPRITE_SELECT_ORIGIN 2:2
+#define NVC397_SET_POINT_SPRITE_SELECT_ORIGIN_BOTTOM 0x00000000
+#define NVC397_SET_POINT_SPRITE_SELECT_ORIGIN_TOP 0x00000001
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE0 3:3
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE0_PASSTHROUGH 0x00000000
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE0_GENERATE 0x00000001
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE1 4:4
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE1_PASSTHROUGH 0x00000000
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE1_GENERATE 0x00000001
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE2 5:5
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE2_PASSTHROUGH 0x00000000
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE2_GENERATE 0x00000001
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE3 6:6
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE3_PASSTHROUGH 0x00000000
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE3_GENERATE 0x00000001
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE4 7:7
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE4_PASSTHROUGH 0x00000000
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE4_GENERATE 0x00000001
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE5 8:8
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE5_PASSTHROUGH 0x00000000
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE5_GENERATE 0x00000001
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE6 9:9
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE6_PASSTHROUGH 0x00000000
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE6_GENERATE 0x00000001
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE7 10:10
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE7_PASSTHROUGH 0x00000000
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE7_GENERATE 0x00000001
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE8 11:11
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE8_PASSTHROUGH 0x00000000
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE8_GENERATE 0x00000001
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE9 12:12
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE9_PASSTHROUGH 0x00000000
+#define NVC397_SET_POINT_SPRITE_SELECT_TEXTURE9_GENERATE 0x00000001
+
+#define NVC397_SET_ATTRIBUTE_DEFAULT 0x1610
+#define NVC397_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE 0:0
+#define NVC397_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_0001 0x00000000
+#define NVC397_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_1111 0x00000001
+#define NVC397_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR 1:1
+#define NVC397_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0000 0x00000000
+#define NVC397_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0001 0x00000001
+#define NVC397_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR 2:2
+#define NVC397_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0000 0x00000000
+#define NVC397_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0001 0x00000001
+#define NVC397_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE 3:3
+#define NVC397_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0000 0x00000000
+#define NVC397_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0001 0x00000001
+#define NVC397_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0 4:4
+#define NVC397_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_0001 0x00000000
+#define NVC397_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_1111 0x00000001
+#define NVC397_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15 5:5
+#define NVC397_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0000 0x00000000
+#define NVC397_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0001 0x00000001
+
+#define NVC397_END 0x1614
+#define NVC397_END_V 0:0
+
+#define NVC397_BEGIN 0x1618
+#define NVC397_BEGIN_OP 15:0
+#define NVC397_BEGIN_OP_POINTS 0x00000000
+#define NVC397_BEGIN_OP_LINES 0x00000001
+#define NVC397_BEGIN_OP_LINE_LOOP 0x00000002
+#define NVC397_BEGIN_OP_LINE_STRIP 0x00000003
+#define NVC397_BEGIN_OP_TRIANGLES 0x00000004
+#define NVC397_BEGIN_OP_TRIANGLE_STRIP 0x00000005
+#define NVC397_BEGIN_OP_TRIANGLE_FAN 0x00000006
+#define NVC397_BEGIN_OP_QUADS 0x00000007
+#define NVC397_BEGIN_OP_QUAD_STRIP 0x00000008
+#define NVC397_BEGIN_OP_POLYGON 0x00000009
+#define NVC397_BEGIN_OP_LINELIST_ADJCY 0x0000000A
+#define NVC397_BEGIN_OP_LINESTRIP_ADJCY 0x0000000B
+#define NVC397_BEGIN_OP_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC397_BEGIN_OP_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC397_BEGIN_OP_PATCH 0x0000000E
+#define NVC397_BEGIN_PRIMITIVE_ID 24:24
+#define NVC397_BEGIN_PRIMITIVE_ID_FIRST 0x00000000
+#define NVC397_BEGIN_PRIMITIVE_ID_UNCHANGED 0x00000001
+#define NVC397_BEGIN_INSTANCE_ID 27:26
+#define NVC397_BEGIN_INSTANCE_ID_FIRST 0x00000000
+#define NVC397_BEGIN_INSTANCE_ID_SUBSEQUENT 0x00000001
+#define NVC397_BEGIN_INSTANCE_ID_UNCHANGED 0x00000002
+#define NVC397_BEGIN_SPLIT_MODE 30:29
+#define NVC397_BEGIN_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000
+#define NVC397_BEGIN_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001
+#define NVC397_BEGIN_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002
+#define NVC397_BEGIN_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003
+#define NVC397_BEGIN_INSTANCE_ITERATE_ENABLE 31:31
+#define NVC397_BEGIN_INSTANCE_ITERATE_ENABLE_FALSE 0x00000000
+#define NVC397_BEGIN_INSTANCE_ITERATE_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_VERTEX_ID_COPY 0x161c
+#define NVC397_SET_VERTEX_ID_COPY_ENABLE 0:0
+#define NVC397_SET_VERTEX_ID_COPY_ENABLE_FALSE 0x00000000
+#define NVC397_SET_VERTEX_ID_COPY_ENABLE_TRUE 0x00000001
+#define NVC397_SET_VERTEX_ID_COPY_ATTRIBUTE_SLOT 11:4
+
+#define NVC397_ADD_TO_PRIMITIVE_ID 0x1620
+#define NVC397_ADD_TO_PRIMITIVE_ID_V 31:0
+
+#define NVC397_LOAD_PRIMITIVE_ID 0x1624
+#define NVC397_LOAD_PRIMITIVE_ID_V 31:0
+
+#define NVC397_SET_SHADER_BASED_CULL 0x162c
+#define NVC397_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE 1:1
+#define NVC397_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_FALSE 0x00000000
+#define NVC397_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_TRUE 0x00000001
+#define NVC397_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE 0:0
+#define NVC397_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_FALSE 0x00000000
+#define NVC397_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_CLASS_VERSION 0x1638
+#define NVC397_SET_CLASS_VERSION_CURRENT 15:0
+#define NVC397_SET_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC397_SET_DA_PRIMITIVE_RESTART 0x1644
+#define NVC397_SET_DA_PRIMITIVE_RESTART_ENABLE 0:0
+#define NVC397_SET_DA_PRIMITIVE_RESTART_ENABLE_FALSE 0x00000000
+#define NVC397_SET_DA_PRIMITIVE_RESTART_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_DA_PRIMITIVE_RESTART_INDEX 0x1648
+#define NVC397_SET_DA_PRIMITIVE_RESTART_INDEX_V 31:0
+
+#define NVC397_SET_DA_OUTPUT 0x164c
+#define NVC397_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START 12:12
+#define NVC397_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_FALSE 0x00000000
+#define NVC397_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_TRUE 0x00000001
+
+#define NVC397_SET_ANTI_ALIASED_POINT 0x1658
+#define NVC397_SET_ANTI_ALIASED_POINT_ENABLE 0:0
+#define NVC397_SET_ANTI_ALIASED_POINT_ENABLE_FALSE 0x00000000
+#define NVC397_SET_ANTI_ALIASED_POINT_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_POINT_CENTER_MODE 0x165c
+#define NVC397_SET_POINT_CENTER_MODE_V 31:0
+#define NVC397_SET_POINT_CENTER_MODE_V_OGL 0x00000000
+#define NVC397_SET_POINT_CENTER_MODE_V_D3D 0x00000001
+
+#define NVC397_SET_LINE_SMOOTH_PARAMETERS 0x1668
+#define NVC397_SET_LINE_SMOOTH_PARAMETERS_FALLOFF 31:0
+#define NVC397_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_00 0x00000000
+#define NVC397_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_33 0x00000001
+#define NVC397_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_60 0x00000002
+
+#define NVC397_SET_LINE_STIPPLE 0x166c
+#define NVC397_SET_LINE_STIPPLE_ENABLE 0:0
+#define NVC397_SET_LINE_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVC397_SET_LINE_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_LINE_SMOOTH_EDGE_TABLE(i) (0x1670+(i)*4)
+#define NVC397_SET_LINE_SMOOTH_EDGE_TABLE_V0 7:0
+#define NVC397_SET_LINE_SMOOTH_EDGE_TABLE_V1 15:8
+#define NVC397_SET_LINE_SMOOTH_EDGE_TABLE_V2 23:16
+#define NVC397_SET_LINE_SMOOTH_EDGE_TABLE_V3 31:24
+
+#define NVC397_SET_LINE_STIPPLE_PARAMETERS 0x1680
+#define NVC397_SET_LINE_STIPPLE_PARAMETERS_FACTOR 7:0
+#define NVC397_SET_LINE_STIPPLE_PARAMETERS_PATTERN 23:8
+
+#define NVC397_SET_PROVOKING_VERTEX 0x1684
+#define NVC397_SET_PROVOKING_VERTEX_V 0:0
+#define NVC397_SET_PROVOKING_VERTEX_V_FIRST 0x00000000
+#define NVC397_SET_PROVOKING_VERTEX_V_LAST 0x00000001
+
+#define NVC397_SET_TWO_SIDED_LIGHT 0x1688
+#define NVC397_SET_TWO_SIDED_LIGHT_ENABLE 0:0
+#define NVC397_SET_TWO_SIDED_LIGHT_ENABLE_FALSE 0x00000000
+#define NVC397_SET_TWO_SIDED_LIGHT_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_POLYGON_STIPPLE 0x168c
+#define NVC397_SET_POLYGON_STIPPLE_ENABLE 0:0
+#define NVC397_SET_POLYGON_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVC397_SET_POLYGON_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_SHADER_CONTROL 0x1690
+#define NVC397_SET_SHADER_CONTROL_DEFAULT_PARTIAL 0:0
+#define NVC397_SET_SHADER_CONTROL_DEFAULT_PARTIAL_ZERO 0x00000000
+#define NVC397_SET_SHADER_CONTROL_DEFAULT_PARTIAL_INFINITY 0x00000001
+#define NVC397_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR 1:1
+#define NVC397_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_LEGACY 0x00000000
+#define NVC397_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_FP64_COMPATIBLE 0x00000001
+#define NVC397_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR 2:2
+#define NVC397_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_ZERO 0x00000000
+#define NVC397_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_INDEFINITE 0x00000001
+
+#define NVC397_CHECK_CLASS_VERSION 0x16a0
+#define NVC397_CHECK_CLASS_VERSION_CURRENT 15:0
+#define NVC397_CHECK_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC397_SET_SPH_VERSION 0x16a4
+#define NVC397_SET_SPH_VERSION_CURRENT 15:0
+#define NVC397_SET_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC397_CHECK_SPH_VERSION 0x16a8
+#define NVC397_CHECK_SPH_VERSION_CURRENT 15:0
+#define NVC397_CHECK_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC397_SET_ALPHA_TO_COVERAGE_OVERRIDE 0x16b4
+#define NVC397_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE 0:0
+#define NVC397_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000
+#define NVC397_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001
+#define NVC397_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT 1:1
+#define NVC397_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_DISABLE 0x00000000
+#define NVC397_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_ENABLE 0x00000001
+
+#define NVC397_SET_POLYGON_STIPPLE_PATTERN(i) (0x1700+(i)*4)
+#define NVC397_SET_POLYGON_STIPPLE_PATTERN_V 31:0
+
+#define NVC397_SET_AAM_VERSION 0x1790
+#define NVC397_SET_AAM_VERSION_CURRENT 15:0
+#define NVC397_SET_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC397_CHECK_AAM_VERSION 0x1794
+#define NVC397_CHECK_AAM_VERSION_CURRENT 15:0
+#define NVC397_CHECK_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC397_SET_ZT_LAYER 0x179c
+#define NVC397_SET_ZT_LAYER_OFFSET 15:0
+
+#define NVC397_SET_INDEX_BUFFER_A 0x17c8
+#define NVC397_SET_INDEX_BUFFER_A_ADDRESS_UPPER 7:0
+
+#define NVC397_SET_INDEX_BUFFER_B 0x17cc
+#define NVC397_SET_INDEX_BUFFER_B_ADDRESS_LOWER 31:0
+
+#define NVC397_SET_INDEX_BUFFER_C 0x17d0
+#define NVC397_SET_INDEX_BUFFER_C_LIMIT_ADDRESS_UPPER 7:0
+
+#define NVC397_SET_INDEX_BUFFER_D 0x17d4
+#define NVC397_SET_INDEX_BUFFER_D_LIMIT_ADDRESS_LOWER 31:0
+
+#define NVC397_SET_INDEX_BUFFER_E 0x17d8
+#define NVC397_SET_INDEX_BUFFER_E_INDEX_SIZE 1:0
+#define NVC397_SET_INDEX_BUFFER_E_INDEX_SIZE_ONE_BYTE 0x00000000
+#define NVC397_SET_INDEX_BUFFER_E_INDEX_SIZE_TWO_BYTES 0x00000001
+#define NVC397_SET_INDEX_BUFFER_E_INDEX_SIZE_FOUR_BYTES 0x00000002
+
+#define NVC397_SET_INDEX_BUFFER_F 0x17dc
+#define NVC397_SET_INDEX_BUFFER_F_FIRST 31:0
+
+#define NVC397_DRAW_INDEX_BUFFER 0x17e0
+#define NVC397_DRAW_INDEX_BUFFER_COUNT 31:0
+
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST 0x17e4
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST 0x17e8
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST 0x17ec
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f0
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC397_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f4
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC397_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f8
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC397_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC397_SET_DEPTH_BIAS_CLAMP 0x187c
+#define NVC397_SET_DEPTH_BIAS_CLAMP_V 31:0
+
+#define NVC397_SET_VERTEX_STREAM_INSTANCE_A(i) (0x1880+(i)*4)
+#define NVC397_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED 0:0
+#define NVC397_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_FALSE 0x00000000
+#define NVC397_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_TRUE 0x00000001
+
+#define NVC397_SET_VERTEX_STREAM_INSTANCE_B(i) (0x18c0+(i)*4)
+#define NVC397_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED 0:0
+#define NVC397_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_FALSE 0x00000000
+#define NVC397_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_TRUE 0x00000001
+
+#define NVC397_SET_ATTRIBUTE_POINT_SIZE 0x1910
+#define NVC397_SET_ATTRIBUTE_POINT_SIZE_ENABLE 0:0
+#define NVC397_SET_ATTRIBUTE_POINT_SIZE_ENABLE_FALSE 0x00000000
+#define NVC397_SET_ATTRIBUTE_POINT_SIZE_ENABLE_TRUE 0x00000001
+#define NVC397_SET_ATTRIBUTE_POINT_SIZE_SLOT 11:4
+
+#define NVC397_OGL_SET_CULL 0x1918
+#define NVC397_OGL_SET_CULL_ENABLE 0:0
+#define NVC397_OGL_SET_CULL_ENABLE_FALSE 0x00000000
+#define NVC397_OGL_SET_CULL_ENABLE_TRUE 0x00000001
+
+#define NVC397_OGL_SET_FRONT_FACE 0x191c
+#define NVC397_OGL_SET_FRONT_FACE_V 31:0
+#define NVC397_OGL_SET_FRONT_FACE_V_CW 0x00000900
+#define NVC397_OGL_SET_FRONT_FACE_V_CCW 0x00000901
+
+#define NVC397_OGL_SET_CULL_FACE 0x1920
+#define NVC397_OGL_SET_CULL_FACE_V 31:0
+#define NVC397_OGL_SET_CULL_FACE_V_FRONT 0x00000404
+#define NVC397_OGL_SET_CULL_FACE_V_BACK 0x00000405
+#define NVC397_OGL_SET_CULL_FACE_V_FRONT_AND_BACK 0x00000408
+
+#define NVC397_SET_VIEWPORT_PIXEL 0x1924
+#define NVC397_SET_VIEWPORT_PIXEL_CENTER 0:0
+#define NVC397_SET_VIEWPORT_PIXEL_CENTER_AT_HALF_INTEGERS 0x00000000
+#define NVC397_SET_VIEWPORT_PIXEL_CENTER_AT_INTEGERS 0x00000001
+
+#define NVC397_SET_VIEWPORT_SCALE_OFFSET 0x192c
+#define NVC397_SET_VIEWPORT_SCALE_OFFSET_ENABLE 0:0
+#define NVC397_SET_VIEWPORT_SCALE_OFFSET_ENABLE_FALSE 0x00000000
+#define NVC397_SET_VIEWPORT_SCALE_OFFSET_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL 0x193c
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE 0:0
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_FALSE 0x00000000
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_TRUE 0x00000001
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE 17:16
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_USE_FIELD_MIN_Z_ZERO_MAX_Z_ONE 0x00000000
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_MIN_Z_MAX_Z 0x00000001
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_ZERO_ONE 0x00000002
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_MINUS_INF_PLUS_INF 0x00000003
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z 3:3
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLIP 0x00000000
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLAMP 0x00000001
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z 4:4
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLIP 0x00000000
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLAMP 0x00000001
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND 7:7
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_256 0x00000000
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_1 0x00000001
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND 10:10
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_256 0x00000000
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_1 0x00000001
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP 13:11
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP 0x00000000
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_PASSTHRU 0x00000001
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XY_CLIP 0x00000002
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XYZ_CLIP 0x00000003
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP_NO_Z_CULL 0x00000004
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_Z_CLIP 0x00000005
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_TRI_FILL_OR_CLIP 0x00000006
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z 2:1
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SAME_AS_XY_GUARDBAND 0x00000000
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_256 0x00000001
+#define NVC397_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_1 0x00000002
+
+#define NVC397_SET_USER_CLIP_OP 0x1940
+#define NVC397_SET_USER_CLIP_OP_PLANE0 0:0
+#define NVC397_SET_USER_CLIP_OP_PLANE0_CLIP 0x00000000
+#define NVC397_SET_USER_CLIP_OP_PLANE0_CULL 0x00000001
+#define NVC397_SET_USER_CLIP_OP_PLANE1 4:4
+#define NVC397_SET_USER_CLIP_OP_PLANE1_CLIP 0x00000000
+#define NVC397_SET_USER_CLIP_OP_PLANE1_CULL 0x00000001
+#define NVC397_SET_USER_CLIP_OP_PLANE2 8:8
+#define NVC397_SET_USER_CLIP_OP_PLANE2_CLIP 0x00000000
+#define NVC397_SET_USER_CLIP_OP_PLANE2_CULL 0x00000001
+#define NVC397_SET_USER_CLIP_OP_PLANE3 12:12
+#define NVC397_SET_USER_CLIP_OP_PLANE3_CLIP 0x00000000
+#define NVC397_SET_USER_CLIP_OP_PLANE3_CULL 0x00000001
+#define NVC397_SET_USER_CLIP_OP_PLANE4 16:16
+#define NVC397_SET_USER_CLIP_OP_PLANE4_CLIP 0x00000000
+#define NVC397_SET_USER_CLIP_OP_PLANE4_CULL 0x00000001
+#define NVC397_SET_USER_CLIP_OP_PLANE5 20:20
+#define NVC397_SET_USER_CLIP_OP_PLANE5_CLIP 0x00000000
+#define NVC397_SET_USER_CLIP_OP_PLANE5_CULL 0x00000001
+#define NVC397_SET_USER_CLIP_OP_PLANE6 24:24
+#define NVC397_SET_USER_CLIP_OP_PLANE6_CLIP 0x00000000
+#define NVC397_SET_USER_CLIP_OP_PLANE6_CULL 0x00000001
+#define NVC397_SET_USER_CLIP_OP_PLANE7 28:28
+#define NVC397_SET_USER_CLIP_OP_PLANE7_CLIP 0x00000000
+#define NVC397_SET_USER_CLIP_OP_PLANE7_CULL 0x00000001
+
+#define NVC397_SET_RENDER_ENABLE_OVERRIDE 0x1944
+#define NVC397_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0
+#define NVC397_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000
+#define NVC397_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001
+#define NVC397_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002
+
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_CONTROL 0x1948
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE 0:0
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_TOPOLOGY_IN_BEGIN_METHODS 0x00000000
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_SEPARATE_TOPOLOGY_STATE 0x00000001
+
+#define NVC397_SET_WINDOW_CLIP_ENABLE 0x194c
+#define NVC397_SET_WINDOW_CLIP_ENABLE_V 0:0
+#define NVC397_SET_WINDOW_CLIP_ENABLE_V_FALSE 0x00000000
+#define NVC397_SET_WINDOW_CLIP_ENABLE_V_TRUE 0x00000001
+
+#define NVC397_SET_WINDOW_CLIP_TYPE 0x1950
+#define NVC397_SET_WINDOW_CLIP_TYPE_V 1:0
+#define NVC397_SET_WINDOW_CLIP_TYPE_V_INCLUSIVE 0x00000000
+#define NVC397_SET_WINDOW_CLIP_TYPE_V_EXCLUSIVE 0x00000001
+#define NVC397_SET_WINDOW_CLIP_TYPE_V_CLIPALL 0x00000002
+
+#define NVC397_INVALIDATE_ZCULL 0x1958
+#define NVC397_INVALIDATE_ZCULL_V 31:0
+#define NVC397_INVALIDATE_ZCULL_V_INVALIDATE 0x00000000
+
+#define NVC397_SET_ZCULL 0x1968
+#define NVC397_SET_ZCULL_Z_ENABLE 0:0
+#define NVC397_SET_ZCULL_Z_ENABLE_FALSE 0x00000000
+#define NVC397_SET_ZCULL_Z_ENABLE_TRUE 0x00000001
+#define NVC397_SET_ZCULL_STENCIL_ENABLE 4:4
+#define NVC397_SET_ZCULL_STENCIL_ENABLE_FALSE 0x00000000
+#define NVC397_SET_ZCULL_STENCIL_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_ZCULL_BOUNDS 0x196c
+#define NVC397_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE 0:0
+#define NVC397_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVC397_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_TRUE 0x00000001
+#define NVC397_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE 4:4
+#define NVC397_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVC397_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_PRIMITIVE_TOPOLOGY 0x1970
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V 15:0
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_POINTLIST 0x00000001
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LINELIST 0x00000002
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP 0x00000003
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST 0x00000004
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP 0x00000005
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LINELIST_ADJCY 0x0000000A
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP_ADJCY 0x0000000B
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_PATCHLIST 0x0000000E
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_POINTS 0x00001001
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST 0x00001002
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST 0x00001003
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST 0x0000100F
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINESTRIP 0x00001010
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINESTRIP 0x00001011
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLELIST 0x00001012
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLESTRIP 0x00001013
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLESTRIP 0x00001014
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN 0x00001015
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLEFAN 0x00001016
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN_IMM 0x00001017
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST_IMM 0x00001018
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST2 0x0000101A
+#define NVC397_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST2 0x0000101B
+
+#define NVC397_ZCULL_SYNC 0x1978
+#define NVC397_ZCULL_SYNC_V 31:0
+
+#define NVC397_SET_CLIP_ID_TEST 0x197c
+#define NVC397_SET_CLIP_ID_TEST_ENABLE 0:0
+#define NVC397_SET_CLIP_ID_TEST_ENABLE_FALSE 0x00000000
+#define NVC397_SET_CLIP_ID_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_SURFACE_CLIP_ID_WIDTH 0x1980
+#define NVC397_SET_SURFACE_CLIP_ID_WIDTH_V 31:0
+
+#define NVC397_SET_CLIP_ID 0x1984
+#define NVC397_SET_CLIP_ID_V 31:0
+
+#define NVC397_SET_DEPTH_BOUNDS_TEST 0x19bc
+#define NVC397_SET_DEPTH_BOUNDS_TEST_ENABLE 0:0
+#define NVC397_SET_DEPTH_BOUNDS_TEST_ENABLE_FALSE 0x00000000
+#define NVC397_SET_DEPTH_BOUNDS_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_BLEND_FLOAT_OPTION 0x19c0
+#define NVC397_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO 0:0
+#define NVC397_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_FALSE 0x00000000
+#define NVC397_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_TRUE 0x00000001
+
+#define NVC397_SET_LOGIC_OP 0x19c4
+#define NVC397_SET_LOGIC_OP_ENABLE 0:0
+#define NVC397_SET_LOGIC_OP_ENABLE_FALSE 0x00000000
+#define NVC397_SET_LOGIC_OP_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_LOGIC_OP_FUNC 0x19c8
+#define NVC397_SET_LOGIC_OP_FUNC_V 31:0
+#define NVC397_SET_LOGIC_OP_FUNC_V_CLEAR 0x00001500
+#define NVC397_SET_LOGIC_OP_FUNC_V_AND 0x00001501
+#define NVC397_SET_LOGIC_OP_FUNC_V_AND_REVERSE 0x00001502
+#define NVC397_SET_LOGIC_OP_FUNC_V_COPY 0x00001503
+#define NVC397_SET_LOGIC_OP_FUNC_V_AND_INVERTED 0x00001504
+#define NVC397_SET_LOGIC_OP_FUNC_V_NOOP 0x00001505
+#define NVC397_SET_LOGIC_OP_FUNC_V_XOR 0x00001506
+#define NVC397_SET_LOGIC_OP_FUNC_V_OR 0x00001507
+#define NVC397_SET_LOGIC_OP_FUNC_V_NOR 0x00001508
+#define NVC397_SET_LOGIC_OP_FUNC_V_EQUIV 0x00001509
+#define NVC397_SET_LOGIC_OP_FUNC_V_INVERT 0x0000150A
+#define NVC397_SET_LOGIC_OP_FUNC_V_OR_REVERSE 0x0000150B
+#define NVC397_SET_LOGIC_OP_FUNC_V_COPY_INVERTED 0x0000150C
+#define NVC397_SET_LOGIC_OP_FUNC_V_OR_INVERTED 0x0000150D
+#define NVC397_SET_LOGIC_OP_FUNC_V_NAND 0x0000150E
+#define NVC397_SET_LOGIC_OP_FUNC_V_SET 0x0000150F
+
+#define NVC397_SET_Z_COMPRESSION 0x19cc
+#define NVC397_SET_Z_COMPRESSION_ENABLE 0:0
+#define NVC397_SET_Z_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVC397_SET_Z_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVC397_CLEAR_SURFACE 0x19d0
+#define NVC397_CLEAR_SURFACE_Z_ENABLE 0:0
+#define NVC397_CLEAR_SURFACE_Z_ENABLE_FALSE 0x00000000
+#define NVC397_CLEAR_SURFACE_Z_ENABLE_TRUE 0x00000001
+#define NVC397_CLEAR_SURFACE_STENCIL_ENABLE 1:1
+#define NVC397_CLEAR_SURFACE_STENCIL_ENABLE_FALSE 0x00000000
+#define NVC397_CLEAR_SURFACE_STENCIL_ENABLE_TRUE 0x00000001
+#define NVC397_CLEAR_SURFACE_R_ENABLE 2:2
+#define NVC397_CLEAR_SURFACE_R_ENABLE_FALSE 0x00000000
+#define NVC397_CLEAR_SURFACE_R_ENABLE_TRUE 0x00000001
+#define NVC397_CLEAR_SURFACE_G_ENABLE 3:3
+#define NVC397_CLEAR_SURFACE_G_ENABLE_FALSE 0x00000000
+#define NVC397_CLEAR_SURFACE_G_ENABLE_TRUE 0x00000001
+#define NVC397_CLEAR_SURFACE_B_ENABLE 4:4
+#define NVC397_CLEAR_SURFACE_B_ENABLE_FALSE 0x00000000
+#define NVC397_CLEAR_SURFACE_B_ENABLE_TRUE 0x00000001
+#define NVC397_CLEAR_SURFACE_A_ENABLE 5:5
+#define NVC397_CLEAR_SURFACE_A_ENABLE_FALSE 0x00000000
+#define NVC397_CLEAR_SURFACE_A_ENABLE_TRUE 0x00000001
+#define NVC397_CLEAR_SURFACE_MRT_SELECT 9:6
+#define NVC397_CLEAR_SURFACE_RT_ARRAY_INDEX 25:10
+
+#define NVC397_CLEAR_CLIP_ID_SURFACE 0x19d4
+#define NVC397_CLEAR_CLIP_ID_SURFACE_V 31:0
+
+#define NVC397_SET_COLOR_COMPRESSION(i) (0x19e0+(i)*4)
+#define NVC397_SET_COLOR_COMPRESSION_ENABLE 0:0
+#define NVC397_SET_COLOR_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVC397_SET_COLOR_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_CT_WRITE(i) (0x1a00+(i)*4)
+#define NVC397_SET_CT_WRITE_R_ENABLE 0:0
+#define NVC397_SET_CT_WRITE_R_ENABLE_FALSE 0x00000000
+#define NVC397_SET_CT_WRITE_R_ENABLE_TRUE 0x00000001
+#define NVC397_SET_CT_WRITE_G_ENABLE 4:4
+#define NVC397_SET_CT_WRITE_G_ENABLE_FALSE 0x00000000
+#define NVC397_SET_CT_WRITE_G_ENABLE_TRUE 0x00000001
+#define NVC397_SET_CT_WRITE_B_ENABLE 8:8
+#define NVC397_SET_CT_WRITE_B_ENABLE_FALSE 0x00000000
+#define NVC397_SET_CT_WRITE_B_ENABLE_TRUE 0x00000001
+#define NVC397_SET_CT_WRITE_A_ENABLE 12:12
+#define NVC397_SET_CT_WRITE_A_ENABLE_FALSE 0x00000000
+#define NVC397_SET_CT_WRITE_A_ENABLE_TRUE 0x00000001
+
+#define NVC397_PIPE_NOP 0x1a2c
+#define NVC397_PIPE_NOP_V 31:0
+
+#define NVC397_SET_SPARE00 0x1a30
+#define NVC397_SET_SPARE00_V 31:0
+
+#define NVC397_SET_SPARE01 0x1a34
+#define NVC397_SET_SPARE01_V 31:0
+
+#define NVC397_SET_SPARE02 0x1a38
+#define NVC397_SET_SPARE02_V 31:0
+
+#define NVC397_SET_SPARE03 0x1a3c
+#define NVC397_SET_SPARE03_V 31:0
+
+#define NVC397_SET_REPORT_SEMAPHORE_A 0x1b00
+#define NVC397_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0
+
+#define NVC397_SET_REPORT_SEMAPHORE_B 0x1b04
+#define NVC397_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0
+
+#define NVC397_SET_REPORT_SEMAPHORE_C 0x1b08
+#define NVC397_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0
+
+#define NVC397_SET_REPORT_SEMAPHORE_D 0x1b0c
+#define NVC397_SET_REPORT_SEMAPHORE_D_OPERATION 1:0
+#define NVC397_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000
+#define NVC397_SET_REPORT_SEMAPHORE_D_OPERATION_ACQUIRE 0x00000001
+#define NVC397_SET_REPORT_SEMAPHORE_D_OPERATION_REPORT_ONLY 0x00000002
+#define NVC397_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003
+#define NVC397_SET_REPORT_SEMAPHORE_D_RELEASE 4:4
+#define NVC397_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_READS_COMPLETE 0x00000000
+#define NVC397_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_WRITES_COMPLETE 0x00000001
+#define NVC397_SET_REPORT_SEMAPHORE_D_ACQUIRE 8:8
+#define NVC397_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_WRITES_START 0x00000000
+#define NVC397_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_READS_START 0x00000001
+#define NVC397_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION 15:12
+#define NVC397_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_NONE 0x00000000
+#define NVC397_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DATA_ASSEMBLER 0x00000001
+#define NVC397_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VERTEX_SHADER 0x00000002
+#define NVC397_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_INIT_SHADER 0x00000008
+#define NVC397_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_SHADER 0x00000009
+#define NVC397_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_GEOMETRY_SHADER 0x00000006
+#define NVC397_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_STREAMING_OUTPUT 0x00000005
+#define NVC397_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VPC 0x00000004
+#define NVC397_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ZCULL 0x00000007
+#define NVC397_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_PIXEL_SHADER 0x0000000A
+#define NVC397_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DEPTH_TEST 0x0000000C
+#define NVC397_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ALL 0x0000000F
+#define NVC397_SET_REPORT_SEMAPHORE_D_COMPARISON 16:16
+#define NVC397_SET_REPORT_SEMAPHORE_D_COMPARISON_EQ 0x00000000
+#define NVC397_SET_REPORT_SEMAPHORE_D_COMPARISON_GE 0x00000001
+#define NVC397_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20
+#define NVC397_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000
+#define NVC397_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT 27:23
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_NONE 0x00000000
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_DA_VERTICES_GENERATED 0x00000001
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_DA_PRIMITIVES_GENERATED 0x00000003
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_VS_INVOCATIONS 0x00000005
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_TI_INVOCATIONS 0x0000001B
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_TS_INVOCATIONS 0x0000001D
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_TS_PRIMITIVES_GENERATED 0x0000001F
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_GS_INVOCATIONS 0x00000007
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_GS_PRIMITIVES_GENERATED 0x00000009
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_ALPHA_BETA_CLOCKS 0x00000004
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_SCG_CLOCKS 0x00000008
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_VTG_PRIMITIVES_OUT 0x00000012
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x0000001E
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_SUCCEEDED 0x0000000B
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED 0x0000000D
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000006
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_BYTE_COUNT 0x0000001A
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_INVOCATIONS 0x0000000F
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_PRIMITIVES_GENERATED 0x00000011
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS0 0x0000000A
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS1 0x0000000C
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS2 0x0000000E
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS3 0x00000010
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_PS_INVOCATIONS 0x00000013
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT 0x00000002
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT64 0x00000015
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_TILED_ZPASS_PIXEL_CNT64 0x00000017
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_COLOR_TARGET 0x00000018
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_ZETA_TARGET 0x00000019
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_BOUNDING_RECTANGLE 0x0000001C
+#define NVC397_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28
+#define NVC397_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000
+#define NVC397_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001
+#define NVC397_SET_REPORT_SEMAPHORE_D_SUB_REPORT 7:5
+#define NVC397_SET_REPORT_SEMAPHORE_D_REPORT_DWORD_NUMBER 21:21
+#define NVC397_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2
+#define NVC397_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000
+#define NVC397_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001
+#define NVC397_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3
+#define NVC397_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000
+#define NVC397_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001
+#define NVC397_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9
+#define NVC397_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000
+#define NVC397_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001
+#define NVC397_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002
+#define NVC397_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003
+#define NVC397_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004
+#define NVC397_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005
+#define NVC397_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006
+#define NVC397_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007
+#define NVC397_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17
+#define NVC397_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000
+#define NVC397_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001
+
+#define NVC397_SET_VERTEX_STREAM_A_FORMAT(j) (0x1c00+(j)*16)
+#define NVC397_SET_VERTEX_STREAM_A_FORMAT_STRIDE 11:0
+#define NVC397_SET_VERTEX_STREAM_A_FORMAT_ENABLE 12:12
+#define NVC397_SET_VERTEX_STREAM_A_FORMAT_ENABLE_FALSE 0x00000000
+#define NVC397_SET_VERTEX_STREAM_A_FORMAT_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_VERTEX_STREAM_A_LOCATION_A(j) (0x1c04+(j)*16)
+#define NVC397_SET_VERTEX_STREAM_A_LOCATION_A_OFFSET_UPPER 7:0
+
+#define NVC397_SET_VERTEX_STREAM_A_LOCATION_B(j) (0x1c08+(j)*16)
+#define NVC397_SET_VERTEX_STREAM_A_LOCATION_B_OFFSET_LOWER 31:0
+
+#define NVC397_SET_VERTEX_STREAM_A_FREQUENCY(j) (0x1c0c+(j)*16)
+#define NVC397_SET_VERTEX_STREAM_A_FREQUENCY_V 31:0
+
+#define NVC397_SET_VERTEX_STREAM_B_FORMAT(j) (0x1d00+(j)*16)
+#define NVC397_SET_VERTEX_STREAM_B_FORMAT_STRIDE 11:0
+#define NVC397_SET_VERTEX_STREAM_B_FORMAT_ENABLE 12:12
+#define NVC397_SET_VERTEX_STREAM_B_FORMAT_ENABLE_FALSE 0x00000000
+#define NVC397_SET_VERTEX_STREAM_B_FORMAT_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_VERTEX_STREAM_B_LOCATION_A(j) (0x1d04+(j)*16)
+#define NVC397_SET_VERTEX_STREAM_B_LOCATION_A_OFFSET_UPPER 7:0
+
+#define NVC397_SET_VERTEX_STREAM_B_LOCATION_B(j) (0x1d08+(j)*16)
+#define NVC397_SET_VERTEX_STREAM_B_LOCATION_B_OFFSET_LOWER 31:0
+
+#define NVC397_SET_VERTEX_STREAM_B_FREQUENCY(j) (0x1d0c+(j)*16)
+#define NVC397_SET_VERTEX_STREAM_B_FREQUENCY_V 31:0
+
+#define NVC397_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA(j) (0x1e00+(j)*32)
+#define NVC397_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE 0:0
+#define NVC397_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000
+#define NVC397_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_OP(j) (0x1e04+(j)*32)
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_OP_V 31:0
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MIN 0x00008007
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MAX 0x00008008
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_ADD 0x00000001
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_SUBTRACT 0x00000002
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MIN 0x00000004
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MAX 0x00000005
+
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF(j) (0x1e08+(j)*32)
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V 31:0
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define
NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF(j) (0x1e0c+(j)*32) +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V 31:0 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define 
NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC397_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_OP(j) (0x1e10+(j)*32) +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_OP_V 31:0 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MIN 0x00008007 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MAX 0x00008008 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_ADD 0x00000001 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MIN 0x00000004 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MAX 0x00000005 + +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF(j) (0x1e14+(j)*32) +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V 31:0 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 
+#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF(j) (0x1e18+(j)*32) +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V 31:0 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 
+#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC397_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC397_SET_VERTEX_STREAM_LIMIT_A_A(j) (0x1f00+(j)*8) +#define NVC397_SET_VERTEX_STREAM_LIMIT_A_A_UPPER 7:0 + +#define NVC397_SET_VERTEX_STREAM_LIMIT_A_B(j) (0x1f04+(j)*8) +#define NVC397_SET_VERTEX_STREAM_LIMIT_A_B_LOWER 31:0 + +#define NVC397_SET_VERTEX_STREAM_LIMIT_B_A(j) (0x1f80+(j)*8) +#define NVC397_SET_VERTEX_STREAM_LIMIT_B_A_UPPER 7:0 + +#define NVC397_SET_VERTEX_STREAM_LIMIT_B_B(j) (0x1f84+(j)*8) +#define NVC397_SET_VERTEX_STREAM_LIMIT_B_B_LOWER 31:0 + +#define NVC397_SET_PIPELINE_SHADER(j) (0x2000+(j)*64) +#define NVC397_SET_PIPELINE_SHADER_ENABLE 0:0 +#define NVC397_SET_PIPELINE_SHADER_ENABLE_FALSE 0x00000000 +#define NVC397_SET_PIPELINE_SHADER_ENABLE_TRUE 0x00000001 +#define NVC397_SET_PIPELINE_SHADER_TYPE 7:4 +#define NVC397_SET_PIPELINE_SHADER_TYPE_VERTEX_CULL_BEFORE_FETCH 0x00000000 +#define NVC397_SET_PIPELINE_SHADER_TYPE_VERTEX 0x00000001 +#define NVC397_SET_PIPELINE_SHADER_TYPE_TESSELLATION_INIT 0x00000002 +#define NVC397_SET_PIPELINE_SHADER_TYPE_TESSELLATION 0x00000003 +#define NVC397_SET_PIPELINE_SHADER_TYPE_GEOMETRY 0x00000004 +#define NVC397_SET_PIPELINE_SHADER_TYPE_PIXEL 0x00000005 + +#define NVC397_SET_PIPELINE_RESERVED_A(j) (0x2008+(j)*64) +#define NVC397_SET_PIPELINE_RESERVED_A_V 0:0 + +#define NVC397_SET_PIPELINE_REGISTER_COUNT(j) (0x200c+(j)*64) +#define NVC397_SET_PIPELINE_REGISTER_COUNT_V 8:0 + +#define 
NVC397_SET_PIPELINE_BINDING(j) (0x2010+(j)*64) +#define NVC397_SET_PIPELINE_BINDING_GROUP 2:0 + +#define NVC397_SET_PIPELINE_PROGRAM_ADDRESS_A(j) (0x2014+(j)*64) +#define NVC397_SET_PIPELINE_PROGRAM_ADDRESS_A_UPPER 7:0 + +#define NVC397_SET_PIPELINE_PROGRAM_ADDRESS_B(j) (0x2018+(j)*64) +#define NVC397_SET_PIPELINE_PROGRAM_ADDRESS_B_LOWER 31:0 + +#define NVC397_SET_PIPELINE_RESERVED_D(j) (0x201c+(j)*64) +#define NVC397_SET_PIPELINE_RESERVED_D_V 0:0 + +#define NVC397_SET_PIPELINE_RESERVED_E(j) (0x2020+(j)*64) +#define NVC397_SET_PIPELINE_RESERVED_E_V 0:0 + +#define NVC397_SET_FALCON00 0x2300 +#define NVC397_SET_FALCON00_V 31:0 + +#define NVC397_SET_FALCON01 0x2304 +#define NVC397_SET_FALCON01_V 31:0 + +#define NVC397_SET_FALCON02 0x2308 +#define NVC397_SET_FALCON02_V 31:0 + +#define NVC397_SET_FALCON03 0x230c +#define NVC397_SET_FALCON03_V 31:0 + +#define NVC397_SET_FALCON04 0x2310 +#define NVC397_SET_FALCON04_V 31:0 + +#define NVC397_SET_FALCON05 0x2314 +#define NVC397_SET_FALCON05_V 31:0 + +#define NVC397_SET_FALCON06 0x2318 +#define NVC397_SET_FALCON06_V 31:0 + +#define NVC397_SET_FALCON07 0x231c +#define NVC397_SET_FALCON07_V 31:0 + +#define NVC397_SET_FALCON08 0x2320 +#define NVC397_SET_FALCON08_V 31:0 + +#define NVC397_SET_FALCON09 0x2324 +#define NVC397_SET_FALCON09_V 31:0 + +#define NVC397_SET_FALCON10 0x2328 +#define NVC397_SET_FALCON10_V 31:0 + +#define NVC397_SET_FALCON11 0x232c +#define NVC397_SET_FALCON11_V 31:0 + +#define NVC397_SET_FALCON12 0x2330 +#define NVC397_SET_FALCON12_V 31:0 + +#define NVC397_SET_FALCON13 0x2334 +#define NVC397_SET_FALCON13_V 31:0 + +#define NVC397_SET_FALCON14 0x2338 +#define NVC397_SET_FALCON14_V 31:0 + +#define NVC397_SET_FALCON15 0x233c +#define NVC397_SET_FALCON15_V 31:0 + +#define NVC397_SET_FALCON16 0x2340 +#define NVC397_SET_FALCON16_V 31:0 + +#define NVC397_SET_FALCON17 0x2344 +#define NVC397_SET_FALCON17_V 31:0 + +#define NVC397_SET_FALCON18 0x2348 +#define NVC397_SET_FALCON18_V 31:0 + +#define NVC397_SET_FALCON19 0x234c +#define NVC397_SET_FALCON19_V 31:0 + +#define NVC397_SET_FALCON20 0x2350 +#define NVC397_SET_FALCON20_V 31:0 + +#define NVC397_SET_FALCON21 0x2354 +#define NVC397_SET_FALCON21_V 31:0 + +#define NVC397_SET_FALCON22 0x2358 +#define NVC397_SET_FALCON22_V 31:0 + +#define NVC397_SET_FALCON23 0x235c +#define NVC397_SET_FALCON23_V 31:0 + +#define NVC397_SET_FALCON24 0x2360 +#define NVC397_SET_FALCON24_V 31:0 + +#define NVC397_SET_FALCON25 0x2364 +#define NVC397_SET_FALCON25_V 31:0 + +#define NVC397_SET_FALCON26 0x2368 +#define NVC397_SET_FALCON26_V 31:0 + +#define NVC397_SET_FALCON27 0x236c +#define NVC397_SET_FALCON27_V 31:0 + +#define NVC397_SET_FALCON28 0x2370 +#define NVC397_SET_FALCON28_V 31:0 + +#define NVC397_SET_FALCON29 0x2374 +#define NVC397_SET_FALCON29_V 31:0 + +#define NVC397_SET_FALCON30 0x2378 +#define NVC397_SET_FALCON30_V 31:0 + +#define NVC397_SET_FALCON31 0x237c +#define NVC397_SET_FALCON31_V 31:0 + +#define NVC397_SET_CONSTANT_BUFFER_SELECTOR_A 0x2380 +#define NVC397_SET_CONSTANT_BUFFER_SELECTOR_A_SIZE 16:0 + +#define NVC397_SET_CONSTANT_BUFFER_SELECTOR_B 0x2384 +#define NVC397_SET_CONSTANT_BUFFER_SELECTOR_B_ADDRESS_UPPER 7:0 + +#define NVC397_SET_CONSTANT_BUFFER_SELECTOR_C 0x2388 +#define NVC397_SET_CONSTANT_BUFFER_SELECTOR_C_ADDRESS_LOWER 31:0 + +#define NVC397_LOAD_CONSTANT_BUFFER_OFFSET 0x238c +#define NVC397_LOAD_CONSTANT_BUFFER_OFFSET_V 15:0 + +#define NVC397_LOAD_CONSTANT_BUFFER(i) (0x2390+(i)*4) +#define NVC397_LOAD_CONSTANT_BUFFER_V 31:0 + +#define NVC397_BIND_GROUP_RESERVED_A(j) (0x2400+(j)*32) 
+#define NVC397_BIND_GROUP_RESERVED_A_V 0:0 + +#define NVC397_BIND_GROUP_RESERVED_B(j) (0x2404+(j)*32) +#define NVC397_BIND_GROUP_RESERVED_B_V 0:0 + +#define NVC397_BIND_GROUP_RESERVED_C(j) (0x2408+(j)*32) +#define NVC397_BIND_GROUP_RESERVED_C_V 0:0 + +#define NVC397_BIND_GROUP_RESERVED_D(j) (0x240c+(j)*32) +#define NVC397_BIND_GROUP_RESERVED_D_V 0:0 + +#define NVC397_BIND_GROUP_CONSTANT_BUFFER(j) (0x2410+(j)*32) +#define NVC397_BIND_GROUP_CONSTANT_BUFFER_VALID 0:0 +#define NVC397_BIND_GROUP_CONSTANT_BUFFER_VALID_FALSE 0x00000000 +#define NVC397_BIND_GROUP_CONSTANT_BUFFER_VALID_TRUE 0x00000001 +#define NVC397_BIND_GROUP_CONSTANT_BUFFER_SHADER_SLOT 8:4 + +#define NVC397_SET_TRAP_HANDLER_A 0x25f8 +#define NVC397_SET_TRAP_HANDLER_A_ADDRESS_UPPER 16:0 + +#define NVC397_SET_TRAP_HANDLER_B 0x25fc +#define NVC397_SET_TRAP_HANDLER_B_ADDRESS_LOWER 31:0 + +#define NVC397_SET_COLOR_CLAMP 0x2600 +#define NVC397_SET_COLOR_CLAMP_ENABLE 0:0 +#define NVC397_SET_COLOR_CLAMP_ENABLE_FALSE 0x00000000 +#define NVC397_SET_COLOR_CLAMP_ENABLE_TRUE 0x00000001 + +#define NVC397_SET_BINDLESS_TEXTURE 0x2608 +#define NVC397_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 4:0 + +#define NVC397_SET_STREAM_OUT_LAYOUT_SELECT(i,j) (0x2800+(i)*128+(j)*4) +#define NVC397_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER00 7:0 +#define NVC397_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER01 15:8 +#define NVC397_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER02 23:16 +#define NVC397_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER03 31:24 + +#define NVC397_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE(i) (0x32f4+(i)*4) +#define NVC397_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_V 31:0 + +#define NVC397_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER(i) (0x3314+(i)*4) +#define NVC397_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC397_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3334 +#define NVC397_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC397_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3338 +#define NVC397_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4) +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22 +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25 +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27 +#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30 + +#define 
NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4)
+#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0
+#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1
+#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3
+#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4
+
+#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc
+#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0
+
+#define NVC397_START_SHADER_PERFORMANCE_COUNTER 0x33e0
+#define NVC397_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVC397_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4
+#define NVC397_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER 0x33e8
+#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER_V 31:0
+
+#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER 0x33ec
+#define NVC397_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER_V 31:0
+
+#define NVC397_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4)
+#define NVC397_SET_MME_SHADOW_SCRATCH_V 31:0
+
+#define NVC397_CALL_MME_MACRO(j) (0x3800+(j)*8)
+#define NVC397_CALL_MME_MACRO_V 31:0
+
+#define NVC397_CALL_MME_DATA(j) (0x3804+(j)*8)
+#define NVC397_CALL_MME_DATA_V 31:0
+
+#endif /* _cl_volta_a_h_ */
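The `HI:LO` tokens used throughout these class headers name a bit range inside a 32-bit method payload, and they are written so that a C conditional expression can split them apart: `(0 ? 27:23)` evaluates to 23 (the shift) and `(1 ? 27:23)` to 27 (the high bit). A minimal sketch of how such defines are consumed follows; the `FLD_*` helpers are hypothetical stand-ins modeled on the SDK's DRF macro family, not part of this patch:

/* Illustrative helpers only -- modeled on the DRF-style macros the SDK
 * provides elsewhere (e.g. nvmisc.h); not part of these headers. */
#define FLD_SHIFT(range)  ((0 ? range) % 32)
#define FLD_MASK(range)   (0xFFFFFFFFU >> (31 - ((1 ? range) % 32) + ((0 ? range) % 32)))
#define FLD_NUM(range, n) (((NvU32)(n) & FLD_MASK(range)) << FLD_SHIFT(range))

/* Pack a SET_REPORT_SEMAPHORE_D payload requesting a ZPASS_PIXEL_CNT
 * report with AWAKEN enabled; field positions come from the defines above. */
static NvU32 zpass_report_payload(void)
{
    return FLD_NUM(NVC397_SET_REPORT_SEMAPHORE_D_OPERATION,
                   NVC397_SET_REPORT_SEMAPHORE_D_OPERATION_REPORT_ONLY)
         | FLD_NUM(NVC397_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE,
                   NVC397_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE)
         | FLD_NUM(NVC397_SET_REPORT_SEMAPHORE_D_REPORT,
                   NVC397_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT);
}
/* The indexed forms are plain offset arithmetic, e.g.
 * NVC397_CALL_MME_MACRO(3) == 0x3800 + 3*8 == 0x3818. */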
diff --git a/src/common/sdk/nvidia/inc/class/clc46f.h b/src/common/sdk/nvidia/inc/class/clc46f.h
new file mode 100644
index 0000000..fb1c594
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc46f.h
@@ -0,0 +1,365 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc46f_h_
+#define _clc46f_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvtypes.h"
+
+/* class TURING_CHANNEL_GPFIFO */
+/*
+ * Documentation for TURING_CHANNEL_GPFIFO can be found in dev_pbdma.ref,
+ * chapter "User Control Registers". It is documented as device NV_UDMA.
+ * The GPFIFO format itself is also documented in dev_pbdma.ref,
+ * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref,
+ * chapter "FIFO DMA RAM", NV_FIFO_DMA_*.
+ *
+ * Note there is no .mfs file for this class.
+ */
+#define TURING_CHANNEL_GPFIFO_A (0x0000C46F)
+
+#define NVC46F_TYPEDEF TURING_CHANNELChannelGPFifoA
+
+/* dma flow control data structure */
+typedef volatile struct Nvc46fControl_struct {
+ NvU32 Ignored00[0x010]; /* 0000-003f*/
+ NvU32 Put; /* put offset, read/write 0040-0043*/
+ NvU32 Get; /* get offset, read only 0044-0047*/
+ NvU32 Reference; /* reference value, read only 0048-004b*/
+ NvU32 PutHi; /* high order put offset bits 004c-004f*/
+ NvU32 Ignored01[0x002]; /* 0050-0057*/
+ NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/
+ NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/
+ NvU32 GetHi; /* high order get offset bits 0060-0063*/
+ NvU32 Ignored02[0x007]; /* 0064-007f*/
+ NvU32 Ignored03; /* used to be engine yield 0080-0083*/
+ NvU32 Ignored04[0x001]; /* 0084-0087*/
+ NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/
+ NvU32 GPPut; /* GP FIFO put offset 008c-008f*/
+ NvU32 Ignored05[0x5c];
+} Nvc46fControl, TuringAControlGPFifo;
+
+/* fields and values */
+#define NVC46F_NUMBER_OF_SUBCHANNELS (8)
+#define NVC46F_SET_OBJECT (0x00000000)
+#define NVC46F_SET_OBJECT_NVCLASS 15:0
+#define NVC46F_SET_OBJECT_ENGINE 20:16
+#define NVC46F_SET_OBJECT_ENGINE_SW 0x0000001f
+#define NVC46F_ILLEGAL (0x00000004)
+#define NVC46F_ILLEGAL_HANDLE 31:0
+#define NVC46F_NOP (0x00000008)
+#define NVC46F_NOP_HANDLE 31:0
+#define NVC46F_SEMAPHOREA (0x00000010)
+#define NVC46F_SEMAPHOREA_OFFSET_UPPER 7:0
+#define NVC46F_SEMAPHOREB (0x00000014)
+#define NVC46F_SEMAPHOREB_OFFSET_LOWER 31:2
+#define NVC46F_SEMAPHOREC (0x00000018)
+#define NVC46F_SEMAPHOREC_PAYLOAD 31:0
+#define NVC46F_SEMAPHORED (0x0000001C)
+#define NVC46F_SEMAPHORED_OPERATION 4:0
+#define NVC46F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001
+#define NVC46F_SEMAPHORED_OPERATION_RELEASE 0x00000002
+#define NVC46F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004
+#define NVC46F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008
+#define NVC46F_SEMAPHORED_OPERATION_REDUCTION 0x00000010
+#define NVC46F_SEMAPHORED_ACQUIRE_SWITCH 12:12
+#define NVC46F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000
+#define NVC46F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001
+#define NVC46F_SEMAPHORED_RELEASE_WFI 20:20
+#define NVC46F_SEMAPHORED_RELEASE_WFI_EN 0x00000000
+#define NVC46F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001
+#define NVC46F_SEMAPHORED_RELEASE_SIZE 24:24
+#define NVC46F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000
+#define NVC46F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001
+#define NVC46F_SEMAPHORED_REDUCTION 30:27
+#define NVC46F_SEMAPHORED_REDUCTION_MIN 0x00000000
+#define NVC46F_SEMAPHORED_REDUCTION_MAX 0x00000001
+#define NVC46F_SEMAPHORED_REDUCTION_XOR 0x00000002
+#define NVC46F_SEMAPHORED_REDUCTION_AND 0x00000003
+#define NVC46F_SEMAPHORED_REDUCTION_OR 0x00000004
+#define NVC46F_SEMAPHORED_REDUCTION_ADD 0x00000005
+#define NVC46F_SEMAPHORED_REDUCTION_INC 0x00000006
+#define NVC46F_SEMAPHORED_REDUCTION_DEC 0x00000007
+#define NVC46F_SEMAPHORED_FORMAT 31:31
+#define NVC46F_SEMAPHORED_FORMAT_SIGNED 0x00000000
+#define NVC46F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001
+#define NVC46F_NON_STALL_INTERRUPT (0x00000020)
+#define NVC46F_NON_STALL_INTERRUPT_HANDLE 31:0
+#define NVC46F_FB_FLUSH (0x00000024) // Deprecated - use MEMBAR TYPE SYS_MEMBAR
+#define NVC46F_FB_FLUSH_HANDLE 31:0
+// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for
+// specifying the page address for a targeted TLB invalidate and the uTLB for
+// a targeted REPLAY_CANCEL for UVM.
+// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly +// rearranged fields. +#define NVC46F_MEM_OP_A (0x00000028) +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_INVALIDATION_SIZE 5:0 // Used to specify size of invalidate, used for invalidates which are not of the REPLAY_CANCEL_TARGETED type +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID 6:0 // only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11 +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001 +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000 +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12 +#define NVC46F_MEM_OP_B (0x0000002c) +#define NVC46F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0 +#define NVC46F_MEM_OP_C (0x00000030) +#define NVC46F_MEM_OP_C_MEMBAR_TYPE 2:0 +#define NVC46F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000 +#define NVC46F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE 9:7 //only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_READ 0 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE 1 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_STRONG 2 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_RSVRVD 3 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_WEAK 4 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_ALL 5 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE_AND_ATOMIC 6 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ALL 7 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 
0x00000004 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE +#define NVC46F_MEM_OP_C_ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG 19:0 +// MEM_OP_D MUST be preceded by MEM_OPs A-C. +#define NVC46F_MEM_OP_D (0x00000034) +#define NVC46F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE +#define NVC46F_MEM_OP_D_OPERATION 31:27 +#define NVC46F_MEM_OP_D_OPERATION_MEMBAR 0x00000005 +#define NVC46F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVC46F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a +#define NVC46F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVC46F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +// CLEAN_LINES is an alias for Tegra/GPU IP usage +#define NVC46F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e +#define NVC46F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVC46F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVC46F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015 +#define NVC46F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR 0x00000016 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE 1:0 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC 0x00000000 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC 0x00000001 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_ALL 0x00000002 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_TARGETED 0x00000003 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE 2:2 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC 0x00000000 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC 0x00000001 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_BANK 6:3 +#define NVC46F_SET_REFERENCE (0x00000050) +#define NVC46F_SET_REFERENCE_COUNT 31:0 +#define NVC46F_SEM_ADDR_LO (0x0000005c) +#define NVC46F_SEM_ADDR_LO_OFFSET 31:2 +#define NVC46F_SEM_ADDR_HI (0x00000060) +#define NVC46F_SEM_ADDR_HI_OFFSET 7:0 +#define NVC46F_SEM_PAYLOAD_LO (0x00000064) +#define NVC46F_SEM_PAYLOAD_LO_PAYLOAD 31:0 +#define NVC46F_SEM_PAYLOAD_HI (0x00000068) +#define NVC46F_SEM_PAYLOAD_HI_PAYLOAD 31:0 +#define NVC46F_SEM_EXECUTE (0x0000006c) +#define NVC46F_SEM_EXECUTE_OPERATION 2:0 +#define NVC46F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000 +#define NVC46F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001 +#define NVC46F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002 +#define NVC46F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003 +#define NVC46F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004 +#define NVC46F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005 +#define NVC46F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006 +#define NVC46F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12 +#define NVC46F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000 +#define NVC46F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001 +#define NVC46F_SEM_EXECUTE_RELEASE_WFI 20:20 +#define NVC46F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000 +#define NVC46F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001 +#define NVC46F_SEM_EXECUTE_PAYLOAD_SIZE 24:24 +#define 
NVC46F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000 +#define NVC46F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001 +#define NVC46F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25 +#define NVC46F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000 +#define NVC46F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001 +#define NVC46F_SEM_EXECUTE_REDUCTION 30:27 +#define NVC46F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000 +#define NVC46F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001 +#define NVC46F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002 +#define NVC46F_SEM_EXECUTE_REDUCTION_IAND 0x00000003 +#define NVC46F_SEM_EXECUTE_REDUCTION_IOR 0x00000004 +#define NVC46F_SEM_EXECUTE_REDUCTION_IADD 0x00000005 +#define NVC46F_SEM_EXECUTE_REDUCTION_INC 0x00000006 +#define NVC46F_SEM_EXECUTE_REDUCTION_DEC 0x00000007 +#define NVC46F_SEM_EXECUTE_REDUCTION_FORMAT 31:31 +#define NVC46F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000 +#define NVC46F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001 +#define NVC46F_WFI (0x00000078) +#define NVC46F_WFI_SCOPE 0:0 +#define NVC46F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000 +#define NVC46F_WFI_SCOPE_CURRENT_VEID 0x00000000 +#define NVC46F_WFI_SCOPE_ALL 0x00000001 +#define NVC46F_CRC_CHECK (0x0000007c) +#define NVC46F_CRC_CHECK_VALUE 31:0 +#define NVC46F_YIELD (0x00000080) +#define NVC46F_YIELD_OP 1:0 +#define NVC46F_YIELD_OP_NOP 0x00000000 +#define NVC46F_YIELD_OP_RUNLIST_TIMESLICE 0x00000002 +#define NVC46F_YIELD_OP_TSG 0x00000003 +#define NVC46F_CLEAR_FAULTED (0x00000084) +// Note: RM provides the HANDLE as an opaque value; the internal detail fields +// are intentionally not exposed to the driver through these defines. +#define NVC46F_CLEAR_FAULTED_HANDLE 30:0 +#define NVC46F_CLEAR_FAULTED_TYPE 31:31 +#define NVC46F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED 0x00000000 +#define NVC46F_CLEAR_FAULTED_TYPE_ENG_FAULTED 0x00000001 + + +/* GPFIFO entry format */ +#define NVC46F_GP_ENTRY__SIZE 8 +#define NVC46F_GP_ENTRY0_FETCH 0:0 +#define NVC46F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVC46F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVC46F_GP_ENTRY0_GET 31:2 +#define NVC46F_GP_ENTRY0_OPERAND 31:0 +#define NVC46F_GP_ENTRY1_GET_HI 7:0 +#define NVC46F_GP_ENTRY1_LEVEL 9:9 +#define NVC46F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVC46F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVC46F_GP_ENTRY1_LENGTH 30:10 +#define NVC46F_GP_ENTRY1_SYNC 31:31 +#define NVC46F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVC46F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVC46F_GP_ENTRY1_OPCODE 7:0 +#define NVC46F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVC46F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVC46F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVC46F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVC46F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVC46F_DMA_METHOD_ADDRESS 11:0 +#define NVC46F_DMA_SUBDEVICE_MASK 15:4 +#define NVC46F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVC46F_DMA_TERT_OP 17:16 +#define NVC46F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVC46F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NVC46F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVC46F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVC46F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVC46F_DMA_METHOD_COUNT_OLD 28:18 +#define NVC46F_DMA_METHOD_COUNT 28:16 +#define NVC46F_DMA_IMMD_DATA 28:16 +#define NVC46F_DMA_SEC_OP 31:29 +#define NVC46F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVC46F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVC46F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define 
NVC46F_DMA_SEC_OP_NON_INC_METHOD (0x00000003)
+#define NVC46F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004)
+#define NVC46F_DMA_SEC_OP_ONE_INC (0x00000005)
+#define NVC46F_DMA_SEC_OP_RESERVED6 (0x00000006)
+#define NVC46F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007)
+/* dma incrementing method format */
+#define NVC46F_DMA_INCR_ADDRESS 11:0
+#define NVC46F_DMA_INCR_SUBCHANNEL 15:13
+#define NVC46F_DMA_INCR_COUNT 28:16
+#define NVC46F_DMA_INCR_OPCODE 31:29
+#define NVC46F_DMA_INCR_OPCODE_VALUE (0x00000001)
+#define NVC46F_DMA_INCR_DATA 31:0
+/* dma non-incrementing method format */
+#define NVC46F_DMA_NONINCR_ADDRESS 11:0
+#define NVC46F_DMA_NONINCR_SUBCHANNEL 15:13
+#define NVC46F_DMA_NONINCR_COUNT 28:16
+#define NVC46F_DMA_NONINCR_OPCODE 31:29
+#define NVC46F_DMA_NONINCR_OPCODE_VALUE (0x00000003)
+#define NVC46F_DMA_NONINCR_DATA 31:0
+/* dma increment-once method format */
+#define NVC46F_DMA_ONEINCR_ADDRESS 11:0
+#define NVC46F_DMA_ONEINCR_SUBCHANNEL 15:13
+#define NVC46F_DMA_ONEINCR_COUNT 28:16
+#define NVC46F_DMA_ONEINCR_OPCODE 31:29
+#define NVC46F_DMA_ONEINCR_OPCODE_VALUE (0x00000005)
+#define NVC46F_DMA_ONEINCR_DATA 31:0
+/* dma no-operation format */
+#define NVC46F_DMA_NOP (0x00000000)
+/* dma immediate-data format */
+#define NVC46F_DMA_IMMD_ADDRESS 11:0
+#define NVC46F_DMA_IMMD_SUBCHANNEL 15:13
+#define NVC46F_DMA_IMMD_DATA 28:16
+#define NVC46F_DMA_IMMD_OPCODE 31:29
+#define NVC46F_DMA_IMMD_OPCODE_VALUE (0x00000004)
+/* dma set sub-device mask format */
+#define NVC46F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4
+#define NVC46F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16
+#define NVC46F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001)
+/* dma store sub-device mask format */
+#define NVC46F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4
+#define NVC46F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16
+#define NVC46F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002)
+/* dma use sub-device mask format */
+#define NVC46F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16
+#define NVC46F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003)
+/* dma end-segment format */
+#define NVC46F_DMA_ENDSEG_OPCODE 31:29
+#define NVC46F_DMA_ENDSEG_OPCODE_VALUE (0x00000007)
+/* dma legacy incrementing/non-incrementing formats */
+#define NVC46F_DMA_ADDRESS 12:2
+#define NVC46F_DMA_SUBCH 15:13
+#define NVC46F_DMA_OPCODE3 17:16
+#define NVC46F_DMA_OPCODE3_NONE (0x00000000)
+#define NVC46F_DMA_COUNT 28:18
+#define NVC46F_DMA_OPCODE 31:29
+#define NVC46F_DMA_OPCODE_METHOD (0x00000000)
+#define NVC46F_DMA_OPCODE_NONINC_METHOD (0x00000002)
+#define NVC46F_DMA_DATA 31:0
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif /* _clc46f_h_ */
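Putting the pieces of TURING_CHANNEL_GPFIFO_A together: user code writes method headers and data into a pushbuffer, points a GP entry at that segment, and publishes the entry by advancing GPPut in the Nvc46fControl page. A minimal sketch follows (illustrative only, not part of the patch); the FLD_* packers are the hypothetical DRF-style helpers from the earlier sketch, and the doorbell write real drivers perform after updating GPPut is omitted:

/* Illustrative helpers only -- hypothetical DRF-style field packers. */
#define FLD_SHIFT(range)  ((0 ? range) % 32)
#define FLD_MASK(range)   (0xFFFFFFFFU >> (31 - ((1 ? range) % 32) + ((0 ? range) % 32)))
#define FLD_NUM(range, n) (((NvU32)(n) & FLD_MASK(range)) << FLD_SHIFT(range))

/* Header for an incrementing method run: the `count` dwords that follow it
 * in the pushbuffer land on consecutive methods starting at byte offset
 * `mthd` (the ADDRESS field holds the dword address, hence the >> 2). */
static NvU32 incr_header(NvU32 subch, NvU32 mthd, NvU32 count)
{
    return FLD_NUM(NVC46F_DMA_INCR_OPCODE, NVC46F_DMA_INCR_OPCODE_VALUE)
         | FLD_NUM(NVC46F_DMA_INCR_COUNT, count)
         | FLD_NUM(NVC46F_DMA_INCR_SUBCHANNEL, subch)
         | FLD_NUM(NVC46F_DMA_INCR_ADDRESS, mthd >> 2);
}

/* Point GP entry `idx` at a finished pushbuffer segment and publish it.
 * One GP entry is two dwords (NVC46F_GP_ENTRY__SIZE == 8 bytes); LENGTH
 * is in dwords, and LEVEL is left at its MAIN (zero) encoding. */
static void submit_segment(Nvc46fControl *ctrl, NvU32 *gpfifo, NvU32 idx,
                           NvU64 pb_gpuva, NvU32 pb_bytes)
{
    gpfifo[2 * idx + 0] = FLD_NUM(NVC46F_GP_ENTRY0_GET, (NvU32)(pb_gpuva >> 2));
    gpfifo[2 * idx + 1] = FLD_NUM(NVC46F_GP_ENTRY1_GET_HI, (NvU32)(pb_gpuva >> 32))
                        | FLD_NUM(NVC46F_GP_ENTRY1_LENGTH, pb_bytes / 4);
    ctrl->GPPut = idx + 1;  /* entries up to, but not including, GPPut are fetched */
}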
diff --git a/src/common/sdk/nvidia/inc/class/clc56f.h b/src/common/sdk/nvidia/inc/class/clc56f.h
new file mode 100644
index 0000000..bc8b675
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc56f.h
@@ -0,0 +1,367 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc56f_h_
+#define _clc56f_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvtypes.h"
+
+/* class AMPERE_CHANNEL_GPFIFO */
+/*
+ * Documentation for AMPERE_CHANNEL_GPFIFO can be found in dev_pbdma.ref,
+ * chapter "User Control Registers". It is documented as device NV_UDMA.
+ * The GPFIFO format itself is also documented in dev_pbdma.ref,
+ * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref,
+ * chapter "FIFO DMA RAM", NV_FIFO_DMA_*.
+ *
+ * Note there is no .mfs file for this class.
+ */
+
+#define AMPERE_CHANNEL_GPFIFO_A (0x0000C56F)
+
+#define NVC56F_TYPEDEF AMPERE_CHANNELChannelGPFifoA
+
+/* dma flow control data structure */
+typedef volatile struct Nvc56fControl_struct {
+ NvU32 Ignored00[0x010]; /* 0000-003f*/
+ NvU32 Put; /* put offset, read/write 0040-0043*/
+ NvU32 Get; /* get offset, read only 0044-0047*/
+ NvU32 Reference; /* reference value, read only 0048-004b*/
+ NvU32 PutHi; /* high order put offset bits 004c-004f*/
+ NvU32 Ignored01[0x002]; /* 0050-0057*/
+ NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/
+ NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/
+ NvU32 GetHi; /* high order get offset bits 0060-0063*/
+ NvU32 Ignored02[0x007]; /* 0064-007f*/
+ NvU32 Ignored03; /* used to be engine yield 0080-0083*/
+ NvU32 Ignored04[0x001]; /* 0084-0087*/
+ NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/
+ NvU32 GPPut; /* GP FIFO put offset 008c-008f*/
+ NvU32 Ignored05[0x5c];
+} Nvc56fControl, AmpereAControlGPFifo;
+
+/* fields and values */
+#define NVC56F_NUMBER_OF_SUBCHANNELS (8)
+#define NVC56F_SET_OBJECT (0x00000000)
+#define NVC56F_SET_OBJECT_NVCLASS 15:0
+#define NVC56F_SET_OBJECT_ENGINE 20:16
+#define NVC56F_SET_OBJECT_ENGINE_SW 0x0000001f
+#define NVC56F_ILLEGAL (0x00000004)
+#define NVC56F_ILLEGAL_HANDLE 31:0
+#define NVC56F_NOP (0x00000008)
+#define NVC56F_NOP_HANDLE 31:0
+#define NVC56F_SEMAPHOREA (0x00000010)
+#define NVC56F_SEMAPHOREA_OFFSET_UPPER 7:0
+#define NVC56F_SEMAPHOREB (0x00000014)
+#define NVC56F_SEMAPHOREB_OFFSET_LOWER 31:2
+#define NVC56F_SEMAPHOREC (0x00000018)
+#define NVC56F_SEMAPHOREC_PAYLOAD 31:0
+#define NVC56F_SEMAPHORED (0x0000001C)
+#define NVC56F_SEMAPHORED_OPERATION 4:0
+#define NVC56F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001
+#define NVC56F_SEMAPHORED_OPERATION_RELEASE 0x00000002
+#define NVC56F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004
+#define
NVC56F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NVC56F_SEMAPHORED_OPERATION_REDUCTION 0x00000010 +#define NVC56F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVC56F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVC56F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVC56F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVC56F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVC56F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVC56F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVC56F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVC56F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVC56F_SEMAPHORED_REDUCTION 30:27 +#define NVC56F_SEMAPHORED_REDUCTION_MIN 0x00000000 +#define NVC56F_SEMAPHORED_REDUCTION_MAX 0x00000001 +#define NVC56F_SEMAPHORED_REDUCTION_XOR 0x00000002 +#define NVC56F_SEMAPHORED_REDUCTION_AND 0x00000003 +#define NVC56F_SEMAPHORED_REDUCTION_OR 0x00000004 +#define NVC56F_SEMAPHORED_REDUCTION_ADD 0x00000005 +#define NVC56F_SEMAPHORED_REDUCTION_INC 0x00000006 +#define NVC56F_SEMAPHORED_REDUCTION_DEC 0x00000007 +#define NVC56F_SEMAPHORED_FORMAT 31:31 +#define NVC56F_SEMAPHORED_FORMAT_SIGNED 0x00000000 +#define NVC56F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001 +#define NVC56F_NON_STALL_INTERRUPT (0x00000020) +#define NVC56F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVC56F_FB_FLUSH (0x00000024) // Deprecated - use MEMBAR TYPE SYS_MEMBAR +#define NVC56F_FB_FLUSH_HANDLE 31:0 +// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for +// specifying the page address for a targeted TLB invalidate and the uTLB for +// a targeted REPLAY_CANCEL for UVM. +// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly +// rearranged fields. +#define NVC56F_MEM_OP_A (0x00000028) +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVALIDATION_SIZE 5:0 // Used to specify size of invalidate, used for invalidates which are not of the REPLAY_CANCEL_TARGETED type +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE 7:6 // only relevant for invalidates with NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE for invalidating link TLB only, or non-link TLB only or all TLBs +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_ALL_TLBS 0 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_LINK_TLBS 1 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_NON_LINK_TLBS 2 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_RSVRVD 3 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID 6:0 // only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12 +#define NVC56F_MEM_OP_B (0x0000002c) +#define NVC56F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0 +#define NVC56F_MEM_OP_C (0x00000030) +#define NVC56F_MEM_OP_C_MEMBAR_TYPE 2:0 +#define NVC56F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000 +#define NVC56F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1 +#define 
NVC56F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE 9:7 //only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_READ 0 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE 1 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_STRONG 2 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_RSVRVD 3 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_WEAK 4 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_ALL 5 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE_AND_ATOMIC 6 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ALL 7 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 0x00000004 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE +#define NVC56F_MEM_OP_C_ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG 19:0 +// MEM_OP_D MUST be preceded by MEM_OPs A-C. 
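Because MEM_OP_D must be preceded by MEM_OPs A-C, drivers typically emit all four methods as a single incrementing run even when only C and D carry state. A minimal sketch of issuing a system membar on this class follows; `struct pushbuf` and `push_incr()` are hypothetical helpers, and FLD_NUM/FLD_MASK/FLD_SHIFT are the illustrative DRF-style packers from the earlier sketches, none of which are part of the patch:

/* Illustrative helpers only -- hypothetical DRF-style field packers. */
#define FLD_SHIFT(range)  ((0 ? range) % 32)
#define FLD_MASK(range)   (0xFFFFFFFFU >> (31 - ((1 ? range) % 32) + ((0 ? range) % 32)))
#define FLD_NUM(range, n) (((NvU32)(n) & FLD_MASK(range)) << FLD_SHIFT(range))

/* Hypothetical helper: emits an incrementing method header for `count`
 * dwords starting at method offset `mthd`, followed by the dwords. */
void push_incr(struct pushbuf *pb, NvU32 mthd, const NvU32 *data, NvU32 count);

static void push_sys_membar(struct pushbuf *pb)
{
    NvU32 memop[4] = {
        0,                                               /* MEM_OP_A, unused here */
        0,                                               /* MEM_OP_B, unused here */
        FLD_NUM(NVC56F_MEM_OP_C_MEMBAR_TYPE,
                NVC56F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR), /* MEM_OP_C */
        FLD_NUM(NVC56F_MEM_OP_D_OPERATION,
                NVC56F_MEM_OP_D_OPERATION_MEMBAR),       /* MEM_OP_D */
    };
    push_incr(pb, NVC56F_MEM_OP_A, memop, 4); /* hits 0x28, 0x2c, 0x30, 0x34 in order */
}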
+#define NVC56F_MEM_OP_D (0x00000034) +#define NVC56F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE +#define NVC56F_MEM_OP_D_OPERATION 31:27 +#define NVC56F_MEM_OP_D_OPERATION_MEMBAR 0x00000005 +#define NVC56F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVC56F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a +#define NVC56F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVC56F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +// CLEAN_LINES is an alias for Tegra/GPU IP usage +#define NVC56F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e +#define NVC56F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVC56F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVC56F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015 +#define NVC56F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR 0x00000016 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE 1:0 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC 0x00000000 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC 0x00000001 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_ALL 0x00000002 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_TARGETED 0x00000003 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE 2:2 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC 0x00000000 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC 0x00000001 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_BANK 6:3 +#define NVC56F_SET_REFERENCE (0x00000050) +#define NVC56F_SET_REFERENCE_COUNT 31:0 +#define NVC56F_SEM_ADDR_LO (0x0000005c) +#define NVC56F_SEM_ADDR_LO_OFFSET 31:2 +#define NVC56F_SEM_ADDR_HI (0x00000060) +#define NVC56F_SEM_ADDR_HI_OFFSET 7:0 +#define NVC56F_SEM_PAYLOAD_LO (0x00000064) +#define NVC56F_SEM_PAYLOAD_LO_PAYLOAD 31:0 +#define NVC56F_SEM_PAYLOAD_HI (0x00000068) +#define NVC56F_SEM_PAYLOAD_HI_PAYLOAD 31:0 +#define NVC56F_SEM_EXECUTE (0x0000006c) +#define NVC56F_SEM_EXECUTE_OPERATION 2:0 +#define NVC56F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000 +#define NVC56F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001 +#define NVC56F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002 +#define NVC56F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003 +#define NVC56F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004 +#define NVC56F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005 +#define NVC56F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006 +#define NVC56F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12 +#define NVC56F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000 +#define NVC56F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001 +#define NVC56F_SEM_EXECUTE_RELEASE_WFI 20:20 +#define NVC56F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000 +#define NVC56F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001 +#define NVC56F_SEM_EXECUTE_PAYLOAD_SIZE 24:24 +#define NVC56F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000 +#define NVC56F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001 +#define NVC56F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25 +#define NVC56F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000 +#define NVC56F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001 +#define NVC56F_SEM_EXECUTE_REDUCTION 30:27 +#define NVC56F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000 +#define NVC56F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001 +#define NVC56F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002 +#define NVC56F_SEM_EXECUTE_REDUCTION_IAND 0x00000003 +#define NVC56F_SEM_EXECUTE_REDUCTION_IOR 0x00000004 +#define NVC56F_SEM_EXECUTE_REDUCTION_IADD 0x00000005 +#define NVC56F_SEM_EXECUTE_REDUCTION_INC 0x00000006 +#define NVC56F_SEM_EXECUTE_REDUCTION_DEC 0x00000007 +#define 
NVC56F_SEM_EXECUTE_REDUCTION_FORMAT 31:31 +#define NVC56F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000 +#define NVC56F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001 +#define NVC56F_WFI (0x00000078) +#define NVC56F_WFI_SCOPE 0:0 +#define NVC56F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000 +#define NVC56F_WFI_SCOPE_CURRENT_VEID 0x00000000 +#define NVC56F_WFI_SCOPE_ALL 0x00000001 +#define NVC56F_YIELD (0x00000080) +#define NVC56F_YIELD_OP 1:0 +#define NVC56F_YIELD_OP_NOP 0x00000000 +#define NVC56F_YIELD_OP_TSG 0x00000003 +#define NVC56F_CLEAR_FAULTED (0x00000084) +// Note: RM provides the HANDLE as an opaque value; the internal detail fields +// are intentionally not exposed to the driver through these defines. +#define NVC56F_CLEAR_FAULTED_HANDLE 30:0 +#define NVC56F_CLEAR_FAULTED_TYPE 31:31 +#define NVC56F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED 0x00000000 +#define NVC56F_CLEAR_FAULTED_TYPE_ENG_FAULTED 0x00000001 + + +/* GPFIFO entry format */ +#define NVC56F_GP_ENTRY__SIZE 8 +#define NVC56F_GP_ENTRY0_FETCH 0:0 +#define NVC56F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVC56F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVC56F_GP_ENTRY0_GET 31:2 +#define NVC56F_GP_ENTRY0_OPERAND 31:0 +#define NVC56F_GP_ENTRY1_GET_HI 7:0 +#define NVC56F_GP_ENTRY1_LEVEL 9:9 +#define NVC56F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVC56F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVC56F_GP_ENTRY1_LENGTH 30:10 +#define NVC56F_GP_ENTRY1_SYNC 31:31 +#define NVC56F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVC56F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVC56F_GP_ENTRY1_OPCODE 7:0 +#define NVC56F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVC56F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVC56F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVC56F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVC56F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVC56F_DMA_METHOD_ADDRESS 11:0 +#define NVC56F_DMA_SUBDEVICE_MASK 15:4 +#define NVC56F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVC56F_DMA_TERT_OP 17:16 +#define NVC56F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVC56F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NVC56F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVC56F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVC56F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVC56F_DMA_METHOD_COUNT_OLD 28:18 +#define NVC56F_DMA_METHOD_COUNT 28:16 +#define NVC56F_DMA_IMMD_DATA 28:16 +#define NVC56F_DMA_SEC_OP 31:29 +#define NVC56F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVC56F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVC56F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NVC56F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) +#define NVC56F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NVC56F_DMA_SEC_OP_ONE_INC (0x00000005) +#define NVC56F_DMA_SEC_OP_RESERVED6 (0x00000006) +#define NVC56F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NVC56F_DMA_INCR_ADDRESS 11:0 +#define NVC56F_DMA_INCR_SUBCHANNEL 15:13 +#define NVC56F_DMA_INCR_COUNT 28:16 +#define NVC56F_DMA_INCR_OPCODE 31:29 +#define NVC56F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVC56F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVC56F_DMA_NONINCR_ADDRESS 11:0 +#define NVC56F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVC56F_DMA_NONINCR_COUNT 28:16 +#define NVC56F_DMA_NONINCR_OPCODE 31:29 +#define NVC56F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVC56F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define 
NVC56F_DMA_ONEINCR_ADDRESS 11:0 +#define NVC56F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVC56F_DMA_ONEINCR_COUNT 28:16 +#define NVC56F_DMA_ONEINCR_OPCODE 31:29 +#define NVC56F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVC56F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVC56F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVC56F_DMA_IMMD_ADDRESS 11:0 +#define NVC56F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVC56F_DMA_IMMD_DATA 28:16 +#define NVC56F_DMA_IMMD_OPCODE 31:29 +#define NVC56F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NVC56F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NVC56F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC56F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NVC56F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NVC56F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC56F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NVC56F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC56F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NVC56F_DMA_ENDSEG_OPCODE 31:29 +#define NVC56F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NVC56F_DMA_ADDRESS 12:2 +#define NVC56F_DMA_SUBCH 15:13 +#define NVC56F_DMA_OPCODE3 17:16 +#define NVC56F_DMA_OPCODE3_NONE (0x00000000) +#define NVC56F_DMA_COUNT 28:18 +#define NVC56F_DMA_OPCODE 31:29 +#define NVC56F_DMA_OPCODE_METHOD (0x00000000) +#define NVC56F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NVC56F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc56f_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc570.h b/src/common/sdk/nvidia/inc/class/clc570.h new file mode 100644 index 0000000..461736b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc570.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/clc570.finn +// + +#define NVC570_DISPLAY (0xc570U) /* finn: Evaluated from "NVC570_ALLOCATION_PARAMETERS_MESSAGE_ID" */ + +#define NVC570_ALLOCATION_PARAMETERS_MESSAGE_ID (0xc570U) + +typedef struct NVC570_ALLOCATION_PARAMETERS { + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NVC570_ALLOCATION_PARAMETERS; + diff --git a/src/common/sdk/nvidia/inc/class/clc573.h b/src/common/sdk/nvidia/inc/class/clc573.h new file mode 100644 index 0000000..5cd57a8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc573.h @@ -0,0 +1,606 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clc573_h_ +#define _clc573_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC573_DISP_CAPABILITIES 0xC573 + +typedef volatile struct _clc573_tag0 { + NvU32 dispCapabilities[0x400]; +} _NvC573DispCapabilities,NvC573DispCapabilities_Map ; + + +#define NVC573_SYS_CAP 0x0 /* RW-4R */ +#define NVC573_SYS_CAP_HEAD0_EXISTS 0:0 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD0_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD1_EXISTS 1:1 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD1_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD2_EXISTS 2:2 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD2_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD3_EXISTS 3:3 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD3_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD4_EXISTS 4:4 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD4_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD5_EXISTS 5:5 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD5_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD6_EXISTS 6:6 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD6_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD7_EXISTS 7:7 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD7_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVC573_SYS_CAP_HEAD_EXISTS__SIZE_1 8 /* */ +#define NVC573_SYS_CAP_HEAD_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR0_EXISTS 8:8 /* RWIVF */ +#define NVC573_SYS_CAP_SOR0_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR1_EXISTS 9:9 /* RWIVF */ +#define NVC573_SYS_CAP_SOR1_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR2_EXISTS 10:10 /* RWIVF */ +#define NVC573_SYS_CAP_SOR2_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR3_EXISTS 11:11 /* RWIVF */ +#define NVC573_SYS_CAP_SOR3_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR3_EXISTS_YES 0x00000001 /* RW--V */ +#define 
NVC573_SYS_CAP_SOR4_EXISTS 12:12 /* RWIVF */ +#define NVC573_SYS_CAP_SOR4_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR5_EXISTS 13:13 /* RWIVF */ +#define NVC573_SYS_CAP_SOR5_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR6_EXISTS 14:14 /* RWIVF */ +#define NVC573_SYS_CAP_SOR6_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR7_EXISTS 15:15 /* RWIVF */ +#define NVC573_SYS_CAP_SOR7_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR_EXISTS(i) (8+(i)):(8+(i)) /* RWIVF */ +#define NVC573_SYS_CAP_SOR_EXISTS__SIZE_1 8 /* */ +#define NVC573_SYS_CAP_SOR_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB 0x4 /* RW-4R */ +#define NVC573_SYS_CAPB_WINDOW0_EXISTS 0:0 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW0_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW1_EXISTS 1:1 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW1_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW2_EXISTS 2:2 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW2_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW3_EXISTS 3:3 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW3_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW4_EXISTS 4:4 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW4_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW5_EXISTS 5:5 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW5_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW6_EXISTS 6:6 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW6_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW7_EXISTS 7:7 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW7_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW8_EXISTS 8:8 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW8_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW8_EXISTS_NO 0x00000000 /* RW--V */ 
+#define NVC573_SYS_CAPB_WINDOW8_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW9_EXISTS 9:9 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW9_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW9_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW9_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW10_EXISTS 10:10 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW10_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW10_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW10_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW11_EXISTS 11:11 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW11_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW11_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW11_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW12_EXISTS 12:12 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW12_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW12_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW12_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW13_EXISTS 13:13 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW13_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW13_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW13_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW14_EXISTS 14:14 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW14_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW14_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW14_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW15_EXISTS 15:15 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW15_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW15_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW15_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW16_EXISTS 16:16 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW16_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW16_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW16_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW17_EXISTS 17:17 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW17_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW17_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW17_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW18_EXISTS 18:18 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW18_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW18_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW18_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW19_EXISTS 19:19 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW19_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW19_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW19_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW20_EXISTS 20:20 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW20_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW20_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW20_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW21_EXISTS 21:21 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW21_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW21_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW21_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW22_EXISTS 22:22 /* RWIVF */ +#define 
NVC573_SYS_CAPB_WINDOW22_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW22_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW22_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW23_EXISTS 23:23 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW23_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW23_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW23_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW24_EXISTS 24:24 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW24_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW24_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW24_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW25_EXISTS 25:25 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW25_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW25_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW25_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW26_EXISTS 26:26 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW26_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW26_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW26_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW27_EXISTS 27:27 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW27_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW27_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW27_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW28_EXISTS 28:28 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW28_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW28_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW28_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW29_EXISTS 29:29 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW29_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW29_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW29_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW30_EXISTS 30:30 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW30_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW30_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW30_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW31_EXISTS 31:31 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW31_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW31_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW31_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW_EXISTS__SIZE_1 32 /* */ +#define NVC573_SYS_CAPB_WINDOW_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA 0x10 /* RW-4R */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRIES 15:0 /* RWIUF */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRIES_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH 17:16 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_32B 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_64B 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_128B 0x00000002 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_256B 0x00000003 /* RW--V */ 
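Every field in these capability words is expressed as a high:low bit range, so the DRF helpers from nvmisc.h compose with them directly, including the indexed forms such as NVC573_SYS_CAP_HEAD_EXISTS(i). A short sketch of decoding two of the fields above; how the dispCapabilities region gets mapped is left to the caller and is assumed here:

/*
 * Sketch only: decode a couple of NVC573 capability fields. DRF_VAL is
 * the field-extraction helper from nvmisc.h; obtaining `caps` (a mapped
 * view of the dispCapabilities region) is assumed.
 */
#include "nvtypes.h"
#include "nvmisc.h"
#include "class/clc573.h"

static NvBool dispHeadExists(const NvC573DispCapabilities_Map *caps, NvU32 head)
{
    NvU32 sysCap = caps->dispCapabilities[NVC573_SYS_CAP / sizeof(NvU32)];
    /* NVC573_SYS_CAP_HEAD_EXISTS(i) expands to an i:i bit range, so it
     * drops straight into DRF_VAL. */
    return DRF_VAL(C573, _SYS_CAP, _HEAD_EXISTS(head), sysCap) ==
           NVC573_SYS_CAP_HEAD_EXISTS_YES;
}

static NvU32 dispMempoolEntries(const NvC573DispCapabilities_Map *caps)
{
    NvU32 capa = caps->dispCapabilities[NVC573_IHUB_COMMON_CAPA / sizeof(NvU32)];
    return DRF_VAL(C573, _IHUB_COMMON_CAPA, _MEMPOOL_ENTRIES, capa);
}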
+#define NVC573_IHUB_COMMON_CAPA_SUPPORT_PLANAR 19:19 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_PLANAR_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_PLANAR_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_PLANAR_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_VGA 20:20 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_VGA_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_VGA_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_VGA_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION 21:21 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MSCG 22:22 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MSCG_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MSCG_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MSCG_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH 23:23 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT 26:26 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION 31:30 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_32B 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_64B 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_128B 0x00000002 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_256B 0x00000003 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA(i) (0x680+(i)*32) /* RW-4A */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA__SIZE_1 8 /* */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_FULL_WIDTH_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_UNIT_WIDTH_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT 16:16 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT 17:17 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT 18:18 /* RWIVF */ +#define 
NVC573_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OLPF_PRESENT 19:19 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OLPF_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OLPF_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OLPF_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT 20:20 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT 21:21 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT 22:22 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB(i) (0x684+(i)*32) /* RW-4A */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB__SIZE_1 8 /* */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_VGA 0:0 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_VGA_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_VGA_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_VGA_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGSZ 9:6 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGSZ_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGNR 12:10 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGNR_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT 15:15 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC(i) (0x688+(i)*32) /* RW-4A */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC__SIZE_1 8 /* */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_PRECISION 4:0 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_FALSE_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_PRECISION 12:8 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_PRECISION_INIT 0x00000000 /* RWI-V */ +#define 
NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP 13:13 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_FALSE_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_SF_PRECISION 20:16 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_SF_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_CI_PRECISION 24:21 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_CI_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB 25:25 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_FALSE_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD(i) (0x68c+(i)*32) /* RW-4A */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD__SIZE_1 8 /* */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_2TAP_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_5TAP_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE(i) (0x690+(i)*32) /* RW-4A */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE__SIZE_1 8 /* */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_RATEBUFSIZE 3:0 /* RWIUF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_RATEBUFSIZE_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_LINEBUFSIZE 13:8 /* RWIUF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_LINEBUFSIZE_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422 16:16 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420 17:17 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP(i) (0x144+(i)*8) /* RW-4A */ +#define NVC573_SOR_CAP__SIZE_1 8 /* */ +#define NVC573_SOR_CAP_SINGLE_LVDS_18 0:0 /* RWIVF */ +#define NVC573_SOR_CAP_SINGLE_LVDS_18_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_SINGLE_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_LVDS_18_TRUE 0x00000001 /* RW--V 
*/ +#define NVC573_SOR_CAP_SINGLE_LVDS_24 1:1 /* RWIVF */ +#define NVC573_SOR_CAP_SINGLE_LVDS_24_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_SINGLE_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_LVDS_18 2:2 /* RWIVF */ +#define NVC573_SOR_CAP_DUAL_LVDS_18_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DUAL_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_LVDS_24 3:3 /* RWIVF */ +#define NVC573_SOR_CAP_DUAL_LVDS_24_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DUAL_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_A 8:8 /* RWIVF */ +#define NVC573_SOR_CAP_SINGLE_TMDS_A_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_A_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_A_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_B 9:9 /* RWIVF */ +#define NVC573_SOR_CAP_SINGLE_TMDS_B_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_B_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_B_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_TMDS 11:11 /* RWIVF */ +#define NVC573_SOR_CAP_DUAL_TMDS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DUAL_TMDS_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_TMDS_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DISPLAY_OVER_PCIE 13:13 /* RWIVF */ +#define NVC573_SOR_CAP_DISPLAY_OVER_PCIE_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DISPLAY_OVER_PCIE_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DISPLAY_OVER_PCIE_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_SDI 16:16 /* RWIVF */ +#define NVC573_SOR_CAP_SDI_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_SDI_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_SDI_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DP_A 24:24 /* RWIVF */ +#define NVC573_SOR_CAP_DP_A_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DP_A_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DP_A_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DP_B 25:25 /* RWIVF */ +#define NVC573_SOR_CAP_DP_B_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DP_B_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DP_B_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DP_INTERLACE 26:26 /* RWIVF */ +#define NVC573_SOR_CAP_DP_INTERLACE_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DP_INTERLACE_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DP_INTERLACE_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DP_8_LANES 27:27 /* RWIVF */ +#define NVC573_SOR_CAP_DP_8_LANES_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DP_8_LANES_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DP_8_LANES_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA(i) (0x780+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_FULL_WIDTH_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_UNIT_WIDTH_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_ALPHA_WIDTH 13:10 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_ALPHA_WIDTH_INIT 0x00000000 /* RWI-V */ +#define 
NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT 16:16 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT 17:17 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT 18:18 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT 19:19 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT 20:20 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT 21:21 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT 22:22 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT 23:23 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT 24:24 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB(i) (0x784+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_FMT_PRECISION 4:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_FMT_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGSZ 9:6 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGSZ_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGNR 12:10 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGNR_INIT 0x00000000 /* RWI-V */ +#define 
NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT 15:15 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC(i) (0x788+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_PRECISION 4:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGSZ 9:6 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGSZ_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGNR 12:10 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGNR_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT 15:15 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_PRECISION 20:16 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP 21:21 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD(i) (0x78c+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGSZ 3:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGSZ_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGNR 6:4 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGNR_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD 8:8 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT 9:9 /* 
RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_SF_PRECISION 16:12 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_SF_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_CI_PRECISION 20:17 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_CI_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB 21:21 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA 22:22 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE(i) (0x790+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_PRECISION 4:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGSZ 9:6 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGSZ_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGNR 12:10 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGNR_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT 15:15 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define 
NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_PRECISION 20:16 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP 21:21 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF(i) (0x794+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_2TAP_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_5TAP_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CLK_CAP(i) (0x608+(i)*4) /* RW-4A */ +#define NVC573_SOR_CLK_CAP__SIZE_1 8 /* */ +#define NVC573_SOR_CLK_CAP_DP_MAX 7:0 /* RWIUF */ +#define NVC573_SOR_CLK_CAP_DP_MAX_INIT 0x00000051 /* RWI-V */ +#define NVC573_SOR_CLK_CAP_TMDS_MAX 23:16 /* RWIUF */ +#define NVC573_SOR_CLK_CAP_TMDS_MAX_INIT 0x0000003C /* RWI-V */ +#define NVC573_SOR_CLK_CAP_LVDS_MAX 31:24 /* RWIUF */ +#define NVC573_SOR_CLK_CAP_LVDS_MAX_INIT 0x00000000 /* RWI-V */ + +#ifdef __cplusplus +}; +#endif /* extern C */ +#endif //_clc573_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc574.h b/src/common/sdk/nvidia/inc/class/clc574.h new file mode 100644 index 0000000..077a57f --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc574.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +
+#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/clc574.finn +// + +#define UVM_CHANNEL_RETAINER (0xc574U) /* finn: Evaluated from "NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS_MESSAGE_ID" */ + +#define NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS_MESSAGE_ID (0xc574U) + +typedef struct NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS { + NvHandle hClient; + NvHandle hChannel; +} NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS; + diff --git a/src/common/sdk/nvidia/inc/class/clc57a.h b/src/common/sdk/nvidia/inc/class/clc57a.h new file mode 100644 index 0000000..2dcc60b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc57a.h @@ -0,0 +1,213 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clc57a__h_ +#define _clc57a__h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC57A_CURSOR_IMM_CHANNEL_PIO (0x0000C57A) + +typedef volatile struct _clc57a_tag0 { + NvV32 Reserved00[0x2]; + NvV32 Free; // 0x00000008 - 0x0000000B + NvV32 Reserved01[0x7D]; + NvV32 Update; // 0x00000200 - 0x00000203 + NvV32 SetInterlockFlags; // 0x00000204 - 0x00000207 + NvV32 SetCursorHotSpotPointOut[2]; // 0x00000208 - 0x0000020F + NvV32 SetWindowInterlockFlags; // 0x00000210 - 0x00000213 + NvV32 Reserved02[0x37B]; +} NVC57ADispCursorImmControlPio; + +#define NVC57A_FREE (0x00000008) +#define NVC57A_FREE_COUNT 5:0 +#define NVC57A_UPDATE (0x00000200) +#define NVC57A_UPDATE_RELEASE_ELV 0:0 +#define NVC57A_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC57A_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC57A_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57A_SET_INTERLOCK_FLAGS (0x00000204) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define 
NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC57A_SET_CURSOR_HOT_SPOT_POINT_OUT(b) (0x00000208 + (b)*0x00000004) +#define NVC57A_SET_CURSOR_HOT_SPOT_POINT_OUT_X 15:0 +#define NVC57A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y 31:16 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS (0x00000210) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define 
NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define 
NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc57a_h + diff --git a/src/common/sdk/nvidia/inc/class/clc57b.h b/src/common/sdk/nvidia/inc/class/clc57b.h new file mode 100644 index 0000000..f44d9a2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc57b.h @@ -0,0 +1,67 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clC57b_h_ +#define _clC57b_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC57B_WINDOW_IMM_CHANNEL_DMA (0x0000C57B) + +// dma opcode instructions +#define NVC57B_DMA +#define NVC57B_DMA_OPCODE 31:29 +#define NVC57B_DMA_OPCODE_METHOD 0x00000000 +#define NVC57B_DMA_OPCODE_JUMP 0x00000001 +#define NVC57B_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC57B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC57B_DMA_METHOD_COUNT 27:18 +#define NVC57B_DMA_METHOD_OFFSET 13:2 +#define NVC57B_DMA_DATA 31:0 +#define NVC57B_DMA_DATA_NOP 0x00000000 +#define NVC57B_DMA_JUMP_OFFSET 11:2 +#define NVC57B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NVC57B_PUT (0x00000000) +#define NVC57B_PUT_PTR 9:0 +#define NVC57B_GET (0x00000004) +#define NVC57B_GET_PTR 9:0 +#define NVC57B_UPDATE (0x00000200) +#define NVC57B_UPDATE_RELEASE_ELV 0:0 +#define NVC57B_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC57B_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC57B_UPDATE_INTERLOCK_WITH_WINDOW 1:1 +#define NVC57B_UPDATE_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC57B_UPDATE_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC57B_SET_POINT_OUT(b) (0x00000208 + (b)*0x00000004) +#define NVC57B_SET_POINT_OUT_X 15:0 +#define NVC57B_SET_POINT_OUT_Y 31:16 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC57b_h diff --git a/src/common/sdk/nvidia/inc/class/clc57d.h b/src/common/sdk/nvidia/inc/class/clc57d.h new file mode 100644 index 0000000..4f415d0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc57d.h @@ -0,0 +1,1277 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clC57d_h_ +#define _clC57d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC57D_CORE_CHANNEL_DMA (0x0000C57D) + +#define NV_DISP_NOTIFIER 0x00000000 +#define NV_DISP_NOTIFIER_SIZEOF 0x00000010 +#define NV_DISP_NOTIFIER__0 0x00000000 +#define NV_DISP_NOTIFIER__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFIER__0_FIELD 8:8 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE 9:9 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_NON_TEARING 0x00000000 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_IMMEDIATE 0x00000001 +#define NV_DISP_NOTIFIER__0_R1 15:10 +#define NV_DISP_NOTIFIER__0_R2 23:16 +#define NV_DISP_NOTIFIER__0_R3 29:24 +#define NV_DISP_NOTIFIER__0_STATUS 31:30 +#define NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_NOTIFIER__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_NOTIFIER__0_STATUS_FINISHED 0x00000002 +#define NV_DISP_NOTIFIER__1 0x00000001 +#define NV_DISP_NOTIFIER__1_R4 31:0 +#define NV_DISP_NOTIFIER__2 0x00000002 +#define NV_DISP_NOTIFIER__2_TIMESTAMP_LO 31:0 +#define NV_DISP_NOTIFIER__3 0x00000003 +#define NV_DISP_NOTIFIER__3_TIMESTAMP_HI 31:0 + + +// dma opcode instructions +#define NVC57D_DMA +#define NVC57D_DMA_OPCODE 31:29 +#define NVC57D_DMA_OPCODE_METHOD 0x00000000 +#define NVC57D_DMA_OPCODE_JUMP 0x00000001 +#define NVC57D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC57D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC57D_DMA_METHOD_COUNT 27:18 +#define NVC57D_DMA_METHOD_OFFSET 13:2 +#define NVC57D_DMA_DATA 31:0 +#define NVC57D_DMA_DATA_NOP 0x00000000 +#define NVC57D_DMA_JUMP_OFFSET 11:2 +#define NVC57D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// if cap SUPPORT_FLEXIBLE_WIN_MAPPING is FALSE, this define can be used to obtain which head a window is mapped to +#define NVC37D_WINDOW_MAPPED_TO_HEAD(w) ((w)>>1) +#define NVC37D_GET_VALID_WINDOWMASK_FOR_HEAD(h) ((1<<((h)*2)) | (1<<((h)*2+1))) + +// class methods +#define NVC57D_PUT (0x00000000) +#define NVC57D_PUT_PTR 9:0 +#define NVC57D_GET (0x00000004) +#define NVC57D_GET_PTR 9:0 +#define NVC57D_UPDATE (0x00000200) +#define NVC57D_UPDATE_SPECIAL_HANDLING 21:20 +#define NVC57D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NVC57D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NVC57D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NVC57D_UPDATE_SPECIAL_HANDLING_REASON 19:12 +#define NVC57D_UPDATE_INHIBIT_INTERRUPTS 24:24 +#define NVC57D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NVC57D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NVC57D_UPDATE_RELEASE_ELV 0:0 +#define NVC57D_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC57D_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC57D_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define 
NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57D_SET_CONTEXT_DMA_NOTIFIER (0x00000208) +#define NVC57D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC57D_SET_NOTIFIER_CONTROL (0x0000020C) +#define NVC57D_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC57D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC57D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC57D_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC57D_SET_NOTIFIER_CONTROL_NOTIFY 12:12 +#define NVC57D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NVC57D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NVC57D_SET_CONTROL (0x00000210) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN(i) ((i)+0):((i)+0) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN__SIZE_1 4 +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN_DISABLE (0x00000000) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN_ENABLE (0x00000001) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN0 0:0 +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN0_DISABLE (0x00000000) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN0_ENABLE (0x00000001) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN1 1:1 +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN1_DISABLE (0x00000000) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN1_ENABLE (0x00000001) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN2 2:2 +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN2_DISABLE (0x00000000) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN2_ENABLE (0x00000001) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN3 3:3 +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN3_DISABLE (0x00000000) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN3_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS (0x00000218) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define 
NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define 
NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define 
NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC57D_GET_RG_SCAN_LINE(b) (0x00000220 + (b)*0x00000004) +#define NVC57D_GET_RG_SCAN_LINE_LINE 15:0 +#define NVC57D_GET_RG_SCAN_LINE_VBLANK 16:16 +#define NVC57D_GET_RG_SCAN_LINE_VBLANK_FALSE (0x00000000) +#define NVC57D_GET_RG_SCAN_LINE_VBLANK_TRUE (0x00000001) +#define NVC57D_SET_GET_BLANKING_CTRL(b) (0x00000240 + (b)*0x00000004) +#define NVC57D_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NVC57D_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NVC57D_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NVC57D_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NVC57D_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NVC57D_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) + +#define NVC57D_SOR_SET_CONTROL(a) 
(0x00000300 + (a)*0x00000020) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK 7:0 +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_DSI (0x0000000A) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NVC57D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16 +#define NVC57D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC57D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC57D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NVC57D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NVC57D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NVC57D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NVC57D_SOR_SET_CUSTOM_REASON(a) (0x00000304 + (a)*0x00000020) +#define NVC57D_SOR_SET_CUSTOM_REASON_CODE 31:0 +#define NVC57D_SOR_SET_SW_SPARE_A(a) (0x00000308 + (a)*0x00000020) +#define NVC57D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NVC57D_SOR_SET_SW_SPARE_B(a) (0x0000030C + (a)*0x00000020) +#define NVC57D_SOR_SET_SW_SPARE_B_CODE 31:0 + +#define NVC57D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080) +#define NVC57D_WINDOW_SET_CONTROL_OWNER 3:0 +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i)) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8 +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define 
NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define 
NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define 
NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(a) (0x0000100C + (a)*0x00000080) +#define NVC57D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVC57D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0 +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16 +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28 +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20 +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24 +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) + +#define NVC57D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000400) +#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003) +#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3 +#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28 +#define NVC57D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) 
+#define NVC57D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F) +#define 
NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 (0x00000017) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F) +#define NVC57D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTROL_STRUCTURE 1:0 +#define NVC57D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2 +#define NVC57D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 11:10 +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 8:4 +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 
(0x00000014) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 15:12 +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 23:22 +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 20:16 +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define 
NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN 28:24 +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NVC57D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NVC57D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000400) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define 
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00002010 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00002014 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 6:4
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004)
+#define NVC57D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NVC57D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_DITHER_CONTROL_BITS 5:4
+#define NVC57D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000)
+#define NVC57D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001)
+#define NVC57D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002)
+#define NVC57D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003)
+#define NVC57D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2
+#define NVC57D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE 10:8
+#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_ROUND (0x00000005)
+#define NVC57D_HEAD_SET_DITHER_CONTROL_PHASE 13:12
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x0000201C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 0:0
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING 4:4
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 9:8
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001)
+#define NVC57D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC57D_HEAD_SET_DISPLAY_ID_CODE 31:0
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(a) (0x0000202C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_HORIZONTAL 15:0
+#define NVC57D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_VERTICAL 31:16
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_STALL_LOCK(a) (0x00002034 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_STALL_LOCK_ENABLE 0:0
+#define NVC57D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_STALL_LOCK_MODE 2:2
+#define NVC57D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000)
+#define NVC57D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN 8:4
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC57D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 12:12
+#define NVC57D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000)
+#define NVC57D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001)
+#define NVC57D_HEAD_SET_STALL_LOCK_TEPOLARITY 14:14
+#define NVC57D_HEAD_SET_STALL_LOCK_TEPOLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC57D_HEAD_SET_STALL_LOCK_TEPOLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_LOCK_CHAIN(a) (0x00002044 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_LOCK_CHAIN_POSITION 3:0
+#define NVC57D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x00002048 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NVC57D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#define NVC57D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NVC57D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NVC57D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NVC57D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NVC57D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x0000205C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0
+#define NVC57D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16
+#define NVC57D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_RASTER_SIZE_WIDTH 14:0
+#define NVC57D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16
+#define NVC57D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NVC57D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NVC57D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NVC57D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NVC57D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NVC57D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NVC57D_HEAD_SET_OVERSCAN_COLOR(a) (0x00002078 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OVERSCAN_COLOR_RED_CR 9:0
+#define NVC57D_HEAD_SET_OVERSCAN_COLOR_GREEN_Y 19:10
+#define NVC57D_HEAD_SET_OVERSCAN_COLOR_BLUE_CB 29:20
+#define NVC57D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(a) (0x0000207C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_RED_CR 9:0
+#define NVC57D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_GREEN_Y 19:10
+#define NVC57D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_BLUE_CB 29:20
+#define NVC57D_HEAD_SET_HDMI_CTRL(a) (0x00002080 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0
+#define NVC57D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000)
+#define NVC57D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001)
+#define NVC57D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002)
+#define NVC57D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4
+#define NVC57D_HEAD_SET_CONTEXT_DMA_CURSOR(a,b) (0x00002088 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC57D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0
+#define NVC57D_HEAD_SET_OFFSET_CURSOR(a,b) (0x00002090 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC57D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
+#define NVC57D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x00002098 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 0:0
+#define NVC57D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000)
+#define NVC57D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS 20:20
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00002180 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
+#define NVC57D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020)
+#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8
+#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9
+#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_PRESENT_CONTROL(a) (0x0000218C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 0:0
+#define NVC57D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4
+#define NVC57D_HEAD_SET_SW_SPARE_A(a) (0x00002194 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_SW_SPARE_A_CODE 31:0
+#define NVC57D_HEAD_SET_SW_SPARE_B(a) (0x00002198 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_SW_SPARE_B_CODE 31:0
+#define NVC57D_HEAD_SET_SW_SPARE_C(a) (0x0000219C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_SW_SPARE_C_CODE 31:0
+#define NVC57D_HEAD_SET_SW_SPARE_D(a) (0x000021A0 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_SW_SPARE_D_CODE 31:0
+#define NVC57D_HEAD_SET_DISPLAY_RATE(a) (0x000021A8 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DISPLAY_RATE_RUN_MODE 0:0
+#define NVC57D_HEAD_SET_DISPLAY_RATE_RUN_MODE_CONTINUOUS (0x00000000)
+#define NVC57D_HEAD_SET_DISPLAY_RATE_RUN_MODE_ONE_SHOT (0x00000001)
+#define NVC57D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL 25:4
+#define NVC57D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH 2:2
+#define NVC57D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE(a) (0x00002214 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_DATA 9:0
+#define NVC57D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_INDEX 19:12
+#define NVC57D_HEAD_SET_MIN_FRAME_IDLE(a) (0x00002218 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_MIN_FRAME_IDLE_LEADING_RASTER_LINES 14:0
+#define NVC57D_HEAD_SET_MIN_FRAME_IDLE_TRAILING_RASTER_LINES 30:16
+#define NVC57D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED(a) (0x00002220 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_ALPHA 7:0
+#define NVC57D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_RED 31:16
+#define NVC57D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE(a) (0x00002224 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_GREEN 15:0
+#define NVC57D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_BLUE 31:16
+#define NVC57D_HEAD_SET_CURSOR_COLOR_NORM_SCALE(a) (0x00002228 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_CURSOR_COLOR_NORM_SCALE_VALUE 15:0
+#define NVC57D_HEAD_SET_XOR_BLEND_FACTOR(a) (0x0000222C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_XOR_BLEND_FACTOR_LOG2PEAK_LUMINANCE 3:0
+#define NVC57D_HEAD_SET_XOR_BLEND_FACTOR_S1 16:4
+#define NVC57D_HEAD_SET_XOR_BLEND_FACTOR_S2 30:18
+#define NVC57D_HEAD_SET_CLAMP_RANGE_GREEN(a) (0x00002238 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_CLAMP_RANGE_GREEN_LOW 11:0
+#define NVC57D_HEAD_SET_CLAMP_RANGE_GREEN_HIGH 27:16
+#define NVC57D_HEAD_SET_CLAMP_RANGE_RED_BLUE(a) (0x0000223C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_CLAMP_RANGE_RED_BLUE_LOW 11:0
+#define NVC57D_HEAD_SET_CLAMP_RANGE_RED_BLUE_HIGH 27:16
+#define NVC57D_HEAD_SET_OCSC0CONTROL(a) (0x00002240 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC0CONTROL_ENABLE 0:0
+#define NVC57D_HEAD_SET_OCSC0CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_OCSC0CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C00(a) (0x00002244 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C00_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C01(a) (0x00002248 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C01_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C02(a) (0x0000224C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C02_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C03(a) (0x00002250 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C03_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C10(a) (0x00002254 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C10_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C11(a) (0x00002258 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C11_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C12(a) (0x0000225C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C12_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C13(a) (0x00002260 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C13_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C20(a) (0x00002264 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C20_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C21(a) (0x00002268 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C21_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C22(a) (0x0000226C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C22_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C23(a) (0x00002270 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C23_VALUE 20:0
+#define NVC57D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0
+#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE 3:2
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_SIZE 18:8
+#define NVC57D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0
+#define NVC57D_HEAD_SET_CONTEXT_DMA_OLUT(a) (0x00002288 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_CONTEXT_DMA_OLUT_HANDLE 31:0
+#define NVC57D_HEAD_SET_OFFSET_OLUT(a) (0x0000228C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OFFSET_OLUT_ORIGIN 31:0
+#define NVC57D_HEAD_SET_OCSC1CONTROL(a) (0x0000229C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC1CONTROL_ENABLE 0:0
+#define NVC57D_HEAD_SET_OCSC1CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_OCSC1CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C00(a) (0x000022A0 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C00_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C01(a) (0x000022A4 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C01_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C02(a) (0x000022A8 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C02_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C03(a) (0x000022AC + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C03_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C10(a) (0x000022B0 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C10_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C11(a) (0x000022B4 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C11_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C12(a) (0x000022B8 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C12_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C13(a) (0x000022BC + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C13_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C20(a) (0x000022C0 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C20_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C21(a) (0x000022C4 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C21_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C22(a) (0x000022C8 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C22_VALUE 20:0
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C23(a) (0x000022CC + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C23_VALUE 20:0
+#define NVC57D_HEAD_SET_TILE_POSITION(a) (0x000022D0 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_TILE_POSITION_X 2:0
+#define NVC57D_HEAD_SET_TILE_POSITION_Y 6:4
+#define NVC57D_HEAD_SET_DSC_CONTROL(a) (0x000022D4 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_CONTROL_ENABLE 0:0
+#define NVC57D_HEAD_SET_DSC_CONTROL_ENABLE_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_DSC_CONTROL_ENABLE_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_DSC_CONTROL_MODE 2:1
+#define NVC57D_HEAD_SET_DSC_CONTROL_MODE_SINGLE (0x00000000)
+#define NVC57D_HEAD_SET_DSC_CONTROL_MODE_DUAL (0x00000001)
+#define NVC57D_HEAD_SET_DSC_CONTROL_MODE_QUAD (0x00000002)
+#define NVC57D_HEAD_SET_DSC_CONTROL_MODE_DROP (0x00000003)
+#define NVC57D_HEAD_SET_DSC_CONTROL_AUTO_RESET 3:3
+#define NVC57D_HEAD_SET_DSC_CONTROL_AUTO_RESET_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_DSC_CONTROL_AUTO_RESET_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION 4:4
+#define NVC57D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET 5:5
+#define NVC57D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_DSC_CONTROL_FLATNESS_DET_THRESH 15:6
+#define NVC57D_HEAD_SET_DSC_PPS_CONTROL(a) (0x000022D8 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_ENABLE 0:0
+#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_LOCATION 1:1
+#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VSYNC (0x00000000)
+#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VBLANK (0x00000001)
+#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_SIZE 9:2
+#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY 10:10
+#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000)
+#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_ONCE (0x00000001)
+#define NVC57D_HEAD_SET_DSC_PPS_HEAD(a) (0x000022DC + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_HEAD_BYTE0 7:0
+#define NVC57D_HEAD_SET_DSC_PPS_HEAD_BYTE1 15:8
+#define NVC57D_HEAD_SET_DSC_PPS_HEAD_BYTE2 23:16
+#define NVC57D_HEAD_SET_DSC_PPS_HEAD_BYTE3 31:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA0(a) (0x000022E0 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MINOR 3:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MAJOR 7:4
+#define NVC57D_HEAD_SET_DSC_PPS_DATA0_PPS_IDENTIFIER 15:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA0_RESERVED 23:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA0_LINEBUF_DEPTH 27:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA0_BITS_PER_COMPONENT 31:28
+#define NVC57D_HEAD_SET_DSC_PPS_DATA1(a) (0x000022E4 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_HIGH 1:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA1_VBR_ENABLE 2:2
+#define NVC57D_HEAD_SET_DSC_PPS_DATA1_SIMPLE422 3:3
+#define NVC57D_HEAD_SET_DSC_PPS_DATA1_CONVERT_RGB 4:4
+#define NVC57D_HEAD_SET_DSC_PPS_DATA1_BLOCK_PRED_ENABLE 5:5
+#define NVC57D_HEAD_SET_DSC_PPS_DATA1_RESERVED 7:6
+#define NVC57D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_LOW 15:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_HIGH 23:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_LOW 31:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA2(a) (0x000022E8 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_HIGH 7:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_LOW 15:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_HIGH 23:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_LOW 31:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA3(a) (0x000022EC + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_HIGH 7:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_LOW 15:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_HIGH 23:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_LOW 31:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA4(a) (0x000022F0 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_HIGH 1:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA4_RESERVED 7:2
+#define NVC57D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_LOW 15:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_HIGH 23:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_LOW 31:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA5(a) (0x000022F4 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA5_RESERVED0 7:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA5_INITIAL_SCALE_VALUE 13:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA5_RESERVED1 15:14
+#define NVC57D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_HIGH 23:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_LOW 31:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA6(a) (0x000022F8 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_HIGH 3:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA6_RESERVED0 7:4
+#define NVC57D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_LOW 15:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA6_RESERVED1 23:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA6_FIRST_LINE_BPG_OFFSET 28:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA6_RESERVED2 31:29
+#define NVC57D_HEAD_SET_DSC_PPS_DATA7(a) (0x000022FC + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_HIGH 7:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_LOW 15:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_HIGH 23:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_LOW 31:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA8(a) (0x00002300 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_HIGH 7:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_LOW 15:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_HIGH 23:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_LOW 31:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA9(a) (0x00002304 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MIN_QP 4:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA9_RESERVED0 7:5
+#define NVC57D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MAX_QP 12:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA9_RESERVED1 15:13
+#define NVC57D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_HIGH 23:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_LOW 31:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA10(a) (0x00002308 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RC_EDGE_FACTOR 3:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RESERVED0 7:4
+#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT0 12:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RESERVED1 15:13
+#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT1 20:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RESERVED2 23:21
+#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_LO 27:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_HI 31:28
+#define NVC57D_HEAD_SET_DSC_PPS_DATA11(a) (0x0000230C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH0 7:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH1 15:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH2 23:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH3 31:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA12(a) (0x00002310 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH4 7:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH5 15:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH6 23:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH7 31:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA13(a) (0x00002314 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH8 7:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH9 15:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH10 23:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH11 31:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA14(a) (0x00002318 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH12 7:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH13 15:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_HIGH0 18:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MIN_QP0 23:19
+#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_BPG_OFFSET0 29:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_LOW0 31:30
+#define NVC57D_HEAD_SET_DSC_PPS_DATA15(a) (0x0000231C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH1 2:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP1 7:3
+#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET1 13:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW1 15:14
+#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH2 18:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP2 23:19
+#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET2 29:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW2 31:30
+#define NVC57D_HEAD_SET_DSC_PPS_DATA16(a) (0x00002320 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH3 2:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP3 7:3
+#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET3 13:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW3 15:14
+#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH4 18:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP4 23:19
+#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET4 29:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW4 31:30
+#define NVC57D_HEAD_SET_DSC_PPS_DATA17(a) (0x00002324 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH5 2:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP5 7:3
+#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET5 13:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW5 15:14
+#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH6 18:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP6 23:19
+#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET6 29:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW6 31:30
+#define NVC57D_HEAD_SET_DSC_PPS_DATA18(a) (0x00002328 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH7 2:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP7 7:3
+#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET7 13:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW7 15:14
+#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH8 18:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP8 23:19
+#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET8 29:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW8 31:30
+#define NVC57D_HEAD_SET_DSC_PPS_DATA19(a) (0x0000232C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH9 2:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP9 7:3
+#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET9 13:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW9 15:14
+#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH10 18:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP10 23:19
+#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET10 29:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW10 31:30
+#define NVC57D_HEAD_SET_DSC_PPS_DATA20(a) (0x00002330 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH11 2:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP11 7:3
+#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET11 13:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW11 15:14
+#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH12 18:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP12 23:19
+#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET12 29:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW12 31:30
+#define NVC57D_HEAD_SET_DSC_PPS_DATA21(a) (0x00002334 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH13 2:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP13 7:3
+#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET13 13:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW13 15:14
+#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH14 18:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP14 23:19
+#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET14 29:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW14 31:30
+#define NVC57D_HEAD_SET_DSC_PPS_DATA22(a) (0x00002338 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA22_NATIVE422 0:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA22_NATIVE420 1:1
+#define NVC57D_HEAD_SET_DSC_PPS_DATA22_RESERVED0 7:2
+#define NVC57D_HEAD_SET_DSC_PPS_DATA22_SECOND_LINE_BPG_OFFSET 12:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA22_RESERVED1 15:13
+#define NVC57D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSET_HIGH 23:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSETLOW 31:24
+#define NVC57D_HEAD_SET_DSC_PPS_DATA23(a) (0x0000233C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_HIGH 7:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_LOW 15:8
+#define NVC57D_HEAD_SET_DSC_PPS_DATA23_RESERVED 31:16
+#define NVC57D_HEAD_SET_DSC_PPS_DATA24(a) (0x00002340 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA24_RESERVED 31:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA25(a) (0x00002344 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA25_RESERVED 31:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA26(a) (0x00002348 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA26_RESERVED 31:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA27(a) (0x0000234C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA27_RESERVED 31:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA28(a) (0x00002350 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA28_RESERVED 31:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA29(a) (0x00002354 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA29_RESERVED 31:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA30(a) (0x00002358 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA30_RESERVED 31:0
+#define NVC57D_HEAD_SET_DSC_PPS_DATA31(a) (0x0000235C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_DSC_PPS_DATA31_RESERVED 31:0
+#define NVC57D_HEAD_SET_RG_MERGE(a) (0x00002360 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_RG_MERGE_MODE 1:0
+#define NVC57D_HEAD_SET_RG_MERGE_MODE_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_RG_MERGE_MODE_SETUP (0x00000001)
+#define NVC57D_HEAD_SET_RG_MERGE_MODE_MASTER (0x00000002)
+#define NVC57D_HEAD_SET_RG_MERGE_MODE_SLAVE (0x00000003)
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clC57d_h
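The thirty-two NVC57D_HEAD_SET_DSC_PPS_DATA words above carry the 128-byte VESA DSC picture parameter set, four PPS bytes per 32-bit word in ascending order: per the DATA0 layout, PPS byte 0 (the DSC version) sits in bits 7:0, byte 1 (the PPS identifier) in bits 15:8, and so on. A small sketch of that packing, with an illustrative function name:

    #include <stdint.h>

    /* Pack a 128-byte DSC PPS into the 32 DSC_PPS_DATA method words,
     * least-significant byte first, matching the field layout above. */
    static void pack_dsc_pps(const uint8_t pps[128], uint32_t data[32])
    {
        for (int i = 0; i < 32; i++) {
            data[i] = (uint32_t)pps[4 * i]
                    | ((uint32_t)pps[4 * i + 1] << 8)
                    | ((uint32_t)pps[4 * i + 2] << 16)
                    | ((uint32_t)pps[4 * i + 3] << 24);
        }
    }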
diff --git a/src/common/sdk/nvidia/inc/class/clc57e.h b/src/common/sdk/nvidia/inc/class/clc57e.h
new file mode 100644
index 0000000..d613410
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc57e.h
@@ -0,0 +1,657 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clC57e_h_
+#define _clC57e_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC57E_WINDOW_CHANNEL_DMA (0x0000C57E)
+
+// dma opcode instructions
+#define NVC57E_DMA
+#define NVC57E_DMA_OPCODE 31:29
+#define NVC57E_DMA_OPCODE_METHOD 0x00000000
+#define NVC57E_DMA_OPCODE_JUMP 0x00000001
+#define NVC57E_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC57E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC57E_DMA_METHOD_COUNT 27:18
+#define NVC57E_DMA_METHOD_OFFSET 13:2
+#define NVC57E_DMA_DATA 31:0
+#define NVC57E_DMA_DATA_NOP 0x00000000
+#define NVC57E_DMA_JUMP_OFFSET 11:2
+#define NVC57E_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
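The NVC57E_DMA fields above describe the pushbuffer encoding for this channel: each command starts with a header word holding an opcode in bits 31:29 and, for the METHOD opcode, a data-word count in 27:18 and the method offset in 13:2, with the data words following the header. A sketch under the assumption (suggested by the 13:2 bit position, since methods are dword-aligned) that the offset field holds the byte offset shifted right by two; `push_method` is illustrative, not driver API:

    #include <stdint.h>

    /* Append one METHOD command to a pushbuffer at 'p' and return the new
     * write pointer. 'method' is a byte offset such as NVC57E_SET_SIZE. */
    static uint32_t *push_method(uint32_t *p, uint32_t method,
                                 uint32_t count, const uint32_t *data)
    {
        uint32_t hdr = 0;
        hdr |= 0x00000000u << 29;             /* NVC57E_DMA_OPCODE_METHOD, 31:29 */
        hdr |= (count & 0x3FFu) << 18;        /* NVC57E_DMA_METHOD_COUNT, 27:18  */
        hdr |= ((method >> 2) & 0xFFFu) << 2; /* NVC57E_DMA_METHOD_OFFSET, 13:2  */
        *p++ = hdr;
        for (uint32_t i = 0; i < count; i++)  /* 'count' data dwords follow */
            *p++ = data[i];
        return p;
    }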
+
+// class methods
+#define NVC57E_PUT (0x00000000)
+#define NVC57E_PUT_PTR 9:0
+#define NVC57E_GET (0x00000004)
+#define NVC57E_GET_PTR 9:0
+#define NVC57E_UPDATE (0x00000200)
+#define NVC57E_UPDATE_RELEASE_ELV 0:0
+#define NVC57E_UPDATE_RELEASE_ELV_FALSE (0x00000000)
+#define NVC57E_UPDATE_RELEASE_ELV_TRUE (0x00000001)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN 8:4
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC57E_UPDATE_INTERLOCK_WITH_WIN_IMM 12:12
+#define NVC57E_UPDATE_INTERLOCK_WITH_WIN_IMM_DISABLE (0x00000000)
+#define NVC57E_UPDATE_INTERLOCK_WITH_WIN_IMM_ENABLE (0x00000001)
+#define NVC57E_GET_LINE (0x00000208)
+#define NVC57E_GET_LINE_LINE 15:0
+#define NVC57E_SET_SEMAPHORE_CONTROL (0x0000020C)
+#define NVC57E_SET_SEMAPHORE_CONTROL_OFFSET 7:0
+#define NVC57E_SET_SEMAPHORE_ACQUIRE (0x00000210)
+#define NVC57E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0
+#define NVC57E_SET_SEMAPHORE_RELEASE (0x00000214)
+#define NVC57E_SET_SEMAPHORE_RELEASE_VALUE 31:0
+#define NVC57E_SET_CONTEXT_DMA_SEMAPHORE (0x00000218)
+#define NVC57E_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0
+#define NVC57E_SET_CONTEXT_DMA_NOTIFIER (0x0000021C)
+#define NVC57E_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NVC57E_SET_NOTIFIER_CONTROL (0x00000220)
+#define NVC57E_SET_NOTIFIER_CONTROL_MODE 0:0
+#define NVC57E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NVC57E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVC57E_SET_NOTIFIER_CONTROL_OFFSET 11:4
+#define NVC57E_SET_SIZE (0x00000224)
+#define NVC57E_SET_SIZE_WIDTH 15:0
+#define NVC57E_SET_SIZE_HEIGHT 31:16
+#define NVC57E_SET_STORAGE (0x00000228)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NVC57E_SET_STORAGE_MEMORY_LAYOUT 4:4
+#define NVC57E_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NVC57E_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NVC57E_SET_PARAMS (0x0000022C)
+#define NVC57E_SET_PARAMS_FORMAT 7:0
+#define NVC57E_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NVC57E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F)
+#define NVC57E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NVC57E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVC57E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E)
+#define NVC57E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVC57E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6)
+#define NVC57E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NVC57E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9)
+#define NVC57E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NVC57E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NVC57E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NVC57E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024)
+#define NVC57E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NVC57E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NVC57E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NVC57E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028)
+#define NVC57E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029)
+#define NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035)
+#define NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036)
+#define NVC57E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038)
+#define NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055)
+#define NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056)
+#define NVC57E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058)
+#define NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075)
+#define NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076)
+#define NVC57E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078)
+#define NVC57E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18
+#define NVC57E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000)
+#define NVC57E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001)
+#define NVC57E_SET_PARAMS_SWAP_UV 19:19
+#define NVC57E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000)
+#define NVC57E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001)
+#define NVC57E_SET_PARAMS_FMT_ROUNDING_MODE 22:22
+#define NVC57E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST (0x00000000)
+#define NVC57E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN (0x00000001)
+#define NVC57E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004)
+#define NVC57E_SET_PLANAR_STORAGE_PITCH 12:0
+#define NVC57E_SET_CONTEXT_DMA_ISO(b) (0x00000240 + (b)*0x00000004)
+#define NVC57E_SET_CONTEXT_DMA_ISO_HANDLE 31:0
+#define NVC57E_SET_OFFSET(b) (0x00000260 + (b)*0x00000004)
+#define NVC57E_SET_OFFSET_ORIGIN 31:0
+#define NVC57E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004)
+#define NVC57E_SET_POINT_IN_X 15:0
+#define NVC57E_SET_POINT_IN_Y 31:16
+#define NVC57E_SET_SIZE_IN (0x00000298)
+#define NVC57E_SET_SIZE_IN_WIDTH 15:0
+#define NVC57E_SET_SIZE_IN_HEIGHT 31:16
+#define NVC57E_SET_SIZE_OUT (0x000002A4)
+#define NVC57E_SET_SIZE_OUT_WIDTH 15:0
+#define NVC57E_SET_SIZE_OUT_HEIGHT 31:16
+#define NVC57E_SET_CONTROL_INPUT_SCALER (0x000002A8)
+#define NVC57E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS 2:0
+#define NVC57E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NVC57E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NVC57E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS 6:4
+#define NVC57E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NVC57E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004)
+#define NVC57E_SET_INPUT_SCALER_COEFF_VALUE (0x000002AC)
+#define NVC57E_SET_INPUT_SCALER_COEFF_VALUE_DATA 9:0
+#define NVC57E_SET_INPUT_SCALER_COEFF_VALUE_INDEX 19:12
+#define NVC57E_SET_COMPOSITION_CONTROL (0x000002EC)
+#define NVC57E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT 1:0
+#define NVC57E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE (0x00000000)
+#define NVC57E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC (0x00000001)
+#define NVC57E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST (0x00000002)
+#define NVC57E_SET_COMPOSITION_CONTROL_DEPTH 11:4
+#define NVC57E_SET_COMPOSITION_CONTROL_BYPASS 16:16
+#define NVC57E_SET_COMPOSITION_CONTROL_BYPASS_DISABLE (0x00000000)
+#define NVC57E_SET_COMPOSITION_CONTROL_BYPASS_ENABLE (0x00000001)
+#define NVC57E_SET_COMPOSITION_CONSTANT_ALPHA (0x000002F0)
+#define NVC57E_SET_COMPOSITION_CONSTANT_ALPHA_K1 7:0
+#define NVC57E_SET_COMPOSITION_CONSTANT_ALPHA_K2 15:8
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT (0x000002F4)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT 3:0
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT 7:4
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT 11:8
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K2 (0x00000003)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1 (0x00000004)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT 15:12
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K2 (0x00000003)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1 (0x00000004)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT 19:16
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K1 (0x00000002)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT 23:20
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K1 (0x00000002)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT 27:24
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ONE (0x00000001)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT 31:28
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ONE (0x00000001)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003)
+#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
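Read as a conventional programmable blender, the factor selects above choose per-pixel multipliers for a blend of the form out = src * srcFactor + dst * dstFactor: the MATCH variants apply where the color-key test (NVC57E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT) matches and NO_MATCH where it does not, with K1 and K2 supplied by NVC57E_SET_COMPOSITION_CONSTANT_ALPHA. A sketch of one factor combination; treating the 8-bit K1 as K1/255 and reading NEG_K1 as (1 - K1) are both assumptions:

    /* One channel of the blend implied by the factor selects above, with
     * srcFactor = K1 and dstFactor = NEG_K1 (read here as 1 - K1). */
    static float blend_channel(float src, float dst, unsigned k1_raw)
    {
        float k1 = (float)k1_raw / 255.0f;   /* assumed normalization */
        return src * k1 + dst * (1.0f - k1);
    }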
+#define NVC57E_SET_KEY_ALPHA (0x000002F8)
+#define NVC57E_SET_KEY_ALPHA_MIN 15:0
+#define NVC57E_SET_KEY_ALPHA_MAX 31:16
+#define NVC57E_SET_KEY_RED_CR (0x000002FC)
+#define NVC57E_SET_KEY_RED_CR_MIN 15:0
+#define NVC57E_SET_KEY_RED_CR_MAX 31:16
+#define NVC57E_SET_KEY_GREEN_Y (0x00000300)
+#define NVC57E_SET_KEY_GREEN_Y_MIN 15:0
+#define NVC57E_SET_KEY_GREEN_Y_MAX 31:16
+#define NVC57E_SET_KEY_BLUE_CB (0x00000304)
+#define NVC57E_SET_KEY_BLUE_CB_MIN 15:0
+#define NVC57E_SET_KEY_BLUE_CB_MAX 31:16
+#define NVC57E_SET_PRESENT_CONTROL (0x00000308)
+#define NVC57E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0
+#define NVC57E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4
+#define NVC57E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000)
+#define NVC57E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001)
+#define NVC57E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8
+#define NVC57E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000)
+#define NVC57E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001)
+#define NVC57E_SET_PRESENT_CONTROL_STEREO_MODE 13:12
+#define NVC57E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000)
+#define NVC57E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001)
+#define NVC57E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002)
+#define NVC57E_SET_TIMESTAMP_ORIGIN_LO (0x00000340)
+#define NVC57E_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0
+#define NVC57E_SET_TIMESTAMP_ORIGIN_HI (0x00000344)
+#define NVC57E_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0
+#define NVC57E_SET_UPDATE_TIMESTAMP_LO (0x00000348)
+#define NVC57E_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0
+#define NVC57E_SET_UPDATE_TIMESTAMP_HI (0x0000034C)
+#define NVC57E_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0
+#define NVC57E_SET_INTERLOCK_FLAGS (0x00000370)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 0:0
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+1):((i)+1)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 1:1
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 2:2
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 3:3
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 4:4
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 5:5
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 6:6
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 7:7
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 8:8
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000)
+#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS (0x00000374)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001)
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9
+#define
NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) 
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC57E_SET_EXT_PACKET_CONTROL (0x00000398) +#define NVC57E_SET_EXT_PACKET_CONTROL_ENABLE 0:0 +#define NVC57E_SET_EXT_PACKET_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_EXT_PACKET_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_EXT_PACKET_CONTROL_LOCATION 4:4 +#define NVC57E_SET_EXT_PACKET_CONTROL_LOCATION_VSYNC (0x00000000) +#define NVC57E_SET_EXT_PACKET_CONTROL_LOCATION_VBLANK (0x00000001) +#define NVC57E_SET_EXT_PACKET_CONTROL_FREQUENCY 8:8 +#define NVC57E_SET_EXT_PACKET_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVC57E_SET_EXT_PACKET_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVC57E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE 12:12 +#define NVC57E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_DISABLE (0x00000000) +#define NVC57E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_ENABLE (0x00000001) +#define NVC57E_SET_EXT_PACKET_CONTROL_SIZE 27:16 +#define NVC57E_SET_EXT_PACKET_DATA (0x0000039C) +#define NVC57E_SET_EXT_PACKET_DATA_DB0 7:0 +#define NVC57E_SET_EXT_PACKET_DATA_DB1 15:8 +#define NVC57E_SET_EXT_PACKET_DATA_DB2 23:16 +#define NVC57E_SET_EXT_PACKET_DATA_DB3 31:24 +#define NVC57E_SET_FMT_COEFFICIENT_C00 (0x00000400) +#define NVC57E_SET_FMT_COEFFICIENT_C00_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C01 (0x00000404) +#define NVC57E_SET_FMT_COEFFICIENT_C01_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C02 (0x00000408) +#define NVC57E_SET_FMT_COEFFICIENT_C02_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C03 (0x0000040C) +#define 
NVC57E_SET_FMT_COEFFICIENT_C03_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C10 (0x00000410) +#define NVC57E_SET_FMT_COEFFICIENT_C10_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C11 (0x00000414) +#define NVC57E_SET_FMT_COEFFICIENT_C11_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C12 (0x00000418) +#define NVC57E_SET_FMT_COEFFICIENT_C12_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C13 (0x0000041C) +#define NVC57E_SET_FMT_COEFFICIENT_C13_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C20 (0x00000420) +#define NVC57E_SET_FMT_COEFFICIENT_C20_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C21 (0x00000424) +#define NVC57E_SET_FMT_COEFFICIENT_C21_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C22 (0x00000428) +#define NVC57E_SET_FMT_COEFFICIENT_C22_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C23 (0x0000042C) +#define NVC57E_SET_FMT_COEFFICIENT_C23_VALUE 20:0 +#define NVC57E_SET_ILUT_CONTROL (0x00000440) +#define NVC57E_SET_ILUT_CONTROL_INTERPOLATE 0:0 +#define NVC57E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC57E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC57E_SET_ILUT_CONTROL_MIRROR 1:1 +#define NVC57E_SET_ILUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC57E_SET_ILUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC57E_SET_ILUT_CONTROL_MODE 3:2 +#define NVC57E_SET_ILUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define NVC57E_SET_ILUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVC57E_SET_ILUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVC57E_SET_ILUT_CONTROL_SIZE 18:8 +#define NVC57E_SET_CONTEXT_DMA_ILUT (0x00000444) +#define NVC57E_SET_CONTEXT_DMA_ILUT_HANDLE 31:0 +#define NVC57E_SET_OFFSET_ILUT (0x00000448) +#define NVC57E_SET_OFFSET_ILUT_ORIGIN 31:0 +#define NVC57E_SET_CSC00CONTROL (0x0000045C) +#define NVC57E_SET_CSC00CONTROL_ENABLE 0:0 +#define NVC57E_SET_CSC00CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC00CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC00COEFFICIENT_C00 (0x00000460) +#define NVC57E_SET_CSC00COEFFICIENT_C00_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C01 (0x00000464) +#define NVC57E_SET_CSC00COEFFICIENT_C01_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C02 (0x00000468) +#define NVC57E_SET_CSC00COEFFICIENT_C02_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C03 (0x0000046C) +#define NVC57E_SET_CSC00COEFFICIENT_C03_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C10 (0x00000470) +#define NVC57E_SET_CSC00COEFFICIENT_C10_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C11 (0x00000474) +#define NVC57E_SET_CSC00COEFFICIENT_C11_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C12 (0x00000478) +#define NVC57E_SET_CSC00COEFFICIENT_C12_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C13 (0x0000047C) +#define NVC57E_SET_CSC00COEFFICIENT_C13_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C20 (0x00000480) +#define NVC57E_SET_CSC00COEFFICIENT_C20_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C21 (0x00000484) +#define NVC57E_SET_CSC00COEFFICIENT_C21_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C22 (0x00000488) +#define NVC57E_SET_CSC00COEFFICIENT_C22_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C23 (0x0000048C) +#define NVC57E_SET_CSC00COEFFICIENT_C23_VALUE 20:0 +#define NVC57E_SET_CSC0LUT_CONTROL (0x000004A0) +#define NVC57E_SET_CSC0LUT_CONTROL_INTERPOLATE 0:0 +#define NVC57E_SET_CSC0LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC57E_SET_CSC0LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC57E_SET_CSC0LUT_CONTROL_MIRROR 1:1 +#define NVC57E_SET_CSC0LUT_CONTROL_MIRROR_DISABLE (0x00000000) 
+#define NVC57E_SET_CSC0LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC57E_SET_CSC0LUT_CONTROL_ENABLE 4:4 +#define NVC57E_SET_CSC0LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC0LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC0LUT_SEGMENT_SIZE (0x000004A4) +#define NVC57E_SET_CSC0LUT_SEGMENT_SIZE_IDX 5:0 +#define NVC57E_SET_CSC0LUT_SEGMENT_SIZE_VALUE 18:16 +#define NVC57E_SET_CSC0LUT_ENTRY (0x000004A8) +#define NVC57E_SET_CSC0LUT_ENTRY_IDX 10:0 +#define NVC57E_SET_CSC0LUT_ENTRY_VALUE 31:16 +#define NVC57E_SET_CSC01CONTROL (0x000004BC) +#define NVC57E_SET_CSC01CONTROL_ENABLE 0:0 +#define NVC57E_SET_CSC01CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC01CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC01COEFFICIENT_C00 (0x000004C0) +#define NVC57E_SET_CSC01COEFFICIENT_C00_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C01 (0x000004C4) +#define NVC57E_SET_CSC01COEFFICIENT_C01_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C02 (0x000004C8) +#define NVC57E_SET_CSC01COEFFICIENT_C02_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C03 (0x000004CC) +#define NVC57E_SET_CSC01COEFFICIENT_C03_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C10 (0x000004D0) +#define NVC57E_SET_CSC01COEFFICIENT_C10_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C11 (0x000004D4) +#define NVC57E_SET_CSC01COEFFICIENT_C11_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C12 (0x000004D8) +#define NVC57E_SET_CSC01COEFFICIENT_C12_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C13 (0x000004DC) +#define NVC57E_SET_CSC01COEFFICIENT_C13_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C20 (0x000004E0) +#define NVC57E_SET_CSC01COEFFICIENT_C20_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C21 (0x000004E4) +#define NVC57E_SET_CSC01COEFFICIENT_C21_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C22 (0x000004E8) +#define NVC57E_SET_CSC01COEFFICIENT_C22_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C23 (0x000004EC) +#define NVC57E_SET_CSC01COEFFICIENT_C23_VALUE 20:0 +#define NVC57E_SET_TMO_CONTROL (0x00000500) +#define NVC57E_SET_TMO_CONTROL_INTERPOLATE 0:0 +#define NVC57E_SET_TMO_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC57E_SET_TMO_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC57E_SET_TMO_CONTROL_SAT_MODE 3:2 +#define NVC57E_SET_TMO_CONTROL_SIZE 18:8 +#define NVC57E_SET_TMO_LOW_INTENSITY_ZONE (0x00000508) +#define NVC57E_SET_TMO_LOW_INTENSITY_ZONE_END 29:16 +#define NVC57E_SET_TMO_LOW_INTENSITY_VALUE (0x0000050C) +#define NVC57E_SET_TMO_LOW_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC57E_SET_TMO_LOW_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC57E_SET_TMO_LOW_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_ZONE (0x00000510) +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_ZONE_START 13:0 +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_ZONE_END 29:16 +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_VALUE (0x00000514) +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC57E_SET_TMO_HIGH_INTENSITY_ZONE (0x00000518) +#define NVC57E_SET_TMO_HIGH_INTENSITY_ZONE_START 13:0 +#define NVC57E_SET_TMO_HIGH_INTENSITY_VALUE (0x0000051C) +#define NVC57E_SET_TMO_HIGH_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC57E_SET_TMO_HIGH_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC57E_SET_TMO_HIGH_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC57E_SET_CONTEXT_DMA_TMO_LUT (0x00000528) +#define NVC57E_SET_CONTEXT_DMA_TMO_LUT_HANDLE 
31:0 +#define NVC57E_SET_OFFSET_TMO_LUT (0x0000052C) +#define NVC57E_SET_OFFSET_TMO_LUT_ORIGIN 31:0 +#define NVC57E_SET_CSC10CONTROL (0x0000053C) +#define NVC57E_SET_CSC10CONTROL_ENABLE 0:0 +#define NVC57E_SET_CSC10CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC10CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC10COEFFICIENT_C00 (0x00000540) +#define NVC57E_SET_CSC10COEFFICIENT_C00_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C01 (0x00000544) +#define NVC57E_SET_CSC10COEFFICIENT_C01_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C02 (0x00000548) +#define NVC57E_SET_CSC10COEFFICIENT_C02_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C03 (0x0000054C) +#define NVC57E_SET_CSC10COEFFICIENT_C03_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C10 (0x00000550) +#define NVC57E_SET_CSC10COEFFICIENT_C10_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C11 (0x00000554) +#define NVC57E_SET_CSC10COEFFICIENT_C11_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C12 (0x00000558) +#define NVC57E_SET_CSC10COEFFICIENT_C12_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C13 (0x0000055C) +#define NVC57E_SET_CSC10COEFFICIENT_C13_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C20 (0x00000560) +#define NVC57E_SET_CSC10COEFFICIENT_C20_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C21 (0x00000564) +#define NVC57E_SET_CSC10COEFFICIENT_C21_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C22 (0x00000568) +#define NVC57E_SET_CSC10COEFFICIENT_C22_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C23 (0x0000056C) +#define NVC57E_SET_CSC10COEFFICIENT_C23_VALUE 20:0 +#define NVC57E_SET_CSC1LUT_CONTROL (0x00000580) +#define NVC57E_SET_CSC1LUT_CONTROL_INTERPOLATE 0:0 +#define NVC57E_SET_CSC1LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC57E_SET_CSC1LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC57E_SET_CSC1LUT_CONTROL_MIRROR 1:1 +#define NVC57E_SET_CSC1LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC57E_SET_CSC1LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC57E_SET_CSC1LUT_CONTROL_ENABLE 4:4 +#define NVC57E_SET_CSC1LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC1LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC1LUT_SEGMENT_SIZE (0x00000584) +#define NVC57E_SET_CSC1LUT_SEGMENT_SIZE_IDX 5:0 +#define NVC57E_SET_CSC1LUT_SEGMENT_SIZE_VALUE 18:16 +#define NVC57E_SET_CSC1LUT_ENTRY (0x00000588) +#define NVC57E_SET_CSC1LUT_ENTRY_IDX 10:0 +#define NVC57E_SET_CSC1LUT_ENTRY_VALUE 31:16 +#define NVC57E_SET_CSC11CONTROL (0x0000059C) +#define NVC57E_SET_CSC11CONTROL_ENABLE 0:0 +#define NVC57E_SET_CSC11CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC11CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC11COEFFICIENT_C00 (0x000005A0) +#define NVC57E_SET_CSC11COEFFICIENT_C00_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C01 (0x000005A4) +#define NVC57E_SET_CSC11COEFFICIENT_C01_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C02 (0x000005A8) +#define NVC57E_SET_CSC11COEFFICIENT_C02_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C03 (0x000005AC) +#define NVC57E_SET_CSC11COEFFICIENT_C03_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C10 (0x000005B0) +#define NVC57E_SET_CSC11COEFFICIENT_C10_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C11 (0x000005B4) +#define NVC57E_SET_CSC11COEFFICIENT_C11_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C12 (0x000005B8) +#define NVC57E_SET_CSC11COEFFICIENT_C12_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C13 (0x000005BC) +#define NVC57E_SET_CSC11COEFFICIENT_C13_VALUE 20:0 +#define 
NVC57E_SET_CSC11COEFFICIENT_C20 (0x000005C0) +#define NVC57E_SET_CSC11COEFFICIENT_C20_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C21 (0x000005C4) +#define NVC57E_SET_CSC11COEFFICIENT_C21_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C22 (0x000005C8) +#define NVC57E_SET_CSC11COEFFICIENT_C22_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C23 (0x000005CC) +#define NVC57E_SET_CSC11COEFFICIENT_C23_VALUE 20:0 +#define NVC57E_SET_CLAMP_RANGE (0x000005D0) +#define NVC57E_SET_CLAMP_RANGE_LOW 15:0 +#define NVC57E_SET_CLAMP_RANGE_HIGH 31:16 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC57e_h diff --git a/src/common/sdk/nvidia/inc/class/clc57esw.h b/src/common/sdk/nvidia/inc/class/clc57esw.h new file mode 100644 index 0000000..8c106b4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc57esw.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2009-2018, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc57e_sw_h_ +#define _clc57e_sw_h_ + +/* This file is *not* auto-generated. */ + +#define NVC57E_WINDOWS_NOTIFY_RM (0x0000058C) +#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE_CHANGE 0:0 +#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE_CHANGE_FALSE (0x00000000) +#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE_CHANGE_TRUE (0x00000001) +#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE 1:1 +#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE_OFF (0x00000000) +#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE_ON (0x00000001) +#define NVC57E_WINDOWS_NOTIFY_RM_ASSOCIATED_HEAD 7:4 + +#define SwSetMClkSwitch Reserved05[1] + +#define NVC57E_SW_SET_MCLK_SWITCH (0x000002B4) +#define NVC57E_SW_SET_MCLK_SWITCH_ENABLE 0:0 +#define NVC57E_SW_SET_MCLK_SWITCH_ENABLE_FALSE (0x00000000) +#define NVC57E_SW_SET_MCLK_SWITCH_ENABLE_TRUE (0x00000001) + +#endif // _clc57e_sw_h_ + diff --git a/src/common/sdk/nvidia/inc/class/clc597.h b/src/common/sdk/nvidia/inc/class/clc597.h new file mode 100644 index 0000000..711dae1 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc597.h @@ -0,0 +1,4352 @@ +/******************************************************************************* + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
+ + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef _cl_turing_a_h_ +#define _cl_turing_a_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../../../class/bin/sw_header.pl turing_a */ + +#include "nvtypes.h" + +#define TURING_A 0xC597 + +#define NVC597_SET_OBJECT 0x0000 +#define NVC597_SET_OBJECT_CLASS_ID 15:0 +#define NVC597_SET_OBJECT_ENGINE_ID 20:16 + +#define NVC597_NO_OPERATION 0x0100 +#define NVC597_NO_OPERATION_V 31:0 + +#define NVC597_SET_NOTIFY_A 0x0104 +#define NVC597_SET_NOTIFY_A_ADDRESS_UPPER 7:0 + +#define NVC597_SET_NOTIFY_B 0x0108 +#define NVC597_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NVC597_NOTIFY 0x010c +#define NVC597_NOTIFY_TYPE 31:0 +#define NVC597_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NVC597_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NVC597_WAIT_FOR_IDLE 0x0110 +#define NVC597_WAIT_FOR_IDLE_V 31:0 + +#define NVC597_LOAD_MME_INSTRUCTION_RAM_POINTER 0x0114 +#define NVC597_LOAD_MME_INSTRUCTION_RAM_POINTER_V 31:0 + +#define NVC597_LOAD_MME_INSTRUCTION_RAM 0x0118 +#define NVC597_LOAD_MME_INSTRUCTION_RAM_V 31:0 + +#define NVC597_LOAD_MME_START_ADDRESS_RAM_POINTER 0x011c +#define NVC597_LOAD_MME_START_ADDRESS_RAM_POINTER_V 31:0 + +#define NVC597_LOAD_MME_START_ADDRESS_RAM 0x0120 +#define NVC597_LOAD_MME_START_ADDRESS_RAM_V 31:0 + +#define NVC597_SET_MME_SHADOW_RAM_CONTROL 0x0124 +#define NVC597_SET_MME_SHADOW_RAM_CONTROL_MODE 1:0 +#define NVC597_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK 0x00000000 +#define NVC597_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK_WITH_FILTER 0x00000001 +#define NVC597_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH 0x00000002 +#define NVC597_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_REPLAY 0x00000003 + +#define NVC597_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER 0x0128 +#define NVC597_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER_V 7:0 + +#define NVC597_PEER_SEMAPHORE_RELEASE_OFFSET 0x012c +#define NVC597_PEER_SEMAPHORE_RELEASE_OFFSET_V 31:0 + +#define NVC597_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NVC597_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC597_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NVC597_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC597_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NVC597_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NVC597_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC597_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC597_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 
0x00000002 +#define NVC597_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC597_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC597_SEND_GO_IDLE 0x013c +#define NVC597_SEND_GO_IDLE_V 31:0 + +#define NVC597_PM_TRIGGER 0x0140 +#define NVC597_PM_TRIGGER_V 31:0 + +#define NVC597_PM_TRIGGER_WFI 0x0144 +#define NVC597_PM_TRIGGER_WFI_V 31:0 + +#define NVC597_FE_ATOMIC_SEQUENCE_BEGIN 0x0148 +#define NVC597_FE_ATOMIC_SEQUENCE_BEGIN_V 31:0 + +#define NVC597_FE_ATOMIC_SEQUENCE_END 0x014c +#define NVC597_FE_ATOMIC_SEQUENCE_END_V 31:0 + +#define NVC597_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NVC597_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NVC597_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NVC597_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NVC597_LINE_LENGTH_IN 0x0180 +#define NVC597_LINE_LENGTH_IN_VALUE 31:0 + +#define NVC597_LINE_COUNT 0x0184 +#define NVC597_LINE_COUNT_VALUE 31:0 + +#define NVC597_OFFSET_OUT_UPPER 0x0188 +#define NVC597_OFFSET_OUT_UPPER_VALUE 7:0 + +#define NVC597_OFFSET_OUT 0x018c +#define NVC597_OFFSET_OUT_VALUE 31:0 + +#define NVC597_PITCH_OUT 0x0190 +#define NVC597_PITCH_OUT_VALUE 31:0 + +#define NVC597_SET_DST_BLOCK_SIZE 0x0194 +#define NVC597_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC597_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVC597_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC597_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC597_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC597_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC597_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC597_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC597_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC597_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC597_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVC597_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVC597_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVC597_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NVC597_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVC597_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVC597_SET_DST_WIDTH 0x0198 +#define NVC597_SET_DST_WIDTH_V 31:0 + +#define NVC597_SET_DST_HEIGHT 0x019c +#define NVC597_SET_DST_HEIGHT_V 31:0 + +#define NVC597_SET_DST_DEPTH 0x01a0 +#define NVC597_SET_DST_DEPTH_V 31:0 + +#define NVC597_SET_DST_LAYER 0x01a4 +#define NVC597_SET_DST_LAYER_V 31:0 + +#define NVC597_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVC597_SET_DST_ORIGIN_BYTES_X_V 20:0 + +#define NVC597_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVC597_SET_DST_ORIGIN_SAMPLES_Y_V 16:0 + +#define NVC597_LAUNCH_DMA 0x01b0 +#define NVC597_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVC597_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVC597_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVC597_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVC597_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVC597_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 +#define NVC597_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVC597_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVC597_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 +#define NVC597_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVC597_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVC597_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVC597_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define 
NVC597_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVC597_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC597_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC597_LAUNCH_DMA_REDUCTION_OP 15:13 +#define NVC597_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC597_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC597_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC597_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVC597_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC597_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVC597_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVC597_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC597_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVC597_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC597_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVC597_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVC597_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVC597_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVC597_LOAD_INLINE_DATA 0x01b4 +#define NVC597_LOAD_INLINE_DATA_V 31:0 + +#define NVC597_SET_I2M_SEMAPHORE_A 0x01dc +#define NVC597_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC597_SET_I2M_SEMAPHORE_B 0x01e0 +#define NVC597_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC597_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVC597_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC597_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVC597_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVC597_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVC597_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVC597_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVC597_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVC597_SET_I2M_SPARE_NOOP03 0x01fc +#define NVC597_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVC597_RUN_DS_NOW 0x0200 +#define NVC597_RUN_DS_NOW_V 31:0 + +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS 0x0204 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD 4:0 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_INSTANTANEOUS 0x00000000 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16 0x00000001 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32 0x00000002 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__64 0x00000003 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__128 0x00000004 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__256 0x00000005 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__512 0x00000006 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1024 0x00000007 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2048 0x00000008 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4096 0x00000009 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__8192 0x0000000A +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16384 0x0000000B +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32768 0x0000000C +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__65536 0x0000000D +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__131072 0x0000000E +#define 
NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__262144 0x0000000F +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__524288 0x00000010 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1048576 0x00000011 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2097152 0x00000012 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4194304 0x00000013 +#define NVC597_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_LATEZ_ALWAYS 0x0000001F + +#define NVC597_SET_GS_MODE 0x0208 +#define NVC597_SET_GS_MODE_TYPE 0:0 +#define NVC597_SET_GS_MODE_TYPE_ANY 0x00000000 +#define NVC597_SET_GS_MODE_TYPE_FAST_GS 0x00000001 + +#define NVC597_SET_ALIASED_LINE_WIDTH_ENABLE 0x020c +#define NVC597_SET_ALIASED_LINE_WIDTH_ENABLE_V 0:0 +#define NVC597_SET_ALIASED_LINE_WIDTH_ENABLE_V_FALSE 0x00000000 +#define NVC597_SET_ALIASED_LINE_WIDTH_ENABLE_V_TRUE 0x00000001 + +#define NVC597_SET_API_MANDATED_EARLY_Z 0x0210 +#define NVC597_SET_API_MANDATED_EARLY_Z_ENABLE 0:0 +#define NVC597_SET_API_MANDATED_EARLY_Z_ENABLE_FALSE 0x00000000 +#define NVC597_SET_API_MANDATED_EARLY_Z_ENABLE_TRUE 0x00000001 + +#define NVC597_SET_GS_DM_FIFO 0x0214 +#define NVC597_SET_GS_DM_FIFO_SIZE_RASTER_ON 12:0 +#define NVC597_SET_GS_DM_FIFO_SIZE_RASTER_OFF 28:16 +#define NVC597_SET_GS_DM_FIFO_SPILL_ENABLED 31:31 +#define NVC597_SET_GS_DM_FIFO_SPILL_ENABLED_FALSE 0x00000000 +#define NVC597_SET_GS_DM_FIFO_SPILL_ENABLED_TRUE 0x00000001 + +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS 0x0218 +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY 5:4 +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC597_INVALIDATE_SHADER_CACHES 0x021c +#define NVC597_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVC597_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVC597_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVC597_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVC597_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVC597_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVC597_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVC597_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVC597_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVC597_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVC597_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVC597_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVC597_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVC597_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVC597_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVC597_SET_INSTANCE_COUNT 0x0220 +#define NVC597_SET_INSTANCE_COUNT_V 31:0 + +#define NVC597_SET_POSITION_W_SCALED_OFFSET_ENABLE 0x0224 +#define NVC597_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE 0:0 +#define NVC597_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE_FALSE 0x00000000 +#define NVC597_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE_TRUE 0x00000001 + +#define NVC597_SET_GO_IDLE_TIMEOUT 0x022c +#define NVC597_SET_GO_IDLE_TIMEOUT_V 31:0 + +#define NVC597_SET_MME_VERSION 0x0234 +#define NVC597_SET_MME_VERSION_MAJOR 7:0 + 
+#define NVC597_SET_INDEX_BUFFER_SIZE_A 0x0238 +#define NVC597_SET_INDEX_BUFFER_SIZE_A_UPPER 7:0 + +#define NVC597_SET_INDEX_BUFFER_SIZE_B 0x023c +#define NVC597_SET_INDEX_BUFFER_SIZE_B_LOWER 31:0 + +#define NVC597_SET_ROOT_TABLE_VISIBILITY(i) (0x0240+(i)*4) +#define NVC597_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP0_ENABLE 1:0 +#define NVC597_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP1_ENABLE 5:4 +#define NVC597_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP2_ENABLE 9:8 +#define NVC597_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP3_ENABLE 13:12 +#define NVC597_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP4_ENABLE 17:16 + +#define NVC597_SET_DRAW_CONTROL_A 0x0260 +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY 3:0 +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY_POINTS 0x00000000 +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY_LINES 0x00000001 +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY_QUADS 0x00000007 +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY_POLYGON 0x00000009 +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC597_SET_DRAW_CONTROL_A_TOPOLOGY_PATCH 0x0000000E +#define NVC597_SET_DRAW_CONTROL_A_PRIMITIVE_ID 4:4 +#define NVC597_SET_DRAW_CONTROL_A_PRIMITIVE_ID_FIRST 0x00000000 +#define NVC597_SET_DRAW_CONTROL_A_PRIMITIVE_ID_UNCHANGED 0x00000001 +#define NVC597_SET_DRAW_CONTROL_A_INSTANCE_ID 6:5 +#define NVC597_SET_DRAW_CONTROL_A_INSTANCE_ID_FIRST 0x00000000 +#define NVC597_SET_DRAW_CONTROL_A_INSTANCE_ID_SUBSEQUENT 0x00000001 +#define NVC597_SET_DRAW_CONTROL_A_INSTANCE_ID_UNCHANGED 0x00000002 +#define NVC597_SET_DRAW_CONTROL_A_SPLIT_MODE 8:7 +#define NVC597_SET_DRAW_CONTROL_A_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000 +#define NVC597_SET_DRAW_CONTROL_A_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001 +#define NVC597_SET_DRAW_CONTROL_A_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002 +#define NVC597_SET_DRAW_CONTROL_A_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003 +#define NVC597_SET_DRAW_CONTROL_A_INSTANCE_ITERATE_ENABLE 9:9 +#define NVC597_SET_DRAW_CONTROL_A_INSTANCE_ITERATE_ENABLE_FALSE 0x00000000 +#define NVC597_SET_DRAW_CONTROL_A_INSTANCE_ITERATE_ENABLE_TRUE 0x00000001 +#define NVC597_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_VERTEX_INDEX 10:10 +#define NVC597_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_VERTEX_INDEX_FALSE 0x00000000 +#define NVC597_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_VERTEX_INDEX_TRUE 0x00000001 +#define NVC597_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_INSTANCE_INDEX 11:11 +#define NVC597_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_INSTANCE_INDEX_FALSE 0x00000000 +#define NVC597_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_INSTANCE_INDEX_TRUE 0x00000001 + +#define NVC597_SET_DRAW_CONTROL_B 0x0264 +#define NVC597_SET_DRAW_CONTROL_B_INSTANCE_COUNT 31:0 + +#define NVC597_DRAW_INDEX_BUFFER_BEGIN_END_A 0x0268 +#define NVC597_DRAW_INDEX_BUFFER_BEGIN_END_A_FIRST 31:0 + +#define NVC597_DRAW_INDEX_BUFFER_BEGIN_END_B 0x026c +#define NVC597_DRAW_INDEX_BUFFER_BEGIN_END_B_COUNT 31:0 + +#define 
NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_A 0x0270 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_A_START 31:0 + +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_B 0x0274 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_B_COUNT 31:0 + +#define NVC597_INVALIDATE_RASTER_CACHE_NO_WFI 0x027c +#define NVC597_INVALIDATE_RASTER_CACHE_NO_WFI_V 0:0 + +#define NVC597_SET_COLOR_RENDER_TO_ZETA_SURFACE 0x02b8 +#define NVC597_SET_COLOR_RENDER_TO_ZETA_SURFACE_V 0:0 +#define NVC597_SET_COLOR_RENDER_TO_ZETA_SURFACE_V_FALSE 0x00000000 +#define NVC597_SET_COLOR_RENDER_TO_ZETA_SURFACE_V_TRUE 0x00000001 + +#define NVC597_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION 0x02bc +#define NVC597_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION_V 0:0 +#define NVC597_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION_V_FALSE 0x00000000 +#define NVC597_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION_V_TRUE 0x00000001 + +#define NVC597_INCREMENT_SYNC_POINT 0x02c8 +#define NVC597_INCREMENT_SYNC_POINT_INDEX 11:0 +#define NVC597_INCREMENT_SYNC_POINT_CLEAN_L2 16:16 +#define NVC597_INCREMENT_SYNC_POINT_CLEAN_L2_FALSE 0x00000000 +#define NVC597_INCREMENT_SYNC_POINT_CLEAN_L2_TRUE 0x00000001 +#define NVC597_INCREMENT_SYNC_POINT_CONDITION 20:20 +#define NVC597_INCREMENT_SYNC_POINT_CONDITION_STREAM_OUT_WRITES_DONE 0x00000000 +#define NVC597_INCREMENT_SYNC_POINT_CONDITION_ROP_WRITES_DONE 0x00000001 + +#define NVC597_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE 0x02d4 +#define NVC597_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE_V 0:0 + +#define NVC597_SET_SURFACE_CLIP_ID_BLOCK_SIZE 0x02d8 +#define NVC597_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH 3:0 +#define NVC597_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVC597_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT 7:4 +#define NVC597_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC597_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC597_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC597_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC597_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC597_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC597_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH 11:8 +#define NVC597_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 + +#define NVC597_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dc +#define NVC597_SET_ALPHA_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 13:0 + +#define NVC597_DECOMPRESS_SURFACE 0x02e0 +#define NVC597_DECOMPRESS_SURFACE_MRT_SELECT 2:0 +#define NVC597_DECOMPRESS_SURFACE_RT_ARRAY_INDEX 19:4 + +#define NVC597_SET_ZCULL_ROP_BYPASS 0x02e4 +#define NVC597_SET_ZCULL_ROP_BYPASS_ENABLE 0:0 +#define NVC597_SET_ZCULL_ROP_BYPASS_ENABLE_FALSE 0x00000000 +#define NVC597_SET_ZCULL_ROP_BYPASS_ENABLE_TRUE 0x00000001 +#define NVC597_SET_ZCULL_ROP_BYPASS_NO_STALL 4:4 +#define NVC597_SET_ZCULL_ROP_BYPASS_NO_STALL_FALSE 0x00000000 +#define NVC597_SET_ZCULL_ROP_BYPASS_NO_STALL_TRUE 0x00000001 +#define NVC597_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING 8:8 +#define NVC597_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_FALSE 0x00000000 +#define NVC597_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_TRUE 0x00000001 +#define NVC597_SET_ZCULL_ROP_BYPASS_THRESHOLD 15:12 + +#define NVC597_SET_ZCULL_SUBREGION 0x02e8 +#define NVC597_SET_ZCULL_SUBREGION_ENABLE 0:0 +#define NVC597_SET_ZCULL_SUBREGION_ENABLE_FALSE 0x00000000 +#define NVC597_SET_ZCULL_SUBREGION_ENABLE_TRUE 0x00000001 +#define NVC597_SET_ZCULL_SUBREGION_NORMALIZED_ALIQUOTS 27:4 + +#define NVC597_SET_RASTER_BOUNDING_BOX 0x02ec +#define NVC597_SET_RASTER_BOUNDING_BOX_MODE 0:0 +#define 
NVC597_SET_RASTER_BOUNDING_BOX_MODE_BOUNDING_BOX 0x00000000 +#define NVC597_SET_RASTER_BOUNDING_BOX_MODE_FULL_VIEWPORT 0x00000001 +#define NVC597_SET_RASTER_BOUNDING_BOX_PAD 11:4 + +#define NVC597_PEER_SEMAPHORE_RELEASE 0x02f0 +#define NVC597_PEER_SEMAPHORE_RELEASE_V 31:0 + +#define NVC597_SET_ITERATED_BLEND_OPTIMIZATION 0x02f4 +#define NVC597_SET_ITERATED_BLEND_OPTIMIZATION_NOOP 1:0 +#define NVC597_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_NEVER 0x00000000 +#define NVC597_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0000 0x00000001 +#define NVC597_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_ALPHA_0 0x00000002 +#define NVC597_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0001 0x00000003 + +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION 0x02f8 +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_SUBREGION_ID 7:0 +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_ALIQUOTS 23:8 +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT 27:24 +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16X2_4X4 0x00000000 +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X16_4X4 0x00000001 +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X2 0x00000002 +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_2X4 0x00000003 +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X8_4X4 0x00000004 +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_4X2 0x00000005 +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_2X4 0x00000006 +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X8 0x00000007 +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_2X2 0x00000008 +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_4X2 0x00000009 +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_2X4 0x0000000A +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_8X8_2X2 0x0000000B +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_1X1 0x0000000C +#define NVC597_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_NONE 0x0000000F + +#define NVC597_ASSIGN_ZCULL_SUBREGIONS 0x02fc +#define NVC597_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM 1:0 +#define NVC597_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Static 0x00000000 +#define NVC597_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Adaptive 0x00000001 + +#define NVC597_SET_PS_OUTPUT_SAMPLE_MASK_USAGE 0x0300 +#define NVC597_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE 0:0 +#define NVC597_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_FALSE 0x00000000 +#define NVC597_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_TRUE 0x00000001 +#define NVC597_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE 1:1 +#define NVC597_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000 +#define NVC597_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001 + +#define NVC597_DRAW_ZERO_INDEX 0x0304 +#define NVC597_DRAW_ZERO_INDEX_COUNT 31:0 + +#define NVC597_SET_L1_CONFIGURATION 0x0308 +#define NVC597_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY 2:0 +#define NVC597_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_16KB 0x00000001 +#define NVC597_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB 0x00000003 + +#define NVC597_SET_RENDER_ENABLE_CONTROL 0x030c +#define NVC597_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER 0:0 +#define NVC597_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_FALSE 0x00000000 +#define NVC597_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_TRUE 0x00000001 + +#define NVC597_SET_SPA_VERSION 0x0310 +#define NVC597_SET_SPA_VERSION_MINOR 7:0 +#define 
NVC597_SET_SPA_VERSION_MAJOR 15:8 + +#define NVC597_SET_TIMESLICE_BATCH_LIMIT 0x0314 +#define NVC597_SET_TIMESLICE_BATCH_LIMIT_BATCH_LIMIT 15:0 + +#define NVC597_SET_SNAP_GRID_LINE 0x0318 +#define NVC597_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL 3:0 +#define NVC597_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001 +#define NVC597_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002 +#define NVC597_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003 +#define NVC597_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004 +#define NVC597_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005 +#define NVC597_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006 +#define NVC597_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007 +#define NVC597_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008 +#define NVC597_SET_SNAP_GRID_LINE_ROUNDING_MODE 8:8 +#define NVC597_SET_SNAP_GRID_LINE_ROUNDING_MODE_RTNE 0x00000000 +#define NVC597_SET_SNAP_GRID_LINE_ROUNDING_MODE_TESLA 0x00000001 + +#define NVC597_SET_SNAP_GRID_NON_LINE 0x031c +#define NVC597_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL 3:0 +#define NVC597_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001 +#define NVC597_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002 +#define NVC597_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003 +#define NVC597_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004 +#define NVC597_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005 +#define NVC597_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006 +#define NVC597_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007 +#define NVC597_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008 +#define NVC597_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE 8:8 +#define NVC597_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_RTNE 0x00000000 +#define NVC597_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_TESLA 0x00000001 + +#define NVC597_SET_TESSELLATION_PARAMETERS 0x0320 +#define NVC597_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE 1:0 +#define NVC597_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_ISOLINE 0x00000000 +#define NVC597_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_TRIANGLE 0x00000001 +#define NVC597_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_QUAD 0x00000002 +#define NVC597_SET_TESSELLATION_PARAMETERS_SPACING 5:4 +#define NVC597_SET_TESSELLATION_PARAMETERS_SPACING_INTEGER 0x00000000 +#define NVC597_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_ODD 0x00000001 +#define NVC597_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_EVEN 0x00000002 +#define NVC597_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES 9:8 +#define NVC597_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_POINTS 0x00000000 +#define NVC597_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_LINES 0x00000001 +#define NVC597_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CW 0x00000002 +#define NVC597_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CCW 0x00000003 + +#define NVC597_SET_TESSELLATION_LOD_U0_OR_DENSITY 0x0324 +#define NVC597_SET_TESSELLATION_LOD_U0_OR_DENSITY_V 31:0 + +#define NVC597_SET_TESSELLATION_LOD_V0_OR_DETAIL 0x0328 +#define NVC597_SET_TESSELLATION_LOD_V0_OR_DETAIL_V 31:0 + +#define NVC597_SET_TESSELLATION_LOD_U1_OR_W0 0x032c +#define NVC597_SET_TESSELLATION_LOD_U1_OR_W0_V 31:0 + +#define NVC597_SET_TESSELLATION_LOD_V1 0x0330 +#define NVC597_SET_TESSELLATION_LOD_V1_V 31:0 + +#define NVC597_SET_TG_LOD_INTERIOR_U 0x0334 +#define NVC597_SET_TG_LOD_INTERIOR_U_V 31:0 + +#define NVC597_SET_TG_LOD_INTERIOR_V 0x0338 +#define 
NVC597_SET_TG_LOD_INTERIOR_V_V 31:0 + +#define NVC597_RESERVED_TG07 0x033c +#define NVC597_RESERVED_TG07_V 0:0 + +#define NVC597_RESERVED_TG08 0x0340 +#define NVC597_RESERVED_TG08_V 0:0 + +#define NVC597_RESERVED_TG09 0x0344 +#define NVC597_RESERVED_TG09_V 0:0 + +#define NVC597_RESERVED_TG10 0x0348 +#define NVC597_RESERVED_TG10_V 0:0 + +#define NVC597_RESERVED_TG11 0x034c +#define NVC597_RESERVED_TG11_V 0:0 + +#define NVC597_RESERVED_TG12 0x0350 +#define NVC597_RESERVED_TG12_V 0:0 + +#define NVC597_RESERVED_TG13 0x0354 +#define NVC597_RESERVED_TG13_V 0:0 + +#define NVC597_RESERVED_TG14 0x0358 +#define NVC597_RESERVED_TG14_V 0:0 + +#define NVC597_RESERVED_TG15 0x035c +#define NVC597_RESERVED_TG15_V 0:0 + +#define NVC597_SET_SUBTILING_PERF_KNOB_A 0x0360 +#define NVC597_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_REGISTER_FILE_PER_SUBTILE 7:0 +#define NVC597_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_PIXEL_OUTPUT_BUFFER_PER_SUBTILE 15:8 +#define NVC597_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_TRIANGLE_RAM_PER_SUBTILE 23:16 +#define NVC597_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_MAX_QUADS_PER_SUBTILE 31:24 + +#define NVC597_SET_SUBTILING_PERF_KNOB_B 0x0364 +#define NVC597_SET_SUBTILING_PERF_KNOB_B_FRACTION_OF_MAX_PRIMITIVES_PER_SUBTILE 7:0 + +#define NVC597_SET_SUBTILING_PERF_KNOB_C 0x0368 +#define NVC597_SET_SUBTILING_PERF_KNOB_C_RESERVED 0:0 + +#define NVC597_SET_ZCULL_SUBREGION_TO_REPORT 0x036c +#define NVC597_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE 0:0 +#define NVC597_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_FALSE 0x00000000 +#define NVC597_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_TRUE 0x00000001 +#define NVC597_SET_ZCULL_SUBREGION_TO_REPORT_SUBREGION_ID 11:4 + +#define NVC597_SET_ZCULL_SUBREGION_REPORT_TYPE 0x0370 +#define NVC597_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE 0:0 +#define NVC597_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_FALSE 0x00000000 +#define NVC597_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_TRUE 0x00000001 +#define NVC597_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE 6:4 +#define NVC597_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST 0x00000000 +#define NVC597_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_NO_ACCEPT 0x00000001 +#define NVC597_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_LATE_Z 0x00000002 +#define NVC597_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_STENCIL_TEST 0x00000003 + +#define NVC597_SET_BALANCED_PRIMITIVE_WORKLOAD 0x0374 +#define NVC597_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE 0:0 +#define NVC597_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_FALSE 0x00000000 +#define NVC597_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_TRUE 0x00000001 +#define NVC597_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE 4:4 +#define NVC597_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_FALSE 0x00000000 +#define NVC597_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_TRUE 0x00000001 +#define NVC597_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE 8:8 +#define NVC597_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE_FALSE 0x00000000 +#define NVC597_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE_TRUE 0x00000001 +#define NVC597_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE 9:9 +#define NVC597_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE_FALSE 0x00000000 +#define NVC597_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE_TRUE 0x00000001 + +#define NVC597_SET_MAX_PATCHES_PER_BATCH 0x0378 +#define NVC597_SET_MAX_PATCHES_PER_BATCH_V 5:0 + +#define NVC597_SET_RASTER_ENABLE 0x037c +#define NVC597_SET_RASTER_ENABLE_V 
0:0 +#define NVC597_SET_RASTER_ENABLE_V_FALSE 0x00000000 +#define NVC597_SET_RASTER_ENABLE_V_TRUE 0x00000001 + +#define NVC597_SET_STREAM_OUT_BUFFER_ENABLE(j) (0x0380+(j)*32) +#define NVC597_SET_STREAM_OUT_BUFFER_ENABLE_V 0:0 +#define NVC597_SET_STREAM_OUT_BUFFER_ENABLE_V_FALSE 0x00000000 +#define NVC597_SET_STREAM_OUT_BUFFER_ENABLE_V_TRUE 0x00000001 + +#define NVC597_SET_STREAM_OUT_BUFFER_ADDRESS_A(j) (0x0384+(j)*32) +#define NVC597_SET_STREAM_OUT_BUFFER_ADDRESS_A_UPPER 7:0 + +#define NVC597_SET_STREAM_OUT_BUFFER_ADDRESS_B(j) (0x0388+(j)*32) +#define NVC597_SET_STREAM_OUT_BUFFER_ADDRESS_B_LOWER 31:0 + +#define NVC597_SET_STREAM_OUT_BUFFER_SIZE(j) (0x038c+(j)*32) +#define NVC597_SET_STREAM_OUT_BUFFER_SIZE_BYTES 31:0 + +#define NVC597_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER(j) (0x0390+(j)*32) +#define NVC597_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER_START_OFFSET 31:0 + +#define NVC597_SET_POSITION_W_SCALED_OFFSET_SCALE_A(j) (0x0400+(j)*16) +#define NVC597_SET_POSITION_W_SCALED_OFFSET_SCALE_A_V 31:0 + +#define NVC597_SET_POSITION_W_SCALED_OFFSET_SCALE_B(j) (0x0404+(j)*16) +#define NVC597_SET_POSITION_W_SCALED_OFFSET_SCALE_B_V 31:0 + +#define NVC597_SET_POSITION_W_SCALED_OFFSET_RESERVED_A(j) (0x0408+(j)*16) +#define NVC597_SET_POSITION_W_SCALED_OFFSET_RESERVED_A_V 31:0 + +#define NVC597_SET_POSITION_W_SCALED_OFFSET_RESERVED_B(j) (0x040c+(j)*16) +#define NVC597_SET_POSITION_W_SCALED_OFFSET_RESERVED_B_V 31:0 + +#define NVC597_SET_ROOT_TABLE_SELECTOR 0x0504 +#define NVC597_SET_ROOT_TABLE_SELECTOR_ROOT_TABLE 2:0 +#define NVC597_SET_ROOT_TABLE_SELECTOR_OFFSET 15:8 + +#define NVC597_LOAD_ROOT_TABLE 0x0508 +#define NVC597_LOAD_ROOT_TABLE_V 31:0 + +#define NVC597_SET_MME_MEM_ADDRESS_A 0x0550 +#define NVC597_SET_MME_MEM_ADDRESS_A_UPPER 7:0 + +#define NVC597_SET_MME_MEM_ADDRESS_B 0x0554 +#define NVC597_SET_MME_MEM_ADDRESS_B_LOWER 31:0 + +#define NVC597_SET_MME_DATA_RAM_ADDRESS 0x0558 +#define NVC597_SET_MME_DATA_RAM_ADDRESS_WORD 31:0 + +#define NVC597_MME_DMA_READ 0x055c +#define NVC597_MME_DMA_READ_LENGTH 31:0 + +#define NVC597_MME_DMA_READ_FIFOED 0x0560 +#define NVC597_MME_DMA_READ_FIFOED_LENGTH 31:0 + +#define NVC597_MME_DMA_WRITE 0x0564 +#define NVC597_MME_DMA_WRITE_LENGTH 31:0 + +#define NVC597_MME_DMA_REDUCTION 0x0568 +#define NVC597_MME_DMA_REDUCTION_REDUCTION_OP 2:0 +#define NVC597_MME_DMA_REDUCTION_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC597_MME_DMA_REDUCTION_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC597_MME_DMA_REDUCTION_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC597_MME_DMA_REDUCTION_REDUCTION_OP_RED_INC 0x00000003 +#define NVC597_MME_DMA_REDUCTION_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC597_MME_DMA_REDUCTION_REDUCTION_OP_RED_AND 0x00000005 +#define NVC597_MME_DMA_REDUCTION_REDUCTION_OP_RED_OR 0x00000006 +#define NVC597_MME_DMA_REDUCTION_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC597_MME_DMA_REDUCTION_REDUCTION_FORMAT 5:4 +#define NVC597_MME_DMA_REDUCTION_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC597_MME_DMA_REDUCTION_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVC597_MME_DMA_REDUCTION_REDUCTION_SIZE 8:8 +#define NVC597_MME_DMA_REDUCTION_REDUCTION_SIZE_FOUR_BYTES 0x00000000 +#define NVC597_MME_DMA_REDUCTION_REDUCTION_SIZE_EIGHT_BYTES 0x00000001 + +#define NVC597_MME_DMA_SYSMEMBAR 0x056c +#define NVC597_MME_DMA_SYSMEMBAR_V 0:0 + +#define NVC597_MME_DMA_SYNC 0x0570 +#define NVC597_MME_DMA_SYNC_VALUE 31:0 + +#define NVC597_SET_MME_DATA_FIFO_CONFIG 0x0574 +#define NVC597_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE 2:0 +#define NVC597_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_0KB 
+#define NVC597_SET_VERTEX_STREAM_SIZE_A(j) (0x0600+(j)*8)
+#define NVC597_SET_VERTEX_STREAM_SIZE_A_UPPER 7:0
+
+#define NVC597_SET_VERTEX_STREAM_SIZE_B(j) (0x0604+(j)*8)
+#define NVC597_SET_VERTEX_STREAM_SIZE_B_LOWER 31:0
+
+#define NVC597_SET_STREAM_OUT_CONTROL_STREAM(j) (0x0700+(j)*16)
+#define NVC597_SET_STREAM_OUT_CONTROL_STREAM_SELECT 1:0
+
+#define NVC597_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT(j) (0x0704+(j)*16)
+#define NVC597_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT_MAX 7:0
+
+#define NVC597_SET_STREAM_OUT_CONTROL_STRIDE(j) (0x0708+(j)*16)
+#define NVC597_SET_STREAM_OUT_CONTROL_STRIDE_BYTES 31:0
+
+#define NVC597_SET_RASTER_INPUT 0x0740
+#define NVC597_SET_RASTER_INPUT_STREAM_SELECT 1:0
+
+#define NVC597_SET_STREAM_OUTPUT 0x0744
+#define NVC597_SET_STREAM_OUTPUT_ENABLE 0:0
+#define NVC597_SET_STREAM_OUTPUT_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STREAM_OUTPUT_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE 0x0748
+#define NVC597_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE 0:0
+#define NVC597_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_FALSE 0x00000000
+#define NVC597_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_HYBRID_ANTI_ALIAS_CONTROL 0x0754
+#define NVC597_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES 3:0
+#define NVC597_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID 4:4
+#define NVC597_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_FRAGMENT 0x00000000
+#define NVC597_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_PASS 0x00000001
+#define NVC597_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES_EXTENDED 5:5
+
+#define NVC597_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c
+#define NVC597_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0
+
+#define NVC597_SET_SHADER_LOCAL_MEMORY_A 0x0790
+#define NVC597_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0
+
+#define NVC597_SET_SHADER_LOCAL_MEMORY_B 0x0794
+#define NVC597_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0
+
+#define NVC597_SET_SHADER_LOCAL_MEMORY_C 0x0798
+#define NVC597_SET_SHADER_LOCAL_MEMORY_C_SIZE_UPPER 5:0
+
+#define NVC597_SET_SHADER_LOCAL_MEMORY_D 0x079c
+#define NVC597_SET_SHADER_LOCAL_MEMORY_D_SIZE_LOWER 31:0
+
+#define NVC597_SET_SHADER_LOCAL_MEMORY_E 0x07a0
+#define NVC597_SET_SHADER_LOCAL_MEMORY_E_DEFAULT_SIZE_PER_WARP 25:0
+
+#define NVC597_SET_COLOR_ZERO_BANDWIDTH_CLEAR 0x07a4
+#define NVC597_SET_COLOR_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVC597_SET_Z_ZERO_BANDWIDTH_CLEAR 0x07a8
+#define NVC597_SET_Z_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVC597_SET_STENCIL_ZERO_BANDWIDTH_CLEAR 0x07b0
+#define NVC597_SET_STENCIL_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVC597_SET_ZCULL_REGION_SIZE_A 0x07c0
+#define NVC597_SET_ZCULL_REGION_SIZE_A_WIDTH 15:0
+
+#define NVC597_SET_ZCULL_REGION_SIZE_B 0x07c4
+#define NVC597_SET_ZCULL_REGION_SIZE_B_HEIGHT 15:0
+
+#define NVC597_SET_ZCULL_REGION_SIZE_C 0x07c8
+#define NVC597_SET_ZCULL_REGION_SIZE_C_DEPTH 15:0
+
+#define NVC597_SET_ZCULL_REGION_PIXEL_OFFSET_C 0x07cc
+#define NVC597_SET_ZCULL_REGION_PIXEL_OFFSET_C_DEPTH 15:0
+
+#define NVC597_SET_CULL_BEFORE_FETCH 0x07dc
+#define NVC597_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE 0:0
+#define NVC597_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_FALSE 0x00000000
+#define NVC597_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_TRUE 0x00000001
+
+#define NVC597_SET_ZCULL_REGION_LOCATION 0x07e0
+#define NVC597_SET_ZCULL_REGION_LOCATION_START_ALIQUOT 15:0
+#define NVC597_SET_ZCULL_REGION_LOCATION_ALIQUOT_COUNT 31:16
+
+#define NVC597_SET_ZCULL_REGION_ALIQUOTS 0x07e4
+#define NVC597_SET_ZCULL_REGION_ALIQUOTS_PER_LAYER 15:0
+
+#define NVC597_SET_ZCULL_STORAGE_A 0x07e8
+#define NVC597_SET_ZCULL_STORAGE_A_ADDRESS_UPPER 7:0
+
+#define NVC597_SET_ZCULL_STORAGE_B 0x07ec
+#define NVC597_SET_ZCULL_STORAGE_B_ADDRESS_LOWER 31:0
+
+#define NVC597_SET_ZCULL_STORAGE_C 0x07f0
+#define NVC597_SET_ZCULL_STORAGE_C_LIMIT_ADDRESS_UPPER 7:0
+
+#define NVC597_SET_ZCULL_STORAGE_D 0x07f4
+#define NVC597_SET_ZCULL_STORAGE_D_LIMIT_ADDRESS_LOWER 31:0
+
+#define NVC597_SET_ZT_READ_ONLY 0x07f8
+#define NVC597_SET_ZT_READ_ONLY_ENABLE_Z 0:0
+#define NVC597_SET_ZT_READ_ONLY_ENABLE_Z_FALSE 0x00000000
+#define NVC597_SET_ZT_READ_ONLY_ENABLE_Z_TRUE 0x00000001
+#define NVC597_SET_ZT_READ_ONLY_ENABLE_STENCIL 4:4
+#define NVC597_SET_ZT_READ_ONLY_ENABLE_STENCIL_FALSE 0x00000000
+#define NVC597_SET_ZT_READ_ONLY_ENABLE_STENCIL_TRUE 0x00000001
+
+#define NVC597_SET_COLOR_TARGET_A(j) (0x0800+(j)*64)
+#define NVC597_SET_COLOR_TARGET_A_OFFSET_UPPER 7:0
+
+#define NVC597_SET_COLOR_TARGET_B(j) (0x0804+(j)*64)
+#define NVC597_SET_COLOR_TARGET_B_OFFSET_LOWER 31:0
+
+#define NVC597_SET_COLOR_TARGET_WIDTH(j) (0x0808+(j)*64)
+#define NVC597_SET_COLOR_TARGET_WIDTH_V 27:0
+
+#define NVC597_SET_COLOR_TARGET_HEIGHT(j) (0x080c+(j)*64)
+#define NVC597_SET_COLOR_TARGET_HEIGHT_V 16:0
+
+#define NVC597_SET_COLOR_TARGET_FORMAT(j) (0x0810+(j)*64)
+#define NVC597_SET_COLOR_TARGET_FORMAT_V 7:0
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_DISABLED 0x00000000
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_AS32 0x000000C1
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_AU32 0x000000C2
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_X32 0x000000C4
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_X32 0x000000C5
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_R16_G16_B16_A16 0x000000C6
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RS16_GS16_BS16_AS16 0x000000C8
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RU16_GU16_BU16_AU16 0x000000C9
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RF32_GF32 0x000000CB
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RS32_GS32 0x000000CC
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RU32_GU32 0x000000CD
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_A8R8G8B8 0x000000CF
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_A8RL8GL8BL8 0x000000D0
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_A2B10G10R10 0x000000D1
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_AU2BU10GU10RU10 0x000000D2
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_A8B8G8R8 0x000000D5
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_A8BL8GL8RL8 0x000000D6
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_AN8BN8GN8RN8 0x000000D7
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_AS8BS8GS8RS8 0x000000D8
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_AU8BU8GU8RU8 0x000000D9
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_R16_G16 0x000000DA
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RN16_GN16 0x000000DB
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RS16_GS16 0x000000DC
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RU16_GU16 0x000000DD
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RF16_GF16 0x000000DE
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_A2R10G10B10 0x000000DF
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_BF10GF11RF11 0x000000E0
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RS32 0x000000E3
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RU32 0x000000E4
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RF32 0x000000E5
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_X8R8G8B8 0x000000E6
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_X8RL8GL8BL8 0x000000E7
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_R5G6B5 0x000000E8
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_A1R5G5B5 0x000000E9
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_G8R8 0x000000EA
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_GN8RN8 0x000000EB
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_GS8RS8 0x000000EC
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_GU8RU8 0x000000ED
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_R16 0x000000EE
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RN16 0x000000EF
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RS16 0x000000F0
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RU16 0x000000F1
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RF16 0x000000F2
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_R8 0x000000F3
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RN8 0x000000F4
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RS8 0x000000F5
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RU8 0x000000F6
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_A8 0x000000F7
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_X1R5G5B5 0x000000F8
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_X8B8G8R8 0x000000F9
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_X8BL8GL8RL8 0x000000FA
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_Z1R5G5B5 0x000000FB
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_O1R5G5B5 0x000000FC
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_Z8R8G8B8 0x000000FD
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_O8R8G8B8 0x000000FE
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_R32 0x000000FF
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_A16 0x00000040
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_AF16 0x00000041
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_AF32 0x00000042
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_A8R8 0x00000043
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_R16_A16 0x00000044
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RF16_AF16 0x00000045
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_RF32_AF32 0x00000046
+#define NVC597_SET_COLOR_TARGET_FORMAT_V_B8G8R8A8 0x00000047
+
+#define NVC597_SET_COLOR_TARGET_MEMORY(j) (0x0814+(j)*64)
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH 3:0
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH_ONE_GOB 0x00000000
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT 7:4
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_ONE_GOB 0x00000000
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_TWO_GOBS 0x00000001
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH 11:8
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_ONE_GOB 0x00000000
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_TWO_GOBS 0x00000001
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_FOUR_GOBS 0x00000002
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_EIGHT_GOBS 0x00000003
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVC597_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005
+#define NVC597_SET_COLOR_TARGET_MEMORY_LAYOUT 12:12
+#define NVC597_SET_COLOR_TARGET_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NVC597_SET_COLOR_TARGET_MEMORY_LAYOUT_PITCH 0x00000001
+#define NVC597_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL 16:16
+#define NVC597_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000
+#define NVC597_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_DEPTH_SIZE 0x00000001
+
+#define NVC597_SET_COLOR_TARGET_THIRD_DIMENSION(j) (0x0818+(j)*64)
+#define NVC597_SET_COLOR_TARGET_THIRD_DIMENSION_V 27:0
+
+#define NVC597_SET_COLOR_TARGET_ARRAY_PITCH(j) (0x081c+(j)*64)
+#define NVC597_SET_COLOR_TARGET_ARRAY_PITCH_V 31:0
+
+#define NVC597_SET_COLOR_TARGET_LAYER(j) (0x0820+(j)*64)
+#define NVC597_SET_COLOR_TARGET_LAYER_OFFSET 15:0
+
+#define NVC597_SET_COLOR_TARGET_RESERVED_A(j) (0x0824+(j)*64)
+#define NVC597_SET_COLOR_TARGET_RESERVED_A_V 0:0
+
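/*
 * Editorial note (illustrative): the (j)-indexed methods above are byte
 * offsets into the class's method space with a fixed per-index stride; for
 * the color-target group the stride is 64 bytes, i.e. 16 dwords of state
 * per render target. A hypothetical helper restating SET_COLOR_TARGET_A(j):
 */
static inline uint32_t color_target_a_offset(unsigned j)
{
    return 0x0800u + j * 64u;  /* same arithmetic as NVC597_SET_COLOR_TARGET_A(j) */
}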
+#define NVC597_SET_VIEWPORT_SCALE_X(j) (0x0a00+(j)*32)
+#define NVC597_SET_VIEWPORT_SCALE_X_V 31:0
+
+#define NVC597_SET_VIEWPORT_SCALE_Y(j) (0x0a04+(j)*32)
+#define NVC597_SET_VIEWPORT_SCALE_Y_V 31:0
+
+#define NVC597_SET_VIEWPORT_SCALE_Z(j) (0x0a08+(j)*32)
+#define NVC597_SET_VIEWPORT_SCALE_Z_V 31:0
+
+#define NVC597_SET_VIEWPORT_OFFSET_X(j) (0x0a0c+(j)*32)
+#define NVC597_SET_VIEWPORT_OFFSET_X_V 31:0
+
+#define NVC597_SET_VIEWPORT_OFFSET_Y(j) (0x0a10+(j)*32)
+#define NVC597_SET_VIEWPORT_OFFSET_Y_V 31:0
+
+#define NVC597_SET_VIEWPORT_OFFSET_Z(j) (0x0a14+(j)*32)
+#define NVC597_SET_VIEWPORT_OFFSET_Z_V 31:0
+
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE(j) (0x0a18+(j)*32)
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_X 2:0
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_X 0x00000000
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_X 0x00000001
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Y 0x00000002
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Y 0x00000003
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Z 0x00000004
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Z 0x00000005
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_W 0x00000006
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_W 0x00000007
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Y 6:4
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_X 0x00000000
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_X 0x00000001
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Y 0x00000002
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Y 0x00000003
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Z 0x00000004
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Z 0x00000005
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_W 0x00000006
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_W 0x00000007
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Z 10:8
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_X 0x00000000
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_X 0x00000001
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Y 0x00000002
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Y 0x00000003
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Z 0x00000004
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Z 0x00000005
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_W 0x00000006
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_W 0x00000007
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_W 14:12
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_X 0x00000000
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_X 0x00000001
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Y 0x00000002
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Y 0x00000003
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Z 0x00000004
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Z 0x00000005
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_W 0x00000006
+#define NVC597_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_W 0x00000007
+
+#define NVC597_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION(j) (0x0a1c+(j)*32)
+#define NVC597_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_X_BITS 4:0
+#define NVC597_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_Y_BITS 12:8
+
+#define NVC597_SET_VIEWPORT_CLIP_HORIZONTAL(j) (0x0c00+(j)*16)
+#define NVC597_SET_VIEWPORT_CLIP_HORIZONTAL_X0 15:0
+#define NVC597_SET_VIEWPORT_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVC597_SET_VIEWPORT_CLIP_VERTICAL(j) (0x0c04+(j)*16)
+#define NVC597_SET_VIEWPORT_CLIP_VERTICAL_Y0 15:0
+#define NVC597_SET_VIEWPORT_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVC597_SET_VIEWPORT_CLIP_MIN_Z(j) (0x0c08+(j)*16)
+#define NVC597_SET_VIEWPORT_CLIP_MIN_Z_V 31:0
+
+#define NVC597_SET_VIEWPORT_CLIP_MAX_Z(j) (0x0c0c+(j)*16)
+#define NVC597_SET_VIEWPORT_CLIP_MAX_Z_V 31:0
+
+#define NVC597_SET_WINDOW_CLIP_HORIZONTAL(j) (0x0d00+(j)*8)
+#define NVC597_SET_WINDOW_CLIP_HORIZONTAL_XMIN 15:0
+#define NVC597_SET_WINDOW_CLIP_HORIZONTAL_XMAX 31:16
+
+#define NVC597_SET_WINDOW_CLIP_VERTICAL(j) (0x0d04+(j)*8)
+#define NVC597_SET_WINDOW_CLIP_VERTICAL_YMIN 15:0
+#define NVC597_SET_WINDOW_CLIP_VERTICAL_YMAX 31:16
+
+#define NVC597_SET_CLIP_ID_EXTENT_X(j) (0x0d40+(j)*8)
+#define NVC597_SET_CLIP_ID_EXTENT_X_MINX 15:0
+#define NVC597_SET_CLIP_ID_EXTENT_X_WIDTH 31:16
+
+#define NVC597_SET_CLIP_ID_EXTENT_Y(j) (0x0d44+(j)*8)
+#define NVC597_SET_CLIP_ID_EXTENT_Y_MINY 15:0
+#define NVC597_SET_CLIP_ID_EXTENT_Y_HEIGHT 31:16
+
+#define NVC597_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK 0x0d60
+#define NVC597_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVC597_SET_API_VISIBLE_CALL_LIMIT 0x0d64
+#define NVC597_SET_API_VISIBLE_CALL_LIMIT_V 3:0
+#define NVC597_SET_API_VISIBLE_CALL_LIMIT_V__0 0x00000000
+#define NVC597_SET_API_VISIBLE_CALL_LIMIT_V__1 0x00000001
+#define NVC597_SET_API_VISIBLE_CALL_LIMIT_V__2 0x00000002
+#define NVC597_SET_API_VISIBLE_CALL_LIMIT_V__4 0x00000003
+#define NVC597_SET_API_VISIBLE_CALL_LIMIT_V__8 0x00000004
+#define NVC597_SET_API_VISIBLE_CALL_LIMIT_V__16 0x00000005
+#define NVC597_SET_API_VISIBLE_CALL_LIMIT_V__32 0x00000006
+#define NVC597_SET_API_VISIBLE_CALL_LIMIT_V__64 0x00000007
+#define NVC597_SET_API_VISIBLE_CALL_LIMIT_V__128 0x00000008
+#define NVC597_SET_API_VISIBLE_CALL_LIMIT_V_NO_CHECK 0x0000000F
+
+#define NVC597_SET_STATISTICS_COUNTER 0x0d68
+#define NVC597_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE 0:0
+#define NVC597_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE 1:1
+#define NVC597_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE 2:2
+#define NVC597_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE 3:3
+#define NVC597_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE 4:4
+#define NVC597_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE 5:5
+#define NVC597_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE 6:6
+#define NVC597_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE 7:7
+#define NVC597_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE 8:8
+#define NVC597_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE 9:9
+#define NVC597_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE 11:11
+#define NVC597_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE 12:12
+#define NVC597_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE 13:13
+#define NVC597_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE 14:14
+#define NVC597_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE 10:10
+#define NVC597_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE 15:15
+#define NVC597_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_TRUE 0x00000001
+#define NVC597_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE 16:16
+#define NVC597_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE_TRUE 0x00000001
+
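/*
 * Editorial note (illustrative): SET_STATISTICS_COUNTER is a mask of
 * independent one-bit enables, so selected counters can be OR-ed together
 * directly. A hypothetical sketch enabling the VS and PS invocation
 * counters:
 */
static inline uint32_t stats_counter_vs_ps(void)
{
    uint32_t v = 0;
    v |= 1u << 2;  /* VS_INVOCATIONS_ENABLE (2:2) = TRUE */
    v |= 1u << 9;  /* PS_INVOCATIONS_ENABLE (9:9) = TRUE */
    return v;
}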
+#define NVC597_SET_CLEAR_RECT_HORIZONTAL 0x0d6c
+#define NVC597_SET_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVC597_SET_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVC597_SET_CLEAR_RECT_VERTICAL 0x0d70
+#define NVC597_SET_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVC597_SET_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVC597_SET_VERTEX_ARRAY_START 0x0d74
+#define NVC597_SET_VERTEX_ARRAY_START_V 31:0
+
+#define NVC597_DRAW_VERTEX_ARRAY 0x0d78
+#define NVC597_DRAW_VERTEX_ARRAY_COUNT 31:0
+
+#define NVC597_SET_VIEWPORT_Z_CLIP 0x0d7c
+#define NVC597_SET_VIEWPORT_Z_CLIP_RANGE 0:0
+#define NVC597_SET_VIEWPORT_Z_CLIP_RANGE_NEGATIVE_W_TO_POSITIVE_W 0x00000000
+#define NVC597_SET_VIEWPORT_Z_CLIP_RANGE_ZERO_TO_POSITIVE_W 0x00000001
+
+#define NVC597_SET_COLOR_CLEAR_VALUE(i) (0x0d80+(i)*4)
+#define NVC597_SET_COLOR_CLEAR_VALUE_V 31:0
+
+#define NVC597_SET_Z_CLEAR_VALUE 0x0d90
+#define NVC597_SET_Z_CLEAR_VALUE_V 31:0
+
+#define NVC597_SET_SHADER_CACHE_CONTROL 0x0d94
+#define NVC597_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0
+#define NVC597_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000
+#define NVC597_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001
+
+#define NVC597_FORCE_TRANSITION_TO_BETA 0x0d98
+#define NVC597_FORCE_TRANSITION_TO_BETA_V 0:0
+
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_ENABLE 0x0d9c
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V 0:0
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_FALSE 0x00000000
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_TRUE 0x00000001
+
+#define NVC597_SET_STENCIL_CLEAR_VALUE 0x0da0
+#define NVC597_SET_STENCIL_CLEAR_VALUE_V 7:0
+
+#define NVC597_INVALIDATE_SHADER_CACHES_NO_WFI 0x0da4
+#define NVC597_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0
+#define NVC597_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000
+#define NVC597_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001
+#define NVC597_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4
+#define NVC597_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000
+#define NVC597_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001
+#define NVC597_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12
+#define NVC597_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000
+#define NVC597_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001
+
+#define NVC597_SET_ZCULL_SERIALIZATION 0x0da8
+#define NVC597_SET_ZCULL_SERIALIZATION_ENABLE 0:0
+#define NVC597_SET_ZCULL_SERIALIZATION_ENABLE_FALSE 0x00000000
+#define NVC597_SET_ZCULL_SERIALIZATION_ENABLE_TRUE 0x00000001
+#define NVC597_SET_ZCULL_SERIALIZATION_APPLIED 5:4
+#define NVC597_SET_ZCULL_SERIALIZATION_APPLIED_ALWAYS 0x00000000
+#define NVC597_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z 0x00000001
+#define NVC597_SET_ZCULL_SERIALIZATION_APPLIED_OUT_OF_GAMUT_Z 0x00000002
+#define NVC597_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z_OR_OUT_OF_GAMUT_Z 0x00000003
+
+#define NVC597_SET_FRONT_POLYGON_MODE 0x0dac
+#define NVC597_SET_FRONT_POLYGON_MODE_V 31:0
+#define NVC597_SET_FRONT_POLYGON_MODE_V_POINT 0x00001B00
+#define NVC597_SET_FRONT_POLYGON_MODE_V_LINE 0x00001B01
+#define NVC597_SET_FRONT_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVC597_SET_BACK_POLYGON_MODE 0x0db0
+#define NVC597_SET_BACK_POLYGON_MODE_V 31:0
+#define NVC597_SET_BACK_POLYGON_MODE_V_POINT 0x00001B00
+#define NVC597_SET_BACK_POLYGON_MODE_V_LINE 0x00001B01
+#define NVC597_SET_BACK_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVC597_SET_POLY_SMOOTH 0x0db4
+#define NVC597_SET_POLY_SMOOTH_ENABLE 0:0
+#define NVC597_SET_POLY_SMOOTH_ENABLE_FALSE 0x00000000
+#define NVC597_SET_POLY_SMOOTH_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_ZCULL_DIR_FORMAT 0x0dbc
+#define NVC597_SET_ZCULL_DIR_FORMAT_ZDIR 15:0
+#define NVC597_SET_ZCULL_DIR_FORMAT_ZDIR_LESS 0x00000000
+#define NVC597_SET_ZCULL_DIR_FORMAT_ZDIR_GREATER 0x00000001
+#define NVC597_SET_ZCULL_DIR_FORMAT_ZFORMAT 31:16
+#define NVC597_SET_ZCULL_DIR_FORMAT_ZFORMAT_MSB 0x00000000
+#define NVC597_SET_ZCULL_DIR_FORMAT_ZFORMAT_FP 0x00000001
+#define NVC597_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZTRICK 0x00000002
+#define NVC597_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZF32_1 0x00000003
+
+#define NVC597_SET_POLY_OFFSET_POINT 0x0dc0
+#define NVC597_SET_POLY_OFFSET_POINT_ENABLE 0:0
+#define NVC597_SET_POLY_OFFSET_POINT_ENABLE_FALSE 0x00000000
+#define NVC597_SET_POLY_OFFSET_POINT_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_POLY_OFFSET_LINE 0x0dc4
+#define NVC597_SET_POLY_OFFSET_LINE_ENABLE 0:0
+#define NVC597_SET_POLY_OFFSET_LINE_ENABLE_FALSE 0x00000000
+#define NVC597_SET_POLY_OFFSET_LINE_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_POLY_OFFSET_FILL 0x0dc8
+#define NVC597_SET_POLY_OFFSET_FILL_ENABLE 0:0
+#define NVC597_SET_POLY_OFFSET_FILL_ENABLE_FALSE 0x00000000
+#define NVC597_SET_POLY_OFFSET_FILL_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_PATCH 0x0dcc
+#define NVC597_SET_PATCH_SIZE 7:0
+
+#define NVC597_SET_ITERATED_BLEND 0x0dd0
+#define NVC597_SET_ITERATED_BLEND_ENABLE 0:0
+#define NVC597_SET_ITERATED_BLEND_ENABLE_FALSE 0x00000000
+#define NVC597_SET_ITERATED_BLEND_ENABLE_TRUE 0x00000001
+#define NVC597_SET_ITERATED_BLEND_ALPHA_ENABLE 1:1
+#define NVC597_SET_ITERATED_BLEND_ALPHA_ENABLE_FALSE 0x00000000
+#define NVC597_SET_ITERATED_BLEND_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_ITERATED_BLEND_PASS 0x0dd4
+#define NVC597_SET_ITERATED_BLEND_PASS_COUNT 7:0
+
+#define NVC597_SET_ZCULL_CRITERION 0x0dd8
+#define NVC597_SET_ZCULL_CRITERION_SFUNC 7:0
+#define NVC597_SET_ZCULL_CRITERION_SFUNC_NEVER 0x00000000
+#define NVC597_SET_ZCULL_CRITERION_SFUNC_LESS 0x00000001
+#define NVC597_SET_ZCULL_CRITERION_SFUNC_EQUAL 0x00000002
+#define NVC597_SET_ZCULL_CRITERION_SFUNC_LEQUAL 0x00000003
+#define NVC597_SET_ZCULL_CRITERION_SFUNC_GREATER 0x00000004
+#define NVC597_SET_ZCULL_CRITERION_SFUNC_NOTEQUAL 0x00000005
+#define NVC597_SET_ZCULL_CRITERION_SFUNC_GEQUAL 0x00000006
+#define NVC597_SET_ZCULL_CRITERION_SFUNC_ALWAYS 0x00000007
+#define NVC597_SET_ZCULL_CRITERION_NO_INVALIDATE 8:8
+#define NVC597_SET_ZCULL_CRITERION_NO_INVALIDATE_FALSE 0x00000000
+#define NVC597_SET_ZCULL_CRITERION_NO_INVALIDATE_TRUE 0x00000001
+#define NVC597_SET_ZCULL_CRITERION_FORCE_MATCH 9:9
+#define NVC597_SET_ZCULL_CRITERION_FORCE_MATCH_FALSE 0x00000000
+#define NVC597_SET_ZCULL_CRITERION_FORCE_MATCH_TRUE 0x00000001
+#define NVC597_SET_ZCULL_CRITERION_SREF 23:16
+#define NVC597_SET_ZCULL_CRITERION_SMASK 31:24
+
+#define NVC597_PIXEL_SHADER_BARRIER 0x0de0
+#define NVC597_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE 0:0
+#define NVC597_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_FALSE 0x00000000
+#define NVC597_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_SM_TIMEOUT_INTERVAL 0x0de4
+#define NVC597_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0
+
+#define NVC597_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY 0x0de8
+#define NVC597_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE 0:0
+#define NVC597_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_FALSE 0x00000000
+#define NVC597_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_TRUE 0x00000001
+
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER 0x0df0
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER_V 7:0
+
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION 0x0df4
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC 2:0
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_FALSE 0x00000000
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_TRUE 0x00000001
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_EQ 0x00000002
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_NE 0x00000003
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LT 0x00000004
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LE 0x00000005
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GT 0x00000006
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GE 0x00000007
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION 5:3
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD_PRODUCTS 0x00000000
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUB_PRODUCTS 0x00000001
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MIN 0x00000002
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MAX 0x00000003
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_RCP 0x00000004
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD 0x00000005
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUBTRACT 0x00000006
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT 8:6
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT0 0x00000000
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT1 0x00000001
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT2 0x00000002
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT3 0x00000003
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT4 0x00000004
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT5 0x00000005
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT6 0x00000006
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT7 0x00000007
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT 11:9
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_RGB 0x00000000
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_RGB 0x00000001
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_AAA 0x00000002
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_AAA 0x00000003
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP0_RGB 0x00000004
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP1_RGB 0x00000005
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP2_RGB 0x00000006
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_PBR_RGB 0x00000007
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT 15:12
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO 0x00000000
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE 0x00000001
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_RGB 0x00000002
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_AAA 0x00000003
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_RGB 0x00000005
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_AAA 0x00000006
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP0_RGB 0x00000009
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP1_RGB 0x0000000A
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP2_RGB 0x0000000B
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_PBR_RGB 0x0000000C
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_CONSTANT_RGB 0x0000000D
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO_A_TIMES_B 0x0000000E
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT 18:16
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_RGB 0x00000000
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_RGB 0x00000001
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_AAA 0x00000002
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_AAA 0x00000003
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP0_RGB 0x00000004
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP1_RGB 0x00000005
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP2_RGB 0x00000006
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_PBR_RGB 0x00000007
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT 22:19
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO 0x00000000
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE 0x00000001
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_RGB 0x00000002
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_AAA 0x00000003
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_RGB 0x00000005
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_AAA 0x00000006
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP0_RGB 0x00000009
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP1_RGB 0x0000000A
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP2_RGB 0x0000000B
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_PBR_RGB 0x0000000C
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_CONSTANT_RGB 0x0000000D
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO_C_TIMES_D 0x0000000E
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE 25:23
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RGB 0x00000000
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GBR 0x00000001
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RRR 0x00000002
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GGG 0x00000003
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_BBB 0x00000004
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_R_TO_A 0x00000005
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK 27:26
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_RGB 0x00000000
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_R_ONLY 0x00000001
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_G_ONLY 0x00000002
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_B_ONLY 0x00000003
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT 29:28
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP0 0x00000000
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP1 0x00000001
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP2 0x00000002
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_NONE 0x00000003
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC 31:31
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_FALSE 0x00000000
+#define NVC597_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_TRUE 0x00000001
+
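/*
 * Editorial note (illustrative): one LOAD_ITERATED_BLEND_INSTRUCTION word
 * packs a condition test, an ALU operation, four operand selects, an output
 * swizzle/write mask and a destination. A hedged sketch (reusing the
 * hypothetical NV_FIELD_SET helper from the first note) of an unconditional
 * step -- presumably computing A*B + C*D, here src.rgb*dst.rgb + src.aaa*1
 * -- written to TEMP0 without updating the condition code:
 */
static inline uint32_t iterated_blend_add_products(void)
{
    uint32_t v = 0;
    v = NV_FIELD_SET(v, 2, 0, 0x00000001);   /* TEST_CC           = TRUE         */
    v = NV_FIELD_SET(v, 5, 3, 0x00000000);   /* OPERATION         = ADD_PRODUCTS */
    v = NV_FIELD_SET(v, 11, 9, 0x00000000);  /* OPERAND_A_SELECT  = SRC_RGB      */
    v = NV_FIELD_SET(v, 15, 12, 0x00000005); /* OPERAND_B_SELECT  = DEST_RGB     */
    v = NV_FIELD_SET(v, 18, 16, 0x00000002); /* OPERAND_C_SELECT  = SRC_AAA      */
    v = NV_FIELD_SET(v, 22, 19, 0x00000001); /* OPERAND_D_SELECT  = ONE          */
    v = NV_FIELD_SET(v, 25, 23, 0x00000000); /* OUTPUT_SWIZZLE    = RGB          */
    v = NV_FIELD_SET(v, 27, 26, 0x00000000); /* OUTPUT_WRITE_MASK = RGB          */
    v = NV_FIELD_SET(v, 29, 28, 0x00000000); /* PASS_OUTPUT       = TEMP0        */
    v = NV_FIELD_SET(v, 31, 31, 0x00000000); /* SET_CC            = FALSE        */
    return v;
}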
+#define NVC597_SET_WINDOW_OFFSET_X 0x0df8
+#define NVC597_SET_WINDOW_OFFSET_X_V 16:0
+
+#define NVC597_SET_WINDOW_OFFSET_Y 0x0dfc
+#define NVC597_SET_WINDOW_OFFSET_Y_V 17:0
+
+#define NVC597_SET_SCISSOR_ENABLE(j) (0x0e00+(j)*16)
+#define NVC597_SET_SCISSOR_ENABLE_V 0:0
+#define NVC597_SET_SCISSOR_ENABLE_V_FALSE 0x00000000
+#define NVC597_SET_SCISSOR_ENABLE_V_TRUE 0x00000001
+
+#define NVC597_SET_SCISSOR_HORIZONTAL(j) (0x0e04+(j)*16)
+#define NVC597_SET_SCISSOR_HORIZONTAL_XMIN 15:0
+#define NVC597_SET_SCISSOR_HORIZONTAL_XMAX 31:16
+
+#define NVC597_SET_SCISSOR_VERTICAL(j) (0x0e08+(j)*16)
+#define NVC597_SET_SCISSOR_VERTICAL_YMIN 15:0
+#define NVC597_SET_SCISSOR_VERTICAL_YMAX 31:16
+
+#define NVC597_SET_VPC_PERF_KNOB 0x0f14
+#define NVC597_SET_VPC_PERF_KNOB_CULLED_SMALL_LINES 7:0
+#define NVC597_SET_VPC_PERF_KNOB_CULLED_SMALL_TRIANGLES 15:8
+#define NVC597_SET_VPC_PERF_KNOB_NONCULLED_LINES_AND_POINTS 23:16
+#define NVC597_SET_VPC_PERF_KNOB_NONCULLED_TRIANGLES 31:24
+
+#define NVC597_PM_LOCAL_TRIGGER 0x0f18
+#define NVC597_PM_LOCAL_TRIGGER_BOOKMARK 15:0
+
+#define NVC597_SET_POST_Z_PS_IMASK 0x0f1c
+#define NVC597_SET_POST_Z_PS_IMASK_ENABLE 0:0
+#define NVC597_SET_POST_Z_PS_IMASK_ENABLE_FALSE 0x00000000
+#define NVC597_SET_POST_Z_PS_IMASK_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_CONSTANT_COLOR_RENDERING 0x0f40
+#define NVC597_SET_CONSTANT_COLOR_RENDERING_ENABLE 0:0
+#define NVC597_SET_CONSTANT_COLOR_RENDERING_ENABLE_FALSE 0x00000000
+#define NVC597_SET_CONSTANT_COLOR_RENDERING_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_CONSTANT_COLOR_RENDERING_RED 0x0f44
+#define NVC597_SET_CONSTANT_COLOR_RENDERING_RED_V 31:0
+
+#define NVC597_SET_CONSTANT_COLOR_RENDERING_GREEN 0x0f48
+#define NVC597_SET_CONSTANT_COLOR_RENDERING_GREEN_V 31:0
+
+#define NVC597_SET_CONSTANT_COLOR_RENDERING_BLUE 0x0f4c
+#define NVC597_SET_CONSTANT_COLOR_RENDERING_BLUE_V 31:0
+
+#define NVC597_SET_CONSTANT_COLOR_RENDERING_ALPHA 0x0f50
+#define NVC597_SET_CONSTANT_COLOR_RENDERING_ALPHA_V 31:0
+
+#define NVC597_SET_BACK_STENCIL_FUNC_REF 0x0f54
+#define NVC597_SET_BACK_STENCIL_FUNC_REF_V 7:0
+
+#define NVC597_SET_BACK_STENCIL_MASK 0x0f58
+#define NVC597_SET_BACK_STENCIL_MASK_V 7:0
+
+#define NVC597_SET_BACK_STENCIL_FUNC_MASK 0x0f5c
+#define NVC597_SET_BACK_STENCIL_FUNC_MASK_V 7:0
+
+#define NVC597_SET_VERTEX_STREAM_SUBSTITUTE_A 0x0f84
+#define NVC597_SET_VERTEX_STREAM_SUBSTITUTE_A_ADDRESS_UPPER 7:0
+
+#define NVC597_SET_VERTEX_STREAM_SUBSTITUTE_B 0x0f88
+#define NVC597_SET_VERTEX_STREAM_SUBSTITUTE_B_ADDRESS_LOWER 31:0
+
+#define NVC597_SET_LINE_MODE_POLYGON_CLIP 0x0f8c
+#define NVC597_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE 0:0
+#define NVC597_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DRAW_LINE 0x00000000
+#define NVC597_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DO_NOT_DRAW_LINE 0x00000001
+
+#define NVC597_SET_SINGLE_CT_WRITE_CONTROL 0x0f90
+#define NVC597_SET_SINGLE_CT_WRITE_CONTROL_ENABLE 0:0
+#define NVC597_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_FALSE 0x00000000
+#define NVC597_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_VTG_WARP_WATERMARKS 0x0f98
+#define NVC597_SET_VTG_WARP_WATERMARKS_LOW 15:0
+#define NVC597_SET_VTG_WARP_WATERMARKS_HIGH 31:16
+
+#define NVC597_SET_DEPTH_BOUNDS_MIN 0x0f9c
+#define NVC597_SET_DEPTH_BOUNDS_MIN_V 31:0
+
+#define NVC597_SET_DEPTH_BOUNDS_MAX 0x0fa0
+#define NVC597_SET_DEPTH_BOUNDS_MAX_V 31:0
+
+#define NVC597_SET_SAMPLE_MASK 0x0fa4
+#define NVC597_SET_SAMPLE_MASK_RASTER_OUT_ENABLE 0:0
+#define NVC597_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_FALSE 0x00000000
+#define NVC597_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_TRUE 0x00000001
+#define NVC597_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE 4:4
+#define NVC597_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_FALSE 0x00000000
+#define NVC597_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_COLOR_TARGET_SAMPLE_MASK 0x0fa8
+#define NVC597_SET_COLOR_TARGET_SAMPLE_MASK_V 15:0
+
+#define NVC597_SET_CT_MRT_ENABLE 0x0fac
+#define NVC597_SET_CT_MRT_ENABLE_V 0:0
+#define NVC597_SET_CT_MRT_ENABLE_V_FALSE 0x00000000
+#define NVC597_SET_CT_MRT_ENABLE_V_TRUE 0x00000001
+
+#define NVC597_SET_NONMULTISAMPLED_Z 0x0fb0
+#define NVC597_SET_NONMULTISAMPLED_Z_V 0:0
+#define NVC597_SET_NONMULTISAMPLED_Z_V_PER_SAMPLE 0x00000000
+#define NVC597_SET_NONMULTISAMPLED_Z_V_AT_PIXEL_CENTER 0x00000001
+
+#define NVC597_SET_TIR 0x0fb4
+#define NVC597_SET_TIR_MODE 1:0
+#define NVC597_SET_TIR_MODE_DISABLED 0x00000000
+#define NVC597_SET_TIR_MODE_RASTER_N_TARGET_M 0x00000001
+
+#define NVC597_SET_ANTI_ALIAS_RASTER 0x0fb8
+#define NVC597_SET_ANTI_ALIAS_RASTER_SAMPLES 2:0
+#define NVC597_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_1X1 0x00000000
+#define NVC597_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X2 0x00000002
+#define NVC597_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVC597_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVC597_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X4 0x00000006
+
+#define NVC597_SET_SAMPLE_MASK_X0_Y0 0x0fbc
+#define NVC597_SET_SAMPLE_MASK_X0_Y0_V 15:0
+
+#define NVC597_SET_SAMPLE_MASK_X1_Y0 0x0fc0
+#define NVC597_SET_SAMPLE_MASK_X1_Y0_V 15:0
+
+#define NVC597_SET_SAMPLE_MASK_X0_Y1 0x0fc4
+#define NVC597_SET_SAMPLE_MASK_X0_Y1_V 15:0
+
+#define NVC597_SET_SAMPLE_MASK_X1_Y1 0x0fc8
+#define NVC597_SET_SAMPLE_MASK_X1_Y1_V 15:0
+
+#define NVC597_SET_SURFACE_CLIP_ID_MEMORY_A 0x0fcc
+#define NVC597_SET_SURFACE_CLIP_ID_MEMORY_A_OFFSET_UPPER 7:0
+
+#define NVC597_SET_SURFACE_CLIP_ID_MEMORY_B 0x0fd0
+#define NVC597_SET_SURFACE_CLIP_ID_MEMORY_B_OFFSET_LOWER 31:0
+
+#define NVC597_SET_TIR_MODULATION 0x0fd4
+#define NVC597_SET_TIR_MODULATION_COMPONENT_SELECT 1:0
+#define NVC597_SET_TIR_MODULATION_COMPONENT_SELECT_NO_MODULATION 0x00000000
+#define NVC597_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGB 0x00000001
+#define NVC597_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_ALPHA_ONLY 0x00000002
+#define NVC597_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGBA 0x00000003
+
+#define NVC597_SET_TIR_MODULATION_FUNCTION 0x0fd8
+#define NVC597_SET_TIR_MODULATION_FUNCTION_SELECT 0:0
+#define NVC597_SET_TIR_MODULATION_FUNCTION_SELECT_LINEAR 0x00000000
+#define NVC597_SET_TIR_MODULATION_FUNCTION_SELECT_TABLE 0x00000001
+
+#define NVC597_SET_BLEND_OPT_CONTROL 0x0fdc
+#define NVC597_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS 0:0
+#define NVC597_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_FALSE 0x00000000
+#define NVC597_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_TRUE 0x00000001
+
+#define NVC597_SET_ZT_A 0x0fe0
+#define NVC597_SET_ZT_A_OFFSET_UPPER 7:0
+
+#define NVC597_SET_ZT_B 0x0fe4
+#define NVC597_SET_ZT_B_OFFSET_LOWER 31:0
+
+#define NVC597_SET_ZT_FORMAT 0x0fe8
+#define NVC597_SET_ZT_FORMAT_V 4:0
+#define NVC597_SET_ZT_FORMAT_V_Z16 0x00000013
+#define NVC597_SET_ZT_FORMAT_V_Z24S8 0x00000014
+#define NVC597_SET_ZT_FORMAT_V_X8Z24 0x00000015
+#define NVC597_SET_ZT_FORMAT_V_S8Z24 0x00000016
+#define NVC597_SET_ZT_FORMAT_V_S8 0x00000017
+#define NVC597_SET_ZT_FORMAT_V_V8Z24 0x00000018
+#define NVC597_SET_ZT_FORMAT_V_ZF32 0x0000000A
+#define NVC597_SET_ZT_FORMAT_V_ZF32_X24S8 0x00000019
+#define NVC597_SET_ZT_FORMAT_V_X8Z24_X16V8S8 0x0000001D
+#define NVC597_SET_ZT_FORMAT_V_ZF32_X16V8X8 0x0000001E
+#define NVC597_SET_ZT_FORMAT_V_ZF32_X16V8S8 0x0000001F
+
+#define NVC597_SET_ZT_BLOCK_SIZE 0x0fec
+#define NVC597_SET_ZT_BLOCK_SIZE_WIDTH 3:0
+#define NVC597_SET_ZT_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVC597_SET_ZT_BLOCK_SIZE_HEIGHT 7:4
+#define NVC597_SET_ZT_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVC597_SET_ZT_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVC597_SET_ZT_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC597_SET_ZT_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC597_SET_ZT_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC597_SET_ZT_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC597_SET_ZT_BLOCK_SIZE_DEPTH 11:8
+#define NVC597_SET_ZT_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+
+#define NVC597_SET_ZT_ARRAY_PITCH 0x0ff0
+#define NVC597_SET_ZT_ARRAY_PITCH_V 31:0
+
+#define NVC597_SET_SURFACE_CLIP_HORIZONTAL 0x0ff4
+#define NVC597_SET_SURFACE_CLIP_HORIZONTAL_X 15:0
+#define NVC597_SET_SURFACE_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVC597_SET_SURFACE_CLIP_VERTICAL 0x0ff8
+#define NVC597_SET_SURFACE_CLIP_VERTICAL_Y 15:0
+#define NVC597_SET_SURFACE_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVC597_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS 0x1000
+#define NVC597_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE 0:0
+#define NVC597_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_FALSE 0x00000000
+#define NVC597_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_TRUE 0x00000001
+#define NVC597_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY 5:4
+#define NVC597_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC597_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC597_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC597_SET_VIEWPORT_MULTICAST 0x1004
+#define NVC597_SET_VIEWPORT_MULTICAST_ORDER 0:0
+#define NVC597_SET_VIEWPORT_MULTICAST_ORDER_VIEWPORT_ORDER 0x00000000
+#define NVC597_SET_VIEWPORT_MULTICAST_ORDER_PRIMITIVE_ORDER 0x00000001
+
+#define NVC597_SET_TESSELLATION_CUT_HEIGHT 0x1008
+#define NVC597_SET_TESSELLATION_CUT_HEIGHT_V 4:0
+
+#define NVC597_SET_MAX_GS_INSTANCES_PER_TASK 0x100c
+#define NVC597_SET_MAX_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVC597_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK 0x1010
+#define NVC597_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK_V 15:0
+
+#define NVC597_SET_RESERVED_SW_METHOD00 0x1014
+#define NVC597_SET_RESERVED_SW_METHOD00_V 31:0
+
+#define NVC597_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER 0x1018
+#define NVC597_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVC597_SET_BETA_CB_STORAGE_CONSTRAINT 0x101c
+#define NVC597_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVC597_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVC597_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER 0x1020
+#define NVC597_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVC597_SET_ALPHA_CB_STORAGE_CONSTRAINT 0x1024
+#define NVC597_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVC597_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVC597_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_RESERVED_SW_METHOD01 0x1028
+#define NVC597_SET_RESERVED_SW_METHOD01_V 31:0
+
+#define NVC597_SET_RESERVED_SW_METHOD02 0x102c
+#define NVC597_SET_RESERVED_SW_METHOD02_V 31:0
+
+#define NVC597_SET_TIR_MODULATION_COEFFICIENT_TABLE(i) (0x1030+(i)*4)
+#define NVC597_SET_TIR_MODULATION_COEFFICIENT_TABLE_V0 7:0
+#define NVC597_SET_TIR_MODULATION_COEFFICIENT_TABLE_V1 15:8
+#define NVC597_SET_TIR_MODULATION_COEFFICIENT_TABLE_V2 23:16
+#define NVC597_SET_TIR_MODULATION_COEFFICIENT_TABLE_V3 31:24
+
+#define NVC597_SET_SPARE_NOOP01 0x1044
+#define NVC597_SET_SPARE_NOOP01_V 31:0
+
+#define NVC597_SET_SPARE_NOOP02 0x1048
+#define NVC597_SET_SPARE_NOOP02_V 31:0
+
+#define NVC597_SET_SPARE_NOOP03 0x104c
+#define NVC597_SET_SPARE_NOOP03_V 31:0
+
+#define NVC597_SET_SPARE_NOOP04 0x1050
+#define NVC597_SET_SPARE_NOOP04_V 31:0
+
+#define NVC597_SET_SPARE_NOOP05 0x1054
+#define NVC597_SET_SPARE_NOOP05_V 31:0
+
+#define NVC597_SET_SPARE_NOOP06 0x1058
+#define NVC597_SET_SPARE_NOOP06_V 31:0
+
+#define NVC597_SET_SPARE_NOOP07 0x105c
+#define NVC597_SET_SPARE_NOOP07_V 31:0
+
+#define NVC597_SET_SPARE_NOOP08 0x1060
+#define NVC597_SET_SPARE_NOOP08_V 31:0
+
+#define NVC597_SET_SPARE_NOOP09 0x1064
+#define NVC597_SET_SPARE_NOOP09_V 31:0
+
+#define NVC597_SET_SPARE_NOOP10 0x1068
+#define NVC597_SET_SPARE_NOOP10_V 31:0
+
+#define NVC597_SET_SPARE_NOOP11 0x106c
+#define NVC597_SET_SPARE_NOOP11_V 31:0
+
+#define NVC597_SET_SPARE_NOOP12 0x1070
+#define NVC597_SET_SPARE_NOOP12_V 31:0
+
+#define NVC597_SET_SPARE_NOOP13 0x1074
+#define NVC597_SET_SPARE_NOOP13_V 31:0
+
+#define NVC597_SET_SPARE_NOOP14 0x1078
+#define NVC597_SET_SPARE_NOOP14_V 31:0
+
+#define NVC597_SET_SPARE_NOOP15 0x107c
+#define NVC597_SET_SPARE_NOOP15_V 31:0
+
+#define NVC597_SET_RESERVED_SW_METHOD03 0x10b0
+#define NVC597_SET_RESERVED_SW_METHOD03_V 31:0
+
+#define NVC597_SET_RESERVED_SW_METHOD04 0x10b4
+#define NVC597_SET_RESERVED_SW_METHOD04_V 31:0
+
+#define NVC597_SET_RESERVED_SW_METHOD05 0x10b8
+#define NVC597_SET_RESERVED_SW_METHOD05_V 31:0
+
+#define NVC597_SET_RESERVED_SW_METHOD06 0x10bc
+#define NVC597_SET_RESERVED_SW_METHOD06_V 31:0
+
+#define NVC597_SET_RESERVED_SW_METHOD07 0x10c0
+#define NVC597_SET_RESERVED_SW_METHOD07_V 31:0
+
+#define NVC597_SET_RESERVED_SW_METHOD08 0x10c4
+#define NVC597_SET_RESERVED_SW_METHOD08_V 31:0
+
+#define NVC597_SET_RESERVED_SW_METHOD09 0x10c8
+#define NVC597_SET_RESERVED_SW_METHOD09_V 31:0
+
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_UNORM8 0x10cc
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED 23:16
+
+#define NVC597_SET_RESERVED_SW_METHOD10 0x10d0
+#define NVC597_SET_RESERVED_SW_METHOD10_V 31:0
+
+#define NVC597_SET_RESERVED_SW_METHOD11 0x10d4
+#define NVC597_SET_RESERVED_SW_METHOD11_V 31:0
+
+#define NVC597_SET_RESERVED_SW_METHOD12 0x10d8
+#define NVC597_SET_RESERVED_SW_METHOD12_V 31:0
+
+#define NVC597_SET_RESERVED_SW_METHOD13 0x10dc
+#define NVC597_SET_RESERVED_SW_METHOD13_V 31:0
+
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_UNORM10 0x10e0
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED 23:16
+
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_UNORM16 0x10e4
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED 23:16
+
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_FP11 0x10e8
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED_ALL_HIT_ONCE 5:0
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED 21:16
+
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_FP16 0x10ec
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED 23:16
+
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_SRGB8 0x10f0
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC597_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED 23:16
+
+#define NVC597_UNBIND_ALL 0x10f4
+#define NVC597_UNBIND_ALL_CONSTANT_BUFFERS 8:8
+#define NVC597_UNBIND_ALL_CONSTANT_BUFFERS_FALSE 0x00000000
+#define NVC597_UNBIND_ALL_CONSTANT_BUFFERS_TRUE 0x00000001
+
+#define NVC597_SET_CLEAR_SURFACE_CONTROL 0x10f8
+#define NVC597_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK 0:0
+#define NVC597_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_FALSE 0x00000000
+#define NVC597_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_TRUE 0x00000001
+#define NVC597_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT 4:4
+#define NVC597_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_FALSE 0x00000000
+#define NVC597_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_TRUE 0x00000001
+#define NVC597_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0 8:8
+#define NVC597_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_FALSE 0x00000000
+#define NVC597_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_TRUE 0x00000001
+#define NVC597_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0 12:12
+#define NVC597_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_FALSE 0x00000000
+#define NVC597_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_TRUE 0x00000001
+
+#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS 0x10fc
+#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY 5:4
+#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC597_SET_RESERVED_SW_METHOD14 0x1100
+#define NVC597_SET_RESERVED_SW_METHOD14_V 31:0
+
+#define NVC597_SET_RESERVED_SW_METHOD15 0x1104
+#define NVC597_SET_RESERVED_SW_METHOD15_V 31:0
+
+#define NVC597_NO_OPERATION_DATA_HI 0x110c
+#define NVC597_NO_OPERATION_DATA_HI_V 31:0
+
+#define NVC597_SET_DEPTH_BIAS_CONTROL 0x1110
+#define NVC597_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT 0:0
+#define NVC597_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_FALSE 0x00000000
+#define NVC597_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_TRUE 0x00000001
+
+#define NVC597_PM_TRIGGER_END 0x1114
+#define NVC597_PM_TRIGGER_END_V 31:0
+
+#define NVC597_SET_VERTEX_ID_BASE 0x1118
+#define NVC597_SET_VERTEX_ID_BASE_V 31:0
+
+#define NVC597_SET_STENCIL_COMPRESSION 0x111c
+#define NVC597_SET_STENCIL_COMPRESSION_ENABLE 0:0
+#define NVC597_SET_STENCIL_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVC597_SET_STENCIL_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A(i) (0x1120+(i)*4)
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0 0:0
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1 1:1
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2 2:2
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3 3:3
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0 4:4
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1 5:5
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2 6:6
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3 7:7
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0 8:8
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1 9:9
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2 10:10
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3 11:11
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0 12:12
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1 13:13
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2 14:14
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3 15:15
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0 16:16
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1 17:17
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2 18:18
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3 19:19
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0 20:20
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1 21:21
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2 22:22
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3 23:23
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0 24:24
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1 25:25
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2 26:26
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3 27:27
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0 28:28
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1 29:29
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2 30:30
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_TRUE 0x00000001
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3 31:31
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_TRUE 0x00000001
+
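/*
 * Editorial note (illustrative): in each SKIP_MASK word, attribute n
 * component c of the current eight-attribute window sits at bit n*4 + c, as
 * the field positions above show. A hypothetical helper:
 */
static inline uint32_t da_skip_mask_bit(unsigned attr, unsigned comp)
{
    return 1u << (attr * 4u + comp);  /* attr 0..7, comp 0..3 */
}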
NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1 29:29 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2 30:30 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3 31:31 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_TRUE 0x00000001 + +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B(i) (0x1128+(i)*4) +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0 0:0 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1 1:1 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2 2:2 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3 3:3 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0 4:4 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1 5:5 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2 6:6 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3 7:7 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0 8:8 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1 9:9 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2 10:10 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_FALSE 0x00000000 +#define 
NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3 11:11 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0 12:12 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1 13:13 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2 14:14 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3 15:15 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0 16:16 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1 17:17 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2 18:18 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3 19:19 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0 20:20 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1 21:21 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2 22:22 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3 23:23 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0 24:24 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_TRUE 0x00000001 +#define 
NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1 25:25 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2 26:26 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3 27:27 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0 28:28 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1 29:29 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2 30:30 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_TRUE 0x00000001 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3 31:31 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_FALSE 0x00000000 +#define NVC597_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_TRUE 0x00000001 + +#define NVC597_SET_TIR_CONTROL 0x1130 +#define NVC597_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES 0:0 +#define NVC597_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_DISABLE 0x00000000 +#define NVC597_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_ENABLE 0x00000001 +#define NVC597_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES 4:4 +#define NVC597_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_DISABLE 0x00000000 +#define NVC597_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_ENABLE 0x00000001 +#define NVC597_SET_TIR_CONTROL_REDUCE_COVERAGE 1:1 +#define NVC597_SET_TIR_CONTROL_REDUCE_COVERAGE_DISABLE 0x00000000 +#define NVC597_SET_TIR_CONTROL_REDUCE_COVERAGE_ENABLE 0x00000001 + +#define NVC597_SET_MUTABLE_METHOD_CONTROL 0x1134 +#define NVC597_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT 0:0 +#define NVC597_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_FALSE 0x00000000 +#define NVC597_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_TRUE 0x00000001 + +#define NVC597_SET_POST_PS_INITIAL_COVERAGE 0x1138 +#define NVC597_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE 0:0 +#define NVC597_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_FALSE 0x00000000 +#define NVC597_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_TRUE 0x00000001 + +#define NVC597_SET_FILL_VIA_TRIANGLE 0x113c +#define NVC597_SET_FILL_VIA_TRIANGLE_MODE 1:0 +#define NVC597_SET_FILL_VIA_TRIANGLE_MODE_DISABLED 0x00000000 +#define NVC597_SET_FILL_VIA_TRIANGLE_MODE_FILL_ALL 0x00000001 +#define NVC597_SET_FILL_VIA_TRIANGLE_MODE_FILL_BBOX 0x00000002 + +#define NVC597_SET_BLEND_PER_FORMAT_ENABLE 0x1140 +#define NVC597_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16 4:4 +#define NVC597_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_FALSE 0x00000000 +#define 
NVC597_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_TRUE 0x00000001 + +#define NVC597_FLUSH_PENDING_WRITES 0x1144 +#define NVC597_FLUSH_PENDING_WRITES_SM_DOES_GLOBAL_STORE 0:0 + +#define NVC597_SET_VERTEX_ATTRIBUTE_A(i) (0x1160+(i)*4) +#define NVC597_SET_VERTEX_ATTRIBUTE_A_STREAM 4:0 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_SOURCE 6:6 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_SOURCE_ACTIVE 0x00000000 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_SOURCE_INACTIVE 0x00000001 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_OFFSET 20:7 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS 26:21 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NVC597_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8 0x00000034 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE 29:27 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SNORM 0x00000001 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UNORM 0x00000002 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SINT 0x00000003 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UINT 0x00000004 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_USCALED 0x00000005 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SSCALED 0x00000006 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT 0x00000007 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B 31:31 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_FALSE 0x00000000 +#define NVC597_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_TRUE 0x00000001 + +#define NVC597_SET_VERTEX_ATTRIBUTE_B(i) (0x11a0+(i)*4) +#define NVC597_SET_VERTEX_ATTRIBUTE_B_STREAM 4:0 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_SOURCE 6:6 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_SOURCE_ACTIVE 0x00000000 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_SOURCE_INACTIVE 0x00000001 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_OFFSET 20:7 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS 26:21 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002 +#define 
NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NVC597_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8 0x00000034 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE 29:27 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SNORM 0x00000001 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UNORM 0x00000002 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SINT 0x00000003 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UINT 0x00000004 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_USCALED 0x00000005 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SSCALED 0x00000006 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_FLOAT 0x00000007 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B 31:31 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_FALSE 0x00000000 +#define NVC597_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_TRUE 0x00000001 + +#define NVC597_SET_ANTI_ALIAS_SAMPLE_POSITIONS(i) (0x11e0+(i)*4) +#define NVC597_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X0 3:0 +#define NVC597_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y0 7:4 +#define NVC597_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X1 11:8 +#define NVC597_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y1 15:12 +#define NVC597_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X2 19:16 +#define NVC597_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y2 23:20 +#define NVC597_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X3 27:24 +#define NVC597_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y3 31:28 + +#define NVC597_SET_OFFSET_RENDER_TARGET_INDEX 0x11f0 +#define NVC597_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX 0:0 +#define NVC597_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_FALSE 0x00000000 +#define NVC597_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_TRUE 0x00000001 + +#define NVC597_FORCE_HEAVYWEIGHT_METHOD_SYNC 0x11f4 +#define NVC597_FORCE_HEAVYWEIGHT_METHOD_SYNC_V 31:0 + +#define NVC597_SET_COVERAGE_TO_COLOR 0x11f8 +#define NVC597_SET_COVERAGE_TO_COLOR_ENABLE 0:0 +#define NVC597_SET_COVERAGE_TO_COLOR_ENABLE_FALSE 0x00000000 +#define NVC597_SET_COVERAGE_TO_COLOR_ENABLE_TRUE 0x00000001 +#define NVC597_SET_COVERAGE_TO_COLOR_CT_SELECT 6:4 + +#define NVC597_DECOMPRESS_ZETA_SURFACE 0x11fc +#define NVC597_DECOMPRESS_ZETA_SURFACE_Z_ENABLE 0:0 +#define NVC597_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_FALSE 0x00000000 +#define 
NVC597_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_TRUE 0x00000001 +#define NVC597_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE 4:4 +#define NVC597_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_FALSE 0x00000000 +#define NVC597_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_TRUE 0x00000001 + +#define NVC597_SET_SCREEN_STATE_MASK 0x1204 +#define NVC597_SET_SCREEN_STATE_MASK_MASK 3:0 + +#define NVC597_SET_ZT_SPARSE 0x1208 +#define NVC597_SET_ZT_SPARSE_ENABLE 0:0 +#define NVC597_SET_ZT_SPARSE_ENABLE_FALSE 0x00000000 +#define NVC597_SET_ZT_SPARSE_ENABLE_TRUE 0x00000001 +#define NVC597_SET_ZT_SPARSE_UNMAPPED_COMPARE 1:1 +#define NVC597_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_UNMAPPED_0 0x00000000 +#define NVC597_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_FAIL_ALWAYS 0x00000001 + +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST 0x1214 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX 15:0 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT 27:16 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT 0x1218 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_START_INDEX 15:0 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 
+#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC597_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NVC597_SET_CT_SELECT 0x121c +#define NVC597_SET_CT_SELECT_TARGET_COUNT 3:0 +#define NVC597_SET_CT_SELECT_TARGET0 6:4 +#define NVC597_SET_CT_SELECT_TARGET1 9:7 +#define NVC597_SET_CT_SELECT_TARGET2 12:10 +#define NVC597_SET_CT_SELECT_TARGET3 15:13 +#define NVC597_SET_CT_SELECT_TARGET4 18:16 +#define NVC597_SET_CT_SELECT_TARGET5 21:19 +#define NVC597_SET_CT_SELECT_TARGET6 24:22 +#define NVC597_SET_CT_SELECT_TARGET7 27:25 + +#define NVC597_SET_COMPRESSION_THRESHOLD 0x1220 +#define NVC597_SET_COMPRESSION_THRESHOLD_SAMPLES 3:0 +#define NVC597_SET_COMPRESSION_THRESHOLD_SAMPLES__0 0x00000000 +#define NVC597_SET_COMPRESSION_THRESHOLD_SAMPLES__1 0x00000001 +#define NVC597_SET_COMPRESSION_THRESHOLD_SAMPLES__2 0x00000002 +#define NVC597_SET_COMPRESSION_THRESHOLD_SAMPLES__4 0x00000003 +#define NVC597_SET_COMPRESSION_THRESHOLD_SAMPLES__8 0x00000004 +#define NVC597_SET_COMPRESSION_THRESHOLD_SAMPLES__16 0x00000005 +#define NVC597_SET_COMPRESSION_THRESHOLD_SAMPLES__32 0x00000006 +#define NVC597_SET_COMPRESSION_THRESHOLD_SAMPLES__64 0x00000007 +#define NVC597_SET_COMPRESSION_THRESHOLD_SAMPLES__128 0x00000008 +#define NVC597_SET_COMPRESSION_THRESHOLD_SAMPLES__256 0x00000009 +#define NVC597_SET_COMPRESSION_THRESHOLD_SAMPLES__512 0x0000000A +#define NVC597_SET_COMPRESSION_THRESHOLD_SAMPLES__1024 0x0000000B +#define NVC597_SET_COMPRESSION_THRESHOLD_SAMPLES__2048 0x0000000C + +#define NVC597_SET_PIXEL_SHADER_INTERLOCK_CONTROL 0x1224 +#define NVC597_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE 1:0 +#define NVC597_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_NO_CONFLICT_DETECT 0x00000000 +#define NVC597_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_SAMPLE 0x00000001 +#define NVC597_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_PIXEL 0x00000002 +#define NVC597_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE 2:2 +#define NVC597_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_16X16 0x00000000 +#define NVC597_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_8X8 0x00000001 +#define NVC597_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER 3:3 +#define NVC597_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_ORDERED 0x00000000 +#define NVC597_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_UNORDERED 0x00000001 + +#define NVC597_SET_ZT_SIZE_A 0x1228 +#define NVC597_SET_ZT_SIZE_A_WIDTH 27:0 + +#define NVC597_SET_ZT_SIZE_B 0x122c +#define NVC597_SET_ZT_SIZE_B_HEIGHT 17:0 + +#define NVC597_SET_ZT_SIZE_C 0x1230 +#define NVC597_SET_ZT_SIZE_C_THIRD_DIMENSION 15:0 +#define NVC597_SET_ZT_SIZE_C_CONTROL 16:16 +#define NVC597_SET_ZT_SIZE_C_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000 +#define NVC597_SET_ZT_SIZE_C_CONTROL_ARRAY_SIZE_IS_ONE 0x00000001 + +#define NVC597_SET_SAMPLER_BINDING 0x1234 +#define NVC597_SET_SAMPLER_BINDING_V 0:0 
+#define NVC597_SET_SAMPLER_BINDING_V_INDEPENDENTLY 0x00000000 +#define NVC597_SET_SAMPLER_BINDING_V_VIA_HEADER_BINDING 0x00000001 + +#define NVC597_DRAW_AUTO 0x123c +#define NVC597_DRAW_AUTO_BYTE_COUNT 31:0 + +#define NVC597_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK(i) (0x1240+(i)*4) +#define NVC597_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK_V 31:0 + +#define NVC597_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE 0x1260 +#define NVC597_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_INDEX 7:0 +#define NVC597_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_VALUE 23:8 + +#define NVC597_SET_BACK_END_COPY_A 0x1264 +#define NVC597_SET_BACK_END_COPY_A_DWORDS 7:0 +#define NVC597_SET_BACK_END_COPY_A_SATURATE32_ENABLE 8:8 +#define NVC597_SET_BACK_END_COPY_A_SATURATE32_ENABLE_FALSE 0x00000000 +#define NVC597_SET_BACK_END_COPY_A_SATURATE32_ENABLE_TRUE 0x00000001 +#define NVC597_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE 12:12 +#define NVC597_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_FALSE 0x00000000 +#define NVC597_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_TRUE 0x00000001 + +#define NVC597_SET_BACK_END_COPY_B 0x1268 +#define NVC597_SET_BACK_END_COPY_B_SRC_ADDRESS_UPPER 7:0 + +#define NVC597_SET_BACK_END_COPY_C 0x126c +#define NVC597_SET_BACK_END_COPY_C_SRC_ADDRESS_LOWER 31:0 + +#define NVC597_SET_BACK_END_COPY_D 0x1270 +#define NVC597_SET_BACK_END_COPY_D_DEST_ADDRESS_UPPER 7:0 + +#define NVC597_SET_BACK_END_COPY_E 0x1274 +#define NVC597_SET_BACK_END_COPY_E_DEST_ADDRESS_LOWER 31:0 + +#define NVC597_SET_CIRCULAR_BUFFER_SIZE 0x1280 +#define NVC597_SET_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 19:0 + +#define NVC597_SET_VTG_REGISTER_WATERMARKS 0x1284 +#define NVC597_SET_VTG_REGISTER_WATERMARKS_LOW 15:0 +#define NVC597_SET_VTG_REGISTER_WATERMARKS_HIGH 31:16 + +#define NVC597_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVC597_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NVC597_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC597_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC597_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS 0x1290 +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY 5:4 +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC597_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE 0x12a4 +#define NVC597_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE_V 31:0 + +#define NVC597_CLEAR_ZCULL_REGION 0x12c8 +#define NVC597_CLEAR_ZCULL_REGION_Z_ENABLE 0:0 +#define NVC597_CLEAR_ZCULL_REGION_Z_ENABLE_FALSE 0x00000000 +#define NVC597_CLEAR_ZCULL_REGION_Z_ENABLE_TRUE 0x00000001 +#define NVC597_CLEAR_ZCULL_REGION_STENCIL_ENABLE 4:4 +#define NVC597_CLEAR_ZCULL_REGION_STENCIL_ENABLE_FALSE 0x00000000 +#define NVC597_CLEAR_ZCULL_REGION_STENCIL_ENABLE_TRUE 0x00000001 +#define NVC597_CLEAR_ZCULL_REGION_USE_CLEAR_RECT 1:1 +#define NVC597_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_FALSE 0x00000000 +#define NVC597_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_TRUE 0x00000001 +#define NVC597_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX 2:2 +#define NVC597_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_FALSE 0x00000000 +#define NVC597_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_TRUE 0x00000001 +#define NVC597_CLEAR_ZCULL_REGION_RT_ARRAY_INDEX 20:5 +#define 
NVC597_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE 3:3 +#define NVC597_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_FALSE 0x00000000 +#define NVC597_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_TRUE 0x00000001 + +#define NVC597_SET_DEPTH_TEST 0x12cc +#define NVC597_SET_DEPTH_TEST_ENABLE 0:0 +#define NVC597_SET_DEPTH_TEST_ENABLE_FALSE 0x00000000 +#define NVC597_SET_DEPTH_TEST_ENABLE_TRUE 0x00000001 + +#define NVC597_SET_FILL_MODE 0x12d0 +#define NVC597_SET_FILL_MODE_V 31:0 +#define NVC597_SET_FILL_MODE_V_POINT 0x00000001 +#define NVC597_SET_FILL_MODE_V_WIREFRAME 0x00000002 +#define NVC597_SET_FILL_MODE_V_SOLID 0x00000003 + +#define NVC597_SET_SHADE_MODE 0x12d4 +#define NVC597_SET_SHADE_MODE_V 31:0 +#define NVC597_SET_SHADE_MODE_V_FLAT 0x00000001 +#define NVC597_SET_SHADE_MODE_V_GOURAUD 0x00000002 +#define NVC597_SET_SHADE_MODE_V_OGL_FLAT 0x00001D00 +#define NVC597_SET_SHADE_MODE_V_OGL_SMOOTH 0x00001D01 + +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS 0x12d8 +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY 5:4 +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS 0x12dc +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY 5:4 +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC597_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC597_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL 0x12e0 +#define NVC597_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT 3:0 +#define NVC597_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1 0x00000000 +#define NVC597_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_2X2 0x00000001 +#define NVC597_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1_VIRTUAL_SAMPLES 0x00000002 + +#define NVC597_SET_BLEND_STATE_PER_TARGET 0x12e4 +#define NVC597_SET_BLEND_STATE_PER_TARGET_ENABLE 0:0 +#define NVC597_SET_BLEND_STATE_PER_TARGET_ENABLE_FALSE 0x00000000 +#define NVC597_SET_BLEND_STATE_PER_TARGET_ENABLE_TRUE 0x00000001 + +#define NVC597_SET_DEPTH_WRITE 0x12e8 +#define NVC597_SET_DEPTH_WRITE_ENABLE 0:0 +#define NVC597_SET_DEPTH_WRITE_ENABLE_FALSE 0x00000000 +#define NVC597_SET_DEPTH_WRITE_ENABLE_TRUE 0x00000001 + +#define NVC597_SET_ALPHA_TEST 0x12ec +#define NVC597_SET_ALPHA_TEST_ENABLE 0:0 +#define NVC597_SET_ALPHA_TEST_ENABLE_FALSE 0x00000000 +#define NVC597_SET_ALPHA_TEST_ENABLE_TRUE 0x00000001 + +#define NVC597_SET_INLINE_INDEX4X8_ALIGN 0x1300 +#define NVC597_SET_INLINE_INDEX4X8_ALIGN_COUNT 29:0 +#define NVC597_SET_INLINE_INDEX4X8_ALIGN_START 31:30 + +#define NVC597_DRAW_INLINE_INDEX4X8 0x1304 +#define NVC597_DRAW_INLINE_INDEX4X8_INDEX0 7:0 +#define NVC597_DRAW_INLINE_INDEX4X8_INDEX1 15:8 +#define NVC597_DRAW_INLINE_INDEX4X8_INDEX2 23:16 +#define NVC597_DRAW_INLINE_INDEX4X8_INDEX3 31:24 + +#define NVC597_D3D_SET_CULL_MODE 0x1308 +#define NVC597_D3D_SET_CULL_MODE_V 31:0 +#define NVC597_D3D_SET_CULL_MODE_V_NONE 0x00000001 +#define NVC597_D3D_SET_CULL_MODE_V_CW 0x00000002 +#define NVC597_D3D_SET_CULL_MODE_V_CCW 0x00000003 + +#define 
NVC597_SET_DEPTH_FUNC 0x130c +#define NVC597_SET_DEPTH_FUNC_V 31:0 +#define NVC597_SET_DEPTH_FUNC_V_OGL_NEVER 0x00000200 +#define NVC597_SET_DEPTH_FUNC_V_OGL_LESS 0x00000201 +#define NVC597_SET_DEPTH_FUNC_V_OGL_EQUAL 0x00000202 +#define NVC597_SET_DEPTH_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVC597_SET_DEPTH_FUNC_V_OGL_GREATER 0x00000204 +#define NVC597_SET_DEPTH_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVC597_SET_DEPTH_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVC597_SET_DEPTH_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVC597_SET_DEPTH_FUNC_V_D3D_NEVER 0x00000001 +#define NVC597_SET_DEPTH_FUNC_V_D3D_LESS 0x00000002 +#define NVC597_SET_DEPTH_FUNC_V_D3D_EQUAL 0x00000003 +#define NVC597_SET_DEPTH_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVC597_SET_DEPTH_FUNC_V_D3D_GREATER 0x00000005 +#define NVC597_SET_DEPTH_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVC597_SET_DEPTH_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVC597_SET_DEPTH_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVC597_SET_ALPHA_REF 0x1310 +#define NVC597_SET_ALPHA_REF_V 31:0 + +#define NVC597_SET_ALPHA_FUNC 0x1314 +#define NVC597_SET_ALPHA_FUNC_V 31:0 +#define NVC597_SET_ALPHA_FUNC_V_OGL_NEVER 0x00000200 +#define NVC597_SET_ALPHA_FUNC_V_OGL_LESS 0x00000201 +#define NVC597_SET_ALPHA_FUNC_V_OGL_EQUAL 0x00000202 +#define NVC597_SET_ALPHA_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVC597_SET_ALPHA_FUNC_V_OGL_GREATER 0x00000204 +#define NVC597_SET_ALPHA_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVC597_SET_ALPHA_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVC597_SET_ALPHA_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVC597_SET_ALPHA_FUNC_V_D3D_NEVER 0x00000001 +#define NVC597_SET_ALPHA_FUNC_V_D3D_LESS 0x00000002 +#define NVC597_SET_ALPHA_FUNC_V_D3D_EQUAL 0x00000003 +#define NVC597_SET_ALPHA_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVC597_SET_ALPHA_FUNC_V_D3D_GREATER 0x00000005 +#define NVC597_SET_ALPHA_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVC597_SET_ALPHA_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVC597_SET_ALPHA_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVC597_SET_DRAW_AUTO_STRIDE 0x1318 +#define NVC597_SET_DRAW_AUTO_STRIDE_V 11:0 + +#define NVC597_SET_BLEND_CONST_RED 0x131c +#define NVC597_SET_BLEND_CONST_RED_V 31:0 + +#define NVC597_SET_BLEND_CONST_GREEN 0x1320 +#define NVC597_SET_BLEND_CONST_GREEN_V 31:0 + +#define NVC597_SET_BLEND_CONST_BLUE 0x1324 +#define NVC597_SET_BLEND_CONST_BLUE_V 31:0 + +#define NVC597_SET_BLEND_CONST_ALPHA 0x1328 +#define NVC597_SET_BLEND_CONST_ALPHA_V 31:0 + +#define NVC597_INVALIDATE_SAMPLER_CACHE 0x1330 +#define NVC597_INVALIDATE_SAMPLER_CACHE_LINES 0:0 +#define NVC597_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000 +#define NVC597_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001 +#define NVC597_INVALIDATE_SAMPLER_CACHE_TAG 25:4 + +#define NVC597_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334 +#define NVC597_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0 +#define NVC597_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000 +#define NVC597_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001 +#define NVC597_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4 + +#define NVC597_INVALIDATE_TEXTURE_DATA_CACHE 0x1338 +#define NVC597_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0 +#define NVC597_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000 +#define NVC597_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001 +#define NVC597_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4 + +#define NVC597_SET_BLEND_SEPARATE_FOR_ALPHA 0x133c +#define NVC597_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE 0:0 +#define NVC597_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000 +#define 
NVC597_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001 + +#define NVC597_SET_BLEND_COLOR_OP 0x1340 +#define NVC597_SET_BLEND_COLOR_OP_V 31:0 +#define NVC597_SET_BLEND_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC597_SET_BLEND_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC597_SET_BLEND_COLOR_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC597_SET_BLEND_COLOR_OP_V_OGL_MIN 0x00008007 +#define NVC597_SET_BLEND_COLOR_OP_V_OGL_MAX 0x00008008 +#define NVC597_SET_BLEND_COLOR_OP_V_D3D_ADD 0x00000001 +#define NVC597_SET_BLEND_COLOR_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC597_SET_BLEND_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVC597_SET_BLEND_COLOR_OP_V_D3D_MIN 0x00000004 +#define NVC597_SET_BLEND_COLOR_OP_V_D3D_MAX 0x00000005 + +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF 0x1344 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V 31:0 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 
+#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC597_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC597_SET_BLEND_COLOR_DEST_COEFF 0x1348 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V 31:0 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC597_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC597_SET_BLEND_ALPHA_OP 0x134c +#define NVC597_SET_BLEND_ALPHA_OP_V 31:0 +#define NVC597_SET_BLEND_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC597_SET_BLEND_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC597_SET_BLEND_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC597_SET_BLEND_ALPHA_OP_V_OGL_MIN 0x00008007 +#define NVC597_SET_BLEND_ALPHA_OP_V_OGL_MAX 0x00008008 +#define NVC597_SET_BLEND_ALPHA_OP_V_D3D_ADD 0x00000001 +#define NVC597_SET_BLEND_ALPHA_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC597_SET_BLEND_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVC597_SET_BLEND_ALPHA_OP_V_D3D_MIN 0x00000004 +#define NVC597_SET_BLEND_ALPHA_OP_V_D3D_MAX 
0x00000005 + +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF 0x1350 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V 31:0 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC597_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC597_SET_GLOBAL_COLOR_KEY 0x1354 +#define NVC597_SET_GLOBAL_COLOR_KEY_ENABLE 0:0 +#define NVC597_SET_GLOBAL_COLOR_KEY_ENABLE_FALSE 0x00000000 +#define NVC597_SET_GLOBAL_COLOR_KEY_ENABLE_TRUE 0x00000001 + +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF 0x1358 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V 31:0 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 
+#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC597_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC597_SET_SINGLE_ROP_CONTROL 0x135c +#define NVC597_SET_SINGLE_ROP_CONTROL_ENABLE 0:0 +#define NVC597_SET_SINGLE_ROP_CONTROL_ENABLE_FALSE 0x00000000 +#define NVC597_SET_SINGLE_ROP_CONTROL_ENABLE_TRUE 0x00000001 + +#define NVC597_SET_BLEND(i) (0x1360+(i)*4) +#define NVC597_SET_BLEND_ENABLE 0:0 +#define NVC597_SET_BLEND_ENABLE_FALSE 0x00000000 +#define NVC597_SET_BLEND_ENABLE_TRUE 0x00000001 + +#define NVC597_SET_STENCIL_TEST 0x1380 +#define NVC597_SET_STENCIL_TEST_ENABLE 0:0 +#define NVC597_SET_STENCIL_TEST_ENABLE_FALSE 0x00000000 +#define NVC597_SET_STENCIL_TEST_ENABLE_TRUE 0x00000001 + +#define NVC597_SET_STENCIL_OP_FAIL 0x1384 +#define NVC597_SET_STENCIL_OP_FAIL_V 31:0 +#define NVC597_SET_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00 +#define NVC597_SET_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000 +#define NVC597_SET_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01 +#define NVC597_SET_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02 +#define NVC597_SET_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03 +#define NVC597_SET_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A +#define NVC597_SET_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507 +#define NVC597_SET_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508 +#define NVC597_SET_STENCIL_OP_FAIL_V_D3D_KEEP 
0x00000001 +#define NVC597_SET_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002 +#define NVC597_SET_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003 +#define NVC597_SET_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004 +#define NVC597_SET_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005 +#define NVC597_SET_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006 +#define NVC597_SET_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007 +#define NVC597_SET_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008 + +#define NVC597_SET_STENCIL_OP_ZFAIL 0x1388 +#define NVC597_SET_STENCIL_OP_ZFAIL_V 31:0 +#define NVC597_SET_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00 +#define NVC597_SET_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000 +#define NVC597_SET_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01 +#define NVC597_SET_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02 +#define NVC597_SET_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03 +#define NVC597_SET_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A +#define NVC597_SET_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507 +#define NVC597_SET_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508 +#define NVC597_SET_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001 +#define NVC597_SET_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002 +#define NVC597_SET_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003 +#define NVC597_SET_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004 +#define NVC597_SET_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005 +#define NVC597_SET_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006 +#define NVC597_SET_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007 +#define NVC597_SET_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008 + +#define NVC597_SET_STENCIL_OP_ZPASS 0x138c +#define NVC597_SET_STENCIL_OP_ZPASS_V 31:0 +#define NVC597_SET_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00 +#define NVC597_SET_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000 +#define NVC597_SET_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01 +#define NVC597_SET_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02 +#define NVC597_SET_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03 +#define NVC597_SET_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A +#define NVC597_SET_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507 +#define NVC597_SET_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508 +#define NVC597_SET_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001 +#define NVC597_SET_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002 +#define NVC597_SET_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003 +#define NVC597_SET_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004 +#define NVC597_SET_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005 +#define NVC597_SET_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006 +#define NVC597_SET_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007 +#define NVC597_SET_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008 + +#define NVC597_SET_STENCIL_FUNC 0x1390 +#define NVC597_SET_STENCIL_FUNC_V 31:0 +#define NVC597_SET_STENCIL_FUNC_V_OGL_NEVER 0x00000200 +#define NVC597_SET_STENCIL_FUNC_V_OGL_LESS 0x00000201 +#define NVC597_SET_STENCIL_FUNC_V_OGL_EQUAL 0x00000202 +#define NVC597_SET_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVC597_SET_STENCIL_FUNC_V_OGL_GREATER 0x00000204 +#define NVC597_SET_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVC597_SET_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVC597_SET_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVC597_SET_STENCIL_FUNC_V_D3D_NEVER 0x00000001 +#define NVC597_SET_STENCIL_FUNC_V_D3D_LESS 0x00000002 +#define NVC597_SET_STENCIL_FUNC_V_D3D_EQUAL 0x00000003 +#define NVC597_SET_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVC597_SET_STENCIL_FUNC_V_D3D_GREATER 0x00000005 +#define NVC597_SET_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVC597_SET_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVC597_SET_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008 + +#define 
NVC597_SET_STENCIL_FUNC_REF 0x1394
+#define NVC597_SET_STENCIL_FUNC_REF_V 7:0
+
+#define NVC597_SET_STENCIL_FUNC_MASK 0x1398
+#define NVC597_SET_STENCIL_FUNC_MASK_V 7:0
+
+#define NVC597_SET_STENCIL_MASK 0x139c
+#define NVC597_SET_STENCIL_MASK_V 7:0
+
+#define NVC597_SET_DRAW_AUTO_START 0x13a4
+#define NVC597_SET_DRAW_AUTO_START_BYTE_COUNT 31:0
+
+#define NVC597_SET_PS_SATURATE 0x13a8
+#define NVC597_SET_PS_SATURATE_OUTPUT0 0:0
+#define NVC597_SET_PS_SATURATE_OUTPUT0_FALSE 0x00000000
+#define NVC597_SET_PS_SATURATE_OUTPUT0_TRUE 0x00000001
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE0 1:1
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE0_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE0_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC597_SET_PS_SATURATE_OUTPUT1 4:4
+#define NVC597_SET_PS_SATURATE_OUTPUT1_FALSE 0x00000000
+#define NVC597_SET_PS_SATURATE_OUTPUT1_TRUE 0x00000001
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE1 5:5
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE1_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE1_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC597_SET_PS_SATURATE_OUTPUT2 8:8
+#define NVC597_SET_PS_SATURATE_OUTPUT2_FALSE 0x00000000
+#define NVC597_SET_PS_SATURATE_OUTPUT2_TRUE 0x00000001
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE2 9:9
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE2_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE2_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC597_SET_PS_SATURATE_OUTPUT3 12:12
+#define NVC597_SET_PS_SATURATE_OUTPUT3_FALSE 0x00000000
+#define NVC597_SET_PS_SATURATE_OUTPUT3_TRUE 0x00000001
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE3 13:13
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE3_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE3_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC597_SET_PS_SATURATE_OUTPUT4 16:16
+#define NVC597_SET_PS_SATURATE_OUTPUT4_FALSE 0x00000000
+#define NVC597_SET_PS_SATURATE_OUTPUT4_TRUE 0x00000001
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE4 17:17
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE4_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE4_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC597_SET_PS_SATURATE_OUTPUT5 20:20
+#define NVC597_SET_PS_SATURATE_OUTPUT5_FALSE 0x00000000
+#define NVC597_SET_PS_SATURATE_OUTPUT5_TRUE 0x00000001
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE5 21:21
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE5_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE5_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC597_SET_PS_SATURATE_OUTPUT6 24:24
+#define NVC597_SET_PS_SATURATE_OUTPUT6_FALSE 0x00000000
+#define NVC597_SET_PS_SATURATE_OUTPUT6_TRUE 0x00000001
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE6 25:25
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE6_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE6_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC597_SET_PS_SATURATE_OUTPUT7 28:28
+#define NVC597_SET_PS_SATURATE_OUTPUT7_FALSE 0x00000000
+#define NVC597_SET_PS_SATURATE_OUTPUT7_TRUE 0x00000001
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE7 29:29
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE7_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC597_SET_PS_SATURATE_CLAMP_RANGE7_MINUS_ONE_TO_PLUS_ONE 0x00000001
+
+#define NVC597_SET_WINDOW_ORIGIN 0x13ac
+#define NVC597_SET_WINDOW_ORIGIN_MODE 0:0
+#define NVC597_SET_WINDOW_ORIGIN_MODE_UPPER_LEFT 0x00000000
+#define NVC597_SET_WINDOW_ORIGIN_MODE_LOWER_LEFT 0x00000001
+#define NVC597_SET_WINDOW_ORIGIN_FLIP_Y 4:4
+#define NVC597_SET_WINDOW_ORIGIN_FLIP_Y_FALSE 0x00000000
+#define NVC597_SET_WINDOW_ORIGIN_FLIP_Y_TRUE 0x00000001
+
+#define NVC597_SET_LINE_WIDTH_FLOAT 0x13b0
+#define NVC597_SET_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVC597_SET_ALIASED_LINE_WIDTH_FLOAT 0x13b4
+#define NVC597_SET_ALIASED_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVC597_SET_LINE_MULTISAMPLE_OVERRIDE 0x1418
+#define NVC597_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE 0:0
+#define NVC597_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_FALSE 0x00000000
+#define NVC597_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_TRUE 0x00000001
+
+#define NVC597_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424
+#define NVC597_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0
+#define NVC597_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVC597_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVC597_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4
+
+#define NVC597_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x1428
+#define NVC597_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0
+#define NVC597_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVC597_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVC597_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4
+
+#define NVC597_SET_GLOBAL_BASE_VERTEX_INDEX 0x1434
+#define NVC597_SET_GLOBAL_BASE_VERTEX_INDEX_V 31:0
+
+#define NVC597_SET_GLOBAL_BASE_INSTANCE_INDEX 0x1438
+#define NVC597_SET_GLOBAL_BASE_INSTANCE_INDEX_V 31:0
+
+#define NVC597_SET_PS_WARP_WATERMARKS 0x1450
+#define NVC597_SET_PS_WARP_WATERMARKS_LOW 15:0
+#define NVC597_SET_PS_WARP_WATERMARKS_HIGH 31:16
+
+#define NVC597_SET_PS_REGISTER_WATERMARKS 0x1454
+#define NVC597_SET_PS_REGISTER_WATERMARKS_LOW 15:0
+#define NVC597_SET_PS_REGISTER_WATERMARKS_HIGH 31:16
+
+#define NVC597_STORE_ZCULL 0x1464
+#define NVC597_STORE_ZCULL_V 0:0
+
+#define NVC597_SET_ITERATED_BLEND_CONSTANT_RED(j) (0x1480+(j)*16)
+#define NVC597_SET_ITERATED_BLEND_CONSTANT_RED_V 15:0
+
+#define NVC597_SET_ITERATED_BLEND_CONSTANT_GREEN(j) (0x1484+(j)*16)
+#define NVC597_SET_ITERATED_BLEND_CONSTANT_GREEN_V 15:0
+
+#define NVC597_SET_ITERATED_BLEND_CONSTANT_BLUE(j) (0x1488+(j)*16)
+#define NVC597_SET_ITERATED_BLEND_CONSTANT_BLUE_V 15:0
+
+#define NVC597_LOAD_ZCULL 0x1500
+#define NVC597_LOAD_ZCULL_V 0:0
+
+#define NVC597_SET_SURFACE_CLIP_ID_HEIGHT 0x1504
+#define NVC597_SET_SURFACE_CLIP_ID_HEIGHT_V 31:0
+
+#define NVC597_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL 0x1508
+#define NVC597_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVC597_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVC597_SET_CLIP_ID_CLEAR_RECT_VERTICAL 0x150c
+#define NVC597_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVC597_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVC597_SET_USER_CLIP_ENABLE 0x1510
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE0 0:0
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE0_FALSE 0x00000000
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE0_TRUE 0x00000001
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE1 1:1
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE1_FALSE 0x00000000
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE1_TRUE 0x00000001
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE2 2:2
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE2_FALSE 0x00000000
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE2_TRUE 0x00000001
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE3 3:3
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE3_FALSE 0x00000000
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE3_TRUE 0x00000001
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE4 4:4
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE4_FALSE 0x00000000
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE4_TRUE 0x00000001
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE5 5:5
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE5_FALSE 0x00000000
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE5_TRUE 0x00000001
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE6 6:6
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE6_FALSE 0x00000000
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE6_TRUE 0x00000001
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE7 7:7
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE7_FALSE 0x00000000
+#define NVC597_SET_USER_CLIP_ENABLE_PLANE7_TRUE 0x00000001
+
+#define NVC597_SET_ZPASS_PIXEL_COUNT 0x1514
+#define NVC597_SET_ZPASS_PIXEL_COUNT_ENABLE 0:0
+#define NVC597_SET_ZPASS_PIXEL_COUNT_ENABLE_FALSE 0x00000000
+#define NVC597_SET_ZPASS_PIXEL_COUNT_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_POINT_SIZE 0x1518
+#define NVC597_SET_POINT_SIZE_V 31:0
+
+#define NVC597_SET_ZCULL_STATS 0x151c
+#define NVC597_SET_ZCULL_STATS_ENABLE 0:0
+#define NVC597_SET_ZCULL_STATS_ENABLE_FALSE 0x00000000
+#define NVC597_SET_ZCULL_STATS_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_POINT_SPRITE 0x1520
+#define NVC597_SET_POINT_SPRITE_ENABLE 0:0
+#define NVC597_SET_POINT_SPRITE_ENABLE_FALSE 0x00000000
+#define NVC597_SET_POINT_SPRITE_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_SHADER_EXCEPTIONS 0x1528
+#define NVC597_SET_SHADER_EXCEPTIONS_ENABLE 0:0
+#define NVC597_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000
+#define NVC597_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001
+
+#define NVC597_CLEAR_REPORT_VALUE 0x1530
+#define NVC597_CLEAR_REPORT_VALUE_TYPE 4:0
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_DA_VERTICES_GENERATED 0x00000012
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_DA_PRIMITIVES_GENERATED 0x00000013
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_VS_INVOCATIONS 0x00000015
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_TI_INVOCATIONS 0x00000016
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_TS_INVOCATIONS 0x00000017
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_TS_PRIMITIVES_GENERATED 0x00000018
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_GS_INVOCATIONS 0x0000001A
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_GS_PRIMITIVES_GENERATED 0x0000001B
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_VTG_PRIMITIVES_OUT 0x0000001F
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_SUCCEEDED 0x00000010
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_NEEDED 0x00000011
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000003
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_CLIPPER_INVOCATIONS 0x0000001C
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_CLIPPER_PRIMITIVES_GENERATED 0x0000001D
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_ZCULL_STATS 0x00000002
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_PS_INVOCATIONS 0x0000001E
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_ZPASS_PIXEL_CNT 0x00000001
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_ALPHA_BETA_CLOCKS 0x00000004
+#define NVC597_CLEAR_REPORT_VALUE_TYPE_SCG_CLOCKS 0x00000009
+
+#define NVC597_SET_ANTI_ALIAS_ENABLE 0x1534
+#define NVC597_SET_ANTI_ALIAS_ENABLE_V 0:0
+#define NVC597_SET_ANTI_ALIAS_ENABLE_V_FALSE 0x00000000
+#define NVC597_SET_ANTI_ALIAS_ENABLE_V_TRUE 0x00000001
+
+#define NVC597_SET_ZT_SELECT 0x1538
+#define NVC597_SET_ZT_SELECT_TARGET_COUNT 0:0
+
+#define NVC597_SET_ANTI_ALIAS_ALPHA_CONTROL 0x153c
+#define NVC597_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE 0:0
+#define NVC597_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_DISABLE 0x00000000
+#define NVC597_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_ENABLE 0x00000001
+#define NVC597_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE 4:4
+#define NVC597_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_DISABLE 0x00000000
+#define NVC597_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_ENABLE 0x00000001
+
+#define NVC597_SET_RENDER_ENABLE_A 0x1550
+#define NVC597_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0
+
+#define NVC597_SET_RENDER_ENABLE_B 0x1554
+#define NVC597_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0
+
+#define NVC597_SET_RENDER_ENABLE_C 0x1558
+#define NVC597_SET_RENDER_ENABLE_C_MODE 2:0
+#define NVC597_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000
+#define NVC597_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001
+#define NVC597_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002
+#define NVC597_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003
+#define NVC597_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NVC597_SET_TEX_SAMPLER_POOL_A 0x155c
+#define NVC597_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVC597_SET_TEX_SAMPLER_POOL_B 0x1560
+#define NVC597_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVC597_SET_TEX_SAMPLER_POOL_C 0x1564
+#define NVC597_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0
+
+#define NVC597_SET_SLOPE_SCALE_DEPTH_BIAS 0x156c
+#define NVC597_SET_SLOPE_SCALE_DEPTH_BIAS_V 31:0
+
+#define NVC597_SET_ANTI_ALIASED_LINE 0x1570
+#define NVC597_SET_ANTI_ALIASED_LINE_ENABLE 0:0
+#define NVC597_SET_ANTI_ALIASED_LINE_ENABLE_FALSE 0x00000000
+#define NVC597_SET_ANTI_ALIASED_LINE_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_TEX_HEADER_POOL_A 0x1574
+#define NVC597_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVC597_SET_TEX_HEADER_POOL_B 0x1578
+#define NVC597_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVC597_SET_TEX_HEADER_POOL_C 0x157c
+#define NVC597_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0
+
+#define NVC597_SET_ACTIVE_ZCULL_REGION 0x1590
+#define NVC597_SET_ACTIVE_ZCULL_REGION_ID 5:0
+
+#define NVC597_SET_TWO_SIDED_STENCIL_TEST 0x1594
+#define NVC597_SET_TWO_SIDED_STENCIL_TEST_ENABLE 0:0
+#define NVC597_SET_TWO_SIDED_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NVC597_SET_TWO_SIDED_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_BACK_STENCIL_OP_FAIL 0x1598
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V 31:0
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NVC597_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL 0x159c
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V 31:0
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVC597_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS 0x15a0
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V 31:0
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NVC597_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NVC597_SET_BACK_STENCIL_FUNC 0x15a4
+#define NVC597_SET_BACK_STENCIL_FUNC_V 31:0
+#define NVC597_SET_BACK_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NVC597_SET_BACK_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NVC597_SET_BACK_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NVC597_SET_BACK_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVC597_SET_BACK_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NVC597_SET_BACK_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVC597_SET_BACK_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVC597_SET_BACK_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVC597_SET_BACK_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NVC597_SET_BACK_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NVC597_SET_BACK_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NVC597_SET_BACK_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVC597_SET_BACK_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVC597_SET_BACK_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVC597_SET_BACK_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVC597_SET_BACK_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVC597_SET_SRGB_WRITE 0x15b8
+#define NVC597_SET_SRGB_WRITE_ENABLE 0:0
+#define NVC597_SET_SRGB_WRITE_ENABLE_FALSE 0x00000000
+#define NVC597_SET_SRGB_WRITE_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_DEPTH_BIAS 0x15bc
+#define NVC597_SET_DEPTH_BIAS_V 31:0
+
+#define NVC597_SET_ZCULL_REGION_FORMAT 0x15c8
+#define NVC597_SET_ZCULL_REGION_FORMAT_TYPE 3:0
+#define NVC597_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X4 0x00000000
+#define NVC597_SET_ZCULL_REGION_FORMAT_TYPE_ZS_4X4 0x00000001
+#define NVC597_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X2 0x00000002
+#define NVC597_SET_ZCULL_REGION_FORMAT_TYPE_Z_2X4 0x00000003
+#define NVC597_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X8_4X4 0x00000004
+#define NVC597_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_4X2 0x00000005
+#define NVC597_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_2X4 0x00000006
+#define NVC597_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X16_4X8 0x00000007
+#define NVC597_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_2X2 0x00000008
+#define NVC597_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_4X2 0x00000009
+#define NVC597_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_2X4 0x0000000A
+#define NVC597_SET_ZCULL_REGION_FORMAT_TYPE_ZS_8X8_2X2 0x0000000B
+#define NVC597_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_1X1 0x0000000C
+
+#define NVC597_SET_RT_LAYER 0x15cc
+#define NVC597_SET_RT_LAYER_V 15:0
+#define NVC597_SET_RT_LAYER_CONTROL 16:16
+#define NVC597_SET_RT_LAYER_CONTROL_V_SELECTS_LAYER 0x00000000
+#define NVC597_SET_RT_LAYER_CONTROL_GEOMETRY_SHADER_SELECTS_LAYER 0x00000001
+
+#define NVC597_SET_ANTI_ALIAS 0x15d0
+#define NVC597_SET_ANTI_ALIAS_SAMPLES 3:0
+#define NVC597_SET_ANTI_ALIAS_SAMPLES_MODE_1X1 0x00000000
+#define NVC597_SET_ANTI_ALIAS_SAMPLES_MODE_2X1 0x00000001
+#define NVC597_SET_ANTI_ALIAS_SAMPLES_MODE_2X2 0x00000002
+#define NVC597_SET_ANTI_ALIAS_SAMPLES_MODE_4X2 0x00000003
+#define NVC597_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVC597_SET_ANTI_ALIAS_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVC597_SET_ANTI_ALIAS_SAMPLES_MODE_4X4 0x00000006
+#define NVC597_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_4 0x00000008
+#define NVC597_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_12 0x00000009
+#define NVC597_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_8 0x0000000A
+#define NVC597_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_24 0x0000000B
+
+#define NVC597_SET_EDGE_FLAG 0x15e4
+#define NVC597_SET_EDGE_FLAG_V 0:0
+#define NVC597_SET_EDGE_FLAG_V_FALSE 0x00000000
+#define NVC597_SET_EDGE_FLAG_V_TRUE 0x00000001
+
+#define NVC597_DRAW_INLINE_INDEX 0x15e8
+#define NVC597_DRAW_INLINE_INDEX_V 31:0
+
+#define NVC597_SET_INLINE_INDEX2X16_ALIGN 0x15ec
+#define NVC597_SET_INLINE_INDEX2X16_ALIGN_COUNT 30:0
+#define NVC597_SET_INLINE_INDEX2X16_ALIGN_START_ODD 31:31
+#define NVC597_SET_INLINE_INDEX2X16_ALIGN_START_ODD_FALSE 0x00000000
+#define NVC597_SET_INLINE_INDEX2X16_ALIGN_START_ODD_TRUE 0x00000001
+
+#define NVC597_DRAW_INLINE_INDEX2X16 0x15f0
+#define NVC597_DRAW_INLINE_INDEX2X16_EVEN 15:0
+#define NVC597_DRAW_INLINE_INDEX2X16_ODD 31:16
+
+#define NVC597_SET_VERTEX_GLOBAL_BASE_OFFSET_A 0x15f4
+#define NVC597_SET_VERTEX_GLOBAL_BASE_OFFSET_A_UPPER 7:0
+
+#define NVC597_SET_VERTEX_GLOBAL_BASE_OFFSET_B 0x15f8
+#define NVC597_SET_VERTEX_GLOBAL_BASE_OFFSET_B_LOWER 31:0
+
+#define NVC597_SET_ZCULL_REGION_PIXEL_OFFSET_A 0x15fc
+#define NVC597_SET_ZCULL_REGION_PIXEL_OFFSET_A_WIDTH 15:0
+
+#define NVC597_SET_ZCULL_REGION_PIXEL_OFFSET_B 0x1600
+#define NVC597_SET_ZCULL_REGION_PIXEL_OFFSET_B_HEIGHT 15:0
+
+#define NVC597_SET_POINT_SPRITE_SELECT 0x1604
+#define NVC597_SET_POINT_SPRITE_SELECT_RMODE 1:0
+#define NVC597_SET_POINT_SPRITE_SELECT_RMODE_ZERO 0x00000000
+#define NVC597_SET_POINT_SPRITE_SELECT_RMODE_FROM_R 0x00000001
+#define NVC597_SET_POINT_SPRITE_SELECT_RMODE_FROM_S 0x00000002
+#define NVC597_SET_POINT_SPRITE_SELECT_ORIGIN 2:2
+#define NVC597_SET_POINT_SPRITE_SELECT_ORIGIN_BOTTOM 0x00000000
+#define NVC597_SET_POINT_SPRITE_SELECT_ORIGIN_TOP 0x00000001
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE0 3:3
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE0_PASSTHROUGH 0x00000000
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE0_GENERATE 0x00000001
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE1 4:4
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE1_PASSTHROUGH 0x00000000
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE1_GENERATE 0x00000001
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE2 5:5
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE2_PASSTHROUGH 0x00000000
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE2_GENERATE 0x00000001
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE3 6:6
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE3_PASSTHROUGH 0x00000000
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE3_GENERATE 0x00000001
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE4 7:7
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE4_PASSTHROUGH 0x00000000
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE4_GENERATE 0x00000001
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE5 8:8
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE5_PASSTHROUGH 0x00000000
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE5_GENERATE 0x00000001
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE6 9:9
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE6_PASSTHROUGH 0x00000000
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE6_GENERATE 0x00000001
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE7 10:10
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE7_PASSTHROUGH 0x00000000
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE7_GENERATE 0x00000001
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE8 11:11
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE8_PASSTHROUGH 0x00000000
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE8_GENERATE 0x00000001
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE9 12:12
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE9_PASSTHROUGH 0x00000000
+#define NVC597_SET_POINT_SPRITE_SELECT_TEXTURE9_GENERATE 0x00000001
+
+#define NVC597_SET_ATTRIBUTE_DEFAULT 0x1610
+#define NVC597_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE 0:0
+#define NVC597_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_0001 0x00000000
+#define NVC597_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_1111 0x00000001
+#define NVC597_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR 1:1
+#define NVC597_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0000 0x00000000
+#define NVC597_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0001 0x00000001
+#define NVC597_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR 2:2
+#define NVC597_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0000 0x00000000
+#define NVC597_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0001 0x00000001
+#define NVC597_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE 3:3
+#define NVC597_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0000 0x00000000
+#define NVC597_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0001 0x00000001
+#define NVC597_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0 4:4
+#define NVC597_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_0001 0x00000000
+#define NVC597_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_1111 0x00000001
+#define NVC597_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15 5:5
+#define NVC597_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0000 0x00000000
+#define NVC597_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0001 0x00000001
+
+#define NVC597_END 0x1614
+#define NVC597_END_V 0:0
+
+#define NVC597_BEGIN 0x1618
+#define NVC597_BEGIN_OP 15:0
+#define NVC597_BEGIN_OP_POINTS 0x00000000
+#define NVC597_BEGIN_OP_LINES 0x00000001
+#define NVC597_BEGIN_OP_LINE_LOOP 0x00000002
+#define NVC597_BEGIN_OP_LINE_STRIP 0x00000003
+#define NVC597_BEGIN_OP_TRIANGLES 0x00000004
+#define NVC597_BEGIN_OP_TRIANGLE_STRIP 0x00000005
+#define NVC597_BEGIN_OP_TRIANGLE_FAN 0x00000006
+#define NVC597_BEGIN_OP_QUADS 0x00000007
+#define NVC597_BEGIN_OP_QUAD_STRIP 0x00000008
+#define NVC597_BEGIN_OP_POLYGON 0x00000009
+#define NVC597_BEGIN_OP_LINELIST_ADJCY 0x0000000A
+#define NVC597_BEGIN_OP_LINESTRIP_ADJCY 0x0000000B
+#define NVC597_BEGIN_OP_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC597_BEGIN_OP_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC597_BEGIN_OP_PATCH 0x0000000E
+#define NVC597_BEGIN_PRIMITIVE_ID 24:24
+#define NVC597_BEGIN_PRIMITIVE_ID_FIRST 0x00000000
+#define NVC597_BEGIN_PRIMITIVE_ID_UNCHANGED 0x00000001
+#define NVC597_BEGIN_INSTANCE_ID 27:26
+#define NVC597_BEGIN_INSTANCE_ID_FIRST 0x00000000
+#define NVC597_BEGIN_INSTANCE_ID_SUBSEQUENT 0x00000001
+#define NVC597_BEGIN_INSTANCE_ID_UNCHANGED 0x00000002
+#define NVC597_BEGIN_SPLIT_MODE 30:29
+#define NVC597_BEGIN_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000
+#define NVC597_BEGIN_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001
+#define NVC597_BEGIN_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002
+#define NVC597_BEGIN_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003
+#define NVC597_BEGIN_INSTANCE_ITERATE_ENABLE 31:31
+#define NVC597_BEGIN_INSTANCE_ITERATE_ENABLE_FALSE 0x00000000
+#define NVC597_BEGIN_INSTANCE_ITERATE_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_VERTEX_ID_COPY 0x161c
+#define NVC597_SET_VERTEX_ID_COPY_ENABLE 0:0
+#define NVC597_SET_VERTEX_ID_COPY_ENABLE_FALSE 0x00000000
+#define NVC597_SET_VERTEX_ID_COPY_ENABLE_TRUE 0x00000001
+#define NVC597_SET_VERTEX_ID_COPY_ATTRIBUTE_SLOT 11:4
+
+#define NVC597_ADD_TO_PRIMITIVE_ID 0x1620
+#define NVC597_ADD_TO_PRIMITIVE_ID_V 31:0
+
+#define NVC597_LOAD_PRIMITIVE_ID 0x1624
+#define NVC597_LOAD_PRIMITIVE_ID_V 31:0
+
+#define NVC597_SET_SHADER_BASED_CULL 0x162c
+#define NVC597_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE 1:1
+#define NVC597_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_FALSE 0x00000000
+#define NVC597_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_TRUE 0x00000001
+#define NVC597_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE 0:0
+#define NVC597_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_FALSE 0x00000000
+#define NVC597_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_CLASS_VERSION 0x1638
+#define NVC597_SET_CLASS_VERSION_CURRENT 15:0
+#define NVC597_SET_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC597_SET_DA_PRIMITIVE_RESTART 0x1644
+#define NVC597_SET_DA_PRIMITIVE_RESTART_ENABLE 0:0
+#define NVC597_SET_DA_PRIMITIVE_RESTART_ENABLE_FALSE 0x00000000
+#define NVC597_SET_DA_PRIMITIVE_RESTART_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_DA_PRIMITIVE_RESTART_INDEX 0x1648
+#define NVC597_SET_DA_PRIMITIVE_RESTART_INDEX_V 31:0
+
+#define NVC597_SET_DA_OUTPUT 0x164c
+#define NVC597_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START 12:12
+#define NVC597_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_FALSE 0x00000000
+#define NVC597_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_TRUE 0x00000001
+
+#define NVC597_SET_ANTI_ALIASED_POINT 0x1658
+#define NVC597_SET_ANTI_ALIASED_POINT_ENABLE 0:0
+#define NVC597_SET_ANTI_ALIASED_POINT_ENABLE_FALSE 0x00000000
+#define NVC597_SET_ANTI_ALIASED_POINT_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_POINT_CENTER_MODE 0x165c
+#define NVC597_SET_POINT_CENTER_MODE_V 31:0
+#define NVC597_SET_POINT_CENTER_MODE_V_OGL 0x00000000
+#define NVC597_SET_POINT_CENTER_MODE_V_D3D 0x00000001
+
+#define NVC597_SET_LINE_SMOOTH_PARAMETERS 0x1668
+#define NVC597_SET_LINE_SMOOTH_PARAMETERS_FALLOFF 31:0
+#define NVC597_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_00 0x00000000
+#define NVC597_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_33 0x00000001
+#define NVC597_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_60 0x00000002
+
+#define NVC597_SET_LINE_STIPPLE 0x166c
+#define NVC597_SET_LINE_STIPPLE_ENABLE 0:0
+#define NVC597_SET_LINE_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVC597_SET_LINE_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_LINE_SMOOTH_EDGE_TABLE(i) (0x1670+(i)*4)
+#define NVC597_SET_LINE_SMOOTH_EDGE_TABLE_V0 7:0
+#define NVC597_SET_LINE_SMOOTH_EDGE_TABLE_V1 15:8
+#define NVC597_SET_LINE_SMOOTH_EDGE_TABLE_V2 23:16
+#define NVC597_SET_LINE_SMOOTH_EDGE_TABLE_V3 31:24
+
+#define NVC597_SET_LINE_STIPPLE_PARAMETERS 0x1680
+#define NVC597_SET_LINE_STIPPLE_PARAMETERS_FACTOR 7:0
+#define NVC597_SET_LINE_STIPPLE_PARAMETERS_PATTERN 23:8
+
+#define NVC597_SET_PROVOKING_VERTEX 0x1684
+#define NVC597_SET_PROVOKING_VERTEX_V 0:0
+#define NVC597_SET_PROVOKING_VERTEX_V_FIRST 0x00000000
+#define NVC597_SET_PROVOKING_VERTEX_V_LAST 0x00000001
+
+#define NVC597_SET_TWO_SIDED_LIGHT 0x1688
+#define NVC597_SET_TWO_SIDED_LIGHT_ENABLE 0:0
+#define NVC597_SET_TWO_SIDED_LIGHT_ENABLE_FALSE 0x00000000
+#define NVC597_SET_TWO_SIDED_LIGHT_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_POLYGON_STIPPLE 0x168c
+#define NVC597_SET_POLYGON_STIPPLE_ENABLE 0:0
+#define NVC597_SET_POLYGON_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVC597_SET_POLYGON_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_SHADER_CONTROL 0x1690
+#define NVC597_SET_SHADER_CONTROL_DEFAULT_PARTIAL 0:0
+#define NVC597_SET_SHADER_CONTROL_DEFAULT_PARTIAL_ZERO 0x00000000
+#define NVC597_SET_SHADER_CONTROL_DEFAULT_PARTIAL_INFINITY 0x00000001
+#define NVC597_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR 1:1
+#define NVC597_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_LEGACY 0x00000000
+#define NVC597_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_FP64_COMPATIBLE 0x00000001
+#define NVC597_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR 2:2
+#define NVC597_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_ZERO 0x00000000
+#define NVC597_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_INDEFINITE 0x00000001
+
+#define NVC597_CHECK_CLASS_VERSION 0x16a0
+#define NVC597_CHECK_CLASS_VERSION_CURRENT 15:0
+#define NVC597_CHECK_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC597_SET_SPH_VERSION 0x16a4
+#define NVC597_SET_SPH_VERSION_CURRENT 15:0
+#define NVC597_SET_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC597_CHECK_SPH_VERSION 0x16a8
+#define NVC597_CHECK_SPH_VERSION_CURRENT 15:0
+#define NVC597_CHECK_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC597_SET_ALPHA_TO_COVERAGE_OVERRIDE 0x16b4
+#define NVC597_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE 0:0
+#define NVC597_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000
+#define NVC597_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001
+#define NVC597_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT 1:1
+#define NVC597_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_DISABLE 0x00000000
+#define NVC597_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_ENABLE 0x00000001
+
+#define NVC597_SET_SCG_GRAPHICS_PRIORITY 0x16bc
+#define NVC597_SET_SCG_GRAPHICS_PRIORITY_PRIORITY 5:0
+
+#define NVC597_SET_SCG_GRAPHICS_SCHEDULING_PARAMETERS(i) (0x16c0+(i)*4)
+#define NVC597_SET_SCG_GRAPHICS_SCHEDULING_PARAMETERS_V 31:0
+
+#define NVC597_SET_POLYGON_STIPPLE_PATTERN(i) (0x1700+(i)*4)
+#define NVC597_SET_POLYGON_STIPPLE_PATTERN_V 31:0
+
+#define NVC597_SET_AAM_VERSION 0x1790
+#define NVC597_SET_AAM_VERSION_CURRENT 15:0
+#define NVC597_SET_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC597_CHECK_AAM_VERSION 0x1794
+#define NVC597_CHECK_AAM_VERSION_CURRENT 15:0
+#define NVC597_CHECK_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC597_SET_ZT_LAYER 0x179c
+#define NVC597_SET_ZT_LAYER_OFFSET 15:0
+
+#define NVC597_SET_INDEX_BUFFER_A 0x17c8
+#define NVC597_SET_INDEX_BUFFER_A_ADDRESS_UPPER 7:0
+
+#define NVC597_SET_INDEX_BUFFER_B 0x17cc
+#define NVC597_SET_INDEX_BUFFER_B_ADDRESS_LOWER 31:0
+
+#define NVC597_SET_INDEX_BUFFER_E 0x17d8
+#define NVC597_SET_INDEX_BUFFER_E_INDEX_SIZE 1:0
+#define NVC597_SET_INDEX_BUFFER_E_INDEX_SIZE_ONE_BYTE 0x00000000
+#define NVC597_SET_INDEX_BUFFER_E_INDEX_SIZE_TWO_BYTES 0x00000001
+#define NVC597_SET_INDEX_BUFFER_E_INDEX_SIZE_FOUR_BYTES 0x00000002
+
+#define NVC597_SET_INDEX_BUFFER_F 0x17dc
+#define NVC597_SET_INDEX_BUFFER_F_FIRST 31:0
+
+#define NVC597_DRAW_INDEX_BUFFER 0x17e0
+#define NVC597_DRAW_INDEX_BUFFER_COUNT 31:0
+
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST 0x17e4
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST 0x17e8
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST 0x17ec
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f0
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC597_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f4
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC597_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f8
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC597_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC597_SET_DEPTH_BIAS_CLAMP 0x187c
+#define NVC597_SET_DEPTH_BIAS_CLAMP_V 31:0
+
+#define NVC597_SET_VERTEX_STREAM_INSTANCE_A(i) (0x1880+(i)*4)
+#define NVC597_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED 0:0
+#define NVC597_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_FALSE 0x00000000
+#define NVC597_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_TRUE 0x00000001
+
+#define NVC597_SET_VERTEX_STREAM_INSTANCE_B(i) (0x18c0+(i)*4)
+#define NVC597_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED 0:0
+#define NVC597_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_FALSE 0x00000000
+#define NVC597_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_TRUE 0x00000001
+
+#define NVC597_SET_ATTRIBUTE_POINT_SIZE 0x1910
+#define NVC597_SET_ATTRIBUTE_POINT_SIZE_ENABLE 0:0
+#define NVC597_SET_ATTRIBUTE_POINT_SIZE_ENABLE_FALSE 0x00000000
+#define NVC597_SET_ATTRIBUTE_POINT_SIZE_ENABLE_TRUE 0x00000001
+#define NVC597_SET_ATTRIBUTE_POINT_SIZE_SLOT 11:4
+
+#define NVC597_OGL_SET_CULL 0x1918
+#define NVC597_OGL_SET_CULL_ENABLE 0:0
+#define NVC597_OGL_SET_CULL_ENABLE_FALSE 0x00000000
+#define NVC597_OGL_SET_CULL_ENABLE_TRUE 0x00000001
+
+#define NVC597_OGL_SET_FRONT_FACE 0x191c
+#define NVC597_OGL_SET_FRONT_FACE_V 31:0
+#define NVC597_OGL_SET_FRONT_FACE_V_CW 0x00000900
+#define NVC597_OGL_SET_FRONT_FACE_V_CCW 0x00000901
+
+#define NVC597_OGL_SET_CULL_FACE 0x1920
+#define NVC597_OGL_SET_CULL_FACE_V 31:0
+#define NVC597_OGL_SET_CULL_FACE_V_FRONT 0x00000404
+#define NVC597_OGL_SET_CULL_FACE_V_BACK 0x00000405
+#define NVC597_OGL_SET_CULL_FACE_V_FRONT_AND_BACK 0x00000408
+
+#define NVC597_SET_VIEWPORT_PIXEL 0x1924
+#define NVC597_SET_VIEWPORT_PIXEL_CENTER 0:0
+#define NVC597_SET_VIEWPORT_PIXEL_CENTER_AT_HALF_INTEGERS 0x00000000
+#define NVC597_SET_VIEWPORT_PIXEL_CENTER_AT_INTEGERS 0x00000001
+
+#define NVC597_SET_VIEWPORT_SCALE_OFFSET 0x192c
+#define NVC597_SET_VIEWPORT_SCALE_OFFSET_ENABLE 0:0
+#define NVC597_SET_VIEWPORT_SCALE_OFFSET_ENABLE_FALSE 0x00000000
+#define NVC597_SET_VIEWPORT_SCALE_OFFSET_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL 0x193c
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE 0:0
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_FALSE 0x00000000
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_TRUE 0x00000001
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE 17:16
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_USE_FIELD_MIN_Z_ZERO_MAX_Z_ONE 0x00000000
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_MIN_Z_MAX_Z 0x00000001
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_ZERO_ONE 0x00000002
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_MINUS_INF_PLUS_INF 0x00000003
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z 3:3
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLIP 0x00000000
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLAMP 0x00000001
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z 4:4
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLIP 0x00000000
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLAMP 0x00000001
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND 7:7
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_256 0x00000000
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_1 0x00000001
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND 10:10
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_256 0x00000000
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_1 0x00000001
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP 13:11
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP 0x00000000
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_PASSTHRU 0x00000001
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XY_CLIP 0x00000002
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XYZ_CLIP 0x00000003
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP_NO_Z_CULL 0x00000004
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_Z_CLIP 0x00000005
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_TRI_FILL_OR_CLIP 0x00000006
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z 2:1
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SAME_AS_XY_GUARDBAND 0x00000000
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_256 0x00000001
+#define NVC597_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_1 0x00000002
+
+#define NVC597_SET_USER_CLIP_OP 0x1940
+#define NVC597_SET_USER_CLIP_OP_PLANE0 0:0
+#define NVC597_SET_USER_CLIP_OP_PLANE0_CLIP 0x00000000
+#define NVC597_SET_USER_CLIP_OP_PLANE0_CULL 0x00000001
+#define NVC597_SET_USER_CLIP_OP_PLANE1 4:4
+#define NVC597_SET_USER_CLIP_OP_PLANE1_CLIP 0x00000000
+#define NVC597_SET_USER_CLIP_OP_PLANE1_CULL 0x00000001
+#define NVC597_SET_USER_CLIP_OP_PLANE2 8:8
+#define NVC597_SET_USER_CLIP_OP_PLANE2_CLIP 0x00000000
+#define NVC597_SET_USER_CLIP_OP_PLANE2_CULL 0x00000001
+#define NVC597_SET_USER_CLIP_OP_PLANE3 12:12
+#define NVC597_SET_USER_CLIP_OP_PLANE3_CLIP 0x00000000
+#define NVC597_SET_USER_CLIP_OP_PLANE3_CULL 0x00000001
+#define NVC597_SET_USER_CLIP_OP_PLANE4 16:16
+#define NVC597_SET_USER_CLIP_OP_PLANE4_CLIP 0x00000000
+#define NVC597_SET_USER_CLIP_OP_PLANE4_CULL 0x00000001
+#define NVC597_SET_USER_CLIP_OP_PLANE5 20:20
+#define NVC597_SET_USER_CLIP_OP_PLANE5_CLIP 0x00000000
+#define NVC597_SET_USER_CLIP_OP_PLANE5_CULL 0x00000001
+#define NVC597_SET_USER_CLIP_OP_PLANE6 24:24
+#define NVC597_SET_USER_CLIP_OP_PLANE6_CLIP 0x00000000
+#define NVC597_SET_USER_CLIP_OP_PLANE6_CULL 0x00000001
+#define NVC597_SET_USER_CLIP_OP_PLANE7 28:28
+#define NVC597_SET_USER_CLIP_OP_PLANE7_CLIP 0x00000000
+#define NVC597_SET_USER_CLIP_OP_PLANE7_CULL 0x00000001
+
+#define NVC597_SET_RENDER_ENABLE_OVERRIDE 0x1944
+#define NVC597_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0
+#define NVC597_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000
+#define NVC597_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001
+#define NVC597_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002
+
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_CONTROL 0x1948
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE 0:0
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_TOPOLOGY_IN_BEGIN_METHODS 0x00000000
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_SEPARATE_TOPOLOGY_STATE 0x00000001
+
+#define NVC597_SET_WINDOW_CLIP_ENABLE 0x194c
+#define NVC597_SET_WINDOW_CLIP_ENABLE_V 0:0
+#define NVC597_SET_WINDOW_CLIP_ENABLE_V_FALSE 0x00000000
+#define NVC597_SET_WINDOW_CLIP_ENABLE_V_TRUE 0x00000001
+
+#define NVC597_SET_WINDOW_CLIP_TYPE 0x1950
+#define NVC597_SET_WINDOW_CLIP_TYPE_V 1:0
+#define NVC597_SET_WINDOW_CLIP_TYPE_V_INCLUSIVE 0x00000000
+#define NVC597_SET_WINDOW_CLIP_TYPE_V_EXCLUSIVE 0x00000001
+#define NVC597_SET_WINDOW_CLIP_TYPE_V_CLIPALL 0x00000002
+
+#define NVC597_INVALIDATE_ZCULL 0x1958
+#define NVC597_INVALIDATE_ZCULL_V 31:0
+#define NVC597_INVALIDATE_ZCULL_V_INVALIDATE 0x00000000
+
+#define NVC597_SET_ZCULL 0x1968
+#define NVC597_SET_ZCULL_Z_ENABLE 0:0
+#define NVC597_SET_ZCULL_Z_ENABLE_FALSE 0x00000000
+#define NVC597_SET_ZCULL_Z_ENABLE_TRUE 0x00000001
+#define NVC597_SET_ZCULL_STENCIL_ENABLE 4:4
+#define NVC597_SET_ZCULL_STENCIL_ENABLE_FALSE 0x00000000
+#define NVC597_SET_ZCULL_STENCIL_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_ZCULL_BOUNDS 0x196c
+#define NVC597_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE 0:0
+#define NVC597_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVC597_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_TRUE 0x00000001
+#define NVC597_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE 4:4
+#define NVC597_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVC597_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_PRIMITIVE_TOPOLOGY 0x1970
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V 15:0
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_POINTLIST 0x00000001
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LINELIST 0x00000002
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP 0x00000003
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST 0x00000004
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP 0x00000005
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LINELIST_ADJCY 0x0000000A
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP_ADJCY 0x0000000B
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_PATCHLIST 0x0000000E
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_POINTS 0x00001001
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST 0x00001002
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST 0x00001003
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST 0x0000100F
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINESTRIP 0x00001010
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINESTRIP 0x00001011
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLELIST 0x00001012
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLESTRIP 0x00001013
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLESTRIP 0x00001014
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN 0x00001015
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLEFAN 0x00001016
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN_IMM 0x00001017
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST_IMM 0x00001018
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST2 0x0000101A
+#define NVC597_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST2 0x0000101B
+
+#define NVC597_ZCULL_SYNC 0x1978
+#define NVC597_ZCULL_SYNC_V 31:0
+
+#define NVC597_SET_CLIP_ID_TEST 0x197c
+#define NVC597_SET_CLIP_ID_TEST_ENABLE 0:0
+#define NVC597_SET_CLIP_ID_TEST_ENABLE_FALSE 0x00000000
+#define NVC597_SET_CLIP_ID_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_SURFACE_CLIP_ID_WIDTH 0x1980
+#define NVC597_SET_SURFACE_CLIP_ID_WIDTH_V 31:0
+
+#define NVC597_SET_CLIP_ID 0x1984
+#define NVC597_SET_CLIP_ID_V 31:0
+
+#define NVC597_SET_DEPTH_BOUNDS_TEST 0x19bc
+#define NVC597_SET_DEPTH_BOUNDS_TEST_ENABLE 0:0
+#define NVC597_SET_DEPTH_BOUNDS_TEST_ENABLE_FALSE 0x00000000
+#define NVC597_SET_DEPTH_BOUNDS_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_BLEND_FLOAT_OPTION 0x19c0
+#define NVC597_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO 0:0
+#define NVC597_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_FALSE 0x00000000
+#define NVC597_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_TRUE 0x00000001
+
+#define NVC597_SET_LOGIC_OP 0x19c4
+#define NVC597_SET_LOGIC_OP_ENABLE 0:0
+#define NVC597_SET_LOGIC_OP_ENABLE_FALSE 0x00000000
+#define NVC597_SET_LOGIC_OP_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_LOGIC_OP_FUNC 0x19c8
+#define NVC597_SET_LOGIC_OP_FUNC_V 31:0
+#define NVC597_SET_LOGIC_OP_FUNC_V_CLEAR 0x00001500
+#define NVC597_SET_LOGIC_OP_FUNC_V_AND 0x00001501
+#define NVC597_SET_LOGIC_OP_FUNC_V_AND_REVERSE 0x00001502
+#define NVC597_SET_LOGIC_OP_FUNC_V_COPY 0x00001503
+#define NVC597_SET_LOGIC_OP_FUNC_V_AND_INVERTED 0x00001504
+#define NVC597_SET_LOGIC_OP_FUNC_V_NOOP 0x00001505
+#define NVC597_SET_LOGIC_OP_FUNC_V_XOR 0x00001506
+#define NVC597_SET_LOGIC_OP_FUNC_V_OR 0x00001507
+#define NVC597_SET_LOGIC_OP_FUNC_V_NOR 0x00001508
+#define NVC597_SET_LOGIC_OP_FUNC_V_EQUIV 0x00001509
+#define NVC597_SET_LOGIC_OP_FUNC_V_INVERT 0x0000150A
+#define NVC597_SET_LOGIC_OP_FUNC_V_OR_REVERSE 0x0000150B
+#define NVC597_SET_LOGIC_OP_FUNC_V_COPY_INVERTED 0x0000150C
+#define NVC597_SET_LOGIC_OP_FUNC_V_OR_INVERTED 0x0000150D
+#define NVC597_SET_LOGIC_OP_FUNC_V_NAND 0x0000150E
+#define NVC597_SET_LOGIC_OP_FUNC_V_SET 0x0000150F
+
+#define NVC597_SET_Z_COMPRESSION 0x19cc
+#define NVC597_SET_Z_COMPRESSION_ENABLE 0:0
+#define NVC597_SET_Z_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVC597_SET_Z_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVC597_CLEAR_SURFACE 0x19d0
+#define NVC597_CLEAR_SURFACE_Z_ENABLE 0:0
+#define NVC597_CLEAR_SURFACE_Z_ENABLE_FALSE 0x00000000
+#define NVC597_CLEAR_SURFACE_Z_ENABLE_TRUE 0x00000001
+#define NVC597_CLEAR_SURFACE_STENCIL_ENABLE 1:1
+#define NVC597_CLEAR_SURFACE_STENCIL_ENABLE_FALSE 0x00000000
+#define NVC597_CLEAR_SURFACE_STENCIL_ENABLE_TRUE 0x00000001
+#define NVC597_CLEAR_SURFACE_R_ENABLE 2:2
+#define NVC597_CLEAR_SURFACE_R_ENABLE_FALSE 0x00000000
+#define NVC597_CLEAR_SURFACE_R_ENABLE_TRUE 0x00000001
+#define NVC597_CLEAR_SURFACE_G_ENABLE 3:3
+#define NVC597_CLEAR_SURFACE_G_ENABLE_FALSE 0x00000000
+#define NVC597_CLEAR_SURFACE_G_ENABLE_TRUE 0x00000001
+#define NVC597_CLEAR_SURFACE_B_ENABLE 4:4
+#define NVC597_CLEAR_SURFACE_B_ENABLE_FALSE 0x00000000
+#define NVC597_CLEAR_SURFACE_B_ENABLE_TRUE 0x00000001
+#define NVC597_CLEAR_SURFACE_A_ENABLE 5:5
+#define NVC597_CLEAR_SURFACE_A_ENABLE_FALSE 0x00000000
+#define NVC597_CLEAR_SURFACE_A_ENABLE_TRUE 0x00000001
+#define NVC597_CLEAR_SURFACE_MRT_SELECT 9:6
+#define NVC597_CLEAR_SURFACE_RT_ARRAY_INDEX 25:10
+
+#define NVC597_CLEAR_CLIP_ID_SURFACE 0x19d4
+#define NVC597_CLEAR_CLIP_ID_SURFACE_V 31:0
+
+#define NVC597_SET_COLOR_COMPRESSION(i) (0x19e0+(i)*4)
+#define NVC597_SET_COLOR_COMPRESSION_ENABLE 0:0
+#define NVC597_SET_COLOR_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVC597_SET_COLOR_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_CT_WRITE(i) (0x1a00+(i)*4)
+#define NVC597_SET_CT_WRITE_R_ENABLE 0:0
+#define NVC597_SET_CT_WRITE_R_ENABLE_FALSE 0x00000000
+#define NVC597_SET_CT_WRITE_R_ENABLE_TRUE 0x00000001
+#define NVC597_SET_CT_WRITE_G_ENABLE 4:4
+#define NVC597_SET_CT_WRITE_G_ENABLE_FALSE 0x00000000
+#define NVC597_SET_CT_WRITE_G_ENABLE_TRUE 0x00000001
+#define NVC597_SET_CT_WRITE_B_ENABLE 8:8
+#define NVC597_SET_CT_WRITE_B_ENABLE_FALSE 0x00000000
+#define NVC597_SET_CT_WRITE_B_ENABLE_TRUE 0x00000001
+#define NVC597_SET_CT_WRITE_A_ENABLE 12:12
+#define NVC597_SET_CT_WRITE_A_ENABLE_FALSE 0x00000000
+#define NVC597_SET_CT_WRITE_A_ENABLE_TRUE 0x00000001
+
+#define NVC597_PIPE_NOP 0x1a2c
+#define NVC597_PIPE_NOP_V 31:0
+
+#define NVC597_SET_SPARE00 0x1a30
+#define NVC597_SET_SPARE00_V 31:0
+
+#define NVC597_SET_SPARE01 0x1a34
+#define NVC597_SET_SPARE01_V 31:0
+
+#define NVC597_SET_SPARE02 0x1a38
+#define NVC597_SET_SPARE02_V 31:0
+
+#define NVC597_SET_SPARE03 0x1a3c
+#define NVC597_SET_SPARE03_V 31:0
+
+#define NVC597_SET_REPORT_SEMAPHORE_A 0x1b00
+#define NVC597_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0
+
+#define NVC597_SET_REPORT_SEMAPHORE_B 0x1b04
+#define NVC597_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0
+
+#define NVC597_SET_REPORT_SEMAPHORE_C 0x1b08
+#define NVC597_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0
+
+#define NVC597_SET_REPORT_SEMAPHORE_D 0x1b0c
+#define NVC597_SET_REPORT_SEMAPHORE_D_OPERATION 1:0
+#define NVC597_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000
+#define NVC597_SET_REPORT_SEMAPHORE_D_OPERATION_ACQUIRE 0x00000001
+#define NVC597_SET_REPORT_SEMAPHORE_D_OPERATION_REPORT_ONLY 0x00000002
+#define NVC597_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003
+#define NVC597_SET_REPORT_SEMAPHORE_D_RELEASE 4:4
+#define NVC597_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_READS_COMPLETE 0x00000000
+#define NVC597_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_WRITES_COMPLETE 0x00000001
+#define NVC597_SET_REPORT_SEMAPHORE_D_ACQUIRE 8:8
+#define NVC597_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_WRITES_START 0x00000000
+#define NVC597_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_READS_START 0x00000001
+#define NVC597_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION 15:12
+#define NVC597_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_NONE 0x00000000
+#define NVC597_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DATA_ASSEMBLER 0x00000001
+#define NVC597_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VERTEX_SHADER 0x00000002
+#define NVC597_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_INIT_SHADER 0x00000008
+#define NVC597_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_SHADER 0x00000009
+#define NVC597_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_GEOMETRY_SHADER 0x00000006
+#define NVC597_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_STREAMING_OUTPUT 0x00000005
+#define NVC597_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VPC 0x00000004
+#define NVC597_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ZCULL 0x00000007
+#define NVC597_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_PIXEL_SHADER 0x0000000A
+#define NVC597_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DEPTH_TEST 0x0000000C
+#define NVC597_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ALL 0x0000000F
+#define NVC597_SET_REPORT_SEMAPHORE_D_COMPARISON 16:16
+#define NVC597_SET_REPORT_SEMAPHORE_D_COMPARISON_EQ 0x00000000
+#define NVC597_SET_REPORT_SEMAPHORE_D_COMPARISON_GE 0x00000001
+#define NVC597_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20
+#define NVC597_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000
+#define NVC597_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT 27:23
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_NONE 0x00000000
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_DA_VERTICES_GENERATED 0x00000001
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_DA_PRIMITIVES_GENERATED 0x00000003
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_VS_INVOCATIONS 0x00000005
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_TI_INVOCATIONS 0x0000001B
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_TS_INVOCATIONS 0x0000001D
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_TS_PRIMITIVES_GENERATED 0x0000001F
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_GS_INVOCATIONS 0x00000007
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_GS_PRIMITIVES_GENERATED 0x00000009
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_ALPHA_BETA_CLOCKS 0x00000004
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_SCG_CLOCKS 0x00000008
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_VTG_PRIMITIVES_OUT 0x00000012
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x0000001E
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_SUCCEEDED 0x0000000B
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED 0x0000000D
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000006
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_BYTE_COUNT 0x0000001A
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_INVOCATIONS 0x0000000F
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_PRIMITIVES_GENERATED 0x00000011
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS0 0x0000000A
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS1 0x0000000C
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS2 0x0000000E
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS3 0x00000010
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_PS_INVOCATIONS 0x00000013
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT 0x00000002
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT64 0x00000015
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_TILED_ZPASS_PIXEL_CNT64 0x00000017
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_COLOR_TARGET 0x00000018
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_ZETA_TARGET 0x00000019
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_BOUNDING_RECTANGLE 0x0000001C
+#define NVC597_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28
+#define NVC597_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000
+#define NVC597_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001
+#define NVC597_SET_REPORT_SEMAPHORE_D_SUB_REPORT 7:5
+#define NVC597_SET_REPORT_SEMAPHORE_D_REPORT_DWORD_NUMBER 21:21
+#define NVC597_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2
+#define NVC597_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000
+#define NVC597_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001
+#define NVC597_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3
+#define NVC597_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000
+#define NVC597_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001
+#define NVC597_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9
+#define NVC597_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000
+#define NVC597_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001
+#define NVC597_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002
+#define NVC597_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003
+#define NVC597_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004
+#define NVC597_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005
+#define NVC597_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006
+#define NVC597_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007
+#define NVC597_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17
+#define NVC597_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000
+#define NVC597_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001
+#define NVC597_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP 19:19
+#define NVC597_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_FALSE 0x00000000
+#define NVC597_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_TRUE 0x00000001
+
+#define NVC597_SET_VERTEX_STREAM_A_FORMAT(j) (0x1c00+(j)*16)
+#define NVC597_SET_VERTEX_STREAM_A_FORMAT_STRIDE 11:0
+#define NVC597_SET_VERTEX_STREAM_A_FORMAT_ENABLE 12:12
+#define NVC597_SET_VERTEX_STREAM_A_FORMAT_ENABLE_FALSE 0x00000000
+#define NVC597_SET_VERTEX_STREAM_A_FORMAT_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_VERTEX_STREAM_A_LOCATION_A(j) (0x1c04+(j)*16)
+#define NVC597_SET_VERTEX_STREAM_A_LOCATION_A_OFFSET_UPPER 7:0
+
+#define NVC597_SET_VERTEX_STREAM_A_LOCATION_B(j) (0x1c08+(j)*16)
+#define NVC597_SET_VERTEX_STREAM_A_LOCATION_B_OFFSET_LOWER 31:0
+
+#define NVC597_SET_VERTEX_STREAM_A_FREQUENCY(j) (0x1c0c+(j)*16)
+#define NVC597_SET_VERTEX_STREAM_A_FREQUENCY_V 31:0
+
+#define NVC597_SET_VERTEX_STREAM_B_FORMAT(j) (0x1d00+(j)*16)
+#define NVC597_SET_VERTEX_STREAM_B_FORMAT_STRIDE 11:0
+#define NVC597_SET_VERTEX_STREAM_B_FORMAT_ENABLE 12:12
+#define NVC597_SET_VERTEX_STREAM_B_FORMAT_ENABLE_FALSE 0x00000000
+#define NVC597_SET_VERTEX_STREAM_B_FORMAT_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_VERTEX_STREAM_B_LOCATION_A(j) (0x1d04+(j)*16)
+#define NVC597_SET_VERTEX_STREAM_B_LOCATION_A_OFFSET_UPPER 7:0
+
+#define NVC597_SET_VERTEX_STREAM_B_LOCATION_B(j) (0x1d08+(j)*16)
+#define NVC597_SET_VERTEX_STREAM_B_LOCATION_B_OFFSET_LOWER 31:0
+
+#define NVC597_SET_VERTEX_STREAM_B_FREQUENCY(j) (0x1d0c+(j)*16)
+#define NVC597_SET_VERTEX_STREAM_B_FREQUENCY_V 31:0
+
+#define NVC597_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA(j) (0x1e00+(j)*32)
+#define NVC597_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE 0:0
+#define NVC597_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000
+#define NVC597_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_OP(j) (0x1e04+(j)*32)
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_OP_V 31:0
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MIN 0x00008007
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MAX 0x00008008
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_ADD 0x00000001
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_SUBTRACT 0x00000002
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MIN 0x00000004
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MAX 0x00000005
+
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF(j) (0x1e08+(j)*32)
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V 31:0
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF(j) (0x1e0c+(j)*32)
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V 31:0
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC597_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_OP(j) (0x1e10+(j)*32) +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_OP_V 31:0 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MIN 0x00008007 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MAX 0x00008008 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_ADD 0x00000001 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MIN 0x00000004 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MAX 0x00000005 + +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF(j) (0x1e14+(j)*32) +#define 
NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V 31:0 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF(j) (0x1e18+(j)*32) +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V 31:0 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define 
NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC597_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC597_SET_PIPELINE_SHADER(j) (0x2000+(j)*64) +#define NVC597_SET_PIPELINE_SHADER_ENABLE 0:0 +#define NVC597_SET_PIPELINE_SHADER_ENABLE_FALSE 0x00000000 +#define NVC597_SET_PIPELINE_SHADER_ENABLE_TRUE 0x00000001 +#define NVC597_SET_PIPELINE_SHADER_TYPE 7:4 +#define NVC597_SET_PIPELINE_SHADER_TYPE_VERTEX_CULL_BEFORE_FETCH 0x00000000 +#define NVC597_SET_PIPELINE_SHADER_TYPE_VERTEX 0x00000001 +#define NVC597_SET_PIPELINE_SHADER_TYPE_TESSELLATION_INIT 0x00000002 +#define NVC597_SET_PIPELINE_SHADER_TYPE_TESSELLATION 0x00000003 +#define NVC597_SET_PIPELINE_SHADER_TYPE_GEOMETRY 0x00000004 +#define 
NVC597_SET_PIPELINE_SHADER_TYPE_PIXEL 0x00000005 + +#define NVC597_SET_PIPELINE_RESERVED_A(j) (0x2008+(j)*64) +#define NVC597_SET_PIPELINE_RESERVED_A_V 0:0 + +#define NVC597_SET_PIPELINE_REGISTER_COUNT(j) (0x200c+(j)*64) +#define NVC597_SET_PIPELINE_REGISTER_COUNT_V 8:0 + +#define NVC597_SET_PIPELINE_BINDING(j) (0x2010+(j)*64) +#define NVC597_SET_PIPELINE_BINDING_GROUP 2:0 + +#define NVC597_SET_PIPELINE_PROGRAM_ADDRESS_A(j) (0x2014+(j)*64) +#define NVC597_SET_PIPELINE_PROGRAM_ADDRESS_A_UPPER 7:0 + +#define NVC597_SET_PIPELINE_PROGRAM_ADDRESS_B(j) (0x2018+(j)*64) +#define NVC597_SET_PIPELINE_PROGRAM_ADDRESS_B_LOWER 31:0 + +#define NVC597_SET_PIPELINE_RESERVED_D(j) (0x201c+(j)*64) +#define NVC597_SET_PIPELINE_RESERVED_D_V 0:0 + +#define NVC597_SET_PIPELINE_RESERVED_E(j) (0x2020+(j)*64) +#define NVC597_SET_PIPELINE_RESERVED_E_V 0:0 + +#define NVC597_SET_FALCON00 0x2300 +#define NVC597_SET_FALCON00_V 31:0 + +#define NVC597_SET_FALCON01 0x2304 +#define NVC597_SET_FALCON01_V 31:0 + +#define NVC597_SET_FALCON02 0x2308 +#define NVC597_SET_FALCON02_V 31:0 + +#define NVC597_SET_FALCON03 0x230c +#define NVC597_SET_FALCON03_V 31:0 + +#define NVC597_SET_FALCON04 0x2310 +#define NVC597_SET_FALCON04_V 31:0 + +#define NVC597_SET_FALCON05 0x2314 +#define NVC597_SET_FALCON05_V 31:0 + +#define NVC597_SET_FALCON06 0x2318 +#define NVC597_SET_FALCON06_V 31:0 + +#define NVC597_SET_FALCON07 0x231c +#define NVC597_SET_FALCON07_V 31:0 + +#define NVC597_SET_FALCON08 0x2320 +#define NVC597_SET_FALCON08_V 31:0 + +#define NVC597_SET_FALCON09 0x2324 +#define NVC597_SET_FALCON09_V 31:0 + +#define NVC597_SET_FALCON10 0x2328 +#define NVC597_SET_FALCON10_V 31:0 + +#define NVC597_SET_FALCON11 0x232c +#define NVC597_SET_FALCON11_V 31:0 + +#define NVC597_SET_FALCON12 0x2330 +#define NVC597_SET_FALCON12_V 31:0 + +#define NVC597_SET_FALCON13 0x2334 +#define NVC597_SET_FALCON13_V 31:0 + +#define NVC597_SET_FALCON14 0x2338 +#define NVC597_SET_FALCON14_V 31:0 + +#define NVC597_SET_FALCON15 0x233c +#define NVC597_SET_FALCON15_V 31:0 + +#define NVC597_SET_FALCON16 0x2340 +#define NVC597_SET_FALCON16_V 31:0 + +#define NVC597_SET_FALCON17 0x2344 +#define NVC597_SET_FALCON17_V 31:0 + +#define NVC597_SET_FALCON18 0x2348 +#define NVC597_SET_FALCON18_V 31:0 + +#define NVC597_SET_FALCON19 0x234c +#define NVC597_SET_FALCON19_V 31:0 + +#define NVC597_SET_FALCON20 0x2350 +#define NVC597_SET_FALCON20_V 31:0 + +#define NVC597_SET_FALCON21 0x2354 +#define NVC597_SET_FALCON21_V 31:0 + +#define NVC597_SET_FALCON22 0x2358 +#define NVC597_SET_FALCON22_V 31:0 + +#define NVC597_SET_FALCON23 0x235c +#define NVC597_SET_FALCON23_V 31:0 + +#define NVC597_SET_FALCON24 0x2360 +#define NVC597_SET_FALCON24_V 31:0 + +#define NVC597_SET_FALCON25 0x2364 +#define NVC597_SET_FALCON25_V 31:0 + +#define NVC597_SET_FALCON26 0x2368 +#define NVC597_SET_FALCON26_V 31:0 + +#define NVC597_SET_FALCON27 0x236c +#define NVC597_SET_FALCON27_V 31:0 + +#define NVC597_SET_FALCON28 0x2370 +#define NVC597_SET_FALCON28_V 31:0 + +#define NVC597_SET_FALCON29 0x2374 +#define NVC597_SET_FALCON29_V 31:0 + +#define NVC597_SET_FALCON30 0x2378 +#define NVC597_SET_FALCON30_V 31:0 + +#define NVC597_SET_FALCON31 0x237c +#define NVC597_SET_FALCON31_V 31:0 + +#define NVC597_SET_CONSTANT_BUFFER_SELECTOR_A 0x2380 +#define NVC597_SET_CONSTANT_BUFFER_SELECTOR_A_SIZE 16:0 + +#define NVC597_SET_CONSTANT_BUFFER_SELECTOR_B 0x2384 +#define NVC597_SET_CONSTANT_BUFFER_SELECTOR_B_ADDRESS_UPPER 7:0 + +#define NVC597_SET_CONSTANT_BUFFER_SELECTOR_C 0x2388 +#define 
NVC597_SET_CONSTANT_BUFFER_SELECTOR_C_ADDRESS_LOWER 31:0 + +#define NVC597_LOAD_CONSTANT_BUFFER_OFFSET 0x238c +#define NVC597_LOAD_CONSTANT_BUFFER_OFFSET_V 15:0 + +#define NVC597_LOAD_CONSTANT_BUFFER(i) (0x2390+(i)*4) +#define NVC597_LOAD_CONSTANT_BUFFER_V 31:0 + +#define NVC597_BIND_GROUP_RESERVED_A(j) (0x2400+(j)*32) +#define NVC597_BIND_GROUP_RESERVED_A_V 0:0 + +#define NVC597_BIND_GROUP_RESERVED_B(j) (0x2404+(j)*32) +#define NVC597_BIND_GROUP_RESERVED_B_V 0:0 + +#define NVC597_BIND_GROUP_RESERVED_C(j) (0x2408+(j)*32) +#define NVC597_BIND_GROUP_RESERVED_C_V 0:0 + +#define NVC597_BIND_GROUP_RESERVED_D(j) (0x240c+(j)*32) +#define NVC597_BIND_GROUP_RESERVED_D_V 0:0 + +#define NVC597_BIND_GROUP_CONSTANT_BUFFER(j) (0x2410+(j)*32) +#define NVC597_BIND_GROUP_CONSTANT_BUFFER_VALID 0:0 +#define NVC597_BIND_GROUP_CONSTANT_BUFFER_VALID_FALSE 0x00000000 +#define NVC597_BIND_GROUP_CONSTANT_BUFFER_VALID_TRUE 0x00000001 +#define NVC597_BIND_GROUP_CONSTANT_BUFFER_SHADER_SLOT 8:4 + +#define NVC597_SET_TRAP_HANDLER_A 0x25f8 +#define NVC597_SET_TRAP_HANDLER_A_ADDRESS_UPPER 16:0 + +#define NVC597_SET_TRAP_HANDLER_B 0x25fc +#define NVC597_SET_TRAP_HANDLER_B_ADDRESS_LOWER 31:0 + +#define NVC597_SET_COLOR_CLAMP 0x2600 +#define NVC597_SET_COLOR_CLAMP_ENABLE 0:0 +#define NVC597_SET_COLOR_CLAMP_ENABLE_FALSE 0x00000000 +#define NVC597_SET_COLOR_CLAMP_ENABLE_TRUE 0x00000001 + +#define NVC597_SET_BINDLESS_TEXTURE 0x2608 +#define NVC597_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 4:0 + +#define NVC597_SET_STREAM_OUT_LAYOUT_SELECT(i,j) (0x2800+(i)*128+(j)*4) +#define NVC597_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER00 7:0 +#define NVC597_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER01 15:8 +#define NVC597_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER02 23:16 +#define NVC597_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER03 31:24 + +#define NVC597_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE(i) (0x32f4+(i)*4) +#define NVC597_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_V 31:0 + +#define NVC597_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER(i) (0x3314+(i)*4) +#define NVC597_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC597_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3334 +#define NVC597_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC597_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3338 +#define NVC597_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4) +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 
+#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22
+#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25
+#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27
+#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30
+
+#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4)
+#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0
+#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1
+#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3
+#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4
+
+#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc
+#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0
+
+#define NVC597_START_SHADER_PERFORMANCE_COUNTER 0x33e0
+#define NVC597_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVC597_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4
+#define NVC597_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER 0x33e8
+#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER_V 31:0
+
+#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER 0x33ec
+#define NVC597_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER_V 31:0
+
+#define NVC597_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4)
+#define NVC597_SET_MME_SHADOW_SCRATCH_V 31:0
+
+#define NVC597_CALL_MME_MACRO(j) (0x3800+(j)*8)
+#define NVC597_CALL_MME_MACRO_V 31:0
+
+#define NVC597_CALL_MME_DATA(j) (0x3804+(j)*8)
+#define NVC597_CALL_MME_DATA_V 31:0
+
+#endif /* _cl_turing_a_h_ */
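Throughout these class headers, a field is published as a HIGH:LOW bit-range pair (NVC597_SET_REPORT_SEMAPHORE_D_REPORT above is bits 27:23 of the method data word) and the named constants under it are enumerants for that field. The range token is meant to be split with the conditional-operator trick used by the DRF macro family in nvmisc.h. Below is a minimal standalone sketch of that mechanism, assuming the Turing class header above is included; the FIELD_* helpers and report_semaphore_d_zpass64() are illustrative names, not part of the SDK.

    #include <stdint.h>

    /* "1 ? 27:23" parses as a conditional expression and yields 27;
     * "0 ? 27:23" yields 23. A single H:L token thus encodes both
     * endpoints of the bit range. */
    #define FIELD_HI(range)     (1 ? range)
    #define FIELD_LO(range)     (0 ? range)
    #define FIELD_MASK(range)   (0xFFFFFFFFu >> (31 - FIELD_HI(range) + FIELD_LO(range)))
    /* Place a value into a field, or pull it back out of a data word. */
    #define FIELD_NUM(range, n) (((uint32_t)(n) & FIELD_MASK(range)) << FIELD_LO(range))
    #define FIELD_VAL(range, v) (((uint32_t)(v) >> FIELD_LO(range)) & FIELD_MASK(range))

    /* Example: assemble the SET_REPORT_SEMAPHORE_D data word requesting a
     * 64-bit ZPASS pixel count delivered as a one-word report. */
    static inline uint32_t report_semaphore_d_zpass64(void)
    {
        return FIELD_NUM(NVC597_SET_REPORT_SEMAPHORE_D_REPORT,
                         NVC597_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT64)
             | FIELD_NUM(NVC597_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE,
                         NVC597_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD);
    }

The same pattern applies to every H:L define in the surrounding headers, including the display capability fields in the clc673.h hunk further below.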
diff --git a/src/common/sdk/nvidia/inc/class/clc637.h b/src/common/sdk/nvidia/inc/class/clc637.h
new file mode 100644
index 0000000..2f8ec0c
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc637.h
@@ -0,0 +1,62 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/clc637.finn
+//
+
+#define AMPERE_SMC_PARTITION_REF (0xc637U) /* finn: Evaluated from "NVC637_ALLOCATION_PARAMETERS_MESSAGE_ID" */
+
+//
+// This swizzId can be used by root clients like tools for device level
+// profiling
+//
+#define NVC637_DEVICE_PROFILING_SWIZZID (0xFFFFFFFE)
+
+//
+// TODO: Deprecate NVC637_DEVICE_LEVEL_SWIZZID once all the clients are moved to
+// NVC637_DEVICE_PROFILING_SWIZZID
+//
+#define NVC637_DEVICE_LEVEL_SWIZZID NVC637_DEVICE_PROFILING_SWIZZID
+
+/* NvRmAlloc parameters */
+#define NVC637_ALLOCATION_PARAMETERS_MESSAGE_ID (0xc637U)
+
+typedef struct NVC637_ALLOCATION_PARAMETERS {
+    //
+    // capDescriptor is a file descriptor for unix RM clients, but a void
+    // pointer for windows RM clients.
+    //
+    // capDescriptor is transparent to RM clients i.e. RM's user-mode shim
+    // populates this field on behalf of clients.
+    //
+    NV_DECLARE_ALIGNED(NvU64 capDescriptor, 8);
+
+    NvU32 swizzId;
+} NVC637_ALLOCATION_PARAMETERS;
+
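A short usage sketch (illustrative, not part of the patch): a root client that wants the device-level profiling view rather than a real SMC partition passes the reserved swizzId above. Only the class constant, the parameter struct, and the swizzId value come from clc637.h; the rm_alloc() wrapper is a hypothetical stand-in for the SDK's NvRmAlloc-style allocation entry point, and the parent being a subdevice handle is an assumption.

    #include <string.h>

    /* Hypothetical RM allocation wrapper; the real prototype lives in the SDK. */
    int rm_alloc(NvU32 hClient, NvU32 hParent, NvU32 hObject,
                 NvU32 hClass, void *pAllocParams);

    int alloc_device_profiling_ref(NvU32 hClient, NvU32 hSubdevice, NvU32 hNew)
    {
        NVC637_ALLOCATION_PARAMETERS params;
        memset(&params, 0, sizeof(params));
        /* capDescriptor stays zero: per the comment above, the user-mode
         * shim populates it on the client's behalf. */
        params.swizzId = NVC637_DEVICE_PROFILING_SWIZZID;
        return rm_alloc(hClient, hSubdevice, hNew,
                        AMPERE_SMC_PARTITION_REF, &params);
    }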
diff --git a/src/common/sdk/nvidia/inc/class/clc638.h b/src/common/sdk/nvidia/inc/class/clc638.h
new file mode 100644
index 0000000..fc038b3
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc638.h
@@ -0,0 +1,50 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/clc638.finn
+//
+
+#define AMPERE_SMC_EXEC_PARTITION_REF (0xc638U) /* finn: Evaluated from "NVC638_ALLOCATION_PARAMETERS_MESSAGE_ID" */
+
+/* NvRmAlloc parameters */
+#define NVC638_ALLOCATION_PARAMETERS_MESSAGE_ID (0xc638U)
+
+typedef struct NVC638_ALLOCATION_PARAMETERS {
+    //
+    // capDescriptor is a file descriptor for unix RM clients, but a void
+    // pointer for windows RM clients.
+    //
+    // capDescriptor is transparent to RM clients i.e. RM's user-mode shim
+    // populates this field on behalf of clients.
+    //
+    NV_DECLARE_ALIGNED(NvU64 capDescriptor, 8);
+
+    NvU32 execPartitionId;
+} NVC638_ALLOCATION_PARAMETERS;
+
diff --git a/src/common/sdk/nvidia/inc/class/clc661.h b/src/common/sdk/nvidia/inc/class/clc661.h
new file mode 100644
index 0000000..d9f8a48
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc661.h
@@ -0,0 +1,27 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __gh100_clc661_h__
+#define __gh100_clc661_h__
+#define HOPPER_USERMODE_A (0xc661)
+#endif // __gh100_clc661_h__
diff --git a/src/common/sdk/nvidia/inc/class/clc670.h b/src/common/sdk/nvidia/inc/class/clc670.h
new file mode 100644
index 0000000..4828651
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc670.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/clc670.finn
+//
+
+#define NVC670_DISPLAY (0xc670U) /* finn: Evaluated from "NVC670_ALLOCATION_PARAMETERS_MESSAGE_ID" */
+
+#define NVC670_ALLOCATION_PARAMETERS_MESSAGE_ID (0xc670U)
+
+typedef struct NVC670_ALLOCATION_PARAMETERS {
+    NvU32 numHeads; // Number of HEADs in this chip/display
+    NvU32 numSors;  // Number of SORs in this chip/display
+    NvU32 numDsis;  // Number of DSIs in this chip/display
+} NVC670_ALLOCATION_PARAMETERS;
+
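A companion sketch for the display class above, reusing the hypothetical rm_alloc() wrapper from the clc637 note. The head/SOR/DSI counts filled in here are made-up values for illustration; the header itself only establishes that the struct carries those three counts.

    NVC670_ALLOCATION_PARAMETERS dispParams;
    memset(&dispParams, 0, sizeof(dispParams));
    dispParams.numHeads = 4;   /* hypothetical chip configuration */
    dispParams.numSors  = 4;
    dispParams.numDsis  = 0;
    rm_alloc(hClient, hDevice, hDisplay, NVC670_DISPLAY, &dispParams);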
diff --git a/src/common/sdk/nvidia/inc/class/clc671.h b/src/common/sdk/nvidia/inc/class/clc671.h
new file mode 100644
index 0000000..0255c8b
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc671.h
@@ -0,0 +1,124 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _clc671_h_ +#define _clc671_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC671_DISP_SF_USER (0x000C671) + +typedef volatile struct _clc671_tag0 { + NvU32 dispSfUserOffset[0x400]; +} _NvC671DispSfUser, NvC671DispSfUserMap; + +#define NVC671_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */ +#define NVC671_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */ +#define NVC671_SF_HDMI_INFO_CTRL(i,j) (0x000E0000-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC671_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */ +#define NVC671_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x000E0000-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 8 /* */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE 0:0 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER 4:4 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_EN 0x00000001 /* RW--V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE 8:8 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_STATUS(i) (0x000E0004-0x000E0000+(i)*1024) /* R--4A */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_STATUS__SIZE_1 8 /* */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_STATUS_SENT 0:0 /* R-IVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x000E0008-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 8 /* */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x000E000C-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 8 /* */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define 
NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x000E0010-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 8 /* */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x000E0014-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 8 /* */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x000E0018-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 8 /* */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW(i) (0x000E001C-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW__SIZE_1 8 /* */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NVC671_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _clc671_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc673.h b/src/common/sdk/nvidia/inc/class/clc673.h new file mode 100644 index 0000000..b5942aa --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc673.h @@ -0,0 +1,505 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc673_h_ +#define _clc673_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC673_DISP_CAPABILITIES 0xC673 + +typedef volatile struct _clc673_tag0 { + NvU32 dispCapabilities[0x400]; +} _NvC673DispCapabilities,NvC673DispCapabilities_Map ; + + +#define NVC673_SYS_CAP 0x0 /* RW-4R */ +#define NVC673_SYS_CAP_HEAD0_EXISTS 0:0 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD1_EXISTS 1:1 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD2_EXISTS 2:2 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD3_EXISTS 3:3 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD4_EXISTS 4:4 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD5_EXISTS 5:5 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD6_EXISTS 6:6 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD7_EXISTS 7:7 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVC673_SYS_CAP_HEAD_EXISTS__SIZE_1 8 /* */ +#define NVC673_SYS_CAP_HEAD_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR0_EXISTS 8:8 /* RWIVF */ +#define NVC673_SYS_CAP_SOR0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR1_EXISTS 9:9 /* RWIVF */ +#define NVC673_SYS_CAP_SOR1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR2_EXISTS 10:10 /* RWIVF */ +#define 
NVC673_SYS_CAP_SOR2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR3_EXISTS 11:11 /* RWIVF */ +#define NVC673_SYS_CAP_SOR3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR4_EXISTS 12:12 /* RWIVF */ +#define NVC673_SYS_CAP_SOR4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR5_EXISTS 13:13 /* RWIVF */ +#define NVC673_SYS_CAP_SOR5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR6_EXISTS 14:14 /* RWIVF */ +#define NVC673_SYS_CAP_SOR6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR7_EXISTS 15:15 /* RWIVF */ +#define NVC673_SYS_CAP_SOR7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR_EXISTS(i) (8+(i)):(8+(i)) /* RWIVF */ +#define NVC673_SYS_CAP_SOR_EXISTS__SIZE_1 8 /* */ +#define NVC673_SYS_CAP_SOR_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_DSI0_EXISTS 20:20 /* RWIVF */ +#define NVC673_SYS_CAP_DSI0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_DSI0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_DSI1_EXISTS 21:21 /* RWIVF */ +#define NVC673_SYS_CAP_DSI1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_DSI1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_DSI2_EXISTS 22:22 /* RWIVF */ +#define NVC673_SYS_CAP_DSI2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_DSI2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_DSI3_EXISTS 23:23 /* RWIVF */ +#define NVC673_SYS_CAP_DSI3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_DSI3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_DSI_EXISTS(i) (20+(i)):(20+(i)) /* RWIVF */ +#define NVC673_SYS_CAP_DSI_EXISTS__SIZE_1 4 /* */ +#define NVC673_SYS_CAP_DSI_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_DSI_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB 0x4 /* RW-4R */ +#define NVC673_SYS_CAPB_WINDOW0_EXISTS 0:0 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW1_EXISTS 1:1 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW2_EXISTS 2:2 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW3_EXISTS 3:3 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW4_EXISTS 4:4 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW5_EXISTS 5:5 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW6_EXISTS 6:6 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW6_EXISTS_YES 
0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW7_EXISTS 7:7 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW8_EXISTS 8:8 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW8_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW8_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW9_EXISTS 9:9 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW9_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW9_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW10_EXISTS 10:10 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW10_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW10_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW11_EXISTS 11:11 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW11_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW11_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW12_EXISTS 12:12 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW12_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW12_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW13_EXISTS 13:13 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW13_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW13_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW14_EXISTS 14:14 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW14_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW14_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW15_EXISTS 15:15 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW15_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW15_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW16_EXISTS 16:16 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW16_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW16_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW17_EXISTS 17:17 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW17_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW17_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW18_EXISTS 18:18 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW18_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW18_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW19_EXISTS 19:19 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW19_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW19_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW20_EXISTS 20:20 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW20_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW20_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW21_EXISTS 21:21 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW21_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW21_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW22_EXISTS 22:22 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW22_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW22_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW23_EXISTS 23:23 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW23_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW23_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW24_EXISTS 24:24 /* RWIVF */ +#define NVC673_SYS_CAPB_WINDOW24_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW24_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAPB_WINDOW25_EXISTS 25:25 /* 
RWIVF */
+#define NVC673_SYS_CAPB_WINDOW25_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAPB_WINDOW25_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAPB_WINDOW26_EXISTS 26:26 /* RWIVF */
+#define NVC673_SYS_CAPB_WINDOW26_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAPB_WINDOW26_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAPB_WINDOW27_EXISTS 27:27 /* RWIVF */
+#define NVC673_SYS_CAPB_WINDOW27_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAPB_WINDOW27_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAPB_WINDOW28_EXISTS 28:28 /* RWIVF */
+#define NVC673_SYS_CAPB_WINDOW28_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAPB_WINDOW28_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAPB_WINDOW29_EXISTS 29:29 /* RWIVF */
+#define NVC673_SYS_CAPB_WINDOW29_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAPB_WINDOW29_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAPB_WINDOW30_EXISTS 30:30 /* RWIVF */
+#define NVC673_SYS_CAPB_WINDOW30_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAPB_WINDOW30_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAPB_WINDOW31_EXISTS 31:31 /* RWIVF */
+#define NVC673_SYS_CAPB_WINDOW31_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAPB_WINDOW31_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAPB_WINDOW_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */
+#define NVC673_SYS_CAPB_WINDOW_EXISTS__SIZE_1 32 /* */
+#define NVC673_SYS_CAPB_WINDOW_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAPB_WINDOW_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA 0x10 /* RW-4R */
+#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRIES 15:0 /* RWIUF */
+#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH 17:16 /* RWIVF */
+#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_32B 0x00000000 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_64B 0x00000001 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_128B 0x00000002 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_256B 0x00000003 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_ROTATION 18:18 /* RWIVF */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_ROTATION_FALSE 0x00000000 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_ROTATION_TRUE 0x00000001 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_PLANAR 19:19 /* RWIVF */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_PLANAR_FALSE 0x00000000 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_PLANAR_TRUE 0x00000001 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_VGA 20:20 /* RWIVF */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_VGA_FALSE 0x00000000 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_VGA_TRUE 0x00000001 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION 21:21 /* RWIVF */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_FALSE 0x00000000 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_TRUE 0x00000001 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MSCG 22:22 /* RWIVF */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MSCG_FALSE 0x00000000 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MSCG_TRUE 0x00000001 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH 23:23 /* RWIVF */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_FALSE 0x00000000 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_TRUE 0x00000001 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT 26:26 /* RWIVF */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION 31:30 /* RWIVF */
+#define NVC673_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_32B 0x00000000 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_64B 0x00000001 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_128B 0x00000002 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_256B 0x00000003 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPC 0x18 /* RW-4R */
+#define NVC673_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE 1:0 /* RWIVF */
+#define NVC673_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_32B 0x00000000 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_64B 0x00000001 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_128B 0x00000002 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_256B 0x00000003 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED 6:4 /* RWIVF */
+#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_NONE 0x00000000 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_TWO 0x00000001 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_FOUR 0x00000002 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_EIGHT 0x00000003 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_SIXTEEN 0x00000004 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR 11:11 /* RWIVF */
+#define NVC673_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR_FALSE 0x00000000 /* RWI-V */
+#define NVC673_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR_TRUE 0x00000001 /* RW--V */
+#define NVC673_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP 12:12 /* RWIVF */
+#define NVC673_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP_FALSE 0x00000000 /* RWI-V */
+#define NVC673_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA(i) (0x680+(i)*32) /* RW-4A */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA__SIZE_1 8 /* */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT 16:16 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT 17:17 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT 18:18 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_HCLPF_PRESENT 19:19 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_HCLPF_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_HCLPF_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT 20:20 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT 21:21 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT 22:22 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_VFILTER_PRESENT 23:23 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_VFILTER_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_VFILTER_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_RCRC_PRESENT 24:24 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_RCRC_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPA_RCRC_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPB(i) (0x684+(i)*32) /* RW-4A */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPB__SIZE_1 8 /* */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPB_VGA 0:0 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPB_VGA_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPB_VGA_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGSZ 9:6 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGNR 12:10 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD 14:14 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT 15:15 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC(i) (0x688+(i)*32) /* RW-4A */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC__SIZE_1 8 /* */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC0_PRECISION 4:0 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP 5:5 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC1_PRECISION 12:8 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP 13:13 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_SF_PRECISION 20:16 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_CI_PRECISION 24:21 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB 25:25 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPD(i) (0x68c+(i)*32) /* RW-4A */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPD__SIZE_1 8 /* */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPE(i) (0x690+(i)*32) /* RW-4A */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPE__SIZE_1 8 /* */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_RATEBUFSIZE 3:0 /* RWIUF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_LINEBUFSIZE 13:8 /* RWIUF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422 16:16 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420 17:17 /* RWIVF */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_TRUE 0x00000001 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_FALSE 0x00000000 /* RW--V */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPF(i) (0x694+(i)*32) /* RW-4A */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPF__SIZE_1 8 /* */
+#define NVC673_POSTCOMP_HEAD_HDR_CAPF_VFILTER_MAX_PIXELS 15:0 /* RWIVF */
+#define NVC673_SOR_CAP(i) (0x144+(i)*8) /* RW-4A */
+#define NVC673_SOR_CAP__SIZE_1 8 /* */
+#define NVC673_SOR_CAP_SINGLE_LVDS_18 0:0 /* RWIVF */
+#define NVC673_SOR_CAP_SINGLE_LVDS_18_FALSE 0x00000000 /* RW--V */
+#define NVC673_SOR_CAP_SINGLE_LVDS_18_TRUE 0x00000001 /* RW--V */
+#define NVC673_SOR_CAP_SINGLE_LVDS_24 1:1 /* RWIVF */
+#define NVC673_SOR_CAP_SINGLE_LVDS_24_FALSE 0x00000000 /* RW--V */
+#define NVC673_SOR_CAP_SINGLE_LVDS_24_TRUE 0x00000001 /* RW--V */
+#define NVC673_SOR_CAP_DUAL_LVDS_18 2:2 /* RWIVF */
+#define NVC673_SOR_CAP_DUAL_LVDS_18_FALSE 0x00000000 /* RW--V */
+#define NVC673_SOR_CAP_DUAL_LVDS_18_TRUE 0x00000001 /* RW--V */
+#define NVC673_SOR_CAP_DUAL_LVDS_24 3:3 /* RWIVF */
+#define NVC673_SOR_CAP_DUAL_LVDS_24_FALSE 0x00000000 /* RW--V */
+#define NVC673_SOR_CAP_DUAL_LVDS_24_TRUE 0x00000001 /* RW--V */
+#define NVC673_SOR_CAP_SINGLE_TMDS_A 8:8 /* RWIVF */
+#define NVC673_SOR_CAP_SINGLE_TMDS_A_FALSE 0x00000000 /* RW--V */
+#define NVC673_SOR_CAP_SINGLE_TMDS_A_TRUE 0x00000001 /* RW--V */
+#define NVC673_SOR_CAP_SINGLE_TMDS_B 9:9 /* RWIVF */
+#define NVC673_SOR_CAP_SINGLE_TMDS_B_FALSE 0x00000000 /* RW--V */
+#define NVC673_SOR_CAP_SINGLE_TMDS_B_TRUE 0x00000001 /* RW--V */
+#define NVC673_SOR_CAP_DUAL_TMDS 11:11 /* RWIVF */
+#define NVC673_SOR_CAP_DUAL_TMDS_FALSE 0x00000000 /* RW--V */
+#define NVC673_SOR_CAP_DUAL_TMDS_TRUE 0x00000001 /* RW--V */
+#define NVC673_SOR_CAP_DISPLAY_OVER_PCIE 13:13 /* RWIVF */
+#define NVC673_SOR_CAP_DISPLAY_OVER_PCIE_FALSE 0x00000000 /* RW--V */
+#define NVC673_SOR_CAP_DISPLAY_OVER_PCIE_TRUE 0x00000001 /* RW--V */
+#define NVC673_SOR_CAP_SDI 16:16 /* RWIVF */
+#define NVC673_SOR_CAP_SDI_FALSE 0x00000000 /* RW--V */
+#define NVC673_SOR_CAP_SDI_TRUE 0x00000001 /* RW--V */
+#define NVC673_SOR_CAP_DP_A 24:24 /* RWIVF */
+#define NVC673_SOR_CAP_DP_A_FALSE 0x00000000 /* RW--V */
+#define NVC673_SOR_CAP_DP_A_TRUE 0x00000001 /* RW--V */
+#define NVC673_SOR_CAP_DP_B 25:25 /* RWIVF */
+#define NVC673_SOR_CAP_DP_B_FALSE 0x00000000 /* RW--V */
+#define NVC673_SOR_CAP_DP_B_TRUE 0x00000001 /* RW--V */
+#define NVC673_SOR_CAP_DP_INTERLACE 26:26 /* RWIVF */
+#define NVC673_SOR_CAP_DP_INTERLACE_FALSE 0x00000000 /* RW--V */
+#define NVC673_SOR_CAP_DP_INTERLACE_TRUE 0x00000001 /* RW--V */
+#define NVC673_SOR_CAP_DP_8_LANES 27:27 /* RWIVF */
+#define NVC673_SOR_CAP_DP_8_LANES_FALSE 0x00000000 /* RW--V */
+#define NVC673_SOR_CAP_DP_8_LANES_TRUE 0x00000001 /* RW--V */
+#define NVC673_SOR_CAP_HDMI_FRL 28:28 /* RWIVF */
+#define NVC673_SOR_CAP_HDMI_FRL_FALSE 0x00000000 /* RW--V */
+#define NVC673_SOR_CAP_HDMI_FRL_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA(i) (0x780+(i)*32) /* RW-4A */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA__SIZE_1 32 /* */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_ALPHA_WIDTH 13:10 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT 16:16 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT 17:17 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT 18:18 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT 19:19 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT 20:20 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT 21:21 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT 22:22 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT 23:23 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT 24:24 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB(i) (0x784+(i)*32) /* RW-4A */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB__SIZE_1 32 /* */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_FMT_PRECISION 4:0 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGSZ 9:6 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGNR 12:10 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD 14:14 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT 15:15 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC(i) (0x788+(i)*32) /* RW-4A */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC__SIZE_1 32 /* */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_PRECISION 4:0 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP 5:5 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGSZ 9:6 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGNR 12:10 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD 14:14 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT 15:15 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_PRECISION 20:16 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP 21:21 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD(i) (0x78c+(i)*32) /* RW-4A */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD__SIZE_1 32 /* */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGSZ 3:0 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGNR 6:4 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD 8:8 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT 9:9 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_SF_PRECISION 16:12 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_CI_PRECISION 20:17 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB 21:21 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA 22:22 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE(i) (0x790+(i)*32) /* RW-4A */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE__SIZE_1 32 /* */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_PRECISION 4:0 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP 5:5 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGSZ 9:6 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGNR 12:10 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD 14:14 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT 15:15 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_PRECISION 20:16 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP 21:21 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPF(i) (0x794+(i)*32) /* RW-4A */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPF__SIZE_1 32 /* */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */
+#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */
+#define NVC673_SOR_CLK_CAP(i) (0x608+(i)*4) /* RW-4A */
+#define NVC673_SOR_CLK_CAP__SIZE_1 8 /* */
+#define NVC673_SOR_CLK_CAP_DP_MAX 7:0 /* RWIUF */
+#define NVC673_SOR_CLK_CAP_TMDS_MAX 23:16 /* RWIUF */
+#define NVC673_SOR_CLK_CAP_LVDS_MAX 31:24 /* RWIUF */
+
+#ifdef __cplusplus
+};
+#endif /* extern C */
+#endif //_clc673_h_
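The HI:LO pairs throughout clc673.h (and the other class headers below) are bit ranges intended for ternary-based field macros, as in NVIDIA's DRF family, rather than ordinary constants. A minimal sketch of how such a field is extracted; the FIELD_* helper names here are illustrative, not part of the SDK, and how the capability dword is read back (RM control call, mapped aperture) is not shown:

    #include <stdint.h>

    /* "17:16" expands inside a ternary: (1 ? 17:16) yields the high bit
     * and (0 ? 17:16) the low bit; nvmisc.h's DRF macros use the same
     * trick. */
    #define FIELD_LO(f)     ((0 ? f) % 32)
    #define FIELD_HI(f)     ((1 ? f) % 32)
    #define FIELD_MASK(f)   (0xFFFFFFFFu >> (31 - FIELD_HI(f) + FIELD_LO(f)))
    #define FIELD_VAL(f, v) (((v) >> FIELD_LO(f)) & FIELD_MASK(f))

    /* Decode two IHUB capability fields from a raw CAPA value. */
    static uint32_t ihub_mempool_entries(uint32_t capa)
    {
        return FIELD_VAL(NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRIES, capa); /* 15:0 */
    }

    static int ihub_supports_mscg(uint32_t capa)
    {
        return FIELD_VAL(NVC673_IHUB_COMMON_CAPA_SUPPORT_MSCG, capa)
               == NVC673_IHUB_COMMON_CAPA_SUPPORT_MSCG_TRUE;
    }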
diff --git a/src/common/sdk/nvidia/inc/class/clc67a.h b/src/common/sdk/nvidia/inc/class/clc67a.h
new file mode 100644
index 0000000..4496969
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc67a.h
@@ -0,0 +1,213 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc67a__h_
+#define _clc67a__h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC67A_CURSOR_IMM_CHANNEL_PIO (0x0000C67A)
+
+typedef volatile struct _clc67a_tag0 {
+    NvV32 Reserved00[0x2];
+    NvV32 Free; // 0x00000008 - 0x0000000B
+    NvV32 Reserved01[0x7D];
+    NvV32 Update; // 0x00000200 - 0x00000203
+    NvV32 SetInterlockFlags; // 0x00000204 - 0x00000207
+    NvV32 SetCursorHotSpotPointOut[2]; // 0x00000208 - 0x0000020F
+    NvV32 SetWindowInterlockFlags; // 0x00000210 - 0x00000213
+    NvV32 Reserved02[0x37B];
+} NVC67ADispCursorImmControlPio;
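+
+/*
+ * Illustrative sketch, not part of the vendor header: the struct above
+ * describes the cursor-immediate PIO aperture, so methods can be written
+ * through it directly once the channel is mapped (how the mapping is
+ * obtained is outside this header's scope).
+ *
+ *     static void set_cursor_hotspot(NVC67ADispCursorImmControlPio *regs,
+ *                                    NvU16 x, NvU16 y)
+ *     {
+ *         // X in 15:0, Y in 31:16 (NVC67A_SET_CURSOR_HOT_SPOT_POINT_OUT_*)
+ *         regs->SetCursorHotSpotPointOut[0] = ((NvV32)y << 16) | x;
+ *         // Kick the state with RELEASE_ELV=TRUE (bit 0 of Update)
+ *         regs->Update = NVC67A_UPDATE_RELEASE_ELV_TRUE;
+ *     }
+ */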
+
+#define NVC67A_FREE (0x00000008)
+#define NVC67A_FREE_COUNT 5:0
+#define NVC67A_UPDATE (0x00000200)
+#define NVC67A_UPDATE_RELEASE_ELV 0:0
+#define NVC67A_UPDATE_RELEASE_ELV_FALSE (0x00000000)
+#define NVC67A_UPDATE_RELEASE_ELV_TRUE (0x00000001)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN 8:4
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC67A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC67A_SET_INTERLOCK_FLAGS (0x00000204)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NVC67A_SET_CURSOR_HOT_SPOT_POINT_OUT(b) (0x00000208 + (b)*0x00000004)
+#define NVC67A_SET_CURSOR_HOT_SPOT_POINT_OUT_X 15:0
+#define NVC67A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y 31:16
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS (0x00000210)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clc67a_h
+
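The cursor-immediate class above and the core channel below expose one enable bit per window in their SET_WINDOW_INTERLOCK_FLAGS methods, so a flags dword is an ordinary bitmask over window indices. A small illustrative helper (the function name and API are this note's, not the SDK's):

    #include <stdint.h>

    /* Window w owns bit w:w of SET_WINDOW_INTERLOCK_FLAGS, so ENABLE for a
     * set of windows is just an OR of single-bit masks. */
    static uint32_t window_interlock_flags(const unsigned *windows, int count)
    {
        uint32_t flags = 0; /* all ..._WINDOWn_DISABLE by default */
        for (int i = 0; i < count; i++)
            flags |= 1u << windows[i]; /* ..._WINDOWn_ENABLE */
        return flags;
    }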
diff --git a/src/common/sdk/nvidia/inc/class/clc67b.h b/src/common/sdk/nvidia/inc/class/clc67b.h
new file mode 100644
index 0000000..5d73866
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc67b.h
@@ -0,0 +1,67 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clC67b_h_
+#define _clC67b_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC67B_WINDOW_IMM_CHANNEL_DMA (0x0000C67B)
+
+// dma opcode instructions
+#define NVC67B_DMA
+#define NVC67B_DMA_OPCODE 31:29
+#define NVC67B_DMA_OPCODE_METHOD 0x00000000
+#define NVC67B_DMA_OPCODE_JUMP 0x00000001
+#define NVC67B_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC67B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC67B_DMA_METHOD_COUNT 27:18
+#define NVC67B_DMA_METHOD_OFFSET 13:2
+#define NVC67B_DMA_DATA 31:0
+#define NVC67B_DMA_DATA_NOP 0x00000000
+#define NVC67B_DMA_JUMP_OFFSET 11:2
+#define NVC67B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NVC67B_PUT (0x00000000)
+#define NVC67B_PUT_PTR 9:0
+#define NVC67B_GET (0x00000004)
+#define NVC67B_GET_PTR 9:0
+#define NVC67B_UPDATE (0x00000200)
+#define NVC67B_UPDATE_RELEASE_ELV 0:0
+#define NVC67B_UPDATE_RELEASE_ELV_FALSE (0x00000000)
+#define NVC67B_UPDATE_RELEASE_ELV_TRUE (0x00000001)
+#define NVC67B_UPDATE_INTERLOCK_WITH_WINDOW 1:1
+#define NVC67B_UPDATE_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVC67B_UPDATE_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVC67B_SET_POINT_OUT(b) (0x00000208 + (b)*0x00000004)
+#define NVC67B_SET_POINT_OUT_X 15:0
+#define NVC67B_SET_POINT_OUT_Y 31:16
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clC67b_h
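clc67b.h is the first DMA class in this patch; its NVC67B_DMA_* fields describe the pushbuffer word format also used by the core channel below: OPCODE in bits 31:29, METHOD_COUNT in 27:18, and METHOD_OFFSET in 13:2 holding the method byte offset divided by four. A hedged sketch of encoding one method plus payload; pushbuffer allocation and GET/PUT pointer management are channel details not shown:

    #include <stdint.h>

    /* Pack an OPCODE_METHOD header: data-dword count in 27:18 and the
     * dword-aligned method offset / 4 in 13:2. */
    static uint32_t dma_method_header(uint32_t method, uint32_t count)
    {
        return (0x0u << 29) |                   /* NVC67B_DMA_OPCODE_METHOD */
               ((count & 0x3FFu) << 18) |       /* NVC67B_DMA_METHOD_COUNT  */
               (((method >> 2) & 0xFFFu) << 2); /* NVC67B_DMA_METHOD_OFFSET */
    }

    /* Example: one UPDATE method with RELEASE_ELV=TRUE in the data dword. */
    static void emit_update(uint32_t *push, uint32_t *put)
    {
        push[(*put)++] = dma_method_header(NVC67B_UPDATE, 1);
        push[(*put)++] = NVC67B_UPDATE_RELEASE_ELV_TRUE;
    }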
diff --git a/src/common/sdk/nvidia/inc/class/clc67d.h b/src/common/sdk/nvidia/inc/class/clc67d.h
new file mode 100644
index 0000000..6b09ded
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc67d.h
@@ -0,0 +1,1337 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clC67d_h_
+#define _clC67d_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC67D_CORE_CHANNEL_DMA (0x0000C67D)
+
+#define NV_DISP_NOTIFIER 0x00000000
+#define NV_DISP_NOTIFIER_SIZEOF 0x00000010
+#define NV_DISP_NOTIFIER__0 0x00000000
+#define NV_DISP_NOTIFIER__0_PRESENT_COUNT 7:0
+#define NV_DISP_NOTIFIER__0_FIELD 8:8
+#define NV_DISP_NOTIFIER__0_FLIP_TYPE 9:9
+#define NV_DISP_NOTIFIER__0_FLIP_TYPE_NON_TEARING 0x00000000
+#define NV_DISP_NOTIFIER__0_FLIP_TYPE_IMMEDIATE 0x00000001
+#define NV_DISP_NOTIFIER__0_R1 15:10
+#define NV_DISP_NOTIFIER__0_R2 23:16
+#define NV_DISP_NOTIFIER__0_R3 29:24
+#define NV_DISP_NOTIFIER__0_STATUS 31:30
+#define NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN 0x00000000
+#define NV_DISP_NOTIFIER__0_STATUS_BEGUN 0x00000001
+#define NV_DISP_NOTIFIER__0_STATUS_FINISHED 0x00000002
+#define NV_DISP_NOTIFIER__1 0x00000001
+#define NV_DISP_NOTIFIER__1_R4 31:0
+#define NV_DISP_NOTIFIER__2 0x00000002
+#define NV_DISP_NOTIFIER__2_TIMESTAMP_LO 31:0
+#define NV_DISP_NOTIFIER__3 0x00000003
+#define NV_DISP_NOTIFIER__3_TIMESTAMP_HI 31:0
+
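+/*
+ * Illustrative note, not part of the vendor header: NV_DISP_NOTIFIER is a
+ * four-dword completion record (NV_DISP_NOTIFIER_SIZEOF = 0x10).  STATUS
+ * sits in dword 0, bits 31:30, and the timestamp in dwords 2..3, so a
+ * CPU-side poll can be as simple as:
+ *
+ *     static int notifier_finished(const volatile NvU32 *n)
+ *     {
+ *         return ((n[0] >> 30) & 0x3) == NV_DISP_NOTIFIER__0_STATUS_FINISHED;
+ *     }
+ *     static NvU64 notifier_timestamp(const volatile NvU32 *n)
+ *     {
+ *         return ((NvU64)n[3] << 32) | n[2];
+ *     }
+ *
+ * Allocating the memory and binding it via SET_CONTEXT_DMA_NOTIFIER /
+ * SET_NOTIFIER_CONTROL is channel setup not covered by this header.
+ */
+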
+// dma opcode instructions
+#define NVC67D_DMA
+#define NVC67D_DMA_OPCODE 31:29
+#define NVC67D_DMA_OPCODE_METHOD 0x00000000
+#define NVC67D_DMA_OPCODE_JUMP 0x00000001
+#define NVC67D_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC67D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC67D_DMA_METHOD_COUNT 27:18
+#define NVC67D_DMA_METHOD_OFFSET 13:2
+#define NVC67D_DMA_DATA 31:0
+#define NVC67D_DMA_DATA_NOP 0x00000000
+#define NVC67D_DMA_JUMP_OFFSET 11:2
+#define NVC67D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// if cap SUPPORT_FLEXIBLE_WIN_MAPPING is FALSE, this define can be used to obtain which head a window is mapped to
+#define NVC37D_WINDOW_MAPPED_TO_HEAD(w) ((w)>>1)
+#define NVC37D_GET_VALID_WINDOWMASK_FOR_HEAD(h) ((1<<((h)*2)) | (1<<((h)*2+1)))
+
+// class methods
+#define NVC67D_PUT (0x00000000)
+#define NVC67D_PUT_PTR 9:0
+#define NVC67D_GET (0x00000004)
+#define NVC67D_GET_PTR 9:0
+#define NVC67D_UPDATE (0x00000200)
+#define NVC67D_UPDATE_SPECIAL_HANDLING 21:20
+#define NVC67D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NVC67D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NVC67D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NVC67D_UPDATE_SPECIAL_HANDLING_REASON 19:12
+#define NVC67D_UPDATE_INHIBIT_INTERRUPTS 24:24
+#define NVC67D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NVC67D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NVC67D_UPDATE_RELEASE_ELV 0:0
+#define NVC67D_UPDATE_RELEASE_ELV_FALSE (0x00000000)
+#define NVC67D_UPDATE_RELEASE_ELV_TRUE (0x00000001)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN 8:4
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC67D_SET_CONTEXT_DMA_NOTIFIER (0x00000208)
+#define NVC67D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NVC67D_SET_NOTIFIER_CONTROL (0x0000020C)
+#define NVC67D_SET_NOTIFIER_CONTROL_MODE 0:0
+#define NVC67D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NVC67D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVC67D_SET_NOTIFIER_CONTROL_OFFSET 11:4
+#define NVC67D_SET_NOTIFIER_CONTROL_NOTIFY 12:12
+#define NVC67D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000)
+#define NVC67D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001)
+#define NVC67D_SET_CONTROL (0x00000210)
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN(i) ((i)+0):((i)+0)
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN__SIZE_1 4
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN_DISABLE (0x00000000)
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN_ENABLE (0x00000001)
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN0 0:0
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN0_DISABLE (0x00000000)
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN0_ENABLE (0x00000001)
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN1 1:1
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN1_DISABLE (0x00000000)
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN1_ENABLE (0x00000001)
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN2 2:2
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN2_DISABLE (0x00000000)
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN2_ENABLE (0x00000001)
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN3 3:3
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN3_DISABLE (0x00000000)
+#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN3_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS (0x00000218)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+#define NVC67D_GET_RG_SCAN_LINE(b) (0x00000220 + (b)*0x00000004)
+#define NVC67D_GET_RG_SCAN_LINE_LINE 15:0
+#define NVC67D_GET_RG_SCAN_LINE_VBLANK 16:16
+#define NVC67D_GET_RG_SCAN_LINE_VBLANK_FALSE (0x00000000)
+#define NVC67D_GET_RG_SCAN_LINE_VBLANK_TRUE (0x00000001)
+#define NVC67D_SET_GET_BLANKING_CTRL(b) (0x00000240 + (b)*0x00000004)
+#define NVC67D_SET_GET_BLANKING_CTRL_BLANK 0:0
+#define NVC67D_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000)
+#define NVC67D_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001)
+#define NVC67D_SET_GET_BLANKING_CTRL_UNBLANK 1:1
+#define NVC67D_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000)
+#define NVC67D_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001)
+
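+/*
+ * Illustrative note, not part of the vendor header: a value read back from
+ * GET_RG_SCAN_LINE(head) packs the raster line in bits 15:0 and an
+ * in-vblank flag in bit 16, per the field definitions above, e.g.:
+ *
+ *     static void decode_rg_scan_line(NvU32 v, NvU32 *line, NvU32 *vblank)
+ *     {
+ *         *line   = v & 0xFFFF;        // NVC67D_GET_RG_SCAN_LINE_LINE 15:0
+ *         *vblank = (v >> 16) & 0x1;   // NVC67D_GET_RG_SCAN_LINE_VBLANK 16:16
+ *     }
+ *
+ * How the method readback is performed is channel plumbing not shown here.
+ */
+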
+#define NVC67D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK 7:0
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_HDMI_FRL (0x0000000C)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NVC67D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16
+#define NVC67D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC67D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC67D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NVC67D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NVC67D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NVC67D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+#define NVC67D_SOR_SET_CUSTOM_REASON(a) (0x00000304 + (a)*0x00000020)
+#define NVC67D_SOR_SET_CUSTOM_REASON_CODE 31:0
+#define NVC67D_SOR_SET_SW_SPARE_A(a) (0x00000308 + (a)*0x00000020)
+#define NVC67D_SOR_SET_SW_SPARE_A_CODE 31:0
+#define NVC67D_SOR_SET_SW_SPARE_B(a) (0x0000030C + (a)*0x00000020)
+#define NVC67D_SOR_SET_SW_SPARE_B_CODE 31:0
+
+#define NVC67D_DSI_SET_CONTROL(a) (0x00000500 + (a)*0x00000020)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK 7:0
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080)
+#define NVC67D_DSI_SET_CUSTOM_REASON(a) (0x00000504 + (a)*0x00000020)
+#define NVC67D_DSI_SET_CUSTOM_REASON_CODE 31:0
+#define NVC67D_DSI_SET_SW_SPARE_A(a) (0x00000508 + (a)*0x00000020)
+#define NVC67D_DSI_SET_SW_SPARE_A_CODE 31:0
+#define NVC67D_DSI_SET_SW_SPARE_B(a) (0x0000050C + (a)*0x00000020)
+#define NVC67D_DSI_SET_SW_SPARE_B_CODE 31:0
+
+#define NVC67D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER 3:0
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i))
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(a) (0x0000100C + (a)*0x00000080)
+#define NVC67D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_HORIZONTAL 15:0
+#define NVC67D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_VERTICAL 31:16
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080)
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define
NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004) +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24 +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED 30:30 +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001) + +#define NVC67D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000400) +#define NVC67D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NVC67D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NVC67D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NVC67D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NVC67D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003) +#define NVC67D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3 +#define NVC67D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_PROCAMP_CHROMA_DOWN_V 4:4 +#define NVC67D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28 +#define NVC67D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NVC67D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000400) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2 +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3 +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4 +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_444 (0x00000009) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444NP (0x0000000A) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24 +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) 
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 (0x00000017)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F)
+#define NVC67D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CONTROL_STRUCTURE 1:0
+#define NVC67D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2
+#define NVC67D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_YUV420PACKER 3:3
+#define NVC67D_HEAD_SET_CONTROL_YUV420PACKER_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_YUV420PACKER_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 11:10
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 8:4
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 15:12
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 23:22
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 20:16
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN 28:24
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31
+#define NVC67D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00002010 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00002014 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 6:4
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004)
+#define NVC67D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NVC67D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_BITS 5:4
+#define NVC67D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2
+#define NVC67D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE 10:8
+#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_ROUND (0x00000005)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_PHASE 13:12
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x0000201C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 0:0
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING 4:4
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 9:8
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001)
+#define NVC67D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC67D_HEAD_SET_DISPLAY_ID_CODE 31:0
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(a) (0x0000202C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_HORIZONTAL 15:0
+#define NVC67D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_VERTICAL 31:16
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED 16:16
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_STALL_LOCK(a) (0x00002034 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_STALL_LOCK_ENABLE 0:0
+#define NVC67D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_STALL_LOCK_MODE 2:2
+#define NVC67D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000)
+#define NVC67D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN 8:4
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC67D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 12:12
+#define NVC67D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000)
+#define NVC67D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001)
+#define NVC67D_HEAD_SET_STALL_LOCK_TEPOLARITY 14:14
+#define NVC67D_HEAD_SET_STALL_LOCK_TEPOLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC67D_HEAD_SET_STALL_LOCK_TEPOLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_STALL_LOCK_UNSTALL_SYNC_ADVANCE 25:16
+#define NVC67D_HEAD_SET_LOCK_CHAIN(a) (0x00002044 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_LOCK_CHAIN_POSITION 3:0
+#define NVC67D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x00002048 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NVC67D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#define NVC67D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NVC67D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NVC67D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NVC67D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NVC67D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x0000205C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0
+#define NVC67D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16
+#define NVC67D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_RASTER_SIZE_WIDTH 14:0
+#define NVC67D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16
+#define NVC67D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NVC67D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NVC67D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NVC67D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NVC67D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NVC67D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NVC67D_HEAD_SET_OVERSCAN_COLOR(a) (0x00002078 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OVERSCAN_COLOR_RED_CR 9:0
+#define NVC67D_HEAD_SET_OVERSCAN_COLOR_GREEN_Y 19:10
+#define NVC67D_HEAD_SET_OVERSCAN_COLOR_BLUE_CB 29:20
+#define NVC67D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(a) (0x0000207C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_RED_CR 9:0
+#define NVC67D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_GREEN_Y 19:10
+#define NVC67D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_BLUE_CB 29:20
+#define NVC67D_HEAD_SET_HDMI_CTRL(a) (0x00002080 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0
+#define NVC67D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000)
+#define NVC67D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001)
+#define NVC67D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002)
+#define NVC67D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4
+#define NVC67D_HEAD_SET_CONTEXT_DMA_CURSOR(a,b) (0x00002088 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC67D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0
+#define NVC67D_HEAD_SET_OFFSET_CURSOR(a,b) (0x00002090 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC67D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x00002098 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 0:0
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000)
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS 20:20
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00002180 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
+#define NVC67D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020)
+#define NVC67D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8
+#define NVC67D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9
+#define NVC67D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_PRESENT_CONTROL(a) (0x0000218C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 0:0
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4
+#define NVC67D_HEAD_SET_SW_SPARE_A(a) (0x00002194 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_SW_SPARE_A_CODE 31:0
+#define NVC67D_HEAD_SET_SW_SPARE_B(a) (0x00002198 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_SW_SPARE_B_CODE 31:0
+#define NVC67D_HEAD_SET_SW_SPARE_C(a) (0x0000219C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_SW_SPARE_C_CODE 31:0
+#define NVC67D_HEAD_SET_SW_SPARE_D(a) (0x000021A0 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_SW_SPARE_D_CODE 31:0
+#define NVC67D_HEAD_SET_DISPLAY_RATE(a) (0x000021A8 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DISPLAY_RATE_RUN_MODE 0:0
+#define NVC67D_HEAD_SET_DISPLAY_RATE_RUN_MODE_CONTINUOUS (0x00000000)
+#define NVC67D_HEAD_SET_DISPLAY_RATE_RUN_MODE_ONE_SHOT (0x00000001)
+#define NVC67D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL 25:4
+#define NVC67D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH 2:2
+#define NVC67D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE(a,b) (0x000021AC + (a)*0x00000400 + (b)*0x00000004)
+#define NVC67D_HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE_HANDLE 31:0
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL(a,b) (0x000021CC + (a)*0x00000400 + (b)*0x00000004)
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_OFFSET 7:0
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000)
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001)
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE 14:14
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE_WRITE (0x00000000)
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE 10:10
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE_ONE_TIME (0x00000000)
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE_CONTINUOUS (0x00000001)
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RASTER_LINE 30:16
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_VALUE(a,b) (0x000021EC + (a)*0x00000400 + (b)*0x00000004)
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_VALUE 31:0
+#define NVC67D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE(a) (0x00002214 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_DATA 9:0
+#define NVC67D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_INDEX 19:12
+#define NVC67D_HEAD_SET_MIN_FRAME_IDLE(a) (0x00002218 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_MIN_FRAME_IDLE_LEADING_RASTER_LINES 14:0
+#define NVC67D_HEAD_SET_MIN_FRAME_IDLE_TRAILING_RASTER_LINES 30:16
+#define NVC67D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED(a) (0x00002220 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_ALPHA 7:0
+#define NVC67D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_RED 31:16
+#define NVC67D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE(a) (0x00002224 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_GREEN 15:0
+#define NVC67D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_BLUE 31:16
+#define NVC67D_HEAD_SET_CURSOR_COLOR_NORM_SCALE(a) (0x00002228 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CURSOR_COLOR_NORM_SCALE_VALUE 15:0
+#define NVC67D_HEAD_SET_XOR_BLEND_FACTOR(a) (0x0000222C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_XOR_BLEND_FACTOR_LOG2PEAK_LUMINANCE 3:0
+#define NVC67D_HEAD_SET_XOR_BLEND_FACTOR_S1 16:4
+#define NVC67D_HEAD_SET_XOR_BLEND_FACTOR_S2 30:18
+#define NVC67D_HEAD_SET_CLAMP_RANGE_GREEN(a) (0x00002238 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CLAMP_RANGE_GREEN_LOW 11:0
+#define NVC67D_HEAD_SET_CLAMP_RANGE_GREEN_HIGH 27:16
+#define NVC67D_HEAD_SET_CLAMP_RANGE_RED_BLUE(a) (0x0000223C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CLAMP_RANGE_RED_BLUE_LOW 11:0
+#define NVC67D_HEAD_SET_CLAMP_RANGE_RED_BLUE_HIGH 27:16
+#define NVC67D_HEAD_SET_OCSC0CONTROL(a) (0x00002240 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC0CONTROL_ENABLE 0:0
+#define NVC67D_HEAD_SET_OCSC0CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_OCSC0CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C00(a) (0x00002244 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C00_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C01(a) (0x00002248 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C01_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C02(a) (0x0000224C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C02_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C03(a) (0x00002250 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C03_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C10(a) (0x00002254 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C10_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C11(a) (0x00002258 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C11_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C12(a) (0x0000225C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C12_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C13(a) (0x00002260 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C13_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C20(a) (0x00002264 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C20_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C21(a) (0x00002268 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C21_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C22(a) (0x0000226C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C22_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C23(a) (0x00002270 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C23_VALUE 20:0
+#define NVC67D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0
+#define NVC67D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1
+#define NVC67D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_OLUT_CONTROL_MODE 3:2
+#define NVC67D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000)
+#define NVC67D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001)
+#define NVC67D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002)
+#define NVC67D_HEAD_SET_OLUT_CONTROL_SIZE 18:8
+#define NVC67D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0
+#define NVC67D_HEAD_SET_CONTEXT_DMA_OLUT(a) (0x00002288 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CONTEXT_DMA_OLUT_HANDLE 31:0
+#define NVC67D_HEAD_SET_OFFSET_OLUT(a) (0x0000228C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OFFSET_OLUT_ORIGIN 31:0
+#define NVC67D_HEAD_SET_OCSC1CONTROL(a) (0x0000229C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC1CONTROL_ENABLE 0:0
+#define NVC67D_HEAD_SET_OCSC1CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_OCSC1CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C00(a) (0x000022A0 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C00_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C01(a) (0x000022A4 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C01_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C02(a) (0x000022A8 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C02_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C03(a) (0x000022AC + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C03_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C10(a) (0x000022B0 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C10_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C11(a) (0x000022B4 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C11_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C12(a) (0x000022B8 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C12_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C13(a) (0x000022BC + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C13_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C20(a) (0x000022C0 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C20_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C21(a) (0x000022C4 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C21_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C22(a) (0x000022C8 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C22_VALUE 20:0
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C23(a) (0x000022CC + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C23_VALUE 20:0
+#define NVC67D_HEAD_SET_TILE_POSITION(a) (0x000022D0 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_TILE_POSITION_X 2:0
+#define NVC67D_HEAD_SET_TILE_POSITION_Y 6:4
+#define NVC67D_HEAD_SET_DSC_CONTROL(a) (0x000022D4 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_CONTROL_ENABLE 0:0
+#define NVC67D_HEAD_SET_DSC_CONTROL_ENABLE_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_DSC_CONTROL_ENABLE_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_DSC_CONTROL_MODE 2:1
+#define NVC67D_HEAD_SET_DSC_CONTROL_MODE_SINGLE (0x00000000)
+#define NVC67D_HEAD_SET_DSC_CONTROL_MODE_DUAL (0x00000001)
+#define NVC67D_HEAD_SET_DSC_CONTROL_MODE_QUAD (0x00000002)
+#define NVC67D_HEAD_SET_DSC_CONTROL_AUTO_RESET 3:3
+#define NVC67D_HEAD_SET_DSC_CONTROL_AUTO_RESET_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_DSC_CONTROL_AUTO_RESET_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION 4:4
+#define NVC67D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET 5:5
+#define NVC67D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_DSC_CONTROL_FLATNESS_DET_THRESH 15:6
+#define NVC67D_HEAD_SET_DSC_PPS_CONTROL(a) (0x000022D8 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_ENABLE 0:0
+#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_LOCATION 1:1
+#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VSYNC (0x00000000)
+#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VBLANK (0x00000001)
+#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_SIZE 9:2
+#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY 10:10
+#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000)
+#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_ONCE (0x00000001)
+#define NVC67D_HEAD_SET_DSC_PPS_HEAD(a) (0x000022DC + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_HEAD_BYTE0 7:0
+#define NVC67D_HEAD_SET_DSC_PPS_HEAD_BYTE1 15:8
+#define NVC67D_HEAD_SET_DSC_PPS_HEAD_BYTE2 23:16
+#define NVC67D_HEAD_SET_DSC_PPS_HEAD_BYTE3 31:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA0(a) (0x000022E0 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MINOR 3:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MAJOR 7:4
+#define NVC67D_HEAD_SET_DSC_PPS_DATA0_PPS_IDENTIFIER 15:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA0_RESERVED 23:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA0_LINEBUF_DEPTH 27:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA0_BITS_PER_COMPONENT 31:28
+#define NVC67D_HEAD_SET_DSC_PPS_DATA1(a) (0x000022E4 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_HIGH 1:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA1_VBR_ENABLE 2:2
+#define NVC67D_HEAD_SET_DSC_PPS_DATA1_SIMPLE422 3:3
+#define NVC67D_HEAD_SET_DSC_PPS_DATA1_CONVERT_RGB 4:4
+#define NVC67D_HEAD_SET_DSC_PPS_DATA1_BLOCK_PRED_ENABLE 5:5
+#define NVC67D_HEAD_SET_DSC_PPS_DATA1_RESERVED 7:6
+#define NVC67D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_LOW 15:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_HIGH 23:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_LOW 31:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA2(a) (0x000022E8 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_HIGH 7:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_LOW 15:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_HIGH 23:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_LOW 31:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA3(a) (0x000022EC + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_HIGH 7:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_LOW 15:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_HIGH 23:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_LOW 31:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA4(a) (0x000022F0 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_HIGH 1:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA4_RESERVED 7:2
+#define NVC67D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_LOW 15:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_HIGH 23:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_LOW 31:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA5(a) (0x000022F4 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA5_RESERVED0 7:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA5_INITIAL_SCALE_VALUE 13:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA5_RESERVED1 15:14
+#define NVC67D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_HIGH 23:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_LOW 31:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA6(a) (0x000022F8 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_HIGH 3:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA6_RESERVED0 7:4
+#define NVC67D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_LOW 15:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA6_RESERVED1 23:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA6_FIRST_LINE_BPG_OFFSET 28:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA6_RESERVED2 31:29
+#define NVC67D_HEAD_SET_DSC_PPS_DATA7(a) (0x000022FC + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_HIGH 7:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_LOW 15:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_HIGH 23:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_LOW 31:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA8(a) (0x00002300 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_HIGH 7:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_LOW 15:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_HIGH 23:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_LOW 31:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA9(a) (0x00002304 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MIN_QP 4:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA9_RESERVED0 7:5
+#define NVC67D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MAX_QP 12:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA9_RESERVED1 15:13
+#define NVC67D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_HIGH 23:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_LOW 31:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA10(a) (0x00002308 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RC_EDGE_FACTOR 3:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RESERVED0 7:4
+#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT0 12:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RESERVED1 15:13
+#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT1 20:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RESERVED2 23:21
+#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_LO 27:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_HI 31:28
+#define NVC67D_HEAD_SET_DSC_PPS_DATA11(a) (0x0000230C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH0 7:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH1 15:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH2 23:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH3 31:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA12(a) (0x00002310 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH4 7:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH5 15:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH6 23:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH7 31:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA13(a) (0x00002314 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH8 7:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH9 15:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH10 23:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH11 31:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA14(a) (0x00002318 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH12 7:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH13 15:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_HIGH0 18:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MIN_QP0 23:19
+#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_BPG_OFFSET0 29:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_LOW0 31:30
+#define NVC67D_HEAD_SET_DSC_PPS_DATA15(a) (0x0000231C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH1 2:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP1 7:3
+#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET1 13:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW1 15:14
+#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH2 18:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP2 23:19
+#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET2 29:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW2 31:30
+#define NVC67D_HEAD_SET_DSC_PPS_DATA16(a) (0x00002320 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH3 2:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP3 7:3
+#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET3 13:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW3 15:14
+#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH4 18:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP4 23:19
+#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET4 29:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW4 31:30
+#define NVC67D_HEAD_SET_DSC_PPS_DATA17(a) (0x00002324 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH5 2:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP5 7:3
+#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET5 13:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW5 15:14
+#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH6 18:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP6 23:19
+#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET6 29:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW6 31:30
+#define NVC67D_HEAD_SET_DSC_PPS_DATA18(a) (0x00002328 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH7 2:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP7 7:3
+#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET7 13:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW7 15:14
+#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH8 18:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP8 23:19
+#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET8 29:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW8 31:30
+#define NVC67D_HEAD_SET_DSC_PPS_DATA19(a) (0x0000232C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH9 2:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP9 7:3
+#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET9 13:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW9 15:14
+#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH10 18:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP10 23:19
+#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET10 29:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW10 31:30
+#define NVC67D_HEAD_SET_DSC_PPS_DATA20(a) (0x00002330 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH11 2:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP11 7:3
+#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET11 13:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW11 15:14
+#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH12 18:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP12 23:19
+#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET12 29:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW12 31:30
+#define NVC67D_HEAD_SET_DSC_PPS_DATA21(a) (0x00002334 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH13 2:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP13 7:3
+#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET13 13:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW13 15:14
+#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH14 18:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP14 23:19
+#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET14 29:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW14 31:30
+#define NVC67D_HEAD_SET_DSC_PPS_DATA22(a) (0x00002338 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA22_NATIVE422 0:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA22_NATIVE420 1:1
+#define NVC67D_HEAD_SET_DSC_PPS_DATA22_RESERVED0 7:2
+#define NVC67D_HEAD_SET_DSC_PPS_DATA22_SECOND_LINE_BPG_OFFSET 12:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA22_RESERVED1 15:13
+#define NVC67D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSET_HIGH 23:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSETLOW 31:24
+#define NVC67D_HEAD_SET_DSC_PPS_DATA23(a) (0x0000233C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_HIGH 7:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_LOW 15:8
+#define NVC67D_HEAD_SET_DSC_PPS_DATA23_RESERVED 31:16
+#define NVC67D_HEAD_SET_DSC_PPS_DATA24(a) (0x00002340 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA24_RESERVED 31:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA25(a) (0x00002344 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA25_RESERVED 31:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA26(a) (0x00002348 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA26_RESERVED 31:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA27(a) (0x0000234C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA27_RESERVED 31:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA28(a) (0x00002350 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA28_RESERVED 31:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA29(a) (0x00002354 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA29_RESERVED 31:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA30(a) (0x00002358 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA30_RESERVED 31:0
+#define NVC67D_HEAD_SET_DSC_PPS_DATA31(a) (0x0000235C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DSC_PPS_DATA31_RESERVED 31:0
+#define NVC67D_HEAD_SET_RG_MERGE(a) (0x00002360 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_RG_MERGE_MODE 1:0
+#define NVC67D_HEAD_SET_RG_MERGE_MODE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_RG_MERGE_MODE_SETUP (0x00000001)
+#define NVC67D_HEAD_SET_RG_MERGE_MODE_MASTER (0x00000002)
+#define NVC67D_HEAD_SET_RG_MERGE_MODE_SLAVE (0x00000003)
+#define NVC67D_HEAD_SET_RASTER_HBLANK_DELAY(a) (0x00002364 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_RASTER_HBLANK_DELAY_BLANK_START 15:0
+#define NVC67D_HEAD_SET_RASTER_HBLANK_DELAY_BLANK_END 31:16
+#define NVC67D_HEAD_SET_HDMI_DSC_HCACTIVE(a) (0x00002368 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_HDMI_DSC_HCACTIVE_BYTES 15:0
+#define NVC67D_HEAD_SET_HDMI_DSC_HCACTIVE_TRI_BYTES 31:16
+#define NVC67D_HEAD_SET_HDMI_DSC_HCBLANK(a) (0x0000236C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_HDMI_DSC_HCBLANK_WIDTH 15:0
+#define NVC67D_HEAD_SW_RESERVED(a,b) (0x00002370 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC67D_HEAD_SW_RESERVED_VALUE 31:0
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_HI(a,b) (0x00002380 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_HI_VALUE 31:0
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clC67d_h
diff --git a/src/common/sdk/nvidia/inc/class/clc67e.h b/src/common/sdk/nvidia/inc/class/clc67e.h
new file mode 100644
index 0000000..871e8cd
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc67e.h
@@ -0,0 +1,698 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clC67e_h_ +#define _clC67e_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC67E_WINDOW_CHANNEL_DMA (0x0000C67E) + +// dma opcode instructions +#define NVC67E_DMA +#define NVC67E_DMA_OPCODE 31:29 +#define NVC67E_DMA_OPCODE_METHOD 0x00000000 +#define NVC67E_DMA_OPCODE_JUMP 0x00000001 +#define NVC67E_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC67E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC67E_DMA_METHOD_COUNT 27:18 +#define NVC67E_DMA_METHOD_OFFSET 13:2 +#define NVC67E_DMA_DATA 31:0 +#define NVC67E_DMA_DATA_NOP 0x00000000 +#define NVC67E_DMA_JUMP_OFFSET 11:2 +#define NVC67E_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NVC67E_PUT (0x00000000) +#define NVC67E_PUT_PTR 9:0 +#define NVC67E_GET (0x00000004) +#define NVC67E_GET_PTR 9:0 +#define NVC67E_UPDATE (0x00000200) +#define NVC67E_UPDATE_RELEASE_ELV 0:0 +#define NVC67E_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC67E_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC67E_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC67E_UPDATE_INTERLOCK_WITH_WIN_IMM 12:12 +#define 
NVC67E_UPDATE_INTERLOCK_WITH_WIN_IMM_DISABLE (0x00000000) +#define NVC67E_UPDATE_INTERLOCK_WITH_WIN_IMM_ENABLE (0x00000001) +#define NVC67E_SET_SEMAPHORE_ACQUIRE_HI (0x00000204) +#define NVC67E_SET_SEMAPHORE_ACQUIRE_HI_VALUE 31:0 +#define NVC67E_GET_LINE (0x00000208) +#define NVC67E_GET_LINE_LINE 15:0 +#define NVC67E_SET_SEMAPHORE_CONTROL (0x0000020C) +#define NVC67E_SET_SEMAPHORE_CONTROL_OFFSET 7:0 +#define NVC67E_SET_SEMAPHORE_CONTROL_SKIP_ACQ 11:11 +#define NVC67E_SET_SEMAPHORE_CONTROL_SKIP_ACQ_FALSE (0x00000000) +#define NVC67E_SET_SEMAPHORE_CONTROL_SKIP_ACQ_TRUE (0x00000001) +#define NVC67E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15 +#define NVC67E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000) +#define NVC67E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001) +#define NVC67E_SET_SEMAPHORE_CONTROL_ACQ_MODE 13:12 +#define NVC67E_SET_SEMAPHORE_CONTROL_ACQ_MODE_EQ (0x00000000) +#define NVC67E_SET_SEMAPHORE_CONTROL_ACQ_MODE_CGEQ (0x00000001) +#define NVC67E_SET_SEMAPHORE_CONTROL_ACQ_MODE_STRICT_GEQ (0x00000002) +#define NVC67E_SET_SEMAPHORE_CONTROL_REL_MODE 14:14 +#define NVC67E_SET_SEMAPHORE_CONTROL_REL_MODE_WRITE (0x00000000) +#define NVC67E_SET_SEMAPHORE_CONTROL_REL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC67E_SET_SEMAPHORE_ACQUIRE (0x00000210) +#define NVC67E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NVC67E_SET_SEMAPHORE_RELEASE (0x00000214) +#define NVC67E_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NVC67E_SET_CONTEXT_DMA_SEMAPHORE (0x00000218) +#define NVC67E_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0 +#define NVC67E_SET_CONTEXT_DMA_NOTIFIER (0x0000021C) +#define NVC67E_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC67E_SET_NOTIFIER_CONTROL (0x00000220) +#define NVC67E_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC67E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC67E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC67E_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC67E_SET_SIZE (0x00000224) +#define NVC67E_SET_SIZE_WIDTH 15:0 +#define NVC67E_SET_SIZE_HEIGHT 31:16 +#define NVC67E_SET_STORAGE (0x00000228) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC67E_SET_PARAMS (0x0000022C) +#define NVC67E_SET_PARAMS_FORMAT 7:0 +#define NVC67E_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NVC67E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F) +#define NVC67E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NVC67E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC67E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E) +#define NVC67E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC67E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6) +#define NVC67E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NVC67E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9) +#define NVC67E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NVC67E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NVC67E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NVC67E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NVC67E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define 
NVC67E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028) +#define NVC67E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029) +#define NVC67E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035) +#define NVC67E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036) +#define NVC67E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038) +#define NVC67E_SET_PARAMS_FORMAT_Y8___U8___V8_N444 (0x0000003A) +#define NVC67E_SET_PARAMS_FORMAT_Y8___U8___V8_N420 (0x0000003B) +#define NVC67E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055) +#define NVC67E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056) +#define NVC67E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058) +#define NVC67E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075) +#define NVC67E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076) +#define NVC67E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078) +#define NVC67E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18 +#define NVC67E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000) +#define NVC67E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001) +#define NVC67E_SET_PARAMS_SWAP_UV 19:19 +#define NVC67E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000) +#define NVC67E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001) +#define NVC67E_SET_PARAMS_FMT_ROUNDING_MODE 22:22 +#define NVC67E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST (0x00000000) +#define NVC67E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN (0x00000001) +#define NVC67E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004) +#define NVC67E_SET_PLANAR_STORAGE_PITCH 12:0 +#define NVC67E_SET_SEMAPHORE_RELEASE_HI (0x0000023C) +#define NVC67E_SET_SEMAPHORE_RELEASE_HI_VALUE 31:0 +#define NVC67E_SET_CONTEXT_DMA_ISO(b) (0x00000240 + (b)*0x00000004) +#define NVC67E_SET_CONTEXT_DMA_ISO_HANDLE 31:0 +#define NVC67E_SET_OFFSET(b) (0x00000260 + (b)*0x00000004) +#define NVC67E_SET_OFFSET_ORIGIN 31:0 +#define NVC67E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004) +#define NVC67E_SET_POINT_IN_X 15:0 +#define NVC67E_SET_POINT_IN_Y 31:16 +#define NVC67E_SET_SIZE_IN (0x00000298) +#define NVC67E_SET_SIZE_IN_WIDTH 15:0 +#define NVC67E_SET_SIZE_IN_HEIGHT 31:16 +#define NVC67E_SET_SIZE_OUT (0x000002A4) +#define NVC67E_SET_SIZE_OUT_WIDTH 15:0 +#define NVC67E_SET_SIZE_OUT_HEIGHT 31:16 +#define NVC67E_SET_CONTROL_INPUT_SCALER (0x000002A8) +#define NVC67E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC67E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC67E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC67E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define NVC67E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVC67E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC67E_SET_INPUT_SCALER_COEFF_VALUE (0x000002AC) +#define NVC67E_SET_INPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVC67E_SET_INPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVC67E_SET_COMPOSITION_CONTROL (0x000002EC) +#define NVC67E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT 1:0 +#define NVC67E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE (0x00000000) +#define NVC67E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC (0x00000001) +#define NVC67E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST (0x00000002) +#define NVC67E_SET_COMPOSITION_CONTROL_DEPTH 11:4 +#define NVC67E_SET_COMPOSITION_CONTROL_BYPASS 16:16 +#define NVC67E_SET_COMPOSITION_CONTROL_BYPASS_DISABLE (0x00000000) +#define NVC67E_SET_COMPOSITION_CONTROL_BYPASS_ENABLE (0x00000001) +#define NVC67E_SET_COMPOSITION_CONSTANT_ALPHA (0x000002F0) +#define NVC67E_SET_COMPOSITION_CONSTANT_ALPHA_K1 7:0 +#define 
NVC67E_SET_COMPOSITION_CONSTANT_ALPHA_K2 15:8 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT (0x000002F4) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT 3:0 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT 7:4 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT 11:8 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT 15:12 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT 19:16 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define 
NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT 23:20 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT 27:24 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT 31:28 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC67E_SET_KEY_ALPHA (0x000002F8) +#define NVC67E_SET_KEY_ALPHA_MIN 15:0 +#define NVC67E_SET_KEY_ALPHA_MAX 31:16 +#define NVC67E_SET_KEY_RED_CR (0x000002FC) +#define NVC67E_SET_KEY_RED_CR_MIN 15:0 +#define NVC67E_SET_KEY_RED_CR_MAX 31:16 +#define NVC67E_SET_KEY_GREEN_Y (0x00000300) +#define NVC67E_SET_KEY_GREEN_Y_MIN 15:0 +#define NVC67E_SET_KEY_GREEN_Y_MAX 31:16 +#define NVC67E_SET_KEY_BLUE_CB (0x00000304) +#define NVC67E_SET_KEY_BLUE_CB_MIN 15:0 +#define NVC67E_SET_KEY_BLUE_CB_MAX 31:16 +#define NVC67E_SET_PRESENT_CONTROL (0x00000308) +#define NVC67E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NVC67E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4 +#define NVC67E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NVC67E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NVC67E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8 +#define NVC67E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NVC67E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NVC67E_SET_PRESENT_CONTROL_STEREO_MODE 13:12 +#define NVC67E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000) +#define NVC67E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001) +#define NVC67E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002) +#define NVC67E_SET_ACQ_SEMAPHORE_VALUE_HI (0x0000030C) +#define NVC67E_SET_ACQ_SEMAPHORE_VALUE_HI_VALUE 31:0 +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL (0x00000330) +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_OFFSET 7:0 +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15 +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000) +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001) +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE 13:12 +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_EQ (0x00000000) +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_CGEQ (0x00000001) +#define 
NVC67E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_STRICT_GEQ (0x00000002) +#define NVC67E_SET_ACQ_SEMAPHORE_VALUE (0x00000334) +#define NVC67E_SET_ACQ_SEMAPHORE_VALUE_VALUE 31:0 +#define NVC67E_SET_CONTEXT_DMA_ACQ_SEMAPHORE (0x00000338) +#define NVC67E_SET_CONTEXT_DMA_ACQ_SEMAPHORE_HANDLE 31:0 +#define NVC67E_SET_SCAN_DIRECTION (0x0000033C) +#define NVC67E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION 0:0 +#define NVC67E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION_FROM_LEFT (0x00000000) +#define NVC67E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION_FROM_RIGHT (0x00000001) +#define NVC67E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION 1:1 +#define NVC67E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION_FROM_TOP (0x00000000) +#define NVC67E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION_FROM_BOTTOM (0x00000001) +#define NVC67E_SET_SCAN_DIRECTION_COLUMN_ORDER 2:2 +#define NVC67E_SET_SCAN_DIRECTION_COLUMN_ORDER_FALSE (0x00000000) +#define NVC67E_SET_SCAN_DIRECTION_COLUMN_ORDER_TRUE (0x00000001) +#define NVC67E_SET_TIMESTAMP_ORIGIN_LO (0x00000340) +#define NVC67E_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NVC67E_SET_TIMESTAMP_ORIGIN_HI (0x00000344) +#define NVC67E_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NVC67E_SET_UPDATE_TIMESTAMP_LO (0x00000348) +#define NVC67E_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NVC67E_SET_UPDATE_TIMESTAMP_HI (0x0000034C) +#define NVC67E_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NVC67E_SET_INTERLOCK_FLAGS (0x00000370) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 0:0 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+1):((i)+1) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 1:1 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 2:2 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 3:3 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 4:4 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 5:5 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 6:6 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 7:7 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define 
NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 8:8 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS (0x00000374) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define 
NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define 
NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC67E_SET_EXT_PACKET_CONTROL (0x00000398) +#define NVC67E_SET_EXT_PACKET_CONTROL_ENABLE 0:0 +#define NVC67E_SET_EXT_PACKET_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_EXT_PACKET_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_EXT_PACKET_CONTROL_LOCATION 4:4 +#define NVC67E_SET_EXT_PACKET_CONTROL_LOCATION_VSYNC (0x00000000) +#define NVC67E_SET_EXT_PACKET_CONTROL_LOCATION_VBLANK (0x00000001) +#define NVC67E_SET_EXT_PACKET_CONTROL_FREQUENCY 8:8 +#define NVC67E_SET_EXT_PACKET_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVC67E_SET_EXT_PACKET_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVC67E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE 12:12 +#define NVC67E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_DISABLE (0x00000000) +#define NVC67E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_ENABLE (0x00000001) +#define NVC67E_SET_EXT_PACKET_CONTROL_SIZE 27:16 +#define NVC67E_SET_EXT_PACKET_DATA (0x0000039C) +#define NVC67E_SET_EXT_PACKET_DATA_DB0 7:0 +#define NVC67E_SET_EXT_PACKET_DATA_DB1 15:8 +#define NVC67E_SET_EXT_PACKET_DATA_DB2 23:16 +#define NVC67E_SET_EXT_PACKET_DATA_DB3 31:24 +#define NVC67E_SET_FMT_COEFFICIENT_C00 (0x00000400) +#define NVC67E_SET_FMT_COEFFICIENT_C00_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C01 (0x00000404) +#define NVC67E_SET_FMT_COEFFICIENT_C01_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C02 (0x00000408) +#define NVC67E_SET_FMT_COEFFICIENT_C02_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C03 (0x0000040C) +#define NVC67E_SET_FMT_COEFFICIENT_C03_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C10 (0x00000410) +#define NVC67E_SET_FMT_COEFFICIENT_C10_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C11 (0x00000414) +#define NVC67E_SET_FMT_COEFFICIENT_C11_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C12 (0x00000418) +#define NVC67E_SET_FMT_COEFFICIENT_C12_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C13 (0x0000041C) +#define NVC67E_SET_FMT_COEFFICIENT_C13_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C20 (0x00000420) +#define NVC67E_SET_FMT_COEFFICIENT_C20_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C21 (0x00000424) +#define NVC67E_SET_FMT_COEFFICIENT_C21_VALUE 20:0 
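/*
 * Editorial sketch, not part of this patch: the NVC67E_SET_FMT_COEFFICIENT_Cxy
 * methods above and continuing below form a 3x4 color-conversion matrix laid
 * out row-major at consecutive 4-byte offsets from 0x0400 (C00 = 0x0400 ...
 * C23 = 0x042C); the CSC00/CSC01/CSC10/CSC11 coefficient blocks later in this
 * header follow the same stride from their own base offsets. A hypothetical
 * standalone helper for computing a coefficient method offset:
 */
#include <stdint.h>

static inline uint32_t fmt_coefficient_offset(unsigned row, unsigned col)
{
    /* row in 0..2, col in 0..3; e.g. row 2, col 3 -> 0x0400 + 11*4 = 0x042C. */
    return 0x00000400u + (row * 4u + col) * 4u;
}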
+#define NVC67E_SET_FMT_COEFFICIENT_C22 (0x00000428) +#define NVC67E_SET_FMT_COEFFICIENT_C22_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C23 (0x0000042C) +#define NVC67E_SET_FMT_COEFFICIENT_C23_VALUE 20:0 +#define NVC67E_SET_ILUT_CONTROL (0x00000440) +#define NVC67E_SET_ILUT_CONTROL_INTERPOLATE 0:0 +#define NVC67E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC67E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC67E_SET_ILUT_CONTROL_MIRROR 1:1 +#define NVC67E_SET_ILUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC67E_SET_ILUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC67E_SET_ILUT_CONTROL_MODE 3:2 +#define NVC67E_SET_ILUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define NVC67E_SET_ILUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVC67E_SET_ILUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVC67E_SET_ILUT_CONTROL_SIZE 18:8 +#define NVC67E_SET_CONTEXT_DMA_ILUT (0x00000444) +#define NVC67E_SET_CONTEXT_DMA_ILUT_HANDLE 31:0 +#define NVC67E_SET_OFFSET_ILUT (0x00000448) +#define NVC67E_SET_OFFSET_ILUT_ORIGIN 31:0 +#define NVC67E_SET_CSC00CONTROL (0x0000045C) +#define NVC67E_SET_CSC00CONTROL_ENABLE 0:0 +#define NVC67E_SET_CSC00CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC00CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC00COEFFICIENT_C00 (0x00000460) +#define NVC67E_SET_CSC00COEFFICIENT_C00_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C01 (0x00000464) +#define NVC67E_SET_CSC00COEFFICIENT_C01_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C02 (0x00000468) +#define NVC67E_SET_CSC00COEFFICIENT_C02_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C03 (0x0000046C) +#define NVC67E_SET_CSC00COEFFICIENT_C03_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C10 (0x00000470) +#define NVC67E_SET_CSC00COEFFICIENT_C10_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C11 (0x00000474) +#define NVC67E_SET_CSC00COEFFICIENT_C11_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C12 (0x00000478) +#define NVC67E_SET_CSC00COEFFICIENT_C12_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C13 (0x0000047C) +#define NVC67E_SET_CSC00COEFFICIENT_C13_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C20 (0x00000480) +#define NVC67E_SET_CSC00COEFFICIENT_C20_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C21 (0x00000484) +#define NVC67E_SET_CSC00COEFFICIENT_C21_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C22 (0x00000488) +#define NVC67E_SET_CSC00COEFFICIENT_C22_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C23 (0x0000048C) +#define NVC67E_SET_CSC00COEFFICIENT_C23_VALUE 20:0 +#define NVC67E_SET_CSC0LUT_CONTROL (0x000004A0) +#define NVC67E_SET_CSC0LUT_CONTROL_INTERPOLATE 0:0 +#define NVC67E_SET_CSC0LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC67E_SET_CSC0LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC67E_SET_CSC0LUT_CONTROL_MIRROR 1:1 +#define NVC67E_SET_CSC0LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC67E_SET_CSC0LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC67E_SET_CSC0LUT_CONTROL_ENABLE 4:4 +#define NVC67E_SET_CSC0LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC0LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC0LUT_SEGMENT_SIZE (0x000004A4) +#define NVC67E_SET_CSC0LUT_SEGMENT_SIZE_IDX 5:0 +#define NVC67E_SET_CSC0LUT_SEGMENT_SIZE_VALUE 18:16 +#define NVC67E_SET_CSC0LUT_ENTRY (0x000004A8) +#define NVC67E_SET_CSC0LUT_ENTRY_IDX 10:0 +#define NVC67E_SET_CSC0LUT_ENTRY_VALUE 31:16 +#define NVC67E_SET_CSC01CONTROL (0x000004BC) +#define NVC67E_SET_CSC01CONTROL_ENABLE 0:0 +#define NVC67E_SET_CSC01CONTROL_ENABLE_DISABLE 
(0x00000000) +#define NVC67E_SET_CSC01CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC01COEFFICIENT_C00 (0x000004C0) +#define NVC67E_SET_CSC01COEFFICIENT_C00_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C01 (0x000004C4) +#define NVC67E_SET_CSC01COEFFICIENT_C01_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C02 (0x000004C8) +#define NVC67E_SET_CSC01COEFFICIENT_C02_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C03 (0x000004CC) +#define NVC67E_SET_CSC01COEFFICIENT_C03_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C10 (0x000004D0) +#define NVC67E_SET_CSC01COEFFICIENT_C10_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C11 (0x000004D4) +#define NVC67E_SET_CSC01COEFFICIENT_C11_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C12 (0x000004D8) +#define NVC67E_SET_CSC01COEFFICIENT_C12_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C13 (0x000004DC) +#define NVC67E_SET_CSC01COEFFICIENT_C13_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C20 (0x000004E0) +#define NVC67E_SET_CSC01COEFFICIENT_C20_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C21 (0x000004E4) +#define NVC67E_SET_CSC01COEFFICIENT_C21_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C22 (0x000004E8) +#define NVC67E_SET_CSC01COEFFICIENT_C22_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C23 (0x000004EC) +#define NVC67E_SET_CSC01COEFFICIENT_C23_VALUE 20:0 +#define NVC67E_SET_TMO_CONTROL (0x00000500) +#define NVC67E_SET_TMO_CONTROL_INTERPOLATE 0:0 +#define NVC67E_SET_TMO_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC67E_SET_TMO_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC67E_SET_TMO_CONTROL_SAT_MODE 3:2 +#define NVC67E_SET_TMO_CONTROL_SIZE 18:8 +#define NVC67E_SET_TMO_LOW_INTENSITY_ZONE (0x00000508) +#define NVC67E_SET_TMO_LOW_INTENSITY_ZONE_END 29:16 +#define NVC67E_SET_TMO_LOW_INTENSITY_VALUE (0x0000050C) +#define NVC67E_SET_TMO_LOW_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC67E_SET_TMO_LOW_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC67E_SET_TMO_LOW_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_ZONE (0x00000510) +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_ZONE_START 13:0 +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_ZONE_END 29:16 +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_VALUE (0x00000514) +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC67E_SET_TMO_HIGH_INTENSITY_ZONE (0x00000518) +#define NVC67E_SET_TMO_HIGH_INTENSITY_ZONE_START 13:0 +#define NVC67E_SET_TMO_HIGH_INTENSITY_VALUE (0x0000051C) +#define NVC67E_SET_TMO_HIGH_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC67E_SET_TMO_HIGH_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC67E_SET_TMO_HIGH_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC67E_SET_CONTEXT_DMA_TMO_LUT (0x00000528) +#define NVC67E_SET_CONTEXT_DMA_TMO_LUT_HANDLE 31:0 +#define NVC67E_SET_OFFSET_TMO_LUT (0x0000052C) +#define NVC67E_SET_OFFSET_TMO_LUT_ORIGIN 31:0 +#define NVC67E_SET_CSC10CONTROL (0x0000053C) +#define NVC67E_SET_CSC10CONTROL_ENABLE 0:0 +#define NVC67E_SET_CSC10CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC10CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC10COEFFICIENT_C00 (0x00000540) +#define NVC67E_SET_CSC10COEFFICIENT_C00_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C01 (0x00000544) +#define NVC67E_SET_CSC10COEFFICIENT_C01_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C02 (0x00000548) +#define NVC67E_SET_CSC10COEFFICIENT_C02_VALUE 20:0 +#define 
NVC67E_SET_CSC10COEFFICIENT_C03 (0x0000054C) +#define NVC67E_SET_CSC10COEFFICIENT_C03_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C10 (0x00000550) +#define NVC67E_SET_CSC10COEFFICIENT_C10_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C11 (0x00000554) +#define NVC67E_SET_CSC10COEFFICIENT_C11_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C12 (0x00000558) +#define NVC67E_SET_CSC10COEFFICIENT_C12_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C13 (0x0000055C) +#define NVC67E_SET_CSC10COEFFICIENT_C13_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C20 (0x00000560) +#define NVC67E_SET_CSC10COEFFICIENT_C20_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C21 (0x00000564) +#define NVC67E_SET_CSC10COEFFICIENT_C21_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C22 (0x00000568) +#define NVC67E_SET_CSC10COEFFICIENT_C22_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C23 (0x0000056C) +#define NVC67E_SET_CSC10COEFFICIENT_C23_VALUE 20:0 +#define NVC67E_SET_CSC1LUT_CONTROL (0x00000580) +#define NVC67E_SET_CSC1LUT_CONTROL_INTERPOLATE 0:0 +#define NVC67E_SET_CSC1LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC67E_SET_CSC1LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC67E_SET_CSC1LUT_CONTROL_MIRROR 1:1 +#define NVC67E_SET_CSC1LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC67E_SET_CSC1LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC67E_SET_CSC1LUT_CONTROL_ENABLE 4:4 +#define NVC67E_SET_CSC1LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC1LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC1LUT_SEGMENT_SIZE (0x00000584) +#define NVC67E_SET_CSC1LUT_SEGMENT_SIZE_IDX 5:0 +#define NVC67E_SET_CSC1LUT_SEGMENT_SIZE_VALUE 18:16 +#define NVC67E_SET_CSC1LUT_ENTRY (0x00000588) +#define NVC67E_SET_CSC1LUT_ENTRY_IDX 10:0 +#define NVC67E_SET_CSC1LUT_ENTRY_VALUE 31:16 +#define NVC67E_SET_CSC11CONTROL (0x0000059C) +#define NVC67E_SET_CSC11CONTROL_ENABLE 0:0 +#define NVC67E_SET_CSC11CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC11CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC11COEFFICIENT_C00 (0x000005A0) +#define NVC67E_SET_CSC11COEFFICIENT_C00_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C01 (0x000005A4) +#define NVC67E_SET_CSC11COEFFICIENT_C01_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C02 (0x000005A8) +#define NVC67E_SET_CSC11COEFFICIENT_C02_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C03 (0x000005AC) +#define NVC67E_SET_CSC11COEFFICIENT_C03_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C10 (0x000005B0) +#define NVC67E_SET_CSC11COEFFICIENT_C10_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C11 (0x000005B4) +#define NVC67E_SET_CSC11COEFFICIENT_C11_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C12 (0x000005B8) +#define NVC67E_SET_CSC11COEFFICIENT_C12_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C13 (0x000005BC) +#define NVC67E_SET_CSC11COEFFICIENT_C13_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C20 (0x000005C0) +#define NVC67E_SET_CSC11COEFFICIENT_C20_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C21 (0x000005C4) +#define NVC67E_SET_CSC11COEFFICIENT_C21_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C22 (0x000005C8) +#define NVC67E_SET_CSC11COEFFICIENT_C22_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C23 (0x000005CC) +#define NVC67E_SET_CSC11COEFFICIENT_C23_VALUE 20:0 +#define NVC67E_SET_CLAMP_RANGE (0x000005D0) +#define NVC67E_SET_CLAMP_RANGE_LOW 15:0 +#define NVC67E_SET_CLAMP_RANGE_HIGH 31:16 +#define NVC67E_SW_RESERVED(b) (0x000005D4 + (b)*0x00000004) +#define NVC67E_SW_RESERVED_VALUE 31:0 + +#ifdef 
__cplusplus +}; /* extern "C" */ +#endif +#endif // _clC67e_h diff --git a/src/common/sdk/nvidia/inc/class/clc697.h b/src/common/sdk/nvidia/inc/class/clc697.h new file mode 100644 index 0000000..a8b447e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc697.h @@ -0,0 +1,4352 @@ +/******************************************************************************* + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef _cl_ampere_a_h_ +#define _cl_ampere_a_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../../../class/bin/sw_header.pl ampere_a */ + +#include "nvtypes.h" + +#define AMPERE_A 0xC697 + +#define NVC697_SET_OBJECT 0x0000 +#define NVC697_SET_OBJECT_CLASS_ID 15:0 +#define NVC697_SET_OBJECT_ENGINE_ID 20:16 + +#define NVC697_NO_OPERATION 0x0100 +#define NVC697_NO_OPERATION_V 31:0 + +#define NVC697_SET_NOTIFY_A 0x0104 +#define NVC697_SET_NOTIFY_A_ADDRESS_UPPER 7:0 + +#define NVC697_SET_NOTIFY_B 0x0108 +#define NVC697_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NVC697_NOTIFY 0x010c +#define NVC697_NOTIFY_TYPE 31:0 +#define NVC697_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NVC697_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NVC697_WAIT_FOR_IDLE 0x0110 +#define NVC697_WAIT_FOR_IDLE_V 31:0 + +#define NVC697_LOAD_MME_INSTRUCTION_RAM_POINTER 0x0114 +#define NVC697_LOAD_MME_INSTRUCTION_RAM_POINTER_V 31:0 + +#define NVC697_LOAD_MME_INSTRUCTION_RAM 0x0118 +#define NVC697_LOAD_MME_INSTRUCTION_RAM_V 31:0 + +#define NVC697_LOAD_MME_START_ADDRESS_RAM_POINTER 0x011c +#define NVC697_LOAD_MME_START_ADDRESS_RAM_POINTER_V 31:0 + +#define NVC697_LOAD_MME_START_ADDRESS_RAM 0x0120 +#define NVC697_LOAD_MME_START_ADDRESS_RAM_V 31:0 + +#define NVC697_SET_MME_SHADOW_RAM_CONTROL 0x0124 +#define NVC697_SET_MME_SHADOW_RAM_CONTROL_MODE 1:0 +#define NVC697_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK 0x00000000 +#define NVC697_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK_WITH_FILTER 0x00000001 +#define NVC697_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH 0x00000002 +#define NVC697_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_REPLAY 0x00000003 + +#define NVC697_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER 0x0128 +#define NVC697_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER_V 7:0 + +#define NVC697_PEER_SEMAPHORE_RELEASE_OFFSET 0x012c +#define NVC697_PEER_SEMAPHORE_RELEASE_OFFSET_V 31:0 + +#define NVC697_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define 
NVC697_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC697_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NVC697_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC697_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NVC697_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NVC697_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC697_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC697_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC697_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC697_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC697_SEND_GO_IDLE 0x013c +#define NVC697_SEND_GO_IDLE_V 31:0 + +#define NVC697_PM_TRIGGER 0x0140 +#define NVC697_PM_TRIGGER_V 31:0 + +#define NVC697_PM_TRIGGER_WFI 0x0144 +#define NVC697_PM_TRIGGER_WFI_V 31:0 + +#define NVC697_FE_ATOMIC_SEQUENCE_BEGIN 0x0148 +#define NVC697_FE_ATOMIC_SEQUENCE_BEGIN_V 31:0 + +#define NVC697_FE_ATOMIC_SEQUENCE_END 0x014c +#define NVC697_FE_ATOMIC_SEQUENCE_END_V 31:0 + +#define NVC697_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NVC697_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NVC697_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NVC697_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NVC697_LINE_LENGTH_IN 0x0180 +#define NVC697_LINE_LENGTH_IN_VALUE 31:0 + +#define NVC697_LINE_COUNT 0x0184 +#define NVC697_LINE_COUNT_VALUE 31:0 + +#define NVC697_OFFSET_OUT_UPPER 0x0188 +#define NVC697_OFFSET_OUT_UPPER_VALUE 7:0 + +#define NVC697_OFFSET_OUT 0x018c +#define NVC697_OFFSET_OUT_VALUE 31:0 + +#define NVC697_PITCH_OUT 0x0190 +#define NVC697_PITCH_OUT_VALUE 31:0 + +#define NVC697_SET_DST_BLOCK_SIZE 0x0194 +#define NVC697_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC697_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVC697_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC697_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC697_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC697_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC697_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC697_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC697_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC697_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC697_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVC697_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVC697_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVC697_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NVC697_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVC697_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVC697_SET_DST_WIDTH 0x0198 +#define NVC697_SET_DST_WIDTH_V 31:0 + +#define NVC697_SET_DST_HEIGHT 0x019c +#define NVC697_SET_DST_HEIGHT_V 31:0 + +#define NVC697_SET_DST_DEPTH 0x01a0 +#define NVC697_SET_DST_DEPTH_V 31:0 + +#define NVC697_SET_DST_LAYER 0x01a4 +#define NVC697_SET_DST_LAYER_V 31:0 + +#define NVC697_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVC697_SET_DST_ORIGIN_BYTES_X_V 20:0 + +#define NVC697_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVC697_SET_DST_ORIGIN_SAMPLES_Y_V 16:0 + +#define NVC697_LAUNCH_DMA 0x01b0 +#define NVC697_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVC697_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVC697_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVC697_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVC697_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVC697_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 
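/*
 * Editorial sketch, not part of this patch: throughout these headers a
 * "HI:LO" pair names an inclusive bit range within a 32-bit method word
 * (the driver's own nvmisc.h DRF_* macros consume these names directly).
 * A minimal, hypothetical standalone packer for such a range:
 */
#include <stdint.h>

static inline uint32_t pack_field(uint32_t word, unsigned hi, unsigned lo,
                                  uint32_t value)
{
    /* Build a width-safe mask for bits hi..lo, then splice value in. */
    unsigned width = hi - lo + 1u;
    uint32_t mask  = (width < 32u) ? ((1u << width) - 1u) : 0xFFFFFFFFu;
    return (word & ~(mask << lo)) | ((value & mask) << lo);
}

/*
 * Example: NVC697_LAUNCH_DMA_COMPLETION_TYPE occupies bits 5:4, so selecting
 * _RELEASE_SEMAPHORE (0x00000002, defined just below) would be
 * pack_field(0u, 5, 4, 2u).
 */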
+#define NVC697_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVC697_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVC697_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 +#define NVC697_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVC697_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVC697_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVC697_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define NVC697_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVC697_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC697_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC697_LAUNCH_DMA_REDUCTION_OP 15:13 +#define NVC697_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC697_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC697_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC697_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVC697_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC697_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVC697_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVC697_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC697_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVC697_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC697_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVC697_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVC697_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVC697_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVC697_LOAD_INLINE_DATA 0x01b4 +#define NVC697_LOAD_INLINE_DATA_V 31:0 + +#define NVC697_SET_I2M_SEMAPHORE_A 0x01dc +#define NVC697_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC697_SET_I2M_SEMAPHORE_B 0x01e0 +#define NVC697_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC697_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVC697_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC697_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVC697_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVC697_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVC697_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVC697_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVC697_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVC697_SET_I2M_SPARE_NOOP03 0x01fc +#define NVC697_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVC697_RUN_DS_NOW 0x0200 +#define NVC697_RUN_DS_NOW_V 31:0 + +#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS 0x0204 +#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD 4:0 +#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_INSTANTANEOUS 0x00000000 +#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16 0x00000001 +#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32 0x00000002 +#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__64 0x00000003 +#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__128 0x00000004 +#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__256 0x00000005 +#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__512 0x00000006 +#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1024 0x00000007 +#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2048 0x00000008 +#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4096 0x00000009 +#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__8192 0x0000000A 
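/*
 * Editorial note, not part of this patch: the ACCUMULATED_PRIM_AREA_THRESHOLD
 * enumerants continue below. Apart from _INSTANTANEOUS (0x00) and
 * _LATEZ_ALWAYS (0x1F), each value v encodes a threshold of (1 << (v + 3)):
 * 0x01 -> 16, 0x0A -> 8192 above, 0x0B -> 16384 below, up to 0x13 -> 4194304.
 * A hypothetical decoder for the power-of-two encodings 0x01..0x13:
 */
#include <stdint.h>

static inline uint32_t prim_area_threshold(uint32_t v)
{
    /* Valid only for the power-of-two enumerants shown in this list. */
    return 1u << (v + 3u);
}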
+#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16384 0x0000000B
+#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32768 0x0000000C
+#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__65536 0x0000000D
+#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__131072 0x0000000E
+#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__262144 0x0000000F
+#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__524288 0x00000010
+#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1048576 0x00000011
+#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2097152 0x00000012
+#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4194304 0x00000013
+#define NVC697_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_LATEZ_ALWAYS 0x0000001F
+
+#define NVC697_SET_GS_MODE 0x0208
+#define NVC697_SET_GS_MODE_TYPE 0:0
+#define NVC697_SET_GS_MODE_TYPE_ANY 0x00000000
+#define NVC697_SET_GS_MODE_TYPE_FAST_GS 0x00000001
+
+#define NVC697_SET_ALIASED_LINE_WIDTH_ENABLE 0x020c
+#define NVC697_SET_ALIASED_LINE_WIDTH_ENABLE_V 0:0
+#define NVC697_SET_ALIASED_LINE_WIDTH_ENABLE_V_FALSE 0x00000000
+#define NVC697_SET_ALIASED_LINE_WIDTH_ENABLE_V_TRUE 0x00000001
+
+#define NVC697_SET_API_MANDATED_EARLY_Z 0x0210
+#define NVC697_SET_API_MANDATED_EARLY_Z_ENABLE 0:0
+#define NVC697_SET_API_MANDATED_EARLY_Z_ENABLE_FALSE 0x00000000
+#define NVC697_SET_API_MANDATED_EARLY_Z_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_GS_DM_FIFO 0x0214
+#define NVC697_SET_GS_DM_FIFO_SIZE_RASTER_ON 12:0
+#define NVC697_SET_GS_DM_FIFO_SIZE_RASTER_OFF 28:16
+#define NVC697_SET_GS_DM_FIFO_SPILL_ENABLED 31:31
+#define NVC697_SET_GS_DM_FIFO_SPILL_ENABLED_FALSE 0x00000000
+#define NVC697_SET_GS_DM_FIFO_SPILL_ENABLED_TRUE 0x00000001
+
+#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS 0x0218
+#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY 5:4
+#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC697_INVALIDATE_SHADER_CACHES 0x021c
+#define NVC697_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0
+#define NVC697_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000
+#define NVC697_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001
+#define NVC697_INVALIDATE_SHADER_CACHES_DATA 4:4
+#define NVC697_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000
+#define NVC697_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001
+#define NVC697_INVALIDATE_SHADER_CACHES_CONSTANT 12:12
+#define NVC697_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000
+#define NVC697_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001
+#define NVC697_INVALIDATE_SHADER_CACHES_LOCKS 1:1
+#define NVC697_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000
+#define NVC697_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001
+#define NVC697_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2
+#define NVC697_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000
+#define NVC697_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001
+
+#define NVC697_SET_INSTANCE_COUNT 0x0220
+#define NVC697_SET_INSTANCE_COUNT_V 31:0
+
+#define NVC697_SET_POSITION_W_SCALED_OFFSET_ENABLE 0x0224
+#define NVC697_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE 0:0
+#define NVC697_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE_FALSE 0x00000000
+#define NVC697_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_GO_IDLE_TIMEOUT 0x022c
+#define NVC697_SET_GO_IDLE_TIMEOUT_V 31:0
+
+#define NVC697_SET_MME_VERSION 0x0234
+#define NVC697_SET_MME_VERSION_MAJOR 7:0
+
+#define NVC697_SET_INDEX_BUFFER_SIZE_A 0x0238
+#define NVC697_SET_INDEX_BUFFER_SIZE_A_UPPER 7:0
+
+#define NVC697_SET_INDEX_BUFFER_SIZE_B 0x023c
+#define NVC697_SET_INDEX_BUFFER_SIZE_B_LOWER 31:0
+
+#define NVC697_SET_ROOT_TABLE_VISIBILITY(i) (0x0240+(i)*4)
+#define NVC697_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP0_ENABLE 1:0
+#define NVC697_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP1_ENABLE 5:4
+#define NVC697_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP2_ENABLE 9:8
+#define NVC697_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP3_ENABLE 13:12
+#define NVC697_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP4_ENABLE 17:16
+
+#define NVC697_SET_DRAW_CONTROL_A 0x0260
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY 3:0
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY_POINTS 0x00000000
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY_LINES 0x00000001
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY_QUADS 0x00000007
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY_POLYGON 0x00000009
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC697_SET_DRAW_CONTROL_A_TOPOLOGY_PATCH 0x0000000E
+#define NVC697_SET_DRAW_CONTROL_A_PRIMITIVE_ID 4:4
+#define NVC697_SET_DRAW_CONTROL_A_PRIMITIVE_ID_FIRST 0x00000000
+#define NVC697_SET_DRAW_CONTROL_A_PRIMITIVE_ID_UNCHANGED 0x00000001
+#define NVC697_SET_DRAW_CONTROL_A_INSTANCE_ID 6:5
+#define NVC697_SET_DRAW_CONTROL_A_INSTANCE_ID_FIRST 0x00000000
+#define NVC697_SET_DRAW_CONTROL_A_INSTANCE_ID_SUBSEQUENT 0x00000001
+#define NVC697_SET_DRAW_CONTROL_A_INSTANCE_ID_UNCHANGED 0x00000002
+#define NVC697_SET_DRAW_CONTROL_A_SPLIT_MODE 8:7
+#define NVC697_SET_DRAW_CONTROL_A_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000
+#define NVC697_SET_DRAW_CONTROL_A_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001
+#define NVC697_SET_DRAW_CONTROL_A_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002
+#define NVC697_SET_DRAW_CONTROL_A_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003
+#define NVC697_SET_DRAW_CONTROL_A_INSTANCE_ITERATE_ENABLE 9:9
+#define NVC697_SET_DRAW_CONTROL_A_INSTANCE_ITERATE_ENABLE_FALSE 0x00000000
+#define NVC697_SET_DRAW_CONTROL_A_INSTANCE_ITERATE_ENABLE_TRUE 0x00000001
+#define NVC697_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_VERTEX_INDEX 10:10
+#define NVC697_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_VERTEX_INDEX_FALSE 0x00000000
+#define NVC697_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_VERTEX_INDEX_TRUE 0x00000001
+#define NVC697_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_INSTANCE_INDEX 11:11
+#define NVC697_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_INSTANCE_INDEX_FALSE 0x00000000
+#define NVC697_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_INSTANCE_INDEX_TRUE 0x00000001
+
+#define NVC697_SET_DRAW_CONTROL_B 0x0264
+#define NVC697_SET_DRAW_CONTROL_B_INSTANCE_COUNT 31:0
+
+#define NVC697_DRAW_INDEX_BUFFER_BEGIN_END_A 0x0268
+#define NVC697_DRAW_INDEX_BUFFER_BEGIN_END_A_FIRST 31:0
+
+#define NVC697_DRAW_INDEX_BUFFER_BEGIN_END_B 0x026c
+#define NVC697_DRAW_INDEX_BUFFER_BEGIN_END_B_COUNT 31:0
+
+#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_A 0x0270
+#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_A_START 31:0
+
+#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_B 0x0274
+#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_B_COUNT 31:0
+
+#define NVC697_INVALIDATE_RASTER_CACHE_NO_WFI 0x027c
+#define NVC697_INVALIDATE_RASTER_CACHE_NO_WFI_V 0:0
+
+#define NVC697_SET_COLOR_RENDER_TO_ZETA_SURFACE 0x02b8
+#define NVC697_SET_COLOR_RENDER_TO_ZETA_SURFACE_V 0:0
+#define NVC697_SET_COLOR_RENDER_TO_ZETA_SURFACE_V_FALSE 0x00000000
+#define NVC697_SET_COLOR_RENDER_TO_ZETA_SURFACE_V_TRUE 0x00000001
+
+#define NVC697_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION 0x02bc
+#define NVC697_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION_V 0:0
+#define NVC697_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION_V_FALSE 0x00000000
+#define NVC697_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION_V_TRUE 0x00000001
+
+#define NVC697_INCREMENT_SYNC_POINT 0x02c8
+#define NVC697_INCREMENT_SYNC_POINT_INDEX 11:0
+#define NVC697_INCREMENT_SYNC_POINT_CLEAN_L2 16:16
+#define NVC697_INCREMENT_SYNC_POINT_CLEAN_L2_FALSE 0x00000000
+#define NVC697_INCREMENT_SYNC_POINT_CLEAN_L2_TRUE 0x00000001
+#define NVC697_INCREMENT_SYNC_POINT_CONDITION 20:20
+#define NVC697_INCREMENT_SYNC_POINT_CONDITION_STREAM_OUT_WRITES_DONE 0x00000000
+#define NVC697_INCREMENT_SYNC_POINT_CONDITION_ROP_WRITES_DONE 0x00000001
+
+#define NVC697_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE 0x02d4
+#define NVC697_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE_V 0:0
+
+#define NVC697_SET_SURFACE_CLIP_ID_BLOCK_SIZE 0x02d8
+#define NVC697_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH 3:0
+#define NVC697_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVC697_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT 7:4
+#define NVC697_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVC697_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVC697_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC697_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC697_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC697_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC697_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH 11:8
+#define NVC697_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+
+#define NVC697_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dc
+#define NVC697_SET_ALPHA_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 13:0
+
+#define NVC697_DECOMPRESS_SURFACE 0x02e0
+#define NVC697_DECOMPRESS_SURFACE_MRT_SELECT 2:0
+#define NVC697_DECOMPRESS_SURFACE_RT_ARRAY_INDEX 19:4
+
+#define NVC697_SET_ZCULL_ROP_BYPASS 0x02e4
+#define NVC697_SET_ZCULL_ROP_BYPASS_ENABLE 0:0
+#define NVC697_SET_ZCULL_ROP_BYPASS_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ZCULL_ROP_BYPASS_ENABLE_TRUE 0x00000001
+#define NVC697_SET_ZCULL_ROP_BYPASS_NO_STALL 4:4
+#define NVC697_SET_ZCULL_ROP_BYPASS_NO_STALL_FALSE 0x00000000
+#define NVC697_SET_ZCULL_ROP_BYPASS_NO_STALL_TRUE 0x00000001
+#define NVC697_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING 8:8
+#define NVC697_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_FALSE 0x00000000
+#define NVC697_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_TRUE 0x00000001
+#define NVC697_SET_ZCULL_ROP_BYPASS_THRESHOLD 15:12
+
+#define NVC697_SET_ZCULL_SUBREGION 0x02e8
+#define NVC697_SET_ZCULL_SUBREGION_ENABLE 0:0
+#define NVC697_SET_ZCULL_SUBREGION_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ZCULL_SUBREGION_ENABLE_TRUE 0x00000001
+#define NVC697_SET_ZCULL_SUBREGION_NORMALIZED_ALIQUOTS 27:4
+
+#define NVC697_SET_RASTER_BOUNDING_BOX 0x02ec
+#define NVC697_SET_RASTER_BOUNDING_BOX_MODE 0:0
+#define NVC697_SET_RASTER_BOUNDING_BOX_MODE_BOUNDING_BOX 0x00000000
+#define NVC697_SET_RASTER_BOUNDING_BOX_MODE_FULL_VIEWPORT 0x00000001
+#define NVC697_SET_RASTER_BOUNDING_BOX_PAD 11:4
+
+#define NVC697_PEER_SEMAPHORE_RELEASE 0x02f0
+#define NVC697_PEER_SEMAPHORE_RELEASE_V 31:0
+
+#define NVC697_SET_ITERATED_BLEND_OPTIMIZATION 0x02f4
+#define NVC697_SET_ITERATED_BLEND_OPTIMIZATION_NOOP 1:0
+#define NVC697_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_NEVER 0x00000000
+#define NVC697_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0000 0x00000001
+#define NVC697_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_ALPHA_0 0x00000002
+#define NVC697_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0001 0x00000003
+
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION 0x02f8
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_SUBREGION_ID 7:0
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_ALIQUOTS 23:8
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT 27:24
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16X2_4X4 0x00000000
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X16_4X4 0x00000001
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X2 0x00000002
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_2X4 0x00000003
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X8_4X4 0x00000004
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_4X2 0x00000005
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_2X4 0x00000006
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X8 0x00000007
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_2X2 0x00000008
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_4X2 0x00000009
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_2X4 0x0000000A
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_8X8_2X2 0x0000000B
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_1X1 0x0000000C
+#define NVC697_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_NONE 0x0000000F
+
+#define NVC697_ASSIGN_ZCULL_SUBREGIONS 0x02fc
+#define NVC697_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM 1:0
+#define NVC697_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Static 0x00000000
+#define NVC697_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Adaptive 0x00000001
+
+#define NVC697_SET_PS_OUTPUT_SAMPLE_MASK_USAGE 0x0300
+#define NVC697_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE 0:0
+#define NVC697_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_FALSE 0x00000000
+#define NVC697_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_TRUE 0x00000001
+#define NVC697_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE 1:1
+#define NVC697_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000
+#define NVC697_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001
+
+#define NVC697_DRAW_ZERO_INDEX 0x0304
+#define NVC697_DRAW_ZERO_INDEX_COUNT 31:0
+
+#define NVC697_SET_L1_CONFIGURATION 0x0308
+#define NVC697_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY 2:0
+#define NVC697_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_16KB 0x00000001
+#define NVC697_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB 0x00000003
+
+#define NVC697_SET_RENDER_ENABLE_CONTROL 0x030c
+#define NVC697_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER 0:0
+#define NVC697_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_FALSE 0x00000000
+#define NVC697_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_TRUE 0x00000001
+
+#define NVC697_SET_SPA_VERSION 0x0310
+#define NVC697_SET_SPA_VERSION_MINOR 7:0
+#define NVC697_SET_SPA_VERSION_MAJOR 15:8
+
+#define NVC697_SET_TIMESLICE_BATCH_LIMIT 0x0314
+#define NVC697_SET_TIMESLICE_BATCH_LIMIT_BATCH_LIMIT 15:0
+
+#define NVC697_SET_SNAP_GRID_LINE 0x0318
+#define NVC697_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL 3:0
+#define NVC697_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001
+#define NVC697_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002
+#define NVC697_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003
+#define NVC697_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004
+#define NVC697_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005
+#define NVC697_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006
+#define NVC697_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007
+#define NVC697_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008
+#define NVC697_SET_SNAP_GRID_LINE_ROUNDING_MODE 8:8
+#define NVC697_SET_SNAP_GRID_LINE_ROUNDING_MODE_RTNE 0x00000000
+#define NVC697_SET_SNAP_GRID_LINE_ROUNDING_MODE_TESLA 0x00000001
+
+#define NVC697_SET_SNAP_GRID_NON_LINE 0x031c
+#define NVC697_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL 3:0
+#define NVC697_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001
+#define NVC697_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002
+#define NVC697_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003
+#define NVC697_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004
+#define NVC697_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005
+#define NVC697_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006
+#define NVC697_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007
+#define NVC697_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008
+#define NVC697_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE 8:8
+#define NVC697_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_RTNE 0x00000000
+#define NVC697_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_TESLA 0x00000001
+
+#define NVC697_SET_TESSELLATION_PARAMETERS 0x0320
+#define NVC697_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE 1:0
+#define NVC697_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_ISOLINE 0x00000000
+#define NVC697_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_TRIANGLE 0x00000001
+#define NVC697_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_QUAD 0x00000002
+#define NVC697_SET_TESSELLATION_PARAMETERS_SPACING 5:4
+#define NVC697_SET_TESSELLATION_PARAMETERS_SPACING_INTEGER 0x00000000
+#define NVC697_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_ODD 0x00000001
+#define NVC697_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_EVEN 0x00000002
+#define NVC697_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES 9:8
+#define NVC697_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_POINTS 0x00000000
+#define NVC697_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_LINES 0x00000001
+#define NVC697_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CW 0x00000002
+#define NVC697_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CCW 0x00000003
+
+#define NVC697_SET_TESSELLATION_LOD_U0_OR_DENSITY 0x0324
+#define NVC697_SET_TESSELLATION_LOD_U0_OR_DENSITY_V 31:0
+
+#define NVC697_SET_TESSELLATION_LOD_V0_OR_DETAIL 0x0328
+#define NVC697_SET_TESSELLATION_LOD_V0_OR_DETAIL_V 31:0
+
+#define NVC697_SET_TESSELLATION_LOD_U1_OR_W0 0x032c
+#define NVC697_SET_TESSELLATION_LOD_U1_OR_W0_V 31:0
+
+#define NVC697_SET_TESSELLATION_LOD_V1 0x0330
+#define NVC697_SET_TESSELLATION_LOD_V1_V 31:0
+
+#define NVC697_SET_TG_LOD_INTERIOR_U 0x0334
+#define NVC697_SET_TG_LOD_INTERIOR_U_V 31:0
+
+#define NVC697_SET_TG_LOD_INTERIOR_V 0x0338
+#define NVC697_SET_TG_LOD_INTERIOR_V_V 31:0
+
+#define NVC697_RESERVED_TG07 0x033c
+#define NVC697_RESERVED_TG07_V 0:0
+
+#define NVC697_RESERVED_TG08 0x0340
+#define NVC697_RESERVED_TG08_V 0:0
+
+#define NVC697_RESERVED_TG09 0x0344
+#define NVC697_RESERVED_TG09_V 0:0
+
+#define NVC697_RESERVED_TG10 0x0348
+#define NVC697_RESERVED_TG10_V 0:0
+
+#define NVC697_RESERVED_TG11 0x034c
+#define NVC697_RESERVED_TG11_V 0:0
+
+#define NVC697_RESERVED_TG12 0x0350
+#define NVC697_RESERVED_TG12_V 0:0
+
+#define NVC697_RESERVED_TG13 0x0354
+#define NVC697_RESERVED_TG13_V 0:0
+
+#define NVC697_RESERVED_TG14 0x0358
+#define NVC697_RESERVED_TG14_V 0:0
+
+#define NVC697_RESERVED_TG15 0x035c
+#define NVC697_RESERVED_TG15_V 0:0
+
+#define NVC697_SET_SUBTILING_PERF_KNOB_A 0x0360
+#define NVC697_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_REGISTER_FILE_PER_SUBTILE 7:0
+#define NVC697_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_PIXEL_OUTPUT_BUFFER_PER_SUBTILE 15:8
+#define NVC697_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_TRIANGLE_RAM_PER_SUBTILE 23:16
+#define NVC697_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_MAX_QUADS_PER_SUBTILE 31:24
+
+#define NVC697_SET_SUBTILING_PERF_KNOB_B 0x0364
+#define NVC697_SET_SUBTILING_PERF_KNOB_B_FRACTION_OF_MAX_PRIMITIVES_PER_SUBTILE 7:0
+
+#define NVC697_SET_SUBTILING_PERF_KNOB_C 0x0368
+#define NVC697_SET_SUBTILING_PERF_KNOB_C_RESERVED 0:0
+
+#define NVC697_SET_ZCULL_SUBREGION_TO_REPORT 0x036c
+#define NVC697_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE 0:0
+#define NVC697_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_TRUE 0x00000001
+#define NVC697_SET_ZCULL_SUBREGION_TO_REPORT_SUBREGION_ID 11:4
+
+#define NVC697_SET_ZCULL_SUBREGION_REPORT_TYPE 0x0370
+#define NVC697_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE 0:0
+#define NVC697_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_TRUE 0x00000001
+#define NVC697_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE 6:4
+#define NVC697_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST 0x00000000
+#define NVC697_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_NO_ACCEPT 0x00000001
+#define NVC697_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_LATE_Z 0x00000002
+#define NVC697_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_STENCIL_TEST 0x00000003
+
+#define NVC697_SET_BALANCED_PRIMITIVE_WORKLOAD 0x0374
+#define NVC697_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE 0:0
+#define NVC697_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_FALSE 0x00000000
+#define NVC697_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_TRUE 0x00000001
+#define NVC697_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE 4:4
+#define NVC697_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_FALSE 0x00000000
+#define NVC697_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_TRUE 0x00000001
+#define NVC697_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE 8:8
+#define NVC697_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE_FALSE 0x00000000
+#define NVC697_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE_TRUE 0x00000001
+#define NVC697_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE 9:9
+#define NVC697_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE_FALSE 0x00000000
+#define NVC697_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE_TRUE 0x00000001
+
+#define NVC697_SET_MAX_PATCHES_PER_BATCH 0x0378
+#define NVC697_SET_MAX_PATCHES_PER_BATCH_V 5:0
+
+#define NVC697_SET_RASTER_ENABLE 0x037c
+#define NVC697_SET_RASTER_ENABLE_V 0:0
+#define NVC697_SET_RASTER_ENABLE_V_FALSE 0x00000000
+#define NVC697_SET_RASTER_ENABLE_V_TRUE 0x00000001
+
+#define NVC697_SET_STREAM_OUT_BUFFER_ENABLE(j) (0x0380+(j)*32)
+#define NVC697_SET_STREAM_OUT_BUFFER_ENABLE_V 0:0
+#define NVC697_SET_STREAM_OUT_BUFFER_ENABLE_V_FALSE 0x00000000
+#define NVC697_SET_STREAM_OUT_BUFFER_ENABLE_V_TRUE 0x00000001
+
+#define NVC697_SET_STREAM_OUT_BUFFER_ADDRESS_A(j) (0x0384+(j)*32)
+#define NVC697_SET_STREAM_OUT_BUFFER_ADDRESS_A_UPPER 7:0
+
+#define NVC697_SET_STREAM_OUT_BUFFER_ADDRESS_B(j) (0x0388+(j)*32)
+#define NVC697_SET_STREAM_OUT_BUFFER_ADDRESS_B_LOWER 31:0
+
+#define NVC697_SET_STREAM_OUT_BUFFER_SIZE(j) (0x038c+(j)*32)
+#define NVC697_SET_STREAM_OUT_BUFFER_SIZE_BYTES 31:0
+
+#define NVC697_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER(j) (0x0390+(j)*32)
+#define NVC697_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER_START_OFFSET 31:0
+
+#define NVC697_SET_POSITION_W_SCALED_OFFSET_SCALE_A(j) (0x0400+(j)*16)
+#define NVC697_SET_POSITION_W_SCALED_OFFSET_SCALE_A_V 31:0
+
+#define NVC697_SET_POSITION_W_SCALED_OFFSET_SCALE_B(j) (0x0404+(j)*16)
+#define NVC697_SET_POSITION_W_SCALED_OFFSET_SCALE_B_V 31:0
+
+#define NVC697_SET_POSITION_W_SCALED_OFFSET_RESERVED_A(j) (0x0408+(j)*16)
+#define NVC697_SET_POSITION_W_SCALED_OFFSET_RESERVED_A_V 31:0
+
+#define NVC697_SET_POSITION_W_SCALED_OFFSET_RESERVED_B(j) (0x040c+(j)*16)
+#define NVC697_SET_POSITION_W_SCALED_OFFSET_RESERVED_B_V 31:0
+
+#define NVC697_SET_ROOT_TABLE_SELECTOR 0x0504
+#define NVC697_SET_ROOT_TABLE_SELECTOR_ROOT_TABLE 2:0
+#define NVC697_SET_ROOT_TABLE_SELECTOR_OFFSET 15:8
+
+#define NVC697_LOAD_ROOT_TABLE 0x0508
+#define NVC697_LOAD_ROOT_TABLE_V 31:0
+
+#define NVC697_SET_MME_MEM_ADDRESS_A 0x0550
+#define NVC697_SET_MME_MEM_ADDRESS_A_UPPER 7:0
+
+#define NVC697_SET_MME_MEM_ADDRESS_B 0x0554
+#define NVC697_SET_MME_MEM_ADDRESS_B_LOWER 31:0
+
+#define NVC697_SET_MME_DATA_RAM_ADDRESS 0x0558
+#define NVC697_SET_MME_DATA_RAM_ADDRESS_WORD 31:0
+
+#define NVC697_MME_DMA_READ 0x055c
+#define NVC697_MME_DMA_READ_LENGTH 31:0
+
+#define NVC697_MME_DMA_READ_FIFOED 0x0560
+#define NVC697_MME_DMA_READ_FIFOED_LENGTH 31:0
+
+#define NVC697_MME_DMA_WRITE 0x0564
+#define NVC697_MME_DMA_WRITE_LENGTH 31:0
+
+#define NVC697_MME_DMA_REDUCTION 0x0568
+#define NVC697_MME_DMA_REDUCTION_REDUCTION_OP 2:0
+#define NVC697_MME_DMA_REDUCTION_REDUCTION_OP_RED_ADD 0x00000000
+#define NVC697_MME_DMA_REDUCTION_REDUCTION_OP_RED_MIN 0x00000001
+#define NVC697_MME_DMA_REDUCTION_REDUCTION_OP_RED_MAX 0x00000002
+#define NVC697_MME_DMA_REDUCTION_REDUCTION_OP_RED_INC 0x00000003
+#define NVC697_MME_DMA_REDUCTION_REDUCTION_OP_RED_DEC 0x00000004
+#define NVC697_MME_DMA_REDUCTION_REDUCTION_OP_RED_AND 0x00000005
+#define NVC697_MME_DMA_REDUCTION_REDUCTION_OP_RED_OR 0x00000006
+#define NVC697_MME_DMA_REDUCTION_REDUCTION_OP_RED_XOR 0x00000007
+#define NVC697_MME_DMA_REDUCTION_REDUCTION_FORMAT 5:4
+#define NVC697_MME_DMA_REDUCTION_REDUCTION_FORMAT_UNSIGNED_32 0x00000000
+#define NVC697_MME_DMA_REDUCTION_REDUCTION_FORMAT_SIGNED_32 0x00000001
+#define NVC697_MME_DMA_REDUCTION_REDUCTION_SIZE 8:8
+#define NVC697_MME_DMA_REDUCTION_REDUCTION_SIZE_FOUR_BYTES 0x00000000
+#define NVC697_MME_DMA_REDUCTION_REDUCTION_SIZE_EIGHT_BYTES 0x00000001
+
+#define NVC697_MME_DMA_SYSMEMBAR 0x056c
+#define NVC697_MME_DMA_SYSMEMBAR_V 0:0
+
+#define NVC697_MME_DMA_SYNC 0x0570
+#define NVC697_MME_DMA_SYNC_VALUE 31:0
+
+#define NVC697_SET_MME_DATA_FIFO_CONFIG 0x0574
+#define NVC697_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE 2:0
+#define NVC697_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_0KB 0x00000000
+#define NVC697_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_4KB 0x00000001
+#define NVC697_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_8KB 0x00000002
+#define NVC697_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_12KB 0x00000003
+#define NVC697_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_16KB 0x00000004
+
+#define NVC697_SET_VERTEX_STREAM_SIZE_A(j) (0x0600+(j)*8)
+#define NVC697_SET_VERTEX_STREAM_SIZE_A_UPPER 7:0
+
+#define NVC697_SET_VERTEX_STREAM_SIZE_B(j) (0x0604+(j)*8)
+#define NVC697_SET_VERTEX_STREAM_SIZE_B_LOWER 31:0
+
+#define NVC697_SET_STREAM_OUT_CONTROL_STREAM(j) (0x0700+(j)*16)
+#define NVC697_SET_STREAM_OUT_CONTROL_STREAM_SELECT 1:0
+
+#define NVC697_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT(j) (0x0704+(j)*16)
+#define NVC697_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT_MAX 7:0
+
+#define NVC697_SET_STREAM_OUT_CONTROL_STRIDE(j) (0x0708+(j)*16)
+#define NVC697_SET_STREAM_OUT_CONTROL_STRIDE_BYTES 31:0
+
+#define NVC697_SET_RASTER_INPUT 0x0740
+#define NVC697_SET_RASTER_INPUT_STREAM_SELECT 1:0
+
+#define NVC697_SET_STREAM_OUTPUT 0x0744
+#define NVC697_SET_STREAM_OUTPUT_ENABLE 0:0
+#define NVC697_SET_STREAM_OUTPUT_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STREAM_OUTPUT_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE 0x0748
+#define NVC697_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE 0:0
+#define NVC697_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_FALSE 0x00000000
+#define NVC697_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_HYBRID_ANTI_ALIAS_CONTROL 0x0754
+#define NVC697_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES 3:0
+#define NVC697_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID 4:4
+#define NVC697_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_FRAGMENT 0x00000000
+#define NVC697_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_PASS 0x00000001
+#define NVC697_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES_EXTENDED 5:5
+
+#define NVC697_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c
+#define NVC697_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0
+
+#define NVC697_SET_SHADER_LOCAL_MEMORY_A 0x0790
+#define NVC697_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0
+
+#define NVC697_SET_SHADER_LOCAL_MEMORY_B 0x0794
+#define NVC697_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0
+
+#define NVC697_SET_SHADER_LOCAL_MEMORY_C 0x0798
+#define NVC697_SET_SHADER_LOCAL_MEMORY_C_SIZE_UPPER 5:0
+
+#define NVC697_SET_SHADER_LOCAL_MEMORY_D 0x079c
+#define NVC697_SET_SHADER_LOCAL_MEMORY_D_SIZE_LOWER 31:0
+
+#define NVC697_SET_SHADER_LOCAL_MEMORY_E 0x07a0
+#define NVC697_SET_SHADER_LOCAL_MEMORY_E_DEFAULT_SIZE_PER_WARP 25:0
+
+#define NVC697_SET_COLOR_ZERO_BANDWIDTH_CLEAR 0x07a4
+#define NVC697_SET_COLOR_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVC697_SET_Z_ZERO_BANDWIDTH_CLEAR 0x07a8
+#define NVC697_SET_Z_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVC697_SET_STENCIL_ZERO_BANDWIDTH_CLEAR 0x07b0
+#define NVC697_SET_STENCIL_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVC697_SET_ZCULL_REGION_SIZE_A 0x07c0
+#define NVC697_SET_ZCULL_REGION_SIZE_A_WIDTH 15:0
+
+#define NVC697_SET_ZCULL_REGION_SIZE_B 0x07c4
+#define NVC697_SET_ZCULL_REGION_SIZE_B_HEIGHT 15:0
+
+#define NVC697_SET_ZCULL_REGION_SIZE_C 0x07c8
+#define NVC697_SET_ZCULL_REGION_SIZE_C_DEPTH 15:0
+
+#define NVC697_SET_ZCULL_REGION_PIXEL_OFFSET_C 0x07cc
+#define NVC697_SET_ZCULL_REGION_PIXEL_OFFSET_C_DEPTH 15:0
+
+#define NVC697_SET_CULL_BEFORE_FETCH 0x07dc
+#define NVC697_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE 0:0
+#define NVC697_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_FALSE 0x00000000
+#define NVC697_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_TRUE 0x00000001
+
+#define NVC697_SET_ZCULL_REGION_LOCATION 0x07e0
+#define NVC697_SET_ZCULL_REGION_LOCATION_START_ALIQUOT 15:0
+#define NVC697_SET_ZCULL_REGION_LOCATION_ALIQUOT_COUNT 31:16
+
+#define NVC697_SET_ZCULL_REGION_ALIQUOTS 0x07e4
+#define NVC697_SET_ZCULL_REGION_ALIQUOTS_PER_LAYER 15:0
+
+#define NVC697_SET_ZCULL_STORAGE_A 0x07e8
+#define NVC697_SET_ZCULL_STORAGE_A_ADDRESS_UPPER 7:0
+
+#define NVC697_SET_ZCULL_STORAGE_B 0x07ec
+#define NVC697_SET_ZCULL_STORAGE_B_ADDRESS_LOWER 31:0
+
+#define NVC697_SET_ZCULL_STORAGE_C 0x07f0
+#define NVC697_SET_ZCULL_STORAGE_C_LIMIT_ADDRESS_UPPER 7:0
+
+#define NVC697_SET_ZCULL_STORAGE_D 0x07f4
+#define NVC697_SET_ZCULL_STORAGE_D_LIMIT_ADDRESS_LOWER 31:0
+
+#define NVC697_SET_ZT_READ_ONLY 0x07f8
+#define NVC697_SET_ZT_READ_ONLY_ENABLE_Z 0:0
+#define NVC697_SET_ZT_READ_ONLY_ENABLE_Z_FALSE 0x00000000
+#define NVC697_SET_ZT_READ_ONLY_ENABLE_Z_TRUE 0x00000001
+#define NVC697_SET_ZT_READ_ONLY_ENABLE_STENCIL 4:4
+#define NVC697_SET_ZT_READ_ONLY_ENABLE_STENCIL_FALSE 0x00000000
+#define NVC697_SET_ZT_READ_ONLY_ENABLE_STENCIL_TRUE 0x00000001
+
+#define NVC697_SET_COLOR_TARGET_A(j) (0x0800+(j)*64)
+#define NVC697_SET_COLOR_TARGET_A_OFFSET_UPPER 7:0
+
+#define NVC697_SET_COLOR_TARGET_B(j) (0x0804+(j)*64)
+#define NVC697_SET_COLOR_TARGET_B_OFFSET_LOWER 31:0
+
+#define NVC697_SET_COLOR_TARGET_WIDTH(j) (0x0808+(j)*64)
+#define NVC697_SET_COLOR_TARGET_WIDTH_V 27:0
+
+#define NVC697_SET_COLOR_TARGET_HEIGHT(j) (0x080c+(j)*64)
+#define NVC697_SET_COLOR_TARGET_HEIGHT_V 16:0
+
+#define NVC697_SET_COLOR_TARGET_FORMAT(j) (0x0810+(j)*64)
+#define NVC697_SET_COLOR_TARGET_FORMAT_V 7:0
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_DISABLED 0x00000000
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_AS32 0x000000C1
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_AU32 0x000000C2
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_X32 0x000000C4
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_X32 0x000000C5
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_R16_G16_B16_A16 0x000000C6
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RS16_GS16_BS16_AS16 0x000000C8
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RU16_GU16_BU16_AU16 0x000000C9
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RF32_GF32 0x000000CB
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RS32_GS32 0x000000CC
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RU32_GU32 0x000000CD
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_A8R8G8B8 0x000000CF
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_A8RL8GL8BL8 0x000000D0
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_A2B10G10R10 0x000000D1
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_AU2BU10GU10RU10 0x000000D2
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_A8B8G8R8 0x000000D5
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_A8BL8GL8RL8 0x000000D6
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_AN8BN8GN8RN8 0x000000D7
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_AS8BS8GS8RS8 0x000000D8
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_AU8BU8GU8RU8 0x000000D9
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_R16_G16 0x000000DA
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RN16_GN16 0x000000DB
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RS16_GS16 0x000000DC
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RU16_GU16 0x000000DD
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RF16_GF16 0x000000DE
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_A2R10G10B10 0x000000DF
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_BF10GF11RF11 0x000000E0
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RS32 0x000000E3
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RU32 0x000000E4
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RF32 0x000000E5
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_X8R8G8B8 0x000000E6
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_X8RL8GL8BL8 0x000000E7
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_R5G6B5 0x000000E8
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_A1R5G5B5 0x000000E9
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_G8R8 0x000000EA
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_GN8RN8 0x000000EB
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_GS8RS8 0x000000EC
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_GU8RU8 0x000000ED
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_R16 0x000000EE
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RN16 0x000000EF
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RS16 0x000000F0
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RU16 0x000000F1
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RF16 0x000000F2
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_R8 0x000000F3
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RN8 0x000000F4
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RS8 0x000000F5
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RU8 0x000000F6
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_A8 0x000000F7
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_X1R5G5B5 0x000000F8
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_X8B8G8R8 0x000000F9
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_X8BL8GL8RL8 0x000000FA
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_Z1R5G5B5 0x000000FB
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_O1R5G5B5 0x000000FC
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_Z8R8G8B8 0x000000FD
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_O8R8G8B8 0x000000FE
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_R32 0x000000FF
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_A16 0x00000040
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_AF16 0x00000041
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_AF32 0x00000042
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_A8R8 0x00000043
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_R16_A16 0x00000044
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RF16_AF16 0x00000045
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_RF32_AF32 0x00000046
+#define NVC697_SET_COLOR_TARGET_FORMAT_V_B8G8R8A8 0x00000047
+
+#define NVC697_SET_COLOR_TARGET_MEMORY(j) (0x0814+(j)*64)
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH 3:0
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH_ONE_GOB 0x00000000
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT 7:4
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_ONE_GOB 0x00000000
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_TWO_GOBS 0x00000001
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH 11:8
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_ONE_GOB 0x00000000
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_TWO_GOBS 0x00000001
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_FOUR_GOBS 0x00000002
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_EIGHT_GOBS 0x00000003
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVC697_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005
+#define NVC697_SET_COLOR_TARGET_MEMORY_LAYOUT 12:12
+#define NVC697_SET_COLOR_TARGET_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NVC697_SET_COLOR_TARGET_MEMORY_LAYOUT_PITCH 0x00000001
+#define NVC697_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL 16:16
+#define NVC697_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000
+#define NVC697_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_DEPTH_SIZE 0x00000001
+
+#define NVC697_SET_COLOR_TARGET_THIRD_DIMENSION(j) (0x0818+(j)*64)
+#define NVC697_SET_COLOR_TARGET_THIRD_DIMENSION_V 27:0
+
+#define NVC697_SET_COLOR_TARGET_ARRAY_PITCH(j) (0x081c+(j)*64)
+#define NVC697_SET_COLOR_TARGET_ARRAY_PITCH_V 31:0
+
+#define NVC697_SET_COLOR_TARGET_LAYER(j) (0x0820+(j)*64)
+#define NVC697_SET_COLOR_TARGET_LAYER_OFFSET 15:0
+
+#define NVC697_SET_COLOR_TARGET_RESERVED_A(j) (0x0824+(j)*64)
+#define NVC697_SET_COLOR_TARGET_RESERVED_A_V 0:0
+
+#define NVC697_SET_VIEWPORT_SCALE_X(j) (0x0a00+(j)*32)
+#define NVC697_SET_VIEWPORT_SCALE_X_V 31:0
+
+#define NVC697_SET_VIEWPORT_SCALE_Y(j) (0x0a04+(j)*32)
+#define NVC697_SET_VIEWPORT_SCALE_Y_V 31:0
+
+#define NVC697_SET_VIEWPORT_SCALE_Z(j) (0x0a08+(j)*32)
+#define NVC697_SET_VIEWPORT_SCALE_Z_V 31:0
+
+#define NVC697_SET_VIEWPORT_OFFSET_X(j) (0x0a0c+(j)*32)
+#define NVC697_SET_VIEWPORT_OFFSET_X_V 31:0
+
+#define NVC697_SET_VIEWPORT_OFFSET_Y(j) (0x0a10+(j)*32)
+#define NVC697_SET_VIEWPORT_OFFSET_Y_V 31:0
+
+#define NVC697_SET_VIEWPORT_OFFSET_Z(j) (0x0a14+(j)*32)
+#define NVC697_SET_VIEWPORT_OFFSET_Z_V 31:0
+
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE(j) (0x0a18+(j)*32)
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_X 2:0
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_X 0x00000000
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_X 0x00000001
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Y 0x00000002
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Y 0x00000003
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Z 0x00000004
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Z 0x00000005
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_W 0x00000006
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_W 0x00000007
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Y 6:4
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_X 0x00000000
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_X 0x00000001
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Y 0x00000002
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Y 0x00000003
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Z 0x00000004
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Z 0x00000005
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_W 0x00000006
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_W 0x00000007
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Z 10:8
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_X 0x00000000
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_X 0x00000001
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Y 0x00000002
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Y 0x00000003
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Z 0x00000004
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Z 0x00000005
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_W 0x00000006
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_W 0x00000007
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_W 14:12
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_X 0x00000000
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_X 0x00000001
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Y 0x00000002
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Y 0x00000003
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Z 0x00000004
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Z 0x00000005
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_W 0x00000006
+#define NVC697_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_W 0x00000007
+
+#define NVC697_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION(j) (0x0a1c+(j)*32)
+#define NVC697_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_X_BITS 4:0
+#define NVC697_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_Y_BITS 12:8
+
+#define NVC697_SET_VIEWPORT_CLIP_HORIZONTAL(j) (0x0c00+(j)*16)
+#define NVC697_SET_VIEWPORT_CLIP_HORIZONTAL_X0 15:0
+#define NVC697_SET_VIEWPORT_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVC697_SET_VIEWPORT_CLIP_VERTICAL(j) (0x0c04+(j)*16)
+#define NVC697_SET_VIEWPORT_CLIP_VERTICAL_Y0 15:0
+#define NVC697_SET_VIEWPORT_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVC697_SET_VIEWPORT_CLIP_MIN_Z(j) (0x0c08+(j)*16)
+#define NVC697_SET_VIEWPORT_CLIP_MIN_Z_V 31:0
+
+#define NVC697_SET_VIEWPORT_CLIP_MAX_Z(j) (0x0c0c+(j)*16)
+#define NVC697_SET_VIEWPORT_CLIP_MAX_Z_V 31:0
+
+#define NVC697_SET_WINDOW_CLIP_HORIZONTAL(j) (0x0d00+(j)*8)
+#define NVC697_SET_WINDOW_CLIP_HORIZONTAL_XMIN 15:0
+#define NVC697_SET_WINDOW_CLIP_HORIZONTAL_XMAX 31:16
+
+#define NVC697_SET_WINDOW_CLIP_VERTICAL(j) (0x0d04+(j)*8)
+#define NVC697_SET_WINDOW_CLIP_VERTICAL_YMIN 15:0
+#define NVC697_SET_WINDOW_CLIP_VERTICAL_YMAX 31:16
+
+#define NVC697_SET_CLIP_ID_EXTENT_X(j) (0x0d40+(j)*8)
+#define NVC697_SET_CLIP_ID_EXTENT_X_MINX 15:0
+#define NVC697_SET_CLIP_ID_EXTENT_X_WIDTH 31:16
+
+#define NVC697_SET_CLIP_ID_EXTENT_Y(j) (0x0d44+(j)*8)
+#define NVC697_SET_CLIP_ID_EXTENT_Y_MINY 15:0
+#define NVC697_SET_CLIP_ID_EXTENT_Y_HEIGHT 31:16
+
+#define NVC697_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK 0x0d60
+#define NVC697_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVC697_SET_API_VISIBLE_CALL_LIMIT 0x0d64
+#define NVC697_SET_API_VISIBLE_CALL_LIMIT_V 3:0
+#define NVC697_SET_API_VISIBLE_CALL_LIMIT_V__0 0x00000000
+#define NVC697_SET_API_VISIBLE_CALL_LIMIT_V__1 0x00000001
+#define NVC697_SET_API_VISIBLE_CALL_LIMIT_V__2 0x00000002
+#define NVC697_SET_API_VISIBLE_CALL_LIMIT_V__4 0x00000003
+#define NVC697_SET_API_VISIBLE_CALL_LIMIT_V__8 0x00000004
+#define NVC697_SET_API_VISIBLE_CALL_LIMIT_V__16 0x00000005
+#define NVC697_SET_API_VISIBLE_CALL_LIMIT_V__32 0x00000006
+#define NVC697_SET_API_VISIBLE_CALL_LIMIT_V__64 0x00000007
+#define NVC697_SET_API_VISIBLE_CALL_LIMIT_V__128 0x00000008
+#define NVC697_SET_API_VISIBLE_CALL_LIMIT_V_NO_CHECK 0x0000000F
+
+#define NVC697_SET_STATISTICS_COUNTER 0x0d68
+#define NVC697_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE 0:0
+#define NVC697_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE 1:1
+#define NVC697_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE 2:2
+#define NVC697_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE 3:3
+#define NVC697_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE 4:4
+#define NVC697_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE 5:5
+#define NVC697_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE 6:6
+#define NVC697_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE 7:7
+#define NVC697_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE 8:8
+#define NVC697_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE 9:9
+#define NVC697_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE 11:11
+#define NVC697_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE 12:12
+#define NVC697_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE 13:13
+#define NVC697_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE 14:14
+#define NVC697_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE 10:10
+#define NVC697_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE 15:15
+#define NVC697_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_TRUE 0x00000001
+#define NVC697_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE 16:16
+#define NVC697_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_CLEAR_RECT_HORIZONTAL 0x0d6c
+#define NVC697_SET_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVC697_SET_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVC697_SET_CLEAR_RECT_VERTICAL 0x0d70
+#define NVC697_SET_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVC697_SET_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVC697_SET_VERTEX_ARRAY_START 0x0d74
+#define NVC697_SET_VERTEX_ARRAY_START_V 31:0
+
+#define NVC697_DRAW_VERTEX_ARRAY 0x0d78
+#define NVC697_DRAW_VERTEX_ARRAY_COUNT 31:0
+
+#define NVC697_SET_VIEWPORT_Z_CLIP 0x0d7c
+#define NVC697_SET_VIEWPORT_Z_CLIP_RANGE 0:0
+#define NVC697_SET_VIEWPORT_Z_CLIP_RANGE_NEGATIVE_W_TO_POSITIVE_W 0x00000000
+#define NVC697_SET_VIEWPORT_Z_CLIP_RANGE_ZERO_TO_POSITIVE_W 0x00000001
+
+#define NVC697_SET_COLOR_CLEAR_VALUE(i) (0x0d80+(i)*4)
+#define NVC697_SET_COLOR_CLEAR_VALUE_V 31:0
+
+#define NVC697_SET_Z_CLEAR_VALUE 0x0d90
+#define NVC697_SET_Z_CLEAR_VALUE_V 31:0
+
+#define NVC697_SET_SHADER_CACHE_CONTROL 0x0d94
+#define NVC697_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0
+#define NVC697_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000
+#define NVC697_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001
+
+#define NVC697_FORCE_TRANSITION_TO_BETA 0x0d98
+#define NVC697_FORCE_TRANSITION_TO_BETA_V 0:0
+
+#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_ENABLE 0x0d9c
+#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V 0:0
+#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_FALSE 0x00000000
+#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_TRUE 0x00000001
+
+#define NVC697_SET_STENCIL_CLEAR_VALUE 0x0da0
+#define NVC697_SET_STENCIL_CLEAR_VALUE_V 7:0
+
+#define NVC697_INVALIDATE_SHADER_CACHES_NO_WFI 0x0da4
+#define NVC697_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0
+#define NVC697_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000
+#define NVC697_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001
+#define NVC697_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4
+#define NVC697_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000
+#define NVC697_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001
+#define NVC697_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12
+#define NVC697_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000
+#define NVC697_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001
+
+#define NVC697_SET_ZCULL_SERIALIZATION 0x0da8
+#define NVC697_SET_ZCULL_SERIALIZATION_ENABLE 0:0
+#define NVC697_SET_ZCULL_SERIALIZATION_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ZCULL_SERIALIZATION_ENABLE_TRUE 0x00000001
+#define NVC697_SET_ZCULL_SERIALIZATION_APPLIED 5:4
+#define NVC697_SET_ZCULL_SERIALIZATION_APPLIED_ALWAYS 0x00000000
+#define NVC697_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z 0x00000001
+#define NVC697_SET_ZCULL_SERIALIZATION_APPLIED_OUT_OF_GAMUT_Z 0x00000002
+#define NVC697_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z_OR_OUT_OF_GAMUT_Z 0x00000003
+
+#define NVC697_SET_FRONT_POLYGON_MODE 0x0dac
+#define NVC697_SET_FRONT_POLYGON_MODE_V 31:0
+#define NVC697_SET_FRONT_POLYGON_MODE_V_POINT 0x00001B00
+#define NVC697_SET_FRONT_POLYGON_MODE_V_LINE 0x00001B01
+#define NVC697_SET_FRONT_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVC697_SET_BACK_POLYGON_MODE 0x0db0
+#define NVC697_SET_BACK_POLYGON_MODE_V 31:0
+#define NVC697_SET_BACK_POLYGON_MODE_V_POINT 0x00001B00
+#define NVC697_SET_BACK_POLYGON_MODE_V_LINE 0x00001B01
+#define NVC697_SET_BACK_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVC697_SET_POLY_SMOOTH 0x0db4
+#define NVC697_SET_POLY_SMOOTH_ENABLE 0:0
+#define NVC697_SET_POLY_SMOOTH_ENABLE_FALSE 0x00000000
+#define NVC697_SET_POLY_SMOOTH_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_ZCULL_DIR_FORMAT 0x0dbc
+#define NVC697_SET_ZCULL_DIR_FORMAT_ZDIR 15:0
+#define NVC697_SET_ZCULL_DIR_FORMAT_ZDIR_LESS 0x00000000
+#define NVC697_SET_ZCULL_DIR_FORMAT_ZDIR_GREATER 0x00000001
+#define NVC697_SET_ZCULL_DIR_FORMAT_ZFORMAT 31:16
+#define NVC697_SET_ZCULL_DIR_FORMAT_ZFORMAT_MSB 0x00000000
+#define NVC697_SET_ZCULL_DIR_FORMAT_ZFORMAT_FP 0x00000001
+#define NVC697_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZTRICK 0x00000002
+#define NVC697_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZF32_1 0x00000003
+
+#define NVC697_SET_POLY_OFFSET_POINT 0x0dc0
+#define NVC697_SET_POLY_OFFSET_POINT_ENABLE 0:0
+#define NVC697_SET_POLY_OFFSET_POINT_ENABLE_FALSE 0x00000000
+#define NVC697_SET_POLY_OFFSET_POINT_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_POLY_OFFSET_LINE 0x0dc4
+#define NVC697_SET_POLY_OFFSET_LINE_ENABLE 0:0
+#define NVC697_SET_POLY_OFFSET_LINE_ENABLE_FALSE 0x00000000
+#define NVC697_SET_POLY_OFFSET_LINE_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_POLY_OFFSET_FILL 0x0dc8
+#define NVC697_SET_POLY_OFFSET_FILL_ENABLE 0:0
+#define NVC697_SET_POLY_OFFSET_FILL_ENABLE_FALSE 0x00000000
+#define NVC697_SET_POLY_OFFSET_FILL_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_PATCH 0x0dcc
+#define NVC697_SET_PATCH_SIZE 7:0
+
+#define NVC697_SET_ITERATED_BLEND 0x0dd0
+#define NVC697_SET_ITERATED_BLEND_ENABLE 0:0
+#define NVC697_SET_ITERATED_BLEND_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ITERATED_BLEND_ENABLE_TRUE 0x00000001
+#define NVC697_SET_ITERATED_BLEND_ALPHA_ENABLE 1:1
+#define NVC697_SET_ITERATED_BLEND_ALPHA_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ITERATED_BLEND_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_ITERATED_BLEND_PASS 0x0dd4
+#define NVC697_SET_ITERATED_BLEND_PASS_COUNT 7:0
+
+#define NVC697_SET_ZCULL_CRITERION 0x0dd8
+#define NVC697_SET_ZCULL_CRITERION_SFUNC 7:0
+#define NVC697_SET_ZCULL_CRITERION_SFUNC_NEVER 0x00000000
+#define NVC697_SET_ZCULL_CRITERION_SFUNC_LESS 0x00000001
+#define NVC697_SET_ZCULL_CRITERION_SFUNC_EQUAL 0x00000002
+#define NVC697_SET_ZCULL_CRITERION_SFUNC_LEQUAL 0x00000003
+#define NVC697_SET_ZCULL_CRITERION_SFUNC_GREATER 0x00000004
+#define NVC697_SET_ZCULL_CRITERION_SFUNC_NOTEQUAL 0x00000005
+#define NVC697_SET_ZCULL_CRITERION_SFUNC_GEQUAL 0x00000006
+#define NVC697_SET_ZCULL_CRITERION_SFUNC_ALWAYS 0x00000007
+#define NVC697_SET_ZCULL_CRITERION_NO_INVALIDATE 8:8
+#define NVC697_SET_ZCULL_CRITERION_NO_INVALIDATE_FALSE 0x00000000
+#define NVC697_SET_ZCULL_CRITERION_NO_INVALIDATE_TRUE 0x00000001
+#define NVC697_SET_ZCULL_CRITERION_FORCE_MATCH 9:9
+#define NVC697_SET_ZCULL_CRITERION_FORCE_MATCH_FALSE 0x00000000
+#define NVC697_SET_ZCULL_CRITERION_FORCE_MATCH_TRUE 0x00000001
+#define NVC697_SET_ZCULL_CRITERION_SREF 23:16
+#define NVC697_SET_ZCULL_CRITERION_SMASK 31:24
+
+#define NVC697_PIXEL_SHADER_BARRIER 0x0de0
+#define NVC697_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE 0:0
+#define NVC697_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_FALSE 0x00000000
+#define NVC697_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_SM_TIMEOUT_INTERVAL 0x0de4
+#define NVC697_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0
+
+#define NVC697_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY 0x0de8
+#define NVC697_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE 0:0
+#define NVC697_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_FALSE 0x00000000
+#define NVC697_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_TRUE 0x00000001
+
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER 0x0df0
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER_V 7:0
+
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION 0x0df4
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC 2:0
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_FALSE 0x00000000
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_TRUE 0x00000001
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_EQ 0x00000002
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_NE 0x00000003
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LT 0x00000004
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LE 0x00000005
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GT 0x00000006
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GE 0x00000007
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION 5:3
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD_PRODUCTS 0x00000000
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUB_PRODUCTS 0x00000001
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MIN 0x00000002
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MAX 0x00000003
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_RCP 0x00000004
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD 0x00000005
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUBTRACT 0x00000006
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT 8:6
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT0 0x00000000
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT1 0x00000001
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT2 0x00000002
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT3 0x00000003
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT4 0x00000004
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT5 0x00000005
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT6 0x00000006
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT7 0x00000007
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT 11:9
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_RGB 0x00000000
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_RGB 0x00000001
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_AAA 0x00000002
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_AAA 0x00000003
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP0_RGB 0x00000004
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP1_RGB 0x00000005
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP2_RGB 0x00000006
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_PBR_RGB 0x00000007
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT 15:12
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO 0x00000000
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE 0x00000001
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_RGB 0x00000002
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_AAA 0x00000003
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_RGB 0x00000005
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_AAA 0x00000006
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP0_RGB 0x00000009
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP1_RGB 0x0000000A
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP2_RGB 0x0000000B
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_PBR_RGB 0x0000000C
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_CONSTANT_RGB 0x0000000D
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO_A_TIMES_B 0x0000000E
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT 18:16
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_RGB 0x00000000
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_RGB 0x00000001
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_AAA 0x00000002
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_AAA 0x00000003
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP0_RGB 0x00000004
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP1_RGB 0x00000005
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP2_RGB 0x00000006
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_PBR_RGB 0x00000007
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT 22:19
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO 0x00000000
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE 0x00000001
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_RGB 0x00000002
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_AAA 0x00000003
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_RGB 0x00000005
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_AAA 0x00000006
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP0_RGB 0x00000009
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP1_RGB 0x0000000A
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP2_RGB 0x0000000B
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_PBR_RGB 0x0000000C
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_CONSTANT_RGB 0x0000000D
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO_C_TIMES_D 0x0000000E
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE 25:23
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RGB 0x00000000
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GBR 0x00000001
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RRR 0x00000002
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GGG 0x00000003
+#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_BBB 0x00000004 +#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_R_TO_A 0x00000005 +#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK 27:26 +#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_RGB 0x00000000 +#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_R_ONLY 0x00000001 +#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_G_ONLY 0x00000002 +#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_B_ONLY 0x00000003 +#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT 29:28 +#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP0 0x00000000 +#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP1 0x00000001 +#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP2 0x00000002 +#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_NONE 0x00000003 +#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC 31:31 +#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_FALSE 0x00000000 +#define NVC697_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_TRUE 0x00000001 + +#define NVC697_SET_WINDOW_OFFSET_X 0x0df8 +#define NVC697_SET_WINDOW_OFFSET_X_V 16:0 + +#define NVC697_SET_WINDOW_OFFSET_Y 0x0dfc +#define NVC697_SET_WINDOW_OFFSET_Y_V 17:0 + +#define NVC697_SET_SCISSOR_ENABLE(j) (0x0e00+(j)*16) +#define NVC697_SET_SCISSOR_ENABLE_V 0:0 +#define NVC697_SET_SCISSOR_ENABLE_V_FALSE 0x00000000 +#define NVC697_SET_SCISSOR_ENABLE_V_TRUE 0x00000001 + +#define NVC697_SET_SCISSOR_HORIZONTAL(j) (0x0e04+(j)*16) +#define NVC697_SET_SCISSOR_HORIZONTAL_XMIN 15:0 +#define NVC697_SET_SCISSOR_HORIZONTAL_XMAX 31:16 + +#define NVC697_SET_SCISSOR_VERTICAL(j) (0x0e08+(j)*16) +#define NVC697_SET_SCISSOR_VERTICAL_YMIN 15:0 +#define NVC697_SET_SCISSOR_VERTICAL_YMAX 31:16 + +#define NVC697_SET_VPC_PERF_KNOB 0x0f14 +#define NVC697_SET_VPC_PERF_KNOB_CULLED_SMALL_LINES 7:0 +#define NVC697_SET_VPC_PERF_KNOB_CULLED_SMALL_TRIANGLES 15:8 +#define NVC697_SET_VPC_PERF_KNOB_NONCULLED_LINES_AND_POINTS 23:16 +#define NVC697_SET_VPC_PERF_KNOB_NONCULLED_TRIANGLES 31:24 + +#define NVC697_PM_LOCAL_TRIGGER 0x0f18 +#define NVC697_PM_LOCAL_TRIGGER_BOOKMARK 15:0 + +#define NVC697_SET_POST_Z_PS_IMASK 0x0f1c +#define NVC697_SET_POST_Z_PS_IMASK_ENABLE 0:0 +#define NVC697_SET_POST_Z_PS_IMASK_ENABLE_FALSE 0x00000000 +#define NVC697_SET_POST_Z_PS_IMASK_ENABLE_TRUE 0x00000001 + +#define NVC697_SET_CONSTANT_COLOR_RENDERING 0x0f40 +#define NVC697_SET_CONSTANT_COLOR_RENDERING_ENABLE 0:0 +#define NVC697_SET_CONSTANT_COLOR_RENDERING_ENABLE_FALSE 0x00000000 +#define NVC697_SET_CONSTANT_COLOR_RENDERING_ENABLE_TRUE 0x00000001 + +#define NVC697_SET_CONSTANT_COLOR_RENDERING_RED 0x0f44 +#define NVC697_SET_CONSTANT_COLOR_RENDERING_RED_V 31:0 + +#define NVC697_SET_CONSTANT_COLOR_RENDERING_GREEN 0x0f48 +#define NVC697_SET_CONSTANT_COLOR_RENDERING_GREEN_V 31:0 + +#define NVC697_SET_CONSTANT_COLOR_RENDERING_BLUE 0x0f4c +#define NVC697_SET_CONSTANT_COLOR_RENDERING_BLUE_V 31:0 + +#define NVC697_SET_CONSTANT_COLOR_RENDERING_ALPHA 0x0f50 +#define NVC697_SET_CONSTANT_COLOR_RENDERING_ALPHA_V 31:0 + +#define NVC697_SET_BACK_STENCIL_FUNC_REF 0x0f54 +#define NVC697_SET_BACK_STENCIL_FUNC_REF_V 7:0 + +#define NVC697_SET_BACK_STENCIL_MASK 0x0f58 +#define NVC697_SET_BACK_STENCIL_MASK_V 7:0 + +#define NVC697_SET_BACK_STENCIL_FUNC_MASK 0x0f5c +#define NVC697_SET_BACK_STENCIL_FUNC_MASK_V 7:0 + +#define NVC697_SET_VERTEX_STREAM_SUBSTITUTE_A 0x0f84 +#define NVC697_SET_VERTEX_STREAM_SUBSTITUTE_A_ADDRESS_UPPER 7:0 + 
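A minimal sketch of how definitions like those above are typically consumed: each field is encoded as a HIGH:LOW bit-position pair (e.g. 15:0), indexed methods such as NVC697_SET_SCISSOR_HORIZONTAL(j) fold the per-unit stride into the offset, and 40-bit surface addresses are split across _A (upper 8 bits) / _B (lower 32 bits) method pairs like SET_VERTEX_STREAM_SUBSTITUTE_A/_B here. The SKETCH_* helpers below are illustrative stand-ins, assumed rather than taken from the tree; in real driver code the DRF-style macros play this role.

#include <stdint.h>

/* With drf expanding to a HI:LO pair such as 15:0, the ternary trick
 * (1 ? drf) selects HI and (0 ? drf) selects LO. */
#define SKETCH_LO(drf)     ((0 ? drf) % 32)
#define SKETCH_HI(drf)     ((1 ? drf) % 32)
#define SKETCH_MASK(drf)   (0xFFFFFFFFu >> (31 - SKETCH_HI(drf) + SKETCH_LO(drf)))
#define SKETCH_NUM(drf, n) (((uint32_t)(n) & SKETCH_MASK(drf)) << SKETCH_LO(drf))

/* Pack the data word for SET_SCISSOR_HORIZONTAL(j): XMIN in 15:0, XMAX in 31:16. */
static inline uint32_t sketch_scissor_horizontal(uint16_t xmin, uint16_t xmax)
{
    return SKETCH_NUM(NVC697_SET_SCISSOR_HORIZONTAL_XMIN, xmin) |
           SKETCH_NUM(NVC697_SET_SCISSOR_HORIZONTAL_XMAX, xmax);
}

/* Split a 40-bit address across an upper/lower method pair. */
static inline void sketch_split_address(uint64_t addr, uint32_t *upper, uint32_t *lower)
{
    *upper = SKETCH_NUM(NVC697_SET_VERTEX_STREAM_SUBSTITUTE_A_ADDRESS_UPPER,
                        (uint32_t)(addr >> 32));
    *lower = (uint32_t)(addr & 0xFFFFFFFFu);
}

The method offset for scissor unit j would then be NVC697_SET_SCISSOR_HORIZONTAL(j), i.e. 0x0e04 + j*16, so a caller only ever computes an offset plus a packed 32-bit data word.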
+#define NVC697_SET_VERTEX_STREAM_SUBSTITUTE_B 0x0f88 +#define NVC697_SET_VERTEX_STREAM_SUBSTITUTE_B_ADDRESS_LOWER 31:0 + +#define NVC697_SET_LINE_MODE_POLYGON_CLIP 0x0f8c +#define NVC697_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE 0:0 +#define NVC697_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DRAW_LINE 0x00000000 +#define NVC697_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DO_NOT_DRAW_LINE 0x00000001 + +#define NVC697_SET_SINGLE_CT_WRITE_CONTROL 0x0f90 +#define NVC697_SET_SINGLE_CT_WRITE_CONTROL_ENABLE 0:0 +#define NVC697_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_FALSE 0x00000000 +#define NVC697_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_TRUE 0x00000001 + +#define NVC697_SET_VTG_WARP_WATERMARKS 0x0f98 +#define NVC697_SET_VTG_WARP_WATERMARKS_LOW 15:0 +#define NVC697_SET_VTG_WARP_WATERMARKS_HIGH 31:16 + +#define NVC697_SET_DEPTH_BOUNDS_MIN 0x0f9c +#define NVC697_SET_DEPTH_BOUNDS_MIN_V 31:0 + +#define NVC697_SET_DEPTH_BOUNDS_MAX 0x0fa0 +#define NVC697_SET_DEPTH_BOUNDS_MAX_V 31:0 + +#define NVC697_SET_SAMPLE_MASK 0x0fa4 +#define NVC697_SET_SAMPLE_MASK_RASTER_OUT_ENABLE 0:0 +#define NVC697_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_FALSE 0x00000000 +#define NVC697_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_TRUE 0x00000001 +#define NVC697_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE 4:4 +#define NVC697_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_FALSE 0x00000000 +#define NVC697_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_TRUE 0x00000001 + +#define NVC697_SET_COLOR_TARGET_SAMPLE_MASK 0x0fa8 +#define NVC697_SET_COLOR_TARGET_SAMPLE_MASK_V 15:0 + +#define NVC697_SET_CT_MRT_ENABLE 0x0fac +#define NVC697_SET_CT_MRT_ENABLE_V 0:0 +#define NVC697_SET_CT_MRT_ENABLE_V_FALSE 0x00000000 +#define NVC697_SET_CT_MRT_ENABLE_V_TRUE 0x00000001 + +#define NVC697_SET_NONMULTISAMPLED_Z 0x0fb0 +#define NVC697_SET_NONMULTISAMPLED_Z_V 0:0 +#define NVC697_SET_NONMULTISAMPLED_Z_V_PER_SAMPLE 0x00000000 +#define NVC697_SET_NONMULTISAMPLED_Z_V_AT_PIXEL_CENTER 0x00000001 + +#define NVC697_SET_TIR 0x0fb4 +#define NVC697_SET_TIR_MODE 1:0 +#define NVC697_SET_TIR_MODE_DISABLED 0x00000000 +#define NVC697_SET_TIR_MODE_RASTER_N_TARGET_M 0x00000001 + +#define NVC697_SET_ANTI_ALIAS_RASTER 0x0fb8 +#define NVC697_SET_ANTI_ALIAS_RASTER_SAMPLES 2:0 +#define NVC697_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_1X1 0x00000000 +#define NVC697_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X2 0x00000002 +#define NVC697_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X2_D3D 0x00000004 +#define NVC697_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X1_D3D 0x00000005 +#define NVC697_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X4 0x00000006 + +#define NVC697_SET_SAMPLE_MASK_X0_Y0 0x0fbc +#define NVC697_SET_SAMPLE_MASK_X0_Y0_V 15:0 + +#define NVC697_SET_SAMPLE_MASK_X1_Y0 0x0fc0 +#define NVC697_SET_SAMPLE_MASK_X1_Y0_V 15:0 + +#define NVC697_SET_SAMPLE_MASK_X0_Y1 0x0fc4 +#define NVC697_SET_SAMPLE_MASK_X0_Y1_V 15:0 + +#define NVC697_SET_SAMPLE_MASK_X1_Y1 0x0fc8 +#define NVC697_SET_SAMPLE_MASK_X1_Y1_V 15:0 + +#define NVC697_SET_SURFACE_CLIP_ID_MEMORY_A 0x0fcc +#define NVC697_SET_SURFACE_CLIP_ID_MEMORY_A_OFFSET_UPPER 7:0 + +#define NVC697_SET_SURFACE_CLIP_ID_MEMORY_B 0x0fd0 +#define NVC697_SET_SURFACE_CLIP_ID_MEMORY_B_OFFSET_LOWER 31:0 + +#define NVC697_SET_TIR_MODULATION 0x0fd4 +#define NVC697_SET_TIR_MODULATION_COMPONENT_SELECT 1:0 +#define NVC697_SET_TIR_MODULATION_COMPONENT_SELECT_NO_MODULATION 0x00000000 +#define NVC697_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGB 0x00000001 +#define NVC697_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_ALPHA_ONLY 0x00000002 +#define NVC697_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGBA 0x00000003 + +#define 
NVC697_SET_TIR_MODULATION_FUNCTION 0x0fd8 +#define NVC697_SET_TIR_MODULATION_FUNCTION_SELECT 0:0 +#define NVC697_SET_TIR_MODULATION_FUNCTION_SELECT_LINEAR 0x00000000 +#define NVC697_SET_TIR_MODULATION_FUNCTION_SELECT_TABLE 0x00000001 + +#define NVC697_SET_BLEND_OPT_CONTROL 0x0fdc +#define NVC697_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS 0:0 +#define NVC697_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_FALSE 0x00000000 +#define NVC697_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_TRUE 0x00000001 + +#define NVC697_SET_ZT_A 0x0fe0 +#define NVC697_SET_ZT_A_OFFSET_UPPER 7:0 + +#define NVC697_SET_ZT_B 0x0fe4 +#define NVC697_SET_ZT_B_OFFSET_LOWER 31:0 + +#define NVC697_SET_ZT_FORMAT 0x0fe8 +#define NVC697_SET_ZT_FORMAT_V 4:0 +#define NVC697_SET_ZT_FORMAT_V_Z16 0x00000013 +#define NVC697_SET_ZT_FORMAT_V_Z24S8 0x00000014 +#define NVC697_SET_ZT_FORMAT_V_X8Z24 0x00000015 +#define NVC697_SET_ZT_FORMAT_V_S8Z24 0x00000016 +#define NVC697_SET_ZT_FORMAT_V_S8 0x00000017 +#define NVC697_SET_ZT_FORMAT_V_V8Z24 0x00000018 +#define NVC697_SET_ZT_FORMAT_V_ZF32 0x0000000A +#define NVC697_SET_ZT_FORMAT_V_ZF32_X24S8 0x00000019 +#define NVC697_SET_ZT_FORMAT_V_X8Z24_X16V8S8 0x0000001D +#define NVC697_SET_ZT_FORMAT_V_ZF32_X16V8X8 0x0000001E +#define NVC697_SET_ZT_FORMAT_V_ZF32_X16V8S8 0x0000001F + +#define NVC697_SET_ZT_BLOCK_SIZE 0x0fec +#define NVC697_SET_ZT_BLOCK_SIZE_WIDTH 3:0 +#define NVC697_SET_ZT_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVC697_SET_ZT_BLOCK_SIZE_HEIGHT 7:4 +#define NVC697_SET_ZT_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC697_SET_ZT_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC697_SET_ZT_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC697_SET_ZT_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC697_SET_ZT_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC697_SET_ZT_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC697_SET_ZT_BLOCK_SIZE_DEPTH 11:8 +#define NVC697_SET_ZT_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 + +#define NVC697_SET_ZT_ARRAY_PITCH 0x0ff0 +#define NVC697_SET_ZT_ARRAY_PITCH_V 31:0 + +#define NVC697_SET_SURFACE_CLIP_HORIZONTAL 0x0ff4 +#define NVC697_SET_SURFACE_CLIP_HORIZONTAL_X 15:0 +#define NVC697_SET_SURFACE_CLIP_HORIZONTAL_WIDTH 31:16 + +#define NVC697_SET_SURFACE_CLIP_VERTICAL 0x0ff8 +#define NVC697_SET_SURFACE_CLIP_VERTICAL_Y 15:0 +#define NVC697_SET_SURFACE_CLIP_VERTICAL_HEIGHT 31:16 + +#define NVC697_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS 0x1000 +#define NVC697_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE 0:0 +#define NVC697_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_FALSE 0x00000000 +#define NVC697_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_TRUE 0x00000001 +#define NVC697_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY 5:4 +#define NVC697_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC697_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC697_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC697_SET_VIEWPORT_MULTICAST 0x1004 +#define NVC697_SET_VIEWPORT_MULTICAST_ORDER 0:0 +#define NVC697_SET_VIEWPORT_MULTICAST_ORDER_VIEWPORT_ORDER 0x00000000 +#define NVC697_SET_VIEWPORT_MULTICAST_ORDER_PRIMITIVE_ORDER 0x00000001 + +#define NVC697_SET_TESSELLATION_CUT_HEIGHT 0x1008 +#define NVC697_SET_TESSELLATION_CUT_HEIGHT_V 4:0 + +#define NVC697_SET_MAX_GS_INSTANCES_PER_TASK 0x100c +#define NVC697_SET_MAX_GS_INSTANCES_PER_TASK_V 10:0 + +#define NVC697_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK 0x1010 +#define 
NVC697_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK_V 15:0 + +#define NVC697_SET_RESERVED_SW_METHOD00 0x1014 +#define NVC697_SET_RESERVED_SW_METHOD00_V 31:0 + +#define NVC697_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER 0x1018 +#define NVC697_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0 + +#define NVC697_SET_BETA_CB_STORAGE_CONSTRAINT 0x101c +#define NVC697_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE 0:0 +#define NVC697_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000 +#define NVC697_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001 + +#define NVC697_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER 0x1020 +#define NVC697_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0 + +#define NVC697_SET_ALPHA_CB_STORAGE_CONSTRAINT 0x1024 +#define NVC697_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE 0:0 +#define NVC697_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000 +#define NVC697_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001 + +#define NVC697_SET_RESERVED_SW_METHOD01 0x1028 +#define NVC697_SET_RESERVED_SW_METHOD01_V 31:0 + +#define NVC697_SET_RESERVED_SW_METHOD02 0x102c +#define NVC697_SET_RESERVED_SW_METHOD02_V 31:0 + +#define NVC697_SET_TIR_MODULATION_COEFFICIENT_TABLE(i) (0x1030+(i)*4) +#define NVC697_SET_TIR_MODULATION_COEFFICIENT_TABLE_V0 7:0 +#define NVC697_SET_TIR_MODULATION_COEFFICIENT_TABLE_V1 15:8 +#define NVC697_SET_TIR_MODULATION_COEFFICIENT_TABLE_V2 23:16 +#define NVC697_SET_TIR_MODULATION_COEFFICIENT_TABLE_V3 31:24 + +#define NVC697_SET_SPARE_NOOP01 0x1044 +#define NVC697_SET_SPARE_NOOP01_V 31:0 + +#define NVC697_SET_SPARE_NOOP02 0x1048 +#define NVC697_SET_SPARE_NOOP02_V 31:0 + +#define NVC697_SET_SPARE_NOOP03 0x104c +#define NVC697_SET_SPARE_NOOP03_V 31:0 + +#define NVC697_SET_SPARE_NOOP04 0x1050 +#define NVC697_SET_SPARE_NOOP04_V 31:0 + +#define NVC697_SET_SPARE_NOOP05 0x1054 +#define NVC697_SET_SPARE_NOOP05_V 31:0 + +#define NVC697_SET_SPARE_NOOP06 0x1058 +#define NVC697_SET_SPARE_NOOP06_V 31:0 + +#define NVC697_SET_SPARE_NOOP07 0x105c +#define NVC697_SET_SPARE_NOOP07_V 31:0 + +#define NVC697_SET_SPARE_NOOP08 0x1060 +#define NVC697_SET_SPARE_NOOP08_V 31:0 + +#define NVC697_SET_SPARE_NOOP09 0x1064 +#define NVC697_SET_SPARE_NOOP09_V 31:0 + +#define NVC697_SET_SPARE_NOOP10 0x1068 +#define NVC697_SET_SPARE_NOOP10_V 31:0 + +#define NVC697_SET_SPARE_NOOP11 0x106c +#define NVC697_SET_SPARE_NOOP11_V 31:0 + +#define NVC697_SET_SPARE_NOOP12 0x1070 +#define NVC697_SET_SPARE_NOOP12_V 31:0 + +#define NVC697_SET_SPARE_NOOP13 0x1074 +#define NVC697_SET_SPARE_NOOP13_V 31:0 + +#define NVC697_SET_SPARE_NOOP14 0x1078 +#define NVC697_SET_SPARE_NOOP14_V 31:0 + +#define NVC697_SET_SPARE_NOOP15 0x107c +#define NVC697_SET_SPARE_NOOP15_V 31:0 + +#define NVC697_SET_RESERVED_SW_METHOD03 0x10b0 +#define NVC697_SET_RESERVED_SW_METHOD03_V 31:0 + +#define NVC697_SET_RESERVED_SW_METHOD04 0x10b4 +#define NVC697_SET_RESERVED_SW_METHOD04_V 31:0 + +#define NVC697_SET_RESERVED_SW_METHOD05 0x10b8 +#define NVC697_SET_RESERVED_SW_METHOD05_V 31:0 + +#define NVC697_SET_RESERVED_SW_METHOD06 0x10bc +#define NVC697_SET_RESERVED_SW_METHOD06_V 31:0 + +#define NVC697_SET_RESERVED_SW_METHOD07 0x10c0 +#define NVC697_SET_RESERVED_SW_METHOD07_V 31:0 + +#define NVC697_SET_RESERVED_SW_METHOD08 0x10c4 +#define NVC697_SET_RESERVED_SW_METHOD08_V 31:0 + +#define NVC697_SET_RESERVED_SW_METHOD09 0x10c8 +#define NVC697_SET_RESERVED_SW_METHOD09_V 31:0 + +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_UNORM8 0x10cc +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED_ALL_HIT_ONCE 7:0 +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED 23:16 + +#define 
NVC697_SET_RESERVED_SW_METHOD10 0x10d0 +#define NVC697_SET_RESERVED_SW_METHOD10_V 31:0 + +#define NVC697_SET_RESERVED_SW_METHOD11 0x10d4 +#define NVC697_SET_RESERVED_SW_METHOD11_V 31:0 + +#define NVC697_SET_RESERVED_SW_METHOD12 0x10d8 +#define NVC697_SET_RESERVED_SW_METHOD12_V 31:0 + +#define NVC697_SET_RESERVED_SW_METHOD13 0x10dc +#define NVC697_SET_RESERVED_SW_METHOD13_V 31:0 + +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_UNORM10 0x10e0 +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED_ALL_HIT_ONCE 7:0 +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED 23:16 + +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_UNORM16 0x10e4 +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED_ALL_HIT_ONCE 7:0 +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED 23:16 + +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_FP11 0x10e8 +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED_ALL_HIT_ONCE 5:0 +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED 21:16 + +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_FP16 0x10ec +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED_ALL_HIT_ONCE 7:0 +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED 23:16 + +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_SRGB8 0x10f0 +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED_ALL_HIT_ONCE 7:0 +#define NVC697_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED 23:16 + +#define NVC697_UNBIND_ALL 0x10f4 +#define NVC697_UNBIND_ALL_CONSTANT_BUFFERS 8:8 +#define NVC697_UNBIND_ALL_CONSTANT_BUFFERS_FALSE 0x00000000 +#define NVC697_UNBIND_ALL_CONSTANT_BUFFERS_TRUE 0x00000001 + +#define NVC697_SET_CLEAR_SURFACE_CONTROL 0x10f8 +#define NVC697_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK 0:0 +#define NVC697_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_FALSE 0x00000000 +#define NVC697_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_TRUE 0x00000001 +#define NVC697_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT 4:4 +#define NVC697_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_FALSE 0x00000000 +#define NVC697_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_TRUE 0x00000001 +#define NVC697_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0 8:8 +#define NVC697_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_FALSE 0x00000000 +#define NVC697_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_TRUE 0x00000001 +#define NVC697_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0 12:12 +#define NVC697_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_FALSE 0x00000000 +#define NVC697_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_TRUE 0x00000001 + +#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS 0x10fc +#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY 5:4 +#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC697_SET_RESERVED_SW_METHOD14 0x1100 +#define NVC697_SET_RESERVED_SW_METHOD14_V 31:0 + +#define NVC697_SET_RESERVED_SW_METHOD15 0x1104 +#define NVC697_SET_RESERVED_SW_METHOD15_V 31:0 + +#define NVC697_NO_OPERATION_DATA_HI 0x110c +#define NVC697_NO_OPERATION_DATA_HI_V 31:0 + +#define NVC697_SET_DEPTH_BIAS_CONTROL 0x1110 +#define NVC697_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT 0:0 +#define NVC697_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_FALSE 0x00000000 +#define 
NVC697_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_TRUE 0x00000001 + +#define NVC697_PM_TRIGGER_END 0x1114 +#define NVC697_PM_TRIGGER_END_V 31:0 + +#define NVC697_SET_VERTEX_ID_BASE 0x1118 +#define NVC697_SET_VERTEX_ID_BASE_V 31:0 + +#define NVC697_SET_STENCIL_COMPRESSION 0x111c +#define NVC697_SET_STENCIL_COMPRESSION_ENABLE 0:0 +#define NVC697_SET_STENCIL_COMPRESSION_ENABLE_FALSE 0x00000000 +#define NVC697_SET_STENCIL_COMPRESSION_ENABLE_TRUE 0x00000001 + +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A(i) (0x1120+(i)*4) +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0 0:0 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1 1:1 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2 2:2 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3 3:3 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0 4:4 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1 5:5 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2 6:6 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3 7:7 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0 8:8 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1 9:9 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2 10:10 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3 11:11 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0 12:12 +#define 
NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1 13:13 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2 14:14 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3 15:15 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0 16:16 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1 17:17 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2 18:18 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3 19:19 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0 20:20 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1 21:21 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2 22:22 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3 23:23 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0 24:24 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1 25:25 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2 26:26 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_FALSE 0x00000000 +#define 
NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3 27:27 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0 28:28 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1 29:29 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2 30:30 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3 31:31 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_TRUE 0x00000001 + +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B(i) (0x1128+(i)*4) +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0 0:0 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1 1:1 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2 2:2 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3 3:3 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0 4:4 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1 5:5 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2 6:6 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3 7:7 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0 8:8 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_FALSE 0x00000000 +#define 
NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1 9:9 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2 10:10 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3 11:11 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0 12:12 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1 13:13 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2 14:14 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3 15:15 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0 16:16 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1 17:17 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2 18:18 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3 19:19 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0 20:20 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1 21:21 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2 22:22 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_TRUE 0x00000001 +#define 
NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3 23:23 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0 24:24 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1 25:25 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2 26:26 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3 27:27 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0 28:28 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1 29:29 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2 30:30 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_TRUE 0x00000001 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3 31:31 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_FALSE 0x00000000 +#define NVC697_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_TRUE 0x00000001 + +#define NVC697_SET_TIR_CONTROL 0x1130 +#define NVC697_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES 0:0 +#define NVC697_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_DISABLE 0x00000000 +#define NVC697_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_ENABLE 0x00000001 +#define NVC697_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES 4:4 +#define NVC697_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_DISABLE 0x00000000 +#define NVC697_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_ENABLE 0x00000001 +#define NVC697_SET_TIR_CONTROL_REDUCE_COVERAGE 1:1 +#define NVC697_SET_TIR_CONTROL_REDUCE_COVERAGE_DISABLE 0x00000000 +#define NVC697_SET_TIR_CONTROL_REDUCE_COVERAGE_ENABLE 0x00000001 + +#define NVC697_SET_MUTABLE_METHOD_CONTROL 0x1134 +#define NVC697_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT 0:0 +#define NVC697_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_FALSE 0x00000000 +#define NVC697_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_TRUE 0x00000001 + +#define NVC697_SET_POST_PS_INITIAL_COVERAGE 0x1138 +#define NVC697_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE 0:0 +#define NVC697_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_FALSE 0x00000000 +#define NVC697_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_TRUE 0x00000001 + +#define 
NVC697_SET_FILL_VIA_TRIANGLE 0x113c +#define NVC697_SET_FILL_VIA_TRIANGLE_MODE 1:0 +#define NVC697_SET_FILL_VIA_TRIANGLE_MODE_DISABLED 0x00000000 +#define NVC697_SET_FILL_VIA_TRIANGLE_MODE_FILL_ALL 0x00000001 +#define NVC697_SET_FILL_VIA_TRIANGLE_MODE_FILL_BBOX 0x00000002 + +#define NVC697_SET_BLEND_PER_FORMAT_ENABLE 0x1140 +#define NVC697_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16 4:4 +#define NVC697_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_FALSE 0x00000000 +#define NVC697_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_TRUE 0x00000001 + +#define NVC697_FLUSH_PENDING_WRITES 0x1144 +#define NVC697_FLUSH_PENDING_WRITES_SM_DOES_GLOBAL_STORE 0:0 + +#define NVC697_SET_VERTEX_ATTRIBUTE_A(i) (0x1160+(i)*4) +#define NVC697_SET_VERTEX_ATTRIBUTE_A_STREAM 4:0 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_SOURCE 6:6 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_SOURCE_ACTIVE 0x00000000 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_SOURCE_INACTIVE 0x00000001 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_OFFSET 20:7 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS 26:21 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NVC697_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8 0x00000034 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE 29:27 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SNORM 0x00000001 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UNORM 0x00000002 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SINT 0x00000003 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UINT 0x00000004 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_USCALED 0x00000005 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SSCALED 0x00000006 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT 0x00000007 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B 31:31 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_FALSE 0x00000000 +#define NVC697_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_TRUE 0x00000001 + +#define NVC697_SET_VERTEX_ATTRIBUTE_B(i) (0x11a0+(i)*4) +#define NVC697_SET_VERTEX_ATTRIBUTE_B_STREAM 4:0 +#define 
NVC697_SET_VERTEX_ATTRIBUTE_B_SOURCE 6:6 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_SOURCE_ACTIVE 0x00000000 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_SOURCE_INACTIVE 0x00000001 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_OFFSET 20:7 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS 26:21 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NVC697_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8 0x00000034 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE 29:27 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SNORM 0x00000001 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UNORM 0x00000002 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SINT 0x00000003 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UINT 0x00000004 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_USCALED 0x00000005 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SSCALED 0x00000006 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_FLOAT 0x00000007 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B 31:31 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_FALSE 0x00000000 +#define NVC697_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_TRUE 0x00000001 + +#define NVC697_SET_ANTI_ALIAS_SAMPLE_POSITIONS(i) (0x11e0+(i)*4) +#define NVC697_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X0 3:0 +#define NVC697_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y0 7:4 +#define NVC697_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X1 11:8 +#define NVC697_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y1 15:12 +#define NVC697_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X2 19:16 +#define NVC697_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y2 23:20 +#define NVC697_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X3 27:24 +#define NVC697_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y3 31:28 + +#define NVC697_SET_OFFSET_RENDER_TARGET_INDEX 0x11f0 +#define NVC697_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX 0:0 +#define NVC697_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_FALSE 0x00000000 +#define NVC697_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_TRUE 0x00000001 + +#define NVC697_FORCE_HEAVYWEIGHT_METHOD_SYNC 0x11f4 +#define NVC697_FORCE_HEAVYWEIGHT_METHOD_SYNC_V 31:0 + +#define 
NVC697_SET_COVERAGE_TO_COLOR 0x11f8 +#define NVC697_SET_COVERAGE_TO_COLOR_ENABLE 0:0 +#define NVC697_SET_COVERAGE_TO_COLOR_ENABLE_FALSE 0x00000000 +#define NVC697_SET_COVERAGE_TO_COLOR_ENABLE_TRUE 0x00000001 +#define NVC697_SET_COVERAGE_TO_COLOR_CT_SELECT 6:4 + +#define NVC697_DECOMPRESS_ZETA_SURFACE 0x11fc +#define NVC697_DECOMPRESS_ZETA_SURFACE_Z_ENABLE 0:0 +#define NVC697_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_FALSE 0x00000000 +#define NVC697_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_TRUE 0x00000001 +#define NVC697_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE 4:4 +#define NVC697_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_FALSE 0x00000000 +#define NVC697_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_TRUE 0x00000001 + +#define NVC697_SET_SCREEN_STATE_MASK 0x1204 +#define NVC697_SET_SCREEN_STATE_MASK_MASK 3:0 + +#define NVC697_SET_ZT_SPARSE 0x1208 +#define NVC697_SET_ZT_SPARSE_ENABLE 0:0 +#define NVC697_SET_ZT_SPARSE_ENABLE_FALSE 0x00000000 +#define NVC697_SET_ZT_SPARSE_ENABLE_TRUE 0x00000001 +#define NVC697_SET_ZT_SPARSE_UNMAPPED_COMPARE 1:1 +#define NVC697_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_UNMAPPED_0 0x00000000 +#define NVC697_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_FAIL_ALWAYS 0x00000001 + +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST 0x1214 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX 15:0 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT 27:16 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT 0x1218 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_START_INDEX 15:0 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003 +#define 
NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC697_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NVC697_SET_CT_SELECT 0x121c +#define NVC697_SET_CT_SELECT_TARGET_COUNT 3:0 +#define NVC697_SET_CT_SELECT_TARGET0 6:4 +#define NVC697_SET_CT_SELECT_TARGET1 9:7 +#define NVC697_SET_CT_SELECT_TARGET2 12:10 +#define NVC697_SET_CT_SELECT_TARGET3 15:13 +#define NVC697_SET_CT_SELECT_TARGET4 18:16 +#define NVC697_SET_CT_SELECT_TARGET5 21:19 +#define NVC697_SET_CT_SELECT_TARGET6 24:22 +#define NVC697_SET_CT_SELECT_TARGET7 27:25 + +#define NVC697_SET_COMPRESSION_THRESHOLD 0x1220 +#define NVC697_SET_COMPRESSION_THRESHOLD_SAMPLES 3:0 +#define NVC697_SET_COMPRESSION_THRESHOLD_SAMPLES__0 0x00000000 +#define NVC697_SET_COMPRESSION_THRESHOLD_SAMPLES__1 0x00000001 +#define NVC697_SET_COMPRESSION_THRESHOLD_SAMPLES__2 0x00000002 +#define NVC697_SET_COMPRESSION_THRESHOLD_SAMPLES__4 0x00000003 +#define NVC697_SET_COMPRESSION_THRESHOLD_SAMPLES__8 0x00000004 +#define NVC697_SET_COMPRESSION_THRESHOLD_SAMPLES__16 0x00000005 +#define NVC697_SET_COMPRESSION_THRESHOLD_SAMPLES__32 0x00000006 +#define NVC697_SET_COMPRESSION_THRESHOLD_SAMPLES__64 0x00000007 +#define NVC697_SET_COMPRESSION_THRESHOLD_SAMPLES__128 0x00000008 +#define NVC697_SET_COMPRESSION_THRESHOLD_SAMPLES__256 0x00000009 +#define NVC697_SET_COMPRESSION_THRESHOLD_SAMPLES__512 0x0000000A +#define NVC697_SET_COMPRESSION_THRESHOLD_SAMPLES__1024 0x0000000B +#define NVC697_SET_COMPRESSION_THRESHOLD_SAMPLES__2048 0x0000000C + +#define NVC697_SET_PIXEL_SHADER_INTERLOCK_CONTROL 0x1224 +#define NVC697_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE 1:0 +#define NVC697_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_NO_CONFLICT_DETECT 0x00000000 +#define NVC697_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_SAMPLE 0x00000001 +#define NVC697_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_PIXEL 0x00000002 +#define NVC697_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE 2:2 +#define NVC697_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_16X16 0x00000000 +#define NVC697_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_8X8 0x00000001 +#define NVC697_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER 3:3 +#define NVC697_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_ORDERED 0x00000000 +#define NVC697_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_UNORDERED 0x00000001 + +#define NVC697_SET_ZT_SIZE_A 0x1228 +#define 
NVC697_SET_ZT_SIZE_A_WIDTH 27:0 + +#define NVC697_SET_ZT_SIZE_B 0x122c +#define NVC697_SET_ZT_SIZE_B_HEIGHT 17:0 + +#define NVC697_SET_ZT_SIZE_C 0x1230 +#define NVC697_SET_ZT_SIZE_C_THIRD_DIMENSION 15:0 +#define NVC697_SET_ZT_SIZE_C_CONTROL 16:16 +#define NVC697_SET_ZT_SIZE_C_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000 +#define NVC697_SET_ZT_SIZE_C_CONTROL_ARRAY_SIZE_IS_ONE 0x00000001 + +#define NVC697_SET_SAMPLER_BINDING 0x1234 +#define NVC697_SET_SAMPLER_BINDING_V 0:0 +#define NVC697_SET_SAMPLER_BINDING_V_INDEPENDENTLY 0x00000000 +#define NVC697_SET_SAMPLER_BINDING_V_VIA_HEADER_BINDING 0x00000001 + +#define NVC697_DRAW_AUTO 0x123c +#define NVC697_DRAW_AUTO_BYTE_COUNT 31:0 + +#define NVC697_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK(i) (0x1240+(i)*4) +#define NVC697_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK_V 31:0 + +#define NVC697_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE 0x1260 +#define NVC697_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_INDEX 7:0 +#define NVC697_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_VALUE 23:8 + +#define NVC697_SET_BACK_END_COPY_A 0x1264 +#define NVC697_SET_BACK_END_COPY_A_DWORDS 7:0 +#define NVC697_SET_BACK_END_COPY_A_SATURATE32_ENABLE 8:8 +#define NVC697_SET_BACK_END_COPY_A_SATURATE32_ENABLE_FALSE 0x00000000 +#define NVC697_SET_BACK_END_COPY_A_SATURATE32_ENABLE_TRUE 0x00000001 +#define NVC697_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE 12:12 +#define NVC697_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_FALSE 0x00000000 +#define NVC697_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_TRUE 0x00000001 + +#define NVC697_SET_BACK_END_COPY_B 0x1268 +#define NVC697_SET_BACK_END_COPY_B_SRC_ADDRESS_UPPER 7:0 + +#define NVC697_SET_BACK_END_COPY_C 0x126c +#define NVC697_SET_BACK_END_COPY_C_SRC_ADDRESS_LOWER 31:0 + +#define NVC697_SET_BACK_END_COPY_D 0x1270 +#define NVC697_SET_BACK_END_COPY_D_DEST_ADDRESS_UPPER 7:0 + +#define NVC697_SET_BACK_END_COPY_E 0x1274 +#define NVC697_SET_BACK_END_COPY_E_DEST_ADDRESS_LOWER 31:0 + +#define NVC697_SET_CIRCULAR_BUFFER_SIZE 0x1280 +#define NVC697_SET_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 19:0 + +#define NVC697_SET_VTG_REGISTER_WATERMARKS 0x1284 +#define NVC697_SET_VTG_REGISTER_WATERMARKS_LOW 15:0 +#define NVC697_SET_VTG_REGISTER_WATERMARKS_HIGH 31:16 + +#define NVC697_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVC697_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NVC697_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC697_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC697_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS 0x1290 +#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY 5:4 +#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC697_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE 0x12a4 +#define NVC697_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE_V 31:0 + +#define NVC697_CLEAR_ZCULL_REGION 0x12c8 +#define NVC697_CLEAR_ZCULL_REGION_Z_ENABLE 0:0 +#define NVC697_CLEAR_ZCULL_REGION_Z_ENABLE_FALSE 0x00000000 +#define NVC697_CLEAR_ZCULL_REGION_Z_ENABLE_TRUE 0x00000001 +#define NVC697_CLEAR_ZCULL_REGION_STENCIL_ENABLE 4:4 +#define NVC697_CLEAR_ZCULL_REGION_STENCIL_ENABLE_FALSE 0x00000000 +#define 
NVC697_CLEAR_ZCULL_REGION_STENCIL_ENABLE_TRUE 0x00000001
+#define NVC697_CLEAR_ZCULL_REGION_USE_CLEAR_RECT 1:1
+#define NVC697_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_FALSE 0x00000000
+#define NVC697_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_TRUE 0x00000001
+#define NVC697_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX 2:2
+#define NVC697_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_FALSE 0x00000000
+#define NVC697_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_TRUE 0x00000001
+#define NVC697_CLEAR_ZCULL_REGION_RT_ARRAY_INDEX 20:5
+#define NVC697_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE 3:3
+#define NVC697_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_FALSE 0x00000000
+#define NVC697_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_TRUE 0x00000001
+
+#define NVC697_SET_DEPTH_TEST 0x12cc
+#define NVC697_SET_DEPTH_TEST_ENABLE 0:0
+#define NVC697_SET_DEPTH_TEST_ENABLE_FALSE 0x00000000
+#define NVC697_SET_DEPTH_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_FILL_MODE 0x12d0
+#define NVC697_SET_FILL_MODE_V 31:0
+#define NVC697_SET_FILL_MODE_V_POINT 0x00000001
+#define NVC697_SET_FILL_MODE_V_WIREFRAME 0x00000002
+#define NVC697_SET_FILL_MODE_V_SOLID 0x00000003
+
+#define NVC697_SET_SHADE_MODE 0x12d4
+#define NVC697_SET_SHADE_MODE_V 31:0
+#define NVC697_SET_SHADE_MODE_V_FLAT 0x00000001
+#define NVC697_SET_SHADE_MODE_V_GOURAUD 0x00000002
+#define NVC697_SET_SHADE_MODE_V_OGL_FLAT 0x00001D00
+#define NVC697_SET_SHADE_MODE_V_OGL_SMOOTH 0x00001D01
+
+#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS 0x12d8
+#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY 5:4
+#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS 0x12dc
+#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY 5:4
+#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC697_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL 0x12e0
+#define NVC697_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT 3:0
+#define NVC697_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1 0x00000000
+#define NVC697_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_2X2 0x00000001
+#define NVC697_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1_VIRTUAL_SAMPLES 0x00000002
+
+#define NVC697_SET_BLEND_STATE_PER_TARGET 0x12e4
+#define NVC697_SET_BLEND_STATE_PER_TARGET_ENABLE 0:0
+#define NVC697_SET_BLEND_STATE_PER_TARGET_ENABLE_FALSE 0x00000000
+#define NVC697_SET_BLEND_STATE_PER_TARGET_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_DEPTH_WRITE 0x12e8
+#define NVC697_SET_DEPTH_WRITE_ENABLE 0:0
+#define NVC697_SET_DEPTH_WRITE_ENABLE_FALSE 0x00000000
+#define NVC697_SET_DEPTH_WRITE_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_ALPHA_TEST 0x12ec
+#define NVC697_SET_ALPHA_TEST_ENABLE 0:0
+#define NVC697_SET_ALPHA_TEST_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ALPHA_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_INLINE_INDEX4X8_ALIGN 0x1300
+#define NVC697_SET_INLINE_INDEX4X8_ALIGN_COUNT 29:0
+#define NVC697_SET_INLINE_INDEX4X8_ALIGN_START 31:30
+
+#define NVC697_DRAW_INLINE_INDEX4X8 0x1304
+#define NVC697_DRAW_INLINE_INDEX4X8_INDEX0 7:0
+#define NVC697_DRAW_INLINE_INDEX4X8_INDEX1 15:8
+#define NVC697_DRAW_INLINE_INDEX4X8_INDEX2 23:16
+#define NVC697_DRAW_INLINE_INDEX4X8_INDEX3 31:24
+
+#define NVC697_D3D_SET_CULL_MODE 0x1308
+#define NVC697_D3D_SET_CULL_MODE_V 31:0
+#define NVC697_D3D_SET_CULL_MODE_V_NONE 0x00000001
+#define NVC697_D3D_SET_CULL_MODE_V_CW 0x00000002
+#define NVC697_D3D_SET_CULL_MODE_V_CCW 0x00000003
+
+#define NVC697_SET_DEPTH_FUNC 0x130c
+#define NVC697_SET_DEPTH_FUNC_V 31:0
+#define NVC697_SET_DEPTH_FUNC_V_OGL_NEVER 0x00000200
+#define NVC697_SET_DEPTH_FUNC_V_OGL_LESS 0x00000201
+#define NVC697_SET_DEPTH_FUNC_V_OGL_EQUAL 0x00000202
+#define NVC697_SET_DEPTH_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVC697_SET_DEPTH_FUNC_V_OGL_GREATER 0x00000204
+#define NVC697_SET_DEPTH_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVC697_SET_DEPTH_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVC697_SET_DEPTH_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVC697_SET_DEPTH_FUNC_V_D3D_NEVER 0x00000001
+#define NVC697_SET_DEPTH_FUNC_V_D3D_LESS 0x00000002
+#define NVC697_SET_DEPTH_FUNC_V_D3D_EQUAL 0x00000003
+#define NVC697_SET_DEPTH_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVC697_SET_DEPTH_FUNC_V_D3D_GREATER 0x00000005
+#define NVC697_SET_DEPTH_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVC697_SET_DEPTH_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVC697_SET_DEPTH_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVC697_SET_ALPHA_REF 0x1310
+#define NVC697_SET_ALPHA_REF_V 31:0
+
+#define NVC697_SET_ALPHA_FUNC 0x1314
+#define NVC697_SET_ALPHA_FUNC_V 31:0
+#define NVC697_SET_ALPHA_FUNC_V_OGL_NEVER 0x00000200
+#define NVC697_SET_ALPHA_FUNC_V_OGL_LESS 0x00000201
+#define NVC697_SET_ALPHA_FUNC_V_OGL_EQUAL 0x00000202
+#define NVC697_SET_ALPHA_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVC697_SET_ALPHA_FUNC_V_OGL_GREATER 0x00000204
+#define NVC697_SET_ALPHA_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVC697_SET_ALPHA_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVC697_SET_ALPHA_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVC697_SET_ALPHA_FUNC_V_D3D_NEVER 0x00000001
+#define NVC697_SET_ALPHA_FUNC_V_D3D_LESS 0x00000002
+#define NVC697_SET_ALPHA_FUNC_V_D3D_EQUAL 0x00000003
+#define NVC697_SET_ALPHA_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVC697_SET_ALPHA_FUNC_V_D3D_GREATER 0x00000005
+#define NVC697_SET_ALPHA_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVC697_SET_ALPHA_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVC697_SET_ALPHA_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVC697_SET_DRAW_AUTO_STRIDE 0x1318
+#define NVC697_SET_DRAW_AUTO_STRIDE_V 11:0
+
+#define NVC697_SET_BLEND_CONST_RED 0x131c
+#define NVC697_SET_BLEND_CONST_RED_V 31:0
+
+#define NVC697_SET_BLEND_CONST_GREEN 0x1320
+#define NVC697_SET_BLEND_CONST_GREEN_V 31:0
+
+#define NVC697_SET_BLEND_CONST_BLUE 0x1324
+#define NVC697_SET_BLEND_CONST_BLUE_V 31:0
+
+#define NVC697_SET_BLEND_CONST_ALPHA 0x1328
+#define NVC697_SET_BLEND_CONST_ALPHA_V 31:0
+
+#define NVC697_INVALIDATE_SAMPLER_CACHE 0x1330
+#define NVC697_INVALIDATE_SAMPLER_CACHE_LINES 0:0
+#define NVC697_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000
+#define NVC697_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001
+#define NVC697_INVALIDATE_SAMPLER_CACHE_TAG 25:4
+
+#define NVC697_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334
+#define NVC697_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0
+#define NVC697_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000
+#define NVC697_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001
+#define NVC697_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4
+
+#define NVC697_INVALIDATE_TEXTURE_DATA_CACHE 0x1338
+#define NVC697_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0
+#define NVC697_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000
+#define NVC697_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001
+#define NVC697_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4
+
+#define NVC697_SET_BLEND_SEPARATE_FOR_ALPHA 0x133c
+#define NVC697_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE 0:0
+#define NVC697_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000
+#define NVC697_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_BLEND_COLOR_OP 0x1340
+#define NVC697_SET_BLEND_COLOR_OP_V 31:0
+#define NVC697_SET_BLEND_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVC697_SET_BLEND_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVC697_SET_BLEND_COLOR_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVC697_SET_BLEND_COLOR_OP_V_OGL_MIN 0x00008007
+#define NVC697_SET_BLEND_COLOR_OP_V_OGL_MAX 0x00008008
+#define NVC697_SET_BLEND_COLOR_OP_V_D3D_ADD 0x00000001
+#define NVC697_SET_BLEND_COLOR_OP_V_D3D_SUBTRACT 0x00000002
+#define NVC697_SET_BLEND_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVC697_SET_BLEND_COLOR_OP_V_D3D_MIN 0x00000004
+#define NVC697_SET_BLEND_COLOR_OP_V_D3D_MAX 0x00000005
+
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF 0x1344
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V 31:0
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC697_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF 0x1348
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V 31:0
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC697_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
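(Editorial aside: every field in this class header is a HI:LO bit-range macro, and every enumerant is a plain 32-bit token. The sketch below shows the usual DRF-style way such defines are consumed; the FLD_* helper names are illustrative, not part of this patch — nvmisc.h in this tree ships the real DRF_* equivalents.)

#include <stdint.h>

/* A field macro such as NVC697_..._POLICY expands to the literal
 * token `5:4`, so a ternary picks out either endpoint:
 * `0 ? 5:4` evaluates to 4 (low bit), `1 ? 5:4` to 5 (high bit). */
#define FLD_LO(f)      ((0 ? f) % 32)
#define FLD_HI(f)      ((1 ? f) % 32)
#define FLD_MASK(f)    (0xFFFFFFFFu >> (31 - FLD_HI(f) + FLD_LO(f)))
#define FLD_NUM(f, n)  (((uint32_t)(n) & FLD_MASK(f)) << FLD_LO(f))

/* Example: the L2 eviction policy lives in bits 5:4 of its method
 * data word, so this yields 0x00000010. */
static const uint32_t l2_policy =
    FLD_NUM(NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY,
            NVC697_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL);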
+
+#define NVC697_SET_BLEND_ALPHA_OP 0x134c
+#define NVC697_SET_BLEND_ALPHA_OP_V 31:0
+#define NVC697_SET_BLEND_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVC697_SET_BLEND_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVC697_SET_BLEND_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVC697_SET_BLEND_ALPHA_OP_V_OGL_MIN 0x00008007
+#define NVC697_SET_BLEND_ALPHA_OP_V_OGL_MAX 0x00008008
+#define NVC697_SET_BLEND_ALPHA_OP_V_D3D_ADD 0x00000001
+#define NVC697_SET_BLEND_ALPHA_OP_V_D3D_SUBTRACT 0x00000002
+#define NVC697_SET_BLEND_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVC697_SET_BLEND_ALPHA_OP_V_D3D_MIN 0x00000004
+#define NVC697_SET_BLEND_ALPHA_OP_V_D3D_MAX 0x00000005
+
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF 0x1350
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V 31:0
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC697_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC697_SET_GLOBAL_COLOR_KEY 0x1354
+#define NVC697_SET_GLOBAL_COLOR_KEY_ENABLE 0:0
+#define NVC697_SET_GLOBAL_COLOR_KEY_ENABLE_FALSE 0x00000000
+#define NVC697_SET_GLOBAL_COLOR_KEY_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF 0x1358
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V 31:0
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC697_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC697_SET_SINGLE_ROP_CONTROL 0x135c
+#define NVC697_SET_SINGLE_ROP_CONTROL_ENABLE 0:0
+#define NVC697_SET_SINGLE_ROP_CONTROL_ENABLE_FALSE 0x00000000
+#define NVC697_SET_SINGLE_ROP_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_BLEND(i) (0x1360+(i)*4)
+#define NVC697_SET_BLEND_ENABLE 0:0
+#define NVC697_SET_BLEND_ENABLE_FALSE 0x00000000
+#define NVC697_SET_BLEND_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_STENCIL_TEST 0x1380
+#define NVC697_SET_STENCIL_TEST_ENABLE 0:0
+#define NVC697_SET_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NVC697_SET_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_STENCIL_OP_FAIL 0x1384
+#define NVC697_SET_STENCIL_OP_FAIL_V 31:0
+#define NVC697_SET_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NVC697_SET_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NVC697_SET_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NVC697_SET_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC697_SET_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC697_SET_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NVC697_SET_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NVC697_SET_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NVC697_SET_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NVC697_SET_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NVC697_SET_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NVC697_SET_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NVC697_SET_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NVC697_SET_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NVC697_SET_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NVC697_SET_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVC697_SET_STENCIL_OP_ZFAIL 0x1388
+#define NVC697_SET_STENCIL_OP_ZFAIL_V 31:0
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVC697_SET_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVC697_SET_STENCIL_OP_ZPASS 0x138c
+#define NVC697_SET_STENCIL_OP_ZPASS_V 31:0
+#define NVC697_SET_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVC697_SET_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVC697_SET_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVC697_SET_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVC697_SET_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NVC697_SET_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NVC697_SET_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NVC697_SET_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NVC697_SET_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NVC697_SET_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NVC697_SET_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NVC697_SET_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NVC697_SET_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NVC697_SET_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NVC697_SET_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NVC697_SET_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NVC697_SET_STENCIL_FUNC 0x1390
+#define NVC697_SET_STENCIL_FUNC_V 31:0
+#define NVC697_SET_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NVC697_SET_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NVC697_SET_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NVC697_SET_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVC697_SET_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NVC697_SET_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVC697_SET_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVC697_SET_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVC697_SET_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NVC697_SET_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NVC697_SET_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NVC697_SET_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVC697_SET_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVC697_SET_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVC697_SET_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVC697_SET_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVC697_SET_STENCIL_FUNC_REF 0x1394
+#define NVC697_SET_STENCIL_FUNC_REF_V 7:0
+
+#define NVC697_SET_STENCIL_FUNC_MASK 0x1398
+#define NVC697_SET_STENCIL_FUNC_MASK_V 7:0
+
+#define NVC697_SET_STENCIL_MASK 0x139c
+#define NVC697_SET_STENCIL_MASK_V 7:0
+
+#define NVC697_SET_DRAW_AUTO_START 0x13a4
+#define NVC697_SET_DRAW_AUTO_START_BYTE_COUNT 31:0
+
+#define NVC697_SET_PS_SATURATE 0x13a8
+#define NVC697_SET_PS_SATURATE_OUTPUT0 0:0
+#define NVC697_SET_PS_SATURATE_OUTPUT0_FALSE 0x00000000
+#define NVC697_SET_PS_SATURATE_OUTPUT0_TRUE 0x00000001
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE0 1:1
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE0_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE0_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC697_SET_PS_SATURATE_OUTPUT1 4:4
+#define NVC697_SET_PS_SATURATE_OUTPUT1_FALSE 0x00000000
+#define NVC697_SET_PS_SATURATE_OUTPUT1_TRUE 0x00000001
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE1 5:5
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE1_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE1_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC697_SET_PS_SATURATE_OUTPUT2 8:8
+#define NVC697_SET_PS_SATURATE_OUTPUT2_FALSE 0x00000000
+#define NVC697_SET_PS_SATURATE_OUTPUT2_TRUE 0x00000001
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE2 9:9
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE2_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE2_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC697_SET_PS_SATURATE_OUTPUT3 12:12
+#define NVC697_SET_PS_SATURATE_OUTPUT3_FALSE 0x00000000
+#define NVC697_SET_PS_SATURATE_OUTPUT3_TRUE 0x00000001
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE3 13:13
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE3_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE3_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC697_SET_PS_SATURATE_OUTPUT4 16:16
+#define NVC697_SET_PS_SATURATE_OUTPUT4_FALSE 0x00000000
+#define NVC697_SET_PS_SATURATE_OUTPUT4_TRUE 0x00000001
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE4 17:17
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE4_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE4_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC697_SET_PS_SATURATE_OUTPUT5 20:20
+#define NVC697_SET_PS_SATURATE_OUTPUT5_FALSE 0x00000000
+#define NVC697_SET_PS_SATURATE_OUTPUT5_TRUE 0x00000001
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE5 21:21
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE5_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE5_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC697_SET_PS_SATURATE_OUTPUT6 24:24
+#define NVC697_SET_PS_SATURATE_OUTPUT6_FALSE 0x00000000
+#define NVC697_SET_PS_SATURATE_OUTPUT6_TRUE 0x00000001
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE6 25:25
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE6_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE6_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC697_SET_PS_SATURATE_OUTPUT7 28:28
+#define NVC697_SET_PS_SATURATE_OUTPUT7_FALSE 0x00000000
+#define NVC697_SET_PS_SATURATE_OUTPUT7_TRUE 0x00000001
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE7 29:29
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE7_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC697_SET_PS_SATURATE_CLAMP_RANGE7_MINUS_ONE_TO_PLUS_ONE 0x00000001
+
+#define NVC697_SET_WINDOW_ORIGIN 0x13ac
+#define NVC697_SET_WINDOW_ORIGIN_MODE 0:0
+#define NVC697_SET_WINDOW_ORIGIN_MODE_UPPER_LEFT 0x00000000
+#define NVC697_SET_WINDOW_ORIGIN_MODE_LOWER_LEFT 0x00000001
+#define NVC697_SET_WINDOW_ORIGIN_FLIP_Y 4:4
+#define NVC697_SET_WINDOW_ORIGIN_FLIP_Y_FALSE 0x00000000
+#define NVC697_SET_WINDOW_ORIGIN_FLIP_Y_TRUE 0x00000001
+
+#define NVC697_SET_LINE_WIDTH_FLOAT 0x13b0
+#define NVC697_SET_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVC697_SET_ALIASED_LINE_WIDTH_FLOAT 0x13b4
+#define NVC697_SET_ALIASED_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVC697_SET_LINE_MULTISAMPLE_OVERRIDE 0x1418
+#define NVC697_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE 0:0
+#define NVC697_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_FALSE 0x00000000
+#define NVC697_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_TRUE 0x00000001
+
+#define NVC697_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424
+#define NVC697_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0
+#define NVC697_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVC697_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVC697_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4
+
+#define NVC697_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x1428
+#define NVC697_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0
+#define NVC697_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVC697_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVC697_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4
+
+#define NVC697_SET_GLOBAL_BASE_VERTEX_INDEX 0x1434
+#define NVC697_SET_GLOBAL_BASE_VERTEX_INDEX_V 31:0
+
+#define NVC697_SET_GLOBAL_BASE_INSTANCE_INDEX 0x1438
+#define NVC697_SET_GLOBAL_BASE_INSTANCE_INDEX_V 31:0
+
+#define NVC697_SET_PS_WARP_WATERMARKS 0x1450
+#define NVC697_SET_PS_WARP_WATERMARKS_LOW 15:0
+#define NVC697_SET_PS_WARP_WATERMARKS_HIGH 31:16
+
+#define NVC697_SET_PS_REGISTER_WATERMARKS 0x1454
+#define NVC697_SET_PS_REGISTER_WATERMARKS_LOW 15:0
+#define NVC697_SET_PS_REGISTER_WATERMARKS_HIGH 31:16
+
+#define NVC697_STORE_ZCULL 0x1464
+#define NVC697_STORE_ZCULL_V 0:0
+
+#define NVC697_SET_ITERATED_BLEND_CONSTANT_RED(j) (0x1480+(j)*16)
+#define NVC697_SET_ITERATED_BLEND_CONSTANT_RED_V 15:0
+
+#define NVC697_SET_ITERATED_BLEND_CONSTANT_GREEN(j) (0x1484+(j)*16)
+#define NVC697_SET_ITERATED_BLEND_CONSTANT_GREEN_V 15:0
+
+#define NVC697_SET_ITERATED_BLEND_CONSTANT_BLUE(j) (0x1488+(j)*16)
+#define NVC697_SET_ITERATED_BLEND_CONSTANT_BLUE_V 15:0
+
+#define NVC697_LOAD_ZCULL 0x1500
+#define NVC697_LOAD_ZCULL_V 0:0
+
+#define NVC697_SET_SURFACE_CLIP_ID_HEIGHT 0x1504
+#define NVC697_SET_SURFACE_CLIP_ID_HEIGHT_V 31:0
+
+#define NVC697_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL 0x1508
+#define NVC697_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVC697_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVC697_SET_CLIP_ID_CLEAR_RECT_VERTICAL 0x150c
+#define NVC697_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVC697_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVC697_SET_USER_CLIP_ENABLE 0x1510
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE0 0:0
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE0_FALSE 0x00000000
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE0_TRUE 0x00000001
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE1 1:1
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE1_FALSE 0x00000000
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE1_TRUE 0x00000001
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE2 2:2
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE2_FALSE 0x00000000
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE2_TRUE 0x00000001
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE3 3:3
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE3_FALSE 0x00000000
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE3_TRUE 0x00000001
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE4 4:4
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE4_FALSE 0x00000000
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE4_TRUE 0x00000001
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE5 5:5
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE5_FALSE 0x00000000
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE5_TRUE 0x00000001
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE6 6:6
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE6_FALSE 0x00000000
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE6_TRUE 0x00000001
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE7 7:7
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE7_FALSE 0x00000000
+#define NVC697_SET_USER_CLIP_ENABLE_PLANE7_TRUE 0x00000001
+
+#define NVC697_SET_ZPASS_PIXEL_COUNT 0x1514
+#define NVC697_SET_ZPASS_PIXEL_COUNT_ENABLE 0:0
+#define NVC697_SET_ZPASS_PIXEL_COUNT_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ZPASS_PIXEL_COUNT_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_POINT_SIZE 0x1518
+#define NVC697_SET_POINT_SIZE_V 31:0
+
+#define NVC697_SET_ZCULL_STATS 0x151c
+#define NVC697_SET_ZCULL_STATS_ENABLE 0:0
+#define NVC697_SET_ZCULL_STATS_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ZCULL_STATS_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_POINT_SPRITE 0x1520
+#define NVC697_SET_POINT_SPRITE_ENABLE 0:0
+#define NVC697_SET_POINT_SPRITE_ENABLE_FALSE 0x00000000
+#define NVC697_SET_POINT_SPRITE_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_SHADER_EXCEPTIONS 0x1528
+#define NVC697_SET_SHADER_EXCEPTIONS_ENABLE 0:0
+#define NVC697_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000
+#define NVC697_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001
+
+#define NVC697_CLEAR_REPORT_VALUE 0x1530
+#define NVC697_CLEAR_REPORT_VALUE_TYPE 4:0
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_DA_VERTICES_GENERATED 0x00000012
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_DA_PRIMITIVES_GENERATED 0x00000013
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_VS_INVOCATIONS 0x00000015
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_TI_INVOCATIONS 0x00000016
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_TS_INVOCATIONS 0x00000017
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_TS_PRIMITIVES_GENERATED 0x00000018
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_GS_INVOCATIONS 0x0000001A
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_GS_PRIMITIVES_GENERATED 0x0000001B
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_VTG_PRIMITIVES_OUT 0x0000001F
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_SUCCEEDED 0x00000010
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_NEEDED 0x00000011
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000003
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_CLIPPER_INVOCATIONS 0x0000001C
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_CLIPPER_PRIMITIVES_GENERATED 0x0000001D
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_ZCULL_STATS 0x00000002
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_PS_INVOCATIONS 0x0000001E
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_ZPASS_PIXEL_CNT 0x00000001
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_ALPHA_BETA_CLOCKS 0x00000004
+#define NVC697_CLEAR_REPORT_VALUE_TYPE_SCG_CLOCKS 0x00000009
+
+#define NVC697_SET_ANTI_ALIAS_ENABLE 0x1534
+#define NVC697_SET_ANTI_ALIAS_ENABLE_V 0:0
+#define NVC697_SET_ANTI_ALIAS_ENABLE_V_FALSE 0x00000000
+#define NVC697_SET_ANTI_ALIAS_ENABLE_V_TRUE 0x00000001
+
+#define NVC697_SET_ZT_SELECT 0x1538
+#define NVC697_SET_ZT_SELECT_TARGET_COUNT 0:0
+
+#define NVC697_SET_ANTI_ALIAS_ALPHA_CONTROL 0x153c
+#define NVC697_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE 0:0
+#define NVC697_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_DISABLE 0x00000000
+#define NVC697_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_ENABLE 0x00000001
+#define NVC697_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE 4:4
+#define NVC697_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_DISABLE 0x00000000
+#define NVC697_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_ENABLE 0x00000001
+
+#define NVC697_SET_RENDER_ENABLE_A 0x1550
+#define NVC697_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0
+
+#define NVC697_SET_RENDER_ENABLE_B 0x1554
+#define NVC697_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0
+
+#define NVC697_SET_RENDER_ENABLE_C 0x1558
+#define NVC697_SET_RENDER_ENABLE_C_MODE 2:0
+#define NVC697_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000
+#define NVC697_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001
+#define NVC697_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002
+#define NVC697_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003
+#define NVC697_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NVC697_SET_TEX_SAMPLER_POOL_A 0x155c
+#define NVC697_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVC697_SET_TEX_SAMPLER_POOL_B 0x1560
+#define NVC697_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVC697_SET_TEX_SAMPLER_POOL_C 0x1564
+#define NVC697_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0
+
+#define NVC697_SET_SLOPE_SCALE_DEPTH_BIAS 0x156c
+#define NVC697_SET_SLOPE_SCALE_DEPTH_BIAS_V 31:0
+
+#define NVC697_SET_ANTI_ALIASED_LINE 0x1570
+#define NVC697_SET_ANTI_ALIASED_LINE_ENABLE 0:0
+#define NVC697_SET_ANTI_ALIASED_LINE_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ANTI_ALIASED_LINE_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_TEX_HEADER_POOL_A 0x1574
+#define NVC697_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVC697_SET_TEX_HEADER_POOL_B 0x1578
+#define NVC697_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVC697_SET_TEX_HEADER_POOL_C 0x157c
+#define NVC697_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0
+
+#define NVC697_SET_ACTIVE_ZCULL_REGION 0x1590
+#define NVC697_SET_ACTIVE_ZCULL_REGION_ID 5:0
+
+#define NVC697_SET_TWO_SIDED_STENCIL_TEST 0x1594
+#define NVC697_SET_TWO_SIDED_STENCIL_TEST_ENABLE 0:0
+#define NVC697_SET_TWO_SIDED_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NVC697_SET_TWO_SIDED_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_BACK_STENCIL_OP_FAIL 0x1598
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V 31:0
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NVC697_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL 0x159c
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V 31:0
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVC697_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS 0x15a0
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V 31:0
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NVC697_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NVC697_SET_BACK_STENCIL_FUNC 0x15a4
+#define NVC697_SET_BACK_STENCIL_FUNC_V 31:0
+#define NVC697_SET_BACK_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NVC697_SET_BACK_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NVC697_SET_BACK_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NVC697_SET_BACK_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVC697_SET_BACK_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NVC697_SET_BACK_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVC697_SET_BACK_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVC697_SET_BACK_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVC697_SET_BACK_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NVC697_SET_BACK_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NVC697_SET_BACK_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NVC697_SET_BACK_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVC697_SET_BACK_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVC697_SET_BACK_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVC697_SET_BACK_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVC697_SET_BACK_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVC697_SET_SRGB_WRITE 0x15b8
+#define NVC697_SET_SRGB_WRITE_ENABLE 0:0
+#define NVC697_SET_SRGB_WRITE_ENABLE_FALSE 0x00000000
+#define NVC697_SET_SRGB_WRITE_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_DEPTH_BIAS 0x15bc
+#define NVC697_SET_DEPTH_BIAS_V 31:0
+
+#define NVC697_SET_ZCULL_REGION_FORMAT 0x15c8
+#define NVC697_SET_ZCULL_REGION_FORMAT_TYPE 3:0
+#define NVC697_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X4 0x00000000
+#define NVC697_SET_ZCULL_REGION_FORMAT_TYPE_ZS_4X4 0x00000001
+#define NVC697_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X2 0x00000002
+#define NVC697_SET_ZCULL_REGION_FORMAT_TYPE_Z_2X4 0x00000003
+#define NVC697_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X8_4X4 0x00000004
+#define NVC697_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_4X2 0x00000005
+#define NVC697_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_2X4 0x00000006
+#define NVC697_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X16_4X8 0x00000007
+#define NVC697_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_2X2 0x00000008
+#define NVC697_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_4X2 0x00000009
+#define NVC697_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_2X4 0x0000000A
+#define NVC697_SET_ZCULL_REGION_FORMAT_TYPE_ZS_8X8_2X2 0x0000000B
+#define NVC697_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_1X1 0x0000000C
+
+#define NVC697_SET_RT_LAYER 0x15cc
+#define NVC697_SET_RT_LAYER_V 15:0
+#define NVC697_SET_RT_LAYER_CONTROL 16:16
+#define NVC697_SET_RT_LAYER_CONTROL_V_SELECTS_LAYER 0x00000000
+#define NVC697_SET_RT_LAYER_CONTROL_GEOMETRY_SHADER_SELECTS_LAYER 0x00000001
+
+#define NVC697_SET_ANTI_ALIAS 0x15d0
+#define NVC697_SET_ANTI_ALIAS_SAMPLES 3:0
+#define NVC697_SET_ANTI_ALIAS_SAMPLES_MODE_1X1 0x00000000
+#define NVC697_SET_ANTI_ALIAS_SAMPLES_MODE_2X1 0x00000001
+#define NVC697_SET_ANTI_ALIAS_SAMPLES_MODE_2X2 0x00000002
+#define NVC697_SET_ANTI_ALIAS_SAMPLES_MODE_4X2 0x00000003
+#define NVC697_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVC697_SET_ANTI_ALIAS_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVC697_SET_ANTI_ALIAS_SAMPLES_MODE_4X4 0x00000006
+#define NVC697_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_4 0x00000008
+#define NVC697_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_12 0x00000009
+#define NVC697_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_8 0x0000000A
+#define NVC697_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_24 0x0000000B
+
+#define NVC697_SET_EDGE_FLAG 0x15e4
+#define NVC697_SET_EDGE_FLAG_V 0:0
+#define NVC697_SET_EDGE_FLAG_V_FALSE 0x00000000
+#define NVC697_SET_EDGE_FLAG_V_TRUE 0x00000001
+
+#define NVC697_DRAW_INLINE_INDEX 0x15e8
+#define NVC697_DRAW_INLINE_INDEX_V 31:0
+
+#define NVC697_SET_INLINE_INDEX2X16_ALIGN 0x15ec
+#define NVC697_SET_INLINE_INDEX2X16_ALIGN_COUNT 30:0
+#define NVC697_SET_INLINE_INDEX2X16_ALIGN_START_ODD 31:31
+#define NVC697_SET_INLINE_INDEX2X16_ALIGN_START_ODD_FALSE 0x00000000
+#define NVC697_SET_INLINE_INDEX2X16_ALIGN_START_ODD_TRUE 0x00000001
+
+#define NVC697_DRAW_INLINE_INDEX2X16 0x15f0
+#define NVC697_DRAW_INLINE_INDEX2X16_EVEN 15:0
+#define NVC697_DRAW_INLINE_INDEX2X16_ODD 31:16
+
+#define NVC697_SET_VERTEX_GLOBAL_BASE_OFFSET_A 0x15f4
+#define NVC697_SET_VERTEX_GLOBAL_BASE_OFFSET_A_UPPER 7:0
+
+#define NVC697_SET_VERTEX_GLOBAL_BASE_OFFSET_B 0x15f8
+#define NVC697_SET_VERTEX_GLOBAL_BASE_OFFSET_B_LOWER 31:0
+
+#define NVC697_SET_ZCULL_REGION_PIXEL_OFFSET_A 0x15fc
+#define NVC697_SET_ZCULL_REGION_PIXEL_OFFSET_A_WIDTH 15:0
+
+#define NVC697_SET_ZCULL_REGION_PIXEL_OFFSET_B 0x1600
+#define NVC697_SET_ZCULL_REGION_PIXEL_OFFSET_B_HEIGHT 15:0
+
+#define NVC697_SET_POINT_SPRITE_SELECT 0x1604
+#define NVC697_SET_POINT_SPRITE_SELECT_RMODE 1:0
+#define NVC697_SET_POINT_SPRITE_SELECT_RMODE_ZERO 0x00000000
+#define NVC697_SET_POINT_SPRITE_SELECT_RMODE_FROM_R 0x00000001
+#define NVC697_SET_POINT_SPRITE_SELECT_RMODE_FROM_S 0x00000002
+#define NVC697_SET_POINT_SPRITE_SELECT_ORIGIN 2:2
+#define NVC697_SET_POINT_SPRITE_SELECT_ORIGIN_BOTTOM 0x00000000
+#define NVC697_SET_POINT_SPRITE_SELECT_ORIGIN_TOP 0x00000001
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE0 3:3
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE0_PASSTHROUGH 0x00000000
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE0_GENERATE 0x00000001
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE1 4:4
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE1_PASSTHROUGH 0x00000000
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE1_GENERATE 0x00000001
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE2 5:5
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE2_PASSTHROUGH 0x00000000
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE2_GENERATE 0x00000001
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE3 6:6
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE3_PASSTHROUGH 0x00000000
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE3_GENERATE 0x00000001
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE4 7:7
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE4_PASSTHROUGH 0x00000000
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE4_GENERATE 0x00000001
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE5 8:8
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE5_PASSTHROUGH 0x00000000
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE5_GENERATE 0x00000001
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE6 9:9
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE6_PASSTHROUGH 0x00000000
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE6_GENERATE 0x00000001
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE7 10:10
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE7_PASSTHROUGH 0x00000000
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE7_GENERATE 0x00000001
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE8 11:11
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE8_PASSTHROUGH 0x00000000
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE8_GENERATE 0x00000001
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE9 12:12
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE9_PASSTHROUGH 0x00000000
+#define NVC697_SET_POINT_SPRITE_SELECT_TEXTURE9_GENERATE 0x00000001
+
+#define NVC697_SET_ATTRIBUTE_DEFAULT 0x1610
+#define NVC697_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE 0:0
+#define NVC697_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_0001 0x00000000
+#define NVC697_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_1111 0x00000001
+#define NVC697_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR 1:1
+#define NVC697_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0000 0x00000000
+#define NVC697_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0001 0x00000001
+#define NVC697_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR 2:2
+#define NVC697_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0000 0x00000000
+#define NVC697_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0001 0x00000001
+#define NVC697_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE 3:3
+#define NVC697_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0000 0x00000000
+#define NVC697_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0001 0x00000001
+#define NVC697_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0 4:4
+#define NVC697_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_0001 0x00000000
+#define NVC697_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_1111 0x00000001
+#define NVC697_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15 5:5
+#define NVC697_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0000 0x00000000
+#define NVC697_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0001 0x00000001
+
+#define NVC697_END 0x1614
+#define NVC697_END_V 0:0
+
+#define NVC697_BEGIN 0x1618
+#define NVC697_BEGIN_OP 15:0
+#define NVC697_BEGIN_OP_POINTS 0x00000000
+#define NVC697_BEGIN_OP_LINES 0x00000001
+#define NVC697_BEGIN_OP_LINE_LOOP 0x00000002
+#define NVC697_BEGIN_OP_LINE_STRIP 0x00000003
+#define NVC697_BEGIN_OP_TRIANGLES 0x00000004
+#define NVC697_BEGIN_OP_TRIANGLE_STRIP 0x00000005
+#define NVC697_BEGIN_OP_TRIANGLE_FAN 0x00000006
+#define NVC697_BEGIN_OP_QUADS 0x00000007
+#define NVC697_BEGIN_OP_QUAD_STRIP 0x00000008
+#define NVC697_BEGIN_OP_POLYGON 0x00000009
+#define NVC697_BEGIN_OP_LINELIST_ADJCY 0x0000000A
+#define NVC697_BEGIN_OP_LINESTRIP_ADJCY 0x0000000B
+#define NVC697_BEGIN_OP_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC697_BEGIN_OP_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC697_BEGIN_OP_PATCH 0x0000000E
+#define NVC697_BEGIN_PRIMITIVE_ID 24:24
+#define NVC697_BEGIN_PRIMITIVE_ID_FIRST 0x00000000
+#define NVC697_BEGIN_PRIMITIVE_ID_UNCHANGED 0x00000001
+#define NVC697_BEGIN_INSTANCE_ID 27:26
+#define NVC697_BEGIN_INSTANCE_ID_FIRST 0x00000000
+#define NVC697_BEGIN_INSTANCE_ID_SUBSEQUENT 0x00000001
+#define NVC697_BEGIN_INSTANCE_ID_UNCHANGED 0x00000002
+#define NVC697_BEGIN_SPLIT_MODE 30:29
+#define NVC697_BEGIN_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000
+#define NVC697_BEGIN_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001
+#define NVC697_BEGIN_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002
+#define NVC697_BEGIN_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003
+#define NVC697_BEGIN_INSTANCE_ITERATE_ENABLE 31:31
+#define NVC697_BEGIN_INSTANCE_ITERATE_ENABLE_FALSE 0x00000000
+#define NVC697_BEGIN_INSTANCE_ITERATE_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_VERTEX_ID_COPY 0x161c
+#define NVC697_SET_VERTEX_ID_COPY_ENABLE 0:0
+#define NVC697_SET_VERTEX_ID_COPY_ENABLE_FALSE 0x00000000
+#define NVC697_SET_VERTEX_ID_COPY_ENABLE_TRUE 0x00000001
+#define NVC697_SET_VERTEX_ID_COPY_ATTRIBUTE_SLOT 11:4
+
+#define NVC697_ADD_TO_PRIMITIVE_ID 0x1620
+#define NVC697_ADD_TO_PRIMITIVE_ID_V 31:0
+
+#define NVC697_LOAD_PRIMITIVE_ID 0x1624
+#define NVC697_LOAD_PRIMITIVE_ID_V 31:0
+
+#define NVC697_SET_SHADER_BASED_CULL 0x162c
+#define NVC697_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE 1:1
+#define NVC697_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_FALSE 0x00000000
+#define NVC697_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_TRUE 0x00000001
+#define NVC697_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE 0:0
+#define NVC697_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_FALSE 0x00000000
+#define NVC697_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_CLASS_VERSION 0x1638
+#define NVC697_SET_CLASS_VERSION_CURRENT 15:0
+#define NVC697_SET_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC697_SET_DA_PRIMITIVE_RESTART 0x1644
+#define NVC697_SET_DA_PRIMITIVE_RESTART_ENABLE 0:0
+#define NVC697_SET_DA_PRIMITIVE_RESTART_ENABLE_FALSE 0x00000000
+#define NVC697_SET_DA_PRIMITIVE_RESTART_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_DA_PRIMITIVE_RESTART_INDEX 0x1648
+#define NVC697_SET_DA_PRIMITIVE_RESTART_INDEX_V 31:0
+
+#define NVC697_SET_DA_OUTPUT 0x164c
+#define NVC697_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START 12:12
+#define NVC697_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_FALSE 0x00000000
+#define NVC697_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_TRUE 0x00000001
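(Editorial aside: the offsets above, e.g. NVC697_BEGIN at 0x1618, are byte offsets of methods written through a pushbuffer. A minimal sketch of wrapping a draw in BEGIN/END, assuming the Fermi-and-later method-header layout, which is defined by the host class, not this patch, and reusing the illustrative FLD_NUM helper sketched earlier; `buf` and `subch` are placeholders:)

/* Method header, opcode 1 = incrementing method:
 * [31:29] opcode, [28:16] dword count, [15:13] subchannel,
 * [12:0] method byte offset / 4. */
static uint32_t *emit_method(uint32_t *buf, uint32_t subch,
                             uint32_t mthd, uint32_t data)
{
    *buf++ = (1u << 29) | (1u << 16) | (subch << 13) | (mthd >> 2);
    *buf++ = data;
    return buf;
}

static uint32_t *draw_triangles(uint32_t *buf, uint32_t subch)
{
    buf = emit_method(buf, subch, NVC697_BEGIN,
                      FLD_NUM(NVC697_BEGIN_OP, NVC697_BEGIN_OP_TRIANGLES));
    /* ... vertex/index methods would be emitted here; indexed methods
     * such as NVC697_SET_BLEND(i) expand to 0x1360 + i*4 per target. */
    buf = emit_method(buf, subch, NVC697_END, 0);
    return buf;
}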
+#define NVC697_SET_ANTI_ALIASED_POINT_ENABLE_FALSE 0x00000000 +#define NVC697_SET_ANTI_ALIASED_POINT_ENABLE_TRUE 0x00000001 + +#define NVC697_SET_POINT_CENTER_MODE 0x165c +#define NVC697_SET_POINT_CENTER_MODE_V 31:0 +#define NVC697_SET_POINT_CENTER_MODE_V_OGL 0x00000000 +#define NVC697_SET_POINT_CENTER_MODE_V_D3D 0x00000001 + +#define NVC697_SET_LINE_SMOOTH_PARAMETERS 0x1668 +#define NVC697_SET_LINE_SMOOTH_PARAMETERS_FALLOFF 31:0 +#define NVC697_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_00 0x00000000 +#define NVC697_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_33 0x00000001 +#define NVC697_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_60 0x00000002 + +#define NVC697_SET_LINE_STIPPLE 0x166c +#define NVC697_SET_LINE_STIPPLE_ENABLE 0:0 +#define NVC697_SET_LINE_STIPPLE_ENABLE_FALSE 0x00000000 +#define NVC697_SET_LINE_STIPPLE_ENABLE_TRUE 0x00000001 + +#define NVC697_SET_LINE_SMOOTH_EDGE_TABLE(i) (0x1670+(i)*4) +#define NVC697_SET_LINE_SMOOTH_EDGE_TABLE_V0 7:0 +#define NVC697_SET_LINE_SMOOTH_EDGE_TABLE_V1 15:8 +#define NVC697_SET_LINE_SMOOTH_EDGE_TABLE_V2 23:16 +#define NVC697_SET_LINE_SMOOTH_EDGE_TABLE_V3 31:24 + +#define NVC697_SET_LINE_STIPPLE_PARAMETERS 0x1680 +#define NVC697_SET_LINE_STIPPLE_PARAMETERS_FACTOR 7:0 +#define NVC697_SET_LINE_STIPPLE_PARAMETERS_PATTERN 23:8 + +#define NVC697_SET_PROVOKING_VERTEX 0x1684 +#define NVC697_SET_PROVOKING_VERTEX_V 0:0 +#define NVC697_SET_PROVOKING_VERTEX_V_FIRST 0x00000000 +#define NVC697_SET_PROVOKING_VERTEX_V_LAST 0x00000001 + +#define NVC697_SET_TWO_SIDED_LIGHT 0x1688 +#define NVC697_SET_TWO_SIDED_LIGHT_ENABLE 0:0 +#define NVC697_SET_TWO_SIDED_LIGHT_ENABLE_FALSE 0x00000000 +#define NVC697_SET_TWO_SIDED_LIGHT_ENABLE_TRUE 0x00000001 + +#define NVC697_SET_POLYGON_STIPPLE 0x168c +#define NVC697_SET_POLYGON_STIPPLE_ENABLE 0:0 +#define NVC697_SET_POLYGON_STIPPLE_ENABLE_FALSE 0x00000000 +#define NVC697_SET_POLYGON_STIPPLE_ENABLE_TRUE 0x00000001 + +#define NVC697_SET_SHADER_CONTROL 0x1690 +#define NVC697_SET_SHADER_CONTROL_DEFAULT_PARTIAL 0:0 +#define NVC697_SET_SHADER_CONTROL_DEFAULT_PARTIAL_ZERO 0x00000000 +#define NVC697_SET_SHADER_CONTROL_DEFAULT_PARTIAL_INFINITY 0x00000001 +#define NVC697_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR 1:1 +#define NVC697_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_LEGACY 0x00000000 +#define NVC697_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_FP64_COMPATIBLE 0x00000001 +#define NVC697_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR 2:2 +#define NVC697_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_ZERO 0x00000000 +#define NVC697_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_INDEFINITE 0x00000001 + +#define NVC697_CHECK_CLASS_VERSION 0x16a0 +#define NVC697_CHECK_CLASS_VERSION_CURRENT 15:0 +#define NVC697_CHECK_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC697_SET_SPH_VERSION 0x16a4 +#define NVC697_SET_SPH_VERSION_CURRENT 15:0 +#define NVC697_SET_SPH_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC697_CHECK_SPH_VERSION 0x16a8 +#define NVC697_CHECK_SPH_VERSION_CURRENT 15:0 +#define NVC697_CHECK_SPH_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC697_SET_ALPHA_TO_COVERAGE_OVERRIDE 0x16b4 +#define NVC697_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE 0:0 +#define NVC697_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000 +#define NVC697_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001 +#define NVC697_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT 1:1 +#define NVC697_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_DISABLE 0x00000000 +#define 
NVC697_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_ENABLE 0x00000001 + +#define NVC697_SET_SCG_GRAPHICS_PRIORITY 0x16bc +#define NVC697_SET_SCG_GRAPHICS_PRIORITY_PRIORITY 5:0 + +#define NVC697_SET_SCG_GRAPHICS_SCHEDULING_PARAMETERS(i) (0x16c0+(i)*4) +#define NVC697_SET_SCG_GRAPHICS_SCHEDULING_PARAMETERS_V 31:0 + +#define NVC697_SET_POLYGON_STIPPLE_PATTERN(i) (0x1700+(i)*4) +#define NVC697_SET_POLYGON_STIPPLE_PATTERN_V 31:0 + +#define NVC697_SET_AAM_VERSION 0x1790 +#define NVC697_SET_AAM_VERSION_CURRENT 15:0 +#define NVC697_SET_AAM_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC697_CHECK_AAM_VERSION 0x1794 +#define NVC697_CHECK_AAM_VERSION_CURRENT 15:0 +#define NVC697_CHECK_AAM_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC697_SET_ZT_LAYER 0x179c +#define NVC697_SET_ZT_LAYER_OFFSET 15:0 + +#define NVC697_SET_INDEX_BUFFER_A 0x17c8 +#define NVC697_SET_INDEX_BUFFER_A_ADDRESS_UPPER 7:0 + +#define NVC697_SET_INDEX_BUFFER_B 0x17cc +#define NVC697_SET_INDEX_BUFFER_B_ADDRESS_LOWER 31:0 + +#define NVC697_SET_INDEX_BUFFER_E 0x17d8 +#define NVC697_SET_INDEX_BUFFER_E_INDEX_SIZE 1:0 +#define NVC697_SET_INDEX_BUFFER_E_INDEX_SIZE_ONE_BYTE 0x00000000 +#define NVC697_SET_INDEX_BUFFER_E_INDEX_SIZE_TWO_BYTES 0x00000001 +#define NVC697_SET_INDEX_BUFFER_E_INDEX_SIZE_FOUR_BYTES 0x00000002 + +#define NVC697_SET_INDEX_BUFFER_F 0x17dc +#define NVC697_SET_INDEX_BUFFER_F_FIRST 31:0 + +#define NVC697_DRAW_INDEX_BUFFER 0x17e0 +#define NVC697_DRAW_INDEX_BUFFER_COUNT 31:0 + +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST 0x17e4 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_FIRST 15:0 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_COUNT 27:16 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST 0x17e8 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_FIRST 15:0 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_COUNT 27:16 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000 +#define 
NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST 0x17ec +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_FIRST 15:0 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_COUNT 27:16 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f0 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define 
NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC697_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f4 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC697_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f8 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define 
+#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC697_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC697_SET_DEPTH_BIAS_CLAMP 0x187c
+#define NVC697_SET_DEPTH_BIAS_CLAMP_V 31:0
+
+#define NVC697_SET_VERTEX_STREAM_INSTANCE_A(i) (0x1880+(i)*4)
+#define NVC697_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED 0:0
+#define NVC697_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_FALSE 0x00000000
+#define NVC697_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_TRUE 0x00000001
+
+#define NVC697_SET_VERTEX_STREAM_INSTANCE_B(i) (0x18c0+(i)*4)
+#define NVC697_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED 0:0
+#define NVC697_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_FALSE 0x00000000
+#define NVC697_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_TRUE 0x00000001
+
+#define NVC697_SET_ATTRIBUTE_POINT_SIZE 0x1910
+#define NVC697_SET_ATTRIBUTE_POINT_SIZE_ENABLE 0:0
+#define NVC697_SET_ATTRIBUTE_POINT_SIZE_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ATTRIBUTE_POINT_SIZE_ENABLE_TRUE 0x00000001
+#define NVC697_SET_ATTRIBUTE_POINT_SIZE_SLOT 11:4
+
+#define NVC697_OGL_SET_CULL 0x1918
+#define NVC697_OGL_SET_CULL_ENABLE 0:0
+#define NVC697_OGL_SET_CULL_ENABLE_FALSE 0x00000000
+#define NVC697_OGL_SET_CULL_ENABLE_TRUE 0x00000001
+
+#define NVC697_OGL_SET_FRONT_FACE 0x191c
+#define NVC697_OGL_SET_FRONT_FACE_V 31:0
+#define NVC697_OGL_SET_FRONT_FACE_V_CW 0x00000900
+#define NVC697_OGL_SET_FRONT_FACE_V_CCW 0x00000901
+
+#define NVC697_OGL_SET_CULL_FACE 0x1920
+#define NVC697_OGL_SET_CULL_FACE_V 31:0
+#define NVC697_OGL_SET_CULL_FACE_V_FRONT 0x00000404
+#define NVC697_OGL_SET_CULL_FACE_V_BACK 0x00000405
+#define NVC697_OGL_SET_CULL_FACE_V_FRONT_AND_BACK 0x00000408
+
+#define NVC697_SET_VIEWPORT_PIXEL 0x1924
+#define NVC697_SET_VIEWPORT_PIXEL_CENTER 0:0
+#define NVC697_SET_VIEWPORT_PIXEL_CENTER_AT_HALF_INTEGERS 0x00000000
+#define NVC697_SET_VIEWPORT_PIXEL_CENTER_AT_INTEGERS 0x00000001
+
+#define NVC697_SET_VIEWPORT_SCALE_OFFSET 0x192c
+#define NVC697_SET_VIEWPORT_SCALE_OFFSET_ENABLE 0:0
+#define NVC697_SET_VIEWPORT_SCALE_OFFSET_ENABLE_FALSE 0x00000000
+#define NVC697_SET_VIEWPORT_SCALE_OFFSET_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL 0x193c
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE 0:0
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_FALSE 0x00000000
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_TRUE 0x00000001
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE 17:16
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_USE_FIELD_MIN_Z_ZERO_MAX_Z_ONE 0x00000000
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_MIN_Z_MAX_Z 0x00000001
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_ZERO_ONE 0x00000002
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_MINUS_INF_PLUS_INF 0x00000003
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z 3:3
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLIP 0x00000000
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLAMP 0x00000001
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z 4:4
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLIP 0x00000000
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLAMP 0x00000001
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND 7:7
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_256 0x00000000
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_1 0x00000001
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND 10:10
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_256 0x00000000
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_1 0x00000001
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP 13:11
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP 0x00000000
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_PASSTHRU 0x00000001
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XY_CLIP 0x00000002
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XYZ_CLIP 0x00000003
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP_NO_Z_CULL 0x00000004
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_Z_CLIP 0x00000005
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_TRI_FILL_OR_CLIP 0x00000006
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z 2:1
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SAME_AS_XY_GUARDBAND 0x00000000
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_256 0x00000001
+#define NVC697_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_1 0x00000002
+
+#define NVC697_SET_USER_CLIP_OP 0x1940
+#define NVC697_SET_USER_CLIP_OP_PLANE0 0:0
+#define NVC697_SET_USER_CLIP_OP_PLANE0_CLIP 0x00000000
+#define NVC697_SET_USER_CLIP_OP_PLANE0_CULL 0x00000001
+#define NVC697_SET_USER_CLIP_OP_PLANE1 4:4
+#define NVC697_SET_USER_CLIP_OP_PLANE1_CLIP 0x00000000
+#define NVC697_SET_USER_CLIP_OP_PLANE1_CULL 0x00000001
+#define NVC697_SET_USER_CLIP_OP_PLANE2 8:8
+#define NVC697_SET_USER_CLIP_OP_PLANE2_CLIP 0x00000000
+#define NVC697_SET_USER_CLIP_OP_PLANE2_CULL 0x00000001
+#define NVC697_SET_USER_CLIP_OP_PLANE3 12:12
+#define NVC697_SET_USER_CLIP_OP_PLANE3_CLIP 0x00000000
+#define NVC697_SET_USER_CLIP_OP_PLANE3_CULL 0x00000001
+#define NVC697_SET_USER_CLIP_OP_PLANE4 16:16
+#define NVC697_SET_USER_CLIP_OP_PLANE4_CLIP 0x00000000
+#define NVC697_SET_USER_CLIP_OP_PLANE4_CULL 0x00000001
+#define NVC697_SET_USER_CLIP_OP_PLANE5 20:20
+#define NVC697_SET_USER_CLIP_OP_PLANE5_CLIP 0x00000000
+#define NVC697_SET_USER_CLIP_OP_PLANE5_CULL 0x00000001
+#define NVC697_SET_USER_CLIP_OP_PLANE6 24:24
+#define NVC697_SET_USER_CLIP_OP_PLANE6_CLIP 0x00000000
+#define NVC697_SET_USER_CLIP_OP_PLANE6_CULL 0x00000001
+#define NVC697_SET_USER_CLIP_OP_PLANE7 28:28
+#define NVC697_SET_USER_CLIP_OP_PLANE7_CLIP 0x00000000
+#define NVC697_SET_USER_CLIP_OP_PLANE7_CULL 0x00000001
+
+#define NVC697_SET_RENDER_ENABLE_OVERRIDE 0x1944
+#define NVC697_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0
+#define NVC697_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000
+#define NVC697_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001
+#define NVC697_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002
+
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_CONTROL 0x1948
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE 0:0
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_TOPOLOGY_IN_BEGIN_METHODS 0x00000000
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_SEPARATE_TOPOLOGY_STATE 0x00000001
+
+#define NVC697_SET_WINDOW_CLIP_ENABLE 0x194c
+#define NVC697_SET_WINDOW_CLIP_ENABLE_V 0:0
+#define NVC697_SET_WINDOW_CLIP_ENABLE_V_FALSE 0x00000000
+#define NVC697_SET_WINDOW_CLIP_ENABLE_V_TRUE 0x00000001
+
+#define NVC697_SET_WINDOW_CLIP_TYPE 0x1950
+#define NVC697_SET_WINDOW_CLIP_TYPE_V 1:0
+#define NVC697_SET_WINDOW_CLIP_TYPE_V_INCLUSIVE 0x00000000
+#define NVC697_SET_WINDOW_CLIP_TYPE_V_EXCLUSIVE 0x00000001
+#define NVC697_SET_WINDOW_CLIP_TYPE_V_CLIPALL 0x00000002
+
+#define NVC697_INVALIDATE_ZCULL 0x1958
+#define NVC697_INVALIDATE_ZCULL_V 31:0
+#define NVC697_INVALIDATE_ZCULL_V_INVALIDATE 0x00000000
+
+#define NVC697_SET_ZCULL 0x1968
+#define NVC697_SET_ZCULL_Z_ENABLE 0:0
+#define NVC697_SET_ZCULL_Z_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ZCULL_Z_ENABLE_TRUE 0x00000001
+#define NVC697_SET_ZCULL_STENCIL_ENABLE 4:4
+#define NVC697_SET_ZCULL_STENCIL_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ZCULL_STENCIL_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_ZCULL_BOUNDS 0x196c
+#define NVC697_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE 0:0
+#define NVC697_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_TRUE 0x00000001
+#define NVC697_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE 4:4
+#define NVC697_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVC697_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_PRIMITIVE_TOPOLOGY 0x1970
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V 15:0
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_POINTLIST 0x00000001
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LINELIST 0x00000002
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP 0x00000003
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST 0x00000004
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP 0x00000005
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LINELIST_ADJCY 0x0000000A
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP_ADJCY 0x0000000B
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_PATCHLIST 0x0000000E
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_POINTS 0x00001001
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST 0x00001002
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST 0x00001003
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST 0x0000100F
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINESTRIP 0x00001010
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINESTRIP 0x00001011
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLELIST 0x00001012
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLESTRIP 0x00001013
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLESTRIP 0x00001014
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN 0x00001015
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLEFAN 0x00001016
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN_IMM 0x00001017
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST_IMM 0x00001018
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST2 0x0000101A
+#define NVC697_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST2 0x0000101B
+
+#define NVC697_ZCULL_SYNC 0x1978
+#define NVC697_ZCULL_SYNC_V 31:0
+
+#define NVC697_SET_CLIP_ID_TEST 0x197c
+#define NVC697_SET_CLIP_ID_TEST_ENABLE 0:0
+#define NVC697_SET_CLIP_ID_TEST_ENABLE_FALSE 0x00000000
+#define NVC697_SET_CLIP_ID_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_SURFACE_CLIP_ID_WIDTH 0x1980
+#define NVC697_SET_SURFACE_CLIP_ID_WIDTH_V 31:0
+
+#define NVC697_SET_CLIP_ID 0x1984
+#define NVC697_SET_CLIP_ID_V 31:0
+
+#define NVC697_SET_DEPTH_BOUNDS_TEST 0x19bc
+#define NVC697_SET_DEPTH_BOUNDS_TEST_ENABLE 0:0
+#define NVC697_SET_DEPTH_BOUNDS_TEST_ENABLE_FALSE 0x00000000
+#define NVC697_SET_DEPTH_BOUNDS_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_BLEND_FLOAT_OPTION 0x19c0
+#define NVC697_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO 0:0
+#define NVC697_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_FALSE 0x00000000
+#define NVC697_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_TRUE 0x00000001
+
+#define NVC697_SET_LOGIC_OP 0x19c4
+#define NVC697_SET_LOGIC_OP_ENABLE 0:0
+#define NVC697_SET_LOGIC_OP_ENABLE_FALSE 0x00000000
+#define NVC697_SET_LOGIC_OP_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_LOGIC_OP_FUNC 0x19c8
+#define NVC697_SET_LOGIC_OP_FUNC_V 31:0
+#define NVC697_SET_LOGIC_OP_FUNC_V_CLEAR 0x00001500
+#define NVC697_SET_LOGIC_OP_FUNC_V_AND 0x00001501
+#define NVC697_SET_LOGIC_OP_FUNC_V_AND_REVERSE 0x00001502
+#define NVC697_SET_LOGIC_OP_FUNC_V_COPY 0x00001503
+#define NVC697_SET_LOGIC_OP_FUNC_V_AND_INVERTED 0x00001504
+#define NVC697_SET_LOGIC_OP_FUNC_V_NOOP 0x00001505
+#define NVC697_SET_LOGIC_OP_FUNC_V_XOR 0x00001506
+#define NVC697_SET_LOGIC_OP_FUNC_V_OR 0x00001507
+#define NVC697_SET_LOGIC_OP_FUNC_V_NOR 0x00001508
+#define NVC697_SET_LOGIC_OP_FUNC_V_EQUIV 0x00001509
+#define NVC697_SET_LOGIC_OP_FUNC_V_INVERT 0x0000150A
+#define NVC697_SET_LOGIC_OP_FUNC_V_OR_REVERSE 0x0000150B
+#define NVC697_SET_LOGIC_OP_FUNC_V_COPY_INVERTED 0x0000150C
+#define NVC697_SET_LOGIC_OP_FUNC_V_OR_INVERTED 0x0000150D
+#define NVC697_SET_LOGIC_OP_FUNC_V_NAND 0x0000150E
+#define NVC697_SET_LOGIC_OP_FUNC_V_SET 0x0000150F
+
+#define NVC697_SET_Z_COMPRESSION 0x19cc
+#define NVC697_SET_Z_COMPRESSION_ENABLE 0:0
+#define NVC697_SET_Z_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVC697_SET_Z_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVC697_CLEAR_SURFACE 0x19d0
+#define NVC697_CLEAR_SURFACE_Z_ENABLE 0:0
+#define NVC697_CLEAR_SURFACE_Z_ENABLE_FALSE 0x00000000
+#define NVC697_CLEAR_SURFACE_Z_ENABLE_TRUE 0x00000001
+#define NVC697_CLEAR_SURFACE_STENCIL_ENABLE 1:1
+#define NVC697_CLEAR_SURFACE_STENCIL_ENABLE_FALSE 0x00000000
+#define NVC697_CLEAR_SURFACE_STENCIL_ENABLE_TRUE 0x00000001
+#define NVC697_CLEAR_SURFACE_R_ENABLE 2:2
+#define NVC697_CLEAR_SURFACE_R_ENABLE_FALSE 0x00000000
+#define NVC697_CLEAR_SURFACE_R_ENABLE_TRUE 0x00000001
+#define NVC697_CLEAR_SURFACE_G_ENABLE 3:3
+#define NVC697_CLEAR_SURFACE_G_ENABLE_FALSE 0x00000000
+#define NVC697_CLEAR_SURFACE_G_ENABLE_TRUE 0x00000001
+#define NVC697_CLEAR_SURFACE_B_ENABLE 4:4
+#define NVC697_CLEAR_SURFACE_B_ENABLE_FALSE 0x00000000
+#define NVC697_CLEAR_SURFACE_B_ENABLE_TRUE 0x00000001
+#define NVC697_CLEAR_SURFACE_A_ENABLE 5:5
+#define NVC697_CLEAR_SURFACE_A_ENABLE_FALSE 0x00000000
+#define NVC697_CLEAR_SURFACE_A_ENABLE_TRUE 0x00000001
+#define NVC697_CLEAR_SURFACE_MRT_SELECT 9:6
+#define NVC697_CLEAR_SURFACE_RT_ARRAY_INDEX 25:10
+
+#define NVC697_CLEAR_CLIP_ID_SURFACE 0x19d4
+#define NVC697_CLEAR_CLIP_ID_SURFACE_V 31:0
+
+#define NVC697_SET_COLOR_COMPRESSION(i) (0x19e0+(i)*4)
+#define NVC697_SET_COLOR_COMPRESSION_ENABLE 0:0
+#define NVC697_SET_COLOR_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVC697_SET_COLOR_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_CT_WRITE(i) (0x1a00+(i)*4)
+#define NVC697_SET_CT_WRITE_R_ENABLE 0:0
+#define NVC697_SET_CT_WRITE_R_ENABLE_FALSE 0x00000000
+#define NVC697_SET_CT_WRITE_R_ENABLE_TRUE 0x00000001
+#define NVC697_SET_CT_WRITE_G_ENABLE 4:4
+#define NVC697_SET_CT_WRITE_G_ENABLE_FALSE 0x00000000
+#define NVC697_SET_CT_WRITE_G_ENABLE_TRUE 0x00000001
+#define NVC697_SET_CT_WRITE_B_ENABLE 8:8
+#define NVC697_SET_CT_WRITE_B_ENABLE_FALSE 0x00000000
+#define NVC697_SET_CT_WRITE_B_ENABLE_TRUE 0x00000001
+#define NVC697_SET_CT_WRITE_A_ENABLE 12:12
+#define NVC697_SET_CT_WRITE_A_ENABLE_FALSE 0x00000000
+#define NVC697_SET_CT_WRITE_A_ENABLE_TRUE 0x00000001
+
+#define NVC697_PIPE_NOP 0x1a2c
+#define NVC697_PIPE_NOP_V 31:0
+
+#define NVC697_SET_SPARE00 0x1a30
+#define NVC697_SET_SPARE00_V 31:0
+
+#define NVC697_SET_SPARE01 0x1a34
+#define NVC697_SET_SPARE01_V 31:0
+
+#define NVC697_SET_SPARE02 0x1a38
+#define NVC697_SET_SPARE02_V 31:0
+
+#define NVC697_SET_SPARE03 0x1a3c
+#define NVC697_SET_SPARE03_V 31:0
+
+#define NVC697_SET_REPORT_SEMAPHORE_A 0x1b00
+#define NVC697_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0
+
+#define NVC697_SET_REPORT_SEMAPHORE_B 0x1b04
+#define NVC697_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0
+
+#define NVC697_SET_REPORT_SEMAPHORE_C 0x1b08
+#define NVC697_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0
+
+#define NVC697_SET_REPORT_SEMAPHORE_D 0x1b0c
+#define NVC697_SET_REPORT_SEMAPHORE_D_OPERATION 1:0
+#define NVC697_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000
+#define NVC697_SET_REPORT_SEMAPHORE_D_OPERATION_ACQUIRE 0x00000001
+#define NVC697_SET_REPORT_SEMAPHORE_D_OPERATION_REPORT_ONLY 0x00000002
+#define NVC697_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003
+#define NVC697_SET_REPORT_SEMAPHORE_D_RELEASE 4:4
+#define NVC697_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_READS_COMPLETE 0x00000000
+#define NVC697_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_WRITES_COMPLETE 0x00000001
+#define NVC697_SET_REPORT_SEMAPHORE_D_ACQUIRE 8:8
+#define NVC697_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_WRITES_START 0x00000000
+#define NVC697_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_READS_START 0x00000001
+#define NVC697_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION 15:12
+#define NVC697_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_NONE 0x00000000
+#define NVC697_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DATA_ASSEMBLER 0x00000001
+#define NVC697_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VERTEX_SHADER 0x00000002
+#define NVC697_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_INIT_SHADER 0x00000008
+#define NVC697_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_SHADER 0x00000009
+#define NVC697_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_GEOMETRY_SHADER 0x00000006
+#define NVC697_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_STREAMING_OUTPUT 0x00000005
+#define NVC697_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VPC 0x00000004
+#define NVC697_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ZCULL 0x00000007
+#define NVC697_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_PIXEL_SHADER 0x0000000A
+#define NVC697_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DEPTH_TEST 0x0000000C
+#define NVC697_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ALL 0x0000000F
+#define NVC697_SET_REPORT_SEMAPHORE_D_COMPARISON 16:16
+#define NVC697_SET_REPORT_SEMAPHORE_D_COMPARISON_EQ 0x00000000
+#define NVC697_SET_REPORT_SEMAPHORE_D_COMPARISON_GE 0x00000001
+#define NVC697_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20
+#define NVC697_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000
+#define NVC697_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT 27:23
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_NONE 0x00000000
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_DA_VERTICES_GENERATED 0x00000001
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_DA_PRIMITIVES_GENERATED 0x00000003
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_VS_INVOCATIONS 0x00000005
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_TI_INVOCATIONS 0x0000001B
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_TS_INVOCATIONS 0x0000001D
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_TS_PRIMITIVES_GENERATED 0x0000001F
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_GS_INVOCATIONS 0x00000007
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_GS_PRIMITIVES_GENERATED 0x00000009
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_ALPHA_BETA_CLOCKS 0x00000004
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_SCG_CLOCKS 0x00000008
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_VTG_PRIMITIVES_OUT 0x00000012
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x0000001E
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_SUCCEEDED 0x0000000B
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED 0x0000000D
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000006
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_BYTE_COUNT 0x0000001A
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_INVOCATIONS 0x0000000F
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_PRIMITIVES_GENERATED 0x00000011
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS0 0x0000000A
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS1 0x0000000C
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS2 0x0000000E
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS3 0x00000010
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_PS_INVOCATIONS 0x00000013
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT 0x00000002
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT64 0x00000015
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_TILED_ZPASS_PIXEL_CNT64 0x00000017
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_COLOR_TARGET 0x00000018
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_ZETA_TARGET 0x00000019
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_BOUNDING_RECTANGLE 0x0000001C
+#define NVC697_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28
+#define NVC697_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000
+#define NVC697_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001
+#define NVC697_SET_REPORT_SEMAPHORE_D_SUB_REPORT 7:5
+#define NVC697_SET_REPORT_SEMAPHORE_D_REPORT_DWORD_NUMBER 21:21
+#define NVC697_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2
+#define NVC697_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000
+#define NVC697_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001
+#define NVC697_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3
+#define NVC697_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000
+#define NVC697_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001
+#define NVC697_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9
+#define NVC697_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000
+#define NVC697_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001
+#define NVC697_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002
+#define NVC697_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003
+#define NVC697_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004
+#define NVC697_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005
+#define NVC697_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006
+#define NVC697_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007
+#define NVC697_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17
+#define NVC697_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000
+#define NVC697_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001
+#define NVC697_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP 19:19
+#define NVC697_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_FALSE 0x00000000
+#define NVC697_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_TRUE 0x00000001
+
+#define NVC697_SET_VERTEX_STREAM_A_FORMAT(j) (0x1c00+(j)*16)
+#define NVC697_SET_VERTEX_STREAM_A_FORMAT_STRIDE 11:0
+#define NVC697_SET_VERTEX_STREAM_A_FORMAT_ENABLE 12:12
+#define NVC697_SET_VERTEX_STREAM_A_FORMAT_ENABLE_FALSE 0x00000000
+#define NVC697_SET_VERTEX_STREAM_A_FORMAT_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_VERTEX_STREAM_A_LOCATION_A(j) (0x1c04+(j)*16)
+#define NVC697_SET_VERTEX_STREAM_A_LOCATION_A_OFFSET_UPPER 7:0
+
+#define NVC697_SET_VERTEX_STREAM_A_LOCATION_B(j) (0x1c08+(j)*16)
+#define NVC697_SET_VERTEX_STREAM_A_LOCATION_B_OFFSET_LOWER 31:0
+
+#define NVC697_SET_VERTEX_STREAM_A_FREQUENCY(j) (0x1c0c+(j)*16)
+#define NVC697_SET_VERTEX_STREAM_A_FREQUENCY_V 31:0
+
+#define NVC697_SET_VERTEX_STREAM_B_FORMAT(j) (0x1d00+(j)*16)
+#define NVC697_SET_VERTEX_STREAM_B_FORMAT_STRIDE 11:0
+#define NVC697_SET_VERTEX_STREAM_B_FORMAT_ENABLE 12:12
+#define NVC697_SET_VERTEX_STREAM_B_FORMAT_ENABLE_FALSE 0x00000000
+#define NVC697_SET_VERTEX_STREAM_B_FORMAT_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_VERTEX_STREAM_B_LOCATION_A(j) (0x1d04+(j)*16)
+#define NVC697_SET_VERTEX_STREAM_B_LOCATION_A_OFFSET_UPPER 7:0
+
+#define NVC697_SET_VERTEX_STREAM_B_LOCATION_B(j) (0x1d08+(j)*16)
+#define NVC697_SET_VERTEX_STREAM_B_LOCATION_B_OFFSET_LOWER 31:0
+
+#define NVC697_SET_VERTEX_STREAM_B_FREQUENCY(j) (0x1d0c+(j)*16)
+#define NVC697_SET_VERTEX_STREAM_B_FREQUENCY_V 31:0
+
+#define NVC697_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA(j) (0x1e00+(j)*32)
+#define NVC697_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE 0:0
+#define NVC697_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000
+#define NVC697_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_OP(j) (0x1e04+(j)*32)
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_OP_V 31:0
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MIN 0x00008007
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MAX 0x00008008
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_ADD 0x00000001
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_SUBTRACT 0x00000002
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MIN 0x00000004
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MAX 0x00000005
+
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF(j) (0x1e08+(j)*32)
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V 31:0
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF(j) (0x1e0c+(j)*32)
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V 31:0
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC697_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_OP(j) (0x1e10+(j)*32)
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_OP_V 31:0
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MIN 0x00008007
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MAX 0x00008008
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_ADD 0x00000001
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_SUBTRACT 0x00000002
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MIN 0x00000004
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MAX 0x00000005
+
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF(j) (0x1e14+(j)*32)
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V 31:0
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF(j) (0x1e18+(j)*32)
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V 31:0
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC697_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC697_SET_PIPELINE_SHADER(j) (0x2000+(j)*64)
+#define NVC697_SET_PIPELINE_SHADER_ENABLE 0:0
+#define NVC697_SET_PIPELINE_SHADER_ENABLE_FALSE 0x00000000
+#define NVC697_SET_PIPELINE_SHADER_ENABLE_TRUE 0x00000001
+#define NVC697_SET_PIPELINE_SHADER_TYPE 7:4
+#define NVC697_SET_PIPELINE_SHADER_TYPE_VERTEX_CULL_BEFORE_FETCH 0x00000000
+#define NVC697_SET_PIPELINE_SHADER_TYPE_VERTEX 0x00000001
+#define NVC697_SET_PIPELINE_SHADER_TYPE_TESSELLATION_INIT 0x00000002
+#define NVC697_SET_PIPELINE_SHADER_TYPE_TESSELLATION 0x00000003
+#define NVC697_SET_PIPELINE_SHADER_TYPE_GEOMETRY 0x00000004
+#define NVC697_SET_PIPELINE_SHADER_TYPE_PIXEL 0x00000005
+
+#define NVC697_SET_PIPELINE_RESERVED_B(j) (0x2004+(j)*64)
+#define NVC697_SET_PIPELINE_RESERVED_B_V 0:0
+
+#define NVC697_SET_PIPELINE_RESERVED_A(j) (0x2008+(j)*64)
+#define NVC697_SET_PIPELINE_RESERVED_A_V 0:0
+
+#define NVC697_SET_PIPELINE_REGISTER_COUNT(j) (0x200c+(j)*64)
+#define NVC697_SET_PIPELINE_REGISTER_COUNT_V 8:0
+
+#define NVC697_SET_PIPELINE_BINDING(j) (0x2010+(j)*64)
+#define NVC697_SET_PIPELINE_BINDING_GROUP 2:0
+
+#define NVC697_SET_PIPELINE_PROGRAM_ADDRESS_A(j) (0x2014+(j)*64)
+#define NVC697_SET_PIPELINE_PROGRAM_ADDRESS_A_UPPER 7:0
+
+#define NVC697_SET_PIPELINE_PROGRAM_ADDRESS_B(j) (0x2018+(j)*64)
+#define NVC697_SET_PIPELINE_PROGRAM_ADDRESS_B_LOWER 31:0
+
+#define NVC697_SET_PIPELINE_RESERVED_D(j) (0x201c+(j)*64)
+#define NVC697_SET_PIPELINE_RESERVED_D_V 0:0
+
+#define NVC697_SET_PIPELINE_RESERVED_E(j) (0x2020+(j)*64)
+#define NVC697_SET_PIPELINE_RESERVED_E_V 0:0
+
+#define NVC697_SET_FALCON00 0x2300
+#define NVC697_SET_FALCON00_V 31:0
+
+#define NVC697_SET_FALCON01 0x2304
+#define NVC697_SET_FALCON01_V 31:0
+
+#define NVC697_SET_FALCON02 0x2308
+#define NVC697_SET_FALCON02_V 31:0
+
+#define NVC697_SET_FALCON03 0x230c
+#define NVC697_SET_FALCON03_V 31:0
+
+#define NVC697_SET_FALCON04 0x2310
+#define NVC697_SET_FALCON04_V 31:0
+
+#define NVC697_SET_FALCON05 0x2314
+#define NVC697_SET_FALCON05_V 31:0
+
+#define NVC697_SET_FALCON06 0x2318
+#define NVC697_SET_FALCON06_V 31:0
+
+#define NVC697_SET_FALCON07 0x231c
+#define NVC697_SET_FALCON07_V 31:0
+
+#define NVC697_SET_FALCON08 0x2320
+#define NVC697_SET_FALCON08_V 31:0
+
+#define NVC697_SET_FALCON09 0x2324
+#define NVC697_SET_FALCON09_V 31:0
+
+#define NVC697_SET_FALCON10 0x2328
+#define NVC697_SET_FALCON10_V 31:0
+
+#define NVC697_SET_FALCON11 0x232c
+#define NVC697_SET_FALCON11_V 31:0
+
+#define NVC697_SET_FALCON12 0x2330
+#define NVC697_SET_FALCON12_V 31:0
+
+#define NVC697_SET_FALCON13 0x2334
+#define NVC697_SET_FALCON13_V 31:0
+
+#define NVC697_SET_FALCON14 0x2338
+#define NVC697_SET_FALCON14_V 31:0
+
+#define NVC697_SET_FALCON15 0x233c
+#define NVC697_SET_FALCON15_V 31:0
+
+#define NVC697_SET_FALCON16 0x2340
+#define NVC697_SET_FALCON16_V 31:0
+
+#define NVC697_SET_FALCON17 0x2344
+#define NVC697_SET_FALCON17_V 31:0
+
+#define NVC697_SET_FALCON18 0x2348
+#define NVC697_SET_FALCON18_V 31:0
+
+#define NVC697_SET_FALCON19 0x234c
+#define NVC697_SET_FALCON19_V 31:0
+
+#define NVC697_SET_FALCON20 0x2350
+#define NVC697_SET_FALCON20_V 31:0
+
+#define NVC697_SET_FALCON21 0x2354
+#define NVC697_SET_FALCON21_V 31:0
+
+#define NVC697_SET_FALCON22 0x2358
+#define NVC697_SET_FALCON22_V 31:0
+
+#define NVC697_SET_FALCON23 0x235c
+#define NVC697_SET_FALCON23_V 31:0
+
+#define NVC697_SET_FALCON24 0x2360
+#define NVC697_SET_FALCON24_V 31:0
+
+#define NVC697_SET_FALCON25 0x2364
+#define NVC697_SET_FALCON25_V 31:0
+
+#define NVC697_SET_FALCON26 0x2368
+#define NVC697_SET_FALCON26_V 31:0
+
+#define NVC697_SET_FALCON27 0x236c
+#define NVC697_SET_FALCON27_V 31:0
+
+#define NVC697_SET_FALCON28 0x2370
+#define NVC697_SET_FALCON28_V 31:0
+
+#define NVC697_SET_FALCON29 0x2374
+#define NVC697_SET_FALCON29_V 31:0
+
+#define NVC697_SET_FALCON30 0x2378
+#define NVC697_SET_FALCON30_V 31:0
+
+#define NVC697_SET_FALCON31 0x237c
+#define NVC697_SET_FALCON31_V 31:0
+
+#define NVC697_SET_CONSTANT_BUFFER_SELECTOR_A 0x2380
+#define NVC697_SET_CONSTANT_BUFFER_SELECTOR_A_SIZE 16:0
+
+#define NVC697_SET_CONSTANT_BUFFER_SELECTOR_B 0x2384
+#define NVC697_SET_CONSTANT_BUFFER_SELECTOR_B_ADDRESS_UPPER 7:0
+
+#define NVC697_SET_CONSTANT_BUFFER_SELECTOR_C 0x2388
+#define NVC697_SET_CONSTANT_BUFFER_SELECTOR_C_ADDRESS_LOWER 31:0
+
+#define NVC697_LOAD_CONSTANT_BUFFER_OFFSET 0x238c
+#define NVC697_LOAD_CONSTANT_BUFFER_OFFSET_V 15:0
+
+#define NVC697_LOAD_CONSTANT_BUFFER(i) (0x2390+(i)*4)
+#define NVC697_LOAD_CONSTANT_BUFFER_V 31:0
+
+#define NVC697_BIND_GROUP_RESERVED_A(j) (0x2400+(j)*32)
+#define NVC697_BIND_GROUP_RESERVED_A_V 0:0
+
+#define NVC697_BIND_GROUP_RESERVED_B(j) (0x2404+(j)*32)
+#define NVC697_BIND_GROUP_RESERVED_B_V 0:0
+
+#define NVC697_BIND_GROUP_RESERVED_C(j) (0x2408+(j)*32)
+#define NVC697_BIND_GROUP_RESERVED_C_V 0:0
+
+#define NVC697_BIND_GROUP_RESERVED_D(j) (0x240c+(j)*32)
+#define NVC697_BIND_GROUP_RESERVED_D_V 0:0
+
+#define NVC697_BIND_GROUP_CONSTANT_BUFFER(j) (0x2410+(j)*32)
+#define NVC697_BIND_GROUP_CONSTANT_BUFFER_VALID 0:0
+#define NVC697_BIND_GROUP_CONSTANT_BUFFER_VALID_FALSE 0x00000000
+#define NVC697_BIND_GROUP_CONSTANT_BUFFER_VALID_TRUE 0x00000001
+#define NVC697_BIND_GROUP_CONSTANT_BUFFER_SHADER_SLOT 8:4
+
+#define NVC697_SET_TRAP_HANDLER_A 0x25f8
+#define NVC697_SET_TRAP_HANDLER_A_ADDRESS_UPPER 16:0
+
+#define NVC697_SET_TRAP_HANDLER_B 0x25fc
+#define NVC697_SET_TRAP_HANDLER_B_ADDRESS_LOWER 31:0
+
+#define NVC697_SET_COLOR_CLAMP 0x2600
+#define NVC697_SET_COLOR_CLAMP_ENABLE 0:0
+#define NVC697_SET_COLOR_CLAMP_ENABLE_FALSE 0x00000000
+#define NVC697_SET_COLOR_CLAMP_ENABLE_TRUE 0x00000001
+
+#define NVC697_SET_STREAM_OUT_LAYOUT_SELECT(i,j) (0x2800+(i)*128+(j)*4)
+#define NVC697_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER00 7:0
+#define NVC697_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER01 15:8
+#define NVC697_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER02 23:16
+#define NVC697_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER03 31:24
+
+#define NVC697_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE(i) (0x32f4+(i)*4)
+#define NVC697_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_V 31:0
+
+#define NVC697_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER(i) (0x3314+(i)*4)
+#define NVC697_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER_V 31:0
+
+#define NVC697_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3334
+#define NVC697_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0
+
+#define NVC697_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3338
+#define NVC697_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0
+
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4)
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0
+
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4)
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0
+
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4)
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0
+
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4)
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30
+
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4)
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4
+
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0
+
+#define NVC697_START_SHADER_PERFORMANCE_COUNTER 0x33e0
+#define NVC697_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVC697_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4
+#define NVC697_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER 0x33e8
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER_V 31:0
+
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER 0x33ec
+#define NVC697_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER_V 31:0
+
+#define NVC697_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4)
+#define NVC697_SET_MME_SHADOW_SCRATCH_V 31:0
+
+#define NVC697_CALL_MME_MACRO(j) (0x3800+(j)*8)
+#define NVC697_CALL_MME_MACRO_V 31:0
+
+#define NVC697_CALL_MME_DATA(j) (0x3804+(j)*8)
+#define NVC697_CALL_MME_DATA_V 31:0
+
+#endif /* _cl_ampere_a_h_ */
diff --git a/src/common/sdk/nvidia/inc/class/clc770.h b/src/common/sdk/nvidia/inc/class/clc770.h
new file mode 100644
index 0000000..87580e5
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc770.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/clc770.finn
+//
+
+#define NVC770_DISPLAY (0xc770U) /* finn: Evaluated from "NVC770_ALLOCATION_PARAMETERS_MESSAGE_ID" */
+
+
+
diff --git a/src/common/sdk/nvidia/inc/class/clc771.h b/src/common/sdk/nvidia/inc/class/clc771.h
new file mode 100644
index 0000000..9d484a7
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc771.h
@@ -0,0 +1,257 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc771_h_
+#define _clc771_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC771_DISP_SF_USER (0x000C771)
+
+typedef volatile struct _clc771_tag0 {
+    NvU32 dispSfUserOffset[0x400]; /* NV_PDISP_SF_USER 0x000D0FFF:0x000D0000 */
+} _NvC771DispSfUser, NvC771DispSfUserMap;
+
+#define NVC771_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */
+#define NVC771_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */
+#define NVC771_SF_HDMI_INFO_CTRL(i,j) (0x000E0000-0x000E0000+(i)*1024+(j)*64) /* RW-4A */
+#define NVC771_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */
+#define NVC771_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x000E0000-0x000E0000+(i)*1024) /* RW-4A */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 8 /* */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE 0:0 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_NO 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_YES 0x00000001 /* RW--V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_DIS 0x00000000 /* RW--V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_EN 0x00000001 /* RW--V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER 4:4 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_DIS 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_EN 0x00000001 /* RW--V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE 8:8 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_EN 0x00000001 /* RW--V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW 9:9 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_STATUS(i) (0x000E0004-0x000E0000+(i)*1024) /* R--4A */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_STATUS__SIZE_1 8 /* */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_STATUS_SENT 0:0 /* R-IVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_DONE 0x00000001 /* R---V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_WAITING 0x00000000 /* R---V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_INIT 0x00000000 /* R-I-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x000E0008-0x000E0000+(i)*1024) /* RW-4A */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 8 /* */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x000E000C-0x000E0000+(i)*1024) /* RW-4A */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 8 /* */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x000E0010-0x000E0000+(i)*1024) /* RW-4A */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 8 /* */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x000E0014-0x000E0000+(i)*1024) /* RW-4A */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 8 /* */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x000E0018-0x000E0000+(i)*1024) /* RW-4A */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 8 /* */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW(i) (0x000E001C-0x000E0000+(i)*1024) /* RW-4A */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW__SIZE_1 8 /* */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB14 7:0 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB15 15:8 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB16 23:16 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB17 31:24 /* RWIVF */
+#define NVC771_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */
+#define NVC771_SF_SHARED_GENERIC_CTRL(i,j) (0x000E0200-0x000E0000+(i)*1024+(j)*40) /* RW-4A */
+#define NVC771_SF_SHARED_GENERIC_CTRL__SIZE_1 8 /* */
+#define NVC771_SF_SHARED_GENERIC_CTRL__SIZE_2 6 /* */
+#define NVC771_SF_SHARED_GENERIC_CTRL_ENABLE 0:0 /* RWIVF */
+#define NVC771_SF_SHARED_GENERIC_CTRL_ENABLE_NO 0x00000000 /* RWI-V */
+#define NVC771_SF_SHARED_GENERIC_CTRL_ENABLE_YES 0x00000001 /* RW--V */
+#define NVC771_SF_SHARED_GENERIC_CTRL_SINGLE 4:4 /* RWIVF */
+#define NVC771_SF_SHARED_GENERIC_CTRL_SINGLE_NO 0x00000000 /* RWI-V */
0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_SINGLE_YES 0x00000001 /* RW--V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_LOC 9:8 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_CTRL_LOC_VBLANK 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_LOC_VSYNC 0x00000001 /* RW--V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_LOC_LOADV 0x00000002 /* RW--V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_NEW 12:12 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_CTRL_NEW_INIT 0x00000000 /* R-I-V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_NEW_DONE 0x00000000 /* R---V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_NEW_PENDING 0x00000001 /* R---V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_NEW_TRIGGER 0x00000001 /* -W--T */ +#define NVC771_SF_SHARED_GENERIC_CTRL_STATUS 13:13 /* R-IVF */ +#define NVC771_SF_SHARED_GENERIC_CTRL_STATUS_DONE 0x00000001 /* R---V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_STATUS_WAIT 0x00000000 /* R---V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_STATUS_INIT 0x00000000 /* R-I-V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_CHECKSUM_HW 16:16 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_CTRL_CHECKSUM_HW_NO 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_CHECKSUM_HW_YES 0x00000001 /* RW--V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_BUSY 20:20 /* R-IVF */ +#define NVC771_SF_SHARED_GENERIC_CTRL_BUSY_NO 0x00000000 /* R-I-V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_BUSY_YES 0x00000001 /* R---V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_VSC_SDP_UPDATE_RFB_OVERRIDE 28:28 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_CTRL_VSC_SDP_UPDATE_RFB_OVERRIDE_DISABLE 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_VSC_SDP_UPDATE_RFB_OVERRIDE_ENABLE 0x00000001 /* RW--V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_VSC_SDP_SU_COORDINATES_VALID_OVERRIDE 31:31 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_CTRL_VSC_SDP_SU_COORDINATES_VALID_OVERRIDE_DISABLE 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_CTRL_VSC_SDP_SU_COORDINATES_VALID_OVERRIDE_ENABLE 0x00000001 /* RW--V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_HEADER(i,j) (0x000E0204-0x000E0000+(i)*1024+(j)*40) /* RW-4A */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_HEADER__SIZE_1 8 /* */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_HEADER__SIZE_2 6 /* */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_HEADER_HB3 31:24 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_HEADER_HB3_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK0(i,j) (0x000E0208-0x000E0000+(i)*1024+(j)*40) /* RW-4A */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK0__SIZE_1 8 /* */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK0__SIZE_2 6 /* */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK0_DB0 7:0 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK0_DB0_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK0_DB1 15:8 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK0_DB1_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK0_DB2 23:16 /* RWIVF */ +#define 
NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK0_DB2_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK0_DB3 31:24 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK0_DB3_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK1(i,j) (0x000E020C-0x000E0000+(i)*1024+(j)*40) /* RW-4A */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK1__SIZE_1 8 /* */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK1__SIZE_2 6 /* */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK1_DB4 7:0 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK1_DB4_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK1_DB5 15:8 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK1_DB5_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK1_DB6 23:16 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK1_DB6_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK1_DB7 31:24 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK1_DB7_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK2(i,j) (0x000E0210-0x000E0000+(i)*1024+(j)*40) /* RW-4A */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK2__SIZE_1 8 /* */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK2__SIZE_2 6 /* */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK2_DB8 7:0 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK2_DB8_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK2_DB9 15:8 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK2_DB9_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK2_DB10 23:16 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK2_DB10_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK2_DB11 31:24 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK2_DB11_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK3(i,j) (0x000E0214-0x000E0000+(i)*1024+(j)*40) /* RW-4A */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK3__SIZE_1 8 /* */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK3__SIZE_2 6 /* */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK3_DB12 7:0 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK3_DB12_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK3_DB13 15:8 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK3_DB13_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK3_DB14 23:16 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK3_DB14_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK3_DB15 31:24 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK3_DB15_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK4(i,j) (0x000E0218-0x000E0000+(i)*1024+(j)*40) /* RW-4A */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK4__SIZE_1 8 /* */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK4__SIZE_2 6 /* */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK4_DB16 7:0 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK4_DB16_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK4_DB17 15:8 /* RWIVF */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK4_DB17_INIT 0x00000000 /* RWI-V */ +#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK4_DB18 23:16 /* RWIVF */ 
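/*
 * Illustrative sketch (editor's addition, not part of the class header):
 * how the (i,j)-indexed registers above resolve to offsets within the
 * NVC771 SF user mapping -- SF instance i steps by 1024 bytes and generic
 * infoframe slot j by 40 bytes, matching the (i)*1024 + (j)*40 terms, and
 * the byte fields follow the 7:0 / 15:8 / 23:16 / 31:24 packing shown.
 * reg_wr32 is a hypothetical caller-supplied 32-bit register writer; the
 * Nv* types come from nvtypes.h.
 */
static inline void
nvc771WriteGenericSubpack0(void (*reg_wr32)(NvU32 offset, NvU32 value),
                           NvU32 i, NvU32 j,
                           NvU8 db0, NvU8 db1, NvU8 db2, NvU8 db3)
{
    NvU32 data = ((NvU32)db0 <<  0) |  /* _DB0  7:0  */
                 ((NvU32)db1 <<  8) |  /* _DB1 15:8  */
                 ((NvU32)db2 << 16) |  /* _DB2 23:16 */
                 ((NvU32)db3 << 24);   /* _DB3 31:24 */
    reg_wr32(NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK0(i, j), data);
}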
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK4_DB18_INIT   0x00000000 /* RWI-V */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK4_DB19        31:24      /* RWIVF */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK4_DB19_INIT   0x00000000 /* RWI-V */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK5(i,j)        (0x000E021C-0x000E0000+(i)*1024+(j)*40) /* RW-4A */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK5__SIZE_1     8          /*       */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK5__SIZE_2     6          /*       */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK5_DB20        7:0        /* RWIVF */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK5_DB20_INIT   0x00000000 /* RWI-V */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK5_DB21        15:8       /* RWIVF */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK5_DB21_INIT   0x00000000 /* RWI-V */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK5_DB22        23:16      /* RWIVF */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK5_DB22_INIT   0x00000000 /* RWI-V */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK5_DB23        31:24      /* RWIVF */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK5_DB23_INIT   0x00000000 /* RWI-V */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK6(i,j)        (0x000E0220-0x000E0000+(i)*1024+(j)*40) /* RW-4A */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK6__SIZE_1     8          /*       */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK6__SIZE_2     6          /*       */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK6_DB24        7:0        /* RWIVF */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK6_DB24_INIT   0x00000000 /* RWI-V */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK6_DB25        15:8       /* RWIVF */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK6_DB25_INIT   0x00000000 /* RWI-V */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK6_DB26        23:16      /* RWIVF */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK6_DB26_INIT   0x00000000 /* RWI-V */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK6_DB27        31:24      /* RWIVF */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK6_DB27_INIT   0x00000000 /* RWI-V */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK7(i,j)        (0x000E0224-0x000E0000+(i)*1024+(j)*40) /* RW-4A */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK7__SIZE_1     8          /*       */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK7__SIZE_2     6          /*       */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK7_DB28        7:0        /* RWIVF */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK7_DB28_INIT   0x00000000 /* RWI-V */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK7_DB29        15:8       /* RWIVF */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK7_DB29_INIT   0x00000000 /* RWI-V */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK7_DB30        23:16      /* RWIVF */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK7_DB30_INIT   0x00000000 /* RWI-V */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK7_DB31        31:24      /* RWIVF */
+#define NVC771_SF_SHARED_GENERIC_INFOFRAME_SUBPACK7_DB31_INIT   0x00000000 /* RWI-V */
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif // _clc771_h_
diff --git a/src/common/sdk/nvidia/inc/class/clc77d.h b/src/common/sdk/nvidia/inc/class/clc77d.h
new file mode 100644
index 0000000..50bc709
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc77d.h
@@ -0,0 +1,1372 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clC77d_h_ +#define _clC77d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC77D_CORE_CHANNEL_DMA (0x0000C77D) + +#define NV_DISP_NOTIFIER 0x00000000 +#define NV_DISP_NOTIFIER_SIZEOF 0x00000010 +#define NV_DISP_NOTIFIER__0 0x00000000 +#define NV_DISP_NOTIFIER__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFIER__0_FIELD 8:8 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE 9:9 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_NON_TEARING 0x00000000 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_IMMEDIATE 0x00000001 +#define NV_DISP_NOTIFIER__0_R1 15:10 +#define NV_DISP_NOTIFIER__0_R2 23:16 +#define NV_DISP_NOTIFIER__0_R3 29:24 +#define NV_DISP_NOTIFIER__0_STATUS 31:30 +#define NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_NOTIFIER__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_NOTIFIER__0_STATUS_FINISHED 0x00000002 +#define NV_DISP_NOTIFIER__1 0x00000001 +#define NV_DISP_NOTIFIER__1_R4 31:0 +#define NV_DISP_NOTIFIER__2 0x00000002 +#define NV_DISP_NOTIFIER__2_TIMESTAMP_LO 31:0 +#define NV_DISP_NOTIFIER__3 0x00000003 +#define NV_DISP_NOTIFIER__3_TIMESTAMP_HI 31:0 + + +// dma opcode instructions +#define NVC77D_DMA +#define NVC77D_DMA_OPCODE 31:29 +#define NVC77D_DMA_OPCODE_METHOD 0x00000000 +#define NVC77D_DMA_OPCODE_JUMP 0x00000001 +#define NVC77D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC77D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC77D_DMA_METHOD_COUNT 27:18 +#define NVC77D_DMA_METHOD_OFFSET 13:2 +#define NVC77D_DMA_DATA 31:0 +#define NVC77D_DMA_DATA_NOP 0x00000000 +#define NVC77D_DMA_JUMP_OFFSET 11:2 +#define NVC77D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// if cap SUPPORT_FLEXIBLE_WIN_MAPPING is FALSE, this define can be used to obtain which head a window is mapped to +#define NVC37D_WINDOW_MAPPED_TO_HEAD(w) ((w)>>1) +#define NVC37D_GET_VALID_WINDOWMASK_FOR_HEAD(h) ((1<<((h)*2)) | (1<<((h)*2+1))) + +// class methods +#define NVC77D_PUT (0x00000000) +#define NVC77D_PUT_PTR 9:0 +#define NVC77D_GET (0x00000004) +#define NVC77D_GET_PTR 9:0 +#define NVC77D_UPDATE (0x00000200) +#define NVC77D_UPDATE_SPECIAL_HANDLING 21:20 +#define NVC77D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NVC77D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NVC77D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NVC77D_UPDATE_SPECIAL_HANDLING_REASON 19:12 +#define NVC77D_UPDATE_INHIBIT_INTERRUPTS 24:24 +#define NVC77D_UPDATE_INHIBIT_INTERRUPTS_FALSE 
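/*
 * Illustrative sketch (editor's addition, not part of this patch):
 * assembling a pushbuffer entry from the NVC77D_DMA fields defined above.
 * The opcode occupies bits 31:29, the dword count 27:18, and the
 * dword-aligned method offset 13:2, so a byte offset such as NVC77D_UPDATE
 * contributes its value masked to that field.  This sketches the encoding
 * only; it is not taken from the driver sources.
 */
static inline NvU32
nvc77dMethodHeader(NvU32 methodByteOffset, NvU32 dwordCount)
{
    return (NVC77D_DMA_OPCODE_METHOD << 29) |  /* OPCODE        31:29 */
           ((dwordCount & 0x3FF)     << 18) |  /* METHOD_COUNT  27:18 */
           (methodByteOffset & 0x3FFC);        /* METHOD_OFFSET 13:2  */
}
/* e.g. nvc77dMethodHeader(NVC77D_UPDATE, 1) followed by one data dword */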
(0x00000000) +#define NVC77D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NVC77D_UPDATE_RELEASE_ELV 0:0 +#define NVC77D_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC77D_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC77D_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC77D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC77D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC77D_SET_CONTEXT_DMA_NOTIFIER (0x00000208) +#define NVC77D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC77D_SET_NOTIFIER_CONTROL (0x0000020C) +#define NVC77D_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC77D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC77D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC77D_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC77D_SET_NOTIFIER_CONTROL_NOTIFY 12:12 +#define NVC77D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NVC77D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NVC77D_SET_CONTROL (0x00000210) +#define NVC77D_SET_CONTROL_FLIP_LOCK_PIN(i) ((i)+0):((i)+0) +#define NVC77D_SET_CONTROL_FLIP_LOCK_PIN__SIZE_1 4 +#define NVC77D_SET_CONTROL_FLIP_LOCK_PIN_DISABLE (0x00000000) +#define NVC77D_SET_CONTROL_FLIP_LOCK_PIN_ENABLE (0x00000001) +#define NVC77D_SET_CONTROL_FLIP_LOCK_PIN0 0:0 +#define NVC77D_SET_CONTROL_FLIP_LOCK_PIN0_DISABLE (0x00000000) +#define NVC77D_SET_CONTROL_FLIP_LOCK_PIN0_ENABLE (0x00000001) +#define NVC77D_SET_CONTROL_FLIP_LOCK_PIN1 1:1 +#define NVC77D_SET_CONTROL_FLIP_LOCK_PIN1_DISABLE (0x00000000) +#define 
NVC77D_SET_CONTROL_FLIP_LOCK_PIN1_ENABLE (0x00000001) +#define NVC77D_SET_CONTROL_FLIP_LOCK_PIN2 2:2 +#define NVC77D_SET_CONTROL_FLIP_LOCK_PIN2_DISABLE (0x00000000) +#define NVC77D_SET_CONTROL_FLIP_LOCK_PIN2_ENABLE (0x00000001) +#define NVC77D_SET_CONTROL_FLIP_LOCK_PIN3 3:3 +#define NVC77D_SET_CONTROL_FLIP_LOCK_PIN3_DISABLE (0x00000000) +#define NVC77D_SET_CONTROL_FLIP_LOCK_PIN3_ENABLE (0x00000001) +#define NVC77D_SET_INTERLOCK_FLAGS (0x00000218) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC77D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define 
NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define 
NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define 
NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC77D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC77D_GET_RG_SCAN_LINE(b) (0x00000220 + (b)*0x00000004) +#define NVC77D_GET_RG_SCAN_LINE_LINE 15:0 +#define NVC77D_GET_RG_SCAN_LINE_VBLANK 16:16 +#define NVC77D_GET_RG_SCAN_LINE_VBLANK_FALSE (0x00000000) +#define NVC77D_GET_RG_SCAN_LINE_VBLANK_TRUE (0x00000001) +#define NVC77D_SET_GET_BLANKING_CTRL(b) (0x00000240 + (b)*0x00000004) +#define NVC77D_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NVC77D_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NVC77D_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NVC77D_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NVC77D_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NVC77D_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) + +#define NVC77D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NVC77D_SOR_SET_CONTROL_OWNER_MASK 7:0 +#define NVC77D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVC77D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVC77D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NVC77D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NVC77D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVC77D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVC77D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVC77D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVC77D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVC77D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NVC77D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NVC77D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NVC77D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NVC77D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NVC77D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NVC77D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NVC77D_SOR_SET_CONTROL_PROTOCOL_HDMI_FRL (0x0000000C) +#define NVC77D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NVC77D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16 +#define NVC77D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC77D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC77D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NVC77D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NVC77D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NVC77D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NVC77D_SOR_SET_CUSTOM_REASON(a) (0x00000304 + (a)*0x00000020) +#define NVC77D_SOR_SET_CUSTOM_REASON_CODE 31:0 +#define NVC77D_SOR_SET_SW_SPARE_A(a) (0x00000308 + (a)*0x00000020) +#define NVC77D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NVC77D_SOR_SET_SW_SPARE_B(a) (0x0000030C + (a)*0x00000020) +#define NVC77D_SOR_SET_SW_SPARE_B_CODE 31:0 + +#define NVC77D_DSI_SET_CONTROL(a) (0x00000500 + (a)*0x00000020) +#define NVC77D_DSI_SET_CONTROL_OWNER_MASK 7:0 +#define NVC77D_DSI_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVC77D_DSI_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVC77D_DSI_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NVC77D_DSI_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define 
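/*
 * Illustrative sketch (editor's addition): packing a SOR_SET_CONTROL data
 * word from the fields above.  OWNER_MASK is one bit per head in 7:0 and
 * PROTOCOL occupies 11:8, so head 0 driving single-link TMDS combines
 * OWNER_MASK_HEAD0 with PROTOCOL_SINGLE_TMDS_A.  The helper name is the
 * editor's, not the driver's.
 */
static inline NvU32
nvc77dSorControl(NvU32 headMask, NvU32 protocol)
{
    return (headMask & 0xFF) |         /* OWNER_MASK  7:0  */
           ((protocol & 0xF) << 8);    /* PROTOCOL    11:8 */
}
/* e.g. nvc77dSorControl(NVC77D_SOR_SET_CONTROL_OWNER_MASK_HEAD0,
 *                       NVC77D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A) */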
NVC77D_DSI_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVC77D_DSI_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVC77D_DSI_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVC77D_DSI_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVC77D_DSI_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVC77D_DSI_SET_CUSTOM_REASON(a) (0x00000504 + (a)*0x00000020) +#define NVC77D_DSI_SET_CUSTOM_REASON_CODE 31:0 +#define NVC77D_DSI_SET_SW_SPARE_A(a) (0x00000508 + (a)*0x00000020) +#define NVC77D_DSI_SET_SW_SPARE_A_CODE 31:0 +#define NVC77D_DSI_SET_SW_SPARE_B(a) (0x0000050C + (a)*0x00000020) +#define NVC77D_DSI_SET_SW_SPARE_B_CODE 31:0 + +#define NVC77D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080) +#define NVC77D_WINDOW_SET_CONTROL_OWNER 3:0 +#define NVC77D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i)) +#define NVC77D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8 +#define NVC77D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000) +#define NVC77D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001) +#define NVC77D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002) +#define NVC77D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003) +#define NVC77D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004) +#define NVC77D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005) +#define NVC77D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006) +#define NVC77D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007) +#define NVC77D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define 
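/*
 * Illustrative sketch (editor's addition): the FORMAT_USAGE_BOUNDS word is
 * one boolean per pixel-format family at the bit positions defined above.
 * Declaring that a window may fetch 4-byte-per-pixel RGB and packed
 * YUV 4:2:2, for example, sets bits 2 and 4; the format choice here is
 * purely for illustration.
 */
static inline NvU32
nvc77dWindowFormatBounds(void)
{
    return (NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE << 2) |
           (NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE  << 4);
}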
NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define 
NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define 
NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(a) (0x0000100C + (a)*0x00000080) +#define NVC77D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVC77D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080) +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0 +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16 +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28 +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20 +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004) +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24 +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED 30:30 +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000) +#define NVC77D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001) + +#define NVC77D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000400) +#define NVC77D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NVC77D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NVC77D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NVC77D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NVC77D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003) +#define NVC77D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3 +#define NVC77D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_PROCAMP_CHROMA_DOWN_V 4:4 +#define NVC77D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28 +#define NVC77D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NVC77D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000400) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2 +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3 +#define 
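/*
 * Illustrative sketch (editor's addition): packing HEAD_SET_PROCAMP from
 * the fields defined above -- COLOR_SPACE in 1:0, CHROMA_LPF in 3:3
 * (CHROMA_DOWN_V and DYNAMIC_RANGE sit in 4:4 and 28:28 and are left at
 * their defaults here).  NvBool comes from nvtypes.h.
 */
static inline NvU32
nvc77dProcamp(NvU32 colorSpace, NvBool chromaLpf)
{
    return (colorSpace & 0x3) |                              /* COLOR_SPACE 1:0 */
           ((chromaLpf ? (NvU32)NVC77D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE
                       : (NvU32)NVC77D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE) << 3);
}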
NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4 +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_444 (0x00000009) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444NP (0x0000000A) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24 +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12 +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26 +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 
(0x00000017) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F) +#define NVC77D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000400) +#define NVC77D_HEAD_SET_CONTROL_STRUCTURE 1:0 +#define NVC77D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2 +#define NVC77D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_YUV420PACKER 3:3 +#define NVC77D_HEAD_SET_CONTROL_YUV420PACKER_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_YUV420PACKER_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 11:10 +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 8:4 +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define 
NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 15:12 +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 23:22 +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 20:16 +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define 
NVC77D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN 28:24 +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC77D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NVC77D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NVC77D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000400) +#define NVC77D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NVC77D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NVC77D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NVC77D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NVC77D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00002010 + (a)*0x00000400) +#define NVC77D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00002014 + (a)*0x00000400) +#define 
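/*
 * Illustrative sketch (editor's addition): packing SET_PIXEL_CLOCK_FREQUENCY
 * from the fields above.  HERTZ occupies 30:0; ADJ1000DIV1001 in bit 31
 * requests the NTSC-style 1000/1001 rate adjustment (e.g. 59.94 Hz from a
 * 60 Hz timing) -- that reading of the bit is the editor's inference from
 * the field name.
 */
static inline NvU32
nvc77dPixelClockFrequency(NvU32 hertz, NvBool adj1000div1001)
{
    return (hertz & 0x7FFFFFFF) |                 /* HERTZ          30:0  */
           (adj1000div1001 ? (1U << 31) : 0);     /* ADJ1000DIV1001 31:31 */
}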
NVC77D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC77D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NVC77D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_DITHER_CONTROL_BITS 5:4 +#define NVC77D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000) +#define NVC77D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001) +#define NVC77D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002) +#define NVC77D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003) +#define NVC77D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2 +#define NVC77D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_DITHER_CONTROL_MODE 10:8 +#define NVC77D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NVC77D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NVC77D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NVC77D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NVC77D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NVC77D_HEAD_SET_DITHER_CONTROL_MODE_ROUND (0x00000005) +#define NVC77D_HEAD_SET_DITHER_CONTROL_PHASE 13:12 +#define NVC77D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x0000201C + (a)*0x00000400) +#define NVC77D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 0:0 +#define NVC77D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NVC77D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NVC77D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING 4:4 +#define NVC77D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 9:8 +#define NVC77D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NVC77D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NVC77D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000400 + (b)*0x00000004) +#define NVC77D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NVC77D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000400) +#define NVC77D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NVC77D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NVC77D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NVC77D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NVC77D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(a) (0x0000202C + (a)*0x00000400) +#define NVC77D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVC77D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000400) +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0 +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000) +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001) +#define 
NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002) +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003) +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004) +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4 +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000) +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001) +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12 +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001) +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004) +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8 +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED 16:16 +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000) +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001) +#define NVC77D_HEAD_SET_HEAD_USAGE_BOUNDS_ELV_START 31:17 +#define NVC77D_HEAD_SET_STALL_LOCK(a) (0x00002034 + (a)*0x00000400) +#define NVC77D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NVC77D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NVC77D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NVC77D_HEAD_SET_STALL_LOCK_MODE 2:2 +#define NVC77D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NVC77D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN 8:4 +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define 
NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC77D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC77D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 12:12 +#define NVC77D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000) +#define NVC77D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001) +#define NVC77D_HEAD_SET_STALL_LOCK_TEPOLARITY 14:14 +#define NVC77D_HEAD_SET_STALL_LOCK_TEPOLARITY_POSITIVE_TRUE (0x00000000) +#define NVC77D_HEAD_SET_STALL_LOCK_TEPOLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC77D_HEAD_SET_STALL_LOCK_UNSTALL_SYNC_ADVANCE 25:16 +#define NVC77D_HEAD_SET_LOCK_CHAIN(a) (0x00002044 + (a)*0x00000400) +#define NVC77D_HEAD_SET_LOCK_CHAIN_POSITION 3:0 +#define NVC77D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x00002048 + (a)*0x00000400) +#define NVC77D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NVC77D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NVC77D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000400) +#define NVC77D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NVC77D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NVC77D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000400) +#define NVC77D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NVC77D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NVC77D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x0000205C + (a)*0x00000400) +#define NVC77D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NVC77D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NVC77D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000400) +#define NVC77D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NVC77D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NVC77D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000400) +#define NVC77D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NVC77D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NVC77D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000400) +#define NVC77D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NVC77D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NVC77D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400) +#define NVC77D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NVC77D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NVC77D_HEAD_SET_OVERSCAN_COLOR(a) (0x00002078 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OVERSCAN_COLOR_RED_CR 9:0 +#define NVC77D_HEAD_SET_OVERSCAN_COLOR_GREEN_Y 19:10 +#define NVC77D_HEAD_SET_OVERSCAN_COLOR_BLUE_CB 29:20 +#define NVC77D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(a) (0x0000207C + (a)*0x00000400) +#define NVC77D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_RED_CR 9:0 +#define NVC77D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_GREEN_Y 19:10 +#define NVC77D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_BLUE_CB 29:20 +#define NVC77D_HEAD_SET_HDMI_CTRL(a) (0x00002080 + (a)*0x00000400) +#define NVC77D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NVC77D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NVC77D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NVC77D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NVC77D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NVC77D_HEAD_SET_CONTEXT_DMA_CURSOR(a,b) (0x00002088 + (a)*0x00000400 + (b)*0x00000004) 
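Two conventions recur throughout these generated headers: method macros such as NVC77D_HEAD_SET_STALL_LOCK(a) compute a per-head method offset from the head index (a), and bare HIGH:LOW pairs such as 25:16 name a bit range inside the 32-bit method data word. Below is a minimal, self-contained C sketch of how such a range can be packed and unpacked; the FIELD_* macros are illustrative stand-ins invented for this example (the driver's nvmisc.h ships DRF_* macros built on the same `(0 ? high:low)` ternary trick).

    /* Minimal illustration of the HIGH:LOW field notation.  A field
     * defined as "25:16" evaluates to 16 inside (0 ? 25:16) and to 25
     * inside (1 ? 25:16); the macros below exploit exactly that. */
    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_LOW(f)    ((0 ? f) % 32)   /* low bit index  */
    #define FIELD_HIGH(f)   ((1 ? f) % 32)   /* high bit index */
    #define FIELD_MASK(f)   (0xFFFFFFFFu >> (31 - FIELD_HIGH(f) + FIELD_LOW(f)))
    #define FIELD_NUM(f, n) (((uint32_t)(n) & FIELD_MASK(f)) << FIELD_LOW(f))
    #define FIELD_VAL(f, v) (((uint32_t)(v) >> FIELD_LOW(f)) & FIELD_MASK(f))

    /* Re-stated from the header above so the snippet stands alone. */
    #define NVC77D_HEAD_SET_STALL_LOCK_UNSTALL_SYNC_ADVANCE 25:16

    int main(void)
    {
        /* Pack an unstall sync advance of 42 lines into the method
         * data word, then read it back. */
        uint32_t data = FIELD_NUM(NVC77D_HEAD_SET_STALL_LOCK_UNSTALL_SYNC_ADVANCE, 42);
        printf("data = 0x%08X, readback = %u\n", (unsigned)data,
               (unsigned)FIELD_VAL(NVC77D_HEAD_SET_STALL_LOCK_UNSTALL_SYNC_ADVANCE, data));
        return 0;
    }

The same pattern applies to every HIGH:LOW field in these files; only the macro names differ.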
+#define NVC77D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0 +#define NVC77D_HEAD_SET_OFFSET_CURSOR(a,b) (0x00002090 + (a)*0x00000400 + (b)*0x00000004) +#define NVC77D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0 +#define NVC77D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x00002098 + (a)*0x00000400) +#define NVC77D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 0:0 +#define NVC77D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NVC77D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000400) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NVC77D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0 +#define NVC77D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8 +#define NVC77D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12 +#define NVC77D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20 +#define NVC77D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000400) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0 +#define NVC77D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8 +#define NVC77D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12 +#define NVC77D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16 +#define NVC77D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS 20:20 +#define NVC77D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00002180 + (a)*0x00000400) +#define NVC77D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NVC77D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0 +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 
(0x00000008) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F) +#define NVC77D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020) +#define NVC77D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8 +#define NVC77D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NVC77D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NVC77D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12 +#define NVC77D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000) +#define NVC77D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030) +#define NVC77D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC77D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8 +#define NVC77D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050) +#define NVC77D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051) +#define NVC77D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052) +#define NVC77D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053) +#define NVC77D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054) +#define NVC77D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055) +#define NVC77D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056) +#define NVC77D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057) +#define NVC77D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20 +#define NVC77D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000) +#define NVC77D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030) +#define NVC77D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC77D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8 +#define NVC77D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050) +#define NVC77D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051) +#define NVC77D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052) +#define NVC77D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053) 
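In every NVC77D_HEAD_* macro the (a) argument selects the head, and consecutive heads are spaced 0x400 bytes apart, so SET_CRC_CONTROL for head 1 resolves to 0x2184 + 0x400 = 0x2584. A hypothetical compile-time check, shown only to make that stride explicit:

    /* Illustrative only: re-derives the per-head stride from the
     * SET_CRC_CONTROL macro defined above (re-stated so the snippet
     * stands alone). */
    #define NVC77D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400)
    _Static_assert(NVC77D_HEAD_SET_CRC_CONTROL(1) - NVC77D_HEAD_SET_CRC_CONTROL(0)
                   == 0x400, "heads are 0x400 bytes apart");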
+#define NVC77D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054) +#define NVC77D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055) +#define NVC77D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056) +#define NVC77D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057) +#define NVC77D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9 +#define NVC77D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_PRESENT_CONTROL(a) (0x0000218C + (a)*0x00000400) +#define NVC77D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 0:0 +#define NVC77D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NVC77D_HEAD_SET_SW_SPARE_A(a) (0x00002194 + (a)*0x00000400) +#define NVC77D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NVC77D_HEAD_SET_SW_SPARE_B(a) (0x00002198 + (a)*0x00000400) +#define NVC77D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NVC77D_HEAD_SET_SW_SPARE_C(a) (0x0000219C + (a)*0x00000400) +#define NVC77D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NVC77D_HEAD_SET_SW_SPARE_D(a) (0x000021A0 + (a)*0x00000400) +#define NVC77D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NVC77D_HEAD_SET_DISPLAY_RATE(a) (0x000021A8 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DISPLAY_RATE_RUN_MODE 0:0 +#define NVC77D_HEAD_SET_DISPLAY_RATE_RUN_MODE_CONTINUOUS (0x00000000) +#define NVC77D_HEAD_SET_DISPLAY_RATE_RUN_MODE_ONE_SHOT (0x00000001) +#define NVC77D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL 25:4 +#define NVC77D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH 2:2 +#define NVC77D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE(a,b) (0x000021AC + (a)*0x00000400 + (b)*0x00000004) +#define NVC77D_HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE_HANDLE 31:0 +#define NVC77D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL(a,b) (0x000021CC + (a)*0x00000400 + (b)*0x00000004) +#define NVC77D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_OFFSET 7:0 +#define NVC77D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15 +#define NVC77D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000) +#define NVC77D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001) +#define NVC77D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE 14:14 +#define NVC77D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE_WRITE (0x00000000) +#define NVC77D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC77D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE 10:10 +#define NVC77D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE_ONE_TIME (0x00000000) +#define NVC77D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE_CONTINUOUS (0x00000001) +#define NVC77D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RASTER_LINE 30:16 +#define NVC77D_HEAD_SET_RG_REL_SEMAPHORE_VALUE(a,b) (0x000021EC + (a)*0x00000400 + (b)*0x00000004) +#define NVC77D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_VALUE 31:0 +#define NVC77D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE(a) (0x00002214 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVC77D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVC77D_HEAD_SET_MIN_FRAME_IDLE(a) (0x00002218 + (a)*0x00000400) +#define NVC77D_HEAD_SET_MIN_FRAME_IDLE_LEADING_RASTER_LINES 14:0 +#define NVC77D_HEAD_SET_MIN_FRAME_IDLE_TRAILING_RASTER_LINES 30:16 +#define NVC77D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED(a) 
(0x00002220 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_ALPHA 7:0 +#define NVC77D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_RED 31:16 +#define NVC77D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE(a) (0x00002224 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_GREEN 15:0 +#define NVC77D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_BLUE 31:16 +#define NVC77D_HEAD_SET_CURSOR_COLOR_NORM_SCALE(a) (0x00002228 + (a)*0x00000400) +#define NVC77D_HEAD_SET_CURSOR_COLOR_NORM_SCALE_VALUE 15:0 +#define NVC77D_HEAD_SET_XOR_BLEND_FACTOR(a) (0x0000222C + (a)*0x00000400) +#define NVC77D_HEAD_SET_XOR_BLEND_FACTOR_LOG2PEAK_LUMINANCE 3:0 +#define NVC77D_HEAD_SET_XOR_BLEND_FACTOR_S1 16:4 +#define NVC77D_HEAD_SET_XOR_BLEND_FACTOR_S2 30:18 +#define NVC77D_HEAD_SET_CLAMP_RANGE_GREEN(a) (0x00002238 + (a)*0x00000400) +#define NVC77D_HEAD_SET_CLAMP_RANGE_GREEN_LOW 11:0 +#define NVC77D_HEAD_SET_CLAMP_RANGE_GREEN_HIGH 27:16 +#define NVC77D_HEAD_SET_CLAMP_RANGE_RED_BLUE(a) (0x0000223C + (a)*0x00000400) +#define NVC77D_HEAD_SET_CLAMP_RANGE_RED_BLUE_LOW 11:0 +#define NVC77D_HEAD_SET_CLAMP_RANGE_RED_BLUE_HIGH 27:16 +#define NVC77D_HEAD_SET_OCSC0CONTROL(a) (0x00002240 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC0CONTROL_ENABLE 0:0 +#define NVC77D_HEAD_SET_OCSC0CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_OCSC0CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C00(a) (0x00002244 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C00_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C01(a) (0x00002248 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C01_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C02(a) (0x0000224C + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C02_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C03(a) (0x00002250 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C03_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C10(a) (0x00002254 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C10_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C11(a) (0x00002258 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C11_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C12(a) (0x0000225C + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C12_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C13(a) (0x00002260 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C13_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C20(a) (0x00002264 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C20_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C21(a) (0x00002268 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C21_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C22(a) (0x0000226C + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C22_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C23(a) (0x00002270 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC0COEFFICIENT_C23_VALUE 20:0 +#define NVC77D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0 +#define NVC77D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1 +#define NVC77D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_OLUT_CONTROL_MODE 3:2 +#define NVC77D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000) 
+#define NVC77D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVC77D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVC77D_HEAD_SET_OLUT_CONTROL_SIZE 18:8 +#define NVC77D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0 +#define NVC77D_HEAD_SET_CONTEXT_DMA_OLUT(a) (0x00002288 + (a)*0x00000400) +#define NVC77D_HEAD_SET_CONTEXT_DMA_OLUT_HANDLE 31:0 +#define NVC77D_HEAD_SET_OFFSET_OLUT(a) (0x0000228C + (a)*0x00000400) +#define NVC77D_HEAD_SET_OFFSET_OLUT_ORIGIN 31:0 +#define NVC77D_HEAD_SET_OCSC1CONTROL(a) (0x0000229C + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC1CONTROL_ENABLE 0:0 +#define NVC77D_HEAD_SET_OCSC1CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_OCSC1CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C00(a) (0x000022A0 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C00_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C01(a) (0x000022A4 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C01_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C02(a) (0x000022A8 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C02_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C03(a) (0x000022AC + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C03_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C10(a) (0x000022B0 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C10_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C11(a) (0x000022B4 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C11_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C12(a) (0x000022B8 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C12_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C13(a) (0x000022BC + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C13_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C20(a) (0x000022C0 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C20_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C21(a) (0x000022C4 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C21_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C22(a) (0x000022C8 + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C22_VALUE 20:0 +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C23(a) (0x000022CC + (a)*0x00000400) +#define NVC77D_HEAD_SET_OCSC1COEFFICIENT_C23_VALUE 20:0 +#define NVC77D_HEAD_SET_TILE_POSITION(a) (0x000022D0 + (a)*0x00000400) +#define NVC77D_HEAD_SET_TILE_POSITION_X 2:0 +#define NVC77D_HEAD_SET_TILE_POSITION_Y 6:4 +#define NVC77D_HEAD_SET_DSC_CONTROL(a) (0x000022D4 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_CONTROL_ENABLE 0:0 +#define NVC77D_HEAD_SET_DSC_CONTROL_ENABLE_FALSE (0x00000000) +#define NVC77D_HEAD_SET_DSC_CONTROL_ENABLE_TRUE (0x00000001) +#define NVC77D_HEAD_SET_DSC_CONTROL_MODE 2:1 +#define NVC77D_HEAD_SET_DSC_CONTROL_MODE_SINGLE (0x00000000) +#define NVC77D_HEAD_SET_DSC_CONTROL_MODE_DUAL (0x00000001) +#define NVC77D_HEAD_SET_DSC_CONTROL_MODE_QUAD (0x00000002) +#define NVC77D_HEAD_SET_DSC_CONTROL_AUTO_RESET 3:3 +#define NVC77D_HEAD_SET_DSC_CONTROL_AUTO_RESET_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_DSC_CONTROL_AUTO_RESET_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION 4:4 +#define NVC77D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_ENABLE (0x00000001) +#define NVC77D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET 5:5 +#define 
NVC77D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_FALSE (0x00000000) +#define NVC77D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_TRUE (0x00000001) +#define NVC77D_HEAD_SET_DSC_CONTROL_FLATNESS_DET_THRESH 15:6 +#define NVC77D_HEAD_SET_DSC_PPS_CONTROL(a) (0x000022D8 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_CONTROL_ENABLE 0:0 +#define NVC77D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_FALSE (0x00000000) +#define NVC77D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_TRUE (0x00000001) +#define NVC77D_HEAD_SET_DSC_PPS_CONTROL_LOCATION 1:1 +#define NVC77D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VSYNC (0x00000000) +#define NVC77D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VBLANK (0x00000001) +#define NVC77D_HEAD_SET_DSC_PPS_CONTROL_SIZE 9:2 +#define NVC77D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY 10:10 +#define NVC77D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVC77D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVC77D_HEAD_SET_DSC_PPS_HEAD(a) (0x000022DC + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_HEAD_BYTE0 7:0 +#define NVC77D_HEAD_SET_DSC_PPS_HEAD_BYTE1 15:8 +#define NVC77D_HEAD_SET_DSC_PPS_HEAD_BYTE2 23:16 +#define NVC77D_HEAD_SET_DSC_PPS_HEAD_BYTE3 31:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA0(a) (0x000022E0 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MINOR 3:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MAJOR 7:4 +#define NVC77D_HEAD_SET_DSC_PPS_DATA0_PPS_IDENTIFIER 15:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA0_RESERVED 23:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA0_LINEBUF_DEPTH 27:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA0_BITS_PER_COMPONENT 31:28 +#define NVC77D_HEAD_SET_DSC_PPS_DATA1(a) (0x000022E4 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_HIGH 1:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA1_VBR_ENABLE 2:2 +#define NVC77D_HEAD_SET_DSC_PPS_DATA1_SIMPLE422 3:3 +#define NVC77D_HEAD_SET_DSC_PPS_DATA1_CONVERT_RGB 4:4 +#define NVC77D_HEAD_SET_DSC_PPS_DATA1_BLOCK_PRED_ENABLE 5:5 +#define NVC77D_HEAD_SET_DSC_PPS_DATA1_RESERVED 7:6 +#define NVC77D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_LOW 15:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_HIGH 23:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_LOW 31:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA2(a) (0x000022E8 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_HIGH 7:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_LOW 15:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_HIGH 23:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_LOW 31:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA3(a) (0x000022EC + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_HIGH 7:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_LOW 15:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_HIGH 23:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_LOW 31:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA4(a) (0x000022F0 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_HIGH 1:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA4_RESERVED 7:2 +#define NVC77D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_LOW 15:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_HIGH 23:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_LOW 31:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA5(a) (0x000022F4 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA5_RESERVED0 7:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA5_INITIAL_SCALE_VALUE 13:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA5_RESERVED1 15:14 +#define NVC77D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_HIGH 23:16 +#define 
NVC77D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_LOW 31:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA6(a) (0x000022F8 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_HIGH 3:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA6_RESERVED0 7:4 +#define NVC77D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_LOW 15:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA6_RESERVED1 23:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA6_FIRST_LINE_BPG_OFFSET 28:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA6_RESERVED2 31:29 +#define NVC77D_HEAD_SET_DSC_PPS_DATA7(a) (0x000022FC + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_HIGH 7:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_LOW 15:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_HIGH 23:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_LOW 31:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA8(a) (0x00002300 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_HIGH 7:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_LOW 15:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_HIGH 23:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_LOW 31:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA9(a) (0x00002304 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MIN_QP 4:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA9_RESERVED0 7:5 +#define NVC77D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MAX_QP 12:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA9_RESERVED1 15:13 +#define NVC77D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_HIGH 23:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_LOW 31:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA10(a) (0x00002308 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA10_RC_EDGE_FACTOR 3:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA10_RESERVED0 7:4 +#define NVC77D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT0 12:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA10_RESERVED1 15:13 +#define NVC77D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT1 20:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA10_RESERVED2 23:21 +#define NVC77D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_LO 27:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_HI 31:28 +#define NVC77D_HEAD_SET_DSC_PPS_DATA11(a) (0x0000230C + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH0 7:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH1 15:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH2 23:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH3 31:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA12(a) (0x00002310 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH4 7:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH5 15:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH6 23:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH7 31:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA13(a) (0x00002314 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH8 7:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH9 15:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH10 23:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH11 31:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA14(a) (0x00002318 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH12 7:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH13 15:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_HIGH0 18:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MIN_QP0 23:19 +#define NVC77D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_BPG_OFFSET0 29:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_LOW0 31:30 
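The SET_DSC_PPS_DATA0..31 words carry the 128-byte VESA DSC picture parameter set four bytes per word, which is why 16-bit parameters such as pic_width are split into _HIGH/_LOW byte fields: the PPS is a big-endian byte stream, so the high byte lands in the word's lower-numbered byte lane. A sketch of packing DATA2 under that reading, reusing the illustrative FIELD_NUM macro from the earlier snippet (dsc_pps_data2 is a hypothetical helper, not a driver function):

    /* Assumes FIELD_NUM and <stdint.h> from the earlier sketch.
     * Field definitions re-stated from the header above. */
    #define NVC77D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_HIGH    7:0
    #define NVC77D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_LOW     15:8
    #define NVC77D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_HIGH 23:16
    #define NVC77D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_LOW  31:24

    static uint32_t dsc_pps_data2(uint16_t pic_width, uint16_t slice_height)
    {
        /* Big-endian PPS bytes: high byte of each 16-bit parameter
         * goes into the lower-numbered byte field of the word. */
        return FIELD_NUM(NVC77D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_HIGH,    pic_width >> 8)
             | FIELD_NUM(NVC77D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_LOW,     pic_width & 0xFF)
             | FIELD_NUM(NVC77D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_HIGH, slice_height >> 8)
             | FIELD_NUM(NVC77D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_LOW,  slice_height & 0xFF);
    }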
+#define NVC77D_HEAD_SET_DSC_PPS_DATA15(a) (0x0000231C + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH1 2:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP1 7:3 +#define NVC77D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET1 13:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW1 15:14 +#define NVC77D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH2 18:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP2 23:19 +#define NVC77D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET2 29:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW2 31:30 +#define NVC77D_HEAD_SET_DSC_PPS_DATA16(a) (0x00002320 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH3 2:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP3 7:3 +#define NVC77D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET3 13:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW3 15:14 +#define NVC77D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH4 18:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP4 23:19 +#define NVC77D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET4 29:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW4 31:30 +#define NVC77D_HEAD_SET_DSC_PPS_DATA17(a) (0x00002324 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH5 2:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP5 7:3 +#define NVC77D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET5 13:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW5 15:14 +#define NVC77D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH6 18:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP6 23:19 +#define NVC77D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET6 29:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW6 31:30 +#define NVC77D_HEAD_SET_DSC_PPS_DATA18(a) (0x00002328 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH7 2:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP7 7:3 +#define NVC77D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET7 13:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW7 15:14 +#define NVC77D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH8 18:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP8 23:19 +#define NVC77D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET8 29:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW8 31:30 +#define NVC77D_HEAD_SET_DSC_PPS_DATA19(a) (0x0000232C + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH9 2:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP9 7:3 +#define NVC77D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET9 13:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW9 15:14 +#define NVC77D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH10 18:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP10 23:19 +#define NVC77D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET10 29:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW10 31:30 +#define NVC77D_HEAD_SET_DSC_PPS_DATA20(a) (0x00002330 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH11 2:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP11 7:3 +#define NVC77D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET11 13:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW11 15:14 +#define NVC77D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH12 18:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP12 23:19 +#define NVC77D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET12 29:24 +#define 
NVC77D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW12 31:30 +#define NVC77D_HEAD_SET_DSC_PPS_DATA21(a) (0x00002334 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH13 2:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP13 7:3 +#define NVC77D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET13 13:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW13 15:14 +#define NVC77D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH14 18:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP14 23:19 +#define NVC77D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET14 29:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW14 31:30 +#define NVC77D_HEAD_SET_DSC_PPS_DATA22(a) (0x00002338 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA22_NATIVE422 0:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA22_NATIVE420 1:1 +#define NVC77D_HEAD_SET_DSC_PPS_DATA22_RESERVED0 7:2 +#define NVC77D_HEAD_SET_DSC_PPS_DATA22_SECOND_LINE_BPG_OFFSET 12:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA22_RESERVED1 15:13 +#define NVC77D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSET_HIGH 23:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSETLOW 31:24 +#define NVC77D_HEAD_SET_DSC_PPS_DATA23(a) (0x0000233C + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_HIGH 7:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_LOW 15:8 +#define NVC77D_HEAD_SET_DSC_PPS_DATA23_RESERVED 31:16 +#define NVC77D_HEAD_SET_DSC_PPS_DATA24(a) (0x00002340 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA24_RESERVED 31:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA25(a) (0x00002344 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA25_RESERVED 31:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA26(a) (0x00002348 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA26_RESERVED 31:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA27(a) (0x0000234C + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA27_RESERVED 31:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA28(a) (0x00002350 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA28_RESERVED 31:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA29(a) (0x00002354 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA29_RESERVED 31:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA30(a) (0x00002358 + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA30_RESERVED 31:0 +#define NVC77D_HEAD_SET_DSC_PPS_DATA31(a) (0x0000235C + (a)*0x00000400) +#define NVC77D_HEAD_SET_DSC_PPS_DATA31_RESERVED 31:0 +#define NVC77D_HEAD_SET_RG_MERGE(a) (0x00002360 + (a)*0x00000400) +#define NVC77D_HEAD_SET_RG_MERGE_MODE 1:0 +#define NVC77D_HEAD_SET_RG_MERGE_MODE_DISABLE (0x00000000) +#define NVC77D_HEAD_SET_RG_MERGE_MODE_SETUP (0x00000001) +#define NVC77D_HEAD_SET_RG_MERGE_MODE_MASTER (0x00000002) +#define NVC77D_HEAD_SET_RG_MERGE_MODE_SLAVE (0x00000003) +#define NVC77D_HEAD_SET_RASTER_HBLANK_DELAY(a) (0x00002364 + (a)*0x00000400) +#define NVC77D_HEAD_SET_RASTER_HBLANK_DELAY_BLANK_START 15:0 +#define NVC77D_HEAD_SET_RASTER_HBLANK_DELAY_BLANK_END 31:16 +#define NVC77D_HEAD_SET_HDMI_DSC_HCACTIVE(a) (0x00002368 + (a)*0x00000400) +#define NVC77D_HEAD_SET_HDMI_DSC_HCACTIVE_BYTES 15:0 +#define NVC77D_HEAD_SET_HDMI_DSC_HCACTIVE_TRI_BYTES 31:16 +#define NVC77D_HEAD_SET_HDMI_DSC_HCBLANK(a) (0x0000236C + (a)*0x00000400) +#define NVC77D_HEAD_SET_HDMI_DSC_HCBLANK_WIDTH 15:0 +#define NVC77D_HEAD_SW_RESERVED(a,b) (0x00002370 + (a)*0x00000400 + (b)*0x00000004) +#define NVC77D_HEAD_SW_RESERVED_VALUE 31:0 +#define NVC77D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_HI(a,b) (0x00002380 + (a)*0x00000400 + (b)*0x00000004) +#define 
NVC77D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_HI_VALUE 31:0
+#define NVC77D_HEAD_SET_INFOFRAME_CTRL(a) (0x000023A4 + (a)*0x00000400)
+#define NVC77D_HEAD_SET_INFOFRAME_CTRL_MODE 1:0
+#define NVC77D_HEAD_SET_INFOFRAME_CTRL_MODE_DISABLE (0x00000000)
+#define NVC77D_HEAD_SET_INFOFRAME_CTRL_MODE_GENERIC (0x00000001)
+#define NVC77D_HEAD_SET_INFOFRAME_CTRL_MODE_EMP_OTHERS (0x00000002)
+#define NVC77D_HEAD_SET_INFOFRAME_CTRL_FREQUENCY 12:12
+#define NVC77D_HEAD_SET_INFOFRAME_CTRL_FREQUENCY_EVERY_FRAME (0x00000000)
+#define NVC77D_HEAD_SET_INFOFRAME_CTRL_FREQUENCY_ONCE (0x00000001)
+#define NVC77D_HEAD_SET_INFOFRAME_CTRL_LOCATION 5:4
+#define NVC77D_HEAD_SET_INFOFRAME_CTRL_LOCATION_VSYNC (0x00000000)
+#define NVC77D_HEAD_SET_INFOFRAME_CTRL_LOCATION_VBLANK (0x00000001)
+#define NVC77D_HEAD_SET_INFOFRAME_CTRL_MTD_STATE_CTRL 14:14
+#define NVC77D_HEAD_SET_INFOFRAME_CTRL_MTD_STATE_CTRL_ACTIVE (0x00000000)
+#define NVC77D_HEAD_SET_INFOFRAME_CTRL_MTD_STATE_CTRL_ARM (0x00000001)
+#define NVC77D_HEAD_SET_INFOFRAME_HEADER(a) (0x000023A8 + (a)*0x00000400)
+#define NVC77D_HEAD_SET_INFOFRAME_HEADER_HB0 7:0
+#define NVC77D_HEAD_SET_INFOFRAME_HEADER_HB1 15:8
+#define NVC77D_HEAD_SET_INFOFRAME_HEADER_HB2 23:16
+#define NVC77D_HEAD_SET_INFOFRAME_HEADER_HB3 31:24
+#define NVC77D_HEAD_SET_INFOFRAME_DATA0(a) (0x000023AC + (a)*0x00000400)
+#define NVC77D_HEAD_SET_INFOFRAME_DATA0_DB0 7:0
+#define NVC77D_HEAD_SET_INFOFRAME_DATA0_DB1 15:8
+#define NVC77D_HEAD_SET_INFOFRAME_DATA0_DB2 23:16
+#define NVC77D_HEAD_SET_INFOFRAME_DATA0_DB3 31:24
+#define NVC77D_HEAD_SET_INFOFRAME_DATA1(a) (0x000023B0 + (a)*0x00000400)
+#define NVC77D_HEAD_SET_INFOFRAME_DATA1_DB4 7:0
+#define NVC77D_HEAD_SET_INFOFRAME_DATA1_DB5 15:8
+#define NVC77D_HEAD_SET_INFOFRAME_DATA1_DB6 23:16
+#define NVC77D_HEAD_SET_INFOFRAME_DATA1_DB7 31:24
+#define NVC77D_HEAD_SET_INFOFRAME_DATA2(a) (0x000023B4 + (a)*0x00000400)
+#define NVC77D_HEAD_SET_INFOFRAME_DATA2_DB8 7:0
+#define NVC77D_HEAD_SET_INFOFRAME_DATA2_DB9 15:8
+#define NVC77D_HEAD_SET_INFOFRAME_DATA2_DB10 23:16
+#define NVC77D_HEAD_SET_INFOFRAME_DATA2_DB11 31:24
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clC77d_h
diff --git a/src/common/sdk/nvidia/inc/class/clc77f.h b/src/common/sdk/nvidia/inc/class/clc77f.h
new file mode 100644
index 0000000..3651a58
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc77f.h
@@ -0,0 +1,34 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clC77F_h_
+#define _clC77F_h_
+
+//
+// This class provides functional support for Display ANYChannel. Display Contextdmas bound to
+// ANYChannel can be used on any other display window channel.
+//
+#define NVC77F_ANY_CHANNEL_DMA (0x0000C77F)
+
+#endif // _clC77F_h_
+
diff --git a/src/common/sdk/nvidia/inc/class/clc797.h b/src/common/sdk/nvidia/inc/class/clc797.h
new file mode 100644
index 0000000..dc4b600
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc797.h
@@ -0,0 +1,4481 @@
+/*******************************************************************************
+    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a
+    copy of this software and associated documentation files (the "Software"),
+    to deal in the Software without restriction, including without limitation
+    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+    and/or sell copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+
+#ifndef _cl_ampere_b_h_
+#define _cl_ampere_b_h_
+
+/* AUTO GENERATED FILE -- DO NOT EDIT */
+/* Command: ../../../../class/bin/sw_header.pl ampere_b */
+
+#include "nvtypes.h"
+
+#define AMPERE_B 0xC797
+
+#define NVC797_SET_OBJECT 0x0000
+#define NVC797_SET_OBJECT_CLASS_ID 15:0
+#define NVC797_SET_OBJECT_ENGINE_ID 20:16
+
+#define NVC797_NO_OPERATION 0x0100
+#define NVC797_NO_OPERATION_V 31:0
+
+#define NVC797_SET_NOTIFY_A 0x0104
+#define NVC797_SET_NOTIFY_A_ADDRESS_UPPER 7:0
+
+#define NVC797_SET_NOTIFY_B 0x0108
+#define NVC797_SET_NOTIFY_B_ADDRESS_LOWER 31:0
+
+#define NVC797_NOTIFY 0x010c
+#define NVC797_NOTIFY_TYPE 31:0
+#define NVC797_NOTIFY_TYPE_WRITE_ONLY 0x00000000
+#define NVC797_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001
+
+#define NVC797_WAIT_FOR_IDLE 0x0110
+#define NVC797_WAIT_FOR_IDLE_V 31:0
+
+#define NVC797_LOAD_MME_INSTRUCTION_RAM_POINTER 0x0114
+#define NVC797_LOAD_MME_INSTRUCTION_RAM_POINTER_V 31:0
+
+#define NVC797_LOAD_MME_INSTRUCTION_RAM 0x0118
+#define NVC797_LOAD_MME_INSTRUCTION_RAM_V 31:0
+
+#define NVC797_LOAD_MME_START_ADDRESS_RAM_POINTER 0x011c
+#define NVC797_LOAD_MME_START_ADDRESS_RAM_POINTER_V 31:0
+
+#define NVC797_LOAD_MME_START_ADDRESS_RAM 0x0120
+#define NVC797_LOAD_MME_START_ADDRESS_RAM_V 31:0
+
+#define NVC797_SET_MME_SHADOW_RAM_CONTROL 0x0124
+#define NVC797_SET_MME_SHADOW_RAM_CONTROL_MODE 1:0
+#define NVC797_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK 0x00000000
+#define NVC797_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK_WITH_FILTER 0x00000001
+#define NVC797_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH 0x00000002
+#define NVC797_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_REPLAY 0x00000003
+
+#define NVC797_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER 0x0128
+#define NVC797_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER_V 7:0
+
+#define NVC797_PEER_SEMAPHORE_RELEASE_OFFSET 0x012c
+#define NVC797_PEER_SEMAPHORE_RELEASE_OFFSET_V 31:0
+
+#define NVC797_SET_GLOBAL_RENDER_ENABLE_A 0x0130
+#define NVC797_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0
+
+#define NVC797_SET_GLOBAL_RENDER_ENABLE_B 0x0134
+#define NVC797_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0
+
+#define NVC797_SET_GLOBAL_RENDER_ENABLE_C 0x0138
+#define NVC797_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0
+#define NVC797_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000
+#define NVC797_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001
+#define NVC797_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002
+#define NVC797_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003
+#define NVC797_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NVC797_SEND_GO_IDLE 0x013c
+#define NVC797_SEND_GO_IDLE_V 31:0
+
+#define NVC797_PM_TRIGGER 0x0140
+#define NVC797_PM_TRIGGER_V 31:0
+
+#define NVC797_PM_TRIGGER_WFI 0x0144
+#define NVC797_PM_TRIGGER_WFI_V 31:0
+
+#define NVC797_FE_ATOMIC_SEQUENCE_BEGIN 0x0148
+#define NVC797_FE_ATOMIC_SEQUENCE_BEGIN_V 31:0
+
+#define NVC797_FE_ATOMIC_SEQUENCE_END 0x014c
+#define NVC797_FE_ATOMIC_SEQUENCE_END_V 31:0
+
+#define NVC797_SET_INSTRUMENTATION_METHOD_HEADER 0x0150
+#define NVC797_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0
+
+#define NVC797_SET_INSTRUMENTATION_METHOD_DATA 0x0154
+#define NVC797_SET_INSTRUMENTATION_METHOD_DATA_V 31:0
+
+#define NVC797_SET_REPORT_SEMAPHORE_PAYLOAD_LOWER 0x0158
+#define NVC797_SET_REPORT_SEMAPHORE_PAYLOAD_LOWER_PAYLOAD_LOWER 31:0
+
+#define NVC797_SET_REPORT_SEMAPHORE_PAYLOAD_UPPER 0x015c
+#define NVC797_SET_REPORT_SEMAPHORE_PAYLOAD_UPPER_PAYLOAD_UPPER
31:0 + +#define NVC797_SET_REPORT_SEMAPHORE_ADDRESS_LOWER 0x0160 +#define NVC797_SET_REPORT_SEMAPHORE_ADDRESS_LOWER_LOWER 31:0 + +#define NVC797_SET_REPORT_SEMAPHORE_ADDRESS_UPPER 0x0164 +#define NVC797_SET_REPORT_SEMAPHORE_ADDRESS_UPPER_UPPER 7:0 + +#define NVC797_REPORT_SEMAPHORE_EXECUTE 0x0168 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_OPERATION 1:0 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_OPERATION_RELEASE 0x00000000 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_OPERATION_ACQUIRE 0x00000001 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_OPERATION_REPORT_ONLY 0x00000002 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_OPERATION_TRAP 0x00000003 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION 5:2 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_NONE 0x00000000 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_DATA_ASSEMBLER 0x00000001 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_VERTEX_SHADER 0x00000002 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_TESSELATION_INIT_SHADER 0x00000008 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_TESSELATION_SHADER 0x00000009 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_GEOMETRY_SHADER 0x00000006 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_STREAMING_OUTPUT 0x00000005 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_VPC 0x00000004 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_ZCULL 0x00000007 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_PIXEL_SHADER 0x0000000A +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_DEPTH_TEST 0x0000000C +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_ALL 0x0000000F +#define NVC797_REPORT_SEMAPHORE_EXECUTE_AWAKEN_ENABLE 6:6 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_AWAKEN_ENABLE_TRUE 0x00000001 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT 11:7 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_NONE 0x00000000 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_DA_VERTICES_GENERATED 0x00000001 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_DA_PRIMITIVES_GENERATED 0x00000003 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_VS_INVOCATIONS 0x00000005 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_TI_INVOCATIONS 0x0000001B +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_TS_INVOCATIONS 0x0000001D +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_TS_PRIMITIVES_GENERATED 0x0000001F +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_GS_INVOCATIONS 0x00000007 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_GS_PRIMITIVES_GENERATED 0x00000009 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_ALPHA_BETA_CLOCKS 0x00000004 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_SCG_CLOCKS 0x00000008 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_VTG_PRIMITIVES_OUT 0x00000012 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x0000001E +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_STREAMING_PRIMITIVES_SUCCEEDED 0x0000000B +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_STREAMING_PRIMITIVES_NEEDED 0x0000000D +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000006 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_STREAMING_BYTE_COUNT 0x0000001A +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_CLIPPER_INVOCATIONS 0x0000000F +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_CLIPPER_PRIMITIVES_GENERATED 0x00000011 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_ZCULL_STATS0 
0x0000000A +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_ZCULL_STATS1 0x0000000C +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_ZCULL_STATS2 0x0000000E +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_ZCULL_STATS3 0x00000010 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_PS_INVOCATIONS 0x00000013 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_ZPASS_PIXEL_CNT 0x00000002 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_ZPASS_PIXEL_CNT64 0x00000015 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_TILED_ZPASS_PIXEL_CNT64 0x00000017 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_IEEE_CLEAN_COLOR_TARGET 0x00000018 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_IEEE_CLEAN_ZETA_TARGET 0x00000019 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_BOUNDING_RECTANGLE 0x0000001C +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REPORT_TIMESTAMP 0x00000014 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE 14:13 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE_SEMAPHORE_FOUR_WORDS 0x00000000 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE_SEMAPHORE_ONE_WORD 0x00000001 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE_SEMAPHORE_TWO_WORDS 0x00000002 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_SUB_REPORT 17:15 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_FLUSH_DISABLE 19:19 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_FLUSH_DISABLE_FALSE 0x00000000 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_FLUSH_DISABLE_TRUE 0x00000001 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_ROP_FLUSH_DISABLE 18:18 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_ROP_FLUSH_DISABLE_FALSE 0x00000000 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_ROP_FLUSH_DISABLE_TRUE 0x00000001 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REDUCTION_ENABLE 20:20 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP 23:21 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_INC 0x00000003 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_AND 0x00000005 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_OR 0x00000006 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REDUCTION_FORMAT 25:24 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000000 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000001 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PAYLOAD_SIZE64 27:27 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PAYLOAD_SIZE64_FALSE 0x00000000 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_PAYLOAD_SIZE64_TRUE 0x00000001 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE 29:28 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_NONE 0x00000000 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_UNCONDITIONAL 0x00000001 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_CONDITIONAL 0x00000002 +#define NVC797_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_CONDITIONAL_EXT 0x00000003 + +#define NVC797_LINE_LENGTH_IN 0x0180 +#define NVC797_LINE_LENGTH_IN_VALUE 31:0 + +#define NVC797_LINE_COUNT 0x0184 +#define NVC797_LINE_COUNT_VALUE 31:0 + +#define NVC797_OFFSET_OUT_UPPER 0x0188 +#define NVC797_OFFSET_OUT_UPPER_VALUE 7:0 + +#define 
NVC797_OFFSET_OUT 0x018c +#define NVC797_OFFSET_OUT_VALUE 31:0 + +#define NVC797_PITCH_OUT 0x0190 +#define NVC797_PITCH_OUT_VALUE 31:0 + +#define NVC797_SET_DST_BLOCK_SIZE 0x0194 +#define NVC797_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC797_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVC797_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC797_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC797_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC797_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC797_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC797_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC797_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC797_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC797_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVC797_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVC797_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVC797_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NVC797_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVC797_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVC797_SET_DST_WIDTH 0x0198 +#define NVC797_SET_DST_WIDTH_V 31:0 + +#define NVC797_SET_DST_HEIGHT 0x019c +#define NVC797_SET_DST_HEIGHT_V 31:0 + +#define NVC797_SET_DST_DEPTH 0x01a0 +#define NVC797_SET_DST_DEPTH_V 31:0 + +#define NVC797_SET_DST_LAYER 0x01a4 +#define NVC797_SET_DST_LAYER_V 31:0 + +#define NVC797_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVC797_SET_DST_ORIGIN_BYTES_X_V 20:0 + +#define NVC797_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVC797_SET_DST_ORIGIN_SAMPLES_Y_V 16:0 + +#define NVC797_LAUNCH_DMA 0x01b0 +#define NVC797_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVC797_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVC797_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVC797_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVC797_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVC797_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 +#define NVC797_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVC797_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVC797_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 +#define NVC797_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVC797_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVC797_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVC797_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define NVC797_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVC797_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC797_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC797_LAUNCH_DMA_REDUCTION_OP 15:13 +#define NVC797_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC797_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC797_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC797_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVC797_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC797_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVC797_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVC797_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC797_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVC797_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC797_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVC797_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVC797_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVC797_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVC797_LOAD_INLINE_DATA 0x01b4 +#define 
NVC797_LOAD_INLINE_DATA_V 31:0 + +#define NVC797_SET_I2M_SEMAPHORE_A 0x01dc +#define NVC797_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC797_SET_I2M_SEMAPHORE_B 0x01e0 +#define NVC797_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC797_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVC797_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC797_SET_MME_SWITCH_STATE 0x01ec +#define NVC797_SET_MME_SWITCH_STATE_VALID 0:0 +#define NVC797_SET_MME_SWITCH_STATE_VALID_FALSE 0x00000000 +#define NVC797_SET_MME_SWITCH_STATE_VALID_TRUE 0x00000001 +#define NVC797_SET_MME_SWITCH_STATE_SAVE_MACRO 11:4 +#define NVC797_SET_MME_SWITCH_STATE_RESTORE_MACRO 19:12 + +#define NVC797_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVC797_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVC797_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVC797_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVC797_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVC797_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVC797_SET_I2M_SPARE_NOOP03 0x01fc +#define NVC797_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVC797_RUN_DS_NOW 0x0200 +#define NVC797_RUN_DS_NOW_V 31:0 + +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS 0x0204 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD 4:0 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_INSTANTANEOUS 0x00000000 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16 0x00000001 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32 0x00000002 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__64 0x00000003 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__128 0x00000004 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__256 0x00000005 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__512 0x00000006 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1024 0x00000007 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2048 0x00000008 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4096 0x00000009 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__8192 0x0000000A +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16384 0x0000000B +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32768 0x0000000C +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__65536 0x0000000D +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__131072 0x0000000E +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__262144 0x0000000F +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__524288 0x00000010 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1048576 0x00000011 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2097152 0x00000012 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4194304 0x00000013 +#define NVC797_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_LATEZ_ALWAYS 0x0000001F + +#define NVC797_SET_GS_MODE 0x0208 +#define NVC797_SET_GS_MODE_TYPE 0:0 +#define NVC797_SET_GS_MODE_TYPE_ANY 0x00000000 +#define NVC797_SET_GS_MODE_TYPE_FAST_GS 
0x00000001 + +#define NVC797_SET_ALIASED_LINE_WIDTH_ENABLE 0x020c +#define NVC797_SET_ALIASED_LINE_WIDTH_ENABLE_V 0:0 +#define NVC797_SET_ALIASED_LINE_WIDTH_ENABLE_V_FALSE 0x00000000 +#define NVC797_SET_ALIASED_LINE_WIDTH_ENABLE_V_TRUE 0x00000001 + +#define NVC797_SET_API_MANDATED_EARLY_Z 0x0210 +#define NVC797_SET_API_MANDATED_EARLY_Z_ENABLE 0:0 +#define NVC797_SET_API_MANDATED_EARLY_Z_ENABLE_FALSE 0x00000000 +#define NVC797_SET_API_MANDATED_EARLY_Z_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_GS_DM_FIFO 0x0214 +#define NVC797_SET_GS_DM_FIFO_SIZE_RASTER_ON 12:0 +#define NVC797_SET_GS_DM_FIFO_SIZE_RASTER_OFF 28:16 +#define NVC797_SET_GS_DM_FIFO_SPILL_ENABLED 31:31 +#define NVC797_SET_GS_DM_FIFO_SPILL_ENABLED_FALSE 0x00000000 +#define NVC797_SET_GS_DM_FIFO_SPILL_ENABLED_TRUE 0x00000001 + +#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS 0x0218 +#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY 5:4 +#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC797_INVALIDATE_SHADER_CACHES 0x021c +#define NVC797_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVC797_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVC797_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVC797_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVC797_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVC797_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVC797_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVC797_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVC797_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVC797_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVC797_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVC797_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVC797_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVC797_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVC797_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVC797_SET_INSTANCE_COUNT 0x0220 +#define NVC797_SET_INSTANCE_COUNT_V 31:0 + +#define NVC797_SET_POSITION_W_SCALED_OFFSET_ENABLE 0x0224 +#define NVC797_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE 0:0 +#define NVC797_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE_FALSE 0x00000000 +#define NVC797_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_GO_IDLE_TIMEOUT 0x022c +#define NVC797_SET_GO_IDLE_TIMEOUT_V 31:0 + +#define NVC797_SET_MME_VERSION 0x0234 +#define NVC797_SET_MME_VERSION_MAJOR 7:0 + +#define NVC797_SET_INDEX_BUFFER_SIZE_A 0x0238 +#define NVC797_SET_INDEX_BUFFER_SIZE_A_UPPER 7:0 + +#define NVC797_SET_INDEX_BUFFER_SIZE_B 0x023c +#define NVC797_SET_INDEX_BUFFER_SIZE_B_LOWER 31:0 + +#define NVC797_SET_ROOT_TABLE_VISIBILITY(i) (0x0240+(i)*4) +#define NVC797_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP0_ENABLE 1:0 +#define NVC797_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP1_ENABLE 5:4 +#define NVC797_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP2_ENABLE 9:8 +#define NVC797_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP3_ENABLE 13:12 +#define NVC797_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP4_ENABLE 17:16 + +#define NVC797_SET_DRAW_CONTROL_A 0x0260 +#define NVC797_SET_DRAW_CONTROL_A_TOPOLOGY 3:0 +#define NVC797_SET_DRAW_CONTROL_A_TOPOLOGY_POINTS 0x00000000 +#define 
NVC797_SET_DRAW_CONTROL_A_TOPOLOGY_LINES 0x00000001 +#define NVC797_SET_DRAW_CONTROL_A_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC797_SET_DRAW_CONTROL_A_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC797_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC797_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC797_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC797_SET_DRAW_CONTROL_A_TOPOLOGY_QUADS 0x00000007 +#define NVC797_SET_DRAW_CONTROL_A_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC797_SET_DRAW_CONTROL_A_TOPOLOGY_POLYGON 0x00000009 +#define NVC797_SET_DRAW_CONTROL_A_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC797_SET_DRAW_CONTROL_A_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC797_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC797_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC797_SET_DRAW_CONTROL_A_TOPOLOGY_PATCH 0x0000000E +#define NVC797_SET_DRAW_CONTROL_A_PRIMITIVE_ID 4:4 +#define NVC797_SET_DRAW_CONTROL_A_PRIMITIVE_ID_FIRST 0x00000000 +#define NVC797_SET_DRAW_CONTROL_A_PRIMITIVE_ID_UNCHANGED 0x00000001 +#define NVC797_SET_DRAW_CONTROL_A_INSTANCE_ID 6:5 +#define NVC797_SET_DRAW_CONTROL_A_INSTANCE_ID_FIRST 0x00000000 +#define NVC797_SET_DRAW_CONTROL_A_INSTANCE_ID_SUBSEQUENT 0x00000001 +#define NVC797_SET_DRAW_CONTROL_A_INSTANCE_ID_UNCHANGED 0x00000002 +#define NVC797_SET_DRAW_CONTROL_A_SPLIT_MODE 8:7 +#define NVC797_SET_DRAW_CONTROL_A_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000 +#define NVC797_SET_DRAW_CONTROL_A_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001 +#define NVC797_SET_DRAW_CONTROL_A_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002 +#define NVC797_SET_DRAW_CONTROL_A_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003 +#define NVC797_SET_DRAW_CONTROL_A_INSTANCE_ITERATE_ENABLE 9:9 +#define NVC797_SET_DRAW_CONTROL_A_INSTANCE_ITERATE_ENABLE_FALSE 0x00000000 +#define NVC797_SET_DRAW_CONTROL_A_INSTANCE_ITERATE_ENABLE_TRUE 0x00000001 +#define NVC797_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_VERTEX_INDEX 10:10 +#define NVC797_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_VERTEX_INDEX_FALSE 0x00000000 +#define NVC797_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_VERTEX_INDEX_TRUE 0x00000001 +#define NVC797_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_INSTANCE_INDEX 11:11 +#define NVC797_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_INSTANCE_INDEX_FALSE 0x00000000 +#define NVC797_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_INSTANCE_INDEX_TRUE 0x00000001 + +#define NVC797_SET_DRAW_CONTROL_B 0x0264 +#define NVC797_SET_DRAW_CONTROL_B_INSTANCE_COUNT 31:0 + +#define NVC797_DRAW_INDEX_BUFFER_BEGIN_END_A 0x0268 +#define NVC797_DRAW_INDEX_BUFFER_BEGIN_END_A_FIRST 31:0 + +#define NVC797_DRAW_INDEX_BUFFER_BEGIN_END_B 0x026c +#define NVC797_DRAW_INDEX_BUFFER_BEGIN_END_B_COUNT 31:0 + +#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_A 0x0270 +#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_A_START 31:0 + +#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_B 0x0274 +#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_B_COUNT 31:0 + +#define NVC797_INVALIDATE_RASTER_CACHE_NO_WFI 0x027c +#define NVC797_INVALIDATE_RASTER_CACHE_NO_WFI_V 0:0 + +#define NVC797_SET_COLOR_RENDER_TO_ZETA_SURFACE 0x02b8 +#define NVC797_SET_COLOR_RENDER_TO_ZETA_SURFACE_V 0:0 +#define NVC797_SET_COLOR_RENDER_TO_ZETA_SURFACE_V_FALSE 0x00000000 +#define NVC797_SET_COLOR_RENDER_TO_ZETA_SURFACE_V_TRUE 0x00000001 + +#define NVC797_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION 0x02bc +#define NVC797_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION_V 0:0 +#define NVC797_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION_V_FALSE 0x00000000 +#define 
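The left-hand offsets are method addresses in the class's method space, written through a channel pushbuffer. As one plausible emission sequence (not confirmed by this header: push_method() is a hypothetical stand-in for the caller's real pushbuffer helper, and the ordering/trigger semantics are an assumption), an instanced, indexed draw can be encoded with the SET_DRAW_CONTROL_A/B and DRAW_INDEX_BUFFER_BEGIN_END_A/B methods above:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the caller's real pushbuffer/channel helper. */
static void push_method(uint32_t method, uint32_t data)
{
    printf("method 0x%04x <- 0x%08x\n", method, data);
}

static void draw_indexed_instanced(uint32_t first_index, uint32_t index_count,
                                   uint32_t instance_count)
{
    /* SET_DRAW_CONTROL_A: TOPOLOGY (3:0) = TRIANGLES (0x4),
     * PRIMITIVE_ID (4:4) = FIRST, INSTANCE_ID (6:5) = FIRST. */
    uint32_t ctrl_a = 0x00000004;

    push_method(0x0260, ctrl_a);          /* NVC797_SET_DRAW_CONTROL_A */
    push_method(0x0264, instance_count);  /* NVC797_SET_DRAW_CONTROL_B (INSTANCE_COUNT) */
    push_method(0x0268, first_index);     /* NVC797_DRAW_INDEX_BUFFER_BEGIN_END_A (FIRST) */
    push_method(0x026c, index_count);     /* NVC797_DRAW_INDEX_BUFFER_BEGIN_END_B (COUNT) */
}

int main(void) { draw_indexed_instanced(0, 36, 4); return 0; }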
NVC797_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION_V_TRUE 0x00000001 + +#define NVC797_INCREMENT_SYNC_POINT 0x02c8 +#define NVC797_INCREMENT_SYNC_POINT_INDEX 11:0 +#define NVC797_INCREMENT_SYNC_POINT_CLEAN_L2 16:16 +#define NVC797_INCREMENT_SYNC_POINT_CLEAN_L2_FALSE 0x00000000 +#define NVC797_INCREMENT_SYNC_POINT_CLEAN_L2_TRUE 0x00000001 +#define NVC797_INCREMENT_SYNC_POINT_CONDITION 20:20 +#define NVC797_INCREMENT_SYNC_POINT_CONDITION_STREAM_OUT_WRITES_DONE 0x00000000 +#define NVC797_INCREMENT_SYNC_POINT_CONDITION_ROP_WRITES_DONE 0x00000001 + +#define NVC797_SET_ROOT_TABLE_PREFETCH 0x02d0 +#define NVC797_SET_ROOT_TABLE_PREFETCH_STAGE_ENABLES 5:0 + +#define NVC797_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE 0x02d4 +#define NVC797_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE_V 0:0 + +#define NVC797_SET_SURFACE_CLIP_ID_BLOCK_SIZE 0x02d8 +#define NVC797_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH 3:0 +#define NVC797_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVC797_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT 7:4 +#define NVC797_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC797_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC797_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC797_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC797_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC797_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC797_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH 11:8 +#define NVC797_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 + +#define NVC797_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dc +#define NVC797_SET_ALPHA_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 13:0 + +#define NVC797_DECOMPRESS_SURFACE 0x02e0 +#define NVC797_DECOMPRESS_SURFACE_MRT_SELECT 2:0 +#define NVC797_DECOMPRESS_SURFACE_RT_ARRAY_INDEX 19:4 + +#define NVC797_SET_ZCULL_ROP_BYPASS 0x02e4 +#define NVC797_SET_ZCULL_ROP_BYPASS_ENABLE 0:0 +#define NVC797_SET_ZCULL_ROP_BYPASS_ENABLE_FALSE 0x00000000 +#define NVC797_SET_ZCULL_ROP_BYPASS_ENABLE_TRUE 0x00000001 +#define NVC797_SET_ZCULL_ROP_BYPASS_NO_STALL 4:4 +#define NVC797_SET_ZCULL_ROP_BYPASS_NO_STALL_FALSE 0x00000000 +#define NVC797_SET_ZCULL_ROP_BYPASS_NO_STALL_TRUE 0x00000001 +#define NVC797_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING 8:8 +#define NVC797_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_FALSE 0x00000000 +#define NVC797_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_TRUE 0x00000001 +#define NVC797_SET_ZCULL_ROP_BYPASS_THRESHOLD 15:12 + +#define NVC797_SET_ZCULL_SUBREGION 0x02e8 +#define NVC797_SET_ZCULL_SUBREGION_ENABLE 0:0 +#define NVC797_SET_ZCULL_SUBREGION_ENABLE_FALSE 0x00000000 +#define NVC797_SET_ZCULL_SUBREGION_ENABLE_TRUE 0x00000001 +#define NVC797_SET_ZCULL_SUBREGION_NORMALIZED_ALIQUOTS 27:4 + +#define NVC797_SET_RASTER_BOUNDING_BOX 0x02ec +#define NVC797_SET_RASTER_BOUNDING_BOX_MODE 0:0 +#define NVC797_SET_RASTER_BOUNDING_BOX_MODE_BOUNDING_BOX 0x00000000 +#define NVC797_SET_RASTER_BOUNDING_BOX_MODE_FULL_VIEWPORT 0x00000001 +#define NVC797_SET_RASTER_BOUNDING_BOX_PAD 11:4 + +#define NVC797_PEER_SEMAPHORE_RELEASE 0x02f0 +#define NVC797_PEER_SEMAPHORE_RELEASE_V 31:0 + +#define NVC797_SET_ITERATED_BLEND_OPTIMIZATION 0x02f4 +#define NVC797_SET_ITERATED_BLEND_OPTIMIZATION_NOOP 1:0 +#define NVC797_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_NEVER 0x00000000 +#define NVC797_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0000 0x00000001 +#define NVC797_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_ALPHA_0 0x00000002 +#define 
NVC797_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0001 0x00000003 + +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION 0x02f8 +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_SUBREGION_ID 7:0 +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_ALIQUOTS 23:8 +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT 27:24 +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16X2_4X4 0x00000000 +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X16_4X4 0x00000001 +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X2 0x00000002 +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_2X4 0x00000003 +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X8_4X4 0x00000004 +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_4X2 0x00000005 +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_2X4 0x00000006 +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X8 0x00000007 +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_2X2 0x00000008 +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_4X2 0x00000009 +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_2X4 0x0000000A +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_8X8_2X2 0x0000000B +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_1X1 0x0000000C +#define NVC797_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_NONE 0x0000000F + +#define NVC797_ASSIGN_ZCULL_SUBREGIONS 0x02fc +#define NVC797_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM 1:0 +#define NVC797_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Static 0x00000000 +#define NVC797_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Adaptive 0x00000001 + +#define NVC797_SET_PS_OUTPUT_SAMPLE_MASK_USAGE 0x0300 +#define NVC797_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE 0:0 +#define NVC797_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_FALSE 0x00000000 +#define NVC797_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_TRUE 0x00000001 +#define NVC797_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE 1:1 +#define NVC797_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000 +#define NVC797_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001 + +#define NVC797_DRAW_ZERO_INDEX 0x0304 +#define NVC797_DRAW_ZERO_INDEX_COUNT 31:0 + +#define NVC797_SET_L1_CONFIGURATION 0x0308 +#define NVC797_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY 2:0 +#define NVC797_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_16KB 0x00000001 +#define NVC797_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB 0x00000003 + +#define NVC797_SET_RENDER_ENABLE_CONTROL 0x030c +#define NVC797_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER 0:0 +#define NVC797_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_FALSE 0x00000000 +#define NVC797_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_TRUE 0x00000001 + +#define NVC797_SET_SPA_VERSION 0x0310 +#define NVC797_SET_SPA_VERSION_MINOR 7:0 +#define NVC797_SET_SPA_VERSION_MAJOR 15:8 + +#define NVC797_SET_TIMESLICE_BATCH_LIMIT 0x0314 +#define NVC797_SET_TIMESLICE_BATCH_LIMIT_BATCH_LIMIT 15:0 + +#define NVC797_SET_SNAP_GRID_LINE 0x0318 +#define NVC797_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL 3:0 +#define NVC797_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001 +#define NVC797_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002 +#define NVC797_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003 +#define NVC797_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004 +#define NVC797_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005 +#define 
NVC797_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006 +#define NVC797_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007 +#define NVC797_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008 +#define NVC797_SET_SNAP_GRID_LINE_ROUNDING_MODE 8:8 +#define NVC797_SET_SNAP_GRID_LINE_ROUNDING_MODE_RTNE 0x00000000 +#define NVC797_SET_SNAP_GRID_LINE_ROUNDING_MODE_TESLA 0x00000001 + +#define NVC797_SET_SNAP_GRID_NON_LINE 0x031c +#define NVC797_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL 3:0 +#define NVC797_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001 +#define NVC797_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002 +#define NVC797_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003 +#define NVC797_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004 +#define NVC797_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005 +#define NVC797_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006 +#define NVC797_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007 +#define NVC797_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008 +#define NVC797_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE 8:8 +#define NVC797_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_RTNE 0x00000000 +#define NVC797_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_TESLA 0x00000001 + +#define NVC797_SET_TESSELLATION_PARAMETERS 0x0320 +#define NVC797_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE 1:0 +#define NVC797_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_ISOLINE 0x00000000 +#define NVC797_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_TRIANGLE 0x00000001 +#define NVC797_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_QUAD 0x00000002 +#define NVC797_SET_TESSELLATION_PARAMETERS_SPACING 5:4 +#define NVC797_SET_TESSELLATION_PARAMETERS_SPACING_INTEGER 0x00000000 +#define NVC797_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_ODD 0x00000001 +#define NVC797_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_EVEN 0x00000002 +#define NVC797_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES 9:8 +#define NVC797_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_POINTS 0x00000000 +#define NVC797_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_LINES 0x00000001 +#define NVC797_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CW 0x00000002 +#define NVC797_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CCW 0x00000003 + +#define NVC797_SET_TESSELLATION_LOD_U0_OR_DENSITY 0x0324 +#define NVC797_SET_TESSELLATION_LOD_U0_OR_DENSITY_V 31:0 + +#define NVC797_SET_TESSELLATION_LOD_V0_OR_DETAIL 0x0328 +#define NVC797_SET_TESSELLATION_LOD_V0_OR_DETAIL_V 31:0 + +#define NVC797_SET_TESSELLATION_LOD_U1_OR_W0 0x032c +#define NVC797_SET_TESSELLATION_LOD_U1_OR_W0_V 31:0 + +#define NVC797_SET_TESSELLATION_LOD_V1 0x0330 +#define NVC797_SET_TESSELLATION_LOD_V1_V 31:0 + +#define NVC797_SET_TG_LOD_INTERIOR_U 0x0334 +#define NVC797_SET_TG_LOD_INTERIOR_U_V 31:0 + +#define NVC797_SET_TG_LOD_INTERIOR_V 0x0338 +#define NVC797_SET_TG_LOD_INTERIOR_V_V 31:0 + +#define NVC797_RESERVED_TG07 0x033c +#define NVC797_RESERVED_TG07_V 0:0 + +#define NVC797_RESERVED_TG08 0x0340 +#define NVC797_RESERVED_TG08_V 0:0 + +#define NVC797_RESERVED_TG09 0x0344 +#define NVC797_RESERVED_TG09_V 0:0 + +#define NVC797_RESERVED_TG10 0x0348 +#define NVC797_RESERVED_TG10_V 0:0 + +#define NVC797_RESERVED_TG11 0x034c +#define NVC797_RESERVED_TG11_V 0:0 + +#define NVC797_RESERVED_TG12 0x0350 +#define NVC797_RESERVED_TG12_V 0:0 + +#define NVC797_RESERVED_TG13 0x0354 +#define NVC797_RESERVED_TG13_V 0:0 + +#define NVC797_RESERVED_TG14 0x0358 +#define NVC797_RESERVED_TG14_V 
0:0 + +#define NVC797_RESERVED_TG15 0x035c +#define NVC797_RESERVED_TG15_V 0:0 + +#define NVC797_SET_SUBTILING_PERF_KNOB_A 0x0360 +#define NVC797_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_REGISTER_FILE_PER_SUBTILE 7:0 +#define NVC797_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_PIXEL_OUTPUT_BUFFER_PER_SUBTILE 15:8 +#define NVC797_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_TRIANGLE_RAM_PER_SUBTILE 23:16 +#define NVC797_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_MAX_QUADS_PER_SUBTILE 31:24 + +#define NVC797_SET_SUBTILING_PERF_KNOB_B 0x0364 +#define NVC797_SET_SUBTILING_PERF_KNOB_B_FRACTION_OF_MAX_PRIMITIVES_PER_SUBTILE 7:0 + +#define NVC797_SET_SUBTILING_PERF_KNOB_C 0x0368 +#define NVC797_SET_SUBTILING_PERF_KNOB_C_RESERVED 0:0 + +#define NVC797_SET_ZCULL_SUBREGION_TO_REPORT 0x036c +#define NVC797_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE 0:0 +#define NVC797_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_FALSE 0x00000000 +#define NVC797_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_TRUE 0x00000001 +#define NVC797_SET_ZCULL_SUBREGION_TO_REPORT_SUBREGION_ID 11:4 + +#define NVC797_SET_ZCULL_SUBREGION_REPORT_TYPE 0x0370 +#define NVC797_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE 0:0 +#define NVC797_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_FALSE 0x00000000 +#define NVC797_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_TRUE 0x00000001 +#define NVC797_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE 6:4 +#define NVC797_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST 0x00000000 +#define NVC797_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_NO_ACCEPT 0x00000001 +#define NVC797_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_LATE_Z 0x00000002 +#define NVC797_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_STENCIL_TEST 0x00000003 + +#define NVC797_SET_BALANCED_PRIMITIVE_WORKLOAD 0x0374 +#define NVC797_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE 0:0 +#define NVC797_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_FALSE 0x00000000 +#define NVC797_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_TRUE 0x00000001 +#define NVC797_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE 4:4 +#define NVC797_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_FALSE 0x00000000 +#define NVC797_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_TRUE 0x00000001 +#define NVC797_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE 8:8 +#define NVC797_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE_FALSE 0x00000000 +#define NVC797_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE_TRUE 0x00000001 +#define NVC797_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE 9:9 +#define NVC797_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE_FALSE 0x00000000 +#define NVC797_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE_TRUE 0x00000001 + +#define NVC797_SET_MAX_PATCHES_PER_BATCH 0x0378 +#define NVC797_SET_MAX_PATCHES_PER_BATCH_V 5:0 + +#define NVC797_SET_RASTER_ENABLE 0x037c +#define NVC797_SET_RASTER_ENABLE_V 0:0 +#define NVC797_SET_RASTER_ENABLE_V_FALSE 0x00000000 +#define NVC797_SET_RASTER_ENABLE_V_TRUE 0x00000001 + +#define NVC797_SET_STREAM_OUT_BUFFER_ENABLE(j) (0x0380+(j)*32) +#define NVC797_SET_STREAM_OUT_BUFFER_ENABLE_V 0:0 +#define NVC797_SET_STREAM_OUT_BUFFER_ENABLE_V_FALSE 0x00000000 +#define NVC797_SET_STREAM_OUT_BUFFER_ENABLE_V_TRUE 0x00000001 + +#define NVC797_SET_STREAM_OUT_BUFFER_ADDRESS_A(j) (0x0384+(j)*32) +#define NVC797_SET_STREAM_OUT_BUFFER_ADDRESS_A_UPPER 7:0 + +#define NVC797_SET_STREAM_OUT_BUFFER_ADDRESS_B(j) (0x0388+(j)*32) +#define NVC797_SET_STREAM_OUT_BUFFER_ADDRESS_B_LOWER 31:0 + +#define 
NVC797_SET_STREAM_OUT_BUFFER_SIZE(j) (0x038c+(j)*32) +#define NVC797_SET_STREAM_OUT_BUFFER_SIZE_BYTES 31:0 + +#define NVC797_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER(j) (0x0390+(j)*32) +#define NVC797_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER_START_OFFSET 31:0 + +#define NVC797_SET_POSITION_W_SCALED_OFFSET_SCALE_A(j) (0x0400+(j)*16) +#define NVC797_SET_POSITION_W_SCALED_OFFSET_SCALE_A_V 31:0 + +#define NVC797_SET_POSITION_W_SCALED_OFFSET_SCALE_B(j) (0x0404+(j)*16) +#define NVC797_SET_POSITION_W_SCALED_OFFSET_SCALE_B_V 31:0 + +#define NVC797_SET_POSITION_W_SCALED_OFFSET_RESERVED_A(j) (0x0408+(j)*16) +#define NVC797_SET_POSITION_W_SCALED_OFFSET_RESERVED_A_V 31:0 + +#define NVC797_SET_POSITION_W_SCALED_OFFSET_RESERVED_B(j) (0x040c+(j)*16) +#define NVC797_SET_POSITION_W_SCALED_OFFSET_RESERVED_B_V 31:0 + +#define NVC797_SET_Z_ROP_SLICE_MAP 0x0500 +#define NVC797_SET_Z_ROP_SLICE_MAP_VIRTUAL_ADDRESS_MASK 31:0 + +#define NVC797_SET_ROOT_TABLE_SELECTOR 0x0504 +#define NVC797_SET_ROOT_TABLE_SELECTOR_ROOT_TABLE 2:0 +#define NVC797_SET_ROOT_TABLE_SELECTOR_OFFSET 15:8 + +#define NVC797_LOAD_ROOT_TABLE 0x0508 +#define NVC797_LOAD_ROOT_TABLE_V 31:0 + +#define NVC797_SET_MME_MEM_ADDRESS_A 0x0550 +#define NVC797_SET_MME_MEM_ADDRESS_A_UPPER 16:0 + +#define NVC797_SET_MME_MEM_ADDRESS_B 0x0554 +#define NVC797_SET_MME_MEM_ADDRESS_B_LOWER 31:0 + +#define NVC797_SET_MME_DATA_RAM_ADDRESS 0x0558 +#define NVC797_SET_MME_DATA_RAM_ADDRESS_WORD 31:0 + +#define NVC797_MME_DMA_READ 0x055c +#define NVC797_MME_DMA_READ_LENGTH 31:0 + +#define NVC797_MME_DMA_READ_FIFOED 0x0560 +#define NVC797_MME_DMA_READ_FIFOED_LENGTH 31:0 + +#define NVC797_MME_DMA_WRITE 0x0564 +#define NVC797_MME_DMA_WRITE_LENGTH 31:0 + +#define NVC797_MME_DMA_REDUCTION 0x0568 +#define NVC797_MME_DMA_REDUCTION_REDUCTION_OP 2:0 +#define NVC797_MME_DMA_REDUCTION_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC797_MME_DMA_REDUCTION_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC797_MME_DMA_REDUCTION_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC797_MME_DMA_REDUCTION_REDUCTION_OP_RED_INC 0x00000003 +#define NVC797_MME_DMA_REDUCTION_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC797_MME_DMA_REDUCTION_REDUCTION_OP_RED_AND 0x00000005 +#define NVC797_MME_DMA_REDUCTION_REDUCTION_OP_RED_OR 0x00000006 +#define NVC797_MME_DMA_REDUCTION_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC797_MME_DMA_REDUCTION_REDUCTION_FORMAT 5:4 +#define NVC797_MME_DMA_REDUCTION_REDUCTION_FORMAT_UNSIGNED 0x00000000 +#define NVC797_MME_DMA_REDUCTION_REDUCTION_FORMAT_SIGNED 0x00000001 +#define NVC797_MME_DMA_REDUCTION_REDUCTION_SIZE 8:8 +#define NVC797_MME_DMA_REDUCTION_REDUCTION_SIZE_FOUR_BYTES 0x00000000 +#define NVC797_MME_DMA_REDUCTION_REDUCTION_SIZE_EIGHT_BYTES 0x00000001 + +#define NVC797_MME_DMA_SYSMEMBAR 0x056c +#define NVC797_MME_DMA_SYSMEMBAR_V 0:0 + +#define NVC797_MME_DMA_SYNC 0x0570 +#define NVC797_MME_DMA_SYNC_VALUE 31:0 + +#define NVC797_SET_MME_DATA_FIFO_CONFIG 0x0574 +#define NVC797_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE 2:0 +#define NVC797_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_0KB 0x00000000 +#define NVC797_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_4KB 0x00000001 +#define NVC797_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_8KB 0x00000002 +#define NVC797_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_12KB 0x00000003 +#define NVC797_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_16KB 0x00000004 + +#define NVC797_SET_VERTEX_STREAM_SIZE_A(j) (0x0600+(j)*8) +#define NVC797_SET_VERTEX_STREAM_SIZE_A_UPPER 7:0 + +#define NVC797_SET_VERTEX_STREAM_SIZE_B(j) (0x0604+(j)*8) +#define NVC797_SET_VERTEX_STREAM_SIZE_B_LOWER 
31:0 + +#define NVC797_SET_STREAM_OUT_CONTROL_STREAM(j) (0x0700+(j)*16) +#define NVC797_SET_STREAM_OUT_CONTROL_STREAM_SELECT 1:0 + +#define NVC797_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT(j) (0x0704+(j)*16) +#define NVC797_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT_MAX 7:0 + +#define NVC797_SET_STREAM_OUT_CONTROL_STRIDE(j) (0x0708+(j)*16) +#define NVC797_SET_STREAM_OUT_CONTROL_STRIDE_BYTES 31:0 + +#define NVC797_SET_RASTER_INPUT 0x0740 +#define NVC797_SET_RASTER_INPUT_STREAM_SELECT 1:0 + +#define NVC797_SET_STREAM_OUTPUT 0x0744 +#define NVC797_SET_STREAM_OUTPUT_ENABLE 0:0 +#define NVC797_SET_STREAM_OUTPUT_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STREAM_OUTPUT_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE 0x0748 +#define NVC797_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE 0:0 +#define NVC797_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_FALSE 0x00000000 +#define NVC797_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_HYBRID_ANTI_ALIAS_CONTROL 0x0754 +#define NVC797_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES 3:0 +#define NVC797_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID 4:4 +#define NVC797_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_FRAGMENT 0x00000000 +#define NVC797_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_PASS 0x00000001 +#define NVC797_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES_EXTENDED 5:5 + +#define NVC797_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c +#define NVC797_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0 + +#define NVC797_SET_SHADER_LOCAL_MEMORY_A 0x0790 +#define NVC797_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0 + +#define NVC797_SET_SHADER_LOCAL_MEMORY_B 0x0794 +#define NVC797_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0 + +#define NVC797_SET_SHADER_LOCAL_MEMORY_C 0x0798 +#define NVC797_SET_SHADER_LOCAL_MEMORY_C_SIZE_UPPER 5:0 + +#define NVC797_SET_SHADER_LOCAL_MEMORY_D 0x079c +#define NVC797_SET_SHADER_LOCAL_MEMORY_D_SIZE_LOWER 31:0 + +#define NVC797_SET_SHADER_LOCAL_MEMORY_E 0x07a0 +#define NVC797_SET_SHADER_LOCAL_MEMORY_E_DEFAULT_SIZE_PER_WARP 25:0 + +#define NVC797_SET_COLOR_ZERO_BANDWIDTH_CLEAR 0x07a4 +#define NVC797_SET_COLOR_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0 + +#define NVC797_SET_Z_ZERO_BANDWIDTH_CLEAR 0x07a8 +#define NVC797_SET_Z_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0 + +#define NVC797_SET_STENCIL_ZERO_BANDWIDTH_CLEAR 0x07b0 +#define NVC797_SET_STENCIL_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0 + +#define NVC797_SET_ZCULL_REGION_SIZE_A 0x07c0 +#define NVC797_SET_ZCULL_REGION_SIZE_A_WIDTH 15:0 + +#define NVC797_SET_ZCULL_REGION_SIZE_B 0x07c4 +#define NVC797_SET_ZCULL_REGION_SIZE_B_HEIGHT 15:0 + +#define NVC797_SET_ZCULL_REGION_SIZE_C 0x07c8 +#define NVC797_SET_ZCULL_REGION_SIZE_C_DEPTH 15:0 + +#define NVC797_SET_ZCULL_REGION_PIXEL_OFFSET_C 0x07cc +#define NVC797_SET_ZCULL_REGION_PIXEL_OFFSET_C_DEPTH 15:0 + +#define NVC797_SET_CULL_BEFORE_FETCH 0x07dc +#define NVC797_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE 0:0 +#define NVC797_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_FALSE 0x00000000 +#define NVC797_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_TRUE 0x00000001 + +#define NVC797_SET_ZCULL_REGION_LOCATION 0x07e0 +#define NVC797_SET_ZCULL_REGION_LOCATION_START_ALIQUOT 15:0 +#define NVC797_SET_ZCULL_REGION_LOCATION_ALIQUOT_COUNT 31:16 + +#define NVC797_SET_ZCULL_REGION_ALIQUOTS 0x07e4 +#define NVC797_SET_ZCULL_REGION_ALIQUOTS_PER_LAYER 15:0 + +#define NVC797_SET_ZCULL_STORAGE_A 0x07e8 +#define NVC797_SET_ZCULL_STORAGE_A_ADDRESS_UPPER 7:0 + +#define NVC797_SET_ZCULL_STORAGE_B 0x07ec +#define 
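The (j)-parameterized defines above are strided method arrays: NVC797_SET_STREAM_OUT_BUFFER_ADDRESS_A(j) expands to 0x0384 + j*32, so consecutive stream-out buffers occupy consecutive 32-byte method blocks. Reading the UPPER 7:0 / LOWER 31:0 field pair, an _A/_B method pair together carries an up-to-40-bit GPU virtual address. A small sketch, assuming that split and with the strides copied from the macros above:

#include <stdint.h>
#include <stdio.h>

/* Illustrative: compute the per-buffer method addresses for stream-out
 * buffer j and split a GPU VA across the _A (upper 8 bits) and _B
 * (lower 32 bits) methods, per the field widths defined above. */
static void stream_out_buffer_methods(unsigned j, uint64_t gpu_va, uint32_t size_bytes)
{
    uint32_t addr_a = 0x0384 + j * 32; /* NVC797_SET_STREAM_OUT_BUFFER_ADDRESS_A(j) */
    uint32_t addr_b = 0x0388 + j * 32; /* NVC797_SET_STREAM_OUT_BUFFER_ADDRESS_B(j) */
    uint32_t size_m = 0x038c + j * 32; /* NVC797_SET_STREAM_OUT_BUFFER_SIZE(j) */

    printf("method 0x%04x <- 0x%08x (ADDRESS UPPER 7:0)\n",
           addr_a, (uint32_t)(gpu_va >> 32) & 0xFFu);
    printf("method 0x%04x <- 0x%08x (ADDRESS LOWER 31:0)\n",
           addr_b, (uint32_t)gpu_va);
    printf("method 0x%04x <- 0x%08x (SIZE BYTES 31:0)\n",
           size_m, size_bytes);
}

int main(void) { stream_out_buffer_methods(1, 0x12ABCD0000ull, 0x10000); return 0; }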
NVC797_SET_ZCULL_STORAGE_B_ADDRESS_LOWER 31:0 + +#define NVC797_SET_ZCULL_STORAGE_C 0x07f0 +#define NVC797_SET_ZCULL_STORAGE_C_LIMIT_ADDRESS_UPPER 7:0 + +#define NVC797_SET_ZCULL_STORAGE_D 0x07f4 +#define NVC797_SET_ZCULL_STORAGE_D_LIMIT_ADDRESS_LOWER 31:0 + +#define NVC797_SET_ZT_READ_ONLY 0x07f8 +#define NVC797_SET_ZT_READ_ONLY_ENABLE_Z 0:0 +#define NVC797_SET_ZT_READ_ONLY_ENABLE_Z_FALSE 0x00000000 +#define NVC797_SET_ZT_READ_ONLY_ENABLE_Z_TRUE 0x00000001 +#define NVC797_SET_ZT_READ_ONLY_ENABLE_STENCIL 4:4 +#define NVC797_SET_ZT_READ_ONLY_ENABLE_STENCIL_FALSE 0x00000000 +#define NVC797_SET_ZT_READ_ONLY_ENABLE_STENCIL_TRUE 0x00000001 + +#define NVC797_THROTTLE_SM 0x07fc +#define NVC797_THROTTLE_SM_MULTIPLY_ADD 0:0 +#define NVC797_THROTTLE_SM_MULTIPLY_ADD_FALSE 0x00000000 +#define NVC797_THROTTLE_SM_MULTIPLY_ADD_TRUE 0x00000001 + +#define NVC797_SET_COLOR_TARGET_A(j) (0x0800+(j)*64) +#define NVC797_SET_COLOR_TARGET_A_OFFSET_UPPER 7:0 + +#define NVC797_SET_COLOR_TARGET_B(j) (0x0804+(j)*64) +#define NVC797_SET_COLOR_TARGET_B_OFFSET_LOWER 31:0 + +#define NVC797_SET_COLOR_TARGET_WIDTH(j) (0x0808+(j)*64) +#define NVC797_SET_COLOR_TARGET_WIDTH_V 27:0 + +#define NVC797_SET_COLOR_TARGET_HEIGHT(j) (0x080c+(j)*64) +#define NVC797_SET_COLOR_TARGET_HEIGHT_V 16:0 + +#define NVC797_SET_COLOR_TARGET_FORMAT(j) (0x0810+(j)*64) +#define NVC797_SET_COLOR_TARGET_FORMAT_V 7:0 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_DISABLED 0x00000000 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_AS32 0x000000C1 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_AU32 0x000000C2 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_X32 0x000000C4 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_X32 0x000000C5 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_R16_G16_B16_A16 0x000000C6 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RS16_GS16_BS16_AS16 0x000000C8 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RU16_GU16_BU16_AU16 0x000000C9 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RF32_GF32 0x000000CB +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RS32_GS32 0x000000CC +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RU32_GU32 0x000000CD +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE +#define NVC797_SET_COLOR_TARGET_FORMAT_V_A8R8G8B8 0x000000CF +#define NVC797_SET_COLOR_TARGET_FORMAT_V_A8RL8GL8BL8 0x000000D0 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_A2B10G10R10 0x000000D1 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_AU2BU10GU10RU10 0x000000D2 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_A8B8G8R8 0x000000D5 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_A8BL8GL8RL8 0x000000D6 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_AN8BN8GN8RN8 0x000000D7 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_AS8BS8GS8RS8 0x000000D8 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_AU8BU8GU8RU8 0x000000D9 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_R16_G16 0x000000DA +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RN16_GN16 0x000000DB +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RS16_GS16 0x000000DC +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RU16_GU16 0x000000DD +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RF16_GF16 0x000000DE +#define NVC797_SET_COLOR_TARGET_FORMAT_V_A2R10G10B10 0x000000DF +#define NVC797_SET_COLOR_TARGET_FORMAT_V_BF10GF11RF11 0x000000E0 +#define 
NVC797_SET_COLOR_TARGET_FORMAT_V_RS32 0x000000E3 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RU32 0x000000E4 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RF32 0x000000E5 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_X8R8G8B8 0x000000E6 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_X8RL8GL8BL8 0x000000E7 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_R5G6B5 0x000000E8 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_A1R5G5B5 0x000000E9 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_G8R8 0x000000EA +#define NVC797_SET_COLOR_TARGET_FORMAT_V_GN8RN8 0x000000EB +#define NVC797_SET_COLOR_TARGET_FORMAT_V_GS8RS8 0x000000EC +#define NVC797_SET_COLOR_TARGET_FORMAT_V_GU8RU8 0x000000ED +#define NVC797_SET_COLOR_TARGET_FORMAT_V_R16 0x000000EE +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RN16 0x000000EF +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RS16 0x000000F0 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RU16 0x000000F1 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RF16 0x000000F2 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_R8 0x000000F3 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RN8 0x000000F4 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RS8 0x000000F5 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RU8 0x000000F6 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_A8 0x000000F7 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_X1R5G5B5 0x000000F8 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_X8B8G8R8 0x000000F9 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_X8BL8GL8RL8 0x000000FA +#define NVC797_SET_COLOR_TARGET_FORMAT_V_Z1R5G5B5 0x000000FB +#define NVC797_SET_COLOR_TARGET_FORMAT_V_O1R5G5B5 0x000000FC +#define NVC797_SET_COLOR_TARGET_FORMAT_V_Z8R8G8B8 0x000000FD +#define NVC797_SET_COLOR_TARGET_FORMAT_V_O8R8G8B8 0x000000FE +#define NVC797_SET_COLOR_TARGET_FORMAT_V_R32 0x000000FF +#define NVC797_SET_COLOR_TARGET_FORMAT_V_A16 0x00000040 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_AF16 0x00000041 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_AF32 0x00000042 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_A8R8 0x00000043 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_R16_A16 0x00000044 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RF16_AF16 0x00000045 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_RF32_AF32 0x00000046 +#define NVC797_SET_COLOR_TARGET_FORMAT_V_B8G8R8A8 0x00000047 + +#define NVC797_SET_COLOR_TARGET_MEMORY(j) (0x0814+(j)*64) +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH 3:0 +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH_ONE_GOB 0x00000000 +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT 7:4 +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_ONE_GOB 0x00000000 +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_TWO_GOBS 0x00000001 +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH 11:8 +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_ONE_GOB 0x00000000 +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_TWO_GOBS 0x00000001 +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_FOUR_GOBS 0x00000002 +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_EIGHT_GOBS 0x00000003 +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVC797_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005 +#define NVC797_SET_COLOR_TARGET_MEMORY_LAYOUT 12:12 +#define NVC797_SET_COLOR_TARGET_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define 
NVC797_SET_COLOR_TARGET_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVC797_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL 16:16 +#define NVC797_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000 +#define NVC797_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_DEPTH_SIZE 0x00000001 + +#define NVC797_SET_COLOR_TARGET_THIRD_DIMENSION(j) (0x0818+(j)*64) +#define NVC797_SET_COLOR_TARGET_THIRD_DIMENSION_V 27:0 + +#define NVC797_SET_COLOR_TARGET_ARRAY_PITCH(j) (0x081c+(j)*64) +#define NVC797_SET_COLOR_TARGET_ARRAY_PITCH_V 31:0 + +#define NVC797_SET_COLOR_TARGET_LAYER(j) (0x0820+(j)*64) +#define NVC797_SET_COLOR_TARGET_LAYER_OFFSET 15:0 + +#define NVC797_SET_COLOR_TARGET_C_ROP_SLICE_MAP(j) (0x0824+(j)*64) +#define NVC797_SET_COLOR_TARGET_C_ROP_SLICE_MAP_VIRTUAL_ADDRESS_MASK 31:0 + +#define NVC797_SET_VIEWPORT_SCALE_X(j) (0x0a00+(j)*32) +#define NVC797_SET_VIEWPORT_SCALE_X_V 31:0 + +#define NVC797_SET_VIEWPORT_SCALE_Y(j) (0x0a04+(j)*32) +#define NVC797_SET_VIEWPORT_SCALE_Y_V 31:0 + +#define NVC797_SET_VIEWPORT_SCALE_Z(j) (0x0a08+(j)*32) +#define NVC797_SET_VIEWPORT_SCALE_Z_V 31:0 + +#define NVC797_SET_VIEWPORT_OFFSET_X(j) (0x0a0c+(j)*32) +#define NVC797_SET_VIEWPORT_OFFSET_X_V 31:0 + +#define NVC797_SET_VIEWPORT_OFFSET_Y(j) (0x0a10+(j)*32) +#define NVC797_SET_VIEWPORT_OFFSET_Y_V 31:0 + +#define NVC797_SET_VIEWPORT_OFFSET_Z(j) (0x0a14+(j)*32) +#define NVC797_SET_VIEWPORT_OFFSET_Z_V 31:0 + +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE(j) (0x0a18+(j)*32) +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_X 2:0 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_X 0x00000000 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_X 0x00000001 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Y 0x00000002 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Y 0x00000003 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Z 0x00000004 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Z 0x00000005 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_W 0x00000006 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_W 0x00000007 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Y 6:4 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_X 0x00000000 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_X 0x00000001 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Y 0x00000002 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Y 0x00000003 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Z 0x00000004 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Z 0x00000005 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_W 0x00000006 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_W 0x00000007 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Z 10:8 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_X 0x00000000 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_X 0x00000001 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Y 0x00000002 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Y 0x00000003 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Z 0x00000004 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Z 0x00000005 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_W 0x00000006 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_W 0x00000007 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_W 14:12 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_X 0x00000000 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_X 0x00000001 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Y 
0x00000002 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Y 0x00000003 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Z 0x00000004 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Z 0x00000005 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_W 0x00000006 +#define NVC797_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_W 0x00000007 + +#define NVC797_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION(j) (0x0a1c+(j)*32) +#define NVC797_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_X_BITS 4:0 +#define NVC797_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_Y_BITS 12:8 + +#define NVC797_SET_VIEWPORT_CLIP_HORIZONTAL(j) (0x0c00+(j)*16) +#define NVC797_SET_VIEWPORT_CLIP_HORIZONTAL_X0 15:0 +#define NVC797_SET_VIEWPORT_CLIP_HORIZONTAL_WIDTH 31:16 + +#define NVC797_SET_VIEWPORT_CLIP_VERTICAL(j) (0x0c04+(j)*16) +#define NVC797_SET_VIEWPORT_CLIP_VERTICAL_Y0 15:0 +#define NVC797_SET_VIEWPORT_CLIP_VERTICAL_HEIGHT 31:16 + +#define NVC797_SET_VIEWPORT_CLIP_MIN_Z(j) (0x0c08+(j)*16) +#define NVC797_SET_VIEWPORT_CLIP_MIN_Z_V 31:0 + +#define NVC797_SET_VIEWPORT_CLIP_MAX_Z(j) (0x0c0c+(j)*16) +#define NVC797_SET_VIEWPORT_CLIP_MAX_Z_V 31:0 + +#define NVC797_SET_WINDOW_CLIP_HORIZONTAL(j) (0x0d00+(j)*8) +#define NVC797_SET_WINDOW_CLIP_HORIZONTAL_XMIN 15:0 +#define NVC797_SET_WINDOW_CLIP_HORIZONTAL_XMAX 31:16 + +#define NVC797_SET_WINDOW_CLIP_VERTICAL(j) (0x0d04+(j)*8) +#define NVC797_SET_WINDOW_CLIP_VERTICAL_YMIN 15:0 +#define NVC797_SET_WINDOW_CLIP_VERTICAL_YMAX 31:16 + +#define NVC797_SET_CLIP_ID_EXTENT_X(j) (0x0d40+(j)*8) +#define NVC797_SET_CLIP_ID_EXTENT_X_MINX 15:0 +#define NVC797_SET_CLIP_ID_EXTENT_X_WIDTH 31:16 + +#define NVC797_SET_CLIP_ID_EXTENT_Y(j) (0x0d44+(j)*8) +#define NVC797_SET_CLIP_ID_EXTENT_Y_MINY 15:0 +#define NVC797_SET_CLIP_ID_EXTENT_Y_HEIGHT 31:16 + +#define NVC797_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK 0x0d60 +#define NVC797_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK_V 10:0 + +#define NVC797_SET_API_VISIBLE_CALL_LIMIT 0x0d64 +#define NVC797_SET_API_VISIBLE_CALL_LIMIT_V 3:0 +#define NVC797_SET_API_VISIBLE_CALL_LIMIT_V__0 0x00000000 +#define NVC797_SET_API_VISIBLE_CALL_LIMIT_V__1 0x00000001 +#define NVC797_SET_API_VISIBLE_CALL_LIMIT_V__2 0x00000002 +#define NVC797_SET_API_VISIBLE_CALL_LIMIT_V__4 0x00000003 +#define NVC797_SET_API_VISIBLE_CALL_LIMIT_V__8 0x00000004 +#define NVC797_SET_API_VISIBLE_CALL_LIMIT_V__16 0x00000005 +#define NVC797_SET_API_VISIBLE_CALL_LIMIT_V__32 0x00000006 +#define NVC797_SET_API_VISIBLE_CALL_LIMIT_V__64 0x00000007 +#define NVC797_SET_API_VISIBLE_CALL_LIMIT_V__128 0x00000008 +#define NVC797_SET_API_VISIBLE_CALL_LIMIT_V_NO_CHECK 0x0000000F + +#define NVC797_SET_STATISTICS_COUNTER 0x0d68 +#define NVC797_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE 0:0 +#define NVC797_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE 1:1 +#define NVC797_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE 2:2 +#define NVC797_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE 3:3 +#define NVC797_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define 
NVC797_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE 4:4 +#define NVC797_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE 5:5 +#define NVC797_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE 6:6 +#define NVC797_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE 7:7 +#define NVC797_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE 8:8 +#define NVC797_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE 9:9 +#define NVC797_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE 11:11 +#define NVC797_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE 12:12 +#define NVC797_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE 13:13 +#define NVC797_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE 14:14 +#define NVC797_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE 10:10 +#define NVC797_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE 15:15 +#define NVC797_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_TRUE 0x00000001 +#define NVC797_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE 16:16 +#define NVC797_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE_FALSE 0x00000000 +#define NVC797_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_CLEAR_RECT_HORIZONTAL 0x0d6c +#define NVC797_SET_CLEAR_RECT_HORIZONTAL_XMIN 15:0 +#define NVC797_SET_CLEAR_RECT_HORIZONTAL_XMAX 31:16 + +#define NVC797_SET_CLEAR_RECT_VERTICAL 0x0d70 +#define NVC797_SET_CLEAR_RECT_VERTICAL_YMIN 15:0 +#define NVC797_SET_CLEAR_RECT_VERTICAL_YMAX 31:16 + +#define 
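SET_STATISTICS_COUNTER (method 0x0d68) packs one enable bit per pipeline counter, so the data word is just an OR of single-bit fields. A minimal sketch, with bit positions copied from the field definitions above:

#include <stdint.h>
#include <stdio.h>

/* Illustrative: enable a subset of the statistics counters by OR-ing
 * (1u << bit) for each one-bit ENABLE field defined above. */
static uint32_t stats_counter_enables(void)
{
    uint32_t v = 0;
    v |= 1u << 0; /* DA_VERTICES_GENERATED_ENABLE (0:0) = TRUE */
    v |= 1u << 2; /* VS_INVOCATIONS_ENABLE (2:2) = TRUE */
    v |= 1u << 9; /* PS_INVOCATIONS_ENABLE (9:9) = TRUE */
    return v;
}

int main(void)
{
    printf("SET_STATISTICS_COUNTER data = 0x%08X\n", stats_counter_enables());
    return 0;
}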
NVC797_SET_VERTEX_ARRAY_START 0x0d74 +#define NVC797_SET_VERTEX_ARRAY_START_V 31:0 + +#define NVC797_DRAW_VERTEX_ARRAY 0x0d78 +#define NVC797_DRAW_VERTEX_ARRAY_COUNT 31:0 + +#define NVC797_SET_VIEWPORT_Z_CLIP 0x0d7c +#define NVC797_SET_VIEWPORT_Z_CLIP_RANGE 0:0 +#define NVC797_SET_VIEWPORT_Z_CLIP_RANGE_NEGATIVE_W_TO_POSITIVE_W 0x00000000 +#define NVC797_SET_VIEWPORT_Z_CLIP_RANGE_ZERO_TO_POSITIVE_W 0x00000001 + +#define NVC797_SET_COLOR_CLEAR_VALUE(i) (0x0d80+(i)*4) +#define NVC797_SET_COLOR_CLEAR_VALUE_V 31:0 + +#define NVC797_SET_Z_CLEAR_VALUE 0x0d90 +#define NVC797_SET_Z_CLEAR_VALUE_V 31:0 + +#define NVC797_SET_SHADER_CACHE_CONTROL 0x0d94 +#define NVC797_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0 +#define NVC797_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000 +#define NVC797_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001 + +#define NVC797_FORCE_TRANSITION_TO_BETA 0x0d98 +#define NVC797_FORCE_TRANSITION_TO_BETA_V 0:0 + +#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_ENABLE 0x0d9c +#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V 0:0 +#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_FALSE 0x00000000 +#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_TRUE 0x00000001 + +#define NVC797_SET_STENCIL_CLEAR_VALUE 0x0da0 +#define NVC797_SET_STENCIL_CLEAR_VALUE_V 7:0 + +#define NVC797_INVALIDATE_SHADER_CACHES_NO_WFI 0x0da4 +#define NVC797_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0 +#define NVC797_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000 +#define NVC797_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001 +#define NVC797_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4 +#define NVC797_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000 +#define NVC797_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001 +#define NVC797_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12 +#define NVC797_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000 +#define NVC797_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001 + +#define NVC797_SET_ZCULL_SERIALIZATION 0x0da8 +#define NVC797_SET_ZCULL_SERIALIZATION_ENABLE 0:0 +#define NVC797_SET_ZCULL_SERIALIZATION_ENABLE_FALSE 0x00000000 +#define NVC797_SET_ZCULL_SERIALIZATION_ENABLE_TRUE 0x00000001 +#define NVC797_SET_ZCULL_SERIALIZATION_APPLIED 5:4 +#define NVC797_SET_ZCULL_SERIALIZATION_APPLIED_ALWAYS 0x00000000 +#define NVC797_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z 0x00000001 +#define NVC797_SET_ZCULL_SERIALIZATION_APPLIED_OUT_OF_GAMUT_Z 0x00000002 +#define NVC797_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z_OR_OUT_OF_GAMUT_Z 0x00000003 + +#define NVC797_SET_FRONT_POLYGON_MODE 0x0dac +#define NVC797_SET_FRONT_POLYGON_MODE_V 31:0 +#define NVC797_SET_FRONT_POLYGON_MODE_V_POINT 0x00001B00 +#define NVC797_SET_FRONT_POLYGON_MODE_V_LINE 0x00001B01 +#define NVC797_SET_FRONT_POLYGON_MODE_V_FILL 0x00001B02 + +#define NVC797_SET_BACK_POLYGON_MODE 0x0db0 +#define NVC797_SET_BACK_POLYGON_MODE_V 31:0 +#define NVC797_SET_BACK_POLYGON_MODE_V_POINT 0x00001B00 +#define NVC797_SET_BACK_POLYGON_MODE_V_LINE 0x00001B01 +#define NVC797_SET_BACK_POLYGON_MODE_V_FILL 0x00001B02 + +#define NVC797_SET_POLY_SMOOTH 0x0db4 +#define NVC797_SET_POLY_SMOOTH_ENABLE 0:0 +#define NVC797_SET_POLY_SMOOTH_ENABLE_FALSE 0x00000000 +#define NVC797_SET_POLY_SMOOTH_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_ZCULL_DIR_FORMAT 0x0dbc +#define NVC797_SET_ZCULL_DIR_FORMAT_ZDIR 15:0 +#define NVC797_SET_ZCULL_DIR_FORMAT_ZDIR_LESS 0x00000000 +#define NVC797_SET_ZCULL_DIR_FORMAT_ZDIR_GREATER 0x00000001 +#define 
NVC797_SET_ZCULL_DIR_FORMAT_ZFORMAT 31:16 +#define NVC797_SET_ZCULL_DIR_FORMAT_ZFORMAT_MSB 0x00000000 +#define NVC797_SET_ZCULL_DIR_FORMAT_ZFORMAT_FP 0x00000001 +#define NVC797_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZTRICK 0x00000002 +#define NVC797_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZF32_1 0x00000003 + +#define NVC797_SET_POLY_OFFSET_POINT 0x0dc0 +#define NVC797_SET_POLY_OFFSET_POINT_ENABLE 0:0 +#define NVC797_SET_POLY_OFFSET_POINT_ENABLE_FALSE 0x00000000 +#define NVC797_SET_POLY_OFFSET_POINT_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_POLY_OFFSET_LINE 0x0dc4 +#define NVC797_SET_POLY_OFFSET_LINE_ENABLE 0:0 +#define NVC797_SET_POLY_OFFSET_LINE_ENABLE_FALSE 0x00000000 +#define NVC797_SET_POLY_OFFSET_LINE_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_POLY_OFFSET_FILL 0x0dc8 +#define NVC797_SET_POLY_OFFSET_FILL_ENABLE 0:0 +#define NVC797_SET_POLY_OFFSET_FILL_ENABLE_FALSE 0x00000000 +#define NVC797_SET_POLY_OFFSET_FILL_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_PATCH 0x0dcc +#define NVC797_SET_PATCH_SIZE 7:0 + +#define NVC797_SET_ITERATED_BLEND 0x0dd0 +#define NVC797_SET_ITERATED_BLEND_ENABLE 0:0 +#define NVC797_SET_ITERATED_BLEND_ENABLE_FALSE 0x00000000 +#define NVC797_SET_ITERATED_BLEND_ENABLE_TRUE 0x00000001 +#define NVC797_SET_ITERATED_BLEND_ALPHA_ENABLE 1:1 +#define NVC797_SET_ITERATED_BLEND_ALPHA_ENABLE_FALSE 0x00000000 +#define NVC797_SET_ITERATED_BLEND_ALPHA_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_ITERATED_BLEND_PASS 0x0dd4 +#define NVC797_SET_ITERATED_BLEND_PASS_COUNT 7:0 + +#define NVC797_SET_ZCULL_CRITERION 0x0dd8 +#define NVC797_SET_ZCULL_CRITERION_SFUNC 7:0 +#define NVC797_SET_ZCULL_CRITERION_SFUNC_NEVER 0x00000000 +#define NVC797_SET_ZCULL_CRITERION_SFUNC_LESS 0x00000001 +#define NVC797_SET_ZCULL_CRITERION_SFUNC_EQUAL 0x00000002 +#define NVC797_SET_ZCULL_CRITERION_SFUNC_LEQUAL 0x00000003 +#define NVC797_SET_ZCULL_CRITERION_SFUNC_GREATER 0x00000004 +#define NVC797_SET_ZCULL_CRITERION_SFUNC_NOTEQUAL 0x00000005 +#define NVC797_SET_ZCULL_CRITERION_SFUNC_GEQUAL 0x00000006 +#define NVC797_SET_ZCULL_CRITERION_SFUNC_ALWAYS 0x00000007 +#define NVC797_SET_ZCULL_CRITERION_NO_INVALIDATE 8:8 +#define NVC797_SET_ZCULL_CRITERION_NO_INVALIDATE_FALSE 0x00000000 +#define NVC797_SET_ZCULL_CRITERION_NO_INVALIDATE_TRUE 0x00000001 +#define NVC797_SET_ZCULL_CRITERION_FORCE_MATCH 9:9 +#define NVC797_SET_ZCULL_CRITERION_FORCE_MATCH_FALSE 0x00000000 +#define NVC797_SET_ZCULL_CRITERION_FORCE_MATCH_TRUE 0x00000001 +#define NVC797_SET_ZCULL_CRITERION_SREF 23:16 +#define NVC797_SET_ZCULL_CRITERION_SMASK 31:24 + +#define NVC797_PIXEL_SHADER_BARRIER 0x0de0 +#define NVC797_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE 0:0 +#define NVC797_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_FALSE 0x00000000 +#define NVC797_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_TRUE 0x00000001 +#define NVC797_PIXEL_SHADER_BARRIER_BARRIER_LOCATION 1:1 +#define NVC797_PIXEL_SHADER_BARRIER_BARRIER_LOCATION_BLOCK_BEFORE_PS 0x00000000 +#define NVC797_PIXEL_SHADER_BARRIER_BARRIER_LOCATION_BLOCK_BEFORE_PS_AND_ZTEST 0x00000001 + +#define NVC797_SET_SM_TIMEOUT_INTERVAL 0x0de4 +#define NVC797_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0 + +#define NVC797_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY 0x0de8 +#define NVC797_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE 0:0 +#define NVC797_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_FALSE 0x00000000 +#define NVC797_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_TRUE 0x00000001 + +#define NVC797_MME_DMA_WRITE_METHOD_BARRIER 0x0dec +#define NVC797_MME_DMA_WRITE_METHOD_BARRIER_V 0:0 + +#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER 
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER_V 7:0
+
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION 0x0df4
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC 2:0
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_FALSE 0x00000000
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_TRUE 0x00000001
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_EQ 0x00000002
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_NE 0x00000003
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LT 0x00000004
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LE 0x00000005
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GT 0x00000006
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GE 0x00000007
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION 5:3
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD_PRODUCTS 0x00000000
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUB_PRODUCTS 0x00000001
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MIN 0x00000002
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MAX 0x00000003
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_RCP 0x00000004
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD 0x00000005
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUBTRACT 0x00000006
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT 8:6
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT0 0x00000000
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT1 0x00000001
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT2 0x00000002
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT3 0x00000003
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT4 0x00000004
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT5 0x00000005
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT6 0x00000006
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT7 0x00000007
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT 11:9
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_RGB 0x00000000
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_RGB 0x00000001
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_AAA 0x00000002
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_AAA 0x00000003
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP0_RGB 0x00000004
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP1_RGB 0x00000005
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP2_RGB 0x00000006
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_PBR_RGB 0x00000007
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT 15:12
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO 0x00000000
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE 0x00000001
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_RGB 0x00000002
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_AAA 0x00000003
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_RGB 0x00000005
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_AAA 0x00000006
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP0_RGB 0x00000009
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP1_RGB 0x0000000A
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP2_RGB 0x0000000B
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_PBR_RGB 0x0000000C
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_CONSTANT_RGB 0x0000000D
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO_A_TIMES_B 0x0000000E
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT 18:16
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_RGB 0x00000000
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_RGB 0x00000001
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_AAA 0x00000002
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_AAA 0x00000003
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP0_RGB 0x00000004
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP1_RGB 0x00000005
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP2_RGB 0x00000006
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_PBR_RGB 0x00000007
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT 22:19
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO 0x00000000
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE 0x00000001
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_RGB 0x00000002
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_AAA 0x00000003
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_RGB 0x00000005
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_AAA 0x00000006
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP0_RGB 0x00000009
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP1_RGB 0x0000000A
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP2_RGB 0x0000000B
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_PBR_RGB 0x0000000C
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_CONSTANT_RGB 0x0000000D
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO_C_TIMES_D 0x0000000E
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE 25:23
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RGB 0x00000000
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GBR 0x00000001
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RRR 0x00000002
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GGG 0x00000003
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_BBB 0x00000004
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_R_TO_A 0x00000005
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK 27:26
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_RGB 0x00000000
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_R_ONLY 0x00000001
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_G_ONLY 0x00000002
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_B_ONLY 0x00000003
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT 29:28
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP0 0x00000000
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP1 0x00000001
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP2 0x00000002
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_NONE 0x00000003
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC 31:31
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_FALSE 0x00000000
+#define NVC797_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_TRUE 0x00000001
+
+#define NVC797_SET_WINDOW_OFFSET_X 0x0df8
+#define NVC797_SET_WINDOW_OFFSET_X_V 16:0
+
+#define NVC797_SET_WINDOW_OFFSET_Y 0x0dfc
+#define NVC797_SET_WINDOW_OFFSET_Y_V 17:0
+
+#define NVC797_SET_SCISSOR_ENABLE(j) (0x0e00+(j)*16)
+#define NVC797_SET_SCISSOR_ENABLE_V 0:0
+#define NVC797_SET_SCISSOR_ENABLE_V_FALSE 0x00000000
+#define NVC797_SET_SCISSOR_ENABLE_V_TRUE 0x00000001
+
+#define NVC797_SET_SCISSOR_HORIZONTAL(j) (0x0e04+(j)*16)
+#define NVC797_SET_SCISSOR_HORIZONTAL_XMIN 15:0
+#define NVC797_SET_SCISSOR_HORIZONTAL_XMAX 31:16
+
+#define NVC797_SET_SCISSOR_VERTICAL(j) (0x0e08+(j)*16)
+#define NVC797_SET_SCISSOR_VERTICAL_YMIN 15:0
+#define NVC797_SET_SCISSOR_VERTICAL_YMAX 31:16
+
+#define NVC797_SET_VPC_PERF_KNOB 0x0f14
+#define NVC797_SET_VPC_PERF_KNOB_CULLED_SMALL_LINES 7:0
+#define NVC797_SET_VPC_PERF_KNOB_CULLED_SMALL_TRIANGLES 15:8
+#define NVC797_SET_VPC_PERF_KNOB_NONCULLED_LINES_AND_POINTS 23:16
+#define NVC797_SET_VPC_PERF_KNOB_NONCULLED_TRIANGLES 31:24
+
+#define NVC797_PM_LOCAL_TRIGGER 0x0f18
+#define NVC797_PM_LOCAL_TRIGGER_BOOKMARK 15:0
+
+#define NVC797_SET_POST_Z_PS_IMASK 0x0f1c
+#define NVC797_SET_POST_Z_PS_IMASK_ENABLE 0:0
+#define NVC797_SET_POST_Z_PS_IMASK_ENABLE_FALSE 0x00000000
+#define NVC797_SET_POST_Z_PS_IMASK_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_CONSTANT_COLOR_RENDERING 0x0f40
+#define NVC797_SET_CONSTANT_COLOR_RENDERING_ENABLE 0:0
+#define NVC797_SET_CONSTANT_COLOR_RENDERING_ENABLE_FALSE 0x00000000
+#define NVC797_SET_CONSTANT_COLOR_RENDERING_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_CONSTANT_COLOR_RENDERING_RED 0x0f44
+#define NVC797_SET_CONSTANT_COLOR_RENDERING_RED_V 31:0
+
+#define NVC797_SET_CONSTANT_COLOR_RENDERING_GREEN 0x0f48
+#define NVC797_SET_CONSTANT_COLOR_RENDERING_GREEN_V 31:0
+
+#define NVC797_SET_CONSTANT_COLOR_RENDERING_BLUE 0x0f4c
+#define NVC797_SET_CONSTANT_COLOR_RENDERING_BLUE_V 31:0
+
+#define NVC797_SET_CONSTANT_COLOR_RENDERING_ALPHA 0x0f50
+#define NVC797_SET_CONSTANT_COLOR_RENDERING_ALPHA_V 31:0
+
+#define NVC797_SET_BACK_STENCIL_FUNC_REF 0x0f54
+#define NVC797_SET_BACK_STENCIL_FUNC_REF_V 7:0
+
+#define NVC797_SET_BACK_STENCIL_MASK 0x0f58
+#define NVC797_SET_BACK_STENCIL_MASK_V 7:0
+
+#define NVC797_SET_BACK_STENCIL_FUNC_MASK 0x0f5c
+#define NVC797_SET_BACK_STENCIL_FUNC_MASK_V 7:0
+
+#define NVC797_SET_VERTEX_STREAM_SUBSTITUTE_A 0x0f84
+#define NVC797_SET_VERTEX_STREAM_SUBSTITUTE_A_ADDRESS_UPPER 7:0
+
+#define NVC797_SET_VERTEX_STREAM_SUBSTITUTE_B 0x0f88
+#define NVC797_SET_VERTEX_STREAM_SUBSTITUTE_B_ADDRESS_LOWER 31:0
+
+#define NVC797_SET_LINE_MODE_POLYGON_CLIP 0x0f8c
+#define NVC797_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE 0:0
+#define NVC797_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DRAW_LINE 0x00000000
+#define NVC797_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DO_NOT_DRAW_LINE 0x00000001
+
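Methods that take a (j) index, such as the scissor group above, step by a fixed stride: 16 bytes here, i.e. four 32-bit methods per scissor. A sketch of addressing scissor 2 and packing its horizontal extent, reusing the illustrative FIELD_* helpers sketched earlier:

/* 0x0e04 + 2*16 = 0x0e24: method offset of scissor 2's horizontal extent. */
unsigned mthd = NVC797_SET_SCISSOR_HORIZONTAL(2);
/* XMIN lives in bits 15:0 of the payload, XMAX in bits 31:16. */
unsigned data = FIELD_NUM(NVC797_SET_SCISSOR_HORIZONTAL_XMIN, 0)
              | FIELD_NUM(NVC797_SET_SCISSOR_HORIZONTAL_XMAX, 1920);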
+#define NVC797_SET_SINGLE_CT_WRITE_CONTROL 0x0f90
+#define NVC797_SET_SINGLE_CT_WRITE_CONTROL_ENABLE 0:0
+#define NVC797_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_FALSE 0x00000000
+#define NVC797_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_VTG_WARP_WATERMARKS 0x0f98
+#define NVC797_SET_VTG_WARP_WATERMARKS_LOW 15:0
+#define NVC797_SET_VTG_WARP_WATERMARKS_HIGH 31:16
+
+#define NVC797_SET_DEPTH_BOUNDS_MIN 0x0f9c
+#define NVC797_SET_DEPTH_BOUNDS_MIN_V 31:0
+
+#define NVC797_SET_DEPTH_BOUNDS_MAX 0x0fa0
+#define NVC797_SET_DEPTH_BOUNDS_MAX_V 31:0
+
+#define NVC797_SET_SAMPLE_MASK 0x0fa4
+#define NVC797_SET_SAMPLE_MASK_RASTER_OUT_ENABLE 0:0
+#define NVC797_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_FALSE 0x00000000
+#define NVC797_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_TRUE 0x00000001
+#define NVC797_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE 4:4
+#define NVC797_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_FALSE 0x00000000
+#define NVC797_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_COLOR_TARGET_SAMPLE_MASK 0x0fa8
+#define NVC797_SET_COLOR_TARGET_SAMPLE_MASK_V 15:0
+
+#define NVC797_SET_CT_MRT_ENABLE 0x0fac
+#define NVC797_SET_CT_MRT_ENABLE_V 0:0
+#define NVC797_SET_CT_MRT_ENABLE_V_FALSE 0x00000000
+#define NVC797_SET_CT_MRT_ENABLE_V_TRUE 0x00000001
+
+#define NVC797_SET_NONMULTISAMPLED_Z 0x0fb0
+#define NVC797_SET_NONMULTISAMPLED_Z_V 0:0
+#define NVC797_SET_NONMULTISAMPLED_Z_V_PER_SAMPLE 0x00000000
+#define NVC797_SET_NONMULTISAMPLED_Z_V_AT_PIXEL_CENTER 0x00000001
+
+#define NVC797_SET_TIR 0x0fb4
+#define NVC797_SET_TIR_MODE 1:0
+#define NVC797_SET_TIR_MODE_DISABLED 0x00000000
+#define NVC797_SET_TIR_MODE_RASTER_N_TARGET_M 0x00000001
+
+#define NVC797_SET_ANTI_ALIAS_RASTER 0x0fb8
+#define NVC797_SET_ANTI_ALIAS_RASTER_SAMPLES 2:0
+#define NVC797_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_1X1 0x00000000
+#define NVC797_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X2 0x00000002
+#define NVC797_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVC797_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVC797_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X4 0x00000006
+
+#define NVC797_SET_SAMPLE_MASK_X0_Y0 0x0fbc
+#define NVC797_SET_SAMPLE_MASK_X0_Y0_V 15:0
+
+#define NVC797_SET_SAMPLE_MASK_X1_Y0 0x0fc0
+#define NVC797_SET_SAMPLE_MASK_X1_Y0_V 15:0
+
+#define NVC797_SET_SAMPLE_MASK_X0_Y1 0x0fc4
+#define NVC797_SET_SAMPLE_MASK_X0_Y1_V 15:0
+
+#define NVC797_SET_SAMPLE_MASK_X1_Y1 0x0fc8
+#define NVC797_SET_SAMPLE_MASK_X1_Y1_V 15:0
+
+#define NVC797_SET_SURFACE_CLIP_ID_MEMORY_A 0x0fcc
+#define NVC797_SET_SURFACE_CLIP_ID_MEMORY_A_OFFSET_UPPER 7:0
+
+#define NVC797_SET_SURFACE_CLIP_ID_MEMORY_B 0x0fd0
+#define NVC797_SET_SURFACE_CLIP_ID_MEMORY_B_OFFSET_LOWER 31:0
+
+#define NVC797_SET_TIR_MODULATION 0x0fd4
+#define NVC797_SET_TIR_MODULATION_COMPONENT_SELECT 1:0
+#define NVC797_SET_TIR_MODULATION_COMPONENT_SELECT_NO_MODULATION 0x00000000
+#define NVC797_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGB 0x00000001
+#define NVC797_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_ALPHA_ONLY 0x00000002
+#define NVC797_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGBA 0x00000003
+
+#define NVC797_SET_TIR_MODULATION_FUNCTION 0x0fd8
+#define NVC797_SET_TIR_MODULATION_FUNCTION_SELECT 0:0
+#define NVC797_SET_TIR_MODULATION_FUNCTION_SELECT_LINEAR 0x00000000
+#define NVC797_SET_TIR_MODULATION_FUNCTION_SELECT_TABLE 0x00000001
+
+#define NVC797_SET_BLEND_OPT_CONTROL 0x0fdc
+#define NVC797_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS 0:0
+#define NVC797_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_FALSE 0x00000000
+#define NVC797_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_TRUE 0x00000001
+
+#define NVC797_SET_ZT_A 0x0fe0
+#define NVC797_SET_ZT_A_OFFSET_UPPER 7:0
+
+#define NVC797_SET_ZT_B 0x0fe4
+#define NVC797_SET_ZT_B_OFFSET_LOWER 31:0
+
+#define NVC797_SET_ZT_FORMAT 0x0fe8
+#define NVC797_SET_ZT_FORMAT_V 4:0
+#define NVC797_SET_ZT_FORMAT_V_Z16 0x00000013
+#define NVC797_SET_ZT_FORMAT_V_Z24S8 0x00000014
+#define NVC797_SET_ZT_FORMAT_V_X8Z24 0x00000015
+#define NVC797_SET_ZT_FORMAT_V_S8Z24 0x00000016
+#define NVC797_SET_ZT_FORMAT_V_S8 0x00000017
+#define NVC797_SET_ZT_FORMAT_V_V8Z24 0x00000018
+#define NVC797_SET_ZT_FORMAT_V_ZF32 0x0000000A
+#define NVC797_SET_ZT_FORMAT_V_ZF32_X24S8 0x00000019
+#define NVC797_SET_ZT_FORMAT_V_X8Z24_X16V8S8 0x0000001D
+#define NVC797_SET_ZT_FORMAT_V_ZF32_X16V8X8 0x0000001E
+#define NVC797_SET_ZT_FORMAT_V_ZF32_X16V8S8 0x0000001F
+
+#define NVC797_SET_ZT_BLOCK_SIZE 0x0fec
+#define NVC797_SET_ZT_BLOCK_SIZE_WIDTH 3:0
+#define NVC797_SET_ZT_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVC797_SET_ZT_BLOCK_SIZE_HEIGHT 7:4
+#define NVC797_SET_ZT_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVC797_SET_ZT_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVC797_SET_ZT_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC797_SET_ZT_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC797_SET_ZT_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC797_SET_ZT_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC797_SET_ZT_BLOCK_SIZE_DEPTH 11:8
+#define NVC797_SET_ZT_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+
+#define NVC797_SET_ZT_ARRAY_PITCH 0x0ff0
+#define NVC797_SET_ZT_ARRAY_PITCH_V 31:0
+
+#define NVC797_SET_SURFACE_CLIP_HORIZONTAL 0x0ff4
+#define NVC797_SET_SURFACE_CLIP_HORIZONTAL_X 15:0
+#define NVC797_SET_SURFACE_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVC797_SET_SURFACE_CLIP_VERTICAL 0x0ff8
+#define NVC797_SET_SURFACE_CLIP_VERTICAL_Y 15:0
+#define NVC797_SET_SURFACE_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS 0x1000
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE 0:0
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_FALSE 0x00000000
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_TRUE 0x00000001
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY 5:4
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC797_SET_VIEWPORT_MULTICAST 0x1004
+#define NVC797_SET_VIEWPORT_MULTICAST_ORDER 0:0
+#define NVC797_SET_VIEWPORT_MULTICAST_ORDER_VIEWPORT_ORDER 0x00000000
+#define NVC797_SET_VIEWPORT_MULTICAST_ORDER_PRIMITIVE_ORDER 0x00000001
+
+#define NVC797_SET_TESSELLATION_CUT_HEIGHT 0x1008
+#define NVC797_SET_TESSELLATION_CUT_HEIGHT_V 4:0
+
+#define NVC797_SET_MAX_GS_INSTANCES_PER_TASK 0x100c
+#define NVC797_SET_MAX_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVC797_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK 0x1010
+#define NVC797_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK_V 15:0
+
+#define NVC797_SET_RESERVED_SW_METHOD00 0x1014
+#define NVC797_SET_RESERVED_SW_METHOD00_V 31:0
+
+#define NVC797_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER 0x1018
+#define NVC797_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVC797_SET_BETA_CB_STORAGE_CONSTRAINT 0x101c
+#define NVC797_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVC797_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVC797_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
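The SET_ZT_A/SET_ZT_B pair above follows the upper/lower address split used throughout this class: bits 39:32 of the surface offset go in the _A method, bits 31:0 in _B. A sketch configuring a Z24S8 depth target, assuming zt_offset is a 40-bit GPU offset (an illustrative variable, not from this header) and reusing the FIELD_* helpers sketched earlier:

unsigned long long zt_offset = 0x1234567800ull;  /* illustrative 40-bit offset */
unsigned zt_hi  = FIELD_NUM(NVC797_SET_ZT_A_OFFSET_UPPER,
                            (unsigned)(zt_offset >> 32));
unsigned zt_lo  = (unsigned)zt_offset;  /* SET_ZT_B_OFFSET_LOWER is all 32 bits */
unsigned zt_fmt = FIELD_NUM(NVC797_SET_ZT_FORMAT_V, NVC797_SET_ZT_FORMAT_V_Z24S8);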
+
+#define NVC797_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER 0x1020
+#define NVC797_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVC797_SET_ALPHA_CB_STORAGE_CONSTRAINT 0x1024
+#define NVC797_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVC797_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVC797_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_RESERVED_SW_METHOD01 0x1028
+#define NVC797_SET_RESERVED_SW_METHOD01_V 31:0
+
+#define NVC797_SET_RESERVED_SW_METHOD02 0x102c
+#define NVC797_SET_RESERVED_SW_METHOD02_V 31:0
+
+#define NVC797_SET_TIR_MODULATION_COEFFICIENT_TABLE(i) (0x1030+(i)*4)
+#define NVC797_SET_TIR_MODULATION_COEFFICIENT_TABLE_V0 7:0
+#define NVC797_SET_TIR_MODULATION_COEFFICIENT_TABLE_V1 15:8
+#define NVC797_SET_TIR_MODULATION_COEFFICIENT_TABLE_V2 23:16
+#define NVC797_SET_TIR_MODULATION_COEFFICIENT_TABLE_V3 31:24
+
+#define NVC797_SET_SPARE_NOOP01 0x1044
+#define NVC797_SET_SPARE_NOOP01_V 31:0
+
+#define NVC797_SET_SPARE_NOOP02 0x1048
+#define NVC797_SET_SPARE_NOOP02_V 31:0
+
+#define NVC797_SET_SPARE_NOOP03 0x104c
+#define NVC797_SET_SPARE_NOOP03_V 31:0
+
+#define NVC797_SET_SPARE_NOOP04 0x1050
+#define NVC797_SET_SPARE_NOOP04_V 31:0
+
+#define NVC797_SET_SPARE_NOOP05 0x1054
+#define NVC797_SET_SPARE_NOOP05_V 31:0
+
+#define NVC797_SET_SPARE_NOOP06 0x1058
+#define NVC797_SET_SPARE_NOOP06_V 31:0
+
+#define NVC797_SET_SPARE_NOOP07 0x105c
+#define NVC797_SET_SPARE_NOOP07_V 31:0
+
+#define NVC797_SET_SPARE_NOOP08 0x1060
+#define NVC797_SET_SPARE_NOOP08_V 31:0
+
+#define NVC797_SET_SPARE_NOOP09 0x1064
+#define NVC797_SET_SPARE_NOOP09_V 31:0
+
+#define NVC797_SET_SPARE_NOOP10 0x1068
+#define NVC797_SET_SPARE_NOOP10_V 31:0
+
+#define NVC797_SET_SPARE_NOOP11 0x106c
+#define NVC797_SET_SPARE_NOOP11_V 31:0
+
+#define NVC797_SET_SPARE_NOOP12 0x1070
+#define NVC797_SET_SPARE_NOOP12_V 31:0
+
+#define NVC797_SET_SPARE_NOOP13 0x1074
+#define NVC797_SET_SPARE_NOOP13_V 31:0
+
+#define NVC797_SET_SPARE_NOOP14 0x1078
+#define NVC797_SET_SPARE_NOOP14_V 31:0
+
+#define NVC797_SET_SPARE_NOOP15 0x107c
+#define NVC797_SET_SPARE_NOOP15_V 31:0
+
+#define NVC797_SET_RESERVED_SW_METHOD03 0x10b0
+#define NVC797_SET_RESERVED_SW_METHOD03_V 31:0
+
+#define NVC797_SET_RESERVED_SW_METHOD04 0x10b4
+#define NVC797_SET_RESERVED_SW_METHOD04_V 31:0
+
+#define NVC797_SET_RESERVED_SW_METHOD05 0x10b8
+#define NVC797_SET_RESERVED_SW_METHOD05_V 31:0
+
+#define NVC797_SET_RESERVED_SW_METHOD06 0x10bc
+#define NVC797_SET_RESERVED_SW_METHOD06_V 31:0
+
+#define NVC797_SET_RESERVED_SW_METHOD07 0x10c0
+#define NVC797_SET_RESERVED_SW_METHOD07_V 31:0
+
+#define NVC797_SET_RESERVED_SW_METHOD08 0x10c4
+#define NVC797_SET_RESERVED_SW_METHOD08_V 31:0
+
+#define NVC797_SET_RESERVED_SW_METHOD09 0x10c8
+#define NVC797_SET_RESERVED_SW_METHOD09_V 31:0
+
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_UNORM8 0x10cc
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED 23:16
+
+#define NVC797_SET_RESERVED_SW_METHOD10 0x10d0
+#define NVC797_SET_RESERVED_SW_METHOD10_V 31:0
+
+#define NVC797_SET_RESERVED_SW_METHOD11 0x10d4
+#define NVC797_SET_RESERVED_SW_METHOD11_V 31:0
+
+#define NVC797_SET_RESERVED_SW_METHOD12 0x10d8
+#define NVC797_SET_RESERVED_SW_METHOD12_V 31:0
+
+#define NVC797_SET_RESERVED_SW_METHOD13 0x10dc
+#define NVC797_SET_RESERVED_SW_METHOD13_V 31:0
+
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_UNORM10 0x10e0
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED 23:16
+
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_UNORM16 0x10e4
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED 23:16
+
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_FP11 0x10e8
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED_ALL_HIT_ONCE 5:0
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED 21:16
+
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_FP16 0x10ec
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED 23:16
+
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_SRGB8 0x10f0
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVC797_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED 23:16
+
+#define NVC797_UNBIND_ALL 0x10f4
+#define NVC797_UNBIND_ALL_CONSTANT_BUFFERS 8:8
+#define NVC797_UNBIND_ALL_CONSTANT_BUFFERS_FALSE 0x00000000
+#define NVC797_UNBIND_ALL_CONSTANT_BUFFERS_TRUE 0x00000001
+
+#define NVC797_SET_CLEAR_SURFACE_CONTROL 0x10f8
+#define NVC797_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK 0:0
+#define NVC797_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_FALSE 0x00000000
+#define NVC797_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_TRUE 0x00000001
+#define NVC797_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT 4:4
+#define NVC797_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_FALSE 0x00000000
+#define NVC797_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_TRUE 0x00000001
+#define NVC797_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0 8:8
+#define NVC797_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_FALSE 0x00000000
+#define NVC797_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_TRUE 0x00000001
+#define NVC797_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0 12:12
+#define NVC797_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_FALSE 0x00000000
+#define NVC797_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_TRUE 0x00000001
+
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS 0x10fc
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY 5:4
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC797_SET_RESERVED_SW_METHOD14 0x1100
+#define NVC797_SET_RESERVED_SW_METHOD14_V 31:0
+
+#define NVC797_SET_RESERVED_SW_METHOD15 0x1104
+#define NVC797_SET_RESERVED_SW_METHOD15_V 31:0
+
+#define NVC797_NO_OPERATION_DATA_HI 0x110c
+#define NVC797_NO_OPERATION_DATA_HI_V 31:0
+
+#define NVC797_SET_DEPTH_BIAS_CONTROL 0x1110
+#define NVC797_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT 0:0
+#define NVC797_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_FALSE 0x00000000
+#define NVC797_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_TRUE 0x00000001
+
+#define NVC797_PM_TRIGGER_END 0x1114
+#define NVC797_PM_TRIGGER_END_V 31:0
+
+#define NVC797_SET_VERTEX_ID_BASE 0x1118
+#define NVC797_SET_VERTEX_ID_BASE_V 31:0
+
+#define NVC797_SET_STENCIL_COMPRESSION 0x111c
+#define NVC797_SET_STENCIL_COMPRESSION_ENABLE 0:0
+#define NVC797_SET_STENCIL_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVC797_SET_STENCIL_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A(i) (0x1120+(i)*4)
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0 0:0
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1 1:1
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2 2:2
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3 3:3
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0 4:4
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1 5:5
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2 6:6
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3 7:7
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0 8:8
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1 9:9
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2 10:10
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3 11:11
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0 12:12
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1 13:13
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2 14:14
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3 15:15
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0 16:16
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1 17:17
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2 18:18
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3 19:19
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0 20:20
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1 21:21
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2 22:22
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3 23:23
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0 24:24
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1 25:25
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2 26:26
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3 27:27
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0 28:28
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1 29:29
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2 30:30
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3 31:31
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_TRUE 0x00000001
+
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B(i) (0x1128+(i)*4)
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0 0:0
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1 1:1
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2 2:2
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3 3:3
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0 4:4
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1 5:5
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2 6:6
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3 7:7
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0 8:8
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1 9:9
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2 10:10
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3 11:11
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0 12:12
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1 13:13
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2 14:14
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3 15:15
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0 16:16
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1 17:17
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2 18:18
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3 19:19
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0 20:20
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1 21:21
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2 22:22
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3 23:23
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0 24:24
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1 25:25
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2 26:26
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3 27:27
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0 28:28
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1 29:29
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2 30:30
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_TRUE 0x00000001
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3 31:31
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_TRUE 0x00000001
+
+#define NVC797_SET_TIR_CONTROL 0x1130
+#define NVC797_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES 0:0
+#define NVC797_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_DISABLE 0x00000000
+#define NVC797_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_ENABLE 0x00000001
+#define NVC797_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES 4:4
+#define NVC797_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_DISABLE 0x00000000
+#define NVC797_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_ENABLE 0x00000001
+#define NVC797_SET_TIR_CONTROL_REDUCE_COVERAGE 1:1
+#define NVC797_SET_TIR_CONTROL_REDUCE_COVERAGE_DISABLE 0x00000000
+#define NVC797_SET_TIR_CONTROL_REDUCE_COVERAGE_ENABLE 0x00000001
+#define NVC797_SET_TIR_CONTROL_REDUCTION_MODE 2:2
+#define NVC797_SET_TIR_CONTROL_REDUCTION_MODE_AFFINITY_MAP 0x00000000
+#define NVC797_SET_TIR_CONTROL_REDUCTION_MODE_TRUNCATION 0x00000001
+
+#define NVC797_SET_MUTABLE_METHOD_CONTROL 0x1134
+#define NVC797_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT 0:0
+#define NVC797_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_FALSE 0x00000000
+#define NVC797_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_TRUE 0x00000001
+
+#define NVC797_SET_POST_PS_INITIAL_COVERAGE 0x1138
+#define NVC797_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE 0:0
+#define NVC797_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_FALSE 0x00000000
+#define NVC797_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_TRUE 0x00000001
+
+#define NVC797_SET_FILL_VIA_TRIANGLE 0x113c
+#define NVC797_SET_FILL_VIA_TRIANGLE_MODE 1:0
+#define NVC797_SET_FILL_VIA_TRIANGLE_MODE_DISABLED 0x00000000
+#define NVC797_SET_FILL_VIA_TRIANGLE_MODE_FILL_ALL 0x00000001
+#define NVC797_SET_FILL_VIA_TRIANGLE_MODE_FILL_BBOX 0x00000002
+
+#define NVC797_SET_BLEND_PER_FORMAT_ENABLE 0x1140
+#define NVC797_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16 4:4
+#define NVC797_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_FALSE 0x00000000
+#define NVC797_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_TRUE 0x00000001
+
+#define NVC797_FLUSH_PENDING_WRITES 0x1144
+#define NVC797_FLUSH_PENDING_WRITES_SM_DOES_GLOBAL_STORE 0:0
+
+#define NVC797_SET_VERTEX_ATTRIBUTE_A(i) (0x1160+(i)*4)
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_STREAM 4:0
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_SOURCE 6:6
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_SOURCE_ACTIVE 0x00000000
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_SOURCE_INACTIVE 0x00000001
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_OFFSET 20:7
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS 26:21
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32 0x00000012
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_G8R8 0x00000032
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16 0x0000001B
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8 0x0000001D
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8 0x00000034
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE 29:27
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SNORM 0x00000001
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UNORM 0x00000002
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SINT 0x00000003
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UINT 0x00000004
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_USCALED 0x00000005
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SSCALED 0x00000006
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT 0x00000007
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B 31:31
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_FALSE 0x00000000
+#define NVC797_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_TRUE 0x00000001
+
+#define NVC797_SET_VERTEX_ATTRIBUTE_B(i) (0x11a0+(i)*4)
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_STREAM 4:0
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_SOURCE 6:6
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_SOURCE_ACTIVE 0x00000000
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_SOURCE_INACTIVE 0x00000001
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_OFFSET 20:7
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS 26:21
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32 0x00000012
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_G8R8 0x00000032
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16 0x0000001B
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8 0x0000001D
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8 0x00000034
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE 29:27
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SNORM 0x00000001
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UNORM 0x00000002
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SINT 0x00000003
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UINT 0x00000004
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_USCALED 0x00000005
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SSCALED 0x00000006
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_FLOAT 0x00000007
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B 31:31
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_FALSE 0x00000000
+#define NVC797_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_TRUE 0x00000001
+
+#define NVC797_SET_ANTI_ALIAS_SAMPLE_POSITIONS(i) (0x11e0+(i)*4)
+#define NVC797_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X0 3:0
+#define NVC797_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y0 7:4
+#define NVC797_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X1 11:8
+#define NVC797_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y1 15:12
+#define NVC797_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X2 19:16
+#define NVC797_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y2 23:20
+#define NVC797_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X3 27:24
+#define NVC797_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y3 31:28
+
+#define NVC797_SET_OFFSET_RENDER_TARGET_INDEX 0x11f0
+#define NVC797_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX 0:0
+#define NVC797_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_FALSE 0x00000000
+#define NVC797_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_TRUE 0x00000001
+
+#define NVC797_FORCE_HEAVYWEIGHT_METHOD_SYNC 0x11f4
+#define NVC797_FORCE_HEAVYWEIGHT_METHOD_SYNC_V 31:0
+
+#define NVC797_SET_COVERAGE_TO_COLOR 0x11f8
+#define NVC797_SET_COVERAGE_TO_COLOR_ENABLE 0:0
+#define NVC797_SET_COVERAGE_TO_COLOR_ENABLE_FALSE 0x00000000
+#define NVC797_SET_COVERAGE_TO_COLOR_ENABLE_TRUE 0x00000001
+#define NVC797_SET_COVERAGE_TO_COLOR_CT_SELECT 6:4
+
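SET_VERTEX_ATTRIBUTE_A above packs an entire vertex-attribute descriptor into one word: stream index, enable, byte offset within the vertex, component layout, and numeric type. A sketch describing attribute 0 as three 32-bit floats at byte offset 12 of stream 0, with the illustrative FIELD_* helpers from earlier:

unsigned attr0 =
    FIELD_NUM(NVC797_SET_VERTEX_ATTRIBUTE_A_STREAM, 0)
  | FIELD_NUM(NVC797_SET_VERTEX_ATTRIBUTE_A_SOURCE,
              NVC797_SET_VERTEX_ATTRIBUTE_A_SOURCE_ACTIVE)
  | FIELD_NUM(NVC797_SET_VERTEX_ATTRIBUTE_A_OFFSET, 12)
  | FIELD_NUM(NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS,
              NVC797_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32)
  | FIELD_NUM(NVC797_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE,
              NVC797_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT);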
+#define NVC797_DECOMPRESS_ZETA_SURFACE 0x11fc
+#define NVC797_DECOMPRESS_ZETA_SURFACE_Z_ENABLE 0:0
+#define NVC797_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_FALSE 0x00000000
+#define NVC797_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_TRUE 0x00000001
+#define NVC797_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE 4:4
+#define NVC797_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_FALSE 0x00000000
+#define NVC797_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_SCREEN_STATE_MASK 0x1204
+#define NVC797_SET_SCREEN_STATE_MASK_MASK 3:0
+
+#define NVC797_SET_ZT_SPARSE 0x1208
+#define NVC797_SET_ZT_SPARSE_ENABLE 0:0
+#define NVC797_SET_ZT_SPARSE_ENABLE_FALSE 0x00000000
+#define NVC797_SET_ZT_SPARSE_ENABLE_TRUE 0x00000001
+#define NVC797_SET_ZT_SPARSE_UNMAPPED_COMPARE 1:1
+#define NVC797_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_UNMAPPED_0 0x00000000
+#define NVC797_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_FAIL_ALWAYS 0x00000001
+
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST 0x1214
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX 15:0
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
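DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST above encodes a complete non-indexed draw in a single method payload: start vertex, vertex count, and topology. A sketch of a 36-vertex triangle-list draw for the first instance, again with the illustrative FIELD_* helpers:

unsigned draw =
    FIELD_NUM(NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX, 0)
  | FIELD_NUM(NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT, 36)
  | FIELD_NUM(NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY,
              NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES);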
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT 0x1218
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_START_INDEX 15:0
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC797_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC797_SET_CT_SELECT 0x121c
+#define NVC797_SET_CT_SELECT_TARGET_COUNT 3:0
+#define NVC797_SET_CT_SELECT_TARGET0 6:4
+#define NVC797_SET_CT_SELECT_TARGET1 9:7
+#define NVC797_SET_CT_SELECT_TARGET2 12:10
+#define NVC797_SET_CT_SELECT_TARGET3 15:13
+#define NVC797_SET_CT_SELECT_TARGET4 18:16
+#define NVC797_SET_CT_SELECT_TARGET5 21:19
+#define NVC797_SET_CT_SELECT_TARGET6 24:22
+#define NVC797_SET_CT_SELECT_TARGET7 27:25
+
+#define NVC797_SET_COMPRESSION_THRESHOLD 0x1220
+#define NVC797_SET_COMPRESSION_THRESHOLD_SAMPLES 3:0
+#define NVC797_SET_COMPRESSION_THRESHOLD_SAMPLES__0 0x00000000
+#define NVC797_SET_COMPRESSION_THRESHOLD_SAMPLES__1 0x00000001
+#define NVC797_SET_COMPRESSION_THRESHOLD_SAMPLES__2 0x00000002
+#define NVC797_SET_COMPRESSION_THRESHOLD_SAMPLES__4 0x00000003
+#define NVC797_SET_COMPRESSION_THRESHOLD_SAMPLES__8 0x00000004
+#define NVC797_SET_COMPRESSION_THRESHOLD_SAMPLES__16 0x00000005
+#define NVC797_SET_COMPRESSION_THRESHOLD_SAMPLES__32 0x00000006
+#define NVC797_SET_COMPRESSION_THRESHOLD_SAMPLES__64 0x00000007
+#define NVC797_SET_COMPRESSION_THRESHOLD_SAMPLES__128 0x00000008
+#define NVC797_SET_COMPRESSION_THRESHOLD_SAMPLES__256 0x00000009
+#define NVC797_SET_COMPRESSION_THRESHOLD_SAMPLES__512 0x0000000A
+#define NVC797_SET_COMPRESSION_THRESHOLD_SAMPLES__1024 0x0000000B
+#define NVC797_SET_COMPRESSION_THRESHOLD_SAMPLES__2048 0x0000000C
+
+#define NVC797_SET_PIXEL_SHADER_INTERLOCK_CONTROL 0x1224
+#define NVC797_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE 1:0
+#define NVC797_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_NO_CONFLICT_DETECT 0x00000000
+#define NVC797_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_SAMPLE 0x00000001
+#define NVC797_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_PIXEL 0x00000002
+#define NVC797_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE 2:2
+#define NVC797_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_16X16 0x00000000
+#define NVC797_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_8X8 0x00000001
+#define NVC797_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER 3:3
+#define NVC797_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_ORDERED 0x00000000
+#define NVC797_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_UNORDERED 0x00000001
+
+#define NVC797_SET_ZT_SIZE_A 0x1228
+#define NVC797_SET_ZT_SIZE_A_WIDTH 27:0
+
+#define NVC797_SET_ZT_SIZE_B 0x122c
+#define NVC797_SET_ZT_SIZE_B_HEIGHT 17:0
+
+#define NVC797_SET_ZT_SIZE_C 0x1230
+#define NVC797_SET_ZT_SIZE_C_THIRD_DIMENSION 15:0
+#define NVC797_SET_ZT_SIZE_C_CONTROL 16:16
+#define NVC797_SET_ZT_SIZE_C_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000
+#define NVC797_SET_ZT_SIZE_C_CONTROL_ARRAY_SIZE_IS_ONE 0x00000001
+
+#define NVC797_SET_SAMPLER_BINDING 0x1234
+#define NVC797_SET_SAMPLER_BINDING_V 0:0
+#define NVC797_SET_SAMPLER_BINDING_V_INDEPENDENTLY 0x00000000
+#define NVC797_SET_SAMPLER_BINDING_V_VIA_HEADER_BINDING 0x00000001
+
+#define NVC797_DRAW_AUTO 0x123c
+#define NVC797_DRAW_AUTO_BYTE_COUNT 31:0
+
+#define NVC797_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK(i) (0x1240+(i)*4)
+#define NVC797_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK_V 31:0
+
+#define NVC797_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE 0x1260
+#define NVC797_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_INDEX 7:0
+#define NVC797_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_VALUE 23:8
+
+#define NVC797_SET_BACK_END_COPY_A 0x1264
+#define NVC797_SET_BACK_END_COPY_A_DWORDS 7:0
+#define NVC797_SET_BACK_END_COPY_A_SATURATE32_ENABLE 8:8
+#define NVC797_SET_BACK_END_COPY_A_SATURATE32_ENABLE_FALSE 0x00000000
+#define NVC797_SET_BACK_END_COPY_A_SATURATE32_ENABLE_TRUE 0x00000001
+#define NVC797_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE 12:12
+#define NVC797_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_FALSE 0x00000000
+#define NVC797_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_BACK_END_COPY_B 0x1268
+#define NVC797_SET_BACK_END_COPY_B_SRC_ADDRESS_UPPER 7:0
+
+#define NVC797_SET_BACK_END_COPY_C 0x126c
+#define NVC797_SET_BACK_END_COPY_C_SRC_ADDRESS_LOWER 31:0
+
+#define NVC797_SET_BACK_END_COPY_D 0x1270
+#define NVC797_SET_BACK_END_COPY_D_DEST_ADDRESS_UPPER 7:0
+
+#define NVC797_SET_BACK_END_COPY_E 0x1274
+#define NVC797_SET_BACK_END_COPY_E_DEST_ADDRESS_LOWER 31:0
+
+#define NVC797_SET_CIRCULAR_BUFFER_SIZE 0x1280
+#define NVC797_SET_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 19:0
+
+#define NVC797_SET_VTG_REGISTER_WATERMARKS 0x1284
+#define NVC797_SET_VTG_REGISTER_WATERMARKS_LOW 15:0
+#define NVC797_SET_VTG_REGISTER_WATERMARKS_HIGH 31:16
+
+#define NVC797_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288
+#define NVC797_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0
+#define NVC797_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVC797_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVC797_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4
+
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS 0x1290
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY 5:4
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC797_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE 0x12a4
+#define NVC797_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE_V 31:0
+
+#define NVC797_CLEAR_ZCULL_REGION 0x12c8
+#define NVC797_CLEAR_ZCULL_REGION_Z_ENABLE 0:0
+#define NVC797_CLEAR_ZCULL_REGION_Z_ENABLE_FALSE 0x00000000
+#define NVC797_CLEAR_ZCULL_REGION_Z_ENABLE_TRUE 0x00000001
+#define NVC797_CLEAR_ZCULL_REGION_STENCIL_ENABLE 4:4
+#define NVC797_CLEAR_ZCULL_REGION_STENCIL_ENABLE_FALSE 0x00000000
+#define NVC797_CLEAR_ZCULL_REGION_STENCIL_ENABLE_TRUE 0x00000001
+#define NVC797_CLEAR_ZCULL_REGION_USE_CLEAR_RECT 1:1
+#define NVC797_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_FALSE 0x00000000
+#define NVC797_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_TRUE 0x00000001
+#define NVC797_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX 2:2
NVC797_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX 2:2 +#define NVC797_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_FALSE 0x00000000 +#define NVC797_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_TRUE 0x00000001 +#define NVC797_CLEAR_ZCULL_REGION_RT_ARRAY_INDEX 20:5 +#define NVC797_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE 3:3 +#define NVC797_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_FALSE 0x00000000 +#define NVC797_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_TRUE 0x00000001 + +#define NVC797_SET_DEPTH_TEST 0x12cc +#define NVC797_SET_DEPTH_TEST_ENABLE 0:0 +#define NVC797_SET_DEPTH_TEST_ENABLE_FALSE 0x00000000 +#define NVC797_SET_DEPTH_TEST_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_FILL_MODE 0x12d0 +#define NVC797_SET_FILL_MODE_V 31:0 +#define NVC797_SET_FILL_MODE_V_POINT 0x00000001 +#define NVC797_SET_FILL_MODE_V_WIREFRAME 0x00000002 +#define NVC797_SET_FILL_MODE_V_SOLID 0x00000003 + +#define NVC797_SET_SHADE_MODE 0x12d4 +#define NVC797_SET_SHADE_MODE_V 31:0 +#define NVC797_SET_SHADE_MODE_V_FLAT 0x00000001 +#define NVC797_SET_SHADE_MODE_V_GOURAUD 0x00000002 +#define NVC797_SET_SHADE_MODE_V_OGL_FLAT 0x00001D00 +#define NVC797_SET_SHADE_MODE_V_OGL_SMOOTH 0x00001D01 + +#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS 0x12d8 +#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY 5:4 +#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS 0x12dc +#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY 5:4 +#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC797_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC797_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL 0x12e0 +#define NVC797_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT 3:0 +#define NVC797_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1 0x00000000 +#define NVC797_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_2X2 0x00000001 +#define NVC797_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1_VIRTUAL_SAMPLES 0x00000002 + +#define NVC797_SET_BLEND_STATE_PER_TARGET 0x12e4 +#define NVC797_SET_BLEND_STATE_PER_TARGET_ENABLE 0:0 +#define NVC797_SET_BLEND_STATE_PER_TARGET_ENABLE_FALSE 0x00000000 +#define NVC797_SET_BLEND_STATE_PER_TARGET_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_DEPTH_WRITE 0x12e8 +#define NVC797_SET_DEPTH_WRITE_ENABLE 0:0 +#define NVC797_SET_DEPTH_WRITE_ENABLE_FALSE 0x00000000 +#define NVC797_SET_DEPTH_WRITE_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_ALPHA_TEST 0x12ec +#define NVC797_SET_ALPHA_TEST_ENABLE 0:0 +#define NVC797_SET_ALPHA_TEST_ENABLE_FALSE 0x00000000 +#define NVC797_SET_ALPHA_TEST_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_INLINE_INDEX4X8_ALIGN 0x1300 +#define NVC797_SET_INLINE_INDEX4X8_ALIGN_COUNT 29:0 +#define NVC797_SET_INLINE_INDEX4X8_ALIGN_START 31:30 + +#define NVC797_DRAW_INLINE_INDEX4X8 0x1304 +#define NVC797_DRAW_INLINE_INDEX4X8_INDEX0 7:0 +#define NVC797_DRAW_INLINE_INDEX4X8_INDEX1 15:8 +#define NVC797_DRAW_INLINE_INDEX4X8_INDEX2 23:16 +#define NVC797_DRAW_INLINE_INDEX4X8_INDEX3 31:24 
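/*
 * Editor's sketch, not part of the generated header: DRAW_INLINE_INDEX4X8
 * packs four 8-bit vertex indices into one 32-bit method payload using the
 * INDEX0..INDEX3 fields defined above. A minimal illustration of that
 * packing (the helper name nvc797_pack_index4x8 is hypothetical):
 */
#include <stdint.h>

static inline uint32_t nvc797_pack_index4x8(uint8_t i0, uint8_t i1,
                                            uint8_t i2, uint8_t i3)
{
    return ((uint32_t)i0 <<  0) |  /* NVC797_DRAW_INLINE_INDEX4X8_INDEX0  7:0  */
           ((uint32_t)i1 <<  8) |  /* NVC797_DRAW_INLINE_INDEX4X8_INDEX1 15:8  */
           ((uint32_t)i2 << 16) |  /* NVC797_DRAW_INLINE_INDEX4X8_INDEX2 23:16 */
           ((uint32_t)i3 << 24);   /* NVC797_DRAW_INLINE_INDEX4X8_INDEX3 31:24 */
}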
+
+#define NVC797_D3D_SET_CULL_MODE 0x1308
+#define NVC797_D3D_SET_CULL_MODE_V 31:0
+#define NVC797_D3D_SET_CULL_MODE_V_NONE 0x00000001
+#define NVC797_D3D_SET_CULL_MODE_V_CW 0x00000002
+#define NVC797_D3D_SET_CULL_MODE_V_CCW 0x00000003
+
+#define NVC797_SET_DEPTH_FUNC 0x130c
+#define NVC797_SET_DEPTH_FUNC_V 31:0
+#define NVC797_SET_DEPTH_FUNC_V_OGL_NEVER 0x00000200
+#define NVC797_SET_DEPTH_FUNC_V_OGL_LESS 0x00000201
+#define NVC797_SET_DEPTH_FUNC_V_OGL_EQUAL 0x00000202
+#define NVC797_SET_DEPTH_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVC797_SET_DEPTH_FUNC_V_OGL_GREATER 0x00000204
+#define NVC797_SET_DEPTH_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVC797_SET_DEPTH_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVC797_SET_DEPTH_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVC797_SET_DEPTH_FUNC_V_D3D_NEVER 0x00000001
+#define NVC797_SET_DEPTH_FUNC_V_D3D_LESS 0x00000002
+#define NVC797_SET_DEPTH_FUNC_V_D3D_EQUAL 0x00000003
+#define NVC797_SET_DEPTH_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVC797_SET_DEPTH_FUNC_V_D3D_GREATER 0x00000005
+#define NVC797_SET_DEPTH_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVC797_SET_DEPTH_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVC797_SET_DEPTH_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVC797_SET_ALPHA_REF 0x1310
+#define NVC797_SET_ALPHA_REF_V 31:0
+
+#define NVC797_SET_ALPHA_FUNC 0x1314
+#define NVC797_SET_ALPHA_FUNC_V 31:0
+#define NVC797_SET_ALPHA_FUNC_V_OGL_NEVER 0x00000200
+#define NVC797_SET_ALPHA_FUNC_V_OGL_LESS 0x00000201
+#define NVC797_SET_ALPHA_FUNC_V_OGL_EQUAL 0x00000202
+#define NVC797_SET_ALPHA_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVC797_SET_ALPHA_FUNC_V_OGL_GREATER 0x00000204
+#define NVC797_SET_ALPHA_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVC797_SET_ALPHA_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVC797_SET_ALPHA_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVC797_SET_ALPHA_FUNC_V_D3D_NEVER 0x00000001
+#define NVC797_SET_ALPHA_FUNC_V_D3D_LESS 0x00000002
+#define NVC797_SET_ALPHA_FUNC_V_D3D_EQUAL 0x00000003
+#define NVC797_SET_ALPHA_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVC797_SET_ALPHA_FUNC_V_D3D_GREATER 0x00000005
+#define NVC797_SET_ALPHA_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVC797_SET_ALPHA_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVC797_SET_ALPHA_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVC797_SET_DRAW_AUTO_STRIDE 0x1318
+#define NVC797_SET_DRAW_AUTO_STRIDE_V 11:0
+
+#define NVC797_SET_BLEND_CONST_RED 0x131c
+#define NVC797_SET_BLEND_CONST_RED_V 31:0
+
+#define NVC797_SET_BLEND_CONST_GREEN 0x1320
+#define NVC797_SET_BLEND_CONST_GREEN_V 31:0
+
+#define NVC797_SET_BLEND_CONST_BLUE 0x1324
+#define NVC797_SET_BLEND_CONST_BLUE_V 31:0
+
+#define NVC797_SET_BLEND_CONST_ALPHA 0x1328
+#define NVC797_SET_BLEND_CONST_ALPHA_V 31:0
+
+#define NVC797_INVALIDATE_SAMPLER_CACHE 0x1330
+#define NVC797_INVALIDATE_SAMPLER_CACHE_LINES 0:0
+#define NVC797_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000
+#define NVC797_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001
+#define NVC797_INVALIDATE_SAMPLER_CACHE_TAG 25:4
+
+#define NVC797_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334
+#define NVC797_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0
+#define NVC797_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000
+#define NVC797_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001
+#define NVC797_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4
+
+#define NVC797_INVALIDATE_TEXTURE_DATA_CACHE 0x1338
+#define NVC797_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0
+#define NVC797_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000
+#define NVC797_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001
+#define NVC797_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4
+
+#define NVC797_SET_BLEND_SEPARATE_FOR_ALPHA 0x133c
+#define NVC797_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE 0:0
+#define NVC797_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000
+#define NVC797_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_BLEND_COLOR_OP 0x1340
+#define NVC797_SET_BLEND_COLOR_OP_V 31:0
+#define NVC797_SET_BLEND_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVC797_SET_BLEND_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVC797_SET_BLEND_COLOR_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVC797_SET_BLEND_COLOR_OP_V_OGL_MIN 0x00008007
+#define NVC797_SET_BLEND_COLOR_OP_V_OGL_MAX 0x00008008
+#define NVC797_SET_BLEND_COLOR_OP_V_D3D_ADD 0x00000001
+#define NVC797_SET_BLEND_COLOR_OP_V_D3D_SUBTRACT 0x00000002
+#define NVC797_SET_BLEND_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVC797_SET_BLEND_COLOR_OP_V_D3D_MIN 0x00000004
+#define NVC797_SET_BLEND_COLOR_OP_V_D3D_MAX 0x00000005
+
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF 0x1344
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V 31:0
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC797_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF 0x1348
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V 31:0
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC797_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC797_SET_BLEND_ALPHA_OP 0x134c
+#define NVC797_SET_BLEND_ALPHA_OP_V 31:0
+#define NVC797_SET_BLEND_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVC797_SET_BLEND_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVC797_SET_BLEND_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVC797_SET_BLEND_ALPHA_OP_V_OGL_MIN 0x00008007
+#define NVC797_SET_BLEND_ALPHA_OP_V_OGL_MAX 0x00008008
+#define NVC797_SET_BLEND_ALPHA_OP_V_D3D_ADD 0x00000001
+#define NVC797_SET_BLEND_ALPHA_OP_V_D3D_SUBTRACT 0x00000002
+#define NVC797_SET_BLEND_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVC797_SET_BLEND_ALPHA_OP_V_D3D_MIN 0x00000004
+#define NVC797_SET_BLEND_ALPHA_OP_V_D3D_MAX 0x00000005
+
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF 0x1350
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V 31:0
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC797_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC797_SET_GLOBAL_COLOR_KEY 0x1354
+#define NVC797_SET_GLOBAL_COLOR_KEY_ENABLE 0:0
+#define NVC797_SET_GLOBAL_COLOR_KEY_ENABLE_FALSE 0x00000000
+#define NVC797_SET_GLOBAL_COLOR_KEY_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF 0x1358
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V 31:0
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC797_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC797_SET_SINGLE_ROP_CONTROL 0x135c
+#define NVC797_SET_SINGLE_ROP_CONTROL_ENABLE 0:0
+#define NVC797_SET_SINGLE_ROP_CONTROL_ENABLE_FALSE 0x00000000
+#define NVC797_SET_SINGLE_ROP_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_BLEND(i) (0x1360+(i)*4)
+#define NVC797_SET_BLEND_ENABLE 0:0
+#define NVC797_SET_BLEND_ENABLE_FALSE 0x00000000
+#define NVC797_SET_BLEND_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_STENCIL_TEST 0x1380
+#define NVC797_SET_STENCIL_TEST_ENABLE 0:0
+#define NVC797_SET_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NVC797_SET_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_STENCIL_OP_FAIL 0x1384
+#define NVC797_SET_STENCIL_OP_FAIL_V 31:0
+#define NVC797_SET_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NVC797_SET_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NVC797_SET_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NVC797_SET_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC797_SET_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC797_SET_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NVC797_SET_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NVC797_SET_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NVC797_SET_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NVC797_SET_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NVC797_SET_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NVC797_SET_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NVC797_SET_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NVC797_SET_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NVC797_SET_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NVC797_SET_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVC797_SET_STENCIL_OP_ZFAIL 0x1388
+#define NVC797_SET_STENCIL_OP_ZFAIL_V 31:0
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVC797_SET_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVC797_SET_STENCIL_OP_ZPASS 0x138c
+#define NVC797_SET_STENCIL_OP_ZPASS_V 31:0
+#define NVC797_SET_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVC797_SET_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVC797_SET_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVC797_SET_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVC797_SET_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NVC797_SET_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NVC797_SET_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NVC797_SET_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NVC797_SET_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NVC797_SET_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NVC797_SET_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NVC797_SET_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NVC797_SET_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NVC797_SET_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NVC797_SET_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NVC797_SET_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NVC797_SET_STENCIL_FUNC 0x1390
+#define NVC797_SET_STENCIL_FUNC_V 31:0
+#define NVC797_SET_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NVC797_SET_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NVC797_SET_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NVC797_SET_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVC797_SET_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NVC797_SET_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVC797_SET_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVC797_SET_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVC797_SET_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NVC797_SET_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NVC797_SET_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NVC797_SET_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVC797_SET_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVC797_SET_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVC797_SET_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVC797_SET_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVC797_SET_STENCIL_FUNC_REF 0x1394
+#define NVC797_SET_STENCIL_FUNC_REF_V 7:0
+
+#define NVC797_SET_STENCIL_FUNC_MASK 0x1398
+#define NVC797_SET_STENCIL_FUNC_MASK_V 7:0
+
+#define NVC797_SET_STENCIL_MASK 0x139c
+#define NVC797_SET_STENCIL_MASK_V 7:0
+
+#define NVC797_SET_DRAW_AUTO_START 0x13a4
+#define NVC797_SET_DRAW_AUTO_START_BYTE_COUNT 31:0
+
+#define NVC797_SET_PS_SATURATE 0x13a8
+#define NVC797_SET_PS_SATURATE_OUTPUT0 0:0
+#define NVC797_SET_PS_SATURATE_OUTPUT0_FALSE 0x00000000
+#define NVC797_SET_PS_SATURATE_OUTPUT0_TRUE 0x00000001
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE0 1:1
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE0_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE0_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC797_SET_PS_SATURATE_OUTPUT1 4:4
+#define NVC797_SET_PS_SATURATE_OUTPUT1_FALSE 0x00000000
+#define NVC797_SET_PS_SATURATE_OUTPUT1_TRUE 0x00000001
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE1 5:5
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE1_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE1_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC797_SET_PS_SATURATE_OUTPUT2 8:8
+#define NVC797_SET_PS_SATURATE_OUTPUT2_FALSE 0x00000000
+#define NVC797_SET_PS_SATURATE_OUTPUT2_TRUE 0x00000001
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE2 9:9
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE2_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE2_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC797_SET_PS_SATURATE_OUTPUT3 12:12
+#define NVC797_SET_PS_SATURATE_OUTPUT3_FALSE 0x00000000
+#define NVC797_SET_PS_SATURATE_OUTPUT3_TRUE 0x00000001
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE3 13:13
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE3_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE3_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC797_SET_PS_SATURATE_OUTPUT4 16:16
+#define NVC797_SET_PS_SATURATE_OUTPUT4_FALSE 0x00000000
+#define NVC797_SET_PS_SATURATE_OUTPUT4_TRUE 0x00000001
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE4 17:17
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE4_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE4_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC797_SET_PS_SATURATE_OUTPUT5 20:20
+#define NVC797_SET_PS_SATURATE_OUTPUT5_FALSE 0x00000000
+#define NVC797_SET_PS_SATURATE_OUTPUT5_TRUE 0x00000001
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE5 21:21
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE5_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE5_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC797_SET_PS_SATURATE_OUTPUT6 24:24
+#define NVC797_SET_PS_SATURATE_OUTPUT6_FALSE 0x00000000
+#define NVC797_SET_PS_SATURATE_OUTPUT6_TRUE 0x00000001
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE6 25:25
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE6_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE6_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVC797_SET_PS_SATURATE_OUTPUT7 28:28
+#define NVC797_SET_PS_SATURATE_OUTPUT7_FALSE 0x00000000
+#define NVC797_SET_PS_SATURATE_OUTPUT7_TRUE 0x00000001
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE7 29:29
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE7_ZERO_TO_PLUS_ONE 0x00000000
+#define NVC797_SET_PS_SATURATE_CLAMP_RANGE7_MINUS_ONE_TO_PLUS_ONE 0x00000001
+
+#define NVC797_SET_WINDOW_ORIGIN 0x13ac
+#define NVC797_SET_WINDOW_ORIGIN_MODE 0:0
+#define NVC797_SET_WINDOW_ORIGIN_MODE_UPPER_LEFT 0x00000000
+#define NVC797_SET_WINDOW_ORIGIN_MODE_LOWER_LEFT 0x00000001
+#define NVC797_SET_WINDOW_ORIGIN_FLIP_Y 4:4
+#define NVC797_SET_WINDOW_ORIGIN_FLIP_Y_FALSE 0x00000000
+#define NVC797_SET_WINDOW_ORIGIN_FLIP_Y_TRUE 0x00000001
+
+#define NVC797_SET_LINE_WIDTH_FLOAT 0x13b0
+#define NVC797_SET_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVC797_SET_ALIASED_LINE_WIDTH_FLOAT 0x13b4
+#define NVC797_SET_ALIASED_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVC797_SET_LINE_MULTISAMPLE_OVERRIDE 0x1418
+#define NVC797_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE 0:0
+#define NVC797_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_FALSE 0x00000000
+#define NVC797_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_TRUE 0x00000001
+
+#define NVC797_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424
+#define NVC797_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0
+#define NVC797_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVC797_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVC797_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4
+
+#define NVC797_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x1428
+#define NVC797_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0
+#define NVC797_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVC797_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVC797_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4
+
+#define NVC797_SET_GLOBAL_BASE_VERTEX_INDEX 0x1434
+#define NVC797_SET_GLOBAL_BASE_VERTEX_INDEX_V 31:0
+
+#define NVC797_SET_GLOBAL_BASE_INSTANCE_INDEX 0x1438
+#define NVC797_SET_GLOBAL_BASE_INSTANCE_INDEX_V 31:0
+
+#define NVC797_SET_PS_WARP_WATERMARKS 0x1450
+#define NVC797_SET_PS_WARP_WATERMARKS_LOW 15:0
+#define NVC797_SET_PS_WARP_WATERMARKS_HIGH 31:16
+
+#define NVC797_SET_PS_REGISTER_WATERMARKS 0x1454
+#define NVC797_SET_PS_REGISTER_WATERMARKS_LOW 15:0
+#define NVC797_SET_PS_REGISTER_WATERMARKS_HIGH 31:16
+
+#define NVC797_STORE_ZCULL 0x1464
+#define NVC797_STORE_ZCULL_V 0:0
+
+#define NVC797_SET_ITERATED_BLEND_CONSTANT_RED(j) (0x1480+(j)*16)
+#define NVC797_SET_ITERATED_BLEND_CONSTANT_RED_V 15:0
+
+#define NVC797_SET_ITERATED_BLEND_CONSTANT_GREEN(j) (0x1484+(j)*16)
+#define NVC797_SET_ITERATED_BLEND_CONSTANT_GREEN_V 15:0
+
+#define NVC797_SET_ITERATED_BLEND_CONSTANT_BLUE(j) (0x1488+(j)*16)
+#define NVC797_SET_ITERATED_BLEND_CONSTANT_BLUE_V 15:0
+
+#define NVC797_LOAD_ZCULL 0x1500
+#define NVC797_LOAD_ZCULL_V 0:0
+
+#define NVC797_SET_SURFACE_CLIP_ID_HEIGHT 0x1504
+#define NVC797_SET_SURFACE_CLIP_ID_HEIGHT_V 31:0
+
+#define NVC797_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL 0x1508
+#define NVC797_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVC797_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVC797_SET_CLIP_ID_CLEAR_RECT_VERTICAL 0x150c
+#define NVC797_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVC797_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVC797_SET_USER_CLIP_ENABLE 0x1510
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE0 0:0
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE0_FALSE 0x00000000
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE0_TRUE 0x00000001
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE1 1:1
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE1_FALSE 0x00000000
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE1_TRUE 0x00000001
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE2 2:2
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE2_FALSE 0x00000000
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE2_TRUE 0x00000001
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE3 3:3
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE3_FALSE 0x00000000
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE3_TRUE 0x00000001
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE4 4:4
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE4_FALSE 0x00000000
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE4_TRUE 0x00000001
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE5 5:5
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE5_FALSE 0x00000000
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE5_TRUE 0x00000001
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE6 6:6
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE6_FALSE 0x00000000
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE6_TRUE 0x00000001
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE7 7:7
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE7_FALSE 0x00000000
+#define NVC797_SET_USER_CLIP_ENABLE_PLANE7_TRUE 0x00000001
+
+#define NVC797_SET_ZPASS_PIXEL_COUNT 0x1514
+#define NVC797_SET_ZPASS_PIXEL_COUNT_ENABLE 0:0
+#define NVC797_SET_ZPASS_PIXEL_COUNT_ENABLE_FALSE 0x00000000
+#define NVC797_SET_ZPASS_PIXEL_COUNT_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_POINT_SIZE 0x1518
+#define NVC797_SET_POINT_SIZE_V 31:0
+
+#define NVC797_SET_ZCULL_STATS 0x151c
+#define NVC797_SET_ZCULL_STATS_ENABLE 0:0
+#define NVC797_SET_ZCULL_STATS_ENABLE_FALSE 0x00000000
+#define NVC797_SET_ZCULL_STATS_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_POINT_SPRITE 0x1520
+#define NVC797_SET_POINT_SPRITE_ENABLE 0:0
+#define NVC797_SET_POINT_SPRITE_ENABLE_FALSE 0x00000000
+#define NVC797_SET_POINT_SPRITE_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_SHADER_EXCEPTIONS 0x1528
+#define NVC797_SET_SHADER_EXCEPTIONS_ENABLE 0:0
+#define NVC797_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000
+#define NVC797_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001
+
+#define NVC797_CLEAR_REPORT_VALUE 0x1530
+#define NVC797_CLEAR_REPORT_VALUE_TYPE 4:0
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_DA_VERTICES_GENERATED 0x00000012
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_DA_PRIMITIVES_GENERATED 0x00000013
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_VS_INVOCATIONS 0x00000015
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_TI_INVOCATIONS 0x00000016
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_TS_INVOCATIONS 0x00000017
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_TS_PRIMITIVES_GENERATED 0x00000018
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_GS_INVOCATIONS 0x0000001A
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_GS_PRIMITIVES_GENERATED 0x0000001B
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_VTG_PRIMITIVES_OUT 0x0000001F
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_SUCCEEDED 0x00000010
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_NEEDED 0x00000011
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000003
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_CLIPPER_INVOCATIONS 0x0000001C
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_CLIPPER_PRIMITIVES_GENERATED 0x0000001D
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_ZCULL_STATS 0x00000002
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_PS_INVOCATIONS 0x0000001E
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_ZPASS_PIXEL_CNT 0x00000001
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_ALPHA_BETA_CLOCKS 0x00000004
+#define NVC797_CLEAR_REPORT_VALUE_TYPE_SCG_CLOCKS 0x00000009
+
+#define NVC797_SET_ANTI_ALIAS_ENABLE 0x1534
+#define NVC797_SET_ANTI_ALIAS_ENABLE_V 0:0
+#define NVC797_SET_ANTI_ALIAS_ENABLE_V_FALSE 0x00000000
+#define NVC797_SET_ANTI_ALIAS_ENABLE_V_TRUE 0x00000001
+
+#define NVC797_SET_ZT_SELECT 0x1538
+#define NVC797_SET_ZT_SELECT_TARGET_COUNT 0:0
+
+#define NVC797_SET_ANTI_ALIAS_ALPHA_CONTROL 0x153c
+#define NVC797_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE 0:0
+#define NVC797_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_DISABLE 0x00000000
+#define NVC797_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_ENABLE 0x00000001
+#define NVC797_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE 4:4
+#define NVC797_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_DISABLE 0x00000000
+#define NVC797_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_ENABLE 0x00000001
+
+#define NVC797_SET_RENDER_ENABLE_A 0x1550
+#define NVC797_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0
+
+#define NVC797_SET_RENDER_ENABLE_B 0x1554
+#define NVC797_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0
+
+#define NVC797_SET_RENDER_ENABLE_C 0x1558
+#define NVC797_SET_RENDER_ENABLE_C_MODE 2:0
+#define NVC797_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000
+#define NVC797_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001
+#define NVC797_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002
+#define NVC797_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003
+#define NVC797_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NVC797_SET_TEX_SAMPLER_POOL_A 0x155c
+#define NVC797_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVC797_SET_TEX_SAMPLER_POOL_B 0x1560
+#define NVC797_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVC797_SET_TEX_SAMPLER_POOL_C 0x1564
+#define NVC797_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0
+
+#define NVC797_SET_SLOPE_SCALE_DEPTH_BIAS 0x156c
+#define NVC797_SET_SLOPE_SCALE_DEPTH_BIAS_V 31:0
+
+#define NVC797_SET_ANTI_ALIASED_LINE 0x1570
+#define NVC797_SET_ANTI_ALIASED_LINE_ENABLE 0:0
+#define NVC797_SET_ANTI_ALIASED_LINE_ENABLE_FALSE 0x00000000
+#define NVC797_SET_ANTI_ALIASED_LINE_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_TEX_HEADER_POOL_A 0x1574
+#define NVC797_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVC797_SET_TEX_HEADER_POOL_B 0x1578
+#define NVC797_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVC797_SET_TEX_HEADER_POOL_C 0x157c
+#define NVC797_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0
+
+#define NVC797_SET_ACTIVE_ZCULL_REGION 0x1590
+#define NVC797_SET_ACTIVE_ZCULL_REGION_ID 5:0
+
+#define NVC797_SET_TWO_SIDED_STENCIL_TEST 0x1594
+#define NVC797_SET_TWO_SIDED_STENCIL_TEST_ENABLE 0:0
+#define NVC797_SET_TWO_SIDED_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NVC797_SET_TWO_SIDED_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_BACK_STENCIL_OP_FAIL 0x1598
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V 31:0
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NVC797_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL 0x159c
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V 31:0
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVC797_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS 0x15a0
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V 31:0
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NVC797_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NVC797_SET_BACK_STENCIL_FUNC 0x15a4
+#define NVC797_SET_BACK_STENCIL_FUNC_V 31:0
+#define NVC797_SET_BACK_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NVC797_SET_BACK_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NVC797_SET_BACK_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NVC797_SET_BACK_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVC797_SET_BACK_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NVC797_SET_BACK_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVC797_SET_BACK_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVC797_SET_BACK_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVC797_SET_BACK_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NVC797_SET_BACK_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NVC797_SET_BACK_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NVC797_SET_BACK_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVC797_SET_BACK_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVC797_SET_BACK_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVC797_SET_BACK_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVC797_SET_BACK_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVC797_SET_SRGB_WRITE 0x15b8
+#define NVC797_SET_SRGB_WRITE_ENABLE 0:0
+#define NVC797_SET_SRGB_WRITE_ENABLE_FALSE 0x00000000
+#define NVC797_SET_SRGB_WRITE_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_DEPTH_BIAS 0x15bc
+#define NVC797_SET_DEPTH_BIAS_V 31:0
+
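/*
 * Editor's sketch, not part of the generated header: every "HI:LO" pair in
 * this class header names a bitfield within a 32-bit method payload (e.g.
 * NVC797_SET_ZT_SIZE_C_THIRD_DIMENSION is bits 15:0 of method 0x1230).
 * A generic mask/insert helper over such pairs might look like the code
 * below; FIELD_MASK/FIELD_INSERT and nvc797_zt_size_c_example are
 * illustrative names only, and the driver's own DRF-style macros would
 * typically serve this role instead.
 */
#include <stdint.h>

#define FIELD_MASK(hi, lo) \
    ((uint32_t)((0xFFFFFFFFu >> (31 - (hi))) & (0xFFFFFFFFu << (lo))))
#define FIELD_INSERT(v, hi, lo, x) \
    (((v) & ~FIELD_MASK(hi, lo)) | (((uint32_t)(x) << (lo)) & FIELD_MASK(hi, lo)))

/* Example: a SET_ZT_SIZE_C payload describing 6 array slices. */
static inline uint32_t nvc797_zt_size_c_example(void)
{
    uint32_t v = 0;
    v = FIELD_INSERT(v, 15, 0, 6);   /* NVC797_SET_ZT_SIZE_C_THIRD_DIMENSION 15:0 */
    v = FIELD_INSERT(v, 16, 16, 0);  /* NVC797_SET_ZT_SIZE_C_CONTROL 16:16 =
                                        ..._THIRD_DIMENSION_DEFINES_ARRAY_SIZE */
    return v;                        /* written to method offset 0x1230 */
}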
+#define NVC797_SET_ZCULL_REGION_FORMAT 0x15c8
+#define NVC797_SET_ZCULL_REGION_FORMAT_TYPE 3:0
+#define NVC797_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X4 0x00000000
+#define NVC797_SET_ZCULL_REGION_FORMAT_TYPE_ZS_4X4 0x00000001
+#define NVC797_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X2 0x00000002
+#define NVC797_SET_ZCULL_REGION_FORMAT_TYPE_Z_2X4 0x00000003
+#define NVC797_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X8_4X4 0x00000004
+#define NVC797_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_4X2 0x00000005
+#define NVC797_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_2X4 0x00000006
+#define NVC797_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X16_4X8 0x00000007
+#define NVC797_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_2X2 0x00000008
+#define NVC797_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_4X2 0x00000009
+#define NVC797_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_2X4 0x0000000A
+#define NVC797_SET_ZCULL_REGION_FORMAT_TYPE_ZS_8X8_2X2 0x0000000B
+#define NVC797_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_1X1 0x0000000C
+
+#define NVC797_SET_RT_LAYER 0x15cc
+#define NVC797_SET_RT_LAYER_V 15:0
+#define NVC797_SET_RT_LAYER_CONTROL 16:16
+#define NVC797_SET_RT_LAYER_CONTROL_V_SELECTS_LAYER 0x00000000
+#define NVC797_SET_RT_LAYER_CONTROL_GEOMETRY_SHADER_SELECTS_LAYER 0x00000001
+
+#define NVC797_SET_ANTI_ALIAS 0x15d0
+#define NVC797_SET_ANTI_ALIAS_SAMPLES 3:0
+#define NVC797_SET_ANTI_ALIAS_SAMPLES_MODE_1X1 0x00000000
+#define NVC797_SET_ANTI_ALIAS_SAMPLES_MODE_2X1 0x00000001
+#define NVC797_SET_ANTI_ALIAS_SAMPLES_MODE_2X2 0x00000002
+#define NVC797_SET_ANTI_ALIAS_SAMPLES_MODE_4X2 0x00000003
+#define NVC797_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVC797_SET_ANTI_ALIAS_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVC797_SET_ANTI_ALIAS_SAMPLES_MODE_4X4 0x00000006
+#define NVC797_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_4 0x00000008
+#define NVC797_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_12 0x00000009
+#define NVC797_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_8 0x0000000A
+#define NVC797_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_24 0x0000000B
+
+#define NVC797_SET_EDGE_FLAG 0x15e4
+#define NVC797_SET_EDGE_FLAG_V 0:0
+#define NVC797_SET_EDGE_FLAG_V_FALSE 0x00000000
+#define NVC797_SET_EDGE_FLAG_V_TRUE 0x00000001
+
+#define NVC797_DRAW_INLINE_INDEX 0x15e8
+#define NVC797_DRAW_INLINE_INDEX_V 31:0
+
+#define NVC797_SET_INLINE_INDEX2X16_ALIGN 0x15ec
+#define NVC797_SET_INLINE_INDEX2X16_ALIGN_COUNT 30:0
+#define NVC797_SET_INLINE_INDEX2X16_ALIGN_START_ODD 31:31
+#define NVC797_SET_INLINE_INDEX2X16_ALIGN_START_ODD_FALSE 0x00000000
+#define NVC797_SET_INLINE_INDEX2X16_ALIGN_START_ODD_TRUE 0x00000001
+
+#define NVC797_DRAW_INLINE_INDEX2X16 0x15f0
+#define NVC797_DRAW_INLINE_INDEX2X16_EVEN 15:0
+#define NVC797_DRAW_INLINE_INDEX2X16_ODD 31:16
+
+#define NVC797_SET_VERTEX_GLOBAL_BASE_OFFSET_A 0x15f4
+#define NVC797_SET_VERTEX_GLOBAL_BASE_OFFSET_A_UPPER 7:0
+
+#define NVC797_SET_VERTEX_GLOBAL_BASE_OFFSET_B 0x15f8
+#define NVC797_SET_VERTEX_GLOBAL_BASE_OFFSET_B_LOWER 31:0
+
+#define NVC797_SET_ZCULL_REGION_PIXEL_OFFSET_A 0x15fc
+#define NVC797_SET_ZCULL_REGION_PIXEL_OFFSET_A_WIDTH 15:0
+
+#define NVC797_SET_ZCULL_REGION_PIXEL_OFFSET_B 0x1600
+#define NVC797_SET_ZCULL_REGION_PIXEL_OFFSET_B_HEIGHT 15:0
+
+#define NVC797_SET_POINT_SPRITE_SELECT 0x1604
+#define NVC797_SET_POINT_SPRITE_SELECT_RMODE 1:0
+#define NVC797_SET_POINT_SPRITE_SELECT_RMODE_ZERO 0x00000000
+#define NVC797_SET_POINT_SPRITE_SELECT_RMODE_FROM_R 0x00000001
+#define NVC797_SET_POINT_SPRITE_SELECT_RMODE_FROM_S 0x00000002
+#define NVC797_SET_POINT_SPRITE_SELECT_ORIGIN 2:2
+#define NVC797_SET_POINT_SPRITE_SELECT_ORIGIN_BOTTOM 0x00000000
+#define NVC797_SET_POINT_SPRITE_SELECT_ORIGIN_TOP 0x00000001
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE0 3:3
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE0_PASSTHROUGH 0x00000000
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE0_GENERATE 0x00000001
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE1 4:4
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE1_PASSTHROUGH 0x00000000
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE1_GENERATE 0x00000001
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE2 5:5
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE2_PASSTHROUGH 0x00000000
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE2_GENERATE 0x00000001
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE3 6:6
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE3_PASSTHROUGH 0x00000000
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE3_GENERATE 0x00000001
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE4 7:7
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE4_PASSTHROUGH 0x00000000
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE4_GENERATE 0x00000001
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE5 8:8
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE5_PASSTHROUGH 0x00000000
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE5_GENERATE 0x00000001
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE6 9:9
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE6_PASSTHROUGH 0x00000000
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE6_GENERATE 0x00000001
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE7 10:10
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE7_PASSTHROUGH 0x00000000
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE7_GENERATE 0x00000001
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE8 11:11
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE8_PASSTHROUGH 0x00000000
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE8_GENERATE 0x00000001
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE9 12:12
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE9_PASSTHROUGH 0x00000000
+#define NVC797_SET_POINT_SPRITE_SELECT_TEXTURE9_GENERATE 0x00000001
+
+#define NVC797_SET_ATTRIBUTE_DEFAULT 0x1610
+#define NVC797_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE 0:0
+#define NVC797_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_0001 0x00000000
+#define NVC797_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_1111 0x00000001
+#define NVC797_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR 1:1
+#define NVC797_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0000 0x00000000
+#define NVC797_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0001 0x00000001
+#define NVC797_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR 2:2
+#define NVC797_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0000 0x00000000
+#define NVC797_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0001 0x00000001
+#define NVC797_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE 3:3
+#define NVC797_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0000 0x00000000
+#define NVC797_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0001 0x00000001
+#define NVC797_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0 4:4
+#define NVC797_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_0001 0x00000000
+#define NVC797_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_1111 0x00000001
+#define NVC797_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15 5:5
+#define NVC797_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0000 0x00000000
+#define NVC797_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0001 0x00000001
+
+#define NVC797_END 0x1614
+#define NVC797_END_V 0:0
+
+#define NVC797_BEGIN 0x1618
+#define NVC797_BEGIN_OP 15:0
+#define NVC797_BEGIN_OP_POINTS 0x00000000
+#define NVC797_BEGIN_OP_LINES 0x00000001
+#define NVC797_BEGIN_OP_LINE_LOOP 0x00000002
+#define NVC797_BEGIN_OP_LINE_STRIP 0x00000003
+#define NVC797_BEGIN_OP_TRIANGLES 0x00000004
+#define NVC797_BEGIN_OP_TRIANGLE_STRIP 0x00000005
+#define NVC797_BEGIN_OP_TRIANGLE_FAN 0x00000006
+#define NVC797_BEGIN_OP_QUADS 0x00000007
+#define NVC797_BEGIN_OP_QUAD_STRIP 0x00000008
+#define NVC797_BEGIN_OP_POLYGON 0x00000009
+#define NVC797_BEGIN_OP_LINELIST_ADJCY 0x0000000A
+#define NVC797_BEGIN_OP_LINESTRIP_ADJCY 0x0000000B
+#define NVC797_BEGIN_OP_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC797_BEGIN_OP_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC797_BEGIN_OP_PATCH 0x0000000E
+#define NVC797_BEGIN_PRIMITIVE_ID 24:24
+#define NVC797_BEGIN_PRIMITIVE_ID_FIRST 0x00000000
+#define NVC797_BEGIN_PRIMITIVE_ID_UNCHANGED 0x00000001
+#define NVC797_BEGIN_INSTANCE_ID 27:26
+#define NVC797_BEGIN_INSTANCE_ID_FIRST 0x00000000
+#define NVC797_BEGIN_INSTANCE_ID_SUBSEQUENT 0x00000001
+#define NVC797_BEGIN_INSTANCE_ID_UNCHANGED 0x00000002
+#define NVC797_BEGIN_SPLIT_MODE 30:29
+#define NVC797_BEGIN_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000
+#define NVC797_BEGIN_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001
+#define NVC797_BEGIN_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002
+#define NVC797_BEGIN_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003
+#define NVC797_BEGIN_INSTANCE_ITERATE_ENABLE 31:31
+#define NVC797_BEGIN_INSTANCE_ITERATE_ENABLE_FALSE 0x00000000
+#define NVC797_BEGIN_INSTANCE_ITERATE_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_VERTEX_ID_COPY 0x161c
+#define NVC797_SET_VERTEX_ID_COPY_ENABLE 0:0
+#define NVC797_SET_VERTEX_ID_COPY_ENABLE_FALSE 0x00000000
+#define NVC797_SET_VERTEX_ID_COPY_ENABLE_TRUE 0x00000001
+#define NVC797_SET_VERTEX_ID_COPY_ATTRIBUTE_SLOT 11:4
+
+#define NVC797_ADD_TO_PRIMITIVE_ID 0x1620
+#define NVC797_ADD_TO_PRIMITIVE_ID_V 31:0
+
+#define NVC797_LOAD_PRIMITIVE_ID 0x1624
+#define NVC797_LOAD_PRIMITIVE_ID_V 31:0
+
+#define NVC797_SET_SHADER_BASED_CULL 0x162c
+#define NVC797_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE 1:1
+#define NVC797_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_FALSE 0x00000000
+#define NVC797_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_TRUE 0x00000001
+#define NVC797_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE 0:0
+#define NVC797_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_FALSE 0x00000000
+#define NVC797_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_CLASS_VERSION 0x1638
+#define NVC797_SET_CLASS_VERSION_CURRENT 15:0
+#define NVC797_SET_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC797_SET_DA_PRIMITIVE_RESTART 0x1644
+#define NVC797_SET_DA_PRIMITIVE_RESTART_ENABLE 0:0
+#define NVC797_SET_DA_PRIMITIVE_RESTART_ENABLE_FALSE 0x00000000
+#define NVC797_SET_DA_PRIMITIVE_RESTART_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_DA_PRIMITIVE_RESTART_INDEX 0x1648
+#define NVC797_SET_DA_PRIMITIVE_RESTART_INDEX_V 31:0
+
+#define NVC797_SET_DA_OUTPUT 0x164c
+#define NVC797_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START 12:12
+#define NVC797_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_FALSE 0x00000000
+#define NVC797_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_TRUE 0x00000001
+
+#define NVC797_SET_ANTI_ALIASED_POINT 0x1658
+#define NVC797_SET_ANTI_ALIASED_POINT_ENABLE 0:0
+#define NVC797_SET_ANTI_ALIASED_POINT_ENABLE_FALSE 0x00000000
+#define NVC797_SET_ANTI_ALIASED_POINT_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_POINT_CENTER_MODE 0x165c
+#define NVC797_SET_POINT_CENTER_MODE_V 31:0
+#define NVC797_SET_POINT_CENTER_MODE_V_OGL 0x00000000
+#define NVC797_SET_POINT_CENTER_MODE_V_D3D 0x00000001
+
+#define NVC797_SET_LINE_SMOOTH_PARAMETERS 0x1668
+#define NVC797_SET_LINE_SMOOTH_PARAMETERS_FALLOFF 31:0
+#define NVC797_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_00 0x00000000
+#define NVC797_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_33 0x00000001
+#define NVC797_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_60 0x00000002
+
+#define NVC797_SET_LINE_STIPPLE 0x166c
+#define NVC797_SET_LINE_STIPPLE_ENABLE 0:0
+#define NVC797_SET_LINE_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVC797_SET_LINE_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_LINE_SMOOTH_EDGE_TABLE(i) (0x1670+(i)*4)
+#define NVC797_SET_LINE_SMOOTH_EDGE_TABLE_V0 7:0
+#define NVC797_SET_LINE_SMOOTH_EDGE_TABLE_V1 15:8
+#define NVC797_SET_LINE_SMOOTH_EDGE_TABLE_V2 23:16
+#define NVC797_SET_LINE_SMOOTH_EDGE_TABLE_V3 31:24
+
+#define NVC797_SET_LINE_STIPPLE_PARAMETERS 0x1680
+#define NVC797_SET_LINE_STIPPLE_PARAMETERS_FACTOR 7:0
+#define NVC797_SET_LINE_STIPPLE_PARAMETERS_PATTERN 23:8
+
+#define NVC797_SET_PROVOKING_VERTEX 0x1684
+#define NVC797_SET_PROVOKING_VERTEX_V 0:0
+#define NVC797_SET_PROVOKING_VERTEX_V_FIRST 0x00000000
+#define NVC797_SET_PROVOKING_VERTEX_V_LAST 0x00000001
+
+#define NVC797_SET_TWO_SIDED_LIGHT 0x1688
+#define NVC797_SET_TWO_SIDED_LIGHT_ENABLE 0:0
+#define NVC797_SET_TWO_SIDED_LIGHT_ENABLE_FALSE 0x00000000
+#define NVC797_SET_TWO_SIDED_LIGHT_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_POLYGON_STIPPLE 0x168c
+#define NVC797_SET_POLYGON_STIPPLE_ENABLE 0:0
+#define NVC797_SET_POLYGON_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVC797_SET_POLYGON_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVC797_SET_SHADER_CONTROL 0x1690
+#define NVC797_SET_SHADER_CONTROL_DEFAULT_PARTIAL 0:0
+#define NVC797_SET_SHADER_CONTROL_DEFAULT_PARTIAL_ZERO 0x00000000
+#define NVC797_SET_SHADER_CONTROL_DEFAULT_PARTIAL_INFINITY 0x00000001
+#define NVC797_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR 1:1
+#define NVC797_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_LEGACY 0x00000000
+#define NVC797_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_FP64_COMPATIBLE 0x00000001
+#define NVC797_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR 2:2
+#define NVC797_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_ZERO 0x00000000
+#define NVC797_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_INDEFINITE 0x00000001
+
+#define NVC797_CHECK_CLASS_VERSION 0x16a0
+#define NVC797_CHECK_CLASS_VERSION_CURRENT 15:0
+#define NVC797_CHECK_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC797_SET_SPH_VERSION 0x16a4
+#define NVC797_SET_SPH_VERSION_CURRENT 15:0
+#define NVC797_SET_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC797_CHECK_SPH_VERSION 0x16a8
+#define NVC797_CHECK_SPH_VERSION_CURRENT 15:0
+#define NVC797_CHECK_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC797_SET_ALPHA_TO_COVERAGE_OVERRIDE 0x16b4
+#define NVC797_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE 0:0
+#define NVC797_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000
+#define NVC797_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001
+#define NVC797_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT 1:1
+#define NVC797_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_DISABLE 0x00000000
+#define NVC797_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_ENABLE 0x00000001
+
+#define NVC797_SET_SCG_GRAPHICS_PRIORITY 0x16bc
+#define NVC797_SET_SCG_GRAPHICS_PRIORITY_PRIORITY 5:0
+
+#define NVC797_SET_SCG_GRAPHICS_SCHEDULING_PARAMETERS(i) (0x16c0+(i)*4)
+#define NVC797_SET_SCG_GRAPHICS_SCHEDULING_PARAMETERS_V 31:0
+
+#define NVC797_SET_POLYGON_STIPPLE_PATTERN(i) (0x1700+(i)*4)
+#define NVC797_SET_POLYGON_STIPPLE_PATTERN_V 31:0
+
+#define NVC797_SET_AAM_VERSION 0x1790
+#define NVC797_SET_AAM_VERSION_CURRENT 15:0
+#define NVC797_SET_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC797_CHECK_AAM_VERSION 0x1794
+#define NVC797_CHECK_AAM_VERSION_CURRENT 15:0
+#define NVC797_CHECK_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC797_SET_ZT_LAYER 0x179c
+#define NVC797_SET_ZT_LAYER_OFFSET 15:0
+
+#define NVC797_SET_INDEX_BUFFER_A 0x17c8
+#define NVC797_SET_INDEX_BUFFER_A_ADDRESS_UPPER 7:0
+
+#define NVC797_SET_INDEX_BUFFER_B 0x17cc
+#define NVC797_SET_INDEX_BUFFER_B_ADDRESS_LOWER 31:0
+
+#define NVC797_SET_INDEX_BUFFER_E 0x17d8
+#define NVC797_SET_INDEX_BUFFER_E_INDEX_SIZE 1:0
+#define NVC797_SET_INDEX_BUFFER_E_INDEX_SIZE_ONE_BYTE 0x00000000
+#define NVC797_SET_INDEX_BUFFER_E_INDEX_SIZE_TWO_BYTES 0x00000001
+#define NVC797_SET_INDEX_BUFFER_E_INDEX_SIZE_FOUR_BYTES 0x00000002
+
+#define NVC797_SET_INDEX_BUFFER_F 0x17dc
+#define NVC797_SET_INDEX_BUFFER_F_FIRST 31:0
+
+#define NVC797_DRAW_INDEX_BUFFER 0x17e0
+#define NVC797_DRAW_INDEX_BUFFER_COUNT 31:0
+
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST 0x17e4
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST 0x17e8
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST 0x17ec
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f0
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC797_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f4
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC797_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f8
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define 
NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC797_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NVC797_SET_DEPTH_BIAS_CLAMP 0x187c +#define NVC797_SET_DEPTH_BIAS_CLAMP_V 31:0 + +#define NVC797_SET_VERTEX_STREAM_INSTANCE_A(i) (0x1880+(i)*4) +#define NVC797_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED 0:0 +#define NVC797_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_FALSE 0x00000000 +#define NVC797_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_TRUE 0x00000001 + +#define NVC797_SET_VERTEX_STREAM_INSTANCE_B(i) (0x18c0+(i)*4) +#define NVC797_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED 0:0 +#define NVC797_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_FALSE 0x00000000 +#define NVC797_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_TRUE 0x00000001 + +#define NVC797_SET_ATTRIBUTE_POINT_SIZE 0x1910 +#define NVC797_SET_ATTRIBUTE_POINT_SIZE_ENABLE 0:0 +#define NVC797_SET_ATTRIBUTE_POINT_SIZE_ENABLE_FALSE 0x00000000 +#define NVC797_SET_ATTRIBUTE_POINT_SIZE_ENABLE_TRUE 0x00000001 +#define NVC797_SET_ATTRIBUTE_POINT_SIZE_SLOT 11:4 + +#define NVC797_OGL_SET_CULL 0x1918 +#define NVC797_OGL_SET_CULL_ENABLE 0:0 +#define NVC797_OGL_SET_CULL_ENABLE_FALSE 0x00000000 +#define NVC797_OGL_SET_CULL_ENABLE_TRUE 0x00000001 + +#define NVC797_OGL_SET_FRONT_FACE 0x191c +#define NVC797_OGL_SET_FRONT_FACE_V 31:0 +#define NVC797_OGL_SET_FRONT_FACE_V_CW 0x00000900 +#define NVC797_OGL_SET_FRONT_FACE_V_CCW 0x00000901 + +#define NVC797_OGL_SET_CULL_FACE 0x1920 +#define NVC797_OGL_SET_CULL_FACE_V 31:0 +#define NVC797_OGL_SET_CULL_FACE_V_FRONT 0x00000404 +#define NVC797_OGL_SET_CULL_FACE_V_BACK 0x00000405 +#define NVC797_OGL_SET_CULL_FACE_V_FRONT_AND_BACK 0x00000408 + +#define NVC797_SET_VIEWPORT_PIXEL 0x1924 +#define NVC797_SET_VIEWPORT_PIXEL_CENTER 0:0 +#define NVC797_SET_VIEWPORT_PIXEL_CENTER_AT_HALF_INTEGERS 0x00000000 +#define NVC797_SET_VIEWPORT_PIXEL_CENTER_AT_INTEGERS 0x00000001 + +#define NVC797_SET_VIEWPORT_SCALE_OFFSET 0x192c +#define NVC797_SET_VIEWPORT_SCALE_OFFSET_ENABLE 0:0 +#define NVC797_SET_VIEWPORT_SCALE_OFFSET_ENABLE_FALSE 0x00000000 +#define NVC797_SET_VIEWPORT_SCALE_OFFSET_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_VIEWPORT_CLIP_CONTROL 0x193c +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE 0:0 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_FALSE 0x00000000 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_TRUE 0x00000001 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE 17:16 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_USE_FIELD_MIN_Z_ZERO_MAX_Z_ONE 0x00000000 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_MIN_Z_MAX_Z 0x00000001 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_ZERO_ONE 0x00000002 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_MINUS_INF_PLUS_INF 0x00000003 
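[Editorial sketch, not part of the patch] A note for readers of the method listings above: each field define expands to a bare high:low token pair (a C conditional expression), the same DRF convention used by nvmisc.h elsewhere in this tree. A minimal sketch of how such defines are consumed, assuming only standard C plus NvU32 from nvtypes.h; the FLD_* helper names are illustrative, not from this patch:

/* Illustrative helpers: derive shift and mask from an "hi:lo" field define.
 * (0 ? hi:lo) evaluates to the low bit index, (1 ? hi:lo) to the high one. */
#define FLD_SHIFT(f)   ((0 ? f) % 32)
#define FLD_MASK(f)    (0xFFFFFFFFU >> (31 - ((1 ? f) % 32) + ((0 ? f) % 32)))
#define FLD_NUM(f, n)  (((NvU32)(n) & FLD_MASK(f)) << FLD_SHIFT(f))
#define FLD_VAL(f, v)  (((NvU32)(v) >> FLD_SHIFT(f)) & FLD_MASK(f))

/* Example: Z_CLIP_RANGE is bits 17:16, so this packs MIN_Z_MAX_Z (1)
 * into bits 17:16 of a SET_VIEWPORT_CLIP_CONTROL data word. */
static inline NvU32 viewport_zclip_min_max(void)
{
    return FLD_NUM(NVC797_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE,
                   NVC797_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_MIN_Z_MAX_Z);
}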
+#define NVC797_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z 3:3 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLIP 0x00000000 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLAMP 0x00000001 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z 4:4 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLIP 0x00000000 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLAMP 0x00000001 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND 7:7 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_256 0x00000000 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_1 0x00000001 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND 10:10 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_256 0x00000000 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_1 0x00000001 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP 13:11 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP 0x00000000 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_PASSTHRU 0x00000001 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XY_CLIP 0x00000002 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XYZ_CLIP 0x00000003 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP_NO_Z_CULL 0x00000004 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_Z_CLIP 0x00000005 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_TRI_FILL_OR_CLIP 0x00000006 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z 2:1 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SAME_AS_XY_GUARDBAND 0x00000000 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_256 0x00000001 +#define NVC797_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_1 0x00000002 + +#define NVC797_SET_USER_CLIP_OP 0x1940 +#define NVC797_SET_USER_CLIP_OP_PLANE0 0:0 +#define NVC797_SET_USER_CLIP_OP_PLANE0_CLIP 0x00000000 +#define NVC797_SET_USER_CLIP_OP_PLANE0_CULL 0x00000001 +#define NVC797_SET_USER_CLIP_OP_PLANE1 4:4 +#define NVC797_SET_USER_CLIP_OP_PLANE1_CLIP 0x00000000 +#define NVC797_SET_USER_CLIP_OP_PLANE1_CULL 0x00000001 +#define NVC797_SET_USER_CLIP_OP_PLANE2 8:8 +#define NVC797_SET_USER_CLIP_OP_PLANE2_CLIP 0x00000000 +#define NVC797_SET_USER_CLIP_OP_PLANE2_CULL 0x00000001 +#define NVC797_SET_USER_CLIP_OP_PLANE3 12:12 +#define NVC797_SET_USER_CLIP_OP_PLANE3_CLIP 0x00000000 +#define NVC797_SET_USER_CLIP_OP_PLANE3_CULL 0x00000001 +#define NVC797_SET_USER_CLIP_OP_PLANE4 16:16 +#define NVC797_SET_USER_CLIP_OP_PLANE4_CLIP 0x00000000 +#define NVC797_SET_USER_CLIP_OP_PLANE4_CULL 0x00000001 +#define NVC797_SET_USER_CLIP_OP_PLANE5 20:20 +#define NVC797_SET_USER_CLIP_OP_PLANE5_CLIP 0x00000000 +#define NVC797_SET_USER_CLIP_OP_PLANE5_CULL 0x00000001 +#define NVC797_SET_USER_CLIP_OP_PLANE6 24:24 +#define NVC797_SET_USER_CLIP_OP_PLANE6_CLIP 0x00000000 +#define NVC797_SET_USER_CLIP_OP_PLANE6_CULL 0x00000001 +#define NVC797_SET_USER_CLIP_OP_PLANE7 28:28 +#define NVC797_SET_USER_CLIP_OP_PLANE7_CLIP 0x00000000 +#define NVC797_SET_USER_CLIP_OP_PLANE7_CULL 0x00000001 + +#define NVC797_SET_RENDER_ENABLE_OVERRIDE 0x1944 +#define NVC797_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define NVC797_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define NVC797_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define NVC797_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + +#define NVC797_SET_PRIMITIVE_TOPOLOGY_CONTROL 0x1948 +#define 
NVC797_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE 0:0 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_TOPOLOGY_IN_BEGIN_METHODS 0x00000000 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_SEPARATE_TOPOLOGY_STATE 0x00000001 + +#define NVC797_SET_WINDOW_CLIP_ENABLE 0x194c +#define NVC797_SET_WINDOW_CLIP_ENABLE_V 0:0 +#define NVC797_SET_WINDOW_CLIP_ENABLE_V_FALSE 0x00000000 +#define NVC797_SET_WINDOW_CLIP_ENABLE_V_TRUE 0x00000001 + +#define NVC797_SET_WINDOW_CLIP_TYPE 0x1950 +#define NVC797_SET_WINDOW_CLIP_TYPE_V 1:0 +#define NVC797_SET_WINDOW_CLIP_TYPE_V_INCLUSIVE 0x00000000 +#define NVC797_SET_WINDOW_CLIP_TYPE_V_EXCLUSIVE 0x00000001 +#define NVC797_SET_WINDOW_CLIP_TYPE_V_CLIPALL 0x00000002 + +#define NVC797_INVALIDATE_ZCULL 0x1958 +#define NVC797_INVALIDATE_ZCULL_V 31:0 +#define NVC797_INVALIDATE_ZCULL_V_INVALIDATE 0x00000000 + +#define NVC797_SET_ZCULL 0x1968 +#define NVC797_SET_ZCULL_Z_ENABLE 0:0 +#define NVC797_SET_ZCULL_Z_ENABLE_FALSE 0x00000000 +#define NVC797_SET_ZCULL_Z_ENABLE_TRUE 0x00000001 +#define NVC797_SET_ZCULL_STENCIL_ENABLE 4:4 +#define NVC797_SET_ZCULL_STENCIL_ENABLE_FALSE 0x00000000 +#define NVC797_SET_ZCULL_STENCIL_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_ZCULL_BOUNDS 0x196c +#define NVC797_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE 0:0 +#define NVC797_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_FALSE 0x00000000 +#define NVC797_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_TRUE 0x00000001 +#define NVC797_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE 4:4 +#define NVC797_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_FALSE 0x00000000 +#define NVC797_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_PRIMITIVE_TOPOLOGY 0x1970 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V 15:0 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_POINTLIST 0x00000001 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LINELIST 0x00000002 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP 0x00000003 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST 0x00000004 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP 0x00000005 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LINELIST_ADJCY 0x0000000A +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP_ADJCY 0x0000000B +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST_ADJCY 0x0000000C +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_PATCHLIST 0x0000000E +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_POINTS 0x00001001 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST 0x00001002 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST 0x00001003 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST 0x0000100F +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINESTRIP 0x00001010 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINESTRIP 0x00001011 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLELIST 0x00001012 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLESTRIP 0x00001013 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLESTRIP 0x00001014 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN 0x00001015 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLEFAN 0x00001016 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN_IMM 0x00001017 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST_IMM 0x00001018 +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST2 0x0000101A +#define NVC797_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST2 0x0000101B + +#define NVC797_ZCULL_SYNC 0x1978 +#define 
NVC797_ZCULL_SYNC_V 31:0 + +#define NVC797_SET_CLIP_ID_TEST 0x197c +#define NVC797_SET_CLIP_ID_TEST_ENABLE 0:0 +#define NVC797_SET_CLIP_ID_TEST_ENABLE_FALSE 0x00000000 +#define NVC797_SET_CLIP_ID_TEST_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_SURFACE_CLIP_ID_WIDTH 0x1980 +#define NVC797_SET_SURFACE_CLIP_ID_WIDTH_V 31:0 + +#define NVC797_SET_CLIP_ID 0x1984 +#define NVC797_SET_CLIP_ID_V 31:0 + +#define NVC797_SET_DEPTH_BOUNDS_TEST 0x19bc +#define NVC797_SET_DEPTH_BOUNDS_TEST_ENABLE 0:0 +#define NVC797_SET_DEPTH_BOUNDS_TEST_ENABLE_FALSE 0x00000000 +#define NVC797_SET_DEPTH_BOUNDS_TEST_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_BLEND_FLOAT_OPTION 0x19c0 +#define NVC797_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO 0:0 +#define NVC797_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_FALSE 0x00000000 +#define NVC797_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_TRUE 0x00000001 + +#define NVC797_SET_LOGIC_OP 0x19c4 +#define NVC797_SET_LOGIC_OP_ENABLE 0:0 +#define NVC797_SET_LOGIC_OP_ENABLE_FALSE 0x00000000 +#define NVC797_SET_LOGIC_OP_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_LOGIC_OP_FUNC 0x19c8 +#define NVC797_SET_LOGIC_OP_FUNC_V 31:0 +#define NVC797_SET_LOGIC_OP_FUNC_V_CLEAR 0x00001500 +#define NVC797_SET_LOGIC_OP_FUNC_V_AND 0x00001501 +#define NVC797_SET_LOGIC_OP_FUNC_V_AND_REVERSE 0x00001502 +#define NVC797_SET_LOGIC_OP_FUNC_V_COPY 0x00001503 +#define NVC797_SET_LOGIC_OP_FUNC_V_AND_INVERTED 0x00001504 +#define NVC797_SET_LOGIC_OP_FUNC_V_NOOP 0x00001505 +#define NVC797_SET_LOGIC_OP_FUNC_V_XOR 0x00001506 +#define NVC797_SET_LOGIC_OP_FUNC_V_OR 0x00001507 +#define NVC797_SET_LOGIC_OP_FUNC_V_NOR 0x00001508 +#define NVC797_SET_LOGIC_OP_FUNC_V_EQUIV 0x00001509 +#define NVC797_SET_LOGIC_OP_FUNC_V_INVERT 0x0000150A +#define NVC797_SET_LOGIC_OP_FUNC_V_OR_REVERSE 0x0000150B +#define NVC797_SET_LOGIC_OP_FUNC_V_COPY_INVERTED 0x0000150C +#define NVC797_SET_LOGIC_OP_FUNC_V_OR_INVERTED 0x0000150D +#define NVC797_SET_LOGIC_OP_FUNC_V_NAND 0x0000150E +#define NVC797_SET_LOGIC_OP_FUNC_V_SET 0x0000150F + +#define NVC797_SET_Z_COMPRESSION 0x19cc +#define NVC797_SET_Z_COMPRESSION_ENABLE 0:0 +#define NVC797_SET_Z_COMPRESSION_ENABLE_FALSE 0x00000000 +#define NVC797_SET_Z_COMPRESSION_ENABLE_TRUE 0x00000001 + +#define NVC797_CLEAR_SURFACE 0x19d0 +#define NVC797_CLEAR_SURFACE_Z_ENABLE 0:0 +#define NVC797_CLEAR_SURFACE_Z_ENABLE_FALSE 0x00000000 +#define NVC797_CLEAR_SURFACE_Z_ENABLE_TRUE 0x00000001 +#define NVC797_CLEAR_SURFACE_STENCIL_ENABLE 1:1 +#define NVC797_CLEAR_SURFACE_STENCIL_ENABLE_FALSE 0x00000000 +#define NVC797_CLEAR_SURFACE_STENCIL_ENABLE_TRUE 0x00000001 +#define NVC797_CLEAR_SURFACE_R_ENABLE 2:2 +#define NVC797_CLEAR_SURFACE_R_ENABLE_FALSE 0x00000000 +#define NVC797_CLEAR_SURFACE_R_ENABLE_TRUE 0x00000001 +#define NVC797_CLEAR_SURFACE_G_ENABLE 3:3 +#define NVC797_CLEAR_SURFACE_G_ENABLE_FALSE 0x00000000 +#define NVC797_CLEAR_SURFACE_G_ENABLE_TRUE 0x00000001 +#define NVC797_CLEAR_SURFACE_B_ENABLE 4:4 +#define NVC797_CLEAR_SURFACE_B_ENABLE_FALSE 0x00000000 +#define NVC797_CLEAR_SURFACE_B_ENABLE_TRUE 0x00000001 +#define NVC797_CLEAR_SURFACE_A_ENABLE 5:5 +#define NVC797_CLEAR_SURFACE_A_ENABLE_FALSE 0x00000000 +#define NVC797_CLEAR_SURFACE_A_ENABLE_TRUE 0x00000001 +#define NVC797_CLEAR_SURFACE_MRT_SELECT 9:6 +#define NVC797_CLEAR_SURFACE_RT_ARRAY_INDEX 25:10 + +#define NVC797_CLEAR_CLIP_ID_SURFACE 0x19d4 +#define NVC797_CLEAR_CLIP_ID_SURFACE_V 31:0 + +#define NVC797_SET_COLOR_COMPRESSION(i) (0x19e0+(i)*4) +#define NVC797_SET_COLOR_COMPRESSION_ENABLE 0:0 +#define 
NVC797_SET_COLOR_COMPRESSION_ENABLE_FALSE 0x00000000 +#define NVC797_SET_COLOR_COMPRESSION_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_CT_WRITE(i) (0x1a00+(i)*4) +#define NVC797_SET_CT_WRITE_R_ENABLE 0:0 +#define NVC797_SET_CT_WRITE_R_ENABLE_FALSE 0x00000000 +#define NVC797_SET_CT_WRITE_R_ENABLE_TRUE 0x00000001 +#define NVC797_SET_CT_WRITE_G_ENABLE 4:4 +#define NVC797_SET_CT_WRITE_G_ENABLE_FALSE 0x00000000 +#define NVC797_SET_CT_WRITE_G_ENABLE_TRUE 0x00000001 +#define NVC797_SET_CT_WRITE_B_ENABLE 8:8 +#define NVC797_SET_CT_WRITE_B_ENABLE_FALSE 0x00000000 +#define NVC797_SET_CT_WRITE_B_ENABLE_TRUE 0x00000001 +#define NVC797_SET_CT_WRITE_A_ENABLE 12:12 +#define NVC797_SET_CT_WRITE_A_ENABLE_FALSE 0x00000000 +#define NVC797_SET_CT_WRITE_A_ENABLE_TRUE 0x00000001 + +#define NVC797_PIPE_NOP 0x1a2c +#define NVC797_PIPE_NOP_V 31:0 + +#define NVC797_SET_SPARE00 0x1a30 +#define NVC797_SET_SPARE00_V 31:0 + +#define NVC797_SET_SPARE01 0x1a34 +#define NVC797_SET_SPARE01_V 31:0 + +#define NVC797_SET_SPARE02 0x1a38 +#define NVC797_SET_SPARE02_V 31:0 + +#define NVC797_SET_SPARE03 0x1a3c +#define NVC797_SET_SPARE03_V 31:0 + +#define NVC797_SET_REPORT_SEMAPHORE_A 0x1b00 +#define NVC797_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC797_SET_REPORT_SEMAPHORE_B 0x1b04 +#define NVC797_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC797_SET_REPORT_SEMAPHORE_C 0x1b08 +#define NVC797_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC797_SET_REPORT_SEMAPHORE_D 0x1b0c +#define NVC797_SET_REPORT_SEMAPHORE_D_OPERATION 1:0 +#define NVC797_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000 +#define NVC797_SET_REPORT_SEMAPHORE_D_OPERATION_ACQUIRE 0x00000001 +#define NVC797_SET_REPORT_SEMAPHORE_D_OPERATION_REPORT_ONLY 0x00000002 +#define NVC797_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003 +#define NVC797_SET_REPORT_SEMAPHORE_D_RELEASE 4:4 +#define NVC797_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_READS_COMPLETE 0x00000000 +#define NVC797_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_WRITES_COMPLETE 0x00000001 +#define NVC797_SET_REPORT_SEMAPHORE_D_ACQUIRE 8:8 +#define NVC797_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_WRITES_START 0x00000000 +#define NVC797_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_READS_START 0x00000001 +#define NVC797_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION 15:12 +#define NVC797_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_NONE 0x00000000 +#define NVC797_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DATA_ASSEMBLER 0x00000001 +#define NVC797_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VERTEX_SHADER 0x00000002 +#define NVC797_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_INIT_SHADER 0x00000008 +#define NVC797_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_SHADER 0x00000009 +#define NVC797_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_GEOMETRY_SHADER 0x00000006 +#define NVC797_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_STREAMING_OUTPUT 0x00000005 +#define NVC797_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VPC 0x00000004 +#define NVC797_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ZCULL 0x00000007 +#define NVC797_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_PIXEL_SHADER 0x0000000A +#define NVC797_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DEPTH_TEST 0x0000000C +#define NVC797_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ALL 0x0000000F +#define NVC797_SET_REPORT_SEMAPHORE_D_COMPARISON 16:16 +#define NVC797_SET_REPORT_SEMAPHORE_D_COMPARISON_EQ 0x00000000 +#define NVC797_SET_REPORT_SEMAPHORE_D_COMPARISON_GE 0x00000001 +#define NVC797_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20 
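[Editorial sketch, not part of the patch] The SET_REPORT_SEMAPHORE_A..D quartet above (methods 0x1b00..0x1b0c) is the class's general report/semaphore mechanism: A and B carry a 40-bit GPU VA (8 upper plus 32 lower bits), C the payload, and D the operation word. A sketch of composing a plain release, reusing the illustrative FLD_NUM helper from the earlier sketch:

/* Fill the four data words for methods 0x1b00..0x1b0c (A..D): release
 * `payload` at `gpuVa` once the whole pipeline has drained. */
static inline void report_semaphore_release(NvU32 data[4], NvU64 gpuVa, NvU32 payload)
{
    data[0] = FLD_NUM(NVC797_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER,
                      (NvU32)(gpuVa >> 32));   /* VA bits 39:32 */
    data[1] = (NvU32)gpuVa;                    /* VA bits 31:0  */
    data[2] = payload;
    data[3] = FLD_NUM(NVC797_SET_REPORT_SEMAPHORE_D_OPERATION,
                      NVC797_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE)
            | FLD_NUM(NVC797_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION,
                      NVC797_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ALL);
}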
+#define NVC797_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVC797_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT 27:23 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_NONE 0x00000000 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_DA_VERTICES_GENERATED 0x00000001 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_DA_PRIMITIVES_GENERATED 0x00000003 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_VS_INVOCATIONS 0x00000005 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_TI_INVOCATIONS 0x0000001B +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_TS_INVOCATIONS 0x0000001D +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_TS_PRIMITIVES_GENERATED 0x0000001F +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_GS_INVOCATIONS 0x00000007 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_GS_PRIMITIVES_GENERATED 0x00000009 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_ALPHA_BETA_CLOCKS 0x00000004 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_SCG_CLOCKS 0x00000008 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_VTG_PRIMITIVES_OUT 0x00000012 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x0000001E +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_SUCCEEDED 0x0000000B +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED 0x0000000D +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000006 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_BYTE_COUNT 0x0000001A +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_INVOCATIONS 0x0000000F +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_PRIMITIVES_GENERATED 0x00000011 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS0 0x0000000A +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS1 0x0000000C +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS2 0x0000000E +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS3 0x00000010 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_PS_INVOCATIONS 0x00000013 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT 0x00000002 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT64 0x00000015 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_TILED_ZPASS_PIXEL_CNT64 0x00000017 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_COLOR_TARGET 0x00000018 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_ZETA_TARGET 0x00000019 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_BOUNDING_RECTANGLE 0x0000001C +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_TIMESTAMP 0x00000014 +#define NVC797_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28 +#define NVC797_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVC797_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVC797_SET_REPORT_SEMAPHORE_D_SUB_REPORT 7:5 +#define NVC797_SET_REPORT_SEMAPHORE_D_REPORT_DWORD_NUMBER 21:21 +#define NVC797_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2 +#define NVC797_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000 +#define NVC797_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001 +#define NVC797_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3 +#define NVC797_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC797_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC797_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9 +#define NVC797_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC797_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001 +#define 
NVC797_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC797_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003 +#define NVC797_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC797_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005 +#define NVC797_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006 +#define NVC797_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC797_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17 +#define NVC797_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC797_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVC797_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP 19:19 +#define NVC797_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_FALSE 0x00000000 +#define NVC797_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_TRUE 0x00000001 + +#define NVC797_SET_VERTEX_STREAM_A_FORMAT(j) (0x1c00+(j)*16) +#define NVC797_SET_VERTEX_STREAM_A_FORMAT_STRIDE 11:0 +#define NVC797_SET_VERTEX_STREAM_A_FORMAT_ENABLE 12:12 +#define NVC797_SET_VERTEX_STREAM_A_FORMAT_ENABLE_FALSE 0x00000000 +#define NVC797_SET_VERTEX_STREAM_A_FORMAT_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_VERTEX_STREAM_A_LOCATION_A(j) (0x1c04+(j)*16) +#define NVC797_SET_VERTEX_STREAM_A_LOCATION_A_OFFSET_UPPER 7:0 + +#define NVC797_SET_VERTEX_STREAM_A_LOCATION_B(j) (0x1c08+(j)*16) +#define NVC797_SET_VERTEX_STREAM_A_LOCATION_B_OFFSET_LOWER 31:0 + +#define NVC797_SET_VERTEX_STREAM_A_FREQUENCY(j) (0x1c0c+(j)*16) +#define NVC797_SET_VERTEX_STREAM_A_FREQUENCY_V 31:0 + +#define NVC797_SET_VERTEX_STREAM_B_FORMAT(j) (0x1d00+(j)*16) +#define NVC797_SET_VERTEX_STREAM_B_FORMAT_STRIDE 11:0 +#define NVC797_SET_VERTEX_STREAM_B_FORMAT_ENABLE 12:12 +#define NVC797_SET_VERTEX_STREAM_B_FORMAT_ENABLE_FALSE 0x00000000 +#define NVC797_SET_VERTEX_STREAM_B_FORMAT_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_VERTEX_STREAM_B_LOCATION_A(j) (0x1d04+(j)*16) +#define NVC797_SET_VERTEX_STREAM_B_LOCATION_A_OFFSET_UPPER 7:0 + +#define NVC797_SET_VERTEX_STREAM_B_LOCATION_B(j) (0x1d08+(j)*16) +#define NVC797_SET_VERTEX_STREAM_B_LOCATION_B_OFFSET_LOWER 31:0 + +#define NVC797_SET_VERTEX_STREAM_B_FREQUENCY(j) (0x1d0c+(j)*16) +#define NVC797_SET_VERTEX_STREAM_B_FREQUENCY_V 31:0 + +#define NVC797_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA(j) (0x1e00+(j)*32) +#define NVC797_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE 0:0 +#define NVC797_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000 +#define NVC797_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_BLEND_PER_TARGET_COLOR_OP(j) (0x1e04+(j)*32) +#define NVC797_SET_BLEND_PER_TARGET_COLOR_OP_V 31:0 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC797_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC797_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MIN 0x00008007 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MAX 0x00008008 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_ADD 0x00000001 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MIN 0x00000004 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MAX 0x00000005 + +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF(j) (0x1e08+(j)*32) +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V 31:0 +#define 
NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF(j) (0x1e0c+(j)*32) +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V 31:0 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001 +#define 
NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_OP(j) (0x1e10+(j)*32) +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_OP_V 31:0 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MIN 0x00008007 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MAX 0x00008008 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_ADD 0x00000001 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003 
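[Editorial sketch, not part of the patch] The per-target blend methods above are indexed by render target: each target j owns a 32-byte method block starting at 0x1e00, so NVC797_SET_BLEND_PER_TARGET_COLOR_OP(j) lands at 0x1e04 + j*32. A sketch of programming classic source-over alpha blending on one target; the push() callback is a stand-in for whichever pushbuffer writer the caller uses:

/* Source-over blending for color target j:
 * result.rgb = src.rgb * src.a + dst.rgb * (1 - src.a) */
static inline void blend_src_over(void (*push)(NvU32 mthd, NvU32 data), unsigned j)
{
    push(NVC797_SET_BLEND_PER_TARGET_COLOR_OP(j),
         NVC797_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_ADD);
    push(NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF(j),
         NVC797_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA);
    push(NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF(j),
         NVC797_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA);
}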
+#define NVC797_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MIN 0x00000004 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MAX 0x00000005 + +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF(j) (0x1e14+(j)*32) +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V 31:0 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define 
NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF(j) (0x1e18+(j)*32) +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V 31:0 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC797_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC797_SET_PIPELINE_SHADER(j) (0x2000+(j)*64) +#define NVC797_SET_PIPELINE_SHADER_ENABLE 0:0 +#define NVC797_SET_PIPELINE_SHADER_ENABLE_FALSE 0x00000000 +#define NVC797_SET_PIPELINE_SHADER_ENABLE_TRUE 0x00000001 +#define NVC797_SET_PIPELINE_SHADER_TYPE 7:4 +#define NVC797_SET_PIPELINE_SHADER_TYPE_VERTEX_CULL_BEFORE_FETCH 0x00000000 +#define NVC797_SET_PIPELINE_SHADER_TYPE_VERTEX 0x00000001 +#define 
NVC797_SET_PIPELINE_SHADER_TYPE_TESSELLATION_INIT 0x00000002 +#define NVC797_SET_PIPELINE_SHADER_TYPE_TESSELLATION 0x00000003 +#define NVC797_SET_PIPELINE_SHADER_TYPE_GEOMETRY 0x00000004 +#define NVC797_SET_PIPELINE_SHADER_TYPE_PIXEL 0x00000005 + +#define NVC797_SET_PIPELINE_RESERVED_B(j) (0x2004+(j)*64) +#define NVC797_SET_PIPELINE_RESERVED_B_V 0:0 + +#define NVC797_SET_PIPELINE_RESERVED_A(j) (0x2008+(j)*64) +#define NVC797_SET_PIPELINE_RESERVED_A_V 0:0 + +#define NVC797_SET_PIPELINE_REGISTER_COUNT(j) (0x200c+(j)*64) +#define NVC797_SET_PIPELINE_REGISTER_COUNT_V 8:0 + +#define NVC797_SET_PIPELINE_BINDING(j) (0x2010+(j)*64) +#define NVC797_SET_PIPELINE_BINDING_GROUP 2:0 + +#define NVC797_SET_PIPELINE_PROGRAM_ADDRESS_A(j) (0x2014+(j)*64) +#define NVC797_SET_PIPELINE_PROGRAM_ADDRESS_A_UPPER 7:0 + +#define NVC797_SET_PIPELINE_PROGRAM_ADDRESS_B(j) (0x2018+(j)*64) +#define NVC797_SET_PIPELINE_PROGRAM_ADDRESS_B_LOWER 31:0 + +#define NVC797_SET_PIPELINE_PROGRAM_PREFETCH(j) (0x201c+(j)*64) +#define NVC797_SET_PIPELINE_PROGRAM_PREFETCH_SIZE_IN_BLOCKS 6:0 + +#define NVC797_SET_PIPELINE_RESERVED_E(j) (0x2020+(j)*64) +#define NVC797_SET_PIPELINE_RESERVED_E_V 0:0 + +#define NVC797_SET_FALCON00 0x2300 +#define NVC797_SET_FALCON00_V 31:0 + +#define NVC797_SET_FALCON01 0x2304 +#define NVC797_SET_FALCON01_V 31:0 + +#define NVC797_SET_FALCON02 0x2308 +#define NVC797_SET_FALCON02_V 31:0 + +#define NVC797_SET_FALCON03 0x230c +#define NVC797_SET_FALCON03_V 31:0 + +#define NVC797_SET_FALCON04 0x2310 +#define NVC797_SET_FALCON04_V 31:0 + +#define NVC797_SET_FALCON05 0x2314 +#define NVC797_SET_FALCON05_V 31:0 + +#define NVC797_SET_FALCON06 0x2318 +#define NVC797_SET_FALCON06_V 31:0 + +#define NVC797_SET_FALCON07 0x231c +#define NVC797_SET_FALCON07_V 31:0 + +#define NVC797_SET_FALCON08 0x2320 +#define NVC797_SET_FALCON08_V 31:0 + +#define NVC797_SET_FALCON09 0x2324 +#define NVC797_SET_FALCON09_V 31:0 + +#define NVC797_SET_FALCON10 0x2328 +#define NVC797_SET_FALCON10_V 31:0 + +#define NVC797_SET_FALCON11 0x232c +#define NVC797_SET_FALCON11_V 31:0 + +#define NVC797_SET_FALCON12 0x2330 +#define NVC797_SET_FALCON12_V 31:0 + +#define NVC797_SET_FALCON13 0x2334 +#define NVC797_SET_FALCON13_V 31:0 + +#define NVC797_SET_FALCON14 0x2338 +#define NVC797_SET_FALCON14_V 31:0 + +#define NVC797_SET_FALCON15 0x233c +#define NVC797_SET_FALCON15_V 31:0 + +#define NVC797_SET_FALCON16 0x2340 +#define NVC797_SET_FALCON16_V 31:0 + +#define NVC797_SET_FALCON17 0x2344 +#define NVC797_SET_FALCON17_V 31:0 + +#define NVC797_SET_FALCON18 0x2348 +#define NVC797_SET_FALCON18_V 31:0 + +#define NVC797_SET_FALCON19 0x234c +#define NVC797_SET_FALCON19_V 31:0 + +#define NVC797_SET_FALCON20 0x2350 +#define NVC797_SET_FALCON20_V 31:0 + +#define NVC797_SET_FALCON21 0x2354 +#define NVC797_SET_FALCON21_V 31:0 + +#define NVC797_SET_FALCON22 0x2358 +#define NVC797_SET_FALCON22_V 31:0 + +#define NVC797_SET_FALCON23 0x235c +#define NVC797_SET_FALCON23_V 31:0 + +#define NVC797_SET_FALCON24 0x2360 +#define NVC797_SET_FALCON24_V 31:0 + +#define NVC797_SET_FALCON25 0x2364 +#define NVC797_SET_FALCON25_V 31:0 + +#define NVC797_SET_FALCON26 0x2368 +#define NVC797_SET_FALCON26_V 31:0 + +#define NVC797_SET_FALCON27 0x236c +#define NVC797_SET_FALCON27_V 31:0 + +#define NVC797_SET_FALCON28 0x2370 +#define NVC797_SET_FALCON28_V 31:0 + +#define NVC797_SET_FALCON29 0x2374 +#define NVC797_SET_FALCON29_V 31:0 + +#define NVC797_SET_FALCON30 0x2378 +#define NVC797_SET_FALCON30_V 31:0 + +#define NVC797_SET_FALCON31 0x237c +#define NVC797_SET_FALCON31_V 31:0 + +#define 
NVC797_SET_CONSTANT_BUFFER_SELECTOR_A 0x2380 +#define NVC797_SET_CONSTANT_BUFFER_SELECTOR_A_SIZE 16:0 + +#define NVC797_SET_CONSTANT_BUFFER_SELECTOR_B 0x2384 +#define NVC797_SET_CONSTANT_BUFFER_SELECTOR_B_ADDRESS_UPPER 7:0 + +#define NVC797_SET_CONSTANT_BUFFER_SELECTOR_C 0x2388 +#define NVC797_SET_CONSTANT_BUFFER_SELECTOR_C_ADDRESS_LOWER 31:0 + +#define NVC797_LOAD_CONSTANT_BUFFER_OFFSET 0x238c +#define NVC797_LOAD_CONSTANT_BUFFER_OFFSET_V 15:0 + +#define NVC797_LOAD_CONSTANT_BUFFER(i) (0x2390+(i)*4) +#define NVC797_LOAD_CONSTANT_BUFFER_V 31:0 + +#define NVC797_BIND_GROUP_RESERVED_A(j) (0x2400+(j)*32) +#define NVC797_BIND_GROUP_RESERVED_A_V 0:0 + +#define NVC797_BIND_GROUP_RESERVED_B(j) (0x2404+(j)*32) +#define NVC797_BIND_GROUP_RESERVED_B_V 0:0 + +#define NVC797_BIND_GROUP_RESERVED_C(j) (0x2408+(j)*32) +#define NVC797_BIND_GROUP_RESERVED_C_V 0:0 + +#define NVC797_BIND_GROUP_RESERVED_D(j) (0x240c+(j)*32) +#define NVC797_BIND_GROUP_RESERVED_D_V 0:0 + +#define NVC797_BIND_GROUP_CONSTANT_BUFFER(j) (0x2410+(j)*32) +#define NVC797_BIND_GROUP_CONSTANT_BUFFER_VALID 0:0 +#define NVC797_BIND_GROUP_CONSTANT_BUFFER_VALID_FALSE 0x00000000 +#define NVC797_BIND_GROUP_CONSTANT_BUFFER_VALID_TRUE 0x00000001 +#define NVC797_BIND_GROUP_CONSTANT_BUFFER_SHADER_SLOT 8:4 + +#define NVC797_SET_TRAP_HANDLER_A 0x25f8 +#define NVC797_SET_TRAP_HANDLER_A_ADDRESS_UPPER 16:0 + +#define NVC797_SET_TRAP_HANDLER_B 0x25fc +#define NVC797_SET_TRAP_HANDLER_B_ADDRESS_LOWER 31:0 + +#define NVC797_SET_COLOR_CLAMP 0x2600 +#define NVC797_SET_COLOR_CLAMP_ENABLE 0:0 +#define NVC797_SET_COLOR_CLAMP_ENABLE_FALSE 0x00000000 +#define NVC797_SET_COLOR_CLAMP_ENABLE_TRUE 0x00000001 + +#define NVC797_SET_STREAM_OUT_LAYOUT_SELECT(i,j) (0x2800+(i)*128+(j)*4) +#define NVC797_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER00 7:0 +#define NVC797_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER01 15:8 +#define NVC797_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER02 23:16 +#define NVC797_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER03 31:24 + +#define NVC797_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE(i) (0x32f4+(i)*4) +#define NVC797_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_V 31:0 + +#define NVC797_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER(i) (0x3314+(i)*4) +#define NVC797_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC797_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3334 +#define NVC797_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC797_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3338 +#define NVC797_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4) +#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define 
NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30
+
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4)
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4
+
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0
+
+#define NVC797_START_SHADER_PERFORMANCE_COUNTER 0x33e0
+#define NVC797_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVC797_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4
+#define NVC797_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER 0x33e8
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER_V 31:0
+
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER 0x33ec
+#define NVC797_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER_V 31:0
+
+#define NVC797_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4)
+#define NVC797_SET_MME_SHADOW_SCRATCH_V 31:0
+
+#define NVC797_CALL_MME_MACRO(j) (0x3800+(j)*8)
+#define NVC797_CALL_MME_MACRO_V 31:0
+
+#define NVC797_CALL_MME_DATA(j) (0x3804+(j)*8)
+#define NVC797_CALL_MME_DATA_V 31:0
+
+#endif /* _cl_ampere_b_h_ */
diff --git a/src/common/sdk/nvidia/inc/class/clc7b5.h b/src/common/sdk/nvidia/inc/class/clc7b5.h
new file mode 100644
index 0000000..df331e8
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc7b5.h
@@ -0,0 +1,304 @@
+/*******************************************************************************
+    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a
+    copy of this software and associated documentation files (the "Software"),
+    to deal in the Software without restriction, including without limitation
+    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+    and/or sell copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+
+#include "nvtypes.h"
+
+#ifndef _clc7b5_h_
+#define _clc7b5_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define AMPERE_DMA_COPY_B (0x0000C7B5)
+
+#define NVC7B5_NOP (0x00000100)
+#define NVC7B5_NOP_PARAMETER 31:0
+#define NVC7B5_PM_TRIGGER (0x00000140)
+#define NVC7B5_PM_TRIGGER_V 31:0
+#define NVC7B5_SET_MONITORED_FENCE_TYPE (0x0000021C)
+#define NVC7B5_SET_MONITORED_FENCE_TYPE_TYPE 0:0
+#define NVC7B5_SET_MONITORED_FENCE_TYPE_TYPE_MONITORED_FENCE (0x00000000)
+#define NVC7B5_SET_MONITORED_FENCE_TYPE_TYPE_MONITORED_FENCE_EXT (0x00000001)
+#define NVC7B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_UPPER (0x00000220)
+#define NVC7B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_UPPER_UPPER 16:0
+#define NVC7B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_LOWER (0x00000224)
+#define NVC7B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_LOWER_LOWER 31:0
+#define NVC7B5_SET_SEMAPHORE_A (0x00000240)
+#define NVC7B5_SET_SEMAPHORE_A_UPPER 16:0
+#define NVC7B5_SET_SEMAPHORE_B (0x00000244)
+#define NVC7B5_SET_SEMAPHORE_B_LOWER 31:0
+#define NVC7B5_SET_SEMAPHORE_PAYLOAD (0x00000248)
+#define NVC7B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0
+#define NVC7B5_SET_SEMAPHORE_PAYLOAD_UPPER (0x0000024C)
+#define NVC7B5_SET_SEMAPHORE_PAYLOAD_UPPER_PAYLOAD 31:0
+#define NVC7B5_SET_RENDER_ENABLE_A (0x00000254)
+#define NVC7B5_SET_RENDER_ENABLE_A_UPPER 7:0
+#define NVC7B5_SET_RENDER_ENABLE_B (0x00000258)
+#define NVC7B5_SET_RENDER_ENABLE_B_LOWER 31:0
+#define NVC7B5_SET_RENDER_ENABLE_C (0x0000025C)
+#define NVC7B5_SET_RENDER_ENABLE_C_MODE 2:0
+#define NVC7B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000)
+#define NVC7B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001)
+#define NVC7B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002)
+#define NVC7B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003)
+#define NVC7B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004)
+#define NVC7B5_SET_SRC_PHYS_MODE (0x00000260)
+#define NVC7B5_SET_SRC_PHYS_MODE_TARGET 1:0
+#define NVC7B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000)
+#define NVC7B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001)
+#define NVC7B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002)
+#define NVC7B5_SET_SRC_PHYS_MODE_TARGET_PEERMEM (0x00000003)
+#define NVC7B5_SET_SRC_PHYS_MODE_BASIC_KIND 5:2
+#define NVC7B5_SET_SRC_PHYS_MODE_PEER_ID 8:6
+#define NVC7B5_SET_SRC_PHYS_MODE_FLA 9:9
+#define NVC7B5_SET_DST_PHYS_MODE (0x00000264)
+#define NVC7B5_SET_DST_PHYS_MODE_TARGET 1:0
+#define NVC7B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000)
+#define NVC7B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001)
+#define NVC7B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002)
+#define NVC7B5_SET_DST_PHYS_MODE_TARGET_PEERMEM (0x00000003)
+#define NVC7B5_SET_DST_PHYS_MODE_BASIC_KIND 5:2
+#define NVC7B5_SET_DST_PHYS_MODE_PEER_ID 8:6
+#define NVC7B5_SET_DST_PHYS_MODE_FLA 9:9
+#define NVC7B5_LAUNCH_DMA (0x00000300)
+#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0
+#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000)
+#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001)
+#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002)
+#define NVC7B5_LAUNCH_DMA_FLUSH_ENABLE 2:2
+#define NVC7B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000)
+#define NVC7B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001)
+#define NVC7B5_LAUNCH_DMA_FLUSH_TYPE 25:25
+#define NVC7B5_LAUNCH_DMA_FLUSH_TYPE_SYS (0x00000000)
+#define NVC7B5_LAUNCH_DMA_FLUSH_TYPE_GL (0x00000001)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_SEMAPHORE_NO_TIMESTAMP (0x00000001)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_SEMAPHORE_WITH_TIMESTAMP (0x00000002)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_CONDITIONAL_INTR_SEMAPHORE (0x00000003)
+#define NVC7B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5
+#define NVC7B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000)
+#define NVC7B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001)
+#define NVC7B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002)
+#define NVC7B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7
+#define NVC7B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NVC7B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NVC7B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8
+#define NVC7B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NVC7B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NVC7B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9
+#define NVC7B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000)
+#define NVC7B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001)
+#define NVC7B5_LAUNCH_DMA_REMAP_ENABLE 10:10
+#define NVC7B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000)
+#define NVC7B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001)
+#define NVC7B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11
+#define NVC7B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000)
+#define NVC7B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001)
+#define NVC7B5_LAUNCH_DMA_SRC_TYPE 12:12
+#define NVC7B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000)
+#define NVC7B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001)
+#define NVC7B5_LAUNCH_DMA_DST_TYPE 13:13
+#define NVC7B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000)
+#define NVC7B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDA (0x00000008)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDB (0x00000009)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMIN (0x0000000B)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMAX (0x0000000C)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDC (0x0000000D)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDD (0x0000000E)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDE (0x0000000F)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001)
+#define NVC7B5_LAUNCH_DMA_VPRMODE 23:22
+#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000)
+#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001)
+#define NVC7B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24
+#define NVC7B5_LAUNCH_DMA_DISABLE_PLC 26:26
+#define NVC7B5_LAUNCH_DMA_DISABLE_PLC_FALSE (0x00000000)
+#define NVC7B5_LAUNCH_DMA_DISABLE_PLC_TRUE (0x00000001)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE 27:27
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE_ONE_WORD (0x00000000)
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE_TWO_WORD (0x00000001)
+#define NVC7B5_LAUNCH_DMA_RESERVED_ERR_CODE 31:28
+#define NVC7B5_OFFSET_IN_UPPER (0x00000400)
+#define NVC7B5_OFFSET_IN_UPPER_UPPER 16:0
+#define NVC7B5_OFFSET_IN_LOWER (0x00000404)
+#define NVC7B5_OFFSET_IN_LOWER_VALUE 31:0
+#define NVC7B5_OFFSET_OUT_UPPER (0x00000408)
+#define NVC7B5_OFFSET_OUT_UPPER_UPPER 16:0
+#define NVC7B5_OFFSET_OUT_LOWER (0x0000040C)
+#define NVC7B5_OFFSET_OUT_LOWER_VALUE 31:0
+#define NVC7B5_PITCH_IN (0x00000410)
+#define NVC7B5_PITCH_IN_VALUE 31:0
+#define NVC7B5_PITCH_OUT (0x00000414)
+#define NVC7B5_PITCH_OUT_VALUE 31:0
+#define NVC7B5_LINE_LENGTH_IN (0x00000418)
+#define NVC7B5_LINE_LENGTH_IN_VALUE 31:0
+#define NVC7B5_LINE_COUNT (0x0000041C)
+#define NVC7B5_LINE_COUNT_VALUE 31:0
+#define NVC7B5_SET_REMAP_CONST_A (0x00000700)
+#define NVC7B5_SET_REMAP_CONST_A_V 31:0
+#define NVC7B5_SET_REMAP_CONST_B (0x00000704)
+#define NVC7B5_SET_REMAP_CONST_B_V 31:0
+#define NVC7B5_SET_REMAP_COMPONENTS (0x00000708)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_X 2:0
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y 6:4
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z 10:8
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_W 14:12
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005)
+#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006)
+#define NVC7B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16
+#define NVC7B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000)
+#define NVC7B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) +#define NVC7B5_SET_DST_BLOCK_SIZE (0x0000070C) +#define NVC7B5_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC7B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC7B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC7B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC7B5_SET_DST_WIDTH (0x00000710) +#define NVC7B5_SET_DST_WIDTH_V 31:0 +#define NVC7B5_SET_DST_HEIGHT (0x00000714) +#define NVC7B5_SET_DST_HEIGHT_V 31:0 +#define NVC7B5_SET_DST_DEPTH (0x00000718) +#define NVC7B5_SET_DST_DEPTH_V 31:0 +#define NVC7B5_SET_DST_LAYER (0x0000071C) +#define NVC7B5_SET_DST_LAYER_V 31:0 +#define NVC7B5_SET_DST_ORIGIN (0x00000720) +#define NVC7B5_SET_DST_ORIGIN_X 15:0 +#define NVC7B5_SET_DST_ORIGIN_Y 31:16 +#define NVC7B5_SET_SRC_BLOCK_SIZE (0x00000728) +#define NVC7B5_SET_SRC_BLOCK_SIZE_WIDTH 3:0 +#define NVC7B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT 7:4 +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH 11:8 +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 
(0x00000005) +#define NVC7B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC7B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC7B5_SET_SRC_WIDTH (0x0000072C) +#define NVC7B5_SET_SRC_WIDTH_V 31:0 +#define NVC7B5_SET_SRC_HEIGHT (0x00000730) +#define NVC7B5_SET_SRC_HEIGHT_V 31:0 +#define NVC7B5_SET_SRC_DEPTH (0x00000734) +#define NVC7B5_SET_SRC_DEPTH_V 31:0 +#define NVC7B5_SET_SRC_LAYER (0x00000738) +#define NVC7B5_SET_SRC_LAYER_V 31:0 +#define NVC7B5_SET_SRC_ORIGIN (0x0000073C) +#define NVC7B5_SET_SRC_ORIGIN_X 15:0 +#define NVC7B5_SET_SRC_ORIGIN_Y 31:16 +#define NVC7B5_SRC_ORIGIN_X (0x00000744) +#define NVC7B5_SRC_ORIGIN_X_VALUE 31:0 +#define NVC7B5_SRC_ORIGIN_Y (0x00000748) +#define NVC7B5_SRC_ORIGIN_Y_VALUE 31:0 +#define NVC7B5_DST_ORIGIN_X (0x0000074C) +#define NVC7B5_DST_ORIGIN_X_VALUE 31:0 +#define NVC7B5_DST_ORIGIN_Y (0x00000750) +#define NVC7B5_DST_ORIGIN_Y_VALUE 31:0 +#define NVC7B5_PM_TRIGGER_END (0x00001114) +#define NVC7B5_PM_TRIGGER_END_V 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc7b5_h + diff --git a/src/common/sdk/nvidia/inc/class/clc86f.h b/src/common/sdk/nvidia/inc/class/clc86f.h new file mode 100644 index 0000000..1548cd2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc86f.h @@ -0,0 +1,191 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __gh100_clc86f_h__ +#define __gh100_clc86f_h__ + +#define HOPPER_CHANNEL_GPFIFO_A (0x0000C86F) + +typedef volatile struct Nvc86fControl_struct { + NvU32 Ignored00[0x010]; /* 0000-003f*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 Ignored01[0x002]; /* 0050-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x5c]; +} Nvc86fControl, HopperAControlGPFifo; + +#define NVC86F_SET_OBJECT (0x00000000) +// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for +// specifying the page address for a targeted TLB invalidate and the uTLB for +// a targeted REPLAY_CANCEL for UVM. +// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly +// rearranged fields. +#define NVC86F_MEM_OP_A (0x00000028) +#define NVC86F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC86F_MEM_OP_A_TLB_INVALIDATE_INVALIDATION_SIZE 5:0 // Used to specify size of invalidate, used for invalidates which are not of the REPLAY_CANCEL_TARGETED type +#define NVC86F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC86F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE 7:6 // only relevant for invalidates with NVC86F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE for invalidating link TLB only, or non-link TLB only or all TLBs +#define NVC86F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_ALL_TLBS 0 +#define NVC86F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_LINK_TLBS 1 +#define NVC86F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_NON_LINK_TLBS 2 +#define NVC86F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_RSVRVD 3 +#define NVC86F_MEM_OP_A_TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID 8:0 // only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC86F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11 +#define NVC86F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001 +#define NVC86F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000 +#define NVC86F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12 +#define NVC86F_MEM_OP_B (0x0000002c) +#define NVC86F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0 +#define NVC86F_MEM_OP_C (0x00000030) +#define NVC86F_MEM_OP_C_MEMBAR_TYPE 2:0 +#define NVC86F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000 +#define NVC86F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002 +#define 
NVC86F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE 9:7 //only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_READ 0 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE 1 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_STRONG 2 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_RSVRVD 3 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_WEAK 4 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_ALL 5 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE_AND_ATOMIC 6 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ALL 7 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 0x00000004 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC86F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE +#define NVC86F_MEM_OP_C_ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG 19:0 +// MEM_OP_D MUST be preceded by MEM_OPs A-C. 
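+// Illustrative sketch (not part of this class header): the HI:LO pairs above
+// (e.g. 31:27 for MEM_OP_D_OPERATION) are bit ranges inside the 32-bit method
+// data word, and are typically packed with the DRF_DEF/DRF_NUM helpers from
+// nvmisc.h. Assuming those helpers, a full system membar -- which, per the
+// note above, pushes MEM_OP_A through MEM_OP_D in order -- could be staged as:
+//
+//   NvU32 memOp[4];
+//   memOp[0] = 0;                                       /* MEM_OP_A: unused for a membar */
+//   memOp[1] = 0;                                       /* MEM_OP_B: unused for a membar */
+//   memOp[2] = DRF_DEF(C86F, _MEM_OP_C, _MEMBAR_TYPE, _SYS_MEMBAR);
+//   memOp[3] = DRF_DEF(C86F, _MEM_OP_D, _OPERATION, _MEMBAR);
+//
+// memOp[0..3] are then written back-to-back to methods NVC86F_MEM_OP_A..D;
+// the staging array itself is hypothetical, not an API of this header.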
+#define NVC86F_MEM_OP_D (0x00000034) +#define NVC86F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE +#define NVC86F_MEM_OP_D_OPERATION 31:27 +#define NVC86F_MEM_OP_D_OPERATION_MEMBAR 0x00000005 +#define NVC86F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVC86F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a +#define NVC86F_MEM_OP_D_OPERATION_MMU_OPERATION 0x0000000b +#define NVC86F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVC86F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +// CLEAN_LINES is an alias for Tegra/GPU IP usage +#define NVC86F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e +#define NVC86F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVC86F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVC86F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015 +#define NVC86F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR 0x00000016 +#define NVC86F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE 1:0 +#define NVC86F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC 0x00000000 +#define NVC86F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC 0x00000001 +#define NVC86F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_ALL 0x00000002 +#define NVC86F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_TARGETED 0x00000003 +#define NVC86F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE 2:2 +#define NVC86F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC 0x00000000 +#define NVC86F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC 0x00000001 +#define NVC86F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_BANK 6:3 +#define NVC86F_MEM_OP_D_MMU_OPERATION_TYPE 23:20 +#define NVC86F_MEM_OP_D_MMU_OPERATION_TYPE_RESERVED 0x00000000 +#define NVC86F_MEM_OP_D_MMU_OPERATION_TYPE_VIDMEM_ACCESS_BIT_DUMP 0x00000001 +#define NVC86F_SEM_ADDR_LO (0x0000005c) +#define NVC86F_SEM_ADDR_LO_OFFSET 31:2 +#define NVC86F_SEM_ADDR_HI (0x00000060) +#define NVC86F_SEM_ADDR_HI_OFFSET 24:0 +#define NVC86F_SEM_PAYLOAD_LO (0x00000064) +#define NVC86F_SEM_PAYLOAD_HI (0x00000068) +#define NVC86F_SEM_EXECUTE (0x0000006c) +#define NVC86F_SEM_EXECUTE_OPERATION 2:0 +#define NVC86F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000 +#define NVC86F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001 +#define NVC86F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003 +#define NVC86F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12 +#define NVC86F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001 +#define NVC86F_SEM_EXECUTE_RELEASE_WFI 20:20 +#define NVC86F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000 +#define NVC86F_SEM_EXECUTE_PAYLOAD_SIZE 24:24 +#define NVC86F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000 +#define NVC86F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25 +#define NVC86F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000 +#define NVC86F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001 +#define NVC86F_WFI (0x00000078) +#define NVC86F_WFI_SCOPE 0:0 +#define NVC86F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000 +#define NVC86F_WFI_SCOPE_CURRENT_VEID 0x00000000 +#define NVC86F_WFI_SCOPE_ALL 0x00000001 + +/* GPFIFO entry format */ +#define NVC86F_GP_ENTRY__SIZE 8 +#define NVC86F_GP_ENTRY0_FETCH 0:0 +#define NVC86F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVC86F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVC86F_GP_ENTRY0_GET 31:2 +#define NVC86F_GP_ENTRY0_OPERAND 31:0 +#define NVC86F_GP_ENTRY0_PB_EXTENDED_BASE_OPERAND 24:8 +#define NVC86F_GP_ENTRY1_GET_HI 7:0 +#define NVC86F_GP_ENTRY1_LEVEL 9:9 +#define NVC86F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVC86F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVC86F_GP_ENTRY1_LENGTH 30:10 +#define NVC86F_GP_ENTRY1_SYNC 31:31 +#define 
NVC86F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVC86F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVC86F_GP_ENTRY1_OPCODE 7:0 +#define NVC86F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVC86F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVC86F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVC86F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 +#define NVC86F_GP_ENTRY1_OPCODE_SET_PB_SEGMENT_EXTENDED_BASE 0x00000004 + +#endif // __gh100_clc86f_h__ diff --git a/src/common/sdk/nvidia/inc/class/clc870.h b/src/common/sdk/nvidia/inc/class/clc870.h new file mode 100644 index 0000000..d2d35f1 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc870.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/clc870.finn +// + +#define NVC870_DISPLAY (0xc870U) /* finn: Evaluated from "NVC870_ALLOCATION_PARAMETERS_MESSAGE_ID" */ + +#define NVC870_ALLOCATION_PARAMETERS_MESSAGE_ID (0xc870U) + +typedef struct NVC870_ALLOCATION_PARAMETERS { + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numDsis; // Number of DSIs in this chip/display +} NVC870_ALLOCATION_PARAMETERS; + diff --git a/src/common/sdk/nvidia/inc/class/clc871.h b/src/common/sdk/nvidia/inc/class/clc871.h new file mode 100644 index 0000000..a8d348b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc871.h @@ -0,0 +1,380 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc871_h_ +#define _clc871_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC871_DISP_SF_USER (0x000C871) + +typedef volatile struct _clc871_tag0 { + NvU32 dispSfUserOffset[0x400]; +} _NvC871DispSfUser, NvC871DispSfUserMap; + +#define NVC871_SF_HDMI_INFO_IDX_AVI_INFOFRAME 0x00000000 /* */ +#define NVC871_SF_HDMI_INFO_IDX_GCP 0x00000001 /* */ +#define NVC871_SF_HDMI_INFO_IDX_ACR 0x00000002 /* */ +#define NVC871_SF_HDMI_INFO_CTRL(i,j) (0x000E0000-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC871_SF_HDMI_INFO_CTRL__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_INFO_CTRL__SIZE_2 3 /* */ +#define NVC871_SF_HDMI_INFO_CTRL_ENABLE 0:0 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_INFO_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NVC871_SF_HDMI_INFO_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_INFO_CTRL_OTHER 4:4 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_CTRL_OTHER_EN 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_INFO_CTRL_SINGLE 8:8 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_INFO_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_INFO_CTRL_CHKSUM_HW_EN 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_INFO_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NVC871_SF_HDMI_INFO_CTRL_CHKSUM_HW_DIS 0x00000000 /* RW--V */ +#define NVC871_SF_HDMI_INFO_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_CTRL_HBLANK 12:12 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_CTRL_HBLANK_DIS 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_CTRL_HBLANK_EN 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_INFO_CTRL_VIDEO_FMT 16:16 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_CTRL_VIDEO_FMT_SW_CONTROLLED 0x00000000 /* RW--V */ +#define NVC871_SF_HDMI_INFO_CTRL_VIDEO_FMT_HW_CONTROLLED 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_INFO_CTRL_VIDEO_FMT_INIT 0x00000001 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_STATUS(i,j) (0x000E0004-0x000E0000+(i)*1024+(j)*64) /* R--4A */ +#define NVC871_SF_HDMI_INFO_STATUS__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_INFO_STATUS__SIZE_2 3 /* */ +#define NVC871_SF_HDMI_INFO_STATUS_SENT 0:0 /* R--VF */ +#define NVC871_SF_HDMI_INFO_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NVC871_SF_HDMI_INFO_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NVC871_SF_HDMI_INFO_HEADER(i,j) (0x000E0008-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC871_SF_HDMI_INFO_HEADER__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_INFO_HEADER__SIZE_2 3 /* */ +#define NVC871_SF_HDMI_INFO_HEADER_HB0 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_HEADER_HB1 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_HEADER_HB2 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_LOW(i,j) 
(0x000E000C-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_LOW__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_LOW__SIZE_2 3 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_HIGH(i,j) (0x000E0010-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_HIGH__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_HIGH__SIZE_2 3 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_LOW(i,j) (0x000E0014-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_LOW__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_LOW__SIZE_2 3 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_HIGH(i,j) (0x000E0018-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_HIGH__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_HIGH__SIZE_2 3 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_LOW(i,j) (0x000E001C-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_LOW__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_LOW__SIZE_2 3 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define 
NVC871_SF_HDMI_INFO_SUBPACK2_HIGH(i,j) (0x000E0020-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_HIGH__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_HIGH__SIZE_2 3 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_LOW(i,j) (0x000E0024-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_LOW__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_LOW__SIZE_2 3 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_HIGH(i,j) (0x000E0028-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_HIGH__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_HIGH__SIZE_2 3 /* */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_INFO_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x000E0000-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE 0:0 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER 4:4 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL_OTHER_EN 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE 8:8 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_STATUS(i) (0x000E0004-0x000E0000+(i)*1024) /* R--4A */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_STATUS__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_STATUS_SENT 0:0 /* R-IVF */ +#define 
NVC871_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x000E0008-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x000E000C-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x000E0010-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x000E0014-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x000E0018-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define 
NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW(i) (0x000E001C-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NVC871_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_GCP_CTRL(i) (0x000E0040-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC871_SF_HDMI_GCP_CTRL__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_GCP_CTRL_ENABLE 0:0 /* RWIVF */ +#define NVC871_SF_HDMI_GCP_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_GCP_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_GCP_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NVC871_SF_HDMI_GCP_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_GCP_CTRL_OTHER 4:4 /* RWIVF */ +#define NVC871_SF_HDMI_GCP_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_GCP_CTRL_OTHER_EN 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_GCP_CTRL_SINGLE 8:8 /* RWIVF */ +#define NVC871_SF_HDMI_GCP_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_GCP_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_GCP_STATUS(i) (0x000E0044-0x000E0000+(i)*1024) /* R--4A */ +#define NVC871_SF_HDMI_GCP_STATUS__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_GCP_STATUS_SENT 0:0 /* R-IVF */ +#define NVC871_SF_HDMI_GCP_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NVC871_SF_HDMI_GCP_STATUS_ACTIVE_START_PP 6:4 /* R--VF */ +#define NVC871_SF_HDMI_GCP_STATUS_ACTIVE_START_PP_0 0x00000004 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_ACTIVE_START_PP_1 0x00000001 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_ACTIVE_START_PP_2 0x00000002 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_ACTIVE_START_PP_3 0x00000003 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_ACTIVE_END_PP 10:8 /* R--VF */ +#define NVC871_SF_HDMI_GCP_STATUS_ACTIVE_END_PP_0 0x00000004 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_ACTIVE_END_PP_1 0x00000001 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_ACTIVE_END_PP_2 0x00000002 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_ACTIVE_END_PP_3 0x00000003 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_VSYNC_START_PP 14:12 /* R--VF */ +#define NVC871_SF_HDMI_GCP_STATUS_VSYNC_START_PP_0 0x00000004 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_VSYNC_START_PP_1 0x00000001 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_VSYNC_START_PP_2 0x00000002 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_VSYNC_START_PP_3 0x00000003 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_VSYNC_END_PP 18:16 /* R--VF */ +#define NVC871_SF_HDMI_GCP_STATUS_VSYNC_END_PP_0 0x00000004 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_VSYNC_END_PP_1 0x00000001 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_VSYNC_END_PP_2 0x00000002 /* R---V 
*/ +#define NVC871_SF_HDMI_GCP_STATUS_VSYNC_END_PP_3 0x00000003 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_HSYNC_START_PP 22:20 /* R--VF */ +#define NVC871_SF_HDMI_GCP_STATUS_HSYNC_START_PP_0 0x00000004 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_HSYNC_START_PP_1 0x00000001 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_HSYNC_START_PP_2 0x00000002 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_HSYNC_START_PP_3 0x00000003 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_HSYNC_END_PP 26:24 /* R--VF */ +#define NVC871_SF_HDMI_GCP_STATUS_HSYNC_END_PP_0 0x00000004 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_HSYNC_END_PP_1 0x00000001 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_HSYNC_END_PP_2 0x00000002 /* R---V */ +#define NVC871_SF_HDMI_GCP_STATUS_HSYNC_END_PP_3 0x00000003 /* R---V */ +#define NVC871_SF_HDMI_GCP_SUBPACK(i) (0x000E004C-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC871_SF_HDMI_GCP_SUBPACK__SIZE_1 8 /* */ +#define NVC871_SF_HDMI_GCP_SUBPACK_SB0 7:0 /* RWIVF */ +#define NVC871_SF_HDMI_GCP_SUBPACK_SB0_INIT 0x00000001 /* RWI-V */ +#define NVC871_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE 0x00000001 /* RW--V */ +#define NVC871_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE 0x00000010 /* RW--V */ +#define NVC871_SF_HDMI_GCP_SUBPACK_SB1 15:8 /* RWIVF */ +#define NVC871_SF_HDMI_GCP_SUBPACK_SB1_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_GCP_SUBPACK_SB2 23:16 /* RWIVF */ +#define NVC871_SF_HDMI_GCP_SUBPACK_SB2_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_HDMI_GCP_SUBPACK_SB1_CTRL 24:24 /* RWIVF */ +#define NVC871_SF_HDMI_GCP_SUBPACK_SB1_CTRL_INIT 0x00000001 /* RWI-V */ +#define NVC871_SF_HDMI_GCP_SUBPACK_SB1_CTRL_SW 0x00000000 /* RW--V */ +#define NVC871_SF_HDMI_GCP_SUBPACK_SB1_CTRL_HW 0x00000001 /* RW--V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL(i,j) (0x000E0130-0x000E0000+(i)*1024+(j)*8) /* RW-4A */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL__SIZE_1 8 /* */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL__SIZE_2 10 /* */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_ENABLE 0:0 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE 3:1 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE_ALWAYS 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE_ONCE 0x00000001 /* RW--V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE_FID_ALWAYS 0x00000002 /* RW--V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE_FID_ONCE 0x00000003 /* RW--V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE_FID_TRIGGER 0x00000004 /* RW--V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_LOC 5:4 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_LOC_VBLANK 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_LOC_VSYNC 0x00000001 /* RW--V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_LOC_LINE 0x00000002 /* RW--V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_LOC_LOADV 0x00000003 /* RW--V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_OFFSET 10:6 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_OFFSET_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_SIZE 18:14 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_SIZE_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_BUSY 22:22 /* R-IVF */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_BUSY_NO 0x00000000 /* R-I-V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_BUSY_YES 0x00000001 /* R---V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_SENT 
23:23 /* R-IVF */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_SENT_NO 0x00000000 /* R-I-V */ +#define NVC871_SF_GENERIC_INFOFRAME_CTRL_SENT_YES 0x00000001 /* R---V */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG(i,j) (0x000E0134-0x000E0000+(i)*1024+(j)*8) /* RW-4A */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG__SIZE_1 8 /* */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG__SIZE_2 10 /* */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_FID 7:0 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_FID_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_LINE_ID 23:8 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_LINE_ID_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_LINE_ID_REVERSED 24:24 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_LINE_ID_REVERSED_NO 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_LINE_ID_REVERSED_YES 0x00000001 /* RW--V */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_CRC_OVERRIDE 28:28 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_CRC_OVERRIDE_NO 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_CRC_OVERRIDE_YES 0x00000001 /* RW--V */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_HW_CHECKSUM 29:29 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_HW_CHECKSUM_NO 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_HW_CHECKSUM_YES 0x00000001 /* RW--V */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_NEW 30:30 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_NEW_INIT 0x00000000 /* R-I-V */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_NEW_DONE 0x00000000 /* R---V */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_NEW_PENDING 0x00000001 /* R---V */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_NEW_TRIGGER 0x00000001 /* -W--T */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_MTD_STATE_CTRL 31:31 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_MTD_STATE_CTRL_ACT 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_CONFIG_MTD_STATE_CTRL_ARM 0x00000001 /* RW--V */ +#define NVC871_SF_GENERIC_INFOFRAME_DATA_CTRL(i) (0x000E03F0-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC871_SF_GENERIC_INFOFRAME_DATA_CTRL__SIZE_1 8 /* */ +#define NVC871_SF_GENERIC_INFOFRAME_DATA_CTRL_OFFSET 4:0 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_DATA_CTRL_OFFSET_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_DATA(i) (0x000E03F4-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC871_SF_GENERIC_INFOFRAME_DATA__SIZE_1 8 /* */ +#define NVC871_SF_GENERIC_INFOFRAME_DATA_BYTE0 7:0 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_DATA_BYTE0_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_DATA_BYTE1 15:8 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_DATA_BYTE1_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_DATA_BYTE2 23:16 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_DATA_BYTE2_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_DATA_BYTE3 31:24 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_DATA_BYTE3_INIT 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_MISC_CTRL(i) (0x000E03F8-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC871_SF_GENERIC_INFOFRAME_MISC_CTRL__SIZE_1 8 /* */ +#define NVC871_SF_GENERIC_INFOFRAME_MISC_CTRL_WIN_CHN_SEL 0:0 /* RWIVF */ +#define NVC871_SF_GENERIC_INFOFRAME_MISC_CTRL_WIN_CHN_SEL_PRIVATE 0x00000000 /* RWI-V */ +#define NVC871_SF_GENERIC_INFOFRAME_MISC_CTRL_WIN_CHN_SEL_PUBLIC 0x00000001 /* RW--V */ +#define NVC871_SF_GENERIC_INFOFRAME_MISC_CTRL_AUDIO_PRIORITY 1:1 /* RWIVF */ 
+#define NVC871_SF_GENERIC_INFOFRAME_MISC_CTRL_AUDIO_PRIORITY_HIGH 0x00000000 /* RW--V */ +#define NVC871_SF_GENERIC_INFOFRAME_MISC_CTRL_AUDIO_PRIORITY_LOW 0x00000001 /* RWI-V */ + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _clc871_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc873.h b/src/common/sdk/nvidia/inc/class/clc873.h new file mode 100644 index 0000000..2d32b6d --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc873.h @@ -0,0 +1,399 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc873_h_ +#define _clc873_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC873_DISP_CAPABILITIES 0xC873 + +typedef volatile struct _clc873_tag0 { + NvU32 dispCapabilities[0x400]; +} _NvC873DispCapabilities,NvC873DispCapabilities_Map ; + + +#define NVC873_SYS_CAP 0x0 /* RW-4R */ +#define NVC873_SYS_CAP_HEAD0_EXISTS 0:0 /* RWIVF */ +#define NVC873_SYS_CAP_HEAD0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_HEAD0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_HEAD1_EXISTS 1:1 /* RWIVF */ +#define NVC873_SYS_CAP_HEAD1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_HEAD1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_HEAD2_EXISTS 2:2 /* RWIVF */ +#define NVC873_SYS_CAP_HEAD2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_HEAD2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_HEAD3_EXISTS 3:3 /* RWIVF */ +#define NVC873_SYS_CAP_HEAD3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_HEAD3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_HEAD4_EXISTS 4:4 /* RWIVF */ +#define NVC873_SYS_CAP_HEAD4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_HEAD4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_HEAD5_EXISTS 5:5 /* RWIVF */ +#define NVC873_SYS_CAP_HEAD5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_HEAD5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_HEAD6_EXISTS 6:6 /* RWIVF */ +#define NVC873_SYS_CAP_HEAD6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_HEAD6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_HEAD7_EXISTS 7:7 /* RWIVF */ +#define NVC873_SYS_CAP_HEAD7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_HEAD7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_HEAD_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define 
NVC873_SYS_CAP_HEAD_EXISTS__SIZE_1 8 /* */ +#define NVC873_SYS_CAP_HEAD_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_HEAD_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_SOR0_EXISTS 8:8 /* RWIVF */ +#define NVC873_SYS_CAP_SOR0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_SOR0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_SOR1_EXISTS 9:9 /* RWIVF */ +#define NVC873_SYS_CAP_SOR1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_SOR1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_SOR2_EXISTS 10:10 /* RWIVF */ +#define NVC873_SYS_CAP_SOR2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_SOR2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_SOR3_EXISTS 11:11 /* RWIVF */ +#define NVC873_SYS_CAP_SOR3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_SOR3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_SOR4_EXISTS 12:12 /* RWIVF */ +#define NVC873_SYS_CAP_SOR4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_SOR4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_SOR5_EXISTS 13:13 /* RWIVF */ +#define NVC873_SYS_CAP_SOR5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_SOR5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_SOR6_EXISTS 14:14 /* RWIVF */ +#define NVC873_SYS_CAP_SOR6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_SOR6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_SOR7_EXISTS 15:15 /* RWIVF */ +#define NVC873_SYS_CAP_SOR7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_SOR7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_SOR_EXISTS(i) (8+(i)):(8+(i)) /* RWIVF */ +#define NVC873_SYS_CAP_SOR_EXISTS__SIZE_1 8 /* */ +#define NVC873_SYS_CAP_SOR_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_SOR_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_DSI0_EXISTS 20:20 /* RWIVF */ +#define NVC873_SYS_CAP_DSI0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_DSI0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_DSI1_EXISTS 21:21 /* RWIVF */ +#define NVC873_SYS_CAP_DSI1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_DSI1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_DSI2_EXISTS 22:22 /* RWIVF */ +#define NVC873_SYS_CAP_DSI2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_DSI2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_DSI3_EXISTS 23:23 /* RWIVF */ +#define NVC873_SYS_CAP_DSI3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_DSI3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_SYS_CAP_DSI_EXISTS(i) (20+(i)):(20+(i)) /* RWIVF */ +#define NVC873_SYS_CAP_DSI_EXISTS__SIZE_1 4 /* */ +#define NVC873_SYS_CAP_DSI_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC873_SYS_CAP_DSI_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA 0x10 /* RW-4R */ +#define NVC873_IHUB_COMMON_CAPA_MEMPOOL_ENTRIES 15:0 /* RWIUF */ +#define NVC873_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH 17:16 /* RWIVF */ +#define NVC873_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_32B 0x00000000 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_64B 0x00000001 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_128B 0x00000002 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_256B 0x00000003 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_ROTATION 18:18 /* RWIVF */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_ROTATION_FALSE 0x00000000 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_ROTATION_TRUE 0x00000001 /* RW--V */ +#define 
NVC873_IHUB_COMMON_CAPA_SUPPORT_PLANAR 19:19 /* RWIVF */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_PLANAR_FALSE 0x00000000 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_PLANAR_TRUE 0x00000001 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_VGA 20:20 /* RWIVF */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_VGA_FALSE 0x00000000 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_VGA_TRUE 0x00000001 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION 21:21 /* RWIVF */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_FALSE 0x00000000 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_TRUE 0x00000001 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_MSCG 22:22 /* RWIVF */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_MSCG_FALSE 0x00000000 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_MSCG_TRUE 0x00000001 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH 23:23 /* RWIVF */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_FALSE 0x00000000 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_TRUE 0x00000001 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT 26:26 /* RWIVF */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION 31:30 /* RWIVF */ +#define NVC873_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_32B 0x00000000 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_64B 0x00000001 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_128B 0x00000002 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_256B 0x00000003 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPC 0x18 /* RW-4R */ +#define NVC873_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE 1:0 /* RWIVF */ +#define NVC873_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_32B 0x00000000 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_64B 0x00000001 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_128B 0x00000002 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_256B 0x00000003 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED 6:4 /* RWIVF */ +#define NVC873_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_NONE 0x00000000 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_TWO 0x00000001 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_FOUR 0x00000002 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_EIGHT 0x00000003 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_SIXTEEN 0x00000004 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR 11:11 /* RWIVF */ +#define NVC873_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR_FALSE 0x00000000 /* RWI-V */ +#define NVC873_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR_TRUE 0x00000001 /* RW--V */ +#define NVC873_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP 12:12 /* RWIVF */ +#define NVC873_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP_FALSE 0x00000000 /* RWI-V */ +#define NVC873_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA(i) (0x680+(i)*32) /* RW-4A */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA__SIZE_1 8 /* */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT 16:16 /* RWIVF */ +#define 
NVC873_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT 17:17 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT 18:18 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_HCLPF_PRESENT 19:19 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_HCLPF_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_HCLPF_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT 20:20 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT 21:21 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT 22:22 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_VFILTER_PRESENT 23:23 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_VFILTER_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_VFILTER_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_RCRC_PRESENT 24:24 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_RCRC_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPA_RCRC_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPB(i) (0x684+(i)*32) /* RW-4A */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPB__SIZE_1 8 /* */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPB_VGA 0:0 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPB_VGA_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPB_VGA_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGSZ 9:6 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGNR 12:10 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT 15:15 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC(i) (0x688+(i)*32) /* RW-4A */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC__SIZE_1 8 /* */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_OCSC0_PRECISION 4:0 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_OCSC1_PRECISION 12:8 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP 13:13 /* RWIVF */ +#define 
NVC873_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_SCLR_SF_PRECISION 20:16 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_SCLR_CI_PRECISION 24:21 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB 25:25 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPD(i) (0x68c+(i)*32) /* RW-4A */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPD__SIZE_1 8 /* */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPE(i) (0x690+(i)*32) /* RW-4A */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPE__SIZE_1 8 /* */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPE_DSC_RATEBUFSIZE 3:0 /* RWIUF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPE_DSC_LINEBUFSIZE 13:8 /* RWIUF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422 16:16 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420 17:17 /* RWIVF */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_TRUE 0x00000001 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_FALSE 0x00000000 /* RW--V */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPF(i) (0x694+(i)*32) /* RW-4A */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPF__SIZE_1 8 /* */ +#define NVC873_POSTCOMP_HEAD_HDR_CAPF_VFILTER_MAX_PIXELS 15:0 /* RWIVF */ +#define NVC873_SOR_CAP(i) (0x144+(i)*8) /* RW-4A */ +#define NVC873_SOR_CAP__SIZE_1 8 /* */ +#define NVC873_SOR_CAP_SINGLE_LVDS_18 0:0 /* RWIVF */ +#define NVC873_SOR_CAP_SINGLE_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC873_SOR_CAP_SINGLE_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC873_SOR_CAP_SINGLE_LVDS_24 1:1 /* RWIVF */ +#define NVC873_SOR_CAP_SINGLE_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC873_SOR_CAP_SINGLE_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC873_SOR_CAP_DUAL_LVDS_18 2:2 /* RWIVF */ +#define NVC873_SOR_CAP_DUAL_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC873_SOR_CAP_DUAL_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC873_SOR_CAP_DUAL_LVDS_24 3:3 /* RWIVF */ +#define NVC873_SOR_CAP_DUAL_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC873_SOR_CAP_DUAL_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC873_SOR_CAP_SINGLE_TMDS_A 8:8 /* RWIVF */ +#define NVC873_SOR_CAP_SINGLE_TMDS_A_FALSE 0x00000000 /* RW--V */ +#define NVC873_SOR_CAP_SINGLE_TMDS_A_TRUE 0x00000001 /* RW--V */ +#define NVC873_SOR_CAP_SINGLE_TMDS_B 9:9 /* RWIVF */ +#define NVC873_SOR_CAP_SINGLE_TMDS_B_FALSE 0x00000000 /* RW--V */ +#define NVC873_SOR_CAP_SINGLE_TMDS_B_TRUE 0x00000001 /* RW--V */ +#define NVC873_SOR_CAP_DUAL_TMDS 11:11 /* RWIVF 
*/ +#define NVC873_SOR_CAP_DUAL_TMDS_FALSE 0x00000000 /* RW--V */ +#define NVC873_SOR_CAP_DUAL_TMDS_TRUE 0x00000001 /* RW--V */ +#define NVC873_SOR_CAP_DISPLAY_OVER_PCIE 13:13 /* RWIVF */ +#define NVC873_SOR_CAP_DISPLAY_OVER_PCIE_FALSE 0x00000000 /* RW--V */ +#define NVC873_SOR_CAP_DISPLAY_OVER_PCIE_TRUE 0x00000001 /* RW--V */ +#define NVC873_SOR_CAP_SDI 16:16 /* RWIVF */ +#define NVC873_SOR_CAP_SDI_FALSE 0x00000000 /* RW--V */ +#define NVC873_SOR_CAP_SDI_TRUE 0x00000001 /* RW--V */ +#define NVC873_SOR_CAP_DP_A 24:24 /* RWIVF */ +#define NVC873_SOR_CAP_DP_A_FALSE 0x00000000 /* RW--V */ +#define NVC873_SOR_CAP_DP_A_TRUE 0x00000001 /* RW--V */ +#define NVC873_SOR_CAP_DP_B 25:25 /* RWIVF */ +#define NVC873_SOR_CAP_DP_B_FALSE 0x00000000 /* RW--V */ +#define NVC873_SOR_CAP_DP_B_TRUE 0x00000001 /* RW--V */ +#define NVC873_SOR_CAP_DP_INTERLACE 26:26 /* RWIVF */ +#define NVC873_SOR_CAP_DP_INTERLACE_FALSE 0x00000000 /* RW--V */ +#define NVC873_SOR_CAP_DP_INTERLACE_TRUE 0x00000001 /* RW--V */ +#define NVC873_SOR_CAP_DP_8_LANES 27:27 /* RWIVF */ +#define NVC873_SOR_CAP_DP_8_LANES_FALSE 0x00000000 /* RW--V */ +#define NVC873_SOR_CAP_DP_8_LANES_TRUE 0x00000001 /* RW--V */ +#define NVC873_SOR_CAP_HDMI_FRL 28:28 /* RWIVF */ +#define NVC873_SOR_CAP_HDMI_FRL_FALSE 0x00000000 /* RW--V */ +#define NVC873_SOR_CAP_HDMI_FRL_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA(i) (0x780+(i)*32) /* RW-4A */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA__SIZE_1 32 /* */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_ALPHA_WIDTH 13:10 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT 16:16 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT 17:17 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT 18:18 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT 19:19 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT 20:20 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT 21:21 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT 22:22 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT 23:23 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_TRUE 0x00000001 /* RW--V */ 
+#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT 24:24 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPB(i) (0x784+(i)*32) /* RW-4A */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPB__SIZE_1 32 /* */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPB_FMT_PRECISION 4:0 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGSZ 9:6 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGNR 12:10 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT 15:15 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC(i) (0x788+(i)*32) /* RW-4A */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC__SIZE_1 32 /* */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_PRECISION 4:0 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGSZ 9:6 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGNR 12:10 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT 15:15 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_PRECISION 20:16 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP 21:21 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD(i) (0x78c+(i)*32) /* RW-4A */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD__SIZE_1 32 /* */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGSZ 3:0 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGNR 6:4 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD 8:8 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT 9:9 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_SF_PRECISION 16:12 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_CI_PRECISION 20:17 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB 21:21 /* RWIVF */ +#define 
NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA 22:22 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE(i) (0x790+(i)*32) /* RW-4A */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE__SIZE_1 32 /* */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_PRECISION 4:0 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGSZ 9:6 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGNR 12:10 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT 15:15 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_PRECISION 20:16 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP 21:21 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPF(i) (0x794+(i)*32) /* RW-4A */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPF__SIZE_1 32 /* */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */ +#define NVC873_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */ + +#ifdef __cplusplus +}; +#endif /* extern C */ +#endif //_clc873_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc87d.h b/src/common/sdk/nvidia/inc/class/clc87d.h new file mode 100644 index 0000000..2e54ae2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc87d.h @@ -0,0 +1,1336 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clC87d_h_ +#define _clC87d_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC87D_CORE_CHANNEL_DMA (0x0000C87D) + +#define NV_DISP_NOTIFIER 0x00000000 +#define NV_DISP_NOTIFIER_SIZEOF 0x00000010 +#define NV_DISP_NOTIFIER__0 0x00000000 +#define NV_DISP_NOTIFIER__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFIER__0_FIELD 8:8 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE 9:9 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_NON_TEARING 0x00000000 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_IMMEDIATE 0x00000001 +#define NV_DISP_NOTIFIER__0_R1 15:10 +#define NV_DISP_NOTIFIER__0_R2 23:16 +#define NV_DISP_NOTIFIER__0_R3 29:24 +#define NV_DISP_NOTIFIER__0_STATUS 31:30 +#define NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_NOTIFIER__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_NOTIFIER__0_STATUS_FINISHED 0x00000002 +#define NV_DISP_NOTIFIER__1 0x00000001 +#define NV_DISP_NOTIFIER__1_R4 31:0 +#define NV_DISP_NOTIFIER__2 0x00000002 +#define NV_DISP_NOTIFIER__2_TIMESTAMP_LO 31:0 +#define NV_DISP_NOTIFIER__3 0x00000003 +#define NV_DISP_NOTIFIER__3_TIMESTAMP_HI 31:0 + + +// dma opcode instructions +#define NVC87D_DMA +#define NVC87D_DMA_OPCODE 31:29 +#define NVC87D_DMA_OPCODE_METHOD 0x00000000 +#define NVC87D_DMA_OPCODE_JUMP 0x00000001 +#define NVC87D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC87D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC87D_DMA_METHOD_COUNT 27:18 +#define NVC87D_DMA_METHOD_OFFSET 13:2 +#define NVC87D_DMA_DATA 31:0 +#define NVC87D_DMA_DATA_NOP 0x00000000 +#define NVC87D_DMA_JUMP_OFFSET 11:2 +#define NVC87D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// if cap SUPPORT_FLEXIBLE_WIN_MAPPING is FALSE, this define can be used to obtain which head a window is mapped to +#define NVC37D_WINDOW_MAPPED_TO_HEAD(w) ((w)>>1) +#define NVC37D_GET_VALID_WINDOWMASK_FOR_HEAD(h) ((1<<((h)*2)) | (1<<((h)*2+1))) + +// class methods +#define NVC87D_PUT (0x00000000) +#define NVC87D_PUT_PTR 9:0 +#define NVC87D_GET (0x00000004) +#define NVC87D_GET_PTR 9:0 +#define NVC87D_UPDATE (0x00000200) +#define NVC87D_UPDATE_SPECIAL_HANDLING 21:20 +#define NVC87D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NVC87D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NVC87D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NVC87D_UPDATE_SPECIAL_HANDLING_REASON 19:12 +#define NVC87D_UPDATE_INHIBIT_INTERRUPTS 24:24 +#define NVC87D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) 
+#define NVC87D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NVC87D_UPDATE_RELEASE_ELV 0:0 +#define NVC87D_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC87D_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC87D_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC87D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC87D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC87D_SET_CONTEXT_DMA_NOTIFIER (0x00000208) +#define NVC87D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC87D_SET_NOTIFIER_CONTROL (0x0000020C) +#define NVC87D_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC87D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC87D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC87D_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC87D_SET_NOTIFIER_CONTROL_NOTIFY 12:12 +#define NVC87D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NVC87D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NVC87D_SET_CONTROL (0x00000210) +#define NVC87D_SET_CONTROL_FLIP_LOCK_PIN(i) ((i)+0):((i)+0) +#define NVC87D_SET_CONTROL_FLIP_LOCK_PIN__SIZE_1 4 +#define NVC87D_SET_CONTROL_FLIP_LOCK_PIN_DISABLE (0x00000000) +#define NVC87D_SET_CONTROL_FLIP_LOCK_PIN_ENABLE (0x00000001) +#define NVC87D_SET_CONTROL_FLIP_LOCK_PIN0 0:0 +#define NVC87D_SET_CONTROL_FLIP_LOCK_PIN0_DISABLE (0x00000000) +#define NVC87D_SET_CONTROL_FLIP_LOCK_PIN0_ENABLE (0x00000001) +#define NVC87D_SET_CONTROL_FLIP_LOCK_PIN1 1:1 +#define NVC87D_SET_CONTROL_FLIP_LOCK_PIN1_DISABLE (0x00000000) +#define 
NVC87D_SET_CONTROL_FLIP_LOCK_PIN1_ENABLE (0x00000001) +#define NVC87D_SET_CONTROL_FLIP_LOCK_PIN2 2:2 +#define NVC87D_SET_CONTROL_FLIP_LOCK_PIN2_DISABLE (0x00000000) +#define NVC87D_SET_CONTROL_FLIP_LOCK_PIN2_ENABLE (0x00000001) +#define NVC87D_SET_CONTROL_FLIP_LOCK_PIN3 3:3 +#define NVC87D_SET_CONTROL_FLIP_LOCK_PIN3_DISABLE (0x00000000) +#define NVC87D_SET_CONTROL_FLIP_LOCK_PIN3_ENABLE (0x00000001) +#define NVC87D_SET_INTERLOCK_FLAGS (0x00000218) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC87D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define 
NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define 
NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define 
NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC87D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC87D_GET_RG_SCAN_LINE(b) (0x00000220 + (b)*0x00000004) +#define NVC87D_GET_RG_SCAN_LINE_LINE 15:0 +#define NVC87D_GET_RG_SCAN_LINE_VBLANK 16:16 +#define NVC87D_GET_RG_SCAN_LINE_VBLANK_FALSE (0x00000000) +#define NVC87D_GET_RG_SCAN_LINE_VBLANK_TRUE (0x00000001) +#define NVC87D_SET_GET_BLANKING_CTRL(b) (0x00000240 + (b)*0x00000004) +#define NVC87D_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NVC87D_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NVC87D_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NVC87D_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NVC87D_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NVC87D_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) + +#define NVC87D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NVC87D_SOR_SET_CONTROL_OWNER_MASK 7:0 +#define NVC87D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVC87D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVC87D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NVC87D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NVC87D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVC87D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVC87D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVC87D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVC87D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVC87D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NVC87D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NVC87D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NVC87D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NVC87D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NVC87D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NVC87D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NVC87D_SOR_SET_CONTROL_PROTOCOL_HDMI_FRL (0x0000000C) +#define NVC87D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NVC87D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16 +#define NVC87D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC87D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC87D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NVC87D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NVC87D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NVC87D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NVC87D_SOR_SET_CUSTOM_REASON(a) (0x00000304 + (a)*0x00000020) +#define NVC87D_SOR_SET_CUSTOM_REASON_CODE 31:0 +#define NVC87D_SOR_SET_SW_SPARE_A(a) (0x00000308 + (a)*0x00000020) +#define NVC87D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NVC87D_SOR_SET_SW_SPARE_B(a) (0x0000030C + (a)*0x00000020) +#define NVC87D_SOR_SET_SW_SPARE_B_CODE 31:0 + +#define NVC87D_DSI_SET_CONTROL(a) (0x00000500 + (a)*0x00000020) +#define NVC87D_DSI_SET_CONTROL_OWNER_MASK 7:0 +#define NVC87D_DSI_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVC87D_DSI_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVC87D_DSI_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NVC87D_DSI_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define 
NVC87D_DSI_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVC87D_DSI_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVC87D_DSI_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVC87D_DSI_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVC87D_DSI_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVC87D_DSI_SET_CUSTOM_REASON(a) (0x00000504 + (a)*0x00000020) +#define NVC87D_DSI_SET_CUSTOM_REASON_CODE 31:0 +#define NVC87D_DSI_SET_SW_SPARE_A(a) (0x00000508 + (a)*0x00000020) +#define NVC87D_DSI_SET_SW_SPARE_A_CODE 31:0 +#define NVC87D_DSI_SET_SW_SPARE_B(a) (0x0000050C + (a)*0x00000020) +#define NVC87D_DSI_SET_SW_SPARE_B_CODE 31:0 + +#define NVC87D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080) +#define NVC87D_WINDOW_SET_CONTROL_OWNER 3:0 +#define NVC87D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i)) +#define NVC87D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8 +#define NVC87D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000) +#define NVC87D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001) +#define NVC87D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002) +#define NVC87D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003) +#define NVC87D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004) +#define NVC87D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005) +#define NVC87D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006) +#define NVC87D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007) +#define NVC87D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define 
NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define 
NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define 
NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(a) (0x0000100C + (a)*0x00000080) +#define NVC87D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVC87D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080) +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0 +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16 +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28 +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20 +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004) +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24 +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED 30:30 +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000) +#define NVC87D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001) + +#define NVC87D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000400) +#define NVC87D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NVC87D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NVC87D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NVC87D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NVC87D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003) +#define NVC87D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3 +#define NVC87D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000) +#define NVC87D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001) +#define NVC87D_HEAD_SET_PROCAMP_CHROMA_DOWN_V 4:4 +#define NVC87D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_DISABLE (0x00000000) +#define NVC87D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_ENABLE (0x00000001) +#define NVC87D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28 +#define NVC87D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NVC87D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000400) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2 +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3 +#define 
NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4 +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_444 (0x00000009) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444NP (0x0000000A) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24 +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12 +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26 +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016) +#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018)
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019)
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A)
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B)
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C)
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D)
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E)
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F)
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F)
+#define NVC87D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_CONTROL_STRUCTURE 1:0
+#define NVC87D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NVC87D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2
+#define NVC87D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000)
+#define NVC87D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001)
+#define NVC87D_HEAD_SET_CONTROL_YUV420PACKER 3:3
+#define NVC87D_HEAD_SET_CONTROL_YUV420PACKER_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_CONTROL_YUV420PACKER_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 11:10
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 8:4
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 15:12
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 23:22
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 20:16
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN 28:24
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC87D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31
+#define NVC87D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVC87D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00002010 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00002014 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 6:4
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NVC87D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004)
+#define NVC87D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NVC87D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_DITHER_CONTROL_BITS 5:4
+#define NVC87D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000)
+#define NVC87D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001)
+#define NVC87D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002)
+#define NVC87D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003)
+#define NVC87D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2
+#define NVC87D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_DITHER_CONTROL_MODE 10:8
+#define NVC87D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NVC87D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NVC87D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NVC87D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NVC87D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NVC87D_HEAD_SET_DITHER_CONTROL_MODE_ROUND (0x00000005)
+#define NVC87D_HEAD_SET_DITHER_CONTROL_PHASE 13:12
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x0000201C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 0:0
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000)
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001)
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING 4:4
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 9:8
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000)
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001)
+#define NVC87D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC87D_HEAD_SET_DISPLAY_ID_CODE 31:0
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVC87D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVC87D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(a) (0x0000202C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_HORIZONTAL 15:0
+#define NVC87D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_VERTICAL 31:16
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000)
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001)
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002)
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003)
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004)
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000)
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001)
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED 16:16
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVC87D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+#define NVC87D_HEAD_SET_STALL_LOCK(a) (0x00002034 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_STALL_LOCK_ENABLE 0:0
+#define NVC87D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000)
+#define NVC87D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001)
+#define NVC87D_HEAD_SET_STALL_LOCK_MODE 2:2
+#define NVC87D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000)
+#define NVC87D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN 8:4
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC87D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC87D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 12:12
+#define NVC87D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000)
+#define NVC87D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001)
+#define NVC87D_HEAD_SET_STALL_LOCK_TEPOLARITY 14:14
+#define NVC87D_HEAD_SET_STALL_LOCK_TEPOLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC87D_HEAD_SET_STALL_LOCK_TEPOLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC87D_HEAD_SET_STALL_LOCK_UNSTALL_SYNC_ADVANCE 25:16
+#define NVC87D_HEAD_SET_LOCK_CHAIN(a) (0x00002044 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_LOCK_CHAIN_POSITION 3:0
+#define NVC87D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x00002048 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NVC87D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#define NVC87D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NVC87D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NVC87D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NVC87D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NVC87D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x0000205C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0
+#define NVC87D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16
+#define NVC87D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_RASTER_SIZE_WIDTH 14:0
+#define NVC87D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16
+#define NVC87D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NVC87D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NVC87D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NVC87D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NVC87D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NVC87D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NVC87D_HEAD_SET_OVERSCAN_COLOR(a) (0x00002078 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OVERSCAN_COLOR_RED_CR 9:0
+#define NVC87D_HEAD_SET_OVERSCAN_COLOR_GREEN_Y 19:10
+#define NVC87D_HEAD_SET_OVERSCAN_COLOR_BLUE_CB 29:20
+#define NVC87D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(a) (0x0000207C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_RED_CR 9:0
+#define NVC87D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_GREEN_Y 19:10
+#define NVC87D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_BLUE_CB 29:20
+#define NVC87D_HEAD_SET_HDMI_CTRL(a) (0x00002080 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0
+#define NVC87D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000)
+#define NVC87D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001)
+#define NVC87D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002)
+#define NVC87D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4
+#define NVC87D_HEAD_SET_CONTEXT_DMA_CURSOR(a,b) (0x00002088 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC87D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0
+#define NVC87D_HEAD_SET_OFFSET_CURSOR(a,b) (0x00002090 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC87D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
+#define NVC87D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x00002098 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 0:0
+#define NVC87D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000)
+#define NVC87D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS 20:20
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00002180 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
+#define NVC87D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020)
+#define NVC87D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8
+#define NVC87D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NVC87D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NVC87D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12
+#define NVC87D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000)
+#define NVC87D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030)
+#define NVC87D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVC87D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8
+#define NVC87D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050)
+#define NVC87D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051)
+#define NVC87D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052)
+#define NVC87D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053)
+#define NVC87D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054)
+#define NVC87D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055)
+#define NVC87D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056)
+#define NVC87D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057)
+#define NVC87D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20
+#define NVC87D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000)
+#define NVC87D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030)
+#define NVC87D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVC87D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8
+#define NVC87D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050)
+#define NVC87D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051)
+#define NVC87D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052)
+#define NVC87D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053)
+#define NVC87D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054)
+#define NVC87D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055)
+#define NVC87D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056)
+#define NVC87D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9
+#define NVC87D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_PRESENT_CONTROL(a) (0x0000218C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 0:0
+#define NVC87D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4
+#define NVC87D_HEAD_SET_SW_SPARE_A(a) (0x00002194 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_SW_SPARE_A_CODE 31:0
+#define NVC87D_HEAD_SET_SW_SPARE_B(a) (0x00002198 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_SW_SPARE_B_CODE 31:0
+#define NVC87D_HEAD_SET_SW_SPARE_C(a) (0x0000219C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_SW_SPARE_C_CODE 31:0
+#define NVC87D_HEAD_SET_SW_SPARE_D(a) (0x000021A0 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_SW_SPARE_D_CODE 31:0
+#define NVC87D_HEAD_SET_DISPLAY_RATE(a) (0x000021A8 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DISPLAY_RATE_RUN_MODE 0:0
+#define NVC87D_HEAD_SET_DISPLAY_RATE_RUN_MODE_CONTINUOUS (0x00000000)
+#define NVC87D_HEAD_SET_DISPLAY_RATE_RUN_MODE_ONE_SHOT (0x00000001)
+#define NVC87D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL 25:4
+#define NVC87D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH 2:2
+#define NVC87D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE(a,b) (0x000021AC + (a)*0x00000400 + (b)*0x00000004)
+#define NVC87D_HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE_HANDLE 31:0
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL(a,b) (0x000021CC + (a)*0x00000400 + (b)*0x00000004)
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_OFFSET 7:0
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000)
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001)
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE 14:14
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE_WRITE (0x00000000)
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE 10:10
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE_ONE_TIME (0x00000000)
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE_CONTINUOUS (0x00000001)
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RASTER_LINE 30:16
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_VALUE(a,b) (0x000021EC + (a)*0x00000400 + (b)*0x00000004)
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_VALUE 31:0
+#define NVC87D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE(a) (0x00002214 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_DATA 9:0
+#define NVC87D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_INDEX 19:12
+#define NVC87D_HEAD_SET_MIN_FRAME_IDLE(a) (0x00002218 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_MIN_FRAME_IDLE_LEADING_RASTER_LINES 14:0
+#define NVC87D_HEAD_SET_MIN_FRAME_IDLE_TRAILING_RASTER_LINES 30:16
+#define NVC87D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED(a) (0x00002220 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_ALPHA 7:0
+#define NVC87D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_RED 31:16
+#define NVC87D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE(a) (0x00002224 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_GREEN 15:0
+#define NVC87D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_BLUE 31:16
+#define NVC87D_HEAD_SET_CURSOR_COLOR_NORM_SCALE(a) (0x00002228 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_CURSOR_COLOR_NORM_SCALE_VALUE 15:0
+#define NVC87D_HEAD_SET_XOR_BLEND_FACTOR(a) (0x0000222C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_XOR_BLEND_FACTOR_LOG2PEAK_LUMINANCE 3:0
+#define NVC87D_HEAD_SET_XOR_BLEND_FACTOR_S1 16:4
+#define NVC87D_HEAD_SET_XOR_BLEND_FACTOR_S2 30:18
+#define NVC87D_HEAD_SET_CLAMP_RANGE_GREEN(a) (0x00002238 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_CLAMP_RANGE_GREEN_LOW 11:0
+#define NVC87D_HEAD_SET_CLAMP_RANGE_GREEN_HIGH 27:16
+#define NVC87D_HEAD_SET_CLAMP_RANGE_RED_BLUE(a) (0x0000223C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_CLAMP_RANGE_RED_BLUE_LOW 11:0
+#define NVC87D_HEAD_SET_CLAMP_RANGE_RED_BLUE_HIGH 27:16
+#define NVC87D_HEAD_SET_OCSC0CONTROL(a) (0x00002240 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC0CONTROL_ENABLE 0:0
+#define NVC87D_HEAD_SET_OCSC0CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_OCSC0CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C00(a) (0x00002244 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C00_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C01(a) (0x00002248 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C01_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C02(a) (0x0000224C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C02_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C03(a) (0x00002250 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C03_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C10(a) (0x00002254 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C10_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C11(a) (0x00002258 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C11_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C12(a) (0x0000225C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C12_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C13(a) (0x00002260 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C13_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C20(a) (0x00002264 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C20_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C21(a) (0x00002268 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C21_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C22(a) (0x0000226C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C22_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C23(a) (0x00002270 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC0COEFFICIENT_C23_VALUE 20:0
+#define NVC87D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0
+#define NVC87D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1
+#define NVC87D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_OLUT_CONTROL_MODE 3:2
+#define NVC87D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000)
+#define NVC87D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001)
+#define NVC87D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002)
+#define NVC87D_HEAD_SET_OLUT_CONTROL_SIZE 18:8
+#define NVC87D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0
+#define NVC87D_HEAD_SET_CONTEXT_DMA_OLUT(a) (0x00002288 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_CONTEXT_DMA_OLUT_HANDLE 31:0
+#define NVC87D_HEAD_SET_OFFSET_OLUT(a) (0x0000228C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OFFSET_OLUT_ORIGIN 31:0
+#define NVC87D_HEAD_SET_OCSC1CONTROL(a) (0x0000229C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC1CONTROL_ENABLE 0:0
+#define NVC87D_HEAD_SET_OCSC1CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_OCSC1CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C00(a) (0x000022A0 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C00_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C01(a) (0x000022A4 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C01_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C02(a) (0x000022A8 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C02_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C03(a) (0x000022AC + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C03_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C10(a) (0x000022B0 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C10_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C11(a) (0x000022B4 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C11_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C12(a) (0x000022B8 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C12_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C13(a) (0x000022BC + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C13_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C20(a) (0x000022C0 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C20_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C21(a) (0x000022C4 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C21_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C22(a) (0x000022C8 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C22_VALUE 20:0
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C23(a) (0x000022CC + (a)*0x00000400)
+#define NVC87D_HEAD_SET_OCSC1COEFFICIENT_C23_VALUE 20:0
+#define NVC87D_HEAD_SET_TILE_POSITION(a) (0x000022D0 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_TILE_POSITION_X 2:0
+#define NVC87D_HEAD_SET_TILE_POSITION_Y 6:4
+#define NVC87D_HEAD_SET_DSC_CONTROL(a) (0x000022D4 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_CONTROL_ENABLE 0:0
+#define NVC87D_HEAD_SET_DSC_CONTROL_ENABLE_FALSE (0x00000000)
+#define NVC87D_HEAD_SET_DSC_CONTROL_ENABLE_TRUE (0x00000001)
+#define NVC87D_HEAD_SET_DSC_CONTROL_MODE 2:1
+#define NVC87D_HEAD_SET_DSC_CONTROL_MODE_SINGLE (0x00000000)
+#define NVC87D_HEAD_SET_DSC_CONTROL_MODE_DUAL (0x00000001)
+#define NVC87D_HEAD_SET_DSC_CONTROL_MODE_QUAD (0x00000002)
+#define NVC87D_HEAD_SET_DSC_CONTROL_AUTO_RESET 3:3
+#define NVC87D_HEAD_SET_DSC_CONTROL_AUTO_RESET_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_DSC_CONTROL_AUTO_RESET_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION 4:4
+#define NVC87D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_ENABLE (0x00000001)
+#define NVC87D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET 5:5
+#define NVC87D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_FALSE (0x00000000)
+#define NVC87D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_TRUE (0x00000001)
+#define NVC87D_HEAD_SET_DSC_CONTROL_FLATNESS_DET_THRESH 15:6
+#define NVC87D_HEAD_SET_DSC_PPS_CONTROL(a) (0x000022D8 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_CONTROL_ENABLE 0:0
+#define NVC87D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_FALSE (0x00000000)
+#define NVC87D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_TRUE (0x00000001)
+#define NVC87D_HEAD_SET_DSC_PPS_CONTROL_LOCATION 1:1
+#define NVC87D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VSYNC (0x00000000)
+#define NVC87D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VBLANK (0x00000001)
+#define NVC87D_HEAD_SET_DSC_PPS_CONTROL_SIZE 9:2
+#define NVC87D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY 10:10
+#define NVC87D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000)
+#define NVC87D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_ONCE (0x00000001)
+#define NVC87D_HEAD_SET_DSC_PPS_HEAD(a) (0x000022DC + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_HEAD_BYTE0 7:0
+#define NVC87D_HEAD_SET_DSC_PPS_HEAD_BYTE1 15:8
+#define NVC87D_HEAD_SET_DSC_PPS_HEAD_BYTE2 23:16
+#define NVC87D_HEAD_SET_DSC_PPS_HEAD_BYTE3 31:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA0(a) (0x000022E0 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MINOR 3:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MAJOR 7:4
+#define NVC87D_HEAD_SET_DSC_PPS_DATA0_PPS_IDENTIFIER 15:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA0_RESERVED 23:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA0_LINEBUF_DEPTH 27:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA0_BITS_PER_COMPONENT 31:28
+#define NVC87D_HEAD_SET_DSC_PPS_DATA1(a) (0x000022E4 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_HIGH 1:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA1_VBR_ENABLE 2:2
+#define NVC87D_HEAD_SET_DSC_PPS_DATA1_SIMPLE422 3:3
+#define NVC87D_HEAD_SET_DSC_PPS_DATA1_CONVERT_RGB 4:4
+#define NVC87D_HEAD_SET_DSC_PPS_DATA1_BLOCK_PRED_ENABLE 5:5
+#define NVC87D_HEAD_SET_DSC_PPS_DATA1_RESERVED 7:6
+#define NVC87D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_LOW 15:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_HIGH 23:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_LOW 31:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA2(a) (0x000022E8 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_HIGH 7:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_LOW 15:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_HIGH 23:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_LOW 31:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA3(a) (0x000022EC + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_HIGH 7:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_LOW 15:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_HIGH 23:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_LOW 31:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA4(a) (0x000022F0 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_HIGH 1:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA4_RESERVED 7:2
+#define NVC87D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_LOW 15:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_HIGH 23:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_LOW 31:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA5(a) (0x000022F4 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA5_RESERVED0 7:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA5_INITIAL_SCALE_VALUE 13:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA5_RESERVED1 15:14
+#define NVC87D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_HIGH 23:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_LOW 31:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA6(a) (0x000022F8 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_HIGH 3:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA6_RESERVED0 7:4
+#define NVC87D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_LOW 15:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA6_RESERVED1 23:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA6_FIRST_LINE_BPG_OFFSET 28:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA6_RESERVED2 31:29
+#define NVC87D_HEAD_SET_DSC_PPS_DATA7(a) (0x000022FC + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_HIGH 7:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_LOW 15:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_HIGH 23:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_LOW 31:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA8(a) (0x00002300 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_HIGH 7:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_LOW 15:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_HIGH 23:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_LOW 31:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA9(a) (0x00002304 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MIN_QP 4:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA9_RESERVED0 7:5
+#define NVC87D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MAX_QP 12:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA9_RESERVED1 15:13
+#define NVC87D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_HIGH 23:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_LOW 31:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA10(a) (0x00002308 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA10_RC_EDGE_FACTOR 3:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA10_RESERVED0 7:4
+#define NVC87D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT0 12:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA10_RESERVED1 15:13
+#define NVC87D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT1 20:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA10_RESERVED2 23:21
+#define NVC87D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_LO 27:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_HI 31:28
+#define NVC87D_HEAD_SET_DSC_PPS_DATA11(a) (0x0000230C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH0 7:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH1 15:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH2 23:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH3 31:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA12(a) (0x00002310 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH4 7:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH5 15:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH6 23:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH7 31:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA13(a) (0x00002314 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH8 7:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH9 15:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH10 23:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH11 31:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA14(a) (0x00002318 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH12 7:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH13 15:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_HIGH0 18:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MIN_QP0 23:19
+#define NVC87D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_BPG_OFFSET0 29:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_LOW0 31:30
+#define NVC87D_HEAD_SET_DSC_PPS_DATA15(a) (0x0000231C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH1 2:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP1 7:3
+#define NVC87D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET1 13:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW1 15:14
+#define NVC87D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH2 18:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP2 23:19
+#define NVC87D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET2 29:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW2 31:30
+#define NVC87D_HEAD_SET_DSC_PPS_DATA16(a) (0x00002320 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH3 2:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP3 7:3
+#define NVC87D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET3 13:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW3 15:14
+#define NVC87D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH4 18:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP4 23:19
+#define NVC87D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET4 29:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW4 31:30
+#define NVC87D_HEAD_SET_DSC_PPS_DATA17(a) (0x00002324 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH5 2:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP5 7:3
+#define NVC87D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET5 13:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW5 15:14
+#define NVC87D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH6 18:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP6 23:19
+#define NVC87D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET6 29:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW6 31:30
+#define NVC87D_HEAD_SET_DSC_PPS_DATA18(a) (0x00002328 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH7 2:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP7 7:3
+#define NVC87D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET7 13:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW7 15:14
+#define NVC87D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH8 18:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP8 23:19
+#define NVC87D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET8 29:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW8 31:30
+#define NVC87D_HEAD_SET_DSC_PPS_DATA19(a) (0x0000232C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH9 2:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP9 7:3
+#define NVC87D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET9 13:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW9 15:14
+#define NVC87D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH10 18:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP10 23:19
+#define NVC87D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET10 29:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW10 31:30
+#define NVC87D_HEAD_SET_DSC_PPS_DATA20(a) (0x00002330 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH11 2:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP11 7:3
+#define NVC87D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET11 13:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW11 15:14
+#define NVC87D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH12 18:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP12 23:19
+#define NVC87D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET12 29:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW12 31:30
+#define NVC87D_HEAD_SET_DSC_PPS_DATA21(a) (0x00002334 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH13 2:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP13 7:3
+#define NVC87D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET13 13:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW13 15:14
+#define NVC87D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH14 18:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP14 23:19
+#define NVC87D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET14 29:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW14 31:30
+#define NVC87D_HEAD_SET_DSC_PPS_DATA22(a) (0x00002338 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA22_NATIVE422 0:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA22_NATIVE420 1:1
+#define NVC87D_HEAD_SET_DSC_PPS_DATA22_RESERVED0 7:2
+#define NVC87D_HEAD_SET_DSC_PPS_DATA22_SECOND_LINE_BPG_OFFSET 12:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA22_RESERVED1 15:13
+#define NVC87D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSET_HIGH 23:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSETLOW 31:24
+#define NVC87D_HEAD_SET_DSC_PPS_DATA23(a) (0x0000233C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_HIGH 7:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_LOW 15:8
+#define NVC87D_HEAD_SET_DSC_PPS_DATA23_RESERVED 31:16
+#define NVC87D_HEAD_SET_DSC_PPS_DATA24(a) (0x00002340 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA24_RESERVED 31:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA25(a) (0x00002344 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA25_RESERVED 31:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA26(a) (0x00002348 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA26_RESERVED 31:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA27(a) (0x0000234C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA27_RESERVED 31:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA28(a) (0x00002350 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA28_RESERVED 31:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA29(a) (0x00002354 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA29_RESERVED 31:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA30(a) (0x00002358 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA30_RESERVED 31:0
+#define NVC87D_HEAD_SET_DSC_PPS_DATA31(a) (0x0000235C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_DSC_PPS_DATA31_RESERVED 31:0
+#define NVC87D_HEAD_SET_RG_MERGE(a) (0x00002360 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_RG_MERGE_MODE 1:0
+#define NVC87D_HEAD_SET_RG_MERGE_MODE_DISABLE (0x00000000)
+#define NVC87D_HEAD_SET_RG_MERGE_MODE_SETUP (0x00000001)
+#define NVC87D_HEAD_SET_RG_MERGE_MODE_MASTER (0x00000002)
+#define NVC87D_HEAD_SET_RG_MERGE_MODE_SLAVE (0x00000003)
+#define NVC87D_HEAD_SET_RASTER_HBLANK_DELAY(a) (0x00002364 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_RASTER_HBLANK_DELAY_BLANK_START 15:0
+#define NVC87D_HEAD_SET_RASTER_HBLANK_DELAY_BLANK_END 31:16
+#define NVC87D_HEAD_SET_HDMI_DSC_HCACTIVE(a) (0x00002368 + (a)*0x00000400)
+#define NVC87D_HEAD_SET_HDMI_DSC_HCACTIVE_BYTES 15:0
+#define NVC87D_HEAD_SET_HDMI_DSC_HCACTIVE_TRI_BYTES 31:16
+#define NVC87D_HEAD_SET_HDMI_DSC_HCBLANK(a) (0x0000236C + (a)*0x00000400)
+#define NVC87D_HEAD_SET_HDMI_DSC_HCBLANK_WIDTH 15:0
+#define NVC87D_HEAD_SW_RESERVED(a,b) (0x00002370 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC87D_HEAD_SW_RESERVED_VALUE 31:0
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_HI(a,b) (0x00002380 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC87D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_HI_VALUE 31:0
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clC87d_h
diff --git a/src/common/sdk/nvidia/inc/class/clc87e.h b/src/common/sdk/nvidia/inc/class/clc87e.h
new file mode 100644
index 0000000..61c58f1
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc87e.h
@@ -0,0 +1,697 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clC87e_h_
+#define _clC87e_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC87E_WINDOW_CHANNEL_DMA (0x0000C87E)
+
+// dma opcode instructions
+#define NVC87E_DMA
+#define NVC87E_DMA_OPCODE 31:29
+#define NVC87E_DMA_OPCODE_METHOD 0x00000000
+#define NVC87E_DMA_OPCODE_JUMP 0x00000001
+#define NVC87E_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC87E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC87E_DMA_METHOD_COUNT 27:18
+#define NVC87E_DMA_METHOD_OFFSET 13:2
+#define NVC87E_DMA_DATA 31:0
+#define NVC87E_DMA_DATA_NOP 0x00000000
+#define NVC87E_DMA_JUMP_OFFSET 11:2
+#define NVC87E_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NVC87E_PUT (0x00000000)
+#define NVC87E_PUT_PTR 9:0
+#define NVC87E_GET (0x00000004)
+#define NVC87E_GET_PTR 9:0
+#define NVC87E_UPDATE (0x00000200)
+#define NVC87E_UPDATE_RELEASE_ELV 0:0
+#define NVC87E_UPDATE_RELEASE_ELV_FALSE (0x00000000)
+#define NVC87E_UPDATE_RELEASE_ELV_TRUE (0x00000001)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN 8:4
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC87E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC87E_UPDATE_INTERLOCK_WITH_WIN_IMM 12:12
+#define NVC87E_UPDATE_INTERLOCK_WITH_WIN_IMM_DISABLE (0x00000000)
+#define NVC87E_UPDATE_INTERLOCK_WITH_WIN_IMM_ENABLE (0x00000001)
+#define NVC87E_SET_SEMAPHORE_ACQUIRE_HI (0x00000204)
+#define NVC87E_SET_SEMAPHORE_ACQUIRE_HI_VALUE 31:0
+#define NVC87E_GET_LINE (0x00000208)
+#define NVC87E_GET_LINE_LINE 15:0
+#define NVC87E_SET_SEMAPHORE_CONTROL (0x0000020C)
+#define NVC87E_SET_SEMAPHORE_CONTROL_OFFSET 7:0
+#define NVC87E_SET_SEMAPHORE_CONTROL_SKIP_ACQ 11:11
+#define NVC87E_SET_SEMAPHORE_CONTROL_SKIP_ACQ_FALSE (0x00000000)
+#define NVC87E_SET_SEMAPHORE_CONTROL_SKIP_ACQ_TRUE (0x00000001)
+#define NVC87E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15
+#define NVC87E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000)
+#define NVC87E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001)
+#define NVC87E_SET_SEMAPHORE_CONTROL_ACQ_MODE 13:12
+#define NVC87E_SET_SEMAPHORE_CONTROL_ACQ_MODE_EQ (0x00000000)
+#define NVC87E_SET_SEMAPHORE_CONTROL_ACQ_MODE_CGEQ (0x00000001)
+#define NVC87E_SET_SEMAPHORE_CONTROL_ACQ_MODE_STRICT_GEQ (0x00000002)
+#define NVC87E_SET_SEMAPHORE_CONTROL_REL_MODE 14:14
+#define NVC87E_SET_SEMAPHORE_CONTROL_REL_MODE_WRITE (0x00000000)
+#define NVC87E_SET_SEMAPHORE_CONTROL_REL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVC87E_SET_SEMAPHORE_ACQUIRE (0x00000210)
+#define NVC87E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0
+#define NVC87E_SET_SEMAPHORE_RELEASE (0x00000214)
+#define NVC87E_SET_SEMAPHORE_RELEASE_VALUE 31:0
+#define NVC87E_SET_CONTEXT_DMA_SEMAPHORE (0x00000218)
+#define NVC87E_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0
+#define NVC87E_SET_CONTEXT_DMA_NOTIFIER (0x0000021C)
+#define NVC87E_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NVC87E_SET_NOTIFIER_CONTROL (0x00000220)
+#define NVC87E_SET_NOTIFIER_CONTROL_MODE 0:0
+#define NVC87E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NVC87E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVC87E_SET_NOTIFIER_CONTROL_OFFSET 11:4
+#define NVC87E_SET_SIZE (0x00000224)
+#define NVC87E_SET_SIZE_WIDTH 15:0
+#define NVC87E_SET_SIZE_HEIGHT 31:16
+#define NVC87E_SET_STORAGE (0x00000228)
+#define NVC87E_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NVC87E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NVC87E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
NVC87E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NVC87E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC87E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC87E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC87E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC87E_SET_PARAMS (0x0000022C) +#define NVC87E_SET_PARAMS_FORMAT 7:0 +#define NVC87E_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NVC87E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F) +#define NVC87E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NVC87E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC87E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E) +#define NVC87E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC87E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6) +#define NVC87E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NVC87E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9) +#define NVC87E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NVC87E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NVC87E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NVC87E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NVC87E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NVC87E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028) +#define NVC87E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029) +#define NVC87E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035) +#define NVC87E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036) +#define NVC87E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038) +#define NVC87E_SET_PARAMS_FORMAT_Y8___U8___V8_N444 (0x0000003A) +#define NVC87E_SET_PARAMS_FORMAT_Y8___U8___V8_N420 (0x0000003B) +#define NVC87E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055) +#define NVC87E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056) +#define NVC87E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058) +#define NVC87E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075) +#define NVC87E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076) +#define NVC87E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078) +#define NVC87E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18 +#define NVC87E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000) +#define NVC87E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001) +#define NVC87E_SET_PARAMS_SWAP_UV 19:19 +#define NVC87E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000) +#define NVC87E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001) +#define NVC87E_SET_PARAMS_FMT_ROUNDING_MODE 22:22 +#define NVC87E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST (0x00000000) +#define NVC87E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN (0x00000001) +#define NVC87E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004) +#define NVC87E_SET_PLANAR_STORAGE_PITCH 12:0 +#define NVC87E_SET_SEMAPHORE_RELEASE_HI (0x0000023C) +#define NVC87E_SET_SEMAPHORE_RELEASE_HI_VALUE 31:0 +#define NVC87E_SET_CONTEXT_DMA_ISO(b) (0x00000240 + (b)*0x00000004) +#define NVC87E_SET_CONTEXT_DMA_ISO_HANDLE 31:0 +#define NVC87E_SET_OFFSET(b) (0x00000260 + (b)*0x00000004) +#define NVC87E_SET_OFFSET_ORIGIN 31:0 +#define NVC87E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004) +#define NVC87E_SET_POINT_IN_X 15:0 +#define NVC87E_SET_POINT_IN_Y 31:16 +#define NVC87E_SET_SIZE_IN (0x00000298) +#define NVC87E_SET_SIZE_IN_WIDTH 15:0 +#define NVC87E_SET_SIZE_IN_HEIGHT 31:16 +#define NVC87E_SET_SIZE_OUT (0x000002A4) +#define NVC87E_SET_SIZE_OUT_WIDTH 15:0 +#define NVC87E_SET_SIZE_OUT_HEIGHT 31:16 +#define NVC87E_SET_CONTROL_INPUT_SCALER (0x000002A8) +#define 
NVC87E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC87E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC87E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC87E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define NVC87E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVC87E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC87E_SET_INPUT_SCALER_COEFF_VALUE (0x000002AC) +#define NVC87E_SET_INPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVC87E_SET_INPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVC87E_SET_COMPOSITION_CONTROL (0x000002EC) +#define NVC87E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT 1:0 +#define NVC87E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE (0x00000000) +#define NVC87E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC (0x00000001) +#define NVC87E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST (0x00000002) +#define NVC87E_SET_COMPOSITION_CONTROL_DEPTH 11:4 +#define NVC87E_SET_COMPOSITION_CONTROL_BYPASS 16:16 +#define NVC87E_SET_COMPOSITION_CONTROL_BYPASS_DISABLE (0x00000000) +#define NVC87E_SET_COMPOSITION_CONTROL_BYPASS_ENABLE (0x00000001) +#define NVC87E_SET_COMPOSITION_CONSTANT_ALPHA (0x000002F0) +#define NVC87E_SET_COMPOSITION_CONSTANT_ALPHA_K1 7:0 +#define NVC87E_SET_COMPOSITION_CONSTANT_ALPHA_K2 15:8 +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT (0x000002F4) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT 3:0 +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT 7:4 +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT 11:8 +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define 
NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT 15:12 +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT 19:16 +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT 23:20 +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT 27:24 +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT 31:28 +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC87E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC87E_SET_KEY_ALPHA (0x000002F8) +#define NVC87E_SET_KEY_ALPHA_MIN 15:0 +#define NVC87E_SET_KEY_ALPHA_MAX 31:16 +#define NVC87E_SET_KEY_RED_CR (0x000002FC) +#define NVC87E_SET_KEY_RED_CR_MIN 15:0 +#define NVC87E_SET_KEY_RED_CR_MAX 31:16 +#define NVC87E_SET_KEY_GREEN_Y (0x00000300) +#define NVC87E_SET_KEY_GREEN_Y_MIN 15:0 +#define NVC87E_SET_KEY_GREEN_Y_MAX 31:16 +#define NVC87E_SET_KEY_BLUE_CB (0x00000304) +#define NVC87E_SET_KEY_BLUE_CB_MIN 15:0 +#define NVC87E_SET_KEY_BLUE_CB_MAX 31:16 +#define NVC87E_SET_PRESENT_CONTROL (0x00000308) +#define NVC87E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NVC87E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4 
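Note: the `high:low` pairs above (for example NVC87E_SET_KEY_ALPHA_MIN 15:0 and NVC87E_SET_KEY_ALPHA_MAX 31:16) are bit ranges, not ordinary expressions; they are consumed through ternary-based field macros in which `(0 ? 15:0)` evaluates to the low bit and `(1 ? 15:0)` to the high bit. A minimal sketch of that idiom, modeled loosely on the DRF helpers in nvmisc.h (the single-argument SKETCH_* names here are simplifications for illustration, not the actual nvmisc.h signatures):

    /* Simplified DRF-style helpers: the ternary trick turns a "15:0" token
     * into its low bit, (0 ? 15:0) == 0, or its high bit, (1 ? 15:0) == 15. */
    #define SKETCH_BASE(range)   (0 ? range)
    #define SKETCH_EXTENT(range) (1 ? range)
    #define SKETCH_MASK(range)   (0xFFFFFFFFU >> \
        (31u - (NvU32)SKETCH_EXTENT(range) + (NvU32)SKETCH_BASE(range)))
    #define SKETCH_NUM(range, n) \
        ((((NvU32)(n)) & SKETCH_MASK(range)) << (NvU32)SKETCH_BASE(range))

    /* Pack an alpha-key window into one NVC87E_SET_KEY_ALPHA data word:
     * MIN lands in bits 15:0, MAX in bits 31:16. */
    NvU32 keyAlpha = SKETCH_NUM(NVC87E_SET_KEY_ALPHA_MIN, 0x0010) |
                     SKETCH_NUM(NVC87E_SET_KEY_ALPHA_MAX, 0x0FF0);
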
+#define NVC87E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NVC87E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NVC87E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8 +#define NVC87E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NVC87E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NVC87E_SET_PRESENT_CONTROL_STEREO_MODE 13:12 +#define NVC87E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000) +#define NVC87E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001) +#define NVC87E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002) +#define NVC87E_SET_ACQ_SEMAPHORE_VALUE_HI (0x0000030C) +#define NVC87E_SET_ACQ_SEMAPHORE_VALUE_HI_VALUE 31:0 +#define NVC87E_SET_ACQ_SEMAPHORE_CONTROL (0x00000330) +#define NVC87E_SET_ACQ_SEMAPHORE_CONTROL_OFFSET 7:0 +#define NVC87E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15 +#define NVC87E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000) +#define NVC87E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001) +#define NVC87E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE 13:12 +#define NVC87E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_EQ (0x00000000) +#define NVC87E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_CGEQ (0x00000001) +#define NVC87E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_STRICT_GEQ (0x00000002) +#define NVC87E_SET_ACQ_SEMAPHORE_VALUE (0x00000334) +#define NVC87E_SET_ACQ_SEMAPHORE_VALUE_VALUE 31:0 +#define NVC87E_SET_CONTEXT_DMA_ACQ_SEMAPHORE (0x00000338) +#define NVC87E_SET_CONTEXT_DMA_ACQ_SEMAPHORE_HANDLE 31:0 +#define NVC87E_SET_SCAN_DIRECTION (0x0000033C) +#define NVC87E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION 0:0 +#define NVC87E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION_FROM_LEFT (0x00000000) +#define NVC87E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION_FROM_RIGHT (0x00000001) +#define NVC87E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION 1:1 +#define NVC87E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION_FROM_TOP (0x00000000) +#define NVC87E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION_FROM_BOTTOM (0x00000001) +#define NVC87E_SET_SCAN_DIRECTION_COLUMN_ORDER 2:2 +#define NVC87E_SET_SCAN_DIRECTION_COLUMN_ORDER_FALSE (0x00000000) +#define NVC87E_SET_SCAN_DIRECTION_COLUMN_ORDER_TRUE (0x00000001) +#define NVC87E_SET_TIMESTAMP_ORIGIN_LO (0x00000340) +#define NVC87E_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NVC87E_SET_TIMESTAMP_ORIGIN_HI (0x00000344) +#define NVC87E_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NVC87E_SET_UPDATE_TIMESTAMP_LO (0x00000348) +#define NVC87E_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NVC87E_SET_UPDATE_TIMESTAMP_HI (0x0000034C) +#define NVC87E_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NVC87E_SET_INTERLOCK_FLAGS (0x00000370) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 0:0 +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+1):((i)+1) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 1:1 +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 2:2 +#define 
NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 3:3 +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 4:4 +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 5:5 +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 6:6 +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 7:7 +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 8:8 +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC87E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS (0x00000374) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define 
NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define 
NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC87E_SET_EXT_PACKET_CONTROL (0x00000398) +#define NVC87E_SET_EXT_PACKET_CONTROL_ENABLE 0:0 +#define NVC87E_SET_EXT_PACKET_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC87E_SET_EXT_PACKET_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC87E_SET_EXT_PACKET_CONTROL_LOCATION 4:4 +#define NVC87E_SET_EXT_PACKET_CONTROL_LOCATION_VSYNC (0x00000000) +#define NVC87E_SET_EXT_PACKET_CONTROL_LOCATION_VBLANK (0x00000001) +#define NVC87E_SET_EXT_PACKET_CONTROL_FREQUENCY 8:8 +#define NVC87E_SET_EXT_PACKET_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVC87E_SET_EXT_PACKET_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVC87E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE 12:12 +#define NVC87E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_DISABLE (0x00000000) +#define NVC87E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_ENABLE (0x00000001) 
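The per-window interlock fields above each occupy a single bit (bit i:i of SET_WINDOW_INTERLOCK_FLAGS for window i), so the full payload is just a bitmask. A small illustrative helper under that reading (window_interlock_mask is hypothetical, not part of the class header):

    /* Hypothetical helper: OR together bit (i):(i) for each window channel
     * that should be interlocked with this one. */
    static NvU32 window_interlock_mask(const unsigned *windows, unsigned count)
    {
        NvU32 mask = 0;
        for (unsigned n = 0; n < count; n++) {
            mask |= (NvU32)NVC87E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE
                    << windows[n];
        }
        return mask;
    }
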
+#define NVC87E_SET_EXT_PACKET_CONTROL_SIZE 27:16 +#define NVC87E_SET_EXT_PACKET_DATA (0x0000039C) +#define NVC87E_SET_EXT_PACKET_DATA_DB0 7:0 +#define NVC87E_SET_EXT_PACKET_DATA_DB1 15:8 +#define NVC87E_SET_EXT_PACKET_DATA_DB2 23:16 +#define NVC87E_SET_EXT_PACKET_DATA_DB3 31:24 +#define NVC87E_SET_FMT_COEFFICIENT_C00 (0x00000400) +#define NVC87E_SET_FMT_COEFFICIENT_C00_VALUE 20:0 +#define NVC87E_SET_FMT_COEFFICIENT_C01 (0x00000404) +#define NVC87E_SET_FMT_COEFFICIENT_C01_VALUE 20:0 +#define NVC87E_SET_FMT_COEFFICIENT_C02 (0x00000408) +#define NVC87E_SET_FMT_COEFFICIENT_C02_VALUE 20:0 +#define NVC87E_SET_FMT_COEFFICIENT_C03 (0x0000040C) +#define NVC87E_SET_FMT_COEFFICIENT_C03_VALUE 20:0 +#define NVC87E_SET_FMT_COEFFICIENT_C10 (0x00000410) +#define NVC87E_SET_FMT_COEFFICIENT_C10_VALUE 20:0 +#define NVC87E_SET_FMT_COEFFICIENT_C11 (0x00000414) +#define NVC87E_SET_FMT_COEFFICIENT_C11_VALUE 20:0 +#define NVC87E_SET_FMT_COEFFICIENT_C12 (0x00000418) +#define NVC87E_SET_FMT_COEFFICIENT_C12_VALUE 20:0 +#define NVC87E_SET_FMT_COEFFICIENT_C13 (0x0000041C) +#define NVC87E_SET_FMT_COEFFICIENT_C13_VALUE 20:0 +#define NVC87E_SET_FMT_COEFFICIENT_C20 (0x00000420) +#define NVC87E_SET_FMT_COEFFICIENT_C20_VALUE 20:0 +#define NVC87E_SET_FMT_COEFFICIENT_C21 (0x00000424) +#define NVC87E_SET_FMT_COEFFICIENT_C21_VALUE 20:0 +#define NVC87E_SET_FMT_COEFFICIENT_C22 (0x00000428) +#define NVC87E_SET_FMT_COEFFICIENT_C22_VALUE 20:0 +#define NVC87E_SET_FMT_COEFFICIENT_C23 (0x0000042C) +#define NVC87E_SET_FMT_COEFFICIENT_C23_VALUE 20:0 +#define NVC87E_SET_ILUT_CONTROL (0x00000440) +#define NVC87E_SET_ILUT_CONTROL_INTERPOLATE 0:0 +#define NVC87E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC87E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC87E_SET_ILUT_CONTROL_MIRROR 1:1 +#define NVC87E_SET_ILUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC87E_SET_ILUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC87E_SET_ILUT_CONTROL_MODE 3:2 +#define NVC87E_SET_ILUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define NVC87E_SET_ILUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVC87E_SET_ILUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVC87E_SET_ILUT_CONTROL_SIZE 18:8 +#define NVC87E_SET_CONTEXT_DMA_ILUT (0x00000444) +#define NVC87E_SET_CONTEXT_DMA_ILUT_HANDLE 31:0 +#define NVC87E_SET_OFFSET_ILUT (0x00000448) +#define NVC87E_SET_OFFSET_ILUT_ORIGIN 31:0 +#define NVC87E_SET_CSC00CONTROL (0x0000045C) +#define NVC87E_SET_CSC00CONTROL_ENABLE 0:0 +#define NVC87E_SET_CSC00CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC87E_SET_CSC00CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC87E_SET_CSC00COEFFICIENT_C00 (0x00000460) +#define NVC87E_SET_CSC00COEFFICIENT_C00_VALUE 20:0 +#define NVC87E_SET_CSC00COEFFICIENT_C01 (0x00000464) +#define NVC87E_SET_CSC00COEFFICIENT_C01_VALUE 20:0 +#define NVC87E_SET_CSC00COEFFICIENT_C02 (0x00000468) +#define NVC87E_SET_CSC00COEFFICIENT_C02_VALUE 20:0 +#define NVC87E_SET_CSC00COEFFICIENT_C03 (0x0000046C) +#define NVC87E_SET_CSC00COEFFICIENT_C03_VALUE 20:0 +#define NVC87E_SET_CSC00COEFFICIENT_C10 (0x00000470) +#define NVC87E_SET_CSC00COEFFICIENT_C10_VALUE 20:0 +#define NVC87E_SET_CSC00COEFFICIENT_C11 (0x00000474) +#define NVC87E_SET_CSC00COEFFICIENT_C11_VALUE 20:0 +#define NVC87E_SET_CSC00COEFFICIENT_C12 (0x00000478) +#define NVC87E_SET_CSC00COEFFICIENT_C12_VALUE 20:0 +#define NVC87E_SET_CSC00COEFFICIENT_C13 (0x0000047C) +#define NVC87E_SET_CSC00COEFFICIENT_C13_VALUE 20:0 +#define NVC87E_SET_CSC00COEFFICIENT_C20 (0x00000480) +#define NVC87E_SET_CSC00COEFFICIENT_C20_VALUE 20:0 +#define 
NVC87E_SET_CSC00COEFFICIENT_C21 (0x00000484) +#define NVC87E_SET_CSC00COEFFICIENT_C21_VALUE 20:0 +#define NVC87E_SET_CSC00COEFFICIENT_C22 (0x00000488) +#define NVC87E_SET_CSC00COEFFICIENT_C22_VALUE 20:0 +#define NVC87E_SET_CSC00COEFFICIENT_C23 (0x0000048C) +#define NVC87E_SET_CSC00COEFFICIENT_C23_VALUE 20:0 +#define NVC87E_SET_CSC0LUT_CONTROL (0x000004A0) +#define NVC87E_SET_CSC0LUT_CONTROL_INTERPOLATE 0:0 +#define NVC87E_SET_CSC0LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC87E_SET_CSC0LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC87E_SET_CSC0LUT_CONTROL_MIRROR 1:1 +#define NVC87E_SET_CSC0LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC87E_SET_CSC0LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC87E_SET_CSC0LUT_CONTROL_ENABLE 4:4 +#define NVC87E_SET_CSC0LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC87E_SET_CSC0LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC87E_SET_CSC0LUT_SEGMENT_SIZE (0x000004A4) +#define NVC87E_SET_CSC0LUT_SEGMENT_SIZE_IDX 5:0 +#define NVC87E_SET_CSC0LUT_SEGMENT_SIZE_VALUE 18:16 +#define NVC87E_SET_CSC0LUT_ENTRY (0x000004A8) +#define NVC87E_SET_CSC0LUT_ENTRY_IDX 10:0 +#define NVC87E_SET_CSC0LUT_ENTRY_VALUE 31:16 +#define NVC87E_SET_CSC01CONTROL (0x000004BC) +#define NVC87E_SET_CSC01CONTROL_ENABLE 0:0 +#define NVC87E_SET_CSC01CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC87E_SET_CSC01CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC87E_SET_CSC01COEFFICIENT_C00 (0x000004C0) +#define NVC87E_SET_CSC01COEFFICIENT_C00_VALUE 20:0 +#define NVC87E_SET_CSC01COEFFICIENT_C01 (0x000004C4) +#define NVC87E_SET_CSC01COEFFICIENT_C01_VALUE 20:0 +#define NVC87E_SET_CSC01COEFFICIENT_C02 (0x000004C8) +#define NVC87E_SET_CSC01COEFFICIENT_C02_VALUE 20:0 +#define NVC87E_SET_CSC01COEFFICIENT_C03 (0x000004CC) +#define NVC87E_SET_CSC01COEFFICIENT_C03_VALUE 20:0 +#define NVC87E_SET_CSC01COEFFICIENT_C10 (0x000004D0) +#define NVC87E_SET_CSC01COEFFICIENT_C10_VALUE 20:0 +#define NVC87E_SET_CSC01COEFFICIENT_C11 (0x000004D4) +#define NVC87E_SET_CSC01COEFFICIENT_C11_VALUE 20:0 +#define NVC87E_SET_CSC01COEFFICIENT_C12 (0x000004D8) +#define NVC87E_SET_CSC01COEFFICIENT_C12_VALUE 20:0 +#define NVC87E_SET_CSC01COEFFICIENT_C13 (0x000004DC) +#define NVC87E_SET_CSC01COEFFICIENT_C13_VALUE 20:0 +#define NVC87E_SET_CSC01COEFFICIENT_C20 (0x000004E0) +#define NVC87E_SET_CSC01COEFFICIENT_C20_VALUE 20:0 +#define NVC87E_SET_CSC01COEFFICIENT_C21 (0x000004E4) +#define NVC87E_SET_CSC01COEFFICIENT_C21_VALUE 20:0 +#define NVC87E_SET_CSC01COEFFICIENT_C22 (0x000004E8) +#define NVC87E_SET_CSC01COEFFICIENT_C22_VALUE 20:0 +#define NVC87E_SET_CSC01COEFFICIENT_C23 (0x000004EC) +#define NVC87E_SET_CSC01COEFFICIENT_C23_VALUE 20:0 +#define NVC87E_SET_TMO_CONTROL (0x00000500) +#define NVC87E_SET_TMO_CONTROL_INTERPOLATE 0:0 +#define NVC87E_SET_TMO_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC87E_SET_TMO_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC87E_SET_TMO_CONTROL_SAT_MODE 3:2 +#define NVC87E_SET_TMO_CONTROL_SIZE 18:8 +#define NVC87E_SET_TMO_LOW_INTENSITY_ZONE (0x00000508) +#define NVC87E_SET_TMO_LOW_INTENSITY_ZONE_END 29:16 +#define NVC87E_SET_TMO_LOW_INTENSITY_VALUE (0x0000050C) +#define NVC87E_SET_TMO_LOW_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC87E_SET_TMO_LOW_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC87E_SET_TMO_LOW_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC87E_SET_TMO_MEDIUM_INTENSITY_ZONE (0x00000510) +#define NVC87E_SET_TMO_MEDIUM_INTENSITY_ZONE_START 13:0 +#define NVC87E_SET_TMO_MEDIUM_INTENSITY_ZONE_END 29:16 +#define NVC87E_SET_TMO_MEDIUM_INTENSITY_VALUE 
(0x00000514) +#define NVC87E_SET_TMO_MEDIUM_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC87E_SET_TMO_MEDIUM_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC87E_SET_TMO_MEDIUM_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC87E_SET_TMO_HIGH_INTENSITY_ZONE (0x00000518) +#define NVC87E_SET_TMO_HIGH_INTENSITY_ZONE_START 13:0 +#define NVC87E_SET_TMO_HIGH_INTENSITY_VALUE (0x0000051C) +#define NVC87E_SET_TMO_HIGH_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC87E_SET_TMO_HIGH_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC87E_SET_TMO_HIGH_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC87E_SET_CONTEXT_DMA_TMO_LUT (0x00000528) +#define NVC87E_SET_CONTEXT_DMA_TMO_LUT_HANDLE 31:0 +#define NVC87E_SET_OFFSET_TMO_LUT (0x0000052C) +#define NVC87E_SET_OFFSET_TMO_LUT_ORIGIN 31:0 +#define NVC87E_SET_CSC10CONTROL (0x0000053C) +#define NVC87E_SET_CSC10CONTROL_ENABLE 0:0 +#define NVC87E_SET_CSC10CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC87E_SET_CSC10CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC87E_SET_CSC10COEFFICIENT_C00 (0x00000540) +#define NVC87E_SET_CSC10COEFFICIENT_C00_VALUE 20:0 +#define NVC87E_SET_CSC10COEFFICIENT_C01 (0x00000544) +#define NVC87E_SET_CSC10COEFFICIENT_C01_VALUE 20:0 +#define NVC87E_SET_CSC10COEFFICIENT_C02 (0x00000548) +#define NVC87E_SET_CSC10COEFFICIENT_C02_VALUE 20:0 +#define NVC87E_SET_CSC10COEFFICIENT_C03 (0x0000054C) +#define NVC87E_SET_CSC10COEFFICIENT_C03_VALUE 20:0 +#define NVC87E_SET_CSC10COEFFICIENT_C10 (0x00000550) +#define NVC87E_SET_CSC10COEFFICIENT_C10_VALUE 20:0 +#define NVC87E_SET_CSC10COEFFICIENT_C11 (0x00000554) +#define NVC87E_SET_CSC10COEFFICIENT_C11_VALUE 20:0 +#define NVC87E_SET_CSC10COEFFICIENT_C12 (0x00000558) +#define NVC87E_SET_CSC10COEFFICIENT_C12_VALUE 20:0 +#define NVC87E_SET_CSC10COEFFICIENT_C13 (0x0000055C) +#define NVC87E_SET_CSC10COEFFICIENT_C13_VALUE 20:0 +#define NVC87E_SET_CSC10COEFFICIENT_C20 (0x00000560) +#define NVC87E_SET_CSC10COEFFICIENT_C20_VALUE 20:0 +#define NVC87E_SET_CSC10COEFFICIENT_C21 (0x00000564) +#define NVC87E_SET_CSC10COEFFICIENT_C21_VALUE 20:0 +#define NVC87E_SET_CSC10COEFFICIENT_C22 (0x00000568) +#define NVC87E_SET_CSC10COEFFICIENT_C22_VALUE 20:0 +#define NVC87E_SET_CSC10COEFFICIENT_C23 (0x0000056C) +#define NVC87E_SET_CSC10COEFFICIENT_C23_VALUE 20:0 +#define NVC87E_SET_CSC1LUT_CONTROL (0x00000580) +#define NVC87E_SET_CSC1LUT_CONTROL_INTERPOLATE 0:0 +#define NVC87E_SET_CSC1LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC87E_SET_CSC1LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC87E_SET_CSC1LUT_CONTROL_MIRROR 1:1 +#define NVC87E_SET_CSC1LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC87E_SET_CSC1LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC87E_SET_CSC1LUT_CONTROL_ENABLE 4:4 +#define NVC87E_SET_CSC1LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC87E_SET_CSC1LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC87E_SET_CSC1LUT_SEGMENT_SIZE (0x00000584) +#define NVC87E_SET_CSC1LUT_SEGMENT_SIZE_IDX 5:0 +#define NVC87E_SET_CSC1LUT_SEGMENT_SIZE_VALUE 18:16 +#define NVC87E_SET_CSC1LUT_ENTRY (0x00000588) +#define NVC87E_SET_CSC1LUT_ENTRY_IDX 10:0 +#define NVC87E_SET_CSC1LUT_ENTRY_VALUE 31:16 +#define NVC87E_SET_CSC11CONTROL (0x0000059C) +#define NVC87E_SET_CSC11CONTROL_ENABLE 0:0 +#define NVC87E_SET_CSC11CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC87E_SET_CSC11CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC87E_SET_CSC11COEFFICIENT_C00 (0x000005A0) +#define NVC87E_SET_CSC11COEFFICIENT_C00_VALUE 20:0 +#define NVC87E_SET_CSC11COEFFICIENT_C01 (0x000005A4) +#define NVC87E_SET_CSC11COEFFICIENT_C01_VALUE 20:0 
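Each CSC coefficient block lays its 3x4 matrix out as twelve consecutive 32-bit methods (C00 at the block base, C23 at base + 0x2C), so a coefficient's method offset can be computed rather than switched over by name. A sketch under that layout assumption (csc10_method is a hypothetical helper):

    /* Hypothetical helper: coefficient methods are 4 bytes apart,
     * three rows of four columns each. */
    static NvU32 csc10_method(unsigned row, unsigned col) /* row < 3, col < 4 */
    {
        return NVC87E_SET_CSC10COEFFICIENT_C00 + (row * 4u + col) * 4u;
    }
    /* csc10_method(2, 3) == 0x0000056C == NVC87E_SET_CSC10COEFFICIENT_C23 */
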
+#define NVC87E_SET_CSC11COEFFICIENT_C02 (0x000005A8) +#define NVC87E_SET_CSC11COEFFICIENT_C02_VALUE 20:0 +#define NVC87E_SET_CSC11COEFFICIENT_C03 (0x000005AC) +#define NVC87E_SET_CSC11COEFFICIENT_C03_VALUE 20:0 +#define NVC87E_SET_CSC11COEFFICIENT_C10 (0x000005B0) +#define NVC87E_SET_CSC11COEFFICIENT_C10_VALUE 20:0 +#define NVC87E_SET_CSC11COEFFICIENT_C11 (0x000005B4) +#define NVC87E_SET_CSC11COEFFICIENT_C11_VALUE 20:0 +#define NVC87E_SET_CSC11COEFFICIENT_C12 (0x000005B8) +#define NVC87E_SET_CSC11COEFFICIENT_C12_VALUE 20:0 +#define NVC87E_SET_CSC11COEFFICIENT_C13 (0x000005BC) +#define NVC87E_SET_CSC11COEFFICIENT_C13_VALUE 20:0 +#define NVC87E_SET_CSC11COEFFICIENT_C20 (0x000005C0) +#define NVC87E_SET_CSC11COEFFICIENT_C20_VALUE 20:0 +#define NVC87E_SET_CSC11COEFFICIENT_C21 (0x000005C4) +#define NVC87E_SET_CSC11COEFFICIENT_C21_VALUE 20:0 +#define NVC87E_SET_CSC11COEFFICIENT_C22 (0x000005C8) +#define NVC87E_SET_CSC11COEFFICIENT_C22_VALUE 20:0 +#define NVC87E_SET_CSC11COEFFICIENT_C23 (0x000005CC) +#define NVC87E_SET_CSC11COEFFICIENT_C23_VALUE 20:0 +#define NVC87E_SET_CLAMP_RANGE (0x000005D0) +#define NVC87E_SET_CLAMP_RANGE_LOW 15:0 +#define NVC87E_SET_CLAMP_RANGE_HIGH 31:16 +#define NVC87E_SW_RESERVED(b) (0x000005D4 + (b)*0x00000004) +#define NVC87E_SW_RESERVED_VALUE 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC87e_h diff --git a/src/common/sdk/nvidia/inc/class/clc970.h b/src/common/sdk/nvidia/inc/class/clc970.h new file mode 100644 index 0000000..769cd74 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc970.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/clc970.finn +// + +#define NVC970_DISPLAY (0xc970U) /* finn: Evaluated from "NVC970_ALLOCATION_PARAMETERS_MESSAGE_ID" */ + +#define NVC970_ALLOCATION_PARAMETERS_MESSAGE_ID (0xc970U) + +typedef struct NVC970_ALLOCATION_PARAMETERS { + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numDsis; // Number of DSIs in this chip/display +} NVC970_ALLOCATION_PARAMETERS; + diff --git a/src/common/sdk/nvidia/inc/class/clc971.h b/src/common/sdk/nvidia/inc/class/clc971.h new file mode 100644 index 0000000..7cdb433 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc971.h @@ -0,0 +1,329 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clc971_h_ +#define _clc971_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC971_DISP_SF_USER (0x000C971) + +typedef volatile struct { + NvU32 dispSfUserOffset[0x400]; +} _NvC971DispSfUser, NvC971DispSfUserMap; + +#define NVC971_SF_HDMI_INFO_IDX_AVI_INFOFRAME 0x00000000 /* */ +#define NVC971_SF_HDMI_INFO_IDX_GCP 0x00000001 /* */ +#define NVC971_SF_HDMI_INFO_IDX_ACR 0x00000002 /* */ +#define NVC971_SF_HDMI_INFO_CTRL(i,j) (0x000E0000-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC971_SF_HDMI_INFO_CTRL__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_INFO_CTRL__SIZE_2 3 /* */ +#define NVC971_SF_HDMI_INFO_CTRL_ENABLE 0:0 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NVC971_SF_HDMI_INFO_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NVC971_SF_HDMI_INFO_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NVC971_SF_HDMI_INFO_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NVC971_SF_HDMI_INFO_CTRL_CHKSUM_HW_EN 0x00000001 /* RW--V */ +#define NVC971_SF_HDMI_INFO_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NVC971_SF_HDMI_INFO_CTRL_CHKSUM_HW_DIS 0x00000000 /* RW--V */ +#define NVC971_SF_HDMI_INFO_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_CTRL_HBLANK 12:12 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_CTRL_HBLANK_DIS 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_CTRL_HBLANK_EN 0x00000001 /* RW--V */ +#define NVC971_SF_HDMI_INFO_CTRL_VIDEO_FMT 16:16 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_CTRL_VIDEO_FMT_SW_CONTROLLED 0x00000000 /* RW--V */ +#define NVC971_SF_HDMI_INFO_CTRL_VIDEO_FMT_HW_CONTROLLED 0x00000001 /* RW--V */ +#define NVC971_SF_HDMI_INFO_CTRL_VIDEO_FMT_INIT 0x00000001 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_STATUS(i,j) (0x000E0004-0x000E0000+(i)*1024+(j)*64) /* R--4A */ +#define NVC971_SF_HDMI_INFO_STATUS__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_INFO_STATUS__SIZE_2 3 /* */ +#define NVC971_SF_HDMI_INFO_STATUS_SENT 0:0 /* R--VF */ +#define NVC971_SF_HDMI_INFO_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NVC971_SF_HDMI_INFO_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NVC971_SF_HDMI_INFO_HEADER(i,j) (0x000E0008-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC971_SF_HDMI_INFO_HEADER__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_INFO_HEADER__SIZE_2 3 /* */ +#define NVC971_SF_HDMI_INFO_HEADER_HB0 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_HEADER_HB1 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_HEADER_HB2 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_LOW(i,j) (0x000E000C-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_LOW__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_LOW__SIZE_2 3 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_LOW_PB3_INIT 
0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_HIGH(i,j) (0x000E0010-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_HIGH__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_HIGH__SIZE_2 3 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_LOW(i,j) (0x000E0014-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_LOW__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_LOW__SIZE_2 3 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_HIGH(i,j) (0x000E0018-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_HIGH__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_HIGH__SIZE_2 3 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_LOW(i,j) (0x000E001C-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_LOW__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_LOW__SIZE_2 3 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_HIGH(i,j) (0x000E0020-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_HIGH__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_HIGH__SIZE_2 3 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_LOW(i,j) 
(0x000E0024-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_LOW__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_LOW__SIZE_2 3 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_HIGH(i,j) (0x000E0028-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_HIGH__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_HIGH__SIZE_2 3 /* */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_INFO_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x000E0000-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE 0:0 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_STATUS(i) (0x000E0004-0x000E0000+(i)*1024) /* R--4A */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_STATUS__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_STATUS_SENT 0:0 /* R-IVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x000E0008-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x000E000C-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 
/* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x000E0010-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x000E0014-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x000E0018-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW(i) (0x000E001C-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NVC971_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_GCP_CTRL(i) (0x000E0040-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC971_SF_HDMI_GCP_CTRL__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_GCP_CTRL_ENABLE 0:0 /* RWIVF */ +#define 
NVC971_SF_HDMI_GCP_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_GCP_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NVC971_SF_HDMI_GCP_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NVC971_SF_HDMI_GCP_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NVC971_SF_HDMI_GCP_STATUS(i) (0x000E0044-0x000E0000+(i)*1024) /* R--4A */ +#define NVC971_SF_HDMI_GCP_STATUS__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_GCP_STATUS_SENT 0:0 /* R-IVF */ +#define NVC971_SF_HDMI_GCP_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NVC971_SF_HDMI_GCP_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NVC971_SF_HDMI_GCP_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NVC971_SF_HDMI_GCP_SUBPACK(i) (0x000E004C-0x000E0000+(i)*1024) /* RW-4A */ +#define NVC971_SF_HDMI_GCP_SUBPACK__SIZE_1 8 /* */ +#define NVC971_SF_HDMI_GCP_SUBPACK_SB0 7:0 /* RWIVF */ +#define NVC971_SF_HDMI_GCP_SUBPACK_SB0_INIT 0x00000001 /* RWI-V */ +#define NVC971_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE 0x00000001 /* RW--V */ +#define NVC971_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE 0x00000010 /* RW--V */ +#define NVC971_SF_HDMI_GCP_SUBPACK_SB1 15:8 /* RWIVF */ +#define NVC971_SF_HDMI_GCP_SUBPACK_SB1_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_GCP_SUBPACK_SB2 23:16 /* RWIVF */ +#define NVC971_SF_HDMI_GCP_SUBPACK_SB2_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_HDMI_GCP_SUBPACK_SB1_CTRL 24:24 /* RWIVF */ +#define NVC971_SF_HDMI_GCP_SUBPACK_SB1_CTRL_INIT 0x00000001 /* RWI-V */ +#define NVC971_SF_HDMI_GCP_SUBPACK_SB1_CTRL_SW 0x00000000 /* RW--V */ +#define NVC971_SF_HDMI_GCP_SUBPACK_SB1_CTRL_HW 0x00000001 /* RW--V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL(i,j) (0x000E0130-0x000E0000+(i)*1024+(j)*8) /* RW-4A */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL__SIZE_1 8 /* */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL__SIZE_2 10 /* */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_ENABLE 0:0 /* RWIVF */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE 3:1 /* RWIVF */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE_ALWAYS 0x00000000 /* RWI-V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE_ONCE 0x00000001 /* RW--V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE_FID_ALWAYS 0x00000002 /* RW--V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE_FID_ONCE 0x00000003 /* RW--V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE_FID_TRIGGER 0x00000004 /* RW--V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_LOC 5:4 /* RWIVF */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_LOC_VBLANK 0x00000000 /* RWI-V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_LOC_VSYNC 0x00000001 /* RW--V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_LOC_LINE 0x00000002 /* RW--V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_OFFSET 10:6 /* RWIVF */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_OFFSET_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_SIZE 18:14 /* RWIVF */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_SIZE_INIT 0x00000000 /* RWI-V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_BUSY 22:22 /* R-IVF */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_BUSY_NO 0x00000000 /* R-I-V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_BUSY_YES 0x00000001 /* R---V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_SENT 23:23 /* RWIVF */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_SENT_NO 0x00000000 /* R-I-V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_SENT_YES 0x00000001 /* R---V */ +#define NVC971_SF_GENERIC_INFOFRAME_CTRL_SENT_CLEAR 
0x00000001 /* -W--C */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG(i,j) (0x000E0134-0x000E0000+(i)*1024+(j)*8) /* RW-4A */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG__SIZE_1 8 /* */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG__SIZE_2 10 /* */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_FID 7:0 /* RWIVF */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_FID_INIT 0x00000000 /* RWI-V */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_LINE_ID 23:8 /* RWIVF */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_LINE_ID_INIT 0x00000000 /* RWI-V */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_LINE_ID_REVERSED 24:24 /* RWIVF */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_LINE_ID_REVERSED_NO 0x00000000 /* RWI-V */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_LINE_ID_REVERSED_YES 0x00000001 /* RW--V */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_AS_SDP_OVERRIDE_EN 25:25 /* RWIVF */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_AS_SDP_OVERRIDE_EN_NO 0x00000000 /* RWI-V */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_AS_SDP_OVERRIDE_EN_YES 0x00000001 /* RW--V */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_HW_CHECKSUM 29:29 /* RWIVF */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_HW_CHECKSUM_NO 0x00000000 /* RWI-V */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_HW_CHECKSUM_YES 0x00000001 /* RW--V */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_NEW 30:30 /* RWIVF */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_NEW_INIT 0x00000000 /* R-I-V */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_NEW_DONE 0x00000000 /* R---V */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_NEW_PENDING 0x00000001 /* R---T */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_NEW_TRIGGER 0x00000001 /* -W--T */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_MTD_STATE_CTRL 31:31 /* RWIVF */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_MTD_STATE_CTRL_ACT 0x00000000 /* RWI-V */
+#define NVC971_SF_GENERIC_INFOFRAME_CONFIG_MTD_STATE_CTRL_ARM 0x00000001 /* RW--V */
+#define NVC971_SF_GENERIC_INFOFRAME_DATA_CTRL(i) (0x000E03F0-0x000E0000+(i)*1024) /* RW-4A */
+#define NVC971_SF_GENERIC_INFOFRAME_DATA_CTRL__SIZE_1 8 /* */
+#define NVC971_SF_GENERIC_INFOFRAME_DATA_CTRL_OFFSET 4:0 /* RWIVF */
+#define NVC971_SF_GENERIC_INFOFRAME_DATA_CTRL_OFFSET_INIT 0x00000000 /* RWI-V */
+#define NVC971_SF_GENERIC_INFOFRAME_DATA(i) (0x000E03F4-0x000E0000+(i)*1024) /* RW-4A */
+#define NVC971_SF_GENERIC_INFOFRAME_DATA__SIZE_1 8 /* */
+#define NVC971_SF_GENERIC_INFOFRAME_DATA_BYTE0 7:0 /* RWIVF */
+#define NVC971_SF_GENERIC_INFOFRAME_DATA_BYTE0_INIT 0x00000000 /* RWI-V */
+#define NVC971_SF_GENERIC_INFOFRAME_DATA_BYTE1 15:8 /* RWIVF */
+#define NVC971_SF_GENERIC_INFOFRAME_DATA_BYTE1_INIT 0x00000000 /* RWI-V */
+#define NVC971_SF_GENERIC_INFOFRAME_DATA_BYTE2 23:16 /* RWIVF */
+#define NVC971_SF_GENERIC_INFOFRAME_DATA_BYTE2_INIT 0x00000000 /* RWI-V */
+#define NVC971_SF_GENERIC_INFOFRAME_DATA_BYTE3 31:24 /* RWIVF */
+#define NVC971_SF_GENERIC_INFOFRAME_DATA_BYTE3_INIT 0x00000000 /* RWI-V */
+#define NVC971_SF_GENERIC_INFOFRAME_MISC_CTRL(i) (0x000E03F8-0x000E0000+(i)*1024) /* RW-4A */
+#define NVC971_SF_GENERIC_INFOFRAME_MISC_CTRL__SIZE_1 8 /* */
+#define NVC971_SF_GENERIC_INFOFRAME_MISC_CTRL_AUDIO_PRIORITY 1:1 /* RWIVF */
+#define NVC971_SF_GENERIC_INFOFRAME_MISC_CTRL_AUDIO_PRIORITY_HIGH 0x00000000 /* RW--V */
+#define NVC971_SF_GENERIC_INFOFRAME_MISC_CTRL_AUDIO_PRIORITY_LOW 0x00000001 /* RWI-V */
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif // _clc971_h_
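These SF_HDMI/SF_GENERIC_INFOFRAME registers encode every bitfield as a HIGH:LOW pair (SB0 is 7:0, DATA_BYTE3 is 31:24) and every legal value as a plain constant; note that NVC971_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE (0x01) and _SB0_CLR_AVMUTE (0x10) line up with the Set_AVMUTE and Clear_AVMUTE flags of the HDMI General Control Packet. As a minimal sketch of how such defines are consumed, the snippet below derives shifts and masks from the HIGH:LOW pairs and assembles the AVMUTE subpacket word; the FLD_* helpers are illustrative stand-ins for the driver's DRF-style macros and are not part of this patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Field defines use a HIGH:LOW convention; a ternary trick recovers the
     * endpoints: (0 ? 7:0) evaluates to 0 and (1 ? 7:0) to 7.  These FLD_*
     * helpers are illustrative stand-ins for the driver's DRF-style macros
     * and are not part of this patch. */
    #define FLD_HI(f)         ((uint32_t)(1 ? f))
    #define FLD_LO(f)         ((uint32_t)(0 ? f))
    #define FLD_WIDTH_MASK(f) (0xFFFFFFFFu >> (31u - FLD_HI(f) + FLD_LO(f)))
    #define FLD_SET(f, v)     ((((uint32_t)(v)) & FLD_WIDTH_MASK(f)) << FLD_LO(f))

    /* Repeated from clc971.h above so the demo stands alone. */
    #define NVC971_SF_HDMI_GCP_SUBPACK_SB0            7:0
    #define NVC971_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE 0x00000001
    #define NVC971_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE 0x00000010

    /* Build the GCP_SUBPACK payload that asserts or deasserts AVMUTE. */
    static uint32_t gcp_avmute_subpack(int mute)
    {
        return FLD_SET(NVC971_SF_HDMI_GCP_SUBPACK_SB0,
                       mute ? NVC971_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE
                            : NVC971_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE);
    }

    int main(void)
    {
        printf("mute 0x%08X, unmute 0x%08X\n",
               gcp_avmute_subpack(1), gcp_avmute_subpack(0)); /* ...01, ...10 */
        return 0;
    }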
diff --git a/src/common/sdk/nvidia/inc/class/clc973.h b/src/common/sdk/nvidia/inc/class/clc973.h
new file mode 100644
index 0000000..f770493
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc973.h
@@ -0,0 +1,379 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc973_h_
+#define _clc973_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC973_DISP_CAPABILITIES 0xC973
+
+#define NVC973_SYS_CAP 0x0 /* RW-4R */
+#define NVC973_SYS_CAP_HEAD0_EXISTS 0:0 /* RWIVF */
+#define NVC973_SYS_CAP_HEAD0_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC973_SYS_CAP_HEAD0_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC973_SYS_CAP_HEAD1_EXISTS 1:1 /* RWIVF */
+#define NVC973_SYS_CAP_HEAD1_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC973_SYS_CAP_HEAD1_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC973_SYS_CAP_HEAD2_EXISTS 2:2 /* RWIVF */
+#define NVC973_SYS_CAP_HEAD2_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC973_SYS_CAP_HEAD2_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC973_SYS_CAP_HEAD3_EXISTS 3:3 /* RWIVF */
+#define NVC973_SYS_CAP_HEAD3_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC973_SYS_CAP_HEAD3_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC973_SYS_CAP_HEAD4_EXISTS 4:4 /* RWIVF */
+#define NVC973_SYS_CAP_HEAD4_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC973_SYS_CAP_HEAD4_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC973_SYS_CAP_HEAD5_EXISTS 5:5 /* RWIVF */
+#define NVC973_SYS_CAP_HEAD5_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC973_SYS_CAP_HEAD5_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC973_SYS_CAP_HEAD6_EXISTS 6:6 /* RWIVF */
+#define NVC973_SYS_CAP_HEAD6_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC973_SYS_CAP_HEAD6_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC973_SYS_CAP_HEAD7_EXISTS 7:7 /* RWIVF */
+#define NVC973_SYS_CAP_HEAD7_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC973_SYS_CAP_HEAD7_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC973_SYS_CAP_HEAD_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */
+#define NVC973_SYS_CAP_HEAD_EXISTS__SIZE_1 8 /* */
+#define NVC973_SYS_CAP_HEAD_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC973_SYS_CAP_HEAD_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC973_SYS_CAP_SOR0_EXISTS 8:8 /* RWIVF */
+#define NVC973_SYS_CAP_SOR0_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC973_SYS_CAP_SOR0_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC973_SYS_CAP_SOR1_EXISTS 9:9 /* RWIVF */
+#define NVC973_SYS_CAP_SOR1_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC973_SYS_CAP_SOR1_EXISTS_YES 0x00000001 /* RW--V */
+#define
NVC973_SYS_CAP_SOR2_EXISTS 10:10 /* RWIVF */ +#define NVC973_SYS_CAP_SOR2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC973_SYS_CAP_SOR2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC973_SYS_CAP_SOR3_EXISTS 11:11 /* RWIVF */ +#define NVC973_SYS_CAP_SOR3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC973_SYS_CAP_SOR3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC973_SYS_CAP_SOR4_EXISTS 12:12 /* RWIVF */ +#define NVC973_SYS_CAP_SOR4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC973_SYS_CAP_SOR4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC973_SYS_CAP_SOR5_EXISTS 13:13 /* RWIVF */ +#define NVC973_SYS_CAP_SOR5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC973_SYS_CAP_SOR5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC973_SYS_CAP_SOR6_EXISTS 14:14 /* RWIVF */ +#define NVC973_SYS_CAP_SOR6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC973_SYS_CAP_SOR6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC973_SYS_CAP_SOR7_EXISTS 15:15 /* RWIVF */ +#define NVC973_SYS_CAP_SOR7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC973_SYS_CAP_SOR7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC973_SYS_CAP_SOR_EXISTS(i) (8+(i)):(8+(i)) /* RWIVF */ +#define NVC973_SYS_CAP_SOR_EXISTS__SIZE_1 8 /* */ +#define NVC973_SYS_CAP_SOR_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC973_SYS_CAP_SOR_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC973_SYS_CAP_DSI0_EXISTS 20:20 /* RWIVF */ +#define NVC973_SYS_CAP_DSI0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC973_SYS_CAP_DSI0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC973_SYS_CAP_DSI1_EXISTS 21:21 /* RWIVF */ +#define NVC973_SYS_CAP_DSI1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC973_SYS_CAP_DSI1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC973_SYS_CAP_DSI2_EXISTS 22:22 /* RWIVF */ +#define NVC973_SYS_CAP_DSI2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC973_SYS_CAP_DSI2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC973_SYS_CAP_DSI3_EXISTS 23:23 /* RWIVF */ +#define NVC973_SYS_CAP_DSI3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC973_SYS_CAP_DSI3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC973_SYS_CAP_DSI_EXISTS(i) (20+(i)):(20+(i)) /* RWIVF */ +#define NVC973_SYS_CAP_DSI_EXISTS__SIZE_1 4 /* */ +#define NVC973_SYS_CAP_DSI_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC973_SYS_CAP_DSI_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA 0x10 /* RW-4R */ +#define NVC973_IHUB_COMMON_CAPA_MEMPOOL_ENTRIES 15:0 /* RWIUF */ +#define NVC973_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH 17:16 /* RWIVF */ +#define NVC973_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_32B 0x00000000 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_64B 0x00000001 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_128B 0x00000002 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_256B 0x00000003 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_ROTATION 18:18 /* RWIVF */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_ROTATION_FALSE 0x00000000 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_ROTATION_TRUE 0x00000001 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_PLANAR 19:19 /* RWIVF */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_PLANAR_FALSE 0x00000000 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_PLANAR_TRUE 0x00000001 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_VGA 20:20 /* RWIVF */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_VGA_FALSE 0x00000000 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_VGA_TRUE 0x00000001 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION 21:21 /* RWIVF */ +#define 
NVC973_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_FALSE 0x00000000 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_TRUE 0x00000001 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_MSCG 22:22 /* RWIVF */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_MSCG_FALSE 0x00000000 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_MSCG_TRUE 0x00000001 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH 23:23 /* RWIVF */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_FALSE 0x00000000 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_TRUE 0x00000001 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT 26:26 /* RWIVF */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION 31:30 /* RWIVF */ +#define NVC973_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_32B 0x00000000 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_64B 0x00000001 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_128B 0x00000002 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_256B 0x00000003 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPC 0x18 /* RW-4R */ +#define NVC973_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE 1:0 /* RWIVF */ +#define NVC973_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_32B 0x00000000 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_64B 0x00000001 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_128B 0x00000002 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_256B 0x00000003 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED 6:4 /* RWIVF */ +#define NVC973_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_NONE 0x00000000 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_TWO 0x00000001 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_FOUR 0x00000002 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_EIGHT 0x00000003 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_SIXTEEN 0x00000004 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR 11:11 /* RWIVF */ +#define NVC973_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR_FALSE 0x00000000 /* RWI-V */ +#define NVC973_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR_TRUE 0x00000001 /* RW--V */ +#define NVC973_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP 12:12 /* RWIVF */ +#define NVC973_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP_FALSE 0x00000000 /* RWI-V */ +#define NVC973_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPA(i) (0x680+(i)*32) /* RW-4A */ +#define NVC973_POSTCOMP_HDR_CAPA__SIZE_1 8 /* */ +#define NVC973_POSTCOMP_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPA_OCSC0_PRESENT 16:16 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPA_OCSC0_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPA_OCSC0_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPA_OCSC1_PRESENT 17:17 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPA_OCSC1_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPA_OCSC1_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPA_SCLR_PRESENT 18:18 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define 
NVC973_POSTCOMP_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPA_HCLPF_PRESENT 19:19 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPA_HCLPF_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPA_HCLPF_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPA_DTH_PRESENT 20:20 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPA_DTH_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPA_DTH_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPA_OSCAN_PRESENT 21:21 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPA_OSCAN_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPA_OSCAN_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPA_DSC_PRESENT 22:22 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPA_DSC_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPA_DSC_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPA_VFILTER_PRESENT 23:23 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPA_VFILTER_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPA_VFILTER_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPB(i) (0x684+(i)*32) /* RW-4A */ +#define NVC973_POSTCOMP_HDR_CAPB__SIZE_1 8 /* */ +#define NVC973_POSTCOMP_HDR_CAPB_VGA 0:0 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPB_VGA_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPB_VGA_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPB_OLUT_SZ 12:1 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPB_OLUT_LOGNR 15:13 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPB_OLUT_SFCLOAD 17:17 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPB_OLUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPB_OLUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPB_OLUT_DIRECT 18:18 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPB_OLUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPB_OLUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPC(i) (0x688+(i)*32) /* RW-4A */ +#define NVC973_POSTCOMP_HDR_CAPC__SIZE_1 8 /* */ +#define NVC973_POSTCOMP_HDR_CAPC_OCSC0_PRECISION 4:0 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPC_OCSC0_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPC_OCSC0_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPC_OCSC0_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPC_OCSC1_PRECISION 12:8 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPC_OCSC1_UNITY_CLAMP 13:13 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPC_OCSC1_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPC_OCSC1_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPC_SCLR_SF_PRECISION 20:16 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPC_SCLR_CI_PRECISION 24:21 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPC_SCLR_VS_EXT_RGB 25:25 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPC_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPC_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_4X 
0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPD(i) (0x68c+(i)*32) /* RW-4A */ +#define NVC973_POSTCOMP_HDR_CAPD__SIZE_1 8 /* */ +#define NVC973_POSTCOMP_HDR_CAPD_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPD_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPE(i) (0x690+(i)*32) /* RW-4A */ +#define NVC973_POSTCOMP_HDR_CAPE__SIZE_1 8 /* */ +#define NVC973_POSTCOMP_HDR_CAPE_DSC_NATIVE422 16:16 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPE_DSC_NATIVE422_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPE_DSC_NATIVE422_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPE_DSC_NATIVE420 17:17 /* RWIVF */ +#define NVC973_POSTCOMP_HDR_CAPE_DSC_NATIVE420_TRUE 0x00000001 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPE_DSC_NATIVE420_FALSE 0x00000000 /* RW--V */ +#define NVC973_POSTCOMP_HDR_CAPF(i) (0x694+(i)*32) /* RW-4A */ +#define NVC973_POSTCOMP_HDR_CAPF__SIZE_1 8 /* */ +#define NVC973_POSTCOMP_HDR_CAPF_VFILTER_MAX_PIXELS 15:0 /* RWIVF */ +#define NVC973_SOR_CAP(i) (0x144+(i)*8) /* RW-4A */ +#define NVC973_SOR_CAP__SIZE_1 8 /* */ +#define NVC973_SOR_CAP_SINGLE_LVDS_18 0:0 /* RWIVF */ +#define NVC973_SOR_CAP_SINGLE_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC973_SOR_CAP_SINGLE_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC973_SOR_CAP_SINGLE_LVDS_24 1:1 /* RWIVF */ +#define NVC973_SOR_CAP_SINGLE_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC973_SOR_CAP_SINGLE_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC973_SOR_CAP_DUAL_LVDS_18 2:2 /* RWIVF */ +#define NVC973_SOR_CAP_DUAL_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC973_SOR_CAP_DUAL_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC973_SOR_CAP_DUAL_LVDS_24 3:3 /* RWIVF */ +#define NVC973_SOR_CAP_DUAL_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC973_SOR_CAP_DUAL_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC973_SOR_CAP_SINGLE_TMDS_A 8:8 /* RWIVF */ +#define NVC973_SOR_CAP_SINGLE_TMDS_A_FALSE 0x00000000 /* RW--V */ +#define NVC973_SOR_CAP_SINGLE_TMDS_A_TRUE 0x00000001 /* RW--V */ +#define NVC973_SOR_CAP_SINGLE_TMDS_B 9:9 /* RWIVF */ +#define NVC973_SOR_CAP_SINGLE_TMDS_B_FALSE 0x00000000 /* RW--V */ +#define NVC973_SOR_CAP_SINGLE_TMDS_B_TRUE 0x00000001 /* RW--V */ +#define NVC973_SOR_CAP_DUAL_TMDS 11:11 /* RWIVF */ +#define NVC973_SOR_CAP_DUAL_TMDS_FALSE 0x00000000 /* RW--V */ +#define NVC973_SOR_CAP_DUAL_TMDS_TRUE 0x00000001 /* RW--V */ +#define NVC973_SOR_CAP_DISPLAY_OVER_PCIE 13:13 /* RWIVF */ +#define NVC973_SOR_CAP_DISPLAY_OVER_PCIE_FALSE 0x00000000 /* RW--V */ +#define NVC973_SOR_CAP_DISPLAY_OVER_PCIE_TRUE 0x00000001 /* RW--V */ +#define NVC973_SOR_CAP_SDI 16:16 /* RWIVF */ +#define NVC973_SOR_CAP_SDI_FALSE 0x00000000 /* RW--V */ +#define NVC973_SOR_CAP_SDI_TRUE 0x00000001 /* RW--V */ +#define NVC973_SOR_CAP_DP_A 24:24 /* RWIVF */ +#define NVC973_SOR_CAP_DP_A_FALSE 0x00000000 /* RW--V */ +#define NVC973_SOR_CAP_DP_A_TRUE 0x00000001 /* RW--V */ +#define NVC973_SOR_CAP_DP_B 25:25 /* RWIVF */ +#define NVC973_SOR_CAP_DP_B_FALSE 0x00000000 /* RW--V */ +#define NVC973_SOR_CAP_DP_B_TRUE 0x00000001 /* RW--V */ +#define NVC973_SOR_CAP_DP_INTERLACE 26:26 /* RWIVF */ +#define NVC973_SOR_CAP_DP_INTERLACE_FALSE 0x00000000 /* RW--V */ +#define NVC973_SOR_CAP_DP_INTERLACE_TRUE 0x00000001 /* RW--V */ +#define NVC973_SOR_CAP_DP_8_LANES 27:27 /* RWIVF */ +#define NVC973_SOR_CAP_DP_8_LANES_FALSE 0x00000000 /* RW--V */ +#define NVC973_SOR_CAP_DP_8_LANES_TRUE 0x00000001 /* RW--V */ +#define NVC973_SOR_CAP_HDMI_FRL 28:28 /* RWIVF */ +#define NVC973_SOR_CAP_HDMI_FRL_FALSE 
0x00000000 /* RW--V */ +#define NVC973_SOR_CAP_HDMI_FRL_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA(i) (0x780+(i)*32) /* RW-4A */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA__SIZE_1 32 /* */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_ALPHA_WIDTH 13:10 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT 16:16 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT 17:17 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT 18:18 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT 19:19 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT 20:20 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT 21:21 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT 22:22 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT 23:23 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT 24:24 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPB(i) (0x784+(i)*32) /* RW-4A */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPB__SIZE_1 32 /* */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPB_FMT_PRECISION 4:0 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGSZ 9:6 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGNR 12:10 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT 15:15 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPC(i) (0x788+(i)*32) /* RW-4A */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPC__SIZE_1 32 
/* */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_PRECISION 4:0 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT 15:15 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_PRECISION 20:16 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP 21:21 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD(i) (0x78c+(i)*32) /* RW-4A */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD__SIZE_1 32 /* */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGSZ 3:0 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGNR 6:4 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD 8:8 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT 9:9 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_SF_PRECISION 16:12 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_CI_PRECISION 20:17 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB 21:21 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA 22:22 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPE(i) (0x790+(i)*32) /* RW-4A */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPE__SIZE_1 32 /* */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_PRECISION 4:0 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT 15:15 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define 
NVC973_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_PRECISION 20:16 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP 21:21 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPF(i) (0x794+(i)*32) /* RW-4A */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPF__SIZE_1 32 /* */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */ +#define NVC973_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */ + +#ifdef __cplusplus +}; +#endif /* extern C */ +#endif //_clc973_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc97a.h b/src/common/sdk/nvidia/inc/class/clc97a.h new file mode 100644 index 0000000..fd26c95 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc97a.h @@ -0,0 +1,168 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef _clc97a_h_
+#define _clc97a_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC97A_CURSOR_IMM_CHANNEL_PIO (0x0000C97A)
+
+#define NVC97A_FREE (0x00000008)
+#define NVC97A_FREE_COUNT 5:0
+#define NVC97A_UPDATE (0x00000200)
+#define NVC97A_SET_INTERLOCK_FLAGS (0x00000204)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NVC97A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NVC97A_SET_CURSOR_HOT_SPOT_POINT_OUT(b) (0x00000208 + (b)*0x00000004)
+#define NVC97A_SET_CURSOR_HOT_SPOT_POINT_OUT_X 15:0
+#define NVC97A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y 31:16
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS (0x00000210)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001)
+#define
NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define 
NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVC97A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clc97a_h_
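The cursor immediate channel above is a PIO class: clients program it by writing the 32-bit method data directly at each method offset, so SET_CURSOR_HOT_SPOT_POINT_OUT packs both coordinates into a single word (X in 15:0, Y in 31:16). A minimal sketch of assembling that word follows; the FLD_* helpers are illustrative stand-ins for the driver's DRF-style macros, and the final MMIO write is omitted.

    #include <stdint.h>
    #include <stdio.h>

    /* HIGH:LOW endpoints via the ternary trick: (0 ? 31:16) == 16 and
     * (1 ? 31:16) == 31.  Illustrative helpers, not part of this patch. */
    #define FLD_HI(f)         ((uint32_t)(1 ? f))
    #define FLD_LO(f)         ((uint32_t)(0 ? f))
    #define FLD_WIDTH_MASK(f) (0xFFFFFFFFu >> (31u - FLD_HI(f) + FLD_LO(f)))
    #define FLD_SET(f, v)     ((((uint32_t)(v)) & FLD_WIDTH_MASK(f)) << FLD_LO(f))

    /* Repeated from clc97a.h above so the demo stands alone. */
    #define NVC97A_SET_CURSOR_HOT_SPOT_POINT_OUT(b) (0x00000208 + (b)*0x00000004)
    #define NVC97A_SET_CURSOR_HOT_SPOT_POINT_OUT_X  15:0
    #define NVC97A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y  31:16

    int main(void)
    {
        /* Hot spot (12, 34) for output index 0: both coordinates in one word. */
        uint32_t data = FLD_SET(NVC97A_SET_CURSOR_HOT_SPOT_POINT_OUT_X, 12)
                      | FLD_SET(NVC97A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y, 34);

        printf("method 0x%04X data 0x%08X\n",
               (unsigned)NVC97A_SET_CURSOR_HOT_SPOT_POINT_OUT(0), data);
        /* prints: method 0x0208 data 0x0022000C */
        return 0;
    }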
diff --git a/src/common/sdk/nvidia/inc/class/clc97b.h b/src/common/sdk/nvidia/inc/class/clc97b.h
new file mode 100644
index 0000000..40c1e10
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc97b.h
@@ -0,0 +1,64 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc97b_h_
+#define _clc97b_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC97B_WINDOW_IMM_CHANNEL_DMA (0x0000C97B)
+
+// dma opcode instructions
+#define NVC97B_DMA
+#define NVC97B_DMA_OPCODE 31:29
+#define NVC97B_DMA_OPCODE_METHOD 0x00000000
+#define NVC97B_DMA_OPCODE_JUMP 0x00000001
+#define NVC97B_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC97B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC97B_DMA_METHOD_COUNT 27:18
+#define NVC97B_DMA_METHOD_OFFSET 15:2
+#define NVC97B_DMA_DATA 31:0
+#define NVC97B_DMA_DATA_NOP 0x00000000
+#define NVC97B_DMA_JUMP_OFFSET 15:2
+#define NVC97B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NVC97B_PUT (0x00000000)
+#define NVC97B_PUT_PTR 9:0
+#define NVC97B_GET (0x00000004)
+#define NVC97B_GET_PTR 9:0
+#define NVC97B_UPDATE (0x00000200)
+#define NVC97B_UPDATE_INTERLOCK_WITH_WINDOW 1:1
+#define NVC97B_UPDATE_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVC97B_UPDATE_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVC97B_SET_POINT_OUT(b) (0x00000208 + (b)*0x00000004)
+#define NVC97B_SET_POINT_OUT_X 15:0
+#define NVC97B_SET_POINT_OUT_Y 31:16
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clc97b_h_
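Unlike the PIO cursor class, this is a DMA class: the "dma opcode instructions" block above defines the 32-bit command words a client writes into the channel's push buffer, with the opcode in bits 31:29, a method count in 27:18, and the dword-aligned method offset in 15:2, followed by the data words. A minimal sketch of encoding one method header plus its data word; the FLD_* helpers and the pb[] buffer are illustrative, not part of this patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Same HIGH:LOW ternary trick as in the earlier sketches; illustrative
     * stand-ins for the driver's DRF-style macros. */
    #define FLD_HI(f)         ((uint32_t)(1 ? f))
    #define FLD_LO(f)         ((uint32_t)(0 ? f))
    #define FLD_WIDTH_MASK(f) (0xFFFFFFFFu >> (31u - FLD_HI(f) + FLD_LO(f)))
    #define FLD_SET(f, v)     ((((uint32_t)(v)) & FLD_WIDTH_MASK(f)) << FLD_LO(f))

    /* Repeated from clc97b.h above so the demo stands alone. */
    #define NVC97B_DMA_OPCODE        31:29
    #define NVC97B_DMA_OPCODE_METHOD 0x00000000
    #define NVC97B_DMA_METHOD_COUNT  27:18
    #define NVC97B_DMA_METHOD_OFFSET 15:2
    #define NVC97B_SET_POINT_OUT(b)  (0x00000208 + (b)*0x00000004)
    #define NVC97B_SET_POINT_OUT_X   15:0
    #define NVC97B_SET_POINT_OUT_Y   31:16

    int main(void)
    {
        uint32_t pb[2]; /* stand-in for a slot in the channel's push buffer */

        /* Incrementing-method header: one data word, aimed at SET_POINT_OUT(0).
         * METHOD_OFFSET occupies bits 15:2, i.e. it stores the byte offset / 4. */
        pb[0] = FLD_SET(NVC97B_DMA_OPCODE, NVC97B_DMA_OPCODE_METHOD)
              | FLD_SET(NVC97B_DMA_METHOD_COUNT, 1)
              | FLD_SET(NVC97B_DMA_METHOD_OFFSET, NVC97B_SET_POINT_OUT(0) >> 2);
        pb[1] = FLD_SET(NVC97B_SET_POINT_OUT_X, 640)
              | FLD_SET(NVC97B_SET_POINT_OUT_Y, 480);

        printf("header 0x%08X data 0x%08X\n", pb[0], pb[1]);
        /* prints: header 0x00040208 data 0x01E00280 */
        return 0;
    }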
diff --git a/src/common/sdk/nvidia/inc/class/clc97d.h b/src/common/sdk/nvidia/inc/class/clc97d.h
new file mode 100644
index 0000000..69254b4
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc97d.h
@@ -0,0 +1,1377 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc97d_h_
+#define _clc97d_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC97D_CORE_CHANNEL_DMA (0x0000C97D)
+
+#define NV_DISP_NOTIFIER 0x00000000
+#define NV_DISP_NOTIFIER_SIZEOF 0x00000010
+#define NV_DISP_NOTIFIER__0 0x00000000
+#define NV_DISP_NOTIFIER__0_PRESENT_COUNT 7:0
+#define NV_DISP_NOTIFIER__0_FIELD 8:8
+#define NV_DISP_NOTIFIER__0_FLIP_TYPE 9:9
+#define NV_DISP_NOTIFIER__0_FLIP_TYPE_NON_TEARING 0x00000000
+#define NV_DISP_NOTIFIER__0_FLIP_TYPE_IMMEDIATE 0x00000001
+#define NV_DISP_NOTIFIER__0_R1 15:10
+#define NV_DISP_NOTIFIER__0_R2 23:16
+#define NV_DISP_NOTIFIER__0_R3 29:24
+#define NV_DISP_NOTIFIER__0_STATUS 31:30
+#define NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN 0x00000000
+#define NV_DISP_NOTIFIER__0_STATUS_BEGUN 0x00000001
+#define NV_DISP_NOTIFIER__0_STATUS_FINISHED 0x00000002
+#define NV_DISP_NOTIFIER__1 0x00000001
+#define NV_DISP_NOTIFIER__1_R4 31:0
+#define NV_DISP_NOTIFIER__2 0x00000002
+#define NV_DISP_NOTIFIER__2_TIMESTAMP_LO 31:0
+#define NV_DISP_NOTIFIER__3 0x00000003
+#define NV_DISP_NOTIFIER__3_TIMESTAMP_HI 31:0
+
+
+// dma opcode instructions
+#define NVC97D_DMA
+#define NVC97D_DMA_OPCODE 31:29
+#define NVC97D_DMA_OPCODE_METHOD 0x00000000
+#define NVC97D_DMA_OPCODE_JUMP 0x00000001
+#define NVC97D_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC97D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC97D_DMA_METHOD_COUNT 27:18
+#define NVC97D_DMA_METHOD_OFFSET 15:2
+#define NVC97D_DMA_DATA 31:0
+#define NVC97D_DMA_DATA_NOP 0x00000000
+#define NVC97D_DMA_JUMP_OFFSET 15:2
+#define NVC97D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// if cap SUPPORT_FLEXIBLE_WIN_MAPPING is FALSE, these defines can be used to obtain which head a window is mapped to
+#define NVC37D_WINDOW_MAPPED_TO_HEAD(w) ((w)>>1)
+#define NVC37D_GET_VALID_WINDOWMASK_FOR_HEAD(h) ((1<<((h)*2)) | (1<<((h)*2+1)))
+
+
+// class methods
+#define NVC97D_PUT (0x00000000)
+#define NVC97D_PUT_PTR 9:0
+#define NVC97D_GET (0x00000004)
+#define NVC97D_GET_PTR 9:0
+#define NVC97D_UPDATE (0x00000200)
+#define NVC97D_UPDATE_SPECIAL_HANDLING 21:20
+#define NVC97D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NVC97D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NVC97D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NVC97D_UPDATE_SPECIAL_HANDLING_REASON 19:12
+#define NVC97D_UPDATE_INHIBIT_INTERRUPTS 24:24
+#define NVC97D_UPDATE_INHIBIT_INTERRUPTS_FALSE
(0x00000000) +#define NVC97D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NVC97D_UPDATE_RELEASE_ELV 0:0 +#define NVC97D_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC97D_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC97D_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC97D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC97D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC97D_SET_NOTIFIER_CONTROL (0x0000020C) +#define NVC97D_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC97D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC97D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC97D_SET_NOTIFIER_CONTROL_NOTIFY 12:12 +#define NVC97D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NVC97D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NVC97D_SET_CONTROL (0x00000210) +#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN(i) ((i)+0):((i)+0) +#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN__SIZE_1 4 +#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN_DISABLE (0x00000000) +#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN_ENABLE (0x00000001) +#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN0 0:0 +#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN0_DISABLE (0x00000000) +#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN0_ENABLE (0x00000001) +#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN1 1:1 +#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN1_DISABLE (0x00000000) +#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN1_ENABLE (0x00000001) +#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN2 2:2 +#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN2_DISABLE 
(0x00000000)
+#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN2_ENABLE (0x00000001)
+#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN3 3:3
+#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN3_DISABLE (0x00000000)
+#define NVC97D_SET_CONTROL_FLIP_LOCK_PIN3_ENABLE (0x00000001)
+#define NVC97D_SET_INTERLOCK_FLAGS (0x00000218)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NVC97D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVC97D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+#define NVC97D_GET_RG_SCAN_LINE(b) (0x00000220 + (b)*0x00000004)
+#define NVC97D_GET_RG_SCAN_LINE_LINE 15:0
+#define NVC97D_GET_RG_SCAN_LINE_VBLANK 16:16
+#define NVC97D_GET_RG_SCAN_LINE_VBLANK_FALSE (0x00000000)
+#define NVC97D_GET_RG_SCAN_LINE_VBLANK_TRUE (0x00000001)
+#define NVC97D_SET_GET_BLANKING_CTRL(b) (0x00000240 + (b)*0x00000004)
+#define NVC97D_SET_GET_BLANKING_CTRL_BLANK 0:0
+#define NVC97D_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000)
+#define NVC97D_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001)
+#define NVC97D_SET_GET_BLANKING_CTRL_UNBLANK 1:1
+#define NVC97D_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000)
+#define NVC97D_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001)
+#define NVC97D_SET_SURFACE_ADDRESS_HI_NOTIFIER (0x00000260)
+#define NVC97D_SET_SURFACE_ADDRESS_HI_NOTIFIER_ADDRESS_HI 31:0
+#define NVC97D_SET_SURFACE_ADDRESS_LO_NOTIFIER (0x00000264)
+#define NVC97D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ADDRESS_LO 31:4
+#define NVC97D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET 3:2
+#define NVC97D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_IOVA (0x00000000)
+#define NVC97D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVC97D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVC97D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVC97D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE 0:0
+#define NVC97D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_DISABLE (0x00000000)
+#define NVC97D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_ENABLE (0x00000001)
+
+#define NVC97D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NVC97D_SOR_SET_CONTROL_OWNER_MASK 7:0
+#define NVC97D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NVC97D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NVC97D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NVC97D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NVC97D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NVC97D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010)
+#define NVC97D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020)
+#define NVC97D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040)
+#define NVC97D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080)
+#define NVC97D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NVC97D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NVC97D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NVC97D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NVC97D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NVC97D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NVC97D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NVC97D_SOR_SET_CONTROL_PROTOCOL_HDMI_FRL (0x0000000C)
+#define NVC97D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NVC97D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16
+#define NVC97D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC97D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC97D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NVC97D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NVC97D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NVC97D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+#define NVC97D_SOR_SET_CUSTOM_REASON(a) (0x00000304 + (a)*0x00000020)
+#define NVC97D_SOR_SET_CUSTOM_REASON_CODE 31:0
+#define NVC97D_SOR_SET_SW_SPARE_A(a) (0x00000308 + (a)*0x00000020)
+#define NVC97D_SOR_SET_SW_SPARE_A_CODE 31:0
+#define NVC97D_SOR_SET_SW_SPARE_B(a) (0x0000030C + (a)*0x00000020)
+#define NVC97D_SOR_SET_SW_SPARE_B_CODE 31:0
+
+#define NVC97D_DSI_SET_CONTROL(a) (0x00000500 + (a)*0x00000020)
+#define NVC97D_DSI_SET_CONTROL_OWNER_MASK 7:0
+#define NVC97D_DSI_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NVC97D_DSI_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NVC97D_DSI_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NVC97D_DSI_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NVC97D_DSI_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NVC97D_DSI_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010)
+#define NVC97D_DSI_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020)
+#define NVC97D_DSI_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040)
+#define NVC97D_DSI_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080)
+#define NVC97D_DSI_SET_CUSTOM_REASON(a) (0x00000504 + (a)*0x00000020)
+#define NVC97D_DSI_SET_CUSTOM_REASON_CODE 31:0
+#define NVC97D_DSI_SET_SW_SPARE_A(a) (0x00000508 + (a)*0x00000020)
+#define NVC97D_DSI_SET_SW_SPARE_A_CODE 31:0
+#define NVC97D_DSI_SET_SW_SPARE_B(a) (0x0000050C + (a)*0x00000020)
+#define NVC97D_DSI_SET_SW_SPARE_B_CODE 31:0
+
+#define NVC97D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080)
+#define NVC97D_WINDOW_SET_CONTROL_OWNER 3:0
+#define NVC97D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i))
+#define NVC97D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8
+#define NVC97D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000)
+#define NVC97D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001)
+#define NVC97D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002)
+#define NVC97D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003)
+#define NVC97D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004)
+#define NVC97D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005)
+#define NVC97D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006)
+#define NVC97D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007)
+#define NVC97D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(a) (0x0000100C + (a)*0x00000080)
+#define NVC97D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_HORIZONTAL 15:0
+#define NVC97D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_VERTICAL 31:16
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080)
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED 30:30
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+
+#define NVC97D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NVC97D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NVC97D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NVC97D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NVC97D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003)
+#define NVC97D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3
+#define NVC97D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_PROCAMP_CHROMA_DOWN_V 4:4
+#define NVC97D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28
+#define NVC97D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NVC97D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_444 (0x00000009)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444NP (0x0000000A)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 (0x00000017)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F)
+#define NVC97D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_CONTROL_STRUCTURE 1:0
+#define NVC97D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2
+#define NVC97D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_YUV420PACKER 3:3
+#define NVC97D_HEAD_SET_CONTROL_YUV420PACKER_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_YUV420PACKER_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 11:10
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 8:4
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 15:12
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 23:22
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 20:16
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN 28:24
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC97D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31
+#define NVC97D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00002014 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 6:4
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004)
+#define NVC97D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NVC97D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_DITHER_CONTROL_BITS 5:4
+#define NVC97D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000)
+#define NVC97D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001)
+#define NVC97D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002)
+#define NVC97D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003)
+#define NVC97D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2
+#define NVC97D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_DITHER_CONTROL_MODE 10:8
+#define NVC97D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NVC97D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NVC97D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NVC97D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NVC97D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NVC97D_HEAD_SET_DITHER_CONTROL_MODE_ROUND (0x00000005)
+#define NVC97D_HEAD_SET_DITHER_CONTROL_PHASE 13:12
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x0000201C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 0:0
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000)
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001)
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING 4:4
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 9:8
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000)
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001)
+#define NVC97D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000800 + (b)*0x00000004)
+#define NVC97D_HEAD_SET_DISPLAY_ID_CODE 31:0
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVC97D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(a) (0x0000202C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_HORIZONTAL 15:0
+#define NVC97D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_VERTICAL 31:16
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000)
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001)
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002)
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003)
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004)
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000)
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001)
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED 16:16
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+#define NVC97D_HEAD_SET_STALL_LOCK(a) (0x00002034 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_STALL_LOCK_ENABLE 0:0
+#define NVC97D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000)
+#define NVC97D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001)
+#define NVC97D_HEAD_SET_STALL_LOCK_MODE 2:2
+#define NVC97D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000)
+#define NVC97D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN 8:4
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC97D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 12:12
+#define NVC97D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000)
+#define NVC97D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001)
+#define NVC97D_HEAD_SET_STALL_LOCK_TEPOLARITY 14:14
+#define NVC97D_HEAD_SET_STALL_LOCK_TEPOLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC97D_HEAD_SET_STALL_LOCK_TEPOLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC97D_HEAD_SET_STALL_LOCK_UNSTALL_SYNC_ADVANCE 25:16
+#define NVC97D_HEAD_SET_LOCK_CHAIN(a) (0x00002044 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_LOCK_CHAIN_POSITION 3:0
+#define NVC97D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x00002048 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NVC97D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#define NVC97D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NVC97D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NVC97D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NVC97D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NVC97D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x0000205C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0
+#define NVC97D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16
+#define NVC97D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_RASTER_SIZE_WIDTH 15:0
+#define NVC97D_HEAD_SET_RASTER_SIZE_HEIGHT 31:16
+#define NVC97D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NVC97D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NVC97D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NVC97D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NVC97D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NVC97D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NVC97D_HEAD_SET_OVERSCAN_COLOR(a) (0x00002078 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OVERSCAN_COLOR_RED_CR 9:0
+#define NVC97D_HEAD_SET_OVERSCAN_COLOR_GREEN_Y 19:10
+#define NVC97D_HEAD_SET_OVERSCAN_COLOR_BLUE_CB 29:20
+#define NVC97D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(a) (0x0000207C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_RED_CR 9:0
+#define NVC97D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_GREEN_Y 19:10
+#define NVC97D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_BLUE_CB 29:20
+#define NVC97D_HEAD_SET_HDMI_CTRL(a) (0x00002080 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0
+#define NVC97D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000)
+#define NVC97D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001)
+#define NVC97D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002)
+#define NVC97D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4
+#define NVC97D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x00002098 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 0:0
+#define NVC97D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000)
+#define NVC97D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS 20:20
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HI(a) (0x000020C0 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HI_HERTZ 3:0
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HI_MAX(a) (0x000020C4 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HI_MAX_HERTZ 3:0
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_HI_RG_REL_SEMAPHORE(a,b) (0x00002110 + (a)*0x00000800 + (b)*0x00000004)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_HI_RG_REL_SEMAPHORE_ADDRESS_HI 31:0
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE(a,b) (0x00002130 + (a)*0x00000800 + (b)*0x00000004)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_ADDRESS_LO 31:4
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_TARGET 3:2
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_TARGET_IOVA (0x00000000)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_ENABLE 0:0
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_ENABLE_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_ENABLE_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_HI_CRC(a) (0x00002150 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_HI_CRC_ADDRESS_HI 31:0
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CRC(a) (0x00002154 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ADDRESS_LO 31:4
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET 3:2
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_IOVA (0x00000000)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE 0:0
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_HI_OLUT(a) (0x00002158 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_HI_OLUT_ADDRESS_HI 31:0
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT(a) (0x0000215C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ADDRESS_LO 31:4
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET 3:2
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_IOVA (0x00000000)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE 0:0
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_HI_CURSOR(a,b) (0x00002170 + (a)*0x00000800 + (b)*0x00000004)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_HI_CURSOR_ADDRESS_HI 31:0
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(a,b) (0x00002178 + (a)*0x00000800 + (b)*0x00000004)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ADDRESS_LO 31:4
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET 3:2
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_IOVA (0x00000000)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND 1:1
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND_PITCH (0x00000000)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND_BLOCKLINEAR (0x00000001)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE 0:0
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020)
+#define NVC97D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8
+#define NVC97D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NVC97D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NVC97D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12
+#define NVC97D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000)
+#define NVC97D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030)
+#define NVC97D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVC97D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8
+#define NVC97D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050)
+#define NVC97D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051)
+#define NVC97D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052)
+#define NVC97D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053)
+#define NVC97D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054)
+#define NVC97D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055)
+#define NVC97D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056)
+#define NVC97D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057)
+#define NVC97D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20
+#define NVC97D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000)
+#define NVC97D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030)
+#define NVC97D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVC97D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8
+#define NVC97D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050)
+#define NVC97D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051)
+#define NVC97D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052)
+#define NVC97D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053)
+#define NVC97D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054)
+#define NVC97D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055)
+#define NVC97D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056)
+#define NVC97D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9
+#define NVC97D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_PRESENT_CONTROL(a) (0x0000218C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 0:0
+#define NVC97D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4
+#define NVC97D_HEAD_SET_SW_SPARE_A(a) (0x00002194 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_SW_SPARE_A_CODE 31:0
+#define NVC97D_HEAD_SET_SW_SPARE_B(a) (0x00002198 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_SW_SPARE_B_CODE 31:0
+#define NVC97D_HEAD_SET_SW_SPARE_C(a) (0x0000219C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_SW_SPARE_C_CODE 31:0
+#define NVC97D_HEAD_SET_SW_SPARE_D(a) (0x000021A0 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_SW_SPARE_D_CODE 31:0
+#define NVC97D_HEAD_SET_DISPLAY_RATE(a) (0x000021A8 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_DISPLAY_RATE_RUN_MODE 1:0
+#define NVC97D_HEAD_SET_DISPLAY_RATE_RUN_MODE_CONTINUOUS (0x00000000)
+#define NVC97D_HEAD_SET_DISPLAY_RATE_RUN_MODE_ONE_SHOT (0x00000001)
+#define NVC97D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL 25:4
+#define NVC97D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH 2:2
+#define NVC97D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL(a,b) (0x000021CC + (a)*0x00000800 + (b)*0x00000004)
+#define NVC97D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15
+#define NVC97D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000)
+#define NVC97D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001)
+#define NVC97D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE 14:14
+#define NVC97D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE_WRITE (0x00000000)
+#define NVC97D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVC97D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE 10:10
+#define NVC97D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE_ONE_TIME (0x00000000)
+#define NVC97D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE_CONTINUOUS (0x00000001)
+#define NVC97D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RASTER_LINE 31:16
+#define NVC97D_HEAD_SET_RG_REL_SEMAPHORE_VALUE(a,b) (0x000021EC + (a)*0x00000800 + (b)*0x00000004)
+#define NVC97D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_VALUE 31:0
+#define NVC97D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE(a) (0x00002214 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_DATA 9:0
+#define NVC97D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_INDEX 19:12
+#define NVC97D_HEAD_SET_MIN_FRAME_IDLE(a) (0x00002218 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_MIN_FRAME_IDLE_LEADING_RASTER_LINES 14:0
+#define NVC97D_HEAD_SET_MIN_FRAME_IDLE_TRAILING_RASTER_LINES 30:16
+#define NVC97D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED(a) (0x00002220 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_ALPHA 7:0
+#define NVC97D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_RED 31:16
+#define NVC97D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE(a) (0x00002224 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_GREEN 15:0
+#define NVC97D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_BLUE 31:16
+#define NVC97D_HEAD_SET_CURSOR_COLOR_NORM_SCALE(a) (0x00002228 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_CURSOR_COLOR_NORM_SCALE_VALUE 15:0
+#define NVC97D_HEAD_SET_XOR_BLEND_FACTOR(a) (0x0000222C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_XOR_BLEND_FACTOR_LOG2PEAK_LUMINANCE 3:0
+#define NVC97D_HEAD_SET_XOR_BLEND_FACTOR_S1 16:4
+#define NVC97D_HEAD_SET_XOR_BLEND_FACTOR_S2 30:18
+#define NVC97D_HEAD_SET_CLAMP_RANGE_GREEN(a) (0x00002238 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_CLAMP_RANGE_GREEN_LOW 11:0
+#define NVC97D_HEAD_SET_CLAMP_RANGE_GREEN_HIGH 27:16
+#define NVC97D_HEAD_SET_CLAMP_RANGE_RED_BLUE(a) (0x0000223C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_CLAMP_RANGE_RED_BLUE_LOW 11:0
+#define NVC97D_HEAD_SET_CLAMP_RANGE_RED_BLUE_HIGH 27:16
+#define NVC97D_HEAD_SET_OCSC0CONTROL(a) (0x00002240 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC0CONTROL_ENABLE 0:0
+#define NVC97D_HEAD_SET_OCSC0CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_OCSC0CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C00(a) (0x00002244 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C00_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C01(a) (0x00002248 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C01_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C02(a) (0x0000224C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C02_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C03(a) (0x00002250 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C03_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C10(a) (0x00002254 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C10_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C11(a) (0x00002258 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C11_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C12(a) (0x0000225C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C12_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C13(a) (0x00002260 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C13_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C20(a) (0x00002264 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C20_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C21(a) (0x00002268 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C21_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C22(a) (0x0000226C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C22_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C23(a) (0x00002270 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC0COEFFICIENT_C23_VALUE 20:0
+#define NVC97D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0
+#define NVC97D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1
+#define NVC97D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_OLUT_CONTROL_MODE 3:2
+#define NVC97D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000)
+#define NVC97D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001)
+#define NVC97D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002)
+#define NVC97D_HEAD_SET_OLUT_CONTROL_SIZE 18:8
+#define NVC97D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0
+#define NVC97D_HEAD_SET_OCSC1CONTROL(a) (0x0000229C + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC1CONTROL_ENABLE 0:0
+#define NVC97D_HEAD_SET_OCSC1CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_OCSC1CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C00(a) (0x000022A0 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C00_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C01(a) (0x000022A4 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C01_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C02(a) (0x000022A8 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C02_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C03(a) (0x000022AC + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C03_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C10(a) (0x000022B0 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C10_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C11(a) (0x000022B4 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C11_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C12(a) (0x000022B8 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C12_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C13(a) (0x000022BC + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C13_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C20(a) (0x000022C0 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C20_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C21(a) (0x000022C4 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C21_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C22(a) (0x000022C8 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C22_VALUE 20:0
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C23(a) (0x000022CC + (a)*0x00000800)
+#define NVC97D_HEAD_SET_OCSC1COEFFICIENT_C23_VALUE 20:0
+#define NVC97D_HEAD_SET_HEAD_POSITION(a) (0x000022D0 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_HEAD_POSITION_X 2:0
+#define NVC97D_HEAD_SET_HEAD_POSITION_Y 6:4
+#define NVC97D_HEAD_SET_DSC_CONTROL(a) (0x000022D4 + (a)*0x00000800)
+#define NVC97D_HEAD_SET_DSC_CONTROL_ENABLE 0:0
+#define NVC97D_HEAD_SET_DSC_CONTROL_ENABLE_FALSE (0x00000000)
+#define NVC97D_HEAD_SET_DSC_CONTROL_ENABLE_TRUE (0x00000001)
+#define NVC97D_HEAD_SET_DSC_CONTROL_AUTO_RESET 3:3
+#define NVC97D_HEAD_SET_DSC_CONTROL_AUTO_RESET_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_DSC_CONTROL_AUTO_RESET_ENABLE (0x00000001)
+#define NVC97D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION 4:4
+#define NVC97D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_DISABLE (0x00000000)
+#define NVC97D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_ENABLE (0x00000001)
+#define
NVC97D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET 5:5 +#define NVC97D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_FALSE (0x00000000) +#define NVC97D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_TRUE (0x00000001) +#define NVC97D_HEAD_SET_DSC_CONTROL_FLATNESS_DET_THRESH 15:6 +#define NVC97D_HEAD_SET_DSC_PPS_CONTROL(a) (0x000022D8 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_CONTROL_ENABLE 0:0 +#define NVC97D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_FALSE (0x00000000) +#define NVC97D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_TRUE (0x00000001) +#define NVC97D_HEAD_SET_DSC_PPS_CONTROL_LOCATION 2:1 +#define NVC97D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VBLANK (0x00000000) +#define NVC97D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VSYNC (0x00000001) +#define NVC97D_HEAD_SET_DSC_PPS_CONTROL_SIZE 10:3 +#define NVC97D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY 11:11 +#define NVC97D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVC97D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVC97D_HEAD_SET_DSC_PPS_HEAD(a) (0x000022DC + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_HEAD_BYTE0 7:0 +#define NVC97D_HEAD_SET_DSC_PPS_HEAD_BYTE1 15:8 +#define NVC97D_HEAD_SET_DSC_PPS_HEAD_BYTE2 23:16 +#define NVC97D_HEAD_SET_DSC_PPS_HEAD_BYTE3 31:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA0(a) (0x000022E0 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MINOR 3:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MAJOR 7:4 +#define NVC97D_HEAD_SET_DSC_PPS_DATA0_PPS_IDENTIFIER 15:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA0_RESERVED 23:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA0_LINEBUF_DEPTH 27:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA0_BITS_PER_COMPONENT 31:28 +#define NVC97D_HEAD_SET_DSC_PPS_DATA1(a) (0x000022E4 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_HIGH 1:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA1_VBR_ENABLE 2:2 +#define NVC97D_HEAD_SET_DSC_PPS_DATA1_SIMPLE422 3:3 +#define NVC97D_HEAD_SET_DSC_PPS_DATA1_CONVERT_RGB 4:4 +#define NVC97D_HEAD_SET_DSC_PPS_DATA1_BLOCK_PRED_ENABLE 5:5 +#define NVC97D_HEAD_SET_DSC_PPS_DATA1_RESERVED 7:6 +#define NVC97D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_LOW 15:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_HIGH 23:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_LOW 31:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA2(a) (0x000022E8 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_HIGH 7:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_LOW 15:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_HIGH 23:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_LOW 31:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA3(a) (0x000022EC + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_HIGH 7:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_LOW 15:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_HIGH 23:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_LOW 31:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA4(a) (0x000022F0 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_HIGH 1:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA4_RESERVED 7:2 +#define NVC97D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_LOW 15:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_HIGH 23:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_LOW 31:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA5(a) (0x000022F4 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA5_RESERVED0 7:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA5_INITIAL_SCALE_VALUE 13:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA5_RESERVED1 15:14 +#define 
NVC97D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_HIGH 23:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_LOW 31:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA6(a) (0x000022F8 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_HIGH 3:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA6_RESERVED0 7:4 +#define NVC97D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_LOW 15:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA6_RESERVED1 23:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA6_FIRST_LINE_BPG_OFFSET 28:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA6_RESERVED2 31:29 +#define NVC97D_HEAD_SET_DSC_PPS_DATA7(a) (0x000022FC + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_HIGH 7:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_LOW 15:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_HIGH 23:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_LOW 31:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA8(a) (0x00002300 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_HIGH 7:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_LOW 15:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_HIGH 23:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_LOW 31:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA9(a) (0x00002304 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MIN_QP 4:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA9_RESERVED0 7:5 +#define NVC97D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MAX_QP 12:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA9_RESERVED1 15:13 +#define NVC97D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_HIGH 23:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_LOW 31:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA10(a) (0x00002308 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA10_RC_EDGE_FACTOR 3:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA10_RESERVED0 7:4 +#define NVC97D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT0 12:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA10_RESERVED1 15:13 +#define NVC97D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT1 20:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA10_RESERVED2 23:21 +#define NVC97D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_LO 27:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_HI 31:28 +#define NVC97D_HEAD_SET_DSC_PPS_DATA11(a) (0x0000230C + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH0 7:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH1 15:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH2 23:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH3 31:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA12(a) (0x00002310 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH4 7:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH5 15:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH6 23:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH7 31:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA13(a) (0x00002314 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH8 7:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH9 15:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH10 23:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH11 31:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA14(a) (0x00002318 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH12 7:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH13 15:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_HIGH0 18:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MIN_QP0 23:19 +#define NVC97D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_BPG_OFFSET0 
29:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_LOW0 31:30 +#define NVC97D_HEAD_SET_DSC_PPS_DATA15(a) (0x0000231C + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH1 2:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP1 7:3 +#define NVC97D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET1 13:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW1 15:14 +#define NVC97D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH2 18:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP2 23:19 +#define NVC97D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET2 29:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW2 31:30 +#define NVC97D_HEAD_SET_DSC_PPS_DATA16(a) (0x00002320 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH3 2:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP3 7:3 +#define NVC97D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET3 13:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW3 15:14 +#define NVC97D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH4 18:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP4 23:19 +#define NVC97D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET4 29:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW4 31:30 +#define NVC97D_HEAD_SET_DSC_PPS_DATA17(a) (0x00002324 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH5 2:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP5 7:3 +#define NVC97D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET5 13:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW5 15:14 +#define NVC97D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH6 18:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP6 23:19 +#define NVC97D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET6 29:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW6 31:30 +#define NVC97D_HEAD_SET_DSC_PPS_DATA18(a) (0x00002328 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH7 2:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP7 7:3 +#define NVC97D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET7 13:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW7 15:14 +#define NVC97D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH8 18:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP8 23:19 +#define NVC97D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET8 29:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW8 31:30 +#define NVC97D_HEAD_SET_DSC_PPS_DATA19(a) (0x0000232C + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH9 2:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP9 7:3 +#define NVC97D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET9 13:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW9 15:14 +#define NVC97D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH10 18:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP10 23:19 +#define NVC97D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET10 29:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW10 31:30 +#define NVC97D_HEAD_SET_DSC_PPS_DATA20(a) (0x00002330 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH11 2:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP11 7:3 +#define NVC97D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET11 13:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW11 15:14 +#define NVC97D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH12 18:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP12 23:19 +#define 
NVC97D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET12 29:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW12 31:30 +#define NVC97D_HEAD_SET_DSC_PPS_DATA21(a) (0x00002334 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH13 2:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP13 7:3 +#define NVC97D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET13 13:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW13 15:14 +#define NVC97D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH14 18:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP14 23:19 +#define NVC97D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET14 29:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW14 31:30 +#define NVC97D_HEAD_SET_DSC_PPS_DATA22(a) (0x00002338 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA22_NATIVE422 0:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA22_NATIVE420 1:1 +#define NVC97D_HEAD_SET_DSC_PPS_DATA22_RESERVED0 7:2 +#define NVC97D_HEAD_SET_DSC_PPS_DATA22_SECOND_LINE_BPG_OFFSET 12:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA22_RESERVED1 15:13 +#define NVC97D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSET_HIGH 23:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSETLOW 31:24 +#define NVC97D_HEAD_SET_DSC_PPS_DATA23(a) (0x0000233C + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_HIGH 7:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_LOW 15:8 +#define NVC97D_HEAD_SET_DSC_PPS_DATA23_RESERVED 31:16 +#define NVC97D_HEAD_SET_DSC_PPS_DATA24(a) (0x00002340 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA24_RESERVED 31:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA25(a) (0x00002344 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA25_RESERVED 31:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA26(a) (0x00002348 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA26_RESERVED 31:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA27(a) (0x0000234C + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA27_RESERVED 31:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA28(a) (0x00002350 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA28_RESERVED 31:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA29(a) (0x00002354 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA29_RESERVED 31:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA30(a) (0x00002358 + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA30_RESERVED 31:0 +#define NVC97D_HEAD_SET_DSC_PPS_DATA31(a) (0x0000235C + (a)*0x00000800) +#define NVC97D_HEAD_SET_DSC_PPS_DATA31_RESERVED 31:0 +#define NVC97D_HEAD_SET_RASTER_HBLANK_DELAY(a) (0x00002364 + (a)*0x00000800) +#define NVC97D_HEAD_SET_RASTER_HBLANK_DELAY_BLANK_START 15:0 +#define NVC97D_HEAD_SET_RASTER_HBLANK_DELAY_BLANK_END 31:16 +#define NVC97D_HEAD_SET_HDMI_DSC_HCACTIVE(a) (0x00002368 + (a)*0x00000800) +#define NVC97D_HEAD_SET_HDMI_DSC_HCACTIVE_BYTES 15:0 +#define NVC97D_HEAD_SET_HDMI_DSC_HCACTIVE_TRI_BYTES 31:16 +#define NVC97D_HEAD_SET_HDMI_DSC_HCBLANK(a) (0x0000236C + (a)*0x00000800) +#define NVC97D_HEAD_SET_HDMI_DSC_HCBLANK_WIDTH 15:0 +#define NVC97D_HEAD_SW_RESERVED(a,b) (0x00002370 + (a)*0x00000800 + (b)*0x00000004) +#define NVC97D_HEAD_SW_RESERVED_VALUE 31:0 +#define NVC97D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_HI(a,b) (0x00002380 + (a)*0x00000800 + (b)*0x00000004) +#define NVC97D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_HI_VALUE 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc97d_h diff --git a/src/common/sdk/nvidia/inc/class/clc97dswspare.h b/src/common/sdk/nvidia/inc/class/clc97dswspare.h new file mode 100644 index 0000000..1116835 --- 
/dev/null +++ b/src/common/sdk/nvidia/inc/class/clc97dswspare.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc97d_sw_spare_h_ +#define _clc97d_sw_spare_h_ + +/* This file is *not* auto-generated. */ + +// +// Quadro Sync is a mechanism used to synchronize scanout and flips between +// GPUs in different systems (e.g., to drive large video walls, such as in a +// planetarium). Special FPGA boards (e.g., P2060 or P2061) are added to the +// system to provide the reference frame lock signal. The VPLL_REF field below +// is set to "QSYNC" on the head which is selected to be driven by the external +// reference signal. As with any HEAD_SET_SW_SPARE method, changing the value +// of a field will trigger a supervisor interrupt sequence. +// +#define NVC97D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF 1:0 +#define NVC97D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF_NO_PREF (0x00000000) +#define NVC97D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF_QSYNC (0x00000001) + +#endif // _clc97d_sw_spare_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc97e.h b/src/common/sdk/nvidia/inc/class/clc97e.h new file mode 100644 index 0000000..55f514c --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc97e.h @@ -0,0 +1,740 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc97e_h_ +#define _clc97e_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC97E_WINDOW_CHANNEL_DMA (0x0000C97E) + +// dma opcode instructions +#define NVC97E_DMA +#define NVC97E_DMA_OPCODE 31:29 +#define NVC97E_DMA_OPCODE_METHOD 0x00000000 +#define NVC97E_DMA_OPCODE_JUMP 0x00000001 +#define NVC97E_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC97E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC97E_DMA_METHOD_COUNT 27:18 +#define NVC97E_DMA_METHOD_OFFSET 15:2 +#define NVC97E_DMA_DATA 31:0 +#define NVC97E_DMA_DATA_NOP 0x00000000 +#define NVC97E_DMA_JUMP_OFFSET 15:2 +#define NVC97E_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NVC97E_PUT (0x00000000) +#define NVC97E_PUT_PTR 9:0 +#define NVC97E_GET (0x00000004) +#define NVC97E_GET_PTR 9:0 +#define NVC97E_UPDATE (0x00000200) +#define NVC97E_UPDATE_RELEASE_ELV 0:0 +#define NVC97E_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC97E_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC97E_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC97E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC97E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC97E_UPDATE_INTERLOCK_WITH_WIN_IMM 12:12 +#define 
NVC97E_UPDATE_INTERLOCK_WITH_WIN_IMM_DISABLE (0x00000000) +#define NVC97E_UPDATE_INTERLOCK_WITH_WIN_IMM_ENABLE (0x00000001) +#define NVC97E_SET_SEMAPHORE_ACQUIRE_HI (0x00000204) +#define NVC97E_SET_SEMAPHORE_ACQUIRE_HI_VALUE 31:0 +#define NVC97E_GET_LINE (0x00000208) +#define NVC97E_GET_LINE_LINE 15:0 +#define NVC97E_SET_SEMAPHORE_CONTROL (0x0000020C) +#define NVC97E_SET_SEMAPHORE_CONTROL_SKIP_ACQ 11:11 +#define NVC97E_SET_SEMAPHORE_CONTROL_SKIP_ACQ_FALSE (0x00000000) +#define NVC97E_SET_SEMAPHORE_CONTROL_SKIP_ACQ_TRUE (0x00000001) +#define NVC97E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15 +#define NVC97E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000) +#define NVC97E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001) +#define NVC97E_SET_SEMAPHORE_CONTROL_ACQ_MODE 13:12 +#define NVC97E_SET_SEMAPHORE_CONTROL_ACQ_MODE_EQ (0x00000000) +#define NVC97E_SET_SEMAPHORE_CONTROL_ACQ_MODE_CGEQ (0x00000001) +#define NVC97E_SET_SEMAPHORE_CONTROL_ACQ_MODE_STRICT_GEQ (0x00000002) +#define NVC97E_SET_SEMAPHORE_CONTROL_REL_MODE 14:14 +#define NVC97E_SET_SEMAPHORE_CONTROL_REL_MODE_WRITE (0x00000000) +#define NVC97E_SET_SEMAPHORE_CONTROL_REL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC97E_SET_SEMAPHORE_ACQUIRE (0x00000210) +#define NVC97E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NVC97E_SET_SEMAPHORE_RELEASE (0x00000214) +#define NVC97E_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NVC97E_SET_NOTIFIER_CONTROL (0x00000220) +#define NVC97E_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC97E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC97E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC97E_SET_SIZE (0x00000224) +#define NVC97E_SET_SIZE_WIDTH 15:0 +#define NVC97E_SET_SIZE_HEIGHT 31:16 +#define NVC97E_SET_STORAGE (0x00000228) +#define NVC97E_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NVC97E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NVC97E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NVC97E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC97E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC97E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC97E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC97E_SET_PARAMS (0x0000022C) +#define NVC97E_SET_PARAMS_FORMAT 7:0 +#define NVC97E_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NVC97E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F) +#define NVC97E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NVC97E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC97E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E) +#define NVC97E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC97E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6) +#define NVC97E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NVC97E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9) +#define NVC97E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NVC97E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NVC97E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NVC97E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NVC97E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NVC97E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028) +#define NVC97E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029) +#define NVC97E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035) +#define NVC97E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036) +#define NVC97E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038) +#define 
NVC97E_SET_PARAMS_FORMAT_Y8___U8___V8_N444 (0x0000003A) +#define NVC97E_SET_PARAMS_FORMAT_Y8___U8___V8_N420 (0x0000003B) +#define NVC97E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055) +#define NVC97E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056) +#define NVC97E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058) +#define NVC97E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075) +#define NVC97E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076) +#define NVC97E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078) +#define NVC97E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18 +#define NVC97E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000) +#define NVC97E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001) +#define NVC97E_SET_PARAMS_SWAP_UV 19:19 +#define NVC97E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000) +#define NVC97E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001) +#define NVC97E_SET_PARAMS_FMT_ROUNDING_MODE 22:22 +#define NVC97E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST (0x00000000) +#define NVC97E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN (0x00000001) +#define NVC97E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004) +#define NVC97E_SET_PLANAR_STORAGE_PITCH 12:0 +#define NVC97E_SET_SEMAPHORE_RELEASE_HI (0x0000023C) +#define NVC97E_SET_SEMAPHORE_RELEASE_HI_VALUE 31:0 +#define NVC97E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004) +#define NVC97E_SET_POINT_IN_X 15:0 +#define NVC97E_SET_POINT_IN_Y 31:16 +#define NVC97E_SET_SIZE_IN (0x00000298) +#define NVC97E_SET_SIZE_IN_WIDTH 15:0 +#define NVC97E_SET_SIZE_IN_HEIGHT 31:16 +#define NVC97E_SET_SIZE_OUT (0x000002A4) +#define NVC97E_SET_SIZE_OUT_WIDTH 15:0 +#define NVC97E_SET_SIZE_OUT_HEIGHT 31:16 +#define NVC97E_SET_CONTROL_INPUT_SCALER (0x000002A8) +#define NVC97E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC97E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC97E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC97E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define NVC97E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVC97E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC97E_SET_INPUT_SCALER_COEFF_VALUE (0x000002AC) +#define NVC97E_SET_INPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVC97E_SET_INPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVC97E_SET_COMPOSITION_CONTROL (0x000002EC) +#define NVC97E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT 1:0 +#define NVC97E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE (0x00000000) +#define NVC97E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC (0x00000001) +#define NVC97E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST (0x00000002) +#define NVC97E_SET_COMPOSITION_CONTROL_DEPTH 11:4 +#define NVC97E_SET_COMPOSITION_CONTROL_BYPASS 16:16 +#define NVC97E_SET_COMPOSITION_CONTROL_BYPASS_DISABLE (0x00000000) +#define NVC97E_SET_COMPOSITION_CONTROL_BYPASS_ENABLE (0x00000001) +#define NVC97E_SET_COMPOSITION_CONSTANT_ALPHA (0x000002F0) +#define NVC97E_SET_COMPOSITION_CONSTANT_ALPHA_K1 7:0 +#define NVC97E_SET_COMPOSITION_CONSTANT_ALPHA_K2 15:8 +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT (0x000002F4) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT 3:0 +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC 
(0x00000005) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT 7:4 +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT 11:8 +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT 15:12 +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT 19:16 +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT 23:20 +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define 
NVC97E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT 27:24 +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT 31:28 +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC97E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC97E_SET_KEY_ALPHA (0x000002F8) +#define NVC97E_SET_KEY_ALPHA_MIN 15:0 +#define NVC97E_SET_KEY_ALPHA_MAX 31:16 +#define NVC97E_SET_KEY_RED_CR (0x000002FC) +#define NVC97E_SET_KEY_RED_CR_MIN 15:0 +#define NVC97E_SET_KEY_RED_CR_MAX 31:16 +#define NVC97E_SET_KEY_GREEN_Y (0x00000300) +#define NVC97E_SET_KEY_GREEN_Y_MIN 15:0 +#define NVC97E_SET_KEY_GREEN_Y_MAX 31:16 +#define NVC97E_SET_KEY_BLUE_CB (0x00000304) +#define NVC97E_SET_KEY_BLUE_CB_MIN 15:0 +#define NVC97E_SET_KEY_BLUE_CB_MAX 31:16 +#define NVC97E_SET_PRESENT_CONTROL (0x00000308) +#define NVC97E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NVC97E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4 +#define NVC97E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NVC97E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NVC97E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8 +#define NVC97E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NVC97E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NVC97E_SET_PRESENT_CONTROL_STEREO_MODE 13:12 +#define NVC97E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000) +#define NVC97E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001) +#define NVC97E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002) +#define NVC97E_SET_ACQ_SEMAPHORE_VALUE_HI (0x0000030C) +#define NVC97E_SET_ACQ_SEMAPHORE_VALUE_HI_VALUE 31:0 +#define NVC97E_SET_ACQ_SEMAPHORE_CONTROL (0x00000330) +#define NVC97E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15 +#define NVC97E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000) +#define NVC97E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001) +#define NVC97E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE 13:12 +#define NVC97E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_EQ (0x00000000) +#define NVC97E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_CGEQ (0x00000001) +#define NVC97E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_STRICT_GEQ (0x00000002) +#define NVC97E_SET_ACQ_SEMAPHORE_VALUE (0x00000334) +#define NVC97E_SET_ACQ_SEMAPHORE_VALUE_VALUE 31:0 +#define NVC97E_SET_SCAN_DIRECTION (0x0000033C) +#define NVC97E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION 0:0 +#define NVC97E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION_FROM_LEFT (0x00000000) +#define NVC97E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION_FROM_RIGHT (0x00000001) +#define NVC97E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION 1:1 +#define NVC97E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION_FROM_TOP (0x00000000) +#define NVC97E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION_FROM_BOTTOM 
(0x00000001) +#define NVC97E_SET_SCAN_DIRECTION_COLUMN_ORDER 2:2 +#define NVC97E_SET_SCAN_DIRECTION_COLUMN_ORDER_FALSE (0x00000000) +#define NVC97E_SET_SCAN_DIRECTION_COLUMN_ORDER_TRUE (0x00000001) +#define NVC97E_SET_TIMESTAMP_ORIGIN_LO (0x00000340) +#define NVC97E_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NVC97E_SET_TIMESTAMP_ORIGIN_HI (0x00000344) +#define NVC97E_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NVC97E_SET_UPDATE_TIMESTAMP_LO (0x00000348) +#define NVC97E_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NVC97E_SET_UPDATE_TIMESTAMP_HI (0x0000034C) +#define NVC97E_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NVC97E_SET_INTERLOCK_FLAGS (0x00000370) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 0:0 +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+1):((i)+1) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 1:1 +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 2:2 +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 3:3 +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 4:4 +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 5:5 +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 6:6 +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 7:7 +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 8:8 +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC97E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS (0x00000374) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define 
NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define 
NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define 
NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC97E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC97E_SET_EXT_PACKET_CONTROL (0x00000398) +#define NVC97E_SET_EXT_PACKET_CONTROL_ENABLE 0:0 +#define NVC97E_SET_EXT_PACKET_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC97E_SET_EXT_PACKET_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC97E_SET_EXT_PACKET_CONTROL_LOCATION 4:4 +#define NVC97E_SET_EXT_PACKET_CONTROL_LOCATION_VSYNC (0x00000000) +#define NVC97E_SET_EXT_PACKET_CONTROL_LOCATION_VBLANK (0x00000001) +#define NVC97E_SET_EXT_PACKET_CONTROL_FREQUENCY 8:8 +#define NVC97E_SET_EXT_PACKET_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVC97E_SET_EXT_PACKET_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVC97E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE 12:12 +#define NVC97E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_DISABLE (0x00000000) +#define NVC97E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_ENABLE (0x00000001) +#define NVC97E_SET_EXT_PACKET_CONTROL_SIZE 27:16 +#define NVC97E_SET_EXT_PACKET_DATA (0x0000039C) +#define NVC97E_SET_EXT_PACKET_DATA_DB0 7:0 +#define NVC97E_SET_EXT_PACKET_DATA_DB1 15:8 +#define NVC97E_SET_EXT_PACKET_DATA_DB2 23:16 +#define NVC97E_SET_EXT_PACKET_DATA_DB3 31:24 +#define NVC97E_SET_FMT_COEFFICIENT_C00 (0x00000400) +#define NVC97E_SET_FMT_COEFFICIENT_C00_VALUE 20:0 +#define NVC97E_SET_FMT_COEFFICIENT_C01 (0x00000404) +#define NVC97E_SET_FMT_COEFFICIENT_C01_VALUE 20:0 +#define NVC97E_SET_FMT_COEFFICIENT_C02 (0x00000408) +#define NVC97E_SET_FMT_COEFFICIENT_C02_VALUE 20:0 +#define NVC97E_SET_FMT_COEFFICIENT_C03 (0x0000040C) +#define NVC97E_SET_FMT_COEFFICIENT_C03_VALUE 20:0 +#define NVC97E_SET_FMT_COEFFICIENT_C10 (0x00000410) +#define NVC97E_SET_FMT_COEFFICIENT_C10_VALUE 20:0 +#define NVC97E_SET_FMT_COEFFICIENT_C11 (0x00000414) +#define NVC97E_SET_FMT_COEFFICIENT_C11_VALUE 20:0 +#define NVC97E_SET_FMT_COEFFICIENT_C12 (0x00000418) +#define NVC97E_SET_FMT_COEFFICIENT_C12_VALUE 20:0 +#define NVC97E_SET_FMT_COEFFICIENT_C13 (0x0000041C) +#define NVC97E_SET_FMT_COEFFICIENT_C13_VALUE 20:0 +#define NVC97E_SET_FMT_COEFFICIENT_C20 (0x00000420) +#define NVC97E_SET_FMT_COEFFICIENT_C20_VALUE 20:0 +#define NVC97E_SET_FMT_COEFFICIENT_C21 (0x00000424) +#define NVC97E_SET_FMT_COEFFICIENT_C21_VALUE 20:0 +#define NVC97E_SET_FMT_COEFFICIENT_C22 (0x00000428) +#define NVC97E_SET_FMT_COEFFICIENT_C22_VALUE 20:0 +#define NVC97E_SET_FMT_COEFFICIENT_C23 (0x0000042C) +#define NVC97E_SET_FMT_COEFFICIENT_C23_VALUE 20:0 +#define NVC97E_SET_ILUT_CONTROL (0x00000440) +#define NVC97E_SET_ILUT_CONTROL_INTERPOLATE 0:0 +#define NVC97E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC97E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC97E_SET_ILUT_CONTROL_MIRROR 1:1 +#define NVC97E_SET_ILUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC97E_SET_ILUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC97E_SET_ILUT_CONTROL_MODE 3:2 +#define NVC97E_SET_ILUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define 
NVC97E_SET_ILUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVC97E_SET_ILUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVC97E_SET_ILUT_CONTROL_SIZE 18:8 +#define NVC97E_SET_CSC00CONTROL (0x0000045C) +#define NVC97E_SET_CSC00CONTROL_ENABLE 0:0 +#define NVC97E_SET_CSC00CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC97E_SET_CSC00CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC97E_SET_CSC00COEFFICIENT_C00 (0x00000460) +#define NVC97E_SET_CSC00COEFFICIENT_C00_VALUE 20:0 +#define NVC97E_SET_CSC00COEFFICIENT_C01 (0x00000464) +#define NVC97E_SET_CSC00COEFFICIENT_C01_VALUE 20:0 +#define NVC97E_SET_CSC00COEFFICIENT_C02 (0x00000468) +#define NVC97E_SET_CSC00COEFFICIENT_C02_VALUE 20:0 +#define NVC97E_SET_CSC00COEFFICIENT_C03 (0x0000046C) +#define NVC97E_SET_CSC00COEFFICIENT_C03_VALUE 20:0 +#define NVC97E_SET_CSC00COEFFICIENT_C10 (0x00000470) +#define NVC97E_SET_CSC00COEFFICIENT_C10_VALUE 20:0 +#define NVC97E_SET_CSC00COEFFICIENT_C11 (0x00000474) +#define NVC97E_SET_CSC00COEFFICIENT_C11_VALUE 20:0 +#define NVC97E_SET_CSC00COEFFICIENT_C12 (0x00000478) +#define NVC97E_SET_CSC00COEFFICIENT_C12_VALUE 20:0 +#define NVC97E_SET_CSC00COEFFICIENT_C13 (0x0000047C) +#define NVC97E_SET_CSC00COEFFICIENT_C13_VALUE 20:0 +#define NVC97E_SET_CSC00COEFFICIENT_C20 (0x00000480) +#define NVC97E_SET_CSC00COEFFICIENT_C20_VALUE 20:0 +#define NVC97E_SET_CSC00COEFFICIENT_C21 (0x00000484) +#define NVC97E_SET_CSC00COEFFICIENT_C21_VALUE 20:0 +#define NVC97E_SET_CSC00COEFFICIENT_C22 (0x00000488) +#define NVC97E_SET_CSC00COEFFICIENT_C22_VALUE 20:0 +#define NVC97E_SET_CSC00COEFFICIENT_C23 (0x0000048C) +#define NVC97E_SET_CSC00COEFFICIENT_C23_VALUE 20:0 +#define NVC97E_SET_CSC0LUT_CONTROL (0x000004A0) +#define NVC97E_SET_CSC0LUT_CONTROL_INTERPOLATE 0:0 +#define NVC97E_SET_CSC0LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC97E_SET_CSC0LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC97E_SET_CSC0LUT_CONTROL_MIRROR 1:1 +#define NVC97E_SET_CSC0LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC97E_SET_CSC0LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC97E_SET_CSC0LUT_CONTROL_ENABLE 4:4 +#define NVC97E_SET_CSC0LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC97E_SET_CSC0LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC97E_SET_CSC01CONTROL (0x000004BC) +#define NVC97E_SET_CSC01CONTROL_ENABLE 0:0 +#define NVC97E_SET_CSC01CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC97E_SET_CSC01CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC97E_SET_CSC01COEFFICIENT_C00 (0x000004C0) +#define NVC97E_SET_CSC01COEFFICIENT_C00_VALUE 20:0 +#define NVC97E_SET_CSC01COEFFICIENT_C01 (0x000004C4) +#define NVC97E_SET_CSC01COEFFICIENT_C01_VALUE 20:0 +#define NVC97E_SET_CSC01COEFFICIENT_C02 (0x000004C8) +#define NVC97E_SET_CSC01COEFFICIENT_C02_VALUE 20:0 +#define NVC97E_SET_CSC01COEFFICIENT_C03 (0x000004CC) +#define NVC97E_SET_CSC01COEFFICIENT_C03_VALUE 20:0 +#define NVC97E_SET_CSC01COEFFICIENT_C10 (0x000004D0) +#define NVC97E_SET_CSC01COEFFICIENT_C10_VALUE 20:0 +#define NVC97E_SET_CSC01COEFFICIENT_C11 (0x000004D4) +#define NVC97E_SET_CSC01COEFFICIENT_C11_VALUE 20:0 +#define NVC97E_SET_CSC01COEFFICIENT_C12 (0x000004D8) +#define NVC97E_SET_CSC01COEFFICIENT_C12_VALUE 20:0 +#define NVC97E_SET_CSC01COEFFICIENT_C13 (0x000004DC) +#define NVC97E_SET_CSC01COEFFICIENT_C13_VALUE 20:0 +#define NVC97E_SET_CSC01COEFFICIENT_C20 (0x000004E0) +#define NVC97E_SET_CSC01COEFFICIENT_C20_VALUE 20:0 +#define NVC97E_SET_CSC01COEFFICIENT_C21 (0x000004E4) +#define NVC97E_SET_CSC01COEFFICIENT_C21_VALUE 20:0 +#define NVC97E_SET_CSC01COEFFICIENT_C22 (0x000004E8) 
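Throughout these class headers a register field is given as a bare "HI:LO" bit-range pair (for example, the NVC97E_SET_CSC01COEFFICIENT_C22_VALUE field defined just below occupies bits 20:0 of the 32-bit data word sent with the NVC97E_SET_CSC01COEFFICIENT_C22 method at 0x000004E8) rather than as a precomputed mask. In the driver these pairs are consumed by the DRF-style macros in kernel-open/common/inc/nvmisc.h (DRF_NUM, DRF_DEF, DRF_VAL), where the hi:lo token itself ends up inside a ternary expression to recover each bound. As a minimal self-contained sketch of the same shift/mask arithmetic, using hypothetical helper names that are not part of this patch:

/* Illustrative only -- not part of the patch. NvU32 comes from nvtypes.h. */
#include "nvtypes.h"

/* Expand a HI:LO bit-range pair into the usual mask/insert/extract ops. */
#define NV_FIELD_MASK(hi, lo)   ((0xFFFFFFFFu >> (31 - (hi) + (lo))) << (lo))
#define NV_FIELD_NUM(hi, lo, n) (((NvU32)(n) << (lo)) & NV_FIELD_MASK(hi, lo))
#define NV_FIELD_VAL(hi, lo, w) (((NvU32)(w) & NV_FIELD_MASK(hi, lo)) >> (lo))

/* Example: the VALUE field below is bits 20:0, so NV_FIELD_MASK(20, 0)
 * evaluates to 0x001FFFFF and the coefficient is truncated to 21 bits
 * before being placed in the method's data word. */
static inline NvU32 csc01_c22_data(NvU32 coeff)
{
    return NV_FIELD_NUM(20, 0, coeff);
}

The real macros paste the class/method/field tokens together (e.g. DRF_NUM(C97E, _SET_CSC01COEFFICIENT_C22, _VALUE, n) resolves against the defines here), so the sketch above only mirrors the arithmetic, not the token plumbing.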
+#define NVC97E_SET_CSC01COEFFICIENT_C22_VALUE 20:0
+#define NVC97E_SET_CSC01COEFFICIENT_C23 (0x000004EC)
+#define NVC97E_SET_CSC01COEFFICIENT_C23_VALUE 20:0
+#define NVC97E_SET_TMO_CONTROL (0x00000500)
+#define NVC97E_SET_TMO_CONTROL_INTERPOLATE 0:0
+#define NVC97E_SET_TMO_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVC97E_SET_TMO_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVC97E_SET_TMO_CONTROL_SAT_MODE 3:2
+#define NVC97E_SET_TMO_CONTROL_SIZE 18:8
+#define NVC97E_SET_TMO_LOW_INTENSITY_ZONE (0x00000508)
+#define NVC97E_SET_TMO_LOW_INTENSITY_ZONE_END 29:16
+#define NVC97E_SET_TMO_LOW_INTENSITY_VALUE (0x0000050C)
+#define NVC97E_SET_TMO_LOW_INTENSITY_VALUE_LIN_WEIGHT 8:0
+#define NVC97E_SET_TMO_LOW_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12
+#define NVC97E_SET_TMO_LOW_INTENSITY_VALUE_THRESHOLD 31:24
+#define NVC97E_SET_TMO_MEDIUM_INTENSITY_ZONE (0x00000510)
+#define NVC97E_SET_TMO_MEDIUM_INTENSITY_ZONE_START 13:0
+#define NVC97E_SET_TMO_MEDIUM_INTENSITY_ZONE_END 29:16
+#define NVC97E_SET_TMO_MEDIUM_INTENSITY_VALUE (0x00000514)
+#define NVC97E_SET_TMO_MEDIUM_INTENSITY_VALUE_LIN_WEIGHT 8:0
+#define NVC97E_SET_TMO_MEDIUM_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12
+#define NVC97E_SET_TMO_MEDIUM_INTENSITY_VALUE_THRESHOLD 31:24
+#define NVC97E_SET_TMO_HIGH_INTENSITY_ZONE (0x00000518)
+#define NVC97E_SET_TMO_HIGH_INTENSITY_ZONE_START 13:0
+#define NVC97E_SET_TMO_HIGH_INTENSITY_VALUE (0x0000051C)
+#define NVC97E_SET_TMO_HIGH_INTENSITY_VALUE_LIN_WEIGHT 8:0
+#define NVC97E_SET_TMO_HIGH_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12
+#define NVC97E_SET_TMO_HIGH_INTENSITY_VALUE_THRESHOLD 31:24
+#define NVC97E_SET_CSC10CONTROL (0x0000053C)
+#define NVC97E_SET_CSC10CONTROL_ENABLE 0:0
+#define NVC97E_SET_CSC10CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC97E_SET_CSC10CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC97E_SET_CSC10COEFFICIENT_C00 (0x00000540)
+#define NVC97E_SET_CSC10COEFFICIENT_C00_VALUE 20:0
+#define NVC97E_SET_CSC10COEFFICIENT_C01 (0x00000544)
+#define NVC97E_SET_CSC10COEFFICIENT_C01_VALUE 20:0
+#define NVC97E_SET_CSC10COEFFICIENT_C02 (0x00000548)
+#define NVC97E_SET_CSC10COEFFICIENT_C02_VALUE 20:0
+#define NVC97E_SET_CSC10COEFFICIENT_C03 (0x0000054C)
+#define NVC97E_SET_CSC10COEFFICIENT_C03_VALUE 20:0
+#define NVC97E_SET_CSC10COEFFICIENT_C10 (0x00000550)
+#define NVC97E_SET_CSC10COEFFICIENT_C10_VALUE 20:0
+#define NVC97E_SET_CSC10COEFFICIENT_C11 (0x00000554)
+#define NVC97E_SET_CSC10COEFFICIENT_C11_VALUE 20:0
+#define NVC97E_SET_CSC10COEFFICIENT_C12 (0x00000558)
+#define NVC97E_SET_CSC10COEFFICIENT_C12_VALUE 20:0
+#define NVC97E_SET_CSC10COEFFICIENT_C13 (0x0000055C)
+#define NVC97E_SET_CSC10COEFFICIENT_C13_VALUE 20:0
+#define NVC97E_SET_CSC10COEFFICIENT_C20 (0x00000560)
+#define NVC97E_SET_CSC10COEFFICIENT_C20_VALUE 20:0
+#define NVC97E_SET_CSC10COEFFICIENT_C21 (0x00000564)
+#define NVC97E_SET_CSC10COEFFICIENT_C21_VALUE 20:0
+#define NVC97E_SET_CSC10COEFFICIENT_C22 (0x00000568)
+#define NVC97E_SET_CSC10COEFFICIENT_C22_VALUE 20:0
+#define NVC97E_SET_CSC10COEFFICIENT_C23 (0x0000056C)
+#define NVC97E_SET_CSC10COEFFICIENT_C23_VALUE 20:0
+#define NVC97E_SET_CSC1LUT_CONTROL (0x00000580)
+#define NVC97E_SET_CSC1LUT_CONTROL_INTERPOLATE 0:0
+#define NVC97E_SET_CSC1LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVC97E_SET_CSC1LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVC97E_SET_CSC1LUT_CONTROL_MIRROR 1:1
+#define NVC97E_SET_CSC1LUT_CONTROL_MIRROR_DISABLE (0x00000000)
+#define NVC97E_SET_CSC1LUT_CONTROL_MIRROR_ENABLE (0x00000001)
+#define NVC97E_SET_CSC1LUT_CONTROL_ENABLE 4:4
+#define NVC97E_SET_CSC1LUT_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC97E_SET_CSC1LUT_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC97E_SET_CSC11CONTROL (0x0000059C)
+#define NVC97E_SET_CSC11CONTROL_ENABLE 0:0
+#define NVC97E_SET_CSC11CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC97E_SET_CSC11CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC97E_SET_CSC11COEFFICIENT_C00 (0x000005A0)
+#define NVC97E_SET_CSC11COEFFICIENT_C00_VALUE 20:0
+#define NVC97E_SET_CSC11COEFFICIENT_C01 (0x000005A4)
+#define NVC97E_SET_CSC11COEFFICIENT_C01_VALUE 20:0
+#define NVC97E_SET_CSC11COEFFICIENT_C02 (0x000005A8)
+#define NVC97E_SET_CSC11COEFFICIENT_C02_VALUE 20:0
+#define NVC97E_SET_CSC11COEFFICIENT_C03 (0x000005AC)
+#define NVC97E_SET_CSC11COEFFICIENT_C03_VALUE 20:0
+#define NVC97E_SET_CSC11COEFFICIENT_C10 (0x000005B0)
+#define NVC97E_SET_CSC11COEFFICIENT_C10_VALUE 20:0
+#define NVC97E_SET_CSC11COEFFICIENT_C11 (0x000005B4)
+#define NVC97E_SET_CSC11COEFFICIENT_C11_VALUE 20:0
+#define NVC97E_SET_CSC11COEFFICIENT_C12 (0x000005B8)
+#define NVC97E_SET_CSC11COEFFICIENT_C12_VALUE 20:0
+#define NVC97E_SET_CSC11COEFFICIENT_C13 (0x000005BC)
+#define NVC97E_SET_CSC11COEFFICIENT_C13_VALUE 20:0
+#define NVC97E_SET_CSC11COEFFICIENT_C20 (0x000005C0)
+#define NVC97E_SET_CSC11COEFFICIENT_C20_VALUE 20:0
+#define NVC97E_SET_CSC11COEFFICIENT_C21 (0x000005C4)
+#define NVC97E_SET_CSC11COEFFICIENT_C21_VALUE 20:0
+#define NVC97E_SET_CSC11COEFFICIENT_C22 (0x000005C8)
+#define NVC97E_SET_CSC11COEFFICIENT_C22_VALUE 20:0
+#define NVC97E_SET_CSC11COEFFICIENT_C23 (0x000005CC)
+#define NVC97E_SET_CSC11COEFFICIENT_C23_VALUE 20:0
+#define NVC97E_SET_CLAMP_RANGE (0x000005D0)
+#define NVC97E_SET_CLAMP_RANGE_LOW 15:0
+#define NVC97E_SET_CLAMP_RANGE_HIGH 31:16
+#define NVC97E_SW_RESERVED(b) (0x000005D4 + (b)*0x00000004)
+#define NVC97E_SW_RESERVED_VALUE 31:0
+#define NVC97E_SET_SURFACE_ADDRESS_HI_SEMAPHORE (0x00000640)
+#define NVC97E_SET_SURFACE_ADDRESS_HI_SEMAPHORE_ADDRESS_HI 31:0
+#define NVC97E_SET_SURFACE_ADDRESS_LO_SEMAPHORE (0x00000644)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_ADDRESS_LO 31:4
+#define NVC97E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_TARGET 3:2
+#define NVC97E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_TARGET_IOVA (0x00000000)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_ENABLE 0:0
+#define NVC97E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_ENABLE_DISABLE (0x00000000)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_ENABLE_ENABLE (0x00000001)
+#define NVC97E_SET_SURFACE_ADDRESS_HI_ACQ_SEMAPHORE (0x00000648)
+#define NVC97E_SET_SURFACE_ADDRESS_HI_ACQ_SEMAPHORE_ADDRESS_HI 31:0
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE (0x0000064C)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_ADDRESS_LO 31:4
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_TARGET 3:2
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_TARGET_IOVA (0x00000000)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_ENABLE 0:0
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_ENABLE_DISABLE (0x00000000)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_ENABLE_ENABLE (0x00000001)
+#define NVC97E_SET_SURFACE_ADDRESS_HI_NOTIFIER (0x00000650)
+#define NVC97E_SET_SURFACE_ADDRESS_HI_NOTIFIER_ADDRESS_HI 31:0
+#define NVC97E_SET_SURFACE_ADDRESS_LO_NOTIFIER (0x00000654)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ADDRESS_LO 31:4
+#define NVC97E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET 3:2
+#define NVC97E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_IOVA (0x00000000)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE 0:0
+#define NVC97E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_DISABLE (0x00000000)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_ENABLE (0x00000001)
+#define NVC97E_SET_SURFACE_ADDRESS_HI_ISO(b) (0x00000658 + (b)*0x00000004)
+#define NVC97E_SET_SURFACE_ADDRESS_HI_ISO_ADDRESS_HI 31:0
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ISO(b) (0x00000670 + (b)*0x00000004)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ISO_ADDRESS_LO 31:4
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ISO_TARGET 3:2
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_IOVA (0x00000000)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ISO_KIND 1:1
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ISO_KIND_PITCH (0x00000000)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ISO_KIND_BLOCKLINEAR (0x00000001)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE 0:0
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_DISABLE (0x00000000)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_ENABLE (0x00000001)
+#define NVC97E_SET_SURFACE_ADDRESS_HI_ILUT (0x00000688)
+#define NVC97E_SET_SURFACE_ADDRESS_HI_ILUT_ADDRESS_HI 31:0
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ILUT (0x0000068C)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ILUT_ADDRESS_LO 31:4
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET 3:2
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_IOVA (0x00000000)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE 0:0
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE_DISABLE (0x00000000)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE_ENABLE (0x00000001)
+#define NVC97E_SET_SURFACE_ADDRESS_HI_TMO_LUT (0x00000690)
+#define NVC97E_SET_SURFACE_ADDRESS_HI_TMO_LUT_ADDRESS_HI 31:0
+#define NVC97E_SET_SURFACE_ADDRESS_LO_TMO_LUT (0x00000694)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_TMO_LUT_ADDRESS_LO 31:4
+#define NVC97E_SET_SURFACE_ADDRESS_LO_TMO_LUT_TARGET 3:2
+#define NVC97E_SET_SURFACE_ADDRESS_LO_TMO_LUT_TARGET_IOVA (0x00000000)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_TMO_LUT_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_TMO_LUT_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_TMO_LUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_TMO_LUT_ENABLE 0:0
+#define NVC97E_SET_SURFACE_ADDRESS_LO_TMO_LUT_ENABLE_DISABLE (0x00000000)
+#define NVC97E_SET_SURFACE_ADDRESS_LO_TMO_LUT_ENABLE_ENABLE (0x00000001)
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clc97e_h
diff --git a/src/common/sdk/nvidia/inc/class/clc997.h b/src/common/sdk/nvidia/inc/class/clc997.h
new file mode 100644
index 0000000..81607be
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clc997.h
@@ -0,0 +1,4481 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl_ada_a_h_
+#define _cl_ada_a_h_
+
+/* AUTO GENERATED FILE -- DO NOT EDIT */
+/* Command: ../../../../class/bin/sw_header.pl ada_a */
+
+#include "nvtypes.h"
+
+#define ADA_A 0xC997
+
+#define NVC997_SET_OBJECT 0x0000
+#define NVC997_SET_OBJECT_CLASS_ID 15:0
+#define NVC997_SET_OBJECT_ENGINE_ID 20:16
+
+#define NVC997_NO_OPERATION 0x0100
+#define NVC997_NO_OPERATION_V 31:0
+
+#define NVC997_SET_NOTIFY_A 0x0104
+#define NVC997_SET_NOTIFY_A_ADDRESS_UPPER 7:0
+
+#define NVC997_SET_NOTIFY_B 0x0108
+#define NVC997_SET_NOTIFY_B_ADDRESS_LOWER 31:0
+
+#define NVC997_NOTIFY 0x010c
+#define NVC997_NOTIFY_TYPE 31:0
+#define NVC997_NOTIFY_TYPE_WRITE_ONLY 0x00000000
+#define NVC997_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001
+
+#define NVC997_WAIT_FOR_IDLE 0x0110
+#define NVC997_WAIT_FOR_IDLE_V 31:0
+
+#define NVC997_LOAD_MME_INSTRUCTION_RAM_POINTER 0x0114
+#define NVC997_LOAD_MME_INSTRUCTION_RAM_POINTER_V 31:0
+
+#define NVC997_LOAD_MME_INSTRUCTION_RAM 0x0118
+#define NVC997_LOAD_MME_INSTRUCTION_RAM_V 31:0
+
+#define NVC997_LOAD_MME_START_ADDRESS_RAM_POINTER 0x011c
+#define NVC997_LOAD_MME_START_ADDRESS_RAM_POINTER_V 31:0
+
+#define NVC997_LOAD_MME_START_ADDRESS_RAM 0x0120
+#define NVC997_LOAD_MME_START_ADDRESS_RAM_V 31:0
+
+#define NVC997_SET_MME_SHADOW_RAM_CONTROL 0x0124
+#define NVC997_SET_MME_SHADOW_RAM_CONTROL_MODE 1:0
+#define NVC997_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK 0x00000000
+#define NVC997_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK_WITH_FILTER 0x00000001
+#define NVC997_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH 0x00000002
+#define NVC997_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_REPLAY 0x00000003
+
+#define NVC997_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER 0x0128
+#define NVC997_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER_V 7:0
+
+#define NVC997_PEER_SEMAPHORE_RELEASE_OFFSET 0x012c
+#define NVC997_PEER_SEMAPHORE_RELEASE_OFFSET_V 31:0
+
+#define NVC997_SET_GLOBAL_RENDER_ENABLE_A 0x0130
+#define NVC997_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0
+
+#define NVC997_SET_GLOBAL_RENDER_ENABLE_B 0x0134
+#define NVC997_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0
+
+#define NVC997_SET_GLOBAL_RENDER_ENABLE_C 0x0138
+#define NVC997_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0
+#define NVC997_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000
+#define NVC997_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001
+#define NVC997_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002
+#define NVC997_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003
+#define NVC997_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NVC997_SEND_GO_IDLE 0x013c
+#define NVC997_SEND_GO_IDLE_V 31:0
+
+#define NVC997_PM_TRIGGER 0x0140
+#define NVC997_PM_TRIGGER_V 31:0
+
+#define NVC997_PM_TRIGGER_WFI 0x0144
+#define NVC997_PM_TRIGGER_WFI_V 31:0
+
+#define NVC997_FE_ATOMIC_SEQUENCE_BEGIN 0x0148
+#define NVC997_FE_ATOMIC_SEQUENCE_BEGIN_V 31:0
+
+#define NVC997_FE_ATOMIC_SEQUENCE_END 0x014c
+#define NVC997_FE_ATOMIC_SEQUENCE_END_V 31:0
+
+#define NVC997_SET_INSTRUMENTATION_METHOD_HEADER 0x0150
+#define NVC997_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0
+
+#define NVC997_SET_INSTRUMENTATION_METHOD_DATA 0x0154
+#define NVC997_SET_INSTRUMENTATION_METHOD_DATA_V 31:0
+
+#define NVC997_SET_REPORT_SEMAPHORE_PAYLOAD_LOWER 0x0158
+#define NVC997_SET_REPORT_SEMAPHORE_PAYLOAD_LOWER_PAYLOAD_LOWER 31:0
+
+#define NVC997_SET_REPORT_SEMAPHORE_PAYLOAD_UPPER 0x015c
+#define NVC997_SET_REPORT_SEMAPHORE_PAYLOAD_UPPER_PAYLOAD_UPPER 31:0
+
+#define NVC997_SET_REPORT_SEMAPHORE_ADDRESS_LOWER 0x0160
+#define NVC997_SET_REPORT_SEMAPHORE_ADDRESS_LOWER_LOWER 31:0
+
+#define NVC997_SET_REPORT_SEMAPHORE_ADDRESS_UPPER 0x0164
+#define NVC997_SET_REPORT_SEMAPHORE_ADDRESS_UPPER_UPPER 7:0
+
+#define NVC997_REPORT_SEMAPHORE_EXECUTE 0x0168
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_OPERATION 1:0
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_OPERATION_RELEASE 0x00000000
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_OPERATION_ACQUIRE 0x00000001
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_OPERATION_REPORT_ONLY 0x00000002
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_OPERATION_TRAP 0x00000003
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION 5:2
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_NONE 0x00000000
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_DATA_ASSEMBLER 0x00000001
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_VERTEX_SHADER 0x00000002
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_TESSELATION_INIT_SHADER 0x00000008
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_TESSELATION_SHADER 0x00000009
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_GEOMETRY_SHADER 0x00000006
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_STREAMING_OUTPUT 0x00000005
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_VPC 0x00000004
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_ZCULL 0x00000007
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_PIXEL_SHADER 0x0000000A
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_DEPTH_TEST 0x0000000C
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_ALL 0x0000000F
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_AWAKEN_ENABLE 6:6
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_AWAKEN_ENABLE_FALSE 0x00000000
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_AWAKEN_ENABLE_TRUE 0x00000001
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT 11:7
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_NONE 0x00000000
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_DA_VERTICES_GENERATED 0x00000001
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_DA_PRIMITIVES_GENERATED 0x00000003
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_VS_INVOCATIONS 0x00000005
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_TI_INVOCATIONS 0x0000001B
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_TS_INVOCATIONS 0x0000001D
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_TS_PRIMITIVES_GENERATED 0x0000001F
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_GS_INVOCATIONS 0x00000007
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_GS_PRIMITIVES_GENERATED 0x00000009
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_ALPHA_BETA_CLOCKS 0x00000004
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_SCG_CLOCKS 0x00000008
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_VTG_PRIMITIVES_OUT 0x00000012
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x0000001E
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_STREAMING_PRIMITIVES_SUCCEEDED 0x0000000B
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_STREAMING_PRIMITIVES_NEEDED 0x0000000D
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000006
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_STREAMING_BYTE_COUNT 0x0000001A
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_CLIPPER_INVOCATIONS 0x0000000F
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_CLIPPER_PRIMITIVES_GENERATED 0x00000011
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_ZCULL_STATS0 0x0000000A
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_ZCULL_STATS1 0x0000000C
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_ZCULL_STATS2 0x0000000E
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_ZCULL_STATS3 0x00000010
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_PS_INVOCATIONS 0x00000013
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_ZPASS_PIXEL_CNT 0x00000002
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_ZPASS_PIXEL_CNT64 0x00000015
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_TILED_ZPASS_PIXEL_CNT64 0x00000017
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_IEEE_CLEAN_COLOR_TARGET 0x00000018
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_IEEE_CLEAN_ZETA_TARGET 0x00000019
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_BOUNDING_RECTANGLE 0x0000001C
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REPORT_TIMESTAMP 0x00000014
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE 14:13
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE_SEMAPHORE_FOUR_WORDS 0x00000000
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE_SEMAPHORE_ONE_WORD 0x00000001
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE_SEMAPHORE_TWO_WORDS 0x00000002
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_SUB_REPORT 17:15
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_FLUSH_DISABLE 19:19
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_FLUSH_DISABLE_FALSE 0x00000000
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_FLUSH_DISABLE_TRUE 0x00000001
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_ROP_FLUSH_DISABLE 18:18
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_ROP_FLUSH_DISABLE_FALSE 0x00000000
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_ROP_FLUSH_DISABLE_TRUE 0x00000001
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REDUCTION_ENABLE 20:20
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REDUCTION_ENABLE_FALSE 0x00000000
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REDUCTION_ENABLE_TRUE 0x00000001
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP 23:21
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_ADD 0x00000000
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_MIN 0x00000001
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_MAX 0x00000002
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_INC 0x00000003
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_DEC 0x00000004
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_AND 0x00000005
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_OR 0x00000006
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_XOR 0x00000007
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REDUCTION_FORMAT 25:24
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000000
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000001
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PAYLOAD_SIZE64 27:27
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PAYLOAD_SIZE64_FALSE 0x00000000
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_PAYLOAD_SIZE64_TRUE 0x00000001
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE 29:28
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_NONE 0x00000000
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_UNCONDITIONAL 0x00000001
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_CONDITIONAL 0x00000002
+#define NVC997_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_CONDITIONAL_EXT 0x00000003
+
+#define NVC997_LINE_LENGTH_IN 0x0180
+#define NVC997_LINE_LENGTH_IN_VALUE 31:0
+
+#define NVC997_LINE_COUNT 0x0184
+#define NVC997_LINE_COUNT_VALUE 31:0
+
+#define NVC997_OFFSET_OUT_UPPER 0x0188
+#define NVC997_OFFSET_OUT_UPPER_VALUE 7:0
+
+#define NVC997_OFFSET_OUT 0x018c
+#define NVC997_OFFSET_OUT_VALUE 31:0
+
+#define NVC997_PITCH_OUT 0x0190
+#define NVC997_PITCH_OUT_VALUE 31:0
+
+#define NVC997_SET_DST_BLOCK_SIZE 0x0194
+#define NVC997_SET_DST_BLOCK_SIZE_WIDTH 3:0
+#define NVC997_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVC997_SET_DST_BLOCK_SIZE_HEIGHT 7:4
+#define NVC997_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVC997_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVC997_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC997_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC997_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC997_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC997_SET_DST_BLOCK_SIZE_DEPTH 11:8
+#define NVC997_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+#define NVC997_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001
+#define NVC997_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002
+#define NVC997_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003
+#define NVC997_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVC997_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005
+
+#define NVC997_SET_DST_WIDTH 0x0198
+#define NVC997_SET_DST_WIDTH_V 31:0
+
+#define NVC997_SET_DST_HEIGHT 0x019c
+#define NVC997_SET_DST_HEIGHT_V 31:0
+
+#define NVC997_SET_DST_DEPTH 0x01a0
+#define NVC997_SET_DST_DEPTH_V 31:0
+
+#define NVC997_SET_DST_LAYER 0x01a4
+#define NVC997_SET_DST_LAYER_V 31:0
+
+#define NVC997_SET_DST_ORIGIN_BYTES_X 0x01a8
+#define NVC997_SET_DST_ORIGIN_BYTES_X_V 20:0
+
+#define NVC997_SET_DST_ORIGIN_SAMPLES_Y 0x01ac
+#define NVC997_SET_DST_ORIGIN_SAMPLES_Y_V 16:0
+
+#define NVC997_LAUNCH_DMA 0x01b0
+#define NVC997_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0
+#define NVC997_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NVC997_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001
+#define NVC997_LAUNCH_DMA_COMPLETION_TYPE 5:4
+#define NVC997_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000
+#define NVC997_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001
+#define NVC997_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002
+#define NVC997_LAUNCH_DMA_INTERRUPT_TYPE 9:8
+#define NVC997_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000
+#define NVC997_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001
+#define NVC997_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12
+#define NVC997_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000
+#define NVC997_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001
+#define NVC997_LAUNCH_DMA_REDUCTION_ENABLE 1:1
+#define NVC997_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000
+#define NVC997_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001
+#define NVC997_LAUNCH_DMA_REDUCTION_OP 15:13
+#define NVC997_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000
+#define NVC997_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001
+#define NVC997_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002
+#define NVC997_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003
+#define NVC997_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004
+#define NVC997_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005
+#define NVC997_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006
+#define NVC997_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007
+#define NVC997_LAUNCH_DMA_REDUCTION_FORMAT 3:2
+#define NVC997_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000
+#define NVC997_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001
+#define NVC997_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6
+#define NVC997_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000
+#define NVC997_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001
+
+#define NVC997_LOAD_INLINE_DATA 0x01b4
+#define NVC997_LOAD_INLINE_DATA_V 31:0
+
+#define NVC997_SET_I2M_SEMAPHORE_A 0x01dc
+#define NVC997_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0
+
+#define NVC997_SET_I2M_SEMAPHORE_B 0x01e0
+#define NVC997_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0
+
+#define NVC997_SET_I2M_SEMAPHORE_C 0x01e4
+#define NVC997_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0
+
+#define NVC997_SET_MME_SWITCH_STATE 0x01ec
+#define NVC997_SET_MME_SWITCH_STATE_VALID 0:0
+#define NVC997_SET_MME_SWITCH_STATE_VALID_FALSE 0x00000000
+#define NVC997_SET_MME_SWITCH_STATE_VALID_TRUE 0x00000001
+#define NVC997_SET_MME_SWITCH_STATE_SAVE_MACRO 11:4
+#define NVC997_SET_MME_SWITCH_STATE_RESTORE_MACRO 19:12
+
+#define NVC997_SET_I2M_SPARE_NOOP00 0x01f0
+#define NVC997_SET_I2M_SPARE_NOOP00_V 31:0
+
+#define NVC997_SET_I2M_SPARE_NOOP01 0x01f4
+#define NVC997_SET_I2M_SPARE_NOOP01_V 31:0
+
+#define NVC997_SET_I2M_SPARE_NOOP02 0x01f8
+#define NVC997_SET_I2M_SPARE_NOOP02_V 31:0
+
+#define NVC997_SET_I2M_SPARE_NOOP03 0x01fc
+#define NVC997_SET_I2M_SPARE_NOOP03_V 31:0
+
+#define NVC997_RUN_DS_NOW 0x0200
+#define NVC997_RUN_DS_NOW_V 31:0
+
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS 0x0204
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD 4:0
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_INSTANTANEOUS 0x00000000
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16 0x00000001
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32 0x00000002
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__64 0x00000003
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__128 0x00000004
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__256 0x00000005
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__512 0x00000006
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1024 0x00000007
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2048 0x00000008
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4096 0x00000009
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__8192 0x0000000A
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16384 0x0000000B
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32768 0x0000000C
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__65536 0x0000000D
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__131072 0x0000000E
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__262144 0x0000000F
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__524288 0x00000010
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1048576 0x00000011
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2097152 0x00000012
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4194304 0x00000013
+#define NVC997_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_LATEZ_ALWAYS 0x0000001F
+
+#define NVC997_SET_GS_MODE 0x0208
+#define NVC997_SET_GS_MODE_TYPE 0:0
+#define NVC997_SET_GS_MODE_TYPE_ANY 0x00000000
+#define NVC997_SET_GS_MODE_TYPE_FAST_GS 0x00000001
+
+#define NVC997_SET_ALIASED_LINE_WIDTH_ENABLE 0x020c
+#define NVC997_SET_ALIASED_LINE_WIDTH_ENABLE_V 0:0
+#define NVC997_SET_ALIASED_LINE_WIDTH_ENABLE_V_FALSE 0x00000000
+#define NVC997_SET_ALIASED_LINE_WIDTH_ENABLE_V_TRUE 0x00000001
+
+#define NVC997_SET_API_MANDATED_EARLY_Z 0x0210
+#define NVC997_SET_API_MANDATED_EARLY_Z_ENABLE 0:0
+#define NVC997_SET_API_MANDATED_EARLY_Z_ENABLE_FALSE 0x00000000
+#define NVC997_SET_API_MANDATED_EARLY_Z_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_GS_DM_FIFO 0x0214
+#define NVC997_SET_GS_DM_FIFO_SIZE_RASTER_ON 12:0
+#define NVC997_SET_GS_DM_FIFO_SIZE_RASTER_OFF 28:16
+#define NVC997_SET_GS_DM_FIFO_SPILL_ENABLED 31:31
+#define NVC997_SET_GS_DM_FIFO_SPILL_ENABLED_FALSE 0x00000000
+#define NVC997_SET_GS_DM_FIFO_SPILL_ENABLED_TRUE 0x00000001
+
+#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS 0x0218
+#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY 5:4
+#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVC997_INVALIDATE_SHADER_CACHES 0x021c
+#define NVC997_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0
+#define NVC997_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000
+#define NVC997_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001
+#define NVC997_INVALIDATE_SHADER_CACHES_DATA 4:4
+#define NVC997_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000
+#define NVC997_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001
+#define NVC997_INVALIDATE_SHADER_CACHES_CONSTANT 12:12
+#define NVC997_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000
+#define NVC997_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001
+#define NVC997_INVALIDATE_SHADER_CACHES_LOCKS 1:1
+#define NVC997_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000
+#define NVC997_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001
+#define NVC997_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2
+#define NVC997_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000
+#define NVC997_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001
+
+#define NVC997_SET_INSTANCE_COUNT 0x0220
+#define NVC997_SET_INSTANCE_COUNT_V 31:0
+
+#define NVC997_SET_POSITION_W_SCALED_OFFSET_ENABLE 0x0224
+#define NVC997_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE 0:0
+#define NVC997_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE_FALSE 0x00000000
+#define NVC997_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_GO_IDLE_TIMEOUT 0x022c
+#define NVC997_SET_GO_IDLE_TIMEOUT_V 31:0
+
+#define NVC997_SET_MME_VERSION 0x0234
+#define NVC997_SET_MME_VERSION_MAJOR 7:0
+
+#define NVC997_SET_INDEX_BUFFER_SIZE_A 0x0238
+#define NVC997_SET_INDEX_BUFFER_SIZE_A_UPPER 7:0
+
+#define NVC997_SET_INDEX_BUFFER_SIZE_B 0x023c
+#define NVC997_SET_INDEX_BUFFER_SIZE_B_LOWER 31:0
+
+#define NVC997_SET_ROOT_TABLE_VISIBILITY(i) (0x0240+(i)*4)
+#define NVC997_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP0_ENABLE 1:0
+#define NVC997_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP1_ENABLE 5:4
+#define NVC997_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP2_ENABLE 9:8
+#define NVC997_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP3_ENABLE 13:12
+#define NVC997_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP4_ENABLE 17:16
+
+#define NVC997_SET_DRAW_CONTROL_A 0x0260
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY 3:0
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY_POINTS 0x00000000
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY_LINES 0x00000001
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY_QUADS 0x00000007
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY_POLYGON 0x00000009
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC997_SET_DRAW_CONTROL_A_TOPOLOGY_PATCH 0x0000000E
+#define NVC997_SET_DRAW_CONTROL_A_PRIMITIVE_ID 4:4
+#define NVC997_SET_DRAW_CONTROL_A_PRIMITIVE_ID_FIRST 0x00000000
+#define NVC997_SET_DRAW_CONTROL_A_PRIMITIVE_ID_UNCHANGED 0x00000001
+#define NVC997_SET_DRAW_CONTROL_A_INSTANCE_ID 6:5
+#define NVC997_SET_DRAW_CONTROL_A_INSTANCE_ID_FIRST 0x00000000
+#define NVC997_SET_DRAW_CONTROL_A_INSTANCE_ID_SUBSEQUENT 0x00000001
+#define NVC997_SET_DRAW_CONTROL_A_INSTANCE_ID_UNCHANGED 0x00000002
+#define NVC997_SET_DRAW_CONTROL_A_SPLIT_MODE 8:7
+#define NVC997_SET_DRAW_CONTROL_A_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000
+#define NVC997_SET_DRAW_CONTROL_A_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001
+#define NVC997_SET_DRAW_CONTROL_A_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002
+#define NVC997_SET_DRAW_CONTROL_A_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003
+#define NVC997_SET_DRAW_CONTROL_A_INSTANCE_ITERATE_ENABLE 9:9
+#define NVC997_SET_DRAW_CONTROL_A_INSTANCE_ITERATE_ENABLE_FALSE 0x00000000
+#define NVC997_SET_DRAW_CONTROL_A_INSTANCE_ITERATE_ENABLE_TRUE 0x00000001
+#define NVC997_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_VERTEX_INDEX 10:10
+#define NVC997_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_VERTEX_INDEX_FALSE 0x00000000
+#define NVC997_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_VERTEX_INDEX_TRUE 0x00000001
+#define NVC997_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_INSTANCE_INDEX 11:11
+#define NVC997_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_INSTANCE_INDEX_FALSE 0x00000000
+#define NVC997_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_INSTANCE_INDEX_TRUE 0x00000001
+
+#define NVC997_SET_DRAW_CONTROL_B 0x0264
+#define NVC997_SET_DRAW_CONTROL_B_INSTANCE_COUNT 31:0
+
+#define NVC997_DRAW_INDEX_BUFFER_BEGIN_END_A 0x0268
+#define NVC997_DRAW_INDEX_BUFFER_BEGIN_END_A_FIRST 31:0
+
+#define NVC997_DRAW_INDEX_BUFFER_BEGIN_END_B 0x026c
+#define NVC997_DRAW_INDEX_BUFFER_BEGIN_END_B_COUNT 31:0
+
+#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_A 0x0270
+#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_A_START 31:0
+
+#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_B 0x0274
+#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_B_COUNT 31:0
+
+#define NVC997_INVALIDATE_RASTER_CACHE_NO_WFI 0x027c
+#define NVC997_INVALIDATE_RASTER_CACHE_NO_WFI_V 0:0
+
+#define NVC997_SET_COLOR_RENDER_TO_ZETA_SURFACE 0x02b8
+#define NVC997_SET_COLOR_RENDER_TO_ZETA_SURFACE_V 0:0
+#define NVC997_SET_COLOR_RENDER_TO_ZETA_SURFACE_V_FALSE 0x00000000
+#define NVC997_SET_COLOR_RENDER_TO_ZETA_SURFACE_V_TRUE 0x00000001
+
+#define NVC997_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION 0x02bc
+#define NVC997_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION_V 0:0
+#define NVC997_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION_V_FALSE 0x00000000
+#define NVC997_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION_V_TRUE 0x00000001
+
+#define NVC997_INCREMENT_SYNC_POINT 0x02c8
+#define NVC997_INCREMENT_SYNC_POINT_INDEX 11:0
+#define NVC997_INCREMENT_SYNC_POINT_CLEAN_L2 16:16
+#define NVC997_INCREMENT_SYNC_POINT_CLEAN_L2_FALSE 0x00000000
+#define NVC997_INCREMENT_SYNC_POINT_CLEAN_L2_TRUE 0x00000001
+#define NVC997_INCREMENT_SYNC_POINT_CONDITION 20:20
+#define NVC997_INCREMENT_SYNC_POINT_CONDITION_STREAM_OUT_WRITES_DONE 0x00000000
+#define NVC997_INCREMENT_SYNC_POINT_CONDITION_ROP_WRITES_DONE 0x00000001
+
+#define NVC997_SET_ROOT_TABLE_PREFETCH 0x02d0
+#define NVC997_SET_ROOT_TABLE_PREFETCH_STAGE_ENABLES 5:0
+
+#define NVC997_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE 0x02d4
+#define NVC997_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE_V 0:0
+
+#define NVC997_SET_SURFACE_CLIP_ID_BLOCK_SIZE 0x02d8
+#define NVC997_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH 3:0
+#define NVC997_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVC997_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT 7:4
+#define NVC997_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVC997_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVC997_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVC997_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVC997_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVC997_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVC997_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH 11:8
+#define NVC997_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+
+#define NVC997_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dc
+#define NVC997_SET_ALPHA_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 13:0
+
+#define NVC997_DECOMPRESS_SURFACE 0x02e0
+#define NVC997_DECOMPRESS_SURFACE_MRT_SELECT 2:0
+#define NVC997_DECOMPRESS_SURFACE_RT_ARRAY_INDEX 19:4
+
+#define NVC997_SET_ZCULL_ROP_BYPASS 0x02e4
+#define NVC997_SET_ZCULL_ROP_BYPASS_ENABLE 0:0
+#define NVC997_SET_ZCULL_ROP_BYPASS_ENABLE_FALSE 0x00000000
+#define NVC997_SET_ZCULL_ROP_BYPASS_ENABLE_TRUE 0x00000001
+#define NVC997_SET_ZCULL_ROP_BYPASS_NO_STALL 4:4
+#define NVC997_SET_ZCULL_ROP_BYPASS_NO_STALL_FALSE 0x00000000
+#define NVC997_SET_ZCULL_ROP_BYPASS_NO_STALL_TRUE 0x00000001
+#define NVC997_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING 8:8
+#define NVC997_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_FALSE 0x00000000
+#define NVC997_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_TRUE 0x00000001
+#define NVC997_SET_ZCULL_ROP_BYPASS_THRESHOLD 15:12
+
+#define NVC997_SET_ZCULL_SUBREGION 0x02e8
+#define NVC997_SET_ZCULL_SUBREGION_ENABLE 0:0
+#define NVC997_SET_ZCULL_SUBREGION_ENABLE_FALSE 0x00000000
+#define NVC997_SET_ZCULL_SUBREGION_ENABLE_TRUE 0x00000001
+#define NVC997_SET_ZCULL_SUBREGION_NORMALIZED_ALIQUOTS 27:4
+
+#define NVC997_SET_RASTER_BOUNDING_BOX 0x02ec
+#define NVC997_SET_RASTER_BOUNDING_BOX_MODE 0:0
+#define NVC997_SET_RASTER_BOUNDING_BOX_MODE_BOUNDING_BOX 0x00000000
+#define NVC997_SET_RASTER_BOUNDING_BOX_MODE_FULL_VIEWPORT 0x00000001
+#define NVC997_SET_RASTER_BOUNDING_BOX_PAD 11:4
+
+#define NVC997_PEER_SEMAPHORE_RELEASE 0x02f0
+#define NVC997_PEER_SEMAPHORE_RELEASE_V 31:0
+
+#define NVC997_SET_ITERATED_BLEND_OPTIMIZATION 0x02f4
+#define NVC997_SET_ITERATED_BLEND_OPTIMIZATION_NOOP 1:0
+#define NVC997_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_NEVER 0x00000000
+#define NVC997_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0000 0x00000001
+#define NVC997_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_ALPHA_0 0x00000002
+#define NVC997_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0001 0x00000003
+
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION 0x02f8
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_SUBREGION_ID 7:0
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_ALIQUOTS 23:8
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT 27:24
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16X2_4X4 0x00000000
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X16_4X4 0x00000001
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X2 0x00000002
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_2X4 0x00000003
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X8_4X4 0x00000004
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_4X2 0x00000005
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_2X4 0x00000006
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X8 0x00000007
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_2X2 0x00000008
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_4X2 0x00000009
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_2X4 0x0000000A
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_8X8_2X2 0x0000000B
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_1X1 0x0000000C
+#define NVC997_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_NONE 0x0000000F
+
+#define NVC997_ASSIGN_ZCULL_SUBREGIONS 0x02fc
+#define NVC997_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM 1:0
+#define NVC997_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Static 0x00000000
+#define NVC997_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Adaptive 0x00000001
+
+#define NVC997_SET_PS_OUTPUT_SAMPLE_MASK_USAGE 0x0300
+#define NVC997_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE 0:0
+#define NVC997_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_FALSE 0x00000000
+#define NVC997_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_TRUE 0x00000001
+#define NVC997_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE 1:1
+#define NVC997_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000
+#define NVC997_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001
+
+#define NVC997_DRAW_ZERO_INDEX 0x0304
+#define NVC997_DRAW_ZERO_INDEX_COUNT 31:0
+
+#define NVC997_SET_L1_CONFIGURATION 0x0308
+#define NVC997_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY 2:0
+#define NVC997_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_16KB 0x00000001
+#define NVC997_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB 0x00000003
+
+#define NVC997_SET_RENDER_ENABLE_CONTROL 0x030c
+#define NVC997_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER 0:0
+#define NVC997_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_FALSE 0x00000000
+#define NVC997_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_TRUE 0x00000001
+
+#define NVC997_SET_SPA_VERSION 0x0310
+#define NVC997_SET_SPA_VERSION_MINOR 7:0
+#define NVC997_SET_SPA_VERSION_MAJOR 15:8
+
+#define NVC997_SET_TIMESLICE_BATCH_LIMIT 0x0314
+#define NVC997_SET_TIMESLICE_BATCH_LIMIT_BATCH_LIMIT 15:0
+
+#define NVC997_SET_SNAP_GRID_LINE 0x0318
+#define NVC997_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL 3:0
+#define NVC997_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001
+#define NVC997_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002
+#define NVC997_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003
+#define NVC997_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004
+#define NVC997_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005
+#define NVC997_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006
+#define NVC997_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007
+#define NVC997_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008
+#define NVC997_SET_SNAP_GRID_LINE_ROUNDING_MODE 8:8
+#define NVC997_SET_SNAP_GRID_LINE_ROUNDING_MODE_RTNE 0x00000000
+#define NVC997_SET_SNAP_GRID_LINE_ROUNDING_MODE_TESLA 0x00000001
+
+#define NVC997_SET_SNAP_GRID_NON_LINE 0x031c
+#define NVC997_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL 3:0
+#define NVC997_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001
+#define NVC997_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002
+#define NVC997_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003
+#define NVC997_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004
+#define NVC997_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005
+#define NVC997_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006
+#define NVC997_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007
+#define NVC997_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008
+#define NVC997_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE 8:8
+#define NVC997_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_RTNE 0x00000000
+#define NVC997_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_TESLA 0x00000001
+
+#define NVC997_SET_TESSELLATION_PARAMETERS 0x0320
+#define NVC997_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE 1:0
+#define NVC997_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_ISOLINE 0x00000000
+#define NVC997_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_TRIANGLE 0x00000001
+#define NVC997_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_QUAD 0x00000002
+#define NVC997_SET_TESSELLATION_PARAMETERS_SPACING 5:4
+#define NVC997_SET_TESSELLATION_PARAMETERS_SPACING_INTEGER 0x00000000
+#define NVC997_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_ODD 0x00000001
+#define NVC997_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_EVEN 0x00000002
+#define NVC997_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES 9:8
+#define NVC997_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_POINTS 0x00000000
+#define NVC997_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_LINES 0x00000001
+#define NVC997_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CW 0x00000002
+#define NVC997_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CCW 0x00000003
+
+#define NVC997_SET_TESSELLATION_LOD_U0_OR_DENSITY 0x0324
+#define NVC997_SET_TESSELLATION_LOD_U0_OR_DENSITY_V 31:0
+
+#define NVC997_SET_TESSELLATION_LOD_V0_OR_DETAIL 0x0328
+#define NVC997_SET_TESSELLATION_LOD_V0_OR_DETAIL_V 31:0
+
+#define NVC997_SET_TESSELLATION_LOD_U1_OR_W0 0x032c
+#define NVC997_SET_TESSELLATION_LOD_U1_OR_W0_V 31:0
+
+#define NVC997_SET_TESSELLATION_LOD_V1 0x0330
+#define NVC997_SET_TESSELLATION_LOD_V1_V 31:0
+
+#define NVC997_SET_TG_LOD_INTERIOR_U 0x0334
+#define NVC997_SET_TG_LOD_INTERIOR_U_V 31:0
+
+#define NVC997_SET_TG_LOD_INTERIOR_V 0x0338
+#define NVC997_SET_TG_LOD_INTERIOR_V_V 31:0
+
+#define NVC997_RESERVED_TG07 0x033c
+#define NVC997_RESERVED_TG07_V 0:0
+
+#define NVC997_RESERVED_TG08 0x0340
+#define NVC997_RESERVED_TG08_V 0:0
+
+#define NVC997_RESERVED_TG09 0x0344
+#define NVC997_RESERVED_TG09_V 0:0
+
+#define NVC997_RESERVED_TG10 0x0348
+#define NVC997_RESERVED_TG10_V 0:0
+
+#define NVC997_RESERVED_TG11 0x034c
+#define NVC997_RESERVED_TG11_V 0:0
+
+#define NVC997_RESERVED_TG12 0x0350
+#define NVC997_RESERVED_TG12_V 0:0
+
+#define NVC997_RESERVED_TG13 0x0354
+#define NVC997_RESERVED_TG13_V 0:0
+
+#define NVC997_RESERVED_TG14 0x0358
+#define NVC997_RESERVED_TG14_V 0:0
+
+#define NVC997_RESERVED_TG15 0x035c
+#define NVC997_RESERVED_TG15_V 0:0
+
+#define NVC997_SET_SUBTILING_PERF_KNOB_A 0x0360
+#define NVC997_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_REGISTER_FILE_PER_SUBTILE 7:0
+#define NVC997_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_PIXEL_OUTPUT_BUFFER_PER_SUBTILE 15:8
+#define NVC997_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_TRIANGLE_RAM_PER_SUBTILE 23:16
+#define NVC997_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_MAX_QUADS_PER_SUBTILE 31:24
+
+#define NVC997_SET_SUBTILING_PERF_KNOB_B 0x0364
+#define NVC997_SET_SUBTILING_PERF_KNOB_B_FRACTION_OF_MAX_PRIMITIVES_PER_SUBTILE 7:0
+
+#define NVC997_SET_SUBTILING_PERF_KNOB_C 0x0368
+#define NVC997_SET_SUBTILING_PERF_KNOB_C_RESERVED 0:0
+
+#define NVC997_SET_ZCULL_SUBREGION_TO_REPORT 0x036c
+#define NVC997_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE 0:0
+#define NVC997_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_FALSE 0x00000000
+#define NVC997_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_TRUE 0x00000001
+#define NVC997_SET_ZCULL_SUBREGION_TO_REPORT_SUBREGION_ID 11:4
+
+#define NVC997_SET_ZCULL_SUBREGION_REPORT_TYPE 0x0370
+#define NVC997_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE 0:0
+#define NVC997_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_FALSE 0x00000000
+#define NVC997_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_TRUE 0x00000001
+#define NVC997_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE 6:4
+#define NVC997_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST 0x00000000
+#define NVC997_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_NO_ACCEPT 0x00000001
+#define NVC997_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_LATE_Z 0x00000002
+#define NVC997_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_STENCIL_TEST 0x00000003
+
+#define NVC997_SET_BALANCED_PRIMITIVE_WORKLOAD 0x0374
+#define NVC997_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE 0:0
+#define NVC997_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_FALSE 0x00000000
+#define NVC997_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_TRUE 0x00000001
+#define NVC997_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE 4:4
+#define NVC997_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_FALSE 0x00000000
+#define NVC997_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_TRUE 0x00000001
+#define NVC997_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE 8:8
+#define NVC997_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE_FALSE 0x00000000
+#define NVC997_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE_TRUE 0x00000001
+#define NVC997_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE 9:9
+#define NVC997_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE_FALSE 0x00000000
+#define NVC997_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE_TRUE 0x00000001
+
+#define NVC997_SET_MAX_PATCHES_PER_BATCH 0x0378
+#define NVC997_SET_MAX_PATCHES_PER_BATCH_V 5:0
+
+#define NVC997_SET_RASTER_ENABLE 0x037c
+#define NVC997_SET_RASTER_ENABLE_V 0:0
+#define NVC997_SET_RASTER_ENABLE_V_FALSE 0x00000000
+#define NVC997_SET_RASTER_ENABLE_V_TRUE 0x00000001
+
+#define NVC997_SET_STREAM_OUT_BUFFER_ENABLE(j) (0x0380+(j)*32)
+#define NVC997_SET_STREAM_OUT_BUFFER_ENABLE_V 0:0
+#define NVC997_SET_STREAM_OUT_BUFFER_ENABLE_V_FALSE 0x00000000
+#define NVC997_SET_STREAM_OUT_BUFFER_ENABLE_V_TRUE 0x00000001
+
+#define NVC997_SET_STREAM_OUT_BUFFER_ADDRESS_A(j) (0x0384+(j)*32)
+#define NVC997_SET_STREAM_OUT_BUFFER_ADDRESS_A_UPPER 7:0
+
+#define NVC997_SET_STREAM_OUT_BUFFER_ADDRESS_B(j) (0x0388+(j)*32)
+#define NVC997_SET_STREAM_OUT_BUFFER_ADDRESS_B_LOWER 31:0
+
+#define NVC997_SET_STREAM_OUT_BUFFER_SIZE(j) (0x038c+(j)*32)
+#define NVC997_SET_STREAM_OUT_BUFFER_SIZE_BYTES 31:0
+
+#define NVC997_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER(j) (0x0390+(j)*32)
+#define NVC997_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER_START_OFFSET 31:0
+
+#define NVC997_SET_POSITION_W_SCALED_OFFSET_SCALE_A(j) (0x0400+(j)*16)
+#define NVC997_SET_POSITION_W_SCALED_OFFSET_SCALE_A_V 31:0
+
+#define NVC997_SET_POSITION_W_SCALED_OFFSET_SCALE_B(j) (0x0404+(j)*16)
+#define NVC997_SET_POSITION_W_SCALED_OFFSET_SCALE_B_V 31:0
+
+#define NVC997_SET_POSITION_W_SCALED_OFFSET_RESERVED_A(j) (0x0408+(j)*16)
+#define NVC997_SET_POSITION_W_SCALED_OFFSET_RESERVED_A_V 31:0
+
+#define NVC997_SET_POSITION_W_SCALED_OFFSET_RESERVED_B(j) (0x040c+(j)*16)
+#define NVC997_SET_POSITION_W_SCALED_OFFSET_RESERVED_B_V 31:0
+
+#define NVC997_SET_Z_ROP_SLICE_MAP 0x0500
+#define NVC997_SET_Z_ROP_SLICE_MAP_VIRTUAL_ADDRESS_MASK 31:0
+
+#define NVC997_SET_ROOT_TABLE_SELECTOR 0x0504
+#define NVC997_SET_ROOT_TABLE_SELECTOR_ROOT_TABLE 2:0
+#define NVC997_SET_ROOT_TABLE_SELECTOR_OFFSET 15:8
+
+#define NVC997_LOAD_ROOT_TABLE 0x0508
+#define NVC997_LOAD_ROOT_TABLE_V 31:0
+
+#define NVC997_SET_MME_MEM_ADDRESS_A 0x0550
+#define NVC997_SET_MME_MEM_ADDRESS_A_UPPER 16:0
+
+#define NVC997_SET_MME_MEM_ADDRESS_B 0x0554
+#define NVC997_SET_MME_MEM_ADDRESS_B_LOWER 31:0
+
+#define NVC997_SET_MME_DATA_RAM_ADDRESS 0x0558
+#define NVC997_SET_MME_DATA_RAM_ADDRESS_WORD 31:0
+
+#define NVC997_MME_DMA_READ 0x055c
+#define NVC997_MME_DMA_READ_LENGTH 31:0
+
+#define NVC997_MME_DMA_READ_FIFOED 0x0560
+#define NVC997_MME_DMA_READ_FIFOED_LENGTH 31:0
+
+#define NVC997_MME_DMA_WRITE 0x0564
+#define NVC997_MME_DMA_WRITE_LENGTH 31:0
+
+#define NVC997_MME_DMA_REDUCTION 0x0568
+#define NVC997_MME_DMA_REDUCTION_REDUCTION_OP 2:0
+#define NVC997_MME_DMA_REDUCTION_REDUCTION_OP_RED_ADD 0x00000000
+#define NVC997_MME_DMA_REDUCTION_REDUCTION_OP_RED_MIN 0x00000001
+#define NVC997_MME_DMA_REDUCTION_REDUCTION_OP_RED_MAX 0x00000002
+#define NVC997_MME_DMA_REDUCTION_REDUCTION_OP_RED_INC 0x00000003
+#define NVC997_MME_DMA_REDUCTION_REDUCTION_OP_RED_DEC 0x00000004
+#define NVC997_MME_DMA_REDUCTION_REDUCTION_OP_RED_AND 0x00000005
+#define NVC997_MME_DMA_REDUCTION_REDUCTION_OP_RED_OR 0x00000006
+#define NVC997_MME_DMA_REDUCTION_REDUCTION_OP_RED_XOR 0x00000007
+#define NVC997_MME_DMA_REDUCTION_REDUCTION_FORMAT 5:4
+#define NVC997_MME_DMA_REDUCTION_REDUCTION_FORMAT_UNSIGNED 0x00000000
+#define NVC997_MME_DMA_REDUCTION_REDUCTION_FORMAT_SIGNED 0x00000001
+#define NVC997_MME_DMA_REDUCTION_REDUCTION_SIZE 8:8
+#define NVC997_MME_DMA_REDUCTION_REDUCTION_SIZE_FOUR_BYTES 0x00000000
+#define NVC997_MME_DMA_REDUCTION_REDUCTION_SIZE_EIGHT_BYTES 0x00000001
+
+#define NVC997_MME_DMA_SYSMEMBAR 0x056c
+#define NVC997_MME_DMA_SYSMEMBAR_V 0:0
+
+#define NVC997_MME_DMA_SYNC 0x0570
+#define NVC997_MME_DMA_SYNC_VALUE 31:0
+
+#define NVC997_SET_MME_DATA_FIFO_CONFIG 0x0574
+#define NVC997_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE 2:0
+#define NVC997_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_0KB 0x00000000
+#define NVC997_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_4KB 0x00000001
+#define NVC997_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_8KB 0x00000002
+#define NVC997_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_12KB 0x00000003
+#define NVC997_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_16KB 0x00000004
+
+#define NVC997_SET_VERTEX_STREAM_SIZE_A(j) (0x0600+(j)*8)
+#define NVC997_SET_VERTEX_STREAM_SIZE_A_UPPER 7:0
+
+#define NVC997_SET_VERTEX_STREAM_SIZE_B(j) (0x0604+(j)*8)
+#define NVC997_SET_VERTEX_STREAM_SIZE_B_LOWER 31:0
+
+#define NVC997_SET_STREAM_OUT_CONTROL_STREAM(j) (0x0700+(j)*16)
+#define NVC997_SET_STREAM_OUT_CONTROL_STREAM_SELECT 1:0
+
+#define NVC997_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT(j) (0x0704+(j)*16)
+#define NVC997_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT_MAX 7:0
+
+#define NVC997_SET_STREAM_OUT_CONTROL_STRIDE(j) (0x0708+(j)*16)
+#define NVC997_SET_STREAM_OUT_CONTROL_STRIDE_BYTES 31:0
+
+#define NVC997_SET_RASTER_INPUT 0x0740
+#define NVC997_SET_RASTER_INPUT_STREAM_SELECT 1:0
+
+#define NVC997_SET_STREAM_OUTPUT 0x0744
+#define NVC997_SET_STREAM_OUTPUT_ENABLE 0:0
+#define NVC997_SET_STREAM_OUTPUT_ENABLE_FALSE 0x00000000
+#define NVC997_SET_STREAM_OUTPUT_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE 0x0748
+#define NVC997_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE 0:0
+#define NVC997_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_FALSE 0x00000000
+#define NVC997_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_HYBRID_ANTI_ALIAS_CONTROL 0x0754
+#define NVC997_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES 3:0
+#define NVC997_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID 4:4
+#define NVC997_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_FRAGMENT 0x00000000
+#define NVC997_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_PASS 0x00000001
+#define NVC997_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES_EXTENDED 5:5
+
+#define NVC997_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c
+#define NVC997_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0
+
+#define NVC997_SET_SHADER_LOCAL_MEMORY_A 0x0790
+#define NVC997_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0
+
+#define NVC997_SET_SHADER_LOCAL_MEMORY_B 0x0794
+#define NVC997_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0
+
+#define NVC997_SET_SHADER_LOCAL_MEMORY_C 0x0798
+#define NVC997_SET_SHADER_LOCAL_MEMORY_C_SIZE_UPPER 5:0
+
+#define NVC997_SET_SHADER_LOCAL_MEMORY_D 0x079c
+#define NVC997_SET_SHADER_LOCAL_MEMORY_D_SIZE_LOWER 31:0
+
+#define NVC997_SET_SHADER_LOCAL_MEMORY_E 0x07a0
+#define NVC997_SET_SHADER_LOCAL_MEMORY_E_DEFAULT_SIZE_PER_WARP 25:0
+
+#define NVC997_SET_COLOR_ZERO_BANDWIDTH_CLEAR 0x07a4
+#define NVC997_SET_COLOR_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVC997_SET_Z_ZERO_BANDWIDTH_CLEAR 0x07a8
+#define NVC997_SET_Z_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVC997_SET_STENCIL_ZERO_BANDWIDTH_CLEAR 0x07b0
+#define NVC997_SET_STENCIL_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVC997_SET_ZCULL_REGION_SIZE_A 0x07c0
+#define NVC997_SET_ZCULL_REGION_SIZE_A_WIDTH 15:0
+
+#define NVC997_SET_ZCULL_REGION_SIZE_B 0x07c4
+#define NVC997_SET_ZCULL_REGION_SIZE_B_HEIGHT 15:0
+
+#define NVC997_SET_ZCULL_REGION_SIZE_C 0x07c8
+#define NVC997_SET_ZCULL_REGION_SIZE_C_DEPTH 15:0
+
+#define NVC997_SET_ZCULL_REGION_PIXEL_OFFSET_C 0x07cc
+#define NVC997_SET_ZCULL_REGION_PIXEL_OFFSET_C_DEPTH 15:0
+
+#define NVC997_SET_CULL_BEFORE_FETCH 0x07dc
+#define NVC997_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE 0:0
+#define NVC997_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_FALSE 0x00000000
+#define NVC997_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_TRUE 0x00000001
+
+#define NVC997_SET_ZCULL_REGION_LOCATION 0x07e0
+#define NVC997_SET_ZCULL_REGION_LOCATION_START_ALIQUOT 15:0
+#define NVC997_SET_ZCULL_REGION_LOCATION_ALIQUOT_COUNT 31:16
+
+#define NVC997_SET_ZCULL_REGION_ALIQUOTS 0x07e4
+#define NVC997_SET_ZCULL_REGION_ALIQUOTS_PER_LAYER 15:0
+
+#define NVC997_SET_ZCULL_STORAGE_A 0x07e8
+#define NVC997_SET_ZCULL_STORAGE_A_ADDRESS_UPPER 7:0
+
+#define NVC997_SET_ZCULL_STORAGE_B 0x07ec
+#define NVC997_SET_ZCULL_STORAGE_B_ADDRESS_LOWER 31:0
+
+#define NVC997_SET_ZCULL_STORAGE_C 0x07f0
+#define NVC997_SET_ZCULL_STORAGE_C_LIMIT_ADDRESS_UPPER 7:0
+
+#define NVC997_SET_ZCULL_STORAGE_D 0x07f4
+#define NVC997_SET_ZCULL_STORAGE_D_LIMIT_ADDRESS_LOWER 31:0
+
+#define NVC997_SET_ZT_READ_ONLY 0x07f8
+#define NVC997_SET_ZT_READ_ONLY_ENABLE_Z 0:0
+#define NVC997_SET_ZT_READ_ONLY_ENABLE_Z_FALSE 0x00000000
+#define NVC997_SET_ZT_READ_ONLY_ENABLE_Z_TRUE 0x00000001
+#define NVC997_SET_ZT_READ_ONLY_ENABLE_STENCIL 4:4
+#define NVC997_SET_ZT_READ_ONLY_ENABLE_STENCIL_FALSE 0x00000000
+#define NVC997_SET_ZT_READ_ONLY_ENABLE_STENCIL_TRUE 0x00000001
+
+#define NVC997_THROTTLE_SM 0x07fc
+#define NVC997_THROTTLE_SM_MULTIPLY_ADD 0:0
+#define NVC997_THROTTLE_SM_MULTIPLY_ADD_FALSE 0x00000000
+#define NVC997_THROTTLE_SM_MULTIPLY_ADD_TRUE 0x00000001
+
+#define NVC997_SET_COLOR_TARGET_A(j) (0x0800+(j)*64)
+#define NVC997_SET_COLOR_TARGET_A_OFFSET_UPPER 7:0
+
+#define NVC997_SET_COLOR_TARGET_B(j) (0x0804+(j)*64)
+#define NVC997_SET_COLOR_TARGET_B_OFFSET_LOWER 31:0
+
+#define NVC997_SET_COLOR_TARGET_WIDTH(j) (0x0808+(j)*64)
+#define NVC997_SET_COLOR_TARGET_WIDTH_V 27:0
+
+#define NVC997_SET_COLOR_TARGET_HEIGHT(j) (0x080c+(j)*64)
+#define NVC997_SET_COLOR_TARGET_HEIGHT_V 16:0
+
+#define NVC997_SET_COLOR_TARGET_FORMAT(j) (0x0810+(j)*64)
+#define NVC997_SET_COLOR_TARGET_FORMAT_V 7:0
+#define NVC997_SET_COLOR_TARGET_FORMAT_V_DISABLED 0x00000000
+#define NVC997_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0
+#define NVC997_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_AS32 0x000000C1
+#define NVC997_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_AU32 0x000000C2
+#define NVC997_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3
+#define NVC997_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_X32 0x000000C4
+#define NVC997_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_X32 0x000000C5
+#define NVC997_SET_COLOR_TARGET_FORMAT_V_R16_G16_B16_A16 0x000000C6
+#define NVC997_SET_COLOR_TARGET_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7
+#define NVC997_SET_COLOR_TARGET_FORMAT_V_RS16_GS16_BS16_AS16 0x000000C8
0x000000C8 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RU16_GU16_BU16_AU16 0x000000C9 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RF32_GF32 0x000000CB +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RS32_GS32 0x000000CC +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RU32_GU32 0x000000CD +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE +#define NVC997_SET_COLOR_TARGET_FORMAT_V_A8R8G8B8 0x000000CF +#define NVC997_SET_COLOR_TARGET_FORMAT_V_A8RL8GL8BL8 0x000000D0 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_A2B10G10R10 0x000000D1 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_AU2BU10GU10RU10 0x000000D2 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_A8B8G8R8 0x000000D5 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_A8BL8GL8RL8 0x000000D6 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_AN8BN8GN8RN8 0x000000D7 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_AS8BS8GS8RS8 0x000000D8 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_AU8BU8GU8RU8 0x000000D9 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_R16_G16 0x000000DA +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RN16_GN16 0x000000DB +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RS16_GS16 0x000000DC +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RU16_GU16 0x000000DD +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RF16_GF16 0x000000DE +#define NVC997_SET_COLOR_TARGET_FORMAT_V_A2R10G10B10 0x000000DF +#define NVC997_SET_COLOR_TARGET_FORMAT_V_BF10GF11RF11 0x000000E0 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RS32 0x000000E3 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RU32 0x000000E4 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RF32 0x000000E5 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_X8R8G8B8 0x000000E6 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_X8RL8GL8BL8 0x000000E7 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_R5G6B5 0x000000E8 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_A1R5G5B5 0x000000E9 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_G8R8 0x000000EA +#define NVC997_SET_COLOR_TARGET_FORMAT_V_GN8RN8 0x000000EB +#define NVC997_SET_COLOR_TARGET_FORMAT_V_GS8RS8 0x000000EC +#define NVC997_SET_COLOR_TARGET_FORMAT_V_GU8RU8 0x000000ED +#define NVC997_SET_COLOR_TARGET_FORMAT_V_R16 0x000000EE +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RN16 0x000000EF +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RS16 0x000000F0 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RU16 0x000000F1 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RF16 0x000000F2 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_R8 0x000000F3 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RN8 0x000000F4 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RS8 0x000000F5 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RU8 0x000000F6 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_A8 0x000000F7 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_X1R5G5B5 0x000000F8 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_X8B8G8R8 0x000000F9 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_X8BL8GL8RL8 0x000000FA +#define NVC997_SET_COLOR_TARGET_FORMAT_V_Z1R5G5B5 0x000000FB +#define NVC997_SET_COLOR_TARGET_FORMAT_V_O1R5G5B5 0x000000FC +#define NVC997_SET_COLOR_TARGET_FORMAT_V_Z8R8G8B8 0x000000FD +#define NVC997_SET_COLOR_TARGET_FORMAT_V_O8R8G8B8 0x000000FE +#define NVC997_SET_COLOR_TARGET_FORMAT_V_R32 0x000000FF +#define NVC997_SET_COLOR_TARGET_FORMAT_V_A16 0x00000040 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_AF16 0x00000041 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_AF32 0x00000042 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_A8R8 0x00000043 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_R16_A16 0x00000044 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_RF16_AF16 0x00000045 +#define 
NVC997_SET_COLOR_TARGET_FORMAT_V_RF32_AF32 0x00000046 +#define NVC997_SET_COLOR_TARGET_FORMAT_V_B8G8R8A8 0x00000047 + +#define NVC997_SET_COLOR_TARGET_MEMORY(j) (0x0814+(j)*64) +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH 3:0 +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH_ONE_GOB 0x00000000 +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT 7:4 +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_ONE_GOB 0x00000000 +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_TWO_GOBS 0x00000001 +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH 11:8 +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_ONE_GOB 0x00000000 +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_TWO_GOBS 0x00000001 +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_FOUR_GOBS 0x00000002 +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_EIGHT_GOBS 0x00000003 +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVC997_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005 +#define NVC997_SET_COLOR_TARGET_MEMORY_LAYOUT 12:12 +#define NVC997_SET_COLOR_TARGET_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVC997_SET_COLOR_TARGET_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVC997_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL 16:16 +#define NVC997_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000 +#define NVC997_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_DEPTH_SIZE 0x00000001 + +#define NVC997_SET_COLOR_TARGET_THIRD_DIMENSION(j) (0x0818+(j)*64) +#define NVC997_SET_COLOR_TARGET_THIRD_DIMENSION_V 27:0 + +#define NVC997_SET_COLOR_TARGET_ARRAY_PITCH(j) (0x081c+(j)*64) +#define NVC997_SET_COLOR_TARGET_ARRAY_PITCH_V 31:0 + +#define NVC997_SET_COLOR_TARGET_LAYER(j) (0x0820+(j)*64) +#define NVC997_SET_COLOR_TARGET_LAYER_OFFSET 15:0 + +#define NVC997_SET_COLOR_TARGET_C_ROP_SLICE_MAP(j) (0x0824+(j)*64) +#define NVC997_SET_COLOR_TARGET_C_ROP_SLICE_MAP_VIRTUAL_ADDRESS_MASK 31:0 + +#define NVC997_SET_VIEWPORT_SCALE_X(j) (0x0a00+(j)*32) +#define NVC997_SET_VIEWPORT_SCALE_X_V 31:0 + +#define NVC997_SET_VIEWPORT_SCALE_Y(j) (0x0a04+(j)*32) +#define NVC997_SET_VIEWPORT_SCALE_Y_V 31:0 + +#define NVC997_SET_VIEWPORT_SCALE_Z(j) (0x0a08+(j)*32) +#define NVC997_SET_VIEWPORT_SCALE_Z_V 31:0 + +#define NVC997_SET_VIEWPORT_OFFSET_X(j) (0x0a0c+(j)*32) +#define NVC997_SET_VIEWPORT_OFFSET_X_V 31:0 + +#define NVC997_SET_VIEWPORT_OFFSET_Y(j) (0x0a10+(j)*32) +#define NVC997_SET_VIEWPORT_OFFSET_Y_V 31:0 + +#define NVC997_SET_VIEWPORT_OFFSET_Z(j) (0x0a14+(j)*32) +#define NVC997_SET_VIEWPORT_OFFSET_Z_V 31:0 + +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE(j) (0x0a18+(j)*32) +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_X 2:0 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_X 0x00000000 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_X 0x00000001 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Y 0x00000002 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Y 0x00000003 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Z 0x00000004 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Z 0x00000005 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_W 0x00000006 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_W 
0x00000007 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Y 6:4 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_X 0x00000000 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_X 0x00000001 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Y 0x00000002 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Y 0x00000003 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Z 0x00000004 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Z 0x00000005 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_W 0x00000006 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_W 0x00000007 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Z 10:8 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_X 0x00000000 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_X 0x00000001 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Y 0x00000002 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Y 0x00000003 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Z 0x00000004 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Z 0x00000005 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_W 0x00000006 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_W 0x00000007 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_W 14:12 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_X 0x00000000 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_X 0x00000001 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Y 0x00000002 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Y 0x00000003 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Z 0x00000004 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Z 0x00000005 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_W 0x00000006 +#define NVC997_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_W 0x00000007 + +#define NVC997_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION(j) (0x0a1c+(j)*32) +#define NVC997_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_X_BITS 4:0 +#define NVC997_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_Y_BITS 12:8 + +#define NVC997_SET_VIEWPORT_CLIP_HORIZONTAL(j) (0x0c00+(j)*16) +#define NVC997_SET_VIEWPORT_CLIP_HORIZONTAL_X0 15:0 +#define NVC997_SET_VIEWPORT_CLIP_HORIZONTAL_WIDTH 31:16 + +#define NVC997_SET_VIEWPORT_CLIP_VERTICAL(j) (0x0c04+(j)*16) +#define NVC997_SET_VIEWPORT_CLIP_VERTICAL_Y0 15:0 +#define NVC997_SET_VIEWPORT_CLIP_VERTICAL_HEIGHT 31:16 + +#define NVC997_SET_VIEWPORT_CLIP_MIN_Z(j) (0x0c08+(j)*16) +#define NVC997_SET_VIEWPORT_CLIP_MIN_Z_V 31:0 + +#define NVC997_SET_VIEWPORT_CLIP_MAX_Z(j) (0x0c0c+(j)*16) +#define NVC997_SET_VIEWPORT_CLIP_MAX_Z_V 31:0 + +#define NVC997_SET_WINDOW_CLIP_HORIZONTAL(j) (0x0d00+(j)*8) +#define NVC997_SET_WINDOW_CLIP_HORIZONTAL_XMIN 15:0 +#define NVC997_SET_WINDOW_CLIP_HORIZONTAL_XMAX 31:16 + +#define NVC997_SET_WINDOW_CLIP_VERTICAL(j) (0x0d04+(j)*8) +#define NVC997_SET_WINDOW_CLIP_VERTICAL_YMIN 15:0 +#define NVC997_SET_WINDOW_CLIP_VERTICAL_YMAX 31:16 + +#define NVC997_SET_CLIP_ID_EXTENT_X(j) (0x0d40+(j)*8) +#define NVC997_SET_CLIP_ID_EXTENT_X_MINX 15:0 +#define NVC997_SET_CLIP_ID_EXTENT_X_WIDTH 31:16 + +#define NVC997_SET_CLIP_ID_EXTENT_Y(j) (0x0d44+(j)*8) +#define NVC997_SET_CLIP_ID_EXTENT_Y_MINY 15:0 +#define NVC997_SET_CLIP_ID_EXTENT_Y_HEIGHT 31:16 + +#define NVC997_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK 0x0d60 +#define NVC997_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK_V 10:0 + +#define NVC997_SET_API_VISIBLE_CALL_LIMIT 0x0d64 +#define NVC997_SET_API_VISIBLE_CALL_LIMIT_V 3:0 +#define NVC997_SET_API_VISIBLE_CALL_LIMIT_V__0 0x00000000 +#define NVC997_SET_API_VISIBLE_CALL_LIMIT_V__1 
0x00000001 +#define NVC997_SET_API_VISIBLE_CALL_LIMIT_V__2 0x00000002 +#define NVC997_SET_API_VISIBLE_CALL_LIMIT_V__4 0x00000003 +#define NVC997_SET_API_VISIBLE_CALL_LIMIT_V__8 0x00000004 +#define NVC997_SET_API_VISIBLE_CALL_LIMIT_V__16 0x00000005 +#define NVC997_SET_API_VISIBLE_CALL_LIMIT_V__32 0x00000006 +#define NVC997_SET_API_VISIBLE_CALL_LIMIT_V__64 0x00000007 +#define NVC997_SET_API_VISIBLE_CALL_LIMIT_V__128 0x00000008 +#define NVC997_SET_API_VISIBLE_CALL_LIMIT_V_NO_CHECK 0x0000000F + +#define NVC997_SET_STATISTICS_COUNTER 0x0d68 +#define NVC997_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE 0:0 +#define NVC997_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE 1:1 +#define NVC997_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE 2:2 +#define NVC997_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE 3:3 +#define NVC997_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE 4:4 +#define NVC997_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE 5:5 +#define NVC997_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE 6:6 +#define NVC997_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE 7:7 +#define NVC997_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE 8:8 +#define NVC997_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE 9:9 +#define NVC997_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE 11:11 +#define NVC997_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE 12:12 +#define NVC997_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE 13:13 +#define 
NVC997_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE 14:14 +#define NVC997_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE 10:10 +#define NVC997_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE 15:15 +#define NVC997_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_TRUE 0x00000001 +#define NVC997_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE 16:16 +#define NVC997_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_CLEAR_RECT_HORIZONTAL 0x0d6c +#define NVC997_SET_CLEAR_RECT_HORIZONTAL_XMIN 15:0 +#define NVC997_SET_CLEAR_RECT_HORIZONTAL_XMAX 31:16 + +#define NVC997_SET_CLEAR_RECT_VERTICAL 0x0d70 +#define NVC997_SET_CLEAR_RECT_VERTICAL_YMIN 15:0 +#define NVC997_SET_CLEAR_RECT_VERTICAL_YMAX 31:16 + +#define NVC997_SET_VERTEX_ARRAY_START 0x0d74 +#define NVC997_SET_VERTEX_ARRAY_START_V 31:0 + +#define NVC997_DRAW_VERTEX_ARRAY 0x0d78 +#define NVC997_DRAW_VERTEX_ARRAY_COUNT 31:0 + +#define NVC997_SET_VIEWPORT_Z_CLIP 0x0d7c +#define NVC997_SET_VIEWPORT_Z_CLIP_RANGE 0:0 +#define NVC997_SET_VIEWPORT_Z_CLIP_RANGE_NEGATIVE_W_TO_POSITIVE_W 0x00000000 +#define NVC997_SET_VIEWPORT_Z_CLIP_RANGE_ZERO_TO_POSITIVE_W 0x00000001 + +#define NVC997_SET_COLOR_CLEAR_VALUE(i) (0x0d80+(i)*4) +#define NVC997_SET_COLOR_CLEAR_VALUE_V 31:0 + +#define NVC997_SET_Z_CLEAR_VALUE 0x0d90 +#define NVC997_SET_Z_CLEAR_VALUE_V 31:0 + +#define NVC997_SET_SHADER_CACHE_CONTROL 0x0d94 +#define NVC997_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0 +#define NVC997_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000 +#define NVC997_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001 + +#define NVC997_FORCE_TRANSITION_TO_BETA 0x0d98 +#define NVC997_FORCE_TRANSITION_TO_BETA_V 0:0 + +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_ENABLE 0x0d9c +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V 0:0 +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_FALSE 0x00000000 +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_TRUE 0x00000001 + +#define NVC997_SET_STENCIL_CLEAR_VALUE 0x0da0 +#define NVC997_SET_STENCIL_CLEAR_VALUE_V 7:0 + +#define NVC997_INVALIDATE_SHADER_CACHES_NO_WFI 0x0da4 +#define NVC997_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0 +#define NVC997_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000 +#define NVC997_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001 +#define NVC997_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4 +#define NVC997_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000 +#define NVC997_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001 +#define NVC997_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12 +#define NVC997_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000 +#define NVC997_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001 + +#define NVC997_SET_ZCULL_SERIALIZATION 0x0da8 +#define 
NVC997_SET_ZCULL_SERIALIZATION_ENABLE 0:0 +#define NVC997_SET_ZCULL_SERIALIZATION_ENABLE_FALSE 0x00000000 +#define NVC997_SET_ZCULL_SERIALIZATION_ENABLE_TRUE 0x00000001 +#define NVC997_SET_ZCULL_SERIALIZATION_APPLIED 5:4 +#define NVC997_SET_ZCULL_SERIALIZATION_APPLIED_ALWAYS 0x00000000 +#define NVC997_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z 0x00000001 +#define NVC997_SET_ZCULL_SERIALIZATION_APPLIED_OUT_OF_GAMUT_Z 0x00000002 +#define NVC997_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z_OR_OUT_OF_GAMUT_Z 0x00000003 + +#define NVC997_SET_FRONT_POLYGON_MODE 0x0dac +#define NVC997_SET_FRONT_POLYGON_MODE_V 31:0 +#define NVC997_SET_FRONT_POLYGON_MODE_V_POINT 0x00001B00 +#define NVC997_SET_FRONT_POLYGON_MODE_V_LINE 0x00001B01 +#define NVC997_SET_FRONT_POLYGON_MODE_V_FILL 0x00001B02 + +#define NVC997_SET_BACK_POLYGON_MODE 0x0db0 +#define NVC997_SET_BACK_POLYGON_MODE_V 31:0 +#define NVC997_SET_BACK_POLYGON_MODE_V_POINT 0x00001B00 +#define NVC997_SET_BACK_POLYGON_MODE_V_LINE 0x00001B01 +#define NVC997_SET_BACK_POLYGON_MODE_V_FILL 0x00001B02 + +#define NVC997_SET_POLY_SMOOTH 0x0db4 +#define NVC997_SET_POLY_SMOOTH_ENABLE 0:0 +#define NVC997_SET_POLY_SMOOTH_ENABLE_FALSE 0x00000000 +#define NVC997_SET_POLY_SMOOTH_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_ZCULL_DIR_FORMAT 0x0dbc +#define NVC997_SET_ZCULL_DIR_FORMAT_ZDIR 15:0 +#define NVC997_SET_ZCULL_DIR_FORMAT_ZDIR_LESS 0x00000000 +#define NVC997_SET_ZCULL_DIR_FORMAT_ZDIR_GREATER 0x00000001 +#define NVC997_SET_ZCULL_DIR_FORMAT_ZFORMAT 31:16 +#define NVC997_SET_ZCULL_DIR_FORMAT_ZFORMAT_MSB 0x00000000 +#define NVC997_SET_ZCULL_DIR_FORMAT_ZFORMAT_FP 0x00000001 +#define NVC997_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZTRICK 0x00000002 +#define NVC997_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZF32_1 0x00000003 + +#define NVC997_SET_POLY_OFFSET_POINT 0x0dc0 +#define NVC997_SET_POLY_OFFSET_POINT_ENABLE 0:0 +#define NVC997_SET_POLY_OFFSET_POINT_ENABLE_FALSE 0x00000000 +#define NVC997_SET_POLY_OFFSET_POINT_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_POLY_OFFSET_LINE 0x0dc4 +#define NVC997_SET_POLY_OFFSET_LINE_ENABLE 0:0 +#define NVC997_SET_POLY_OFFSET_LINE_ENABLE_FALSE 0x00000000 +#define NVC997_SET_POLY_OFFSET_LINE_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_POLY_OFFSET_FILL 0x0dc8 +#define NVC997_SET_POLY_OFFSET_FILL_ENABLE 0:0 +#define NVC997_SET_POLY_OFFSET_FILL_ENABLE_FALSE 0x00000000 +#define NVC997_SET_POLY_OFFSET_FILL_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_PATCH 0x0dcc +#define NVC997_SET_PATCH_SIZE 7:0 + +#define NVC997_SET_ITERATED_BLEND 0x0dd0 +#define NVC997_SET_ITERATED_BLEND_ENABLE 0:0 +#define NVC997_SET_ITERATED_BLEND_ENABLE_FALSE 0x00000000 +#define NVC997_SET_ITERATED_BLEND_ENABLE_TRUE 0x00000001 +#define NVC997_SET_ITERATED_BLEND_ALPHA_ENABLE 1:1 +#define NVC997_SET_ITERATED_BLEND_ALPHA_ENABLE_FALSE 0x00000000 +#define NVC997_SET_ITERATED_BLEND_ALPHA_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_ITERATED_BLEND_PASS 0x0dd4 +#define NVC997_SET_ITERATED_BLEND_PASS_COUNT 7:0 + +#define NVC997_SET_ZCULL_CRITERION 0x0dd8 +#define NVC997_SET_ZCULL_CRITERION_SFUNC 7:0 +#define NVC997_SET_ZCULL_CRITERION_SFUNC_NEVER 0x00000000 +#define NVC997_SET_ZCULL_CRITERION_SFUNC_LESS 0x00000001 +#define NVC997_SET_ZCULL_CRITERION_SFUNC_EQUAL 0x00000002 +#define NVC997_SET_ZCULL_CRITERION_SFUNC_LEQUAL 0x00000003 +#define NVC997_SET_ZCULL_CRITERION_SFUNC_GREATER 0x00000004 +#define NVC997_SET_ZCULL_CRITERION_SFUNC_NOTEQUAL 0x00000005 +#define NVC997_SET_ZCULL_CRITERION_SFUNC_GEQUAL 0x00000006 +#define NVC997_SET_ZCULL_CRITERION_SFUNC_ALWAYS 0x00000007 +#define 
NVC997_SET_ZCULL_CRITERION_NO_INVALIDATE 8:8 +#define NVC997_SET_ZCULL_CRITERION_NO_INVALIDATE_FALSE 0x00000000 +#define NVC997_SET_ZCULL_CRITERION_NO_INVALIDATE_TRUE 0x00000001 +#define NVC997_SET_ZCULL_CRITERION_FORCE_MATCH 9:9 +#define NVC997_SET_ZCULL_CRITERION_FORCE_MATCH_FALSE 0x00000000 +#define NVC997_SET_ZCULL_CRITERION_FORCE_MATCH_TRUE 0x00000001 +#define NVC997_SET_ZCULL_CRITERION_SREF 23:16 +#define NVC997_SET_ZCULL_CRITERION_SMASK 31:24 + +#define NVC997_PIXEL_SHADER_BARRIER 0x0de0 +#define NVC997_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE 0:0 +#define NVC997_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_FALSE 0x00000000 +#define NVC997_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_TRUE 0x00000001 +#define NVC997_PIXEL_SHADER_BARRIER_BARRIER_LOCATION 1:1 +#define NVC997_PIXEL_SHADER_BARRIER_BARRIER_LOCATION_BLOCK_BEFORE_PS 0x00000000 +#define NVC997_PIXEL_SHADER_BARRIER_BARRIER_LOCATION_BLOCK_BEFORE_PS_AND_ZTEST 0x00000001 + +#define NVC997_SET_SM_TIMEOUT_INTERVAL 0x0de4 +#define NVC997_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0 + +#define NVC997_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY 0x0de8 +#define NVC997_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE 0:0 +#define NVC997_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_FALSE 0x00000000 +#define NVC997_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_TRUE 0x00000001 + +#define NVC997_MME_DMA_WRITE_METHOD_BARRIER 0x0dec +#define NVC997_MME_DMA_WRITE_METHOD_BARRIER_V 0:0 + +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER 0x0df0 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER_V 7:0 + +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION 0x0df4 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC 2:0 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_FALSE 0x00000000 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_TRUE 0x00000001 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_EQ 0x00000002 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_NE 0x00000003 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LT 0x00000004 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LE 0x00000005 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GT 0x00000006 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GE 0x00000007 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION 5:3 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD_PRODUCTS 0x00000000 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUB_PRODUCTS 0x00000001 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MIN 0x00000002 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MAX 0x00000003 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_RCP 0x00000004 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD 0x00000005 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUBTRACT 0x00000006 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT 8:6 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT0 0x00000000 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT1 0x00000001 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT2 0x00000002 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT3 0x00000003 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT4 0x00000004 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT5 0x00000005 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT6 0x00000006 +#define 
NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT7 0x00000007 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT 11:9 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_RGB 0x00000000 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_RGB 0x00000001 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_AAA 0x00000002 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_AAA 0x00000003 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP0_RGB 0x00000004 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP1_RGB 0x00000005 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP2_RGB 0x00000006 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_PBR_RGB 0x00000007 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT 15:12 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO 0x00000000 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE 0x00000001 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_RGB 0x00000002 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_AAA 0x00000003 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_SRC_AAA 0x00000004 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_RGB 0x00000005 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_AAA 0x00000006 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_DEST_AAA 0x00000007 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP0_RGB 0x00000009 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP1_RGB 0x0000000A +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP2_RGB 0x0000000B +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_PBR_RGB 0x0000000C +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_CONSTANT_RGB 0x0000000D +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO_A_TIMES_B 0x0000000E +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT 18:16 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_RGB 0x00000000 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_RGB 0x00000001 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_AAA 0x00000002 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_AAA 0x00000003 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP0_RGB 0x00000004 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP1_RGB 0x00000005 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP2_RGB 0x00000006 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_PBR_RGB 0x00000007 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT 22:19 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO 0x00000000 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE 0x00000001 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_RGB 0x00000002 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_AAA 0x00000003 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_SRC_AAA 0x00000004 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_RGB 0x00000005 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_AAA 0x00000006 +#define 
NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_DEST_AAA 0x00000007 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP0_RGB 0x00000009 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP1_RGB 0x0000000A +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP2_RGB 0x0000000B +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_PBR_RGB 0x0000000C +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_CONSTANT_RGB 0x0000000D +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO_C_TIMES_D 0x0000000E +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE 25:23 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RGB 0x00000000 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GBR 0x00000001 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RRR 0x00000002 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GGG 0x00000003 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_BBB 0x00000004 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_R_TO_A 0x00000005 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK 27:26 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_RGB 0x00000000 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_R_ONLY 0x00000001 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_G_ONLY 0x00000002 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_B_ONLY 0x00000003 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT 29:28 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP0 0x00000000 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP1 0x00000001 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP2 0x00000002 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_NONE 0x00000003 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC 31:31 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_FALSE 0x00000000 +#define NVC997_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_TRUE 0x00000001 + +#define NVC997_SET_WINDOW_OFFSET_X 0x0df8 +#define NVC997_SET_WINDOW_OFFSET_X_V 16:0 + +#define NVC997_SET_WINDOW_OFFSET_Y 0x0dfc +#define NVC997_SET_WINDOW_OFFSET_Y_V 17:0 + +#define NVC997_SET_SCISSOR_ENABLE(j) (0x0e00+(j)*16) +#define NVC997_SET_SCISSOR_ENABLE_V 0:0 +#define NVC997_SET_SCISSOR_ENABLE_V_FALSE 0x00000000 +#define NVC997_SET_SCISSOR_ENABLE_V_TRUE 0x00000001 + +#define NVC997_SET_SCISSOR_HORIZONTAL(j) (0x0e04+(j)*16) +#define NVC997_SET_SCISSOR_HORIZONTAL_XMIN 15:0 +#define NVC997_SET_SCISSOR_HORIZONTAL_XMAX 31:16 + +#define NVC997_SET_SCISSOR_VERTICAL(j) (0x0e08+(j)*16) +#define NVC997_SET_SCISSOR_VERTICAL_YMIN 15:0 +#define NVC997_SET_SCISSOR_VERTICAL_YMAX 31:16 + +#define NVC997_SET_VPC_PERF_KNOB 0x0f14 +#define NVC997_SET_VPC_PERF_KNOB_CULLED_SMALL_LINES 7:0 +#define NVC997_SET_VPC_PERF_KNOB_CULLED_SMALL_TRIANGLES 15:8 +#define NVC997_SET_VPC_PERF_KNOB_NONCULLED_LINES_AND_POINTS 23:16 +#define NVC997_SET_VPC_PERF_KNOB_NONCULLED_TRIANGLES 31:24 + +#define NVC997_PM_LOCAL_TRIGGER 0x0f18 +#define NVC997_PM_LOCAL_TRIGGER_BOOKMARK 15:0 + +#define NVC997_SET_POST_Z_PS_IMASK 0x0f1c +#define NVC997_SET_POST_Z_PS_IMASK_ENABLE 0:0 +#define NVC997_SET_POST_Z_PS_IMASK_ENABLE_FALSE 0x00000000 +#define NVC997_SET_POST_Z_PS_IMASK_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_CONSTANT_COLOR_RENDERING 0x0f40 +#define NVC997_SET_CONSTANT_COLOR_RENDERING_ENABLE 0:0 +#define 
NVC997_SET_CONSTANT_COLOR_RENDERING_ENABLE_FALSE 0x00000000 +#define NVC997_SET_CONSTANT_COLOR_RENDERING_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_CONSTANT_COLOR_RENDERING_RED 0x0f44 +#define NVC997_SET_CONSTANT_COLOR_RENDERING_RED_V 31:0 + +#define NVC997_SET_CONSTANT_COLOR_RENDERING_GREEN 0x0f48 +#define NVC997_SET_CONSTANT_COLOR_RENDERING_GREEN_V 31:0 + +#define NVC997_SET_CONSTANT_COLOR_RENDERING_BLUE 0x0f4c +#define NVC997_SET_CONSTANT_COLOR_RENDERING_BLUE_V 31:0 + +#define NVC997_SET_CONSTANT_COLOR_RENDERING_ALPHA 0x0f50 +#define NVC997_SET_CONSTANT_COLOR_RENDERING_ALPHA_V 31:0 + +#define NVC997_SET_BACK_STENCIL_FUNC_REF 0x0f54 +#define NVC997_SET_BACK_STENCIL_FUNC_REF_V 7:0 + +#define NVC997_SET_BACK_STENCIL_MASK 0x0f58 +#define NVC997_SET_BACK_STENCIL_MASK_V 7:0 + +#define NVC997_SET_BACK_STENCIL_FUNC_MASK 0x0f5c +#define NVC997_SET_BACK_STENCIL_FUNC_MASK_V 7:0 + +#define NVC997_SET_VERTEX_STREAM_SUBSTITUTE_A 0x0f84 +#define NVC997_SET_VERTEX_STREAM_SUBSTITUTE_A_ADDRESS_UPPER 7:0 + +#define NVC997_SET_VERTEX_STREAM_SUBSTITUTE_B 0x0f88 +#define NVC997_SET_VERTEX_STREAM_SUBSTITUTE_B_ADDRESS_LOWER 31:0 + +#define NVC997_SET_LINE_MODE_POLYGON_CLIP 0x0f8c +#define NVC997_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE 0:0 +#define NVC997_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DRAW_LINE 0x00000000 +#define NVC997_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DO_NOT_DRAW_LINE 0x00000001 + +#define NVC997_SET_SINGLE_CT_WRITE_CONTROL 0x0f90 +#define NVC997_SET_SINGLE_CT_WRITE_CONTROL_ENABLE 0:0 +#define NVC997_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_FALSE 0x00000000 +#define NVC997_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_VTG_WARP_WATERMARKS 0x0f98 +#define NVC997_SET_VTG_WARP_WATERMARKS_LOW 15:0 +#define NVC997_SET_VTG_WARP_WATERMARKS_HIGH 31:16 + +#define NVC997_SET_DEPTH_BOUNDS_MIN 0x0f9c +#define NVC997_SET_DEPTH_BOUNDS_MIN_V 31:0 + +#define NVC997_SET_DEPTH_BOUNDS_MAX 0x0fa0 +#define NVC997_SET_DEPTH_BOUNDS_MAX_V 31:0 + +#define NVC997_SET_SAMPLE_MASK 0x0fa4 +#define NVC997_SET_SAMPLE_MASK_RASTER_OUT_ENABLE 0:0 +#define NVC997_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_FALSE 0x00000000 +#define NVC997_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_TRUE 0x00000001 +#define NVC997_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE 4:4 +#define NVC997_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_FALSE 0x00000000 +#define NVC997_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_COLOR_TARGET_SAMPLE_MASK 0x0fa8 +#define NVC997_SET_COLOR_TARGET_SAMPLE_MASK_V 15:0 + +#define NVC997_SET_CT_MRT_ENABLE 0x0fac +#define NVC997_SET_CT_MRT_ENABLE_V 0:0 +#define NVC997_SET_CT_MRT_ENABLE_V_FALSE 0x00000000 +#define NVC997_SET_CT_MRT_ENABLE_V_TRUE 0x00000001 + +#define NVC997_SET_NONMULTISAMPLED_Z 0x0fb0 +#define NVC997_SET_NONMULTISAMPLED_Z_V 0:0 +#define NVC997_SET_NONMULTISAMPLED_Z_V_PER_SAMPLE 0x00000000 +#define NVC997_SET_NONMULTISAMPLED_Z_V_AT_PIXEL_CENTER 0x00000001 + +#define NVC997_SET_TIR 0x0fb4 +#define NVC997_SET_TIR_MODE 1:0 +#define NVC997_SET_TIR_MODE_DISABLED 0x00000000 +#define NVC997_SET_TIR_MODE_RASTER_N_TARGET_M 0x00000001 + +#define NVC997_SET_ANTI_ALIAS_RASTER 0x0fb8 +#define NVC997_SET_ANTI_ALIAS_RASTER_SAMPLES 2:0 +#define NVC997_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_1X1 0x00000000 +#define NVC997_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X2 0x00000002 +#define NVC997_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X2_D3D 0x00000004 +#define NVC997_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X1_D3D 0x00000005 +#define NVC997_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X4 0x00000006 + +#define 
NVC997_SET_SAMPLE_MASK_X0_Y0 0x0fbc +#define NVC997_SET_SAMPLE_MASK_X0_Y0_V 15:0 + +#define NVC997_SET_SAMPLE_MASK_X1_Y0 0x0fc0 +#define NVC997_SET_SAMPLE_MASK_X1_Y0_V 15:0 + +#define NVC997_SET_SAMPLE_MASK_X0_Y1 0x0fc4 +#define NVC997_SET_SAMPLE_MASK_X0_Y1_V 15:0 + +#define NVC997_SET_SAMPLE_MASK_X1_Y1 0x0fc8 +#define NVC997_SET_SAMPLE_MASK_X1_Y1_V 15:0 + +#define NVC997_SET_SURFACE_CLIP_ID_MEMORY_A 0x0fcc +#define NVC997_SET_SURFACE_CLIP_ID_MEMORY_A_OFFSET_UPPER 7:0 + +#define NVC997_SET_SURFACE_CLIP_ID_MEMORY_B 0x0fd0 +#define NVC997_SET_SURFACE_CLIP_ID_MEMORY_B_OFFSET_LOWER 31:0 + +#define NVC997_SET_TIR_MODULATION 0x0fd4 +#define NVC997_SET_TIR_MODULATION_COMPONENT_SELECT 1:0 +#define NVC997_SET_TIR_MODULATION_COMPONENT_SELECT_NO_MODULATION 0x00000000 +#define NVC997_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGB 0x00000001 +#define NVC997_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_ALPHA_ONLY 0x00000002 +#define NVC997_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGBA 0x00000003 + +#define NVC997_SET_TIR_MODULATION_FUNCTION 0x0fd8 +#define NVC997_SET_TIR_MODULATION_FUNCTION_SELECT 0:0 +#define NVC997_SET_TIR_MODULATION_FUNCTION_SELECT_LINEAR 0x00000000 +#define NVC997_SET_TIR_MODULATION_FUNCTION_SELECT_TABLE 0x00000001 + +#define NVC997_SET_BLEND_OPT_CONTROL 0x0fdc +#define NVC997_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS 0:0 +#define NVC997_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_FALSE 0x00000000 +#define NVC997_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_TRUE 0x00000001 + +#define NVC997_SET_ZT_A 0x0fe0 +#define NVC997_SET_ZT_A_OFFSET_UPPER 7:0 + +#define NVC997_SET_ZT_B 0x0fe4 +#define NVC997_SET_ZT_B_OFFSET_LOWER 31:0 + +#define NVC997_SET_ZT_FORMAT 0x0fe8 +#define NVC997_SET_ZT_FORMAT_V 4:0 +#define NVC997_SET_ZT_FORMAT_V_Z16 0x00000013 +#define NVC997_SET_ZT_FORMAT_V_Z24S8 0x00000014 +#define NVC997_SET_ZT_FORMAT_V_X8Z24 0x00000015 +#define NVC997_SET_ZT_FORMAT_V_S8Z24 0x00000016 +#define NVC997_SET_ZT_FORMAT_V_S8 0x00000017 +#define NVC997_SET_ZT_FORMAT_V_V8Z24 0x00000018 +#define NVC997_SET_ZT_FORMAT_V_ZF32 0x0000000A +#define NVC997_SET_ZT_FORMAT_V_ZF32_X24S8 0x00000019 +#define NVC997_SET_ZT_FORMAT_V_X8Z24_X16V8S8 0x0000001D +#define NVC997_SET_ZT_FORMAT_V_ZF32_X16V8X8 0x0000001E +#define NVC997_SET_ZT_FORMAT_V_ZF32_X16V8S8 0x0000001F + +#define NVC997_SET_ZT_BLOCK_SIZE 0x0fec +#define NVC997_SET_ZT_BLOCK_SIZE_WIDTH 3:0 +#define NVC997_SET_ZT_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVC997_SET_ZT_BLOCK_SIZE_HEIGHT 7:4 +#define NVC997_SET_ZT_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC997_SET_ZT_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC997_SET_ZT_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC997_SET_ZT_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC997_SET_ZT_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC997_SET_ZT_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC997_SET_ZT_BLOCK_SIZE_DEPTH 11:8 +#define NVC997_SET_ZT_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 + +#define NVC997_SET_ZT_ARRAY_PITCH 0x0ff0 +#define NVC997_SET_ZT_ARRAY_PITCH_V 31:0 + +#define NVC997_SET_SURFACE_CLIP_HORIZONTAL 0x0ff4 +#define NVC997_SET_SURFACE_CLIP_HORIZONTAL_X 15:0 +#define NVC997_SET_SURFACE_CLIP_HORIZONTAL_WIDTH 31:16 + +#define NVC997_SET_SURFACE_CLIP_VERTICAL 0x0ff8 +#define NVC997_SET_SURFACE_CLIP_VERTICAL_Y 15:0 +#define NVC997_SET_SURFACE_CLIP_VERTICAL_HEIGHT 31:16 + +#define NVC997_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS 0x1000 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE 0:0 +#define 
NVC997_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_FALSE 0x00000000 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_TRUE 0x00000001 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY 5:4 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC997_SET_VIEWPORT_MULTICAST 0x1004 +#define NVC997_SET_VIEWPORT_MULTICAST_ORDER 0:0 +#define NVC997_SET_VIEWPORT_MULTICAST_ORDER_VIEWPORT_ORDER 0x00000000 +#define NVC997_SET_VIEWPORT_MULTICAST_ORDER_PRIMITIVE_ORDER 0x00000001 + +#define NVC997_SET_TESSELLATION_CUT_HEIGHT 0x1008 +#define NVC997_SET_TESSELLATION_CUT_HEIGHT_V 4:0 + +#define NVC997_SET_MAX_GS_INSTANCES_PER_TASK 0x100c +#define NVC997_SET_MAX_GS_INSTANCES_PER_TASK_V 10:0 + +#define NVC997_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK 0x1010 +#define NVC997_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK_V 15:0 + +#define NVC997_SET_RESERVED_SW_METHOD00 0x1014 +#define NVC997_SET_RESERVED_SW_METHOD00_V 31:0 + +#define NVC997_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER 0x1018 +#define NVC997_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0 + +#define NVC997_SET_BETA_CB_STORAGE_CONSTRAINT 0x101c +#define NVC997_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE 0:0 +#define NVC997_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000 +#define NVC997_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER 0x1020 +#define NVC997_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0 + +#define NVC997_SET_ALPHA_CB_STORAGE_CONSTRAINT 0x1024 +#define NVC997_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE 0:0 +#define NVC997_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000 +#define NVC997_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_RESERVED_SW_METHOD01 0x1028 +#define NVC997_SET_RESERVED_SW_METHOD01_V 31:0 + +#define NVC997_SET_RESERVED_SW_METHOD02 0x102c +#define NVC997_SET_RESERVED_SW_METHOD02_V 31:0 + +#define NVC997_SET_TIR_MODULATION_COEFFICIENT_TABLE(i) (0x1030+(i)*4) +#define NVC997_SET_TIR_MODULATION_COEFFICIENT_TABLE_V0 7:0 +#define NVC997_SET_TIR_MODULATION_COEFFICIENT_TABLE_V1 15:8 +#define NVC997_SET_TIR_MODULATION_COEFFICIENT_TABLE_V2 23:16 +#define NVC997_SET_TIR_MODULATION_COEFFICIENT_TABLE_V3 31:24 + +#define NVC997_SET_SPARE_NOOP01 0x1044 +#define NVC997_SET_SPARE_NOOP01_V 31:0 + +#define NVC997_SET_SPARE_NOOP02 0x1048 +#define NVC997_SET_SPARE_NOOP02_V 31:0 + +#define NVC997_SET_SPARE_NOOP03 0x104c +#define NVC997_SET_SPARE_NOOP03_V 31:0 + +#define NVC997_SET_SPARE_NOOP04 0x1050 +#define NVC997_SET_SPARE_NOOP04_V 31:0 + +#define NVC997_SET_SPARE_NOOP05 0x1054 +#define NVC997_SET_SPARE_NOOP05_V 31:0 + +#define NVC997_SET_SPARE_NOOP06 0x1058 +#define NVC997_SET_SPARE_NOOP06_V 31:0 + +#define NVC997_SET_SPARE_NOOP07 0x105c +#define NVC997_SET_SPARE_NOOP07_V 31:0 + +#define NVC997_SET_SPARE_NOOP08 0x1060 +#define NVC997_SET_SPARE_NOOP08_V 31:0 + +#define NVC997_SET_SPARE_NOOP09 0x1064 +#define NVC997_SET_SPARE_NOOP09_V 31:0 + +#define NVC997_SET_SPARE_NOOP10 0x1068 +#define NVC997_SET_SPARE_NOOP10_V 31:0 + +#define NVC997_SET_SPARE_NOOP11 0x106c +#define NVC997_SET_SPARE_NOOP11_V 31:0 + +#define NVC997_SET_SPARE_NOOP12 0x1070 +#define NVC997_SET_SPARE_NOOP12_V 31:0 + +#define NVC997_SET_SPARE_NOOP13 0x1074 +#define NVC997_SET_SPARE_NOOP13_V 31:0 + +#define NVC997_SET_SPARE_NOOP14 0x1078 +#define 
NVC997_SET_SPARE_NOOP14_V 31:0 + +#define NVC997_SET_SPARE_NOOP15 0x107c +#define NVC997_SET_SPARE_NOOP15_V 31:0 + +#define NVC997_SET_RESERVED_SW_METHOD03 0x10b0 +#define NVC997_SET_RESERVED_SW_METHOD03_V 31:0 + +#define NVC997_SET_RESERVED_SW_METHOD04 0x10b4 +#define NVC997_SET_RESERVED_SW_METHOD04_V 31:0 + +#define NVC997_SET_RESERVED_SW_METHOD05 0x10b8 +#define NVC997_SET_RESERVED_SW_METHOD05_V 31:0 + +#define NVC997_SET_RESERVED_SW_METHOD06 0x10bc +#define NVC997_SET_RESERVED_SW_METHOD06_V 31:0 + +#define NVC997_SET_RESERVED_SW_METHOD07 0x10c0 +#define NVC997_SET_RESERVED_SW_METHOD07_V 31:0 + +#define NVC997_SET_RESERVED_SW_METHOD08 0x10c4 +#define NVC997_SET_RESERVED_SW_METHOD08_V 31:0 + +#define NVC997_SET_RESERVED_SW_METHOD09 0x10c8 +#define NVC997_SET_RESERVED_SW_METHOD09_V 31:0 + +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_UNORM8 0x10cc +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED_ALL_HIT_ONCE 7:0 +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED 23:16 + +#define NVC997_SET_RESERVED_SW_METHOD10 0x10d0 +#define NVC997_SET_RESERVED_SW_METHOD10_V 31:0 + +#define NVC997_SET_RESERVED_SW_METHOD11 0x10d4 +#define NVC997_SET_RESERVED_SW_METHOD11_V 31:0 + +#define NVC997_SET_RESERVED_SW_METHOD12 0x10d8 +#define NVC997_SET_RESERVED_SW_METHOD12_V 31:0 + +#define NVC997_SET_RESERVED_SW_METHOD13 0x10dc +#define NVC997_SET_RESERVED_SW_METHOD13_V 31:0 + +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_UNORM10 0x10e0 +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED_ALL_HIT_ONCE 7:0 +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED 23:16 + +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_UNORM16 0x10e4 +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED_ALL_HIT_ONCE 7:0 +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED 23:16 + +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_FP11 0x10e8 +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED_ALL_HIT_ONCE 5:0 +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED 21:16 + +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_FP16 0x10ec +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED_ALL_HIT_ONCE 7:0 +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED 23:16 + +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_SRGB8 0x10f0 +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED_ALL_HIT_ONCE 7:0 +#define NVC997_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED 23:16 + +#define NVC997_UNBIND_ALL 0x10f4 +#define NVC997_UNBIND_ALL_CONSTANT_BUFFERS 8:8 +#define NVC997_UNBIND_ALL_CONSTANT_BUFFERS_FALSE 0x00000000 +#define NVC997_UNBIND_ALL_CONSTANT_BUFFERS_TRUE 0x00000001 + +#define NVC997_SET_CLEAR_SURFACE_CONTROL 0x10f8 +#define NVC997_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK 0:0 +#define NVC997_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_FALSE 0x00000000 +#define NVC997_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_TRUE 0x00000001 +#define NVC997_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT 4:4 +#define NVC997_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_FALSE 0x00000000 +#define NVC997_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_TRUE 0x00000001 +#define NVC997_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0 8:8 +#define NVC997_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_FALSE 0x00000000 +#define NVC997_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_TRUE 0x00000001 +#define NVC997_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0 12:12 +#define NVC997_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_FALSE 0x00000000 +#define NVC997_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_TRUE 0x00000001 + 
+#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS 0x10fc +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY 5:4 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC997_SET_RESERVED_SW_METHOD14 0x1100 +#define NVC997_SET_RESERVED_SW_METHOD14_V 31:0 + +#define NVC997_SET_RESERVED_SW_METHOD15 0x1104 +#define NVC997_SET_RESERVED_SW_METHOD15_V 31:0 + +#define NVC997_NO_OPERATION_DATA_HI 0x110c +#define NVC997_NO_OPERATION_DATA_HI_V 31:0 + +#define NVC997_SET_DEPTH_BIAS_CONTROL 0x1110 +#define NVC997_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT 0:0 +#define NVC997_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_FALSE 0x00000000 +#define NVC997_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_TRUE 0x00000001 + +#define NVC997_PM_TRIGGER_END 0x1114 +#define NVC997_PM_TRIGGER_END_V 31:0 + +#define NVC997_SET_VERTEX_ID_BASE 0x1118 +#define NVC997_SET_VERTEX_ID_BASE_V 31:0 + +#define NVC997_SET_STENCIL_COMPRESSION 0x111c +#define NVC997_SET_STENCIL_COMPRESSION_ENABLE 0:0 +#define NVC997_SET_STENCIL_COMPRESSION_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STENCIL_COMPRESSION_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A(i) (0x1120+(i)*4) +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0 0:0 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1 1:1 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2 2:2 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3 3:3 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0 4:4 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1 5:5 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2 6:6 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3 7:7 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0 8:8 +#define 
NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1 9:9 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2 10:10 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3 11:11 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0 12:12 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1 13:13 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2 14:14 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3 15:15 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0 16:16 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1 17:17 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2 18:18 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3 19:19 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0 20:20 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1 21:21 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2 22:22 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_FALSE 0x00000000 +#define 
NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3 23:23 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0 24:24 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1 25:25 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2 26:26 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3 27:27 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0 28:28 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1 29:29 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2 30:30 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3 31:31 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_TRUE 0x00000001 + +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B(i) (0x1128+(i)*4) +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0 0:0 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1 1:1 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2 2:2 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3 3:3 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0 4:4 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_FALSE 0x00000000 +#define 
NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1 5:5 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2 6:6 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3 7:7 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0 8:8 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1 9:9 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2 10:10 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3 11:11 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0 12:12 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1 13:13 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2 14:14 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3 15:15 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0 16:16 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1 17:17 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2 18:18 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_TRUE 0x00000001 +#define 
NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3 19:19 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0 20:20 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1 21:21 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2 22:22 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3 23:23 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0 24:24 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1 25:25 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2 26:26 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3 27:27 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0 28:28 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1 29:29 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2 30:30 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_TRUE 0x00000001 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3 31:31 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_FALSE 0x00000000 +#define NVC997_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_TRUE 0x00000001 + +#define NVC997_SET_TIR_CONTROL 0x1130 +#define NVC997_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES 0:0 +#define NVC997_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_DISABLE 0x00000000 +#define NVC997_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_ENABLE 0x00000001 +#define NVC997_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES 4:4 
+#define NVC997_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_DISABLE 0x00000000 +#define NVC997_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_ENABLE 0x00000001 +#define NVC997_SET_TIR_CONTROL_REDUCE_COVERAGE 1:1 +#define NVC997_SET_TIR_CONTROL_REDUCE_COVERAGE_DISABLE 0x00000000 +#define NVC997_SET_TIR_CONTROL_REDUCE_COVERAGE_ENABLE 0x00000001 +#define NVC997_SET_TIR_CONTROL_REDUCTION_MODE 2:2 +#define NVC997_SET_TIR_CONTROL_REDUCTION_MODE_AFFINITY_MAP 0x00000000 +#define NVC997_SET_TIR_CONTROL_REDUCTION_MODE_TRUNCATION 0x00000001 + +#define NVC997_SET_MUTABLE_METHOD_CONTROL 0x1134 +#define NVC997_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT 0:0 +#define NVC997_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_FALSE 0x00000000 +#define NVC997_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_TRUE 0x00000001 + +#define NVC997_SET_POST_PS_INITIAL_COVERAGE 0x1138 +#define NVC997_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE 0:0 +#define NVC997_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_FALSE 0x00000000 +#define NVC997_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_TRUE 0x00000001 + +#define NVC997_SET_FILL_VIA_TRIANGLE 0x113c +#define NVC997_SET_FILL_VIA_TRIANGLE_MODE 1:0 +#define NVC997_SET_FILL_VIA_TRIANGLE_MODE_DISABLED 0x00000000 +#define NVC997_SET_FILL_VIA_TRIANGLE_MODE_FILL_ALL 0x00000001 +#define NVC997_SET_FILL_VIA_TRIANGLE_MODE_FILL_BBOX 0x00000002 + +#define NVC997_SET_BLEND_PER_FORMAT_ENABLE 0x1140 +#define NVC997_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16 4:4 +#define NVC997_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_FALSE 0x00000000 +#define NVC997_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_TRUE 0x00000001 + +#define NVC997_FLUSH_PENDING_WRITES 0x1144 +#define NVC997_FLUSH_PENDING_WRITES_SM_DOES_GLOBAL_STORE 0:0 + +#define NVC997_SET_VERTEX_ATTRIBUTE_A(i) (0x1160+(i)*4) +#define NVC997_SET_VERTEX_ATTRIBUTE_A_STREAM 4:0 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_SOURCE 6:6 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_SOURCE_ACTIVE 0x00000000 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_SOURCE_INACTIVE 0x00000001 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_OFFSET 20:7 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS 26:21 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define 
NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NVC997_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8 0x00000034 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE 29:27 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SNORM 0x00000001 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UNORM 0x00000002 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SINT 0x00000003 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UINT 0x00000004 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_USCALED 0x00000005 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SSCALED 0x00000006 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT 0x00000007 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B 31:31 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_FALSE 0x00000000 +#define NVC997_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_TRUE 0x00000001 + +#define NVC997_SET_VERTEX_ATTRIBUTE_B(i) (0x11a0+(i)*4) +#define NVC997_SET_VERTEX_ATTRIBUTE_B_STREAM 4:0 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_SOURCE 6:6 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_SOURCE_ACTIVE 0x00000000 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_SOURCE_INACTIVE 0x00000001 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_OFFSET 20:7 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS 26:21 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NVC997_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8 0x00000034 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE 29:27 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SNORM 0x00000001 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UNORM 0x00000002 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SINT 0x00000003 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UINT 0x00000004 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_USCALED 0x00000005 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SSCALED 0x00000006 +#define 
NVC997_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_FLOAT 0x00000007 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B 31:31 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_FALSE 0x00000000 +#define NVC997_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_TRUE 0x00000001 + +#define NVC997_SET_ANTI_ALIAS_SAMPLE_POSITIONS(i) (0x11e0+(i)*4) +#define NVC997_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X0 3:0 +#define NVC997_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y0 7:4 +#define NVC997_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X1 11:8 +#define NVC997_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y1 15:12 +#define NVC997_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X2 19:16 +#define NVC997_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y2 23:20 +#define NVC997_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X3 27:24 +#define NVC997_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y3 31:28 + +#define NVC997_SET_OFFSET_RENDER_TARGET_INDEX 0x11f0 +#define NVC997_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX 0:0 +#define NVC997_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_FALSE 0x00000000 +#define NVC997_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_TRUE 0x00000001 + +#define NVC997_FORCE_HEAVYWEIGHT_METHOD_SYNC 0x11f4 +#define NVC997_FORCE_HEAVYWEIGHT_METHOD_SYNC_V 31:0 + +#define NVC997_SET_COVERAGE_TO_COLOR 0x11f8 +#define NVC997_SET_COVERAGE_TO_COLOR_ENABLE 0:0 +#define NVC997_SET_COVERAGE_TO_COLOR_ENABLE_FALSE 0x00000000 +#define NVC997_SET_COVERAGE_TO_COLOR_ENABLE_TRUE 0x00000001 +#define NVC997_SET_COVERAGE_TO_COLOR_CT_SELECT 6:4 + +#define NVC997_DECOMPRESS_ZETA_SURFACE 0x11fc +#define NVC997_DECOMPRESS_ZETA_SURFACE_Z_ENABLE 0:0 +#define NVC997_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_FALSE 0x00000000 +#define NVC997_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_TRUE 0x00000001 +#define NVC997_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE 4:4 +#define NVC997_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_FALSE 0x00000000 +#define NVC997_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_SCREEN_STATE_MASK 0x1204 +#define NVC997_SET_SCREEN_STATE_MASK_MASK 3:0 + +#define NVC997_SET_ZT_SPARSE 0x1208 +#define NVC997_SET_ZT_SPARSE_ENABLE 0:0 +#define NVC997_SET_ZT_SPARSE_ENABLE_FALSE 0x00000000 +#define NVC997_SET_ZT_SPARSE_ENABLE_TRUE 0x00000001 +#define NVC997_SET_ZT_SPARSE_UNMAPPED_COMPARE 1:1 +#define NVC997_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_UNMAPPED_0 0x00000000 +#define NVC997_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_FAIL_ALWAYS 0x00000001 + +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST 0x1214 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX 15:0 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT 27:16 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 
+#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT 0x1218 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_START_INDEX 15:0 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVC997_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NVC997_SET_CT_SELECT 0x121c +#define NVC997_SET_CT_SELECT_TARGET_COUNT 3:0 +#define NVC997_SET_CT_SELECT_TARGET0 6:4 +#define NVC997_SET_CT_SELECT_TARGET1 9:7 +#define NVC997_SET_CT_SELECT_TARGET2 12:10 +#define NVC997_SET_CT_SELECT_TARGET3 15:13 +#define NVC997_SET_CT_SELECT_TARGET4 18:16 +#define NVC997_SET_CT_SELECT_TARGET5 21:19 +#define NVC997_SET_CT_SELECT_TARGET6 24:22 +#define NVC997_SET_CT_SELECT_TARGET7 27:25 + +#define NVC997_SET_COMPRESSION_THRESHOLD 0x1220 +#define NVC997_SET_COMPRESSION_THRESHOLD_SAMPLES 3:0 +#define NVC997_SET_COMPRESSION_THRESHOLD_SAMPLES__0 0x00000000 +#define NVC997_SET_COMPRESSION_THRESHOLD_SAMPLES__1 0x00000001 +#define NVC997_SET_COMPRESSION_THRESHOLD_SAMPLES__2 0x00000002 +#define NVC997_SET_COMPRESSION_THRESHOLD_SAMPLES__4 0x00000003 +#define NVC997_SET_COMPRESSION_THRESHOLD_SAMPLES__8 0x00000004 +#define NVC997_SET_COMPRESSION_THRESHOLD_SAMPLES__16 0x00000005 +#define NVC997_SET_COMPRESSION_THRESHOLD_SAMPLES__32 0x00000006 +#define NVC997_SET_COMPRESSION_THRESHOLD_SAMPLES__64 0x00000007 +#define NVC997_SET_COMPRESSION_THRESHOLD_SAMPLES__128 0x00000008 +#define NVC997_SET_COMPRESSION_THRESHOLD_SAMPLES__256 0x00000009 +#define NVC997_SET_COMPRESSION_THRESHOLD_SAMPLES__512 0x0000000A +#define NVC997_SET_COMPRESSION_THRESHOLD_SAMPLES__1024 0x0000000B +#define 
NVC997_SET_COMPRESSION_THRESHOLD_SAMPLES__2048 0x0000000C + +#define NVC997_SET_PIXEL_SHADER_INTERLOCK_CONTROL 0x1224 +#define NVC997_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE 1:0 +#define NVC997_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_NO_CONFLICT_DETECT 0x00000000 +#define NVC997_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_SAMPLE 0x00000001 +#define NVC997_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_PIXEL 0x00000002 +#define NVC997_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE 2:2 +#define NVC997_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_16X16 0x00000000 +#define NVC997_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_8X8 0x00000001 +#define NVC997_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER 3:3 +#define NVC997_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_ORDERED 0x00000000 +#define NVC997_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_UNORDERED 0x00000001 + +#define NVC997_SET_ZT_SIZE_A 0x1228 +#define NVC997_SET_ZT_SIZE_A_WIDTH 27:0 + +#define NVC997_SET_ZT_SIZE_B 0x122c +#define NVC997_SET_ZT_SIZE_B_HEIGHT 17:0 + +#define NVC997_SET_ZT_SIZE_C 0x1230 +#define NVC997_SET_ZT_SIZE_C_THIRD_DIMENSION 15:0 +#define NVC997_SET_ZT_SIZE_C_CONTROL 16:16 +#define NVC997_SET_ZT_SIZE_C_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000 +#define NVC997_SET_ZT_SIZE_C_CONTROL_ARRAY_SIZE_IS_ONE 0x00000001 + +#define NVC997_SET_SAMPLER_BINDING 0x1234 +#define NVC997_SET_SAMPLER_BINDING_V 0:0 +#define NVC997_SET_SAMPLER_BINDING_V_INDEPENDENTLY 0x00000000 +#define NVC997_SET_SAMPLER_BINDING_V_VIA_HEADER_BINDING 0x00000001 + +#define NVC997_DRAW_AUTO 0x123c +#define NVC997_DRAW_AUTO_BYTE_COUNT 31:0 + +#define NVC997_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK(i) (0x1240+(i)*4) +#define NVC997_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK_V 31:0 + +#define NVC997_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE 0x1260 +#define NVC997_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_INDEX 7:0 +#define NVC997_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_VALUE 23:8 + +#define NVC997_SET_BACK_END_COPY_A 0x1264 +#define NVC997_SET_BACK_END_COPY_A_DWORDS 7:0 +#define NVC997_SET_BACK_END_COPY_A_SATURATE32_ENABLE 8:8 +#define NVC997_SET_BACK_END_COPY_A_SATURATE32_ENABLE_FALSE 0x00000000 +#define NVC997_SET_BACK_END_COPY_A_SATURATE32_ENABLE_TRUE 0x00000001 +#define NVC997_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE 12:12 +#define NVC997_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_FALSE 0x00000000 +#define NVC997_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_BACK_END_COPY_B 0x1268 +#define NVC997_SET_BACK_END_COPY_B_SRC_ADDRESS_UPPER 7:0 + +#define NVC997_SET_BACK_END_COPY_C 0x126c +#define NVC997_SET_BACK_END_COPY_C_SRC_ADDRESS_LOWER 31:0 + +#define NVC997_SET_BACK_END_COPY_D 0x1270 +#define NVC997_SET_BACK_END_COPY_D_DEST_ADDRESS_UPPER 7:0 + +#define NVC997_SET_BACK_END_COPY_E 0x1274 +#define NVC997_SET_BACK_END_COPY_E_DEST_ADDRESS_LOWER 31:0 + +#define NVC997_SET_CIRCULAR_BUFFER_SIZE 0x1280 +#define NVC997_SET_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 19:0 + +#define NVC997_SET_VTG_REGISTER_WATERMARKS 0x1284 +#define NVC997_SET_VTG_REGISTER_WATERMARKS_LOW 15:0 +#define NVC997_SET_VTG_REGISTER_WATERMARKS_HIGH 31:16 + +#define NVC997_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVC997_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define 
NVC997_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC997_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC997_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS 0x1290 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY 5:4 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC997_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE 0x12a4 +#define NVC997_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE_V 31:0 + +#define NVC997_CLEAR_ZCULL_REGION 0x12c8 +#define NVC997_CLEAR_ZCULL_REGION_Z_ENABLE 0:0 +#define NVC997_CLEAR_ZCULL_REGION_Z_ENABLE_FALSE 0x00000000 +#define NVC997_CLEAR_ZCULL_REGION_Z_ENABLE_TRUE 0x00000001 +#define NVC997_CLEAR_ZCULL_REGION_STENCIL_ENABLE 4:4 +#define NVC997_CLEAR_ZCULL_REGION_STENCIL_ENABLE_FALSE 0x00000000 +#define NVC997_CLEAR_ZCULL_REGION_STENCIL_ENABLE_TRUE 0x00000001 +#define NVC997_CLEAR_ZCULL_REGION_USE_CLEAR_RECT 1:1 +#define NVC997_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_FALSE 0x00000000 +#define NVC997_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_TRUE 0x00000001 +#define NVC997_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX 2:2 +#define NVC997_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_FALSE 0x00000000 +#define NVC997_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_TRUE 0x00000001 +#define NVC997_CLEAR_ZCULL_REGION_RT_ARRAY_INDEX 20:5 +#define NVC997_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE 3:3 +#define NVC997_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_FALSE 0x00000000 +#define NVC997_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_TRUE 0x00000001 + +#define NVC997_SET_DEPTH_TEST 0x12cc +#define NVC997_SET_DEPTH_TEST_ENABLE 0:0 +#define NVC997_SET_DEPTH_TEST_ENABLE_FALSE 0x00000000 +#define NVC997_SET_DEPTH_TEST_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_FILL_MODE 0x12d0 +#define NVC997_SET_FILL_MODE_V 31:0 +#define NVC997_SET_FILL_MODE_V_POINT 0x00000001 +#define NVC997_SET_FILL_MODE_V_WIREFRAME 0x00000002 +#define NVC997_SET_FILL_MODE_V_SOLID 0x00000003 + +#define NVC997_SET_SHADE_MODE 0x12d4 +#define NVC997_SET_SHADE_MODE_V 31:0 +#define NVC997_SET_SHADE_MODE_V_FLAT 0x00000001 +#define NVC997_SET_SHADE_MODE_V_GOURAUD 0x00000002 +#define NVC997_SET_SHADE_MODE_V_OGL_FLAT 0x00001D00 +#define NVC997_SET_SHADE_MODE_V_OGL_SMOOTH 0x00001D01 + +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS 0x12d8 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY 5:4 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS 0x12dc +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY 5:4 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVC997_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define 
NVC997_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL 0x12e0 +#define NVC997_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT 3:0 +#define NVC997_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1 0x00000000 +#define NVC997_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_2X2 0x00000001 +#define NVC997_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1_VIRTUAL_SAMPLES 0x00000002 + +#define NVC997_SET_BLEND_STATE_PER_TARGET 0x12e4 +#define NVC997_SET_BLEND_STATE_PER_TARGET_ENABLE 0:0 +#define NVC997_SET_BLEND_STATE_PER_TARGET_ENABLE_FALSE 0x00000000 +#define NVC997_SET_BLEND_STATE_PER_TARGET_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_DEPTH_WRITE 0x12e8 +#define NVC997_SET_DEPTH_WRITE_ENABLE 0:0 +#define NVC997_SET_DEPTH_WRITE_ENABLE_FALSE 0x00000000 +#define NVC997_SET_DEPTH_WRITE_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_ALPHA_TEST 0x12ec +#define NVC997_SET_ALPHA_TEST_ENABLE 0:0 +#define NVC997_SET_ALPHA_TEST_ENABLE_FALSE 0x00000000 +#define NVC997_SET_ALPHA_TEST_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_INLINE_INDEX4X8_ALIGN 0x1300 +#define NVC997_SET_INLINE_INDEX4X8_ALIGN_COUNT 29:0 +#define NVC997_SET_INLINE_INDEX4X8_ALIGN_START 31:30 + +#define NVC997_DRAW_INLINE_INDEX4X8 0x1304 +#define NVC997_DRAW_INLINE_INDEX4X8_INDEX0 7:0 +#define NVC997_DRAW_INLINE_INDEX4X8_INDEX1 15:8 +#define NVC997_DRAW_INLINE_INDEX4X8_INDEX2 23:16 +#define NVC997_DRAW_INLINE_INDEX4X8_INDEX3 31:24 + +#define NVC997_D3D_SET_CULL_MODE 0x1308 +#define NVC997_D3D_SET_CULL_MODE_V 31:0 +#define NVC997_D3D_SET_CULL_MODE_V_NONE 0x00000001 +#define NVC997_D3D_SET_CULL_MODE_V_CW 0x00000002 +#define NVC997_D3D_SET_CULL_MODE_V_CCW 0x00000003 + +#define NVC997_SET_DEPTH_FUNC 0x130c +#define NVC997_SET_DEPTH_FUNC_V 31:0 +#define NVC997_SET_DEPTH_FUNC_V_OGL_NEVER 0x00000200 +#define NVC997_SET_DEPTH_FUNC_V_OGL_LESS 0x00000201 +#define NVC997_SET_DEPTH_FUNC_V_OGL_EQUAL 0x00000202 +#define NVC997_SET_DEPTH_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVC997_SET_DEPTH_FUNC_V_OGL_GREATER 0x00000204 +#define NVC997_SET_DEPTH_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVC997_SET_DEPTH_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVC997_SET_DEPTH_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVC997_SET_DEPTH_FUNC_V_D3D_NEVER 0x00000001 +#define NVC997_SET_DEPTH_FUNC_V_D3D_LESS 0x00000002 +#define NVC997_SET_DEPTH_FUNC_V_D3D_EQUAL 0x00000003 +#define NVC997_SET_DEPTH_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVC997_SET_DEPTH_FUNC_V_D3D_GREATER 0x00000005 +#define NVC997_SET_DEPTH_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVC997_SET_DEPTH_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVC997_SET_DEPTH_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVC997_SET_ALPHA_REF 0x1310 +#define NVC997_SET_ALPHA_REF_V 31:0 + +#define NVC997_SET_ALPHA_FUNC 0x1314 +#define NVC997_SET_ALPHA_FUNC_V 31:0 +#define NVC997_SET_ALPHA_FUNC_V_OGL_NEVER 0x00000200 +#define NVC997_SET_ALPHA_FUNC_V_OGL_LESS 0x00000201 +#define NVC997_SET_ALPHA_FUNC_V_OGL_EQUAL 0x00000202 +#define NVC997_SET_ALPHA_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVC997_SET_ALPHA_FUNC_V_OGL_GREATER 0x00000204 +#define NVC997_SET_ALPHA_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVC997_SET_ALPHA_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVC997_SET_ALPHA_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVC997_SET_ALPHA_FUNC_V_D3D_NEVER 0x00000001 +#define NVC997_SET_ALPHA_FUNC_V_D3D_LESS 0x00000002 +#define NVC997_SET_ALPHA_FUNC_V_D3D_EQUAL 0x00000003 +#define NVC997_SET_ALPHA_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVC997_SET_ALPHA_FUNC_V_D3D_GREATER 0x00000005 +#define 
NVC997_SET_ALPHA_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVC997_SET_ALPHA_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVC997_SET_ALPHA_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVC997_SET_DRAW_AUTO_STRIDE 0x1318 +#define NVC997_SET_DRAW_AUTO_STRIDE_V 11:0 + +#define NVC997_SET_BLEND_CONST_RED 0x131c +#define NVC997_SET_BLEND_CONST_RED_V 31:0 + +#define NVC997_SET_BLEND_CONST_GREEN 0x1320 +#define NVC997_SET_BLEND_CONST_GREEN_V 31:0 + +#define NVC997_SET_BLEND_CONST_BLUE 0x1324 +#define NVC997_SET_BLEND_CONST_BLUE_V 31:0 + +#define NVC997_SET_BLEND_CONST_ALPHA 0x1328 +#define NVC997_SET_BLEND_CONST_ALPHA_V 31:0 + +#define NVC997_INVALIDATE_SAMPLER_CACHE 0x1330 +#define NVC997_INVALIDATE_SAMPLER_CACHE_LINES 0:0 +#define NVC997_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000 +#define NVC997_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001 +#define NVC997_INVALIDATE_SAMPLER_CACHE_TAG 25:4 + +#define NVC997_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334 +#define NVC997_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0 +#define NVC997_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000 +#define NVC997_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001 +#define NVC997_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4 + +#define NVC997_INVALIDATE_TEXTURE_DATA_CACHE 0x1338 +#define NVC997_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0 +#define NVC997_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000 +#define NVC997_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001 +#define NVC997_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4 + +#define NVC997_SET_BLEND_SEPARATE_FOR_ALPHA 0x133c +#define NVC997_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE 0:0 +#define NVC997_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000 +#define NVC997_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_BLEND_COLOR_OP 0x1340 +#define NVC997_SET_BLEND_COLOR_OP_V 31:0 +#define NVC997_SET_BLEND_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC997_SET_BLEND_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC997_SET_BLEND_COLOR_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC997_SET_BLEND_COLOR_OP_V_OGL_MIN 0x00008007 +#define NVC997_SET_BLEND_COLOR_OP_V_OGL_MAX 0x00008008 +#define NVC997_SET_BLEND_COLOR_OP_V_D3D_ADD 0x00000001 +#define NVC997_SET_BLEND_COLOR_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC997_SET_BLEND_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVC997_SET_BLEND_COLOR_OP_V_D3D_MIN 0x00000004 +#define NVC997_SET_BLEND_COLOR_OP_V_D3D_MAX 0x00000005 + +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF 0x1344 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V 31:0 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define 
NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC997_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC997_SET_BLEND_COLOR_DEST_COEFF 0x1348 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V 31:0 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 
+#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC997_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC997_SET_BLEND_ALPHA_OP 0x134c +#define NVC997_SET_BLEND_ALPHA_OP_V 31:0 +#define NVC997_SET_BLEND_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC997_SET_BLEND_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC997_SET_BLEND_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC997_SET_BLEND_ALPHA_OP_V_OGL_MIN 0x00008007 +#define NVC997_SET_BLEND_ALPHA_OP_V_OGL_MAX 0x00008008 +#define NVC997_SET_BLEND_ALPHA_OP_V_D3D_ADD 0x00000001 +#define NVC997_SET_BLEND_ALPHA_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC997_SET_BLEND_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVC997_SET_BLEND_ALPHA_OP_V_D3D_MIN 0x00000004 +#define NVC997_SET_BLEND_ALPHA_OP_V_D3D_MAX 0x00000005 + +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF 0x1350 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V 31:0 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define 
NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC997_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC997_SET_GLOBAL_COLOR_KEY 0x1354 +#define NVC997_SET_GLOBAL_COLOR_KEY_ENABLE 0:0 +#define NVC997_SET_GLOBAL_COLOR_KEY_ENABLE_FALSE 0x00000000 +#define NVC997_SET_GLOBAL_COLOR_KEY_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF 0x1358 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V 31:0 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define 
NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC997_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC997_SET_SINGLE_ROP_CONTROL 0x135c +#define NVC997_SET_SINGLE_ROP_CONTROL_ENABLE 0:0 +#define NVC997_SET_SINGLE_ROP_CONTROL_ENABLE_FALSE 0x00000000 +#define NVC997_SET_SINGLE_ROP_CONTROL_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_BLEND(i) (0x1360+(i)*4) +#define NVC997_SET_BLEND_ENABLE 0:0 +#define NVC997_SET_BLEND_ENABLE_FALSE 0x00000000 +#define NVC997_SET_BLEND_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_STENCIL_TEST 0x1380 +#define NVC997_SET_STENCIL_TEST_ENABLE 0:0 +#define NVC997_SET_STENCIL_TEST_ENABLE_FALSE 0x00000000 +#define NVC997_SET_STENCIL_TEST_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_STENCIL_OP_FAIL 0x1384 +#define NVC997_SET_STENCIL_OP_FAIL_V 31:0 +#define NVC997_SET_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00 +#define NVC997_SET_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000 +#define NVC997_SET_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01 +#define NVC997_SET_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02 +#define NVC997_SET_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03 +#define NVC997_SET_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A +#define NVC997_SET_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507 +#define NVC997_SET_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508 +#define NVC997_SET_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001 +#define NVC997_SET_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002 +#define NVC997_SET_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003 +#define NVC997_SET_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004 +#define NVC997_SET_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005 +#define NVC997_SET_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006 +#define NVC997_SET_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007 +#define NVC997_SET_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008 + +#define NVC997_SET_STENCIL_OP_ZFAIL 0x1388 +#define NVC997_SET_STENCIL_OP_ZFAIL_V 31:0 +#define NVC997_SET_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00 +#define NVC997_SET_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000 +#define NVC997_SET_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01 +#define NVC997_SET_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02 +#define NVC997_SET_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03 +#define NVC997_SET_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A +#define NVC997_SET_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507 +#define NVC997_SET_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508 +#define NVC997_SET_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001 +#define NVC997_SET_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002 +#define NVC997_SET_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003 +#define NVC997_SET_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004 +#define NVC997_SET_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005 +#define NVC997_SET_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006 +#define NVC997_SET_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007 +#define NVC997_SET_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008 + +#define NVC997_SET_STENCIL_OP_ZPASS 0x138c +#define NVC997_SET_STENCIL_OP_ZPASS_V 31:0 +#define NVC997_SET_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00 +#define NVC997_SET_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000 +#define NVC997_SET_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01 +#define NVC997_SET_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02 +#define 
NVC997_SET_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03 +#define NVC997_SET_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A +#define NVC997_SET_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507 +#define NVC997_SET_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508 +#define NVC997_SET_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001 +#define NVC997_SET_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002 +#define NVC997_SET_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003 +#define NVC997_SET_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004 +#define NVC997_SET_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005 +#define NVC997_SET_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006 +#define NVC997_SET_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007 +#define NVC997_SET_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008 + +#define NVC997_SET_STENCIL_FUNC 0x1390 +#define NVC997_SET_STENCIL_FUNC_V 31:0 +#define NVC997_SET_STENCIL_FUNC_V_OGL_NEVER 0x00000200 +#define NVC997_SET_STENCIL_FUNC_V_OGL_LESS 0x00000201 +#define NVC997_SET_STENCIL_FUNC_V_OGL_EQUAL 0x00000202 +#define NVC997_SET_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVC997_SET_STENCIL_FUNC_V_OGL_GREATER 0x00000204 +#define NVC997_SET_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVC997_SET_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVC997_SET_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVC997_SET_STENCIL_FUNC_V_D3D_NEVER 0x00000001 +#define NVC997_SET_STENCIL_FUNC_V_D3D_LESS 0x00000002 +#define NVC997_SET_STENCIL_FUNC_V_D3D_EQUAL 0x00000003 +#define NVC997_SET_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVC997_SET_STENCIL_FUNC_V_D3D_GREATER 0x00000005 +#define NVC997_SET_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVC997_SET_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVC997_SET_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVC997_SET_STENCIL_FUNC_REF 0x1394 +#define NVC997_SET_STENCIL_FUNC_REF_V 7:0 + +#define NVC997_SET_STENCIL_FUNC_MASK 0x1398 +#define NVC997_SET_STENCIL_FUNC_MASK_V 7:0 + +#define NVC997_SET_STENCIL_MASK 0x139c +#define NVC997_SET_STENCIL_MASK_V 7:0 + +#define NVC997_SET_DRAW_AUTO_START 0x13a4 +#define NVC997_SET_DRAW_AUTO_START_BYTE_COUNT 31:0 + +#define NVC997_SET_PS_SATURATE 0x13a8 +#define NVC997_SET_PS_SATURATE_OUTPUT0 0:0 +#define NVC997_SET_PS_SATURATE_OUTPUT0_FALSE 0x00000000 +#define NVC997_SET_PS_SATURATE_OUTPUT0_TRUE 0x00000001 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE0 1:1 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE0_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE0_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC997_SET_PS_SATURATE_OUTPUT1 4:4 +#define NVC997_SET_PS_SATURATE_OUTPUT1_FALSE 0x00000000 +#define NVC997_SET_PS_SATURATE_OUTPUT1_TRUE 0x00000001 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE1 5:5 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE1_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE1_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC997_SET_PS_SATURATE_OUTPUT2 8:8 +#define NVC997_SET_PS_SATURATE_OUTPUT2_FALSE 0x00000000 +#define NVC997_SET_PS_SATURATE_OUTPUT2_TRUE 0x00000001 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE2 9:9 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE2_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE2_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC997_SET_PS_SATURATE_OUTPUT3 12:12 +#define NVC997_SET_PS_SATURATE_OUTPUT3_FALSE 0x00000000 +#define NVC997_SET_PS_SATURATE_OUTPUT3_TRUE 0x00000001 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE3 13:13 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE3_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE3_MINUS_ONE_TO_PLUS_ONE 0x00000001 
+#define NVC997_SET_PS_SATURATE_OUTPUT4 16:16 +#define NVC997_SET_PS_SATURATE_OUTPUT4_FALSE 0x00000000 +#define NVC997_SET_PS_SATURATE_OUTPUT4_TRUE 0x00000001 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE4 17:17 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE4_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE4_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC997_SET_PS_SATURATE_OUTPUT5 20:20 +#define NVC997_SET_PS_SATURATE_OUTPUT5_FALSE 0x00000000 +#define NVC997_SET_PS_SATURATE_OUTPUT5_TRUE 0x00000001 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE5 21:21 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE5_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE5_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC997_SET_PS_SATURATE_OUTPUT6 24:24 +#define NVC997_SET_PS_SATURATE_OUTPUT6_FALSE 0x00000000 +#define NVC997_SET_PS_SATURATE_OUTPUT6_TRUE 0x00000001 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE6 25:25 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE6_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE6_MINUS_ONE_TO_PLUS_ONE 0x00000001 +#define NVC997_SET_PS_SATURATE_OUTPUT7 28:28 +#define NVC997_SET_PS_SATURATE_OUTPUT7_FALSE 0x00000000 +#define NVC997_SET_PS_SATURATE_OUTPUT7_TRUE 0x00000001 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE7 29:29 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE7_ZERO_TO_PLUS_ONE 0x00000000 +#define NVC997_SET_PS_SATURATE_CLAMP_RANGE7_MINUS_ONE_TO_PLUS_ONE 0x00000001 + +#define NVC997_SET_WINDOW_ORIGIN 0x13ac +#define NVC997_SET_WINDOW_ORIGIN_MODE 0:0 +#define NVC997_SET_WINDOW_ORIGIN_MODE_UPPER_LEFT 0x00000000 +#define NVC997_SET_WINDOW_ORIGIN_MODE_LOWER_LEFT 0x00000001 +#define NVC997_SET_WINDOW_ORIGIN_FLIP_Y 4:4 +#define NVC997_SET_WINDOW_ORIGIN_FLIP_Y_FALSE 0x00000000 +#define NVC997_SET_WINDOW_ORIGIN_FLIP_Y_TRUE 0x00000001 + +#define NVC997_SET_LINE_WIDTH_FLOAT 0x13b0 +#define NVC997_SET_LINE_WIDTH_FLOAT_V 31:0 + +#define NVC997_SET_ALIASED_LINE_WIDTH_FLOAT 0x13b4 +#define NVC997_SET_ALIASED_LINE_WIDTH_FLOAT_V 31:0 + +#define NVC997_SET_LINE_MULTISAMPLE_OVERRIDE 0x1418 +#define NVC997_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE 0:0 +#define NVC997_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_FALSE 0x00000000 +#define NVC997_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_TRUE 0x00000001 + +#define NVC997_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424 +#define NVC997_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0 +#define NVC997_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC997_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC997_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4 + +#define NVC997_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x1428 +#define NVC997_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0 +#define NVC997_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC997_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC997_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4 + +#define NVC997_SET_GLOBAL_BASE_VERTEX_INDEX 0x1434 +#define NVC997_SET_GLOBAL_BASE_VERTEX_INDEX_V 31:0 + +#define NVC997_SET_GLOBAL_BASE_INSTANCE_INDEX 0x1438 +#define NVC997_SET_GLOBAL_BASE_INSTANCE_INDEX_V 31:0 + +#define NVC997_SET_PS_WARP_WATERMARKS 0x1450 +#define NVC997_SET_PS_WARP_WATERMARKS_LOW 15:0 +#define NVC997_SET_PS_WARP_WATERMARKS_HIGH 31:16 + +#define NVC997_SET_PS_REGISTER_WATERMARKS 0x1454 +#define NVC997_SET_PS_REGISTER_WATERMARKS_LOW 15:0 +#define NVC997_SET_PS_REGISTER_WATERMARKS_HIGH 31:16 + +#define NVC997_STORE_ZCULL 0x1464 +#define NVC997_STORE_ZCULL_V 0:0 + +#define 
NVC997_SET_ITERATED_BLEND_CONSTANT_RED(j) (0x1480+(j)*16) +#define NVC997_SET_ITERATED_BLEND_CONSTANT_RED_V 15:0 + +#define NVC997_SET_ITERATED_BLEND_CONSTANT_GREEN(j) (0x1484+(j)*16) +#define NVC997_SET_ITERATED_BLEND_CONSTANT_GREEN_V 15:0 + +#define NVC997_SET_ITERATED_BLEND_CONSTANT_BLUE(j) (0x1488+(j)*16) +#define NVC997_SET_ITERATED_BLEND_CONSTANT_BLUE_V 15:0 + +#define NVC997_LOAD_ZCULL 0x1500 +#define NVC997_LOAD_ZCULL_V 0:0 + +#define NVC997_SET_SURFACE_CLIP_ID_HEIGHT 0x1504 +#define NVC997_SET_SURFACE_CLIP_ID_HEIGHT_V 31:0 + +#define NVC997_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL 0x1508 +#define NVC997_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMIN 15:0 +#define NVC997_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMAX 31:16 + +#define NVC997_SET_CLIP_ID_CLEAR_RECT_VERTICAL 0x150c +#define NVC997_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMIN 15:0 +#define NVC997_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMAX 31:16 + +#define NVC997_SET_USER_CLIP_ENABLE 0x1510 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE0 0:0 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE0_FALSE 0x00000000 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE0_TRUE 0x00000001 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE1 1:1 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE1_FALSE 0x00000000 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE1_TRUE 0x00000001 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE2 2:2 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE2_FALSE 0x00000000 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE2_TRUE 0x00000001 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE3 3:3 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE3_FALSE 0x00000000 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE3_TRUE 0x00000001 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE4 4:4 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE4_FALSE 0x00000000 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE4_TRUE 0x00000001 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE5 5:5 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE5_FALSE 0x00000000 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE5_TRUE 0x00000001 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE6 6:6 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE6_FALSE 0x00000000 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE6_TRUE 0x00000001 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE7 7:7 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE7_FALSE 0x00000000 +#define NVC997_SET_USER_CLIP_ENABLE_PLANE7_TRUE 0x00000001 + +#define NVC997_SET_ZPASS_PIXEL_COUNT 0x1514 +#define NVC997_SET_ZPASS_PIXEL_COUNT_ENABLE 0:0 +#define NVC997_SET_ZPASS_PIXEL_COUNT_ENABLE_FALSE 0x00000000 +#define NVC997_SET_ZPASS_PIXEL_COUNT_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_POINT_SIZE 0x1518 +#define NVC997_SET_POINT_SIZE_V 31:0 + +#define NVC997_SET_ZCULL_STATS 0x151c +#define NVC997_SET_ZCULL_STATS_ENABLE 0:0 +#define NVC997_SET_ZCULL_STATS_ENABLE_FALSE 0x00000000 +#define NVC997_SET_ZCULL_STATS_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_POINT_SPRITE 0x1520 +#define NVC997_SET_POINT_SPRITE_ENABLE 0:0 +#define NVC997_SET_POINT_SPRITE_ENABLE_FALSE 0x00000000 +#define NVC997_SET_POINT_SPRITE_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_SHADER_EXCEPTIONS 0x1528 +#define NVC997_SET_SHADER_EXCEPTIONS_ENABLE 0:0 +#define NVC997_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000 +#define NVC997_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001 + +#define NVC997_CLEAR_REPORT_VALUE 0x1530 +#define NVC997_CLEAR_REPORT_VALUE_TYPE 4:0 +#define NVC997_CLEAR_REPORT_VALUE_TYPE_DA_VERTICES_GENERATED 0x00000012 +#define NVC997_CLEAR_REPORT_VALUE_TYPE_DA_PRIMITIVES_GENERATED 0x00000013 +#define NVC997_CLEAR_REPORT_VALUE_TYPE_VS_INVOCATIONS 0x00000015 +#define 
NVC997_CLEAR_REPORT_VALUE_TYPE_TI_INVOCATIONS 0x00000016 +#define NVC997_CLEAR_REPORT_VALUE_TYPE_TS_INVOCATIONS 0x00000017 +#define NVC997_CLEAR_REPORT_VALUE_TYPE_TS_PRIMITIVES_GENERATED 0x00000018 +#define NVC997_CLEAR_REPORT_VALUE_TYPE_GS_INVOCATIONS 0x0000001A +#define NVC997_CLEAR_REPORT_VALUE_TYPE_GS_PRIMITIVES_GENERATED 0x0000001B +#define NVC997_CLEAR_REPORT_VALUE_TYPE_VTG_PRIMITIVES_OUT 0x0000001F +#define NVC997_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_SUCCEEDED 0x00000010 +#define NVC997_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_NEEDED 0x00000011 +#define NVC997_CLEAR_REPORT_VALUE_TYPE_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000003 +#define NVC997_CLEAR_REPORT_VALUE_TYPE_CLIPPER_INVOCATIONS 0x0000001C +#define NVC997_CLEAR_REPORT_VALUE_TYPE_CLIPPER_PRIMITIVES_GENERATED 0x0000001D +#define NVC997_CLEAR_REPORT_VALUE_TYPE_ZCULL_STATS 0x00000002 +#define NVC997_CLEAR_REPORT_VALUE_TYPE_PS_INVOCATIONS 0x0000001E +#define NVC997_CLEAR_REPORT_VALUE_TYPE_ZPASS_PIXEL_CNT 0x00000001 +#define NVC997_CLEAR_REPORT_VALUE_TYPE_ALPHA_BETA_CLOCKS 0x00000004 +#define NVC997_CLEAR_REPORT_VALUE_TYPE_SCG_CLOCKS 0x00000009 + +#define NVC997_SET_ANTI_ALIAS_ENABLE 0x1534 +#define NVC997_SET_ANTI_ALIAS_ENABLE_V 0:0 +#define NVC997_SET_ANTI_ALIAS_ENABLE_V_FALSE 0x00000000 +#define NVC997_SET_ANTI_ALIAS_ENABLE_V_TRUE 0x00000001 + +#define NVC997_SET_ZT_SELECT 0x1538 +#define NVC997_SET_ZT_SELECT_TARGET_COUNT 0:0 + +#define NVC997_SET_ANTI_ALIAS_ALPHA_CONTROL 0x153c +#define NVC997_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE 0:0 +#define NVC997_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_DISABLE 0x00000000 +#define NVC997_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_ENABLE 0x00000001 +#define NVC997_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE 4:4 +#define NVC997_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_DISABLE 0x00000000 +#define NVC997_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_ENABLE 0x00000001 + +#define NVC997_SET_RENDER_ENABLE_A 0x1550 +#define NVC997_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC997_SET_RENDER_ENABLE_B 0x1554 +#define NVC997_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC997_SET_RENDER_ENABLE_C 0x1558 +#define NVC997_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC997_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC997_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC997_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC997_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC997_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC997_SET_TEX_SAMPLER_POOL_A 0x155c +#define NVC997_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0 + +#define NVC997_SET_TEX_SAMPLER_POOL_B 0x1560 +#define NVC997_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC997_SET_TEX_SAMPLER_POOL_C 0x1564 +#define NVC997_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0 + +#define NVC997_SET_SLOPE_SCALE_DEPTH_BIAS 0x156c +#define NVC997_SET_SLOPE_SCALE_DEPTH_BIAS_V 31:0 + +#define NVC997_SET_ANTI_ALIASED_LINE 0x1570 +#define NVC997_SET_ANTI_ALIASED_LINE_ENABLE 0:0 +#define NVC997_SET_ANTI_ALIASED_LINE_ENABLE_FALSE 0x00000000 +#define NVC997_SET_ANTI_ALIASED_LINE_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_TEX_HEADER_POOL_A 0x1574 +#define NVC997_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0 + +#define NVC997_SET_TEX_HEADER_POOL_B 0x1578 +#define NVC997_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC997_SET_TEX_HEADER_POOL_C 0x157c +#define NVC997_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0 + +#define NVC997_SET_ACTIVE_ZCULL_REGION 0x1590 +#define 
+#define NVC997_SET_ACTIVE_ZCULL_REGION 0x1590
+#define NVC997_SET_ACTIVE_ZCULL_REGION_ID 5:0
+
+#define NVC997_SET_TWO_SIDED_STENCIL_TEST 0x1594
+#define NVC997_SET_TWO_SIDED_STENCIL_TEST_ENABLE 0:0
+#define NVC997_SET_TWO_SIDED_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NVC997_SET_TWO_SIDED_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_BACK_STENCIL_OP_FAIL 0x1598
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V 31:0
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NVC997_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL 0x159c
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V 31:0
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVC997_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS 0x15a0
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V 31:0
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NVC997_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NVC997_SET_BACK_STENCIL_FUNC 0x15a4
+#define NVC997_SET_BACK_STENCIL_FUNC_V 31:0
+#define NVC997_SET_BACK_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NVC997_SET_BACK_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NVC997_SET_BACK_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NVC997_SET_BACK_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVC997_SET_BACK_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NVC997_SET_BACK_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVC997_SET_BACK_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVC997_SET_BACK_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVC997_SET_BACK_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NVC997_SET_BACK_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NVC997_SET_BACK_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NVC997_SET_BACK_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVC997_SET_BACK_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVC997_SET_BACK_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVC997_SET_BACK_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVC997_SET_BACK_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVC997_SET_SRGB_WRITE 0x15b8
+#define NVC997_SET_SRGB_WRITE_ENABLE 0:0
+#define NVC997_SET_SRGB_WRITE_ENABLE_FALSE 0x00000000
+#define NVC997_SET_SRGB_WRITE_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_DEPTH_BIAS 0x15bc
+#define NVC997_SET_DEPTH_BIAS_V 31:0
+
+#define NVC997_SET_ZCULL_REGION_FORMAT 0x15c8
+#define NVC997_SET_ZCULL_REGION_FORMAT_TYPE 3:0
+#define NVC997_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X4 0x00000000
+#define NVC997_SET_ZCULL_REGION_FORMAT_TYPE_ZS_4X4 0x00000001
+#define NVC997_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X2 0x00000002
+#define NVC997_SET_ZCULL_REGION_FORMAT_TYPE_Z_2X4 0x00000003
+#define NVC997_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X8_4X4 0x00000004
+#define NVC997_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_4X2 0x00000005
+#define NVC997_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_2X4 0x00000006
+#define NVC997_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X16_4X8 0x00000007
+#define NVC997_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_2X2 0x00000008
+#define NVC997_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_4X2 0x00000009
+#define NVC997_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_2X4 0x0000000A
+#define NVC997_SET_ZCULL_REGION_FORMAT_TYPE_ZS_8X8_2X2 0x0000000B
+#define NVC997_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_1X1 0x0000000C
+
+#define NVC997_SET_RT_LAYER 0x15cc
+#define NVC997_SET_RT_LAYER_V 15:0
+#define NVC997_SET_RT_LAYER_CONTROL 16:16
+#define NVC997_SET_RT_LAYER_CONTROL_V_SELECTS_LAYER 0x00000000
+#define NVC997_SET_RT_LAYER_CONTROL_GEOMETRY_SHADER_SELECTS_LAYER 0x00000001
+
+#define NVC997_SET_ANTI_ALIAS 0x15d0
+#define NVC997_SET_ANTI_ALIAS_SAMPLES 3:0
+#define NVC997_SET_ANTI_ALIAS_SAMPLES_MODE_1X1 0x00000000
+#define NVC997_SET_ANTI_ALIAS_SAMPLES_MODE_2X1 0x00000001
+#define NVC997_SET_ANTI_ALIAS_SAMPLES_MODE_2X2 0x00000002
+#define NVC997_SET_ANTI_ALIAS_SAMPLES_MODE_4X2 0x00000003
+#define NVC997_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVC997_SET_ANTI_ALIAS_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVC997_SET_ANTI_ALIAS_SAMPLES_MODE_4X4 0x00000006
+#define NVC997_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_4 0x00000008
+#define NVC997_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_12 0x00000009
+#define NVC997_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_8 0x0000000A
+#define NVC997_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_24 0x0000000B
+
+#define NVC997_SET_EDGE_FLAG 0x15e4
+#define NVC997_SET_EDGE_FLAG_V 0:0
+#define NVC997_SET_EDGE_FLAG_V_FALSE 0x00000000
+#define NVC997_SET_EDGE_FLAG_V_TRUE 0x00000001
+
+#define NVC997_DRAW_INLINE_INDEX 0x15e8
+#define NVC997_DRAW_INLINE_INDEX_V 31:0
+
+#define NVC997_SET_INLINE_INDEX2X16_ALIGN 0x15ec
+#define NVC997_SET_INLINE_INDEX2X16_ALIGN_COUNT 30:0
+#define NVC997_SET_INLINE_INDEX2X16_ALIGN_START_ODD 31:31
+#define NVC997_SET_INLINE_INDEX2X16_ALIGN_START_ODD_FALSE 0x00000000
+#define NVC997_SET_INLINE_INDEX2X16_ALIGN_START_ODD_TRUE 0x00000001
+
+#define NVC997_DRAW_INLINE_INDEX2X16 0x15f0
+#define NVC997_DRAW_INLINE_INDEX2X16_EVEN 15:0
+#define NVC997_DRAW_INLINE_INDEX2X16_ODD 31:16
+
+#define NVC997_SET_VERTEX_GLOBAL_BASE_OFFSET_A 0x15f4
+#define NVC997_SET_VERTEX_GLOBAL_BASE_OFFSET_A_UPPER 7:0
+
+#define NVC997_SET_VERTEX_GLOBAL_BASE_OFFSET_B 0x15f8
+#define NVC997_SET_VERTEX_GLOBAL_BASE_OFFSET_B_LOWER 31:0
+
+#define NVC997_SET_ZCULL_REGION_PIXEL_OFFSET_A 0x15fc
+#define NVC997_SET_ZCULL_REGION_PIXEL_OFFSET_A_WIDTH 15:0
+
+#define NVC997_SET_ZCULL_REGION_PIXEL_OFFSET_B 0x1600
+#define NVC997_SET_ZCULL_REGION_PIXEL_OFFSET_B_HEIGHT 15:0
+
+#define NVC997_SET_POINT_SPRITE_SELECT 0x1604
+#define NVC997_SET_POINT_SPRITE_SELECT_RMODE 1:0
+#define NVC997_SET_POINT_SPRITE_SELECT_RMODE_ZERO 0x00000000
+#define NVC997_SET_POINT_SPRITE_SELECT_RMODE_FROM_R 0x00000001
+#define NVC997_SET_POINT_SPRITE_SELECT_RMODE_FROM_S 0x00000002
+#define NVC997_SET_POINT_SPRITE_SELECT_ORIGIN 2:2
+#define NVC997_SET_POINT_SPRITE_SELECT_ORIGIN_BOTTOM 0x00000000
+#define NVC997_SET_POINT_SPRITE_SELECT_ORIGIN_TOP 0x00000001
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE0 3:3
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE0_PASSTHROUGH 0x00000000
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE0_GENERATE 0x00000001
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE1 4:4
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE1_PASSTHROUGH 0x00000000
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE1_GENERATE 0x00000001
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE2 5:5
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE2_PASSTHROUGH 0x00000000
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE2_GENERATE 0x00000001
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE3 6:6
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE3_PASSTHROUGH 0x00000000
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE3_GENERATE 0x00000001
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE4 7:7
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE4_PASSTHROUGH 0x00000000
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE4_GENERATE 0x00000001
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE5 8:8
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE5_PASSTHROUGH 0x00000000
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE5_GENERATE 0x00000001
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE6 9:9
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE6_PASSTHROUGH 0x00000000
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE6_GENERATE 0x00000001
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE7 10:10
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE7_PASSTHROUGH 0x00000000
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE7_GENERATE 0x00000001
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE8 11:11
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE8_PASSTHROUGH 0x00000000
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE8_GENERATE 0x00000001
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE9 12:12
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE9_PASSTHROUGH 0x00000000
+#define NVC997_SET_POINT_SPRITE_SELECT_TEXTURE9_GENERATE 0x00000001
+
+#define NVC997_SET_ATTRIBUTE_DEFAULT 0x1610
+#define NVC997_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE 0:0
+#define NVC997_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_0001 0x00000000
+#define NVC997_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_1111 0x00000001
+#define NVC997_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR 1:1
+#define NVC997_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0000 0x00000000
+#define NVC997_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0001 0x00000001
+#define NVC997_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR 2:2
+#define NVC997_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0000 0x00000000
+#define NVC997_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0001 0x00000001
+#define NVC997_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE 3:3
+#define NVC997_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0000 0x00000000
+#define NVC997_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0001 0x00000001
+#define NVC997_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0 4:4
+#define NVC997_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_0001 0x00000000
+#define NVC997_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_1111 0x00000001
+#define NVC997_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15 5:5
+#define NVC997_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0000 0x00000000
+#define NVC997_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0001 0x00000001
+
+#define NVC997_END 0x1614
+#define NVC997_END_V 0:0
+
+#define NVC997_BEGIN 0x1618
+#define NVC997_BEGIN_OP 15:0
+#define NVC997_BEGIN_OP_POINTS 0x00000000
+#define NVC997_BEGIN_OP_LINES 0x00000001
+#define NVC997_BEGIN_OP_LINE_LOOP 0x00000002
+#define NVC997_BEGIN_OP_LINE_STRIP 0x00000003
+#define NVC997_BEGIN_OP_TRIANGLES 0x00000004
+#define NVC997_BEGIN_OP_TRIANGLE_STRIP 0x00000005
+#define NVC997_BEGIN_OP_TRIANGLE_FAN 0x00000006
+#define NVC997_BEGIN_OP_QUADS 0x00000007
+#define NVC997_BEGIN_OP_QUAD_STRIP 0x00000008
+#define NVC997_BEGIN_OP_POLYGON 0x00000009
+#define NVC997_BEGIN_OP_LINELIST_ADJCY 0x0000000A
+#define NVC997_BEGIN_OP_LINESTRIP_ADJCY 0x0000000B
+#define NVC997_BEGIN_OP_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC997_BEGIN_OP_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC997_BEGIN_OP_PATCH 0x0000000E
+#define NVC997_BEGIN_PRIMITIVE_ID 24:24
+#define NVC997_BEGIN_PRIMITIVE_ID_FIRST 0x00000000
+#define NVC997_BEGIN_PRIMITIVE_ID_UNCHANGED 0x00000001
+#define NVC997_BEGIN_INSTANCE_ID 27:26
+#define NVC997_BEGIN_INSTANCE_ID_FIRST 0x00000000
+#define NVC997_BEGIN_INSTANCE_ID_SUBSEQUENT 0x00000001
+#define NVC997_BEGIN_INSTANCE_ID_UNCHANGED 0x00000002
+#define NVC997_BEGIN_SPLIT_MODE 30:29
+#define NVC997_BEGIN_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000
+#define NVC997_BEGIN_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001
+#define NVC997_BEGIN_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002
+#define NVC997_BEGIN_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003
+#define NVC997_BEGIN_INSTANCE_ITERATE_ENABLE 31:31
+#define NVC997_BEGIN_INSTANCE_ITERATE_ENABLE_FALSE 0x00000000
+#define NVC997_BEGIN_INSTANCE_ITERATE_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_VERTEX_ID_COPY 0x161c
+#define NVC997_SET_VERTEX_ID_COPY_ENABLE 0:0
+#define NVC997_SET_VERTEX_ID_COPY_ENABLE_FALSE 0x00000000
+#define NVC997_SET_VERTEX_ID_COPY_ENABLE_TRUE 0x00000001
+#define NVC997_SET_VERTEX_ID_COPY_ATTRIBUTE_SLOT 11:4
+
+#define NVC997_ADD_TO_PRIMITIVE_ID 0x1620
+#define NVC997_ADD_TO_PRIMITIVE_ID_V 31:0
+
+#define NVC997_LOAD_PRIMITIVE_ID 0x1624
+#define NVC997_LOAD_PRIMITIVE_ID_V 31:0
+
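NVC997_BEGIN above packs the primitive topology and the instancing controls
into a single method word. A sketch of assembling it (FIELD_NUM is the
illustrative shift/mask helper from the earlier note; the actual submission
path through a pushbuffer is not shown):

/* Begin a non-instanced triangle draw; primitive and instance IDs
 * restart at zero, with a normal begin/end split mode.               */
unsigned begin_word =
      FIELD_NUM(NVC997_BEGIN_OP, NVC997_BEGIN_OP_TRIANGLES)
    | FIELD_NUM(NVC997_BEGIN_PRIMITIVE_ID, NVC997_BEGIN_PRIMITIVE_ID_FIRST)
    | FIELD_NUM(NVC997_BEGIN_INSTANCE_ID, NVC997_BEGIN_INSTANCE_ID_FIRST)
    | FIELD_NUM(NVC997_BEGIN_SPLIT_MODE,
                NVC997_BEGIN_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END);
/* ...emit vertex data, then write NVC997_END (0x1614) to close it... */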
+#define NVC997_SET_SHADER_BASED_CULL 0x162c
+#define NVC997_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE 1:1
+#define NVC997_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_FALSE 0x00000000
+#define NVC997_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_TRUE 0x00000001
+#define NVC997_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE 0:0
+#define NVC997_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_FALSE 0x00000000
+#define NVC997_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_CLASS_VERSION 0x1638
+#define NVC997_SET_CLASS_VERSION_CURRENT 15:0
+#define NVC997_SET_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC997_SET_DA_PRIMITIVE_RESTART 0x1644
+#define NVC997_SET_DA_PRIMITIVE_RESTART_ENABLE 0:0
+#define NVC997_SET_DA_PRIMITIVE_RESTART_ENABLE_FALSE 0x00000000
+#define NVC997_SET_DA_PRIMITIVE_RESTART_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_DA_PRIMITIVE_RESTART_INDEX 0x1648
+#define NVC997_SET_DA_PRIMITIVE_RESTART_INDEX_V 31:0
+
+#define NVC997_SET_DA_OUTPUT 0x164c
+#define NVC997_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START 12:12
+#define NVC997_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_FALSE 0x00000000
+#define NVC997_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_TRUE 0x00000001
+
+#define NVC997_SET_ANTI_ALIASED_POINT 0x1658
+#define NVC997_SET_ANTI_ALIASED_POINT_ENABLE 0:0
+#define NVC997_SET_ANTI_ALIASED_POINT_ENABLE_FALSE 0x00000000
+#define NVC997_SET_ANTI_ALIASED_POINT_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_POINT_CENTER_MODE 0x165c
+#define NVC997_SET_POINT_CENTER_MODE_V 31:0
+#define NVC997_SET_POINT_CENTER_MODE_V_OGL 0x00000000
+#define NVC997_SET_POINT_CENTER_MODE_V_D3D 0x00000001
+
+#define NVC997_SET_LINE_SMOOTH_PARAMETERS 0x1668
+#define NVC997_SET_LINE_SMOOTH_PARAMETERS_FALLOFF 31:0
+#define NVC997_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_00 0x00000000
+#define NVC997_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_33 0x00000001
+#define NVC997_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_60 0x00000002
+
+#define NVC997_SET_LINE_STIPPLE 0x166c
+#define NVC997_SET_LINE_STIPPLE_ENABLE 0:0
+#define NVC997_SET_LINE_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVC997_SET_LINE_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_LINE_SMOOTH_EDGE_TABLE(i) (0x1670+(i)*4)
+#define NVC997_SET_LINE_SMOOTH_EDGE_TABLE_V0 7:0
+#define NVC997_SET_LINE_SMOOTH_EDGE_TABLE_V1 15:8
+#define NVC997_SET_LINE_SMOOTH_EDGE_TABLE_V2 23:16
+#define NVC997_SET_LINE_SMOOTH_EDGE_TABLE_V3 31:24
+
+#define NVC997_SET_LINE_STIPPLE_PARAMETERS 0x1680
+#define NVC997_SET_LINE_STIPPLE_PARAMETERS_FACTOR 7:0
+#define NVC997_SET_LINE_STIPPLE_PARAMETERS_PATTERN 23:8
+
+#define NVC997_SET_PROVOKING_VERTEX 0x1684
+#define NVC997_SET_PROVOKING_VERTEX_V 0:0
+#define NVC997_SET_PROVOKING_VERTEX_V_FIRST 0x00000000
+#define NVC997_SET_PROVOKING_VERTEX_V_LAST 0x00000001
+
+#define NVC997_SET_TWO_SIDED_LIGHT 0x1688
+#define NVC997_SET_TWO_SIDED_LIGHT_ENABLE 0:0
+#define NVC997_SET_TWO_SIDED_LIGHT_ENABLE_FALSE 0x00000000
+#define NVC997_SET_TWO_SIDED_LIGHT_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_POLYGON_STIPPLE 0x168c
+#define NVC997_SET_POLYGON_STIPPLE_ENABLE 0:0
+#define NVC997_SET_POLYGON_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVC997_SET_POLYGON_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_SHADER_CONTROL 0x1690
+#define NVC997_SET_SHADER_CONTROL_DEFAULT_PARTIAL 0:0
+#define NVC997_SET_SHADER_CONTROL_DEFAULT_PARTIAL_ZERO 0x00000000
+#define NVC997_SET_SHADER_CONTROL_DEFAULT_PARTIAL_INFINITY 0x00000001
+#define NVC997_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR 1:1
+#define NVC997_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_LEGACY 0x00000000
+#define NVC997_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_FP64_COMPATIBLE 0x00000001
+#define NVC997_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR 2:2
+#define NVC997_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_ZERO 0x00000000
+#define NVC997_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_INDEFINITE 0x00000001
+
+#define NVC997_CHECK_CLASS_VERSION 0x16a0
+#define NVC997_CHECK_CLASS_VERSION_CURRENT 15:0
+#define NVC997_CHECK_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC997_SET_SPH_VERSION 0x16a4
+#define NVC997_SET_SPH_VERSION_CURRENT 15:0
+#define NVC997_SET_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC997_CHECK_SPH_VERSION 0x16a8
+#define NVC997_CHECK_SPH_VERSION_CURRENT 15:0
+#define NVC997_CHECK_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC997_SET_ALPHA_TO_COVERAGE_OVERRIDE 0x16b4
+#define NVC997_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE 0:0
+#define NVC997_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000
+#define NVC997_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001
+#define NVC997_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT 1:1
+#define NVC997_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_DISABLE 0x00000000
+#define NVC997_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_ENABLE 0x00000001
+
+#define NVC997_SET_SCG_GRAPHICS_PRIORITY 0x16bc
+#define NVC997_SET_SCG_GRAPHICS_PRIORITY_PRIORITY 5:0
+
+#define NVC997_SET_SCG_GRAPHICS_SCHEDULING_PARAMETERS(i) (0x16c0+(i)*4)
+#define NVC997_SET_SCG_GRAPHICS_SCHEDULING_PARAMETERS_V 31:0
+
+#define NVC997_SET_POLYGON_STIPPLE_PATTERN(i) (0x1700+(i)*4)
+#define NVC997_SET_POLYGON_STIPPLE_PATTERN_V 31:0
+
+#define NVC997_SET_AAM_VERSION 0x1790
+#define NVC997_SET_AAM_VERSION_CURRENT 15:0
+#define NVC997_SET_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC997_CHECK_AAM_VERSION 0x1794
+#define NVC997_CHECK_AAM_VERSION_CURRENT 15:0
+#define NVC997_CHECK_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVC997_SET_ZT_LAYER 0x179c
+#define NVC997_SET_ZT_LAYER_OFFSET 15:0
+
+#define NVC997_SET_INDEX_BUFFER_A 0x17c8
+#define NVC997_SET_INDEX_BUFFER_A_ADDRESS_UPPER 7:0
+
+#define NVC997_SET_INDEX_BUFFER_B 0x17cc
+#define NVC997_SET_INDEX_BUFFER_B_ADDRESS_LOWER 31:0
+
+#define NVC997_SET_INDEX_BUFFER_E 0x17d8
+#define NVC997_SET_INDEX_BUFFER_E_INDEX_SIZE 1:0
+#define NVC997_SET_INDEX_BUFFER_E_INDEX_SIZE_ONE_BYTE 0x00000000
+#define NVC997_SET_INDEX_BUFFER_E_INDEX_SIZE_TWO_BYTES 0x00000001
+#define NVC997_SET_INDEX_BUFFER_E_INDEX_SIZE_FOUR_BYTES 0x00000002
+
+#define NVC997_SET_INDEX_BUFFER_F 0x17dc
+#define NVC997_SET_INDEX_BUFFER_F_FIRST 31:0
+
+#define NVC997_DRAW_INDEX_BUFFER 0x17e0
+#define NVC997_DRAW_INDEX_BUFFER_COUNT 31:0
+
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST 0x17e4
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST 0x17e8
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
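The DRAW_INDEX_BUFFER{32,16,8}_BEGIN_END_INSTANCE_FIRST methods above fold a
small indexed draw into one word: start index in FIRST (15:0), index count
in COUNT (27:16, so at most 4095), and topology in 31:28. A sketch using the
illustrative FIELD_NUM helper from earlier:

/* One-word draw: 36 sixteen-bit indices as triangles, first instance. */
unsigned draw_word =
      FIELD_NUM(NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_FIRST, 0)
    | FIELD_NUM(NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_COUNT, 36)
    | FIELD_NUM(NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY,
                NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES);
/* The _SUBSEQUENT variants (0x17f0-0x17f8) repeat the draw for the next
 * instance without restarting the instance ID.                        */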
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST 0x17ec
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f0
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC997_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f4
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC997_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f8
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC997_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVC997_SET_DEPTH_BIAS_CLAMP 0x187c
+#define NVC997_SET_DEPTH_BIAS_CLAMP_V 31:0
+
+#define NVC997_SET_VERTEX_STREAM_INSTANCE_A(i) (0x1880+(i)*4)
+#define NVC997_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED 0:0
+#define NVC997_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_FALSE 0x00000000
+#define NVC997_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_TRUE 0x00000001
+
+#define NVC997_SET_VERTEX_STREAM_INSTANCE_B(i) (0x18c0+(i)*4)
+#define NVC997_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED 0:0
+#define NVC997_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_FALSE 0x00000000
+#define NVC997_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_TRUE 0x00000001
+
+#define NVC997_SET_ATTRIBUTE_POINT_SIZE 0x1910
+#define NVC997_SET_ATTRIBUTE_POINT_SIZE_ENABLE 0:0
+#define NVC997_SET_ATTRIBUTE_POINT_SIZE_ENABLE_FALSE 0x00000000
+#define NVC997_SET_ATTRIBUTE_POINT_SIZE_ENABLE_TRUE 0x00000001
+#define NVC997_SET_ATTRIBUTE_POINT_SIZE_SLOT 11:4
+
+#define NVC997_OGL_SET_CULL 0x1918
+#define NVC997_OGL_SET_CULL_ENABLE 0:0
+#define NVC997_OGL_SET_CULL_ENABLE_FALSE 0x00000000
+#define NVC997_OGL_SET_CULL_ENABLE_TRUE 0x00000001
+
+#define NVC997_OGL_SET_FRONT_FACE 0x191c
+#define NVC997_OGL_SET_FRONT_FACE_V 31:0
+#define NVC997_OGL_SET_FRONT_FACE_V_CW 0x00000900
+#define NVC997_OGL_SET_FRONT_FACE_V_CCW 0x00000901
+
+#define NVC997_OGL_SET_CULL_FACE 0x1920
+#define NVC997_OGL_SET_CULL_FACE_V 31:0
+#define NVC997_OGL_SET_CULL_FACE_V_FRONT 0x00000404
+#define NVC997_OGL_SET_CULL_FACE_V_BACK 0x00000405
+#define NVC997_OGL_SET_CULL_FACE_V_FRONT_AND_BACK 0x00000408
+
+#define NVC997_SET_VIEWPORT_PIXEL 0x1924
+#define NVC997_SET_VIEWPORT_PIXEL_CENTER 0:0
+#define NVC997_SET_VIEWPORT_PIXEL_CENTER_AT_HALF_INTEGERS 0x00000000
+#define NVC997_SET_VIEWPORT_PIXEL_CENTER_AT_INTEGERS 0x00000001
+
+#define NVC997_SET_VIEWPORT_SCALE_OFFSET 0x192c
+#define NVC997_SET_VIEWPORT_SCALE_OFFSET_ENABLE 0:0
+#define NVC997_SET_VIEWPORT_SCALE_OFFSET_ENABLE_FALSE 0x00000000
+#define NVC997_SET_VIEWPORT_SCALE_OFFSET_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL 0x193c
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE 0:0
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_FALSE 0x00000000
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_TRUE 0x00000001
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE 17:16
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_USE_FIELD_MIN_Z_ZERO_MAX_Z_ONE 0x00000000
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_MIN_Z_MAX_Z 0x00000001
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_ZERO_ONE 0x00000002
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_MINUS_INF_PLUS_INF 0x00000003
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z 3:3
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLIP 0x00000000
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLAMP 0x00000001
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z 4:4
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLIP 0x00000000
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLAMP 0x00000001
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND 7:7
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_256 0x00000000
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_1 0x00000001
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND 10:10
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_256 0x00000000
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_1 0x00000001
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP 13:11
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP 0x00000000
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_PASSTHRU 0x00000001
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XY_CLIP 0x00000002
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XYZ_CLIP 0x00000003
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP_NO_Z_CULL 0x00000004
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_Z_CLIP 0x00000005
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_TRI_FILL_OR_CLIP 0x00000006
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z 2:1
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SAME_AS_XY_GUARDBAND 0x00000000
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_256 0x00000001
+#define NVC997_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_1 0x00000002
+
+#define NVC997_SET_USER_CLIP_OP 0x1940
+#define NVC997_SET_USER_CLIP_OP_PLANE0 0:0
+#define NVC997_SET_USER_CLIP_OP_PLANE0_CLIP 0x00000000
+#define NVC997_SET_USER_CLIP_OP_PLANE0_CULL 0x00000001
+#define NVC997_SET_USER_CLIP_OP_PLANE1 4:4
+#define NVC997_SET_USER_CLIP_OP_PLANE1_CLIP 0x00000000
+#define NVC997_SET_USER_CLIP_OP_PLANE1_CULL 0x00000001
+#define NVC997_SET_USER_CLIP_OP_PLANE2 8:8
+#define NVC997_SET_USER_CLIP_OP_PLANE2_CLIP 0x00000000
+#define NVC997_SET_USER_CLIP_OP_PLANE2_CULL 0x00000001
+#define NVC997_SET_USER_CLIP_OP_PLANE3 12:12
+#define NVC997_SET_USER_CLIP_OP_PLANE3_CLIP 0x00000000
+#define NVC997_SET_USER_CLIP_OP_PLANE3_CULL 0x00000001
+#define NVC997_SET_USER_CLIP_OP_PLANE4 16:16
+#define NVC997_SET_USER_CLIP_OP_PLANE4_CLIP 0x00000000
+#define NVC997_SET_USER_CLIP_OP_PLANE4_CULL 0x00000001
+#define NVC997_SET_USER_CLIP_OP_PLANE5 20:20
+#define NVC997_SET_USER_CLIP_OP_PLANE5_CLIP 0x00000000
+#define NVC997_SET_USER_CLIP_OP_PLANE5_CULL 0x00000001
+#define NVC997_SET_USER_CLIP_OP_PLANE6 24:24
+#define NVC997_SET_USER_CLIP_OP_PLANE6_CLIP 0x00000000
+#define NVC997_SET_USER_CLIP_OP_PLANE6_CULL 0x00000001
+#define NVC997_SET_USER_CLIP_OP_PLANE7 28:28
+#define NVC997_SET_USER_CLIP_OP_PLANE7_CLIP 0x00000000
+#define NVC997_SET_USER_CLIP_OP_PLANE7_CULL 0x00000001
+
+#define NVC997_SET_RENDER_ENABLE_OVERRIDE 0x1944
+#define NVC997_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0
+#define NVC997_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000
+#define NVC997_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001
+#define NVC997_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002
+
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_CONTROL 0x1948
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE 0:0
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_TOPOLOGY_IN_BEGIN_METHODS 0x00000000
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_SEPARATE_TOPOLOGY_STATE 0x00000001
+
+#define NVC997_SET_WINDOW_CLIP_ENABLE 0x194c
+#define NVC997_SET_WINDOW_CLIP_ENABLE_V 0:0
+#define NVC997_SET_WINDOW_CLIP_ENABLE_V_FALSE 0x00000000
+#define NVC997_SET_WINDOW_CLIP_ENABLE_V_TRUE 0x00000001
+
+#define NVC997_SET_WINDOW_CLIP_TYPE 0x1950
+#define NVC997_SET_WINDOW_CLIP_TYPE_V 1:0
+#define NVC997_SET_WINDOW_CLIP_TYPE_V_INCLUSIVE 0x00000000
+#define NVC997_SET_WINDOW_CLIP_TYPE_V_EXCLUSIVE 0x00000001
+#define NVC997_SET_WINDOW_CLIP_TYPE_V_CLIPALL 0x00000002
+
+#define NVC997_INVALIDATE_ZCULL 0x1958
+#define NVC997_INVALIDATE_ZCULL_V 31:0
+#define NVC997_INVALIDATE_ZCULL_V_INVALIDATE 0x00000000
+
+#define NVC997_SET_ZCULL 0x1968
+#define NVC997_SET_ZCULL_Z_ENABLE 0:0
+#define NVC997_SET_ZCULL_Z_ENABLE_FALSE 0x00000000
+#define NVC997_SET_ZCULL_Z_ENABLE_TRUE 0x00000001
+#define NVC997_SET_ZCULL_STENCIL_ENABLE 4:4
+#define NVC997_SET_ZCULL_STENCIL_ENABLE_FALSE 0x00000000
+#define NVC997_SET_ZCULL_STENCIL_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_ZCULL_BOUNDS 0x196c
+#define NVC997_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE 0:0
+#define NVC997_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVC997_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_TRUE 0x00000001
+#define NVC997_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE 4:4
+#define NVC997_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVC997_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_PRIMITIVE_TOPOLOGY 0x1970
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V 15:0
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_POINTLIST 0x00000001
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LINELIST 0x00000002
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP 0x00000003
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST 0x00000004
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP 0x00000005
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LINELIST_ADJCY 0x0000000A
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP_ADJCY 0x0000000B
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST_ADJCY 0x0000000C
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_PATCHLIST 0x0000000E
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_POINTS 0x00001001
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST 0x00001002
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST 0x00001003
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST 0x0000100F
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINESTRIP 0x00001010
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINESTRIP 0x00001011
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLELIST 0x00001012
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLESTRIP 0x00001013
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLESTRIP 0x00001014
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN 0x00001015
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLEFAN 0x00001016
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN_IMM 0x00001017
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST_IMM 0x00001018
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST2 0x0000101A
+#define NVC997_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST2 0x0000101B
+
+#define NVC997_ZCULL_SYNC 0x1978
+#define NVC997_ZCULL_SYNC_V 31:0
+
+#define NVC997_SET_CLIP_ID_TEST 0x197c
+#define NVC997_SET_CLIP_ID_TEST_ENABLE 0:0
+#define NVC997_SET_CLIP_ID_TEST_ENABLE_FALSE 0x00000000
+#define NVC997_SET_CLIP_ID_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_SURFACE_CLIP_ID_WIDTH 0x1980
+#define NVC997_SET_SURFACE_CLIP_ID_WIDTH_V 31:0
+
+#define NVC997_SET_CLIP_ID 0x1984
+#define NVC997_SET_CLIP_ID_V 31:0
+
+#define NVC997_SET_DEPTH_BOUNDS_TEST 0x19bc
+#define NVC997_SET_DEPTH_BOUNDS_TEST_ENABLE 0:0
+#define NVC997_SET_DEPTH_BOUNDS_TEST_ENABLE_FALSE 0x00000000
+#define NVC997_SET_DEPTH_BOUNDS_TEST_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_BLEND_FLOAT_OPTION 0x19c0
+#define NVC997_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO 0:0
+#define NVC997_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_FALSE 0x00000000
+#define NVC997_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_TRUE 0x00000001
+
+#define NVC997_SET_LOGIC_OP 0x19c4
+#define NVC997_SET_LOGIC_OP_ENABLE 0:0
+#define NVC997_SET_LOGIC_OP_ENABLE_FALSE 0x00000000
+#define NVC997_SET_LOGIC_OP_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_LOGIC_OP_FUNC 0x19c8
+#define NVC997_SET_LOGIC_OP_FUNC_V 31:0
+#define NVC997_SET_LOGIC_OP_FUNC_V_CLEAR 0x00001500
+#define NVC997_SET_LOGIC_OP_FUNC_V_AND 0x00001501
+#define NVC997_SET_LOGIC_OP_FUNC_V_AND_REVERSE 0x00001502
+#define NVC997_SET_LOGIC_OP_FUNC_V_COPY 0x00001503
+#define NVC997_SET_LOGIC_OP_FUNC_V_AND_INVERTED 0x00001504
+#define NVC997_SET_LOGIC_OP_FUNC_V_NOOP 0x00001505
+#define NVC997_SET_LOGIC_OP_FUNC_V_XOR 0x00001506
+#define NVC997_SET_LOGIC_OP_FUNC_V_OR 0x00001507
+#define NVC997_SET_LOGIC_OP_FUNC_V_NOR 0x00001508
+#define NVC997_SET_LOGIC_OP_FUNC_V_EQUIV 0x00001509
+#define NVC997_SET_LOGIC_OP_FUNC_V_INVERT 0x0000150A
+#define NVC997_SET_LOGIC_OP_FUNC_V_OR_REVERSE 0x0000150B
+#define NVC997_SET_LOGIC_OP_FUNC_V_COPY_INVERTED 0x0000150C
+#define NVC997_SET_LOGIC_OP_FUNC_V_OR_INVERTED 0x0000150D
+#define NVC997_SET_LOGIC_OP_FUNC_V_NAND 0x0000150E
+#define NVC997_SET_LOGIC_OP_FUNC_V_SET 0x0000150F
+
+#define NVC997_SET_Z_COMPRESSION 0x19cc
+#define NVC997_SET_Z_COMPRESSION_ENABLE 0:0
+#define NVC997_SET_Z_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVC997_SET_Z_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVC997_CLEAR_SURFACE 0x19d0
+#define NVC997_CLEAR_SURFACE_Z_ENABLE 0:0
+#define NVC997_CLEAR_SURFACE_Z_ENABLE_FALSE 0x00000000
+#define NVC997_CLEAR_SURFACE_Z_ENABLE_TRUE 0x00000001
+#define NVC997_CLEAR_SURFACE_STENCIL_ENABLE 1:1
+#define NVC997_CLEAR_SURFACE_STENCIL_ENABLE_FALSE 0x00000000
+#define NVC997_CLEAR_SURFACE_STENCIL_ENABLE_TRUE 0x00000001
+#define NVC997_CLEAR_SURFACE_R_ENABLE 2:2
+#define NVC997_CLEAR_SURFACE_R_ENABLE_FALSE 0x00000000
+#define NVC997_CLEAR_SURFACE_R_ENABLE_TRUE 0x00000001
+#define NVC997_CLEAR_SURFACE_G_ENABLE 3:3
+#define NVC997_CLEAR_SURFACE_G_ENABLE_FALSE 0x00000000
+#define NVC997_CLEAR_SURFACE_G_ENABLE_TRUE 0x00000001
+#define NVC997_CLEAR_SURFACE_B_ENABLE 4:4
+#define NVC997_CLEAR_SURFACE_B_ENABLE_FALSE 0x00000000
+#define NVC997_CLEAR_SURFACE_B_ENABLE_TRUE 0x00000001
+#define NVC997_CLEAR_SURFACE_A_ENABLE 5:5
+#define NVC997_CLEAR_SURFACE_A_ENABLE_FALSE 0x00000000
+#define NVC997_CLEAR_SURFACE_A_ENABLE_TRUE 0x00000001
+#define NVC997_CLEAR_SURFACE_MRT_SELECT 9:6
+#define NVC997_CLEAR_SURFACE_RT_ARRAY_INDEX 25:10
+
+#define NVC997_CLEAR_CLIP_ID_SURFACE 0x19d4
+#define NVC997_CLEAR_CLIP_ID_SURFACE_V 31:0
+
+#define NVC997_SET_COLOR_COMPRESSION(i) (0x19e0+(i)*4)
+#define NVC997_SET_COLOR_COMPRESSION_ENABLE 0:0
+#define NVC997_SET_COLOR_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVC997_SET_COLOR_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_CT_WRITE(i) (0x1a00+(i)*4)
+#define NVC997_SET_CT_WRITE_R_ENABLE 0:0
+#define NVC997_SET_CT_WRITE_R_ENABLE_FALSE 0x00000000
+#define NVC997_SET_CT_WRITE_R_ENABLE_TRUE 0x00000001
+#define NVC997_SET_CT_WRITE_G_ENABLE 4:4
+#define NVC997_SET_CT_WRITE_G_ENABLE_FALSE 0x00000000
+#define NVC997_SET_CT_WRITE_G_ENABLE_TRUE 0x00000001
+#define NVC997_SET_CT_WRITE_B_ENABLE 8:8
+#define NVC997_SET_CT_WRITE_B_ENABLE_FALSE 0x00000000
+#define NVC997_SET_CT_WRITE_B_ENABLE_TRUE 0x00000001
+#define NVC997_SET_CT_WRITE_A_ENABLE 12:12
+#define NVC997_SET_CT_WRITE_A_ENABLE_FALSE 0x00000000
+#define NVC997_SET_CT_WRITE_A_ENABLE_TRUE 0x00000001
+
+#define NVC997_PIPE_NOP 0x1a2c
+#define NVC997_PIPE_NOP_V 31:0
+
+#define NVC997_SET_SPARE00 0x1a30
+#define NVC997_SET_SPARE00_V 31:0
+
+#define NVC997_SET_SPARE01 0x1a34
+#define NVC997_SET_SPARE01_V 31:0
+
+#define NVC997_SET_SPARE02 0x1a38
+#define NVC997_SET_SPARE02_V 31:0
+
+#define NVC997_SET_SPARE03 0x1a3c
+#define NVC997_SET_SPARE03_V 31:0
+
+#define NVC997_SET_REPORT_SEMAPHORE_A 0x1b00
+#define NVC997_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0
+
+#define NVC997_SET_REPORT_SEMAPHORE_B 0x1b04
+#define NVC997_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0
+
+#define NVC997_SET_REPORT_SEMAPHORE_C 0x1b08
+#define NVC997_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0
+
+#define NVC997_SET_REPORT_SEMAPHORE_D 0x1b0c
+#define NVC997_SET_REPORT_SEMAPHORE_D_OPERATION 1:0
+#define NVC997_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000
+#define NVC997_SET_REPORT_SEMAPHORE_D_OPERATION_ACQUIRE 0x00000001
+#define NVC997_SET_REPORT_SEMAPHORE_D_OPERATION_REPORT_ONLY 0x00000002
+#define NVC997_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003
+#define NVC997_SET_REPORT_SEMAPHORE_D_RELEASE 4:4
+#define NVC997_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_READS_COMPLETE 0x00000000
+#define NVC997_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_WRITES_COMPLETE 0x00000001
+#define NVC997_SET_REPORT_SEMAPHORE_D_ACQUIRE 8:8
+#define NVC997_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_WRITES_START 0x00000000
+#define NVC997_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_READS_START 0x00000001
+#define NVC997_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION 15:12
+#define NVC997_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_NONE 0x00000000
+#define NVC997_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DATA_ASSEMBLER 0x00000001
+#define NVC997_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VERTEX_SHADER 0x00000002
+#define NVC997_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_INIT_SHADER 0x00000008
+#define NVC997_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_SHADER 0x00000009
+#define NVC997_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_GEOMETRY_SHADER 0x00000006
+#define NVC997_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_STREAMING_OUTPUT 0x00000005
+#define NVC997_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VPC 0x00000004
+#define NVC997_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ZCULL 0x00000007
+#define NVC997_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_PIXEL_SHADER 0x0000000A
+#define NVC997_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DEPTH_TEST 0x0000000C
+#define NVC997_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ALL 0x0000000F
+#define NVC997_SET_REPORT_SEMAPHORE_D_COMPARISON 16:16
+#define NVC997_SET_REPORT_SEMAPHORE_D_COMPARISON_EQ 0x00000000
+#define NVC997_SET_REPORT_SEMAPHORE_D_COMPARISON_GE 0x00000001
+#define NVC997_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20
+#define NVC997_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000
+#define NVC997_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT 27:23
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_NONE 0x00000000
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_DA_VERTICES_GENERATED 0x00000001
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_DA_PRIMITIVES_GENERATED 0x00000003
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_VS_INVOCATIONS 0x00000005
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_TI_INVOCATIONS 0x0000001B
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_TS_INVOCATIONS 0x0000001D
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_TS_PRIMITIVES_GENERATED 0x0000001F
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_GS_INVOCATIONS 0x00000007
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_GS_PRIMITIVES_GENERATED 0x00000009
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_ALPHA_BETA_CLOCKS 0x00000004
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_SCG_CLOCKS 0x00000008
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_VTG_PRIMITIVES_OUT 0x00000012
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x0000001E
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_SUCCEEDED 0x0000000B
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED 0x0000000D
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000006
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_BYTE_COUNT 0x0000001A
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_INVOCATIONS 0x0000000F
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_PRIMITIVES_GENERATED 0x00000011
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS0 0x0000000A
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS1 0x0000000C
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS2 0x0000000E
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS3 0x00000010
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_PS_INVOCATIONS 0x00000013
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT 0x00000002
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT64 0x00000015
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_TILED_ZPASS_PIXEL_CNT64 0x00000017
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_COLOR_TARGET 0x00000018
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_ZETA_TARGET 0x00000019
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_BOUNDING_RECTANGLE 0x0000001C
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_TIMESTAMP 0x00000014
+#define NVC997_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28
+#define NVC997_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000
+#define NVC997_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001
+#define NVC997_SET_REPORT_SEMAPHORE_D_SUB_REPORT 7:5
+#define NVC997_SET_REPORT_SEMAPHORE_D_REPORT_DWORD_NUMBER 21:21
+#define NVC997_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2
+#define NVC997_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000
+#define NVC997_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001
+#define NVC997_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3
+#define NVC997_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000
+#define NVC997_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001
+#define NVC997_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9
+#define NVC997_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000
+#define NVC997_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001
+#define NVC997_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002
+#define NVC997_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003
+#define NVC997_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004
+#define NVC997_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005
+#define NVC997_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006
+#define NVC997_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007
+#define NVC997_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17
+#define NVC997_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000
+#define NVC997_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001
+#define NVC997_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP 19:19
+#define NVC997_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_FALSE 0x00000000
+#define NVC997_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_TRUE 0x00000001
+
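SET_REPORT_SEMAPHORE_A..D above describe a semaphore/report operation: a
GPU address split across A/B, a payload in C, and an operation word in D.
As a hedged sketch only (this field combination is one plausible choice,
and FIELD_NUM is the illustrative helper from the earlier note), a
one-word payload release after all preceding writes complete could be
encoded as:

/* Operation word for a semaphore release via SET_REPORT_SEMAPHORE_D.  */
unsigned sem_d =
      FIELD_NUM(NVC997_SET_REPORT_SEMAPHORE_D_OPERATION,
                NVC997_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE)
    | FIELD_NUM(NVC997_SET_REPORT_SEMAPHORE_D_RELEASE,
                NVC997_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_WRITES_COMPLETE)
    | FIELD_NUM(NVC997_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION,
                NVC997_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ALL)
    | FIELD_NUM(NVC997_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE,
                NVC997_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD);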
+#define NVC997_SET_VERTEX_STREAM_A_FORMAT(j) (0x1c00+(j)*16)
+#define NVC997_SET_VERTEX_STREAM_A_FORMAT_STRIDE 11:0
+#define NVC997_SET_VERTEX_STREAM_A_FORMAT_ENABLE 12:12
+#define NVC997_SET_VERTEX_STREAM_A_FORMAT_ENABLE_FALSE 0x00000000
+#define NVC997_SET_VERTEX_STREAM_A_FORMAT_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_VERTEX_STREAM_A_LOCATION_A(j) (0x1c04+(j)*16)
+#define NVC997_SET_VERTEX_STREAM_A_LOCATION_A_OFFSET_UPPER 7:0
+
+#define NVC997_SET_VERTEX_STREAM_A_LOCATION_B(j) (0x1c08+(j)*16)
+#define NVC997_SET_VERTEX_STREAM_A_LOCATION_B_OFFSET_LOWER 31:0
+
+#define NVC997_SET_VERTEX_STREAM_A_FREQUENCY(j) (0x1c0c+(j)*16)
+#define NVC997_SET_VERTEX_STREAM_A_FREQUENCY_V 31:0
+
+#define NVC997_SET_VERTEX_STREAM_B_FORMAT(j) (0x1d00+(j)*16)
+#define NVC997_SET_VERTEX_STREAM_B_FORMAT_STRIDE 11:0
+#define NVC997_SET_VERTEX_STREAM_B_FORMAT_ENABLE 12:12
+#define NVC997_SET_VERTEX_STREAM_B_FORMAT_ENABLE_FALSE 0x00000000
+#define NVC997_SET_VERTEX_STREAM_B_FORMAT_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_VERTEX_STREAM_B_LOCATION_A(j) (0x1d04+(j)*16)
+#define NVC997_SET_VERTEX_STREAM_B_LOCATION_A_OFFSET_UPPER 7:0
+
+#define NVC997_SET_VERTEX_STREAM_B_LOCATION_B(j) (0x1d08+(j)*16)
+#define NVC997_SET_VERTEX_STREAM_B_LOCATION_B_OFFSET_LOWER 31:0
+
+#define NVC997_SET_VERTEX_STREAM_B_FREQUENCY(j) (0x1d0c+(j)*16)
+#define NVC997_SET_VERTEX_STREAM_B_FREQUENCY_V 31:0
+
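The (j)-indexed vertex-stream methods above are arrayed: each stream owns a
16-byte block of methods (the blend-per-target group that follows uses a
32-byte stride). A sketch of programming stream j's format word (FIELD_NUM
as in the earlier illustrative helper):

/* Enable vertex stream j with a 32-byte per-vertex stride.            */
/* unsigned fmt = FIELD_NUM(NVC997_SET_VERTEX_STREAM_A_FORMAT_STRIDE, 32)
 *              | FIELD_NUM(NVC997_SET_VERTEX_STREAM_A_FORMAT_ENABLE,
 *                          NVC997_SET_VERTEX_STREAM_A_FORMAT_ENABLE_TRUE);
 * written at method offset NVC997_SET_VERTEX_STREAM_A_FORMAT(j), i.e.
 * 0x1c00 + (j)*16; the _B bank at 0x1d00 mirrors the same layout.     */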
+#define NVC997_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA(j) (0x1e00+(j)*32)
+#define NVC997_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE 0:0
+#define NVC997_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000
+#define NVC997_SET_BLEND_PER_TARGET_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_OP(j) (0x1e04+(j)*32)
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_OP_V 31:0
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_ADD 0x00008006
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MIN 0x00008007
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_MAX 0x00008008
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_ADD 0x00000001
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_SUBTRACT 0x00000002
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MIN 0x00000004
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_OP_V_D3D_MAX 0x00000005
+
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF(j) (0x1e08+(j)*32)
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V 31:0
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013
+
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF(j) (0x1e0c+(j)*32)
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V 31:0
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004
+#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005
NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC997_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_OP(j) (0x1e10+(j)*32) +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_OP_V 31:0 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MIN 0x00008007 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MAX 0x00008008 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_ADD 0x00000001 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_SUBTRACT 0x00000002 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MIN 0x00000004 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MAX 0x00000005 + +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF(j) (0x1e14+(j)*32) +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V 31:0 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define 
NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF(j) (0x1e18+(j)*32) +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V 31:0 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define 
NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVC997_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVC997_SET_PIPELINE_SHADER(j) (0x2000+(j)*64) +#define NVC997_SET_PIPELINE_SHADER_ENABLE 0:0 +#define NVC997_SET_PIPELINE_SHADER_ENABLE_FALSE 0x00000000 +#define NVC997_SET_PIPELINE_SHADER_ENABLE_TRUE 0x00000001 +#define NVC997_SET_PIPELINE_SHADER_TYPE 7:4 +#define NVC997_SET_PIPELINE_SHADER_TYPE_VERTEX_CULL_BEFORE_FETCH 0x00000000 +#define NVC997_SET_PIPELINE_SHADER_TYPE_VERTEX 0x00000001 +#define NVC997_SET_PIPELINE_SHADER_TYPE_TESSELLATION_INIT 0x00000002 +#define NVC997_SET_PIPELINE_SHADER_TYPE_TESSELLATION 0x00000003 +#define NVC997_SET_PIPELINE_SHADER_TYPE_GEOMETRY 0x00000004 +#define NVC997_SET_PIPELINE_SHADER_TYPE_PIXEL 0x00000005 + +#define NVC997_SET_PIPELINE_RESERVED_B(j) (0x2004+(j)*64) +#define NVC997_SET_PIPELINE_RESERVED_B_V 0:0 + +#define NVC997_SET_PIPELINE_RESERVED_A(j) (0x2008+(j)*64) +#define NVC997_SET_PIPELINE_RESERVED_A_V 0:0 + +#define NVC997_SET_PIPELINE_REGISTER_COUNT(j) (0x200c+(j)*64) +#define NVC997_SET_PIPELINE_REGISTER_COUNT_V 8:0 + +#define NVC997_SET_PIPELINE_BINDING(j) (0x2010+(j)*64) +#define NVC997_SET_PIPELINE_BINDING_GROUP 2:0 + +#define NVC997_SET_PIPELINE_PROGRAM_ADDRESS_A(j) (0x2014+(j)*64) +#define NVC997_SET_PIPELINE_PROGRAM_ADDRESS_A_UPPER 7:0 + +#define NVC997_SET_PIPELINE_PROGRAM_ADDRESS_B(j) (0x2018+(j)*64) +#define NVC997_SET_PIPELINE_PROGRAM_ADDRESS_B_LOWER 31:0 + +#define NVC997_SET_PIPELINE_PROGRAM_PREFETCH(j) (0x201c+(j)*64) +#define NVC997_SET_PIPELINE_PROGRAM_PREFETCH_SIZE_IN_BLOCKS 6:0 + +#define NVC997_SET_PIPELINE_RESERVED_E(j) (0x2020+(j)*64) +#define NVC997_SET_PIPELINE_RESERVED_E_V 0:0 + +#define NVC997_SET_FALCON00 0x2300 +#define NVC997_SET_FALCON00_V 31:0 + +#define NVC997_SET_FALCON01 0x2304 +#define NVC997_SET_FALCON01_V 31:0 + +#define NVC997_SET_FALCON02 0x2308 +#define NVC997_SET_FALCON02_V 31:0 + +#define NVC997_SET_FALCON03 0x230c +#define NVC997_SET_FALCON03_V 31:0 + +#define NVC997_SET_FALCON04 0x2310 +#define NVC997_SET_FALCON04_V 31:0 + +#define NVC997_SET_FALCON05 0x2314 +#define NVC997_SET_FALCON05_V 31:0 + +#define NVC997_SET_FALCON06 0x2318 +#define NVC997_SET_FALCON06_V 31:0 + +#define NVC997_SET_FALCON07 0x231c +#define NVC997_SET_FALCON07_V 31:0 + +#define NVC997_SET_FALCON08 0x2320 +#define NVC997_SET_FALCON08_V 31:0 + 
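The "7:4"-style values in the methods above are not offsets but MSB:LSB bit ranges within a 32-bit method word, and (j)-indexed methods such as NVC997_SET_PIPELINE_SHADER(j) step by a fixed byte stride per pipeline stage (64 bytes here). A minimal C sketch of how a client might pack such a word, assuming hypothetical FIELD_* helper names (the DRF_NUM()/DRF_DEF() macros in nvmisc.h derive the same shifts and masks directly from these hi:lo pairs):

#include <stdint.h>

/* Hypothetical helpers: mask and place a value into an MSB:LSB field. */
#define FIELD_MASK(hi, lo)    ((0xFFFFFFFFu >> (31u - (hi) + (lo))) << (lo))
#define FIELD_NUM(hi, lo, n)  (((uint32_t)(n) << (lo)) & FIELD_MASK(hi, lo))

/* SET_PIPELINE_SHADER(j): ENABLE is bit 0:0, TYPE is bits 7:4. */
static uint32_t pipeline_shader_word(void)
{
    return FIELD_NUM(0, 0, 0x1)    /* ..._ENABLE_TRUE */
         | FIELD_NUM(7, 4, 0x5);   /* ..._TYPE_PIXEL  */
}

/* Byte offset of the method for pipeline stage j. */
static uint32_t pipeline_shader_method(uint32_t j)
{
    return 0x2000u + j * 64u;
}

pipeline_shader_word() evaluates to 0x51: TYPE_PIXEL in bits 7:4 with ENABLE_TRUE in bit 0.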
+#define NVC997_SET_FALCON09 0x2324 +#define NVC997_SET_FALCON09_V 31:0 + +#define NVC997_SET_FALCON10 0x2328 +#define NVC997_SET_FALCON10_V 31:0 + +#define NVC997_SET_FALCON11 0x232c +#define NVC997_SET_FALCON11_V 31:0 + +#define NVC997_SET_FALCON12 0x2330 +#define NVC997_SET_FALCON12_V 31:0 + +#define NVC997_SET_FALCON13 0x2334 +#define NVC997_SET_FALCON13_V 31:0 + +#define NVC997_SET_FALCON14 0x2338 +#define NVC997_SET_FALCON14_V 31:0 + +#define NVC997_SET_FALCON15 0x233c +#define NVC997_SET_FALCON15_V 31:0 + +#define NVC997_SET_FALCON16 0x2340 +#define NVC997_SET_FALCON16_V 31:0 + +#define NVC997_SET_FALCON17 0x2344 +#define NVC997_SET_FALCON17_V 31:0 + +#define NVC997_SET_FALCON18 0x2348 +#define NVC997_SET_FALCON18_V 31:0 + +#define NVC997_SET_FALCON19 0x234c +#define NVC997_SET_FALCON19_V 31:0 + +#define NVC997_SET_FALCON20 0x2350 +#define NVC997_SET_FALCON20_V 31:0 + +#define NVC997_SET_FALCON21 0x2354 +#define NVC997_SET_FALCON21_V 31:0 + +#define NVC997_SET_FALCON22 0x2358 +#define NVC997_SET_FALCON22_V 31:0 + +#define NVC997_SET_FALCON23 0x235c +#define NVC997_SET_FALCON23_V 31:0 + +#define NVC997_SET_FALCON24 0x2360 +#define NVC997_SET_FALCON24_V 31:0 + +#define NVC997_SET_FALCON25 0x2364 +#define NVC997_SET_FALCON25_V 31:0 + +#define NVC997_SET_FALCON26 0x2368 +#define NVC997_SET_FALCON26_V 31:0 + +#define NVC997_SET_FALCON27 0x236c +#define NVC997_SET_FALCON27_V 31:0 + +#define NVC997_SET_FALCON28 0x2370 +#define NVC997_SET_FALCON28_V 31:0 + +#define NVC997_SET_FALCON29 0x2374 +#define NVC997_SET_FALCON29_V 31:0 + +#define NVC997_SET_FALCON30 0x2378 +#define NVC997_SET_FALCON30_V 31:0 + +#define NVC997_SET_FALCON31 0x237c +#define NVC997_SET_FALCON31_V 31:0 + +#define NVC997_SET_CONSTANT_BUFFER_SELECTOR_A 0x2380 +#define NVC997_SET_CONSTANT_BUFFER_SELECTOR_A_SIZE 16:0 + +#define NVC997_SET_CONSTANT_BUFFER_SELECTOR_B 0x2384 +#define NVC997_SET_CONSTANT_BUFFER_SELECTOR_B_ADDRESS_UPPER 7:0 + +#define NVC997_SET_CONSTANT_BUFFER_SELECTOR_C 0x2388 +#define NVC997_SET_CONSTANT_BUFFER_SELECTOR_C_ADDRESS_LOWER 31:0 + +#define NVC997_LOAD_CONSTANT_BUFFER_OFFSET 0x238c +#define NVC997_LOAD_CONSTANT_BUFFER_OFFSET_V 15:0 + +#define NVC997_LOAD_CONSTANT_BUFFER(i) (0x2390+(i)*4) +#define NVC997_LOAD_CONSTANT_BUFFER_V 31:0 + +#define NVC997_BIND_GROUP_RESERVED_A(j) (0x2400+(j)*32) +#define NVC997_BIND_GROUP_RESERVED_A_V 0:0 + +#define NVC997_BIND_GROUP_RESERVED_B(j) (0x2404+(j)*32) +#define NVC997_BIND_GROUP_RESERVED_B_V 0:0 + +#define NVC997_BIND_GROUP_RESERVED_C(j) (0x2408+(j)*32) +#define NVC997_BIND_GROUP_RESERVED_C_V 0:0 + +#define NVC997_BIND_GROUP_RESERVED_D(j) (0x240c+(j)*32) +#define NVC997_BIND_GROUP_RESERVED_D_V 0:0 + +#define NVC997_BIND_GROUP_CONSTANT_BUFFER(j) (0x2410+(j)*32) +#define NVC997_BIND_GROUP_CONSTANT_BUFFER_VALID 0:0 +#define NVC997_BIND_GROUP_CONSTANT_BUFFER_VALID_FALSE 0x00000000 +#define NVC997_BIND_GROUP_CONSTANT_BUFFER_VALID_TRUE 0x00000001 +#define NVC997_BIND_GROUP_CONSTANT_BUFFER_SHADER_SLOT 8:4 + +#define NVC997_SET_TRAP_HANDLER_A 0x25f8 +#define NVC997_SET_TRAP_HANDLER_A_ADDRESS_UPPER 16:0 + +#define NVC997_SET_TRAP_HANDLER_B 0x25fc +#define NVC997_SET_TRAP_HANDLER_B_ADDRESS_LOWER 31:0 + +#define NVC997_SET_COLOR_CLAMP 0x2600 +#define NVC997_SET_COLOR_CLAMP_ENABLE 0:0 +#define NVC997_SET_COLOR_CLAMP_ENABLE_FALSE 0x00000000 +#define NVC997_SET_COLOR_CLAMP_ENABLE_TRUE 0x00000001 + +#define NVC997_SET_STREAM_OUT_LAYOUT_SELECT(i,j) (0x2800+(i)*128+(j)*4) +#define NVC997_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER00 7:0 +#define 
NVC997_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER01 15:8 +#define NVC997_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER02 23:16 +#define NVC997_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER03 31:24 + +#define NVC997_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE(i) (0x32f4+(i)*4) +#define NVC997_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_V 31:0 + +#define NVC997_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER(i) (0x3314+(i)*4) +#define NVC997_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC997_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3334 +#define NVC997_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC997_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3338 +#define NVC997_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4) +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30 + +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4) +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4 + +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0 + +#define NVC997_START_SHADER_PERFORMANCE_COUNTER 0x33e0 +#define NVC997_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC997_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4 +#define NVC997_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER 0x33e8 +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER_V 31:0 + +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER 0x33ec +#define NVC997_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER_V 31:0 + +#define NVC997_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NVC997_SET_MME_SHADOW_SCRATCH_V 31:0 + +#define NVC997_CALL_MME_MACRO(j) (0x3800+(j)*8) +#define NVC997_CALL_MME_MACRO_V 31:0 + +#define NVC997_CALL_MME_DATA(j) (0x3804+(j)*8) +#define NVC997_CALL_MME_DATA_V 31:0 + +#endif /* _cl_ada_a_h_ */ diff --git 
a/src/common/sdk/nvidia/inc/class/clca70.h b/src/common/sdk/nvidia/inc/class/clca70.h new file mode 100644 index 0000000..647051d --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clca70.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/clca70.finn +// + +#define NVCA70_DISPLAY (0xca70U) /* finn: Evaluated from "NVCA70_ALLOCATION_PARAMETERS_MESSAGE_ID" */ + +#define NVCA70_ALLOCATION_PARAMETERS_MESSAGE_ID (0xca70U) + +typedef struct NVCA70_ALLOCATION_PARAMETERS { + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numDsis; // Number of DSIs in this chip/display +} NVCA70_ALLOCATION_PARAMETERS; + diff --git a/src/common/sdk/nvidia/inc/class/clcb97.h b/src/common/sdk/nvidia/inc/class/clcb97.h new file mode 100644 index 0000000..7528a2d --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clcb97.h @@ -0,0 +1,4480 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _cl_hopper_a_h_ +#define _cl_hopper_a_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../../../class/bin/sw_header.pl hopper_a */ + +#include "nvtypes.h" + +#define HOPPER_A 0xCB97 + +#define NVCB97_SET_OBJECT 0x0000 +#define NVCB97_SET_OBJECT_CLASS_ID 15:0 +#define NVCB97_SET_OBJECT_ENGINE_ID 20:16 + +#define NVCB97_NO_OPERATION 0x0100 +#define NVCB97_NO_OPERATION_V 31:0 + +#define NVCB97_SET_NOTIFY_A 0x0104 +#define NVCB97_SET_NOTIFY_A_ADDRESS_UPPER 24:0 + +#define NVCB97_SET_NOTIFY_B 0x0108 +#define NVCB97_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NVCB97_NOTIFY 0x010c +#define NVCB97_NOTIFY_TYPE 31:0 +#define NVCB97_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NVCB97_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NVCB97_WAIT_FOR_IDLE 0x0110 +#define NVCB97_WAIT_FOR_IDLE_V 31:0 + +#define NVCB97_LOAD_MME_INSTRUCTION_RAM_POINTER 0x0114 +#define NVCB97_LOAD_MME_INSTRUCTION_RAM_POINTER_V 31:0 + +#define NVCB97_LOAD_MME_INSTRUCTION_RAM 0x0118 +#define NVCB97_LOAD_MME_INSTRUCTION_RAM_V 31:0 + +#define NVCB97_LOAD_MME_START_ADDRESS_RAM_POINTER 0x011c +#define NVCB97_LOAD_MME_START_ADDRESS_RAM_POINTER_V 31:0 + +#define NVCB97_LOAD_MME_START_ADDRESS_RAM 0x0120 +#define NVCB97_LOAD_MME_START_ADDRESS_RAM_V 31:0 + +#define NVCB97_SET_MME_SHADOW_RAM_CONTROL 0x0124 +#define NVCB97_SET_MME_SHADOW_RAM_CONTROL_MODE 1:0 +#define NVCB97_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK 0x00000000 +#define NVCB97_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK_WITH_FILTER 0x00000001 +#define NVCB97_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH 0x00000002 +#define NVCB97_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_REPLAY 0x00000003 + +#define NVCB97_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER 0x0128 +#define NVCB97_PEER_SEMAPHORE_RELEASE_OFFSET_UPPER_V 7:0 + +#define NVCB97_PEER_SEMAPHORE_RELEASE_OFFSET 0x012c +#define NVCB97_PEER_SEMAPHORE_RELEASE_OFFSET_V 31:0 + +#define NVCB97_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NVCB97_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVCB97_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NVCB97_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVCB97_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NVCB97_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NVCB97_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVCB97_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVCB97_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVCB97_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVCB97_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVCB97_SEND_GO_IDLE 0x013c +#define NVCB97_SEND_GO_IDLE_V 31:0 + +#define NVCB97_PM_TRIGGER 0x0140 +#define NVCB97_PM_TRIGGER_V 31:0 + +#define NVCB97_PM_TRIGGER_WFI 0x0144 +#define NVCB97_PM_TRIGGER_WFI_V 31:0 + +#define NVCB97_FE_ATOMIC_SEQUENCE_BEGIN 0x0148 +#define NVCB97_FE_ATOMIC_SEQUENCE_BEGIN_V 31:0 + +#define NVCB97_FE_ATOMIC_SEQUENCE_END 0x014c +#define NVCB97_FE_ATOMIC_SEQUENCE_END_V 31:0 + +#define NVCB97_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NVCB97_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NVCB97_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NVCB97_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NVCB97_SET_REPORT_SEMAPHORE_PAYLOAD_LOWER 0x0158 +#define NVCB97_SET_REPORT_SEMAPHORE_PAYLOAD_LOWER_PAYLOAD_LOWER 31:0 + +#define NVCB97_SET_REPORT_SEMAPHORE_PAYLOAD_UPPER 0x015c +#define NVCB97_SET_REPORT_SEMAPHORE_PAYLOAD_UPPER_PAYLOAD_UPPER 31:0 + +#define NVCB97_SET_REPORT_SEMAPHORE_ADDRESS_LOWER 0x0160 +#define 
NVCB97_SET_REPORT_SEMAPHORE_ADDRESS_LOWER_LOWER 31:0 + +#define NVCB97_SET_REPORT_SEMAPHORE_ADDRESS_UPPER 0x0164 +#define NVCB97_SET_REPORT_SEMAPHORE_ADDRESS_UPPER_UPPER 24:0 + +#define NVCB97_REPORT_SEMAPHORE_EXECUTE 0x0168 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_OPERATION 1:0 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_OPERATION_RELEASE 0x00000000 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_OPERATION_ACQUIRE 0x00000001 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_OPERATION_REPORT_ONLY 0x00000002 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_OPERATION_TRAP 0x00000003 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION 5:2 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_NONE 0x00000000 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_DATA_ASSEMBLER 0x00000001 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_VERTEX_SHADER 0x00000002 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_TESSELATION_INIT_SHADER 0x00000008 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_TESSELATION_SHADER 0x00000009 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_GEOMETRY_SHADER 0x00000006 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_STREAMING_OUTPUT 0x00000005 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_VPC 0x00000004 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_ZCULL 0x00000007 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_PIXEL_SHADER 0x0000000A +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_DEPTH_TEST 0x0000000C +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PIPELINE_LOCATION_ALL 0x0000000F +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_AWAKEN_ENABLE 6:6 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_AWAKEN_ENABLE_TRUE 0x00000001 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT 11:7 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_NONE 0x00000000 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_DA_VERTICES_GENERATED 0x00000001 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_DA_PRIMITIVES_GENERATED 0x00000003 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_VS_INVOCATIONS 0x00000005 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_TI_INVOCATIONS 0x0000001B +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_TS_INVOCATIONS 0x0000001D +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_TS_PRIMITIVES_GENERATED 0x0000001F +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_GS_INVOCATIONS 0x00000007 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_GS_PRIMITIVES_GENERATED 0x00000009 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_ALPHA_BETA_CLOCKS 0x00000004 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_SCG_CLOCKS 0x00000008 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_VTG_PRIMITIVES_OUT 0x00000012 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x0000001E +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_STREAMING_PRIMITIVES_SUCCEEDED 0x0000000B +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_STREAMING_PRIMITIVES_NEEDED 0x0000000D +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000006 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_STREAMING_BYTE_COUNT 0x0000001A +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_CLIPPER_INVOCATIONS 0x0000000F +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_CLIPPER_PRIMITIVES_GENERATED 0x00000011 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_ZCULL_STATS0 0x0000000A +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_ZCULL_STATS1 
0x0000000C +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_ZCULL_STATS2 0x0000000E +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_ZCULL_STATS3 0x00000010 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_PS_INVOCATIONS 0x00000013 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_ZPASS_PIXEL_CNT 0x00000002 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_ZPASS_PIXEL_CNT64 0x00000015 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_TILED_ZPASS_PIXEL_CNT64 0x00000017 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_IEEE_CLEAN_COLOR_TARGET 0x00000018 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_IEEE_CLEAN_ZETA_TARGET 0x00000019 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_BOUNDING_RECTANGLE 0x0000001C +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REPORT_TIMESTAMP 0x00000014 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE 14:13 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE_SEMAPHORE_FOUR_WORDS 0x00000000 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE_SEMAPHORE_ONE_WORD 0x00000001 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE_SEMAPHORE_TWO_WORDS 0x00000002 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_SUB_REPORT 17:15 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_FLUSH_DISABLE 19:19 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_FLUSH_DISABLE_FALSE 0x00000000 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_FLUSH_DISABLE_TRUE 0x00000001 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_ROP_FLUSH_DISABLE 18:18 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_ROP_FLUSH_DISABLE_FALSE 0x00000000 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_ROP_FLUSH_DISABLE_TRUE 0x00000001 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REDUCTION_ENABLE 20:20 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP 23:21 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_ADD 0x00000000 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_MIN 0x00000001 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_MAX 0x00000002 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_INC 0x00000003 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_DEC 0x00000004 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_AND 0x00000005 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_OR 0x00000006 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_XOR 0x00000007 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REDUCTION_FORMAT 25:24 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000000 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000001 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PAYLOAD_SIZE64 27:27 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PAYLOAD_SIZE64_FALSE 0x00000000 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_PAYLOAD_SIZE64_TRUE 0x00000001 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE 29:28 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_NONE 0x00000000 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_UNCONDITIONAL 0x00000001 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_CONDITIONAL 0x00000002 +#define NVCB97_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_CONDITIONAL_EXT 0x00000003 + +#define NVCB97_LINE_LENGTH_IN 0x0180 +#define NVCB97_LINE_LENGTH_IN_VALUE 31:0 + +#define NVCB97_LINE_COUNT 0x0184 +#define NVCB97_LINE_COUNT_VALUE 31:0 + +#define NVCB97_OFFSET_OUT_UPPER 0x0188 +#define NVCB97_OFFSET_OUT_UPPER_VALUE 24:0 + +#define NVCB97_OFFSET_OUT 0x018c +#define NVCB97_OFFSET_OUT_VALUE 31:0 + 
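The REPORT_SEMAPHORE_EXECUTE fields defined above all pack into one method word, which conventionally follows the two payload and two address writes. A hedged sketch of a 64-bit semaphore release under that assumption, with push_method() as a hypothetical stand-in for the client's pushbuffer writer and F() packing an MSB:LSB field:

#include <stdint.h>

extern void push_method(uint32_t method, uint32_t data);  /* hypothetical */

/* Place n into an MSB:LSB field (fields narrower than 32 bits only). */
#define F(hi, lo, n)  (((uint32_t)(n) & ((1u << ((hi) - (lo) + 1u)) - 1u)) << (lo))

static void semaphore_release(uint64_t gpu_va, uint64_t payload)
{
    push_method(0x0158, (uint32_t)payload);          /* ..._PAYLOAD_LOWER */
    push_method(0x015c, (uint32_t)(payload >> 32));  /* ..._PAYLOAD_UPPER */
    push_method(0x0160, (uint32_t)gpu_va);           /* ..._ADDRESS_LOWER */
    push_method(0x0164, (uint32_t)(gpu_va >> 32));   /* ..._ADDRESS_UPPER, 24:0 */
    push_method(0x0168,                              /* REPORT_SEMAPHORE_EXECUTE */
                F(1, 0, 0x0)      /* OPERATION_RELEASE */
              | F(14, 13, 0x2)    /* STRUCTURE_SIZE_SEMAPHORE_TWO_WORDS */
              | F(27, 27, 0x1));  /* PAYLOAD_SIZE64_TRUE */
}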
+#define NVCB97_PITCH_OUT 0x0190 +#define NVCB97_PITCH_OUT_VALUE 31:0 + +#define NVCB97_SET_DST_BLOCK_SIZE 0x0194 +#define NVCB97_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVCB97_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVCB97_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVCB97_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVCB97_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVCB97_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVCB97_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVCB97_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVCB97_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVCB97_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVCB97_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVCB97_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVCB97_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVCB97_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NVCB97_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVCB97_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVCB97_SET_DST_WIDTH 0x0198 +#define NVCB97_SET_DST_WIDTH_V 31:0 + +#define NVCB97_SET_DST_HEIGHT 0x019c +#define NVCB97_SET_DST_HEIGHT_V 31:0 + +#define NVCB97_SET_DST_DEPTH 0x01a0 +#define NVCB97_SET_DST_DEPTH_V 31:0 + +#define NVCB97_SET_DST_LAYER 0x01a4 +#define NVCB97_SET_DST_LAYER_V 31:0 + +#define NVCB97_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVCB97_SET_DST_ORIGIN_BYTES_X_V 20:0 + +#define NVCB97_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVCB97_SET_DST_ORIGIN_SAMPLES_Y_V 16:0 + +#define NVCB97_LAUNCH_DMA 0x01b0 +#define NVCB97_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVCB97_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVCB97_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVCB97_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVCB97_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVCB97_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 +#define NVCB97_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVCB97_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVCB97_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 +#define NVCB97_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVCB97_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVCB97_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVCB97_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define NVCB97_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVCB97_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVCB97_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVCB97_LAUNCH_DMA_REDUCTION_OP 15:13 +#define NVCB97_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVCB97_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVCB97_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVCB97_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVCB97_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 +#define NVCB97_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVCB97_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVCB97_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVCB97_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVCB97_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVCB97_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVCB97_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVCB97_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVCB97_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVCB97_LOAD_INLINE_DATA 0x01b4 +#define NVCB97_LOAD_INLINE_DATA_V 31:0 + +#define 
NVCB97_SET_I2M_SEMAPHORE_A 0x01dc +#define NVCB97_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 24:0 + +#define NVCB97_SET_I2M_SEMAPHORE_B 0x01e0 +#define NVCB97_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVCB97_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVCB97_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVCB97_SET_MME_SWITCH_STATE 0x01ec +#define NVCB97_SET_MME_SWITCH_STATE_VALID 0:0 +#define NVCB97_SET_MME_SWITCH_STATE_VALID_FALSE 0x00000000 +#define NVCB97_SET_MME_SWITCH_STATE_VALID_TRUE 0x00000001 +#define NVCB97_SET_MME_SWITCH_STATE_SAVE_MACRO 11:4 +#define NVCB97_SET_MME_SWITCH_STATE_RESTORE_MACRO 19:12 + +#define NVCB97_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVCB97_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVCB97_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVCB97_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVCB97_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVCB97_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVCB97_SET_I2M_SPARE_NOOP03 0x01fc +#define NVCB97_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVCB97_RUN_DS_NOW 0x0200 +#define NVCB97_RUN_DS_NOW_V 31:0 + +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS 0x0204 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD 4:0 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_INSTANTANEOUS 0x00000000 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16 0x00000001 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32 0x00000002 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__64 0x00000003 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__128 0x00000004 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__256 0x00000005 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__512 0x00000006 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1024 0x00000007 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2048 0x00000008 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4096 0x00000009 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__8192 0x0000000A +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__16384 0x0000000B +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__32768 0x0000000C +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__65536 0x0000000D +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__131072 0x0000000E +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__262144 0x0000000F +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__524288 0x00000010 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__1048576 0x00000011 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__2097152 0x00000012 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD__4194304 0x00000013 +#define NVCB97_SET_OPPORTUNISTIC_EARLY_Z_HYSTERESIS_ACCUMULATED_PRIM_AREA_THRESHOLD_LATEZ_ALWAYS 0x0000001F + +#define NVCB97_SET_GS_MODE 0x0208 +#define NVCB97_SET_GS_MODE_TYPE 0:0 +#define NVCB97_SET_GS_MODE_TYPE_ANY 0x00000000 +#define NVCB97_SET_GS_MODE_TYPE_FAST_GS 0x00000001 + +#define 
NVCB97_SET_ALIASED_LINE_WIDTH_ENABLE 0x020c +#define NVCB97_SET_ALIASED_LINE_WIDTH_ENABLE_V 0:0 +#define NVCB97_SET_ALIASED_LINE_WIDTH_ENABLE_V_FALSE 0x00000000 +#define NVCB97_SET_ALIASED_LINE_WIDTH_ENABLE_V_TRUE 0x00000001 + +#define NVCB97_SET_API_MANDATED_EARLY_Z 0x0210 +#define NVCB97_SET_API_MANDATED_EARLY_Z_ENABLE 0:0 +#define NVCB97_SET_API_MANDATED_EARLY_Z_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_API_MANDATED_EARLY_Z_ENABLE_TRUE 0x00000001 + +#define NVCB97_SET_GS_DM_FIFO 0x0214 +#define NVCB97_SET_GS_DM_FIFO_SIZE_RASTER_ON 12:0 +#define NVCB97_SET_GS_DM_FIFO_SIZE_RASTER_OFF 28:16 +#define NVCB97_SET_GS_DM_FIFO_SPILL_ENABLED 31:31 +#define NVCB97_SET_GS_DM_FIFO_SPILL_ENABLED_FALSE 0x00000000 +#define NVCB97_SET_GS_DM_FIFO_SPILL_ENABLED_TRUE 0x00000001 + +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS 0x0218 +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY 5:4 +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_PREFETCH_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVCB97_INVALIDATE_SHADER_CACHES 0x021c +#define NVCB97_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVCB97_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVCB97_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVCB97_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVCB97_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVCB97_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVCB97_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVCB97_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVCB97_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVCB97_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVCB97_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVCB97_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVCB97_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVCB97_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVCB97_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVCB97_SET_INSTANCE_COUNT 0x0220 +#define NVCB97_SET_INSTANCE_COUNT_V 31:0 + +#define NVCB97_SET_POSITION_W_SCALED_OFFSET_ENABLE 0x0224 +#define NVCB97_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE 0:0 +#define NVCB97_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_POSITION_W_SCALED_OFFSET_ENABLE_ENABLE_TRUE 0x00000001 + +#define NVCB97_SET_GO_IDLE_TIMEOUT 0x022c +#define NVCB97_SET_GO_IDLE_TIMEOUT_V 31:0 + +#define NVCB97_SET_MME_VERSION 0x0234 +#define NVCB97_SET_MME_VERSION_MAJOR 7:0 + +#define NVCB97_SET_INDEX_BUFFER_SIZE_A 0x0238 +#define NVCB97_SET_INDEX_BUFFER_SIZE_A_UPPER 7:0 + +#define NVCB97_SET_INDEX_BUFFER_SIZE_B 0x023c +#define NVCB97_SET_INDEX_BUFFER_SIZE_B_LOWER 31:0 + +#define NVCB97_SET_ROOT_TABLE_VISIBILITY(i) (0x0240+(i)*4) +#define NVCB97_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP0_ENABLE 1:0 +#define NVCB97_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP1_ENABLE 5:4 +#define NVCB97_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP2_ENABLE 9:8 +#define NVCB97_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP3_ENABLE 13:12 +#define NVCB97_SET_ROOT_TABLE_VISIBILITY_BINDING_GROUP4_ENABLE 17:16 + +#define NVCB97_SET_DRAW_CONTROL_A 0x0260 +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY 3:0 +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY_POINTS 0x00000000 +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY_LINES 
0x00000001 +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLES 0x00000004 +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY_QUADS 0x00000007 +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY_POLYGON 0x00000009 +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVCB97_SET_DRAW_CONTROL_A_TOPOLOGY_PATCH 0x0000000E +#define NVCB97_SET_DRAW_CONTROL_A_PRIMITIVE_ID 4:4 +#define NVCB97_SET_DRAW_CONTROL_A_PRIMITIVE_ID_FIRST 0x00000000 +#define NVCB97_SET_DRAW_CONTROL_A_PRIMITIVE_ID_UNCHANGED 0x00000001 +#define NVCB97_SET_DRAW_CONTROL_A_INSTANCE_ID 6:5 +#define NVCB97_SET_DRAW_CONTROL_A_INSTANCE_ID_FIRST 0x00000000 +#define NVCB97_SET_DRAW_CONTROL_A_INSTANCE_ID_SUBSEQUENT 0x00000001 +#define NVCB97_SET_DRAW_CONTROL_A_INSTANCE_ID_UNCHANGED 0x00000002 +#define NVCB97_SET_DRAW_CONTROL_A_SPLIT_MODE 8:7 +#define NVCB97_SET_DRAW_CONTROL_A_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000 +#define NVCB97_SET_DRAW_CONTROL_A_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001 +#define NVCB97_SET_DRAW_CONTROL_A_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002 +#define NVCB97_SET_DRAW_CONTROL_A_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003 +#define NVCB97_SET_DRAW_CONTROL_A_INSTANCE_ITERATE_ENABLE 9:9 +#define NVCB97_SET_DRAW_CONTROL_A_INSTANCE_ITERATE_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_DRAW_CONTROL_A_INSTANCE_ITERATE_ENABLE_TRUE 0x00000001 +#define NVCB97_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_VERTEX_INDEX 10:10 +#define NVCB97_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_VERTEX_INDEX_FALSE 0x00000000 +#define NVCB97_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_VERTEX_INDEX_TRUE 0x00000001 +#define NVCB97_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_INSTANCE_INDEX 11:11 +#define NVCB97_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_INSTANCE_INDEX_FALSE 0x00000000 +#define NVCB97_SET_DRAW_CONTROL_A_IGNORE_GLOBAL_BASE_INSTANCE_INDEX_TRUE 0x00000001 + +#define NVCB97_SET_DRAW_CONTROL_B 0x0264 +#define NVCB97_SET_DRAW_CONTROL_B_INSTANCE_COUNT 31:0 + +#define NVCB97_DRAW_INDEX_BUFFER_BEGIN_END_A 0x0268 +#define NVCB97_DRAW_INDEX_BUFFER_BEGIN_END_A_FIRST 31:0 + +#define NVCB97_DRAW_INDEX_BUFFER_BEGIN_END_B 0x026c +#define NVCB97_DRAW_INDEX_BUFFER_BEGIN_END_B_COUNT 31:0 + +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_A 0x0270 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_A_START 31:0 + +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_B 0x0274 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_B_COUNT 31:0 + +#define NVCB97_INVALIDATE_RASTER_CACHE_NO_WFI 0x027c +#define NVCB97_INVALIDATE_RASTER_CACHE_NO_WFI_V 0:0 + +#define NVCB97_SET_COLOR_RENDER_TO_ZETA_SURFACE 0x02b8 +#define NVCB97_SET_COLOR_RENDER_TO_ZETA_SURFACE_V 0:0 +#define NVCB97_SET_COLOR_RENDER_TO_ZETA_SURFACE_V_FALSE 0x00000000 +#define NVCB97_SET_COLOR_RENDER_TO_ZETA_SURFACE_V_TRUE 0x00000001 + +#define NVCB97_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION 0x02bc +#define NVCB97_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION_V 0:0 +#define NVCB97_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION_V_FALSE 0x00000000 +#define NVCB97_SET_ZCULL_VISIBLE_PRIM_OPTIMIZATION_V_TRUE 0x00000001 
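SET_DRAW_CONTROL_A/B and the BEGIN_END pairs above fold topology, primitive/instance ID behavior, instancing, and the vertex range into four writes. A hedged sketch of a non-indexed instanced draw, reusing the hypothetical push_method() writer and assuming, as the method naming suggests, that the _B count write launches the draw:

#include <stdint.h>

extern void push_method(uint32_t method, uint32_t data);  /* hypothetical */

static void draw_triangles(uint32_t first_vertex, uint32_t vertex_count,
                           uint32_t instance_count)
{
    push_method(0x0260, 0x4);             /* SET_DRAW_CONTROL_A: TOPOLOGY_TRIANGLES in 3:0 */
    push_method(0x0264, instance_count);  /* SET_DRAW_CONTROL_B: INSTANCE_COUNT */
    push_method(0x0270, first_vertex);    /* DRAW_VERTEX_ARRAY_BEGIN_END_A: START */
    push_method(0x0274, vertex_count);    /* DRAW_VERTEX_ARRAY_BEGIN_END_B: COUNT */
}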
+ +#define NVCB97_INCREMENT_SYNC_POINT 0x02c8 +#define NVCB97_INCREMENT_SYNC_POINT_INDEX 11:0 +#define NVCB97_INCREMENT_SYNC_POINT_CLEAN_L2 16:16 +#define NVCB97_INCREMENT_SYNC_POINT_CLEAN_L2_FALSE 0x00000000 +#define NVCB97_INCREMENT_SYNC_POINT_CLEAN_L2_TRUE 0x00000001 +#define NVCB97_INCREMENT_SYNC_POINT_CONDITION 20:20 +#define NVCB97_INCREMENT_SYNC_POINT_CONDITION_STREAM_OUT_WRITES_DONE 0x00000000 +#define NVCB97_INCREMENT_SYNC_POINT_CONDITION_ROP_WRITES_DONE 0x00000001 + +#define NVCB97_SET_ROOT_TABLE_PREFETCH 0x02d0 +#define NVCB97_SET_ROOT_TABLE_PREFETCH_STAGE_ENABLES 5:0 + +#define NVCB97_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE 0x02d4 +#define NVCB97_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE_V 0:0 + +#define NVCB97_SET_SURFACE_CLIP_ID_BLOCK_SIZE 0x02d8 +#define NVCB97_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH 3:0 +#define NVCB97_SET_SURFACE_CLIP_ID_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVCB97_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT 7:4 +#define NVCB97_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVCB97_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVCB97_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVCB97_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVCB97_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVCB97_SET_SURFACE_CLIP_ID_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVCB97_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH 11:8 +#define NVCB97_SET_SURFACE_CLIP_ID_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 + +#define NVCB97_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dc +#define NVCB97_SET_ALPHA_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 13:0 + +#define NVCB97_DECOMPRESS_SURFACE 0x02e0 +#define NVCB97_DECOMPRESS_SURFACE_MRT_SELECT 2:0 +#define NVCB97_DECOMPRESS_SURFACE_RT_ARRAY_INDEX 19:4 + +#define NVCB97_SET_ZCULL_ROP_BYPASS 0x02e4 +#define NVCB97_SET_ZCULL_ROP_BYPASS_ENABLE 0:0 +#define NVCB97_SET_ZCULL_ROP_BYPASS_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_ZCULL_ROP_BYPASS_ENABLE_TRUE 0x00000001 +#define NVCB97_SET_ZCULL_ROP_BYPASS_NO_STALL 4:4 +#define NVCB97_SET_ZCULL_ROP_BYPASS_NO_STALL_FALSE 0x00000000 +#define NVCB97_SET_ZCULL_ROP_BYPASS_NO_STALL_TRUE 0x00000001 +#define NVCB97_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING 8:8 +#define NVCB97_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_FALSE 0x00000000 +#define NVCB97_SET_ZCULL_ROP_BYPASS_CULL_EVERYTHING_TRUE 0x00000001 +#define NVCB97_SET_ZCULL_ROP_BYPASS_THRESHOLD 15:12 + +#define NVCB97_SET_ZCULL_SUBREGION 0x02e8 +#define NVCB97_SET_ZCULL_SUBREGION_ENABLE 0:0 +#define NVCB97_SET_ZCULL_SUBREGION_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_ZCULL_SUBREGION_ENABLE_TRUE 0x00000001 +#define NVCB97_SET_ZCULL_SUBREGION_NORMALIZED_ALIQUOTS 27:4 + +#define NVCB97_SET_RASTER_BOUNDING_BOX 0x02ec +#define NVCB97_SET_RASTER_BOUNDING_BOX_MODE 0:0 +#define NVCB97_SET_RASTER_BOUNDING_BOX_MODE_BOUNDING_BOX 0x00000000 +#define NVCB97_SET_RASTER_BOUNDING_BOX_MODE_FULL_VIEWPORT 0x00000001 +#define NVCB97_SET_RASTER_BOUNDING_BOX_PAD 11:4 + +#define NVCB97_PEER_SEMAPHORE_RELEASE 0x02f0 +#define NVCB97_PEER_SEMAPHORE_RELEASE_V 31:0 + +#define NVCB97_SET_ITERATED_BLEND_OPTIMIZATION 0x02f4 +#define NVCB97_SET_ITERATED_BLEND_OPTIMIZATION_NOOP 1:0 +#define NVCB97_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_NEVER 0x00000000 +#define NVCB97_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0000 0x00000001 +#define NVCB97_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_ALPHA_0 0x00000002 +#define NVCB97_SET_ITERATED_BLEND_OPTIMIZATION_NOOP_SOURCE_RGBA_0001 0x00000003 + +#define 
NVCB97_SET_ZCULL_SUBREGION_ALLOCATION 0x02f8 +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_SUBREGION_ID 7:0 +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_ALIQUOTS 23:8 +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT 27:24 +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16X2_4X4 0x00000000 +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X16_4X4 0x00000001 +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X2 0x00000002 +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_2X4 0x00000003 +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X8_4X4 0x00000004 +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_4X2 0x00000005 +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_8X8_2X4 0x00000006 +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_16X16_4X8 0x00000007 +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_2X2 0x00000008 +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_4X2 0x00000009 +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_16X8_2X4 0x0000000A +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_ZS_8X8_2X2 0x0000000B +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_Z_4X8_1X1 0x0000000C +#define NVCB97_SET_ZCULL_SUBREGION_ALLOCATION_FORMAT_NONE 0x0000000F + +#define NVCB97_ASSIGN_ZCULL_SUBREGIONS 0x02fc +#define NVCB97_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM 1:0 +#define NVCB97_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Static 0x00000000 +#define NVCB97_ASSIGN_ZCULL_SUBREGIONS_ALGORITHM_Adaptive 0x00000001 + +#define NVCB97_SET_PS_OUTPUT_SAMPLE_MASK_USAGE 0x0300 +#define NVCB97_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE 0:0 +#define NVCB97_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_ENABLE_TRUE 0x00000001 +#define NVCB97_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE 1:1 +#define NVCB97_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000 +#define NVCB97_SET_PS_OUTPUT_SAMPLE_MASK_USAGE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001 + +#define NVCB97_DRAW_ZERO_INDEX 0x0304 +#define NVCB97_DRAW_ZERO_INDEX_COUNT 31:0 + +#define NVCB97_SET_L1_CONFIGURATION 0x0308 +#define NVCB97_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY 2:0 +#define NVCB97_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_16KB 0x00000001 +#define NVCB97_SET_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB 0x00000003 + +#define NVCB97_SET_RENDER_ENABLE_CONTROL 0x030c +#define NVCB97_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER 0:0 +#define NVCB97_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_FALSE 0x00000000 +#define NVCB97_SET_RENDER_ENABLE_CONTROL_CONDITIONAL_LOAD_CONSTANT_BUFFER_TRUE 0x00000001 + +#define NVCB97_SET_SPA_VERSION 0x0310 +#define NVCB97_SET_SPA_VERSION_MINOR 7:0 +#define NVCB97_SET_SPA_VERSION_MAJOR 15:8 + +#define NVCB97_SET_TIMESLICE_BATCH_LIMIT 0x0314 +#define NVCB97_SET_TIMESLICE_BATCH_LIMIT_BATCH_LIMIT 15:0 + +#define NVCB97_SET_SNAP_GRID_LINE 0x0318 +#define NVCB97_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL 3:0 +#define NVCB97_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001 +#define NVCB97_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002 +#define NVCB97_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003 +#define NVCB97_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004 +#define NVCB97_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005 +#define NVCB97_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006 +#define 
NVCB97_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007 +#define NVCB97_SET_SNAP_GRID_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008 +#define NVCB97_SET_SNAP_GRID_LINE_ROUNDING_MODE 8:8 +#define NVCB97_SET_SNAP_GRID_LINE_ROUNDING_MODE_RTNE 0x00000000 +#define NVCB97_SET_SNAP_GRID_LINE_ROUNDING_MODE_TESLA 0x00000001 + +#define NVCB97_SET_SNAP_GRID_NON_LINE 0x031c +#define NVCB97_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL 3:0 +#define NVCB97_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__2X2 0x00000001 +#define NVCB97_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__4X4 0x00000002 +#define NVCB97_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__8X8 0x00000003 +#define NVCB97_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__16X16 0x00000004 +#define NVCB97_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__32X32 0x00000005 +#define NVCB97_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__64X64 0x00000006 +#define NVCB97_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__128X128 0x00000007 +#define NVCB97_SET_SNAP_GRID_NON_LINE_LOCATIONS_PER_PIXEL__256X256 0x00000008 +#define NVCB97_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE 8:8 +#define NVCB97_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_RTNE 0x00000000 +#define NVCB97_SET_SNAP_GRID_NON_LINE_ROUNDING_MODE_TESLA 0x00000001 + +#define NVCB97_SET_TESSELLATION_PARAMETERS 0x0320 +#define NVCB97_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE 1:0 +#define NVCB97_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_ISOLINE 0x00000000 +#define NVCB97_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_TRIANGLE 0x00000001 +#define NVCB97_SET_TESSELLATION_PARAMETERS_DOMAIN_TYPE_QUAD 0x00000002 +#define NVCB97_SET_TESSELLATION_PARAMETERS_SPACING 5:4 +#define NVCB97_SET_TESSELLATION_PARAMETERS_SPACING_INTEGER 0x00000000 +#define NVCB97_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_ODD 0x00000001 +#define NVCB97_SET_TESSELLATION_PARAMETERS_SPACING_FRACTIONAL_EVEN 0x00000002 +#define NVCB97_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES 9:8 +#define NVCB97_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_POINTS 0x00000000 +#define NVCB97_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_LINES 0x00000001 +#define NVCB97_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CW 0x00000002 +#define NVCB97_SET_TESSELLATION_PARAMETERS_OUTPUT_PRIMITIVES_TRIANGLES_CCW 0x00000003 + +#define NVCB97_SET_TESSELLATION_LOD_U0_OR_DENSITY 0x0324 +#define NVCB97_SET_TESSELLATION_LOD_U0_OR_DENSITY_V 31:0 + +#define NVCB97_SET_TESSELLATION_LOD_V0_OR_DETAIL 0x0328 +#define NVCB97_SET_TESSELLATION_LOD_V0_OR_DETAIL_V 31:0 + +#define NVCB97_SET_TESSELLATION_LOD_U1_OR_W0 0x032c +#define NVCB97_SET_TESSELLATION_LOD_U1_OR_W0_V 31:0 + +#define NVCB97_SET_TESSELLATION_LOD_V1 0x0330 +#define NVCB97_SET_TESSELLATION_LOD_V1_V 31:0 + +#define NVCB97_SET_TG_LOD_INTERIOR_U 0x0334 +#define NVCB97_SET_TG_LOD_INTERIOR_U_V 31:0 + +#define NVCB97_SET_TG_LOD_INTERIOR_V 0x0338 +#define NVCB97_SET_TG_LOD_INTERIOR_V_V 31:0 + +#define NVCB97_RESERVED_TG07 0x033c +#define NVCB97_RESERVED_TG07_V 0:0 + +#define NVCB97_RESERVED_TG08 0x0340 +#define NVCB97_RESERVED_TG08_V 0:0 + +#define NVCB97_RESERVED_TG09 0x0344 +#define NVCB97_RESERVED_TG09_V 0:0 + +#define NVCB97_RESERVED_TG10 0x0348 +#define NVCB97_RESERVED_TG10_V 0:0 + +#define NVCB97_RESERVED_TG11 0x034c +#define NVCB97_RESERVED_TG11_V 0:0 + +#define NVCB97_RESERVED_TG12 0x0350 +#define NVCB97_RESERVED_TG12_V 0:0 + +#define NVCB97_RESERVED_TG13 0x0354 +#define NVCB97_RESERVED_TG13_V 0:0 + +#define NVCB97_RESERVED_TG14 0x0358 +#define NVCB97_RESERVED_TG14_V 0:0 + +#define NVCB97_RESERVED_TG15 0x035c +#define 
NVCB97_RESERVED_TG15_V 0:0 + +#define NVCB97_SET_SUBTILING_PERF_KNOB_A 0x0360 +#define NVCB97_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_REGISTER_FILE_PER_SUBTILE 7:0 +#define NVCB97_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_PIXEL_OUTPUT_BUFFER_PER_SUBTILE 15:8 +#define NVCB97_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_SPM_TRIANGLE_RAM_PER_SUBTILE 23:16 +#define NVCB97_SET_SUBTILING_PERF_KNOB_A_FRACTION_OF_MAX_QUADS_PER_SUBTILE 31:24 + +#define NVCB97_SET_SUBTILING_PERF_KNOB_B 0x0364 +#define NVCB97_SET_SUBTILING_PERF_KNOB_B_FRACTION_OF_MAX_PRIMITIVES_PER_SUBTILE 7:0 + +#define NVCB97_SET_SUBTILING_PERF_KNOB_C 0x0368 +#define NVCB97_SET_SUBTILING_PERF_KNOB_C_RESERVED 0:0 + +#define NVCB97_SET_ZCULL_SUBREGION_TO_REPORT 0x036c +#define NVCB97_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE 0:0 +#define NVCB97_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_ZCULL_SUBREGION_TO_REPORT_ENABLE_TRUE 0x00000001 +#define NVCB97_SET_ZCULL_SUBREGION_TO_REPORT_SUBREGION_ID 11:4 + +#define NVCB97_SET_ZCULL_SUBREGION_REPORT_TYPE 0x0370 +#define NVCB97_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE 0:0 +#define NVCB97_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_ZCULL_SUBREGION_REPORT_TYPE_ENABLE_TRUE 0x00000001 +#define NVCB97_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE 6:4 +#define NVCB97_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST 0x00000000 +#define NVCB97_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_NO_ACCEPT 0x00000001 +#define NVCB97_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_DEPTH_TEST_LATE_Z 0x00000002 +#define NVCB97_SET_ZCULL_SUBREGION_REPORT_TYPE_TYPE_STENCIL_TEST 0x00000003 + +#define NVCB97_SET_BALANCED_PRIMITIVE_WORKLOAD 0x0374 +#define NVCB97_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE 0:0 +#define NVCB97_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_FALSE 0x00000000 +#define NVCB97_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_UNPARTITIONED_MODE_TRUE 0x00000001 +#define NVCB97_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE 4:4 +#define NVCB97_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_FALSE 0x00000000 +#define NVCB97_SET_BALANCED_PRIMITIVE_WORKLOAD_IN_TIMESLICED_MODE_TRUE 0x00000001 +#define NVCB97_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE 8:8 +#define NVCB97_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE_FALSE 0x00000000 +#define NVCB97_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_UNPARTITIONED_MODE_TRUE 0x00000001 +#define NVCB97_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE 9:9 +#define NVCB97_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE_FALSE 0x00000000 +#define NVCB97_SET_BALANCED_PRIMITIVE_WORKLOAD_BY_PES_IN_TIMESLICED_MODE_TRUE 0x00000001 + +#define NVCB97_SET_MAX_PATCHES_PER_BATCH 0x0378 +#define NVCB97_SET_MAX_PATCHES_PER_BATCH_V 5:0 + +#define NVCB97_SET_RASTER_ENABLE 0x037c +#define NVCB97_SET_RASTER_ENABLE_V 0:0 +#define NVCB97_SET_RASTER_ENABLE_V_FALSE 0x00000000 +#define NVCB97_SET_RASTER_ENABLE_V_TRUE 0x00000001 + +#define NVCB97_SET_STREAM_OUT_BUFFER_ENABLE(j) (0x0380+(j)*32) +#define NVCB97_SET_STREAM_OUT_BUFFER_ENABLE_V 0:0 +#define NVCB97_SET_STREAM_OUT_BUFFER_ENABLE_V_FALSE 0x00000000 +#define NVCB97_SET_STREAM_OUT_BUFFER_ENABLE_V_TRUE 0x00000001 + +#define NVCB97_SET_STREAM_OUT_BUFFER_ADDRESS_A(j) (0x0384+(j)*32) +#define NVCB97_SET_STREAM_OUT_BUFFER_ADDRESS_A_UPPER 7:0 + +#define NVCB97_SET_STREAM_OUT_BUFFER_ADDRESS_B(j) (0x0388+(j)*32) +#define NVCB97_SET_STREAM_OUT_BUFFER_ADDRESS_B_LOWER 31:0 + +#define NVCB97_SET_STREAM_OUT_BUFFER_SIZE(j) (0x038c+(j)*32) +#define 
+
+#define NVCB97_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER(j) (0x0390+(j)*32)
+#define NVCB97_SET_STREAM_OUT_BUFFER_LOAD_WRITE_POINTER_START_OFFSET 31:0
+
+#define NVCB97_SET_POSITION_W_SCALED_OFFSET_SCALE_A(j) (0x0400+(j)*16)
+#define NVCB97_SET_POSITION_W_SCALED_OFFSET_SCALE_A_V 31:0
+
+#define NVCB97_SET_POSITION_W_SCALED_OFFSET_SCALE_B(j) (0x0404+(j)*16)
+#define NVCB97_SET_POSITION_W_SCALED_OFFSET_SCALE_B_V 31:0
+
+#define NVCB97_SET_POSITION_W_SCALED_OFFSET_RESERVED_A(j) (0x0408+(j)*16)
+#define NVCB97_SET_POSITION_W_SCALED_OFFSET_RESERVED_A_V 31:0
+
+#define NVCB97_SET_POSITION_W_SCALED_OFFSET_RESERVED_B(j) (0x040c+(j)*16)
+#define NVCB97_SET_POSITION_W_SCALED_OFFSET_RESERVED_B_V 31:0
+
+#define NVCB97_SET_Z_ROP_SLICE_MAP 0x0500
+#define NVCB97_SET_Z_ROP_SLICE_MAP_VIRTUAL_ADDRESS_MASK 31:0
+
+#define NVCB97_SET_ROOT_TABLE_SELECTOR 0x0504
+#define NVCB97_SET_ROOT_TABLE_SELECTOR_ROOT_TABLE 2:0
+#define NVCB97_SET_ROOT_TABLE_SELECTOR_OFFSET 15:8
+
+#define NVCB97_LOAD_ROOT_TABLE 0x0508
+#define NVCB97_LOAD_ROOT_TABLE_V 31:0
+
+#define NVCB97_SET_MME_MEM_ADDRESS_A 0x0550
+#define NVCB97_SET_MME_MEM_ADDRESS_A_UPPER 24:0
+
+#define NVCB97_SET_MME_MEM_ADDRESS_B 0x0554
+#define NVCB97_SET_MME_MEM_ADDRESS_B_LOWER 31:0
+
+#define NVCB97_SET_MME_DATA_RAM_ADDRESS 0x0558
+#define NVCB97_SET_MME_DATA_RAM_ADDRESS_WORD 31:0
+
+#define NVCB97_MME_DMA_READ 0x055c
+#define NVCB97_MME_DMA_READ_LENGTH 31:0
+
+#define NVCB97_MME_DMA_READ_FIFOED 0x0560
+#define NVCB97_MME_DMA_READ_FIFOED_LENGTH 31:0
+
+#define NVCB97_MME_DMA_WRITE 0x0564
+#define NVCB97_MME_DMA_WRITE_LENGTH 31:0
+
+#define NVCB97_MME_DMA_REDUCTION 0x0568
+#define NVCB97_MME_DMA_REDUCTION_REDUCTION_OP 2:0
+#define NVCB97_MME_DMA_REDUCTION_REDUCTION_OP_RED_ADD 0x00000000
+#define NVCB97_MME_DMA_REDUCTION_REDUCTION_OP_RED_MIN 0x00000001
+#define NVCB97_MME_DMA_REDUCTION_REDUCTION_OP_RED_MAX 0x00000002
+#define NVCB97_MME_DMA_REDUCTION_REDUCTION_OP_RED_INC 0x00000003
+#define NVCB97_MME_DMA_REDUCTION_REDUCTION_OP_RED_DEC 0x00000004
+#define NVCB97_MME_DMA_REDUCTION_REDUCTION_OP_RED_AND 0x00000005
+#define NVCB97_MME_DMA_REDUCTION_REDUCTION_OP_RED_OR 0x00000006
+#define NVCB97_MME_DMA_REDUCTION_REDUCTION_OP_RED_XOR 0x00000007
+#define NVCB97_MME_DMA_REDUCTION_REDUCTION_FORMAT 5:4
+#define NVCB97_MME_DMA_REDUCTION_REDUCTION_FORMAT_UNSIGNED 0x00000000
+#define NVCB97_MME_DMA_REDUCTION_REDUCTION_FORMAT_SIGNED 0x00000001
+#define NVCB97_MME_DMA_REDUCTION_REDUCTION_SIZE 8:8
+#define NVCB97_MME_DMA_REDUCTION_REDUCTION_SIZE_FOUR_BYTES 0x00000000
+#define NVCB97_MME_DMA_REDUCTION_REDUCTION_SIZE_EIGHT_BYTES 0x00000001
+
+#define NVCB97_MME_DMA_SYSMEMBAR 0x056c
+#define NVCB97_MME_DMA_SYSMEMBAR_V 0:0
+
+#define NVCB97_MME_DMA_SYNC 0x0570
+#define NVCB97_MME_DMA_SYNC_VALUE 31:0
+
+#define NVCB97_SET_MME_DATA_FIFO_CONFIG 0x0574
+#define NVCB97_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE 2:0
+#define NVCB97_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_0KB 0x00000000
+#define NVCB97_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_4KB 0x00000001
+#define NVCB97_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_8KB 0x00000002
+#define NVCB97_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_12KB 0x00000003
+#define NVCB97_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_16KB 0x00000004
+
+#define NVCB97_SET_VERTEX_STREAM_SIZE_A(j) (0x0600+(j)*8)
+#define NVCB97_SET_VERTEX_STREAM_SIZE_A_UPPER 7:0
+
+#define NVCB97_SET_VERTEX_STREAM_SIZE_B(j) (0x0604+(j)*8)
+#define NVCB97_SET_VERTEX_STREAM_SIZE_B_LOWER 31:0
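(The MME DMA reduction fields compose the same way as any other `HI:LO` word; indexed methods such as NVCB97_SET_STREAM_OUT_BUFFER_ENABLE(j) already encode their per-unit stride, here 32 bytes per buffer. A short sketch, reusing the illustrative FLD_NUM helper from the earlier sketch, of a signed eight-byte RED_MAX descriptor.)

/* Sketch only: a signed, eight-byte RED_MAX reduction word for
 * NVCB97_MME_DMA_REDUCTION, built with the illustrative FLD_NUM above. */
static inline uint32_t nvcb97_mme_red_max_s64(void)
{
    return FLD_NUM(NVCB97_MME_DMA_REDUCTION_REDUCTION_OP,
                   NVCB97_MME_DMA_REDUCTION_REDUCTION_OP_RED_MAX)
         | FLD_NUM(NVCB97_MME_DMA_REDUCTION_REDUCTION_FORMAT,
                   NVCB97_MME_DMA_REDUCTION_REDUCTION_FORMAT_SIGNED)
         | FLD_NUM(NVCB97_MME_DMA_REDUCTION_REDUCTION_SIZE,
                   NVCB97_MME_DMA_REDUCTION_REDUCTION_SIZE_EIGHT_BYTES);
}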
+
+#define NVCB97_SET_STREAM_OUT_CONTROL_STREAM(j) (0x0700+(j)*16)
+#define NVCB97_SET_STREAM_OUT_CONTROL_STREAM_SELECT 1:0
+
+#define NVCB97_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT(j) (0x0704+(j)*16)
+#define NVCB97_SET_STREAM_OUT_CONTROL_COMPONENT_COUNT_MAX 7:0
+
+#define NVCB97_SET_STREAM_OUT_CONTROL_STRIDE(j) (0x0708+(j)*16)
+#define NVCB97_SET_STREAM_OUT_CONTROL_STRIDE_BYTES 31:0
+
+#define NVCB97_SET_RASTER_INPUT 0x0740
+#define NVCB97_SET_RASTER_INPUT_STREAM_SELECT 1:0
+
+#define NVCB97_SET_STREAM_OUTPUT 0x0744
+#define NVCB97_SET_STREAM_OUTPUT_ENABLE 0:0
+#define NVCB97_SET_STREAM_OUTPUT_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STREAM_OUTPUT_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE 0x0748
+#define NVCB97_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE 0:0
+#define NVCB97_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_DA_PRIMITIVE_RESTART_TOPOLOGY_CHANGE_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_HYBRID_ANTI_ALIAS_CONTROL 0x0754
+#define NVCB97_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES 3:0
+#define NVCB97_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID 4:4
+#define NVCB97_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_FRAGMENT 0x00000000
+#define NVCB97_SET_HYBRID_ANTI_ALIAS_CONTROL_CENTROID_PER_PASS 0x00000001
+#define NVCB97_SET_HYBRID_ANTI_ALIAS_CONTROL_PASSES_EXTENDED 5:5
+
+#define NVCB97_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c
+#define NVCB97_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0
+
+#define NVCB97_SET_SHADER_LOCAL_MEMORY_A 0x0790
+#define NVCB97_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0
+
+#define NVCB97_SET_SHADER_LOCAL_MEMORY_B 0x0794
+#define NVCB97_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0
+
+#define NVCB97_SET_SHADER_LOCAL_MEMORY_C 0x0798
+#define NVCB97_SET_SHADER_LOCAL_MEMORY_C_SIZE_UPPER 6:0
+
+#define NVCB97_SET_SHADER_LOCAL_MEMORY_D 0x079c
+#define NVCB97_SET_SHADER_LOCAL_MEMORY_D_SIZE_LOWER 31:0
+
+#define NVCB97_SET_SHADER_LOCAL_MEMORY_E 0x07a0
+#define NVCB97_SET_SHADER_LOCAL_MEMORY_E_DEFAULT_SIZE_PER_WARP 25:0
+
+#define NVCB97_SET_COLOR_ZERO_BANDWIDTH_CLEAR 0x07a4
+#define NVCB97_SET_COLOR_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVCB97_SET_Z_ZERO_BANDWIDTH_CLEAR 0x07a8
+#define NVCB97_SET_Z_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVCB97_SET_TEXTURE_HEADER_VERSION 0x07ac
+#define NVCB97_SET_TEXTURE_HEADER_VERSION_MAJOR 7:0
+
+#define NVCB97_SET_STENCIL_ZERO_BANDWIDTH_CLEAR 0x07b0
+#define NVCB97_SET_STENCIL_ZERO_BANDWIDTH_CLEAR_SLOT_DISABLE_MASK 14:0
+
+#define NVCB97_SET_ZCULL_REGION_SIZE_A 0x07c0
+#define NVCB97_SET_ZCULL_REGION_SIZE_A_WIDTH 15:0
+
+#define NVCB97_SET_ZCULL_REGION_SIZE_B 0x07c4
+#define NVCB97_SET_ZCULL_REGION_SIZE_B_HEIGHT 15:0
+
+#define NVCB97_SET_ZCULL_REGION_SIZE_C 0x07c8
+#define NVCB97_SET_ZCULL_REGION_SIZE_C_DEPTH 15:0
+
+#define NVCB97_SET_ZCULL_REGION_PIXEL_OFFSET_C 0x07cc
+#define NVCB97_SET_ZCULL_REGION_PIXEL_OFFSET_C_DEPTH 15:0
+
+#define NVCB97_SET_CULL_BEFORE_FETCH 0x07dc
+#define NVCB97_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE 0:0
+#define NVCB97_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_FALSE 0x00000000
+#define NVCB97_SET_CULL_BEFORE_FETCH_FETCH_STREAMS_ONCE_TRUE 0x00000001
+
+#define NVCB97_SET_ZCULL_REGION_LOCATION 0x07e0
+#define NVCB97_SET_ZCULL_REGION_LOCATION_START_ALIQUOT 15:0
+#define NVCB97_SET_ZCULL_REGION_LOCATION_ALIQUOT_COUNT 31:16
+
+#define NVCB97_SET_ZCULL_REGION_ALIQUOTS 0x07e4
+#define NVCB97_SET_ZCULL_REGION_ALIQUOTS_PER_LAYER 15:0
+
+#define NVCB97_SET_ZCULL_STORAGE_A 0x07e8
+#define NVCB97_SET_ZCULL_STORAGE_A_ADDRESS_UPPER 7:0
+
+#define NVCB97_SET_ZCULL_STORAGE_B 0x07ec
+#define NVCB97_SET_ZCULL_STORAGE_B_ADDRESS_LOWER 31:0
+
+#define NVCB97_SET_ZCULL_STORAGE_C 0x07f0
+#define NVCB97_SET_ZCULL_STORAGE_C_LIMIT_ADDRESS_UPPER 7:0
+
+#define NVCB97_SET_ZCULL_STORAGE_D 0x07f4
+#define NVCB97_SET_ZCULL_STORAGE_D_LIMIT_ADDRESS_LOWER 31:0
+
+#define NVCB97_SET_ZT_READ_ONLY 0x07f8
+#define NVCB97_SET_ZT_READ_ONLY_ENABLE_Z 0:0
+#define NVCB97_SET_ZT_READ_ONLY_ENABLE_Z_FALSE 0x00000000
+#define NVCB97_SET_ZT_READ_ONLY_ENABLE_Z_TRUE 0x00000001
+#define NVCB97_SET_ZT_READ_ONLY_ENABLE_STENCIL 4:4
+#define NVCB97_SET_ZT_READ_ONLY_ENABLE_STENCIL_FALSE 0x00000000
+#define NVCB97_SET_ZT_READ_ONLY_ENABLE_STENCIL_TRUE 0x00000001
+
+#define NVCB97_THROTTLE_SM 0x07fc
+#define NVCB97_THROTTLE_SM_MULTIPLY_ADD 0:0
+#define NVCB97_THROTTLE_SM_MULTIPLY_ADD_FALSE 0x00000000
+#define NVCB97_THROTTLE_SM_MULTIPLY_ADD_TRUE 0x00000001
+
+#define NVCB97_SET_COLOR_TARGET_A(j) (0x0800+(j)*64)
+#define NVCB97_SET_COLOR_TARGET_A_OFFSET_UPPER 7:0
+
+#define NVCB97_SET_COLOR_TARGET_B(j) (0x0804+(j)*64)
+#define NVCB97_SET_COLOR_TARGET_B_OFFSET_LOWER 31:0
+
+#define NVCB97_SET_COLOR_TARGET_WIDTH(j) (0x0808+(j)*64)
+#define NVCB97_SET_COLOR_TARGET_WIDTH_V 27:0
+
+#define NVCB97_SET_COLOR_TARGET_HEIGHT(j) (0x080c+(j)*64)
+#define NVCB97_SET_COLOR_TARGET_HEIGHT_V 16:0
+
+#define NVCB97_SET_COLOR_TARGET_FORMAT(j) (0x0810+(j)*64)
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V 7:0
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_DISABLED 0x00000000
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_AS32 0x000000C1
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_AU32 0x000000C2
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RS32_GS32_BS32_X32 0x000000C4
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RU32_GU32_BU32_X32 0x000000C5
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_R16_G16_B16_A16 0x000000C6
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RS16_GS16_BS16_AS16 0x000000C8
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RU16_GU16_BU16_AU16 0x000000C9
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RF32_GF32 0x000000CB
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RS32_GS32 0x000000CC
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RU32_GU32 0x000000CD
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_A8R8G8B8 0x000000CF
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_A8RL8GL8BL8 0x000000D0
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_A2B10G10R10 0x000000D1
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_AU2BU10GU10RU10 0x000000D2
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_A8B8G8R8 0x000000D5
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_A8BL8GL8RL8 0x000000D6
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_AN8BN8GN8RN8 0x000000D7
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_AS8BS8GS8RS8 0x000000D8
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_AU8BU8GU8RU8 0x000000D9
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_R16_G16 0x000000DA
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RN16_GN16 0x000000DB
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RS16_GS16 0x000000DC
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RU16_GU16 0x000000DD
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RF16_GF16 0x000000DE
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_A2R10G10B10 0x000000DF
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_BF10GF11RF11 0x000000E0
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RS32 0x000000E3
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RU32 0x000000E4
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RF32 0x000000E5
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_X8R8G8B8 0x000000E6
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_X8RL8GL8BL8 0x000000E7
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_R5G6B5 0x000000E8
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_A1R5G5B5 0x000000E9
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_G8R8 0x000000EA
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_GN8RN8 0x000000EB
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_GS8RS8 0x000000EC
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_GU8RU8 0x000000ED
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_R16 0x000000EE
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RN16 0x000000EF
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RS16 0x000000F0
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RU16 0x000000F1
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RF16 0x000000F2
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_R8 0x000000F3
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RN8 0x000000F4
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RS8 0x000000F5
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RU8 0x000000F6
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_A8 0x000000F7
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_X1R5G5B5 0x000000F8
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_X8B8G8R8 0x000000F9
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_X8BL8GL8RL8 0x000000FA
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_Z1R5G5B5 0x000000FB
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_O1R5G5B5 0x000000FC
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_Z8R8G8B8 0x000000FD
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_O8R8G8B8 0x000000FE
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_R32 0x000000FF
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_A16 0x00000040
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_AF16 0x00000041
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_AF32 0x00000042
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_A8R8 0x00000043
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_R16_A16 0x00000044
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RF16_AF16 0x00000045
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_RF32_AF32 0x00000046
+#define NVCB97_SET_COLOR_TARGET_FORMAT_V_B8G8R8A8 0x00000047
+
+#define NVCB97_SET_COLOR_TARGET_MEMORY(j) (0x0814+(j)*64)
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH 3:0
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_WIDTH_ONE_GOB 0x00000000
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT 7:4
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_ONE_GOB 0x00000000
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_TWO_GOBS 0x00000001
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_FOUR_GOBS 0x00000002
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH 11:8
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_ONE_GOB 0x00000000
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_TWO_GOBS 0x00000001
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_FOUR_GOBS 0x00000002
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_EIGHT_GOBS 0x00000003
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVCB97_SET_COLOR_TARGET_MEMORY_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005
+#define NVCB97_SET_COLOR_TARGET_MEMORY_LAYOUT 12:12
+#define NVCB97_SET_COLOR_TARGET_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NVCB97_SET_COLOR_TARGET_MEMORY_LAYOUT_PITCH 0x00000001
+#define NVCB97_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL 16:16
+#define NVCB97_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000
+#define NVCB97_SET_COLOR_TARGET_MEMORY_THIRD_DIMENSION_CONTROL_THIRD_DIMENSION_DEFINES_DEPTH_SIZE 0x00000001
+
+#define NVCB97_SET_COLOR_TARGET_THIRD_DIMENSION(j) (0x0818+(j)*64)
+#define NVCB97_SET_COLOR_TARGET_THIRD_DIMENSION_V 27:0
+
+#define NVCB97_SET_COLOR_TARGET_ARRAY_PITCH(j) (0x081c+(j)*64)
+#define NVCB97_SET_COLOR_TARGET_ARRAY_PITCH_V 31:0
+
+#define NVCB97_SET_COLOR_TARGET_LAYER(j) (0x0820+(j)*64)
+#define NVCB97_SET_COLOR_TARGET_LAYER_OFFSET 15:0
+
+#define NVCB97_SET_COLOR_TARGET_C_ROP_SLICE_MAP(j) (0x0824+(j)*64)
+#define NVCB97_SET_COLOR_TARGET_C_ROP_SLICE_MAP_VIRTUAL_ADDRESS_MASK 31:0
+
+#define NVCB97_SET_VIEWPORT_SCALE_X(j) (0x0a00+(j)*32)
+#define NVCB97_SET_VIEWPORT_SCALE_X_V 31:0
+
+#define NVCB97_SET_VIEWPORT_SCALE_Y(j) (0x0a04+(j)*32)
+#define NVCB97_SET_VIEWPORT_SCALE_Y_V 31:0
+
+#define NVCB97_SET_VIEWPORT_SCALE_Z(j) (0x0a08+(j)*32)
+#define NVCB97_SET_VIEWPORT_SCALE_Z_V 31:0
+
+#define NVCB97_SET_VIEWPORT_OFFSET_X(j) (0x0a0c+(j)*32)
+#define NVCB97_SET_VIEWPORT_OFFSET_X_V 31:0
+
+#define NVCB97_SET_VIEWPORT_OFFSET_Y(j) (0x0a10+(j)*32)
+#define NVCB97_SET_VIEWPORT_OFFSET_Y_V 31:0
+
+#define NVCB97_SET_VIEWPORT_OFFSET_Z(j) (0x0a14+(j)*32)
+#define NVCB97_SET_VIEWPORT_OFFSET_Z_V 31:0
+
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE(j) (0x0a18+(j)*32)
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_X 2:0
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_X 0x00000000
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_X 0x00000001
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Y 0x00000002
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Y 0x00000003
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_Z 0x00000004
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_Z 0x00000005
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_W 0x00000006
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_X_NEG_W 0x00000007
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Y 6:4
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_X 0x00000000
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_X 0x00000001
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Y 0x00000002
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Y 0x00000003
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Z 0x00000004
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_Z 0x00000005
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_W 0x00000006
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_NEG_W 0x00000007
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Z 10:8
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_X 0x00000000
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_X 0x00000001
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Y 0x00000002
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Y 0x00000003
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Z 0x00000004
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_Z 0x00000005
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_W 0x00000006
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_NEG_W 0x00000007
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_W 14:12
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_X 0x00000000
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_X 0x00000001
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Y 0x00000002
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Y 0x00000003
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_Z 0x00000004
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_Z 0x00000005
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_W 0x00000006
+#define NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_W_NEG_W 0x00000007
+
+#define NVCB97_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION(j) (0x0a1c+(j)*32)
+#define NVCB97_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_X_BITS 4:0
+#define NVCB97_SET_VIEWPORT_INCREASE_SNAP_GRID_PRECISION_Y_BITS 12:8
+
+#define NVCB97_SET_VIEWPORT_CLIP_HORIZONTAL(j) (0x0c00+(j)*16)
+#define NVCB97_SET_VIEWPORT_CLIP_HORIZONTAL_X0 15:0
+#define NVCB97_SET_VIEWPORT_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVCB97_SET_VIEWPORT_CLIP_VERTICAL(j) (0x0c04+(j)*16)
+#define NVCB97_SET_VIEWPORT_CLIP_VERTICAL_Y0 15:0
+#define NVCB97_SET_VIEWPORT_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVCB97_SET_VIEWPORT_CLIP_MIN_Z(j) (0x0c08+(j)*16)
+#define NVCB97_SET_VIEWPORT_CLIP_MIN_Z_V 31:0
+
+#define NVCB97_SET_VIEWPORT_CLIP_MAX_Z(j) (0x0c0c+(j)*16)
+#define NVCB97_SET_VIEWPORT_CLIP_MAX_Z_V 31:0
+
+#define NVCB97_SET_WINDOW_CLIP_HORIZONTAL(j) (0x0d00+(j)*8)
+#define NVCB97_SET_WINDOW_CLIP_HORIZONTAL_XMIN 15:0
+#define NVCB97_SET_WINDOW_CLIP_HORIZONTAL_XMAX 31:16
+
+#define NVCB97_SET_WINDOW_CLIP_VERTICAL(j) (0x0d04+(j)*8)
+#define NVCB97_SET_WINDOW_CLIP_VERTICAL_YMIN 15:0
+#define NVCB97_SET_WINDOW_CLIP_VERTICAL_YMAX 31:16
+
+#define NVCB97_SET_CLIP_ID_EXTENT_X(j) (0x0d40+(j)*8)
+#define NVCB97_SET_CLIP_ID_EXTENT_X_MINX 15:0
+#define NVCB97_SET_CLIP_ID_EXTENT_X_WIDTH 31:16
+
+#define NVCB97_SET_CLIP_ID_EXTENT_Y(j) (0x0d44+(j)*8)
+#define NVCB97_SET_CLIP_ID_EXTENT_Y_MINY 15:0
+#define NVCB97_SET_CLIP_ID_EXTENT_Y_HEIGHT 31:16
+
+#define NVCB97_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK 0x0d60
+#define NVCB97_SET_MAX_STREAM_OUTPUT_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVCB97_SET_API_VISIBLE_CALL_LIMIT 0x0d64
+#define NVCB97_SET_API_VISIBLE_CALL_LIMIT_V 3:0
+#define NVCB97_SET_API_VISIBLE_CALL_LIMIT_V__0 0x00000000
+#define NVCB97_SET_API_VISIBLE_CALL_LIMIT_V__1 0x00000001
+#define NVCB97_SET_API_VISIBLE_CALL_LIMIT_V__2 0x00000002
+#define NVCB97_SET_API_VISIBLE_CALL_LIMIT_V__4 0x00000003
+#define NVCB97_SET_API_VISIBLE_CALL_LIMIT_V__8 0x00000004
+#define NVCB97_SET_API_VISIBLE_CALL_LIMIT_V__16 0x00000005
+#define NVCB97_SET_API_VISIBLE_CALL_LIMIT_V__32 0x00000006
+#define NVCB97_SET_API_VISIBLE_CALL_LIMIT_V__64 0x00000007
+#define NVCB97_SET_API_VISIBLE_CALL_LIMIT_V__128 0x00000008
+#define NVCB97_SET_API_VISIBLE_CALL_LIMIT_V_NO_CHECK 0x0000000F
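(Each NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE(j) word above packs four 3-bit axis selectors. A sketch of the identity swizzle, again with the illustrative FLD_NUM helper from the first sketch.)

/* Sketch only: identity viewport swizzle (+X, +Y, +Z, +W). */
static inline uint32_t nvcb97_viewport_swizzle_identity(void)
{
    return FLD_NUM(NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_X,
                   NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_X_POS_X)
         | FLD_NUM(NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Y,
                   NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Y_POS_Y)
         | FLD_NUM(NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Z,
                   NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_Z_POS_Z)
         | FLD_NUM(NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_W,
                   NVCB97_SET_VIEWPORT_COORDINATE_SWIZZLE_W_POS_W);
}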
+
+#define NVCB97_SET_STATISTICS_COUNTER 0x0d68
+#define NVCB97_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE 0:0
+#define NVCB97_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_DA_VERTICES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE 1:1
+#define NVCB97_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_DA_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE 2:2
+#define NVCB97_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_VS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE 3:3
+#define NVCB97_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_GS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE 4:4
+#define NVCB97_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_GS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE 5:5
+#define NVCB97_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE 6:6
+#define NVCB97_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_STREAMING_PRIMITIVES_NEEDED_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE 7:7
+#define NVCB97_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_CLIPPER_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE 8:8
+#define NVCB97_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_CLIPPER_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE 9:9
+#define NVCB97_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_PS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE 11:11
+#define NVCB97_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_TI_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE 12:12
+#define NVCB97_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_TS_INVOCATIONS_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE 13:13
+#define NVCB97_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_TS_PRIMITIVES_GENERATED_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE 14:14
+#define NVCB97_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_TOTAL_STREAMING_PRIMITIVES_NEEDED_SUCCEEDED_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE 10:10
+#define NVCB97_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_VTG_PRIMITIVES_OUT_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE 15:15
+#define NVCB97_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_ALPHA_BETA_CLOCKS_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE 16:16
+#define NVCB97_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STATISTICS_COUNTER_SCG_CLOCKS_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_CLEAR_RECT_HORIZONTAL 0x0d6c
+#define NVCB97_SET_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVCB97_SET_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVCB97_SET_CLEAR_RECT_VERTICAL 0x0d70
+#define NVCB97_SET_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVCB97_SET_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVCB97_SET_VERTEX_ARRAY_START 0x0d74
+#define NVCB97_SET_VERTEX_ARRAY_START_V 31:0
+
+#define NVCB97_DRAW_VERTEX_ARRAY 0x0d78
+#define NVCB97_DRAW_VERTEX_ARRAY_COUNT 31:0
+
+#define NVCB97_SET_VIEWPORT_Z_CLIP 0x0d7c
+#define NVCB97_SET_VIEWPORT_Z_CLIP_RANGE 0:0
+#define NVCB97_SET_VIEWPORT_Z_CLIP_RANGE_NEGATIVE_W_TO_POSITIVE_W 0x00000000
+#define NVCB97_SET_VIEWPORT_Z_CLIP_RANGE_ZERO_TO_POSITIVE_W 0x00000001
+
+#define NVCB97_SET_COLOR_CLEAR_VALUE(i) (0x0d80+(i)*4)
+#define NVCB97_SET_COLOR_CLEAR_VALUE_V 31:0
+
+#define NVCB97_SET_Z_CLEAR_VALUE 0x0d90
+#define NVCB97_SET_Z_CLEAR_VALUE_V 31:0
+
+#define NVCB97_SET_SHADER_CACHE_CONTROL 0x0d94
+#define NVCB97_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0
+#define NVCB97_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001
+
+#define NVCB97_FORCE_TRANSITION_TO_BETA 0x0d98
+#define NVCB97_FORCE_TRANSITION_TO_BETA_V 0:0
+
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_ENABLE 0x0d9c
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V 0:0
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_FALSE 0x00000000
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_ENABLE_V_TRUE 0x00000001
+
+#define NVCB97_SET_STENCIL_CLEAR_VALUE 0x0da0
+#define NVCB97_SET_STENCIL_CLEAR_VALUE_V 7:0
+
+#define NVCB97_INVALIDATE_SHADER_CACHES_NO_WFI 0x0da4
+#define NVCB97_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0
+#define NVCB97_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000
+#define NVCB97_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001
+#define NVCB97_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4
+#define NVCB97_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000
+#define NVCB97_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001
+#define NVCB97_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12
+#define NVCB97_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000
+#define NVCB97_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001
+
+#define NVCB97_SET_ZCULL_SERIALIZATION 0x0da8
+#define NVCB97_SET_ZCULL_SERIALIZATION_ENABLE 0:0
+#define NVCB97_SET_ZCULL_SERIALIZATION_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_ZCULL_SERIALIZATION_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_ZCULL_SERIALIZATION_APPLIED 5:4
+#define NVCB97_SET_ZCULL_SERIALIZATION_APPLIED_ALWAYS 0x00000000
+#define NVCB97_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z 0x00000001
+#define NVCB97_SET_ZCULL_SERIALIZATION_APPLIED_OUT_OF_GAMUT_Z 0x00000002
+#define NVCB97_SET_ZCULL_SERIALIZATION_APPLIED_LATE_Z_OR_OUT_OF_GAMUT_Z 0x00000003
+
+#define NVCB97_SET_FRONT_POLYGON_MODE 0x0dac
+#define NVCB97_SET_FRONT_POLYGON_MODE_V 31:0
+#define NVCB97_SET_FRONT_POLYGON_MODE_V_POINT 0x00001B00
+#define NVCB97_SET_FRONT_POLYGON_MODE_V_LINE 0x00001B01
+#define NVCB97_SET_FRONT_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVCB97_SET_BACK_POLYGON_MODE 0x0db0
+#define NVCB97_SET_BACK_POLYGON_MODE_V 31:0
+#define NVCB97_SET_BACK_POLYGON_MODE_V_POINT 0x00001B00
+#define NVCB97_SET_BACK_POLYGON_MODE_V_LINE 0x00001B01
+#define NVCB97_SET_BACK_POLYGON_MODE_V_FILL 0x00001B02
+
+#define NVCB97_SET_POLY_SMOOTH 0x0db4
+#define NVCB97_SET_POLY_SMOOTH_ENABLE 0:0
+#define NVCB97_SET_POLY_SMOOTH_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_POLY_SMOOTH_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_ZCULL_DIR_FORMAT 0x0dbc
+#define NVCB97_SET_ZCULL_DIR_FORMAT_ZDIR 15:0
+#define NVCB97_SET_ZCULL_DIR_FORMAT_ZDIR_LESS 0x00000000
+#define NVCB97_SET_ZCULL_DIR_FORMAT_ZDIR_GREATER 0x00000001
+#define NVCB97_SET_ZCULL_DIR_FORMAT_ZFORMAT 31:16
+#define NVCB97_SET_ZCULL_DIR_FORMAT_ZFORMAT_MSB 0x00000000
+#define NVCB97_SET_ZCULL_DIR_FORMAT_ZFORMAT_FP 0x00000001
+#define NVCB97_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZTRICK 0x00000002
+#define NVCB97_SET_ZCULL_DIR_FORMAT_ZFORMAT_ZF32_1 0x00000003
+
+#define NVCB97_SET_POLY_OFFSET_POINT 0x0dc0
+#define NVCB97_SET_POLY_OFFSET_POINT_ENABLE 0:0
+#define NVCB97_SET_POLY_OFFSET_POINT_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_POLY_OFFSET_POINT_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_POLY_OFFSET_LINE 0x0dc4
+#define NVCB97_SET_POLY_OFFSET_LINE_ENABLE 0:0
+#define NVCB97_SET_POLY_OFFSET_LINE_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_POLY_OFFSET_LINE_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_POLY_OFFSET_FILL 0x0dc8
+#define NVCB97_SET_POLY_OFFSET_FILL_ENABLE 0:0
+#define NVCB97_SET_POLY_OFFSET_FILL_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_POLY_OFFSET_FILL_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_PATCH 0x0dcc
+#define NVCB97_SET_PATCH_SIZE 7:0
+
+#define NVCB97_SET_ITERATED_BLEND 0x0dd0
+#define NVCB97_SET_ITERATED_BLEND_ENABLE 0:0
+#define NVCB97_SET_ITERATED_BLEND_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_ITERATED_BLEND_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_ITERATED_BLEND_ALPHA_ENABLE 1:1
+#define NVCB97_SET_ITERATED_BLEND_ALPHA_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_ITERATED_BLEND_ALPHA_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_ITERATED_BLEND_PASS 0x0dd4
+#define NVCB97_SET_ITERATED_BLEND_PASS_COUNT 7:0
+
+#define NVCB97_SET_ZCULL_CRITERION 0x0dd8
+#define NVCB97_SET_ZCULL_CRITERION_SFUNC 7:0
+#define NVCB97_SET_ZCULL_CRITERION_SFUNC_NEVER 0x00000000
+#define NVCB97_SET_ZCULL_CRITERION_SFUNC_LESS 0x00000001
+#define NVCB97_SET_ZCULL_CRITERION_SFUNC_EQUAL 0x00000002
+#define NVCB97_SET_ZCULL_CRITERION_SFUNC_LEQUAL 0x00000003
+#define NVCB97_SET_ZCULL_CRITERION_SFUNC_GREATER 0x00000004
+#define NVCB97_SET_ZCULL_CRITERION_SFUNC_NOTEQUAL 0x00000005
+#define NVCB97_SET_ZCULL_CRITERION_SFUNC_GEQUAL 0x00000006
+#define NVCB97_SET_ZCULL_CRITERION_SFUNC_ALWAYS 0x00000007
+#define NVCB97_SET_ZCULL_CRITERION_NO_INVALIDATE 8:8
+#define NVCB97_SET_ZCULL_CRITERION_NO_INVALIDATE_FALSE 0x00000000
+#define NVCB97_SET_ZCULL_CRITERION_NO_INVALIDATE_TRUE 0x00000001
+#define NVCB97_SET_ZCULL_CRITERION_FORCE_MATCH 9:9
+#define NVCB97_SET_ZCULL_CRITERION_FORCE_MATCH_FALSE 0x00000000
+#define NVCB97_SET_ZCULL_CRITERION_FORCE_MATCH_TRUE 0x00000001
+#define NVCB97_SET_ZCULL_CRITERION_SREF 23:16
+#define NVCB97_SET_ZCULL_CRITERION_SMASK 31:24
+
+#define NVCB97_PIXEL_SHADER_BARRIER 0x0de0
+#define NVCB97_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE 0:0
+#define NVCB97_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_FALSE 0x00000000
+#define NVCB97_PIXEL_SHADER_BARRIER_SYSMEMBAR_ENABLE_TRUE 0x00000001
+#define NVCB97_PIXEL_SHADER_BARRIER_BARRIER_LOCATION 1:1
+#define NVCB97_PIXEL_SHADER_BARRIER_BARRIER_LOCATION_BLOCK_BEFORE_PS 0x00000000
+#define NVCB97_PIXEL_SHADER_BARRIER_BARRIER_LOCATION_BLOCK_BEFORE_PS_AND_ZTEST 0x00000001
+
+#define NVCB97_SET_SM_TIMEOUT_INTERVAL 0x0de4
+#define NVCB97_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0
+
+#define NVCB97_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY 0x0de8
+#define NVCB97_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE 0:0
+#define NVCB97_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_DA_PRIMITIVE_RESTART_VERTEX_ARRAY_ENABLE_TRUE 0x00000001
+
+#define NVCB97_MME_DMA_WRITE_METHOD_BARRIER 0x0dec
+#define NVCB97_MME_DMA_WRITE_METHOD_BARRIER_V 0:0
+
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER 0x0df0
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_POINTER_V 7:0
+
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION 0x0df4
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC 2:0
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_FALSE 0x00000000
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_TRUE 0x00000001
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_EQ 0x00000002
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_NE 0x00000003
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LT 0x00000004
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_LE 0x00000005
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GT 0x00000006
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_GE 0x00000007
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION 5:3
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD_PRODUCTS 0x00000000
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUB_PRODUCTS 0x00000001
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MIN 0x00000002
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_MAX 0x00000003
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_RCP 0x00000004
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD 0x00000005
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_SUBTRACT 0x00000006
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT 8:6
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT0 0x00000000
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT1 0x00000001
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT2 0x00000002
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT3 0x00000003
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT4 0x00000004
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT5 0x00000005
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT6 0x00000006
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_CONSTANT_INPUT_SELECT_CONSTANT7 0x00000007
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT 11:9
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_RGB 0x00000000
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_RGB 0x00000001
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_AAA 0x00000002
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_DEST_AAA 0x00000003
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP0_RGB 0x00000004
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP1_RGB 0x00000005
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_TEMP2_RGB 0x00000006
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_PBR_RGB 0x00000007
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT 15:12
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO 0x00000000
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE 0x00000001
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_RGB 0x00000002
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_AAA 0x00000003
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_RGB 0x00000005
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_DEST_AAA 0x00000006
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP0_RGB 0x00000009
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP1_RGB 0x0000000A
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_TEMP2_RGB 0x0000000B
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_PBR_RGB 0x0000000C
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_CONSTANT_RGB 0x0000000D
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_ZERO_A_TIMES_B 0x0000000E
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT 18:16
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_RGB 0x00000000
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_RGB 0x00000001
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_SRC_AAA 0x00000002
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_AAA 0x00000003
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP0_RGB 0x00000004
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP1_RGB 0x00000005
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_TEMP2_RGB 0x00000006
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_PBR_RGB 0x00000007
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT 22:19
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO 0x00000000
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE 0x00000001
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_RGB 0x00000002
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_SRC_AAA 0x00000003
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_SRC_AAA 0x00000004
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_RGB 0x00000005
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_DEST_AAA 0x00000006
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_DEST_AAA 0x00000007
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP0_RGB 0x00000009
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP1_RGB 0x0000000A
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_TEMP2_RGB 0x0000000B
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_PBR_RGB 0x0000000C
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_CONSTANT_RGB 0x0000000D
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ZERO_C_TIMES_D 0x0000000E
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE 25:23
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RGB 0x00000000
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GBR 0x00000001
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RRR 0x00000002
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_GGG 0x00000003
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_BBB 0x00000004
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_R_TO_A 0x00000005
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK 27:26
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_RGB 0x00000000
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_R_ONLY 0x00000001
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_G_ONLY 0x00000002
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_B_ONLY 0x00000003
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT 29:28
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP0 0x00000000
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP1 0x00000001
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP2 0x00000002
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_NONE 0x00000003
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC 31:31
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_FALSE 0x00000000
+#define NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_SET_CC_TRUE 0x00000001
+
+#define NVCB97_SET_WINDOW_OFFSET_X 0x0df8
+#define NVCB97_SET_WINDOW_OFFSET_X_V 16:0
+
+#define NVCB97_SET_WINDOW_OFFSET_Y 0x0dfc
+#define NVCB97_SET_WINDOW_OFFSET_Y_V 17:0
+
+#define NVCB97_SET_SCISSOR_ENABLE(j) (0x0e00+(j)*16)
+#define NVCB97_SET_SCISSOR_ENABLE_V 0:0
+#define NVCB97_SET_SCISSOR_ENABLE_V_FALSE 0x00000000
+#define NVCB97_SET_SCISSOR_ENABLE_V_TRUE 0x00000001
+
+#define NVCB97_SET_SCISSOR_HORIZONTAL(j) (0x0e04+(j)*16)
+#define NVCB97_SET_SCISSOR_HORIZONTAL_XMIN 15:0
+#define NVCB97_SET_SCISSOR_HORIZONTAL_XMAX 31:16
+
+#define NVCB97_SET_SCISSOR_VERTICAL(j) (0x0e08+(j)*16)
+#define NVCB97_SET_SCISSOR_VERTICAL_YMIN 15:0
+#define NVCB97_SET_SCISSOR_VERTICAL_YMAX 31:16
+
+#define NVCB97_SET_VPC_PERF_KNOB 0x0f14
+#define NVCB97_SET_VPC_PERF_KNOB_CULLED_SMALL_LINES 7:0
+#define NVCB97_SET_VPC_PERF_KNOB_CULLED_SMALL_TRIANGLES 15:8
+#define NVCB97_SET_VPC_PERF_KNOB_NONCULLED_LINES_AND_POINTS 23:16
+#define NVCB97_SET_VPC_PERF_KNOB_NONCULLED_TRIANGLES 31:24
+
+#define NVCB97_PM_LOCAL_TRIGGER 0x0f18
+#define NVCB97_PM_LOCAL_TRIGGER_BOOKMARK 15:0
+
+#define NVCB97_SET_POST_Z_PS_IMASK 0x0f1c
+#define NVCB97_SET_POST_Z_PS_IMASK_ENABLE 0:0
+#define NVCB97_SET_POST_Z_PS_IMASK_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_POST_Z_PS_IMASK_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_CONSTANT_COLOR_RENDERING 0x0f40
+#define NVCB97_SET_CONSTANT_COLOR_RENDERING_ENABLE 0:0
+#define NVCB97_SET_CONSTANT_COLOR_RENDERING_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_CONSTANT_COLOR_RENDERING_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_CONSTANT_COLOR_RENDERING_RED 0x0f44
+#define NVCB97_SET_CONSTANT_COLOR_RENDERING_RED_V 31:0
+
+#define NVCB97_SET_CONSTANT_COLOR_RENDERING_GREEN 0x0f48
+#define NVCB97_SET_CONSTANT_COLOR_RENDERING_GREEN_V 31:0
+
+#define NVCB97_SET_CONSTANT_COLOR_RENDERING_BLUE 0x0f4c
+#define NVCB97_SET_CONSTANT_COLOR_RENDERING_BLUE_V 31:0
+
+#define NVCB97_SET_CONSTANT_COLOR_RENDERING_ALPHA 0x0f50
+#define NVCB97_SET_CONSTANT_COLOR_RENDERING_ALPHA_V 31:0
+
+#define NVCB97_SET_BACK_STENCIL_FUNC_REF 0x0f54
+#define NVCB97_SET_BACK_STENCIL_FUNC_REF_V 7:0
+
+#define NVCB97_SET_BACK_STENCIL_MASK 0x0f58
+#define NVCB97_SET_BACK_STENCIL_MASK_V 7:0
+
+#define NVCB97_SET_BACK_STENCIL_FUNC_MASK 0x0f5c
+#define NVCB97_SET_BACK_STENCIL_FUNC_MASK_V 7:0
+
+#define NVCB97_SET_VERTEX_STREAM_SUBSTITUTE_A 0x0f84
+#define NVCB97_SET_VERTEX_STREAM_SUBSTITUTE_A_ADDRESS_UPPER 7:0
+
+#define NVCB97_SET_VERTEX_STREAM_SUBSTITUTE_B 0x0f88
+#define NVCB97_SET_VERTEX_STREAM_SUBSTITUTE_B_ADDRESS_LOWER 31:0
+
+#define NVCB97_SET_LINE_MODE_POLYGON_CLIP 0x0f8c
+#define NVCB97_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE 0:0
+#define NVCB97_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DRAW_LINE 0x00000000
+#define NVCB97_SET_LINE_MODE_POLYGON_CLIP_GENERATED_EDGE_DO_NOT_DRAW_LINE 0x00000001
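(Each LOAD_ITERATED_BLEND_INSTRUCTION word above selects two operand products and an operation over them. A sketch, assuming ADD_PRODUCTS computes A*B + C*D as the ZERO_A_TIMES_B / ZERO_C_TIMES_D operand names suggest, of a classic "over" style pass, temp0 = src.rgb * src.a + dst.rgb * (1 - src.a); illustrative only, built with the FLD_NUM helper from the first sketch.)

static inline uint32_t nvcb97_iterated_blend_over(void)
{
    return FLD_NUM(NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC,
                   NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_TEST_CC_TRUE)
         | FLD_NUM(NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION,
                   NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERATION_ADD_PRODUCTS)
         | FLD_NUM(NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT,
                   NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_A_SELECT_SRC_RGB)
         | FLD_NUM(NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT,
                   NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_B_SELECT_SRC_AAA)
         | FLD_NUM(NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT,
                   NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_C_SELECT_DEST_RGB)
         | FLD_NUM(NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT,
                   NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OPERAND_D_SELECT_ONE_MINUS_SRC_AAA)
         | FLD_NUM(NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE,
                   NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_SWIZZLE_RGB)
         | FLD_NUM(NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK,
                   NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_OUTPUT_WRITE_MASK_RGB)
         | FLD_NUM(NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT,
                   NVCB97_LOAD_ITERATED_BLEND_INSTRUCTION_PASS_OUTPUT_TEMP0);
}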
+
+#define NVCB97_SET_SINGLE_CT_WRITE_CONTROL 0x0f90
+#define NVCB97_SET_SINGLE_CT_WRITE_CONTROL_ENABLE 0:0
+#define NVCB97_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_SINGLE_CT_WRITE_CONTROL_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_VTG_WARP_WATERMARKS 0x0f98
+#define NVCB97_SET_VTG_WARP_WATERMARKS_LOW 15:0
+#define NVCB97_SET_VTG_WARP_WATERMARKS_HIGH 31:16
+
+#define NVCB97_SET_DEPTH_BOUNDS_MIN 0x0f9c
+#define NVCB97_SET_DEPTH_BOUNDS_MIN_V 31:0
+
+#define NVCB97_SET_DEPTH_BOUNDS_MAX 0x0fa0
+#define NVCB97_SET_DEPTH_BOUNDS_MAX_V 31:0
+
+#define NVCB97_SET_SAMPLE_MASK 0x0fa4
+#define NVCB97_SET_SAMPLE_MASK_RASTER_OUT_ENABLE 0:0
+#define NVCB97_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_SAMPLE_MASK_RASTER_OUT_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE 4:4
+#define NVCB97_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_SAMPLE_MASK_COLOR_TARGET_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_COLOR_TARGET_SAMPLE_MASK 0x0fa8
+#define NVCB97_SET_COLOR_TARGET_SAMPLE_MASK_V 15:0
+
+#define NVCB97_SET_CT_MRT_ENABLE 0x0fac
+#define NVCB97_SET_CT_MRT_ENABLE_V 0:0
+#define NVCB97_SET_CT_MRT_ENABLE_V_FALSE 0x00000000
+#define NVCB97_SET_CT_MRT_ENABLE_V_TRUE 0x00000001
+
+#define NVCB97_SET_NONMULTISAMPLED_Z 0x0fb0
+#define NVCB97_SET_NONMULTISAMPLED_Z_V 0:0
+#define NVCB97_SET_NONMULTISAMPLED_Z_V_PER_SAMPLE 0x00000000
+#define NVCB97_SET_NONMULTISAMPLED_Z_V_AT_PIXEL_CENTER 0x00000001
+
+#define NVCB97_SET_TIR 0x0fb4
+#define NVCB97_SET_TIR_MODE 1:0
+#define NVCB97_SET_TIR_MODE_DISABLED 0x00000000
+#define NVCB97_SET_TIR_MODE_RASTER_N_TARGET_M 0x00000001
+
+#define NVCB97_SET_ANTI_ALIAS_RASTER 0x0fb8
+#define NVCB97_SET_ANTI_ALIAS_RASTER_SAMPLES 2:0
+#define NVCB97_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_1X1 0x00000000
+#define NVCB97_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X2 0x00000002
+#define NVCB97_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVCB97_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVCB97_SET_ANTI_ALIAS_RASTER_SAMPLES_MODE_4X4 0x00000006
+
+#define NVCB97_SET_SAMPLE_MASK_X0_Y0 0x0fbc
+#define NVCB97_SET_SAMPLE_MASK_X0_Y0_V 15:0
+
+#define NVCB97_SET_SAMPLE_MASK_X1_Y0 0x0fc0
+#define NVCB97_SET_SAMPLE_MASK_X1_Y0_V 15:0
+
+#define NVCB97_SET_SAMPLE_MASK_X0_Y1 0x0fc4
+#define NVCB97_SET_SAMPLE_MASK_X0_Y1_V 15:0
+
+#define NVCB97_SET_SAMPLE_MASK_X1_Y1 0x0fc8
+#define NVCB97_SET_SAMPLE_MASK_X1_Y1_V 15:0
+
+#define NVCB97_SET_SURFACE_CLIP_ID_MEMORY_A 0x0fcc
+#define NVCB97_SET_SURFACE_CLIP_ID_MEMORY_A_OFFSET_UPPER 7:0
+
+#define NVCB97_SET_SURFACE_CLIP_ID_MEMORY_B 0x0fd0
+#define NVCB97_SET_SURFACE_CLIP_ID_MEMORY_B_OFFSET_LOWER 31:0
+
+#define NVCB97_SET_TIR_MODULATION 0x0fd4
+#define NVCB97_SET_TIR_MODULATION_COMPONENT_SELECT 1:0
+#define NVCB97_SET_TIR_MODULATION_COMPONENT_SELECT_NO_MODULATION 0x00000000
+#define NVCB97_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGB 0x00000001
+#define NVCB97_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_ALPHA_ONLY 0x00000002
+#define NVCB97_SET_TIR_MODULATION_COMPONENT_SELECT_MODULATE_RGBA 0x00000003
+
+#define NVCB97_SET_TIR_MODULATION_FUNCTION 0x0fd8
+#define NVCB97_SET_TIR_MODULATION_FUNCTION_SELECT 0:0
+#define NVCB97_SET_TIR_MODULATION_FUNCTION_SELECT_LINEAR 0x00000000
+#define NVCB97_SET_TIR_MODULATION_FUNCTION_SELECT_TABLE 0x00000001
+
+#define NVCB97_SET_BLEND_OPT_CONTROL 0x0fdc
+#define NVCB97_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS 0:0
+#define NVCB97_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_FALSE 0x00000000
+#define NVCB97_SET_BLEND_OPT_CONTROL_ALLOW_FLOAT_PIXEL_KILLS_TRUE 0x00000001
+
+#define NVCB97_SET_ZT_A 0x0fe0
+#define NVCB97_SET_ZT_A_OFFSET_UPPER 7:0
+
+#define NVCB97_SET_ZT_B 0x0fe4
+#define NVCB97_SET_ZT_B_OFFSET_LOWER 31:0
+
+#define NVCB97_SET_ZT_FORMAT 0x0fe8
+#define NVCB97_SET_ZT_FORMAT_V 4:0
+#define NVCB97_SET_ZT_FORMAT_V_Z16 0x00000013
+#define NVCB97_SET_ZT_FORMAT_V_Z24S8 0x00000014
+#define NVCB97_SET_ZT_FORMAT_V_X8Z24 0x00000015
+#define NVCB97_SET_ZT_FORMAT_V_S8Z24 0x00000016
+#define NVCB97_SET_ZT_FORMAT_V_S8 0x00000017
+#define NVCB97_SET_ZT_FORMAT_V_ZF32 0x0000000A
+#define NVCB97_SET_ZT_FORMAT_V_ZF32_X24S8 0x00000019
+
+#define NVCB97_SET_ZT_BLOCK_SIZE 0x0fec
+#define NVCB97_SET_ZT_BLOCK_SIZE_WIDTH 3:0
+#define NVCB97_SET_ZT_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NVCB97_SET_ZT_BLOCK_SIZE_HEIGHT 7:4
+#define NVCB97_SET_ZT_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NVCB97_SET_ZT_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NVCB97_SET_ZT_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NVCB97_SET_ZT_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVCB97_SET_ZT_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVCB97_SET_ZT_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVCB97_SET_ZT_BLOCK_SIZE_DEPTH 11:8
+#define NVCB97_SET_ZT_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+
+#define NVCB97_SET_ZT_ARRAY_PITCH 0x0ff0
+#define NVCB97_SET_ZT_ARRAY_PITCH_V 31:0
+
+#define NVCB97_SET_SURFACE_CLIP_HORIZONTAL 0x0ff4
+#define NVCB97_SET_SURFACE_CLIP_HORIZONTAL_X 15:0
+#define NVCB97_SET_SURFACE_CLIP_HORIZONTAL_WIDTH 31:16
+
+#define NVCB97_SET_SURFACE_CLIP_VERTICAL 0x0ff8
+#define NVCB97_SET_SURFACE_CLIP_VERTICAL_Y 15:0
+#define NVCB97_SET_SURFACE_CLIP_VERTICAL_HEIGHT 31:16
+
+#define NVCB97_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS 0x1000
+#define NVCB97_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE 0:0
+#define NVCB97_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_FALSE 0x00000000
+#define NVCB97_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_SYSTEM_MEMORY_VOLATILE_TRUE 0x00000001
+#define NVCB97_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY 5:4
+#define NVCB97_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVCB97_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVCB97_SET_L2_CACHE_CONTROL_FOR_VAF_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVCB97_SET_VIEWPORT_MULTICAST 0x1004
+#define NVCB97_SET_VIEWPORT_MULTICAST_ORDER 0:0
+#define NVCB97_SET_VIEWPORT_MULTICAST_ORDER_VIEWPORT_ORDER 0x00000000
+#define NVCB97_SET_VIEWPORT_MULTICAST_ORDER_PRIMITIVE_ORDER 0x00000001
+
+#define NVCB97_SET_TESSELLATION_CUT_HEIGHT 0x1008
+#define NVCB97_SET_TESSELLATION_CUT_HEIGHT_V 4:0
+
+#define NVCB97_SET_MAX_GS_INSTANCES_PER_TASK 0x100c
+#define NVCB97_SET_MAX_GS_INSTANCES_PER_TASK_V 10:0
+
+#define NVCB97_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK 0x1010
+#define NVCB97_SET_MAX_GS_OUTPUT_VERTICES_PER_TASK_V 15:0
+
+#define NVCB97_SET_RESERVED_SW_METHOD00 0x1014
+#define NVCB97_SET_RESERVED_SW_METHOD00_V 31:0
+
+#define NVCB97_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER 0x1018
+#define NVCB97_SET_GS_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVCB97_SET_BETA_CB_STORAGE_CONSTRAINT 0x101c
+#define NVCB97_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVCB97_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_BETA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
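(SET_ZT_A/_B carry a depth-target offset split into an 8-bit upper word and a 32-bit lower word, the same upper/lower pattern used by the stream-out and ZCULL storage addresses earlier. A sketch of the split, assuming the usual bits 39:32 / 31:0 convention for such pairs.)

#include <stdint.h>

/* Sketch only: split a 40-bit GPU virtual address into the SET_ZT_A
 * OFFSET_UPPER and SET_ZT_B OFFSET_LOWER method payloads. */
static inline void nvcb97_zt_address(uint64_t gpu_va,
                                     uint32_t *upper, uint32_t *lower)
{
    *upper = (uint32_t)(gpu_va >> 32) & 0xFFu;  /* NVCB97_SET_ZT_A */
    *lower = (uint32_t)gpu_va;                  /* NVCB97_SET_ZT_B */
}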
+#define NVCB97_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER 0x1020
+#define NVCB97_SET_TI_OUTPUT_CB_STORAGE_MULTIPLIER_V 9:0
+
+#define NVCB97_SET_ALPHA_CB_STORAGE_CONSTRAINT 0x1024
+#define NVCB97_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE 0:0
+#define NVCB97_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_ALPHA_CB_STORAGE_CONSTRAINT_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_RESERVED_SW_METHOD01 0x1028
+#define NVCB97_SET_RESERVED_SW_METHOD01_V 31:0
+
+#define NVCB97_SET_RESERVED_SW_METHOD02 0x102c
+#define NVCB97_SET_RESERVED_SW_METHOD02_V 31:0
+
+#define NVCB97_SET_TIR_MODULATION_COEFFICIENT_TABLE(i) (0x1030+(i)*4)
+#define NVCB97_SET_TIR_MODULATION_COEFFICIENT_TABLE_V0 7:0
+#define NVCB97_SET_TIR_MODULATION_COEFFICIENT_TABLE_V1 15:8
+#define NVCB97_SET_TIR_MODULATION_COEFFICIENT_TABLE_V2 23:16
+#define NVCB97_SET_TIR_MODULATION_COEFFICIENT_TABLE_V3 31:24
+
+#define NVCB97_SET_SPARE_NOOP01 0x1044
+#define NVCB97_SET_SPARE_NOOP01_V 31:0
+
+#define NVCB97_SET_SPARE_NOOP02 0x1048
+#define NVCB97_SET_SPARE_NOOP02_V 31:0
+
+#define NVCB97_SET_SPARE_NOOP03 0x104c
+#define NVCB97_SET_SPARE_NOOP03_V 31:0
+
+#define NVCB97_SET_SPARE_NOOP04 0x1050
+#define NVCB97_SET_SPARE_NOOP04_V 31:0
+
+#define NVCB97_SET_SPARE_NOOP05 0x1054
+#define NVCB97_SET_SPARE_NOOP05_V 31:0
+
+#define NVCB97_SET_SPARE_NOOP06 0x1058
+#define NVCB97_SET_SPARE_NOOP06_V 31:0
+
+#define NVCB97_SET_SPARE_NOOP07 0x105c
+#define NVCB97_SET_SPARE_NOOP07_V 31:0
+
+#define NVCB97_SET_SPARE_NOOP08 0x1060
+#define NVCB97_SET_SPARE_NOOP08_V 31:0
+
+#define NVCB97_SET_SPARE_NOOP09 0x1064
+#define NVCB97_SET_SPARE_NOOP09_V 31:0
+
+#define NVCB97_SET_SPARE_NOOP10 0x1068
+#define NVCB97_SET_SPARE_NOOP10_V 31:0
+
+#define NVCB97_SET_SPARE_NOOP11 0x106c
+#define NVCB97_SET_SPARE_NOOP11_V 31:0
+
+#define NVCB97_SET_SPARE_NOOP12 0x1070
+#define NVCB97_SET_SPARE_NOOP12_V 31:0
+
+#define NVCB97_SET_SPARE_NOOP13 0x1074
+#define NVCB97_SET_SPARE_NOOP13_V 31:0
+
+#define NVCB97_SET_SPARE_NOOP14 0x1078
+#define NVCB97_SET_SPARE_NOOP14_V 31:0
+
+#define NVCB97_SET_SPARE_NOOP15 0x107c
+#define NVCB97_SET_SPARE_NOOP15_V 31:0
+
+#define NVCB97_SET_RESERVED_SW_METHOD03 0x10b0
+#define NVCB97_SET_RESERVED_SW_METHOD03_V 31:0
+
+#define NVCB97_SET_RESERVED_SW_METHOD04 0x10b4
+#define NVCB97_SET_RESERVED_SW_METHOD04_V 31:0
+
+#define NVCB97_SET_RESERVED_SW_METHOD05 0x10b8
+#define NVCB97_SET_RESERVED_SW_METHOD05_V 31:0
+
+#define NVCB97_SET_RESERVED_SW_METHOD06 0x10bc
+#define NVCB97_SET_RESERVED_SW_METHOD06_V 31:0
+
+#define NVCB97_SET_RESERVED_SW_METHOD07 0x10c0
+#define NVCB97_SET_RESERVED_SW_METHOD07_V 31:0
+
+#define NVCB97_SET_RESERVED_SW_METHOD08 0x10c4
+#define NVCB97_SET_RESERVED_SW_METHOD08_V 31:0
+
+#define NVCB97_SET_RESERVED_SW_METHOD09 0x10c8
+#define NVCB97_SET_RESERVED_SW_METHOD09_V 31:0
+
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_UNORM8 0x10cc
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_UNORM8_ALL_COVERED 23:16
+
+#define NVCB97_SET_RESERVED_SW_METHOD10 0x10d0
+#define NVCB97_SET_RESERVED_SW_METHOD10_V 31:0
+
+#define NVCB97_SET_RESERVED_SW_METHOD11 0x10d4
+#define NVCB97_SET_RESERVED_SW_METHOD11_V 31:0
+
+#define NVCB97_SET_RESERVED_SW_METHOD12 0x10d8
+#define NVCB97_SET_RESERVED_SW_METHOD12_V 31:0
+
+#define NVCB97_SET_RESERVED_SW_METHOD13 0x10dc
+#define NVCB97_SET_RESERVED_SW_METHOD13_V 31:0
+
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_UNORM10 0x10e0
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_UNORM10_ALL_COVERED 23:16
+
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_UNORM16 0x10e4
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_UNORM16_ALL_COVERED 23:16
+
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_FP11 0x10e8
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED_ALL_HIT_ONCE 5:0
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_FP11_ALL_COVERED 21:16
+
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_FP16 0x10ec
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_FP16_ALL_COVERED 23:16
+
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_SRGB8 0x10f0
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED_ALL_HIT_ONCE 7:0
+#define NVCB97_SET_REDUCE_COLOR_THRESHOLDS_SRGB8_ALL_COVERED 23:16
+
+#define NVCB97_UNBIND_ALL 0x10f4
+#define NVCB97_UNBIND_ALL_CONSTANT_BUFFERS 8:8
+#define NVCB97_UNBIND_ALL_CONSTANT_BUFFERS_FALSE 0x00000000
+#define NVCB97_UNBIND_ALL_CONSTANT_BUFFERS_TRUE 0x00000001
+
+#define NVCB97_SET_CLEAR_SURFACE_CONTROL 0x10f8
+#define NVCB97_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK 0:0
+#define NVCB97_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_FALSE 0x00000000
+#define NVCB97_SET_CLEAR_SURFACE_CONTROL_RESPECT_STENCIL_MASK_TRUE 0x00000001
+#define NVCB97_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT 4:4
+#define NVCB97_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_FALSE 0x00000000
+#define NVCB97_SET_CLEAR_SURFACE_CONTROL_USE_CLEAR_RECT_TRUE 0x00000001
+#define NVCB97_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0 8:8
+#define NVCB97_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_FALSE 0x00000000
+#define NVCB97_SET_CLEAR_SURFACE_CONTROL_USE_SCISSOR0_TRUE 0x00000001
+#define NVCB97_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0 12:12
+#define NVCB97_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_FALSE 0x00000000
+#define NVCB97_SET_CLEAR_SURFACE_CONTROL_USE_VIEWPORT_CLIP0_TRUE 0x00000001
+
+#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS 0x10fc
+#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY 5:4
+#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000
+#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001
+#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002
+
+#define NVCB97_SET_RESERVED_SW_METHOD14 0x1100
+#define NVCB97_SET_RESERVED_SW_METHOD14_V 31:0
+
+#define NVCB97_SET_RESERVED_SW_METHOD15 0x1104
+#define NVCB97_SET_RESERVED_SW_METHOD15_V 31:0
+
+#define NVCB97_NO_OPERATION_DATA_HI 0x110c
+#define NVCB97_NO_OPERATION_DATA_HI_V 31:0
+
+#define NVCB97_SET_DEPTH_BIAS_CONTROL 0x1110
+#define NVCB97_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT 0:0
+#define NVCB97_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_FALSE 0x00000000
+#define NVCB97_SET_DEPTH_BIAS_CONTROL_DEPTH_FORMAT_DEPENDENT_TRUE 0x00000001
+
+#define NVCB97_PM_TRIGGER_END 0x1114
+#define NVCB97_PM_TRIGGER_END_V 31:0
+
+#define NVCB97_SET_VERTEX_ID_BASE 0x1118
+#define NVCB97_SET_VERTEX_ID_BASE_V 31:0
+
+#define NVCB97_SET_STENCIL_COMPRESSION 0x111c
+#define NVCB97_SET_STENCIL_COMPRESSION_ENABLE 0:0
+#define NVCB97_SET_STENCIL_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_STENCIL_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A(i) (0x1120+(i)*4)
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0 0:0
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_FALSE 0x00000000
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP0_TRUE 0x00000001
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1 1:1
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_FALSE 0x00000000
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP1_TRUE 0x00000001
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2 2:2
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_FALSE 0x00000000
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP2_TRUE 0x00000001
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3 3:3
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_FALSE 0x00000000
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE0_COMP3_TRUE 0x00000001
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0 4:4
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_FALSE 0x00000000
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP0_TRUE 0x00000001
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1 5:5
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_FALSE 0x00000000
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP1_TRUE 0x00000001
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2 6:6
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_FALSE 0x00000000
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP2_TRUE 0x00000001
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3 7:7
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_FALSE 0x00000000
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE1_COMP3_TRUE 0x00000001
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0 8:8
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_FALSE 0x00000000
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP0_TRUE 0x00000001
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1 9:9
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_FALSE 0x00000000
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP1_TRUE 0x00000001
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2 10:10
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_FALSE 0x00000000
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP2_TRUE 0x00000001
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3 11:11
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_FALSE 0x00000000
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE2_COMP3_TRUE 0x00000001
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0 12:12
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_FALSE 0x00000000
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP0_TRUE 0x00000001
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1 13:13
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_FALSE 0x00000000
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP1_TRUE 0x00000001
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2 14:14
+#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_FALSE 0x00000000
NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP2_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3 15:15 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE3_COMP3_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0 16:16 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP0_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1 17:17 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP1_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2 18:18 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP2_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3 19:19 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE4_COMP3_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0 20:20 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP0_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1 21:21 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP1_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2 22:22 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP2_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3 23:23 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE5_COMP3_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0 24:24 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP0_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1 25:25 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP1_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2 26:26 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP2_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3 27:27 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE6_COMP3_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0 28:28 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_FALSE 0x00000000 +#define 
NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP0_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1 29:29 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP1_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2 30:30 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP2_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3 31:31 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A_ATTRIBUTE7_COMP3_TRUE 0x00000001 + +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B(i) (0x1128+(i)*4) +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0 0:0 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP0_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1 1:1 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP1_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2 2:2 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP2_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3 3:3 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE0_COMP3_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0 4:4 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP0_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1 5:5 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP1_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2 6:6 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP2_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3 7:7 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE1_COMP3_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0 8:8 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP0_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1 9:9 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP1_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2 10:10 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_FALSE 0x00000000 +#define 
NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP2_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3 11:11 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE2_COMP3_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0 12:12 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP0_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1 13:13 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP1_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2 14:14 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP2_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3 15:15 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE3_COMP3_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0 16:16 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP0_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1 17:17 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP1_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2 18:18 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP2_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3 19:19 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE4_COMP3_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0 20:20 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP0_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1 21:21 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP1_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2 22:22 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP2_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3 23:23 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE5_COMP3_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0 24:24 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP0_TRUE 0x00000001 +#define 
NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1 25:25 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP1_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2 26:26 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP2_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3 27:27 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE6_COMP3_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0 28:28 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP0_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1 29:29 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP1_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2 30:30 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP2_TRUE 0x00000001 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3 31:31 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_FALSE 0x00000000 +#define NVCB97_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B_ATTRIBUTE7_COMP3_TRUE 0x00000001 + +#define NVCB97_SET_TIR_CONTROL 0x1130 +#define NVCB97_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES 0:0 +#define NVCB97_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_DISABLE 0x00000000 +#define NVCB97_SET_TIR_CONTROL_Z_PASS_PIXEL_COUNT_USE_RASTER_SAMPLES_ENABLE 0x00000001 +#define NVCB97_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES 4:4 +#define NVCB97_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_DISABLE 0x00000000 +#define NVCB97_SET_TIR_CONTROL_ALPHA_TO_COVERAGE_USE_RASTER_SAMPLES_ENABLE 0x00000001 +#define NVCB97_SET_TIR_CONTROL_REDUCE_COVERAGE 1:1 +#define NVCB97_SET_TIR_CONTROL_REDUCE_COVERAGE_DISABLE 0x00000000 +#define NVCB97_SET_TIR_CONTROL_REDUCE_COVERAGE_ENABLE 0x00000001 +#define NVCB97_SET_TIR_CONTROL_REDUCTION_MODE 2:2 +#define NVCB97_SET_TIR_CONTROL_REDUCTION_MODE_AFFINITY_MAP 0x00000000 +#define NVCB97_SET_TIR_CONTROL_REDUCTION_MODE_TRUNCATION 0x00000001 + +#define NVCB97_SET_MUTABLE_METHOD_CONTROL 0x1134 +#define NVCB97_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT 0:0 +#define NVCB97_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_FALSE 0x00000000 +#define NVCB97_SET_MUTABLE_METHOD_CONTROL_TREAT_MUTABLE_AS_HEAVYWEIGHT_TRUE 0x00000001 + +#define NVCB97_SET_POST_PS_INITIAL_COVERAGE 0x1138 +#define NVCB97_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE 0:0 +#define NVCB97_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_FALSE 0x00000000 +#define NVCB97_SET_POST_PS_INITIAL_COVERAGE_USE_PRE_PS_COVERAGE_TRUE 0x00000001 + +#define NVCB97_SET_FILL_VIA_TRIANGLE 0x113c +#define NVCB97_SET_FILL_VIA_TRIANGLE_MODE 1:0 +#define NVCB97_SET_FILL_VIA_TRIANGLE_MODE_DISABLED 0x00000000 +#define NVCB97_SET_FILL_VIA_TRIANGLE_MODE_FILL_ALL 0x00000001 +#define NVCB97_SET_FILL_VIA_TRIANGLE_MODE_FILL_BBOX 0x00000002 + +#define NVCB97_SET_BLEND_PER_FORMAT_ENABLE 
0x1140 +#define NVCB97_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16 4:4 +#define NVCB97_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_FALSE 0x00000000 +#define NVCB97_SET_BLEND_PER_FORMAT_ENABLE_SNORM8_UNORM16_SNORM16_TRUE 0x00000001 + +#define NVCB97_FLUSH_PENDING_WRITES 0x1144 +#define NVCB97_FLUSH_PENDING_WRITES_SM_DOES_GLOBAL_STORE 0:0 + +#define NVCB97_SET_VERTEX_ATTRIBUTE_A(i) (0x1160+(i)*4) +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_STREAM 4:0 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_SOURCE 6:6 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_SOURCE_ACTIVE 0x00000000 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_SOURCE_INACTIVE 0x00000001 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_OFFSET 20:7 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS 26:21 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_COMPONENT_BIT_WIDTHS_A8 0x00000034 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE 29:27 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SNORM 0x00000001 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UNORM 0x00000002 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SINT 0x00000003 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_UINT 0x00000004 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_USCALED 0x00000005 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_SSCALED 0x00000006 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE_NUM_FLOAT 0x00000007 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B 31:31 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_FALSE 0x00000000 +#define NVCB97_SET_VERTEX_ATTRIBUTE_A_SWAP_R_AND_B_TRUE 0x00000001 + +#define NVCB97_SET_VERTEX_ATTRIBUTE_B(i) (0x11a0+(i)*4) +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_STREAM 4:0 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_SOURCE 6:6 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_SOURCE_ACTIVE 0x00000000 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_SOURCE_INACTIVE 0x00000001 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_OFFSET 20:7 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS 26:21 +#define 
NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32_A32 0x00000001 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32_B32 0x00000002 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16_A16 0x00000003 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32_G32 0x00000004 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16_B16 0x00000005 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8B8G8R8 0x0000002F +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8_A8 0x0000000A +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_X8B8G8R8 0x00000033 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A2B10G10R10 0x00000030 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_B10G11R11 0x00000031 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16_G16 0x0000000F +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R32 0x00000012 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8_B8 0x00000013 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_G8R8 0x00000032 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8_G8 0x00000018 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R16 0x0000001B +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_R8 0x0000001D +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_COMPONENT_BIT_WIDTHS_A8 0x00000034 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE 29:27 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_UNUSED_ENUM_DO_NOT_USE_BECAUSE_IT_WILL_GO_AWAY 0x00000000 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SNORM 0x00000001 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UNORM 0x00000002 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SINT 0x00000003 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_UINT 0x00000004 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_USCALED 0x00000005 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_SSCALED 0x00000006 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_NUMERICAL_TYPE_NUM_FLOAT 0x00000007 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B 31:31 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_FALSE 0x00000000 +#define NVCB97_SET_VERTEX_ATTRIBUTE_B_SWAP_R_AND_B_TRUE 0x00000001 + +#define NVCB97_SET_ANTI_ALIAS_SAMPLE_POSITIONS(i) (0x11e0+(i)*4) +#define NVCB97_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X0 3:0 +#define NVCB97_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y0 7:4 +#define NVCB97_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X1 11:8 +#define NVCB97_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y1 15:12 +#define NVCB97_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X2 19:16 +#define NVCB97_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y2 23:20 +#define NVCB97_SET_ANTI_ALIAS_SAMPLE_POSITIONS_X3 27:24 +#define NVCB97_SET_ANTI_ALIAS_SAMPLE_POSITIONS_Y3 31:28 + +#define NVCB97_SET_OFFSET_RENDER_TARGET_INDEX 0x11f0 +#define NVCB97_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX 0:0 +#define NVCB97_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_FALSE 0x00000000 +#define NVCB97_SET_OFFSET_RENDER_TARGET_INDEX_BY_VIEWPORT_INDEX_TRUE 0x00000001 + +#define NVCB97_FORCE_HEAVYWEIGHT_METHOD_SYNC 0x11f4 +#define NVCB97_FORCE_HEAVYWEIGHT_METHOD_SYNC_V 31:0 + +#define NVCB97_SET_COVERAGE_TO_COLOR 0x11f8 +#define NVCB97_SET_COVERAGE_TO_COLOR_ENABLE 0:0 +#define NVCB97_SET_COVERAGE_TO_COLOR_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_COVERAGE_TO_COLOR_ENABLE_TRUE 0x00000001 +#define NVCB97_SET_COVERAGE_TO_COLOR_CT_SELECT 6:4 + +#define NVCB97_DECOMPRESS_ZETA_SURFACE 
0x11fc +#define NVCB97_DECOMPRESS_ZETA_SURFACE_Z_ENABLE 0:0 +#define NVCB97_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_FALSE 0x00000000 +#define NVCB97_DECOMPRESS_ZETA_SURFACE_Z_ENABLE_TRUE 0x00000001 +#define NVCB97_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE 4:4 +#define NVCB97_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_FALSE 0x00000000 +#define NVCB97_DECOMPRESS_ZETA_SURFACE_STENCIL_ENABLE_TRUE 0x00000001 + +#define NVCB97_SET_SCREEN_STATE_MASK 0x1204 +#define NVCB97_SET_SCREEN_STATE_MASK_MASK 3:0 + +#define NVCB97_SET_ZT_SPARSE 0x1208 +#define NVCB97_SET_ZT_SPARSE_ENABLE 0:0 +#define NVCB97_SET_ZT_SPARSE_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_ZT_SPARSE_ENABLE_TRUE 0x00000001 +#define NVCB97_SET_ZT_SPARSE_UNMAPPED_COMPARE 1:1 +#define NVCB97_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_UNMAPPED_0 0x00000000 +#define NVCB97_SET_ZT_SPARSE_UNMAPPED_COMPARE_ZT_SPARSE_FAIL_ALWAYS 0x00000001 + +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST 0x1214 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_START_INDEX 15:0 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_COUNT 27:16 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E + +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT 0x1218 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_START_INDEX 15:0 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006 +#define 
NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009 +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D +#define NVCB97_DRAW_VERTEX_ARRAY_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E + +#define NVCB97_SET_CT_SELECT 0x121c +#define NVCB97_SET_CT_SELECT_TARGET_COUNT 3:0 +#define NVCB97_SET_CT_SELECT_TARGET0 6:4 +#define NVCB97_SET_CT_SELECT_TARGET1 9:7 +#define NVCB97_SET_CT_SELECT_TARGET2 12:10 +#define NVCB97_SET_CT_SELECT_TARGET3 15:13 +#define NVCB97_SET_CT_SELECT_TARGET4 18:16 +#define NVCB97_SET_CT_SELECT_TARGET5 21:19 +#define NVCB97_SET_CT_SELECT_TARGET6 24:22 +#define NVCB97_SET_CT_SELECT_TARGET7 27:25 + +#define NVCB97_SET_COMPRESSION_THRESHOLD 0x1220 +#define NVCB97_SET_COMPRESSION_THRESHOLD_SAMPLES 3:0 +#define NVCB97_SET_COMPRESSION_THRESHOLD_SAMPLES__0 0x00000000 +#define NVCB97_SET_COMPRESSION_THRESHOLD_SAMPLES__1 0x00000001 +#define NVCB97_SET_COMPRESSION_THRESHOLD_SAMPLES__2 0x00000002 +#define NVCB97_SET_COMPRESSION_THRESHOLD_SAMPLES__4 0x00000003 +#define NVCB97_SET_COMPRESSION_THRESHOLD_SAMPLES__8 0x00000004 +#define NVCB97_SET_COMPRESSION_THRESHOLD_SAMPLES__16 0x00000005 +#define NVCB97_SET_COMPRESSION_THRESHOLD_SAMPLES__32 0x00000006 +#define NVCB97_SET_COMPRESSION_THRESHOLD_SAMPLES__64 0x00000007 +#define NVCB97_SET_COMPRESSION_THRESHOLD_SAMPLES__128 0x00000008 +#define NVCB97_SET_COMPRESSION_THRESHOLD_SAMPLES__256 0x00000009 +#define NVCB97_SET_COMPRESSION_THRESHOLD_SAMPLES__512 0x0000000A +#define NVCB97_SET_COMPRESSION_THRESHOLD_SAMPLES__1024 0x0000000B +#define NVCB97_SET_COMPRESSION_THRESHOLD_SAMPLES__2048 0x0000000C + +#define NVCB97_SET_PIXEL_SHADER_INTERLOCK_CONTROL 0x1224 +#define NVCB97_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE 1:0 +#define NVCB97_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_NO_CONFLICT_DETECT 0x00000000 +#define NVCB97_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_SAMPLE 0x00000001 +#define NVCB97_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_MODE_CONFLICT_DETECT_PIXEL 0x00000002 +#define NVCB97_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE 2:2 +#define NVCB97_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_16X16 0x00000000 +#define NVCB97_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_TILE_SIZE_TC_TILE_SIZE_8X8 0x00000001 +#define NVCB97_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER 3:3 +#define NVCB97_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_ORDERED 0x00000000 +#define NVCB97_SET_PIXEL_SHADER_INTERLOCK_CONTROL_TILE_COALESCER_FRAGMENT_ORDER_TC_FRAGMENT_UNORDERED 0x00000001 + +#define NVCB97_SET_ZT_SIZE_A 0x1228 +#define NVCB97_SET_ZT_SIZE_A_WIDTH 27:0 + +#define NVCB97_SET_ZT_SIZE_B 0x122c +#define NVCB97_SET_ZT_SIZE_B_HEIGHT 17:0 + +#define NVCB97_SET_ZT_SIZE_C 0x1230 +#define NVCB97_SET_ZT_SIZE_C_THIRD_DIMENSION 15:0 +#define NVCB97_SET_ZT_SIZE_C_CONTROL 16:16 +#define 
NVCB97_SET_ZT_SIZE_C_CONTROL_THIRD_DIMENSION_DEFINES_ARRAY_SIZE 0x00000000 +#define NVCB97_SET_ZT_SIZE_C_CONTROL_ARRAY_SIZE_IS_ONE 0x00000001 + +#define NVCB97_SET_SAMPLER_BINDING 0x1234 +#define NVCB97_SET_SAMPLER_BINDING_V 0:0 +#define NVCB97_SET_SAMPLER_BINDING_V_INDEPENDENTLY 0x00000000 +#define NVCB97_SET_SAMPLER_BINDING_V_VIA_HEADER_BINDING 0x00000001 + +#define NVCB97_DRAW_AUTO 0x123c +#define NVCB97_DRAW_AUTO_BYTE_COUNT 31:0 + +#define NVCB97_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK(i) (0x1240+(i)*4) +#define NVCB97_SET_POST_VTG_SHADER_ATTRIBUTE_SKIP_MASK_V 31:0 + +#define NVCB97_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE 0x1260 +#define NVCB97_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_INDEX 7:0 +#define NVCB97_SET_PIXEL_SHADER_TICKET_DISPENSER_VALUE_TICKET_DISPENSER_VALUE 23:8 + +#define NVCB97_SET_BACK_END_COPY_A 0x1264 +#define NVCB97_SET_BACK_END_COPY_A_DWORDS 7:0 +#define NVCB97_SET_BACK_END_COPY_A_SATURATE32_ENABLE 8:8 +#define NVCB97_SET_BACK_END_COPY_A_SATURATE32_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_BACK_END_COPY_A_SATURATE32_ENABLE_TRUE 0x00000001 +#define NVCB97_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE 12:12 +#define NVCB97_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_BACK_END_COPY_A_TIMESTAMP_ENABLE_TRUE 0x00000001 + +#define NVCB97_SET_BACK_END_COPY_B 0x1268 +#define NVCB97_SET_BACK_END_COPY_B_SRC_ADDRESS_UPPER 7:0 + +#define NVCB97_SET_BACK_END_COPY_C 0x126c +#define NVCB97_SET_BACK_END_COPY_C_SRC_ADDRESS_LOWER 31:0 + +#define NVCB97_SET_BACK_END_COPY_D 0x1270 +#define NVCB97_SET_BACK_END_COPY_D_DEST_ADDRESS_UPPER 7:0 + +#define NVCB97_SET_BACK_END_COPY_E 0x1274 +#define NVCB97_SET_BACK_END_COPY_E_DEST_ADDRESS_LOWER 31:0 + +#define NVCB97_SET_CIRCULAR_BUFFER_SIZE 0x1280 +#define NVCB97_SET_CIRCULAR_BUFFER_SIZE_CACHE_LINES_PER_SM 19:0 + +#define NVCB97_SET_VTG_REGISTER_WATERMARKS 0x1284 +#define NVCB97_SET_VTG_REGISTER_WATERMARKS_LOW 15:0 +#define NVCB97_SET_VTG_REGISTER_WATERMARKS_HIGH 31:16 + +#define NVCB97_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVCB97_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NVCB97_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVCB97_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVCB97_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS 0x1290 +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY 5:4 +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_READ_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVCB97_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE 0x12a4 +#define NVCB97_SET_DA_PRIMITIVE_RESTART_INDEX_TOPOLOGY_CHANGE_V 31:0 + +#define NVCB97_CLEAR_ZCULL_REGION 0x12c8 +#define NVCB97_CLEAR_ZCULL_REGION_Z_ENABLE 0:0 +#define NVCB97_CLEAR_ZCULL_REGION_Z_ENABLE_FALSE 0x00000000 +#define NVCB97_CLEAR_ZCULL_REGION_Z_ENABLE_TRUE 0x00000001 +#define NVCB97_CLEAR_ZCULL_REGION_STENCIL_ENABLE 4:4 +#define NVCB97_CLEAR_ZCULL_REGION_STENCIL_ENABLE_FALSE 0x00000000 +#define NVCB97_CLEAR_ZCULL_REGION_STENCIL_ENABLE_TRUE 0x00000001 +#define NVCB97_CLEAR_ZCULL_REGION_USE_CLEAR_RECT 1:1 +#define NVCB97_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_FALSE 0x00000000 +#define NVCB97_CLEAR_ZCULL_REGION_USE_CLEAR_RECT_TRUE 0x00000001 +#define 
NVCB97_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX 2:2 +#define NVCB97_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_FALSE 0x00000000 +#define NVCB97_CLEAR_ZCULL_REGION_USE_RT_ARRAY_INDEX_TRUE 0x00000001 +#define NVCB97_CLEAR_ZCULL_REGION_RT_ARRAY_INDEX 20:5 +#define NVCB97_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE 3:3 +#define NVCB97_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_FALSE 0x00000000 +#define NVCB97_CLEAR_ZCULL_REGION_MAKE_CONSERVATIVE_TRUE 0x00000001 + +#define NVCB97_SET_DEPTH_TEST 0x12cc +#define NVCB97_SET_DEPTH_TEST_ENABLE 0:0 +#define NVCB97_SET_DEPTH_TEST_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_DEPTH_TEST_ENABLE_TRUE 0x00000001 + +#define NVCB97_SET_FILL_MODE 0x12d0 +#define NVCB97_SET_FILL_MODE_V 31:0 +#define NVCB97_SET_FILL_MODE_V_POINT 0x00000001 +#define NVCB97_SET_FILL_MODE_V_WIREFRAME 0x00000002 +#define NVCB97_SET_FILL_MODE_V_SOLID 0x00000003 + +#define NVCB97_SET_SHADE_MODE 0x12d4 +#define NVCB97_SET_SHADE_MODE_V 31:0 +#define NVCB97_SET_SHADE_MODE_V_FLAT 0x00000001 +#define NVCB97_SET_SHADE_MODE_V_GOURAUD 0x00000002 +#define NVCB97_SET_SHADE_MODE_V_OGL_FLAT 0x00001D00 +#define NVCB97_SET_SHADE_MODE_V_OGL_SMOOTH 0x00001D01 + +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS 0x12d8 +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY 5:4 +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_NONINTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS 0x12dc +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY 5:4 +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_FIRST 0x00000000 +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_NORMAL 0x00000001 +#define NVCB97_SET_L2_CACHE_CONTROL_FOR_ROP_INTERLOCKED_WRITE_REQUESTS_POLICY_EVICT_LAST 0x00000002 + +#define NVCB97_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL 0x12e0 +#define NVCB97_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT 3:0 +#define NVCB97_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1 0x00000000 +#define NVCB97_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_2X2 0x00000001 +#define NVCB97_SET_ALPHA_TO_COVERAGE_DITHER_CONTROL_DITHER_FOOTPRINT_PIXELS_1X1_VIRTUAL_SAMPLES 0x00000002 + +#define NVCB97_SET_BLEND_STATE_PER_TARGET 0x12e4 +#define NVCB97_SET_BLEND_STATE_PER_TARGET_ENABLE 0:0 +#define NVCB97_SET_BLEND_STATE_PER_TARGET_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_BLEND_STATE_PER_TARGET_ENABLE_TRUE 0x00000001 + +#define NVCB97_SET_DEPTH_WRITE 0x12e8 +#define NVCB97_SET_DEPTH_WRITE_ENABLE 0:0 +#define NVCB97_SET_DEPTH_WRITE_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_DEPTH_WRITE_ENABLE_TRUE 0x00000001 + +#define NVCB97_SET_ALPHA_TEST 0x12ec +#define NVCB97_SET_ALPHA_TEST_ENABLE 0:0 +#define NVCB97_SET_ALPHA_TEST_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_ALPHA_TEST_ENABLE_TRUE 0x00000001 + +#define NVCB97_SET_INLINE_INDEX4X8_ALIGN 0x1300 +#define NVCB97_SET_INLINE_INDEX4X8_ALIGN_COUNT 29:0 +#define NVCB97_SET_INLINE_INDEX4X8_ALIGN_START 31:30 + +#define NVCB97_DRAW_INLINE_INDEX4X8 0x1304 +#define NVCB97_DRAW_INLINE_INDEX4X8_INDEX0 7:0 +#define NVCB97_DRAW_INLINE_INDEX4X8_INDEX1 15:8 +#define NVCB97_DRAW_INLINE_INDEX4X8_INDEX2 23:16 +#define NVCB97_DRAW_INLINE_INDEX4X8_INDEX3 31:24 
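Each field above is encoded as an HI:LO bit-position pair (for example, NVCB97_DRAW_INLINE_INDEX4X8_INDEX1 is 15:8) rather than as separate shift and mask constants. Such a token is consumed through ternary-based helper macros, since (1 ? 15:8) evaluates to the high bit and (0 ? 15:8) to the low bit. The sketch below is a minimal illustration of that pattern, assuming this header is included; the FLD_* helpers and pack_inline_index4x8() are hypothetical stand-ins for a driver's own DRF-style macros, not part of this header.

    #include <stdint.h>

    /*
     * Illustrative helpers only -- not defined by this header.  For a
     * field f written as HI:LO, (1 ? f) yields the high bit index and
     * (0 ? f) the low bit index via the C conditional operator.
     */
    #define FLD_SHIFT(f)  ((0 ? f) % 32)                                  /* low bit index      */
    #define FLD_MASK(f)   (0xFFFFFFFFU >> (31 - ((1 ? f) % 32) + FLD_SHIFT(f))) /* width mask   */
    #define FLD_NUM(f, n) (((uint32_t)(n) & FLD_MASK(f)) << FLD_SHIFT(f)) /* place n in field f */

    /* Pack one DRAW_INLINE_INDEX4X8 data word from four 8-bit indices. */
    static inline uint32_t pack_inline_index4x8(uint8_t i0, uint8_t i1,
                                                uint8_t i2, uint8_t i3)
    {
        return FLD_NUM(NVCB97_DRAW_INLINE_INDEX4X8_INDEX0, i0) |
               FLD_NUM(NVCB97_DRAW_INLINE_INDEX4X8_INDEX1, i1) |
               FLD_NUM(NVCB97_DRAW_INLINE_INDEX4X8_INDEX2, i2) |
               FLD_NUM(NVCB97_DRAW_INLINE_INDEX4X8_INDEX3, i3);
    }

The indexed methods follow the same pattern at the address level: NVCB97_SET_VERTEX_ATTRIBUTE_A(i) expands to 0x1160 + (i)*4, so element i of a method array is reached by offsetting the base method by one dword per index.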
+ +#define NVCB97_D3D_SET_CULL_MODE 0x1308 +#define NVCB97_D3D_SET_CULL_MODE_V 31:0 +#define NVCB97_D3D_SET_CULL_MODE_V_NONE 0x00000001 +#define NVCB97_D3D_SET_CULL_MODE_V_CW 0x00000002 +#define NVCB97_D3D_SET_CULL_MODE_V_CCW 0x00000003 + +#define NVCB97_SET_DEPTH_FUNC 0x130c +#define NVCB97_SET_DEPTH_FUNC_V 31:0 +#define NVCB97_SET_DEPTH_FUNC_V_OGL_NEVER 0x00000200 +#define NVCB97_SET_DEPTH_FUNC_V_OGL_LESS 0x00000201 +#define NVCB97_SET_DEPTH_FUNC_V_OGL_EQUAL 0x00000202 +#define NVCB97_SET_DEPTH_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVCB97_SET_DEPTH_FUNC_V_OGL_GREATER 0x00000204 +#define NVCB97_SET_DEPTH_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVCB97_SET_DEPTH_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVCB97_SET_DEPTH_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVCB97_SET_DEPTH_FUNC_V_D3D_NEVER 0x00000001 +#define NVCB97_SET_DEPTH_FUNC_V_D3D_LESS 0x00000002 +#define NVCB97_SET_DEPTH_FUNC_V_D3D_EQUAL 0x00000003 +#define NVCB97_SET_DEPTH_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVCB97_SET_DEPTH_FUNC_V_D3D_GREATER 0x00000005 +#define NVCB97_SET_DEPTH_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVCB97_SET_DEPTH_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVCB97_SET_DEPTH_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVCB97_SET_ALPHA_REF 0x1310 +#define NVCB97_SET_ALPHA_REF_V 31:0 + +#define NVCB97_SET_ALPHA_FUNC 0x1314 +#define NVCB97_SET_ALPHA_FUNC_V 31:0 +#define NVCB97_SET_ALPHA_FUNC_V_OGL_NEVER 0x00000200 +#define NVCB97_SET_ALPHA_FUNC_V_OGL_LESS 0x00000201 +#define NVCB97_SET_ALPHA_FUNC_V_OGL_EQUAL 0x00000202 +#define NVCB97_SET_ALPHA_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVCB97_SET_ALPHA_FUNC_V_OGL_GREATER 0x00000204 +#define NVCB97_SET_ALPHA_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVCB97_SET_ALPHA_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVCB97_SET_ALPHA_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVCB97_SET_ALPHA_FUNC_V_D3D_NEVER 0x00000001 +#define NVCB97_SET_ALPHA_FUNC_V_D3D_LESS 0x00000002 +#define NVCB97_SET_ALPHA_FUNC_V_D3D_EQUAL 0x00000003 +#define NVCB97_SET_ALPHA_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define NVCB97_SET_ALPHA_FUNC_V_D3D_GREATER 0x00000005 +#define NVCB97_SET_ALPHA_FUNC_V_D3D_NOTEQUAL 0x00000006 +#define NVCB97_SET_ALPHA_FUNC_V_D3D_GREATEREQUAL 0x00000007 +#define NVCB97_SET_ALPHA_FUNC_V_D3D_ALWAYS 0x00000008 + +#define NVCB97_SET_DRAW_AUTO_STRIDE 0x1318 +#define NVCB97_SET_DRAW_AUTO_STRIDE_V 11:0 + +#define NVCB97_SET_BLEND_CONST_RED 0x131c +#define NVCB97_SET_BLEND_CONST_RED_V 31:0 + +#define NVCB97_SET_BLEND_CONST_GREEN 0x1320 +#define NVCB97_SET_BLEND_CONST_GREEN_V 31:0 + +#define NVCB97_SET_BLEND_CONST_BLUE 0x1324 +#define NVCB97_SET_BLEND_CONST_BLUE_V 31:0 + +#define NVCB97_SET_BLEND_CONST_ALPHA 0x1328 +#define NVCB97_SET_BLEND_CONST_ALPHA_V 31:0 + +#define NVCB97_INVALIDATE_SAMPLER_CACHE 0x1330 +#define NVCB97_INVALIDATE_SAMPLER_CACHE_LINES 0:0 +#define NVCB97_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000 +#define NVCB97_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001 +#define NVCB97_INVALIDATE_SAMPLER_CACHE_TAG 25:4 + +#define NVCB97_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334 +#define NVCB97_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0 +#define NVCB97_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000 +#define NVCB97_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001 +#define NVCB97_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4 + +#define NVCB97_INVALIDATE_TEXTURE_DATA_CACHE 0x1338 +#define NVCB97_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0 +#define NVCB97_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000 +#define NVCB97_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001 +#define 
NVCB97_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4 + +#define NVCB97_SET_BLEND_SEPARATE_FOR_ALPHA 0x133c +#define NVCB97_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE 0:0 +#define NVCB97_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_BLEND_SEPARATE_FOR_ALPHA_ENABLE_TRUE 0x00000001 + +#define NVCB97_SET_BLEND_COLOR_OP 0x1340 +#define NVCB97_SET_BLEND_COLOR_OP_V 31:0 +#define NVCB97_SET_BLEND_COLOR_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVCB97_SET_BLEND_COLOR_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVCB97_SET_BLEND_COLOR_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVCB97_SET_BLEND_COLOR_OP_V_OGL_MIN 0x00008007 +#define NVCB97_SET_BLEND_COLOR_OP_V_OGL_MAX 0x00008008 +#define NVCB97_SET_BLEND_COLOR_OP_V_D3D_ADD 0x00000001 +#define NVCB97_SET_BLEND_COLOR_OP_V_D3D_SUBTRACT 0x00000002 +#define NVCB97_SET_BLEND_COLOR_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVCB97_SET_BLEND_COLOR_OP_V_D3D_MIN 0x00000004 +#define NVCB97_SET_BLEND_COLOR_OP_V_D3D_MAX 0x00000005 + +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF 0x1344 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V 31:0 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 
0x0000000E +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVCB97_SET_BLEND_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF 0x1348 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V 31:0 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVCB97_SET_BLEND_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVCB97_SET_BLEND_ALPHA_OP 0x134c +#define NVCB97_SET_BLEND_ALPHA_OP_V 31:0 +#define NVCB97_SET_BLEND_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVCB97_SET_BLEND_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVCB97_SET_BLEND_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVCB97_SET_BLEND_ALPHA_OP_V_OGL_MIN 0x00008007 +#define NVCB97_SET_BLEND_ALPHA_OP_V_OGL_MAX 0x00008008 +#define NVCB97_SET_BLEND_ALPHA_OP_V_D3D_ADD 0x00000001 
+#define NVCB97_SET_BLEND_ALPHA_OP_V_D3D_SUBTRACT 0x00000002 +#define NVCB97_SET_BLEND_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003 +#define NVCB97_SET_BLEND_ALPHA_OP_V_D3D_MIN 0x00000004 +#define NVCB97_SET_BLEND_ALPHA_OP_V_D3D_MAX 0x00000005 + +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF 0x1350 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V 31:0 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVCB97_SET_BLEND_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVCB97_SET_GLOBAL_COLOR_KEY 0x1354 +#define NVCB97_SET_GLOBAL_COLOR_KEY_ENABLE 0:0 +#define NVCB97_SET_GLOBAL_COLOR_KEY_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_GLOBAL_COLOR_KEY_ENABLE_TRUE 0x00000001 + +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF 0x1358 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V 31:0 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ZERO 
0x00004000 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVCB97_SET_BLEND_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVCB97_SET_SINGLE_ROP_CONTROL 0x135c +#define NVCB97_SET_SINGLE_ROP_CONTROL_ENABLE 0:0 +#define NVCB97_SET_SINGLE_ROP_CONTROL_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_SINGLE_ROP_CONTROL_ENABLE_TRUE 0x00000001 + +#define NVCB97_SET_BLEND(i) (0x1360+(i)*4) +#define NVCB97_SET_BLEND_ENABLE 0:0 +#define NVCB97_SET_BLEND_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_BLEND_ENABLE_TRUE 0x00000001 + +#define NVCB97_SET_STENCIL_TEST 0x1380 +#define NVCB97_SET_STENCIL_TEST_ENABLE 0:0 +#define NVCB97_SET_STENCIL_TEST_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_STENCIL_TEST_ENABLE_TRUE 0x00000001 + +#define NVCB97_SET_STENCIL_OP_FAIL 0x1384 +#define NVCB97_SET_STENCIL_OP_FAIL_V 31:0 +#define NVCB97_SET_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00 +#define NVCB97_SET_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000 +#define NVCB97_SET_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01 +#define NVCB97_SET_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02 +#define NVCB97_SET_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03 
+#define NVCB97_SET_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A +#define NVCB97_SET_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507 +#define NVCB97_SET_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508 +#define NVCB97_SET_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001 +#define NVCB97_SET_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002 +#define NVCB97_SET_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003 +#define NVCB97_SET_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004 +#define NVCB97_SET_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005 +#define NVCB97_SET_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006 +#define NVCB97_SET_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007 +#define NVCB97_SET_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008 + +#define NVCB97_SET_STENCIL_OP_ZFAIL 0x1388 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V 31:0 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007 +#define NVCB97_SET_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008 + +#define NVCB97_SET_STENCIL_OP_ZPASS 0x138c +#define NVCB97_SET_STENCIL_OP_ZPASS_V 31:0 +#define NVCB97_SET_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00 +#define NVCB97_SET_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000 +#define NVCB97_SET_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01 +#define NVCB97_SET_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02 +#define NVCB97_SET_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03 +#define NVCB97_SET_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A +#define NVCB97_SET_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507 +#define NVCB97_SET_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508 +#define NVCB97_SET_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001 +#define NVCB97_SET_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002 +#define NVCB97_SET_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003 +#define NVCB97_SET_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004 +#define NVCB97_SET_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005 +#define NVCB97_SET_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006 +#define NVCB97_SET_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007 +#define NVCB97_SET_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008 + +#define NVCB97_SET_STENCIL_FUNC 0x1390 +#define NVCB97_SET_STENCIL_FUNC_V 31:0 +#define NVCB97_SET_STENCIL_FUNC_V_OGL_NEVER 0x00000200 +#define NVCB97_SET_STENCIL_FUNC_V_OGL_LESS 0x00000201 +#define NVCB97_SET_STENCIL_FUNC_V_OGL_EQUAL 0x00000202 +#define NVCB97_SET_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203 +#define NVCB97_SET_STENCIL_FUNC_V_OGL_GREATER 0x00000204 +#define NVCB97_SET_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205 +#define NVCB97_SET_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206 +#define NVCB97_SET_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207 +#define NVCB97_SET_STENCIL_FUNC_V_D3D_NEVER 0x00000001 +#define NVCB97_SET_STENCIL_FUNC_V_D3D_LESS 0x00000002 +#define NVCB97_SET_STENCIL_FUNC_V_D3D_EQUAL 0x00000003 +#define NVCB97_SET_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004 +#define 
NVCB97_SET_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVCB97_SET_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVCB97_SET_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVCB97_SET_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVCB97_SET_STENCIL_FUNC_REF 0x1394
+#define NVCB97_SET_STENCIL_FUNC_REF_V 7:0
+
+#define NVCB97_SET_STENCIL_FUNC_MASK 0x1398
+#define NVCB97_SET_STENCIL_FUNC_MASK_V 7:0
+
+#define NVCB97_SET_STENCIL_MASK 0x139c
+#define NVCB97_SET_STENCIL_MASK_V 7:0
+
+#define NVCB97_SET_DRAW_AUTO_START 0x13a4
+#define NVCB97_SET_DRAW_AUTO_START_BYTE_COUNT 31:0
+
+#define NVCB97_SET_PS_SATURATE 0x13a8
+#define NVCB97_SET_PS_SATURATE_OUTPUT0 0:0
+#define NVCB97_SET_PS_SATURATE_OUTPUT0_FALSE 0x00000000
+#define NVCB97_SET_PS_SATURATE_OUTPUT0_TRUE 0x00000001
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE0 1:1
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE0_ZERO_TO_PLUS_ONE 0x00000000
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE0_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVCB97_SET_PS_SATURATE_OUTPUT1 4:4
+#define NVCB97_SET_PS_SATURATE_OUTPUT1_FALSE 0x00000000
+#define NVCB97_SET_PS_SATURATE_OUTPUT1_TRUE 0x00000001
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE1 5:5
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE1_ZERO_TO_PLUS_ONE 0x00000000
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE1_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVCB97_SET_PS_SATURATE_OUTPUT2 8:8
+#define NVCB97_SET_PS_SATURATE_OUTPUT2_FALSE 0x00000000
+#define NVCB97_SET_PS_SATURATE_OUTPUT2_TRUE 0x00000001
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE2 9:9
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE2_ZERO_TO_PLUS_ONE 0x00000000
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE2_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVCB97_SET_PS_SATURATE_OUTPUT3 12:12
+#define NVCB97_SET_PS_SATURATE_OUTPUT3_FALSE 0x00000000
+#define NVCB97_SET_PS_SATURATE_OUTPUT3_TRUE 0x00000001
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE3 13:13
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE3_ZERO_TO_PLUS_ONE 0x00000000
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE3_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVCB97_SET_PS_SATURATE_OUTPUT4 16:16
+#define NVCB97_SET_PS_SATURATE_OUTPUT4_FALSE 0x00000000
+#define NVCB97_SET_PS_SATURATE_OUTPUT4_TRUE 0x00000001
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE4 17:17
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE4_ZERO_TO_PLUS_ONE 0x00000000
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE4_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVCB97_SET_PS_SATURATE_OUTPUT5 20:20
+#define NVCB97_SET_PS_SATURATE_OUTPUT5_FALSE 0x00000000
+#define NVCB97_SET_PS_SATURATE_OUTPUT5_TRUE 0x00000001
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE5 21:21
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE5_ZERO_TO_PLUS_ONE 0x00000000
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE5_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVCB97_SET_PS_SATURATE_OUTPUT6 24:24
+#define NVCB97_SET_PS_SATURATE_OUTPUT6_FALSE 0x00000000
+#define NVCB97_SET_PS_SATURATE_OUTPUT6_TRUE 0x00000001
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE6 25:25
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE6_ZERO_TO_PLUS_ONE 0x00000000
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE6_MINUS_ONE_TO_PLUS_ONE 0x00000001
+#define NVCB97_SET_PS_SATURATE_OUTPUT7 28:28
+#define NVCB97_SET_PS_SATURATE_OUTPUT7_FALSE 0x00000000
+#define NVCB97_SET_PS_SATURATE_OUTPUT7_TRUE 0x00000001
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE7 29:29
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE7_ZERO_TO_PLUS_ONE 0x00000000
+#define NVCB97_SET_PS_SATURATE_CLAMP_RANGE7_MINUS_ONE_TO_PLUS_ONE 0x00000001
+
+#define NVCB97_SET_WINDOW_ORIGIN 0x13ac
+#define NVCB97_SET_WINDOW_ORIGIN_MODE 0:0
+#define NVCB97_SET_WINDOW_ORIGIN_MODE_UPPER_LEFT 0x00000000
+#define NVCB97_SET_WINDOW_ORIGIN_MODE_LOWER_LEFT 0x00000001
+#define NVCB97_SET_WINDOW_ORIGIN_FLIP_Y 4:4
+#define NVCB97_SET_WINDOW_ORIGIN_FLIP_Y_FALSE 0x00000000
+#define NVCB97_SET_WINDOW_ORIGIN_FLIP_Y_TRUE 0x00000001
+
+#define NVCB97_SET_LINE_WIDTH_FLOAT 0x13b0
+#define NVCB97_SET_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVCB97_SET_ALIASED_LINE_WIDTH_FLOAT 0x13b4
+#define NVCB97_SET_ALIASED_LINE_WIDTH_FLOAT_V 31:0
+
+#define NVCB97_SET_LINE_MULTISAMPLE_OVERRIDE 0x1418
+#define NVCB97_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE 0:0
+#define NVCB97_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_LINE_MULTISAMPLE_OVERRIDE_ENABLE_TRUE 0x00000001
+
+#define NVCB97_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424
+#define NVCB97_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0
+#define NVCB97_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVCB97_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVCB97_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4
+
+#define NVCB97_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x1428
+#define NVCB97_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0
+#define NVCB97_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000
+#define NVCB97_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001
+#define NVCB97_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4
+
+#define NVCB97_SET_GLOBAL_BASE_VERTEX_INDEX 0x1434
+#define NVCB97_SET_GLOBAL_BASE_VERTEX_INDEX_V 31:0
+
+#define NVCB97_SET_GLOBAL_BASE_INSTANCE_INDEX 0x1438
+#define NVCB97_SET_GLOBAL_BASE_INSTANCE_INDEX_V 31:0
+
+#define NVCB97_SET_PS_WARP_WATERMARKS 0x1450
+#define NVCB97_SET_PS_WARP_WATERMARKS_LOW 15:0
+#define NVCB97_SET_PS_WARP_WATERMARKS_HIGH 31:16
+
+#define NVCB97_SET_PS_REGISTER_WATERMARKS 0x1454
+#define NVCB97_SET_PS_REGISTER_WATERMARKS_LOW 15:0
+#define NVCB97_SET_PS_REGISTER_WATERMARKS_HIGH 31:16
+
+#define NVCB97_STORE_ZCULL 0x1464
+#define NVCB97_STORE_ZCULL_V 0:0
+
+#define NVCB97_SET_ITERATED_BLEND_CONSTANT_RED(j) (0x1480+(j)*16)
+#define NVCB97_SET_ITERATED_BLEND_CONSTANT_RED_V 15:0
+
+#define NVCB97_SET_ITERATED_BLEND_CONSTANT_GREEN(j) (0x1484+(j)*16)
+#define NVCB97_SET_ITERATED_BLEND_CONSTANT_GREEN_V 15:0
+
+#define NVCB97_SET_ITERATED_BLEND_CONSTANT_BLUE(j) (0x1488+(j)*16)
+#define NVCB97_SET_ITERATED_BLEND_CONSTANT_BLUE_V 15:0
+
+#define NVCB97_LOAD_ZCULL 0x1500
+#define NVCB97_LOAD_ZCULL_V 0:0
+
+#define NVCB97_SET_SURFACE_CLIP_ID_HEIGHT 0x1504
+#define NVCB97_SET_SURFACE_CLIP_ID_HEIGHT_V 31:0
+
+#define NVCB97_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL 0x1508
+#define NVCB97_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMIN 15:0
+#define NVCB97_SET_CLIP_ID_CLEAR_RECT_HORIZONTAL_XMAX 31:16
+
+#define NVCB97_SET_CLIP_ID_CLEAR_RECT_VERTICAL 0x150c
+#define NVCB97_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMIN 15:0
+#define NVCB97_SET_CLIP_ID_CLEAR_RECT_VERTICAL_YMAX 31:16
+
+#define NVCB97_SET_USER_CLIP_ENABLE 0x1510
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE0 0:0
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE0_FALSE 0x00000000
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE0_TRUE 0x00000001
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE1 1:1
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE1_FALSE 0x00000000
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE1_TRUE 0x00000001
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE2 2:2
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE2_FALSE 0x00000000
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE2_TRUE 0x00000001
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE3 3:3
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE3_FALSE 0x00000000
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE3_TRUE 0x00000001
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE4 4:4
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE4_FALSE 0x00000000
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE4_TRUE 0x00000001
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE5 5:5
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE5_FALSE 0x00000000
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE5_TRUE 0x00000001
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE6 6:6
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE6_FALSE 0x00000000
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE6_TRUE 0x00000001
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE7 7:7
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE7_FALSE 0x00000000
+#define NVCB97_SET_USER_CLIP_ENABLE_PLANE7_TRUE 0x00000001
+
+#define NVCB97_SET_ZPASS_PIXEL_COUNT 0x1514
+#define NVCB97_SET_ZPASS_PIXEL_COUNT_ENABLE 0:0
+#define NVCB97_SET_ZPASS_PIXEL_COUNT_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_ZPASS_PIXEL_COUNT_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_POINT_SIZE 0x1518
+#define NVCB97_SET_POINT_SIZE_V 31:0
+
+#define NVCB97_SET_ZCULL_STATS 0x151c
+#define NVCB97_SET_ZCULL_STATS_ENABLE 0:0
+#define NVCB97_SET_ZCULL_STATS_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_ZCULL_STATS_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_POINT_SPRITE 0x1520
+#define NVCB97_SET_POINT_SPRITE_ENABLE 0:0
+#define NVCB97_SET_POINT_SPRITE_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_POINT_SPRITE_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_SHADER_EXCEPTIONS 0x1528
+#define NVCB97_SET_SHADER_EXCEPTIONS_ENABLE 0:0
+#define NVCB97_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001
+
+#define NVCB97_CLEAR_REPORT_VALUE 0x1530
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE 4:0
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_DA_VERTICES_GENERATED 0x00000012
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_DA_PRIMITIVES_GENERATED 0x00000013
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_VS_INVOCATIONS 0x00000015
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_TI_INVOCATIONS 0x00000016
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_TS_INVOCATIONS 0x00000017
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_TS_PRIMITIVES_GENERATED 0x00000018
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_GS_INVOCATIONS 0x0000001A
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_GS_PRIMITIVES_GENERATED 0x0000001B
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_VTG_PRIMITIVES_OUT 0x0000001F
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_SUCCEEDED 0x00000010
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_STREAMING_PRIMITIVES_NEEDED 0x00000011
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000003
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_CLIPPER_INVOCATIONS 0x0000001C
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_CLIPPER_PRIMITIVES_GENERATED 0x0000001D
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_ZCULL_STATS 0x00000002
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_PS_INVOCATIONS 0x0000001E
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_ZPASS_PIXEL_CNT 0x00000001
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_ALPHA_BETA_CLOCKS 0x00000004
+#define NVCB97_CLEAR_REPORT_VALUE_TYPE_SCG_CLOCKS 0x00000009
+
+#define NVCB97_SET_ANTI_ALIAS_ENABLE 0x1534
+#define NVCB97_SET_ANTI_ALIAS_ENABLE_V 0:0
+#define NVCB97_SET_ANTI_ALIAS_ENABLE_V_FALSE 0x00000000
+#define NVCB97_SET_ANTI_ALIAS_ENABLE_V_TRUE 0x00000001
+
+#define NVCB97_SET_ZT_SELECT 0x1538
+#define NVCB97_SET_ZT_SELECT_TARGET_COUNT 0:0
+
+#define NVCB97_SET_ANTI_ALIAS_ALPHA_CONTROL 0x153c
+#define NVCB97_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE 0:0
+#define NVCB97_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_DISABLE 0x00000000
+#define NVCB97_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_COVERAGE_ENABLE 0x00000001
+#define NVCB97_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE 4:4
+#define NVCB97_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_DISABLE 0x00000000
+#define NVCB97_SET_ANTI_ALIAS_ALPHA_CONTROL_ALPHA_TO_ONE_ENABLE 0x00000001
+
+#define NVCB97_SET_RENDER_ENABLE_A 0x1550
+#define NVCB97_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0
+
+#define NVCB97_SET_RENDER_ENABLE_B 0x1554
+#define NVCB97_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0
+
+#define NVCB97_SET_RENDER_ENABLE_C 0x1558
+#define NVCB97_SET_RENDER_ENABLE_C_MODE 2:0
+#define NVCB97_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000
+#define NVCB97_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001
+#define NVCB97_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002
+#define NVCB97_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003
+#define NVCB97_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004
+
+#define NVCB97_SET_TEX_SAMPLER_POOL_A 0x155c
+#define NVCB97_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVCB97_SET_TEX_SAMPLER_POOL_B 0x1560
+#define NVCB97_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVCB97_SET_TEX_SAMPLER_POOL_C 0x1564
+#define NVCB97_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0
+
+#define NVCB97_SET_SLOPE_SCALE_DEPTH_BIAS 0x156c
+#define NVCB97_SET_SLOPE_SCALE_DEPTH_BIAS_V 31:0
+
+#define NVCB97_SET_ANTI_ALIASED_LINE 0x1570
+#define NVCB97_SET_ANTI_ALIASED_LINE_ENABLE 0:0
+#define NVCB97_SET_ANTI_ALIASED_LINE_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_ANTI_ALIASED_LINE_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_TEX_HEADER_POOL_A 0x1574
+#define NVCB97_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0
+
+#define NVCB97_SET_TEX_HEADER_POOL_B 0x1578
+#define NVCB97_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0
+
+#define NVCB97_SET_TEX_HEADER_POOL_C 0x157c
+#define NVCB97_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0
+
+#define NVCB97_SET_ACTIVE_ZCULL_REGION 0x1590
+#define NVCB97_SET_ACTIVE_ZCULL_REGION_ID 5:0
+
+#define NVCB97_SET_TWO_SIDED_STENCIL_TEST 0x1594
+#define NVCB97_SET_TWO_SIDED_STENCIL_TEST_ENABLE 0:0
+#define NVCB97_SET_TWO_SIDED_STENCIL_TEST_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_TWO_SIDED_STENCIL_TEST_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL 0x1598
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V 31:0
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_OGL_KEEP 0x00001E00
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_OGL_ZERO 0x00000000
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_OGL_REPLACE 0x00001E01
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCRSAT 0x00001E02
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECRSAT 0x00001E03
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_OGL_INVERT 0x0000150A
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_OGL_INCR 0x00008507
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_OGL_DECR 0x00008508
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_D3D_KEEP 0x00000001
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_D3D_ZERO 0x00000002
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_D3D_REPLACE 0x00000003
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCRSAT 0x00000004
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECRSAT 0x00000005
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_D3D_INVERT 0x00000006
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_D3D_INCR 0x00000007
+#define NVCB97_SET_BACK_STENCIL_OP_FAIL_V_D3D_DECR 0x00000008
+
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL 0x159c
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V 31:0
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_KEEP 0x00001E00
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_ZERO 0x00000000
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_REPLACE 0x00001E01
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCRSAT 0x00001E02
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECRSAT 0x00001E03
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INVERT 0x0000150A
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_INCR 0x00008507
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_OGL_DECR 0x00008508
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_KEEP 0x00000001
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_ZERO 0x00000002
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_REPLACE 0x00000003
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCRSAT 0x00000004
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECRSAT 0x00000005
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INVERT 0x00000006
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_INCR 0x00000007
+#define NVCB97_SET_BACK_STENCIL_OP_ZFAIL_V_D3D_DECR 0x00000008
+
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS 0x15a0
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V 31:0
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_OGL_KEEP 0x00001E00
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_OGL_ZERO 0x00000000
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_OGL_REPLACE 0x00001E01
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCRSAT 0x00001E02
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECRSAT 0x00001E03
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INVERT 0x0000150A
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_OGL_INCR 0x00008507
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_OGL_DECR 0x00008508
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_D3D_KEEP 0x00000001
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_D3D_ZERO 0x00000002
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_D3D_REPLACE 0x00000003
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCRSAT 0x00000004
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECRSAT 0x00000005
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INVERT 0x00000006
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_D3D_INCR 0x00000007
+#define NVCB97_SET_BACK_STENCIL_OP_ZPASS_V_D3D_DECR 0x00000008
+
+#define NVCB97_SET_BACK_STENCIL_FUNC 0x15a4
+#define NVCB97_SET_BACK_STENCIL_FUNC_V 31:0
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_OGL_NEVER 0x00000200
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_OGL_LESS 0x00000201
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_OGL_EQUAL 0x00000202
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_OGL_LEQUAL 0x00000203
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_OGL_GREATER 0x00000204
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_OGL_NOTEQUAL 0x00000205
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_OGL_GEQUAL 0x00000206
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_OGL_ALWAYS 0x00000207
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_D3D_NEVER 0x00000001
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_D3D_LESS 0x00000002
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_D3D_EQUAL 0x00000003
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_D3D_LESSEQUAL 0x00000004
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_D3D_GREATER 0x00000005
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_D3D_NOTEQUAL 0x00000006
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_D3D_GREATEREQUAL 0x00000007
+#define NVCB97_SET_BACK_STENCIL_FUNC_V_D3D_ALWAYS 0x00000008
+
+#define NVCB97_SET_SRGB_WRITE 0x15b8
+#define NVCB97_SET_SRGB_WRITE_ENABLE 0:0
+#define NVCB97_SET_SRGB_WRITE_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_SRGB_WRITE_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_DEPTH_BIAS 0x15bc
+#define NVCB97_SET_DEPTH_BIAS_V 31:0
+
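/*
 * [Editorial illustration, not part of the NVIDIA patch.] In the header
 * above, a lone hex constant such as 0x13a8 is a method (register) offset,
 * while a pair such as 1:1 or 29:29 is a HIGH:LOW bit range inside the
 * 32-bit method payload. The hypothetical helpers below sketch how such
 * "h:l" defines can be turned into shifts and masks at compile time via
 * the ?: text-substitution trick; real driver code would use the DRF-style
 * macros NVIDIA ships (e.g. in nvmisc.h) rather than these stand-ins.
 */
#define FIELD_HI(range)     (1 ? range)   /* "h:l" expands to (1 ? h : l) == h */
#define FIELD_LO(range)     (0 ? range)   /* "h:l" expands to (0 ? h : l) == l */
#define FIELD_MASK(range)   (0xFFFFFFFFU >> (31 - FIELD_HI(range) + FIELD_LO(range)))
#define FIELD_NUM(range, n) (((unsigned int)(n) & FIELD_MASK(range)) << FIELD_LO(range))

/* Example: enable saturation on pixel-shader output 0 with a -1..+1 clamp. */
static const unsigned int ps_saturate_payload =
    FIELD_NUM(NVCB97_SET_PS_SATURATE_OUTPUT0, 1) |       /* _OUTPUT0_TRUE */
    FIELD_NUM(NVCB97_SET_PS_SATURATE_CLAMP_RANGE0, 1);   /* _MINUS_ONE_TO_PLUS_ONE */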
+#define NVCB97_SET_ZCULL_REGION_FORMAT 0x15c8
+#define NVCB97_SET_ZCULL_REGION_FORMAT_TYPE 3:0
+#define NVCB97_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X4 0x00000000
+#define NVCB97_SET_ZCULL_REGION_FORMAT_TYPE_ZS_4X4 0x00000001
+#define NVCB97_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X2 0x00000002
+#define NVCB97_SET_ZCULL_REGION_FORMAT_TYPE_Z_2X4 0x00000003
+#define NVCB97_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X8_4X4 0x00000004
+#define NVCB97_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_4X2 0x00000005
+#define NVCB97_SET_ZCULL_REGION_FORMAT_TYPE_Z_8X8_2X4 0x00000006
+#define NVCB97_SET_ZCULL_REGION_FORMAT_TYPE_Z_16X16_4X8 0x00000007
+#define NVCB97_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_2X2 0x00000008
+#define NVCB97_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_4X2 0x00000009
+#define NVCB97_SET_ZCULL_REGION_FORMAT_TYPE_ZS_16X8_2X4 0x0000000A
+#define NVCB97_SET_ZCULL_REGION_FORMAT_TYPE_ZS_8X8_2X2 0x0000000B
+#define NVCB97_SET_ZCULL_REGION_FORMAT_TYPE_Z_4X8_1X1 0x0000000C
+
+#define NVCB97_SET_RT_LAYER 0x15cc
+#define NVCB97_SET_RT_LAYER_V 15:0
+#define NVCB97_SET_RT_LAYER_CONTROL 16:16
+#define NVCB97_SET_RT_LAYER_CONTROL_V_SELECTS_LAYER 0x00000000
+#define NVCB97_SET_RT_LAYER_CONTROL_GEOMETRY_SHADER_SELECTS_LAYER 0x00000001
+
+#define NVCB97_SET_ANTI_ALIAS 0x15d0
+#define NVCB97_SET_ANTI_ALIAS_SAMPLES 3:0
+#define NVCB97_SET_ANTI_ALIAS_SAMPLES_MODE_1X1 0x00000000
+#define NVCB97_SET_ANTI_ALIAS_SAMPLES_MODE_2X1 0x00000001
+#define NVCB97_SET_ANTI_ALIAS_SAMPLES_MODE_2X2 0x00000002
+#define NVCB97_SET_ANTI_ALIAS_SAMPLES_MODE_4X2 0x00000003
+#define NVCB97_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_D3D 0x00000004
+#define NVCB97_SET_ANTI_ALIAS_SAMPLES_MODE_2X1_D3D 0x00000005
+#define NVCB97_SET_ANTI_ALIAS_SAMPLES_MODE_4X4 0x00000006
+#define NVCB97_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_4 0x00000008
+#define NVCB97_SET_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_12 0x00000009
+#define NVCB97_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_8 0x0000000A
+#define NVCB97_SET_ANTI_ALIAS_SAMPLES_MODE_4X2_VC_24 0x0000000B
+
+#define NVCB97_SET_EDGE_FLAG 0x15e4
+#define NVCB97_SET_EDGE_FLAG_V 0:0
+#define NVCB97_SET_EDGE_FLAG_V_FALSE 0x00000000
+#define NVCB97_SET_EDGE_FLAG_V_TRUE 0x00000001
+
+#define NVCB97_DRAW_INLINE_INDEX 0x15e8
+#define NVCB97_DRAW_INLINE_INDEX_V 31:0
+
+#define NVCB97_SET_INLINE_INDEX2X16_ALIGN 0x15ec
+#define NVCB97_SET_INLINE_INDEX2X16_ALIGN_COUNT 30:0
+#define NVCB97_SET_INLINE_INDEX2X16_ALIGN_START_ODD 31:31
+#define NVCB97_SET_INLINE_INDEX2X16_ALIGN_START_ODD_FALSE 0x00000000
+#define NVCB97_SET_INLINE_INDEX2X16_ALIGN_START_ODD_TRUE 0x00000001
+
+#define NVCB97_DRAW_INLINE_INDEX2X16 0x15f0
+#define NVCB97_DRAW_INLINE_INDEX2X16_EVEN 15:0
+#define NVCB97_DRAW_INLINE_INDEX2X16_ODD 31:16
+
+#define NVCB97_SET_VERTEX_GLOBAL_BASE_OFFSET_A 0x15f4
+#define NVCB97_SET_VERTEX_GLOBAL_BASE_OFFSET_A_UPPER 7:0
+
+#define NVCB97_SET_VERTEX_GLOBAL_BASE_OFFSET_B 0x15f8
+#define NVCB97_SET_VERTEX_GLOBAL_BASE_OFFSET_B_LOWER 31:0
+
+#define NVCB97_SET_ZCULL_REGION_PIXEL_OFFSET_A 0x15fc
+#define NVCB97_SET_ZCULL_REGION_PIXEL_OFFSET_A_WIDTH 15:0
+
+#define NVCB97_SET_ZCULL_REGION_PIXEL_OFFSET_B 0x1600
+#define NVCB97_SET_ZCULL_REGION_PIXEL_OFFSET_B_HEIGHT 15:0
+
+#define NVCB97_SET_POINT_SPRITE_SELECT 0x1604
+#define NVCB97_SET_POINT_SPRITE_SELECT_RMODE 1:0
+#define NVCB97_SET_POINT_SPRITE_SELECT_RMODE_ZERO 0x00000000
+#define NVCB97_SET_POINT_SPRITE_SELECT_RMODE_FROM_R 0x00000001
+#define NVCB97_SET_POINT_SPRITE_SELECT_RMODE_FROM_S 0x00000002
+#define NVCB97_SET_POINT_SPRITE_SELECT_ORIGIN 2:2
+#define NVCB97_SET_POINT_SPRITE_SELECT_ORIGIN_BOTTOM 0x00000000
+#define NVCB97_SET_POINT_SPRITE_SELECT_ORIGIN_TOP 0x00000001
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE0 3:3
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE0_PASSTHROUGH 0x00000000
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE0_GENERATE 0x00000001
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE1 4:4
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE1_PASSTHROUGH 0x00000000
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE1_GENERATE 0x00000001
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE2 5:5
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE2_PASSTHROUGH 0x00000000
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE2_GENERATE 0x00000001
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE3 6:6
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE3_PASSTHROUGH 0x00000000
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE3_GENERATE 0x00000001
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE4 7:7
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE4_PASSTHROUGH 0x00000000
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE4_GENERATE 0x00000001
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE5 8:8
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE5_PASSTHROUGH 0x00000000
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE5_GENERATE 0x00000001
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE6 9:9
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE6_PASSTHROUGH 0x00000000
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE6_GENERATE 0x00000001
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE7 10:10
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE7_PASSTHROUGH 0x00000000
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE7_GENERATE 0x00000001
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE8 11:11
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE8_PASSTHROUGH 0x00000000
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE8_GENERATE 0x00000001
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE9 12:12
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE9_PASSTHROUGH 0x00000000
+#define NVCB97_SET_POINT_SPRITE_SELECT_TEXTURE9_GENERATE 0x00000001
+
+#define NVCB97_SET_ATTRIBUTE_DEFAULT 0x1610
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE 0:0
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_0001 0x00000000
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_DIFFUSE_VECTOR_1111 0x00000001
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR 1:1
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0000 0x00000000
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_COLOR_FRONT_SPECULAR_VECTOR_0001 0x00000001
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR 2:2
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0000 0x00000000
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_GENERIC_VECTOR_VECTOR_0001 0x00000001
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE 3:3
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0000 0x00000000
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_FIXED_FNC_TEXTURE_VECTOR_0001 0x00000001
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0 4:4
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_0001 0x00000000
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_DX9_COLOR0_VECTOR_1111 0x00000001
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15 5:5
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0000 0x00000000
+#define NVCB97_SET_ATTRIBUTE_DEFAULT_DX9_COLOR1_TO_COLOR15_VECTOR_0001 0x00000001
+
+#define NVCB97_END 0x1614
+#define NVCB97_END_V 0:0
+
+#define NVCB97_BEGIN 0x1618
+#define NVCB97_BEGIN_OP 15:0
+#define NVCB97_BEGIN_OP_POINTS 0x00000000
+#define NVCB97_BEGIN_OP_LINES 0x00000001
+#define NVCB97_BEGIN_OP_LINE_LOOP 0x00000002
+#define NVCB97_BEGIN_OP_LINE_STRIP 0x00000003
+#define NVCB97_BEGIN_OP_TRIANGLES 0x00000004
+#define NVCB97_BEGIN_OP_TRIANGLE_STRIP 0x00000005
+#define NVCB97_BEGIN_OP_TRIANGLE_FAN 0x00000006
+#define NVCB97_BEGIN_OP_QUADS 0x00000007
+#define NVCB97_BEGIN_OP_QUAD_STRIP 0x00000008
+#define NVCB97_BEGIN_OP_POLYGON 0x00000009
+#define NVCB97_BEGIN_OP_LINELIST_ADJCY 0x0000000A
+#define NVCB97_BEGIN_OP_LINESTRIP_ADJCY 0x0000000B
+#define NVCB97_BEGIN_OP_TRIANGLELIST_ADJCY 0x0000000C
+#define NVCB97_BEGIN_OP_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVCB97_BEGIN_OP_PATCH 0x0000000E
+#define NVCB97_BEGIN_PRIMITIVE_ID 24:24
+#define NVCB97_BEGIN_PRIMITIVE_ID_FIRST 0x00000000
+#define NVCB97_BEGIN_PRIMITIVE_ID_UNCHANGED 0x00000001
+#define NVCB97_BEGIN_INSTANCE_ID 27:26
+#define NVCB97_BEGIN_INSTANCE_ID_FIRST 0x00000000
+#define NVCB97_BEGIN_INSTANCE_ID_SUBSEQUENT 0x00000001
+#define NVCB97_BEGIN_INSTANCE_ID_UNCHANGED 0x00000002
+#define NVCB97_BEGIN_SPLIT_MODE 30:29
+#define NVCB97_BEGIN_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END 0x00000000
+#define NVCB97_BEGIN_SPLIT_MODE_NORMAL_BEGIN_OPEN_END 0x00000001
+#define NVCB97_BEGIN_SPLIT_MODE_OPEN_BEGIN_OPEN_END 0x00000002
+#define NVCB97_BEGIN_SPLIT_MODE_OPEN_BEGIN_NORMAL_END 0x00000003
+#define NVCB97_BEGIN_INSTANCE_ITERATE_ENABLE 31:31
+#define NVCB97_BEGIN_INSTANCE_ITERATE_ENABLE_FALSE 0x00000000
+#define NVCB97_BEGIN_INSTANCE_ITERATE_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_VERTEX_ID_COPY 0x161c
+#define NVCB97_SET_VERTEX_ID_COPY_ENABLE 0:0
+#define NVCB97_SET_VERTEX_ID_COPY_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_VERTEX_ID_COPY_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_VERTEX_ID_COPY_ATTRIBUTE_SLOT 11:4
+
+#define NVCB97_ADD_TO_PRIMITIVE_ID 0x1620
+#define NVCB97_ADD_TO_PRIMITIVE_ID_V 31:0
+
+#define NVCB97_LOAD_PRIMITIVE_ID 0x1624
+#define NVCB97_LOAD_PRIMITIVE_ID_V 31:0
+
+#define NVCB97_SET_SHADER_BASED_CULL 0x162c
+#define NVCB97_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE 1:1
+#define NVCB97_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_SHADER_BASED_CULL_BATCH_CULL_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE 0:0
+#define NVCB97_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_SHADER_BASED_CULL_BEFORE_FETCH_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_CLASS_VERSION 0x1638
+#define NVCB97_SET_CLASS_VERSION_CURRENT 15:0
+#define NVCB97_SET_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVCB97_SET_DA_PRIMITIVE_RESTART 0x1644
+#define NVCB97_SET_DA_PRIMITIVE_RESTART_ENABLE 0:0
+#define NVCB97_SET_DA_PRIMITIVE_RESTART_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_DA_PRIMITIVE_RESTART_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_DA_PRIMITIVE_RESTART_INDEX 0x1648
+#define NVCB97_SET_DA_PRIMITIVE_RESTART_INDEX_V 31:0
+
+#define NVCB97_SET_DA_OUTPUT 0x164c
+#define NVCB97_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START 12:12
+#define NVCB97_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_FALSE 0x00000000
+#define NVCB97_SET_DA_OUTPUT_VERTEX_ID_USES_ARRAY_START_TRUE 0x00000001
+
+#define NVCB97_SET_ANTI_ALIASED_POINT 0x1658
+#define NVCB97_SET_ANTI_ALIASED_POINT_ENABLE 0:0
+#define NVCB97_SET_ANTI_ALIASED_POINT_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_ANTI_ALIASED_POINT_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_POINT_CENTER_MODE 0x165c
+#define NVCB97_SET_POINT_CENTER_MODE_V 31:0
+#define NVCB97_SET_POINT_CENTER_MODE_V_OGL 0x00000000
+#define NVCB97_SET_POINT_CENTER_MODE_V_D3D 0x00000001
+
+#define NVCB97_SET_LINE_SMOOTH_PARAMETERS 0x1668
+#define NVCB97_SET_LINE_SMOOTH_PARAMETERS_FALLOFF 31:0
+#define NVCB97_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_00 0x00000000
+#define NVCB97_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_33 0x00000001
+#define NVCB97_SET_LINE_SMOOTH_PARAMETERS_FALLOFF__1_60 0x00000002
+
+#define NVCB97_SET_LINE_STIPPLE 0x166c
+#define NVCB97_SET_LINE_STIPPLE_ENABLE 0:0
+#define NVCB97_SET_LINE_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_LINE_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_LINE_SMOOTH_EDGE_TABLE(i) (0x1670+(i)*4)
+#define NVCB97_SET_LINE_SMOOTH_EDGE_TABLE_V0 7:0
+#define NVCB97_SET_LINE_SMOOTH_EDGE_TABLE_V1 15:8
+#define NVCB97_SET_LINE_SMOOTH_EDGE_TABLE_V2 23:16
+#define NVCB97_SET_LINE_SMOOTH_EDGE_TABLE_V3 31:24
+
+#define NVCB97_SET_LINE_STIPPLE_PARAMETERS 0x1680
+#define NVCB97_SET_LINE_STIPPLE_PARAMETERS_FACTOR 7:0
+#define NVCB97_SET_LINE_STIPPLE_PARAMETERS_PATTERN 23:8
+
+#define NVCB97_SET_PROVOKING_VERTEX 0x1684
+#define NVCB97_SET_PROVOKING_VERTEX_V 0:0
+#define NVCB97_SET_PROVOKING_VERTEX_V_FIRST 0x00000000
+#define NVCB97_SET_PROVOKING_VERTEX_V_LAST 0x00000001
+
+#define NVCB97_SET_TWO_SIDED_LIGHT 0x1688
+#define NVCB97_SET_TWO_SIDED_LIGHT_ENABLE 0:0
+#define NVCB97_SET_TWO_SIDED_LIGHT_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_TWO_SIDED_LIGHT_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_POLYGON_STIPPLE 0x168c
+#define NVCB97_SET_POLYGON_STIPPLE_ENABLE 0:0
+#define NVCB97_SET_POLYGON_STIPPLE_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_POLYGON_STIPPLE_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_SHADER_CONTROL 0x1690
+#define NVCB97_SET_SHADER_CONTROL_DEFAULT_PARTIAL 0:0
+#define NVCB97_SET_SHADER_CONTROL_DEFAULT_PARTIAL_ZERO 0x00000000
+#define NVCB97_SET_SHADER_CONTROL_DEFAULT_PARTIAL_INFINITY 0x00000001
+#define NVCB97_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR 1:1
+#define NVCB97_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_LEGACY 0x00000000
+#define NVCB97_SET_SHADER_CONTROL_FP32_NAN_BEHAVIOR_FP64_COMPATIBLE 0x00000001
+#define NVCB97_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR 2:2
+#define NVCB97_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_ZERO 0x00000000
+#define NVCB97_SET_SHADER_CONTROL_FP32_F2I_NAN_BEHAVIOR_PASS_INDEFINITE 0x00000001
+
+#define NVCB97_CHECK_CLASS_VERSION 0x16a0
+#define NVCB97_CHECK_CLASS_VERSION_CURRENT 15:0
+#define NVCB97_CHECK_CLASS_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVCB97_SET_SPH_VERSION 0x16a4
+#define NVCB97_SET_SPH_VERSION_CURRENT 15:0
+#define NVCB97_SET_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVCB97_CHECK_SPH_VERSION 0x16a8
+#define NVCB97_CHECK_SPH_VERSION_CURRENT 15:0
+#define NVCB97_CHECK_SPH_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVCB97_SET_ALPHA_TO_COVERAGE_OVERRIDE 0x16b4
+#define NVCB97_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE 0:0
+#define NVCB97_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_DISABLE 0x00000000
+#define NVCB97_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_ANTI_ALIAS_ENABLE_ENABLE 0x00000001
+#define NVCB97_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT 1:1
+#define NVCB97_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_DISABLE 0x00000000
+#define NVCB97_SET_ALPHA_TO_COVERAGE_OVERRIDE_QUALIFY_BY_PS_SAMPLE_MASK_OUTPUT_ENABLE 0x00000001
+
+#define NVCB97_SET_SCG_GRAPHICS_PRIORITY 0x16bc
+#define NVCB97_SET_SCG_GRAPHICS_PRIORITY_PRIORITY 5:0
+
+#define NVCB97_SET_SCG_GRAPHICS_SCHEDULING_PARAMETERS(i) (0x16c0+(i)*4)
+#define NVCB97_SET_SCG_GRAPHICS_SCHEDULING_PARAMETERS_V 31:0
+
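/*
 * [Editorial illustration, not part of the patch.] NVCB97_BEGIN packs the
 * primitive topology and instancing behaviour into a single payload word,
 * and parameterized defines such as NVCB97_SET_LINE_SMOOTH_EDGE_TABLE(i)
 * compute per-element method offsets (here 0x1670 + 4*i). A sketch that
 * reuses the hypothetical FIELD_NUM helper from the earlier note:
 */
static const unsigned int begin_payload =
    FIELD_NUM(NVCB97_BEGIN_OP, NVCB97_BEGIN_OP_TRIANGLES) |               /* triangle list */
    FIELD_NUM(NVCB97_BEGIN_INSTANCE_ID, NVCB97_BEGIN_INSTANCE_ID_FIRST) | /* reset instance id */
    FIELD_NUM(NVCB97_BEGIN_SPLIT_MODE, NVCB97_BEGIN_SPLIT_MODE_NORMAL_BEGIN_NORMAL_END);

static const unsigned int edge_table_word2 = NVCB97_SET_LINE_SMOOTH_EDGE_TABLE(2); /* == 0x1678 */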
+#define NVCB97_SET_POLYGON_STIPPLE_PATTERN(i) (0x1700+(i)*4)
+#define NVCB97_SET_POLYGON_STIPPLE_PATTERN_V 31:0
+
+#define NVCB97_SET_AAM_VERSION 0x1790
+#define NVCB97_SET_AAM_VERSION_CURRENT 15:0
+#define NVCB97_SET_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVCB97_CHECK_AAM_VERSION 0x1794
+#define NVCB97_CHECK_AAM_VERSION_CURRENT 15:0
+#define NVCB97_CHECK_AAM_VERSION_OLDEST_SUPPORTED 31:16
+
+#define NVCB97_SET_ZT_LAYER 0x179c
+#define NVCB97_SET_ZT_LAYER_OFFSET 15:0
+
+#define NVCB97_SET_INDEX_BUFFER_A 0x17c8
+#define NVCB97_SET_INDEX_BUFFER_A_ADDRESS_UPPER 7:0
+
+#define NVCB97_SET_INDEX_BUFFER_B 0x17cc
+#define NVCB97_SET_INDEX_BUFFER_B_ADDRESS_LOWER 31:0
+
+#define NVCB97_SET_INDEX_BUFFER_E 0x17d8
+#define NVCB97_SET_INDEX_BUFFER_E_INDEX_SIZE 1:0
+#define NVCB97_SET_INDEX_BUFFER_E_INDEX_SIZE_ONE_BYTE 0x00000000
+#define NVCB97_SET_INDEX_BUFFER_E_INDEX_SIZE_TWO_BYTES 0x00000001
+#define NVCB97_SET_INDEX_BUFFER_E_INDEX_SIZE_FOUR_BYTES 0x00000002
+
+#define NVCB97_SET_INDEX_BUFFER_F 0x17dc
+#define NVCB97_SET_INDEX_BUFFER_F_FIRST 31:0
+
+#define NVCB97_DRAW_INDEX_BUFFER 0x17e0
+#define NVCB97_DRAW_INDEX_BUFFER_COUNT 31:0
+
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST 0x17e4
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST 0x17e8
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST 0x17ec
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_FIRST 15:0
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_COUNT 27:16
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY 31:28
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POINTS 0x00000000
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINES 0x00000001
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES 0x00000004
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUADS 0x00000007
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_POLYGON 0x00000009
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_PATCH 0x0000000E
+
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f0
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVCB97_DRAW_INDEX_BUFFER32_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f4
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT 0x17f8
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_FIRST 15:0
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_COUNT 27:16
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY 31:28
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POINTS 0x00000000
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINES 0x00000001
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_LOOP 0x00000002
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINE_STRIP 0x00000003
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLES 0x00000004
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_STRIP 0x00000005
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLE_FAN 0x00000006
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUADS 0x00000007
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_QUAD_STRIP 0x00000008
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_POLYGON 0x00000009
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINELIST_ADJCY 0x0000000A
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_LINESTRIP_ADJCY 0x0000000B
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLELIST_ADJCY 0x0000000C
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVCB97_DRAW_INDEX_BUFFER8_BEGIN_END_INSTANCE_SUBSEQUENT_TOPOLOGY_PATCH 0x0000000E
+
+#define NVCB97_SET_DEPTH_BIAS_CLAMP 0x187c
+#define NVCB97_SET_DEPTH_BIAS_CLAMP_V 31:0
+
+#define NVCB97_SET_VERTEX_STREAM_INSTANCE_A(i) (0x1880+(i)*4)
+#define NVCB97_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED 0:0
+#define NVCB97_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_FALSE 0x00000000
+#define NVCB97_SET_VERTEX_STREAM_INSTANCE_A_IS_INSTANCED_TRUE 0x00000001
+
+#define NVCB97_SET_VERTEX_STREAM_INSTANCE_B(i) (0x18c0+(i)*4)
+#define NVCB97_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED 0:0
+#define NVCB97_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_FALSE 0x00000000
+#define NVCB97_SET_VERTEX_STREAM_INSTANCE_B_IS_INSTANCED_TRUE 0x00000001
+
+#define NVCB97_SET_ATTRIBUTE_POINT_SIZE 0x1910
+#define NVCB97_SET_ATTRIBUTE_POINT_SIZE_ENABLE 0:0
+#define NVCB97_SET_ATTRIBUTE_POINT_SIZE_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_ATTRIBUTE_POINT_SIZE_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_ATTRIBUTE_POINT_SIZE_SLOT 11:4
+
+#define NVCB97_OGL_SET_CULL 0x1918
+#define NVCB97_OGL_SET_CULL_ENABLE 0:0
+#define NVCB97_OGL_SET_CULL_ENABLE_FALSE 0x00000000
+#define NVCB97_OGL_SET_CULL_ENABLE_TRUE 0x00000001
+
+#define NVCB97_OGL_SET_FRONT_FACE 0x191c
+#define NVCB97_OGL_SET_FRONT_FACE_V 31:0
+#define NVCB97_OGL_SET_FRONT_FACE_V_CW 0x00000900
+#define NVCB97_OGL_SET_FRONT_FACE_V_CCW 0x00000901
+
+#define NVCB97_OGL_SET_CULL_FACE 0x1920
+#define NVCB97_OGL_SET_CULL_FACE_V 31:0
+#define NVCB97_OGL_SET_CULL_FACE_V_FRONT 0x00000404
+#define NVCB97_OGL_SET_CULL_FACE_V_BACK 0x00000405
+#define NVCB97_OGL_SET_CULL_FACE_V_FRONT_AND_BACK 0x00000408
+
+#define NVCB97_SET_VIEWPORT_PIXEL 0x1924
+#define NVCB97_SET_VIEWPORT_PIXEL_CENTER 0:0
+#define NVCB97_SET_VIEWPORT_PIXEL_CENTER_AT_HALF_INTEGERS 0x00000000
+#define NVCB97_SET_VIEWPORT_PIXEL_CENTER_AT_INTEGERS 0x00000001
+
+#define NVCB97_SET_VIEWPORT_SCALE_OFFSET 0x192c
+#define NVCB97_SET_VIEWPORT_SCALE_OFFSET_ENABLE 0:0
+#define NVCB97_SET_VIEWPORT_SCALE_OFFSET_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_VIEWPORT_SCALE_OFFSET_ENABLE_TRUE 0x00000001
+
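/*
 * [Editorial illustration, not part of the patch.] The packed draw methods
 * above combine a starting index (15:0), a 12-bit count (27:16) and a
 * topology selector (31:28) in one payload word, so a small draw needs no
 * separate BEGIN/END methods; the _SUBSEQUENT names suggest the variants
 * that advance the instance ID between otherwise identical draws. A sketch
 * using the hypothetical FIELD_NUM helper from the earlier note:
 */
static const unsigned int draw16_payload =
    FIELD_NUM(NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_FIRST, 0) |  /* start at index 0 */
    FIELD_NUM(NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_COUNT, 36) | /* 36 indices; 12-bit field caps at 4095 */
    FIELD_NUM(NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY,
              NVCB97_DRAW_INDEX_BUFFER16_BEGIN_END_INSTANCE_FIRST_TOPOLOGY_TRIANGLES);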
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL 0x193c
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE 0:0
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_FALSE 0x00000000
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_MIN_Z_ZERO_MAX_Z_ONE_TRUE 0x00000001
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE 17:16
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_USE_FIELD_MIN_Z_ZERO_MAX_Z_ONE 0x00000000
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_MIN_Z_MAX_Z 0x00000001
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_ZERO_ONE 0x00000002
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_Z_CLIP_RANGE_MINUS_INF_PLUS_INF 0x00000003
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z 3:3
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLIP 0x00000000
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MIN_Z_CLAMP 0x00000001
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z 4:4
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLIP 0x00000000
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_PIXEL_MAX_Z_CLAMP 0x00000001
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND 7:7
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_256 0x00000000
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_SCALE_1 0x00000001
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND 10:10
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_256 0x00000000
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_LINE_POINT_CULL_GUARDBAND_SCALE_1 0x00000001
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP 13:11
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP 0x00000000
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_PASSTHRU 0x00000001
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XY_CLIP 0x00000002
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_XYZ_CLIP 0x00000003
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_CLIP_NO_Z_CULL 0x00000004
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_FRUSTUM_Z_CLIP 0x00000005
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_CLIP_WZERO_TRI_FILL_OR_CLIP 0x00000006
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z 2:1
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SAME_AS_XY_GUARDBAND 0x00000000
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_256 0x00000001
+#define NVCB97_SET_VIEWPORT_CLIP_CONTROL_GEOMETRY_GUARDBAND_Z_SCALE_1 0x00000002
+
+#define NVCB97_SET_USER_CLIP_OP 0x1940
+#define NVCB97_SET_USER_CLIP_OP_PLANE0 0:0
+#define NVCB97_SET_USER_CLIP_OP_PLANE0_CLIP 0x00000000
+#define NVCB97_SET_USER_CLIP_OP_PLANE0_CULL 0x00000001
+#define NVCB97_SET_USER_CLIP_OP_PLANE1 4:4
+#define NVCB97_SET_USER_CLIP_OP_PLANE1_CLIP 0x00000000
+#define NVCB97_SET_USER_CLIP_OP_PLANE1_CULL 0x00000001
+#define NVCB97_SET_USER_CLIP_OP_PLANE2 8:8
+#define NVCB97_SET_USER_CLIP_OP_PLANE2_CLIP 0x00000000
+#define NVCB97_SET_USER_CLIP_OP_PLANE2_CULL 0x00000001
+#define NVCB97_SET_USER_CLIP_OP_PLANE3 12:12
+#define NVCB97_SET_USER_CLIP_OP_PLANE3_CLIP 0x00000000
+#define NVCB97_SET_USER_CLIP_OP_PLANE3_CULL 0x00000001
+#define NVCB97_SET_USER_CLIP_OP_PLANE4 16:16
+#define NVCB97_SET_USER_CLIP_OP_PLANE4_CLIP 0x00000000
+#define NVCB97_SET_USER_CLIP_OP_PLANE4_CULL 0x00000001
+#define NVCB97_SET_USER_CLIP_OP_PLANE5 20:20
+#define NVCB97_SET_USER_CLIP_OP_PLANE5_CLIP 0x00000000
+#define NVCB97_SET_USER_CLIP_OP_PLANE5_CULL 0x00000001
+#define NVCB97_SET_USER_CLIP_OP_PLANE6 24:24
+#define NVCB97_SET_USER_CLIP_OP_PLANE6_CLIP 0x00000000
+#define NVCB97_SET_USER_CLIP_OP_PLANE6_CULL 0x00000001
+#define NVCB97_SET_USER_CLIP_OP_PLANE7 28:28
+#define NVCB97_SET_USER_CLIP_OP_PLANE7_CLIP 0x00000000
+#define NVCB97_SET_USER_CLIP_OP_PLANE7_CULL 0x00000001
+
+#define NVCB97_SET_RENDER_ENABLE_OVERRIDE 0x1944
+#define NVCB97_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0
+#define NVCB97_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000
+#define NVCB97_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001
+#define NVCB97_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002
+
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_CONTROL 0x1948
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE 0:0
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_TOPOLOGY_IN_BEGIN_METHODS 0x00000000
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_CONTROL_OVERRIDE_USE_SEPARATE_TOPOLOGY_STATE 0x00000001
+
+#define NVCB97_SET_WINDOW_CLIP_ENABLE 0x194c
+#define NVCB97_SET_WINDOW_CLIP_ENABLE_V 0:0
+#define NVCB97_SET_WINDOW_CLIP_ENABLE_V_FALSE 0x00000000
+#define NVCB97_SET_WINDOW_CLIP_ENABLE_V_TRUE 0x00000001
+
+#define NVCB97_SET_WINDOW_CLIP_TYPE 0x1950
+#define NVCB97_SET_WINDOW_CLIP_TYPE_V 1:0
+#define NVCB97_SET_WINDOW_CLIP_TYPE_V_INCLUSIVE 0x00000000
+#define NVCB97_SET_WINDOW_CLIP_TYPE_V_EXCLUSIVE 0x00000001
+#define NVCB97_SET_WINDOW_CLIP_TYPE_V_CLIPALL 0x00000002
+
+#define NVCB97_INVALIDATE_ZCULL 0x1958
+#define NVCB97_INVALIDATE_ZCULL_V 31:0
+#define NVCB97_INVALIDATE_ZCULL_V_INVALIDATE 0x00000000
+
+#define NVCB97_SET_ZCULL 0x1968
+#define NVCB97_SET_ZCULL_Z_ENABLE 0:0
+#define NVCB97_SET_ZCULL_Z_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_ZCULL_Z_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_ZCULL_STENCIL_ENABLE 4:4
+#define NVCB97_SET_ZCULL_STENCIL_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_ZCULL_STENCIL_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_ZCULL_BOUNDS 0x196c
+#define NVCB97_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE 0:0
+#define NVCB97_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_ZCULL_BOUNDS_Z_MIN_UNBOUNDED_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE 4:4
+#define NVCB97_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_ZCULL_BOUNDS_Z_MAX_UNBOUNDED_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY 0x1970
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V 15:0
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_POINTLIST 0x00000001
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LINELIST 0x00000002
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP 0x00000003
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST 0x00000004
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP 0x00000005
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LINELIST_ADJCY 0x0000000A
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LINESTRIP_ADJCY 0x0000000B
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLELIST_ADJCY 0x0000000C
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_TRIANGLESTRIP_ADJCY 0x0000000D
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_PATCHLIST 0x0000000E
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_POINTS 0x00001001
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST 0x00001002
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST 0x00001003
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST 0x0000100F
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINESTRIP 0x00001010
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINESTRIP 0x00001011
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLELIST 0x00001012
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLESTRIP 0x00001013
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLESTRIP 0x00001014
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN 0x00001015
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLEFAN 0x00001016
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_TRIANGLEFAN_IMM 0x00001017
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_LINELIST_IMM 0x00001018
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDTRIANGLELIST2 0x0000101A
+#define NVCB97_SET_PRIMITIVE_TOPOLOGY_V_LEGACY_INDEXEDLINELIST2 0x0000101B
+
+#define NVCB97_ZCULL_SYNC 0x1978
+#define NVCB97_ZCULL_SYNC_V 31:0
+
+#define NVCB97_SET_CLIP_ID_TEST 0x197c
+#define NVCB97_SET_CLIP_ID_TEST_ENABLE 0:0
+#define NVCB97_SET_CLIP_ID_TEST_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_CLIP_ID_TEST_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_SURFACE_CLIP_ID_WIDTH 0x1980
+#define NVCB97_SET_SURFACE_CLIP_ID_WIDTH_V 31:0
+
+#define NVCB97_SET_CLIP_ID 0x1984
+#define NVCB97_SET_CLIP_ID_V 31:0
+
+#define NVCB97_SET_DEPTH_BOUNDS_TEST 0x19bc
+#define NVCB97_SET_DEPTH_BOUNDS_TEST_ENABLE 0:0
+#define NVCB97_SET_DEPTH_BOUNDS_TEST_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_DEPTH_BOUNDS_TEST_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_BLEND_FLOAT_OPTION 0x19c0
+#define NVCB97_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO 0:0
+#define NVCB97_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_FALSE 0x00000000
+#define NVCB97_SET_BLEND_FLOAT_OPTION_ZERO_TIMES_ANYTHING_IS_ZERO_TRUE 0x00000001
+
+#define NVCB97_SET_LOGIC_OP 0x19c4
+#define NVCB97_SET_LOGIC_OP_ENABLE 0:0
+#define NVCB97_SET_LOGIC_OP_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_LOGIC_OP_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_LOGIC_OP_FUNC 0x19c8
+#define NVCB97_SET_LOGIC_OP_FUNC_V 31:0
+#define NVCB97_SET_LOGIC_OP_FUNC_V_CLEAR 0x00001500
+#define NVCB97_SET_LOGIC_OP_FUNC_V_AND 0x00001501
+#define NVCB97_SET_LOGIC_OP_FUNC_V_AND_REVERSE 0x00001502
+#define NVCB97_SET_LOGIC_OP_FUNC_V_COPY 0x00001503
+#define NVCB97_SET_LOGIC_OP_FUNC_V_AND_INVERTED 0x00001504
+#define NVCB97_SET_LOGIC_OP_FUNC_V_NOOP 0x00001505
+#define NVCB97_SET_LOGIC_OP_FUNC_V_XOR 0x00001506
+#define NVCB97_SET_LOGIC_OP_FUNC_V_OR 0x00001507
+#define NVCB97_SET_LOGIC_OP_FUNC_V_NOR 0x00001508
+#define NVCB97_SET_LOGIC_OP_FUNC_V_EQUIV 0x00001509
+#define NVCB97_SET_LOGIC_OP_FUNC_V_INVERT 0x0000150A
+#define NVCB97_SET_LOGIC_OP_FUNC_V_OR_REVERSE 0x0000150B
+#define NVCB97_SET_LOGIC_OP_FUNC_V_COPY_INVERTED 0x0000150C
+#define NVCB97_SET_LOGIC_OP_FUNC_V_OR_INVERTED 0x0000150D
+#define NVCB97_SET_LOGIC_OP_FUNC_V_NAND 0x0000150E
+#define NVCB97_SET_LOGIC_OP_FUNC_V_SET 0x0000150F
+
+#define NVCB97_SET_Z_COMPRESSION 0x19cc
+#define NVCB97_SET_Z_COMPRESSION_ENABLE 0:0
+#define NVCB97_SET_Z_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_Z_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVCB97_CLEAR_SURFACE 0x19d0
+#define NVCB97_CLEAR_SURFACE_Z_ENABLE 0:0
+#define NVCB97_CLEAR_SURFACE_Z_ENABLE_FALSE 0x00000000
+#define NVCB97_CLEAR_SURFACE_Z_ENABLE_TRUE 0x00000001
+#define NVCB97_CLEAR_SURFACE_STENCIL_ENABLE 1:1
+#define NVCB97_CLEAR_SURFACE_STENCIL_ENABLE_FALSE 0x00000000
+#define NVCB97_CLEAR_SURFACE_STENCIL_ENABLE_TRUE 0x00000001
+#define NVCB97_CLEAR_SURFACE_R_ENABLE 2:2
+#define NVCB97_CLEAR_SURFACE_R_ENABLE_FALSE 0x00000000
+#define NVCB97_CLEAR_SURFACE_R_ENABLE_TRUE 0x00000001
+#define NVCB97_CLEAR_SURFACE_G_ENABLE 3:3
+#define NVCB97_CLEAR_SURFACE_G_ENABLE_FALSE 0x00000000
+#define NVCB97_CLEAR_SURFACE_G_ENABLE_TRUE 0x00000001
+#define NVCB97_CLEAR_SURFACE_B_ENABLE 4:4
+#define NVCB97_CLEAR_SURFACE_B_ENABLE_FALSE 0x00000000
+#define NVCB97_CLEAR_SURFACE_B_ENABLE_TRUE 0x00000001
+#define NVCB97_CLEAR_SURFACE_A_ENABLE 5:5
+#define NVCB97_CLEAR_SURFACE_A_ENABLE_FALSE 0x00000000
+#define NVCB97_CLEAR_SURFACE_A_ENABLE_TRUE 0x00000001
+#define NVCB97_CLEAR_SURFACE_MRT_SELECT 9:6
+#define NVCB97_CLEAR_SURFACE_RT_ARRAY_INDEX 25:10
+
+#define NVCB97_CLEAR_CLIP_ID_SURFACE 0x19d4
+#define NVCB97_CLEAR_CLIP_ID_SURFACE_V 31:0
+
+#define NVCB97_SET_COLOR_COMPRESSION(i) (0x19e0+(i)*4)
+#define NVCB97_SET_COLOR_COMPRESSION_ENABLE 0:0
+#define NVCB97_SET_COLOR_COMPRESSION_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_COLOR_COMPRESSION_ENABLE_TRUE 0x00000001
+
+#define NVCB97_SET_CT_WRITE(i) (0x1a00+(i)*4)
+#define NVCB97_SET_CT_WRITE_R_ENABLE 0:0
+#define NVCB97_SET_CT_WRITE_R_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_CT_WRITE_R_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_CT_WRITE_G_ENABLE 4:4
+#define NVCB97_SET_CT_WRITE_G_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_CT_WRITE_G_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_CT_WRITE_B_ENABLE 8:8
+#define NVCB97_SET_CT_WRITE_B_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_CT_WRITE_B_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_CT_WRITE_A_ENABLE 12:12
+#define NVCB97_SET_CT_WRITE_A_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_CT_WRITE_A_ENABLE_TRUE 0x00000001
+
+#define NVCB97_PIPE_NOP 0x1a2c
+#define NVCB97_PIPE_NOP_V 31:0
+
+#define NVCB97_SET_SPARE00 0x1a30
+#define NVCB97_SET_SPARE00_V 31:0
+
+#define NVCB97_SET_SPARE01 0x1a34
+#define NVCB97_SET_SPARE01_V 31:0
+
+#define NVCB97_SET_SPARE02 0x1a38
+#define NVCB97_SET_SPARE02_V 31:0
+
+#define NVCB97_SET_SPARE03 0x1a3c
+#define NVCB97_SET_SPARE03_V 31:0
+
+#define NVCB97_SET_REPORT_SEMAPHORE_A 0x1b00
+#define NVCB97_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 24:0
+
+#define NVCB97_SET_REPORT_SEMAPHORE_B 0x1b04
+#define NVCB97_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0
+
+#define NVCB97_SET_REPORT_SEMAPHORE_C 0x1b08
+#define NVCB97_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0
+
+#define NVCB97_SET_REPORT_SEMAPHORE_D 0x1b0c
+#define NVCB97_SET_REPORT_SEMAPHORE_D_OPERATION 1:0
+#define NVCB97_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000
+#define NVCB97_SET_REPORT_SEMAPHORE_D_OPERATION_ACQUIRE 0x00000001
+#define NVCB97_SET_REPORT_SEMAPHORE_D_OPERATION_REPORT_ONLY 0x00000002
+#define NVCB97_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003
+#define NVCB97_SET_REPORT_SEMAPHORE_D_RELEASE 4:4
+#define NVCB97_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_READS_COMPLETE 0x00000000
+#define NVCB97_SET_REPORT_SEMAPHORE_D_RELEASE_AFTER_ALL_PRECEEDING_WRITES_COMPLETE 0x00000001
+#define NVCB97_SET_REPORT_SEMAPHORE_D_ACQUIRE 8:8
+#define NVCB97_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_WRITES_START 0x00000000
+#define NVCB97_SET_REPORT_SEMAPHORE_D_ACQUIRE_BEFORE_ANY_FOLLOWING_READS_START 0x00000001
+#define NVCB97_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION 15:12
+#define NVCB97_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_NONE 0x00000000
+#define NVCB97_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DATA_ASSEMBLER 0x00000001
+#define NVCB97_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VERTEX_SHADER 0x00000002
+#define NVCB97_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_INIT_SHADER 0x00000008
+#define NVCB97_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_TESSELATION_SHADER 0x00000009
+#define NVCB97_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_GEOMETRY_SHADER 0x00000006
+#define NVCB97_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_STREAMING_OUTPUT 0x00000005
+#define NVCB97_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_VPC 0x00000004
+#define NVCB97_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ZCULL 0x00000007
+#define NVCB97_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_PIXEL_SHADER 0x0000000A
+#define NVCB97_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_DEPTH_TEST 0x0000000C
+#define NVCB97_SET_REPORT_SEMAPHORE_D_PIPELINE_LOCATION_ALL 0x0000000F
+#define NVCB97_SET_REPORT_SEMAPHORE_D_COMPARISON 16:16
+#define NVCB97_SET_REPORT_SEMAPHORE_D_COMPARISON_EQ 0x00000000
+#define NVCB97_SET_REPORT_SEMAPHORE_D_COMPARISON_GE 0x00000001
+#define NVCB97_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20
+#define NVCB97_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT 27:23
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_NONE 0x00000000
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_DA_VERTICES_GENERATED 0x00000001
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_DA_PRIMITIVES_GENERATED 0x00000003
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_VS_INVOCATIONS 0x00000005
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_TI_INVOCATIONS 0x0000001B
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_TS_INVOCATIONS 0x0000001D
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_TS_PRIMITIVES_GENERATED 0x0000001F
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_GS_INVOCATIONS 0x00000007
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_GS_PRIMITIVES_GENERATED 0x00000009
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_ALPHA_BETA_CLOCKS 0x00000004
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_SCG_CLOCKS 0x00000008
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_VTG_PRIMITIVES_OUT 0x00000012
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_TOTAL_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x0000001E
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_SUCCEEDED 0x0000000B
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED 0x0000000D
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_PRIMITIVES_NEEDED_MINUS_SUCCEEDED 0x00000006
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_STREAMING_BYTE_COUNT 0x0000001A
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_INVOCATIONS 0x0000000F
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_CLIPPER_PRIMITIVES_GENERATED 0x00000011
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS0 0x0000000A
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS1 0x0000000C
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS2 0x0000000E
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_ZCULL_STATS3 0x00000010
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_PS_INVOCATIONS 0x00000013
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT 0x00000002
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_ZPASS_PIXEL_CNT64 0x00000015
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_TILED_ZPASS_PIXEL_CNT64 0x00000017
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_COLOR_TARGET 0x00000018
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_IEEE_CLEAN_ZETA_TARGET 0x00000019
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_BOUNDING_RECTANGLE 0x0000001C
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_TIMESTAMP 0x00000014
+#define NVCB97_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28
+#define NVCB97_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000
+#define NVCB97_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001
+#define NVCB97_SET_REPORT_SEMAPHORE_D_SUB_REPORT 7:5
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REPORT_DWORD_NUMBER 21:21
+#define NVCB97_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2
+#define NVCB97_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000
+#define NVCB97_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000
+#define NVCB97_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001
+#define NVCB97_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP 19:19
+#define NVCB97_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_FALSE 0x00000000
+#define NVCB97_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_TRUE 0x00000001
+
NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF(j) (0x1e0c+(j)*32) +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V 31:0 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE 0x00004001 +#define 
NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_OP(j) (0x1e10+(j)*32) +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_OP_V 31:0 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_SUBTRACT 0x0000800A +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_REVERSE_SUBTRACT 0x0000800B +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_FUNC_ADD 0x00008006 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MIN 0x00008007 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_OP_V_OGL_MAX 0x00008008 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_ADD 0x00000001 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_SUBTRACT 0x00000002 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_REVSUBTRACT 0x00000003 
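[Editor's note] Each NVCB97_SET_BLEND_PER_TARGET_* method above is an array indexed by render target j with a 32-byte stride (eight 32-bit method slots per target), so target j's blend state occupies 0x1e00+j*32 through 0x1e18+j*32. The generated enumerant lists also show two token spaces per op/coefficient register, the OpenGL tokens (0x4xxx/0x8xxx/0xCxxx values) and the small D3D ordinals, which suggests the hardware distinguishes the encodings by value range. A short sketch of programming source-over blending on one target follows; push_method is an assumed stand-in for the consumer's pushbuffer write helper, not something defined in this header.

    /* Assumed stand-in: writes one (method offset, data) pair to the
     * channel's pushbuffer.  Not part of the generated header. */
    extern void push_method(unsigned int mthd, unsigned int data);

    /* Classic source-over alpha blending on render target j, using the
     * OpenGL token space for the op and both coefficients. */
    static void set_blend_src_over(unsigned int j)
    {
        push_method(NVCB97_SET_BLEND_PER_TARGET_COLOR_OP(j),
                    NVCB97_SET_BLEND_PER_TARGET_COLOR_OP_V_OGL_FUNC_ADD);
        push_method(NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF(j),
                    NVCB97_SET_BLEND_PER_TARGET_COLOR_SOURCE_COEFF_V_OGL_SRC_ALPHA);
        push_method(NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF(j),
                    NVCB97_SET_BLEND_PER_TARGET_COLOR_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA);
    }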
+#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MIN 0x00000004 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_OP_V_D3D_MAX 0x00000005 + +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF(j) (0x1e14+(j)*32) +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V 31:0 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ZERO 0x00004000 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE 0x00004001 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ZERO 0x00000001 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_ONE 0x00000002 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHSRCALPHA 0x0000000C +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BOTHINVSRCALPHA 0x0000000D +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_SOURCE_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define 
NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF(j) (0x1e18+(j)*32) +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V 31:0 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ZERO 0x00004000 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE 0x00004001 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_COLOR 0x00004300 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_COLOR 0x00004301 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA 0x00004302 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_SRC_ALPHA 0x00004303 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_ALPHA 0x00004304 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_ALPHA 0x00004305 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_DST_COLOR 0x00004306 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_DST_COLOR 0x00004307 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC_ALPHA_SATURATE 0x00004308 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_COLOR 0x0000C001 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_COLOR 0x0000C002 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_CONSTANT_ALPHA 0x0000C003 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_ONE_MINUS_CONSTANT_ALPHA 0x0000C004 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1COLOR 0x0000C900 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1COLOR 0x0000C901 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_SRC1ALPHA 0x0000C902 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_OGL_INVSRC1ALPHA 0x0000C903 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ZERO 0x00000001 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_ONE 0x00000002 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCCOLOR 0x00000003 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCCOLOR 0x00000004 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHA 0x00000005 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRCALPHA 0x00000006 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTALPHA 0x00000007 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTALPHA 0x00000008 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_DESTCOLOR 0x00000009 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVDESTCOLOR 0x0000000A +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRCALPHASAT 0x0000000B +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_BLENDFACTOR 0x0000000E +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVBLENDFACTOR 0x0000000F +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1COLOR 0x00000010 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1COLOR 0x00000011 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_SRC1ALPHA 0x00000012 +#define NVCB97_SET_BLEND_PER_TARGET_ALPHA_DEST_COEFF_V_D3D_INVSRC1ALPHA 0x00000013 + +#define NVCB97_SET_PIPELINE_SHADER(j) (0x2000+(j)*64) +#define NVCB97_SET_PIPELINE_SHADER_ENABLE 0:0 +#define NVCB97_SET_PIPELINE_SHADER_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_PIPELINE_SHADER_ENABLE_TRUE 0x00000001 +#define NVCB97_SET_PIPELINE_SHADER_TYPE 7:4 +#define NVCB97_SET_PIPELINE_SHADER_TYPE_VERTEX_CULL_BEFORE_FETCH 0x00000000 +#define NVCB97_SET_PIPELINE_SHADER_TYPE_VERTEX 0x00000001 +#define 
NVCB97_SET_PIPELINE_SHADER_TYPE_TESSELLATION_INIT 0x00000002 +#define NVCB97_SET_PIPELINE_SHADER_TYPE_TESSELLATION 0x00000003 +#define NVCB97_SET_PIPELINE_SHADER_TYPE_GEOMETRY 0x00000004 +#define NVCB97_SET_PIPELINE_SHADER_TYPE_PIXEL 0x00000005 + +#define NVCB97_SET_PIPELINE_RESERVED_B(j) (0x2004+(j)*64) +#define NVCB97_SET_PIPELINE_RESERVED_B_V 0:0 + +#define NVCB97_SET_PIPELINE_RESERVED_A(j) (0x2008+(j)*64) +#define NVCB97_SET_PIPELINE_RESERVED_A_V 0:0 + +#define NVCB97_SET_PIPELINE_REGISTER_COUNT(j) (0x200c+(j)*64) +#define NVCB97_SET_PIPELINE_REGISTER_COUNT_V 8:0 + +#define NVCB97_SET_PIPELINE_BINDING(j) (0x2010+(j)*64) +#define NVCB97_SET_PIPELINE_BINDING_GROUP 2:0 + +#define NVCB97_SET_PIPELINE_PROGRAM_ADDRESS_A(j) (0x2014+(j)*64) +#define NVCB97_SET_PIPELINE_PROGRAM_ADDRESS_A_UPPER 7:0 + +#define NVCB97_SET_PIPELINE_PROGRAM_ADDRESS_B(j) (0x2018+(j)*64) +#define NVCB97_SET_PIPELINE_PROGRAM_ADDRESS_B_LOWER 31:0 + +#define NVCB97_SET_PIPELINE_PROGRAM_PREFETCH(j) (0x201c+(j)*64) +#define NVCB97_SET_PIPELINE_PROGRAM_PREFETCH_SIZE_IN_BLOCKS 6:0 + +#define NVCB97_SET_PIPELINE_RESERVED_E(j) (0x2020+(j)*64) +#define NVCB97_SET_PIPELINE_RESERVED_E_V 0:0 + +#define NVCB97_SET_FALCON00 0x2300 +#define NVCB97_SET_FALCON00_V 31:0 + +#define NVCB97_SET_FALCON01 0x2304 +#define NVCB97_SET_FALCON01_V 31:0 + +#define NVCB97_SET_FALCON02 0x2308 +#define NVCB97_SET_FALCON02_V 31:0 + +#define NVCB97_SET_FALCON03 0x230c +#define NVCB97_SET_FALCON03_V 31:0 + +#define NVCB97_SET_FALCON04 0x2310 +#define NVCB97_SET_FALCON04_V 31:0 + +#define NVCB97_SET_FALCON05 0x2314 +#define NVCB97_SET_FALCON05_V 31:0 + +#define NVCB97_SET_FALCON06 0x2318 +#define NVCB97_SET_FALCON06_V 31:0 + +#define NVCB97_SET_FALCON07 0x231c +#define NVCB97_SET_FALCON07_V 31:0 + +#define NVCB97_SET_FALCON08 0x2320 +#define NVCB97_SET_FALCON08_V 31:0 + +#define NVCB97_SET_FALCON09 0x2324 +#define NVCB97_SET_FALCON09_V 31:0 + +#define NVCB97_SET_FALCON10 0x2328 +#define NVCB97_SET_FALCON10_V 31:0 + +#define NVCB97_SET_FALCON11 0x232c +#define NVCB97_SET_FALCON11_V 31:0 + +#define NVCB97_SET_FALCON12 0x2330 +#define NVCB97_SET_FALCON12_V 31:0 + +#define NVCB97_SET_FALCON13 0x2334 +#define NVCB97_SET_FALCON13_V 31:0 + +#define NVCB97_SET_FALCON14 0x2338 +#define NVCB97_SET_FALCON14_V 31:0 + +#define NVCB97_SET_FALCON15 0x233c +#define NVCB97_SET_FALCON15_V 31:0 + +#define NVCB97_SET_FALCON16 0x2340 +#define NVCB97_SET_FALCON16_V 31:0 + +#define NVCB97_SET_FALCON17 0x2344 +#define NVCB97_SET_FALCON17_V 31:0 + +#define NVCB97_SET_FALCON18 0x2348 +#define NVCB97_SET_FALCON18_V 31:0 + +#define NVCB97_SET_FALCON19 0x234c +#define NVCB97_SET_FALCON19_V 31:0 + +#define NVCB97_SET_FALCON20 0x2350 +#define NVCB97_SET_FALCON20_V 31:0 + +#define NVCB97_SET_FALCON21 0x2354 +#define NVCB97_SET_FALCON21_V 31:0 + +#define NVCB97_SET_FALCON22 0x2358 +#define NVCB97_SET_FALCON22_V 31:0 + +#define NVCB97_SET_FALCON23 0x235c +#define NVCB97_SET_FALCON23_V 31:0 + +#define NVCB97_SET_FALCON24 0x2360 +#define NVCB97_SET_FALCON24_V 31:0 + +#define NVCB97_SET_FALCON25 0x2364 +#define NVCB97_SET_FALCON25_V 31:0 + +#define NVCB97_SET_FALCON26 0x2368 +#define NVCB97_SET_FALCON26_V 31:0 + +#define NVCB97_SET_FALCON27 0x236c +#define NVCB97_SET_FALCON27_V 31:0 + +#define NVCB97_SET_FALCON28 0x2370 +#define NVCB97_SET_FALCON28_V 31:0 + +#define NVCB97_SET_FALCON29 0x2374 +#define NVCB97_SET_FALCON29_V 31:0 + +#define NVCB97_SET_FALCON30 0x2378 +#define NVCB97_SET_FALCON30_V 31:0 + +#define NVCB97_SET_FALCON31 0x237c +#define NVCB97_SET_FALCON31_V 31:0 + +#define 
NVCB97_SET_CONSTANT_BUFFER_SELECTOR_A 0x2380 +#define NVCB97_SET_CONSTANT_BUFFER_SELECTOR_A_SIZE 16:0 + +#define NVCB97_SET_CONSTANT_BUFFER_SELECTOR_B 0x2384 +#define NVCB97_SET_CONSTANT_BUFFER_SELECTOR_B_ADDRESS_UPPER 7:0 + +#define NVCB97_SET_CONSTANT_BUFFER_SELECTOR_C 0x2388 +#define NVCB97_SET_CONSTANT_BUFFER_SELECTOR_C_ADDRESS_LOWER 31:0 + +#define NVCB97_LOAD_CONSTANT_BUFFER_OFFSET 0x238c +#define NVCB97_LOAD_CONSTANT_BUFFER_OFFSET_V 15:0 + +#define NVCB97_LOAD_CONSTANT_BUFFER(i) (0x2390+(i)*4) +#define NVCB97_LOAD_CONSTANT_BUFFER_V 31:0 + +#define NVCB97_BIND_GROUP_RESERVED_A(j) (0x2400+(j)*32) +#define NVCB97_BIND_GROUP_RESERVED_A_V 0:0 + +#define NVCB97_BIND_GROUP_RESERVED_B(j) (0x2404+(j)*32) +#define NVCB97_BIND_GROUP_RESERVED_B_V 0:0 + +#define NVCB97_BIND_GROUP_RESERVED_C(j) (0x2408+(j)*32) +#define NVCB97_BIND_GROUP_RESERVED_C_V 0:0 + +#define NVCB97_BIND_GROUP_RESERVED_D(j) (0x240c+(j)*32) +#define NVCB97_BIND_GROUP_RESERVED_D_V 0:0 + +#define NVCB97_BIND_GROUP_CONSTANT_BUFFER(j) (0x2410+(j)*32) +#define NVCB97_BIND_GROUP_CONSTANT_BUFFER_VALID 0:0 +#define NVCB97_BIND_GROUP_CONSTANT_BUFFER_VALID_FALSE 0x00000000 +#define NVCB97_BIND_GROUP_CONSTANT_BUFFER_VALID_TRUE 0x00000001 +#define NVCB97_BIND_GROUP_CONSTANT_BUFFER_SHADER_SLOT 8:4 + +#define NVCB97_SET_TRAP_HANDLER_A 0x25f8 +#define NVCB97_SET_TRAP_HANDLER_A_ADDRESS_UPPER 16:0 + +#define NVCB97_SET_TRAP_HANDLER_B 0x25fc +#define NVCB97_SET_TRAP_HANDLER_B_ADDRESS_LOWER 31:0 + +#define NVCB97_SET_COLOR_CLAMP 0x2600 +#define NVCB97_SET_COLOR_CLAMP_ENABLE 0:0 +#define NVCB97_SET_COLOR_CLAMP_ENABLE_FALSE 0x00000000 +#define NVCB97_SET_COLOR_CLAMP_ENABLE_TRUE 0x00000001 + +#define NVCB97_SET_STREAM_OUT_LAYOUT_SELECT(i,j) (0x2800+(i)*128+(j)*4) +#define NVCB97_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER00 7:0 +#define NVCB97_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER01 15:8 +#define NVCB97_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER02 23:16 +#define NVCB97_SET_STREAM_OUT_LAYOUT_SELECT_ATTRIBUTE_NUMBER03 31:24 + +#define NVCB97_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE(i) (0x32f4+(i)*4) +#define NVCB97_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_V 31:0 + +#define NVCB97_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER(i) (0x3314+(i)*4) +#define NVCB97_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER_V 31:0 + +#define NVCB97_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3334 +#define NVCB97_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVCB97_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3338 +#define NVCB97_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4) +#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0 + +#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define 
NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30
+
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4)
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4
+
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0
+
+#define NVCB97_START_SHADER_PERFORMANCE_COUNTER 0x33e0
+#define NVCB97_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVCB97_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4
+#define NVCB97_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0
+
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER 0x33e8
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER_V 31:0
+
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER 0x33ec
+#define NVCB97_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER_V 31:0
+
+#define NVCB97_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4)
+#define NVCB97_SET_MME_SHADOW_SCRATCH_V 31:0
+
+#define NVCB97_CALL_MME_MACRO(j) (0x3800+(j)*8)
+#define NVCB97_CALL_MME_MACRO_V 31:0
+
+#define NVCB97_CALL_MME_DATA(j) (0x3804+(j)*8)
+#define NVCB97_CALL_MME_DATA_V 31:0
+
+#endif /* _cl_hopper_a_h_ */
diff --git a/src/common/sdk/nvidia/inc/class/clcb97tex.h b/src/common/sdk/nvidia/inc/class/clcb97tex.h
new file mode 100644
index 0000000..f9871b7
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clcb97tex.h
@@ -0,0 +1,2437 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/* AUTO GENERATED FILE -- DO NOT EDIT */
+
+#ifndef __CLCB97TEX_H__
+#define __CLCB97TEX_H__
+
+/*
+** Texture Header State Blocklinear
+ */
+
+#define NVCB97_TEXHEAD_BL_COMPONENTS MW(6:0)
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_INVALID 0x00000000
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_R32_G32_B32 0x00000002
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_R32_G32 0x00000004
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_R32_B24G8 0x00000005
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_X8B8G8R8 0x00000007
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_A8B8G8R8 0x00000008
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_A2B10G10R10 0x00000009
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_R16_G16 0x0000000c
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_G8R24 0x0000000d
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_G24R8 0x0000000e
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_R32 0x0000000f
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_A4B4G4R4 0x00000012
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_A5B5G5R1 0x00000013
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_A1B5G5R5 0x00000014
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_B5G6R5 0x00000015
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_B6G5R5 0x00000016
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_G8R8 0x00000018
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_R16 0x0000001b
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_R8 0x0000001d
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_G4R4 0x0000001e
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_R1 0x0000001f
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_BF10GF11RF11 0x00000021
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_G8B8G8R8 0x00000022
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_B8G8R8G8 0x00000023
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_DXT1 0x00000024
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_DXT23 0x00000025
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_DXT45 0x00000026
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_DXN1 0x00000027
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_DXN2 0x00000028
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_BC6H_SF16 0x00000010
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_BC6H_UF16 0x00000011
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_BC7U 0x00000017
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ETC2_RGB 0x00000006
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_EAC 0x00000019
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_EACX2 0x0000001a
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_Z24S8 0x00000029
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_X8Z24 0x0000002a
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_S8Z24 0x0000002b
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ZF32 0x0000002f
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ZF32_X24S8 0x00000030
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_Z16 0x0000003a
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042
+#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_8X5
0x00000055 +#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052 +#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044 +#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056 +#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057 +#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053 +#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045 +#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054 +#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046 +#define NVCB97_TEXHEAD_BL_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f +#define NVCB97_TEXHEAD_BL_R_DATA_TYPE MW(9:7) +#define NVCB97_TEXHEAD_BL_R_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVCB97_TEXHEAD_BL_R_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVCB97_TEXHEAD_BL_R_DATA_TYPE_NUM_SINT 0x00000003 +#define NVCB97_TEXHEAD_BL_R_DATA_TYPE_NUM_UINT 0x00000004 +#define NVCB97_TEXHEAD_BL_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVCB97_TEXHEAD_BL_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVCB97_TEXHEAD_BL_R_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_BL_G_DATA_TYPE MW(12:10) +#define NVCB97_TEXHEAD_BL_G_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVCB97_TEXHEAD_BL_G_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVCB97_TEXHEAD_BL_G_DATA_TYPE_NUM_SINT 0x00000003 +#define NVCB97_TEXHEAD_BL_G_DATA_TYPE_NUM_UINT 0x00000004 +#define NVCB97_TEXHEAD_BL_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVCB97_TEXHEAD_BL_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVCB97_TEXHEAD_BL_G_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_BL_B_DATA_TYPE MW(15:13) +#define NVCB97_TEXHEAD_BL_B_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVCB97_TEXHEAD_BL_B_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVCB97_TEXHEAD_BL_B_DATA_TYPE_NUM_SINT 0x00000003 +#define NVCB97_TEXHEAD_BL_B_DATA_TYPE_NUM_UINT 0x00000004 +#define NVCB97_TEXHEAD_BL_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVCB97_TEXHEAD_BL_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVCB97_TEXHEAD_BL_B_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_BL_A_DATA_TYPE MW(18:16) +#define NVCB97_TEXHEAD_BL_A_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVCB97_TEXHEAD_BL_A_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVCB97_TEXHEAD_BL_A_DATA_TYPE_NUM_SINT 0x00000003 +#define NVCB97_TEXHEAD_BL_A_DATA_TYPE_NUM_UINT 0x00000004 +#define NVCB97_TEXHEAD_BL_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVCB97_TEXHEAD_BL_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVCB97_TEXHEAD_BL_A_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_BL_X_SOURCE MW(21:19) +#define NVCB97_TEXHEAD_BL_X_SOURCE_IN_ZERO 0x00000000 +#define NVCB97_TEXHEAD_BL_X_SOURCE_IN_R 0x00000002 +#define NVCB97_TEXHEAD_BL_X_SOURCE_IN_G 0x00000003 +#define NVCB97_TEXHEAD_BL_X_SOURCE_IN_B 0x00000004 +#define NVCB97_TEXHEAD_BL_X_SOURCE_IN_A 0x00000005 +#define NVCB97_TEXHEAD_BL_X_SOURCE_IN_ONE_INT 0x00000006 +#define NVCB97_TEXHEAD_BL_X_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_BL_Y_SOURCE MW(24:22) +#define NVCB97_TEXHEAD_BL_Y_SOURCE_IN_ZERO 0x00000000 +#define NVCB97_TEXHEAD_BL_Y_SOURCE_IN_R 0x00000002 +#define NVCB97_TEXHEAD_BL_Y_SOURCE_IN_G 0x00000003 +#define NVCB97_TEXHEAD_BL_Y_SOURCE_IN_B 0x00000004 +#define NVCB97_TEXHEAD_BL_Y_SOURCE_IN_A 0x00000005 +#define NVCB97_TEXHEAD_BL_Y_SOURCE_IN_ONE_INT 0x00000006 +#define NVCB97_TEXHEAD_BL_Y_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_BL_Z_SOURCE MW(27:25) +#define NVCB97_TEXHEAD_BL_Z_SOURCE_IN_ZERO 0x00000000 +#define 
NVCB97_TEXHEAD_BL_Z_SOURCE_IN_R 0x00000002 +#define NVCB97_TEXHEAD_BL_Z_SOURCE_IN_G 0x00000003 +#define NVCB97_TEXHEAD_BL_Z_SOURCE_IN_B 0x00000004 +#define NVCB97_TEXHEAD_BL_Z_SOURCE_IN_A 0x00000005 +#define NVCB97_TEXHEAD_BL_Z_SOURCE_IN_ONE_INT 0x00000006 +#define NVCB97_TEXHEAD_BL_Z_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_BL_W_SOURCE MW(30:28) +#define NVCB97_TEXHEAD_BL_W_SOURCE_IN_ZERO 0x00000000 +#define NVCB97_TEXHEAD_BL_W_SOURCE_IN_R 0x00000002 +#define NVCB97_TEXHEAD_BL_W_SOURCE_IN_G 0x00000003 +#define NVCB97_TEXHEAD_BL_W_SOURCE_IN_B 0x00000004 +#define NVCB97_TEXHEAD_BL_W_SOURCE_IN_A 0x00000005 +#define NVCB97_TEXHEAD_BL_W_SOURCE_IN_ONE_INT 0x00000006 +#define NVCB97_TEXHEAD_BL_W_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_BL_PACK_COMPONENTS MW(31:31) +#define NVCB97_TEXHEAD_BL_RESERVED1Y MW(36:32) +#define NVCB97_TEXHEAD_BL_GOB_DEPTH_OFFSET MW(38:37) +#define NVCB97_TEXHEAD_BL_RESERVED1X MW(40:39) +#define NVCB97_TEXHEAD_BL_ADDRESS_BITS31TO9 MW(63:41) +#define NVCB97_TEXHEAD_BL_ADDRESS_BITS48TO32 MW(80:64) +#define NVCB97_TEXHEAD_BL_RESERVED_ADDRESS MW(84:81) +#define NVCB97_TEXHEAD_BL_HEADER_VERSION MW(87:85) +#define NVCB97_TEXHEAD_BL_HEADER_VERSION_SELECT_ONE_D_BUFFER 0x00000000 +#define NVCB97_TEXHEAD_BL_HEADER_VERSION_SELECT_PITCH_COLOR_KEY 0x00000001 +#define NVCB97_TEXHEAD_BL_HEADER_VERSION_SELECT_PITCH 0x00000002 +#define NVCB97_TEXHEAD_BL_HEADER_VERSION_SELECT_BLOCKLINEAR 0x00000003 +#define NVCB97_TEXHEAD_BL_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY 0x00000004 +#define NVCB97_TEXHEAD_BL_RESERVED_HEADER_VERSION MW(88:88) +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH MW(92:89) +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_0 0x00000000 +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_1 0x00000001 +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_2 0x00000002 +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_3 0x00000003 +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_4 0x00000004 +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_5 0x00000005 +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_6 0x00000006 +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_7 0x00000007 +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_8 0x00000008 +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_9 0x00000009 +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_10 0x0000000a +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_11 0x0000000b +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_12 0x0000000c +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_13 0x0000000d +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_14 0x0000000e +#define NVCB97_TEXHEAD_BL_RESOURCE_VIEW_COHERENCY_HASH_HASH_UNALIASED 0x0000000f +#define NVCB97_TEXHEAD_BL_RESERVED2A MW(95:93) +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_WIDTH MW(98:96) +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_WIDTH_ONE_GOB 0x00000000 +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT MW(101:99) +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_ONE_GOB 0x00000000 +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_TWO_GOBS 0x00000001 +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_FOUR_GOBS 0x00000002 +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_SIXTEEN_GOBS 
0x00000004 +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH MW(104:102) +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_ONE_GOB 0x00000000 +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_TWO_GOBS 0x00000001 +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_FOUR_GOBS 0x00000002 +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_EIGHT_GOBS 0x00000003 +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVCB97_TEXHEAD_BL_GOBS_PER_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005 +#define NVCB97_TEXHEAD_BL_RESERVED3Y MW(105:105) +#define NVCB97_TEXHEAD_BL_TILE_WIDTH_IN_GOBS MW(108:106) +#define NVCB97_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_ONE_GOB 0x00000000 +#define NVCB97_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_TWO_GOBS 0x00000001 +#define NVCB97_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_FOUR_GOBS 0x00000002 +#define NVCB97_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_EIGHT_GOBS 0x00000003 +#define NVCB97_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_SIXTEEN_GOBS 0x00000004 +#define NVCB97_TEXHEAD_BL_TILE_WIDTH_IN_GOBS_THIRTYTWO_GOBS 0x00000005 +#define NVCB97_TEXHEAD_BL_GOB3D MW(109:109) +#define NVCB97_TEXHEAD_BL_RESERVED3Z MW(111:110) +#define NVCB97_TEXHEAD_BL_LOD_ANISO_QUALITY2 MW(112:112) +#define NVCB97_TEXHEAD_BL_LOD_ANISO_QUALITY MW(113:113) +#define NVCB97_TEXHEAD_BL_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000 +#define NVCB97_TEXHEAD_BL_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001 +#define NVCB97_TEXHEAD_BL_LOD_ISO_QUALITY MW(114:114) +#define NVCB97_TEXHEAD_BL_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000 +#define NVCB97_TEXHEAD_BL_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001 +#define NVCB97_TEXHEAD_BL_ANISO_COARSE_SPREAD_MODIFIER MW(116:115) +#define NVCB97_TEXHEAD_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000 +#define NVCB97_TEXHEAD_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001 +#define NVCB97_TEXHEAD_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002 +#define NVCB97_TEXHEAD_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003 +#define NVCB97_TEXHEAD_BL_ANISO_SPREAD_SCALE MW(121:117) +#define NVCB97_TEXHEAD_BL_USE_HEADER_OPT_CONTROL MW(122:122) +#define NVCB97_TEXHEAD_BL_DEPTH_TEXTURE MW(123:123) +#define NVCB97_TEXHEAD_BL_MAX_MIP_LEVEL MW(127:124) +#define NVCB97_TEXHEAD_BL_WIDTH_MINUS_ONE MW(144:128) +#define NVCB97_TEXHEAD_BL_DEPTH_MINUS_ONE_BIT14 MW(145:145) +#define NVCB97_TEXHEAD_BL_HEIGHT_MINUS_ONE_BIT16 MW(146:146) +#define NVCB97_TEXHEAD_BL_ANISO_SPREAD_MAX_LOG2 MW(149:147) +#define NVCB97_TEXHEAD_BL_S_R_G_B_CONVERSION MW(150:150) +#define NVCB97_TEXHEAD_BL_TEXTURE_TYPE MW(154:151) +#define NVCB97_TEXHEAD_BL_TEXTURE_TYPE_ONE_D 0x00000000 +#define NVCB97_TEXHEAD_BL_TEXTURE_TYPE_TWO_D 0x00000001 +#define NVCB97_TEXHEAD_BL_TEXTURE_TYPE_THREE_D 0x00000002 +#define NVCB97_TEXHEAD_BL_TEXTURE_TYPE_CUBEMAP 0x00000003 +#define NVCB97_TEXHEAD_BL_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004 +#define NVCB97_TEXHEAD_BL_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005 +#define NVCB97_TEXHEAD_BL_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006 +#define NVCB97_TEXHEAD_BL_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007 +#define NVCB97_TEXHEAD_BL_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008 +#define NVCB97_TEXHEAD_BL_TEXTURE_TYPE_HTEX_TWOD 0x0000000a +#define NVCB97_TEXHEAD_BL_TEXTURE_TYPE_HTEX_THREE_D 0x0000000b +#define NVCB97_TEXHEAD_BL_TEXTURE_TYPE_HTEX_TWOD_ARRAY 0x0000000e +#define NVCB97_TEXHEAD_BL_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f +#define NVCB97_TEXHEAD_BL_SECTOR_PROMOTION MW(156:155) +#define 
NVCB97_TEXHEAD_BL_SECTOR_PROMOTION_NO_PROMOTION 0x00000000 +#define NVCB97_TEXHEAD_BL_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001 +#define NVCB97_TEXHEAD_BL_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002 +#define NVCB97_TEXHEAD_BL_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003 +#define NVCB97_TEXHEAD_BL_BORDER_SIZE MW(159:157) +#define NVCB97_TEXHEAD_BL_BORDER_SIZE_BORDER_SIZE_ONE 0x00000000 +#define NVCB97_TEXHEAD_BL_BORDER_SIZE_BORDER_SIZE_TWO 0x00000001 +#define NVCB97_TEXHEAD_BL_BORDER_SIZE_BORDER_SIZE_FOUR 0x00000002 +#define NVCB97_TEXHEAD_BL_BORDER_SIZE_BORDER_SIZE_EIGHT 0x00000003 +#define NVCB97_TEXHEAD_BL_BORDER_SIZE_BORDER_SAMPLER_COLOR 0x00000007 +#define NVCB97_TEXHEAD_BL_HEIGHT_MINUS_ONE MW(175:160) +#define NVCB97_TEXHEAD_BL_DEPTH_MINUS_ONE MW(189:176) +#define NVCB97_TEXHEAD_BL_RESERVED5A MW(190:190) +#define NVCB97_TEXHEAD_BL_NORMALIZED_COORDS MW(191:191) +#define NVCB97_TEXHEAD_BL_RESERVED6Y MW(192:192) +#define NVCB97_TEXHEAD_BL_TRILIN_OPT MW(197:193) +#define NVCB97_TEXHEAD_BL_MIP_LOD_BIAS MW(210:198) +#define NVCB97_TEXHEAD_BL_ANISO_BIAS MW(214:211) +#define NVCB97_TEXHEAD_BL_ANISO_FINE_SPREAD_FUNC MW(216:215) +#define NVCB97_TEXHEAD_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000 +#define NVCB97_TEXHEAD_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001 +#define NVCB97_TEXHEAD_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002 +#define NVCB97_TEXHEAD_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003 +#define NVCB97_TEXHEAD_BL_ANISO_COARSE_SPREAD_FUNC MW(218:217) +#define NVCB97_TEXHEAD_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000 +#define NVCB97_TEXHEAD_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001 +#define NVCB97_TEXHEAD_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002 +#define NVCB97_TEXHEAD_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003 +#define NVCB97_TEXHEAD_BL_MAX_ANISOTROPY MW(221:219) +#define NVCB97_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000 +#define NVCB97_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001 +#define NVCB97_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002 +#define NVCB97_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003 +#define NVCB97_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004 +#define NVCB97_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005 +#define NVCB97_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006 +#define NVCB97_TEXHEAD_BL_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007 +#define NVCB97_TEXHEAD_BL_ANISO_FINE_SPREAD_MODIFIER MW(223:222) +#define NVCB97_TEXHEAD_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000 +#define NVCB97_TEXHEAD_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001 +#define NVCB97_TEXHEAD_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002 +#define NVCB97_TEXHEAD_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003 +#define NVCB97_TEXHEAD_BL_RES_VIEW_MIN_MIP_LEVEL MW(227:224) +#define NVCB97_TEXHEAD_BL_RES_VIEW_MAX_MIP_LEVEL MW(231:228) +#define NVCB97_TEXHEAD_BL_MULTI_SAMPLE_COUNT MW(235:232) +#define NVCB97_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_1X1 0x00000000 +#define NVCB97_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X1 0x00000001 +#define NVCB97_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X2 0x00000002 +#define NVCB97_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X2 0x00000003 +#define NVCB97_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X2_D3D 0x00000004 +#define NVCB97_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X1_D3D 0x00000005 +#define NVCB97_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X4 0x00000006 +#define NVCB97_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X2_VC_4 
0x00000008 +#define NVCB97_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_2X2_VC_12 0x00000009 +#define NVCB97_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X2_VC_8 0x0000000a +#define NVCB97_TEXHEAD_BL_MULTI_SAMPLE_COUNT_MODE_4X2_VC_24 0x0000000b +#define NVCB97_TEXHEAD_BL_MIN_LOD_CLAMP MW(247:236) +#define NVCB97_TEXHEAD_BL_RESERVED7Y MW(255:248) + + +/* +** Texture Header State Blocklinear Color Key + */ + +#define NVCB97_TEXHEAD_BLCK_COMPONENTS MW(6:0) +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_INVALID 0x00000000 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_R32_G32_B32 0x00000002 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_R32_G32 0x00000004 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_R32_B24G8 0x00000005 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_X8B8G8R8 0x00000007 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_A8B8G8R8 0x00000008 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_A2B10G10R10 0x00000009 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_R16_G16 0x0000000c +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_G8R24 0x0000000d +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_G24R8 0x0000000e +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_R32 0x0000000f +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_A4B4G4R4 0x00000012 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_A5B5G5R1 0x00000013 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_A1B5G5R5 0x00000014 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_B5G6R5 0x00000015 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_B6G5R5 0x00000016 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_G8R8 0x00000018 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_R16 0x0000001b +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_R8 0x0000001d +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_G4R4 0x0000001e +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_R1 0x0000001f +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_BF10GF11RF11 0x00000021 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_G8B8G8R8 0x00000022 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_B8G8R8G8 0x00000023 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_DXT1 0x00000024 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_DXT23 0x00000025 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_DXT45 0x00000026 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_DXN1 0x00000027 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_DXN2 0x00000028 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_BC6H_SF16 0x00000010 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_BC6H_UF16 0x00000011 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_BC7U 0x00000017 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ETC2_RGB 0x00000006 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_EAC 0x00000019 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_EACX2 0x0000001a +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_Z24S8 0x00000029 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_X8Z24 0x0000002a +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_S8Z24 0x0000002b +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32 0x0000002f +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ZF32_X24S8 0x00000030 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_Z16 0x0000003a +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_4X4 
0x00000040 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046 +#define NVCB97_TEXHEAD_BLCK_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f +#define NVCB97_TEXHEAD_BLCK_R_DATA_TYPE MW(9:7) +#define NVCB97_TEXHEAD_BLCK_R_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVCB97_TEXHEAD_BLCK_R_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVCB97_TEXHEAD_BLCK_R_DATA_TYPE_NUM_SINT 0x00000003 +#define NVCB97_TEXHEAD_BLCK_R_DATA_TYPE_NUM_UINT 0x00000004 +#define NVCB97_TEXHEAD_BLCK_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVCB97_TEXHEAD_BLCK_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVCB97_TEXHEAD_BLCK_R_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_BLCK_G_DATA_TYPE MW(12:10) +#define NVCB97_TEXHEAD_BLCK_G_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVCB97_TEXHEAD_BLCK_G_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVCB97_TEXHEAD_BLCK_G_DATA_TYPE_NUM_SINT 0x00000003 +#define NVCB97_TEXHEAD_BLCK_G_DATA_TYPE_NUM_UINT 0x00000004 +#define NVCB97_TEXHEAD_BLCK_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVCB97_TEXHEAD_BLCK_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVCB97_TEXHEAD_BLCK_G_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_BLCK_B_DATA_TYPE MW(15:13) +#define NVCB97_TEXHEAD_BLCK_B_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVCB97_TEXHEAD_BLCK_B_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVCB97_TEXHEAD_BLCK_B_DATA_TYPE_NUM_SINT 0x00000003 +#define NVCB97_TEXHEAD_BLCK_B_DATA_TYPE_NUM_UINT 0x00000004 +#define NVCB97_TEXHEAD_BLCK_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVCB97_TEXHEAD_BLCK_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVCB97_TEXHEAD_BLCK_B_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_BLCK_A_DATA_TYPE MW(18:16) +#define NVCB97_TEXHEAD_BLCK_A_DATA_TYPE_NUM_SNORM 0x00000001 +#define NVCB97_TEXHEAD_BLCK_A_DATA_TYPE_NUM_UNORM 0x00000002 +#define NVCB97_TEXHEAD_BLCK_A_DATA_TYPE_NUM_SINT 0x00000003 +#define NVCB97_TEXHEAD_BLCK_A_DATA_TYPE_NUM_UINT 0x00000004 +#define NVCB97_TEXHEAD_BLCK_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005 +#define NVCB97_TEXHEAD_BLCK_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006 +#define NVCB97_TEXHEAD_BLCK_A_DATA_TYPE_NUM_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_BLCK_X_SOURCE MW(21:19) +#define NVCB97_TEXHEAD_BLCK_X_SOURCE_IN_ZERO 0x00000000 +#define NVCB97_TEXHEAD_BLCK_X_SOURCE_IN_R 0x00000002 +#define NVCB97_TEXHEAD_BLCK_X_SOURCE_IN_G 0x00000003 +#define NVCB97_TEXHEAD_BLCK_X_SOURCE_IN_B 0x00000004 +#define NVCB97_TEXHEAD_BLCK_X_SOURCE_IN_A 0x00000005 +#define NVCB97_TEXHEAD_BLCK_X_SOURCE_IN_ONE_INT 0x00000006 +#define NVCB97_TEXHEAD_BLCK_X_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_BLCK_Y_SOURCE MW(24:22) +#define NVCB97_TEXHEAD_BLCK_Y_SOURCE_IN_ZERO 
0x00000000
+#define NVCB97_TEXHEAD_BLCK_Y_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_BLCK_Y_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_BLCK_Y_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_BLCK_Y_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_BLCK_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_BLCK_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_BLCK_Z_SOURCE MW(27:25)
+#define NVCB97_TEXHEAD_BLCK_Z_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_BLCK_Z_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_BLCK_Z_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_BLCK_Z_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_BLCK_Z_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_BLCK_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_BLCK_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_BLCK_W_SOURCE MW(30:28)
+#define NVCB97_TEXHEAD_BLCK_W_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_BLCK_W_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_BLCK_W_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_BLCK_W_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_BLCK_W_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_BLCK_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_BLCK_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_BLCK_PACK_COMPONENTS MW(31:31)
+#define NVCB97_TEXHEAD_BLCK_RESERVED1Y MW(36:32)
+#define NVCB97_TEXHEAD_BLCK_GOB_DEPTH_OFFSET MW(38:37)
+#define NVCB97_TEXHEAD_BLCK_RESERVED1X MW(40:39)
+#define NVCB97_TEXHEAD_BLCK_ADDRESS_BITS31TO9 MW(63:41)
+#define NVCB97_TEXHEAD_BLCK_ADDRESS_BITS48TO32 MW(80:64)
+#define NVCB97_TEXHEAD_BLCK_RESERVED_ADDRESS MW(84:81)
+#define NVCB97_TEXHEAD_BLCK_HEADER_VERSION MW(87:85)
+#define NVCB97_TEXHEAD_BLCK_HEADER_VERSION_SELECT_ONE_D_BUFFER 0x00000000
+#define NVCB97_TEXHEAD_BLCK_HEADER_VERSION_SELECT_PITCH_COLOR_KEY 0x00000001
+#define NVCB97_TEXHEAD_BLCK_HEADER_VERSION_SELECT_PITCH 0x00000002
+#define NVCB97_TEXHEAD_BLCK_HEADER_VERSION_SELECT_BLOCKLINEAR 0x00000003
+#define NVCB97_TEXHEAD_BLCK_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY 0x00000004
+#define NVCB97_TEXHEAD_BLCK_RESERVED_HEADER_VERSION MW(88:88)
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH MW(92:89)
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_0 0x00000000
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_1 0x00000001
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_2 0x00000002
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_3 0x00000003
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_4 0x00000004
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_5 0x00000005
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_6 0x00000006
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_7 0x00000007
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_8 0x00000008
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_9 0x00000009
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_10 0x0000000a
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_11 0x0000000b
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_12 0x0000000c
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_13 0x0000000d
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_14 0x0000000e
+#define NVCB97_TEXHEAD_BLCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_UNALIASED 0x0000000f
+#define NVCB97_TEXHEAD_BLCK_RESERVED2A MW(95:93)
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_WIDTH MW(98:96)
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_WIDTH_ONE_GOB 0x00000000
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT MW(101:99)
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_ONE_GOB 0x00000000
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_TWO_GOBS 0x00000001
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_FOUR_GOBS 0x00000002
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH MW(104:102)
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_ONE_GOB 0x00000000
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_TWO_GOBS 0x00000001
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_FOUR_GOBS 0x00000002
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_EIGHT_GOBS 0x00000003
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVCB97_TEXHEAD_BLCK_GOBS_PER_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005
+#define NVCB97_TEXHEAD_BLCK_RESERVED3Y MW(105:105)
+#define NVCB97_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS MW(108:106)
+#define NVCB97_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_ONE_GOB 0x00000000
+#define NVCB97_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_TWO_GOBS 0x00000001
+#define NVCB97_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_FOUR_GOBS 0x00000002
+#define NVCB97_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_EIGHT_GOBS 0x00000003
+#define NVCB97_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_SIXTEEN_GOBS 0x00000004
+#define NVCB97_TEXHEAD_BLCK_TILE_WIDTH_IN_GOBS_THIRTYTWO_GOBS 0x00000005
+#define NVCB97_TEXHEAD_BLCK_GOB3D MW(109:109)
+#define NVCB97_TEXHEAD_BLCK_RESERVED3Z MW(111:110)
+#define NVCB97_TEXHEAD_BLCK_LOD_ANISO_QUALITY2 MW(112:112)
+#define NVCB97_TEXHEAD_BLCK_LOD_ANISO_QUALITY MW(113:113)
+#define NVCB97_TEXHEAD_BLCK_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVCB97_TEXHEAD_BLCK_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVCB97_TEXHEAD_BLCK_LOD_ISO_QUALITY MW(114:114)
+#define NVCB97_TEXHEAD_BLCK_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVCB97_TEXHEAD_BLCK_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVCB97_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_MODIFIER MW(116:115)
+#define NVCB97_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVCB97_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVCB97_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVCB97_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVCB97_TEXHEAD_BLCK_ANISO_SPREAD_SCALE MW(121:117)
+#define NVCB97_TEXHEAD_BLCK_USE_HEADER_OPT_CONTROL MW(122:122)
+#define NVCB97_TEXHEAD_BLCK_DEPTH_TEXTURE MW(123:123)
+#define NVCB97_TEXHEAD_BLCK_MAX_MIP_LEVEL MW(127:124)
+#define NVCB97_TEXHEAD_BLCK_WIDTH_MINUS_ONE MW(144:128)
+#define NVCB97_TEXHEAD_BLCK_DEPTH_MINUS_ONE_BIT14 MW(145:145)
+#define NVCB97_TEXHEAD_BLCK_HEIGHT_MINUS_ONE_BIT16 MW(146:146)
+#define NVCB97_TEXHEAD_BLCK_ANISO_SPREAD_MAX_LOG2 MW(149:147)
+#define NVCB97_TEXHEAD_BLCK_S_R_G_B_CONVERSION MW(150:150)
+#define NVCB97_TEXHEAD_BLCK_TEXTURE_TYPE MW(154:151)
+#define NVCB97_TEXHEAD_BLCK_TEXTURE_TYPE_ONE_D 0x00000000
+#define NVCB97_TEXHEAD_BLCK_TEXTURE_TYPE_TWO_D 0x00000001
+#define NVCB97_TEXHEAD_BLCK_TEXTURE_TYPE_THREE_D 0x00000002
+#define NVCB97_TEXHEAD_BLCK_TEXTURE_TYPE_CUBEMAP 0x00000003
+#define NVCB97_TEXHEAD_BLCK_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004
+#define NVCB97_TEXHEAD_BLCK_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005
+#define NVCB97_TEXHEAD_BLCK_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006
+#define NVCB97_TEXHEAD_BLCK_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007
+#define NVCB97_TEXHEAD_BLCK_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008
+#define NVCB97_TEXHEAD_BLCK_TEXTURE_TYPE_HTEX_TWOD 0x0000000a
+#define NVCB97_TEXHEAD_BLCK_TEXTURE_TYPE_HTEX_THREE_D 0x0000000b
+#define NVCB97_TEXHEAD_BLCK_TEXTURE_TYPE_HTEX_TWOD_ARRAY 0x0000000e
+#define NVCB97_TEXHEAD_BLCK_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f
+#define NVCB97_TEXHEAD_BLCK_SECTOR_PROMOTION MW(156:155)
+#define NVCB97_TEXHEAD_BLCK_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVCB97_TEXHEAD_BLCK_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVCB97_TEXHEAD_BLCK_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVCB97_TEXHEAD_BLCK_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVCB97_TEXHEAD_BLCK_BORDER_SIZE MW(159:157)
+#define NVCB97_TEXHEAD_BLCK_BORDER_SIZE_BORDER_SIZE_ONE 0x00000000
+#define NVCB97_TEXHEAD_BLCK_BORDER_SIZE_BORDER_SIZE_TWO 0x00000001
+#define NVCB97_TEXHEAD_BLCK_BORDER_SIZE_BORDER_SIZE_FOUR 0x00000002
+#define NVCB97_TEXHEAD_BLCK_BORDER_SIZE_BORDER_SIZE_EIGHT 0x00000003
+#define NVCB97_TEXHEAD_BLCK_BORDER_SIZE_BORDER_SAMPLER_COLOR 0x00000007
+#define NVCB97_TEXHEAD_BLCK_HEIGHT_MINUS_ONE MW(175:160)
+#define NVCB97_TEXHEAD_BLCK_DEPTH_MINUS_ONE MW(189:176)
+#define NVCB97_TEXHEAD_BLCK_RESERVED5A MW(190:190)
+#define NVCB97_TEXHEAD_BLCK_NORMALIZED_COORDS MW(191:191)
+#define NVCB97_TEXHEAD_BLCK_COLOR_KEY_OP MW(192:192)
+#define NVCB97_TEXHEAD_BLCK_TRILIN_OPT MW(197:193)
+#define NVCB97_TEXHEAD_BLCK_MIP_LOD_BIAS MW(210:198)
+#define NVCB97_TEXHEAD_BLCK_ANISO_BIAS MW(214:211)
+#define NVCB97_TEXHEAD_BLCK_ANISO_FINE_SPREAD_FUNC MW(216:215)
+#define NVCB97_TEXHEAD_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVCB97_TEXHEAD_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVCB97_TEXHEAD_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVCB97_TEXHEAD_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVCB97_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_FUNC MW(218:217)
+#define NVCB97_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVCB97_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVCB97_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVCB97_TEXHEAD_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVCB97_TEXHEAD_BLCK_MAX_ANISOTROPY MW(221:219)
+#define NVCB97_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000
+#define NVCB97_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001
+#define NVCB97_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002
+#define NVCB97_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003
+#define NVCB97_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004
+#define NVCB97_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005
+#define NVCB97_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006
+#define NVCB97_TEXHEAD_BLCK_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007
+#define NVCB97_TEXHEAD_BLCK_ANISO_FINE_SPREAD_MODIFIER MW(223:222)
+#define NVCB97_TEXHEAD_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVCB97_TEXHEAD_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVCB97_TEXHEAD_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVCB97_TEXHEAD_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVCB97_TEXHEAD_BLCK_COLOR_KEY_VALUE MW(255:224)
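The MW(high:low) selectors above give absolute bit positions within the 256-bit texture header. A minimal sketch of how such a field could be read back, assuming the header is held as eight little-endian 32-bit words and that MW(h:l) simply names bits l through h of the packed 256-bit value; the helper name below is illustrative and not part of this header:

/*
 * Sketch only: extract bits [lo..hi] from a 256-bit texture header
 * stored as eight 32-bit words (word 0 holds bits 31:0).
 */
static unsigned int texhead_get_field(const unsigned int th[8],
                                      unsigned int hi, unsigned int lo)
{
    unsigned int value = 0;
    unsigned int bit;

    for (bit = lo; bit <= hi; bit++) {
        unsigned int b = (th[bit / 32u] >> (bit % 32u)) & 1u;
        value |= b << (bit - lo);
    }
    return value;
}

/* For example, texhead_get_field(th, 154, 151) would return the
 * NVCB97_TEXHEAD_BLCK_TEXTURE_TYPE field, MW(154:151). */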
+
+
+/*
+** Texture Header State One-D Buffer
+ */
+
+#define NVCB97_TEXHEAD_1D_COMPONENTS MW(6:0)
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_INVALID 0x00000000
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_R32_G32_B32 0x00000002
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_R32_G32 0x00000004
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_R32_B24G8 0x00000005
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_X8B8G8R8 0x00000007
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_A8B8G8R8 0x00000008
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_A2B10G10R10 0x00000009
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_R16_G16 0x0000000c
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_G8R24 0x0000000d
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_G24R8 0x0000000e
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_R32 0x0000000f
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_A4B4G4R4 0x00000012
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_A5B5G5R1 0x00000013
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_A1B5G5R5 0x00000014
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_B5G6R5 0x00000015
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_B6G5R5 0x00000016
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_G8R8 0x00000018
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_R16 0x0000001b
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_R8 0x0000001d
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_G4R4 0x0000001e
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_R1 0x0000001f
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_BF10GF11RF11 0x00000021
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_G8B8G8R8 0x00000022
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_B8G8R8G8 0x00000023
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_DXT1 0x00000024
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_DXT23 0x00000025
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_DXT45 0x00000026
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_DXN1 0x00000027
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_DXN2 0x00000028
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_BC6H_SF16 0x00000010
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_BC6H_UF16 0x00000011
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_BC7U 0x00000017
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ETC2_RGB 0x00000006
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_EAC 0x00000019
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_EACX2 0x0000001a
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_Z24S8 0x00000029
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_X8Z24 0x0000002a
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_S8Z24 0x0000002b
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ZF32 0x0000002f
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ZF32_X24S8 0x00000030
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_Z16 0x0000003a
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046
+#define NVCB97_TEXHEAD_1D_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f
+#define NVCB97_TEXHEAD_1D_R_DATA_TYPE MW(9:7)
+#define NVCB97_TEXHEAD_1D_R_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVCB97_TEXHEAD_1D_R_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVCB97_TEXHEAD_1D_R_DATA_TYPE_NUM_SINT 0x00000003
+#define NVCB97_TEXHEAD_1D_R_DATA_TYPE_NUM_UINT 0x00000004
+#define NVCB97_TEXHEAD_1D_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVCB97_TEXHEAD_1D_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVCB97_TEXHEAD_1D_R_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_1D_G_DATA_TYPE MW(12:10)
+#define NVCB97_TEXHEAD_1D_G_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVCB97_TEXHEAD_1D_G_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVCB97_TEXHEAD_1D_G_DATA_TYPE_NUM_SINT 0x00000003
+#define NVCB97_TEXHEAD_1D_G_DATA_TYPE_NUM_UINT 0x00000004
+#define NVCB97_TEXHEAD_1D_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVCB97_TEXHEAD_1D_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVCB97_TEXHEAD_1D_G_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_1D_B_DATA_TYPE MW(15:13)
+#define NVCB97_TEXHEAD_1D_B_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVCB97_TEXHEAD_1D_B_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVCB97_TEXHEAD_1D_B_DATA_TYPE_NUM_SINT 0x00000003
+#define NVCB97_TEXHEAD_1D_B_DATA_TYPE_NUM_UINT 0x00000004
+#define NVCB97_TEXHEAD_1D_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVCB97_TEXHEAD_1D_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVCB97_TEXHEAD_1D_B_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_1D_A_DATA_TYPE MW(18:16)
+#define NVCB97_TEXHEAD_1D_A_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVCB97_TEXHEAD_1D_A_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVCB97_TEXHEAD_1D_A_DATA_TYPE_NUM_SINT 0x00000003
+#define NVCB97_TEXHEAD_1D_A_DATA_TYPE_NUM_UINT 0x00000004
+#define NVCB97_TEXHEAD_1D_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVCB97_TEXHEAD_1D_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVCB97_TEXHEAD_1D_A_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_1D_X_SOURCE MW(21:19)
+#define NVCB97_TEXHEAD_1D_X_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_1D_X_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_1D_X_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_1D_X_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_1D_X_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_1D_X_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_1D_X_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_1D_Y_SOURCE MW(24:22)
+#define NVCB97_TEXHEAD_1D_Y_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_1D_Y_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_1D_Y_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_1D_Y_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_1D_Y_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_1D_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_1D_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_1D_Z_SOURCE MW(27:25)
+#define NVCB97_TEXHEAD_1D_Z_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_1D_Z_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_1D_Z_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_1D_Z_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_1D_Z_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_1D_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_1D_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_1D_W_SOURCE MW(30:28)
+#define NVCB97_TEXHEAD_1D_W_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_1D_W_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_1D_W_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_1D_W_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_1D_W_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_1D_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_1D_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_1D_PACK_COMPONENTS MW(31:31)
+#define NVCB97_TEXHEAD_1D_ADDRESS_BITS31TO0 MW(63:32)
+#define NVCB97_TEXHEAD_1D_ADDRESS_BITS48TO32 MW(80:64)
+#define NVCB97_TEXHEAD_1D_RESERVED_ADDRESS MW(84:81)
+#define NVCB97_TEXHEAD_1D_HEADER_VERSION MW(87:85)
+#define NVCB97_TEXHEAD_1D_HEADER_VERSION_SELECT_ONE_D_BUFFER 0x00000000
+#define NVCB97_TEXHEAD_1D_HEADER_VERSION_SELECT_PITCH_COLOR_KEY 0x00000001
+#define NVCB97_TEXHEAD_1D_HEADER_VERSION_SELECT_PITCH 0x00000002
+#define NVCB97_TEXHEAD_1D_HEADER_VERSION_SELECT_BLOCKLINEAR 0x00000003
+#define NVCB97_TEXHEAD_1D_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY 0x00000004
+#define NVCB97_TEXHEAD_1D_RESERVED_HEADER_VERSION MW(88:88)
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH MW(92:89)
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_0 0x00000000
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_1 0x00000001
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_2 0x00000002
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_3 0x00000003
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_4 0x00000004
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_5 0x00000005
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_6 0x00000006
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_7 0x00000007
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_8 0x00000008
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_9 0x00000009
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_10 0x0000000a
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_11 0x0000000b
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_12 0x0000000c
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_13 0x0000000d
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_14 0x0000000e
+#define NVCB97_TEXHEAD_1D_RESOURCE_VIEW_COHERENCY_HASH_HASH_UNALIASED 0x0000000f
+#define NVCB97_TEXHEAD_1D_RESERVED2A MW(95:93)
+#define NVCB97_TEXHEAD_1D_WIDTH_MINUS_ONE_BITS31TO16 MW(111:96)
+#define NVCB97_TEXHEAD_1D_RESERVED3X MW(127:112)
+#define NVCB97_TEXHEAD_1D_WIDTH_MINUS_ONE_BITS15TO0 MW(143:128)
+#define NVCB97_TEXHEAD_1D_RESERVED4X MW(149:144)
+#define NVCB97_TEXHEAD_1D_S_R_G_B_CONVERSION MW(150:150)
+#define NVCB97_TEXHEAD_1D_TEXTURE_TYPE MW(154:151)
+#define NVCB97_TEXHEAD_1D_TEXTURE_TYPE_ONE_D 0x00000000
+#define NVCB97_TEXHEAD_1D_TEXTURE_TYPE_TWO_D 0x00000001
+#define NVCB97_TEXHEAD_1D_TEXTURE_TYPE_THREE_D 0x00000002
+#define NVCB97_TEXHEAD_1D_TEXTURE_TYPE_CUBEMAP 0x00000003
+#define NVCB97_TEXHEAD_1D_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004
+#define NVCB97_TEXHEAD_1D_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005
+#define NVCB97_TEXHEAD_1D_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006
+#define NVCB97_TEXHEAD_1D_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007
+#define NVCB97_TEXHEAD_1D_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008
+#define NVCB97_TEXHEAD_1D_TEXTURE_TYPE_HTEX_TWOD 0x0000000a
+#define NVCB97_TEXHEAD_1D_TEXTURE_TYPE_HTEX_THREE_D 0x0000000b
+#define NVCB97_TEXHEAD_1D_TEXTURE_TYPE_HTEX_TWOD_ARRAY 0x0000000e
+#define NVCB97_TEXHEAD_1D_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f
+#define NVCB97_TEXHEAD_1D_SECTOR_PROMOTION MW(156:155)
+#define NVCB97_TEXHEAD_1D_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVCB97_TEXHEAD_1D_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVCB97_TEXHEAD_1D_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVCB97_TEXHEAD_1D_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVCB97_TEXHEAD_1D_RESERVED4Y MW(159:157)
+#define NVCB97_TEXHEAD_1D_RESERVED5X MW(189:160)
+#define NVCB97_TEXHEAD_1D_RESERVED5A MW(190:190)
+#define NVCB97_TEXHEAD_1D_RESERVED5Y MW(191:191)
+#define NVCB97_TEXHEAD_1D_RESERVED6X MW(223:192)
+#define NVCB97_TEXHEAD_1D_RESERVED7X MW(255:224)
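The one-D buffer layout splits its 49-bit base address across two fields, ADDRESS_BITS31TO0 at MW(63:32) and ADDRESS_BITS48TO32 at MW(80:64). A sketch of reassembling it, reusing the illustrative texhead_get_field() helper from the earlier note:

static unsigned long long texhead_1d_address(const unsigned int th[8])
{
    unsigned long long lo = texhead_get_field(th, 63, 32); /* ADDRESS_BITS31TO0  */
    unsigned long long hi = texhead_get_field(th, 80, 64); /* ADDRESS_BITS48TO32 */

    return (hi << 32) | lo;
}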
+
+
+/*
+** Texture Header State Pitch
+ */
+
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS MW(6:0)
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_INVALID 0x00000000
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_R32_G32_B32 0x00000002
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_R32_G32 0x00000004
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_R32_B24G8 0x00000005
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_X8B8G8R8 0x00000007
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_A8B8G8R8 0x00000008
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_A2B10G10R10 0x00000009
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_R16_G16 0x0000000c
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_G8R24 0x0000000d
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_G24R8 0x0000000e
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_R32 0x0000000f
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_A4B4G4R4 0x00000012
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_A5B5G5R1 0x00000013
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_A1B5G5R5 0x00000014
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_B5G6R5 0x00000015
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_B6G5R5 0x00000016
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_G8R8 0x00000018
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_R16 0x0000001b
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_R8 0x0000001d
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_G4R4 0x0000001e
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_R1 0x0000001f
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_BF10GF11RF11 0x00000021
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_G8B8G8R8 0x00000022
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_B8G8R8G8 0x00000023
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_DXT1 0x00000024
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_DXT23 0x00000025
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_DXT45 0x00000026
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_DXN1 0x00000027
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_DXN2 0x00000028
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_BC6H_SF16 0x00000010
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_BC6H_UF16 0x00000011
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_BC7U 0x00000017
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ETC2_RGB 0x00000006
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_EAC 0x00000019
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_EACX2 0x0000001a
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_Z24S8 0x00000029
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_X8Z24 0x0000002a
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_S8Z24 0x0000002b
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32 0x0000002f
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ZF32_X24S8 0x00000030
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_Z16 0x0000003a
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046
+#define NVCB97_TEXHEAD_PITCH_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f
+#define NVCB97_TEXHEAD_PITCH_R_DATA_TYPE MW(9:7)
+#define NVCB97_TEXHEAD_PITCH_R_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVCB97_TEXHEAD_PITCH_R_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVCB97_TEXHEAD_PITCH_R_DATA_TYPE_NUM_SINT 0x00000003
+#define NVCB97_TEXHEAD_PITCH_R_DATA_TYPE_NUM_UINT 0x00000004
+#define NVCB97_TEXHEAD_PITCH_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVCB97_TEXHEAD_PITCH_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVCB97_TEXHEAD_PITCH_R_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCH_G_DATA_TYPE MW(12:10)
+#define NVCB97_TEXHEAD_PITCH_G_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVCB97_TEXHEAD_PITCH_G_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVCB97_TEXHEAD_PITCH_G_DATA_TYPE_NUM_SINT 0x00000003
+#define NVCB97_TEXHEAD_PITCH_G_DATA_TYPE_NUM_UINT 0x00000004
+#define NVCB97_TEXHEAD_PITCH_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVCB97_TEXHEAD_PITCH_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVCB97_TEXHEAD_PITCH_G_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCH_B_DATA_TYPE MW(15:13)
+#define NVCB97_TEXHEAD_PITCH_B_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVCB97_TEXHEAD_PITCH_B_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVCB97_TEXHEAD_PITCH_B_DATA_TYPE_NUM_SINT 0x00000003
+#define NVCB97_TEXHEAD_PITCH_B_DATA_TYPE_NUM_UINT 0x00000004
+#define NVCB97_TEXHEAD_PITCH_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVCB97_TEXHEAD_PITCH_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVCB97_TEXHEAD_PITCH_B_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCH_A_DATA_TYPE MW(18:16)
+#define NVCB97_TEXHEAD_PITCH_A_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVCB97_TEXHEAD_PITCH_A_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVCB97_TEXHEAD_PITCH_A_DATA_TYPE_NUM_SINT 0x00000003
+#define NVCB97_TEXHEAD_PITCH_A_DATA_TYPE_NUM_UINT 0x00000004
+#define NVCB97_TEXHEAD_PITCH_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVCB97_TEXHEAD_PITCH_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVCB97_TEXHEAD_PITCH_A_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCH_X_SOURCE MW(21:19)
+#define NVCB97_TEXHEAD_PITCH_X_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_PITCH_X_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_PITCH_X_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_PITCH_X_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_PITCH_X_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_PITCH_X_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_PITCH_X_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCH_Y_SOURCE MW(24:22)
+#define NVCB97_TEXHEAD_PITCH_Y_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_PITCH_Y_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_PITCH_Y_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_PITCH_Y_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_PITCH_Y_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_PITCH_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_PITCH_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCH_Z_SOURCE MW(27:25)
+#define NVCB97_TEXHEAD_PITCH_Z_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_PITCH_Z_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_PITCH_Z_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_PITCH_Z_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_PITCH_Z_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_PITCH_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_PITCH_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCH_W_SOURCE MW(30:28)
+#define NVCB97_TEXHEAD_PITCH_W_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_PITCH_W_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_PITCH_W_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_PITCH_W_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_PITCH_W_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_PITCH_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_PITCH_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCH_PACK_COMPONENTS MW(31:31)
+#define NVCB97_TEXHEAD_PITCH_RESERVED1A MW(36:32)
+#define NVCB97_TEXHEAD_PITCH_ADDRESS_BITS31TO5 MW(63:37)
+#define NVCB97_TEXHEAD_PITCH_ADDRESS_BITS48TO32 MW(80:64)
+#define NVCB97_TEXHEAD_PITCH_RESERVED_ADDRESS MW(84:81)
+#define NVCB97_TEXHEAD_PITCH_HEADER_VERSION MW(87:85)
+#define NVCB97_TEXHEAD_PITCH_HEADER_VERSION_SELECT_ONE_D_BUFFER 0x00000000
+#define NVCB97_TEXHEAD_PITCH_HEADER_VERSION_SELECT_PITCH_COLOR_KEY 0x00000001
+#define NVCB97_TEXHEAD_PITCH_HEADER_VERSION_SELECT_PITCH 0x00000002
+#define NVCB97_TEXHEAD_PITCH_HEADER_VERSION_SELECT_BLOCKLINEAR 0x00000003
+#define NVCB97_TEXHEAD_PITCH_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY 0x00000004
+#define NVCB97_TEXHEAD_PITCH_RESERVED_HEADER_VERSION MW(88:88)
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH MW(92:89)
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_0 0x00000000
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_1 0x00000001
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_2 0x00000002
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_3 0x00000003
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_4 0x00000004
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_5 0x00000005
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_6 0x00000006
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_7 0x00000007
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_8 0x00000008
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_9 0x00000009
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_10 0x0000000a
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_11 0x0000000b
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_12 0x0000000c
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_13 0x0000000d
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_14 0x0000000e
+#define NVCB97_TEXHEAD_PITCH_RESOURCE_VIEW_COHERENCY_HASH_HASH_UNALIASED 0x0000000f
+#define NVCB97_TEXHEAD_PITCH_RESERVED2A MW(95:93)
+#define NVCB97_TEXHEAD_PITCH_PITCH_BITS20TO5 MW(111:96)
+#define NVCB97_TEXHEAD_PITCH_LOD_ANISO_QUALITY2 MW(112:112)
+#define NVCB97_TEXHEAD_PITCH_LOD_ANISO_QUALITY MW(113:113)
+#define NVCB97_TEXHEAD_PITCH_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVCB97_TEXHEAD_PITCH_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVCB97_TEXHEAD_PITCH_LOD_ISO_QUALITY MW(114:114)
+#define NVCB97_TEXHEAD_PITCH_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVCB97_TEXHEAD_PITCH_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVCB97_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_MODIFIER MW(116:115)
+#define NVCB97_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVCB97_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVCB97_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVCB97_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVCB97_TEXHEAD_PITCH_ANISO_SPREAD_SCALE MW(121:117)
+#define NVCB97_TEXHEAD_PITCH_USE_HEADER_OPT_CONTROL MW(122:122)
+#define NVCB97_TEXHEAD_PITCH_DEPTH_TEXTURE MW(123:123)
+#define NVCB97_TEXHEAD_PITCH_MAX_MIP_LEVEL MW(127:124)
+#define NVCB97_TEXHEAD_PITCH_WIDTH_MINUS_ONE MW(144:128)
+#define NVCB97_TEXHEAD_PITCH_PITCH_BIT21 MW(145:145)
+#define NVCB97_TEXHEAD_PITCH_HEIGHT_MINUS_ONE_BIT16 MW(146:146)
+#define NVCB97_TEXHEAD_PITCH_ANISO_SPREAD_MAX_LOG2 MW(149:147)
+#define NVCB97_TEXHEAD_PITCH_S_R_G_B_CONVERSION MW(150:150)
+#define NVCB97_TEXHEAD_PITCH_TEXTURE_TYPE MW(154:151)
+#define NVCB97_TEXHEAD_PITCH_TEXTURE_TYPE_ONE_D 0x00000000
+#define NVCB97_TEXHEAD_PITCH_TEXTURE_TYPE_TWO_D 0x00000001
+#define NVCB97_TEXHEAD_PITCH_TEXTURE_TYPE_THREE_D 0x00000002
+#define NVCB97_TEXHEAD_PITCH_TEXTURE_TYPE_CUBEMAP 0x00000003
+#define NVCB97_TEXHEAD_PITCH_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004
+#define NVCB97_TEXHEAD_PITCH_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005
+#define NVCB97_TEXHEAD_PITCH_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006
+#define NVCB97_TEXHEAD_PITCH_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007
+#define NVCB97_TEXHEAD_PITCH_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008
+#define NVCB97_TEXHEAD_PITCH_TEXTURE_TYPE_HTEX_TWOD 0x0000000a
+#define NVCB97_TEXHEAD_PITCH_TEXTURE_TYPE_HTEX_THREE_D 0x0000000b
+#define NVCB97_TEXHEAD_PITCH_TEXTURE_TYPE_HTEX_TWOD_ARRAY 0x0000000e
+#define NVCB97_TEXHEAD_PITCH_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f
+#define NVCB97_TEXHEAD_PITCH_SECTOR_PROMOTION MW(156:155)
+#define NVCB97_TEXHEAD_PITCH_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVCB97_TEXHEAD_PITCH_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVCB97_TEXHEAD_PITCH_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVCB97_TEXHEAD_PITCH_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVCB97_TEXHEAD_PITCH_BORDER_SIZE MW(159:157)
+#define NVCB97_TEXHEAD_PITCH_BORDER_SIZE_BORDER_SIZE_ONE 0x00000000
+#define NVCB97_TEXHEAD_PITCH_BORDER_SIZE_BORDER_SIZE_TWO 0x00000001
+#define NVCB97_TEXHEAD_PITCH_BORDER_SIZE_BORDER_SIZE_FOUR 0x00000002
+#define NVCB97_TEXHEAD_PITCH_BORDER_SIZE_BORDER_SIZE_EIGHT 0x00000003
+#define NVCB97_TEXHEAD_PITCH_BORDER_SIZE_BORDER_SAMPLER_COLOR 0x00000007
+#define NVCB97_TEXHEAD_PITCH_HEIGHT_MINUS_ONE MW(175:160)
+#define NVCB97_TEXHEAD_PITCH_DEPTH_MINUS_ONE MW(189:176)
+#define NVCB97_TEXHEAD_PITCH_RESERVED5A MW(190:190)
+#define NVCB97_TEXHEAD_PITCH_NORMALIZED_COORDS MW(191:191)
+#define NVCB97_TEXHEAD_PITCH_RESERVED6Y MW(192:192)
+#define NVCB97_TEXHEAD_PITCH_TRILIN_OPT MW(197:193)
+#define NVCB97_TEXHEAD_PITCH_MIP_LOD_BIAS MW(210:198)
+#define NVCB97_TEXHEAD_PITCH_ANISO_BIAS MW(214:211)
+#define NVCB97_TEXHEAD_PITCH_ANISO_FINE_SPREAD_FUNC MW(216:215)
+#define NVCB97_TEXHEAD_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVCB97_TEXHEAD_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVCB97_TEXHEAD_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVCB97_TEXHEAD_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVCB97_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_FUNC MW(218:217)
+#define NVCB97_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVCB97_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVCB97_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVCB97_TEXHEAD_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVCB97_TEXHEAD_PITCH_MAX_ANISOTROPY MW(221:219)
+#define NVCB97_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000
+#define NVCB97_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001
+#define NVCB97_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002
+#define NVCB97_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003
+#define NVCB97_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004
+#define NVCB97_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005
+#define NVCB97_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006
+#define NVCB97_TEXHEAD_PITCH_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007
+#define NVCB97_TEXHEAD_PITCH_ANISO_FINE_SPREAD_MODIFIER MW(223:222)
+#define NVCB97_TEXHEAD_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVCB97_TEXHEAD_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVCB97_TEXHEAD_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVCB97_TEXHEAD_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVCB97_TEXHEAD_PITCH_RES_VIEW_MIN_MIP_LEVEL MW(227:224)
+#define NVCB97_TEXHEAD_PITCH_RES_VIEW_MAX_MIP_LEVEL MW(231:228)
+#define NVCB97_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT MW(235:232)
+#define NVCB97_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_1X1 0x00000000
+#define NVCB97_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X1 0x00000001
+#define NVCB97_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X2 0x00000002
+#define NVCB97_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2 0x00000003
+#define NVCB97_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2_D3D 0x00000004
+#define NVCB97_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X1_D3D 0x00000005
+#define NVCB97_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X4 0x00000006
+#define NVCB97_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X2_VC_4 0x00000008
+#define NVCB97_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_2X2_VC_12 0x00000009
+#define NVCB97_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2_VC_8 0x0000000a
+#define NVCB97_TEXHEAD_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2_VC_24 0x0000000b
+#define NVCB97_TEXHEAD_PITCH_MIN_LOD_CLAMP MW(247:236)
+#define NVCB97_TEXHEAD_PITCH_RESERVED7Y MW(255:248)
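Judging by the field names, the pitch layout stores a 32-byte-aligned row pitch with bits 20:5 in PITCH_BITS20TO5 (MW(111:96)) and bit 21 in PITCH_BIT21 (MW(145:145)). A sketch of the reassembly that naming implies, again reusing the illustrative texhead_get_field() helper; the alignment reading is an inference from the names, not stated in this header:

static unsigned int texhead_pitch_bytes(const unsigned int th[8])
{
    unsigned int bits20to5 = texhead_get_field(th, 111, 96);  /* pitch[20:5] */
    unsigned int bit21     = texhead_get_field(th, 145, 145); /* pitch[21]   */

    /* Bits 4:0 of the pitch are implicitly zero (32-byte granularity). */
    return (bit21 << 21) | (bits20to5 << 5);
}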
+
+
+/*
+** Texture Header State Pitch Color Key
+ */
+
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS MW(6:0)
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_INVALID 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R32_G32_B32 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R32_G32 0x00000004
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R32_B24G8 0x00000005
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X8B8G8R8 0x00000007
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_A8B8G8R8 0x00000008
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_A2B10G10R10 0x00000009
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R16_G16 0x0000000c
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_G8R24 0x0000000d
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_G24R8 0x0000000e
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R32 0x0000000f
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_A4B4G4R4 0x00000012
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_A5B5G5R1 0x00000013
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_A1B5G5R5 0x00000014
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_B5G6R5 0x00000015
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_B6G5R5 0x00000016
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_G8R8 0x00000018
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R16 0x0000001b
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R8 0x0000001d
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_G4R4 0x0000001e
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_R1 0x0000001f
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_BF10GF11RF11 0x00000021
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_G8B8G8R8 0x00000022
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_B8G8R8G8 0x00000023
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_DXT1 0x00000024
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_DXT23 0x00000025
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_DXT45 0x00000026
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_DXN1 0x00000027
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_DXN2 0x00000028
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_BC6H_SF16 0x00000010
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_BC6H_UF16 0x00000011
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_BC7U 0x00000017
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ETC2_RGB 0x00000006
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_EAC 0x00000019
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_EACX2 0x0000001a
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_Z24S8 0x00000029
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_X8Z24 0x0000002a
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_S8Z24 0x0000002b
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32 0x0000002f
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ZF32_X24S8 0x00000030
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_Z16 0x0000003a
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046
+#define NVCB97_TEXHEAD_PITCHCK_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f
+#define NVCB97_TEXHEAD_PITCHCK_R_DATA_TYPE MW(9:7)
+#define NVCB97_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_SINT 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_UINT 0x00000004
+#define NVCB97_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVCB97_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVCB97_TEXHEAD_PITCHCK_R_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCHCK_G_DATA_TYPE MW(12:10)
+#define NVCB97_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_SINT 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_UINT 0x00000004
+#define NVCB97_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVCB97_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVCB97_TEXHEAD_PITCHCK_G_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCHCK_B_DATA_TYPE MW(15:13)
+#define NVCB97_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_SINT 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_UINT 0x00000004
+#define NVCB97_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVCB97_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVCB97_TEXHEAD_PITCHCK_B_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCHCK_A_DATA_TYPE MW(18:16)
+#define NVCB97_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_SNORM 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_UNORM 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_SINT 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_UINT 0x00000004
+#define NVCB97_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_SNORM_FORCE_FP16 0x00000005
+#define NVCB97_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_UNORM_FORCE_FP16 0x00000006
+#define NVCB97_TEXHEAD_PITCHCK_A_DATA_TYPE_NUM_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCHCK_X_SOURCE MW(21:19)
+#define NVCB97_TEXHEAD_PITCHCK_X_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_X_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_X_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_X_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_PITCHCK_X_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_PITCHCK_X_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_PITCHCK_X_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCHCK_Y_SOURCE MW(24:22)
+#define NVCB97_TEXHEAD_PITCHCK_Y_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_Y_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_Y_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_Y_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_PITCHCK_Y_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_PITCHCK_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_PITCHCK_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCHCK_Z_SOURCE MW(27:25)
+#define NVCB97_TEXHEAD_PITCHCK_Z_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_Z_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_Z_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_Z_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_PITCHCK_Z_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_PITCHCK_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_PITCHCK_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCHCK_W_SOURCE MW(30:28)
+#define NVCB97_TEXHEAD_PITCHCK_W_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_W_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_W_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_W_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_PITCHCK_W_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_PITCHCK_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_PITCHCK_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_PITCHCK_PACK_COMPONENTS MW(31:31)
+#define NVCB97_TEXHEAD_PITCHCK_RESERVED1A MW(36:32)
+#define NVCB97_TEXHEAD_PITCHCK_ADDRESS_BITS31TO5 MW(63:37)
+#define NVCB97_TEXHEAD_PITCHCK_ADDRESS_BITS48TO32 MW(80:64)
+#define NVCB97_TEXHEAD_PITCHCK_RESERVED_ADDRESS MW(84:81)
+#define NVCB97_TEXHEAD_PITCHCK_HEADER_VERSION MW(87:85)
+#define NVCB97_TEXHEAD_PITCHCK_HEADER_VERSION_SELECT_ONE_D_BUFFER 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_HEADER_VERSION_SELECT_PITCH_COLOR_KEY 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_HEADER_VERSION_SELECT_PITCH 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_HEADER_VERSION_SELECT_BLOCKLINEAR 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY 0x00000004
+#define NVCB97_TEXHEAD_PITCHCK_RESERVED_HEADER_VERSION MW(88:88)
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH MW(92:89)
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_0 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_1 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_2 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_3 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_4 0x00000004
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_5 0x00000005
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_6 0x00000006
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_7 0x00000007
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_8 0x00000008
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_9 0x00000009
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_10 0x0000000a
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_11 0x0000000b
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_12 0x0000000c
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_13 0x0000000d
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_ALIASED_14 0x0000000e
+#define NVCB97_TEXHEAD_PITCHCK_RESOURCE_VIEW_COHERENCY_HASH_HASH_UNALIASED 0x0000000f
+#define NVCB97_TEXHEAD_PITCHCK_RESERVED2A MW(95:93)
+#define NVCB97_TEXHEAD_PITCHCK_PITCH_BITS20TO5 MW(111:96)
+#define NVCB97_TEXHEAD_PITCHCK_LOD_ANISO_QUALITY2 MW(112:112)
+#define NVCB97_TEXHEAD_PITCHCK_LOD_ANISO_QUALITY MW(113:113)
+#define NVCB97_TEXHEAD_PITCHCK_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_LOD_ISO_QUALITY MW(114:114)
+#define NVCB97_TEXHEAD_PITCHCK_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER MW(116:115)
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_SPREAD_SCALE MW(121:117)
+#define NVCB97_TEXHEAD_PITCHCK_USE_HEADER_OPT_CONTROL MW(122:122)
+#define NVCB97_TEXHEAD_PITCHCK_DEPTH_TEXTURE MW(123:123)
+#define NVCB97_TEXHEAD_PITCHCK_MAX_MIP_LEVEL MW(127:124)
+#define NVCB97_TEXHEAD_PITCHCK_WIDTH_MINUS_ONE MW(144:128)
+#define NVCB97_TEXHEAD_PITCHCK_PITCH_BIT21 MW(145:145)
+#define NVCB97_TEXHEAD_PITCHCK_HEIGHT_MINUS_ONE_BIT16 MW(146:146)
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_SPREAD_MAX_LOG2 MW(149:147)
+#define NVCB97_TEXHEAD_PITCHCK_S_R_G_B_CONVERSION MW(150:150)
+#define NVCB97_TEXHEAD_PITCHCK_TEXTURE_TYPE MW(154:151)
+#define NVCB97_TEXHEAD_PITCHCK_TEXTURE_TYPE_ONE_D 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_TEXTURE_TYPE_TWO_D 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_TEXTURE_TYPE_THREE_D 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_TEXTURE_TYPE_CUBEMAP 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004
+#define NVCB97_TEXHEAD_PITCHCK_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005
+#define NVCB97_TEXHEAD_PITCHCK_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006
+#define NVCB97_TEXHEAD_PITCHCK_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007
+#define NVCB97_TEXHEAD_PITCHCK_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008
+#define NVCB97_TEXHEAD_PITCHCK_TEXTURE_TYPE_HTEX_TWOD 0x0000000a
+#define NVCB97_TEXHEAD_PITCHCK_TEXTURE_TYPE_HTEX_THREE_D 0x0000000b
+#define NVCB97_TEXHEAD_PITCHCK_TEXTURE_TYPE_HTEX_TWOD_ARRAY 0x0000000e
+#define NVCB97_TEXHEAD_PITCHCK_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f
+#define NVCB97_TEXHEAD_PITCHCK_SECTOR_PROMOTION MW(156:155)
+#define NVCB97_TEXHEAD_PITCHCK_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_BORDER_SIZE MW(159:157)
+#define NVCB97_TEXHEAD_PITCHCK_BORDER_SIZE_BORDER_SIZE_ONE 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_BORDER_SIZE_BORDER_SIZE_TWO 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_BORDER_SIZE_BORDER_SIZE_FOUR 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_BORDER_SIZE_BORDER_SIZE_EIGHT 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_BORDER_SIZE_BORDER_SAMPLER_COLOR 0x00000007
+#define NVCB97_TEXHEAD_PITCHCK_HEIGHT_MINUS_ONE MW(175:160)
+#define NVCB97_TEXHEAD_PITCHCK_DEPTH_MINUS_ONE MW(189:176)
+#define NVCB97_TEXHEAD_PITCHCK_RESERVED5A MW(190:190)
+#define NVCB97_TEXHEAD_PITCHCK_NORMALIZED_COORDS MW(191:191)
+#define NVCB97_TEXHEAD_PITCHCK_COLOR_KEY_OP MW(192:192)
+#define NVCB97_TEXHEAD_PITCHCK_TRILIN_OPT MW(197:193)
+#define NVCB97_TEXHEAD_PITCHCK_MIP_LOD_BIAS MW(210:198)
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_BIAS MW(214:211)
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_FUNC MW(216:215)
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_FUNC MW(218:217)
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_MAX_ANISOTROPY MW(221:219)
+#define NVCB97_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004
+#define NVCB97_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005
+#define NVCB97_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006
+#define NVCB97_TEXHEAD_PITCHCK_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_MODIFIER MW(223:222)
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVCB97_TEXHEAD_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVCB97_TEXHEAD_PITCHCK_COLOR_KEY_VALUE MW(255:224)
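In the blocklinear layouts (the color-key form above, and the V2 forms that follow), the GOBS_PER_BLOCK_* and TILE_WIDTH_IN_GOBS enumerants run ONE_GOB = 0 through THIRTYTWO_GOBS = 5, i.e. the raw field value appears to be the log2 of the GOB count. A one-line sketch of that decode, under that assumption:

static unsigned int gobs_from_log2_field(unsigned int field_value)
{
    /* ONE_GOB=0, TWO_GOBS=1, ..., THIRTYTWO_GOBS=5 => count = 2^value */
    return 1u << field_value;
}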
+
+
+/*
+** Texture Header V2 Blocklinear
+ */
+
+#define NVCB97_TEXHEAD_V2_BL_RESERVED0A MW(3:0)
+#define NVCB97_TEXHEAD_V2_BL_GOB_DEPTH_OFFSET MW(8:4)
+#define NVCB97_TEXHEAD_V2_BL_ADDRESS_BITS31TO9 MW(31:9)
+#define NVCB97_TEXHEAD_V2_BL_ADDRESS_BITS56TO32 MW(56:32)
+#define NVCB97_TEXHEAD_V2_BL_RESERVED1A MW(63:57)
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_WIDTH MW(66:64)
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_WIDTH_ONE_GOB 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_HEIGHT MW(69:67)
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_HEIGHT_ONE_GOB 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_HEIGHT_TWO_GOBS 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_HEIGHT_FOUR_GOBS 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_DEPTH MW(72:70)
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_DEPTH_ONE_GOB 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_DEPTH_TWO_GOBS 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_DEPTH_FOUR_GOBS 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_DEPTH_EIGHT_GOBS 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVCB97_TEXHEAD_V2_BL_GOBS_PER_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005
+#define NVCB97_TEXHEAD_V2_BL_RESERVED3Y MW(73:73)
+#define NVCB97_TEXHEAD_V2_BL_TILE_WIDTH_IN_GOBS MW(76:74)
+#define NVCB97_TEXHEAD_V2_BL_TILE_WIDTH_IN_GOBS_ONE_GOB 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_TILE_WIDTH_IN_GOBS_TWO_GOBS 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_TILE_WIDTH_IN_GOBS_FOUR_GOBS 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_TILE_WIDTH_IN_GOBS_EIGHT_GOBS 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_TILE_WIDTH_IN_GOBS_SIXTEEN_GOBS 0x00000004
+#define NVCB97_TEXHEAD_V2_BL_TILE_WIDTH_IN_GOBS_THIRTYTWO_GOBS 0x00000005
+#define NVCB97_TEXHEAD_V2_BL_GOB3D MW(77:77)
+#define NVCB97_TEXHEAD_V2_BL_RESERVED2Z MW(79:78)
+#define NVCB97_TEXHEAD_V2_BL_LOD_ANISO_QUALITY2 MW(80:80)
+#define NVCB97_TEXHEAD_V2_BL_LOD_ANISO_QUALITY MW(81:81)
+#define NVCB97_TEXHEAD_V2_BL_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_LOD_ISO_QUALITY MW(82:82)
+#define NVCB97_TEXHEAD_V2_BL_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_ANISO_COARSE_SPREAD_MODIFIER MW(84:83)
+#define NVCB97_TEXHEAD_V2_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_ANISO_SPREAD_SCALE MW(89:85)
+#define NVCB97_TEXHEAD_V2_BL_USE_HEADER_OPT_CONTROL MW(90:90)
+#define NVCB97_TEXHEAD_V2_BL_DEPTH_TEXTURE MW(91:91)
+#define NVCB97_TEXHEAD_V2_BL_MAX_MIP_LEVEL MW(95:92)
+#define NVCB97_TEXHEAD_V2_BL_X_SOURCE MW(98:96)
+#define NVCB97_TEXHEAD_V2_BL_X_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_X_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_X_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_X_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_BL_X_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_BL_X_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_BL_X_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_BL_Y_SOURCE MW(101:99)
+#define NVCB97_TEXHEAD_V2_BL_Y_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_Y_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_Y_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_Y_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_BL_Y_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_BL_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_BL_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_BL_Z_SOURCE MW(104:102)
+#define NVCB97_TEXHEAD_V2_BL_Z_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_Z_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_Z_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_Z_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_BL_Z_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_BL_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_BL_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_BL_W_SOURCE MW(107:105)
+#define NVCB97_TEXHEAD_V2_BL_W_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_W_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_W_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_W_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_BL_W_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_BL_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_BL_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_BL_DATA_TYPE MW(111:108)
+#define NVCB97_TEXHEAD_V2_BL_DATA_TYPE_TEX_DATA_TYPE_UNORM 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_DATA_TYPE_TEX_DATA_TYPE_SNORM 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_DATA_TYPE_TEX_DATA_TYPE_FLOAT 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_DATA_TYPE_TEX_DATA_TYPE_SGNRGB 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_DATA_TYPE_TEX_DATA_TYPE_SGNA 0x00000004
+#define NVCB97_TEXHEAD_V2_BL_DATA_TYPE_TEX_DATA_TYPE_DSDT 0x00000005
+#define NVCB97_TEXHEAD_V2_BL_DATA_TYPE_TEX_DATA_TYPE_UINT 0x00000006
+#define NVCB97_TEXHEAD_V2_BL_DATA_TYPE_TEX_DATA_TYPE_SINT 0x00000007
+#define NVCB97_TEXHEAD_V2_BL_DATA_TYPE_TEX_DATA_TYPE_ZS 0x00000008
+#define NVCB97_TEXHEAD_V2_BL_DATA_TYPE_TEX_DATA_TYPE_SZ 0x00000009
+#define NVCB97_TEXHEAD_V2_BL_DATA_TYPE_TEX_DATA_TYPE_ZFS 0x0000000a
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS MW(118:112)
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_INVALID 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_R32_G32_B32 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_R32_G32 0x00000004
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_R32_B24G8 0x00000005
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_X8B8G8R8 0x00000007
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_A8B8G8R8 0x00000008
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_A2B10G10R10 0x00000009
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_R16_G16 0x0000000c
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_G8R24 0x0000000d
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_G24R8 0x0000000e
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_R32 0x0000000f
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_A4B4G4R4 0x00000012
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_A5B5G5R1 0x00000013
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_A1B5G5R5 0x00000014
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_B5G6R5 0x00000015
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_B6G5R5 0x00000016
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_G8R8 0x00000018
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_R16 0x0000001b
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_R8 0x0000001d
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_G4R4 0x0000001e
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_R1 0x0000001f
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_BF10GF11RF11 0x00000021
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_G8B8G8R8 0x00000022
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_B8G8R8G8 0x00000023
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_DXT1 0x00000024
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_DXT23 0x00000025
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_DXT45 0x00000026
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_DXN1 0x00000027
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_DXN2 0x00000028
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_BC6H_SF16 0x00000010
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_BC6H_UF16 0x00000011
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_BC7U 0x00000017
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ETC2_RGB 0x00000006
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_EAC 0x00000019
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_EACX2 0x0000001a
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_Z24S8 0x00000029
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_X8Z24 0x0000002a
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_S8Z24 0x0000002b
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ZF32 0x0000002f
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ZF32_X24S8 0x00000030
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_Z16 0x0000003a
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046
+#define NVCB97_TEXHEAD_V2_BL_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f
+#define NVCB97_TEXHEAD_V2_BL_PACK_COMPONENTS MW(119:119)
+#define NVCB97_TEXHEAD_V2_BL_RESERVED3A MW(123:120)
+#define NVCB97_TEXHEAD_V2_BL_HEADER_VERSION MW(127:124)
+#define NVCB97_TEXHEAD_V2_BL_HEADER_VERSION_SELECT_PITCH_COLOR_KEY_V2 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_HEADER_VERSION_SELECT_PITCH_V2 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_HEADER_VERSION_SELECT_BLOCKLINEAR_V2 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY_V2 0x00000004
+#define NVCB97_TEXHEAD_V2_BL_HEADER_VERSION_SELECT_ONE_D_RAW_TYPED 0x00000005
+#define NVCB97_TEXHEAD_V2_BL_HEADER_VERSION_SELECT_ONE_D_STRUCT_BUF 0x00000006
+#define NVCB97_TEXHEAD_V2_BL_WIDTH_MINUS_ONE MW(144:128)
+#define NVCB97_TEXHEAD_V2_BL_NORMALIZED_COORDS MW(145:145)
+#define NVCB97_TEXHEAD_V2_BL_ANISO_SPREAD_MAX_LOG2 MW(148:146)
+#define NVCB97_TEXHEAD_V2_BL_S_R_G_B_CONVERSION MW(149:149)
+#define NVCB97_TEXHEAD_V2_BL_TEXTURE_TYPE MW(153:150)
+#define NVCB97_TEXHEAD_V2_BL_TEXTURE_TYPE_ONE_D 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_TEXTURE_TYPE_TWO_D 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_TEXTURE_TYPE_THREE_D 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_TEXTURE_TYPE_CUBEMAP 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004
+#define NVCB97_TEXHEAD_V2_BL_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005
+#define NVCB97_TEXHEAD_V2_BL_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006
+#define NVCB97_TEXHEAD_V2_BL_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007
+#define NVCB97_TEXHEAD_V2_BL_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008
+#define NVCB97_TEXHEAD_V2_BL_TEXTURE_TYPE_HTEX_TWOD 0x0000000a
+#define NVCB97_TEXHEAD_V2_BL_TEXTURE_TYPE_HTEX_THREE_D 0x0000000b
+#define NVCB97_TEXHEAD_V2_BL_TEXTURE_TYPE_HTEX_TWOD_ARRAY 0x0000000e
+#define NVCB97_TEXHEAD_V2_BL_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f
+#define NVCB97_TEXHEAD_V2_BL_SECTOR_PROMOTION MW(155:154)
+#define NVCB97_TEXHEAD_V2_BL_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_BORDER_SOURCE MW(156:156)
+#define NVCB97_TEXHEAD_V2_BL_BORDER_SOURCE_BORDER_TEXTURE 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_BORDER_SOURCE_BORDER_COLOR 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_RESERVED4A MW(159:157)
+#define NVCB97_TEXHEAD_V2_BL_HEIGHT_MINUS_ONE MW(176:160)
+#define NVCB97_TEXHEAD_V2_BL_DEPTH_MINUS_ONE MW(191:177)
+#define NVCB97_TEXHEAD_V2_BL_RESERVED6Y MW(192:192)
+#define NVCB97_TEXHEAD_V2_BL_TRILIN_OPT MW(197:193)
+#define NVCB97_TEXHEAD_V2_BL_MIP_LOD_BIAS MW(210:198)
+#define NVCB97_TEXHEAD_V2_BL_ANISO_BIAS MW(214:211)
+#define NVCB97_TEXHEAD_V2_BL_ANISO_FINE_SPREAD_FUNC MW(216:215)
+#define NVCB97_TEXHEAD_V2_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_ANISO_COARSE_SPREAD_FUNC MW(218:217)
+#define NVCB97_TEXHEAD_V2_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_MAX_ANISOTROPY MW(221:219)
+#define NVCB97_TEXHEAD_V2_BL_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004
+#define NVCB97_TEXHEAD_V2_BL_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005
+#define NVCB97_TEXHEAD_V2_BL_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006
+#define NVCB97_TEXHEAD_V2_BL_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007
+#define NVCB97_TEXHEAD_V2_BL_ANISO_FINE_SPREAD_MODIFIER MW(223:222)
+#define NVCB97_TEXHEAD_V2_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_RES_VIEW_MIN_MIP_LEVEL MW(227:224)
+#define NVCB97_TEXHEAD_V2_BL_RES_VIEW_MAX_MIP_LEVEL MW(231:228)
+#define NVCB97_TEXHEAD_V2_BL_MULTI_SAMPLE_COUNT MW(235:232)
+#define NVCB97_TEXHEAD_V2_BL_MULTI_SAMPLE_COUNT_MODE_1X1 0x00000000
+#define NVCB97_TEXHEAD_V2_BL_MULTI_SAMPLE_COUNT_MODE_2X1 0x00000001
+#define NVCB97_TEXHEAD_V2_BL_MULTI_SAMPLE_COUNT_MODE_2X2 0x00000002
+#define NVCB97_TEXHEAD_V2_BL_MULTI_SAMPLE_COUNT_MODE_4X2 0x00000003
+#define NVCB97_TEXHEAD_V2_BL_MULTI_SAMPLE_COUNT_MODE_4X2_D3D 0x00000004
+#define NVCB97_TEXHEAD_V2_BL_MULTI_SAMPLE_COUNT_MODE_2X1_D3D 0x00000005
+#define NVCB97_TEXHEAD_V2_BL_MULTI_SAMPLE_COUNT_MODE_4X4 0x00000006
+#define NVCB97_TEXHEAD_V2_BL_MULTI_SAMPLE_COUNT_MODE_2X2_VC_4 0x00000008
+#define NVCB97_TEXHEAD_V2_BL_MULTI_SAMPLE_COUNT_MODE_2X2_VC_12 0x00000009
+#define NVCB97_TEXHEAD_V2_BL_MULTI_SAMPLE_COUNT_MODE_4X2_VC_8 0x0000000a
+#define NVCB97_TEXHEAD_V2_BL_MULTI_SAMPLE_COUNT_MODE_4X2_VC_24 0x0000000b
+#define NVCB97_TEXHEAD_V2_BL_MIN_LOD_CLAMP MW(247:236)
+#define NVCB97_TEXHEAD_V2_BL_RESERVED7Y MW(255:248)
+
+
+/*
+** Texture Header V2 Blocklinear Color Key
+ */
+
+#define NVCB97_TEXHEAD_V2_BLCK_RESERVED0A MW(3:0)
+#define NVCB97_TEXHEAD_V2_BLCK_GOB_DEPTH_OFFSET MW(8:4)
+#define NVCB97_TEXHEAD_V2_BLCK_ADDRESS_BITS31TO9 MW(31:9)
+#define NVCB97_TEXHEAD_V2_BLCK_ADDRESS_BITS56TO32 MW(56:32)
+#define NVCB97_TEXHEAD_V2_BLCK_RESERVED1A MW(63:57)
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_WIDTH MW(66:64)
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_WIDTH_ONE_GOB 0x00000000
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_HEIGHT MW(69:67)
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_HEIGHT_ONE_GOB 0x00000000
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_HEIGHT_TWO_GOBS 0x00000001
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_HEIGHT_FOUR_GOBS 0x00000002
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_HEIGHT_EIGHT_GOBS 0x00000003
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_DEPTH MW(72:70)
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_DEPTH_ONE_GOB 0x00000000
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_DEPTH_TWO_GOBS 0x00000001
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_DEPTH_FOUR_GOBS 0x00000002
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_DEPTH_EIGHT_GOBS 0x00000003
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NVCB97_TEXHEAD_V2_BLCK_GOBS_PER_BLOCK_DEPTH_THIRTYTWO_GOBS 0x00000005
+#define NVCB97_TEXHEAD_V2_BLCK_RESERVED3Y MW(73:73)
+#define NVCB97_TEXHEAD_V2_BLCK_TILE_WIDTH_IN_GOBS MW(76:74)
+#define NVCB97_TEXHEAD_V2_BLCK_TILE_WIDTH_IN_GOBS_ONE_GOB 0x00000000
+#define NVCB97_TEXHEAD_V2_BLCK_TILE_WIDTH_IN_GOBS_TWO_GOBS 0x00000001
+#define NVCB97_TEXHEAD_V2_BLCK_TILE_WIDTH_IN_GOBS_FOUR_GOBS 0x00000002
+#define NVCB97_TEXHEAD_V2_BLCK_TILE_WIDTH_IN_GOBS_EIGHT_GOBS 0x00000003
+#define NVCB97_TEXHEAD_V2_BLCK_TILE_WIDTH_IN_GOBS_SIXTEEN_GOBS 0x00000004
+#define NVCB97_TEXHEAD_V2_BLCK_TILE_WIDTH_IN_GOBS_THIRTYTWO_GOBS 0x00000005
+#define NVCB97_TEXHEAD_V2_BLCK_GOB3D MW(77:77)
+#define NVCB97_TEXHEAD_V2_BLCK_RESERVED2Z MW(79:78)
+#define NVCB97_TEXHEAD_V2_BLCK_LOD_ANISO_QUALITY2 MW(80:80)
+#define NVCB97_TEXHEAD_V2_BLCK_LOD_ANISO_QUALITY MW(81:81)
+#define NVCB97_TEXHEAD_V2_BLCK_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVCB97_TEXHEAD_V2_BLCK_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVCB97_TEXHEAD_V2_BLCK_LOD_ISO_QUALITY MW(82:82)
+#define NVCB97_TEXHEAD_V2_BLCK_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVCB97_TEXHEAD_V2_BLCK_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVCB97_TEXHEAD_V2_BLCK_ANISO_COARSE_SPREAD_MODIFIER MW(84:83)
+#define
NVCB97_TEXHEAD_V2_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000 +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001 +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002 +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003 +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_SPREAD_SCALE MW(89:85) +#define NVCB97_TEXHEAD_V2_BLCK_USE_HEADER_OPT_CONTROL MW(90:90) +#define NVCB97_TEXHEAD_V2_BLCK_DEPTH_TEXTURE MW(91:91) +#define NVCB97_TEXHEAD_V2_BLCK_MAX_MIP_LEVEL MW(95:92) +#define NVCB97_TEXHEAD_V2_BLCK_X_SOURCE MW(98:96) +#define NVCB97_TEXHEAD_V2_BLCK_X_SOURCE_IN_ZERO 0x00000000 +#define NVCB97_TEXHEAD_V2_BLCK_X_SOURCE_IN_R 0x00000002 +#define NVCB97_TEXHEAD_V2_BLCK_X_SOURCE_IN_G 0x00000003 +#define NVCB97_TEXHEAD_V2_BLCK_X_SOURCE_IN_B 0x00000004 +#define NVCB97_TEXHEAD_V2_BLCK_X_SOURCE_IN_A 0x00000005 +#define NVCB97_TEXHEAD_V2_BLCK_X_SOURCE_IN_ONE_INT 0x00000006 +#define NVCB97_TEXHEAD_V2_BLCK_X_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_V2_BLCK_Y_SOURCE MW(101:99) +#define NVCB97_TEXHEAD_V2_BLCK_Y_SOURCE_IN_ZERO 0x00000000 +#define NVCB97_TEXHEAD_V2_BLCK_Y_SOURCE_IN_R 0x00000002 +#define NVCB97_TEXHEAD_V2_BLCK_Y_SOURCE_IN_G 0x00000003 +#define NVCB97_TEXHEAD_V2_BLCK_Y_SOURCE_IN_B 0x00000004 +#define NVCB97_TEXHEAD_V2_BLCK_Y_SOURCE_IN_A 0x00000005 +#define NVCB97_TEXHEAD_V2_BLCK_Y_SOURCE_IN_ONE_INT 0x00000006 +#define NVCB97_TEXHEAD_V2_BLCK_Y_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_V2_BLCK_Z_SOURCE MW(104:102) +#define NVCB97_TEXHEAD_V2_BLCK_Z_SOURCE_IN_ZERO 0x00000000 +#define NVCB97_TEXHEAD_V2_BLCK_Z_SOURCE_IN_R 0x00000002 +#define NVCB97_TEXHEAD_V2_BLCK_Z_SOURCE_IN_G 0x00000003 +#define NVCB97_TEXHEAD_V2_BLCK_Z_SOURCE_IN_B 0x00000004 +#define NVCB97_TEXHEAD_V2_BLCK_Z_SOURCE_IN_A 0x00000005 +#define NVCB97_TEXHEAD_V2_BLCK_Z_SOURCE_IN_ONE_INT 0x00000006 +#define NVCB97_TEXHEAD_V2_BLCK_Z_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_V2_BLCK_W_SOURCE MW(107:105) +#define NVCB97_TEXHEAD_V2_BLCK_W_SOURCE_IN_ZERO 0x00000000 +#define NVCB97_TEXHEAD_V2_BLCK_W_SOURCE_IN_R 0x00000002 +#define NVCB97_TEXHEAD_V2_BLCK_W_SOURCE_IN_G 0x00000003 +#define NVCB97_TEXHEAD_V2_BLCK_W_SOURCE_IN_B 0x00000004 +#define NVCB97_TEXHEAD_V2_BLCK_W_SOURCE_IN_A 0x00000005 +#define NVCB97_TEXHEAD_V2_BLCK_W_SOURCE_IN_ONE_INT 0x00000006 +#define NVCB97_TEXHEAD_V2_BLCK_W_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_V2_BLCK_DATA_TYPE MW(111:108) +#define NVCB97_TEXHEAD_V2_BLCK_DATA_TYPE_TEX_DATA_TYPE_UNORM 0x00000000 +#define NVCB97_TEXHEAD_V2_BLCK_DATA_TYPE_TEX_DATA_TYPE_SNORM 0x00000001 +#define NVCB97_TEXHEAD_V2_BLCK_DATA_TYPE_TEX_DATA_TYPE_FLOAT 0x00000002 +#define NVCB97_TEXHEAD_V2_BLCK_DATA_TYPE_TEX_DATA_TYPE_SGNRGB 0x00000003 +#define NVCB97_TEXHEAD_V2_BLCK_DATA_TYPE_TEX_DATA_TYPE_SGNA 0x00000004 +#define NVCB97_TEXHEAD_V2_BLCK_DATA_TYPE_TEX_DATA_TYPE_DSDT 0x00000005 +#define NVCB97_TEXHEAD_V2_BLCK_DATA_TYPE_TEX_DATA_TYPE_UINT 0x00000006 +#define NVCB97_TEXHEAD_V2_BLCK_DATA_TYPE_TEX_DATA_TYPE_SINT 0x00000007 +#define NVCB97_TEXHEAD_V2_BLCK_DATA_TYPE_TEX_DATA_TYPE_ZS 0x00000008 +#define NVCB97_TEXHEAD_V2_BLCK_DATA_TYPE_TEX_DATA_TYPE_SZ 0x00000009 +#define NVCB97_TEXHEAD_V2_BLCK_DATA_TYPE_TEX_DATA_TYPE_ZFS 0x0000000a +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS MW(118:112) +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_INVALID 0x00000000 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001 +#define 
NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_R32_G32_B32 0x00000002 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_R32_G32 0x00000004 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_R32_B24G8 0x00000005 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_X8B8G8R8 0x00000007 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_A8B8G8R8 0x00000008 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_A2B10G10R10 0x00000009 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_R16_G16 0x0000000c +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_G8R24 0x0000000d +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_G24R8 0x0000000e +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_R32 0x0000000f +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_A4B4G4R4 0x00000012 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_A5B5G5R1 0x00000013 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_A1B5G5R5 0x00000014 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_B5G6R5 0x00000015 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_B6G5R5 0x00000016 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_G8R8 0x00000018 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_R16 0x0000001b +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_R8 0x0000001d +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_G4R4 0x0000001e +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_R1 0x0000001f +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_BF10GF11RF11 0x00000021 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_G8B8G8R8 0x00000022 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_B8G8R8G8 0x00000023 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_DXT1 0x00000024 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_DXT23 0x00000025 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_DXT45 0x00000026 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_DXN1 0x00000027 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_DXN2 0x00000028 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_BC6H_SF16 0x00000010 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_BC6H_UF16 0x00000011 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_BC7U 0x00000017 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ETC2_RGB 0x00000006 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_EAC 0x00000019 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_EACX2 0x0000001a +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_Z24S8 0x00000029 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_X8Z24 0x0000002a +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_S8Z24 0x0000002b +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ZF32 0x0000002f +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ZF32_X24S8 0x00000030 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_Z16 0x0000003a +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052 
+#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046 +#define NVCB97_TEXHEAD_V2_BLCK_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f +#define NVCB97_TEXHEAD_V2_BLCK_PACK_COMPONENTS MW(119:119) +#define NVCB97_TEXHEAD_V2_BLCK_RESERVED3A MW(123:120) +#define NVCB97_TEXHEAD_V2_BLCK_HEADER_VERSION MW(127:124) +#define NVCB97_TEXHEAD_V2_BLCK_HEADER_VERSION_SELECT_PITCH_COLOR_KEY_V2 0x00000001 +#define NVCB97_TEXHEAD_V2_BLCK_HEADER_VERSION_SELECT_PITCH_V2 0x00000002 +#define NVCB97_TEXHEAD_V2_BLCK_HEADER_VERSION_SELECT_BLOCKLINEAR_V2 0x00000003 +#define NVCB97_TEXHEAD_V2_BLCK_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY_V2 0x00000004 +#define NVCB97_TEXHEAD_V2_BLCK_HEADER_VERSION_SELECT_ONE_D_RAW_TYPED 0x00000005 +#define NVCB97_TEXHEAD_V2_BLCK_HEADER_VERSION_SELECT_ONE_D_STRUCT_BUF 0x00000006 +#define NVCB97_TEXHEAD_V2_BLCK_WIDTH_MINUS_ONE MW(144:128) +#define NVCB97_TEXHEAD_V2_BLCK_NORMALIZED_COORDS MW(145:145) +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_SPREAD_MAX_LOG2 MW(148:146) +#define NVCB97_TEXHEAD_V2_BLCK_S_R_G_B_CONVERSION MW(149:149) +#define NVCB97_TEXHEAD_V2_BLCK_TEXTURE_TYPE MW(153:150) +#define NVCB97_TEXHEAD_V2_BLCK_TEXTURE_TYPE_ONE_D 0x00000000 +#define NVCB97_TEXHEAD_V2_BLCK_TEXTURE_TYPE_TWO_D 0x00000001 +#define NVCB97_TEXHEAD_V2_BLCK_TEXTURE_TYPE_THREE_D 0x00000002 +#define NVCB97_TEXHEAD_V2_BLCK_TEXTURE_TYPE_CUBEMAP 0x00000003 +#define NVCB97_TEXHEAD_V2_BLCK_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004 +#define NVCB97_TEXHEAD_V2_BLCK_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005 +#define NVCB97_TEXHEAD_V2_BLCK_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006 +#define NVCB97_TEXHEAD_V2_BLCK_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007 +#define NVCB97_TEXHEAD_V2_BLCK_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008 +#define NVCB97_TEXHEAD_V2_BLCK_TEXTURE_TYPE_HTEX_TWOD 0x0000000a +#define NVCB97_TEXHEAD_V2_BLCK_TEXTURE_TYPE_HTEX_THREE_D 0x0000000b +#define NVCB97_TEXHEAD_V2_BLCK_TEXTURE_TYPE_HTEX_TWOD_ARRAY 0x0000000e +#define NVCB97_TEXHEAD_V2_BLCK_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f +#define NVCB97_TEXHEAD_V2_BLCK_SECTOR_PROMOTION MW(155:154) +#define NVCB97_TEXHEAD_V2_BLCK_SECTOR_PROMOTION_NO_PROMOTION 0x00000000 +#define NVCB97_TEXHEAD_V2_BLCK_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001 +#define NVCB97_TEXHEAD_V2_BLCK_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002 +#define NVCB97_TEXHEAD_V2_BLCK_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003 +#define NVCB97_TEXHEAD_V2_BLCK_BORDER_SOURCE MW(156:156) +#define NVCB97_TEXHEAD_V2_BLCK_BORDER_SOURCE_BORDER_TEXTURE 0x00000000 +#define NVCB97_TEXHEAD_V2_BLCK_BORDER_SOURCE_BORDER_COLOR 0x00000001 +#define NVCB97_TEXHEAD_V2_BLCK_RESERVED4A MW(159:157) +#define NVCB97_TEXHEAD_V2_BLCK_HEIGHT_MINUS_ONE MW(176:160) +#define NVCB97_TEXHEAD_V2_BLCK_DEPTH_MINUS_ONE MW(191:177) +#define NVCB97_TEXHEAD_V2_BLCK_COLOR_KEY_OP MW(192:192) +#define NVCB97_TEXHEAD_V2_BLCK_TRILIN_OPT MW(197:193) +#define NVCB97_TEXHEAD_V2_BLCK_MIP_LOD_BIAS MW(210:198) +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_BIAS MW(214:211) +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_FINE_SPREAD_FUNC MW(216:215) +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000 
+#define NVCB97_TEXHEAD_V2_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001 +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002 +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003 +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_COARSE_SPREAD_FUNC MW(218:217) +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000 +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001 +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002 +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003 +#define NVCB97_TEXHEAD_V2_BLCK_MAX_ANISOTROPY MW(221:219) +#define NVCB97_TEXHEAD_V2_BLCK_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000 +#define NVCB97_TEXHEAD_V2_BLCK_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001 +#define NVCB97_TEXHEAD_V2_BLCK_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002 +#define NVCB97_TEXHEAD_V2_BLCK_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003 +#define NVCB97_TEXHEAD_V2_BLCK_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004 +#define NVCB97_TEXHEAD_V2_BLCK_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005 +#define NVCB97_TEXHEAD_V2_BLCK_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006 +#define NVCB97_TEXHEAD_V2_BLCK_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007 +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_FINE_SPREAD_MODIFIER MW(223:222) +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000 +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001 +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002 +#define NVCB97_TEXHEAD_V2_BLCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003 +#define NVCB97_TEXHEAD_V2_BLCK_COLOR_KEY_VALUE MW(255:224) + + +/* +** Texture Header V2 One-D Raw Typed + */ + +#define NVCB97_TEXHEAD_V2_1DRT_ADDRESS_BITS31TO0 MW(31:0) +#define NVCB97_TEXHEAD_V2_1DRT_ADDRESS_BITS63TO32 MW(63:32) +#define NVCB97_TEXHEAD_V2_1DRT_WIDTH_MINUS_ONE MW(95:64) +#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE MW(98:96) +#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE_IN_ZERO 0x00000000 +#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE_IN_R 0x00000002 +#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE_IN_G 0x00000003 +#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE_IN_B 0x00000004 +#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE_IN_A 0x00000005 +#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE_IN_ONE_INT 0x00000006 +#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE MW(101:99) +#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE_IN_ZERO 0x00000000 +#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE_IN_R 0x00000002 +#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE_IN_G 0x00000003 +#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE_IN_B 0x00000004 +#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE_IN_A 0x00000005 +#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE_IN_ONE_INT 0x00000006 +#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE MW(104:102) +#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE_IN_ZERO 0x00000000 +#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE_IN_R 0x00000002 +#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE_IN_G 0x00000003 +#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE_IN_B 0x00000004 +#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE_IN_A 0x00000005 +#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE_IN_ONE_INT 0x00000006 +#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE_IN_ONE_FLOAT 0x00000007 +#define NVCB97_TEXHEAD_V2_1DRT_W_SOURCE MW(107:105) +#define NVCB97_TEXHEAD_V2_1DRT_W_SOURCE_IN_ZERO 0x00000000 +#define 
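/*
 * [Editorial note] The MW(hi:lo) pairs in the texture-header sections of this
 * file give absolute bit positions within one 256-bit texture header, i.e. a
 * block of eight 32-bit words. A minimal sketch of writing such a field is
 * shown below; texhead_set_field() is a hypothetical helper added here for
 * illustration (it is not part of this patch), and NvU32 is assumed to come
 * from nvtypes.h.
 */
static inline void texhead_set_field(NvU32 th[8], unsigned hi, unsigned lo, NvU32 val)
{
    unsigned bit;
    for (bit = lo; bit <= hi; bit++) {
        NvU32 b = (val >> (bit - lo)) & 1u;            /* source bit of val */
        th[bit >> 5] &= ~((NvU32)1 << (bit & 31));     /* clear target bit  */
        th[bit >> 5] |= b << (bit & 31);               /* write target bit  */
    }
}
/*
 * Example: tag a header as blocklinear-with-color-key by writing
 * NVCB97_TEXHEAD_V2_BLCK_HEADER_VERSION (bits 127:124):
 *     texhead_set_field(th, 127, 124,
 *         NVCB97_TEXHEAD_V2_BLCK_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY_V2);
 */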
+
+
+/*
+** Texture Header V2 One-D Raw Typed
+ */
+
+#define NVCB97_TEXHEAD_V2_1DRT_ADDRESS_BITS31TO0 MW(31:0)
+#define NVCB97_TEXHEAD_V2_1DRT_ADDRESS_BITS63TO32 MW(63:32)
+#define NVCB97_TEXHEAD_V2_1DRT_WIDTH_MINUS_ONE MW(95:64)
+#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE MW(98:96)
+#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_1DRT_X_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE MW(101:99)
+#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_1DRT_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE MW(104:102)
+#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_1DRT_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_1DRT_W_SOURCE MW(107:105)
+#define NVCB97_TEXHEAD_V2_1DRT_W_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_1DRT_W_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_1DRT_W_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_1DRT_W_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_1DRT_W_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_1DRT_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_1DRT_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_1DRT_DATA_TYPE MW(111:108)
+#define NVCB97_TEXHEAD_V2_1DRT_DATA_TYPE_TEX_DATA_TYPE_UNORM 0x00000000
+#define NVCB97_TEXHEAD_V2_1DRT_DATA_TYPE_TEX_DATA_TYPE_SNORM 0x00000001
+#define NVCB97_TEXHEAD_V2_1DRT_DATA_TYPE_TEX_DATA_TYPE_FLOAT 0x00000002
+#define NVCB97_TEXHEAD_V2_1DRT_DATA_TYPE_TEX_DATA_TYPE_SGNRGB 0x00000003
+#define NVCB97_TEXHEAD_V2_1DRT_DATA_TYPE_TEX_DATA_TYPE_SGNA 0x00000004
+#define NVCB97_TEXHEAD_V2_1DRT_DATA_TYPE_TEX_DATA_TYPE_DSDT 0x00000005
+#define NVCB97_TEXHEAD_V2_1DRT_DATA_TYPE_TEX_DATA_TYPE_UINT 0x00000006
+#define NVCB97_TEXHEAD_V2_1DRT_DATA_TYPE_TEX_DATA_TYPE_SINT 0x00000007
+#define NVCB97_TEXHEAD_V2_1DRT_DATA_TYPE_TEX_DATA_TYPE_ZS 0x00000008
+#define NVCB97_TEXHEAD_V2_1DRT_DATA_TYPE_TEX_DATA_TYPE_SZ 0x00000009
+#define NVCB97_TEXHEAD_V2_1DRT_DATA_TYPE_TEX_DATA_TYPE_ZFS 0x0000000a
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS MW(117:112)
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_INVALID 0x00000000
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_R32_G32_B32_A32 0x00000001
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_R32_G32_B32 0x00000002
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_R16_G16_B16_A16 0x00000003
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_R32_G32 0x00000004
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_R32_B24G8 0x00000005
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_X8B8G8R8 0x00000007
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_A8B8G8R8 0x00000008
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_A2B10G10R10 0x00000009
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_R16_G16 0x0000000c
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_G8R24 0x0000000d
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_G24R8 0x0000000e
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_R32 0x0000000f
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_A4B4G4R4 0x00000012
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_A5B5G5R1 0x00000013
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_A1B5G5R5 0x00000014
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_B5G6R5 0x00000015
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_B6G5R5 0x00000016
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_G8R8 0x00000018
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_R16 0x0000001b
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_Y8_VIDEO 0x0000001c
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_R8 0x0000001d
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_G4R4 0x0000001e
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_BF10GF11RF11 0x00000021
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_G8B8G8R8 0x00000022
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_B8G8R8G8 0x00000023
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_X8B8G8R8_SRGB 0x00000031
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_A8B8G8R8_SRGB 0x00000032
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_G8R8_SRGB 0x00000033
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZES_1D_R8_SRGB 0x00000034
+#define NVCB97_TEXHEAD_V2_1DRT_COMPONENTS_SIZESV2_CS_BITFIELD_SIZE 0x0000003f
+#define NVCB97_TEXHEAD_V2_1DRT_SECTOR_PROMOTION MW(119:118)
+#define NVCB97_TEXHEAD_V2_1DRT_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVCB97_TEXHEAD_V2_1DRT_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVCB97_TEXHEAD_V2_1DRT_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVCB97_TEXHEAD_V2_1DRT_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVCB97_TEXHEAD_V2_1DRT_RESERVED3A MW(123:120)
+#define NVCB97_TEXHEAD_V2_1DRT_HEADER_VERSION MW(127:124)
+#define NVCB97_TEXHEAD_V2_1DRT_HEADER_VERSION_SELECT_PITCH_COLOR_KEY_V2 0x00000001
+#define NVCB97_TEXHEAD_V2_1DRT_HEADER_VERSION_SELECT_PITCH_V2 0x00000002
+#define NVCB97_TEXHEAD_V2_1DRT_HEADER_VERSION_SELECT_BLOCKLINEAR_V2 0x00000003
+#define NVCB97_TEXHEAD_V2_1DRT_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY_V2 0x00000004
+#define NVCB97_TEXHEAD_V2_1DRT_HEADER_VERSION_SELECT_ONE_D_RAW_TYPED 0x00000005
+#define NVCB97_TEXHEAD_V2_1DRT_HEADER_VERSION_SELECT_ONE_D_STRUCT_BUF 0x00000006
+#define NVCB97_TEXHEAD_V2_1DRT_RESERVED4X MW(156:128)
+#define NVCB97_TEXHEAD_V2_1DRT_RESERVED4A MW(159:157)
+#define NVCB97_TEXHEAD_V2_1DRT_RESERVED5X MW(191:160)
+#define NVCB97_TEXHEAD_V2_1DRT_RESERVED6X MW(223:192)
+#define NVCB97_TEXHEAD_V2_1DRT_RESERVED7X MW(255:224)
+
+
+/*
+** Texture Header V2 One-D Structured Buffer
+ */
+
+#define NVCB97_TEXHEAD_V2_1DSB_ADDRESS_BITS31TO0 MW(31:0)
+#define NVCB97_TEXHEAD_V2_1DSB_ADDRESS_BITS63TO32 MW(63:32)
+#define NVCB97_TEXHEAD_V2_1DSB_WIDTH_MINUS_ONE MW(95:64)
+#define NVCB97_TEXHEAD_V2_1DSB_STRIDE MW(107:96)
+#define NVCB97_TEXHEAD_V2_1DSB_DATA_TYPE MW(111:108)
+#define NVCB97_TEXHEAD_V2_1DSB_DATA_TYPE_TEX_DATA_TYPE_UNORM 0x00000000
+#define NVCB97_TEXHEAD_V2_1DSB_DATA_TYPE_TEX_DATA_TYPE_SNORM 0x00000001
+#define NVCB97_TEXHEAD_V2_1DSB_DATA_TYPE_TEX_DATA_TYPE_FLOAT 0x00000002
+#define NVCB97_TEXHEAD_V2_1DSB_DATA_TYPE_TEX_DATA_TYPE_SGNRGB 0x00000003
+#define NVCB97_TEXHEAD_V2_1DSB_DATA_TYPE_TEX_DATA_TYPE_SGNA 0x00000004
+#define NVCB97_TEXHEAD_V2_1DSB_DATA_TYPE_TEX_DATA_TYPE_DSDT 0x00000005
+#define NVCB97_TEXHEAD_V2_1DSB_DATA_TYPE_TEX_DATA_TYPE_UINT 0x00000006
+#define NVCB97_TEXHEAD_V2_1DSB_DATA_TYPE_TEX_DATA_TYPE_SINT 0x00000007
+#define NVCB97_TEXHEAD_V2_1DSB_DATA_TYPE_TEX_DATA_TYPE_ZS 0x00000008
+#define NVCB97_TEXHEAD_V2_1DSB_DATA_TYPE_TEX_DATA_TYPE_SZ 0x00000009
+#define NVCB97_TEXHEAD_V2_1DSB_DATA_TYPE_TEX_DATA_TYPE_ZFS 0x0000000a
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS MW(117:112)
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_INVALID 0x00000000
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_R32_G32_B32_A32 0x00000001
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_R32_G32_B32 0x00000002
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_R16_G16_B16_A16 0x00000003
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_R32_G32 0x00000004
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_R32_B24G8 0x00000005
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_X8B8G8R8 0x00000007
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_A8B8G8R8 0x00000008
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_A2B10G10R10 0x00000009
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_R16_G16 0x0000000c
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_G8R24 0x0000000d
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_G24R8 0x0000000e
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_R32 0x0000000f
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_A4B4G4R4 0x00000012
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_A5B5G5R1 0x00000013
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_A1B5G5R5 0x00000014
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_B5G6R5 0x00000015
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_B6G5R5 0x00000016
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_G8R8 0x00000018
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_R16 0x0000001b
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_Y8_VIDEO 0x0000001c
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_R8 0x0000001d
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_G4R4 0x0000001e
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_BF10GF11RF11 0x00000021
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_G8B8G8R8 0x00000022
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_B8G8R8G8 0x00000023
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_X8B8G8R8_SRGB 0x00000031
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_A8B8G8R8_SRGB 0x00000032
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_G8R8_SRGB 0x00000033
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZES_1D_R8_SRGB 0x00000034
+#define NVCB97_TEXHEAD_V2_1DSB_COMPONENTS_SIZESV2_CS_BITFIELD_SIZE 0x0000003f
+#define NVCB97_TEXHEAD_V2_1DSB_SECTOR_PROMOTION MW(119:118)
+#define NVCB97_TEXHEAD_V2_1DSB_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVCB97_TEXHEAD_V2_1DSB_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVCB97_TEXHEAD_V2_1DSB_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVCB97_TEXHEAD_V2_1DSB_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVCB97_TEXHEAD_V2_1DSB_RESERVED3A MW(123:120)
+#define NVCB97_TEXHEAD_V2_1DSB_HEADER_VERSION MW(127:124)
+#define NVCB97_TEXHEAD_V2_1DSB_HEADER_VERSION_SELECT_PITCH_COLOR_KEY_V2 0x00000001
+#define NVCB97_TEXHEAD_V2_1DSB_HEADER_VERSION_SELECT_PITCH_V2 0x00000002
+#define NVCB97_TEXHEAD_V2_1DSB_HEADER_VERSION_SELECT_BLOCKLINEAR_V2 0x00000003
+#define NVCB97_TEXHEAD_V2_1DSB_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY_V2 0x00000004
+#define NVCB97_TEXHEAD_V2_1DSB_HEADER_VERSION_SELECT_ONE_D_RAW_TYPED 0x00000005
+#define NVCB97_TEXHEAD_V2_1DSB_HEADER_VERSION_SELECT_ONE_D_STRUCT_BUF 0x00000006
+#define NVCB97_TEXHEAD_V2_1DSB_RESERVED4X MW(156:128)
+#define NVCB97_TEXHEAD_V2_1DSB_RESERVED4A MW(159:157)
+#define NVCB97_TEXHEAD_V2_1DSB_RESERVED5X MW(191:160)
+#define NVCB97_TEXHEAD_V2_1DSB_RESERVED6X MW(223:192)
+#define NVCB97_TEXHEAD_V2_1DSB_RESERVED7X MW(255:224)
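/*
 * [Editorial note] A sketch of how the One-D Structured Buffer layout above
 * could be filled using the hypothetical texhead_set_field() helper from the
 * earlier note. Reading WIDTH_MINUS_ONE as an element count and STRIDE as a
 * byte stride is an assumption; this patch only defines the bit positions.
 *
 *     NvU32 th[8] = {0};
 *     texhead_set_field(th, 31, 0, (NvU32)(gpuVa & 0xffffffffu)); // ADDRESS_BITS31TO0
 *     texhead_set_field(th, 63, 32, (NvU32)(gpuVa >> 32));        // ADDRESS_BITS63TO32
 *     texhead_set_field(th, 95, 64, numElements - 1u);            // WIDTH_MINUS_ONE
 *     texhead_set_field(th, 107, 96, byteStride);                 // STRIDE
 *     texhead_set_field(th, 127, 124,
 *         NVCB97_TEXHEAD_V2_1DSB_HEADER_VERSION_SELECT_ONE_D_STRUCT_BUF);
 */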
+
+
+/*
+** Texture Header V2 Pitch
+ */
+
+#define NVCB97_TEXHEAD_V2_PITCH_RESERVED0A MW(3:0)
+#define NVCB97_TEXHEAD_V2_PITCH_RESERVED0X MW(4:4)
+#define NVCB97_TEXHEAD_V2_PITCH_ADDRESS_BITS31TO5 MW(31:5)
+#define NVCB97_TEXHEAD_V2_PITCH_ADDRESS_BITS56TO32 MW(56:32)
+#define NVCB97_TEXHEAD_V2_PITCH_RESERVED1A MW(63:57)
+#define NVCB97_TEXHEAD_V2_PITCH_PITCH_BITS21TO5 MW(80:64)
+#define NVCB97_TEXHEAD_V2_PITCH_LOD_ANISO_QUALITY2 MW(81:81)
+#define NVCB97_TEXHEAD_V2_PITCH_LOD_ANISO_QUALITY MW(82:82)
+#define NVCB97_TEXHEAD_V2_PITCH_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCH_LOD_ISO_QUALITY MW(83:83)
+#define NVCB97_TEXHEAD_V2_PITCH_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_COARSE_SPREAD_MODIFIER MW(85:84)
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_SPREAD_SCALE MW(90:86)
+#define NVCB97_TEXHEAD_V2_PITCH_DEPTH_TEXTURE MW(91:91)
+#define NVCB97_TEXHEAD_V2_PITCH_MAX_MIP_LEVEL MW(95:92)
+#define NVCB97_TEXHEAD_V2_PITCH_X_SOURCE MW(98:96)
+#define NVCB97_TEXHEAD_V2_PITCH_X_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_X_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCH_X_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCH_X_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCH_X_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCH_X_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCH_X_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCH_Y_SOURCE MW(101:99)
+#define NVCB97_TEXHEAD_V2_PITCH_Y_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_Y_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCH_Y_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCH_Y_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCH_Y_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCH_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCH_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCH_Z_SOURCE MW(104:102)
+#define NVCB97_TEXHEAD_V2_PITCH_Z_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_Z_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCH_Z_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCH_Z_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCH_Z_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCH_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCH_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCH_W_SOURCE MW(107:105)
+#define NVCB97_TEXHEAD_V2_PITCH_W_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_W_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCH_W_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCH_W_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCH_W_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCH_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCH_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCH_DATA_TYPE MW(111:108)
+#define NVCB97_TEXHEAD_V2_PITCH_DATA_TYPE_TEX_DATA_TYPE_UNORM 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_DATA_TYPE_TEX_DATA_TYPE_SNORM 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCH_DATA_TYPE_TEX_DATA_TYPE_FLOAT 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCH_DATA_TYPE_TEX_DATA_TYPE_SGNRGB 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCH_DATA_TYPE_TEX_DATA_TYPE_SGNA 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCH_DATA_TYPE_TEX_DATA_TYPE_DSDT 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCH_DATA_TYPE_TEX_DATA_TYPE_UINT 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCH_DATA_TYPE_TEX_DATA_TYPE_SINT 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCH_DATA_TYPE_TEX_DATA_TYPE_ZS 0x00000008
+#define NVCB97_TEXHEAD_V2_PITCH_DATA_TYPE_TEX_DATA_TYPE_SZ 0x00000009
+#define NVCB97_TEXHEAD_V2_PITCH_DATA_TYPE_TEX_DATA_TYPE_ZFS 0x0000000a
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS MW(118:112)
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_INVALID 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_R32_G32_B32 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_R32_G32 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_R32_B24G8 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_X8B8G8R8 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_A8B8G8R8 0x00000008
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_A2B10G10R10 0x00000009
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_R16_G16 0x0000000c
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_G8R24 0x0000000d
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_G24R8 0x0000000e
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_R32 0x0000000f
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_A4B4G4R4 0x00000012
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_A5B5G5R1 0x00000013
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_A1B5G5R5 0x00000014
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_B5G6R5 0x00000015
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_B6G5R5 0x00000016
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_G8R8 0x00000018
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_R16 0x0000001b
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_R8 0x0000001d
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_G4R4 0x0000001e
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_R1 0x0000001f
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_BF10GF11RF11 0x00000021
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_G8B8G8R8 0x00000022
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_B8G8R8G8 0x00000023
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_DXT1 0x00000024
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_DXT23 0x00000025
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_DXT45 0x00000026
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_DXN1 0x00000027
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_DXN2 0x00000028
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_BC6H_SF16 0x00000010
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_BC6H_UF16 0x00000011
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_BC7U 0x00000017
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ETC2_RGB 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_EAC 0x00000019
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_EACX2 0x0000001a
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_Z24S8 0x00000029
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_X8Z24 0x0000002a
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_S8Z24 0x0000002b
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ZF32 0x0000002f
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ZF32_X24S8 0x00000030
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_Z16 0x0000003a
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046
+#define NVCB97_TEXHEAD_V2_PITCH_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f
+#define NVCB97_TEXHEAD_V2_PITCH_PACK_COMPONENTS MW(119:119)
+#define NVCB97_TEXHEAD_V2_PITCH_RESERVED3A MW(123:120)
+#define NVCB97_TEXHEAD_V2_PITCH_HEADER_VERSION MW(127:124)
+#define NVCB97_TEXHEAD_V2_PITCH_HEADER_VERSION_SELECT_PITCH_COLOR_KEY_V2 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCH_HEADER_VERSION_SELECT_PITCH_V2 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCH_HEADER_VERSION_SELECT_BLOCKLINEAR_V2 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCH_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY_V2 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCH_HEADER_VERSION_SELECT_ONE_D_RAW_TYPED 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCH_HEADER_VERSION_SELECT_ONE_D_STRUCT_BUF 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCH_WIDTH_MINUS_ONE MW(144:128)
+#define NVCB97_TEXHEAD_V2_PITCH_NORMALIZED_COORDS MW(145:145)
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_SPREAD_MAX_LOG2 MW(148:146)
+#define NVCB97_TEXHEAD_V2_PITCH_S_R_G_B_CONVERSION MW(149:149)
+#define NVCB97_TEXHEAD_V2_PITCH_TEXTURE_TYPE MW(153:150)
+#define NVCB97_TEXHEAD_V2_PITCH_TEXTURE_TYPE_ONE_D 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_TEXTURE_TYPE_TWO_D 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCH_TEXTURE_TYPE_THREE_D 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCH_TEXTURE_TYPE_CUBEMAP 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCH_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCH_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCH_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCH_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCH_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008
+#define NVCB97_TEXHEAD_V2_PITCH_TEXTURE_TYPE_HTEX_TWOD 0x0000000a
+#define NVCB97_TEXHEAD_V2_PITCH_TEXTURE_TYPE_HTEX_THREE_D 0x0000000b
+#define NVCB97_TEXHEAD_V2_PITCH_TEXTURE_TYPE_HTEX_TWOD_ARRAY 0x0000000e
+#define NVCB97_TEXHEAD_V2_PITCH_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f
+#define NVCB97_TEXHEAD_V2_PITCH_SECTOR_PROMOTION MW(155:154)
+#define NVCB97_TEXHEAD_V2_PITCH_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCH_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCH_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCH_BORDER_SOURCE MW(156:156)
+#define NVCB97_TEXHEAD_V2_PITCH_BORDER_SOURCE_BORDER_TEXTURE 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_BORDER_SOURCE_BORDER_COLOR 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCH_RESERVED4A MW(159:157)
+#define NVCB97_TEXHEAD_V2_PITCH_HEIGHT_MINUS_ONE MW(176:160)
+#define NVCB97_TEXHEAD_V2_PITCH_RESERVED5Y MW(191:177)
+#define NVCB97_TEXHEAD_V2_PITCH_RESERVED6Y MW(192:192)
+#define NVCB97_TEXHEAD_V2_PITCH_TRILIN_OPT MW(197:193)
+#define NVCB97_TEXHEAD_V2_PITCH_MIP_LOD_BIAS MW(210:198)
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_BIAS MW(214:211)
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_FINE_SPREAD_FUNC MW(216:215)
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_COARSE_SPREAD_FUNC MW(218:217)
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCH_MAX_ANISOTROPY MW(221:219)
+#define NVCB97_TEXHEAD_V2_PITCH_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCH_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCH_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCH_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCH_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCH_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCH_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_FINE_SPREAD_MODIFIER MW(223:222)
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCH_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCH_RES_VIEW_MIN_MIP_LEVEL MW(227:224)
+#define NVCB97_TEXHEAD_V2_PITCH_RES_VIEW_MAX_MIP_LEVEL MW(231:228)
+#define NVCB97_TEXHEAD_V2_PITCH_MULTI_SAMPLE_COUNT MW(235:232)
+#define NVCB97_TEXHEAD_V2_PITCH_MULTI_SAMPLE_COUNT_MODE_1X1 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCH_MULTI_SAMPLE_COUNT_MODE_2X1 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCH_MULTI_SAMPLE_COUNT_MODE_2X2 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2_D3D 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCH_MULTI_SAMPLE_COUNT_MODE_2X1_D3D 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCH_MULTI_SAMPLE_COUNT_MODE_4X4 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCH_MULTI_SAMPLE_COUNT_MODE_2X2_VC_4 0x00000008
+#define NVCB97_TEXHEAD_V2_PITCH_MULTI_SAMPLE_COUNT_MODE_2X2_VC_12 0x00000009
+#define NVCB97_TEXHEAD_V2_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2_VC_8 0x0000000a
+#define NVCB97_TEXHEAD_V2_PITCH_MULTI_SAMPLE_COUNT_MODE_4X2_VC_24 0x0000000b
+#define NVCB97_TEXHEAD_V2_PITCH_MIN_LOD_CLAMP MW(247:236)
+#define NVCB97_TEXHEAD_V2_PITCH_RESERVED7Y MW(255:248)
+
+
+/*
+** Texture Header V2 Pitch Color Key
+ */
+
+#define NVCB97_TEXHEAD_V2_PITCHCK_RESERVED0A MW(3:0)
+#define NVCB97_TEXHEAD_V2_PITCHCK_RESERVED0X MW(4:4)
+#define NVCB97_TEXHEAD_V2_PITCHCK_ADDRESS_BITS31TO5 MW(31:5)
+#define NVCB97_TEXHEAD_V2_PITCHCK_ADDRESS_BITS56TO32 MW(56:32)
+#define NVCB97_TEXHEAD_V2_PITCHCK_RESERVED1A MW(63:57)
+#define NVCB97_TEXHEAD_V2_PITCHCK_PITCH_BITS21TO5 MW(80:64)
+#define NVCB97_TEXHEAD_V2_PITCHCK_LOD_ANISO_QUALITY2 MW(81:81)
+#define NVCB97_TEXHEAD_V2_PITCHCK_LOD_ANISO_QUALITY MW(82:82)
+#define NVCB97_TEXHEAD_V2_PITCHCK_LOD_ANISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_LOD_ANISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCHCK_LOD_ISO_QUALITY MW(83:83)
+#define NVCB97_TEXHEAD_V2_PITCHCK_LOD_ISO_QUALITY_LOD_QUALITY_LOW 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_LOD_ISO_QUALITY_LOD_QUALITY_HIGH 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER MW(85:84)
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_COARSE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_SPREAD_SCALE MW(90:86)
+#define NVCB97_TEXHEAD_V2_PITCHCK_DEPTH_TEXTURE MW(91:91)
+#define NVCB97_TEXHEAD_V2_PITCHCK_MAX_MIP_LEVEL MW(95:92)
+#define NVCB97_TEXHEAD_V2_PITCHCK_X_SOURCE MW(98:96)
+#define NVCB97_TEXHEAD_V2_PITCHCK_X_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_X_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCHCK_X_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCHCK_X_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCHCK_X_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCHCK_X_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCHCK_X_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCHCK_Y_SOURCE MW(101:99)
+#define NVCB97_TEXHEAD_V2_PITCHCK_Y_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_Y_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCHCK_Y_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCHCK_Y_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCHCK_Y_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCHCK_Y_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCHCK_Y_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCHCK_Z_SOURCE MW(104:102)
+#define NVCB97_TEXHEAD_V2_PITCHCK_Z_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_Z_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCHCK_Z_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCHCK_Z_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCHCK_Z_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCHCK_Z_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCHCK_Z_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCHCK_W_SOURCE MW(107:105)
+#define NVCB97_TEXHEAD_V2_PITCHCK_W_SOURCE_IN_ZERO 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_W_SOURCE_IN_R 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCHCK_W_SOURCE_IN_G 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCHCK_W_SOURCE_IN_B 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCHCK_W_SOURCE_IN_A 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCHCK_W_SOURCE_IN_ONE_INT 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCHCK_W_SOURCE_IN_ONE_FLOAT 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCHCK_DATA_TYPE MW(111:108)
+#define NVCB97_TEXHEAD_V2_PITCHCK_DATA_TYPE_TEX_DATA_TYPE_UNORM 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_DATA_TYPE_TEX_DATA_TYPE_SNORM 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCHCK_DATA_TYPE_TEX_DATA_TYPE_FLOAT 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCHCK_DATA_TYPE_TEX_DATA_TYPE_SGNRGB 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCHCK_DATA_TYPE_TEX_DATA_TYPE_SGNA 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCHCK_DATA_TYPE_TEX_DATA_TYPE_DSDT 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCHCK_DATA_TYPE_TEX_DATA_TYPE_UINT 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCHCK_DATA_TYPE_TEX_DATA_TYPE_SINT 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCHCK_DATA_TYPE_TEX_DATA_TYPE_ZS 0x00000008
+#define NVCB97_TEXHEAD_V2_PITCHCK_DATA_TYPE_TEX_DATA_TYPE_SZ 0x00000009
+#define NVCB97_TEXHEAD_V2_PITCHCK_DATA_TYPE_TEX_DATA_TYPE_ZFS 0x0000000a
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS MW(118:112)
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_INVALID 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_R32_G32_B32_A32 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_R32_G32_B32 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_R16_G16_B16_A16 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_R32_G32 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_R32_B24G8 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_X8B8G8R8 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_A8B8G8R8 0x00000008
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_A2B10G10R10 0x00000009
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_R16_G16 0x0000000c
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_G8R24 0x0000000d
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_G24R8 0x0000000e
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_R32 0x0000000f
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_A4B4G4R4 0x00000012
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_A5B5G5R1 0x00000013
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_A1B5G5R5 0x00000014
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_B5G6R5 0x00000015
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_B6G5R5 0x00000016
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_G8R8 0x00000018
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_R16 0x0000001b
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_Y8_VIDEO 0x0000001c
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_R8 0x0000001d
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_G4R4 0x0000001e
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_R1 0x0000001f
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_E5B9G9R9_SHAREDEXP 0x00000020
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_BF10GF11RF11 0x00000021
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_G8B8G8R8 0x00000022
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_B8G8R8G8 0x00000023
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_DXT1 0x00000024
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_DXT23 0x00000025
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_DXT45 0x00000026
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_DXN1 0x00000027
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_DXN2 0x00000028
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_BC6H_SF16 0x00000010
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_BC6H_UF16 0x00000011
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_BC7U 0x00000017
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ETC2_RGB 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ETC2_RGB_PTA 0x0000000a
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ETC2_RGBA 0x0000000b
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_EAC 0x00000019
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_EACX2 0x0000001a
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_Z24S8 0x00000029
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_X8Z24 0x0000002a
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_S8Z24 0x0000002b
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ZF32 0x0000002f
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ZF32_X24S8 0x00000030
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_Z16 0x0000003a
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ASTC_2D_4X4 0x00000040
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ASTC_2D_5X4 0x00000050
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ASTC_2D_5X5 0x00000041
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ASTC_2D_6X5 0x00000051
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ASTC_2D_6X6 0x00000042
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ASTC_2D_8X5 0x00000055
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ASTC_2D_8X6 0x00000052
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ASTC_2D_8X8 0x00000044
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X5 0x00000056
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X6 0x00000057
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X8 0x00000053
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ASTC_2D_10X10 0x00000045
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ASTC_2D_12X10 0x00000054
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_ASTC_2D_12X12 0x00000046
+#define NVCB97_TEXHEAD_V2_PITCHCK_COMPONENTS_SIZES_CS_BITFIELD_SIZE 0x0000007f
+#define NVCB97_TEXHEAD_V2_PITCHCK_PACK_COMPONENTS MW(119:119)
+#define NVCB97_TEXHEAD_V2_PITCHCK_RESERVED3A MW(123:120)
+#define NVCB97_TEXHEAD_V2_PITCHCK_HEADER_VERSION MW(127:124)
+#define NVCB97_TEXHEAD_V2_PITCHCK_HEADER_VERSION_SELECT_PITCH_COLOR_KEY_V2 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCHCK_HEADER_VERSION_SELECT_PITCH_V2 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCHCK_HEADER_VERSION_SELECT_BLOCKLINEAR_V2 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCHCK_HEADER_VERSION_SELECT_BLOCKLINEAR_COLOR_KEY_V2 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCHCK_HEADER_VERSION_SELECT_ONE_D_RAW_TYPED 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCHCK_HEADER_VERSION_SELECT_ONE_D_STRUCT_BUF 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCHCK_WIDTH_MINUS_ONE MW(144:128)
+#define NVCB97_TEXHEAD_V2_PITCHCK_NORMALIZED_COORDS MW(145:145)
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_SPREAD_MAX_LOG2 MW(148:146)
+#define NVCB97_TEXHEAD_V2_PITCHCK_S_R_G_B_CONVERSION MW(149:149)
+#define NVCB97_TEXHEAD_V2_PITCHCK_TEXTURE_TYPE MW(153:150)
+#define NVCB97_TEXHEAD_V2_PITCHCK_TEXTURE_TYPE_ONE_D 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_TEXTURE_TYPE_TWO_D 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCHCK_TEXTURE_TYPE_THREE_D 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCHCK_TEXTURE_TYPE_CUBEMAP 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCHCK_TEXTURE_TYPE_ONE_D_ARRAY 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCHCK_TEXTURE_TYPE_TWO_D_ARRAY 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCHCK_TEXTURE_TYPE_ONE_D_BUFFER 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCHCK_TEXTURE_TYPE_TWO_D_NO_MIPMAP 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCHCK_TEXTURE_TYPE_CUBEMAP_ARRAY 0x00000008
+#define NVCB97_TEXHEAD_V2_PITCHCK_TEXTURE_TYPE_HTEX_TWOD 0x0000000a
+#define NVCB97_TEXHEAD_V2_PITCHCK_TEXTURE_TYPE_HTEX_THREE_D 0x0000000b
+#define NVCB97_TEXHEAD_V2_PITCHCK_TEXTURE_TYPE_HTEX_TWOD_ARRAY 0x0000000e
+#define NVCB97_TEXHEAD_V2_PITCHCK_TEXTURE_TYPE_TT_BIT_FIELD_SIZE 0x0000000f
+#define NVCB97_TEXHEAD_V2_PITCHCK_SECTOR_PROMOTION MW(155:154)
+#define NVCB97_TEXHEAD_V2_PITCHCK_SECTOR_PROMOTION_NO_PROMOTION 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_SECTOR_PROMOTION_PROMOTE_TO_2_V 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCHCK_SECTOR_PROMOTION_PROMOTE_TO_2_H 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCHCK_SECTOR_PROMOTION_PROMOTE_TO_4 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCHCK_BORDER_SOURCE MW(156:156)
+#define NVCB97_TEXHEAD_V2_PITCHCK_BORDER_SOURCE_BORDER_TEXTURE 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_BORDER_SOURCE_BORDER_COLOR 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCHCK_RESERVED4A MW(159:157)
+#define NVCB97_TEXHEAD_V2_PITCHCK_HEIGHT_MINUS_ONE MW(176:160)
+#define NVCB97_TEXHEAD_V2_PITCHCK_RESERVED5Y MW(191:177)
+#define NVCB97_TEXHEAD_V2_PITCHCK_COLOR_KEY_OP MW(192:192)
+#define NVCB97_TEXHEAD_V2_PITCHCK_TRILIN_OPT MW(197:193)
+#define NVCB97_TEXHEAD_V2_PITCHCK_MIP_LOD_BIAS MW(210:198)
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_BIAS MW(214:211)
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_FINE_SPREAD_FUNC MW(216:215)
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_FINE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_COARSE_SPREAD_FUNC MW(218:217)
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_HALF 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_ONE 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_TWO 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_COARSE_SPREAD_FUNC_SPREAD_FUNC_MAX 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCHCK_MAX_ANISOTROPY MW(221:219)
+#define NVCB97_TEXHEAD_V2_PITCHCK_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCHCK_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCHCK_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCHCK_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004
+#define NVCB97_TEXHEAD_V2_PITCHCK_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005
+#define NVCB97_TEXHEAD_V2_PITCHCK_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006
+#define NVCB97_TEXHEAD_V2_PITCHCK_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_FINE_SPREAD_MODIFIER MW(223:222)
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_NONE 0x00000000
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_ONE 0x00000001
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_CONST_TWO 0x00000002
+#define NVCB97_TEXHEAD_V2_PITCHCK_ANISO_FINE_SPREAD_MODIFIER_SPREAD_MODIFIER_SQRT 0x00000003
+#define NVCB97_TEXHEAD_V2_PITCHCK_COLOR_KEY_VALUE MW(255:224)
+
+
+/*
+** Texture Sampler State
+ */
+
+#define NVCB97_TEXSAMP0_ADDRESS_U 2:0
+#define NVCB97_TEXSAMP0_ADDRESS_U_WRAP 0x00000000
+#define NVCB97_TEXSAMP0_ADDRESS_U_MIRROR 0x00000001
+#define NVCB97_TEXSAMP0_ADDRESS_U_CLAMP_TO_EDGE 0x00000002
+#define NVCB97_TEXSAMP0_ADDRESS_U_BORDER 0x00000003
+#define NVCB97_TEXSAMP0_ADDRESS_U_CLAMP_OGL 0x00000004
+#define NVCB97_TEXSAMP0_ADDRESS_U_MIRROR_ONCE_CLAMP_TO_EDGE 0x00000005
+#define NVCB97_TEXSAMP0_ADDRESS_U_MIRROR_ONCE_BORDER 0x00000006
+#define NVCB97_TEXSAMP0_ADDRESS_U_MIRROR_ONCE_CLAMP_OGL 0x00000007
+#define NVCB97_TEXSAMP0_ADDRESS_V 5:3
+#define NVCB97_TEXSAMP0_ADDRESS_V_WRAP 0x00000000
+#define NVCB97_TEXSAMP0_ADDRESS_V_MIRROR 0x00000001
+#define NVCB97_TEXSAMP0_ADDRESS_V_CLAMP_TO_EDGE 0x00000002
+#define NVCB97_TEXSAMP0_ADDRESS_V_BORDER 0x00000003
+#define NVCB97_TEXSAMP0_ADDRESS_V_CLAMP_OGL 0x00000004
+#define NVCB97_TEXSAMP0_ADDRESS_V_MIRROR_ONCE_CLAMP_TO_EDGE 0x00000005
+#define NVCB97_TEXSAMP0_ADDRESS_V_MIRROR_ONCE_BORDER 0x00000006
+#define NVCB97_TEXSAMP0_ADDRESS_V_MIRROR_ONCE_CLAMP_OGL 0x00000007
+#define NVCB97_TEXSAMP0_ADDRESS_P 8:6
+#define NVCB97_TEXSAMP0_ADDRESS_P_WRAP 0x00000000
+#define NVCB97_TEXSAMP0_ADDRESS_P_MIRROR 0x00000001
+#define NVCB97_TEXSAMP0_ADDRESS_P_CLAMP_TO_EDGE 0x00000002
+#define NVCB97_TEXSAMP0_ADDRESS_P_BORDER 0x00000003
+#define NVCB97_TEXSAMP0_ADDRESS_P_CLAMP_OGL 0x00000004
+#define NVCB97_TEXSAMP0_ADDRESS_P_MIRROR_ONCE_CLAMP_TO_EDGE 0x00000005
+#define NVCB97_TEXSAMP0_ADDRESS_P_MIRROR_ONCE_BORDER 0x00000006
+#define NVCB97_TEXSAMP0_ADDRESS_P_MIRROR_ONCE_CLAMP_OGL 0x00000007
+#define NVCB97_TEXSAMP0_DEPTH_COMPARE 9:9
+#define NVCB97_TEXSAMP0_DEPTH_COMPARE_FUNC 12:10
+#define NVCB97_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_NEVER 0x00000000
+#define NVCB97_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_LESS 0x00000001
+#define NVCB97_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_EQUAL 0x00000002
+#define NVCB97_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_LEQUAL 0x00000003
+#define NVCB97_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_GREATER 0x00000004
+#define NVCB97_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_NOTEQUAL 0x00000005
+#define NVCB97_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_GEQUAL 0x00000006
+#define NVCB97_TEXSAMP0_DEPTH_COMPARE_FUNC_ZC_ALWAYS 0x00000007
+#define NVCB97_TEXSAMP0_S_R_G_B_CONVERSION 13:13
+#define NVCB97_TEXSAMP0_RESERVED0A 16:14
+#define NVCB97_TEXSAMP0_RESERVED0B 19:17
+#define NVCB97_TEXSAMP0_MAX_ANISOTROPY 22:20
+#define NVCB97_TEXSAMP0_MAX_ANISOTROPY_ANISO_1_TO_1 0x00000000
+#define NVCB97_TEXSAMP0_MAX_ANISOTROPY_ANISO_2_TO_1 0x00000001
+#define NVCB97_TEXSAMP0_MAX_ANISOTROPY_ANISO_4_TO_1 0x00000002
+#define NVCB97_TEXSAMP0_MAX_ANISOTROPY_ANISO_6_TO_1 0x00000003
+#define NVCB97_TEXSAMP0_MAX_ANISOTROPY_ANISO_8_TO_1 0x00000004
+#define NVCB97_TEXSAMP0_MAX_ANISOTROPY_ANISO_10_TO_1 0x00000005
+#define NVCB97_TEXSAMP0_MAX_ANISOTROPY_ANISO_12_TO_1 0x00000006
+#define NVCB97_TEXSAMP0_MAX_ANISOTROPY_ANISO_16_TO_1 0x00000007
+#define NVCB97_TEXSAMP0_FORCED_INVALID 31:31
+#define NVCB97_TEXSAMP1_MAG_FILTER 2:0
+#define NVCB97_TEXSAMP1_MAG_FILTER_MAG_POINT 0x00000001
+#define NVCB97_TEXSAMP1_MAG_FILTER_MAG_LINEAR 0x00000002
+#define NVCB97_TEXSAMP1_MAG_FILTER_VCAA_4_TAP 0x00000003
+#define NVCB97_TEXSAMP1_MAG_FILTER_VCAA_8_TAP 0x00000004
+#define NVCB97_TEXSAMP1_MIN_LOD_CLAMP_BEHAVIOR_FOR_NEAREST_MIP 3:3
+#define NVCB97_TEXSAMP1_MIN_LOD_CLAMP_BEHAVIOR_FOR_NEAREST_MIP_INTEGER_AND_FRACTION 0x00000000
+#define NVCB97_TEXSAMP1_MIN_LOD_CLAMP_BEHAVIOR_FOR_NEAREST_MIP_INTEGER_ONLY 0x00000001
+#define NVCB97_TEXSAMP1_MIN_FILTER 5:4
+#define NVCB97_TEXSAMP1_MIN_FILTER_MIN_POINT 0x00000001
+#define NVCB97_TEXSAMP1_MIN_FILTER_MIN_LINEAR 0x00000002
+#define NVCB97_TEXSAMP1_MIN_FILTER_MIN_ANISO 0x00000003
+#define NVCB97_TEXSAMP1_MIP_FILTER 7:6
+#define NVCB97_TEXSAMP1_MIP_FILTER_MIP_NONE 0x00000001
+#define NVCB97_TEXSAMP1_MIP_FILTER_MIP_POINT 0x00000002
+#define NVCB97_TEXSAMP1_MIP_FILTER_MIP_LINEAR 0x00000003
+#define NVCB97_TEXSAMP1_CUBEMAP_INTERFACE_FILTERING 9:8
+#define NVCB97_TEXSAMP1_CUBEMAP_INTERFACE_FILTERING_USE_WRAP 0x00000000
+#define NVCB97_TEXSAMP1_CUBEMAP_INTERFACE_FILTERING_OVERRIDE_WRAP 0x00000001
+#define NVCB97_TEXSAMP1_CUBEMAP_INTERFACE_FILTERING_AUTO_SPAN_SEAM 0x00000002
+#define NVCB97_TEXSAMP1_CUBEMAP_INTERFACE_FILTERING_AUTO_CROSS_SEAM 0x00000003
+#define NVCB97_TEXSAMP1_REDUCTION_FILTER 11:10
+#define NVCB97_TEXSAMP1_REDUCTION_FILTER_RED_NONE 0x00000000
+#define NVCB97_TEXSAMP1_REDUCTION_FILTER_RED_MINIMUM 0x00000001
+#define NVCB97_TEXSAMP1_REDUCTION_FILTER_RED_MAXIMUM 0x00000002
+#define NVCB97_TEXSAMP1_MIP_LOD_BIAS 24:12
+#define NVCB97_TEXSAMP1_FLOAT_COORD_NORMALIZATION 25:25
+#define NVCB97_TEXSAMP1_FLOAT_COORD_NORMALIZATION_USE_HEADER_SETTING 0x00000000
+#define NVCB97_TEXSAMP1_FLOAT_COORD_NORMALIZATION_FORCE_UNNORMALIZED_COORDS 0x00000001
+#define NVCB97_TEXSAMP1_TRILIN_OPT 30:26
+#define NVCB97_TEXSAMP2_MIN_LOD_CLAMP 11:0
+#define NVCB97_TEXSAMP2_MAX_LOD_CLAMP 23:12
+#define NVCB97_TEXSAMP2_S_R_G_B_BORDER_COLOR_R 31:24
+#define NVCB97_TEXSAMP3_RESERVED12 11:0
+#define NVCB97_TEXSAMP3_S_R_G_B_BORDER_COLOR_G 19:12
+#define NVCB97_TEXSAMP3_S_R_G_B_BORDER_COLOR_B 27:20
+#define NVCB97_TEXSAMP4_BORDER_COLOR_R 31:0
+#define NVCB97_TEXSAMP5_BORDER_COLOR_G 31:0
+#define NVCB97_TEXSAMP6_BORDER_COLOR_B 31:0
+#define NVCB97_TEXSAMP7_BORDER_COLOR_A 31:0
+
+
+
+#endif // #ifndef __CLCB97TEX_H__
NVCB97_TEXSAMP1_REDUCTION_FILTER_RED_NONE 0x00000000 +#define NVCB97_TEXSAMP1_REDUCTION_FILTER_RED_MINIMUM 0x00000001 +#define NVCB97_TEXSAMP1_REDUCTION_FILTER_RED_MAXIMUM 0x00000002 +#define NVCB97_TEXSAMP1_MIP_LOD_BIAS 24:12 +#define NVCB97_TEXSAMP1_FLOAT_COORD_NORMALIZATION 25:25 +#define NVCB97_TEXSAMP1_FLOAT_COORD_NORMALIZATION_USE_HEADER_SETTING 0x00000000 +#define NVCB97_TEXSAMP1_FLOAT_COORD_NORMALIZATION_FORCE_UNNORMALIZED_COORDS 0x00000001 +#define NVCB97_TEXSAMP1_TRILIN_OPT 30:26 +#define NVCB97_TEXSAMP2_MIN_LOD_CLAMP 11:0 +#define NVCB97_TEXSAMP2_MAX_LOD_CLAMP 23:12 +#define NVCB97_TEXSAMP2_S_R_G_B_BORDER_COLOR_R 31:24 +#define NVCB97_TEXSAMP3_RESERVED12 11:0 +#define NVCB97_TEXSAMP3_S_R_G_B_BORDER_COLOR_G 19:12 +#define NVCB97_TEXSAMP3_S_R_G_B_BORDER_COLOR_B 27:20 +#define NVCB97_TEXSAMP4_BORDER_COLOR_R 31:0 +#define NVCB97_TEXSAMP5_BORDER_COLOR_G 31:0 +#define NVCB97_TEXSAMP6_BORDER_COLOR_B 31:0 +#define NVCB97_TEXSAMP7_BORDER_COLOR_A 31:0 + + + +#endif // #ifndef __CLCB97TEX_H__
diff --git a/src/common/sdk/nvidia/inc/class/clcc70.h b/src/common/sdk/nvidia/inc/class/clcc70.h
new file mode 100644
index 0000000..79aeafc
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clcc70.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/clcc70.finn
+//
+
+#define NVCC70_DISPLAY (0xcc70U) /* finn: Evaluated from "NVCC70_ALLOCATION_PARAMETERS_MESSAGE_ID" */
+
+#define NVCC70_ALLOCATION_PARAMETERS_MESSAGE_ID (0xcc70U)
+
+typedef struct NVCC70_ALLOCATION_PARAMETERS {
+    NvU32 numHeads; // Number of HEADs in this chip/display
+    NvU32 numSors;  // Number of SORs in this chip/display
+    NvU32 numDsis;  // Number of DSIs in this chip/display
+} NVCC70_ALLOCATION_PARAMETERS;
+
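A minimal usage sketch for the structure above: how a client might populate NVCC70_ALLOCATION_PARAMETERS when allocating the NVCC70_DISPLAY class. The allocDisplayClass() entry point and the head/SOR/DSI counts are illustrative assumptions, not APIs or values taken from this change.

#include <nvtypes.h>
#include "class/clcc70.h"

/* Hypothetical allocation entry point, declared here only so the sketch
 * is self-contained; the real RM call is not part of this header. */
extern int allocDisplayClass(NvU32 hClass, void *pAllocParams);

static int allocDisplayDevice(void)
{
    NVCC70_ALLOCATION_PARAMETERS params = {0};

    params.numHeads = 4; /* assumed HEAD count for this chip   */
    params.numSors  = 4; /* assumed SOR count for this chip    */
    params.numDsis  = 0; /* assumed: no DSI outputs populated  */

    return allocDisplayClass(NVCC70_DISPLAY, &params);
}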
diff --git a/src/common/sdk/nvidia/inc/class/clcc71.h b/src/common/sdk/nvidia/inc/class/clcc71.h new file mode 100644 index 0000000..0520145 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clcc71.h @@ -0,0 +1,186 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clcc71_h_ +#define _clcc71_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVCC71_DISP_SF_USER (0x000CC71) + +typedef volatile struct _clcc71_tag0 { + NvU32 dispSfUserOffset[0x400]; /* NV_PDISP_SF_USER 0x000D0FFF:0x000D0000 */ +} _NvCC71DispSfUser, NvCC71DispSfUserMap; + +#define NVCC71_SF_HDMI_INFO_CTRL(i,j) (0x000E0000-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVCC71_SF_HDMI_INFO_CTRL__SIZE_1 8 /* */ +#define NVCC71_SF_HDMI_INFO_CTRL__SIZE_2 3 /* */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x000E0000-0x000E0000+(i)*1024) /* RW-4A */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 8 /* */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE 0:0 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_STATUS(i) (0x000E0004-0x000E0000+(i)*1024) /* R--4A */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_STATUS__SIZE_1 8 /* */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_STATUS_SENT 0:0 /* R-IVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x000E0008-0x000E0000+(i)*1024) /* RW-4A */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 8 /* */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i)
(0x000E000C-0x000E0000+(i)*1024) /* RW-4A */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 8 /* */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x000E0010-0x000E0000+(i)*1024) /* RW-4A */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 8 /* */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x000E0014-0x000E0000+(i)*1024) /* RW-4A */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 8 /* */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x000E0018-0x000E0000+(i)*1024) /* RW-4A */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 8 /* */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW(i) (0x000E001C-0x000E0000+(i)*1024) /* RW-4A */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW__SIZE_1 8 /* */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define 
NVCC71_SF_HDMI_AVI_INFOFRAME_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL(i,j) (0x000E0130-0x000E0000+(i)*1024+(j)*8) /* RW-4A */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL__SIZE_1 8 /* */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL__SIZE_2 10 /* */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_ENABLE 0:0 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE 3:1 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE_ALWAYS 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE_ONCE 0x00000001 /* RW--V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE_FID_ALWAYS 0x00000002 /* RW--V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE_FID_ONCE 0x00000003 /* RW--V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_RUN_MODE_FID_TRIGGER 0x00000004 /* RW--V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_LOC 5:4 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_LOC_VBLANK 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_LOC_VSYNC 0x00000001 /* RW--V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_LOC_LINE 0x00000002 /* RW--V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_OFFSET 10:6 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_OFFSET_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_SIZE 18:14 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_SIZE_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_BUSY 22:22 /* R-IVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_BUSY_NO 0x00000000 /* R-I-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_BUSY_YES 0x00000001 /* R---V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_SENT 23:23 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_SENT_NO 0x00000000 /* R-I-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_SENT_YES 0x00000001 /* R---V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CTRL_SENT_CLEAR 0x00000001 /* -W--C */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG(i,j) (0x000E0134-0x000E0000+(i)*1024+(j)*8) /* RW-4A */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG__SIZE_1 8 /* */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG__SIZE_2 10 /* */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_FID 7:0 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_FID_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_LINE_ID 23:8 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_LINE_ID_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_LINE_ID_REVERSED 24:24 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_LINE_ID_REVERSED_NO 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_LINE_ID_REVERSED_YES 0x00000001 /* RW--V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_AS_SDP_OVERRIDE_EN 25:25 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_AS_SDP_OVERRIDE_EN_NO 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_AS_SDP_OVERRIDE_EN_YES 0x00000001 /* RW--V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_HW_CHECKSUM 29:29 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_HW_CHECKSUM_NO 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_HW_CHECKSUM_YES 0x00000001 /* RW--V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_NEW 30:30 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_NEW_INIT 0x00000000 /* R-I-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_NEW_DONE 0x00000000 /* R---V */ +#define 
NVCC71_SF_GENERIC_INFOFRAME_CONFIG_NEW_PENDING 0x00000001 /* R---T */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_NEW_TRIGGER 0x00000001 /* -W--T */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_MTD_STATE_CTRL 31:31 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_MTD_STATE_CTRL_ACT 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_CONFIG_MTD_STATE_CTRL_ARM 0x00000001 /* RW--V */ +#define NVCC71_SF_GENERIC_INFOFRAME_DATA_CTRL(i) (0x000E03F0-0x000E0000+(i)*1024) /* RW-4A */ +#define NVCC71_SF_GENERIC_INFOFRAME_DATA_CTRL__SIZE_1 8 /* */ +#define NVCC71_SF_GENERIC_INFOFRAME_DATA_CTRL_OFFSET 4:0 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_DATA_CTRL_OFFSET_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_DATA(i) (0x000E03F4-0x000E0000+(i)*1024) /* RW-4A */ +#define NVCC71_SF_GENERIC_INFOFRAME_DATA__SIZE_1 8 /* */ +#define NVCC71_SF_GENERIC_INFOFRAME_DATA_BYTE0 7:0 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_DATA_BYTE0_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_DATA_BYTE1 15:8 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_DATA_BYTE1_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_DATA_BYTE2 23:16 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_DATA_BYTE2_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_DATA_BYTE3 31:24 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_DATA_BYTE3_INIT 0x00000000 /* RWI-V */ +#define NVCC71_SF_GENERIC_INFOFRAME_MISC_CTRL(i) (0x000E03F8-0x000E0000+(i)*1024) /* RW-4A */ +#define NVCC71_SF_GENERIC_INFOFRAME_MISC_CTRL__SIZE_1 8 /* */ +#define NVCC71_SF_GENERIC_INFOFRAME_MISC_CTRL_AUDIO_PRIORITY 1:1 /* RWIVF */ +#define NVCC71_SF_GENERIC_INFOFRAME_MISC_CTRL_AUDIO_PRIORITY_HIGH 0x00000000 /* RW--V */ +#define NVCC71_SF_GENERIC_INFOFRAME_MISC_CTRL_AUDIO_PRIORITY_LOW 0x00000001 /* RWI-V */ + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _clcc71_h_
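The byte-lane layout in the clcc71.h registers above is uniform: each 32-bit SUBPACK or DATA word carries up to four payload bytes in bits 7:0, 15:8, 23:16 and 31:24. A minimal sketch of packing an AVI infoframe payload accordingly; packInfoframeBytes() and packAviPayload() are hypothetical helpers, not code from this change, and writing the resulting words to the (i)-indexed registers (plus the SUBPACK2 tail for PB14..PB17) is left to the caller.

#include <nvtypes.h>

/* Pack up to four payload bytes into one register word:
 * pb[0] -> bits 7:0, pb[1] -> 15:8, pb[2] -> 23:16, pb[3] -> 31:24. */
static NvU32 packInfoframeBytes(const NvU8 *pb, NvU32 count)
{
    NvU32 word = 0;
    NvU32 i;

    for (i = 0; i < count && i < 4; i++)
        word |= ((NvU32)pb[i]) << (8 * i);

    return word;
}

/* Distribute the first 14 AVI payload bytes across the subpack words,
 * matching the PB assignments in the defines above. */
static void packAviPayload(const NvU8 payload[14], NvU32 words[4])
{
    words[0] = packInfoframeBytes(&payload[0],  4); /* SUBPACK0_LOW:  PB0..PB3   */
    words[1] = packInfoframeBytes(&payload[4],  3); /* SUBPACK0_HIGH: PB4..PB6   */
    words[2] = packInfoframeBytes(&payload[7],  4); /* SUBPACK1_LOW:  PB7..PB10  */
    words[3] = packInfoframeBytes(&payload[11], 3); /* SUBPACK1_HIGH: PB11..PB13 */
}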
diff --git a/src/common/sdk/nvidia/inc/class/clcc73.h b/src/common/sdk/nvidia/inc/class/clcc73.h new file mode 100644 index 0000000..306ae29 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clcc73.h @@ -0,0 +1,915 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _clcc73_h_ +#define _clcc73_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVCC73_DISP_CAPABILITIES 0xCC73 + +typedef volatile struct _clcc73_tag0 { + NvU32 dispCapabilities[0x400]; +} _NvCC73DispCapabilities,NvCC73DispCapabilities_Map ; + + +#define NVCC73_SYS_CAP 0x0 /* RW-4R */ +#define NVCC73_SYS_CAP_HEAD0_EXISTS 0:0 /* RWIVF */ +#define NVCC73_SYS_CAP_HEAD0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD1_EXISTS 1:1 /* RWIVF */ +#define NVCC73_SYS_CAP_HEAD1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD2_EXISTS 2:2 /* RWIVF */ +#define NVCC73_SYS_CAP_HEAD2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD3_EXISTS 3:3 /* RWIVF */ +#define NVCC73_SYS_CAP_HEAD3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD4_EXISTS 4:4 /* RWIVF */ +#define NVCC73_SYS_CAP_HEAD4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD5_EXISTS 5:5 /* RWIVF */ +#define NVCC73_SYS_CAP_HEAD5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD6_EXISTS 6:6 /* RWIVF */ +#define NVCC73_SYS_CAP_HEAD6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD7_EXISTS 7:7 /* RWIVF */ +#define NVCC73_SYS_CAP_HEAD7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVCC73_SYS_CAP_HEAD_EXISTS__SIZE_1 8 /* */ +#define NVCC73_SYS_CAP_HEAD_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_HEAD_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_SOR0_EXISTS 8:8 /* RWIVF */ +#define NVCC73_SYS_CAP_SOR0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_SOR0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_SOR1_EXISTS 9:9 /* RWIVF */ +#define NVCC73_SYS_CAP_SOR1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_SOR1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_SOR2_EXISTS 10:10 /* RWIVF */ +#define NVCC73_SYS_CAP_SOR2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_SOR2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_SOR3_EXISTS 11:11 /* RWIVF */ +#define NVCC73_SYS_CAP_SOR3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_SOR3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_SOR4_EXISTS 12:12 /* RWIVF */ +#define NVCC73_SYS_CAP_SOR4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_SOR4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_SOR5_EXISTS 13:13 /* RWIVF */ +#define NVCC73_SYS_CAP_SOR5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_SOR5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_SOR6_EXISTS 14:14 /* RWIVF */ +#define NVCC73_SYS_CAP_SOR6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_SOR6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_SOR7_EXISTS 15:15 /* RWIVF */ +#define NVCC73_SYS_CAP_SOR7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_SOR7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_SOR_EXISTS(i) (8+(i)):(8+(i)) /* RWIVF */ +#define NVCC73_SYS_CAP_SOR_EXISTS__SIZE_1 8 /* */ +#define
NVCC73_SYS_CAP_SOR_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_SOR_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_RISCV0_EXISTS 16:16 /* RWIVF */ +#define NVCC73_SYS_CAP_RISCV0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_RISCV0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_DSI0_EXISTS 20:20 /* RWIVF */ +#define NVCC73_SYS_CAP_DSI0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_DSI0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_DSI1_EXISTS 21:21 /* RWIVF */ +#define NVCC73_SYS_CAP_DSI1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_DSI1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_DSI2_EXISTS 22:22 /* RWIVF */ +#define NVCC73_SYS_CAP_DSI2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_DSI2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_DSI3_EXISTS 23:23 /* RWIVF */ +#define NVCC73_SYS_CAP_DSI3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_DSI3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAP_DSI_EXISTS(i) (20+(i)):(20+(i)) /* RWIVF */ +#define NVCC73_SYS_CAP_DSI_EXISTS__SIZE_1 4 /* */ +#define NVCC73_SYS_CAP_DSI_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAP_DSI_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB 0x4 /* RW-4R */ +#define NVCC73_SYS_CAPB_WINDOW0_EXISTS 0:0 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW1_EXISTS 1:1 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW2_EXISTS 2:2 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW3_EXISTS 3:3 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW4_EXISTS 4:4 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW5_EXISTS 5:5 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW6_EXISTS 6:6 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW7_EXISTS 7:7 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW8_EXISTS 8:8 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW8_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW8_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW9_EXISTS 9:9 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW9_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW9_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW10_EXISTS 10:10 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW10_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW10_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW11_EXISTS 11:11 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW11_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW11_EXISTS_YES 
0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW12_EXISTS 12:12 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW12_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW12_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW13_EXISTS 13:13 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW13_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW13_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW14_EXISTS 14:14 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW14_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW14_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW15_EXISTS 15:15 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW15_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW15_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW16_EXISTS 16:16 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW16_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW16_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW17_EXISTS 17:17 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW17_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW17_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW18_EXISTS 18:18 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW18_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW18_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW19_EXISTS 19:19 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW19_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW19_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW20_EXISTS 20:20 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW20_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW20_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW21_EXISTS 21:21 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW21_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW21_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW22_EXISTS 22:22 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW22_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW22_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW23_EXISTS 23:23 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW23_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW23_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW24_EXISTS 24:24 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW24_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW24_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW25_EXISTS 25:25 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW25_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW25_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW26_EXISTS 26:26 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW26_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW26_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW27_EXISTS 27:27 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW27_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW27_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW28_EXISTS 28:28 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW28_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW28_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW29_EXISTS 29:29 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW29_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW29_EXISTS_YES 0x00000001 /* RW--V */ +#define 
NVCC73_SYS_CAPB_WINDOW30_EXISTS 30:30 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW30_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW30_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW31_EXISTS 31:31 /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW31_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW31_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVCC73_SYS_CAPB_WINDOW_EXISTS__SIZE_1 32 /* */ +#define NVCC73_SYS_CAPB_WINDOW_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPB_WINDOW_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC 0x20 /* RW-4R */ +#define NVCC73_SYS_CAPC_TILE0_EXISTS 0:0 /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE1_EXISTS 1:1 /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE2_EXISTS 2:2 /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE3_EXISTS 3:3 /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE4_EXISTS 4:4 /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE5_EXISTS 5:5 /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE6_EXISTS 6:6 /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE7_EXISTS 7:7 /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE_EXISTS__SIZE_1 8 /* */ +#define NVCC73_SYS_CAPC_TILE_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE0_SUPPORT_MULTI_TILE 8:8 /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE0_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE0_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE1_SUPPORT_MULTI_TILE 9:9 /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE1_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE1_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE2_SUPPORT_MULTI_TILE 10:10 /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE2_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE2_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE3_SUPPORT_MULTI_TILE 11:11 /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE3_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE3_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE4_SUPPORT_MULTI_TILE 12:12 /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE4_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE4_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE5_SUPPORT_MULTI_TILE 13:13 /* 
RWIVF */ +#define NVCC73_SYS_CAPC_TILE5_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE5_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE6_SUPPORT_MULTI_TILE 14:14 /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE6_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE6_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE7_SUPPORT_MULTI_TILE 15:15 /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE7_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE7_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE_SUPPORT_MULTI_TILE(i) (8+(i)):(8+(i)) /* RWIVF */ +#define NVCC73_SYS_CAPC_TILE_SUPPORT_MULTI_TILE__SIZE_1 8 /* */ +#define NVCC73_SYS_CAPC_TILE_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPC_TILE_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPC_MERGER_TILE_BUFFER_SIZE 31:16 /* RWIUF */ +#define NVCC73_SYS_CAPD 0x2c /* RW-4R */ +#define NVCC73_SYS_CAPD_NUM_TELLTALE_REGIONS 4:0 /* RWIUF */ +#define NVCC73_SYS_CAPD_NUM_FROZEN_FRAME_REGIONS 12:8 /* RWIUF */ +#define NVCC73_SYS_CAPD_NUM_ROI 20:16 /* RWIUF */ +#define NVCC73_SYS_CAPD_AE_SDP_EXISTS 30:30 /* RWIVF */ +#define NVCC73_SYS_CAPD_AE_SDP_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPD_AE_SDP_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPD_AMSS_EXISTS 31:31 /* RWIVF */ +#define NVCC73_SYS_CAPD_AMSS_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPD_AMSS_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE 0x34 /* RW-4R */ +#define NVCC73_SYS_CAPE_VIRWIN0_EXISTS 0:0 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN1_EXISTS 1:1 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN2_EXISTS 2:2 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN3_EXISTS 3:3 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN4_EXISTS 4:4 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN5_EXISTS 5:5 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN6_EXISTS 6:6 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN7_EXISTS 7:7 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN8_EXISTS 8:8 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN8_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN8_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN9_EXISTS 9:9 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN9_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN9_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN10_EXISTS 10:10 /* RWIVF */ 
+#define NVCC73_SYS_CAPE_VIRWIN10_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN10_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN11_EXISTS 11:11 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN11_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN11_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN12_EXISTS 12:12 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN12_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN12_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN13_EXISTS 13:13 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN13_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN13_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN14_EXISTS 14:14 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN14_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN14_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN15_EXISTS 15:15 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN15_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN15_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN16_EXISTS 16:16 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN16_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN16_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN17_EXISTS 17:17 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN17_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN17_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN18_EXISTS 18:18 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN18_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN18_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN19_EXISTS 19:19 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN19_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN19_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN20_EXISTS 20:20 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN20_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN20_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN21_EXISTS 21:21 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN21_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN21_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN22_EXISTS 22:22 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN22_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN22_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN23_EXISTS 23:23 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN23_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN23_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN24_EXISTS 24:24 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN24_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN24_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN25_EXISTS 25:25 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN25_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN25_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN26_EXISTS 26:26 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN26_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN26_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN27_EXISTS 27:27 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN27_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN27_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN28_EXISTS 28:28 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN28_EXISTS_NO 0x00000000 /* 
RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN28_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN29_EXISTS 29:29 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN29_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN29_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN30_EXISTS 30:30 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN30_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN30_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN31_EXISTS 31:31 /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN31_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN31_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVCC73_SYS_CAPE_VIRWIN_EXISTS__SIZE_1 32 /* */ +#define NVCC73_SYS_CAPE_VIRWIN_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_SYS_CAPE_VIRWIN_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_MISC_CAPA_NUM_VMS 17:13 /* RWIUF */ +#define NVCC73_LINK_CAP 0x30 /* RW-4R */ +#define NVCC73_LINK_CAP_PHYCTRL0_EXISTS 0:0 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL1_EXISTS 1:1 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL2_EXISTS 2:2 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL3_EXISTS 3:3 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL4_EXISTS 4:4 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL5_EXISTS 5:5 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL6_EXISTS 6:6 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL7_EXISTS 7:7 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL8_EXISTS 8:8 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL8_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL8_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL9_EXISTS 9:9 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL9_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL9_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL10_EXISTS 10:10 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL10_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL10_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL11_EXISTS 11:11 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL11_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL11_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL12_EXISTS 12:12 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL12_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL12_EXISTS_YES 0x00000001 /* RW--V */ +#define 
NVCC73_LINK_CAP_PHYCTRL13_EXISTS 13:13 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL13_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL13_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL14_EXISTS 14:14 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL14_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL14_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL15_EXISTS 15:15 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL15_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL15_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL16_EXISTS 16:16 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL16_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL16_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL17_EXISTS 17:17 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL17_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL17_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL18_EXISTS 18:18 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL18_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL18_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL19_EXISTS 19:19 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL19_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL19_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL20_EXISTS 20:20 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL20_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL20_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL21_EXISTS 21:21 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL21_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL21_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL22_EXISTS 22:22 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL22_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL22_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL23_EXISTS 23:23 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL23_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL23_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL24_EXISTS 24:24 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL24_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL24_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL25_EXISTS 25:25 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL25_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL25_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL26_EXISTS 26:26 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL26_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL26_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL27_EXISTS 27:27 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL27_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL27_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL28_EXISTS 28:28 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL28_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL28_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL29_EXISTS 29:29 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL29_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL29_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL30_EXISTS 30:30 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL30_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL30_EXISTS_YES 0x00000001 /* RW--V */ +#define 
NVCC73_LINK_CAP_PHYCTRL31_EXISTS 31:31 /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL31_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL31_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVCC73_LINK_CAP_PHYCTRL_EXISTS__SIZE_1 32 /* */ +#define NVCC73_LINK_CAP_PHYCTRL_EXISTS_NO 0x00000000 /* RW--V */ +#define NVCC73_LINK_CAP_PHYCTRL_EXISTS_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA 0x10 /* RW-4R */ +#define NVCC73_IHUB_COMMON_CAPA_MEMPOOL_ENTRIES 15:0 /* RWIUF */ +#define NVCC73_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH 17:16 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_32B 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_64B 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_128B 0x00000002 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_256B 0x00000003 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_ROTATION 18:18 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_ROTATION_FALSE 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_ROTATION_TRUE 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_PLANAR 19:19 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_PLANAR_FALSE 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_PLANAR_TRUE 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_VGA 20:20 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_VGA_FALSE 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_VGA_TRUE 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION 21:21 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_FALSE 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_TRUE 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_MSCG 22:22 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_MSCG_FALSE 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_MSCG_TRUE 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH 23:23 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_FALSE 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_TRUE 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_MSCG_LPS 26:26 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_MSCG_LPS_FALSE 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_SUPPORT_MSCG_LPS_TRUE 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION 31:30 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_32B 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_64B 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_128B 0x00000002 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_256B 0x00000003 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPC 0x18 /* RW-4R */ +#define NVCC73_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE 1:0 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_32B 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_64B 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_128B 0x00000002 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_256B 0x00000003 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED 6:4 /* RWIVF */ +#define 
NVCC73_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_NONE 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_TWO 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_FOUR 0x00000002 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_EIGHT 0x00000003 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_SIXTEEN 0x00000004 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR 11:11 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR_FALSE 0x00000000 /* RWI-V */ +#define NVCC73_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR_TRUE 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP 12:12 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP_FALSE 0x00000000 /* RWI-V */ +#define NVCC73_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP_TRUE 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPC_SUPPORT_MEMPOOL_YUV_COMPRESSION 13:13 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPC_SUPPORT_MEMPOOL_YUV_COMPRESSION_FALSE 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPC_SUPPORT_MEMPOOL_YUV_COMPRESSION_TRUE 0x00000001 /* RWI-V */ +#define NVCC73_IHUB_COMMON_CAPE 0x24 /* RW-4R */ +#define NVCC73_IHUB_COMMON_CAPE_PHYWIN_BUFFER_SIZE 15:0 /* RWIUF */ +#define NVCC73_IHUB_COMMON_CAPF 0x28 /* RW-4R */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN0_SUPPORT_MULTI_TILE 0:0 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN0_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN0_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN1_SUPPORT_MULTI_TILE 1:1 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN1_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN1_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN2_SUPPORT_MULTI_TILE 2:2 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN2_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN2_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN3_SUPPORT_MULTI_TILE 3:3 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN3_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN3_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN4_SUPPORT_MULTI_TILE 4:4 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN4_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN4_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN5_SUPPORT_MULTI_TILE 5:5 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN5_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN5_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN6_SUPPORT_MULTI_TILE 6:6 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN6_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN6_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN7_SUPPORT_MULTI_TILE 7:7 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN7_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN7_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN8_SUPPORT_MULTI_TILE 8:8 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN8_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN8_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define 
NVCC73_IHUB_COMMON_CAPF_PHYWIN9_SUPPORT_MULTI_TILE 9:9 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN9_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN9_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN10_SUPPORT_MULTI_TILE 10:10 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN10_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN10_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN11_SUPPORT_MULTI_TILE 11:11 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN11_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN11_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN12_SUPPORT_MULTI_TILE 12:12 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN12_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN12_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN13_SUPPORT_MULTI_TILE 13:13 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN13_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN13_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN14_SUPPORT_MULTI_TILE 14:14 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN14_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN14_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN15_SUPPORT_MULTI_TILE 15:15 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN15_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN15_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN16_SUPPORT_MULTI_TILE 16:16 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN16_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN16_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN17_SUPPORT_MULTI_TILE 17:17 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN17_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN17_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN18_SUPPORT_MULTI_TILE 18:18 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN18_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN18_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN19_SUPPORT_MULTI_TILE 19:19 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN19_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN19_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN20_SUPPORT_MULTI_TILE 20:20 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN20_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN20_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN21_SUPPORT_MULTI_TILE 21:21 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN21_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN21_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN22_SUPPORT_MULTI_TILE 22:22 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN22_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN22_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define 
NVCC73_IHUB_COMMON_CAPF_PHYWIN23_SUPPORT_MULTI_TILE 23:23 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN23_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN23_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN24_SUPPORT_MULTI_TILE 24:24 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN24_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN24_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN25_SUPPORT_MULTI_TILE 25:25 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN25_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN25_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN26_SUPPORT_MULTI_TILE 26:26 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN26_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN26_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN27_SUPPORT_MULTI_TILE 27:27 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN27_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN27_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN28_SUPPORT_MULTI_TILE 28:28 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN28_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN28_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN29_SUPPORT_MULTI_TILE 29:29 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN29_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN29_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN30_SUPPORT_MULTI_TILE 30:30 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN30_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN30_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN31_SUPPORT_MULTI_TILE 31:31 /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN31_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN31_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN_SUPPORT_MULTI_TILE(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN_SUPPORT_MULTI_TILE__SIZE_1 32 /* */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN_SUPPORT_MULTI_TILE_NO 0x00000000 /* RW--V */ +#define NVCC73_IHUB_COMMON_CAPF_PHYWIN_SUPPORT_MULTI_TILE_YES 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA(i) (0x680+(i)*32) /* RW-4A */ +#define NVCC73_POSTCOMP_HDR_CAPA__SIZE_1 8 /* */ +#define NVCC73_POSTCOMP_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPA_OCSC0_PRESENT 16:16 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPA_OCSC0_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_OCSC0_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_OCSC1_PRESENT 17:17 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPA_OCSC1_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_OCSC1_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_SCLR_PRESENT 18:18 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_HCLPF_PRESENT 
19:19 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPA_HCLPF_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_HCLPF_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_DTH_PRESENT 20:20 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPA_DTH_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_DTH_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_OSCAN_PRESENT 21:21 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPA_OSCAN_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_OSCAN_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_DSC_PRESENT 22:22 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPA_DSC_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_DSC_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_VFILTER_PRESENT 23:23 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPA_VFILTER_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_VFILTER_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_LTM_PRESENT 25:25 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPA_LTM_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPA_LTM_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPB(i) (0x684+(i)*32) /* RW-4A */ +#define NVCC73_POSTCOMP_HDR_CAPB__SIZE_1 8 /* */ +#define NVCC73_POSTCOMP_HDR_CAPB_VGA 0:0 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPB_VGA_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPB_VGA_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPB_OLUT_SZ 12:1 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPB_OLUT_LOGNR 15:13 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPB_OLUT_SFCLOAD 17:17 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPB_OLUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPB_OLUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPB_OLUT_DIRECT 18:18 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPB_OLUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPB_OLUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPC(i) (0x688+(i)*32) /* RW-4A */ +#define NVCC73_POSTCOMP_HDR_CAPC__SIZE_1 8 /* */ +#define NVCC73_POSTCOMP_HDR_CAPC_OCSC0_PRECISION 4:0 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPC_OCSC0_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPC_OCSC0_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPC_OCSC0_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPC_OCSC1_PRECISION 12:8 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPC_OCSC1_UNITY_CLAMP 13:13 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPC_OCSC1_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPC_OCSC1_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPC_SCLR_SF_PRECISION 20:16 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPC_SCLR_CI_PRECISION 24:21 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPC_SCLR_VS_EXT_RGB 25:25 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPC_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPC_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define 
NVCC73_POSTCOMP_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPD(i) (0x68c+(i)*32) /* RW-4A */ +#define NVCC73_POSTCOMP_HDR_CAPD__SIZE_1 8 /* */ +#define NVCC73_POSTCOMP_HDR_CAPD_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPD_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPE(i) (0x690+(i)*32) /* RW-4A */ +#define NVCC73_POSTCOMP_HDR_CAPE__SIZE_1 8 /* */ +#define NVCC73_POSTCOMP_HDR_CAPE_DSC_MAXLINEWIDTH 15:0 /* RWIUF */ +#define NVCC73_POSTCOMP_HDR_CAPE_DSC_NATIVE422 16:16 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPE_DSC_NATIVE422_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPE_DSC_NATIVE422_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPE_DSC_NATIVE420 17:17 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPE_DSC_NATIVE420_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPE_DSC_NATIVE420_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPE_3DLUT_PRESENT 18:18 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPE_3DLUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPE_3DLUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPE_3DLUT_MAX_SIZE 21:19 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPE_3DLUT_MAX_SIZE_9x9x9 0x00000000 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPE_3DLUT_MAX_SIZE_17x17x17 0x00000001 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPE_3DLUT_MAX_SIZE_25x25x25 0x00000002 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPE_3DLUT_MAX_SIZE_33x33x33 0x00000003 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPE_3DLUT_MAX_SIZE_RESERVED_4 0x00000004 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPE_3DLUT_MAX_SIZE_RESERVED_5 0x00000005 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPE_3DLUT_MAX_SIZE_RESERVED_6 0x00000006 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPE_3DLUT_MAX_SIZE_RESERVED_7 0x00000007 /* RW--V */ +#define NVCC73_POSTCOMP_HDR_CAPE_3DLUT_NUM_CURVES 23:22 /* RWIUF */ +#define NVCC73_POSTCOMP_HDR_CAPF(i) (0x694+(i)*32) /* RW-4A */ +#define NVCC73_POSTCOMP_HDR_CAPF__SIZE_1 8 /* */ +#define NVCC73_POSTCOMP_HDR_CAPF_VFILTER_MAX_PIXELS 15:0 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPF_LTM_MAX_PIXELS 31:16 /* RWIVF */ +#define NVCC73_POSTCOMP_HDR_CAPG(i) (0x698+(i)*32) /* RW-4A */ +#define NVCC73_POSTCOMP_HDR_CAPG__SIZE_1 8 /* */ +#define NVCC73_POSTCOMP_HDR_CAPG_CMI_SZ 11:0 /* RWIUF */ +#define NVCC73_POSTCOMP_HDR_CAPG_CMI_LOGNR 14:12 /* RWIUF */ +#define NVCC73_POSTCOMP_HDR_CAPG_CMO_SZ 26:15 /* RWIUF */ +#define NVCC73_POSTCOMP_HDR_CAPG_CMO_LOGNR 29:27 /* RWIUF */ +#define NVCC73_SOR_CAP(i) (0x144+(i)*8) /* RW-4A */ +#define NVCC73_SOR_CAP__SIZE_1 8 /* */ +#define NVCC73_SOR_CAP_SINGLE_LVDS_18 0:0 /* RWIVF */ +#define NVCC73_SOR_CAP_SINGLE_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_SINGLE_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_SINGLE_LVDS_24 1:1 /* RWIVF */ +#define NVCC73_SOR_CAP_SINGLE_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_SINGLE_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_DUAL_LVDS_18 2:2 /* RWIVF */ +#define NVCC73_SOR_CAP_DUAL_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_DUAL_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_DUAL_LVDS_24 3:3 /* RWIVF */ +#define NVCC73_SOR_CAP_DUAL_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_DUAL_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_DP_DUAL_MODE 4:4 /* RWIVF */ 
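The high:low tokens used throughout these headers (for example, NVCC73_POSTCOMP_HDR_CAPE_3DLUT_MAX_SIZE is 21:19) are bit ranges meant to be consumed through ternary-based field macros such as the DRF family in nvmisc.h: (1 ? 21:19) evaluates to the high bit index and (0 ? 21:19) to the low bit index. Below is a minimal, self-contained sketch of decoding one capability field this way; the FLD_* names and the lut3d_max_size helper are local stand-ins for illustration, not SDK macros.

/* Local stand-ins for the DRF-style helpers in nvmisc.h. The outer
 * parentheses make the ternary bind before surrounding arithmetic. */
#define FLD_HI(f)      (1 ? f)   /* high bit index of the field */
#define FLD_LO(f)      (0 ? f)   /* low bit index of the field  */
#define FLD_MASK(f)    ((0xFFFFFFFFu >> (31 - FLD_HI(f) + FLD_LO(f))) << FLD_LO(f))
#define FLD_VAL(f, v)  (((v) & FLD_MASK(f)) >> FLD_LO(f))

/* Decode the maximum 3D LUT size from a CAPE register read:
 * 0 -> 9x9x9, 1 -> 17x17x17, 2 -> 25x25x25, 3 -> 33x33x33. */
static inline unsigned int lut3d_max_size(unsigned int capeValue)
{
    return FLD_VAL(NVCC73_POSTCOMP_HDR_CAPE_3DLUT_MAX_SIZE, capeValue);
}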
+#define NVCC73_SOR_CAP_DP_DUAL_MODE_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_DP_DUAL_MODE_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_SINGLE_TMDS_A 8:8 /* RWIVF */ +#define NVCC73_SOR_CAP_SINGLE_TMDS_A_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_SINGLE_TMDS_A_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_SINGLE_TMDS_B 9:9 /* RWIVF */ +#define NVCC73_SOR_CAP_SINGLE_TMDS_B_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_SINGLE_TMDS_B_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_DUAL_TMDS 11:11 /* RWIVF */ +#define NVCC73_SOR_CAP_DUAL_TMDS_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_DUAL_TMDS_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_DISPLAY_OVER_PCIE 13:13 /* RWIVF */ +#define NVCC73_SOR_CAP_DISPLAY_OVER_PCIE_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_DISPLAY_OVER_PCIE_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_DP_TUNNELING_OVER_USB4 15:15 /* RWIVF */ +#define NVCC73_SOR_CAP_DP_TUNNELING_OVER_USB4_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_DP_TUNNELING_OVER_USB4_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_SDI 16:16 /* RWIVF */ +#define NVCC73_SOR_CAP_SDI_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_SDI_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_DP_DUAL_MST 23:23 /* RWIVF */ +#define NVCC73_SOR_CAP_DP_DUAL_MST_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_DP_DUAL_MST_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_DP_A 24:24 /* RWIVF */ +#define NVCC73_SOR_CAP_DP_A_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_DP_A_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_DP_B 25:25 /* RWIVF */ +#define NVCC73_SOR_CAP_DP_B_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_DP_B_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_DP_INTERLACE 26:26 /* RWIVF */ +#define NVCC73_SOR_CAP_DP_INTERLACE_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_DP_INTERLACE_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_DP_8_LANES 27:27 /* RWIVF */ +#define NVCC73_SOR_CAP_DP_8_LANES_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_DP_8_LANES_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_HDMI_FRL 28:28 /* RWIVF */ +#define NVCC73_SOR_CAP_HDMI_FRL_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_HDMI_FRL_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_HDMI_FRL_YUV422 29:29 /* RWIVF */ +#define NVCC73_SOR_CAP_HDMI_FRL_YUV422_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_HDMI_FRL_YUV422_TRUE 0x00000001 /* RW--V */ +#define NVCC73_SOR_CAP_DP_128B132B 30:30 /* RWIVF */ +#define NVCC73_SOR_CAP_DP_128B132B_FALSE 0x00000000 /* RW--V */ +#define NVCC73_SOR_CAP_DP_128B132B_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA(i) (0x780+(i)*32) /* RW-4A */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA__SIZE_1 32 /* */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_ALPHA_WIDTH 13:10 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT 16:16 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT 17:17 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define 
NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT 18:18 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT 19:19 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT 20:20 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT 21:21 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT 22:22 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT 23:23 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT 24:24 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPB(i) (0x784+(i)*32) /* RW-4A */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPB__SIZE_1 32 /* */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPB_FMT_PRECISION 4:0 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGSZ 9:6 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGNR 12:10 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD 14:14 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT 15:15 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC(i) (0x788+(i)*32) /* RW-4A */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC__SIZE_1 32 /* */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_PRECISION 4:0 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_FVLUT 13:13 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_FVLUT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_FVLUT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT 15:15 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_PRECISION 20:16 /* RWIVF */ +#define 
NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP 21:21 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD(i) (0x78c+(i)*32) /* RW-4A */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD__SIZE_1 32 /* */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGSZ 3:0 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGNR 6:4 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD 8:8 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT 9:9 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_SF_PRECISION 16:12 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_CI_PRECISION 20:17 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB 21:21 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA 22:22 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE(i) (0x790+(i)*32) /* RW-4A */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE__SIZE_1 32 /* */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_PRECISION 4:0 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_FVLUT 13:13 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_FVLUT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_FVLUT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT 15:15 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_PRECISION 20:16 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP 21:21 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPF(i) (0x794+(i)*32) /* RW-4A */ +#define 
NVCC73_PRECOMP_WIN_PIPE_HDR_CAPF__SIZE_1 32 /* */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */ +#define NVCC73_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */ +#define NVCC73_SOR_CLK_CAP(i) (0x608+(i)*4) /* RW-4A */ +#define NVCC73_SOR_CLK_CAP__SIZE_1 8 /* */ +#define NVCC73_SOR_CLK_CAP_DP_MAX 7:0 /* RWIUF */ +#define NVCC73_SOR_CLK_CAP_TMDS_MAX 23:16 /* RWIUF */ +#define NVCC73_SOR_CLK_CAP_LVDS_MAX 31:24 /* RWIUF */ + +#ifdef __cplusplus +}; +#endif /* extern C */ +#endif //_clcc73_h_ diff --git a/src/common/sdk/nvidia/inc/class/clcc7a.h b/src/common/sdk/nvidia/inc/class/clcc7a.h new file mode 100644 index 0000000..1596460 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clcc7a.h @@ -0,0 +1,216 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clcc7a__h_ +#define _clcc7a__h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVCC7A_CURSOR_IMM_CHANNEL_PIO (0x0000CC7A) + +typedef volatile struct _clcc7a_tag0 { + NvV32 Reserved00[0x2]; + NvV32 Free; // 0x00000008 - 0x0000000B + NvV32 Reserved01[0x7D]; + NvV32 Update; // 0x00000200 - 0x00000203 + NvV32 SetInterlockFlags; // 0x00000204 - 0x00000207 + NvV32 SetCursorHotSpotPointOut[2]; // 0x00000208 - 0x0000020F + NvV32 SetWindowInterlockFlags; // 0x00000210 - 0x00000213 + NvV32 Reserved02[0x3F7B]; +} NVCC7ADispCursorImmControlPio; + +#define NVCC7A_FREE (0x00000008) +#define NVCC7A_FREE_COUNT 5:0 +#define NVCC7A_UPDATE (0x00000200) +#define NVCC7A_UPDATE_RELEASE_ELV 0:0 +#define NVCC7A_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVCC7A_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVCC7A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVCC7A_UPDATE_FORCE_FULLSCREEN 12:12 +#define NVCC7A_UPDATE_FORCE_FULLSCREEN_FALSE (0x00000000) +#define NVCC7A_UPDATE_FORCE_FULLSCREEN_TRUE (0x00000001) +#define NVCC7A_SET_INTERLOCK_FLAGS (0x00000204) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define 
NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVCC7A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVCC7A_SET_CURSOR_HOT_SPOT_POINT_OUT(b) (0x00000208 + (b)*0x00000004) +#define NVCC7A_SET_CURSOR_HOT_SPOT_POINT_OUT_X 15:0 +#define NVCC7A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y 31:16 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS (0x00000210) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define 
NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define 
NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVCC7A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clcc7a_h + \ No newline at end of file diff --git a/src/common/sdk/nvidia/inc/class/clcc7b.h b/src/common/sdk/nvidia/inc/class/clcc7b.h new file mode 100644 index 0000000..a97a324 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clcc7b.h @@ -0,0 +1,70 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clcc7b_h_
+#define _clcc7b_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVCC7B_WINDOW_IMM_CHANNEL_DMA (0x0000CC7B)
+
+// dma opcode instructions
+#define NVCC7B_DMA
+#define NVCC7B_DMA_OPCODE 31:29
+#define NVCC7B_DMA_OPCODE_METHOD 0x00000000
+#define NVCC7B_DMA_OPCODE_JUMP 0x00000001
+#define NVCC7B_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVCC7B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVCC7B_DMA_METHOD_COUNT 27:18
+#define NVCC7B_DMA_METHOD_OFFSET 15:2
+#define NVCC7B_DMA_DATA 31:0
+#define NVCC7B_DMA_DATA_NOP 0x00000000
+#define NVCC7B_DMA_JUMP_OFFSET 15:2
+#define NVCC7B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NVCC7B_PUT (0x00000000)
+#define NVCC7B_PUT_PTR 13:0
+#define NVCC7B_GET (0x00000004)
+#define NVCC7B_GET_PTR 13:0
+#define NVCC7B_UPDATE (0x00000200)
+#define NVCC7B_UPDATE_RELEASE_ELV 0:0
+#define NVCC7B_UPDATE_RELEASE_ELV_FALSE (0x00000000)
+#define NVCC7B_UPDATE_RELEASE_ELV_TRUE (0x00000001)
+#define NVCC7B_UPDATE_INTERLOCK_WITH_WINDOW 1:1
+#define NVCC7B_UPDATE_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVCC7B_UPDATE_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVCC7B_UPDATE_FORCE_FULLSCREEN 4:4
+#define NVCC7B_UPDATE_FORCE_FULLSCREEN_FALSE (0x00000000)
+#define NVCC7B_UPDATE_FORCE_FULLSCREEN_TRUE (0x00000001)
+#define NVCC7B_SET_POINT_OUT(b) (0x00000208 + (b)*0x00000004)
+#define NVCC7B_SET_POINT_OUT_X 15:0
+#define NVCC7B_SET_POINT_OUT_Y 31:16
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clcc7b_h
diff --git a/src/common/sdk/nvidia/inc/class/clcc7d.h b/src/common/sdk/nvidia/inc/class/clcc7d.h
new file mode 100644
index 0000000..948ca11
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clcc7d.h
@@ -0,0 +1,2007 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2003-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _clcc7d_h_ +#define _clcc7d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVCC7D_CORE_CHANNEL_DMA (0x0000CC7D) + +#define NV_DISP_NOTIFIER 0x00000000 +#define NV_DISP_NOTIFIER_SIZEOF 0x00000010 +#define NV_DISP_NOTIFIER__0 0x00000000 +#define NV_DISP_NOTIFIER__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFIER__0_FIELD 8:8 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE 9:9 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_NON_TEARING 0x00000000 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_IMMEDIATE 0x00000001 +#define NV_DISP_NOTIFIER__0_R1 15:10 +#define NV_DISP_NOTIFIER__0_R2 23:16 +#define NV_DISP_NOTIFIER__0_R3 29:24 +#define NV_DISP_NOTIFIER__0_STATUS 31:30 +#define NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_NOTIFIER__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_NOTIFIER__0_STATUS_FINISHED 0x00000002 +#define NV_DISP_NOTIFIER__1 0x00000001 +#define NV_DISP_NOTIFIER__1_R4 31:0 +#define NV_DISP_NOTIFIER__2 0x00000002 +#define NV_DISP_NOTIFIER__2_TIMESTAMP_LO 31:0 +#define NV_DISP_NOTIFIER__3 0x00000003 +#define NV_DISP_NOTIFIER__3_TIMESTAMP_HI 31:0 + + +// dma opcode instructions +#define NVCC7D_DMA +#define NVCC7D_DMA_OPCODE 31:29 +#define NVCC7D_DMA_OPCODE_METHOD 0x00000000 +#define NVCC7D_DMA_OPCODE_JUMP 0x00000001 +#define NVCC7D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVCC7D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVCC7D_DMA_METHOD_COUNT 27:18 +#define NVCC7D_DMA_METHOD_OFFSET 15:2 +#define NVCC7D_DMA_DATA 31:0 +#define NVCC7D_DMA_DATA_NOP 0x00000000 +#define NVCC7D_DMA_JUMP_OFFSET 15:2 +#define NVCC7D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// if cap SUPPORT_FLEXIBLE_WIN_MAPPING is FALSE, this define can be used to obtain which head a window is mapped to +#define NVC37D_WINDOW_MAPPED_TO_HEAD(w) ((w)>>1) +#define NVC37D_GET_VALID_WINDOWMASK_FOR_HEAD(h) ((1<<((h)*2)) | (1<<((h)*2+1))) + +// class methods +#define NVCC7D_PUT (0x00000000) +#define NVCC7D_PUT_PTR 13:0 +#define NVCC7D_GET (0x00000004) +#define NVCC7D_GET_PTR 13:0 +#define NVCC7D_UPDATE (0x00000200) +#define NVCC7D_UPDATE_SPECIAL_HANDLING 21:20 +#define NVCC7D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NVCC7D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NVCC7D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NVCC7D_UPDATE_SPECIAL_HANDLING_REASON 19:12 +#define NVCC7D_UPDATE_INHIBIT_INTERRUPTS 24:24 +#define NVCC7D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NVCC7D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NVCC7D_UPDATE_RELEASE_ELV 0:0 +#define NVCC7D_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVCC7D_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define 
NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVCC7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVCC7D_UPDATE_FORCE_FULLSCREEN 28:28 +#define NVCC7D_UPDATE_FORCE_FULLSCREEN_FALSE (0x00000000) +#define NVCC7D_UPDATE_FORCE_FULLSCREEN_TRUE (0x00000001) +#define NVCC7D_SET_NOTIFIER_CONTROL (0x0000020C) +#define NVCC7D_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVCC7D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVCC7D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVCC7D_SET_NOTIFIER_CONTROL_NOTIFY 12:12 +#define NVCC7D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NVCC7D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NVCC7D_SET_CONTROL (0x00000210) +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN(i) ((i)+0):((i)+0) +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN__SIZE_1 4 +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN_DISABLE (0x00000000) +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN_ENABLE (0x00000001) +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN0 0:0 +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN0_DISABLE (0x00000000) +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN0_ENABLE (0x00000001) +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN1 1:1 +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN1_DISABLE (0x00000000) +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN1_ENABLE (0x00000001) +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN2 2:2 +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN2_DISABLE (0x00000000) +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN2_ENABLE (0x00000001) +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN3 3:3 +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN3_DISABLE (0x00000000) +#define NVCC7D_SET_CONTROL_FLIP_LOCK_PIN3_ENABLE (0x00000001) +#define NVCC7D_SET_INTERLOCK_FLAGS (0x00000218) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) 
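The NVCC7D_DMA_* definitions above fix the pushbuffer encoding for this DMA-class channel: bits 31:29 of a header dword select the opcode, 27:18 carry the data-dword count, and 15:2 carry the method offset in dwords (equivalently, the method's byte offset with its two low bits clear), followed by that many data dwords. A minimal sketch of composing one method under those definitions; nvcc7d_method_hdr is an illustrative helper, not an SDK function.

/* Pack an incrementing-method header: OPCODE_METHOD (0) in bits 31:29,
 * METHOD_COUNT in 27:18, METHOD_OFFSET in 15:2. */
static inline unsigned int nvcc7d_method_hdr(unsigned int nDataDwords,
                                             unsigned int methodByteOffset)
{
    return (0x0u << 29)                    /* NVCC7D_DMA_OPCODE_METHOD   */
         | ((nDataDwords & 0x3FFu) << 18)  /* NVCC7D_DMA_METHOD_COUNT    */
         | (methodByteOffset & 0xFFFCu);   /* NVCC7D_DMA_METHOD_OFFSET   */
}

/* For example, a one-dword UPDATE that releases ELV would be pushed as:
 *   push[n++] = nvcc7d_method_hdr(1, NVCC7D_UPDATE);
 *   push[n++] = NVCC7D_UPDATE_RELEASE_ELV_TRUE;   // field 0:0 = TRUE
 */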
+#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVCC7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define 
NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVCC7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+#define NVCC7D_GET_RG_SCAN_LINE(b) (0x00000220 + (b)*0x00000004)
+#define NVCC7D_GET_RG_SCAN_LINE_LINE 15:0
+#define NVCC7D_GET_RG_SCAN_LINE_VBLANK 16:16
+#define NVCC7D_GET_RG_SCAN_LINE_VBLANK_FALSE (0x00000000)
+#define NVCC7D_GET_RG_SCAN_LINE_VBLANK_TRUE (0x00000001)
+#define NVCC7D_SET_GET_BLANKING_CTRL(b) (0x00000240 + (b)*0x00000004)
+#define NVCC7D_SET_GET_BLANKING_CTRL_BLANK 0:0
+#define NVCC7D_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000)
+#define NVCC7D_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001)
+#define NVCC7D_SET_GET_BLANKING_CTRL_UNBLANK 1:1
+#define NVCC7D_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000)
+#define NVCC7D_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001)
+#define NVCC7D_SET_SURFACE_ADDRESS_HI_NOTIFIER (0x00000260)
+#define NVCC7D_SET_SURFACE_ADDRESS_HI_NOTIFIER_ADDRESS_HI 31:0
+#define NVCC7D_SET_SURFACE_ADDRESS_LO_NOTIFIER (0x00000264)
+#define NVCC7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ADDRESS_LO 31:4
+#define NVCC7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET 3:2
+#define NVCC7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_IOVA (0x00000000)
+#define NVCC7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCC7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCC7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCC7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE 0:0
+#define NVCC7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_DISABLE (0x00000000)
+#define NVCC7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_ENABLE (0x00000001)
+
+#define NVCC7D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NVCC7D_SOR_SET_CONTROL_OWNER_MASK 7:0
+#define NVCC7D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NVCC7D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NVCC7D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NVCC7D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NVCC7D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NVCC7D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010)
+#define NVCC7D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020)
+#define NVCC7D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040)
+#define NVCC7D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080)
+#define NVCC7D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NVCC7D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NVCC7D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NVCC7D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NVCC7D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NVCC7D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NVCC7D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NVCC7D_SOR_SET_CONTROL_PROTOCOL_HDMI_FRL (0x0000000C)
+#define NVCC7D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NVCC7D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16
+#define NVCC7D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVCC7D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVCC7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NVCC7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NVCC7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NVCC7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+#define NVCC7D_SOR_SET_CUSTOM_REASON(a) (0x00000304 + (a)*0x00000020)
+#define NVCC7D_SOR_SET_CUSTOM_REASON_CODE 31:0
+#define NVCC7D_SOR_SET_SW_SPARE_A(a) (0x00000308 + (a)*0x00000020)
+#define NVCC7D_SOR_SET_SW_SPARE_A_CODE 31:0
+#define NVCC7D_SOR_SET_SW_SPARE_B(a) (0x0000030C + (a)*0x00000020)
+#define NVCC7D_SOR_SET_SW_SPARE_B_CODE 31:0
+
+#define NVCC7D_DSI_SET_CONTROL(a) (0x00000500 + (a)*0x00000020)
+#define NVCC7D_DSI_SET_CONTROL_OWNER_MASK 7:0
+#define NVCC7D_DSI_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NVCC7D_DSI_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NVCC7D_DSI_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NVCC7D_DSI_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NVCC7D_DSI_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NVCC7D_DSI_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010)
+#define NVCC7D_DSI_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020)
+#define NVCC7D_DSI_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040)
+#define NVCC7D_DSI_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080)
+#define NVCC7D_DSI_SET_CUSTOM_REASON(a) (0x00000504 + (a)*0x00000020)
+#define NVCC7D_DSI_SET_CUSTOM_REASON_CODE 31:0
+#define NVCC7D_DSI_SET_SW_SPARE_A(a) (0x00000508 + (a)*0x00000020)
+#define NVCC7D_DSI_SET_SW_SPARE_A_CODE 31:0
+#define NVCC7D_DSI_SET_SW_SPARE_B(a) (0x0000050C + (a)*0x00000020)
+#define NVCC7D_DSI_SET_SW_SPARE_B_CODE 31:0
+
+#define NVCC7D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080)
+#define NVCC7D_WINDOW_SET_CONTROL_OWNER 3:0
+#define NVCC7D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i))
+#define NVCC7D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8
+#define NVCC7D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000)
+#define NVCC7D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001)
+#define NVCC7D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002)
+#define NVCC7D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003)
+#define NVCC7D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004)
+#define NVCC7D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005)
+#define NVCC7D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006)
+#define NVCC7D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007)
+#define NVCC7D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F)
+#define NVCC7D_WINDOW_SET_CONTROL_HIDE 8:8
+#define NVCC7D_WINDOW_SET_CONTROL_HIDE_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_CONTROL_HIDE_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS 9:9
+#define NVCC7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME 10:10
+#define NVCC7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(a) (0x0000100C + (a)*0x00000080)
+#define NVCC7D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_HORIZONTAL 15:0
+#define NVCC7D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_VERTICAL 31:16
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080)
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED 30:30
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT 26:25
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_PITCH_BLOCKLINEAR (0x00000000)
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_PITCH (0x00000001)
+#define NVCC7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_BLOCKLINEAR (0x00000002)
+#define NVCC7D_WINDOW_SET_PHYSICAL(a) (0x00001014 + (a)*0x00000080)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW 31:0
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_NONE (0x00000000)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW0 (0x00000001)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW1 (0x00000002)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW2 (0x00000004)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW3 (0x00000008)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW4 (0x00000010)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW5 (0x00000020)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW6 (0x00000040)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW7 (0x00000080)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW8 (0x00000100)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW9 (0x00000200)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW10 (0x00000400)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW11 (0x00000800)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW12 (0x00001000)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW13 (0x00002000)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW14 (0x00004000)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW15 (0x00008000)
+#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW16 (0x00010000) +#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW17 (0x00020000) +#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW18 (0x00040000) +#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW19 (0x00080000) +#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW20 (0x00100000) +#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW21 (0x00200000) +#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW22 (0x00400000) +#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW23 (0x00800000) +#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW24 (0x01000000) +#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW25 (0x02000000) +#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW26 (0x04000000) +#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW27 (0x08000000) +#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW28 (0x10000000) +#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW29 (0x20000000) +#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW30 (0x40000000) +#define NVCC7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW31 (0x80000000) +#define NVCC7D_WINDOW_SET_GUEST_OVERRIDES(a) (0x00001024 + (a)*0x00000080) +#define NVCC7D_WINDOW_SET_GUEST_OVERRIDES_SET_SIZE_OUT 0:0 +#define NVCC7D_WINDOW_SET_GUEST_OVERRIDES_SET_SIZE_OUT_FALSE (0x00000000) +#define NVCC7D_WINDOW_SET_GUEST_OVERRIDES_SET_SIZE_OUT_TRUE (0x00000001) +#define NVCC7D_WINDOW_SET_GUEST_OVERRIDES_SET_COMPOSITION_CONTROL 1:1 +#define NVCC7D_WINDOW_SET_GUEST_OVERRIDES_SET_COMPOSITION_CONTROL_FALSE (0x00000000) +#define NVCC7D_WINDOW_SET_GUEST_OVERRIDES_SET_COMPOSITION_CONTROL_TRUE (0x00000001) +#define NVCC7D_WINDOW_SET_GUEST_OVERRIDES_SET_COMPOSITION_CONSTANT_ALPHA 2:2 +#define NVCC7D_WINDOW_SET_GUEST_OVERRIDES_SET_COMPOSITION_CONSTANT_ALPHA_FALSE (0x00000000) +#define NVCC7D_WINDOW_SET_GUEST_OVERRIDES_SET_COMPOSITION_CONSTANT_ALPHA_TRUE (0x00000001) +#define NVCC7D_WINDOW_SET_GUEST_OVERRIDES_SET_COMPOSITION_FACTOR_SELECT 3:3 +#define NVCC7D_WINDOW_SET_GUEST_OVERRIDES_SET_COMPOSITION_FACTOR_SELECT_FALSE (0x00000000) +#define NVCC7D_WINDOW_SET_GUEST_OVERRIDES_SET_COMPOSITION_FACTOR_SELECT_TRUE (0x00000001) +#define NVCC7D_WINDOW_SET_GUEST_SIZE_OUT(a) (0x00001028 + (a)*0x00000080) +#define NVCC7D_WINDOW_SET_GUEST_SIZE_OUT_WIDTH 15:0 +#define NVCC7D_WINDOW_SET_GUEST_SIZE_OUT_HEIGHT 31:16 +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_CONTROL(a) (0x0000102C + (a)*0x00000080) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_CONTROL_COLOR_KEY_SELECT 1:0 +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE (0x00000000) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC (0x00000001) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST (0x00000002) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_CONTROL_DEPTH 11:4 +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_CONTROL_BYPASS 16:16 +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_CONTROL_BYPASS_DISABLE (0x00000000) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_CONTROL_BYPASS_ENABLE (0x00000001) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_CONSTANT_ALPHA(a) (0x00001030 + (a)*0x00000080) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_CONSTANT_ALPHA_K1 7:0 +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_CONSTANT_ALPHA_K2 15:8 +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT(a) (0x00001034 + (a)*0x00000080) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT 3:0 +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define 
NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT 7:4 +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT 11:8 +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT 15:12 +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT 19:16 +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define 
NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT 23:20 +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT 27:24 +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT 31:28 +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVCC7D_WINDOW_SET_GUEST_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) + +#define NVCC7D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NVCC7D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NVCC7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NVCC7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NVCC7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003) +#define NVCC7D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3 +#define NVCC7D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V 4:4 +#define NVCC7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28 +#define NVCC7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NVCC7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2 +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define 
NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3 +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4 +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_444 (0x00000009) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444NP (0x0000000A) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24 +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12 +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26 +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016) +#define 
NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 (0x00000017) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F) +#define NVCC7D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_CONTROL_STRUCTURE 1:0 +#define NVCC7D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2 +#define NVCC7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_YUV420PACKER 3:3 +#define NVCC7D_HEAD_SET_CONTROL_YUV420PACKER_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_YUV420PACKER_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_MODE 11:10 +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_NO_LOCK (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN 8:4 +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 
+#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVCC7D_HEAD_SET_CONTROL_SINK_LOCKOUT_WINDOW 15:12 +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE 23:22 +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_NO_LOCK (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN 20:16 +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define 
NVCC7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN 28:24 +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVCC7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVCC7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE 30:30 +#define NVCC7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE 31:31 +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00002014 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define 
NVCC7D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVCC7D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NVCC7D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_DITHER_CONTROL_BITS 5:4 +#define NVCC7D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000) +#define NVCC7D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001) +#define NVCC7D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002) +#define NVCC7D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003) +#define NVCC7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2 +#define NVCC7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_DITHER_CONTROL_MODE 10:8 +#define NVCC7D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NVCC7D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NVCC7D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NVCC7D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NVCC7D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NVCC7D_HEAD_SET_DITHER_CONTROL_MODE_ROUND (0x00000005) +#define NVCC7D_HEAD_SET_DITHER_CONTROL_PHASE 13:12 +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x0000201C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 0:0 +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING 4:4 +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 9:8 +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NVCC7D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000800 + (b)*0x00000004) +#define NVCC7D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NVCC7D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(a) (0x0000202C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVCC7D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0 +#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000) +#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001) +#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002) +#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003) 
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004)
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000)
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001)
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED 5:5
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED_FALSE (0x00000000)
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED_TRUE (0x00000001)
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED 16:16
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+#define NVCC7D_HEAD_SET_HEAD_USAGE_BOUNDS_ELV_START 31:17
+#define NVCC7D_HEAD_SET_STALL_LOCK(a) (0x00002034 + (a)*0x00000800)
+#define NVCC7D_HEAD_SET_STALL_LOCK_ENABLE 0:0
+#define NVCC7D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000)
+#define NVCC7D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001)
+#define NVCC7D_HEAD_SET_STALL_LOCK_MODE 2:2
+#define NVCC7D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000)
+#define NVCC7D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN 8:4
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCC7D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCC7D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 12:12
+#define NVCC7D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000)
+#define NVCC7D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001)
+#define NVCC7D_HEAD_SET_STALL_LOCK_TEPOLARITY 14:14
+#define NVCC7D_HEAD_SET_STALL_LOCK_TEPOLARITY_POSITIVE_TRUE (0x00000000)
+#define NVCC7D_HEAD_SET_STALL_LOCK_TEPOLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVCC7D_HEAD_SET_STALL_LOCK_UNSTALL_SYNC_ADVANCE 25:16
+#define NVCC7D_HEAD_SET_LOCK_CHAIN(a) (0x00002044 + (a)*0x00000800)
+#define NVCC7D_HEAD_SET_LOCK_CHAIN_POSITION 3:0
+#define NVCC7D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x00002048 + (a)*0x00000800)
+#define NVCC7D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NVCC7D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#define NVCC7D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000800)
+#define NVCC7D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NVCC7D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NVCC7D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000800)
+#define NVCC7D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NVCC7D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NVCC7D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x0000205C + (a)*0x00000800)
+#define NVCC7D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0
+#define NVCC7D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16
+#define NVCC7D_HEAD_SET_TILE_MASK(a) (0x00002060 + (a)*0x00000800)
+#define NVCC7D_HEAD_SET_TILE_MASK_TILE 7:0
+#define NVCC7D_HEAD_SET_TILE_MASK_TILE_NONE (0x00000000)
+#define NVCC7D_HEAD_SET_TILE_MASK_TILE_TILE0 (0x00000001)
+#define NVCC7D_HEAD_SET_TILE_MASK_TILE_TILE1 (0x00000002)
+#define NVCC7D_HEAD_SET_TILE_MASK_TILE_TILE2 (0x00000004)
+#define NVCC7D_HEAD_SET_TILE_MASK_TILE_TILE3 (0x00000008)
+#define NVCC7D_HEAD_SET_TILE_MASK_TILE_TILE4 (0x00000010)
+#define NVCC7D_HEAD_SET_TILE_MASK_TILE_TILE5 (0x00000020)
+#define NVCC7D_HEAD_SET_TILE_MASK_TILE_TILE6 (0x00000040)
+#define NVCC7D_HEAD_SET_TILE_MASK_TILE_TILE7 (0x00000080)
+#define NVCC7D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000800)
+#define NVCC7D_HEAD_SET_RASTER_SIZE_WIDTH 15:0
+#define NVCC7D_HEAD_SET_RASTER_SIZE_HEIGHT 31:16
+#define NVCC7D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000800)
+#define NVCC7D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NVCC7D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NVCC7D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000800)
+#define NVCC7D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NVCC7D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NVCC7D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000800)
+#define NVCC7D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NVCC7D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NVCC7D_HEAD_SET_OVERSCAN_COLOR(a) (0x00002078 + (a)*0x00000800)
+#define NVCC7D_HEAD_SET_OVERSCAN_COLOR_RED_CR 9:0
+#define NVCC7D_HEAD_SET_OVERSCAN_COLOR_GREEN_Y 19:10
+#define NVCC7D_HEAD_SET_OVERSCAN_COLOR_BLUE_CB 29:20
+#define NVCC7D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(a) (0x0000207C + (a)*0x00000800)
+#define NVCC7D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_RED_CR 9:0 +#define NVCC7D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_GREEN_Y 19:10 +#define NVCC7D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_BLUE_CB 29:20 +#define NVCC7D_HEAD_SET_HDMI_CTRL(a) (0x00002080 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NVCC7D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NVCC7D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NVCC7D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NVCC7D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NVCC7D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x00002098 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 0:0 +#define NVCC7D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NVCC7D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0 +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8 +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12 +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20 +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0 +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8 +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12 +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16 +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS 20:20 +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CMI_LUT_CONTROL(a) (0x000020A4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_CMI_LUT_CONTROL_ENABLE 0:0 +#define NVCC7D_HEAD_SET_CMI_LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CMI_LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CMI_LUT_CONTROL_INTERPOLATE 1:1 +#define NVCC7D_HEAD_SET_CMI_LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CMI_LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CMI_LUT_CONTROL_MIRROR 2:2 +#define 
NVCC7D_HEAD_SET_CMI_LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CMI_LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CMI_FP_NORM_SCALE(a) (0x000020A8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_CMI_FP_NORM_SCALE_FPNORM 31:0 +#define NVCC7D_HEAD_SET_CMO_LUT_CONTROL(a) (0x000020AC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_CMO_LUT_CONTROL_ENABLE 0:0 +#define NVCC7D_HEAD_SET_CMO_LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CMO_LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CMO_LUT_CONTROL_INTERPOLATE 1:1 +#define NVCC7D_HEAD_SET_CMO_LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CMO_LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CMO_LUT_CONTROL_MIRROR 2:2 +#define NVCC7D_HEAD_SET_CMO_LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CMO_LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CM3D0CONTROL(a) (0x000020B8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_CM3D0CONTROL_MODE 0:0 +#define NVCC7D_HEAD_SET_CM3D0CONTROL_MODE_GAMUT_MAPPING (0x00000000) +#define NVCC7D_HEAD_SET_CM3D0CONTROL_MODE_PANEL_CORR (0x00000001) +#define NVCC7D_HEAD_SET_CM3D0CONTROL_ROUND 1:1 +#define NVCC7D_HEAD_SET_CM3D0CONTROL_ROUND_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CM3D0CONTROL_ROUND_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CM3D0CONTROL_LAST_ENTRY_ADD1 2:2 +#define NVCC7D_HEAD_SET_CM3D0CONTROL_LAST_ENTRY_ADD1_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CM3D0CONTROL_LAST_ENTRY_ADD1_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CM3D0CONTROL_SIZE 3:3 +#define NVCC7D_HEAD_SET_CM3D0CONTROL_SIZE_SIZE_9X9X9 (0x00000000) +#define NVCC7D_HEAD_SET_CM3D0CONTROL_SIZE_SIZE_17X17X17 (0x00000001) +#define NVCC7D_HEAD_SET_CM3D0CONTROL_RANGE_SCALE 8:6 +#define NVCC7D_HEAD_SET_CM3D0CONTROL_CONTENT_OFFSET 22:9 +#define NVCC7D_HEAD_SET_CM3D1CONTROL(a) (0x000020BC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_CM3D1CONTROL_MODE 0:0 +#define NVCC7D_HEAD_SET_CM3D1CONTROL_MODE_GAMUT_MAPPING (0x00000000) +#define NVCC7D_HEAD_SET_CM3D1CONTROL_MODE_PANEL_CORR (0x00000001) +#define NVCC7D_HEAD_SET_CM3D1CONTROL_ROUND 1:1 +#define NVCC7D_HEAD_SET_CM3D1CONTROL_ROUND_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CM3D1CONTROL_ROUND_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CM3D1CONTROL_LAST_ENTRY_ADD1 2:2 +#define NVCC7D_HEAD_SET_CM3D1CONTROL_LAST_ENTRY_ADD1_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CM3D1CONTROL_LAST_ENTRY_ADD1_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CM3D1CONTROL_SIZE 3:3 +#define NVCC7D_HEAD_SET_CM3D1CONTROL_SIZE_SIZE_9X9X9 (0x00000000) +#define NVCC7D_HEAD_SET_CM3D1CONTROL_SIZE_SIZE_17X17X17 (0x00000001) +#define NVCC7D_HEAD_SET_CM3D1CONTROL_RANGE_SCALE 8:6 +#define NVCC7D_HEAD_SET_CM3D1CONTROL_CONTENT_OFFSET 22:9 +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HI(a) (0x000020C0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HI_HERTZ 3:0 +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HI_MAX(a) (0x000020C4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HI_MAX_HERTZ 3:0 +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE(a,b) (0x000020C8 + (a)*0x00000800 + (b)*0x00000004) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW 15:10 +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN0 (0x00000000) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN1 (0x00000001) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN2 (0x00000002) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN3 (0x00000003) +#define 
NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN4 (0x00000004) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN5 (0x00000005) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN6 (0x00000006) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN7 (0x00000007) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN8 (0x00000008) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN9 (0x00000009) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN10 (0x0000000A) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN11 (0x0000000B) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN12 (0x0000000C) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN13 (0x0000000D) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN14 (0x0000000E) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN15 (0x0000000F) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN16 (0x00000010) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN17 (0x00000011) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN18 (0x00000012) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN19 (0x00000013) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN20 (0x00000014) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN21 (0x00000015) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN22 (0x00000016) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN23 (0x00000017) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN24 (0x00000018) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN25 (0x00000019) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN26 (0x0000001A) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN27 (0x0000001B) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN28 (0x0000001C) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN29 (0x0000001D) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN30 (0x0000001E) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_WIN31 (0x0000001F) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_WINDOW_NONE (0x0000003F) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_INDEX 1:1 +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_ENABLE 0:0 +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_LOCATION_OVERRIDE 2:2 +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_LOCATION_OVERRIDE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_LOCATION_OVERRIDE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_LOCATION 9:8 +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_LOCATION_VBLANK (0x00000000) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_LOCATION_VSYNC (0x00000001) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_LOCATION_LINE (0x00000002) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_LINE_ID 30:16 +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_LINE_ID_REVERSED 31:31 +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_LINE_ID_REVERSED_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_LINE_ID_REVERSED_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_HB0OVERRIDE 4:4 +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_HB0OVERRIDE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_HB0OVERRIDE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_HB1OVERRIDE 5:5 +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_HB1OVERRIDE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_HB1OVERRIDE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_HB2OVERRIDE 6:6 +#define 
NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_HB2OVERRIDE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_HB2OVERRIDE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_HB3OVERRIDE 7:7 +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_HB3OVERRIDE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_INFOFRAME_OVERRIDE_HB3OVERRIDE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_INFOFRAME_HEADER_OVERRIDE(a,b) (0x000020D0 + (a)*0x00000800 + (b)*0x00000004) +#define NVCC7D_HEAD_SET_INFOFRAME_HEADER_OVERRIDE_HB0 7:0 +#define NVCC7D_HEAD_SET_INFOFRAME_HEADER_OVERRIDE_HB1 15:8 +#define NVCC7D_HEAD_SET_INFOFRAME_HEADER_OVERRIDE_HB2 23:16 +#define NVCC7D_HEAD_SET_INFOFRAME_HEADER_OVERRIDE_HB3 31:24 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_HI_RG_REL_SEMAPHORE(a,b) (0x00002110 + (a)*0x00000800 + (b)*0x00000004) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_HI_RG_REL_SEMAPHORE_ADDRESS_HI 31:0 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE(a,b) (0x00002130 + (a)*0x00000800 + (b)*0x00000004) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_ADDRESS_LO 31:4 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_TARGET 3:2 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_TARGET_IOVA (0x00000000) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_ENABLE 0:0 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_HI_CRC(a) (0x00002150 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_HI_CRC_ADDRESS_HI 31:0 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC(a) (0x00002154 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ADDRESS_LO 31:4 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET 3:2 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_IOVA (0x00000000) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE 0:0 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_HI_OLUT(a) (0x00002158 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_HI_OLUT_ADDRESS_HI 31:0 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT(a) (0x0000215C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ADDRESS_LO 31:4 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET 3:2 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_IOVA (0x00000000) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE 0:0 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE_ENABLE 
(0x00000001) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_HI_CM3D0(a) (0x00002160 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_HI_CM3D0_ADDRESS_HI 31:0 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D0(a) (0x00002164 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D0_ADDRESS_LO 31:4 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D0_TARGET 3:2 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D0_TARGET_IOVA (0x00000000) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D0_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D0_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D0_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D0_ENABLE 0:0 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D0_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D0_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_HI_CM3D1(a) (0x00002168 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_HI_CM3D1_ADDRESS_HI 31:0 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D1(a) (0x0000216C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D1_ADDRESS_LO 31:4 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D1_TARGET 3:2 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D1_TARGET_IOVA (0x00000000) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D1_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D1_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D1_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D1_ENABLE 0:0 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D1_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CM3D1_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_HI_CURSOR(a,b) (0x00002170 + (a)*0x00000800 + (b)*0x00000004) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_HI_CURSOR_ADDRESS_HI 31:0 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(a,b) (0x00002178 + (a)*0x00000800 + (b)*0x00000004) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ADDRESS_LO 31:4 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET 3:2 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_IOVA (0x00000000) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND 1:1 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND_PITCH (0x00000000) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND_BLOCKLINEAR (0x00000001) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE 0:0 +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0 +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004) +#define 
NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020) +#define NVCC7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8 +#define NVCC7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NVCC7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NVCC7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12 +#define NVCC7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000) +#define NVCC7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030) +#define NVCC7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVCC7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8 +#define NVCC7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050) +#define NVCC7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051) +#define NVCC7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052) +#define NVCC7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053) +#define NVCC7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054) +#define NVCC7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055) +#define NVCC7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056) +#define NVCC7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057) +#define NVCC7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20 +#define NVCC7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000) +#define NVCC7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030) +#define NVCC7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVCC7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8 
+#define NVCC7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050) +#define NVCC7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051) +#define NVCC7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052) +#define NVCC7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053) +#define NVCC7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054) +#define NVCC7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055) +#define NVCC7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056) +#define NVCC7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9 +#define NVCC7D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_PRESENT_CONTROL(a) (0x0000218C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 0:0 +#define NVCC7D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NVCC7D_HEAD_SET_SW_SPARE_A(a) (0x00002194 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NVCC7D_HEAD_SET_SW_SPARE_B(a) (0x00002198 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NVCC7D_HEAD_SET_SW_SPARE_C(a) (0x0000219C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NVCC7D_HEAD_SET_SW_SPARE_D(a) (0x000021A0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NVCC7D_HEAD_SET_DISPLAY_RATE(a) (0x000021A8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DISPLAY_RATE_RUN_MODE 1:0 +#define NVCC7D_HEAD_SET_DISPLAY_RATE_RUN_MODE_CONTINUOUS (0x00000000) +#define NVCC7D_HEAD_SET_DISPLAY_RATE_RUN_MODE_ONE_SHOT (0x00000001) +#define NVCC7D_HEAD_SET_DISPLAY_RATE_RUN_MODE_ONE_SHOT_SELF_REFRESH (0x00000002) +#define NVCC7D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL 25:4 +#define NVCC7D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH 2:2 +#define NVCC7D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL(a,b) (0x000021CC + (a)*0x00000800 + (b)*0x00000004) +#define NVCC7D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15 +#define NVCC7D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000) +#define NVCC7D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001) +#define NVCC7D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE 14:14 +#define NVCC7D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE_WRITE (0x00000000) +#define NVCC7D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE_WRITE_AWAKEN (0x00000001) +#define NVCC7D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE 10:10 +#define NVCC7D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE_ONE_TIME (0x00000000) +#define NVCC7D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE_CONTINUOUS (0x00000001) +#define NVCC7D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RASTER_LINE 31:16 +#define NVCC7D_HEAD_SET_RG_REL_SEMAPHORE_VALUE(a,b) (0x000021EC + (a)*0x00000800 + (b)*0x00000004) +#define NVCC7D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_VALUE 31:0 +#define NVCC7D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE(a) (0x00002214 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVCC7D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVCC7D_HEAD_SET_MIN_FRAME_IDLE(a) (0x00002218 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_MIN_FRAME_IDLE_LEADING_RASTER_LINES 14:0 
+#define NVCC7D_HEAD_SET_MIN_FRAME_IDLE_TRAILING_RASTER_LINES 30:16 +#define NVCC7D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED(a) (0x00002220 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_ALPHA 7:0 +#define NVCC7D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_RED 31:16 +#define NVCC7D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE(a) (0x00002224 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_GREEN 15:0 +#define NVCC7D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_BLUE 31:16 +#define NVCC7D_HEAD_SET_CURSOR_COLOR_NORM_SCALE(a) (0x00002228 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_CURSOR_COLOR_NORM_SCALE_VALUE 15:0 +#define NVCC7D_HEAD_SET_XOR_BLEND_FACTOR(a) (0x0000222C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_XOR_BLEND_FACTOR_LOG2PEAK_LUMINANCE 3:0 +#define NVCC7D_HEAD_SET_XOR_BLEND_FACTOR_S1 16:4 +#define NVCC7D_HEAD_SET_XOR_BLEND_FACTOR_S2 30:18 +#define NVCC7D_HEAD_SET_CLAMP_RANGE_GREEN(a) (0x00002238 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_CLAMP_RANGE_GREEN_LOW 11:0 +#define NVCC7D_HEAD_SET_CLAMP_RANGE_GREEN_HIGH 27:16 +#define NVCC7D_HEAD_SET_CLAMP_RANGE_RED_BLUE(a) (0x0000223C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_CLAMP_RANGE_RED_BLUE_LOW 11:0 +#define NVCC7D_HEAD_SET_CLAMP_RANGE_RED_BLUE_HIGH 27:16 +#define NVCC7D_HEAD_SET_OCSC0CONTROL(a) (0x00002240 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC0CONTROL_ENABLE 0:0 +#define NVCC7D_HEAD_SET_OCSC0CONTROL_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_OCSC0CONTROL_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C00(a) (0x00002244 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C00_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C01(a) (0x00002248 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C01_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C02(a) (0x0000224C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C02_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C03(a) (0x00002250 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C03_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C10(a) (0x00002254 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C10_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C11(a) (0x00002258 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C11_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C12(a) (0x0000225C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C12_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C13(a) (0x00002260 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C13_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C20(a) (0x00002264 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C20_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C21(a) (0x00002268 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C21_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C22(a) (0x0000226C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C22_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C23(a) (0x00002270 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC0COEFFICIENT_C23_VALUE 20:0 +#define NVCC7D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0 +#define NVCC7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1 +#define NVCC7D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE 
(0x00000001) +#define NVCC7D_HEAD_SET_OLUT_CONTROL_MODE 3:2 +#define NVCC7D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define NVCC7D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVCC7D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVCC7D_HEAD_SET_OLUT_CONTROL_SIZE 18:8 +#define NVCC7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND 4:4 +#define NVCC7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_OLUT_CONTROL_LEVEL 25:20 +#define NVCC7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS 5:5 +#define NVCC7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS_SIZE_3BITS (0x00000000) +#define NVCC7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS_SIZE_4BITS (0x00000001) +#define NVCC7D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0 +#define NVCC7D_HEAD_SET_OCSC1CONTROL(a) (0x0000229C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC1CONTROL_ENABLE 0:0 +#define NVCC7D_HEAD_SET_OCSC1CONTROL_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_OCSC1CONTROL_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C00(a) (0x000022A0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C00_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C01(a) (0x000022A4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C01_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C02(a) (0x000022A8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C02_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C03(a) (0x000022AC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C03_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C10(a) (0x000022B0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C10_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C11(a) (0x000022B4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C11_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C12(a) (0x000022B8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C12_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C13(a) (0x000022BC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C13_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C20(a) (0x000022C0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C20_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C21(a) (0x000022C4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C21_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C22(a) (0x000022C8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C22_VALUE 20:0 +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C23(a) (0x000022CC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_OCSC1COEFFICIENT_C23_VALUE 20:0 +#define NVCC7D_HEAD_SET_HEAD_POSITION(a) (0x000022D0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_HEAD_POSITION_X 2:0 +#define NVCC7D_HEAD_SET_HEAD_POSITION_Y 6:4 +#define NVCC7D_HEAD_SET_DSC_CONTROL(a) (0x000022D4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_CONTROL_ENABLE 0:0 +#define NVCC7D_HEAD_SET_DSC_CONTROL_ENABLE_FALSE (0x00000000) +#define NVCC7D_HEAD_SET_DSC_CONTROL_ENABLE_TRUE (0x00000001) +#define NVCC7D_HEAD_SET_DSC_CONTROL_AUTO_RESET 3:3 +#define NVCC7D_HEAD_SET_DSC_CONTROL_AUTO_RESET_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_DSC_CONTROL_AUTO_RESET_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION 4:4 +#define NVCC7D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_DISABLE (0x00000000) +#define 
NVCC7D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET 5:5 +#define NVCC7D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_FALSE (0x00000000) +#define NVCC7D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_TRUE (0x00000001) +#define NVCC7D_HEAD_SET_DSC_CONTROL_FLATNESS_DET_THRESH 15:6 +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL(a) (0x000022D8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL_ENABLE 0:0 +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_FALSE (0x00000000) +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_TRUE (0x00000001) +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL_LOCATION 2:1 +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VBLANK (0x00000000) +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VSYNC (0x00000001) +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_LINE (0x00000002) +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL_SIZE 10:3 +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY 11:11 +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL_LINE_ID 26:12 +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL_LINE_ID_REVERSED 27:27 +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL_LINE_ID_REVERSED_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_DSC_PPS_CONTROL_LINE_ID_REVERSED_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_DSC_PPS_HEAD(a) (0x000022DC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_HEAD_BYTE0 7:0 +#define NVCC7D_HEAD_SET_DSC_PPS_HEAD_BYTE1 15:8 +#define NVCC7D_HEAD_SET_DSC_PPS_HEAD_BYTE2 23:16 +#define NVCC7D_HEAD_SET_DSC_PPS_HEAD_BYTE3 31:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA0(a) (0x000022E0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MINOR 3:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MAJOR 7:4 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA0_PPS_IDENTIFIER 15:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA0_RESERVED 23:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA0_LINEBUF_DEPTH 27:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA0_BITS_PER_COMPONENT 31:28 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA1(a) (0x000022E4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_HIGH 1:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA1_VBR_ENABLE 2:2 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA1_SIMPLE422 3:3 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA1_CONVERT_RGB 4:4 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA1_BLOCK_PRED_ENABLE 5:5 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA1_RESERVED 7:6 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_LOW 15:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_HIGH 23:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_LOW 31:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA2(a) (0x000022E8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_HIGH 7:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_LOW 15:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_HIGH 23:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_LOW 31:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA3(a) (0x000022EC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_HIGH 7:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_LOW 15:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_HIGH 23:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_LOW 31:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA4(a) (0x000022F0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_HIGH 1:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA4_RESERVED 7:2 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_LOW 
15:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_HIGH 23:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_LOW 31:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA5(a) (0x000022F4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA5_RESERVED0 7:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA5_INITIAL_SCALE_VALUE 13:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA5_RESERVED1 15:14 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_HIGH 23:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_LOW 31:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA6(a) (0x000022F8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_HIGH 3:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA6_RESERVED0 7:4 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_LOW 15:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA6_RESERVED1 23:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA6_FIRST_LINE_BPG_OFFSET 28:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA6_RESERVED2 31:29 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA7(a) (0x000022FC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_HIGH 7:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_LOW 15:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_HIGH 23:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_LOW 31:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA8(a) (0x00002300 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_HIGH 7:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_LOW 15:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_HIGH 23:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_LOW 31:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA9(a) (0x00002304 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MIN_QP 4:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA9_RESERVED0 7:5 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MAX_QP 12:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA9_RESERVED1 15:13 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_HIGH 23:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_LOW 31:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA10(a) (0x00002308 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA10_RC_EDGE_FACTOR 3:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA10_RESERVED0 7:4 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT0 12:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA10_RESERVED1 15:13 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT1 20:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA10_RESERVED2 23:21 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_LO 27:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_HI 31:28 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA11(a) (0x0000230C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH0 7:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH1 15:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH2 23:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH3 31:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA12(a) (0x00002310 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH4 7:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH5 15:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH6 23:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH7 31:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA13(a) (0x00002314 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH8 7:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH9 15:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH10 23:16 +#define 
NVCC7D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH11 31:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA14(a) (0x00002318 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH12 7:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH13 15:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_HIGH0 18:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MIN_QP0 23:19 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_BPG_OFFSET0 29:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_LOW0 31:30 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA15(a) (0x0000231C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH1 2:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP1 7:3 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET1 13:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW1 15:14 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH2 18:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP2 23:19 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET2 29:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW2 31:30 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA16(a) (0x00002320 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH3 2:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP3 7:3 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET3 13:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW3 15:14 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH4 18:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP4 23:19 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET4 29:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW4 31:30 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA17(a) (0x00002324 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH5 2:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP5 7:3 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET5 13:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW5 15:14 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH6 18:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP6 23:19 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET6 29:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW6 31:30 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA18(a) (0x00002328 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH7 2:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP7 7:3 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET7 13:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW7 15:14 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH8 18:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP8 23:19 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET8 29:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW8 31:30 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA19(a) (0x0000232C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH9 2:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP9 7:3 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET9 13:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW9 15:14 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH10 18:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP10 23:19 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET10 29:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW10 31:30 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA20(a) 
(0x00002330 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH11 2:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP11 7:3 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET11 13:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW11 15:14 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH12 18:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP12 23:19 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET12 29:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW12 31:30 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA21(a) (0x00002334 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH13 2:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP13 7:3 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET13 13:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW13 15:14 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH14 18:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP14 23:19 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET14 29:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW14 31:30 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA22(a) (0x00002338 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA22_NATIVE422 0:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA22_NATIVE420 1:1 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA22_RESERVED0 7:2 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA22_SECOND_LINE_BPG_OFFSET 12:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA22_RESERVED1 15:13 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSET_HIGH 23:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSETLOW 31:24 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA23(a) (0x0000233C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_HIGH 7:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_LOW 15:8 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA23_RESERVED 31:16 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA24(a) (0x00002340 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA24_RESERVED 31:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA25(a) (0x00002344 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA25_RESERVED 31:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA26(a) (0x00002348 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA26_RESERVED 31:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA27(a) (0x0000234C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA27_RESERVED 31:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA28(a) (0x00002350 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA28_RESERVED 31:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA29(a) (0x00002354 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA29_RESERVED 31:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA30(a) (0x00002358 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA30_RESERVED 31:0 +#define NVCC7D_HEAD_SET_DSC_PPS_DATA31(a) (0x0000235C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DSC_PPS_DATA31_RESERVED 31:0 +#define NVCC7D_HEAD_SET_RASTER_HBLANK_DELAY(a) (0x00002364 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_RASTER_HBLANK_DELAY_BLANK_START 15:0 +#define NVCC7D_HEAD_SET_RASTER_HBLANK_DELAY_BLANK_END 31:16 +#define NVCC7D_HEAD_SET_HDMI_DSC_HCACTIVE(a) (0x00002368 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_HDMI_DSC_HCACTIVE_BYTES 15:0 +#define NVCC7D_HEAD_SET_HDMI_DSC_HCACTIVE_TRI_BYTES 31:16 +#define NVCC7D_HEAD_SET_HDMI_DSC_HCBLANK(a) (0x0000236C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_HDMI_DSC_HCBLANK_WIDTH 15:0 +#define NVCC7D_HEAD_SW_RESERVED(a,b) (0x00002370 + (a)*0x00000800 + (b)*0x00000004) 
+#define NVCC7D_HEAD_SW_RESERVED_VALUE 31:0 +#define NVCC7D_HEAD_SET_DP_CTRL(a) (0x0000237C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DP_CTRL_EFFECTIVE_BITS_PER_PIXEL_ADJ 9:0 +#define NVCC7D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_HI(a,b) (0x00002380 + (a)*0x00000800 + (b)*0x00000004) +#define NVCC7D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_HI_VALUE 31:0 +#define NVCC7D_HEAD_SET_HEAD_INFOFRAME(a) (0x000023A0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_HEAD_INFOFRAME_FID 7:0 +#define NVCC7D_HEAD_SET_HEAD_INFOFRAME_EN 16:16 +#define NVCC7D_HEAD_SET_HEAD_INFOFRAME_EN_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_HEAD_INFOFRAME_EN_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL(a) (0x000023A4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_MODE 1:0 +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_MODE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_MODE_EMP_VTEM_QMS (0x00000001) +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_MODE_EMP_OTHERS (0x00000002) +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_FREQUENCY 12:12 +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_FREQUENCY_ONCE (0x00000001) +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_LOCATION 5:4 +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_LOCATION_VBLANK (0x00000000) +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_LOCATION_VSYNC (0x00000001) +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_LOCATION_LINE (0x00000002) +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_LINE_ID 30:16 +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_LINE_ID_REVERSED 31:31 +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_LINE_ID_REVERSED_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_LINE_ID_REVERSED_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_MTD_STATE_CTRL 14:14 +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_MTD_STATE_CTRL_ACTIVE (0x00000000) +#define NVCC7D_HEAD_SET_INFOFRAME_CTRL_MTD_STATE_CTRL_ARM (0x00000001) +#define NVCC7D_HEAD_SET_INFOFRAME_HEADER(a) (0x000023A8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_INFOFRAME_HEADER_HB0 7:0 +#define NVCC7D_HEAD_SET_INFOFRAME_HEADER_HB1 15:8 +#define NVCC7D_HEAD_SET_INFOFRAME_HEADER_HB2 23:16 +#define NVCC7D_HEAD_SET_INFOFRAME_HEADER_HB3 31:24 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA0(a) (0x000023AC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_INFOFRAME_DATA0_DB0 7:0 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA0_DB1 15:8 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA0_DB2 23:16 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA0_DB3 31:24 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA1(a) (0x000023B0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_INFOFRAME_DATA1_DB4 7:0 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA1_DB5 15:8 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA1_DB6 23:16 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA1_DB7 31:24 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA2(a) (0x000023B4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_INFOFRAME_DATA2_DB8 7:0 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA2_DB9 15:8 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA2_DB10 23:16 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA2_DB11 31:24 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA3(a) (0x000023B8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_INFOFRAME_DATA3_DB12 7:0 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA3_DB13 15:8 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA3_DB14 23:16 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA3_DB15 31:24 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA4(a) (0x000023BC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_INFOFRAME_DATA4_DB16 7:0 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA4_DB17 15:8 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA4_DB18 23:16 +#define 
NVCC7D_HEAD_SET_INFOFRAME_DATA4_DB19 31:24 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA5(a) (0x000023C0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_INFOFRAME_DATA5_DB20 7:0 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA5_DB21 15:8 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA5_DB22 23:16 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA5_DB23 31:24 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA6(a) (0x000023C4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_INFOFRAME_DATA6_DB24 7:0 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA6_DB25 15:8 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA6_DB26 23:16 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA6_DB27 31:24 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA7(a) (0x000023C8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_INFOFRAME_DATA7_DB28 7:0 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA7_DB29 15:8 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA7_DB30 23:16 +#define NVCC7D_HEAD_SET_INFOFRAME_DATA7_DB31 31:24 +#define NVCC7D_HEAD_SET_RASTER_SIZE_FRACTION(a) (0x000023CC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_RASTER_SIZE_FRACTION_HEIGHT 15:0 +#define NVCC7D_HEAD_SET_LTM_CONTROL(a) (0x000023D0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_LTM_CONTROL_ENABLE 0:0 +#define NVCC7D_HEAD_SET_LTM_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_LTM_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_LTM_CONTROL_MODE 1:1 +#define NVCC7D_HEAD_SET_LTM_CONTROL_MODE_TMO (0x00000000) +#define NVCC7D_HEAD_SET_LTM_CONTROL_MODE_LTM (0x00000001) +#define NVCC7D_HEAD_SET_LTM_CONTROL_TONE_SELECT 2:2 +#define NVCC7D_HEAD_SET_LTM_CONTROL_TONE_SELECT_INTENSITY (0x00000000) +#define NVCC7D_HEAD_SET_LTM_CONTROL_TONE_SELECT_CDB (0x00000001) +#define NVCC7D_HEAD_SET_LTM_CONTROL_SKIN_TONE_ENABLE 3:3 +#define NVCC7D_HEAD_SET_LTM_CONTROL_SKIN_TONE_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_LTM_CONTROL_SKIN_TONE_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_LTM_CONTROL_LUT_INTERPOLATE 4:4 +#define NVCC7D_HEAD_SET_LTM_CONTROL_LUT_INTERPOLATE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_LTM_CONTROL_LUT_INTERPOLATE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_LTM_CONTROL_LUT_DIRECT8 5:5 +#define NVCC7D_HEAD_SET_LTM_CONTROL_LUT_DIRECT8_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_LTM_CONTROL_LUT_DIRECT8_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_LTM_CONTROL_LUT_DIRECT10 6:6 +#define NVCC7D_HEAD_SET_LTM_CONTROL_LUT_DIRECT10_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_LTM_CONTROL_LUT_DIRECT10_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_LTM_CONTROL_LUT_DIRECT_ROUND 7:7 +#define NVCC7D_HEAD_SET_LTM_CONTROL_LUT_DIRECT_ROUND_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_LTM_CONTROL_LUT_DIRECT_ROUND_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_LTM_CONTROL_HIST_SOURCE 9:8 +#define NVCC7D_HEAD_SET_LTM_CONTROL_HIST_SOURCE_MAX_RGB (0x00000000) +#define NVCC7D_HEAD_SET_LTM_CONTROL_HIST_SOURCE_LIN_I (0x00000001) +#define NVCC7D_HEAD_SET_LTM_CONTROL_HIST_SOURCE_PQI (0x00000002) +#define NVCC7D_HEAD_SET_LTM_CONTROL_HIST_SOURCE_CDB (0x00000003) +#define NVCC7D_HEAD_SET_LTM_CONTROL_HIST_RANGE 11:10 +#define NVCC7D_HEAD_SET_LTM_CONTROL_HIST_RANGE_RANGE_10000 (0x00000000) +#define NVCC7D_HEAD_SET_LTM_CONTROL_HIST_RANGE_RANGE_5000 (0x00000001) +#define NVCC7D_HEAD_SET_LTM_CONTROL_HIST_RANGE_RANGE_2500 (0x00000002) +#define NVCC7D_HEAD_SET_LTM_CONTROL_HIST_RANGE_RANGE_1250 (0x00000003) +#define NVCC7D_HEAD_SET_LTM_CONTROL_HIST_END_LINE 31:16 +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C00(a) (0x000023D4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C00_VALUE 20:0 +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C01(a) (0x000023D8 + (a)*0x00000800) 
+#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C01_VALUE 20:0 +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C02(a) (0x000023DC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C02_VALUE 20:0 +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C03(a) (0x000023E0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C03_VALUE 20:0 +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C10(a) (0x000023E4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C10_VALUE 20:0 +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C11(a) (0x000023E8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C11_VALUE 20:0 +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C12(a) (0x000023EC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C12_VALUE 20:0 +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C13(a) (0x000023F0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C13_VALUE 20:0 +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C20(a) (0x000023F4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C20_VALUE 20:0 +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C21(a) (0x000023F8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C21_VALUE 20:0 +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C22(a) (0x000023FC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C22_VALUE 20:0 +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C23(a) (0x00002400 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_RGB2ITP_COEFF_C23_VALUE 20:0 +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C00(a) (0x00002404 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C00_VALUE 20:0 +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C01(a) (0x00002408 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C01_VALUE 20:0 +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C02(a) (0x0000240C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C02_VALUE 20:0 +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C03(a) (0x00002410 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C03_VALUE 20:0 +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C10(a) (0x00002414 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C10_VALUE 20:0 +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C11(a) (0x00002418 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C11_VALUE 20:0 +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C12(a) (0x0000241C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C12_VALUE 20:0 +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C13(a) (0x00002420 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C13_VALUE 20:0 +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C20(a) (0x00002424 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C20_VALUE 20:0 +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C21(a) (0x00002428 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C21_VALUE 20:0 +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C22(a) (0x0000242C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C22_VALUE 20:0 +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C23(a) (0x00002430 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_ITP2RGB_COEFF_C23_VALUE 20:0 +#define NVCC7D_HEAD_SET_LTM_MAX_SELECTIVE_UPDATE(a) (0x00002434 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_LTM_MAX_SELECTIVE_UPDATE_SIZE 31:0 +#define NVCC7D_HEAD_SET_SELF_REFRESH_OVERRIDE(a) (0x00002438 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_SELF_REFRESH_OVERRIDE_FRAME_RELEASE 0:0 +#define NVCC7D_HEAD_SET_SELF_REFRESH_OVERRIDE_FRAME_RELEASE_AUTOMATIC (0x00000000) +#define NVCC7D_HEAD_SET_SELF_REFRESH_OVERRIDE_FRAME_RELEASE_MANUAL (0x00000001) +#define NVCC7D_HEAD_SET_SELECTIVE_UPDATE(a) (0x0000243C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_SELECTIVE_UPDATE_ENABLE 0:0 +#define NVCC7D_HEAD_SET_SELECTIVE_UPDATE_ENABLE_DISABLE 
(0x00000000) +#define NVCC7D_HEAD_SET_SELECTIVE_UPDATE_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_SELECTIVE_UPDATE_EARLY_REGION 1:1 +#define NVCC7D_HEAD_SET_SELECTIVE_UPDATE_EARLY_REGION_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_SELECTIVE_UPDATE_EARLY_REGION_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_CURSOR_DIRTY(a) (0x00002440 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_CURSOR_DIRTY_STATE 0:0 +#define NVCC7D_HEAD_SET_CURSOR_DIRTY_STATE_FALSE (0x00000000) +#define NVCC7D_HEAD_SET_CURSOR_DIRTY_STATE_TRUE (0x00000001) +#define NVCC7D_HEAD_SET_DPM_CONTROL(a) (0x00002444 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_DPM_CONTROL_ENABLE 0:0 +#define NVCC7D_HEAD_SET_DPM_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_DPM_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_DPM_CONTROL_ALPM 1:1 +#define NVCC7D_HEAD_SET_DPM_CONTROL_ALPM_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_DPM_CONTROL_ALPM_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_DPM_CONTROL_ALPM_TYPE 2:2 +#define NVCC7D_HEAD_SET_DPM_CONTROL_ALPM_TYPE_AUX_LESS (0x00000000) +#define NVCC7D_HEAD_SET_DPM_CONTROL_AS_SDP 3:3 +#define NVCC7D_HEAD_SET_DPM_CONTROL_AS_SDP_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_DPM_CONTROL_AS_SDP_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL(a) (0x00002448 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_ENABLE 0:0 +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_ENABLE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_ENABLE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_SELF_REFRESH 1:1 +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_SELF_REFRESH_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_SELF_REFRESH_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_MODE 3:2 +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_MODE_PSR1 (0x00000000) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_MODE_PANEL_REPLAY (0x00000002) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_CRC 4:4 +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_CRC_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_CRC_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_LOCATION 7:6 +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_LOCATION_VBLANK (0x00000000) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_LOCATION_VSYNC (0x00000001) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_LOCATION_LINE (0x00000002) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_LINE_ID 22:8 +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_CAPTURE_INDICATOR 23:23 +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_CAPTURE_INDICATOR_SAME (0x00000000) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_CAPTURE_INDICATOR_EARLY (0x00000001) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_IDLE_SDP 24:24 +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_IDLE_SDP_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_IDLE_SDP_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_OVERRIDE_SU_VSC 25:25 +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_OVERRIDE_SU_VSC_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_VSC_SDP_CTRL_OVERRIDE_SU_VSC_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_VSC_SDP_HEADER(a) (0x0000244C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_VSC_SDP_HEADER_HEADER_BYTE 31:0 +#define NVCC7D_HEAD_SET_VSC_SDP_DATA0(a) (0x00002450 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_VSC_SDP_DATA0_DATA_BYTE 31:0 +#define NVCC7D_HEAD_SET_VSC_SDP_DATA1(a) (0x00002454 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_VSC_SDP_DATA1_DATA_BYTE 31:0 +#define NVCC7D_HEAD_SET_VSC_SDP_DATA2(a) (0x00002458 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_VSC_SDP_DATA2_DATA_BYTE 31:0 +#define NVCC7D_HEAD_SET_VSC_SDP_DATA3(a) (0x0000245C + (a)*0x00000800) +#define 
NVCC7D_HEAD_SET_VSC_SDP_DATA3_DATA_BYTE 31:0 +#define NVCC7D_HEAD_SET_VSC_SDP_DATA4(a) (0x00002460 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_VSC_SDP_DATA4_DATA_BYTE 31:0 +#define NVCC7D_HEAD_SET_VSC_SDP_DATA5(a) (0x00002464 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_VSC_SDP_DATA5_DATA_BYTE 31:0 +#define NVCC7D_HEAD_SET_VSC_SDP_DATA6(a) (0x00002468 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_VSC_SDP_DATA6_DATA_BYTE 31:0 +#define NVCC7D_HEAD_SET_VSC_SDP_DATA7(a) (0x0000246C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_VSC_SDP_DATA7_DATA_BYTE 31:0 +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL(a) (0x00002470 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_ENABLE_INPUT_MATRIX 0:0 +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_ENABLE_INPUT_MATRIX_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_ENABLE_INPUT_MATRIX_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_ENABLE_FVLUT 1:1 +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_ENABLE_FVLUT_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_ENABLE_FVLUT_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_FVLUT_INTERPOLATE 4:4 +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_FVLUT_INTERPOLATE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_FVLUT_INTERPOLATE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_FVLUT_MIRROR 5:5 +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_FVLUT_MIRROR_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_FVLUT_MIRROR_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_FVLUT_CURVE 7:6 +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_FVLUT_CURVE_CSCLUT_PQ (0x00000000) +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_FVLUT_CURVE_CSCLUT_HLG (0x00000001) +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_FVLUT_CURVE_CSCLUT_DIRECT (0x00000002) +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_ENABLE_OUTPUT_MATRIX 2:2 +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_ENABLE_OUTPUT_MATRIX_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_RGB2ITP_CONTROL_ENABLE_OUTPUT_MATRIX_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_RGB2ITP_LUT_FP_NORM_SCALE(a) (0x00002474 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_RGB2ITP_LUT_FP_NORM_SCALE_VALUE 31:0 +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL(a) (0x00002478 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_ENABLE_INPUT_MATRIX 0:0 +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_ENABLE_INPUT_MATRIX_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_ENABLE_INPUT_MATRIX_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_ENABLE_FVLUT 1:1 +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_ENABLE_FVLUT_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_ENABLE_FVLUT_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_FVLUT_INTERPOLATE 4:4 +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_FVLUT_INTERPOLATE_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_FVLUT_INTERPOLATE_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_FVLUT_MIRROR 5:5 +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_FVLUT_MIRROR_DISABLE (0x00000000) +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_FVLUT_MIRROR_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_FVLUT_CURVE 7:6 +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_FVLUT_CURVE_CSCLUT_PQ (0x00000000) +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_FVLUT_CURVE_CSCLUT_HLG (0x00000001) +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_FVLUT_CURVE_CSCLUT_DIRECT (0x00000002) +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_ENABLE_OUTPUT_MATRIX 2:2 +#define NVCC7D_HEAD_SET_ITP2RGB_CONTROL_ENABLE_OUTPUT_MATRIX_DISABLE (0x00000000) +#define 
NVCC7D_HEAD_SET_ITP2RGB_CONTROL_ENABLE_OUTPUT_MATRIX_ENABLE (0x00000001) +#define NVCC7D_HEAD_SET_ITP2RGB_LUT_FP_SCALE(a) (0x0000247C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_ITP2RGB_LUT_FP_SCALE_VALUE 15:0 +#define NVCC7D_HEAD_SET_BLANK_OUTPUT_COLOR(a) (0x00002484 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_BLANK_OUTPUT_COLOR_RED_CR 9:0 +#define NVCC7D_HEAD_SET_BLANK_OUTPUT_COLOR_GREEN_Y 19:10 +#define NVCC7D_HEAD_SET_BLANK_OUTPUT_COLOR_BLUE_CB 29:20 +#define NVCC7D_HEAD_SET_SELECTIVE_UPDATE_GRANULARITY(a) (0x0000249C + (a)*0x00000800) +#define NVCC7D_HEAD_SET_SELECTIVE_UPDATE_GRANULARITY_X 15:0 +#define NVCC7D_HEAD_SET_SELECTIVE_UPDATE_GRANULARITY_Y 31:16 +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C00(a) (0x000024A0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C00_VALUE 20:0 +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C01(a) (0x000024A4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C01_VALUE 20:0 +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C02(a) (0x000024A8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C02_VALUE 20:0 +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C03(a) (0x000024AC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C03_VALUE 20:0 +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C10(a) (0x000024B0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C10_VALUE 20:0 +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C11(a) (0x000024B4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C11_VALUE 20:0 +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C12(a) (0x000024B8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C12_VALUE 20:0 +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C13(a) (0x000024BC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C13_VALUE 20:0 +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C20(a) (0x000024C0 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C20_VALUE 20:0 +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C21(a) (0x000024C4 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C21_VALUE 20:0 +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C22(a) (0x000024C8 + (a)*0x00000800) +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C22_VALUE 20:0 +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C23(a) (0x000024CC + (a)*0x00000800) +#define NVCC7D_HEAD_SET_LTM_HIST_COEFF_C23_VALUE 20:0 + +#define NVCC7D_TILE_SET_TILE_SIZE(a) (0x00006000 + (a)*0x00000200) +#define NVCC7D_TILE_SET_TILE_SIZE_START 14:0 +#define NVCC7D_TILE_SET_TILE_SIZE_WIDTH 30:16 +#define NVCC7D_TILE_SET_CMI_LUT_SEGMENT_SIZE(a) (0x00006074 + (a)*0x00000200) +#define NVCC7D_TILE_SET_CMI_LUT_SEGMENT_SIZE_IDX 5:0 +#define NVCC7D_TILE_SET_CMI_LUT_SEGMENT_SIZE_VALUE 8:6 +#define NVCC7D_TILE_SET_CMI_LUT_ENTRY(a) (0x00006078 + (a)*0x00000200) +#define NVCC7D_TILE_SET_CMI_LUT_ENTRY_IDX 10:0 +#define NVCC7D_TILE_SET_CMI_LUT_ENTRY_VALUE 24:11 +#define NVCC7D_TILE_SET_CMI_LUT_ENTRY_LAST 25:25 +#define NVCC7D_TILE_SET_CMI_LUT_ENTRY_LAST_FALSE (0x00000000) +#define NVCC7D_TILE_SET_CMI_LUT_ENTRY_LAST_TRUE (0x00000001) +#define NVCC7D_TILE_SET_CMO_LUT_SEGMENT_SIZE(a) (0x0000607C + (a)*0x00000200) +#define NVCC7D_TILE_SET_CMO_LUT_SEGMENT_SIZE_IDX 5:0 +#define NVCC7D_TILE_SET_CMO_LUT_SEGMENT_SIZE_VALUE 8:6 +#define NVCC7D_TILE_SET_CMO_LUT_ENTRY(a) (0x00006080 + (a)*0x00000200) +#define NVCC7D_TILE_SET_CMO_LUT_ENTRY_IDX 10:0 +#define NVCC7D_TILE_SET_CMO_LUT_ENTRY_VALUE 26:11 +#define NVCC7D_TILE_SET_CMO_LUT_ENTRY_LAST 27:27 +#define NVCC7D_TILE_SET_CMO_LUT_ENTRY_LAST_FALSE (0x00000000) +#define NVCC7D_TILE_SET_CMO_LUT_ENTRY_LAST_TRUE (0x00000001) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // 
diff --git a/src/common/sdk/nvidia/inc/class/clcc7e.h b/src/common/sdk/nvidia/inc/class/clcc7e.h
new file mode 100644
index 0000000..bc46f69
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/class/clcc7e.h
@@ -0,0 +1,906 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2003-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clcc7e_h_
+#define _clcc7e_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVCC7E_WINDOW_CHANNEL_DMA (0x0000CC7E)
+
+// dma opcode instructions
+#define NVCC7E_DMA
+#define NVCC7E_DMA_OPCODE 31:29
+#define NVCC7E_DMA_OPCODE_METHOD 0x00000000
+#define NVCC7E_DMA_OPCODE_JUMP 0x00000001
+#define NVCC7E_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVCC7E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVCC7E_DMA_METHOD_COUNT 27:18
+#define NVCC7E_DMA_METHOD_OFFSET 15:2
+#define NVCC7E_DMA_DATA 31:0
+#define NVCC7E_DMA_DATA_NOP 0x00000000
+#define NVCC7E_DMA_JUMP_OFFSET 15:2
+#define NVCC7E_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NVCC7E_PUT (0x00000000)
+#define NVCC7E_PUT_PTR 13:0
+#define NVCC7E_GET (0x00000004)
+#define NVCC7E_GET_PTR 13:0
+#define NVCC7E_UPDATE (0x00000200)
+#define NVCC7E_UPDATE_RELEASE_ELV 0:0
+#define NVCC7E_UPDATE_RELEASE_ELV_FALSE (0x00000000)
+#define NVCC7E_UPDATE_RELEASE_ELV_TRUE (0x00000001)
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN 8:4
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define
NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVCC7E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVCC7E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVCC7E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVCC7E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVCC7E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVCC7E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVCC7E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVCC7E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVCC7E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVCC7E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVCC7E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVCC7E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVCC7E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVCC7E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVCC7E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVCC7E_UPDATE_INTERLOCK_WITH_WIN_IMM 12:12 +#define NVCC7E_UPDATE_INTERLOCK_WITH_WIN_IMM_DISABLE (0x00000000) +#define NVCC7E_UPDATE_INTERLOCK_WITH_WIN_IMM_ENABLE (0x00000001) +#define NVCC7E_UPDATE_FORCE_FULLSCREEN 16:16 +#define NVCC7E_UPDATE_FORCE_FULLSCREEN_FALSE (0x00000000) +#define NVCC7E_UPDATE_FORCE_FULLSCREEN_TRUE (0x00000001) +#define NVCC7E_SET_SEMAPHORE_ACQUIRE_HI (0x00000204) +#define NVCC7E_SET_SEMAPHORE_ACQUIRE_HI_VALUE 31:0 +#define NVCC7E_GET_LINE (0x00000208) +#define NVCC7E_GET_LINE_LINE 15:0 +#define NVCC7E_SET_SEMAPHORE_CONTROL (0x0000020C) +#define NVCC7E_SET_SEMAPHORE_CONTROL_SKIP_ACQ 11:11 +#define NVCC7E_SET_SEMAPHORE_CONTROL_SKIP_ACQ_FALSE (0x00000000) +#define NVCC7E_SET_SEMAPHORE_CONTROL_SKIP_ACQ_TRUE (0x00000001) +#define NVCC7E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15 +#define NVCC7E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000) +#define NVCC7E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001) +#define NVCC7E_SET_SEMAPHORE_CONTROL_ACQ_MODE 13:12 +#define NVCC7E_SET_SEMAPHORE_CONTROL_ACQ_MODE_EQ (0x00000000) +#define NVCC7E_SET_SEMAPHORE_CONTROL_ACQ_MODE_CGEQ (0x00000001) +#define NVCC7E_SET_SEMAPHORE_CONTROL_ACQ_MODE_STRICT_GEQ (0x00000002) +#define NVCC7E_SET_SEMAPHORE_CONTROL_REL_MODE 14:14 +#define NVCC7E_SET_SEMAPHORE_CONTROL_REL_MODE_WRITE (0x00000000) +#define NVCC7E_SET_SEMAPHORE_CONTROL_REL_MODE_WRITE_AWAKEN (0x00000001) +#define NVCC7E_SET_SEMAPHORE_ACQUIRE (0x00000210) +#define NVCC7E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NVCC7E_SET_SEMAPHORE_RELEASE (0x00000214) +#define NVCC7E_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NVCC7E_SET_NOTIFIER_CONTROL (0x00000220) +#define NVCC7E_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVCC7E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVCC7E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVCC7E_SET_SIZE (0x00000224) +#define NVCC7E_SET_SIZE_WIDTH 15:0 +#define NVCC7E_SET_SIZE_HEIGHT 31:16 +#define NVCC7E_SET_STORAGE (0x00000228) +#define NVCC7E_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NVCC7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NVCC7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NVCC7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NVCC7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVCC7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS 
(0x00000004) +#define NVCC7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVCC7E_SET_PARAMS (0x0000022C) +#define NVCC7E_SET_PARAMS_FORMAT 7:0 +#define NVCC7E_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NVCC7E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F) +#define NVCC7E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NVCC7E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NVCC7E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E) +#define NVCC7E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NVCC7E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6) +#define NVCC7E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NVCC7E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9) +#define NVCC7E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NVCC7E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NVCC7E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NVCC7E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NVCC7E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NVCC7E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028) +#define NVCC7E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029) +#define NVCC7E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035) +#define NVCC7E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036) +#define NVCC7E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038) +#define NVCC7E_SET_PARAMS_FORMAT_Y8___U8___V8_N444 (0x0000003A) +#define NVCC7E_SET_PARAMS_FORMAT_Y8___U8___V8_N420 (0x0000003B) +#define NVCC7E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055) +#define NVCC7E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056) +#define NVCC7E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058) +#define NVCC7E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075) +#define NVCC7E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076) +#define NVCC7E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078) +#define NVCC7E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18 +#define NVCC7E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000) +#define NVCC7E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001) +#define NVCC7E_SET_PARAMS_SWAP_UV 19:19 +#define NVCC7E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000) +#define NVCC7E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001) +#define NVCC7E_SET_PARAMS_FMT_ROUNDING_MODE 22:22 +#define NVCC7E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST (0x00000000) +#define NVCC7E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN (0x00000001) +#define NVCC7E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004) +#define NVCC7E_SET_PLANAR_STORAGE_PITCH 12:0 +#define NVCC7E_SET_SEMAPHORE_RELEASE_HI (0x0000023C) +#define NVCC7E_SET_SEMAPHORE_RELEASE_HI_VALUE 31:0 +#define NVCC7E_SET_SURFACE_ADDRESS_HI_BEGUN_SEMAPHORE_ARRAY (0x00000240) +#define NVCC7E_SET_SURFACE_ADDRESS_HI_BEGUN_SEMAPHORE_ARRAY_ADDRESS_HI 31:0 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_BEGUN_SEMAPHORE_ARRAY (0x00000244) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_BEGUN_SEMAPHORE_ARRAY_ADDRESS_LO 31:4 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_BEGUN_SEMAPHORE_ARRAY_TARGET 3:2 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_BEGUN_SEMAPHORE_ARRAY_TARGET_IOVA (0x00000000) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_BEGUN_SEMAPHORE_ARRAY_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_BEGUN_SEMAPHORE_ARRAY_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_BEGUN_SEMAPHORE_ARRAY_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_BEGUN_SEMAPHORE_ARRAY_ENABLE 0:0 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_BEGUN_SEMAPHORE_ARRAY_ENABLE_DISABLE (0x00000000) +#define 
NVCC7E_SET_SURFACE_ADDRESS_LO_BEGUN_SEMAPHORE_ARRAY_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_SURFACE_ADDRESS_HI_FINISH_SEMAPHORE_ARRAY (0x00000248) +#define NVCC7E_SET_SURFACE_ADDRESS_HI_FINISH_SEMAPHORE_ARRAY_ADDRESS_HI 31:0 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_FINISH_SEMAPHORE_ARRAY (0x0000024C) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_FINISH_SEMAPHORE_ARRAY_ADDRESS_LO 31:4 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_FINISH_SEMAPHORE_ARRAY_TARGET 3:2 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_FINISH_SEMAPHORE_ARRAY_TARGET_IOVA (0x00000000) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_FINISH_SEMAPHORE_ARRAY_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_FINISH_SEMAPHORE_ARRAY_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_FINISH_SEMAPHORE_ARRAY_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_FINISH_SEMAPHORE_ARRAY_ENABLE 0:0 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_FINISH_SEMAPHORE_ARRAY_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_FINISH_SEMAPHORE_ARRAY_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004) +#define NVCC7E_SET_POINT_IN_X 15:0 +#define NVCC7E_SET_POINT_IN_Y 31:16 +#define NVCC7E_SET_SIZE_IN (0x00000298) +#define NVCC7E_SET_SIZE_IN_WIDTH 15:0 +#define NVCC7E_SET_SIZE_IN_HEIGHT 31:16 +#define NVCC7E_SET_SIZE_OUT (0x000002A4) +#define NVCC7E_SET_SIZE_OUT_WIDTH 15:0 +#define NVCC7E_SET_SIZE_OUT_HEIGHT 31:16 +#define NVCC7E_SET_CONTROL_INPUT_SCALER (0x000002A8) +#define NVCC7E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVCC7E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVCC7E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVCC7E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define NVCC7E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVCC7E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVCC7E_SET_CONTROL_INPUT_SCALER_VERTICAL_FORCE_ENABLE 8:8 +#define NVCC7E_SET_CONTROL_INPUT_SCALER_VERTICAL_FORCE_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_CONTROL_INPUT_SCALER_VERTICAL_FORCE_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_FORCE_ENABLE 9:9 +#define NVCC7E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_FORCE_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_FORCE_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_INPUT_SCALER_COEFF_VALUE (0x000002AC) +#define NVCC7E_SET_INPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVCC7E_SET_INPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVCC7E_SET_COMPOSITION_CONTROL (0x000002EC) +#define NVCC7E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT 1:0 +#define NVCC7E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE (0x00000000) +#define NVCC7E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC (0x00000001) +#define NVCC7E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST (0x00000002) +#define NVCC7E_SET_COMPOSITION_CONTROL_DEPTH 11:4 +#define NVCC7E_SET_COMPOSITION_CONTROL_BYPASS 16:16 +#define NVCC7E_SET_COMPOSITION_CONTROL_BYPASS_DISABLE (0x00000000) +#define NVCC7E_SET_COMPOSITION_CONTROL_BYPASS_ENABLE (0x00000001) +#define NVCC7E_SET_COMPOSITION_CONSTANT_ALPHA (0x000002F0) +#define NVCC7E_SET_COMPOSITION_CONSTANT_ALPHA_K1 7:0 +#define NVCC7E_SET_COMPOSITION_CONSTANT_ALPHA_K2 15:8 +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT (0x000002F4) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT 3:0 +#define 
NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT 7:4 +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT 11:8 +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT 15:12 +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT 19:16 +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define 
NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT 23:20 +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT 27:24 +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT 31:28 +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVCC7E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVCC7E_SET_KEY_ALPHA (0x000002F8) +#define NVCC7E_SET_KEY_ALPHA_MIN 15:0 +#define NVCC7E_SET_KEY_ALPHA_MAX 31:16 +#define NVCC7E_SET_KEY_RED_CR (0x000002FC) +#define NVCC7E_SET_KEY_RED_CR_MIN 15:0 +#define NVCC7E_SET_KEY_RED_CR_MAX 31:16 +#define NVCC7E_SET_KEY_GREEN_Y (0x00000300) +#define NVCC7E_SET_KEY_GREEN_Y_MIN 15:0 +#define NVCC7E_SET_KEY_GREEN_Y_MAX 31:16 +#define NVCC7E_SET_KEY_BLUE_CB (0x00000304) +#define NVCC7E_SET_KEY_BLUE_CB_MIN 15:0 +#define NVCC7E_SET_KEY_BLUE_CB_MAX 31:16 +#define NVCC7E_SET_PRESENT_CONTROL (0x00000308) +#define NVCC7E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NVCC7E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4 +#define NVCC7E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NVCC7E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NVCC7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8 +#define NVCC7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NVCC7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NVCC7E_SET_PRESENT_CONTROL_STEREO_MODE 13:12 +#define NVCC7E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000) +#define NVCC7E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001) +#define NVCC7E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002) +#define NVCC7E_SET_ACQ_SEMAPHORE_VALUE_HI (0x0000030C) +#define NVCC7E_SET_ACQ_SEMAPHORE_VALUE_HI_VALUE 31:0 +#define NVCC7E_SET_ACQ_SEMAPHORE_CONTROL (0x00000330) +#define NVCC7E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15 +#define NVCC7E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000) +#define NVCC7E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001) +#define NVCC7E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE 13:12 +#define NVCC7E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_EQ (0x00000000) +#define NVCC7E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_CGEQ (0x00000001) +#define NVCC7E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_STRICT_GEQ (0x00000002) +#define NVCC7E_SET_ACQ_SEMAPHORE_VALUE (0x00000334) +#define NVCC7E_SET_ACQ_SEMAPHORE_VALUE_VALUE 31:0 +#define NVCC7E_SET_SCAN_DIRECTION (0x0000033C) +#define 
NVCC7E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION 0:0 +#define NVCC7E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION_FROM_LEFT (0x00000000) +#define NVCC7E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION_FROM_RIGHT (0x00000001) +#define NVCC7E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION 1:1 +#define NVCC7E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION_FROM_TOP (0x00000000) +#define NVCC7E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION_FROM_BOTTOM (0x00000001) +#define NVCC7E_SET_SCAN_DIRECTION_COLUMN_ORDER 2:2 +#define NVCC7E_SET_SCAN_DIRECTION_COLUMN_ORDER_FALSE (0x00000000) +#define NVCC7E_SET_SCAN_DIRECTION_COLUMN_ORDER_TRUE (0x00000001) +#define NVCC7E_SET_TIMESTAMP_ORIGIN_LO (0x00000340) +#define NVCC7E_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NVCC7E_SET_TIMESTAMP_ORIGIN_HI (0x00000344) +#define NVCC7E_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NVCC7E_SET_UPDATE_TIMESTAMP_LO (0x00000348) +#define NVCC7E_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NVCC7E_SET_UPDATE_TIMESTAMP_HI (0x0000034C) +#define NVCC7E_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NVCC7E_SET_INTERLOCK_FLAGS (0x00000370) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 0:0 +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+1):((i)+1) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 1:1 +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 2:2 +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 3:3 +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 4:4 +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 5:5 +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 6:6 +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 7:7 +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 8:8 +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVCC7E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS (0x00000374) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define 
NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define 
NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) 
+#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVCC7E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVCC7E_SET_CHROMA_VER (0x00000378) +#define NVCC7E_SET_CHROMA_VER_POSITION 1:0 +#define NVCC7E_SET_CHROMA_VER_POSITION_TOP (0x00000000) +#define NVCC7E_SET_CHROMA_VER_POSITION_CENTER (0x00000001) +#define NVCC7E_SET_CHROMA_VER_POSITION_BOTTOM (0x00000002) +#define NVCC7E_SET_CHROMA_VER_USE_SWPOSITION 2:2 +#define NVCC7E_SET_CHROMA_VER_USE_SWPOSITION_FALSE (0x00000000) +#define NVCC7E_SET_CHROMA_VER_USE_SWPOSITION_TRUE (0x00000001) +#define NVCC7E_SET_CHROMA_VER_WEIGHT_ODD 4:3 +#define NVCC7E_SET_CHROMA_VER_WEIGHT_ODD_WT_0 (0x00000000) +#define NVCC7E_SET_CHROMA_VER_WEIGHT_ODD_WT_QUARTER (0x00000001) +#define NVCC7E_SET_CHROMA_VER_WEIGHT_ODD_WT_HALF (0x00000002) +#define NVCC7E_SET_CHROMA_VER_WEIGHT_ODD_WT_THREE_QUARTER (0x00000003) +#define NVCC7E_SET_CHROMA_VER_WEIGHT_EVEN 6:5 +#define NVCC7E_SET_CHROMA_VER_WEIGHT_EVEN_WT_0 (0x00000000) +#define NVCC7E_SET_CHROMA_VER_WEIGHT_EVEN_WT_QUARTER (0x00000001) +#define NVCC7E_SET_CHROMA_VER_WEIGHT_EVEN_WT_HALF (0x00000002) +#define NVCC7E_SET_CHROMA_VER_WEIGHT_EVEN_WT_THREE_QUARTER (0x00000003) +#define NVCC7E_SET_CHROMA_VER_USE_SWWEIGHTS 7:7 +#define NVCC7E_SET_CHROMA_VER_USE_SWWEIGHTS_FALSE (0x00000000) +#define NVCC7E_SET_CHROMA_VER_USE_SWWEIGHTS_TRUE (0x00000001) +#define NVCC7E_SET_CHROMA_HOR (0x0000037C) +#define NVCC7E_SET_CHROMA_HOR_REPLACE_ODD 0:0 +#define NVCC7E_SET_CHROMA_HOR_REPLACE_ODD_DISABLE (0x00000000) +#define NVCC7E_SET_CHROMA_HOR_REPLACE_ODD_ENABLE (0x00000001) +#define NVCC7E_SET_CHROMA_HOR_REPLACE_EVEN 1:1 +#define NVCC7E_SET_CHROMA_HOR_REPLACE_EVEN_DISABLE (0x00000000) +#define NVCC7E_SET_CHROMA_HOR_REPLACE_EVEN_ENABLE (0x00000001) +#define NVCC7E_SET_CHROMA_HOR_WEIGHT_ODD 3:2 +#define NVCC7E_SET_CHROMA_HOR_WEIGHT_ODD_WT_0 (0x00000000) +#define NVCC7E_SET_CHROMA_HOR_WEIGHT_ODD_WT_QUARTER (0x00000001) +#define NVCC7E_SET_CHROMA_HOR_WEIGHT_ODD_WT_HALF (0x00000002) +#define NVCC7E_SET_CHROMA_HOR_WEIGHT_ODD_WT_THREE_QUARTER (0x00000003) +#define NVCC7E_SET_CHROMA_HOR_WEIGHT_EVEN 5:4 +#define NVCC7E_SET_CHROMA_HOR_WEIGHT_EVEN_WT_0 (0x00000000) +#define NVCC7E_SET_CHROMA_HOR_WEIGHT_EVEN_WT_QUARTER (0x00000001) +#define NVCC7E_SET_CHROMA_HOR_WEIGHT_EVEN_WT_HALF (0x00000002) +#define NVCC7E_SET_CHROMA_HOR_WEIGHT_EVEN_WT_THREE_QUARTER (0x00000003) +#define NVCC7E_SET_CHROMA_HOR_USE_SWWEIGHTS 6:6 +#define NVCC7E_SET_CHROMA_HOR_USE_SWWEIGHTS_FALSE (0x00000000) +#define NVCC7E_SET_CHROMA_HOR_USE_SWWEIGHTS_TRUE (0x00000001) +#define NVCC7E_SET_EXT_PACKET_CONTROL 
(0x00000398) +#define NVCC7E_SET_EXT_PACKET_CONTROL_ENABLE 0:0 +#define NVCC7E_SET_EXT_PACKET_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_EXT_PACKET_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_EXT_PACKET_CONTROL_LOCATION 4:4 +#define NVCC7E_SET_EXT_PACKET_CONTROL_LOCATION_VSYNC (0x00000000) +#define NVCC7E_SET_EXT_PACKET_CONTROL_LOCATION_VBLANK (0x00000001) +#define NVCC7E_SET_EXT_PACKET_CONTROL_FREQUENCY 8:8 +#define NVCC7E_SET_EXT_PACKET_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVCC7E_SET_EXT_PACKET_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVCC7E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE 12:12 +#define NVCC7E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_DISABLE (0x00000000) +#define NVCC7E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_ENABLE (0x00000001) +#define NVCC7E_SET_EXT_PACKET_CONTROL_SIZE 27:16 +#define NVCC7E_SET_EXT_PACKET_DATA (0x0000039C) +#define NVCC7E_SET_EXT_PACKET_DATA_DB0 7:0 +#define NVCC7E_SET_EXT_PACKET_DATA_DB1 15:8 +#define NVCC7E_SET_EXT_PACKET_DATA_DB2 23:16 +#define NVCC7E_SET_EXT_PACKET_DATA_DB3 31:24 +#define NVCC7E_SET_WIN_INFOFRAME (0x000003A0) +#define NVCC7E_SET_WIN_INFOFRAME_FID 7:0 +#define NVCC7E_SET_WIN_INFOFRAME_EN 16:16 +#define NVCC7E_SET_WIN_INFOFRAME_EN_DISABLE (0x00000000) +#define NVCC7E_SET_WIN_INFOFRAME_EN_ENABLE (0x00000001) +#define NVCC7E_SET_FMT_COEFFICIENT_C00 (0x00000400) +#define NVCC7E_SET_FMT_COEFFICIENT_C00_VALUE 20:0 +#define NVCC7E_SET_FMT_COEFFICIENT_C01 (0x00000404) +#define NVCC7E_SET_FMT_COEFFICIENT_C01_VALUE 20:0 +#define NVCC7E_SET_FMT_COEFFICIENT_C02 (0x00000408) +#define NVCC7E_SET_FMT_COEFFICIENT_C02_VALUE 20:0 +#define NVCC7E_SET_FMT_COEFFICIENT_C03 (0x0000040C) +#define NVCC7E_SET_FMT_COEFFICIENT_C03_VALUE 20:0 +#define NVCC7E_SET_FMT_COEFFICIENT_C10 (0x00000410) +#define NVCC7E_SET_FMT_COEFFICIENT_C10_VALUE 20:0 +#define NVCC7E_SET_FMT_COEFFICIENT_C11 (0x00000414) +#define NVCC7E_SET_FMT_COEFFICIENT_C11_VALUE 20:0 +#define NVCC7E_SET_FMT_COEFFICIENT_C12 (0x00000418) +#define NVCC7E_SET_FMT_COEFFICIENT_C12_VALUE 20:0 +#define NVCC7E_SET_FMT_COEFFICIENT_C13 (0x0000041C) +#define NVCC7E_SET_FMT_COEFFICIENT_C13_VALUE 20:0 +#define NVCC7E_SET_FMT_COEFFICIENT_C20 (0x00000420) +#define NVCC7E_SET_FMT_COEFFICIENT_C20_VALUE 20:0 +#define NVCC7E_SET_FMT_COEFFICIENT_C21 (0x00000424) +#define NVCC7E_SET_FMT_COEFFICIENT_C21_VALUE 20:0 +#define NVCC7E_SET_FMT_COEFFICIENT_C22 (0x00000428) +#define NVCC7E_SET_FMT_COEFFICIENT_C22_VALUE 20:0 +#define NVCC7E_SET_FMT_COEFFICIENT_C23 (0x0000042C) +#define NVCC7E_SET_FMT_COEFFICIENT_C23_VALUE 20:0 +#define NVCC7E_SET_ILUT_CONTROL (0x00000440) +#define NVCC7E_SET_ILUT_CONTROL_INTERPOLATE 0:0 +#define NVCC7E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVCC7E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVCC7E_SET_ILUT_CONTROL_MIRROR 1:1 +#define NVCC7E_SET_ILUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVCC7E_SET_ILUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVCC7E_SET_ILUT_CONTROL_MODE 3:2 +#define NVCC7E_SET_ILUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define NVCC7E_SET_ILUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVCC7E_SET_ILUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVCC7E_SET_ILUT_CONTROL_SIZE 18:8 +#define NVCC7E_SET_CSC00CONTROL (0x0000045C) +#define NVCC7E_SET_CSC00CONTROL_ENABLE 0:0 +#define NVCC7E_SET_CSC00CONTROL_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_CSC00CONTROL_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_CSC00COEFFICIENT_C00 (0x00000460) +#define NVCC7E_SET_CSC00COEFFICIENT_C00_VALUE 20:0 +#define 
NVCC7E_SET_CSC00COEFFICIENT_C01 (0x00000464) +#define NVCC7E_SET_CSC00COEFFICIENT_C01_VALUE 20:0 +#define NVCC7E_SET_CSC00COEFFICIENT_C02 (0x00000468) +#define NVCC7E_SET_CSC00COEFFICIENT_C02_VALUE 20:0 +#define NVCC7E_SET_CSC00COEFFICIENT_C03 (0x0000046C) +#define NVCC7E_SET_CSC00COEFFICIENT_C03_VALUE 20:0 +#define NVCC7E_SET_CSC00COEFFICIENT_C10 (0x00000470) +#define NVCC7E_SET_CSC00COEFFICIENT_C10_VALUE 20:0 +#define NVCC7E_SET_CSC00COEFFICIENT_C11 (0x00000474) +#define NVCC7E_SET_CSC00COEFFICIENT_C11_VALUE 20:0 +#define NVCC7E_SET_CSC00COEFFICIENT_C12 (0x00000478) +#define NVCC7E_SET_CSC00COEFFICIENT_C12_VALUE 20:0 +#define NVCC7E_SET_CSC00COEFFICIENT_C13 (0x0000047C) +#define NVCC7E_SET_CSC00COEFFICIENT_C13_VALUE 20:0 +#define NVCC7E_SET_CSC00COEFFICIENT_C20 (0x00000480) +#define NVCC7E_SET_CSC00COEFFICIENT_C20_VALUE 20:0 +#define NVCC7E_SET_CSC00COEFFICIENT_C21 (0x00000484) +#define NVCC7E_SET_CSC00COEFFICIENT_C21_VALUE 20:0 +#define NVCC7E_SET_CSC00COEFFICIENT_C22 (0x00000488) +#define NVCC7E_SET_CSC00COEFFICIENT_C22_VALUE 20:0 +#define NVCC7E_SET_CSC00COEFFICIENT_C23 (0x0000048C) +#define NVCC7E_SET_CSC00COEFFICIENT_C23_VALUE 20:0 +#define NVCC7E_SET_CSC0LUT_FP_NORM_SCALE (0x00000490) +#define NVCC7E_SET_CSC0LUT_FP_NORM_SCALE_VALUE 31:0 +#define NVCC7E_SET_CSC0LUT_CONTROL (0x000004A0) +#define NVCC7E_SET_CSC0LUT_CONTROL_INTERPOLATE 0:0 +#define NVCC7E_SET_CSC0LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVCC7E_SET_CSC0LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVCC7E_SET_CSC0LUT_CONTROL_MIRROR 1:1 +#define NVCC7E_SET_CSC0LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVCC7E_SET_CSC0LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVCC7E_SET_CSC0LUT_CONTROL_ENABLE 4:4 +#define NVCC7E_SET_CSC0LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_CSC0LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_CSC0LUT_CONTROL_CURVE 6:5 +#define NVCC7E_SET_CSC0LUT_CONTROL_CURVE_CSCLUT_PQ (0x00000000) +#define NVCC7E_SET_CSC0LUT_CONTROL_CURVE_CSCLUT_HLG (0x00000001) +#define NVCC7E_SET_CSC0LUT_CONTROL_CURVE_CSCLUT_DIRECT (0x00000002) +#define NVCC7E_SET_CSC01CONTROL (0x000004BC) +#define NVCC7E_SET_CSC01CONTROL_ENABLE 0:0 +#define NVCC7E_SET_CSC01CONTROL_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_CSC01CONTROL_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_CSC01COEFFICIENT_C00 (0x000004C0) +#define NVCC7E_SET_CSC01COEFFICIENT_C00_VALUE 20:0 +#define NVCC7E_SET_CSC01COEFFICIENT_C01 (0x000004C4) +#define NVCC7E_SET_CSC01COEFFICIENT_C01_VALUE 20:0 +#define NVCC7E_SET_CSC01COEFFICIENT_C02 (0x000004C8) +#define NVCC7E_SET_CSC01COEFFICIENT_C02_VALUE 20:0 +#define NVCC7E_SET_CSC01COEFFICIENT_C03 (0x000004CC) +#define NVCC7E_SET_CSC01COEFFICIENT_C03_VALUE 20:0 +#define NVCC7E_SET_CSC01COEFFICIENT_C10 (0x000004D0) +#define NVCC7E_SET_CSC01COEFFICIENT_C10_VALUE 20:0 +#define NVCC7E_SET_CSC01COEFFICIENT_C11 (0x000004D4) +#define NVCC7E_SET_CSC01COEFFICIENT_C11_VALUE 20:0 +#define NVCC7E_SET_CSC01COEFFICIENT_C12 (0x000004D8) +#define NVCC7E_SET_CSC01COEFFICIENT_C12_VALUE 20:0 +#define NVCC7E_SET_CSC01COEFFICIENT_C13 (0x000004DC) +#define NVCC7E_SET_CSC01COEFFICIENT_C13_VALUE 20:0 +#define NVCC7E_SET_CSC01COEFFICIENT_C20 (0x000004E0) +#define NVCC7E_SET_CSC01COEFFICIENT_C20_VALUE 20:0 +#define NVCC7E_SET_CSC01COEFFICIENT_C21 (0x000004E4) +#define NVCC7E_SET_CSC01COEFFICIENT_C21_VALUE 20:0 +#define NVCC7E_SET_CSC01COEFFICIENT_C22 (0x000004E8) +#define NVCC7E_SET_CSC01COEFFICIENT_C22_VALUE 20:0 +#define NVCC7E_SET_CSC01COEFFICIENT_C23 (0x000004EC) +#define 
NVCC7E_SET_CSC01COEFFICIENT_C23_VALUE 20:0 +#define NVCC7E_SET_TMO_CONTROL (0x00000500) +#define NVCC7E_SET_TMO_CONTROL_INTERPOLATE 0:0 +#define NVCC7E_SET_TMO_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVCC7E_SET_TMO_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVCC7E_SET_TMO_CONTROL_SAT_MODE 3:2 +#define NVCC7E_SET_TMO_CONTROL_SIZE 18:8 +#define NVCC7E_SET_TMO_LOW_INTENSITY_ZONE (0x00000508) +#define NVCC7E_SET_TMO_LOW_INTENSITY_ZONE_END 29:16 +#define NVCC7E_SET_TMO_LOW_INTENSITY_VALUE (0x0000050C) +#define NVCC7E_SET_TMO_LOW_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVCC7E_SET_TMO_LOW_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVCC7E_SET_TMO_LOW_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVCC7E_SET_TMO_MEDIUM_INTENSITY_ZONE (0x00000510) +#define NVCC7E_SET_TMO_MEDIUM_INTENSITY_ZONE_START 13:0 +#define NVCC7E_SET_TMO_MEDIUM_INTENSITY_ZONE_END 29:16 +#define NVCC7E_SET_TMO_MEDIUM_INTENSITY_VALUE (0x00000514) +#define NVCC7E_SET_TMO_MEDIUM_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVCC7E_SET_TMO_MEDIUM_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVCC7E_SET_TMO_MEDIUM_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVCC7E_SET_TMO_HIGH_INTENSITY_ZONE (0x00000518) +#define NVCC7E_SET_TMO_HIGH_INTENSITY_ZONE_START 13:0 +#define NVCC7E_SET_TMO_HIGH_INTENSITY_VALUE (0x0000051C) +#define NVCC7E_SET_TMO_HIGH_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVCC7E_SET_TMO_HIGH_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVCC7E_SET_TMO_HIGH_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVCC7E_SET_CSC10CONTROL (0x0000053C) +#define NVCC7E_SET_CSC10CONTROL_ENABLE 0:0 +#define NVCC7E_SET_CSC10CONTROL_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_CSC10CONTROL_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_CSC10COEFFICIENT_C00 (0x00000540) +#define NVCC7E_SET_CSC10COEFFICIENT_C00_VALUE 20:0 +#define NVCC7E_SET_CSC10COEFFICIENT_C01 (0x00000544) +#define NVCC7E_SET_CSC10COEFFICIENT_C01_VALUE 20:0 +#define NVCC7E_SET_CSC10COEFFICIENT_C02 (0x00000548) +#define NVCC7E_SET_CSC10COEFFICIENT_C02_VALUE 20:0 +#define NVCC7E_SET_CSC10COEFFICIENT_C03 (0x0000054C) +#define NVCC7E_SET_CSC10COEFFICIENT_C03_VALUE 20:0 +#define NVCC7E_SET_CSC10COEFFICIENT_C10 (0x00000550) +#define NVCC7E_SET_CSC10COEFFICIENT_C10_VALUE 20:0 +#define NVCC7E_SET_CSC10COEFFICIENT_C11 (0x00000554) +#define NVCC7E_SET_CSC10COEFFICIENT_C11_VALUE 20:0 +#define NVCC7E_SET_CSC10COEFFICIENT_C12 (0x00000558) +#define NVCC7E_SET_CSC10COEFFICIENT_C12_VALUE 20:0 +#define NVCC7E_SET_CSC10COEFFICIENT_C13 (0x0000055C) +#define NVCC7E_SET_CSC10COEFFICIENT_C13_VALUE 20:0 +#define NVCC7E_SET_CSC10COEFFICIENT_C20 (0x00000560) +#define NVCC7E_SET_CSC10COEFFICIENT_C20_VALUE 20:0 +#define NVCC7E_SET_CSC10COEFFICIENT_C21 (0x00000564) +#define NVCC7E_SET_CSC10COEFFICIENT_C21_VALUE 20:0 +#define NVCC7E_SET_CSC10COEFFICIENT_C22 (0x00000568) +#define NVCC7E_SET_CSC10COEFFICIENT_C22_VALUE 20:0 +#define NVCC7E_SET_CSC10COEFFICIENT_C23 (0x0000056C) +#define NVCC7E_SET_CSC10COEFFICIENT_C23_VALUE 20:0 +#define NVCC7E_SET_CSC1LUT_FP_SCALE (0x00000570) +#define NVCC7E_SET_CSC1LUT_FP_SCALE_VALUE 15:0 +#define NVCC7E_SET_CSC1LUT_CONTROL (0x00000580) +#define NVCC7E_SET_CSC1LUT_CONTROL_INTERPOLATE 0:0 +#define NVCC7E_SET_CSC1LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVCC7E_SET_CSC1LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVCC7E_SET_CSC1LUT_CONTROL_MIRROR 1:1 +#define NVCC7E_SET_CSC1LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVCC7E_SET_CSC1LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVCC7E_SET_CSC1LUT_CONTROL_ENABLE 4:4 +#define 
NVCC7E_SET_CSC1LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_CSC1LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_CSC1LUT_CONTROL_CURVE 6:5 +#define NVCC7E_SET_CSC1LUT_CONTROL_CURVE_CSCLUT_PQ (0x00000000) +#define NVCC7E_SET_CSC1LUT_CONTROL_CURVE_CSCLUT_HLG (0x00000001) +#define NVCC7E_SET_CSC1LUT_CONTROL_CURVE_CSCLUT_DIRECT (0x00000002) +#define NVCC7E_SET_CSC11CONTROL (0x0000059C) +#define NVCC7E_SET_CSC11CONTROL_ENABLE 0:0 +#define NVCC7E_SET_CSC11CONTROL_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_CSC11CONTROL_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_CSC11CONTROL_LEVEL 7:2 +#define NVCC7E_SET_CSC11COEFFICIENT_C00 (0x000005A0) +#define NVCC7E_SET_CSC11COEFFICIENT_C00_VALUE 20:0 +#define NVCC7E_SET_CSC11COEFFICIENT_C01 (0x000005A4) +#define NVCC7E_SET_CSC11COEFFICIENT_C01_VALUE 20:0 +#define NVCC7E_SET_CSC11COEFFICIENT_C02 (0x000005A8) +#define NVCC7E_SET_CSC11COEFFICIENT_C02_VALUE 20:0 +#define NVCC7E_SET_CSC11COEFFICIENT_C03 (0x000005AC) +#define NVCC7E_SET_CSC11COEFFICIENT_C03_VALUE 20:0 +#define NVCC7E_SET_CSC11COEFFICIENT_C10 (0x000005B0) +#define NVCC7E_SET_CSC11COEFFICIENT_C10_VALUE 20:0 +#define NVCC7E_SET_CSC11COEFFICIENT_C11 (0x000005B4) +#define NVCC7E_SET_CSC11COEFFICIENT_C11_VALUE 20:0 +#define NVCC7E_SET_CSC11COEFFICIENT_C12 (0x000005B8) +#define NVCC7E_SET_CSC11COEFFICIENT_C12_VALUE 20:0 +#define NVCC7E_SET_CSC11COEFFICIENT_C13 (0x000005BC) +#define NVCC7E_SET_CSC11COEFFICIENT_C13_VALUE 20:0 +#define NVCC7E_SET_CSC11COEFFICIENT_C20 (0x000005C0) +#define NVCC7E_SET_CSC11COEFFICIENT_C20_VALUE 20:0 +#define NVCC7E_SET_CSC11COEFFICIENT_C21 (0x000005C4) +#define NVCC7E_SET_CSC11COEFFICIENT_C21_VALUE 20:0 +#define NVCC7E_SET_CSC11COEFFICIENT_C22 (0x000005C8) +#define NVCC7E_SET_CSC11COEFFICIENT_C22_VALUE 20:0 +#define NVCC7E_SET_CSC11COEFFICIENT_C23 (0x000005CC) +#define NVCC7E_SET_CSC11COEFFICIENT_C23_VALUE 20:0 +#define NVCC7E_SET_CLAMP_RANGE (0x000005D0) +#define NVCC7E_SET_CLAMP_RANGE_LOW 15:0 +#define NVCC7E_SET_CLAMP_RANGE_HIGH 31:16 +#define NVCC7E_SW_RESERVED(b) (0x000005D4 + (b)*0x00000004) +#define NVCC7E_SW_RESERVED_VALUE 31:0 +#define NVCC7E_SET_DIRTY_RECT_SIZE (0x000005E4) +#define NVCC7E_SET_DIRTY_RECT_SIZE_WIDTH 15:0 +#define NVCC7E_SET_DIRTY_RECT_SIZE_HEIGHT 31:16 +#define NVCC7E_SET_DIRTY_RECT_POSITION (0x000005E8) +#define NVCC7E_SET_DIRTY_RECT_POSITION_X 15:0 +#define NVCC7E_SET_DIRTY_RECT_POSITION_Y 31:16 +#define NVCC7E_SET_SUPERFRAME (0x000005EC) +#define NVCC7E_SET_SUPERFRAME_ENABLE 0:0 +#define NVCC7E_SET_SUPERFRAME_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_SUPERFRAME_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_SUPERFRAME_MODE 2:1 +#define NVCC7E_SET_SUPERFRAME_MODE_FIXED (0x00000000) +#define NVCC7E_SET_SUPERFRAME_MODE_DYNAMIC (0x00000001) +#define NVCC7E_SET_SUPERFRAME_MODE_SW (0x00000003) +#define NVCC7E_SET_SUPERFRAME_RATIO_IN 15:8 +#define NVCC7E_SET_SUPERFRAME_RATIO_OUT 23:16 +#define NVCC7E_SET_SUPERFRAME_START_COUNT 31:24 +#define NVCC7E_SET_INFOFRAME_CTRL(b) (0x000005F0 + (b)*0x00000004) +#define NVCC7E_SET_INFOFRAME_CTRL_ENABLE 0:0 +#define NVCC7E_SET_INFOFRAME_CTRL_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_INFOFRAME_CTRL_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_INFOFRAME_CTRL_LOCATION 5:4 +#define NVCC7E_SET_INFOFRAME_CTRL_LOCATION_VBLANK (0x00000000) +#define NVCC7E_SET_INFOFRAME_CTRL_LOCATION_VSYNC (0x00000001) +#define NVCC7E_SET_INFOFRAME_CTRL_LOCATION_LINE (0x00000002) +#define NVCC7E_SET_INFOFRAME_CTRL_LINE_ID 30:16 +#define NVCC7E_SET_INFOFRAME_CTRL_LINE_ID_REVERSED 31:31 +#define 
NVCC7E_SET_INFOFRAME_CTRL_LINE_ID_REVERSED_DISABLE (0x00000000) +#define NVCC7E_SET_INFOFRAME_CTRL_LINE_ID_REVERSED_ENABLE (0x00000001) +#define NVCC7E_SET_INFOFRAME_HEADER(b) (0x000005F8 + (b)*0x00000004) +#define NVCC7E_SET_INFOFRAME_HEADER_HB0 7:0 +#define NVCC7E_SET_INFOFRAME_HEADER_HB1 15:8 +#define NVCC7E_SET_INFOFRAME_HEADER_HB2 23:16 +#define NVCC7E_SET_INFOFRAME_HEADER_HB3 31:24 +#define NVCC7E_SET_INFOFRAME_DATA0(b) (0x00000600 + (b)*0x00000004) +#define NVCC7E_SET_INFOFRAME_DATA0_DB0 7:0 +#define NVCC7E_SET_INFOFRAME_DATA0_DB1 15:8 +#define NVCC7E_SET_INFOFRAME_DATA0_DB2 23:16 +#define NVCC7E_SET_INFOFRAME_DATA0_DB3 31:24 +#define NVCC7E_SET_INFOFRAME_DATA1(b) (0x00000608 + (b)*0x00000004) +#define NVCC7E_SET_INFOFRAME_DATA1_DB4 7:0 +#define NVCC7E_SET_INFOFRAME_DATA1_DB5 15:8 +#define NVCC7E_SET_INFOFRAME_DATA1_DB6 23:16 +#define NVCC7E_SET_INFOFRAME_DATA1_DB7 31:24 +#define NVCC7E_SET_INFOFRAME_DATA2(b) (0x00000610 + (b)*0x00000004) +#define NVCC7E_SET_INFOFRAME_DATA2_DB8 7:0 +#define NVCC7E_SET_INFOFRAME_DATA2_DB9 15:8 +#define NVCC7E_SET_INFOFRAME_DATA2_DB10 23:16 +#define NVCC7E_SET_INFOFRAME_DATA2_DB11 31:24 +#define NVCC7E_SET_INFOFRAME_DATA3(b) (0x00000618 + (b)*0x00000004) +#define NVCC7E_SET_INFOFRAME_DATA3_DB12 7:0 +#define NVCC7E_SET_INFOFRAME_DATA3_DB13 15:8 +#define NVCC7E_SET_INFOFRAME_DATA3_DB14 23:16 +#define NVCC7E_SET_INFOFRAME_DATA3_DB15 31:24 +#define NVCC7E_SET_INFOFRAME_DATA4(b) (0x00000620 + (b)*0x00000004) +#define NVCC7E_SET_INFOFRAME_DATA4_DB16 7:0 +#define NVCC7E_SET_INFOFRAME_DATA4_DB17 15:8 +#define NVCC7E_SET_INFOFRAME_DATA4_DB18 23:16 +#define NVCC7E_SET_INFOFRAME_DATA4_DB19 31:24 +#define NVCC7E_SET_INFOFRAME_DATA5(b) (0x00000628 + (b)*0x00000004) +#define NVCC7E_SET_INFOFRAME_DATA5_DB20 7:0 +#define NVCC7E_SET_INFOFRAME_DATA5_DB21 15:8 +#define NVCC7E_SET_INFOFRAME_DATA5_DB22 23:16 +#define NVCC7E_SET_INFOFRAME_DATA5_DB23 31:24 +#define NVCC7E_SET_INFOFRAME_DATA6(b) (0x00000630 + (b)*0x00000004) +#define NVCC7E_SET_INFOFRAME_DATA6_DB24 7:0 +#define NVCC7E_SET_INFOFRAME_DATA6_DB25 15:8 +#define NVCC7E_SET_INFOFRAME_DATA6_DB26 23:16 +#define NVCC7E_SET_INFOFRAME_DATA6_DB27 31:24 +#define NVCC7E_SET_INFOFRAME_DATA7(b) (0x00000638 + (b)*0x00000004) +#define NVCC7E_SET_INFOFRAME_DATA7_DB28 7:0 +#define NVCC7E_SET_INFOFRAME_DATA7_DB29 15:8 +#define NVCC7E_SET_INFOFRAME_DATA7_DB30 23:16 +#define NVCC7E_SET_INFOFRAME_DATA7_DB31 31:24 +#define NVCC7E_SET_SURFACE_ADDRESS_HI_SEMAPHORE (0x00000640) +#define NVCC7E_SET_SURFACE_ADDRESS_HI_SEMAPHORE_ADDRESS_HI 31:0 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_SEMAPHORE (0x00000644) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_ADDRESS_LO 31:4 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_TARGET 3:2 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_TARGET_IOVA (0x00000000) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_ENABLE 0:0 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_SEMAPHORE_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_SURFACE_ADDRESS_HI_ACQ_SEMAPHORE (0x00000648) +#define NVCC7E_SET_SURFACE_ADDRESS_HI_ACQ_SEMAPHORE_ADDRESS_HI 31:0 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE (0x0000064C) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_ADDRESS_LO 31:4 +#define 
NVCC7E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_TARGET 3:2 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_TARGET_IOVA (0x00000000) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_ENABLE 0:0 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_SURFACE_ADDRESS_HI_NOTIFIER (0x00000650) +#define NVCC7E_SET_SURFACE_ADDRESS_HI_NOTIFIER_ADDRESS_HI 31:0 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_NOTIFIER (0x00000654) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ADDRESS_LO 31:4 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET 3:2 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_IOVA (0x00000000) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE 0:0 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_SURFACE_ADDRESS_HI_ISO(b) (0x00000658 + (b)*0x00000004) +#define NVCC7E_SET_SURFACE_ADDRESS_HI_ISO_ADDRESS_HI 31:0 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ISO(b) (0x00000670 + (b)*0x00000004) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ISO_ADDRESS_LO 31:4 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET 3:2 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_IOVA (0x00000000) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ISO_KIND 1:1 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_PITCH (0x00000000) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_BLOCKLINEAR (0x00000001) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE 0:0 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_SURFACE_ADDRESS_HI_ILUT (0x00000688) +#define NVCC7E_SET_SURFACE_ADDRESS_HI_ILUT_ADDRESS_HI 31:0 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ILUT (0x0000068C) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ILUT_ADDRESS_LO 31:4 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET 3:2 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_IOVA (0x00000000) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE 0:0 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE_DISABLE (0x00000000) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE_ENABLE (0x00000001) +#define NVCC7E_SET_SURFACE_ADDRESS_HI_TMO_LUT (0x00000690) +#define NVCC7E_SET_SURFACE_ADDRESS_HI_TMO_LUT_ADDRESS_HI 31:0 +#define NVCC7E_SET_SURFACE_ADDRESS_LO_TMO_LUT (0x00000694) +#define NVCC7E_SET_SURFACE_ADDRESS_LO_TMO_LUT_ADDRESS_LO 31:4 +#define 
NVCC7E_SET_SURFACE_ADDRESS_LO_TMO_LUT_TARGET 3:2
+#define NVCC7E_SET_SURFACE_ADDRESS_LO_TMO_LUT_TARGET_IOVA (0x00000000)
+#define NVCC7E_SET_SURFACE_ADDRESS_LO_TMO_LUT_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCC7E_SET_SURFACE_ADDRESS_LO_TMO_LUT_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCC7E_SET_SURFACE_ADDRESS_LO_TMO_LUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCC7E_SET_SURFACE_ADDRESS_LO_TMO_LUT_ENABLE 0:0
+#define NVCC7E_SET_SURFACE_ADDRESS_LO_TMO_LUT_ENABLE_DISABLE (0x00000000)
+#define NVCC7E_SET_SURFACE_ADDRESS_LO_TMO_LUT_ENABLE_ENABLE (0x00000001)
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clcc7e_h
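
The NVCC7E_DMA_* fields near the top of clcc7e.h give the layout of each method header word in the window channel's push buffer: the opcode in bits 31:29, the count of data words in bits 27:18, and the offset of the first method in bits 15:2. A minimal sketch of that encoding follows, assuming the offset field takes the dword offset (the method's byte offset shifted right by two), as the 15:2 placement suggests; the helper name is invented here, and real driver code builds headers through its own push-buffer utilities rather than hand-written shifts:

    #include <stdint.h>

    /* Builds one incrementing-method header for the NVCC7E window channel. */
    /* offset_bytes is the method offset (e.g. 0x224 for NVCC7E_SET_SIZE);  */
    /* count is the number of 32-bit data words that follow the header.     */
    static inline uint32_t nvcc7e_method_header(uint32_t offset_bytes, uint32_t count)
    {
        return (0x0u << 29)                            /* NVCC7E_DMA_OPCODE_METHOD */
             | ((count & 0x3FFu) << 18)                /* NVCC7E_DMA_METHOD_COUNT  */
             | (((offset_bytes >> 2) & 0x3FFFu) << 2); /* NVCC7E_DMA_METHOD_OFFSET */
    }

    /* Example: one SET_SIZE method; WIDTH sits in bits 15:0, HEIGHT in 31:16. */
    static void example_set_size(uint32_t *pb)
    {
        pb[0] = nvcc7e_method_header(0x224, 1); /* NVCC7E_SET_SIZE */
        pb[1] = (1080u << 16) | 1920u;          /* 1920 x 1080     */
    }
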
+ */ + +#ifndef CPUOPSYS_H +#define CPUOPSYS_H + +/*****************************************************************************/ +/* Define all OS/CPU-Chip related symbols */ + +/* ***** WINDOWS variations */ +#if defined(_WIN32) || defined(_WIN16) +# define NV_WINDOWS + +# if defined(_WIN32_WINNT) +# define NV_WINDOWS_NT +# elif defined(_WIN32_WCE) +# define NV_WINDOWS_CE +# else +# define NV_WINDOWS_9X +# endif +#endif /* _WIN32 || defined(_WIN16) */ + +/* ***** Unix variations */ +#if defined(__linux__) && !defined(NV_LINUX) && !defined(NV_VMWARE) +# define NV_LINUX +#endif /* defined(__linux__) */ + +#if defined(__VMWARE__) && !defined(NV_VMWARE) +# define NV_VMWARE +#endif /* defined(__VMWARE__) */ + +/* SunOS + gcc */ +#if defined(__sun__) && defined(__svr4__) && !defined(NV_SUNOS) +# define NV_SUNOS +#endif /* defined(__sun__) && defined(__svr4__) */ + +/* SunOS + Sun Compiler (named SunPro, Studio or Forte) */ +#if defined(__SUNPRO_C) || defined(__SUNPRO_CC) +# define NV_SUNPRO_C +# define NV_SUNOS +#endif /* defined(_SUNPRO_C) || defined(__SUNPRO_CC) */ + +#if defined(__FreeBSD__) && !defined(NV_BSD) +# define NV_BSD +#endif /* defined(__FreeBSD__) */ + +/* XXXar don't define NV_UNIX on MacOSX or vxworks or QNX */ +#if (defined(__unix__) || defined(__unix) || defined(__INTEGRITY) ) && !defined(nvmacosx) && !defined(vxworks) && !defined(NV_UNIX) && !defined(__QNX__) && !defined(__QNXNTO__)/* XXX until removed from Makefiles */ +# define NV_UNIX +#endif /* defined(__unix__) */ + +#if (defined(__QNX__) || defined(__QNXNTO__)) && !defined(NV_QNX) +# define NV_QNX +#endif + +#if (defined(__ANDROID__) || defined(ANDROID)) && !defined(NV_ANDROID) +# define NV_ANDROID +#endif + +#if defined(DceCore) && !defined(NV_DCECORE) +# define NV_DCECORE +#endif + +/* ***** Apple variations */ +#if defined(macintosh) || defined(__APPLE__) +# define NV_MACINTOSH +# if defined(__MACH__) +# define NV_MACINTOSH_OSX +# else +# define NV_MACINTOSH_OS9 +# endif +# if defined(__LP64__) +# define NV_MACINTOSH_64 +# endif +#endif /* defined(macintosh) */ + +/* ***** VxWorks */ +/* Tornado 2.21 is gcc 2.96 and #defines __vxworks. */ +/* Tornado 2.02 is gcc 2.7.2 and doesn't define any OS symbol, so we rely on */ +/* the build system #defining vxworks. 
*/ +#if defined(__vxworks) || defined(vxworks) +# define NV_VXWORKS +#endif + +/* ***** Integrity OS */ +#if defined(__INTEGRITY) +# if !defined(NV_INTEGRITY) +# define NV_INTEGRITY +# endif +#endif + +/* ***** Processor type variations */ +/* Note: The prefix NV_CPU_* is taken by Nvcm.h */ + +#if ((defined(_M_IX86) || defined(__i386__) || defined(__i386)) && !defined(NVCPU_X86)) /* XXX until removed from Makefiles */ +/* _M_IX86 for windows, __i386__ for Linux (or any x86 using gcc) */ +/* __i386 for Studio compiler on Solaris x86 */ +# define NVCPU_X86 /* any IA32 machine (not x86-64) */ +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(NV_LINUX) && defined(__ia64__) +# define NVCPU_IA64_LINUX /* any IA64 for Linux opsys */ +#endif +#if defined(NVCPU_IA64_WINDOWS) || defined(NVCPU_IA64_LINUX) || defined(IA64) +# define NVCPU_IA64 /* any IA64 for any opsys */ +#endif + +#if (defined(NV_MACINTOSH) && !(defined(__i386__) || defined(__x86_64__))) || defined(__PPC__) || defined(__ppc) +# if defined(__powerpc64__) && defined(__LITTLE_ENDIAN__) +# ifndef NVCPU_PPC64LE +# define NVCPU_PPC64LE /* PPC 64-bit little endian */ +# endif +# else +# ifndef NVCPU_PPC +# define NVCPU_PPC /* any non-PPC64LE PowerPC architecture */ +# endif +# ifndef NV_BIG_ENDIAN +# define NV_BIG_ENDIAN +# endif +# endif +# define NVCPU_FAMILY_PPC +#endif + +#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) +# define NVCPU_X86_64 /* any x86-64 for any opsys */ +#endif + +#if defined(NVCPU_X86) || defined(NVCPU_X86_64) +# define NVCPU_FAMILY_X86 +#endif + +#if defined(__riscv) && (__riscv_xlen==64) +# define NVCPU_RISCV64 +# if defined(__nvriscv) +# define NVCPU_NVRISCV64 +# endif +#endif + +#if defined(__arm__) || defined(_M_ARM) +/* + * 32-bit instruction set on, e.g., ARMv7 or AArch32 execution state + * on ARMv8 + */ +# define NVCPU_ARM +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(__aarch64__) || defined(__ARM64__) || defined(_M_ARM64) +# define NVCPU_AARCH64 /* 64-bit A64 instruction set on ARMv8 */ +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(NVCPU_ARM) || defined(NVCPU_AARCH64) +# define NVCPU_FAMILY_ARM +#endif + +#if defined(__SH4__) +# ifndef NVCPU_SH4 +# define NVCPU_SH4 /* Renesas (formerly Hitachi) SH4 */ +# endif +# if defined NV_WINDOWS_CE +# define NVCPU_MIN_PAGE_SHIFT 12 +# endif +#endif + +/* For Xtensa processors */ +#if defined(__XTENSA__) +# define NVCPU_XTENSA +# if defined(__XTENSA_EB__) +# define NV_BIG_ENDIAN +# endif +#endif + + +/* + * Other flavors of CPU type should be determined at run-time. + * For example, an x86 architecture with/without SSE. + * If it can compile, then there's no need for a compile time option. + * For some current GCC limitations, these may be fixed by using the Intel + * compiler for certain files in a Linux build. + */ + +/* The minimum page size can be determined from the minimum page shift */ +#if defined(NVCPU_MIN_PAGE_SHIFT) +#define NVCPU_MIN_PAGE_SIZE (1 << NVCPU_MIN_PAGE_SHIFT) +#endif + +#if defined(NVCPU_IA64) || defined(NVCPU_X86_64) || \ + defined(NV_MACINTOSH_64) || defined(NVCPU_AARCH64) || \ + defined(NVCPU_PPC64LE) || defined(NVCPU_RISCV64) +# define NV_64_BITS /* all architectures where pointers are 64 bits */ +#else +/* we assume 32 bits. I don't see a need for NV_16_BITS. */ +#endif + +/* For verification-only features not intended to be included in normal drivers */ +#if defined(ENABLE_VERIF_FEATURES) +#define NV_VERIF_FEATURES +#endif + +/* + * New, safer family of #define's -- these ones use 0 vs. 
1 rather than + * defined/!defined. This is advantageous because if you make a typo, + * say misspelled ENDIAN: + * + * #if NVCPU_IS_BIG_ENDAIN + * + * ...some compilers can give you a warning telling you that you screwed up. + * The compiler can also give you a warning if you forget to #include + * "cpuopsys.h" in your code before the point where you try to use these + * conditionals. + * + * Also, the names have been prefixed in more cases with "CPU" or "OS" for + * increased clarity. You can tell the names apart from the old ones because + * they all use "_IS_" in the name. + * + * Finally, these can be used in "if" statements and not just in #if's. For + * example: + * + * if (NVCPU_IS_BIG_ENDIAN) x = Swap32(x); + * + * Maybe some day in the far-off future these can replace the old #define's. + */ + +#define NV_IS_MODS 0 + +#define NVOS_IS_WINDOWS 0 +#if defined(NV_WINDOWS_CE) +#define NVOS_IS_WINDOWS_CE 1 +#else +#define NVOS_IS_WINDOWS_CE 0 +#endif +#if defined(NV_LINUX) +#define NVOS_IS_LINUX 1 +#else +#define NVOS_IS_LINUX 0 +#endif +#if defined(NV_UNIX) +#define NVOS_IS_UNIX 1 +#else +#define NVOS_IS_UNIX 0 +#endif +#if defined(NV_BSD) +#define NVOS_IS_FREEBSD 1 +#else +#define NVOS_IS_FREEBSD 0 +#endif +#if defined(NV_SUNOS) +#define NVOS_IS_SOLARIS 1 +#else +#define NVOS_IS_SOLARIS 0 +#endif +#define NVOS_IS_VMWARE 0 +#if defined(NV_QNX) +#define NVOS_IS_QNX 1 +#else +#define NVOS_IS_QNX 0 +#endif +#if defined(NV_ANDROID) +#define NVOS_IS_ANDROID 1 +#else +#define NVOS_IS_ANDROID 0 +#endif +#if defined(NV_MACINTOSH) +#define NVOS_IS_MACINTOSH 1 +#else +#define NVOS_IS_MACINTOSH 0 +#endif +#if defined(NV_VXWORKS) +#define NVOS_IS_VXWORKS 1 +#else +#define NVOS_IS_VXWORKS 0 +#endif +#if defined(NV_LIBOS) +#define NVOS_IS_LIBOS 1 +#else +#define NVOS_IS_LIBOS 0 +#endif +#if defined(NV_INTEGRITY) +#define NVOS_IS_INTEGRITY 1 +#else +#define NVOS_IS_INTEGRITY 0 +#endif + +#if defined(NVCPU_X86) +#define NVCPU_IS_X86 1 +#else +#define NVCPU_IS_X86 0 +#endif +#if defined(NVCPU_RISCV64) +#define NVCPU_IS_RISCV64 1 +#else +#define NVCPU_IS_RISCV64 0 +#endif +#if defined(NVCPU_NVRISCV64) +#define NVCPU_IS_NVRISCV64 1 +#else +#define NVCPU_IS_NVRISCV64 0 +#endif +#if defined(NVCPU_IA64) +#define NVCPU_IS_IA64 1 +#else +#define NVCPU_IS_IA64 0 +#endif +#if defined(NVCPU_X86_64) +#define NVCPU_IS_X86_64 1 +#else +#define NVCPU_IS_X86_64 0 +#endif +#if defined(NVCPU_FAMILY_X86) +#define NVCPU_IS_FAMILY_X86 1 +#else +#define NVCPU_IS_FAMILY_X86 0 +#endif +#if defined(NVCPU_PPC) +#define NVCPU_IS_PPC 1 +#else +#define NVCPU_IS_PPC 0 +#endif +#if defined(NVCPU_PPC64LE) +#define NVCPU_IS_PPC64LE 1 +#else +#define NVCPU_IS_PPC64LE 0 +#endif +#if defined(NVCPU_FAMILY_PPC) +#define NVCPU_IS_FAMILY_PPC 1 +#else +#define NVCPU_IS_FAMILY_PPC 0 +#endif +#if defined(NVCPU_ARM) +#define NVCPU_IS_ARM 1 +#else +#define NVCPU_IS_ARM 0 +#endif +#if defined(NVCPU_AARCH64) +#define NVCPU_IS_AARCH64 1 +#else +#define NVCPU_IS_AARCH64 0 +#endif +#if defined(NVCPU_FAMILY_ARM) +#define NVCPU_IS_FAMILY_ARM 1 +#else +#define NVCPU_IS_FAMILY_ARM 0 +#endif +#if defined(NVCPU_SH4) +#define NVCPU_IS_SH4 1 +#else +#define NVCPU_IS_SH4 0 +#endif +#if defined(NVCPU_XTENSA) +#define NVCPU_IS_XTENSA 1 +#else +#define NVCPU_IS_XTENSA 0 +#endif +#if defined(NV_BIG_ENDIAN) +#define NVCPU_IS_BIG_ENDIAN 1 +#else +#define NVCPU_IS_BIG_ENDIAN 0 +#endif +#if defined(NV_64_BITS) +#define NVCPU_IS_64_BITS 1 +#else +#define NVCPU_IS_64_BITS 0 +#endif +#if defined(NVCPU_FAMILY_ARM) +#define NVCPU_IS_PCIE_CACHE_COHERENT 0 +#else +#define 
NVCPU_IS_PCIE_CACHE_COHERENT 1 +#endif +#if defined(NV_DCECORE) +#define NVOS_IS_DCECORE 1 +#else +#define NVOS_IS_DCECORE 0 +#endif +/*****************************************************************************/ + +#endif /* CPUOPSYS_H */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h new file mode 100644 index 0000000..6f49405 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000base.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NV01_ROOT (client) control commands and parameters */ + +#define NV0000_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0000,NV0000_CTRL_##cat,idx) + +/* Client command categories (6bits) */ +#define NV0000_CTRL_RESERVED (0x00) +#define NV0000_CTRL_SYSTEM (0x01) +#define NV0000_CTRL_GPU (0x02) +#define NV0000_CTRL_GSYNC (0x03) +#define NV0000_CTRL_DIAG (0x04) +#define NV0000_CTRL_EVENT (0x05) +#define NV0000_CTRL_NVD (0x06) +#define NV0000_CTRL_SWINSTR (0x07) +#define NV0000_CTRL_PROC (0x09) +#define NV0000_CTRL_SYNC_GPU_BOOST (0x0A) +#define NV0000_CTRL_GPUACCT (0x0B) +#define NV0000_CTRL_VGPU (0x0C) +#define NV0000_CTRL_CLIENT (0x0D) + +// per-OS categories start at highest category and work backwards +#define NV0000_CTRL_OS_WINDOWS (0x3F) +#define NV0000_CTRL_OS_MACOS (0x3E) +#define NV0000_CTRL_OS_UNIX (0x3D) + + +/* + * NV0000_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0000_CTRL_CMD_NULL (0x0) /* finn: Evaluated from "(FINN_NV01_ROOT_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* _ctrl0000_base_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h new file mode 100644 index 0000000..3417f7f --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h @@ -0,0 +1,184 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000client.finn +// + +#include "ctrl/ctrl0000/ctrl0000base.h" + +#include "ctrl/ctrlxxxx.h" +#include "class/cl0000.h" +#include "rs_access.h" + +/* + * NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE + * + * This command may be used to query the memory address space type associated with an object + * + * Parameters: + * hObject[IN] + * handle of the object to look up + * addrSpaceType[OUT] + * addrSpaceType with associated memory descriptor + * + * Possible status values are: + * NV_OK + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_OBJECT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE (0xd01) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS { + NvHandle hObject; /* [in] - Handle of object to look up */ + NvU32 mapFlags; /* [in] - Flags that will be used when mapping the object */ + NvU32 addrSpaceType; /* [out] - Memory Address Space Type */ +} NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS; + +#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID 0x00000000 +#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_SYSMEM 0x00000001 +#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM 0x00000002 +#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_REGMEM 0x00000003 +#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC 0x00000004 +#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC_MC 0x00000005 + +/* + * NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO + * + * This command may be used to query information on a handle + */ +#define NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO (0xd02) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS { + NvHandle hObject; /* [in] - Handle of object to look up */ + NvU32 index; /* [in] - Type of lookup */ + + union { + NvHandle hResult; /* [out] - Result of lookup when result is a handle type */ + NV_DECLARE_ALIGNED(NvU64 iResult, 8); /* [out] -
Result of lookup when result is an integer */ + } data; +} NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS; + +#define NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_INVALID 0x00000000 +#define NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_PARENT 0x00000001 +#define NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_CLASSID 0x00000002 + +/* + * NV0000_CTRL_CMD_CLIENT_GET_ACCESS_RIGHTS + * + * This command may be used to get this client's access rights for an object. + * The object whose access rights are checked does not have to be owned by + * the client calling the command; it is owned by the client identified by + * the hClient parameter + */ +#define NV0000_CTRL_CMD_CLIENT_GET_ACCESS_RIGHTS (0xd03) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS { + NvHandle hObject; /* [in] - Handle of object to look up */ + NvHandle hClient; /* [in] - Handle of client which owns hObject */ + RS_ACCESS_MASK maskResult; /* [out] - Result of lookup */ +} NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS; + +/* + * NV0000_CTRL_CMD_CLIENT_SET_INHERITED_SHARE_POLICY + * + * DEPRECATED: Calls NV0000_CTRL_CMD_CLIENT_SHARE_OBJECT with hObject=hClient + * + * This command will modify a client's inherited share policy list + * The policy is applied in the same way that NvRmShare applies policies, + * except to the client's inherited policy list instead of an object's policy list + */ +#define NV0000_CTRL_CMD_CLIENT_SET_INHERITED_SHARE_POLICY (0xd04) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS { + RS_SHARE_POLICY sharePolicy; /* [in] - Share Policy to apply */ +} NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS; + +/* + * NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE + * + * This command may be used to get a handle of a child of a given type + */ +#define NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE (0xd05) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS { + NvHandle hParent; /* [in] - Handle of parent object */ + NvU32 classId; /* [in] - Class ID of the child object */ + NvHandle hObject; /* [out] - Handle of the child object (0 if not found) */ +} NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS; + +/* + * NV0000_CTRL_CMD_CLIENT_SHARE_OBJECT + * + * This command is meant to imitate the NvRmShare API. + * Applies a share policy to an object, which should be owned by the caller's client. + * The policy is applied in the same way that NvRmShare applies policies. + * + * This ctrl command is only meant to be used in older branches. For releases after R450, + * use NvRmShare directly instead.
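A minimal usage sketch for the client controls above (illustrative only, not part of the patch): it assumes an rmControl() wrapper standing in for the platform's RM control entry point (an NvRmControl-style call), which these headers do not define.

#include "ctrl/ctrl0000/ctrl0000client.h"

/* Placeholder for the RM control entry point; assumed, not defined here. */
extern NvU32 rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                       void *pParams, NvU32 paramsSize);

/* Look up the parent handle of an object via GET_HANDLE_INFO. */
static NvHandle GetParentHandle(NvHandle hClient, NvHandle hObject)
{
    NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS params = {0};

    params.hObject = hObject;
    params.index   = NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_PARENT;

    /* Client controls are issued against the NV01_ROOT (client) object. */
    if (rmControl(hClient, hClient, NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO,
                  &params, sizeof(params)) != 0 /* NV_OK */)
        return 0;

    return params.data.hResult; /* handle-typed result for INDEX_PARENT */
}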
+ */ +#define NV0000_CTRL_CMD_CLIENT_SHARE_OBJECT (0xd06) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS { + NvHandle hObject; /* [in] - Handle of object to share */ + RS_SHARE_POLICY sharePolicy; /* [in] - Share Policy to apply */ +} NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS; + +/* + * NV0000_CTRL_CMD_CLIENT_OBJECTS_ARE_DUPLICATES + * + * This command returns true if the objects are duplicates. + * + * Currently supported only for memory objects. + */ +#define NV0000_CTRL_CMD_CLIENT_OBJECTS_ARE_DUPLICATES (0xd07) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_OBJECTS_ARE_DUPLICATES_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CLIENT_OBJECTS_ARE_DUPLICATES_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV0000_CTRL_CLIENT_OBJECTS_ARE_DUPLICATES_PARAMS { + NvHandle hObject1; /* [in] - Handle of object to be checked */ + NvHandle hObject2; /* [in] - Handle of object to be checked */ + NvBool bDuplicates; /* [out] - Returns true if duplicates */ +} NV0000_CTRL_CLIENT_OBJECTS_ARE_DUPLICATES_PARAMS; + + + +/* _ctrl0000client_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h new file mode 100644 index 0000000..45f421e --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h @@ -0,0 +1,324 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000diag.finn +// + +#include "ctrl/ctrl0000/ctrl0000base.h" + +#include "ctrl/ctrlxxxx.h" +/* NV01_ROOT (client) system control commands and parameters */ + +/* + * NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_STATE + * + * This command returns the current lock meter logging state. + * + * state + * This parameter returns the current lock meter logging state. + * NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_DISABLED + * This value indicates lock metering is disabled. + * NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_ENABLED + * This value indicates lock metering is enabled.
+ * count + * This parameter returns the total number of lock metering entries + * (NV0000_CTRL_DIAG_LOCK_METER_ENTRY) available. This value will + * not exceed NV0000_CTRL_DIAG_LOCK_METER_MAX_ENTRIES. When lock metering + * is enabled this parameter will return zero. + * missedCount + * This parameter returns the number of lock metering entries that had + * to be discarded due to a full lock metering table. This value will + * not exceed NV0000_CTRL_DIAG_LOCK_METER_MAX_TABLE_ENTRIES. When lock + * metering is enabled this parameter will return zero. + * bCircularBuffer + * This parameter returns the type of buffer. + * TRUE + * Buffer is circular + * FALSE + * Buffer is sequential + * + * Possible status values returned are: + * NV_OK + */ +#define NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_STATE (0x480) /* finn: Evaluated from "(FINN_NV01_ROOT_DIAG_INTERFACE_ID << 8) | NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_PARAMS_MESSAGE_ID (0x80U) + +typedef struct NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_PARAMS { + NvU32 state; + NvU32 count; + NvU32 missedCount; + NvBool bCircularBuffer; +} NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_PARAMS; + +/* valid lock metering state values */ +#define NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_DISABLED (0x00000000) +#define NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_ENABLED (0x00000001) + +/* maximum possible number of lock metering entries stored internally */ +#define NV0000_CTRL_DIAG_LOCK_METER_MAX_TABLE_ENTRIES (0x20000) + +/* + * NV0000_CTRL_CMD_DIAG_SET_LOCK_METER_STATE + * + * This command sets the current lock meter logging state. + * + * state + * This parameter specifies the new state of the lock metering mechanism. + * Legal state values are: + * NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_DISABLE + * This value disables lock metering. + * NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_ENABLE + * This value enables lock metering. + * NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_RESET + * This value resets, or clears, all lock metering state. Lock + * metering must be disabled prior to attempting a reset. + * bCircularBuffer + * This parameter specifies the type of buffer. + * Possible values are: + * TRUE + * For circular buffer. + * FALSE + * For sequential buffer. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV0000_CTRL_CMD_DIAG_SET_LOCK_METER_STATE (0x481) /* finn: Evaluated from "(FINN_NV01_ROOT_DIAG_INTERFACE_ID << 8) | NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_PARAMS_MESSAGE_ID (0x81U) + +typedef struct NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_PARAMS { + NvU32 state; + NvBool bCircularBuffer; +} NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_PARAMS; + +/* valid lock metering state values */ +#define NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_DISABLE (0x00000000) +#define NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_ENABLE (0x00000001) +#define NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_RESET (0x00000002) + +/* + * NV0000_CTRL_DIAG_LOCK_METER_ENTRY + * + * This structure represents a single lock meter entry. + * + * counter + * This field contains the number of nanoseconds elapsed since + * the last system boot when the lock meter entry was generated. + * freq + * This field contains the CPU performance counter frequency in units + * of ticks per second. + * line + * This field contains the relevant line number. + * filename + * This field contains the relevant file name.
+ * tag + * This field contains a tag uniquely identifying the user of the metered + * lock operations. + * cpuNum + * This field contains the CPU number from which the metered operation + * was initiated. + * irql + * This field contains the IRQL at which the metered operation was + * initiated. + * data0 + * data1 + * data2 + * These fields contain tag-specific data. + */ +#define NV0000_CTRL_DIAG_LOCK_METER_ENTRY_FILENAME_LENGTH (0xc) + +typedef struct NV0000_CTRL_DIAG_LOCK_METER_ENTRY { + NV_DECLARE_ALIGNED(NvU64 counter, 8); + + NvU32 line; + NvU8 filename[NV0000_CTRL_DIAG_LOCK_METER_ENTRY_FILENAME_LENGTH]; + + NvU16 tag; + NvU8 cpuNum; + NvU8 irql; + + NV_DECLARE_ALIGNED(NvU64 threadId, 8); + + NvU32 data0; + NvU32 data1; + NvU32 data2; +} NV0000_CTRL_DIAG_LOCK_METER_ENTRY; + +/* valid lock meter entry tag values */ +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ACQUIRE_SEMA (0x00000001) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ACQUIRE_SEMA_FORCED (0x00000002) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ACQUIRE_SEMA_COND (0x00000003) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_RELEASE_SEMA (0x00000004) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ACQUIRE_API (0x00000010) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_RELEASE_API (0x00000011) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ACQUIRE_GPUS (0x00000020) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_RELEASE_GPUS (0x00000021) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_DATA (0x00000100) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_RMCTRL (0x00001000) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_CFG_GETEX (0x00002002) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_CFG_SETEX (0x00002003) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_VIDHEAP (0x00003000) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_MAPMEM (0x00003001) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_UNMAPMEM (0x00003002) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_MAPMEM_DMA (0x00003003) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_UNMAPMEM_DMA (0x00003004) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ALLOC (0x00004000) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ALLOC_MEM (0x00004001) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_DUP_OBJECT (0x00004010) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_CLIENT (0x00005000) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_DEVICE (0x00005001) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_SUBDEVICE (0x00005002) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_SUBDEVICE_DIAG (0x00005003) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_DISP (0x00005004) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_DISP_CMN (0x00005005) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_CHANNEL (0x00005006) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_CHANNEL_MPEG (0x00005007) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_CHANNEL_DISP (0x00005008) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_MEMORY (0x00005009) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_FBMEM (0x0000500A) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_OBJECT (0x0000500B) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_EVENT (0x0000500C) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_IDLE_CHANNELS (0x00006000) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_BIND_CTXDMA (0x00007000) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ALLOC_CTXDMA (0x00007001) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ISR (0x0000F000) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_DPC (0x0000F00F) + +/* + * NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_ENTRIES + * + * This command returns lock metering data in a fixed-sized array of entries. 
+ * Each request will return up to NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_MAX_ENTRIES + * entries. + * + * It is up to the caller to repeat these requests to retrieve the total number + * of entries reported by NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_STATE. + * + * entryCount + * This parameter returns the total number of valid entries returned + * in the entries array. This value will not exceed + * NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_MAX but may be less. + * A value of zero indicates there are no more valid entries. + * entries + * This parameter contains the storage into which lock metering entry + * data is returned. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_ENTRIES (0x485) /* finn: Evaluated from "(FINN_NV01_ROOT_DIAG_INTERFACE_ID << 8) | NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_PARAMS_MESSAGE_ID" */ + +/* total number of entries returned */ +#define NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_MAX (0x40) + +#define NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_PARAMS_MESSAGE_ID (0x85U) + +typedef struct NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_PARAMS { + NvU32 entryCount; + NV_DECLARE_ALIGNED(NV0000_CTRL_DIAG_LOCK_METER_ENTRY entries[NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_MAX], 8); +} NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_PARAMS; + +/* + * NV0000_CTRL_CMD_DIAG_PROFILE_RPC + * + * This command returns the RPC runtime information, and + * will only return valid data when running inside VGX mode. + * + * rpcProfileCmd: + * RPC profiler command issued by rpc profiler utility + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0000_CTRL_CMD_DIAG_PROFILE_RPC (0x488) /* finn: Evaluated from "(FINN_NV01_ROOT_DIAG_INTERFACE_ID << 8) | NV0000_CTRL_DIAG_PROFILE_RPC_PARAMS_MESSAGE_ID" */ + +typedef struct RPC_METER_ENTRY { + NV_DECLARE_ALIGNED(NvU64 startTimeInNs, 8); + NV_DECLARE_ALIGNED(NvU64 endTimeInNs, 8); + NV_DECLARE_ALIGNED(NvU64 rpcDataTag, 8); + NV_DECLARE_ALIGNED(NvU64 rpcExtraData, 8); +} RPC_METER_ENTRY; + +#define NV0000_CTRL_DIAG_PROFILE_RPC_PARAMS_MESSAGE_ID (0x88U) + +typedef struct NV0000_CTRL_DIAG_PROFILE_RPC_PARAMS { + NvU32 rpcProfileCmd; +} NV0000_CTRL_DIAG_PROFILE_RPC_PARAMS; + +#define NV0000_CTRL_PROFILE_RPC_CMD_DISABLE (0x00000000) +#define NV0000_CTRL_PROFILE_RPC_CMD_ENABLE (0x00000001) +#define NV0000_CTRL_PROFILE_RPC_CMD_RESET (0x00000002) + +/* + * NV0000_CTRL_CMD_DIAG_DUMP_RPC + * + * This command returns the RPC runtime information, which + * will be logged by NV0000_CTRL_CMD_DIAG_PROFILE_RPC command + * when running inside VGX mode. + * + * When issuing this command, the RPC profiler has to be disabled. + * + * firstEntryOffset: + * [IN] offset for first entry. + * + * outputEntryCount: + * [OUT] number of entries returned in rpcProfilerBuffer. + * + * remainingEntryCount: + * [OUT] number of entries remaining. + * + * elapsedTimeInNs: + * [OUT] runtime for the RPC profiler tool.
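An illustrative drain loop for the lock metering controls above (a sketch, not part of the patch; rmControl() is the placeholder RM control call declared in the earlier sketch). Per the descriptions, metering is disabled first, since counts read as zero while metering is enabled, then the table is read out in NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_MAX-sized chunks until a chunk comes back empty.

static void DrainLockMeterEntries(NvHandle hClient)
{
    NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_PARAMS state = {0};

    /* Stop metering so that entryCount reports real values. */
    state.state = NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_DISABLE;
    if (rmControl(hClient, hClient, NV0000_CTRL_CMD_DIAG_SET_LOCK_METER_STATE,
                  &state, sizeof(state)) != 0 /* NV_OK */)
        return;

    for (;;) {
        NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_PARAMS chunk = {0};
        NvU32 i;

        if (rmControl(hClient, hClient,
                      NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_ENTRIES,
                      &chunk, sizeof(chunk)) != 0 /* NV_OK */)
            break;

        if (chunk.entryCount == 0) /* no more valid entries */
            break;

        for (i = 0; i < chunk.entryCount; i++) {
            /* chunk.entries[i].counter, .tag, .line, ... are valid here. */
        }
    }
}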
+ * + * rpcProfilerBuffer: + * [OUT] buffer to store the RPC entries + */ + +#define NV0000_CTRL_CMD_DIAG_DUMP_RPC (0x489) /* finn: Evaluated from "(FINN_NV01_ROOT_DIAG_INTERFACE_ID << 8) | NV0000_CTRL_DIAG_DUMP_RPC_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_DIAG_RPC_MAX_ENTRIES (100) + +#define NV0000_CTRL_DIAG_DUMP_RPC_PARAMS_MESSAGE_ID (0x89U) + +typedef struct NV0000_CTRL_DIAG_DUMP_RPC_PARAMS { + NvU32 firstEntryOffset; + NvU32 outputEntryCount; + NvU32 remainingEntryCount; + NV_DECLARE_ALIGNED(NvU64 elapsedTimeInNs, 8); + NV_DECLARE_ALIGNED(RPC_METER_ENTRY rpcProfilerBuffer[NV0000_CTRL_DIAG_RPC_MAX_ENTRIES], 8); +} NV0000_CTRL_DIAG_DUMP_RPC_PARAMS; + +/* _ctrl0000diag_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h new file mode 100644 index 0000000..ded1265 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h @@ -0,0 +1,145 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000event.finn +// + +#include "ctrl/ctrl0000/ctrl0000base.h" + +#include "ctrl/ctrlxxxx.h" +#include "class/cl0000.h" + +#define NV0000_NOTIFIERS_DISPLAY_CHANGE (0) +#define NV0000_NOTIFIERS_VGPU_UNBIND_EVENT (1) +#define NV0000_NOTIFIERS_VGPU_BIND_EVENT (2) +#define NV0000_NOTIFIERS_GPU_BIND_UNBIND_EVENT (3) +#define NV0000_NOTIFIERS_MAXCOUNT (4) + +/* + * NV0000_CTRL_CMD_EVENT_SET_NOTIFICATION + * + * This command sets event notification for the system events. + * + * event + * This parameter specifies the type of event to which the specified + * action is to be applied. The valid event values can be found in + * cl0000.h. + * + * action + * This parameter specifies the desired event notification action. + * Valid notification actions include: + * NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE + * This action disables event notification for the specified + * event. + * NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE + * This action enables single-shot event notification for the + * specified event. + * NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT + * This action enables repeated event notification for the + * specified event.
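A short sketch of arming an event (illustrative, not part of the patch; rmControl() is the placeholder RM control call from the first sketch). It assumes the NV0000_NOTIFIERS_* indices above are the values expected in the event field, and uses the parameter structure defined just below.

static NvU32 EnableGpuBindUnbindEvents(NvHandle hClient)
{
    NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS params = {0};

    params.event  = NV0000_NOTIFIERS_GPU_BIND_UNBIND_EVENT; /* assumed index */
    params.action = NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;

    /* Repeated notification: stays armed until explicitly disabled. */
    return rmControl(hClient, hClient, NV0000_CTRL_CMD_EVENT_SET_NOTIFICATION,
                     &params, sizeof(params));
}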
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_CLIENT + * + */ + +#define NV0000_CTRL_CMD_EVENT_SET_NOTIFICATION (0x501) /* finn: Evaluated from "(FINN_NV01_ROOT_EVENT_INTERFACE_ID << 8) | NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS { + NvU32 event; + NvU32 action; +} NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +/* valid action values */ +#define NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE (0x00000000) +#define NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE (0x00000001) +#define NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002) + +typedef struct NV0000_CTRL_SYSTEM_EVENT_DATA_DISPLAY_CHANGE { + NvU32 deviceMask; +} NV0000_CTRL_SYSTEM_EVENT_DATA_DISPLAY_CHANGE; + +typedef struct NV0000_CTRL_SYSTEM_EVENT_DATA_VGPU_UNBIND { + NvU32 gpuId; +} NV0000_CTRL_SYSTEM_EVENT_DATA_VGPU_UNBIND; + +typedef struct NV0000_CTRL_SYSTEM_EVENT_DATA_VGPU_BIND { + NvU32 gpuId; +} NV0000_CTRL_SYSTEM_EVENT_DATA_VGPU_BIND; + +typedef struct NV0000_CTRL_SYSTEM_EVENT_DATA_GPU_BIND_UNBIND { + NvU32 gpuId; + NvBool bBind; +} NV0000_CTRL_SYSTEM_EVENT_DATA_GPU_BIND_UNBIND; + +/* + * NV0000_CTRL_CMD_GET_SYSTEM_EVENT_DATA + * + * This command reads the client's event data queue in FIFO order. + * See the description of NV01_EVENT for details on registering events. + * + * event + * Output only as selective event data query is not supported yet. + * Event type. + * Valid event type values can be found in this header file. + * + * data + * Data associated with the event. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND when system event queue is empty + * + */ + +#define NV0000_CTRL_CMD_GET_SYSTEM_EVENT_DATA (0x502) /* finn: Evaluated from "(FINN_NV01_ROOT_EVENT_INTERFACE_ID << 8) | NV0000_CTRL_GET_SYSTEM_EVENT_DATA_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GET_SYSTEM_EVENT_DATA_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_CTRL_GET_SYSTEM_EVENT_DATA_PARAMS { + NvU32 event; + + union { + NV0000_CTRL_SYSTEM_EVENT_DATA_DISPLAY_CHANGE display; + NV0000_CTRL_SYSTEM_EVENT_DATA_VGPU_UNBIND vgpuUnbind; + NV0000_CTRL_SYSTEM_EVENT_DATA_VGPU_BIND vgpuBind; + NV0000_CTRL_SYSTEM_EVENT_DATA_GPU_BIND_UNBIND gpuBindUnbind; + } data; +} NV0000_CTRL_GET_SYSTEM_EVENT_DATA_PARAMS; + +/* _ctrl0000event_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h new file mode 100644 index 0000000..1a684e8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h @@ -0,0 +1,1129 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000gpu.finn +// + +#include "ctrl/ctrl0000/ctrl0000base.h" +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrl2080/ctrl2080nvlink_common.h" +#include "nvlimits.h" + +/* NV01_ROOT (client) GPU control commands and parameters */ + +typedef NV2080_CTRL_NVLINK_LINK_MASK NV0000_CTRL_NVLINK_LINK_MASK; + +/* + * NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS + * + * This command returns a table of attached gpuId values. + * The table is NV0000_CTRL_GPU_MAX_ATTACHED_GPUS entries in size. + * + * gpuIds[] + * This parameter returns the table of attached GPU IDs. + * The GPU ID is an opaque platform-dependent value that can be used + * with the NV0000_CTRL_CMD_GPU_GET_ID_INFO command to retrieve + * additional information about the GPU. The valid entries in gpuIds[] + * are contiguous, with a value of NV0000_CTRL_GPU_INVALID_ID indicating + * the invalid entries. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS (0x201U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_MAX_ATTACHED_GPUS 32U +#define NV0000_CTRL_GPU_INVALID_ID (0xffffffffU) + +#define NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS { + NvU32 gpuIds[NV0000_CTRL_GPU_MAX_ATTACHED_GPUS]; +} NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS; + +/* + * Deprecated. Please use NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 instead.
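A sketch of walking the attached-GPU table (illustrative, not part of the patch; rmControl() as in the earlier sketches). Valid entries are contiguous, so iteration stops at the first NV0000_CTRL_GPU_INVALID_ID.

static void ForEachAttachedGpu(NvHandle hClient, void (*visit)(NvU32 gpuId))
{
    NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS params = {{0}};
    NvU32 i;

    if (rmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS,
                  &params, sizeof(params)) != 0 /* NV_OK */)
        return;

    for (i = 0; i < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS; i++) {
        if (params.gpuIds[i] == NV0000_CTRL_GPU_INVALID_ID)
            break; /* first invalid entry terminates the table */
        visit(params.gpuIds[i]);
    }
}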
+ */ +#define NV0000_CTRL_CMD_GPU_GET_ID_INFO (0x202U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_ID_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_MAX_SZNAME 128U + +#define NV0000_CTRL_NO_NUMA_NODE (-1) + +#define NV0000_CTRL_GPU_GET_ID_INFO_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_CTRL_GPU_GET_ID_INFO_PARAMS { + NvU32 gpuId; + NvU32 gpuFlags; + NvU32 deviceInstance; + NvU32 subDeviceInstance; + NV_DECLARE_ALIGNED(NvP64 szName, 8); + NvU32 sliStatus; + NvU32 boardId; + NvU32 gpuInstance; + NvS32 numaId; +} NV0000_CTRL_GPU_GET_ID_INFO_PARAMS; + +#define NV0000_CTRL_SLI_STATUS_OK (0x00000000U) +#define NV0000_CTRL_SLI_STATUS_OS_NOT_SUPPORTED (0x00000002U) +#define NV0000_CTRL_SLI_STATUS_GPU_NOT_SUPPORTED (0x00000040U) +#define NV0000_CTRL_SLI_STATUS_INVALID_GPU_COUNT (0x00000001U) + +/* + * NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 + * This command returns GPU instance information for the specified GPU. + * + * [in] gpuId + * This parameter should specify a valid GPU ID value. If there + * is no GPU present with the specified ID, a status of + * NV_ERR_INVALID_ARGUMENT is returned. + * [out] gpuFlags + * This parameter returns various flags values for the specified GPU. + * Valid flag values include: + * NV0000_CTRL_GPU_ID_INFO_IN_USE + * When true this flag indicates there are client references + * to the GPU in the form of device class instantiations (see + * NV01_DEVICE or NV03_DEVICE descriptions for details). + * NV0000_CTRL_GPU_ID_INFO_LINKED_INTO_SLI_DEVICE + * When true this flag indicates the GPU is linked into an + * active SLI device. + * NV0000_CTRL_GPU_ID_INFO_MOBILE + * When true this flag indicates the GPU is a mobile GPU. + * NV0000_CTRL_GPU_ID_BOOT_MASTER + * When true this flag indicates the GPU is the boot master GPU. + * NV0000_CTRL_GPU_ID_INFO_SOC + * When true this flag indicates the GPU is part of a + * System-on-Chip (SOC). + * NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED + * When ATS is enabled on the system. + * NV0000_CTRL_GPU_ID_INFO_SOC_TYPE + * This field indicates the GPU type for SOC-based GPUs. Legal values + * for this field include: + * NV0000_CTRL_GPU_ID_INFO_SOC_TYPE_NONE + * This value indicates the GPU is not an SOC GPU. + * NV0000_CTRL_GPU_ID_INFO_SOC_TYPE_DISPLAY + * This value indicates the GPU is an SOC display GPU. + * NV0000_CTRL_GPU_ID_INFO_SOC_TYPE_IGPU + * This value indicates the GPU is an iGPU. + * NV0000_CTRL_GPU_ID_INFO_SOC_TYPE_DISPLAY_AND_IGPU + * This value indicates the GPU is both an iGPU and an SOC + * display GPU. + * [out] deviceInstance + * This parameter returns the broadcast device instance number associated + * with the specified GPU. This value can be used to instantiate + * a broadcast reference to the GPU using the NV01_DEVICE classes. + * [out] subDeviceInstance + * This parameter returns the unicast subdevice instance number + * associated with the specified GPU. This value can be used to + * instantiate a unicast reference to the GPU using the NV20_SUBDEVICE + * classes. + * [out] sliStatus + * This parameter returns the SLI status for the specified GPU. + * Legal values for this member are described by NV0000_CTRL_SLI_STATUS. + * [out] boardId + * This parameter returns the board ID value with which the + * specified GPU is associated. Multiple GPUs can share the + * same board ID in multi-GPU configurations. + * [out] gpuInstance + * This parameter returns the GPU instance number for the specified GPU.
+ * GPU instance numbers are assigned in bus-probe order beginning with + * zero and are limited to one less than the number of GPUs in the system. + * [out] numaId + * This parameter returns the ID of the NUMA node for the specified GPU or + * the subscribed MIG partition when MIG is enabled. + * In case there is no NUMA node, NV0000_CTRL_NO_NUMA_NODE is returned. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + + + +#define NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 (0x205U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS { + NvU32 gpuId; + NvU32 gpuFlags; + NvU32 deviceInstance; + NvU32 subDeviceInstance; + NvU32 sliStatus; + NvU32 boardId; + NvU32 gpuInstance; + NvS32 numaId; +} NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS; + + +/* valid flags values */ +#define NV0000_CTRL_GPU_ID_INFO_IN_USE 0:0 +#define NV0000_CTRL_GPU_ID_INFO_IN_USE_FALSE (0x00000000U) +#define NV0000_CTRL_GPU_ID_INFO_IN_USE_TRUE (0x00000001U) +#define NV0000_CTRL_GPU_ID_INFO_LINKED_INTO_SLI_DEVICE 1:1 +#define NV0000_CTRL_GPU_ID_INFO_LINKED_INTO_SLI_DEVICE_FALSE (0x00000000U) +#define NV0000_CTRL_GPU_ID_INFO_LINKED_INTO_SLI_DEVICE_TRUE (0x00000001U) +#define NV0000_CTRL_GPU_ID_INFO_MOBILE 2:2 +#define NV0000_CTRL_GPU_ID_INFO_MOBILE_FALSE (0x00000000U) +#define NV0000_CTRL_GPU_ID_INFO_MOBILE_TRUE (0x00000001U) +#define NV0000_CTRL_GPU_ID_INFO_BOOT_MASTER 3:3 +#define NV0000_CTRL_GPU_ID_INFO_BOOT_MASTER_FALSE (0x00000000U) +#define NV0000_CTRL_GPU_ID_INFO_BOOT_MASTER_TRUE (0x00000001U) + + +#define NV0000_CTRL_GPU_ID_INFO_SOC 5:5 +#define NV0000_CTRL_GPU_ID_INFO_SOC_FALSE (0x00000000U) +#define NV0000_CTRL_GPU_ID_INFO_SOC_TRUE (0x00000001U) +#define NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED 6:6 +#define NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED_FALSE (0x00000000U) +#define NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED_TRUE (0x00000001U) +#define NV0000_CTRL_GPU_ID_INFO_SOC_TYPE 8:7 +#define NV0000_CTRL_GPU_ID_INFO_SOC_TYPE_NONE (0x00000000U) +#define NV0000_CTRL_GPU_ID_INFO_SOC_TYPE_DISPLAY (0x00000001U) +#define NV0000_CTRL_GPU_ID_INFO_SOC_TYPE_IGPU (0x00000002U) +#define NV0000_CTRL_GPU_ID_INFO_SOC_TYPE_DISPLAY_AND_IGPU (0x00000003U) + +/* + * NV0000_CTRL_CMD_GPU_GET_INIT_STATUS + * + * This command returns the initialization status for the specified GPU, and + * will return NV_ERR_INVALID_STATE if called prior to GPU + * initialization. + * + * gpuId + * This parameter should specify a valid GPU ID value. If there + * is no GPU present with the specified ID, a status of + * NV_ERR_INVALID_ARGUMENT is returned. + * status + * This parameter returns the status code identifying the initialization + * state of the GPU. If this parameter has the value NV_OK, + * then no errors were detected during GPU initialization. Otherwise, this + * parameter specifies the top-level error that was detected during GPU + * initialization. Note that a value of NV_OK only means that + * no errors were detected during the actual GPU initialization, and other + * errors may have occurred that prevent the GPU from being attached or + * accessible via the NV01_DEVICE or NV20_SUBDEVICE classes.
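The gpuFlags fields above use the SDK's "high:low" bit-range notation (NV0000_CTRL_GPU_ID_INFO_SOC_TYPE, for example, expands to 8:7). A minimal decoding sketch follows; the FLD_* macros are local stand-ins for the DRF_* helpers in nvmisc.h and rely on the fact that (1?h:l) selects the high bit and (0?h:l) the low bit.

/* Local stand-ins for the DRF_* helpers; "f" must be a high:low define. */
#define FLD_LOW(f)    ((0?f) % 32)
#define FLD_HIGH(f)   ((1?f) % 32)
#define FLD_MASK(f)   (0xFFFFFFFFU >> (31 - FLD_HIGH(f) + FLD_LOW(f)))
#define FLD_VAL(f,v)  (((v) >> FLD_LOW(f)) & FLD_MASK(f))

/* Decode selected fields of gpuFlags returned by GET_ID_INFO_V2. */
static void DecodeGpuFlags(NvU32 gpuFlags)
{
    NvBool bInUse  = (FLD_VAL(NV0000_CTRL_GPU_ID_INFO_IN_USE, gpuFlags) ==
                      NV0000_CTRL_GPU_ID_INFO_IN_USE_TRUE);
    NvU32  socType = FLD_VAL(NV0000_CTRL_GPU_ID_INFO_SOC_TYPE, gpuFlags);

    (void)bInUse;  /* e.g. gate device-class instantiation on this */
    (void)socType; /* compare against ..._SOC_TYPE_DISPLAY, _IGPU, ... */
}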
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV0000_CTRL_CMD_GPU_GET_INIT_STATUS (0x203U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS { + NvU32 gpuId; + NvU32 status; +} NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS; + +/* + * NV0000_CTRL_CMD_GPU_GET_DEVICE_IDS + * + * This command returns a mask of valid device IDs. These device IDs + * can be used to instantiate the NV01_DEVICE_0 class (see NV01_DEVICE_0 + * for more information). + * + * deviceIds + * This parameter returns the mask of valid device IDs. Each enabled bit + * in the mask corresponds to a valid device instance. Valid device + * instances can be used to initialize the NV0080_ALLOC_PARAMETERS + * structure when using NvRmAlloc to instantiate device handles. The + * number of device IDs will not exceed NV_MAX_DEVICES in number. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV0000_CTRL_CMD_GPU_GET_DEVICE_IDS (0x204U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS { + NvU32 deviceIds; +} NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS; + + + +/* + * NV0000_CTRL_CMD_GPU_GET_PROBED_IDS + * + * This command returns a table of probed gpuId values. + * The table is NV0000_CTRL_GPU_MAX_PROBED_GPUS entries in size. + * + * gpuIds[] + * This parameter returns the table of probed GPU IDs. + * The GPU ID is an opaque platform-dependent value that can + * be used with the NV0000_CTRL_CMD_GPU_ATTACH_IDS and + * NV0000_CTRL_CMD_GPU_DETACH_ID commands to attach and detach + * the GPU. + * The valid entries in gpuIds[] are contiguous, with a value + * of NV0000_CTRL_GPU_INVALID_ID indicating the invalid entries. + * excludedGpuIds[] + * This parameter returns the table of excluded GPU IDs. + * An excluded GPU ID is an opaque platform-dependent value that + * can be used with NV0000_CTRL_CMD_GPU_GET_PCI_INFO and + * NV0000_CTRL_CMD_GPU_GET_UUID_INFO. + * The valid entries in excludedGpuIds[] are contiguous, with a value + * of NV0000_CTRL_GPU_INVALID_ID indicating the invalid entries. + * gpuFlags[] + * This parameter returns flags for each valid entry in the gpuIds[] + * table. Note that excluded GPUs do not have a gpuFlags[] entry. + * Valid flag values include: + * NV0000_CTRL_GPU_PROBED_ID_INFO_FLAGS_SOC_DISPLAY + * When TRUE this flag indicates the GPU supports SOC Display + * functionality. 
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV0000_CTRL_CMD_GPU_GET_PROBED_IDS (0x214U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_MAX_PROBED_GPUS NV_MAX_DEVICES + +#define NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS_MESSAGE_ID (0x14U) + +typedef struct NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS { + NvU32 gpuIds[NV0000_CTRL_GPU_MAX_PROBED_GPUS]; + NvU32 excludedGpuIds[NV0000_CTRL_GPU_MAX_PROBED_GPUS]; + NvU32 gpuFlags[NV0000_CTRL_GPU_MAX_PROBED_GPUS]; +} NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS; + +/* valid flags values */ +#define NV0000_CTRL_GPU_PROBED_ID_FLAGS_SOC_DISPLAY 0:0 +#define NV0000_CTRL_GPU_PROBED_ID_FLAGS_SOC_DISPLAY_FALSE (0x00000000U) +#define NV0000_CTRL_GPU_PROBED_ID_FLAGS_SOC_DISPLAY_TRUE (0x00000001U) + +/* + * NV0000_CTRL_CMD_GPU_GET_PCI_INFO + * + * This command takes a gpuId and returns PCI bus information about + * the device. If the OS does not support returning PCI bus + * information, this call will return NV_ERR_NOT_SUPPORTED + * + * gpuId + * This parameter should specify a valid GPU ID value. If there + * is no GPU present with the specified ID, a status of + * NV_ERR_INVALID_ARGUMENT is returned. + * + * domain + * This parameter returns the PCI domain of the GPU. + * + * bus + * This parameter returns the PCI bus of the GPU. + * + * slot + * This parameter returns the PCI slot of the GPU. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0000_CTRL_CMD_GPU_GET_PCI_INFO (0x21bU) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS_MESSAGE_ID (0x1BU) + +typedef struct NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS { + NvU32 gpuId; + NvU32 domain; + NvU16 bus; + NvU16 slot; +} NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS; + +/* + * NV0000_CTRL_CMD_GPU_ATTACH_IDS + * + * This command attaches the GPUs with the gpuIds matching those in + * the table provided by the client. + * The table is NV0000_CTRL_GPU_MAX_PROBED_GPUS entries in size. + * + * gpuIds[] + * This parameter holds the table of gpuIds to attach. At least + * one gpuId must be specified; clients may use the special + * gpuId value NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS to indicate + * that all probed GPUs are to be attached. + * The entries in gpuIds[] must be contiguous, with a value of + * NV0000_CTRL_GPU_INVALID_ID to indicate the first invalid + * entry. + * If one or more of the gpuId values do not specify a GPU found + * in the system, the NV_ERR_INVALID_ARGUMENT error + * status is returned. + * + * failedId + * If NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS is specified and + * a GPU cannot be attached, the NV0000_CTRL_CMD_GPU_ATTACH_IDS + * command returns an error code and saves the failing GPU's + * gpuId in this field. + * + * If a table of gpuIds is provided, these gpuIds will be validated + * against the RM's table of probed gpuIds and attached in turn, + * if valid; if NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS is used, all + * probed gpuIds will be attached, in the order the associated GPUs + * were probed in by the RM. + * + * If a gpuId fails to attach, this gpuId is stored in the failedId + * field. Any GPUs attached by the command prior to the failure are + * detached.
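A sketch of attaching every probed GPU with the ATTACH_ALL sentinel (illustrative, not part of the patch; rmControl() as in the earlier sketches, and the parameter structure is defined just below). On failure, failedId reports the GPU that could not be attached.

static NvU32 AttachAllProbedGpus(NvHandle hClient, NvU32 *pFailedId)
{
    NV0000_CTRL_GPU_ATTACH_IDS_PARAMS params;
    NvU32 status;
    NvU32 i;

    /* Mark every slot invalid, then request attach-all in slot 0. */
    for (i = 0; i < NV0000_CTRL_GPU_MAX_PROBED_GPUS; i++)
        params.gpuIds[i] = NV0000_CTRL_GPU_INVALID_ID;
    params.gpuIds[0] = NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS;
    params.failedId  = NV0000_CTRL_GPU_INVALID_ID;

    status = rmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_ATTACH_IDS,
                       &params, sizeof(params));
    if (status != 0 /* NV_OK */ && pFailedId)
        *pFailedId = params.failedId; /* gpuId that failed to attach */

    return status;
}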
+ * + * If multiple clients use NV0000_CTRL_CMD_GPU_ATTACH_IDS to attach + * a gpuId, the RM ensures that the gpuId won't be detached until + * all clients have issued a call to NV0000_CTRL_CMD_GPU_DETACH_IDS + * to detach the gpuId (or have terminated). + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OPERATING_SYSTEM + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_IRQ_EDGE_TRIGGERED + * NV_ERR_IRQ_NOT_FIRING + */ +#define NV0000_CTRL_CMD_GPU_ATTACH_IDS (0x215U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_ATTACH_IDS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS (0x0000ffffU) + +#define NV0000_CTRL_GPU_ATTACH_IDS_PARAMS_MESSAGE_ID (0x15U) + +typedef struct NV0000_CTRL_GPU_ATTACH_IDS_PARAMS { + NvU32 gpuIds[NV0000_CTRL_GPU_MAX_PROBED_GPUS]; + NvU32 failedId; +} NV0000_CTRL_GPU_ATTACH_IDS_PARAMS; + +/* + * NV0000_CTRL_CMD_GPU_DETACH_IDS + * + * This command detaches the GPUs with the gpuIds matching those in + * the table provided by the client. + * The table is NV0000_CTRL_GPU_MAX_ATTACHED_GPUS entries in size. + * + * gpuIds[] + * This parameter holds the table of gpuIds to detach. At least + * one gpuId must be specified; clients may use the special + * gpuId NV0000_CTRL_GPU_DETACH_ALL_ATTACHED_IDS to indicate that + * all attached GPUs are to be detached. + * The entries in gpuIds[] must be contiguous, with a value of + * NV0000_CTRL_GPU_INVALID_ID to indicate the first invalid + * entry. + * If one or more of the gpuId values do not specify a GPU found + * in the system, the NV_ERR_INVALID_ARGUMENT error + * status is returned. + * + * If a table of gpuIds is provided, these gpuIds will be validated + * against the RM's list of attached gpuIds; each valid gpuId is + * detached immediately if it's no longer in use (i.e. if there are + * no client references to the associated GPU in the form of + * device class instantiations (see the NV01_DEVICE or NV03_DEVICE + * descriptions for details)) and if no other client still requires + * the associated GPU to be attached. + * + * If a given gpuId can't be detached immediately, it will instead + * be detached when the last client reference is freed or when + * the last client that issued NV0000_CTRL_CMD_GPU_ATTACH_IDS for + * this gpuId either issues NV0000_CTRL_CMD_GPU_DETACH_IDS or exits + * without detaching the gpuId explicitly. + * + * Clients may use the NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS command + * to obtain a table of the attached gpuIds. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OPERATING_SYSTEM + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0000_CTRL_CMD_GPU_DETACH_IDS (0x216U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_DETACH_IDS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_DETACH_ALL_ATTACHED_IDS (0x0000ffffU) + +#define NV0000_CTRL_GPU_DETACH_IDS_PARAMS_MESSAGE_ID (0x16U) + +typedef struct NV0000_CTRL_GPU_DETACH_IDS_PARAMS { + NvU32 gpuIds[NV0000_CTRL_GPU_MAX_ATTACHED_GPUS]; +} NV0000_CTRL_GPU_DETACH_IDS_PARAMS; + + + +/* + * NV0000_CTRL_CMD_GPU_GET_VIDEO_LINKS + * + * This command returns information about video bridge connections + * detected between GPUs in the system, organized as a table + * with one row per attached GPU and none, one or more peer GPUs + * listed in the columns of each row, if connected to the row head + * GPU via a video bridge. 
+ *
+ *   gpuId
+ *     For each row, this field holds the GPU ID of the GPU
+ *     whose connections are listed in the row.
+ *
+ *   connectedGpuIds
+ *     For each row, this table holds the GPU IDs of the
+ *     GPUs connected to the GPU identified via the 'gpuId'
+ *     field.
+ *
+ *   links
+ *     This table holds information about the video bridges
+ *     connected between GPUs in the system. Each row
+ *     represents connections to a single GPU.
+ *
+ * Please note: the table only reports video links between already
+ * attached GPUs.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_CTRL_CMD_GPU_GET_VIDEO_LINKS (0x219U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_MAX_VIDEO_LINKS 8U
+
+typedef struct NV0000_CTRL_GPU_VIDEO_LINKS {
+    NvU32 gpuId;
+    NvU32 connectedGpuIds[NV0000_CTRL_GPU_MAX_VIDEO_LINKS];
+} NV0000_CTRL_GPU_VIDEO_LINKS;
+
+#define NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS_MESSAGE_ID (0x19U)
+
+typedef struct NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS {
+    NV0000_CTRL_GPU_VIDEO_LINKS links[NV0000_CTRL_GPU_MAX_ATTACHED_GPUS];
+} NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS;
+
+
+
+/*
+ * NV0000_CTRL_CMD_GPU_GET_UUID_INFO
+ *
+ * This command returns requested information pertaining to the GPU
+ * specified by the GPU UUID passed in.
+ *
+ * Generally only GPUs that have been attached are visible to this call. Therefore
+ * queries on unattached GPUs will fail with NV_ERR_OBJECT_NOT_FOUND. However,
+ * a query for a SHA1 UUID may succeed for an unattached GPU in cases where the GID
+ * is cached, such as an excluded GPU.
+ *
+ *   gpuGuid (INPUT)
+ *     The GPU UUID of the GPU whose parameters are to be returned. Refer to
+ *     NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information.
+ *
+ *   flags (INPUT)
+ *     The _FORMAT* flags designate an ASCII string format or a binary format.
+ *
+ *     The _TYPE* flags designate either SHA-1-based (32-hex-character) or
+ *     SHA-256-based (64-hex-character).
+ *
+ *   gpuId (OUTPUT)
+ *     The GPU ID of the GPU identified by gpuGuid. Refer to
+ *     NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information.
+ *
+ *   deviceInstance (OUTPUT)
+ *     The device instance of the GPU identified by gpuGuid. Refer to
+ *     NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information.
+ *
+ *   subdeviceInstance (OUTPUT)
+ *     The subdevice instance of the GPU identified by gpuGuid. Refer to
+ *     NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *
+ */
+#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO (0x274U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS_MESSAGE_ID" */
+
+/* maximum possible number of bytes of GID information */
+#define NV0000_GPU_MAX_GID_LENGTH (0x00000100U)
+
+#define NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS_MESSAGE_ID (0x74U)
+
+typedef struct NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS {
+    NvU8  gpuUuid[NV0000_GPU_MAX_GID_LENGTH];
+    NvU32 flags;
+    NvU32 gpuId;
+    NvU32 deviceInstance;
+    NvU32 subdeviceInstance;
+} NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS;
+
+#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_FORMAT        1:0
+#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_FORMAT_ASCII  (0x00000000U)
+#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_FORMAT_BINARY (0x00000002U)
+
+#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_TYPE          2:2
+#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_TYPE_SHA1     (0x00000000U)
+#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_TYPE_SHA256   (0x00000001U)
+
+/*
+ * NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID
+ *
+ * This command returns the GPU UUID for the provided GPU ID.
+ * Note that only GPUs that have been attached are visible to this call.
+ * Therefore queries on unattached GPUs will fail
+ * with NV_ERR_OBJECT_NOT_FOUND.
+ *
+ *   gpuId (INPUT)
+ *     The GPU ID whose parameters are to be returned. Refer to
+ *     NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information.
+ *
+ *   flags (INPUT)
+ *
+ *     NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT_ASCII
+ *       This value is used to request the GPU UUID in ASCII format.
+ *     NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT_BINARY
+ *       This value is used to request the GPU UUID in binary format.
+ *
+ *     NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE_SHA1
+ *       This value is used to request that the GPU UUID value
+ *       be SHA1-based (32-hex-character).
+ *     NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE_SHA256
+ *       This value is used to request that the GPU UUID value
+ *       be SHA256-based (64-hex-character).
+ *
+ *   gpuUuid[NV0000_GPU_MAX_GID_LENGTH] (OUTPUT)
+ *     The GPU UUID of the GPU identified by GPU ID. Refer to
+ *     NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information.
+ *
+ *   uuidStrLen (OUTPUT)
+ *     The length of the returned UUID, which depends on the format
+ *     requested via the flags.
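+ *
+ * As a hedged usage sketch (same hypothetical NvRmControl wrapper as
+ * in the earlier example; myGpuId is assumed), requesting the ASCII,
+ * SHA-1-based UUID might look like:
+ *
+ *   NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS p;
+ *   NV_STATUS status;
+ *   memset(&p, 0, sizeof(p));
+ *   p.gpuId = myGpuId;
+ *   p.flags = NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT_ASCII |
+ *             NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE_SHA1;
+ *   status = NvRmControl(hClient, hClient,
+ *                        NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID,
+ *                        &p, sizeof(p));
+ *   // on NV_OK, p.gpuUuid holds p.uuidStrLen valid bytes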
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID (0x275U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS_MESSAGE_ID (0x75U)
+
+typedef struct NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS {
+    NvU32 gpuId;
+    NvU32 flags;
+    NvU8  gpuUuid[NV0000_GPU_MAX_GID_LENGTH];
+    NvU32 uuidStrLen;
+} NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS;
+
+/* valid format values */
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT        1:0
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT_ASCII  (0x00000000U)
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT_BINARY (0x00000002U)
+
+/* valid type values */
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE          2:2
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE_SHA1     (0x00000000U)
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE_SHA256   (0x00000001U)
+
+
+
+/*
+ * NV0000_CTRL_CMD_GPU_MODIFY_DRAIN_STATE
+ *
+ * This command is used to enter or exit the so-called "drain" state.
+ * When this state is enabled, the existing clients continue executing
+ * as usual, however no new client connections are allowed.
+ * This is done in order to "drain" the system of the running clients
+ * in preparation to selectively powering down the GPU.
+ * No GPU can enter the "drain" state if that GPU is in an SLI group.
+ * In that case, NV_ERR_IN_USE is returned.
+ * Requires administrator privileges.
+ *
+ * It is expected that the "drain" state will eventually be deprecated
+ * and replaced with another mechanism to quiesce a GPU (Bug 1718113).
+ *
+ *   gpuId (INPUT)
+ *     This parameter should specify a valid GPU ID value. If there
+ *     is no GPU present with the specified ID, a status of
+ *     NV_ERR_INVALID_ARGUMENT is returned.
+ *   newState (INPUT)
+ *     This input parameter is used to enter or exit the "drain"
+ *     software state of the GPU specified by the gpuId parameter.
+ *     Possible values are:
+ *       NV0000_CTRL_GPU_DRAIN_STATE_ENABLED
+ *       NV0000_CTRL_GPU_DRAIN_STATE_DISABLED
+ *   flags (INPUT)
+ *     NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE
+ *       if set, upon reaching quiescence, a request will be made to
+ *       the OS to "forget" the PCI device associated with the
+ *       GPU specified by the gpuId parameter, in case such a request
+ *       is supported by the OS. Otherwise, NV_ERR_NOT_SUPPORTED
+ *       will be returned.
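+ *
+ * For illustration only (hypothetical NvRmControl wrapper, as in the
+ * earlier examples), draining a GPU and asking the OS to forget the
+ * device might look like:
+ *
+ *   NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS p;
+ *   NV_STATUS status;
+ *   memset(&p, 0, sizeof(p));
+ *   p.gpuId    = myGpuId;
+ *   p.newState = NV0000_CTRL_GPU_DRAIN_STATE_ENABLED;
+ *   p.flags    = NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE;
+ *   status = NvRmControl(hClient, hClient,
+ *                        NV0000_CTRL_CMD_GPU_MODIFY_DRAIN_STATE,
+ *                        &p, sizeof(p));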
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_IN_USE
+ */
+
+#define NV0000_CTRL_CMD_GPU_MODIFY_DRAIN_STATE (0x278U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS_MESSAGE_ID" */
+
+/* Possible values of newState */
+#define NV0000_CTRL_GPU_DRAIN_STATE_DISABLED (0x00000000U)
+#define NV0000_CTRL_GPU_DRAIN_STATE_ENABLED  (0x00000001U)
+
+/* Defined bits for the "flags" argument */
+#define NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE (0x00000001U)
+#define NV0000_CTRL_GPU_DRAIN_STATE_FLAG_LINK_DISABLE  (0x00000002U)
+
+#define NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS_MESSAGE_ID (0x78U)
+
+typedef struct NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS {
+    NvU32 gpuId;
+    NvU32 newState;
+    NvU32 flags;
+} NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPU_QUERY_DRAIN_STATE
+ *
+ *   gpuId (INPUT)
+ *     This parameter should specify a valid GPU ID value. If there
+ *     is no GPU present with the specified ID, a status of
+ *     NV_ERR_INVALID_ARGUMENT is returned.
+ *   drainState (OUTPUT)
+ *     This parameter returns a value indicating if the "drain"
+ *     state is currently enabled or not for the specified GPU. See the
+ *     description of NV0000_CTRL_CMD_GPU_MODIFY_DRAIN_STATE.
+ *     Possible values are:
+ *       NV0000_CTRL_GPU_DRAIN_STATE_ENABLED
+ *       NV0000_CTRL_GPU_DRAIN_STATE_DISABLED
+ *   flags (OUTPUT)
+ *     NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE
+ *       if set, upon reaching quiescence, the GPU device will be
+ *       removed automatically from the kernel space, similar
+ *       to what writing "1" to the sysfs "remove" node does.
+ *     NV0000_CTRL_GPU_DRAIN_STATE_FLAG_LINK_DISABLE
+ *       after removing the GPU, also disable the parent bridge's
+ *       PCIe link. This flag can only be set in conjunction with
+ *       NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE, and then
+ *       only when the GPU is already idle (not attached).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_CTRL_CMD_GPU_QUERY_DRAIN_STATE (0x279U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS_MESSAGE_ID (0x79U)
+
+typedef struct NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS {
+    NvU32 gpuId;
+    NvU32 drainState;
+    NvU32 flags;
+} NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPU_DISCOVER
+ *
+ * This request asks the OS to scan the PCI tree or a sub-tree for GPUs
+ * that are not yet known to the OS, and to make them available for use.
+ * If all of domain:bus:slot.function are zeros, the entire tree is scanned;
+ * otherwise the parameters identify the bridge device that roots the
+ * subtree to be scanned.
+ * Requires administrator privileges.
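+ *
+ * As a small sketch (parameters are described below; hypothetical
+ * NvRmControl wrapper as before), an all-zero parameter block requests
+ * a scan of the entire PCI tree:
+ *
+ *   NV0000_CTRL_GPU_DISCOVER_PARAMS p;
+ *   NV_STATUS status;
+ *   memset(&p, 0, sizeof(p));  // domain:bus:slot.function == 0 => full scan
+ *   status = NvRmControl(hClient, hClient,
+ *                        NV0000_CTRL_CMD_GPU_DISCOVER,
+ *                        &p, sizeof(p));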
+ *
+ *   domain (INPUT)
+ *     PCI domain of the bridge
+ *   bus (INPUT)
+ *     PCI bus of the bridge
+ *   slot (INPUT)
+ *     PCI slot of the bridge
+ *   function (INPUT)
+ *     PCI function of the bridge
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *   NV_ERR_OPERATING_SYSTEM
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0000_CTRL_CMD_GPU_DISCOVER (0x27aU) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | 0x7A" */
+
+typedef struct NV0000_CTRL_GPU_DISCOVER_PARAMS {
+    NvU32 domain;
+    NvU8  bus;
+    NvU8  slot;
+    NvU8  function;
+} NV0000_CTRL_GPU_DISCOVER_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPU_GET_MEMOP_ENABLE
+ *
+ * This command is used to get the content of the MemOp (CUDA Memory Operation)
+ * enablement mask, which can be overridden by using the MemOpOverride RegKey.
+ *
+ * The enableMask member must be treated as a bitmask, where each bit controls
+ * the enablement of a feature.
+ *
+ * So far, the only feature defined controls the MemOp APIs as a whole.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *
+ */
+#define NV0000_CTRL_CMD_GPU_GET_MEMOP_ENABLE (0x27bU) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS_MESSAGE_ID (0x7BU)
+
+typedef struct NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS {
+    NvU32 enableMask;
+} NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS;
+
+#define NV0000_CTRL_GPU_FLAGS_MEMOP_ENABLE (0x00000001U)
+
+/*
+ * NV0000_CTRL_CMD_GPU_DISABLE_NVLINK_INIT
+ *
+ * This privileged command is used to disable initialization for the NVLinks
+ * provided in the mask.
+ *
+ * The mask must be applied before the GPU is attached. DISABLE_NVLINK_INIT
+ * is a NOP for non-NVLink GPUs.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_IN_USE
+ *
+ */
+#define NV0000_CTRL_CMD_GPU_DISABLE_NVLINK_INIT (0x281U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS_MESSAGE_ID (0x81U)
+
+typedef struct NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS {
+    NvU32  gpuId;
+    NvU32  mask; // This field will be deprecated in the future, please use links
+    NV_DECLARE_ALIGNED(NV0000_CTRL_NVLINK_LINK_MASK links, 8);
+    NvBool bSkipHwNvlinkDisable;
+} NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS;
+
+
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PARAM_DATA     0x00000175U
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PROPERTIES_IN  6U
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PROPERTIES_OUT 5U
+
+/*
+ * NV0000_CTRL_CMD_GPU_LEGACY_CONFIG
+ *
+ * Path to use the legacy RM GetConfig/Set API. This API is being phased out.
+ */
+#define NV0000_CTRL_CMD_GPU_LEGACY_CONFIG (0x282U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS_MESSAGE_ID (0x82U)
+
+typedef struct NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS {
+    NvHandle hContext;  /* [in]  - Handle of object to perform operation on (Device, Subdevice, etc) */
+    NvU32    opType;    /* [in]  - Type of API */
+    NvV32    index;     /* [in]  - command type */
+    NvU32    dataType;  /* [out] - data union type */
+
+    union {
+        struct {
+            NvU8  paramData[NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PARAM_DATA];
+            NvU32 paramSize;
+        } configEx;
+        struct {
+            NvU32 propertyId;
+            NvU32 propertyIn[NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PROPERTIES_IN];
+            NvU32 propertyOut[NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PROPERTIES_OUT];
+        } reservedProperty;
+    } data;
+} NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS;
+
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_OP_TYPE_GET_EX   (0x00000002U)
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_OP_TYPE_SET_EX   (0x00000003U)
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_OP_TYPE_RESERVED (0x00000004U)
+
+/*
+ * NV0000_CTRL_CMD_IDLE_CHANNELS
+ */
+#define NV0000_CTRL_CMD_IDLE_CHANNELS (0x283U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS_MESSAGE_ID (0x83U)
+
+typedef struct NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS {
+    NvHandle hDevice;
+    NvHandle hChannel;
+    NvV32    numChannels;
+    /* C form: NvP64 phClients NV_ALIGN_BYTES(8); */
+    NV_DECLARE_ALIGNED(NvP64 phClients, 8);
+    /* C form: NvP64 phDevices NV_ALIGN_BYTES(8); */
+    NV_DECLARE_ALIGNED(NvP64 phDevices, 8);
+    /* C form: NvP64 phChannels NV_ALIGN_BYTES(8); */
+    NV_DECLARE_ALIGNED(NvP64 phChannels, 8);
+    NvV32    flags;
+    NvV32    timeout;
+} NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS;
+
+#define NV0000_CTRL_GPU_IMAGE_TYPE_GSP           (0x00000001U)
+#define NV0000_CTRL_GPU_IMAGE_TYPE_GSP_LOG       (0x00000002U)
+#define NV0000_CTRL_GPU_IMAGE_TYPE_BINDATA_IMAGE (0x00000003U)
+/*
+ * NV0000_CTRL_CMD_PUSH_UCODE_IMAGE
+ *
+ * This command is used to push the GSP ucode into the RM.
+ * This function is used only on VMware.
+ *
+ * Possible status values returned are:
+ *   NV_OK                   The sent data is stored successfully
+ *   NV_ERR_INVALID_ARGUMENT if the arguments are invalid
+ *   NV_ERR_NO_MEMORY        if memory allocation failed
+ *   NV_ERR_NOT_SUPPORTED    if the function is invoked on a non-GSP setup or
+ *                           any setup other than a VMware host
+ *
+ */
+#define NV0000_CTRL_CMD_PUSH_UCODE_IMAGE (0x285) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_PUSH_UCODE_IMAGE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_PUSH_UCODE_IMAGE_PARAMS_MESSAGE_ID (0x85U)
+
+typedef struct NV0000_CTRL_GPU_PUSH_UCODE_IMAGE_PARAMS {
+    NvU8 image;
+    NV_DECLARE_ALIGNED(NvU64 totalSize, 8);
+    NV_DECLARE_ALIGNED(NvP64 pData, 8);
+} NV0000_CTRL_GPU_PUSH_UCODE_IMAGE_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPU_SET_NVLINK_BW_MODE
+ *
+ * This command is used to set the NVLINK bandwidth for power saving.
+ *
+ * The setting must be applied before the GPU is attached.
+ * NVLINK_BW_MODE is a NOP for non-NVLink GPUs.
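+ *
+ * A hedged sketch of composing the mode byte (the bit positions follow
+ * the _SETTING_LEGACY 2:0 and _SETTING_LINK_COUNT 7:3 fields defined
+ * below; the shifts are written out by hand and the link count of 4 is
+ * an arbitrary example value):
+ *
+ *   NV0000_CTRL_GPU_SET_NVLINK_BW_MODE_PARAMS p;
+ *   NV_STATUS status;
+ *   memset(&p, 0, sizeof(p));
+ *   p.mode = NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_LINK_COUNT | // bits 2:0
+ *            (4 << 3);                                       // bits 7:3
+ *   status = NvRmControl(hClient, hClient,
+ *                        NV0000_CTRL_CMD_GPU_SET_NVLINK_BW_MODE,
+ *                        &p, sizeof(p));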
+ *
+ * [in] mode
+ *     BW mode requested, defined as a DRF.
+ *     Possible legacy values that can be set in bits 2:0:
+ *       NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_FULL
+ *       NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_OFF
+ *       NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_MIN
+ *       NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_HALF
+ *       NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_3QUARTER
+ *     Link count can be requested on Blackwell+ in bits 7:3.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_IN_USE
+ */
+
+#define NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_SETTING_LEGACY     2:0
+#define NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_SETTING_LINK_COUNT 7:3
+
+#define NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_FULL       (0x00U)
+#define NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_OFF        (0x01U)
+#define NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_MIN        (0x02U)
+#define NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_HALF       (0x03U)
+#define NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_3QUARTER   (0x04U)
+#define NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_LINK_COUNT (0x05U)
+
+#define NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_SCOPE_UNSET    (0x00U)
+#define NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_SCOPE_PER_NODE (0x01U)
+#define NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_SCOPE_PER_GPU  (0x02U)
+
+
+#define NV0000_CTRL_CMD_GPU_SET_NVLINK_BW_MODE (0x286U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_SET_NVLINK_BW_MODE_PARAMS_MESSAGE_ID" */
+#define NV0000_CTRL_GPU_SET_NVLINK_BW_MODE_PARAMS_MESSAGE_ID (0x86U)
+
+typedef struct NV0000_CTRL_GPU_SET_NVLINK_BW_MODE_PARAMS {
+    NvU8 mode;
+} NV0000_CTRL_GPU_SET_NVLINK_BW_MODE_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPU_GET_NVLINK_BW_MODE
+ *
+ * This command is used to get the NVLINK bandwidth mode set for power saving.
+ *
+ * NVLINK_BW_MODE is a NOP for non-NVLink GPUs.
+ *
+ * [out] mode
+ *     BW mode currently set for the GPUs on the system.
+ *     Possible values are:
+ *       NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_FULL
+ *       NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_OFF
+ *       NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_MIN
+ *       NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_HALF
+ *       NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_3QUARTER
+ * [out] bwModeScope
+ *     Scope of the BW mode setting on the system.
+ *     Possible values are:
+ *       NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_SCOPE_UNSET
+ *       NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_SCOPE_PER_NODE
+ *       NV0000_CTRL_CMD_GPU_NVLINK_BW_MODE_SCOPE_PER_GPU
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_IN_USE
+ */
+
+#define NV0000_CTRL_CMD_GPU_GET_NVLINK_BW_MODE (0x287U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_NVLINK_BW_MODE_PARAMS_MESSAGE_ID" */
+#define NV0000_CTRL_GPU_GET_NVLINK_BW_MODE_PARAMS_MESSAGE_ID (0x87U)
+
+typedef struct NV0000_CTRL_GPU_GET_NVLINK_BW_MODE_PARAMS {
+    NvU8 mode;
+    NvU8 bwModeScope;
+} NV0000_CTRL_GPU_GET_NVLINK_BW_MODE_PARAMS;
+
+/*
+ * NV0000_CTRL_GPU_ACTIVE_DEVICE
+ *
+ * This structure describes a single MIG or plain device in the system
+ * available for use.
+ *
+ *   gpuId
+ *     ID of an attached GPU.
+ *   gpuInstanceId
+ *     MIG GPU instance id of an instance existing on this GPU.
+ *     NV0000_CTRL_GPU_INVALID_ID if the GPU is not in MIG mode.
+ *   computeInstanceId
+ *     MIG compute instance id of an instance existing on this GPU.
+ *     NV0000_CTRL_GPU_INVALID_ID if the GPU is not in MIG mode.
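+ *
+ * An illustrative check (a sketch only; entries of this type are
+ * typically obtained from NV0000_CTRL_CMD_GPU_GET_ACTIVE_DEVICE_IDS,
+ * described below):
+ *
+ *   NvBool isMigDevice(const NV0000_CTRL_GPU_ACTIVE_DEVICE *dev)
+ *   {
+ *       // both instance ids are NV0000_CTRL_GPU_INVALID_ID outside MIG mode
+ *       return (dev->gpuInstanceId != NV0000_CTRL_GPU_INVALID_ID);
+ *   }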
+ *
+ */
+typedef struct NV0000_CTRL_GPU_ACTIVE_DEVICE {
+    NvU32 gpuId;
+    NvU32 gpuInstanceId;
+    NvU32 computeInstanceId;
+} NV0000_CTRL_GPU_ACTIVE_DEVICE;
+
+/*
+ * NV0000_CTRL_CMD_GPU_GET_ACTIVE_DEVICE_IDS
+ *
+ * This command returns a list of valid GPUs, treating MIG devices and
+ * GPUs not in MIG mode uniformly.
+ *
+ * [out] devices
+ *     List of devices available for use.
+ * [out] numDevices
+ *     Number of valid entries in devices.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV0000_CTRL_CMD_GPU_GET_ACTIVE_DEVICE_IDS (0x288U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_ACTIVE_DEVICE_IDS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_MAX_ACTIVE_DEVICES 256U
+
+#define NV0000_CTRL_GPU_GET_ACTIVE_DEVICE_IDS_PARAMS_MESSAGE_ID (0x88U)
+
+typedef struct NV0000_CTRL_GPU_GET_ACTIVE_DEVICE_IDS_PARAMS {
+    NvU32                         numDevices;
+    NV0000_CTRL_GPU_ACTIVE_DEVICE devices[NV0000_CTRL_GPU_MAX_ACTIVE_DEVICES];
+} NV0000_CTRL_GPU_GET_ACTIVE_DEVICE_IDS_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPU_ASYNC_ATTACH_ID
+ *
+ * This command attaches the GPU with the given gpuId, similar to
+ * NV0000_CTRL_CMD_GPU_ATTACH_IDS. However, this command instructs the RM
+ * to perform the attach in the background.
+ *
+ * After calling this command, clients are expected to call
+ * NV0000_CTRL_CMD_GPU_WAIT_ATTACH_ID before performing any operation that
+ * depends on the GPU being attached.
+ *
+ * If the gpuId fails to attach, either this command or the subsequent
+ * NV0000_CTRL_CMD_GPU_WAIT_ATTACH_ID command may fail.
+ *
+ * If clients from multiple processes use this command or the
+ * NV0000_CTRL_CMD_GPU_ATTACH_IDS command to attach a gpuId, the RM ensures
+ * that the gpuId won't be detached until all processes have issued a call to
+ * NV0000_CTRL_CMD_GPU_DETACH_IDS to detach the gpuId (or have terminated).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OPERATING_SYSTEM
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0000_CTRL_CMD_GPU_ASYNC_ATTACH_ID (0x289U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_ASYNC_ATTACH_ID_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_ASYNC_ATTACH_ID_PARAMS_MESSAGE_ID (0x89U)
+
+typedef struct NV0000_CTRL_GPU_ASYNC_ATTACH_ID_PARAMS {
+    NvU32 gpuId;
+} NV0000_CTRL_GPU_ASYNC_ATTACH_ID_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPU_WAIT_ATTACH_ID
+ *
+ * This command waits for and returns the status of a background attach
+ * operation started by NV0000_CTRL_CMD_GPU_ASYNC_ATTACH_ID.
+ *
+ * Clients are expected to call this command after calling
+ * NV0000_CTRL_CMD_GPU_ASYNC_ATTACH_ID, before performing any operation that
+ * depends on the GPU being attached.
+ *
+ * If the gpuId fails to attach, either this command or the previous
+ * NV0000_CTRL_CMD_GPU_ASYNC_ATTACH_ID command may fail.
+ *
+ * Calling this command for a gpuId that is already attached (for example,
+ * after a successful NV0000_CTRL_CMD_GPU_ATTACH_IDS) is a no-op.
+ *
+ * Calling this command for a gpuId that is neither attached nor has a
+ * pending background attach operation will result in NV_ERR_INVALID_ARGUMENT.
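+ *
+ * A minimal flow sketch (same hypothetical NvRmControl wrapper as in
+ * the earlier examples):
+ *
+ *   NV0000_CTRL_GPU_ASYNC_ATTACH_ID_PARAMS a = { myGpuId };
+ *   NV0000_CTRL_GPU_WAIT_ATTACH_ID_PARAMS  w = { myGpuId };
+ *   NV_STATUS status;
+ *   status = NvRmControl(hClient, hClient,
+ *                        NV0000_CTRL_CMD_GPU_ASYNC_ATTACH_ID,
+ *                        &a, sizeof(a));
+ *   // ... do unrelated setup work while the attach proceeds ...
+ *   status = NvRmControl(hClient, hClient,
+ *                        NV0000_CTRL_CMD_GPU_WAIT_ATTACH_ID,
+ *                        &w, sizeof(w));
+ *   // only use the GPU once this returns NV_OK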
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OPERATING_SYSTEM
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_IRQ_EDGE_TRIGGERED
+ *   NV_ERR_IRQ_NOT_FIRING
+ */
+#define NV0000_CTRL_CMD_GPU_WAIT_ATTACH_ID (0x290U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_WAIT_ATTACH_ID_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_WAIT_ATTACH_ID_PARAMS_MESSAGE_ID (0x90U)
+
+typedef struct NV0000_CTRL_GPU_WAIT_ATTACH_ID_PARAMS {
+    NvU32 gpuId;
+} NV0000_CTRL_GPU_WAIT_ATTACH_ID_PARAMS;
+
+/* _ctrl0000gpu_h_ */
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h
new file mode 100644
index 0000000..f38c9c3
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h
@@ -0,0 +1,255 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000gpuacct.finn
+//
+
+#include "ctrl/ctrl0000/ctrl0000base.h"
+
+/*
+ * NV0000_CTRL_CMD_GPUACCT_SET_ACCOUNTING_STATE
+ *
+ * This command is used to enable or disable the per process GPU accounting.
+ * This is part of the GPU's software state and will persist if persistent
+ * software state is enabled. Refer to the description of
+ * NV0080_CTRL_CMD_GPU_MODIFY_SW_STATE_PERSISTENCE for more information.
+ *
+ *   gpuId
+ *     This parameter should specify a valid GPU ID value. Refer to the
+ *     description of NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS for more
+ *     information. If there is no GPU present with the specified ID,
+ *     a status of NV_ERR_INVALID_ARGUMENT is returned.
+ *   pid
+ *     This input parameter specifies the process id of the process for which
+ *     the accounting state needs to be set.
+ *     In case of a VGX host, this parameter specifies the VGPU plugin (VM)
+ *     pid. This parameter is set only when this RM control is called from
+ *     the VGPU plugin; otherwise it is zero, meaning set/reset the accounting
+ *     state for the specified GPU.
+ *   newState
+ *     This input parameter is used to enable or disable the GPU accounting.
+ *     Possible values are:
+ *       NV0000_CTRL_GPU_ACCOUNTING_STATE_ENABLED
+ *       NV0000_CTRL_GPU_ACCOUNTING_STATE_DISABLED
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_CTRL_CMD_GPUACCT_SET_ACCOUNTING_STATE (0xb01) /* finn: Evaluated from "(FINN_NV01_ROOT_GPUACCT_INTERFACE_ID << 8) | NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS_MESSAGE_ID" */
+
+/* Possible values of newState */
+#define NV0000_CTRL_GPU_ACCOUNTING_STATE_ENABLED  (0x00000000)
+#define NV0000_CTRL_GPU_ACCOUNTING_STATE_DISABLED (0x00000001)
+
+#define NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS {
+    NvU32 gpuId;
+    NvU32 pid;
+    NvU32 newState;
+} NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPUACCT_GET_ACCOUNTING_STATE
+ *
+ * This command is used to get the current state of GPU accounting.
+ *
+ *   gpuId
+ *     This parameter should specify a valid GPU ID value. Refer to the
+ *     description of NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS for more
+ *     information. If there is no GPU present with the specified ID,
+ *     a status of NV_ERR_INVALID_ARGUMENT is returned.
+ *   pid
+ *     This input parameter specifies the process id of the process for which
+ *     the accounting state needs to be queried.
+ *     In case of a VGX host, this parameter specifies the VGPU plugin (VM)
+ *     pid. This parameter is set only when this RM control is called from
+ *     the VGPU plugin; otherwise it is zero, meaning the accounting state
+ *     needs to be queried for the specified GPU.
+ *   state
+ *     This parameter returns a value indicating if per process GPU accounting
+ *     is currently enabled or not for the specified GPU. See the
+ *     description of NV0000_CTRL_CMD_GPUACCT_SET_ACCOUNTING_STATE.
+ *     Possible values are:
+ *       NV0000_CTRL_GPU_ACCOUNTING_STATE_ENABLED
+ *       NV0000_CTRL_GPU_ACCOUNTING_STATE_DISABLED
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_CTRL_CMD_GPUACCT_GET_ACCOUNTING_STATE (0xb02) /* finn: Evaluated from "(FINN_NV01_ROOT_GPUACCT_INTERFACE_ID << 8) | NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS {
+    NvU32 gpuId;
+    NvU32 pid;
+    NvU32 state;
+} NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPUACCT_GET_PROC_ACCOUNTING_INFO
+ *
+ * This command returns GPU accounting data for the process.
+ *
+ *   gpuId
+ *     This parameter should specify a valid GPU ID value. Refer to the
+ *     description of NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS for more
+ *     information. If there is no GPU present with the specified ID,
+ *     a status of NV_ERR_INVALID_ARGUMENT is returned.
+ *   pid
+ *     This parameter specifies the PID of the process for which information
+ *     is to be queried.
+ *     In case of a VGX host, this parameter specifies the VGPU plugin (VM)
+ *     pid inside which the subPid is running. This parameter is set to the
+ *     VGPU plugin pid when this RM control is called from the VGPU plugin.
+ *   subPid
+ *     In case of a VGX host, this parameter specifies the PID of the process
+ *     for which information is to be queried. In other cases, it is zero.
+ *   gpuUtil
+ *     This parameter returns the average GR utilization during the process's
+ *     lifetime.
+ *   fbUtil
+ *     This parameter returns the average FB bandwidth utilization during the
+ *     process's lifetime.
+ *   maxFbUsage
+ *     This parameter returns the maximum FB allocated (in bytes) by the
+ *     process.
+ *   startTime
+ *     This parameter returns the time stamp value in microseconds at the
+ *     time the process started utilizing the GPU.
+ *   endTime
+ *     This parameter returns the time stamp value in microseconds at the
+ *     time the process stopped utilizing the GPU.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0000_CTRL_CMD_GPUACCT_GET_PROC_ACCOUNTING_INFO (0xb03) /* finn: Evaluated from "(FINN_NV01_ROOT_GPUACCT_INTERFACE_ID << 8) | NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS {
+    NvU32 gpuId;
+    NvU32 pid;
+    NvU32 subPid;
+    NvU32 gpuUtil;
+    NvU32 fbUtil;
+    NV_DECLARE_ALIGNED(NvU64 maxFbUsage, 8);
+    NV_DECLARE_ALIGNED(NvU64 startTime, 8);
+    NV_DECLARE_ALIGNED(NvU64 endTime, 8);
+} NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPUACCT_GET_ACCOUNTING_PIDS
+ *
+ * This command is used to get the PIDs of processes with accounting
+ * information in the driver.
+ *
+ *   gpuId
+ *     This parameter should specify a valid GPU ID value. Refer to the
+ *     description of NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS for more
+ *     information. If there is no GPU present with the specified ID,
+ *     a status of NV_ERR_INVALID_ARGUMENT is returned.
+ *   pid
+ *     This input parameter specifies the process id of the process for which
+ *     the information needs to be queried.
+ *     In case of a VGX host, this parameter specifies the VGPU plugin (VM)
+ *     pid. This parameter is set only when this RM control is called from
+ *     the VGPU plugin; otherwise it is zero, meaning get the pid list of all
+ *     the processes running on the specified GPU.
+ *   pidTbl
+ *     This parameter returns the table of all PIDs for which the driver has
+ *     accounting info.
+ *   pidCount
+ *     This parameter returns the number of entries in the PID table.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_CTRL_CMD_GPUACCT_GET_ACCOUNTING_PIDS (0xb04) /* finn: Evaluated from "(FINN_NV01_ROOT_GPUACCT_INTERFACE_ID << 8) | NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_MESSAGE_ID" */
+
+/* max size of pidTable */
+#define NV0000_GPUACCT_PID_MAX_COUNT 4000
+
+#define NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS {
+    NvU32 gpuId;
+    NvU32 pid;
+    NvU32 pidTbl[NV0000_GPUACCT_PID_MAX_COUNT];
+    NvU32 pidCount;
+} NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPUACCT_CLEAR_ACCOUNTING_DATA
+ *
+ * This command is used to clear previously collected GPU accounting data.
+ * This has no effect on data for the running processes; accounting data for
+ * these processes will not be cleared and will still be logged for them.
+ * In order to clear ALL accounting data, accounting needs to be disabled
+ * using NV0000_CTRL_CMD_GPUACCT_SET_ACCOUNTING_STATE before executing this
+ * command.
+ *
+ *   gpuId
+ *     This parameter should specify a valid GPU ID value. Refer to the
+ *     description of NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS for more
+ *     information. If there is no GPU present with the specified ID,
+ *     a status of NV_ERR_INVALID_ARGUMENT is returned.
+ *   pid
+ *     This input parameter specifies the process id of the process for which
+ *     the accounting data needs to be cleared.
+ *     In case of a VGX host, this parameter specifies the VGPU plugin (VM)
+ *     pid for which the accounting data needs to be cleared. This parameter
+ *     is set only when this RM control is called from the VGPU plugin;
+ *     otherwise it is zero, meaning clear the accounting data of processes
+ *     running on a bare-metal system.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+
+#define NV0000_CTRL_CMD_GPUACCT_CLEAR_ACCOUNTING_DATA (0xb05) /* finn: Evaluated from "(FINN_NV01_ROOT_GPUACCT_INTERFACE_ID << 8) | NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS {
+    NvU32 gpuId;
+    NvU32 pid;
+} NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS;
+
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h
new file mode 100644
index 0000000..fae2cf9
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h
@@ -0,0 +1,101 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000gsync.finn
+//
+
+#include "ctrl/ctrl0000/ctrl0000base.h"
+
+#include "ctrl/ctrlxxxx.h"
+#include "class/cl30f1.h"
+/* NV01_ROOT (client) system controller control commands and parameters */
+
+/*
+ * NV0000_CTRL_CMD_GSYNC_GET_ATTACHED_IDS
+ *
+ * This command returns a table of attached gsyncId values.
+ * The table is NV0000_CTRL_GSYNC_MAX_ATTACHED_GSYNCS entries in size.
+ *
+ *   gsyncIds[]
+ *     This parameter returns the table of attached gsync IDs.
+ *     The gsync ID is an opaque platform-dependent value that
+ *     can be used with the NV0000_CTRL_CMD_GSYNC_GET_ID_INFO command to
+ *     retrieve additional information about the gsync device.
+ *     The valid entries in gsyncIds[] are contiguous, with a value
+ *     of NV0000_CTRL_GSYNC_INVALID_ID indicating the invalid entries.
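+ *
+ * A small iteration sketch (hypothetical NvRmControl wrapper, as in the
+ * other files of this change):
+ *
+ *   NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS p;
+ *   NV_STATUS status;
+ *   NvU32 i;
+ *   memset(&p, 0, sizeof(p));
+ *   status = NvRmControl(hClient, hClient,
+ *                        NV0000_CTRL_CMD_GSYNC_GET_ATTACHED_IDS,
+ *                        &p, sizeof(p));
+ *   for (i = 0; i < NV30F1_MAX_GSYNCS; i++) {
+ *       if (p.gsyncIds[i] == NV0000_CTRL_GSYNC_INVALID_ID)
+ *           break;                    // first invalid entry ends the table
+ *       // p.gsyncIds[i] is an attached gsync device
+ *   }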
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV0000_CTRL_CMD_GSYNC_GET_ATTACHED_IDS (0x301) /* finn: Evaluated from "(FINN_NV01_ROOT_GSYNC_INTERFACE_ID << 8) | NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS {
+    NvU32 gsyncIds[NV30F1_MAX_GSYNCS];
+} NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS;
+
+/* this value marks entries in gsyncIds[] as invalid */
+#define NV0000_CTRL_GSYNC_INVALID_ID (0xffffffff)
+
+/*
+ * NV0000_CTRL_CMD_GSYNC_GET_ID_INFO
+ *
+ * This command returns gsync instance information for the
+ * specified gsync device.
+ *
+ *   gsyncId
+ *     This parameter should specify a valid gsync ID value.
+ *     If there is no gsync present with the specified ID, a
+ *     status of NV_ERR_INVALID_ARGUMENT is returned.
+ *   gsyncFlags
+ *     This parameter returns the current state of the gsync device.
+ *   gsyncInstance
+ *     This parameter returns the instance number associated with the
+ *     specified gsync. This value can be used to instantiate
+ *     a reference to the gsync using one of the NV30_GSYNC
+ *     classes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0000_CTRL_CMD_GSYNC_GET_ID_INFO (0x302) /* finn: Evaluated from "(FINN_NV01_ROOT_GSYNC_INTERFACE_ID << 8) | NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS {
+    NvU32 gsyncId;
+    NvU32 gsyncFlags;
+    NvU32 gsyncInstance;
+} NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS;
+
+/* _ctrl0000gsync_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h
new file mode 100644
index 0000000..92a2ba5
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h
@@ -0,0 +1,637 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000nvd.finn
+//
+
+#include "ctrl/ctrl0000/ctrl0000base.h"
+
+#include "ctrl/ctrlxxxx.h"
+/* NV01_ROOT (client) nvd control commands and parameters */
+
+/*
+ * NV0000_CTRL_NVD_DUMP_COMPONENT
+ *
+ * The following dump components are used to describe legal ranges in
+ * commands below:
+ *
+ *   NV0000_CTRL_NVD_DUMP_COMPONENT_SYS
+ *     This is the system dump component.
+ *   NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG
+ *     This is the nvlog dump component.
+ *   NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED
+ *     This component is reserved.
+ *
+ * See nvdump.h for more information on dump component values.
+ */
+#define NV0000_CTRL_NVD_DUMP_COMPONENT_SYS      (0x400)
+#define NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG    (0x800)
+#define NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED (0xB00)
+
+/*
+ * NV0000_CTRL_CMD_NVD_GET_DUMP_SIZE
+ *
+ * This command gets the expected dump size of a particular system
+ * dump component. Note that events that occur between this command
+ * and a later NV0000_CTRL_CMD_NVD_GET_DUMP command could alter the size of
+ * the buffer required.
+ *
+ *   component
+ *     This parameter specifies the system dump component for which the
+ *     dump size is desired. Legal values for this parameter must
+ *     be greater than or equal to NV0000_CTRL_NVD_DUMP_COMPONENT_SYS and
+ *     less than NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG.
+ *   size
+ *     This parameter returns the expected size in bytes. The maximum
+ *     value returned by this call is NV0000_CTRL_NVD_MAX_DUMP_SIZE.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT if components are invalid.
+ */
+
+#define NV0000_CTRL_CMD_NVD_GET_DUMP_SIZE (0x601) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS {
+    NvU32 component;
+    NvU32 size;
+} NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS;
+
+/* Max size that a GET_DUMP_SIZE_PARAMS call can return */
+#define NV0000_CTRL_NVD_MAX_DUMP_SIZE (1000000)
+
+/*
+ * NV0000_CTRL_CMD_NVD_GET_DUMP
+ *
+ * This command gets a dump of a particular system dump component. If triggers
+ * is non-zero, the command waits for the trigger to occur before it returns.
+ *
+ *   pBuffer
+ *     This parameter points to the buffer for the data.
+ *   component
+ *     This parameter specifies the system dump component for which the
+ *     dump is to be retrieved. Legal values for this parameter must
+ *     be greater than or equal to NV0000_CTRL_NVD_DUMP_COMPONENT_SYS and
+ *     less than NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG.
+ *   size
+ *     On entry, this parameter specifies the maximum length for
+ *     the returned data. On exit, it specifies the number of bytes
+ *     returned.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NVOS_ERROR_INVALID_ARGUMENT if components are invalid.
+ *   NVOS_ERROR_INVALID_ADDRESS if pBuffer is invalid
+ *   NVOS_ERROR_INVALID_???? if the buffer was too small
+ */
+#define NV0000_CTRL_CMD_NVD_GET_DUMP (0x602) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_DUMP_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_NVD_GET_DUMP_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0000_CTRL_NVD_GET_DUMP_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pBuffer, 8);
+    NvU32 component;
+    NvU32 size;
+} NV0000_CTRL_NVD_GET_DUMP_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_NVD_GET_TIMESTAMP
+ *
+ * This command returns the current value of the timestamp used
+ * by the RM in NvDebug dumps.
+ * It is provided to keep the RM and NvDebug
+ * clients on the same time base.
+ *
+ *   cpuClkId
+ *     See also NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO.
+ *     This parameter specifies the source of the CPU clock. Legal values for
+ *     this parameter include:
+ *       NV0000_NVD_CPU_TIME_CLK_ID_DEFAULT and NV0000_NVD_CPU_TIME_CLK_ID_OSTIME
+ *         This clock id will provide real time in microseconds since
+ *         00:00:00 UTC on January 1, 1970. It is calculated as follows:
+ *           (seconds * 1000000) + uSeconds
+ *       NV0000_NVD_CPU_TIME_CLK_ID_PLATFORM_API
+ *         This clock id will provide a time stamp that is constant-rate and
+ *         high precision, using a platform API that is also available in
+ *         user mode.
+ *       NV0000_NVD_CPU_TIME_CLK_ID_TSC
+ *         This clock id will provide a time stamp using the CPU's time stamp
+ *         counter.
+ *
+ *   timestamp
+ *     Retrieved timestamp
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_NVD_CPU_TIME_CLK_ID_DEFAULT      (0x00000000)
+#define NV0000_NVD_CPU_TIME_CLK_ID_OSTIME       (0x00000001)
+#define NV0000_NVD_CPU_TIME_CLK_ID_TSC          (0x00000002)
+#define NV0000_NVD_CPU_TIME_CLK_ID_PLATFORM_API (0x00000003)
+
+#define NV0000_CTRL_CMD_NVD_GET_TIMESTAMP (0x603) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 timestamp, 8);
+    NvU8 cpuClkId;
+} NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_NVD_GET_NVLOG_INFO
+ *
+ * This command gets the current state of the NvLog buffer system.
+ *
+ *   component (in)
+ *     This parameter specifies the system dump component for which the
+ *     NvLog info is desired. Legal values for this parameter must
+ *     be greater than or equal to NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG and
+ *     less than NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED.
+ *   version (out)
+ *     This parameter returns the version of the NvLog subsystem.
+ *   runtimeSizes (out)
+ *     This parameter returns the array of sizes for all supported printf
+ *     specifiers. This information is necessary to know how many bytes
+ *     to decode when given a certain specifier (such as %d).
+ *     The following describes the contents of each array entry:
+ *       NV0000_CTRL_NVD_RUNTIME_SIZE_UNUSED
+ *         This array entry has special meaning and is unused in the
+ *         runtimeSizes array.
+ *       NV0000_CTRL_NVD_RUNTIME_SIZE_INT
+ *         This array entry returns the size of integer types for use in
+ *         interpreting the %d, %u, %x, %X, %i, %o specifiers.
+ *       NV0000_CTRL_NVD_RUNTIME_SIZE_LONG_LONG
+ *         This array entry returns the size of long long integer types for
+ *         use in interpreting the %lld, %llu, %llx, %llX, %lli, %llo
+ *         specifiers.
+ *       NV0000_CTRL_NVD_RUNTIME_SIZE_STRING
+ *         This array entry returns zero as strings are not allowed.
+ *       NV0000_CTRL_NVD_RUNTIME_SIZE_PTR
+ *         This array entry returns the size of the pointer type for use
+ *         in interpreting the %p specifier.
+ *       NV0000_CTRL_NVD_RUNTIME_SIZE_CHAR
+ *         This array entry returns the size of the char type for use in
+ *         interpreting the %c specifier.
+ *       NV0000_CTRL_NVD_RUNTIME_SIZE_FLOAT
+ *         This array entry returns the size of the float types for use in
+ *         interpreting the %f, %g, %e, %F, %G, %E specifiers.
+ *     All remaining entries are reserved and return 0.
+ *   printFlags (out)
+ *     This parameter returns the flags of the NvLog system.
+ *       NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_BUFFER_FLAGS
+ *         See NV0000_CTRL_CMD_NVD_GET_NVLOG_BUFFER_INFO for more details.
+ *       NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_BUFFER_SIZE
+ *         This field returns the buffer size in KBytes. A value of zero
+ *         is returned when logging is disabled.
+ *       NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_TIMESTAMP
+ *         This field returns the format of the timestamp. Legal values
+ *         for this parameter include:
+ *           NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_TIMESTAMP_NONE
+ *             This value indicates no timestamp.
+ *           NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_TIMESTAMP_32BIT
+ *             This value indicates a 32-bit timestamp.
+ *           NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_TIMESTAMP_64BIT
+ *             This value indicates a 64-bit timestamp.
+ *           NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_TIMESTAMP_32BIT_DIFF
+ *             This value indicates a 32-bit differential timestamp.
+ *       NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_RESERVED
+ *         This field is reserved.
+ *       NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_RUNTIME_LEVEL
+ *         This field returns the lowest debug level for which logging
+ *         is enabled by default.
+ *       NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_INIT
+ *         This field indicates if logging for the specified component has
+ *         been initialized. Legal values for this parameter include:
+ *           NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_INIT_NO
+ *             This value indicates NvLog is uninitialized.
+ *           NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_INIT_YES
+ *             This value indicates NvLog has been initialized.
+ *   signature (out)
+ *     This parameter is the signature of the database required to decode
+ *     these logs, auto-generated at build time.
+ *   bufferTags (out)
+ *     For each possible buffer, this parameter identifies the buffer tag
+ *     used during allocation, or a value of '0' if the buffer is
+ *     unallocated.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT if components are invalid.
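+ *
+ * A hedged decoding sketch (manual bit extraction; the bit position
+ * follows the NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_INITED 31:31 field
+ * defined below, and NvRmControl is the hypothetical wrapper used in
+ * the other examples in this change):
+ *
+ *   NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS p;
+ *   NV_STATUS status;
+ *   memset(&p, 0, sizeof(p));
+ *   p.component = NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG;
+ *   status = NvRmControl(hClient, hClient,
+ *                        NV0000_CTRL_CMD_NVD_GET_NVLOG_INFO,
+ *                        &p, sizeof(p));
+ *   if ((p.printFlags >> 31) & 0x1) {
+ *       // NvLog is initialized; p.runtimeSizes[] is meaningful
+ *   }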
+ */
+#define NV0000_CTRL_CMD_NVD_GET_NVLOG_INFO (0x604) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS_MESSAGE_ID" */
+
+/* maximum size of the runtimeSizes array */
+#define NV0000_CTRL_NVD_MAX_RUNTIME_SIZES (16)
+
+/* size of signature parameter */
+#define NV0000_CTRL_NVD_SIGNATURE_SIZE (4)
+
+/* Maximum number of buffers */
+#define NV0000_CTRL_NVD_MAX_BUFFERS (3840)
+
+#define NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS {
+    NvU32 component;
+    NvU32 version;
+    NvU8  runtimeSizes[NV0000_CTRL_NVD_MAX_RUNTIME_SIZES];
+    NvU32 printFlags;
+    NvU32 signature[NV0000_CTRL_NVD_SIGNATURE_SIZE];
+    NvU32 bufferTags[NV0000_CTRL_NVD_MAX_BUFFERS];
+} NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS;
+
+/* runtimeSize array indices */
+#define NV0000_CTRL_NVD_RUNTIME_SIZE_UNUSED    (0)
+#define NV0000_CTRL_NVD_RUNTIME_SIZE_INT       (1)
+#define NV0000_CTRL_NVD_RUNTIME_SIZE_LONG_LONG (2)
+#define NV0000_CTRL_NVD_RUNTIME_SIZE_STRING    (3)
+#define NV0000_CTRL_NVD_RUNTIME_SIZE_PTR       (4)
+#define NV0000_CTRL_NVD_RUNTIME_SIZE_CHAR      (5)
+#define NV0000_CTRL_NVD_RUNTIME_SIZE_FLOAT     (6)
+
+/* printFlags fields and values */
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_BUFFER_INFO         7:0
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_BUFFER_SIZE         23:8
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_BUFFER_SIZE_DISABLE (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_BUFFER_SIZE_DEFAULT (0x00000004)
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_RUNTIME_LEVEL       28:25
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_TIMESTAMP           30:29
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_TIMESTAMP_NONE      (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_TIMESTAMP_32        (0x00000001)
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_TIMESTAMP_64        (0x00000002)
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_TIMESTAMP_32_DIFF   (0x00000003)
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_INITED              31:31
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_INITED_NO           (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_INITED_YES          (0x00000001)
+
+/*
+ * NV0000_CTRL_CMD_NVD_GET_NVLOG_BUFFER_INFO
+ *
+ * This command gets the current state of a specific buffer in the NvLog
+ * buffer system.
+ *
+ *   component (in)
+ *     This parameter specifies the system dump component for which the
+ *     NvLog info is desired. Legal values for this parameter must
+ *     be greater than or equal to NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG and
+ *     less than NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED.
+ *   buffer (in/out)
+ *     This parameter specifies the buffer number from which to retrieve the
+ *     buffer information. Valid values are 0 to (NV0000_CTRL_NVD_MAX_BUFFERS - 1).
+ *     If the buffer is specified using the 'tag' parameter, the buffer
+ *     number is returned through this one.
+ *   tag (in/out)
+ *     If this parameter is non-zero, it will be used to specify the buffer
+ *     instead of the 'buffer' parameter. It returns the tag of the
+ *     specified buffer.
+ *   size (out)
+ *     This parameter returns the size of the specified buffer.
+ *   flags (in/out)
+ *     On input, this parameter sets the following behavior:
+ *       NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE
+ *         This flag controls whether the NvLog system should pause output
+ *         to this buffer.
+ *           NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE_YES
+ *             The buffer should be paused until another command
+ *             unpauses this buffer.
+ *           NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE_NO
+ *             The buffer should not be paused.
+ *     On output, this parameter returns the flags of a specified buffer:
+ *       NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_DISABLED
+ *         This flag indicates if logging to the specified buffer is
+ *         disabled or not.
+ *       NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE
+ *         This flag indicates the buffer logging type:
+ *           NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE_RING
+ *             This type value indicates logging to the buffer wraps.
+ *           NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE_NOWRAP
+ *             This type value indicates logging to the buffer does not wrap.
+ *       NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE
+ *         This flag indicates if the buffer size is expandable.
+ *           NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE_NO
+ *             The buffer is not expandable.
+ *           NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE_YES
+ *             The buffer is expandable.
+ *       NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NONPAGED
+ *         This flag indicates if the buffer occupies non-paged or pageable
+ *         memory.
+ *           NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NONPAGED_NO
+ *             The buffer is in pageable memory.
+ *           NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NONPAGED_YES
+ *             The buffer is in non-paged memory.
+ *       NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING
+ *         This flag indicates the locking mode for the specified buffer.
+ *           NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_NONE
+ *             This locking value indicates that no locking is performed. This
+ *             locking mode is typically used for inherently single-threaded
+ *             buffers.
+ *           NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_STATE
+ *             This locking value indicates that the buffer is locked only
+ *             during state changes and that memory copying is unlocked. This
+ *             mode should not be used for tiny buffers that overflow every
+ *             write or two.
+ *           NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_FULL
+ *             This locking value indicates the buffer is locked for the full
+ *             duration of the write.
+ *       NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA
+ *         This flag indicates if the buffer is stored in OCA dumps.
+ *           NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA_NO
+ *             The buffer is not included in OCA dumps.
+ *           NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA_YES
+ *             The buffer is included in OCA dumps.
+ *   pos (out)
+ *     This parameter is the current position of the tracker/cursor in the
+ *     buffer.
+ *   overflow (out)
+ *     This parameter is the number of times the buffer has overflowed.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT if components are invalid.
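+ *
+ * A hedged sketch of pausing a buffer identified by tag (hypothetical
+ * NvRmControl wrapper as before; myBufferTag is an assumed value):
+ *
+ *   NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS p;
+ *   NV_STATUS status;
+ *   memset(&p, 0, sizeof(p));
+ *   p.component = NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG;
+ *   p.tag       = myBufferTag;  // non-zero tag selects the buffer
+ *   p.flags     = NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE_YES;
+ *   status = NvRmControl(hClient, hClient,
+ *                        NV0000_CTRL_CMD_NVD_GET_NVLOG_BUFFER_INFO,
+ *                        &p, sizeof(p));
+ *   // on NV_OK, p.buffer holds the buffer number and p.flags its state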
+ */
+
+#define NV0000_CTRL_CMD_NVD_GET_NVLOG_BUFFER_INFO (0x605) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS {
+    NvU32 component;
+    NvU32 buffer;
+    NvU32 tag;
+    NvU32 size;
+    NvU32 flags;
+    NvU32 pos;
+    NvU32 overflow;
+} NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS;
+
+/* flags fields and values */
+/* input */
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE          0:0
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE_NO       (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE_YES      (0x00000001)
+
+/* output */
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_DISABLED       0:0
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_DISABLED_NO    (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_DISABLED_YES   (0x00000001)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE           1:1
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE_RING      (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE_NOWRAP    (0x00000001)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE     2:2
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE_NO  (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE_YES (0x00000001)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NONPAGED       3:3
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NONPAGED_NO    (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NONPAGED_YES   (0x00000001)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING        5:4
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_NONE   (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_STATE  (0x00000001)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_FULL   (0x00000002)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA            6:6
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA_NO         (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA_YES        (0x00000001)
+
+/*
+ * NV0000_CTRL_CMD_NVD_GET_NVLOG
+ *
+ * This command retrieves the specified dump block from the specified
+ * NvLog buffer. To retrieve the entire buffer, the caller should start
+ * with blockNum set to 0 and continue issuing calls with an incremented
+ * blockNum until the returned size value is less than
+ * NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE.
+ *
+ *   component (in)
+ *     This parameter specifies the system dump component for which the NvLog
+ *     dump operation is to be directed. Legal values for this parameter
+ *     must be greater than or equal to NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG
+ *     and less than NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED.
+ *   buffer (in)
+ *     This parameter specifies the NvLog buffer to dump.
+ *   blockNum (in)
+ *     This parameter specifies the block number for which data is to be
+ *     dumped.
+ *   size (in/out)
+ *     On entry, this parameter specifies the maximum length in bytes for
+ *     the returned data (should be set to NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE).
+ *     On exit, it specifies the number of bytes returned.
+ *   data (out)
+ *     This parameter returns the data for the specified block. The size
+ *     parameter value indicates the number of valid bytes returned.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NVOS_ERROR_INVALID_ARGUMENT if components are invalid.
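+ *
+ * The block-by-block protocol above can be sketched as follows (again
+ * assuming the hypothetical NvRmControl wrapper and an assumed
+ * myBuffer value):
+ *
+ *   NV0000_CTRL_NVD_GET_NVLOG_PARAMS p;
+ *   NV_STATUS status;
+ *   NvU32 block = 0;
+ *   do {
+ *       memset(&p, 0, sizeof(p));
+ *       p.component = NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG;
+ *       p.buffer    = myBuffer;
+ *       p.blockNum  = block++;
+ *       p.size      = NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE;
+ *       status = NvRmControl(hClient, hClient,
+ *                            NV0000_CTRL_CMD_NVD_GET_NVLOG,
+ *                            &p, sizeof(p));
+ *       // consume p.size bytes from p.data here
+ *   } while (status == NV_OK && p.size == NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE);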
+ */ +#define NV0000_CTRL_CMD_NVD_GET_NVLOG (0x606) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_NVLOG_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE (4000) + +#define NV0000_CTRL_NVD_GET_NVLOG_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV0000_CTRL_NVD_GET_NVLOG_PARAMS { + NvU32 component; + NvU32 buffer; + NvU32 blockNum; + NvU32 size; + NvU8 data[NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE]; +} NV0000_CTRL_NVD_GET_NVLOG_PARAMS; + +/* + * NV0000_CTRL_CMD_NVD_GET_RCERR_RPT + * + * This command returns a block of registers that were recorded at the time + * of an RC error for the current process. + * + * reqIdx: + * [IN] the index of the report being requested. + * The index rolls over to 0. + * If the requested index is not in the circular buffer, then no data is + * transferred & NV_ERR_INVALID_INDEX (indicating the specified + * index is not in the table) is returned. + * + * rptIdx: + * [OUT] the index of the report being returned. + * If the requested index is not in the circular buffer, then the value is + * undefined, no data is transferred & NV_ERR_INVALID_INDEX is returned. + * If the specified index is present, but does not meet the requested + * criteria (refer to the owner & processId fields), the rptIdx will be + * set to a value that does not match the reqIdx, and no data will be + * transferred. NV_ERR_INSUFFICIENT_PERMISSIONS is still returned. + * + * GPUTag: + * [OUT] id of the GPU whose data was collected. + * + * rptTime: + * [OUT] the timestamp (seconds since 1/1/1970) for when the report was + * created. + * + * startIdx: + * [OUT] the index of the oldest start record for the first report that + * matches the specified criteria (refer to the owner & processId + * fields). If no records match the specified criteria, this value is + * undefined, the failure code NV_ERR_MISSING_TABLE_ENTRY will + * be returned, and no data will be transferred. + * + * endIdx: + * [OUT] the index of the newest end record for the most recent report that + * matches the specified criteria (refer to the owner & processId + * fields). If no records match the specified criteria, this value is + * undefined, the failure code NV_ERR_MISSING_TABLE_ENTRY will + * be returned, and no data will be transferred. + * + * rptType: + * [OUT] indicator of what data is in the report. + * + * flags + * [OUT] a set of flags indicating attributes of the record + * NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_POS_FIRST -- indicates this is the first record of a report. + * NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_POS_LAST -- indicates this is the last record of the report. + * NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_RANGE_VALID -- indicates the response contains a valid + * index range. + * Note, this may be set when an error is returned, indicating that a valid range was found even + * though the event at the requested index was not. + * NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_DATA_VALID -- indicates the response contains valid data. + * + * rptCount: + * [OUT] number of entries returned in the report. + * + * owner: + * [IN] Entries are only returned if they have the same owner as the specified owner or the specified + * owner Id is NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_OWNER_ID. + * If the requested index is not owned by the specified owner, the rptIdx + * will be set to a value that does not match the reqIdx, and no data will + * be transferred. NV_ERR_INSUFFICIENT_PERMISSIONS is returned.
+ * + * processId: + * [IN] Deprecated + * report: + * [OUT] array of rptCount enum/value pair entries containing the data from the report. + * Entries beyond rptCount are undefined. + * + * + * Possible status values returned are: + * NV_OK -- we found & transferred the requested record. + * NV_ERR_MISSING_TABLE_ENTRY -- we didn't find any records that meet the criteria. + * NV_ERR_INVALID_INDEX -- the requested index was not found in the buffer. + * NV_ERR_INSUFFICIENT_PERMISSIONS -- the requested record was found, but it did not meet the criteria. + * NV_ERR_BUSY_RETRY -- we could not access the circular buffer. + * + */ + +#define NV0000_CTRL_CMD_NVD_GET_RCERR_RPT (0x607) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_MAX_ENTRIES 200 + +// report types +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_TYPE_TEST 0 +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_TYPE_GRSTATUS 1 +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_TYPE_GPCSTATUS 2 +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_TYPE_MMU_FAULT_STATUS 3 +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_TYPE_RC_ERROR 4 + +// pseudo register enums attribute content +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_EMPTY 0x00000000 +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_OVERFLOWED 0x00000001 // number of missed entries. +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_MAX_PSEDO_REG 0x0000000f + + + +// Flags Definitions +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_POS_FIRST 0x00000001 // indicates this is the first record of a report. +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_POS_LAST 0x00000002 // indicates this is the last record of the report. +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_RANGE_VALID 0x00000004 // indicates the response contains a valid range +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_DATA_VALID 0x00000008 // indicates the response contains valid data + + +// Attribute Definitions +#define TPC_REG_ATTR(gpcId, tpcId) ((gpcId << 8) | (tpcId)) +#define ROP_REG_ATTR(gpcId, ropId) ((gpcId << 8) | (ropId)) +#define SM_REG_ATTR(gpcId, tpcId, smId) ((((gpcId) << 16) | ((tpcId) << 8)) | (smId)) + +// Process Id Pseudo values +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_PROCESS_ID 0x00000000 // get report for any process ID + +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_OWNER_ID 0xFFFFFFFF // get report for any owner ID + + +typedef struct NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_ENTRY { + NvU32 tag; + NvU32 value; + NvU32 attribute; +} NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_ENTRY; + +#define NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS { + NvU16 reqIdx; + NvU16 rptIdx; + NvU32 GPUTag; + NvU32 rptTime; // time in seconds since 1/1/1970 + NvU16 startIdx; + NvU16 endIdx; + NvU16 rptType; + NvU32 flags; + NvU16 rptCount; + NvU32 owner; // indicating whose reports to get + NvU32 processId; // deprecated field + + NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_ENTRY report[NV0000_CTRL_CMD_NVD_RCERR_RPT_MAX_ENTRIES]; +} NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS; + +/* + * NV0000_CTRL_CMD_NVD_GET_DPC_ISR_TS + * + * This command returns the time stamp information that is collected from + * the execution of various DPCs/ISRs. This time stamp information is for + * debugging purposes only and helps with analyzing regressions and + * latencies for DPC/ISR execution times. + * + * tsBufferSize + * This field specifies the size of the buffer that the caller allocates.
+ * tsBuffer + * This field specifies a pointer in the caller's address space to the + * buffer into which the timestamp info on DPC/ISR is to be returned. + * This buffer must be at least as big as tsBufferSize. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV0000_CTRL_CMD_NVD_GET_DPC_ISR_TS (0x608) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_DPC_ISR_TS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_NVD_GET_DPC_ISR_TS_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV0000_CTRL_NVD_GET_DPC_ISR_TS_PARAMS { + NvU32 tsBufferSize; + NV_DECLARE_ALIGNED(NvP64 pTSBuffer, 8); +} NV0000_CTRL_NVD_GET_DPC_ISR_TS_PARAMS; + +/* _ctrl0000nvd_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h new file mode 100644 index 0000000..70ff0eb --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h @@ -0,0 +1,98 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000proc.finn +// + +#include "ctrl/ctrl0000/ctrl0000base.h" +#include "nvlimits.h" + +/* + * NV0000_CTRL_CMD_SET_SUB_PROCESS_ID + * + * Save the sub process ID and sub process name in the client database + * subProcessID + * Sub process ID + * subProcessName + * Sub process name + * + * In a vGPU environment, sub process means the guest user/kernel process running + * within a single VM. It also refers to any sub process (or sub-sub process) + * within a parent process. + * + * Please refer to the wiki for more details about the sub process concept: Resource_Server + * + * Possible return values are: + * NV_OK + */ +#define NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS { + NvU32 subProcessID; + char subProcessName[NV_PROC_NAME_MAX_LENGTH]; +} NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS; + +/* + * NV0000_CTRL_CMD_DISABLE_SUB_PROCESS_USERD_ISOLATION + * + * Disable sub process USERD isolation. + * bIsSubProcessDisabled + * NV_TRUE to disable sub process USERD isolation + * + * USERD allocated by different domains should not be put into the same physical page.
+ * This provides the basic security isolation because a physical page is the unit of + * granularity at which the OS can provide isolation between processes. + * + * GUEST_USER: USERD allocated by guest user process + * GUEST_KERNEL: USERD allocated by guest kernel process + * GUEST_INSECURE: USERD allocated by guest/kernel process, + * INSECURE means there is no isolation between guest user and guest kernel + * HOST_USER: USERD allocated by host user process + * HOST_KERNEL: USERD allocated by host kernel process + * + * When sub process USERD isolation is disabled, we won't distinguish USERD allocated by guest + * user and guest kernel. They all belong to the GUEST_INSECURE domain. + * + * Please refer to the wiki for more details: RM_USERD_Isolation + * + * Possible return values are: + * NV_OK + */ +#define NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS { + NvBool bIsSubProcessDisabled; +} NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS; + +#define NV0000_CTRL_CMD_SET_SUB_PROCESS_ID (0x901) /* finn: Evaluated from "(FINN_NV01_ROOT_PROC_INTERFACE_ID << 8) | NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CMD_DISABLE_SUB_PROCESS_USERD_ISOLATION (0x902) /* finn: Evaluated from "(FINN_NV01_ROOT_PROC_INTERFACE_ID << 8) | NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS_MESSAGE_ID" */ + +/* _ctrl0000proc_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h new file mode 100644 index 0000000..1ca299f --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h @@ -0,0 +1,112 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000syncgpuboost.finn +// + +#include "ctrl/ctrl0000/ctrl0000base.h" + +#include "ctrl/ctrlxxxx.h" +#include "nvtypes.h" +#include "nvlimits.h" + +/* --------------------------- Macros ----------------------------------------*/ +// There are at least 2 GPUs in a sync group. Hence max is half of max devices.
+#define NV0000_SYNC_GPU_BOOST_MAX_GROUPS (0x10) /* finn: Evaluated from "((NV_MAX_DEVICES) >> 1)" */ +#define NV0000_SYNC_GPU_BOOST_INVALID_GROUP_ID 0xFFFFFFFF + +/*-------------------------Command Prototypes---------------------------------*/ + +/*! + * Query whether SYNC GPU BOOST MANAGER is enabled or disabled. + */ +#define NV0000_CTRL_CMD_SYNC_GPU_BOOST_INFO (0xa01) /* finn: Evaluated from "(FINN_NV01_ROOT_SYNC_GPU_BOOST_INTERFACE_ID << 8) | NV0000_SYNC_GPU_BOOST_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_SYNC_GPU_BOOST_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0000_SYNC_GPU_BOOST_INFO_PARAMS { + // [out] Specifies if Sync Gpu Boost Manager is enabled or not. + NvBool bEnabled; +} NV0000_SYNC_GPU_BOOST_INFO_PARAMS; + +/*! + * Creates a Synchronized GPU-Boost Group (SGBG) + */ +#define NV0000_CTRL_CMD_SYNC_GPU_BOOST_GROUP_CREATE (0xa02) /* finn: Evaluated from "(FINN_NV01_ROOT_SYNC_GPU_BOOST_INTERFACE_ID << 8) | NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS_MESSAGE_ID" */ + +/*! + * Describes a Synchronized GPU-Boost Group configuration + */ +typedef struct NV0000_SYNC_GPU_BOOST_GROUP_CONFIG { + // [in] Number of elements in @ref gpuIds + NvU32 gpuCount; + + // [in] IDs of GPUs to be put in the Sync Boost Group + NvU32 gpuIds[NV_MAX_DEVICES]; + + // [out] Unique ID of the SGBG, if created + NvU32 boostGroupId; + + // [in] If this group represents bridgeless SLI + NvBool bBridgeless; +} NV0000_SYNC_GPU_BOOST_GROUP_CONFIG; + +#define NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS { + NV0000_SYNC_GPU_BOOST_GROUP_CONFIG boostConfig; +} NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS; + +/*! + * Destroys a previously created Synchronized GPU-Boost Group (SGBG) + */ +#define NV0000_CTRL_CMD_SYNC_GPU_BOOST_GROUP_DESTROY (0xa03) /* finn: Evaluated from "(FINN_NV01_ROOT_SYNC_GPU_BOOST_INTERFACE_ID << 8) | NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS_MESSAGE_ID" */ + +#define NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS { + // [in] Unique ID of the SGBG to be destroyed + NvU32 boostGroupId; +} NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS; + +/*! + * Get configuration information for all Synchronized Boost Groups in the system. + */ +#define NV0000_CTRL_CMD_SYNC_GPU_BOOST_GROUP_INFO (0xa04) /* finn: Evaluated from "(FINN_NV01_ROOT_SYNC_GPU_BOOST_INTERFACE_ID << 8) | NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS { + // [out] Number of groups retrieved. @ref NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS::pBoostGroups + NvU32 groupCount; + + // [out] @ref NV0000_SYNC_GPU_BOOST_GROUP_CONFIG + NV0000_SYNC_GPU_BOOST_GROUP_CONFIG pBoostGroups[NV0000_SYNC_GPU_BOOST_MAX_GROUPS]; +} NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS; + +/* _ctrl0000syncgpuboost_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h new file mode 100644 index 0000000..8474929 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h @@ -0,0 +1,3113 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000system.finn +// + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrl0000/ctrl0000base.h" + +/* NV01_ROOT (client) system control commands and parameters */ + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_FEATURES + * + * This command returns a mask of supported features for the SYSTEM category + * of the 0000 class. + * + * Valid features include: + * + * NV0000_CTRL_SYSTEM_GET_FEATURES_SLI + * When this bit is set, SLI is supported. + * NV0000_CTRL_SYSTEM_GET_FEATURES_IS_EFI_INIT + * When this bit is set, EFI has initialized the core channel. + * NV0000_CTRL_SYSTEM_GET_FEATURES_RM_TEST_ONLY_CODE_ENABLED + * When this bit is set, RM test only code is supported. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_FEATURES (0x1f0U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS_MESSAGE_ID (0xF0U) + +typedef struct NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS { + NvU32 featuresMask; +} NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS; + + + +/* Valid feature values */ +#define NV0000_CTRL_SYSTEM_GET_FEATURES_SLI 0:0 +#define NV0000_CTRL_SYSTEM_GET_FEATURES_SLI_FALSE (0x00000000U) +#define NV0000_CTRL_SYSTEM_GET_FEATURES_SLI_TRUE (0x00000001U) + +#define NV0000_CTRL_SYSTEM_GET_FEATURES_UUID_BASED_MEM_SHARING 3:3 +#define NV0000_CTRL_SYSTEM_GET_FEATURES_UUID_BASED_MEM_SHARING_FALSE (0x00000000U) +#define NV0000_CTRL_SYSTEM_GET_FEATURES_UUID_BASED_MEM_SHARING_TRUE (0x00000001U) + +#define NV0000_CTRL_SYSTEM_GET_FEATURES_RM_TEST_ONLY_CODE_ENABLED 4:4 +#define NV0000_CTRL_SYSTEM_GET_FEATURES_RM_TEST_ONLY_CODE_ENABLED_FALSE (0x00000000U) +#define NV0000_CTRL_SYSTEM_GET_FEATURES_RM_TEST_ONLY_CODE_ENABLED_TRUE (0x00000001U) +/* + * NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION + * + * This command returns the current driver information. + * The first time this is called, sizeOfStrings is set to the size of + * the greater of NV_BUILD_BRANCH_VERSION and NV_DISPLAY_DRIVER_TITLE. + * The client then allocates memory of size sizeOfStrings for + * pVersionBuffer and pTitleBuffer and calls the command again to + * receive the driver info.
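+ * + * [Illustrative sketch; not part of the original header] The two-call + * sequence might look like the following; NvRmControl is a placeholder + * for the client's RmControl entry point: + * + * NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS p = { 0 }; + * // first call: buffer pointers are NULL, p.sizeOfStrings is returned + * NvRmControl(hClient, hClient, + * NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION, &p, sizeof(p)); + * p.pDriverVersionBuffer = (NvP64)(NvUPtr)malloc(p.sizeOfStrings); + * p.pVersionBuffer = (NvP64)(NvUPtr)malloc(p.sizeOfStrings); + * p.pTitleBuffer = (NvP64)(NvUPtr)malloc(p.sizeOfStrings); + * // second call: the buffers receive the version and title strings + * NvRmControl(hClient, hClient, + * NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION, &p, sizeof(p));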
+ * + * sizeOfStrings + * This field returns the size in bytes of the pVersionBuffer and + * pTitleBuffer strings. + * pDriverVersionBuffer + * This field returns the version (NV_VERSION_STRING). + * pVersionBuffer + * This field returns the version (NV_BUILD_BRANCH_VERSION). + * pTitleBuffer + * This field returns the title (NV_DISPLAY_DRIVER_TITLE). + * changelistNumber + * This field returns the changelist value (NV_BUILD_CHANGELIST_NUM). + * officialChangelistNumber + * This field returns the last official changelist value + * (NV_LAST_OFFICIAL_CHANGELIST_NUM). + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ + +#define NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION (0x101U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS { + NvU32 sizeOfStrings; + NV_DECLARE_ALIGNED(NvP64 pDriverVersionBuffer, 8); + NV_DECLARE_ALIGNED(NvP64 pVersionBuffer, 8); + NV_DECLARE_ALIGNED(NvP64 pTitleBuffer, 8); + NvU32 changelistNumber; + NvU32 officialChangelistNumber; +} NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS; + +typedef enum NV0000_CTRL_SYSTEM_SH_SOC_TYPE { + NV0000_CTRL_SYSTEM_SH_SOC_TYPE_NA = 0, + NV0000_CTRL_SYSTEM_SH_SOC_TYPE_NV_GRACE = 1, + +} NV0000_CTRL_SYSTEM_SH_SOC_TYPE; + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_CPU_INFO + * + * This command returns system CPU information. + * + * type + * This field returns the processor type. + * Legal processor types include: + * Intel processors: + * P55 : P55C - MMX + * P6 : PPro + * P2 : PentiumII + * P2XC : Xeon & Celeron + * CELA : Celeron-A + * P3 : Pentium-III + * P3_INTL2 : Pentium-III w/integrated L2 (fullspeed, on die, 256K) + * P4 : Pentium 4 + * CORE2 : Core2 Duo Conroe + * AMD processors + * K62 : K6-2 w/ 3DNow + * IDT/Centaur processors + * C6 : WinChip C6 + * C62 : WinChip 2 w/ 3DNow + * Cyrix processors + * GX : MediaGX + * M1 : 6x86 + * M2 : M2 + * MGX : MediaGX w/ MMX + * Transmeta processors + * TM_CRUSOE : Transmeta Crusoe(tm) + * PowerPC processors + * PPC603 : PowerPC 603 + * PPC604 : PowerPC 604 + * PPC750 : PowerPC 750 + * + * capabilities + * This field returns the capabilities of the processor. + * Legal processor capabilities include: + * MMX : supports MMX + * SSE : supports SSE + * 3DNOW : supports 3DNow + * SSE2 : supports SSE2 + * SFENCE : supports SFENCE + * WRITE_COMBINING : supports write-combining + * ALTIVEC : supports ALTIVEC + * PUT_NEEDS_IO : requires OUT inst w/PUT updates + * NEEDS_WC_WORKAROUND : requires workaround for P4 write-combining bug + * 3DNOW_EXT : supports 3DNow Extensions + * MMX_EXT : supports MMX Extensions + * CMOV : supports CMOV + * CLFLUSH : supports CLFLUSH + * SSE3 : supports SSE3 + * NEEDS_WAR_124888 : requires write to GPU while spinning on + * : GPU value + * HT : supports hyper-threading + * clock + * This field returns the processor speed in MHz. + * L1DataCacheSize + * This field returns the level 1 data (or unified) cache size + * in kilobytes. + * L2DataCacheSize + * This field returns the level 2 data (or unified) cache size + * in kilobytes. + * dataCacheLineSize + * This field returns the bytes per line in the level 1 data cache. + * numLogicalCpus + * This field returns the number of logical processors. On Intel x86 + * systems that support it, this value will incorporate the current state + * of HyperThreading.
+ * numPhysicalCpus + * This field returns the number of physical processors. + * name + * This field returns the CPU name in ASCII string format. + * family + * Vendor defined Family and Extended Family combined + * model + * Vendor defined Model and Extended Model combined + * stepping + * Silicon stepping + * bCCEnabled + * Confidential compute enabled/disabled state + * selfHostedSocType + * SoC type NV0000_CTRL_SYSTEM_SH_SOC_TYPE* in case of self hosted systems + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_CPU_INFO (0x102U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS { + NvU32 type; /* processor type */ + NvU32 capabilities; /* processor caps */ + NvU32 clock; /* processor speed (MHz) */ + NvU32 L1DataCacheSize; /* L1 dcache size (KB) */ + NvU32 L2DataCacheSize; /* L2 dcache size (KB) */ + NvU32 dataCacheLineSize; /* L1 dcache bytes/line */ + NvU32 numLogicalCpus; /* logical processor cnt */ + NvU32 numPhysicalCpus; /* physical processor cnt*/ + NvU8 name[52]; /* embedded cpu name */ + NvU32 family; /* Vendor defined Family and Extended Family combined */ + NvU32 model; /* Vendor defined Model and Extended Model combined */ + NvU8 stepping; /* Silicon stepping */ + NvU32 coresOnDie; /* cpu cores per die */ + NvBool bCCEnabled; /* CC enabled on cpu */ + NV0000_CTRL_SYSTEM_SH_SOC_TYPE selfHostedSocType; /* SoC type in case of self hosted systems */ +} NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS; + +// Macros for CPU family information +#define NV0000_CTRL_SYSTEM_CPU_FAMILY 3:0 +#define NV0000_CTRL_SYSTEM_CPU_EXTENDED_FAMILY 11:4 + +// Macros for CPU model information +#define NV0000_CTRL_SYSTEM_CPU_MODEL 3:0 +#define NV0000_CTRL_SYSTEM_CPU_EXTENDED_MODEL 7:4 + +// Macros for AMD CPU information +#define NV0000_CTRL_SYSTEM_CPU_ID_AMD_FAMILY 0xF +#define NV0000_CTRL_SYSTEM_CPU_ID_AMD_EXTENDED_FAMILY 0xA +#define NV0000_CTRL_SYSTEM_CPU_ID_AMD_MODEL 0x0 +#define NV0000_CTRL_SYSTEM_CPU_ID_AMD_EXTENDED_MODEL 0x4 + +// Macros for Intel CPU information +#define NV0000_CTRL_SYSTEM_CPU_ID_INTEL_FAMILY 0x6 +#define NV0000_CTRL_SYSTEM_CPU_ID_INTEL_EXTENDED_FAMILY 0x0 +#define NV0000_CTRL_SYSTEM_CPU_ID_INTEL_CORE_S_MODEL 0x7 +#define NV0000_CTRL_SYSTEM_CPU_ID_INTEL_CORE_P_MODEL 0xA +#define NV0000_CTRL_SYSTEM_CPU_ID_INTEL_EXTENDED_MODEL 0x9 + +/* processor type values */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_UNKNOWN (0x00000000U) +/* Intel types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P5 (0x00000001U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P55 (0x00000002U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P6 (0x00000003U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P2 (0x00000004U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P2XC (0x00000005U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_CELA (0x00000006U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P3 (0x00000007U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P3_INTL2 (0x00000008U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P4 (0x00000009U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_CORE2 (0x00000010U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_CELN_M16H (0x00000011U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_CORE2_EXTRM (0x00000012U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_ATOM (0x00000013U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_XEON_SPR (0x00000014U) +/* AMD types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_K5 (0x00000030U) +#define 
NV0000_CTRL_SYSTEM_CPU_TYPE_K6 (0x00000031U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_K62 (0x00000032U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_K63 (0x00000033U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_K7 (0x00000034U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_K8 (0x00000035U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_K10 (0x00000036U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_K11 (0x00000037U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_RYZEN (0x00000038U) +/* IDT/Centaur types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_C6 (0x00000060U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_C62 (0x00000061U) +/* Cyrix types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_GX (0x00000070U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_M1 (0x00000071U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_M2 (0x00000072U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_MGX (0x00000073U) +/* Transmeta types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_TM_CRUSOE (0x00000080U) +/* IBM types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_PPC603 (0x00000090U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_PPC604 (0x00000091U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_PPC750 (0x00000092U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_POWERN (0x00000093U) +/* Unknown ARM architecture CPU type */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_ARM_UNKNOWN (0xA0000000U) +/* ARM Ltd types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_ARM_A9 (0xA0000009U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_ARM_A15 (0xA000000FU) +/* NVIDIA types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_NV_DENVER_1_0 (0xA0001000U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_NV_DENVER_2_0 (0xA0002000U) + +/* Generic types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_ARMV8A_GENERIC (0xA00FF000U) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_ARMV9A_GENERIC (0xA00FF001U) + +/* processor capabilities */ +#define NV0000_CTRL_SYSTEM_CPU_CAP_MMX (0x00000001U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_SSE (0x00000002U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW (0x00000004U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_SSE2 (0x00000008U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_SFENCE (0x00000010U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_WRITE_COMBINING (0x00000020U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_ALTIVEC (0x00000040U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_PUT_NEEDS_IO (0x00000080U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_NEEDS_WC_WORKAROUND (0x00000100U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW_EXT (0x00000200U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_MMX_EXT (0x00000400U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_CMOV (0x00000800U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_CLFLUSH (0x00001000U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_NEEDS_WAR_190854 (0x00002000U) /* deprecated */ +#define NV0000_CTRL_SYSTEM_CPU_CAP_SSE3 (0x00004000U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_NEEDS_WAR_124888 (0x00008000U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_HT_CAPABLE (0x00010000U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_SSE41 (0x00020000U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_SSE42 (0x00040000U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_AVX (0x00080000U) +#define NV0000_CTRL_SYSTEM_CPU_CAP_ERMS (0x00100000U) + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_CHIPSET_INFO + * + * This command returns system chipset information. + * + * vendorId + * This parameter returns the vendor identification for the chipset. + * A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates the chipset + * cannot be identified. + * deviceId + * This parameter returns the device identification for the chipset. + * A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates the chipset + * cannot be identified. + * subSysVendorId + * This parameter returns the subsystem vendor identification for the + * chipset. 
A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates the + * chipset cannot be identified. + * subSysDeviceId + * This parameter returns the subsystem device identification for the + * chipset. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates the + * chipset cannot be identified. + * HBvendorId + * This parameter returns the vendor identification for the chipset's + * host bridge. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates + * the chipset's host bridge cannot be identified. + * HBdeviceId + * This parameter returns the device identification for the chipset's + * host bridge. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates + * the chipset's host bridge cannot be identified. + * HBsubSysVendorId + * This parameter returns the subsystem vendor identification for the + * chipset's host bridge. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID + * indicates the chipset's host bridge cannot be identified. + * HBsubSysDeviceId + * This parameter returns the subsystem device identification for the + * chipset's host bridge. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID + * indicates the chipset's host bridge cannot be identified. + * sliBondId + * This parameter returns the SLI bond identification for the chipset. + * vendorNameString + * This parameter returns the vendor name string. + * chipsetNameString + * This parameter returns the chipset name string. + * sliBondNameString + * This parameter returns the SLI bond name string. + * flags + * This parameter specifies NV0000_CTRL_SYSTEM_CHIPSET_FLAG_XXX flags: + * _HAS_RESIZABLE_BAR_ISSUE_YES: Chipset where the use of resizable BAR1 + * should be disabled - bug 3440153 + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_CHIPSET_INFO (0x104U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS_MESSAGE_ID" */ + +/* maximum name string length */ +#define NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH (0x0000020U) + +/* invalid id */ +#define NV0000_SYSTEM_CHIPSET_INVALID_ID (0xffffU) + +#define NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS { + NvU16 vendorId; + NvU16 deviceId; + NvU16 subSysVendorId; + NvU16 subSysDeviceId; + NvU16 HBvendorId; + NvU16 HBdeviceId; + NvU16 HBsubSysVendorId; + NvU16 HBsubSysDeviceId; + NvU32 sliBondId; + NvU8 vendorNameString[NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH]; + NvU8 subSysVendorNameString[NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH]; + NvU8 chipsetNameString[NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH]; + NvU8 sliBondNameString[NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH]; + NvU32 flags; +} NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS; + +#define NV0000_CTRL_SYSTEM_CHIPSET_FLAG_HAS_RESIZABLE_BAR_ISSUE 0:0 +#define NV0000_CTRL_SYSTEM_CHIPSET_FLAG_HAS_RESIZABLE_BAR_ISSUE_NO (0x00000000U) +#define NV0000_CTRL_SYSTEM_CHIPSET_FLAG_HAS_RESIZABLE_BAR_ISSUE_YES (0x00000001U) + + + +/* + * NV0000_CTRL_SYSTEM_GET_VRR_COOKIE_PRESENT + * + * This command returns whether the VRR cookie is present in the SBIOS. + * + * bIsPresent (out) + * This parameter contains whether the VRR cookie is present in the SBIOS.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_REQUEST + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0000_CTRL_SYSTEM_GET_VRR_COOKIE_PRESENT (0x107U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_VRR_COOKIE_PRESENT_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_VRR_COOKIE_PRESENT_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV0000_CTRL_SYSTEM_GET_VRR_COOKIE_PRESENT_PARAMS { + NvBool bIsPresent; +} NV0000_CTRL_SYSTEM_GET_VRR_COOKIE_PRESENT_PARAMS; + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_LOCK_TIMES + * + * This command is used to retrieve the measured times spent holding and waiting for + * the main RM locks (API and GPU). + * + * waitApiLock + * Total time spent by RM APIs waiting to acquire the API lock + * + * holdRoApiLock + * Total time spent by RM APIs holding the API lock in RO mode. + * + * holdRwApiLock + * Total time spent by RM APIs holding the API lock in RW mode. + * + * waitGpuLock + * Total time spent by RM APIs waiting to acquire one or more GPU locks. + * + * holdGpuLock + * Total time spent by RM APIs holding one or more GPU locks. + * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0000_CTRL_CMD_SYSTEM_GET_LOCK_TIMES (0x109U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS { + NV_DECLARE_ALIGNED(NvU64 waitApiLock, 8); + NV_DECLARE_ALIGNED(NvU64 holdRoApiLock, 8); + NV_DECLARE_ALIGNED(NvU64 holdRwApiLock, 8); + NV_DECLARE_ALIGNED(NvU64 waitGpuLock, 8); + NV_DECLARE_ALIGNED(NvU64 holdGpuLock, 8); +} NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS; + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_CLASSLIST + * + * This command is used to retrieve the set of system-level classes + * supported by the platform. + * + * numClasses + * This parameter returns the number of valid entries in the returned + * classes[] list. This parameter will not exceed + * NV0000_CTRL_SYSTEM_MAX_CLASSLIST_SIZE. + * classes + * This parameter returns the list of supported classes. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ + +#define NV0000_CTRL_CMD_SYSTEM_GET_CLASSLIST (0x108U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS_MESSAGE_ID" */ + +/* maximum number of classes returned in classes[] array */ +#define NV0000_CTRL_SYSTEM_MAX_CLASSLIST_SIZE (32U) + +#define NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS { + NvU32 numClasses; + NvU32 classes[NV0000_CTRL_SYSTEM_MAX_CLASSLIST_SIZE]; +} NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS; + +/* + * NV0000_CTRL_CMD_SYSTEM_NOTIFY_EVENT + * + * This command is used to send triggered mobile-related system events + * to the RM. + * + * eventType + * This parameter indicates the triggered event type. This parameter + * should specify a valid NV0000_CTRL_SYSTEM_EVENT_TYPE value. + * eventData + * This parameter specifies the type-dependent event data associated + * with eventType. This parameter should specify a valid + * NV0000_CTRL_SYSTEM_EVENT_DATA value. + * bEventDataForced + * This parameter specifies whether or not to trust the current + * Lid/Dock state. This parameter should specify a valid + * NV0000_CTRL_SYSTEM_EVENT_DATA_FORCED value.
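+ * + * [Illustrative sketch; not part of the original header] Reporting a + * lid-close event and forcing the reported state to be trusted; + * NvRmControl is a placeholder for the client's RmControl entry point: + * + * NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS p = { 0 }; + * p.eventType = NV0000_CTRL_SYSTEM_EVENT_TYPE_LID_STATE; + * p.eventData = NV0000_CTRL_SYSTEM_EVENT_DATA_LID_CLOSED; + * p.bEventDataForced = NV0000_CTRL_SYSTEM_EVENT_DATA_FORCED_TRUE; + * NvRmControl(hClient, hClient, + * NV0000_CTRL_CMD_SYSTEM_NOTIFY_EVENT, &p, sizeof(p));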
+ + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + * Sync this up (#defines) with one in nvapi.spec! + * (NV_ACPI_EVENT_TYPE & NV_ACPI_EVENT_DATA) + */ +#define NV0000_CTRL_CMD_SYSTEM_NOTIFY_EVENT (0x110U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS { + NvU32 eventType; + NvU32 eventData; + NvBool bEventDataForced; +} NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS; + +/* valid eventType values */ +#define NV0000_CTRL_SYSTEM_EVENT_TYPE_LID_STATE (0x00000000U) +#define NV0000_CTRL_SYSTEM_EVENT_TYPE_POWER_SOURCE (0x00000001U) +#define NV0000_CTRL_SYSTEM_EVENT_TYPE_DOCK_STATE (0x00000002U) +#define NV0000_CTRL_SYSTEM_EVENT_TYPE_TRUST_LID (0x00000003U) +#define NV0000_CTRL_SYSTEM_EVENT_TYPE_TRUST_DOCK (0x00000004U) + +/* valid eventData values */ +#define NV0000_CTRL_SYSTEM_EVENT_DATA_LID_OPEN (0x00000000U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_LID_CLOSED (0x00000001U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_POWER_BATTERY (0x00000000U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_POWER_AC (0x00000001U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_UNDOCKED (0x00000000U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_DOCKED (0x00000001U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_DSM (0x00000000U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_DCS (0x00000001U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_NVIF (0x00000002U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_ACPI (0x00000003U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_POLL (0x00000004U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_COUNT (0x5U) /* finn: Evaluated from "(NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_POLL + 1)" */ +#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_DSM (0x00000000U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_DCS (0x00000001U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_NVIF (0x00000002U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_ACPI (0x00000003U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_POLL (0x00000004U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_COUNT (0x5U) /* finn: Evaluated from "(NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_POLL + 1)" */ + +/* valid bEventDataForced values */ +#define NV0000_CTRL_SYSTEM_EVENT_DATA_FORCED_FALSE (0x00000000U) +#define NV0000_CTRL_SYSTEM_EVENT_DATA_FORCED_TRUE (0x00000001U) + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE + * + * This command is used to query the platform type. + * + * systemType + * This parameter returns the type of the system. + * Legal values for this parameter include: + * NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_DESKTOP + * The system is a desktop platform. + * NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_MOBILE_GENERIC + * The system is a mobile (non-Toshiba) platform. + * NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_MOBILE_TOSHIBA + * The system is a mobile Toshiba platform. + * NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_SOC + * The system is a system-on-a-chip (SOC) platform.
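+ * + * [Illustrative sketch; not part of the original header] Querying the + * platform type; NvRmControl is a placeholder for the client's RmControl + * entry point: + * + * NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS p = { 0 }; + * NvRmControl(hClient, hClient, + * NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE, &p, sizeof(p)); + * if (p.systemType == NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_SOC) + * { + * // SOC-specific handling + * }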
+ * + + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE (0x111U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS_MESSAGE_ID (0x11U) + +typedef struct NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS { + NvU32 systemType; +} NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS; + +/* valid systemType values */ +#define NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_DESKTOP (0x000000U) +#define NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_MOBILE_GENERIC (0x000001U) +#define NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_MOBILE_TOSHIBA (0x000002U) +#define NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_SOC (0x000003U) + + + + +/* + * NV0000_CTRL_CMD_SYSTEM_DEBUG_RMMSG_CTRL + * + * This command controls the current RmMsg filters. + * + * It is only supported if RmMsg is enabled (e.g. debug builds). + * + * cmd + * GET - Gets the current RmMsg filter string. + * SET - Sets the current RmMsg filter string. + * + * count + * The length of the RmMsg filter string. + * + * data + * The RmMsg filter string. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0000_CTRL_CMD_SYSTEM_DEBUG_RMMSG_CTRL (0x121U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE 512U + +#define NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_CMD_GET (0x00000000U) +#define NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_CMD_SET (0x00000001U) + +#define NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS_MESSAGE_ID (0x21U) + +typedef struct NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS { + NvU32 cmd; + NvU32 count; + NvU8 data[NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE]; +} NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS; + +/* + * NV0000_CTRL_SYSTEM_HWBC_INFO + * + * This structure contains information about the HWBC (BR04) specified by + * hwbcId. + * + * hwbcId + * This field specifies the HWBC ID. + * firmwareVersion + * This field returns the version of the firmware on the HWBC (BR04), if + * present. This is a packed binary number of the form 0x12345678, which + * corresponds to a firmware version of 12.34.56.78. + * subordinateBus + * This field returns the subordinate bus number of the HWBC (BR04). + * secondaryBus + * This field returns the secondary bus number of the HWBC (BR04). + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +typedef struct NV0000_CTRL_SYSTEM_HWBC_INFO { + NvU32 hwbcId; + NvU32 firmwareVersion; + NvU32 subordinateBus; + NvU32 secondaryBus; +} NV0000_CTRL_SYSTEM_HWBC_INFO; + +#define NV0000_CTRL_SYSTEM_HWBC_INVALID_ID (0xFFFFFFFFU) + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_HWBC_INFO + * + * This command returns information about all Hardware Broadcast (HWBC) + * devices present in the system that are BR04s. To get the complete + * list of HWBCs in the system, all GPUs present in the system must be + * initialized. See the description of NV0000_CTRL_CMD_GPU_ATTACH_IDS to + * accomplish this. + * + * hwbcInfo + * This field is an array of NV0000_CTRL_SYSTEM_HWBC_INFO structures into + * which HWBC information is placed. There is one entry for each HWBC + * present in the system. Valid entries are contiguous, invalid entries + * have the hwbcId equal to NV0000_CTRL_SYSTEM_HWBC_INVALID_ID. 
If no HWBC + * is present in the system, all the entries will be marked invalid, but + * the return value will still be NV_OK. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_HWBC_INFO (0x124U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_MAX_HWBCS (0x00000080U) + +#define NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS_MESSAGE_ID (0x24U) + +typedef struct NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS { + NV0000_CTRL_SYSTEM_HWBC_INFO hwbcInfo[NV0000_CTRL_SYSTEM_MAX_HWBCS]; +} NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS; + +/* + * NV0000_CTRL_CMD_SYSTEM_GPS_CONTROL + * + * This command is used to control GPS functionality. It allows control of + * GPU Performance Scaling (GPS), changing its operational parameters and + * reading most GPS dynamic parameters. + * + * command + * This parameter specifies the command to execute. Invalid commands + * result in the return of an NV_ERR_INVALID_ARGUMENT status. + * locale + * This parameter indicates the specific locale to which the command + * 'command' is to be applied. + * Supported range of CPU/GPU {i = 0, ..., 255} + * data + * This parameter contains a command-specific data payload. It can + * be used to input data as well as output data. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_COMMAND + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_DATA + * NV_ERR_INVALID_REQUEST + * NV_ERR_NOT_SUPPORTED + */ +#define NV0000_CTRL_CMD_SYSTEM_GPS_CONTROL (0x122U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GPS_CONTROL_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GPS_CONTROL_PARAMS_MESSAGE_ID (0x22U) + +typedef struct NV0000_CTRL_SYSTEM_GPS_CONTROL_PARAMS { + NvU16 command; + NvU16 locale; + NvU32 data; +} NV0000_CTRL_SYSTEM_GPS_CONTROL_PARAMS; + +/* + * Valid command values : + * + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_GET_INIT + * Is used to check if GPS was correctly initialized. + * Possible return (OUT) values are: + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_INIT_NO + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_INIT_YES + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_SET_EXEC + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_GET_EXEC + * Are used to stop/start GPS functionality and to get current status. + * Possible IN/OUT values are: + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_EXEC_STOP + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_EXEC_START + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_SET_ACTIONS + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_GET_ACTIONS + * Are used to control execution of GPS actions and to get current status. + * Possible IN/OUT values are: + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_ACTIONS_OFF + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_ACTIONS_ON + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_SET_LOGIC + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_GET_LOGIC + * Are used to switch current GPS logic and to retrieve current logic. + * Possible IN/OUT values are: + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_LOGIC_OFF + * Causes all GPS actions to be NULL. + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_LOGIC_FUZZY + * Fuzzy logic will determine GPS actions based on current ruleset. + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_LOGIC_DETERMINISTIC + * Deterministic logic will define GPS actions based on current ruleset. + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_SET_PREFERENCE + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_GET_PREFERENCE + * Are used to set/retrieve system control preference.
+ * Possible IN/OUT values are: + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_PREFERENCE_CPU + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_PREFERENCE_GPU + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_PREFERENCE_BOTH + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_SET_GPU2CPU_LIMIT + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_GET_GPU2CPU_LIMIT + * Are used to set/retrieve GPU2CPU pstate limits. + * IN/OUT values are four bytes packed into a 32-bit data field. + * The CPU cap index for GPU pstate 0 is in the lowest byte, the CPU cap + * index for GPU pstate 3 is in the highest byte, etc. One special + * value is used to disable the override to the GPU2CPU map: + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_NO_MAP_OVERRIDE + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_SET_PMU_GPS_STATE + * Is used to stop/start GPS PMU functionality. + * NV0000_CTRL_CMD_SYSTEM_GPS_CMD_GET_PMU_GPS_STATE + * Is used to get the current status of PMU GPS. + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_SET_MAX_POWER + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_MAX_POWER + * Are used to set/retrieve the max power [mW] that the system can provide. + * This is a hardcoded GPS safety feature; logic/rules do not apply + * to this threshold. + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_SET_COOLING_BUDGET + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_COOLING_BUDGET + * Are used to set/retrieve the current system cooling budget [mW]. + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_SET_INTEGRAL_PERIOD + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_INTEGRAL_PERIOD + * Are used to set/retrieve the integration interval [sec]. + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_SET_RULESET + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_RULESET + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_RULE_COUNT + * Are used to set/retrieve the ruleset in use [#]. The value is checked + * against the MAX number of rules for the currently used GPS logic. COUNT + * also provides a way to find out how many rules exist for the current + * control system. + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_SET_APP_BOOST + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_APP_BOOST + * Is used to set/get a delay relative to now during which to allow unbound + * CPU performance. Units are seconds. + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_SET_PWR_SUPPLY_MODE + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_PWR_SUPPLY_MODE + * Is used to override/get the actual power supply mode (AC/Battery). + * Possible IN/OUT values are: + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_PWR_SUPPLY_REAL + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_PWR_SUPPLY_FAKE_AC + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_PWR_SUPPLY_FAKE_BATT + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_VCT_SUPPORT_INFO + * Is used to get the Ventura system information for the VCT tool. + * The returned 32-bit value should be treated as a bitmask and decoded in + * the following way: + * Encoding details are defined in objgps.h; refer to + * NV_GPS_SYS_SUPPORT_INFO and the corresponding bit defines. + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_SUPPORTED_FUNCTIONS + * Is used to get the supported sub-functions defined in SBIOS.
Returned + * value is a bitmask where each bit corresponds to a different function: + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_FUNC_SUPPORT + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_FUNC_VENTURASTATUS + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_FUNC_GETPSS + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_FUNC_SETPPC + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_FUNC_GETPPC + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_FUNC_VENTURACB + * NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_FUNC_SYSPARAMS + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_DELTA + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_FUTURE + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_LTMAVG + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_INTEGRAL + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_BURDEN + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_INTERMEDIATE + * Are used to retrieve appropriate power measurements and their derivatives + * in [mW] for the required locale. _BURDEN is defined only for _LOCALE_SYSTEM. + * _INTERMEDIATE is not defined for _LOCALE_SYSTEM, and takes an IN value as + * an index. + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_SENSOR_PARAMETERS + * Is used to retrieve parameters when adjusting raw sensor power readings. + * The values may come from SBIOS, VBIOS, registry or driver default. + * A possible IN value is the index of the parameter of interest. + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_TEMP + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_TEMP_DELTA + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_TEMP_FUTURE + * Are used to retrieve appropriate temperature measurements and their + * derivatives in [1/1000 Celsius]. + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_PSTATE + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_PSTATE_CAP + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_PSTATE_MIN + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_PSTATE_MAX + * Are used to retrieve the CPU(x)/GPU(x) p-state or its limits. + * Not applicable to _LOCALE_SYSTEM. + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_PSTATE_ACTION + * Is used to retrieve the last GPS action for a given domain. + * Not applicable to _LOCALE_SYSTEM. + * Possible return (OUT) values are: + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_DEC_TO_P0 + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_DEC_BY_1 + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_DO_NOTHING + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_SET_CURRENT + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_INC_BY_1 + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_INC_BY_2 + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_INC_TO_LFM + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_INC_TO_SLFM + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_SET_POWER_SIM_STATE + * Is used to set the power sensor simulator state. + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_SIM_STATE + * Is used to get the power sensor simulator state. + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_SET_POWER_SIM_DATA + * Is used to set power sensor simulator data. + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_SIM_DATA + * Is used to get power sensor simulator data. + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_INIT_USING_SBIOS_AND_ACK + * Is used to respond to the ACPI event triggered by SBIOS. RM will + * request values for budget and status, validate them, apply them + * and send an ACK back to SBIOS. + * NV0000_CTRL_CMD_SYSTEM_GPS_DATA_PING_SBIOS_FOR_EVENT + * Is a test cmd that should notify the SBIOS to send an ACPI event + * requesting a budget and status change.
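+ * + * [Illustrative sketch; not part of the original header] Packing the + * GPU2CPU limits described above (cap0..cap3 are caller-chosen CPU cap + * indices, cap0 for GPU pstate 0 in the lowest byte; the locale choice is + * illustrative and NvRmControl is a placeholder RmControl entry point): + * + * NV0000_CTRL_SYSTEM_GPS_CONTROL_PARAMS p = { 0 }; + * p.command = NV0000_CTRL_CMD_SYSTEM_GPS_CMD_SET_GPU2CPU_LIMIT; + * p.locale = NV0000_CTRL_CMD_SYSTEM_GPS_LOCALE_GPU(0); + * p.data = ((NvU32)cap3 << 24) | ((NvU32)cap2 << 16) | + * ((NvU32)cap1 << 8) | (NvU32)cap0; + * NvRmControl(hClient, hClient, + * NV0000_CTRL_CMD_SYSTEM_GPS_CONTROL, &p, sizeof(p));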
+ */
+#define NV0000_CTRL_CMD_SYSTEM_GPS_INVALID (0xFFFFU)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_GET_INIT (0x0000U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_SET_EXEC (0x0001U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_GET_EXEC (0x0002U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_SET_ACTIONS (0x0003U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_GET_ACTIONS (0x0004U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_SET_LOGIC (0x0005U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_GET_LOGIC (0x0006U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_SET_PREFERENCE (0x0007U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_GET_PREFERENCE (0x0008U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_SET_GPU2CPU_LIMIT (0x0009U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_GET_GPU2CPU_LIMIT (0x000AU)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_SET_PMU_GPS_STATE (0x000BU)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_GET_PMU_GPS_STATE (0x000CU)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_SET_MAX_POWER (0x0100U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_MAX_POWER (0x0101U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_SET_COOLING_BUDGET (0x0102U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_COOLING_BUDGET (0x0103U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_SET_INTEGRAL_PERIOD (0x0104U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_INTEGRAL_PERIOD (0x0105U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_SET_RULESET (0x0106U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_RULESET (0x0107U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_RULE_COUNT (0x0108U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_SET_APP_BOOST (0x0109U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_APP_BOOST (0x010AU)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_SET_PWR_SUPPLY_MODE (0x010BU)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_PWR_SUPPLY_MODE (0x010CU)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_VCT_SUPPORT_INFO (0x010DU)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_GET_SUPPORTED_FUNCTIONS (0x010EU)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER (0x0200U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_DELTA (0x0201U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_FUTURE (0x0202U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_LTMAVG (0x0203U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_INTEGRAL (0x0204U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_BURDEN (0x0205U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_INTERMEDIATE (0x0206U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_SENSOR_PARAMETERS (0x0210U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_TEMP (0x0220U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_TEMP_DELTA (0x0221U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_TEMP_FUTURE (0x0222U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_PSTATE (0x0240U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_PSTATE_CAP (0x0241U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_PSTATE_MIN (0x0242U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_PSTATE_MAX (0x0243U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_PSTATE_ACTION (0x0244U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_PSTATE_SLFM_PRESENT (0x0245U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_SET_POWER_SIM_STATE (0x0250U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_SIM_STATE (0x0251U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_SET_POWER_SIM_DATA (0x0252U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_GET_POWER_SIM_DATA (0x0253U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_INIT_USING_SBIOS_AND_ACK (0x0320U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_PING_SBIOS_FOR_EVENT (0x0321U)
+
+/* valid LOCALE values */
+#define NV0000_CTRL_CMD_SYSTEM_GPS_LOCALE_INVALID (0xFFFFU)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_LOCALE_SYSTEM (0x0000U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_LOCALE_CPU(i) (0x0100+((i)%0x100))
+#define NV0000_CTRL_CMD_SYSTEM_GPS_LOCALE_GPU(i) (0x0200+((i)%0x100))
+
+/* valid data values for enums */
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_INVALID (0x80000000U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_INIT_NO (0x00000000U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_INIT_YES (0x00000001U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_EXEC_STOP (0x00000000U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_EXEC_START (0x00000001U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_ACTIONS_OFF (0x00000000U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_ACTIONS_ON (0x00000001U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_LOGIC_OFF (0x00000000U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_LOGIC_FUZZY (0x00000001U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_LOGIC_DETERMINISTIC (0x00000002U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_PREFERENCE_CPU (0x00000000U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_PREFERENCE_GPU (0x00000001U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_PREFERENCE_BOTH (0x00000002U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_NO_MAP_OVERRIDE (0xFFFFFFFFU)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_PMU_GPS_STATE_OFF (0x00000000U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_PMU_GPS_STATE_ON (0x00000001U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_PWR_SUPPLY_REAL (0x00000000U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_PWR_SUPPLY_FAKE_AC (0x00000001U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_PWR_SUPPLY_FAKE_BATT (0x00000002U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_FUNC_SUPPORT (0x00000001U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_FUNC_VENTURASTATUS (0x00000002U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_FUNC_GETPSS (0x00000004U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_FUNC_SETPPC (0x00000008U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_FUNC_GETPPC (0x00000010U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_FUNC_VENTURACB (0x00000020U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SYS_DEF_FUNC_SYSPARAMS (0x00000040U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_DEC_TO_P0 (0x00000000U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_DEC_BY_1 (0x00000001U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_DO_NOTHING (0x00000002U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_SET_CURRENT (0x00000003U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_INC_BY_1 (0x00000004U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_INC_BY_2 (0x00000005U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_INC_TO_LFM (0x00000006U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_ACTION_INC_TO_SLFM (0x00000007U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_SLFM_PRESENT_NO (0x00000000U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_SLFM_PRESENT_YES (0x00000001U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_POWER_SIM_STATE_OFF (0x00000000U)
+#define NV0000_CTRL_CMD_SYSTEM_GPS_DATA_DEF_POWER_SIM_STATE_ON (0x00000001U)
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GPS_BATCH_CONTROL
+ *
+ * This command allows execution of multiple GpsControl commands within one
+ * RmControl call. For practical reasons, the number of commands is limited
+ * to 16. This command shares defines with NV0000_CTRL_CMD_SYSTEM_GPS_CONTROL.
+ *
+ * cmdCount
+ *   Number of commands that should be executed.
+ *   Less than or equal to NV0000_CTRL_CMD_SYSTEM_GPS_BATCH_COMMAND_MAX.
+ *
+ * succeeded
+ *   Number of commands that were successfully executed.
+ *   Less than or equal to NV0000_CTRL_CMD_SYSTEM_GPS_BATCH_COMMAND_MAX.
+ *   Failing commands return NV0000_CTRL_CMD_SYSTEM_GPS_CMD_DEF_INVALID
+ *   in their data field.
+ *
+ * cmdData
+ *   Array of commands with the following structure:
+ *   command
+ *     This parameter specifies the command to execute.
+ *     Invalid commands result in the return of an
+ *     NV_ERR_INVALID_ARGUMENT status.
+ *   locale
+ *     This parameter indicates the specific locale to which
+ *     the command 'command' is to be applied.
+ *     Supported range of CPU/GPU {i = 0, ..., 255}
+ *   data
+ *     This parameter contains a command-specific data payload.
+ *     It is used both to input data and to output data.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_REQUEST
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0000_CTRL_CMD_SYSTEM_GPS_BATCH_CONTROL (0x123U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GPS_BATCH_CONTROL_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_CMD_SYSTEM_GPS_BATCH_COMMAND_MAX (16U)
+#define NV0000_CTRL_SYSTEM_GPS_BATCH_CONTROL_PARAMS_MESSAGE_ID (0x23U)
+
+typedef struct NV0000_CTRL_SYSTEM_GPS_BATCH_CONTROL_PARAMS {
+    NvU32 cmdCount;
+    NvU32 succeeded;
+
+    struct {
+        NvU16 command;
+        NvU16 locale;
+        NvU32 data;
+    } cmdData[NV0000_CTRL_CMD_SYSTEM_GPS_BATCH_COMMAND_MAX];
+} NV0000_CTRL_SYSTEM_GPS_BATCH_CONTROL_PARAMS;
+
+
+/*
+ * Deprecated. Please use NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 instead.
+ */
+#define NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS (0x127U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS_SQUARED must remain equal to the square of
+ * NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS due to Check RM parsing issues.
+ * NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS is the maximum size of GPU groups
+ * allowed for batched P2P caps queries provided by the RM control
+ * NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX.
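+ *
+ * As a concrete illustration of how these constants relate (derived from
+ * the definitions below): with at most 32 attached GPUs, the peer-ID
+ * matrices in the V2 control hold up to 32 * 32 = 1024 entries, indexed as
+ * busPeerIds[X * gpuCount + Y]; the matrix control groups at most 8 GPUs
+ * per side, so each of its result matrices holds at most 8 * 8 entries.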
+ */
+#define NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS 32U
+#define NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS_SQUARED 1024U
+#define NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS 8U
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INVALID_PEER 0xffffffffU
+
+/* P2P capabilities status index values */
+#define NV0000_CTRL_P2P_CAPS_INDEX_READ 0U
+#define NV0000_CTRL_P2P_CAPS_INDEX_WRITE 1U
+#define NV0000_CTRL_P2P_CAPS_INDEX_NVLINK 2U
+#define NV0000_CTRL_P2P_CAPS_INDEX_ATOMICS 3U
+#define NV0000_CTRL_P2P_CAPS_INDEX_PROP 4U
+#define NV0000_CTRL_P2P_CAPS_INDEX_LOOPBACK 5U
+#define NV0000_CTRL_P2P_CAPS_INDEX_PCI 6U
+#define NV0000_CTRL_P2P_CAPS_INDEX_C2C 7U
+#define NV0000_CTRL_P2P_CAPS_INDEX_PCI_BAR1 8U
+
+#define NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE 9U
+
+
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS_MESSAGE_ID (0x27U)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS {
+    NvU32 gpuIds[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS];
+    NvU32 gpuCount;
+    NvU32 p2pCaps;
+    NvU32 p2pOptimalReadCEs;
+    NvU32 p2pOptimalWriteCEs;
+    NvU8  p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE];
+    NV_DECLARE_ALIGNED(NvP64 busPeerIds, 8);
+    NV_DECLARE_ALIGNED(NvP64 busEgmPeerIds, 8);
+} NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS;
+
+/* valid p2pCaps values */
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED 0:0
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED_TRUE (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED 1:1
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED_TRUE (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED 2:2
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED_TRUE (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED 3:3
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED_TRUE (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED 4:4
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED_TRUE (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED 5:5
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED_TRUE (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED 6:6
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED_TRUE (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED 7:7
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED_TRUE (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED 8:8
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED_TRUE (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED 9:9
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED_TRUE (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED 10:10
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED_TRUE (0x00000001U)
+
+
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_C2C_SUPPORTED 12:12
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_C2C_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_C2C_SUPPORTED_TRUE (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_BAR1_SUPPORTED 13:13
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_BAR1_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_BAR1_SUPPORTED_TRUE (0x00000001U)
+
+/* P2P status codes */
+#define NV0000_P2P_CAPS_STATUS_OK (0x00U)
+#define NV0000_P2P_CAPS_STATUS_CHIPSET_NOT_SUPPORTED (0x01U)
+#define NV0000_P2P_CAPS_STATUS_GPU_NOT_SUPPORTED (0x02U)
+#define NV0000_P2P_CAPS_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED (0x03U)
+#define NV0000_P2P_CAPS_STATUS_DISABLED_BY_REGKEY (0x04U)
+#define NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED (0x05U)
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2
+ *
+ * This command returns peer to peer capabilities present between GPUs.
+ * Valid requests must present a list of GPU IDs.
+ *
+ * [in] gpuIds
+ *   This member contains the array of GPU IDs for which we query the P2P
+ *   capabilities. Valid entries are contiguous, beginning with the first
+ *   entry in the list.
+ * [in] gpuCount
+ *   This member contains the number of GPU IDs stored in the gpuIds[] array.
+ * [out] p2pCaps
+ *   This member returns the peer to peer capabilities discovered between
+ *   the GPUs. Valid p2pCaps values include:
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED
+ *     When this bit is set, peer to peer writes between subdevices owned
+ *     by this device are supported.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED
+ *     When this bit is set, peer to peer reads between subdevices owned
+ *     by this device are supported.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED
+ *     When this bit is set, peer to peer PROP between subdevices owned
+ *     by this device is supported. This is enabled by default.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED
+ *     When this bit is set, PCI is supported for all P2P between
+ *     subdevices owned by this device.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED
+ *     When this bit is set, NVLINK is supported for all P2P between
+ *     subdevices owned by this device.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED
+ *     When this bit is set, peer to peer atomics between subdevices owned
+ *     by this device are supported.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED
+ *     When this bit is set, peer to peer loopback is supported for
+ *     subdevices owned by this device.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED
+ *     When this bit is set, indirect peer to peer writes between
+ *     subdevices owned by this device are supported.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED
+ *     When this bit is set, indirect peer to peer reads between
+ *     subdevices owned by this device are supported.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED
+ *     When this bit is set, indirect peer to peer atomics between
+ *     subdevices owned by this device are supported.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED
+ *     When this bit is set, indirect NVLINK is supported for subdevices
+ *     owned by this device.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_C2C_SUPPORTED
+ *     When this bit is set, C2C P2P is supported between the GPUs.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_BAR1_SUPPORTED
+ *     When this bit is set, BAR1 P2P is supported between the GPUs
+ *     mentioned in @ref gpuIds.
+ * [out] p2pOptimalReadCEs
+ *   For a pair of GPUs, returns the mask of CEs to use for p2p reads over
+ *   NvLink.
+ * [out] p2pOptimalWriteCEs
+ *   For a pair of GPUs, returns the mask of CEs to use for p2p writes over
+ *   NvLink.
+ * [out] p2pCapsStatus
+ *   This member returns the status of all supported p2p capabilities.
+ *   Valid status values include:
+ *   NV0000_P2P_CAPS_STATUS_OK
+ *     P2P capability is supported.
+ *   NV0000_P2P_CAPS_STATUS_CHIPSET_NOT_SUPPORTED
+ *     Chipset doesn't support p2p capability.
+ *   NV0000_P2P_CAPS_STATUS_GPU_NOT_SUPPORTED
+ *     GPU doesn't support p2p capability.
+ *   NV0000_P2P_CAPS_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED
+ *     IOH topology isn't supported, e.g. the root ports are on different
+ *     IOHs.
+ *   NV0000_P2P_CAPS_STATUS_DISABLED_BY_REGKEY
+ *     P2P capability is disabled by a regkey.
+ *   NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED
+ *     P2P capability is not supported.
+ *   NV0000_P2P_CAPS_STATUS_NVLINK_SETUP_FAILED
+ *     Indicates that NvLink P2P link setup failed.
+ * [out] busPeerIds
+ *   Peer ID matrix. It is a one-dimensional array.
+ *   busPeerIds[X * gpuCount + Y] maps from index X to index Y in
+ *   the gpuIds[] table. For an invalid or non-existent peer, busPeerIds[]
+ *   has the value NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INVALID_PEER.
+ * [out] busEgmPeerIds
+ *   EGM peer ID matrix. It is a one-dimensional array.
+ *   busEgmPeerIds[X * gpuCount + Y] maps from index X to index Y in
+ *   the gpuIds[] table. For an invalid or non-existent peer,
+ *   busEgmPeerIds[] has the value
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INVALID_PEER.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+
+
+#define NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 (0x12bU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS_MESSAGE_ID (0x2BU)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS {
+    NvU32 gpuIds[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS];
+    NvU32 gpuCount;
+    NvU32 p2pCaps;
+    NvU32 p2pOptimalReadCEs;
+    NvU32 p2pOptimalWriteCEs;
+    NvU8  p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE];
+    NvU32 busPeerIds[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS_SQUARED];
+    NvU32 busEgmPeerIds[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS_SQUARED];
+} NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX
+ *
+ * This command returns peer to peer capabilities present between all pairs of
+ * GPU IDs {(a, b) : a in gpuIdGrpA and b in gpuIdGrpB}. This can be used to
+ * collect all P2P capabilities in the system - see the SRT:
+ * NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX_TEST
+ * for a demonstration.
+ *
+ * The call will query for all pairs between set A and set B, and return
+ * results in both link directions. The results are two-dimensional arrays
+ * where the first dimension is the index within the set-A array of one GPU
+ * ID under consideration, and the second dimension is the index within the
+ * set-B array of the other GPU ID under consideration.
+ *
+ * That is, the result arrays are *ALWAYS* to be indexed first with the set-A
+ * index, then with the set-B index. Results for the B-to-A direction are put
+ * in the b2aOptimal(Read|Write)CEs.
+ * This makes it unnecessary to call the query twice, since the usual use
+ * case requires both directions.
+ *
+ * If a set is being compared against itself (by setting grpBCount to 0),
+ * then the result matrices are symmetric - it doesn't matter which index is
+ * first. However, the choice of indices is effectively a choice of which ID
+ * is "B" and which is "A" for the "a2b" and "b2a" directional results.
+ *
+ * [in] grpACount
+ *   This member contains the number of GPU IDs stored in the gpuIdGrpA[]
+ *   array. Must be greater than 0.
+ * [in] grpBCount
+ *   This member contains the number of GPU IDs stored in the gpuIdGrpB[]
+ *   array. Can be == 0 to specify a check of group A against itself.
+ * [in] gpuIdGrpA
+ *   This member contains the array of GPU IDs in "group A", each of which
+ *   will have its P2P capabilities returned with respect to each GPU ID in
+ *   "group B". Valid entries are contiguous, beginning with the first entry
+ *   in the list.
+ * [in] gpuIdGrpB
+ *   This member contains the array of GPU IDs in "group B", each of which
+ *   will have its P2P capabilities returned with respect to each GPU ID in
+ *   "group A". Valid entries are contiguous, beginning with the first entry
+ *   in the list. May be equal to gpuIdGrpA, but for best performance the
+ *   caller should instead specify grpBCount = 0, in which case this array
+ *   is ignored.
+ * [out] p2pCaps
+ *   This member returns the peer to peer capabilities discovered between
+ *   the pairs of input GPUs between the groups, indexed by
+ *   [A_index][B_index]. Valid p2pCaps values include:
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED
+ *     When this bit is set, peer to peer writes between subdevices owned
+ *     by this device are supported.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED
+ *     When this bit is set, peer to peer reads between subdevices owned
+ *     by this device are supported.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED
+ *     When this bit is set, peer to peer PROP between subdevices owned
+ *     by this device is supported. This is enabled by default.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED
+ *     When this bit is set, PCI is supported for all P2P between
+ *     subdevices owned by this device.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED
+ *     When this bit is set, NVLINK is supported for all P2P between
+ *     subdevices owned by this device.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED
+ *     When this bit is set, peer to peer atomics between subdevices owned
+ *     by this device are supported.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED
+ *     When this bit is set, peer to peer loopback is supported for
+ *     subdevices owned by this device.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED
+ *     When this bit is set, indirect peer to peer writes between
+ *     subdevices owned by this device are supported.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED
+ *     When this bit is set, indirect peer to peer reads between
+ *     subdevices owned by this device are supported.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED
+ *     When this bit is set, indirect peer to peer atomics between
+ *     subdevices owned by this device are supported.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED
+ *     When this bit is set, indirect NVLINK is supported for subdevices
+ *     owned by this device.
+ * [out] a2bOptimalReadCes
+ *   For a pair of GPUs, returns the mask of CEs to use for p2p reads over
+ *   NvLink in the A-to-B direction.
+ * [out] a2bOptimalWriteCes
+ *   For a pair of GPUs, returns the mask of CEs to use for p2p writes over
+ *   NvLink in the A-to-B direction.
+ * [out] b2aOptimalReadCes
+ *   For a pair of GPUs, returns the mask of CEs to use for p2p reads over
+ *   NvLink in the B-to-A direction.
+ * [out] b2aOptimalWriteCes
+ *   For a pair of GPUs, returns the mask of CEs to use for p2p writes over
+ *   NvLink in the B-to-A direction.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+
+
+#define NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX (0x13aU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS_MESSAGE_ID" */
+
+typedef NvU32 NV0000_CTRL_P2P_CAPS_MATRIX_ROW[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS];
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS_MESSAGE_ID (0x3AU)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS {
+    NvU32 grpACount;
+    NvU32 grpBCount;
+    NvU32 gpuIdGrpA[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS];
+    NvU32 gpuIdGrpB[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS];
+    NV0000_CTRL_P2P_CAPS_MATRIX_ROW p2pCaps[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS];
+    NV0000_CTRL_P2P_CAPS_MATRIX_ROW a2bOptimalReadCes[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS];
+    NV0000_CTRL_P2P_CAPS_MATRIX_ROW a2bOptimalWriteCes[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS];
+    NV0000_CTRL_P2P_CAPS_MATRIX_ROW b2aOptimalReadCes[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS];
+    NV0000_CTRL_P2P_CAPS_MATRIX_ROW b2aOptimalWriteCes[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS];
+} NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GPS_CTRL
+ *
+ * This command is used to execute general GPS functions, most of which deal
+ * with calling the SBIOS or retrieving cached sensor and GPS state data.
+ *
+ * version
+ *   This parameter specifies the version of the interface. The only legal
+ *   value for this parameter is 1.
+ * cmd
+ *   This parameter specifies the GPS API to be invoked.
+ *   Valid values for this parameter are:
+ *   NV0000_CTRL_GPS_CMD_TYPE_GET_THERM_LIMIT
+ *     This command gets the temperature limit for the thermal controller.
+ *     When this command is specified the input parameter contains ???.
+ *   NV0000_CTRL_GPS_CMD_TYPE_SET_THERM_LIMIT
+ *     This command sets the temperature limit for the thermal controller.
+ *     When this command is specified the input parameter contains ???.
+ * input
+ *   This parameter specifies the cmd-specific input value.
+ * result
+ *   This parameter returns the cmd-specific output value.
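+ *
+ * A minimal illustrative sketch (not part of this header) of reading a
+ * thermal limit through this command, using the parameter structure and cmd
+ * defines that follow; the NvRmControl-style entry point and the
+ * hClient/hObject handles are assumptions here:
+ *
+ *   NV0000_CTRL_SYSTEM_GPS_CTRL_PARAMS ctrlParams = { 0 };
+ *   ctrlParams.cmd = NV0000_CTRL_GPS_CMD_TYPE_GET_THERM_LIMIT;
+ *   ctrlParams.input[NV0000_CTRL_GPS_INPUT_SENSOR_INDEX] = 0; // sensor 0
+ *   status = NvRmControl(hClient, hObject,
+ *                        NV0000_CTRL_CMD_SYSTEM_GPS_CTRL,
+ *                        &ctrlParams, sizeof(ctrlParams));
+ *   // On NV_OK, ctrlParams.result[NV0000_CTRL_GPS_RESULT_THERMAL_LIMIT]
+ *   // holds the current limit.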
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CTRL (0x12aU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GPS_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_GPS_CTRL_PARAMS_MESSAGE_ID (0x2AU)
+
+typedef struct NV0000_CTRL_SYSTEM_GPS_CTRL_PARAMS {
+    NvU32 cmd;
+    NvS32 input[2];
+    NvS32 result[4];
+} NV0000_CTRL_SYSTEM_GPS_CTRL_PARAMS;
+
+/* valid version values */
+#define NV0000_CTRL_GPS_PSHARE_PARAMS_PSP_CURRENT_VERSION (0x00010000U)
+
+/* valid cmd values */
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_THERM_LIMIT (0x00000002U)
+#define NV0000_CTRL_GPS_INPUT_SENSOR_INDEX (0x00000000U)
+#define NV0000_CTRL_GPS_RESULT_THERMAL_LIMIT (0x00000000U)
+#define NV0000_CTRL_GPS_RESULT_MIN_LIMIT (0x00000001U)
+#define NV0000_CTRL_GPS_RESULT_MAX_LIMIT (0x00000002U)
+#define NV0000_CTRL_GPS_RESULT_LIMIT_SOURCE (0x00000003U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_THERM_LIMIT (0x00000003U)
+// NV0000_CTRL_GPS_INPUT_SENSOR_INDEX (0x00000000)
+#define NV0000_CTRL_GPS_INPUT_THERMAL_LIMIT (0x00000001U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_TEMP_CTRL_DOWN_N_DELTA (0x00000004U)
+// NV0000_CTRL_GPS_INPUT_SENSOR_INDEX (0x00000000)
+#define NV0000_CTRL_GPS_RESULT_TEMP_CTRL_DOWN_N_DELTA (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_TEMP_CTRL_DOWN_N_DELTA (0x00000005U)
+// NV0000_CTRL_GPS_INPUT_SENSOR_INDEX (0x00000000)
+#define NV0000_CTRL_GPS_INPUT_TEMP_CTRL_DOWN_N_DELTA (0x00000001U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_TEMP_CTRL_HOLD_DELTA (0x00000006U)
+// NV0000_CTRL_GPS_INPUT_SENSOR_INDEX (0x00000000)
+#define NV0000_CTRL_GPS_RESULT_TEMP_CTRL_HOLD_DELTA (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_TEMP_CTRL_HOLD_DELTA (0x00000007U)
+// NV0000_CTRL_GPS_INPUT_SENSOR_INDEX (0x00000000)
+#define NV0000_CTRL_GPS_INPUT_TEMP_CTRL_HOLD_DELTA (0x00000001U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_TEMP_CTRL_UP_DELTA (0x00000008U)
+// NV0000_CTRL_GPS_INPUT_SENSOR_INDEX (0x00000000)
+#define NV0000_CTRL_GPS_RESULT_TEMP_CTRL_UP_DELTA (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_TEMP_CTRL_UP_DELTA (0x00000009U)
+// NV0000_CTRL_GPS_INPUT_SENSOR_INDEX (0x00000000)
+#define NV0000_CTRL_GPS_INPUT_TEMP_CTRL_UP_DELTA (0x00000001U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_TEMP_CTRL_ENGAGE_DELTA (0x0000000AU)
+// NV0000_CTRL_GPS_INPUT_SENSOR_INDEX (0x00000000)
+#define NV0000_CTRL_GPS_RESULT_TEMP_CTRL_ENGAGE_DELTA (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_TEMP_CTRL_ENGAGE_DELTA (0x0000000BU)
+// NV0000_CTRL_GPS_INPUT_SENSOR_INDEX (0x00000000)
+#define NV0000_CTRL_GPS_INPUT_TEMP_CTRL_ENGAGE_DELTA (0x00000001U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_TEMP_CTRL_DISENGAGE_DELTA (0x0000000CU)
+// NV0000_CTRL_GPS_INPUT_SENSOR_INDEX (0x00000000)
+#define NV0000_CTRL_GPS_RESULT_TEMP_CTRL_DISENGAGE_DELTA (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_TEMP_CTRL_DISENGAGE_DELTA (0x0000000DU)
+// NV0000_CTRL_GPS_INPUT_SENSOR_INDEX (0x00000000)
+#define NV0000_CTRL_GPS_INPUT_TEMP_CTRL_DISENGAGE_DELTA (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_TEMP_CTRL_STATUS (0x00000016U)
+#define NV0000_CTRL_GPS_RESULT_TEMP_CTRL_STATUS (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_TEMP_CTRL_STATUS (0x00000017U)
+#define NV0000_CTRL_GPS_INPUT_TEMP_CTRL_STATUS (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_CPU_GET_UTIL_AVG_NUM (0x00000018U)
+#define NV0000_CTRL_GPS_RESULT_CPU_SET_UTIL_AVG_NUM (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_CPU_SET_UTIL_AVG_NUM (0x00000019U)
+#define NV0000_CTRL_GPS_INPUT_CPU_GET_UTIL_AVG_NUM (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_PERF_SENSOR (0x0000001AU)
+// NV0000_CTRL_GPS_INPUT_SENSOR_INDEX (0x00000000)
+#define NV0000_CTRL_GPS_INPUT_NEXT_EXPECTED_POLL (0x00000001U)
+#define NV0000_CTRL_GPS_RESULT_PERF_SENSOR_VALUE (0x00000000U)
+#define NV0000_CTRL_GPS_RESULT_PERF_SENSOR_AVAILABLE (0x00000001U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_CALL_ACPI (0x0000001BU)
+#define NV0000_CTRL_GPS_INPUT_ACPI_CMD (0x00000000U)
+#define NV0000_CTRL_GPS_INPUT_ACPI_PARAM_IN (0x00000001U)
+#define NV0000_CTRL_GPS_OUTPUT_ACPI_RESULT_1 (0x00000000U)
+#define NV0000_CTRL_GPS_OUTPUT_ACPI_RESULT_2 (0x00000001U)
+#define NV0000_CTRL_GPS_OUTPUT_ACPI_PSHAREPARAM_STATUS (0x00000000U)
+#define NV0000_CTRL_GPS_OUTPUT_ACPI_PSHAREPARAM_VERSION (0x00000001U)
+#define NV0000_CTRL_GPS_OUTPUT_ACPI_PSHAREPARAM_SZ (0x00000002U)
+#define NV0000_CTRL_GPS_OUTPUT_ACPI_PSS_SZ (0x00000000U)
+#define NV0000_CTRL_GPS_OUTPUT_ACPI_PSS_COUNT (0x00000001U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_IGPU_TURBO (0x0000001CU)
+#define NV0000_CTRL_GPS_INPUT_SET_IGPU_TURBO (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_TEMP_PERIOD (0x00000026U)
+#define NV0000_CTRL_GPS_INPUT_TEMP_PERIOD (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_TEMP_PERIOD (0x00000027U)
+#define NV0000_CTRL_GPS_RESULT_TEMP_PERIOD (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_TEMP_NUDGE_FACTOR (0x00000028U)
+#define NV0000_CTRL_GPS_INPUT_TEMP_NUDGE_UP (0x00000000U)
+#define NV0000_CTRL_GPS_INPUT_TEMP_NUDGE_DOWN (0x00000001U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_TEMP_NUDGE_FACTOR (0x00000029U)
+#define NV0000_CTRL_GPS_RESULT_TEMP_NUDGE_UP (0x00000000U)
+#define NV0000_CTRL_GPS_RESULT_TEMP_NUDGE_DOWN (0x00000001U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_TEMP_THRESHOLD_SAMPLES (0x0000002AU)
+#define NV0000_CTRL_GPS_INPUT_TEMP_THRESHOLD_SAMPLE_HOLD (0x00000000U)
+#define NV0000_CTRL_GPS_INPUT_TEMP_THRESHOLD_SAMPLE_STEP (0x00000001U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_TEMP_THRESHOLD_SAMPLES (0x0000002BU)
+#define NV0000_CTRL_GPS_RESULT_TEMP_THRESHOLD_SAMPLE_HOLD (0x00000000U)
+#define NV0000_CTRL_GPS_RESULT_TEMP_THRESHOLD_SAMPLE_STEP (0x00000001U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_TEMP_PERF_LIMITS (0x0000002CU)
+#define NV0000_CTRL_GPS_INPUT_TEMP_PERF_LIMIT_UPPER (0x00000000U)
+#define NV0000_CTRL_GPS_INPUT_TEMP_PERF_LIMIT_LOWER (0x00000001U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_TEMP_PERF_LIMITS (0x0000002DU)
+#define NV0000_CTRL_GPS_RESULT_TEMP_PERF_LIMIT_UPPER (0x00000000U)
+#define NV0000_CTRL_GPS_RESULT_TEMP_PERF_LIMIT_LOWER (0x00000001U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_PM1_AVAILABLE (0x0000002EU)
+#define NV0000_CTRL_GPS_INPUT_PM1_AVAILABLE (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_PM1_AVAILABLE (0x0000002FU)
+#define NV0000_CTRL_GPS_OUTPUT_PM1_AVAILABLE (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_CPU_PACKAGE_LIMITS (0x00000044U)
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_CPU_PACKAGE_LIMITS_PL1 (0x00000000U)
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_CPU_PACKAGE_LIMITS_PL2 (0x00000001U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_CPU_PACKAGE_LIMITS (0x00000045U)
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_CPU_PACKAGE_LIMITS_PL1 (0x00000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_CPU_FREQ_LIMIT (0x00000046U)
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_CPU_FREQ_LIMIT_MHZ (0000000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_CPU_FREQ_LIMIT (0x00000047U)
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_CPU_FREQ_LIMIT_MHZ (0000000000U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_PPM (0x00000048U)
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_PPM_INDEX (0000000000U)
+#define NV0000_CTRL_GPS_CMD_TYPE_GET_PPM_AVAILABLE_MASK (0000000001U)
+
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_PPM (0x00000049U)
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_PPM_INDEX (0000000000U)
+#define NV0000_CTRL_GPS_CMD_TYPE_SET_PPM_INDEX_MAX (2U)
+
+#define NV0000_CTRL_GPS_PPM_INDEX 7:0
+#define NV0000_CTRL_GPS_PPM_INDEX_MAXPERF (0U)
+#define NV0000_CTRL_GPS_PPM_INDEX_BALANCED (1U)
+#define NV0000_CTRL_GPS_PPM_INDEX_QUIET (2U)
+#define NV0000_CTRL_GPS_PPM_INDEX_INVALID (0xFFU)
+#define NV0000_CTRL_GPS_PPM_MASK 15:8
+#define NV0000_CTRL_GPS_PPM_MASK_INVALID (0U)
+
+/* valid PS_STATUS result values */
+#define NV0000_CTRL_GPS_CMD_PS_STATUS_OFF (0U)
+#define NV0000_CTRL_GPS_CMD_PS_STATUS_ON (1U)
+
+
+#define GPS_MAX_COUNTERS_PER_BLOCK 32U
+#define NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS_MESSAGE_ID (0x29U)
+
+typedef struct NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS {
+    NvU32 objHndl;
+    NvU32 blockId;
+    NvU32 nextExpectedSampleTimems;
+    NvU32 countersReq;
+    NvU32 countersReturned;
+    NvU32 counterBlock[GPS_MAX_COUNTERS_PER_BLOCK];
+} NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS;
+
+#define NV0000_CTRL_CMD_SYSTEM_GPS_GET_PERF_SENSORS (0x12cU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSORS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSORS_PARAMS_MESSAGE_ID (0x2CU)
+
+typedef NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSORS_PARAMS;
+
+#define NV0000_CTRL_CMD_SYSTEM_GPS_GET_EXTENDED_PERF_SENSORS (0x12eU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GPS_GET_EXTENDED_PERF_SENSORS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_GPS_GET_EXTENDED_PERF_SENSORS_PARAMS_MESSAGE_ID (0x2EU)
+
+typedef NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS NV0000_CTRL_SYSTEM_GPS_GET_EXTENDED_PERF_SENSORS_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GPS_CALL_ACPI
+ *
+ * This command allows users to call GPS ACPI commands for testing purposes.
+ *
+ * cmd
+ *   This parameter specifies the GPS ACPI command to execute.
+ *
+ * input
+ *   This parameter specifies the cmd-dependent input value.
+ *
+ * resultSz
+ *   This parameter returns the size (in bytes) of the valid data
+ *   returned in the result parameter.
+ *
+ * result
+ *   This parameter returns the results of the specified cmd.
+ *   The maximum size (in bytes) of this returned data will
+ *   not exceed GPS_MAX_ACPI_OUTPUT_BUFFER_SIZE.
+ *
+ * GPS_MAX_ACPI_OUTPUT_BUFFER_SIZE
+ *   The size of the buffer (result) in units of NvU32.
+ *   The smallest value is sizeof(PSS_ENTRY)*ACPI_PSS_ENTRY_MAX.
+ *   Since the former is 24 bytes and the latter is 48,
+ *   this value cannot be smaller than 288.
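+ *
+ *   Illustrative sizing check of that constraint: 48 PSS entries at
+ *   24 bytes each is 1152 bytes, i.e. 1152 / sizeof(NvU32) = 288 NvU32
+ *   elements, which is why GPS_MAX_ACPI_OUTPUT_BUFFER_SIZE below is 288.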
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *
+ */
+#define GPS_MAX_ACPI_OUTPUT_BUFFER_SIZE 288U
+#define NV0000_CTRL_SYSTEM_GPS_CALL_ACPI_PARAMS_MESSAGE_ID (0x2DU)
+
+typedef struct NV0000_CTRL_SYSTEM_GPS_CALL_ACPI_PARAMS {
+    NvU32 cmd;
+    NvU32 input;
+    NvU32 resultSz;
+    NvU32 result[GPS_MAX_ACPI_OUTPUT_BUFFER_SIZE];
+} NV0000_CTRL_SYSTEM_GPS_CALL_ACPI_PARAMS;
+
+#define NV0000_CTRL_CMD_SYSTEM_GPS_CALL_ACPI (0x12dU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GPS_CALL_ACPI_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV0000_CTRL_SYSTEM_PARAM_*
+ *
+ * The following is a list of system-level parameters (often sensors) that the
+ * driver can be made aware of. They are primarily intended to be used by
+ * system power-balancing algorithms that require system-wide visibility in
+ * order to function. The names and values used here are established and
+ * specified in several different NVIDIA documents that are made externally
+ * available. Thus, updates to this list must be made with great caution. The
+ * only permissible change is to append new parameters. Reordering is strictly
+ * prohibited.
+ *
+ * Brief Parameter Summary:
+ *   TGPU - GPU temperature (NvTemp)
+ *   PDTS - CPU package temperature (NvTemp)
+ *   SFAN - System fan speed (% of maximum fan speed)
+ *   SKNT - Skin temperature (NvTemp)
+ *   CPUE - CPU energy counter (NvU32)
+ *   TMP1 - Additional temperature sensor 1 (NvTemp)
+ *   TMP2 - Additional temperature sensor 2 (NvTemp)
+ *   CTGP - Mode 2 power limit offset (NvU32)
+ *   PPMD - Power mode data (NvU32)
+ */
+#define NV0000_CTRL_SYSTEM_PARAM_TGPU (0x00000000U)
+#define NV0000_CTRL_SYSTEM_PARAM_PDTS (0x00000001U)
+#define NV0000_CTRL_SYSTEM_PARAM_SFAN (0x00000002U)
+#define NV0000_CTRL_SYSTEM_PARAM_SKNT (0x00000003U)
+#define NV0000_CTRL_SYSTEM_PARAM_CPUE (0x00000004U)
+#define NV0000_CTRL_SYSTEM_PARAM_TMP1 (0x00000005U)
+#define NV0000_CTRL_SYSTEM_PARAM_TMP2 (0x00000006U)
+#define NV0000_CTRL_SYSTEM_PARAM_CTGP (0x00000007U)
+#define NV0000_CTRL_SYSTEM_PARAM_PPMD (0x00000008U)
+#define NV0000_CTRL_SYSTEM_PARAM_COUNT (0x00000009U)
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_EXECUTE_ACPI_METHOD
+ *
+ * This command is used to execute general ACPI methods.
+ *
+ * method
+ *   This parameter identifies the MXM ACPI API to be invoked.
+ *   Valid values for this parameter are:
+ *   NV0000_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_NVOP_OPTIMUSCAPS
+ *     This value specifies that the DSM NVOP subfunction OPTIMUSCAPS
+ *     API is to be invoked.
+ *   NV0000_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_NVOP_OPTIMUSFLAG
+ *     This value specifies that the DSM NVOP subfunction OPTIMUSFLAG
+ *     API is to be invoked. This API sets a flag in the SBIOS to indicate
+ *     whether the HD Audio controller has been disabled/enabled from GPU
+ *     config space. The SBIOS uses this flag to restore the audio state
+ *     after resuming from S3/S4.
+ *   NV0000_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_JT_CAPS
+ *     This value specifies that the DSM JT subfunction FUNC_CAPS is
+ *     to be invoked to get the SBIOS capabilities.
+ *   NV0000_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_JT_PLATPOLICY
+ *     This value specifies that the DSM JT subfunction FUNC_PLATPOLICY is
+ *     to be invoked to set and get the various platform policies for JT.
+ *     Refer to the JT spec for more detail on the various policies.
+ * inData
+ *   This parameter specifies the method-specific input buffer.
+ *   Data is passed to the specified API using this buffer.
+ * inDataSize
+ *   This parameter specifies the size of the inData buffer in bytes.
+ * outStatus
+ *   This parameter returns the status code from the associated ACPI call.
+ * outData
+ *   This parameter specifies the method-specific output buffer. Data
+ *   is returned by the specified API using this buffer.
+ * outDataSize
+ *   This parameter specifies the size of the outData buffer in bytes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_CTRL_CMD_SYSTEM_EXECUTE_ACPI_METHOD (0x130U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS_MESSAGE_ID (0x30U)
+
+typedef struct NV0000_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS {
+    NvU32 method;
+    NV_DECLARE_ALIGNED(NvP64 inData, 8);
+    NvU16 inDataSize;
+    NvU32 outStatus;
+    NV_DECLARE_ALIGNED(NvP64 outData, 8);
+    NvU16 outDataSize;
+} NV0000_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS;
+
+/* valid method parameter values */
+#define NV0000_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_NVOP_OPTIMUSCAPS (0x00000000U)
+#define NV0000_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_NVOP_OPTIMUSFLAG (0x00000001U)
+#define NV0000_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_JT_CAPS (0x00000002U)
+#define NV0000_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_JT_PLATPOLICY (0x00000003U)
+/*
+ * NV0000_CTRL_CMD_SYSTEM_ENABLE_ETW_EVENTS
+ *
+ * This command can be used to instruct the RM to enable/disable specific
+ * modules of ETW events.
+ *
+ * moduleMask
+ *   This parameter specifies the modules of events to enable/disable.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV0000_CTRL_CMD_SYSTEM_ENABLE_ETW_EVENTS (0x131U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_ENABLE_ETW_EVENTS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_ENABLE_ETW_EVENTS_PARAMS_MESSAGE_ID (0x31U)
+
+typedef struct NV0000_CTRL_SYSTEM_ENABLE_ETW_EVENTS_PARAMS {
+    NvU32 moduleMask;
+} NV0000_CTRL_SYSTEM_ENABLE_ETW_EVENTS_PARAMS;
+
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_ALL (0x00000001U)
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_NOFREQ (0x00000002U)
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_FLUSH (0x00000004U)
+
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_PERF (0x00000010U)
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_ELPG (0x00000020U)
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_NVDPS (0x00000040U)
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_POWER (0x00000080U)
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_DISP (0x00000100U)
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_RMAPI (0x00000200U)
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_INTR (0x00000400U)
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_LOCK (0x00000800U)
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_RCJOURNAL (0x00001000U)
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_GENERIC (0x00002000U)
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_THERM (0x00004000U)
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_GPS (0x00008000U)
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_PCIE (0x00010000U)
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_NVTELEMETRY (0x00020000U)
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GPS_GET_FRM_DATA
+ *
+ * This command is used to read FRL data as needed.
+ *
+ * nextSampleNumber
+ *   This parameter returns the counter of the next sample being filled.
+ * samples
+ *   This parameter returns the recorded samples; each sample contains the
+ *   frame time, render time, target time, and sleep time, plus the sample
+ *   number.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0000_CTRL_CMD_SYSTEM_GPS_GET_FRM_DATA (0x12fU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GPS_GET_FRM_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_GPS_FRM_DATA_SAMPLE_SIZE 64U
+
+typedef struct NV0000_CTRL_SYSTEM_GPS_FRM_DATA_SAMPLE {
+    NvU16 frameTime;
+    NvU16 renderTime;
+    NvU16 targetTime;
+    NvU8  sleepTime;
+    NvU8  sampleNumber;
+} NV0000_CTRL_SYSTEM_GPS_FRM_DATA_SAMPLE;
+
+#define NV0000_CTRL_SYSTEM_GPS_GET_FRM_DATA_PARAMS_MESSAGE_ID (0x2FU)
+
+typedef struct NV0000_CTRL_SYSTEM_GPS_GET_FRM_DATA_PARAMS {
+    NV0000_CTRL_SYSTEM_GPS_FRM_DATA_SAMPLE samples[NV0000_CTRL_SYSTEM_GPS_FRM_DATA_SAMPLE_SIZE];
+    NvU8 nextSampleNumber;
+} NV0000_CTRL_SYSTEM_GPS_GET_FRM_DATA_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GPS_SET_FRM_DATA
+ *
+ * This command is used to write FRM data as needed.
+ *
+ * frameTime
+ *   This parameter contains the frame time of the current frame.
+ * renderTime
+ *   This parameter contains the render time of the current frame.
+ * targetTime
+ *   This parameter contains the target time of the current frame.
+ * sleepTime
+ *   This parameter contains the sleep duration inserted by FRL for the
+ *   latest frame.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0000_CTRL_CMD_SYSTEM_GPS_SET_FRM_DATA (0x132U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GPS_SET_FRM_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_GPS_SET_FRM_DATA_PARAMS_MESSAGE_ID (0x32U)
+
+typedef struct NV0000_CTRL_SYSTEM_GPS_SET_FRM_DATA_PARAMS {
+    NV0000_CTRL_SYSTEM_GPS_FRM_DATA_SAMPLE sampleData;
+} NV0000_CTRL_SYSTEM_GPS_SET_FRM_DATA_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO
+ *
+ * This command returns the current host driver, host OS and
+ * plugin information. It is only valid when VGX is set up.
+ * szHostDriverVersionBuffer
+ *   This field returns the host driver version (NV_VERSION_STRING).
+ * szHostVersionBuffer
+ *   This field returns the host driver version (NV_BUILD_BRANCH_VERSION).
+ * szHostTitleBuffer
+ *   This field returns the host driver title (NV_DISPLAY_DRIVER_TITLE).
+ * szPluginTitleBuffer
+ *   This field returns the plugin build title (NV_DISPLAY_DRIVER_TITLE).
+ * szHostUnameBuffer
+ *   This field returns the output of 'uname' on the host OS.
+ * iHostChangelistNumber
+ *   This field returns the changelist value of the host driver
+ *   (NV_BUILD_CHANGELIST_NUM).
+ * iPluginChangelistNumber
+ *   This field returns the changelist value of the plugin
+ *   (NV_BUILD_CHANGELIST_NUM).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+
+#define NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE 256U
+#define NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO (0x133U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS_MESSAGE_ID (0x33U)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS {
+    char  szHostDriverVersionBuffer[NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE];
+    char  szHostVersionBuffer[NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE];
+    char  szHostTitleBuffer[NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE];
+    char  szPluginTitleBuffer[NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE];
+    char  szHostUnameBuffer[NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE];
+    NvU32 iHostChangelistNumber;
+    NvU32 iPluginChangelistNumber;
+} NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_GPUS_POWER_STATUS
+ *
+ * This command returns the external power status of the GPUs in the system,
+ * including GPUs that could not be attached because of insufficient power.
+ * It is supported on Kepler and up only.
+ * gpuCount
+ *   This field returns the count into the following arrays.
+ * gpuBus
+ *   This field returns the bus number of a GPU.
+ * gpuExternalPowerStatus
+ *   This field returns the corresponding external power status:
+ *     NV0000_CTRL_SYSTEM_GPU_EXTERNAL_POWER_STATUS_CONNECTED
+ *     NV0000_CTRL_SYSTEM_GPU_EXTERNAL_POWER_STATUS_NOT_CONNECTED
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0000_CTRL_CMD_SYSTEM_GET_GPUS_POWER_STATUS (0x134U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS_MESSAGE_ID (0x34U)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS {
+    NvU8 gpuCount;
+    NvU8 gpuBus[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS];
+    NvU8 gpuExternalPowerStatus[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS];
+} NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS;
+
+/* Valid gpuExternalPowerStatus values */
+#define NV0000_CTRL_SYSTEM_GPU_EXTERNAL_POWER_STATUS_CONNECTED 0U
+#define NV0000_CTRL_SYSTEM_GPU_EXTERNAL_POWER_STATUS_NOT_CONNECTED 1U
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_PRIVILEGED_STATUS
+ *
+ * This command returns the caller's API access privileges using
+ * this client handle.
+ *
+ * privStatus
+ *   This parameter returns a mask of possible access privileges:
+ *   NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_USER_FLAG
+ *     The caller is running with elevated privileges.
+ *   NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_KERNEL_HANDLE_FLAG
+ *     Client is of NV01_ROOT class.
+ *   NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_HANDLE_FLAG
+ *     Client has the PRIV bit set.
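+ *
+ * A minimal illustrative sketch (not part of this header; the
+ * NvRmControl-style entry point and the hClient/hObject handles are
+ * assumptions) of testing one of the flags defined below:
+ *
+ *   NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS privParams = { 0 };
+ *   status = NvRmControl(hClient, hObject,
+ *                        NV0000_CTRL_CMD_SYSTEM_GET_PRIVILEGED_STATUS,
+ *                        &privParams, sizeof(privParams));
+ *   if ((status == NV_OK) &&
+ *       (privParams.privStatusFlags &
+ *        NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_USER_FLAG))
+ *   {
+ *       // Caller is running with elevated privileges.
+ *   }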
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+
+
+#define NV0000_CTRL_CMD_SYSTEM_GET_PRIVILEGED_STATUS (0x135U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS_MESSAGE_ID (0x35U)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS {
+    NvU8 privStatusFlags;
+} NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS;
+
+
+/* Valid privStatus values */
+#define NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_USER_FLAG (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_KERNEL_HANDLE_FLAG (0x00000002U)
+#define NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_HANDLE_FLAG (0x00000004U)
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_FABRIC_STATUS
+ *
+ * The fabric manager (FM) notifies RM whether the fabric (system) is ready
+ * for peer to peer (P2P) use or is still initializing the fabric. This
+ * command allows clients to query the fabric status to allow P2P operations.
+ *
+ * Note, on systems where FM isn't used, RM just returns _SKIP.
+ *
+ * fabricStatus
+ *   This parameter returns the current fabric status:
+ *     NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_SKIP
+ *     NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_UNINITIALIZED
+ *     NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_IN_PROGRESS
+ *     NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_INITIALIZED
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+
+typedef enum NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS {
+    NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_SKIP = 1,
+    NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_UNINITIALIZED = 2,
+    NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_IN_PROGRESS = 3,
+    NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_INITIALIZED = 4,
+} NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS;
+
+#define NV0000_CTRL_CMD_SYSTEM_GET_FABRIC_STATUS (0x136U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS_MESSAGE_ID (0x36U)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS {
+    NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS fabricStatus;
+} NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS;
+
+/*
+ * NV0000_CTRL_VGPU_GET_VGPU_VERSION_INFO
+ *
+ * This command is used to query the range of vGPU versions supported.
+ *
+ * host_min_supported_version
+ *   The minimum vGPU version supported by the host driver
+ * host_max_supported_version
+ *   The maximum vGPU version supported by the host driver
+ * user_min_supported_version
+ *   The minimum vGPU version set by the user for vGPU support
+ * user_max_supported_version
+ *   The maximum vGPU version set by the user for vGPU support
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_REQUEST
+ */
+#define NV0000_CTRL_VGPU_GET_VGPU_VERSION (0x137U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_VGPU_GET_VGPU_VERSION_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV0000_CTRL_VGPU_GET_VGPU_VERSION
+ */
+#define NV0000_CTRL_VGPU_GET_VGPU_VERSION_PARAMS_MESSAGE_ID (0x37U)
+
+typedef struct NV0000_CTRL_VGPU_GET_VGPU_VERSION_PARAMS {
+    NvU32 host_min_supported_version;
+    NvU32 host_max_supported_version;
+    NvU32 user_min_supported_version;
+    NvU32 user_max_supported_version;
+} NV0000_CTRL_VGPU_GET_VGPU_VERSION_PARAMS;
+
+/*
+ * NV0000_CTRL_VGPU_SET_VGPU_VERSION
+ *
+ * This command is used to set the range of vGPU versions to be supported.
+ *
+ * min_version
+ *   The minimum vGPU version to be supported
+ * max_version
+ *   The maximum vGPU version to be supported
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_REQUEST
+ */
+#define NV0000_CTRL_VGPU_SET_VGPU_VERSION (0x138U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_VGPU_SET_VGPU_VERSION_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV0000_CTRL_VGPU_SET_VGPU_VERSION_PARAMS
+ */
+#define NV0000_CTRL_VGPU_SET_VGPU_VERSION_PARAMS_MESSAGE_ID (0x38U)
+
+typedef struct NV0000_CTRL_VGPU_SET_VGPU_VERSION_PARAMS {
+    NvU32 min_version;
+    NvU32 max_version;
+} NV0000_CTRL_VGPU_SET_VGPU_VERSION_PARAMS;
+
+/*
+ * NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID
+ *
+ * This command is used to get a unique identifier for the instance of RM.
+ * The returned value will only change when the driver is reloaded. A previous
+ * value will never be reused on a given machine.
+ *
+ * rm_instance_id
+ *   The instance ID of the current RM instance
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV0000_CTRL_CMD_SYSTEM_GET_RM_INSTANCE_ID (0x139U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS
+ */
+#define NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS_MESSAGE_ID (0x39U)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 rm_instance_id, 8);
+} NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO
+ *
+ * This API is used to get the TPP (total processing power) and
+ * the rated TGP (total GPU power) from the SBIOS.
+ *
+ * NVPCF is an acronym for NVIDIA Platform Controllers and Framework,
+ * which implements platform-level policies. NVPCF is implemented in
+ * a kernel driver on Windows. It is implemented in a user mode app
+ * called nvidia-powerd on Linux.
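+ *
+ * A minimal illustrative sketch (not part of this header; the entry point,
+ * handles, and the treatment of gpuId as an input are assumptions here) of
+ * querying the power-mode data with one of the subFunc ids listed below:
+ *
+ *   NV0000_CTRL_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS pwrParams = { 0 };
+ *   pwrParams.gpuId   = gpuId; // GPU ID assumed obtained elsewhere
+ *   pwrParams.subFunc = NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_DYNAMIC_PARAMS;
+ *   status = NvRmControl(hClient, hObject,
+ *                        NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO,
+ *                        &pwrParams, sizeof(pwrParams));
+ *   // On NV_OK, pwrParams.tpp and pwrParams.ratedTgp hold the TPP and
+ *   // rated TGP reported by the SBIOS.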
+ *
+ * Valid subFunc ids for NVPCF 1x include:
+ *   NVPCF0100_CTRL_CONFIG_DSM_1X_FUNC_GET_SUPPORTED
+ *   NVPCF0100_CTRL_CONFIG_DSM_1X_FUNC_GET_DYNAMIC_PARAMS
+ *
+ * Valid subFunc ids for NVPCF 2x include:
+ *   NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_SUPPORTED
+ *   NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_DYNAMIC_PARAMS
+ *   NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_STATIC_CONFIG_TABLES
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_REQUEST
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO (0x13bU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS_MESSAGE_ID" */
+#define NVPCF_CTRL_SYSPWRLIMIT_TYPE_BASE 1U
+#define NV0000_CTRL_SYSTEM_POWER_INFO_INDEX_MAX_SIZE 32U
+
+#define NV0000_CTRL_CMD_SYSTEM_GET_SYSTEM_POWER_LIMIT_MESSAGE_ID (0x48U)
+
+typedef struct NV0000_CTRL_CMD_SYSTEM_GET_SYSTEM_POWER_LIMIT {
+
+    /* Battery state of charge threshold (percent 0-100) */
+    NvU8  batteryStateOfChargePercent;
+
+    /* Long timescale battery current limit (milliamps) */
+    NvU32 batteryCurrentLimitmA;
+
+    /* Rest of system reserved power (milliwatts) */
+    NvU32 restOfSytemReservedPowermW;
+
+    /* Min CPU TDP (milliwatts) */
+    NvU32 minCpuTdpmW;
+
+    /* Max CPU TDP (milliwatts) */
+    NvU32 maxCpuTdpmW;
+
+    /* Short timescale battery current limit (milliamps) */
+    NvU32 shortTimescaleBatteryCurrentLimitmA;
+} NV0000_CTRL_CMD_SYSTEM_GET_SYSTEM_POWER_LIMIT;
+
+/*!
+ * States for the Battery CPU TDP Control ability.
+ * _CPU_TDP_CONTROL_TYPE_DC_ONLY :==> Legacy setting for DC-only CPU TDP Control
+ * _CPU_TDP_CONTROL_TYPE_DC_AC   :==> AC and DC both support CPU TDP Control
+ */
+typedef enum QBOOST_CPU_TDP_CONTROL_TYPE {
+    QBOOST_CPU_TDP_CONTROL_TYPE_DC_ONLY = 0,
+    QBOOST_CPU_TDP_CONTROL_TYPE_DC_AC = 1,
+} QBOOST_CPU_TDP_CONTROL_TYPE;
+
+#define NV0000_CTRL_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS_MESSAGE_ID (0x3BU)
+
+typedef struct NV0000_CTRL_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS {
+    /* Buffer to get all the supported functions */
+    NvU32 supportedFuncs;
+
+    /* GPU ID */
+    NvU32 gpuId;
+
+    /* Total processing power including CPU and GPU */
+    NvU32 tpp;
+
+    /* Rated total GPU power */
+    NvU32 ratedTgp;
+
+    /* NVPCF subfunction id */
+    NvU32 subFunc;
+
+    /* Configurable TGP offset, in mW */
+    NvS32 ctgpOffsetmW;
+
+    /* TPP, as offset in mW */
+    NvS32 targetTppOffsetmW;
+
+    /* Maximum allowed output, as offset in mW */
+    NvS32 maxOutputOffsetmW;
+
+    /* Minimum allowed output, as offset in mW */
+    NvS32 minOutputOffsetmW;
+
+    /* Configurable TGP offset, on battery, in milliwatts */
+    NvS32 ctgpBattOffsetmW;
+
+    /* Target total processing power on battery, offset, in milliwatts */
+    NvS32 targetTppBattOffsetmW;
+
+    /* Maximum allowed output on battery, offset, in milliwatts */
+    NvS32 maxOutputBattOffsetmW;
+
+    /* Minimum allowed output on battery, offset, in milliwatts */
+    NvS32 minOutputBattOffsetmW;
+
+    /*
+     * If the value specified is larger than the statically assigned ROS
+     * reserve in the system power limits table, this will take effect.
+     *
+     * A value of zero naturally works as a clear, as it will be less than
+     * the statically assigned value.
+ */ + NvU32 dcRosReserveOverridemW; + + /* + * This is the active arbitrated long timescale limit provided by Qboost and + * honored by JPAC/JPPC + */ + NvU32 dcTspLongTimescaleLimitmA; + + /* + * This is the active arbitrated short timescale limit provided by Qboost and + * honored by RM/PMU + */ + NvU32 dcTspShortTimescaleLimitmA; + + /* Dynamic Boost AC support */ + NvBool bEnableForAC; + + /* Dynamic Boost DC support */ + NvBool bEnableForDC; + + /* The System Controller Table Version */ + NvU8 version; + + /* Base sampling period */ + NvU16 samplingPeriodmS; + + /* Sampling Multiplier */ + NvU16 samplingMulti; + + /* Fitler function type */ + NvU8 filterType; + + union { + + /* weight */ + NvU8 weight; + + /* windowSize */ + NvU8 windowSize; + } filterParam; + + /* Reserved */ + NvU16 filterReserved; + + /* Controller Type Dynamic Boost Controller */ + NvBool bIsBoostController; + + /* Increase power limit ratio */ + NvU16 incRatio; + + /* Decrease power limit ratio */ + NvU16 decRatio; + + /* Dynamic Boost Controller DC Support */ + NvBool bSupportBatt; + + /* CPU type(Intel/AMD) */ + NvU8 cpuType; + + /* GPU type(Nvidia) */ + NvU8 gpuType; + + /* System Power Table info index */ + NvU32 sysPwrIndex; + + /* System Power Table get table limits */ + NV0000_CTRL_CMD_SYSTEM_GET_SYSTEM_POWER_LIMIT sysPwrGetInfo[NV0000_CTRL_SYSTEM_POWER_INFO_INDEX_MAX_SIZE]; + + /* + * Does this version of the system power limits table support TSP -> table + * version 2.0 and later should set this to true + */ + NvBool bIsTspSupported; + + /* + * Stores the System Power Limits (Battery State of Charge aka BSOC) table version implemented by the SBIOS + * + */ + NvU8 sysPwrLimitsTableVersion; + + /* SYSPWRLIMIT class types */ + NvU32 type; + + /* CPU TDP Limit to be set (milliwatts) */ + NvU32 cpuTdpmw; + + /* CPU TDP Control Support */ + QBOOST_CPU_TDP_CONTROL_TYPE cpuTdpControlType; +} NV0000_CTRL_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS; + +/* Define the filter types */ +#define CONTROLLER_FILTER_TYPE_EMWA 0U +#define CONTROLLER_FILTER_TYPE_MOVING_MAX 1U + +/* Valid NVPCF subfunction case */ +#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_SUPPORTED_CASE 2U +#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_DYNAMIC_CASE 3U + +/* NVPCF subfunction to get the static data tables */ +#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_STATIC_CASE 4U + +/* NVPCF subfunction to get the system power limits table */ +#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_DC_SYSTEM_POWER_LIMITS_CASE 5U + +/* NVPCF subfunction to change the CPU's TDP limit */ +#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_CPU_TDP_LIMIT_CONTROL_CASE 6U + +/* Valid NVPCF subfunction ids */ +#define NVPCF0100_CTRL_CONFIG_DSM_1X_FUNC_GET_SUPPORTED (0x00000000) +#define NVPCF0100_CTRL_CONFIG_DSM_1X_FUNC_GET_DYNAMIC_PARAMS (0x00000002) + +/* + * Defines for get supported sub functions bit fields + */ +#define NVPCF0100_CTRL_CONFIG_DSM_FUNC_GET_SUPPORTED_IS_SUPPORTED 0:0 +#define NVPCF0100_CTRL_CONFIG_DSM_FUNC_GET_SUPPORTED_IS_SUPPORTED_YES 1 +#define NVPCF0100_CTRL_CONFIG_DSM_FUNC_GET_SUPPORTED_IS_SUPPORTED_NO 0 +#define NVPCF0100_CTRL_CONFIG_DSM_FUNC_GET_DC_SYSTEM_POWER_LIMITS_IS_SUPPORTED 8:8 +#define NVPCF0100_CTRL_CONFIG_DSM_FUNC_GET_DC_SYSTEM_POWER_LIMITS_IS_SUPPORTED_YES 1 +#define NVPCF0100_CTRL_CONFIG_DSM_FUNC_GET_DC_SYSTEM_POWER_LIMITS_IS_SUPPORTED_NO 0 +#define NVPCF0100_CTRL_CONFIG_DSM_FUNC_CPU_TDP_LIMIT_CONTROL_IS_SUPPORTED 9:9 +#define NVPCF0100_CTRL_CONFIG_DSM_FUNC_CPU_TDP_LIMIT_CONTROL_IS_SUPPORTED_YES 1 +#define 
NVPCF0100_CTRL_CONFIG_DSM_FUNC_CPU_TDP_LIMIT_CONTROL_IS_SUPPORTED_NO 0 + + +/*! + * Config DSM 2x version specific defines + */ +#define NVPCF0100_CTRL_CONFIG_DSM_2X_VERSION (0x00000200) +#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_SUPPORTED (0x00000000) +#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_STATIC_CONFIG_TABLES (0x00000001) +#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_DYNAMIC_PARAMS (0x00000002) +#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_DC_SYSTEM_POWER_LIMITS_TABLE (0x00000008) +#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_CPU_TDP_LIMIT_CONTROL (0x00000009) + +/*! + * Defines the max buffer size for config + */ +#define NVPCF0100_CTRL_CONFIG_2X_BUFF_SIZE_MAX (255) + +/* + * NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT + * + * This API is used to sync the external fabric management status with + * GSP-RM + * + * bExternalFabricMgmt + * Whether fabric is externally managed + * + * Possible status values returned are: + * NV_OK + */ +#define NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT (0x13cU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS_MESSAGE_ID (0x3CU) + +typedef struct NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS { + NvBool bExternalFabricMgmt; +} NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS; + +/* + * NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO + * + * This API is used to get information about the RM client + * database. + * + * clientCount [OUT] + * This field indicates the number of clients currently allocated. + * + * resourceCount [OUT] + * This field indicates the number of resources currently allocated + * across all clients. + * + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_CLIENT_DATABASE_INFO (0x13dU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS_MESSAGE_ID (0x3DU) + +typedef struct NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS { + NvU32 clientCount; + NV_DECLARE_ALIGNED(NvU64 resourceCount, 8); +} NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS; + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION_V2 + * + * This command returns the current driver information in + * statically sized character arrays. + * + * driverVersionBuffer + * This field returns the version (NV_VERSION_STRING). + * versionBuffer + * This field returns the version (NV_BUILD_BRANCH_VERSION). + * driverBranch + * This field returns the branch (NV_BUILD_BRANCH). + * titleBuffer + * This field returns the title (NV_DISPLAY_DRIVER_TITLE). + * changelistNumber + * This field returns the changelist value (NV_BUILD_CHANGELIST_NUM). + * officialChangelistNumber + * This field returns the last official changelist value + * (NV_LAST_OFFICIAL_CHANGELIST_NUM). 
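+ *
+ * A minimal usage sketch (illustrative only, not part of this API; it
+ * assumes NvRmControl() as the user-space RMAPI entry point and hClient as
+ * an existing NV01_ROOT client handle):
+ *
+ *     NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS ver = { 0 };
+ *     if (NvRmControl(hClient, hClient,
+ *                     NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION_V2,
+ *                     &ver, sizeof(ver)) == NV_OK)
+ *     {
+ *         // the four buffers are statically sized, NUL-terminated strings
+ *         printf("driver %s, CL %u\n",
+ *                ver.driverVersionBuffer, ver.changelistNumber);
+ *     }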
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+
+#define NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_MAX_STRING_SIZE 256U
+#define NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION_V2 (0x13eU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS_MESSAGE_ID (0x3EU)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS {
+    char  driverVersionBuffer[NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_MAX_STRING_SIZE];
+    char  versionBuffer[NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_MAX_STRING_SIZE];
+    char  driverBranch[NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_MAX_STRING_SIZE];
+    char  titleBuffer[NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_MAX_STRING_SIZE];
+    NvU32 changelistNumber;
+    NvU32 officialChangelistNumber;
+} NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_RMCTRL_CACHE_MODE_CTRL
+ *
+ * This API is used to get/set the RMCTRL cache mode.
+ *
+ * cmd [IN]
+ *   GET - Gets the RMCTRL cache mode
+ *   SET - Sets the RMCTRL cache mode
+ *
+ * mode [IN/OUT]
+ *   On GET, this field is the output of the current RMCTRL cache mode.
+ *   On SET, this field indicates the mode to set the RMCTRL cache to.
+ *   Valid values for this parameter are:
+ *     NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_MODE_DISABLE
+ *       No get/set action to the cache.
+ *     NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_MODE_ENABLE
+ *       Try to get from the cache at the beginning of the control.
+ *       Set the cache after the control finishes if the control has not
+ *       been cached.
+ *     NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_MODE_VERIFY_ONLY
+ *       Do not get from the cache. Set the cache when the control call
+ *       finishes. When setting the cache, verify that the value in the
+ *       cache is the same as the current control value if the control is
+ *       already cached.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0000_CTRL_CMD_SYSTEM_RMCTRL_CACHE_MODE_CTRL (0x13fU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_PARAMS_MESSAGE_ID (0x3FU)
+
+typedef struct NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_PARAMS {
+    NvU32 cmd;
+    NvU32 mode;
+} NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_PARAMS;
+
+#define NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_CMD_GET (0x00000000U)
+#define NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_CMD_SET (0x00000001U)
+
+#define NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_MODE_DISABLE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_MODE_ENABLE (0x00000001U)
+#define NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_MODE_VERIFY_ONLY (0x00000002U)
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CONTROL
+ *
+ * This command is used to control PFM_REQ_HNDLR functionality. It allows
+ * control of GPU Performance Scaling (PFM_REQ_HNDLR), changing its
+ * operational parameters and reading most PFM_REQ_HNDLR dynamic parameters.
+ *
+ * command
+ *   This parameter specifies the command to execute. Invalid commands
+ *   result in the return of an NV_ERR_INVALID_ARGUMENT status.
+ * locale
+ *   This parameter indicates the specific locale to which the command
+ *   'command' is to be applied.
+ *   Supported range of CPU/GPU {i = 0, ..., 255}
+ * data
+ *   This parameter contains a command-specific data payload. It can
+ *   be used to input data as well as to output data.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_COMMAND
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_DATA
+ *   NV_ERR_INVALID_REQUEST
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CONTROL (0x140U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CONTROL_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CONTROL_PARAMS_MESSAGE_ID (0x40U)
+
+typedef struct NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CONTROL_PARAMS {
+    NvU16 command;
+    NvU16 locale;
+    NvU32 data;
+} NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CONTROL_PARAMS;
+
+/*
+ * Valid command values:
+ *
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_GET_INIT
+ *     Is used to check whether PFM_REQ_HNDLR was correctly initialized.
+ *     Possible return (OUT) values are:
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_INIT_NO
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_INIT_YES
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_SET_EXEC
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_GET_EXEC
+ *     Are used to stop/start PFM_REQ_HNDLR functionality and to get the
+ *     current status. Possible IN/OUT values are:
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_EXEC_STOP
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_EXEC_START
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_SET_ACTIONS
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_GET_ACTIONS
+ *     Are used to control execution of PFM_REQ_HNDLR actions and to get the
+ *     current status. Possible IN/OUT values are:
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_ACTIONS_OFF
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_ACTIONS_ON
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_SET_LOGIC
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_GET_LOGIC
+ *     Are used to switch the current PFM_REQ_HNDLR logic and to retrieve the
+ *     current logic. Possible IN/OUT values are:
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_LOGIC_OFF
+ *         Causes all PFM_REQ_HNDLR actions to be NULL.
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_LOGIC_FUZZY
+ *         Fuzzy logic will determine PFM_REQ_HNDLR actions based on the
+ *         current ruleset.
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_LOGIC_DETERMINISTIC
+ *         Deterministic logic will define PFM_REQ_HNDLR actions based on the
+ *         current ruleset.
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_SET_PREFERENCE
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_GET_PREFERENCE
+ *     Are used to set/retrieve the system control preference.
+ *     Possible IN/OUT values are:
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_PREFERENCE_CPU
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_PREFERENCE_GPU
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_PREFERENCE_BOTH
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_SET_GPU2CPU_LIMIT
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_GET_GPU2CPU_LIMIT
+ *     Are used to set/retrieve the GPU2CPU pstate limits.
+ *     IN/OUT values are four bytes packed into a 32-bit data field.
+ *     The CPU cap index for GPU pstate 0 is in the lowest byte, the CPU cap
+ *     index for GPU pstate 3 is in the highest byte, etc. One special value
+ *     disables the override to the GPU2CPU map:
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_NO_MAP_OVERRIDE
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_SET_PMU_PFM_REQ_HNDLR_STATE
+ *     Is used to stop/start PFM_REQ_HNDLR PMU functionality.
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_GET_PMU_PFM_REQ_HNDLR_STATE
+ *     Is used to get the current status of PMU PFM_REQ_HNDLR.
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_SET_MAX_POWER
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_MAX_POWER
+ *     Are used to set/retrieve the max power [mW] that the system can
+ *     provide. This is a hardcoded PFM_REQ_HNDLR safety feature, and
+ *     logic/rules do not apply to this threshold.
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_SET_COOLING_BUDGET
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_COOLING_BUDGET
+ *     Are used to set/retrieve the current system cooling budget [mW].
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_SET_INTEGRAL_PERIOD
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_INTEGRAL_PERIOD
+ *     Are used to set/retrieve the integration interval [sec].
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_SET_RULESET
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_RULESET
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_RULE_COUNT
+ *     Are used to set/retrieve the ruleset in use [#]. The value is checked
+ *     against the MAX number of rules for the currently used PFM_REQ_HNDLR
+ *     logic. COUNT provides a way to find out how many rules exist for the
+ *     current control system.
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_SET_APP_BOOST
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_APP_BOOST
+ *     Are used to set/get a delay relative to now during which to allow
+ *     unbound CPU performance. Units are seconds.
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_SET_PWR_SUPPLY_MODE
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_PWR_SUPPLY_MODE
+ *     Are used to override/get the actual power supply mode (AC/Battery).
+ *     Possible IN/OUT values are:
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_PWR_SUPPLY_REAL
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_PWR_SUPPLY_FAKE_AC
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_PWR_SUPPLY_FAKE_BATT
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_VCT_SUPPORT_INFO
+ *     Is used to get the Ventura system information for the VCT tool.
+ *     The returned 32-bit value should be treated as a bitmask; the encoding
+ *     details are defined in objPFM_REQ_HNDLR.h (refer to
+ *     NV_PFM_REQ_HNDLR_SYS_SUPPORT_INFO and the corresponding bit defines).
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_SUPPORTED_FUNCTIONS
+ *     Is used to get the supported sub-functions defined in the SBIOS. The
+ *     returned value is a bitmask where each bit corresponds to a different
+ *     function:
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_FUNC_SUPPORT
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_FUNC_VENTURASTATUS
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_FUNC_GETPSS
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_FUNC_SETPPC
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_FUNC_GETPPC
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_FUNC_VENTURACB
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_FUNC_SYSPARAMS
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_DELTA
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_FUTURE
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_LTMAVG
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_INTEGRAL
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_BURDEN
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_INTERMEDIATE
+ *     Are used to retrieve the appropriate power measurements and their
+ *     derivatives in [mW] for the requested locale. _BURDEN is defined only
+ *     for _LOCALE_SYSTEM. _INTERMEDIATE is not defined for _LOCALE_SYSTEM,
+ *     and takes an IN value as index.
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_SENSOR_PARAMETERS
+ *     Is used to retrieve the parameters used when adjusting a raw sensor
+ *     power reading. The values may come from the SBIOS, VBIOS, registry or
+ *     driver default. The possible IN value is the index of the parameter
+ *     of interest.
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_TEMP
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_TEMP_DELTA
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_TEMP_FUTURE
+ *     Are used to retrieve the appropriate temperature measurements and
+ *     their derivatives in [1/1000 Celsius].
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_PSTATE
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_PSTATE_CAP
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_PSTATE_MIN
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_PSTATE_MAX
+ *     Are used to retrieve the CPU(x)/GPU(x) p-state or its limits.
+ *     Not applicable to _LOCALE_SYSTEM.
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_PSTATE_ACTION
+ *     Is used to retrieve the last PFM_REQ_HNDLR action for a given domain.
+ *     Not applicable to _LOCALE_SYSTEM.
+ *     Possible return (OUT) values are:
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_DEC_TO_P0
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_DEC_BY_1
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_DO_NOTHING
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_SET_CURRENT
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_INC_BY_1
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_INC_BY_2
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_INC_TO_LFM
+ *       NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_INC_TO_SLFM
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_SET_POWER_SIM_STATE
+ *     Is used to set the power sensor simulator state.
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_SIM_STATE
+ *     Is used to get the power sensor simulator state.
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_SET_POWER_SIM_DATA
+ *     Is used to set the power sensor simulator data.
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_SIM_DATA
+ *     Is used to get the power sensor simulator data.
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_INIT_USING_SBIOS_AND_ACK
+ *     Is used to respond to the ACPI event triggered by the SBIOS. RM will
+ *     request values for budget and status, validate them, apply them, and
+ *     send an ACK back to the SBIOS.
+ *   NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_PING_SBIOS_FOR_EVENT
+ *     Is a test cmd that should notify the SBIOS to send an ACPI event
+ *     requesting a budget and status change.
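+ *
+ * A minimal usage sketch (illustrative only; NvRmControl() is assumed to be
+ * the user-space RMAPI entry point and hClient an existing client handle):
+ *
+ *     NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CONTROL_PARAMS ctrl = { 0 };
+ *     ctrl.command = NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER;
+ *     ctrl.locale  = NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_LOCALE_GPU(0);
+ *     status = NvRmControl(hClient, hClient,
+ *                          NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CONTROL,
+ *                          &ctrl, sizeof(ctrl));
+ *     // on success, ctrl.data holds the GPU 0 power reading in [mW]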
+ */ +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_INVALID (0xFFFFU) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_GET_INIT (0x0000U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_SET_EXEC (0x0001U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_GET_EXEC (0x0002U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_SET_ACTIONS (0x0003U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_GET_ACTIONS (0x0004U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_SET_LOGIC (0x0005U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_GET_LOGIC (0x0006U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_SET_PREFERENCE (0x0007U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_GET_PREFERENCE (0x0008U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_SET_GPU2CPU_LIMIT (0x0009U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_GET_GPU2CPU_LIMIT (0x000AU) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_SET_PMU_PFM_REQ_HNDLR_STATE (0x000BU) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_GET_PMU_PFM_REQ_HNDLR_STATE (0x000CU) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_SET_MAX_POWER (0x0100U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_MAX_POWER (0x0101U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_SET_COOLING_BUDGET (0x0102U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_COOLING_BUDGET (0x0103U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_SET_INTEGRAL_PERIOD (0x0104U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_INTEGRAL_PERIOD (0x0105U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_SET_RULESET (0x0106U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_RULESET (0x0107U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_RULE_COUNT (0x0108U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_SET_APP_BOOST (0x0109U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_APP_BOOST (0x010AU) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_SET_PWR_SUPPLY_MODE (0x010BU) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_PWR_SUPPLY_MODE (0x010CU) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_VCT_SUPPORT_INFO (0x010DU) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_GET_SUPPORTED_FUNCTIONS (0x010EU) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER (0x0200U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_DELTA (0x0201U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_FUTURE (0x0202U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_LTMAVG (0x0203U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_INTEGRAL (0x0204U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_BURDEN (0x0205U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_INTERMEDIATE (0x0206U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_SENSOR_PARAMETERS (0x0210U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_TEMP (0x0220U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_TEMP_DELTA (0x0221U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_TEMP_FUTURE (0x0222U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_PSTATE (0x0240U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_PSTATE_CAP (0x0241U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_PSTATE_MIN (0x0242U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_PSTATE_MAX (0x0243U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_PSTATE_ACTION (0x0244U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_PSTATE_SLFM_PRESENT (0x0245U) +#define 
NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_SET_POWER_SIM_STATE (0x0250U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_SIM_STATE (0x0251U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_SET_POWER_SIM_DATA (0x0252U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_GET_POWER_SIM_DATA (0x0253U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_INIT_USING_SBIOS_AND_ACK (0x0320U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_PING_SBIOS_FOR_EVENT (0x0321U) + +/* valid LOCALE values */ +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_LOCALE_INVALID (0xFFFFU) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_LOCALE_SYSTEM (0x0000U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_LOCALE_CPU(i) (0x0100+((i)%0x100)) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_LOCALE_GPU(i) (0x0200+((i)%0x100)) + +/* valid data values for enums */ +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_INVALID (0x80000000U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_INIT_NO (0x00000000U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_INIT_YES (0x00000001U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_EXEC_STOP (0x00000000U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_EXEC_START (0x00000001U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_ACTIONS_OFF (0x00000000U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_ACTIONS_ON (0x00000001U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_LOGIC_OFF (0x00000000U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_LOGIC_FUZZY (0x00000001U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_LOGIC_DETERMINISTIC (0x00000002U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_PREFERENCE_CPU (0x00000000U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_PREFERENCE_GPU (0x00000001U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_PREFERENCE_BOTH (0x00000002U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_NO_MAP_OVERRIDE (0xFFFFFFFFU) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_PMU_PFM_REQ_HNDLR_STATE_OFF (0x00000000U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_PMU_PFM_REQ_HNDLR_STATE_ON (0x00000001U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_PWR_SUPPLY_REAL (0x00000000U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_PWR_SUPPLY_FAKE_AC (0x00000001U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_PWR_SUPPLY_FAKE_BATT (0x00000002U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_FUNC_SUPPORT (0x00000001U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_FUNC_VENTURASTATUS (0x00000002U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_FUNC_GETPSS (0x00000004U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_FUNC_SETPPC (0x00000008U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_FUNC_GETPPC (0x00000010U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_FUNC_VENTURACB (0x00000020U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SYS_DEF_FUNC_SYSPARAMS (0x00000040U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_DEC_TO_P0 (0x00000000U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_DEC_BY_1 (0x00000001U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_DO_NOTHING (0x00000002U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_SET_CURRENT (0x00000003U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_INC_BY_1 (0x00000004U) +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_INC_BY_2 (0x00000005U) +#define 
NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_INC_TO_LFM (0x00000006U)
+#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_ACTION_INC_TO_SLFM (0x00000007U)
+#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_SLFM_PRESENT_NO (0x00000000U)
+#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_SLFM_PRESENT_YES (0x00000001U)
+#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_POWER_SIM_STATE_OFF (0x00000000U)
+#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_DATA_DEF_POWER_SIM_STATE_ON (0x00000001U)
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_BATCH_CONTROL
+ *
+ * This command allows execution of multiple PFM_REQ_HNDLR_CONTROL commands
+ * within one RmControl call. For practical reasons, the number of commands
+ * is limited to 16.
+ * This command shares defines with NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CONTROL.
+ *
+ * cmdCount
+ *   Number of commands that should be executed.
+ *   Less than or equal to NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_BATCH_COMMAND_MAX.
+ *
+ * succeeded
+ *   Number of commands that were successfully executed.
+ *   Less than or equal to NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_BATCH_COMMAND_MAX.
+ *   Failing commands return NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CMD_DEF_INVALID
+ *   in their data field.
+ *
+ * cmdData
+ *   Array of commands with the following structure:
+ *   command
+ *     This parameter specifies the command to execute.
+ *     Invalid commands result in the return of an
+ *     NV_ERR_INVALID_ARGUMENT status.
+ *   locale
+ *     This parameter indicates the specific locale to which
+ *     the command 'command' is to be applied.
+ *     Supported range of CPU/GPU {i = 0, ..., 255}
+ *   data
+ *     This parameter contains a command-specific data payload.
+ *     It is used both to input data and to output data.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_REQUEST
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_BATCH_CONTROL (0x141U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_BATCH_CONTROL_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_BATCH_COMMAND_MAX (16U)
+#define NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_BATCH_CONTROL_PARAMS_MESSAGE_ID (0x41U)
+
+typedef struct NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_BATCH_CONTROL_PARAMS {
+    NvU32 cmdCount;
+    NvU32 succeeded;
+
+    struct {
+        NvU16 command;
+        NvU16 locale;
+        NvU32 data;
+    } cmdData[NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_BATCH_COMMAND_MAX];
+} NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_BATCH_CONTROL_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CTRL
+ *
+ * This command is used to execute general PFM_REQ_HNDLR functions, most
+ * dealing with calling the SBIOS or retrieving cached sensor and
+ * PFM_REQ_HNDLR state data.
+ *
+ * version
+ *   This parameter specifies the version of the interface. The only legal
+ *   value for this parameter is 1.
+ * cmd
+ *   This parameter specifies the PFM_REQ_HNDLR API to be invoked.
+ *   Valid values for this parameter are:
+ *     NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_THERM_LIMIT
+ *       This command gets the temperature limit for the thermal controller.
+ *       When this command is specified, the input parameter contains ???.
+ *     NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_THERM_LIMIT
+ *       This command sets the temperature limit for the thermal controller.
+ *       When this command is specified, the input parameter contains ???.
+ * input
+ *   This parameter specifies the cmd-specific input value.
+ * result
+ *   This parameter returns the cmd-specific output value.
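+ *
+ * A minimal usage sketch (illustrative only; NvRmControl() is assumed to be
+ * the user-space RMAPI entry point):
+ *
+ *     NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CTRL_PARAMS therm = { 0 };
+ *     therm.cmd = NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_THERM_LIMIT;
+ *     therm.input[NV0000_CTRL_PFM_REQ_HNDLR_INPUT_SENSOR_INDEX] = 0;
+ *     status = NvRmControl(hClient, hClient,
+ *                          NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CTRL,
+ *                          &therm, sizeof(therm));
+ *     // on success, therm.result[NV0000_CTRL_PFM_REQ_HNDLR_RESULT_THERMAL_LIMIT]
+ *     // holds the current limit for sensor 0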
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CTRL (0x142U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CTRL_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CTRL_PARAMS_MESSAGE_ID (0x42U) + +typedef struct NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CTRL_PARAMS { + NvU32 cmd; + NvS32 input[2]; + NvS32 result[4]; +} NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CTRL_PARAMS; + +/* valid version values */ +#define NV0000_CTRL_PFM_REQ_HNDLR_PSHARE_PARAMS_PSP_CURRENT_VERSION (0x00010000U) + +/* valid cmd values */ +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_THERM_LIMIT (0x00000002U) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_SENSOR_INDEX (0x00000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_THERMAL_LIMIT (0x00000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_MIN_LIMIT (0x00000001U) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_MAX_LIMIT (0x00000002U) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_LIMIT_SOURCE (0x00000003U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_THERM_LIMIT (0x00000003U) +// NV0000_CTRL_PFM_REQ_HNDLR_INPUT_SENSOR_INDEX (0x00000000) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_THERMAL_LIMIT (0x00000001U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_TEMP_CTRL_DOWN_N_DELTA (0x00000004U) +// NV0000_CTRL_PFM_REQ_HNDLR_INPUT_SENSOR_INDEX (0x00000000) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_TEMP_CTRL_DOWN_N_DELTA (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_TEMP_CTRL_DOWN_N_DELTA (0x00000005U) +// NV0000_CTRL_PFM_REQ_HNDLR_INPUT_SENSOR_INDEX (0x00000000) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_TEMP_CTRL_DOWN_N_DELTA (0x00000001U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_TEMP_CTRL_HOLD_DELTA (0x00000006U) +// NV0000_CTRL_PFM_REQ_HNDLR_INPUT_SENSOR_INDEX (0x00000000) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_TEMP_CTRL_HOLD_DELTA (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_TEMP_CTRL_HOLD_DELTA (0x00000007U) +// NV0000_CTRL_PFM_REQ_HNDLR_INPUT_SENSOR_INDEX (0x00000000) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_TEMP_CTRL_HOLD_DELTA (0x00000001U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_TEMP_CTRL_UP_DELTA (0x00000008U) +// NV0000_CTRL_PFM_REQ_HNDLR_INPUT_SENSOR_INDEX (0x00000000) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_TEMP_CTRL_UP_DELTA (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_TEMP_CTRL_UP_DELTA (0x00000009U) +// NV0000_CTRL_PFM_REQ_HNDLR_INPUT_SENSOR_INDEX (0x00000000) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_TEMP_CTRL_UP_DELTA (0x00000001U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_TEMP_CTRL_ENGAGE_DELTA (0x0000000AU) +// NV0000_CTRL_PFM_REQ_HNDLR_INPUT_SENSOR_INDEX (0x00000000) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_TEMP_CTRL_ENGAGE_DELTA (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_TEMP_CTRL_ENGAGE_DELTA (0x0000000BU) +// NV0000_CTRL_PFM_REQ_HNDLR_INPUT_SENSOR_INDEX (0x00000000) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_TEMP_CTRL_ENGAGE_DELTA (0x00000001U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_TEMP_CTRL_DISENGAGE_DELTA (0x0000000CU) +// NV0000_CTRL_PFM_REQ_HNDLR_INPUT_SENSOR_INDEX (0x00000000) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_TEMP_CTRL_DISENGAGE_DELTA (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_TEMP_CTRL_DISENGAGE_DELTA (0x0000000DU) +// NV0000_CTRL_PFM_REQ_HNDLR_INPUT_SENSOR_INDEX (0x00000000) +#define 
NV0000_CTRL_PFM_REQ_HNDLR_INPUT_TEMP_CTRL_DISENGAGE_DELTA (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_TEMP_CTRL_STATUS (0x00000016U) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_TEMP_CTRL_STATUS (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_TEMP_CTRL_STATUS (0x00000017U) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_TEMP_CTRL_STATUS (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_CPU_GET_UTIL_AVG_NUM (0x00000018U) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_CPU_SET_UTIL_AVG_NUM (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_CPU_SET_UTIL_AVG_NUM (0x00000019U) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_CPU_GET_UTIL_AVG_NUM (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_PERF_SENSOR (0x0000001AU) +// NV0000_CTRL_PFM_REQ_HNDLR_INPUT_SENSOR_INDEX (0x00000000) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_NEXT_EXPECTED_POLL (0x00000001U) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_PERF_SENSOR_VALUE (0x00000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_PERF_SENSOR_AVAILABLE (0x00000001U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_CALL_ACPI (0x0000001BU) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_ACPI_CMD (0x00000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_ACPI_PARAM_IN (0x00000001U) +#define NV0000_CTRL_PFM_REQ_HNDLR_OUTPUT_ACPI_RESULT_1 (0x00000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_OUTPUT_ACPI_RESULT_2 (0x00000001U) +#define NV0000_CTRL_PFM_REQ_HNDLR_OUTPUT_ACPI_PSHAREPARAM_STATUS (0x00000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_OUTPUT_ACPI_PSHAREPARAM_VERSION (0x00000001U) +#define NV0000_CTRL_PFM_REQ_HNDLR_OUTPUT_ACPI_PSHAREPARAM_SZ (0x00000002U) +#define NV0000_CTRL_PFM_REQ_HNDLR_OUTPUT_ACPI_PSS_SZ (0x00000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_OUTPUT_ACPI_PSS_COUNT (0x00000001U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_IGPU_TURBO (0x0000001CU) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_SET_IGPU_TURBO (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_TEMP_PERIOD (0x00000026U) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_TEMP_PERIOD (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_TEMP_PERIOD (0x00000027U) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_TEMP_PERIOD (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_TEMP_NUDGE_FACTOR (0x00000028U) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_TEMP_NUDGE_UP (0x00000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_TEMP_NUDGE_DOWN (0x00000001U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_TEMP_NUDGE_FACTOR (0x00000029U) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_TEMP_NUDGE_UP (0x00000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_TEMP_NUDGE_DOWN (0x00000001U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_TEMP_THRESHOLD_SAMPLES (0x0000002AU) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_TEMP_THRESHOLD_SAMPLE_HOLD (0x00000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_TEMP_THRESHOLD_SAMPLE_STEP (0x00000001U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_TEMP_THRESHOLD_SAMPLES (0x0000002BU) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_TEMP_THRESHOLD_SAMPLE_HOLD (0x00000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_TEMP_THRESHOLD_SAMPLE_STEP (0x00000001U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_TEMP_PERF_LIMITS (0x0000002CU) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_TEMP_PERF_LIMIT_UPPER (0x00000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_TEMP_PERF_LIMIT_LOWER (0x00000001U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_TEMP_PERF_LIMITS (0x0000002DU) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_TEMP_PERF_LIMIT_UPPER 
(0x00000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_RESULT_TEMP_PERF_LIMIT_LOWER (0x00000001U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_PM1_AVAILABLE (0x0000002EU) +#define NV0000_CTRL_PFM_REQ_HNDLR_INPUT_PM1_AVAILABLE (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_PM1_AVAILABLE (0x0000002FU) +#define NV0000_CTRL_PFM_REQ_HNDLR_OUTPUT_PM1_AVAILABLE (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_CPU_PACKAGE_LIMITS (0x00000044U) +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_CPU_PACKAGE_LIMITS_PL1 (0x00000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_CPU_PACKAGE_LIMITS_PL2 (0x00000001U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_CPU_PACKAGE_LIMITS (0x00000045U) +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_CPU_PACKAGE_LIMITS_PL1 (0x00000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_CPU_FREQ_LIMIT (0x00000046U) +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_CPU_FREQ_LIMIT_MHZ (0000000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_CPU_FREQ_LIMIT (0x00000047U) +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_CPU_FREQ_LIMIT_MHZ (0000000000U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_PPM (0x00000048U) +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_PPM_INDEX (0000000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_GET_PPM_AVAILABLE_MASK (0000000001U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_PPM (0x00000049U) +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_PPM_INDEX (0000000000U) +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_TYPE_SET_PPM_INDEX_MAX (2U) + +#define NV0000_CTRL_PFM_REQ_HNDLR_PPM_INDEX 7:0 +#define NV0000_CTRL_PFM_REQ_HNDLR_PPM_INDEX_MAXPERF (0U) +#define NV0000_CTRL_PFM_REQ_HNDLR_PPM_INDEX_BALANCED (1U) +#define NV0000_CTRL_PFM_REQ_HNDLR_PPM_INDEX_QUIET (2U) +#define NV0000_CTRL_PFM_REQ_HNDLR_PPM_INDEX_INVALID (0xFFU) +#define NV0000_CTRL_PFM_REQ_HNDLR_PPM_MASK 15:8 +#define NV0000_CTRL_PFM_REQ_HNDLR_PPM_MASK_INVALID (0U) + +/* valid PS_STATUS result values */ +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_PS_STATUS_OFF (0U) +#define NV0000_CTRL_PFM_REQ_HNDLR_CMD_PS_STATUS_ON (1U) + +#define PFM_REQ_HNDLR_MAX_COUNTERS_PER_BLOCK 32U +typedef struct NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_GET_PERF_SENSOR_COUNTERS_PARAMS { + NvU32 objHndl; + NvU32 blockId; + NvU32 nextExpectedSampleTimems; + NvU32 countersReq; + NvU32 countersReturned; + NvU32 counterBlock[PFM_REQ_HNDLR_MAX_COUNTERS_PER_BLOCK]; +} NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_GET_PERF_SENSOR_COUNTERS_PARAMS; + +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_GET_PERF_SENSORS (0x146U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_GET_PERF_SENSORS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_GET_PERF_SENSORS_PARAMS_MESSAGE_ID (0x46U) + +typedef NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_GET_PERF_SENSOR_COUNTERS_PARAMS NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_GET_PERF_SENSORS_PARAMS; + +#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_GET_EXTENDED_PERF_SENSORS (0x147U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_GET_EXTENDED_PERF_SENSORS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_GET_EXTENDED_PERF_SENSORS_PARAMS_MESSAGE_ID (0x47U) + +typedef NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_GET_PERF_SENSOR_COUNTERS_PARAMS NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_GET_EXTENDED_PERF_SENSORS_PARAMS; + +/* + * NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CALL_ACPI + * + * This command allows users to call PFM_REQ_HNDLR ACPI commands for testing purposes. 
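+ *
+ * A minimal usage sketch (illustrative only; NvRmControl() is assumed to be
+ * the user-space RMAPI entry point, and someAcpiCmd stands in for a
+ * PFM_REQ_HNDLR ACPI sub-command id not listed in this header):
+ *
+ *     NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CALL_ACPI_PARAMS acpi = { 0 };
+ *     acpi.cmd   = someAcpiCmd;
+ *     acpi.input = 0;
+ *     status = NvRmControl(hClient, hClient,
+ *                          NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CALL_ACPI,
+ *                          &acpi, sizeof(acpi));
+ *     // on success, the first acpi.resultSz bytes of acpi.result[] are valid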
+ *
+ * cmd
+ *   This parameter specifies the PFM_REQ_HNDLR ACPI command to execute.
+ *
+ * input
+ *   This parameter specifies the cmd-dependent input value.
+ *
+ * resultSz
+ *   This parameter returns the size (in bytes) of the valid data
+ *   returned in the result parameter.
+ *
+ * result
+ *   This parameter returns the results of the specified cmd.
+ *   The maximum size (in bytes) of this returned data will
+ *   not exceed PFM_REQ_HNDLR_MAX_ACPI_OUTPUT_BUFFER_SIZE.
+ *
+ * PFM_REQ_HNDLR_MAX_ACPI_OUTPUT_BUFFER_SIZE
+ *   The size of the result buffer in units of NvU32.
+ *   The smallest value is sizeof(PSS_ENTRY)*ACPI_PSS_ENTRY_MAX.
+ *   Since the former is 24 bytes and the latter is 48, this value cannot be
+ *   smaller than 288 (24 * 48 = 1152 bytes, i.e. 288 NvU32s).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *
+ */
+#define PFM_REQ_HNDLR_MAX_ACPI_OUTPUT_BUFFER_SIZE 288U
+#define NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CALL_ACPI_PARAMS_MESSAGE_ID (0x43U)
+
+typedef struct NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CALL_ACPI_PARAMS {
+    NvU32 cmd;
+    NvU32 input;
+    NvU32 resultSz;
+    NvU32 result[PFM_REQ_HNDLR_MAX_ACPI_OUTPUT_BUFFER_SIZE];
+} NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CALL_ACPI_PARAMS;
+
+#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CALL_ACPI (0x143U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CALL_ACPI_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_RMTRACE_MODULE_PFM_REQ_HNDLR (0x00008000U)
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_GET_FRM_DATA
+ *
+ * This command is used to read FRL data as needed.
+ *
+ * nextSampleNumber
+ *   This parameter returns the counter of the next sample which is being
+ *   filled.
+ * samples
+ *   This parameter returns the frame time, render time, target time, and
+ *   client ID, with one reserved bit for future use.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_GET_FRM_DATA (0x144U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_GET_FRM_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_FRM_DATA_SAMPLE_SIZE 64U
+
+typedef struct NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_FRM_DATA_SAMPLE {
+    NvU16 frameTime;
+    NvU16 renderTime;
+    NvU16 targetTime;
+    NvU8  sleepTime;
+    NvU8  sampleNumber;
+} NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_FRM_DATA_SAMPLE;
+
+#define NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_GET_FRM_DATA_PARAMS_MESSAGE_ID (0x44U)
+
+typedef struct NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_GET_FRM_DATA_PARAMS {
+    NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_FRM_DATA_SAMPLE samples[NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_FRM_DATA_SAMPLE_SIZE];
+    NvU8 nextSampleNumber;
+} NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_GET_FRM_DATA_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SET_FRM_DATA
+ *
+ * This command is used to write FRM data as needed.
+ *
+ * frameTime
+ *   This parameter contains the frame time of the current frame.
+ * renderTime
+ *   This parameter contains the render time of the current frame.
+ * targetTime
+ *   This parameter contains the target time of the current frame.
+ * sleepTime
+ *   This parameter contains the sleep duration inserted by FRL for the
+ *   latest frame.
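+ *
+ * A minimal usage sketch (illustrative only; NvRmControl() is assumed to be
+ * the user-space RMAPI entry point, and the sample values stand in for times
+ * in whatever units FRL expects on the platform):
+ *
+ *     NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_SET_FRM_DATA_PARAMS frm = { 0 };
+ *     frm.sampleData.frameTime  = 16;
+ *     frm.sampleData.renderTime = 12;
+ *     frm.sampleData.targetTime = 16;
+ *     frm.sampleData.sleepTime  = 4;
+ *     status = NvRmControl(hClient, hClient,
+ *                          NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SET_FRM_DATA,
+ *                          &frm, sizeof(frm));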
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_SET_FRM_DATA (0x145U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_SET_FRM_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_SET_FRM_DATA_PARAMS_MESSAGE_ID (0x45U)
+
+typedef struct NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_SET_FRM_DATA_PARAMS {
+    NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_FRM_DATA_SAMPLE sampleData;
+} NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_SET_FRM_DATA_PARAMS;
+
+/* _ctrl0000system_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h
new file mode 100644
index 0000000..9ddc130
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h
@@ -0,0 +1,439 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2009-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000unix.finn
+//
+
+#include "ctrl/ctrl0000/ctrl0000base.h"
+
+#include "ctrl/ctrlxxxx.h"
+/* NV01_ROOT (client) Linux control commands and parameters */
+
+/*
+ * NV0000_CTRL_CMD_OS_UNIX_FLUSH_USER_CACHE
+ *
+ * This command may be used to force a cache flush for a range of virtual
+ * addresses in memory. It can be used for either user or kernel addresses.
+ *
+ * offset, length
+ *   These parameters specify the offset within the memory block
+ *   and the number of bytes to flush/invalidate.
+ * cacheOps
+ *   This parameter flags whether to flush, invalidate, or do both.
+ *   Possible values are:
+ *     NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH
+ *     NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_INVALIDATE
+ *     NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH_INVALIDATE
+ * hDevice
+ *   This parameter is the handle to the device.
+ * hObject
+ *   This parameter is the handle to the memory structure being operated on.
+ * internalOnly
+ *   Intended for internal use unless the client is running in the MODS UNIX
+ *   environment, in which case this parameter specifies the virtual address
+ *   of the memory block to flush.
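+ *
+ * A minimal usage sketch (illustrative only; NvRmControl() is assumed to be
+ * the user-space RMAPI entry point, and hDevice/hMemory are pre-existing
+ * device and memory object handles):
+ *
+ *     NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS flush = { 0 };
+ *     flush.offset   = 0;
+ *     flush.length   = 4096;
+ *     flush.cacheOps = NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH_INVALIDATE;
+ *     flush.hDevice  = hDevice;
+ *     flush.hObject  = hMemory;
+ *     status = NvRmControl(hClient, hClient,
+ *                          NV0000_CTRL_CMD_OS_UNIX_FLUSH_USER_CACHE,
+ *                          &flush, sizeof(flush));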
+ * + * Possible status values are: + * NV_OK + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_COMMAND + * NV_ERR_INVALID_LIMIT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0000_CTRL_CMD_OS_UNIX_FLUSH_USER_CACHE (0x3d02) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 offset, 8); + NV_DECLARE_ALIGNED(NvU64 length, 8); + NvU32 cacheOps; + NvHandle hDevice; + NvHandle hObject; + NV_DECLARE_ALIGNED(NvU64 internalOnly, 8); +} NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS; + +#define NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH (0x00000001) +#define NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_INVALIDATE (0x00000002) +#define NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH_INVALIDATE (0x00000003) + + +/* + * NV0000_CTRL_CMD_OS_UNIX_GET_CONTROL_FILE_DESCRIPTOR + * + * This command is used to get the control file descriptor. + * + * Possible status values returned are: + * NV_OK + * + */ +#define NV0000_CTRL_CMD_OS_UNIX_GET_CONTROL_FILE_DESCRIPTOR (0x3d04) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | 0x4" */ + +typedef struct NV0000_CTRL_OS_UNIX_GET_CONTROL_FILE_DESCRIPTOR_PARAMS { + NvS32 fd; +} NV0000_CTRL_OS_UNIX_GET_CONTROL_FILE_DESCRIPTOR_PARAMS; + +typedef enum NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE { + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_NONE = 0, + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM = 1, +} NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE; + +typedef struct NV0000_CTRL_OS_UNIX_EXPORT_OBJECT { + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE type; + + union { + struct { + NvHandle hDevice; + NvHandle hParent; + NvHandle hObject; + } rmObject; + } data; +} NV0000_CTRL_OS_UNIX_EXPORT_OBJECT; + +/* + * NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD + * + * This command may be used to export NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE + * object to file descriptor. + * + * Note that the 'fd' parameter is an input parameter at the kernel level, but + * an output parameter for usermode RMAPI clients -- the RMAPI library will + * open a new FD automatically if a usermode RMAPI client exports an object. + * + * Kernel-mode RM clients can export an object to an FD in two steps: + * 1. User client calls this RMControl with the flag 'EMPTY_FD_TRUE' to create + * an empty FD to receive the object, then passes that FD to the kernel-mode + * RM client. + * 2. Kernel-mode RM client fills in the rest of the + * NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS as usual and calls RM to + * associate its desired RM object with the empty FD from its usermode + * client. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_PARAMETER + */ +#define NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD (0x3d05) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS { + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT object; /* IN */ + NvS32 fd; /* IN/OUT */ + NvU32 flags; /* IN */ +} NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS; + +/* + * If EMPTY_FD is TRUE, the 'fd' will be created but no object will be + * associated with it. 
The hDevice parameter is still required, to determine
+ * the correct device node on which to create the file descriptor.
+ * (An empty FD can then be passed to a kernel-mode driver to associate it
+ * with an actual object.)
+ */
+#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_FLAGS_EMPTY_FD 0:0
+#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_FLAGS_EMPTY_FD_FALSE (0x00000000)
+#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_FLAGS_EMPTY_FD_TRUE (0x00000001)
+
+/*
+ * NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD
+ *
+ * This command may be used to import an
+ * NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE object back from a file descriptor.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_PARAMETER
+ */
+#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD (0x3d06) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS {
+    NvS32 fd;                                 /* IN */
+    NV0000_CTRL_OS_UNIX_EXPORT_OBJECT object; /* IN */
+} NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_OS_GET_GPU_INFO
+ *
+ * This command will query the OS-specific info for the specified GPU.
+ *
+ * gpuId
+ *   This parameter should specify a valid GPU ID value. If there
+ *   is no GPU present with the specified ID, a status of
+ *   NV_ERR_INVALID_ARGUMENT is returned.
+ * minorNum
+ *   This parameter returns the minor number of the device node.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_CTRL_CMD_OS_GET_GPU_INFO (0x3d07) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | 0x7" */
+
+typedef struct NV0000_CTRL_OS_GET_GPU_INFO_PARAMS {
+    NvU32 gpuId;    /* IN */
+    NvU32 minorNum; /* OUT */
+} NV0000_CTRL_OS_GET_GPU_INFO_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_OS_UNIX_GET_EXPORT_OBJECT_INFO
+ *
+ * This command will query the deviceInstance for the specified FD
+ * which is referencing an exported object.
+ *
+ * fd
+ *   This parameter specifies a file descriptor referencing an exported
+ *   object on a Unix system.
+ *
+ * deviceInstance
+ *   This parameter returns the deviceInstance on which the object is
+ *   located.
+ *
+ *   NV_MAX_DEVICES is returned if the object is parented by a client instead
+ *   of a device.
+ *
+ * gpuInstanceId
+ *   For objects parented by a device, this parameter returns the MIG GPU
+ *   instance id the device is subscribed to, or NV_U32_MAX if no
+ *   subscription was made.
+ *
+ * maxObjects
+ *   This parameter returns the maximum number of object handles that may be
+ *   contained in the file descriptor.
+ *
+ * metadata
+ *   This parameter returns the user metadata passed into the
+ *   _EXPORT_OBJECTS_TO_FD control call.
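+ *
+ * A minimal usage sketch (illustrative only; NvRmControl() is assumed to be
+ * the user-space RMAPI entry point, and exportFd an fd previously produced
+ * by _EXPORT_OBJECTS_TO_FD):
+ *
+ *     NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS info = { 0 };
+ *     info.fd = exportFd;
+ *     status = NvRmControl(hClient, hClient,
+ *                          NV0000_CTRL_CMD_OS_UNIX_GET_EXPORT_OBJECT_INFO,
+ *                          &info, sizeof(info));
+ *     // on success, info.deviceInstance, info.maxObjects and info.metadata
+ *     // describe the exporting device and the fd's object table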
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_OBJECT_NOT_FOUND
+ */
+
+#define NV0000_CTRL_CMD_OS_UNIX_GET_EXPORT_OBJECT_INFO (0x3d08) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0000_OS_UNIX_EXPORT_OBJECT_FD_BUFFER_SIZE 64
+
+#define NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS_MESSAGE_ID (0x8U)
+
+typedef struct NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS {
+    NvS32 fd;             /* IN */
+    NvU32 deviceInstance; /* OUT */
+    NvU32 gpuInstanceId;  /* OUT */
+    NvU16 maxObjects;     /* OUT */
+    NvU8  metadata[NV0000_OS_UNIX_EXPORT_OBJECT_FD_BUFFER_SIZE]; /* OUT */
+} NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_OS_UNIX_REFRESH_RMAPI_DEVICE_LIST
+ *
+ * This command will re-fetch probed GPU information and update the RMAPI
+ * library's internal detected-GPU context information accordingly. Without
+ * this, GPUs attached to RM after RMAPI client initialization will not be
+ * accessible and all RMAPI library calls will fail on them.
+ * Currently this is used by the NVSwitch Fabric Manager in conjunction with
+ * the NVSwitch Shared Virtualization feature, where GPUs are hot-plugged
+ * into the OS/RM (by the Hypervisor) and Fabric Manager is signaled
+ * externally by the Hypervisor to initialize those GPUs.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+
+#define NV0000_CTRL_CMD_OS_UNIX_REFRESH_RMAPI_DEVICE_LIST (0x3d09) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | 0x9" */
+
+/*
+ * This control call has been deprecated. It will be deleted soon.
+ * Use NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD (singular) or
+ * NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECTS_TO_FD (plural) instead.
+ */
+#define NV0000_CTRL_CMD_OS_UNIX_CREATE_EXPORT_OBJECT_FD (0x3d0a) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_BUFFER_SIZE NV0000_OS_UNIX_EXPORT_OBJECT_FD_BUFFER_SIZE
+
+#define NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS_MESSAGE_ID (0xAU)
+
+typedef struct NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS {
+    NvHandle hDevice;    /* IN */
+    NvU16    maxObjects; /* IN */
+    NvU8     metadata[NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_BUFFER_SIZE]; /* IN */
+    NvS32    fd;         /* IN/OUT */
+} NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECTS_TO_FD
+ *
+ * Exports RM handles to a provided fd, or creates a new FD if requested.
+ *
+ * The objects in the 'objects' array are exported into the fd
+ * as the range [index, index + numObjects).
+ *
+ * If index + numObjects is greater than the maxObjects value used
+ * to create the file descriptor, NV_ERR_INVALID_PARAMETER is returned.
+ *
+ * If 'numObjects' and 'index' overlap with a prior call, the newer call's RM
+ * object handles will overwrite the handles exported by the previous call.
+ * This overlapping behavior can also be used to unexport a handle by setting
+ * the appropriate object in 'objects' to 0.
+ *
+ * fd
+ *   A file descriptor. If -1, a new FD will be created.
+ *
+ * hDevice
+ *   The owning device of the objects to be exported (must be the same for
+ *   all objects).
+ *
+ * maxObjects
+ *   The total number of objects that the client wishes to export to the FD.
+ *   This parameter will be honored only when the FD is being created.
+ *
+ * metadata
+ *   A buffer for clients to write some metadata to and pass to the importing
+ *   client. This parameter will be honored only when the FD is being
+ *   created.
+ *
+ * objects
+ *   Array of RM object handles to export to the fd.
+ *
+ * numObjects
+ *   The number of handles the user wishes to export in this call.
+ *
+ * index
+ *   The index into the export fd at which to start exporting the handles in
+ *   'objects' (for use in iterative calls).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OUT_OF_RANGE
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ */
+#define NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECTS_TO_FD (0x3d0b) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_MAX_OBJECTS 512
+
+#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS_MESSAGE_ID (0xBU)
+
+typedef struct NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS {
+    NvS32    fd;         /* IN/OUT */
+    NvHandle hDevice;    /* IN */
+    NvU16    maxObjects; /* IN */
+    NvU8     metadata[NV0000_OS_UNIX_EXPORT_OBJECT_FD_BUFFER_SIZE]; /* IN */
+    NvHandle objects[NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_MAX_OBJECTS]; /* IN */
+    NvU16    numObjects; /* IN */
+    NvU16    index;      /* IN */
+} NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECTS_FROM_FD
+ *
+ * This command can be used to import back RM handles
+ * that were exported to an fd using the
+ * NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECTS_TO_FD control call.
+ *
+ * If index + numObjects is greater than the maxObjects value used
+ * to create the file descriptor, NV_ERR_INVALID_PARAMETER is returned
+ * and no objects are imported.
+ *
+ * For each valid handle in the 'objects' array parameter at index 'i',
+ * the corresponding object handle at index ('i' + 'index') contained by
+ * the fd will be imported. If the object at index ('i' + 'index') has
+ * not been exported into the fd, no object will be imported.
+ *
+ * If any of the handles contained in the 'objects' array parameter are
+ * invalid and the corresponding export object handle is valid,
+ * NV_ERR_INVALID_PARAMETER will be returned and no handles will be imported.
+ *
+ * fd
+ *   The export fd from which to import handles.
+ *
+ * hParent
+ *   The parent RM handle under which all of the exported objects will
+ *   be duped.
+ *
+ * objects
+ *   An array of RM handles. The exported objects will be duped under
+ *   these handles during the import process.
+ *
+ * objectTypes
+ *   An array of RM handle types. The type _NONE will be returned if
+ *   the object was not imported. Other possible object types are
+ *   mentioned below.
+ *
+ * numObjects
+ *   The number of valid object handles in the 'objects' array. This should
+ *   be set to the number of objects that the client wishes to import.
+ *
+ * index
+ *   The index into the fd at which to start importing from (for
+ *   use in iterative calls).
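+ *
+ * A minimal iterative-import sketch (illustrative only; NvRmControl() is
+ * assumed to be the user-space RMAPI entry point, and exportFd is assumed
+ * to have been created with maxObjects == 256, so two passes of 128 handles
+ * each cover the whole table):
+ *
+ *     NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS imp = { 0 };
+ *     NvU16 pass;
+ *     imp.fd         = exportFd;
+ *     imp.hParent    = hClient;
+ *     imp.numObjects = NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_TO_FD_MAX_OBJECTS;
+ *     for (pass = 0; pass < 2; pass++)
+ *     {
+ *         imp.index = pass * NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_TO_FD_MAX_OBJECTS;
+ *         // ... fill imp.objects[] with the destination handles for this pass ...
+ *         status = NvRmControl(hClient, hClient,
+ *                              NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECTS_FROM_FD,
+ *                              &imp, sizeof(imp));
+ *         // imp.objectTypes[i] reports what (if anything) was imported
+ *     }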
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OUT_OF_RANGE
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_PARAMETER
+ */
+#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECTS_FROM_FD (0x3d0c) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS_MESSAGE_ID" */
+
+//
+// TODO Bump this back up to 512 after the FLA revamp is complete
+//
+#define NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_TO_FD_MAX_OBJECTS 128
+
+#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_NONE 0
+#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_VIDMEM 1
+#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_SYSMEM 2
+#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_FABRIC 3
+#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_FABRIC_MC 4
+
+#define NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS_MESSAGE_ID (0xCU)
+
+typedef struct NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS {
+    NvS32    fd;      /* IN */
+    NvHandle hParent; /* IN */
+    NvHandle objects[NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_TO_FD_MAX_OBJECTS];     /* IN */
+    NvU8     objectTypes[NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_TO_FD_MAX_OBJECTS]; /* OUT */
+    NvU16    numObjects; /* IN */
+    NvU16    index;      /* IN */
+} NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS;
+
+/* _ctrl0000unix_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h
new file mode 100644
index 0000000..412559c
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h
@@ -0,0 +1,224 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000vgpu.finn
+//
+
+#include "ctrl/ctrl0000/ctrl0000base.h"
+
+#include "ctrl/ctrlxxxx.h"
+#include "ctrl/ctrla081.h"
+#include "class/cl0000.h"
+#include "nv_vgpu_types.h"
+
+/* DRF macros for OBJGPU::gpuId */
+#define NV0000_BUSDEVICE_DOMAIN 31:16
+#define NV0000_BUSDEVICE_BUS 15:8
+#define NV0000_BUSDEVICE_DEVICE 7:0
+
+#define GPU_32_BIT_ID_DECODE_DOMAIN(gpuId) (NvU16)DRF_VAL(0000, _BUSDEVICE, _DOMAIN, gpuId)
+#define GPU_32_BIT_ID_DECODE_BUS(gpuId) (NvU8) DRF_VAL(0000, _BUSDEVICE, _BUS, gpuId)
+#define GPU_32_BIT_ID_DECODE_DEVICE(gpuId) (NvU8) DRF_VAL(0000, _BUSDEVICE, _DEVICE, gpuId)
+
+/*
+ * NV0000_CTRL_CMD_VGPU_CREATE_DEVICE
+ *
+ * This command informs RM to create a vGPU device on KVM.
+ *
+ * vgpuName [IN]
+ * This parameter provides the MDEV UUID or VF BDF depending on whether the
+ * MDEV or vfio-pci-core framework is used.
+ *
+ * gpuPciId [IN]
+ * This parameter provides the gpuId of the GPU on which the vGPU device is
+ * created.
+ *
+ * gpuPciBdf [IN]
+ * This parameter specifies the BDF of the VF. (Same as PF for non-sriov)
+ *
+ * vgpuTypeId [IN]
+ * This parameter specifies the vGPU type ID for the device to be created.
+ *
+ * vgpuId [OUT]
+ * This parameter returns the vgpu id allocated by RM for the device.
+ *
+ * gpuInstanceId [OUT]
+ * This parameter returns the swizzId allocated by RM for the device.
+ *
+ * placementId [OUT]
+ * This parameter returns the placementId allocated by RM for the device.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_EVENT
+ * NV_ERR_OBJECT_NOT_FOUND
+ * NV_ERR_INVALID_CLIENT
+ *
+ */
+
+#define NV0000_CTRL_CMD_VGPU_CREATE_DEVICE (0xc02) /* finn: Evaluated from "(FINN_NV01_ROOT_VGPU_INTERFACE_ID << 8) | NV0000_CTRL_VGPU_CREATE_DEVICE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_VGPU_CREATE_DEVICE_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0000_CTRL_VGPU_CREATE_DEVICE_PARAMS {
+    NvU8  vgpuName[VM_UUID_SIZE];
+    NvU32 gpuPciId;
+    NvU32 gpuPciBdf;
+    NvU32 vgpuTypeId;
+    NvU16 vgpuId;
+    NvU32 gpuInstanceId;
+    NvU32 placementId;
+} NV0000_CTRL_VGPU_CREATE_DEVICE_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_VGPU_GET_INSTANCES
+ *
+ * This command queries RM for available instances for a particular vGPU type ID
+ * on KVM.
+ *
+ * gpuPciId [IN]
+ * This parameter specifies the gpuId of the GPU on which vGPU instances are
+ * being queried.
+ *
+ * gpuPciBdf [IN]
+ * This parameter specifies the BDF of the VF. (Same as PF for non-sriov)
+ *
+ * numVgpuTypes [IN]
+ * This parameter specifies the count of vgpuTypeIds supplied and the
+ * count of availableInstances values to be returned.
+ *
+ * vgpuTypeIds [IN]
+ * This parameter specifies a total of numVgpuTypes vGPU type IDs for which
+ * the available instances are to be queried.
+ *
+ * availableInstances [OUT]
+ * This parameter returns a total of numVgpuTypes available instances for
+ * the respective vGPU type IDs supplied in the vgpuTypeIds input parameter.
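+ *
+ * A minimal query sketch (illustrative only; it assumes an RM control entry
+ * point of the form NvRmControl(hClient, hObject, cmd, pParams, paramsSize),
+ * with hClient, gpuId and vgpuTypeId being caller-supplied placeholders):
+ *
+ *   NV0000_CTRL_VGPU_GET_INSTANCES_PARAMS p = { 0 };
+ *   p.gpuPciId = gpuId;              // 32-bit gpuId of the physical GPU
+ *   p.numVgpuTypes = 1;
+ *   p.vgpuTypeIds[0] = vgpuTypeId;   // type ID to query
+ *   NvRmControl(hClient, hClient, NV0000_CTRL_CMD_VGPU_GET_INSTANCES,
+ *               &p, sizeof(p));
+ *   // p.availableInstances[0] now holds the remaining instance count;
+ *   // the DRF decode helpers above recover the PCI location, e.g.
+ *   // NvU16 domain = GPU_32_BIT_ID_DECODE_DOMAIN(p.gpuPciId);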
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_EVENT
+ * NV_ERR_OBJECT_NOT_FOUND
+ * NV_ERR_INVALID_CLIENT
+ * NV_ERR_INVALID_STATE
+ *
+ */
+
+#define NV0000_CTRL_CMD_VGPU_GET_INSTANCES (0xc03) /* finn: Evaluated from "(FINN_NV01_ROOT_VGPU_INTERFACE_ID << 8) | NV0000_CTRL_VGPU_GET_INSTANCES_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_VGPU_GET_INSTANCES_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV0000_CTRL_VGPU_GET_INSTANCES_PARAMS {
+    NvU32 gpuPciId;
+    NvU32 gpuPciBdf;
+    NvU32 numVgpuTypes;
+    NvU32 vgpuTypeIds[NVA081_MAX_VGPU_TYPES_PER_PGPU];
+    NvU32 availableInstances[NVA081_MAX_VGPU_TYPES_PER_PGPU];
+} NV0000_CTRL_VGPU_GET_INSTANCES_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_VGPU_DELETE_DEVICE
+ *
+ * This command informs RM to delete a vGPU device on KVM.
+ *
+ * vgpuName [IN]
+ * This parameter provides the MDEV UUID or VF BDF depending on whether the
+ * MDEV or vfio-pci-core framework is used.
+ *
+ * vgpuId [IN]
+ * This parameter provides the vgpu id allocated by RM for the device to be
+ * deleted.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_EVENT
+ * NV_ERR_OBJECT_NOT_FOUND
+ * NV_ERR_INVALID_CLIENT
+ *
+ */
+
+#define NV0000_CTRL_CMD_VGPU_DELETE_DEVICE (0xc04) /* finn: Evaluated from "(FINN_NV01_ROOT_VGPU_INTERFACE_ID << 8) | NV0000_CTRL_VGPU_DELETE_DEVICE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_VGPU_DELETE_DEVICE_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV0000_CTRL_VGPU_DELETE_DEVICE_PARAMS {
+    NvU8  vgpuName[VM_UUID_SIZE];
+    NvU16 vgpuId;
+} NV0000_CTRL_VGPU_DELETE_DEVICE_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_VGPU_VFIO_NOTIFY_RM_STATUS
+ *
+ * This command informs RM of the status of vgpu-vfio GPU operations such as
+ * probe and unregister.
+ *
+ * returnStatus [IN]
+ * This parameter provides the status of the vgpu-vfio GPU operation.
+ *
+ * gpuId [IN]
+ * This parameter provides the gpuId of the GPU.
+ */
+
+#define NV0000_CTRL_CMD_VGPU_VFIO_NOTIFY_RM_STATUS (0xc05) /* finn: Evaluated from "(FINN_NV01_ROOT_VGPU_INTERFACE_ID << 8) | NV0000_CTRL_VGPU_VFIO_NOTIFY_RM_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_VGPU_VFIO_NOTIFY_RM_STATUS_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV0000_CTRL_VGPU_VFIO_NOTIFY_RM_STATUS_PARAMS {
+    NvU32 returnStatus;
+    NvU32 gpuId;
+} NV0000_CTRL_VGPU_VFIO_NOTIFY_RM_STATUS_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPU_UPDATE_SYSFS_NODE
+ *
+ * This command gets or sets the information for the following sysfs nodes:
+ * gpuInstanceId
+ * placementId
+ *
+ * vgpuName [IN]
+ * This parameter provides the MDEV UUID or VF BDF depending on whether the
+ * MDEV or vfio-pci-core framework is used.
+ *
+ * mode [IN]
+ * This parameter indicates the type of operation (get or set) this command
+ * will perform.
+ *
+ * sysfs_val [IN/OUT]
+ * This parameter holds the placementId/gpuInstanceId value.
+ *
+ */
+
+#define NV0000_CTRL_CMD_GPU_UPDATE_SYSFS_NODE (0x206U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_UPDATE_SYSFS_NODE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_UPDATE_SYSFS_NODE_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV0000_CTRL_GPU_UPDATE_SYSFS_NODE_PARAMS {
+    NvU8  vgpuName[VM_UUID_SIZE];
+    NvU32 mode;
+    NvU32 sysfs_val;
+} NV0000_CTRL_GPU_UPDATE_SYSFS_NODE_PARAMS;
+
+/* _ctrl0000vgpu_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0002.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0002.h
new file mode 100644
index 0000000..44f5279
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0002.h
@@ -0,0 +1,178 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0002.finn
+//
+
+#include "ctrl/ctrlxxxx.h"
+#define NV0002_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0002, NV0002_CTRL_##cat, idx)
+
+/* Client command categories (6bits) */
+#define NV0002_CTRL_RESERVED (0x00)
+#define NV0002_CTRL_DMA (0x01)
+
+
+/*
+ * NV0002_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ */
+
+#define NV0002_CTRL_CMD_NULL (0x20000) /* finn: Evaluated from "(FINN_NV01_CONTEXT_DMA_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+
+/*
+ * NV0002_CTRL_CMD_UPDATE_CONTEXTDMA
+ *
+ * This command will update the parameters of the specified context dma. The
+ * context dma must be bound to a display channel. The update is limited
+ * to the display view of the context dma. Other use cases will continue to
+ * use the original allocation parameters.
+ *
+ * This is used on platforms where memory may be moved by the operating
+ * system after allocation.
+ *
+ * This control call supports the NVOS54_FLAGS_LOCK_BYPASS flag.
+ *
+ * baseAddress
+ * This parameter, if selected by flags, indicates the new baseAddress for
+ * the ctxdma.
+ * limit
+ * This parameter, if selected by flags, indicates the new limit of the
+ * ctxdma.
+ * hCtxDma
+ * ContextDma handle on which to operate. Must match the handle given to
+ * the control call.
+ * hChannel
+ * Display channel handle. This field is ignored.
+ * hintHandle
+ * Hint value returned from HeapAllocHint which encodes information about
+ * the surface. This is used by chips without generic kind. Newer chips
+ * use the COMPR_INFO flag and the hintHandle must be zero.
+ * flags
+ * This parameter specifies flags which indicate which other parameters are
+ * valid.
+ * FLAGS_PAGESIZE updates the context DMA pagesize field, if not _DEFAULT.
+ * FLAGS_USE_COMPR_INFO uses the surface format specified in the params,
+ * instead of hintHandle.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_OBJECT
+ * NV_ERR_INVALID_ARGUMENT
+ * NVOS_STATUS_NOT_SUPPORTED
+ */
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA (0x20101) /* finn: Evaluated from "(FINN_NV01_CONTEXT_DMA_DMA_INTERFACE_ID << 8) | NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS_MESSAGE_ID" */
+
+#define NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 baseAddress, 8);
+    NV_DECLARE_ALIGNED(NvU64 limit, 8);
+    NvHandle hSubDevice;
+    NvHandle hCtxDma;
+    NvHandle hChannel;
+    NvHandle hintHandle;
+    NvU32    flags;
+} NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS;
+
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_BASEADDRESS 0:0
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_BASEADDRESS_INVALID (0x00000000)
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_BASEADDRESS_VALID (0x00000001)
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_LIMIT 1:1
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_LIMIT_INVALID (0x00000000)
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_LIMIT_VALID (0x00000001)
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_HINT 2:2
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_HINT_INVALID (0x00000000)
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_HINT_VALID (0x00000001)
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_PAGESIZE 4:3
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_PAGESIZE_DEFAULT (0x00000000)
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_PAGESIZE_4K (0x00000001)
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_PAGESIZE_BIG (0x00000002)
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO 6:5
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO_NONE (0x00000000)
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO_FORMAT_PITCH (0x00000001)
+#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO_FORMAT_BLOCK_LINEAR (0x00000002)
+
+/*
+ * NV0002_CTRL_CMD_BIND_CONTEXTDMA
+ *
+ * Bind a context dma to a display channel. Binding is no longer required
+ * for host channels, but for them it silently succeeds.
+ *
+ * This control call supports the NVOS54_FLAGS_LOCK_BYPASS flag.
+ *
+ * This control replaces the obsolete RmBindContextDma() API.
+ *
+ * hChannel
+ * The channel to which the context dma is to be bound.
+ *
+ * Possible error codes include
+ * NV_OK
+ * NV_ERR_TOO_MANY_PRIMARIES hash table is full
+ * NV_ERR_NO_MEMORY instance memory is full
+ * NV_ERR_INVALID_OFFSET surface is not correctly aligned
+ * NV_ERR_STATE_IN_USE context dma was already bound to the given channel
+ */
+#define NV0002_CTRL_CMD_BIND_CONTEXTDMA (0x20102) /* finn: Evaluated from "(FINN_NV01_CONTEXT_DMA_DMA_INTERFACE_ID << 8) | NV0002_CTRL_BIND_CONTEXTDMA_PARAMS_MESSAGE_ID" */
+
+#define NV0002_CTRL_BIND_CONTEXTDMA_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0002_CTRL_BIND_CONTEXTDMA_PARAMS {
+    NvHandle hChannel;
+} NV0002_CTRL_BIND_CONTEXTDMA_PARAMS;
+
+/*
+ * NV0002_CTRL_CMD_UNBIND_CONTEXTDMA
+ *
+ * Unbind a context dma from a display channel.
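+ *
+ * For illustration, a bind/unbind round trip on a display channel might look
+ * like this sketch (illustrative only; NvRmControl(hClient, hObject, cmd,
+ * pParams, paramsSize) is an assumed control entry point, and hClient,
+ * hCtxDma and hDispChannel are caller-supplied placeholder handles):
+ *
+ *   NV0002_CTRL_BIND_CONTEXTDMA_PARAMS bind = { 0 };
+ *   bind.hChannel = hDispChannel;
+ *   NvRmControl(hClient, hCtxDma, NV0002_CTRL_CMD_BIND_CONTEXTDMA,
+ *               &bind, sizeof(bind));
+ *   // ... use the context dma with the display channel ...
+ *   NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS unbind = { 0 };
+ *   unbind.hChannel = hDispChannel;
+ *   NvRmControl(hClient, hCtxDma, NV0002_CTRL_CMD_UNBIND_CONTEXTDMA,
+ *               &unbind, sizeof(unbind));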
+ *
+ * This control call supports the NVOS54_FLAGS_LOCK_BYPASS flag.
+ *
+ * hChannel
+ * The display channel to unbind from
+ *
+ * Possible error codes include
+ * NV_OK
+ * NV_ERR_INVALID_STATE channel was not bound
+ */
+#define NV0002_CTRL_CMD_UNBIND_CONTEXTDMA (0x20103) /* finn: Evaluated from "(FINN_NV01_CONTEXT_DMA_DMA_INTERFACE_ID << 8) | NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS_MESSAGE_ID" */
+
+#define NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS {
+    NvHandle hChannel;
+} NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS;
+
+/* _ctrl0002.h_ */
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0004.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0004.h
new file mode 100644
index 0000000..05bb88f
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0004.h
@@ -0,0 +1,93 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0004.finn
+//
+
+#include "ctrl/ctrlxxxx.h"
+/* NV01_TIMER control commands and parameters */
+
+#define NV0004_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0004, NV0004_CTRL_##cat, idx)
+
+/* NV01_TIMER command categories (8bits) */
+#define NV0004_CTRL_RESERVED (0x00)
+#define NV0004_CTRL_TMR (0x01)
+
+/*
+ * NV0004_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ */
+#define NV0004_CTRL_CMD_NULL (0x40000) /* finn: Evaluated from "(FINN_NV01_TIMER_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+
+
+
+/*
+ * NV0004_CTRL_CMD_TMR_SET_ALARM_NOTIFY
+ *
+ * This command can be used to set a PTIMER alarm to trigger at the
+ * specified time in the future on the subdevice associated with this
+ * NV01_TIMER object instance.
+ *
+ * hEvent
+ * This parameter specifies the handle of an NV01_EVENT object instance
+ * that is to be signaled when the alarm triggers. This NV01_EVENT
+ * object instance must have been allocated with this NV01_TIMER object
+ * instance as its parent. If this parameter is set to NV01_NULL_OBJECT
+ * then all NV01_EVENT object instances associated with this NV01_TIMER
+ * object instance are signaled.
+ * alarmTimeNsecs
+ * This parameter specifies the relative time in nanoseconds at which
+ * the alarm should trigger.
+ * Note that the accuracy between the alarm trigger and the subsequent
+ * notification to the caller can vary depending on system conditions.
+ *
+ * Possible status values returned include:
+ * NVOS_STATUS_SUCCESS
+ * NVOS_STATUS_INVALID_PARAM_STRUCT
+ * NVOS_STATUS_INVALID_OBJECT_HANDLE
+ */
+
+#define NV0004_CTRL_CMD_TMR_SET_ALARM_NOTIFY (0x40110) /* finn: Evaluated from "(FINN_NV01_TIMER_TMR_INTERFACE_ID << 8) | NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS_MESSAGE_ID" */
+
+#define NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS_MESSAGE_ID (0x10U)
+
+typedef struct NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS {
+    NvHandle hEvent;
+    NV_DECLARE_ALIGNED(NvU64 alarmTimeNsecs, 8);
+} NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS;
+
+/* _ctrl0004_h_ */
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0020.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0020.h
new file mode 100644
index 0000000..257db3a
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0020.h
@@ -0,0 +1,80 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0020.finn
+//
+
+#include "ctrl/ctrlxxxx.h"
+#define NV0020_CTRL_CMD(cat,idx) \
+    NVXXXX_CTRL_CMD(0x0020, NV0020_CTRL_##cat, idx)
+
+/* NV0020_GPU_MANAGEMENT command categories (6bits) */
+#define NV0020_CTRL_RESERVED (0x00)
+#define NV0020_CTRL_GPU_MGMT (0x01)
+
+/*
+ * NV0020_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ */
+#define NV0020_CTRL_CMD_NULL (0x200000) /* finn: Evaluated from "(FINN_NV0020_GPU_MANAGEMENT_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+/* Maximum possible number of bytes of GID information */
+#define NV0020_GPU_MAX_GID_LENGTH (0x00000100)
+
+/*
+ * NV0020_CTRL_CMD_GPU_MGMT_SET_SHUTDOWN_STATE
+ *
+ * This command modifies the GPU zero power state for the desired GPU in the
+ * database. This state is set by a privileged client after the GPU has been
+ * completely unregistered from RM as well as from the PCI subsystem. On
+ * Linux, clients perform this operation through pci-sysfs.
+ * This control call requires admin privileges.
+ *
+ * uuid (INPUT)
+ * The UUID of the gpu.
+ * Supports binary format and SHA-1 type.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0020_CTRL_CMD_GPU_MGMT_SET_SHUTDOWN_STATE (0x200101) /* finn: Evaluated from "(FINN_NV0020_GPU_MANAGEMENT_GPU_MGMT_INTERFACE_ID << 8) | NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS {
+    NvU8 uuid[NV0020_GPU_MAX_GID_LENGTH];
+} NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS;
+
+/* _ctrl0020_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl003e.h b/src/common/sdk/nvidia/inc/ctrl/ctrl003e.h
new file mode 100644
index 0000000..8799e9e
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl003e.h
@@ -0,0 +1,136 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl003e.finn
+//
+
+#include "ctrl0041.h"
+#include "ctrl/ctrlxxxx.h"
+/* NV01_MEMORY_SYSTEM control commands and parameters */
+
+#define NV003E_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x003E, NV003E_CTRL_##cat, idx)
+
+/* NV01_MEMORY_SYSTEM command categories (6bits) */
+#define NV003E_CTRL_RESERVED (0x00)
+#define NV003E_CTRL_MEMORY (0x01)
+
+/*
+ * NV003E_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ */
+#define NV003E_CTRL_CMD_NULL (0x3e0000) /* finn: Evaluated from "(FINN_NV01_MEMORY_SYSTEM_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+
+
+
+/*
+ * NV003E_CTRL_CMD_GET_SURFACE_PHYS_ATTR
+ *
+ * See NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR
+ *
+ */
+#define NV003E_CTRL_CMD_GET_SURFACE_PHYS_ATTR NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR
+
+typedef NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS NV003E_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS;
+
+
+/* valid gpuCacheAttr return values */
+#define NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN (0x00000000)
+#define NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED (0x00000001)
+#define NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED (0x00000002)
+
+/* NV003E_CTRL_CMD_GET_SURFACE_NUM_PHYS_PAGES
+ *
+ * This command returns the number of physical pages associated with the
+ * memory object.
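+ *
+ * Together with NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES (defined below), this
+ * enables a two-step query. A sketch of the idiom (illustrative only; it
+ * assumes an RM control entry point of the form NvRmControl(hClient, hObject,
+ * cmd, pParams, paramsSize), with hClient/hMemory supplied by the caller, and
+ * it assumes NvU64 page-number entries for pPages):
+ *
+ *   NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS n = { 0 };
+ *   NvRmControl(hClient, hMemory, NV003E_CTRL_CMD_GET_SURFACE_NUM_PHYS_PAGES,
+ *               &n, sizeof(n));
+ *   NvU64 *pages = malloc(n.numPages * sizeof(NvU64)); // check in real code
+ *   NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS p = { 0 };
+ *   p.pPages = NV_PTR_TO_NvP64(pages);
+ *   p.numPages = n.numPages;         // in: capacity, out: pages copied
+ *   NvRmControl(hClient, hMemory, NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES,
+ *               &p, sizeof(p));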
+ *
+ * This call is currently implemented only on Linux and assumes that the
+ * kernel into which the RM module is loaded uses the same page size as the
+ * kernel sources against which the RM module was built.
+ *
+ * numPages
+ * This parameter returns the total number of physical pages associated
+ * with the memory object.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_INVALID_STATE
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV003E_CTRL_CMD_GET_SURFACE_NUM_PHYS_PAGES (0x3e0102) /* finn: Evaluated from "(FINN_NV01_MEMORY_SYSTEM_MEMORY_INTERFACE_ID << 8) | NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS_MESSAGE_ID" */
+
+#define NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS {
+    NvU32 numPages;
+} NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS;
+
+
+/* NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES
+ *
+ * This command returns the physical pages associated with the memory object.
+ *
+ * This call is currently implemented only on Linux and assumes that the
+ * kernel into which the RM module is loaded uses the same page size as the
+ * kernel sources against which the RM module was built.
+ *
+ * pPages
+ * This parameter returns the physical pages associated with the memory
+ * object.
+ *
+ * numPages
+ * This parameter is both an input and an output. As an input parameter,
+ * its value indicates the maximum number of physical pages to be copied
+ * to pPages. As an output parameter, its value indicates the number of
+ * physical pages copied to pPages.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_INVALID_STATE
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES (0x3e0103) /* finn: Evaluated from "(FINN_NV01_MEMORY_SYSTEM_MEMORY_INTERFACE_ID << 8) | NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS_MESSAGE_ID" */
+
+#define NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pPages, 8);
+    NvU32 numPages;
+} NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS;
+
+/* _ctrl003e_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0041.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
new file mode 100644
index 0000000..fd66f78
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
@@ -0,0 +1,494 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0041.finn
+//
+
+#include "nvos.h"
+#include "ctrl/ctrlxxxx.h"
+/* NV04_MEMORY control commands and parameters */
+
+#define NV0041_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0041, NV0041_CTRL_##cat, idx)
+
+/* NV04_MEMORY command categories (6bits) */
+#define NV0041_CTRL_RESERVED (0x00)
+#define NV0041_CTRL_MEMORY (0x01)
+
+/*
+ * NV0041_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ */
+#define NV0041_CTRL_CMD_NULL (0x410000) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+
+/*
+ * NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR
+ *
+ * This command returns attributes associated with the memory object
+ * at the given offset. The architecture-dependent return parameter
+ * comprFormat determines whether comprOffset is meaningful.
+ *
+ * This call is currently supported only in the MODS environment.
+ *
+ * memOffset
+ * This parameter is both an input and an output. As input, this
+ * parameter holds an offset into the memory surface. The return
+ * value is the physical address of the surface at the given offset.
+ * memFormat
+ * This parameter returns the memory kind of the surface.
+ * comprOffset
+ * This parameter returns the compression offset of the surface.
+ * comprFormat
+ * This parameter returns the type of compression of the surface.
+ * memAperture
+ * The aperture of the surface is returned in this field.
+ * Legal return values for this parameter are
+ * NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_VIDMEM
+ * NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_SYSMEM
+ * gpuCacheAttr
+ * gpuCacheAttr returns the gpu cache attribute of the surface.
+ * Legal return values for this field are
+ * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN
+ * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED
+ * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED
+ * gpuP2PCacheAttr
+ * gpuP2PCacheAttr returns the gpu peer-to-peer cache attribute of the
+ * surface.
+ * Legal return values for this field are
+ * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN
+ * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED
+ * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED
+ * mmuContext
+ * mmuContext indicates the type of physical address to be returned
+ * (input parameter). Legal values for this field are
+ * TEGRA_VASPACE_A -- return the device physical address for Tegra
+ * (non-GPU) engines; this is the system physical address itself. This
+ * may change to use a class value in the future.
+ * FERMI_VASPACE_A -- return the device physical address for GPU engines.
+ * This can be a system physical address or a GPU SMMU virtual address.
+ * 0 -- return the device physical address for GPU engines. This can be
+ * a system physical address or a GPU SMMU virtual address. Use of
+ * zero may be deprecated in the future.
+ * contigSegmentSize + * If the underlying surface is physically contiguous, this parameter + * returns the size in bytes of the piece of memory starting from + * the offset specified in the memOffset parameter extending to the last + * byte of the surface. + * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_BAD_OBJECT_HANDLE + * NVOS_STATUS_BAD_OBJECT_PARENT + * NVOS_STATUS_NOT_SUPPORTED + * + */ +#define NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR (0x410103) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS { + NV_DECLARE_ALIGNED(NvU64 memOffset, 8); + NvU32 memFormat; + NvU32 comprOffset; + NvU32 comprFormat; + NvU32 memAperture; + NvU32 gpuCacheAttr; + NvU32 gpuP2PCacheAttr; + NvU32 mmuContext; + NV_DECLARE_ALIGNED(NvU64 contigSegmentSize, 8); +} NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS; + +/* valid memAperture return values */ +#define NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_VIDMEM (0x00000000) +#define NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_SYSMEM (0x00000001) + +/* valid gpuCacheAttr return values */ +#define NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN (0x00000000) +#define NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED (0x00000001) +#define NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED (0x00000002) + +/* + * NV0041_CTRL_CMD_GET_SURFACE_ZCULL_ID + * + * This command returns the Z-cull identifier for a surface. + * The value of ~0 is returned if there is none associated. + * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_BAD_OBJECT_HANDLE + * NVOS_STATUS_BAD_OBJECT_PARENT + * NVOS_STATUS_NOT_SUPPORTED + * + */ +#define NV0041_CTRL_CMD_GET_SURFACE_ZCULL_ID (0x410104) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_SURFACE_ZCULL_ID_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_GET_SURFACE_ZCULL_ID_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0041_CTRL_GET_SURFACE_ZCULL_ID_PARAMS { + NvU32 zcullId; +} NV0041_CTRL_GET_SURFACE_ZCULL_ID_PARAMS; + + +// return values for 'tilingFormat' +// XXX - the names for these are misleading +#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_INVALID (0x00000000) +#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_FB (0x00000001) +#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_FB_1HIGH (0x00000002) +#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_FB_4HIGH (0x00000003) +#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_UMA_1HIGH (0x00000004) +#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_UMA_4HIGH (0x00000005) + +/* + * NV0041_CTRL_SURFACE_INFO + * + * This structure represents a single 32bit surface value. Clients + * request a particular surface value by specifying a unique surface + * information index. + * + * Legal surface information index values are: + * NV0041_CTRL_SURFACE_INFO_INDEX_ATTRS + * This index is used to request the set of hw attributes associated + * with the surface. Each distinct attribute is represented by a + * single bit flag in the returned value. + * Legal flags values for this index are: + * NV0041_CTRL_SURFACE_INFO_ATTRS_COMPR + * This surface has compression resources bound to it. + * NV0041_CTRL_SURFACE_INFO_ATTRS_ZCULL + * This surface has zcull resources bound to it. 
+ * NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE_LO
+ * This index is used to request the low 32 bits of the physically
+ * allocated size (64 bit value) in units of bytes for the associated
+ * surface.
+ * NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE_HI
+ * This index is used to request the high 32 bits of the physically
+ * allocated size (64 bit value) in units of bytes for the associated
+ * surface.
+ * NV0041_CTRL_SURFACE_INFO_INDEX_COMPR_COVERAGE
+ * This index is used to request the compression coverage (if any)
+ * in units of 64K for the associated surface. A value of zero indicates
+ * there are no compression resources associated with the surface.
+ * Legal return values range from zero to a maximum number of 64K units
+ * that is GPU implementation dependent.
+ * NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE
+ * This index is used to request the physically allocated size in units
+ * of 4K (NV0041_CTRL_SURFACE_INFO_PHYS_SIZE_SCALE_FACTOR) for the
+ * associated surface. This interface is obsoleted by
+ * NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE_{LO,HI}.
+ * NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_ATTR
+ * This index is used to request the surface attribute field. The returned
+ * field value can be decoded using the NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_*
+ * DRF-style macros provided below.
+ * NV0041_CTRL_SURFACE_INFO_INDEX_ADDR_SPACE_TYPE
+ * This index is used to request the surface address space type.
+ * Returned values are described by NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE.
+ */
+typedef NVXXXX_CTRL_XXX_INFO NV0041_CTRL_SURFACE_INFO;
+
+/* valid surface info index values */
+#define NV0041_CTRL_SURFACE_INFO_INDEX_ATTRS (0x00000001)
+#define NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE_LO (0x00000002)
+#define NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE_HI (0x00000003)
+#define NV0041_CTRL_SURFACE_INFO_INDEX_COMPR_COVERAGE (0x00000005)
+#define NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE (0x00000007)
+#define NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_ATTR (0x00000008)
+#define NV0041_CTRL_SURFACE_INFO_INDEX_ADDR_SPACE_TYPE (0x00000009)
+
+/*
+ * This define indicates the scale factor from the reported physical size to
+ * the actual size in bytes. The scale factor keeps the interface field
+ * compact while still accounting for large surfaces. To get the actual
+ * size, use
+ * `(NvU64)reported_size * NV0041_CTRL_SURFACE_INFO_PHYS_SIZE_SCALE_FACTOR`.
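+ * For example, a reported value of 0x400 corresponds to
+ * 0x400 * 0x1000 = 0x400000 bytes (4 MiB).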
+ */ +#define NV0041_CTRL_SURFACE_INFO_PHYS_SIZE_SCALE_FACTOR (0x1000) + +/* valid surface info attr flags */ +#define NV0041_CTRL_SURFACE_INFO_ATTRS_COMPR (0x00000002) +#define NV0041_CTRL_SURFACE_INFO_ATTRS_ZCULL (0x00000004) + +/* Valid surface info page size */ +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE NVOS32_ATTR_PAGE_SIZE +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE_DEFAULT NVOS32_ATTR_PAGE_SIZE_DEFAULT +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE_4KB NVOS32_ATTR_PAGE_SIZE_4KB +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE_BIG NVOS32_ATTR_PAGE_SIZE_BIG +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE_HUGE NVOS32_ATTR_PAGE_SIZE_HUGE + +/* Valid surface info CPU coherency */ +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY NVOS32_ATTR_COHERENCY +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_UNCACHED NVOS32_ATTR_COHERENCY_UNCACHED +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_CACHED NVOS32_ATTR_COHERENCY_CACHED +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_WRITE_COMBINE NVOS32_ATTR_COHERENCY_WRITE_COMBINE +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_WRITE_THROUGH NVOS32_ATTR_COHERENCY_WRITE_THROUGH +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_WRITE_PROTECT NVOS32_ATTR_COHERENCY_WRITE_PROTECT +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_WRITE_BACK NVOS32_ATTR_COHERENCY_WRITE_BACK + +/* Valid surface info format */ +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_FORMAT NVOS32_ATTR_FORMAT +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_FORMAT_PITCH NVOS32_ATTR_FORMAT_PITCH +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_FORMAT_SWIZZLED NVOS32_ATTR_FORMAT_SWIZZLED +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_FORMAT_BLOCK_LINEAR NVOS32_ATTR_FORMAT_BLOCK_LINEAR + +/* + * NV0041_CTRL_CMD_GET_SURFACE_INFO + * + * This command returns surface information for the associated memory object. + * Requests to retrieve surface information use a list of one or more + * NV0041_CTRL_SURFACE_INFO structures. + * + * surfaceInfoListSize + * This field specifies the number of entries on the caller's + * surfaceInfoList. + * surfaceInfoList + * This field specifies a pointer in the caller's address space + * to the buffer into which the surface information is to be returned. + * This buffer must be at least as big as surfaceInfoListSize multiplied + * by the size of the NV0041_CTRL_SURFACE_INFO structure. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV0041_CTRL_CMD_GET_SURFACE_INFO (0x410110) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_SURFACE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_GET_SURFACE_INFO_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV0041_CTRL_GET_SURFACE_INFO_PARAMS { + NvU32 surfaceInfoListSize; + NV_DECLARE_ALIGNED(NvP64 surfaceInfoList, 8); +} NV0041_CTRL_GET_SURFACE_INFO_PARAMS; + +/* + * NV0041_CTRL_CMD_GET_SURFACE_COMPRESSION_COVERAGE + * + * This command returns the percentage of surface compression tag coverage. + * The value of 0 is returned if there are no tags associated. 
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NVOS_STATUS_BAD_OBJECT_HANDLE
+ * NVOS_STATUS_BAD_OBJECT_PARENT
+ * NVOS_STATUS_NOT_SUPPORTED
+ *
+ */
+#define NV0041_CTRL_CMD_GET_SURFACE_COMPRESSION_COVERAGE (0x410112) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS_MESSAGE_ID (0x12U)
+
+typedef struct NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS {
+    NvHandle hSubDevice; /* if non-zero, subdevice handle of local GPU */
+    NvU32    lineMin;
+    NvU32    lineMax;
+    NvU32    format;
+} NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS;
+
+/*
+ * NV0041_CTRL_CMD_GET_FBMEM_BUS_ADDR
+ *
+ * This command returns the BAR1 physical address of a
+ * memory mapping made using NvRmMapMemory().
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NVOS_STATUS_INVALID_DATA
+ * NV_ERR_INVALID_CLIENT
+ * NV_ERR_INVALID_OBJECT_HANDLE
+ *
+ */
+#define NV0041_CTRL_CMD_GET_FBMEM_BUS_ADDR (0x410114) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_FBMEM_BUS_ADDR_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_GET_FBMEM_BUS_ADDR_PARAMS_MESSAGE_ID (0x14U)
+
+typedef struct NV0041_CTRL_GET_FBMEM_BUS_ADDR_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pLinearAddress, 8); /* [in] Linear address of CPU mapping */
+    NV_DECLARE_ALIGNED(NvU64 busAddress, 8);     /* [out] BAR1 address */
+} NV0041_CTRL_GET_FBMEM_BUS_ADDR_PARAMS;
+
+/*
+ * NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE
+ *
+ * This command flushes a cache on the GPU through which all memory accesses
+ * go. The types of flushes supported by this API may not be supported by
+ * all hardware. Attempting an unsupported flush type will result in an
+ * error.
+ *
+ * flags
+ * Contains flags to control various aspects of the flush. Valid values
+ * are defined in NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS*. Not all
+ * flags are valid for all GPUs.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ * NVOS_STATUS_INVALID_ARGUMENT
+ * NVOS_STATUS_INVALID_STATE
+ *
+ * See Also:
+ * NV0080_CTRL_CMD_DMA_FLUSH
+ * Performs flush operations in broadcast for the GPU cache and other
+ * hardware engines. Use this call if you want to flush all GPU caches
+ * in a broadcast device.
+ * NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE
+ * Flushes the entire GPU cache or a set of physical addresses (if the
+ * hardware supports it). Use this call if you want to flush a set of
+ * addresses or the entire GPU cache in unicast mode.
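+ *
+ * For illustration, a combined write-back plus invalidate request can be
+ * composed with the DRF-style field macros below (sketch only; FLD_SET_DRF
+ * comes from nvmisc.h, NvRmControl(hClient, hObject, cmd, pParams,
+ * paramsSize) is an assumed control entry point, and hMemory is a
+ * caller-supplied memory object handle):
+ *
+ *   NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS f = { 0 };
+ *   f.flags = FLD_SET_DRF(0041, _CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS,
+ *                         _WRITE_BACK, _YES, f.flags);
+ *   f.flags = FLD_SET_DRF(0041, _CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS,
+ *                         _INVALIDATE, _YES, f.flags);
+ *   NvRmControl(hClient, hMemory, NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE,
+ *               &f, sizeof(f));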
+ *
+ */
+#define NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE (0x410116) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS_MESSAGE_ID (0x16U)
+
+typedef struct NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS {
+    NvU32 flags;
+} NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS;
+
+#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK 0:0
+#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK_NO (0x00000000)
+#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK_YES (0x00000001)
+#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_INVALIDATE 1:1
+#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_INVALIDATE_NO (0x00000000)
+#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_INVALIDATE_YES (0x00000001)
+
+/*
+ * NV0041_CTRL_CMD_GET_MEM_PAGE_SIZE
+ *
+ * This command may be used to get the memory page size.
+ *
+ * Parameters:
+ * pageSize [OUT]
+ * pageSize of the associated memory descriptor
+ *
+ * Possible status values are:
+ * NV_OK
+ * NV_ERR_INVALID_OBJECT_HANDLE
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV0041_CTRL_CMD_GET_MEM_PAGE_SIZE (0x410118) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS_MESSAGE_ID (0x18U)
+
+typedef struct NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 pageSize, 8); /* [out] - page size */
+} NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS;
+
+/*
+ * NV0041_CTRL_CMD_UPDATE_SURFACE_COMPRESSION
+ *
+ * Acquire/release compression for surface
+ *
+ * Parameters:
+ * bRelease [IN]
+ * true = release compression; false = acquire compression
+ */
+#define NV0041_CTRL_CMD_UPDATE_SURFACE_COMPRESSION (0x410119) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_UPDATE_SURFACE_COMPRESSION_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_UPDATE_SURFACE_COMPRESSION_PARAMS_MESSAGE_ID (0x19U)
+
+typedef struct NV0041_CTRL_UPDATE_SURFACE_COMPRESSION_PARAMS {
+    NvBool bRelease; /* [in] - acquire/release setting */
+} NV0041_CTRL_UPDATE_SURFACE_COMPRESSION_PARAMS;
+
+#define NV0041_CTRL_CMD_PRINT_LABELS_PARAMS_MESSAGE_ID (0x50U)
+
+typedef struct NV0041_CTRL_CMD_PRINT_LABELS_PARAMS {
+    NvU32 tag; /* [in] */
+} NV0041_CTRL_CMD_PRINT_LABELS_PARAMS;
+#define NV0041_CTRL_CMD_SET_LABEL_PARAMS_MESSAGE_ID (0x51U)
+
+typedef struct NV0041_CTRL_CMD_SET_LABEL_PARAMS {
+    NvU32 tag; /* [in] */
+} NV0041_CTRL_CMD_SET_LABEL_PARAMS;
+#define NV0041_CTRL_CMD_SET_LABEL (0x410151) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_CMD_SET_LABEL_PARAMS_MESSAGE_ID" */
+#define NV0041_CTRL_CMD_GET_LABEL (0x410152) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_CMD_GET_LABEL_PARAMS_MESSAGE_ID" */
+#define NV0041_CTRL_CMD_GET_LABEL_PARAMS_MESSAGE_ID (0x52U)
+
+typedef struct NV0041_CTRL_CMD_GET_LABEL_PARAMS {
+    NvU32 tag; /* [in] */
+} NV0041_CTRL_CMD_GET_LABEL_PARAMS;
+
+/*
+ * NV0041_CTRL_CMD_SET_TAG
+ *
+ * This command sets the memory allocation tag used for debugging.
+ * Every client has its own memory allocation tag, and the tag is copied
+ * when an object is duped. This control can be used to change the tag of
+ * shared allocations.
+ */ +#define NV0041_CTRL_CMD_SET_TAG (0x410120) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_CMD_SET_TAG_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_CMD_SET_TAG_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV0041_CTRL_CMD_SET_TAG_PARAMS { + NvU32 tag; /* [in] */ +} NV0041_CTRL_CMD_SET_TAG_PARAMS; + +/* + * NV0041_CTRL_CMD_GET_TAG + * + * This command returns memory allocation tag used for debugging. + */ +#define NV0041_CTRL_CMD_GET_TAG (0x410121) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_CMD_GET_TAG_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_CMD_GET_TAG_PARAMS_MESSAGE_ID (0x21U) + +typedef struct NV0041_CTRL_CMD_GET_TAG_PARAMS { + NvU32 tag; /* [out] */ +} NV0041_CTRL_CMD_GET_TAG_PARAMS; + +/* + * NV0041_CTRL_CMD_MAP_MEMORY_FOR_GPU_ACCESS + * + * Map system memory into IOMMU VAS of a GPU described by hSubdevice + * Returns the address + * + */ +#define NV0041_CTRL_CMD_MAP_MEMORY_FOR_GPU_ACCESS (0x410122) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_MAP_MEMORY_FOR_GPU_ACCESS_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_MAP_MEMORY_FOR_GPU_ACCESS_PARAMS_MESSAGE_ID (0x22U) + +typedef struct NV0041_CTRL_MAP_MEMORY_FOR_GPU_ACCESS_PARAMS { + NvHandle hSubdevice; + NV_DECLARE_ALIGNED(NvU64 address, 8); +} NV0041_CTRL_MAP_MEMORY_FOR_GPU_ACCESS_PARAMS; + + +/* + * NV0041_CTRL_CMD_UNMAP_MEMORY_FOR_GPU_ACCESS + * + * See NV0041_CTRL_CMD_MAP_MEMORY_FOR_GPU_ACCESS + * + */ +#define NV0041_CTRL_CMD_UNMAP_MEMORY_FOR_GPU_ACCESS (0x410153) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_UNMAP_MEMORY_FOR_GPU_ACCESS_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_UNMAP_MEMORY_FOR_GPU_ACCESS_PARAMS_MESSAGE_ID (0x53U) + +typedef struct NV0041_CTRL_UNMAP_MEMORY_FOR_GPU_ACCESS_PARAMS { + NvHandle hSubdevice; +} NV0041_CTRL_UNMAP_MEMORY_FOR_GPU_ACCESS_PARAMS; + +/* _ctrl0041_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073.h new file mode 100644 index 0000000..9987bae --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2015,2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl0073.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* category-specific includes */ +#include "ctrl0073/ctrl0073system.h" +#include "ctrl0073/ctrl0073specific.h" +#include "ctrl0073/ctrl0073stereo.h" +#include "ctrl0073/ctrl0073event.h" +#include "ctrl0073/ctrl0073internal.h" +#include "ctrl0073/ctrl0073dfp.h" +#include "ctrl0073/ctrl0073dp.h" +#include "ctrl0073/ctrl0073svp.h" +#include "ctrl0073/ctrl0073dpu.h" +#include "ctrl0073/ctrl0073psr.h" +#include "ctrl0073/ctrl0073common.h" diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h new file mode 100644 index 0000000..1f687aa --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073base.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NV04_DISPLAY_COMMON control commands and parameters */ + +#define NV0073_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0073, NV0073_CTRL_##cat, idx) + +/* NV04_DISPLAY_COMMON command categories (6bits) */ +#define NV0073_CTRL_RESERVED (0x00U) +#define NV0073_CTRL_SYSTEM (0x01U) +#define NV0073_CTRL_SPECIFIC (0x02U) +#define NV0073_CTRL_EVENT (0x03U) +#define NV0073_CTRL_INTERNAL (0x04U) +#define NV0073_CTRL_COMMON (0x05U) +#define NV0073_CTRL_DFP (0x11U) +#define NV0073_CTRL_DP (0x13U) +#define NV0073_CTRL_SVP (0x14U) +#define NV0073_CTRL_DPU (0x15U) +#define NV0073_CTRL_PSR (0x16U) +#define NV0073_CTRL_STEREO (0x17U) + +/* + * NV0073_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0073_CTRL_CMD_NULL (0x730000U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* _ctrl0073base_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h new file mode 100644 index 0000000..153cfb8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h @@ -0,0 +1,119 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0073/ctrl0073common.finn
+//
+
+
+
+/*
+ * DSC caps -
+ * bDscSupported
+ * Whether the GPU supports DSC.
+ *
+ * encoderColorFormatMask
+ * Mask of all color formats for which DSC
+ * encoding is supported by the GPU.
+ *
+ * lineBufferSizeKB
+ * Size of the line buffer.
+ *
+ * rateBufferSizeKB
+ * Size of the rate buffer per slice.
+ *
+ * bitsPerPixelPrecision
+ * Bits per pixel precision for DSC, e.g. 1/16, 1/8, 1/4, 1/2, 1 bpp.
+ *
+ * maxNumHztSlices
+ * Maximum number of horizontal slices supported by the DSC encoder.
+ *
+ * lineBufferBitDepth
+ * Bit depth used by the GPU to store the reconstructed pixels within
+ * the line buffer.
+ */
+#define NV0073_CTRL_CMD_DSC_CAP_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
+    NvBool bDscSupported;
+    NvU32  encoderColorFormatMask;
+    NvU32  lineBufferSizeKB;
+    NvU32  rateBufferSizeKB;
+    NvU32  bitsPerPixelPrecision;
+    NvU32  maxNumHztSlices;
+    NvU32  lineBufferBitDepth;
+} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * cmd
+ * This parameter is an input to this command.
+ * Here are the current defined fields:
+ * NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_CMD_POWER
+ * Set to specify what operation to run.
+ * NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_CMD_POWER_UP
+ * Request to power up the pad.
+ * NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_CMD_POWER_DOWN
+ * Request to power down the pad.
+ * linkBw
+ * This parameter is used to pass in the link bandwidth required to run the
+ * power-up sequence. Refer to enum DM_FRL_LINK_RATE_GBPS for valid values.
+ * laneCount
+ * This parameter is used to pass the lane count.
+ * sorIndex
+ * This parameter is used to pass the SOR index.
+ * padlinkIndex
+ * This parameter is used to pass the padlink index for the primary link.
+ * Refer to enum DFPPADLINK for valid index values for Link A~F.
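+ *
+ * A power-up request might be composed as in this sketch (illustrative only;
+ * NvRmControl(hClient, hObject, cmd, pParams, paramsSize) is an assumed
+ * control entry point, hDisplayCommon is a caller-supplied NV04_DISPLAY_COMMON
+ * handle, and the linkBw/sorIndex/padlinkIndex values come from the enums
+ * referenced above, which are not shown here):
+ *
+ *   NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_PARAMS p = { 0 };
+ *   p.subDeviceInstance = 0;
+ *   p.cmd = NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_CMD_POWER_UP; // bit 0:0
+ *   p.linkBw = linkBw;
+ *   p.laneCount = 4;
+ *   p.sorIndex = sorIndex;
+ *   p.padlinkIndex = padlinkIndex;
+ *   NvRmControl(hClient, hDisplayCommon, NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD,
+ *               &p, sizeof(p));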
+ */
+
+#define NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD (0x730502U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_COMMON_INTERFACE_ID << 8) | NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 cmd;
+    NvU32 linkBw;
+    NvU32 laneCount;
+    NvU32 sorIndex;
+    NvU32 padlinkIndex;
+} NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_PARAMS;
+
+#define NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_CMD_POWER 0:0
+#define NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_CMD_POWER_UP (0x00000000U)
+#define NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_CMD_POWER_DOWN (0x00000001U)
+
+/* _ctrl0073common_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
new file mode 100644
index 0000000..3718b48
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
@@ -0,0 +1,1665 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0073/ctrl0073dfp.finn
+//
+
+#include "ctrl/ctrl0073/ctrl0073base.h"
+#include "ctrl/ctrl0073/ctrl0073common.h"
+
+#include "nvcfg_sdk.h"
+
+/* NV04_DISPLAY_COMMON dfp-display-specific control commands and parameters */
+
+/*
+ * NV0073_CTRL_CMD_DFP_GET_INFO
+ *
+ * This command can be used to determine the associated display type for
+ * the specified displayId.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * displayId
+ * This parameter specifies the ID of the display for which the dfp
+ * caps should be returned. The display ID must be a dfp display
+ * as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command.
+ * If more than one displayId bit is set or the displayId is not a dfp,
+ * this call will return NV_ERR_INVALID_ARGUMENT.
+ * flags
+ * This parameter returns the information specific to this dfp. Here are
+ *     Here are the possible fields:
+ *       NV0073_CTRL_DFP_FLAGS_SIGNAL
+ *         This specifies the type of signal used for this dfp.
+ *       NV0073_CTRL_DFP_FLAGS_LANE
+ *         This specifies whether the board supports 1, 2, or 4 lanes
+ *         for DISPLAYPORT signals.
+ *       NV0073_CTRL_DFP_FLAGS_LIMIT
+ *         Some GPUs were not qualified to run internal TMDS except at 60 Hz
+ *         refresh rates. So, if LIMIT_60HZ_RR is set, then the client must
+ *         make sure to only allow 60 Hz refresh rate modes to the OS/User.
+ *       NV0073_CTRL_DFP_FLAGS_SLI_SCALER
+ *         While running in SLI, if SLI_SCALER_DISABLE is set, the GPU cannot
+ *         scale any resolutions. So, the output timing must match the
+ *         memory footprint.
+ *       NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE
+ *         This specifies whether the DFP displayId is capable of
+ *         transmitting HDMI.
+ *       NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE
+ *         This specifies whether the displayId is capable of sending a
+ *         limited color range out from the board.
+ *       NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE
+ *         This specifies whether the displayId is capable of
+ *         auto-configuring the color range.
+ *       NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE
+ *         This specifies whether the displayId is capable of sending the
+ *         YCBCR422 color format out from the board.
+ *       NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE
+ *         This specifies whether the displayId is capable of sending the
+ *         YCBCR444 color format out from the board.
+ *       NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR
+ *         This specifies whether the displayId is a DP connector routed to
+ *         a USB Type-C port.
+ *       NV0073_CTRL_DFP_FLAGS_DP_LINK_BANDWIDTH
+ *         This specifies the max link rate supported by the displayId, if
+ *         the DFP is a DisplayPort device.
+ *       NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED
+ *         This specifies whether the DFP displayId is allowed to transmit
+ *         HDMI based on the VBIOS settings.
+ *       NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT
+ *         This specifies whether the DFP displayId is actually an embedded
+ *         display port based on VBIOS connector information AND ASSR cap.
+ *       NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT
+ *         This specifies whether the DFP displayId must be trained to RBR
+ *         mode (if it is using DP protocol) whenever possible.
+ *       NV0073_CTRL_DFP_FLAGS_LINK
+ *         This specifies whether the board supports single or dual links
+ *         for TMDS, LVDS, and SDI signals.
+ *       NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED
+ *         This specifies if PostCursor2 is disabled in the VBIOS.
+ *       NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID
+ *         This indicates whether this SOR uses DSI-A, DSI-B or both
+ *         (ganged mode).
+ *       NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE
+ *         This indicates whether this DFP supports DDS (NV dynamic display
+ *         mux).
+ *   UHBRSupportedByDfp
+ *     Bitmask to specify the UHBR link rates supported by this dfp.
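+ *
+ * For illustration only -- a hedged sketch of a query. The
+ * NvRmControl()-style entry point and the handles below are placeholders
+ * (the actual RMAPI entry point depends on the client environment); the
+ * DRF_VAL() helper from nvmisc.h is used to decode the packed flag fields:
+ *
+ *     NV0073_CTRL_DFP_GET_INFO_PARAMS params = { 0 };
+ *     params.subDeviceInstance = 0;
+ *     params.displayId = displayId;   // exactly one DFP displayId bit set
+ *
+ *     NV_STATUS status = NvRmControl(hClient, hDispCommon,
+ *                                    NV0073_CTRL_CMD_DFP_GET_INFO,
+ *                                    &params, sizeof(params));
+ *
+ *     if (status == NV_OK &&
+ *         DRF_VAL(0073, _CTRL_DFP_FLAGS, _SIGNAL, params.flags) ==
+ *             NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT)
+ *     {
+ *         // e.g., inspect _DP_LINK_BW and params.UHBRSupportedByDfp next
+ *     }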
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID (0x40U) + +typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 UHBRSupportedByDfp; +} NV0073_CTRL_DFP_GET_INFO_PARAMS; + +/* valid display types */ +#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0 +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U) +#define NV0073_CTRL_DFP_FLAGS_LANE 5:3 +#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U) +#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6 +#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7 +#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8 +#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9 +#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10 +#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11 +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12 +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR 13:13 +#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14 +#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15 +#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16 +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17 +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U) 
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS            (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS            (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK                           21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE                      (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE                    (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL                      (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID               22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE         (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE          (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID                  24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE         (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A            (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B            (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED       (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED       25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE  (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT          29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE            30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE      (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE       (0x00000001U)
+
+
+
+/*
+ * NV0073_CTRL_CMD_DFP_GET_DP2TMDS_DONGLE_INFO
+ *
+ * This command can be used to determine information about dongles attached
+ * to a displayport connection.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the dfp display which owns the
+ *     panel power to adjust. The display ID must be a dfp display
+ *     as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command.
+ *     If more than one displayId bit is set or the displayId is not a dfp,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ *   flags
+ *     This parameter provides state information about the dongle
+ *     attachments.
+ *       NV0073_CTRL_DFP_GET_DP2TMDS_DONGLE_INFO_FLAGS_CAPABLE
+ *         Specifies if the connection is capable of a dongle. This field
+ *         returns false for all signal types except those capable of
+ *         outputting TMDS. Even then, if a GPIO is not defined, false will
+ *         also be returned.
+ *       NV0073_CTRL_DFP_GET_DP2TMDS_DONGLE_INFO_FLAGS_ATTACHED
+ *         When attached, this value specifies that a dongle is detected and
+ *         attached. The client should read the _TYPE field to determine
+ *         if it is a dp2hdmi or dp2dvi dongle.
+ *       NV0073_CTRL_DFP_GET_DP2TMDS_DONGLE_INFO_FLAGS_TYPE
+ *         _DP2DVI:  no response to i2cAddr 0x80 per DP interop guidelines.
+ *                   Clients MUST avoid outputting HDMI even if capable.
+ *         _DP2HDMI: dongle responds to i2cAddr 0x80 per DP interop
+ *                   guidelines. The client is allowed to output HDMI when
+ *                   possible.
+ *         _LFH_DVI: DMS59-DVI breakout dongle is in use.
+ *         _LFH_VGA: DMS59-VGA breakout dongle is in use.
+ *       NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE_TYPE
+ *         _1: Max TMDS clock rate is 165 MHz for both DVI and HDMI.
+ *         _2: Max TMDS clock rate will be specified in the dongle
+ *             address space at device address 0x80.
+ *             DVI is up to 165 MHz.
+ *             HDMI is up to 300 MHz.
+ *             There are type 2 devices that support beyond 600 MHz,
+ *             though not defined in the spec.
+ *   maxTmdsClkRateHz
+ *     This defines the max TMDS clock rate for a dual-mode adaptor, in Hz.
+ */
+#define NV0073_CTRL_CMD_DFP_GET_DISPLAYPORT_DONGLE_INFO (0x731142U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS_MESSAGE_ID (0x42U)
+
+typedef struct NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 flags;
+    NvU32 maxTmdsClkRateHz;
+} NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_CAPABLE               0:0
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_CAPABLE_FALSE         (0x00000000U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_CAPABLE_TRUE          (0x00000001U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_ATTACHED              1:1
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_ATTACHED_FALSE        (0x00000000U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_ATTACHED_TRUE         (0x00000001U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE                  7:4
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE_DP2DVI           (0x00000000U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE_DP2HDMI          (0x00000001U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE_LFH_DVI          (0x00000002U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE_LFH_VGA          (0x00000003U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE_TYPE   8:8
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE_TYPE_1 (0x00000000U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE_TYPE_2 (0x00000001U)
+
+/*
+ * NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS
+ *
+ * This command is used to inform hardware of the receiver's audio
+ * capabilities using the new EDID Like Data (ELD) memory structure. The ELD
+ * memory structure is read by the audio driver by issuing the ELD Data
+ * command verb. This mechanism is used for passing the sink device's audio
+ * EDID information from graphics software to audio software. The ELD
+ * contents will contain a subset of the sink device's EDID information.
+ * The client should inform hardware at initial boot, at a modeset, and
+ * whenever a hotplug event occurs.
+ *
+ *   displayId
+ *     This parameter indicates the digital display device's
+ *     mask. This comes as input to this command.
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   numELDSize
+ *     This parameter specifies how many bytes of data RM should write to
+ *     the ELD buffer. Section 7.3.3.36 of the ECN specifies that the ELD
+ *     buffer size is zero-based. The HDAudio driver will then use this
+ *     information to determine how many bytes of the ELD buffer it should
+ *     read. The maximum size of the buffer is 96 bytes.
+ *   bufferELD
+ *     This buffer contains data as defined in the ECR HDMI ELD memory
+ *     structure. Refer to the ELD Memory Structure Specification for more
+ *     details.
+ *     The format should be:
+ *       - Header block, fixed at 4 bytes.
+ *         The header block contains the ELD version and baseline ELD length
+ *         as well as some reserved fields.
+ *       - Baseline block for audio descriptors, 76 bytes
+ *         (15 SAD; each SAD=3 bytes requiring 45 bytes with 31 bytes to
+ *         spare), as well as some other bits used to denote the CEA version,
+ *         the speaker allocation data, monitor name, connector type, and
+ *         hdcp capabilities.
+ *       - Vendor specific block of 16 bytes.
+ *   maxFreqSupported
+ *     Supply the maximum frequency supported for the overall audio caps.
+ *     This value should match the CEA861-X defines for sample freq.
+ *   ctrl:
+ *     NV0073_CTRL_DFP_SET_ELD_AUDIO_CAPS_CTRL_PD:
+ *       Specifies the presence detect of the receiver. On a hotplug
+ *       or modeset, the client should set this bit to TRUE.
+ *     NV0073_CTRL_DFP_SET_ELD_AUDIO_CAPS_CTRL_ELDV:
+ *       Specifies whether the ELD buffer contents are valid.
+ *       An intrinsic unsolicited response (UR) is generated whenever
+ *       the ELDV bit changes in value and PD=1. When _PD=1 (hotplug),
+ *       RM will set the ELDV bit after the ELD buffer contents are written.
+ *       If the _ELDV bit is set to false, such as during an unplug, then
+ *       the contents of the ELD buffer will be cleared.
+ *   deviceEntry:
+ *     The deviceEntry number from which the SF should accept packets.
+ *     _NONE if disabling audio.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U
+
+#define NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID (0x44U)
+
+typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 numELDSize;
+    NvU8  bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER];
+    NvU32 maxFreqSupported;
+    NvU32 ctrl;
+    NvU32 deviceEntry;
+} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS;
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0320KHZ (0x00000001U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0441KHZ (0x00000002U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0480KHZ (0x00000003U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0882KHZ (0x00000004U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0960KHZ (0x00000005U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_1764KHZ (0x00000006U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_1920KHZ (0x00000007U)
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD         0:0
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE   (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE    (0x00000001U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV       1:1
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE  (0x00000001U)
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_0    (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_1    (0x00000001U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_2    (0x00000002U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_3    (0x00000003U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_NONE (0x00000007U)
+
+
+
+/*
+ * NV0073_CTRL_CMD_DFP_GET_SPREAD_SPECTRUM
+ *
+ * This command is used to get spread spectrum status for a display device.
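+ *
+ * For illustration only (the NvRmControl()-style entry point and the
+ * handles below are placeholders, not defined in this header):
+ *
+ *     NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS params = { 0 };
+ *     params.displayId = displayId;
+ *     NV_STATUS status = NvRmControl(hClient, hDispCommon,
+ *                                    NV0073_CTRL_CMD_DFP_GET_SPREAD_SPECTRUM,
+ *                                    &params, sizeof(params));
+ *     // On NV_OK, params.enabled reports whether spread spectrum is on.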
+ *
+ *   displayId
+ *     Display ID for which the spread spectrum status is needed.
+ *   enabled
+ *     Returned spread spectrum state (checked in the VBIOS by default).
+ */
+
+#define NV0073_CTRL_CMD_DFP_GET_SPREAD_SPECTRUM (0x73114cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS_MESSAGE_ID (0x4CU)
+
+typedef struct NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS {
+    NvU32  displayId;
+    NvBool enabled;
+} NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DFP_UPDATE_DYNAMIC_DFP_CACHE
+ *
+ * Update the Dynamic DFP cache with Bcaps read from the remote display.
+ * Also updates hdcpFlags and the GPU HDCP-capable flags in the DFP.
+ * If bResetDfp is true, all the flags are reset before making changes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0073_CTRL_CMD_DFP_UPDATE_DYNAMIC_DFP_CACHE (0x73114eU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS_MESSAGE_ID (0x4EU)
+
+typedef struct NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  headIndex;
+    NvU8   bcaps;
+    NvU8   bksv[5];
+    NvU32  hdcpFlags;
+    NvBool bHdcpCapable;
+    NvBool bResetDfp;
+    NvU8   updateMask;
+} NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS;
+
+#define NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BCAPS 0x01U
+#define NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BKSV  0x02U
+#define NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_FLAGS 0x03U
+
+/*
+ * NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE
+ *
+ * This command sets the audio enable state of the DFP. When disabled,
+ * no audio stream packets or audio timestamp packets will be sent.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the display for which the dfp
+ *     audio should be enabled or disabled. The display ID must be a dfp
+ *     display. If the displayId is not a dfp, this call will return
+ *     NV_ERR_INVALID_ARGUMENT.
+ *   enable
+ *     This parameter specifies whether to enable (NV_TRUE) or disable
+ *     (NV_FALSE) audio to the display.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID (0x50U)
+
+typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool enable;
+} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS;
+
+
+/*
+ * NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG
+ *
+ * This variable specifies the default/primary/secondary SOR sublinks to be
+ * configured.
+ * These access modes are:
+ *
+ *   NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_NONE
+ *     Default link config.
+ *   NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_PRIMARY_SOR_LINK
+ *     Primary SOR sublink to be configured.
+ *   NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_SECONDARY_SOR_LINK
+ *     Secondary SOR sublink to be configured.
+ */
+typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG;
+
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_NONE               (0x0U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_PRIMARY_SOR_LINK   (0x1U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_SECONDARY_SOR_LINK (0x2U)
+
+/*
+ * NV0073_CTRL_DFP_ASSIGN_SOR_INFO
+ *
+ * This structure describes info about an assigned SOR.
+ *
+ *   displayMask
+ *     The displayMask for the SOR corresponding to its HW routings.
+ *   sorType
+ *     This parameter specifies the SOR type.
+ *     Here are the currently defined fields:
+ *       NV0073_CTRL_DFP_SOR_TYPE_NONE
+ *         Unallocated SOR.
+ *       NV0073_CTRL_DFP_SOR_TYPE_2H1OR_PRIMARY
+ *         Primary SOR for a 2H1OR stream.
+ *       NV0073_CTRL_DFP_SOR_TYPE_2H1OR_SECONDARY
+ *         Secondary SOR for a 2H1OR stream.
+ *       NV0073_CTRL_DFP_SOR_TYPE_SINGLE
+ *         Default single SOR.
+ *     Note - sorType should only be referred to in order to identify the
+ *     2H1OR Primary and Secondary SORs.
+ *
+ */
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO {
+    NvU32 displayMask;
+    NvU32 sorType;
+} NV0073_CTRL_DFP_ASSIGN_SOR_INFO;
+
+#define NV0073_CTRL_DFP_SOR_TYPE_NONE            (0x00000000U)
+#define NV0073_CTRL_DFP_SOR_TYPE_SINGLE          (0x00000001U)
+#define NV0073_CTRL_DFP_SOR_TYPE_2H1OR_PRIMARY   (0x00000002U)
+#define NV0073_CTRL_DFP_SOR_TYPE_2H1OR_SECONDARY (0x00000003U)
+
+/*
+ * NV0073_CTRL_CMD_DFP_ASSIGN_SOR
+ *
+ * This command is used by clients to assign an SOR to a DFP for CROSS-BAR
+ * when the default SOR-DFP routing that comes from the VBIOS is not
+ * considered. An SOR shall be assigned to a DFP at runtime. This call should
+ * be made before a modeset is done on any dfp display, and also before link
+ * training for DP displays.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     DisplayId of the primary display for which the SOR is to be assigned.
+ *     However, if displayId is 0 then RM shall return the XBAR config it has
+ *     stored in its structures.
+ *   sorExcludeMask
+ *     sorMask of the SORs which should not be used for assignment. If this
+ *     is 0, then SW is free to allocate any available SOR.
+ *   slaveDisplayId
+ *     displayId of the slave device in the case of dualSST mode. This ctrl
+ *     call will allocate SORs to both the slave and the master if
+ *     slaveDisplayId is set.
+ *   forceSublinkConfig
+ *     Forces RM to configure the primary or secondary SOR sublink on the
+ *     given displayId. If not set, then RM will do the default
+ *     configurations.
+ *   bIs2Head1Or
+ *     Specifies that SOR allocation is required for 2 head 1 OR. This will
+ *     allocate 2 SORs for the same displayId - one Master and one Slave.
+ *     The Slave SOR would be disconnected from any padlink and get its
+ *     feedback clock from the Master SOR's padlink.
+ *   sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS]
+ *     Returns the displayMask for all SORs corresponding to their HW
+ *     routings.
+ *   sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS]
+ *     Returns the displayMask for all SORs corresponding to their HW
+ *     routings, along with the SOR type to identify the 2H1OR Primary and
+ *     Secondary SORs.
+ *     The SOR type is identified by NV0073_CTRL_DFP_SOR_TYPE.
+ *     sorAssignListWithTag would look as below:
+ *       sorAssignListWithTag[] = {   // { DisplayMask, SOR Type }
+ *           {0x100, SECONDARY_SOR},
+ *           {0x200, SINGLE_SOR},
+ *           {0x100, PRIMARY_SOR},
+ *           {0,     NONE}
+ *       }
+ *     Here, for display id = 0x100, SOR2 is Primary and SOR0 is Secondary.
+ *     Note - the sorAssignList parameter would be removed after Bug
+ *     200749158 is resolved.
+ *   reservedSorMask
+ *     Returns the sorMask reserved for the internal panels.
+ *   flags
+ *     Other detail settings.
+ *       _AUDIO_OPTIMAL: Client requests trying to get an audio SOR if
+ *                       possible. If there is no audio-capable SOR and the
+ *                       OD is HDMI/DP, RM will fail the control call.
+ *       _AUDIO_DEFAULT: RM does not check the audio capability of the SOR.
+ *
+ *       _ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES : RM returns an active SOR which
+ *                                           is not audio capable.
+ *       _ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO  : RM is not returning an 'active
+ *                                           non-audio-capable SOR'.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U
+
+#define NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID (0x52U)
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU8   sorExcludeMask;
+    NvU32  slaveDisplayId;
+    NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig;
+    NvBool bIs2Head1Or;
+    NvU32  sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+    NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+    NvU8   reservedSorMask;
+    NvU32  flags;
+} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS;
+
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO                            0:0
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL                    (0x00000001U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT                    (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE     1:1
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO  (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U)
+
+/*
+* NV0073_CTRL_CMD_DFP_GET_PADLINK_MASK
+*
+* This command will only be used by chipTB tests to get the padlinks
+* corresponding to the given displayId. RM gets this information from the
+* VBIOS. This control call is only for verification purposes.
+*
+*   subDeviceInstance
+*     This parameter specifies the subdevice instance within the
+*     NV04_DISPLAY_COMMON parent device to which the operation should be
+*     directed. This parameter must specify a value between zero and the
+*     total number of subdevices within the parent device. This parameter
+*     should be set to zero for default behavior.
+*   displayId
+*     DisplayId of the display for which the client needs the analog link
+*     mask.
+*   padlinkMask
+*     analogLinkMask for the given displayId. The value returned is
+*     0xffffffff if the given displayId is invalid; otherwise RM returns the
+*     corresponding padlinkMask.
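+*
+* For illustration only (verification environments; the NvRmControl()-style
+* entry point and the handles below are placeholders, not defined in this
+* header):
+*
+*     NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS params = { 0 };
+*     params.subDeviceInstance = 0;
+*     params.displayId = displayId;
+*     NV_STATUS status = NvRmControl(hClient, hDispCommon,
+*                                    NV0073_CTRL_CMD_DFP_GET_PADLINK_MASK,
+*                                    &params, sizeof(params));
+*     // params.padlinkMask == 0xFFFFFFFF indicates an invalid displayId.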
+*
+* Possible status values returned are:
+*   NV_OK
+*   NV_ERR_INVALID_ARGUMENT
+*   NV_ERR_NOT_SUPPORTED
+*/
+
+
+#define NV0073_CTRL_CMD_DFP_GET_PADLINK_MASK (0x731153U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS_MESSAGE_ID (0x53U)
+
+typedef struct NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 padlinkMask;
+} NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE
+ * This enum defines the functions that are supported and for which a
+ * corresponding GPIO pin number could be retrieved.
+ * Values are copied from objgpio.h GPIO_FUNC_TYPE_LCD_*. Please keep the
+ * values in sync between the two files.
+ */
+
+typedef enum NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE {
+    // GPIO types of LCD GPIO functions common to all internal panels
+    NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_BACKLIGHT = 268435456,
+    NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_POWER = 285212672,
+    NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_POWER_OK = 301989888,
+    NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_SELF_TEST = 318767104,
+    NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_LAMP_STATUS = 335544320,
+    NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_BRIGHTNESS = 352321536,
+} NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE;
+
+/*
+ * NV0073_CTRL_CMD_DFP_GET_LCD_GPIO_PIN_NUM
+ *
+ * This command can be used to get the GPIO pin number that corresponds to
+ * one of the LCD functions.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the dfp display.
+ *     If more than one displayId bit is set or the displayId is not a dfp,
+ *     this call will return NVOS_STATUS_ERROR_INVALID_ARGUMENT.
+ *   funcType
+ *     The LCD function for which the GPIO pin number is needed.
+ *   lcdGpioPinNum
+ *     The GPIO pin number that corresponds to the LCD function.
+ *
+ */
+#define NV0073_CTRL_CMD_DFP_GET_LCD_GPIO_PIN_NUM (0x731154U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS_MESSAGE_ID (0x54U)
+
+typedef struct NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE funcType;
+    NvU32 lcdGpioPinNum;
+} NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_DFP_CONFIG_TWO_HEAD_ONE_OR
+ *
+ * This command is used for configuration of 2 head 1 OR.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     Display ID of the panel for which Two Head One OR is going to be used.
+ *   bEnable
+ *     Enable/Disable 2 Head 1 OR.
+ *   masterSorIdx
+ *     Master SOR index which will send pixels to the panel.
+ *   slaveSorIdx
+ *     Slave SOR index which will take the feedback clock from the Master
+ *     SOR's padlink.
+ *
+ * Possible status values returned are:
+ *   NVOS_STATUS_SUCCESS
+ *   NVOS_STATUS_ERROR_INVALID_ARGUMENT
+ *   NVOS_STATUS_ERROR_NOT_SUPPORTED
+ */
+
+
+#define NV0073_CTRL_CMD_DFP_CONFIG_TWO_HEAD_ONE_OR (0x731156U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS_MESSAGE_ID (0x56U)
+
+typedef struct NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bEnable;
+    NvU32  masterSorIdx;
+    NvU32  slaveSorIdx;
+} NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DFP_DSC_CRC_CONTROL
+ *
+ * This command is used to enable/disable CRC on the GPU or to query the
+ * registers related to it.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   headIndex
+ *     Index of the head.
+ *   cmd
+ *     Specifies whether setup or query is performed.
+ *   bEnable
+ *     Enable or disable CRC on the GPU.
+ *   gpuCrc0
+ *     0-indexed CRC register of the GPU.
+ *   gpuCrc1
+ *     1-indexed CRC register of the GPU.
+ *   gpuCrc2
+ *     2-indexed CRC register of the GPU.
+ *
+ * Possible status values returned are:
+ *   NVOS_STATUS_SUCCESS
+ *   NVOS_STATUS_ERROR_NOT_SUPPORTED
+ */
+
+
+#define NV0073_CTRL_CMD_DFP_DSC_CRC_CONTROL (0x731157U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS_MESSAGE_ID (0x57U)
+
+typedef struct NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  headIndex;
+    NvU32  cmd;
+    NvBool bEnable;
+    NvU16  gpuCrc0;
+    NvU16  gpuCrc1;
+    NvU16  gpuCrc2;
+} NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS;
+
+#define NV0073_CTRL_DP_CRC_CONTROL_CMD       0:0
+#define NV0073_CTRL_DP_CRC_CONTROL_CMD_SETUP (0x00000000U)
+#define NV0073_CTRL_DP_CRC_CONTROL_CMD_QUERY (0x00000001U)
+
+/*
+ * NV0073_CTRL_CMD_DFP_INIT_MUX_DATA
+ *
+ * This control call is used to configure the display MUX related data
+ * for the given display device. Clients of RM are expected to call this
+ * control call to initialize the data related to the MUX before any MUX
+ * related operations, such as a mux switch or PSR entry/exit, are performed.
+ *
+ *   subDeviceInstance (in)
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   displayId (in)
+ *     ID of the display device for which the mux state has to be
+ *     initialized.
+ *   manfId (in)
+ *     Specifies the manufacturer ID of the panel obtained from the EDID.
+ *     This parameter is expected to be non-zero only in the case of an
+ *     internal panel.
+ *   productId (in)
+ *     Specifies the product ID of the panel obtained from the EDID. This
+ *     parameter is expected to be non-zero only in the case of an internal
+ *     panel.
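+ *
+ * For illustration only (the NvRmControl()-style entry point, the handles,
+ * and the EDID-derived values below are placeholders):
+ *
+ *     NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS params = { 0 };
+ *     params.subDeviceInstance = 0;
+ *     params.displayId = internalPanelDisplayId;
+ *     params.manfId    = edidManfId;     // non-zero for internal panels
+ *     params.productId = edidProductId;  // non-zero for internal panels
+ *     NV_STATUS status = NvRmControl(hClient, hDispCommon,
+ *                                    NV0073_CTRL_CMD_DFP_INIT_MUX_DATA,
+ *                                    &params, sizeof(params));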
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0073_CTRL_CMD_DFP_INIT_MUX_DATA (0x731158U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS_MESSAGE_ID (0x58U) + +typedef struct NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU16 manfId; + NvU16 productId; +} NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS; + + + +/* + * NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX + * + * This command is used to switch the dynamic display mux between + * integrated GPU and discrete GPU. + * + * subDeviceInstance (in) + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * displayId (in) + * ID of the display device for which the display MUX has to be switched + * flags (in) + * Flags indicating the action to be performed. Here are the possible + * valid values- + * NV0073_CTRL_DFP_DISP_MUX_SWITCH_IGPU_TO_DGPU + * When set mux is switched from integrated to discrete GPU. + * NV0073_CTRL_DFP_DISP_MUX_SWITCH_DGPU_TO_IGPU + * When set mux is switched from discrete to integrated GPU. + * NV0073_CTRL_DFP_DISP_MUX_SWITCH_SKIP_SIDEBAND_ACCESS + * Set to true for PSR panels as we skip sideband access. + * auxSettleDelay (in) + * Time, in milliseconds, necessary for AUX channel to settle and become + * accessible after a mux switch. Set to zero to use the default delay. + * muxSwitchLatencyMs (out) + * mux switch latency stats in milli-seconds + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX (0x731160U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS_MESSAGE_ID (0x60U) + +typedef struct NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 auxSettleDelay; + NvU32 muxSwitchLatencyMs; +} NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS; + +/* valid flags*/ +#define NV0073_CTRL_DFP_DISP_MUX_SWITCH 0:0 +#define NV0073_CTRL_DFP_DISP_MUX_SWITCH_IGPU_TO_DGPU 0x00000000 +#define NV0073_CTRL_DFP_DISP_MUX_SWITCH_DGPU_TO_IGPU 0x00000001 +#define NV0073_CTRL_DFP_DISP_MUX_SWITCH_SKIP_SIDEBAND_ACCESS 1:1 +#define NV0073_CTRL_DFP_DISP_MUX_SWITCH_SKIP_SIDEBAND_ACCESS_YES 0x00000001 +#define NV0073_CTRL_DFP_DISP_MUX_SWITCH_SKIP_SIDEBAND_ACCESS_NO 0x00000000 + +/* + * NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS + * + * This command is used to perform all the operations that need to be + * performed before a mux switch is started. + * + * subDeviceInstance (in) + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * displayId (in) + * ID of the display device for which the pre mux switch operations have + * to be performed. + * flags (in) + * Flags indicating the action to be performed. 
Here are the possible + * valid values - + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU + * Indicates a switch from i to d is initiated + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU + * Indicates a switch from d to i is initiated + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_ENTER_SKIP_NO + * When set RM will execute the PSR enter sequence. By default RM will + * not skip SR enter sequence + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_ENTER_SKIP_YES + * When set RM will skip the PSR enter sequence + * iGpuBrightness (in) + * iGPU brightness value (scale 0~100) before switching mux from I2D. + * This is used to match brightness after switching mux to dGPU + * preOpsLatencyMs (out) + * premux switch operations latency stats in milli-seconds. This includes - + * - disabling SOR sequencer and enable BL GPIO control + * - toggling LCD VDD, BL EN and PWM MUX GPIOs + * - PSR entry, if not skipped + * psrEntryLatencyMs (out) + * psr entry latency stats in milli-seconds + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS (0x731161U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS_MESSAGE_ID (0x61U) + +typedef struct NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 iGpuBrightness; + NvU32 preOpsLatencyMs; + NvU32 psrEntryLatencyMs; +} NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS; + +/* valid flags*/ +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE 0:0 +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU 0x00000000U +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU 0x00000001U +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_ENTER_SKIP 1:1 +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_ENTER_SKIP_NO 0x00000000U +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_ENTER_SKIP_YES 0x00000001U +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_MUX_SWITCH_IGPU_POWER_TIMING 2:2 +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_MUX_SWITCH_IGPU_POWER_TIMING_KNOWN 0x00000000 +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_MUX_SWITCH_IGPU_POWER_TIMING_UNKNOWN 0x00000001 +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SKIP_BACKLIGHT_ENABLE 3:3 +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SKIP_BACKLIGHT_ENABLE_NO 0x00000000U +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SKIP_BACKLIGHT_ENABLE_YES 0x00000001U + +#define NV0073_CTRL_DISP_MUX_BACKLIGHT_BRIGHTNESS_MIN 0U +#define NV0073_CTRL_DISP_MUX_BACKLIGHT_BRIGHTNESS_MAX 100U + +/* + * NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS + * + * This command is used to perform all the operations that need to be + * performed after a successful mux switch is completed. + * + * subDeviceInstance (in) + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * displayId (in) + * ID of the display device for which the post mux switch operations have + * to be performed. + * flags (in) + * Flags indicating the action to be performed. 
Here are the possible + * valid values - + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU + * Indicates a switch from i to d is initiated + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU + * Indicates a switch from d to i is initiated + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_EXIT_SKIP_NO + * When set RM will execute the PSR exit sequence. By default RM will + * not skip SR exit sequence + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_EXIT_SKIP_YES + * When set RM will skip the PSR exit sequence + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_MUX_SWITCH_IGPU_POWER_TIMING_KNOWN + * Indicates mux switches where we know when igpu powers up + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_MUX_SWITCH_IGPU_POWER_TIMING_UNKNOWN + * Indicates mux switches where we don't know when igpu powers up + * postOpsLatencyMs (out) + * postmux switch operations latency stats in milli-seconds. This includes - + * - restoring SOR sequencer and BL GPIO control + * - toggling LCD VDD, BL EN and PWM MUX GPIOs + * - PSR exit, if not skipped + * psrExitLatencyMs (out) + * psr exit latency stats in milli-seconds + * psrExitTransitionToInactiveLatencyMs (out) + * psr exit latency stats in milli-seconds, from state 2 (SR active) to state 4 (transition to inactive) + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_TIMEOUT in case of SR exit failure + */ + +#define NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS (0x731162U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS_MESSAGE_ID (0x62U) + +typedef struct NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 postOpsLatencyMs; + NvU32 psrExitLatencyMs; + NvU32 psrExitTransitionToInactiveLatencyMs; +} NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS; + +/* valid flags*/ +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE 0:0 +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_EXIT_SKIP 1:1 +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_EXIT_SKIP_NO 0x00000000U +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_EXIT_SKIP_YES 0x00000001U + +/* + * NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS + * + * This command is used to query the display mux status for the given + * display device + * + * subDeviceInstance (in) + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * displayId (in) + * ID of the display device for which the post mux switch operations have + * to be performed. + * muxStatus (out) + * status indicating the current state of the mux. + * valid values - + * NV0073_CTRL_DFP_DISP_MUX_STATE_INTEGRATED_GPU + * Indicates that the MUX is currently switched to integrated GPU. + * NV0073_CTRL_DFP_DISP_MUX_STATE_DISCRETE_GPU + * Indicates that the MUX is currently switched to discrete GPU. + * NV0073_CTRL_DFP_DISP_MUX_MODE_DISCRETE_ONLY + * Indicates that the MUX mode is set to discrete mode, where all displays + * are driven by discrete GPU. + * NV0073_CTRL_DFP_DISP_MUX_MODE_INTEGRATED_ONLY + * Indicates that the MUX mode is set to integrated mode, where all + * displays are driven by Integrated GPU. 
+ *       NV0073_CTRL_DFP_DISP_MUX_MODE_HYBRID
+ *         Indicates that the MUX mode is set to hybrid, where the internal
+ *         panel is driven by the integrated GPU, while external displays
+ *         might be driven by the discrete GPU.
+ *       NV0073_CTRL_DFP_DISP_MUX_MODE_DYNAMIC
+ *         Indicates that the MUX mode is dynamic. It is only in this mode
+ *         that the display MUX can be toggled between discrete and hybrid
+ *         dynamically.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS (0x731163U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS_MESSAGE_ID (0x63U)
+
+typedef struct NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 muxStatus;
+} NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS;
+
+/* valid flags */
+#define NV0073_CTRL_DFP_DISP_MUX_STATE                1:0
+#define NV0073_CTRL_DFP_DISP_MUX_STATE_INVALID        0x00000000U
+#define NV0073_CTRL_DFP_DISP_MUX_STATE_INTEGRATED_GPU 0x00000001U
+#define NV0073_CTRL_DFP_DISP_MUX_STATE_DISCRETE_GPU   0x00000002U
+#define NV0073_CTRL_DFP_DISP_MUX_MODE                 4:2
+#define NV0073_CTRL_DFP_DISP_MUX_MODE_INVALID         0x00000000U
+#define NV0073_CTRL_DFP_DISP_MUX_MODE_INTEGRATED_ONLY 0x00000001U
+#define NV0073_CTRL_DFP_DISP_MUX_MODE_DISCRETE_ONLY   0x00000002U
+#define NV0073_CTRL_DFP_DISP_MUX_MODE_HYBRID          0x00000003U
+#define NV0073_CTRL_DFP_DISP_MUX_MODE_DYNAMIC         0x00000004U
+
+
+
+/*
+* NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING
+*
+* This command can be used to get DSI mode timing parameters.
+*
+*   subDeviceInstance
+*     This parameter specifies the subdevice instance within the
+*     NV04_DISPLAY_COMMON parent device to which the operation should be
+*     directed. This parameter must specify a value between zero and the
+*     total number of subdevices within the parent device. This parameter
+*     should be set to zero for default behavior.
+*   displayId
+*     This parameter specifies the ID of the display for which the DSI
+*     mode timing is queried. The display ID must be a DSI-capable display.
+*   hActive
+*     This parameter specifies the horizontal length of the active pixel
+*     data in the raster.
+*   vActive
+*     This parameter specifies the vertical lines of the active pixel
+*     data in the raster.
+*   hFrontPorch
+*     This parameter specifies the number of horizontal front porch
+*     blanking pixels in the raster.
+*   vFrontPorch
+*     This parameter specifies the number of lines of the vertical front
+*     porch in the raster.
+*   hBackPorch
+*     This parameter specifies the number of horizontal back porch
+*     blanking pixels in the raster.
+*   vBackPorch
+*     This parameter specifies the number of lines of the vertical back
+*     porch in the raster.
+*   hSyncWidth
+*     This parameter specifies the number of horizontal sync pixels in
+*     the raster.
+*   vSyncWidth
+*     This parameter specifies the number of lines of the vertical sync
+*     in the raster.
+*   bpp
+*     This parameter specifies the depth (Bits per Pixel) of the output
+*     display stream.
+*   refresh
+*     This parameter specifies the refresh rate of the panel (in Hz).
+*   pclkHz
+*     This parameter specifies the pixel clock rate in Hz.
+*   numLanes
+*     Number of DSI data lanes.
+*   dscEnable
+*     Flag to indicate if DSC can be enabled, which in turn indicates if
+*     the panel supports DSC.
+*   dscBpp
+*     DSC bits per pixel.
+*   dscNumSlices
+*     Number of slices for DSC.
+*   dscDualDsc
+*     Flag to indicate if the panel supports DSC streams from two DSI
+*     controllers.
+*   dscSliceHeight
+*     Height of DSC slices.
+*   dscBlockPrediction
+*     Flag to indicate if DSC Block Prediction needs to be enabled.
+*   dscDecoderVersionMajor
+*     Major version number of the DSC decoder on the panel.
+*   dscDecoderVersionMinor
+*     Minor version number of the DSC decoder on the panel.
+*   dscUseCustomPPS
+*     Flag to indicate if the panel uses custom PPS values which deviate
+*     from the standard values.
+*   dscCustomPPSData
+*     32 words of custom PPS data required by the panel.
+*   dscEncoderCaps
+*     Capabilities of the DSC encoder in the SoC.
+*
+* Possible status values returned are:
+*   NV_OK
+*   NV_ERR_INVALID_ARGUMENT
+*   NV_ERR_NOT_SUPPORTED
+*/
+
+#define NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING (0x731166U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DFP_DSI_CUSTOM_PPS_DATA_COUNT 32U
+
+#define NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS_MESSAGE_ID (0x66U)
+
+typedef struct NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  hActive;
+    NvU32  vActive;
+    NvU32  hFrontPorch;
+    NvU32  vFrontPorch;
+    NvU32  hBackPorch;
+    NvU32  vBackPorch;
+    NvU32  hSyncWidth;
+    NvU32  vSyncWidth;
+    NvU32  bpp;
+    NvU32  refresh;
+    NvU32  pclkHz;
+    NvU32  numLanes;
+    NvU32  dscEnable;
+    NvU32  dscBpp;
+    NvU32  dscNumSlices;
+    NvU32  dscDualDsc;
+    NvU32  dscSliceHeight;
+    NvU32  dscBlockPrediction;
+    NvU32  dscDecoderVersionMajor;
+    NvU32  dscDecoderVersionMinor;
+    NvBool dscUseCustomPPS;
+    NvU32  dscCustomPPSData[NV0073_CTRL_CMD_DFP_DSI_CUSTOM_PPS_DATA_COUNT];
+    NV0073_CTRL_CMD_DSC_CAP_PARAMS dscEncoderCaps;
+} NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_DFP_GET_FIXED_MODE_TIMING
+ *
+ * This control call is used to retrieve the display mode timing info that's
+ * specified for a given DFP from an offline configuration blob (e.g., Device
+ * Tree). This display timing info is intended to replace the timings exposed
+ * in a sink's EDID.
+ *
+ *   subDeviceInstance (in)
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   displayId (in)
+ *     ID of the display device for which the timings should be retrieved.
+ *   stream (in)
+ *     For MST connectors with static topologies (e.g., DP serializers),
+ *     this parameter further identifies the video stream for which the
+ *     timings should be retrieved.
+ *   valid (out)
+ *     Indicates whether a valid display timing was found for this DFP.
+ *   hActive (out)
+ *     Horizontal active width in pixels
+ *   hFrontPorch (out)
+ *     Horizontal front porch
+ *   hSyncWidth (out)
+ *     Horizontal sync width
+ *   hBackPorch (out)
+ *     Horizontal back porch
+ *   vActive (out)
+ *     Vertical active height in lines
+ *   vFrontPorch (out)
+ *     Vertical front porch
+ *   vSyncWidth (out)
+ *     Vertical sync width
+ *   vBackPorch (out)
+ *     Vertical back porch
+ *   pclkKHz (out)
+ *     Pixel clock frequency in KHz
+ *   rrx1k (out)
+ *     Refresh rate in units of 0.001Hz
+ *   x (out)
+ *     x offset inside the superframe at which this view starts
+ *   y (out)
+ *     y offset inside the superframe at which this view starts
+ *   width (out)
+ *     Horizontal active width in pixels for this view
+ *   height (out)
+ *     Vertical active height in lines for this view
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_DFP_GET_FIXED_MODE_TIMING (0x731172) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8 | NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS_MESSAGE_ID)" */
+
+#define NV0073_CTRL_DFP_FIXED_MODE_TIMING_MAX_SUPERFRAME_VIEWS 4U
+
+#define NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS_MESSAGE_ID (0x72U)
+
+typedef struct NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU8   stream;
+
+    NvBool valid;
+
+    NvU16  hActive;
+    NvU16  hFrontPorch;
+    NvU16  hSyncWidth;
+    NvU16  hBackPorch;
+
+    NvU16  vActive;
+    NvU16  vFrontPorch;
+    NvU16  vSyncWidth;
+    NvU16  vBackPorch;
+
+    NvU32  pclkKHz;
+    NvU32  rrx1k;
+
+    struct {
+        NvU8 numViews;
+        struct {
+            NvU16 x;
+            NvU16 y;
+            NvU16 width;
+            NvU16 height;
+        } view[NV0073_CTRL_DFP_FIXED_MODE_TIMING_MAX_SUPERFRAME_VIEWS];
+    } superframeInfo;
+} NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_DFP_ENTER_DISPLAY_POWER_GATING_DIAGNOSTIC_DATA
+ *
+ * This structure describes diagnostic information about the display power
+ * gating entry sequence.
+ *
+ *   totalRmEntryLatencyUs
+ *     Duration in microseconds that RM took to service the 'Enter
+ *     Display Power Gating' command. This includes time for all
+ *     steps that RM performs as part of the display power gating entry
+ *     sequence, including the below parameters.
+ *   hwOkToGateLatencyUs
+ *     Duration in microseconds that HW took to assert ok_to_gate.
+ *     Only valid when displayId is not equal to 0xFFFFFFFF.
+ *   carEntryApiLatencyUs
+ *     Duration in microseconds that the CAR (Clock and Reset) block took to
+ *     service the 'Enter Display Power Gating' command.
+ *
+ */
+
+typedef struct NV0073_CTRL_DFP_ENTER_DISPLAY_POWER_GATING_DIAGNOSTIC_DATA {
+    NvU32 totalRmEntryLatencyUs;
+    NvU32 hwOkToGateLatencyUs;
+    NvU32 carEntryApiLatencyUs;
+} NV0073_CTRL_DFP_ENTER_DISPLAY_POWER_GATING_DIAGNOSTIC_DATA;
+
+/*
+ * NV0073_CTRL_CMD_DFP_ENTER_DISPLAY_POWER_GATING
+ *
+ * This command can be used to enter display power gating with an option to
+ * save-restore settings for the specified displayId.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the display for which the state
+ *     needs to be saved-restored during exit of Display Power Gating.
+ *     The display ID must be a dfp display as determined with the
+ *     NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command.
If more than one + * displayId bit is set or the displayId is not a dfp, this call will + * return NV_ERR_INVALID_ARGUMENT. For the case where no save-restore + * is needed, displayId should be set to 0xFFFFFFFF. + * flags + * This parameter specifies special request from client for RM(for future use) + * diagnosticData + * This parameter provides diagnostic information about display power + * gating entry sequence + * + * Possible status values returned are: + * NV_OK - If Display Power Gating Entry was successful + * NV_ERR_GENERIC - If Display Power Gating Entry failed + * NV_ERR_INVALID_ARGUMENT - If incorrect parameters are sent + */ +#define NV0073_CTRL_CMD_DFP_ENTER_DISPLAY_POWER_GATING (0x731174U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_ENTER_DISPLAY_POWER_GATING_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DFP_ENTER_DISPLAY_POWER_GATING_PARAMS_MESSAGE_ID (0x74U) + +typedef struct NV0073_CTRL_CMD_DFP_ENTER_DISPLAY_POWER_GATING_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flag; + NV0073_CTRL_DFP_ENTER_DISPLAY_POWER_GATING_DIAGNOSTIC_DATA diagnosticData; +} NV0073_CTRL_CMD_DFP_ENTER_DISPLAY_POWER_GATING_PARAMS; + +#define NV0073_CTRL_DFP_ENTER_DISPLAY_POWER_GATING_FLAGS_RESERVED 31:0 +#define NV0073_CTRL_DFP_ENTER_DISPLAY_POWER_GATING_FLAGS_RESERVED_INIT (0x00000000U) + +/* + * NV0073_CTRL_DFP_EXIT_DISPLAY_POWER_GATING_DIAGNOSTIC_DATA + * + * This structure describes diagnostic information about display power + * gating exit sequence + * + * totalRmExitLatencyUs + * Duration in microseconds that RM took to service 'Exit + * Display Power Gating' command. This includes time for all + * steps that RM performs as part of display power gating exit + * sequence including the below parameters. + * riscvBootupLatencyUs + * Duration in microseconds that LTM RISCV took to bootup. + * carExitApiLatencyUs + * Duration in microseconds that CAR (Clock and Reset) block took + * to service 'Exit Display Power Gating' command + * + */ +typedef struct NV0073_CTRL_DFP_EXIT_DISPLAY_POWER_GATING_DIAGNOSTIC_DATA { + NvU32 totalRmExitLatencyUs; + NvU32 riscvBootupLatencyUs; + NvU32 carExitApiLatencyUs; +} NV0073_CTRL_DFP_EXIT_DISPLAY_POWER_GATING_DIAGNOSTIC_DATA; + +/* + * NV0073_CTRL_CMD_DFP_EXIT_DISPLAY_POWER_GATING + * + * This command can be used to exit display power gating. If preceding + * NV0073_CTRL_CMD_DFP_ENTER_DISPLAY_POWER_GATING command requested for + * save-restore of settings for a particular displayId then this command + * will restore settings for that displayId. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. 
+ *   flags
+ *     This parameter specifies a special request from the client to RM
+ *     (for future use).
+ *   diagnosticData
+ *     This parameter provides diagnostic information about the display
+ *     power gating exit sequence.
+ *
+ * Possible status values returned are:
+ *   NV_OK                   - When Display Power Gating Exit was successful
+ *   NV_ERR_GENERIC          - When Display Power Gating Exit failed
+ *   NV_ERR_INVALID_ARGUMENT - When incorrect parameters are sent
+ */
+
+#define NV0073_CTRL_CMD_DFP_EXIT_DISPLAY_POWER_GATING (0x731175U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_EXIT_DISPLAY_POWER_GATING_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DFP_EXIT_DISPLAY_POWER_GATING_PARAMS_MESSAGE_ID (0x75U)
+
+typedef struct NV0073_CTRL_CMD_DFP_EXIT_DISPLAY_POWER_GATING_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flag;
+    NV0073_CTRL_DFP_EXIT_DISPLAY_POWER_GATING_DIAGNOSTIC_DATA diagnosticData;
+} NV0073_CTRL_CMD_DFP_EXIT_DISPLAY_POWER_GATING_PARAMS;
+
+#define NV0073_CTRL_DFP_EXIT_DISPLAY_POWER_GATING_FLAGS_RESERVED      31:0
+#define NV0073_CTRL_DFP_EXIT_DISPLAY_POWER_GATING_FLAGS_RESERVED_INIT (0x00000000U)
+
+/*
+ * NV0073_CTRL_CMD_DFP_EDP_DRIVER_UNLOAD
+ *
+ * This command is called when we want to inform RM of a driver unload.
+ *
+ *   subDeviceInstance (in)
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation
+ *     should be directed.
+ *   displayId (in)
+ *     This parameter inputs the displayId of the active display. A value
+ *     of zero indicates no display is active.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_DFP_EDP_DRIVER_UNLOAD (0x731176U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_EDP_DRIVER_UNLOAD_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_EDP_DRIVER_UNLOAD_PARAMS_MESSAGE_ID (0x76U)
+
+typedef struct NV0073_CTRL_DFP_EDP_DRIVER_UNLOAD_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+} NV0073_CTRL_DFP_EDP_DRIVER_UNLOAD_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_SET_REGION_RAM_RECTANGLES
+ *
+ * @brief
+ *   This command can be used to program the Rectangle regions info into
+ *   Region RAM. These Rectangle regions are then used as regions of
+ *   interest for Tell Tale and Frozen Frame detection.
+ *
+ *   Some NVDisplay hardware has an internal RAM to support the TellTale (TT)
+ *   and Frozen Frame (FF) features. Each entry in this RAM is simply defined
+ *   as a rectangle (x/y position, width and height). RM will load the region
+ *   RAM with rectangle entries using PDISP registers.
+ *
+ *   Several new Core/Window methods have been added. These methods can be
+ *   programmed to specify which rectangle resources should be enabled, and
+ *   to inform HW to start using them for TT/FF checking. There are also
+ *   methods that can be used to configure the manner of checking (e.g., for
+ *   frozen frame detection, how many regions need to be frozen for how many
+ *   frames before it is considered a fault). As part of these methods, the
+ *   client needs to specify the index of the region RAM entry (rectangle)
+ *   that needs to be checked. This ID is already specified for each
+ *   rectangle as part of the info that was programmed by RM to Region RAM.
+ *
+ *   The rectangles loaded onto Region RAM are not specifically tied to the
+ *   current mode, and do not have to be coupled with only one single mode.
Based on the
+ * current raster size, it is expected that the RM clients would choose the
+ * Rectangles that are within the raster size. Once TT/FF checking is enabled,
+ * Display HW continuously checks and will raise an interrupt event if it
+ * detects an error. If a rectangle resource is chosen that "doesn't fit" the
+ * current raster size, overlaps with another rectangle resource, etc., then
+ * HW will generate an exception for these invalid states.
+ *
+ * @params
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * numRectangles
+ *   This parameter specifies the number of rectangles whose region info
+ *   has been passed as part of this control call.
+ * rectanglesInfo::rectangleID
+ *   This parameter provides the ID of the rectangle which will be used
+ *   to identify the rectangle during method programming for TellTale
+ *   or Frozen Frame detection.
+ * rectanglesInfo::xCoordinate
+ *   This parameter specifies the x-coordinate of the top left corner of
+ *   the rectangle in the viewport.
+ * rectanglesInfo::yCoordinate
+ *   This parameter specifies the y-coordinate of the top left corner of
+ *   the rectangle in the viewport.
+ * rectanglesInfo::width
+ *   This parameter specifies the width of the rectangle.
+ * rectanglesInfo::height
+ *   This parameter specifies the height of the rectangle.
+ *
+ * Possible status values returned are:
+ *   NV_OK                   - Upon successfully programming Rectangles info to Region RAM
+ *   NV_ERR_INVALID_ARGUMENT - When incorrect values are passed in arguments
+ */
+#define NV0073_CTRL_CMD_SYSTEM_SET_REGION_RAM_RECTANGLES (0x731177U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_SET_REGION_RAM_RECTANGLES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_MAX_REGION_RAM_RECTANGLES 16U
+
+#define NV0073_CTRL_CMD_SYSTEM_SET_REGION_RAM_RECTANGLES_PARAMS_MESSAGE_ID (0x77U)
+
+typedef struct NV0073_CTRL_CMD_SYSTEM_SET_REGION_RAM_RECTANGLES_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU8  numRectangles;
+
+    struct {
+        NvU8  rectangleID;
+        NvU16 xCoordinate;
+        NvU16 yCoordinate;
+        NvU16 width;
+        NvU16 height;
+    } rectanglesInfo[NV0073_CTRL_CMD_SYSTEM_MAX_REGION_RAM_RECTANGLES];
+} NV0073_CTRL_CMD_SYSTEM_SET_REGION_RAM_RECTANGLES_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_CONFIGURE_SAFETY_INTERRUPTS
+ *
+ * This command can be used to set the interrupt handling mechanism (One-time
+ * or Continuous) of TellTale and FrozenFrame/Overlap events. This command
+ * can also be used to Enable/Disable the safety interrupts.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * tellTaleEvents
+ *   The 'mode' variable holds the interrupt configuration values for
+ *   TellTale events. Clients can specify whether the interrupt needs to
+ *   be Disabled, Enabled Continuously, or Enabled Only Once using the
+ *   NV0073_CTRL_CMD_SYSTEM_SAFETY_INTERRUPT_MODE_* macros. ENABLE_ONLY_ONCE
+ *   helps avoid an interrupt storm by disabling the interrupt after the
+ *   first event, since Safety Interrupts are generated per frame.
+ *   The 'specified' field should be used to specify if the 'mode' value at
+ *   that index should be programmed as part of the control call handling
+ *   function. This helps if Clients don't want to update the TellTale
+ *   interrupt configuration of a particular tile in this instance of the
+ *   control call invocation. If it is set to NV_TRUE, the 'mode' value
+ *   will be programmed. If NV_FALSE, the 'mode' value will not be programmed.
+ * frozenFrameEvents
+ *   The 'mode' variable holds the interrupt configuration values for
+ *   FrozenFrame events. Clients can specify whether the interrupt needs to
+ *   be Disabled, Enabled Continuously, or Enabled Only Once using the
+ *   NV0073_CTRL_CMD_SYSTEM_SAFETY_INTERRUPT_MODE_* macros. ENABLE_ONLY_ONCE
+ *   helps avoid an interrupt storm by disabling the interrupt after the
+ *   first event, since Safety Interrupts are generated per frame.
+ *   The 'specified' field should be used to specify if the 'mode' value at
+ *   that index should be programmed as part of the control call handling
+ *   function. This helps if Clients don't want to update the Frozen Frame
+ *   interrupt configuration of a particular head in this instance of the
+ *   control call invocation. If it is set to NV_TRUE, the 'mode' value
+ *   will be programmed. If NV_FALSE, the 'mode' value will not be programmed.
+ * overlapEvents
+ *   The 'mode' variable holds the interrupt configuration values for
+ *   Overlap events. Clients can specify whether the interrupt needs to
+ *   be Disabled, Enabled Continuously, or Enabled Only Once using the
+ *   NV0073_CTRL_CMD_SYSTEM_SAFETY_INTERRUPT_MODE_* macros. ENABLE_ONLY_ONCE
+ *   helps avoid an interrupt storm by disabling the interrupt after the
+ *   first event, since Safety Interrupts are generated per frame.
+ *   The 'specified' field should be used to specify if the 'mode' value at
+ *   that index should be programmed as part of the control call handling
+ *   function. This helps if Clients don't want to update the Overlap
+ *   interrupt configuration of a particular tile in this instance of the
+ *   control call invocation. If it is set to NV_TRUE, the 'mode' value
+ *   will be programmed. If NV_FALSE, the 'mode' value will not be programmed.
+ */
+/*
+ * NOTE: Though tellTaleEvents, frozenFrameEvents and overlapEvents are
+ * structured as arrays holding an interrupt 'mode' for each Head/Tile, RM
+ * currently does not support configuring these interrupts per Head/Tile.
+ * This support is planned to be added in RM sometime later, but having the
+ * structures per Head/Tile helps in future-proofing the control call
+ * interface.
+ * Clients are expected to set the same 'mode' value at all indexes (for all
+ * Heads/Tiles); this specified 'mode' value will be applied globally to all
+ * Heads/Tiles for now, as in the sketch below.
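+ *
+ * As an illustration of the above, a client would typically do something
+ * like the following minimal sketch (hypothetical client code with made-up
+ * variable names, not part of this interface), e.g. to arm one-shot Frozen
+ * Frame interrupts on all heads:
+ *
+ *   NV0073_CTRL_CMD_SYSTEM_CONFIGURE_SAFETY_INTERRUPTS_PARAMS p = { 0 };
+ *   NvU32 i;
+ *
+ *   p.subDeviceInstance = 0;
+ *   for (i = 0; i < NV0073_CTRL_CMD_SYSTEM_MAX_SAFETY_HEAD_COUNT; i++)
+ *   {
+ *       // Same 'mode' at every index, per the note above.
+ *       p.frozenFrameEvents[i].mode      = NV0073_CTRL_CMD_SYSTEM_SAFETY_INTERRUPT_MODE_ENABLE_ONLY_ONCE;
+ *       p.frozenFrameEvents[i].specified = NV_TRUE;
+ *   }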
+ */
+#define NV0073_CTRL_CMD_SYSTEM_CONFIGURE_SAFETY_INTERRUPTS (0x731178U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_CONFIGURE_SAFETY_INTERRUPTS_PARAMS_MESSAGE_ID" */
+
+/*
+ * Interrupt configuration values for Safety events
+ */
+#define NV0073_CTRL_CMD_SYSTEM_SAFETY_INTERRUPT_MODE_DISABLE           0U
+#define NV0073_CTRL_CMD_SYSTEM_SAFETY_INTERRUPT_MODE_ENABLE_CONTINUOUS 1U
+#define NV0073_CTRL_CMD_SYSTEM_SAFETY_INTERRUPT_MODE_ENABLE_ONLY_ONCE  2U
+#define NV0073_CTRL_CMD_SYSTEM_SAFETY_INTERRUPT_MODE_RESERVED          3U
+
+/*
+ * Head and Tile count used to specify Safety interrupt configuration for each head/tile.
+ */
+#define NV0073_CTRL_CMD_SYSTEM_MAX_SAFETY_HEAD_COUNT 8U
+#define NV0073_CTRL_CMD_SYSTEM_MAX_SAFETY_TILE_COUNT 8U
+
+#define NV0073_CTRL_CMD_SYSTEM_CONFIGURE_SAFETY_INTERRUPTS_PARAMS_MESSAGE_ID (0x78U)
+
+typedef struct NV0073_CTRL_CMD_SYSTEM_CONFIGURE_SAFETY_INTERRUPTS_PARAMS {
+    NvU32 subDeviceInstance;
+
+    struct {
+        NvU8   mode;
+        NvBool specified;
+    } tellTaleEvents[NV0073_CTRL_CMD_SYSTEM_MAX_SAFETY_TILE_COUNT];
+
+    struct {
+        NvU8   mode;
+        NvBool specified;
+    } frozenFrameEvents[NV0073_CTRL_CMD_SYSTEM_MAX_SAFETY_HEAD_COUNT];
+
+    struct {
+        NvU8   mode;
+        NvBool specified;
+    } overlapEvents[NV0073_CTRL_CMD_SYSTEM_MAX_SAFETY_TILE_COUNT];
+} NV0073_CTRL_CMD_SYSTEM_CONFIGURE_SAFETY_INTERRUPTS_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DFP_SET_FORCE_BLACK_PIXELS
+ *
+ * This command is used to force black pixels from postcomp.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * head
+ *   This parameter specifies the head for which black pixel generation
+ *   should be enabled or disabled.
+ * bForceBlack
+ *   Whether to enable or disable black pixel generation.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_DFP_SET_FORCE_BLACK_PIXELS (0x731179U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_FORCE_BLACK_PIXELS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_SET_FORCE_BLACK_PIXELS_PARAMS_MESSAGE_ID (0x79U)
+
+typedef struct NV0073_CTRL_DFP_SET_FORCE_BLACK_PIXELS_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  head;
+    NvBool bForceBlack;
+} NV0073_CTRL_DFP_SET_FORCE_BLACK_PIXELS_PARAMS;
+
+/* _ctrl0073dfp_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
new file mode 100644
index 0000000..70b2206
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
@@ -0,0 +1,3045 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0073/ctrl0073dp.finn
+//
+
+#include "ctrl/ctrl0073/ctrl0073base.h"
+#include "ctrl/ctrl0073/ctrl0073common.h"
+
+#include "nvcfg_sdk.h"
+
+/* NV04_DISPLAY_COMMON dp-display-specific control commands and parameters */
+
+/*
+ * NV0073_CTRL_CMD_DP_AUXCH_CTRL
+ *
+ * This command can be used to perform an aux channel transaction to the
+ * displayPort receiver (an illustrative sketch follows the parameter
+ * descriptions below).
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayId
+ *   This parameter specifies the ID of the display with which the aux
+ *   channel transaction should be performed. The display ID must be a dfp
+ *   display. If more than one displayId bit is set or the displayId is not
+ *   a dfp, this call will return NV_ERR_INVALID_ARGUMENT.
+ * bAddrOnly
+ *   If set to NV_TRUE, this parameter prompts an address-only
+ *   i2c-over-AUX transaction to be issued, if supported. Otherwise, the
+ *   call fails with NVOS_STATUS_ERR_NOT_SUPPORTED. The size parameter is
+ *   expected to be 0 for address-only transactions.
+ * cmd
+ *   This parameter is an input to this command. The cmd parameter follows
+ *   the Section 2.4 AUX channel syntax in the DisplayPort spec.
+ *   Here are the currently defined fields:
+ *     NV0073_CTRL_DP_AUXCH_CMD_TYPE
+ *       This specifies the type of the request transaction:
+ *         NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C
+ *           Set this value to indicate an I2C transaction.
+ *         NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX
+ *           Set this value to indicate a DisplayPort transaction.
+ *     NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT
+ *       This field is dependent on NV0073_CTRL_DP_AUXCH_CMD_TYPE.
+ *       It is only valid if NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C
+ *       is specified above and indicates the middle of a transaction.
+ *       In the case of AUX, this field should be set to zero. The valid
+ *       values are:
+ *         NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE
+ *           The I2C transaction is not in the middle of a transaction.
+ *         NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE
+ *           The I2C transaction is in the middle of a transaction.
+ *     NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE
+ *       The request type specifies if we are doing a read, write, or
+ *       write status request:
+ *         NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ
+ *           An I2C or AUX channel read is requested.
+ *         NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE
+ *           An I2C or AUX channel write is requested.
+ *         NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS
+ *           An I2C write status request is desired. This value should
+ *           not be set in the case of an AUX CH request and applies only
+ *           to the I2C write transaction command.
+ * addr
+ *   This parameter is an input to this command. The addr parameter follows
+ *   Section 2.4 of the DisplayPort spec; the client should refer to the
+ *   spec for valid addresses.
Only the first 20 bits are valid.
+ * data[]
+ *   In the case of a read transaction, this parameter returns the data from
+ *   the transaction request. In the case of a write transaction, the client
+ *   should write to this buffer the data to send. The max number of bytes
+ *   allowed is NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE.
+ * size
+ *   Specifies how many data bytes to read/write depending on the transaction
+ *   type. The input size value is indexed from 0. That means if you want to
+ *   read 1 byte -> size = 0, 2 bytes -> size = 1, 3 bytes -> size = 2, up to
+ *   16 bytes where size = 15. On return, this parameter returns the total
+ *   number of data bytes successfully read/written from/to the transaction
+ *   (indexed from 1). That is, if you successfully requested 1 byte, you
+ *   would send down size = 0; on return, you should expect size = 1 if the
+ *   1 byte was successfully read. (Note that it is valid for a display to
+ *   reply with fewer than the requested number of bytes; in that case, it is
+ *   up to the client to make a new request for the remaining bytes.)
+ * replyType
+ *   This parameter is an output of this command. It returns the aux channel
+ *   status after the end of the aux ch transaction. The valid values are
+ *   based on the DisplayPort spec:
+ *     NV0073_CTRL_DP_AUXCH_REPLYTYPE_ACK
+ *       In the case of a write,
+ *         AUX: the write transaction completed and all data bytes were
+ *              written.
+ *         I2C: the returned 'size' bytes have been written to the I2C slave.
+ *       In the case of a read, a return of ACK indicates the receiver is
+ *       ready for another read request.
+ *     NV0073_CTRL_DP_AUXCH_REPLYTYPE_NACK
+ *       In the case of a write, only the first returned 'size' bytes have
+ *       been written.
+ *       In the case of a read, implies that the receiver does not have the
+ *       requested data for the read request transaction.
+ *     NV0073_CTRL_DP_AUXCH_REPLYTYPE_DEFER
+ *       Not ready for the write/read request; the client should retry later.
+ *     NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CNACK
+ *       Applies to I2C transactions only. For an I2C write transaction, the
+ *       first returned 'size' bytes were written to the I2C slave before
+ *       getting a NACK. For an I2C read transaction, the I2C slave has
+ *       NACKed the I2C address.
+ *     NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CDEFER
+ *       Applicable to I2C transactions. For I2C write and read
+ *       transactions, the I2C slave has yet to ACK or NACK the I2C
+ *       transaction.
+ *     NV0073_CTRL_DP_AUXCH_REPLYTYPE_TIMEOUT
+ *       The receiver did not respond within the timeout period defined in
+ *       the DisplayPort 1.1a specification.
+ * retryTimeMs
+ *   This parameter is an output of this command. In the case of an
+ *   NVOS_STATUS_ERROR_RETRY return status, this parameter returns the time
+ *   duration in milliseconds after which the client should retry this
+ *   command.
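+ *
+ * Illustrative sketch (hypothetical client code, not a prescription): a
+ * 1-byte DPCD read, composed here with the usual DRF_DEF helper assumed
+ * from nvmisc.h; 0x00202 (lane status) is just an example DPCD address:
+ *
+ *   NV0073_CTRL_DP_AUXCH_CTRL_PARAMS p = { 0 };
+ *
+ *   p.subDeviceInstance = 0;
+ *   p.displayId = dpDisplayId;   // a single DP displayId bit (hypothetical)
+ *   p.cmd  = DRF_DEF(0073_CTRL, _DP_AUXCH_CMD, _TYPE, _AUX) |
+ *            DRF_DEF(0073_CTRL, _DP_AUXCH_CMD, _REQ_TYPE, _READ);
+ *   p.addr = 0x00202;
+ *   p.size = 0;                  // size is 0-indexed: 0 requests 1 byte
+ *   // on success, p.data[0] holds the byte and p.size returns 1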
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NVOS_STATUS_ERROR_RETRY
+ */
+#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U
+#define NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID (0x41U)
+
+typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bAddrOnly;
+    NvU32  cmd;
+    NvU32  addr;
+    NvU8   data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
+    NvU32  size;
+    NvU32  replyType;
+    NvU32  retryTimeMs;
+} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE 3:3
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT 2:2
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE 1:0
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS (0x00000002U)
+
+#define NV0073_CTRL_DP_AUXCH_ADDR 20:0
+
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE 3:0
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_ACK (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_NACK (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_DEFER (0x00000002U)
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_TIMEOUT (0x00000003U)
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CNACK (0x00000004U)
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CDEFER (0x00000008U)
+
+// This is not a register field; it is the software failure case when an
+// invalid argument is passed.
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_INVALID_ARGUMENT (0xffffffffU)
+
+
+
+/*
+ * NV0073_CTRL_CMD_DP_CTRL
+ *
+ * This command is used to set various displayPort configurations for
+ * the specified displayId, such as lane count and link bandwidth. It
+ * is assumed that link training has already occurred. (A usage sketch
+ * appears after the cmd field definitions below.)
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayId
+ *   This parameter specifies the ID of the display whose DisplayPort
+ *   configuration should be set. The display ID must be a dfp display.
+ *   If more than one displayId bit is set or the displayId is not a dfp,
+ *   this call will return NV_ERR_INVALID_ARGUMENT.
+ * cmd
+ *   This parameter is an input to this command.
+ *   Here are the currently defined fields:
+ *     NV0073_CTRL_DP_CMD_SET_LANE_COUNT
+ *       Set to specify the number of displayPort lanes to configure.
+ *         NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE
+ *           No request to set the displayport lane count.
+ *         NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE
+ *           Set this value to indicate a displayport lane count change.
+ *     NV0073_CTRL_DP_CMD_SET_LINK_BW
+ *       Set to specify a request to change the link bandwidth.
+ *         NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE
+ *           No request to set the displayport link bandwidth.
+ *         NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE
+ *           Set this value to indicate a displayport link bandwidth change.
+ *     NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD
+ *       Set to disable downspread during link training.
+ *         NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE
+ *           Downspread will be enabled.
+ *         NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE
+ *           Downspread will be disabled (e.g. for compliance testing).
+ *     NV0073_CTRL_DP_CMD_SET_FORMAT_MODE
+ *       This field specifies the DP stream mode.
+ *         NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM
+ *           This value indicates that single stream mode is specified.
+ *         NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM
+ *           This value indicates that multi stream mode is specified.
+ *     NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING
+ *       Set to do Fast link training (avoid AUX transactions for link
+ *       training). We need to restore all the previously trained link
+ *       settings (e.g. the drive current/preemphasis settings) before doing
+ *       FLT. During FLT, we send training pattern 1 followed by training
+ *       pattern 2, each for a period of 500us.
+ *         NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO
+ *           Not a fast link training scenario.
+ *         NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES
+ *           Do fast link training.
+ *     NV0073_CTRL_DP_CMD_NO_LINK_TRAINING
+ *       Set to do no link training. We need to restore all the previously
+ *       trained link settings (e.g. the drive current/preemphasis settings)
+ *       before doing NLT, but we don't need to do the Clock Recovery and
+ *       Channel Equalization. (Please refer to the NVIDIA PANEL SELFREFRESH
+ *       CONTROLLER SPECIFICATION 3.1.6 for the detailed flow.)
+ *         NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO
+ *           Not a no link training scenario.
+ *         NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES
+ *           Do no link training.
+ *     NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING
+ *       Specifies whether RM should use the DP Downspread setting specified
+ *       by the NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD command regardless of
+ *       what the Display is capable of. This is used along with the Fake
+ *       link training option so that we can configure the GPU to
+ *       enable/disable spread when a real display is not connected.
+ *         NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE
+ *           RM always uses the specified DP Downspread setting.
+ *         NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT
+ *           RM will enable Downspread only if the display supports it.
+ *           (default)
+ *     NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING
+ *       Specifies whether RM should skip HW training of the link.
+ *       If this is the case then RM only updates its SW state without
+ *       actually touching any HW registers. Clients should use this ONLY if
+ *       they have determined that:
+ *         a. the link is trained and not lost
+ *         b. the desired link config is the same as the currently trained
+ *            link config
+ *         c. the link is not in D3 (should be in D0)
+ *         NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO
+ *           RM doesn't skip HW LT, as the current Link Config is not the
+ *           same as the requested Link Config.
+ *         NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES
+ *           RM skips HW LT and only updates its SW state, as the client has
+ *           determined that the current state of the link and the requested
+ *           Link Config are the same.
+ *     NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG
+ *       Set if the client does not want link training to happen.
+ *       This should ONLY be used for HW verification.
+ *         NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE
+ *           This is normal production behavior, which shall perform
+ *           link training or follow the normal procedure for lane count
+ *           reduction.
+ *         NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE
+ *           Set this value to not perform link config steps; this should
+ *           only be turned on for HW verif testing. If _LINK_BW
+ *           or _LANE_COUNT is set, RM will only write to the TX DP registers
+ *           and perform no link training.
+ *     NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED
+ *       This field specifies if the source grants the Post Link training
+ *       Adjustment request or not.
+ *         NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO
+ *           Source does not grant the Post Link training Adjustment request.
+ *         NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES
+ *           Source grants the Post Link training Adjustment request.
+ *     NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING
+ *       This field specifies if fake link training is to be done. This will
+ *       program enough of the hardware to avoid any hardware hangs and,
+ *       depending upon the option chosen by the client, the OR will be
+ *       enabled for transmission.
+ *         NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO
+ *           No Fake LT will be performed.
+ *         NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION
+ *           SOR will not be powered up during Fake LT.
+ *         NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON
+ *           SOR will be powered up during Fake LT.
+ *     NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER
+ *       This field specifies if the source wants to link train LT Tunable
+ *       Repeaters or not.
+ *         NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO
+ *           Source does not want to link train LT Tunable Repeaters.
+ *         NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES
+ *           Source wants to link train LT Tunable Repeaters.
+ *     NV0073_CTRL_DP_CMD_BANDWIDTH_TEST
+ *       Set if the client wants to reset the link after the link
+ *       training is done, as a part of uncommitting a DP display.
+ *         NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO
+ *           This is for normal operation, if DD decided not to reset the
+ *           link.
+ *         NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES
+ *           This is to reset the link, if DD decided to uncommit the display
+ *           because the link is no longer required to be enabled, as in a DP
+ *           compliance test.
+ *     NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE
+ *       Set if the client does not want link config checks to happen.
+ *       This should ONLY be used for HW verification if necessary.
+ *         NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE
+ *           This is normal production behavior, which shall perform
+ *           pre-link-training checks, such as whether both RX and TX are
+ *           capable of the requested lane and link bw config.
+ *         NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE
+ *           Set this value to bypass the link config check; this should
+ *           only be turned on for HW verif testing. If _LINK_BW
+ *           or _LANE_COUNT is set, RM will not check TX and RX caps.
+ *     NV0073_CTRL_DP_CMD_FALLBACK_CONFIG
+ *       Set if the config requested by the client fails and the link is
+ *       being trained for the fallback config.
+ *         NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE
+ *           This is the normal case when the link is being trained for a
+ *           requested config.
+ *         NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE
+ *           Set this value in case the link configuration for the requested
+ *           config fails and the link is being trained for a fallback
+ *           config.
+ *     NV0073_CTRL_DP_CMD_ENABLE_FEC
+ *       Specifies whether RM should set NV_DPCD14_FEC_CONFIGURATION_FEC_READY
+ *       before link training if the client has determined that FEC is
+ *       required (for DSC).
+ *       If FEC is required to be enabled, RM sets the FEC enable bit in the
+ *       panel and starts link training. Enabling/disabling FEC on the GPU
+ *       side is not done during Link training, and the RM Ctrl call
+ *       NV0073_CTRL_CMD_DP_CONFIGURE_FEC has to be called explicitly to
+ *       enable/disable FEC after LT (including PostLT LQA).
+ *       If enabled, FEC would be disabled while powering down the link.
+ *       The client has to make sure to account for the 3% overhead of
+ *       transmitting FEC symbols while calculating DP bandwidth.
+ *         NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE
+ *           This is the normal case when FEC is not required.
+ *         NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE
+ *           Set this value in case FEC needs to be enabled.
+ * data
+ *   This parameter is an input and output to this command.
+ *   Here are the currently defined fields:
+ *     NV0073_CTRL_DP_DATA_SET_LANE_COUNT
+ *       This field specifies the desired setting for lane count. A client
+ *       may choose any lane count as long as it does not exceed the
+ *       capability of the DisplayPort receiver as indicated in the
+ *       receiver capability field. The valid values for this field are:
+ *         NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0
+ *           For zero-lane configurations, link training is shut down.
+ *         NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1
+ *           For one-lane configurations, lane0 is used.
+ *         NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2
+ *           For two-lane configurations, lane0 and lane1 are used.
+ *         NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4
+ *           For four-lane configurations, all lanes are used.
+ *         NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8
+ *           For devices that support 8-lane DP.
+ *       On return, the lane count setting is returned, which may be
+ *       different from the requested input setting.
+ *     NV0073_CTRL_DP_DATA_SET_LINK_BW
+ *       This field specifies the desired setting for link bandwidth. The
+ *       valid values for this field include:
+ *         NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS
+ *         NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS
+ *         NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS
+ *         NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS
+ *       On return, the link bandwidth setting is returned, which may be
+ *       different from the requested input setting.
+ *     NV0073_CTRL_DP_DATA_TARGET
+ *       This field specifies which physical repeater or sink is to be
+ *       trained. Clients should make sure that:
+ *         1. Physical repeaters are targeted in order, starting from the one
+ *            closest to the GPU.
+ *         2. All physical repeaters are properly trained before the sink is
+ *            targeted.
+ *       The valid values for this field are:
+ *         NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_X
+ *           'X' denotes the physical repeater index. It is a 1-based index
+ *           to reserve 0 for _SINK. 'X' can't be more than 8.
+ *         NV0073_CTRL_DP_DATA_TARGET_SINK
+ * err
+ *   This parameter provides info regarding the outcome of this control
+ *   call. If zero, no errors were found. Otherwise, this parameter
+ *   specifies the error detected. The value is broken down as follows:
+ *     NV0073_CTRL_DP_ERR_SET_LANE_COUNT
+ *       If set to _ERR, set lane count failed.
+ *     NV0073_CTRL_DP_ERR_SET_LINK_BW
+ *       If set to _ERR, set link bandwidth failed.
+ *     NV0073_CTRL_DP_ERR_DISABLE_DOWNSPREAD
+ *       If set to _ERR, disable downspread failed.
+ *     NV0073_CTRL_DP_ERR_INVALID_PARAMETER
+ *       If set to _ERR, at least one of the calling functions
+ *       failed due to an invalid parameter.
+ *     NV0073_CTRL_DP_ERR_LINK_TRAINING
+ *       If set to _ERR, link training failed.
+ *     NV0073_CTRL_DP_ERR_TRAIN_PHY_REPEATER
+ *       If set to _ERR, the operation to link train the repeater failed.
+ *     NV0073_CTRL_DP_ERR_ENABLE_FEC
+ *       If set to _ERR, the operation to enable FEC failed.
+ *     NV0073_CTRL_DP_ERR_LINK_STATUS
+ *       If set to _DISCONNECTED, link training failed and the link is
+ *       disconnected/unplugged.
+ *
+ * retryTimeMs
+ *   This parameter is an output of this command. In the case of an
+ *   NVOS_STATUS_ERROR_RETRY return status, this parameter returns the time
+ *   duration in milliseconds after which the client should retry this
+ *   command.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NVOS_STATUS_ERROR_RETRY
+ */
+
+#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID (0x43U)
+
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 cmd;
+    NvU32 data;
+    NvU32 err;
+    NvU32 retryTimeMs;
+    NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT 0:0
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW 1:1
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD 2:2
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_UNUSED 3:3
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE 4:4
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING 5:5
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING 6:6
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING 7:7
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING 8:8
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING 9:9
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED 10:10
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING 12:11
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON (0x00000002U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER 13:13
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG 14:14
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE (0x00000001U)
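+
+/*
+ * Illustrative sketch (hypothetical client code, not part of the API
+ * definition): requesting a 4-lane, 2.70 Gbps sink configuration via
+ * NV0073_CTRL_CMD_DP_CTRL, assuming the usual DRF_DEF helper from nvmisc.h:
+ *
+ *   NV0073_CTRL_DP_CTRL_PARAMS p = { 0 };
+ *
+ *   p.displayId = dpDisplayId;   // a single DP displayId bit (hypothetical)
+ *   p.cmd  = DRF_DEF(0073_CTRL, _DP_CMD, _SET_LANE_COUNT, _TRUE) |
+ *            DRF_DEF(0073_CTRL, _DP_CMD, _SET_LINK_BW, _TRUE);
+ *   p.data = DRF_DEF(0073_CTRL, _DP_DATA, _SET_LANE_COUNT, _4) |
+ *            DRF_DEF(0073_CTRL, _DP_DATA, _SET_LINK_BW, _2_70GBPS) |
+ *            DRF_DEF(0073_CTRL, _DP_DATA, _TARGET, _SINK);
+ *   // on return, inspect p.err field by field (see NV0073_CTRL_DP_ERR_*)
+ */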
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC 15:15 +#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE (0x00000001U) + +#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST 29:29 +#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO (0x00000000U) +#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES (0x00000001U) +#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE 30:30 +#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG 31:31 +#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE (0x00000001U) + +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT 4:0 +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0 (0x00000000U) +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1 (0x00000001U) +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2 (0x00000002U) +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 (0x00000004U) +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8 (0x00000008U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW 15:8 +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS (0x00000006U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS (0x00000008U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS (0x00000009U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS (0x0000000AU) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS (0x0000000CU) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS (0x00000010U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS (0x00000014U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS (0x0000001EU) +#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING 18:18 +#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO (0x00000000U) +#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES (0x00000001U) +#define NV0073_CTRL_DP_DATA_TARGET 22:19 +#define NV0073_CTRL_DP_DATA_TARGET_SINK (0x00000000U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0 (0x00000001U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1 (0x00000002U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2 (0x00000003U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3 (0x00000004U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4 (0x00000005U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5 (0x00000006U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6 (0x00000007U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7 (0x00000008U) + +#define NV0073_CTRL_DP_ERR_SET_LANE_COUNT 0:0 +#define NV0073_CTRL_DP_ERR_SET_LANE_COUNT_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ERR_SET_LANE_COUNT_ERR (0x00000001U) +#define NV0073_CTRL_DP_ERR_SET_LINK_BW 1:1 +#define NV0073_CTRL_DP_ERR_SET_LINK_BW_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ERR_SET_LINK_BW_ERR (0x00000001U) +#define NV0073_CTRL_DP_ERR_DISABLE_DOWNSPREAD 2:2 +#define NV0073_CTRL_DP_ERR_DISABLE_DOWNSPREAD_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ERR_DISABLE_DOWNSPREAD_ERR (0x00000001U) +#define NV0073_CTRL_DP_ERR_UNUSED 3:3 +#define NV0073_CTRL_DP_ERR_CLOCK_RECOVERY 4:4 +#define NV0073_CTRL_DP_ERR_CLOCK_RECOVERY_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ERR_CLOCK_RECOVERY_ERR (0x00000001U) +#define NV0073_CTRL_DP_ERR_CHANNEL_EQUALIZATION 5:5 +#define NV0073_CTRL_DP_ERR_CHANNEL_EQUALIZATION_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ERR_CHANNEL_EQUALIZATION_ERR (0x00000001U) +#define NV0073_CTRL_DP_ERR_TRAIN_PHY_REPEATER 6:6 +#define NV0073_CTRL_DP_ERR_TRAIN_PHY_REPEATER_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ERR_TRAIN_PHY_REPEATER_ERR (0x00000001U) +#define 
NV0073_CTRL_DP_ERR_ENABLE_FEC 7:7
+#define NV0073_CTRL_DP_ERR_ENABLE_FEC_NOERR (0x00000000U)
+#define NV0073_CTRL_DP_ERR_ENABLE_FEC_ERR (0x00000001U)
+#define NV0073_CTRL_DP_ERR_CR_DONE_LANE 11:8
+#define NV0073_CTRL_DP_ERR_CR_DONE_LANE_0_LANE (0x00000000U)
+#define NV0073_CTRL_DP_ERR_CR_DONE_LANE_1_LANE (0x00000001U)
+#define NV0073_CTRL_DP_ERR_CR_DONE_LANE_2_LANE (0x00000002U)
+#define NV0073_CTRL_DP_ERR_CR_DONE_LANE_4_LANE (0x00000004U)
+#define NV0073_CTRL_DP_ERR_CR_DONE_LANE_8_LANE (0x00000008U)
+#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE 15:12
+#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE_0_LANE (0x00000000U)
+#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE_1_LANE (0x00000001U)
+#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE_2_LANE (0x00000002U)
+#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE_4_LANE (0x00000004U)
+#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE_8_LANE (0x00000008U)
+#define NV0073_CTRL_DP_ERR_LINK_STATUS 29:29
+#define NV0073_CTRL_DP_ERR_LINK_STATUS_CONNECTED (0x00000000U)
+#define NV0073_CTRL_DP_ERR_LINK_STATUS_DISCONNECTED (0x00000001U)
+#define NV0073_CTRL_DP_ERR_INVALID_PARAMETER 30:30
+#define NV0073_CTRL_DP_ERR_INVALID_PARAMETER_NOERR (0x00000000U)
+#define NV0073_CTRL_DP_ERR_INVALID_PARAMETER_ERR (0x00000001U)
+#define NV0073_CTRL_DP_ERR_LINK_TRAINING 31:31
+#define NV0073_CTRL_DP_ERR_LINK_TRAINING_NOERR (0x00000000U)
+#define NV0073_CTRL_DP_ERR_LINK_TRAINING_ERR (0x00000001U)
+
+/*
+ * NV0073_CTRL_DP_LANE_DATA_PARAMS
+ *
+ * This structure provides lane characteristics.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayId
+ *   This parameter specifies the ID of the display whose lane data is to
+ *   be read or set. The display ID must be a dfp display.
+ *   If more than one displayId bit is set or the displayId is not a dfp,
+ *   this call will return NV_ERR_INVALID_ARGUMENT.
+ * numLanes
+ *   Indicates the number of lanes for which the data is valid.
+ * data
+ *   This parameter is an input to this command.
+ *   Here are the currently defined fields:
+ *     NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS
+ *       This field specifies the preemphasis level set in the lane.
+ *       The valid values for this field are:
+ *         NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE
+ *           No preemphasis for this lane.
+ *         NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1
+ *           Preemphasis set to 3.5 dB.
+ *         NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2
+ *           Preemphasis set to 6.0 dB.
+ *         NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3
+ *           Preemphasis set to 9.5 dB.
+ *     NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT
+ *       This field specifies the drive current set in the lane.
+ *       The valid values for this field are:
+ *         NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0
+ *           Drive current level is set to 8 mA.
+ *         NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1
+ *           Drive current level is set to 12 mA.
+ *         NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2
+ *           Drive current level is set to 16 mA.
+ *         NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3
+ *           Drive current level is set to 24 mA.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_MAX_LANES 8U
+
+typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 numLanes;
+    NvU32 data[NV0073_CTRL_MAX_LANES];
+} NV0073_CTRL_DP_LANE_DATA_PARAMS;
+
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS 1:0
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 (0x00000003U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT 3:2
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U)
+
+/*
+ * NV0073_CTRL_CMD_GET_DP_LANE_DATA
+ *
+ * This command is used to get the current pre-emphasis and drive current
+ * level values for the specified number of lanes.
+ *
+ * The command takes a NV0073_CTRL_DP_LANE_DATA_PARAMS structure as the
+ * argument with the appropriate subDeviceInstance and displayId filled in.
+ * The arguments of this structure and the format of preemphasis and
+ * drive-current levels are described above.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ * NOTE: This control call is only for testing purposes and
+ *       should not be used in normal DP operations. Preemphasis
+ *       and drive current levels will be set during Link training
+ *       in normal DP operations.
+ */
+
+#define NV0073_CTRL_CMD_DP_GET_LANE_DATA (0x731345U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_LANE_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_GET_LANE_DATA_PARAMS_MESSAGE_ID (0x45U)
+
+typedef NV0073_CTRL_DP_LANE_DATA_PARAMS NV0073_CTRL_DP_GET_LANE_DATA_PARAMS;
+
+
+/*
+ * NV0073_CTRL_CMD_SET_DP_LANE_DATA
+ *
+ * This command is used to set the pre-emphasis and drive current
+ * level values for the specified number of lanes.
+ *
+ * The command takes a NV0073_CTRL_DP_LANE_DATA_PARAMS structure as the
+ * argument with the appropriate subDeviceInstance, displayId, number of
+ * lanes, preemphasis and drive current values filled in.
+ * The arguments of this structure and the format of preemphasis and
+ * drive-current levels are described above.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ * NOTE: This control call is only for testing purposes and
+ *       should not be used in normal DP operations.
Preemphasis
+ *       and drive current will be set during Link training in
+ *       normal DP operations.
+ */
+
+#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID (0x46U)
+
+typedef NV0073_CTRL_DP_LANE_DATA_PARAMS NV0073_CTRL_DP_SET_LANE_DATA_PARAMS;
+
+#define NV0073_CTRL_CSTM_BUFFER_SIZE 9U
+
+/*
+ * NV0073_CTRL_DP_CSTM
+ *
+ * This structure specifies:
+ * A) 80-bit DP CSTM Test Pattern data for DP1.x (HBR2 + 8b/10b channel
+ *    coding). The fields of this structure are to be specified as follows:
+ *      field_31_0  takes bits 31:0
+ *      field_63_32 takes bits 63:32
+ *      field_95_64 takes bits 79:64
+ * B) 264-bit DP CSTM Test Pattern data for DP2.x (128b/132b channel coding).
+ *    The fields of this structure are to be specified as follows:
+ *      field_31_0    contains bits 31:0
+ *      field_63_32   contains bits 63:32
+ *      field_95_64   contains bits 95:64
+ *      field_127_96  contains bits 127:96
+ *      field_159_128 contains bits 159:128
+ *      field_191_160 contains bits 191:160
+ *      field_223_192 contains bits 223:192
+ *      field_255_224 contains bits 255:224
+ *      field_263_256 contains bits 263:256
+ */
+typedef struct NV0073_CTRL_DP_CSTM {
+    NvU32 field_31_0;
+    NvU32 field_63_32;
+    NvU32 field_95_64;
+    NvU32 field_127_96;
+    NvU32 field_159_128;
+    NvU32 field_191_160;
+    NvU32 field_223_192;
+    NvU32 field_255_224;
+    NvU32 field_263_256;
+} NV0073_CTRL_DP_CSTM;
+
+#define NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS_CSTM2 15:0
+#define NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS_CSTM8 7:0
+
+/*
+ * NV0073_CTRL_DP_TESTPATTERN
+ *
+ * This structure specifies the possible test patterns available in
+ * DisplayPort, and parameters for the Square pattern.
+ */
+
+typedef struct NV0073_CTRL_DP_TESTPATTERN {
+    NvU32 testPattern;
+} NV0073_CTRL_DP_TESTPATTERN;
+#define NV0073_CTRL_DP_TESTPATTERN_DATA 4:0
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_NONE (0x00000000U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_D10_2 (0x00000001U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_SERMP (0x00000002U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_PRBS_7 (0x00000003U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_CSTM (0x00000004U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_HBR2COMPLIANCE (0x00000005U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_CP2520PAT3 (0x00000006U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_TRAINING1 (0x00000007U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_TRAINING2 (0x00000008U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_TRAINING3 (0x00000009U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_TRAINING4 (0x0000000AU)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_CP2520PAT1 (0x0000000BU)
+
+
+
+/*
+ * NV0073_CTRL_CMD_DP_SET_TESTPATTERN
+ *
+ * This command forces the main link to output the selected test patterns
+ * supported in the DP specs.
+ *
+ * The command takes a NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS structure as the
+ * argument with the appropriate subDeviceInstance, displayId and the test
+ * pattern to be set as inputs.
+ * The arguments of this structure and the format of test patterns are
+ * described above.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayId
+ *   This parameter specifies the ID of the display on which the test
+ *   pattern should be set. The display ID must be a dfp display.
+ *   If more than one displayId bit is set or the displayId is not a dfp,
+ *   this call will return NV_ERR_INVALID_ARGUMENT.
+ * testPattern
+ *   This parameter is of type NV0073_CTRL_DP_TESTPATTERN and specifies
+ *   the testpattern to set on displayport. The format of this structure
+ *   is described above.
+ * laneMask
+ *   This parameter specifies the bit mask of DP lanes on which the test
+ *   pattern is to be applied.
+ * cstm
+ *   This parameter specifies all the bits for the CSTM test pattern.
+ * bIsHBR2
+ *   This Boolean parameter is set to TRUE if the HBR2 compliance test is
+ *   being performed.
+ * bSkipLaneDataOverride
+ *   Skip the override of preemphasis and drive current.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ * NOTE: This control call is only for testing purposes and
+ *       should not be used in normal DP operations. Preemphasis
+ *       and drive current will be set during Link training in
+ *       normal DP operations.
+ */
+
+#define NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS_MESSAGE_ID (0x47U)
+
+typedef struct NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS {
+    NvU32                      subDeviceInstance;
+    NvU32                      displayId;
+    NV0073_CTRL_DP_TESTPATTERN testPattern;
+    NvU8                       laneMask;
+    NV0073_CTRL_DP_CSTM        cstm;
+    NvBool                     bIsHBR2;
+    NvBool                     bSkipLaneDataOverride;
+} NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_TESTPATTERN (0x731347U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV0073_CTRL_CMD_GET_DP_TESTPATTERN
+ *
+ * This command returns the current test pattern set on the main link of
+ * Display Port.
+ *
+ * The command takes a NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS structure as the
+ * argument with the appropriate subDeviceInstance, displayId as inputs and
+ * returns the current test pattern in the testPattern field of the
+ * structure.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayId
+ *   This parameter specifies the ID of the display whose test pattern
+ *   should be returned. The display ID must be a dfp display.
+ *   If more than one displayId bit is set or the displayId is not a dfp,
+ *   this call will return NV_ERR_INVALID_ARGUMENT.
+ * testPattern
+ *   This parameter is of type NV0073_CTRL_DP_TESTPATTERN and specifies the
+ *   testpattern set on displayport. The format of this structure is
+ *   described above.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ * NOTE: This control call is only for testing purposes and
+ *       should not be used in normal DP operations.
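+ *
+ * Illustrative sketch (hypothetical values, not a prescription): packing an
+ * 80-bit custom pattern, e.g. 0xAAAA_F0F0F0F0_0F0F0F0F, into the CSTM
+ * fields described above before calling NV0073_CTRL_CMD_DP_SET_TESTPATTERN:
+ *
+ *   NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS p = { 0 };
+ *
+ *   p.testPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_CSTM;
+ *   p.laneMask         = 0xF;          // all four lanes (hypothetical)
+ *   p.cstm.field_31_0  = 0x0F0F0F0FU;  // bits 31:0
+ *   p.cstm.field_63_32 = 0xF0F0F0F0U;  // bits 63:32
+ *   p.cstm.field_95_64 = 0x0000AAAAU;  // bits 79:64 for the 80-bit case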
+ *
+ */
+
+#define NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS_MESSAGE_ID (0x48U)
+
+typedef struct NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS {
+    NvU32                      subDeviceInstance;
+    NvU32                      displayId;
+    NV0073_CTRL_DP_TESTPATTERN testPattern;
+    NV0073_CTRL_DP_CSTM        cstm;
+} NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_TESTPATTERN (0x731348U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA
+ *
+ * This structure specifies the Pre-emphasis/Drive Current/Postcursor2/TxPu
+ * information for a display port device. These are the current values that
+ * RM is using to map the levels for Pre-emphasis and Drive Current for Link
+ * Training.
+ *
+ * preEmphasis
+ *   This field specifies the preemphasis values.
+ * driveCurrent
+ *   This field specifies the driveCurrent values.
+ * postCursor2
+ *   This field specifies the postcursor2 values.
+ * TxPu
+ *   This field specifies the pull-up current source drive values.
+ */
+#define NV0073_CTRL_MAX_DRIVECURRENT_LEVELS 4U
+#define NV0073_CTRL_MAX_PREEMPHASIS_LEVELS 4U
+#define NV0073_CTRL_MAX_POSTCURSOR2_LEVELS 4U
+
+typedef struct NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_1 {
+    NvU32 preEmphasis;
+    NvU32 driveCurrent;
+    NvU32 postCursor2;
+    NvU32 TxPu;
+} NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_1;
+
+typedef NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_1 NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_SLICE1[NV0073_CTRL_MAX_PREEMPHASIS_LEVELS];
+
+typedef NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_SLICE1 NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_SLICE2[NV0073_CTRL_MAX_DRIVECURRENT_LEVELS];
+
+typedef NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_SLICE2 NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA[NV0073_CTRL_MAX_POSTCURSOR2_LEVELS];
+
+
+/*
+ * NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA
+ *
+ * This command is used to override the Pre-emphasis/Drive Current/
+ * PostCursor2/TxPu data in the RM.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayId
+ *   This parameter specifies the ID of the digital display for which the
+ *   data should be set. The display ID must be a digital display.
+ *   If more than one displayId bit is set or the displayId is not a DP,
+ *   this call will return NV_ERR_INVALID_ARGUMENT.
+ * dpData
+ *   This parameter is of type
+ *   NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA and specifies
+ *   the Pre-emphasis/Drive Current/Postcursor2/TxPu information for a
+ *   display port device.
+ *
+ * The command takes a
+ * NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS
+ * structure as the argument with the appropriate subDeviceInstance,
+ * displayId, and dpData. The fields of this structure are described above.
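+ *
+ * Note that dpData is a 4 x 4 x 4 table: the outermost dimension is indexed
+ * by postcursor2 level (SLICE2 entries), the middle by drive current level
+ * (SLICE1 entries), and the innermost by preemphasis level. A minimal sketch
+ * (hypothetical values, not a prescription) of overriding one entry:
+ *
+ *   NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS p = { 0 };
+ *
+ *   // entry for postcursor2 level 0, drive current level 1, preemphasis level 2
+ *   p.dpData[0][1][2].preEmphasis  = 0x2U;  // hypothetical HW level
+ *   p.dpData[0][1][2].driveCurrent = 0x1U;  // hypothetical HW level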
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS_MESSAGE_ID (0x51U)
+
+typedef struct NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA dpData;
+} NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA (0x731351U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA
+ *
+ * This command is used to get the Pre-emphasis/Drive Current/PostCursor2/
+ * TxPu data.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayId
+ *   This parameter specifies the ID of the digital display for which the
+ *   data should be returned. The display ID must be a digital display.
+ *   If more than one displayId bit is set or the displayId is not a DP,
+ *   this call will return NV_ERR_INVALID_ARGUMENT.
+ *
+ * The command takes a
+ * NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS
+ * structure as the argument with the appropriate subDeviceInstance,
+ * displayId, and dpData. The fields of this structure are described above.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS_MESSAGE_ID (0x52U)
+
+typedef struct NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA dpData;
+} NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA (0x731352U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS_MESSAGE_ID" */
+
+
+
+/*
+ * NV0073_CTRL_CMD_DP_MAIN_LINK_CTRL
+ *
+ * This command is used to set various Main Link configurations for
+ * the specified displayId, such as powering up/down the Main Link.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayId
+ *   This parameter specifies the ID of the DP display which owns
+ *   the Main Link to be adjusted. The display ID must be a DP display,
+ *   as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command.
+ *   If more than one displayId bit is set or the displayId is not a DP,
+ *   this call will return NV_ERR_INVALID_ARGUMENT.
+ * ctrl
+ *   Here are the currently defined fields:
+ *     NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERDOWN
+ *       This value will power down the Main Link.
+ *     NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERUP
+ *       This value will power up the Main Link.
+ */
+#define NV0073_CTRL_CMD_DP_MAIN_LINK_CTRL (0x731356U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS_MESSAGE_ID (0x56U)
+
+typedef struct NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 ctrl;
+} NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE 0:0
+#define NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERDOWN (0x00000000U)
+#define NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERUP (0x00000001U)
+
+/*
+ * NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM
+ *
+ * This command sets the current audio mute state on the main link of
+ * Display Port.
+ *
+ * The command takes a NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS structure
+ * as the argument with the appropriate subDeviceInstance and displayId as
+ * inputs, and whether to enable or disable mute in the mute parameter.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayId
+ *   This parameter specifies the ID of the display for which the audio
+ *   stream state should be set. The display ID must be a DP display.
+ *   If the display ID is invalid or if it is not a DP display,
+ *   this call will return NV_ERR_INVALID_ARGUMENT.
+ * mute
+ *   This parameter is an input to this command.
+ *   Here are the currently defined values:
+ *     NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_DISABLE
+ *       Audio mute will be disabled.
+ *     NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_ENABLE
+ *       Audio mute will be enabled.
+ *     NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_AUTO
+ *       Audio mute will be automatically controlled by hardware.
+ *
+ *   Note: Any other value for mute in
+ *   NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS is not allowed and
+ *   the API will return an error.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID (0x59U)
+
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_DISABLE (0x00000000U)
+#define NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_ENABLE (0x00000001U)
+#define NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_AUTO (0x00000002U)
+
+/*
+ * NV0073_CTRL_CMD_DP_ASSR_CTRL
+ *
+ * This command is used to control and query DisplayPort ASSR
+ * settings for the specified displayId.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayId
+ * This parameter specifies the ID of the DP display which owns
+ * the Main Link to be adjusted. The display ID must be a DP display,
+ * as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command.
+ * If more than one displayId bit is set or the displayId is not a DP,
+ * this call will return NV_ERR_INVALID_ARGUMENT.
+ * cmd
+ * This input parameter specifies the command to execute. Legal
+ * values for this parameter include:
+ * NV0073_CTRL_DP_ASSR_CMD_QUERY_STATE
+ * This field can be used to query ASSR state. When used, the ASSR
+ * state value is returned in the data parameter.
+ * NV0073_CTRL_DP_ASSR_CMD_DISABLE
+ * This field can be used to control the ASSR disable state.
+ * NV0073_CTRL_DP_ASSR_CMD_FORCE_STATE
+ * This field can be used to control ASSR state without looking at
+ * whether the display supports it. Used in conjunction with
+ * fake link training. Note that this updates the state on the
+ * source side only. The sink is assumed to be configured for ASSR
+ * by the client (DD).
+ * data
+ * This parameter specifies the data associated with the cmd
+ * parameter.
+ * NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED
+ * This field indicates the state of ASSR when queried using the cmd
+ * parameter. When used to control the state, it indicates whether
+ * ASSR should be enabled or disabled.
+ * NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED_NO
+ * When queried, this flag indicates that ASSR is not enabled on the sink.
+ * When used as the data for CMD_FORCE_STATE, it requests ASSR to
+ * be disabled on the source side.
+ * NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED_YES
+ * When queried, this flag indicates that ASSR is enabled on the sink.
+ * When used as the data for CMD_FORCE_STATE, it requests ASSR to
+ * be enabled on the source side.
+ * err
+ * This output parameter specifies any errors associated with the cmd
+ * parameter.
+ * NV0073_CTRL_DP_ASSR_ERR_CAP
+ * This field indicates the error pertaining to ASSR capability of
+ * the sink device.
+ * NV0073_CTRL_DP_ASSR_ERR_CAP_NOERR
+ * This flag indicates there is no error.
+ * NV0073_CTRL_DP_ASSR_ERR_CAP_ERR
+ * This flag indicates that the sink is not ASSR capable.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV0073_CTRL_CMD_DP_ASSR_CTRL (0x73135aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_ASSR_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_ASSR_CTRL_PARAMS_MESSAGE_ID (0x5AU)
+
+typedef struct NV0073_CTRL_DP_ASSR_CTRL_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 cmd;
+    NvU32 data;
+    NvU32 err;
+} NV0073_CTRL_DP_ASSR_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_ASSR_CMD                    31:0
+#define NV0073_CTRL_DP_ASSR_CMD_QUERY_STATE        (0x00000001U)
+#define NV0073_CTRL_DP_ASSR_CMD_DISABLE            (0x00000002U)
+#define NV0073_CTRL_DP_ASSR_CMD_FORCE_STATE        (0x00000003U)
+#define NV0073_CTRL_DP_ASSR_CMD_ENABLE             (0x00000004U)
+#define NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED     0:0
+#define NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED_NO  (0x00000000U)
+#define NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED_YES (0x00000001U)
+#define NV0073_CTRL_DP_ASSR_ERR_CAP                0:0
+#define NV0073_CTRL_DP_ASSR_ERR_CAP_NOERR          (0x00000000U)
+#define NV0073_CTRL_DP_ASSR_ERR_CAP_ERR            (0x00000001U)
+
+/*
+ * NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID
+ *
+ * This command is used to assign a displayId from the free pool
+ * to a specific AUX Address in a DP 1.2 topology. The topology
+ * is uniquely identified by the DisplayId of the DP connector.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * displayId
+ * This is the DisplayId of the DP connector to which the topology
+ * is rooted.
+ * preferredDisplayId
+ * Client can send a preferredDisplayId which RM can use during allocation
+ * if available. If this Id is already part of allDisplayMask in RM, then we
+ * return a free available Id to the client. However, if this is set to
+ * NV0073_CTRL_CMD_DP_INVALID_PREFERRED_DISPLAY_ID then we return the allDisplayMask value.
+ * useBFM
+ * Set to true if DP-BFM is used during emulation/RTL sim.
+ *
+ * [out] displayIdAssigned
+ * This is the out field that will receive the new displayId. If the
+ * function fails, this is guaranteed to be 0.
+ * [out] allDisplayMask
+ * This is the allDisplayMask RM variable which is returned only when
+ * preferredDisplayId is set to NV0073_CTRL_CMD_DP_INVALID_PREFERRED_DISPLAY_ID
+ *
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ *
+ */
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+/*
+ * There cannot be more than 128 devices in a topology (also by DP 1.2 specification)
+ * NOTE: Temporarily lowered to pass XAPI RM tests. Should be reevaluated!
+ */
+#define NV0073_CTRL_CMD_DP_MAX_TOPOLOGY_NODES           120U
+#define NV0073_CTRL_CMD_DP_INVALID_PREFERRED_DISPLAY_ID 0xffffffffU
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID (0x5BU)
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  preferredDisplayId;
+
+    NvBool force;
+    NvBool useBFM;
+
+    NvU32  displayIdAssigned;
+    NvU32  allDisplayMask;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID
+ *
+ * This command is used to return a multistream displayId to the unused pool.
+ * You must not call this function while either the ARM or ASSEMBLY state cache
+ * refers to this display-id. The head must not be attached.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * displayId
+ * This is the displayId to free.
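+ *
+ * Example (illustrative sketch only; NvRmControl(), hClient, hDispCommon and
+ * connectorDisplayId are assumed names, not defined in this header): allocate
+ * a dynamic displayId for a branch device, then free it once the head is
+ * detached:
+ *
+ *   NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS allocParams = { 0 };
+ *   allocParams.displayId          = connectorDisplayId;  // root DP connector
+ *   allocParams.preferredDisplayId = NV0073_CTRL_CMD_DP_INVALID_PREFERRED_DISPLAY_ID;
+ *   status = NvRmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID,
+ *                        &allocParams, sizeof(allocParams));
+ *
+ *   // Later, after the head is detached and no state cache refers to it:
+ *   NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS freeParams = { 0 };
+ *   freeParams.displayId = allocParams.displayIdAssigned;
+ *   status = NvRmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID,
+ *                        &freeParams, sizeof(freeParams));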
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ *
+ *
+ */
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID (0x5CU)
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DP_GET_LINK_CONFIG
+ *
+ * This command is used to query DisplayPort link config
+ * settings on the transmitter side.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * displayId
+ * This parameter specifies the ID of the DP display which owns
+ * the Main Link to be queried.
+ * If more than one displayId bit is set or the displayId is not a DP,
+ * this call will return NV_ERR_INVALID_ARGUMENT.
+ * laneCount
+ * Number of lanes the DP transmitter hardware is set up to drive.
+ * linkBW
+ * The BW of each lane that the DP transmitter hardware is set up to drive.
+ * The values returned will be according to the DP specifications.
+ * dp2LinkBW
+ * The BW of each lane that the DP transmitter hardware is set up to drive
+ * when the link is running at a UHBR rate.
+ * The values returned use the 10 Mbps convention.
+ *
+ * Note:
+ * linkBW and dp2LinkBW are mutually exclusive. Only one of the values will be non-zero.
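+ *
+ * Example (illustrative sketch only; NvRmControl(), hClient and hDispCommon
+ * are assumed names, not defined in this header): query the trained link and
+ * decide which bandwidth convention applies:
+ *
+ *   NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS cfg = { 0 };
+ *   cfg.subDeviceInstance = 0;
+ *   cfg.displayId         = displayId;   // exactly one DP displayId bit set
+ *   status = NvRmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_GET_LINK_CONFIG,
+ *                        &cfg, sizeof(cfg));
+ *   if (status == NV_OK) {
+ *       if (cfg.dp2LinkBW != 0) {
+ *           // UHBR link: 10 Mbps units, e.g. 0x32A (810) == 8.10 Gbps
+ *       } else {
+ *           // 8b/10b link: DP convention, e.g. 0x14 == 5.40 Gbps (HBR2)
+ *       }
+ *   }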
+ *
+ */
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG (0x731360U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS_MESSAGE_ID (0x60U)
+
+typedef struct NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  laneCount;
+    NvU32  linkBW;
+    NvU32  dp2LinkBW;
+    NvBool bFECEnabled;
+} NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT          3:0
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_0        (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1        (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_2        (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_4        (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW             3:0
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW_1_62GBPS    (0x00000006U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW_2_70GBPS    (0x0000000aU)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW_5_40GBPS    (0x00000014U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW_8_10GBPS    (0x0000001EU)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW_2_16GBPS    (0x00000008U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW_2_43GBPS    (0x00000009U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW_3_24GBPS    (0x0000000CU)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW_4_32GBPS    (0x00000010U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW_6_75GBPS    (0x00000019U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_DP2LINK_BW          15:0
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_DP2LINK_BW_1_62GBPS (0x000000A2U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_DP2LINK_BW_2_70GBPS (0x0000010EU)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_DP2LINK_BW_5_40GBPS (0x0000021CU)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_DP2LINK_BW_8_10GBPS (0x0000032AU)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_DP2LINK_BW_2_16GBPS (0x000000D8U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_DP2LINK_BW_2_43GBPS (0x000000F3U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_DP2LINK_BW_3_24GBPS (0x00000114U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_DP2LINK_BW_4_32GBPS (0x000001B0U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_DP2LINK_BW_6_75GBPS (0x000002A3U)
+
+
+/*
+ * NV0073_CTRL_CMD_DP_GET_EDP_DATA
+ *
+ * This command is used to query Embedded DisplayPort information.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * displayId
+ * This parameter specifies the ID of the eDP display which owns
+ * the Main Link to be queried.
+ * If more than one displayId bit is set or the displayId is not an eDP,
+ * this call will return NV_ERR_INVALID_ARGUMENT.
+ * data
+ * This output parameter specifies the data associated with the eDP display.
+ * It is only valid if this function returns NV_OK.
+ * NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER
+ * This field indicates the state of the eDP panel power.
+ * NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER_OFF
+ * This eDP panel is powered off.
+ * NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER_ON
+ * This eDP panel is powered on.
+ * NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF
+ * This field tells the client if the DPCD power off command
+ * should be used for the current eDP panel.
+ * NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF_ENABLE
+ * This eDP panel can use DPCD to power off the panel.
+ * NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF_DISABLE
+ * This eDP panel cannot use DPCD to power off the panel.
+ * NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE
+ * This field tells the client the current eDP panel DPCD SET_POWER (0x600) status.
+ * NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE_D0
+ * This eDP panel is currently up and in full power mode.
+ * NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE_D3
+ * This eDP panel is currently in standby.
+ */
+#define NV0073_CTRL_CMD_DP_GET_EDP_DATA (0x731361U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_EDP_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_GET_EDP_DATA_PARAMS_MESSAGE_ID (0x61U)
+
+typedef struct NV0073_CTRL_DP_GET_EDP_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 data;
+} NV0073_CTRL_DP_GET_EDP_DATA_PARAMS;
+
+#define NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER             0:0
+#define NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER_OFF         (0x00000000U)
+#define NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER_ON          (0x00000001U)
+#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF          1:1
+#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF_ENABLE   (0x00000000U)
+#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF_DISABLE  (0x00000001U)
+#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE        2:2
+#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE_D0     (0x00000000U)
+#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE_D3     (0x00000001U)
+/*
+ * NV0073_CTRL_CMD_DP_CONFIG_STREAM
+ *
+ * This command sets various multi/single stream related params
+ * for a given head.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * Head
+ * Specifies the head index for the stream.
+ * sorIndex
+ * Specifies the SOR index for the stream.
+ * dpLink
+ * Specifies the DP link: either 0 or 1 (A, B)
+ * bEnableOverride
+ * Specifies whether we're manually configuring this stream.
+ * If not set, none of the remaining settings have any effect.
+ * bMST
+ * Specifies whether in Multistream or Singlestream mode.
+ * MST/SST
+ * Structures for passing in either Multistream or Singlestream params
+ * slotStart
+ * Specifies the start value of the timeslot
+ * slotEnd
+ * Specifies the end value of the timeslot
+ * PBN
+ * Specifies the PBN for the timeslot.
+ * minHBlank
+ * Specifies the min HBlank
+ * minVBlank
+ * Specifies the min VBlank
+ * sendACT -- deprecated. A new control call has been added.
+ * Specifies whether ACT has to be sent or not.
+ * tuSize
+ * Specifies TU size value
+ * watermark
+ * Specifies stream watermark.
+ * bEnableTwoHeadOneOr
+ * Whether two head one OR is enabled. If this is set then RM will
+ * replicate SF settings of the Master head on the Slave head. The head
+ * index passed should be that of the Master head.
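+ *
+ * Example (illustrative sketch only; NvRmControl(), hClient and hDispCommon
+ * are assumed names, and the slot/PBN numbers below are made-up illustrative
+ * values): manually configure an MST stream on head 0 / SOR 0:
+ *
+ *   NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS s = { 0 };
+ *   s.head            = 0;
+ *   s.sorIndex        = 0;
+ *   s.dpLink          = 0;           // link A
+ *   s.bEnableOverride = NV_TRUE;     // required for the rest to take effect
+ *   s.bMST            = NV_TRUE;
+ *   s.MST.slotStart   = 1;           // illustrative timeslot range
+ *   s.MST.slotEnd     = 10;
+ *   s.MST.PBN         = 640;         // illustrative PBN value
+ *   status = NvRmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_CONFIG_STREAM, &s, sizeof(s));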
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_GENERIC: when this command has already been called
+ *
+ */
+#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID (0x62U)
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  head;
+    NvU32  sorIndex;
+    NvU32  dpLink;
+
+    NvBool bEnableOverride;
+    NvBool bMST;
+    NvU32  singleHeadMultistreamMode;
+    NvU32  hBlankSym;
+    NvU32  vBlankSym;
+    NvU32  colorFormat;
+    NvBool bEnableTwoHeadOneOr;
+
+    struct {
+        NvU32  slotStart;
+        NvU32  slotEnd;
+        NvU32  PBN;
+        NvU32  Timeslice;
+        NvBool sendACT;          // deprecated - use NV0073_CTRL_CMD_DP_SEND_ACT instead
+        NvU32  singleHeadMSTPipeline;
+        NvBool bEnableAudioOverRightPanel;
+    } MST;
+
+    struct {
+        NvBool bEnhancedFraming;
+        NvU32  tuSize;
+        NvU32  waterMark;
+        NvBool bEnableAudioOverRightPanel;
+    } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DP_SET_RATE_GOV
+ *
+ * This command enables rate governing for an MST stream.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * Head
+ * Specifies the head index for the stream.
+ * sorIndex
+ * Specifies the SOR index for the stream.
+ * flags
+ * Specifies rate governing enable, trigger type, wait-on-trigger, and operation type.
+ *
+ * _FLAGS_OPERATION: whether this control call should program, or check the status of
+ * the previous operation.
+ *
+ * _FLAGS_STATUS: Out only. The caller should check the status of an
+ * _FLAGS_OPERATION_CHECK_STATUS request through this bit.
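+ *
+ * Example (illustrative sketch only; NvRmControl(), hClient and hDispCommon
+ * are assumed names; the DRF helpers are the field macros from nvmisc.h):
+ * program rate governing on a LOADV trigger, then poll for completion:
+ *
+ *   NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS rg = { 0 };
+ *   rg.head     = 0;
+ *   rg.sorIndex = 0;
+ *   rg.flags    = DRF_DEF(0073_CTRL_CMD_DP, _SET_RATE_GOV_FLAGS, _ENABLE_RG, _ON) |
+ *                 DRF_DEF(0073_CTRL_CMD_DP, _SET_RATE_GOV_FLAGS, _TRIGGER_MODE, _LOADV) |
+ *                 DRF_DEF(0073_CTRL_CMD_DP, _SET_RATE_GOV_FLAGS, _OPERATION, _PROGRAM);
+ *   status = NvRmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_SET_RATE_GOV, &rg, sizeof(rg));
+ *   // Later, issue the call again with _OPERATION_CHECK_STATUS and test the
+ *   // _FLAGS_STATUS bit for _PASS / _FAIL.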
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_GENERIC: when this command has already been called
+ *
+ */
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV (0x731363U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS_MESSAGE_ID (0x63U)
+
+typedef struct NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NvU32 sorIndex;
+    NvU32 flags;
+} NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_ENABLE_RG              0:0
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_ENABLE_RG_OFF          (0x00000000U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_ENABLE_RG_ON           (0x00000001U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_TRIGGER_MODE           1:1
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_TRIGGER_MODE_LOADV     (0x00000000U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_TRIGGER_MODE_IMMEDIATE (0x00000001U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_WAIT_TRIGGER           2:2
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_WAIT_TRIGGER_OFF       (0x00000000U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_WAIT_TRIGGER_ON        (0x00000001U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_OPERATION              3:3
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_OPERATION_PROGRAM      (0x00000000U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_OPERATION_CHECK_STATUS (0x00000001U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_STATUS                 31:31
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_STATUS_FAIL            (0x00000000U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_STATUS_PASS            (0x00000001U)
+
+/*
+ * NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT
+ *
+ * This call is used by the displayport library. Once
+ * all of the platforms have been ported, this call will be
+ * deprecated and its behavior made the default.
+ *
+ * Disables automatic watermark programming
+ * Disables automatic DP IRQ handling (CP IRQ)
+ * Disables automatic retry on defers
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ *
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ *
+ */
+#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID (0x65U)
+
+typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS {
+    NvU32 subDeviceInstance;
+} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DP_SET_ECF
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * sorIndex
+ * This parameter specifies the index of the SOR for which the ECF
+ * should be updated.
+ * ecf
+ * This parameter has the ECF bit mask.
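+ *
+ * Example (illustrative sketch only; NvRmControl(), hClient and hDispCommon
+ * are assumed names, and the field settings below are purely illustrative):
+ * force-clear the ECF mask for SOR 0:
+ *
+ *   NV0073_CTRL_CMD_DP_SET_ECF_PARAMS ecfParams = { 0 };
+ *   ecfParams.sorIndex       = 0;
+ *   ecfParams.ecf            = 0;         // 64-bit ECF bit mask
+ *   ecfParams.bForceClearEcf = NV_TRUE;
+ *   ecfParams.bAddStreamBack = NV_FALSE;
+ *   status = NvRmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_SET_ECF,
+ *                        &ecfParams, sizeof(ecfParams));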
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV0073_CTRL_CMD_DP_SET_ECF (0x731366U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_ECF_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SET_ECF_PARAMS_MESSAGE_ID (0x66U)
+
+typedef struct NV0073_CTRL_CMD_DP_SET_ECF_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  sorIndex;
+    NV_DECLARE_ALIGNED(NvU64 ecf, 8);
+    NvBool bForceClearEcf;
+    NvBool bAddStreamBack;
+} NV0073_CTRL_CMD_DP_SET_ECF_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DP_SEND_ACT
+ *
+ * This command sends ACT.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ *
+ * displayId
+ * Specifies the root port displayId for which the trigger has to be done.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_GENERIC: when this command has already been called
+ *
+ */
+#define NV0073_CTRL_CMD_DP_SEND_ACT (0x731367U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS_MESSAGE_ID (0x67U)
+
+typedef struct NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+} NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_DP_GET_CAPS
+ *
+ * This command returns the following info:
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * sorIndex
+ * Specifies the SOR index.
+ * dpVersionsSupported
+ * Specifies the DP versions supported by the GPU
+ * UHBRSupportedByGpu
+ * Bitmask to specify the UHBR link rates supported by the GPU.
+ * bIsMultistreamSupported
+ * Returns NV_TRUE if MST is supported by the GPU, else NV_FALSE
+ * bIsSCEnabled
+ * Returns NV_TRUE if stream cloning is supported by the GPU, else NV_FALSE
+ * maxLinkRate
+ * Returns the maximum allowed orclk for the DP mode of the SOR.
+ * Per the _MAX_LINK_RATE defines below, 1 signifies 1.62 (RBR),
+ * 2 signifies 2.70 (HBR), 3 signifies 5.40 (HBR2), 4 signifies 8.10 (HBR3).
+ * bHasIncreasedWatermarkLimits
+ * Returns NV_TRUE if the GPU uses higher watermark limits, else NV_FALSE
+ * bIsPC2Disabled
+ * Returns NV_TRUE if the VBIOS flag to disable PostCursor2 is set, else NV_FALSE
+ * bFECSupported
+ * Returns NV_TRUE if the GPU supports FEC, else NV_FALSE
+ * bIsTrainPhyRepeater
+ * Returns NV_TRUE if the LTTPR Link Training feature is set
+ * bOverrideLinkBw
+ * Returns NV_TRUE if DFP limits defined in the DCB have to be honored, else NV_FALSE
+ * bUseRgFlushSequence
+ * Returns NV_TRUE if the GPU uses the new RG flush design
+ * bSupportDPDownSpread
+ * Returns NV_TRUE if the GPU supports downspread.
+ * bAvoidHBR3
+ * Returns NV_TRUE if we need to avoid HBR3 as much as possible
+ * bIsDpTunnelingHwBugWarEnabled
+ * Returns NV_TRUE if the USB4 DP tunneling HW bug WAR is enabled for the chip.
+ * + * DSC caps + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * + */ + +#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U) + +typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS { + NvU32 subDeviceInstance; + NvU32 sorIndex; + NvU32 maxLinkRate; + NvU32 dpVersionsSupported; + NvU32 UHBRSupportedByGpu; + NvBool bIsMultistreamSupported; + NvBool bIsSCEnabled; + NvBool bHasIncreasedWatermarkLimits; + NvBool bIsPC2Disabled; + NvBool isSingleHeadMSTSupported; + NvBool bFECSupported; + NvBool bIsTrainPhyRepeater; + NvBool bOverrideLinkBw; + NvBool bUseRgFlushSequence; + NvBool bSupportDPDownSpread; + NvBool bAvoidHBR3; + NvBool bIsDpTunnelingHwBugWarEnabled; + NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC; +} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS; + +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0 +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1 +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U) + + +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0 +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U) + + +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U) + +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U) + +#define NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES (0x73136aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_MSA_PROPERTIES_SYNC_POLARITY_LOW (0U) +#define NV0073_CTRL_CMD_DP_MSA_PROPERTIES_SYNC_POLARITY_HIGH (1U) + +typedef struct NV0073_CTRL_DP_MSA_PROPERTIES_MASK { + NvU8 miscMask[2]; + NvBool bRasterTotalHorizontal; + NvBool bRasterTotalVertical; + NvBool bActiveStartHorizontal; + NvBool bActiveStartVertical; + NvBool bSurfaceTotalHorizontal; + NvBool bSurfaceTotalVertical; + NvBool bSyncWidthHorizontal; + NvBool bSyncPolarityHorizontal; + NvBool bSyncHeightVertical; + NvBool bSyncPolarityVertical; + NvBool bReservedEnable[3]; +} NV0073_CTRL_DP_MSA_PROPERTIES_MASK; + +typedef struct NV0073_CTRL_DP_MSA_PROPERTIES_VALUES { + NvU8 misc[2]; + NvU16 rasterTotalHorizontal; + 
+    NvU16 rasterTotalVertical;
+    NvU16 activeStartHorizontal;
+    NvU16 activeStartVertical;
+    NvU16 surfaceTotalHorizontal;
+    NvU16 surfaceTotalVertical;
+    NvU16 syncWidthHorizontal;
+    NvU16 syncPolarityHorizontal;
+    NvU16 syncHeightVertical;
+    NvU16 syncPolarityVertical;
+    NvU8  reserved[3];
+} NV0073_CTRL_DP_MSA_PROPERTIES_VALUES;
+
+#define NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS_MESSAGE_ID (0x6AU)
+
+typedef struct NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvBool bEnableMSA;
+    NvBool bStereoPhaseInverse;
+    NvBool bCacheMsaOverrideForNextModeset;
+    NV0073_CTRL_DP_MSA_PROPERTIES_MASK featureMask;
+    NV0073_CTRL_DP_MSA_PROPERTIES_VALUES featureValues;
+    NV_DECLARE_ALIGNED(struct NV0073_CTRL_DP_MSA_PROPERTIES_VALUES *pFeatureDebugValues, 8);
+} NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT
+ *
+ * This command can be used to invoke a fake interrupt for the operation of a DP 1.2 branch device.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * interruptType
+ * This parameter specifies the type of fake interrupt to be invoked. Possible values are:
+ * 0 => IRQ
+ * 1 => HPDPlug
+ * 2 => HPDUnPlug
+ * displayId
+ * should be for DP only
+ *
+ */
+
+#define NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT (0x73136bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS_MESSAGE_ID (0x6BU)
+
+typedef struct NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 interruptType;
+} NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_IRQ    (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PLUG   (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_UNPLUG (0x00000002U)
+
+/*
+ * NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG
+ *
+ * This command records the MS displayId lit up by the driver for further use by the VBIOS.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * displayId
+ * should be for DP only
+ * activeDevAddr
+ * Active MS panel address
+ * sorIndex
+ * SOR Index
+ * dpLink
+ * DP Sub Link Index
+ * hopCount
+ * Maximum hop count in the MS address
+ * dpMsDevAddrState
+ * DP Multistream Device Address State.
+ * The values can be
+ *
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_TIMEOUT
+ *
+ */
+#define NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG (0x73136cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS_MESSAGE_ID (0x6CU)
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 activeDevAddr;
+    NvU32 sorIndex;
+    NvU32 dpLink;
+    NvU32 hopCount;
+    NvU32 dpMsDevAddrState;
+} NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS;
+
+
+
+/*
+* NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT
+*
+* This command configures a new bit, NV_PDISP_SF_DP_LINKCTL_TRIGGER_SELECT,
+* to indicate which pipeline will handle the
+* time slot allocation in single head MST mode
+*
+* subDeviceInstance
+* This parameter specifies the subdevice instance within the
+* NV04_DISPLAY_COMMON parent device to which the operation should be
+* directed. This parameter must specify a value between zero and the
+* total number of subdevices within the parent device. This parameter
+* should be set to zero for default behavior
+* Head
+* Specifies the head index for the stream
+* sorIndex
+* Specifies the SOR index for the stream
+* streamIndex
+* Stream Identifier
+*
+*
+* Possible status values returned are:
+* NV_OK
+* NV_ERR_INVALID_ARGUMENT
+* NV_ERR_GENERIC: when this command has already been called
+*
+*/
+#define NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT (0x73136fU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS_MESSAGE_ID (0x6FU)
+
+typedef struct NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NvU32 sorIndex;
+    NvU32 singleHeadMSTPipeline;
+} NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS;
+
+/*
+* NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM
+*
+* This call is used by the displayport library & clients of RM.
+* Its main function is to configure single head multi stream mode;
+* this call configures internal RM data structures to support the required mode.
+*
+* subDeviceInstance
+* This parameter specifies the subdevice instance within the
+* NV04_DISPLAY_COMMON parent device to which the operation should be
+* directed. This parameter must specify a value between zero and the
+* total number of subdevices within the parent device. This parameter
+* should be set to zero for default behavior.
+*
+* displayIDs
+* This parameter specifies the array of DP displayIds to be configured which are driven out from a single head.
+*
+* numStreams
+* This parameter specifies the number of streams driven from a single head,
+* e.g. for 2SST & 2MST its value is 2.
+*
+* mode
+* This parameter specifies the single head multi stream mode to be configured.
+*
+* bSetConfig
+* This parameter configures single head multistream mode.
+* If TRUE, it sets SST or MST based on the 'mode' parameter and updates internal driver data structures with the given information.
+* If FALSE, it clears the configuration of single head multi stream mode.
+*
+* vbiosPrimaryDispIdIndex
+* This parameter specifies the VBIOS master displayID index in the displayIDs input array.
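+*
+* Example (illustrative sketch only; NvRmControl(), hClient, hDispCommon and
+* the two displayId variables are assumed names, not defined in this header):
+* configure a 2-stream SST configuration driven from a single head:
+*
+*   NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS shms = { 0 };
+*   shms.displayIDs[0]           = primaryDisplayId;
+*   shms.displayIDs[1]           = secondaryDisplayId;
+*   shms.numStreams              = 2;
+*   shms.mode                    = NV0073_CTRL_CMD_DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST;
+*   shms.bSetConfig              = NV_TRUE;
+*   shms.vbiosPrimaryDispIdIndex = 0;
+*   status = NvRmControl(hClient, hDispCommon,
+*                        NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM,
+*                        &shms, sizeof(shms));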
+*
+* Possible status values returned are:
+* NV_OK
+* NV_ERR_INVALID_ARGUMENT
+* NV_ERR_NOT_SUPPORTED
+*
+*/
+#define NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM (0x73136eU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SINGLE_HEAD_MAX_STREAMS (0x00000002U)
+#define NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS_MESSAGE_ID (0x6EU)
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayIDs[NV0073_CTRL_CMD_DP_SINGLE_HEAD_MAX_STREAMS];
+    NvU32  numStreams;
+    NvU32  mode;
+    NvBool bSetConfig;
+    NvU8   vbiosPrimaryDispIdIndex;
+} NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SINGLE_HEAD_MULTI_STREAM_NONE     (0x00000000U)
+#define NV0073_CTRL_CMD_DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST (0x00000001U)
+#define NV0073_CTRL_CMD_DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST (0x00000002U)
+
+/*
+* NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL
+*
+* This command configures a new bit, NV_PDISP_SF_DP_LINKCTL_TRIGGER_ALL,
+* to indicate whether all of the pipelines should take effect on ACT (sorFlushUpdates)
+* in single head MST mode
+*
+* subDeviceInstance
+* This parameter specifies the subdevice instance within the
+* NV04_DISPLAY_COMMON parent device to which the operation should be
+* directed. This parameter must specify a value between zero and the
+* total number of subdevices within the parent device. This parameter
+* should be set to zero for default behavior
+* Head
+* Specifies the head index for the stream
+* sorIndex
+* Specifies the SOR index for the stream
+* streamIndex
+* Stream Identifier
+*
+*
+* Possible status values returned are:
+* NV_OK
+* NV_ERR_INVALID_ARGUMENT
+* NV_ERR_GENERIC: when this command has already been called
+*
+*/
+#define NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL (0x731370U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS_MESSAGE_ID (0x70U)
+
+typedef struct NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  head;
+    NvBool enable;
+} NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS;
+
+/* NV0073_CTRL_CMD_DP_RETRIEVE_DP_RING_BUFFER
+ *
+ * This command retrieves a buffer from RM for the
+ * DP library to dump logs into
+ *
+ *
+ * Possible status values returned include:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0073_CTRL_CMD_DP_RETRIEVE_DP_RING_BUFFER (0x731371U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_RETRIEVE_DP_RING_BUFFER_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_RETRIEVE_DP_RING_BUFFER_PARAMS_MESSAGE_ID (0x71U)
+
+typedef struct NV0073_CTRL_CMD_DP_RETRIEVE_DP_RING_BUFFER_PARAMS {
+    NV_DECLARE_ALIGNED(NvU8 *pDpRingBuffer, 8);
+    NvU8  ringBufferType;
+    NvU32 numRecords;
+} NV0073_CTRL_CMD_DP_RETRIEVE_DP_RING_BUFFER_PARAMS;
+
+
+
+/*
+* NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA
+*
+* This command collects the DP AUX log from the RM aux buffer and
+* sends it to the application.
+*
+* dpAuxBufferReadSize
+* Specifies the number of logs to be read from the
+* AUX buffer in RM
+* dpNumMessagesRead
+* Specifies the number of logs read from the AUX buffer
+* dpAuxBuffer
+* The local buffer to copy the specified number of logs
+* from RM to the user application
+*
+*
+* Possible status values returned are:
+* NV_OK
+* NV_ERR_INVALID_ARGUMENT
+* NV_ERR_GENERIC: when this command has already been called
+*
+*
+* DPAUXPACKET - This structure holds the log information
+* auxPacket - carries the hex dump of the message transaction
+* auxEvents - contains information about the request and reply types
+* auxRequestTimeStamp - request timestamp
+* auxMessageReqSize - request message size
+* auxMessageReplySize - reply message size (how much information was actually sent by the receiver)
+* auxOutPort - DP port number
+* auxPortAddress - address to which data was requested to be read or written
+* auxReplyTimeStamp - reply timestamp
+* auxCount - serial number to keep track of transactions
+*/
+
+/* Maximum DP message size is 16 as per the protocol */
+#define DP_MAX_MSG_SIZE    16U
+#define MAX_LOGS_PER_POLL  50U
+
+/* Various kinds of DP AUX transactions */
+#define NV_DP_AUXLOGGER_REQUEST_TYPE              3:0
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_NULL         0x00000000U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_I2CWR        0x00000001U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_I2CREQWSTAT  0x00000002U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_MOTWR        0x00000003U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_MOTREQWSTAT  0x00000004U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_AUXWR        0x00000005U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_I2CRD        0x00000006U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_MOTRD        0x00000007U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_AUXRD        0x00000008U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_UNKNOWN      0x00000009U
+
+#define NV_DP_AUXLOGGER_REPLY_TYPE                7:4
+#define NV_DP_AUXLOGGER_REPLY_TYPE_NULL           0x00000000U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_SB_ACK         0x00000001U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_RETRY          0x00000002U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_TIMEOUT        0x00000003U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_DEFER          0x00000004U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_DEFER_TO       0x00000005U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_ACK            0x00000006U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_ERROR          0x00000007U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_UNKNOWN        0x00000008U
+
+#define NV_DP_AUXLOGGER_EVENT_TYPE                9:8
+#define NV_DP_AUXLOGGER_EVENT_TYPE_AUX            0x00000000U
+#define NV_DP_AUXLOGGER_EVENT_TYPE_HOT_PLUG       0x00000001U
+#define NV_DP_AUXLOGGER_EVENT_TYPE_HOT_UNPLUG     0x00000002U
+#define NV_DP_AUXLOGGER_EVENT_TYPE_IRQ            0x00000003U
+
+#define NV_DP_AUXLOGGER_AUXCTL_CMD                15:12
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_INIT           0x00000000U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_I2CWR          0x00000000U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_I2CRD          0x00000001U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_I2CREQWSTAT    0x00000002U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_MOTWR          0x00000004U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_MOTRD          0x00000005U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_MOTREQWSTAT    0x00000006U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_AUXWR          0x00000008U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_AUXRD          0x00000009U
+
+
+typedef struct DPAUXPACKET {
+    NvU32 auxEvents;
+    NvU32 auxRequestTimeStamp;
+    NvU32 auxMessageReqSize;
+    NvU32 auxMessageReplySize;
+    NvU32 auxOutPort;
+    NvU32 auxPortAddress;
+    NvU32 auxReplyTimeStamp;
+    NvU32 auxCount;
+    NvU8  auxPacket[DP_MAX_MSG_SIZE];
+} DPAUXPACKET;
+typedef struct DPAUXPACKET *PDPAUXPACKET;
+
+#define NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA (0x731373U) /* finn: Evaluated from
"(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS_MESSAGE_ID (0x73U) + +typedef struct NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS { + //In + NvU32 subDeviceInstance; + NvU32 dpAuxBufferReadSize; + + //Out + NvU32 dpNumMessagesRead; + DPAUXPACKET dpAuxBuffer[MAX_LOGS_PER_POLL]; +} NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS; + + + + +/* NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES + * + * This setup link rate table for target display to enable indexed link rate + * and export valid link rates back to client. Client may pass empty table to + * reset previous setting. + * + * subDeviceInstance + * client will give a subdevice to get right pGpu/pDisp for it + * displayId + * DisplayId of the display for which the client targets + * linkRateTbl + * Link rates in 200KHz as native granularity from eDP 1.4 + * linkBwTbl + * Link rates valid for client to apply to + * linkBwCount + * Total valid link rates + * + * Possible status values returned include: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U + +#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID (0x77U) + +typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS { + // In + NvU32 subDeviceInstance; + NvU32 displayId; + NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES]; + + // Out + NvU16 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES]; + NvU8 linkBwCount; +} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS; + + +/* + * NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES + * + * This command is used to not depend on supervisor interrupts for setting the + * stereo msa params. We will not cache the values and can toggle stereo using + * this ctrl call on demand. Note that this control call will only change stereo + * settings and will leave other settings as is. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * should be for DP only + * bEnableMSA + * To enable or disable MSA + * bStereoPhaseInverse + * To enable or disable Stereo Phase Inverse value + * featureMask + * Enable/Disable mask of individual MSA property. 
+ * featureValues
+ * MSA property value to write
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_TIMEOUT
+ *
+ */
+#define NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES (0x731378U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS_MESSAGE_ID (0x78U)
+
+typedef struct NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bEnableMSA;
+    NvBool bStereoPhaseInverse;
+    NV0073_CTRL_DP_MSA_PROPERTIES_MASK featureMask;
+    NV0073_CTRL_DP_MSA_PROPERTIES_VALUES featureValues;
+} NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_DP_CONFIGURE_FEC
+ *
+ * This command is used to enable/disable FEC on the DP Mainlink.
+ * FEC is a prerequisite to DSC. When enabling, this should be called only
+ * after LT completes (including PostLT LQA).
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ *
+ * displayId
+ * Only a single displayId bit may be set, and it must be a DP display.
+ *
+ * bEnableFec
+ * To enable or disable FEC
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ *
+ */
+#define NV0073_CTRL_CMD_DP_CONFIGURE_FEC (0x73137aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS_MESSAGE_ID (0x7AU)
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bEnableFec;
+} NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior
+ * cmd
+ * This parameter is an input to this command.
+ * Here are the currently defined fields:
+ * NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER
+ * Set to specify what operation to run.
+ * NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER_UP
+ * Request to power up the pad.
+ * NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER_DOWN
+ * Request to power down the pad.
+ * linkBw
+ * This parameter is used to pass in the link bandwidth required to run the
+ * power up sequence. Refer to enum DP_LINK_BANDWIDTH for valid values.
+ * laneCount
+ * This parameter is used to pass the lane count.
+ * sorIndex
+ * This parameter is used to pass the SOR index.
+ * sublinkIndex
+ * This parameter is used to pass the sublink index. Please refer to
+ * enum DFPLINKINDEX for valid values
+ * priPadLinkIndex
+ * This parameter is used to pass the padlink index for the primary link.
+ * Please refer to enum DFPPADLINK for valid index values for Link A~F.
+ * secPadLinkIndex
+ * This parameter is used to pass the padlink index for the secondary link.
+ * For Single SST pass in NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PADLINK_INDEX_INVALID
+ * bEnableSpread
+ * This parameter is a boolean value indicating whether spread is to be enabled or disabled.
+ */
+
+#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD (0x73137bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS_MESSAGE_ID (0x7BU)
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  cmd;
+    NvU32  linkBw;
+    NvU32  laneCount;
+    NvU32  sorIndex;
+    NvU32  sublinkIndex;      // sublink A/B
+    NvU32  priPadLinkIndex;   // padlink A/B/C/D/E/F
+    NvU32  secPadLinkIndex;   // padlink A/B/C/D/E/F for the secondary link in the DualSST case.
+    NvBool bEnableSpread;
+} NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER              0:0
+#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER_UP           (0x00000000U)
+#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER_DOWN         (0x00000001U)
+
+#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PADLINK_INDEX_INVALID  (0x000000FFU)
+
+/*
+ * NV0073_CTRL_CMD_DP_AUXCH_I2C_TRANSFER_CTRL
+ *
+ * This command can be used to perform an I2C bulk transfer over the
+ * DP AUX channel. This is the DisplayPort-specific implementation
+ * for sending bulk data over the DpAux channel, by splitting up the
+ * data into pieces and retrying for pieces that aren't ACK'd.
+ *
+ * subDeviceInstance [IN]
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * displayId [IN]
+ * This parameter specifies the ID of the display on which the I2C
+ * transfer should be performed. The display ID must be a DFP display.
+ * If more than one displayId bit is set or the displayId is not a DFP,
+ * this call will return NV_ERR_INVALID_ARGUMENT.
+ * addr [IN]
+ * This parameter is an input to this command. The addr parameter follows
+ * Section 2.4 of the DisplayPort spec, and the client should refer to the
+ * DisplayPort spec for valid addresses. Only the first 20 bits are valid.
+ * bWrite [IN]
+ * This parameter specifies whether the command is an I2C write (NV_TRUE) or
+ * an I2C read (NV_FALSE).
+ * data [IN/OUT]
+ * In the case of a read transaction, this parameter returns the data from
+ * the transaction request. In the case of a write transaction, the client
+ * should write to this buffer for the data to send.
+ * size [IN/OUT]
+ * Specifies how many data bytes to read/write depending on the
+ * transaction type.
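+ *
+ * Example (illustrative sketch only; NvRmControl(), hClient and hDispCommon
+ * are assumed names; 0x50 is the conventional EDID PROM address, used here
+ * purely for illustration): read 128 bytes over AUX I2C:
+ *
+ *   NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS i2c = { 0 };
+ *   i2c.displayId = displayId;
+ *   i2c.addr      = 0x50;        // target address, per DP spec Section 2.4
+ *   i2c.bWrite    = NV_FALSE;    // I2C read
+ *   i2c.size      = 128;         // must be <= NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE
+ *   status = NvRmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_AUXCH_I2C_TRANSFER_CTRL,
+ *                        &i2c, sizeof(i2c));
+ *   // On NV_OK, i2c.data[] holds i2c.size bytes returned by the sink.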
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_DP_AUXCH_I2C_TRANSFER_CTRL (0x73137cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE 256U
+
+#define NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS_MESSAGE_ID (0x7CU)
+
+typedef struct NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  addr;
+    NvBool bWrite;
+    NvU8   data[NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE];
+    NvU32  size;
+} NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DP_ENABLE_VRR
+ *
+ * This command is used to enable VRR.
+ *
+ * subDeviceInstance [IN]
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior
+ * displayId [IN]
+ * This parameter is an input to this command; it specifies the ID of the
+ * display the client is targeting.
+ * The display ID must be a DP display.
+ * If more than one displayId bit is set or the displayId is not a DP,
+ * this call will return NV_ERR_INVALID_ARGUMENT.
+ * cmd [IN]
+ * This parameter is an input to this command.
+ *
+ * _STAGE: specifies the stage id to execute in the VRR enablement sequence.
+ * _MONITOR_ENABLE_BEGIN: Send command to the monitor to start the monitor
+ * enablement procedure.
+ * _MONITOR_ENABLE_CHALLENGE: Send challenge to the monitor
+ * _MONITOR_ENABLE_CHECK: Read digest from the monitor, and verify
+ * that the result is valid.
+ * _DRIVER_ENABLE_BEGIN: Send command to the monitor to start the driver
+ * enablement procedure.
+ * _DRIVER_ENABLE_CHALLENGE: Read challenge from the monitor and write back
+ * the corresponding digest.
+ * _DRIVER_ENABLE_CHECK: Check if monitor enablement worked.
+ * _RESET_MONITOR: Set the FW state machine to a known state.
+ * _INIT_PUBLIC_INFO: Send command to the monitor to prepare the public info.
+ * _GET_PUBLIC_INFO: Read the public info from the monitor.
+ * _STATUS_CHECK: Check if the monitor is ready for the next command.
+ * result [OUT]
+ * This is an output parameter to reflect the result of the operation.
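+ *
+ * Example (illustrative sketch only; NvRmControl(), hClient and hDispCommon
+ * are assumed names; the DRF helpers are the field macros from nvmisc.h):
+ * drive one stage of the enablement sequence, then poll for readiness:
+ *
+ *   NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS vrr = { 0 };
+ *   vrr.displayId = displayId;
+ *   vrr.cmd = DRF_DEF(0073_CTRL_DP, _CMD_ENABLE_VRR_CMD, _STAGE,
+ *                     _MONITOR_ENABLE_BEGIN);
+ *   status = NvRmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_ENABLE_VRR, &vrr, sizeof(vrr));
+ *   // Then issue _STAGE_STATUS_CHECK until vrr.result is no longer
+ *   // NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING.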
+ */
+#define NV0073_CTRL_CMD_DP_ENABLE_VRR (0x73137dU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS_MESSAGE_ID (0x7DU)
+
+typedef struct NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 cmd;
+    NvU32 result;
+} NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS;
+
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE                           3:0
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN     (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK     (0x00000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN      (0x00000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE  (0x00000004U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK      (0x00000005U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR            (0x00000006U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO         (0x00000007U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO          (0x00000008U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK             (0x00000009U)
+
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK                          (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING                     (0x80000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR                  (0x80000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR                 (0x80000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR                (0x80000004U)
+
+/*
+ * NV0073_CTRL_CMD_DP_GET_GENERIC_INFOFRAME
+ *
+ * This command is used to capture the display output packets for the DP protocol.
+ * Commonly supported packets are the Dynamic Range and Mastering infoframe SDP
+ * for HDR, and the VSC SDP for colorimetry and pixel encoding info.
+ *
+ * displayID (in)
+ * This parameter specifies the displayID for the display resource to configure.
+ * subDeviceInstance (in)
+ * This parameter specifies the subdevice instance within the NV04_DISPLAY_COMMON
+ * parent device to which the operation should be directed.
+ * infoframeIndex (in)
+ * HW provides support to program 2 generic infoframes per frame for DP.
+ * This parameter indicates which infoframe packet is to be captured.
+ * Possible flags are as follows:
+ * NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_CAPTURE_MODE
+ * This flag indicates the INFOFRAME that needs to be read.
+ * Set to _INFOFRAME0 if RM should read GENERIC_INFOFRAME
+ * Set to _INFOFRAME1 if RM should read GENERIC_INFOFRAME1
+ * packet (out)
+ * The buffer into which the infoframe packet is read.
+ * bTransmitControl (out)
+ * This gives the transmit mode of the infoframe.
+ * If set, the infoframe will be sent as soon as possible and then on
+ * every frame during vblank.
+ * If cleared, the infoframe will be sent once, as soon as possible.
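+ *
+ * Example (illustrative sketch only; NvRmControl(), hClient and hDispCommon
+ * are assumed names; the DRF helpers are the field macros from nvmisc.h):
+ * read back generic infoframe 0:
+ *
+ *   NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS gi = { 0 };
+ *   gi.displayId      = displayId;
+ *   gi.infoframeIndex = DRF_DEF(0073_CTRL_DP, _GET_GENERIC_INFOFRAME,
+ *                               _CAPTURE_MODE, _INFOFRAME0);
+ *   status = NvRmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_GET_GENERIC_INFOFRAME,
+ *                        &gi, sizeof(gi));
+ *   // On NV_OK, gi.packet[] holds the SDP bytes and gi.bTransmitControl
+ *   // gives the transmit mode.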
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_DP_GET_GENERIC_INFOFRAME (0x73137eU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_GENERIC_INFOFRAME_MAX_PACKET_SIZE 36U
+
+#define NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS_MESSAGE_ID (0x7EU)
+
+typedef struct NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  infoframeIndex;
+    NvU8   packet[NV0073_CTRL_DP_GENERIC_INFOFRAME_MAX_PACKET_SIZE];
+    NvBool bTransmitControl;
+} NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS;
+
+
+#define NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_CAPTURE_MODE             0:0
+#define NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_CAPTURE_MODE_INFOFRAME0  (0x0000000U)
+#define NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_CAPTURE_MODE_INFOFRAME1  (0x0000001U)
+
+
+/*
+ * NV0073_CTRL_CMD_DP_GET_MSA_ATTRIBUTES
+ *
+ * This command is used to capture the various data attributes sent in the MSA for the DP protocol.
+ * Refer to Table 2-94, 'MSA Data Fields', in the DP 1.4a spec for MSA data field descriptions.
+ *
+ * displayID (in)
+ * This parameter specifies the displayID for the display resource to configure.
+ * subDeviceInstance (in)
+ * This parameter specifies the subdevice instance within the NV04_DISPLAY_COMMON
+ * parent device to which the operation should be directed.
+ * mvid, nvid (out)
+ * Video timestamp used by the DP sink for regenerating the pixel clock.
+ * misc0, misc1 (out)
+ * Miscellaneous MSA attributes.
+ * hTotal, vTotal (out)
+ * Htotal measured in pixel count and vtotal measured in line count.
+ * hActiveStart, vActiveStart (out)
+ * Active start measured from the start of the leading edge of the sync pulse.
+ * hActiveWidth, vActiveWidth (out)
+ * Active video width and height.
+ * hSyncWidth, vSyncWidth (out)
+ * Width of the sync pulse.
+ * hSyncPolarity, vSyncPolarity (out)
+ * Polarity of the sync pulse.
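+ *
+ * Example (illustrative sketch only; NvRmControl(), hClient and hDispCommon
+ * are assumed names; DRF_VAL is the field-extraction macro from nvmisc.h):
+ * capture the MSA and extract the 24-bit Mvid field:
+ *
+ *   NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS msa = { 0 };
+ *   msa.displayId = displayId;
+ *   status = NvRmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_GET_MSA_ATTRIBUTES,
+ *                        &msa, sizeof(msa));
+ *   if (status == NV_OK) {
+ *       NvU32 mvid = DRF_VAL(0073_CTRL_DP, _MSA_ATTRIBUTES, _MVID, msa.mvid);
+ *       // misc0/misc1, timings and polarities are likewise extracted with
+ *       // the _MSA_ATTRIBUTES field defines below.
+ *   }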
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_DP_GET_MSA_ATTRIBUTES (0x73137fU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_MSA_MAX_DATA_SIZE 7U + +#define NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS_MESSAGE_ID (0x7FU) + +typedef struct NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 mvid; + NvU32 nvid; + NvU8 misc0; + NvU8 misc1; + NvU16 hTotal; + NvU16 vTotal; + NvU16 hActiveStart; + NvU16 vActiveStart; + NvU16 hActiveWidth; + NvU16 vActiveWidth; + NvU16 hSyncWidth; + NvU16 vSyncWidth; + NvBool hSyncPolarity; + NvBool vSyncPolarity; +} NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS; + +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_MVID 23:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_NVID 23:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_MISC0 7:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_MISC1 15:8 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_HTOTAL 15:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_VTOTAL 31:16 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_HACTIVE_START 15:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_VACTIVE_START 31:16 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_HACTIVE_WIDTH 15:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_VACTIVE_WIDTH 31:16 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_HSYNC_WIDTH 14:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_HSYNC_POLARITY 15:15 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_VSYNC_WIDTH 30:16 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_VSYNC_POLARITY 31:31 + +/* + * NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL + * + * This command is used to query OD capability and status as well as + * control OD functionality of eDP LCD panels. + * + * subDeviceInstance [in] + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId [in] + * This parameter specifies the ID of the DP display which owns + * the Main Link to be adjusted. The display ID must be a DP display + * as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command. + * If more than one displayId bit is set or the displayId is not a DP, + * this call will return NV_ERR_INVALID_ARGUMENT. + * cmd [in] + * This parameter is an input to this command. The cmd parameter selects + * whether to get the value of a specific field or to set the + * value of a writeable field. + * control [in] + * This parameter is supplied by the user to select the control + * value to be written to change the Sink OD mode. The command to write is + * the NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET command. + * bOdCapable [out] + * This parameter reflects the OD capability of the Sink which can be + * fetched by using the NV0073_CTRL_CMD_DP_AUXCH_OD_CAPABLE_QUERY command. + * bOdControlCapable [out] + * This parameter reflects the OD control capability of the Sink which can be + * fetched by using the NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_CAPABLE_QUERY command. + * bOdStatus [out] + * This parameter reflects the Sink OD status which can be + * fetched by using the NV0073_CTRL_CMD_DP_AUXCH_OD_STATUS_QUERY command.
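+ *
+ * Example (an illustrative sketch of a capability query followed by an
+ * enable; hClient, hDisplayCommon, displayId and the NvRmControl entry
+ * point are assumed client-side plumbing):
+ *
+ *   NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS od = { 0 };
+ *   od.subDeviceInstance = 0;
+ *   od.displayId         = displayId;
+ *   od.cmd               = NV0073_CTRL_CMD_DP_AUXCHQUERY_OD_CAPABLE;
+ *   status = NvRmControl(hClient, hDisplayCommon,
+ *                        NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL,
+ *                        &od, sizeof(od));
+ *   if (status == NV_OK && od.bOdCapable)
+ *   {
+ *       od.cmd     = NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET;
+ *       od.control = NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET_ENABLE_OD;
+ *       status = NvRmControl(hClient, hDisplayCommon,
+ *                            NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL,
+ *                            &od, sizeof(od));
+ *   }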
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL (0x731380U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS_MESSAGE_ID (0x80U) + +typedef struct NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU8 control; + NvU8 cmd; + NvBool bOdCapable; + NvBool bOdControlCapable; + NvBool bOdStatus; +} NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS; + +/* valid commands */ +#define NV0073_CTRL_CMD_DP_AUXCHQUERY_OD_CAPABLE 0x00000000 +#define NV0073_CTRL_CMD_DP_AUXCHQUERY_OD_CTL_CAPABLE 0x00000001 +#define NV0073_CTRL_CMD_DP_AUXCHQUERY_OD_STATUS 0x00000002 +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET 0x00000003 + +/* valid state values */ +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET_AUTONOMOUS 0x00000000 +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET_DISABLE_OD 0x00000002 +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET_ENABLE_OD 0x00000003 + +/* + * NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES + * + * This command sets the MSA properties of a DP display using the following parameters + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * should be for DP only + * bEnableMSA + * To enable or disable MSA + * bStereoPhaseInverse + * To enable or disable Stereo Phase Inverse value + * bCacheMsaOverrideForNextModeset + * Cache the values and don't apply them until next modeset + * featureMask + * Enable/Disable mask of individual MSA property + * featureValues + * MSA property value to write + * bDebugValues + * To inform whether actual MSA values need to be returned + * featureDebugValues + * Returns the actual MSA property values being written on HW + * when bDebugValues is set; otherwise nothing is returned here. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_TIMEOUT + * + */ +#define NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_V2 (0x731381U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_V2_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_V2_PARAMS_MESSAGE_ID (0x81U) + +typedef struct NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_V2_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvBool bEnableMSA; + NvBool bStereoPhaseInverse; + NvBool bCacheMsaOverrideForNextModeset; + NV0073_CTRL_DP_MSA_PROPERTIES_MASK featureMask; + NV0073_CTRL_DP_MSA_PROPERTIES_VALUES featureValues; + NvBool bDebugValues; + NV0073_CTRL_DP_MSA_PROPERTIES_VALUES featureDebugValues; +} NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_V2_PARAMS; + + + +/* + * NV0073_CTRL_CMD_DP_AUXCH_VBL_CTRL + * + * This command is used to query VBL capability and status as well as + * to enable/disable the VBL feature of eDP LCD panels. + * + * subDeviceInstance [in] + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior.
+ * displayId [in] + * This parameter specifies the ID of the DP display which owns + * the Main Link to be adjusted. The display ID must be a DP display + * as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command. + * If more than one displayId bit is set or the displayId is not a DP, + * this call will return NV_ERR_INVALID_ARGUMENT. + * cmd [in] + * This parameter is an input to this command. The cmd parameter selects + * whether to get the value of a specific field or to set the + * value of a writeable field. + * control [in] + * This parameter is supplied by the user to select the control + * value to be written to the VBL control field. The command to write is + * the NV0073_CTRL_CMD_DP_AUXCH_VBL_CTL_SET command. + * bVblControlCapable [out] + * This parameter reflects the VBL control capability of the Sink which can be + * fetched by using the NV0073_CTRL_CMD_DP_AUXCH_VBL_CTL_CAPABLE_QUERY command. + * bVblStatus [out] + * This parameter reflects the Sink VBL status which can be + * fetched by using the NV0073_CTRL_CMD_DP_AUXCH_VBL_STATUS_QUERY command. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0073_CTRL_CMD_DP_AUXCH_VBL_CTRL (0x731386U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_AUXCH_VBL_CTRL_PARAMS_MESSAGE_ID" */ + +/* valid commands */ +#define NV0073_CTRL_CMD_DP_AUXCH_QUERY_VBL_CTL_CAPABLE 0x00000000 +#define NV0073_CTRL_CMD_DP_AUXCH_QUERY_VBL_STATUS 0x00000001 +#define NV0073_CTRL_CMD_DP_AUXCH_SET_VBL_CTL 0x00000002 + +/* valid state values */ +#define NV0073_CTRL_CMD_DP_AUXCH_SET_VBL_CTL_DISABLE 0x00000001 +#define NV0073_CTRL_CMD_DP_AUXCH_SET_VBL_CTL_AUTONOMOUS 0x00000000 + +#define NV0073_CTRL_CMD_DP_AUXCH_VBL_CTRL_PARAMS_MESSAGE_ID (0x86U) + +typedef struct NV0073_CTRL_CMD_DP_AUXCH_VBL_CTRL_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU8 cmd; + NvU8 control; + NvBool bVblControlCapable; + NvBool bVblStatus; +} NV0073_CTRL_CMD_DP_AUXCH_VBL_CTRL_PARAMS; + +/* + * NV0073_CTRL_DP_LEVEL_INFO_TABLE_ENTRY + * + * This structure specifies the Pre-emphasis/Drive Current/preshoot/TxPu + * information for a DP device. These are the current values that RM is + * using to map the levels for Pre-emphasis and Drive Current for Link Training. + * preEmphasis + * This field specifies the preemphasis values. + * driveCurrent + * This field specifies the driveCurrent values. + * preshoot + * This field specifies the preshoot values. + * TxPu + * This field specifies the pull-up current source drive values. + */ +#define NV0073_CTRL_DP2X_MAX_TXFFE_LEVELS 16 +typedef struct NV0073_CTRL_DP_LEVEL_INFO_TABLE_ENTRY { + NvU32 preEmphasis; + NvU32 driveCurrent; + NvU32 preShoot; + NvU32 txPu; +} NV0073_CTRL_DP_LEVEL_INFO_TABLE_ENTRY; + +/* + * NV0073_CTRL_DP_SET_LEVEL_INFO_TABLE_DATA + * + * This command is used to override the Pre-emphasis/Drive Current/preshoot/TxPu + * data table in RM. This data is dependent on the target link rate. + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the digital display for which the + * data should be returned.
The display ID must be a digital display. + * If more than one displayId bit is set or the displayId is not a DP, + * this call will return NV_ERR_INVALID_ARGUMENT. + * linkRate + * The target link rate that the lane drive parameters will be used with, + * using 10M convention. Refer to NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_DP2LINK_BW + * constants. + * This control call only supports DP1.x link rates. For DP2.X UHBR link rates, + * use NV0073_CTRL_DP2X_SET_LEVEL_INFO_TABLE_DATA. + * dpData + * This parameter is of type NV0073_CTRL_DP_LEVEL_INFO_TABLE_DATA + * and specifies the Pre-emphasis/Drive Current/Preshoot/TxPu information + * for a DP device. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV0073_CTRL_DP_SET_LEVEL_INFO_TABLE_DATA_PARAMS_MESSAGE_ID (0x87U) + +typedef struct NV0073_CTRL_DP_SET_LEVEL_INFO_TABLE_DATA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 padlinkIndex; + NvU32 linkRate; + NV0073_CTRL_DP_LEVEL_INFO_TABLE_ENTRY dpData[NV0073_CTRL_MAX_DRIVECURRENT_LEVELS][NV0073_CTRL_MAX_PREEMPHASIS_LEVELS]; +} NV0073_CTRL_DP_SET_LEVEL_INFO_TABLE_DATA_PARAMS; + +#define NV0073_CTRL_CMD_DP_SET_LEVEL_INFO_TABLE_DATA (0x731387U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LEVEL_INFO_TABLE_DATA_PARAMS_MESSAGE_ID" */ + +/* + * NV0073_CTRL_DP_GET_LEVEL_INFO_TABLE_DATA + * + * This command is used to fetch the Pre-emphasis/Drive Current/preshoot/TxPu + * data table in RM. This data is dependent on the target link rate. + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the digital display for which the + * data should be returned. The display ID must be a digital display. + * If more than one displayId bit is set or the displayId is not a DP, + * this call will return NV_ERR_INVALID_ARGUMENT. + * linkRate + * The target link rate that the lane drive parameters will be used with, + * using 10M convention. Refer to NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_DP2LINK_BW + * constants. + * This control call only supports DP1.x link rates. For DP2.X UHBR link rates, + * use NV0073_CTRL_DP2X_GET_LEVEL_INFO_TABLE_DATA. + * dpData + * This parameter is of type NV0073_CTRL_DP_LEVEL_INFO_TABLE_DATA + * and specifies the Pre-emphasis/Drive Current/Preshoot/TxPu information + * for a DP device.
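+ *
+ * Example (an illustrative sketch; hClient, hDisplayCommon, displayId and
+ * the NvRmControl entry point are assumed client-side plumbing, and the
+ * HBR encoding below is an assumption based on the 10M convention
+ * described above):
+ *
+ *   NV0073_CTRL_DP_GET_LEVEL_INFO_TABLE_DATA_PARAMS tbl = { 0 };
+ *   tbl.subDeviceInstance = 0;
+ *   tbl.displayId         = displayId;
+ *   tbl.linkRate          = 270;  // 2.70 Gbps (HBR) in units of 10 Mbps
+ *   status = NvRmControl(hClient, hDisplayCommon,
+ *                        NV0073_CTRL_CMD_DP_GET_LEVEL_INFO_TABLE_DATA,
+ *                        &tbl, sizeof(tbl));
+ *   // On NV_OK, tbl.dpData[drive][preemph] holds the level entries.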
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV0073_CTRL_DP_GET_LEVEL_INFO_TABLE_DATA_PARAMS_MESSAGE_ID (0x88U) + +typedef struct NV0073_CTRL_DP_GET_LEVEL_INFO_TABLE_DATA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 padlinkIndex; + NvU32 linkRate; + NV0073_CTRL_DP_LEVEL_INFO_TABLE_ENTRY dpData[NV0073_CTRL_MAX_DRIVECURRENT_LEVELS][NV0073_CTRL_MAX_PREEMPHASIS_LEVELS]; +} NV0073_CTRL_DP_GET_LEVEL_INFO_TABLE_DATA_PARAMS; + +#define NV0073_CTRL_CMD_DP_GET_LEVEL_INFO_TABLE_DATA (0x731388U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_LEVEL_INFO_TABLE_DATA_PARAMS_MESSAGE_ID" */ + + + +/* + * NV0073_CTRL_CMD_CALCULATE_DP_IMP + * + * This command is used to query if a certain mode is supported by the DP IMP + * calculation. The command is required for NVD5 and later GPUs. + * + * subDeviceInstance [in] + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId [in] + * This parameter specifies the ID of the DP display which owns + * the Main Link to be adjusted. The display ID must be a DP display + * as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command. + * If more than one displayId bit is set or the displayId is not a DP, + * this call will return NV_ERR_INVALID_ARGUMENT. + * linkConfig [in] + * This parameter specifies the link configuration used to validate the mode. + * linkRate10M: + * The link rate that will be used to validate the IMP. Using 10M convention. + * Refer to NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_DP2LINK_BW constants. + * laneCount: + * The lane count that will be used to validate the IMP. + * bEnhancedFraming: + * Specify if enhanced framing is enabled. + * bDp2xChannelCoding: + * Specify whether 8b/10b or 128b/132b channel coding is used. + * bMultiStreamTopology: + * Specify if it is in a multi-stream topology (the directly connected device is a branch). + * bFECEnabled: + * Specify if FEC is enabled. + * modesetInfo [in] + * This parameter specifies the target display mode to be validated. + * rasterWidth: + * The total width of the mode. + * rasterHeight: + * The total height of the mode. + * surfaceWidth: + * The active width of the mode. + * surfaceHeight: + * The active height of the mode. + * rasterBlankStartX / rasterBlankEndX: + * The pixel location of horizontal blank starts and ends. + * depth: + * Color depth represents the number of bits used to indicate the color of a single pixel. + * The value will be different when DSC is enabled. + * twoChannelAudioHz/eightChannelAudioHz: + * The audio sample rate for different channels used. + * pixelFrequencyKHz: + * The pixel clock used by the mode. + * colorFormat: + * RGB/YCbCr444/YCbCr422/YCbCr420. + * bDSCEnabled: + * Specify if DSC is enabled. + * watermark [out] + * This parameter reflects the results of the calculation/verification.
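+ *
+ * Example (an illustrative sketch with arbitrary mode values; hClient,
+ * hDisplayCommon, displayId and the NvRmControl entry point are assumed
+ * client-side plumbing):
+ *
+ *   NV0073_CTRL_CMD_CALCULATE_DP_IMP_PARAMS imp = { 0 };
+ *   imp.subDeviceInstance             = 0;
+ *   imp.displayId                     = displayId;
+ *   imp.linkConfig.linkRate10M        = 810;     // 8.1 Gbps (HBR3)
+ *   imp.linkConfig.laneCount          = 4;
+ *   imp.linkConfig.bEnhancedFraming   = NV_TRUE;
+ *   imp.modesetInfo.rasterWidth       = 4400;
+ *   imp.modesetInfo.rasterHeight      = 2250;
+ *   imp.modesetInfo.surfaceWidth      = 3840;
+ *   imp.modesetInfo.surfaceHeight     = 2160;
+ *   imp.modesetInfo.depth             = 30;      // 10 bpc RGB
+ *   imp.modesetInfo.pixelFrequencyKHz = 594000;
+ *   status = NvRmControl(hClient, hDisplayCommon,
+ *                        NV0073_CTRL_CMD_CALCULATE_DP_IMP,
+ *                        &imp, sizeof(imp));
+ *   // On NV_OK, imp.watermark.bIsModePossible tells whether the mode
+ *   // fits on this link configuration.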
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0073_CTRL_CMD_CALCULATE_DP_IMP (0x73138cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_CALCULATE_DP_IMP_PARAMS_MESSAGE_ID" */ + +typedef struct NV0073_CTRL_DP_IMP_LINK_CONFIGURATION { + NvU32 linkRate10M; + NvU32 laneCount; + NvBool bEnhancedFraming; + NvBool bDp2xChannelCoding; + NvBool bMultiStreamTopology; + NvBool bFECEnabled; + NvBool bDisableEffBppSST8b10b; +} NV0073_CTRL_DP_IMP_LINK_CONFIGURATION; + +typedef struct NV0073_CTRL_DP_IMP_DSC_PARAMETERS { + NvU32 sliceCount; + NvU32 sliceWidth; + NvU32 sliceHeight; + NvU32 dscVersionMajor; + NvU32 dscVersionMinor; +} NV0073_CTRL_DP_IMP_DSC_PARAMETERS; + +typedef struct NV0073_CTRL_DP_IMP_MODESET_DATA { + NvU32 rasterWidth; + NvU32 rasterHeight; + NvU32 surfaceWidth; + NvU32 surfaceHeight; + NvU32 rasterBlankStartX; + NvU32 rasterBlankEndX; + NvU32 depth; + NvU32 twoChannelAudioHz; + NvU32 eightChannelAudioHz; + NvU32 pixelFrequencyKHz; + NvU32 bitsPerComponent; + NvU32 colorFormat; + NvBool bDSCEnabled; +} NV0073_CTRL_DP_IMP_MODESET_DATA; + +typedef struct NV0073_CTRL_DP_IMP_WATERMARK { + NvU32 waterMark; + NvU32 tuSize; + NvU32 minHBlank; + NvU32 hBlankSym; + NvU32 vBlankSym; + NvU32 effectiveBpp; + NvBool bIsModePossible; +} NV0073_CTRL_DP_IMP_WATERMARK; + +#define NV0073_CTRL_CMD_CALCULATE_DP_IMP_PARAMS_MESSAGE_ID (0x8CU) + +typedef struct NV0073_CTRL_CMD_CALCULATE_DP_IMP_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 headIndex; + NV0073_CTRL_DP_IMP_LINK_CONFIGURATION linkConfig; + NV0073_CTRL_DP_IMP_MODESET_DATA modesetInfo; + NV0073_CTRL_DP_IMP_DSC_PARAMETERS dscInfo; + NV0073_CTRL_DP_IMP_WATERMARK watermark; +} NV0073_CTRL_CMD_CALCULATE_DP_IMP_PARAMS; + +/* + * NV0073_CTRL_CMD_DP_GET_CABLEID_INFO_FROM_MACRO + * + * This command is used to read cable ID information from the USB-C cable for + * DP configuration purposes. + * + * subDeviceInstance [in] + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * + * displayId [in] + * This parameter specifies the ID of the DP display which owns + * the Main Link to be adjusted. The display ID must be a DP display + * as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command. + * If more than one displayId bit is set or the displayId is not a DP, + * this call will return NV_ERR_INVALID_PARAMETER.
+ * + * cableIDInfo [out] + * This parameter reflects the result of the cable ID read from the cable. + * + * Possible status values returned are: + * NV_ERR_INVALID_PARAMETER + * NV_ERR_NOT_SUPPORTED + * NV_OK + */ + +#define NV0073_CTRL_CMD_DP_GET_CABLEID_INFO_FROM_MACRO (0x73138dU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_USBC_CABLEID_INFO_PARAMS_MESSAGE_ID" */ + +typedef enum NV0073_CTRL_DP_USBC_CABLEID_CABLETYPE { + NV0073_CTRL_DP_USBC_CABLEID_CABLETYPE_UNKNOWN = 0, + NV0073_CTRL_DP_USBC_CABLEID_CABLETYPE_PASSIVE = 1, + NV0073_CTRL_DP_USBC_CABLEID_CABLETYPE_ACTIVE_RETIMER = 2, + NV0073_CTRL_DP_USBC_CABLEID_CABLETYPE_ACTIVE_LIN_REDRIVER = 3, + NV0073_CTRL_DP_USBC_CABLEID_CABLETYPE_OPTICAL = 4, +} NV0073_CTRL_DP_USBC_CABLEID_CABLETYPE; + +typedef struct NV0073_CTRL_DP_USBC_CABLEID_INFO { + NvBool uhbr10_0_capable; + NvBool uhbr13_5_capable; + NvBool uhbr20_0_capable; + NV0073_CTRL_DP_USBC_CABLEID_CABLETYPE type; + NvBool vconn_source; +} NV0073_CTRL_DP_USBC_CABLEID_INFO; + +#define NV0073_CTRL_DP_USBC_CABLEID_INFO_PARAMS_MESSAGE_ID (0x8DU) + +typedef struct NV0073_CTRL_DP_USBC_CABLEID_INFO_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NV0073_CTRL_DP_USBC_CABLEID_INFO cableIDInfo; +} NV0073_CTRL_DP_USBC_CABLEID_INFO_PARAMS; + +/* _ctrl0073dp_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h new file mode 100644 index 0000000..04aeead --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073dpu.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h new file mode 100644 index 0000000..b02eda5 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h @@ -0,0 +1,112 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073event.finn +// + +#include "ctrl/ctrl0073/ctrl0073base.h" + +/* +* tileMask +* Tiles associated with the calc timeout head. +* frameActive +* New frame has started on this Head. +* histoReady +* Asserts when histogram data from all the "enabled" LTMs belonging to this head +* is ready. +* startFrameReady +* Asserted when the histo_ready interrupt +* is received and HDMA is programmed; RISCV then asserts STARTFRAME_READY. +*/ +typedef struct NV0073_LTM_HEAD_STATUS_PARAMS { + NvU8 tileMask; + NvBool frameActive; + NvBool histoReady; + NvBool startFrameReady; +} NV0073_LTM_HEAD_STATUS_PARAMS; + +/* NV04_DISPLAY_COMMON event-related control commands and parameters */ + +/* + * NV0073_CTRL_CMD_EVENT_SET_NOTIFICATION + * + * This command sets event notification state for the associated display + * object. This command requires that an instance of NV01_EVENT has been + * previously bound to the associated display object. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * hEvent + * This parameter specifies the handle of the NV01_EVENT instance + * to be bound to the given subDeviceInstance. + * event + * This parameter specifies the type of event to which the specified + * action is to be applied. This parameter must specify a valid + * NV0073_NOTIFIERS value (see cl0073.h for more details) and should + * not exceed one less than NV0073_NOTIFIERS_MAXCOUNT. + * action + * This parameter specifies the desired event notification action. + * Valid notification actions include: + * NV0073_CTRL_SET_EVENT_NOTIFICATION_DISABLE + * This action disables event notification for the specified + * event for the associated subdevice object. + * NV0073_CTRL_SET_EVENT_NOTIFICATION_SINGLE + * This action enables single-shot event notification for the + * specified event for the associated subdevice object. + * NV0073_CTRL_SET_EVENT_NOTIFICATION_REPEAT + * This action enables repeated event notification for the specified + * event for the associated system controller object.
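+ *
+ * Example (an illustrative sketch; hClient, hDisplayCommon, hEvent and
+ * notifierIndex are assumed client-side values, with hEvent an NV01_EVENT
+ * instance already bound to the display object and notifierIndex a valid
+ * NV0073_NOTIFIERS value from cl0073.h; NvRmControl is assumed as the
+ * client entry point):
+ *
+ *   NV0073_CTRL_EVENT_SET_NOTIFICATION_PARAMS evt = { 0 };
+ *   evt.subDeviceInstance = 0;
+ *   evt.hEvent            = hEvent;
+ *   evt.event             = notifierIndex;
+ *   evt.action            = NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
+ *   status = NvRmControl(hClient, hDisplayCommon,
+ *                        NV0073_CTRL_CMD_EVENT_SET_NOTIFICATION,
+ *                        &evt, sizeof(evt));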
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV0073_CTRL_CMD_EVENT_SET_NOTIFICATION (0x730301U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_EVENT_INTERFACE_ID << 8) | NV0073_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0073_CTRL_EVENT_SET_NOTIFICATION_PARAMS { + NvU32 subDeviceInstance; + NvHandle hEvent; + NvU32 event; + NvU32 action; +} NV0073_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +/* valid action values */ +#define NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE (0x00000000U) +#define NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE (0x00000001U) +#define NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002U) + +/* _ctrl0073event_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h new file mode 100644 index 0000000..de1bfa2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl0073/ctrl0073internal.finn +// + +#include "ctrl/ctrl0073/ctrl0073base.h" +#include "ctrl/ctrl0073/ctrl0073system.h" +#include "ctrl/ctrl0073/ctrl0073dfp.h" + +#define NV0073_CTRL_CMD_INTERNAL_GET_HOTPLUG_UNPLUG_STATE (0x730401U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_INTERNAL_INTERFACE_ID << 8) | NV0073_CTRL_INTERNAL_GET_HOTPLUG_UNPLUG_STATE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_INTERNAL_GET_HOTPLUG_UNPLUG_STATE_PARAMS_MESSAGE_ID (0x1U) + +typedef NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS NV0073_CTRL_INTERNAL_GET_HOTPLUG_UNPLUG_STATE_PARAMS; + +#define NV0073_CTRL_CMD_INTERNAL_VRR_SET_RGLINE_ACTIVE (0x730402U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_INTERNAL_INTERFACE_ID << 8) | NV0073_CTRL_CMD_INTERNAL_VRR_SET_RGLINE_ACTIVE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_INTERNAL_VRR_SET_RGLINE_ACTIVE_PARAMS_MESSAGE_ID (0x2U) + +typedef NV0073_CTRL_CMD_SYSTEM_VRR_SET_RGLINE_ACTIVE_PARAMS NV0073_CTRL_CMD_INTERNAL_VRR_SET_RGLINE_ACTIVE_PARAMS; + +#define NV0073_CTRL_CMD_INTERNAL_DFP_SWITCH_DISP_MUX (0x730460U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_INTERNAL_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_INTERNAL_DFP_SWITCH_DISP_MUX_PARAMS_MESSAGE_ID (0x3U) + +typedef NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS NV0073_CTRL_CMD_INTERNAL_DFP_SWITCH_DISP_MUX_PARAMS; + +#define NV0073_CTRL_CMD_INTERNAL_DFP_GET_DISP_MUX_STATUS (0x730404U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_INTERNAL_INTERFACE_ID << 8) | NV0073_CTRL_CMD_INTERNAL_DFP_GET_DISP_MUX_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_INTERNAL_DFP_GET_DISP_MUX_STATUS_PARAMS_MESSAGE_ID (0x4U) + +typedef NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS NV0073_CTRL_CMD_INTERNAL_DFP_GET_DISP_MUX_STATUS_PARAMS; + +/* ctrl0073internal_h */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h new file mode 100644 index 0000000..aec32e0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h @@ -0,0 +1,70 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0073/ctrl0073psr.finn +// + +#include "ctrl/ctrl0073/ctrl0073base.h" + + + +/* + * NV0073_CTRL_CMD_PSR_GET_SR_PANEL_INFO + * + * displayId + * Display ID on which this information is being requested. + * frameLockPin + * Returns the frame lock pin of the panel. + * i2cAddress + * Returns the i2c address on which the SR panel is attached. + * NOTE: applies only to LVDS panels, otherwise this field + * should be ignored. + * bSelfRefreshEnabled + * Returns whether SR is enabled in RM. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0073_CTRL_CMD_PSR_GET_SR_PANEL_INFO (0x731602U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_PSR_INTERFACE_ID << 8) | NV0073_CTRL_PSR_GET_SR_PANEL_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_PSR_GET_SR_PANEL_INFO_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0073_CTRL_PSR_GET_SR_PANEL_INFO_PARAMS { + NvU32 displayId; + NvU32 frameLockPin; + NvU8 i2cAddress; + NvBool bSelfRefreshEnabled; +} NV0073_CTRL_PSR_GET_SR_PANEL_INFO_PARAMS; + + + +/* _ctrl0073psr_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h new file mode 100644 index 0000000..a9ef028 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h @@ -0,0 +1,3140 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073specific.finn +// + +#include "ctrl/ctrl0073/ctrl0073base.h" + +#include "ctrl/ctrlxxxx.h" +/* NV04_DISPLAY_COMMON display-specific control commands and parameters */ + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_TYPE + * + * This command can be used to determine the associated display type for + * the specified displayId. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the display for which the display + * type is to be returned.
Only one display may be indicated in this + * parameter. + * displayType + * This parameter returns the display type associated with the specified + * displayId parameter. Valid displayType values are: + * NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT + * NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP + * NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_TV + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SPECIFIC_GET_TYPE (0x730240U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS_MESSAGE_ID (0x40U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 displayType; +} NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS; + +/* valid display types */ +#define NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_UNKNOWN (0x00000000U) +#define NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT (0x00000001U) +#define NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP (0x00000002U) +#define NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_TV (0x00000003U) + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 + * + * This command can be used to request the EDID for the specified displayId. + * + * [in] subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the total + * number of subdevices within the parent device. This parameter should + * be set to zero for default behavior. + * [in] displayId + * This parameter specifies the display to read the EDID. The display ID + * must specify a display with a positive connect state as determined + * with the NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE command. Only one + * display may be indicated in this parameter. If more than one + * displayId is used, the RM will return NV_ERR_INVALID_ARGUMENT. + * [out] bufferSize + * This parameter returns the number of bytes copied into edidBuffer after + * performing the requested EDID operations. + * [out] edidBuffer + * The array of EDIDs that RM will fill after the requested operations. If + * the size of the array is not large enough to hold the number of bytes to + * be copied, NV_ERR_INVALID_ARGUMENT will be returned. + * [in] flags + * This parameter defines the specific operations that will be performed + * in reading the EDID. + * Here are the current defined fields: + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE + * A client uses this field to indicate whether to return the cached + * copy of the EDID or to use DDC to read the EDID from the display. + * Possible values are: + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_NO + * The RM will use DDC to grab the EDID. + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_YES + * The RM will copy the last EDID found into the client's + * buffer. No DDC will be performed. + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE + * A client uses this field to indicate whether to read from + * the HW and return the EDID without any patching + * Possible values are: + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE_COOKED + * Use the _COPY_CACHE policy + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE_RAW + * Perform the read and return an unadulterated EDID.
+ * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE + * A client uses this field to indicate whether to read EDID + * from SBIOS using ACPI sub function for display dynamic switching + * feature. This flag should only be set on internal display with + * dynamic switching feature enabled. + * Possible values are: + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE_ACPI + * RM reads the EDID from SBIOS and returns the raw EDID provided + * by SBIOS. + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE_DEFAULT + * EDID is read based on rest of the 'flags' that are passed to + * this function. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U + +#define NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID (0x45U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 bufferSize; + NvU32 flags; + NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES]; +} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS; + +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE 0:0 +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_NO 0x00000000U +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_YES 0x00000001U + +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE 1:1 +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE_COOKED 0x00000000U +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE_RAW 0x00000001U + +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE 3:2 +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE_DEFAULT 0x00000000U +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE_ACPI 0x00000001U + +/* + * NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2 + * + * This command can be used to set or remove a complete EDID for the + * specified displayId. Once the EDID is set, any requests + * to read the EDID or use DDC detection will always use a cached copy of + * the EDID. That is, the EDID becomes static until disabled by calling + * this same function with an empty edidBuffer. Note that DDC-based + * detection will always pass for any displayId that has set an EDID. Also, + * this path will not store any value across reboots. If an EDID needs to + * remain set after a reboot, RM clients must call this function again. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the total + * number of subdevices within the parent device. This parameter should + * be set to zero for default behavior. + * displayId + * This parameter specifies the display for which to store or remove the EDID. + * Only one display may be indicated in this parameter. If more than one displayId + * is used, the RM will return NV_ERR_INVALID_ARGUMENT. + * If the displayId does not use DDC and hence would not have an EDID, + * then the RM could also return NV_ERR_INVALID_ARGUMENT. + * bufferSize + * This parameter specifies the size of the EDID buffer pointed to by + * pEdidBuffer. If the EDID write contains more bytes than bufferSize, + * the RM will extend the bufferSize of the EDID inside the RM to match.
+ * Note that a bufferSize of 0 means no bytes will be copied, but the + * current cached EDID will be set as static. + * edidBuffer + * This parameter specifies the EDID buffer that the RM will copy into + * the RM buffer. If the EDID buffer is empty, the RM will remove any + * previously set EDID and allow further detection and EDID reads to use DDC. + * The RM will not check to see if the EDID is valid here or not. + * The client should validate the EDID if needed before calling this function. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + */ + +#define NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2 (0x730246U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS_MESSAGE_ID (0x46U) + +typedef struct NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 bufferSize; + NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES]; +} NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS; + +/* + * NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE + * + * This control cmd provides the device-faking support from RM. + * This command serves as the entry point for all interaction of RM with + * the user-mode component of any internal [test] tool. The Faking framework + * in RM will be activated only after the usermode app sends a proper ENABLE + * cmd first. Any attempt to issue other cmds before the faking code has + * been enabled will result in NV_ERR_INVALID_DATA. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the total + * number of subdevices within the parent device. This parameter should + * be set to zero for default behavior. + * cmd + * This field will carry the command to be executed by the framework. + * This includes Enabling/Disabling the test framework and faking devices + * like CRT/DVI/TV. + * data + * This field is to carry the data required for executing the cmd. + * Except for Enable and Disable, the other faking device commands will + * require the device mask of the device to be faked/removed. + * tvType + * This field specifies a specific TV type while faking a TV.
+ * Possible values are: + * NV0073_FAKE_DEVICE_TV_NONE + * NV0073_FAKE_DEVICE_TV_SVIDEO + * NV0073_FAKE_DEVICE_TV_COMPOSITE + * NV0073_FAKE_DEVICE_TV_COMPONENT + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_DATA + * + */ + +#define NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE (0x730243U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS_MESSAGE_ID" */ + +/* valid fake device TV connector types */ +#define NV0073_FAKE_DEVICE_TV_NONE (0U) +#define NV0073_FAKE_DEVICE_TV_SVIDEO (1U) +#define NV0073_FAKE_DEVICE_TV_COMPOSITE (2U) +#define NV0073_FAKE_DEVICE_TV_COMPONENT (3U) + +#define NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS_MESSAGE_ID (0x43U) + +typedef struct NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS { + NvU32 subDeviceInstance; + NvU32 cmd; + NvU32 data; + NvU32 tvType; +} NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS; + +/* Faking Support commands */ +/* some random value to enable/disable test code */ +#define NV0073_FAKE_DEVICE_SUPPORT_ENABLE 0x11faU +#define NV0073_FAKE_DEVICE_SUPPORT_DISABLE 0x99ceU +#define NV0073_FAKE_DEVICE_SUPPORT_ATTACH_DEVICES 0x100U +#define NV0073_FAKE_DEVICE_SUPPORT_REMOVE_DEVICES 0x101U + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID + * + * This command returns the I2C portID for the specified display device. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the display for which information is to be + * returned. Only one display may be indicated in this parameter. + * If more than one displayId is used a failing status of + * NV_ERR_INVALID_ARGUMENT will be returned. + * commPortId + * This parameter returns the I2C communication port ID of the + * display device indicated by the displayId parameter. + * ddcPortId + * This parameter returns the I2C DDC port ID of the display device + * indicated by the displayId parameter. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID (0x730211U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS_MESSAGE_ID (0x11U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 commPortId; + NvU32 ddcPortId; +} NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS; + +#define NV0073_CTRL_SPECIFIC_I2C_PORT_NONE (0x0U) + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA + * + * This command can be used to get display connector data. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the display for which information is to be + * returned. Only one display may be indicated in this parameter. 
+ * If more than one displayId is used a failing status of + * NV_ERR_INVALID_ARGUMENT will be returned. + * DDCPartners + * This parameter specifies an NV0073_DISPLAY_MASK value describing + * the set of displays that share the same DDC line as displayId. This + * parameter will always be returned even if we also return the + * NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_FLAGS_PRESENT_NO flag. + * flags + * This parameter specifies optional flags to be used while retrieving + * the connector data for a given displayId. + * Legal values for this parameter include: + * NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_FLAGS_PRESENT + * This flag describes whether the connector data is present + * inside the firmware. + * count + * This parameter returns the number of connectors associated with + * the displayId argument. This value indicates the number of + * valid entries returned in the data parameter. + * data + * This parameter returns an array of structures containing the connector + * data associated with each connector for the given displayId argument. + * The count field specifies how many entries in this array are returned. + * Each entry in the array contains the following members: + * index + * This value is the index associated with the given connector. If + * two displayIds share the same index, then they share the same + * connector. + * type + * This value defines the type of connector associated with the + * displayId argument. + * location + * This value provides a possible means to determine the relative + * location of the connector in association to other connectors. + * For desktop boards, a value of zero defines the south most + * connector (the connector closest to the bus slot into which + * the board is inserted). + * platform + * This value defines the type of system with which to associate the + * location of each connector.
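+ *
+ * Example (an illustrative sketch; hClient, hDisplayCommon, displayId and
+ * the NvRmControl entry point are assumed client-side plumbing):
+ *
+ *   NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS con = { 0 };
+ *   con.subDeviceInstance = 0;
+ *   con.displayId         = displayId;
+ *   status = NvRmControl(hClient, hDisplayCommon,
+ *                        NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA,
+ *                        &con, sizeof(con));
+ *   if (status == NV_OK)
+ *   {
+ *       NvU32 i;
+ *       for (i = 0; i < con.count; i++)
+ *       {
+ *           // con.data[i].index, .type and .location describe
+ *           // each connector behind displayId.
+ *       }
+ *   }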
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + */ + +#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */ + +/* maximum number of connectors */ +#define NV0073_CTRL_MAX_CONNECTORS 4U + +#define NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID (0x50U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 DDCPartners; + NvU32 count; + struct { + NvU32 index; + NvU32 type; + NvU32 location; + } data[NV0073_CTRL_MAX_CONNECTORS]; + NvU32 platform; +} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS; + +/* defines for the flags field */ +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_FLAGS_PRESENT 0:0 +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_FLAGS_PRESENT_NO 0x00000000U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_FLAGS_PRESENT_YES 0x00000001U + +/* defines for the data[].type field */ +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VGA_15_PIN 0x00000000U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_COMPOSITE 0x00000010U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_SVIDEO 0x00000011U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_HDTV_COMPONENT 0x00000013U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_SCART 0x00000014U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_COMPOSITE_SCART_OVER_EIAJ4120_BLUE 0x00000016U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_HDTV_EIAJ4120 0x00000017U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_PC_POD_HDTV_YPRPB 0x00000018U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_PC_POD_SVIDEO 0x00000019U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_PC_POD_COMPOSITE 0x0000001AU +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_SVIDEO 0x00000020U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_COMPOSITE 0x00000021U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I 0x00000030U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_D 0x00000031U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_ADC 0x00000032U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_1 0x00000038U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_2 0x00000039U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_SPWG 0x00000040U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_OEM 0x00000041U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_EXT 0x00000046U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_INT 0x00000047U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_MINI_EXT 0x00000048U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_SERIALIZER 0x00000049U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_A 0x00000061U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_C_MINI 0x00000063U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_1 0x00000064U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_2 0x00000065U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VIRTUAL_WFD 0x00000070U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_USB_C 0x00000071U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DSI 0x00000072U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_STEREO_3PIN_DIN 0x00000073U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_USB_C_UHBR 0x00000074U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_UNKNOWN 0xFFFFFFFFU + +/* defines 
for the platform field */ +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_DEFAULT_ADD_IN_CARD 0x00000000U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_TWO_PLATE_ADD_IN_CARD 0x00000001U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_CONFIGURABLE 0x00000002U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_DESKTOP_FULL_DP 0x00000007U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_MOBILE_ADD_IN_CARD 0x00000008U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_MXM 0x00000009U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_MOBILE_BACK 0x00000010U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_MOBILE_BACK_LEFT 0x00000011U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_MOBILE_BACK_DOCK 0x00000018U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_CRUSH_DEFAULT 0x00000020U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_UNKNOWN 0xFFFFFFFFU + +/* + * NV0073_CTRL_CMD_SYSTEM_GET_HDCP_REPEATER_INFO + * + * This command is used to get HDCP repeater information. From the + * repeater device this call returns KsvList, BStatus and VPrime. If the + * device is implemented internally, the client shall supply a Cksv and Cn, + * and in turn the following parameters are returned: MPrime and Dksv, which are used + * for upstream authentication. In addition to this the flag bAuthRequired + * shall be set to indicate that upstream authentication is required along with + * comparing the KsvList with SRM. On the other hand, if the device is an + * external implementation, MPrime and Dksv values shall be ignored and the flag + * bAuthRequired will not be set, indicating upstream authentication is not + * required and only the comparison of KsvList with the upstream SRM is required. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter indicates the digital display device's + * displayId. This comes as input to this command. + * bRepeaterPending + * The repeater pending flag as the output to this command. + * The returned flag indicates whether the repeater is ready: + * TRUE if the repeater is not ready and FALSE otherwise. + * Cn + * This parameter is the input value Cn, a 64 bit random number, + * to be provided to this command. Cn value is Upstream protocol's + * exchange random number. This value must be written by software + * before the KSV is written if the transmitter device follows the HDCP + * upstream protocol. If the transmitter supports a proprietary method of + * authenticating the repeater device, Cn can be ignored. + * Cksv + * This parameter is the input value Cksv (a unique identifier) of 40 bit + * size to be provided to this command. This input value shall contain 20 + * ones and 20 zeros in accordance with the HDCP specification. This value + * must be written by software before the KSV is written if the transmitter + * device follows the HDCP upstream protocol. If the transmitter supports + * a proprietary method of authenticating the repeater device, Cksv can be + * ignored. + * actualKsvSize + * The actual KSV list size (in bytes) returned back as output while reading + * KSV list. + * ksvList + * If the downstream device is a repeater then this will give the list of + * KSVs of all downstream devices attached to the repeater.
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE
+ *
+ * This command is used to signal the resource manager that the upcoming mode
+ * shall be hdmi rather than dvi. This is required since the resource manager
+ * does not read the display edid. The resource manager shall enable hdmi
+ * components, for instance turning on the audio engine. This should
+ * be called prior to every modeset in which the displayId is capable of hdmi.
+ *
+ *   displayId
+ *     This parameter specifies the displayId of the HDMI resource to
+ *     configure. This comes as input to this command.
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   enable
+ *     The legal values for this field are:
+ *       NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_TRUE
+ *       NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_FALSE
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID (0x73U)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS {
+    NvU8  subDeviceInstance;
+    NvU32 displayId;
+    NvU8  enable;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_TRUE (0x00000001U)
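+
+/*
+ * Minimal calling sketch (assumes the same illustrative NvRmControl()
+ * dispatch and handles as above; not part of this header):
+ *
+ *   NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS p = { 0 };
+ *   p.subDeviceInstance = 0;
+ *   p.displayId         = displayId;
+ *   p.enable            = NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_TRUE;
+ *   status = NvRmControl(hClient, hDisplayCommon,
+ *                        NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE,
+ *                        &p, sizeof(p));
+ *   // issued before the modeset on an hdmi-capable displayId
+ */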
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_CTRL_HDMI
+ *
+ * This command can be used to enable HDMI communication on the associated GPU.
+ * This should be called prior to every modeset in which the displayId is
+ * capable of HDMI.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the displayId of the HDMI resource to
+ *     configure. This comes as input to this command.
+ *   bEnable
+ *     The legal values for this field are:
+ *       NV0073_CTRL_SPECIFIC_CTRL_HDMI_DISABLE
+ *       NV0073_CTRL_SPECIFIC_CTRL_HDMI_ENABLE
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_CTRL_HDMI (0x730274U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS_MESSAGE_ID (0x74U)
+
+typedef struct NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS {
+    NvU8   subDeviceInstance;
+    NvU32  displayId;
+    NvBool bEnable;
+} NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_CTRL_HDMI_DISABLE (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_CTRL_HDMI_ENABLE (0x00000001U)
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM
+ *
+ * This command is used to signal the resource manager that the audio stream
+ * is to be muted or unmuted.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the displayId of the HDMI resource to
+ *     configure. This comes as input to this command.
+ *   mute
+ *     The legal values for this field are:
+ *       NV0073_CTRL_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_TRUE
+ *       NV0073_CTRL_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_FALSE
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID (0x75U)
+
+typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS {
+    NvU8  subDeviceInstance;
+    NvU32 displayId;
+    NvU8  mute;
+} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_FALSE (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_TRUE (0x00000001U)
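+
+/*
+ * Sketch of muting audio around a modeset (same illustrative NvRmControl()
+ * assumption as above):
+ *
+ *   NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS m = { 0 };
+ *   m.subDeviceInstance = 0;
+ *   m.displayId         = displayId;
+ *   m.mute              = NV0073_CTRL_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_TRUE;
+ *   status = NvRmControl(hClient, hDisplayCommon,
+ *                        NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM,
+ *                        &m, sizeof(m));
+ *   // ... perform the modeset, then send the same call with
+ *   // _MUTESTREAM_FALSE to unmute.
+ */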
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_HDCP_STATE
+ *
+ * This command is used to obtain the state of hdcp for the specified attach
+ * point (that being the displayId).
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the display for which information is to be
+ *     returned. Only one display may be indicated in this parameter.
+ *     If more than one displayId is used a failing status of
+ *     NV_ERR_INVALID_ARGUMENT will be returned.
+ *   flags
+ *     This parameter specifies the state of the attach point resource.
+ *     Here are the current defined fields:
+ *       NV0073_CTRL_SPECIFIC_HDCP_STATE_ENCRYPTING
+ *         This comes as an output to this command. The attach point
+ *         is currently encrypting hdcp content over the attach point
+ *         link. The state of this field is determined by
+ *         NV0073_CTRL_SPECIFIC_HDCP_STATE_ENCRYPTING_CACHED. The
+ *         default is to return the cached hdcp state.
+ *       NV0073_CTRL_SPECIFIC_HDCP_STATE_ENCRYPTING_CACHED
+ *         This comes as an input to this command. If set to 1, the return
+ *         value in NV0073_CTRL_SPECIFIC_HDCP_STATE_ENCRYPTING is based
+ *         on the Status Word. If the uncached hdcp state fails,
+ *         such as the case for external hdcp designs that do not support
+ *         the Upstream Status register, then if the flag
+ *         NV0073_CTRL_SPECIFIC_HDCP_STATE_ENCRYPTING_CACHED is set, RM
+ *         will clear it and return the cached value instead.
+ *       NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_CAPABLE
+ *         This comes as an output to this command.
+ *         This bit indicates that the attach point resource is capable
+ *         of hdcp encryption.
+ *       NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_DUAL_LINK_CAPABLE
+ *         This comes as an output to this command.
+ *         This bit indicates that the attach point resource is capable
+ *         of hdcp encryption in a dual-link configuration.
+ *       NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_DISALLOWED
+ *         This bit indicates that the attach point resource should not
+ *         have HDCP available even if capable.
+ *       NV0073_CTRL_SPECIFIC_HDCP_STATE_RECEIVER_CAPABLE
+ *         This comes as an output to this command.
+ *         This bit indicates that the receiver attached to this attach point
+ *         resource is capable of hdcp encryption.
+ *       NV0073_CTRL_SPECIFIC_HDCP_STATE_REPEATER_CAPABLE
+ *         This comes as an output to this command.
+ *         This bit indicates that the receiver attached to this attach point
+ *         resource is capable of hdcp repeater operation.
+ *       NV0073_CTRL_SPECIFIC_HDCP_STATE_FP_INTERNAL
+ *         This comes as an output to this command.
+ *         This bit indicates that the associated display is an HDCP-capable
+ *         internal panel.
+ *       NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_RECEIVER_CAPABLE
+ *         This comes as an output to this command.
+ *         This bit indicates that the receiver attached to this attach point
+ *         resource is capable of hdcp22 encryption.
+ *       NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_REPEATER_CAPABLE
+ *         This comes as an output to this command.
+ *         This bit indicates that the receiver attached to this attach point
+ *         resource is capable of hdcp22 repeater operation.
+ *       NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_ENCRYPTING
+ *         This comes as an output to this command. The attach point
+ *         is currently encrypting hdcp22 content over the attach point
+ *         link. The state of this field is determined by
+ *         NV0073_CTRL_SPECIFIC_HDCP_STATE_ENCRYPTING_CACHED, which returns
+ *         the uncached hdcp22 status by default.
+ *       NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_TYPE1
+ *         This comes as an output to this command. The attach point
+ *         is currently encrypting hdcp22 content with stream Type 1 over the
+ *         link. The state of this field is determined by
+ *         NV0073_CTRL_SPECIFIC_HDCP_STATE_ENCRYPTING_CACHED, which returns
+ *         the uncached hdcp22 stream type status by default.
+ *       NV0073_CTRL_SPECIFIC_HDCP_STATE_AUTHENTICATED
+ *         This comes as an output to this command.
+ *         This bit indicates whether the receiver attached to this attach
+ *         point has completed authentication with the source. For a
+ *         non-DP-MST receiver, the state should be identical to
+ *         NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_ENCRYPTING, while DP MST
+ *         needs to assign the ECF after authentication.
+ *       NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_HDCP22_CAPABLE
+ *         This comes as an output to this command.
+ *         This bit indicates that the attach point resource is capable
+ *         of hdcp2.2 encryption.
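+ *
+ * As a sketch, the returned flags field can be tested with plain masking
+ * (the DRF-style helpers some clients use are not assumed here); the
+ * "high:low" definitions below name the bit range of each field:
+ *
+ *   // bit 0 (NV0073_CTRL_SPECIFIC_HDCP_STATE_ENCRYPTING 0:0)
+ *   NvBool bEncrypting = ((params.flags >> 0) & 0x1U) ==
+ *                        NV0073_CTRL_SPECIFIC_HDCP_STATE_ENCRYPTING_YES;
+ *   // bit 4 (NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_CAPABLE 4:4)
+ *   NvBool bApCapable  = ((params.flags >> 4) & 0x1U) ==
+ *                        NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_CAPABLE_YES;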
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDCP_STATE (0x730280U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_HDCP_STATE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_GET_HDCP_STATE_PARAMS_MESSAGE_ID (0x80U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_HDCP_STATE_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; +} NV0073_CTRL_SPECIFIC_GET_HDCP_STATE_PARAMS; + +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_ENCRYPTING 0:0 +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_ENCRYPTING_NO (0x00000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_ENCRYPTING_YES (0x00000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_ENCRYPTING_CACHED 1:1 +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_ENCRYPTING_CACHED_TRUE (0x00000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_ENCRYPTING_CACHED_FALSE (0x00000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_ENCRYPTION_INPROGRESS 2:2 +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_ENCRYPTION_INPROGRESS_YES (0x00000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_ENCRYPTION_INPROGRESS_NO (0x00000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_CAPABLE 4:4 +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_CAPABLE_NO (0x00000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_CAPABLE_YES (0x00000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_DUAL_LINK_CAPABLE 5:5 +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_DUAL_LINK_CAPABLE_NO (0x00000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_DUAL_LINK_CAPABLE_YES (0x00000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_DISALLOWED 6:6 +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_DISALLOWED_NO (0x00000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_DISALLOWED_YES (0x00000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_RECEIVER_CAPABLE 8:8 +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_RECEIVER_CAPABLE_NO (0x00000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_RECEIVER_CAPABLE_YES (0x00000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_REPEATER_CAPABLE 9:9 +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_REPEATER_CAPABLE_NO (0x00000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_REPEATER_CAPABLE_YES (0x00000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_FP_INTERNAL 10:10 +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_FP_INTERNAL_FALSE (0x00000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_FP_INTERNAL_TRUE (0x00000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_RECEIVER_CAPABLE 11:11 +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_RECEIVER_CAPABLE_NO (0x00000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_RECEIVER_CAPABLE_YES (0x00000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_REPEATER_CAPABLE 12:12 +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_REPEATER_CAPABLE_NO (0x00000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_REPEATER_CAPABLE_YES (0x00000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_ENCRYPTING 13:13 +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_ENCRYPTING_NO (0x00000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_ENCRYPTING_YES (0x00000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_TYPE1 14:14 +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_TYPE1_NO (0x00000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_HDCP22_TYPE1_YES (0x00000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_AUTHENTICATED 15:15 +#define NV0073_CTRL_SPECIFIC_HDCP_STATE_AUTHENTICATED_NO (0x00000000U) +#define 
NV0073_CTRL_SPECIFIC_HDCP_STATE_AUTHENTICATED_YES (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_HDCP22_CAPABLE 16:16
+#define NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_HDCP22_CAPABLE_NO (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_HDCP_STATE_AP_HDCP22_CAPABLE_YES (0x00000001U)
+
+/*
+ * NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_INFO
+ *
+ * This structure describes stStatus information.
+ *
+ *   displayId
+ *     This parameter returns the displayId associated with the
+ *     attach point index.
+ *   S
+ *     Each element contains the attach-point S. This value's bit
+ *     fields contain information pertaining to the STATUS of each attach
+ *     point. The stStatus parameter is broken down as follows:
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_ENCRYPTING
+ *         This field specifies that the attach-point is transmitting and
+ *         has output encryption enabled.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_REPEATER
+ *         This field specifies that the attach-point is transmitting to a
+ *         repeater device.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_USER_ACCESSIBLE
+ *         This field specifies that the attach-point is transmitting on a
+ *         user-accessible external digital port.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_EXTERNALLY_UNPROTECTED
+ *         This field specifies that the attach-point is transmitting
+ *         externally and is unprotected.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_ATTACH_PORT_INDEX
+ *         This field specifies the port/attach-point index.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_NUM_PORTS
+ *         This field specifies the number of connectable attach-ports.
+ *         The default is 8.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_INTERNAL_PANEL
+ *         This field specifies a compliant internal/non-user accessible
+ *         port panel without hdcp encryption.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_WIDE_SCOPE
+ *         This field specifies that _CS alone is not enough to determine the
+ *         presence of non-compliant outputs (this field is always 1).
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_CS_CAPABLE
+ *         This field specifies that connection-state is supported.
+ *         This field is always 1.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_READZ_CAPABLE
+ *         This field specifies that readZ is supported.
+ *         This field is always 0.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_DUAL_LINK_EVEN
+ *         This field specifies the even half of a dual-link (0x74).
+ *         This field is *NOT* yet supported.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_DUAL_LINK_ODD
+ *         This field specifies the odd half of a dual-link (0x76).
+ *         This field is *NOT* yet supported.
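+ *
+ * Decoding sketch (plain shift/mask per the "high:low" ranges defined below;
+ * illustrative only):
+ *
+ *   NvU64  s           = stStatus[i].S;
+ *   NvU32  portIdx     = (NvU32)((s >> 4) & 0xFULL);  // _S_ATTACH_PORT_INDEX 7:4
+ *   NvU32  numPorts    = (NvU32)((s >> 8) & 0xFULL);  // _S_NUM_PORTS 11:8
+ *   NvBool bEncrypting = (s & 0x1ULL) != 0;           // _S_ENCRYPTING 0:0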
+ */
+typedef struct NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_INFO {
+    NvU32 displayId;
+    NV_DECLARE_ALIGNED(NvU64 S, 8);
+} NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_INFO;
+
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_ENCRYPTING 0:0
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_REPEATER 1:1
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_USER_ACCESSIBLE 2:2
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_EXTERNALLY_UNPROTECTED 3:3
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_ATTACH_PORT_INDEX 7:4
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_NUM_PORTS 11:8
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_INTERNAL_PANEL 12:12
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_WIDE_SCOPE 13:13
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_CS_CAPABLE 14:14
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_READZ_CAPABLE 15:15
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_RESERVED0 39:16
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_DUAL_LINK_EVEN 40:40
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_S_DUAL_LINK_ODD 41:41
+
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_HDCP_CTRL
+ *
+ * This command is used to do HDCP controls on the specified attach
+ * point (that being the displayId).
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the display for which information is to be
+ *     returned. Only one display may be indicated in this parameter.
+ *     If more than one displayId is used a failing status of
+ *     NV_ERR_INVALID_ARGUMENT will be returned.
+ *   err
+ *     This parameter provides info regarding the outcome
+ *     of this control call. If zero, no errors were found.
+ *     Otherwise, this parameter will specify the error detected.
+ *     The err parameter is broken down as follows:
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_UNSUCCESSFUL
+ *         If set to _YES, this indicates at least one of the calling
+ *         functions failed.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_PENDING
+ *         If set to _YES, this indicates that renegotiation is not complete
+ *         and that the client should check status later.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_BAD_TOKEN_TYPE
+ *         If set to _YES, the session ID or KSV was rejected.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_LINK_FAILED
+ *         If set to _YES, renegotiation could not complete.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_INVALID_PARAMETER
+ *         If set to _YES, one or more of the calling parameters was invalid.
+ *   cmd
+ *     This parameter specifies a bitmask of the legal defined fields.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_NULL
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_RENEGOTIATE
+ *         This command forces the specified displayId to renegotiate the
+ *         hdcp link. The client should supply, as input,
+ *         cN and cKsv. On return, bStatus, stStatus and cS are returned.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_DISABLE_AUTHENTICATION
+ *         This command forces the specified displayId to turn off
+ *         authentication on the hdcp link.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_READ_LINK_STATUS
+ *         This command reads the status of the cipher returning a signed
+ *         S (i.e. kP) and cS for the requested displayId, as well as
+ *         the relevant parameters necessary for the client to verify
+ *         the information provided.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_VALIDATE_LINK
+ *         This command returns the parameters necessary to validate the
+ *         links for the displayId. The client should supply as input
+ *         cN and cKsv. On return, bStatus, cS, stStatus, aN, numBksvs,
+ *         bKsvList, vP, mP, and dKsv are returned.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_QUERY_HEAD_CONFIG
+ *         This command enumerates ports attached to a head.
+ *         On input, cN and cKsv should be provided and on return
+ *         bStatus, stStatus, and the connection state are returned.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_ABORT_AUTHENTICATION
+ *         This command causes the specified AP to abort the authentication
+ *         protocol after the KSV list is read, or the next time it is
+ *         renegotiated.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_GET_ALL_FLAGS
+ *         This command provides all possible valid device data.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_GET_SPECIFIED_FLAGS
+ *         This command provides data specified by the flags field set by the
+ *         client.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_FORWARD_KSVLIST_READY
+ *         This command allows the client to indicate that there is a pending
+ *         Hdcp1X KsvList Ready notification at BStatus read.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_READ_LINK_STATUS_NO_DISPLAY
+ *         This command reads the status of the cipher returning a signed
+ *         S (i.e. kP) and cS and the relevant parameters necessary for the
+ *         client to verify upstream.
+ *   flags
+ *     This parameter specifies a bitmask of the legal defined fields and the
+ *     reason for AbortAuthentication.
+ *     The client shall set the desired fields and on return, if valid,
+ *     the resource manager will set which flags are actually valid.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BCAPS_PRESENT
+ *         IN: Request hdcp receiver bcaps register state.
+ *         OUT: Bcaps parameter contains valid receiver bcaps register data.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BSTATUS_PRESENT
+ *         IN: Request hdcp receiver bstatus register state.
+ *         OUT: BStatus parameter contains valid receiver bstatus register
+ *         data.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BKSV_PRESENT
+ *         IN: Request hdcp receiver key selection vector: BKSV
+ *         Driver will read BKSV from the receiver and update RM states if
+ *         the cmd is _GET_SPECIFIED_FLAGS and _KP_PRESENT is unset.
+ *         Otherwise, the driver returns the cached BKSV.
+ *         OUT: Bksv parameter contains valid receiver bksv data.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BKSV_LIST_PRESENT
+ *         IN: Request list of downstream BKSV from repeater
+ *         OUT: BksvList parameter contains valid data.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_DKSV_PRESENT
+ *         IN: Request hdcp transmitter key selection vector: DKSV
+ *         OUT: Dksv parameter contains valid receiver DKSV data.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_AN_PRESENT
+ *         IN: Request hdcp parameter An
+ *         OUT: An parameter contains valid data
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_AKSV_PRESENT
+ *         IN: Request hdcp transmitter downstream key selection vector: AKSV
+ *         OUT: Aksv parameter contains valid receiver Aksv data.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_VP_PRESENT
+ *         IN: Request VPrime data
+ *         OUT: VPrime parameter contains valid data
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_MP_PRESENT
+ *         IN: Request MPrime data used for repeater authentication
+ *         OUT: MPrime parameter contains valid data
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_KP_PRESENT
+ *         IN: Request SPrime data
+ *         OUT: SPrime parameter contains valid data
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_IMPLICIT_HEAD
+ *         IN: The head to use if no legal head could be located.
+ *             Use NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_IMPLICIT_HEAD_NONE
+ *             if no implicit head should be used.
+ *         OUT: n/a
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_FORCE_REAUTH
+ *         IN: Request to execute the authentication protocol even if
+ *             encryption is enabled.
+ *         OUT: n/a
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_RXIDMSG_PENDING
+ *         IN: Request to execute the repeater authentication protocol with
+ *             pending ID List message information.
+ *         OUT: n/a
+ *     Reason for AbortAuthentication:
+ *     When passed in by the client, it indicates the reason why the client
+ *     issued an Abort. When returned by RM, it indicates the reason of the
+ *     last successful Abort.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_NONE
+ *         Default value
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_UNTRUST
+ *         Abort due to Kp mismatch
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_UNRELBL
+ *         Abort due to repeated link failure
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_KSV_LEN
+ *         Abort due to KSV length
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_KSV_SIG
+ *         Abort due to KSV signature
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_SRM_SIG
+ *         Abort due to SRM signature
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_SRM_REV
+ *         Abort due to SRM revocation
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_NORDY
+ *         Abort due to repeater not ready
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_KSVTOP
+ *         Abort due to KSV topology error
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_BADBKSV
+ *         Abort due to invalid Bksv
+ *   linkCount
+ *     This parameter specifies how many links are valid. This is important
+ *     when determining AKSV, BKSV, AN, etc... and is an output to this
+ *     command.
+ *   apIndex
+ *     Each element of this parameter specifies the hardware attach-point
+ *     index for the requested displayId. The 2nd element is only valid in
+ *     the case where the resource output is capable of dual-link, determined
+ *     when the linkCount is greater than 1.
+ *   cN
+ *     This parameter is the input value Cn, a 64 bit random number,
+ *     to be provided to this command. The Cn value is the Upstream
+ *     protocol's exchange random number. This value must be written by
+ *     software before the KSV is written if the transmitter device follows
+ *     the HDCP upstream protocol.
+ *   cKsv
+ *     This parameter is the input value Cksv (a unique identifier) of 40 bit
+ *     size to be provided to this command. This input value shall contain 20
+ *     ones and 20 zeros in accordance with the HDCP specification. This value
+ *     must be written by software before the KSV is written if the transmitter
+ *     device follows the HDCP upstream protocol.
+ *   aN
+ *     Each element of this buffer specifies the output value aN,
+ *     a 64 bit random number used during hdcp authentication and validating
+ *     the upstream link, in which case only the first 40 bits are used.
+ *     The 2nd element is only valid in the case where the resource output is
+ *     capable of dual-link, determined when the linkCount is greater than 1.
+ *   aKsv
+ *     Each element of this buffer specifies the output value aKsv of 40 bit
+ *     size. As per the HDCP specification this value should contain 20 ones
+ *     and 20 zeros.
+ *     The 2nd element is only valid in the case where the resource output is
+ *     capable of dual-link, determined when the linkCount is greater than 1.
+ *   bStatus
+ *     Each element contains the attach-point bStatus data returned by the
+ *     repeater/receiver device (if valid). The bStatus value is an output
+ *     of this command. For HDCP on DP, bInfo is the one we should look at.
+ *     bInfo is defined exactly the same as bStatus on DVI.
The HAL takes
+ *     care of the difference here.
+ *     This bStatus info is broken down as follows:
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_BSTATUS_DEVICE_COUNT
+ *         Specifies the total number of receivers excluding the repeater.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_BSTATUS_MAX_DEVICES_EXCEEDED
+ *         Specifies a topology error in which greater than 127 devices are
+ *         detected in the overall hdcp configuration.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_BSTATUS_REPEATER_DEPTH
+ *         Specifies the repeater depth.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_BSTATUS_MAX_CASCADE_EXCEEDED
+ *         Specifies a topology error in which greater than 7 levels are
+ *         detected in the overall hdcp configuration.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDMI_MODE
+ *         Specifies that the hdcp receiver is in HDMI mode.
+ *   bCaps
+ *     The BCAPS value is an output of this command. This value's bit fields
+ *     contain information returned by the receiver device. Bcaps can be used
+ *     to determine if the receiver is a repeater and when the ksvlist and
+ *     vprime data is ready.
+ *     BCAPS is defined differently in the spec of HDCP on DP. It's been
+ *     split into BCAPS and BSTATUS. Here we'll send a flag to indicate to
+ *     the client if it's DP.
+ *     The bCaps parameter is broken down as follows based on HDCP spec 1.1:
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_FAST_REAUTHENTICATION
+ *         This field, when set to 1, specifies the receiver is capable of
+ *         receiving (unencrypted) video signal during session
+ *         re-authentication. All HDMI capable receivers shall be capable of
+ *         performing the fast authentication even if this bit is not set.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_EESS_1_1
+ *         This field, when set to 1, specifies this HDCP receiver supports
+ *         Enhanced Encryption Status Signaling (EESS), Advance cipher, and
+ *         Enhanced Link Verification options. For the HDMI protocol, EESS
+ *         capability is assumed regardless of this bit setting.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_READY_KSV_FIFO
+ *         This field, when set to 1, specifies this HDCP repeater has built
+ *         the list of attached KSVs and computed the verification value V'.
+ *         This value is always zero during the computation of V'.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_FAST
+ *         This field, when set to 1, specifies this device supports 400 kHz
+ *         transfers. When zero, 100 kHz is the maximum transfer rate
+ *         supported.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_REPEATER
+ *         This field, when set to 1, specifies the HDCP receiver supports
+ *         downstream connections as permitted by the Digital Content
+ *         Protection LLC license. This bit does not change while the HDCP
+ *         receiver is active.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_HDMI_RESERVED
+ *         This field is reserved, and HDCP receivers not capable of
+ *         supporting HDMI must clear this bit to 0.
+ *   stStatus
+ *     This parameter specifies the attach point stStatus. See
+ *     the description of NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_INFO for
+ *     details on stStatus information.
+ *   cS
+ *     This parameter provides the connection-state for the status of
+ *     all port/attach-points on this head.
+ *     The connection-state is broken down as follows:
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_ATTACH_POINTS
+ *         This field specifies the transmitting attach-points.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_NON_HDCP
+ *         This field specifies the non-HDCP transmitting attach-points.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_HEAD_INDEX
+ *         This field specifies the index of the head.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_RFUPLANES
+ *         This field specifies the RFUPLANES.
+ *         This field is *NOT* yet supported.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_NUM_ACTIVE_HEADS
+ *         This field specifies the number of heads - 1.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_ATTACH_PLANES
+ *         This field specifies attach planes.
+ *         This field is *NOT* yet supported.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_ENCRYPTING
+ *         This field is supported on chips with Display IP 0501 and later.
+ *       NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_SPAN_MODE
+ *         This field specifies dual-display span mode.
+ *         This field is *NOT* yet supported.
+ *   bKsvList
+ *     In case the downstream device is a repeater, this will give the list
+ *     of KSVs of all downstream devices attached to the repeater. It differs
+ *     from actualKsvSize because this array is allocated at the maximum
+ *     allowed size.
+ *     If the downstream device is a receiver, this array will contain all
+ *     zeros.
+ *   numBksvs
+ *     Total number of Bksv from all downstream devices in the bKsvList.
+ *     This info can also be obtained via bStatus.
+ *   vP
+ *     The VPRIME value returned as output by this command from the repeater
+ *     device. This value should be used to compare the verification value
+ *     during the HDCP upstream protocol using SHA-1 in accordance with the
+ *     upstream protocol. This value can be ignored if bAuthRequired is not
+ *     set, indicating the verification is done by the transmitter device.
+ *   kP
+ *     The KP value is returned as an output by this command. This
+ *     parameter is the signature computed by hardware, and the client
+ *     should compute the signature to compare this value.
+ *     The 2nd element is only valid in the case where the resource output is
+ *     capable of dual-link, determined when the linkCount is greater than 1.
+ *   mP
+ *     The MPRIME value returned as output by this command.
+ *     MPrime shall be decrypted by the client and used in the SHA-1
+ *     computation of V during upstream authentication. This value can be
+ *     ignored if bAuthRequired is not set, indicating the verification is
+ *     done by the transmitter device.
+ *   dKsv
+ *     Each element of this buffer is the output value DKSV of 40 bit size.
+ *     As per the HDCP specification this value should contain 20 ones and
+ *     20 zeros.
+ *     The 2nd element is only valid in the case where the resource output is
+ *     capable of dual-link, determined when the linkCount is greater than 1.
+ *   streamIndex
+ *     Each content stream is assigned an index value by the upstream client.
+ *     HDMI: The index must be 0.
+ *     DP SST: The index must be 0.
+ *     DP MST: Assigned stream index.
+ *   streamType
+ *     Each content stream is assigned a type value by the upstream content
+ *     control function.
+ *     0x00: Type 0 content stream. May be transmitted by the HDCP repeater
+ *           to all HDCP devices.
+ *     0x01: Type 1 content stream. Must not be transmitted by the HDCP
+ *           repeater to HDCP 1.x-compliant devices and HDCP 2.0-compliant
+ *           repeaters.
+ *     0x02-0xFF: reserved for future use only.
+ *   bEnforceType0Hdcp1xDS
+ *     If this bit is set, DPU enforces Type0 if it finds an Hdcp1x monitor
+ *     downstream.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_HDCP_CTRL (0x730282U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_HDCP_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_HDCP_LINK_COUNT (0x0000002U)
+#define NV0073_CTRL_HDCP_VPRIME_SIZE (0x0000014U)
+#define NV0073_CTRL_HDCP_MAX_DEVICE_COUNT (0x00000FFU)
+#define NV0073_CTRL_HDCP_KSV_SIZE (0x0000005U)
+#define NV0073_CTRL_HDCP_MAX_NUM_APS (0x0000010U)
+
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_PARAMS_MESSAGE_ID (0x82U)
+
+typedef struct NV0073_CTRL_SPECIFIC_HDCP_CTRL_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+
+    NvU32 err;
+
+    NvU32 cmd;
+    NvU32 flags;
+    NvU8  linkCount;
+
+    NV_DECLARE_ALIGNED(NvU64 cN, 8);
+    NV_DECLARE_ALIGNED(NvU64 cKsv, 8);
+
+    NvU32 apIndex[NV0073_CTRL_HDCP_LINK_COUNT];
+
+    NV_DECLARE_ALIGNED(NvU64 aN[NV0073_CTRL_HDCP_LINK_COUNT], 8);
+    NV_DECLARE_ALIGNED(NvU64 aKsv[NV0073_CTRL_HDCP_LINK_COUNT], 8);
+
+    NvU32 bStatus[NV0073_CTRL_HDCP_MAX_NUM_APS];
+    NvU32 bCaps[NV0073_CTRL_HDCP_MAX_NUM_APS];
+
+    NV_DECLARE_ALIGNED(NV0073_CTRL_SPECIFIC_HDCP_CTRL_STATUS_INFO stStatus[NV0073_CTRL_HDCP_MAX_NUM_APS], 8);
+
+    NV_DECLARE_ALIGNED(NvU64 cS, 8);
+
+    NV_DECLARE_ALIGNED(NvU64 bKsv[NV0073_CTRL_HDCP_LINK_COUNT], 8);
+    NV_DECLARE_ALIGNED(NvU64 bKsvList[NV0073_CTRL_HDCP_MAX_DEVICE_COUNT], 8);
+    NvU32 numBksvs;
+
+    NvU8  vP[NV0073_CTRL_HDCP_VPRIME_SIZE];
+    NV_DECLARE_ALIGNED(NvU64 kP[NV0073_CTRL_HDCP_LINK_COUNT], 8);
+
+    NV_DECLARE_ALIGNED(NvU64 mP, 8);
+    NV_DECLARE_ALIGNED(NvU64 dKsv[NV0073_CTRL_HDCP_LINK_COUNT], 8);
+
+    NvU32  streamIndex;
+    NvU8   streamType;
+    NvBool bEnforceType0Hdcp1xDS;
+
+    NvBool bPendingKsvListReady;
+
+    NvBool isHdcpCapable;
+    NvBool isHdcpAuthOn;
+    NvBool isHdcpRp;
+    NvBool isHdcp2X;
+    NvBool bMaxCascadeExceeded;
+    NvBool bMaxDeviceExceeded;
+    NvBool bHdcp1DevDownstream;
+    NvBool bHdcp2LegacyDevDownstream;
+    NvU8   cascadeDepth;
+} NV0073_CTRL_SPECIFIC_HDCP_CTRL_PARAMS;
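+
+/*
+ * Renegotiation sketch (illustrative; NvRmControl() and the handles are
+ * assumptions, not part of this header):
+ *
+ *   NV0073_CTRL_SPECIFIC_HDCP_CTRL_PARAMS ctrl;
+ *   memset(&ctrl, 0, sizeof(ctrl));
+ *   ctrl.subDeviceInstance = 0;
+ *   ctrl.displayId         = displayId;
+ *   ctrl.cmd  = NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_RENEGOTIATE;
+ *   ctrl.cN   = myCn;    // upstream exchange random number
+ *   ctrl.cKsv = myCksv;  // 20 ones / 20 zeros per the HDCP spec
+ *   status = NvRmControl(hClient, hDisplayCommon,
+ *                        NV0073_CTRL_CMD_SPECIFIC_HDCP_CTRL,
+ *                        &ctrl, sizeof(ctrl));
+ *   // On NV_OK, check ctrl.err against the _ERR_* fields below before
+ *   // consuming bStatus/stStatus/cS.
+ */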
+
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_UNSUCCESSFUL 0:0
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_UNSUCCESSFUL_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_UNSUCCESSFUL_YES (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_PENDING 1:1
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_PENDING_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_PENDING_YES (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_BAD_TOKEN_TYPE 2:2
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_BAD_TOKEN_TYPE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_BAD_TOKEN_TYPE_YES (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_LINK_FAILED 3:3
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_LINK_FAILED_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_LINK_FAILED_YES (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_INVALID_PARAMETER 4:4
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_INVALID_PARAMETER_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_ERR_INVALID_PARAMETER_YES (0x0000001U)
+
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD 31:0
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_NULL (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_READ_LINK_STATUS (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_VALIDATE_LINK (0x0000002U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_QUERY_HEAD_CONFIG (0x0000003U)
+#define
NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_RENEGOTIATE (0x0000004U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_GET_ALL_FLAGS (0x0000005U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_GET_SPECIFIED_FLAGS (0x0000006U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_ABORT_AUTHENTICATION (0x0000007U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_DISABLE_AUTHENTICATION (0x0000008U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_SET_TYPE (0x0000009U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_FORWARD_KSVLIST_READY (0x000000AU) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_READ_LINK_STATUS_NO_DISPLAY (0x000000BU) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CMD_READ_TOPOLOGY (0x000000CU) + +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BCAPS_PRESENT 0:0 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BCAPS_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BCAPS_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BSTATUS_PRESENT 1:1 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BSTATUS_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BSTATUS_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BKSV_PRESENT 2:2 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BKSV_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BKSV_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BKSV_S_PRESENT 3:3 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BKSV_S_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BKSV_S_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BKSV_LIST_PRESENT 4:4 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BKSV_LIST_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_BKSV_LIST_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_AN_PRESENT 5:5 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_AN_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_AN_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_AN_S_PRESENT 6:6 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_AN_S_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_AN_S_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_AKSV_PRESENT 7:7 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_AKSV_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_AKSV_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_AKSV_S_PRESENT 8:8 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_AKSV_S_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_AKSV_S_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_DKSV_PRESENT 9:9 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_DKSV_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_DKSV_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_DKSV_S_PRESENT 10:10 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_DKSV_S_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_DKSV_S_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_MP_PRESENT 11:11 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_MP_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_MP_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_VP_PRESENT 12:12 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_VP_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_VP_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_CN_PRESENT 13:13 
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_CN_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_CN_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_CKSV_PRESENT 14:14 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_CKSV_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_CKSV_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_KP_PRESENT 15:15 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_KP_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_KP_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_KP_S_PRESENT 16:16 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_KP_S_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_KP_S_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_STATUS_PRESENT 17:17 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_STATUS_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_STATUS_PRESENT_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_CS_PRESENT 18:18 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_CS_PRESENT_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_CS_PRESENT_YES (0x0000001U) + +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT 22:19 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_NONE (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_UNTRUST (0x0000001U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_UNRELBL (0x0000002U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_KSV_LEN (0x0000003U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_KSV_SIG (0x0000004U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_SRM_SIG (0x0000005U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_SRM_REV (0x0000006U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_NORDY (0x0000007U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_KSVTOP (0x0000008U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_ABORT_BADBKSV (0x0000009U) + +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_IMPLICIT_HEAD 25:23 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_IMPLICIT_HEAD_NONE (0x0000007U) + +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_TYPE_CHANGED 26:26 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_TYPE_CHANGED_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_TYPE_CHANGED_YES (0x0000001U) + +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_FORCE_REAUTH 27:27 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_FORCE_REAUTH_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_FORCE_REAUTH_YES (0x0000001U) + +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_RXIDMSG_PENDING 28:28 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_RXIDMSG_PENDING_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_FLAGS_RXIDMSG_PENDING_YES (0x0000001U) + +/* BCaps definition of HDCP over TMDS */ +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_FAST_REAUTHENTICATION 0:0 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_EESS_1_1 1:1 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_FAST 4:4 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_READY_KSV_FIFO 5:5 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_REPEATER 6:6 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_HDMI_RESERVED 7:7 +/* BCaps definition of HDCP over DP */ +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_DP_HDCP_CAPABLE 0:0 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_DP_REPEATER 1:1 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_DP_READY_KSV_FIFO 2:2 +#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_DP_R0_AVAILABLE 3:3 +#define 
NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_DP_LINK_INTEGRITY_FAILURE 4:4
+
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BSTATUS_DEVICE_COUNT 6:0
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BSTATUS_MAX_DEVICES_EXCEEDED 7:7
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BSTATUS_REPEATER_DEPTH 10:8
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BSTATUS_MAX_CASCADE_EXCEEDED 11:11
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDMI_MODE 12:12
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_RESERVED_0 31:13
+
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_ATTACH_POINTS 15:0
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_NON_HDCP 16:16
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_HEAD_INDEX 20:17
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_RFUPLANES 28:21
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_NUM_ACTIVE_HEADS 30:29
+// Bits 39:29 are implementation-dependent connection state information.
+// For HDCP22, from gm206 (v02_06) onwards, bit 30 reports the HDCP22
+// encryption status.
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_HDCP22_ENCRYPTION 30:30
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_HDCP22_ENCRYPTION_YES 0x00000001U
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_HDCP22_ENCRYPTION_NO 0x00000000U
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_HDCP22_TYPE1 31:31
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_HDCP22_TYPE1_YES 0x00000001U
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_HDCP22_TYPE1_NO 0x00000000U
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_RESERVED_0 39:32
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_ATTACH_PLANES 47:40
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_ENCRYPTING 48:48
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_CS_SPAN_MODE 49:49
+
+/* This HDCP_MODE definition applies to both DP and TMDS */
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_HDCP_MODE 15:15
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_HDCP_MODE_TMDS (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_HDCP_MODE_DP (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_HDCP_VERSION 23:16
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_HDCP_VERSION_1X (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_BCAPS_HDCP_VERSION_22 (0x0000022U)
+
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BCAPS_REPEATER 0:0
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BCAPS_REPEATER_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BCAPS_REPEATER_YES (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BCAPS_HDCP_CAPABLE 1:1
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BCAPS_HDCP_CAPABLE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BCAPS_HDCP_CAPABLE_YES (0x0000001U)
+
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BSTATUS_DP_READY 0:0
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BSTATUS_DP_HPRIME_AVAILABLE 1:1
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BSTATUS_DP_PAIRING_AVAILABLE 2:2
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BSTATUS_DP_REAUTH_REQ 3:3
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BSTATUS_DP_LINK_INTEGRITY_FAILURE 4:4
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BSTATUS_HDCP1_REPEATER_DOWNSTREAM 5:5
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BSTATUS_HDCP2_0_REPEATER_DOWNSTREAM 6:6
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BSTATUS_MAX_CASCADE_EXCEEDED 7:7
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BSTATUS_MAX_DEVS_EXCEEDED 8:8
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BSTATUS_DEVICE_COUNT 13:9
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_BSTATUS_REPEATER_DEPTH 16:14
+
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_TYPE_0 (0x00U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_TYPE_1 (0x01U)
+
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_ENFORCE_TYPE0_HDCP1XDS_NO (0x00U)
+#define NV0073_CTRL_SPECIFIC_HDCP_CTRL_HDCP22_ENFORCE_TYPE0_HDCP1XDS_YES (0x01U)
+
+
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_HDCP_DIAGNOSTICS
+ *
+ * This command is used to obtain diagnostic info, useful when hdcp
+ * fails for the specified attach point (that being the displayId).
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the display for which information is to be
+ *     returned. Only one display may be indicated in this parameter.
+ *     If more than one displayId is used a failing status of
+ *     NV_ERR_INVALID_ARGUMENT will be returned.
+ *   flags
+ *     This parameter specifies the diagnostics obtained from the attach
+ *     point resource. Here are the current defined fields:
+ *       NV0073_CTRL_SPECIFIC_HDCP_DIAGNOSTICS_ROM_ERROR
+ *         The hdcp hardware detected an error with the rom. Possible
+ *         causes are that a rom is not present or, if present, the hardware
+ *         is not able to access the rom.
+ *       NV0073_CTRL_SPECIFIC_HDCP_DIAGNOSTICS_BOND_NOT_ENABLED
+ *         The hdcp fuse register has not been set.
+ *       NV0073_CTRL_SPECIFIC_HDCP_DIAGNOSTICS_AKSV_INVALID
+ *         If the AKSV (key selection vector) of the hardware does not return
+ *         20 ones and 20 zeros, this bit will be set. This is an indication
+ *         that the ROM is not programmed correctly and may need to be
+ *         corrected by replacing the external hdcp cryptorom.
+ *       NV0073_CTRL_SPECIFIC_HDCP_DIAGNOSTICS_BKSV_INVALID
+ *         If the BKSV (key selection vector) of the display receiver
+ *         hardware does not return 20 ones and 20 zeros, this bit will be
+ *         set.
+ *       NV0073_CTRL_SPECIFIC_HDCP_DIAGNOSTICS_DKSV_INVALID
+ *         If the DKSV (key selection vector) of the upstream hdcp hardware
+ *         does not return 20 ones and 20 zeros, this bit will be set.
+ *       NV0073_CTRL_SPECIFIC_HDCP_DIAGNOSTICS_DUAL_LINK_INUSE
+ *         This bit is set if the attach point is currently outputting
+ *         dual-link.
+ *       NV0073_CTRL_SPECIFIC_HDCP_DIAGNOSTICS_DOWNSTREAM_CHECKSUM_FAILED
+ *         This bit is set if hardware reports that its checksum BIST of its
+ *         downstream HDCP keys failed.
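+ *
+ * Checking sketch (plain masking per the "high:low" ranges defined below;
+ * illustrative only):
+ *
+ *   if ((diag.flags >> 2) & 0x1U)  // _DIAGNOSTICS_AKSV_INVALID 2:2
+ *   {
+ *       // AKSV is malformed: the external hdcp cryptorom likely needs
+ *       // attention.
+ *   }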
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDCP_DIAGNOSTICS (0x730281U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_HDCP_DIAGNOSTICS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_GET_HDCP_DIAGNOSTICS_PARAMS_MESSAGE_ID (0x81U)
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_HDCP_DIAGNOSTICS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 flags;
+} NV0073_CTRL_SPECIFIC_GET_HDCP_DIAGNOSTICS_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_HDCP_DIAGNOSTICS_ROM_ERROR 0:0
+#define NV0073_CTRL_SPECIFIC_HDCP_DIAGNOSTICS_BOND_NOT_ENABLED 1:1
+#define NV0073_CTRL_SPECIFIC_HDCP_DIAGNOSTICS_AKSV_INVALID 2:2
+#define NV0073_CTRL_SPECIFIC_HDCP_DIAGNOSTICS_BKSV_INVALID 3:3
+#define NV0073_CTRL_SPECIFIC_HDCP_DIAGNOSTICS_DKSV_INVALID 4:4
+#define NV0073_CTRL_SPECIFIC_HDCP_DIAGNOSTICS_DUAL_LINK_INUSE 5:5
+#define NV0073_CTRL_SPECIFIC_HDCP_DIAGNOSTICS_DOWNSTREAM_CHECKSUM_FAILED 6:6
+
+/*
+ * NV0073_CTRL_SPECIFIC_ACPI_ID_MAPPING
+ *
+ * This structure defines the mapping between the ACPI ID and the
+ * corresponding display ID of a display device.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   acpiId
+ *     The ACPI ID of the display device
+ *   displayId
+ *     The corresponding display ID
+ *   dodIndex
+ *     The corresponding DOD index
+ */
+typedef struct NV0073_CTRL_SPECIFIC_ACPI_ID_MAPPING {
+    NvU32 subDeviceInstance;
+    NvU32 acpiId;
+    NvU32 displayId;
+    NvU32 dodIndex;
+} NV0073_CTRL_SPECIFIC_ACPI_ID_MAPPING;
+
+/*
+ * NV0073_CTRL_CMD_GET_ACPI_DOD_DISPLAY_PORT_ATTACHMENT
+ *
+ * This call will return the Display Port Attachment value
+ * per displayId, as defined by NVIDIA, that is directly
+ * associated with the ACPI 3.0 _DOD entry's Display Port
+ * Attachment field. This should help clients map the
+ * _DOD ACPI ID to each displayId. Note that some systems
+ * do not have a standard in place for this field. On those
+ * systems, the RM will return NV_ERR_NOT_SUPPORTED.
+ *
+ * Note that this "Display Port" attachment field has nothing
+ * to do with DisplayPort/DP. It's an unfortunate name inside
+ * the ACPI 3.0 spec that coincides with the name of DisplayPort.
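+ *
+ * A minimal interpretation sketch (illustrative only):
+ *
+ *   switch (params.dispPortAttachment)
+ *   {
+ *       case NV0073_DISPLAY_PORT_ATTACHMENT_LVDS:
+ *           // internal panel attachment in the _DOD sense
+ *           break;
+ *       case NV0073_DISPLAY_PORT_ATTACHMENT_UNKNOWN:
+ *           // no standard mapping on this system
+ *           break;
+ *       default:
+ *           break;
+ *   }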
+ *
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_NOT_SUPPORTED
+ *
+ */
+
+#define NV0073_CTRL_GET_ACPI_DOD_DISPLAY_PORT_ATTACHMENT_PARAMS_MESSAGE_ID (0x85U)
+
+typedef struct NV0073_CTRL_GET_ACPI_DOD_DISPLAY_PORT_ATTACHMENT_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 dispPortAttachment;
+} NV0073_CTRL_GET_ACPI_DOD_DISPLAY_PORT_ATTACHMENT_PARAMS;
+
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_ACPI_DOD_DISPLAY_PORT_ATTACHMENT (0x730285U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_GET_ACPI_DOD_DISPLAY_PORT_ATTACHMENT_PARAMS_MESSAGE_ID" */
+
+// defines for dispPortAttachment
+#define NV0073_DISPLAY_PORT_ATTACHMENT_ANALOG (0x00000000U)
+#define NV0073_DISPLAY_PORT_ATTACHMENT_LVDS (0x00000001U)
+#define NV0073_DISPLAY_PORT_ATTACHMENT_DP_A (0x00000002U)
+#define NV0073_DISPLAY_PORT_ATTACHMENT_DP_B (0x00000003U)
+#define NV0073_DISPLAY_PORT_ATTACHMENT_DP_C (0x00000004U)
+#define NV0073_DISPLAY_PORT_ATTACHMENT_DP_D (0x00000005U)
+#define NV0073_DISPLAY_PORT_ATTACHMENT_UNKNOWN (0xFFFFFFFFU)
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_SET_ACPI_ID_MAPPING
+ *
+ * This call will update the RM data structure which holds the
+ * ACPI ID to display ID mapping of the display devices.
+ *
+ * The input parameter is an array of structures of type
+ * NV0073_CTRL_SPECIFIC_ACPI_ID_MAPPING.
+ *
+ * If some of the array elements remain unused, the acpiId field of the
+ * structure must be set to 0x0000.
+ *
+ * The size of the array is given by
+ * NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES (defined below).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_ACPI_ID_MAPPING (0x730284U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES 16U
+
+#define NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS_MESSAGE_ID (0x84U)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS {
+    NV0073_CTRL_SPECIFIC_ACPI_ID_MAPPING mapTable[NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES];
+} NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS;
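+
+/*
+ * Table-filling sketch (illustrative; NvRmControl(), the handles, and the
+ * 0x0110 ACPI ID are assumptions). Unused entries must leave acpiId zero:
+ *
+ *   NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS map;
+ *   memset(&map, 0, sizeof(map));  // zeroes acpiId of all unused entries
+ *   map.mapTable[0].subDeviceInstance = 0;
+ *   map.mapTable[0].acpiId    = 0x0110;     // hypothetical ACPI ID
+ *   map.mapTable[0].displayId = displayId;
+ *   map.mapTable[0].dodIndex  = 0;
+ *   status = NvRmControl(hClient, hDisplayCommon,
+ *                        NV0073_CTRL_CMD_SPECIFIC_SET_ACPI_ID_MAPPING,
+ *                        &map, sizeof(map));
+ */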
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET
+ *
+ * This command is used to program the display output packets.
+ * Currently it supports DP and HDMI.
+ * Common supported packets are AVI infoframes, Audio infoframes, Gamma
+ * Metadata, Vendor Specific infoframes and General Control Packets (GCP).
+ * + GCP AVMute Enable should be performed before the start of the modeset.
+ * + GCP AVMute Disable should be performed after the end of the modeset.
+ *   GCP AVMute should contain HDR + 7 bytes.
+ * + AVI infoframes should occur after the modeset but before a GCP AVMute
+ *   Disable. AVI infoframes should contain HDR + 14 bytes.
+ * + Audio infoframes should occur after the modeset but before a GCP AVMute
+ *   Enable. Audio infoframes should contain HDR + 11 bytes.
+ * + Gamma Metadata packets should contain HDR + 28 bytes.
+ * + Vendor Specific packets are variable length.
+ *   Per the HDMI 1.4 spec (June 5, 2009), the payload can be 5, 6, 7 or
+ *   16 bytes, depending on the packet's spec.
+ * Unused data bytes should be zeroed out.
+ *
+ *   displayID
+ *     This parameter specifies the displayID for the display resource to
+ *     configure. This comes as input to this command.
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   transmitControl
+ *     This parameter controls how the packet is to be sent by setting the
+ *     control bits.
+ *     Possible flags are as follows:
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE
+ *         Set to _ENABLE to start sending the packet at the next frame; set
+ *         to _DISABLE to stop sending.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME
+ *         Set to _ENABLE to send the packet at every other frame; set to
+ *         _DISABLE to send at every frame.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME
+ *         Set to _ENABLE to send once at the next frame; set to _DISABLE to
+ *         send at every frame.
+ *         Note: a setting with both _OTHER_FRAME and _SINGLE_FRAME is invalid.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK
+ *         Set to _ENABLE to send the packet once on the next HBLANK; set to
+ *         _DISABLE to send on VBLANK.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE
+ *         Set to _ENABLE to send the infoframe packet as soon as possible.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT
+ *         Set to _SW_CONTROLLED to set the HDMI_Video_Format and 3D_Structure
+ *         fields from NV_PDISP_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 and PB5; if set
+ *         to _HW_CONTROLLED, HW will derive them from the state of the
+ *         setHdmiCtrl method. Note that this applies only to stereo overrides.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY
+ *         Set to TRUE to send the Vendor Specific infoframe used for 3D
+ *         stereo LR sync. Set PACKET_TYPE=pktType_VendorSpecInfoFrame along
+ *         with this flag.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING
+ *         Set to TRUE to send the Vendor Specific infoframe used for Self
+ *         Refresh panels. Set PACKET_TYPE=pktType_VendorSpecInfoFrame along
+ *         with this flag.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE
+ *         HW provides support to program 2 generic infoframes per frame for
+ *         DP with GP10X+. This flag indicates the INFOFRAME that needs to be
+ *         programmed.
+ *         Set to _INFOFRAME0 if RM should program GENERIC_INFOFRAME.
+ *         Set to _INFOFRAME1 if RM should program GENERIC_INFOFRAME1.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE
+ *         This option is reserved for backward compatibility with
+ *         NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_PACKET and
+ *         NV0073_CTRL_CMD_DP_SET_PACKET.
+ *         It is temporary and will be deprecated soon.
+ *   packetSize
+ *     packet size of packets in pPacket to send, including header and payload.
+ *   targetHead
+ *     Specifies the target head number for which the SDP needs to be updated.
+ *   bUsePsrHeadforSdp
+ *     Indicates that the targetHead field should be used for setting the SDP
+ *     or infoframe packet instead of deriving the active head from displayID.
+ *   pPacket
+ *     pPacket points to the packets to send.
+ *     For HDMI 1.1, the maximum allowed bytes is 31.
+ *     The packet array includes the 3 bytes of header + data depending on
+ *     the type of packet. For an infoframe, the header bytes refer to type,
+ *     version and length respectively. This comes as input to this command.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID (0x88U)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  transmitControl;
+    NvU32  packetSize;
+    NvU32  targetHead;
+    NvBool bUsePsrHeadforSdp;
+    NvU8   aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
+} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U)
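+
+/*
+ * Illustrative, compiled-out sketch (not part of the original interface):
+ * filling the SET_OD_PACKET parameters so a caller-supplied infoframe is
+ * sent once at the next frame.  DRF_DEF() is assumed to come from nvmisc.h;
+ * the control-call dispatch itself is outside this header and omitted.
+ */
+#if 0
+static void buildSetOdPacketParams(NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *pParams,
+                                   NvU32 displayId, NvU32 head,
+                                   const NvU8 *pPacket, NvU32 packetSize)
+{
+    NvU32 i;
+
+    pParams->subDeviceInstance = 0U;         /* default subdevice */
+    pParams->displayId         = displayId;
+    pParams->targetHead        = head;
+    pParams->bUsePsrHeadforSdp = NV_FALSE;   /* derive the head from displayId */
+    pParams->packetSize        = packetSize; /* header + payload bytes */
+
+    /* Start sending, exactly once, at the next frame. */
+    pParams->transmitControl =
+        DRF_DEF(0073_CTRL, _SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL, _ENABLE, _YES) |
+        DRF_DEF(0073_CTRL, _SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL, _SINGLE_FRAME, _ENABLE);
+
+    /* Copy the packet; the caller is expected to have zeroed the struct so
+     * that unused data bytes stay zeroed, as required above. */
+    for (i = 0U; i < packetSize && i < NV0073_CTRL_SET_OD_MAX_PACKET_SIZE; i++)
+        pParams->aPacket[i] = pPacket[i];
+}
+#endif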
+
+
+/*
+ * NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS
+ *
+ * This command is used to enable/disable sending of display output packets.
+ * Currently it supports HDMI only.
+ * Unused data bytes should be zeroed out.
+ *
+ *   displayID
+ *     This parameter specifies the displayID for the display output resource
+ *     to configure. This comes as input to this command.
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   type
+ *     The client shall specify the type of display output packet. For HDMI,
+ *     set this according to HDMI specification 1.4.
+ *     This comes as input to this command.
+ *   transmitControl
+ *     This parameter controls how the packet is to be sent by setting the
+ *     control bits.
+ *     Possible flags are as follows:
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ENABLE
+ *         Set to _ENABLE to start sending the packet at the next frame; set
+ *         to _DISABLE to stop sending.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_OTHER_FRAME
+ *         Set to _ENABLE to send the packet at every other frame; set to
+ *         _DISABLE to send at every frame.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SINGLE_FRAME
+ *         Set to _ENABLE to send once at the next frame; set to _DISABLE to
+ *         send at every frame.
+ *         Note: a setting with both _OTHER_FRAME and _SINGLE_FRAME is invalid.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ON_HBLANK
+ *         Set to _ENABLE to send the packet once on the next HBLANK; set to
+ *         _DISABLE to send on VBLANK.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_VIDEO_FMT
+ *         Set to _SW_CONTROLLED to set the HDMI_Video_Format and 3D_Structure
+ *         fields from NV_PDISP_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 and PB5; if set
+ *         to _HW_CONTROLLED, HW will derive them from the state of the
+ *         setHdmiCtrl method. Note that this applies only to stereo overrides.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_STEREO_POLARITY
+ *         Set to TRUE to enable the Vendor Specific infoframe used for 3D
+ *         stereo LR sync.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING
+ *         Set to TRUE to enable the Vendor Specific infoframe used for Self
+ *         Refresh panels.
+ *       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE
+ *         This option is reserved for backward compatibility with
+ *         NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_PACKET and
+ *         NV0073_CTRL_CMD_DP_SET_PACKET.
+ *         It is temporary and will be deprecated soon.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET_CTRL (0x730289U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS_MESSAGE_ID (0x89U)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 type;
+    NvU32 transmitControl;
+} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ENABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ENABLE_NO NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ENABLE_YES NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_OTHER_FRAME NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SINGLE_FRAME NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ON_HBLANK NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ON_HBLANK_DISABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ON_HBLANK_ENABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_VIDEO_FMT NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_STEREO_POLARITY NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_PCLK_LIMIT
+ *
+ * This command returns the maximum pixel clock rate supported by the
+ * specified display device.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the display for which information is to be
+ *     returned. Only one display may be indicated in this parameter.
+ *     If more than one displayId is used, a failing status of
+ *     NV_ERR_INVALID_ARGUMENT will be returned.
+ *   pclkLimit
+ *     This parameter returns the min of orPclkLimit and vbPclkLimit in KHz.
+ *     It may be used for SLI configs that use a video bridge. For non-SLI
+ *     configs and bridgeless SLI configs, the client should use orPclkLimit
+ *     instead.
+ *   orPclkLimit
+ *     This parameter returns the maximum pixel clock frequency of the OR in
+ *     KHz.
+ *   vbPclkLimit
+ *     This parameter returns the maximum pixel clock frequency of the
+ *     video bridge (SLI) in KHz (or zero if there is no video bridge).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_GET_PCLK_LIMIT (0x73028aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS_MESSAGE_ID (0x8AU)
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 pclkLimit;
+    NvU32 orPclkLimit;
+    NvU32 vbPclkLimit;
+} NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS;
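+
+/*
+ * Illustrative, compiled-out sketch (not part of the original interface):
+ * per the description above, pclkLimit already folds in the video-bridge
+ * limit, so a client would typically consult pclkLimit only when a bridge
+ * is present (vbPclkLimit != 0) and orPclkLimit otherwise.
+ */
+#if 0
+static NvU32 effectivePclkLimitKHz(const NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS *pParams)
+{
+    return (pParams->vbPclkLimit != 0U) ? pParams->pclkLimit
+                                        : pParams->orPclkLimit;
+}
+#endif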
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO
+ *
+ * This command returns output resource information for the specified display
+ * device.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the display for which information is to be
+ *     returned. Only one display may be indicated in this parameter.
+ *     If more than one displayId is used, a failing status of
+ *     NV_ERR_INVALID_ARGUMENT will be returned.
+ *   type
+ *     This parameter returns the output resource type. Legal values for
+ *     this parameter include:
+ *       NV0073_CTRL_SPECIFIC_OR_TYPE_DAC
+ *         The output resource is a DAC.
+ *       NV0073_CTRL_SPECIFIC_OR_TYPE_SOR
+ *         The output resource is a serial output resource.
+ *       NV0073_CTRL_SPECIFIC_OR_TYPE_DSI
+ *         The output resource is a Display Serial Interface output resource.
+ *       NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR
+ *         The output resource is a parallel input output resource.
+ *   index
+ *     This parameter returns the type-specific index of the output
+ *     resource associated with the specified displayId.
+ *   protocol
+ *     This parameter returns the type-specific protocol used by the
+ *     output resource. Legal values for this parameter include:
+ *       NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT
+ *       NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM
+ *       NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A
+ *       NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B
+ *       NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS
+ *       NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A
+ *       NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B
+ *       NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC
+ *       NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI
+ *       NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN
+ *   ditherType
+ *     This parameter returns the dither type for the output resource.
+ *     Legal values for this parameter include:
+ *       NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS
+ *       NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS
+ *       NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_10_BITS
+ *       NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF
+ *   ditherAlgo
+ *     This parameter returns the dithering algorithm used by the output
+ *     resource. Legal values for this parameter include:
+ *       NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_ERR_ACC
+ *       NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_ERR_ACC
+ *       NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2
+ *       NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2
+ *       NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL
+ *       NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN
+ *   location
+ *     This parameter returns the physical location of the output resource.
+ *     Legal values for this parameter include:
+ *       NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP
+ *       NV0073_CTRL_SPECIFIC_OR_LOCATION_BOARD
+ *   rootPortId
+ *     This parameter specifies the Root-Port ID for the given display.
+ *   dcbIndex
+ *     This parameter returns the DCB index of the display device.
+ *   vbiosAddress
+ *     This parameter is the VBIOS IP address, which will hold a valid value
+ *     only if the displayId was allocated by the VBIOS.
+ *   bIsLitByVbios
+ *     This parameter specifies whether the displayID allocation was
+ *     requested by the VBIOS.
+ *   bIsDispDynamic
+ *     Returns NV_TRUE if the displayID was allocated dynamically, NV_FALSE
+ *     otherwise.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID (0x8BU)
+
+typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  index;
+    NvU32  type;
+    NvU32  protocol;
+    NvU32  ditherType;
+    NvU32  ditherAlgo;
+    NvU32  location;
+    NvU32  rootPortId;
+    NvU32  dcbIndex;
+    NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8);
+    NvBool bIsLitByVbios;
+    NvBool bIsDispDynamic;
+} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS;
+
+/* valid type values */
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U)
+
+
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U)
+
+/* valid DAC protocol values */
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U)
+
+
+
+/* valid SOR protocol values */
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U)
+
+/* valid DSI protocol values */
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U)
+
+/* valid PIOR protocol values */
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U)
+
+/* valid UNKNOWN protocol value */
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU)
+
+/* valid ditherType values */
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_10_BITS (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF (0x00000003U)
+
+/* valid ditherAlgo values */
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_ERR_ACC (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_ERR_ACC (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2 (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2 (0x00000003U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL (0x00000004U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN (0xFFFFFFFFU)
+
+/* valid location values */
+#define NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_LOCATION_BOARD (0x00000001U)
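+
+/*
+ * Illustrative, compiled-out sketch (not part of the original interface):
+ * mapping the type value returned by NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO
+ * to a printable name, e.g. for logging.
+ */
+#if 0
+static const char *orTypeName(NvU32 type)
+{
+    switch (type)
+    {
+        case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC:  return "DAC";
+        case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR:  return "SOR";
+        case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR: return "PIOR";
+        case NV0073_CTRL_SPECIFIC_OR_TYPE_DSI:  return "DSI";
+        case NV0073_CTRL_SPECIFIC_OR_TYPE_NONE: return "NONE";
+        default:                                return "unknown";
+    }
+}
+#endif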
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_HDCP_KSVLIST_VALIDATE
+ *
+ * This command initiates KSV list validation for the specified device
+ * if it is a repeater.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the root port device for which the
+ *     KSV list is to be validated.
+ *   bUseCachedKsvList
+ *     This parameter directs RM to use the cached KSV list to read the
+ *     KSV list in case the BCAPS READY bit is not set.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_NOT_READY
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_HDCP_KSVLIST_VALIDATE (0x73028dU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_HDCP_KSVLIST_VALIDATE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_HDCP_KSVLIST_VALIDATE_PARAMS_MESSAGE_ID (0x8DU)
+
+typedef struct NV0073_CTRL_SPECIFIC_HDCP_KSVLIST_VALIDATE_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bUseCachedKsvList;
+} NV0073_CTRL_SPECIFIC_HDCP_KSVLIST_VALIDATE_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_HDCP_UPDATE
+ *
+ * This command updates the display to the proper HDCP state based on
+ * whether it has been newly connected or disconnected. This is called
+ * during a hotplug event.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the root port device to update.
+ *   bIsConnected
+ *     This parameter specifies whether the device has been connected
+ *     (NV_TRUE) or disconnected (NV_FALSE).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_HDCP_UPDATE (0x73028eU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_HDCP_UPDATE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_HDCP_UPDATE_PARAMS_MESSAGE_ID (0x8EU)
+
+typedef struct NV0073_CTRL_SPECIFIC_HDCP_UPDATE_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bIsConnected;
+} NV0073_CTRL_SPECIFIC_HDCP_UPDATE_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS
+ * NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS
+ *
+ * These commands retrieve and set the user backlight brightness for
+ * the specified display.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   displayId
+ *     Display for which brightness is to be retrieved or set.
+ *   brightness
+ *     The backlight brightness in the range [0,100], inclusive. This
+ *     is an input for SET_BACKLIGHT_BRIGHTNESS, and an output for
+ *     GET_BACKLIGHT_BRIGHTNESS.
+ *   brightnessType
+ *     This can take one of the following three values, based on the
+ *     brightness control method to be used:
+ *       NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100
+ *         (percentage brightness with the value calibrated to a 100 scale)
+ *       NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT1000
+ *         (percentage brightness with uncalibrated values)
+ *       NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_NITS
+ *         (used when the panel supports nits-based brightness)
+ *
+ * Possible status values returned include:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MIN_VALUE 0U
+#define NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MAX_VALUE 100U
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  brightness;
+    NvBool bUncalibrated;
+    NvU8   brightnessType;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100 1
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT1000 2
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_NITS 3
+
+#define NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID (0x91U)
+
+typedef NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID (0x92U)
+
+typedef NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS;
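+
+/*
+ * Illustrative, compiled-out sketch (not part of the original interface):
+ * filling the SET_BACKLIGHT_BRIGHTNESS parameters for a calibrated
+ * percentage request, clamped to the MIN/MAX defines above.
+ */
+#if 0
+static void buildSetBrightnessParams(NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS *pParams,
+                                     NvU32 displayId, NvU32 percent)
+{
+    if (percent > NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MAX_VALUE)
+        percent = NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MAX_VALUE;
+
+    pParams->subDeviceInstance = 0U;
+    pParams->displayId         = displayId;
+    pParams->brightness        = percent;
+    pParams->bUncalibrated     = NV_FALSE;
+    pParams->brightnessType    = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100;
+}
+#endif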
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS
+ *
+ * This command is used to inform RM about the scrambling, clock mode, FRL and
+ * DSC caps of the HDMI sink device.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   displayID
+ *     This parameter specifies the displayID for the display output resource
+ *     to configure.
+ *   caps
+ *     This parameter specifies the sink caps.
+ *       GT_340MHZ_CLOCK_SUPPORTED refers to whether the sink supports a TMDS
+ *       clock (sorClk) rate greater than 340 MHz.
+ *       LTE_340MHZ_SCRAMBLING_SUPPORTED refers to whether scrambling is
+ *       supported for clock rates at or below 340 MHz.
+ *       SCDC_SUPPORTED refers to whether SCDC access is supported on the
+ *       sink.
+ *       MAX_FRL_RATE_SUPPORTED refers to the maximum HDMI 2.1 FRL rate
+ *       supported.
+ *       DSC_12_SUPPORTED refers to whether VESA DSC v1.2a is supported.
+ *       DSC_12_MAX_FRL_RATE_SUPPORTED refers to the maximum HDMI 2.1 FRL
+ *       rate supported when VESA DSC v1.2a is supported.
+ *
+ * Possible status values returned include:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID (0x93U)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 caps;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
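+
+/*
+ * Illustrative, compiled-out sketch (not part of the original interface):
+ * composing the caps word for a hypothetical HDMI 2.1 sink that supports
+ * SCDC, scrambling at or below 340 MHz, TMDS clocks above 340 MHz and FRL
+ * up to 4 lanes at 12 Gbps.  DRF_DEF() is assumed to come from nvmisc.h.
+ */
+#if 0
+static NvU32 buildHdmiSinkCaps(void)
+{
+    return DRF_DEF(0073_CTRL_CMD, _SPECIFIC_SET_HDMI_SINK_CAPS, _GT_340MHZ_CLOCK_SUPPORTED, _TRUE) |
+           DRF_DEF(0073_CTRL_CMD, _SPECIFIC_SET_HDMI_SINK_CAPS, _LTE_340MHZ_SCRAMBLING_SUPPORTED, _TRUE) |
+           DRF_DEF(0073_CTRL_CMD, _SPECIFIC_SET_HDMI_SINK_CAPS, _SCDC_SUPPORTED, _TRUE) |
+           DRF_DEF(0073_CTRL_CMD, _SPECIFIC_SET_HDMI_SINK_CAPS, _MAX_FRL_RATE_SUPPORTED, _4LANES_12G);
+}
+#endif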
+
+
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_SET_MONITOR_POWER
+ *
+ * This command sets monitor power on/off.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   displayId
+ *     This parameter specifies the displayID for the display output resource
+ *     to configure.
+ *   powerState
+ *     This parameter should be one of the valid
+ *     NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_* values.
+ *   headIdx
+ *     The head id on which the power operation needs to be done.
+ *   bForceMonitorState
+ *     Monitor power state that the client wants to force in RM.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_MONITOR_POWER (0x730295U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS_MESSAGE_ID (0x95U)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  powerState;
+    NvU32  headIdx;
+    NvBool bForceMonitorState;
+} NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_OFF (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_ON (0x00000001U)
+
+
+
+/*
+* NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG
+*
+* This command is used to perform HDMI FRL link training and enable FRL mode
+* for the specified displayId. The link configuration will be returned after
+* link training succeeds.
+*
+*   subDeviceInstance
+*     This parameter specifies the subdevice instance within the
+*     NV04_DISPLAY_COMMON parent device to which the operation should be
+*     directed.
+*   displayID
+*     This parameter specifies the displayID for the display output resource
+*     to configure.
+*   data
+*     This parameter is an input and output to this command.
+*     Here are the currently defined fields:
+*       NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE
+*         This field specifies the desired setting for lane count. A client
+*         may choose any lane count as long as it does not exceed the
+*         capability of the HDMI FRL sink as indicated in the sink
+*         capability field.
+*         The valid values for this field are:
+*           NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE
+*             For a 0-lane configuration, link training is shut down
+*             (disable FRL).
+*           NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_3G
+*             For the FRL 3-lane configuration and 3 Gbps bandwidth per lane.
+*           NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_6G
+*             For the FRL 3-lane configuration and 6 Gbps bandwidth per lane.
+*           NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_6G
+*             For the FRL 4-lane configuration and 6 Gbps bandwidth per lane.
+*           NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_8G
+*             For the FRL 4-lane configuration and 8 Gbps bandwidth per lane.
+*           NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_10G
+*             For the FRL 4-lane configuration and 10 Gbps bandwidth per lane.
+*           NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_12G
+*             For the FRL 4-lane configuration and 12 Gbps bandwidth per lane.
+*         On return, the link bandwidth setting is returned, which may be
+*         different from the requested input setting.
+*   bFakeLt
+*     This flag is an input to this command.
+*     It indicates whether the FRL link training is fake.
+*     TRUE if the FRL link training is fake and no real sink device is
+*     attached.
+*   bLtSkipped
+*     This returned flag indicates whether link training was skipped.
+*     TRUE if link training was skipped because the link configuration did
+*     not change.
+*
+* Possible status values returned include:
+*   NV_OK -
+*     After finishing link training, NV_OK status will be returned along
+*     with the updated link configuration. In case of link training failure,
+*     FRL_RATE_NONE will be returned with NV_OK.
+*   NV_ERR_NOT_SUPPORTED -
+*     If the GPU/sink is not capable of HDMI FRL, NV_ERR_NOT_SUPPORTED
+*     status will be returned.
+*   NV_ERR_INVALID_ARGUMENT -
+*     If any argument is invalid for this control call,
+*     NV_ERR_INVALID_ARGUMENT status will be returned.
+*/
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG (0x73029aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS_MESSAGE_ID (0x9AU)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  data;
+    NvBool bFakeLt;
+    NvBool bLtSkipped;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS;
+
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE 2:0
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE (0x00000000U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_12G (0x00000006U)
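+
+/*
+ * Illustrative, compiled-out sketch (not part of the original interface):
+ * requesting FRL link training at 4 lanes / 8 Gbps.  DRF_DEF() is assumed
+ * to come from nvmisc.h.  After the control call returns NV_OK, the data
+ * field holds the rate actually trained (FRL_RATE_NONE on training failure)
+ * and bLtSkipped reports whether training was skipped.
+ */
+#if 0
+static void buildFrlConfigParams(NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS *pParams,
+                                 NvU32 displayId)
+{
+    pParams->subDeviceInstance = 0U;
+    pParams->displayId         = displayId;
+    pParams->bFakeLt           = NV_FALSE;  /* a real sink is attached */
+    pParams->bLtSkipped        = NV_FALSE;  /* output; cleared for clarity */
+    pParams->data = DRF_DEF(0073_CTRL, _HDMI_FRL_DATA, _SET_FRL_RATE, _4LANES_8G);
+}
+#endif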
+
+
+
+#define NV0073_CTRL_SPECIFIC_MAX_CRC_REGIONS 9U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS_MESSAGE_ID (0xA0U)
+
+typedef struct NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 regionCrcs[NV0073_CTRL_SPECIFIC_MAX_CRC_REGIONS];
+    NvU16 reqRegionCrcMask;
+} NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS
+ *
+ * This command is used to capture the active viewport region CRCs.
+ *
+ *   [in] subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   [in] displayId
+ *     This parameter specifies the displayId of the panel for which the
+ *     region CRCs are to be captured.
+ *   [out] regionCrcs
+ *     This field holds the region CRC values to be returned after
+ *     successful completion of the control command.
+ *   [in] reqRegionCrcMask
+ *     This parameter specifies the bit mask of the requested CRC regions.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_GENERIC
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS (0x7302a0U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS_MESSAGE_ID" */
+
+/*
+* NV0073_CTRL_CMD_SPECIFIC_APPLY_EDID_OVERRIDE_V2
+*
+* Apply an EDID override on a specific OD.
+*
+*   subDeviceInstance
+*     This parameter specifies the subdevice instance within the
+*     NV04_DISPLAY_COMMON parent device to which the operation should be
+*     directed.
+*   displayId (in)
+*     ID of the panel on which the operation is to be performed.
+*   bufferSize (in)
+*     Size of the EDID buffer.
+*   edidBuffer (in/out)
+*     The buffer which stores the EDID before and after the override.
+*
+* Possible status values returned are:
+*   NV_OK
+*   NV_ERR_INVALID_PARAMETER
+*/
+#define NV0073_CTRL_CMD_SPECIFIC_APPLY_EDID_OVERRIDE_V2 (0x7302a1U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS_MESSAGE_ID (0xA1U)
+
+typedef struct NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 bufferSize;
+    NvU8  edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES];
+} NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS
+ *
+ * This command is used to get the HDMI FRL caps of the GPU side.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   caps
+ *     This parameter specifies the GPU caps.
+ *       MAX_FRL_RATE_SUPPORTED refers to the maximum HDMI 2.1 FRL link rate
+ *       supported.
+ *
+ * Possible status values returned include:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS (0x7302a2U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS_MESSAGE_ID (0xA2U)
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 caps;
+} NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED 2:0
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_4LANES_12G (0x00000006U)
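+
+/*
+ * Illustrative, compiled-out sketch (not part of the original interface):
+ * the GPU and sink FRL caps appear to share the same 0..6 rate encoding,
+ * ordered by increasing bandwidth, so a client could bound its link-training
+ * request by the smaller of the two fields.  DRF_VAL() is assumed to come
+ * from nvmisc.h; the ordering assumption is ours, not the header's.
+ */
+#if 0
+static NvU32 maxCommonFrlRate(NvU32 gpuCaps, NvU32 sinkCaps)
+{
+    NvU32 gpuRate  = DRF_VAL(0073_CTRL_CMD, _SPECIFIC_GET_HDMI_GPU_CAPS,
+                             _MAX_FRL_LINK_RATE_SUPPORTED, gpuCaps);
+    NvU32 sinkRate = DRF_VAL(0073_CTRL_CMD, _SPECIFIC_SET_HDMI_SINK_CAPS,
+                             _MAX_FRL_RATE_SUPPORTED, sinkCaps);
+
+    return (gpuRate < sinkRate) ? gpuRate : sinkRate;
+}
+#endif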
+
+
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE
+ *
+ * Notifies the system that a display change is about to begin/end.
+ * Also performs the necessary synchronizations for the same.
+ *
+ * The command takes a NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS structure
+ * as an argument with the appropriate subDeviceInstance.
+ *
+ *   [in] subDeviceInstance
+ *     The sub-device instance
+ *   [in] newDevices
+ *     Bitmask of devices that are planned on being enabled with the
+ *     pending device change. See NV_CFGEX_GET_DEVICES_CONFIGURATION for bit
+ *     defs.
+ *   [in] properties
+ *     Bitmask of display attributes for the new configuration (none used at
+ *     the moment).
+ *   [in] enable
+ *     Parameter to decide between display change start and end. Can take
+ *     the values NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_START or
+ *     NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_END.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS_MESSAGE_ID (0xA4U)
+
+typedef struct NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 newDevices;
+    NvU32 properties;
+    NvU32 enable;
+} NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_END (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_START (0x00000001U)
+
+#define NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PROPERTIES_SPANNING (0x00000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE (0x7302a4U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS_MESSAGE_ID" */
+
+
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA
+ *
+ * This command is used to get the HDMI sink status/caps via the Status and
+ * Control Data Channel (SCDC).
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   displayId
+ *     This parameter specifies the displayId of the HDMI sink.
+ *   offset
+ *     This parameter specifies the SCDC offset to use for the read
+ *     operation.
+ *   data
+ *     This field returns the data read from the sink at the specified SCDC
+ *     offset.
+ *
+ * Possible status values returned include:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA (0x7302a6U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS_MESSAGE_ID (0xA6U)
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU8  offset;
+    NvU8  data;
+} NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET 7:0
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_SINK_VERSION (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_SOURCE_VERSION (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_UPDATE_FLAGS_0 (0x00000010U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_TMDS_CONFIGURATION (0x00000020U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_SCRAMBLER_STATUS (0x00000021U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CONFIGURATION_0 (0x00000030U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CONFIGURATION_1 (0x00000031U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_SOURCE_TEST_CONFIGURATION (0x00000035U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_STATUS_FLAGS_0 (0x00000040U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_STATUS_FLAGS_1 (0x00000041U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_STATUS_FLAGS_2 (0x00000042U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_0 (0x00000050U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_1 (0x00000051U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_2 (0x00000052U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_3 (0x00000053U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_4 (0x00000054U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_5 (0x00000055U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_6 (0x00000056U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_7 (0x00000057U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_8 (0x00000058U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_RSED_0 (0x00000059U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_RSED_1 (0x0000005AU)
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_IS_DIRECTMODE_DISPLAY
+ *
+ * This command is used to query whether the specified monitor should be used
+ * with directmode.
+ *
+ *   [in] manufacturerID
+ *     This parameter specifies the 16-bit EDID Manufacturer ID.
+ *   [in] productID
+ *     This parameter specifies the 16-bit EDID Product ID.
+ *   [out] bIsDirectmode
+ *     This indicates whether the monitor should be used with directmode.
+ *
+ * Possible return values:
+ *   NV_OK
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_IS_DIRECTMODE_DISPLAY (0x7302a7U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS_MESSAGE_ID (0xA7U)
+
+typedef struct NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS {
+    NvU16  manufacturerID;
+    NvU16  productID;
+    NvBool bIsDirectmode;
+} NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION
+ *
+ * This command is used to get the HDMI FRL capacity computation result.
+ *
+ *   [in] cmd
+ *     This parameter specifies the command for the HDMI FRL capacity
+ *     computation.
+ *   [in] input
+ *     This parameter specifies the input data for the HDMI FRL capacity
+ *     computation.
+ *   [out] result
+ *     This indicates the computation result of the HDMI FRL capacity
+ *     computation.
+ *   [in/out] preCalc
+ *     This indicates the pre-calculation result of the HDMI FRL capacity
+ *     computation.
+ *   [in/out] dsc
+ *     This indicates the DSC parameters of the HDMI FRL capacity
+ *     computation.
+ *
+ * Possible return values:
+ *   NV_OK
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION (0x7302a8U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS {
+    NvU32 numLanes;
+    NvU32 frlBitRateGbps;
+    NvU32 pclk10KHz;
+    NvU32 hTotal;
+    NvU32 hActive;
+    NvU32 bpc;
+    NvU32 pixelPacking;
+    NvU32 audioType;
+    NvU32 numAudioChannels;
+    NvU32 audioFreqKHz;
+
+    struct {
+        NvU32 bppTargetx16;
+        NvU32 hSlices;
+        NvU32 sliceWidth;
+        NvU32 dscTotalChunkKBytes;
+    } compressionInfo;
+} NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS;
+
+typedef struct NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT {
+    NvU32  frlRate;
+    NvU32  bppTargetx16;
+    NvBool engageCompression;
+    NvBool isAudioSupported;
+    NvBool dataFlowDisparityReqMet;
+    NvBool dataFlowMeteringReqMet;
+    NvBool isVideoTransportSupported;
+    NvU32  triBytesBorrowed;
+    NvU32  hcActiveBytes;
+    NvU32  hcActiveTriBytes;
+    NvU32  hcBlankTriBytes;
+    NvU32  tBlankToTTotalX1k;
+} NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT;
+
+typedef struct NV0073_CTRL_FRL_PRE_CALC_CONFIG {
+    NvU32  vic;
+    NvU32  packing;
+    NvU32  bpc;
+    NvU32  frlRate;
+    NvU32  bppX16;
+    NvBool bHasPreCalcFRLData;
+} NV0073_CTRL_FRL_PRE_CALC_CONFIG;
+
+typedef struct NV0073_CTRL_IS_FRL_DSC_POSSIBLE_PARAMS {
+    NvU32  maxSliceCount;
+    NvU32  maxSliceWidth;
+    NvBool bIsDSCPossible;
+} NV0073_CTRL_IS_FRL_DSC_POSSIBLE_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS_MESSAGE_ID (0xA8U)
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS {
+    NvU8                                        cmd;
+    NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS input;
+    NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT result;
+    NV0073_CTRL_FRL_PRE_CALC_CONFIG             preCalc;
+    NV0073_CTRL_IS_FRL_DSC_POSSIBLE_PARAMS      dsc;
+} NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_NULL (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_UNCOMPRESSED_VIDEO (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_COMPRESSED_VIDEO (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_HAS_PRECAL_FRL_DATA (0x00000003U)
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_GET_PRECAL_UNCOMPRESSED_FRL_CONFIG (0x00000004U)
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_GET_PRECAL_COMPRESSED_FRL_CONFIG (0x00000005U)
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_IS_FRL_DSC_POSSIBLE (0x00000006U)
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_SET_SHARED_GENERIC_PACKET
+ *
+ * This command is used to program the display output packets.
+ * These generic packets can be used for both HDMI and DP.
+ * HW has added 6 new generic packets for each head because some use cases
+ * have a requirement to send an infoframe at a particular location (vsync,
+ * vblank, loadV).
+ *
+ * Note: 1. The client first needs to reserve or acquire a free infoframe
+ *          index using
+ *          NV0073_CTRL_CMD_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET.
+ *       2. The client needs to update the SDP index for the head through
+ *          the control call NV0073_CTRL_CMD_SPECIFIC_SET_SHARED_GENERIC_PACKET.
+ *       3. The client needs to release the infoframe index using the
+ *          control call NV0073_CTRL_CMD_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET.
+ *
+ *   [in] subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   [in] transmitControl
+ *     This parameter controls how the packet is to be sent by setting the
+ *     control bits.
+ *     Possible flags are as follows:
+ *       NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_ENABLE
+ *         Setting this field to _YES will enable this generic infoframe;
+ *         setting this field to _NO will disable this generic infoframe.
+ *       NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_SINGLE
+ *         Setting this to _YES will cause the new infoframe to be
+ *         transmitted exactly once; setting it to _NO will cause the new
+ *         infoframe to be transmitted every frame.
+ *       NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC
+ *         The SDP can be sent at 3 different locations:
+ *           VBLANK - the new infoframe will be sent at Vblank.
+ *           VSYNC  - the new infoframe will be sent at Vsync.
+ *           LOADV  - the new infoframe will be triggered by LOADV, and sent
+ *                    at Vsync.
+ *       NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_UPDATE_RFB_OVERRIDE
+ *         _ENABLE: override DB1 bit1 with the existence of loadv (for Panel
+ *                  Self Refresh).
+ *         _DISABLE: do not override shared generic infoframe subpacket DB1
+ *                  bit1.
+ *       NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_SU_COORDINATES_VALID_OVERRIDE
+ *         _ENABLE: override DB1 bit3 with the existence of loadv (for Panel
+ *                  Replay).
+ *         _DISABLE: do not override shared generic infoframe subpacket DB1
+ *                  bit3.
+ *   [in] packetSize
+ *     Size of the packets in the packet array to send, including header and
+ *     payload.
+ *   [in] targetHeadIndex
+ *     Specifies the target head number for which the SDP needs to be
+ *     updated.
+ *   [in] infoframeIndex
+ *     Specifies the index of the infoframe.
+ *   [in] packet
+ *     The packet array holds the packets to send.
+ *     For HDMI 1.1, the maximum allowed bytes is 31.
+ *     The packet array includes the 3 bytes of header + data depending on
+ *     the type of packet. For an infoframe, the header bytes refer to type,
+ *     version and length respectively. This comes as input to this command.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_SHARED_GENERIC_PACKET (0x7302a9) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID (0xA9U)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 transmitControl;
+    NvU32 packetSize;
+    NvU32 targetHeadIndex;
+    NvU32 infoframeIndex;
+    NvU8  packet[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
+} NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_ENABLE 0:0
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_ENABLE_NO (0x0000000)
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_ENABLE_YES (0x0000001)
+
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_SINGLE 1:1
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_SINGLE_NO (0x0000000)
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_SINGLE_YES (0x0000001)
+
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC 5:2
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC_VBLANK (0x0000000)
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC_VSYNC (0x0000001)
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC_LOADV (0x0000002)
+
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_STATE_OVERRIDE 6:6
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_STATE_OVERRIDE_DISABLE (0x0000000)
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_STATE_OVERRIDE_ENABLE (0x0000001)
+
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_UPDATE_RFB_OVERRIDE 7:7
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_UPDATE_RFB_OVERRIDE_DISABLE (0x0000000)
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_UPDATE_RFB_OVERRIDE_ENABLE (0x0000001)
+
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_SU_COORDINATES_VALID_OVERRIDE 8:8
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_SU_COORDINATES_VALID_OVERRIDE_DISABLE (0x0000000)
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_SU_COORDINATES_VALID_OVERRIDE_ENABLE (0x0000001)
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET
+ *
+ * This command is used to reserve an infoframe for a head; RM assigns a
+ * free infoframe index and returns it. Later, the client needs to call the
+ * control call NV0073_CTRL_CMD_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET
+ * to release the index.
+ *
+ *   [in] subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   [in] targetHeadIndex
+ *     The target head for which the SDP needs to be sent.
+ *   [out] infoframeIndex
+ *     Returns the infoframe index for the head.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INSUFFICIENT_RESOURCES
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET (0x7302aaU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID (0xAAU)
+
+typedef struct NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 targetHeadIndex;
+    NvU32 infoframeIndex;
+} NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET
+ *
+ * This command is used to release the infoframe index that was acquired by
+ * the client.
+ *
+ * [in]subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed.
+ *   This parameter must specify a value between zero and the total number
+ *   of subdevices within the parent device. This parameter should be set
+ *   to zero for default behavior.
+ * [in]targetHeadIndex
+ *   Specifies the target head number for which the SDP needs to be updated.
+ * [in]infoframeIndex
+ *   Infoframe index for the target head.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET (0x7302abU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID (0xABU)
+
+typedef struct NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 targetHeadIndex;
+    NvU32 infoframeIndex;
+} NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_DISP_I2C_READ_WRITE
+ *
+ * This command performs an I2C read or write to a slave on the display I2C
+ * instance.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * i2cPort
+ *   This parameter specifies the I2C CCB port ID.
+ * i2cSlaveAddress
+ *   This parameter specifies the I2C slave address.
+ * readWriteFlag
+ *   This parameter specifies whether it is a read or a write operation.
+ * readWriteLen
+ *   This parameter specifies the length of the read/write buffer.
+ * readBuffer
+ *   This buffer returns the data read from the slave address.
+ * writeBuffer
+ *   This buffer holds the data to be written to the slave address.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_DISP_I2C_READ_WRITE (0x7302acU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_BUF_LEN 128U
+
+#define NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS_MESSAGE_ID (0xACU)
+
+typedef struct NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 i2cPort;
+    NvU32 i2cSlaveAddress;
+    NvU32 readWriteFlag;
+    NvU32 readWriteLen;
+    NvU8  readBuffer[NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_BUF_LEN];
+    NvU8  writeBuffer[NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_BUF_LEN];
+} NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_DISP_I2C_READ_MODE  (0x00000001)
+#define NV0073_CTRL_SPECIFIC_DISP_I2C_WRITE_MODE (0x00000000)
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT
+ *
+ * This command returns the valid window head assignment mask.
+ *
+ * windowHeadMask [out]
+ *   This out parameter is an array which holds the head mask for
+ *   each window. The Nth element in the array would be a bitmask
+ *   of which heads can possibly drive window N.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_GENERIC
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT (0x7302adU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_MAX_WINDOWS                     32U
+#define NV0073_CTRL_SPECIFIC_FLEXIBLE_HEAD_WINDOW_ASSIGNMENT (0xFFU)
+
+#define NV0073_CTRL_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT_PARAMS_MESSAGE_ID (0xADU)
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU8  windowHeadMask[NV0073_CTRL_SPECIFIC_MAX_WINDOWS];
+} NV0073_CTRL_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_DEFAULT_ADAPTIVESYNC_DISPLAY
+ *
+ * This command is used to query whether the specified monitor should default
+ * to adaptive sync.
+ *
+ * [in]manufacturerID
+ *   This parameter specifies the 16-bit EDID Manufacturer ID.
+ * [in]productID
+ *   This parameter specifies the 16-bit EDID Product ID.
+ * [out]bDefaultAdaptivesync
+ *   This indicates whether the monitor should default to adaptive sync.
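+ *
+ * A usage sketch (illustrative only; the EDID-derived IDs, handles, and the
+ * NvRmControl() dispatch are assumptions):
+ *
+ *   NV0073_CTRL_SPECIFIC_DEFAULT_ADAPTIVESYNC_DISPLAY_PARAMS p = { 0 };
+ *   p.manufacturerID = edidManufacturerId;  // 16-bit ID parsed from the EDID
+ *   p.productID      = edidProductId;
+ *   NV_STATUS status = NvRmControl(hClient, hDisplayCommon,
+ *                                  NV0073_CTRL_CMD_SPECIFIC_DEFAULT_ADAPTIVESYNC_DISPLAY,
+ *                                  &p, sizeof(p));
+ *   // On NV_OK, p.bDefaultAdaptivesync reports the recommended default.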
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_DEFAULT_ADAPTIVESYNC_DISPLAY (0x7302aeU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_DEFAULT_ADAPTIVESYNC_DISPLAY_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_DEFAULT_ADAPTIVESYNC_DISPLAY_PARAMS_MESSAGE_ID (0xAEU)
+
+typedef struct NV0073_CTRL_SPECIFIC_DEFAULT_ADAPTIVESYNC_DISPLAY_PARAMS {
+    NvU16  manufacturerID;
+    NvU16  productID;
+    NvBool bDefaultAdaptivesync;
+} NV0073_CTRL_SPECIFIC_DEFAULT_ADAPTIVESYNC_DISPLAY_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_DISPLAY_BRIGHTNESS_LTM
+ * NV0073_CTRL_CMD_SPECIFIC_SET_DISPLAY_BRIGHTNESS_LTM
+ *
+ * These commands retrieve and set the brightness level and Local Tone
+ * Mapping (LTM) settings for the specified display.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed.
+ *
+ * displayId
+ *   Display for which brightness/LTM settings are to be retrieved or set.
+ *
+ * brightnessMilliNits
+ *   The display brightness in millinit units, in the [0, 10000000] range,
+ *   inclusive.
+ *
+ * transitionTimeMs
+ *   The time, in milliseconds, over which the display brightness should
+ *   transition from the current level to brightnessMilliNits.
+ *
+ * bUncalibrated
+ *   If true, brightnessMilliNits is to be interpreted as a brightness
+ *   percentage value, in 0.001% units.
+ *
+ * bAdaptiveBrightness
+ *   If true, brightnessMilliNits was set by the OS in response to ambient
+ *   light sensor (ALS) input (if present on the system).
+ *
+ * bBoostRange
+ *   If true, brightnessMilliNits was set by the OS in response to ALS input
+ *   indicating a very bright ambient light environment.
+ *
+ * ambientIlluminance
+ *   If bAdaptiveBrightness = true, ambientIlluminance represents the ambient
+ *   illuminance value reported by the ALS sensor, in 0.001 lux units.
+ *
+ * ambientChromaticityX
+ *   If bAdaptiveBrightness = true, ambientChromaticityX represents the X
+ *   chromaticity value reported by the ALS sensor, in 0.001 units of the
+ *   [0,1] range.
+ *
+ * ambientChromaticityY
+ *   If bAdaptiveBrightness = true, ambientChromaticityY represents the Y
+ *   chromaticity value reported by the ALS sensor, in 0.001 units of the
+ *   [0,1] range.
+ *
+ * bLtmEnable
+ *   If set to true, enable LTM functionality in the GPU display HW. Set to
+ *   true by default.
+ *
+ * bEnhanceContrast
+ *   If bLtmEnable = true, and if set to true, enhance local contrast via LTM
+ *   regardless of any other contrast enhancement driver policies. Set to
+ *   false by default.
+ *
+ * contrastGain
+ *   If bLtmEnable = true, this specifies how much gain to apply to the
+ *   contrast.
+ *
+ * detailGain
+ *   If bLtmEnable = true, this specifies how much detail should be boosted.
+ *
+ * bContentAdaptiveBrightness
+ *   If bLtmEnable = true, and if set to true, modify the display backlight
+ *   level and adjust pixel values dynamically on a per-frame basis to
+ *   perform content adaptive brightness control and reduce display power.
+ *   Set to false by default.
+ *
+ * bDynamicHdrTonemapping
+ *   If bLtmEnable = true, and if set to true, and the output is HDR, enable
+ *   dynamic per-frame HDR tonemapping. Set to false by default.
+ *
+ * maxDisplayLuminance
+ *   Maximum display luminance.
+ *
+ * luminanceScalingFactor
+ *   HDR tone mapping luminance scaling factor.
+ *
+ * Possible status values returned include:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+typedef struct NV0073_CTRL_SPECIFIC_DISPLAY_BRIGHTNESS_LTM_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  brightnessMilliNits;
+    NvU32  transitionTimeMs;
+    NvBool bUncalibrated;
+    NvBool bAdaptiveBrightness;
+    NvBool bBoostRange;
+    NvU32  ambientIlluminance;
+    NvU32  ambientChromaticityX;
+    NvU32  ambientChromaticityY;
+    NvBool bEnhanceContrast;
+    NvU16  contrastGain;
+    NvU16  detailGain;
+    NvBool bContentAdaptiveBrightness;
+    NvBool bDynamicHdrTonemapping;
+    NvU32  maxDisplayLuminance;
+    NvU32  luminanceScalingFactor;
+} NV0073_CTRL_SPECIFIC_DISPLAY_BRIGHTNESS_LTM_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_GET_DISPLAY_BRIGHTNESS_LTM_PARAMS_MESSAGE_ID (0xAFU)
+
+typedef NV0073_CTRL_SPECIFIC_DISPLAY_BRIGHTNESS_LTM_PARAMS NV0073_CTRL_SPECIFIC_GET_DISPLAY_BRIGHTNESS_LTM_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_DISPLAY_BRIGHTNESS_LTM (0x7302afU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_DISPLAY_BRIGHTNESS_LTM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_DISPLAY_BRIGHTNESS_LTM_PARAMS_MESSAGE_ID (0xB0U)
+
+typedef NV0073_CTRL_SPECIFIC_DISPLAY_BRIGHTNESS_LTM_PARAMS NV0073_CTRL_SPECIFIC_SET_DISPLAY_BRIGHTNESS_LTM_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_DISPLAY_BRIGHTNESS_LTM (0x7302b0U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_DISPLAY_BRIGHTNESS_LTM_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV0073_CTRL_CMD_GET_REGISTER_OFFSET_FOR_ULMB_TIMESTAMP
+ *
+ * This command returns the offset of the display register used to send a
+ * timestamp directly to RISC-V, so that clients may map that register
+ * directly and write to it, which will trigger an interrupt in RISC-V.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0073_CTRL_CMD_GET_REGISTER_OFFSET_FOR_ULMB_TIMESTAMP (0x7302b1U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_GET_REGISTER_OFFSET_FOR_ULMB_TIMESTAMP_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_GET_REGISTER_OFFSET_FOR_ULMB_TIMESTAMP_PARAMS_MESSAGE_ID (0xB1U)
+
+typedef struct NV0073_CTRL_GET_REGISTER_OFFSET_FOR_ULMB_TIMESTAMP_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 dispRegisterBase;
+} NV0073_CTRL_GET_REGISTER_OFFSET_FOR_ULMB_TIMESTAMP_PARAMS;
+/* _ctrl0073specific_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h
new file mode 100644
index 0000000..7029743
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h
@@ -0,0 +1,201 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0073/ctrl0073stereo.finn
+//
+
+#include "ctrl/ctrl0073/ctrl0073base.h"
+
+
+
+/*
+ * NV0073_CTRL_CMD_STEREO_DONGLE_SUPPORTED
+ *
+ * This command returns the support status of the NV stereo emitter
+ * (also known as the stereo dongle). It reports whether the stereo dongle
+ * is present, in terms of whether its USB interface has been initialized
+ * in Resman. This provides an RmControl interface to the
+ * STEREO_DONGLE_SUPPORTED command in stereoDongleControl.
+ *
+ * Parameters:
+ * [IN] subDeviceInstance - This parameter specifies the subdevice instance
+ *      within the NV04_DISPLAY_COMMON parent device to which the operation
+ *      should be directed. This parameter must specify a value between
+ *      zero and the total number of subdevices within the parent device.
+ *      This parameter should be set to zero for default behavior.
+ * [IN] head - head to be passed to stereoDongleControl
+ * [IN] bI2cEmitter - I2C driven DT embedded emitter
+ * [IN] bForcedSupported - GPIO23 driven emitter
+ * [OUT] support - the control word returned by stereoDongleControl
+ *
+ * Possible status values returned are:
+ *   NV_ERR_NOT_SUPPORTED - stereo is not initialized on the GPU
+ */
+#define NV0073_CTRL_CMD_STEREO_DONGLE_SUPPORTED (0x731702U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_STEREO_INTERFACE_ID << 8) | NV0073_CTRL_STEREO_DONGLE_SUPPORTED_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_STEREO_DONGLE_SUPPORTED_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0073_CTRL_STEREO_DONGLE_SUPPORTED_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  head;
+    NvBool bI2cEmitter;
+    NvBool bForcedSupported;
+    NvU32  support;
+} NV0073_CTRL_STEREO_DONGLE_SUPPORTED_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_STEREO_DONGLE_SET_TIMINGS
+ *
+ * Sets new video mode timings, e.g. from the display driver on a mode set.
+ *
+ * Parameters:
+ * [IN] subDeviceInstance - This parameter specifies the subdevice instance
+ *      within the NV04_DISPLAY_COMMON parent device to which the operation
+ *      should be directed. This parameter must specify a value between
+ *      zero and the total number of subdevices within the parent device.
+ *      This parameter should be set to zero for default behavior.
+ * [IN] head - head to be passed to stereoDongleControl
+ * [IN] timings - new timings to be set
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED - stereo is not initialized on the GPU
+ */
+#define NV0073_CTRL_CMD_STEREO_DONGLE_SET_TIMINGS (0x731703U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_STEREO_INTERFACE_ID << 8) | NV0073_CTRL_STEREO_DONGLE_SET_TIMINGS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_STEREO_VIDEO_MODE_TIMINGS {
+    NvU32 PixelClock;
+    NvU16 TotalWidth;
+    NvU16 VisibleImageWidth;
+    NvU16 HorizontalBlankStart;
+    NvU16 HorizontalBlankWidth;
+    NvU16 HorizontalSyncStart;
+    NvU16 HorizontalSyncWidth;
+    NvU16 TotalHeight;
+    NvU16 VisibleImageHeight;
+    NvU16 VerticalBlankStart;
+    NvU16 VerticalBlankHeight;
+    NvU16 VerticalSyncStart;
+    NvU16 VerticalSyncHeight;
+    NvU16 InterlacedMode;
+    NvU16 DoubleScanMode;
+
+    NvU16 MonitorVendorId;
+    NvU16 MonitorProductId;
+} NV0073_CTRL_STEREO_VIDEO_MODE_TIMINGS;
+
+#define NV0073_CTRL_STEREO_DONGLE_SET_TIMINGS_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV0073_CTRL_STEREO_DONGLE_SET_TIMINGS_PARAMS {
+    NvU32                                 subDeviceInstance;
+    NvU32                                 head;
+    NV0073_CTRL_STEREO_VIDEO_MODE_TIMINGS timings;
+} NV0073_CTRL_STEREO_DONGLE_SET_TIMINGS_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_STEREO_DONGLE_ACTIVATE
+ *
+ * stereoDongleActivate wrapper / NV_STEREO_DONGLE_ACTIVATE_DATA_ACTIVE_YES
+ * Updates the SBIOS that the 3D stereo state is active.
+ *
+ * Parameters:
+ * [IN] subDeviceInstance - This parameter specifies the subdevice instance
+ *      within the NV04_DISPLAY_COMMON parent device to which the operation
+ *      should be directed. This parameter must specify a value between
+ *      zero and the total number of subdevices within the parent device.
+ *      This parameter should be set to zero for default behavior.
+ * [IN] head - head to be passed to stereoDongleActivate
+ * [IN] bSDA - enable stereo on DDC SDA
+ * [IN] bWorkStation - whether this is workstation stereo
+ * [IN] bDLP - whether this is checkerboard DLP stereo
+ * [IN] IRPower - IR power value
+ * [IN] flywheel - FlyWheel value
+ * [IN] bRegIgnore - whether to ignore the registry setting
+ * [IN] bI2cEmitter - sets NV_STEREO_DONGLE_ACTIVATE_DATA_I2C_EMITTER_YES and pStereo->bAegisDT
+ * [IN] bForcedSupported - sets NV_STEREO_DONGLE_FORCED_SUPPORTED_YES and pStereo->GPIOControlledDongle
+ * [IN] bInfoFrame - Aegis DT with DP InfoFrame
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT - if (head > OBJ_MAX_HEADS)
+ *   NV_ERR_NOT_SUPPORTED - stereo is not initialized on the GPU
+ */
+#define NV0073_CTRL_CMD_STEREO_DONGLE_ACTIVATE (0x731704U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_STEREO_INTERFACE_ID << 8) | NV0073_CTRL_STEREO_DONGLE_ACTIVATE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_STEREO_DONGLE_ACTIVATE_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV0073_CTRL_STEREO_DONGLE_ACTIVATE_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  head;
+    NvBool bSDA;
+    NvBool bWorkStation;
+    NvBool bDLP;
+    NvU8   IRPower;
+    NvU8   flywheel;
+    NvBool bRegIgnore;
+    NvBool bI2cEmitter;
+    NvBool bForcedSupported;
+    NvBool bInfoFrame;
+} NV0073_CTRL_STEREO_DONGLE_ACTIVATE_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_STEREO_DONGLE_DEACTIVATE
+ *
+ * stereoDongleActivate wrapper / NV_STEREO_DONGLE_ACTIVATE_DATA_ACTIVE_NO
+ *
+ * If the active count is <= 0, no 3D app is running, which indicates that
+ * stereo has really been deactivated; updates the SBIOS that the 3D stereo
+ * state is NOT ACTIVE.
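+ *
+ * A minimal sketch (illustrative only; the handles and the NvRmControl()
+ * dispatch are assumptions; see the parameter list below):
+ *
+ *   NV0073_CTRL_STEREO_DONGLE_DEACTIVATE_PARAMS p = { 0 };
+ *   p.subDeviceInstance = 0;
+ *   p.head              = head;
+ *   NV_STATUS status = NvRmControl(hClient, hDisplayCommon,
+ *                                  NV0073_CTRL_CMD_STEREO_DONGLE_DEACTIVATE,
+ *                                  &p, sizeof(p));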
+ *
+ * Parameters:
+ * [IN] subDeviceInstance - This parameter specifies the subdevice instance
+ *      within the NV04_DISPLAY_COMMON parent device to which the operation
+ *      should be directed. This parameter must specify a value between
+ *      zero and the total number of subdevices within the parent device.
+ *      This parameter should be set to zero for default behavior.
+ * [IN] head - head to be passed to stereoDongleActivate
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT - if (head > OBJ_MAX_HEADS)
+ *   NV_ERR_NOT_SUPPORTED - stereo is not initialized on the GPU
+ */
+#define NV0073_CTRL_CMD_STEREO_DONGLE_DEACTIVATE (0x731705U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_STEREO_INTERFACE_ID << 8) | NV0073_CTRL_STEREO_DONGLE_DEACTIVATE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_STEREO_DONGLE_DEACTIVATE_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV0073_CTRL_STEREO_DONGLE_DEACTIVATE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+} NV0073_CTRL_STEREO_DONGLE_DEACTIVATE_PARAMS;
+
+
+
+/* _ctrl0073stereo_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h
new file mode 100644
index 0000000..0c93979
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h
@@ -0,0 +1,32 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0073/ctrl0073svp.finn
+//
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
new file mode 100644
index 0000000..c17efea
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
@@ -0,0 +1,2571 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0073/ctrl0073system.finn
+//
+
+#include "ctrl/ctrl0073/ctrl0073base.h"
+
+/* NV04_DISPLAY_COMMON system-level control commands and parameters */
+
+/* extract cap bit setting from tbl */
+#define NV0073_CTRL_SYSTEM_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c))
+
+/* Caps format is byte_index:bit_mask.
+ * Important: keep the number of bytes needed for these fields in sync with
+ * NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE
+ */
+#define NV0073_CTRL_SYSTEM_CAPS_AA_FOS_GAMMA_COMP_SUPPORTED        0:0x01
+#define NV0073_CTRL_SYSTEM_CAPS_TV_LOWRES_BUG_85919                0:0x02
+#define NV0073_CTRL_SYSTEM_CAPS_DFP_GPU_SCALING_BUG_154102         0:0x04
+#define NV0073_CTRL_SYSTEM_CAPS_SLI_INTERLACED_MODE_BUG_235218     0:0x08 // Deprecated
+#define NV0073_CTRL_SYSTEM_CAPS_STEREO_DIN_AVAILABLE               0:0x10
+#define NV0073_CTRL_SYSTEM_CAPS_OFFSET_PCLK_DFP_FOR_EMI_BUG_443891 0:0x20
+#define NV0073_CTRL_SYSTEM_CAPS_GET_DMI_SCANLINE_SUPPORTED         0:0x40
+/*
+ * Indicates support for HDCP Key Selection Vector (KSV) list and System
+ * Renewability Message (SRM) validation
+ */
+#define NV0073_CTRL_SYSTEM_CAPS_KSV_SRM_VALIDATION_SUPPORTED       0:0x80
+
+#define NV0073_CTRL_SYSTEM_CAPS_SINGLE_HEAD_MST_SUPPORTED          1:0x01
+#define NV0073_CTRL_SYSTEM_CAPS_SINGLE_HEAD_DUAL_SST_SUPPORTED     1:0x02
+#define NV0073_CTRL_SYSTEM_CAPS_HDMI_2_0_SUPPORTED                 1:0x04
+#define NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED                1:0x08
+#define NV0073_CTRL_SYSTEM_CAPS_RASTER_LOCK_NEEDS_MIO_POWER        1:0x10
+/*
+ * Indicates that modesets where no heads are increasing resource requirements,
+ * or no heads are decreasing resource requirements, can be done glitchlessly.
+ */
+#define NV0073_CTRL_SYSTEM_CAPS_GLITCHLESS_MODESET_SUPPORTED       1:0x20
+/* Indicates that SW ACR is enabled for HDMI 2.1 due to Bug 3275257. */
+#define NV0073_CTRL_SYSTEM_CAPS_HDMI21_SW_ACR_BUG_3275257          1:0x40
+
+/* Size in bytes of display caps table. Keep in sync with # of fields above. */
+#define NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE 2U
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2
+ *
+ * This command returns the set of display capabilities for the parent device
+ * in the form of an array of unsigned bytes. Display capabilities
+ * include supported features and required workarounds for the display
+ * engine(s) within the device, each represented by a byte offset into the
+ * table and a bit position within that byte. The set of display capabilities
+ * will be normalized across all GPUs within the device (a feature capability
+ * will be set only if it's supported on all GPUs, while a required workaround
+ * capability will be set if any of the GPUs require it).
+ *
+ * [out] capsTbl
+ *   The display caps bits will be transferred by the RM into this array of
+ *   unsigned bytes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2 (0x730101U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x01U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS {
+    NvU8 capsTbl[NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE];
+} NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS
+ *
+ * This command returns the number of heads supported by the specified
+ * subdevice and available for use by displays.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * flags
+ *   This parameter specifies optional flags to be used while retrieving
+ *   the number of heads.
+ *   Possible valid flags are:
+ *     NV0073_CTRL_SYSTEM_GET_NUM_HEADS_FLAGS_CLIENT
+ *       This flag is used to request the number of heads that are
+ *       currently in use by an NV client using a user display class
+ *       instance (see NV15_VIDEO_LUT_CURSOR_DAC for an example). If this
+ *       flag is disabled then the total number of heads supported is
+ *       returned.
+ * numHeads
+ *   This parameter returns the number of usable heads for the specified
+ *   subdevice.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID (0x02U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 numHeads;
+} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
+
+/* valid get num heads flags */
+#define NV0073_CTRL_SYSTEM_GET_NUM_HEADS_FLAGS_CLIENT          0:0
+#define NV0073_CTRL_SYSTEM_GET_NUM_HEADS_FLAGS_CLIENT_DISABLE (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_NUM_HEADS_FLAGS_CLIENT_ENABLE  (0x00000001U)
+
+
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE
+ *
+ * This command returns the current RG scanline of the specified head on the
+ * specified subdevice. To get the DMI scanline on supported chips, use
+ * NV0073_CTRL_CMD_SYSTEM_GET_DMI_SCANLINE.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * head
+ *   This parameter specifies the head for which the current scanline
+ *   should be retrieved. This value must be between zero and the
+ *   maximum number of heads supported by the subdevice.
+ * currentScanline
+ *   This parameter returns the current RG scanline value for the specified
+ *   head. If the head does not have a valid mode enabled then a scanline
+ *   value of 0xffffffff is returned.
+ * bStereoEyeSupported (out)
+ *   This parameter specifies whether stereoEye reporting is supported (this
+ *   is hw dependent). Note that this value doesn't actually reflect whether
+ *   the given head is really in stereo mode.
+ * stereoEye (out)
+ *   If supported (i.e. bStereoEyeSupported is NV_TRUE), this parameter
+ *   returns either NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE_RIGHT_EYE or
+ *   NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE_LEFT_EYE, reflecting the stereo eye
+ *   that is currently scanned out. Although this value typically changes at
+ *   the beginning of vblank, the exact guarantee isn't more accurate than
+ *   "somewhere in vblank".
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE (0x730104U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE_RIGHT_EYE 0x00000000U
+#define NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE_LEFT_EYE  0x00000001U
+
+#define NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS_MESSAGE_ID (0x04U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  head;
+    NvU32  currentScanline;
+    NvBool bStereoEyeSupported;
+    NvU32  stereoEye;
+} NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_VBLANK_COUNTER
+ *
+ * This command returns the current VBlank counter of the specified head on the
+ * specified subdevice.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * head
+ *   This parameter specifies the head for which the vblank counter
+ *   should be retrieved. This value must be between zero and the
+ *   maximum number of heads supported by the subdevice.
+ * lowLatencyHint
+ *   RM maintains several different vblank counts. When this parameter is
+ *   NV_TRUE, the command may return the low latency count.
+ * verticalBlankCounter
+ *   This parameter returns the vblank counter value for the specified
+ *   head. If the display mode is not valid or vblank is not active, then
+ *   the verticalBlankCounter value is undefined.
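+ *
+ * A usage sketch (illustrative only; the handles and the NvRmControl()
+ * dispatch are assumptions):
+ *
+ *   NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS p = { 0 };
+ *   p.subDeviceInstance = 0;
+ *   p.head              = head;
+ *   p.lowLatencyHint    = NV_FALSE;
+ *   NV_STATUS status = NvRmControl(hClient, hDisplayCommon,
+ *                                  NV0073_CTRL_CMD_SYSTEM_GET_VBLANK_COUNTER,
+ *                                  &p, sizeof(p));
+ *   // Sampling p.verticalBlankCounter twice and comparing the two values is
+ *   // a simple way to confirm that at least one vblank has elapsed.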
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_VBLANK_COUNTER (0x730105U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS_MESSAGE_ID (0x05U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  head;
+    NvBool lowLatencyHint;
+    NvU32  verticalBlankCounter;
+} NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_VBLANK_ENABLE
+ *
+ * This command returns the current VBlank enable status for the specified
+ * head.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * head
+ *   This parameter specifies the head for which the vblank status
+ *   should be retrieved. This value must be between zero and the
+ *   maximum number of heads supported by the subdevice.
+ * bEnabled
+ *   This parameter returns the vblank enable status for the specified head.
+ *   A value of NV_FALSE indicates that vblank interrupts are not currently
+ *   enabled, while a value of NV_TRUE indicates that vblank interrupts are
+ *   currently enabled.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_VBLANK_ENABLE (0x730106U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS_MESSAGE_ID (0x06U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  head;
+    NvBool bEnabled;
+} NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED
+ *
+ * This command returns the set of supported display IDs for the specified
+ * subdevice in the form of a 32-bit display mask. State from internal
+ * display connectivity tables is used to determine the set of possible
+ * display connections for the GPU. The presence of a display in the
+ * display mask only indicates the display is supported. The connectivity
+ * status of the display should be determined using the
+ * NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE command. The displayMask
+ * value returned by NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED is static
+ * and will remain consistent across boots of the system.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayMask
+ *   This parameter returns a NV0073_DISPLAY_MASK value describing the set
+ *   of displays supported by the subdevice. An enabled bit in displayMask
+ *   indicates the support of a display device with that displayId.
+ * displayMaskDDC
+ *   This parameter returns a NV0073_DISPLAY_MASK value, indicating the
+ *   subset of displayMask that supports DDC.
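+ *
+ * A usage sketch (illustrative only; the handles and the NvRmControl()
+ * dispatch are assumptions):
+ *
+ *   NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS p = { 0 };
+ *   p.subDeviceInstance = 0;
+ *   NV_STATUS status = NvRmControl(hClient, hDisplayCommon,
+ *                                  NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED,
+ *                                  &p, sizeof(p));
+ *   // Each set bit in p.displayMask is one supported displayId; walk them
+ *   // with the usual lowest-set-bit iteration:
+ *   for (NvU32 mask = p.displayMask; mask != 0; mask &= (mask - 1))
+ *   {
+ *       NvU32 displayId = mask & ~(mask - 1);  // isolate lowest set bit
+ *       // probe displayId with NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE
+ *   }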
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730107U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID (0x07U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayMask;
+    NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE
+ *
+ * This command can be used to check the presence of a mask of display
+ * devices on the specified subdevice.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * flags
+ *   This parameter specifies optional flags to be used while retrieving
+ *   the connection state information.
+ *   Here are the currently defined fields:
+ *     NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD
+ *       A client uses this field to indicate what method it wishes the
+ *       system to use when determining the presence of attached displays.
+ *       Possible values are:
+ *         NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_DEFAULT
+ *           The system decides what method to use.
+ *         NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_CACHED
+ *           Return the last full detection state for the display mask.
+ *         NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_ECONODDC
+ *           Ping the DDC address of the given display mask to check for
+ *           a connected device. This is a lightweight method to check
+ *           for a present device.
+ *     NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC
+ *       A client uses this field to indicate whether to allow DDC during
+ *       this detection or to not use it.
+ *       Possible values are:
+ *         NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC_DEFAULT
+ *           The system will use DDC as needed for each display.
+ *         NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC_DISABLE
+ *           The system will not use DDC for any display. If DDC is
+ *           disabled, this detection state will not be cached.
+ *     NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD
+ *       A client uses this field to indicate whether to use load detection
+ *       during this detection or to not use it.
+ *       Possible values are:
+ *         NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD_DEFAULT
+ *           The system will use load detection as needed for each display.
+ *         NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD_DISABLE
+ *           The system will not use load detection for any display. If
+ *           load detection is disabled, this detection state will not
+ *           be cached.
+ * displayMask
+ *   This parameter specifies an NV0073_DISPLAY_MASK value describing
+ *   the set of displays for which connectivity status is to be checked.
+ *   If a display is present then the corresponding bit in the display
+ *   mask is left enabled. If the display is not present then the
+ *   corresponding bit in the display mask is disabled. Upon return this
+ *   parameter contains the subset of displays in the mask that are
+ *   connected.
+ *
+ *   If displayMask includes bit(s) that correspond to a TV encoder, the
+ *   result will be simply 'yes' or 'no' without any indication of which
+ *   connector(s) are actually attached. For fine-grained TV attachment
+ * detection, please see NV0073_CTRL_CMD_TV_GET_ATTACHMENT_STATUS.
+ * retryTimeMs
+ *   This parameter is an output of this command. In the case of an
+ *   NVOS_STATUS_ERROR_RETRY return status, this parameter returns the time
+ *   duration in milliseconds after which the client should retry this
+ *   command.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NVOS_STATUS_ERROR_RETRY
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730108U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID (0x08U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 displayMask;
+    NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+/* valid get connect state flags */
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD          1:0
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_DEFAULT  (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_CACHED   (0x00000001U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_ECONODDC (0x00000002U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC             4:4
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC_DEFAULT     (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC_DISABLE     (0x00000001U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD            5:5
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD_DEFAULT    (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD_DISABLE    (0x00000001U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_VBLANK          6:6
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_VBLANK_DEFAULT  (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_VBLANK_SAFE     (0x00000001U)
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_CONFIG
+ *
+ * This command can be used to retrieve the dynamic hotplug state information
+ * that is currently recorded by the RM. This information can be used by the
+ * client to determine which displays to detect after a hotplug event occurs.
+ * Alternatively, if the client knows that this device generates a hot
+ * plug/unplug signal on all connectors, then this can be used to cull
+ * displays from detection.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * flags
+ *   This parameter specifies optional flags to be used while retrieving
+ *   or changing the hotplug configuration.
+ *   No flags are currently defined.
+ * hotplugEventMask
+ *   For _GET_HOTPLUG_CONFIG, this returns which connectors the client
+ *   has asked for notifications for, when a hotplug event is detected.
+ *   Events can only be provided for connectors whose displayID is set
+ *   by the system in the hotplugInterruptible field.
+ * hotplugPollable
+ *   For _GET_HOTPLUG_CONFIG, this returns which connectors are pollable
+ *   in some non-destructive fashion.
+ * hotplugInterruptible
+ *   For _GET_HOTPLUG_CONFIG, this returns which connectors are capable
+ *   of generating interrupts.
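+ *
+ * A usage sketch (illustrative only; the handles, the NvRmControl()
+ * dispatch, and the dirtyMask variable are assumptions):
+ *
+ *   NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS p = { 0 };
+ *   p.subDeviceInstance = 0;
+ *   NV_STATUS status = NvRmControl(hClient, hDisplayCommon,
+ *                                  NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_CONFIG,
+ *                                  &p, sizeof(p));
+ *   // After a hotplug interrupt, detection can be restricted to
+ *   // interrupt-capable connectors:
+ *   NvU32 candidates = dirtyMask & p.hotplugInterruptible;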
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_CONFIG (0x730109U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS_MESSAGE_ID (0x09U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 hotplugEventMask;
+    NvU32 hotplugPollable;
+    NvU32 hotplugInterruptible;
+    NvU32 hotplugAlwaysAttached;
+} NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_STATE
+ *
+ * This command can be used to retrieve the dynamic hotplug state information
+ * that is currently recorded by the RM. This information can be used by the
+ * client to determine which displays to detect after a hotplug event occurs.
+ * Alternatively, if the client knows that this device generates a hot
+ * plug/unplug signal on all connectors, then this can be used to cull
+ * displays from detection.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * flags
+ *   This parameter specifies optional flags to be used while retrieving
+ *   the hotplug state information.
+ *   Here are the currently defined fields:
+ *     NV0073_CTRL_SYSTEM_GET_HOTPLUG_STATE_FLAGS_LID
+ *       A client uses this field to determine the lid state.
+ *       Possible values are:
+ *         NV0073_CTRL_SYSTEM_GET_HOTPLUG_STATE_FLAGS_LID_OPEN
+ *           The lid is open.
+ *         NV0073_CTRL_SYSTEM_GET_HOTPLUG_STATE_FLAGS_LID_CLOSED
+ *           The lid is closed. The client should remove devices as
+ *           reported inside the
+ *           NV0073_CTRL_SYSTEM_GET_CONNECT_POLICY_PARAMS.lidClosedMask.
+ * hotplugAfterEdidMask
+ *   This display mask specifies an NV0073_DISPLAY_MASK value describing
+ *   the set of displays that have seen a hotplug or hotunplug event
+ *   sometime after the last valid EDID read. If the device never has
+ *   a valid EDID read, then it will always be listed here.
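+ *
+ * A usage sketch (illustrative only; FLD_TEST_DRF is the field-test helper
+ * from nvmisc.h, and the handles and dispatch are assumptions):
+ *
+ *   NV0073_CTRL_SYSTEM_GET_HOTPLUG_STATE_PARAMS p = { 0 };
+ *   p.subDeviceInstance = 0;
+ *   NV_STATUS status = NvRmControl(hClient, hDisplayCommon,
+ *                                  NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_STATE,
+ *                                  &p, sizeof(p));
+ *   if (FLD_TEST_DRF(0073_CTRL, _SYSTEM_GET_HOTPLUG_STATE_FLAGS,
+ *                    _LID, _CLOSED, p.flags))
+ *   {
+ *       // drop the lid-closed panel from the detection candidates
+ *   }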
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_STATE (0x73010aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_HOTPLUG_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_HOTPLUG_STATE_PARAMS_MESSAGE_ID (0x0AU)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_HOTPLUG_STATE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 hotplugAfterEdidMask;
+} NV0073_CTRL_SYSTEM_GET_HOTPLUG_STATE_PARAMS;
+
+/* valid get hotplug state flags */
+#define NV0073_CTRL_SYSTEM_GET_HOTPLUG_STATE_FLAGS_LID        0:0
+#define NV0073_CTRL_SYSTEM_GET_HOTPLUG_STATE_FLAGS_LID_OPEN   (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_HOTPLUG_STATE_FLAGS_LID_CLOSED (0x00000001U)
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_HEAD_ROUTING_MAP
+ *
+ * This command can be used to retrieve the suggested head routing map
+ * for the specified display mask. A head routing map describes the
+ * suggested crtc (or head) assignments for each display in the specified
+ * mask.
+ *
+ * Up to MAX_DISPLAYS displays may be specified in the display mask. Displays
+ * are numbered from zero beginning with the lowest bit position set in the
+ * mask. The corresponding head assignment for each of the specified displays
+ * can then be found in the respective per-device field in the routing map.
+ *
+ * If a particular display cannot be successfully assigned a position in the
+ * head routing map then it is removed from the display mask.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayMask
+ *   This parameter specifies the NV0073_DISPLAY_MASK value for which
+ *   the head routing map is desired. Each enabled bit indicates
+ *   a display device to include in the routing map. Enabled bits
+ *   must represent supported displays as indicated by the
+ *   NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED command. If a particular
+ *   display cannot be included in the routing map then its corresponding
+ *   bit in the displayMask will be disabled. A return value of 0 in
+ *   displayMask indicates that a head routing map could not be constructed
+ *   with the given display devices.
+ * oldDisplayMask
+ *   This optional parameter specifies a prior display mask to be
+ *   used when generating the head routing map to be returned in
+ *   headRoutingMap. Displays set in oldDisplayMask are retained
+ *   if possible in the new routing map.
+ * oldHeadRoutingMap
+ *   This optional parameter specifies a prior head routing map to be
+ *   used when generating the new routing map to be returned in
+ *   headRoutingMap. Head assignments in oldHeadRoutingMap are
+ *   retained if possible in the new routing map.
+ * headRoutingMap
+ *   This parameter returns the new head routing map. This parameter
+ *   is organized into eight distinct fields, each containing the head
+ *   assignment for the corresponding display in the display mask.
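+ *
+ * A decode sketch (illustrative only; DRF_VAL is the field-extract helper
+ * from nvmisc.h, and p is a params struct already returned by this command):
+ *
+ *   // Head assigned to the lowest display in displayMask (display 0):
+ *   NvU32 head0 = DRF_VAL(0073_CTRL, _SYSTEM_HEAD_ROUTING_MAP,
+ *                         _DISPLAY0, p.headRoutingMap);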
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_HEAD_ROUTING_MAP (0x73010bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS_MESSAGE_ID (0x0BU)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayMask;
+    NvU32 oldDisplayMask;
+    NvU32 oldHeadRoutingMap;
+    NvU32 headRoutingMap;
+} NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS;
+
+/* maximum number of allowed displays in a routing map */
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_MAX_DISPLAYS (8U)
+
+/* per-display head assignments in a routing map */
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY0 3:0
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY1 7:4
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY2 11:8
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY3 15:12
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY4 19:16
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY5 23:20
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY6 27:24
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY7 31:28
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE
+ *
+ * This command returns the active display ID for the specified head
+ * on the specified subdevice. The active display may be established
+ * at system boot by low-level software and can then be later modified
+ * by an NV client using a user display class instance (see
+ * NV15_VIDEO_LUT_CURSOR_DAC).
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * head
+ *   This parameter specifies the head for which the active display
+ *   should be retrieved. This value must be between zero and the
+ *   maximum number of heads supported by the subdevice.
+ * flags
+ *   This parameter specifies optional flags to be used while retrieving
+ *   the active display information.
+ *   Possible valid flags are:
+ *     NV0073_CTRL_SYSTEM_GET_ACTIVE_FLAGS_CLIENT
+ *       This flag is used to limit the search for the active display to
+ *       that established by an NV client. If this flag is not specified,
+ *       then any active display is returned (setup at system boot by
+ *       low-level software or later by an NV client).
+ * displayId
+ *   This parameter returns the displayId of the active display. A value
+ *   of zero indicates no display is active.
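+ *
+ * A usage sketch (illustrative only; numHeads would come from
+ * NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS, and the handles and dispatch are
+ * assumptions):
+ *
+ *   for (NvU32 head = 0; head < numHeads; head++)
+ *   {
+ *       NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS p = { 0 };
+ *       p.subDeviceInstance = 0;
+ *       p.head              = head;
+ *       if (NvRmControl(hClient, hDisplayCommon,
+ *                       NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE,
+ *                       &p, sizeof(p)) == NV_OK && p.displayId != 0)
+ *       {
+ *           // head is currently driving p.displayId
+ *       }
+ *   }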
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x73010cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID (0x0CU)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NvU32 flags;
+    NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+/* valid get active flags */
+#define NV0073_CTRL_SYSTEM_GET_ACTIVE_FLAGS_CLIENT          0:0
+#define NV0073_CTRL_SYSTEM_GET_ACTIVE_FLAGS_CLIENT_DISABLE (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_ACTIVE_FLAGS_CLIENT_ENABLE  (0x00000001U)
+
+
+
+/*
+ * NV0073_CTRL_SYSTEM_ACPI_ID_MAP
+ *
+ * This structure defines the mapping between the RM's displayId and the
+ * defined ACPI IDs for each display.
+ *
+ * displayId
+ *   This parameter is a handle to a single display output path from the
+ *   GPU pins to the display connector. Each display ID is defined by one
+ *   bit. A zero in this parameter indicates a skip entry.
+ * acpiId
+ *   This parameter defines the corresponding ACPI ID of the displayId.
+ * flags
+ *   This parameter specifies optional flags that describe the association
+ *   between the display ID and the ACPI ID.
+ *     NV0073_CTRL_SYSTEM_ACPI_ID_MAP_ORIGIN
+ *       This field describes where the ACPI ID was found.
+ *         NV0073_CTRL_SYSTEM_ACPI_ID_MAP_ORIGIN_RM
+ *           The ACPI ID was generated by RM code.
+ *         NV0073_CTRL_SYSTEM_ACPI_ID_MAP_ORIGIN_DOD
+ *           The ACPI ID was found via the ACPI _DOD call.
+ *         NV0073_CTRL_SYSTEM_ACPI_ID_MAP_ORIGIN_CLIENT
+ *           The ACPI ID was generated by an RM client and sent to RM. Note
+ *           this must be set on a NV0073_CTRL_CMD_SYSTEM_SET_ACPI_ID_MAP
+ *           call.
+ *     NV0073_CTRL_SYSTEM_ACPI_ID_MAP_SNAG_UNDOCKED
+ *       This flag indicates that the ACPI ID is only valid when the system
+ *       is undocked. If this flag is not set, the ACPI ID is always valid.
+ *     NV0073_CTRL_SYSTEM_ACPI_ID_MAP_SNAG_DOCKED
+ *       This flag indicates that the ACPI ID is only valid when the system
+ *       is docked. If this flag is not set, the ACPI ID is always valid.
+ *     NV0073_CTRL_SYSTEM_ACPI_ID_MAP_DOD_BIOS_DETECT
+ *       This flag is set only if the _DOD returns that the device can be
+ *       detected by the system BIOS. This flag is copied directly from
+ *       the ACPI spec.
+ *     NV0073_CTRL_SYSTEM_ACPI_ID_MAP_DOD_NON_VGA_OUTPUT
+ *       This flag is set only if the _DOD returns that the device is
+ *       a non-VGA device whose power is related to the VGA device,
+ *       e.g. a TV tuner, DVD decoder, or video capture device. This flag
+ *       is copied directly from the ACPI spec.
+ *     NV0073_CTRL_SYSTEM_ACPI_ID_MAP_DOD_MULTIHEAD_ID
+ *       This value is set only if the _DOD returns it. The number
+ *       indicates the head output of a multi-head device. This has no
+ *       relation to the term 'head' as currently used in the RM.
+ *       This is strictly a copy of the value directly from the ACPI spec.
+ *     NV0073_CTRL_SYSTEM_ACPI_ID_MAP_DOD_SCHEME
+ *       This flag is set only if the _DOD returns that the acpiID follows
+ *       the ACPI 3.0 spec. This flag is copied directly from
+ *       the ACPI spec.
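+ *
+ * A decode sketch (illustrative only; DRF_VAL is the field-extract helper
+ * from nvmisc.h, applied to one entry's flags word):
+ *
+ *   NvU32 origin = DRF_VAL(0073_CTRL, _SYSTEM_ACPI_ID_MAP,
+ *                          _ORIGIN, entry.flags);
+ *   // origin is _RM, _DOD, or _CLIENT, identifying how the ACPI ID
+ *   // was produced.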
+ *
+ */
+
+typedef struct NV0073_CTRL_SYSTEM_ACPI_ID_MAP_PARAMS {
+    NvU32 displayId;
+    NvU32 acpiId;
+    NvU32 flags;
+} NV0073_CTRL_SYSTEM_ACPI_ID_MAP_PARAMS;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_ORIGIN                   1:0
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_ORIGIN_RM                0x00000000U
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_ORIGIN_DOD               0x00000001U
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_ORIGIN_CLIENT            0x00000002U
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_SNAG_UNDOCKED            2:2
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_SNAG_UNDOCKED_FALSE      0x00000000U
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_SNAG_UNDOCKED_TRUE       0x00000001U
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_SNAG_DOCKED              3:3
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_SNAG_DOCKED_FALSE        0x00000000U
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_SNAG_DOCKED_TRUE         0x00000001U
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_DOD_BIOS_DETECT          16:16
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_DOD_BIOS_DETECT_FALSE    0x00000000U
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_DOD_BIOS_DETECT_TRUE     0x00000001U
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_DOD_NON_VGA_OUTPUT       17:17
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_DOD_NON_VGA_OUTPUT_FALSE 0x00000000U
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_DOD_NON_VGA_OUTPUT_TRUE  0x00000001U
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_DOD_MULTIHEAD_ID         20:18
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_DOD_SCHEME               31:31
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_DOD_SCHEME_VENDOR        0x00000000U
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_DOD_SCHEME_30            0x00000001U
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_ACPI_ID_MAP
+ *
+ * This command retrieves the mapping between the RM's displayId and the
+ * defined ACPI IDs for each display.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and
+ *   the total number of subdevices within the parent device. It should
+ *   be set to zero for default behavior.
+ * acpiIdMap
+ *   An array of NV0073_CTRL_SYSTEM_ACPI_ID_MAP_PARAMS entries mapping
+ *   display IDs to ACPI IDs, with flags describing each association.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *     Only returned if subDeviceInstance was not valid.
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACPI_ID_MAP (0x730115U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACPI_ID_MAP_PARAMS_MESSAGE_ID" */
+
+
+#define NV0073_CTRL_SYSTEM_GET_ACPI_ID_MAP_PARAMS_MESSAGE_ID (0x15U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_ACPI_ID_MAP_PARAMS {
+    NvU32                                 subDeviceInstance;
+    NV0073_CTRL_SYSTEM_ACPI_ID_MAP_PARAMS acpiIdMap[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} NV0073_CTRL_SYSTEM_GET_ACPI_ID_MAP_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_INTERNAL_DISPLAYS
+ *
+ * This command returns the set of internal (safe) display IDs for the
+ * specified subdevice in the form of a 32-bit display mask. Safe means that
+ * the displays do not require copy protection as they are on the
+ * motherboard.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ *   internalDisplaysMask
+ *     This parameter returns a NV0073_DISPLAY_MASK value describing the set
+ *     of displays that are internal (safe) and which do not require copy
+ *     protection schemes.
+ *   availableInternalDisplaysMask
+ *     This parameter returns a NV0073_DISPLAY_MASK value describing the set
+ *     of displays that are internal and available for use.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_INTERNAL_DISPLAYS (0x730116U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS_MESSAGE_ID (0x16U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 internalDisplaysMask;
+    NvU32 availableInternalDisplaysMask;
+} NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_ACPI_SUBSYSTEM_ACTIVATED
+ *
+ * This command is used to notify RM that all subdevices are ready for ACPI
+ * calls. The caller must make sure that the OS is ready to handle the ACPI
+ * calls for each ACPI ID, so this call must be made after the OS has
+ * initialized all the display ACPI IDs for this subdevice.
+ * In addition, the ACPI spec provides a function that allows display drivers
+ * to read the EDID directly from the SBIOS for each display's ACPI ID. This
+ * EDID overrides any EDID found via an I2C- or DPAux-based transaction.
+ * This command will also attempt to call the ACPI _DDC function to read the
+ * EDID from the SBIOS for all displayIDs. If an EDID is found from this call,
+ * the RM will store that new EDID in the EDID buffer of that OD.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_NOT_SUPPORTED
+ *
+ */
+
+#define NV0073_CTRL_SYSTEM_ACPI_SUBSYSTEM_ACTIVATED_PARAMS_MESSAGE_ID (0x17U)
+
+typedef struct NV0073_CTRL_SYSTEM_ACPI_SUBSYSTEM_ACTIVATED_PARAMS {
+    NvU32 subDeviceInstance;
+} NV0073_CTRL_SYSTEM_ACPI_SUBSYSTEM_ACTIVATED_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_ACPI_SUBSYSTEM_ACTIVATED (0x730117U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_ACPI_SUBSYSTEM_ACTIVATED_PARAMS_MESSAGE_ID" */
+
+/*
+ * To support RMCTRLs for BOARDOBJGRP_E255, we were required to increase the
+ * XAPI limit to 16K. It was observed that XP does NOT allow a static array
+ * size greater than 10K, and this was causing DVS failures. So we use the
+ * OLD XAPI value (4K) for NV0073_CTRL_SYSTEM_SRM_BUFFER_MAX while internally
+ * using the new, updated XAPI value (16K).
+ */
+#define XAPI_ENVELOPE_MAX_PAYLOAD_SIZE_OLD 4096U
+
+/*
+ * NV0073_CTRL_SYSTEM_SRM_CHUNK
+ *
+ * Several control commands require an SRM, which may be larger than the
+ * available buffer. Therefore, this structure is used to transfer the needed
+ * data in chunks.
+ *
+ *   startByte
+ *     Index of the byte in the SRM buffer at which the current chunk of data
+ *     starts. If this value is 0, it indicates the start of a new SRM.
+ *     A value other than 0 indicates additional data for an SRM.
+ *   numBytes
+ *     Size in bytes of the current chunk of data.
+ *   totalBytes
+ *     Size in bytes of the entire SRM.
+ *   srmBuffer
+ *     Buffer containing the current chunk of SRM data.
+ */
+/* Set max SRM size to the XAPI max, minus some space for other fields */
+#define NV0073_CTRL_SYSTEM_SRM_BUFFER_MAX (0xe00U) /* finn: Evaluated from "(XAPI_ENVELOPE_MAX_PAYLOAD_SIZE_OLD - 512)" */
+
+typedef struct NV0073_CTRL_SYSTEM_SRM_CHUNK {
+    NvU32 startByte;
+    NvU32 numBytes;
+    NvU32 totalBytes;
+
+    /* C form: NvU8 srmBuffer[NV0073_CTRL_SYSTEM_SRM_BUFFER_MAX]; */
+    NvU8  srmBuffer[NV0073_CTRL_SYSTEM_SRM_BUFFER_MAX];
+} NV0073_CTRL_SYSTEM_SRM_CHUNK;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_VALIDATE_SRM
+ *
+ * Instructs the RM to validate the SRM for use by HDCP revocation. The SRM
+ * may be larger than the buffer provided by the API. In that case, the SRM is
+ * sent in chunks no larger than NV0073_CTRL_SYSTEM_SRM_BUFFER_MAX bytes.
+ *
+ * Upon completion of the validation, which is an asynchronous operation, the
+ * client will receive an event. Alternatively, the client
+ * may poll for completion of SRM validation via
+ * NV0073_CTRL_CMD_SYSTEM_GET_SRM_STATUS.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   srm
+ *     A chunk of the SRM.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_NOT_READY
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_WARN_MORE_PROCESSING_REQUIRED
+ *   NV_ERR_INSUFFICIENT_RESOURCES
+ */
+#define NV0073_CTRL_CMD_SYSTEM_VALIDATE_SRM (0x730118U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_VALIDATE_SRM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_VALIDATE_SRM_PARAMS_MESSAGE_ID (0x18U)
+
+typedef struct NV0073_CTRL_SYSTEM_VALIDATE_SRM_PARAMS {
+    NvU32                        subDeviceInstance;
+    NV0073_CTRL_SYSTEM_SRM_CHUNK srm;
+} NV0073_CTRL_SYSTEM_VALIDATE_SRM_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_SRM_STATUS
+ *
+ * Retrieves the status of the request to validate the SRM. If a request to
+ * validate an SRM is still pending, NV_ERR_NOT_READY will be
+ * returned and the status will not be updated. An illustrative
+ * submit-and-poll sketch follows below.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   status
+ *     Result of the last SRM validation request.
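+ *
+ * Illustrative sketch (not part of this interface): submitting an SRM in
+ * NV0073_CTRL_SYSTEM_SRM_BUFFER_MAX-sized chunks and polling for the result.
+ * NvRmControl() stands in for whatever RM control entry point the client
+ * actually uses; hClient, hDisplayCommon, srmData, and srmSize are
+ * hypothetical.
+ *
+ *   NV0073_CTRL_SYSTEM_VALIDATE_SRM_PARAMS v = { 0 };
+ *   for (NvU32 off = 0; off < srmSize; off += v.srm.numBytes)
+ *   {
+ *       NvU32 left = srmSize - off;
+ *       v.srm.startByte  = off;
+ *       v.srm.numBytes   = (left < NV0073_CTRL_SYSTEM_SRM_BUFFER_MAX) ?
+ *                          left : NV0073_CTRL_SYSTEM_SRM_BUFFER_MAX;
+ *       v.srm.totalBytes = srmSize;
+ *       memcpy(v.srm.srmBuffer, srmData + off, v.srm.numBytes);
+ *       status = NvRmControl(hClient, hDisplayCommon,
+ *                            NV0073_CTRL_CMD_SYSTEM_VALIDATE_SRM,
+ *                            &v, sizeof(v));
+ *   }
+ *   NV0073_CTRL_SYSTEM_GET_SRM_STATUS_PARAMS s = { 0 };
+ *   do
+ *   {
+ *       status = NvRmControl(hClient, hDisplayCommon,
+ *                            NV0073_CTRL_CMD_SYSTEM_GET_SRM_STATUS,
+ *                            &s, sizeof(s));
+ *   } while (status == NV_ERR_NOT_READY);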
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_NOT_READY
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_SRM_STATUS (0x730119U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SRM_STATUS_PARAMS_MESSAGE_ID" */
+
+typedef enum NV0073_CTRL_SYSTEM_SRM_STATUS {
+    NV0073_CTRL_SYSTEM_SRM_STATUS_OK = 0,         // Validation succeeded
+    NV0073_CTRL_SYSTEM_SRM_STATUS_FAIL = 1,       // Validation request failed
+    NV0073_CTRL_SYSTEM_SRM_STATUS_BAD_FORMAT = 2, // Bad SRM format
+    NV0073_CTRL_SYSTEM_SRM_STATUS_INVALID = 3,    // Bad SRM signature
+} NV0073_CTRL_SYSTEM_SRM_STATUS;
+
+#define NV0073_CTRL_SYSTEM_GET_SRM_STATUS_PARAMS_MESSAGE_ID (0x19U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_SRM_STATUS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 status;
+} NV0073_CTRL_SYSTEM_GET_SRM_STATUS_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_HDCP_REVOCATION_CHECK
+ *
+ * Performs the HDCP revocation process. Given the supplied SRM, all attached
+ * devices will be checked to see whether they are on the revocation list.
+ *
+ *   srm
+ *     The SRM to do the revocation check against. For SRMs larger than
+ *     NV0073_CTRL_SYSTEM_SRM_BUFFER_MAX, the caller will need to break up
+ *     the SRM into chunks and make multiple calls.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_NOT_READY
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_WARN_MORE_PROCESSING_REQUIRED
+ *   NV_ERR_INSUFFICIENT_RESOURCES
+ */
+#define NV0073_CTRL_CMD_SYSTEM_HDCP_REVOCATION_CHECK (0x73011bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_HDCP_REVOCATE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_HDCP_REVOCATE_PARAMS_MESSAGE_ID (0x1BU)
+
+typedef struct NV0073_CTRL_SYSTEM_HDCP_REVOCATE_PARAMS {
+    NV0073_CTRL_SYSTEM_SRM_CHUNK srm;
+} NV0073_CTRL_SYSTEM_HDCP_REVOCATE_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_UPDATE_SRM
+ *
+ * Updates the SRM used by RM for HDCP revocation checks. The SRM must have
+ * been previously validated as authentic.
+ *
+ *   srm
+ *     The SRM data. For SRMs larger than NV0073_CTRL_SYSTEM_SRM_BUFFER_MAX,
+ *     the caller will need to break up the SRM into chunks and make multiple
+ *     calls.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_NOT_READY
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_WARN_MORE_PROCESSING_REQUIRED
+ *   NV_ERR_INSUFFICIENT_RESOURCES
+ */
+#define NV0073_CTRL_CMD_SYSTEM_UPDATE_SRM (0x73011cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_UPDATE_SRM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_UPDATE_SRM_PARAMS_MESSAGE_ID (0x1CU)
+
+typedef struct NV0073_CTRL_SYSTEM_UPDATE_SRM_PARAMS {
+    NV0073_CTRL_SYSTEM_SRM_CHUNK srm;
+} NV0073_CTRL_SYSTEM_UPDATE_SRM_PARAMS;
+
+/*
+ * NV0073_CTRL_SYSTEM_CONNECTOR_INFO
+ *
+ * This structure describes a single connector table entry.
+ *
+ *   type
+ *     This field specifies the connector type.
+ *   displayMask
+ *     This field specifies the displayMask to which the connector belongs.
+ *   location
+ *     This field specifies the placement of the connector on the platform.
+ *   hotplug
+ *     This field specifies hotplug capabilities (if any) for the connector.
+ */ +typedef struct NV0073_CTRL_SYSTEM_CONNECTOR_INFO { + NvU32 type; + NvU32 displayMask; + NvU32 location; + NvU32 hotplug; +} NV0073_CTRL_SYSTEM_CONNECTOR_INFO; + +/* valid type values */ +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_VGA_15_PIN (0x00000000U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DVI_A (0x00000001U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_POD_VGA_15_PIN (0x00000002U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_TV_COMPOSITE (0x00000010U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_TV_SVIDEO (0x00000011U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_TV_SVIDEO_BO_COMPOSITE (0x00000012U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_TV_COMPONENT (0x00000013U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_TV_SCART (0x00000014U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_TV_SCART_EIAJ4120 (0x00000014U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_TV_EIAJ4120 (0x00000017U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_PC_POD_HDTV_YPRPB (0x00000018U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_PC_POD_SVIDEO (0x00000019U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_PC_POD_COMPOSITE (0x0000001AU) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DVI_I_TV_SVIDEO (0x00000020U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DVI_I_TV_COMPOSITE (0x00000021U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DVI_I_TV_SV_BO_COMPOSITE (0x00000022U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DVI_I (0x00000030U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DVI_D (0x00000031U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_ADC (0x00000032U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_LFH_DVI_I_1 (0x00000038U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_LFH_DVI_I_2 (0x00000039U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_LFH_SVIDEO (0x0000003AU) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_BNC (0x0000003CU) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_LVDS_SPWG (0x00000040U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_LVDS_OEM (0x00000041U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_LVDS_SPWG_DET (0x00000042U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_LVDS_OEM_DET (0x00000043U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_TVDS_OEM_ATT (0x00000045U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_VGA_15_PIN_UNDOCKED (0x00000050U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_VGA_15_PIN_DOCKED (0x00000051U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DVI_I_UNDOCKED (0x00000052U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DVI_I_DOCKED (0x00000053U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DVI_D_UNDOCKED (0x00000052U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DVI_D_DOCKED (0x00000053U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DP_EXT (0x00000056U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DP_INT (0x00000057U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DP_EXT_UNDOCKED (0x00000058U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DP_EXT_DOCKED (0x00000059U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_3PIN_DIN_STEREO (0x00000060U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_HDMI_A (0x00000061U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_AUDIO_SPDIF (0x00000062U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_HDMI_C_MINI (0x00000063U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_LFH_DP_1 (0x00000064U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_LFH_DP_2 (0x00000065U) +#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_VIRTUAL_WFD (0x00000070U) + 
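+
+/*
+ * Illustrative sketch (not part of this interface): classifying a connector
+ * table entry's type as DisplayPort, using only the type values defined
+ * above.
+ *
+ *   static NvBool IsDpConnectorType(NvU32 type)
+ *   {
+ *       return (type == NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DP_EXT)          ||
+ *              (type == NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DP_INT)          ||
+ *              (type == NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DP_EXT_UNDOCKED) ||
+ *              (type == NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_DP_EXT_DOCKED)   ||
+ *              (type == NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_LFH_DP_1)        ||
+ *              (type == NV0073_CTRL_SYSTEM_CONNECTOR_INFO_TYPE_LFH_DP_2);
+ *   }
+ */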
+/* valid hotplug values */
+#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_HOTPLUG_A_SUPPORTED           (0x00000001U)
+#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_HOTPLUG_B_SUPPORTED           (0x00000002U)
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_CONNECTOR_TABLE
+ *
+ * This command can be used to retrieve display connector information.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   version
+ *     This parameter returns the version of the connector table.
+ *   platform
+ *     This parameter returns the type of platform of the associated
+ *     subdevice.
+ *   connectorTableEntries
+ *     This parameter returns the number of valid entries in the connector
+ *     table.
+ *   connectorTable
+ *     This parameter returns the connector information in the form of an
+ *     array of NV0073_CTRL_SYSTEM_CONNECTOR_INFO structures.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECTOR_TABLE (0x73011dU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECTOR_TABLE_PARAMS_MESSAGE_ID" */
+
+/* maximum number of connector table entries */
+#define NV0073_CTRL_SYSTEM_GET_CONNECTOR_TABLE_MAX_ENTRIES (16U)
+
+#define NV0073_CTRL_SYSTEM_GET_CONNECTOR_TABLE_PARAMS_MESSAGE_ID (0x1DU)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECTOR_TABLE_PARAMS {
+    NvU32                             subDeviceInstance;
+    NvU32                             version;
+    NvU32                             platform;
+    NvU32                             connectorTableEntries;
+    /*
+     * C form:
+     * NV0073_CTRL_SYSTEM_CONNECTOR_INFO connectorTable[NV0073_CTRL_SYSTEM_GET_CONNECTOR_TABLE_MAX_ENTRIES];
+     */
+    NV0073_CTRL_SYSTEM_CONNECTOR_INFO connectorTable[NV0073_CTRL_SYSTEM_GET_CONNECTOR_TABLE_MAX_ENTRIES];
+} NV0073_CTRL_SYSTEM_GET_CONNECTOR_TABLE_PARAMS;
+
+/* valid version values */
+#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_VERSION_30                     (0x00000030U)
+
+/* valid platform values */
+#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_PLATFORM_DEFAULT_ADD_IN_CARD   (0x00000000U)
+#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_PLATFORM_TWO_PLATE_ADD_IN_CARD (0x00000001U)
+#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_PLATFORM_MOBILE_ADD_IN_CARD    (0x00000008U)
+#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_PLATFORM_MXM_MODULE            (0x00000009U)
+#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_PLATFORM_MOBILE_BACK           (0x00000010U)
+#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_PLATFORM_MOBILE_BACK_LEFT      (0x00000011U)
+#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_PLATFORM_MOBILE_BACK_DOCK      (0x00000018U)
+#define NV0073_CTRL_SYSTEM_CONNECTOR_INFO_PLATFORM_CRUSH_DEFAULT         (0x00000020U)
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_BOOT_DISPLAYS
+ *
+ * This command returns a mask of boot display IDs.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   bootDisplayMask
+ *     This parameter returns the mask of boot display IDs.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_BOOT_DISPLAYS (0x73011eU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS_MESSAGE_ID (0x1EU)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 bootDisplayMask;
+} NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_EXECUTE_ACPI_METHOD
+ *
+ * This command is used to execute general MXM ACPI methods.
+ *
+ *   method
+ *     This parameter identifies the MXM ACPI API to be invoked.
+ *     Valid values for this parameter are:
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXMI
+ *         This value specifies that the MXMI API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXMS
+ *         This value specifies that the MXMS API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXMX
+ *         This value specifies that the MXMX API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_GPUON
+ *         This value specifies that the Hybrid GPU ON API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_GPUOFF
+ *         This value specifies that the Hybrid GPU OFF API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_GPUSTA
+ *         This value specifies that the Hybrid GPU STA API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXDS
+ *         This value specifies that the Hybrid GPU MXDS API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NVHG_MXMX
+ *         This value specifies that the Hybrid GPU MXMX API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DOS
+ *         This value specifies that the Hybrid GPU DOS API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_ROM
+ *         This value specifies that the Hybrid GPU ROM API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DOD
+ *         This value specifies that the Hybrid GPU DOD API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_SUPPORT
+ *         This value specifies that the Hybrid GPU DSM subfunction SUPPORT
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HYBRIDCAPS
+ *         This value specifies that the Hybrid GPU DSM subfunction HYBRIDCAPS
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_POLICYSELECT
+ *         This value specifies that the Hybrid GPU DSM subfunction
+ *         POLICYSELECT API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_POWERCONTROL
+ *         This value specifies that the Hybrid GPU DSM subfunction
+ *         POWERCONTROL API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_PLATPOLICY
+ *         This value specifies that the Hybrid GPU DSM subfunction PLATPOLICY
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_DISPLAYSTATUS
+ *         This value specifies that the Hybrid GPU DSM subfunction
+ *         DISPLAYSTATUS API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MDTL
+ *         This value specifies that the Hybrid GPU DSM subfunction MDTL
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HCSMBLIST
+ *         This value specifies that the Hybrid GPU DSM subfunction HCSMBLIST
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HCSMBADDR
+ *         This value specifies that the Hybrid GPU DSM subfunction HCSMBADDR
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HCREADBYTE
+ *         This value specifies that the Hybrid GPU DSM subfunction HCREADBYTE
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HCSENDBYTE
+ *         This value specifies that the Hybrid GPU DSM subfunction HCSENDBYTE
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HCGETSTATUS
+ *         This value specifies that the Hybrid GPU DSM subfunction
+ *         HCGETSTATUS API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HCTRIGDDC
+ *         This value specifies that the Hybrid GPU DSM subfunction HCTRIGDDC
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HCGETDDC
+ *         This value specifies that the Hybrid GPU DSM subfunction HCGETDDC
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DCS
+ *         This value specifies that the Hybrid GPU DCS API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MXM_MXSS
+ *         This value specifies that the DSM MXM subfunction MXSS
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MXM_MXMI
+ *         This value specifies that the DSM MXM subfunction MXMI
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MXM_MXMS
+ *         This value specifies that the DSM MXM subfunction MXMS
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MXM_MXPP
+ *         This value specifies that the DSM MXM subfunction MXPP
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MXM_MXDP
+ *         This value specifies that the DSM MXM subfunction MXDP
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MXM_MDTL
+ *         This value specifies that the DSM MXM subfunction MDTL
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MXM_MXCB
+ *         This value specifies that the DSM MXM subfunction MXCB
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_CTL_REMAPFUNC
+ *         This value specifies that DSM generic remapping should return the
+ *         function and subfunction when this API is invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_HYBRIDCAPS
+ *         This value specifies that the generic DSM subfunction HYBRIDCAPS
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_POLICYSELECT
+ *         This value specifies that the generic DSM subfunction POLICYSELECT
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_PLATPOLICY
+ *         This value specifies that the generic DSM subfunction PLATPOLICY
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_DISPLAYSTATUS
+ *         This value specifies that the generic DSM subfunction DISPLAYSTATUS
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_MDTL
+ *         This value specifies that the generic DSM subfunction MDTL
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_GETOBJBYTYPE
+ *         This value specifies that the generic DSM subfunction GETOBJBYTYPE
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_GETALLOBJS
+ *         This value specifies that the generic DSM subfunction GETALLOBJS
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_GETEVENTLIST
+ *         This value specifies that the generic DSM subfunction GETEVENTLIST
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_GETBACKLIGHT
+ *         This value specifies that the generic DSM subfunction GETBACKLIGHT
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_CTL_TESTSUBFUNCENABLED
+ *         This value specifies that the testIfDsmSubFunctionEnabled check
+ *         should be performed for the function/subfunction when this API is
+ *         invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_CTL_GETSUPPORTEDFUNC
+ *         This value specifies that the list of supported generic DSM
+ *         functions should be returned.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_NVOP_OPTIMUSCAPS
+ *         This value specifies that the DSM NVOP subfunction OPTIMUSCAPS
+ *         API is to be invoked.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_NVOP_OPTIMUSFLAG
+ *         This value specifies that the DSM NVOP subfunction OPTIMUSFLAG
+ *         API is to be invoked. This API sets a flag in the SBIOS to indicate
+ *         whether the HD Audio controller is disabled/enabled in the GPU
+ *         config space. The SBIOS uses this flag to restore the audio state
+ *         after resuming from S3/S4.
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_WMMX_NVOP_GPUON
+ *         This value specifies that the WMMX (WMI-ACPI) GPON method is to be
+ *         invoked. This call should be made below DPC level by any client.
+ *   inData
+ *     This parameter specifies the method-specific input buffer. Data is
+ *     passed to the specified API using this buffer. For display related
+ *     APIs the associated display mask can be found at a byte offset within
+ *     the inData buffer using the following method-specific values:
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXMX_DISP_MASK_OFFSET
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXDS_DISP_MASK_OFFSET
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NVHG_MXMX_DISP_MASK_OFFSET
+ *       NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DOS_DISP_MASK_OFFSET
+ *   inDataSize
+ *     This parameter specifies the size of the inData buffer in bytes.
+ *   outStatus
+ *     This parameter returns the status code from the associated ACPI call.
+ *   outData
+ *     This parameter specifies the method-specific output buffer. Data
+ *     is returned by the specified API using this buffer.
+ *   outDataSize
+ *     This parameter specifies the size of the outData buffer in bytes.
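+ *
+ * Illustrative sketch (not part of this interface): invoking the DSM
+ * SUPPORT subfunction with an output buffer. NvRmControl() stands in for
+ * the client's actual RM control entry point; NV_PTR_TO_NvP64 and
+ * NvP64_NULL are the NvP64 helpers from nvtypes.h, and the buffer layout
+ * is method-specific.
+ *
+ *   NvU32 supportMask[8] = { 0 };
+ *   NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS p = { 0 };
+ *   p.method      = NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_SUPPORT;
+ *   p.inData      = NvP64_NULL;             // no input payload here
+ *   p.inDataSize  = 0;
+ *   p.outData     = NV_PTR_TO_NvP64(supportMask);
+ *   p.outDataSize = sizeof(supportMask);
+ *   status = NvRmControl(hClient, hDisplayCommon,
+ *                        NV0073_CTRL_CMD_SYSTEM_EXECUTE_ACPI_METHOD,
+ *                        &p, sizeof(p));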
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0073_CTRL_CMD_SYSTEM_EXECUTE_ACPI_METHOD (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS { + NvU32 method; + NV_DECLARE_ALIGNED(NvP64 inData, 8); + NvU16 inDataSize; + NvU32 outStatus; + NV_DECLARE_ALIGNED(NvP64 outData, 8); + NvU16 outDataSize; +} NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_PARAMS; + + +/* valid method parameter values */ +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXMX (0x00000002U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXMX_DISP_MASK_OFFSET (0x00000001U) + +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_GPUON (0x00000003U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_GPUOFF (0x00000004U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_GPUSTA (0x00000005U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXDS (0x00000006U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NVHG_MXMX (0x00000007U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DOS (0x00000008U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_ROM (0x00000009U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DOD (0x0000000aU) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_SUPPORT (0x0000000bU) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HYBRIDCAPS (0x0000000cU) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_POLICYSELECT (0x0000000dU) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_POWERCONTROL (0x0000000eU) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_PLATPOLICY (0x0000000fU) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_DISPLAYSTATUS (0x00000010U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MDTL (0x00000011U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HCSMBLIST (0x00000012U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HCSMBADDR (0x00000013U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HCREADBYTE (0x00000014U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HCSENDBYTE (0x00000015U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HCGETSTATUS (0x00000016U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HCTRIGDDC (0x00000017U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_HCGETDDC (0x00000018U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DCS (0x00000019U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MXM_MXSS (0x0000001aU) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MXM_MXMI (0x0000001bU) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MXM_MXMS (0x0000001cU) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MXM_MXPP (0x0000001dU) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MXM_MXDP (0x0000001eU) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MXM_MDTL (0x0000001fU) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MXM_MXCB (0x00000020U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_MXM_GETEVENTLIST (0x00000021U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GETMEMTABLE (0x00000022U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GETMEMCFG (0x00000023U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GETOBJBYTYPE (0x00000024U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GETALLOBJS (0x00000025U) +#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_CTL_REMAPFUNC (0x00000026U) 
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_DISPLAYSTATUS          (0x0000002aU)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_MDTL                   (0x0000002bU)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_GETOBJBYTYPE           (0x0000002cU)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_GETALLOBJS             (0x0000002dU)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_GETEVENTLIST           (0x0000002eU)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_GETBACKLIGHT           (0x0000002fU)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_CTL_TESTSUBFUNCENABLED (0x00000030U)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_CTL_GETSUPPORTEDFUNC   (0x00000031U)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_NVOP_OPTIMUSCAPS               (0x00000032U)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_WMMX_NVOP_GPUON                    (0x00000033U)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_NVOP_OPTIMUSFLAG               (0x00000034U)
+
+
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_GETCALLBACKS           (0x00000036U)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_NBCI_SUPPORTFUNCS              (0x00000037U)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_NBCI_PLATCAPS                  (0x00000038U)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_NBCI_PLATPOLICY                (0x00000039U)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_GENERIC_MSTL                   (0x0000003aU)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DSM_NVGPS_FUNC_SUPPORT             (0x0000003bU)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDS                          (0x0000003cU)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDM                          (0x0000003dU)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXID                          (0x0000003eU)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_LRST                          (0x0000003fU)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DDC_EDID                           (0x00000040U)
+
+/* valid input buffer offset values */
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_MXDS_DISP_MASK_OFFSET              (0x00000004U)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NVHG_MXMX_DISP_MASK_OFFSET         (0x00000004U)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DOS_DISP_MASK_OFFSET               (0x00000004U)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDS_DISP_MASK_OFFSET         (0x00000004U)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXDM_DISP_MASK_OFFSET         (0x00000004U)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_MXID_DISP_MASK_OFFSET         (0x00000004U)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_NBCI_LRST_DISP_MASK_OFFSET         (0x00000004U)
+#define NV0073_CTRL_SYSTEM_EXECUTE_ACPI_METHOD_DDC_EDID_DISP_MASK_OFFSET          (0x00000004U)
+
+
+
+/*
+* NV0073_CTRL_CMD_SYSTEM_VRR_DISPLAY_INFO
+*
+* This command is used to update information about VRR-capable monitors.
+*   subDeviceInstance
+*     This parameter specifies the subdevice instance within the
+*     NV04_DISPLAY_COMMON parent device to which the operation should be
+*     directed. This parameter must specify a value between zero and the
+*     total number of subdevices within the parent device. This parameter
+*     should be set to zero for default behavior.
+*
+*   displayId
+*     DisplayId of the panel that the client wants to add to or remove from
+*     the list of VRR-capable monitors.
+*
+*   bAddition
+*     When set to NV_TRUE, signifies that the VRR monitor is to be added.
+*     When set to NV_FALSE, signifies that the VRR monitor is to be removed.
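+*
+*   Illustrative sketch (not part of this interface), registering a panel as
+*   VRR-capable; NvRmControl() stands in for the client's actual RM control
+*   entry point and displayId is hypothetical:
+*
+*     NV0073_CTRL_SYSTEM_VRR_DISPLAY_INFO_PARAMS p = { 0 };
+*     p.subDeviceInstance = 0;
+*     p.displayId         = displayId;
+*     p.bAddition         = NV_TRUE;   // NV_FALSE would remove it again
+*     status = NvRmControl(hClient, hDisplayCommon,
+*                          NV0073_CTRL_CMD_SYSTEM_VRR_DISPLAY_INFO,
+*                          &p, sizeof(p));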
+*
+*/
+#define NV0073_CTRL_CMD_SYSTEM_VRR_DISPLAY_INFO (0x73012cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_VRR_DISPLAY_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_VRR_DISPLAY_INFO_PARAMS_MESSAGE_ID (0x2CU)
+
+typedef struct NV0073_CTRL_SYSTEM_VRR_DISPLAY_INFO_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bAddition;
+} NV0073_CTRL_SYSTEM_VRR_DISPLAY_INFO_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_UNPLUG_STATE
+ *
+ * This command can be used to retrieve hotplug and unplug state
+ * information that is currently recorded by the RM. This information is
+ * used by the client to determine which displays to detect after a
+ * hotplug event occurs. Or, if the client knows that this device generates
+ * a hot plug/unplug signal on all connectors, then this can be used to cull
+ * displays from detection. The displayIds on which hotplug/unplug has
+ * happened will be reported only ONCE to the client. That is, if the call
+ * is made multiple times for the same event update, subsequent calls will
+ * report the display mask as 0.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   flags
+ *     This parameter specifies optional flags to be used while retrieving
+ *     the hotplug state information.
+ *     Here are the currently defined fields:
+ *       NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_FLAGS_LID
+ *         A client uses this field to determine the lid state.
+ *         Possible values are:
+ *           NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_FLAGS_LID_OPEN
+ *             The lid is open.
+ *           NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_FLAGS_LID_CLOSED
+ *             The lid is closed. The client should remove devices as
+ *             reported inside the
+ *             NV0073_CTRL_SYSTEM_GET_CONNECT_POLICY_PARAMS.lidClosedMask.
+ *   hotPlugMask
+ *     This display mask specifies an NV0073_DISPLAY_MASK value describing
+ *     the set of displays that have seen a hotplug.
+ *   hotUnplugMask
+ *     This display mask specifies an NV0073_DISPLAY_MASK value describing
+ *     the set of displays that have seen a hot unplug.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_UNPLUG_STATE (0x73012dU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS_MESSAGE_ID (0x2DU)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 hotPlugMask;
+    NvU32 hotUnplugMask;
+} NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS;
+
+/* valid get hotplug state flags */
+#define NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_FLAGS_LID                   0:0
+#define NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_FLAGS_LID_OPEN   (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_FLAGS_LID_CLOSED (0x00000001U)
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_CLEAR_ELV_BLOCK
+ *
+ * This command instructs the RM to explicitly clear any
+ * ELV block. Clients should call this before attempting core-channel
+ * updates when in VRR one-shot mode.
+ * ELV block mode will be
+ * properly restored to its appropriate setting based on the stall-lock
+ * in Supervisor3 after the core channel update.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *
+ *   displayId
+ *     The public ID of the Output Display which is to be used for VRR.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_CLEAR_ELV_BLOCK (0x73012eU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS_MESSAGE_ID (0x2EU)
+
+typedef struct NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+} NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR
+ *
+ * This command arms the display modeset supervisor to operate in
+ * a lightweight mode. By calling this, the client is implicitly
+ * promising not to make any changes in the next modeset that require
+ * the full supervisor. After SV3, the LWSV will disarm and any subsequent
+ * modesets will revert to full supervisors. This must be called separately
+ * for every display that will be part of the modeset.
+ * It is recommended that the client explicitly disarm the lightweight
+ * supervisor after every modeset, as null modesets will not trigger the
+ * supervisor interrupts and the RM will not be able to disarm it
+ * automatically.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *
+ *   displayId
+ *     The public ID of the Output Display which is to be used for VRR.
+ *
+ *   bArmLWSV
+ *     If this is set to NV_TRUE, the RM will arm the lightweight supervisor
+ *     for the next modeset.
+ *     If this is set to NV_FALSE, the RM will disarm the lightweight
+ *     supervisor.
+ *
+ *   bVrrState
+ *     VRR state to be changed.
+ *
+ *   vActive
+ *     GPU-SRC vertical active value.
+ *
+ *   vfp
+ *     GPU-SRC vertical front porch.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR (0x73012fU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS_MESSAGE_ID (0x2FU)
+
+typedef struct NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bArmLWSV;
+    NvBool bVrrState;
+    NvU32  vActive;
+    NvU32  vfp;
+} NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS;
+
+
+
+/*
+* NV0073_CTRL_CMD_SYSTEM_CONFIG_VRR_PSTATE_SWITCH
+*
+* This command is used to configure pstate switch parameters on VRR monitors.
+*   subDeviceInstance
+*     This parameter specifies the subdevice instance within the
+*     NV04_DISPLAY_COMMON parent device to which the operation should be
+*     directed. This parameter must specify a value between zero and the
+*     total number of subdevices within the parent device. This parameter
+*     should be set to zero for default behavior.
+*
+*   displayId
+*     DisplayId of the monitor being configured for VRR.
+*
+*   bVrrState
+*     When set to NV_TRUE, signifies that VRR is about to become active.
+*     When set to NV_FALSE, signifies that VRR is about to become suspended.
+*
+*   bVrrDirty
+*     When set to NV_TRUE, indicates that the VRR configuration has been
+*     changed.
+*     When set to NV_FALSE, this indicates transitions from one-shot mode to
+*     continuous mode and vice versa.
+*
+*   bVrrEnabled
+*     When set to NV_TRUE, indicates that VRR has been enabled, i.e. vBp
+*     extended by 2 lines.
+*
+*   maxVblankExtension
+*     When VRR is enabled, this is the maximum number of lines by which the
+*     vblank can be extended. Only updated when bVrrDirty = true.
+*
+*   internalVRRHeadVblankStretch
+*     When VRR is enabled, this is the maximum number of lines by which the
+*     vblank can be extended on NVSR and DD panels. Only updated when
+*     bVrrDirty = true.
+*
+*   minVblankExtension
+*     When VRR is enabled, this is the minimum number of lines that must be
+*     present in the vblank. The purpose is to cap the maximum refresh rate
+*     (currently only for HDMI 2.1 VRR compliance).
+*/
+#define NV0073_CTRL_CMD_SYSTEM_CONFIG_VRR_PSTATE_SWITCH (0x730134U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS_MESSAGE_ID (0x34U)
+
+typedef struct NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bVrrState;
+    NvBool bVrrDirty;
+    NvBool bVrrEnabled;
+    NvU32  maxVblankExtension;
+    NvU32  internalVRRHeadVblankStretch;
+    NvU32  minVblankExtension;
+} NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX
+ *
+ * This command is used to query the display mask of all displays
+ * that support dynamic display MUX.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   muxDisplayMask (out)
+ *     Mask of all displays that support dynamic display MUX.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX (0x73013dU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS_MESSAGE_ID (0x3DU)
+
+typedef struct NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 muxDisplayMask;
+} NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH
+ *
+ * This command allocates a specified amount of ISO memory bandwidth for
+ * display. If the requested amount of bandwidth cannot be allocated (either
+ * because it exceeds the total bandwidth available to the system, or because
+ * too much bandwidth is already allocated to other clients), the call will
+ * fail and NV_ERR_INSUFFICIENT_RESOURCES will be returned.
+ *
+ * If bandwidth has already been allocated via a prior call, and a new
+ * allocation is requested, the new allocation will replace the old one. (If
+ * the new allocation fails, the old allocation remains in effect.)
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   averageBandwidthKBPS
+ *     This parameter specifies the amount of ISO memory bandwidth requested.
+ *   floorBandwidthKBPS
+ *     This parameter specifies the minimum required (i.e., floor) dramclk
+ *     frequency, multiplied by the width of the pipe over which the display
+ *     data will travel. (It is understood that the bandwidth calculated by
+ *     multiplying the clock frequency by the pipe width will not be
+ *     realistically achievable, due to overhead in the memory subsystem. The
+ *     API will not actually use the bandwidth value, except to reverse the
+ *     calculation to get the required dramclk frequency.)
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INSUFFICIENT_RESOURCES
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_GENERIC
+ */
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_INTERNAL_ALLOCATE_DISPLAY_BANDWIDTH
+ *
+ * This command is identical to
+ * NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH, except that it routes to
+ * Physical RM, and is for internal RM use. Clients are advised to use
+ * NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH instead.
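+ *
+ * Illustrative numeric sketch (assumed values and units, not taken from this
+ * file): with a required dramclk floor of 1.6 GHz (1600000 kHz) and a
+ * 32-byte-wide pipe, a client would pass
+ *
+ *   floorBandwidthKBPS = 1600000 * 32 = 51200000
+ *
+ * and RM would only reverse that multiplication to recover the dramclk
+ * floor.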
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH          (0x730143U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_SYSTEM_INTERNAL_ALLOCATE_DISPLAY_BANDWIDTH (0x730157U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_INTERNAL_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS_TYPE {
+    NvU32 subDeviceInstance;
+    NvU32 averageBandwidthKBPS;
+    NvU32 floorBandwidthKBPS;
+} NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS_TYPE;
+
+#define NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS_MESSAGE_ID (0x43U)
+
+typedef NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS_TYPE NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS;
+#define NV0073_CTRL_SYSTEM_INTERNAL_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS_MESSAGE_ID (0x57U)
+
+typedef NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS_TYPE NV0073_CTRL_SYSTEM_INTERNAL_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS;
+
+/*
+ * NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS
+ *
+ * This structure represents the hotplug event config control parameters.
+ *
+ *   subDeviceInstance
+ *     This parameter should specify the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *
+ *   deviceMapFilter
+ *     This parameter returns (in GET) or should specify (in SET) a device
+ *     map indicating device(s) to sense.
+ *
+ *   hotPluginSense
+ *     This parameter returns (in GET) or should specify (in SET) a device
+ *     map indicating device(s) plugged in that caused the most recent
+ *     hotplug event.
+ *
+ *   hotUnplugSense
+ *     This parameter returns (in GET) or should specify (in SET) a device
+ *     map indicating device(s) unplugged that caused the most recent hotplug
+ *     event.
+ */
+
+typedef struct NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 deviceMapFilter;
+    NvU32 hotPluginSense;
+    NvU32 hotUnplugSense;
+} NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_EVENT_CONFIG
+ *
+ * This command fetches the hotplug event configuration.
+ *
+ * See @ref NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS for documentation
+ * on the parameters.
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_EVENT_CONFIG (0x730144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_HOTPLUG_EVENT_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_HOTPLUG_EVENT_CONFIG_PARAMS_MESSAGE_ID (0x44U)
+
+typedef NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS NV0073_CTRL_SYSTEM_GET_HOTPLUG_EVENT_CONFIG_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_SET_HOTPLUG_EVENT_CONFIG
+ *
+ * This command sets the hotplug event configuration.
+ *
+ * See @ref NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS for documentation
+ * on the parameters.
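+ *
+ * Illustrative sketch (not part of this interface): reading the current
+ * config, widening the sense filter, and writing it back. Since the GET and
+ * SET params are the same underlying structure, one variable can be reused;
+ * NvRmControl() and newDeviceMask are hypothetical.
+ *
+ *   NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS c = { 0 };
+ *   NvRmControl(hClient, hDisplayCommon,
+ *               NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_EVENT_CONFIG,
+ *               &c, sizeof(c));
+ *   c.deviceMapFilter |= newDeviceMask;  // add device(s) to sense
+ *   NvRmControl(hClient, hDisplayCommon,
+ *               NV0073_CTRL_CMD_SYSTEM_SET_HOTPLUG_EVENT_CONFIG,
+ *               &c, sizeof(c));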
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_SET_HOTPLUG_EVENT_CONFIG (0x730145U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_SET_HOTPLUG_EVENT_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_SET_HOTPLUG_EVENT_CONFIG_PARAMS_MESSAGE_ID (0x45U)
+
+typedef NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS NV0073_CTRL_SYSTEM_SET_HOTPLUG_EVENT_CONFIG_PARAMS;
+
+
+
+/*
+* NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS
+*
+* This command is used to read Core channel, Cursor channel, Window channel,
+* and Head register values and encode these values with ProtoDmp.
+*
+*   subDeviceInstance (in)
+*     This parameter specifies the subdevice instance within the
+*     NV04_DISPLAY_COMMON parent device to which the operation should be
+*     directed.
+*   headMask (in)
+*     Head mask representing which register values should be encoded.
+*   windowMask (in)
+*     Window channel mask whose register values should be encoded.
+*   bRecordCoreChannel (in)
+*     Indicates whether or not to encode core channel register values.
+*   bRecordCursorChannel (in)
+*     Indicates whether or not to encode cursor channel register values.
+*
+* Possible status values returned are:
+*   NV_OK
+*   NV_ERR_INVALID_ARGUMENT
+*   NV_ERR_NOT_SUPPORTED
+*/
+#define NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS (0x73014aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS_MESSAGE_ID (0x4AU)
+
+typedef struct NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  headMask;
+    NvU32  windowMask;
+    NvBool bRecordCoreChannel;
+    NvBool bRecordCursorChannel;
+} NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT
+ *
+ * This command is used to query whether sideband I2C is supported for the
+ * given display device.
+ *
+ *   subDeviceInstance (in)
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT (0x73014bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS_MESSAGE_ID (0x4BU)
+
+typedef struct NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS {
+    NvU32  subDeviceInstance;
+    NvBool bIsSidebandI2cSupported;
+} NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_SR_SUPPORT
+ *
+ * This command is used to query whether SIDEBAND SR can be used with the
+ * given display device. If the PSR API is supported on the system,
+ * then sideband SR support is reported as false.
+ *
+ *   subDeviceInstance (in)
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation
+ *     should be directed.
+ *   displayId (in)
+ *     This parameter inputs the displayId of the active display. A value
+ *     of zero indicates no display is active.
+ *   bIsSidebandSrSupported
+ *     If true, sideband SR is supported (rather than the PSR API).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_SR_SUPPORT (0x73014cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_SR_SUPPORT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_SR_SUPPORT_PARAMS_MESSAGE_ID (0x4CU)
+
+typedef struct NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_SR_SUPPORT_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bIsSidebandSrSupported;
+} NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_SR_SUPPORT_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_VRR_SET_RGLINE_ACTIVE
+ *
+ * This command is used by clients such as nvkms to set up VRR-specific
+ * memory operations in RM, such as mapping the client-created shared memory
+ * into RM and reserving an RGline for processing of self-refresh timeout
+ * related calculations.
+ *
+ * The expectation is that a client which calls this command with
+ * bEnable = TRUE should also call this command with bEnable = FALSE on the
+ * same head when VRR needs to be disabled.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_GENERIC
+ */
+
+/*
+ * This is the shared structure that will be used to communicate between
+ * Physical RM and clients. As of now the access relies on single-source-of-
+ * truth operation, i.e. only Physical RM writes into the shared location
+ * and the client (nvkms) reads from the same location.
+ *
+ * The "dataTimeStamp" field captures a timestamp taken before and after
+ * updating the flip-delay-related data fields (all fields except "timeout").
+ * Clients use this timestamp to determine whether RM updated the data
+ * while they were reading it.
+ * As of now the "timeout" field does not have such protection, as access to
+ * this field happens only in response to a notification from RM.
+ */
+typedef struct NV0073_CTRL_RM_VRR_SHARED_DATA {
+    NvU32  expectedFrameNum;
+    NvU32  timeout;
+    NV_DECLARE_ALIGNED(NvU64 flipTimeStamp, 8);
+    NvBool bCheckFlipTime;
+    NvBool bFlipTimeAdjustment;
+    NV_DECLARE_ALIGNED(NvU64 dataTimeStamp, 8);
+} NV0073_CTRL_RM_VRR_SHARED_DATA;
+
+#define NV0073_CTRL_CMD_SYSTEM_VRR_SET_RGLINE_ACTIVE (0x73014dU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_VRR_SET_RGLINE_ACTIVE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_VRR_SET_RGLINE_ACTIVE_PARAMS_MESSAGE_ID (0x4DU)
+
+typedef struct NV0073_CTRL_CMD_SYSTEM_VRR_SET_RGLINE_ACTIVE_PARAMS {
+    NvU32    subDeviceInstance;
+    NvBool   bEnable;
+    NvU32    head;
+    NvU32    height;
+    NvU32    maxFrameTime;
+    NvU32    minFrameTime;
+    NvHandle hMemory;
+} NV0073_CTRL_CMD_SYSTEM_VRR_SET_RGLINE_ACTIVE_PARAMS;
+
+/*
+ * Maps the memory allocated in Kernel RM into Physical RM using the
+ * memory descriptor information provided.
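+ *
+ * Illustrative sketch (not part of this interface): describing an existing
+ * allocation to Physical RM and later tearing the mapping down. All field
+ * values and NvRmControl() are placeholders.
+ *
+ *   NV0073_CTRL_CMD_SYSTEM_MAP_SHARED_DATA_PARAMS m = { 0 };
+ *   m.memDescInfo.base           = physBase;     // hypothetical base address
+ *   m.memDescInfo.size           = allocSize;    // hypothetical size
+ *   m.memDescInfo.alignment      = 4096;
+ *   m.memDescInfo.addressSpace   = addrSpace;    // hypothetical
+ *   m.memDescInfo.cpuCacheAttrib = cacheAttrib;  // hypothetical
+ *   m.bMap = NV_TRUE;                            // NV_FALSE would unmap
+ *   status = NvRmControl(hClient, hDisplayCommon,
+ *                        NV0073_CTRL_CMD_SYSTEM_MAP_SHARED_DATA,
+ *                        &m, sizeof(m));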
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+typedef struct NV0073_CTRL_SHARED_MEMDESC_INFO {
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NV_DECLARE_ALIGNED(NvU64 alignment, 8);
+    NvU32 addressSpace;
+    NvU32 cpuCacheAttrib;
+} NV0073_CTRL_SHARED_MEMDESC_INFO;
+
+#define NV0073_CTRL_CMD_SYSTEM_MAP_SHARED_DATA (0x730151U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_MAP_SHARED_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_MAP_SHARED_DATA_PARAMS_MESSAGE_ID (0x51U)
+
+typedef struct NV0073_CTRL_CMD_SYSTEM_MAP_SHARED_DATA_PARAMS {
+    NV_DECLARE_ALIGNED(NV0073_CTRL_SHARED_MEMDESC_INFO memDescInfo, 8);
+    NvU32  subDeviceInstance;
+    NvBool bMap;
+} NV0073_CTRL_CMD_SYSTEM_MAP_SHARED_DATA_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_LOADV_COUNTER_INFO
+ *
+ * Fetches the LoadV counter information from the corresponding registers.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   head
+ *     headId of the panel for which we are going to read the LoadV info.
+ *   counterValue
+ *     Counts the number of frames that have been processed or synchronized
+ *     with the display.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_LOADV_COUNTER_INFO (0x730154U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_GET_LOADV_COUNTER_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_LOADV_COUNTER_INFO_PARAMS_MESSAGE_ID (0x54U)
+
+typedef struct NV0073_CTRL_CMD_SYSTEM_GET_LOADV_COUNTER_INFO_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NvU32 counterValue;
+} NV0073_CTRL_CMD_SYSTEM_GET_LOADV_COUNTER_INFO_PARAMS;
+
+/*!
+ * @brief Defines Display Low Power feature IDs
+ *
+ * The following defines specify unique IDs identifying Display power-saving
+ * features.
+ */
+#define NV0073_CTRL_DISP_LPWR_FEATURE_ID_INVALID                0x0000
+#define NV0073_CTRL_DISP_LPWR_FEATURE_ID_ALPM                   0x0001
+#define NV0073_CTRL_DISP_LPWR_FEATURE_ID_CLK_SWITCH_HUBCLK      0x0002
+#define NV0073_CTRL_DISP_LPWR_FEATURE_ID_CLK_SWITCH_RISCV0CLK   0x0003
+#define NV0073_CTRL_DISP_LPWR_FEATURE_ID_CLK_SWITCH_DISPCLK     0x0004
+#define NV0073_CTRL_DISP_LPWR_FEATURE_ID_CLK_SWITCH_POSTRG_CLKS 0x0005
+#define NV0073_CTRL_DISP_LPWR_FEATURE_ID_CLK_GATING_HUBCLK      0x0006
+#define NV0073_CTRL_DISP_LPWR_FEATURE_ID_CLK_GATING_DISPCLK     0x0007
+#define NV0073_CTRL_DISP_LPWR_FEATURE_ID_CLK_GATING_POSTRG_CLKS 0x0008
+#define NV0073_CTRL_DISP_LPWR_FEATURE_ID_MSCG                   0x0009
+
+// Parameters/characteristics of Display ALPM
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_ALPM_INVALID       0x0000
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_ALPM_SUPPORTED     0x0001
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_ALPM_ENABLED       0x0002
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_ALPM_TYPE_AUX_LESS 0x0003
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_ALPM_ENGAGE_TIME   0x0004
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_ALPM_ENTRY_COUNT   0x0005
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_ALPM_EXIT_COUNT    0x0006
+
+/*!
+ * @brief Parameters/characteristics of hubclk, dispclk, riscv0clk and Post-RG clock switching
+ *
+ * The following are the parameters/characteristics of hubclk, dispclk,
+ * riscv0clk and Post-RG clock switching.
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_SWITCH_INVALID           (0x0000)
+
+/*!
+ * Property specifies whether Clock Switching is supported.
+ * This property is applicable for hubclk, dispclk, riscv0clk and Post-RG clk.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_SWITCH_SUPPORT           (0x0001)
+
+/*!
+ * Property specifies whether Clock Switching is enabled.
+ * This property is applicable for hubclk, dispclk, riscv0clk and Post-RG clk.
+ * (This property allows the Get and Set operations)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_SWITCH_ENABLED           (0x0002)
+
+/*!
+ * Property specifies the time (in us) for which the specified clock was in
+ * Safe mode. This property is applicable for hubclk, dispclk, riscv0clk and
+ * Post-RG clk.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_SWITCH_SAFE_TIME_US      (0x0003)
+
+/*!
+ * Property specifies the time (in us) for which the specified clock was in
+ * Alternate mode. This property is only applicable to riscv0clk.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_SWITCH_ALTERNATE_TIME_US (0x0004)
+
+/*!
+ * Property specifies whether the specified clock is forced to Function mode.
+ * This property is applicable for hubclk, dispclk, riscv0clk and Post-RG clk.
+ * (This property allows the Get and Set operations)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_SWITCH_FORCE_FUNCTION    (0x0005)
+
+/*!
+ * Property specifies whether an error occurred because the specified clock
+ * was being switched to Safe mode but the switch did not complete within the
+ * programmed time.
+ * This property is applicable for hubclk, dispclk, riscv0clk and Post-RG clk.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_SWITCH_SAFE_ERROR        (0x0006)
+
+/*!
+ * Property specifies whether an error occurred because the specified clock
+ * was being switched to Function mode but the switch did not complete within
+ * the programmed time.
+ * This property is applicable for hubclk, dispclk, riscv0clk and Post-RG clk.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_SWITCH_UNSAFE_ERROR      (0x0007)
+
+/*!
+ * Property specifies whether an error occurred because riscv0clk was being
+ * switched to Alternate mode but the switch did not complete within the
+ * programmed time. This property is only applicable to riscv0clk.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_SWITCH_ALTER_ERROR       (0x0008)
+
+/*!
+ * Property specifies whether an error occurred because riscv0clk was being
+ * switched to Function mode but the switch did not complete within the
+ * programmed time. This property is only applicable to riscv0clk.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_SWITCH_UNALTER_ERROR     (0x0009)
+
+/*!
+ * Property specifies the current state of the specified clock,
+ * i.e. Safe, Function, or Alternate.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_SWITCH_STATUS            (0x0010)
+
+/*!
+ * @brief Parameters/characteristics of hubclk, dispclk and Post-RG clock gating
+ *
+ * The following are the parameters/characteristics of hubclk, dispclk and
+ * Post-RG clock gating.
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_GATING_INVALID       (0x0000)
+
+/*!
+ * Property specifies whether Clock Gating is supported.
+ * This property is applicable for hubclk, dispclk and Post-RG clk.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_GATING_SUPPORT       (0x0001)
+/*!
+ * Property specifies whether Clock Gating is enabled.
+ * This property is applicable for hubclk, dispclk and Post-RG clk.
+ * (This property allows the Get and Set operations)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_GATING_ENABLED       (0x0002)
+
+/*!
+ * Property specifies the time (in us) for which the specified clock was
+ * gated. This property is applicable for hubclk, dispclk and Post-RG clk.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_GATING_GATE_TIME_US  (0x0003)
+
+/*!
+ * Property specifies the current state of the specified clock,
+ * i.e. Gated or not Gated.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_CLK_GATING_STATUS        (0x0004)
+
+/*!
+ * @brief Parameters/characteristics of MSCG
+ *
+ * The following are the parameters/characteristics of MSCG.
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_MSCG_INVALID             (0x0000)
+
+/*!
+ * Property specifies whether DPS1 is supported.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_MSCG_DPS1_SUPPORT        (0x0001)
+
+/*!
+ * Property specifies whether DPS2 is supported.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_MSCG_DPS2_SUPPORT        (0x0002)
+
+/*!
+ * Property specifies whether MSCG is enabled.
+ * (This property allows the Get and Set operations)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_MSCG_ENABLED             (0x0003)
+
+/*!
+ * Property specifies the time (in us) for which DPS1 was enabled in the
+ * ACTIVE region.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_MSCG_DPS1_ACTIVE_TIME_US (0x0004)
+
+/*!
+ * Property specifies the time (in us) for which DPS1 was enabled in the
+ * VBLANK region.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_MSCG_DPS1_VBLANK_TIME_US (0x0005)
+
+/*!
+ * Property specifies the time (in us) for which DPS2 was enabled in the
+ * ACTIVE region.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_MSCG_DPS2_ACTIVE_TIME_US (0x0006)
+
+/*!
+ * Property specifies the time (in us) for which DPS2 was enabled in the
+ * VBLANK region.
+ * (This property allows the Get operation)
+ */
+#define NV0073_CTRL_DISP_LPWR_PARAMETER_ID_MSCG_DPS2_VBLANK_TIME_US (0x0007)
+
+/*!
+ * @brief Structure to identify a display low power feature
+ *
+ * Structure to get/set the feature ID. It has two fields:
+ *   FeatureID[In]    : Feature identifier
+ *   SubFeatureID[In] : Any subfeature associated with the feature
+ *
+ * In general, a power saving feature is identified by featureId and
+ * subfeatureId. Add an enum to this structure if a specific power feature
+ * needs additional fields. A "union" should follow XAPI standards.
+ */
+typedef struct NV0073_CTRL_DISP_LPWR_FEATURE {
+    NvU16 featureId;
+    NvU16 subfeatureId;
+} NV0073_CTRL_DISP_LPWR_FEATURE;
+
+/*!
+ * @brief Parameter structure
+ *
+ * Structure to get/set a parameter/characteristic.
+ * Each parameter has three fields:
+ *   1) ID    [In]     : Parameter identifier
+ *   2) Flag  [In/Out] : Flags
+ *   3) Value [In/Out] : Value of the parameter
+ *
+ * Add an enum to this structure if additional fields are needed for some
+ * special parameters.
+ */
+typedef struct NV0073_CTRL_DISP_LPWR_PARAMETER {
+    NvU16 paramId;
+    NvU16 flag;
+    NvU32 val;
+} NV0073_CTRL_DISP_LPWR_PARAMETER;
+
+/*!
+ * @brief Flags for PARAMETER
+ *
+ * SUCCEED:
+ *   - Indicates whether the Get/Set call for the given parameter succeeded.
+ *   - A successful Get for a given parameter means RMCtrl retrieved a valid
+ *     value for that parameter.
+ *   - A successful Set for a given parameter means RMCtrl set the value of
+ *     that parameter.
+ *
+ * BLOCKING:
+ *   - Defines whether the RM Ctrl call is blocking/non-blocking for the
+ *     given parameter.
+ */
+
+#define NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_FLAG_SUCCEED     0:0
+#define NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_FLAG_SUCCEED_NO  0x0
+#define NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_FLAG_SUCCEED_YES 0x1
+
+/*!
+ * @brief Defines all information required to get/set the parameter for a
+ * given display low power feature.
+ */
+typedef struct NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER {
+    NV0073_CTRL_DISP_LPWR_FEATURE   feature;
+    NV0073_CTRL_DISP_LPWR_PARAMETER param;
+} NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER;
+
+// Max size of the FEATURE_PARAMETER list for RMCtrl NV0073_CTRL_DISP_LPWR_GET/SET
+#define NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_LIST_MAX_SIZE 64
+
+/*
+ * NV0073_CTRL_CMD_DISP_LPWR_FEATURE_PARAMETER_GET
+ *
+ * This command retrieves parameters/characteristics of power features. It
+ * can query up to NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_LIST_MAX_SIZE
+ * parameters in one call, which allows information on multiple power saving
+ * features to be collected at once.
+ *
+ * The command returns NV_OK only when it successfully retrieves the value of
+ * every parameter in the list.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *
+ *   displayId
+ *     Display ID of the panel for which low power feature data is queried.
+ *
+ *   listSize
+ *     Number of valid entries in the list.
+ *
+ *   list
+ *     List of parameters. Refer to NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER
+ *     for details about each entry in the list.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0073_CTRL_CMD_DISP_LPWR_FEATURE_PARAMETER_GET (0x730155) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_GET_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_GET_PARAMS_MESSAGE_ID (0x55U)
+
+typedef struct NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_GET_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 listSize;
+    NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER list[NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_LIST_MAX_SIZE];
+} NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_GET_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DISP_LPWR_FEATURE_PARAMETER_SET
+ *
+ * This command sets parameters/characteristics of power features. It can
+ * set up to NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_LIST_MAX_SIZE parameters
+ * in one call, which allows parameters for multiple power saving features to
+ * be set at once.
+ *
+ * The command returns NV_OK only when it successfully sets the value of
+ * every parameter in the list.
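+ *
+ * A minimal usage sketch for the GET command defined above (illustrative
+ * only; the SET path is identical apart from the command, and NvRmControl
+ * is assumed as the client's RM control entry point):
+ *
+ *   NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_GET_PARAMS params = { 0 };
+ *
+ *   params.subDeviceInstance         = 0;
+ *   params.displayId                 = displayId;  // caller-provided
+ *   params.listSize                  = 1;
+ *   params.list[0].feature.featureId = NV0073_CTRL_DISP_LPWR_FEATURE_ID_ALPM;
+ *   params.list[0].param.paramId     = NV0073_CTRL_DISP_LPWR_PARAMETER_ID_ALPM_SUPPORTED;
+ *
+ *   NV_STATUS status = NvRmControl(hClient, hDisplayCommon,
+ *                                  NV0073_CTRL_CMD_DISP_LPWR_FEATURE_PARAMETER_GET,
+ *                                  &params, sizeof(params));
+ *
+ *   // On success, params.list[0].param.val holds the queried value, and the
+ *   // SUCCEED field of params.list[0].param.flag reports per-entry status.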
+ *
+ *   listSize
+ *     Number of valid entries in the list.
+ *
+ *   list
+ *     List of parameters. Refer to NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER
+ *     for details about each entry in the list.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0073_CTRL_CMD_DISP_LPWR_FEATURE_PARAMETER_SET (0x730156) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_SET_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_SET_PARAMS_MESSAGE_ID (0x56U)
+
+typedef struct NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_SET_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 listSize;
+    NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER list[NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_LIST_MAX_SIZE];
+} NV0073_CTRL_DISP_LPWR_FEATURE_PARAMETER_SET_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_NOTIFY_DRR_MSCG_WAR
+ *
+ * This command is used to notify RM about the DRR feature. RM uses this
+ * notification to account for MSCG WARs for Turing and Ampere HW bugs.
+ *
+ *   subDeviceInstance (in)
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation
+ *     should be directed.
+ *   presentDurationUs (in)
+ *     This parameter inputs the presentDurationUs of the active display.
+ *   bEnableDrr
+ *     If true, DRR is enabled on the DD side.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_SYSTEM_NOTIFY_DRR_MSCG_WAR (0x730159U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_NOTIFY_DRR_MSCG_WAR_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_NOTIFY_DRR_MSCG_WAR_PARAMS_MESSAGE_ID (0x59U)
+
+typedef struct NV0073_CTRL_CMD_SYSTEM_NOTIFY_DRR_MSCG_WAR_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  presentDurationUs;
+    NvBool bEnableDrr;
+} NV0073_CTRL_CMD_SYSTEM_NOTIFY_DRR_MSCG_WAR_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_SET_DISPLAY_PERF_LIMIT
+ *
+ * This command sets lower and/or upper bounds for a display clock (dispclk or
+ * hubclk), or for the memory perf level. When this API is called, the system
+ * will immediately attempt to switch the clock or perf level to a value that
+ * meets the specified condition(s).
+ *
+ * If no lower limit is desired, the "min" input should be set to zero.
+ *
+ * If no upper limit is desired for a clock, the "max" input should be set to
+ * NV_U32_MAX.
+ *
+ * This API does not allow an upper limit to be specified for perf level.
+ *
+ * Any perf limit set through this API will remain in effect until it is
+ * updated or cancelled by a subsequent call to this API. A perf limit may be
+ * cancelled by setting the "min" value to NV_U32_MIN (0) and the "max" value
+ * (for clocks) to NV_U32_MAX. Only one perf limit may be in effect for a
+ * given clientUsageId and type at any given time.
+ *
+ * At any given time, multiple perf limits (with different clientUsageIds) may
+ * be in effect for a given clock or perf level, and that clock or perf level
+ * will be set to a value that meets the requirements of all active perf
+ * limits. If there is a conflict between perf limits, the conflict will be
+ * resolved by the perf limit(s) with the higher priority.
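+ *
+ * As a concrete sketch of the first call in the worked example below
+ * (illustrative only; the structures and defines appear later in this file,
+ * NV_PTR_TO_NvP64 is assumed from nvtypes.h, and NvRmControl is assumed as
+ * the client's RM control entry point):
+ *
+ *   NV0073_CTRL_SYSTEM_DISPLAY_PERF_LIMIT limit = { 0 };
+ *   NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_PARAMS params = { 0 };
+ *
+ *   limit.clientUsageId     = NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_ID_MODS;
+ *   limit.whatIsToBeLimited = NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_LIMITING_DISPCLK;
+ *   limit.minLevelOrFreqKHz = 800000;  // 800 MHz lower bound
+ *   limit.maxLevelOrFreqKHz = 800000;  // 800 MHz upper bound
+ *
+ *   params.subDeviceInstance  = 0;
+ *   params.numLimits          = 1;
+ *   params.bWaitForCompletion = NV_TRUE;
+ *   params.limits             = NV_PTR_TO_NvP64(&limit);
+ *
+ *   NV_STATUS status = NvRmControl(hClient, hDisplayCommon,
+ *                                  NV0073_CTRL_CMD_SYSTEM_SET_DISPLAY_PERF_LIMIT,
+ *                                  &params, sizeof(params));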
+ *
+ * For example, suppose the API is called with the following parameters:
+ *   clientUsageId = NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_ID_MODS
+ *   type = NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_TYPE_DISPCLK
+ *   data.clock.minFrequencyKHz = 800000
+ *   data.clock.maxFrequencyKHz = 800000
+ * This will set dispclk to 800 MHz (or possibly a slightly higher frequency,
+ * if the clock dividers do not allow 800 MHz to be set exactly).
+ *
+ * Then suppose this call is made:
+ *   clientUsageId = NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_ID_ISMODEPOSSIBLE
+ *   type = NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_TYPE_DISPCLK
+ *   data.clock.minFrequencyKHz = 1000000
+ *   data.clock.maxFrequencyKHz = 0xFFFFFFFF
+ * After this call, dispclk will remain set to 800 MHz, because, although the
+ * min frequency was requested to be at least 1 GHz, this would conflict with
+ * the "maxFrequencyKHz = 800000" value set in the previous call, which is
+ * still in effect. The previous call takes priority because
+ * NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_ID_MODS has higher priority than
+ * NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_ID_ISMODEPOSSIBLE.
+ *
+ * Then suppose this call is made:
+ *   clientUsageId = NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_ID_MODS
+ *   type = NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_TYPE_DISPCLK
+ *   data.clock.minFrequencyKHz = 0
+ *   data.clock.maxFrequencyKHz = 0xFFFFFFFF
+ * This removes the NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_ID_MODS perf
+ * limit. At this point, dispclk will be set to 1 GHz, in accordance with the
+ * NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_ID_ISMODEPOSSIBLE perf limit,
+ * which is still in effect. (The remaining perf limit allows the clock to be
+ * higher than 1 GHz, but in practice, the clock will generally be set to the
+ * lowest frequency that meets the perf limit requirement, to save power. For
+ * perf level, perf monitors (which do not use the perf limit mechanism) may
+ * force a higher value in order to meet performance needs.)
+ *
+ * This API takes an array of perf limit structures, so multiple perf limits
+ * may be set within the same call.
+ *
+ * This API is primarily intended for use on SOC products, where display is
+ * separate from the GPU. On dGPU products, this API may not be supported;
+ * instead, NV2080_CTRL_CMD_PERF_LIMITS_SET_STATUS_V2 may be used to set perf
+ * limits.
+ *
+ *   subDeviceInstance (in)
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *
+ *   numLimits (in)
+ *     This is the number of perf limits with lower and/or upper limits to
+ *     apply.
+ *
+ *   bWaitForCompletion (in)
+ *     It is possible that a perf change or clock change may take some time
+ *     to execute. If this flag is set, the API will wait for all of the
+ *     changes to complete before returning. (However, it will not wait for
+ *     completion of any operation that is blocked by a higher priority perf
+ *     limit.)
+ *
+ *   clientUsageId (in)
+ *     This is a NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_ID_xxx value
+ *     indicating who the client is, and/or the purpose of the perf limit. It
+ *     is used to establish priority between conflicting perf limits.
+ *
+ *   whatIsToBeLimited (in)
+ *     This is a NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_TYPE_xxx value
+ *     indicating which clock, or memory perf level, is to have limits
+ *     applied.
+ *
+ *   minLevelOrFreqKHz (in)
+ *   maxLevelOrFreqKHz (in)
+ *     If type is
+ *     NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_TYPE_PERF_LEVEL, then
+ *     minLevelOrFreqKHz specifies the zero-based index of the minimum perf
+ *     level to allow. maxLevelOrFreqKHz is not used.
+ *
+ *     If type specifies a clock
+ *     (NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_TYPE_DISPCLK or
+ *     NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_TYPE_HUBCLK), then
+ *     minLevelOrFreqKHz and maxLevelOrFreqKHz specify the lower and upper
+ *     limits (respectively) for the specified clock's frequency.
+ */
+#define NV0073_CTRL_CMD_SYSTEM_SET_DISPLAY_PERF_LIMIT (0x73015aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_PARAMS_MESSAGE_ID" */
+
+/* valid clientUsageId values */
+#define NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_ID_ISMODEPOSSIBLE (0U)
+#define NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_ID_MCLK_SWITCH    (1U)
+#define NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_ID_MODS           (2U)
+
+/* valid type values */
+#define NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_LIMITING_PERF_LEVEL (0U)
+#define NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_LIMITING_DISPCLK    (1U)
+#define NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_LIMITING_HUBCLK     (2U)
+
+/* Define a structure for a single perf limit */
+typedef struct NV0073_CTRL_SYSTEM_DISPLAY_PERF_LIMIT {
+    NvU8  clientUsageId;
+    NvU8  whatIsToBeLimited;
+    NvU32 minLevelOrFreqKHz;
+    NvU32 maxLevelOrFreqKHz;
+} NV0073_CTRL_SYSTEM_DISPLAY_PERF_LIMIT;
+
+#define NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_PARAMS_MESSAGE_ID (0x5AU)
+
+typedef struct NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  numLimits;
+    NvBool bWaitForCompletion;
+    NV_DECLARE_ALIGNED(NvP64 limits, 8);
+} NV0073_CTRL_SYSTEM_SET_DISPLAY_PERF_LIMIT_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_CRASH_LOCK_COUNTER_INFO
+ *
+ * Fetches the crash lock counter information from the corresponding register.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   head
+ *     Head ID of the panel for which the crash lock counter info is read.
+ *   counterValueV
+ *     Counts the number of vertical crashlock events that have occurred on
+ *     this display.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CRASH_LOCK_COUNTER_INFO (0x730160U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_GET_CRASH_LOCK_COUNTER_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CRASH_LOCK_COUNTER_INFO_PARAMS_MESSAGE_ID (0x60U)
+
+typedef struct NV0073_CTRL_CMD_SYSTEM_GET_CRASH_LOCK_COUNTER_INFO_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NvU32 counterValueV;
+} NV0073_CTRL_CMD_SYSTEM_GET_CRASH_LOCK_COUNTER_INFO_PARAMS;
+
+/* _ctrl0073system_h_ */
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0076.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0076.h
new file mode 100644
index 0000000..0387fc9
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0076.h
@@ -0,0 +1,41 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0076.finn +// + + + +/* + * NV0076_CTRL_NOTIFY_CONSOLE_DISABLED + * + * This command signals to the resource manager that the operating system's + * legacy framebuffer console was disabled and that the underlying BAR mapping + * can be freed. + */ +#define NV0076_CTRL_CMD_NOTIFY_CONSOLE_DISABLED (0x760101) /* finn: Evaluated from "(FINN_NV01_FRAMEBUFFER_CONSOLE_INTERFACE_ID << 8) | 1" */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080.h new file mode 100644 index 0000000..3804556 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080.h @@ -0,0 +1,49 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl0080.finn +// + + + + +#include "ctrl0080/ctrl0080bif.h" +#include "ctrl0080/ctrl0080gpu.h" +#include "ctrl0080/ctrl0080dma.h" +#include "ctrl0080/ctrl0080gr.h" +#include "ctrl0080/ctrl0080cipher.h" +#include "ctrl0080/ctrl0080fb.h" +#include "ctrl0080/ctrl0080fifo.h" +#include "ctrl0080/ctrl0080host.h" + + +#include "ctrl0080/ctrl0080perf.h" +#include "ctrl0080/ctrl0080msenc.h" +#include "ctrl0080/ctrl0080bsp.h" +#include "ctrl0080/ctrl0080nvjpg.h" +#include "ctrl0080/ctrl0080unix.h" +#include "ctrl0080/ctrl0080internal.h" diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h new file mode 100644 index 0000000..7813add --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080base.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NV01_DEVICE_XX/NV03_DEVICE control commands and parameters */ + +#define NV0080_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0080, NV0080_CTRL_##cat, idx) + +/* GPU device command categories (6bits) */ +#define NV0080_CTRL_RESERVED (0x00) +#define NV0080_CTRL_BIF (0x01) +#define NV0080_CTRL_GPU (0x02) +#define NV0080_CTRL_CLK (0x10) +#define NV0080_CTRL_GR (0x11) +#define NV0080_CTRL_CIPHER (0x12) +#define NV0080_CTRL_FB (0x13) +#define NV0080_CTRL_HOST (0x14) +#define NV0080_CTRL_VIDEO (0x15) +#define NV0080_CTRL_FIFO (0x17) +#define NV0080_CTRL_DMA (0x18) +#define NV0080_CTRL_PERF (0x19) +#define NV0080_CTRL_PERF_LEGACY_NON_PRIVILEGED (0x99) /* finn: Evaluated from "(NV0080_CTRL_PERF | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV0080_CTRL_MSENC (0x1B) +#define NV0080_CTRL_BSP (0x1C) +#define NV0080_CTRL_RC (0x1D) +#define NV0080_CTRL_OS_UNIX (0x1E) +#define NV0080_CTRL_NVJPG (0x1F) +#define NV0080_CTRL_INTERNAL (0x20) +#define NV0080_CTRL_NVLINK (0x21) + +/* + * NV0080_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. 
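+ *
+ * For reference, the evaluated command value below encodes class, category,
+ * and index: 0x800000 is (0x0080 << 16) | (NV0080_CTRL_RESERVED << 8) | 0x0.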
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV0080_CTRL_CMD_NULL (0x800000) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+/* _ctrl0080base_h_ */
+
+/* extract device cap setting from specified category-specific caps table */
+#define NV0080_CTRL_GET_CAP(cat,tbl,c) \
+    NV0080_CTRL_##cat##_GET_CAP(tbl, NV0080_CTRL_##cat##_CAPS_##c)
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h
new file mode 100644
index 0000000..fadb2b1
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h
@@ -0,0 +1,148 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2009-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0080/ctrl0080bif.finn
+//
+
+#include "ctrl/ctrl0080/ctrl0080base.h"
+#include "nvcfg_sdk.h"
+
+/*
+ * NV0080_CTRL_CMD_BIF_RESET
+ *
+ * This command initiates the specified reset type on the GPU.
+ *
+ *   flags
+ *     Specifies various arguments to the reset operation.
+ *
+ *     Supported fields include:
+ *
+ *     NV0080_CTRL_BIF_RESET_FLAGS_TYPE
+ *       When set to _SW_RESET, a SW (fullchip) reset is performed. When set
+ *       to _SBR, a secondary-bus reset is performed. When set to
+ *       _FUNDAMENTAL, a fundamental reset is performed.
+ *
+ *       NOTE: _FUNDAMENTAL is not supported for Blackwell and later chips.
+ *       Use the BOOT_DEVICE_FUSE or BOOT_DEVICE reset type instead.
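+ *
+ * A minimal sketch requesting a secondary-bus reset (illustrative only;
+ * DRF_DEF is assumed from nvmisc.h and NvRmControl is assumed as the
+ * client's RM control entry point):
+ *
+ *   NV0080_CTRL_BIF_RESET_PARAMS params = { 0 };
+ *
+ *   params.flags = DRF_DEF(0080_CTRL, _BIF_RESET_FLAGS, _TYPE, _SBR);
+ *
+ *   NV_STATUS status = NvRmControl(hClient, hDevice,
+ *                                  NV0080_CTRL_CMD_BIF_RESET,
+ *                                  &params, sizeof(params));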
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0080_CTRL_CMD_BIF_RESET (0x800102) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BIF_INTERFACE_ID << 8) | NV0080_CTRL_BIF_RESET_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_BIF_RESET_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0080_CTRL_BIF_RESET_PARAMS {
+    NvU32 flags;
+} NV0080_CTRL_BIF_RESET_PARAMS;
+
+
+#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE                  4:0
+#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE_SW_RESET         0x1
+#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE_SBR              0x2
+#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE_FUNDAMENTAL      0x3
+#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE_BOOT_DEVICE_FUSE 0x4
+#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE_BOOT_DEVICE      0x5
+#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE_PEX              0x6
+#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE_OOBHUB_TRIGGER   0x7
+#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE_BASE             0x8
+
+
+
+/*
+ * NV0080_CTRL_BIF_SET_ASPM_FEATURE
+ *
+ *   aspmFeatureSupported
+ *     ASPM feature override by the client.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+
+#define NV0080_CTRL_CMD_BIF_SET_ASPM_FEATURE (0x800104) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BIF_INTERFACE_ID << 8) | NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS {
+    NvU32 aspmFeatureSupported;
+} NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS;
+
+#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L0S          0:0
+#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L0S_ENABLED  0x000000001
+#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L0S_DISABLED 0x000000000
+#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L1           1:1
+#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L1_ENABLED   0x000000001
+#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L1_DISABLED  0x000000000
+
+/*
+ * NV0080_CTRL_BIF_ASPM_CYA_UPDATE
+ *
+ *   bL0sEnable
+ *   bL1Enable
+ *     ASPM CYA update by the client.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+
+#define NV0080_CTRL_CMD_BIF_ASPM_CYA_UPDATE (0x800105) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BIF_INTERFACE_ID << 8) | NV0080_CTRL_BIF_ASPM_CYA_UPDATE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_BIF_ASPM_CYA_UPDATE_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV0080_CTRL_BIF_ASPM_CYA_UPDATE_PARAMS {
+    NvBool bL0sEnable;
+    NvBool bL1Enable;
+} NV0080_CTRL_BIF_ASPM_CYA_UPDATE_PARAMS;
+
+/*
+ * NV0080_CTRL_BIF_GET_PCIE_POWER_CONTROL_MASK
+ *
+ *   pciePowerControlMask
+ *   pciePowerControlIdentifiedKeyOrder
+ *   pciePowerControlIdentifiedKeyLocation
+ *     ASPM and RTD3 enable/disable information.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+
+#define NV0080_CTRL_CMD_BIF_GET_PCIE_POWER_CONTROL_MASK (0x800106) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BIF_INTERFACE_ID << 8) | NV0080_CTRL_CMD_BIF_GET_PCIE_POWER_CONTROL_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_CMD_BIF_GET_PCIE_POWER_CONTROL_MASK_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV0080_CTRL_CMD_BIF_GET_PCIE_POWER_CONTROL_MASK_PARAMS {
+    NvU32 pciePowerControlMask;
+    NvU32 pciePowerControlIdentifiedKeyOrder;
+    NvU32 pciePowerControlIdentifiedKeyLocation;
+} NV0080_CTRL_CMD_BIF_GET_PCIE_POWER_CONTROL_MASK_PARAMS;
+
+/* _ctrl0080bif_h_ */
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h
new file mode 100644
index 0000000..fb8e9a0
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h
@@ -0,0 +1,112 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0080/ctrl0080bsp.finn
+//
+
+#include "ctrl/ctrl0080/ctrl0080base.h"
+
+/* NV01_DEVICE_XX/NV03_DEVICE bit stream processor control commands and parameters */
+
+/*
+ * NV0080_CTRL_CMD_BSP_GET_CAPS
+ *
+ * This command returns the set of BSP capabilities for the device
+ * in the form of an array of unsigned bytes. BSP capabilities
+ * include supported features and required workarounds for the decoder
+ * within the device, each represented by a byte offset into the
+ * table and a bit position within that byte.
+ *
+ *   capsTblSize
+ *     This parameter specifies the size in bytes of the caps table.
+ *     This value should be set to NV0080_CTRL_BSP_CAPS_TBL_SIZE.
+ *   capsTbl
+ *     This parameter specifies a pointer to the client's caps table buffer
+ *     into which the BSP caps bits will be transferred by the RM.
+ *     The caps table is an array of unsigned bytes.
+ *   instanceId
+ *     This parameter specifies the instance ID of the NVDEC for which
+ *     cap bits are requested.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0080_CTRL_CMD_BSP_GET_CAPS (0x801c01) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BSP_INTERFACE_ID << 8) | NV0080_CTRL_BSP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_BSP_GET_CAPS_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0080_CTRL_BSP_GET_CAPS_PARAMS {
+    NvU32 capsTblSize;
+    NV_DECLARE_ALIGNED(NvP64 capsTbl, 8);
+    NvU32 instanceId;
+} NV0080_CTRL_BSP_GET_CAPS_PARAMS;
+
+
+
+/*
+ * Size in bytes of the BSP caps table. This value should be one greater
+ * than the largest byte_index value above.
+ */
+#define NV0080_CTRL_BSP_CAPS_TBL_SIZE 8
+
+/*
+ * NV0080_CTRL_CMD_BSP_GET_CAPS_V2
+ *
+ * This command returns the set of BSP capabilities for the device
+ * in the form of an array of unsigned bytes. BSP capabilities
+ * include supported features and required workarounds for the decoder
+ * within the device, each represented by a byte offset into the
+ * table and a bit position within that byte.
+ * (The V2 version flattens the capsTbl array pointer.)
+ *
+ *   capsTbl
+ *     This parameter is an array of unsigned bytes where the BSP caps bits
+ *     will be transferred by the RM.
+ *   instanceId
+ *     This parameter specifies the instance ID of the NVDEC for which
+ *     cap bits are requested.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0080_CTRL_CMD_BSP_GET_CAPS_V2 (0x801c02) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BSP_INTERFACE_ID << 8) | NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2_MESSAGE_ID" */
+
+#define NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2_MESSAGE_ID (0x2U)
+
+typedef struct NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2 {
+    NvU8  capsTbl[NV0080_CTRL_BSP_CAPS_TBL_SIZE];
+    NvU32 instanceId;
+} NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2;
+
+/* _ctrl0080bsp_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h
new file mode 100644
index 0000000..9b63db0
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h
@@ -0,0 +1,32 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0080/ctrl0080cipher.finn
+//
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h
new file mode 100644
index 0000000..bb039c2
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h
@@ -0,0 +1,890 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0080/ctrl0080dma.finn
+//
+
+#include "ctrl/ctrl0080/ctrl0080base.h"
+
+/* NV01_DEVICE_XX/NV03_DEVICE dma control commands and parameters */
+
+/*
+ * NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK
+ *
+ * This parameter returns the parameters specific to a PTE as follows:
+ *   pageSize
+ *     GET: This parameter returns the page size of the PTE information
+ *          being returned. If 0, then this pteBlock[] array entry is
+ *          invalid or not used. (pteBlock[0] is always used.)
+ *     SET: This parameter specifies the page size of the PTE information
+ *          to be set. If 0, then this pteBlock[] array entry is invalid
+ *          or not used. (pteBlock[0] is always used.)
+ *   pteEntrySize
+ *     GET: This parameter returns the size of the PTE in bytes for this GPU.
+ *     SET: N/A
+ *   comptagLine
+ *     GET: This parameter returns the comptagline field of the corresponding PTE.
+ *     SET: This parameter sets the comptagline field of the corresponding PTE.
+ *          Incorrect values may lead to dire consequences.
+ *   kind
+ *     GET: This parameter returns the kind field of the corresponding PTE.
+ *     SET: This parameter sets the kind field of the corresponding PTE.
+ *          Incorrect values may lead to undesirable consequences.
+ *   pteFlags
+ *     This parameter returns various fields from the PTE; these are:
+ *     FLAGS_VALID:
+ *       GET: This flag returns the valid bit of the PTE.
+ *       SET: This flag sets the valid bit of the PTE.
+ *     FLAGS_ENCRYPTED:
+ *       GET: This flag returns the encrypted bit of the PTE. Not all GPUs
+ *            support encryption. If not supported, this flag will be set to
+ *            NOT_SUPPORTED.
+ *       SET: This flag sets the encrypted bit of the PTE.
+ *     FLAGS_APERTURE:
+ *       GET: This flag returns the aperture field of the PTE. See
+ *            NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS_FLAGS_APERTURE_* for values.
+ *       SET: This flag sets the aperture field of the PTE. See
+ *            NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS_FLAGS_APERTURE_* for values.
+ *     FLAGS_COMPTAGS:
+ *       GET: This flag returns the comptags field of the PTE. (Not used on Fermi)
+ *       SET: N/A
+ *     FLAGS_GPU_CACHED:
+ *       GET: This flag returns the GPU cacheable bit of the PTE. GPU caching of
+ *            sysmem was added in iGT21a and Fermi. If not supported, this flag
+ *            will be set to NOT_SUPPORTED.
+ *       SET: N/A for specific chips, e.g., GF100
+ *     FLAGS_SHADER_ACCESS:
+ *       GET: This flag returns the shader access control of the PTE. This
+ *            feature was introduced in Kepler. If not supported, this flag
+ *            will be set to NOT_SUPPORTED.
+ *       SET: N/A
+ */
+
+typedef struct NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK {
+    NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
+    NV_DECLARE_ALIGNED(NvU64 pteEntrySize, 8);
+    NvU32 comptagLine;
+    NvU32 kind;
+    NvU32 pteFlags;
+} NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK;
+
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_VALID                               0:0
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_VALID_FALSE                         (0x00000000U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_VALID_TRUE                          (0x00000001U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ENCRYPTED                           2:1
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ENCRYPTED_FALSE                     (0x00000000U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ENCRYPTED_TRUE                      (0x00000001U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ENCRYPTED_NOT_SUPPORTED             (0x00000002U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE                            6:3
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE_VIDEO_MEMORY               (0x00000000U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE_PEER_MEMORY                (0x00000001U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE_SYSTEM_COHERENT_MEMORY     (0x00000002U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE_SYSTEM_NON_COHERENT_MEMORY (0x00000003U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS                            10:7
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS_NONE                       (0x00000000U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS_1                          (0x00000001U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS_2                          (0x00000002U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS_4                          (0x00000004U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_GPU_CACHED                          12:11
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_GPU_CACHED_FALSE                    (0x00000000U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_GPU_CACHED_TRUE                     (0x00000001U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_GPU_CACHED_NOT_SUPPORTED            (0x00000002U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_SHADER_ACCESS                       14:13
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_SHADER_ACCESS_READ_WRITE            (0x00000000U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_SHADER_ACCESS_READ_ONLY             (0x00000001U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_SHADER_ACCESS_WRITE_ONLY            (0x00000002U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_SHADER_ACCESS_NOT_SUPPORTED         (0x00000003U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_READ_ONLY                           15:15
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_READ_ONLY_FALSE                     (0x00000000U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_READ_ONLY_TRUE                      (0x00000001U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ATOMIC                              16:16
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ATOMIC_DISABLE                      (0x00000000U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ATOMIC_ENABLE                       (0x00000001U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ACCESS_COUNTING                     17:17
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ACCESS_COUNTING_DISABLE             (0x00000000U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ACCESS_COUNTING_ENABLE              (0x00000001U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_PRIVILEGED                          18:18
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_PRIVILEGED_FALSE                    (0x00000000U)
+#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_PRIVILEGED_TRUE                     (0x00000001U)
+
+/*
+ * NV0080_CTRL_DMA_GET_PTE_INFO
+ *
+ * This command queries PTE information for the specified GPU virtual address.
+ *
+ *   gpuAddr
+ *     This parameter specifies the GPU virtual address for which PTE
+ *     information is to be returned.
+ *   skipVASpaceInit
+ *     This parameter specifies (true/false) whether the VA Space
+ *     initialization should be skipped in this ctrl call.
+ *   pteBlocks
+ *     This parameter returns the page size-specific attributes of a PTE.
+ *     Please see NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK.
+ *   hVASpace
+ *     Handle of the allocated VA space that this control call should operate
+ *     on. If it is 0, the implicitly allocated VA space is used.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_GENERIC
+ */
+
+#define NV0080_CTRL_CMD_DMA_GET_PTE_INFO (0x801801U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_DMA_GET_PTE_INFO_PTE_BLOCKS 5U
+
+#define NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 gpuAddr, 8);
+    NvU32    subDeviceId;
+    NvU8     skipVASpaceInit;
+    NV_DECLARE_ALIGNED(NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK pteBlocks[NV0080_CTRL_DMA_GET_PTE_INFO_PTE_BLOCKS], 8);
+    NvHandle hVASpace;
+} NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS;
+
+/*
+ * NV0080_CTRL_DMA_SET_PTE_INFO
+ *
+ * This command sets PTE information for the specified GPU virtual address.
+ * Usage of parameter and field definitions is identical to that of
+ * NV0080_CTRL_DMA_GET_PTE_INFO, with the following exceptions:
+ *
+ * - The pteFlags field NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS is
+ *   ignored, as this setting is specified via the kind specification.
+ * - pteEntrySize is ignored, as this setting is read-only in the GET case.
+ * - hVASpace
+ *     Handle of the allocated VA space that this control call should operate
+ *     on. If it is 0, the implicitly allocated VA space is used.
+ *
+ */
+
+#define NV0080_CTRL_CMD_DMA_SET_PTE_INFO (0x80180aU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_DMA_SET_PTE_INFO_PTE_BLOCKS 5U
+
+#define NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS_MESSAGE_ID (0xAU)
+
+typedef struct NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 gpuAddr, 8);
+    NvU32    subDeviceId;
+    NV_DECLARE_ALIGNED(NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK pteBlocks[NV0080_CTRL_DMA_SET_PTE_INFO_PTE_BLOCKS], 8);
+    NvHandle hVASpace;
+} NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS;
+
+
+#define NV0080_CTRL_CMD_DMA_FILL_PTE_MEM (0x801802U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS {
+    NvU32 pageCount;
+    struct {
+        NvHandle hClient;
+        NvHandle hDevice;
+        NvHandle hMemory;
+        NvU32    subDeviceId;
+    } hwResource;
+    struct {
+        NvU32 fbKind;
+        NvU32 sysKind;
+        NvU32 compTagStartOffset;
+    } comprInfo;
+    NV_DECLARE_ALIGNED(NvU64 offset, 8);
+    NV_DECLARE_ALIGNED(NvU64 gpuAddr, 8);
+    NV_DECLARE_ALIGNED(NvP64 pageArray, 8);
+    NV_DECLARE_ALIGNED(NvP64 pteMem, 8);
+    NvU32    pteMemPfn;
+    NvU32    pageSize;
+    NvU32    startPageIndex;
+    NV_DECLARE_ALIGNED(NvU64 flags, 8);
+    NvHandle hSrcVASpace;
+    NvHandle hTgtVASpace;
+    NvU32    peerId;
+} NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS;
+
+
+
+/*
+ * NV0080_CTRL_DMA_FLUSH
+ *
+ * This command flushes the specified target unit.
+ *
+ *   targetUnit
+ *     The unit to flush, either the L2 cache or the compression tag cache.
+ *     This field is a logical OR of the individual fields, such as the
+ *     L2 cache or the compression tag cache. L2 invalidation for either
+ *     SYSMEM or PEERMEM can also be triggered, but that invalidation is
+ *     Fermi-only.
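+ *
+ * A minimal sketch flushing both L2 and the comptag cache (illustrative
+ * only; DRF_DEF is assumed from nvmisc.h and NvRmControl is assumed as the
+ * client's RM control entry point):
+ *
+ *   NV0080_CTRL_DMA_FLUSH_PARAMS params = { 0 };
+ *
+ *   params.targetUnit =
+ *       DRF_DEF(0080_CTRL, _DMA_FLUSH, _TARGET_UNIT_L2, _ENABLE) |
+ *       DRF_DEF(0080_CTRL, _DMA_FLUSH, _TARGET_UNIT_COMPTAG, _ENABLE);
+ *
+ *   NV_STATUS status = NvRmControl(hClient, hDevice,
+ *                                  NV0080_CTRL_CMD_DMA_FLUSH,
+ *                                  &params, sizeof(params));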
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_GENERIC
+ *
+ * See Also:
+ *   NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE
+ *     Flushes the entire GPU cache or a set of physical addresses (if the
+ *     hardware supports it). Use this call if you want to flush a set of
+ *     addresses or the entire GPU cache in unicast mode.
+ *   NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE
+ *     Flushes memory associated with a single allocation if the hardware
+ *     supports it. Use this call if you want to flush a single allocation
+ *     and you have a memory object describing the physical memory.
+ */
+#define NV0080_CTRL_CMD_DMA_FLUSH (0x801805U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_FLUSH_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_DMA_FLUSH_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV0080_CTRL_DMA_FLUSH_PARAMS {
+    NvU32 targetUnit;
+} NV0080_CTRL_DMA_FLUSH_PARAMS;
+
+#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2              0:0
+#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_DISABLE      (0x00000000U)
+#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_ENABLE       (0x00000001U)
+#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_COMPTAG         1:1
+#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_COMPTAG_DISABLE (0x00000000U)
+#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_COMPTAG_ENABLE  (0x00000001U)
+#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_FB              2:2
+#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_FB_DISABLE      (0x00000000U)
+#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_FB_ENABLE       (0x00000001U)
+
+// This is exclusively for Fermi.
+// The selection of non-zero valued bit-fields keeps these cases from being
+// routed into the cases above, and vice versa.
+#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_INVALIDATE         4:3
+#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_INVALIDATE_SYSMEM  (0x00000001U)
+#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_INVALIDATE_PEERMEM (0x00000002U)
+
+
+/**
+ * NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS
+ *
+ * This command returns information about the VA caps on the GPU.
+ *
+ *   vaBitCount
+ *     Returns the number of bits in a virtual address.
+ *   pdeCoverageBitCount
+ *     Returns the number of VA bits covered by each PDE. One PDE covers
+ *     2^pdeCoverageBitCount bytes.
+ *
+ *   bigPageSize
+ *     Size of the big page.
+ *   compressionPageSize
+ *     Size of the region each compression tag covers.
+ *   dualPageTableSupported
+ *     TRUE if one page table can map both 4KB and big pages.
+ *
+ *   numPageTableFormats
+ *     Returns the number of different page table sizes supported by the RM.
+ *   pageTableBigFormat
+ *   pageTable4KFormat[]
+ *     Returns the size in bytes and the number of VA bits covered by each
+ *     page table format. Up to MAX_NUM_PAGE_TABLE_FORMATS can be returned.
+ *     The most compact format will be pageTableSize[0] and the least compact
+ *     format will be last.
+ *   hVASpace
+ *     Handle of the allocated VA space that this control call should operate
+ *     on. If it is 0, the implicitly allocated VA space is used.
+ *   vaRangeLo
+ *     Indicates the start of the usable VA range.
+ *
+ *   hugePageSize
+ *     Size of the huge page if supported, 0 otherwise.
+ *
+ *   vaSpaceId
+ *     Virtual address space ID assigned by RM.
+ *     Only relevant on AMODEL.
+ *
+ *   pageSize512MB
+ *     Size of the 512MB page if supported, 0 otherwise.
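+ *
+ * A minimal query sketch (illustrative only; NvRmControl is assumed as the
+ * client's RM control entry point, and hVASpace = 0 selects the implicitly
+ * allocated VA space):
+ *
+ *   NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS params = { 0 };
+ *
+ *   NV_STATUS status = NvRmControl(hClient, hDevice,
+ *                                  NV0080_CTRL_CMD_DMA_ADV_SCHED_GET_VA_CAPS,
+ *                                  &params, sizeof(params));
+ *
+ *   // On success, each PDE covers (1ULL << params.pdeCoverageBitCount)
+ *   // bytes of VA, and params.bigPageSize reports the big page size.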
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_GENERIC
+ */
+#define NV0080_CTRL_CMD_DMA_ADV_SCHED_GET_VA_CAPS (0x801806U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PAGE_TABLE_FORMAT {
+    NvU32 pageTableSize;
+    NvU32 pageTableCoverage;
+} NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PAGE_TABLE_FORMAT;
+
+#define NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_MAX_NUM_PAGE_TABLE_FORMATS (16U)
+#define NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS {
+    NvU32    vaBitCount;
+    NvU32    pdeCoverageBitCount;
+    NvU32    num4KPageTableFormats;
+    NvU32    bigPageSize;
+    NvU32    compressionPageSize;
+    NvU32    dualPageTableSupported;
+    NvU32    idealVRAMPageSize;
+    NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PAGE_TABLE_FORMAT pageTableBigFormat;
+    NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PAGE_TABLE_FORMAT pageTable4KFormat[NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_MAX_NUM_PAGE_TABLE_FORMATS];
+    NvHandle hVASpace;
+    NV_DECLARE_ALIGNED(NvU64 vaRangeLo, 8);
+    NvU32    vaSpaceId;
+    NV_DECLARE_ALIGNED(NvU64 supportedPageSizeMask, 8);
+} NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS;
+
+/*
+ * Adding a version define to allow clients to access valid
+ * parameters based on version.
+ */
+#define NV0080_CTRL_CMD_DMA_ADV_SCHED_GET_VA_CAPS_WITH_VA_RANGE_LO 0x1U
+
+/*
+ * NV0080_CTRL_DMA_GET_PDE_INFO
+ *
+ * This command queries PDE information for the specified GPU virtual address.
+ *
+ *   gpuAddr
+ *     This parameter specifies the GPU virtual address for which PDE
+ *     information is to be returned.
+ *   pdeVirtAddr
+ *     This parameter returns the GPU virtual address of the PDE.
+ *   pdeEntrySize
+ *     This parameter returns the size of the PDE in bytes for this GPU.
+ *   pdeAddrSpace
+ *     This parameter returns the GPU address space of the PDE.
+ *   pdeSize
+ *     This parameter returns the fractional size of the page table(s) as
+ *     actually set in the PDE: FULL, 1/2, 1/4 or 1/8. (This amount may
+ *     differ from that derived from pdeVASpaceSize.) Intended for VERIF only.
+ *   pteBlocks
+ *     This parameter returns the page size-specific parameters as follows:
+ *     ptePhysAddr
+ *       This parameter returns the GPU physical address of the page table.
+ *     pteCacheAttrib
+ *       This parameter returns the caching attribute of the
+ *       GPU physical address of the page table.
+ *     pteEntrySize
+ *       This parameter returns the size of the PTE in bytes for this GPU.
+ *     pageSize
+ *       This parameter returns the page size of the page table.
+ *       If pageSize == 0, then this PTE block is not valid.
+ *     pteAddrSpace
+ *       This parameter returns the GPU address space of the page table.
+ *     pdeVASpaceSize
+ *       This parameter returns the size of the VA space addressable by
+ *       the page table if fully used (i.e., if all PTEs are marked VALID).
+ *   pdbAddr
+ *     This parameter returns the PDB address for the PDE.
+ *   hVASpace
+ *     Handle of the allocated VA space that this control call should operate
+ *     on. If it is 0, the implicitly allocated VA space is used.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_GENERIC
+ */
+#define NV0080_CTRL_CMD_DMA_GET_PDE_INFO (0x801809U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCK {
+    NV_DECLARE_ALIGNED(NvU64 ptePhysAddr, 8);
+    NvU32 pteCacheAttrib;
+    NvU32 pteEntrySize;
+    NvU32 pageSize;
+    NvU32 pteAddrSpace;
+    NvU32 pdeVASpaceSize;
+    NvU32 pdeFlags;
+} NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCK;
+
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PTE_ADDR_SPACE_VIDEO_MEMORY               (0x00000000U)
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PTE_ADDR_SPACE_SYSTEM_COHERENT_MEMORY     (0x00000001U)
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PTE_ADDR_SPACE_SYSTEM_NON_COHERENT_MEMORY (0x00000002U)
+
+#define NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCKS 5U
+
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 gpuAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 pdeVirtAddr, 8);
+    NvU32    pdeEntrySize;
+    NvU32    pdeAddrSpace;
+    NvU32    pdeSize;
+    NvU32    subDeviceId;
+    NV_DECLARE_ALIGNED(NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCK pteBlocks[NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCKS], 8);
+    NV_DECLARE_ALIGNED(NvU64 pdbAddr, 8);
+    NvHandle hVASpace;
+} NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS;
+
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_ADDR_SPACE_VIDEO_MEMORY               (0x00000000U)
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_ADDR_SPACE_SYSTEM_COHERENT_MEMORY     (0x00000001U)
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_ADDR_SPACE_SYSTEM_NON_COHERENT_MEMORY (0x00000002U)
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_SIZE_FULL    1U
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_SIZE_HALF    2U
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_SIZE_QUARTER 3U
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_SIZE_EIGHTH  4U
+
+/*
+ * NV0080_CTRL_CMD_DMA_INVALIDATE_TLB
+ *
+ * This command invalidates the GPU TLB. It is intended to be used by RM
+ * clients that manage their own TLB consistency when updating page tables
+ * on their own, or with the DEFER_TLB_INVALIDATION options of other RM APIs.
+ *
+ *   hVASpace
+ *     This parameter specifies the VASpace object whose MMU TLB entries need
+ *     to be invalidated, if the flag is set to
+ *     NV0080_CTRL_DMA_INVALIDATE_TLB_ALL_FALSE.
+ *     Specifying a GMMU VASpace object handle will invalidate the GMMU TLB
+ *     for that particular VASpace.
+ *     Specifying a SMMU VASpace object handle will flush the entire SMMU TLB
+ *     and PTC.
+ *
+ *   flags
+ *     This parameter can be used to specify any flags needed for the
+ *     invalidation request.
+ *     NV0080_CTRL_DMA_INVALIDATE_TLB_ALL
+ *       When set to TRUE, this flag requests a global invalidate.
+ *       When set to FALSE, this flag requests a chip-specific
+ *       optimization to invalidate only the address space bound
+ *       to the associated hDevice.
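+ *
+ * A minimal sketch requesting a global invalidate (illustrative only;
+ * DRF_DEF is assumed from nvmisc.h and NvRmControl is assumed as the
+ * client's RM control entry point):
+ *
+ *   NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS params = { 0 };
+ *
+ *   params.hVASpace = 0;  // ignored when _ALL is set to _TRUE
+ *   params.flags    = DRF_DEF(0080_CTRL, _DMA, _INVALIDATE_TLB_ALL, _TRUE);
+ *
+ *   NV_STATUS status = NvRmControl(hClient, hDevice,
+ *                                  NV0080_CTRL_CMD_DMA_INVALIDATE_TLB,
+ *                                  &params, sizeof(params));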
+ * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_TIMEOUT_RETRY + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0080_CTRL_CMD_DMA_INVALIDATE_TLB (0x80180cU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS { + NvHandle hVASpace; + NvU32 flags; +} NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS; + +#define NV0080_CTRL_DMA_INVALIDATE_TLB_ALL 0:0 +#define NV0080_CTRL_DMA_INVALIDATE_TLB_ALL_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_INVALIDATE_TLB_ALL_TRUE (0x00000001U) + +/** + * NV0080_CTRL_CMD_DMA_GET_CAPS + * + * This command returns the set of DMA capabilities for the device + * in the form of an array of unsigned bytes. DMA capabilities + * include supported features and required workarounds for address + * translation system within the device, each represented by a byte + * offset into the table and a bit position within that byte. + * + * capsTblSize + * This parameter specifies the size in bytes of the caps table. + * This value should be set to NV0080_CTRL_DMA_CAPS_TBL_SIZE. + * + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the framebuffer caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes. + * + * 32BIT_POINTER_ENFORCED + * If this property is TRUE NVOS32 and NVOS46 calls with + * 32BIT_POINTER_DISABLED will return addresses above 4GB. + * + * SHADER_ACCESS_SUPPORTED + * If this property is set, the MMU in the system supports the independent + * access bits for the shader. This is accessed with the following fields: + * NVOS46_FLAGS_SHADER_ACCESS + * NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS_FLAGS_SHADER_ACCESS + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_DMA_GET_CAPS (0x80180dU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_GET_CAPS_PARAMS_MESSAGE_ID" */ +/* size in bytes of fb caps table */ +#define NV0080_CTRL_DMA_CAPS_TBL_SIZE 8U +#define NV0080_CTRL_DMA_GET_CAPS_PARAMS_MESSAGE_ID (0xDU) + +typedef struct NV0080_CTRL_DMA_GET_CAPS_PARAMS { + NvU32 capsTblSize; + NvU8 capsTbl[NV0080_CTRL_DMA_CAPS_TBL_SIZE]; +} NV0080_CTRL_DMA_GET_CAPS_PARAMS; + +/* extract cap bit setting from tbl */ +#define NV0080_CTRL_DMA_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* caps format is byte_index:bit_mask */ +#define NV0080_CTRL_DMA_CAPS_32BIT_POINTER_ENFORCED 0:0x01 +#define NV0080_CTRL_DMA_CAPS_SHADER_ACCESS_SUPPORTED 0:0x04 +#define NV0080_CTRL_DMA_CAPS_SPARSE_VIRTUAL_SUPPORTED 0:0x08 +#define NV0080_CTRL_DMA_CAPS_MULTIPLE_VA_SPACES_SUPPORTED 0:0x10 + +/* + * NV0080_CTRL_DMA_SET_VA_SPACE_SIZE + * + * Change the size of an existing VA space. + * NOTE: Currently this only supports growing the size, not shrinking. + * + * 1. Allocate new page directory able to map extended range. + * 2. Copy existing PDEs from old directory to new directory. + * 3. Initialize new PDEs to invalid. + * 4. Update instmem to point to new page directory. + * 5. Free old page directory. + * + * vaSpaceSize + * On input, the requested size of the VA space in bytes. + * On output, the actual resulting VA space size. + * + * The actual size will be greater than or equal to the requested size, + * unless NV0080_CTRL_DMA_GROW_VA_SPACE_SIZE_MAX is requested, which + * requests the maximum available. 
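+ *
+ * For illustration, growing the implicit VA space to the maximum available
+ * might look like the following sketch (assuming the usual NvRmControl
+ * entry point):
+ *
+ *     NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS params = { 0 };
+ *     NvU32 status;
+ *     params.vaSpaceSize = NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_MAX;
+ *     params.hVASpace    = 0;  // implicit VA space of this client/device
+ *     status = NvRmControl(hClient, hDevice,
+ *                          NV0080_CTRL_CMD_DMA_SET_VA_SPACE_SIZE,
+ *                          &params, sizeof(params));
+ *     // On NV_OK, params.vaSpaceSize holds the actual resulting size.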
+ *
+ * NOTE: Specific size requests (e.g. other than SIZE_MAX) must account
+ *       for the VA hole at the beginning of the range which is used to
+ *       distinguish NULL pointers. This region is not counted as part
+ *       of the vaSpaceSize since it is not allocatable.
+ *
+ * hVASpace
+ *   Handle for the allocated VA space that this control call should operate
+ *   on. If it is 0, the implicitly allocated VA space associated with the
+ *   client/device pair is used.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INSUFFICIENT_RESOURCES
+ */
+#define NV0080_CTRL_CMD_DMA_SET_VA_SPACE_SIZE (0x80180eU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS_MESSAGE_ID (0xEU)
+
+typedef struct NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8);
+    NvHandle hVASpace;
+} NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS;
+
+#define NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_MAX (0xFFFFFFFFFFFFFFFFULL)
+
+/*
+ * NV0080_CTRL_DMA_UPDATE_PDE_2
+ *
+ * This command updates a single PDE for the given (hClient, hDevice)
+ * with specific attributes.
+ * This command is only available on Windows and MODS platforms.
+ * This command can be called by kernel clients only.
+ *
+ * The VA range the PDE maps must be contained by a VA allocation marked with
+ * NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED.
+ * However, if the MODS-only FORCE_OVERRIDE flag is set, this restriction is relaxed.
+ *
+ * RM does not track the PDE's attributes in SW - this control simply stuffs
+ * the PDE in memory after translating and checking the parameters.
+ *
+ * Parameters are checked for relative consistency (e.g. valid domains),
+ * but it is the client's responsibility to provide correct page table
+ * addresses, e.g. global consistency is not checked.
+ *
+ * It is also the client's responsibility to flush/invalidate the MMU
+ * when appropriate, either by setting the _FLUSH_PDE_CACHE flag for this
+ * call or by flushing through other APIs.
+ * This control does not flush automatically, to allow batches of calls
+ * to be made before a single flush.
+ *
+ * ptParams
+ *   Page-size-specific parameters, as follows:
+ *
+ *   physAddr
+ *     Base address of physically contiguous memory of page table.
+ *     Must be aligned sufficiently for the PDE address field.
+ *   numEntries
+ *     Deprecated and ignored.
+ *     Use FLAGS_PDE_SIZE that applies to the tables for all page sizes.
+ *   aperture
+ *     Address space the base address applies to.
+ *     Can be left as INVALID to ignore this page table size.
+ *
+ * pdeIndex
+ *   The PDE index this update applies to.
+ * flags
+ *   See NV0080_CTRL_DMA_UPDATE_PDE_FLAGS_*.
+ * hVASpace
+ *   Handle for the allocated VA space that this control call should operate
+ *   on. If it is 0, the implicitly allocated VA space associated with the
+ *   client/device pair is used.
+ * pPdeBuffer [out]
+ *   Kernel pointer to a 64-bit unsigned integer representing a Page Dir Entry
+ *   that needs to be updated. It should point to memory as wide as the Page Dir
+ *   Entry.
+ *
+ *   If NULL, Page Dir Entry updates will go to the internally managed Page Dir.
+ *   If not NULL, the updates will be written to this buffer.
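+ *
+ * For illustration, composing the flags for a flushing, full-size PDE
+ * update over the big page table might look like the following sketch
+ * (assuming the DRF_DEF helper from nvmisc.h and a hypothetical
+ * pageTablePA physical address):
+ *
+ *     NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS params = { 0 };
+ *     params.pdeIndex = pdeIndex;
+ *     params.flags =
+ *         DRF_DEF(0080_CTRL, _DMA_UPDATE_PDE_2_FLAGS, _FLUSH_PDE_CACHE, _TRUE) |
+ *         DRF_DEF(0080_CTRL, _DMA_UPDATE_PDE_2_FLAGS, _PDE_SIZE, _FULL);
+ *     params.ptParams[NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX_BIG].physAddr = pageTablePA;
+ *     params.ptParams[NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX_BIG].aperture =
+ *         NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_VIDEO_MEMORY;
+ *     // The small page table slot is left as _PT_APERTURE_INVALID (ignored).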
+ * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + * NV_ERR_INSUFFICIENT_PERMISSIONS + * NV_ERR_NOT_SUPPORTED + */ +#define NV0080_CTRL_CMD_DMA_UPDATE_PDE_2 (0x80180fU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS_MESSAGE_ID" */ + +typedef struct NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 physAddr, 8); + NvU32 numEntries; // deprecated + NvU32 aperture; +} NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS; + +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_INVALID (0x00000000U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_VIDEO_MEMORY (0x00000001U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_SYSTEM_COHERENT_MEMORY (0x00000002U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_SYSTEM_NON_COHERENT_MEMORY (0x00000003U) + +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX_SMALL 0U +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX_BIG 1U +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX__SIZE 2U + +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS_MESSAGE_ID (0xFU) + +typedef struct NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS { + NvU32 pdeIndex; + NvU32 flags; + NV_DECLARE_ALIGNED(NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS ptParams[NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX__SIZE], 8); + NvHandle hVASpace; + NV_DECLARE_ALIGNED(NvP64 pPdeBuffer, 8); // NV_MMU_VER2_DUAL_PDE__SIZE + NvU32 subDeviceId; // ID+1, 0 for BC +} NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS; + +/*! + * If set a PDE cache flush (MMU invalidate) will be performed. + */ +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FLUSH_PDE_CACHE 0:0 +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FLUSH_PDE_CACHE_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FLUSH_PDE_CACHE_TRUE (0x00000001U) + +/*! + * For verification purposes (MODS-only) this flag may be set to modify any PDE + * in the VA space (RM managed or externally managed). + * It is up to caller to restore any changes properly (or to expect faults). + */ +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FORCE_OVERRIDE 1:1 +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FORCE_OVERRIDE_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FORCE_OVERRIDE_TRUE (0x00000001U) + +/*! + * Directly controls the PDE_SIZE field (size of the page tables pointed to by this PDE). + */ +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_PDE_SIZE 3:2 +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_PDE_SIZE_FULL (0x00000000U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_PDE_SIZE_HALF (0x00000001U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_PDE_SIZE_QUARTER (0x00000002U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_PDE_SIZE_EIGHTH (0x00000003U) + +/*! + * Used to specify if the allocation is sparse. Applicable only in case of + * VA Space managed by OS, as in WDDM2.0 + */ +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_SPARSE 4:4 +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_SPARSE_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_SPARSE_TRUE (0x00000001U) + +/* + * NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE + * This interface will create a corresponding privileged + * kernel address space that will mirror user space allocations in this + * VASPACE. + * The user can either pass a FERMI_VASPACE_A handle or RM will use the + * vaspace associated with the client/device if hVaspace is passed as + * NULL. + * Once this property is set, the user will not be able to make allocations + * from the top most PDE of this address space. 
+ *
+ * The user is expected to call this function as soon as they have created
+ * the device/VASpace object. If the user has already made VA allocations
+ * in this VASpace then this call will return a failure
+ * (NV_ERR_INVALID_STATE).
+ * The VASpace should have no VA allocations when this call is made.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE (0x801810U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS_MESSAGE_ID (0x10U)
+
+typedef struct NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS {
+    NvHandle hVASpace;
+} NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS;
+
+/*
+ * NV0080_CTRL_DMA_SET_DEFAULT_VASPACE
+ * This is a special control call provided for KMD to use.
+ * It will associate an allocated Address Space Object as the
+ * default address space of the device.
+ *
+ * This is added so that the user can move to using address space objects
+ * when they want to specify the big page size to use, but still want
+ * to use the rest of the relevant RM APIs without specifying the hVASpace.
+ *
+ * This call will succeed only if no VASpace is already associated with the
+ * device. This means the user must call this before making any allocations
+ * on this device/address space.
+ *
+ * The hVASpace that is passed in to be associated should belong to the parent device that
+ * this call is made for. This call will fail if we try to associate a VASpace belonging to
+ * some other client/device.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ *
+ */
+#define NV0080_CTRL_DMA_SET_DEFAULT_VASPACE (0x801812U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS_MESSAGE_ID (0x12U)
+
+typedef struct NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS {
+    NvHandle hVASpace;
+} NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS;
+
+/*!
+ * NV0080_CTRL_DMA_SET_PAGE_DIRECTORY
+ *
+ * Move an existing VA space to an externally-managed top-level page directory.
+ * The VA space must have been created in SHARED_MANAGEMENT mode.
+ * For lifecycle details, see NV_VASPACE_ALLOCATION_PARAMETERS documentation in nvos.h.
+ *
+ * RM will propagate the update to all channels using the VA space.
+ *
+ * NOTE: All channels using this VA space are expected to be idle and unscheduled prior
+ *       to and during this control call - it is the responsibility of the caller to ensure this.
+ *
+ * physAddress
+ *   Physical address of the new page directory within the aperture specified by flags.
+ * numEntries
+ *   Number of entries in the new page directory.
+ *   The backing physical memory must be at least this size (multiplied by entry size).
+ * flags
+ *   APERTURE
+ *     Specifies which physical aperture the page directory resides in.
+ *   PRESERVE_PDES
+ *     Deprecated - RM will always copy the RM-managed PDEs from the old page directory
+ *     to the new page directory.
+ *   ALL_CHANNELS
+ *     If true, RM will update the instance blocks for all channels using
+ *     the VAS and ignore the chId parameter.
+ *   EXTEND_VASPACE
+ *     If true, RM will use the client VA for client VA requests in VASPACE_SHARED_MANAGEMENT mode.
+ *     If false, RM will use the internal VA for client VA requests.
+ *   IGNORE_CHANNEL_BUSY
+ *     If true, RM will ignore the channel busy status during the set page
+ *     directory operation.
+ * hVASpace
+ *   Handle for the allocated VA space that this control call should operate
+ *   on. If it is 0, the implicitly allocated VA space associated with the
+ *   client/device pair is used.
+ * chId
+ *   ID of the Channel to be updated.
+ * pasid
+ *   PASID (Process Address Space IDentifier) of the process corresponding to
+ *   the VA space. Ignored unless the VA space has ATS enabled.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_LIMIT
+ *   NV_ERR_GENERIC
+ */
+#define NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY (0x801813U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID (0x13U)
+
+typedef struct NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+    NvU32    numEntries;
+    NvU32    flags;
+    NvHandle hVASpace;
+    NvU32    chId;
+    NvU32    subDeviceId; // ID+1, 0 for BC
+    NvU32    pasid;
+} NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS;
+
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE                  1:0
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_VIDMEM           (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_COH       (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_NONCOH    (0x00000002U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES             2:2
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_FALSE       (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_TRUE        (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS              3:3
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_FALSE        (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_TRUE         (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY       4:4
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_TRUE  (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE            5:5
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_FALSE      (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_TRUE       (0x00000001U)
+
+/*!
+ * NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY
+ *
+ * Restore an existing VA space to an RM-managed top-level page directory.
+ * The VA space must have been created in SHARED_MANAGEMENT mode and
+ * previously relocated to an externally-managed page directory with
+ * NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY (these two APIs are symmetric operations).
+ * For lifecycle details, see NV_VASPACE_ALLOCATION_PARAMETERS documentation in nvos.h.
+ *
+ * RM will propagate the update to all channels using the VA space.
+ *
+ * NOTE: All channels using this VA space are expected to be idle and unscheduled prior
+ *       to and during this control call - it is the responsibility of the caller to ensure this.
+ *
+ * hVASpace
+ *   Handle for the allocated VA space that this control call should operate
+ *   on. If it is 0, the implicitly allocated VA space associated with the
+ *   client/device pair is used.
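+ *
+ * For illustration, returning a shared-management VA space to RM-managed
+ * page directories might look like the following sketch (assuming the
+ * usual NvRmControl entry point):
+ *
+ *     NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS params = { 0 };
+ *     NvU32 status;
+ *     params.hVASpace    = hVASpace;  // previously passed to SET_PAGE_DIRECTORY
+ *     params.subDeviceId = 0;         // 0 broadcasts to all subdevices
+ *     status = NvRmControl(hClient, hDevice,
+ *                          NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY,
+ *                          &params, sizeof(params));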
+ */
+#define NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY (0x801814U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID (0x14U)
+
+typedef struct NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS {
+    NvHandle hVASpace;
+    NvU32    subDeviceId; // ID+1, 0 for BC
+} NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS;
+
+
+
+/* _ctrl0080dma_h_ */
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h
new file mode 100644
index 0000000..5b023c8
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h
@@ -0,0 +1,264 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0080/ctrl0080fb.finn
+//
+
+#include "ctrl/ctrl0080/ctrl0080base.h"
+
+/* NV01_DEVICE_XX/NV03_DEVICE fb control commands and parameters */
+
+/**
+ * NV0080_CTRL_CMD_FB_GET_CAPS
+ *
+ * This command returns the set of framebuffer capabilities for the device
+ * in the form of an array of unsigned bytes. Framebuffer capabilities
+ * include supported features and required workarounds for the framebuffer
+ * engine(s) within the device, each represented by a byte offset into the
+ * table and a bit position within that byte.
+ *
+ * capsTblSize
+ *   This parameter specifies the size in bytes of the caps table.
+ *   This value should be set to NV0080_CTRL_FB_CAPS_TBL_SIZE.
+ * capsTbl
+ *   This parameter specifies a pointer to the client's caps table buffer
+ *   into which the framebuffer caps bits will be transferred by the RM.
+ *   The caps table is an array of unsigned bytes.
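+ *
+ * For illustration, querying and testing a single cap bit might look like
+ * the following sketch (assuming the usual NvRmControl entry point and the
+ * NV_PTR_TO_NvP64 helper from nvtypes.h):
+ *
+ *     NvU8 fbCaps[NV0080_CTRL_FB_CAPS_TBL_SIZE] = { 0 };
+ *     NV0080_CTRL_FB_GET_CAPS_PARAMS params = { 0 };
+ *     NvU32 status;
+ *     params.capsTblSize = NV0080_CTRL_FB_CAPS_TBL_SIZE;
+ *     params.capsTbl     = NV_PTR_TO_NvP64(fbCaps);
+ *     status = NvRmControl(hClient, hDevice, NV0080_CTRL_CMD_FB_GET_CAPS,
+ *                          &params, sizeof(params));
+ *     if (NV0080_CTRL_FB_GET_CAP(fbCaps, NV0080_CTRL_FB_CAPS_BLOCKLINEAR))
+ *         ;  // device supports blocklinear surfaces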
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_POINTER + */ +#define NV0080_CTRL_CMD_FB_GET_CAPS (0x801301) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FB_INTERFACE_ID << 8) | NV0080_CTRL_FB_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FB_GET_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_FB_GET_CAPS_PARAMS { + NvU32 capsTblSize; + NV_DECLARE_ALIGNED(NvP64 capsTbl, 8); +} NV0080_CTRL_FB_GET_CAPS_PARAMS; + +/* extract cap bit setting from tbl */ +#define NV0080_CTRL_FB_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* caps format is byte_index:bit_mask */ +#define NV0080_CTRL_FB_CAPS_SUPPORT_RENDER_TO_SYSMEM 0:0x01 +#define NV0080_CTRL_FB_CAPS_BLOCKLINEAR 0:0x02 +#define NV0080_CTRL_FB_CAPS_SUPPORT_SCANOUT_FROM_SYSMEM 0:0x04 +#define NV0080_CTRL_FB_CAPS_SUPPORT_CACHED_SYSMEM 0:0x08 +#define NV0080_CTRL_FB_CAPS_SUPPORT_C24_COMPRESSION 0:0x10 // Deprecated +#define NV0080_CTRL_FB_CAPS_SUPPORT_SYSMEM_COMPRESSION 0:0x20 +#define NV0080_CTRL_FB_CAPS_NISO_CFG0_BUG_534680 0:0x40 // Deprecated +#define NV0080_CTRL_FB_CAPS_ISO_FETCH_ALIGN_BUG_561630 0:0x80 // Deprecated + +#define NV0080_CTRL_FB_CAPS_BLOCKLINEAR_GOBS_512 1:0x01 +#define NV0080_CTRL_FB_CAPS_L2_TAG_BUG_632241 1:0x02 +#define NV0080_CTRL_FB_CAPS_SINGLE_FB_UNIT 1:0x04 // Deprecated +#define NV0080_CTRL_FB_CAPS_CE_RMW_DISABLE_BUG_897745 1:0x08 // Deprecated +#define NV0080_CTRL_FB_CAPS_OS_OWNS_HEAP_NEED_ECC_SCRUB 1:0x10 +#define NV0080_CTRL_FB_CAPS_ASYNC_CE_L2_BYPASS_SET 1:0x20 // Deprecated +#define NV0080_CTRL_FB_CAPS_DISABLE_TILED_CACHING_INVALIDATES_WITH_ECC_BUG_1521641 1:0x40 +#define NV0080_CTRL_FB_CAPS_GENERIC_PAGE_KIND 1:0x80 + +#define NV0080_CTRL_FB_CAPS_DISABLE_MSCG_WITH_VR_BUG_1681803 2:0x01 +#define NV0080_CTRL_FB_CAPS_VIDMEM_ALLOCS_ARE_CLEARED 2:0x02 +#define NV0080_CTRL_FB_CAPS_DISABLE_PLC_GLOBALLY 2:0x04 +#define NV0080_CTRL_FB_CAPS_PLC_BUG_3046774 2:0x08 +#define NV0080_CTRL_FB_CAPS_PARTIAL_UNMAP 2:0x10 + + +/* size in bytes of fb caps table */ +#define NV0080_CTRL_FB_CAPS_TBL_SIZE 3 + + + +/*! + * NV0080_CTRL_CMD_FB_COMPBIT_STORE_GET_INFO + * + * This command returns compbit backing store-related information. + * + * size + * [out] Size of compbit store, in bytes + * address + * [out] Address of compbit store + * addressSpace + * [out] Address space of compbit store (corresponds to type NV_ADDRESS_SPACE in nvrm.h) + * maxCompbitLine + * [out] Maximum compbitline possible, determined based on size + * comptagsPerCacheLine + * [out] Number of compression tags per compression cache line, across all + * L2 slices. + * cacheLineSize + * [out] Size of compression cache line, across all L2 slices. (bytes) + * cacheLineSizePerSlice + * [out] Size of the compression cache line per slice (bytes) + * cacheLineFetchAlignment + * [out] Alignment used while fetching the compression cacheline range in FB. + * If start offset of compcacheline in FB is S and end offset is E, then + * the range to fetch to ensure entire compcacheline data is extracted is: + * (align_down(S) , align_up(E)) + * This is needed in GM20X+ because of interleaving of data in Linear FB space. + * Example - In GM204 every other 1K FB chunk of data is offset by 16K. + * backingStoreBase + * [out] Address of start of Backing Store in linear FB Physical Addr space. + * This is the actual offset in FB which HW starts using as the Backing Store and + * in general will be different from the start of the region that driver allocates + * as the backing store. 
This address is expected to be 2K aligned.
+ *   gobsPerComptagPerSlice
+ *     [out] (Only on Pascal) Number of GOBs (512 bytes of surface PA) that correspond to one 64KB comptagline, per slice.
+ *     One GOB stores 1 byte of compression bits.
+ *     A value of 0 means this field is not applicable for the current architecture.
+ *   backingStoreCbcBase
+ *     [out] 2KB aligned base address of CBC (post divide address)
+ *   comptaglineAllocationPolicy
+ *     [out] Policy used to allocate comptagline from CBC for the device
+ *   privRegionStartOffset
+ *     [out] Starting offset for any priv region allocated by clients. Only used by MODS.
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO (0x801306) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FB_INTERFACE_ID << 8) | NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 Size, 8);
+    NV_DECLARE_ALIGNED(NvU64 Address, 8);
+    NvU32 AddressSpace;
+    NvU32 MaxCompbitLine;
+    NvU32 comptagsPerCacheLine;
+    NvU32 cacheLineSize;
+    NvU32 cacheLineSizePerSlice;
+    NvU32 cacheLineFetchAlignment;
+    NV_DECLARE_ALIGNED(NvU64 backingStoreBase, 8);
+    NvU32 gobsPerComptagPerSlice;
+    NvU32 backingStoreCbcBase;
+    NvU32 comptaglineAllocationPolicy;
+    NV_DECLARE_ALIGNED(NvU64 privRegionStartOffset, 8);
+    NvU32 cbcCoveragePerSlice;
+} NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS;
+
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_ADDRESS_SPACE_UNKNOWN 0 // ADDR_UNKNOWN
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_ADDRESS_SPACE_SYSMEM  1 // ADDR_SYSMEM
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_ADDRESS_SPACE_FBMEM   2 // ADDR_FBMEM
+
+// Policy used to allocate comptaglines
+/**
+ * Legacy mode allocates a comptagline for a 64KB page. This mode will always allocate
+ * contiguous comptaglines from a ctag heap.
+ */
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_POLICY_LEGACY 0
+/**
+ * 1TO1 mode allocates a comptagline for a 64KB page. This mode will calculate
+ * the comptagline offset based on physical address. This mode will allocate
+ * contiguous comptaglines if the surface is contiguous and non-contiguous
+ * comptaglines for non-contiguous surfaces.
+ */
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_POLICY_1TO1 1
+/**
+ * 1TO4_Heap mode allocates a comptagline at 256KB page granularity. This mode
+ * will allocate comptaglines from a heap. This mode will align the surface allocations
+ * to 256KB before allocating comptaglines. The comptaglines allocated will always be
+ * contiguous here.
+ * TODO: For GA10x, this mode will support < 256KB surface allocations, by sharing
+ * a comptagline with at most 3 different 64KB allocations. This will result in a
+ * mixed-contiguity config where comptaglines will be allocated contiguously as well
+ * as non-contiguously when shared with other allocations.
+ */
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_POLICY_1TO4 2
+/**
+ * Rawmode will transfer allocation of comptaglines to HW, where HW manages
+ * comptaglines based on physical offset. The comptaglines are cleared when SW
+ * issues a physical/virtual scrub to the surface before reuse.
+ */
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_POLICY_RAWMODE 3
+
+/**
+ * NV0080_CTRL_CMD_FB_GET_CAPS_V2
+ *
+ * This command returns the same set of framebuffer capabilities for the
+ * device as @ref NV0080_CTRL_CMD_FB_GET_CAPS.
The difference is in the structure + * NV0080_CTRL_FB_GET_CAPS_V2_PARAMS, which contains a statically sized array, + * rather than a caps table pointer and a caps table size in + * NV0080_CTRL_FB_GET_CAPS_PARAMS. + * + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the framebuffer caps bits will be written by the RM. + * The caps table is an array of unsigned bytes. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_POINTER + */ +#define NV0080_CTRL_CMD_FB_GET_CAPS_V2 (0x801307) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FB_INTERFACE_ID << 8) | NV0080_CTRL_FB_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FB_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV0080_CTRL_FB_GET_CAPS_V2_PARAMS { + NvU8 capsTbl[NV0080_CTRL_FB_CAPS_TBL_SIZE]; +} NV0080_CTRL_FB_GET_CAPS_V2_PARAMS; + +/** + * NV0080_CTRL_CMD_FB_SET_DEFAULT_VIDMEM_PHYSICALITY + * + * When clients allocate video memory specifying _DEFAULT (0) for + * NVOS32_ATTR_PHYSICALITY, RM will usually allocate contiguous memory. + * Clients can change that behavior with this command so that _DEFAULT maps to + * another value. + * + * The expectation is that clients currently implicitly rely on the default, + * but can be incrementally updated to explicitly specify _CONTIGUOUS where + * necessary and change the default for their allocations to _NONCONTIGUOUS or + * _ALLOW_NONCONTIGUOUS. + * + * In the future RM may be updated to globally default to _NONCONTIGUOUS or + * _ALLOW_NONCONTIGUOUS, and at that point this can be removed. + */ +#define NV0080_CTRL_CMD_FB_SET_DEFAULT_VIDMEM_PHYSICALITY (0x801308) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FB_INTERFACE_ID << 8) | NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS { + NvU32 value; +} NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS; + +typedef enum NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY { + NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_DEFAULT = 0, + NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_NONCONTIGUOUS = 1, + NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_CONTIGUOUS = 2, + NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_ALLOW_NONCONTIGUOUS = 3, +} NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY; + + +/* _ctrl0080fb_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h new file mode 100644 index 0000000..16b4289 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h @@ -0,0 +1,423 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0080/ctrl0080fifo.finn
+//
+
+#include "ctrl/ctrl0080/ctrl0080base.h"
+
+/* NV01_DEVICE_XX/NV03_DEVICE fifo control commands and parameters */
+
+/**
+ * NV0080_CTRL_FIFO_GET_CAPS
+ *
+ * This command returns the set of FIFO engine capabilities for the device
+ * in the form of an array of unsigned bytes. FIFO capabilities
+ * include supported features and required workarounds for the FIFO
+ * engine(s) within the device, each represented by a byte offset into the
+ * table and a bit position within that byte.
+ *
+ * capsTblSize
+ *   This parameter specifies the size in bytes of the caps table.
+ *   This value should be set to NV0080_CTRL_FIFO_CAPS_TBL_SIZE.
+ * capsTbl
+ *   This parameter specifies a pointer to the client's caps table buffer
+ *   into which the FIFO caps bits will be transferred by the RM.
+ *   The caps table is an array of unsigned bytes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0080_CTRL_CMD_FIFO_GET_CAPS (0x801701) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_GET_CAPS_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0080_CTRL_FIFO_GET_CAPS_PARAMS {
+    NvU32 capsTblSize;
+    NV_DECLARE_ALIGNED(NvP64 capsTbl, 8);
+} NV0080_CTRL_FIFO_GET_CAPS_PARAMS;
+
+/* extract cap bit setting from tbl */
+#define NV0080_CTRL_FIFO_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c))
+
+/* caps format is byte_index:bit_mask */
+#define NV0080_CTRL_FIFO_CAPS_SUPPORT_SCHED_EVENT              0:0x01
+#define NV0080_CTRL_FIFO_CAPS_SUPPORT_PCI_PB                   0:0x02
+#define NV0080_CTRL_FIFO_CAPS_SUPPORT_VID_PB                   0:0x04
+#define NV0080_CTRL_FIFO_CAPS_USERD_IN_SYSMEM                  0:0x40
+/* do not use pipelined PTE BLITs to update PTEs: call the RM */
+#define NV0080_CTRL_FIFO_CAPS_NO_PIPELINED_PTE_BLIT            0:0x80
+#define NV0080_CTRL_FIFO_CAPS_GPU_MAP_CHANNEL                  1:0x01
+#define NV0080_CTRL_FIFO_CAPS_BUFFEREDMODE_SCHEDULING          1:0x02 // Deprecated
+#define NV0080_CTRL_FIFO_CAPS_WFI_BUG_898467                   1:0x08 // Deprecated
+#define NV0080_CTRL_FIFO_CAPS_HAS_HOST_LB_OVERFLOW_BUG_1667921 1:0x10
+/*
+ * To indicate Volta subcontext support with multiple VA spaces in a TSG.
+ * We are not using the "subcontext" tag for the property, since we also use
+ * subcontext to represent the pre-Volta SCG feature, which only allows a single
+ * VA space in a TSG.
+ */
+#define NV0080_CTRL_FIFO_CAPS_MULTI_VAS_PER_CHANGRP            1:0x20
+
+
+#define NV0080_CTRL_FIFO_CAPS_SUPPORT_WDDM_INTERLEAVING        1:0x40
+
+/* size in bytes of fifo caps table */
+#define NV0080_CTRL_FIFO_CAPS_TBL_SIZE 2
+
+/*
+ * NV0080_CTRL_CMD_FIFO_GET_ENGINE_CONTEXT_PROPERTIES
+ *
+ * This command is used to provide the caller with the alignment and size
+ * of the context save region for an engine.
+ *
+ * engineId
+ *   This parameter is an input parameter specifying the engineId for which
+ *   the alignment/size is requested.
+ * alignment + * This parameter is an output parameter which will be filled in with the + * minimum alignment requirement. + * size + * This parameter is an output parameter which will be filled in with the + * minimum size of the context save region for the engine. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0080_CTRL_CMD_FIFO_GET_ENGINE_CONTEXT_PROPERTIES (0x801707) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0 +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD (0x00000001) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO (0x00000002) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG (0x00000003) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE (0x00000004) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY (0x00000005) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION (0x00000006) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS (0x00000007) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL (0x00000008) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM (0x00000009) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT (0x0000000a) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT (0x0000000b) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL (0x0000000c) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL (0x0000000d) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB (0x0000000e) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV (0x0000000f) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH (0x00000010) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB (0x00000011) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB (0x00000013) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL (0x00000014) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL (0x00000015) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK (0x00000016) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT (0x00000017) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SETUP (0x00000019) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x0000001a) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS { + NvU32 engineId; + NvU32 alignment; + NvU32 size; +} NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS; + +/* + * NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST + * + * Takes a list of hChannels as 
input and returns the corresponding Channel IDs that they correspond to in HW.
+ *
+ * numChannels
+ *   Size of the input hChannelList
+ * pChannelHandleList
+ *   List of input channel handles
+ * pChannelList
+ *   List of Channel IDs corresponding to each entry in the
+ *   hChannelList.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST (0x80170d) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS_MESSAGE_ID (0xDU)
+
+typedef struct NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS {
+    NvU32 numChannels;
+    NV_DECLARE_ALIGNED(NvP64 pChannelHandleList, 8);
+    NV_DECLARE_ALIGNED(NvP64 pChannelList, 8);
+} NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS;
+
+
+/*
+ * NV0080_CTRL_CMD_FIFO_GET_LATENCY_BUFFER_SIZE
+ *
+ * This control call is used to return the number of GP methods (gpsize) and
+ * push buffer methods (pbsize) allocated to each engine.
+ *
+ * engineID
+ *   The engine ID, which is an input
+ *
+ * gpEntries
+ *   number of gp entries
+ *
+ * pbEntries
+ *   number of pb entries (in units of 32B rows)
+ *
+ */
+
+
+#define NV0080_CTRL_CMD_FIFO_GET_LATENCY_BUFFER_SIZE (0x80170e) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS_MESSAGE_ID (0xEU)
+
+typedef struct NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS {
+    NvU32 engineID;
+    NvU32 gpEntries;
+    NvU32 pbEntries;
+} NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS;
+
+#define NV0080_CTRL_FIFO_GET_CHANNELLIST_INVALID_CHANNEL (0xffffffff)
+
+/*
+ * NV0080_CTRL_CMD_FIFO_SET_CHANNEL_PROPERTIES
+ *
+ * This command allows internal properties of the channel
+ * to be modified even when the channel is active. Most of these properties
+ * are not meant to be modified during normal runs, hence they have been
+ * kept separate from the channel alloc params. It is the
+ * responsibility of the underlying hal routine to make
+ * sure the channel properties are changed while the channel
+ * is *NOT* in a transient state.
+ *
+ * hChannel
+ *   The handle to the channel.
+ *
+ * property
+ *   The channel property to be modified.
+ *   NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_xxx provides the entire list
+ *   of properties.
+ *
+ * value
+ *   The new value for the property.
+ *   When property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_ENGINETIMESLICEINMICROSECONDS
+ *     value = timeslice in microseconds
+ *     desc: Used to change a channel's engine timeslice in microseconds
+ *
+ *   property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PBDMATIMESLICEINMICROSECONDS
+ *     value = timeslice in microseconds
+ *     desc: Used to change a channel's pbdma timeslice in microseconds
+ *
+ *   property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_ENGINETIMESLICEDISABLE
+ *     value is ignored
+ *     desc: Disables a channel from being timesliced out from an engine.
+ *     Other scheduling events like explicit yield, acquire failures will
+ *     switch out the channel though.
+ *
+ *   property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PBDMATIMESLICEDISABLE
+ *     value is ignored
+ *     desc: Disables a channel from being timesliced out from its pbdma.
+ *
+ *   property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_INVALIDATE_PDB_TARGET
+ *     value is ignored
+ *     desc: Override the channel's page directory pointer table with an
+ *     erroneous aperture value. (TODO: make test calls NV_VERIF_FEATURES
+ *     only) (VERIF ONLY)
+ *
+ *   property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_RESETENGINECONTEXT
+ *     value = engineID of engine that will have its context pointer reset.
+ *     engineID defines can be found in cl2080.h
+ *     (e.g., NV2080_ENGINE_TYPE_GRAPHICS)
+ *     desc: Override the channel's engine context pointer with a non-existent
+ *     buffer, forcing it to fault. (VERIF ONLY)
+ *
+ *   property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_RESETENGINECONTEXT_NOPREEMPT
+ *     value = engineID of engine that will have its context pointer reset.
+ *     engineID defines can be found in cl2080.h
+ *     (e.g., NV2080_ENGINE_TYPE_GRAPHICS)
+ *     desc: Override the channel's engine context pointer with a non-existent
+ *     buffer, forcing it to fault. However, the channel will not be preempted
+ *     before having its channel state modified. (VERIF ONLY)
+ *
+ *   property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_NOOP
+ *     value is ignored
+ *     desc: does not change any channel state; exercises a full channel
+ *     preempt/unbind/bind op. (VERIF ONLY)
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_CHANNEL
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV0080_CTRL_CMD_FIFO_SET_CHANNEL_PROPERTIES (0x80170f) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS_MESSAGE_ID (0xFU)
+
+typedef struct NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS {
+    NvHandle hChannel;
+    NvU32    property;
+    NV_DECLARE_ALIGNED(NvU64 value, 8);
+} NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS;
+
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_ENGINETIMESLICEINMICROSECONDS (0x00000000)
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PBDMATIMESLICEINMICROSECONDS  (0x00000001)
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_ENGINETIMESLICEDISABLE        (0x00000002)
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PBDMATIMESLICEDISABLE         (0x00000003)
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_INVALIDATE_PDB_TARGET         (0x00000004)
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_RESETENGINECONTEXT            (0x00000005)
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_NOOP                          (0x00000007)
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_RESETENGINECONTEXT_NOPREEMPT  (0x00000008)
+
+
+
+/*
+ * NV0080_CTRL_CMD_FIFO_STOP_RUNLIST
+ *
+ * Stops all processing on the runlist for the given engine. This is only
+ * valid in per-engine round-robin scheduling mode.
+ *
+ * engineID
+ *   This parameter specifies the engine to be stopped. Engine defines
+ *   can be found in cl2080.h.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV0080_CTRL_CMD_FIFO_STOP_RUNLIST (0x801711) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS_MESSAGE_ID (0x11U)
+
+typedef struct NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS {
+    NvU32 engineID;
+} NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_FIFO_START_RUNLIST
+ *
+ * Restarts a runlist previously stopped with NV0080_CTRL_CMD_FIFO_STOP_RUNLIST.
+ * This is only valid for per-engine round-robin mode.
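+ *
+ * For illustration, a stop/start pairing might look like the following
+ * sketch (assuming the usual NvRmControl entry point and the
+ * NV2080_ENGINE_TYPE_GRAPHICS define from cl2080.h):
+ *
+ *     NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS stopParams = { 0 };
+ *     NV0080_CTRL_FIFO_START_RUNLIST_PARAMS startParams = { 0 };
+ *     NvU32 status;
+ *     stopParams.engineID = NV2080_ENGINE_TYPE_GRAPHICS;
+ *     status = NvRmControl(hClient, hDevice, NV0080_CTRL_CMD_FIFO_STOP_RUNLIST,
+ *                          &stopParams, sizeof(stopParams));
+ *     // ... runlist is quiesced; reprogram state here ...
+ *     startParams.engineID = NV2080_ENGINE_TYPE_GRAPHICS;
+ *     status = NvRmControl(hClient, hDevice, NV0080_CTRL_CMD_FIFO_START_RUNLIST,
+ *                          &startParams, sizeof(startParams));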
+ * + * engineID + * This parameter specifies the engine to be started. Engine defines + * can be found in cl2080.h. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV0080_CTRL_CMD_FIFO_START_RUNLIST (0x801712) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_START_RUNLIST_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FIFO_START_RUNLIST_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NV0080_CTRL_FIFO_START_RUNLIST_PARAMS { + NvU32 engineID; +} NV0080_CTRL_FIFO_START_RUNLIST_PARAMS; + +/** + * NV0080_CTRL_FIFO_GET_CAPS_V2 + * + * This command returns the same set of FIFO engine capabilities for the device + * as @ref NV0080_CTRL_FIFO_GET_CAPS. The difference is in the structure + * NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS, which contains a statically sized array, + * rather than a caps table pointer and a caps table size in + * NV0080_CTRL_FIFO_GET_CAPS_PARAMS. + * + * capsTbl + * This parameter is an array of the client's caps table buffer. + * The framebuffer caps bits will be written by the RM. + * The caps table is an array of unsigned bytes. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_FIFO_GET_CAPS_V2 (0x801713) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS { + NvU8 capsTbl[NV0080_CTRL_FIFO_CAPS_TBL_SIZE]; +} NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS; + +/** + * NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS + * + * @brief This command idles (deschedules and waits for pending work to complete) channels + * belonging to a particular device. + * + * numChannels + * Number of channels to idle + * + * hChannels + * Array of channel handles to idle + * + * flags + * NVOS30_FLAGS that control aspects of how the channel is idled + * + * timeout + * GPU timeout in microseconds, for each CHID Manager's idling operation + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_TIMEOUT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_LOCK_STATE + */ +#define NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS (0x801714) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS_MESSAGE_ID" */ +#define NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS_MAX_CHANNELS 4096 + +#define NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS_MESSAGE_ID (0x14U) + +typedef struct NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS { + NvU32 numChannels; + NvHandle hChannels[NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS_MAX_CHANNELS]; + NvU32 flags; + NvU32 timeout; +} NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS; + +/* _ctrl0080fifo_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h new file mode 100644 index 0000000..9ab821b --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h @@ -0,0 +1,656 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0080/ctrl0080gpu.finn
+//
+
+#include "ctrl/ctrl0080/ctrl0080base.h"
+#include "nvlimits.h"
+
+
+/* NV01_DEVICE_XX/NV03_DEVICE gpu control commands and parameters */
+
+/*
+ * NV0080_CTRL_CMD_GPU_GET_CLASSLIST
+ *
+ * This command returns supported class information for the specified device.
+ * If the device comprises more than one GPU, the class list represents
+ * the set of supported classes common to all GPUs within the device.
+ *
+ * It has two modes:
+ *
+ * If the classList pointer is NULL, then this command returns the number
+ * of classes supported by the device in the numClasses field. The value
+ * should then be used by the client to allocate a classList buffer
+ * large enough to hold one 32-bit value per numClasses entry.
+ *
+ * If the classList pointer is non-NULL, then this command returns the
+ * set of supported class numbers in the specified buffer.
+ *
+ * numClasses
+ *   If classList is NULL, then this parameter will return the
+ *   number of classes supported by the device. If classList is non-NULL,
+ *   then this parameter indicates the number of entries in classList.
+ * classList
+ *   This parameter specifies a pointer to the client's buffer into
+ *   which the supported class numbers should be returned.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV0080_CTRL_CMD_GPU_GET_CLASSLIST (0x800201) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS {
+    NvU32 numClasses;
+    NV_DECLARE_ALIGNED(NvP64 classList, 8);
+} NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS;
+
+/**
+ * NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES
+ *
+ * This command returns the number of subdevices for the device.
+ *
+ * numSubDevices
+ *   This parameter returns the number of subdevices within the device.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES (0x800280) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS_MESSAGE_ID (0x80U) + +typedef struct NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS { + NvU32 numSubDevices; +} NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS; + +/* + * NV0080_CTRL_CMD_GPU_GET_VIDLINK_ORDER + * + * This command returns the video link order of each subdevice id inside the + * device. This call can only be made after SLI is enabled. This call is + * intended for 3D clients to use to determine the vidlink order of the + * devices. The Display Output Parent will always be the first subdevice + * mask listed in the array. Note that this command should not be used in + * case of bridgeless SLI. The order of the subdevices returned by this + * command will not be correct in case of bridgeless SLI. + * + * ConnectionCount + * Each HW can provide 1 or 2 links between all GPUs in a device. This + * number tells how many links are available between GPUs. This data + * also represents the number of concurrent SLI heads that can run at + * the same time over this one device. + * + * Order + * This array returns the order of subdevices that are used through + * the vidlink for display output. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV0080_CTRL_CMD_GPU_GET_VIDLINK_ORDER (0x800281) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS_MESSAGE_ID (0x81U) + +typedef struct NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS { + NvU32 ConnectionCount; + NvU32 Order[NV_MAX_SUBDEVICES]; +} NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS; + +/* + * NV0080_CTRL_CMD_GPU_SET_DISPLAY_OWNER + * + * This command sets display ownership within the device to the specified + * subdevice instance. The actual transfer of display ownership will take + * place at the next modeset. + * + * subDeviceInstance + * This member specifies the subdevice instance of the new display + * owner. The subdevice instance must be in the legal range + * indicated by the NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES command. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV0080_CTRL_CMD_GPU_SET_DISPLAY_OWNER (0x800282) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_SET_DISPLAY_OWNER_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_SET_DISPLAY_OWNER_PARAMS_MESSAGE_ID (0x82U) + +typedef struct NV0080_CTRL_GPU_SET_DISPLAY_OWNER_PARAMS { + NvU32 subDeviceInstance; +} NV0080_CTRL_GPU_SET_DISPLAY_OWNER_PARAMS; + +/* + * NV0080_CTRL_CMD_GPU_GET_DISPLAY_OWNER + * + * This command returns the subdevice instance of the current display owner + * within the device. + * + * subDeviceInstance + * This member returns the subdevice instance of the current display + * owner. The subdevice instance will be in the legal range + * indicated by the NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES command. 
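+ *
+ * For illustration (a sketch, assuming the usual NvRmControl entry point):
+ *
+ *     NV0080_CTRL_GPU_GET_DISPLAY_OWNER_PARAMS params = { 0 };
+ *     NvU32 status;
+ *     status = NvRmControl(hClient, hDevice,
+ *                          NV0080_CTRL_CMD_GPU_GET_DISPLAY_OWNER,
+ *                          &params, sizeof(params));
+ *     // On NV_OK, params.subDeviceInstance is the current display owner.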
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV0080_CTRL_CMD_GPU_GET_DISPLAY_OWNER (0x800283) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_DISPLAY_OWNER_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_GET_DISPLAY_OWNER_PARAMS_MESSAGE_ID (0x83U) + +typedef struct NV0080_CTRL_GPU_GET_DISPLAY_OWNER_PARAMS { + NvU32 subDeviceInstance; +} NV0080_CTRL_GPU_GET_DISPLAY_OWNER_PARAMS; + +/* + * NV0080_CTRL_CMD_GPU_SET_VIDLINK + * + * This command enables or disables the VIDLINK of all subdevices in the + * current SLI configuration. + * + * enable + * Enables or disables the vidlink + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV0080_CTRL_CMD_GPU_SET_VIDLINK (0x800285) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_SET_VIDLINK_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_SET_VIDLINK_PARAMS_MESSAGE_ID (0x85U) + +typedef struct NV0080_CTRL_GPU_SET_VIDLINK_PARAMS { + NvU32 enable; +} NV0080_CTRL_GPU_SET_VIDLINK_PARAMS; + +#define NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_FALSE (0x00000000) +#define NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_TRUE (0x00000001) + +/* commands */ +#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_GET_STATUS 0 +#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_POWERDOWN 1 +#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_POWERUP 2 + +/* status */ +#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_STATUS_POWER_ON 0 +#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_STATUS_POWERING_DOWN 1 +#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_STATUS_GATED 2 +#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_STATUS_POWERING_UP 3 + +/* + * NV0080_CTRL_CMD_GPU_MODIFY_SW_STATE_PERSISTENCE + * + * This command is used to enable or disable the persistence of a GPU's + * software state when no clients exist. With persistent software state enabled + * the GPU's software state is not torn down when the last client exits, but is + * retained until either the kernel module unloads or persistent software state + * is disabled. + * + * newState + * This input parameter is used to enable or disable the persistence of the + * software state of all subdevices within the device. + * Possible values are: + * NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED + * NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0080_CTRL_CMD_GPU_MODIFY_SW_STATE_PERSISTENCE (0x800287) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS_MESSAGE_ID" */ + +/* Possible values of persistentSwState */ +#define NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED (0x00000000) +#define NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED (0x00000001) + +#define NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS_MESSAGE_ID (0x87U) + +typedef struct NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS { + NvU32 newState; +} NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS; + +/* + * NV0080_CTRL_CMD_GPU_QUERY_SW_STATE_PERSISTENCE + * + * swStatePersistence + * This parameter returns a value indicating if persistent software + * state is currently enabled or not for the specified GPU. See the + * description of NV0080_CTRL_CMD_GPU_MODIFY_SW_STATE_PERSISTENCE. 
+ * Possible values are:
+ * NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED
+ * NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0080_CTRL_CMD_GPU_QUERY_SW_STATE_PERSISTENCE (0x800288) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS_MESSAGE_ID (0x88U)
+
+typedef struct NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS {
+ NvU32 swStatePersistence;
+} NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS;
+
+/**
+ * NV0080_CTRL_CMD_GPU_GET_VIRTUALIZATION_MODE
+ *
+ * This command returns a value indicating the virtualization mode in
+ * which the GPU is running.
+ *
+ * virtualizationMode
+ * This parameter returns the virtualization mode of the device.
+ * Possible values are:
+ * NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NONE
+ * This value indicates that there is no virtualization mode associated with the
+ * device (i.e. it's a baremetal GPU).
+ * NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NMOS
+ * This value indicates that the device is associated with NMOS.
+ * NV0080_CTRL_GPU_VIRTUALIZATION_MODE_VGX
+ * This value indicates that the device is associated with VGX (guest GPU).
+ * NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST
+ * NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VGPU
+ * This value indicates that the device is associated with vGPU (host GPU).
+ * NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VSGA
+ * This value indicates that the device is associated with vSGA (host GPU).
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0080_CTRL_CMD_GPU_GET_VIRTUALIZATION_MODE (0x800289) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NONE (0x00000000)
+#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NMOS (0x00000001)
+#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_VGX (0x00000002)
+#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST (0x00000003)
+#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VGPU NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST
+#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VSGA (0x00000004)
+
+#define NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS_MESSAGE_ID (0x89U)
+
+typedef struct NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS {
+ NvU32 virtualizationMode;
+ NvBool isGridBuild;
+} NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE
+ *
+ * This command returns the setting information for sparse texture compute
+ * mode optimization on the associated GPU. This setting indicates how the
+ * large page size should be selected by the RM for the GPU.
+ *
+ * defaultSetting
+ * This field specifies what the OS default setting is for the associated
+ * GPU. See NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE for a list
+ * of possible values.
+ * currentSetting
+ * This field specifies which optimization mode was applied when the
+ * driver was loaded. See
+ * NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE for a list of
+ * possible values.
+ * pendingSetting
+ * This field specifies which optimization mode will be applied on the
+ * next driver reload. See
+ * NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE for a list of
+ * possible values.
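+ *
+ * A minimal GET sketch (hClient/hDevice are hypothetical handles, and
+ * NvRmControl() is assumed to be the RMAPI entry point):
+ *
+ *   NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS modeParams = { 0 };
+ *   status = NvRmControl(hClient, hDevice,
+ *                        NV0080_CTRL_CMD_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE,
+ *                        &modeParams, sizeof(modeParams));
+ *   if (status == NV_OK && modeParams.pendingSetting != modeParams.currentSetting)
+ *   {
+ *       // A different optimization mode takes effect on the next driver reload.
+ *   }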
+ *
+ * Possible status values returned are:
+ * NV_OK
+ */
+#define NV0080_CTRL_CMD_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE (0x80028c) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS_MESSAGE_ID (0x8CU)
+
+typedef struct NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS {
+ NvU32 defaultSetting;
+ NvU32 currentSetting;
+ NvU32 pendingSetting;
+} NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE
+ *
+ * This command sets the pending setting for sparse texture compute mode. This
+ * setting indicates how the large page size should be selected by the RM for
+ * the GPU on the next driver reload.
+ *
+ * setting
+ * This field specifies which use case the RM should optimize the large
+ * page size for on the next driver reload. Possible values for this
+ * field are:
+ * NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_DEFAULT
+ * This value indicates that the RM should use the default setting for
+ * the GPU's large page size. The default setting is reported by
+ * NV0080_CTRL_CMD_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE.
+ * NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_COMPUTE
+ * This value indicates that the RM should select the GPU's large page
+ * size to optimize for compute use cases.
+ * NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_SPARSE_TEXTURE
+ * This value indicates that the RM should select the GPU's large page
+ * size to optimize for sparse texture use cases.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INSUFFICIENT_PERMISSIONS
+ */
+#define NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE (0x80028d) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS_MESSAGE_ID (0x8DU)
+
+typedef struct NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS {
+ NvU32 setting;
+} NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS;
+
+/* Possible sparse texture compute mode setting values */
+#define NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_DEFAULT 0
+#define NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_COMPUTE 1
+#define NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_SPARSE_TEXTURE 2
+
+/*
+ * NV0080_CTRL_CMD_GPU_GET_VGX_CAPS
+ *
+ * This command gets the VGX capability of the GPU depending on the status of
+ * the VGX hardware fuse.
+ *
+ * isVgx
+ * This field is set to NV_TRUE if the VGX fuse is enabled for the GPU;
+ * otherwise it is set to NV_FALSE.
+ *
+ * Possible status values returned are:
+ * NVOS_STATUS_SUCCESS
+ * NVOS_STATUS_ERROR_NOT_SUPPORTED
+ */
+#define NV0080_CTRL_CMD_GPU_GET_VGX_CAPS (0x80028e) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS_MESSAGE_ID (0x8EU)
+
+typedef struct NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS {
+ NvBool isVgx;
+} NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS;
+
+
+
+/*
+ * NV0080_CTRL_CMD_GPU_GET_SRIOV_CAPS
+ *
+ * This command is used to query GPU SR-IOV capabilities.
+ *
+ * totalVFs
+ * Total number of virtual functions supported.
+ *
+ * firstVfOffset
+ * Offset of the first VF.
+ *
+ * vfFeatureMask
+ * Bitmask of features managed by the guest.
+ *
+ * FirstVFBar0Address
+ * Address of BAR0 region of first VF.
+ *
+ * FirstVFBar1Address
+ * Address of BAR1 region of first VF.
+ *
+ * FirstVFBar2Address
+ * Address of BAR2 region of first VF.
+ *
+ * bar0Size
+ * Size of BAR0 region on VF.
+ *
+ * bar1Size
+ * Size of BAR1 region on VF.
+ *
+ * bar2Size
+ * Size of BAR2 region on VF.
+ *
+ * b64bitBar0
+ * If the VF BAR0 is 64-bit addressable.
+ *
+ * b64bitBar1
+ * If the VF BAR1 is 64-bit addressable.
+ *
+ * b64bitBar2
+ * If the VF BAR2 is 64-bit addressable.
+ *
+ * bSriovEnabled
+ * Flag for SR-IOV enabled or not.
+ *
+ * bSriovHeavyEnabled
+ * Flag for whether SR-IOV is enabled in standard or heavy mode.
+ *
+ * bEmulateVFBar0TlbInvalidationRegister
+ * Flag for whether VF's TLB Invalidate Register region needs emulation.
+ *
+ * bClientRmAllocatedCtxBuffer
+ * Flag for whether engine ctx buffer is managed by client RM.
+ *
+ * bNonPowerOf2ChannelCountSupported
+ * Flag for whether non power of 2 VF channels are supported.
+ *
+ * bVfResizableBAR1Supported
+ * Flag for whether Resizable VF BAR1 capability is supported.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV0080_CTRL_CMD_GPU_GET_SRIOV_CAPS (0x800291) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS_MESSAGE_ID (0x91U)
+
+typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+ NvU32 totalVFs;
+ NvU32 firstVfOffset;
+ NvU32 vfFeatureMask;
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+ NvBool bSriovEnabled;
+ NvBool bSriovHeavyEnabled;
+ NvBool bEmulateVFBar0TlbInvalidationRegister;
+ NvBool bClientRmAllocatedCtxBuffer;
+ NvBool bNonPowerOf2ChannelCountSupported;
+ NvBool bVfResizableBAR1Supported;
+} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
+
+
+// Update this macro if new HW exceeds GPU Classlist MAX_SIZE
+#define NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE 200
+
+#define NV0080_CTRL_CMD_GPU_GET_CLASSLIST_V2 (0x800292) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS_MESSAGE_ID (0x92U)
+
+typedef struct NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS {
+ NvU32 numClasses; // __OUT__
+ NvU32 classList[NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE]; // __OUT__
+} NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_GPU_FIND_SUBDEVICE_HANDLE
+ *
+ * Find a subdevice handle allocated under this device
+ */
+#define NV0080_CTRL_CMD_GPU_FIND_SUBDEVICE_HANDLE (0x800293) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM_MESSAGE_ID (0x93U)
+
+typedef struct NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM {
+ NvU32 subDeviceInst; // [in]
+ NvHandle hSubDevice; // [out]
+} NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM;
+
+/*
+ * NV0080_CTRL_CMD_GPU_GET_BRAND_CAPS
+ *
+ * This command gets branding information for the device.
+ *
+ * brands
+ * Mask containing branding information. A bit in this
+ * mask is set if the GPU has particular branding.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ */
+
+#define NV0080_CTRL_GPU_GET_BRAND_CAPS_QUADRO NVBIT(0)
+#define NV0080_CTRL_GPU_GET_BRAND_CAPS_NVS NVBIT(1)
+#define NV0080_CTRL_GPU_GET_BRAND_CAPS_TITAN NVBIT(2)
+
+#define NV0080_CTRL_CMD_GPU_GET_BRAND_CAPS (0x800294) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS_MESSAGE_ID (0x94U)
+
+typedef struct NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS {
+ NvU32 brands;
+} NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS;
+
+/*
+ * These are the per-VF BAR1 sizes that we support in MB.
+ */
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_64M (1 << 6)
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_128M (1 << 7)
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_256M (1 << 8)
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_512M (1 << 9)
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_1G (1 << 10)
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_2G (1 << 11)
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_4G (1 << 12)
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_8G (1 << 13)
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_16G (1 << 14)
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_32G (1 << 15)
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_64G (1 << 16)
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_128G (1 << 17)
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_256G (1 << 18)
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_MIN NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_64M
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_MAX NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_256G
+
+#define NV0080_CTRL_GPU_VGPU_NUM_VFS_INVALID 0x0
+
+/*
+ * NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE
+ *
+ * @brief Resize BAR1 per-VF on the given GPU
+ * vfBar1SizeMB[in] Requested VF BAR1 size in MB
+ * numVfs[out] Number of VFs that can be created
+ * given the requested BAR1 size
+ */
+#define NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE (0x800296) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS_MESSAGE_ID (0x96U)
+
+typedef struct NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS {
+ NvU32 vfBar1SizeMB;
+ NvU32 numVfs;
+} NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_GPU_SET_VGPU_HETEROGENEOUS_MODE
+ *
+ * This command sets a value indicating vGPU heterogeneous mode.
+ * vGPU heterogeneous mode on a GPU can only be set when the command
+ * is issued on a vGPU host device.
+ *
+ * gpuInstanceId
+ * GPU Instance ID or Swizz ID
+ *
+ * bHeterogeneousMode
+ * This parameter sets the vGPU heterogeneous mode of the device.
+ * Possible values are:
+ * NV_TRUE
+ * This value indicates that the device will be associated with vGPU heterogeneous mode.
+ * NV_FALSE
+ * This value indicates that the device will be removed from vGPU heterogeneous mode.
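+ *
+ * A minimal usage sketch (hClient/hDevice are hypothetical handles, and
+ * NvRmControl() is assumed to be the RMAPI entry point):
+ *
+ *   NV0080_CTRL_GPU_SET_VGPU_HETEROGENEOUS_MODE_PARAMS hetParams = { 0 };
+ *   hetParams.gpuInstanceId = 0;
+ *   hetParams.bHeterogeneousMode = NV_TRUE;
+ *   status = NvRmControl(hClient, hDevice,
+ *                        NV0080_CTRL_CMD_GPU_SET_VGPU_HETEROGENEOUS_MODE,
+ *                        &hetParams, sizeof(hetParams));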
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_IN_USE + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0080_CTRL_CMD_GPU_SET_VGPU_HETEROGENEOUS_MODE (0x800297) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_SET_VGPU_HETEROGENEOUS_MODE_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_SET_VGPU_HETEROGENEOUS_MODE_PARAMS_MESSAGE_ID (0x97U) + +typedef struct NV0080_CTRL_GPU_SET_VGPU_HETEROGENEOUS_MODE_PARAMS { + NvBool bHeterogeneousMode; + NvU32 gpuInstanceId; +} NV0080_CTRL_GPU_SET_VGPU_HETEROGENEOUS_MODE_PARAMS; + +/** + * NV0080_CTRL_CMD_GPU_GET_VGPU_HETEROGENEOUS_MODE + * + * This command returns a value indicating vGPU heterogeneous mode of + * the GPU. + * + * gpuInstanceId + * GPU Instance ID or Swizz ID + * + * bHeterogeneousMode + * This parameter returns the vGPU heterogeneous mode of the device. + * Possible values are: + * NV_TRUE + * This value indicates that the device is associated with vGPU heterogeneous mode. + * NV_FALSE + * This value indicates that the device is not in vGPU heterogeneous mode. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_GPU_GET_VGPU_HETEROGENEOUS_MODE (0x800298) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_VGPU_HETEROGENEOUS_MODE_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_GET_VGPU_HETEROGENEOUS_MODE_PARAMS_MESSAGE_ID (0x98U) + +typedef struct NV0080_CTRL_GPU_GET_VGPU_HETEROGENEOUS_MODE_PARAMS { + NvBool bHeterogeneousMode; + NvU32 gpuInstanceId; +} NV0080_CTRL_GPU_GET_VGPU_HETEROGENEOUS_MODE_PARAMS; + +/* _ctrl0080gpu_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h new file mode 100644 index 0000000..ab431a3 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h @@ -0,0 +1,292 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl0080/ctrl0080gr.finn
+//
+
+#include "ctrl/ctrl0080/ctrl0080base.h"
+#include "nvcfg_sdk.h"
+
+typedef struct NV0080_CTRL_GR_ROUTE_INFO {
+ NvU32 flags;
+ NV_DECLARE_ALIGNED(NvU64 route, 8);
+} NV0080_CTRL_GR_ROUTE_INFO;
+
+/* NV01_DEVICE_XX/NV03_DEVICE gr engine control commands and parameters */
+
+/**
+ * NV0080_CTRL_CMD_GR_GET_CAPS
+ *
+ * This command returns the set of graphics capabilities for the device
+ * in the form of an array of unsigned bytes. Graphics capabilities
+ * include supported features and required workarounds for the graphics
+ * engine(s) within the device, each represented by a byte offset into the
+ * table and a bit position within that byte.
+ *
+ * capsTblSize
+ * This parameter specifies the size in bytes of the caps table.
+ * This value should be set to NV0080_CTRL_GR_CAPS_TBL_SIZE.
+ * capsTbl
+ * This parameter specifies a pointer to the client's caps table buffer
+ * into which the graphics caps bits will be transferred by the RM.
+ * The caps table is an array of unsigned bytes.
+ */
+#define NV0080_CTRL_CMD_GR_GET_CAPS (0x801102) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | NV0080_CTRL_GR_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GR_GET_CAPS_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0080_CTRL_GR_GET_CAPS_PARAMS {
+ NvU32 capsTblSize;
+ NV_DECLARE_ALIGNED(NvP64 capsTbl, 8);
+} NV0080_CTRL_GR_GET_CAPS_PARAMS;
+
+/* extract cap bit setting from tbl */
+#define NV0080_CTRL_GR_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c))
+
+
+
+/*
+ * Size in bytes of gr caps table. This value should be one greater
+ * than the largest byte_index value above.
+ */
+#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
+
+
+
+/*
+ * NV0080_CTRL_CMD_GR_INFO
+ *
+ * This structure represents a single 32bit graphics engine value. Clients
+ * request a particular graphics engine value by specifying a unique graphics
+ * information index.
+ *
+ * Legal graphics information index values are:
+ * NV0080_CTRL_GR_INFO_INDEX_MAXCLIPS
+ * This index is used to request the number of clip IDs supported by
+ * the device.
+ * NV0080_CTRL_GR_INFO_INDEX_MIN_ATTRS_BUG_261894
+ * This index is used to request the minimum number of attributes that
+ * need to be enabled to avoid bug 261894. A return value of 0
+ * indicates that there is no minimum and the bug is not present on this
+ * system.
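+ *
+ * A minimal sketch of one list entry for the NV0080_CTRL_CMD_GR_GET_INFO
+ * command documented below, assuming the generic index/data pair layout of
+ * NVXXXX_CTRL_XXX_INFO from ctrl/ctrlxxxx.h:
+ *
+ *   NV0080_CTRL_GR_INFO info = { 0 };
+ *   info.index = NV0080_CTRL_GR_INFO_INDEX_MAXCLIPS;
+ *   // info.data holds the queried value once NV0080_CTRL_CMD_GR_GET_INFO
+ *   // completes successfully.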
+ */ +typedef NVXXXX_CTRL_XXX_INFO NV0080_CTRL_GR_INFO; + +/* valid graphics info index values */ +#define NV0080_CTRL_GR_INFO_INDEX_MAXCLIPS (0x00000000) +#define NV0080_CTRL_GR_INFO_INDEX_MIN_ATTRS_BUG_261894 (0x00000001) +#define NV0080_CTRL_GR_INFO_XBUF_MAX_PSETS_PER_BANK (0x00000002) +#define NV0080_CTRL_GR_INFO_INDEX_BUFFER_ALIGNMENT (0x00000003) +#define NV0080_CTRL_GR_INFO_INDEX_SWIZZLE_ALIGNMENT (0x00000004) +#define NV0080_CTRL_GR_INFO_INDEX_VERTEX_CACHE_SIZE (0x00000005) +#define NV0080_CTRL_GR_INFO_INDEX_VPE_COUNT (0x00000006) +#define NV0080_CTRL_GR_INFO_INDEX_SHADER_PIPE_COUNT (0x00000007) +#define NV0080_CTRL_GR_INFO_INDEX_THREAD_STACK_SCALING_FACTOR (0x00000008) +#define NV0080_CTRL_GR_INFO_INDEX_SHADER_PIPE_SUB_COUNT (0x00000009) +#define NV0080_CTRL_GR_INFO_INDEX_SM_REG_BANK_COUNT (0x0000000A) +#define NV0080_CTRL_GR_INFO_INDEX_SM_REG_BANK_REG_COUNT (0x0000000B) +#define NV0080_CTRL_GR_INFO_INDEX_SM_VERSION (0x0000000C) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_WARPS_PER_SM (0x0000000D) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_THREADS_PER_WARP (0x0000000E) +#define NV0080_CTRL_GR_INFO_INDEX_GEOM_GS_OBUF_ENTRIES (0x0000000F) +#define NV0080_CTRL_GR_INFO_INDEX_GEOM_XBUF_ENTRIES (0x00000010) +#define NV0080_CTRL_GR_INFO_INDEX_FB_MEMORY_REQUEST_GRANULARITY (0x00000011) +#define NV0080_CTRL_GR_INFO_INDEX_HOST_MEMORY_REQUEST_GRANULARITY (0x00000012) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_SP_PER_SM (0x00000013) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS (0x00000014) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPS (0x00000015) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_ZCULL_BANKS (0x00000016) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPC_PER_GPC (0x00000017) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MIN_FBPS (0x00000018) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_FBP_PORTS (0x00000019) +#define NV0080_CTRL_GR_INFO_INDEX_TIMESLICE_ENABLED (0x0000001A) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPAS (0x0000001B) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_PES_PER_GPC (0x0000001C) +#define NV0080_CTRL_GR_INFO_INDEX_GPU_CORE_COUNT (0x0000001D) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPCS_PER_PES (0x0000001E) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_HUB_PORTS (0x0000001F) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_SM_PER_TPC (0x00000020) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_HSHUB_FBP_PORTS (0x00000021) +#define NV0080_CTRL_GR_INFO_INDEX_RT_CORE_COUNT (0x00000022) +#define NV0080_CTRL_GR_INFO_INDEX_TENSOR_CORE_COUNT (0x00000023) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GRS (0x00000024) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTCS (0x00000025) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_SLICES (0x00000026) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCMMU_PER_GPC (0x00000027) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_PER_FBP (0x00000028) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_ROP_PER_GPC (0x00000029) +#define NV0080_CTRL_GR_INFO_INDEX_FAMILY_MAX_TPC_PER_GPC (0x0000002A) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPA_PER_FBP (0x0000002B) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_SUBCONTEXT_COUNT (0x0000002C) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_LEGACY_SUBCONTEXT_COUNT (0x0000002D) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_PER_ENGINE_SUBCONTEXT_COUNT (0x0000002E) + + +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_SLICES_PER_LTC (0x00000032) + +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GFXC_SMC_ENGINES (0x00000033) + + +#define NV0080_CTRL_GR_INFO_INDEX_DUMMY (0x00000033) +#define 
NV0080_CTRL_GR_INFO_INDEX_GFX_CAPABILITIES (0x00000034)
+#define NV0080_CTRL_GR_INFO_INDEX_MAX_MIG_ENGINES (0x00000035)
+#define NV0080_CTRL_GR_INFO_INDEX_MAX_PARTITIONABLE_GPCS (0x00000036)
+#define NV0080_CTRL_GR_INFO_INDEX_LITTER_MIN_SUBCTX_PER_SMC_ENG (0x00000037)
+#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS_PER_DIELET (0x00000038)
+#define NV0080_CTRL_GR_INFO_INDEX_LITTER_MAX_NUM_SMC_ENGINES_PER_DIELET (0x00000039)
+
+/* When adding a new INDEX, please update MAX_SIZE accordingly
+ * NOTE: 0080 functionality is merged with 2080 functionality, so this max size
+ * reflects that.
+ */
+#define NV0080_CTRL_GR_INFO_INDEX_MAX (0x00000039)
+#define NV0080_CTRL_GR_INFO_MAX_SIZE (0x3a) /* finn: Evaluated from "(NV0080_CTRL_GR_INFO_INDEX_MAX + 1)" */
+
+/*
+ * NV0080_CTRL_CMD_GR_GET_INFO
+ *
+ * This command returns graphics engine information for the associated GPU.
+ * Requests to retrieve graphics information use a list of one or more
+ * NV0080_CTRL_GR_INFO structures.
+ *
+ * grInfoListSize
+ * This field specifies the number of entries on the caller's
+ * grInfoList.
+ * grInfoList
+ * This field specifies a pointer in the caller's address space
+ * to the buffer into which the graphics information is to be returned.
+ * This buffer must be at least as big as grInfoListSize multiplied
+ * by the size of the NV0080_CTRL_GR_INFO structure.
+ */
+#define NV0080_CTRL_CMD_GR_GET_INFO (0x801104) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | NV0080_CTRL_GR_GET_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GR_GET_INFO_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV0080_CTRL_GR_GET_INFO_PARAMS {
+ NvU32 grInfoListSize;
+ NV_DECLARE_ALIGNED(NvP64 grInfoList, 8);
+} NV0080_CTRL_GR_GET_INFO_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_GR_GET_TPC_PARTITION_MODE
+ * This command gets the current partition mode of a TSG context.
+ *
+ * NV0080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE
+ * This command sets the partition mode of a TSG context.
+ *
+ * NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS
+ * This structure defines the parameters used for TPC partitioning mode SET/GET commands
+ *
+ * hChannelGroup [IN]
+ * RM Handle to the TSG
+ *
+ * mode [IN/OUT]
+ * Partitioning mode enum value
+ * For the SET cmd, this is an input parameter
+ * For the GET cmd, this is an output parameter
+ *
+ * bEnableAllTpcs [IN]
+ * Flag to enable all TPCs by default
+ *
+ * grRouteInfo[IN]
+ * This parameter specifies the routing information used to
+ * disambiguate the target GR engine.
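+ *
+ * A minimal SET sketch (hClient/hDevice/hTsg are hypothetical handles, the
+ * params type is defined below, and NvRmControl() is assumed to be the RMAPI
+ * entry point):
+ *
+ *   NV0080_CTRL_GR_SET_TPC_PARTITION_MODE_PARAMS tpcParams = { 0 };
+ *   tpcParams.hChannelGroup = hTsg;
+ *   tpcParams.mode = NV0080_CTRL_GR_TPC_PARTITION_MODE_STATIC;
+ *   status = NvRmControl(hClient, hDevice,
+ *                        NV0080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE,
+ *                        &tpcParams, sizeof(tpcParams));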
+ *
+ */
+#define NV0080_CTRL_CMD_GR_GET_TPC_PARTITION_MODE (0x801107) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | NV0080_CTRL_GR_GET_TPC_PARTITION_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE (0x801108) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | NV0080_CTRL_GR_SET_TPC_PARTITION_MODE_PARAMS_MESSAGE_ID" */
+
+/* Enum for listing TPC partitioning modes */
+typedef enum NV0080_CTRL_GR_TPC_PARTITION_MODE {
+ NV0080_CTRL_GR_TPC_PARTITION_MODE_NONE = 0,
+ NV0080_CTRL_GR_TPC_PARTITION_MODE_STATIC = 1,
+ NV0080_CTRL_GR_TPC_PARTITION_MODE_DYNAMIC = 2,
+} NV0080_CTRL_GR_TPC_PARTITION_MODE;
+
+typedef struct NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS {
+ NvHandle hChannelGroup; // [in]
+ NV0080_CTRL_GR_TPC_PARTITION_MODE mode; // [in/out]
+ NvBool bEnableAllTpcs; // [in/out]
+ NV_DECLARE_ALIGNED(NV0080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); // [in]
+} NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS;
+
+#define NV0080_CTRL_GR_GET_TPC_PARTITION_MODE_PARAMS_MESSAGE_ID (0x7U)
+
+typedef NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS NV0080_CTRL_GR_GET_TPC_PARTITION_MODE_PARAMS;
+
+#define NV0080_CTRL_GR_SET_TPC_PARTITION_MODE_PARAMS_MESSAGE_ID (0x8U)
+
+typedef NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS NV0080_CTRL_GR_SET_TPC_PARTITION_MODE_PARAMS;
+
+/**
+ * NV0080_CTRL_CMD_GR_GET_CAPS_V2
+ *
+ * This command returns the same set of graphics capabilities for the device
+ * as @ref NV0080_CTRL_CMD_GR_GET_CAPS. The difference is in the structure
+ * NV0080_CTRL_GR_GET_CAPS_V2_PARAMS, which contains a statically sized array,
+ * rather than a caps table pointer and a caps table size in
+ * NV0080_CTRL_GR_GET_CAPS_PARAMS. Additionally,
+ * NV0080_CTRL_GR_GET_CAPS_V2_PARAMS contains a parameter for specifying routing
+ * information, used for MIG.
+ *
+ * capsTbl
+ * This parameter is the client's caps table buffer
+ * into which the graphics caps bits will be written by the RM.
+ * The caps table is an array of unsigned bytes.
+ *
+ * grRouteInfo
+ * This parameter specifies the routing information used to
+ * disambiguate the target GR engine.
+ *
+ * bCapsPopulated
+ * This parameter indicates that the capsTbl has been partially populated by
+ * previous calls to NV0080_CTRL_CMD_GR_GET_CAPS_V2 on other subdevices.
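+ *
+ * A minimal usage sketch (hClient/hDevice are hypothetical handles, and
+ * NvRmControl() is assumed to be the RMAPI entry point); the caps are stored
+ * inline, so no pointer setup is required:
+ *
+ *   NV0080_CTRL_GR_GET_CAPS_V2_PARAMS capsParams = { 0 };
+ *   status = NvRmControl(hClient, hDevice, NV0080_CTRL_CMD_GR_GET_CAPS_V2,
+ *                        &capsParams, sizeof(capsParams));
+ *   // On NV_OK, capsParams.capsTbl[] holds NV0080_CTRL_GR_CAPS_TBL_SIZE bytes.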
+ */ +#define NV0080_CTRL_CMD_GR_GET_CAPS_V2 (0x801109) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | NV0080_CTRL_GR_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GR_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV0080_CTRL_GR_GET_CAPS_V2_PARAMS { + NvU8 capsTbl[NV0080_CTRL_GR_CAPS_TBL_SIZE]; + NV_DECLARE_ALIGNED(NV0080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvBool bCapsPopulated; +} NV0080_CTRL_GR_GET_CAPS_V2_PARAMS; + +#define NV0080_CTRL_CMD_GR_GET_INFO_V2 (0x801110) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | NV0080_CTRL_GR_GET_INFO_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GR_GET_INFO_V2_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV0080_CTRL_GR_GET_INFO_V2_PARAMS { + NvU32 grInfoListSize; + NV0080_CTRL_GR_INFO grInfoList[NV0080_CTRL_GR_INFO_MAX_SIZE]; + NV_DECLARE_ALIGNED(NV0080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV0080_CTRL_GR_GET_INFO_V2_PARAMS; + +/* _ctrl0080gr_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h new file mode 100644 index 0000000..312d6d7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h @@ -0,0 +1,87 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080host.finn +// + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE host control commands and parameters */ + +/* + * NV0080_CTRL_CMD_HOST_GET_CAPS + * + * This command returns the set of host capabilities for the device + * in the form of an array of unsigned bytes. Host capabilities + * include supported features and required workarounds for the host-related + * engine(s) within the device, each represented by a byte offset into + * the table and a bit position within that byte. + * + * capsTblSize + * This parameter specifies the size in bytes of the caps table. + * This value should be set to NV0080_CTRL_HOST_CAPS_TBL_SIZE. + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the host caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes. 
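+ *
+ * A minimal sketch using the V2 variant and the NV0080_CTRL_HOST_GET_CAP
+ * accessor defined below (hClient/hDevice are hypothetical handles, and
+ * NvRmControl() is assumed to be the RMAPI entry point):
+ *
+ *   NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS hostCaps = { 0 };
+ *   status = NvRmControl(hClient, hDevice, NV0080_CTRL_CMD_HOST_GET_CAPS_V2,
+ *                        &hostCaps, sizeof(hostCaps));
+ *   if (status == NV_OK &&
+ *       NV0080_CTRL_HOST_GET_CAP(hostCaps.capsTbl,
+ *                                NV0080_CTRL_HOST_CAPS_EXPLICIT_CACHE_FLUSH_REQD))
+ *   {
+ *       // Explicit cache flushes are required on this device.
+ *   }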
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_POINTER + */ +#define NV0080_CTRL_CMD_HOST_GET_CAPS (0x801401) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_HOST_INTERFACE_ID << 8) | NV0080_CTRL_HOST_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_HOST_GET_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_HOST_GET_CAPS_PARAMS { + NvU32 capsTblSize; + NV_DECLARE_ALIGNED(NvP64 capsTbl, 8); +} NV0080_CTRL_HOST_GET_CAPS_PARAMS; + +/* extract cap bit setting from tbl */ +#define NV0080_CTRL_HOST_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* caps format is byte_index:bit_mask */ +#define NV0080_CTRL_HOST_CAPS_CPU_WRITE_WAR_BUG_420495 2:0x20 +#define NV0080_CTRL_HOST_CAPS_EXPLICIT_CACHE_FLUSH_REQD 2:0x40 + +/* size in bytes of host caps table */ +#define NV0080_CTRL_HOST_CAPS_TBL_SIZE 3 + +#define NV0080_CTRL_CMD_HOST_GET_CAPS_V2 (0x801402) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_HOST_INTERFACE_ID << 8) | NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS { + NvU8 capsTbl[NV0080_CTRL_HOST_CAPS_TBL_SIZE]; +} NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS; + +/* _ctrl0080host_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h new file mode 100644 index 0000000..19bd919 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h @@ -0,0 +1,151 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080internal.finn +// + +#include "ctrl0080gr.h" +#include "ctrl0080fifo.h" +#include "ctrl/ctrl0080/ctrl0080base.h" +#include "ctrl/ctrl0080/ctrl0080perf.h" + + + + +/*! + * @ref NV0080_CTRL_CMD_GR_GET_TPC_PARTITION_MODE + */ +#define NV0080_CTRL_CMD_INTERNAL_GR_GET_TPC_PARTITION_MODE (0x802002) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_CMD_INTERNAL_GR_GET_TPC_PARTITION_MODE_FINN_PARAMS_MESSAGE_ID" */ + + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! 
+#define NV0080_CTRL_CMD_INTERNAL_GR_GET_TPC_PARTITION_MODE_FINN_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_CMD_INTERNAL_GR_GET_TPC_PARTITION_MODE_FINN_PARAMS { + NV_DECLARE_ALIGNED(NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS params, 8); +} NV0080_CTRL_CMD_INTERNAL_GR_GET_TPC_PARTITION_MODE_FINN_PARAMS; + + +/*! + * @ref NV0080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE + */ +#define NV0080_CTRL_CMD_INTERNAL_GR_SET_TPC_PARTITION_MODE (0x802003) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_CMD_INTERNAL_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS_MESSAGE_ID" */ + + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NV0080_CTRL_CMD_INTERNAL_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0080_CTRL_CMD_INTERNAL_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS { + NV_DECLARE_ALIGNED(NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS params, 8); +} NV0080_CTRL_CMD_INTERNAL_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS; + + +/*! + * @ref NV0080_CTRL_CMD_PERF_CUDA_LIMIT_SET_CONTROL + */ +#define NV0080_CTRL_CMD_INTERNAL_PERF_CUDA_LIMIT_SET_CONTROL (0x802009) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS_MESSAGE_ID" */ + + +/*! + * This command disables cuda limit activation at teardown of the client. + */ +#define NV0080_CTRL_CMD_INTERNAL_PERF_CUDA_LIMIT_DISABLE (0x802004) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x4" */ + +/*! + * @ref NV0080_CTRL_CMD_PERF_SLI_GPU_BOOST_SYNC_SET_CONTROL + */ +#define NV0080_CTRL_CMD_INTERNAL_PERF_SLI_GPU_BOOST_SYNC_SET_CONTROL (0x802007) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_PERF_SLI_GPU_BOOST_SYNC_CONTROL_PARAMS_MESSAGE_ID" */ + + + +/* + * NV0080_CTRL_CMD_INTERNAL_FIFO_RC_AND_PERMANENTLY_DISABLE_CHANNELS + * + * This command will RC and disable channels permanently for the given clients. + * + * numClients + * Number of clients + * clientHandles + * List of client handles + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + */ + +#define NV0080_CTRL_CMD_INTERNAL_FIFO_RC_AND_PERMANENTLY_DISABLE_CHANNELS (0x802008) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_INTERNAL_FIFO_RC_AND_PERMANENTLY_DISABLE_CHANNELS_PARAMS_MESSAGE_ID" */ + +#define NV_FIFO_PERMANENTLY_DISABLE_CHANNELS_MAX_CLIENTS 200U + +#define NV0080_CTRL_INTERNAL_FIFO_RC_AND_PERMANENTLY_DISABLE_CHANNELS_PARAMS_MESSAGE_ID (0x08U) + +typedef struct NV0080_CTRL_INTERNAL_FIFO_RC_AND_PERMANENTLY_DISABLE_CHANNELS_PARAMS { + NvU32 numClients; + NvHandle clientHandles[NV_FIFO_PERMANENTLY_DISABLE_CHANNELS_MAX_CLIENTS]; +} NV0080_CTRL_INTERNAL_FIFO_RC_AND_PERMANENTLY_DISABLE_CHANNELS_PARAMS; + +/*! + * NV0080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED + * + * Tell Physical RM whether any ZBC-kind surfaces are allocated. + * If PF and all VFs report false, ZBC table can be flushed by Physical RM. 
+ *
+ * subdevInstance [IN]
+ * Subdevice instance of the GPU to be checked
+ * bZbcSurfacesExist [IN]
+ * NV_TRUE -> ZBC-kind surfaces (without the _SKIP_ZBCREFCOUNT flag) are allocated in Kernel RM
+ *
+ */
+#define NV0080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED (0x80200a) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS_MESSAGE_ID (0x0AU)
+
+typedef struct NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS {
+ NvU32 subdevInstance;
+ NvBool bZbcSurfacesExist;
+} NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS;
+
+
+
+#define NV0080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS_MESSAGE_ID (0x45U)
+
+typedef struct NV0080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS {
+ NvBool bTeardown;
+} NV0080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS;
+
+#define NV0080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR (0x802046) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS_MESSAGE_ID" */
+#define NV0080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS_MESSAGE_ID (0x46U)
+
+typedef NV0080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS NV0080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS;
+
+/* ctrl0080internal_h */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h
new file mode 100644
index 0000000..84a7983
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h
@@ -0,0 +1,90 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0080/ctrl0080msenc.finn
+//
+
+#include "ctrl/ctrl0080/ctrl0080base.h"
+
+/* NV01_DEVICE_XX/NV03_DEVICE MSENC control commands and parameters */
+
+/*
+ * NV0080_CTRL_CMD_MSENC_GET_CAPS
+ *
+ * This command returns the set of MSENC capabilities for the device
+ * in the form of an array of unsigned bytes. MSENC capabilities
+ * include supported features and required workarounds for the MSENC-related
+ * engine(s) within the device, each represented by a byte offset into
+ * the table and a bit position within that byte.
+ * + * capsTblSize + * This parameter specifies the size in bytes of the caps table. + * This value should be set to NV0080_CTRL_MSENC_CAPS_TBL_SIZE. + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the MSENC caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_MSENC_GET_CAPS (0x801b01) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_MSENC_INTERFACE_ID << 8) | NV0080_CTRL_MSENC_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_MSENC_GET_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_MSENC_GET_CAPS_PARAMS { + NvU32 capsTblSize; + NV_DECLARE_ALIGNED(NvP64 capsTbl, 8); +} NV0080_CTRL_MSENC_GET_CAPS_PARAMS; + + + +/* size in bytes of MSENC caps table */ +#define NV0080_CTRL_MSENC_CAPS_TBL_SIZE 4 + +/* + * NV0080_CTRL_CMD_MSENC_GET_CAPS_V2 + * + * This command is a version of NV0080_CTRL_CMD_MSENC_GET_CAPS with caps passed inline in capsTbl. + * + * For consistency with other video caps controls, it adds `instanceId` parameter. Currently it is + * ignored. + */ +#define NV0080_CTRL_CMD_MSENC_GET_CAPS_V2 (0x801b02) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_MSENC_INTERFACE_ID << 8) | NV0080_CTRL_MSENC_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_MSENC_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_MSENC_GET_CAPS_V2_PARAMS { + NvU8 capsTbl[NV0080_CTRL_MSENC_CAPS_TBL_SIZE]; + NvU32 instanceId; // ignored +} NV0080_CTRL_MSENC_GET_CAPS_V2_PARAMS; + +/* _ctrl0080msenc_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h new file mode 100644 index 0000000..dd3c662 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h @@ -0,0 +1,75 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080nvjpg.finn +// + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE NVJPG control commands and parameters */ + + + +/* + * Size in bytes of NVJPG caps table. 
This value should be one greater
+ * than the largest byte_index value above.
+ */
+#define NV0080_CTRL_NVJPG_CAPS_TBL_SIZE 9
+
+/*
+ * NV0080_CTRL_CMD_NVJPG_GET_CAPS_V2
+ *
+ * This command returns the set of NVJPG capabilities for the device
+ * in the form of an array of unsigned bytes. NVJPG capabilities
+ * include supported features of the NVJPG engine(s) within the device,
+ * each represented by a byte offset into the table and a bit position within
+ * that byte.
+ *
+ * [out] capsTbl
+ * This caps table array is where the NVJPG caps bits will be transferred
+ * by the RM. The caps table is an array of unsigned bytes.
+ * instanceId
+ * This parameter specifies the instance ID of NVJPG for which
+ * cap bits are requested.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0080_CTRL_CMD_NVJPG_GET_CAPS_V2 (0x801f02) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_NVJPG_INTERFACE_ID << 8) | NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS {
+ NvU8 capsTbl[NV0080_CTRL_NVJPG_CAPS_TBL_SIZE];
+ NvU32 instanceId;
+} NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS;
+
+/* _ctrl0080NVJPG_h_ */
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h
new file mode 100644
index 0000000..1768aa7
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h
@@ -0,0 +1,63 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0080/ctrl0080perf.finn
+//
+
+#define NV0080_CTRL_PERF_SLI_GPU_BOOST_SYNC_CONTROL_PARAMS_MESSAGE_ID (0x7U)
+
+typedef struct NV0080_CTRL_PERF_SLI_GPU_BOOST_SYNC_CONTROL_PARAMS {
+ NvBool bActivate;
+} NV0080_CTRL_PERF_SLI_GPU_BOOST_SYNC_CONTROL_PARAMS;
+
+#define NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS {
+ NvBool bCudaLimit;
+} NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS;
+
+
+/*
+ * This command sets the control information pertaining to the CUDA limit.
+ *
+ * bCudaLimit
+ * When set to TRUE, clocks will be limited based on CUDA.
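+ *
+ * A minimal usage sketch (hClient/hDevice are hypothetical handles, and
+ * NvRmControl() is assumed to be the RMAPI entry point):
+ *
+ *   NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS limitParams = { 0 };
+ *   limitParams.bCudaLimit = NV_TRUE;
+ *   status = NvRmControl(hClient, hDevice,
+ *                        NV0080_CTRL_CMD_PERF_CUDA_LIMIT_SET_CONTROL,
+ *                        &limitParams, sizeof(limitParams));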
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_REQUEST + * NV_ERR_INVALID_STATE + */ +#define NV0080_CTRL_CMD_PERF_CUDA_LIMIT_SET_CONTROL (0x801909) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_PERF_INTERFACE_ID << 8) | NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS_MESSAGE_ID" */ + + + +/* _ctrl0080perf_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h new file mode 100644 index 0000000..cceb42f --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h @@ -0,0 +1,95 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080unix.finn +// + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE UNIX-specific control commands and parameters */ + +/* + * NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH + * + * This command notifies RM to save or restore the current console state. It is + * intended to be called just before the display driver starts using the display + * engine, and after it has finished using it. + * + * cmd + * Indicates which operation should be performed. + * + * SAVE_VT_STATE + * Records the current state of the console, to be restored later. + * RESTORE_VT_STATE + * Restores the previously-saved console state. + * + * fbInfo + * Returns information about the system's framebuffer console, if one + * exists. If no console is present, all fields will be zero. 
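+ *
+ * A minimal save/restore sketch (hClient/hDevice are hypothetical handles,
+ * and NvRmControl() is assumed to be the RMAPI entry point):
+ *
+ *   NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS vtParams = { 0 };
+ *   vtParams.cmd = NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE;
+ *   status = NvRmControl(hClient, hDevice, NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH,
+ *                        &vtParams, sizeof(vtParams));
+ *   // ... the display driver drives the display engine ...
+ *   vtParams.cmd = NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_RESTORE_VT_STATE;
+ *   status = NvRmControl(hClient, hDevice, NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH,
+ *                        &vtParams, sizeof(vtParams));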
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH (0x801e01) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS { + NvU32 cmd; /* in */ +} NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS; + +/* Called when the display driver needs RM to save the console data, + * which will be used in RM based console restore */ +#define NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE (0x00000001) + +/* Called when the display driver needs RM to restore the console */ +#define NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_RESTORE_VT_STATE (0x00000002) + +/* Called when the display driver has restored the console -- RM doesn't + * need to do anything further, but needs to be informed to avoid turning the + * GPU off and thus destroying the console state. */ +#define NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_CONSOLE_RESTORED (0x00000003) + +#define NV0080_CTRL_CMD_OS_UNIX_VT_GET_FB_INFO (0x801e02) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS { + NvU32 subDeviceInstance; /* out */ + + NvU16 width; /* out */ + NvU16 height; /* out */ + NvU16 depth; /* out */ + NvU16 pitch; /* out */ + NV_DECLARE_ALIGNED(NvU64 baseAddress, 8); /* out */ + NV_DECLARE_ALIGNED(NvU64 size, 8); /* out */ +} NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS; + +/* _ctrl0080unix_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl00da.h b/src/common/sdk/nvidia/inc/ctrl/ctrl00da.h new file mode 100644 index 0000000..d404f06 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl00da.h @@ -0,0 +1,258 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl00da.finn
+//
+
+
+
+/* NV_SEMAPHORE_SURFACE control commands and parameters */
+
+#define NV_SEMAPHORE_SURFACE_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x00DA, NV00DA_CTRL_##cat, idx)
+
+/*
+* NV_SEMAPHORE_SURFACE_CTRL_CMD_REF_MEMORY
+* Duplicate the memory object(s) bound to the semaphore surface into the RM
+* client of the caller.
+*
+* The handle values are generated and returned by resource manager if the
+* client specified a handle value of zero.
+*
+* If the semaphore surface has a valid max submitted value memory object, but
+* the GPU + system do not require separate max submitted value and semaphore
+* value surfaces, the handles provided by the client must be equal, and the
+* handles returned by resource manager will also be equal. In such cases, the
+* client must recognize that both handles correspond to a single reference to
+* a single object, and hence the handle must be freed only once by the client.
+*
+* If the GPU does not require a max submitted value memory object, the handle
+* value must be set to zero and the returned handle will always be zero as
+* well.
+*
+* RETURNS:
+* NVOS_STATUS_SUCCESS if the memory object(s) were successfully duplicated
+* into the calling client.
+* NVOS_STATUS_ERROR_INVALID_PARAMETER if any of the rules regarding the
+* max submitted value handle value were violated.
+* An error code forwarded from NvRmDupObject for any other failures.
+*/
+#define NV_SEMAPHORE_SURFACE_CTRL_CMD_REF_MEMORY (0xda0001) /* finn: Evaluated from "(FINN_NV_SEMAPHORE_SURFACE_INTERFACE_ID << 8) | NV_SEMAPHORE_SURFACE_CTRL_REF_MEMORY_PARAMS_MESSAGE_ID" */
+
+#define NV_SEMAPHORE_SURFACE_CTRL_REF_MEMORY_PARAMS_MESSAGE_ID (0x01U)
+
+typedef struct NV_SEMAPHORE_SURFACE_CTRL_REF_MEMORY_PARAMS {
+ NvHandle hSemaphoreMem;
+ NvHandle hMaxSubmittedMem;
+} NV_SEMAPHORE_SURFACE_CTRL_REF_MEMORY_PARAMS;
+
+/*
+* Currently no known usages that require more than two indices per channel:
+*
+* 1) The channel's associated backend engine's TRAP interrupt.
+* 2) The frontend/GPFIFO's non-stall interrupt.
+*
+* The remaining slots are for futureproofing purposes only.
+*/
+#define NV_SEMAPHORE_SURFACE_CTRL_CMD_BIND_CHANNEL_MAX_INDICES 8
+
+/*
+* NV_SEMAPHORE_SURFACE_CTRL_CMD_BIND_CHANNEL
+* Associates a channel with the semaphore surface. All channels which will
+* wait on or signal semaphores in a semaphore surface should first register
+* with it to ensure proper event delivery and error handling.
+*
+* numNotifyIndices is the number of valid entries in notifyIndices.
+*
+* notifyIndices is an array of notifier indices corresponding to the engines
+* the caller may use to signal a semaphore in the semaphore surface. See
+* cl2080_notifiers.h for a list of notifier indices. For example, this would
+* indicate a channel using the GR0 (graphics/compute) and FIFO TRAP method
+* (GPFIFO) notifiers to signal semaphores:
+*
+* params.hChannel = myChannelHandle;
+* params.numNotifyIndices = 2;
+* params.notifyIndices[0] = NV2080_NOTIFIERS_GR0;
+* params.notifyIndices[1] = NV2080_NOTIFIERS_FIFO_EVENT_MTHD;
+*
+* If the specified channel will only be used to wait for semaphores, set
+* numNotifyIndices to 0.
+*
+* RETURNS:
+* NVOS_STATUS_SUCCESS if the channel and notification indices were
+* successfully bound.
+* NVOS_STATUS_ERROR_INVALID_OBJECT_HANDLE if hChannel does not refer to an
+* object in the client.
+* NVOS_STATUS_ERROR_INVALID_OBJECT_ERROR if hChannel does not refer to a valid
+* channel object.
+* NVOS_STATUS_ERROR_INVALID_PARAMETER if numNotifyIndices is greater than
+* NV_SEMAPHORE_SURFACE_CTRL_CMD_BIND_CHANNEL_MAX_INDICES.
+* NVOS_STATUS_ERROR_NOT_SUPPORTED if the notifyIndex is not a valid
+* notification index.
+* NVOS_STATUS_ERROR_INVALID_STATE if an internal inconsistency is found in the
+* binding tracking logic.
+* NV_ERR_NO_MEMORY if memory could not be allocated for internal tracking
+* structures.
+*/
+#define NV_SEMAPHORE_SURFACE_CTRL_CMD_BIND_CHANNEL (0xda0002) /* finn: Evaluated from "(FINN_NV_SEMAPHORE_SURFACE_INTERFACE_ID << 8) | NV_SEMAPHORE_SURFACE_CTRL_BIND_CHANNEL_PARAMS_MESSAGE_ID" */
+
+#define NV_SEMAPHORE_SURFACE_CTRL_BIND_CHANNEL_PARAMS_MESSAGE_ID (0x02U)
+
+typedef struct NV_SEMAPHORE_SURFACE_CTRL_BIND_CHANNEL_PARAMS {
+    NvHandle hChannel;
+    NvU32    numNotifyIndices;
+    NvU32    notifyIndices[NV_SEMAPHORE_SURFACE_CTRL_CMD_BIND_CHANNEL_MAX_INDICES];
+} NV_SEMAPHORE_SURFACE_CTRL_BIND_CHANNEL_PARAMS;
+
+/*
+* NV_SEMAPHORE_SURFACE_CTRL_CMD_UNBIND_CHANNEL
+* Dissociate a channel and a semaphore surface. Before freeing a channel
+* object, it should be dissociated from all semaphore surfaces to which it has
+* been bound.
+*
+* hChannel is a valid channel object handle which has previously been bound
+* to the semaphore surface.
+*
+* numNotifyIndices is the number of valid entries in the notifyIndices array.
+*
+* notifyIndices is the array of notifier indices that was bound to the
+* semaphore surface with the hChannel handle.
+*
+* RETURNS:
+* NVOS_STATUS_SUCCESS if the channel and notification indices were
+* successfully unbound.
+* NVOS_STATUS_ERROR_INVALID_OBJECT_HANDLE if hChannel does not refer to an
+* object in the client.
+* NVOS_STATUS_ERROR_INVALID_OBJECT_ERROR if hChannel does not refer to a valid
+* channel object.
+* NVOS_STATUS_ERROR_INVALID_PARAMETER if numNotifyIndices is greater than
+* NV_SEMAPHORE_SURFACE_CTRL_CMD_BIND_CHANNEL_MAX_INDICES.
+* NVOS_STATUS_ERROR_INVALID_STATE if no binding associated with the specified
+* channel and notification indices is found.
+*/
+#define NV_SEMAPHORE_SURFACE_CTRL_CMD_UNBIND_CHANNEL (0xda0006) /* finn: Evaluated from "(FINN_NV_SEMAPHORE_SURFACE_INTERFACE_ID << 8) | NV_SEMAPHORE_SURFACE_CTRL_UNBIND_CHANNEL_PARAMS_MESSAGE_ID" */
+
+#define NV_SEMAPHORE_SURFACE_CTRL_UNBIND_CHANNEL_PARAMS_MESSAGE_ID (0x06U)
+
+typedef struct NV_SEMAPHORE_SURFACE_CTRL_UNBIND_CHANNEL_PARAMS {
+    NvHandle hChannel;
+    NvU32    numNotifyIndices;
+    NvU32    notifyIndices[NV_SEMAPHORE_SURFACE_CTRL_CMD_BIND_CHANNEL_MAX_INDICES];
+} NV_SEMAPHORE_SURFACE_CTRL_UNBIND_CHANNEL_PARAMS;
+
+/*
+* NV_SEMAPHORE_SURFACE_CTRL_CMD_REGISTER_WAITER
+* Ask RM to signal the specified OS event and/or set the semaphore to a new
+* value when the value at the specified index is >= a desired value.
+*
+* index - Specifies the semaphore slot within the surface to which the wait
+* applies.
+* waitValue - The value to wait for.
+* newValue - Specifies a value to set the semaphore to automatically when the
+* specified semaphore slot reaches waitValue. "0" means the semaphore
+* value is not altered by this waiter.
+* notificationHandle - The OS event (kernel callback or userspace event
+* handle) to notify when the value is reached, or 0 if no notification
+* is required.
+*
+* The waiter must specify at least one action.
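+*
+* As an illustration (a sketch, not part of the interface), a waiter that
+* wants an OS event signalled once slot 3 reaches the value 42, without
+* auto-updating the semaphore, could fill the parameters as follows, where
+* osEvent is a hypothetical OS event handle owned by the caller:
+*
+* params.index = 3;
+* params.waitValue = 42;
+* params.newValue = 0;
+* params.notificationHandle = (NvU64)osEvent;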
+*
+* RETURNS:
+* NVOS_STATUS_SUCCESS if the waitValue has not been reached and a waiter was
+* successfully registered.
+* NVOS_STATUS_SUCCESS if the waitValue has been reached, newValue was applied,
+* and notificationHandle was 0 (no notification was requested).
+* NVOS_STATUS_ERROR_ALREADY_SIGNALLED if the waitValue has been reached and
+* newValue (if not 0) was applied, but no notification was registered or
+* generated on notificationHandle. Any other notifications triggered as a
+* side effect of newValue being applied were still delivered.
+* NVOS_STATUS_ERROR_STATE_IN_USE if newValue is not 0 and the specified index
+* already has an auto-update value registered. No waiter is registered.
+* NVOS_STATUS_ERROR_STATE_IN_USE if the specified notification handle is
+* already registered as a waiter for the specified waitValue at the
+* specified index. No waiter is registered.
+* NVOS_STATUS_ERROR_* miscellaneous internal errors. No waiter is registered.
+*/
+#define NV_SEMAPHORE_SURFACE_CTRL_CMD_REGISTER_WAITER (0xda0003) /* finn: Evaluated from "(FINN_NV_SEMAPHORE_SURFACE_INTERFACE_ID << 8) | NV_SEMAPHORE_SURFACE_CTRL_REGISTER_WAITER_PARAMS_MESSAGE_ID" */
+
+#define NV_SEMAPHORE_SURFACE_CTRL_REGISTER_WAITER_PARAMS_MESSAGE_ID (0x03U)
+
+typedef struct NV_SEMAPHORE_SURFACE_CTRL_REGISTER_WAITER_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 index, 8);
+    NV_DECLARE_ALIGNED(NvU64 waitValue, 8);
+    NV_DECLARE_ALIGNED(NvU64 newValue, 8);
+    NV_DECLARE_ALIGNED(NvU64 notificationHandle, 8);
+} NV_SEMAPHORE_SURFACE_CTRL_REGISTER_WAITER_PARAMS;
+
+/*
+* NV_SEMAPHORE_SURFACE_CTRL_CMD_SET_VALUE
+* Modify a semaphore surface semaphore value, awakening any CPU waiters in
+* the process. newValue must be >= the current value at the specified index.
+*/
+#define NV_SEMAPHORE_SURFACE_CTRL_CMD_SET_VALUE (0xda0004) /* finn: Evaluated from "(FINN_NV_SEMAPHORE_SURFACE_INTERFACE_ID << 8) | NV_SEMAPHORE_SURFACE_CTRL_SET_VALUE_PARAMS_MESSAGE_ID" */
+
+#define NV_SEMAPHORE_SURFACE_CTRL_SET_VALUE_PARAMS_MESSAGE_ID (0x04U)
+
+typedef struct NV_SEMAPHORE_SURFACE_CTRL_SET_VALUE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 index, 8);
+    NV_DECLARE_ALIGNED(NvU64 newValue, 8);
+} NV_SEMAPHORE_SURFACE_CTRL_SET_VALUE_PARAMS;
+
+/*
+* NV_SEMAPHORE_SURFACE_CTRL_CMD_UNREGISTER_WAITER
+* Remove a previously registered notification handle from an index + value
+* tuple's list of waiters.
+*
+* index - Specifies the semaphore slot within the surface on which the waiter
+* was previously registered.
+* waitValue - The value the wait was registered for.
+* notificationHandle - The OS event (kernel callback or userspace event
+* handle) registered as a waiter.
+*
+* RETURNS:
+* NVOS_STATUS_SUCCESS if the waiter was successfully removed from the list of
+* pending waiters.
+* NVOS_STATUS_ERROR_* miscellaneous internal errors, or the waiter was not
+* found in the list of pending waiters. The waiter may have already been
+* called, or may be in a list of imminent notifications the RM is
+* processing.
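+*
+* Unregistration is keyed on the same (index, waitValue, notificationHandle)
+* triple that was passed to REGISTER_WAITER; a sketch of the teardown
+* matching the registration example above:
+*
+* params.index = 3;
+* params.waitValue = 42;
+* params.notificationHandle = (NvU64)osEvent;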
+*/ +#define NV_SEMAPHORE_SURFACE_CTRL_CMD_UNREGISTER_WAITER (0xda0005) /* finn: Evaluated from "(FINN_NV_SEMAPHORE_SURFACE_INTERFACE_ID << 8) | NV_SEMAPHORE_SURFACE_CTRL_UNREGISTER_WAITER_PARAMS_MESSAGE_ID" */ + +#define NV_SEMAPHORE_SURFACE_CTRL_UNREGISTER_WAITER_PARAMS_MESSAGE_ID (0x05U) + +typedef struct NV_SEMAPHORE_SURFACE_CTRL_UNREGISTER_WAITER_PARAMS { + NV_DECLARE_ALIGNED(NvU64 index, 8); + NV_DECLARE_ALIGNED(NvU64 waitValue, 8); + NV_DECLARE_ALIGNED(NvU64 notificationHandle, 8); +} NV_SEMAPHORE_SURFACE_CTRL_UNREGISTER_WAITER_PARAMS; + +/* _ctrl00da_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl00de.h b/src/common/sdk/nvidia/inc/ctrl/ctrl00de.h new file mode 100644 index 0000000..0e2d569 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl00de.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl00de.finn +// + +#include "ctrl/ctrlxxxx.h" + +#define NV00DE_CTRL_CMD(cat, idx) NVXXXX_CTRL_CMD(0x00de, NV00DE_CTRL_##cat, idx) + +/* RM_USER_SHARED_DATA control commands and parameters */ + +/* + * NV00DE_CTRL_CMD_REQUEST_DATA_POLL + * + * @brief Request some polled data elements to be updated + * Equivalent to requesting polling using NV00DE_ALLOC_PARAMETERS->polledDataMask + * See cl00de.h for mask bits + * + * @param[in] polledDataMask Bitmask of data to be updated + * + * @return NV_OK on success + * @return NV_ERR_ otherwise + */ +#define NV00DE_CTRL_CMD_REQUEST_DATA_POLL (0xde0001U) /* finn: Evaluated from "(FINN_RM_USER_SHARED_DATA_INTERFACE_ID << 8) | NV00DE_CTRL_REQUEST_DATA_POLL_PARAMS_MESSAGE_ID" */ + +#define NV00DE_CTRL_REQUEST_DATA_POLL_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV00DE_CTRL_REQUEST_DATA_POLL_PARAMS { + NV_DECLARE_ALIGNED(NvU64 polledDataMask, 8); +} NV00DE_CTRL_REQUEST_DATA_POLL_PARAMS; + +/* _ctrl00de.h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl00fe.h b/src/common/sdk/nvidia/inc/ctrl/ctrl00fe.h new file mode 100644 index 0000000..a5cf2b6 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl00fe.h @@ -0,0 +1,107 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl00fe.finn +// + +typedef enum NV00FE_CTRL_OPERATION_TYPE { + NV00FE_CTRL_OPERATION_TYPE_NOP = 0, // operation is ignored, used internally + NV00FE_CTRL_OPERATION_TYPE_MAP = 1, + NV00FE_CTRL_OPERATION_TYPE_UNMAP = 2, + NV00FE_CTRL_OPERATION_TYPE_SEMAPHORE_WAIT = 3, + NV00FE_CTRL_OPERATION_TYPE_SEMAPHORE_SIGNAL = 4, +} NV00FE_CTRL_OPERATION_TYPE; + +typedef struct NV00FE_CTRL_OPERATION_MAP { + NvHandle hVirtualMemory; + NV_DECLARE_ALIGNED(NvU64 virtualOffset, 8); + NvHandle hPhysicalMemory; + NV_DECLARE_ALIGNED(NvU64 physicalOffset, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 dmaFlags; // NVOS46_FLAGS + NvU32 kindOverride; +} NV00FE_CTRL_OPERATION_MAP; + +typedef struct NV00FE_CTRL_OPERATION_UNMAP { + NvHandle hVirtualMemory; + NV_DECLARE_ALIGNED(NvU64 virtualOffset, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 dmaFlags; // NVOS47_FLAGS +} NV00FE_CTRL_OPERATION_UNMAP; + +typedef struct NV00FE_CTRL_OPERATION_SEMAPHORE { + NvU32 index; + NV_DECLARE_ALIGNED(NvU64 value, 8); +} NV00FE_CTRL_OPERATION_SEMAPHORE; + +typedef struct NV00FE_CTRL_OPERATION { + NV00FE_CTRL_OPERATION_TYPE type; + + union { + NV_DECLARE_ALIGNED(NV00FE_CTRL_OPERATION_MAP map, 8); + NV_DECLARE_ALIGNED(NV00FE_CTRL_OPERATION_UNMAP unmap, 8); + NV_DECLARE_ALIGNED(NV00FE_CTRL_OPERATION_SEMAPHORE semaphore, 8); + } data; +} NV00FE_CTRL_OPERATION; + +/* + * NV00FE_CTRL_CMD_SUBMIT_OPERATIONS + * + * Execute a list of mapping/semaphore operations + * Page size is determined by the virtual allocation + * Offsets/sizes must respect the page size + * + */ +#define NV00FE_CTRL_CMD_SUBMIT_OPERATIONS (0xfe0101U) /* finn: Evaluated from "(FINN_NV_MEMORY_MAPPER_INTERFACE_ID << 8) | NV00FE_CTRL_SUBMIT_OPERATIONS_PARAMS_MESSAGE_ID" */ + +#define NV00FE_MAX_OPERATIONS_COUNT (0x00001000U) + +#define NV00FE_CTRL_SUBMIT_OPERATIONS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV00FE_CTRL_SUBMIT_OPERATIONS_PARAMS { + NvU32 operationsCount; + NV_DECLARE_ALIGNED(NV00FE_CTRL_OPERATION pOperations[NV00FE_MAX_OPERATIONS_COUNT], 8); + NvU32 operationsProcessedCount; +} NV00FE_CTRL_SUBMIT_OPERATIONS_PARAMS; + +/* + * NV00FE_CTRL_CMD_RESIZE_QUEUE + * + * Resize the MemoryMapper command queue + * All pending commands remain in queue + * + */ +#define NV00FE_CTRL_CMD_RESIZE_QUEUE (0xfe0102U) /* finn: Evaluated from 
"(FINN_NV_MEMORY_MAPPER_INTERFACE_ID << 8) | NV00FE_CTRL_RESIZE_QUEUE_PARAMS_MESSAGE_ID" */ + +#define NV00FE_CTRL_RESIZE_QUEUE_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV00FE_CTRL_RESIZE_QUEUE_PARAMS { + NvU32 maxQueueSize; +} NV00FE_CTRL_RESIZE_QUEUE_PARAMS; + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0100.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0100.h new file mode 100644 index 0000000..3c44f90 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0100.h @@ -0,0 +1,237 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0100.finn +// + +#include "ctrl/ctrlxxxx.h" + +#define NV0100_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0100, NV0100_CTRL_##cat, idx) + +/* Client command categories (6bits) */ +#define NV0100_CTRL_RESERVED (0x00U) +#define NV0100_CTRL_LOCK_STRESS (0x01U) + +/* + * NV0100_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0100_CTRL_CMD_NULL (0x1000000U) /* finn: Evaluated from "(FINN_LOCK_STRESS_OBJECT_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + +/* + * NV0100_CTRL_CMD_RESET_LOCK_STRESS_STATE + * + * This command resets RM's lock stress counters to 0, allowing for a new lock stress + * run to start. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0100_CTRL_CMD_RESET_LOCK_STRESS_STATE (0x1000101U) /* finn: Evaluated from "(FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID << 8) | 0x1" */ + +/* + * Bit fields to indicate to the caller whether an action was performed on a counter + * Setting any of these fields to 1 means to increment/decrement the respective counter. + * Setting any of these fields to 0 means to ignore the respective counter. + */ +#define NV0100_CTRL_GLOBAL_RMAPI_LOCK_STRESS_COUNTER_ACTION 0:0 +#define NV0100_CTRL_GPU_LOCK_STRESS_COUNTER_ACTION 1:1 +#define NV0100_CTRL_CLIENT_LOCK_STRESS_COUNTER_ACTION 2:2 +#define NV0100_CTRL_INTERNAL_CLIENT_LOCK_STRESS_COUNTER_ACTION 3:3 +#define NV0100_CTRL_ALL_LOCK_STRESS_COUNTER_ACTION 3:0 + +/* + * Bit fields to indicate to the caller what type of action was performed on a counter + * Setting any of these fields to 1 means to increment the respective counter. 
+ * Setting any of these fields to 0 means to decrement the respective counter. + */ +#define NV0100_CTRL_GLOBAL_RMAPI_LOCK_STRESS_COUNTER_INCREMENT 4:4 +#define NV0100_CTRL_GPU_LOCK_STRESS_COUNTER_INCREMENT 5:5 +#define NV0100_CTRL_CLIENT_LOCK_STRESS_COUNTER_INCREMENT 6:6 +#define NV0100_CTRL_INTERNAL_CLIENT_LOCK_STRESS_COUNTER_INCREMENT 7:7 +#define NV0100_CTRL_ALL_LOCK_STRESS_COUNTER_INCREMENT 7:4 + +typedef struct NV0100_CTRL_LOCK_STRESS_OUTPUT { + NvU8 action; +} NV0100_CTRL_LOCK_STRESS_OUTPUT; + +/* + * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_ALL_RM_LOCKS + * + * This command does a random increment/decrement on global counters in RM and reports + * the operation performed back to the caller. This is done with all RM locks held. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_ALL_RM_LOCKS (0x1000102U) /* finn: Evaluated from "(FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID << 8) | NV0100_CTRL_PERFORM_LOCK_STRESS_ALL_RM_LOCKS_PARAMS_MESSAGE_ID" */ + +#define NV0100_CTRL_PERFORM_LOCK_STRESS_ALL_RM_LOCKS_PARAMS_MESSAGE_ID (0x2U) + +typedef NV0100_CTRL_LOCK_STRESS_OUTPUT NV0100_CTRL_PERFORM_LOCK_STRESS_ALL_RM_LOCKS_PARAMS; + +/* + * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK + * + * This command does a random increment/decrement on global counters in RM and reports + * the operation performed back to the caller. This is done with all RM locks held except + * for the GPU locks. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK (0x1000103U) /* finn: Evaluated from "(FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID << 8) | NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_PARAMS_MESSAGE_ID" */ + +#define NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_PARAMS_MESSAGE_ID (0x3U) + +typedef NV0100_CTRL_LOCK_STRESS_OUTPUT NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_PARAMS; + +/* + * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE + * + * This command does a random increment/decrement on global counters in RM and reports + * the operation performed back to the caller. This is done with the API lock held in + * read mode and GPU locks held. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE (0x1000104U) /* finn: Evaluated from "(FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID << 8) | NV0100_CTRL_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE_PARAMS_MESSAGE_ID" */ + +#define NV0100_CTRL_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE_PARAMS_MESSAGE_ID (0x4U) + +typedef NV0100_CTRL_LOCK_STRESS_OUTPUT NV0100_CTRL_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE_PARAMS; + +/* + * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE + * + * This command does a random increment/decrement on global counters in RM and reports + * the operation performed back to the caller. This is done with the API lock held in + * read mode. 
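+ *
+ * For any of the PERFORM_LOCK_STRESS_* commands, the returned action byte is
+ * decoded with the bit fields defined above; a minimal sketch using the
+ * NVBIT helper from nvmisc.h, where globalDelta is a caller-side tally
+ * (bit 0: global counter touched, bit 4: incremented rather than
+ * decremented):
+ *
+ * if (params.action & NVBIT(0))
+ * {
+ *     globalDelta += (params.action & NVBIT(4)) ? 1 : -1;
+ * }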
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE (0x1000105U) /* finn: Evaluated from "(FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID << 8) | NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS_MESSAGE_ID" */ + +#define NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS_MESSAGE_ID (0x5U) + +typedef NV0100_CTRL_LOCK_STRESS_OUTPUT NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS; + +/* + * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_ALL_RM_LOCKS + * + * This command does a random increment/decrement on global counters in RM and reports + * the operation performed back to the caller. This is done with all RM locks held in the + * internal RM API path. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_ALL_RM_LOCKS (0x1000106U) /* finn: Evaluated from "(FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID << 8) | NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_ALL_RM_LOCKS_PARAMS_MESSAGE_ID" */ + +#define NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_ALL_RM_LOCKS_PARAMS_MESSAGE_ID (0x6U) + +typedef NV0100_CTRL_LOCK_STRESS_OUTPUT NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_ALL_RM_LOCKS_PARAMS; + +/* + * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK + * + * This command does a random increment/decrement on global counters in RM and reports + * the operation performed back to the caller. This is done with all RM locks held except + * for the GPU locks in the internal RM API path. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK (0x1000107U) /* finn: Evaluated from "(FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID << 8) | NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_PARAMS_MESSAGE_ID" */ + +#define NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_PARAMS_MESSAGE_ID (0x7U) + +typedef NV0100_CTRL_LOCK_STRESS_OUTPUT NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_PARAMS; + +/* + * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_API_LOCK_READ_MODE + * + * This command does a random increment/decrement on global counters in RM and reports + * the operation performed back to the caller. This is done with the API lock taken in + * read mode and GPU locks held in the internal RM API path. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_API_LOCK_READ_MODE (0x1000108U) /* finn: Evaluated from "(FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID << 8) | NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_API_LOCK_READ_MODE_PARAMS_MESSAGE_ID" */ + +#define NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_API_LOCK_READ_MODE_PARAMS_MESSAGE_ID (0x8U) + +typedef NV0100_CTRL_LOCK_STRESS_OUTPUT NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_API_LOCK_READ_MODE_PARAMS; + +/* + * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_API_LOCK_READ_MODE + * + * This command does a random increment/decrement on global counters in RM and reports + * the operation performed back to the caller. This is done with the API lock held in read + * mode in the internal RM API path. 
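+ *
+ * Taken together, these commands let a test harness (a sketch of one
+ * possible use, not mandated by the API) hammer each locking mode from many
+ * threads and then check the results:
+ *
+ * for (i = 0; i < iterations; i++)
+ * {
+ *     // pick one of the PERFORM_LOCK_STRESS_* commands at random, issue
+ *     // it, and apply the decoded action byte to thread-local tallies
+ * }
+ * // finally, compare the tallies against the values reported by
+ * // NV0100_CTRL_CMD_GET_LOCK_STRESS_COUNTERS; a mismatch suggests a race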
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_API_LOCK_READ_MODE (0x1000109U) /* finn: Evaluated from "(FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID << 8) | NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS_MESSAGE_ID" */ + +#define NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS_MESSAGE_ID (0x9U) + +typedef NV0100_CTRL_LOCK_STRESS_OUTPUT NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS; + +/* + * NV0100_CTRL_CMD_GET_LOCK_STRESS_COUNTERS + * + * This command gets the value of the global lock stress counters in RM at the end of + * the lock stress test. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0100_CTRL_CMD_GET_LOCK_STRESS_COUNTERS (0x100010aU) /* finn: Evaluated from "(FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID << 8) | NV0100_CTRL_GET_LOCK_STRESS_COUNTERS_PARAMS_MESSAGE_ID" */ + +#define NV0100_CTRL_GET_LOCK_STRESS_COUNTERS_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV0100_CTRL_GET_LOCK_STRESS_COUNTERS_PARAMS { + NvS32 globalLockStressCounter; + NvS32 gpuLockStressCounter; + NvS32 clientLockStressCounter; + NvS32 internalClientLockStressCounter; +} NV0100_CTRL_GET_LOCK_STRESS_COUNTERS_PARAMS; + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080.h new file mode 100644 index 0000000..6da4d70 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080.h @@ -0,0 +1,90 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#include "ctrl2080/ctrl2080gpu.h" +#include "ctrl2080/ctrl2080fuse.h" +#include "ctrl2080/ctrl2080event.h" +#include "ctrl2080/ctrl2080tmr.h" +#include "ctrl2080/ctrl2080bios.h" +#include "ctrl2080/ctrl2080mc.h" +#include "ctrl2080/ctrl2080fifo.h" +#include "ctrl2080/ctrl2080fb.h" + + + +#include "ctrl2080/ctrl2080spdm.h" +#include "ctrl2080/ctrl2080gr.h" +#include "ctrl2080/ctrl2080bus.h" +#include "ctrl2080/ctrl2080thermal.h" +#include "ctrl2080/ctrl2080fan.h" +#include "ctrl2080/ctrl2080i2c.h" +#include "ctrl2080/ctrl2080internal.h" +#include "ctrl2080/ctrl2080spi.h" +#include "ctrl2080/ctrl2080gpio.h" +#include "ctrl2080/ctrl2080clk.h" +#include "ctrl2080/ctrl2080perf.h" +#include "ctrl2080/ctrl2080perf_cf.h" + + +#include "ctrl2080/ctrl2080rc.h" +#include "ctrl2080/ctrl2080dma.h" +#include "ctrl2080/ctrl2080dmabuf.h" +#include "ctrl2080/ctrl2080nvd.h" +#include "ctrl2080/ctrl2080boardobj.h" +#include "ctrl2080/ctrl2080pmgr.h" +#include "ctrl2080/ctrl2080power.h" +#include "ctrl2080/ctrl2080lpwr.h" +#include "ctrl2080/ctrl2080acr.h" +#include "ctrl2080/ctrl2080ce.h" +#include "ctrl2080/ctrl2080nvlink.h" +#include "ctrl2080/ctrl2080flcn.h" +#include "ctrl2080/ctrl2080volt.h" +#include "ctrl2080/ctrl2080ecc.h" +#include "ctrl2080/ctrl2080cipher.h" +#include "ctrl2080/ctrl2080fla.h" +#include "ctrl2080/ctrl2080gsp.h" +#include "ctrl2080/ctrl2080pmu.h" + + +#include "ctrl2080/ctrl2080grmgr.h" + + +#include "ctrl2080/ctrl2080vgpumgrinternal.h" +#include "ctrl2080/ctrl2080hshub.h" +/* include appropriate os-specific command header */ + + +#include "ctrl2080/ctrl2080unix.h" + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h new file mode 100644 index 0000000..d67b0f2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080/ctrl2080acr.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h new file mode 100644 index 0000000..758a73a --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h @@ -0,0 +1,114 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080base.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NV20_SUBDEVICE_XX control commands and parameters */ + +#define NV2080_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x2080, NV2080_CTRL_##cat, idx) + +/* Subdevice command categories (6bits) */ +#define NV2080_CTRL_RESERVED (0x00) +#define NV2080_CTRL_GPU (0x01) +#define NV2080_CTRL_GPU_LEGACY_NON_PRIVILEGED (0x81) /* finn: Evaluated from "(NV2080_CTRL_GPU | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_FUSE (0x02) +#define NV2080_CTRL_FUSE_LEGACY_NON_PRIVILEGED (0x82) /* finn: Evaluated from "(NV2080_CTRL_FUSE | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_EVENT (0x03) +#define NV2080_CTRL_TIMER (0x04) +#define NV2080_CTRL_THERMAL (0x05) +#define NV2080_CTRL_THERMAL_LEGACY_PRIVILEGED (0xc5) /* finn: Evaluated from "(NV2080_CTRL_THERMAL | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_THERMAL_LEGACY_NON_PRIVILEGED (0x85) /* finn: Evaluated from "(NV2080_CTRL_THERMAL | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_I2C (0x06) +#define NV2080_CTRL_EXTI2C (0x07) +#define NV2080_CTRL_BIOS (0x08) +#define NV2080_CTRL_CIPHER (0x09) +#define NV2080_CTRL_INTERNAL (0x0A) +#define NV2080_CTRL_CLK_LEGACY_PRIVILEGED (0xd0) /* finn: Evaluated from "(NV2080_CTRL_CLK | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_CLK_LEGACY_NON_PRIVILEGED (0x90) /* finn: Evaluated from "(NV2080_CTRL_CLK | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_CLK (0x10) +#define NV2080_CTRL_FIFO (0x11) +#define NV2080_CTRL_GR (0x12) +#define NV2080_CTRL_FB (0x13) +#define NV2080_CTRL_MC (0x17) +#define NV2080_CTRL_BUS (0x18) +#define NV2080_CTRL_PERF_LEGACY_PRIVILEGED (0xe0) /* finn: Evaluated from "(NV2080_CTRL_PERF | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_PERF_LEGACY_NON_PRIVILEGED (0xa0) /* finn: Evaluated from "(NV2080_CTRL_PERF | 
NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_PERF (0x20) +#define NV2080_CTRL_NVIF (0x21) +#define NV2080_CTRL_RC (0x22) +#define NV2080_CTRL_GPIO (0x23) +#define NV2080_CTRL_GPIO_LEGACY_NON_PRIVILEGED (0xa3) /* finn: Evaluated from "(NV2080_CTRL_GPIO | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_NVD (0x24) +#define NV2080_CTRL_DMA (0x25) +#define NV2080_CTRL_PMGR (0x26) +#define NV2080_CTRL_PMGR_LEGACY_PRIVILEGED (0xe6) /* finn: Evaluated from "(NV2080_CTRL_PMGR | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_PMGR_LEGACY_NON_PRIVILEGED (0xa6) /* finn: Evaluated from "(NV2080_CTRL_PMGR | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_POWER (0x27) +#define NV2080_CTRL_POWER_LEGACY_NON_PRIVILEGED (0xa7) /* finn: Evaluated from "(NV2080_CTRL_POWER | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_LPWR (0x28) +#define NV2080_CTRL_LPWR_LEGACY_NON_PRIVILEGED (0xa8) /* finn: Evaluated from "(NV2080_CTRL_LPWR | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_LPWR_LEGACY_PRIVILEGED (0xe8) /* finn: Evaluated from "(NV2080_CTRL_LPWR | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_ACR (0x29) +#define NV2080_CTRL_CE (0x2A) +#define NV2080_CTRL_SPI (0x2B) +#define NV2080_CTRL_NVLINK (0x30) +#define NV2080_CTRL_FLCN (0x31) +#define NV2080_CTRL_VOLT (0x32) +#define NV2080_CTRL_VOLT_LEGACY_PRIVILEGED (0xf2) /* finn: Evaluated from "(NV2080_CTRL_VOLT | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_VOLT_LEGACY_NON_PRIVILEGED (0xb2) /* finn: Evaluated from "(NV2080_CTRL_VOLT | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_FAS (0x33) +#define NV2080_CTRL_ECC (0x34) +#define NV2080_CTRL_ECC_NON_PRIVILEGED (0xb4) /* finn: Evaluated from "(NV2080_CTRL_ECC | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_FLA (0x35) +#define NV2080_CTRL_GSP (0x36) +#define NV2080_CTRL_NNE (0x37) +#define NV2080_CTRL_NNE_LEGACY_NON_PRIVILEGED (0xb7) /* finn: Evaluated from "(NV2080_CTRL_NNE | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_GRMGR (0x38) +#define NV2080_CTRL_UCODE_FUZZER (0x39) +#define NV2080_CTRL_DMABUF (0x3A) +#define NV2080_CTRL_BIF (0x3B) + +// per-OS categories start at highest category and work backwards +#define NV2080_CTRL_OS_WINDOWS (0x3F) +#define NV2080_CTRL_OS_MACOS (0x3E) +#define NV2080_CTRL_OS_UNIX (0x3D) + + +/* + * NV2080_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_NULL (0x20800000) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* _ctrl2080base_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h new file mode 100644 index 0000000..203c94a --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h @@ -0,0 +1,484 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080bios.finn +// + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_XX bios-related control commands and parameters */ + + + +typedef NVXXXX_CTRL_XXX_INFO NV2080_CTRL_BIOS_INFO; + +/* Maximum number of bios infos that can be queried at once */ +#define NV2080_CTRL_BIOS_INFO_MAX_SIZE (0x0000000F) + +#define NV2080_CTRL_BIOS_INFO_INDEX_REVISION (0x00000000) +#define NV2080_CTRL_BIOS_INFO_INDEX_OEM_REVISION (0x00000001) + + + +/* + * NV2080_CTRL_CMD_BIOS_GET_INFO + * + * This command returns bios information for the associated GPU. + * Requests to retrieve bios information use a list of one or more + * NV2080_CTRL_BIOS_INFO structures. + * + * biosInfoListSize + * This field specifies the number of entries on the caller's + * biosInfoList. + * biosInfoList + * This field specifies a pointer in the caller's address space + * to the buffer into which the bios information is to be returned. + * This buffer must be at least as big as biosInfoListSize multiplied + * by the size of the NV2080_CTRL_BIOS_INFO structure. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV2080_CTRL_CMD_BIOS_GET_INFO (0x20800802) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | 0x2" */ + +typedef struct NV2080_CTRL_BIOS_GET_INFO_PARAMS { + NvU32 biosInfoListSize; + NV_DECLARE_ALIGNED(NvP64 biosInfoList, 8); +} NV2080_CTRL_BIOS_GET_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_BIOS_GET_INFO_V2 + * + * This command returns bios information for the associated GPU. + * Requests to retrieve bios information use a list of one or more + * NV2080_CTRL_BIOS_INFO structures. + * + * biosInfoListSize + * This field specifies the number of entries on the caller's + * biosInfoList. + * biosInfoList + * Bios information to be returned. 
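+ *
+ * For example, to fetch the BIOS revision and OEM revision in a single call
+ * (a sketch assuming the conventional index/data layout of
+ * NVXXXX_CTRL_XXX_INFO; error handling omitted):
+ *
+ * params.biosInfoListSize = 2;
+ * params.biosInfoList[0].index = NV2080_CTRL_BIOS_INFO_INDEX_REVISION;
+ * params.biosInfoList[1].index = NV2080_CTRL_BIOS_INFO_INDEX_OEM_REVISION;
+ * // on success, each entry's data field holds the requested value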
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV2080_CTRL_CMD_BIOS_GET_INFO_V2 (0x20800810) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS { + NvU32 biosInfoListSize; + NV2080_CTRL_BIOS_INFO biosInfoList[NV2080_CTRL_BIOS_INFO_MAX_SIZE]; +} NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS; + +/* + * NV2080_CTRL_BIOS_NBSI + * + * NV2080_CTRL_BIOS_NBSI_MAX_REG_STRING_LENGTH + * This is the maximum length of a given registry string input (in characters). + * + * NV2080_CTRL_BIOS_NBSI_STRING_TYPE_ASCII + * This is a value indicating the format of a registry string is ascii. + * NV2080_CTRL_BIOS_NBSI_STRING_TYPE_UNICODE + * This is a value indicating the format of a registry string is unicode. + * NV2080_CTRL_BIOS_NBSI_STRING_TYPE_HASH + * This is a value indicating a registry string is actually a pre-hashed value. + * + * NV2080_CTRL_BIOS_NBSI_REG_STRING + * This is a structure used to store a registry string object. + * The members are as follows: + * + * size + * This is the size (in bytes) of the data contained in the string. If this + * is greater than the maximum registry string length, an error will be + * returned. + * type + * This is the type of data contained in the registry string. It can be either + * ascii, unicode or a pre-hashed value. + * value + * This is the value of the string. Depending on the type, a different object + * will be used to access the data. + */ +#define NV2080_CTRL_BIOS_NBSI_MAX_REG_STRING_LENGTH (0x00000100) + +#define NV2080_CTRL_BIOS_NBSI_STRING_TYPE_ASCII (0x00000000) +#define NV2080_CTRL_BIOS_NBSI_STRING_TYPE_UNICODE (0x00000001) +#define NV2080_CTRL_BIOS_NBSI_STRING_TYPE_HASH (0x00000002) + +#define NV2080_CTRL_BIOS_NBSI_MODULE_ROOT (0x00000000) +#define NV2080_CTRL_BIOS_NBSI_MODULE_RM (0x00000001) +#define NV2080_CTRL_BIOS_NBSI_MODULE_DISPLAYDRIVER (0x00000002) +#define NV2080_CTRL_BIOS_NBSI_MODULE_VIDEO (0x00000003) +#define NV2080_CTRL_BIOS_NBSI_MODULE_CPL (0x00000004) +#define NV2080_CTRL_BIOS_NBSI_MODULE_D3D (0x00000005) +#define NV2080_CTRL_BIOS_NBSI_MODULE_OGL (0x00000006) +#define NV2080_CTRL_BIOS_NBSI_MODULE_PMU (0x00000007) +#define NV2080_CTRL_BIOS_NBSI_MODULE_MODE (0x00000008) +// this should equal the last NBSI_MODULE plus 1. +#define NV2080_CTRL_BIOS_NBSI_NUM_MODULES (0x00000009) + +// +// Never use this value! It's needed for DD/Video modules, but does not correspond +// to a valid NBSI hive! +// +#define NV2080_CTRL_BIOS_NBSI_MODULE_UNKNOWN (0x80000000) + +typedef struct NV2080_CTRL_BIOS_NBSI_REG_STRING { + NvU32 size; + NvU32 type; + + union { + NvU8 ascii[NV2080_CTRL_BIOS_NBSI_MAX_REG_STRING_LENGTH]; + NvU16 unicode[NV2080_CTRL_BIOS_NBSI_MAX_REG_STRING_LENGTH]; + NvU16 hash; + } value; +} NV2080_CTRL_BIOS_NBSI_REG_STRING; + + +/* + * NV2080_CTRL_CMD_BIOS_GET_NBSI + * + * module + * This field specifies the given module per the MODULE_TYPES enum. + * path + * This field specifies the full path and registry node name for a + * given NBSI object. This is a maximum of 255 unicode characters, + * but may be provided as ascii or a pre-formed hash per the type + * member. The size (in bytes) of the given string/hash should be + * provided in the size member. 
+ *
+ * NOTE: In the case of an incomplete path such as HKR, one may pass
+ * in simply the root node. E.g.:
+ * 1.) Normal case: HKLM\Path\Subpath
+ * 2.) Unknown case: HKR
+ * It is expected that all unknown/incomplete paths will be determined
+ * prior to NBSI programming! There is otherwise NO WAY to match
+ * the hash given by an incomplete path to that stored in NBSI!
+ *
+ * valueName
+ * This field specifies the registry name for a given NBSI object.
+ * This is a maximum of 255 unicode characters, but may be provided
+ * in ascii or a pre-formed hash per the type member. The size (in bytes)
+ * of the given string/hash should be provided in the size member.
+ * retBuf
+ * This field provides a pointer to a buffer into which the value
+ * retrieved from NBSI may be returned.
+ * retSize
+ * This field is an input/output. It specifies the maximum size of the
+ * return buffer as an input, and the size of the returned data as an
+ * output.
+ * errorCode
+ * This field is a return value. It gives an error code representing
+ * failure to return a value (as opposed to failure of the call).
+ * This obeys the following:
+ *
+ * NV2080_CTRL_BIOS_GET_NBSI_SUCCESS
+ * The call has returned complete and valid data.
+ * NV2080_CTRL_BIOS_GET_NBSI_OVERRIDE
+ * The call returned complete and valid data which is expected to override
+ * any stored registry settings.
+ * NV2080_CTRL_BIOS_GET_NBSI_INCOMPLETE
+ * The call returned data, but the size of the return buffer was
+ * insufficient to contain it. The value returned in retSize represents
+ * the total size necessary (in bytes) to contain the data.
+ * If the size was non-0, the buffer is filled with the object contents up
+ * to that size. Can be used with retBufOffset to use multiple calls to get
+ * tables of very large size.
+ * NV2080_CTRL_BIOS_GET_NBSI_NOT_FOUND
+ * The call did not find a valid NBSI object for this key. This indicates
+ * NBSI has no opinion and, more importantly, any data returned is identical
+ * to data passed in.
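+ *
+ * A typical calling pattern (a sketch; buffer management is the caller's) is
+ * to try once and grow the buffer when RM reports the object is larger:
+ *
+ * params.retSize = bufSize;
+ * // ...issue NV2080_CTRL_CMD_BIOS_GET_NBSI...
+ * if (params.errorCode == NV2080_CTRL_BIOS_GET_NBSI_INCOMPLETE)
+ * {
+ *     // params.retSize now holds the total bytes required; reallocate
+ *     // the buffer to that size and issue the control call again
+ * }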
+ * + * Possible return values are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_BIOS_GET_NBSI_SUCCESS (0x00000000) +#define NV2080_CTRL_BIOS_GET_NBSI_OVERRIDE (0x00000001) +#define NV2080_CTRL_BIOS_GET_NBSI_BAD_HASH (0xFFFFFFFA) +#define NV2080_CTRL_BIOS_GET_NBSI_APITEST_SUCCESS (0xFFFFFFFB) +#define NV2080_CTRL_BIOS_GET_NBSI_BAD_TABLE (0xFFFFFFFC) +#define NV2080_CTRL_BIOS_GET_NBSI_NO_TABLE (0xFFFFFFFD) +#define NV2080_CTRL_BIOS_GET_NBSI_INCOMPLETE (0xFFFFFFFE) +#define NV2080_CTRL_BIOS_GET_NBSI_NOT_FOUND (0xFFFFFFFF) + +#define NV2080_CTRL_CMD_BIOS_GET_NBSI (0x20800803) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_BIOS_GET_NBSI_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BIOS_GET_NBSI_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_BIOS_GET_NBSI_PARAMS { + NvU32 module; + NV2080_CTRL_BIOS_NBSI_REG_STRING path; + NV2080_CTRL_BIOS_NBSI_REG_STRING valueName; + NV_DECLARE_ALIGNED(NvP64 retBuf, 8); + NvU32 retSize; + NvU32 errorCode; +} NV2080_CTRL_BIOS_GET_NBSI_PARAMS; + +#define NV2080_CTRL_CMD_BIOS_GET_NBSI_V2 (0x2080080e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_BIOS_GET_NBSI_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_BIOS_GET_NBSI_MAX_RET_SIZE (0x100) + +#define NV2080_CTRL_BIOS_GET_NBSI_V2_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NV2080_CTRL_BIOS_GET_NBSI_V2_PARAMS { + NvU32 module; + NV2080_CTRL_BIOS_NBSI_REG_STRING path; + NV2080_CTRL_BIOS_NBSI_REG_STRING valueName; + NvU8 retBuf[NV2080_BIOS_GET_NBSI_MAX_RET_SIZE]; + NvU32 retSize; + NvU32 errorCode; +} NV2080_CTRL_BIOS_GET_NBSI_V2_PARAMS; + +/* + * NV2080_CTRL_CMD_BIOS_GET_NBSI_OBJ + * + * globType + * This field specifies the glob type wanted + * 0xffff: APItest... returns NV2080_CTRL_BIOS_GET_NBSI_APITEST_SUCCESS + * globIndex + * Index for globType desired + * 0 = best fit + * 1..255 = actual index + * globSource + * Index to nbsi directory sources used when getting entire directory + * 0 = registry + * 1 = VBIOS + * 2 = SBIOS + * 3 = ACPI + * retBufOffset + * When making multiple calls to get the object (if retSize is too small) + * offset into real object (0=start of object) + * retBuf + * This field provides a pointer to a buffer into which the object + * retrieved from NBSI may be returned + * retSize + * This field is an input/output. It specifies the maximum size of the + * return buffer as an input, and the size of the returned data as an + * output. + * totalObjSize + * This field is an output, where the total size of the object being + * retrieved is returned. + * errorCode + * This field is a return value. It gives an error code representing + * failure to return a value (as opposed to failure of the call). + * This obeys the following: + * + * NV2080_CTRL_BIOS_GET_NBSI_SUCCESS + * The call has returned complete and valid data. + * NV2080_CTRL_BIOS_GET_NBSI_OVERRIDE + * The call returned complete and valid data which is expected to override + * any stored registry settings. + * NV2080_CTRL_BIOS_GET_NBSI_INCOMPLETE + * The call returned data, but the size of the return buffer was + * insufficient to contain it. The value returned in retSize represents + * the total size necessary (in bytes) to contain the data. + * NV2080_CTRL_BIOS_GET_NBSI_NOT_FOUND + * The call did not find a valid NBSI object for this key. This indicates + * NBSI has no opinion and, more importantly, any data returned is identical + * to data passed in. 
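+ *
+ * Because retBufOffset selects where in the object the copy starts, a large
+ * glob can be streamed through a small buffer (a sketch; retBuf and retSize
+ * are reset by the caller before each iteration):
+ *
+ * params.retBufOffset = 0;
+ * do
+ * {
+ *     // issue NV2080_CTRL_CMD_BIOS_GET_NBSI_OBJ and consume the
+ *     // params.retSize bytes returned in retBuf
+ *     params.retBufOffset += params.retSize;
+ * } while (params.retBufOffset < params.totalObjSize);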
+ *
+ * Possible return values are:
+ * NV2080_CTRL_BIOS_GET_NBSI_SUCCESS
+ * NV2080_CTRL_BIOS_GET_NBSI_APITEST_NODIRACCESS
+ * NV2080_CTRL_BIOS_GET_NBSI_APITEST_SUCCESS
+ * NV2080_CTRL_BIOS_GET_NBSI_INCOMPLETE
+ * NV2080_CTRL_BIOS_GET_NBSI_BAD_TABLE
+ * NV2080_CTRL_BIOS_GET_NBSI_NO_TABLE
+ * NV2080_CTRL_BIOS_GET_NBSI_NOT_FOUND
+ */
+#define NV2080_CTRL_CMD_BIOS_GET_NBSI_OBJ (0x20800806) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS {
+    NvU16 globType;
+    NvU8  globIndex;
+    NvU16 globSource;
+    NvU32 retBufOffset;
+    NV_DECLARE_ALIGNED(NvP64 retBuf, 8);
+    NvU32 retSize;
+    NvU32 totalObjSize;
+    NvU32 errorCode;
+} NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS;
+
+#define GLOB_TYPE_GET_NBSI_DIR 0xfffe
+#define GLOB_TYPE_APITEST 0xffff
+#define GLOB_TYPE_GET_NBSI_ACPI_RAW 0xfffd
+
+
+
+/*
+ * NV2080_CTRL_CMD_BIOS_GET_SKU_INFO
+ *
+ * This command returns information about the current board SKU.
+ * NV_ERR_INVALID_OWNER will be returned if the call
+ * isn't made with the OS as the administrator.
+ *
+ * chipSKU
+ * This field returns the SKU for the current chip.
+ * chipSKUMod
+ * This field returns the SKU modifier.
+ * skuConfigVersion
+ * Version number for the SKU configuration detailing pstate, thermal, VF curve and so on.
+ * project
+ * This field returns the Project (Board) number.
+ * projectSKU
+ * This field returns the Project (Board) SKU number.
+ * CDP
+ * This field returns the Collaborative Design Project Number.
+ * projectSKUMod
+ * This field returns the Project (Board) SKU Modifier.
+ * businessCycle
+ * This field returns the business cycle the board is associated with.
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_OWNER
+ */
+#define NV2080_CTRL_CMD_BIOS_GET_SKU_INFO (0x20800808) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS_MESSAGE_ID (0x8U)
+
+typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+    NvU32 BoardID;
+    char  chipSKU[9];
+    char  chipSKUMod[5];
+    NvU32 skuConfigVersion;
+    char  project[5];
+    char  projectSKU[5];
+    char  CDP[6];
+    char  projectSKUMod[2];
+    NvU32 businessCycle;
+} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BIOS_GET_POST_TIME
+ *
+ * This command is used to get the GPU POST time (in milliseconds).
+ * If the associated GPU is the master GPU, this value will be recorded
+ * by the VBIOS and retrieved from the KDA buffer. If the associated
+ * GPU is a secondary GPU, then this value will reflect the devinit
+ * processing time.
+ *
+ * vbiosPostTime
+ * This parameter returns the vbios post time in msec.
+ *
+ * Possible return status values are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV2080_CTRL_CMD_BIOS_GET_POST_TIME (0x20800809) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 vbiosPostTime, 8);
+} NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_BIOS_GET_UEFI_SUPPORT
+ *
+ * This function is used to report the UEFI version, UEFI image presence and
+ * Graphics Firmware Mode, i.e. whether the system is running in UEFI or not.
+ *
+ * version
+ * This parameter returns the UEFI version.
+ *
+ * flags
+ * This parameter indicates UEFI image presence and Graphics Firmware mode.
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE
+ * This field returns UEFI presence value. Legal values for this
+ * field include:
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_NO
+ * This value indicates that UEFI image is not present.
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_YES
+ * This value indicates that UEFI image is present.
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_PLACEHOLDER
+ * This value indicates that there is a dummy UEFI placeholder,
+ * which can later be updated with a valid UEFI image.
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_HIDDEN
+ * This value indicates that UEFI image is hidden.
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING
+ * This field indicates the UEFI running value. Legal values for
+ * this parameter include:
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_FALSE
+ * This value indicates that UEFI is not running.
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_TRUE
+ * This value indicates that UEFI is running.
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_IS_EFI_INIT
+ * This field indicates the EFI running value. Legal values for
+ * this parameter include:
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_IS_EFI_INIT_FALSE
+ * This value indicates that the display is in VBIOS mode.
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_IS_EFI_INIT_TRUE
+ * This value indicates that the display is in EFI mode.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_READY
+ * NV_ERR_INVALID_STATE
+ */
+
+#define NV2080_CTRL_CMD_BIOS_GET_UEFI_SUPPORT (0x2080080b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS_MESSAGE_ID (0xBU)
+
+typedef struct NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS {
+    NvU32 version;
+    NvU32 flags;
+} NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS;
+
+/* Legal values for flags parameter */
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE 1:0
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_NO (0x00000000)
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_YES (0x00000001)
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_PLACEHOLDER (0x00000002)
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_HIDDEN (0x00000003)
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING 2:2
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_FALSE (0x00000000)
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_TRUE (0x00000001)
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_IS_EFI_INIT 3:3
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_IS_EFI_INIT_FALSE (0x00000000)
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_IS_EFI_INIT_TRUE (0x00000001)
+
+
+
+/* _ctrl2080bios_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h
new file mode 100644
index 0000000..cefa9d9
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h
@@ -0,0 +1,1557 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080boardobj.finn
+//
+
+
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+#include "ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h"
+
+/*!
+ * @brief NV20_SUBDEVICE_XX BOARDOBJ-related control commands and parameters.
+ *
+ * Base structures in RMCTRL equivalent to BOARDOBJ/BOARDOBJGRP in RM. NV2080
+ * structs in this file carry info w.r.t BOARDOBJ/BOARDOBJGRP.
+ */
+
+/*!
+ * @brief Type for representing an index of a BOARDOBJ within a
+ * BOARDOBJGRP. This type can also represent the number of elements
+ * within a BOARDOBJGRP or the number of bits in a BOARDOBJGRPMASK.
+ */
+typedef NvU16 NvBoardObjIdx;
+
+/*!
+ * @brief Type for representing an index into a mask element within a
+ * BOARDOBJGRPMASK to a @ref NV2080_CTRL_BOARDOBJGRP_MASK_PRIMITIVE.
+ */
+typedef NvU16 NvBoardObjMaskIdx;
+
+/*!
+ * @brief Primitive type which a BOARDOBJGRPMASK is composed of.
+ *
+ * For example, a 32 bit mask will have one of these elements and a 256 bit
+ * mask will have eight.
+ */
+typedef NvU32 NV2080_CTRL_BOARDOBJGRP_MASK_PRIMITIVE;
+
+/*!
+ * @brief Min value a single BOARDOBJGRPMASK element can hold.
+ *
+ * @note Must be kept in sync with @ref
+ * NV2080_CTRL_BOARDOBJGRP_MASK_PRIMITIVE.
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_ELEMENT_MIN NV_U32_MIN
+
+/*!
+ * @brief Max value a single BOARDOBJGRPMASK element can hold.
+ *
+ * @note Must be kept in sync with @ref
+ * NV2080_CTRL_BOARDOBJGRP_MASK_PRIMITIVE.
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_ELEMENT_MAX NV_U32_MAX
+
+/*!
+ * @brief Number of bits in the NV2080_CTRL_BOARDOBJGRP_MASK_PRIMITIVE type.
+ *
+ * This exists to eliminate the assumption that 32 bits is the width of the
+ * NV2080_CTRL_BOARDOBJGRP_MASK primitive element.
+ *
+ * @note Left shift by 3 (multiply by 8) converts the sizeof in bytes to the
+ * number of bits in our primitive/essential mask type.
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_BIT_SIZE 32
+
+/*!
+ * @brief Value for an invalid Board Object index.
+ *
+ * This value should only be used directly for input to and output
+ * from BOARDOBJ and BOARDOBJGRP code.
+ *
+ * @note This define should not be referenced directly in any
+ * implementing object code. Instead, each object should define its
+/*!
+ * @brief Value for an invalid Board Object index.
+ *
+ * This value should only be used directly for input to and output
+ * from BOARDOBJ and BOARDOBJGRP code.
+ *
+ * @note This define should not be referenced directly in any
+ * implementing object code. Instead, each object should define its
+ * own IDX_INVALID macro and alias it to whatever size fits their
+ * specific index storage type.
+ * For example, many objects still store indexes as NvU8 (because the
+ * GRPs are either _E32 or _E255) while others store as NvBoardObjIdx
+ * (currently aliased to NvU16), so they should alias to a correct
+ * type.
+ */
+#define NV2080_CTRL_BOARDOBJ_IDX_INVALID NV_U16_MAX
+
+/*!
+ * @brief Value for an invalid Board Object index.
+ *
+ * This value encodes an invalid/unsupported BOARDOBJ index for an
+ * 8-bit value. This should be used by any legacy appcode
+ * implementing BOARDOBJGRP which stores/encodes indexes as 8-bit
+ * values.
+ *
+ * All new groups should use @ref NV2080_CTRL_BOARDOBJ_IDX_INVALID.
+ *
+ * @note This define should not be referenced directly in any
+ * implementing object code. Instead, each object should define its
+ * own IDX_INVALID macro and alias it to whatever size fits their
+ * specific index storage type.
+ * For example, many objects still store indexes as NvU8 (because the
+ * GRPs are either _E32 or _E255) while others store as NvU16 (for
+ * GRPs larger than _E255), so they should alias to a correct type.
+ */
+#define NV2080_CTRL_BOARDOBJ_IDX_INVALID_8BIT NV_U8_MAX
+
+/*!
+ * @brief Computes the array index of the NV2080_CTRL_BOARDOBJGRP_MASK element
+ * storing the requested bit.
+ *
+ * @note Designed to be used in conjunction with @ref
+ * NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_OFFSET.
+ *
+ * @param[in] _bit Index of a bit within a bit mask.
+ *
+ * @return Array index of the mask element containing @ref _bit.
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_INDEX(_bit) \
+    ((_bit) / NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_BIT_SIZE)
+
+/*!
+ * @brief Computes the bit position within the NV2080_CTRL_BOARDOBJGRP_MASK
+ * element corresponding to the requested bit.
+ *
+ * @note Designed to be used in conjunction with @ref
+ * NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_INDEX.
+ *
+ * @param[in] _bit Index of a bit within a bit mask.
+ *
+ * @return Offset (in bits) within a mask element for @ref _bit.
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_OFFSET(_bit) \
+    ((_bit) % NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_BIT_SIZE)
+
+/*!
+ * @brief Computes the size of an array of NV2080_CTRL_BOARDOBJGRP_MASK
+ * elements that can store all of the mask's bits.
+ *
+ * @param[in] _bits Size of the mask in bits.
+ *
+ * @return Number of array elements needed to store @ref _bits number of bits.
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_DATA_SIZE(_bits) \
+    (NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_INDEX((_bits) - 1U) + 1U)
+
+
+/*!
+ * @brief Number of elements that are in the NV2080_CTRL_BOARDOBJGRP_MASK base
+ * class.
+ *
+ * @note "START_SIZE" is used here to represent the size of the mask that
+ * derived classes must build up from. See @ref
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E32, @ref
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E255, @ref
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E512, @ref
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E1024, @ref
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E2048, @ref
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E8192.
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_ARRAY_START_SIZE 1U
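A worked example may help fix the arithmetic; the numbers below follow directly from the 32-bit primitive defined above:

    /* Worked example of the element index/offset/size macros:
     *   ELEMENT_INDEX(37)  == 37 / 32 == 1   (second NvU32 word)
     *   ELEMENT_OFFSET(37) == 37 % 32 == 5   (bit 5 of that word)
     *   DATA_SIZE(255)     == ((255 - 1) / 32) + 1 == 8 words total, i.e.
     *   the 1 base word plus the 7 extension words of MASK_E255 below. */
    static void mask_layout_example(void)
    {
        unsigned int elemIdx  = 37u / 32u;                /* == 1 */
        unsigned int elemOff  = 37u % 32u;                /* == 5 */
        unsigned int dataSize = ((255u - 1u) / 32u) + 1u; /* == 8 */
        (void)elemIdx; (void)elemOff; (void)dataSize;
    }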
+/*!
+ * @brief Macro used to determine the number of NV2080_CTRL_BOARDOBJGRP_MASK
+ * elements required to extend the base number of elements in a mask,
+ * @ref NV2080_CTRL_BOARDOBJGRP_MASK_ARRAY_START_SIZE.
+ *
+ * @note Used to avoid dynamic memory allocation and the related
+ * code/data waste, as well as two levels of indirection when accessing
+ * the data bits stored in an array of @ref
+ * NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_BIT_SIZE sized words. Ideally
+ * the NV2080_CTRL_BOARDOBJGRP_MASK super-class array would have size
+ * zero, with the actual data stored entirely in the child's array.
+ * Since most compilers reject structures with zero-sized arrays, the
+ * first element word was moved to the super-class and the remaining
+ * array elements to the child class.
+ *
+ * @param[in] _bits Total number of bits to be represented in the
+ * NV2080_CTRL_BOARDOBJGRP_MASK extending mask class.
+ *
+ * @return Number of additional mask elements that must be allocated in order
+ * to extend the NV2080_CTRL_BOARDOBJGRP_MASK base class.
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_ARRAY_EXTENSION_SIZE(_bits) \
+    (NV2080_CTRL_BOARDOBJGRP_MASK_DATA_SIZE(_bits) - \
+     (NV2080_CTRL_BOARDOBJGRP_MASK_ARRAY_START_SIZE))
+
+/*!
+ * @brief Macro to set the input bit in a NV2080_CTRL_BOARDOBJGRP_MASK.
+ *
+ * @param[in] _pMask Pointer (PBOARDOBJGRPMASK) to the mask.
+ * @param[in] _bitIdx Index of the target bit within the mask.
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_BIT_SET(_pMask, _bitIdx) \
+    do { \
+        (_pMask)->pData[ \
+            NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_INDEX(_bitIdx)] |= \
+            NVBIT_TYPE( \
+                NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_OFFSET(_bitIdx), \
+                NV2080_CTRL_BOARDOBJGRP_MASK_PRIMITIVE); \
+    } while (NV_FALSE)
+
+/*!
+ * @brief Macro to clear the input bit in a NV2080_CTRL_BOARDOBJGRP_MASK.
+ *
+ * @param[in] _pMask Pointer (PBOARDOBJGRPMASK) to the mask.
+ * @param[in] _bitIdx Index of the target bit within the mask.
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_BIT_CLR(_pMask, _bitIdx) \
+    do { \
+        (_pMask)->pData[ \
+            NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_INDEX(_bitIdx)] &= \
+            ~NVBIT_TYPE( \
+                NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_OFFSET(_bitIdx), \
+                NV2080_CTRL_BOARDOBJGRP_MASK_PRIMITIVE); \
+    } while (NV_FALSE)
+
+/*!
+ * @brief Macro to test the input bit in a NV2080_CTRL_BOARDOBJGRP_MASK.
+ *
+ * @param[in] _pMask Pointer (PBOARDOBJGRPMASK) to the mask.
+ * @param[in] _bitIdx Index of the target bit within the mask.
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_BIT_GET(_pMask, _bitIdx) \
+    (((_pMask)->pData[NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_INDEX(_bitIdx)]\
+      & NVBIT_TYPE(NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_OFFSET(_bitIdx), \
+                   NV2080_CTRL_BOARDOBJGRP_MASK_PRIMITIVE)) != 0U)
+
+/*!
+ * @brief Not to be called directly. Helper macro allowing simple iteration
+ * over bits set in a NV2080_CTRL_BOARDOBJGRP_MASK.
+ *
+ * @param[in] _maxObjects
+ *     Maximum number of objects/bits in the BOARDOBJGRP and its
+ *     NV2080_CTRL_BOARDOBJGRP_MASK.
+ * @param[in,out] _index
+ *     lvalue that is used as a bit index in the loop (can be declared
+ *     as any NvU* or NvS* variable).
+ *     CRPTODO - I think we need to revisit this. Signed types of
+ *     size <= sizeof(NvBoardObjIdx) can't work.
+ * @param[in] _pMask
+ *     Pointer to the NV2080_CTRL_BOARDOBJGRP_MASK over which to iterate.
+ *
+ * @note CRPTODO - Follow-on CL will add ct_assert that _index has
+ * size >= sizeof(NvBoardObjIdx).
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX(_maxObjects,_index,_pMask) \
+{ \
+    for ((_index) = 0; (_index) < (_maxObjects); (_index)++) \
+    { \
+        if (!NV2080_CTRL_BOARDOBJGRP_MASK_BIT_GET((_pMask), (_index))) \
+        { \
+            continue; \
+        }
+#define NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX_END \
+    } \
+}
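A minimal usage sketch of the bit macros and the iteration pair. It relies on the NV2080_CTRL_BOARDOBJGRP_MASK_E32 container declared further down in this header, and note that FOR_EACH_INDEX opens a scope that the matching _END must close:

    /* Sketch: mark two objects in a 32-entry mask, then visit them in order.
     * The E32 container embeds the base class as 'super', which is what the
     * macros take. */
    static void mask_bit_example(void)
    {
        NV2080_CTRL_BOARDOBJGRP_MASK_E32 mask = { 0 };
        NvBoardObjIdx i;

        NV2080_CTRL_BOARDOBJGRP_MASK_BIT_SET(&mask.super, 3);
        NV2080_CTRL_BOARDOBJGRP_MASK_BIT_SET(&mask.super, 17);

        NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX(32, i, &mask.super)
            /* body runs only for set bits: i == 3, then i == 17 */
        NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX_END;
    }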
+/*!
+ * @brief Macro allowing simple iteration over bits set in a
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E32.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E32_FOR_EACH_INDEX(_index,_pMask) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX( \
+        NV2080_CTRL_BOARDOBJGRP_E32_MAX_OBJECTS,_index,_pMask)
+
+/*!
+ * @brief Macro allowing simple iteration over bits set in a
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E255.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E255_FOR_EACH_INDEX(_index,_pMask) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX( \
+        NV2080_CTRL_BOARDOBJGRP_E255_MAX_OBJECTS,_index,_pMask)
+
+/*!
+ * @brief Macro allowing simple iteration over bits set in a
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E512.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E512_FOR_EACH_INDEX(_index,_pMask) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX( \
+        NV2080_CTRL_BOARDOBJGRP_E512_MAX_OBJECTS,_index,_pMask)
+
+/*!
+ * @brief Macro allowing simple iteration over bits set in a
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E1024.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E1024_FOR_EACH_INDEX(_index,_pMask) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX( \
+        NV2080_CTRL_BOARDOBJGRP_E1024_MAX_OBJECTS,_index,_pMask)
+
+/*!
+ * @brief Macro allowing simple iteration over bits set in a
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E2048.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E2048_FOR_EACH_INDEX(_index,_pMask) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX( \
+        NV2080_CTRL_BOARDOBJGRP_E2048_MAX_OBJECTS,_index,_pMask)
+
+/*!
+ * @brief Macro allowing simple iteration over bits set in a
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E8192.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E8192_FOR_EACH_INDEX(_index,_pMask) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_FOR_EACH_INDEX( \
+        NV2080_CTRL_BOARDOBJGRP_E8192_MAX_OBJECTS,_index,_pMask)
+
+/*!
+ * @brief Not to be called directly. Macro to initialize a
+ * NV2080_CTRL_BOARDOBJGRP_MASK to an empty mask.
+ *
+ * @param[in] _pMask NV2080_CTRL_BOARDOBJGRP_MASK to initialize.
+ * @param[in] _bitSize Size of the mask in bits.
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_INIT(_pMask,_bitSize) \
+    do { \
+        NvU32 *_pData = (_pMask)->pData; \
+        NvBoardObjIdx _dataCount = NV2080_CTRL_BOARDOBJGRP_MASK_DATA_SIZE(_bitSize); \
+        NvBoardObjIdx _dataIndex; \
+        for (_dataIndex = 0; _dataIndex < _dataCount; _dataIndex++) \
+        { \
+            _pData[_dataIndex] = 0U; \
+        } \
+    } while (NV_FALSE)
+/*!
+ * @brief Macro to initialize NV2080_CTRL_BOARDOBJGRP_MASK_E32 to an empty
+ * mask.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_INIT().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_INIT()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E32_INIT(_pMask) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_INIT(_pMask, \
+        NV2080_CTRL_BOARDOBJGRP_E32_MAX_OBJECTS)
+
+/*!
+ * @brief Macro to initialize NV2080_CTRL_BOARDOBJGRP_MASK_E255 to an empty
+ * mask.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_INIT().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_INIT()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E255_INIT(_pMask) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_INIT(_pMask, \
+        NV2080_CTRL_BOARDOBJGRP_E255_MAX_OBJECTS)
+
+/*!
+ * @brief Macro to initialize NV2080_CTRL_BOARDOBJGRP_MASK_E512 to an empty
+ * mask.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_INIT().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_INIT()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E512_INIT(_pMask) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_INIT(_pMask, \
+        NV2080_CTRL_BOARDOBJGRP_E512_MAX_OBJECTS)
+
+/*!
+ * @brief Macro to initialize NV2080_CTRL_BOARDOBJGRP_MASK_E1024 to an empty
+ * mask.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_INIT().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_INIT()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E1024_INIT(_pMask) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_INIT(_pMask, \
+        NV2080_CTRL_BOARDOBJGRP_E1024_MAX_OBJECTS)
+
+/*!
+ * @brief Macro to initialize NV2080_CTRL_BOARDOBJGRP_MASK_E2048 to an empty
+ * mask.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_INIT().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_INIT()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E2048_INIT(_pMask) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_INIT(_pMask, \
+        NV2080_CTRL_BOARDOBJGRP_E2048_MAX_OBJECTS)
+
+/*!
+ * @brief Macro to initialize NV2080_CTRL_BOARDOBJGRP_MASK_E8192 to an empty
+ * mask.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_INIT().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_INIT()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E8192_INIT(_pMask) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_INIT(_pMask, \
+        NV2080_CTRL_BOARDOBJGRP_E8192_MAX_OBJECTS)
+
+/*!
+ * @brief Not to be called directly. Macro to perform a bitwise AND of a
+ * NV2080_CTRL_BOARDOBJGRP_MASK with another NV2080_CTRL_BOARDOBJGRP_MASK.
+ *
+ * @param[out] _pMaskOut NV2080_CTRL_BOARDOBJGRP_MASK receiving the bitwise AND output.
+ * @param[in] _pMask1 First NV2080_CTRL_BOARDOBJGRP_MASK operand.
+ * @param[in] _pMask2 Second NV2080_CTRL_BOARDOBJGRP_MASK operand.
+ * @param[in] _bitSize Size of the mask in bits.
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_AND(_pMaskOut, _pMask1, _pMask2, _bitSize) \
+    do { \
+        NvU32 *_pData1 = (_pMask1)->pData; \
+        NvU32 *_pData2 = (_pMask2)->pData; \
+        NvU32 *_pDataOut = (_pMaskOut)->pData; \
+        NvBoardObjIdx _dataCount = NV2080_CTRL_BOARDOBJGRP_MASK_DATA_SIZE(_bitSize); \
+        NvBoardObjIdx _dataIndex; \
+        for (_dataIndex = 0; _dataIndex < _dataCount; _dataIndex++) \
+        { \
+            _pDataOut[_dataIndex] = _pData1[_dataIndex] & _pData2[_dataIndex]; \
+        } \
+    } while (NV_FALSE)
+
+/*!
+ * @brief Macro to bitwise AND a NV2080_CTRL_BOARDOBJGRP_MASK_E32 with another
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E32.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_AND().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_AND()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E32_AND(_pMaskOut, _pMask1, _pMask2) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_AND(_pMaskOut, _pMask1, _pMask2, \
+        NV2080_CTRL_BOARDOBJGRP_E32_MAX_OBJECTS)
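Putting the pieces together, a short sketch intersecting two E32 masks. The E32 container type itself is declared just below, and NV2080_CTRL_BOARDOBJGRP_E32_MAX_OBJECTS is assumed from the included group-classes header; the bit values are arbitrary:

    /* Sketch: build two E32 masks and compute their intersection. */
    static void mask_and_example(void)
    {
        NV2080_CTRL_BOARDOBJGRP_MASK_E32 a, b, out;

        NV2080_CTRL_BOARDOBJGRP_MASK_E32_INIT(&a.super);
        NV2080_CTRL_BOARDOBJGRP_MASK_E32_INIT(&b.super);
        NV2080_CTRL_BOARDOBJGRP_MASK_E32_INIT(&out.super);

        NV2080_CTRL_BOARDOBJGRP_MASK_BIT_SET(&a.super, 4);
        NV2080_CTRL_BOARDOBJGRP_MASK_BIT_SET(&a.super, 9);
        NV2080_CTRL_BOARDOBJGRP_MASK_BIT_SET(&b.super, 9);

        /* out = a & b, word by word; only bit 9 remains set. */
        NV2080_CTRL_BOARDOBJGRP_MASK_E32_AND(&out.super, &a.super, &b.super);
    }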
+/*!
+ * @brief Macro to bitwise AND a NV2080_CTRL_BOARDOBJGRP_MASK_E255 with another
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E255.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_AND().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_AND()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E255_AND(_pMaskOut, _pMask1, _pMask2) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_AND(_pMaskOut, _pMask1, _pMask2, \
+        NV2080_CTRL_BOARDOBJGRP_E255_MAX_OBJECTS)
+
+/*!
+ * @brief Macro to bitwise AND a NV2080_CTRL_BOARDOBJGRP_MASK_E512 with another
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E512.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_AND().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_AND()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E512_AND(_pMaskOut, _pMask1, _pMask2) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_AND(_pMaskOut, _pMask1, _pMask2, \
+        NV2080_CTRL_BOARDOBJGRP_E512_MAX_OBJECTS)
+
+/*!
+ * @brief Macro to bitwise AND a NV2080_CTRL_BOARDOBJGRP_MASK_E1024 with another
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E1024.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_AND().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_AND()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E1024_AND(_pMaskOut, _pMask1, _pMask2) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_AND(_pMaskOut, _pMask1, _pMask2, \
+        NV2080_CTRL_BOARDOBJGRP_E1024_MAX_OBJECTS)
+
+/*!
+ * @brief Macro to bitwise AND a NV2080_CTRL_BOARDOBJGRP_MASK_E2048 with another
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E2048.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_AND().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_AND()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E2048_AND(_pMaskOut, _pMask1, _pMask2) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_AND(_pMaskOut, _pMask1, _pMask2, \
+        NV2080_CTRL_BOARDOBJGRP_E2048_MAX_OBJECTS)
+
+/*!
+ * @brief Macro to bitwise AND a NV2080_CTRL_BOARDOBJGRP_MASK_E8192 with another
+ * NV2080_CTRL_BOARDOBJGRP_MASK_E8192.
+ *
+ * Wrapper for @ref NV2080_CTRL_BOARDOBJGRP_MASK_AND().
+ *
+ * @copydetails NV2080_CTRL_BOARDOBJGRP_MASK_AND()
+ */
+#define NV2080_CTRL_BOARDOBJGRP_MASK_E8192_AND(_pMaskOut, _pMask1, _pMask2) \
+    NV2080_CTRL_BOARDOBJGRP_MASK_AND(_pMaskOut, _pMask1, _pMask2, \
+        NV2080_CTRL_BOARDOBJGRP_E8192_MAX_OBJECTS)
+
+/*!
+ * @brief Board Object Group Mask base class.
+ *
+ * Used to unify access to all NV2080_CTRL_BOARDOBJGRP_MASK_E** child classes.
+ */
+typedef struct NV2080_CTRL_BOARDOBJGRP_MASK {
+    /*!
+     * @brief Start with a single element array which is enough to represent
+     * NV2080_CTRL_BOARDOBJGRP_MASK_MASK_ELEMENT_BIT_SIZE bits.
+     *
+     * @note Must be the last member of this structure.
+     */
+    // FINN PORT: The below field is a bit vector!
+    // In FINN, bit vectors are arrays of bools and each bool becomes 1 bit when used in an array
+    // FINN generates an array of NvU32's on the back end for these bit vectors
+    NvU32 pData[1] /* 32 bits */;
+} NV2080_CTRL_BOARDOBJGRP_MASK;
+typedef struct NV2080_CTRL_BOARDOBJGRP_MASK *PNV2080_CTRL_BOARDOBJGRP_MASK;
+
+/*!
+ * @brief NV2080_CTRL_BOARDOBJGRP_MASK child class capable of storing 32 bits
+ * indexed between 0..31.
+ */
+typedef struct NV2080_CTRL_BOARDOBJGRP_MASK_E32 {
+    /*!
+     * @brief NV2080_CTRL_BOARDOBJGRP_MASK super-class. Must be the first
+     * member of the structure.
+     */
+    NV2080_CTRL_BOARDOBJGRP_MASK super;
+} NV2080_CTRL_BOARDOBJGRP_MASK_E32;
+typedef struct NV2080_CTRL_BOARDOBJGRP_MASK_E32 *PNV2080_CTRL_BOARDOBJGRP_MASK_E32;
+
+/*!
+ * @brief NV2080_CTRL_BOARDOBJGRP_MASK child class capable of storing 255 bits
+ * indexed between 0..254.
+ */
+typedef struct NV2080_CTRL_BOARDOBJGRP_MASK_E255 {
+    /*!
+ * @brief NV2080_CTRL_BOARDOBJGRP_MASK super-class. Must be the first + * member of the structure. + */ + NV2080_CTRL_BOARDOBJGRP_MASK super; + + /*! + * @brief Continuation of the array of + * NV2080_CTRL_BOARDOBJGRP_MASK_PRIMITIVE elements representing the + * bit-mask. + * + * @note Must be the second member of the structure. + */ + + // FINN PORT: The below field is a bit vector! + // In FINN, bit vectors are arrays of bools and each bool becomes 1 bit when used in an array + // FINN generates an array of NvU32's on the back end for these bit vectors + NvU32 pDataE255[7] /* 223 bits */; +} NV2080_CTRL_BOARDOBJGRP_MASK_E255; +typedef struct NV2080_CTRL_BOARDOBJGRP_MASK_E255 *PNV2080_CTRL_BOARDOBJGRP_MASK_E255; + +/*! + * @brief NV2080_CTRL_BOARDOBJGRP_MASK child class capable of storing 512 bits + * indexed between 0..511. + */ +typedef struct NV2080_CTRL_BOARDOBJGRP_MASK_E512 { + /*! + * @brief NV2080_CTRL_BOARDOBJGRP_MASK super-class. Must be the first + * member of the structure. + */ + NV2080_CTRL_BOARDOBJGRP_MASK super; + + /*! + * @brief Continuation of the array of + * NV2080_CTRL_BOARDOBJGRP_MASK_PRIMITIVE elements representing the + * bit-mask. + * + * @note Must be the second member of the structure. + */ + + // FINN PORT: The below field is a bit vector! + // In FINN, bit vectors are arrays of bools and each bool becomes 1 bit when used in an array + // FINN generates an array of NvU32's on the back end for these bit vectors + NvU32 pDataE512[15] /* 480 bits */; +} NV2080_CTRL_BOARDOBJGRP_MASK_E512; +typedef struct NV2080_CTRL_BOARDOBJGRP_MASK_E512 *PNV2080_CTRL_BOARDOBJGRP_MASK_E512; + +/*! + * @brief NV2080_CTRL_BOARDOBJGRP_MASK child class capable of storing 1024 bits + * indexed between 0..1023. + */ +typedef struct NV2080_CTRL_BOARDOBJGRP_MASK_E1024 { + /*! + * @brief NV2080_CTRL_BOARDOBJGRP_MASK super-class. Must be the first + * member of the structure. + */ + NV2080_CTRL_BOARDOBJGRP_MASK super; + + /*! + * @brief Continuation of the array of + * NV2080_CTRL_BOARDOBJGRP_MASK_PRIMITIVE elements representing the + * bit-mask. + * + * @note Must be the second member of the structure. + */ + + // FINN PORT: The below field is a bit vector! + // In FINN, bit vectors are arrays of bools and each bool becomes 1 bit when used in an array + // FINN generates an array of NvU32's on the back end for these bit vectors + NvU32 pDataE1024[31] /* 992 bits */; +} NV2080_CTRL_BOARDOBJGRP_MASK_E1024; +typedef struct NV2080_CTRL_BOARDOBJGRP_MASK_E1024 *PNV2080_CTRL_BOARDOBJGRP_MASK_E1024; + +/*! + * @brief NV2080_CTRL_BOARDOBJGRP_MASK child class capable of storing 2048 bits + * indexed between 0..2047. + */ +typedef struct NV2080_CTRL_BOARDOBJGRP_MASK_E2048 { + /*! + * @brief NV2080_CTRL_BOARDOBJGRP_MASK super-class. Must be the first + * member of the structure. + */ + NV2080_CTRL_BOARDOBJGRP_MASK super; + + /*! + * @brief Continuation of the array of + * NV2080_CTRL_BOARDOBJGRP_MASK_PRIMITIVE elements representing the + * bit-mask. + * + * @note Must be the second member of the structure. + */ + + // FINN PORT: The below field is a bit vector! + // In FINN, bit vectors are arrays of bools and each bool becomes 1 bit when used in an array + // FINN generates an array of NvU32's on the back end for these bit vectors + NvU32 pDataE2048[63] /* 2016 bits */; +} NV2080_CTRL_BOARDOBJGRP_MASK_E2048; +typedef struct NV2080_CTRL_BOARDOBJGRP_MASK_E2048 *PNV2080_CTRL_BOARDOBJGRP_MASK_E2048; + +/*! 
+ * @brief NV2080_CTRL_BOARDOBJGRP_MASK child class capable of storing 8192 bits
+ * indexed between 0..8191.
+ */
+typedef struct NV2080_CTRL_BOARDOBJGRP_MASK_E8192 {
+    /*!
+     * @brief NV2080_CTRL_BOARDOBJGRP_MASK super-class. Must be the first
+     * member of the structure.
+     */
+    NV2080_CTRL_BOARDOBJGRP_MASK super;
+
+    /*!
+     * @brief Continuation of the array of
+     * NV2080_CTRL_BOARDOBJGRP_MASK_PRIMITIVE elements representing the
+     * bit-mask.
+     *
+     * @note Must be the second member of the structure.
+     */
+
+    // FINN PORT: The below field is a bit vector!
+    // In FINN, bit vectors are arrays of bools and each bool becomes 1 bit when used in an array
+    // FINN generates an array of NvU32's on the back end for these bit vectors
+    NvU32 pDataE8192[255] /* 8160 bits */;
+} NV2080_CTRL_BOARDOBJGRP_MASK_E8192;
+typedef struct NV2080_CTRL_BOARDOBJGRP_MASK_E8192 *PNV2080_CTRL_BOARDOBJGRP_MASK_E8192;
+
+/*!
+ * @brief Macro to provide the BOARDOBJ type for a given (UNIT, CLASS, TYPE)
+ * combination.
+ *
+ * @details For arguments (FOO, BAR, BAZ), this macro will return
+ * NV2080_CTRL_FOO_BAR_TYPE_BAZ.
+ *
+ * @param[in] _unit The unit.
+ * @param[in] _class The class.
+ * @param[in] _type The type.
+ *
+ * @return BOARDOBJ object type identifier.
+ */
+#define NV2080_CTRL_BOARDOBJ_TYPE(_unit, _class, _type) \
+    NV2080_CTRL_##_unit##_##_class##_TYPE_##_type
+
+
+
+/*!
+ * @brief Base structure which describes a BOARDOBJ CLIENT_INFO in RMCTRL.
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_INFO {
+    /*!
+     * @brief BOARDOBJ type.
+     *
+     * This should be a unique value within the class to which the BOARDOBJ belongs.
+     */
+    NvU8 type;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_INFO;
+
+/*!
+ * @brief Base structure which describes a BOARDOBJ CLIENT_STATUS in RMCTRL.
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_STATUS {
+    /*!
+     * @brief BOARDOBJ type.
+     *
+     * This should be a unique value within the class to which the BOARDOBJ belongs.
+     */
+    NvU8 type;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_STATUS;
+
+/*!
+ * @brief Base structure which describes a BOARDOBJ CLIENT_CONTROL in RMCTRL.
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_CONTROL {
+    /*!
+     * @brief BOARDOBJ type.
+     *
+     * This should be a unique value within the class to which the BOARDOBJ belongs.
+     */
+    NvU8 type;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_CONTROL;
+
+/*!
+ * @brief Base structure which describes a BOARDOBJ CLIENT_READINGS in RMCTRL.
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_READINGS {
+    /*!
+     * @brief BOARDOBJ type.
+     *
+     * This should be a unique value within the class to which the BOARDOBJ belongs.
+     */
+    NvU8 type;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_READINGS;
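Since NV2080_CTRL_BOARDOBJ_TYPE is pure token pasting, a tiny hedged illustration of how the `type` discriminator above is typically matched; the PERF/PSTATE names and the 0x20 value are hypothetical, invented only to show the expansion:

    /* Hypothetical constant that some unit header would define: */
    #define NV2080_CTRL_PERF_PSTATE_TYPE_3X (0x20U)

    /* NV2080_CTRL_BOARDOBJ_TYPE(PERF, PSTATE, 3X) pastes its arguments into
     * the identifier NV2080_CTRL_PERF_PSTATE_TYPE_3X, so the two are
     * interchangeable: */
    NvU8 type = NV2080_CTRL_BOARDOBJ_TYPE(PERF, PSTATE, 3X); /* == 0x20 */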
+/*!
+ * @brief Base structure which describes a CLIENT_INFO BOARDOBJ_INTERFACE in RMCTRL.
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_INFO_INTERFACE {
+    /*!
+     * @brief Reserved for future use cases.
+     */
+    NvU8 rsvd;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_INFO_INTERFACE;
+
+/*!
+ * @brief Base structure which describes a CLIENT_STATUS BOARDOBJ_INTERFACE in RMCTRL.
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_STATUS_INTERFACE {
+    /*!
+     * @brief Reserved for future use cases.
+     */
+    NvU8 rsvd;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_STATUS_INTERFACE;
+
+/*!
+ * @brief Base structure which describes a CLIENT_CONTROL BOARDOBJ_INTERFACE in RMCTRL.
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_CONTROL_INTERFACE {
+    /*!
+     * @brief Reserved for future use cases.
+     */
+    NvU8 rsvd;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_CONTROL_INTERFACE;
+
+/*!
+ * @brief Base structure which describes a CLIENT_READINGS BOARDOBJ_INTERFACE in RMCTRL.
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_READINGS_INTERFACE {
+    /*!
+     * @brief Reserved for future use cases.
+     */
+    NvU8 rsvd;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_READINGS_INTERFACE;
+
+/*!
+ * @brief Structure representing CLIENT_INFO for a BOARDOBJ_IFACE_MODEL interface
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_INFO_IFACE_MODEL {
+    /*!
+     * Super class CLIENT_INFO
+     */
+    NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_INFO_INTERFACE super;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_INFO_IFACE_MODEL;
+
+/*!
+ * @brief Structure representing CLIENT_INFO for a BOARDOBJ_IFACE_MODEL_20
+ * interface
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_INFO_IFACE_MODEL_20 {
+    /*!
+     * Super class CLIENT_INFO
+     */
+    NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_INFO_IFACE_MODEL super;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_INFO_IFACE_MODEL_20;
+
+/*!
+ * @brief Structure representing CLIENT_INFO for a
+ * BOARDOBJ_IFACE_MODEL_PMU_INIT_1X interface
+ *
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_INFO_IFACE_MODEL_PMU_INIT_1X {
+    /*!
+     * Super class CLIENT_INFO
+     */
+    NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_INFO_IFACE_MODEL_20 super;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_INFO_IFACE_MODEL_PMU_INIT_1X;
+
+/*!
+ * @brief Structure representing CLIENT_STATUS for a BOARDOBJ_IFACE_MODEL interface
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_STATUS_IFACE_MODEL {
+    /*!
+     * Super class CLIENT_STATUS
+     */
+    NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_STATUS_INTERFACE super;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_STATUS_IFACE_MODEL;
+
+/*!
+ * @brief Structure representing CLIENT_STATUS for a BOARDOBJ_IFACE_MODEL_20
+ * interface
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_STATUS_IFACE_MODEL_20 {
+    /*!
+     * Super class CLIENT_STATUS
+     */
+    NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_STATUS_IFACE_MODEL super;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_STATUS_IFACE_MODEL_20;
+
+/*!
+ * @brief Structure representing CLIENT_STATUS for a
+ * BOARDOBJ_IFACE_MODEL_PMU_INIT_1X interface
+ *
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_STATUS_IFACE_MODEL_PMU_INIT_1X {
+    /*!
+     * Super class CLIENT_STATUS
+     */
+    NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_STATUS_IFACE_MODEL_20 super;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_STATUS_IFACE_MODEL_PMU_INIT_1X;
+
+/*!
+ * @brief Structure representing CLIENT_CONTROL for a BOARDOBJ_IFACE_MODEL interface
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_CONTROL_IFACE_MODEL {
+    /*!
+     * Super class CLIENT_CONTROL
+     */
+    NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_CONTROL_INTERFACE super;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_CONTROL_IFACE_MODEL;
+
+/*!
+ * @brief Structure representing CLIENT_CONTROL for a BOARDOBJ_IFACE_MODEL_20
+ * interface
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_CONTROL_IFACE_MODEL_20 {
+    /*!
+     * Super class CLIENT_CONTROL
+     */
+    NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_CONTROL_IFACE_MODEL super;
+} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_CONTROL_IFACE_MODEL_20;
+
+/*!
+ * @brief Structure representing CLIENT_CONTROL for a
+ * BOARDOBJ_IFACE_MODEL_PMU_INIT_1X interface
+ *
+ */
+typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_CONTROL_IFACE_MODEL_PMU_INIT_1X {
+    /*!
+ * Super class CLIENT_CONTROL + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_CONTROL_IFACE_MODEL_20 super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_CONTROL_IFACE_MODEL_PMU_INIT_1X; + +/*! + * @brief Structure representing CLIENT_READINGS for a BOARDOBJ_IFACE_MODEL interface + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_READINGS_IFACE_MODEL { + /*! + * Super class CLIENT_READINGS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_READINGS_INTERFACE super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_READINGS_IFACE_MODEL; + +/*! + * @brief Structure representing CLIENT_READINGS for a BOARDOBJ_IFACE_MODEL_20 + * interface + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_READINGS_IFACE_MODEL_20 { + /*! + * Super class CLIENT_READINGS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_READINGS_IFACE_MODEL super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_READINGS_IFACE_MODEL_20; + +/*! + * @brief Structure representing CLIENT_READINGS for a + * BOARDOBJ_IFACE_MODEL_PMU_INIT_1X interface + * + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_READINGS_IFACE_MODEL_PMU_INIT_1X { + /*! + * Super class CLIENT_READINGS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_READINGS_IFACE_MODEL_20 super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJ_CLIENT_READINGS_IFACE_MODEL_PMU_INIT_1X; + +/*! + * CLIENT_INFO data for @ref BOARDOBJGRP base class + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO { + /*! + * @brief Concrete class type of this BOARDOBJGRP + */ + NvU8 classType; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO; + +/*! + * CLIENT_INFO SUPER_ALIAS version for @ref BOARDOBJGRP base class + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_SUPER_ALIAS { + /*! + * Base class CLIENT_INFO + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO super; + + NV2080_CTRL_BOARDOBJGRP_MASK objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_SUPER_ALIAS; + +/*! + * CLIENT_STATUS data for @ref BOARDOBJGRP base class + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS { + /*! + * @brief Concrete class type of this BOARDOBJGRP + */ + NvU8 classType; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS; + +/*! + * CLIENT_STATUS SUPER_ALIAS version for @ref BOARDOBJGRP base class + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_SUPER_ALIAS { + /*! + * Base class CLIENT_STATUS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS super; + + NV2080_CTRL_BOARDOBJGRP_MASK objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_SUPER_ALIAS; + +/*! + * CLIENT_CONTROL data for @ref BOARDOBJGRP base class + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL { + /*! + * @brief Concrete class type of this BOARDOBJGRP + */ + NvU8 classType; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL; + +/*! + * CLIENT_CONTROL SUPER_ALIAS version for @ref BOARDOBJGRP base class + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_SUPER_ALIAS { + /*! + * Base class CLIENT_CONTROL + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL super; + + NV2080_CTRL_BOARDOBJGRP_MASK objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_SUPER_ALIAS; + +/*! + * CLIENT_READINGS data for @ref BOARDOBJGRP base class + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS { + /*! + * @brief Concrete class type of this BOARDOBJGRP + */ + NvU8 classType; + + /*! + * Whether the CLIENT_READINGS have been initialized. 
+ * This should be set to false on any initial call into the API + */ + NvBool bInitialized; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS; + +/*! + * CLIENT_READINGS SUPER_ALIAS version for @ref BOARDOBJGRP base class + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_SUPER_ALIAS { + /*! + * Base class CLIENT_READINGS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS super; + + NV2080_CTRL_BOARDOBJGRP_MASK objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_SUPER_ALIAS; + +/*! + * CLIENT_INFO for @ref BOARDOBJGRP_E32 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_E32 { + /*! + * Base class CLIENT_INFO + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E32 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_E32; + +/*! + * CLIENT_STATUS for @ref BOARDOBJGRP_E32 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_E32 { + /*! + * Base class CLIENT_STATUS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E32 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_E32; + +/*! + * CLIENT_CONTROL for @ref BOARDOBJGRP_E32 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_E32 { + /*! + * Base class CLIENT_CONTROL + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E32 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_E32; + +/*! + * CLIENT_READINGS for @ref BOARDOBJGRP_E32 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_E32 { + /*! + * Base class CLIENT_READINGS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E32 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_E32; + +/*! + * CLIENT_INFO for @ref BOARDOBJGRP_E255 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_E255 { + /*! + * Base class CLIENT_INFO + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E255 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_E255; + +/*! + * CLIENT_STATUS for @ref BOARDOBJGRP_E255 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_E255 { + /*! + * Base class CLIENT_STATUS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E255 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_E255; + +/*! + * CLIENT_CONTROL for @ref BOARDOBJGRP_E255 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_E255 { + /*! + * Base class CLIENT_CONTROL + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E255 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_E255; + +/*! + * CLIENT_READINGS for @ref BOARDOBJGRP_E255 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_E255 { + /*! + * Base class CLIENT_READINGS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS super; + + /*! 
+ * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E255 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_E255; + +/*! + * CLIENT_INFO for @ref BOARDOBJGRP_E512 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_E512 { + /*! + * Base class CLIENT_INFO + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E512 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_E512; + +/*! + * CLIENT_STATUS for @ref BOARDOBJGRP_E512 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_E512 { + /*! + * Base class CLIENT_STATUS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E512 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_E512; + +/*! + * CLIENT_CONTROL for @ref BOARDOBJGRP_E512 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_E512 { + /*! + * Base class CLIENT_CONTROL + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E512 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_E512; + +/*! + * CLIENT_READINGS for @ref BOARDOBJGRP_E512 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_E512 { + /*! + * Base class CLIENT_READINGS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E512 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_E512; + +/*! + * CLIENT_INFO for @ref BOARDOBJGRP_E1024 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_E1024 { + /*! + * Base class CLIENT_INFO + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E1024 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_E1024; + +/*! + * CLIENT_STATUS for @ref BOARDOBJGRP_E1024 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_E1024 { + /*! + * Base class CLIENT_STATUS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E1024 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_E1024; + +/*! + * CLIENT_CONTROL for @ref BOARDOBJGRP_E1024 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_E1024 { + /*! + * Base class CLIENT_CONTROL + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E1024 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_E1024; + +/*! + * CLIENT_READINGS for @ref BOARDOBJGRP_E1024 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_E1024 { + /*! + * Base class CLIENT_READINGS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E1024 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_E1024; + +/*! + * CLIENT_INFO for @ref BOARDOBJGRP_E2048 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_E2048 { + /*! 
+ * Base class CLIENT_INFO + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E2048 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_E2048; + +/*! + * CLIENT_STATUS for @ref BOARDOBJGRP_E2048 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_E2048 { + /*! + * Base class CLIENT_STATUS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E2048 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_E2048; + +/*! + * CLIENT_CONTROL for @ref BOARDOBJGRP_E2048 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_E2048 { + /*! + * Base class CLIENT_CONTROL + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E2048 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_E2048; + +/*! + * CLIENT_READINGS for @ref BOARDOBJGRP_E2048 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_E2048 { + /*! + * Base class CLIENT_READINGS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E2048 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_E2048; + + +/*! + * CLIENT_INFO for @ref BOARDOBJGRP_E8192 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_E8192 { + /*! + * Base class CLIENT_INFO + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E8192 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_E8192; + +/*! + * CLIENT_STATUS for @ref BOARDOBJGRP_E8192 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_E8192 { + /*! + * Base class CLIENT_STATUS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E8192 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_E8192; + +/*! + * CLIENT_CONTROL for @ref BOARDOBJGRP_E8192 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_E8192 { + /*! + * Base class CLIENT_CONTROL + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E8192 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_E8192; + +/*! + * CLIENT_READINGS for @ref BOARDOBJGRP_E8192 + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_E8192 { + /*! + * Base class CLIENT_READINGS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS super; + + /*! + * Mask of valid @ref BOARDOBJ objects within the payload + */ + NV2080_CTRL_BOARDOBJGRP_MASK_E8192 objMask; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_E8192; + + +/*! + * @brief Structure representing CLIENT_INFO for a BOARDOBJGRP_INTERFACE in RMCTRL. + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_INTERFACE { + /*! + * @brief Reserved for future use cases. + */ + NvU8 rsvd; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_INTERFACE; + +/*! + * @brief Structure representing CLIENT_INFO for a BOARDOBJGRP_IFACE_MODEL interface + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_IFACE_MODEL { + /*! 
+ * Super class CLIENT_INFO + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_INTERFACE super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_IFACE_MODEL; + +/*! + * @brief Structure representing CLIENT_INFO for a BOARDOBJGRP_IFACE_MODEL_20 + * interface + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_IFACE_MODEL_20 { + /*! + * Super class CLIENT_INFO + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_IFACE_MODEL super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_IFACE_MODEL_20; + +/*! + * @brief Structure representing CLIENT_INFO for a + * BOARDOBJGRP_IFACE_MODEL_PMU_INIT_1X interface + * + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_IFACE_MODEL_PMU_INIT_1X { + /*! + * Super class CLIENT_INFO + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_IFACE_MODEL_20 super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_INFO_IFACE_MODEL_PMU_INIT_1X; + +/*! + * @brief Structure representing CLIENT_STATUS for a BOARDOBJGRP_INTERFACE in RMCTRL. + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_INTERFACE { + /*! + * @brief Reserved for future use cases. + */ + NvU8 rsvd; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_INTERFACE; + +/*! + * @brief Structure representing CLIENT_STATUS for a BOARDOBJGRP_IFACE_MODEL interface + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_IFACE_MODEL { + /*! + * Super class CLIENT_STATUS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_INTERFACE super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_IFACE_MODEL; + +/*! + * @brief Structure representing CLIENT_STATUS for a BOARDOBJGRP_IFACE_MODEL_20 + * interface + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_IFACE_MODEL_20 { + /*! + * Super class CLIENT_STATUS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_IFACE_MODEL super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_IFACE_MODEL_20; + +/*! + * @brief Structure representing CLIENT_STATUS for a + * BOARDOBJGRP_IFACE_MODEL_PMU_INIT_1X interface + * + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_IFACE_MODEL_PMU_INIT_1X { + /*! + * Super class CLIENT_STATUS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_IFACE_MODEL_20 super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_STATUS_IFACE_MODEL_PMU_INIT_1X; + +/*! + * @brief Structure representing CLIENT_CONTROL for a BOARDOBJGRP_INTERFACE in RMCTRL. + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_INTERFACE { + /*! + * @brief Reserved for future use cases. + */ + NvU8 rsvd; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_INTERFACE; + +/*! + * @brief Structure representing CLIENT_CONTROL for a BOARDOBJGRP_IFACE_MODEL interface + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_IFACE_MODEL { + /*! + * Super class CLIENT_CONTROL + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_INTERFACE super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_IFACE_MODEL; + +/*! + * @brief Structure representing CLIENT_CONTROL for a BOARDOBJGRP_IFACE_MODEL_20 + * interface + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_IFACE_MODEL_20 { + /*! + * Super class CLIENT_CONTROL + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_IFACE_MODEL super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_IFACE_MODEL_20; + +/*! + * @brief Structure representing CLIENT_CONTROL for a + * BOARDOBJGRP_IFACE_MODEL_PMU_INIT_1X interface + * + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_IFACE_MODEL_PMU_INIT_1X { + /*! 
+ * Super class CLIENT_CONTROL + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_IFACE_MODEL_20 super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_CONTROL_IFACE_MODEL_PMU_INIT_1X; + +/*! + * @brief Structure representing CLIENT_READINGS for a BOARDOBJGRP_INTERFACE in RMCTRL. + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_INTERFACE { + /*! + * @brief Reserved for future use cases. + */ + NvU8 rsvd; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_INTERFACE; + +/*! + * @brief Structure representing CLIENT_READINGS for a BOARDOBJGRP_IFACE_MODEL interface + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_IFACE_MODEL { + /*! + * Super class CLIENT_READINGS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_INTERFACE super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_IFACE_MODEL; + +/*! + * @brief Structure representing CLIENT_READINGS for a BOARDOBJGRP_IFACE_MODEL_20 + * interface + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_IFACE_MODEL_20 { + /*! + * Super class CLIENT_READINGS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_IFACE_MODEL super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_IFACE_MODEL_20; + +/*! + * @brief Structure representing CLIENT_READINGS for a + * BOARDOBJGRP_IFACE_MODEL_PMU_INIT_1X interface + * + */ +typedef struct NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_IFACE_MODEL_PMU_INIT_1X { + /*! + * Super class CLIENT_READINGS + */ + NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_IFACE_MODEL_20 super; +} NV2080_CTRL_BOARDOBJ_BOARDOBJGRP_CLIENT_READINGS_IFACE_MODEL_PMU_INIT_1X; diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h new file mode 100644 index 0000000..5a11107 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080/ctrl2080boardobjgrpclasses.finn
+//
+
+
+
+#include "nvtypes.h"
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h
new file mode 100644
index 0000000..bda2fd9
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h
@@ -0,0 +1,1700 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080bus.finn
+//
+
+#include "nvcfg_sdk.h"
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/* NV20_SUBDEVICE_XX bus control commands and parameters */
+
+/**
+ * NV2080_CTRL_CMD_BUS_GET_PCI_INFO
+ *
+ * This command returns PCI bus identifier information for the specified GPU.
+ *
+ *   pciDeviceId
+ *     This parameter specifies the internal PCI device and vendor
+ *     identifiers for the GPU.
+ *   pciSubSystemId
+ *     This parameter specifies the internal PCI subsystem identifier for
+ *     the GPU.
+ *   pciRevisionId
+ *     This parameter specifies the internal PCI device-specific revision
+ *     identifier for the GPU.
+ *   pciExtDeviceId
+ *     This parameter specifies the external PCI device identifier for
+ *     the GPU. It contains only the 16-bit device identifier. This
+ *     value is identical to the device identifier portion of
+ *     pciDeviceId since non-transparent bridges are no longer supported.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_BUS_GET_PCI_INFO (0x20801801) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS {
+    NvU32 pciDeviceId;
+    NvU32 pciSubSystemId;
+    NvU32 pciRevisionId;
+    NvU32 pciExtDeviceId;
+} NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS;
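A hedged sketch of issuing this control from a resource-manager client; hClient/hSubdevice are placeholder handles allocated elsewhere, and NvRmControl stands in for whatever control entry point the platform exposes:

    /* Sketch: query the GPU's PCI identifiers.  Error handling is elided. */
    NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS pciInfo = { 0 };
    NvU32 status;

    status = NvRmControl(hClient, hSubdevice,
                         NV2080_CTRL_CMD_BUS_GET_PCI_INFO,
                         &pciInfo, sizeof(pciInfo));
    if (status == NV_OK)
    {
        /* pciInfo.pciDeviceId / pciSubSystemId / pciRevisionId are valid. */
    }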
+
+/*
+ * NV2080_CTRL_BUS_INFO
+ *
+ * This structure represents a single 32-bit bus engine value. Clients
+ * request a particular bus engine value by specifying a unique bus
+ * information index.
+ *
+ * Legal bus information index values are:
+ *   NV2080_CTRL_BUS_INFO_INDEX_TYPE
+ *     This index is used to request the bus type of the GPU.
+ *     Legal return values for this index are:
+ *       NV2080_CTRL_BUS_INFO_TYPE_PCI
+ *       NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS
+ *       NV2080_CTRL_BUS_INFO_TYPE_FPCI
+ *   NV2080_CTRL_BUS_INFO_INDEX_INTLINE
+ *     This index is used to request the interrupt line (or IRQ) assignment
+ *     for the GPU. The return value is system-dependent.
+ *   NV2080_CTRL_BUS_INFO_INDEX_CAPS
+ *     This index is used to request the bus engine capabilities for the GPU.
+ *     The return value is specified as a mask of capabilities.
+ *     Legal return values for this index are:
+ *       NV2080_CTRL_BUS_INFO_CAPS_NEED_IO_FLUSH
+ *       NV2080_CTRL_BUS_INFO_CAPS_CHIP_INTEGRATED
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CAPS
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CAPS
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CAPS
+ *     These indices are used to request PCI Express link-specific
+ *     capabilities values. A value of zero is returned for non-PCIE GPUs.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CTRL_STATUS
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CTRL_STATUS
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CTRL_STATUS
+ *     These indices are used to request PCI Express link-specific
+ *     control status values. A value of zero is returned for non-PCIE GPUs.
+ *   NV2080_CTRL_BUS_INFO_INDEX_COHERENT_DMA_FLAGS
+ *     This index is used to request coherent dma transfer flags.
+ *     Valid coherent dma transfer flags include:
+ *       NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_CTXDMA
+ *       NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_GPUGART
+ *   NV2080_CTRL_BUS_INFO_INDEX_NONCOHERENT_DMA_FLAGS
+ *     This index is used to request noncoherent dma transfer flags.
+ *     Valid noncoherent dma transfer flags include:
+ *       NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_CTXDMA
+ *       NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_GPUGART
+ *       NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_COH_MODE
+ *   NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_SIZE
+ *     This index is used to request the size of the GPU GART in MBytes.
+ *   NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_FLAGS
+ *     This index is used to request GPU GART flags.
+ *     Valid gart flags include:
+ *       NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_REQFLUSH
+ *         This flag indicates that GPU GART clients need to do an explicit
+ *         flush via an appropriate SetContextDma method.
+ *       NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_UNIFIED
+ *         This flag indicates that the GART address range includes both
+ *         system and video memory.
+ *   NV2080_CTRL_BUS_INFO_INDEX_BUS_NUMBER
+ *     This index is used to request the PCI-based bus number of the GPU.
+ *     Support for this index is platform-dependent.
+ *   NV2080_CTRL_BUS_INFO_INDEX_DEVICE_NUMBER
+ *     This index is used to request the PCI-based device number of the GPU.
+ *     Support for this index is platform-dependent.
+ *   NV2080_CTRL_BUS_INFO_INDEX_DOMAIN_NUMBER
+ *     This index is used to request the PCI-based domain number of the GPU.
+ *     Support for this index is platform-dependent.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_ERRORS
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_ERRORS
+ *     These indices are used to request PCI Express error status.
+ *     The current status is cleared as part of these requests.
+ *     Valid PCI Express error status values include:
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_CORR_ERROR
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_NON_FATAL_ERROR
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_FATAL_ERROR
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_UNSUPP_REQUEST
+ *   NV2080_CTRL_BUS_INFO_INDEX_INTERFACE_TYPE
+ *     This index is used to request the bus interface type of the GPU.
+ *     Legal return values for this index are:
+ *       NV2080_CTRL_BUS_INFO_TYPE_PCI
+ *       NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS
+ *       NV2080_CTRL_BUS_INFO_TYPE_FPCI
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN2_INFO // DEPRECATED
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN_INFO // REPLACES "GEN2" variant
+ *     This index is used to retrieve the PCI Express Gen configuration support
+ *     for the GPU.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN1
+ *         The GPU is PCI Express Gen1 capable.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN2
+ *         The GPU is PCI Express Gen2 capable.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN3
+ *         The GPU is PCI Express Gen3 capable.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN4
+ *         The GPU is PCI Express Gen4 capable.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN5
+ *         The GPU is PCI Express Gen5 capable.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN1
+ *         The GPU is configured in PCI Express Gen1 mode.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN2
+ *         The GPU is configured in PCI Express Gen2 mode.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN3
+ *         The GPU is configured in PCI Express Gen3 mode.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN4
+ *         The GPU is configured in PCI Express Gen4 mode.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN5
+ *         The GPU is configured in PCI Express Gen5 mode.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_AER
+ *     This index retrieves PCI Express Advanced Error Reporting (AER) errors
+ *     for the GPU.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CAPS
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CAPS
+ *     This index retrieves the PCI Express link capabilities for the
+ *     board. For example, a Quadro FX4700X2 has two GPUs and a PCIe
+ *     switch. With this board, this index returns the link
+ *     capabilities of the PCIe switch. In a single GPU board, this
+ *     index returns the link capabilities of the GPU. A value of
+ *     zero is returned for non-PCIE GPUs.
+ *     UPSTREAM_LINK_CAPS is kept for backwards compatibility.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CTRL_STATUS
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CTRL_STATUS
+ *     This index retrieves the PCI Express link status for the board.
+ *     For example, a Quadro FX4700X2 has two GPUs and a PCIe switch.
+ *     With this board, this index returns the link status of
+ *     the PCIe switch. In a single GPU board, this index returns the
+ *     link status of the GPU. A value of zero is returned for
+ *     non-PCIE GPUs.
+ *     UPSTREAM_LINK_CTRL_STATUS is kept for backwards compatibility.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_ASLM_STATUS
+ *     This index is used to request the PCI Express ASLM settings.
+ *     This index is only valid when NV2080_CTRL_BUS_INFO_TYPE indicates PCIE.
+ *     A value of zero is returned for a non-PCI Express bus type.
+ *     _ASLM_STATUS_PCIE is always _PRESENT for the PCI Express bus type.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_LINK_WIDTH_SWITCH_ERROR_COUNT
+ *     This index is used to get the ASLM switching error count.
+ *     A value of zero will be returned if no errors occur during
+ *     ASLM switching.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN2_SWITCH_ERROR_COUNT
+ *     This index is used to get the Gen1<-->Gen2 switching error count.
+ *     A value of zero will be returned if the speed change from Gen1 to
+ *     Gen2 is clean, if the chipset is not Gen2 capable, or if Gen1<-->Gen2
+ *     switching is disabled.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_CYA_ASPM
+ *     This index is used to get the ASPM CYA L0s/L1 enable/disable status.
+ *     The return value is specified as a mask with a valid field and a
+ *     data field.  Possible return values are:
+ *       NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_VALID_NO
+ *       NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_VALID_YES
+ *       NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_DISABLED
+ *       NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L0S
+ *       NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L1
+ *       NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L0S_L1
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS
+ *     These indices are used to request detailed PCI Express error counters.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS_CLEAR
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS_CLEAR
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED_CLEAR
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS_CLEAR
+ *     These indices are used to clear detailed PCI Express error counters.
+ *   NV2080_CTRL_BUS_INFO_INDEX_GPU_INTERFACE_TYPE
+ *     This index is used to request the internal interface type of the GPU.
+ *     Legal return values for this index are:
+ *       NV2080_CTRL_BUS_INFO_TYPE_PCI
+ *       NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS
+ *       NV2080_CTRL_BUS_INFO_TYPE_FPCI
+ *   NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE
+ *     This index queries the type of sysmem connection to the CPU.
+ *       NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_PCIE
+ *       NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_NVLINK
+ *       NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_C2C
+ *
+ */
+
+typedef NVXXXX_CTRL_XXX_INFO NV2080_CTRL_BUS_INFO;
+
+/* valid bus info index values */
+
+/**
+ * This index is used to request the bus type of the GPU.
+ * Legal return values for this index are:
+ *   NV2080_CTRL_BUS_INFO_TYPE_PCI
+ *   NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS
+ *   NV2080_CTRL_BUS_INFO_TYPE_FPCI
+ */
+#define NV2080_CTRL_BUS_INFO_INDEX_TYPE                             (0x00000000)
+#define NV2080_CTRL_BUS_INFO_INDEX_INTLINE                          (0x00000001)
+#define NV2080_CTRL_BUS_INFO_INDEX_CAPS                             (0x00000002)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CAPS               (0x00000003)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CAPS              (0x00000004)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CAPS          (0x00000005)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CAPS        (0x00000006)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CTRL_STATUS        (0x00000007)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CTRL_STATUS       (0x00000008)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CTRL_STATUS   (0x00000009)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CTRL_STATUS (0x0000000A)
+/**
+ * This index is used to request coherent dma transfer flags.
+ * Valid coherent dma transfer flags include:
+ *   NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_CTXDMA
+ *   NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_GPUGART
+ */
+#define NV2080_CTRL_BUS_INFO_INDEX_COHERENT_DMA_FLAGS               (0x0000000B)
+/**
+ * This index is used to request noncoherent dma transfer flags.
+ * Valid noncoherent dma transfer flags include:
+ *   NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_CTXDMA
+ *   NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_GPUGART
+ *   NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_COH_MODE
+ */
+#define NV2080_CTRL_BUS_INFO_INDEX_NONCOHERENT_DMA_FLAGS            (0x0000000C)
+/**
+ * This index is used to request the size of the GPU GART in MBytes.
+ */
+#define NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_SIZE                    (0x0000000D)
+/**
+ * This index is used to request GPU GART flags.
+ * Valid gart flags include:
+ *   NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_REQFLUSH
+ *     This flag indicates that GPU GART clients need to do an explicit
+ *     flush via an appropriate SetContextDma method.
+ *   NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_UNIFIED
+ *     This flag indicates that the GART address range includes both
+ *     system and video memory.
+ */
+#define NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_FLAGS                   (0x0000000E)
+#define NV2080_CTRL_BUS_INFO_INDEX_BUS_NUMBER                       (0x0000000F)
+#define NV2080_CTRL_BUS_INFO_INDEX_DEVICE_NUMBER                    (0x00000010)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_ERRORS             (0x00000011)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_ERRORS            (0x00000012)
+#define NV2080_CTRL_BUS_INFO_INDEX_INTERFACE_TYPE                   (0x00000013)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN2_INFO                   (0x00000014)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_AER                (0x00000015)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CAPS             (0x00000016)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CTRL_STATUS      (0x00000017)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_ASLM_STATUS                 (0x00000018)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_LINK_WIDTH_SWITCH_ERROR_COUNT (0x00000019)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_LINK_SPEED_SWITCH_ERROR_COUNT (0x0000001A)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_CYA_ASPM                (0x0000001B)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS    (0x0000001C)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS         (0x0000001D)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED      (0x0000001E)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS   (0x0000001F)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS_CLEAR (0x00000020)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS_CLEAR   (0x00000021)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED_CLEAR (0x00000022)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS_CLEAR (0x00000023)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CORRECTABLE_ERRORS (0x00000024)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NONFATAL_ERRORS    (0x00000025)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FATAL_ERRORS       (0x00000026)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_UNSUPPORTED_REQUESTS (0x00000027)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CORRECTABLE_ERRORS_CLEAR (0x00000028)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NONFATAL_ERRORS_CLEAR (0x00000029)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FATAL_ERRORS_CLEAR (0x0000002A)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_UNSUPPORTED_REQUESTS_CLEAR (0x0000002B)
+#define NV2080_CTRL_BUS_INFO_INDEX_DOMAIN_NUMBER                    (0x0000002C)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN_INFO                    (0x0000002D)
+#define NV2080_CTRL_BUS_INFO_INDEX_GPU_INTERFACE_TYPE               (0x0000002E)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_GEN_INFO           (0x0000002F)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_GEN_INFO              (0x00000030)
+#define NV2080_CTRL_BUS_INFO_INDEX_MSI_INFO                         (0x00000031)
+/**
+ * This index is used to request the top 32 bits of the size of the GPU
+ * GART in MBytes.
+ */
+#define NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_SIZE_HI                 (0x00000032)
+#define NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE           (0x00000033)
+#define NV2080_CTRL_BUS_INFO_INDEX_MAX                              NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE
+#define NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE                          (0x00000034)
+
+/* valid bus info type return values */
+#define NV2080_CTRL_BUS_INFO_TYPE_PCI                               (0x00000001)
+#define NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS                       (0x00000003)
+#define NV2080_CTRL_BUS_INFO_TYPE_FPCI                              (0x00000004)
+#define NV2080_CTRL_BUS_INFO_TYPE_AXI                               (0x00000008)
+
+/* valid bus capability flags */
+#define NV2080_CTRL_BUS_INFO_CAPS_NEED_IO_FLUSH                     (0x00000001)
+#define NV2080_CTRL_BUS_INFO_CAPS_CHIP_INTEGRATED                   (0x00000002)
+
+/*
+ * Format of PCIE link caps return values
+ * Note that the Link Capabilities register format is followed only for bits 11:0
+ */
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED                3:0
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_2500MBPS       (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_5000MBPS       (0x00000002)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_8000MBPS       (0x00000003)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_16000MBPS      (0x00000004)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_32000MBPS      (0x00000005)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_64000MBPS      (0x00000006)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_WIDTH                9:4
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_ASPM                     11:10
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_ASPM_NONE                (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_ASPM_L0S                 (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_ASPM_L0S_L1              (0x00000003)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN                      15:12
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN1                 (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN2                 (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN3                 (0x00000002)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN4                 (0x00000003)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN5                 (0x00000004)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN6                 (0x00000005)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL               19:16
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN1          (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN2          (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN3          (0x00000002)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN4          (0x00000003)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN5          (0x00000004)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN6          (0x00000005)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN                  23:20
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN1             (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN2             (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN3             (0x00000002)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN4             (0x00000003)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN5             (0x00000004)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN6             (0x00000005)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_SPEED_CHANGES            24:24
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_SPEED_CHANGES_ENABLED    (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_SPEED_CHANGES_DISABLED   (0x00000001)
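The HI:LO tokens above (e.g. NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED at 3:0) name bit ranges within the 32-bit data word returned for an info index. As a minimal sketch, not part of the patch: the helper below hand-rolls the extraction that driver code typically performs with the DRF_* macros from nvmisc.h; the function and variable names are illustrative only.

    #include <stdint.h>

    /* Extract bits hi..lo (inclusive) of a 32-bit control-call data word. */
    static inline uint32_t field_val(uint32_t data, unsigned hi, unsigned lo)
    {
        return (data >> lo) & ((1u << (hi - lo + 1u)) - 1u);
    }

    /* Decode a caps word returned for
     * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CAPS. */
    static void decode_link_caps(uint32_t caps)
    {
        uint32_t maxSpeed = field_val(caps, 3, 0);   /* _LINK_CAP_MAX_SPEED */
        uint32_t maxWidth = field_val(caps, 9, 4);   /* _LINK_CAP_MAX_WIDTH */
        uint32_t capGen   = field_val(caps, 15, 12); /* _LINK_CAP_GEN       */
        (void)maxSpeed; (void)maxWidth; (void)capGen;
    }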
+/* format of PCIE control status return values */
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_ASPM                 1:0
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_ASPM_DISABLED        (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_ASPM_L0S             (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_ASPM_L1              (0x00000002)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_ASPM_L0S_L1          (0x00000003)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED           19:16
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_2500MBPS  (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_5000MBPS  (0x00000002)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_8000MBPS  (0x00000003)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_16000MBPS (0x00000004)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_32000MBPS (0x00000005)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_64000MBPS (0x00000006)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH           25:20
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_UNDEFINED (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X1        (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X2        (0x00000002)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X4        (0x00000004)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X8        (0x00000008)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X12       (0x0000000C)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X16       (0x00000010)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X32       (0x00000020)
+
+/* coherent dma transfer flags */
+#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_CTXDMA                  0:0
+#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_CTXDMA_FALSE            (0x00000000)
+#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_CTXDMA_TRUE             (0x00000001)
+#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_GPUGART                 2:2
+#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_GPUGART_FALSE           (0x00000000)
+#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_GPUGART_TRUE            (0x00000001)
+
+/* noncoherent dma transfer flags */
+#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_CTXDMA               0:0
+#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_CTXDMA_FALSE         (0x00000000)
+#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_CTXDMA_TRUE          (0x00000001)
+#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_GPUGART              2:2
+#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_GPUGART_FALSE        (0x00000000)
+#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_GPUGART_TRUE         (0x00000001)
+#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_COH_MODE             3:3
+#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_COH_MODE_FALSE       (0x00000000)
+#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_COH_MODE_TRUE        (0x00000001)
+
+/* GPU GART flags */
+#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_REQFLUSH                    0:0
+#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_REQFLUSH_FALSE              (0x00000000)
+#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_REQFLUSH_TRUE               (0x00000001)
+#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_UNIFIED                     1:1
+#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_UNIFIED_FALSE               (0x00000000)
+#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_UNIFIED_TRUE                (0x00000001)
+
+/* format of PCIE errors return values */
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_CORR_ERROR                (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_NON_FATAL_ERROR           (0x00000002)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_FATAL_ERROR               (0x00000004)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_UNSUPP_REQUEST            (0x00000008)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_ENTERED_RECOVERY          (0x00000010)
+
+/* PCIE Gen2 capability and current level */
+#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CAP                   0:0
+#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CAP_FALSE             (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CAP_TRUE              (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CURR_LEVEL            1:1
+#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CURR_LEVEL_GEN1       (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CURR_LEVEL_GEN2       (0x00000001)
+
+/* format of PCIE AER return values */
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_TRAINING_ERR    (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_DLINK_PROTO_ERR (0x00000002)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_POISONED_TLP    (0x00000004)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_FC_PROTO_ERR    (0x00000008)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_TIMEOUT     (0x00000010)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_ABORT       (0x00000020)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNEXP_CPL       (0x00000040)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_RCVR_OVERFLOW   (0x00000080)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_MALFORMED_TLP   (0x00000100)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_ECRC_ERROR      (0x00000200)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNSUPPORTED_REQ (0x00000400)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RCV_ERR           (0x00010000)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_TLP           (0x00020000)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_DLLP          (0x00040000)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_ROLLOVER     (0x00080000)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_TIMEOUT      (0x00100000)
+#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_ADVISORY_NONFATAL (0x00200000)
+
+/* format of PCIE ASLM status return value */
+#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_PCIE                0:0
+#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_PCIE_ERROR          (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_PCIE_PRESENT        (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_SUPPORTED           1:1
+#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_SUPPORTED_NO        (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_SUPPORTED_YES       (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_CL_CAPABLE          2:2
+#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_CL_CAPABLE_NO       (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_CL_CAPABLE_YES      (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_OS_SUPPORTED        3:3
+#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_OS_SUPPORTED_NO     (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_OS_SUPPORTED_YES    (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_BR04                4:4
+#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_BR04_MISSING        (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_BR04_PRESENT        (0x00000001)
+
+/* format of GPU CYA CAPS return value */
+#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_VALID              0:0
+#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_VALID_NO           (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_VALID_YES          (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM                    2:1
+#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_DISABLED           (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L0S                (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L1                 (0x00000002)
+#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L0S_L1             (0x00000003)
+
+/* format of MSI INFO return value */
+#define NV2080_CTRL_BUS_INFO_MSI_STATUS                                0:0
+#define NV2080_CTRL_BUS_INFO_MSI_STATUS_DISABLED                       (0x00000000)
+#define NV2080_CTRL_BUS_INFO_MSI_STATUS_ENABLED                        (0x00000001)
+
+/* format of L1 PM Substates capabilities information */
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_2_SUPPORTED       0:0
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_2_SUPPORTED_YES   (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_2_SUPPORTED_NO    (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_1_SUPPORTED       1:1
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_1_SUPPORTED_YES   (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_1_SUPPORTED_NO    (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_2_SUPPORTED        2:2
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_2_SUPPORTED_YES    (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_2_SUPPORTED_NO     (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_1_SUPPORTED        3:3
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_1_SUPPORTED_YES    (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_1_SUPPORTED_NO     (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_L1PM_SUPPORTED             4:4
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_L1PM_SUPPORTED_YES         (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_L1PM_SUPPORTED_NO          (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_RESERVED                   7:5
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PORT_RESTORE_TIME          15:8
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_T_POWER_ON_SCALE           17:16
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_T_POWER_ON_VALUE           23:19
+
+/* format of L1 PM Substates Control 1 Register */
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_2_ENABLED       0:0
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_2_ENABLED_YES   (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_2_ENABLED_NO    (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_1_ENABLED       1:1
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_1_ENABLED_YES   (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_1_ENABLED_NO    (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_2_ENABLED        2:2
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_2_ENABLED_YES    (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_2_ENABLED_NO     (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_1_ENABLED        3:3
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_1_ENABLED_YES    (0x00000001)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_1_ENABLED_NO     (0x00000000)
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_COMMON_MODE_RESTORE_TIME 15:8
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_LTR_L1_2_THRESHOLD_VALUE 25:16
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_LTR_L1_2_THRESHOLD_SCALE 31:29
+
+/* format of L1 PM Substates Control 2 Register */
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL2_T_POWER_ON_SCALE         1:0
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL2_T_POWER_ON_VALUE         7:3
+
+/* valid sysmem connection type values */
+#define NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_PCIE         (0x00000000)
+#define NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_NVLINK       (0x00000001)
+#define NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_C2C          (0x00000002)
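The T_POWER_ON fields above follow the PCIe L1 PM Substates encoding (a fact of the PCIe spec, not of this patch): the scale field selects 2 us, 10 us, or 100 us, with scale encoding 3 reserved, and T_Power_On = value * scale. A minimal sketch:

    #include <stdint.h>

    /* Returns T_Power_On in microseconds, or -1 for the reserved scale. */
    static int64_t t_power_on_us(uint32_t scale, uint32_t value)
    {
        static const int32_t scale_us[] = { 2, 10, 100 };

        if (scale > 2u)
            return -1;  /* scale encoding 3 is reserved */
        return (int64_t)value * scale_us[scale];
    }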
+
+/**
+ * NV2080_CTRL_CMD_BUS_GET_INFO
+ *
+ * This command returns bus engine information for the associated GPU.
+ * Requests to retrieve bus information use a list of one or more
+ * NV2080_CTRL_BUS_INFO structures.
+ *
+ *   busInfoListSize
+ *     This field specifies the number of entries on the caller's
+ *     busInfoList.
+ *   busInfoList
+ *     This field specifies a pointer in the caller's address space
+ *     to the buffer into which the bus information is to be returned.
+ *     This buffer must be at least as big as busInfoListSize multiplied
+ *     by the size of the NV2080_CTRL_BUS_INFO structure.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV2080_CTRL_CMD_BUS_GET_INFO (0x20801802) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_GET_INFO_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_BUS_GET_INFO_PARAMS {
+    NvU32 busInfoListSize;
+    NV_DECLARE_ALIGNED(NvP64 busInfoList, 8);
+} NV2080_CTRL_BUS_GET_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_BUS_GET_INFO_V2 (0x20801823) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_INFO_V2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_GET_INFO_V2_PARAMS_MESSAGE_ID (0x23U)
+
+typedef struct NV2080_CTRL_BUS_GET_INFO_V2_PARAMS {
+    NvU32                busInfoListSize;
+    NV2080_CTRL_BUS_INFO busInfoList[NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE];
+} NV2080_CTRL_BUS_GET_INFO_V2_PARAMS;
+
+/*
+ * NV2080_CTRL_BUS_PCI_BAR_INFO
+ *
+ * This structure describes PCI bus BAR information.
+ *
+ *   flags
+ *     This field contains any flags for the associated BAR.
+ *   barSize
+ *     This field contains the size in megabytes of the associated BAR.
+ *     DEPRECATED, please use barSizeBytes.
+ *   barSizeBytes
+ *     This field contains the size in bytes of the associated BAR.
+ *   barOffset
+ *     This field contains the PCI bus offset in bytes of the associated BAR.
+ */
+typedef struct NV2080_CTRL_BUS_PCI_BAR_INFO {
+    NvU32 flags;
+    NvU32 barSize;
+    NV_DECLARE_ALIGNED(NvU64 barSizeBytes, 8);
+    NV_DECLARE_ALIGNED(NvU64 barOffset, 8);
+} NV2080_CTRL_BUS_PCI_BAR_INFO;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_PCI_BAR_INFO
+ *
+ * This command returns PCI bus BAR information.
+ *
+ *   pciBarCount
+ *     This field returns the number of BARs for the associated subdevice.
+ *     Legal values for this parameter are between one and
+ *     NV2080_CTRL_BUS_MAX_PCI_BARS.
+ *   pciBarInfo
+ *     This field returns per-BAR information in the form of an array of
+ *     NV2080_CTRL_BUS_PCI_BAR_INFO structures.  Information for as many as
+ *     NV2080_CTRL_BUS_MAX_PCI_BARS BARs will be returned.  Any unused
+ *     entries will be initialized to zero.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_BUS_GET_PCI_BAR_INFO (0x20801803) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS_MESSAGE_ID" */
+
+/* maximum number of BARs per subdevice */
+#define NV2080_CTRL_BUS_MAX_PCI_BARS         (8)
+
+#define NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS {
+    NvU32 pciBarCount;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_BUS_PCI_BAR_INFO pciBarInfo[NV2080_CTRL_BUS_MAX_PCI_BARS], 8);
+} NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS;
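A sketch of how a client might populate NV2080_CTRL_BUS_GET_INFO_V2_PARAMS, assuming the usual NVXXXX_CTRL_XXX_INFO layout of an index/data pair per entry; the control-call dispatch itself (and the subdevice handle it needs) is elided, so this is illustrative only.

    static void query_bus_info_example(void)
    {
        NV2080_CTRL_BUS_GET_INFO_V2_PARAMS params = { 0 };

        /* Ask for the bus type and the GART size in one request. */
        params.busInfoList[0].index = NV2080_CTRL_BUS_INFO_INDEX_TYPE;
        params.busInfoList[1].index = NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_SIZE;
        params.busInfoListSize      = 2;

        /* ... issue NV2080_CTRL_CMD_BUS_GET_INFO_V2 on the subdevice ... */

        /* On success each entry's data field holds the requested value, e.g.
         * params.busInfoList[0].data == NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS. */
    }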
+
+/*
+ * NV2080_CTRL_CMD_BUS_SET_PCIE_LINK_WIDTH
+ *
+ * This command sets the PCI-E link width to the specified new value.
+ *
+ *   pcieLinkWidth
+ *     This field specifies the new PCI-E link width.
+ *   failingReason
+ *     This field specifies the reason why the change of link width failed.
+ *     It is valid only when this routine returns NV_ERR_GENERIC.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_GENERIC
+ */
+#define NV2080_CTRL_CMD_BUS_SET_PCIE_LINK_WIDTH (0x20801804) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS {
+    NvU32 pcieLinkWidth;
+    NvU32 failingReason;
+} NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS;
+
+#define NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_ERROR_PSTATE          (0x00000001)
+#define NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_ERROR_PCIE_CFG_ACCESS (0x00000002)
+#define NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_ERROR_TRAINING        (0x00000004)
+
+/*
+ * NV2080_CTRL_CMD_BUS_SET_PCIE_SPEED
+ *
+ * This command initiates a change in the PCIe bus speed.
+ *
+ *   busSpeed
+ *     This field is the target speed to train to.
+ *     Legal values for this parameter are:
+ *       NV2080_CTRL_BUS_SET_PCIE_SPEED_2500MBPS
+ *       NV2080_CTRL_BUS_SET_PCIE_SPEED_5000MBPS
+ *       NV2080_CTRL_BUS_SET_PCIE_SPEED_8000MBPS
+ *       NV2080_CTRL_BUS_SET_PCIE_SPEED_16000MBPS
+ *       NV2080_CTRL_BUS_SET_PCIE_SPEED_32000MBPS
+ *       NV2080_CTRL_BUS_SET_PCIE_SPEED_64000MBPS
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_BUS_SET_PCIE_SPEED (0x20801805) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS {
+    NvU32 busSpeed;
+} NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS;
+
+#define NV2080_CTRL_BUS_SET_PCIE_SPEED_2500MBPS  (0x00000001)
+#define NV2080_CTRL_BUS_SET_PCIE_SPEED_5000MBPS  (0x00000002)
+#define NV2080_CTRL_BUS_SET_PCIE_SPEED_8000MBPS  (0x00000003)
+#define NV2080_CTRL_BUS_SET_PCIE_SPEED_16000MBPS (0x00000004)
+#define NV2080_CTRL_BUS_SET_PCIE_SPEED_32000MBPS (0x00000005)
+#define NV2080_CTRL_BUS_SET_PCIE_SPEED_64000MBPS (0x00000006)
+
+
+/*
+ * NV2080_CTRL_CMD_BUS_MAP_BAR2
+ *
+ * This command sets up BAR2 page tables for the passed-in memory handle.
+ * This command MUST be executed before NV2080_CTRL_CMD_BUS_UNMAP_BAR2
+ * or NV2080_CTRL_CMD_BUS_VERIFY_BAR2.  Not supported on SLI.
+ *
+ *   hMemory
+ *     This field is a handle to physical memory.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *
+ */
+#define NV2080_CTRL_CMD_BUS_MAP_BAR2 (0x20801809) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_MAP_BAR2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_MAP_BAR2_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV2080_CTRL_BUS_MAP_BAR2_PARAMS {
+    NvHandle hMemory;
+} NV2080_CTRL_BUS_MAP_BAR2_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_UNMAP_BAR2
+ *
+ * This command unmaps any pending BAR2 page tables created with the
+ * NV2080_CTRL_CMD_BUS_MAP_BAR2 command.  The handle passed in must
+ * match the handle used to map the page tables.  Not supported on SLI.
+ *
+ *   hMemory
+ *     This field is a handle to physical memory.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *
+ */
+#define NV2080_CTRL_CMD_BUS_UNMAP_BAR2 (0x2080180a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_UNMAP_BAR2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_UNMAP_BAR2_PARAMS_MESSAGE_ID (0xAU)
+
+typedef struct NV2080_CTRL_BUS_UNMAP_BAR2_PARAMS {
+    NvHandle hMemory;
+} NV2080_CTRL_BUS_UNMAP_BAR2_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_VERIFY_BAR2
+ *
+ * This command tests BAR2 against BAR0 if there are BAR2 page tables
+ * set up with the NV2080_CTRL_CMD_BUS_MAP_BAR2 command.  The handle passed
+ * in must match the handle used to map the page tables.  Not supported on SLI.
+ *
+ *   hMemory
+ *     This field is a handle to physical memory.
+ *   offset
+ *     Base offset of the surface where the test will make its first dword
+ *     write.
+ *   size
+ *     The test will write '(size/4)*4' bytes starting at surface offset
+ *     `offset'.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *
+ */
+#define NV2080_CTRL_CMD_BUS_VERIFY_BAR2 (0x2080180b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_VERIFY_BAR2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_VERIFY_BAR2_PARAMS_MESSAGE_ID (0xBU)
+
+typedef struct NV2080_CTRL_BUS_VERIFY_BAR2_PARAMS {
+    NvHandle hMemory;
+    NvU32    offset;
+    NvU32    size;
+} NV2080_CTRL_BUS_VERIFY_BAR2_PARAMS;
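A sketch of the required ordering for the three BAR2 controls above: MAP must come first, and VERIFY/UNMAP must reuse the same handle. The hMemory argument is assumed to be a previously allocated physical-memory handle, and the control dispatch is elided.

    static void bar2_selftest_example(NvHandle hMemory)
    {
        NV2080_CTRL_BUS_MAP_BAR2_PARAMS    mapParams    = { 0 };
        NV2080_CTRL_BUS_VERIFY_BAR2_PARAMS verifyParams = { 0 };
        NV2080_CTRL_BUS_UNMAP_BAR2_PARAMS  unmapParams  = { 0 };

        mapParams.hMemory    = hMemory; /* 1. NV2080_CTRL_CMD_BUS_MAP_BAR2    */
        verifyParams.hMemory = hMemory; /* 2. NV2080_CTRL_CMD_BUS_VERIFY_BAR2 */
        verifyParams.offset  = 0;
        verifyParams.size    = 4096;    /*    writes (size/4)*4 bytes         */
        unmapParams.hMemory  = hMemory; /* 3. NV2080_CTRL_CMD_BUS_UNMAP_BAR2  */

        /* ... issue the three controls in the order shown above ... */
        (void)mapParams; (void)verifyParams; (void)unmapParams;
    }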
+
+/*
+ * NV2080_CTRL_CMD_BUS_SERVICE_GPU_MULTIFUNC_STATE
+ *
+ * This command reports the current audio device power state or sets a new
+ * power state.
+ *
+ *   command
+ *     This parameter specifies the target GPU multifunction state.
+ *       NV2080_CTRL_BUS_ENABLE_GPU_MULTIFUNC_STATE   Enables the multi
+ *                                                    function state.
+ *       NV2080_CTRL_BUS_DISABLE_GPU_MULTIFUNC_STATE  Disables the multi
+ *                                                    function state.
+ *       NV2080_CTRL_BUS_GET_GPU_MULTIFUNC_STATE      Gets the current device
+ *                                                    power state.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_GENERIC
+ */
+
+#define NV2080_CTRL_CMD_BUS_SERVICE_GPU_MULTIFUNC_STATE (0x20801812) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS_MESSAGE_ID (0x12U)
+
+typedef struct NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS {
+    NvU8  command;
+    NvU32 deviceState;
+} NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS;
+
+#define NV2080_CTRL_BUS_ENABLE_GPU_MULTIFUNC_STATE  (0x00000000)
+#define NV2080_CTRL_BUS_DISABLE_GPU_MULTIFUNC_STATE (0x00000001)
+#define NV2080_CTRL_BUS_GET_GPU_MULTIFUNC_STATE     (0x00000002)
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_PEX_COUNTERS
+ * This command gets the counts for different counter types.
+ *
+ *   pexCounterMask
+ *     This parameter specifies the input mask for desired counter types.
+ *   pexTotalCorrectableErrors
+ *     This parameter gives the total correctable errors, which includes
+ *     NV_XVE_ERROR_COUNTER1 plus LCRC Errors, 8B10B Errors, NAKS and
+ *     Failed L0s.
+ *   pexCorrectableErrors
+ *     This parameter only includes the NV_XVE_ERROR_COUNTER1 value.
+ *   pexTotalNonFatalErrors
+ *     This parameter returns total Non-Fatal Errors, which may or may not
+ *     include Correctable Errors.
+ *   pexTotalFatalErrors
+ *     This parameter returns Total Fatal Errors.
+ *   pexTotalUnsupportedReqs
+ *     This parameter returns Total Unsupported Requests.
+ *   pexCounters
+ *     This array contains the error counts for each error type as requested
+ *     from the pexCounterMask.  The array indexes correspond to the mask
+ *     bits one-to-one.
+ */
+
+#define NV2080_CTRL_CMD_BUS_GET_PEX_COUNTERS (0x20801813) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PEX_MAX_COUNTER_TYPES 31
+#define NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS_MESSAGE_ID (0x13U)
+
+typedef struct NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS {
+    NvU32 pexCounterMask;
+    NvU32 pexTotalCorrectableErrors;
+    NvU16 pexCorrectableErrors;
+    NvU8  pexTotalNonFatalErrors;
+    NvU8  pexTotalFatalErrors;
+    NvU8  pexTotalUnsupportedReqs;
+    NvU16 pexCounters[NV2080_CTRL_PEX_MAX_COUNTER_TYPES];
+} NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS;
+
+/*
+ * Note that MAX_COUNTER_TYPES will need to be updated each time
+ * a new counter type gets added to the list below.  The value
+ * depends on the bits set for the last valid define.  Look
+ * at the pexCounters[] comments above for details.
+ *
+ */
+#define NV2080_CTRL_BUS_PEX_COUNTER_TYPE                          0x00000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_RECEIVER_ERRORS               0x00000001
+#define NV2080_CTRL_BUS_PEX_COUNTER_REPLAY_COUNT                  0x00000002
+#define NV2080_CTRL_BUS_PEX_COUNTER_REPLAY_ROLLOVER_COUNT         0x00000004
+#define NV2080_CTRL_BUS_PEX_COUNTER_BAD_DLLP_COUNT                0x00000008
+#define NV2080_CTRL_BUS_PEX_COUNTER_BAD_TLP_COUNT                 0x00000010
+#define NV2080_CTRL_BUS_PEX_COUNTER_8B10B_ERRORS_COUNT            0x00000020
+#define NV2080_CTRL_BUS_PEX_COUNTER_SYNC_HEADER_ERRORS_COUNT      0x00000040
+#define NV2080_CTRL_BUS_PEX_COUNTER_LCRC_ERRORS_COUNT             0x00000080
+#define NV2080_CTRL_BUS_PEX_COUNTER_FAILED_L0S_EXITS_COUNT        0x00000100
+#define NV2080_CTRL_BUS_PEX_COUNTER_NAKS_SENT_COUNT               0x00000200
+#define NV2080_CTRL_BUS_PEX_COUNTER_NAKS_RCVD_COUNT               0x00000400
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_ERRORS                   0x00000800
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_TO_RECOVERY_COUNT          0x00001000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L0_TO_RECOVERY_COUNT          0x00002000
+#define NV2080_CTRL_BUS_PEX_COUNTER_RECOVERY_COUNT                0x00004000
+#define NV2080_CTRL_BUS_PEX_COUNTER_CHIPSET_XMIT_L0S_ENTRY_COUNT  0x00008000
+#define NV2080_CTRL_BUS_PEX_COUNTER_GPU_XMIT_L0S_ENTRY_COUNT      0x00010000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_ENTRY_COUNT                0x00020000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1P_ENTRY_COUNT               0x00040000
+#define NV2080_CTRL_BUS_PEX_COUNTER_DEEP_L1_ENTRY_COUNT           0x00080000
+#define NV2080_CTRL_BUS_PEX_COUNTER_ASLM_COUNT                    0x00100000
+#define NV2080_CTRL_BUS_PEX_COUNTER_TOTAL_CORR_ERROR_COUNT        0x00200000
+#define NV2080_CTRL_BUS_PEX_COUNTER_CORR_ERROR_COUNT              0x00400000
+#define NV2080_CTRL_BUS_PEX_COUNTER_NON_FATAL_ERROR_COUNT         0x00800000
+#define NV2080_CTRL_BUS_PEX_COUNTER_FATAL_ERROR_COUNT             0x01000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_UNSUPP_REQ_COUNT              0x02000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_1_ENTRY_COUNT              0x04000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_2_ENTRY_COUNT              0x08000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_2_ABORT_COUNT              0x10000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1SS_TO_DEEP_L1_TIMEOUT_COUNT 0x20000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_SHORT_DURATION_COUNT       0x40000000
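A sketch of requesting two of the counters defined above and reading them back; the bit-position-to-array-index mapping follows the "one-to-one" note in the GET_PEX_COUNTERS description and is otherwise an assumption.

    static void read_pex_counters_example(void)
    {
        NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS p = { 0 };
        NvU32 i;

        p.pexCounterMask = NV2080_CTRL_BUS_PEX_COUNTER_RECEIVER_ERRORS |
                           NV2080_CTRL_BUS_PEX_COUNTER_REPLAY_COUNT;

        /* ... issue NV2080_CTRL_CMD_BUS_GET_PEX_COUNTERS here ... */

        for (i = 0; i < NV2080_CTRL_PEX_MAX_COUNTER_TYPES; i++)
        {
            if (p.pexCounterMask & (1u << i))
            {
                /* p.pexCounters[i] holds the count for type (1u << i). */
            }
        }
    }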
+
+/*
+ * NV2080_CTRL_CMD_BUS_CLEAR_PEX_COUNTERS
+ * This command clears the counts for different counter types.
+ *
+ *   pexCounterMask
+ *     This parameter specifies the input mask for desired counters to be
+ *     cleared.  Note that not all counters can be cleared.
+ */
+
+#define NV2080_CTRL_CMD_BUS_CLEAR_PEX_COUNTERS (0x20801814) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS_MESSAGE_ID (0x14U)
+
+typedef struct NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS {
+    NvU32 pexCounterMask;
+} NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_FREEZE_PEX_COUNTERS
+ * This command freezes or unfreezes the counts for different counter types.
+ *
+ *   pexCounterMask
+ *     This parameter specifies the input mask for desired counters to be
+ *     frozen.  Note that not all counters can be frozen.
+ *
+ *   bFreezeRmCounter
+ *     This parameter decides whether the API will freeze or unfreeze the
+ *     counters: NV_TRUE to freeze and NV_FALSE to unfreeze.
+ */
+
+#define NV2080_CTRL_CMD_BUS_FREEZE_PEX_COUNTERS (0x20801815) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS_MESSAGE_ID (0x15U)
+
+typedef struct NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS {
+    NvU32  pexCounterMask;
+    NvBool bFreezeRmCounter;
+} NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS
+ * This command gets the per-lane counters and the type of errors.
+ *
+ *   pexLaneErrorStatus
+ *     This mask specifies the type of error detected on any of the lanes.
+ *
+ *   pexLaneCounter
+ *     This array gives the counters per lane.  Each entry corresponds to
+ *     the lane at index + 1.
+ */
+
+#define NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS (0x20801816) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PEX_MAX_LANES 16
+#define NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS_MESSAGE_ID (0x16U)
+
+typedef struct NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS {
+    NvU16 pexLaneErrorStatus;
+    NvU8  pexLaneCounter[NV2080_CTRL_PEX_MAX_LANES];
+} NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS;
+
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_TYPE                  0x00000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_SYNC_HDR_CODING_ERR   0x00000001
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_SYNC_HDR_ORDER_ERR    0x00000002
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_OS_DATA_SEQ_ERR       0x00000004
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_TSX_DATA_SEQ_ERR      0x00000008
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_SKPOS_LFSR_ERR        0x00000010
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_RX_CLK_FIFO_OVERFLOW  0x00000020
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_ELASTIC_FIFO_OVERFLOW 0x00000040
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_RCVD_LINK_NUM_ERR     0x00000080
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_RCVD_LANE_NUM_ERR     0x00000100
+
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY (0x20801817) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS_MESSAGE_ID (0x17U)
+
+typedef struct NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS {
+    NvBool bPexLtrRegkeyOverride;
+    NvBool bPexRootPortLtrSupported;
+    NvBool bPexGpuLtrSupported;
+    NvU16  pexLtrSnoopLatencyValue;
+    NvU8   pexLtrSnoopLatencyScale;
+    NvU16  pexLtrNoSnoopLatencyValue;
+    NvU8   pexLtrNoSnoopLatencyScale;
+} NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS;
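The value/scale pairs above use the PCIe LTR encoding, where the reported latency is value * 2^(5*scale) ns and scale encodings above 5 are reserved (a fact of the PCIe spec, not of this patch). A minimal sketch:

    #include <stdint.h>

    /* Convert an LTR value/scale pair to nanoseconds; -1 for reserved scales. */
    static int64_t ltr_to_ns(uint16_t value, uint8_t scale)
    {
        if (scale > 5u)
            return -1;
        return (int64_t)value << (5u * scale);
    }

    /* e.g. ltr_to_ns(p.pexLtrSnoopLatencyValue, p.pexLtrSnoopLatencyScale) */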
+
+#define NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY (0x20801818) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS_MESSAGE_ID (0x18U)
+
+typedef struct NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS {
+    NvU16 pexLtrSnoopLatencyValue;
+    NvU8  pexLtrSnoopLatencyScale;
+    NvU16 pexLtrNoSnoopLatencyValue;
+    NvU8  pexLtrNoSnoopLatencyScale;
+} NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_PEX_UTIL_COUNTERS
+ * This command gets the counts for different counter types.
+ *
+ *   pexCounterMask
+ *     This parameter specifies the input mask for desired counter types.
+ *
+ */
+#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_TX_BYTES   0x00000001
+#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_RX_BYTES   0x00000002
+#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_TX_L0      0x00000004
+#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_RX_L0      0x00000008
+#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_TX_L0S     0x00000010
+#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_RX_L0S     0x00000020
+#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_NON_L0_L0S 0x00000040
+#define NV2080_CTRL_PEX_UTIL_MAX_COUNTER_TYPES      7
+
+#define NV2080_CTRL_CMD_BUS_GET_PEX_UTIL_COUNTERS (0x20801819) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS_MESSAGE_ID (0x19U)
+
+typedef struct NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS {
+    NvU32 pexCounterMask;
+    NvU32 pexCounters[NV2080_CTRL_PEX_UTIL_MAX_COUNTER_TYPES];
+} NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_CLEAR_PEX_UTIL_COUNTERS
+ * This command clears the counts for different counter types.
+ *
+ *   pexCounterMask
+ *     This parameter specifies the input mask for desired counters to be
+ *     cleared.  Note that not all counters can be cleared.
+ *
+ * NOTE: NV2080_CTRL_BUS_PEX_UTIL_COUNTER_UPSTREAM and
+ * NV2080_CTRL_BUS_PEX_UTIL_COUNTER_DOWNSTREAM belong to the PMU.
+ * The ctrl function will neither reset nor disable/enable them.
+ */
+#define NV2080_CTRL_CMD_BUS_CLEAR_PEX_UTIL_COUNTERS (0x20801820) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS_MESSAGE_ID (0x20U)
+
+typedef struct NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS {
+    NvU32 pexCounterMask;
+} NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS;
+
+#define NV2080_CTRL_CMD_BUS_GET_BFD (0x20801821) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_BFD_PARAMSARR_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_BUS_GET_BFD_PARAMS {
+    NvBool valid;
+    NvU16  deviceID;
+    NvU16  vendorID;
+    NvU32  domain;
+    NvU16  bus;
+    NvU16  device;
+    NvU8   function;
+} NV2080_CTRL_BUS_GET_BFD_PARAMS;
+
+#define NV2080_CTRL_BUS_GET_BFD_PARAMSARR_MESSAGE_ID (0x21U)
+
+typedef struct NV2080_CTRL_BUS_GET_BFD_PARAMSARR {
+    NV2080_CTRL_BUS_GET_BFD_PARAMS params[32];
+} NV2080_CTRL_BUS_GET_BFD_PARAMSARR;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_ASPM_DISABLE_FLAGS
+ * This command gets the state of the PDB properties listed below.
+ *
+ *   aspmDisableFlags[]
+ *     NvBool array storing the state of each property.  The array size can
+ *     be increased as required.
+ *
+ * NOTE: When adding more properties, increment NV2080_CTRL_ASPM_DISABLE_FLAGS_MAX_FLAGS.
+ */
+
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_L1_MASK_REGKEY_OVERRIDE                0x00000000
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_OS_RM_MAKES_POLICY_DECISIONS           0x00000001
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_BEHIND_BRIDGE                      0x00000002
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_UPSTREAM_PORT_L1_UNSUPPORTED       0x00000003
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED     0x00000004
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY   0x00000005
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_CL_ASPM_L1_CHIPSET_DISABLED            0x00000006
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_CL_ASPM_L1_CHIPSET_ENABLED_MOBILE_ONLY 0x00000007
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_BIF_ENABLE_ASPM_DT_L1                  0x00000008
+// append properties here
+
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_MAX_FLAGS 9
+
+#define NV2080_CTRL_CMD_BUS_GET_ASPM_DISABLE_FLAGS (0x20801822) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS_MESSAGE_ID (0x22U)
+
+typedef struct NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS {
+    NvBool aspmDisableFlags[NV2080_CTRL_ASPM_DISABLE_FLAGS_MAX_FLAGS];
+} NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS;
+
+#define NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS (0x20801824) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS_MESSAGE_ID (0x24U)
+
+typedef struct NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS {
+    NvBool bEnable;
+} NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_NVLINK_PEER_ID_MASK
+ *
+ * This command returns the cached (SW-only) NVLINK peer id mask.  Currently,
+ * this control call is only needed inside an SR-IOV enabled guest where page
+ * table management is being done by the guest.  The guest needs this mask to
+ * derive the peer id corresponding to the peer GPU.  This peer id will then
+ * be programmed inside the PTEs by the guest RM.
+ *
+ *   nvlinkPeerIdMask[OUT]
+ *     The peer id mask is returned in this array.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_BUS_GET_NVLINK_PEER_ID_MASK (0x20801825) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_MAX_NUM_GPUS 32
+
+#define NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS_MESSAGE_ID (0x25U)
+
+typedef struct NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS {
+    NvU32 nvlinkPeerIdMask[NV2080_CTRL_BUS_MAX_NUM_GPUS];
+} NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS
+ * This command takes the parameters eomMode, eomNblks and eomNerrs from the
+ * client and then sends them to the PMU.
+ */
+#define NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS (0x20801826) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS_MESSAGE_ID (0x26U)
+
+typedef struct NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS {
+    NvU8 eomMode;
+    NvU8 eomNblks;
+    NvU8 eomNerrs;
+} NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS;
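A sketch of how a guest might pull a peer id out of one nvlinkPeerIdMask entry by scanning for the lowest set bit; which array entry corresponds to which remote GPU is not spelled out above, so the indexing is left to the caller as an assumption.

    #include <stdint.h>

    /* Return the lowest set bit position of a peer id mask, or -1 if empty. */
    static int lowest_peer_id(uint32_t mask)
    {
        int bit;

        for (bit = 0; bit < 32; bit++)
        {
            if (mask & (1u << bit))
                return bit;
        }
        return -1;
    }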
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE
+ * This command takes a UPHY register address and lane-select mask from the
+ * client and then sends them to the PMU.
+ */
+#define NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE (0x20801827) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS_MESSAGE_ID (0x27U)
+
+typedef struct NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS {
+    NvU32 regAddress;
+    NvU32 laneSelectMask;
+    NvU16 regValue;
+} NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_EOM_STATUS
+ *
+ */
+#define NV2080_CTRL_CMD_BUS_GET_EOM_STATUS (0x20801828) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_MAX_NUM_LANES 32
+
+#define NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS_MESSAGE_ID (0x28U)
+
+typedef struct NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS {
+    NvU8  eomMode;
+    NvU8  eomNblks;
+    NvU8  eomNerrs;
+    NvU8  eomBerEyeSel;
+    NvU8  eomPamEyeSel;
+    NvU32 laneMask;
+    NvU16 eomStatus[NV2080_CTRL_BUS_MAX_NUM_LANES];
+} NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS
+ *
+ * This command returns the PCIe requester atomics operation capabilities
+ * of the GPU with regard to the aspect of support the client is asking for.
+ *
+ *
+ *   capType [IN]
+ *     The aspect of the atomics support the client is querying atomics
+ *     capability for.  Supported types are defined under
+ *     NV2080_CTRL_CMD_BUS_PCIE_ATOMICS_CAPTYPE_*.
+ *
+ *   dbdf [IN]
+ *     Argument used to identify the PCIe peer endpoint.  Used only for the
+ *     _CAPTYPE_P2P.  Encoded as: domain (31:16), bus (15:8), device (7:3),
+ *     function (2:0)
+ *
+ *   atomicsCaps[OUT]
+ *     Mask of supported PCIe requester atomic operations in the form of
+ *     NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_*
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS (0x20801829) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS_MESSAGE_ID (0x29U)
+
+typedef struct NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS {
+    NvU32 capType;
+    NvU32 dbdf;
+    NvU32 atomicsCaps;
+} NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS;
+
+/*
+ * Defined methods to expose atomics capability.
+ *
+ * NV2080_CTRL_CMD_BUS_PCIE_ATOMICS_CAPTYPE_SYSMEM
+ *   Exposes the state of atomics support between GPU and Sysmem.
+ * NV2080_CTRL_CMD_BUS_PCIE_ATOMICS_CAPTYPE_GPU
+ *   Exposes the state of the GPU atomics support without taking into
+ *   account PCIe topology.
+ * NV2080_CTRL_CMD_BUS_PCIE_ATOMICS_CAPTYPE_P2P
+ *   Exposes the state of atomics support between the source (this GPU)
+ *   and the peer GPU identified by the dbdf argument.
+ */
+#define NV2080_CTRL_CMD_BUS_PCIE_ATOMICS_CAPTYPE_SYSMEM               0x0
+#define NV2080_CTRL_CMD_BUS_PCIE_ATOMICS_CAPTYPE_GPU                  0x1
+#define NV2080_CTRL_CMD_BUS_PCIE_ATOMICS_CAPTYPE_P2P                  0x2
+
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_FETCHADD_32     0:0
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_FETCHADD_32_YES (0x00000001)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_FETCHADD_32_NO  (0x00000000)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_FETCHADD_64     1:1
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_FETCHADD_64_YES (0x00000001)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_FETCHADD_64_NO  (0x00000000)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_SWAP_32         2:2
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_SWAP_32_YES     (0x00000001)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_SWAP_32_NO      (0x00000000)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_SWAP_64         3:3
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_SWAP_64_YES     (0x00000001)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_SWAP_64_NO      (0x00000000)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_32          4:4
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_32_YES      (0x00000001)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_32_NO       (0x00000000)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_64          5:5
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_64_YES      (0x00000001)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_64_NO       (0x00000000)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_128         6:6
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_128_YES     (0x00000001)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_128_NO      (0x00000000)
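A sketch of packing the dbdf argument as documented above (domain 31:16, bus 15:8, device 7:3, function 2:0) and of testing one bit of the returned atomicsCaps; the helper name is illustrative only.

    #include <stdint.h>

    static uint32_t make_dbdf(uint16_t domain, uint8_t bus,
                              uint8_t device, uint8_t function)
    {
        return ((uint32_t)domain << 16)           |  /* domain   31:16 */
               ((uint32_t)bus << 8)               |  /* bus      15:8  */
               ((uint32_t)(device & 0x1fu) << 3)  |  /* device   7:3   */
               ((uint32_t)function & 0x7u);          /* function 2:0   */
    }

    /* After NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS returns, bit 0:0
     * of atomicsCaps (_FETCHADD_32) indicates 32-bit fetch-and-add support:
     *     hasFetchAdd32 = (params.atomicsCaps & 0x1u) != 0;
     */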
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS
+ *
+ * This command returns the supported GPU atomic operations
+ * that map to the capable PCIe atomic operations from GPU to
+ * coherent SYSMEM.
+ *
+ *   capType [IN]
+ *     The aspect of the atomics support the client is querying atomics
+ *     capability for.  Supported types are defined under
+ *     NV2080_CTRL_CMD_BUS_PCIE_ATOMICS_CAPTYPE_*.
+ *
+ *   dbdf [IN]
+ *     Argument used to identify the PCIe peer endpoint.  Used only for the
+ *     _CAPTYPE_P2P.  Encoded as: domain (31:16), bus (15:8), device (7:3),
+ *     function (2:0)
+ *
+ *   atomicOp[OUT]
+ *     Array of NV2080_CTRL_BUS_PCIE_GPU_ATOMIC_OP_INFO that contains the
+ *     atomic operation supported status and its attributes.  The array can
+ *     be indexed using one of
+ *     NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_*
+ *
+ */
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS (0x2080182a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_IADD  0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_IMIN  1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_IMAX  2
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_INC   3
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_DEC   4
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_IAND  5
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_IOR   6
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_IXOR  7
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_EXCH  8
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_CAS   9
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_FADD  10
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_FMIN  11
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_FMAX  12
+
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_COUNT 13
+
+/*
+ * NV2080_CTRL_BUS_PCIE_GPU_ATOMIC_OP_INFO
+ *
+ * Describes the support state and related attributes of a single GPU
+ * atomic op.
+ *
+ *   bSupported
+ *     Whether the GPU atomic operation is natively supported by PCIe.
+ *
+ *   attributes
+ *     Provides the attributes mask of the GPU atomic operation when
+ *     supported, in the form of
+ *     NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_*
+ */
+typedef struct NV2080_CTRL_BUS_PCIE_GPU_ATOMIC_OP_INFO {
+    NvBool bSupported;
+    NvU32  attributes;
+} NV2080_CTRL_BUS_PCIE_GPU_ATOMIC_OP_INFO;
+
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS_PARAMS_MESSAGE_ID (0x2AU)
+
+typedef struct NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS_PARAMS {
+    NvU32                                   capType;
+    NvU32                                   dbdf;
+    NV2080_CTRL_BUS_PCIE_GPU_ATOMIC_OP_INFO atomicOp[NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_COUNT];
+} NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS_PARAMS;
+
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SCALAR        0:0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SCALAR_YES    1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SCALAR_NO     0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_VECTOR        1:1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_VECTOR_YES    1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_VECTOR_NO     0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_REDUCTION     2:2
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_REDUCTION_YES 1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_REDUCTION_NO  0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_32       3:3
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_32_YES   1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_32_NO    0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_64       4:4
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_64_YES   1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_64_NO    0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_128      5:5
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_128_YES  1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_128_NO   0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIGNED        6:6
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIGNED_YES    1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIGNED_NO     0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_UNSIGNED      7:7
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_UNSIGNED_YES  1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_UNSIGNED_NO   0
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_C2C_INFO
+ *
+ * This command returns the C2C links information.
+ *
+ *   bIsLinkUp[OUT]
+ *     NV_TRUE if the C2C links are present and the links are up.
+ *     The remaining fields below are valid only if the return value is
+ *     NV_OK and bIsLinkUp is NV_TRUE.
+ *   bLinkInHS[OUT]
+ *     NV_TRUE if the C2C links are in high speed mode.
+ *   nrLinks[OUT]
+ *     Total number of C2C links that are up.
+ *   maxNrLinks[OUT]
+ *     Maximum number of C2C links that are supported.
+ *   linkMask[OUT]
+ *     Bitmask of the C2C links present and up.
+ *   perLinkBwMBps[OUT]
+ *     Theoretical per link bandwidth in MBps.
+ *   perLinkLaneWidth[OUT]
+ *     Lane width per link.
+ *   remoteType[OUT]
+ *     Type of the device connected to the remote end of the C2C link.
+ *     Valid values are:
+ *       NV2080_CTRL_BUS_GET_C2C_INFO_REMOTE_TYPE_CPU - connected to a CPU,
+ *         in either self-hosted mode or externally-hosted mode
+ *       NV2080_CTRL_BUS_GET_C2C_INFO_REMOTE_TYPE_GPU - connected to another
+ *         GPU
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ */
+
+#define NV2080_CTRL_CMD_BUS_GET_C2C_INFO (0x2080182b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_C2C_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_GET_C2C_INFO_PARAMS_MESSAGE_ID (0x2BU)
+
+typedef struct NV2080_CTRL_CMD_BUS_GET_C2C_INFO_PARAMS {
+    NvBool bIsLinkUp;
+    NvBool bLinkInHS;
+    NvU32  nrLinks;
+    NvU32  maxNrLinks;
+    NvU32  linkMask;
+    NvU32  perLinkBwMBps;
+    NvU32  perLinkLaneWidth;
+    NvU32  remoteType;
+} NV2080_CTRL_CMD_BUS_GET_C2C_INFO_PARAMS;
+
+#define NV2080_CTRL_BUS_GET_C2C_INFO_REMOTE_TYPE_CPU 1
+#define NV2080_CTRL_BUS_GET_C2C_INFO_REMOTE_TYPE_GPU 2
+
+#define NV2080_CTRL_BUS_GET_C2C_INFO_REMOTE_TYPE_SOC 3
+
+
+
+/*
+ * NV2080_CTRL_CMD_BUS_SYSMEM_ACCESS
+ *
+ * This command disables the GPU system memory access after quiescing the GPU,
+ * or re-enables sysmem access.
+ *
+ *   bDisable
+ *     If NV_TRUE the GPU is quiesced and system memory access is disabled.
+ *     If NV_FALSE the GPU system memory access is re-enabled and the GPU is
+ *     resumed.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV2080_CTRL_CMD_BUS_SYSMEM_ACCESS (0x2080182c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS_MESSAGE_ID (0x2CU)
+
+typedef struct NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS {
+    NvBool bDisable;
+} NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS;
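A sketch of deriving the aggregate theoretical C2C bandwidth from the GET_C2C_INFO fields above; this is only meaningful when bIsLinkUp is NV_TRUE, and the helper name is illustrative.

    #include <stdint.h>

    /* Aggregate theoretical C2C bandwidth in MBps across all links up. */
    static uint64_t total_c2c_bw_mbps(uint32_t nrLinks, uint32_t perLinkBwMBps)
    {
        return (uint64_t)nrLinks * (uint64_t)perLinkBwMBps;
    }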
+ * nrReplayB2bErrIntr[OUT]
+ *     Number of times REPLAY_B2B error interrupt triggered.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_BUS_GET_C2C_ERR_INFO (0x2080182d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_C2C_ERR_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_GET_C2C_ERR_INFO_MAX_NUM_C2C_INSTANCES 2
+#define NV2080_CTRL_BUS_GET_C2C_ERR_INFO_MAX_C2C_LINKS_PER_INSTANCE 7
+
+#define NV2080_CTRL_BUS_GET_C2C_ERR_INFO_PARAMS_MESSAGE_ID (0x2DU)
+
+typedef struct NV2080_CTRL_BUS_GET_C2C_ERR_INFO_PARAMS {
+    struct {
+        NvU32 nrCrcErrIntr;
+        NvU32 nrReplayErrIntr;
+        NvU32 nrReplayB2bErrIntr;
+    } errCnts[NV2080_CTRL_BUS_GET_C2C_ERR_INFO_MAX_NUM_C2C_INSTANCES * NV2080_CTRL_BUS_GET_C2C_ERR_INFO_MAX_C2C_LINKS_PER_INSTANCE];
+} NV2080_CTRL_BUS_GET_C2C_ERR_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_SET_P2P_MAPPING
+ *
+ * This command sets P2P mapping.
+ *
+ * connectionType[IN]
+ *     Connection type, one of NV2080_CTRL_CMD_BUS_SET_P2P_MAPPING_CONNECTION_TYPE_*
+ * peerId[IN]
+ *     peerId of remote GPU from local GPU on which call is made.
+ * bSpaAccessOnly[IN]
+ *     SPA access only. SPA addressing mode is supported when we support ATS.
+ * bUseUuid [in]
+ *     Option only available for Guest RPCs and is not available for external clients.
+ *     Set to NV_TRUE to use remoteGpuUuid in lieu of remoteGpuId to identify target GPU.
+ * remoteGpuId[IN]
+ *     GPU ID of remote GPU.
+ * remoteGpuUuid [in]
+ *     Alternative to gpuId; used to identify target GPU for which caps are being queried.
+ *     Option only available for Guest RPCs.
+ *     If bUseUuid == NV_TRUE, gpuUuid is used in lieu of gpuId to identify target GPU.
+ *     If bUseUuid == NV_FALSE, gpuUuid is ignored and gpuId is used by default.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_BUS_SET_P2P_MAPPING (0x2080182e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_SET_P2P_MAPPING_CONNECTION_TYPE_INVALID 0
+#define NV2080_CTRL_CMD_BUS_SET_P2P_MAPPING_CONNECTION_TYPE_NVLINK 1
+#define NV2080_CTRL_CMD_BUS_SET_P2P_MAPPING_CONNECTION_TYPE_PCIE 2
+#define NV2080_CTRL_CMD_BUS_SET_P2P_MAPPING_CONNECTION_TYPE_PCIE_BAR1 3
+#define NV2080_CTRL_CMD_BUS_SET_P2P_MAPPING_CONNECTION_TYPE_C2C 4
+
+#define NV2080_SET_P2P_MAPPING_UUID_LEN 16U
+
+#define NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_MESSAGE_ID (0x2EU)
+
+typedef struct NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS {
+    NvU32 connectionType;
+    NvU32 peerId;
+    NvBool bEgmPeer;
+    NvBool bSpaAccessOnly;
+    NvBool bUseUuid;
+    NvU32 remoteGpuId;
+    NvU8 remoteGpuUuid[NV2080_SET_P2P_MAPPING_UUID_LEN];
+} NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_UNSET_P2P_MAPPING
+ *
+ * This command unsets P2P mapping.
+ *
+ * connectionType[IN]
+ *     Connection type, one of NV2080_CTRL_CMD_BUS_SET_P2P_MAPPING_CONNECTION_TYPE_*
+ * peerId[IN]
+ *     peerId of remote GPU from local GPU on which call is made.
+ * bUseUuid [in]
+ *     Option only available for Guest RPCs and is not available for external clients.
+ *     Set to NV_TRUE to use remoteGpuUuid in lieu of remoteGpuId to identify target GPU.
+ * remoteGpuId[IN]
+ *     GPU ID of remote GPU.
+ * remoteGpuUuid [in]
+ *     Alternative to gpuId; used to identify target GPU for which caps are being queried.
+ *     Option only available for Guest RPCs.
+ *     If bUseUuid == NV_TRUE, gpuUuid is used in lieu of gpuId to identify target GPU.
+ *     If bUseUuid == NV_FALSE, gpuUuid is ignored and gpuId is used by default.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_BUS_UNSET_P2P_MAPPING (0x2080182f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS_MESSAGE_ID (0x2FU)
+
+typedef struct NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS {
+    NvU32 connectionType;
+    NvU32 peerId;
+    NvBool bUseUuid;
+    NvU32 remoteGpuId;
+    NvU8 remoteGpuUuid[NV2080_SET_P2P_MAPPING_UUID_LEN];
+} NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS
+ *
+ * This command returns the PCIe completer atomics operation capabilities
+ * of the GPU.
+ *
+ * atomicsCaps[OUT]
+ *     Mask of supported PCIe completer atomic operations in the form of
+ *     NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_*
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS (0x20801830) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_PARAMS_MESSAGE_ID (0x30U)
+
+typedef struct NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_PARAMS {
+    NvU32 atomicsCaps;
+} NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_PARAMS;
+
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_FETCHADD_32 0:0
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_FETCHADD_32_YES (0x00000001)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_FETCHADD_32_NO (0x00000000)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_FETCHADD_64 1:1
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_FETCHADD_64_YES (0x00000001)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_FETCHADD_64_NO (0x00000000)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_SWAP_32 2:2
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_SWAP_32_YES (0x00000001)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_SWAP_32_NO (0x00000000)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_SWAP_64 3:3
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_SWAP_64_YES (0x00000001)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_SWAP_64_NO (0x00000000)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_CAS_32 4:4
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_CAS_32_YES (0x00000001)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_CAS_32_NO (0x00000000)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_CAS_64 5:5
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_CAS_64_YES (0x00000001)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_CAS_64_NO (0x00000000)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_CAS_128 6:6
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_CAS_128_YES (0x00000001)
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_CPL_ATOMICS_CAPS_CAS_128_NO (0x00000000)
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_C2C_LPWR_STATS
+ *
+ * This command returns C2C low power statistics.
+ * Units for residency and latency are in microseconds.
+ * c2cStateSupportMask[OUT]
+ *     Support mask of supported C2C states.
+ *     CL0 will always be supported (FULL_POWER).
+ * cl3EntryCount[OUT]
+ *     Count of the number of times CL3 state has been entered.
+ * cl3ResidentTimeUs[OUT]
+ *     Total/Average resident time in CL3 state.
+ * cl3AvgEntryLatencyUs[OUT]
+ *     Average entry latency for CL3 state.
+ * cl3AvgExitLatencyUs[OUT]
+ *     Average exit latency for CL3 state.
+ * cl3PstateSupportMask[OUT]
+ *     Pstate support mask for CL3 state.
+ * cl4EntryCount[OUT]
+ *     Count of the number of times CL4 state has been entered.
+ * cl4ResidentTimeUs[OUT]
+ *     Total/Average resident time in CL4 state.
+ * cl4AvgEntryLatencyUs[OUT]
+ *     Average entry latency for CL4 state.
+ * cl4AvgExitLatencyUs[OUT]
+ *     Average exit latency for CL4 state.
+ * cl4PstateSupportMask[OUT]
+ *     Pstate support mask for CL4 state.
+ * localPowerState[OUT]
+ *     Power state of the local end of the C2C link.
+ *     Valid values are :
+ *     NV2080_CTRL_CMD_BUS_GET_C2C_STATE_FULL_POWER - Full power state
+ *     NV2080_CTRL_CMD_BUS_GET_C2C_STATE_CL3 - Low power state
+ *     NV2080_CTRL_CMD_BUS_GET_C2C_STATE_CL4 - Low power state
+ * remotePowerState[OUT]
+ *     Power state of the remote end of the C2C link.
+ *     Valid values are :
+ *     NV2080_CTRL_CMD_BUS_GET_C2C_STATE_FULL_POWER - Full power state
+ *     NV2080_CTRL_CMD_BUS_GET_C2C_STATE_CL3 - Low power state
+ *     NV2080_CTRL_CMD_BUS_GET_C2C_STATE_CL4 - Low power state
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ *
+ * Please also review the information below for additional information on
+ * select fields:
+ *
+ * cl3EntryCount/cl4EntryCount[OUT]
+ *     These may not represent the current exact count, as low power transitions
+ *     could have occurred after reading the counter register.
+ */
+
+#define NV2080_CTRL_CMD_BUS_GET_C2C_LPWR_STATS (0x20801831) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_C2C_LPWR_STATS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_GET_C2C_LPWR_STATS_PARAMS_MESSAGE_ID (0x31U)
+
+typedef struct NV2080_CTRL_CMD_BUS_GET_C2C_LPWR_STATS_PARAMS {
+    NvU32 c2cStateSupportMask;
+    NvBool bCl3Support;
+    NvU32 cl3EntryCount;
+    NvU32 cl3ExitCount;
+    NvU32 cl3ResidentTimeUs;
+    NvU32 cl3AvgEntryLatencyUs;
+    NvU32 cl3AvgExitLatencyUs;
+    NvU32 cl3PstateSupportMask;
+    NvU32 cl3DisallowReasonMask;
+    NvBool bCl4Support;
+    NvU32 cl4EntryCount;
+    NvU32 cl4ExitCount;
+    NvU32 cl4ResidentTimeUs;
+    NvU32 cl4AvgEntryLatencyUs;
+    NvU32 cl4AvgExitLatencyUs;
+    NvU32 cl4PstateSupportMask;
+    NvU32 cl4DisallowReasonMask;
+    NvU32 c2cLpwrStateAllowedMask;
+    NvU32 localPowerState;
+    NvU32 remotePowerState;
+} NV2080_CTRL_CMD_BUS_GET_C2C_LPWR_STATS_PARAMS;
+
+#define NV2080_CTRL_CMD_BUS_GET_C2C_STATE_FULL_POWER 0x0
+#define NV2080_CTRL_CMD_BUS_GET_C2C_STATE_CL3 0x1
+#define NV2080_CTRL_CMD_BUS_GET_C2C_STATE_CL4 0x2
+
+/*
+ * NV2080_CTRL_CMD_BUS_SET_C2C_LPWR_STATE_VOTE
+ *
+ * This command sets the allow vote for C2C Lpwr States.
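+ *
+ * As an illustrative sketch only (not part of the original header; an
+ * NvRmControl-style dispatch entry point and the surrounding handles are
+ * assumed), a client could disallow CL4 entry like this:
+ *
+ *   NV2080_CTRL_CMD_BUS_SET_C2C_LPWR_STATE_VOTE_PARAMS vote = { 0 };
+ *   vote.c2cLpwrStateId = NV2080_CTRL_LPWR_C2C_STATE_ID_CL4; // per the CLx naming below
+ *   vote.bAllowed = NV_FALSE;
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_BUS_SET_C2C_LPWR_STATE_VOTE,
+ *                        &vote, sizeof(vote));
+ *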
+ * c2cLpwrStateId[IN] + * C2C LowPower State Id : NV2080_CTRL_LPWR_C2C_STATE_ID_CLx + * bAllowed[in] + * State Allowed/disallowed flag + */ +#define NV2080_CTRL_CMD_BUS_SET_C2C_LPWR_STATE_VOTE (0x20801832) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_SET_C2C_LPWR_STATE_VOTE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_BUS_SET_C2C_LPWR_STATE_VOTE_PARAMS_MESSAGE_ID (0x32U) + +typedef struct NV2080_CTRL_CMD_BUS_SET_C2C_LPWR_STATE_VOTE_PARAMS { + NvU32 c2cLpwrStateId; + NvBool bAllowed; +} NV2080_CTRL_CMD_BUS_SET_C2C_LPWR_STATE_VOTE_PARAMS; + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h new file mode 100644 index 0000000..c8f04b8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h @@ -0,0 +1,511 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080ce.finn +// + + + +#include "nvcfg_sdk.h" + + +/* NV20_SUBDEVICE_XX ce control commands and parameters */ + +/* + * NV2080_CTRL_CMD_CE_GET_CAPS + * + * This command returns the set of CE capabilities for the device + * in the form of an array of unsigned bytes. + * + * ceEngineType + * This parameter specifies the copy engine type + * capsTblSize + * This parameter specifies the size in bytes of the caps table per CE. + * This value should be set to NV2080_CTRL_CE_CAPS_TBL_SIZE. + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the CE caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_CE_GET_CAPS (0x20802a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_CAPS_PARAMS_MESSAGE_ID" */ + +/* + * Size in bytes of CE caps table. This value should be one greater + * than the largest byte_index value below. 
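+ *
+ * Illustrative usage sketch (not part of the original header; an
+ * NvRmControl-style dispatch entry point and an NV2080_ENGINE_TYPE_COPY*
+ * engine value are assumed) combining the V2 control with the
+ * NV2080_CTRL_CE_GET_CAP macro defined further below:
+ *
+ *   NV2080_CTRL_CE_GET_CAPS_V2_PARAMS params = { 0 };
+ *   params.ceEngineType = engineType; // an NV2080_ENGINE_TYPE_COPY* value
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_CE_GET_CAPS_V2,
+ *                        &params, sizeof(params));
+ *   if ((status == NV_OK) &&
+ *       NV2080_CTRL_CE_GET_CAP(params.capsTbl, NV2080_CTRL_CE_CAPS_CE_GRCE))
+ *   {
+ *       // this copy engine is a GRCE
+ *   }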
+ */ +#define NV2080_CTRL_CE_CAPS_TBL_SIZE 2 + +#define NV2080_CTRL_CE_GET_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_CE_GET_CAPS_PARAMS { + NvU32 ceEngineType; + NvU32 capsTblSize; + NV_DECLARE_ALIGNED(NvP64 capsTbl, 8); +} NV2080_CTRL_CE_GET_CAPS_PARAMS; + +#define NV2080_CTRL_CMD_CE_GET_CAPS_V2 (0x20802a03) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CE_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_CE_GET_CAPS_V2_PARAMS { + NvU32 ceEngineType; + NvU8 capsTbl[NV2080_CTRL_CE_CAPS_TBL_SIZE]; +} NV2080_CTRL_CE_GET_CAPS_V2_PARAMS; + +/* extract cap bit setting from tbl */ +#define NV2080_CTRL_CE_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* caps format is byte_index:bit_mask */ +#define NV2080_CTRL_CE_CAPS_CE_GRCE 0:0x01 +#define NV2080_CTRL_CE_CAPS_CE_SHARED 0:0x02 +#define NV2080_CTRL_CE_CAPS_CE_SYSMEM_READ 0:0x04 +#define NV2080_CTRL_CE_CAPS_CE_SYSMEM_WRITE 0:0x08 +#define NV2080_CTRL_CE_CAPS_CE_NVLINK_P2P 0:0x10 +#define NV2080_CTRL_CE_CAPS_CE_SYSMEM 0:0x20 +#define NV2080_CTRL_CE_CAPS_CE_P2P 0:0x40 +#define NV2080_CTRL_CE_CAPS_CE_BL_SIZE_GT_64K_SUPPORTED 0:0x80 +#define NV2080_CTRL_CE_CAPS_CE_SUPPORTS_NONPIPELINED_BL 1:0x01 +#define NV2080_CTRL_CE_CAPS_CE_SUPPORTS_PIPELINED_BL 1:0x02 +#define NV2080_CTRL_CE_CAPS_CE_CC_SECURE 1:0x04 +#define NV2080_CTRL_CE_CAPS_CE_DECOMP_SUPPORTED 1:0x08 +#define NV2080_CTRL_CE_CAPS_CE_CC_WORK_SUBMIT 1:0x10 +#define NV2080_CTRL_CE_CAPS_CE_SCRUB 1:0x40 + +/* + * NV2080_CTRL_CE_CAPS_CE_GRCE + * Set if the CE is synchronous with GR + * + * NV2080_CTRL_CE_CAPS_CE_SHARED + * Set if the CE shares physical CEs with any other CE + * + * NV2080_CTRL_CE_CAPS_CE_SYSMEM_READ + * Set if the CE can give enhanced performance for SYSMEM reads over other CEs + * + * NV2080_CTRL_CE_CAPS_CE_SYSMEM_WRITE + * Set if the CE can give enhanced performance for SYSMEM writes over other CEs + * + * NV2080_CTRL_CE_CAPS_CE_NVLINK_P2P + * Set if the CE can be used for P2P transactions using NVLINK + * Once a CE is exposed for P2P over NVLINK, it will remain available for the life of RM + * PCE2LCE mapping may change based on the number of GPUs registered in RM however + * + * NV2080_CTRL_CE_CAPS_CE_SYSMEM + * Set if the CE can be used for SYSMEM transactions + * + * NV2080_CTRL_CE_CAPS_CE_P2P + * Set if the CE can be used for P2P transactions + * + * NV2080_CTRL_CE_CAPS_CE_BL_SIZE_GT_64K_SUPPORTED + * Set if the CE supports BL copy size greater than 64K + * + * NV2080_CTRL_CE_CAPS_CE_SUPPORTS_NONPIPELINED_BL + * Set if the CE supports non-pipelined Block linear + * + * NV2080_CTRL_CE_CAPS_CE_SUPPORTS_PIPELINED_BL + * Set if the CE supports pipelined Block Linear + * + * NV2080_CTRL_CE_CAPS_CE_CC_SECURE + * Set if the CE is capable of encryption/decryption + * + * NV2080_CTRL_CE_CAPS_CE_DECOMP_SUPPORTED + * Set if the CE is capable of handling decompression workloads; + * async copies will not be supported on the same CE + * + * NV2080_CTRL_CE_CAPS_CE_CC_WORK_SUBMIT + * Set if the CE can be used for work launch/completion in CC mode + * + * NV2080_CTRL_CE_CAPS_CE_SCRUB + * Set if the CE can be used for fast scrubbing + */ + +/* + * NV2080_CTRL_CMD_CE_GET_CE_PCE_MASK + * + * This command returns the mapping of PCE's for the given LCE. + * The pceMask is local to the CE shim that ceEngineType belongs to. 
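+ *
+ * For example (illustrative only, not part of the original header), the
+ * number of PCEs backing the LCE can be derived from the returned mask with
+ * a population count, e.g. nvPopCount32() from nvmisc.h, where pceMaskParams
+ * is a filled NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS:
+ *
+ *   numPces = nvPopCount32(pceMaskParams.pceMask);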
+ *
+ * ceEngineType
+ *     This parameter specifies the copy engine type
+ * pceMask
+ *     This parameter specifies a mask of PCEs that correspond
+ *     to the LCE specified in ceEngineType
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV2080_CTRL_CMD_CE_GET_CE_PCE_MASK (0x20802a02) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS {
+    NvU32 ceEngineType;
+    NvU32 pceMask;
+} NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_CE_SET_PCE_LCE_CONFIG
+ *
+ * This command sets the PCE2LCE configuration
+ *
+ * pceLceConfig[NV2080_CTRL_MAX_PCES]
+ *     This parameter specifies the PCE-LCE mapping requested
+ * grceLceConfig[NV2080_CTRL_MAX_GRCES]
+ *     This parameter specifies which LCE the GRCE is sharing with
+ *     0xF -> Does not share with any LCE
+ *     0-MAX_LCE -> Shares with the given LCE
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV2080_CTRL_CMD_CE_SET_PCE_LCE_CONFIG (0x20802a04) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_SET_PCE_LCE_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_MAX_PCES 32
+#define NV2080_CTRL_MAX_GRCES 4
+
+#define NV2080_CTRL_CE_SET_PCE_LCE_CONFIG_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV2080_CTRL_CE_SET_PCE_LCE_CONFIG_PARAMS {
+    NvU32 ceEngineType;
+    NvU32 pceLceMap[NV2080_CTRL_MAX_PCES];
+    NvU32 grceSharedLceMap[NV2080_CTRL_MAX_GRCES];
+} NV2080_CTRL_CE_SET_PCE_LCE_CONFIG_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_CE_UPDATE_PCE_LCE_MAPPINGS
+ *
+ * This command updates the PCE-LCE mappings
+ *
+ * pceLceMap [IN]
+ *     This parameter contains the array of PCE to LCE mappings.
+ *     The array is indexed by the PCE index, and contains the
+ *     LCE index that the PCE is assigned to. An unused PCE is
+ *     tagged with NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_INVALID_LCE.
+ *
+ * grceConfig [IN]
+ *     This parameter contains the array of GRCE configs.
+ *     0xF -> GRCE does not share with any LCE
+ *     0-MAX_LCE -> GRCE shares with the given LCE
+ *
+ * exposeCeMask [IN]
+ *     This parameter specifies the mask of LCEs to export to the
+ *     clients after the update.
+ *
+ * bUpdateNvlinkPceLce [IN]
+ *     Whether PCE-LCE mappings need to be updated for nvlink topology.
+ *     If this is NV_FALSE, RM would ignore the above values. However,
+ *     PCE-LCE mappings will still be updated if there were any regkey
+ *     overrides.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_GENERIC
+ */
+#define NV2080_CTRL_CMD_CE_UPDATE_PCE_LCE_MAPPINGS (0x20802a05) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS {
+    NvU32 pceLceMap[NV2080_CTRL_MAX_PCES];
+    NvU32 grceConfig[NV2080_CTRL_MAX_GRCES];
+    NvU32 exposeCeMask;
+    NvBool bUpdateNvlinkPceLce;
+} NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS;
+
+#define NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_INVALID_LCE 0xf
+
+/*
+ * NV2080_CTRL_CMD_CE_UPDATE_CLASS_DB
+ *
+ * This function triggers an update of the exported CE classes. CEs with
+ * no physical resources will not be exported.
+ * A record of these will be returned in the stubbedCeMask.
+ *
+ * For example, if NV2080_ENGINE_TYPE_COPY4 is stubbed, (1<<4) will be
+ * set in stubbedCeMask.
+ *
+ * This function operates on all CE shims.
+ */
+#define NV2080_CTRL_CMD_CE_UPDATE_CLASS_DB (0x20802a06) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS {
+    NvU32 stubbedCeMask;
+} NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_CE_GET_PHYSICAL_CAPS
+ *
+ * Query _CE_GRCE, _CE_SHARED, _CE_SUPPORTS_PIPELINED_BL, _CE_SUPPORTS_NONPIPELINED_BL bits of CE
+ * capabilities.
+ *
+ */
+
+#define NV2080_CTRL_CMD_CE_GET_PHYSICAL_CAPS (0x20802a07) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_PHYSICAL_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CE_GET_PHYSICAL_CAPS_PARAMS_MESSAGE_ID (0x7U)
+
+typedef NV2080_CTRL_CE_GET_CAPS_V2_PARAMS NV2080_CTRL_CE_GET_PHYSICAL_CAPS_PARAMS;
+
+#define NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID (0x8U)
+
+typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS {
+    NvU32 size;
+} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS;
+
+#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_CE_GET_HUB_PCE_MASK
+ *
+ * Get HSHUB and FBHUB PCE masks.
+ *
+ * [out] hshubPceMasks
+ *     PCE mask for each HSHUB
+ * [out] fbhubPceMask
+ *     FBHUB PCE mask
+ */
+
+#define NV2080_CTRL_CMD_CE_GET_HUB_PCE_MASK (0x20802a09) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CE_MAX_HSHUBS 32
+
+#define NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS {
+    NvU32 hshubPceMasks[NV2080_CTRL_CE_MAX_HSHUBS];
+    NvU32 fbhubPceMask;
+} NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_CE_GET_ALL_CAPS
+ *
+ * Query caps of all CEs.
+ *
+ * [out] capsTbl
+ *     Array of CE caps in the order of CEs. The caps bits interpretation is the same as in
+ *     NV2080_CTRL_CMD_CE_GET_CAPS.
+ * [out] present
+ *     Bit mask indicating which CEs are usable by the client and have their caps indicated in capsTbl.
+ *     If a CE is not marked present, its caps bits should be ignored.
+ *     If the client is subscribed to a MIG instance, only the CEs present in the instance are tagged as such.
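+ *
+ * Illustrative sketch (not part of the original header; NVBIT64 from
+ * nvmisc.h and an NvRmControl-style dispatch entry point are assumed) of
+ * walking the per-CE caps:
+ *
+ *   NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS allCaps = { 0 };
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_CE_GET_ALL_CAPS,
+ *                        &allCaps, sizeof(allCaps));
+ *   for (i = 0; (status == NV_OK) && (i < NV2080_CTRL_MAX_CES); i++)
+ *   {
+ *       if (!(allCaps.present & NVBIT64(i)))
+ *           continue; // caps bits of absent CEs must be ignored
+ *       if (NV2080_CTRL_CE_GET_CAP(allCaps.capsTbl[i],
+ *                                  NV2080_CTRL_CE_CAPS_CE_SYSMEM))
+ *       {
+ *           // CE i can be used for SYSMEM transactions
+ *       }
+ *   }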
+ */
+
+#define NV2080_CTRL_CMD_CE_GET_ALL_CAPS (0x20802a0a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_MAX_CES 64
+
+#define NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS_MESSAGE_ID (0xaU)
+
+typedef struct NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS {
+    NvU8 capsTbl[NV2080_CTRL_MAX_CES][NV2080_CTRL_CE_CAPS_TBL_SIZE];
+    NV_DECLARE_ALIGNED(NvU64 present, 8);
+} NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS;
+
+#define NV2080_CTRL_CMD_CE_GET_ALL_PHYSICAL_CAPS (0x20802a0b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_ALL_PHYSICAL_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CE_GET_ALL_PHYSICAL_CAPS_PARAMS_MESSAGE_ID (0xbU)
+
+typedef NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS NV2080_CTRL_CE_GET_ALL_PHYSICAL_CAPS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_CE_GET_LCE_SHIM_INFO
+ *
+ * This command queries LCE shim information of a specified CE.
+ * The information includes the shim instance the CE belongs to
+ * and the local LCE index within the shim.
+ *
+ * [in] ceEngineType
+ *     This parameter specifies the copy engine type, NV2080 define
+ * [out] shimInstance
+ *     The shim instance the ceEngineType belongs to.
+ * [out] shimLocalLceIdx
+ *     The local LCE index within the shim
+ *
+ */
+
+#define NV2080_CTRL_CMD_CE_GET_LCE_SHIM_INFO (0x20802a0c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_LCE_SHIM_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CE_GET_LCE_SHIM_INFO_PARAMS_MESSAGE_ID (0xcU)
+
+typedef struct NV2080_CTRL_CE_GET_LCE_SHIM_INFO_PARAMS {
+    NvU32 ceEngineType;
+    NvU32 shimInstance;
+    NvU32 shimLocalLceIdx;
+} NV2080_CTRL_CE_GET_LCE_SHIM_INFO_PARAMS;
+
+/*
+ * This command is identical to NV2080_CTRL_CMD_CE_UPDATE_PCE_LCE_MAPPINGS
+ * but supports more than one CE shim.
+ *
+ * This command updates the PCE-LCE mappings for one CE shim. On
+ * GPUs with multiple CE shims, this interface must be called for
+ * each shim.
+ *
+ * shimInstance [IN]
+ *     Specify which CE shim instance to operate on.
+ */
+
+#define NV2080_CTRL_CMD_CE_UPDATE_PCE_LCE_MAPPINGS_V2 (0x20802a0d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_V2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_V2_PARAMS_MESSAGE_ID (0xdU)
+
+typedef struct NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_V2_PARAMS {
+    NvU32 pceLceMap[NV2080_CTRL_MAX_PCES];
+    NvU32 grceConfig[NV2080_CTRL_MAX_GRCES];
+    NvU32 exposeCeMask;
+    NvBool bUpdateNvlinkPceLce;
+    NvU32 shimInstance;
+} NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_V2_PARAMS;
+
+/*
+ * This command is identical to NV2080_CTRL_CMD_CE_GET_HUB_PCE_MASK
+ * but supports more than one CE shim.
+ *
+ * This command gets the HSHUB/CEHUB and FBHUB PCE masks. On
+ * GPUs with multiple CE shims, this interface must be called for
+ * each shim.
+ *
+ * [in] shimInstance
+ *     Specify which CE shim instance to operate on.
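+ *
+ * Illustrative per-shim query loop (not part of the original header; the
+ * shim count, e.g. learned via NV2080_CTRL_CMD_CE_GET_LCE_SHIM_INFO, and an
+ * NvRmControl-style dispatch entry point are assumed):
+ *
+ *   for (shim = 0; shim < numShims; shim++)
+ *   {
+ *       NV2080_CTRL_CE_GET_HUB_PCE_MASK_V2_PARAMS params = { 0 };
+ *       params.shimInstance = shim;
+ *       status = NvRmControl(hClient, hSubdevice,
+ *                            NV2080_CTRL_CMD_CE_GET_HUB_PCE_MASK_V2,
+ *                            &params, sizeof(params));
+ *       // on NV_OK, params.connectingHubPceMasks[] and params.fbhubPceMask
+ *       // describe this shim's hubs
+ *   }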
+ */ + +#define NV2080_CTRL_CMD_CE_GET_HUB_PCE_MASK_V2 (0x20802a0e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_HUB_PCE_MASK_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CE_GET_HUB_PCE_MASK_V2_PARAMS_MESSAGE_ID (0xeU) + +typedef struct NV2080_CTRL_CE_GET_HUB_PCE_MASK_V2_PARAMS { + NvU32 connectingHubPceMasks[NV2080_CTRL_CE_MAX_HSHUBS]; + NvU32 fbhubPceMask; + NvU32 shimInstance; +} NV2080_CTRL_CE_GET_HUB_PCE_MASK_V2_PARAMS; + +typedef enum NV2080_CTRL_CE_LCE_TYPE { + NV2080_CTRL_CE_LCE_TYPE_PCIE = 1, + NV2080_CTRL_CE_LCE_TYPE_DECOMP = 2, + NV2080_CTRL_CE_LCE_TYPE_SCRUB = 3, + NV2080_CTRL_CE_LCE_TYPE_NVLINK_PEER = 4, + NV2080_CTRL_CE_LCE_TYPE_C2C = 5, + NV2080_CTRL_CE_LCE_TYPE_PCIE_RD = 6, + NV2080_CTRL_CE_LCE_TYPE_PCIE_WR = 7, + NV2080_CTRL_CE_LCE_TYPE_C2C_H2D = 8, + NV2080_CTRL_CE_LCE_TYPE_C2C_D2H = 9, + NV2080_CTRL_CE_LCE_TYPE_CC_WORK_SUBMIT = 10, +} NV2080_CTRL_CE_LCE_TYPE; + +/* + * NV2080_CTRL_CMD_INTERNAL_CE_GET_PCE_CONFIG_FOR_LCE_TYPE + * + * This command queries the PCE config required for the specified LCE type. + * + * [in] lceType + * LCE type. Should be one of NV2080_CTRL_CE_LCE_TYPE_* values. + * [out] numPces + * Number of PCEs supported per LCE + * [out] numLces + * Maximum number of LCEs supported by the chip for the specified LCE type. + * [out] supportedPceMask + * The mask of the PCEs that support the specified LCE type. + * [out] supportedLceMask + * The mask of the LCEs that support the specified LCE type. + * [out] pcePerHshub + * Numbers of PCEs from any given HSHUB that can be assigned to this LCE type. + * + * @return NV_OK + */ + +#define NV2080_CTRL_CMD_INTERNAL_CE_GET_PCE_CONFIG_FOR_LCE_TYPE (0x20802a0f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_CE_GET_PCE_CONFIG_FOR_LCE_TYPE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_CE_GET_PCE_CONFIG_FOR_LCE_TYPE_PARAMS_MESSAGE_ID (0xfU) + +typedef struct NV2080_CTRL_INTERNAL_CE_GET_PCE_CONFIG_FOR_LCE_TYPE_PARAMS { + NV2080_CTRL_CE_LCE_TYPE lceType; + NvU32 numPces; + NvU32 numLces; + NvU32 supportedPceMask; + NvU32 supportedLceMask; + NvU32 pcePerHshub; +} NV2080_CTRL_INTERNAL_CE_GET_PCE_CONFIG_FOR_LCE_TYPE_PARAMS; + +/* + * NV2080_CTRL_CMD_CE_GET_DECOMP_LCE_MASK + * + * This command gets the mask of LCEs that are enabled for decomp workloads. + * On GPUs with multiple CE shims, this interface must be called for + * each shim. + * + * [in] shimInstance + * Specify which CE shim instance to operate on. + * [out] decompLceMask + * Returns a 64-bit mask of which LCEs in given shim are marked as decomp CEs + */ + +#define NV2080_CTRL_CMD_CE_GET_DECOMP_LCE_MASK (0x20802a11) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_DECOMP_LCE_MASK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CE_GET_DECOMP_LCE_MASK_PARAMS_MESSAGE_ID (0x11U) + +typedef struct NV2080_CTRL_CE_GET_DECOMP_LCE_MASK_PARAMS { + NV_DECLARE_ALIGNED(NvU64 decompLceMask, 8); + NvU32 shimInstance; +} NV2080_CTRL_CE_GET_DECOMP_LCE_MASK_PARAMS; + +/* + * NV2080_CTRL_CMD_CE_IS_DECOMP_LCE_ENABLED + * + * This command returns whether a given global LCE index is enabled for decomp workloads. 
+ * It has to be given a global LCE index (cannot support shim local LCE ID) + * + * [in] lceIndex + * [out] bDecompEnabled + * Returns NV_TRUE if LCE is enabled for decompression, else returns NV_FALSE + */ + +#define NV2080_CTRL_CMD_CE_IS_DECOMP_LCE_ENABLED (0x20802a12) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_IS_DECOMP_LCE_ENABLED_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CE_IS_DECOMP_LCE_ENABLED_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NV2080_CTRL_CE_IS_DECOMP_LCE_ENABLED_PARAMS { + NvU32 lceIndex; + NvBool bDecompEnabled; +} NV2080_CTRL_CE_IS_DECOMP_LCE_ENABLED_PARAMS; + +/* _ctrl2080ce_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h new file mode 100644 index 0000000..b46acf7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080cipher.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h new file mode 100644 index 0000000..7954004 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h @@ -0,0 +1,45 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080clk.finn +// + +#include "nvfixedtypes.h" +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrl2080/ctrl2080boardobj.h" +#include "ctrl/ctrl2080/ctrl2080gpumon.h" +#include "ctrl/ctrl2080/ctrl2080clkavfs.h" +#include "ctrl/ctrl2080/ctrl2080volt.h" +#include "ctrl/ctrl2080/ctrl2080pmumon.h" + +#define NV2080_CTRL_CLK_DOMAIN_TEGRA_UNDEFINED (0x00000000U) +#define NV2080_CTRL_CLK_DOMAIN_TEGRA_GPCCLK (0x00000001U) +#define NV2080_CTRL_CLK_DOMAIN_TEGRA_NVDCLK (0x00000002U) + + +/* _ctrl2080clk_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h new file mode 100644 index 0000000..2d339fa --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080clkavfs.finn +// + + + +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrl2080/ctrl2080boardobj.h" +#include "ctrl/ctrl2080/ctrl2080volt.h" + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h new file mode 100644 index 0000000..4a380d1 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h @@ -0,0 +1,180 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080dma.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/* NV20_SUBDEVICE_XX dma control commands and parameters */
+
+/*
+ * NV2080_CTRL_CMD_DMA_INVALIDATE_TLB
+ *
+ * This command invalidates the GPU TLB. This is intended to be used
+ * by RM clients that manage their own TLB consistency when updating
+ * page tables on their own, or with DEFER_TLB_INVALIDATION options
+ * to other RM APIs.
+ *
+ * hVASpace
+ *     This parameter specifies the VASpace object whose MMU TLB entries need to be invalidated.
+ *     Specifying a GMMU VASpace object handle will invalidate the GMMU TLB for the particular VASpace.
+ *     Specifying a SMMU VASpace object handle will flush the entire SMMU TLB & PTC.
+ *
+ * This call can be used with the NV50_DEFERRED_API_CLASS (class 0x5080).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NVOS_STATUS_TIMEOUT_RETRY
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_DMA_INVALIDATE_TLB (0x20802502) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_DMA_INTERFACE_ID << 8) | NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS {
+    NvHandle hClient; // Deprecated. Kept here for compatibility with chips_GB9-2-1-1
+    NvHandle hDevice; // Deprecated. Kept here for compatibility with chips_GB9-2-1-1
+    NvU32 engine; // Deprecated. Kept here for compatibility with chips_GB9-2-1-1
+    NvHandle hVASpace;
+} NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS;
+
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_GRAPHICS 0:0
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_GRAPHICS_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_GRAPHICS_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VIDEO 1:1
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VIDEO_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VIDEO_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_DISPLAY 2:2
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_DISPLAY_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_DISPLAY_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_CAPTURE 3:3
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_CAPTURE_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_CAPTURE_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_IFB 4:4
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_IFB_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_IFB_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MV 5:5
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MV_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MV_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MPEG 6:6
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MPEG_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MPEG_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VLD 7:7
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VLD_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VLD_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_ENCRYPTION 8:8
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_ENCRYPTION_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_ENCRYPTION_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_PERFMON 9:9
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_PERFMON_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_PERFMON_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_POSTPROCESS 10:10
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_POSTPROCESS_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_POSTPROCESS_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_BAR 11:11
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_BAR_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_BAR_TRUE (0x00000001)
+
+/*
+ * NV2080_CTRL_DMA_INFO
+ *
+ * This structure represents a single 32bit dma engine value. Clients
+ * request a particular DMA engine value by specifying a unique dma
+ * information index.
+ *
+ * Legal dma information index values are:
+ *   NV2080_CTRL_DMA_INFO_INDEX_SYSTEM_ADDRESS_SIZE
+ *     This index can be used to request the system address size in bits.
+ */
+typedef NVXXXX_CTRL_XXX_INFO NV2080_CTRL_DMA_INFO;
+
+/* valid dma info index values */
+#define NV2080_CTRL_DMA_INFO_INDEX_SYSTEM_ADDRESS_SIZE (0x000000000)
+
+/* set INDEX_MAX to greatest possible index value */
+#define NV2080_CTRL_DMA_INFO_INDEX_MAX NV2080_CTRL_DMA_INFO_INDEX_SYSTEM_ADDRESS_SIZE
+
+/*
+ * NV2080_CTRL_CMD_DMA_GET_INFO
+ *
+ * This command returns dma engine information for the associated GPU.
+ * Requests to retrieve dma information use an array of one or more
+ * NV2080_CTRL_DMA_INFO structures.
+ *
+ * dmaInfoTblSize
+ *     This field specifies the number of valid entries in the dmaInfoTbl
+ *     array.
This value cannot exceed NV2080_CTRL_DMA_GET_INFO_MAX_ENTRIES. + * dmaInfoTbl + * This parameter contains the client's dma info table into + * which the dma info values will be transferred by the RM. + * The dma info table is an array of NV2080_CTRL_DMA_INFO structures. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_DMA_GET_INFO (0x20802503) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_DMA_INTERFACE_ID << 8) | NV2080_CTRL_DMA_GET_INFO_PARAMS_MESSAGE_ID" */ + +/* maximum number of NV2080_CTRL_DMA_INFO entries per request */ +#define NV2080_CTRL_DMA_GET_INFO_MAX_ENTRIES (256) + +#define NV2080_CTRL_DMA_GET_INFO_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_DMA_GET_INFO_PARAMS { + NvU32 dmaInfoTblSize; + /* + * C form: + * NV2080_CTRL_DMA_INFO dmaInfoTbl[NV2080_CTRL_DMA_GET_INFO_MAX_ENTRIES]; + */ + NV2080_CTRL_DMA_INFO dmaInfoTbl[NV2080_CTRL_DMA_GET_INFO_MAX_ENTRIES]; +} NV2080_CTRL_DMA_GET_INFO_PARAMS; + +typedef struct NV2080_CTRL_DMA_UPDATE_COMPTAG_INFO_TILE_INFO { + /*! + * 64KB aligned address of source 64KB tile for comptag reswizzle. + */ + NvU32 srcAddr; + + /*! + * 64KB aligned address of destination 64KB tile for comptag reswizzle. + */ + NvU32 dstAddr; + + /*! + * Comptag index assigned to the 64K sized tile relative to + * the compcacheline. Absolute comptag index would be: + * startComptagIndex + relComptagIndex. + */ + NvU16 relComptagIndex; +} NV2080_CTRL_DMA_UPDATE_COMPTAG_INFO_TILE_INFO; + +// _ctrl2080dma_h_ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h new file mode 100644 index 0000000..34fa680 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h @@ -0,0 +1,127 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080dmabuf.finn +// + + + +/* + * NV2080_CTRL_CMD_DMABUF_EXPORT_OBJECTS_TO_FD + * + * Exports RM vidmem and sysmem handles to a dma-buf fd. + * + * The objects in the 'handles' array are exported to the fd as range: + * [index, index + numObjects). + * + * A dma-buf fd is created the first time this control call is called. 
+ * The fd is an input parameter for subsequent calls to attach additional
+ * handles beyond NV2080_CTRL_DMABUF_MAX_HANDLES.
+ *
+ * fd
+ *     A dma-buf file descriptor. If -1, a new FD will be created.
+ *
+ * totalObjects
+ *     The total number of objects that the client wishes to export to the FD.
+ *     This parameter will be honored only when the FD is getting created.
+ *
+ * numObjects
+ *     The number of handles the user wishes to export in this call.
+ *
+ * index
+ *     The index into the export fd at which to start exporting the handles in
+ *     'handles'. This index cannot overlap a previously used index.
+ *
+ * totalSize
+ *     The total size of memory being exported in bytes, needed to create the dma-buf.
+ *     This size includes the memory that will be exported in future export calls
+ *     for this dma-buf.
+ *
+ * mappingType
+ *     The type of mapping that must be used for this dma-buf.
+ *     See NV2080_CTRL_DMABUF_EXPORT_MAPPING_TYPE_* for all possible values.
+ *
+ *     With type DEFAULT, the driver shall decide the type based on platform coherency:
+ *         C2C for coherent
+ *         PCIe BAR1 for non-coherent
+ *     The FORCE_PCIE type is a workaround for platforms with Grace (PCIe Gen4/Gen5 RPs) paired with
+ *     Gen6 parts like Blackwell GPUs and CX8 NICs, requiring a separate Gen6 PCIe link to maximize RDMA
+ *     bandwidth. This type allows the dma-buf to be mapped over this Gen6 PCIe link.
+ *
+ *     This call shall return NV_ERR_NOT_SUPPORTED if the FORCE_PCIE type is used on non-Grace platforms.
+ *
+ * bAllowMmap
+ *     Controls whether mmap is allowed on this dma-buf fd. This lets the user
+ *     enable mmap for testing or specific use cases rather than for all handles.
+ *
+ * handles
+ *     An array of {handle, offset, size} that describes the dma-buf.
+ *     The offsets and sizes must be OS page-size aligned.
+ *
+ * Limitations:
+ * 1. This call supports vidmem and sysmem objects.
+ * 2. All memory handles should belong to the same GPU or the same GPU MIG instance.
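+ *
+ * Illustrative sketch (not part of the original header; an NvRmControl-style
+ * dispatch entry point and the hVidmem/size values are assumed) exporting one
+ * vidmem object to a newly created dma-buf fd:
+ *
+ *   NV2080_CTRL_DMABUF_EXPORT_MEM_PARAMS params = { 0 };
+ *   params.fd = -1;                  // create a new dma-buf fd
+ *   params.totalObjects = 1;
+ *   params.numObjects = 1;
+ *   params.index = 0;
+ *   params.totalSize = size;
+ *   params.mappingType = NV2080_CTRL_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT;
+ *   params.handles[0].hMemory = hVidmem;
+ *   params.handles[0].offset = 0;    // must be OS page-size aligned
+ *   params.handles[0].size = size;   // must be OS page-size aligned
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_DMABUF_EXPORT_OBJECTS_TO_FD,
+ *                        &params, sizeof(params));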
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_NO_MEMORY + * NV_ERR_OPERATING_SYSTEM + * NV_ERR_IN_USE + * NV_ERR_INVALID_OBJECT + * NV_ERR_INVALID_OBJECT_PARENT + */ +#define NV2080_CTRL_CMD_DMABUF_EXPORT_OBJECTS_TO_FD (0x20803a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_DMABUF_INTERFACE_ID << 8) | NV2080_CTRL_DMABUF_EXPORT_MEM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_DMABUF_MAX_HANDLES 128 + +#define NV2080_CTRL_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT (0x00000000U) +#define NV2080_CTRL_DMABUF_EXPORT_MAPPING_TYPE_FORCE_PCIE (0x00000001U) + +typedef struct NV2080_CTRL_DMABUF_MEM_HANDLE_INFO { + NvHandle hMemory; + NV_DECLARE_ALIGNED(NvU64 offset, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); +} NV2080_CTRL_DMABUF_MEM_HANDLE_INFO; + +#define NV2080_CTRL_DMABUF_EXPORT_MEM_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_DMABUF_EXPORT_MEM_PARAMS { + NvS32 fd; + NvU32 totalObjects; + NvU32 numObjects; + NvU32 index; + NV_DECLARE_ALIGNED(NvU64 totalSize, 8); + NvU8 mappingType; + NvBool bAllowMmap; + NV_DECLARE_ALIGNED(NV2080_CTRL_DMABUF_MEM_HANDLE_INFO handles[NV2080_CTRL_DMABUF_MAX_HANDLES], 8); +} NV2080_CTRL_DMABUF_EXPORT_MEM_PARAMS; + +// _ctrl2080dmabuf_h_ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h new file mode 100644 index 0000000..bc933b2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h @@ -0,0 +1,247 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080ecc.finn +// + +#include "ctrl/ctrl2080/ctrl2080base.h" + + + +#define NV2080_CTRL_CMD_ECC_GET_CLIENT_EXPOSED_COUNTERS (0x20803400U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_ECC_INTERFACE_ID << 8) | NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS + * + * sramParityUncorrectedUnique [out] + * sramSecDedUncorrectedUnique [out] + * sramCorrectedUnique [out] + * dramUncorrectedTotal [out] + * dramCorrectedTotal [out] + * Aggregate error counts for SRAM and DRAM. + * + * lastClearedTimestamp [out] + * unix-epoch based timestamp. 
This field indicates when the error counters
+ *     were last cleared by the user.
+ *
+ * sramBucketL2 [out]
+ * sramBucketSM [out]
+ * sramBucketPcie [out]
+ * sramBucketFirmware [out]
+ * sramBucketOther [out]
+ *     Aggregate unique uncorrectable error counts for SRAM buckets.
+ *
+ * sramErrorThresholdExceeded [out]
+ *     Boolean flag which is set if the SRAM error threshold was exceeded
+ */
+
+#define NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS_MESSAGE_ID (0x0U)
+
+typedef struct NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 sramParityUncorrectedUnique, 8);
+    NV_DECLARE_ALIGNED(NvU64 sramSecDedUncorrectedUnique, 8);
+    NV_DECLARE_ALIGNED(NvU64 sramCorrectedUnique, 8);
+    NV_DECLARE_ALIGNED(NvU64 dramUncorrectedTotal, 8);
+    NV_DECLARE_ALIGNED(NvU64 dramCorrectedTotal, 8);
+
+    NvU32 lastClearedTimestamp;
+
+    NV_DECLARE_ALIGNED(NvU64 sramBucketL2, 8);
+    NV_DECLARE_ALIGNED(NvU64 sramBucketSM, 8);
+    NV_DECLARE_ALIGNED(NvU64 sramBucketPcie, 8);
+    NV_DECLARE_ALIGNED(NvU64 sramBucketFirmware, 8);
+    NV_DECLARE_ALIGNED(NvU64 sramBucketOther, 8);
+
+    NvBool sramErrorThresholdExceeded;
+} NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS;
+
+/*
+ * NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS
+ *
+ * Reports count of volatile errors
+ *
+ * sramCorUni [out]:
+ *     Unique correctable SRAM error count
+ * sramUncParityUni [out]:
+ *     Unique uncorrectable SRAM parity error count
+ * sramUncSecDedUni [out]:
+ *     Unique uncorrectable SRAM SEC-DED error count
+ * dramCorTot [out]:
+ *     Total correctable DRAM error count
+ * dramUncTot [out]:
+ *     Total uncorrectable DRAM error count
+ */
+#define NV2080_CTRL_CMD_ECC_GET_VOLATILE_COUNTS (0x20803401U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_ECC_INTERFACE_ID << 8) | NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 sramCorUni, 8);
+    NV_DECLARE_ALIGNED(NvU64 sramUncParityUni, 8);
+    NV_DECLARE_ALIGNED(NvU64 sramUncSecDedUni, 8);
+    NV_DECLARE_ALIGNED(NvU64 dramCorTot, 8);
+    NV_DECLARE_ALIGNED(NvU64 dramUncTot, 8);
+} NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS;
+
+typedef struct eccLocation {
+    NvU32 location;
+    NvU32 sublocation;
+    NvU32 extlocation;
+} eccLocation;
+
+/*
+ * NV2080_CTRL_ECC_SRAM_UNIQUE_UNCORR_COUNTS_ENTRY
+ *
+ * unit
+ *     The unit the error occurred in
+ * location
+ *     The location info for the error
+ * address
+ *     The address of the error
+ * bIsParity
+ *     True if error is parity error, false if error is SEC-DED error
+ * count
+ *     The number of uncorrectable unique error counts that occurred
+ */
+
+typedef struct NV2080_CTRL_ECC_SRAM_UNIQUE_UNCORR_COUNTS_ENTRY {
+    NvU32 unit;
+    eccLocation location;
+    NvU32 address;
+    NvBool bIsParity;
+    NvU32 count;
+} NV2080_CTRL_ECC_SRAM_UNIQUE_UNCORR_COUNTS_ENTRY;
+
+/*
+ * NV2080_CTRL_CMD_ECC_GET_SRAM_UNIQUE_UNCORR_COUNTS
+ *
+ * This command is used to query the ECC inforom object and determine the number
+ * of unique uncorrectable error counts that occurred at an address.
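+ *
+ * For example (illustrative only, not part of the original header; an
+ * NvRmControl-style dispatch entry point is assumed), the per-address counts
+ * can be summed as follows:
+ *
+ *   NV2080_CTRL_ECC_GET_SRAM_UNIQUE_UNCORR_COUNTS_PARAMS p = { 0 };
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_ECC_GET_SRAM_UNIQUE_UNCORR_COUNTS,
+ *                        &p, sizeof(p));
+ *   for (i = 0; (status == NV_OK) && (i < p.entryCount); i++)
+ *       total += p.entries[i].count;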
+ *
+ * entryCount
+ *     The number of entries
+ *
+ * entries
+ *     The array of NV2080_CTRL_ECC_SRAM_UNIQUE_UNCORR_COUNTS_ENTRY
+ */
+
+#define NV2080_CTRL_ECC_SRAM_UNIQUE_UNCORR_COUNTS_MAX_COUNT 600
+
+#define NV2080_CTRL_CMD_ECC_GET_SRAM_UNIQUE_UNCORR_COUNTS (0x20803402) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_ECC_INTERFACE_ID << 8) | NV2080_CTRL_ECC_GET_SRAM_UNIQUE_UNCORR_COUNTS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_ECC_GET_SRAM_UNIQUE_UNCORR_COUNTS_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_ECC_GET_SRAM_UNIQUE_UNCORR_COUNTS_PARAMS {
+    NvU32 entryCount;
+    NV2080_CTRL_ECC_SRAM_UNIQUE_UNCORR_COUNTS_ENTRY entries[NV2080_CTRL_ECC_SRAM_UNIQUE_UNCORR_COUNTS_MAX_COUNT];
+} NV2080_CTRL_ECC_GET_SRAM_UNIQUE_UNCORR_COUNTS_PARAMS;
+
+#define NV2080_CTRL_ECC_ERROR_TYPE_CORRECTED 0
+#define NV2080_CTRL_ECC_ERROR_TYPE_UNCORRECTED 1
+
+/*
+ * NV2080_CTRL_CMD_ECC_INJECT_ERROR
+ *
+ * This command is used to inject ECC errors.
+ *
+ * unit
+ *     The ECC unit
+ *
+ * errorType
+ *     The type of error to be injected
+ *
+ * location
+ *     The location within the ECC unit to be injected
+ *
+ * flags
+ *     Specific injection flags
+ *
+ * address
+ *     Specific injection address for DRAM
+ */
+#define NV2080_CTRL_CMD_ECC_INJECT_ERROR (0x20803403) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_ECC_INTERFACE_ID << 8) | NV2080_CTRL_ECC_INJECT_ERROR_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_ECC_INJECT_ERROR_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV2080_CTRL_ECC_INJECT_ERROR_PARAMS {
+    NvU32 unit;
+    NvU8 errorType;
+    eccLocation location;
+    NvU32 flags;
+    NV_DECLARE_ALIGNED(NvU64 address, 8);
+} NV2080_CTRL_ECC_INJECT_ERROR_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_ECC_GET_REPAIR_STATUS
+ *
+ * This command is used to query the status of TPC/Channel repair
+ *
+ * bTpcRepairPending
+ *     Boolean indicating if TPC repair is pending
+ * bChannelRepairPending
+ *     Boolean indicating if Channel repair is pending
+ */
+
+#define NV2080_CTRL_CMD_ECC_GET_REPAIR_STATUS (0x20803404) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_ECC_INTERFACE_ID << 8) | NV2080_CTRL_ECC_GET_REPAIR_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_ECC_GET_REPAIR_STATUS_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV2080_CTRL_ECC_GET_REPAIR_STATUS_PARAMS {
+    NvBool bTpcRepairPending;
+    NvBool bChannelRepairPending;
+} NV2080_CTRL_ECC_GET_REPAIR_STATUS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_ECC_INJECTION_SUPPORTED
+ *
+ * Determines if error injection is supported for a given HW unit
+ *
+ * unit [in]:
+ *     The ECC protected unit for which ECC injection support is being checked
+ *
+ * bCorrectableSupported [out]:
+ *     Boolean value that shows if correctable errors can be injected
+ *
+ * bUncorrectableSupported [out]:
+ *     Boolean value that shows if uncorrectable errors can be injected
+*/
+
+#define NV2080_CTRL_CMD_ECC_INJECTION_SUPPORTED (0x20803405) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_ECC_INTERFACE_ID << 8) | NV2080_CTRL_ECC_INJECTION_SUPPORTED_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_ECC_INJECTION_SUPPORTED_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV2080_CTRL_ECC_INJECTION_SUPPORTED_PARAMS {
+    NvU32 unit;
+    NvBool bCorrectableSupported;
+    NvBool bUncorrectableSupported;
+} NV2080_CTRL_ECC_INJECTION_SUPPORTED_PARAMS;
+
+/* _ctrl2080ecc_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
new file mode 100644
index 0000000..22c8981
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
@@ -0,0 +1,373 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080event.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+#include "nv_vgpu_types.h"
+/* NV20_SUBDEVICE_XX event-related control commands and parameters */
+
+/*
+ * NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION
+ *
+ * This command sets event notification state for the associated subdevice.
+ * This command requires that an instance of NV01_EVENT has been previously
+ * bound to the associated subdevice object.
+ *
+ * event
+ *     This parameter specifies the type of event to which the specified
+ *     action is to be applied. This parameter must specify a valid
+ *     NV2080_NOTIFIERS value (see cl2080.h for more details) and should
+ *     not exceed one less than NV2080_NOTIFIERS_MAXCOUNT.
+ * action
+ *     This parameter specifies the desired event notification action.
+ *     Valid notification actions include:
+ *       NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE
+ *         This action disables event notification for the specified
+ *         event for the associated subdevice object.
+ *       NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE
+ *         This action enables single-shot event notification for the
+ *         specified event for the associated subdevice object.
+ *       NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT
+ *         This action enables repeated event notification for the specified
+ *         event for the associated subdevice object.
+ * bNotifyState
+ *     This boolean is used to indicate the current state of the notifier
+ *     at the time of event registration.
+ *     This is optional, and its semantics need to be agreed upon by the
+ *     notifier and the client using the notifier.
+ * info32
+ *     This is used to send 32-bit initial state info with the notifier at
+ *     the time of event registration.
+ * info16
+ *     This is used to send 16-bit initial state info with the notifier at
+ *     the time of event registration.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
+    NvU32 event;
+    NvU32 action;
+    NvBool bNotifyState;
+    NvU32 info32;
+    NvU16 info16;
+} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
+
+/* valid action values */
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE (0x00000000)
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE (0x00000001)
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002)
+
+/* XUSB/PPC D-state defines */
+#define NV2080_EVENT_DSTATE_XUSB_D0 (0x00000000)
+#define NV2080_EVENT_DSTATE_XUSB_D3 (0x00000003)
+#define NV2080_EVENT_DSTATE_XUSB_INVALID (0xFFFFFFFF)
+#define NV2080_EVENT_DSTATE_PPC_D0 (0x00000000)
+#define NV2080_EVENT_DSTATE_PPC_D3 (0x00000003)
+#define NV2080_EVENT_DSTATE_PPC_INVALID (0xFFFFFFFF)
+
+// HDACODEC Device DState; D3_COLD is only for verbose mapping, it cannot be logged
+typedef enum NV2080_EVENT_HDACODEC_DSTATE {
+    NV2080_EVENT_HDACODEC_DEVICE_DSTATE_D0 = 0,
+    NV2080_EVENT_HDACODEC_DEVICE_DSTATE_D1 = 1,
+    NV2080_EVENT_HDACODEC_DEVICE_DSTATE_D2 = 2,
+    NV2080_EVENT_HDACODEC_DEVICE_DSTATE_D3_HOT = 3,
+    NV2080_EVENT_HDACODEC_DEVICE_DSTATE_D3_COLD = 4,
+    NV2080_EVENT_HDACODEC_DEVICE_DSTATE_DSTATE_MAX = 5,
+} NV2080_EVENT_HDACODEC_DSTATE;
+
+/*
+ * NV2080_CTRL_CMD_EVENT_SET_TRIGGER
+ *
+ * This command triggers a software event for the associated subdevice.
+ * This command accepts no parameters.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_EVENT_SET_TRIGGER (0x20800302) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | 0x2" */
+
+/*
+ * NV2080_CTRL_CMD_EVENT_SET_MEMORY_NOTIFIES
+ *
+ * hMemory
+ *     This parameter specifies the handle of the memory object
+ *     that identifies the memory address translation for this
+ *     subdevice instance's notification(s). The beginning of the
+ *     translation points to an array of notification data structures.
+ *     The size of the translation must be at least large enough to hold the
+ *     maximum number of notification data structures identified by
+ *     the NV2080_MAX_NOTIFIERS value.
+ *     Legal argument values must be instances of the following classes:
+ *       NV01_NULL
+ *       NV04_MEMORY
+ *     When hMemory specifies the NV01_NULL_OBJECT value, any existing
+ *     memory translation connection is cleared. There must not be any
+ *     pending notifications when this command is issued.
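+ *
+ * A minimal usage sketch (illustrative only; assumes previously allocated
+ * hClient/hSubdevice handles, an NV04_MEMORY allocation hNotifyMem large
+ * enough for NV2080_MAX_NOTIFIERS notification structures, and an
+ * rmControl() wrapper standing in for the client's NVOS54 RM control
+ * entry point):
+ *
+ *     NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS p = { 0 };
+ *     p.hMemory = hNotifyMem;      // or NV01_NULL_OBJECT to clear
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_EVENT_SET_MEMORY_NOTIFIES,
+ *                        &p, sizeof(p));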
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV2080_CTRL_CMD_EVENT_SET_MEMORY_NOTIFIES (0x20800303) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS {
+    NvHandle hMemory;
+} NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS;
+
+#define NV2080_EVENT_MEMORY_NOTIFIES_STATUS_NOTIFIED 0
+#define NV2080_EVENT_MEMORY_NOTIFIES_STATUS_PENDING 1
+#define NV2080_EVENT_MEMORY_NOTIFIES_STATUS_ERROR 2
+
+/*
+ * NV2080_CTRL_CMD_EVENT_SET_SEMAPHORE_MEMORY
+ *
+ * hSemMemory
+ *     This parameter specifies the handle of the memory object that
+ *     identifies the semaphore memory associated with this subdevice
+ *     event notification. Once this is set, RM will generate an event
+ *     only when there is a change in the semaphore value. It is
+ *     expected that the semaphore memory value will be updated by
+ *     the GPU, indicating that there is an event pending. This
+ *     command is used by the VGX plugin to determine which virtual
+ *     machine has generated a particular event.
+ *
+ * semOffset
+ *     This parameter indicates the memory offset of the semaphore.
+ *
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_EVENT_SET_SEMAPHORE_MEMORY (0x20800304) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS {
+    NvHandle hSemMemory;
+    NvU32 semOffset;
+} NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_EVENT_SET_GUEST_MSI
+ *
+ * hSemMemory
+ *     This parameter specifies the handle of the memory object that
+ *     identifies the semaphore memory associated with this subdevice
+ *     event notification. Once this is set, RM will generate an event
+ *     only when there is a change in the semaphore value. It is
+ *     expected that the semaphore memory value will be updated by
+ *     the GPU, indicating that there is an event pending. This
+ *     command is used by the VGX plugin to determine which virtual
+ *     machine has generated a particular event.
+ *
+ * guestMSIAddr
+ *     This parameter indicates the guest-allocated MSI address.
+ *
+ * guestMSIData
+ *     This parameter indicates the MSI data set by the guest OS.
+ *
+ * vgpuUuid
+ *     This parameter specifies the UUID of the vGPU assigned to the VM.
+ *
+ * domainId
+ *     This parameter specifies the unique guest virtual machine identifier.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_EVENT_SET_GUEST_MSI (0x20800305) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_GUEST_MSI_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_EVENT_SET_GUEST_MSI_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV2080_CTRL_EVENT_SET_GUEST_MSI_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 guestMSIAddr, 8);
+    NvU32 guestMSIData;
+    NvHandle hSemMemory;
+    NvBool isReset;
+    NvU8 vgpuUuid[VM_UUID_SIZE];
+    NV_DECLARE_ALIGNED(NvU64 domainId, 8);
+} NV2080_CTRL_EVENT_SET_GUEST_MSI_PARAMS;
+
+
+/*
+ * NV2080_CTRL_CMD_EVENT_SET_SEMA_MEM_VALIDATION
+ *
+ * hSemMemory
+ *     This parameter specifies the handle of the memory object that
+ *     identifies the semaphore memory associated with this subdevice
+ *     event notification. Once this is set, RM will generate an event
+ *     only when there is a change in the semaphore value. It is
+ *     expected that the semaphore memory value will be updated by
+ *     the GPU, indicating that there is an event pending. This
+ *     command is used by the VGX plugin to determine which virtual
+ *     machine has generated a particular event.
+ *
+ * isSemaMemValidationEnabled
+ *     This parameter is used to enable/disable the check for a change in
+ *     the semaphore value while generating an event.
+ *
+ * Possible status values returned are:
+ *   NVOS_STATUS_SUCCESS
+ *   NVOS_STATUS_ERROR_INVALID_OBJECT_HANDLE
+ *   NVOS_STATUS_ERROR_INVALID_ARGUMENT
+ */
+
+
+#define NV2080_CTRL_CMD_EVENT_SET_SEMA_MEM_VALIDATION (0x20800306) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS {
+    NvHandle hSemMemory;
+    NvBool isSemaMemValidationEnabled;
+} NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS;
+
+
+/*
+ * NV2080_CTRL_CMD_EVENT_SET_TRIGGER_FIFO
+ *
+ * This command triggers a FIFO event for the associated subdevice.
+ *
+ * hEvent
+ *     Handle of the event that should be notified. If zero, all
+ *     non-stall interrupt events for this subdevice will be notified.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_EVENT_SET_TRIGGER_FIFO (0x20800308) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS_MESSAGE_ID (0x8U)
+
+typedef struct NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS {
+    NvHandle hEvent;
+} NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_EVENT_VIDEO_BIND_EVTBUF
+ *
+ * This command is used to create a video bind-point to an event buffer that
+ * is filtered by UID.
+ *
+ * hEventBuffer[IN]
+ *     The event buffer to bind to
+ *
+ * recordSize[IN]
+ *     The size of the FECS record in bytes
+ *
+ * levelOfDetail[IN]
+ *     One of NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD_:
+ *       FULL: Report all CtxSw events
+ *       SIMPLE: Report engine start and engine end events only
+ *       CUSTOM: Report events in the eventFilter field
+ *     NOTE: RM may override the level-of-detail depending on the caller
+ *
+ * eventFilter[IN]
+ *     Bitmask of events to report if levelOfDetail is CUSTOM
+ *
+ * bAllUsers[IN]
+ *     Only report video data for the current user if false, for all users
+ *     if true
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_EVENT_VIDEO_BIND_EVTBUF (0x20800309) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_PARAMS_MESSAGE_ID" */
+
+typedef enum NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD {
+    NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD_FULL = 0,
+    NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD_SIMPLE = 1,
+    NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD_CUSTOM = 2,
+} NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD;
+
+#define NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_PARAMS {
+    NvHandle hEventBuffer;
+    NvU32 recordSize;
+    NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD levelOfDetail;
+    NvU32 eventFilter;
+    NvBool bAllUsers;
+} NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_PARAMS;
+
+
+/*
+ * NV2080_CTRL_CMD_EVENT_RATS_GSP_TRACE_BIND_EVTBUF
+ *
+ * This command is used to create a RATS tracing bind-point to an event
+ * buffer.
+ *
+ * hEventBuffer[IN]
+ *     The event buffer to bind to
+ *
+ * tracepointMask[IN]
+ *     Bitmask for selecting tracepoints
+ *
+ * gspLoggingBufferSize[IN]
+ *     User-defined size of the GSP-owned event logging buffer
+ *
+ * gspLoggingBufferWatermark[IN]
+ *     User-defined watermark that triggers an RPC of traces to the kernel
+ *     HINT: set higher for more frequent trace updates
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_EVENT_RATS_GSP_TRACE_BIND_EVTBUF (0x2080030a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_RATS_GSP_TRACE_BIND_EVTBUF_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_EVENT_RATS_GSP_TRACE_BIND_EVTBUF_PARAMS_MESSAGE_ID (0xAU)
+
+typedef struct NV2080_CTRL_EVENT_RATS_GSP_TRACE_BIND_EVTBUF_PARAMS {
+    NvHandle hEventBuffer;
+    NV_DECLARE_ALIGNED(NvU64 tracepointMask, 8);
+    NvU32 gspLoggingBufferSize;
+    NvU32 gspLoggingBufferWatermark;
+} NV2080_CTRL_EVENT_RATS_GSP_TRACE_BIND_EVTBUF_PARAMS;
+
+/* _ctrl2080event_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h
new file mode 100644
index 0000000..1221171
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h
@@ -0,0 +1,33 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080fan.finn
+//
+
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
new file mode 100644
index 0000000..32430a1
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
@@ -0,0 +1,2896 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080fb.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/* NV20_SUBDEVICE_XX fb control commands and parameters */
+
+#include "nvlimits.h"
+#include "nvcfg_sdk.h"
+
+/*
+ * NV2080_CTRL_FB_INFO
+ *
+ * This structure represents a single 32bit fb engine value. Clients
+ * request a particular fb engine value by specifying a unique fb
+ * information index.
+ *
+ * Legal fb information index values are:
+ *   NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_COUNT
+ *     This index is used to request the number of tiled regions supported
+ *     by the associated subdevice. The return value is GPU
+ *     implementation-dependent.
+ *     A return value of 0 indicates the GPU does not support tiling.
+ *   NV2080_CTRL_FB_INFO_INDEX_COMPRESSION_SIZE
+ *     This index is used to request the amount of compression (in bytes)
+ *     supported by the associated subdevice. The return value is GPU
+ *     implementation-dependent. A return value of 0 indicates the GPU
+ *     does not support compression.
+ *   NV2080_CTRL_FB_INFO_INDEX_DRAM_PAGE_STRIDE
+ *     This index is used to request the DRAM page stride (in bytes)
+ *     supported by the associated subdevice. The return value is GPU
+ *     implementation-dependent.
+ *   NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_FREE_COUNT
+ *     This index is used to request the number of free tiled regions on
+ *     the associated subdevice. The return value represents the current
+ *     number of free tiled regions at the time the command is processed and
+ *     is not guaranteed to remain unchanged. A return value of 0 indicates
+ *     that there are no available tiled regions on the associated subdevice.
+ *   NV2080_CTRL_FB_INFO_INDEX_PARTITION_COUNT
+ *     This index is used to request the number of frame buffer partitions
+ *     on the associated subdevice. Starting with Fermi there are two units
+ *     referred to as framebuffer partitions. On those chips this index
+ *     returns the number of FBPAs. For the number of FBPs use
+ *     NV2080_CTRL_FB_INFO_INDEX_FBP_COUNT.
+ *     This is an SMC-aware attribute, thus necessary partition subscription
+ *     is required if the device is partitioned.
+ *   NV2080_CTRL_FB_INFO_INDEX_RAM_SIZE
+ *     This index is used to request the amount of framebuffer memory in
+ *     kilobytes physically present on the associated subdevice. This
+ *     value will never exceed the value reported by
+ *     NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE.
+ *     This is an SMC-aware attribute, so the per-partition framebuffer
+ *     memory size will be returned when the client has a partition
+ *     subscription.
+ *   NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE
+ *     This index is used to request the total amount of video memory in
+ *     kilobytes for use with the associated subdevice. This value will
+ *     reflect both framebuffer memory as well as any system memory dedicated
+ *     for use with the subdevice.
+ *     This is an SMC-aware attribute, so the per-partition video memory size
+ *     will be returned when the client has a partition subscription.
+ *   NV2080_CTRL_FB_INFO_INDEX_HEAP_SIZE
+ *     This index is used to request the amount of total RAM in kilobytes
+ *     available for user allocations. This value reflects the total ram
+ *     size less the amount of memory reserved for internal use.
+ *     This is an SMC-aware attribute, thus necessary partition subscription
+ *     is required if the device is partitioned.
+ *   NV2080_CTRL_FB_INFO_INDEX_HEAP_START
+ *     This index is used to request the offset for the start of heap in
+ *     kilobytes.
+ *     This is an SMC-aware attribute, thus necessary partition subscription
+ *     is required if the device is partitioned.
+ *   NV2080_CTRL_FB_INFO_INDEX_HEAP_FREE
+ *     This index is used to request the available amount of video memory in
+ *     kilobytes for use with the associated subdevice or the SMC partition.
+ *     This is an SMC-aware attribute, thus necessary partition subscription
+ *     is required to query per-partition information, if the device is
+ *     partitioned. Alternatively, the SMC/MIG monitor capability can be
+ *     acquired to query aggregate available memory across all the valid
+ *     partitions.
+ *   NV2080_CTRL_FB_INFO_INDEX_MAPPABLE_HEAP_SIZE
+ *     This index reflects the amount of heap memory in kilobytes that
+ *     is accessible by the CPU.
+ *     On subdevices with video memory sizes that exceed the amount that can
+ *     be bus mappable this value will be less than that reported by
+ *     NV2080_CTRL_FB_INFO_INDEX_HEAP_SIZE.
+ *     This is an SMC-aware attribute, thus necessary partition subscription
+ *     is required if the device is partitioned.
+ *   NV2080_CTRL_FB_INFO_INDEX_BUS_WIDTH
+ *     This index is used to request the FB bus bandwidth on the associated
+ *     subdevice.
+ *   NV2080_CTRL_FB_INFO_INDEX_RAM_CFG
+ *     This index is used to request the implementation-dependent RAM
+ *     configuration value of the associated subdevice.
+ *   NV2080_CTRL_FB_INFO_INDEX_RAM_TYPE
+ *     This index is used to request the type of RAM used for the framebuffer
+ *     on the associated subdevice. Legal RAM types include:
+ *       NV2080_CTRL_FB_INFO_RAM_TYPE_UNKNOWN
+ *       NV2080_CTRL_FB_INFO_RAM_TYPE_SDRAM
+ *       NV2080_CTRL_FB_INFO_RAM_TYPE_DDR1
+ *       NV2080_CTRL_FB_INFO_RAM_TYPE_DDR2
+ *       NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR2
+ *       NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR3
+ *       NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR4
+ *       NV2080_CTRL_FB_INFO_RAM_TYPE_DDR3
+ *       NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR5
+ *       NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR5X
+ *       NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR6
+ *       NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR6X
+ *       NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR2
+ *       NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR4
+ *       NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR5
+ *   NV2080_CTRL_FB_INFO_INDEX_BANK_COUNT
+ *     This index is used to request the number of FB banks on the associated
+ *     subdevice.
+ *   NV2080_CTRL_FB_INFO_INDEX_OVERLAY_OFFSET_ADJUSTMENT
+ *     This index is used to request the offset relative to the start of the
+ *     overlay surface(s), in bytes, at which scanout should happen if the
+ *     primary and the overlay surfaces are all aligned on large page
+ *     boundaries.
+ *   NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_SPACE_SIZE_KB
+ *     This index is used to request the size of the GPU's virtual address
+ *     space in kilobytes.
+ *   NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_HEAP_SIZE_KB
+ *     This index is used to request the size of the GPU's virtual address
+ *     space heap (minus RM-reserved space) in kilobytes.
+ *   NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_MAPPBLE_SIZE_KB
+ *     This index is used to request the size of the GPU's BAR1 mappable
+ *     virtual address space in kilobytes.
+ *   NV2080_CTRL_FB_INFO_INDEX_EFFECTIVE_BW
+ *     This index is deprecated, and returns a zero value.
+ *   NV2080_CTRL_FB_INFO_INDEX_PARTITION_MASK
+ *   NV2080_CTRL_FB_INFO_INDEX_PARTITION_MASK_0
+ *   NV2080_CTRL_FB_INFO_INDEX_PARTITION_MASK_1
+ *     This index is used to request the mask of currently active partitions.
+ *     Each active partition has an ID that's equivalent to the corresponding
+ *     bit position in the mask.
+ *     This is an SMC-aware attribute, thus necessary partition subscription
+ *     is required if the device is partitioned.
+ *     This value is moving from 32 bits to 64 bits, so PARTITION_MASK
+ *     (kept for backwards compatibility on older chips) is replaced on
+ *     newer chips by:
+ *       PARTITION_MASK_0 for the lower 32 bits
+ *       PARTITION_MASK_1 for the upper 32 bits
+ *     Note that PARTITION_MASK and PARTITION_MASK_0 are handled the same,
+ *     and use the same enum value.
+ *   NV2080_CTRL_FB_INFO_INDEX_LTC_MASK
+ *   NV2080_CTRL_FB_INFO_INDEX_LTC_MASK_0
+ *   NV2080_CTRL_FB_INFO_INDEX_LTC_MASK_1
+ *     This index is used to request the mask of currently active LTCs.
+ *     Each active LTC has an ID that's equivalent to the corresponding
+ *     bit position in the mask.
+ *     This is an SMC-aware attribute, thus necessary partition subscription
+ *     is required if the device is partitioned.
+ *     This value is moving from 32 bits to 64 bits, so LTC_MASK
+ *     (kept for backwards compatibility on older chips) is replaced on
+ *     newer chips by:
+ *       LTC_MASK_0 for the lower 32 bits
+ *       LTC_MASK_1 for the upper 32 bits
+ *     Note that LTC_MASK and LTC_MASK_0 are handled the same, and
+ *     use the same enum value.
+ *   NV2080_CTRL_FB_INFO_INDEX_VISTA_RESERVED_HEAP_SIZE
+ *     This index is used to request the amount of total RAM in kilobytes
+ *     reserved for internal RM allocations on Vista. This will need to
+ *     be subtracted from the total heap size to get the amount available to
+ *     KMD.
+ *     This is an SMC-aware attribute, thus necessary partition subscription
+ *     is required if the device is partitioned.
+ *   NV2080_CTRL_FB_INFO_INDEX_RAM_LOCATION
+ *     This index is used to distinguish between different memory
+ *     configurations.
+ *   NV2080_CTRL_FB_INFO_INDEX_FB_IS_BROKEN
+ *     This index is used to check if the FB is functional.
+ *   NV2080_CTRL_FB_INFO_INDEX_FBP_COUNT
+ *     This index is used to get the number of FBPs on the subdevice. This
+ *     field is not to be confused with
+ *     NV2080_CTRL_FB_INFO_INDEX_PARTITION_COUNT (returns the number of
+ *     FBPAs). Starting with Fermi the term 'partition' is ambiguous: both
+ *     FBP and FBPA mean FB partitions. The FBPA is the low-level DRAM
+ *     controller, while an FBP is the aggregation of one or more FBPAs,
+ *     L2, ROP, and some other units.
+ *     This is an SMC-aware attribute, thus necessary partition subscription
+ *     is required if the device is partitioned.
+ *   NV2080_CTRL_FB_INFO_INDEX_L2CACHE_SIZE
+ *     This index is used to get the size of the L2 cache in Bytes.
+ *     A value of zero indicates that the L2 cache isn't supported on the
+ *     associated subdevice.
+ *   NV2080_CTRL_FB_INFO_INDEX_MEMORYINFO_VENDOR_ID
+ *     This index is used to get the memory vendor ID information from
+ *     the Memory Information Table in the VBIOS. Legal memory Vendor ID
+ *     values include:
+ *       NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_UNKNOWN
+ *       NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_RESERVED
+ *       NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_SAMSUNG
+ *       NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_QIMONDA
+ *       NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ELPIDA
+ *       NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ETRON
+ *       NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_NANYA
+ *       NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_HYNIX
+ *       NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_MOSEL
+ *       NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_WINBOND
+ *       NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ESMT
+ *       NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_MICRON
+ *   NV2080_CTRL_FB_INFO_INDEX_BAR1_AVAIL_SIZE
+ *     This index is used to request the amount of unused BAR1 space. The
+ *     data returned is a value in KB. It is not guaranteed to be entirely
+ *     accurate since it is a snapshot at a particular time and can
+ *     change quickly.
+ *   NV2080_CTRL_FB_INFO_INDEX_BAR1_MAX_CONTIGUOUS_AVAIL_SIZE
+ *     This index is used to request the size of the largest unused
+ *     contiguous block in BAR1 space. The data returned is a value in KB.
+ *     It is not guaranteed to be entirely accurate since it is a snapshot
+ *     at a particular time and can change quickly.
+ *   NV2080_CTRL_FB_INFO_INDEX_USABLE_RAM_SIZE
+ *     This index is used to request the amount of usable framebuffer memory
+ *     in kilobytes physically present on the associated subdevice. This
+ *     value will never exceed the value reported by
+ *     NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE.
+ *     This is an SMC-aware attribute, thus necessary partition subscription
+ *     is required if the device is partitioned.
+ *   NV2080_CTRL_FB_INFO_INDEX_LTC_COUNT
+ *     Returns the active LTC count across all active FBPs.
+ *     This is an SMC-aware attribute, thus necessary partition subscription
+ *     is required if the device is partitioned.
+ *   NV2080_CTRL_FB_INFO_INDEX_LTS_COUNT
+ *     Returns the active LTS count across all active LTCs.
+ *     This is an SMC-aware attribute, thus necessary partition subscription
+ *     is required if the device is partitioned.
+ *   NV2080_CTRL_FB_INFO_INDEX_PSEUDO_CHANNEL_MODE
+ *     This is used to identify if pseudo-channel mode is enabled for HBM
+ *   NV2080_CTRL_FB_INFO_INDEX_SMOOTHDISP_RSVD_BAR1_SIZE
+ *     This is used by the WDDM KMD to determine whether, and how much,
+ *     BAR1 RM has reserved for smooth transition
+ *   NV2080_CTRL_FB_INFO_INDEX_HEAP_OFFLINE_SIZE
+ *     Returns the total size of all dynamically offlined pages in KiB
+ *   NV2080_CTRL_FB_INFO_INDEX_1TO1_COMPTAG_ENABLED
+ *     Returns true if 1to1 comptag is enabled
+ *   NV2080_CTRL_FB_INFO_INDEX_SUSPEND_RESUME_RSVD_SIZE
+ *     Returns the total size of the memory (FB) that will be saved/restored
+ *     during a save/restore cycle
+ *   NV2080_CTRL_FB_INFO_INDEX_ALLOW_PAGE_RETIREMENT
+ *     Returns true if page retirement is allowed
+ *   NV2080_CTRL_FB_INFO_POISON_FUSE_ENABLED
+ *     Returns true if the poison fuse is enabled
+ *   NV2080_CTRL_FB_INFO_FBPA_ECC_ENABLED
+ *     Returns true if ECC is enabled for FBPA
+ *   NV2080_CTRL_FB_INFO_DYNAMIC_PAGE_OFFLINING_ENABLED
+ *     Returns true if dynamic page blacklisting is enabled
+ *   NV2080_CTRL_FB_INFO_INDEX_FORCED_BAR1_64KB_MAPPING_ENABLED
+ *     Returns true if 64KB mapping on BAR1 is force-enabled
+ *   NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_SIZE
+ *     Returns the P2P mailbox size to be allocated by the client.
+ *     Returns 0 if the P2P mailbox is allocated by RM.
+ *   NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_ALIGNMENT
+ *     Returns the P2P mailbox alignment requirement.
+ *     Returns 0 if the P2P mailbox is allocated by RM.
+ *   NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_BAR1_MAX_OFFSET_64KB
+ *     Returns the P2P mailbox max offset requirement.
+ *     Returns 0 if the P2P mailbox is allocated by RM.
+ *   NV2080_CTRL_FB_INFO_INDEX_PROTECTED_MEM_SIZE_TOTAL_KB
+ *     Returns total protected memory when memory protection is enabled.
+ *     Returns 0 when memory protection is not enabled.
+ *   NV2080_CTRL_FB_INFO_INDEX_PROTECTED_MEM_SIZE_FREE_KB
+ *     Returns protected memory available for allocation when memory
+ *     protection is enabled.
+ *     Returns 0 when memory protection is not enabled.
+ *   NV2080_CTRL_FB_INFO_INDEX_ECC_STATUS_SIZE
+ *     Returns the ECC status size (corresponds to subpartitions or channels
+ *     depending on architecture/memory type).
+ *   NV2080_CTRL_FB_INFO_INDEX_IS_ZERO_FB
+ *     Returns true if FB is not present on this chip
+ *   NV2080_CTRL_FB_INFO_INDEX_ACCESS_COUNTER_BUFFER_COUNT
+ *     Returns the count of access counter buffers supported by GPU
+ */
+typedef NVXXXX_CTRL_XXX_INFO NV2080_CTRL_FB_INFO;
+
+/* valid fb info index values */
+#define NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_COUNT (0x00000000U) // Deprecated
+#define NV2080_CTRL_FB_INFO_INDEX_COMPRESSION_SIZE (0x00000001U)
+#define NV2080_CTRL_FB_INFO_INDEX_DRAM_PAGE_STRIDE (0x00000002U)
+#define NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_FREE_COUNT (0x00000003U)
+#define NV2080_CTRL_FB_INFO_INDEX_PARTITION_COUNT (0x00000004U)
+#define NV2080_CTRL_FB_INFO_INDEX_BAR1_SIZE (0x00000005U)
+#define NV2080_CTRL_FB_INFO_INDEX_BANK_SWIZZLE_ALIGNMENT (0x00000006U)
+#define NV2080_CTRL_FB_INFO_INDEX_RAM_SIZE (0x00000007U)
+#define NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE (0x00000008U)
+#define NV2080_CTRL_FB_INFO_INDEX_HEAP_SIZE (0x00000009U)
+#define NV2080_CTRL_FB_INFO_INDEX_MAPPABLE_HEAP_SIZE (0x0000000AU)
+#define NV2080_CTRL_FB_INFO_INDEX_BUS_WIDTH (0x0000000BU)
+#define NV2080_CTRL_FB_INFO_INDEX_RAM_CFG (0x0000000CU)
+#define NV2080_CTRL_FB_INFO_INDEX_RAM_TYPE (0x0000000DU)
+#define NV2080_CTRL_FB_INFO_INDEX_BANK_COUNT (0x0000000EU)
+#define NV2080_CTRL_FB_INFO_INDEX_OVERLAY_OFFSET_ADJUSTMENT (0x0000000FU) // Deprecated (index reused to return 0)
+#define NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_SPACE_SIZE_KB (0x0000000FU) // Deprecated (index reused to return 0)
+#define NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_HEAP_SIZE_KB (0x0000000FU) // Deprecated (index reused to return 0)
+#define NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_MAPPBLE_SIZE_KB (0x0000000FU) // Deprecated (index reused to return 0)
+#define NV2080_CTRL_FB_INFO_INDEX_EFFECTIVE_BW (0x0000000FU) // Deprecated (index reused to return 0)
+#define NV2080_CTRL_FB_INFO_INDEX_FB_TAX_SIZE_KB (0x00000010U)
+#define NV2080_CTRL_FB_INFO_INDEX_HEAP_BASE_KB (0x00000011U)
+#define NV2080_CTRL_FB_INFO_INDEX_LARGEST_FREE_REGION_SIZE_KB (0x00000012U)
+#define NV2080_CTRL_FB_INFO_INDEX_LARGEST_FREE_REGION_BASE_KB (0x00000013U)
+#define NV2080_CTRL_FB_INFO_INDEX_PARTITION_MASK (0x00000014U)
+#define NV2080_CTRL_FB_INFO_INDEX_VISTA_RESERVED_HEAP_SIZE (0x00000015U)
+#define NV2080_CTRL_FB_INFO_INDEX_HEAP_FREE (0x00000016U)
+#define NV2080_CTRL_FB_INFO_INDEX_RAM_LOCATION (0x00000017U)
+#define NV2080_CTRL_FB_INFO_INDEX_FB_IS_BROKEN (0x00000018U)
+#define NV2080_CTRL_FB_INFO_INDEX_FBP_COUNT (0x00000019U)
+#define NV2080_CTRL_FB_INFO_INDEX_FBP_MASK (0x0000001AU)
+#define NV2080_CTRL_FB_INFO_INDEX_L2CACHE_SIZE (0x0000001BU)
+#define NV2080_CTRL_FB_INFO_INDEX_MEMORYINFO_VENDOR_ID (0x0000001CU)
+#define NV2080_CTRL_FB_INFO_INDEX_BAR1_AVAIL_SIZE (0x0000001DU)
+#define NV2080_CTRL_FB_INFO_INDEX_HEAP_START (0x0000001EU)
+#define NV2080_CTRL_FB_INFO_INDEX_BAR1_MAX_CONTIGUOUS_AVAIL_SIZE (0x0000001FU)
+#define NV2080_CTRL_FB_INFO_INDEX_USABLE_RAM_SIZE (0x00000020U)
+#define NV2080_CTRL_FB_INFO_INDEX_TRAINIG_2T (0x00000021U)
+#define NV2080_CTRL_FB_INFO_INDEX_LTC_COUNT (0x00000022U)
+#define NV2080_CTRL_FB_INFO_INDEX_LTS_COUNT (0x00000023U)
+#define NV2080_CTRL_FB_INFO_INDEX_L2CACHE_ONLY_MODE (0x00000024U)
+#define NV2080_CTRL_FB_INFO_INDEX_PSEUDO_CHANNEL_MODE (0x00000025U)
+#define NV2080_CTRL_FB_INFO_INDEX_SMOOTHDISP_RSVD_BAR1_SIZE (0x00000026U)
+#define NV2080_CTRL_FB_INFO_INDEX_HEAP_OFFLINE_SIZE (0x00000027U)
+#define NV2080_CTRL_FB_INFO_INDEX_1TO1_COMPTAG_ENABLED (0x00000028U)
+#define NV2080_CTRL_FB_INFO_INDEX_SUSPEND_RESUME_RSVD_SIZE (0x00000029U)
+#define NV2080_CTRL_FB_INFO_INDEX_ALLOW_PAGE_RETIREMENT (0x0000002AU)
+#define NV2080_CTRL_FB_INFO_INDEX_LTC_MASK (0x0000002BU)
+#define NV2080_CTRL_FB_INFO_POISON_FUSE_ENABLED (0x0000002CU)
+#define NV2080_CTRL_FB_INFO_FBPA_ECC_ENABLED (0x0000002DU)
+#define NV2080_CTRL_FB_INFO_DYNAMIC_PAGE_OFFLINING_ENABLED (0x0000002EU)
+#define NV2080_CTRL_FB_INFO_INDEX_FORCED_BAR1_64KB_MAPPING_ENABLED (0x0000002FU)
+#define NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_SIZE (0x00000030U)
+#define NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_ALIGNMENT (0x00000031U)
+#define NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_BAR1_MAX_OFFSET_64KB (0x00000032U)
+#define NV2080_CTRL_FB_INFO_INDEX_PROTECTED_MEM_SIZE_TOTAL_KB (0x00000033U)
+#define NV2080_CTRL_FB_INFO_INDEX_PROTECTED_MEM_SIZE_FREE_KB (0x00000034U)
+#define NV2080_CTRL_FB_INFO_INDEX_ECC_STATUS_SIZE (0x00000035U)
+#define NV2080_CTRL_FB_INFO_INDEX_IS_ZERO_FB (0x00000036U)
+#define NV2080_CTRL_FB_INFO_INDEX_PARTITION_MASK_0 (NV2080_CTRL_FB_INFO_INDEX_PARTITION_MASK)
+#define NV2080_CTRL_FB_INFO_INDEX_PARTITION_MASK_1 (0x00000037U)
+#define NV2080_CTRL_FB_INFO_INDEX_LTC_MASK_0 (NV2080_CTRL_FB_INFO_INDEX_LTC_MASK)
+#define NV2080_CTRL_FB_INFO_INDEX_LTC_MASK_1 (0x00000038U)
+#define NV2080_CTRL_FB_INFO_INDEX_ACCESS_COUNTER_BUFFER_COUNT (0x00000039U)
+#define NV2080_CTRL_FB_INFO_INDEX_COHERENCE_INFO (0x0000003AU)
+
+#define NV2080_CTRL_FB_INFO_INDEX_MAX NV2080_CTRL_FB_INFO_INDEX_ACCESS_COUNTER_BUFFER_COUNT
+
+/* Intentionally picking a value much bigger than NV2080_CTRL_FB_INFO_INDEX_MAX to prevent VGPU plumbing updates */
+#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE (0x00000080U)
+
+/* valid fb RAM type values */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_UNKNOWN (0x00000000U)
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_SDRAM (0x00000001U)
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_DDR1 (0x00000002U) /* SDDR and GDDR (aka DDR1 and GDDR1) */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_SDDR2 (0x00000003U) /* SDDR2 Used on NV43 and later */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_DDR2 NV2080_CTRL_FB_INFO_RAM_TYPE_SDDR2 /* Deprecated alias */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR2 (0x00000004U) /* GDDR2 Used on NV30 and some NV36 */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR3 (0x00000005U) /* GDDR3 Used on NV40 and later */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR4 (0x00000006U) /* GDDR4 Used on G80 and later (deprecated) */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_SDDR3 (0x00000007U) /* SDDR3 Used on G9x and later */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_DDR3 NV2080_CTRL_FB_INFO_RAM_TYPE_SDDR3 /* Deprecated alias */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR5 (0x00000008U) /* GDDR5 Used on GT21x and later */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR2 (0x00000009U) /* LPDDR (Low Power SDDR) used on T2x and later. */
+
+
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_SDDR4 (0x0000000CU) /* SDDR4 Used on Maxwell and later */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR4 (0x0000000DU) /* LPDDR (Low Power SDDR) used on T21x and later.*/
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_HBM1 (0x0000000EU) /* HBM1 (High Bandwidth Memory) used on GP100 */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_HBM2 (0x0000000FU) /* HBM2 (High Bandwidth Memory-pseudo channel) */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR5X (0x00000010U) /* GDDR5X Used on GP10x */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR6 (0x00000011U) /* GDDR6 Used on TU10x */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR6X (0x00000012U) /* GDDR6X Used on GA10x */
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR5 (0x00000013U) /* LPDDR (Low Power SDDR) used on T23x and later.*/
+#define NV2080_CTRL_FB_INFO_RAM_TYPE_HBM3 (0x00000014U) /* HBM3 (High Bandwidth Memory) v3 */
+
+
+
+/* valid RAM LOCATION types */
+#define NV2080_CTRL_FB_INFO_RAM_LOCATION_GPU_DEDICATED (0x00000000U)
+#define NV2080_CTRL_FB_INFO_RAM_LOCATION_SYS_SHARED (0x00000001U)
+#define NV2080_CTRL_FB_INFO_RAM_LOCATION_SYS_DEDICATED (0x00000002U)
+
+/* valid Memory Vendor ID values */
+#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_SAMSUNG (0x00000001U)
+#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_QIMONDA (0x00000002U)
+#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ELPIDA (0x00000003U)
+#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ETRON (0x00000004U)
+#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_NANYA (0x00000005U)
+#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_HYNIX (0x00000006U)
+#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_MOSEL (0x00000007U)
+#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_WINBOND (0x00000008U)
+#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ESMT (0x00000009U)
+#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_MICRON (0x0000000FU)
+#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_UNKNOWN (0xFFFFFFFFU)
+
+#define NV2080_CTRL_FB_INFO_PSEUDO_CHANNEL_MODE_UNSUPPORTED (0x00000000U)
+#define NV2080_CTRL_FB_INFO_PSEUDO_CHANNEL_MODE_DISABLED (0x00000001U)
+#define NV2080_CTRL_FB_INFO_PSEUDO_CHANNEL_MODE_ENABLED (0x00000002U)
+
+#define NV2080_CTRL_FB_INFO_INDEX_COHERENCE_INFO_NON_FULLY_COHERENT (0x00000000U)
+#define NV2080_CTRL_FB_INFO_INDEX_COHERENCE_INFO_FULLY_COHERENT (0x00000001U)
+
+/**
+ * NV2080_CTRL_CMD_FB_GET_INFO
+ *
+ * This command returns fb engine information for the associated GPU.
+ * Requests to retrieve fb information use a list of one or more
+ * NV2080_CTRL_FB_INFO structures.
+ *
+ * fbInfoListSize
+ *     This field specifies the number of entries on the caller's
+ *     fbInfoList.
+ * fbInfoList
+ *     This field specifies a pointer in the caller's address space
+ *     to the buffer into which the fb information is to be returned.
+ *     This buffer must be at least as big as fbInfoListSize multiplied
+ *     by the size of the NV2080_CTRL_FB_INFO structure.
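+ *
+ * A minimal usage sketch for the V2 variant defined below (illustrative
+ * only; assumes hClient/hSubdevice handles and an rmControl() wrapper
+ * standing in for the client's NVOS54 RM control entry point; each
+ * NV2080_CTRL_FB_INFO entry carries an index/data pair):
+ *
+ *     NV2080_CTRL_FB_GET_INFO_V2_PARAMS p = { 0 };
+ *     p.fbInfoListSize = 1;
+ *     p.fbInfoList[0].index = NV2080_CTRL_FB_INFO_INDEX_RAM_SIZE;
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_FB_GET_INFO_V2,
+ *                        &p, sizeof(p));
+ *     // on NV_OK, p.fbInfoList[0].data holds the RAM size in kilobytes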
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV2080_CTRL_CMD_FB_GET_INFO (0x20801301U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_INFO_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_FB_GET_INFO_PARAMS {
+    NvU32 fbInfoListSize;
+    NV_DECLARE_ALIGNED(NvP64 fbInfoList, 8);
+} NV2080_CTRL_FB_GET_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_FB_GET_INFO_V2 (0x20801303U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_INFO_V2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_INFO_V2_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV2080_CTRL_FB_GET_INFO_V2_PARAMS {
+    NvU32 fbInfoListSize;
+    NV2080_CTRL_FB_INFO fbInfoList[NV2080_CTRL_FB_INFO_MAX_LIST_SIZE];
+} NV2080_CTRL_FB_GET_INFO_V2_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_GET_BAR1_OFFSET
+ *
+ * This command returns the GPU virtual address of a BAR1
+ * allocation, given the CPU virtual address.
+ *
+ * cpuVirtAddress
+ *     This field specifies the associated CPU virtual address of the
+ *     memory allocation.
+ * gpuVirtAddress
+ *     The GPU virtual address associated with the allocation
+ *     is returned in this field.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_FB_GET_BAR1_OFFSET (0x20801310U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS_MESSAGE_ID (0x10U)
+
+typedef struct NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 cpuVirtAddress, 8);
+    NV_DECLARE_ALIGNED(NvU64 gpuVirtAddress, 8);
+} NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_GET_CALIBRATION_LOCK_FAILED
+ *
+ * This command returns the failure counts for calibration.
+ *
+ * flags
+ *     Just one for now -- whether to reset the counts.
+ * driveStrengthRiseCount
+ *     This parameter specifies the failure count for drive strength rising.
+ * driveStrengthFallCount
+ *     This parameter specifies the failure count for drive strength falling.
+ * driveStrengthTermCount
+ *     This parameter specifies the failure count for drive strength
+ *     termination.
+ * slewStrengthRiseCount
+ *     This parameter specifies the failure count for slew strength rising.
+ * slewStrengthFallCount
+ *     This parameter specifies the failure count for slew strength falling.
+ * slewStrengthTermCount
+ *     This parameter specifies the failure count for slew strength
+ *     termination.
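+ *
+ * A minimal usage sketch (illustrative only; rmControl() again stands in
+ * for the client's NVOS54 RM control entry point):
+ *
+ *     NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS p = { 0 };
+ *     p.flags = NV2080_CTRL_CMD_FB_GET_CAL_FLAG_RESET;  // read, then reset
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_FB_GET_CALIBRATION_LOCK_FAILED,
+ *                        &p, sizeof(p));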
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NVOS_STATUS_INVALID_PARAM_STRUCT
+ *   NVOS_STATUS_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_FB_GET_CALIBRATION_LOCK_FAILED (0x2080130cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS_MESSAGE_ID (0xCU)
+
+typedef struct NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS {
+    NvU32 flags;
+    NvU32 driveStrengthRiseCount;
+    NvU32 driveStrengthFallCount;
+    NvU32 driveStrengthTermCount;
+    NvU32 slewStrengthRiseCount;
+    NvU32 slewStrengthFallCount;
+} NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS;
+
+/* valid flags parameter values */
+#define NV2080_CTRL_CMD_FB_GET_CAL_FLAG_NONE (0x00000000U)
+#define NV2080_CTRL_CMD_FB_GET_CAL_FLAG_RESET (0x00000001U)
+
+/*
+ * NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL
+ *
+ * If supported by hardware and the OS, this command implements a streamlined
+ * version of NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE which can be called at high
+ * IRQL and bypass the RM lock.
+ *
+ * Requires the following NVOS54_PARAMETERS flags to be set for raised IRQ /
+ * lock bypass operation:
+ *   NVOS54_FLAGS_IRQL_RAISED
+ *   NVOS54_FLAGS_LOCK_BYPASS
+ *
+ * flags
+ *     Contains flags to control various aspects of the flush. Valid values
+ *     are defined in NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL_FLAGS*.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ * See Also:
+ *   NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE
+ *     This is the more generalized version which is not intended to be
+ *     called at raised IRQ level
+ *   NV0080_CTRL_CMD_DMA_FLUSH
+ *     Performs flush operations in broadcast for the GPU cache and other
+ *     hardware engines. Use this call if you want to flush all GPU caches
+ *     in a broadcast device.
+ *   NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE
+ *     Flushes memory associated with a single allocation if the hardware
+ *     supports it. Use this call if you want to flush a single allocation
+ *     and you have a memory object describing the physical memory.
+ */
+#define NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL (0x2080130dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL_PARAMS_MESSAGE_ID (0xDU)
+
+typedef struct NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL_PARAMS {
+    NvU32 flags;
+} NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL_PARAMS;
+
+/* valid fields and values for flags */
+#define NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL_FLAGS_WRITE_BACK 0:0
+#define NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL_FLAGS_WRITE_BACK_NO (0x00000000U)
+#define NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL_FLAGS_WRITE_BACK_YES (0x00000001U)
+#define NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL_FLAGS_INVALIDATE 1:1
+#define NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL_FLAGS_INVALIDATE_NO (0x00000000U)
+#define NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL_FLAGS_INVALIDATE_YES (0x00000001U)
+#define NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL_FLAGS_FB_FLUSH 2:2
+#define NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL_FLAGS_FB_FLUSH_NO (0x00000000U)
+#define NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE_IRQL_FLAGS_FB_FLUSH_YES (0x00000001U)
+
+/*
+ * NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE
+ *
+ * This command flushes a cache on the GPU which all memory accesses go
+ * through.
+ * The types of flushes supported by this API may not be supported by
+ * all hardware. Attempting an unsupported flush type will result in an
+ * error.
+ *
+ * addressArray
+ *     An array of physical addresses in the aperture defined by
+ *     NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE. Each entry points to
+ *     a contiguous block of memory of size memBlockSizeBytes. The addresses
+ *     are aligned down to addressAlign before coalescing adjacent addresses
+ *     and sending flushes to hardware.
+ * addressAlign
+ *     Used to align down addresses held in addressArray. A value of 0 will
+ *     be forced to 1 to avoid a divide by zero. The value is treated as a
+ *     minimum alignment, and any hardware alignment requirements above this
+ *     value will be honored.
+ * addressArraySize
+ *     The number of entries in addressArray.
+ * memBlockSizeBytes
+ *     The size in bytes of each memory block pointed to by addressArray.
+ * flags
+ *     Contains flags to control various aspects of the flush. Valid values
+ *     are defined in NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS*. Not all flags
+ *     are valid for all defined FLUSH_MODEs or all GPUs.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ * See Also:
+ *   NV0080_CTRL_CMD_DMA_FLUSH
+ *     Performs flush operations in broadcast for the GPU cache and other
+ *     hardware engines. Use this call if you want to flush all GPU caches
+ *     in a broadcast device.
+ *   NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE
+ *     Flushes memory associated with a single allocation if the hardware
+ *     supports it. Use this call if you want to flush a single allocation
+ *     and you have a memory object describing the physical memory.
+ */
+#define NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE (0x2080130eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_MAX_ADDRESSES 500U
+
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS_MESSAGE_ID (0xEU)
+
+typedef struct NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 addressArray[NV2080_CTRL_FB_FLUSH_GPU_CACHE_MAX_ADDRESSES], 8);
+    NvU32 addressArraySize;
+    NvU32 addressAlign;
+    NV_DECLARE_ALIGNED(NvU64 memBlockSizeBytes, 8);
+    NvU32 flags;
+} NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS;
+
+/* valid fields and values for flags */
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE 1:0
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE_VIDEO_MEMORY (0x00000000U)
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE_SYSTEM_MEMORY (0x00000001U)
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE_PEER_MEMORY (0x00000002U)
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK 2:2
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK_NO (0x00000000U)
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK_YES (0x00000001U)
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_INVALIDATE 3:3
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_INVALIDATE_NO (0x00000000U)
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_INVALIDATE_YES (0x00000001U)
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FLUSH_MODE 4:4
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FLUSH_MODE_ADDRESS_ARRAY (0x00000000U)
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FLUSH_MODE_FULL_CACHE (0x00000001U)
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FB_FLUSH 5:5
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FB_FLUSH_NO (0x00000000U)
+#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FB_FLUSH_YES (0x00000001U)
+
+/*
+ * NV2080_CTRL_CMD_FB_IS_KIND
+ *
+ * This command is used to perform various operations like 'IS_KIND_VALID',
+ * 'IS_KIND_COMPRESSIBLE' on the kind passed by the caller. The operation to
+ * be performed should be passed in the 'operation' parameter of
+ * NV2080_CTRL_FB_IS_KIND_PARAMS, the kind on which the operation is to be
+ * performed should be passed in the 'kind' parameter. The result of the
+ * operation (true/false) will be returned in the 'result' parameter.
+ *
+ * operation
+ *     Specifies what operation is to be performed on the kind passed by the
+ *     caller. The supported operations are
+ *       NV2080_CTRL_FB_IS_KIND_OPERATION_SUPPORTED
+ *         This operation checks whether the kind passed in the 'kind'
+ *         parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure is
+ *         supported for this GPU. Returns a nonzero value in the 'result'
+ *         parameter if the input kind is supported, else returns zero in
+ *         the result.
+ *       NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE
+ *         This operation checks whether the kind passed in the 'kind'
+ *         parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure is
+ *         compressible. Returns a nonzero value in the 'result' parameter
+ *         if the input kind is compressible, else returns zero in the
+ *         result.
+ *       NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_1
+ *         This operation checks whether the kind passed in the 'kind'
+ *         parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure
+ *         supports 1 bit compression. Returns a nonzero value in the
+ *         'result' parameter if the kind supports 1 bit compression, else
+ *         returns zero in the result.
+ *       NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_2
+ *         This operation checks whether the kind passed in the 'kind'
+ *         parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure
+ *         supports 2 bit compression. Returns a nonzero value in the
+ *         'result' parameter if the kind supports 2 bit compression, else
+ *         returns zero in the result.
+ *       NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_4
+ *         This operation checks whether the kind passed in the 'kind'
+ *         parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure
+ *         supports 4 bit compression. Returns a nonzero value in the
+ *         'result' parameter if the kind supports 4 bit compression, else
+ *         returns zero in the result.
+ *       NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC
+ *         This operation checks whether the kind passed in the 'kind'
+ *         parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure
+ *         supports ZBC. Returns a nonzero value in the 'result' parameter
+ *         if the input kind supports ZBC, else returns zero in the result.
+ *       NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_1
+ *         This operation checks whether the kind passed in the 'kind'
+ *         parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure
+ *         supports 1 bit ZBC. Returns a nonzero value in the 'result'
+ *         parameter if the input kind supports 1 bit ZBC, else returns
+ *         zero in the result.
+ *       NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_2
+ *         This operation checks whether the kind passed in the 'kind'
+ *         parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure
+ *         supports 2 bit ZBC. Returns a nonzero value in the 'result'
+ *         parameter if the input kind supports 2 bit ZBC, else returns
+ *         zero in the result.
+ *       NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_4
+ *         This operation checks whether the kind passed in the 'kind'
+ *         parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure
+ *         supports 4 bit ZBC. Returns a nonzero value in the 'result'
+ *         parameter if the input kind supports 4 bit ZBC, else returns
+ *         zero in the result.
+ * kind
+ *     Specifies the kind on which the operation is to be carried out. The
+ *     legal range of values for the kind parameter is different on different
+ *     GPUs. For example, on Fermi the valid range is 0x00 to 0xfe. Still,
+ *     some values inside this legal range can be invalid, i.e. not defined.
+ *     So it's always better to first check whether a particular kind is
+ *     supported on the current GPU with the
+ *     'NV2080_CTRL_FB_IS_KIND_OPERATION_SUPPORTED' operation.
+ * result
+ *     Upon return, this parameter will hold the result (true/false) of the
+ *     operation performed on the kind.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_FB_IS_KIND (0x20801313U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_IS_KIND_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_IS_KIND_PARAMS_MESSAGE_ID (0x13U)
+
+typedef struct NV2080_CTRL_FB_IS_KIND_PARAMS {
+    NvU32 operation;
+    NvU32 kind;
+    NvBool result;
+} NV2080_CTRL_FB_IS_KIND_PARAMS;
+
+/* valid values for operation */
+#define NV2080_CTRL_FB_IS_KIND_OPERATION_SUPPORTED (0x00000000U)
+#define NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE (0x00000001U)
+#define NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_1 (0x00000002U)
+#define NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_2 (0x00000003U)
+#define NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_4 (0x00000004U)
+#define NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC (0x00000005U)
+#define NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_1 (0x00000006U)
+#define NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_2 (0x00000007U)
+#define NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_4 (0x00000008U)
+
+/**
+ * NV2080_CTRL_CMD_FB_GET_GPU_CACHE_INFO
+ *
+ * This command returns the state of a cache which all GPU memory accesses
+ * go through.
+ *
+ * powerState
+ *     Returns the power state of the cache. Possible values are defined in
+ *     NV2080_CTRL_FB_GET_GPU_CACHE_INFO_POWER_STATE.
+ *
+ * writeMode
+ *     Returns the write mode of the cache. Possible values are defined in
+ *     NV2080_CTRL_FB_GET_GPU_CACHE_INFO_WRITE_MODE.
+ *
+ * bypassMode
+ *     Returns the bypass mode of the L2 cache. Possible values are defined
+ *     in NV2080_CTRL_FB_GET_GPU_CACHE_INFO_BYPASS_MODE.
+ *
+ * rcmState
+ *     Returns the RCM state of the cache. Possible values are defined in
+ *     NV2080_CTRL_FB_GET_GPU_CACHE_INFO_RCM_STATE.
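+ *
+ * A minimal usage sketch (illustrative only; rmControl() stands in for the
+ * client's NVOS54 RM control entry point):
+ *
+ *     NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS p = { 0 };
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_FB_GET_GPU_CACHE_INFO,
+ *                        &p, sizeof(p));
+ *     if ((status == NV_OK) &&
+ *         (p.writeMode == NV2080_CTRL_FB_GET_GPU_CACHE_INFO_WRITE_MODE_WRITEBACK))
+ *     {
+ *         // cache may hold dirty lines; flush before any direct FB access
+ *     }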
+ *
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV2080_CTRL_CMD_FB_GET_GPU_CACHE_INFO (0x20801315U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS_MESSAGE_ID (0x15U)
+
+typedef struct NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS {
+    NvU32 powerState;
+    NvU32 writeMode;
+    NvU32 bypassMode;
+    NvU32 rcmState;
+} NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS;
+
+/* valid values for powerState */
+#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_POWER_STATE_ENABLED (0x00000000U)
+#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_POWER_STATE_DISABLED (0x00000001U)
+/* valid values for writeMode */
+#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_WRITE_MODE_WRITETHROUGH (0x00000000U)
+#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_WRITE_MODE_WRITEBACK (0x00000001U)
+/* valid values for bypassMode */
+#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_BYPASS_MODE_DISABLED (0x00000000U)
+#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_BYPASS_MODE_ENABLED (0x00000001U)
+/* valid values for rcmState */
+#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_RCM_STATE_FULL (0x00000000U)
+#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_RCM_STATE_TRANSITIONING (0x00000001U)
+#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_RCM_STATE_REDUCED (0x00000002U)
+#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_RCM_STATE_ZERO_CACHE (0x00000003U)
+
+/*
+ * NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO
+ *
+ * This command returns the FB memory region characteristics.
+ *
+ * numFBRegions
+ *     Number of valid regions returned in fbRegion[]
+ * fbRegion[].base
+ *     Base address of region. The first valid address in the range
+ *     [base..limit].
+ * fbRegion[].limit
+ *     Last/end address of region. The last valid address in the range
+ *     [base..limit].
+ *     (limit - base + 1) = size of the region
+ * fbRegion[].reserved
+ *     Amount of memory that RM speculatively needs within the region. A
+ *     client doing its own memory management should leave at least this
+ *     much memory available for RM use. This particularly applies to a
+ *     driver model like LDDM.
+ * fbRegion[].performance
+ *     Relative performance of this region compared to other regions.
+ *     The definition is vague, and only connotes relative bandwidth or
+ *     performance. The higher the value, the higher the performance.
+ * fbRegion[].supportCompressed
+ *     TRUE if compressed surfaces/kinds are supported
+ *     FALSE if compressed surfaces/kinds are not allowed to be allocated in
+ *     this region
+ * fbRegion[].supportISO
+ *     TRUE if ISO surfaces/kinds are supported (Display, cursor, video)
+ *     FALSE if ISO surfaces/kinds are not allowed to be allocated in this
+ *     region
+ * fbRegion[].bProtected
+ *     TRUE if this region is a protected memory region. If true, only
+ *     allocations marked as protected (NVOS32_ALLOC_FLAGS_PROTECTED) can be
+ *     allocated in this region.
+ * fbRegion[].blackList[] - DEPRECATED: Use supportISO
+ *     TRUE for each NVOS32_TYPE_IMAGE* that is NOT allowed in this region.
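+ *
+ * A minimal usage sketch (illustrative only; rmControl() stands in for the
+ * client's NVOS54 RM control entry point), computing each region's size
+ * from the inclusive [base..limit] range:
+ *
+ *     NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS p = { 0 };
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO,
+ *                        &p, sizeof(p));
+ *     for (NvU32 i = 0; (status == NV_OK) && (i < p.numFBRegions); i++)
+ *     {
+ *         NvU64 size = p.fbRegion[i].limit - p.fbRegion[i].base + 1;
+ *         // leave at least p.fbRegion[i].reserved bytes of this region to RM
+ *     }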
+/*
+ * NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO
+ *
+ * This command returns the FB memory region characteristics.
+ *
+ * numFBRegions
+ *   Number of valid regions returned in fbRegion[]
+ * fbRegion[].base
+ *   Base address of region. The first valid address in the range
+ *   [base..limit].
+ * fbRegion[].limit
+ *   Last/end address of region. The last valid address in the range
+ *   [base..limit].
+ *   (limit - base + 1) = size of the region
+ * fbRegion[].reserved
+ *   Amount of memory that RM speculatively needs within the region. A
+ *   client doing its own memory management should leave at least this much
+ *   memory available for RM use. This particularly applies to a driver
+ *   model like LDDM.
+ * fbRegion[].performance
+ *   Relative performance of this region compared to other regions.
+ *   The definition is vague, and only connotes relative bandwidth or
+ *   performance. The higher the value, the higher the performance.
+ * fbRegion[].supportCompressed
+ *   TRUE if compressed surfaces/kinds are supported
+ *   FALSE if compressed surfaces/kinds are not allowed to be allocated in
+ *   this region
+ * fbRegion[].supportISO
+ *   TRUE if ISO surfaces/kinds are supported (Display, cursor, video)
+ *   FALSE if ISO surfaces/kinds are not allowed to be allocated in this
+ *   region
+ * fbRegion[].bProtected
+ *   TRUE if this region is a protected memory region. If true only
+ *   allocations marked as protected (NVOS32_ALLOC_FLAGS_PROTECTED) can be
+ *   allocated in this region.
+ * fbRegion[].blackList[] - DEPRECATED: Use supportISO
+ *   TRUE for each NVOS32_TYPE_IMAGE* that is NOT allowed in this region.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO (0x20801320U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 18U
+
+typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 limit, 8);
+    NV_DECLARE_ALIGNED(NvU64 reserved, 8);
+    NvU32  performance;
+    NvBool supportCompressed;
+    NvBool supportISO;
+    NvBool bProtected;
+    NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS_MESSAGE_ID (0x20U)
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
+    NvU32 numFBRegions;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_OFFLINE_PAGES
+ *
+ * This command adds video memory page addresses to the list of offlined
+ * addresses so that they're not allocated to any client. The newly offlined
+ * addresses take effect after a reboot.
+ *
+ * offlined
+ *   This input parameter is an array of NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO
+ *   structures, containing the video memory physical page numbers that
+ *   are to be blacklisted. This array can hold a maximum of
+ *   NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES address pairs. Valid entries are
+ *   adjacent.
+ * pageSize
+ *   This input parameter contains the size of the page that is to be
+ *   blacklisted.
+ * validEntries
+ *   This input parameter specifies the number of valid entries in the
+ *   offlined array.
+ * numPagesAdded
+ *   This output parameter specifies how many of the validEntries were
+ *   actually offlined. If numPagesAdded < validEntries, it
+ *   means that only addresses from offlined[0] to offlined[numPagesAdded - 1]
+ *   were added to the list of offlined addresses.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_FB_OFFLINE_PAGES (0x20801321U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_OFFLINE_PAGES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES       (0x00000040U)
+#define NV2080_CTRL_FB_OFFLINED_PAGES_INVALID_ADDRESS (0xffffffffffffffffULL)
+#define NV2080_CTRL_FB_OFFLINED_PAGES_PAGE_SIZE_4K    (0x00000000U)
+#define NV2080_CTRL_FB_OFFLINED_PAGES_PAGE_SIZE_64K   (0x00000001U)
+#define NV2080_CTRL_FB_OFFLINED_PAGES_PAGE_SIZE_128K  (0x00000002U)
+
+/*
+ * NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO
+ *
+ * pageAddressWithEccOn
+ *   Address of the memory page retired when ECC is enabled on the board.
+ * pageAddressWithEccOff
+ *   Address of the memory page retired when ECC is disabled on the board.
+ * rbcAddress
+ *   Row/Bank/Column Address of the faulty memory which caused the page to
+ *   be retired
+ * source
+ *   The reason for the page to be retired
+ * status
+ *   Non-exceptional reasons for a page retirement failure
+ *   NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_OK
+ *     No error
+ *   NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_PENDING_RETIREMENT
+ *     The given address is already pending retirement or has
+ *     been retired during the current driver run. The page
+ *     will be offlined during the next driver run.
+ *   NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_BLACKLISTING_FAILED
+ *     The given page was retired on a previous driver run,
+ *     so it should not be accessible unless offlining failed.
+ *     Failing to offline a page is strongly indicative of a
+ *     driver offlining bug.
+ *   NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_TABLE_FULL
+ *     The PBL is full and no more pages can be retired
+ *   NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_INTERNAL_ERROR
+ *     Internal driver error
+ */
+
+typedef struct NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO {
+    NV_DECLARE_ALIGNED(NvU64 pageAddressWithEccOn, 8);
+    NV_DECLARE_ALIGNED(NvU64 pageAddressWithEccOff, 8);
+    NvU32 rbcAddress;
+    NvU32 source;
+    NvU32 status;
+    NvU32 timestamp;
+} NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO;
+
+/* valid values for source */
+#define NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_MULTIPLE_SBE    (0x00000002U)
+#define NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_DBE             (0x00000004U)
+
+/* valid values for status */
+#define NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_OK                  (0x00000000U)
+#define NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_PENDING_RETIREMENT  (0x00000001U)
+#define NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_BLACKLISTING_FAILED (0x00000002U)
+#define NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_TABLE_FULL          (0x00000003U)
+#define NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_INTERNAL_ERROR      (0x00000004U)
+
+/* deprecated */
+#define NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_MULTIPLE_SBE NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_MULTIPLE_SBE
+#define NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DBE          NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_DBE
+
+#define NV2080_CTRL_FB_OFFLINE_PAGES_PARAMS_MESSAGE_ID (0x21U)
+
+typedef struct NV2080_CTRL_FB_OFFLINE_PAGES_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO offlined[NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES], 8);
+    NvU32 pageSize;
+    NvU32 validEntries;
+    NvU32 numPagesAdded;
+} NV2080_CTRL_FB_OFFLINE_PAGES_PARAMS;
+
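For illustration, a single 4K page could be offlined as sketched below. This is not part of the header; rmControl() is the same hypothetical wrapper as above, and a real caller should also inspect each entry's status field after the call.

    /* Sketch: offline one 4K page. */
    static NV_STATUS OfflineOnePage(NvU64 pageWithEccOn, NvU64 pageWithEccOff)
    {
        NV2080_CTRL_FB_OFFLINE_PAGES_PARAMS params = {0};
        NV_STATUS status;

        params.offlined[0].pageAddressWithEccOn  = pageWithEccOn;
        params.offlined[0].pageAddressWithEccOff = pageWithEccOff;
        params.pageSize     = NV2080_CTRL_FB_OFFLINED_PAGES_PAGE_SIZE_4K;
        params.validEntries = 1;

        status = rmControl(NV2080_CTRL_CMD_FB_OFFLINE_PAGES, &params, sizeof(params));
        if ((status == NV_OK) && (params.numPagesAdded < params.validEntries))
            status = NV_ERR_GENERIC; /* entry was rejected; check offlined[0].status */
        return status;
    }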
+/*
+ * NV2080_CTRL_CMD_FB_GET_OFFLINED_PAGES
+ *
+ * This command returns the list of video memory page addresses in the
+ * Inforom's blacklist.
+ *
+ * offlined
+ *   This output parameter is an array of NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO
+ *   structures, containing the video memory physical page numbers that
+ *   are blacklisted. This array can hold a maximum of
+ *   NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES address pairs. Valid entries are
+ *   adjacent. The array also contains the Row/Bank/Column Address and source.
+ * validEntries
+ *   This output parameter specifies the number of valid entries in the
+ *   offlined array.
+ * bRetirementPending (DEPRECATED, use retirementPending instead)
+ *   This output parameter returns if any pages on the list are pending
+ *   retirement.
+ * retirementPending
+ *   Communicates to the caller whether retirement updates are pending and the
+ *   reason for the updates. Possible fields are:
+ *   NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_*:
+ *     NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_SBE:
+ *       Indicates whether pages are pending retirement due to SBE.
+ *     NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_DBE:
+ *       Indicates whether pages are pending retirement due to DBE. A driver
+ *       reload is needed to retire the bad memory pages and allow compute
+ *       applications to run.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_FB_GET_OFFLINED_PAGES (0x20801322U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_SBE       0:0
+#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_SBE_FALSE 0U
+#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_SBE_TRUE  1U
+#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_DBE       1:1
+#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_DBE_FALSE 0U
+#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_DBE_TRUE  1U
+
+#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS_MESSAGE_ID (0x22U)
+
+typedef struct NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO offlined[NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES], 8);
+    NvU32  validEntries;
+    NvBool bRetirementPending;
+    NvU8   retirementPending;
+} NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_QUERY_ACR_REGION
+ *
+ * This control command is used to query the allocated secure (ACR) region.
+ *
+ * queryType
+ *   NV2080_CTRL_CMD_FB_ACR_QUERY_GET_REGION_STATUS: Provides the alloc
+ *     status and ACR region ID.
+ *   NV2080_CTRL_CMD_FB_QUERY_MAP_REGION: Maps the region on BAR1 and
+ *     returns the "pCpuAddr" and pPriv to the user.
+ *   NV2080_CTRL_CMD_FB_QUERY_UNMAP_REGION: Unmaps the mapped region;
+ *     takes pPriv as input.
+ *
+ * clientReq : struct ACR_REQUEST_PARAMS
+ *   Used to find the allocated ACR region for that client
+ *   clientId     : ACR client ID
+ *   reqReadMask  : Read mask of ACR region
+ *   reqWriteMask : Write mask of ACR region
+ *   regionSize   : ACR region size
+ *
+ * clientReqStatus : struct ACR_STATUS_PARAMS
+ *   This struct stores the output for the requested ACR region.
+ *   allocStatus     : Allocation status of the ACR region
+ *   regionId        : ACR region ID
+ *   physicalAddress : Physical address on FB
+ *
+ * NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_CODE
+ *   NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_NONE : Control command executed
+ *     successfully
+ *   NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_INVALID_CLIENT_REQUEST : The
+ *     parameters of the ACR client request are invalid
+ *   NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_FAILED_TO_MAP_ON_BAR1 : RM failed to
+ *     map the ACR region on BAR1
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_QUERY_ACR_REGION (0x20801325U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_QUERY_ACR_REGION_PARAMS_MESSAGE_ID" */
+
+//
+// We can create an ACR region by using the RMCreateAcrRegion[1|2] regkey or
+// mods -acr[1|2]_size. The client ID for such a region is 2 in RM.
+//
+#define NV2080_CTRL_CMD_FB_ACR_CLIENT_ID 2U
+
+typedef enum NV2080_CTRL_CMD_FB_ACR_QUERY_TYPE {
+    NV2080_CTRL_CMD_FB_ACR_QUERY_GET_CLIENT_REGION_STATUS = 0,
+    NV2080_CTRL_CMD_FB_ACR_QUERY_GET_REGION_PROPERTY = 1,
+    NV2080_CTRL_CMD_FB_ACR_QUERY_GET_FALCON_STATUS = 2,
+} NV2080_CTRL_CMD_FB_ACR_QUERY_TYPE;
+
+typedef enum NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_CODE {
+    NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_NONE = 0,
+    NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_INVALID_CLIENT_REQUEST = 1,
+} NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_CODE;
+
+typedef struct ACR_REQUEST_PARAMS {
+    NvU32 clientId;
+    NvU32 reqReadMask;
+    NvU32 reqWriteMask;
+    NvU32 regionSize;
+} ACR_REQUEST_PARAMS;
+
+typedef struct ACR_REGION_ID_PROP {
+    NvU32 regionId;
+    NvU32 readMask;
+    NvU32 writeMask;
+    NvU32 regionSize;
+    NvU32 clientMask;
+    NV_DECLARE_ALIGNED(NvU64 physicalAddress, 8);
+} ACR_REGION_ID_PROP;
+
+typedef struct ACR_STATUS_PARAMS {
+    NvU32 allocStatus;
+    NvU32 regionId;
+    NV_DECLARE_ALIGNED(NvU64 physicalAddress, 8);
+} ACR_STATUS_PARAMS;
+
+typedef struct ACR_REGION_HANDLE {
+    NvHandle hClient;
+    NvHandle hParent;
+    NvHandle hMemory;
+    NvU32    hClass;
+    NvHandle hDevice;
+} ACR_REGION_HANDLE;
+
+typedef struct ACR_FALCON_LS_STATUS {
+    NvU16  falconId;
+    NvBool bIsInLs;
+} ACR_FALCON_LS_STATUS;
+
+#define NV2080_CTRL_CMD_FB_QUERY_ACR_REGION_PARAMS_MESSAGE_ID (0x25U)
+
+typedef struct NV2080_CTRL_CMD_FB_QUERY_ACR_REGION_PARAMS {
+    NV2080_CTRL_CMD_FB_ACR_QUERY_TYPE       queryType;
+    NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_CODE errorCode;
+    NV_DECLARE_ALIGNED(ACR_REGION_ID_PROP acrRegionIdProp, 8);
+    ACR_REQUEST_PARAMS clientReq;
+    NV_DECLARE_ALIGNED(ACR_STATUS_PARAMS clientReqStatus, 8);
+    ACR_REGION_HANDLE    handle;
+    ACR_FALCON_LS_STATUS falconStatus;
+} NV2080_CTRL_CMD_FB_QUERY_ACR_REGION_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_CLEAR_OFFLINED_PAGES
+ *
+ * This command clears offlined video memory page addresses from the Inforom.
+ *
+ * sourceMask
+ *   This is a bit mask of NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE. Pages
+ *   offlined from the specified sources will be cleared/removed from the
+ *   Inforom PBL object denylist.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_CLEAR_OFFLINED_PAGES (0x20801326U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_CLEAR_OFFLINED_PAGES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_CLEAR_OFFLINED_PAGES_PARAMS_MESSAGE_ID (0x26U)
+
+typedef struct NV2080_CTRL_FB_CLEAR_OFFLINED_PAGES_PARAMS {
+    NvU32 sourceMask;
+} NV2080_CTRL_FB_CLEAR_OFFLINED_PAGES_PARAMS;
+
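A sketch of clearing every page that was offlined due to double-bit errors follows. The mapping of a source value to a bit position in sourceMask is an assumption here (NVBIT() from nvmisc.h does the shift); consult the RM implementation for the authoritative encoding.

    NV2080_CTRL_FB_CLEAR_OFFLINED_PAGES_PARAMS params = {0};

    /* Assumed encoding: bit position == source value. */
    params.sourceMask = NVBIT(NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_DBE);

    (void)rmControl(NV2080_CTRL_CMD_FB_CLEAR_OFFLINED_PAGES, &params, sizeof(params));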
+/*!
+ * NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO
+ *
+ * Gets a pointer to the object of class CompBitCopy, which is used for
+ * swizzling compression bits in the compression backing store. The caller is
+ * expected to have the appropriate headers for class CompBitCopy. Also
+ * retrieves values of some parameters needed to call the compbit swizzling
+ * method.
+ *
+ * @params[out] void *pCompBitCopyObj
+ *     Opaque pointer to object of class CompBitCopy
+ * @params[out] void *pSwizzleParams
+ *     Opaque pointer to values needed to call the compbit
+ *     swizzle method.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO (0x20801327U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO_PARAMS_MESSAGE_ID (0x27U)
+
+typedef struct NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pCompBitCopyObj, 8);
+    NV_DECLARE_ALIGNED(NvP64 pSwizzleParams, 8);
+} NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_GET_LTC_INFO_FOR_FBP
+ *
+ * Gets the count and mask of LTCs for a given FBP.
+ *
+ * fbpIndex
+ *   The physical index of the FBP to get LTC info for.
+ * ltcMask
+ *   The mask of active LTCs for the given FBP.
+ * ltcCount
+ *   The count of active LTCs for the given FBP.
+ * ltsMask
+ *   The mask of active LTSs for the given FBP
+ * ltsCount
+ *   The count of active LTSs for the given FBP
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_GET_LTC_INFO_FOR_FBP (0x20801328U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS_MESSAGE_ID (0x28U)
+
+typedef struct NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS {
+    NvU8  fbpIndex;
+    NvU32 ltcMask;
+    NvU32 ltcCount;
+    NvU32 ltsMask;
+    NvU32 ltsCount;
+} NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_CONTEXT < Deprecated >
+ *
+ * "Sets the context" for the following CompBitCopy member functions.
+ * These are the CompBitCopy member variables that remain constant
+ * over multiple CompBitCopy member function calls and stay the same
+ * throughout a single surface eviction.
+ *
+ * @params[in] UINT64 backingStorePA;
+ *     Physical Address of the Backing Store
+ * @params[in] UINT08 *backingStoreVA;
+ *     Virtual Address of the Backing Store
+ * @params[in] UINT64 backingStoreChunkPA;
+ *     Physical Address of the "Chunk Buffer"
+ * @params[in] UINT08 *backingStoreChunkVA;
+ *     Virtual Address of the "Chunk Buffer"
+ * @params[in] UINT32 backingStoreChunkSize;
+ *     Size of the "Chunk Buffer"
+ * @params[in] UINT08 *cacheWriteBitMap;
+ *     Pointer to the bitmap indicating which parts of the
+ *     "Chunk" were updated.
+ * @params[in] bool backingStoreChunkOverfetch;
+ *     Overfetch factor.
+ * @params[in] UINT32 PageSizeSrc;
+ *     Page size of Source Surface.
+ * @params[in] UINT32 PageSizeDest;
+ *     Page size of Destination Surface.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_CONTEXT (0x20801329U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x29" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_CONTEXT_PARAMS {
+    NvU32 CBCBaseAddress;
+    NV_DECLARE_ALIGNED(NvU64 backingStorePA, 8);
+    NV_DECLARE_ALIGNED(NvU8 *backingStoreVA, 8);
+    NV_DECLARE_ALIGNED(NvU64 backingStoreChunkPA, 8);
+    NV_DECLARE_ALIGNED(NvU8 *backingStoreChunkVA, 8);
+    NvU32 backingStoreChunkSize;
+    NV_DECLARE_ALIGNED(NvU8 *cacheWriteBitMap, 8);
+    NvBool backingStoreChunkOverfetch;
+    NvU32 PageSizeSrc;
+    NvU32 PageSizeDest;
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_CONTEXT_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITS < Deprecated >
+ *
+ * Retrieves the Compression and Fast Clear bits for the surface+offset given.
+ *
+ * @params[out] NvU32 *fcbits;
+ *     Fast Clear Bits returned
+ * @params[out] NvU32 *compbits;
+ *     Compression Bits returned
+ * @params[in] NvU64 dataPhysicalStart;
+ *     Start Address of Data
+ * @params[in] NvU64 surfaceOffset;
+ *     Offset in the surface
+ * @params[in] NvU32 comptagLine;
+ *     Compression Tag Number
+ * @params[in] NvBool upper64KBCompbitSel;
+ *     Selects Upper or Lower 64K
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITS (0x2080132aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2A" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITS_PARAMS {
+    NV_DECLARE_ALIGNED(NvU32 *fcbits, 8);
+    NV_DECLARE_ALIGNED(NvU32 *compbits, 8);
+    NV_DECLARE_ALIGNED(NvU64 dataPhysicalStart, 8);
+    NV_DECLARE_ALIGNED(NvU64 surfaceOffset, 8);
+    NvU32  comptagLine;
+    NvBool upper64KBCompbitSel;
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITS < Deprecated >
+ *
+ * Sets the Compression and Fast Clear bits for the surface+offset given.
+ *
+ * @params[in] NvU32 fcbits;
+ *     Fast Clear Bits to write.
+ * @params[in] NvU32 compbits;
+ *     Compression Bits to write
+ * @params[in] NvBool writeFc;
+ *     Indicates if Fast Clear Bits should be written
+ * @params[in] NvU64 dataPhysicalStart;
+ *     Start Address of Data
+ * @params[in] NvU64 surfaceOffset;
+ *     Offset in the surface
+ * @params[in] NvU32 comptagLine;
+ *     Compression Tag Number
+ * @params[in] NvBool upper64KBCompbitSel;
+ *     Selects Upper or Lower 64K
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITS (0x2080132bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2B" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITS_PARAMS {
+    NvU32  fcbits;
+    NvU32  compbits;
+    NvBool writeFc;
+    NV_DECLARE_ALIGNED(NvU64 dataPhysicalStart, 8);
+    NV_DECLARE_ALIGNED(NvU64 surfaceOffset, 8);
+    NvU32  comptagLine;
+    NvBool upper64KBCompbitSel;
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPBITS64KB < Deprecated >
+ *
+ * Read 64KB chunk of CompBits
+ *
+ * @params[in] NvU64 SrcDataPhysicalStart;
+ *     Start Address of Data
+ * @params[in] NvU32 SrcComptagLine;
+ *     Compression Tag Number
+ * @params[in] NvU32 page64KB;
+ *     Which 64K block to read from.
+ * @params[out] NvU32 *compbitBuffer;
+ *     Buffer for CompBits read.
+ * @params[in] NvBool upper64KBCompbitSel;
+ *     Selects Upper or Lower 64K
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPBITS64KB (0x2080132cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2C" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPBITS64KB_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 SrcDataPhysicalStart, 8);
+    NvU32 SrcComptagLine;
+    NvU32 page64KB;
+    NV_DECLARE_ALIGNED(NvU32 *compbitBuffer, 8);
+    NvBool upper64KBCompbitSel;
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPBITS64KB_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPBITS64KB < Deprecated >
+ *
+ * Write 64KB chunk of CompBits.
+ *
+ * @params[in] NvU64 DstDataPhysicalStart;
+ *     Start Address of Data
+ * @params[in] NvU32 DstComptagLine;
+ *     Compression Tag Number
+ * @params[in] NvU32 page64KB;
+ *     Which 64K block to write to.
+ * @params[in] NvU32 *compbitBuffer;
+ *     Buffer for CompBits to write.
+ * @params[in] NvBool upper64KBCompbitSel;
+ *     Selects Upper or Lower 64K
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPBITS64KB (0x2080132dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2D" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPBITS64KB_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 DstDataPhysicalStart, 8);
+    NvU32 DstComptagLine;
+    NvU32 page64KB;
+    NV_DECLARE_ALIGNED(NvU32 *compbitBuffer, 8);
+    NvBool upper64KBCompbitSel;
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPBITS64KB_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_FORCE_BAR1 < Deprecated >
+ *
+ * Used by MODS (and possibly other clients) to have the compbit code write
+ * directly to BAR1, rather than an intermediate buffer.
+ *
+ * @params[in] NvBool bForceBar1;
+ *     Enables or disables direct writes to BAR1.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_FORCE_BAR1 (0x20801335U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x35" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_FORCE_BAR1_PARAMS {
+    NvBool bForceBar1;
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_FORCE_BAR1_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_GET_AMAP_CONF
+ *
+ * Fills in fields of a structure of class ConfParamsV1, which is used for
+ * swizzling compression bits in the compression backing store.
+ * The caller is expected to have the appropriate headers for class ConfParamsV1.
+ *
+ * @params[in|out] void *pAmapConfParams
+ *     Opaque pointer to structure of values for ConfParamsV1
+ * @params[in|out] void *pCbcSwizzleParams
+ *     Opaque pointer to structure of values for CbcSwizzleParamsV1
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *
+ * pCbcSwizzleParams will be filled in with certain parameters from
+ * @CbcSwizzleParamsV1. However, the caller is responsible for making sure
+ * all parameters are filled in before using it.
+ */
+#define NV2080_CTRL_CMD_FB_GET_AMAP_CONF (0x20801336U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS_MESSAGE_ID (0x36U)
+
+typedef struct NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pAmapConfParams, 8);
+    NV_DECLARE_ALIGNED(NvP64 pCbcSwizzleParams, 8);
+} NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_CBC_OP
+ *
+ * Provides a way for clients to request a CBC Operation
+ *
+ * @params[in] CTRL_CMD_FB_CBC_OP fbCBCOp
+ *     CBC Operation requested.
+ *     Valid Values:
+ *       CTRL_CMD_FB_CBC_OP_CLEAN
+ *       CTRL_CMD_FB_CBC_OP_INVALIDATE
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_TIMEOUT
+ */
+#define NV2080_CTRL_CMD_FB_CBC_OP (0x20801337U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_CBC_OP_PARAMS_MESSAGE_ID" */
+
+/*!
+ * Permitted CBC Operations
+ */
+typedef enum CTRL_CMD_FB_CBC_OP {
+    CTRL_CMD_FB_CBC_OP_CLEAN = 0,
+    CTRL_CMD_FB_CBC_OP_INVALIDATE = 1,
+} CTRL_CMD_FB_CBC_OP;
+
+#define NV2080_CTRL_CMD_FB_CBC_OP_PARAMS_MESSAGE_ID (0x37U)
+
+typedef struct NV2080_CTRL_CMD_FB_CBC_OP_PARAMS {
+    CTRL_CMD_FB_CBC_OP fbCBCOp;
+} NV2080_CTRL_CMD_FB_CBC_OP_PARAMS;
+
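Requesting a CBC operation is a single-field call. The sketch below (illustrative only, using the same hypothetical rmControl() wrapper as earlier) asks for a clean and singles out the timeout case:

    NV2080_CTRL_CMD_FB_CBC_OP_PARAMS params;
    NV_STATUS status;

    params.fbCBCOp = CTRL_CMD_FB_CBC_OP_CLEAN;
    status = rmControl(NV2080_CTRL_CMD_FB_CBC_OP, &params, sizeof(params));
    if (status == NV_ERR_TIMEOUT)
    {
        /* The clean did not complete in time; retry or escalate. */
    }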
+/*!
+ * NV2080_CTRL_CMD_FB_GET_CTAGS_FOR_CBC_EVICTION
+ *
+ * The call will fetch the compression tags reserved for CBC eviction.
+ *
+ * Each comptag will correspond to a unique compression cacheline. The usage of
+ * these comptags is to evict the CBC by making accesses to a dummy compressed
+ * page, thereby evicting each CBC line.
+ *
+ * @param [in][out] NvU32 pCompTags
+ *     Array of reserved compression tags of size @ref
+ *     NV2080_MAX_CTAGS_FOR_CBC_EVICTION
+ * @param [out] numCompTags
+ *     Number of entries returned in @ref pCompTags
+ *
+ * @returns
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_OUT_OF_RANGE
+ *   NV_ERR_INVALID_PARAMETER
+ */
+#define NV2080_CTRL_CMD_FB_GET_CTAGS_FOR_CBC_EVICTION (0x20801338U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS_MESSAGE_ID" */
+
+/*!
+ * Max size of @ref NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS::pCompTags
+ * Arbitrary, but sufficiently large number. Should be checked against CBC size.
+ */
+#define NV2080_MAX_CTAGS_FOR_CBC_EVICTION 0x7FU
+
+#define NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS_MESSAGE_ID (0x38U)
+
+typedef struct NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS {
+    NvU32 pCompTags[NV2080_MAX_CTAGS_FOR_CBC_EVICTION];
+    NvU32 numCompTags;
+} NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS;
+
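The eviction-tag query returns a variable-length list; a caller might walk it as sketched here (illustrative; rmControl() as before). Actually mapping and touching a dummy compressed page is outside this header's scope and is left as a comment.

    NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS params = {0};

    if (rmControl(NV2080_CTRL_CMD_FB_GET_CTAGS_FOR_CBC_EVICTION, &params, sizeof(params)) == NV_OK)
    {
        for (NvU32 i = 0; i < params.numCompTags; i++)
        {
            /* Access a dummy compressed page tagged with params.pCompTags[i]
             * to force the corresponding CBC line to be evicted. */
        }
    }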
+/*!
+ * NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE
+ *
+ * This call allocates compression tags.
+ *
+ * @params[in] NvU32 attr
+ *     Stores the information:
+ *     1. NVOS32_ATTR_COMPR_REQUIRED or not
+ *     2. NVOS32_ATTR_PAGE_SIZE
+ * @params[in] NvU32 attr2
+ *     Determines whether to allocate
+ *     an entire cache line or allocate by size
+ * @params[in] NvU32 size
+ *     Specifies the size of the allocation, in pages, not bytes
+ * @params[in] NvU32 ctagOffset
+ *     Determines the offset usage of the allocation
+ * @params[out] NvU32 hwResId
+ *     Stores the result of the allocation
+ * @params[out] NvU32 retCompTagLineMin
+ *     The resulting min Ctag Number from the allocation
+ * @params[out] NvU32 retCompTagLineMax
+ *     The resulting max Ctag Number from the allocation
+ *
+ * @returns
+ *   NV_OK
+ *   NV_ERR_INSUFFICIENT_RESOURCES
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+
+#define NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE (0x20801339U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE_PARAMS_MESSAGE_ID (0x39U)
+
+typedef struct NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE_PARAMS {
+    NvU32 attr;
+    NvU32 attr2;
+    NvU32 size;
+    NvU32 ctagOffset;
+    NvU32 hwResId;
+    NvU32 retCompTagLineMin;
+    NvU32 retCompTagLineMax;
+} NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_FREE_TILE
+ *
+ * This control call is used to release a tile back to the free pool.
+ *
+ * @params[in] NvU32 hwResId
+ *     Stores the information of a previous allocation
+ * @returns
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+
+#define NV2080_CTRL_CMD_FB_FREE_TILE (0x2080133aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_FREE_TILE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_FREE_TILE_PARAMS_MESSAGE_ID (0x3AU)
+
+typedef struct NV2080_CTRL_CMD_FB_FREE_TILE_PARAMS {
+    NvU32 hwResId;
+} NV2080_CTRL_CMD_FB_FREE_TILE_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_SETUP_VPR_REGION
+ *
+ * This control command is used to request VPR region setup.
+ *
+ * requestType
+ *   NV2080_CTRL_CMD_FB_SET_VPR: Request to setup VPR
+ *
+ * requestParams : struct VPR_REQUEST_PARAMS
+ *   It contains the VPR region request details such as:
+ *   startAddr : FB offset from which we need to setup VPR
+ *   size      : required size of the region
+ *
+ * statusParams : struct VPR_STATUS_PARAMS
+ *   This struct stores the output of the requested VPR region:
+ *   status : Whether the request was successful
+ *
+ * NV2080_CTRL_CMD_FB_VPR_ERROR_CODE :
+ *   NV2080_CTRL_CMD_FB_VPR_ERROR_GENERIC : Some unknown error occurred
+ *   NV2080_CTRL_CMD_FB_VPR_ERROR_INVALID_CLIENT_REQUEST : Request was invalid
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_SETUP_VPR_REGION (0x2080133bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS_MESSAGE_ID" */
+
+typedef enum NV2080_CTRL_CMD_FB_VPR_REQUEST_TYPE {
+    NV2080_CTRL_CMD_FB_SET_VPR = 0,
+} NV2080_CTRL_CMD_FB_VPR_REQUEST_TYPE;
+
+typedef enum NV2080_CTRL_CMD_FB_VPR_ERROR_CODE {
+    NV2080_CTRL_CMD_FB_VPR_ERROR_GENERIC = 0,
+    NV2080_CTRL_CMD_FB_VPR_ERROR_INVALID_CLIENT_REQUEST = 1,
+} NV2080_CTRL_CMD_FB_VPR_ERROR_CODE;
+
+typedef struct VPR_REQUEST_PARAMS {
+    NvU32 startAddr;
+    NvU32 size;
+} VPR_REQUEST_PARAMS;
+
+typedef struct VPR_STATUS_PARAMS {
+    NvU32 status;
+} VPR_STATUS_PARAMS;
+
+#define NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS_MESSAGE_ID (0x3BU)
+
+typedef struct NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS {
+    NV2080_CTRL_CMD_FB_VPR_REQUEST_TYPE requestType;
+    VPR_REQUEST_PARAMS                  requestParams;
+    VPR_STATUS_PARAMS                   statusParams;
+} NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS;
+typedef struct NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS *PNV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS;
+
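As a usage illustration (again with the hypothetical rmControl() wrapper; the start address and size are caller-supplied and follow whatever units the comment above implies):

    NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS params = {0};

    params.requestType             = NV2080_CTRL_CMD_FB_SET_VPR;
    params.requestParams.startAddr = vprStartAddr; /* caller-supplied FB offset */
    params.requestParams.size      = vprSize;      /* caller-supplied region size */

    if (rmControl(NV2080_CTRL_CMD_FB_SETUP_VPR_REGION, &params, sizeof(params)) == NV_OK)
    {
        /* params.statusParams.status reports whether the request succeeded. */
    }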
+/*
+ * NV2080_CTRL_CMD_FB_GET_CLI_MANAGED_OFFLINED_PAGES
+ *
+ * This command returns the list of offlined video memory page addresses in
+ * the region managed by the client.
+ *
+ * offlinedPages
+ *   This output parameter is an array of video memory physical page numbers
+ *   that are offlined. This array can hold a maximum of
+ *   NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES addresses.
+ * pageSize
+ *   This output parameter contains the size of the page that is offlined.
+ * validEntries
+ *   This output parameter specifies the number of valid entries in the
+ *   offlined array.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_FB_GET_CLI_MANAGED_OFFLINED_PAGES (0x2080133cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS_MESSAGE_ID (0x3CU)
+
+typedef struct NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS {
+    NvU32 offlinedPages[NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES]; // A 32-bit page number is sufficient here.
+    NvU32 pageSize;
+    NvU32 validEntries;
+} NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO
+ *
+ * This command returns parameters required to initialize the compbit copy
+ * object used by the address mapping library.
+ *
+ * defaultPageSize
+ *   Page size used by @ref CompBitCopy methods
+ * comptagsPerCacheLine
+ *   Number of compression tags in a single compression cache line.
+ * unpackedComptagLinesPerCacheLine
+ *   From HW (not adjusted for CompBits code): number of compression tags
+ *   in a single compression cache line.
+ * compCacheLineSizePerLTC
+ *   Size of compression cache line per L2 slice. Size in Bytes.
+ * unpackedCompCacheLineSizePerLTC
+ *   From HW (not adjusted for CompBits code): size of compression
+ *   cache line per L2 slice. Size in Bytes.
+ * slicesPerLTC
+ *   Number of L2 slices per L2 cache.
+ * numActiveLTCs
+ *   Number of active L2 caches. (Not floorswept)
+ * familyName
+ *   Family name for the GPU.
+ * chipName
+ *   Chip name for the GPU.
+ * bitsPerRAMEntry
+ *   Bits per RAM entry. (Need better doc)
+ * ramBankWidth
+ *   Width of RAM bank. (Need better doc)
+ * bitsPerComptagLine
+ *   Number of bits per compression tag line.
+ * ramEntriesPerCompCacheLine
+ *   Number of RAM entries spanned by 1 compression cache line.
+ * comptagLineSize
+ *   Size of compression tag line, in Bytes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+
+#define NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO (0x2080133dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS_MESSAGE_ID (0x3DU)
+
+typedef struct NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS {
+    NvU32 defaultPageSize;
+    NvU32 comptagsPerCacheLine;
+    NvU32 unpackedComptagLinesPerCacheLine;
+    NvU32 compCacheLineSizePerLTC;
+    NvU32 unpackedCompCacheLineSizePerLTC;
+    NvU32 slicesPerLTC;
+    NvU32 numActiveLTCs;
+    NvU32 familyName;
+    NvU32 chipName;
+    NvU32 bitsPerRAMEntry;
+    NvU32 ramBankWidth;
+    NvU32 bitsPerComptagLine;
+    NvU32 ramEntriesPerCompCacheLine;
+    NvU32 comptagLineSize;
+} NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_SET_RRD
+ *
+ * Sets the row-to-row delay on the GPU's FB.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   Any error code
+ */
+#define NV2080_CTRL_CMD_FB_SET_RRD (0x2080133eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_SET_RRD_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_SET_RRD_RESET_VALUE (~((NvU32)0))
+#define NV2080_CTRL_FB_SET_RRD_PARAMS_MESSAGE_ID (0x3EU)
+
+typedef struct NV2080_CTRL_FB_SET_RRD_PARAMS {
+    NvU32 rrd;
+} NV2080_CTRL_FB_SET_RRD_PARAMS;
+
+/*
+ * NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS
+ *
+ * This is not a control call of its own, but provides common definitions for
+ * the two NV2080_CTRL_CMD_FB_SET_READ/WRITE_LIMIT control calls.
+ */
+typedef struct NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS {
+    NvU8 limit;
+} NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS;
+#define NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_RESET_VALUE (0xffU)
+
+/*
+ * NV2080_CTRL_CMD_FB_SET_READ_LIMIT
+ *
+ * Sets the READ_LIMIT to be used in the NV_PFB_FBPA_DIR_ARB_CFG0 register
+ *
+ * limit
+ *   The limit value to use
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   Any error code
+ */
+#define NV2080_CTRL_CMD_FB_SET_READ_LIMIT (0x2080133fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_SET_READ_LIMIT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_SET_READ_LIMIT_RESET_VALUE NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_RESET_VALUE
+#define NV2080_CTRL_FB_SET_READ_LIMIT_PARAMS_MESSAGE_ID (0x3FU)
+
+typedef NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS NV2080_CTRL_FB_SET_READ_LIMIT_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_SET_WRITE_LIMIT
+ *
+ * Sets the WRITE_LIMIT to be used in the NV_PFB_FBPA_DIR_ARB_CFG0 register
+ *
+ * limit
+ *   The limit value to use
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   Any error code
+ */
+#define NV2080_CTRL_CMD_FB_SET_WRITE_LIMIT (0x20801340U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_SET_WRITE_LIMIT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_SET_WRITE_LIMIT_RESET_VALUE NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_RESET_VALUE
+#define NV2080_CTRL_FB_SET_WRITE_LIMIT_PARAMS_MESSAGE_ID (0x40U)
+
+typedef NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS NV2080_CTRL_FB_SET_WRITE_LIMIT_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_PATCH_PBR_FOR_MINING
+ *
+ * Patches some VBIOS values related to PBR to better suit mining applications
+ *
+ * bEnable
+ *   Set the mining-specific values or reset to the original values
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   Any error code
+ */
+#define NV2080_CTRL_CMD_FB_PATCH_PBR_FOR_MINING (0x20801341U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS_MESSAGE_ID (0x41U)
+
+typedef struct NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS {
+    NvBool bEnable;
+} NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_GET_MEM_ALIGNMENT
+ *
+ * Get memory alignment. Replacement for NVOS32_FUNCTION_GET_MEM_ALIGNMENT.
+ */
+#define NV2080_CTRL_CMD_FB_GET_MEM_ALIGNMENT (0x20801342U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS_MESSAGE_ID (0x42U)
+
+typedef struct NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS {
+    NvU32 alignType;   // Input
+    NvU32 alignAttr;
+    NvU32 alignInputFlags;
+    NvU32 alignHead;
+    NV_DECLARE_ALIGNED(NvU64 alignSize, 8);
+    NvU32 alignHeight;
+    NvU32 alignWidth;
+    NvU32 alignPitch;
+    NvU32 alignPad;
+    NvU32 alignMask;
+    NvU32 alignKind;
+    NvU32 alignAdjust; // Output -- if non-zero, the amount we need to adjust the offset
+    NvU32 alignAttr2;
+} NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS;
+
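Because both limit calls share NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS, setting and later restoring a limit is symmetric, as in this sketch (illustrative; rmControl() as before; 0x20 is an arbitrary example value):

    NV2080_CTRL_FB_SET_READ_LIMIT_PARAMS params;

    params.limit = 0x20; /* example limit */
    (void)rmControl(NV2080_CTRL_CMD_FB_SET_READ_LIMIT, &params, sizeof(params));

    /* ... later, restore the default behavior ... */
    params.limit = NV2080_CTRL_FB_SET_READ_LIMIT_RESET_VALUE;
    (void)rmControl(NV2080_CTRL_CMD_FB_SET_READ_LIMIT, &params, sizeof(params));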
+/*!
+ * NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR
+ *
+ * Get the CBC base physical address.
+ * This control call is required by error containment tests.
+ * NV2080_CTRL_CMD_FB_GET_AMAP_CONF can also return the CBC base address,
+ * but it requires kernel privilege and is not callable from SRT tests.
+ *
+ * @params[out] NvU64 cbcBaseAddr
+ *     Base physical address for CBC data.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR (0x20801343U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR_PARAMS_MESSAGE_ID (0x43U)
+
+typedef struct NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR_PARAMS {
+    NvU32 cbcBaseAddress;
+    NvU32 compCacheLineSize;
+    NV_DECLARE_ALIGNED(NvU64 backingStoreStartPA, 8);
+    NV_DECLARE_ALIGNED(NvU64 backingStoreAllocPA, 8);
+    NvU32 backingStoreChunkOverfetch;
+} NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR_PARAMS;
+
+#define NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING       0:0
+#define NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING_FALSE 0U
+#define NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING_TRUE  1U
+
+typedef struct NV2080_CTRL_FB_REMAP_ENTRY {
+    NvU32 remapRegVal;
+    NvU32 timestamp;
+    NvU8  fbpa;
+    NvU8  sublocation;
+    NvU8  source;
+    NvU8  flags;
+} NV2080_CTRL_FB_REMAP_ENTRY;
+
+/* valid values for source */
+#define NV2080_CTRL_FB_REMAPPED_ROW_SOURCE_SBE_FIELD (0x00000002U)
+#define NV2080_CTRL_FB_REMAPPED_ROW_SOURCE_DBE_FIELD (0x00000003U)
+
+#define NV2080_CTRL_FB_REMAPPED_ROWS_MAX_ROWS (0x00000200U)
+
+/*
+ * NV2080_CTRL_CMD_FB_GET_REMAPPED_ROWS
+ *
+ * This command returns the list of remapped rows stored in the Inforom.
+ *
+ * entryCount
+ *   This output parameter specifies the number of remapped rows
+ * flags
+ *   This output parameter contains info on whether or not there are pending
+ *   remappings and whether or not a remapping failed
+ * entries
+ *   This output parameter is an array of NV2080_CTRL_FB_REMAP_ENTRY
+ *   containing information on the remapping that occurred. This array can
+ *   hold a maximum of NV2080_CTRL_FB_REMAPPED_ROWS_MAX_ROWS
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_POINTER
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_GET_REMAPPED_ROWS (0x20801344U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_PENDING \
+    NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_PENDING_FALSE NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING_FALSE
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_PENDING_TRUE  NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING_TRUE
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_FAILURE       1:1
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_FAILURE_FALSE 0U
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_FAILURE_TRUE  1U
+
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS_MESSAGE_ID (0x44U)
+
+typedef struct NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS {
+    NvU32 entryCount;
+    NvU8  flags;
+    NV2080_CTRL_FB_REMAP_ENTRY entries[NV2080_CTRL_FB_REMAPPED_ROWS_MAX_ROWS];
+} NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS;
+
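A sketch of retrieving the remap list and testing the pending bit follows (illustrative; rmControl() as before). The bit-range flag definitions above are assumed to pair with the FLD_TEST_REF macro from nvmisc.h; the params struct is large, so it is heap-allocated here.

    NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS *pParams = calloc(1, sizeof(*pParams));

    if ((pParams != NULL) &&
        (rmControl(NV2080_CTRL_CMD_FB_GET_REMAPPED_ROWS, pParams, sizeof(*pParams)) == NV_OK))
    {
        if (FLD_TEST_REF(NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_PENDING, _TRUE, pParams->flags))
        {
            /* Remappings are recorded but not yet applied. */
        }
        for (NvU32 i = 0; i < pParams->entryCount; i++)
        {
            /* Inspect pParams->entries[i].source, .fbpa, .sublocation, ... */
        }
    }
    free(pParams);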
+// Max size of the queryParams in Bytes, so that the NV2080_CTRL_FB_FS_INFO_QUERY struct is still 32B
+#define NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE 24U
+
+/*!
+ * Structure holding the out params for NV2080_CTRL_FB_FS_INFO_INVALID_QUERY.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_INVALID_QUERY_PARAMS {
+    // Unused param, will ensure the size of NV2080_CTRL_FB_FS_INFO_QUERY struct to be 32B
+    NvU8 data[NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE];
+} NV2080_CTRL_FB_FS_INFO_INVALID_QUERY_PARAMS;
+
+/*!
+ * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_FBP_MASK.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_FBP_MASK_PARAMS {
+    /*!
+     * [in]: swizzId
+     * PartitionID associated with a created smc partition. Currently used only for a
+     * device monitoring client to get the physical values of the FB. The client needs to pass
+     * 'NV2080_CTRL_GPU_PARTITION_ID_INVALID' explicitly if it wants RM to ignore the swizzId.
+     * RM will consider this request similar to a legacy case.
+     * The client's subscription is used only as a capability check and not as an input swizzId.
+     */
+    NvU32 swizzId;
+    /*!
+     * [out]: physical/local fbp mask.
+     */
+    NV_DECLARE_ALIGNED(NvU64 fbpEnMask, 8);
+} NV2080_CTRL_FB_FS_INFO_FBP_MASK_PARAMS;
+
+/*!
+ * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_LTC_MASK.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_LTC_MASK_PARAMS {
+    /*!
+     * [in]: physical/local FB partition index.
+     */
+    NvU32 fbpIndex;
+    /*!
+     * [out]: physical/local ltc mask.
+     */
+    NvU32 ltcEnMask;
+} NV2080_CTRL_FB_FS_INFO_LTC_MASK_PARAMS;
+
+/*!
+ * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_LTS_MASK.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_LTS_MASK_PARAMS {
+    /*!
+     * [in]: physical/local FB partition index.
+     */
+    NvU32 fbpIndex;
+    /*!
+     * [out]: physical/local lts mask.
+     * Note that lts bits are flattened out for all ltc within a fbp.
+     */
+    NvU32 ltsEnMask;
+} NV2080_CTRL_FB_FS_INFO_LTS_MASK_PARAMS;
+
+/*!
+ * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_FBPA_MASK.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_FBPA_MASK_PARAMS {
+    /*!
+     * [in]: physical/local FB partition index.
+     */
+    NvU32 fbpIndex;
+    /*!
+     * [out]: physical/local FBPA mask.
+     */
+    NvU32 fbpaEnMask;
+} NV2080_CTRL_FB_FS_INFO_FBPA_MASK_PARAMS;
+
+/*!
+ * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK_PARAMS {
+    /*!
+     * [in]: physical/local FB partition index.
+     */
+    NvU32 fbpIndex;
+    /*!
+     * [out]: physical/local FBPA-SubPartition mask.
+     */
+    NvU32 fbpaSubpEnMask;
+} NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK_PARAMS;
+
+/*!
+ * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP_PARAMS {
+    /*!
+     * [in]: physical/local FB partition index.
+     */
+    NvU32 fbpIndex;
+    /*!
+     * [out]: Logical/local FBP index
+     */
+    NvU32 fbpLogicalIndex;
+} NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP_PARAMS;
+
+/*!
+ * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_ROP_MASK.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_ROP_MASK_PARAMS {
+    /*!
+     * [in]: physical/local FB partition index.
+     */
+    NvU32 fbpIndex;
+    /*!
+     * [out]: physical/local ROP mask.
+     */
+    NvU32 ropEnMask;
+} NV2080_CTRL_FB_FS_INFO_ROP_MASK_PARAMS;
+
+/*!
+ * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_SYS_MASK.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_SYS_MASK_PARAMS {
+    /*!
+     * [in]: swizzId
+     * PartitionID associated with a created smc partition. Currently used only for a
+     * device monitoring client to get the physical values of the sys. The client needs to pass
+     * 'NV2080_CTRL_GPU_PARTITION_ID_INVALID' explicitly if it wants RM to ignore the swizzId.
+     * RM will consider this request similar to a legacy case.
+     */
+    NvU32 swizzId;
+    /*!
+     * [out]: physical/local sys mask.
+     */
+    NV_DECLARE_ALIGNED(NvU64 sysEnMask, 8);
+} NV2080_CTRL_FB_FS_INFO_SYS_MASK_PARAMS;
+
+/*!
+ * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK.
+ */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK_PARAMS { + /*! + * [in]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! + * [in]: swizzId + * PartitionID associated with a created smc partition. + */ + NvU32 swizzId; + /*! + * [out]: physical ltc mask. + */ + NvU32 ltcEnMask; +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK_PARAMS { + /*! + * [in]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! + * [in]: swizzId + * PartitionID associated with a created smc partition. + */ + NvU32 swizzId; + /*! + * [out]: physical lts mask. + */ + NvU32 ltsEnMask; +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK_PARAMS { + /*! + * [in]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! + * [in]: swizzId + * PartitionID associated with a created smc partition. + */ + NvU32 swizzId; + /*! + * [out]: physical fbpa mask. + */ + NvU32 fbpaEnMask; +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK_PARAMS { + /*! + * [in]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! + * [in]: swizzId + * PartitionID associated with a created smc partition. + */ + NvU32 swizzId; + /*! + * [out]: physical rop mask. + */ + NvU32 ropEnMask; +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK_PARAMS { + /*! + * [in]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! + * [in]: swizzId + * PartitionID associated with a created smc partition. Currently used only for a + * device monitoring client to get the physical values of the FB. The client needs to pass + * 'NV2080_CTRL_GPU_PARTITION_ID_INVALID' explicitly if it wants RM to ignore the swizzId. + * RM will consider this request similar to a legacy case. + * The client's subscription is used only as a capability check and not as an input swizzId. + */ + NvU32 swizzId; + /*! + * [out]: physical FBPA_SubPartition mask associated with requested partition. + */ + NV_DECLARE_ALIGNED(NvU64 fbpaSubpEnMask, 8); +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_SYSL2_FS_INFO_SYSLTC_MASK. + */ +typedef struct NV2080_CTRL_SYSL2_FS_INFO_SYSLTC_MASK_PARAMS { + /*! + * [in]: physical/local sys Id. + */ + NvU32 sysIdx; + /*! + * [out]: physical/local sysltc mask. + */ + NvU32 sysl2LtcEnMask; +} NV2080_CTRL_SYSL2_FS_INFO_SYSLTC_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_SYSL2_FS_INFO_SYSLTS_MASK. + */ +typedef struct NV2080_CTRL_SYSL2_FS_INFO_SYSLTS_MASK_PARAMS { + /*! + * [IN]: physical/local SYS index. + */ + NvU32 sysIdx; + /*! + * [OUT]: physical/local lts mask. + * Note: this lts mask should be flattened out within a sys chiplet + */ + NV_DECLARE_ALIGNED(NvU64 sysl2LtsEnMask, 8); +} NV2080_CTRL_SYSL2_FS_INFO_SYSLTS_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PAC_MASK. 
+ */ +typedef struct NV2080_CTRL_FB_FS_INFO_PAC_MASK_PARAMS { + /*! + * [in]: physical/local FB partition index. + */ + NvU32 fbpIndex; + /*! + * [out]: physical/local PAC mask. + */ + NvU32 pacEnMask; +} NV2080_CTRL_FB_FS_INFO_PAC_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_LOGICAL_LTC_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_LOGICAL_LTC_MASK_PARAMS { + /*! + * [in]: physical/local FB partition index. + */ + NvU32 fbpIndex; + /*! + * [out]: logical/local ltc mask. + */ + NV_DECLARE_ALIGNED(NvU64 logicalLtcEnMask, 8); +} NV2080_CTRL_FB_FS_INFO_LOGICAL_LTC_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LOGICAL_LTC_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LOGICAL_LTC_MASK_PARAMS { + /*! + * [in]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! + * [in]: swizzId + * PartitionID associated with a created smc partition. + */ + NvU32 swizzId; + /*! + * [out]: logical ltc mask. + */ + NV_DECLARE_ALIGNED(NvU64 logicalLtcEnMask, 8); +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LOGICAL_LTC_MASK_PARAMS; + +// Possible values for queryType +#define NV2080_CTRL_FB_FS_INFO_INVALID_QUERY 0x0U +#define NV2080_CTRL_FB_FS_INFO_FBP_MASK 0x1U +#define NV2080_CTRL_FB_FS_INFO_LTC_MASK 0x2U +#define NV2080_CTRL_FB_FS_INFO_LTS_MASK 0x3U +#define NV2080_CTRL_FB_FS_INFO_FBPA_MASK 0x4U +#define NV2080_CTRL_FB_FS_INFO_ROP_MASK 0x5U +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK 0x6U +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK 0x7U +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK 0x8U +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK 0x9U +#define NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK 0xAU +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK 0xBU +#define NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP 0xCU +#define NV2080_CTRL_SYSL2_FS_INFO_SYSLTC_MASK 0xDU +#define NV2080_CTRL_FB_FS_INFO_PAC_MASK 0xEU +#define NV2080_CTRL_FB_FS_INFO_LOGICAL_LTC_MASK 0xFU +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LOGICAL_LTC_MASK 0x10U +#define NV2080_CTRL_SYSL2_FS_INFO_SYSLTS_MASK 0x11U +#define NV2080_CTRL_FB_FS_INFO_SYS_MASK 0x12U + +typedef struct NV2080_CTRL_FB_FS_INFO_QUERY { + NvU16 queryType; + NvU8 reserved[2]; + NvU32 status; + union { + NV2080_CTRL_FB_FS_INFO_INVALID_QUERY_PARAMS inv; + NV_DECLARE_ALIGNED(NV2080_CTRL_FB_FS_INFO_FBP_MASK_PARAMS fbp, 8); + NV2080_CTRL_FB_FS_INFO_LTC_MASK_PARAMS ltc; + NV2080_CTRL_FB_FS_INFO_LTS_MASK_PARAMS lts; + NV2080_CTRL_FB_FS_INFO_FBPA_MASK_PARAMS fbpa; + NV2080_CTRL_FB_FS_INFO_ROP_MASK_PARAMS rop; + NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK_PARAMS dmLtc; + NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK_PARAMS dmLts; + NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK_PARAMS dmFbpa; + NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK_PARAMS dmRop; + NV_DECLARE_ALIGNED(NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK_PARAMS dmFbpaSubp, 8); + NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK_PARAMS fbpaSubp; + NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP_PARAMS fbpLogicalMap; + NV2080_CTRL_SYSL2_FS_INFO_SYSLTC_MASK_PARAMS sysl2Ltc; + NV2080_CTRL_FB_FS_INFO_PAC_MASK_PARAMS pac; + NV_DECLARE_ALIGNED(NV2080_CTRL_FB_FS_INFO_LOGICAL_LTC_MASK_PARAMS logicalLtc, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LOGICAL_LTC_MASK_PARAMS dmLogicalLtc, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_SYSL2_FS_INFO_SYSLTS_MASK_PARAMS sysl2Lts, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_FB_FS_INFO_SYS_MASK_PARAMS sys, 8); + } queryParams; +} NV2080_CTRL_FB_FS_INFO_QUERY; + 
+// Max number of queries that can be batched in a single call to NV2080_CTRL_CMD_FB_GET_FS_INFO
+#define NV2080_CTRL_FB_FS_INFO_MAX_QUERIES 120U
+
+#define NV2080_CTRL_FB_GET_FS_INFO_PARAMS_MESSAGE_ID (0x46U)
+
+typedef struct NV2080_CTRL_FB_GET_FS_INFO_PARAMS {
+    NvU16 numQueries;
+    NvU8  reserved[6];
+    NV_DECLARE_ALIGNED(NV2080_CTRL_FB_FS_INFO_QUERY queries[NV2080_CTRL_FB_FS_INFO_MAX_QUERIES], 8);
+} NV2080_CTRL_FB_GET_FS_INFO_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_GET_FS_INFO
+ *
+ * This control call returns the fb engine information for a partition/GPU.
+ * It supports an interface so that the caller can issue multiple queries by
+ * batching them in a single call. It returns the first error it encounters.
+ *
+ * numQueries[IN]
+ *   - Specifies the number of valid queries.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_GET_FS_INFO (0x20801346U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_FS_INFO_PARAMS_MESSAGE_ID" */
+
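A batched query might look like the sketch below (illustrative; rmControl() as before). Note that each query carries its own status in addition to the control call's overall return value; NV2080_CTRL_GPU_PARTITION_ID_INVALID comes from the GPU control header and is used here to request legacy, non-MIG behavior.

    NV2080_CTRL_FB_GET_FS_INFO_PARAMS params = {0};

    params.numQueries = 2;

    params.queries[0].queryType                = NV2080_CTRL_FB_FS_INFO_FBP_MASK;
    params.queries[0].queryParams.fbp.swizzId  = NV2080_CTRL_GPU_PARTITION_ID_INVALID;

    params.queries[1].queryType                = NV2080_CTRL_FB_FS_INFO_LTC_MASK;
    params.queries[1].queryParams.ltc.fbpIndex = 0;

    if ((rmControl(NV2080_CTRL_CMD_FB_GET_FS_INFO, &params, sizeof(params)) == NV_OK) &&
        (params.queries[0].status == NV_OK) &&
        (params.queries[1].status == NV_OK))
    {
        NvU64 fbpMask = params.queries[0].queryParams.fbp.fbpEnMask;
        NvU32 ltcMask = params.queries[1].queryParams.ltc.ltcEnMask;
        /* Iterate the set bits of fbpMask/ltcMask as needed. */
    }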
+#define NV2080_CTRL_FB_HISTOGRAM_IDX_NO_REMAPPED_ROWS              (0x0U)
+#define NV2080_CTRL_FB_HISTOGRAM_IDX_SINGLE_REMAPPED_ROW           (0x1U)
+#define NV2080_CTRL_FB_HISTOGRAM_IDX_MIXED_REMAPPED_REMAINING_ROWS (0x2U)
+#define NV2080_CTRL_FB_HISTOGRAM_IDX_SINGLE_REMAINING_ROW          (0x3U)
+#define NV2080_CTRL_FB_HISTOGRAM_IDX_MAX_REMAPPED_ROWS             (0x4U)
+
+#define NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS_MESSAGE_ID (0x47U)
+
+typedef struct NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS {
+    NvU32 histogram[5];
+} NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_GET_ROW_REMAPPER_HISTOGRAM
+ *
+ * This control call returns stats on the number of banks that have a certain
+ * number of rows remapped in the bank. Specifically, the number of banks that
+ * have 0, 1, 2 through (max-2), max-1 and max rows remapped in the bank.
+ * Values will be returned in an array.
+ *
+ * Index values are:
+ *
+ *   NV2080_CTRL_FB_HISTOGRAM_IDX_NO_REMAPPED_ROWS
+ *     Number of banks with zero rows remapped
+ *   NV2080_CTRL_FB_HISTOGRAM_IDX_SINGLE_REMAPPED_ROW
+ *     Number of banks with one row remapped
+ *   NV2080_CTRL_FB_HISTOGRAM_IDX_MIXED_REMAPPED_REMAINING_ROWS
+ *     Number of banks with 2 through (max-2) rows remapped
+ *   NV2080_CTRL_FB_HISTOGRAM_IDX_SINGLE_REMAINING_ROW
+ *     Number of banks with (max-1) rows remapped
+ *   NV2080_CTRL_FB_HISTOGRAM_IDX_MAX_REMAPPED_ROWS
+ *     Number of banks with max rows remapped
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_GET_ROW_REMAPPER_HISTOGRAM (0x20801347U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_FB_GET_DYNAMIC_OFFLINED_PAGES
+ *
+ * This command returns the list of dynamically blacklisted video memory page
+ * addresses since the last driver load.
+ *
+ * offlined
+ *   This output parameter is an array of
+ *   NV2080_CTRL_FB_DYNAMIC_OFFLINED_ADDRESS_INFO. This array can hold a
+ *   maximum of NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES.
+ * validEntries
+ *   This output parameter specifies the number of valid entries in the
+ *   offlined array.
+ * baseIndex
+ *   With the limit of up to 512 blacklisted pages, the size of this array
+ *   exceeds the rpc buffer limit. This control call will collect the data
+ *   in multiple passes. This parameter indicates the start index of the
+ *   data to be passed back to the caller.
+ *   This cannot be greater than NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_PAGES.
+ * bMore
+ *   This parameter indicates whether there are more valid elements to be
+ *   fetched.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_FB_GET_DYNAMIC_OFFLINED_PAGES (0x20801348U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS_MESSAGE_ID" */
+
+/* Maximum pages that can be dynamically blacklisted */
+#define NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_PAGES 512U
+
+/*
+ * Maximum entries that can be sent in a single pass of
+ * NV2080_CTRL_CMD_FB_GET_DYNAMIC_OFFLINED_PAGES
+ */
+#define NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES 64U
+
+/**
+ * NV2080_CTRL_FB_DYNAMIC_OFFLINED_ADDRESS_INFO
+ *
+ * pageNumber
+ *   This output parameter specifies the dynamically blacklisted page number.
+ * source
+ *   The reason for the page to be retired. Valid values for
+ *   this parameter include:
+ *   NV2080_CTRL_FB_DYNAMIC_BLACKLISTED_PAGES_SOURCE_INVALID
+ *     Invalid source.
+ *   NV2080_CTRL_FB_DYNAMIC_BLACKLISTED_PAGES_SOURCE_DPR_DBE
+ *     Page retired by dynamic page retirement due to a double-bit
+ *     error.
+ */
+typedef struct NV2080_CTRL_FB_DYNAMIC_OFFLINED_ADDRESS_INFO {
+    NV_DECLARE_ALIGNED(NvU64 pageNumber, 8);
+    NvU8 source;
+} NV2080_CTRL_FB_DYNAMIC_OFFLINED_ADDRESS_INFO;
+
+#define NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS_MESSAGE_ID (0x48U)
+
+typedef struct NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_FB_DYNAMIC_OFFLINED_ADDRESS_INFO offlined[NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES], 8);
+    NvU32  validEntries;
+    NvU32  baseIndex;
+    NvBool bMore;
+} NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS;
+
+/* valid values for source */
+#define NV2080_CTRL_FB_DYNAMIC_BLACKLISTED_PAGES_SOURCE_INVALID (0x00000000U)
+#define NV2080_CTRL_FB_DYNAMIC_BLACKLISTED_PAGES_SOURCE_DPR_DBE (0x00000001U)
+
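Fetching the full dynamic list therefore takes up to MAX_PAGES/MAX_ENTRIES passes. The loop below is a sketch; in particular, advancing baseIndex by MAX_ENTRIES per pass is an assumption about the pagination contract, so verify it against the RM implementation.

    NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS params = {0};

    do
    {
        if (rmControl(NV2080_CTRL_CMD_FB_GET_DYNAMIC_OFFLINED_PAGES,
                      &params, sizeof(params)) != NV_OK)
            break;

        for (NvU32 i = 0; i < params.validEntries; i++)
        {
            /* Consume params.offlined[i].pageNumber / .source. */
        }

        /* Assumed: the caller advances the window for the next pass. */
        params.baseIndex += NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES;
    } while (params.bMore);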
RM returns the info pertaining to each of the clients that have allocations + * known about by RM in pClientInfo. The format of the allocation information is given by + * NV2080_CTRL_CMD_FB_CLIENT_INFO. Information is only returned if and only if + * allocCount[IN]>=allocCount[OUT] and clientCount[IN]>=clientCount[OUT]. + * + * @returns Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_POINTER + * NV_ERR_NO_MEMORY + * + * @Usage: All privileged RM clients for debugging only. Initially, call this with allocCount = + * clientCount = 0 to get client count, and then call again with allocated memory and sizes. + * Client can repeat with the new count-sized allocations until a maximum try count is + * reached or client is out of memory. + */ + +#define NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO (0x20801349U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO_PARAMS_MESSAGE_ID" */ + +/* + * These work with the FLD_SET_REF_NUM and FLD_TEST_REF macros and describe the 'flags' member + * of the NV2080_CTRL_CMD_FB_ALLOCATION_INFO struct. + */ + +// Address space of the allocation +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_TYPE 4:0 +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_TYPE_SYSMEM 0U +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_TYPE_VIDMEM 1U + +// Whether the allocation is shared +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_SHARED 5:5 +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_SHARED_FALSE 0U +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_SHARED_TRUE 1U + +// Whether this client owns this allocation +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_OWNER 6:6 +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_OWNER_FALSE 0U +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_OWNER_TRUE 1U + +typedef struct NV2080_CTRL_CMD_FB_ALLOCATION_INFO { + NvU32 client; /* [OUT] Identifies the client that made or shares the allocation (index into pClientInfo)*/ + NvU32 flags; /* [OUT] Flags associated with the allocation (see previous defines) */ + NV_DECLARE_ALIGNED(NvU64 beginAddr, 8); /* [OUT] Starting physical address of the chunk */ + NV_DECLARE_ALIGNED(NvU64 size, 8); /* [OUT] Size of the allocated contiguous chunk in bytes */ +} NV2080_CTRL_CMD_FB_ALLOCATION_INFO; + +typedef struct NV2080_CTRL_CMD_FB_CLIENT_INFO { + NvHandle handle; /* [OUT] Handle of the client that made or shares the allocation */ + NvU32 pid; /* [OUT] PID of the client that made or shares the allocation */ + + /* For the definition of the subprocessID and subprocessName params, see NV0000_CTRL_CMD_SET_SUB_PROCESS_ID */ + NvU32 subProcessID; /* [OUT] Subprocess ID of the client that made or shares the allocation */ + char subProcessName[NV_PROC_NAME_MAX_LENGTH]; /* [OUT] Subprocess Name of the client that made or shares the allocation */ +} NV2080_CTRL_CMD_FB_CLIENT_INFO; + +#define NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO_PARAMS_MESSAGE_ID (0x49U) + +typedef struct NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO_PARAMS { + NV_DECLARE_ALIGNED(NvU64 allocCount, 8); + NV_DECLARE_ALIGNED(NvP64 pAllocInfo, 8); + NV_DECLARE_ALIGNED(NvU64 clientCount, 8); + NV_DECLARE_ALIGNED(NvP64 pClientInfo, 8); +} NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_UPDATE_NUMA_STATUS + * + * This control command is used by clients to update the NUMA status. 
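+ *
+ * A minimal usage sketch (illustrative only; rmControl() stands in for the
+ * client's RM control dispatch path, and the hClient/hSubdevice handles are
+ * assumed to already exist):
+ *
+ *   NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS numa = { 0 };
+ *   NV_STATUS status;
+ *
+ *   numa.bOnline = NV_TRUE;
+ *   status = rmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_CMD_FB_UPDATE_NUMA_STATUS,
+ *                      &numa, sizeof(numa));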
+ * + * @params [IN] NvBool bOnline: + * + * @returns Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + * + */ +#define NV2080_CTRL_CMD_FB_UPDATE_NUMA_STATUS (0x20801350U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS_MESSAGE_ID (0x50U) + +typedef struct NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS { + NvBool bOnline; +} NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_GET_NUMA_INFO + * + * This control command is used by clients to get per-subdevice or + * subscribed MIG partition(when MIG is enabled) NUMA memory information as + * assigned by the system. + * + * numaNodeId[OUT] + * - Specifies the NUMA node ID. + * + * numaMemAddr[OUT] + * - Specifies the NUMA memory address. + * + * numaMemSize[OUT] + * - Specifies the NUMA memory size. + * + * numaOfflineAddressesCount[IN/OUT] + * - If non-zero, then it specifies the maximum number of entries in + * numaOfflineAddresses[] for which the information is required. + * It will be updated with the actual number of entries present in + * the numaOfflineAddresses[]. + * + * numaOfflineAddresses[OUT] + * - If numaOfflineAddressesCount is non-zero, it contains the addresses + * of offline pages in the NUMA region. + * + * @returns Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_GET_NUMA_INFO (0x20801351U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_NUMA_INFO_MAX_OFFLINE_ADDRESSES 64U + +#define NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS_MESSAGE_ID (0x51U) + +typedef struct NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS { + NvS32 numaNodeId; + NV_DECLARE_ALIGNED(NvU64 numaMemAddr, 8); + NV_DECLARE_ALIGNED(NvU64 numaMemSize, 8); + NvU32 numaOfflineAddressesCount; + NV_DECLARE_ALIGNED(NvU64 numaOfflineAddresses[NV2080_CTRL_FB_NUMA_INFO_MAX_OFFLINE_ADDRESSES], 8); +} NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_GET_SEMAPHORE_SURFACE_LAYOUT + * + * This control command is used by clients to get NV_SEMAPHORE_SURFACE layout/caps before allocation. + * A semaphore surface can be viewed as an array of independent semaphore entries. + * + * maxSubmittedSemaphoreValueOffset[OUT] + * - An offset of the max submitted value, relative to the semaphore surface entry start, if used. + * Used to emulate 64-bit semaphore values on chips where 64-bit semaphores are not supported. + * + * monitoredFenceThresholdOffset[OUT] + * - An offset of the monitored fence memory, relative to the semaphore surface entry start, if supported. + * + * size[OUT] + * - A size of a single semaphore surface entry. + * + * caps[OUT] + * - A mask of NV2080_CTRL_FB_GET_SEMAPHORE_SURFACE_LAYOUT_CAPS_* values. 
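+ *
+ * A usage sketch (illustrative only; rmControl() stands in for the client's
+ * RM control dispatch path, and hClient/hSubdevice/entryIdx are assumed):
+ *
+ *   NV2080_CTRL_FB_GET_SEMAPHORE_SURFACE_LAYOUT_PARAMS layout = { 0 };
+ *   NV_STATUS status;
+ *   NvU64 thresholdOffset;
+ *
+ *   status = rmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_CMD_FB_GET_SEMAPHORE_SURFACE_LAYOUT,
+ *                      &layout, sizeof(layout));
+ *
+ *   // Entry entryIdx starts at (entryIdx * layout.size); the monitored fence
+ *   // threshold, when supported, lives at that base plus its reported offset.
+ *   if ((status == NV_OK) &&
+ *       (layout.caps & NV2080_CTRL_FB_GET_SEMAPHORE_SURFACE_LAYOUT_CAPS_MONITORED_FENCE_SUPPORTED))
+ *   {
+ *       thresholdOffset = (entryIdx * layout.size) + layout.monitoredFenceThresholdOffset;
+ *   }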
+ */
+#define NV2080_CTRL_CMD_FB_GET_SEMAPHORE_SURFACE_LAYOUT (0x20801352U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_SEMAPHORE_SURFACE_LAYOUT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_SEMAPHORE_SURFACE_LAYOUT_CAPS_MONITORED_FENCE_SUPPORTED (0x00000001U)
+#define NV2080_CTRL_FB_GET_SEMAPHORE_SURFACE_LAYOUT_CAPS_64BIT_SEMAPHORES_SUPPORTED (0x00000002U)
+
+#define NV2080_CTRL_FB_GET_SEMAPHORE_SURFACE_LAYOUT_PARAMS_MESSAGE_ID (0x52U)
+
+typedef struct NV2080_CTRL_FB_GET_SEMAPHORE_SURFACE_LAYOUT_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 maxSubmittedSemaphoreValueOffset, 8);
+    NV_DECLARE_ALIGNED(NvU64 monitoredFenceThresholdOffset, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NvU32 caps;
+} NV2080_CTRL_FB_GET_SEMAPHORE_SURFACE_LAYOUT_PARAMS;
+
+typedef struct NV2080_CTRL_CMD_FB_STATS_ENTRY {
+    //! Total physical memory available (accounts for row-remapping)
+    NV_DECLARE_ALIGNED(NvU64 totalSize, 8);
+
+    //! Total reserved memory (includes both Region 1 and Region 2)
+    NV_DECLARE_ALIGNED(NvU64 rsvdSize, 8);
+
+    //! Total usable memory (Region 0) for OS/KMD
+    NV_DECLARE_ALIGNED(NvU64 osSize, 8);
+
+    //! Region 1 (RM Internal) memory
+    NV_DECLARE_ALIGNED(NvU64 r1Size, 8);
+
+    //! Region 2 (Reserved) memory
+    NV_DECLARE_ALIGNED(NvU64 r2Size, 8);
+
+    //! Free memory (reserved but not allocated)
+    NV_DECLARE_ALIGNED(NvU64 freeSize, 8);
+} NV2080_CTRL_CMD_FB_STATS_ENTRY;
+
+/*
+ * NV2080_CTRL_CMD_GMMU_COMMIT_TLB_INVALIDATE
+ *
+ * This control command is used by clients to commit TLB invalidates.
+ *
+ * gfid[OUT]
+ *  - Specifies GPU function ID.
+ *
+ * invalidateAll[OUT]
+ *  - Boolean specifying whether to invalidate all entries.
+ *
+ * @returns Possible status values returned are:
+ *    NV_OK
+ *    NV_ERR_NOT_SUPPORTED
+ *
+ */
+#define NV2080_CTRL_CMD_GMMU_COMMIT_TLB_INVALIDATE (0x20801353U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_GMMU_COMMIT_TLB_INVALIDATE_PARAMS_MESSAGE_ID" */
+#define NV2080_CTRL_GMMU_COMMIT_TLB_INVALIDATE_PARAMS_MESSAGE_ID (0x53U)
+
+typedef struct NV2080_CTRL_GMMU_COMMIT_TLB_INVALIDATE_PARAMS {
+    NvU32  gfid;
+    NvBool invalidateAll;
+} NV2080_CTRL_GMMU_COMMIT_TLB_INVALIDATE_PARAMS;
+
+typedef struct NV2080_CTRL_CMD_FB_STATS_OWNER_INFO {
+    //! Total allocated size for this owner
+    NV_DECLARE_ALIGNED(NvU64 allocSize, 8);
+
+    //! Total memory blocks belonging to this owner
+    NvU32 numBlocks;
+
+    //! Total reserved size for this owner
+    NV_DECLARE_ALIGNED(NvU64 rsvdSize, 8);
+} NV2080_CTRL_CMD_FB_STATS_OWNER_INFO;
+
+#define NV2080_CTRL_CMD_FB_STATS_MAX_OWNER 200U
+
+/*
+ * NV2080_CTRL_CMD_FB_STATS_GET
+ *
+ * Get the FB allocations info.
+ */
+#define NV2080_CTRL_CMD_FB_STATS_GET (0x2080132a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_STATS_GET_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_STATS_GET_PARAMS_MESSAGE_ID (0x2AU)
+
+typedef struct NV2080_CTRL_CMD_FB_STATS_GET_PARAMS {
+
+    //! Version id for driver and tool matching
+    NV_DECLARE_ALIGNED(NvU64 version, 8);
+
+    //! All sizes info
+    NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_STATS_ENTRY fbSizeInfo, 8);
+
+    //! Level 2 owner info table
+    NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_STATS_OWNER_INFO fbBlockInfo[NV2080_CTRL_CMD_FB_STATS_MAX_OWNER], 8);
+} NV2080_CTRL_CMD_FB_STATS_GET_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_GET_STATIC_BAR1_INFO
+ *
+ * This command returns the GPU's static BAR1 info.
+ * This is for general P2P DMA. NV50_P2P is for GPU P2P.
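+ *
+ * A usage sketch (illustrative only; the fields are documented below, and
+ * rmControl()/hClient/hSubdevice are assumed client-side helpers):
+ *
+ *   NV2080_CTRL_FB_GET_STATIC_BAR1_INFO_PARAMS bar1Info = { 0 };
+ *   NV_STATUS status;
+ *
+ *   status = rmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_CMD_FB_GET_STATIC_BAR1_INFO,
+ *                      &bar1Info, sizeof(bar1Info));
+ *
+ *   // Only when static BAR1 is enabled may a peer target FB offsets in
+ *   // [staticBar1StartOffset, staticBar1StartOffset + staticBar1Size).
+ *   if ((status == NV_OK) && bar1Info.bStaticBar1Enabled)
+ *   {
+ *       // set up P2P DMA against the static BAR1 window
+ *   }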
+ * + * @params [OUT] NvBool bStaticBar1Enabled: + * This field indicates the static BAR1 mode is enabled. All the following fields are valid + * only if static BAR1 mode is enabled. + * @params [OUT] NvU64 staticBar1StartOffset: + * Static BAR1 may start at nonzero BAR1 address. + * This field indicates the start offset of the static BAR1. + * @params [OUT] NvU64 staticBar1Size: + * This field indicates the size of the static BAR1. + * + * Possible status values returned are + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_GET_STATIC_BAR1_INFO (0x20801354U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_STATIC_BAR1_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_STATIC_BAR1_INFO_PARAMS_MESSAGE_ID (0x54U) + +typedef struct NV2080_CTRL_FB_GET_STATIC_BAR1_INFO_PARAMS { + NvBool bStaticBar1Enabled; + NvBool bStaticBar1WriteCombined; + NV_DECLARE_ALIGNED(NvU64 staticBar1StartOffset, 8); + NV_DECLARE_ALIGNED(NvU64 staticBar1Size, 8); +} NV2080_CTRL_FB_GET_STATIC_BAR1_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_QUERY_DRAM_ENCRYPTION_PENDING_CONFIGURATION + * + * This command returns the current DRAM encryption configuration + * setting for a GPU given its subdevice handle. The value returned + * is the current DRAM encryption setting for the GPU stored in non-volatile + * memory on the board. + * + * currentConfiguration + * The current DRAM encryption configuration setting. + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_STATE + */ +#define NV2080_CTRL_CMD_FB_QUERY_DRAM_ENCRYPTION_PENDING_CONFIGURATION (0x20801355U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_QUERY_DRAM_ENCRYPTION_PENDING_CONFIGURATION_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_QUERY_DRAM_ENCRYPTION_PENDING_CONFIGURATION_DISABLED (0x00000000U) +#define NV2080_CTRL_FB_QUERY_DRAM_ENCRYPTION_PENDING_CONFIGURATION_ENABLED (0x00000001U) + +#define NV2080_CTRL_FB_QUERY_DRAM_ENCRYPTION_PENDING_CONFIGURATION_PARAMS_MESSAGE_ID (0x55U) + +typedef struct NV2080_CTRL_FB_QUERY_DRAM_ENCRYPTION_PENDING_CONFIGURATION_PARAMS { + NvU32 currentConfiguration; +} NV2080_CTRL_FB_QUERY_DRAM_ENCRYPTION_PENDING_CONFIGURATION_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_SET_DRAM_ENCRYPTION_CONFIGURATION + * + * This command changes the DRAM encryption configuration setting for + * a GPU given its subdevice handle. The value specified is stored + * in non-volatile memory on the board and will take effect with the + * next GPU reset. + * + * newConfiguration + * The new configuration setting to take effect with + * the next GPU reset. 
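+ *
+ * For example (illustrative only; rmControl() and the hClient/hSubdevice
+ * handles are assumed), requesting encryption on the next reset would look
+ * like:
+ *
+ *   NV2080_CTRL_FB_SET_DRAM_ENCRYPTION_CONFIGURATION_PARAMS cfg = { 0 };
+ *   NV_STATUS status;
+ *
+ *   cfg.newConfiguration = NV2080_CTRL_FB_SET_DRAM_ENCRYPTION_CONFIGURATION_ENABLE;
+ *   status = rmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_CMD_FB_SET_DRAM_ENCRYPTION_CONFIGURATION,
+ *                      &cfg, sizeof(cfg));
+ *   // The setting is stored in non-volatile memory and applies after the
+ *   // next GPU reset; the pending value can be read back with
+ *   // NV2080_CTRL_CMD_FB_QUERY_DRAM_ENCRYPTION_PENDING_CONFIGURATION.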
+ *
+ * Possible status return values are:
+ *    NV_OK
+ *    NV_ERR_INVALID_ARGUMENT
+ *    NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_SET_DRAM_ENCRYPTION_CONFIGURATION (0x20801356U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_SET_DRAM_ENCRYPTION_CONFIGURATION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_SET_DRAM_ENCRYPTION_CONFIGURATION_DISABLE (0x00000000U)
+#define NV2080_CTRL_FB_SET_DRAM_ENCRYPTION_CONFIGURATION_ENABLE  (0x00000001U)
+
+#define NV2080_CTRL_FB_SET_DRAM_ENCRYPTION_CONFIGURATION_PARAMS_MESSAGE_ID (0x56U)
+
+typedef struct NV2080_CTRL_FB_SET_DRAM_ENCRYPTION_CONFIGURATION_PARAMS {
+    NvU32 newConfiguration;
+} NV2080_CTRL_FB_SET_DRAM_ENCRYPTION_CONFIGURATION_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_QUERY_DRAM_ENCRYPTION_INFOROM_SUPPORT
+ *
+ * This command returns whether the DRAM encryption config object is supported via the InfoROM.
+ *
+ * isSupported [OUT]
+ *   This parameter returns whether the DRAM encryption InfoROM object is present in the InfoROM.
+ *   The valid values of isSupported are:
+ *     1. NV2080_CTRL_CMD_FB_QUERY_DRAM_ENCRYPTION_INFOROM_SUPPORT_DISABLED
+ *     2. NV2080_CTRL_CMD_FB_QUERY_DRAM_ENCRYPTION_INFOROM_SUPPORT_ENABLED
+ *
+ * Possible status return values are:
+ *    NV_OK
+ *    NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_QUERY_DRAM_ENCRYPTION_INFOROM_SUPPORT (0x20801357U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_DRAM_ENCRYPTION_INFOROM_SUPPORT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_QUERY_DRAM_ENCRYPTION_INFOROM_SUPPORT_DISABLED (0x00000000U)
+#define NV2080_CTRL_CMD_FB_QUERY_DRAM_ENCRYPTION_INFOROM_SUPPORT_ENABLED  (0x00000001U)
+
+#define NV2080_CTRL_FB_DRAM_ENCRYPTION_INFOROM_SUPPORT_PARAMS_MESSAGE_ID (0x57U)
+
+typedef struct NV2080_CTRL_FB_DRAM_ENCRYPTION_INFOROM_SUPPORT_PARAMS {
+    NvU32 isSupported;
+} NV2080_CTRL_FB_DRAM_ENCRYPTION_INFOROM_SUPPORT_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_QUERY_DRAM_ENCRYPTION_STATUS
+ *
+ * This command returns the current DRAM encryption status.
+ *
+ *   currentStatus
+ *     The current DRAM encryption status.
+ *
+ * Possible status return values are:
+ *    NV_OK
+ *    NV_ERR_NOT_SUPPORTED
+ *    NV_ERR_INVALID_STATE
+ */
+#define NV2080_CTRL_CMD_FB_QUERY_DRAM_ENCRYPTION_STATUS (0x20801358U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_QUERY_DRAM_ENCRYPTION_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_QUERY_DRAM_ENCRYPTION_STATUS_DISABLED (0x00000000U)
+#define NV2080_CTRL_CMD_FB_QUERY_DRAM_ENCRYPTION_STATUS_ENABLED  (0x00000001U)
+
+#define NV2080_CTRL_FB_QUERY_DRAM_ENCRYPTION_STATUS_PARAMS_MESSAGE_ID (0x58U)
+
+typedef struct NV2080_CTRL_FB_QUERY_DRAM_ENCRYPTION_STATUS_PARAMS {
+    NvU32 currentStatus;
+} NV2080_CTRL_FB_QUERY_DRAM_ENCRYPTION_STATUS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_GET_MEMORY_BOOT_TRAINING_FLAGS
+ *
+ * This command returns the memory boot training flags from the VBIOS table.
+ * + * flagCollectSchmooData + * flagWrTrHybridVrefEn + * flagWrTrHybridNonVrefEn + * flagRdTrHybridVrefEn + * flagRdTrHybridNonVrefEn + * skipBootTraining + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_GET_MEMORY_BOOT_TRAINING_FLAGS (0x20801359U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_MEMORY_BOOT_TRAINING_FLAGS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_MEMORY_BOOT_TRAINING_FLAGS_PARAMS_MESSAGE_ID (0x59U) + +typedef struct NV2080_CTRL_FB_GET_MEMORY_BOOT_TRAINING_FLAGS_PARAMS { + NvBool flagCollectSchmooData; + NvBool flagWrTrHybridVrefEn; + NvBool flagWrTrHybridNonVrefEn; + NvBool flagRdTrHybridVrefEn; + NvBool flagRdTrHybridNonVrefEn; + NvBool skipBootTraining; +} NV2080_CTRL_FB_GET_MEMORY_BOOT_TRAINING_FLAGS_PARAMS; + +/* + * NV2080_CTRL_FB_CMD_GET_CARVEOUT_REGION_INFO + * + * This command returns the carveout memory region characteristics. + * + * numCarveoutRegions + * Number of valid regions returned in carveoutRegion[]. + * carveoutRegion[].base + * Base address of carveout memory region. + * carveoutRegion[].size + * size of carveout memory region. + * carveoutType + * carveout type for carveout memory region. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_GET_CARVEOUT_REGION_INFO (0x20801360U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_CARVEOUT_REGION_INFO_PARAMS_MESSAGE_ID" */ + +typedef enum NV2080_CTRL_FB_GET_CARVEOUT_REGION_CARVEOUT_TYPE { + NV2080_CTRL_FB_GET_CARVEOUT_REGION_CARVEOUT_TYPE_DISPLAY_FRM = 0, + NV2080_CTRL_FB_GET_CARVEOUT_REGION_CARVEOUT_TYPE_DISPLAY_FRM_RESERVED = 1, + NV2080_CTRL_FB_GET_CARVEOUT_REGION_CARVEOUT_TYPE_UEFI = 2, +} NV2080_CTRL_FB_GET_CARVEOUT_REGION_CARVEOUT_TYPE; + +typedef struct NV2080_CTRL_FB_GET_CARVEOUT_REGION_INFO { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NV2080_CTRL_FB_GET_CARVEOUT_REGION_CARVEOUT_TYPE carveoutType; +} NV2080_CTRL_FB_GET_CARVEOUT_REGION_INFO; + +#define NV2080_CTRL_FB_GET_CARVEOUT_REGION_INFO_MAX_ENTRIES 8U + +#define NV2080_CTRL_FB_GET_CARVEOUT_REGION_INFO_PARAMS_MESSAGE_ID (0x60U) + +typedef struct NV2080_CTRL_FB_GET_CARVEOUT_REGION_INFO_PARAMS { + NvU32 numCarveoutRegions; + NV_DECLARE_ALIGNED(NV2080_CTRL_FB_GET_CARVEOUT_REGION_INFO carveoutRegion[NV2080_CTRL_FB_GET_CARVEOUT_REGION_INFO_MAX_ENTRIES], 8); +} NV2080_CTRL_FB_GET_CARVEOUT_REGION_INFO_PARAMS; + +/* _ctrl2080fb_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h new file mode 100644 index 0000000..b3d54a0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h @@ -0,0 +1,1166 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080fifo.finn
+//
+
+#include "nvcfg_sdk.h"
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/*
+ * NV2080_CTRL_CMD_SET_GPFIFO
+ *
+ * This command sets the GPFIFO offset and number of entries for a channel
+ * after it has been allocated. The channel must be idle and not pending,
+ * otherwise ERROR_IN_USE will be returned.
+ *
+ *   hChannel
+ *     The handle to the channel.
+ *   base
+ *     The base of the GPFIFO in the channel ctxdma.
+ *   numEntries
+ *     The number of entries in the GPFIFO.
+ *
+ * Possible status values returned are:
+ *    NV_OK
+ *    NV_ERR_INVALID_DEVICE
+ *    NV_ERR_INVALID_CHANNEL
+ *    NV_ERR_STATE_IN_USE
+ *    NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV2080_CTRL_CMD_SET_GPFIFO (0x20801102) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_CMD_SET_GPFIFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_SET_GPFIFO_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_CMD_SET_GPFIFO_PARAMS {
+    NvHandle hChannel;
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NvU32    numEntries;
+} NV2080_CTRL_CMD_SET_GPFIFO_PARAMS;
+
+/*
+ * NV2080_CTRL_FIFO_BIND_CHANNEL
+ *
+ * This structure is used to describe a channel that is to have
+ * its bindable engines bound to those of other channels.
+ *
+ *   hClient
+ *     This structure member contains the handle of the client object
+ *     that owns the channel object specified by hChannel.
+ *
+ *   hChannel
+ *     This structure member contains the channel handle of the channel
+ *     object.
+ */
+
+typedef struct NV2080_CTRL_FIFO_BIND_CHANNEL {
+    NvHandle hClient;
+    NvHandle hChannel;
+} NV2080_CTRL_FIFO_BIND_CHANNEL;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_BIND_ENGINES
+ *
+ * This control call is now deprecated.
+ * This command can be used to bind different video engines on G8X from separate
+ * channels together for operations such as idling. The set of bindable engines
+ * includes the NV2080_ENGINE_TYPE_BSP, NV2080_ENGINE_TYPE_VP and
+ * NV2080_ENGINE_TYPE_PPP engines.
+ *
+ *   bindChannelCount
+ *     This parameter specifies the number of channels to bind together. This
+ *     parameter cannot exceed NV2080_CTRL_FIFO_BIND_ENGINES_MAX_CHANNELS.
+ *
+ *   bindChannels
+ *     This parameter specifies the array of channels to bind together. The first
+ *     bindChannelCount entries are used in the bind channel operation.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_DEVICE + * NV_ERR_INVALID_CHANNEL + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_FIFO_BIND_ENGINES_MAX_CHANNELS (16) + +#define NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS { + NvU32 bindChannelCount; + NV2080_CTRL_FIFO_BIND_CHANNEL bindChannels[NV2080_CTRL_FIFO_BIND_ENGINES_MAX_CHANNELS]; +} NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS; + +#define NV2080_CTRL_CMD_FIFO_BIND_ENGINES (0x20801103) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES + * + * This command is used for a client to setup specialized custom operational + * properties that may be specific to an environment, or properties that + * should be set generally but are not for reasons of backward compatibility + * with previous chip generations + * + * flags + * This field specifies the operational properties to be applied + * + * Possible return status values returned are + * NV_OK + * NV_ERR_INVALID_CHANNEL + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES (0x20801104) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS { + NvU32 flags; +} NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS; + +#define NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_FLAGS_ERROR_ON_STUCK_SEMAPHORE 0:0 +#define NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_FLAGS_ERROR_ON_STUCK_SEMAPHORE_FALSE (0x00000000) +#define NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_FLAGS_ERROR_ON_STUCK_SEMAPHORE_TRUE (0x00000001) + +/* + * NV2080_CTRL_CMD_FIFO_GET_PHYSICAL_CHANNEL_COUNT + * + * This command returns the maximum number of physical channels available for + * allocation on the current GPU. This may be less than or equal to the total + * number of channels supported by the current hardware. + * + * physChannelCount + * This output parameter contains the maximum physical channel count. + * + * physChannelCountInUse + * This output parameter contains the number of physical channels in use + * + * Possible return status values returned are + * NV_OK + * + */ +#define NV2080_CTRL_CMD_FIFO_GET_PHYSICAL_CHANNEL_COUNT (0x20801108) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS { + NvU32 physChannelCount; + NvU32 physChannelCountInUse; +} NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS; + +/* + * NV2080_CTRL_FIFO_INFO + * + * This structure represents a single 32bit fifo engine value. Clients + * request a particular FIFO engine value by specifying a unique fifo + * information index. + * + * Legal fifo information index values are: + * NV2080_CTRL_FIFO_INFO_INDEX_INSTANCE_TOTAL + * This index can be used to request the amount of instance space + * in kilobytes reserved by the fifo engine. + * NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS + * This index can be used to query the maximum number of channel groups + * that can be allocated on the GPU. 
+ *   NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNELS_PER_GROUP
+ *     This index can be used to query the maximum number of channels that can
+ *     be allocated in a single channel group.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_MAX_SUBCONTEXT_PER_GROUP
+ *     This index can be used to query the maximum number of subcontexts that can
+ *     be allocated in a single channel group.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_BAR1_USERD_START_OFFSET
+ *     This index can be used to query the starting offset of the RM
+ *     pre-allocated USERD range in BAR1. This index query is honored only
+ *     on Legacy-vGPU host RM.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_DEFAULT_CHANNEL_TIMESLICE
+ *     This index can be used to query the default timeslice value
+ *     (microseconds) used for a channel or channel group.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE
+ *     This index can be used to query the number of channel groups that are
+ *     already allocated on the GPU.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_IS_PER_RUNLIST_CHANNEL_RAM_SUPPORTED
+ *     This index can be used to check if per runlist channel ram is supported, and
+ *     to query the supported number of channels per runlist.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS_PER_ENGINE
+ *     This index can be used to get the max channel groups supported per engine/runlist.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE_PER_ENGINE
+ *     This index can be used to get the channel groups currently in use per engine/runlist.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_MAX_LOWER_SUBCONTEXT
+ *     This index can be used to query the maximum "lower" subcontext index
+ *     allocated under NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_ASYNC_PREFER_LOWER.
+ *     Note: Includes subcontext ID 0, which will be allocated last in ASYNC allocation mode.
+ *
+ */
+typedef NVXXXX_CTRL_XXX_INFO NV2080_CTRL_FIFO_INFO;
+
+/* valid fifo info index values */
+#define NV2080_CTRL_FIFO_INFO_INDEX_INSTANCE_TOTAL                       (0x000000000)
+#define NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS                   (0x000000001)
+#define NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNELS_PER_GROUP               (0x000000002)
+#define NV2080_CTRL_FIFO_INFO_INDEX_MAX_SUBCONTEXT_PER_GROUP             (0x000000003)
+#define NV2080_CTRL_FIFO_INFO_INDEX_BAR1_USERD_START_OFFSET              (0x000000004)
+#define NV2080_CTRL_FIFO_INFO_INDEX_DEFAULT_CHANNEL_TIMESLICE            (0x000000005)
+#define NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE                (0x000000006)
+#define NV2080_CTRL_FIFO_INFO_INDEX_IS_PER_RUNLIST_CHANNEL_RAM_SUPPORTED (0x000000007)
+#define NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS_PER_ENGINE        (0x000000008)
+#define NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE_PER_ENGINE     (0x000000009)
+#define NV2080_CTRL_FIFO_INFO_INDEX_MAX_LOWER_SUBCONTEXT                 (0x00000000a)
+
+
+/* set INDEX_MAX to greatest possible index value */
+#define NV2080_CTRL_FIFO_INFO_INDEX_MAX NV2080_CTRL_FIFO_INFO_INDEX_MAX_LOWER_SUBCONTEXT
+
+#define NV2080_CTRL_FIFO_GET_INFO_USERD_OFFSET_SHIFT (12)
+
+/*
+ * NV2080_CTRL_CMD_FIFO_GET_INFO
+ *
+ * This command returns fifo engine information for the associated GPU.
+ * Requests to retrieve fifo information use an array of one or more
+ * NV2080_CTRL_FIFO_INFO structures.
+ *
+ *   fifoInfoTblSize
+ *     This field specifies the number of valid entries in the fifoInfoTbl
+ *     array. This value cannot exceed NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES.
+ *   fifoInfoTbl
+ *     This parameter contains the client's fifo info table into
+ *     which the fifo info values will be transferred by the RM.
+ *     The fifo info table is an array of NV2080_CTRL_FIFO_INFO structures.
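+ *
+ * For example, two values can be fetched in one batched call (illustrative
+ * only; rmControl() and hClient/hSubdevice are assumed, and the index/data
+ * field names come from the generic NVXXXX_CTRL_XXX_INFO layout):
+ *
+ *   NV2080_CTRL_FIFO_GET_INFO_PARAMS info = { 0 };
+ *   NV_STATUS status;
+ *
+ *   info.fifoInfoTblSize = 2;
+ *   info.fifoInfoTbl[0].index = NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS;
+ *   info.fifoInfoTbl[1].index = NV2080_CTRL_FIFO_INFO_INDEX_DEFAULT_CHANNEL_TIMESLICE;
+ *   status = rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_FIFO_GET_INFO,
+ *                      &info, sizeof(info));
+ *   // On NV_OK, info.fifoInfoTbl[n].data holds the value for each index.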
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FIFO_GET_INFO (0x20801109) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_INFO_PARAMS_MESSAGE_ID" */ + +/* maximum number of NV2080_CTRL_FIFO_INFO entries per request */ +#define NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES (256) + +#define NV2080_CTRL_FIFO_GET_INFO_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_FIFO_GET_INFO_PARAMS { + NvU32 fifoInfoTblSize; + /* + * C form: + * NV2080_CTRL_FIFO_INFO fifoInfoTbl[NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES]; + */ + NV2080_CTRL_FIFO_INFO fifoInfoTbl[NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES]; + NvU32 engineType; +} NV2080_CTRL_FIFO_GET_INFO_PARAMS; + + + +/* + * NV2080_CTRL_FIFO_CHANNEL_PREEMPTIVE_REMOVAL + * + * This command removes the specified channel from the associated GPU's runlist + * and then initiates RC recovery. If the channel is active it will first be preempted. + * hChannel + * The handle to the channel to be preempted. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_CHANNEL + */ +#define NV2080_CTRL_CMD_FIFO_CHANNEL_PREEMPTIVE_REMOVAL (0x2080110a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_CHANNEL_PREEMPTIVE_REMOVAL_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FIFO_CHANNEL_PREEMPTIVE_REMOVAL_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV2080_CTRL_FIFO_CHANNEL_PREEMPTIVE_REMOVAL_PARAMS { + NvHandle hChannel; +} NV2080_CTRL_FIFO_CHANNEL_PREEMPTIVE_REMOVAL_PARAMS; + +/* + * NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS + * + * This command will disable or enable scheduling of channels described in the + * list provided. Whether or not the channels are also preempted off the GPU + * can be controlled by bOnlyDisableScheduling. By default channels are preempted + * off the GPU. + * + * bDisable + * This value determines whether to disable or + * enable the set of channels. + * numChannels + * The number of channels to be stopped. + * bOnlyDisableScheduling + * When false and bDisable=NV_TRUE,the call will ensure none of the listed + * channels are running in hardware and will not run until a call with + * bDisable=NV_FALSE is made. When true and bDisable=NV_TRUE, the control + * call will ensure that none of the listed channels can be scheduled on the + * GPU until a call with bDisable=NV_FALSE is made, but will not remove any + * of the listed channels from hardware if they are currently running. When + * bDisable=NV_FALSE this field is ignored. + * bRewindGpPut + * If a channel is being disabled and bRewindGpPut=NV_TRUE, the channel's RAMFC + * will be updated so that GP_PUT is reset to the value of GP_GET. + * hClientList + * An array of NvU32 listing the client handles + * hChannelList + * An array of NvU32 listing the channel handles + * to be stopped. 
+ *   pRunlistPreemptEvent
+ *     KEVENT handle for Async HW runlist preemption (unused on preMaxwell)
+ *     When NULL, will revert to synchronous preemption with spinloop
+ *
+ * Possible status values returned are:
+ *    NV_OK
+ *    NVOS_INVALID_STATE
+ */
+
+#define NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS (0x2080110b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES (64)
+
+#define NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS_MESSAGE_ID (0xBU)
+
+typedef struct NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS {
+    NvBool bDisable;
+    NvU32  numChannels;
+    NvBool bOnlyDisableScheduling;
+    NvBool bRewindGpPut;
+    NV_DECLARE_ALIGNED(NvP64 pRunlistPreemptEvent, 8);
+    // C form: NvHandle hClientList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES]
+    NvHandle hClientList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES];
+    // C form: NvHandle hChannelList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES]
+    NvHandle hChannelList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES];
+} NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS;
+
+#define NV2080_CTRL_FIFO_DISABLE_CHANNEL_FALSE         (0x00000000)
+#define NV2080_CTRL_FIFO_DISABLE_CHANNEL_TRUE          (0x00000001)
+#define NV2080_CTRL_FIFO_ONLY_DISABLE_SCHEDULING_FALSE (0x00000000)
+#define NV2080_CTRL_FIFO_ONLY_DISABLE_SCHEDULING_TRUE  (0x00000001)
+
+/*
+ * NV2080_CTRL_FIFO_MEM_INFO
+ *
+ * This structure describes the details of a block of memory. It consists
+ * of the following fields
+ *
+ *   aperture
+ *     One of the NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_* values
+ *   base
+ *     Physical base address of the memory
+ *   size
+ *     Size in bytes of the memory
+*/
+typedef struct NV2080_CTRL_FIFO_MEM_INFO {
+    NvU32 aperture;
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_FIFO_MEM_INFO;
+
+/*
+ * NV2080_CTRL_FIFO_CHANNEL_MEM_INFO
+ *
+ * This structure describes the details of the instance memory, ramfc
+ * and method buffers of a channel. It consists of the following fields
+ *
+ *   inst
+ *     Structure describing the details of instance memory
+ *   ramfc
+ *     Structure describing the details of ramfc
+ *   methodBuf
+ *     Array of structures describing the details of method buffers
+ *   methodBufCount
+ *     Number of method buffers (one per runqueue)
+ */
+
+// max runqueues
+#define NV2080_CTRL_FIFO_GET_CHANNEL_MEM_INFO_MAX_COUNT 0x2
+
+typedef struct NV2080_CTRL_FIFO_CHANNEL_MEM_INFO {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_FIFO_MEM_INFO inst, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_FIFO_MEM_INFO ramfc, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_FIFO_MEM_INFO methodBuf[NV2080_CTRL_FIFO_GET_CHANNEL_MEM_INFO_MAX_COUNT], 8);
+    NvU32 methodBufCount;
+} NV2080_CTRL_FIFO_CHANNEL_MEM_INFO;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO
+ *
+ * This command returns the memory aperture, physical base address and the
+ * size of each of the instance memory, cache1 and ramfc of a channel.
+ *
+ *   hChannel
+ *     The handle to the channel for which the memory information is desired.
+ *   chMemInfo
+ *     A NV2080_CTRL_FIFO_CHANNEL_MEM_INFO structure
+ *
+ * Possible status values returned are:
+ *    NV_OK
+ *    NV_ERR_INVALID_CHANNEL
+*/
+
+#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO (0x2080110c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS_MESSAGE_ID (0xCU)
+
+typedef struct NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS {
+    NvHandle hChannel;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_FIFO_CHANNEL_MEM_INFO chMemInfo, 8);
+} NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_INVALID     0x00000000
+#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_VIDMEM      0x00000001
+#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_COH  0x00000002
+#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_NCOH 0x00000003
+
+/*
+ * NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION
+ *
+ * This command determines the location (vidmem/sysmem)
+ * and attribute (cached/uncached/write combined) of the memory where USERD is located.
+ *
+ *   aperture
+ *     One of the NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_* values.
+ *
+ *   attribute
+ *     One of the NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_* values.
+ *
+ * Possible status values returned are:
+ *    NV_OK
+ *    NV_ERR_INVALID_DEVICE
+ *    NV_ERR_INVALID_STATE
+ *    NV_ERR_INVALID_POINTER
+*/
+
+#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION (0x2080110d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS_MESSAGE_ID (0xDU)
+
+typedef struct NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS {
+    NvU32 aperture;
+    NvU32 attribute;
+} NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS;
+
+// support for CPU coherent vidmem (VIDMEM_NVILINK_COH) is not yet available in RM
+
+#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_VIDMEM         0x00000000
+#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_SYSMEM         0x00000001
+
+#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_CACHED        0x00000000
+#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_UNCACHED      0x00000001
+#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_WRITECOMBINED 0x00000002
+
+
+#define NV2080_CTRL_CMD_VGPU_SCHEDULER_POLICY_UNKNOWN     0
+#define NV2080_CTRL_CMD_VGPU_SCHEDULER_POLICY_OTHER       1
+#define NV2080_CTRL_CMD_VGPU_SCHEDULER_POLICY_BEST_EFFORT 2
+#define NV2080_CTRL_CMD_VGPU_SCHEDULER_POLICY_EQUAL_SHARE 3
+#define NV2080_CTRL_CMD_VGPU_SCHEDULER_POLICY_FIXED_SHARE 4
+
+// Count of the supported vGPU scheduler policies
+#define NV2080_CTRL_CMD_SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT 3
+
+#define NV2080_CTRL_CMD_VGPU_SCHEDULER_ARR_DEFAULT 0
+#define NV2080_CTRL_CMD_VGPU_SCHEDULER_ARR_DISABLE 1
+#define NV2080_CTRL_CMD_VGPU_SCHEDULER_ARR_ENABLE  2
+
+/*
+ * NV2080_CTRL_CMD_FIFO_OBJSCHED_SW_GET_LOG
+ *
+ * This command returns the OBJSCHED_SW log entries.
+ *
+ *   engineId
+ *     This field specifies the NV2080_ENGINE_TYPE_* engine whose SW runlist log
+ *     entries are to be fetched.
+ *
+ *   count
+ *     This field returns the count of log entries fetched.
+ *
+ *   entry
+ *     The array of SW runlist log entries.
+ *
+ *   timestampNs
+ *     Timestamp in ns when this SW runlist was preempted.
+ *
+ *   timeRunTotalNs
+ *     Total time in ns this SW runlist has run as compared to others.
+ * + * timeRunNs + * Time in ns this SW runlist ran before preemption. + * + * swrlId + * SW runlist Id. + * + * schedPolicy + * This field returns the runlist scheduling policy. It specifies the + * NV2080_CTRL_CMD_VGPU_SCHEDULER_POLICY_* scheduling policy. + * + * arrEnabled + * This field returns if Adaptive round robin scheduler + * is enabled/disabled. + * + * arrAvgFactor + * This field returns the average factor to be used in compensating the timeslice + * for Adaptive scheduler mode. + * + * targetTimesliceNs + * This field returns the target timeslice duration in ns for each SW runlist + * as configured by the user or the default value otherwise. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT +*/ + +#define NV2080_CTRL_CMD_FIFO_OBJSCHED_SW_GET_LOG (0x2080110e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_OBJSCHED_SW_GET_LOG_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FIFO_OBJSCHED_SW_COUNT 32 +#define NV2080_CTRL_FIFO_OBJSCHED_SW_NCOUNTERS 8 +#define NV2080_CTRL_FIFO_OBJSCHED_SW_GET_LOG_ENTRIES 200 + +typedef struct NV2080_CTRL_FIFO_OBJSCHED_SW_GET_LOG_ENTRY { + NV_DECLARE_ALIGNED(NvU64 timestampNs, 8); + NV_DECLARE_ALIGNED(NvS64 timeRunTotalNs, 8); + NvU32 timeRunNs; + NvU32 swrlId; + NvU32 targetTimeSlice; + NV_DECLARE_ALIGNED(NvU64 cumulativePreemptionTime, 8); + NV_DECLARE_ALIGNED(NvU64 cumulativeIdleTime, 8); + NV_DECLARE_ALIGNED(NvU64 counters[NV2080_CTRL_FIFO_OBJSCHED_SW_NCOUNTERS], 8); +} NV2080_CTRL_FIFO_OBJSCHED_SW_GET_LOG_ENTRY; + +#define NV2080_CTRL_FIFO_OBJSCHED_SW_GET_LOG_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NV2080_CTRL_FIFO_OBJSCHED_SW_GET_LOG_PARAMS { + NvU32 engineId; + NvU32 count; + NV_DECLARE_ALIGNED(NV2080_CTRL_FIFO_OBJSCHED_SW_GET_LOG_ENTRY entry[NV2080_CTRL_FIFO_OBJSCHED_SW_GET_LOG_ENTRIES], 8); + NvU32 schedPolicy; + NvU32 arrEnabled; + NvU32 arrAvgFactor; + NvU32 targetTimesliceNs; +} NV2080_CTRL_FIFO_OBJSCHED_SW_GET_LOG_PARAMS; + + + +/* + * NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE + * + * This command retrieves entries from the SW encoded GPU device info table + * from Host RM. + * + * Parameters: + * + * baseIndex [in] + * The starting index to read from the devinfo table. Must be a multiple of + * MAX_ENTRIES. + * + * entries [out] + * A buffer to store up to MAX_ENTRIES entries of the devinfo table. + * + * numEntries [out] + * Number of populated entries in the provided buffer. + * + * bMore [out] + * A boolean flag indicating whether more valid entries are available to be + * read. A value of NV_TRUE indicates that a further call to this control + * with baseIndex incremented by MAX_ENTRIES will yield further valid data. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_DEVICES 256 +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES 32 +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES 16 +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA 2 +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16 + +/* + * NV2080_CTRL_FIFO_DEVICE_ENTRY + * + * This structure contains the engine, engine name and + * push buffers information of FIFO device entry. 
It consists of the following fields + * + * engineData + * Type of the engine + * pbdmaIds + * List of pbdma ids associated with engine + * pbdmaFaultIds + * List of pbdma fault ids associated with engine + * numPbdmas + * Number of pbdmas + * engineName + * Name of the engine + */ +typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY { + NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES]; + NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA]; + NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA]; + NvU32 numPbdmas; + char engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN]; +} NV2080_CTRL_FIFO_DEVICE_ENTRY; + +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS { + NvU32 baseIndex; + NvU32 numEntries; + NvBool bMore; + // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES]; + NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES]; +} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS; + +/* + * NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT + * + * This command clears the ENGINE or PBDMA FAULTED bit and reschedules the faulted channel + * by ringing channel's doorbell + * + * Parameters: + * + * engineType [in] + * The NV2080_ENGINE_TYPE of the engine to which the faulted + * channel is bound. This may be a logical id for guest RM in + * case of SMC. + * + * vChid [in] + * Virtual channel ID on which the fault occurred + * + * faultType [in] + * Whether fault was triggered by engine (_ENGINE_FAULTED) or PBDMA (_PBDMA_FAULTED) + * The value specified must be one of the NV2080_CTRL_FIFO_CLEAR_FAULTED_BIT_FAULT_TYPE_* values + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT (0x20801113) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FIFO_CLEAR_FAULTED_BIT_FAULT_TYPE_ENGINE 0x00000001 +#define NV2080_CTRL_FIFO_CLEAR_FAULTED_BIT_FAULT_TYPE_PBDMA 0x00000002 + +#define NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS { + NvU32 engineType; + NvU32 vChid; + NvU32 faultType; +} NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS; + + + +/* + * NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY + * + * Allows clients to set the global scheduling policy for all runlists + * associated to the given subdevice. + * + * Currently, this is only supported for HW runlists. + * + * Since this is a global setting, only privileged clients will be allowed to + * set it. Regular clients will get NV_ERR_INSUFFICIENT_PERMISSIONS error. + * + * Once a certain scheduling policy is set, that policy cannot be changed to a + * different one unless all clients which set it have either restored the policy + * (using the corresponding restore flag) or died. Clients trying to set a + * policy while a different one is locked by another client will get a + * NV_ERR_INVALID_STATE error. + * + * The same client can set a scheduling policy and later change to another one + * only when no other clients have set the same policy. Such sequence will be + * equivalent to restoring the policy in between. + * + * For instance, the following sequence: + * + * 1. Set policy A + * 2. 
Set policy B + * + * is equivalent to: + * + * 1. Set policy A + * 2. Restore policy + * 3. Set policy B + * + * Parameters: + * + * flags + * This field specifies the operational properties to be applied: + * + * - NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE_FALSE + * Try to set the provided 'schedPolicy' scheduling policy. If the + * operation succeeds, other clients will be prevented from setting a + * different scheduling policy until all clients using it have either + * restored it or died. + * + * - NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE_TRUE + * Let the scheduler know the client no longer requires the current + * scheduling policy. This may or may not actually change the + * scheduling policy, depending on how many other clients are also + * using the current policy. + * + * The 'schedPolicy' parameter is ignored when this flag is set. + * + * schedPolicy + * One of: + * + * - NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_DEFAULT + * Set the default scheduling policy and prevent other clients from + * changing it. + * + * - NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED + * This scheduling policy will make channels to be scheduled according + * to their interleave level. See NVA06C_CTRL_CMD_SET_INTERLEAVE_LEVEL + * description for more details. + * - NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED_WDDM + * This scheduling policy will make channels to be scheduled according + * to their interleave level per WDDM policy. + * See NVA06C_CTRL_CMD_SET_INTERLEAVE_LEVEL description for more details. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_DEVICE + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INSUFFICIENT_PERMISSIONS + * NV_ERR_INVALID_STATE + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY (0x20801115) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS_MESSAGE_ID" */ + +/* schedPolicy values */ +#define NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_DEFAULT 0x0 +#define NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED 0x1 +#define NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED_WDDM 0x2 + +/* SET_SCHED_POLICY flags */ +#define NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE 0:0 +#define NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE_FALSE (0x00000000) +#define NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE_TRUE (0x00000001) + +#define NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS_MESSAGE_ID (0x15U) + +typedef struct NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS { + NvU32 flags; + NvU32 schedPolicy; +} NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS; + +/* + * NV2080_CTRL_CMD_FIFO_UPDATE_CHANNEL_INFO + * + * This command updates the channel info params for an existing channel + * + * Can be a deferred Api. 
The control call can be used for migrating a + * + * channel to a new userd and gpfifo + * + * Parameters: + * [in] hClient - Client handle + * [in] hChannel - Channel handle + * [in] hUserdMemory - UserD handle + * [in] gpFifoEntries - Number of Gpfifo Entries + * [in] gpFifoOffset - Gpfifo Virtual Offset + * [in] userdOffset - UserD offset + * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FIFO_UPDATE_CHANNEL_INFO (0x20801116) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS_MESSAGE_ID (0x16U) + +typedef struct NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS { + NvHandle hClient; + NvHandle hChannel; + NvHandle hUserdMemory; + NvU32 gpFifoEntries; + NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); + NV_DECLARE_ALIGNED(NvU64 userdOffset, 8); +} NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_FIFO_DISABLE_USERMODE_CHANNELS + * + * This command will disable or enable scheduling of all usermode channels. + * + * bDisable + * This value determines whether to disable or enable the usermode channels. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FIFO_DISABLE_USERMODE_CHANNELS (0x20801117) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS_MESSAGE_ID (0x17U) + +typedef struct NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS { + NvBool bDisable; +} NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS; + +/* + * NV2080_CTRL_CMD_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB + * + * When a VF subcontext is marked as a zombie, host RM points its PDB to a dummy + * page allocated by guest RM in GPA space. This command provides the parameters + * of the guest RMs memory descriptor to be able to create a corresponding + * memory descriptor on the host RM. Host RM uses this to program the PDB of a + * zombie subcontext. + * + * Parameters: + * Input parameters to describe the memory descriptor + * [in] base + * [in] size + * [in] addressSpace + * [in] cacheAttrib + */ +#define NV2080_CTRL_CMD_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB (0x20801118) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS_MESSAGE_ID (0x18U) + +typedef struct NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 addressSpace; + NvU32 cacheAttrib; +} NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS; + +/* + * NV2080_CTRL_CMD_FIFO_GET_ALLOCATED_CHANNELS + * + * Get's a bitmask of allocated channels. No guarantees are made about + * synchronization. A channel returned as allocated by this ctrl cmd might have + * already been destructed. + * + * Parameters: + * [in] runlistId + * [in,out] bitMask A 1 bit indicates that a channel with this index/id is + * allocated. This field is a multiple of 32 bits and each 32 + * bit group must be accessed as a platform 32 bit int to + * correctly map channel IDs. 
+ *
+ */
+#define NV2080_CTRL_CMD_FIFO_GET_ALLOCATED_CHANNELS (0x20801119) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_ALLOCATED_CHANNELS_PARAMS_MESSAGE_ID" */
+
+/*
+ * The maximum number that can be returned by
+ * NV2080_CTRL_CMD_INTERNAL_FIFO_GET_NUM_CHANNELS
+ */
+#define NV2080_CTRL_FIFO_GET_ALLOCATED_CHANNELS_MAX_CHANNELS 4096
+
+#define NV2080_CTRL_FIFO_GET_ALLOCATED_CHANNELS_PARAMS_MESSAGE_ID (0x19U)
+
+typedef struct NV2080_CTRL_FIFO_GET_ALLOCATED_CHANNELS_PARAMS {
+    NvU32 runlistId;
+    NvU32 bitMask[NV2080_CTRL_FIFO_GET_ALLOCATED_CHANNELS_MAX_CHANNELS / 32];
+} NV2080_CTRL_FIFO_GET_ALLOCATED_CHANNELS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION
+ *
+ * This command will disable and preempt channels described in the
+ * list provided and mark them ready for key rotation.
+ * hClient <-> hChannel pairs should use the same index in the arrays.
+ *
+ *   numChannels
+ *     The number of valid entries in the hChannelList array.
+ *   hClientList
+ *     An array of NvHandle listing the client handles
+ *   hChannelList
+ *     An array of NvHandle listing the channel handles
+ *     to be stopped.
+ *   bEnableAfterKeyRotation
+ *     This determines if the channel is enabled by RM after it completes key rotation.
+ * Possible status values returned are:
+ *    NV_OK
+ *    NVOS_INVALID_STATE
+ */
+#define NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION (0x2080111a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_MAX_ENTRIES (64)
+
+#define NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_PARAMS_MESSAGE_ID (0x1AU)
+
+typedef struct NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_PARAMS {
+    NvU32    numChannels;
+    NvHandle hClientList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_MAX_ENTRIES];
+    NvHandle hChannelList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_MAX_ENTRIES];
+    NvBool   bEnableAfterKeyRotation;
+} NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_V2
+ *
+ * This command does the same thing as @ref NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION.
+ * The difference is that it doesn't take a list of clients; instead, all channels belong
+ * to the client on which this control call is made.
+ *
+ *   numChannels
+ *     The number of valid entries in the hChannelList array.
+ *   hChannelList
+ *     An array of NvHandle listing the channel handles
+ *     to be stopped.
+ *   bEnableAfterKeyRotation
+ *     This determines if the channel is enabled by RM after it completes key rotation.
+ * Possible status values returned are:
+ *    NV_OK
+ *    NVOS_INVALID_STATE
+ */
+#define NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_V2 (0x2080111b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_V2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_V2_PARAMS_MESSAGE_ID (0x1BU)
+
+typedef struct NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_V2_PARAMS {
+    NvU32    numChannels;
+    NvHandle hChannelList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_MAX_ENTRIES];
+    NvBool   bEnableAfterKeyRotation;
+} NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_V2_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_FIFO_OBJSCHED_GET_STATE
+ *
+ * This command returns the vGPU scheduler state.
+ *
+ *   engineId
+ *     This field specifies the NV2080_ENGINE_TYPE_* engine whose SW runlist log
+ *     entries are to be fetched.
+ *
+ *   schedPolicy
+ *     This field returns the runlist scheduling policy. It specifies the
+ *     NV2080_CTRL_CMD_VGPU_SCHEDULER_POLICY_* scheduling policy.
+ *
+ *   arrEnabled
+ *     This field returns whether the Adaptive round robin scheduler
+ *     is enabled or disabled.
+ *
+ *   targetTimesliceNs
+ *     This field returns the target timeslice duration in ns for each SW runlist
+ *     as configured by the user or the default value otherwise.
+ *
+ *   arrAvgFactor
+ *     This field returns the average factor to be used in compensating the timeslice
+ *     for Adaptive scheduler mode.
+ *
+ * Possible status values returned are:
+ *    NV_OK
+ *    NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV2080_CTRL_CMD_FIFO_OBJSCHED_GET_STATE (0x20801120) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_OBJSCHED_GET_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_OBJSCHED_GET_STATE_PARAMS_MESSAGE_ID (0x20U)
+
+typedef struct NV2080_CTRL_FIFO_OBJSCHED_GET_STATE_PARAMS {
+    NvU32 engineId;
+    NvU32 schedPolicy;
+    NvU32 arrEnabled;
+    NvU32 targetTimesliceNs;
+    NvU32 arrAvgFactor;
+} NV2080_CTRL_FIFO_OBJSCHED_GET_STATE_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_OBJSCHED_SET_STATE
+ *
+ * This command sets the vGPU scheduler state.
+ *
+ *   engineId
+ *     This field specifies the NV2080_ENGINE_TYPE_* engine.
+ *
+ *   schedPolicy
+ *     This field sets the runlist scheduling policy. It specifies the
+ *     NV2080_CTRL_CMD_VGPU_SCHEDULER_POLICY_* scheduling policy.
+ *
+ *   enableArr
+ *     This field sets whether the Adaptive round robin scheduler
+ *     is enabled or disabled.
+ *
+ *   timesliceTargetNs
+ *     This field sets the time slice target time in ns.
+ *
+ *   frequencyForARR
+ *     This field sets the scheduling frequency for Adaptive round robin scheduler mode.
+ *
+ *   avgFactorForARR
+ *     This field sets the average factor to be used in compensating the timeslice
+ *     for Adaptive scheduler mode.
+ *
+ * Possible status values returned are:
+ *    NV_OK
+ *    NV_ERR_INVALID_DEVICE
+ *    NV_ERR_INVALID_STATE
+ *    NV_ERR_NOT_SUPPORTED
+ *    NV_ERR_INSUFFICIENT_PERMISSIONS
+ *    NV_ERR_INVALID_ARGUMENT
+ *    NV_ERR_INVALID_PARAM_STRUCT
+ */
+
+#define NV2080_CTRL_CMD_FIFO_OBJSCHED_SET_STATE (0x20801121) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_OBJSCHED_SET_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_OBJSCHED_SET_STATE_PARAMS_MESSAGE_ID (0x21U)
+
+typedef struct NV2080_CTRL_FIFO_OBJSCHED_SET_STATE_PARAMS {
+    NvU32 engineId;
+    NvU32 schedPolicy;
+    NvU32 enableArr;
+    NvU32 timesliceTargetNs;
+    NvU32 frequencyForARR;
+    NvU32 avgFactorForARR;
+} NV2080_CTRL_FIFO_OBJSCHED_SET_STATE_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_OBJSCHED_GET_CAPS
+ *
+ * This command returns the vGPU scheduler capabilities.
+ *
+ *   engineId [in]
+ *     This field specifies the NV2080_ENGINE_TYPE_* engine whose SW runlist log
+ *     entries are to be fetched.
+ *
+ *   supportedSchedulers [out]
+ *     This field returns the supported runlist scheduling policies on the device.
+ *     It specifies the NV2080_CTRL_CMD_VGPU_SCHEDULER_POLICY_* scheduling policy.
+ *
+ *   bIsArrModeSupported [out]
+ *     This field returns whether Adaptive scheduler mode is supported.
+ *
+ *   maxTimesliceNs [out]
+ *     This field returns the maximum time slice value in ns.
+ *
+ *   minTimesliceNs [out]
+ *     This field returns the minimum time slice value in ns.
+ *
+ *   maxFrequencyForARR [out]
+ *     This field returns the maximum scheduling frequency for
+ *     Adaptive round robin scheduler mode.
+ *
+ *   minFrequencyForARR [out]
+ *     This field returns the minimum scheduling frequency for
+ *     Adaptive round robin scheduler mode.
+ *
+ *   maxAvgFactorForARR [out]
+ *     This field returns the maximum average factor used in compensating
+ *     the timeslice for Adaptive scheduler mode.
+ *
+ *   minAvgFactorForARR [out]
+ *     This field returns the minimum average factor used in compensating
+ *     the timeslice for Adaptive scheduler mode.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV2080_CTRL_CMD_FIFO_OBJSCHED_GET_CAPS (0x20801122) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_OBJSCHED_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_OBJSCHED_GET_CAPS_PARAMS_MESSAGE_ID (0x22U)
+
+typedef struct NV2080_CTRL_FIFO_OBJSCHED_GET_CAPS_PARAMS {
+    NvU32  engineId;
+    NvU32  supportedSchedulers[NV2080_CTRL_CMD_SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT];
+    NvBool bIsArrModeSupported;
+    NvU32  maxTimesliceNs;
+    NvU32  minTimesliceNs;
+    NvU32  maxFrequencyForARR;
+    NvU32  minFrequencyForARR;
+    NvU32  maxAvgFactorForARR;
+    NvU32  minAvgFactorForARR;
+} NV2080_CTRL_FIFO_OBJSCHED_GET_CAPS_PARAMS;
+
+// Max channels per group is limited by NV_RAMRL_ENTRY_TSG_LENGTH_MAX for the arch.
+#define NV2080_CTRL_CMD_FIFO_MAX_CHANNELS_PER_TSG 128
+
+/*
+ * NV2080_CTRL_CMD_FIFO_GET_CHANNEL_GROUP_UNIQUE_ID_INFO
+ *   hClient
+ *     Input parameter.
+ *     This parameter specifies the client handle associated with the input
+ *     channel/TSG.
+ *   hChannelOrTsg
+ *     Input parameter.
+ *     This parameter specifies the handle of the input channel (or channel
+ *     group).
+ *   tsgId
+ *     Output parameter.
+ *     This field returns the unique ID of the TSG object if the user
+ *     specified a channel group handle with hChannelOrTsg.
+ *   numChannels
+ *     Output parameter.
+ *     This field returns the number of channels under the TSG if the user
+ *     specified a channel group handle, or 1 if the user specified a
+ *     channel handle.
+ *   channelUniqueID
+ *     Output parameter.
+ *     This array field returns the unique Channel ID for each channel.
+ *   vasUniqueID
+ *     Output parameter.
+ *     This array field returns the unique IDs of the VA Space objects of the
+ *     channels under the TSG or channel.
+ *   veid
+ *     Output parameter.
+ *     This array field returns the VEID for the channels under the TSG or
+ *     channel.
+ */
+#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_GROUP_UNIQUE_ID_INFO (0x20801123) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_CHANNEL_GROUP_UNIQUE_ID_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_GET_CHANNEL_GROUP_UNIQUE_ID_INFO_PARAMS_MESSAGE_ID (0x23U)
+
+typedef struct NV2080_CTRL_FIFO_GET_CHANNEL_GROUP_UNIQUE_ID_INFO_PARAMS {
+    NvHandle hClient;
+    NvHandle hChannelOrTsg;
+    NvU32    tsgId;
+    NvU32    numChannels;
+    NvU32    channelUniqueID[NV2080_CTRL_CMD_FIFO_MAX_CHANNELS_PER_TSG];
+    NvU32    vasUniqueID[NV2080_CTRL_CMD_FIFO_MAX_CHANNELS_PER_TSG];
+    NvU32    veid[NV2080_CTRL_CMD_FIFO_MAX_CHANNELS_PER_TSG];
+} NV2080_CTRL_FIFO_GET_CHANNEL_GROUP_UNIQUE_ID_INFO_PARAMS;
+
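+/*
+ * Editor's note: illustrative only, not part of the generated header. The
+ * per-channel outputs of the struct above are parallel arrays indexed
+ * 0..numChannels-1; useChannelInfo() below is a hypothetical consumer.
+ *
+ *   for (NvU32 i = 0; i < pParams->numChannels; i++)
+ *   {
+ *       // channelUniqueID[i], vasUniqueID[i] and veid[i] all describe the
+ *       // same channel under the queried TSG (or the single channel).
+ *       useChannelInfo(pParams->channelUniqueID[i],
+ *                      pParams->vasUniqueID[i],
+ *                      pParams->veid[i]);
+ *   }
+ */
+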
+/*
+ * NV2080_CTRL_CMD_FIFO_QUERY_CHANNEL_UNIQUE_ID
+ *   This command is used to query the CID (channel ID) in batch.
+ *   hClients
+ *     Input parameter.
+ *     Array of client handles.
+ *   hChannels
+ *     Input parameter.
+ *     Array of channel handles.
+ *   numChannels
+ *     Indicates the number of input client, channel handle pairs.
+ *   channelUniqueIDs
+ *     Output parameter.
+ *     This parameter returns an array of unique Channel IDs, one for each
+ *     input pair of client and channel handles.
+ */
+#define NV2080_CTRL_CMD_FIFO_QUERY_CHANNEL_UNIQUE_ID (0x20801124) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_QUERY_CHANNEL_UNIQUE_ID_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_QUERY_CHANNEL_UNIQUE_ID_PARAMS_MESSAGE_ID (0x24U)
+
+typedef struct NV2080_CTRL_FIFO_QUERY_CHANNEL_UNIQUE_ID_PARAMS {
+    NvHandle hClients[NV2080_CTRL_CMD_FIFO_MAX_CHANNELS_PER_TSG];
+    NvHandle hChannels[NV2080_CTRL_CMD_FIFO_MAX_CHANNELS_PER_TSG];
+    NvU32    numChannels;
+    NvU32    channelUniqueIDs[NV2080_CTRL_CMD_FIFO_MAX_CHANNELS_PER_TSG];
+} NV2080_CTRL_FIFO_QUERY_CHANNEL_UNIQUE_ID_PARAMS;
+/* _ctrl2080fifo_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h
new file mode 100644
index 0000000..1f0c86a
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h
@@ -0,0 +1,208 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080fla.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/* NV20_SUBDEVICE_XX FLA control commands and parameters */
+
+/*
+ * NV2080_CTRL_CMD_FLA_RANGE
+ *
+ * This command is used to initialize/destroy the FLA VAS for a GPU. This is
+ * intended to be used by RM clients that manage the FLA VASpace range. The
+ * mode of the command is decided based on the parameter passed by the client.
+ *
+ *   base
+ *     This parameter specifies the base of the FLA VAS that needs to be
+ *     allocated for this GPU.
+ *
+ *   size
+ *     This parameter specifies the size of the FLA VAS that needs to be
+ *     allocated for this GPU.
+ *
+ *   mode
+ *     This parameter specifies the functionality of the command.
+ *     MODE_INITIALIZE
+ *       Setting this mode will initialize the FLA VASpace for the gpu with
+ *       the base and size passed as arguments. The FLA VASpace will be owned
+ *       by RM. If the client calls the command more than once before
+ *       destroying the FLA VAS, this command will verify the previously
+ *       exported range and return success if it matches. If FLA is not
+ *       supported on the platform, the command will return
+ *       NV_ERR_NOT_SUPPORTED.
+ *     MODE_DESTROY (deprecated)
+ *       This command is a NOP.
+ *     MODE_HOST_MANAGED_VAS_INITIALIZE
+ *       This mode will initialize the FLA VASpace for the gpu with the
+ *       hVASpace handle in addition to the base and size arguments. The FLA
+ *       VASpace will be initialized and owned by guest RM. Used only in
+ *       virtualization platforms by internal clients.
+ *     MODE_HOST_MANAGED_VAS_DESTROY
+ *       This mode will destroy the FLA VAS associated with the device. It
+ *       will destroy only the resources associated with the host RM side.
+ *       Used only in virtualization platforms by internal clients.
+ *
+ *   hVASpace
+ *     This parameter specifies the FLA VASpace that needs to be associated
+ *     with the device. This parameter takes effect only for internal clients
+ *     in virtualization platforms. For any other platform and for external
+ *     clients, this parameter has no effect.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_IN_USE
+ *   NV_ERR_INVALID_OWNER
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_FLA_RANGE (0x20803501) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLA_INTERFACE_ID << 8) | NV2080_CTRL_FLA_RANGE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_FLA_RANGE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NvU32    mode;
+    NvHandle hVASpace;
+} NV2080_CTRL_FLA_RANGE_PARAMS;
+
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MODE_NONE 0x00000000
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MODE_INITIALIZE NVBIT(0)
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MODE_DESTROY NVBIT(1)
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MODE_HOST_MANAGED_VAS_INITIALIZE NVBIT(2)
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MODE_HOST_MANAGED_VAS_DESTROY NVBIT(3)
+
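+/*
+ * Editor's note: illustrative only, not part of the generated header. A
+ * minimal sketch of populating NV2080_CTRL_FLA_RANGE_PARAMS for
+ * MODE_INITIALIZE; the 64 GB base/size values are arbitrary examples.
+ *
+ *   NV2080_CTRL_FLA_RANGE_PARAMS params = { 0 };
+ *   params.base = 0x1000000000ULL; // start of the FLA VAS for this GPU
+ *   params.size = 0x1000000000ULL; // a 64 GB window
+ *   params.mode = NV2080_CTRL_FLA_RANGE_PARAMS_MODE_INITIALIZE;
+ *   // hVASpace is left 0; it only matters for the HOST_MANAGED_VAS modes.
+ */
+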
+/*
+ * NV2080_CTRL_CMD_FLA_SETUP_INSTANCE_MEM_BLOCK
+ *
+ * This command is used to (un)bind the FLA Instance Memory Block (IMB) with
+ * the MMU. This control call exists for the vGPU platform, where a FLA VAS
+ * is created/destroyed by Guest RM. Guest RM doesn't have the privilege to
+ * (un)bind the IMB with the MMU, hence the request needs to be RPC-ed to
+ * Host RM to (un)bind.
+ * The mode of the command is decided based on the actionParam passed by the
+ * client.
+ *
+ *   imbPhysAddr
+ *     This parameter specifies the FLA Instance Memory Block PA to be
+ *     programmed to the MMU. The IMB address should be 4k aligned. This
+ *     parameter is needed only for ACTION_BIND.
+ *
+ *   addrSpace
+ *     This parameter specifies the address space of the FLA Instance Memory
+ *     Block. This parameter is needed only for ACTION_BIND.
+ *     Available options are:
+ *       NV2080_CTRL_FLA_ADDRSPACE_SYSMEM
+ *         Clients need to use this address space if the IMB is located in sysmem.
+ *       NV2080_CTRL_FLA_ADDRSPACE_FBMEM
+ *         Clients need to use this address space if the IMB is located in FB.
+ *
+ *   actionParam
+ *     This parameter specifies the functionality of the command.
+ *       NV2080_CTRL_FLA_ACTION_BIND
+ *         Setting this type will call the busBindFla helper HAL.
+ *       NV2080_CTRL_FLA_ACTION_UNBIND
+ *         Setting this type will call the busUnbindFla helper HAL.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ */
+typedef enum NV2080_CTRL_FLA_ADDRSPACE {
+    NV2080_CTRL_FLA_ADDRSPACE_SYSMEM = 0,
+    NV2080_CTRL_FLA_ADDRSPACE_FBMEM = 1,
+} NV2080_CTRL_FLA_ADDRSPACE;
+
+typedef enum NV2080_CTRL_FLA_ACTION {
+    NV2080_CTRL_FLA_ACTION_BIND = 0,
+    NV2080_CTRL_FLA_ACTION_UNBIND = 1,
+} NV2080_CTRL_FLA_ACTION;
+
+#define NV2080_CTRL_CMD_FLA_SETUP_INSTANCE_MEM_BLOCK (0x20803502) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLA_INTERFACE_ID << 8) | NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 imbPhysAddr, 8);
+    NV2080_CTRL_FLA_ADDRSPACE addrSpace;
+    NV2080_CTRL_FLA_ACTION    flaAction;
+} NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS;
+
+
+/*
+ * NV2080_CTRL_CMD_FLA_GET_RANGE
+ *
+ * This command is used to query the FLA base and size from the plugin, to be
+ * returned as static info to Guest RM.
+ *
+ *   base
+ *     This parameter returns the base address of the FLA range registered to
+ *     the subdevice.
+ *   size
+ *     This parameter returns the size of the FLA range registered to the
+ *     subdevice.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FLA_GET_RANGE (0x20803503) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLA_INTERFACE_ID << 8) | NV2080_CTRL_FLA_GET_RANGE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLA_GET_RANGE_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV2080_CTRL_FLA_GET_RANGE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_FLA_GET_RANGE_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FLA_GET_FABRIC_MEM_STATS
+ *
+ * This command returns the total size and the free size of the fabric
+ * vaspace.
+ * Note: This returns the information for the FABRIC_VASPACE_A class.
+ *
+ *   totalSize [OUT]
+ *     - Total fabric vaspace.
+ *
+ *   freeSize [OUT]
+ *     - Available fabric vaspace.
+ *
+ * Possible status values returned are:
+ *
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FLA_GET_FABRIC_MEM_STATS (0x20803504) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLA_INTERFACE_ID << 8) | NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 totalSize, 8);
+    NV_DECLARE_ALIGNED(NvU64 freeSize, 8);
+} NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS;
+
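+/*
+ * Editor's note: illustrative only, not part of the generated header. A
+ * caller could derive the current fabric VAS utilization from the stats
+ * struct above:
+ *
+ *   NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS stats = { 0 };
+ *   // ... issue NV2080_CTRL_CMD_FLA_GET_FABRIC_MEM_STATS on the subdevice ...
+ *   NvU64 usedSize = stats.totalSize - stats.freeSize;
+ */
+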
+// _ctrl2080fla_h_
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h
new file mode 100644
index 0000000..6fb43d8
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h
@@ -0,0 +1,452 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080flcn.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+
+//
+// XAPICHK/XAPI_TEST breaks on including "nvmisc.h". Workaround: don't include
+// the header in that case and just redefine the macros we need.
+//
+#include "nvmisc.h"
+/*
+ * Obsolete Falcon ID type. Use NV2080_ENGINE_TYPE_ instead.
+ */
+#define FALCON_ID_PMU (NV2080_ENGINE_TYPE_PMU)
+#define FALCON_ID_DPU (NV2080_ENGINE_TYPE_DPU)
+#define FALCON_ID_SEC2 (NV2080_ENGINE_TYPE_SEC2)
+#define FALCON_ID_FBFLCN (NV2080_ENGINE_TYPE_FBFLCN)
+
+/*
+ * NV2080_CTRL_CMD_FLCN_GET_DMEM_USAGE
+ *
+ * This command returns the total heap size and free heap size of a falcon
+ * engine.
+ *
+ *   flcnID
+ *     The falcon ID.
+ *
+ *   heapSize
+ *     Total heap size in bytes.
+ *
+ *   heapFree
+ *     Total free heap size in bytes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT
+ */
+#define NV2080_CTRL_CMD_FLCN_GET_DMEM_USAGE (0x20803101) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS {
+    NvU32 flcnID;
+    NvU32 heapSize;
+    NvU32 heapFree;
+} NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS;
+
+/*!
+ * @defgroup NVOS_INST_EVT Instrumentation event types.
+ * @{
+ */
+
+//! Reserved for uStreamer internal use.
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_RSVD_DO_NOT_USE 0x00U
+
+//! RTOS CTXSW includes next taskID and number of ODP for previous task.
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_CTXSW_END 0x01U
+
+//! Begin of a HW IRQ.
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_HW_IRQ_BEGIN 0x02U
+
+//! End of a HW IRQ, before stack pinning etc. is performed.
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_HW_IRQ_END 0x03U
+
+//! RTOS Timer tick slip. (Only for # ticks processed > 1).
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TIMER_TICK 0x04U
+
+//! Task starts processing an event; includes taskId, eventType and unitId (optional).
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_EVENT_BEGIN 0x05U
+
+//! Task finished processing an event; includes taskId.
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_EVENT_END 0x06U
+
+//! Latency for inserting response into RM queue.
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_RM_QUEUE_LATENCY 0x07U
+
+//! Special / multi-purpose event, see field definition below.
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_SPECIAL_EVENT 0x08U
+
+//! Unused, recycle.
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_UNUSED_0 0x09U
+
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_DMA_END 0x0AU
+
+//! Begin/end for arbitrary block of code. The payload contains a sub-ID for each location profiled.
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_GENERIC_BEGIN 0x0BU
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_GENERIC_END 0x0CU
+
+//! Queueing time for the most recent event.
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_EVENT_LATENCY 0x0DU
+/*!@}*/
+
+#define NV2080_CTRL_FLCN_NVOS_INST_INVALID_TASK_ID 0xFFU
+
+/*
+ * NV2080_CTRL_CMD_FLCN_GET_ENGINE_ARCH
+ *
+ * Get the engine arch, i.e. FALCON, RISCV etc., given the NV2080_ENGINE_TYPE_*.
+ *
+ */
+#define NV2080_CTRL_CMD_FLCN_GET_ENGINE_ARCH (0x20803118) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS_MESSAGE_ID (0x18U)
+
+typedef struct NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS {
+    //! The engine type, from NV2080_ENGINE_TYPE_*
+    NvU32 engine;
+
+    //! The engine architecture - FALCON or RISC-V
+    NvU32 engineArch;
+} NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS;
+
+/*!
+ * @defgroup Engine Arch types
+ * @{
+ */
+#define NV2080_CTRL_FLCN_GET_ENGINE_ARCH_DEFAULT 0x0
+#define NV2080_CTRL_FLCN_GET_ENGINE_ARCH_FALCON 0x1
+#define NV2080_CTRL_FLCN_GET_ENGINE_ARCH_RISCV 0x2
+#define NV2080_CTRL_FLCN_GET_ENGINE_ARCH_RISCV_EB 0x3
+/*!@}*/
+
+
+/* ----------------------- uStreamer (INST v2) ------------------------------ */
+/*!
+ * @defgroup NV2080_CTRL_FLCN_USTREAMER_EVENT uStreamer event fields.
+ *
+ * This is a binary-packed representation of uStreamer events. There are
+ * three main types of entry: Head, Payload, and Tail. COMM here is used
+ * when a field is shared among multiple event types.
+ *
+ * @{
+ */
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_COMM_FLAG 31:31
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_COMM_HEAD 30:30
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_VARIABLE 29:29
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EXTEND 28:28
+
+/*!
+ * The DRFs below need constants assigned to their start and end so they can
+ * be represented properly in FINN. This is because FINN v1 does not yet have
+ * the ability to represent DRFs and bit fields.
+ */
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_EXTENT (27)
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_BASE (20)
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID \
+    (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_EXTENT) : \
+    (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_BASE)
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_EXTENT (28)
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_BASE (24)
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT \
+    (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_EXTENT) : \
+    (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_BASE)
+
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_LENGTH 19:8
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOAD 7:0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT 23:0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_HEAD_TIME 29:0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_DATA_PAYLOAD 30:0
+/*!@}*/
+
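+/*
+ * Editor's note: illustrative only, not part of the generated header. The
+ * field macros above use the NVIDIA "high:low" DRF convention, so a tail
+ * event's compact event ID (bits 28:24) could be extracted from a raw 32-bit
+ * event word with plain shifts and masks:
+ *
+ *   NvU32 eventIdCompact =
+ *       (eventWord >> NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_BASE) &
+ *       ((1U << (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_EXTENT -
+ *                NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_BASE + 1)) - 1);
+ *
+ * This is equivalent to what the driver's DRF_VAL() helper in nvmisc.h computes.
+ */
+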
+/*!
+ * @defgroup NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_BREAKDOWN
+ *
+ * These DRFs define the breakdown of the compact payload for various event IDs.
+ *
+ * @{
+ */
+
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_TASK_ID 7:0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_REASON 10:8
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_REASON_YIELD 0x0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_REASON_INT0 0x1
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_REASON_TIMER_TICK 0x2
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_REASON_QUEUE_BLOCK 0x3
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_REASON_DMA_SUSPENDED 0x4
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_ODP_MISS_COUNT 23:11
+
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TIMER_TICK_TIME_SLIP 23:0
+
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_BEGIN_TASK_ID 7:0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_BEGIN_UNIT_ID 15:8
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_BEGIN_EVENT_TYPE 23:16
+
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_END_TASK_ID 7:0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_END_CALLBACK_ID 15:8
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_END_RPC_FUNC 15:8
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_END_RPC_FUNC_BOBJ_CMD_BASE 0xF0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_END_CLASS_ID 23:16
+
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_RM_QUEUE_LATENCY_SHIFT 10U
+
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_SPECIAL_EVENT_TASK_ID 7:0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_SPECIAL_EVENT_ID 23:8
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_SPECIAL_EVENT_ID_RESERVED 0x000000
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_SPECIAL_EVENT_ID_CB_ENQUEUE_FAIL 0x000001
+
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_LATENCY_SHIFT 6U
+
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_GENERIC_ID 11:0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_GENERIC_ID_INVALID 0x000
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_GENERIC_ID_VF_SWITCH_TOTAL 0x001
+
+/*!@}*/
+
+
+/*!
+ * @defgroup NV2080_CTRL_FLCN_USTREAMER_FEATURE
+ *
+ * This defines all the features currently supported by uStreamer. For a new
+ * use case of uStreamer, a feature should be defined here describing the use
+ * case. This value should be unique for each queue.
+ *
+ * @{
+ */
+#define NV2080_CTRL_FLCN_USTREAMER_FEATURE_DEFAULT 0U
+#define NV2080_CTRL_FLCN_USTREAMER_FEATURE__COUNT 1U
+/*!@}*/
+
+/*!
+ * @defgroup NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY + * + * This defines the DRF used for ustreamer queue policy + * + * @{ + */ + +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IDLE_FLUSH 0:0 +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IDLE_FLUSH_DISABLED 0U +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IDLE_FLUSH_ENABLED 1U + +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_FULL_FLUSH 1:1 +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_FULL_FLUSH_DISABLED 0U +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_FULL_FLUSH_ENABLED 1U + +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IMMEDIATE_FLUSH 2:2 +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IMMEDIATE_FLUSH_DISABLED 0U +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IMMEDIATE_FLUSH_ENABLED 1U + +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IDLE_THRESHOLD 31:8 + +/*!@}*/ + +/*! + * The maximum number of compact event types, calculated from the number of bits + * in the event structure. + */ +#define NV2080_CTRL_FLCN_USTREAMER_NUM_EVT_TYPES_COMPACT (0x20U) /* finn: Evaluated from "(1 << (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_EXTENT - NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_BASE + 1))" */ + +/*! + * The maximum number of event types, calculated from the number of bits in the + * event structure. + */ +#define NV2080_CTRL_FLCN_USTREAMER_NUM_EVT_TYPES (0x120U) /* finn: Evaluated from "((1 << (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_EXTENT - NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_BASE + 1)) + NV2080_CTRL_FLCN_USTREAMER_NUM_EVT_TYPES_COMPACT)" */ + +/*! + * The number of bytes required in the event mask to contain all event types. + */ +#define NV2080_CTRL_FLCN_USTREAMER_MASK_SIZE_BYTES (0x24U) /* finn: Evaluated from "((NV2080_CTRL_FLCN_USTREAMER_NUM_EVT_TYPES + 7) / 8)" */ + +/*! + * uStreamer Event Filter type, stored as a bitmask. + */ +typedef struct NV2080_CTRL_FLCN_USTREAMER_EVENT_FILTER { + NvU8 mask[NV2080_CTRL_FLCN_USTREAMER_MASK_SIZE_BYTES]; +} NV2080_CTRL_FLCN_USTREAMER_EVENT_FILTER; + +/*! + * NV2080_CTRL_CMD_FLCN_USTREAMER_QUEUE_INFO + * Get queue info for mapping / unmapping + */ +#define NV2080_CTRL_CMD_FLCN_USTREAMER_QUEUE_INFO (0x20803120) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS { + //! The engine type, from NV2080_ENGINE_TYPE_* + NvU32 engine; + + //! + // The page size of the requested queue in bytes. + // + NvU32 pageSize; + + //! Offset of the queue buffer in FB. + NV_DECLARE_ALIGNED(NvUPtr offset, 8); + + //! + // The size of the user-mapped instrumentation buffer. Measured in bytes. + // + NvU32 size; + + //! + // The feature ID of the queue. + // + NvU8 queueFeatureId; +} NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_FLCN_USTREAMER_CONTROL_GET/SET + * + * Get/set the event bitmask for the default queue. + */ +#define NV2080_CTRL_CMD_FLCN_USTREAMER_CONTROL_GET (0x20803122) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_USTREAMER_CONTROL_GET_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_FLCN_USTREAMER_CONTROL_SET (0x20803123) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_USTREAMER_CONTROL_SET_PARAMS_MESSAGE_ID" */ + +typedef struct NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS { + //! 
The engine type, from NV2080_ENGINE_TYPE_*
+    NvU32 engine;
+
+    /*!
+     * The bitmask of which event types to log. An event type corresponding to
+     * a bit with a zero will be ignored at the log site, which prevents it
+     * from filling up the resident buffer in the PMU. In general, set this to
+     * only log the event types you actually want to use.
+     * Refer to NVOS_BM_* in nvos_utility.h for usage.
+     */
+    NV2080_CTRL_FLCN_USTREAMER_EVENT_FILTER eventFilter;
+
+    //! The queueId of the queue whose eventFilter we want to interact with
+    NvU8 queueId;
+} NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS;
+
+#define NV2080_CTRL_FLCN_USTREAMER_CONTROL_GET_PARAMS_MESSAGE_ID (0x22U)
+
+typedef NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS NV2080_CTRL_FLCN_USTREAMER_CONTROL_GET_PARAMS;
+
+#define NV2080_CTRL_FLCN_USTREAMER_CONTROL_SET_PARAMS_MESSAGE_ID (0x23U)
+
+typedef NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS NV2080_CTRL_FLCN_USTREAMER_CONTROL_SET_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FLCN_GET_CTX_BUFFER_INFO
+ *
+ * This command provides the attributes of the falcon engine context buffer.
+ *
+ *   hUserClient [IN]
+ *     This parameter specifies the client handle that owns this channel.
+ *   hChannel [IN]
+ *     This parameter specifies the channel or channel group (TSG) handle.
+ *   alignment
+ *     Specifies the alignment requirement for each context buffer.
+ *   size
+ *     Aligned size of the context buffer.
+ *   bufferHandle
+ *     Opaque pointer to the memdesc. Used by kernel clients for tracking
+ *     purposes only.
+ *   pageCount
+ *     Allocation size in the form of pageCount.
+ *   physAddr
+ *     Physical address of the buffer's first page.
+ *   aperture
+ *     Allocation aperture. Could be SYSMEM, VIDMEM, UNKNOWN.
+ *   kind
+ *     PTE kind of this allocation.
+ *   pageSize
+ *     Page size of the buffer.
+ *   bIsContigous
+ *     States whether the physical allocation for this buffer is contiguous.
+ *     PageSize will have no meaning if this flag is set.
+ *   bDeviceDescendant
+ *     TRUE if the allocation is constructed under a Device or Subdevice.
+ *   uuid
+ *     SHA1 UUID of the Device or Subdevice. Valid when bDeviceDescendant is
+ *     TRUE.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_FLCN_GET_CTX_BUFFER_INFO (0x20803124) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS_MESSAGE_ID (0x24U)
+
+typedef struct NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS {
+    NvHandle hUserClient;
+    NvHandle hChannel;
+    NV_DECLARE_ALIGNED(NvU64 alignment, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NV_DECLARE_ALIGNED(NvP64 bufferHandle, 8);
+    NV_DECLARE_ALIGNED(NvU64 pageCount, 8);
+    NV_DECLARE_ALIGNED(NvU64 physAddr, 8);
+    NvU32  aperture;
+    NvU32  kind;
+    NvU32  pageSize;
+    NvBool bIsContigous;
+    NvBool bDeviceDescendant;
+    NvU8   uuid[16];
+} NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS;
+
+// Aperture flags
+#define NV2080_CTRL_FLCN_CTX_BUFFER_INFO_APERTURE_UNKNWON ADDR_UNKNOWN
+#define NV2080_CTRL_FLCN_CTX_BUFFER_INFO_APERTURE_SYSMEM ADDR_SYSMEM
+#define NV2080_CTRL_FLCN_CTX_BUFFER_INFO_APERTURE_FBMEM ADDR_FBMEM
+
+/*
+ * NV2080_CTRL_CMD_FLCN_GET_CTX_BUFFER_SIZE
+ *
+ * This command provides the size of the falcon engine context buffer.
+ *
+ *   hChannel [IN]
+ *     This parameter specifies the channel or channel group (TSG) handle.
+ *   totalBufferSize [OUT]
+ *     This parameter returns the total context buffer size.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FLCN_GET_CTX_BUFFER_SIZE (0x20803125) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS_MESSAGE_ID (0x25U) + +typedef struct NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS { + NvHandle hChannel; + NV_DECLARE_ALIGNED(NvU64 totalBufferSize, 8); +} NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS; + + + +/* _ctrl2080flcn_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h new file mode 100644 index 0000000..7ea0b05 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080fuse.finn +// + + + +/* _ctrl2080fuse_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h new file mode 100644 index 0000000..fd0e349 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h @@ -0,0 +1,74 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080gpio.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+
+
+#define NV2080_CTRL_CMD_INTERNAL_GPIO_PROGRAM_DIRECTION (0x20802300) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPIO_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPIO_PROGRAM_DIRECTION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GPIO_PROGRAM_DIRECTION_PARAMS_MESSAGE_ID (0x00U)
+
+typedef struct NV2080_CTRL_INTERNAL_GPIO_PROGRAM_DIRECTION_PARAMS {
+    NvU32  gpioPin; // in
+    NvBool bInput;  // in
+} NV2080_CTRL_INTERNAL_GPIO_PROGRAM_DIRECTION_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_GPIO_PROGRAM_OUTPUT (0x20802301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPIO_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPIO_PROGRAM_OUTPUT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GPIO_PROGRAM_OUTPUT_PARAMS_MESSAGE_ID (0x01U)
+
+typedef struct NV2080_CTRL_INTERNAL_GPIO_PROGRAM_OUTPUT_PARAMS {
+    NvU32 gpioPin; // in
+    NvU32 value;   // in
+} NV2080_CTRL_INTERNAL_GPIO_PROGRAM_OUTPUT_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_GPIO_READ_INPUT (0x20802302) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPIO_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPIO_READ_INPUT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GPIO_READ_INPUT_PARAMS_MESSAGE_ID (0x02U)
+
+typedef struct NV2080_CTRL_INTERNAL_GPIO_READ_INPUT_PARAMS {
+    NvU32 gpioPin; // in
+    NvU32 value;   // out
+} NV2080_CTRL_INTERNAL_GPIO_READ_INPUT_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_GPIO_ACTIVATE_HW_FUNCTION (0x20802303) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPIO_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPIO_ACTIVATE_HW_FUNCTION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GPIO_ACTIVATE_HW_FUNCTION_PARAMS_MESSAGE_ID (0x03U)
+
+typedef struct NV2080_CTRL_INTERNAL_GPIO_ACTIVATE_HW_FUNCTION_PARAMS {
+    NvU32 function; // in
+    NvU32 pin;      // in
+} NV2080_CTRL_INTERNAL_GPIO_ACTIVATE_HW_FUNCTION_PARAMS;
+
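+/*
+ * Editor's note: illustrative only, not part of the generated header.
+ * Driving a pin high with the internal GPIO controls above would take two
+ * calls: program the direction as output, then program the output value.
+ * The pin number 5 is an arbitrary example.
+ *
+ *   NV2080_CTRL_INTERNAL_GPIO_PROGRAM_DIRECTION_PARAMS dir = { 0 };
+ *   dir.gpioPin = 5;
+ *   dir.bInput  = NV_FALSE;   // output direction
+ *   // ... issue NV2080_CTRL_CMD_INTERNAL_GPIO_PROGRAM_DIRECTION ...
+ *
+ *   NV2080_CTRL_INTERNAL_GPIO_PROGRAM_OUTPUT_PARAMS out = { 0 };
+ *   out.gpioPin = 5;
+ *   out.value   = 1;          // drive high
+ *   // ... issue NV2080_CTRL_CMD_INTERNAL_GPIO_PROGRAM_OUTPUT ...
+ */
+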
+/* _ctrl2080gpio_h_ */
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
new file mode 100644
index 0000000..f7669fc
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
@@ -0,0 +1,4739 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080gpu.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+#include "ctrl/ctrl2080/ctrl2080gr.h"
+#include "ctrl/ctrl0000/ctrl0000system.h"
+#include "nvcfg_sdk.h"
+#include "nvstatus.h"
+
+#define NV_GRID_LICENSE_INFO_MAX_LENGTH (128)
+
+/* License info strings for vGPU products */
+#define NV_GRID_LICENSE_FEATURE_VPC_EDITION "GRID-Virtual-PC,2.0;Quadro-Virtual-DWS,5.0;GRID-Virtual-WS,2.0;GRID-Virtual-WS-Ext,2.0"
+#define NV_GRID_LICENSE_FEATURE_VAPPS_EDITION "GRID-Virtual-Apps,3.0"
+#define NV_GRID_LICENSE_FEATURE_VIRTUAL_WORKSTATION_EDITION "Quadro-Virtual-DWS,5.0;GRID-Virtual-WS,2.0;GRID-Virtual-WS-Ext,2.0"
+#define NV_GRID_LICENSE_FEATURE_GAMING_EDITION "GRID-vGaming,8.0"
+#define NV_GRID_LICENSE_FEATURE_COMPUTE_EDITION "NVIDIA-vComputeServer,9.0"
+
+#define NV_GRID_LICENSED_PRODUCT_VWS "NVIDIA RTX Virtual Workstation"
+#define NV_GRID_LICENSED_PRODUCT_GAMING "NVIDIA Cloud Gaming"
+#define NV_GRID_LICENSED_PRODUCT_VPC "NVIDIA Virtual PC"
+#define NV_GRID_LICENSED_PRODUCT_VAPPS "NVIDIA Virtual Applications"
+#define NV_GRID_LICENSED_PRODUCT_COMPUTE "NVIDIA Virtual Compute Server"
+
+
+
+/* NV20_SUBDEVICE_XX gpu control commands and parameters */
+
+
+
+typedef NVXXXX_CTRL_XXX_INFO NV2080_CTRL_GPU_INFO;
+
+/* valid gpu info index values */
+
+#define NV2080_CTRL_GPU_INFO_INDEX_INDEX 23:0
+
+
+
+#define NV2080_CTRL_GPU_INFO_INDEX_ECID_LO32 (0x00000001U)
+#define NV2080_CTRL_GPU_INFO_INDEX_ECID_HI32 (0x00000002U)
+#define NV2080_CTRL_GPU_INFO_INDEX_MINOR_REVISION_EXT (0x00000004U)
+
+
+#define NV2080_CTRL_GPU_INFO_INDEX_NETLIST_REV0 (0x00000012U)
+#define NV2080_CTRL_GPU_INFO_INDEX_NETLIST_REV1 (0x00000013U)
+
+
+#define NV2080_CTRL_GPU_INFO_INDEX_ECID_EXTENDED (0x0000001bU)
+#define NV2080_CTRL_GPU_INFO_INDEX_SYSMEM_ACCESS (0x0000001fU)
+
+
+#define NV2080_CTRL_GPU_INFO_INDEX_GEMINI_BOARD (0x00000022U)
+
+
+#define NV2080_CTRL_GPU_INFO_INDEX_SURPRISE_REMOVAL_POSSIBLE (0x00000025U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GLOBAL_POISON_FUSE_ENABLED (0x00000027U)
+#define NV2080_CTRL_GPU_INFO_INDEX_NVSWITCH_PROXY_DETECTED (0x00000028U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SR_SUPPORT (0x00000029U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SMC_MODE (0x0000002aU)
+#define NV2080_CTRL_GPU_INFO_INDEX_SPLIT_VAS_MGMT_SERVER_CLIENT_RM (0x0000002bU)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SM_VERSION (0x0000002cU)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_FLA_CAPABILITY (0x0000002dU)
+
+
+#define NV2080_CTRL_GPU_INFO_INDEX_PER_RUNLIST_CHANNEL_RAM (0x0000002fU)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_ATS_CAPABILITY (0x00000030U)
+#define NV2080_CTRL_GPU_INFO_INDEX_NVENC_STATS_REPORTING_STATE (0x00000031U)
+
+
+#define NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED (0x00000033U)
+#define NV2080_CTRL_GPU_INFO_INDEX_DISPLAY_ENABLED (0x00000034U)
+#define NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED (0x00000035U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_PROFILING_CAPABILITY (0x00000036U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_DEBUGGING_CAPABILITY (0x00000037U)
+
+
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_LOCAL_EGM_CAPABILITY (0x0000003aU)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SELF_HOSTED_CAPABILITY (0x0000003bU)
+#define NV2080_CTRL_GPU_INFO_INDEX_CMP_SKU (0x0000003cU)
+#define NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY (0x0000003dU)
+
+
+#define NV2080_CTRL_GPU_INFO_INDEX_IS_RESETLESS_MIG_SUPPORTED (0x0000003fU)
+
+
+#define NV2080_CTRL_GPU_INFO_INDEX_IS_LOCALIZATION_SUPPORTED (0x00000041U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_NON_PASID_ATS_CAPABILITY (0x00000042U)
+
+
+#define NV2080_CTRL_GPU_INFO_INDEX_COHERENT_GPU_MEMORY_MODE (0x00000044U)
+
+#define NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE (0x00000045U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GROUP_ID 30:24
+#define NV2080_CTRL_GPU_INFO_INDEX_RESERVED 31:31
+
+/* valid minor revision extended values */
+#define NV2080_CTRL_GPU_INFO_MINOR_REVISION_EXT_NONE (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_MINOR_REVISION_EXT_P (0x00000001U)
+#define NV2080_CTRL_GPU_INFO_MINOR_REVISION_EXT_V (0x00000002U)
+#define NV2080_CTRL_GPU_INFO_MINOR_REVISION_EXT_PV (0x00000003U)
+
+
+
+/* valid system memory access capability values */
+#define NV2080_CTRL_GPU_INFO_SYSMEM_ACCESS_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_SYSMEM_ACCESS_YES (0x00000001U)
+
+
+
+/* valid gemini board values */
+#define NV2080_CTRL_GPU_INFO_INDEX_GEMINI_BOARD_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GEMINI_BOARD_YES (0x00000001U)
+
+/* valid surprise removal values */
+#define NV2080_CTRL_GPU_INFO_INDEX_SURPRISE_REMOVAL_POSSIBLE_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_SURPRISE_REMOVAL_POSSIBLE_YES (0x00000001U)
+
+/* valid poison fuse capability values */
+#define NV2080_CTRL_GPU_INFO_INDEX_GLOBAL_POISON_FUSE_ENABLED_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GLOBAL_POISON_FUSE_ENABLED_YES (0x00000001U)
+
+/* valid nvswitch proxy detected values */
+#define NV2080_CTRL_GPU_INFO_NVSWITCH_PROXY_DETECTED_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_NVSWITCH_PROXY_DETECTED_YES (0x00000001U)
+
+/* valid NVSR GPU support info values */
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SR_SUPPORT_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SR_SUPPORT_YES (0x00000001U)
+
+/* valid SMC mode values */
+#define NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_UNSUPPORTED (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_ENABLED (0x00000001U)
+#define NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_DISABLED (0x00000002U)
+#define NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_ENABLE_PENDING (0x00000003U)
+#define NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_DISABLE_PENDING (0x00000004U)
+
+/* valid split VAS mode values */
+#define NV2080_CTRL_GPU_INFO_SPLIT_VAS_MGMT_SERVER_CLIENT_RM_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_SPLIT_VAS_MGMT_SERVER_CLIENT_RM_YES (0x00000001U)
+
+/* valid FLA capability values */
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_FLA_CAPABILITY_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_FLA_CAPABILITY_YES (0x00000001U)
+
+/* valid per runlist channel ram capability values */
+#define NV2080_CTRL_GPU_INFO_INDEX_PER_RUNLIST_CHANNEL_RAM_DISABLED (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_PER_RUNLIST_CHANNEL_RAM_ENABLED (0x00000001U)
+
+/* valid ATS capability values */
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_ATS_CAPABILITY_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_ATS_CAPABILITY_YES (0x00000001U)
+
+/* valid Nvenc Session Stats reporting state values */
+#define NV2080_CTRL_GPU_INFO_NVENC_STATS_REPORTING_STATE_DISABLED (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_NVENC_STATS_REPORTING_STATE_ENABLED (0x00000001U)
+#define NV2080_CTRL_GPU_INFO_NVENC_STATS_REPORTING_STATE_NOT_SUPPORTED (0x00000002U)
+
+/* valid 4K PAGE isolation requirement values */
+#define NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED_YES (0x00000001U)
+
+/* valid display enabled values */
+#define NV2080_CTRL_GPU_INFO_DISPLAY_ENABLED_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_DISPLAY_ENABLED_YES (0x00000001U)
+
+/* valid mobile config enabled values */
+#define NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED_YES (0x00000001U)
+
+
+/* valid profiling capability values */
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_PROFILING_CAPABILITY_DISABLED (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_PROFILING_CAPABILITY_ENABLED (0x00000001U)
+
+/* valid debugging capability values */
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_DEBUGGING_CAPABILITY_DISABLED (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_DEBUGGING_CAPABILITY_ENABLED (0x00000001U)
+
+
+
+/* valid local EGM supported values */
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_LOCAL_EGM_CAPABILITY_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_LOCAL_EGM_CAPABILITY_YES (0x00000001U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_LOCAL_EGM_PEERID 31:1
+
+/* valid self hosted values */
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SELF_HOSTED_CAPABILITY_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SELF_HOSTED_CAPABILITY_YES (0x00000001U)
+
+/* valid CMP (Crypto Mining Processor) SKU values */
+#define NV2080_CTRL_GPU_INFO_INDEX_CMP_SKU_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_CMP_SKU_YES (0x00000001U)
+
+
+/* valid dma-buf support values */
+#define NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY_YES (0x00000001U)
+
+/* valid resetless MIG device supported values */
+#define NV2080_CTRL_GPU_INFO_INDEX_IS_RESETLESS_MIG_SUPPORTED_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_IS_RESETLESS_MIG_SUPPORTED_YES (0x00000001U)
+
+/* valid localization supported values */
+#define NV2080_CTRL_GPU_INFO_INDEX_IS_LOCALIZATION_SUPPORTED_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_IS_LOCALIZATION_SUPPORTED_YES (0x00000001U)
+
+/* valid Non-PASID ATS capability values */
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_NON_PASID_ATS_CAPABILITY_NO (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_GPU_NON_PASID_ATS_CAPABILITY_YES (0x00000001U)
+
+
+
+/* valid coherent GPU memory mode capability values */
+#define NV2080_CTRL_GPU_INFO_INDEX_COHERENT_GPU_MEMORY_MODE_NONE (0x00000000U)
+#define NV2080_CTRL_GPU_INFO_INDEX_COHERENT_GPU_MEMORY_MODE_NUMA (0x00000001U)
+#define NV2080_CTRL_GPU_INFO_INDEX_COHERENT_GPU_MEMORY_MODE_DRIVER (0x00000002U)
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_INFO
+ *
+ * This command returns gpu information for the associated GPU. Requests
+ * to retrieve gpu information use a list of one or more NV2080_CTRL_GPU_INFO
+ * structures.
+ *
+ *   gpuInfoListSize
+ *     This field specifies the number of entries on the caller's
+ *     gpuInfoList.
+ *   gpuInfoList
+ *     This field specifies a pointer in the caller's address space
+ *     to the buffer into which the gpu information is to be returned.
+ *     This buffer must be at least as big as gpuInfoListSize multiplied
+ *     by the size of the NV2080_CTRL_GPU_INFO structure.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_OPERATING_SYSTEM + */ +#define NV2080_CTRL_CMD_GPU_GET_INFO (0x20800101U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_GPU_GET_INFO_PARAMS { + NvU32 gpuInfoListSize; + NV_DECLARE_ALIGNED(NvP64 gpuInfoList, 8); +} NV2080_CTRL_GPU_GET_INFO_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_INFO_V2 (0x20800102U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_GPU_GET_INFO_V2_PARAMS { + NvU32 gpuInfoListSize; + NV2080_CTRL_GPU_INFO gpuInfoList[NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE]; +} NV2080_CTRL_GPU_GET_INFO_V2_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_NAME_STRING + * + * This command returns the name of the GPU in string form in either ASCII + * or UNICODE format. + * + * gpuNameStringFlags + * This field specifies flags to use while creating the GPU name string. + * Valid flags values: + * NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII + * The returned name string should be in standard ASCII format. + * NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_UNICODE + * The returned name string should be in unicode format. + * gpuNameString + * This field contains the buffer into which the name string should be + * returned. The length of the returned string will be no more than + * NV2080_CTRL_GPU_MAX_NAME_STRING_LENGTH bytes in size. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV2080_CTRL_CMD_GPU_GET_NAME_STRING (0x20800110U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS_MESSAGE_ID" */ + +#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U) + +// This field is deprecated - 'gpuNameStringFlags' is now a simple scalar. +// Field maintained (and extended from 0:0) for compile-time compatibility. +#define NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE 31:0 + +/* valid gpu name string flags */ +#define NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII (0x00000000U) +#define NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_UNICODE (0x00000001U) + +#define NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS { + NvU32 gpuNameStringFlags; + union { + NvU8 ascii[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU16 unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + } gpuNameString; +} NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_SHORT_NAME_STRING + * + * This command returns the short name of the GPU in ASCII string form. + * + * gpuShortNameString + * This field contains the buffer into which the short name string should + * be returned. The length of the returned string will be no more than + * NV2080_MAX_NAME_STRING_LENGTH bytes in size. 
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV2080_CTRL_CMD_GPU_GET_SHORT_NAME_STRING (0x20800111U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS_MESSAGE_ID (0x11U)
+
+typedef struct NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS {
+    NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+} NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_SET_POWER
+ *
+ * This command sets the power state for the GPU as a whole, various engines,
+ * or clocks.
+ *
+ *   target
+ *     One of NV2080_CTRL_GPU_SET_POWER_TARGET_*
+ *
+ *   newLevel
+ *     One of NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_*
+ *            NV2080_CTRL_GPU_SET_POWER_STATE_ENGINE_LEVEL_*
+ *            NV2080_CTRL_GPU_SET_POWER_STATE_CLOCK_LEVEL_*
+ *     depending on the target above.
+ *
+ *   oldLevel
+ *     Previous level as appropriate.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_GPU_SET_POWER (0x20800112U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_POWER_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_SET_POWER_PARAMS_MESSAGE_ID (0x12U)
+
+typedef struct NV2080_CTRL_GPU_SET_POWER_PARAMS {
+    NvU32 target;
+    NvU32 newLevel;
+    NvU32 oldLevel;
+} NV2080_CTRL_GPU_SET_POWER_PARAMS;
+
+
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0 (0x00000000U)
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_1 (0x00000001U)
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_2 (0x00000002U)
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_4 (0x00000004U)
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_7 (0x00000007U)
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_SDM
+ *
+ * This command returns the subdevice mask value for the associated subdevice.
+ * The subdevice mask value can be used with the SET_SUBDEVICE_MASK instruction
+ * provided by the NV36_CHANNEL_DMA and newer channel dma classes.
+ *
+ *   subdeviceMask [out]
+ *     This field returns the subdevice mask value.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_GPU_GET_SDM (0x20800118U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_SDM_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_SDM_PARAMS_MESSAGE_ID (0x18U)
+
+typedef struct NV2080_CTRL_GPU_GET_SDM_PARAMS {
+    NvU32 subdeviceMask;
+} NV2080_CTRL_GPU_GET_SDM_PARAMS;
+
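+/*
+ * Editor's note: illustrative only, not part of the generated header. A
+ * caller might capture the subdevice mask for later use with the
+ * SET_SUBDEVICE_MASK instruction described above:
+ *
+ *   NV2080_CTRL_GPU_GET_SDM_PARAMS sdm = { 0 };
+ *   // ... issue NV2080_CTRL_CMD_GPU_GET_SDM on the subdevice ...
+ *   NvU32 subdevMask = sdm.subdeviceMask;
+ */
+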
+/*
+ * NV2080_CTRL_CMD_GPU_SET_SDM
+ *
+ * This command sets the subdevice instance and mask value for the associated
+ * subdevice. The subdevice mask value can be used with the SET_SUBDEVICE_MASK
+ * instruction provided by the NV36_CHANNEL_DMA and newer channel dma classes.
+ * It must be called before the GPU HW is initialized, otherwise
+ * NV_ERR_INVALID_STATE is returned.
+ *
+ *   subdeviceMask [in]
+ *     This field configures the subdevice mask value for the GPU/Subdevice.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_DATA
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_GPU_SET_SDM (0x20800120U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_SDM_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_SET_SDM_PARAMS_MESSAGE_ID (0x20U)
+
+typedef struct NV2080_CTRL_GPU_SET_SDM_PARAMS {
+    NvU32 subdeviceMask;
+} NV2080_CTRL_GPU_SET_SDM_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_SIMULATION_INFO
+ *
+ * This command returns the associated subdevice's simulation information.
+ *
+ *   type
+ *     This field returns the simulation type.
+ *     One of NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_*
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_SIMULATION_INFO (0x20800119U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS_MESSAGE_ID (0x19U)
+
+typedef struct NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS {
+    NvU32 type;
+} NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS;
+
+#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_NONE (0x00000000U)
+#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_MODS_AMODEL (0x00000001U)
+#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_LIVE_AMODEL (0x00000002U)
+#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_FMODEL (0x00000003U)
+#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_RTL (0x00000004U)
+#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_EMU (0x00000005U)
+#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_EMU_LOW_POWER (0x00000006U)
+#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_DFPGA (0x00000007U)
+#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_DFPGA_RTL (0x00000008U)
+#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_DFPGA_FMODEL (0x00000009U)
+#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_UNKNOWN (0xFFFFFFFFU)
+
+/*
+ * NV2080_CTRL_GPU_REG_OP
+ *
+ * This structure describes register operation information for use with
+ * the NV2080_CTRL_CMD_GPU_EXEC_REG_OPS command. The structure describes
+ * a single register operation. The operation can be a read or write and
+ * can involve either 32bits or 64bits of data.
+ *
+ * For 32bit read operations, the operation takes the following form:
+ *
+ *   regValueLo = read(bar0 + regOffset)
+ *   regValueHi = 0
+ *
+ * For 64bit read operations, the operation takes the following form:
+ *
+ *   regValueLo = read(bar0 + regOffset)
+ *   regValueHi = read(bar0 + regOffset + 4)
+ *
+ * For 32bit write operations, the operation takes the following form:
+ *
+ *   new = ((read(bar0 + regOffset) & ~regAndNMaskLo) | regValueLo)
+ *   write(bar0 + regOffset, new)
+ *
+ * For 64bit write operations, the operation takes the following form:
+ *
+ *   new_lo = ((read(bar0 + regOffset) & ~regAndNMaskLo) | regValueLo)
+ *   new_hi = ((read(bar0 + regOffset + 4) & ~regAndNMaskHi) | regValueHi)
+ *   write(bar0 + regOffset, new_lo)
+ *   write(bar0 + regOffset + 4, new_hi)
+ *
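+ * Editor's note - worked example (illustrative, not from the original
+ * header): for a 32bit write with regValueLo = 0x00000020 and
+ * regAndNMaskLo = 0x000000F0, a register currently holding 0x000012AB
+ * becomes ((0x000012AB & ~0x000000F0) | 0x00000020) = 0x0000122B; bits
+ * 7:4 are cleared by the AND-NOT mask and then set from regValueLo.
+ *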
+ *       NV2080_CTRL_GPU_REG_OP_WRITE_08
+ *         The register operation should be an 8-bit global privileged register write.
+ *       NV2080_CTRL_GPU_REG_OP_READ_32
+ *         The register operation should be a 32-bit register read.
+ *       NV2080_CTRL_GPU_REG_OP_WRITE_32
+ *         The register operation should be a 32-bit register write.
+ *       NV2080_CTRL_GPU_REG_OP_READ_64
+ *         The register operation should be a 64-bit register read.
+ *       NV2080_CTRL_GPU_REG_OP_WRITE_64
+ *         The register operation should be a 64-bit register write.
+ *   regType
+ *     This field specifies the type of the register specified by the
+ *     regOffset parameter. Valid values for this parameter are:
+ *       NV2080_CTRL_GPU_REG_OP_TYPE_GLOBAL
+ *         The register is a global privileged register. Read operations
+ *         return the current value from the associated global register.
+ *         Write operations for registers of this type take effect immediately.
+ *       NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX
+ *         The register is a graphics context register. Read operations
+ *         return the current value from the associated global register.
+ *         Write operations are applied to all existing graphics engine
+ *         contexts. Any newly created graphics engine contexts will also
+ *         be modified.
+ *       NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_TPC
+ *         This is a graphics context TPC register group. Write operations are
+ *         applied to the TPC group(s) specified by regGroupMask.
+ *         This field is ignored for read operations.
+ *       NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_SM
+ *         This is a graphics context SM register group that is inside a TPC
+ *         group. Write operations are applied to the SM group(s) specified by
+ *         regGroupMask (TPC) and regSubGroupMask (SM). This field is ignored
+ *         for read operations.
+ *       NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_CROP
+ *         This is a graphics context CROP register group. Write operations
+ *         are applied to the registers specified by regGroupMask. This field is
+ *         ignored for read operations.
+ *       NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_ZROP
+ *         This is a graphics context ZROP register group. Write operations
+ *         are applied to the registers specified by regGroupMask. This field is
+ *         ignored for read operations.
+ *       NV2080_CTRL_GPU_REG_OP_TYPE_FB
+ *         This is an FB register group. Write operations are applied to the
+ *         registers specified by regGroupMask. This field is
+ *         ignored for read operations.
+ *       NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_QUAD
+ *         This is a graphics context QUAD register group. Operations
+ *         are applied to the registers specified by the regQuad value.
+ *   regQuad
+ *     This field specifies the quad to be accessed for register regOffset when
+ *     the regType specified is NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_QUAD.
+ *   regGroupMask
+ *     This field specifies which registers inside an array should be updated.
+ *     This field is used when regType is one of the following:
+ *       NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_TPC
+ *       NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_SM
+ *       NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_CROP
+ *       NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_ZROP
+ *       NV2080_CTRL_GPU_REG_OP_TYPE_FB
+ *     When regGroupMask is used, the regOffset MUST be the first register in
+ *     an array.
+ *   regSubGroupMask
+ *     This field specifies which registers inside a group should be updated.
+ *     This field is used for updating SM registers when regType is:
+ *       NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_TPC
+ *     When regSubGroupMask is used, regOffset MUST be the first register in an
+ *     array AND also the first one in the sub-array.
+ *     regGroupMask specifies
+ *     TPC(X) and regSubGroupMask specifies SM_CTX_N(Y).
+ *   regStatus
+ *     This field returns the completion status for the associated register
+ *     operation in the form of a bitmask. Possible status values for this
+ *     field are:
+ *       NV2080_CTRL_GPU_REG_OP_STATUS_SUCCESS
+ *         This value indicates the operation completed successfully.
+ *       NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_OP
+ *         This bit value indicates that the regOp value is not valid.
+ *       NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_TYPE
+ *         This bit value indicates that the regType value is not valid.
+ *       NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_OFFSET
+ *         This bit value indicates that the regOffset value is invalid.
+ *         The regOffset value must be within the legal BAR0 range for the
+ *         associated GPU and must target a supported register with a
+ *         supported operation.
+ *       NV2080_CTRL_GPU_REG_OP_STATUS_UNSUPPORTED_OP
+ *         This bit value indicates that the operation to the register
+ *         specified by the regOffset value is not supported for the
+ *         associated GPU.
+ *       NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_MASK
+ *         This bit value indicates that the regGroupMask value is invalid.
+ *         The regGroupMask must be a subset of the TPCs that are enabled on the
+ *         associated GPU.
+ *       NV2080_CTRL_GPU_REG_OP_STATUS_NOACCESS
+ *         The caller does not have access to the register at the given offset.
+ *   regOffset
+ *     This field specifies the register offset to access. The specified
+ *     offset must be a valid BAR0 offset for the associated GPU.
+ *   regValueLo
+ *     This field contains the low 32 bits of the register value.
+ *     For read operations, this value returns the current value of the
+ *     register specified by regOffset. For write operations, this field
+ *     specifies the logical OR value applied to the current value
+ *     contained in the register specified by regOffset.
+ *   regValueHi
+ *     This field contains the high 32 bits of the register value.
+ *     For read operations, this value returns the current value of the
+ *     register specified by regOffset + 4. For write operations, this field
+ *     specifies the logical OR value applied to the current value
+ *     contained in the register specified by regOffset + 4.
+ *   regAndNMaskLo
+ *     This field contains the mask used to clear a desired field from
+ *     the current value contained in the register specified by regOffset.
+ *     This field is negated and ANDed with the current register value.
+ *     This field is only used for write operations. This field is ignored
+ *     for read operations.
+ *   regAndNMaskHi
+ *     This field contains the mask used to clear a desired field from
+ *     the current value contained in the register specified by regOffset + 4.
+ *     This field is negated and ANDed with the current register value.
+ *     This field is only used for write operations. This field is ignored
+ *     for read operations.
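+ *
+ * As an illustrative, non-normative sketch (the BAR0 offset and masks below
+ * are placeholders, not real register addresses), a single 32-bit masked
+ * write could be described as:
+ *
+ *   NV2080_CTRL_GPU_REG_OP op = { 0 };
+ *   op.regOp         = NV2080_CTRL_GPU_REG_OP_WRITE_32;
+ *   op.regType       = NV2080_CTRL_GPU_REG_OP_TYPE_GLOBAL;
+ *   op.regOffset     = 0x1234;      // placeholder BAR0 offset
+ *   op.regAndNMaskLo = 0x000000F0;  // field bits to clear first
+ *   op.regValueLo    = 0x00000050;  // bits to OR in afterwards
+ *
+ * which, per the write form above, results in:
+ *   write(bar0 + 0x1234, (read(bar0 + 0x1234) & ~0x000000F0) | 0x00000050)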
+ */
+typedef struct NV2080_CTRL_GPU_REG_OP {
+    NvU8  regOp;
+    NvU8  regType;
+    NvU8  regStatus;
+    NvU8  regQuad;
+    NvU32 regGroupMask;
+    NvU32 regSubGroupMask;
+    NvU32 regOffset;
+    NvU32 regValueHi;
+    NvU32 regValueLo;
+    NvU32 regAndNMaskHi;
+    NvU32 regAndNMaskLo;
+} NV2080_CTRL_GPU_REG_OP;
+
+/* valid regOp values */
+#define NV2080_CTRL_GPU_REG_OP_READ_32               (0x00000000U)
+#define NV2080_CTRL_GPU_REG_OP_WRITE_32              (0x00000001U)
+#define NV2080_CTRL_GPU_REG_OP_READ_64               (0x00000002U)
+#define NV2080_CTRL_GPU_REG_OP_WRITE_64              (0x00000003U)
+#define NV2080_CTRL_GPU_REG_OP_READ_08               (0x00000004U)
+#define NV2080_CTRL_GPU_REG_OP_WRITE_08              (0x00000005U)
+
+/* valid regType values */
+#define NV2080_CTRL_GPU_REG_OP_TYPE_GLOBAL           (0x00000000U)
+#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX           (0x00000001U)
+#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_TPC       (0x00000002U)
+#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_SM        (0x00000004U)
+#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_CROP      (0x00000008U)
+#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_ZROP      (0x00000010U)
+#define NV2080_CTRL_GPU_REG_OP_TYPE_FB               (0x00000020U)
+#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_QUAD      (0x00000040U)
+#define NV2080_CTRL_GPU_REG_OP_TYPE_DEVICE           (0x00000080U)
+
+/* valid regStatus values (note: NvU8, i.e. 1 byte) */
+#define NV2080_CTRL_GPU_REG_OP_STATUS_SUCCESS        (0x00U)
+#define NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_OP     (0x01U)
+#define NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_TYPE   (0x02U)
+#define NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_OFFSET (0x04U)
+#define NV2080_CTRL_GPU_REG_OP_STATUS_UNSUPPORTED_OP (0x08U)
+#define NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_MASK   (0x10U)
+#define NV2080_CTRL_GPU_REG_OP_STATUS_NOACCESS       (0x20U)
+
+/*
+ * NV2080_CTRL_CMD_GPU_EXEC_REG_OPS
+ *
+ * This command is used to submit a buffer containing one or more
+ * NV2080_CTRL_GPU_REG_OP structures for processing. Each entry in the
+ * buffer specifies a single read or write operation. Each entry is checked
+ * for validity in an initial pass over the buffer, with the results for
+ * each operation stored in the corresponding regStatus field. Unless the
+ * bNonTransactional flag is set to true, if any invalid entries are found
+ * during this initial pass then none of the operations are executed. Entries
+ * are processed in order within each regType, with NV2080_CTRL_GPU_REG_OP_TYPE_GLOBAL
+ * entries processed first, followed by NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX entries.
+ *
+ *   hClientTarget
+ *     This parameter specifies the handle of the client that owns the channel
+ *     specified by hChannelTarget. If this parameter is set to 0 then the set
+ *     of channel-specific register operations are applied to all current and
+ *     future channels.
+ *   hChannelTarget
+ *     This parameter specifies the handle of the target channel (or channel
+ *     group) object instance to which channel-specific register operations are
+ *     to be directed. If hClientTarget is set to 0 then this parameter must
+ *     also be set to 0.
+ *   bNonTransactional
+ *     This field specifies whether the command is non-transactional, i.e. if
+ *     set to true, all the valid operations will be executed.
+ *   reserved00
+ *     This parameter is reserved for future use. It should be initialized to
+ *     zero for correct operation.
+ *   regOpCount
+ *     This field specifies the number of entries on the caller's regOps
+ *     list.
+ *   regOps
+ *     This field specifies a pointer in the caller's address space
+ *     to the buffer from which the desired register information is to be
+ *     retrieved.
+ *     This buffer must be at least as big as regOpCount
+ *     multiplied by the size of the NV2080_CTRL_GPU_REG_OP structure.
+ *   grRouteInfo
+ *     This parameter specifies the routing information used to
+ *     disambiguate the target GR engine. When SMC is enabled, this
+ *     is a mandatory parameter.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_GPU_EXEC_REG_OPS (0x20800122U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS_MESSAGE_ID (0x22U)
+
+typedef struct NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS {
+    NvHandle hClientTarget;
+    NvHandle hChannelTarget;
+    NvU32    bNonTransactional;
+    NvU32    reserved00[2];
+    NvU32    regOpCount;
+    NV_DECLARE_ALIGNED(NvP64 regOps, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ENGINES
+ *
+ * Returns a list of supported engine types along with the number of instances
+ * of each type. Querying with engineList NULL returns engineCount.
+ *
+ *   engineCount
+ *     This field specifies the number of entries on the caller's engineList
+ *     field.
+ *   engineList
+ *     This field is a pointer to a buffer of NvU32 values representing the
+ *     set of engines supported by the associated subdevice. Refer to cl2080.h
+ *     for the complete set of supported engine types.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_GPU_GET_ENGINES (0x20800123U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_ENGINES_PARAMS_MESSAGE_ID (0x23U)
+
+typedef struct NV2080_CTRL_GPU_GET_ENGINES_PARAMS {
+    NvU32 engineCount;
+    NV_DECLARE_ALIGNED(NvP64 engineList, 8);
+} NV2080_CTRL_GPU_GET_ENGINES_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_GET_ENGINES_V2 (0x20800170U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS_MESSAGE_ID" */
+
+/* Must match NV2080_ENGINE_TYPE_LAST from cl2080.h */
+#define NV2080_GPU_MAX_ENGINES_LIST_SIZE 0x54U
+
+#define NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS_MESSAGE_ID (0x70U)
+
+typedef struct NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS {
+    NvU32 engineCount;
+    NvU32 engineList[NV2080_GPU_MAX_ENGINES_LIST_SIZE];
+} NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST
+ *
+ * Returns a list of classes supported by a given engine type.
+ *
+ *   engineType
+ *     This field specifies the engine type being queried.
+ *     NV2080_CTRL_ENGINE_TYPE_ALLENGINES will return classes
+ *     supported by all engines.
+ *
+ *   numClasses
+ *     This field specifies the number of classes supported by
+ *     engineType.
+ *
+ *   classList
+ *     This field is an array containing the list of supported
+ *     classes.
+ *     It is of type (NvU32 *).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST (0x20800124U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS_MESSAGE_ID (0x24U)
+
+typedef struct NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS {
+    NvU32 engineType;
+    NvU32 numClasses;
+    NV_DECLARE_ALIGNED(NvP64 classList, 8);
+} NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS;
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ENGINE_FAULT_INFO
+ *
+ * This command returns the fault properties of the specified engine type.
+ *
+ *   engineType
+ *     Input parameter.
+ *     This field specifies the engine type being queried.
+ *     Engine type is specified using the NV2080_ENGINE_TYPE_* defines in cl2080.h.
+ *     The list of engines supported by a chip can be obtained using the
+ *     NV2080_CTRL_CMD_GPU_GET_ENGINES ctrl call.
+ *
+ *   mmuFaultId
+ *     Output parameter.
+ *     This field returns the MMU fault ID for the specified engine.
+ *     If the engine supports subcontext, this field provides the base fault id.
+ *
+ *   bSubcontextSupported
+ *     Output parameter.
+ *     Returns TRUE if subcontext faulting is supported by the engine.
+ *     Engines that support subcontexts use fault IDs in the range
+ *     [mmuFaultId, mmuFaultId + maxSubCtx).
+ *     "maxSubctx" can be found using the NV2080_CTRL_FIFO_INFO ctrl call with
+ *     NV2080_CTRL_FIFO_INFO_INDEX_MAX_SUBCONTEXT_PER_GROUP as the index.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_GPU_GET_ENGINE_FAULT_INFO (0x20800125U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS_MESSAGE_ID (0x25U)
+
+typedef struct NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS {
+    NvU32  engineType;
+    NvU32  mmuFaultId;
+    NvBool bSubcontextSupported;
+} NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_MODE
+ *
+ * This command is used to detect the mode of the GPU associated with the
+ * subdevice.
+ *
+ *   mode
+ *     This parameter returns the current mode of the GPU. Legal values for
+ *     this parameter include:
+ *       NV2080_CTRL_GPU_QUERY_MODE_GRAPHICS_MODE
+ *         The GPU is currently operating in graphics mode.
+ *       NV2080_CTRL_GPU_QUERY_MODE_COMPUTE_MODE
+ *         The GPU is currently operating in compute mode.
+ *       NV2080_CTRL_GPU_QUERY_MODE_UNKNOWN_MODE
+ *         The current mode of the GPU could not be determined.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_GPU_QUERY_MODE (0x20800128U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_QUERY_MODE_PARAMS_MESSAGE_ID" */
+
+/* valid mode parameter values */
+#define NV2080_CTRL_GPU_QUERY_MODE_UNKNOWN_MODE  (0x00000000U)
+#define NV2080_CTRL_GPU_QUERY_MODE_GRAPHICS_MODE (0x00000001U)
+#define NV2080_CTRL_GPU_QUERY_MODE_COMPUTE_MODE  (0x00000002U)
+
+#define NV2080_CTRL_GPU_QUERY_MODE_PARAMS_MESSAGE_ID (0x28U)
+
+typedef struct NV2080_CTRL_GPU_QUERY_MODE_PARAMS {
+    NvU32 mode;
+} NV2080_CTRL_GPU_QUERY_MODE_PARAMS;
+
+
+
+/*!
+ * NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY
+ * Data block describing a virtual context buffer to be promoted
+ *
+ *   gpuPhysAddr [IN]
+ *     GPU Physical Address for the buffer
+ *   gpuVirtAddr [IN]
+ *     GPU Virtual Address for the buffer
+ *   size [IN]
+ *     Size of this virtual context buffer
+ *   physAttr [IN]
+ *     Physical memory attributes (aperture, cacheable)
+ *   bufferId [IN]
+ *     Virtual context buffer type, data type NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_*
+ *   bInitialize [IN]
+ *     Flag indicating that this virtual context buffer should be initialized prior to promotion.
+ *     The client must clear (memset) the buffer to 0x0 prior to initialization.
+ *     The following buffers need initialization:
+ *       1. NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN
+ *       2. NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH
+ *       3. NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP
+ *       4. NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP
+ *   bNonmapped [IN]
+ *     Flag indicating that the virtual address is not to be promoted with this
+ *     call. It is illegal to set this flag and not set bInitialize.
+ */
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY {
+    NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NvU32 physAttr;
+    NvU16 bufferId;
+    NvU8  bInitialize;
+    NvU8  bNonmapped;
+} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY;
+
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN                          0U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM                            1U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH                         2U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB              3U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL                      4U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB                  5U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL                 6U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL                     7U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK                 8U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT                    9U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP               10U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP  11U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP        12U
+
+#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16U
+
+/*
+ * NV2080_CTRL_CMD_GPU_PROMOTE_CTX
+ *
+ * This command is used to promote a Virtual Context.
+ *
+ *   engineType
+ *     Engine the Virtual Context is for
+ *   hClient
+ *     Client Handle for hVirtMemory
+ *   ChID
+ *     Hw Channel -- Actually hw index for channel (deprecated)
+ *   hChanClient
+ *     The client handle for hObject
+ *   hObject
+ *     Passed in object handle for either a single channel or a channel group
+ *   hVirtMemory
+ *     Virtual Address handle to map the Virtual Context to
+ *   virtAddress
+ *     Virtual Address to map the Virtual Context to
+ *   size
+ *     Size of the Virtual Context
+ *   entryCount
+ *     Number of valid entries in the promotion entry list
+ *   promoteEntry
+ *     List of context buffer entries to issue promotions for.
+ *
+ * When not using promoteEntry, only hVirtMemory or (virtAddress, size) should
+ * be specified; the code branches based on hVirtMemory (NULL vs. non-NULL), so
+ * if both are specified, hVirtMemory takes precedence.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED    - The Class does not support version info retrieval
+ *   NV_ERR_INVALID_DEVICE   - The Class/Device is not yet ready to provide this info.
+ *   NV_ERR_INVALID_ARGUMENT - Bad/Unknown Class ID specified.
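+ *
+ * As an illustrative, non-normative sketch, promoting a single already-mapped
+ * MAIN buffer might look as follows; the handles, virtual address and size
+ * are placeholders supplied by the client:
+ *
+ *   NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS p = { 0 };
+ *   p.engineType  = engineType;       // an NV2080_ENGINE_TYPE_* value from cl2080.h
+ *   p.hChanClient = hClient;          // placeholder client handle
+ *   p.hObject     = hChannelGroup;    // single channel or channel group handle
+ *   p.entryCount  = 1;
+ *   p.promoteEntry[0].bufferId    = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN;
+ *   p.promoteEntry[0].gpuVirtAddr = vaOfMainBuffer;  // placeholder VA
+ *   p.promoteEntry[0].size        = mainBufferSize;  // placeholder size
+ *   p.promoteEntry[0].bInitialize = 1;  // client memset the buffer to 0x0 first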
+ */
+#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID (0x2BU)
+
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS {
+    NvU32    engineType;
+    NvHandle hClient;
+    NvU32    ChID;
+    NvHandle hChanClient;
+    NvHandle hObject;
+    NvHandle hVirtMemory;
+    NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NvU32    entryCount;
+    // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES];
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8);
+} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *PNV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_EVICT_CTX
+ *
+ * This command is used to evict a Virtual Context.
+ *
+ *   engineType
+ *     Engine the Virtual Context is for
+ *   hClient
+ *     Client Handle
+ *   ChID
+ *     Hw Channel -- Actually hw index for channel (deprecated)
+ *   hChanClient
+ *     Client handle for hObject
+ *   hObject
+ *     Passed in object handle for either a single channel or a channel group
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED    - The Class does not support version info retrieval
+ *   NV_ERR_INVALID_DEVICE   - The Class/Device is not yet ready to provide this info.
+ *   NV_ERR_INVALID_ARGUMENT - Bad/Unknown Class ID specified.
+ */
+#define NV2080_CTRL_CMD_GPU_EVICT_CTX (0x2080012cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_EVICT_CTX_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_EVICT_CTX_PARAMS_MESSAGE_ID (0x2CU)
+
+typedef struct NV2080_CTRL_GPU_EVICT_CTX_PARAMS {
+    NvU32    engineType;
+    NvHandle hClient;
+    NvU32    ChID;
+    NvHandle hChanClient;
+    NvHandle hObject;
+} NV2080_CTRL_GPU_EVICT_CTX_PARAMS;
+typedef struct NV2080_CTRL_GPU_EVICT_CTX_PARAMS *PNV2080_CTRL_GPU_EVICT_CTX_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_INITIALIZE_CTX
+ *
+ * This command is used to initialize a Virtual Context. The ctx buffer must be
+ * cleared (zeroed) by the caller prior to invoking this method.
+ *
+ *   engineType
+ *     Engine the Virtual Context is for
+ *   hClient
+ *     Client Handle for the hVirtMemory
+ *   ChID
+ *     Hw channel -- Actually channel index (deprecated)
+ *   hChanClient
+ *     The client handle for hObject
+ *   hObject
+ *     Passed in object handle for either a single channel or a channel group
+ *   hVirtMemory
+ *     Virtual address to map the Virtual Context to
+ *   physAddress
+ *     Physical offset in FB to use as the Virtual Context
+ *   physAttr
+ *     Physical memory attributes
+ *   hDmaHandle
+ *     Dma Handle when using discontiguous context buffers
+ *   index
+ *     Start offset in the Virtual DMA Context
+ *   size
+ *     Size of the Virtual Context
+ *
+ * Only hVirtMemory or size should be specified; the code branches based on
+ * hVirtMemory (NULL vs. non-NULL), so if both are specified, hVirtMemory takes
+ * precedence.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED    - The Class does not support version info retrieval
+ *   NV_ERR_INVALID_DEVICE   - The Class/Device is not yet ready to provide this info.
+ *   NV_ERR_INVALID_ARGUMENT - Bad/Unknown Class ID specified.
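+ *
+ * As an illustrative, non-normative sketch, physAttr can be composed from the
+ * APERTURE and GPU_CACHEABLE fields defined below, e.g. using the DRF helpers
+ * from nvmisc.h:
+ *
+ *   NvU32 physAttr = DRF_DEF(2080, _CTRL_GPU_INITIALIZE_CTX, _APERTURE, _VIDMEM) |
+ *                    DRF_DEF(2080, _CTRL_GPU_INITIALIZE_CTX, _GPU_CACHEABLE, _NO);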
+ */
+#define NV2080_CTRL_CMD_GPU_INITIALIZE_CTX (0x2080012dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS_MESSAGE_ID (0x2DU)
+
+typedef struct NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS {
+    NvU32    engineType;
+    NvHandle hClient;
+    NvU32    ChID;
+    NvHandle hChanClient;
+    NvHandle hObject;
+    NvHandle hVirtMemory;
+    NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+    NvU32    physAttr;
+    NvHandle hDmaHandle;
+    NvU32    index;
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS;
+typedef struct NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *PNV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS;
+
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_APERTURE          1:0
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_APERTURE_VIDMEM   (0x00000000U)
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_APERTURE_COH_SYS  (0x00000001U)
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_APERTURE_NCOH_SYS (0x00000002U)
+
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_GPU_CACHEABLE     2:2
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_GPU_CACHEABLE_YES (0x00000000U)
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_GPU_CACHEABLE_NO  (0x00000001U)
+
+/*
+ * NV2080_CTRL_GPU_INITIALIZE_CTX_PRESERVE_CTX - Tells RM whether this Ctx buffer
+ * needs a full initialization (loading the golden image). When a context is
+ * promoted on a different channel than the one it was originally initialized on,
+ * the client can use this flag to tell RM that this is an already initialized
+ * context. In such cases RM updates its internal state to reflect the new
+ * context address and state variables.
+ */
+
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_PRESERVE_CTX     3:3
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_PRESERVE_CTX_NO  (0x00000000U)
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_PRESERVE_CTX_YES (0x00000001U)
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_ECC_INTR
+ * Queries the top level ECC PMC PRI register
+ * TODO remove these parameters, tracked in bug #1975721
+ */
+#define NV2080_CTRL_CMD_GPU_QUERY_ECC_INTR (0x2080012eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x2E" */
+
+typedef struct NV2080_CTRL_GPU_QUERY_ECC_INTR_PARAMS {
+    NvU32 eccIntrStatus;
+} NV2080_CTRL_GPU_QUERY_ECC_INTR_PARAMS;
+
+/**
+ * NV2080_CTRL_CMD_GPU_QUERY_ECC_STATUS
+ *
+ * This command is used to query the ECC status of a GPU by a subdevice
+ * handle. Please see the NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS
+ * data structure description below for details on the data reported
+ * per hardware unit.
+ *
+ *   units
+ *     Array of structures used to describe per-unit state
+ *
+ *   flags
+ *     See interface flag definitions below.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+
+
+#define NV2080_CTRL_CMD_GPU_QUERY_ECC_STATUS (0x2080012fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS_MESSAGE_ID" */
+
+
+#define NV2080_CTRL_GPU_ECC_UNIT_GSP   (0x0000001DU)
+
+
+#define NV2080_CTRL_GPU_ECC_UNIT_COUNT (0x00000024U)
+
+
+
+// Deprecated, do not use
+#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_FLAGS_TYPE          0:0
+#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_FLAGS_TYPE_FILTERED (0x00000000U)
+#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_FLAGS_TYPE_RAW      (0x00000001U)
+
+#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_UNC_ERR_FALSE         0U
+#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_UNC_ERR_TRUE          1U
+#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_UNC_ERR_INDETERMINATE 2U
+
+/*
+ * NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS
+ *
+ * This structure represents the exception status of a class of per-unit
+ * exceptions
+ *
+ *   count
+ *     The number of exceptions that have occurred since boot
+ */
+typedef struct NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS {
+    NV_DECLARE_ALIGNED(NvU64 count, 8);
+} NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS;
+
+/*
+ * NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS
+ *
+ * This structure represents the per-unit ECC exception status
+ *
+ *   enabled
+ *     ECC enabled yes/no for this unit
+ *   scrubComplete
+ *     Scrub has completed yes/no. A scrub is performed for some units to ensure
+ *     the checkbits are consistent with the protected data.
+ *   supported
+ *     Whether HW supports ECC in this unit for this GPU
+ *   dbe
+ *     Double bit error (DBE) status. The value returned reflects a counter
+ *     that is monotonic, but can be reset by clients.
+ *   dbeNonResettable (deprecated, do not use)
+ *     Double bit error (DBE) status, not client resettable.
+ *   sbe
+ *     Single bit error (SBE) status. The value returned reflects a counter
+ *     that is monotonic, but can be reset by clients.
+ *   sbeNonResettable (deprecated, do not use)
+ *     Single bit error (SBE) status, not client resettable.
+ *
+ */
+typedef struct NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS {
+    NvBool enabled;
+    NvBool scrubComplete;
+    NvBool supported;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS dbe, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS dbeNonResettable, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS sbe, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS sbeNonResettable, 8);
+} NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS;
+
+/*
+ * NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS
+ *
+ * This structure returns ECC exception status and GPU Fatal Poison for all units
+ *
+ *   units
+ *     This structure represents ECC exception status for all Units.
+ *   bFatalPoisonError
+ *     Whether a GPU Fatal poison error has occurred on this GPU. This will be set for Ampere_and_later
+ *   uncorrectableError
+ *     Indicates whether any uncorrectable GR ECC errors have occurred. When
+ *     SMC is enabled, uncorrectableError is only valid when the client is
+ *     subscribed to a partition. Check QUERY_ECC_STATUS_UNC_ERR_*
+ *   flags
+ *     Flags passed by the caller. Refer to NV2080_CTRL_GPU_QUERY_ECC_STATUS_FLAGS_TYPE_* for details.
+ *   grRouteInfo
+ *     SMC partition information. This input is only valid when SMC is
+ *     enabled on Ampere_and_later.
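+ *
+ * As an illustrative, non-normative sketch, a caller could total the
+ * client-resettable double-bit error counts across all units, where params
+ * is the NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS filled in by this command:
+ *
+ *   NvU64 totalDbe = 0;
+ *   NvU32 i;
+ *   for (i = 0; i < NV2080_CTRL_GPU_ECC_UNIT_COUNT; i++)
+ *   {
+ *       if (params.units[i].supported && params.units[i].enabled)
+ *           totalDbe += params.units[i].dbe.count;
+ *   }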
+ *
+ */
+#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS_MESSAGE_ID (0x2FU)
+
+typedef struct NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS units[NV2080_CTRL_GPU_ECC_UNIT_COUNT], 8);
+    NvBool bFatalPoisonError;
+    NvU8   uncorrectableError;
+    NvU32  flags;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_SET_COMPUTE_MODE_RULES
+ *
+ * This command sets the compute mode rules for the associated subdevice. The
+ * default mode is equivalent to NV2080_CTRL_GPU_COMPUTE_MODE_RULES_NONE. This
+ * command is available to clients with administrator privileges only. An
+ * attempt to use this command by a client without administrator privileges
+ * results in the return of an NV_ERR_INSUFFICIENT_PERMISSIONS status.
+ *
+ *   rules
+ *     This parameter is used to specify the rules that govern the GPU with
+ *     respect to NV50_COMPUTE objects. Legal values for this parameter include:
+ *
+ *       NV2080_CTRL_GPU_COMPUTE_MODE_RULES_NONE
+ *         This mode indicates that no special restrictions apply to the
+ *         allocation of NV50_COMPUTE objects.
+ *
+ *       NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE
+ *         This mode means that only one instance of NV50_COMPUTE will be
+ *         allowed at a time. This restriction is enforced at each subsequent
+ *         NV50_COMPUTE allocation attempt. Setting this mode will not affect
+ *         any existing compute programs that may be running. For example,
+ *         if this mode is set while three compute programs are running, then
+ *         all of those programs will be allowed to continue running. However,
+ *         until they all finish running, no new NV50_COMPUTE objects may be
+ *         allocated. User-mode clients should treat this as restricting access
+ *         to an NV50_COMPUTE object to a single thread within a process.
+ *
+ *       NV2080_CTRL_GPU_COMPUTE_MODE_RULES_COMPUTE_PROHIBITED
+ *         This mode means that the GPU is never allowed to instantiate an
+ *         NV50_COMPUTE object, and thus cannot run any new compute programs.
+ *         This restriction is enforced at each subsequent NV50_COMPUTE object
+ *         allocation attempt. Setting this mode will not affect any existing
+ *         compute programs that may be running. For example, if this mode is
+ *         set while three compute programs are running, then all of those
+ *         programs will be allowed to continue running. However, no new
+ *         NV50_COMPUTE objects may be allocated.
+ *
+ *
+ *       NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE_PROCESS
+ *         This mode is identical to EXCLUSIVE_COMPUTE, where only one instance
+ *         of NV50_COMPUTE will be allowed at a time. It is separate from
+ *         EXCLUSIVE_COMPUTE to allow user-mode clients to differentiate
+ *         exclusive access to a compute object from a single thread of a
+ *         process from exclusive access to a compute object from all threads
+ *         of a process. User-mode clients should not limit access to an
+ *         NV50_COMPUTE object to a single thread when the GPU is set to
+ *         EXCLUSIVE_COMPUTE_PROCESS.
+ *
+ *     An invalid rules parameter value results in the return of an
+ *     NV_ERR_INVALID_ARGUMENT status.
+ *
+ *   flags
+ *     Reserved. The caller should set this field to zero.
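+ *
+ * As an illustrative, non-normative sketch, an administrator client might
+ * request process-exclusive compute mode as follows; rmControl() stands in
+ * for whatever RM control dispatch the client uses:
+ *
+ *   NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS p = { 0 };
+ *   p.rules = NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE_PROCESS;
+ *   p.flags = 0;  // reserved, must be zero
+ *   status  = rmControl(hSubdevice, NV2080_CTRL_CMD_GPU_SET_COMPUTE_MODE_RULES,
+ *                       &p, sizeof(p));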
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT (if an invalid rule number is provided)
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS (if the user is not the Administrator or superuser)
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_SET_COMPUTE_MODE_RULES (0x20800130U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS_MESSAGE_ID" */
+
+/* valid rules parameter values */
+#define NV2080_CTRL_GPU_COMPUTE_MODE_RULES_NONE                      (0x00000000U)
+#define NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE         (0x00000001U)
+#define NV2080_CTRL_GPU_COMPUTE_MODE_RULES_COMPUTE_PROHIBITED        (0x00000002U)
+#define NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE_PROCESS (0x00000003U)
+
+#define NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS_MESSAGE_ID (0x30U)
+
+typedef struct NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS {
+    NvU32 rules;
+    NvU32 flags;
+} NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_COMPUTE_MODE_RULES
+ *
+ * This command queries the compute mode rules for the associated subdevice.
+ * Please see the NV2080_CTRL_CMD_GPU_SET_COMPUTE_MODE_RULES command, above, for
+ * details as to what the rules mean.
+ *
+ *   rules
+ *     Specifies the rules that govern the GPU, with respect to NV50_COMPUTE
+ *     objects.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_QUERY_COMPUTE_MODE_RULES (0x20800131U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS_MESSAGE_ID (0x31U)
+
+typedef struct NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS {
+    NvU32 rules;
+} NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_ECC_CONFIGURATION
+ *
+ * This command returns the current ECC configuration setting for
+ * a GPU given its subdevice handle. The value returned is
+ * the current ECC setting for the GPU stored in non-volatile
+ * memory on the board.
+ *
+ *   currentConfiguration
+ *     The current ECC configuration setting.
+ *
+ *   defaultConfiguration
+ *     The factory default ECC configuration setting.
+ *
+ * Please see the NV2080_CTRL_CMD_GPU_QUERY_ECC_STATUS command if
+ * you wish to determine if ECC is currently enabled.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV2080_CTRL_CMD_GPU_QUERY_ECC_CONFIGURATION (0x20800133U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_ECC_CONFIGURATION_DISABLED (0x00000000U)
+#define NV2080_CTRL_GPU_ECC_CONFIGURATION_ENABLED  (0x00000001U)
+
+#define NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS_MESSAGE_ID (0x33U)
+
+typedef struct NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS {
+    NvU32 currentConfiguration;
+    NvU32 defaultConfiguration;
+} NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_SET_ECC_CONFIGURATION
+ *
+ * This command changes the ECC configuration setting for a GPU
+ * given its subdevice handle. The value specified is
+ * stored in non-volatile memory on the board and will take
+ * effect with the next GPU reset.
+ *
+ *   newConfiguration
+ *     The new configuration setting to take effect with
+ *     the next GPU reset.
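+ *
+ * As an illustrative, non-normative sketch (rmControl() stands in for the
+ * client's RM control dispatch):
+ *
+ *   NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS p = { 0 };
+ *   p.newConfiguration = NV2080_CTRL_GPU_ECC_CONFIGURATION_DISABLE;
+ *   status = rmControl(hSubdevice, NV2080_CTRL_CMD_GPU_SET_ECC_CONFIGURATION,
+ *                      &p, sizeof(p));
+ *   // The new setting only takes effect after the next GPU reset.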
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_SET_ECC_CONFIGURATION (0x20800134U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_ECC_CONFIGURATION_DISABLE (0x00000000U)
+#define NV2080_CTRL_GPU_ECC_CONFIGURATION_ENABLE  (0x00000001U)
+
+#define NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS_MESSAGE_ID (0x34U)
+
+typedef struct NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS {
+    NvU32 newConfiguration;
+} NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_RESET_ECC_ERROR_STATUS
+ *
+ * This command resets volatile and/or persistent ECC error
+ * status information for a GPU given its subdevice
+ * handle.
+ *
+ *   statuses
+ *     The ECC error statuses (the current, volatile
+ *     and/or the persistent error counter(s)) to
+ *     be reset by the command.
+ *   flags
+ *     FORCE_PURGE
+ *       Forcibly clean all the ECC InfoROM state if this flag is set.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_RESET_ECC_ERROR_STATUS (0x20800136U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_ECC_ERROR_STATUS_NONE      (0x00000000U)
+#define NV2080_CTRL_GPU_ECC_ERROR_STATUS_VOLATILE  (0x00000001U)
+#define NV2080_CTRL_GPU_ECC_ERROR_STATUS_AGGREGATE (0x00000002U)
+
+#define NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_FLAGS_FORCE_PURGE       0:0
+#define NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_FLAGS_FORCE_PURGE_FALSE 0U
+#define NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_FLAGS_FORCE_PURGE_TRUE  1U
+
+#define NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS_MESSAGE_ID (0x36U)
+
+typedef struct NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS {
+    NvU32 statuses;
+    NvU8  flags;
+} NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO
+ *
+ * This command returns a mask of enabled GPCs for the associated GPU.
+ *
+ *   gpcMask
+ *     This parameter returns a mask of enabled GPCs. Each GPC has an ID
+ *     that's equivalent to the corresponding bit position in the mask.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO (0x20800137U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS_MESSAGE_ID (0x37U)
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
+    NvU32 gpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO
+ *
+ * This command returns a mask of enabled TPCs for a specified GPC.
+ *
+ *   gpcId
+ *     This parameter specifies the GPC for which TPC information is
+ *     to be retrieved. If the GPC with this ID is not enabled this command
+ *     will return a tpcMask value of zero.
+ *
+ *   tpcMask
+ *     This parameter returns a mask of enabled TPCs for the specified GPC.
+ *     Each TPC has an ID that's equivalent to the corresponding bit
+ *     position in the mask.
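+ *
+ * As an illustrative, non-normative sketch, the enabled TPCs of every enabled
+ * GPC can be walked by combining this call with
+ * NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO; gpcInfo is assumed to have been
+ * filled in already, rmControl() stands in for the RM control dispatch, and
+ * LOWESTBIT()/BIT_IDX_32() are the bit helpers from nvmisc.h:
+ *
+ *   NvU32 mask;
+ *   for (mask = gpcInfo.gpcMask; mask != 0; mask &= mask - 1)
+ *   {
+ *       NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo = { 0 };
+ *       tpcInfo.gpcId = BIT_IDX_32(LOWESTBIT(mask));  // GPC ID = bit position
+ *       status = rmControl(hSubdevice, NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO,
+ *                          &tpcInfo, sizeof(tpcInfo));
+ *       // On NV_OK, tpcInfo.tpcMask holds the enabled TPCs of this GPC.
+ *   }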
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO (0x20800138U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS_MESSAGE_ID (0x38U)
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
+    NvU32 gpcId;
+    NvU32 tpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_FERMI_ZCULL_INFO
+ *
+ * This command returns a mask of enabled ZCULLs for a specified GPC.
+ *
+ *   gpcId
+ *     This parameter specifies the GPC for which ZCULL information is to be
+ *     retrieved. If the GPC with this ID is not enabled this command will
+ *     return a zcullMask value of zero.
+ *
+ *   zcullMask
+ *     This parameter returns a mask of enabled ZCULLs for the specified GPC.
+ *     Each ZCULL has an ID that's equivalent to the corresponding bit
+ *     position in the mask.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *
+ * Deprecated: Please use the GR based control call
+ *   NV2080_CTRL_CMD_GR_GET_ZCULL_MASK
+ *
+ */
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_ZCULL_INFO (0x20800139U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS_MESSAGE_ID (0x39U)
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS {
+    NvU32 gpcId;
+    NvU32 zcullMask;
+} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_OEM_BOARD_INFO
+ *
+ * If an InfoROM with a valid OEM Board Object is present, this
+ * command returns relevant information from the object to the
+ * caller.
+ *
+ * The following data are currently reported:
+ *
+ *   buildDate
+ *     The board's build date (8 digit BCD in format YYYYMMDD).
+ *
+ *   marketingName
+ *     The board's marketing name (24 ASCII characters, e.g. "Quadro FX5800").
+ *
+ *   boardSerialNumber
+ *     The board's serial number.
+ *
+ *   memoryManufacturer
+ *     The board's memory manufacturer ('S'amsung/'H'ynix/'I'nfineon).
+ *
+ *   memoryDateCode
+ *     The board's memory datecode (LSB justified ASCII field with 0x00
+ *     denoting empty space).
+ *
+ *   productPartNumber
+ *     The board's 900 product part number (LSB justified ASCII field with 0x00
+ *     denoting empty space, e.g. "900-21228-0208-200").
+ *
+ *   boardRevision
+ *     The board's revision (e.g. A02, B01)
+ *
+ *   boardType
+ *     The board's type ('E'ngineering/'P'roduction)
+ *
+ *   board699PartNumber
+ *     The board's 699 product part number (LSB justified ASCII field with 0x00
+ *     denoting empty space, e.g. "699-21228-0208-200").
+ *
+ *   board965PartNumber
+ *     The board's 965 product part number (LSB justified ASCII field with 0x00
+ *     denoting empty space, e.g. "965-21228-0208-200").
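+ *
+ * As an illustrative, non-normative sketch: because the string fields above
+ * are fixed-width and 0x00-padded rather than guaranteed NUL-terminated, a
+ * caller printing them should bound the length explicitly. buildDate is BCD,
+ * so it prints naturally in hex. Here p is the
+ * NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS filled in by this command:
+ *
+ *   printf("name: %.*s  built: %08x\n",
+ *          (int)NV2080_GPU_MAX_MARKETING_NAME_LENGTH,
+ *          (const char *)p.marketingName,
+ *          p.buildDate);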
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_OEM_BOARD_INFO (0x2080013fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_GPU_MAX_MARKETING_NAME_LENGTH      (0x00000018U)
+#define NV2080_GPU_MAX_SERIAL_NUMBER_LENGTH       (0x00000010U)
+#define NV2080_GPU_MAX_MEMORY_PART_ID_LENGTH      (0x00000014U)
+#define NV2080_GPU_MAX_MEMORY_DATE_CODE_LENGTH    (0x00000006U)
+#define NV2080_GPU_MAX_PRODUCT_PART_NUMBER_LENGTH (0x00000014U)
+
+#define NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS_MESSAGE_ID (0x3FU)
+
+typedef struct NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS {
+    NvU32 buildDate;
+    NvU8  marketingName[NV2080_GPU_MAX_MARKETING_NAME_LENGTH];
+    NvU8  serialNumber[NV2080_GPU_MAX_SERIAL_NUMBER_LENGTH];
+    NvU8  memoryManufacturer;
+    NvU8  memoryPartID[NV2080_GPU_MAX_MEMORY_PART_ID_LENGTH];
+    NvU8  memoryDateCode[NV2080_GPU_MAX_MEMORY_DATE_CODE_LENGTH];
+    NvU8  productPartNumber[NV2080_GPU_MAX_PRODUCT_PART_NUMBER_LENGTH];
+    NvU8  boardRevision[3];
+    NvU8  boardType;
+    NvU8  board699PartNumber[NV2080_GPU_MAX_PRODUCT_PART_NUMBER_LENGTH];
+    NvU8  board965PartNumber[NV2080_GPU_MAX_PRODUCT_PART_NUMBER_LENGTH];
+} NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ID
+ *
+ * This command returns the gpuId of the associated object.
+ *
+ *   gpuId
+ *     This field returns the gpuId.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_ID (0x20800142U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ID_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_ID_PARAMS_MESSAGE_ID (0x42U)
+
+typedef struct NV2080_CTRL_GPU_GET_ID_PARAMS {
+    NvU32 gpuId;
+} NV2080_CTRL_GPU_GET_ID_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_SET_GPU_DEBUG_MODE
+ *
+ * This command is used to enable or disable GPU debug mode. While this mode
+ * is enabled, some client RM calls that can potentially time out return
+ * NV_ERR_BUSY_RETRY, signalling the client to try again once GPU
+ * debug mode is disabled.
+ *
+ *   mode
+ *     This parameter specifies whether GPU debug mode is to be enabled or
+ *     disabled. Possible values are:
+ *
+ *       NV2080_CTRL_GPU_DEBUG_MODE_ENABLED
+ *       NV2080_CTRL_GPU_DEBUG_MODE_DISABLED
+ *
+ * Possible return status values are
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV2080_CTRL_CMD_GPU_SET_GPU_DEBUG_MODE (0x20800143U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_GPU_DEBUG_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_SET_GPU_DEBUG_MODE_PARAMS_MESSAGE_ID (0x43U)
+
+typedef struct NV2080_CTRL_GPU_SET_GPU_DEBUG_MODE_PARAMS {
+    NvU32 mode;
+} NV2080_CTRL_GPU_SET_GPU_DEBUG_MODE_PARAMS;
+
+#define NV2080_CTRL_GPU_DEBUG_MODE_ENABLED  (0x00000001U)
+#define NV2080_CTRL_GPU_DEBUG_MODE_DISABLED (0x00000002U)
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_GPU_DEBUG_MODE
+ *
+ * This command is used to query whether debug mode is enabled on the current
+ * GPU. Please see the description of NV2080_CTRL_CMD_GPU_SET_GPU_DEBUG_MODE
+ * for more details on GPU debug mode.
+ *
+ *   currentMode
+ *     This parameter returns the state of GPU debug mode for the current GPU.
+ *     Possible values are:
+ *
+ *       NV2080_CTRL_GPU_DEBUG_MODE_ENABLED
+ *       NV2080_CTRL_GPU_DEBUG_MODE_DISABLED
+ *
+ * Possible return status values are
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV2080_CTRL_CMD_GPU_GET_GPU_DEBUG_MODE (0x20800144U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_GPU_DEBUG_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_GPU_DEBUG_MODE_PARAMS_MESSAGE_ID (0x44U)
+
+typedef struct NV2080_CTRL_GPU_GET_GPU_DEBUG_MODE_PARAMS {
+    NvU32 currentMode;
+} NV2080_CTRL_GPU_GET_GPU_DEBUG_MODE_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ENGINE_PARTNERLIST
+ *
+ * Returns a list of engines that can partner or coexist
+ * when using the target channel or partnership class.
+ * This list may include all engines (pre-Kepler), or as few
+ * as 1 engine (Kepler and beyond).
+ *
+ *   engineType
+ *     This field specifies the target engine type.
+ *     See cl2080.h for a list of valid engines.
+ *
+ *   partnershipClassId
+ *     This field specifies the target channel
+ *     or partnership class ID.
+ *     An example of such a class is GF100_CHANNEL_GPFIFO.
+ *
+ *   runqueue
+ *     This field is an index which indicates the runqueue for which to
+ *     return the list of supported engines. This is the
+ *     same field as what NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE
+ *     specifies. This is only valid for TSG.
+ *
+ *   numPartners
+ *     This field returns the number of
+ *     valid entries in the partnerList array.
+ *
+ *   partnerList
+ *     This field is an array containing the list of supported
+ *     partner engine types, in no particular order, and
+ *     may even be empty (numPartners = 0).
+ *     See cl2080.h for a list of possible engines.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+
+#define NV2080_CTRL_CMD_GPU_GET_ENGINE_PARTNERLIST (0x20800147U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS_MESSAGE_ID" */
+
+/* this macro specifies the maximum number of partner entries */
+#define NV2080_CTRL_GPU_MAX_ENGINE_PARTNERS (0x00000020U)
+
+#define NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS_MESSAGE_ID (0x47U)
+
+typedef struct NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS {
+    NvU32 engineType;
+    NvU32 partnershipClassId;
+    NvU32 runqueue;
+    NvU32 numPartners;
+    // C form: NvU32 partnerList[NV2080_CTRL_GPU_MAX_ENGINE_PARTNERS];
+    NvU32 partnerList[NV2080_CTRL_GPU_MAX_ENGINE_PARTNERS];
+} NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_GID_INFO
+ *
+ * This command returns the GPU ID (GID) string for the associated
+ * GPU. This value can be useful for GPU identification and security
+ * system validation.
+ *
+ * The GPU ID is a SHA-1 based 16 byte ID, formatted as a 32 character
+ * hexadecimal string as "GPU-%08x-%04x-%04x-%04x-%012x" (the
+ * canonical format of a UUID)
+ *
+ * The GPU IDs are generated using the ECID, PMC_BOOT_0, and
+ * PMC_BOOT_42 of the GPU as the hash message.
+ *
+ *   index
+ *     (Input) "Select which GID set to get." Or so the original documentation
+ *     said. In reality, there is only one GID per GPU, and the implementation
+ *     completely ignores this parameter. You can too.
+ *
+ *   flags
+ *     (Input) The _FORMAT* flags designate ASCII or binary format. Binary
+ *     format returns the raw bytes of either the 16-byte SHA-1 ID or the
+ *     32-byte SHA-256 ID.
+ *
+ *     The _TYPE* flag needs to specify the _SHA1 type.
+ *
+ *   length
+ *     (Output) Actual GID length, in bytes.
+ *
+ *   data[NV2080_BUS_MAX_GID_LENGTH]
+ *     (Output) Result buffer: the GID itself, in a format that is determined by
+ *     the "flags" field (described above).
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV2080_CTRL_CMD_GPU_GET_GID_INFO (0x2080014aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_GID_INFO_PARAMS_MESSAGE_ID" */
+
+/* maximum possible number of bytes of GID information returned */
+#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
+
+/* maximum possible number of bytes of GID information returned if given the BINARY and SHA1 flags */
+#define NV2080_GPU_MAX_SHA1_BINARY_GID_LENGTH (0x000000010ULL)
+
+#define NV2080_CTRL_GPU_GET_GID_INFO_PARAMS_MESSAGE_ID (0x4AU)
+
+typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+    NvU32 index;
+    NvU32 flags;
+    NvU32 length;
+    NvU8  data[NV2080_GPU_MAX_GID_LENGTH];
+} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
+
+/* valid flags values */
+#define NV2080_GPU_CMD_GPU_GET_GID_FLAGS_FORMAT        1:0
+#define NV2080_GPU_CMD_GPU_GET_GID_FLAGS_FORMAT_ASCII  (0x00000000U)
+#define NV2080_GPU_CMD_GPU_GET_GID_FLAGS_FORMAT_BINARY (0x00000002U)
+
+#define NV2080_GPU_CMD_GPU_GET_GID_FLAGS_TYPE          2:2
+#define NV2080_GPU_CMD_GPU_GET_GID_FLAGS_TYPE_SHA1     (0x00000000U)
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_INFOROM_OBJECT_VERSION
+ *
+ * This command can be used by clients to retrieve the version of an
+ * InfoROM object.
+ *
+ *   objectType
+ *     This parameter specifies the name of the InfoROM object whose version
+ *     should be queried.
+ *
+ *   version
+ *     This parameter returns the version of the InfoROM object specified by
+ *     the objectType parameter.
+ *
+ *   subversion
+ *     This parameter returns the subversion of the InfoROM object specified
+ *     by the objectType parameter.
+ *
+ * Possible return status values:
+ *   NV_OK
+ *   NV_ERR_STATE_IN_USE
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *
+ */
+#define NV2080_CTRL_CMD_GPU_GET_INFOROM_OBJECT_VERSION (0x2080014bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_INFOROM_OBJ_TYPE_LEN 3U
+
+#define NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS_MESSAGE_ID (0x4BU)
+
+typedef struct NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS {
+    char objectType[NV2080_CTRL_GPU_INFOROM_OBJ_TYPE_LEN];
+    NvU8 version;
+    NvU8 subversion;
+} NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS;
+
+
+/*
+ * NV2080_CTRL_CMD_SET_GPU_OPTIMUS_INFO
+ *
+ * This command specifies that the system is Optimus enabled.
+ *
+ *   isOptimusEnabled
+ *     Set to NV_TRUE if the system is Optimus enabled.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_SET_GPU_OPTIMUS_INFO (0x2080014cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS_MESSAGE_ID (0x4CU)
+
+typedef struct NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS {
+    NvBool isOptimusEnabled;
+} NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_IP_VERSION
+ *
+ * Returns the IP version of the given engine, for engines that support
+ * this capability.
+ *
+ *   targetEngine
+ *     This parameter specifies the target engine type to query for IP_VERSION.
+ *
+ *   ipVersion
+ *     This parameter returns the IP version read from the unit's IP_VER
+ *     register.
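+ *
+ * As an illustrative, non-normative sketch (rmControl() stands in for the
+ * client's RM control dispatch):
+ *
+ *   NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS p = { 0 };
+ *   p.targetEngine = NV2080_CTRL_GPU_GET_IP_VERSION_DISPLAY;
+ *   status = rmControl(hSubdevice, NV2080_CTRL_CMD_GPU_GET_IP_VERSION,
+ *                      &p, sizeof(p));
+ *   // On NV_OK, p.ipVersion holds the display unit's IP_VER value.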
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_GPU_GET_IP_VERSION (0x2080014dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS_MESSAGE_ID (0x4DU)
+
+typedef struct NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS {
+    NvU32 targetEngine;
+    NvU32 ipVersion;
+} NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS;
+
+#define NV2080_CTRL_GPU_GET_IP_VERSION_DISPLAY     (0x00000001U)
+#define NV2080_CTRL_GPU_GET_IP_VERSION_HDACODEC    (0x00000002U)
+#define NV2080_CTRL_GPU_GET_IP_VERSION_PMGR        (0x00000003U)
+#define NV2080_CTRL_GPU_GET_IP_VERSION_PPWR_PMU    (0x00000004U)
+#define NV2080_CTRL_GPU_GET_IP_VERSION_DISP_FALCON (0x00000005U)
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT
+ *
+ * This command returns an indicator which reports whether the specified
+ * Illumination control attribute is supported.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_GPU_ILLUM_ATTRIB_LOGO_BRIGHTNESS 0U
+#define NV2080_CTRL_GPU_ILLUM_ATTRIB_SLI_BRIGHTNESS  1U
+#define NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT (0x20800153U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS_MESSAGE_ID (0x53U)
+
+typedef struct NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS {
+    NvU32  attribute;
+    NvBool bSupported;
+} NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ILLUM
+ *
+ * This command returns the current value of the specified Illumination control attribute.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_ILLUM (0x20800154U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ILLUM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_CMD_GPU_ILLUM_PARAMS {
+    NvU32 attribute;
+    NvU32 value;
+} NV2080_CTRL_CMD_GPU_ILLUM_PARAMS;
+
+#define NV2080_CTRL_GPU_GET_ILLUM_PARAMS_MESSAGE_ID (0x54U)
+
+typedef NV2080_CTRL_CMD_GPU_ILLUM_PARAMS NV2080_CTRL_GPU_GET_ILLUM_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_SET_ILLUM
+ *
+ * This command sets a new value for the specified Illumination control attribute.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_SET_ILLUM (0x20800155U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_ILLUM_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_SET_ILLUM_PARAMS_MESSAGE_ID (0x55U)
+
+typedef NV2080_CTRL_CMD_GPU_ILLUM_PARAMS NV2080_CTRL_GPU_SET_ILLUM_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_INFOROM_IMAGE_VERSION
+ *
+ * This command can be used by clients to retrieve the version of the entire
+ * InfoROM image.
+ *
+ *   version
+ *     This parameter returns the version of the InfoROM image as a NULL-
+ *     terminated character string of the form "XXXX.XXXX.XX.XX" where each
+ *     'X' is an integer character.
+ *
+ * Possible status return values are:
+ *   NVOS_STATUS_SUCCESS
+ *   NV_ERR_INSUFFICIENT_RESOURCES
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_DATA
+ */
+#define NV2080_CTRL_CMD_GPU_GET_INFOROM_IMAGE_VERSION (0x20800156U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_INFOROM_IMAGE_VERSION_LEN 16U
+
+#define NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS_MESSAGE_ID (0x56U)
+
+typedef struct NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS {
+    NvU8 version[NV2080_CTRL_GPU_INFOROM_IMAGE_VERSION_LEN];
+} NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_INFOROM_ECC_SUPPORT
+ *
+ * This command returns whether or not ECC is supported via the InfoROM.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_QUERY_INFOROM_ECC_SUPPORT (0x20800157U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x57" */
+
+/*
+ * NV2080_CTRL_GPU_PHYSICAL_BRIDGE_VERSION
+ *
+ * This structure contains information about a single physical bridge.
+ *
+ *   fwVersion
+ *     This field specifies the firmware version of the bridge, stored in
+ *     the bridge EEPROM.
+ *   oemVersion
+ *     This field specifies the OEM version of the firmware, stored in
+ *     the bridge EEPROM.
+ *   siliconRevision
+ *     This field contains the silicon revision of the bridge hardware.
+ *     It is set by the chip manufacturer.
+ *   hwbcResourceType
+ *     This field specifies the hardware broadcast resource type.
+ *     The value denotes the kind of bridge (PLX or BR04).
+ *
+ */
+
+typedef struct NV2080_CTRL_GPU_PHYSICAL_BRIDGE_VERSION_PARAMS {
+    NvU32 fwVersion;
+    NvU8  oemVersion;
+    NvU8  siliconRevision;
+    NvU8  hwbcResourceType;
+} NV2080_CTRL_GPU_PHYSICAL_BRIDGE_VERSION_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO
+ *
+ * This command returns physical bridge information for the system.
+ * The information consists of a bridgeCount and a list of bridge IDs.
+ * The bridge IDs are used by NV2080_CTRL_CMD_GPU_GET_PHYSICAL_BRIDGE_VERSION
+ * to get firmware version, OEM version and silicon revision info.
+ *
+ *   bridgeCount
+ *     This field specifies the number of physical bridges present
+ *     in the system.
+ *   hPhysicalBridges
+ *     This field specifies an array of size NV2080_CTRL_MAX_PHYSICAL_BRIDGE.
+ *     In this array, the bridge IDs are stored.
+ *   bridgeList
+ *     This field specifies an array of size NV2080_CTRL_MAX_PHYSICAL_BRIDGE.
+ *     In this array, the bridge version details are stored.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO (0x2080015aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_MAX_PHYSICAL_BRIDGE (100U)
+#define NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS_MESSAGE_ID (0x5AU)
+
+typedef struct NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS {
+    NvU8     bridgeCount;
+    NvHandle hPhysicalBridges[NV2080_CTRL_MAX_PHYSICAL_BRIDGE];
+    NV2080_CTRL_GPU_PHYSICAL_BRIDGE_VERSION_PARAMS bridgeList[NV2080_CTRL_MAX_PHYSICAL_BRIDGE];
+} NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_GPU_BRIDGE_VERSION
+ *
+ * This structure contains information about a single physical bridge.
+ *
+ *   bus
+ *     This field specifies the bus ID of the bridge.
+ * device
+ * This field specifies the device id of the bridge.
+ * func
+ * This field specifies the function id of the bridge.
+ * oemVersion
+ * This field specifies the OEM version of the firmware stored in
+ * bridge EEPROM.
+ * siliconRevision
+ * This field contains the silicon revision of the bridge hardware.
+ * It is set by the chip manufacturer.
+ * hwbcResourceType
+ * This field specifies the hardware broadcast resource type.
+ * The value denotes the kind of bridge (PLX or BR04).
+ * domain
+ * This field specifies the respective domain of the PCI device.
+ * fwVersion
+ * This field specifies the firmware version of the bridge stored in
+ * bridge EEPROM.
+ *
+ * If (fwVersion, oemVersion, siliconRevision) == 0, it would mean that RM
+ * was unable to fetch the value from the bridge device.
+ *
+ */
+
+typedef struct NV2080_CTRL_GPU_BRIDGE_VERSION_PARAMS {
+    NvU8 bus;
+    NvU8 device;
+    NvU8 func;
+    NvU8 oemVersion;
+    NvU8 siliconRevision;
+    NvU8 hwbcResourceType;
+    NvU32 domain;
+    NvU32 fwVersion;
+} NV2080_CTRL_GPU_BRIDGE_VERSION_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU
+ *
+ * This command returns information about all the upstream bridges of the GPU.
+ * Information consists of bridge firmware version and its bus topology.
+ *
+ * bridgeCount
+ * This field specifies the number of physical bridges present
+ * in the system.
+ * physicalBridgeIds
+ * This field specifies an array of size NV2080_CTRL_MAX_PHYSICAL_BRIDGE.
+ * In this array, the bridge Ids are stored.
+ * bridgeList
+ * This field specifies an array of size NV2080_CTRL_MAX_PHYSICAL_BRIDGE.
+ * In this array, the bridge version details are stored.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU (0x2080015bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS_MESSAGE_ID (0x5BU)
+
+typedef struct NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS {
+    NvU8 bridgeCount;
+    NvU32 physicalBridgeIds[NV2080_CTRL_MAX_PHYSICAL_BRIDGE];
+    NV2080_CTRL_GPU_BRIDGE_VERSION_PARAMS bridgeList[NV2080_CTRL_MAX_PHYSICAL_BRIDGE];
+} NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_SCRUBBER_STATUS
+ *
+ * This command is used to query the status of the HW scrubber. If a scrub is
+ * in progress then the range which is being scrubbed is also reported back.
+ *
+ * scrubberStatus
+ * Reports the status of the scrubber unit - running/idle.
+ *
+ * remainingTimeMs
+ * If scrubbing is going on, reports the remaining time in milliseconds
+ * required to finish the scrub.
+ *
+ * scrubStartAddr
+ * This parameter reports the start address of the ongoing scrub if scrub
+ * is going on, otherwise reports the start addr of the last finished scrub.
+ *
+ * scrubEndAddr
+ * This parameter reports the end address of the ongoing scrub if scrub
+ * is going on, otherwise reports the end addr of the last finished scrub.
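+ *
+ * Usage sketch (illustrative only; rmControl() is a hypothetical wrapper
+ * around the RM control interface, and hClient/hSubdevice are caller-owned
+ * handles, none of which are defined in this header):
+ *
+ *   NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS params = { 0 };
+ *   do {
+ *       status = rmControl(hClient, hSubdevice,
+ *                          NV2080_CTRL_CMD_GPU_QUERY_SCRUBBER_STATUS,
+ *                          &params, sizeof(params));
+ *   } while ((status == NV_OK) &&
+ *            (params.scrubberStatus ==
+ *             NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_SCRUBBER_RUNNING));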
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_GPU_QUERY_SCRUBBER_STATUS (0x2080015fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS_MESSAGE_ID (0x5FU) + +typedef struct NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS { + NvU32 scrubberStatus; + NvU32 remainingTimeMs; + NV_DECLARE_ALIGNED(NvU64 scrubStartAddr, 8); + NV_DECLARE_ALIGNED(NvU64 scrubEndAddr, 8); +} NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS; + +/* valid values for scrubber status */ +#define NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_SCRUBBER_RUNNING (0x00000000U) +#define NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_SCRUBBER_IDLE (0x00000001U) + +/* + * NV2080_CTRL_CMD_GPU_GET_VPR_CAPS + * + * This command is used to query the VPR capability information for a + * GPU. If VPR is supported, the parameters are filled accordingly. + * The addresses returned are all physical addresses. + * + * minStartAddr + * Returns the minimum start address that can be possible for VPR. + * + * maxEndAddr + * Returns the maximum end address that can be possible for VPR. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_GPU_GET_VPR_CAPS (0x20800160U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS_MESSAGE_ID (0x60U) + +typedef struct NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS { + NV_DECLARE_ALIGNED(NvU64 minStartAddr, 8); + NV_DECLARE_ALIGNED(NvU64 maxEndAddr, 8); +} NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GPU_HANDLE_GPU_SR + * + * Communicates to RM to handle GPU Surprise Removal + * Called from client when it receives SR IRP from OS + * Possible status values returned are: + * NVOS_STATUS_SUCCESS + */ +#define NV2080_CTRL_CMD_GPU_HANDLE_GPU_SR (0x20800167U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x67" */ + + +/* + * NV2080_CTRL_CMD_GPU_GET_PES_INFO + * + * This command provides the PES count and mask of enabled PES for a + * specified GPC. It also returns the TPC to PES mapping information + * for a given GPU. + * + * gpcId[IN] + * This parameter specifies the GPC for which PES information is to be + * retrieved. If the GPC with this ID is not enabled this command will + * return an activePesMask of zero + * + * numPesInGpc[OUT] + * This parameter returns the number of PES in this GPC. + * + * activePesMask[OUT] + * This parameter returns a mask of enabled PESs for the specified GPC. + * Each PES has an ID that is equivalent to the corresponding bit position + * in the mask. + * + * maxTpcPerGpcCount[OUT] + * This parameter returns the max number of TPCs in a GPC. + * + * tpcToPesMap[OUT] + * This array stores the TPC to PES mappings. The value at tpcToPesMap[tpcIndex] + * is the index of the PES it belongs to. 
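+ * For example (illustrative only), tpcToPesMap[3] == 1 means that TPC 3 of
+ * the queried GPC is mapped to PES 1.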
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_PES_INFO (0x20800168U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PES_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_GPU_GET_PES_INFO_MAX_TPC_PER_GPC_COUNT 10U
+
+#define NV2080_CTRL_GPU_GET_PES_INFO_PARAMS_MESSAGE_ID (0x68U)
+
+typedef struct NV2080_CTRL_GPU_GET_PES_INFO_PARAMS {
+    NvU32 gpcId;
+    NvU32 numPesInGpc;
+    NvU32 activePesMask;
+    NvU32 maxTpcPerGpcCount;
+    NvU32 tpcToPesMap[NV2080_CTRL_CMD_GPU_GET_PES_INFO_MAX_TPC_PER_GPC_COUNT];
+} NV2080_CTRL_GPU_GET_PES_INFO_PARAMS;
+
+/* NV2080_CTRL_CMD_GPU_GET_OEM_INFO
+ *
+ * If an InfoROM with a valid OEM Object is present, this
+ * command returns relevant information from the object to the
+ * caller.
+ *
+ * oemInfo
+ * This array stores information specifically for OEM use
+ * (e.g. "their own serial number", "lot codes", etc)
+ * "The byte definition is up to the OEM"
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_GPU_GET_OEM_INFO (0x20800169U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_GPU_MAX_OEM_INFO_LENGTH (0x000001F8U)
+
+#define NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS_MESSAGE_ID (0x69U)
+
+typedef struct NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS {
+    NvU8 oemInfo[NV2080_GPU_MAX_OEM_INFO_LENGTH];
+} NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_VPR_INFO
+ *
+ * This command is used to query the VPR information for a GPU.
+ * The following VPR related information can be queried by selecting the queryType:
+ * 1. The current VPR range.
+ * 2. The max VPR range ever possible on this GPU.
+ *
+ * queryType [in]
+ * This input parameter is used to select the type of information to query.
+ * Possible values for this parameter are:
+ * 1. NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_VPR_CAPS: Use this to query the
+ * max VPR range ever possible on this GPU.
+ * 2. NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_CUR_VPR_RANGE: Use this to query
+ * the current VPR range on this GPU.
+ *
+ * bIsVprEnabled [out]
+ * For query type NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_CUR_VPR_RANGE, this
+ * parameter returns whether VPR is currently enabled or not.
+ *
+ * vprStartAddressInBytes [out]
+ * For NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_VPR_CAPS, it returns the minimum allowed VPR start address.
+ * For NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_CUR_VPR_RANGE, it returns the current VPR start address.
+ *
+ * vprEndAddressInBytes [out]
+ * For NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_VPR_CAPS, it returns the maximum allowed VPR end address.
+ * For NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_CUR_VPR_RANGE, it returns the current VPR end address.
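+ *
+ * Usage sketch (illustrative only; rmControl() is a hypothetical wrapper,
+ * not part of this header):
+ *
+ *   NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS params = { 0 };
+ *   params.queryType = NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_CUR_VPR_RANGE;
+ *   status = rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_GPU_GET_VPR_INFO,
+ *                      &params, sizeof(params));
+ *   // On NV_OK, params.bIsVprEnabled and the vpr*AddressInBytes fields
+ *   // describe the current VPR range.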
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_VPR_INFO (0x2080016bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS_MESSAGE_ID" */ + + +typedef enum NV2080_CTRL_VPR_INFO_QUERY_TYPE { + NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_VPR_CAPS = 0, + NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_CUR_VPR_RANGE = 1, +} NV2080_CTRL_VPR_INFO_QUERY_TYPE; + +#define NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS_MESSAGE_ID (0x6BU) + +typedef struct NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS { + NV2080_CTRL_VPR_INFO_QUERY_TYPE queryType; + NvBool bIsVprEnabled; + NV_DECLARE_ALIGNED(NvU64 vprStartAddressInBytes, 8); + NV_DECLARE_ALIGNED(NvU64 vprEndAddressInBytes, 8); +} NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_ENCODER_CAPACITY + * + * This command is used to query the encoder capacity of the GPU. + * + * queryType [in] + * This input parameter is used to select the type of information to query. + * Possible values for this parameter are: + * 1. NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_H264: Use this to query the + * H.264 encoding capacity on this GPU. + * 2. NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_HEVC: Use this to query the + * H.265/HEVC encoding capacity on this GPU. + * 3. NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_AV1: Use this to query the + * AV1 encoding capacity on this GPU. + * + * encoderCapacity [out] + * Encoder capacity value from 0 to 100. Value of 0x00 indicates encoder performance + * may be minimal for this GPU and software should fall back to CPU-based encode. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_GPU_GET_ENCODER_CAPACITY (0x2080016cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS_MESSAGE_ID" */ + +typedef enum NV2080_CTRL_ENCODER_CAPACITY_QUERY_TYPE { + NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_H264 = 0, + NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_HEVC = 1, + NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_AV1 = 2, +} NV2080_CTRL_ENCODER_CAPACITY_QUERY_TYPE; + +#define NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS_MESSAGE_ID (0x6CU) + +typedef struct NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS { + NV2080_CTRL_ENCODER_CAPACITY_QUERY_TYPE queryType; + NvU32 encoderCapacity; +} NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS; + +/* + * NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS + * + * This command is used to retrieve the GPU's count of encoder sessions, + * trailing average FPS and encode latency over all active sessions. + * + * encoderSessionCount + * This field specifies count of all active encoder sessions on this GPU. + * + * averageEncodeFps + * This field specifies the average encode FPS for this GPU. + * + * averageEncodeLatency + * This field specifies the average encode latency in microseconds for this GPU. 
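+ *
+ * Usage sketch (illustrative only; rmControl() is a hypothetical wrapper,
+ * not part of this header):
+ *
+ *   NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS stats = { 0 };
+ *   status = rmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS,
+ *                      &stats, sizeof(stats));
+ *   // On NV_OK, stats.encoderSessionCount, stats.averageEncodeFps and
+ *   // stats.averageEncodeLatency hold the aggregate values.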
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS (0x2080016dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS_MESSAGE_ID (0x6DU)
+
+typedef struct NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS {
+    NvU32 encoderSessionCount;
+    NvU32 averageEncodeFps;
+    NvU32 averageEncodeLatency;
+} NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS;
+
+#define NV2080_CTRL_GPU_NVENC_SESSION_INFO_MAX_COPYOUT_ENTRIES 0x200U // 512 entries.
+
+/*
+ * NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO
+ *
+ * This command returns NVENC software session information for the associated GPU.
+ * Requests to retrieve session information use a list of one or more
+ * NV2080_CTRL_NVENC_SW_SESSION_INFO structures.
+ *
+ * sessionInfoTblEntry
+ * This field specifies the number of entries that are filled inside
+ * sessionInfoTbl. The max value of this field once returned from RM would be
+ * NV2080_CTRL_GPU_NVENC_SESSION_INFO_MAX_COPYOUT_ENTRIES.
+ *
+ * sessionInfoTbl
+ * This field specifies a pointer in the caller's address space
+ * to the buffer into which the NVENC session information is to be returned.
+ * When the buffer is NULL, RM assumes that the client is querying the session
+ * count, and returns the current encoder session count in the
+ * sessionInfoTblEntry field.
+ * To get the actual buffer data, the client should allocate sessionInfoTbl of size
+ * NV2080_CTRL_GPU_NVENC_SESSION_INFO_MAX_COPYOUT_ENTRIES multiplied by the
+ * size of the NV2080_CTRL_NVENC_SW_SESSION_INFO structure. RM will fill the
+ * current session data in the sessionInfoTbl buffer and then update
+ * sessionInfoTblEntry to reflect the current session count.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NO_MEMORY
+ * NV_ERR_INVALID_LOCK_STATE
+ * NV_ERR_INVALID_ARGUMENT
+ */
+
+typedef struct NV2080_CTRL_NVENC_SW_SESSION_INFO {
+    NvU32 processId;
+    NvU32 subProcessId;
+    NvU32 sessionId;
+    NvU32 codecType;
+    NvU32 hResolution;
+    NvU32 vResolution;
+    NvU32 averageEncodeFps;
+    NvU32 averageEncodeLatency;
+} NV2080_CTRL_NVENC_SW_SESSION_INFO;
+
+#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS_MESSAGE_ID (0x6EU)
+
+typedef struct NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS {
+    NvU32 sessionInfoTblEntry;
+    NV_DECLARE_ALIGNED(NvP64 sessionInfoTbl, 8);
+} NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS;
+
+#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO (0x2080016eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_GPU_SET_FABRIC_BASE_ADDR
+ *
+ * The command sets the fabric base address, which represents the top N bits of a
+ * peer memory address. These N bits will be used to index NvSwitch routing
+ * tables to forward peer memory accesses to associated GPUs.
+ *
+ * The command is available to clients with administrator privileges only.
+ * An attempt to use this command by a client without administrator privileges
+ * results in the return of an NV_ERR_INSUFFICIENT_PERMISSIONS status.
+ *
+ * The command allows setting the fabricAddr only once in the lifetime of a GPU.
+ * A GPU must be destroyed in order to re-assign a different fabricAddr. An
+ * attempt to re-assign the address without destroying the GPU results in the
+ * return of an NV_ERR_STATE_IN_USE status.
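+ *
+ * Usage sketch (illustrative only): a 32 GB-aligned value such as
+ * 0x0000008000000000ULL (512 GB) satisfies the alignment requirement
+ * described below; the actual value to program comes from whatever entity
+ * manages the NvSwitch fabric (an assumption, not stated in this header).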
+ *
+ * fabricBaseAddr[IN]
+ * - An address with at least 32GB alignment.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INSUFFICIENT_PERMISSIONS
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_STATE_IN_USE
+ */
+
+#define NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS_MESSAGE_ID (0x6FU)
+
+typedef struct NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 fabricBaseAddr, 8);
+} NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_SET_FABRIC_BASE_ADDR (0x2080016fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_GPU_VIRTUAL_INTERRUPT
+ *
+ * The command will trigger the specified interrupt on the host from a guest.
+ *
+ * handle[IN]
+ * - An opaque handle that will be passed in along with the interrupt
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_GPU_VIRTUAL_INTERRUPT_PARAMS_MESSAGE_ID (0x72U)
+
+typedef struct NV2080_CTRL_GPU_VIRTUAL_INTERRUPT_PARAMS {
+    NvU32 handle;
+} NV2080_CTRL_GPU_VIRTUAL_INTERRUPT_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_VIRTUAL_INTERRUPT (0x20800172U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_VIRTUAL_INTERRUPT_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS
+ *
+ * This control call is used to query the status of GPU function registers.
+ *
+ * statusMask[IN]
+ * - Input mask of required status registers
+ * xusbData[OUT]
+ * - data from querying XUSB status register
+ * ppcData[OUT]
+ * - data from querying PPC status register
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+
+
+
+#define NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS_MESSAGE_ID (0x73U)
+
+typedef struct NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS {
+    NvU32 statusMask;
+    NvU32 xusbData;
+    NvU32 ppcData;
+} NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS (0x20800173U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_GPU_PARTITION_SPAN
+ *
+ * This struct represents the span of a memory partition, which represents the
+ * slices a given partition occupies (or may occupy) within a fixed range which
+ * is defined per-chip. A partition containing more resources will cover more
+ * GPU slices and therefore cover a larger span.
+ *
+ * lo
+ * - The starting unit of this span, inclusive
+ *
+ * hi
+ * - The ending unit of this span, inclusive
+ *
+ */
+typedef struct NV2080_CTRL_GPU_PARTITION_SPAN {
+    NV_DECLARE_ALIGNED(NvU64 lo, 8);
+    NV_DECLARE_ALIGNED(NvU64 hi, 8);
+} NV2080_CTRL_GPU_PARTITION_SPAN;
+
+/*
+ * NV2080_CTRL_EXEC_PARTITION_SPAN
+ *
+ * This struct represents the span of a compute partition, which represents the
+ * slices a given partition occupies (or may occupy) within a fixed range which
+ * is defined per memory partition. A partition containing more resources will
+ * cover more GPU instance slices and therefore cover a larger span.
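+ * For example (illustrative only), a span with lo == 0 and hi == 3, per the
+ * field definitions below, covers the first four slices of the range.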
+ *
+ * lo
+ * - The starting unit of this span, inclusive
+ *
+ * hi
+ * - The ending unit of this span, inclusive
+ *
+ */
+typedef struct NV2080_CTRL_EXEC_PARTITION_SPAN {
+    NV_DECLARE_ALIGNED(NvU64 lo, 8);
+    NV_DECLARE_ALIGNED(NvU64 hi, 8);
+} NV2080_CTRL_EXEC_PARTITION_SPAN;
+
+#define NV_GI_UUID_LEN 16U
+
+/*
+ * NV2080_CTRL_GPU_SET_PARTITION_INFO
+ *
+ * This command partitions a GPU into different SMC-Memory partitions.
+ * The command will configure the HW partition table to create work and memory
+ * isolation.
+ *
+ * The command is available to clients with administrator privileges only.
+ * An attempt to use this command by a client without administrator privileges
+ * results in the return of an NV_ERR_INSUFFICIENT_PERMISSIONS status.
+ *
+ * The command allows partitioning an invalid partition only. An attempt to
+ * re-partition a valid partition will result in NV_ERR_STATE_IN_USE.
+ * Repartitioning can be done only if a partition has been destroyed/invalidated
+ * before re-partitioning.
+ *
+ * swizzId[IN/OUT]
+ * - PartitionID associated with a newly created partition. Input in case
+ * of partition invalidation.
+ *
+ * uuid[OUT]
+ * - UUID of a newly created partition.
+ *
+ * partitionFlag[IN]
+ * - Flags to determine if the GPU is requested to be partitioned in FULL,
+ * HALF, QUARTER or ONE_EIGHTHED mode and whether the partition requires
+ * any additional resources.
+ * When flags include NV2080_CTRL_GPU_PARTITION_FLAG_REQ_DEC_JPG_OFA, the
+ * partition will be created with at least one video decode, one JPEG and
+ * one optical flow engine. This flag is valid only for partitions with
+ * a single GPC.
+ *
+ * bValid[IN]
+ * - NV_TRUE if creating a partition. NV_FALSE if destroying a partition.
+ *
+ * placement[IN]
+ * - Optional placement span to allocate the partition into. Valid
+ * placements are returned from NV2080_CTRL_CMD_GPU_GET_PARTITION_CAPACITY.
+ * The partition flag NV2080_CTRL_GPU_PARTITION_FLAG_PLACE_AT_SPAN must
+ * be set for this parameter to be used. If the flag is set and the given
+ * placement is not valid, an error will be returned.
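+ *
+ * Usage sketch (illustrative only): requesting a half-GPU partition via the
+ * flag fields defined below.
+ *
+ *   NV2080_CTRL_GPU_SET_PARTITION_INFO info = { 0 };
+ *   info.partitionFlag =
+ *       DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _HALF) |
+ *       DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _HALF);
+ *   info.bValid = NV_TRUE;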
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INSUFFICIENT_PERMISSIONS + * NV_ERR_NOT_SUPPORTED + * NV_ERR_STATE_IN_USE + */ +typedef struct NV2080_CTRL_GPU_SET_PARTITION_INFO { + NvU32 swizzId; + NvU8 uuid[NV_GI_UUID_LEN]; + NvU32 partitionFlag; + NvBool bValid; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PARTITION_SPAN placement, 8); +} NV2080_CTRL_GPU_SET_PARTITION_INFO; + +#define PARTITIONID_INVALID NV2080_CTRL_GPU_PARTITION_ID_INVALID +#define NV2080_CTRL_GPU_PARTITION_ID_INVALID 0xFFFFFFFFU +#define NV2080_CTRL_GPU_MAX_PARTITIONS 0x00000008U +#define NV2080_CTRL_GPU_MAX_PARTITION_IDS 0x00000009U +#define NV2080_CTRL_GPU_MAX_SMC_IDS 0x00000008U +#define NV2080_CTRL_GPU_MAX_GPC_PER_SMC 0x00000010U +#define NV2080_CTRL_GPU_MAX_CE_PER_SMC 0x00000008U + +#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE 1:0 +#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_FULL 0x00000000U +#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_HALF 0x00000001U +#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_QUARTER 0x00000002U +#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_EIGHTH 0x00000003U +#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE__SIZE 4U +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE 4:2 +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_FULL 0x00000000U +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_HALF 0x00000001U +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_MINI_HALF 0x00000002U +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_QUARTER 0x00000003U +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_MINI_QUARTER 0x00000004U +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_EIGHTH 0x00000005U +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_RESERVED_INTERNAL_06 0x00000006U +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_RESERVED_INTERNAL_07 0x00000007U +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE__SIZE 8U + +#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE 7:5 +#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_FULL 0x00000001U +#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_HALF 0x00000002U +#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_MINI_HALF 0x00000003U +#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_QUARTER 0x00000004U +#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_EIGHTH 0x00000005U +#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_RESERVED_INTERNAL_06 0x00000006U +#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_RESERVED_INTERNAL_07 0x00000007U + +#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_NONE 0x00000000U +#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE__SIZE 8U + + +#define NV2080_CTRL_GPU_PARTITION_MAX_TYPES 90U + + +#define NV2080_CTRL_GPU_PARTITION_FLAG_REQ_DEC_JPG_OFA 30:30 +#define NV2080_CTRL_GPU_PARTITION_FLAG_REQ_DEC_JPG_OFA_DISABLE 0U +#define NV2080_CTRL_GPU_PARTITION_FLAG_REQ_DEC_JPG_OFA_ENABLE 1U +#define NV2080_CTRL_GPU_PARTITION_FLAG_PLACE_AT_SPAN 31:31 +#define NV2080_CTRL_GPU_PARTITION_FLAG_PLACE_AT_SPAN_DISABLE 0U +#define NV2080_CTRL_GPU_PARTITION_FLAG_PLACE_AT_SPAN_ENABLE 1U + +// TODO XXX Bug 2657907 Remove these once clients update +#define NV2080_CTRL_GPU_PARTITION_FLAG_FULL_GPU (DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _FULL) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _FULL)) +#define NV2080_CTRL_GPU_PARTITION_FLAG_ONE_HALF_GPU (DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _HALF) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _HALF)) +#define NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_HALF_GPU (DRF_DEF(2080, 
_CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _HALF) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _MINI_HALF))
+#define NV2080_CTRL_GPU_PARTITION_FLAG_ONE_QUARTER_GPU (DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _QUARTER) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _QUARTER))
+#define NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_QUARTER_GPU (DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _QUARTER) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _MINI_QUARTER))
+#define NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU (DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _EIGHTH) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _EIGHTH))
+
+#define NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS_MESSAGE_ID (0x74U)
+
+typedef struct NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS {
+    NvU32 partitionCount;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_SET_PARTITION_INFO partitionInfo[NV2080_CTRL_GPU_MAX_PARTITIONS], 8);
+} NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_SET_PARTITIONS (0x20800174U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_GPU_GET_PARTITION_INFO
+ *
+ * This command gets the partition information for requested partitions.
+ * If the GPU is not partitioned, the control call will return NV_ERR_NOT_SUPPORTED.
+ *
+ * The command can return global partition information as well as single
+ * partition information if the global flag is not set.
+ * In bare-metal, user mode can request all partition info, while in
+ * virtualization the plugin should make an RPC with the swizzId which is
+ * assigned to the requesting VM.
+ *
+ * swizzId[IN]
+ * - HW Partition ID associated with the requested partition.
+ *
+ * partitionFlag[OUT]
+ * - partitionFlag that was provided during partition creation.
+ *
+ * grEngCount[OUT]
+ * - Number of SMC engines/GR engines allocated in partition.
+ * GrIDs in a partition will always start from 0 and end at grEngCount-1.
+ *
+ * veidCount[OUT]
+ * - VEID Count assigned to a partition. These will be divided across
+ * SMC engines once the CONFIGURE_PARTITION call has been made. The current
+ * algorithm is to assign veidPerGpc * gpcCountPerSmc to an SMC engine.
+ *
+ * smCount[OUT]
+ * - SMs assigned to a partition.
+ *
+ * ceCount[OUT]
+ * - Copy Engines assigned to a partition.
+ *
+ * nvEncCount[OUT]
+ * - NvEnc Engines assigned to a partition.
+ *
+ * nvDecCount[OUT]
+ * - NvDec Engines assigned to a partition.
+ *
+ * nvJpgCount[OUT]
+ * - NvJpg Engines assigned to a partition.
+ *
+ * nvOfaCount[OUT]
+ * - NvOfa Engines assigned to a partition.
+ *
+ * gpcCount[OUT]
+ * - Max GPCs assigned to a partition, including the GfxCapable ones.
+ *
+ * virtualGpcCount[OUT]
+ * - Virtualized GPC count assigned to partition
+ *
+ * gfxGpcCount[OUT]
+ * - Max GFX GPCs assigned to a partition. This is a subset of the GPCs included in gpcCount.
+ *
+ * gpcsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS][OUT]
+ * - GPC count associated with every valid SMC/Gr, including the GPCs capable of GFX
+ *
+ * virtualGpcsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS][OUT]
+ * - Virtualized GPC count associated with every valid SMC/Gr, including the GPCs capable of GFX
+ *
+ * gfxGpcPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS][OUT]
+ * - GFX GPC count associated with every valid SMC/Gr. This is a subset of the GPCs included in gfxGpcCount
+ *
+ * veidsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS][OUT]
+ * - VEID count associated with every valid SMC. VEIDs within this SMC
+ * will start from 0 and go till veidCount[SMC_ID] - 1.
+ * + * span[OUT] + * - The span covered by this partition + * + * bValid[OUT] + * - NV_TRUE if partition is valid else NV_FALSE. + * + * bPartitionError[OUT] + * - NV_TRUE if partition had poison error which requires drain and reset + * else NV_FALSE. + * + * validCTSIdMask[OUT] + * - Mask of CTS IDs usable by this partition, not reflecting current allocations + * + * validGfxCTSIdMask[OUT] + * - Mask of CTS IDs that contain Gfx capable Grs usable by this partition, not reflecting current allocations + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INSUFFICIENT_PERMISSIONS + * NV_ERR_NOT_SUPPORTED + */ +typedef struct NV2080_CTRL_GPU_GET_PARTITION_INFO { + NvU32 swizzId; + NvU32 partitionFlag; + NvU32 grEngCount; + NvU32 veidCount; + NvU32 smCount; + NvU32 ceCount; + NvU32 nvEncCount; + NvU32 nvDecCount; + NvU32 nvJpgCount; + NvU32 nvOfaCount; + NvU32 gpcCount; + NvU32 virtualGpcCount; + NvU32 gfxGpcCount; + NvU32 gpcsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS]; + NvU32 virtualGpcsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS]; + NvU32 gfxGpcPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS]; + NvU32 veidsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS]; + NV_DECLARE_ALIGNED(NvU64 memSize, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PARTITION_SPAN span, 8); + NvBool bValid; + NvBool bPartitionError; + NV_DECLARE_ALIGNED(NvU64 validCTSIdMask, 8); + NV_DECLARE_ALIGNED(NvU64 validGfxCTSIdMask, 8); +} NV2080_CTRL_GPU_GET_PARTITION_INFO; + +/* + * NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS + * + * queryPartitionInfo[IN] + * - Max sized array of NV2080_CTRL_GPU_GET_PARTITION_INFO to get partition + * Info + * + * bGetAllPartitionInfo[In] + * - Flag to get all partitions info. Only root client will receive all + * partition's info. Non-Root clients should not use this flag + * + * validPartitionCount[Out] + * - Valid partition count which has been filled by RM as part of the call + * + */ +#define NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS_MESSAGE_ID (0x75U) + +typedef struct NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_GET_PARTITION_INFO queryPartitionInfo[NV2080_CTRL_GPU_MAX_PARTITIONS], 8); + NvU32 validPartitionCount; + NvBool bGetAllPartitionInfo; +} NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_PARTITIONS (0x20800175U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_GPU_CONFIGURE_PARTITION + * + * This command configures a partition by associating GPCs with SMC Engines + * available in that partition. Engines which are to have GPCs assigned to them + * shall not already have any GPCs assigned to them. It is not valid to both + * assign GPCs and remove GPCs as part of a single call to this function. + * + * swizzId[IN] + * - PartitionID for configuring partition. If partition has a valid + * context created, then configuration is not allowed. + * + * gpcCountPerSmcEng[IN] + * - Number of GPCs expected to be configured per SMC. Supported + * configurations are 0, 1, 2, 4 or 8. "0" means a particular SMC + * engine will be disabled with no GPC connected to it. + * + * updateSmcEngMask[IN] + * - Mask tracking valid entries of gpcCountPerSmcEng. A value of + * 0 in bit index i indicates that engine i will keep its current + * configuration. 
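+ * For example (illustrative only), updateSmcEngMask = 0x3 with
+ * gpcCountPerSmcEng[0] = 4 and gpcCountPerSmcEng[1] = 0 assigns four GPCs
+ * to engine 0, disables engine 1, and leaves all other engines unchanged.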
+ * + * bUseAllGPCs[IN] + * - Flag specifying alternate configuration mode, indicating that in + * swizzid 0 only, all non-floorswept GPCs should be connected to the + * engine indicated by a raised bit in updateSmcEngMask. Only a single + * engine may be targeted by this operation. The gpcCountPerSmcEng + * parameter should not be used with this flag. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INSUFFICIENT_PERMISSIONS + * NV_ERR_INSUFFICIENT_RESOURCES + * NV_ERR_NOT_SUPPORTED + * NV_ERR_STATE_IN_USE + */ +#define NV2080_CTRL_CMD_GPU_CONFIGURE_PARTITION (0x20800176U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_CONFIGURE_PARTITION_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_CONFIGURE_PARTITION_PARAMS_MESSAGE_ID (0x76U) + +typedef struct NV2080_CTRL_GPU_CONFIGURE_PARTITION_PARAMS { + NvU32 swizzId; + NvU32 gpcCountPerSmcEng[NV2080_CTRL_GPU_MAX_SMC_IDS]; + NvU32 updateSmcEngMask; + NvBool bUseAllGPCs; +} NV2080_CTRL_GPU_CONFIGURE_PARTITION_PARAMS; + + +/* + * NV2080_CTRL_GPU_FAULT_PACKET + * + * This struct represents a GMMU fault packet. + * + */ +#define NV2080_CTRL_GPU_FAULT_PACKET_SIZE 32U +typedef struct NV2080_CTRL_GPU_FAULT_PACKET { + NvU8 data[NV2080_CTRL_GPU_FAULT_PACKET_SIZE]; +} NV2080_CTRL_GPU_FAULT_PACKET; + +/* + * NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT + * + * This command reports a nonreplayable fault packet to RM. + * It is only used by UVM. + * + * pFaultPacket[IN] + * - A fault packet that will be later cast to GMMU_FAULT_PACKET *. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_GPU_REPORT_NON_REPLAYABLE_FAULT (0x20800177U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS_MESSAGE_ID (0x77U) + +typedef struct NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS { + NV2080_CTRL_GPU_FAULT_PACKET faultPacket; +} NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_EXEC_REG_OPS_VGPU + * + * This command is similar to NV2080_CTRL_CMD_GPU_EXEC_REG_OPS, except it is used + * by the VGPU plugin client only. This command provides access to the subset of + * privileged registers. + * + * See confluence page "vGPU UMED Security" for details. + * + */ +#define NV2080_CTRL_CMD_GPU_EXEC_REG_OPS_VGPU (0x20800178U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_EXEC_REG_OPS_VGPU_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_EXEC_REG_OPS_VGPU_PARAMS_MESSAGE_ID (0x78U) + +typedef NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS NV2080_CTRL_GPU_EXEC_REG_OPS_VGPU_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_ENGINE_RUNLIST_PRI_BASE + * + * This command returns the runlist pri base of the specified engine(s). + * + * engineList + * Input array. + * This array specifies the engines being queried for information. + * The list of engines supported by a chip can be fetched using the + * NV2080_CTRL_CMD_GPU_GET_ENGINES/GET_ENGINES_V2 ctrl call. + * + * runlistPriBase + * Output array. 
+ * Returns the runlist pri base for the specified engines.
+ * Returns _NULL when the input is NV2080_ENGINE_TYPE_NULL, and _ERROR
+ * when the control call fails due to an invalid argument.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_ENGINE_RUNLIST_PRI_BASE (0x20800179U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS_MESSAGE_ID (0x79U)
+
+typedef struct NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS {
+    NvU32 engineList[NV2080_GPU_MAX_ENGINES_LIST_SIZE];
+    NvU32 runlistPriBase[NV2080_GPU_MAX_ENGINES_LIST_SIZE];
+    NvU32 runlistId[NV2080_GPU_MAX_ENGINES_LIST_SIZE];
+} NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS;
+
+#define NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_NULL (0xFFFFFFFFU)
+#define NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_ERROR (0xFFFFFFFBU)
+#define NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_INVALID (0xFFFFFFFFU)
+#define NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_ERROR (0xFFFFFFFBU)
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_HW_ENGINE_ID
+ *
+ * This command returns the host hardware defined engine ID of the specified engine(s).
+ *
+ * engineList
+ * Input array.
+ * This array specifies the engines being queried for information.
+ * The list of engines supported by a chip can be fetched using the
+ * NV2080_CTRL_CMD_GPU_GET_ENGINES/GET_ENGINES_V2 ctrl call.
+ *
+ * hwEngineID
+ * Output array.
+ * Returns the host hardware engine ID(s) for the specified engines.
+ * Returns _NULL when the input is NV2080_ENGINE_TYPE_NULL, and _ERROR
+ * when the control call fails due to an invalid argument.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_HW_ENGINE_ID (0x2080017aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS_MESSAGE_ID (0x7AU)
+
+typedef struct NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS {
+    NvU32 engineList[NV2080_GPU_MAX_ENGINES_LIST_SIZE];
+    NvU32 hwEngineID[NV2080_GPU_MAX_ENGINES_LIST_SIZE];
+} NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS;
+
+#define NV2080_CTRL_GPU_GET_HW_ENGINE_ID_NULL (0xFFFFFFFFU)
+#define NV2080_CTRL_GPU_GET_HW_ENGINE_ID_ERROR (0xFFFFFFFBU)
+
+/*
+ * NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS
+ *
+ * This command is used to retrieve the GPU's count of FBC sessions, the
+ * average number of frames captured per second, and the average FBC latency
+ * over all active sessions.
+ *
+ * sessionCount
+ * This field specifies the count of all active FBC sessions on this GPU.
+ *
+ * averageFPS
+ * This field specifies the average number of frames captured per second.
+ *
+ * averageLatency
+ * This field specifies the average FBC latency in microseconds.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+*/
+#define NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS (0x2080017bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS_MESSAGE_ID (0x7BU)
+
+typedef struct NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS {
+    NvU32 sessionCount;
+    NvU32 averageFPS;
+    NvU32 averageLatency;
+} NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS;
+
+/*
+* NV2080_CTRL_NVFBC_SW_SESSION_INFO
+*
+* processId[OUT]
+* Process id of the process owning the NvFBC session.
+* On VGX host, this will specify the vGPU plugin process id.
+* subProcessId[OUT]
+* Process id of the process owning the NvFBC session if the
+* session is on VGX guest, else the value is zero.
+* vgpuInstanceId[OUT]
+* vGPU on which the process owning the NvFBC session
+* is running if session is on VGX guest, else
+* the value is zero.
+* sessionId[OUT]
+* Unique session id of the NvFBC session.
+* sessionType[OUT]
+* Type of NvFBC session.
+* displayOrdinal[OUT]
+* Display identifier associated with the NvFBC session.
+* sessionFlags[OUT]
+* One or more of NV2080_CTRL_NVFBC_SESSION_FLAG_xxx.
+* hMaxResolution[OUT]
+* Max horizontal resolution supported by the NvFBC session.
+* vMaxResolution[OUT]
+* Max vertical resolution supported by the NvFBC session.
+* hResolution[OUT]
+* Horizontal resolution requested by caller in grab call.
+* vResolution[OUT]
+* Vertical resolution requested by caller in grab call.
+* averageFPS[OUT]
+* Average no. of frames captured per second.
+* averageLatency[OUT]
+* Average frame capture latency in microseconds.
+*/
+
+#define NV2080_CTRL_NVFBC_SESSION_FLAG_DIFFMAP_ENABLED 0x00000001U
+#define NV2080_CTRL_NVFBC_SESSION_FLAG_CLASSIFICATIONMAP_ENABLED 0x00000002U
+#define NV2080_CTRL_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_NO_WAIT 0x00000004U
+#define NV2080_CTRL_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_INFINITE 0x00000008U
+#define NV2080_CTRL_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_TIMEOUT 0x00000010U
+
+typedef struct NV2080_CTRL_NVFBC_SW_SESSION_INFO {
+    NvU32 processId;
+    NvU32 subProcessId;
+    NvU32 vgpuInstanceId;
+    NvU32 sessionId;
+    NvU32 sessionType;
+    NvU32 displayOrdinal;
+    NvU32 sessionFlags;
+    NvU32 hMaxResolution;
+    NvU32 vMaxResolution;
+    NvU32 hResolution;
+    NvU32 vResolution;
+    NvU32 averageFPS;
+    NvU32 averageLatency;
+} NV2080_CTRL_NVFBC_SW_SESSION_INFO;
+
+/*
+* NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO
+*
+* This command returns NVFBC software session information for the associated GPU.
+*
+* sessionInfoCount
+* This field specifies the number of entries that are filled inside
+* sessionInfoTbl. Max value of this field once returned from RM would be
+* NV2080_GPU_NVFBC_MAX_SESSION_COUNT.
+*
+* sessionInfoTbl
+* This field specifies the array in which the NVFBC session information is to
+* be returned. RM will fill the current session data in the sessionInfoTbl array
+* and then update the sessionInfoCount to reflect the current session count.
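+*
+* Usage sketch (illustrative only; rmControl() is a hypothetical wrapper,
+* not part of this header):
+*
+*   NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS p = { 0 };
+*   status = rmControl(hClient, hSubdevice,
+*                      NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO,
+*                      &p, sizeof(p));
+*   // On NV_OK, iterate p.sessionInfoTbl[0 .. p.sessionInfoCount - 1].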
+*
+* Possible status values returned are:
+* NV_OK
+* NV_ERR_NO_MEMORY
+* NV_ERR_INVALID_LOCK_STATE
+* NV_ERR_INVALID_ARGUMENT
+*/
+
+#define NV2080_GPU_NVFBC_MAX_SESSION_COUNT 256U
+
+#define NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS_MESSAGE_ID (0x7CU)
+
+typedef struct NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS {
+    NvU32 sessionInfoCount;
+    NV2080_CTRL_NVFBC_SW_SESSION_INFO sessionInfoTbl[NV2080_GPU_NVFBC_MAX_SESSION_COUNT];
+} NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS;
+
+#define NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO (0x2080017cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS_MESSAGE_ID" */
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_FIRST_ASYNC_CE_IDX
+ *
+ * This command returns the first async CE index.
+ *
+ * CE Index
+ * Output parameter.
+ * Returns the first async CE index.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_GPU_GET_FIRST_ASYNC_CE_IDX_PARAMS_MESSAGE_ID (0xe6U)
+
+typedef struct NV2080_CTRL_GPU_GET_FIRST_ASYNC_CE_IDX_PARAMS {
+    NvU32 firstAsyncCEIdx;
+} NV2080_CTRL_GPU_GET_FIRST_ASYNC_CE_IDX_PARAMS;
+#define NV2080_CTRL_CMD_GPU_GET_FIRST_ASYNC_CE_IDX (0x208001e6U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FIRST_ASYNC_CE_IDX_PARAMS_MESSAGE_ID" */
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_VMMU_SEGMENT_SIZE
+ *
+ * This command returns the VMMU segment size.
+ *
+ * vmmuSegmentSize
+ * Output parameter.
+ * Returns the VMMU segment size (in bytes)
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_VMMU_SEGMENT_SIZE (0x2080017eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS_MESSAGE_ID (0x7EU)
+
+typedef struct NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 vmmuSegmentSize, 8);
+} NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS;
+
+#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_32MB 0x02000000U
+#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_64MB 0x04000000U
+#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_128MB 0x08000000U
+#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_256MB 0x10000000U
+#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_512MB 0x20000000U
+#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_1024MB 0x40000000U
+
+/*
+ * NV2080_CTRL_GPU_GET_PARTITION_CAPACITY
+ *
+ * This command returns the count of partitions of a given size (represented by
+ * NV2080_CTRL_GPU_PARTITION_FLAG_*) which can be requested via the
+ * NV2080_CTRL_CMD_GPU_SET_PARTITIONS ctrl call.
+ * Note that this API does not "reserve" any partitions, and there is no
+ * guarantee that the reported count of available partitions of a given size
+ * will remain consistent following creation of partitions of a different size
+ * through NV2080_CTRL_CMD_GPU_SET_PARTITIONS.
+ * Note that this API is unsupported if SMC is feature-disabled.
+ *
+ * partitionFlag[IN]
+ * - Partition flag indicating size of requested partitions
+ *
+ * partitionCount[OUT]
+ * - Available number of partitions of the given size which can currently be created.
+ *
+ * availableSpans[OUT]
+ * - For each partition able to be created of the specified size, the span
+ * it could occupy.
+ *
+ * availableSpansCount[OUT]
+ * - Number of valid entries in availableSpans.
+ *
+ * totalPartitionCount[OUT]
+ * - Total number of partitions of the given size which can be created.
+ *
+ * totalSpans[OUT]
+ * - List of spans which can possibly be occupied by partitions of the
+ * given type.
+ *
+ * totalSpansCount[OUT]
+ * - Number of valid entries in totalSpans.
+ *
+ * bStaticInfo[IN]
+ * - Flag indicating that the client requests only the information from
+ * totalPartitionCount and totalSpans.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_PARTITION_CAPACITY (0x20800181U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS_MESSAGE_ID (0x81U)
+
+typedef struct NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS {
+    NvU32 partitionFlag;
+    NvU32 partitionCount;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PARTITION_SPAN availableSpans[NV2080_CTRL_GPU_MAX_PARTITIONS], 8);
+    NvU32 availableSpansCount;
+    NvU32 totalPartitionCount;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PARTITION_SPAN totalSpans[NV2080_CTRL_GPU_MAX_PARTITIONS], 8);
+    NvU32 totalSpansCount;
+    NvBool bStaticInfo;
+} NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_CACHED_INFO
+ *
+ * This command returns cached (SW-only) GPU information for the associated GPU.
+ * Requests to retrieve GPU information use a list of one or more NV2080_CTRL_GPU_INFO
+ * structures.
+ * The gpuInfoList is aligned with NV2080_CTRL_GPU_GET_INFO_V2_PARAMS due to security concerns.
+ *
+ * gpuInfoListSize
+ * This field specifies the number of entries on the caller's
+ * gpuInfoList.
+ * gpuInfoList
+ * This field specifies a pointer in the caller's address space
+ * to the buffer into which the GPU information is to be returned.
+ * This buffer must be at least as big as gpuInfoListSize multiplied
+ * by the size of the NV2080_CTRL_GPU_INFO structure.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_OPERATING_SYSTEM
+ */
+#define NV2080_CTRL_CMD_GPU_GET_CACHED_INFO (0x20800182U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_CACHED_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_CACHED_INFO_PARAMS_MESSAGE_ID (0x82U)
+
+typedef NV2080_CTRL_GPU_GET_INFO_V2_PARAMS NV2080_CTRL_GPU_GET_CACHED_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_GPU_SET_PARTITIONING_MODE
+ *
+ * This command sets the global partitioning mode for this GPU.
+ * This command may not be sent to a GPU with any active partitions.
+ * This command may be used to set the following modes:
+ *
+ * NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING
+ * NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_LEGACY
+ * This is the default mode. While this GPU is in this mode, no partitions
+ * will be allowed to be created via SET_PARTITIONS - a client must set one
+ * of the below modes prior to partitioning the GPU. When a client sets a
+ * GPU into this mode, any performance changes resulting from partitions
+ * made while in either of the below modes will be cleared. A
+ * physical-function-level reset is required after setting this mode.
+ *
+ * NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_MAX_PERF
+ * In this mode, when the GPU is partitioned, each partition will have the
+ * maximum possible performance which can be evenly distributed among all
+ * partitions. The total performance of the GPU, taking into account all
+ * partitions created in this mode, may be less than that of a GPU running
+ * in legacy non-SMC mode. Partitions created while in this mode require a
+ * physical-function-level reset before the partitioning may take full
+ * effect. Destroying all partitions while in this mode may be
+ * insufficient to restore full performance to the GPU - only by setting
+ * the mode to _LEGACY can this be achieved. A physical-function-level
+ * reset is NOT required after setting this mode.
+ *
+ * NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_FAST_RECONFIG
+ * By setting this mode, the performance of the GPU will be restricted such
+ * that all partitions will have a consistent fraction of the total
+ * available performance, which may be less than the maximum possible
+ * performance available to each partition. Creating or destroying
+ * partitions on this GPU while in this mode will not require a
+ * physical-function-level reset, and will not affect other active
+ * partitions. Destroying all partitions while in this mode may be
+ * insufficient to restore full performance to the GPU - only by setting
+ * the mode to _LEGACY can this be achieved. A physical-function-level
+ * reset is required after setting this mode.
+ *
+ * Parameters:
+ * partitioningMode[IN]
+ * - Partitioning Mode to set for this GPU.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_STATE
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_SET_PARTITIONING_MODE (0x20800183U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING 1:0
+#define NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_LEGACY 0U
+#define NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_MAX_PERF 1U
+#define NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_FAST_RECONFIG 2U
+
+#define NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS_MESSAGE_ID (0x83U)
+
+typedef struct NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS {
+    NvU32 partitioningMode;
+} NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS;
+
+
+
+/* NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_INFO
+ *
+ * This structure describes the resources available in a partition of a
+ * given type.
+ *
+ * [OUT] partitionFlag
+ * - Flags to specify in NV2080_CTRL_CMD_GPU_SET_PARTITIONS to request this
+ * partition
+ *
+ * [OUT] grCount
+ * - Total number of SMC engines/GR engines (including the GFX capable ones in this partition)
+ *
+ * [OUT] gfxGrCount
+ * - Number of SMC engines/GR engines capable of GFX. This is a subset of the engines included in grCount
+ *
+ * [OUT] gpcCount
+ * - Number of GPCs in this partition, including the GFX Capable ones.
+ *
+ * [OUT] virtualGpcCount
+ * - Number of virtualized GPCs in this partition, including the GFX Capable ones.
+ *
+ * [OUT] gfxGpcCount
+ * - Number of GFX Capable GPCs in this partition. This is a subset of the GPCs included in gpcCount.
+ * + * [OUT] veidCount + * - Number of VEIDS in this partition + * + * [OUT] smCount + * - Number of SMs in this partition + * + * [OUT] ceCount + * - Copy Engines in this partition + * + * [OUT] nvEncCount + * - Encoder Engines in this partition + * + * [OUT] nvDecCount + * - Decoder Engines in this partition + * + * [OUT] nvJpgCount + * - Jpg Engines in this partition + * + * [OUT] nvOfaCount + * - Ofa engines in this partition + * [OUT] memorySize + * - Total available memory within this partition + */ +typedef struct NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_INFO { + NvU32 partitionFlag; + NvU32 grCount; + NvU32 gfxGrCount; + NvU32 gpcCount; + NvU32 virtualGpcCount; + NvU32 gfxGpcCount; + NvU32 veidCount; + NvU32 smCount; + NvU32 ceCount; + NvU32 nvEncCount; + NvU32 nvDecCount; + NvU32 nvJpgCount; + NvU32 nvOfaCount; + NV_DECLARE_ALIGNED(NvU64 memorySize, 8); +} NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_INFO; + +/* + * NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS + * + * This command returns information regarding GPU partitions which can be + * requested via NV2080_CTRL_CMD_GPU_SET_PARTITIONS. + * + * [OUT] descCount + * - Number of valid partition types + * + * [OUT] partitionDescs + * - Information describing available partitions + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS_MESSAGE_ID (0x85U) + +typedef struct NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS { + NvU32 descCount; + // C form: NV2080_CTRL_GPU_DESCRIBE_PARTITION_INFO partitionDescs[NV2080_CTRL_GPU_PARTITION_MAX_TYPES]; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_INFO partitionDescs[NV2080_CTRL_GPU_PARTITION_MAX_TYPES], 8); +} NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS; + +#define NV2080_CTRL_CMD_GPU_DESCRIBE_PARTITIONS (0x20800185U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS_MESSAGE_ID" */ + + + +/* + * NV2080_CTRL_CMD_GPU_GET_MAX_SUPPORTED_PAGE_SIZE + * + * This command returns information regarding maximum page size supported + * by GMMU on the platform on which RM is running. + * + * [OUT] maxSupportedPageSize + * - Maximum local vidmem page size supported by GMMU of a given GPU (HW) + * on a given platform (OS) + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_MAX_SUPPORTED_PAGE_SIZE (0x20800188U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS_MESSAGE_ID (0x88U) + +typedef struct NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 maxSupportedPageSize, 8); +} NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS; + + + +/* + * NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC + * + * This command returns the max number of MMUs per GPC + * + * gpcId [IN] + * Logical GPC id + * count [OUT] + * The number of MMUs per GPC + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. When SMC is enabled, this + * is a mandatory parameter. 
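+ *
+ * Usage note (editor's sketch, based only on the description above): when
+ * SMC is disabled, a zero-initialized grRouteInfo is presumably sufficient;
+ * when SMC is enabled, the caller must use this parameter to route the call
+ * to the intended GR engine.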
+ */ +#define NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC_PARAMS_MESSAGE_ID (0x8AU) + +typedef struct NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC_PARAMS { + NvU32 gpcId; + NvU32 count; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_NUM_MMUS_PER_GPC (0x2080018aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_GPU_GET_ACTIVE_PARTITION_IDS + * + * This command returns the GPU partition IDs for all active partitions + * If GPU is not partitioned, the control call will return partition count as "0" + * + * swizzId[OUT] + * - HW Partition ID associated with the active partitions + * + * partitionCount[OUT] + * - Number of active partitions in system + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS_MESSAGE_ID (0x8BU) + +typedef struct NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS { + NvU32 swizzId[NV2080_CTRL_GPU_MAX_PARTITION_IDS]; + NvU32 partitionCount; +} NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_ACTIVE_PARTITION_IDS (0x2080018bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS_MESSAGE_ID" */ + + + +/* + * NV2080_CTRL_CMD_GPU_GET_PIDS + * + * Given a resource identifier and its type, this command returns a set of + * process identifiers (PIDs) of processes that have instantiated this resource. + * For example, given a class number, this command returns a list of all + * processes with clients that have matching object allocations. + * This is a SMC aware call and the scope of the information gets restricted + * based on partition subscription. + * The call enforces partition subscription if SMC is enabled, and client is not + * a monitoring client. + * Monitoring clients get global information without any scope based filtering. + * Monitoring clients are also not expected to subscribe to a partition when + * SMC is enabled. + * + * idType + * Type of the resource identifier. See below for a list of valid types. + * id + * Resource identifier. + * pidTblCount + * Number of entries in the PID table. + * pidTbl + * Table which will contain the PIDs. Each table entry is of type NvU32. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_GPU_GET_PIDS (0x2080018dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PIDS_PARAMS_MESSAGE_ID" */ + +/* max size of pidTable */ +#define NV2080_CTRL_GPU_GET_PIDS_MAX_COUNT 950U + +#define NV2080_CTRL_GPU_GET_PIDS_PARAMS_MESSAGE_ID (0x8DU) + +typedef struct NV2080_CTRL_GPU_GET_PIDS_PARAMS { + NvU32 idType; + NvU32 id; + NvU32 pidTblCount; + NvU32 pidTbl[NV2080_CTRL_GPU_GET_PIDS_MAX_COUNT]; +} NV2080_CTRL_GPU_GET_PIDS_PARAMS; + +/* + * Use class NV20_SUBDEVICE_0 with NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_CLASS to query + * PIDs with or without GPU contexts. For any other class id, PIDs only with GPU + * contexts are returned. + */ +#define NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_CLASS (0x00000000U) +#define NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_VGPU_GUEST (0x00000001U) + +/* + * NV2080_CTRL_SMC_SUBSCRIPTION_INFO + * + * This structure contains information about the SMC subscription type. 
+ * If MIG is enabled, a valid ID is returned; otherwise it is set to PARTITIONID_INVALID. + * + * computeInstanceId + * This parameter returns a valid compute instance ID + * gpuInstanceId + * This parameter returns a valid GPU instance ID + */ +typedef struct NV2080_CTRL_SMC_SUBSCRIPTION_INFO { + NvU32 computeInstanceId; + NvU32 gpuInstanceId; +} NV2080_CTRL_SMC_SUBSCRIPTION_INFO; + +/* + * NV2080_CTRL_GPU_PID_INFO_VIDEO_MEMORY_USAGE_DATA + * + * This structure contains the video memory usage information. + * + * memPrivate + * This parameter returns the amount of memory exclusively owned + * by (i.e. private to) the client + * memSharedOwned + * This parameter returns the amount of shared memory owned by the client + * memSharedDuped + * This parameter returns the amount of shared memory duped by the client + * protectedMemPrivate + * This parameter returns the amount of protected memory exclusively owned + * by (i.e. private to) the client whenever memory protection is enabled + * protectedMemSharedOwned + * This parameter returns the amount of shared protected memory owned by the + * client whenever memory protection is enabled + * protectedMemSharedDuped + * This parameter returns the amount of shared protected memory duped by the + * client whenever memory protection is enabled + */ +typedef struct NV2080_CTRL_GPU_PID_INFO_VIDEO_MEMORY_USAGE_DATA { + NV_DECLARE_ALIGNED(NvU64 memPrivate, 8); + NV_DECLARE_ALIGNED(NvU64 memSharedOwned, 8); + NV_DECLARE_ALIGNED(NvU64 memSharedDuped, 8); + NV_DECLARE_ALIGNED(NvU64 protectedMemPrivate, 8); + NV_DECLARE_ALIGNED(NvU64 protectedMemSharedOwned, 8); + NV_DECLARE_ALIGNED(NvU64 protectedMemSharedDuped, 8); +} NV2080_CTRL_GPU_PID_INFO_VIDEO_MEMORY_USAGE_DATA; + +#define NV2080_CTRL_GPU_PID_INFO_INDEX_VIDEO_MEMORY_USAGE (0x00000000U) + +#define NV2080_CTRL_GPU_PID_INFO_INDEX_MAX NV2080_CTRL_GPU_PID_INFO_INDEX_VIDEO_MEMORY_USAGE + +typedef union NV2080_CTRL_GPU_PID_INFO_DATA { + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PID_INFO_VIDEO_MEMORY_USAGE_DATA vidMemUsage, 8); +} NV2080_CTRL_GPU_PID_INFO_DATA; + + +/* + * NV2080_CTRL_GPU_PID_INFO + * + * This structure contains the per-PID information. Each type of information + * retrievable via NV2080_CTRL_CMD_GET_PID_INFO is assigned a unique index + * below. In addition, the process for which the lookup is performed is also + * specified. + * This is an SMC-aware call and the scope of the information gets restricted + * based on partition subscription. + * The call enforces partition subscription if SMC is enabled and the client is not + * a monitoring client. + * Monitoring clients get global information without any scope-based filtering. + * Monitoring clients are also not expected to subscribe to a partition when + * SMC is enabled. + * + * pid + * This parameter specifies the PID of the process for which information is + * to be queried. + * index + * This parameter specifies the type of information being queried for the + * process of interest. + * result + * This parameter returns the result of the query's execution. + * data + * This parameter returns the data corresponding to the information which is + * being queried. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + * + * Valid PID information indices are: + * + * NV2080_CTRL_GPU_PID_INFO_INDEX_VIDEO_MEMORY_USAGE + * This index is used to request the amount of video memory on this GPU + * allocated to the process.
+ */ +typedef struct NV2080_CTRL_GPU_PID_INFO { + NvU32 pid; + NvU32 index; + NvU32 result; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PID_INFO_DATA data, 8); + NV2080_CTRL_SMC_SUBSCRIPTION_INFO smcSubscription; +} NV2080_CTRL_GPU_PID_INFO; + +/* + * NV2080_CTRL_CMD_GPU_GET_PID_INFO + * + * This command allows querying per-process information from the RM. Clients + * request information by specifying a unique informational index and the + * Process ID of the process in question. The result is set to indicate success + * and the information queried (if available) is returned in the data parameter. + * + * pidInfoListCount + * The number of valid entries in the pidInfoList array. + * pidInfoList + * An array of NV2080_CTRL_GPU_PID_INFO of maximum length + * NV2080_CTRL_GPU_GET_PID_INFO_MAX_COUNT. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_GPU_GET_PID_INFO (0x2080018eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PID_INFO_PARAMS_MESSAGE_ID" */ + +/* max size of pidInfoList */ +#define NV2080_CTRL_GPU_GET_PID_INFO_MAX_COUNT 200U + +#define NV2080_CTRL_GPU_GET_PID_INFO_PARAMS_MESSAGE_ID (0x8EU) + +typedef struct NV2080_CTRL_GPU_GET_PID_INFO_PARAMS { + NvU32 pidInfoListCount; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PID_INFO pidInfoList[NV2080_CTRL_GPU_GET_PID_INFO_MAX_COUNT], 8); +} NV2080_CTRL_GPU_GET_PID_INFO_PARAMS; + + + +/*! + * NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT + * + * @brief Handle VF PRI faults + * + * faultType + * BAR1, BAR2, PHYSICAL or UNBOUND_INSTANCE + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * + */ + +#define NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT (0x20800192U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_TYPE_INVALID 0U +#define NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_TYPE_BAR1 1U +#define NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_TYPE_BAR2 2U +#define NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_TYPE_PHYSICAL 3U +#define NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_TYPE_UNBOUND_INSTANCE 4U + +#define NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS_MESSAGE_ID (0x92U) + +typedef struct NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS { + NvU32 faultType; +} NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS; + + +/*! + * Compute policy types to be specified by callers to set a config. + * + * _TIMESLICE + * Set the timeslice config for the requested GPU. + * Check @ref NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_DATA_TIMESLICE for + * permissible timeslice values. + */ +#define NV2080_CTRL_GPU_COMPUTE_POLICY_TIMESLICE 0U +#define NV2080_CTRL_GPU_COMPUTE_POLICY_MAX 1U + +/*! + * Enum consisting of permissible timeslice options that can be configured + * for a GPU. These can be queried by compute clients, and the exact + * timeslice values can be chosen appropriately as per GPU support. + */ +typedef enum NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_DATA_TIMESLICE { + NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_DEFAULT = 0, + NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_SHORT = 1, + NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_MEDIUM = 2, + NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_LONG = 3, + NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_MAX = 4, +} NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_DATA_TIMESLICE; + +typedef struct NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG { + /*! + * NV2080_CTRL_GPU_COMPUTE_POLICY_ + */ + NvU32 type; + + /*!
+ * Union of type-specific data + */ + union { + NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_DATA_TIMESLICE timeslice; + } data; +} NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG; + +#define NV2080_CTRL_GPU_SET_COMPUTE_POLICY_CONFIG_PARAMS_MESSAGE_ID (0x94U) + +typedef struct NV2080_CTRL_GPU_SET_COMPUTE_POLICY_CONFIG_PARAMS { + NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG config; +} NV2080_CTRL_GPU_SET_COMPUTE_POLICY_CONFIG_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_COMPUTE_POLICY_CONFIG + * + * This command retrieves all compute policy configs for the associated GPU. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV2080_CTRL_CMD_GPU_GET_COMPUTE_POLICY_CONFIG (0x20800195U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS_MESSAGE_ID" */ + +/*! + * This define limits the max number of policy configs that can be handled by + * NV2080_CTRL_CMD_GPU_GET_COMPUTE_POLICY_CONFIG command. + * + * @note Needs to be kept in sync with (greater than or equal to) NV2080_CTRL_GPU_COMPUTE_POLICY_MAX. + */ + +#define NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_LIST_MAX 32U + +#define NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS_MESSAGE_ID (0x95U) + +typedef struct NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS { + NvU32 numConfigs; + + /*! + * C form: + * NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG configList[NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_LIST_MAX]; + */ + NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG configList[NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_LIST_MAX]; +} NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_GFID + * + * This command returns the GFID (GPU Function ID) for a given SR-IOV + * Virtual Function (VF) of the physical GPU. + * + * domain [IN] + * This field specifies the respective domain of the PCI device. + * bus [IN] + * This field specifies the bus id for a given VF. + * device [IN] + * This field specifies the device id for a given VF. + * func [IN] + * This field specifies the function id for a given VF. + * gfid[OUT] + * - This field returns the GFID for a given VF BDF. + * gfidMask[OUT] + * - This field returns the GFID mask value. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ + +#define NV2080_CTRL_CMD_GPU_GET_GFID (0x20800196U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_GFID_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_GFID_PARAMS_MESSAGE_ID (0x96U) + +typedef struct NV2080_CTRL_GPU_GET_GFID_PARAMS { + NvU32 domain; + NvU8 bus; + NvU8 device; + NvU8 func; + NvU32 gfid; + NvU32 gfidMask; +} NV2080_CTRL_GPU_GET_GFID_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY + * + * This command informs the GPU driver that the GPU partition associated with + * a given GFID has been activated or will be deactivated. + * + * gfid[IN] + * - The GPU function identifier for a given VF BDF + * bEnable [IN] + * - Set to NV_TRUE if the GPU partition has been activated. + * - Set to NV_FALSE if the GPU partition will be deactivated. + * fabricPartitionId [IN] + * - Set the fabric manager partition ID during partition activation. + * - Ignored during partition deactivation.
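A usage sketch for NV2080_CTRL_CMD_GPU_GET_GFID defined above, again using the hypothetical rmSubdeviceControl() helper from the first sketch; the VF bus/device/function values are placeholder examples, not real addresses.

static void queryGfid(NvHandle hClient, NvHandle hSubdevice)
{
    NV2080_CTRL_GPU_GET_GFID_PARAMS gf;
    memset(&gf, 0, sizeof(gf));

    gf.domain = 0;    /* example VF BDF; substitute the real one */
    gf.bus    = 0x3b;
    gf.device = 0x00;
    gf.func   = 0x1;

    if (rmSubdeviceControl(hClient, hSubdevice, NV2080_CTRL_CMD_GPU_GET_GFID,
                           &gf, sizeof(gf)) == NV_OK)
        printf("GFID %u (mask 0x%x)\n", gf.gfid, gf.gfidMask);
}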
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ + +#define NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY (0x20800197U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY_PARAMS_MESSAGE_ID (0x97U) + +typedef struct NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY_PARAMS { + NvU32 gfid; + NvBool bEnable; + NvU32 fabricPartitionId; +} NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY_PARAMS; + +/*! + * NV2080_CTRL_CMD_GPU_VALIDATE_MEM_MAP_REQUEST + * + * @brief Validate the address range for memory map request by comparing the + * user-supplied address range with GPU BAR0/BAR1 range. + * + * @param[in] addressStart Start address for memory map request + * @param[in] addressLength Length for memory map request + * @param[out] protection NV_PROTECT_READ_WRITE, if both read/write is allowed + * NV_PROTECT_READABLE, if only read is allowed + * + * Possible status values returned are: + * NV_OK + * NV_ERR_PROTECTION_FAULT + * + */ +#define NV2080_CTRL_CMD_GPU_VALIDATE_MEM_MAP_REQUEST (0x20800198U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS_MESSAGE_ID (0x98U) + +typedef struct NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS { + NV_DECLARE_ALIGNED(NvU64 addressStart, 8); + NV_DECLARE_ALIGNED(NvU64 addressLength, 8); + NvU32 protection; +} NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_ENGINE_LOAD_TIMES + * + * This command is used to retrieve the load time (latency) of each engine. + * + * engineCount + * This field specifies the number of entries of the following + * three arrays. + * + * engineList[NV2080_GPU_MAX_ENGINE_OBJECTS] + * An array of NvU32 which stores each engine's descriptor. + * + * engineStateLoadTime[NV2080_GPU_MAX_ENGINE_OBJECTS] + * An array of NvU64 which stores each engine's load time. + * + * engineIsInit[NV2080_GPU_MAX_ENGINE_OBJECTS] + * An array of NvBool which stores each engine's initialization status. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_GPU_GET_ENGINE_LOAD_TIMES (0x2080019bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS 0xC8U + +#define NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS_MESSAGE_ID (0x9BU) + +typedef struct NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS { + NvU32 engineCount; + NvU32 engineList[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS]; + NV_DECLARE_ALIGNED(NvU64 engineStateLoadTime[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS], 8); + NvBool engineIsInit[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS]; +} NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_ID_NAME_MAPPING + * + * This command is used to retrieve the mapping of engine ID and engine Name. + * + * engineCount + * This field specifies the size of the mapping. + * + * engineID + * An array of NvU32 which stores each engine's descriptor. + * + * engineName + * An array of char[100] which stores each engine's name.
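A sketch of NV2080_CTRL_CMD_GPU_GET_ENGINE_LOAD_TIMES defined above, built on the same assumed rmSubdeviceControl() helper; the load-time values are printed in whatever unit the RM reports, which this header does not specify.

static void dumpEngineLoadTimes(NvHandle hClient, NvHandle hSubdevice)
{
    NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS lt;
    memset(&lt, 0, sizeof(lt));

    if (rmSubdeviceControl(hClient, hSubdevice,
                           NV2080_CTRL_CMD_GPU_GET_ENGINE_LOAD_TIMES,
                           &lt, sizeof(lt)) == NV_OK)
    {
        for (NvU32 i = 0; i < lt.engineCount; i++)
        {
            /* Only engines that completed init report a meaningful time. */
            if (lt.engineIsInit[i])
                printf("engine 0x%x: load time %llu\n", lt.engineList[i],
                       (unsigned long long)lt.engineStateLoadTime[i]);
        }
    }
}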
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_GPU_GET_ID_NAME_MAPPING (0x2080019cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ID_NAME_MAPPING_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_ID_NAME_MAPPING_PARAMS_MESSAGE_ID (0x9CU) + +typedef struct NV2080_CTRL_GPU_GET_ID_NAME_MAPPING_PARAMS { + NvU32 engineCount; + NvU32 engineID[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS]; + char engineName[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS][100]; +} NV2080_CTRL_GPU_GET_ID_NAME_MAPPING_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_EXEC_REG_OPS_NOPTRS + * + * Same as above NV2080_CTRL_CMD_GPU_EXEC_REG_OPS except that this CTRL CMD will + * not allow any embedded pointers. The regOps array is inlined as part of the + * struct. + * NOTE: This is intended for the GSP plugin only, as it may override regOp access + * restrictions + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_GPU_EXEC_REG_OPS_NOPTRS (0x2080019dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_EXEC_REG_OPS_NOPTRS_PARAMS_MESSAGE_ID" */ + +/* setting this to 100 keeps it right below 4k in size */ +#define NV2080_CTRL_REG_OPS_ARRAY_MAX 100U +#define NV2080_CTRL_GPU_EXEC_REG_OPS_NOPTRS_PARAMS_MESSAGE_ID (0x9DU) + +typedef struct NV2080_CTRL_GPU_EXEC_REG_OPS_NOPTRS_PARAMS { + NvHandle hClientTarget; + NvHandle hChannelTarget; + NvU32 bNonTransactional; + NvU32 reserved00[2]; + NvU32 regOpCount; + NV2080_CTRL_GPU_REG_OP regOps[NV2080_CTRL_REG_OPS_ARRAY_MAX]; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GPU_EXEC_REG_OPS_NOPTRS_PARAMS; + +#define NV2080_CTRL_GPU_SKYLINE_INFO_MAX_SKYLINES 9U +#define NV2080_CTRL_GPU_SKYLINE_INFO_MAX_NON_SINGLETON_VGPCS 32U +/*! + * NV2080_CTRL_GPU_SKYLINE_INFO + * skylineVgpcSize[OUT] + * - TPC count of non-singleton VGPCs + * singletonVgpcMask[OUT] + * - Mask of active Singletons + * maxInstances[OUT] + * - Max allowed instances of this skyline concurrently on a GPU + * computeSizeFlag + * - One of NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_* flags which is associated with this skyline + */ +typedef struct NV2080_CTRL_GPU_SKYLINE_INFO { + NvU8 skylineVgpcSize[NV2080_CTRL_GPU_SKYLINE_INFO_MAX_NON_SINGLETON_VGPCS]; + NvU32 singletonVgpcMask; + NvU32 maxInstances; + NvU32 computeSizeFlag; +} NV2080_CTRL_GPU_SKYLINE_INFO; + +/*! + * NV2080_CTRL_GPU_GET_SKYLINE_INFO_PARAMS + * skylineTable[OUT] + * - TPC count of non-singleton VGPCs + * - Mask of singleton vGPC IDs active + * - Max Instances of this skyline possible concurrently + * - Associated compute size with the indexed skyline + * validEntries[OUT] + * - Number of entries which contain valid info in skylineTable + */ +#define NV2080_CTRL_GPU_GET_SKYLINE_INFO_PARAMS_MESSAGE_ID (0x9FU) + +typedef struct NV2080_CTRL_GPU_GET_SKYLINE_INFO_PARAMS { + NV2080_CTRL_GPU_SKYLINE_INFO skylineTable[NV2080_CTRL_GPU_SKYLINE_INFO_MAX_SKYLINES]; + NvU32 validEntries; +} NV2080_CTRL_GPU_GET_SKYLINE_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_SKYLINE_INFO + * + * Retrieves skyline information about the GPU. Params are sized to currently known max + * values, but will need to be modified in the future should that change. + */ +#define NV2080_CTRL_CMD_GPU_GET_SKYLINE_INFO (0x2080019fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_SKYLINE_INFO_PARAMS_MESSAGE_ID" */ + +/*!
+ * NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO + * + * [in/out] gpuId + * GPU ID for which the capabilities are queried. + * For the NV2080_CTRL_CMD_GET_P2P_CAPS control: + * If bAllCaps == NV_TRUE, this parameter is an out parameter and equals + * the GPU ID of an attached GPU. + * If bAllCaps == NV_FALSE, this parameter is an in parameter and the requester + * should set it to the ID of the GPU to be queried. + * [in] gpuUuid + * Alternative to gpuId; used to identify the target GPU for which caps are being queried. + * Option only available for Guest RPCs. + * If bUseUuid == NV_TRUE, gpuUuid is used in lieu of gpuId to identify the target GPU. + * If bUseUuid == NV_FALSE, gpuUuid is ignored and gpuId is used by default. + * If bAllCaps == NV_TRUE, gpuUuid is ignored. + * [out] p2pCaps + * Peer to peer capabilities discovered between the GPUs. + * See NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 for the list of valid values. + * [out] p2pOptimalReadCEs + * Mask of CEs to use for p2p reads over NVLink. + * [out] p2pOptimalWriteCEs + * Mask of CEs to use for p2p writes over NVLink. + * [out] p2pCapsStatus + * Status of all supported p2p capabilities. + * See NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 for the list of valid values. + * [out] busPeerId + * Bus peer ID. For an invalid or a non-existent peer this field + * has the value NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INVALID_PEER. + * [out] busEgmPeerId + * Bus EGM peer ID. For an invalid or a non-existent peer this field + * has the value NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INVALID_PEER. + */ +#define NV2080_GET_P2P_CAPS_UUID_LEN 16U + +typedef struct NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO { + NvU32 gpuId; + NvU8 gpuUuid[NV2080_GET_P2P_CAPS_UUID_LEN]; + NvU32 p2pCaps; + NvU32 p2pOptimalReadCEs; + NvU32 p2pOptimalWriteCEs; + NvU8 p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE]; + NvU32 busPeerId; + NvU32 busEgmPeerId; +} NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO; + +/*! + * NV2080_CTRL_CMD_GET_P2P_CAPS + * + * Returns peer to peer capabilities present between GPUs. + * The caller must either specify bAllCaps to query the capabilities for + * all the attached GPUs or they must pass a valid list of GPU IDs. + * + * [in] bAllCaps + * Set to NV_TRUE to query the capabilities for all the attached GPUs. + * Set to NV_FALSE and specify peerGpuCount and peerGpuCaps[].gpuId + * to retrieve the capabilities only for the specified GPUs. + * [in] bUseUuid + * Option only available for Guest RPCs. + * Set to NV_TRUE to use gpuUuid in lieu of gpuId to identify the target GPU. + * If bAllCaps == NV_TRUE, bUseUuid is ignored. + * [in/out] peerGpuCount + * The number of the peerGpuCaps entries. + * If bAllCaps == NV_TRUE, this parameter is an out parameter and equals + * the total number of the attached GPUs. + * If bAllCaps == NV_FALSE, this parameter is an in parameter and the requester + * should set it to the number of the peerGpuCaps entries. + * [in/out] peerGpuCaps + * The array of NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO entries, describing + * the peer to peer capabilities of the GPUs.
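As an illustration of NV2080_CTRL_CMD_GET_P2P_CAPS, a sketch using the NV2080_CTRL_GET_P2P_CAPS_PARAMS struct defined just below and the same hypothetical rmSubdeviceControl() helper as earlier. Note that a params struct this large is usually better heap-allocated than placed on the stack; the stack allocation here is only for brevity.

static void dumpP2pCaps(NvHandle hClient, NvHandle hSubdevice)
{
    NV2080_CTRL_GET_P2P_CAPS_PARAMS p2p;
    memset(&p2p, 0, sizeof(p2p));

    p2p.bAllCaps = NV_TRUE; /* query every attached GPU */

    if (rmSubdeviceControl(hClient, hSubdevice, NV2080_CTRL_CMD_GET_P2P_CAPS,
                           &p2p, sizeof(p2p)) == NV_OK)
    {
        for (NvU32 i = 0; i < p2p.peerGpuCount; i++)
            printf("gpuId %u: p2pCaps 0x%x\n",
                   p2p.peerGpuCaps[i].gpuId, p2p.peerGpuCaps[i].p2pCaps);
    }
}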
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT - Invalid peerGpuCount + * NV_ERR_OBJECT_NOT_FOUND - Invalid peerGpuCaps[].gpuId + */ +#define NV2080_CTRL_CMD_GET_P2P_CAPS (0x208001a0U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GET_P2P_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GET_P2P_CAPS_PARAMS_MESSAGE_ID (0xA0U) + +typedef struct NV2080_CTRL_GET_P2P_CAPS_PARAMS { + NvBool bAllCaps; + NvBool bUseUuid; + NvU32 peerGpuCount; + NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO peerGpuCaps[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS]; +} NV2080_CTRL_GET_P2P_CAPS_PARAMS; + + + +/*! + * NV2080_CTRL_GPU_COMPUTE_PROFILE + * + * This structure specifies resources in an execution partition + * + * computeSize[OUT] + * - NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_* associated with this profile + * + * gfxGpcCount[OUT] + * - Number of GFX-capable GPCs in this profile + * + * gpcCount[OUT] + * - Total Number of GPCs in this partition (including GFX Supported GPCs) + * + * veidCount[OUT] + * - Number of VEIDs allocated to this profile + * + * smCount[OUT] + * - Number of SMs usable in this profile + */ +typedef struct NV2080_CTRL_GPU_COMPUTE_PROFILE { + NvU8 computeSize; + NvU32 gfxGpcCount; + NvU32 gpcCount; + NvU32 veidCount; + NvU32 smCount; +} NV2080_CTRL_GPU_COMPUTE_PROFILE; + +/*! + * NV2080_CTRL_GPU_GET_COMPUTE_PROFILES_PARAMS + * + * This structure is used to query the compute profiles available on a GPU instance + * + * partitionFlag[IN] + * - GPU instance profile flags for which to query compute profiles + * Ignored if the subdevice is subscribed to a GPU instance + * + * profileCount[OUT] + * - Total number of profiles filled + * + * profiles[OUT] + * - NV2080_CTRL_GPU_COMPUTE_PROFILE entries filled with valid compute instance profiles + */ +#define NV2080_CTRL_GPU_GET_COMPUTE_PROFILES_PARAMS_MESSAGE_ID (0xA2U) + +typedef struct NV2080_CTRL_GPU_GET_COMPUTE_PROFILES_PARAMS { + NvU32 partitionFlag; + NvU32 profileCount; + NV2080_CTRL_GPU_COMPUTE_PROFILE profiles[NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE__SIZE]; +} NV2080_CTRL_GPU_GET_COMPUTE_PROFILES_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_COMPUTE_PROFILES (0x208001a2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_COMPUTE_PROFILES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_FABRIC_PROBE_STATE_UNSUPPORTED 0U +#define NV2080_CTRL_GPU_FABRIC_PROBE_STATE_NOT_STARTED 1U +#define NV2080_CTRL_GPU_FABRIC_PROBE_STATE_IN_PROGRESS 2U +#define NV2080_CTRL_GPU_FABRIC_PROBE_STATE_COMPLETE 3U + +#define NV2080_GPU_FABRIC_CLUSTER_UUID_LEN 16U + +#define NV2080_CTRL_GPU_FABRIC_PROBE_CAP_MC_SUPPORTED NVBIT64(0) + + + +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_DEGRADED_BW 1:0 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_NOT_SUPPORTED 0 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_TRUE 1 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_FALSE 2 + +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_ROUTE_UPDATE 3:2 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_ROUTE_UPDATE_NOT_SUPPORTED 0 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_ROUTE_UPDATE_TRUE 1 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_ROUTE_UPDATE_FALSE 2 + +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_CONNECTION_UNHEALTHY 5:4 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_CONNECTION_UNHEALTHY_NOT_SUPPORTED 0 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_CONNECTION_UNHEALTHY_TRUE 1 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_CONNECTION_UNHEALTHY_FALSE 2 + +#define 
NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_ACCESS_TIMEOUT_RECOVERY 7:6 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_ACCESS_TIMEOUT_RECOVERY_NOT_SUPPORTED 0 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_ACCESS_TIMEOUT_RECOVERY_TRUE 1 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_ACCESS_TIMEOUT_RECOVERY_FALSE 2 + +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_INCORRECT_CONFIGURATION 11:8 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_INCORRECT_CONFIGURATION_NOT_SUPPORTED 0 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_INCORRECT_CONFIGURATION_NONE 1 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_INCORRECT_CONFIGURATION_INCORRECT_SYSGUID 2 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_INCORRECT_CONFIGURATION_INCORRECT_CHASSIS_SN 3 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_INCORRECT_CONFIGURATION_NO_PARTITION 4 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_MASK_INCORRECT_CONFIGURATION_INSUFFICIENT_NVLINKS 5 + +#define NV2080_CTRL_GPU_FABRIC_HEALTH_SUMMARY_NOT_SUPPORTED 0 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_SUMMARY_HEALTHY 1 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_SUMMARY_UNHEALTHY 2 +#define NV2080_CTRL_GPU_FABRIC_HEALTH_SUMMARY_LIMITED_CAPACITY 3 + +/*! + * NV2080_CTRL_CMD_GET_GPU_FABRIC_PROBE_INFO_PARAMS + * + * This structure provides the GPU<-->FM probe state info on NVSwitch based + * systems + * + * state[OUT] + * - Current state of GPU<-->FM probe req/rsp + * Following values can be returned + * _UNSUPPORTED - system does not support this feature + * _NOT_STARTED - probe request is not yet sent to the FM + * _IN_PROGRESS - probe response is not yet received + * _COMPLETE - probe response is received + * When state is NV2080_CTRL_GPU_FABRIC_PROBE_STATE_COMPLETE + * status has to be checked for probe response success/failure + * status[OUT] + * - Inband Probe response status + * A GPU which returns NV_OK status upon receiving the probe response + * can participate in P2P + * clusterUuid[OUT] + * - Uuid of the cluster to which this node belongs + * fabricPartitionId[OUT] + * - Fabric partition Id + * fabricCaps[OUT] + * - Summary of fabric capabilities received from probe resp + * Possible values are + * NV2080_CTRL_GPU_FABRIC_PROBE_CAP_* + * fabricCliqueId[OUT] + * - Unique ID of a set of GPUs within a fabric partition that can perform P2P + * fabricHealthMask[OUT] + * - Mask where bits indicate different status about the health of the fabric + * fabricHealthSummary[OUT] + * - Summary of the Fabric Health + */ +#define NV2080_CTRL_CMD_GET_GPU_FABRIC_PROBE_INFO_PARAMS_MESSAGE_ID (0xA3U) + +typedef struct NV2080_CTRL_CMD_GET_GPU_FABRIC_PROBE_INFO_PARAMS { + NvU8 state; + NV_STATUS status; + NvU8 clusterUuid[NV2080_GPU_FABRIC_CLUSTER_UUID_LEN]; + NvU16 fabricPartitionId; + NV_DECLARE_ALIGNED(NvU64 fabricCaps, 8); + NvU32 fabricCliqueId; + NvU32 fabricHealthMask; + NvU8 fabricHealthSummary; +} NV2080_CTRL_CMD_GET_GPU_FABRIC_PROBE_INFO_PARAMS; + +#define NV2080_CTRL_CMD_GET_GPU_FABRIC_PROBE_INFO (0x208001a3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_CMD_GET_GPU_FABRIC_PROBE_INFO_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_GPU_GET_CHIP_DETAILS + * + * This command retrieves and constructs the GPU partnumber from the VBIOS. 
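A sketch of NV2080_CTRL_CMD_GET_GPU_FABRIC_PROBE_INFO defined above, again assuming the hypothetical rmSubdeviceControl() helper: per the doc comment, the probe result is only meaningful once state reaches _COMPLETE, and status must then be checked for success.

static void checkFabricProbe(NvHandle hClient, NvHandle hSubdevice)
{
    NV2080_CTRL_CMD_GET_GPU_FABRIC_PROBE_INFO_PARAMS probe;
    memset(&probe, 0, sizeof(probe));

    if (rmSubdeviceControl(hClient, hSubdevice,
                           NV2080_CTRL_CMD_GET_GPU_FABRIC_PROBE_INFO,
                           &probe, sizeof(probe)) != NV_OK)
        return;

    if (probe.state == NV2080_CTRL_GPU_FABRIC_PROBE_STATE_COMPLETE &&
        probe.status == NV_OK)
    {
        /* Probe succeeded: this GPU can participate in fabric P2P. */
        printf("fabric partition %u, clique %u\n",
               probe.fabricPartitionId, probe.fabricCliqueId);
    }
}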
+ * + * The following data are currently supported: + * + * pciDevId + * The PCI device ID + * + * chipSku + * The chip SKU information + * + * chipMajor + * The chip major number + * + * chipMinor + * The chip minor number + * + */ +#define NV2080_CTRL_CMD_GPU_GET_CHIP_DETAILS (0x208001a4U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_CHIP_DETAILS_PARAMS_MESSAGE_ID" */ + +/* + * The string format for a GPU part number + * The GPU part number is formatted with 4 hexadecimal digits for the PCI device ID, the chip SKU string, + * the chip major number, and then the chip minor number. + * Ordering of the fields for the string format must be synced with the NV2080_CTRL_GPU_GET_CHIP_DETAILS_PARAMS + * struct fields. + */ +#define GPU_PART_NUMBER_FMT "%04X-%s-%X%X" + +/* The maximum length for the chip SKU */ +#define NV2080_MAX_CHIP_SKU_LENGTH 0x00000004U + +#define NV2080_CTRL_GPU_GET_CHIP_DETAILS_PARAMS_MESSAGE_ID (0xA4U) + +typedef struct NV2080_CTRL_GPU_GET_CHIP_DETAILS_PARAMS { + NvU32 pciDevId; + NvU8 chipSku[NV2080_MAX_CHIP_SKU_LENGTH]; + NvU32 chipMajor; + NvU32 chipMinor; +} NV2080_CTRL_GPU_GET_CHIP_DETAILS_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_MOVE_RUNLISTS_ALLOCATION_TO_SUBHEAP + * + * This command moves the runlists allocation of the specified partition to + * that partition's subheap. + * + * swizzId[IN] + * - HW Partition ID associated with the requested partition. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_MOVE_RUNLISTS_ALLOCATION_TO_SUBHEAP (0x208001a5U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_MOVE_RUNLISTS_ALLOCATION_TO_SUBHEAP_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_MOVE_RUNLISTS_ALLOCATION_TO_SUBHEAP_PARAMS_MESSAGE_ID (0xA5U) + +typedef struct NV2080_CTRL_GPU_MOVE_RUNLISTS_ALLOCATION_TO_SUBHEAP_PARAMS { + NvU32 swizzId; +} NV2080_CTRL_GPU_MOVE_RUNLISTS_ALLOCATION_TO_SUBHEAP_PARAMS; + +#define NV2080_CTRL_CMD_GPU_MIGRATABLE_OPS (0x208001a6U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_MIGRATABLE_OPS_PARAMS_MESSAGE_ID" */ +#define NV2080_CTRL_CMD_GPU_MIGRATABLE_OPS_GSP (0x208001a7U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_MIGRATABLE_OPS_GSP_PARAMS_MESSAGE_ID" */ +#define NV2080_CTRL_CMD_GPU_MIGRATABLE_OPS_VGPU (0x208001a8U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_MIGRATABLE_OPS_VGPU_PARAMS_MESSAGE_ID" */ + +/* setting this to 50 keeps it below 4k in size */ +#define NV2080_CTRL_MIGRATABLE_OPS_ARRAY_MAX 50U +typedef struct NV2080_CTRL_GPU_MIGRATABLE_OPS_CMN_PARAMS { + NvHandle hClientTarget; + NvHandle hChannelTarget; + NvU32 bNonTransactional; + NvU32 regOpCount; + NvU32 smIds[NV2080_CTRL_MIGRATABLE_OPS_ARRAY_MAX]; + NV2080_CTRL_GPU_REG_OP regOps[NV2080_CTRL_MIGRATABLE_OPS_ARRAY_MAX]; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GPU_MIGRATABLE_OPS_CMN_PARAMS; + +#define NV2080_CTRL_GPU_MIGRATABLE_OPS_PARAMS_MESSAGE_ID (0xA6U) + +typedef NV2080_CTRL_GPU_MIGRATABLE_OPS_CMN_PARAMS NV2080_CTRL_GPU_MIGRATABLE_OPS_PARAMS; +#define NV2080_CTRL_GPU_MIGRATABLE_OPS_GSP_PARAMS_MESSAGE_ID (0xA7U) + +typedef NV2080_CTRL_GPU_MIGRATABLE_OPS_CMN_PARAMS NV2080_CTRL_GPU_MIGRATABLE_OPS_GSP_PARAMS; +#define NV2080_CTRL_GPU_MIGRATABLE_OPS_VGPU_PARAMS_MESSAGE_ID (0xA8U) + +typedef NV2080_CTRL_GPU_MIGRATABLE_OPS_CMN_PARAMS 
NV2080_CTRL_GPU_MIGRATABLE_OPS_VGPU_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_MARK_DEVICE_FOR_RESET + * + * INTERNAL DEBUG/TESTING USE ONLY + * + * Marks the device for reset. + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_MARK_DEVICE_FOR_RESET (0x208001a9U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0xA9" */ + +/* + * NV2080_CTRL_CMD_GPU_UNMARK_DEVICE_FOR_RESET + * + * INTERNAL DEBUG/TESTING USE ONLY + * + * Unmarks the device for reset. + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_UNMARK_DEVICE_FOR_RESET (0x208001aaU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0xAA" */ + +/* + * NV2080_CTRL_CMD_GPU_GET_RESET_STATUS + * + * Gets the current reset status of the device. + * + * bResetRequired + * Set to NV_TRUE if the device needs to be reset. + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_RESET_STATUS (0x208001abU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_RESET_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_RESET_STATUS_PARAMS_MESSAGE_ID (0xABU) + +typedef struct NV2080_CTRL_GPU_GET_RESET_STATUS_PARAMS { + NvBool bResetRequired; +} NV2080_CTRL_GPU_GET_RESET_STATUS_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_MARK_DEVICE_FOR_DRAIN_AND_RESET + * + * INTERNAL DEBUG/TESTING USE ONLY + * + * Marks the device for drain and reset. + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_MARK_DEVICE_FOR_DRAIN_AND_RESET (0x208001acU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0xAC" */ + +/* + * NV2080_CTRL_CMD_GPU_UNMARK_DEVICE_FOR_DRAIN_AND_RESET + * + * INTERNAL DEBUG/TESTING USE ONLY + * + * Unmarks the device for drain and reset. + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_UNMARK_DEVICE_FOR_DRAIN_AND_RESET (0x208001adU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0xAD" */ + +/* + * NV2080_CTRL_CMD_GPU_GET_DRAIN_AND_RESET_STATUS + * + * Gets the current drain and reset status of the device. Drain and reset is used only in SMC configs. + * + * bDrainRecommended + * Set to NV_TRUE if a drain and reset is recommended for the device. + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_DRAIN_AND_RESET_STATUS (0x208001aeU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_DRAIN_AND_RESET_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_DRAIN_AND_RESET_STATUS_PARAMS_MESSAGE_ID (0xAEU) + +typedef struct NV2080_CTRL_GPU_GET_DRAIN_AND_RESET_STATUS_PARAMS { + NvBool bDrainRecommended; +} NV2080_CTRL_GPU_GET_DRAIN_AND_RESET_STATUS_PARAMS; + +/* + * NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2 + * + * This command returns NVENC software session information for the associated GPU. + * This command is similar to NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO but doesn't have + * embedded pointers. + * + * Check NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO for detailed information.
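A short sketch of NV2080_CTRL_CMD_GPU_GET_RESET_STATUS defined above, built on the same assumed rmSubdeviceControl() helper; what "scheduling a reset" means is platform policy and is deliberately left as a comment.

static void checkResetStatus(NvHandle hClient, NvHandle hSubdevice)
{
    NV2080_CTRL_GPU_GET_RESET_STATUS_PARAMS rs;
    memset(&rs, 0, sizeof(rs));

    if (rmSubdeviceControl(hClient, hSubdevice,
                           NV2080_CTRL_CMD_GPU_GET_RESET_STATUS,
                           &rs, sizeof(rs)) == NV_OK && rs.bResetRequired)
    {
        /* The device needs a reset; trigger it via the platform's
         * usual recovery mechanism. */
    }
}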
+ */ + +#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS_MESSAGE_ID (0xAFU) + +typedef struct NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS { + NvU32 sessionInfoTblEntry; + NV2080_CTRL_NVENC_SW_SESSION_INFO sessionInfoTbl[NV2080_CTRL_GPU_NVENC_SESSION_INFO_MAX_COPYOUT_ENTRIES]; +} NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS; + +#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2 (0x208001afU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_V2_PARAMS_MESSAGE_ID" */ + +typedef struct NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO { + NvU32 engDesc; + NvU32 ctxAttr; + NvU32 ctxBufferSize; + NvU32 addrSpaceList; + NvU32 registerBase; +} NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO; +#define NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS 0x40 + +#define NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO (0x208001b0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID (0xB0U) + +typedef struct NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS { + NvU32 numConstructedFalcons; + NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS]; +} NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_VF_CAPS + * + * This command will return the MSIX capabilities for virtual function + * Parameters: + * + * gfid [IN] + * The GPU function identifier for a given VF BDF + * + * vfMsixCap [out] + * This field returns the VF MSIX cap values + * + * Possible status values returned are: + * NV_OK + */ +typedef struct NV2080_VF_MSIX_CAPS { + NvU32 msix_header; + NvU32 msix_table; + NvU32 msix_pba; +} NV2080_VF_MSIX_CAPS; + +#define NV2080_CTRL_CMD_GPU_GET_VF_CAPS (0x208001b1) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_VF_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_VF_CAPS_PARAMS_MESSAGE_ID (0xB1U) + +typedef struct NV2080_CTRL_GPU_GET_VF_CAPS_PARAMS { + NvU32 gfid; + NV2080_VF_MSIX_CAPS vfMsixCap; +} NV2080_CTRL_GPU_GET_VF_CAPS_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_RECOVERY_ACTION + * + * Gets the recovery action needed for the device after a failure. + * + * action [OUT] + * Returns the recovery action needed. 
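A sketch of NV2080_CTRL_CMD_GPU_GET_VF_CAPS defined above, using the same hypothetical rmSubdeviceControl() helper; the gfid argument is assumed to have been obtained earlier, e.g. via NV2080_CTRL_CMD_GPU_GET_GFID.

static void dumpVfMsixCaps(NvHandle hClient, NvHandle hSubdevice, NvU32 gfid)
{
    NV2080_CTRL_GPU_GET_VF_CAPS_PARAMS vf;
    memset(&vf, 0, sizeof(vf));

    vf.gfid = gfid; /* assumed: GFID of the target VF */

    if (rmSubdeviceControl(hClient, hSubdevice, NV2080_CTRL_CMD_GPU_GET_VF_CAPS,
                           &vf, sizeof(vf)) == NV_OK)
        printf("VF MSI-X: header 0x%x table 0x%x pba 0x%x\n",
               vf.vfMsixCap.msix_header, vf.vfMsixCap.msix_table,
               vf.vfMsixCap.msix_pba);
}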
+ * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_RECOVERY_ACTION (0x208001b2U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_RECOVERY_ACTION_PARAMS_MESSAGE_ID" */ + +typedef enum NV2080_CTRL_GPU_RECOVERY_ACTION { + NV2080_CTRL_GPU_RECOVERY_ACTION_NONE = 0, + NV2080_CTRL_GPU_RECOVERY_ACTION_GPU_RESET = 1, + NV2080_CTRL_GPU_RECOVERY_ACTION_NODE_REBOOT = 2, + NV2080_CTRL_GPU_RECOVERY_ACTION_DRAIN_P2P = 3, + NV2080_CTRL_GPU_RECOVERY_ACTION_DRAIN_AND_RESET = 4, +} NV2080_CTRL_GPU_RECOVERY_ACTION; + +#define NV2080_CTRL_GPU_GET_RECOVERY_ACTION_PARAMS_MESSAGE_ID (0xB2U) + +typedef struct NV2080_CTRL_GPU_GET_RECOVERY_ACTION_PARAMS { + NV2080_CTRL_GPU_RECOVERY_ACTION action; +} NV2080_CTRL_GPU_GET_RECOVERY_ACTION_PARAMS; + +/* + * NV2080_CTRL_GPU_GET_FIPS_STATUS + * + * @brief get FIPS status (enabled/disabled) from GSP-RM + * + * + * @return NV_OK on success + * @return an NV_ERR_* code otherwise + */ +#define NV2080_CTRL_GPU_GET_FIPS_STATUS_PARAMS_MESSAGE_ID (0xe4U) + +typedef struct NV2080_CTRL_GPU_GET_FIPS_STATUS_PARAMS { + NvBool bFipsEnabled; +} NV2080_CTRL_GPU_GET_FIPS_STATUS_PARAMS; +#define NV2080_CTRL_GPU_GET_FIPS_STATUS (0x208001e4) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FIPS_STATUS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_GPU_GET_RAFTS_FS_MASK + * + * @brief Get the floorsweeping mask for a given skyline configuration. + * + * tpcCountMatrix [IN] + * TPC-per-GPC distribution for which the user requires a floorsweeping mask. + * + * gfxGpcCount [IN] + * Number of GFX-capable GPCs required. + * + * gfxTpcCount [IN] + * Number of GFX-capable TPCs required. + * + * floorSweepConfig [OUT] + * MODS floorsweeping mask. + * + * bValid [OUT] + * Whether the entries in floorSweepConfig are valid.
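To illustrate NV2080_CTRL_CMD_GPU_GET_RECOVERY_ACTION defined above, a dispatch sketch on the same assumed rmSubdeviceControl() helper; the per-action handling is left as comments since the appropriate response is deployment-specific.

static void handleRecoveryAction(NvHandle hClient, NvHandle hSubdevice)
{
    NV2080_CTRL_GPU_GET_RECOVERY_ACTION_PARAMS ra;
    memset(&ra, 0, sizeof(ra));

    if (rmSubdeviceControl(hClient, hSubdevice,
                           NV2080_CTRL_CMD_GPU_GET_RECOVERY_ACTION,
                           &ra, sizeof(ra)) != NV_OK)
        return;

    switch (ra.action)
    {
        case NV2080_CTRL_GPU_RECOVERY_ACTION_NONE:
            break;
        case NV2080_CTRL_GPU_RECOVERY_ACTION_GPU_RESET:
            /* reset the GPU */ break;
        case NV2080_CTRL_GPU_RECOVERY_ACTION_NODE_REBOOT:
            /* reboot the node */ break;
        default:
            /* DRAIN_P2P / DRAIN_AND_RESET: drain-based recovery */ break;
    }
}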
+ * + * @return NV_OK : on success + * @return NV_ERR_INVALID_PARAMETER : Parameters in pParams are incompatible with each other + * @return NV_ERR_NOT_SUPPORTED : Requested skyline not supported by current chip but can be supported by a chip + * with fewer defects + */ +typedef enum NV2080_RAFTS_FLOORSWEEP_UNIT_MASK_TYPE { + NV2080_RAFTS_FLOORSWEEP_UNIT_TYPE_INVALID = 0, + NV2080_RAFTS_FLOORSWEEP_UNIT_TYPE_TPC = 1, + NV2080_RAFTS_FLOORSWEEP_UNIT_TYPE_GPC = 2, +} NV2080_RAFTS_FLOORSWEEP_UNIT_MASK_TYPE; + +typedef struct NV2080_RAFTS_FLOORSWEEP_INFO { + NV2080_RAFTS_FLOORSWEEP_UNIT_MASK_TYPE unitType; + NvU32 parentId; + NvU32 mask; +} NV2080_RAFTS_FLOORSWEEP_INFO; + +#define NV2080_CTRL_GPU_RAFTS_NUM_MAX_UGPU 0x2 +#define NV2080_CTRL_GPU_RAFTS_NUM_MAX_GPC_PER_UGPU 0xC +#define NV2080_CTRL_GPU_RAFTS_NUM_MAX_NUM_GPC (0x18) /* finn: Evaluated from "NV2080_CTRL_GPU_RAFTS_NUM_MAX_UGPU * NV2080_CTRL_GPU_RAFTS_NUM_MAX_GPC_PER_UGPU" */ +#define NV2080_CTRL_GPU_RAFTS_NUM_MAX_FS_UNIT (0x1a) /* finn: Evaluated from "NV2080_CTRL_GPU_RAFTS_NUM_MAX_NUM_GPC + 2" */ + +#define NV2080_CTRL_GPU_GET_RAFTS_FS_MASK_PARAMS_MESSAGE_ID (0xB3U) + +typedef struct NV2080_CTRL_GPU_GET_RAFTS_FS_MASK_PARAMS { + NvU8 tpcCountMatrix[NV2080_CTRL_GPU_RAFTS_NUM_MAX_UGPU][NV2080_CTRL_GPU_RAFTS_NUM_MAX_GPC_PER_UGPU]; + NvBool bValid; + NV2080_RAFTS_FLOORSWEEP_INFO floorSweepConfig[NV2080_CTRL_GPU_RAFTS_NUM_MAX_FS_UNIT]; + NvU8 gfxGpcCount; + NvU8 gfxTpcPerGpcCount; + NvU8 maxUgpuTpcDiff; +} NV2080_CTRL_GPU_GET_RAFTS_FS_MASK_PARAMS; + +#define NV2080_CTRL_GPU_GET_RAFTS_FS_MASK (0x208001b3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_RAFTS_FS_MASK_PARAMS_MESSAGE_ID" */ + +/*! + * NV2080_CTRL_GPU_GET_COMPUTE_PROFILE_CAPACITY_PARAMS + * + * This structure is used to query the creation capacity for a given compute profile + * + * partitionFlag[IN] + * - GPU instance profile flag for which to query the specified + * compute profile + * + * computeSize[IN] + * - Size specifying the compute profile whose info to query + * + * totalProfileCount[OUT] + * - Total number of profiles which can possibly be created in an instance of + * the specified GPU profile + * + * totalSpans[OUT] + * - List of spans which can possibly be occupied by partitions of the + * given type. + * + * totalSpansCount[OUT] + * - Number of entries filled in totalSpans + * + */ +#define NV2080_CTRL_CMD_GPU_GET_COMPUTE_PROFILE_CAPACITY (0x208001e5U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_COMPUTE_PROFILE_CAPACITY_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_COMPUTE_PROFILE_CAPACITY_PARAMS_MESSAGE_ID (0xe5U) + +typedef struct NV2080_CTRL_GPU_GET_COMPUTE_PROFILE_CAPACITY_PARAMS { + NvU32 partitionFlag; + NvU32 computeSize; + NvU32 totalProfileCount; + NV_DECLARE_ALIGNED(NV2080_CTRL_EXEC_PARTITION_SPAN totalSpans[NV2080_CTRL_GPU_MAX_PARTITIONS], 8); + NvU32 totalSpansCount; +} NV2080_CTRL_GPU_GET_COMPUTE_PROFILE_CAPACITY_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_TPC_RECONFIG_MASK + * + * This command returns the TPC reconfig mask for a specific GPC + * + * gpc[IN] + * The GPC for which the TPC reconfig mask needs to be queried. + * The GPC should be specified as a logical index.
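A sketch of NV2080_CTRL_CMD_GPU_GET_COMPUTE_PROFILE_CAPACITY defined above, on the same assumed rmSubdeviceControl() helper; the partitionFlag and computeSize arguments are assumed to hold valid NV2080_CTRL_GPU_PARTITION_FLAG_* and ..._COMPUTE_SIZE_* values chosen by the caller.

static void queryProfileCapacity(NvHandle hClient, NvHandle hSubdevice,
                                 NvU32 partitionFlag, NvU32 computeSize)
{
    NV2080_CTRL_GPU_GET_COMPUTE_PROFILE_CAPACITY_PARAMS cap;
    memset(&cap, 0, sizeof(cap));

    cap.partitionFlag = partitionFlag; /* assumed valid profile flag */
    cap.computeSize   = computeSize;   /* assumed valid compute size */

    if (rmSubdeviceControl(hClient, hSubdevice,
                           NV2080_CTRL_CMD_GPU_GET_COMPUTE_PROFILE_CAPACITY,
                           &cap, sizeof(cap)) == NV_OK)
        printf("%u profiles creatable across %u spans\n",
               cap.totalProfileCount, cap.totalSpansCount);
}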
+ * + * tpcReconfigMask[OUT] + * Mask of reconfigurable TPCs in the specified GPC + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_TPC_RECONFIG_MASK (0x208001e7U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_TPC_RECONFIG_MASK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_TPC_RECONFIG_MASK_PARAMS_MESSAGE_ID (0xe7U) + +typedef struct NV2080_CTRL_GPU_GET_TPC_RECONFIG_MASK_PARAMS { + NvU32 gpc; + NvU32 tpcReconfigMask; +} NV2080_CTRL_GPU_GET_TPC_RECONFIG_MASK_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_RPC_GSP_TEST + * + * This command checks a variable-sized RPC for a known pattern, then + * fills the data field with another pattern. It also records the timestamp + * before and after the RPC is sent to GSP. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + */ +#define NV2080_CTRL_CMD_GPU_RPC_GSP_TEST (0x208001e8) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS_MESSAGE_ID (0xe8U) + +typedef struct NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS { + NvU8 test; + NvU32 dataSize; + NV_DECLARE_ALIGNED(NvU64 startTimestamp, 8); + NV_DECLARE_ALIGNED(NvU64 stopTimestamp, 8); + NV_DECLARE_ALIGNED(NvP64 data, 8); +} NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS; + +#define NV2080_CTRL_GPU_RPC_GSP_TEST_SERIALIZED_INTEGRITY 0x1 +#define NV2080_CTRL_GPU_RPC_GSP_TEST_UNSERIALIZED 0x2 +#define NV2080_CTRL_GPU_RPC_GSP_TEST_SERIALIZED_NOP 0x3 + +/* + * NV2080_CTRL_CMD_GPU_RPC_GSP_QUERY_SIZES + * + * This command returns information necessary for GSP RPC integrity tests + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + */ +#define NV2080_CTRL_CMD_GPU_RPC_GSP_QUERY_SIZES (0x208001e9) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_RPC_GSP_QUERY_SIZES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_RPC_GSP_QUERY_SIZES_PARAMS_MESSAGE_ID (0xe9U) + +typedef struct NV2080_CTRL_GPU_RPC_GSP_QUERY_SIZES_PARAMS { + NvU32 maxRpcSize; + NvU32 finnRmapiSize; + NvU32 rpcGspControlSize; + NvU32 rpcMessageHeaderSize; + NV_DECLARE_ALIGNED(NvU64 timestampFreq, 8); +} NV2080_CTRL_GPU_RPC_GSP_QUERY_SIZES_PARAMS; + +/* + * RUSD features. + * Each feature definition equates to a bit in the supportedFeatures bitmask. + */ +#define RUSD_FEATURE_NON_POLLING 0x1 +#define RUSD_FEATURE_POLLING 0x2 + +/* + * NV2080_CTRL_CMD_GPU_RUSD_GET_SUPPORTED_FEATURES + * + * @brief Returns bitmask of supported RUSD features. + * + * @param[out] supportedFeatures Bitmask of supported RUSD features + * + * @return NV_OK + */ +#define NV2080_CTRL_CMD_RUSD_GET_SUPPORTED_FEATURES (0x208001eaU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_RUSD_GET_SUPPORTED_FEATURES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_RUSD_GET_SUPPORTED_FEATURES_PARAMS_MESSAGE_ID (0xeaU) + +typedef struct NV2080_CTRL_RUSD_GET_SUPPORTED_FEATURES_PARAMS { + NvU32 supportedFeatures; +} NV2080_CTRL_RUSD_GET_SUPPORTED_FEATURES_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_RUSD_SET_FEATURES + * + * @brief Set RUSD features + * + * permanentPolledDataMask + * This field specifies which RUSD data should be polled.
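A sketch of NV2080_CTRL_CMD_RUSD_GET_SUPPORTED_FEATURES defined above, on the same assumed rmSubdeviceControl() helper, showing the bitmask test against the RUSD_FEATURE_* defines.

static void checkRusdPolling(NvHandle hClient, NvHandle hSubdevice)
{
    NV2080_CTRL_RUSD_GET_SUPPORTED_FEATURES_PARAMS rusd;
    memset(&rusd, 0, sizeof(rusd));

    if (rmSubdeviceControl(hClient, hSubdevice,
                           NV2080_CTRL_CMD_RUSD_GET_SUPPORTED_FEATURES,
                           &rusd, sizeof(rusd)) == NV_OK &&
        (rusd.supportedFeatures & RUSD_FEATURE_POLLING))
    {
        /* Polled RUSD data is supported; it could then be enabled via
         * NV2080_CTRL_CMD_GPU_RUSD_SET_FEATURES as needed. */
    }
}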
+ * + * @return NV_OK + */ +#define NV2080_CTRL_CMD_GPU_RUSD_SET_FEATURES (0x208001ebU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_RUSD_SET_FEATURES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_RUSD_SET_FEATURES_PARAMS_MESSAGE_ID (0xebU) + +typedef struct NV2080_CTRL_GPU_RUSD_SET_FEATURES_PARAMS { + NV_DECLARE_ALIGNED(NvU64 permanentPolledDataMask, 8); +} NV2080_CTRL_GPU_RUSD_SET_FEATURES_PARAMS; + +/* _ctrl2080gpu_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h new file mode 100644 index 0000000..2398a0c --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h @@ -0,0 +1,96 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080gpumon.finn +// + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/*! + * This structure represents the base class of a GPU monitoring sample. + */ +typedef struct NV2080_CTRL_GPUMON_SAMPLE { + /*! + * Timestamp in nanoseconds. + */ + NV_DECLARE_ALIGNED(NvU64 timeStamp, 8); +} NV2080_CTRL_GPUMON_SAMPLE; + +/*! + * This structure represents a buffer of GPU monitoring samples. + */ +typedef struct NV2080_CTRL_GPUMON_SAMPLES { + /*! + * Type of the sample, see NV2080_CTRL_GPUMON_SAMPLE_TYPE_* for reference. + */ + NvU8 type; + /*! + * Size of the buffer, this should be + * bufSize == NV2080_CTRL_*_GPUMON_SAMPLE_COUNT_* times + * sizeof(derived type of NV2080_CTRL_GPUMON_SAMPLE). + */ + NvU32 bufSize; + /*! + * Number of samples in the ring buffer. + */ + NvU32 count; + /*! + * Tracks the offset of the tail in the circular queue array pSamples. + */ + NvU32 tracker; + /*! + * Pointer to a circular queue based on an array of NV2080_CTRL_GPUMON_SAMPLE + * or its derived-type structs with size == bufSize. + * + * @note This circular queue wraps around after 10 seconds of sampling, + * and it is the client's responsibility to query within this time frame in + * order to avoid losing samples. + * @note With one exception, this queue contains the last 10 seconds of samples + * with tracker pointing to the oldest entry and the entry before tracker as the + * newest entry. The exception is when the queue is not full (i.e.
tracker is + pointing to a zeroed-out entry); in that case, the valid entries are between 0 + and tracker. + * @note Clients can store the tracker from a previous query in order to consume + * only the samples added since the last read. + */ + NV_DECLARE_ALIGNED(NvP64 pSamples, 8); +} NV2080_CTRL_GPUMON_SAMPLES; + +/*! + * Enumeration of GPU monitoring sample types. + */ +#define NV2080_CTRL_GPUMON_SAMPLE_TYPE_PWR_MONITOR_STATUS 0x00000001 +#define NV2080_CTRL_GPUMON_SAMPLE_TYPE_PERFMON_UTIL 0x00000002 + +/*! + * Macro for invalid PID. + */ +#define NV2080_GPUMON_PID_INVALID ((NvU32)(~0)) diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h new file mode 100644 index 0000000..52d9459 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h @@ -0,0 +1,1986 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080gr.finn +// + +#include "ctrl/ctrl2080/ctrl2080base.h" + +#include "ctrl/ctrl0080/ctrl0080gr.h" /* 2080 is partially derivative of 0080 */ +#include "nvcfg_sdk.h" + +/* + * NV2080_CTRL_GR_ROUTE_INFO + * + * This structure specifies the routing information used to + * disambiguate the target GR engine. + * + * flags + * This field decides how the route field is interpreted + * + * route + * This field has the data to identify the target GR engine + * + */ +#define NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE 1:0 +#define NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE_NONE 0x0U +#define NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE_ENGID 0x1U +#define NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE_CHANNEL 0x2U + +#define NV2080_CTRL_GR_ROUTE_INFO_DATA_CHANNEL_HANDLE 31:0 +#define NV2080_CTRL_GR_ROUTE_INFO_DATA_ENGID 31:0 + +typedef NV0080_CTRL_GR_ROUTE_INFO NV2080_CTRL_GR_ROUTE_INFO; + +/* NV20_SUBDEVICE_XX gr control commands and parameters */ + +/* + * NV2080_CTRL_GR_INFO + * + * This structure represents a single 32-bit GR engine value. Clients + * request a particular GR engine value by specifying a unique GR + * information index. + * + * Legal GR information index values are: + * NV2080_CTRL_GR_INFO_INDEX_BUFFER_ALIGNMENT + * This index is used to request the surface buffer alignment (in bytes) + * required by the associated subdevice.
The return value is GPU + * implementation-dependent. + * NV2080_CTRL_GR_INFO_INDEX_SWIZZLE_ALIGNMENT + * This index is used to request the required swizzled surface alignment + * (in bytes) supported by the associated subdevice. The return value + * is GPU implementation-dependent. A return value of 0 indicates the GPU + * does not support swizzled surfaces. + * NV2080_CTRL_GR_INFO_INDEX_VERTEX_CACHE_SIZE + * This index is used to request the vertex cache size (in entries) + * supported by the associated subdevice. The return value is GPU + * implementation-dependent. A value of 0 indicates the GPU does not + * have a vertex cache. + * NV2080_CTRL_GR_INFO_INDEX_VPE_COUNT + * This index is used to request the number of VPE units supported by the + * associated subdevice. The return value is GPU implementation-dependent. + * A return value of 0 indicates the GPU does not contain VPE units. + * NV2080_CTRL_GR_INFO_INDEX_SHADER_PIPE_COUNT + * This index is used to request the number of shader pipes supported by + * the associated subdevice. The return value is GPU + * implementation-dependent. A return value of 0 indicates the GPU does + * not contain dedicated shader units. + * For tesla: this value is the number of enabled TPCs + * NV2080_CTRL_GR_INFO_INDEX_SHADER_PIPE_SUB_COUNT + * This index is used to request the number of sub units per + * shader pipes supported by the associated subdevice. The return + * value is GPU implementation-dependent. A return value of 0 indicates + * the GPU does not contain dedicated shader units. + * For tesla: this value is the number of enabled SMs (per TPC) + * NV2080_CTRL_GR_INFO_INDEX_THREAD_STACK_SCALING_FACTOR + * This index is used to request the scaling factor for thread stack + * memory. + * A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_SM_REG_BANK_COUNT + * This index is used to request the number of SM register banks supported. + * A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_SM_REG_BANK_REG_COUNT + * This index is used to request the number of registers per SM register + * bank. A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_SM_VERSION + * This index is used to determine the SM version. + * A value of 0 indicates the GPU does not support this function. + * Otherwise one of NV2080_CTRL_GR_INFO_SM_VERSION_*. + * NV2080_CTRL_GR_INFO_INDEX_MAX_WARPS_PER_SM + * This index is used to determine the maximum number of warps + * (thread groups) per SM. + * A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_MAX_THREADS_PER_WARP + * This index is used to determine the maximum number of threads + * in each warp (thread group). + * A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_FB_MEMORY_REQUEST_GRANULARITY + * This index is used to request the default fb memory read/write request + * size in bytes (typically based on the memory configuration/controller). + * Smaller memory requests are likely to take as long as a full one. + * A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_HOST_MEMORY_REQUEST_GRANULARITY + * This index is used to request the default host memory read/write request + * size in bytes (typically based on the memory configuration/controller). + * Smaller memory requests are likely to take as long as a full one. + * A value of 0 indicates the GPU does not support this function.
+ * NV2080_CTRL_GR_INFO_INDEX_MAX_SP_PER_SM + * This index is used to request the maximum number of streaming processors + * per SM. + * NV2080_CTRL_GR_INFO_INDEX_LITTER_* + * This index is used to query the various LITTER size information from + * the chip. + * NV2080_CTRL_GR_INFO_INDEX_TIMESLICE_ENABLED + * This index is used to query whether the chip has timeslice mode enabled. + * NV2080_CTRL_GR_INFO_INDEX_GPU_CORE_COUNT + * This index is used to return the number of "GPU Cores" + * supported by the graphics pipeline + * NV2080_CTRL_GR_INFO_INDEX_RT_CORE_COUNT + * This index is used to return the number of "Ray Tracing Cores" + * supported by the graphics pipeline + * NV2080_CTRL_GR_INFO_INDEX_TENSOR_CORE_COUNT + * This index is used to return the number of "Tensor Cores" + * supported by the graphics pipeline + * NV2080_CTRL_GR_INFO_INDEX_GFX_CAPABILITIES + * This index is used to return the Graphics capabilities + * supported by the graphics pipeline + */ +typedef NV0080_CTRL_GR_INFO NV2080_CTRL_GR_INFO; + +/* + * Valid GR info index values + * These indices are offset from supporting the 0080 version of this call + */ +#define NV2080_CTRL_GR_INFO_INDEX_MAXCLIPS NV0080_CTRL_GR_INFO_INDEX_MAXCLIPS +#define NV2080_CTRL_GR_INFO_INDEX_MIN_ATTRS_BUG_261894 NV0080_CTRL_GR_INFO_INDEX_MIN_ATTRS_BUG_261894 +#define NV2080_CTRL_GR_INFO_XBUF_MAX_PSETS_PER_BANK NV0080_CTRL_GR_INFO_XBUF_MAX_PSETS_PER_BANK +/** + * This index is used to request the surface buffer alignment (in bytes) + * required by the associated subdevice. The return value is GPU + * implementation-dependent. + */ +#define NV2080_CTRL_GR_INFO_INDEX_BUFFER_ALIGNMENT NV0080_CTRL_GR_INFO_INDEX_BUFFER_ALIGNMENT +#define NV2080_CTRL_GR_INFO_INDEX_SWIZZLE_ALIGNMENT NV0080_CTRL_GR_INFO_INDEX_SWIZZLE_ALIGNMENT +#define NV2080_CTRL_GR_INFO_INDEX_VERTEX_CACHE_SIZE NV0080_CTRL_GR_INFO_INDEX_VERTEX_CACHE_SIZE +/** + * This index is used to request the number of VPE units supported by the + * associated subdevice. The return value is GPU implementation-dependent. + * A return value of 0 indicates the GPU does not contain VPE units. + */ +#define NV2080_CTRL_GR_INFO_INDEX_VPE_COUNT NV0080_CTRL_GR_INFO_INDEX_VPE_COUNT +/** + * This index is used to request the number of shader pipes supported by + * the associated subdevice. The return value is GPU + * implementation-dependent. A return value of 0 indicates the GPU does + * not contain dedicated shader units. + * For tesla: this value is the number of enabled TPCs + */ +#define NV2080_CTRL_GR_INFO_INDEX_SHADER_PIPE_COUNT NV0080_CTRL_GR_INFO_INDEX_SHADER_PIPE_COUNT +/** + * This index is used to request the scaling factor for thread stack + * memory. + * A value of 0 indicates the GPU does not support this function. + */ +#define NV2080_CTRL_GR_INFO_INDEX_THREAD_STACK_SCALING_FACTOR NV0080_CTRL_GR_INFO_INDEX_THREAD_STACK_SCALING_FACTOR +/** + * This index is used to request the number of sub units per + * shader pipes supported by the associated subdevice. The return + * value is GPU implementation-dependent. A return value of 0 indicates + * the GPU does not contain dedicated shader units. 
+ * For tesla: this value is the number of enabled SMs (per TPC) + */ +#define NV2080_CTRL_GR_INFO_INDEX_SHADER_PIPE_SUB_COUNT NV0080_CTRL_GR_INFO_INDEX_SHADER_PIPE_SUB_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_SM_REG_BANK_COUNT NV0080_CTRL_GR_INFO_INDEX_SM_REG_BANK_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_SM_REG_BANK_REG_COUNT NV0080_CTRL_GR_INFO_INDEX_SM_REG_BANK_REG_COUNT +/** + * This index is used to determine the SM version. + * A value of 0 indicates the GPU does not support this function. + * Otherwise one of NV2080_CTRL_GR_INFO_SM_VERSION_*. + */ +#define NV2080_CTRL_GR_INFO_INDEX_SM_VERSION NV0080_CTRL_GR_INFO_INDEX_SM_VERSION +/** + * This index is used to determine the maximum number of warps + * (thread groups) per SM. + * A value of 0 indicates the GPU does not support this function. + */ +#define NV2080_CTRL_GR_INFO_INDEX_MAX_WARPS_PER_SM NV0080_CTRL_GR_INFO_INDEX_MAX_WARPS_PER_SM +/** + * This index is used to determine the maximum number of threads + * in each warp (thread group). + * A value of 0 indicates the GPU does not support this function. + */ +#define NV2080_CTRL_GR_INFO_INDEX_MAX_THREADS_PER_WARP NV0080_CTRL_GR_INFO_INDEX_MAX_THREADS_PER_WARP +#define NV2080_CTRL_GR_INFO_INDEX_GEOM_GS_OBUF_ENTRIES NV0080_CTRL_GR_INFO_INDEX_GEOM_GS_OBUF_ENTRIES +#define NV2080_CTRL_GR_INFO_INDEX_GEOM_XBUF_ENTRIES NV0080_CTRL_GR_INFO_INDEX_GEOM_XBUF_ENTRIES +#define NV2080_CTRL_GR_INFO_INDEX_FB_MEMORY_REQUEST_GRANULARITY NV0080_CTRL_GR_INFO_INDEX_FB_MEMORY_REQUEST_GRANULARITY +#define NV2080_CTRL_GR_INFO_INDEX_HOST_MEMORY_REQUEST_GRANULARITY NV0080_CTRL_GR_INFO_INDEX_HOST_MEMORY_REQUEST_GRANULARITY +#define NV2080_CTRL_GR_INFO_INDEX_MAX_SP_PER_SM NV0080_CTRL_GR_INFO_INDEX_MAX_SP_PER_SM +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_ZCULL_BANKS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_ZCULL_BANKS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPC_PER_GPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPC_PER_GPC +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_MIN_FBPS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MIN_FBPS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_FBP_PORTS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_FBP_PORTS +#define NV2080_CTRL_GR_INFO_INDEX_TIMESLICE_ENABLED NV0080_CTRL_GR_INFO_INDEX_TIMESLICE_ENABLED +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPAS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPAS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_PES_PER_GPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_PES_PER_GPC +#define NV2080_CTRL_GR_INFO_INDEX_GPU_CORE_COUNT NV0080_CTRL_GR_INFO_INDEX_GPU_CORE_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPCS_PER_PES NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPCS_PER_PES +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_HUB_PORTS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_HUB_PORTS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_SM_PER_TPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_SM_PER_TPC +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_HSHUB_FBP_PORTS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_HSHUB_FBP_PORTS +/** + * This index is used to return the number of "Ray Tracing Cores" + * supported by the graphics pipeline + */ +#define NV2080_CTRL_GR_INFO_INDEX_RT_CORE_COUNT NV0080_CTRL_GR_INFO_INDEX_RT_CORE_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_TENSOR_CORE_COUNT NV0080_CTRL_GR_INFO_INDEX_TENSOR_CORE_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GRS 
NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GRS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTCS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTCS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_SLICES NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_SLICES +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCMMU_PER_GPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCMMU_PER_GPC +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_PER_FBP NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_PER_FBP +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_ROP_PER_GPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_ROP_PER_GPC +#define NV2080_CTRL_GR_INFO_INDEX_FAMILY_MAX_TPC_PER_GPC NV0080_CTRL_GR_INFO_INDEX_FAMILY_MAX_TPC_PER_GPC +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPA_PER_FBP NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPA_PER_FBP +#define NV2080_CTRL_GR_INFO_INDEX_MAX_SUBCONTEXT_COUNT NV0080_CTRL_GR_INFO_INDEX_MAX_SUBCONTEXT_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_MAX_LEGACY_SUBCONTEXT_COUNT NV0080_CTRL_GR_INFO_INDEX_MAX_LEGACY_SUBCONTEXT_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_MAX_PER_ENGINE_SUBCONTEXT_COUNT NV0080_CTRL_GR_INFO_INDEX_MAX_PER_ENGINE_SUBCONTEXT_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_SINGLETON_GPCS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_SINGLETON_GPCS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GFXC_GPCS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GFXC_GPCS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GFXC_TPCS_PER_GFXC_GPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GFXC_TPCS_PER_GFXC_GPC +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_SLICES_PER_LTC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_SLICES_PER_LTC + +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GFXC_SMC_ENGINES NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GFXC_SMC_ENGINES + + +#define NV2080_CTRL_GR_INFO_INDEX_DUMMY NV0080_CTRL_GR_INFO_INDEX_DUMMY +#define NV2080_CTRL_GR_INFO_INDEX_GFX_CAPABILITIES NV0080_CTRL_GR_INFO_INDEX_GFX_CAPABILITIES +#define NV2080_CTRL_GR_INFO_INDEX_MAX_MIG_ENGINES NV0080_CTRL_GR_INFO_INDEX_MAX_MIG_ENGINES +#define NV2080_CTRL_GR_INFO_INDEX_MAX_PARTITIONABLE_GPCS NV0080_CTRL_GR_INFO_INDEX_MAX_PARTITIONABLE_GPCS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_MIN_SUBCTX_PER_SMC_ENG NV0080_CTRL_GR_INFO_INDEX_LITTER_MIN_SUBCTX_PER_SMC_ENG +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS_PER_DIELET NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS_PER_DIELET +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_MAX_NUM_SMC_ENGINES_PER_DIELET NV0080_CTRL_GR_INFO_INDEX_LITTER_MAX_NUM_SMC_ENGINES_PER_DIELET + +/* When adding a new INDEX, please update INDEX_MAX and MAX_SIZE accordingly + * NOTE: 0080 functionality is merged with 2080 functionality, so this max size + * reflects that. 
+ */ +#define NV2080_CTRL_GR_INFO_INDEX_MAX NV0080_CTRL_GR_INFO_INDEX_MAX +#define NV2080_CTRL_GR_INFO_MAX_SIZE NV0080_CTRL_GR_INFO_MAX_SIZE + +/* valid SM version return values */ + +#define NV2080_CTRL_GR_INFO_SM_VERSION_NONE (0x00000000U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_1_05 (0x00000105U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_1_1 (0x00000110U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_1_2 (0x00000120U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_1_3 (0x00000130U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_1_4 (0x00000140U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_1_5 (0x00000150U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_2_0 (0x00000200U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_2_1 (0x00000210U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_2_2 (0x00000220U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_0 (0x00000300U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_1 (0x00000310U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_2 (0x00000320U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_3 (0x00000330U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_5 (0x00000350U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_6 (0x00000360U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_8 (0x00000380U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_9 (0x00000390U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_4_0 (0x00000400U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_5_0 (0x00000500U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_5_02 (0x00000502U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_5_03 (0x00000503U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_6_0 (0x00000600U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_6_01 (0x00000601U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_6_02 (0x00000602U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_0 (0x00000700U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_01 (0x00000701U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_02 (0x00000702U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_03 (0x00000703U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_05 (0x00000705U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_02 (0x00000802U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_06 (0x00000806U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_07 (0x00000807U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_08 (0x00000808U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_09 (0x00000809U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_9_00 (0x00000900U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_10_00 (0x00000A00U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_10_01 (0x00000A01U) + + + +/* compatibility SM versions to match the official names in the ISA (e.g., SM5.2) */ +#define NV2080_CTRL_GR_INFO_SM_VERSION_5_2 (NV2080_CTRL_GR_INFO_SM_VERSION_5_02) +#define NV2080_CTRL_GR_INFO_SM_VERSION_5_3 (NV2080_CTRL_GR_INFO_SM_VERSION_5_03) +#define NV2080_CTRL_GR_INFO_SM_VERSION_6_1 (NV2080_CTRL_GR_INFO_SM_VERSION_6_01) +#define NV2080_CTRL_GR_INFO_SM_VERSION_6_2 (NV2080_CTRL_GR_INFO_SM_VERSION_6_02) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_1 (NV2080_CTRL_GR_INFO_SM_VERSION_7_01) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_2 (NV2080_CTRL_GR_INFO_SM_VERSION_7_02) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_3 (NV2080_CTRL_GR_INFO_SM_VERSION_7_03) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_5 (NV2080_CTRL_GR_INFO_SM_VERSION_7_05) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_2 (NV2080_CTRL_GR_INFO_SM_VERSION_8_02) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_6 (NV2080_CTRL_GR_INFO_SM_VERSION_8_06) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_7 (NV2080_CTRL_GR_INFO_SM_VERSION_8_07) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_8 (NV2080_CTRL_GR_INFO_SM_VERSION_8_08) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_9 
(NV2080_CTRL_GR_INFO_SM_VERSION_8_09) +#define NV2080_CTRL_GR_INFO_SM_VERSION_9_0 (NV2080_CTRL_GR_INFO_SM_VERSION_9_00) +#define NV2080_CTRL_GR_INFO_SM_VERSION_10_0 (NV2080_CTRL_GR_INFO_SM_VERSION_10_00) +#define NV2080_CTRL_GR_INFO_SM_VERSION_10_1 (NV2080_CTRL_GR_INFO_SM_VERSION_10_01) + + + +#define NV2080_CTRL_GR_INFO_GFX_CAPABILITIES_2D 0:0 +#define NV2080_CTRL_GR_INFO_GFX_CAPABILITIES_2D_FALSE 0x0U +#define NV2080_CTRL_GR_INFO_GFX_CAPABILITIES_2D_TRUE 0x1U +#define NV2080_CTRL_GR_INFO_GFX_CAPABILITIES_3D 1:1 +#define NV2080_CTRL_GR_INFO_GFX_CAPABILITIES_3D_FALSE 0x0U +#define NV2080_CTRL_GR_INFO_GFX_CAPABILITIES_3D_TRUE 0x1U +#define NV2080_CTRL_GR_INFO_GFX_CAPABILITIES_COMPUTE 2:2 +#define NV2080_CTRL_GR_INFO_GFX_CAPABILITIES_COMPUTE_FALSE 0x0U +#define NV2080_CTRL_GR_INFO_GFX_CAPABILITIES_COMPUTE_TRUE 0x1U +#define NV2080_CTRL_GR_INFO_GFX_CAPABILITIES_I2M 3:3 +#define NV2080_CTRL_GR_INFO_GFX_CAPABILITIES_I2M_FALSE 0x0U +#define NV2080_CTRL_GR_INFO_GFX_CAPABILITIES_I2M_TRUE 0x1U + +/** + * NV2080_CTRL_CMD_GR_GET_INFO + * + * This command returns gr engine information for the associated GPU. + * Requests to retrieve gr information use a list of one or more + * NV2080_CTRL_GR_INFO structures. + * + * grInfoListSize + * This field specifies the number of entries on the caller's + * grInfoList. + * grInfoList + * This field specifies a pointer in the caller's address space + * to the buffer into which the gr information is to be returned. + * This buffer must be at least as big as grInfoListSize multiplied + * by the size of the NV2080_CTRL_GR_INFO structure. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. When MIG is enabled, this + * is a mandatory parameter. + */ +#define NV2080_CTRL_CMD_GR_GET_INFO (0x20801201U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_GR_GET_INFO_PARAMS { + NvU32 grInfoListSize; + NV_DECLARE_ALIGNED(NvP64 grInfoList, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_GET_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_CTXSW_ZCULL_MODE + * + * This command is used to set the zcull context switch mode for the specified + * channel. A value of NV_ERR_NOT_SUPPORTED is returned if the + * target channel does not support zcull context switch mode changes. + * + * hChannel + * This parameter specifies the channel handle of + * the channel that is to have its zcull context switch mode changed. + * hShareClient + * Sharing zcull buffers across RM clients is no longer + * supported. To maintain API compatibility, this field must match + * the hClient used in the control call. + * hShareChannel + * This parameter specifies the channel handle of + * the channel with which the zcull context buffer is to be shared. This + * parameter is valid when zcullMode is set to SEPARATE_BUFFER. This + * parameter should be set to the same value as hChannel if no + * sharing is intended. + * zcullMode + * This parameter specifies the new zcull context switch mode. + * Legal values for this parameter include: + * NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_GLOBAL + * This mode is the normal zcull operation where it is not + * context switched and there is one set of globally shared + * zcull memory and tables. This mode is only supported as + * long as all channels use this mode.
+ * NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_NO_CTXSW + * This mode causes the zcull tables to be reset on a context + * switch, but the zcull buffer will not be saved/restored. + * NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_SEPARATE_BUFFER + * This mode will cause the zcull buffers and tables to be + * saved/restored on context switches. If a share channel + * ID is given (shareChID), then the two channels will share + * the zcull context buffers. + */ +#define NV2080_CTRL_CMD_GR_CTXSW_ZCULL_MODE (0x20801205U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_CTXSW_ZCULL_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_CTXSW_ZCULL_MODE_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV2080_CTRL_GR_CTXSW_ZCULL_MODE_PARAMS { + NvHandle hChannel; + NvHandle hShareClient; + NvHandle hShareChannel; + NvU32 zcullMode; +} NV2080_CTRL_GR_CTXSW_ZCULL_MODE_PARAMS; +/* valid zcullMode values */ +#define NV2080_CTRL_CTXSW_ZCULL_MODE_GLOBAL (0x00000000U) +#define NV2080_CTRL_CTXSW_ZCULL_MODE_NO_CTXSW (0x00000001U) +#define NV2080_CTRL_CTXSW_ZCULL_MODE_SEPARATE_BUFFER (0x00000002U) + +/** + * NV2080_CTRL_CMD_GR_GET_ZCULL_INFO + * + * This command is used to query the RM for zcull information that the + * driver will need to allocate and manage the zcull regions. + * + * widthAlignPixels + * This parameter returns the width alignment restrictions in pixels + * used to adjust a surface for proper aliquot coverage (typically + * #TPC's * 16). + * + * heightAlignPixels + * This parameter returns the height alignment restrictions in pixels + * used to adjust a surface for proper aliquot coverage (typically 32). + * + * pixelSquaresByAliquots + * This parameter returns the pixel area covered by an aliquot + * (typically #Zcull_banks * 16 * 16). + * + * aliquotTotal + * This parameter returns the total aliquot pool available in HW. + * + * zcullRegionByteMultiplier + * This parameter returns the multiplier used to convert aliquots in a region + * to the number of bytes required to save/restore them. + * + * zcullRegionHeaderSize + * This parameter returns the region header size which is required to be + * allocated and accounted for in any save/restore operation on a region. + * + * zcullSubregionHeaderSize + * This parameter returns the subregion header size which is required to be + * allocated and accounted for in any save/restore operation on a region. + * + * subregionCount + * This parameter returns the subregion count. + * + * subregionWidthAlignPixels + * This parameter returns the subregion width alignment restrictions in + * pixels used to adjust a surface for proper aliquot coverage + * (typically #TPC's * 16). + * + * subregionHeightAlignPixels + * This parameter returns the subregion height alignment restrictions in + * pixels used to adjust a surface for proper aliquot coverage + * (typically 62). + * + * The caller should compute the size of a zcull region as follows.
+ * (numBytes = aliquots * zcullRegionByteMultiplier + + * zcullRegionHeaderSize + zcullSubregionHeaderSize) + */ +#define NV2080_CTRL_CMD_GR_GET_ZCULL_INFO (0x20801206U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS_SUBREGION_SUPPORTED +#define NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS { + NvU32 widthAlignPixels; + NvU32 heightAlignPixels; + NvU32 pixelSquaresByAliquots; + NvU32 aliquotTotal; + NvU32 zcullRegionByteMultiplier; + NvU32 zcullRegionHeaderSize; + NvU32 zcullSubregionHeaderSize; + NvU32 subregionCount; + NvU32 subregionWidthAlignPixels; + NvU32 subregionHeightAlignPixels; +} NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_CTXSW_PM_MODE + * + * This command is used to set the pm context switch mode for the specified + * channel. A value of NV_ERR_NOT_SUPPORTED is returned if the + * target channel does not support pm context switch mode changes. + * + * hChannel + * This parameter specifies the channel handle of + * the channel that is to have its pm context switch mode changed. + * pmMode + * This parameter specifies the new pm context switch mode. + * Legal values for this parameter include: + * NV2080_CTRL_CTXSW_PM_MODE_NO_CTXSW + * This mode says that the pms are not to be context switched. + * NV2080_CTRL_CTXSW_PM_MODE_CTXSW + * This mode says that the pms in Mode-B are to be context switched. + * NV2080_CTRL_CTXSW_PM_MODE_STREAM_OUT_CTXSW + * This mode says that the pms in Mode-E (stream out) are to be context switched. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + */ +#define NV2080_CTRL_CMD_GR_CTXSW_PM_MODE (0x20801207U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_CTXSW_PM_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_CTXSW_PM_MODE_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV2080_CTRL_GR_CTXSW_PM_MODE_PARAMS { + NvHandle hChannel; + NvU32 pmMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_CTXSW_PM_MODE_PARAMS; + +/* valid pmMode values */ +#define NV2080_CTRL_CTXSW_PM_MODE_NO_CTXSW (0x00000000U) +#define NV2080_CTRL_CTXSW_PM_MODE_CTXSW (0x00000001U) +#define NV2080_CTRL_CTXSW_PM_MODE_STREAM_OUT_CTXSW (0x00000002U) + +/* + * NV2080_CTRL_CMD_GR_CTXSW_ZCULL_BIND + * + * This command is used to set the zcull context switch mode and virtual address + * for the specified channel. A value of NV_ERR_NOT_SUPPORTED is + * returned if the target channel does not support zcull context switch mode + * changes. + * + * hClient + * This parameter specifies the client handle of + * the client that owns the zcull context buffer. This field must match + * the hClient used in the control call for non-kernel clients. + * hChannel + * This parameter specifies the channel handle of + * the channel that is to have its zcull context switch mode changed. + * vMemPtr + * This parameter specifies the 64 bit virtual address + * for the allocated zcull context buffer. + * zcullMode + * This parameter specifies the new zcull context switch mode. + * Legal values for this parameter include: + * NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_GLOBAL + * This mode is the normal zcull operation where it is not + * context switched and there is one set of globally shared + * zcull memory and tables.
This mode is only supported as + * long as all channels use this mode. + * NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_NO_CTXSW + * This mode causes the zcull tables to be reset on a context + * switch, but the zcull buffer will not be saved/restored. + * NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_SEPARATE_BUFFER + * This mode will cause the zcull buffers and tables to be + * saved/restored on context switches. If a share channel + * ID is given (shareChID), then the two channels will share + * the zcull context buffers. + */ +#define NV2080_CTRL_CMD_GR_CTXSW_ZCULL_BIND (0x20801208U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS { + NvHandle hClient; + NvHandle hChannel; + NV_DECLARE_ALIGNED(NvU64 vMemPtr, 8); + NvU32 zcullMode; +} NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS; +/* valid zcullMode values same as above NV2080_CTRL_CTXSW_ZCULL_MODE */ + +/* + * NV2080_CTRL_CMD_GR_CTXSW_PM_BIND + * + * This command is used to set the PM context switch mode and virtual address + * for the specified channel. A value of NV_ERR_NOT_SUPPORTED is + * returned if the target channel does not support PM context switch mode + * changes. + * + * hClient + * This parameter specifies the client handle of + * the client that owns the PM context buffer. + * hChannel + * This parameter specifies the channel handle of + * the channel that is to have its PM context switch mode changed. + * vMemPtr + * This parameter specifies the 64 bit virtual address + * for the allocated PM context buffer. + * pmMode + * This parameter specifies the new PM context switch mode. + * Legal values for this parameter include: + * NV2080_CTRL_GR_SET_CTXSW_PM_MODE_NO_CTXSW + * This mode says that the pms are not to be context switched. + * NV2080_CTRL_GR_SET_CTXSW_PM_MODE_CTXSW + * This mode says that the pms are to be context switched. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + */ +#define NV2080_CTRL_CMD_GR_CTXSW_PM_BIND (0x20801209U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS { + NvHandle hClient; + NvHandle hChannel; + NV_DECLARE_ALIGNED(NvU64 vMemPtr, 8); + NvU32 pmMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS; +/* valid pmMode values same as above NV2080_CTRL_CTXSW_PM_MODE */ + +/* + * NV2080_CTRL_CMD_GR_CTXSW_SETUP_BIND + * + * This command is used to set the Setup context switch mode and virtual address + * for the specified channel. A value of NV_ERR_NOT_SUPPORTED is + * returned if the target channel does not support setup context switch mode + * changes. + * + * hClient + * This parameter specifies the client handle of + * the client that owns the Setup context buffer. This field must match + * the hClient used in the control call for non-kernel clients. + * hChannel + * This parameter specifies the channel handle of + * the channel that is to have its Setup context switch mode changed. + * vMemPtr + * This parameter specifies the 64 bit virtual address + * for the allocated Setup context buffer.
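+ *
+ *   A minimal usage sketch, not part of this header: hClient, hChannel,
+ *   hSubdevice, setupBufferVa, and the RMAPI-style dispatch RmControl()
+ *   below are illustrative placeholders only.
+ *
+ *     NV2080_CTRL_GR_CTXSW_SETUP_BIND_PARAMS params = { 0 };
+ *     params.hClient  = hClient;        // client that owns the buffer
+ *     params.hChannel = hChannel;       // channel whose mode is changed
+ *     params.vMemPtr  = setupBufferVa;  // 64-bit VA of the Setup buffer
+ *     status = RmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GR_CTXSW_SETUP_BIND,
+ *                        &params, sizeof(params));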
+ */ +#define NV2080_CTRL_CMD_GR_CTXSW_SETUP_BIND (0x2080123aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_CTXSW_SETUP_BIND_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_CTXSW_SETUP_BIND_PARAMS_MESSAGE_ID (0x3AU) + +typedef struct NV2080_CTRL_GR_CTXSW_SETUP_BIND_PARAMS { + NvHandle hClient; + NvHandle hChannel; + NV_DECLARE_ALIGNED(NvU64 vMemPtr, 8); +} NV2080_CTRL_GR_CTXSW_SETUP_BIND_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_SET_GPC_TILE_MAP + * + * Send a list of values used to describe GPC/TPC tile mapping tables. + * + * mapValueCount + * This field specifies the number of actual map entries. This count + * should equal the number of TPCs in the system. + * mapValues + * This field is a pointer to a buffer of NvU8 values representing map + * data. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + */ +#define NV2080_CTRL_GR_SET_GPC_TILE_MAP_MAX_VALUES 128U +#define NV2080_CTRL_CMD_GR_SET_GPC_TILE_MAP (0x2080120aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_SET_GPC_TILE_MAP_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_SET_GPC_TILE_MAP_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV2080_CTRL_GR_SET_GPC_TILE_MAP_PARAMS { + NvU32 mapValueCount; + NvU8 mapValues[NV2080_CTRL_GR_SET_GPC_TILE_MAP_MAX_VALUES]; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_SET_GPC_TILE_MAP_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GR_CTXSW_SMPC_MODE + * + * This command is used to set the SMPC context switch mode for the specified + * channel or channel group (TSG). A value of NV_ERR_NOT_SUPPORTED + * is returned if the target channel/TSG does not support SMPC context switch + * mode changes. If a channel is part of a TSG, the user must send in the TSG + * handle and not an individual channel handle; an error will be returned if a + * channel handle is used in this case. + * + * SMPC = SM Performance Counters + * + * hChannel + * This parameter specifies the channel or channel group (TSG) handle + * that is to have its SMPC context switch mode changed. + * If this parameter is set to 0, then the mode below applies to all current + * and future channels (i.e., we will be enabling/disabling global mode) + * smpcMode + * This parameter specifies the new SMPC context switch mode. + * Legal values for this parameter include: + * NV2080_CTRL_GR_SET_CTXSW_SMPC_MODE_NO_CTXSW + * This mode says that the SMPC data is not to be context switched. + * NV2080_CTRL_GR_SET_CTXSW_SMPC_MODE_CTXSW + * This mode says that the SMPC data is to be context switched. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + */ +#define NV2080_CTRL_CMD_GR_CTXSW_SMPC_MODE (0x2080120eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_CTXSW_SMPC_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_CTXSW_SMPC_MODE_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NV2080_CTRL_GR_CTXSW_SMPC_MODE_PARAMS { + NvHandle hChannel; + NvU32 smpcMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_CTXSW_SMPC_MODE_PARAMS; + +/* valid smpcMode values */ +#define NV2080_CTRL_CTXSW_SMPC_MODE_NO_CTXSW (0x00000000U) +#define NV2080_CTRL_CTXSW_SMPC_MODE_CTXSW (0x00000001U) + +/* + * NV2080_CTRL_CMD_GR_GET_SM_TO_GPC_TPC_MAPPINGS + * + * This command returns an array of the mappings between SMs and GPC/TPCs.
+ * + * smId + * An array of the mappings between SMs and GPC/TPCs. + * smCount + * Returns the number of valid mappings in the array. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + */ +#define NV2080_CTRL_CMD_GR_GET_SM_TO_GPC_TPC_MAPPINGS (0x2080120fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_MAX_SM_COUNT 240U +#define NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS_MESSAGE_ID (0xFU) + +typedef struct NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS { + struct { + NvU32 gpcId; + NvU32 tpcId; + } smId[NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_MAX_SM_COUNT]; + NvU32 smCount; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE + * + * This command is used to set the preemption context switch mode for the specified + * channel. A value of NV_ERR_NOT_SUPPORTED is returned if the + * target channel does not support preemption context switch mode changes. + * + * flags + * This field specifies flags for the preemption mode changes. + * These flags tell the callee which mode fields are valid in the call, + * since the graphics and/or compute modes may be changed independently. + * hChannel + * This parameter specifies the channel handle of the channel + * that is to have its preemption context switch mode set. + * gfxpPreemptMode + * This parameter specifies the new Graphics preemption context switch + * mode. Legal values for this parameter include: + * NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_WFI + * This mode is the normal wait-for-idle context switch mode. + * NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_GFXP + * This mode causes the graphics engine to allow preempting the + * channel mid-triangle. + * NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_GFXP_POOL + * This mode causes the graphics engine to use a shared pool of buffers + * to support GfxP with lower memory overhead. + * cilpPreemptMode + * This parameter specifies the new Compute preemption context switch + * mode. Legal values for this parameter include: + * NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_WFI + * This mode is the normal wait-for-idle context switch mode. + * NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_CTA + * This mode causes the compute engine to allow preempting the channel + * at the CTA level. + * NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_CILP + * This mode causes the compute engine to allow preempting the channel + * at the instruction level. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine.
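+ *
+ *   A minimal usage sketch (hClient, hChannel, hSubdevice, and the
+ *   RMAPI-style dispatch RmControl() are illustrative placeholders, not
+ *   defined here); it requests GfxP for graphics while leaving the
+ *   compute mode untouched:
+ *
+ *     NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS params = { 0 };
+ *     params.hChannel = hChannel;
+ *     // FLAGS_GFXP occupies bit 1:1, FLAGS_CILP bit 0:0; set only GFXP
+ *     params.flags = NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_GFXP_SET << 1;
+ *     params.gfxpPreemptMode = NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_GFXP;
+ *     params.cilpPreemptMode = NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_WFI; // ignored
+ *     status = RmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE,
+ *                        &params, sizeof(params));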
+ */ +#define NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE (0x20801210U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS { + NvU32 flags; + NvHandle hChannel; + NvU32 gfxpPreemptMode; + NvU32 cilpPreemptMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS; + +/* valid preemption flags */ +#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_CILP 0:0 +#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_CILP_IGNORE (0x00000000U) +#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_CILP_SET (0x00000001U) +#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_GFXP 1:1 +#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_GFXP_IGNORE (0x00000000U) +#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_GFXP_SET (0x00000001U) + +/* valid Graphics mode values */ +#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_WFI (0x00000000U) +#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_GFXP (0x00000001U) +#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_GFXP_POOL (0x00000002U) + +/* valid Compute mode values */ +#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_WFI (0x00000000U) +#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_CTA (0x00000001U) +#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_CILP (0x00000002U) + +/* valid preemption buffers */ +typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS { + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SETUP = 8, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 9, +} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS; + +/* + * NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND + * + * This command is used to set the preemption context switch mode and virtual + * addresses of the preemption buffers for the specified channel. A value of + * NV_ERR_NOT_SUPPORTED is returned if the target channel does not + * support preemption context switch mode changes. + * + * flags + * This field specifies flags for the preemption mode changes. + * These flags tell the callee which mode fields are valid in the call, + * since the graphics and/or compute modes may be changed independently. + * hClient + * This parameter specifies the client handle of + * the client that owns the preemption context buffer. + * hChannel + * This parameter specifies the channel handle of the channel + * that is to have its preemption context switch mode set. + * vMemPtr + * This parameter specifies the 64 bit virtual address + * for the allocated preemption context buffer. + * gfxpPreemptMode + * This parameter specifies the new Graphics preemption context switch + * mode. Legal values for this parameter include: + * NV2080_CTRL_CTXSW_PREEMPTION_MODE_GFX_WFI + * This mode is the normal wait-for-idle context switch mode.
+ * NV2080_CTRL_CTXSW_PREEMPTION_MODE_GFX_GFXP + * This mode causes the graphics engine to allow preempting the + * channel mid-triangle. + * cilpPreemptMode + * This parameter specifies the new Compute preemption context switch + * mode. Legal values for this parameter include: + * NV2080_CTRL_CTXSW_PREEMPTION_MODE_COMPUTE_WFI + * This mode is the normal wait-for-idle context switch mode. + * NV2080_CTRL_CTXSW_PREEMPTION_MODE_COMPUTE_CTA + * This mode causes the compute engine to allow preempting the channel + * at the CTA level. + * NV2080_CTRL_CTXSW_PREEMPTION_MODE_COMPUTE_CILP + * This mode causes the compute engine to allow preempting the channel + * at the instruction level. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + */ +#define NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND (0x20801211U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_MESSAGE_ID (0x11U) + +typedef struct NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS { + NvU32 flags; + NvHandle hClient; + NvHandle hChannel; + NV_DECLARE_ALIGNED(NvU64 vMemPtrs[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END], 8); + NvU32 gfxpPreemptMode; + NvU32 cilpPreemptMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS; +/* valid mode and flag values same as above NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE */ + +/* + * NV2080_CTRL_CMD_GR_PC_SAMPLING_MODE + * + * This command is used to apply the WAR for PC sampling to avoid a hang in + * multi-ctx scenarios. + * + * hChannel + * This parameter specifies the channel or channel group (TSG) handle + * that is to have its PC Sampling mode changed. + * samplingMode + * This parameter specifies whether sampling is turned ON or OFF. + * Legal values for this parameter include: + * NV2080_CTRL_GR_SET_PC_SAMPLING_MODE_DISABLED + * This mode says that PC sampling is disabled for the current context. + * NV2080_CTRL_GR_SET_PC_SAMPLING_MODE_ENABLED + * This mode says that PC sampling is enabled for the current context. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + */ +#define NV2080_CTRL_CMD_GR_PC_SAMPLING_MODE (0x20801212U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS { + NvHandle hChannel; + NvU32 samplingMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS; + +/* valid samplingMode values */ +#define NV2080_CTRL_PC_SAMPLING_MODE_DISABLED (0x00000000U) +#define NV2080_CTRL_PC_SAMPLING_MODE_ENABLED (0x00000001U) + +/* + * NV2080_CTRL_CMD_GR_GET_ROP_INFO + * + * Gets information about ROPs including the ROP unit count and information + * about ROP operations per clock. + * + * ropUnitCount + * The count of active ROP units. + * ropOperationsFactor + * The number of ROP operations per clock for a single ROP unit. + * ropOperationsCount + * The number of ROP operations per clock across all active ROP units.
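+ *
+ *   A minimal usage sketch (hClient/hSubdevice and the RMAPI-style
+ *   dispatch RmControl() are illustrative placeholders); per the fields
+ *   above, ropOperationsCount equals ropUnitCount * ropOperationsFactor:
+ *
+ *     NV2080_CTRL_GR_GET_ROP_INFO_PARAMS rop = { 0 };
+ *     status = RmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GR_GET_ROP_INFO,
+ *                        &rop, sizeof(rop));
+ *     // e.g. a throughput estimate: rop.ropOperationsCount ops per ROP clock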
+ */ +#define NV2080_CTRL_CMD_GR_GET_ROP_INFO (0x20801213U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_ROP_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_ROP_INFO_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV2080_CTRL_GR_GET_ROP_INFO_PARAMS { + NvU32 ropUnitCount; + NvU32 ropOperationsFactor; + NvU32 ropOperationsCount; +} NV2080_CTRL_GR_GET_ROP_INFO_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GR_GET_CTXSW_STATS + * + * This command is used to get the context switch statistics. The user can + * also add a flag to tell RM to reset the stats counters back to 0. + * + * hChannel + * This parameter specifies the channel or channel group (TSG) handle + * that is to have the stats returned. Note: this must be the TSG handle + * if the channel is part of a TSG. + * flags + * This parameter specifies processing flags. See possible flags below. + * saveCnt + * This parameter returns the number of saves on the channel. + * restoreCnt + * This parameter returns the number of restores on the channel. + * wfiSaveCnt + * This parameter returns the number of WFI saves on the channel. + * ctaSaveCnt + * This parameter returns the number of CTA saves on the channel. + * cilpSaveCnt + * This parameter returns the number of CILP saves on the channel. + * gfxpSaveCnt + * This parameter returns the number of GfxP saves on the channel. + */ +#define NV2080_CTRL_CMD_GR_GET_CTXSW_STATS (0x20801215U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_CTXSW_STATS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_CTXSW_STATS_PARAMS_MESSAGE_ID (0x15U) + +typedef struct NV2080_CTRL_GR_GET_CTXSW_STATS_PARAMS { + NvHandle hChannel; + NvU32 flags; + NvU32 saveCnt; + NvU32 restoreCnt; + NvU32 wfiSaveCnt; + NvU32 ctaSaveCnt; + NvU32 cilpSaveCnt; + NvU32 gfxpSaveCnt; +} NV2080_CTRL_GR_GET_CTXSW_STATS_PARAMS; +/* valid GET_CTXSW_STATS flags settings */ +#define NV2080_CTRL_GR_GET_CTXSW_STATS_FLAGS_RESET 0:0 +#define NV2080_CTRL_GR_GET_CTXSW_STATS_FLAGS_RESET_FALSE (0x00000000U) +#define NV2080_CTRL_GR_GET_CTXSW_STATS_FLAGS_RESET_TRUE (0x00000001U) + + + +/* + * NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_SIZE + * + * This command provides the size and alignment of all context buffers, + * including global and local context buffers, which have been created and + * will be mapped on a context + * + * hChannel [IN] + * This parameter specifies the channel or channel group (TSG) handle + * totalBufferSize [OUT] + * This parameter returns the total context buffers size. + */ +#define NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_SIZE (0x20801218U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS_MESSAGE_ID (0x18U) + +typedef struct NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS { + NvHandle hChannel; + NV_DECLARE_ALIGNED(NvU64 totalBufferSize, 8); +} NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS; + +/* + * NV2080_CTRL_GR_CTX_BUFFER_INFO + * alignment + * Specifies the alignment requirement for each context buffer + * size + * Aligned size of context buffer + * bufferHandle [deprecated] + * Opaque pointer to memdesc. Used by kernel clients for tracking purposes only. + * pageCount + * allocation size in the form of pageCount + * physAddr + * Physical address of the buffer's first page + * bufferType + * NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID type of this buffer + * aperture + * allocation aperture.
Could be SYSMEM, VIDMEM, or UNKNOWN. + * kind + * PTE kind of this allocation. + * pageSize + * Page size of the buffer. + * bIsContigous + * States whether the physical allocation for this buffer is contiguous; + * pageSize has no meaning if this flag is set. + * bGlobalBuffer + * States whether a buffer is global, as global buffers need to be mapped + * only once per TSG. + * bLocalBuffer + * States whether a buffer is local to a channel. + * bDeviceDescendant + * TRUE if the allocation is constructed under a Device or Subdevice. + * uuid + * SHA1 UUID of the Device or Subdevice. Valid when bDeviceDescendant is TRUE. + */ +typedef struct NV2080_CTRL_GR_CTX_BUFFER_INFO { + NV_DECLARE_ALIGNED(NvU64 alignment, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NV_DECLARE_ALIGNED(NvP64 bufferHandle, 8); + NV_DECLARE_ALIGNED(NvU64 pageCount, 8); + NV_DECLARE_ALIGNED(NvU64 physAddr, 8); + NvU32 bufferType; + NvU32 aperture; + NvU32 kind; + NvU32 pageSize; + NvBool bIsContigous; + NvBool bGlobalBuffer; + NvBool bLocalBuffer; + NvBool bDeviceDescendant; + NvU8 uuid[16]; +} NV2080_CTRL_GR_CTX_BUFFER_INFO; +typedef struct NV2080_CTRL_GR_CTX_BUFFER_INFO *PNV2080_CTRL_GR_CTX_BUFFER_INFO; + +#define NV2080_CTRL_GR_MAX_CTX_BUFFER_COUNT 64U + +/* + * NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_INFO + * + * This command provides the size and alignment of all context buffers, + * including global and local context buffers, which have been created and + * will be mapped on a context. + * If the client invoking the command is a kernel client, the buffers are retained. + * + * hUserClient [IN] + * This parameter specifies the client handle that owns this channel. + * hChannel [IN] + * This parameter specifies the channel or channel group (TSG) handle + * bufferCount [OUT] + * This parameter specifies the number of entries in ctxBufferInfo filled + * by the command. + * ctxBufferInfo [OUT] + * Array of context buffer info containing alignment, size etc. + */ +#define NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_INFO (0x20801219U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS_MESSAGE_ID (0x19U) + +typedef struct NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS { + NvHandle hUserClient; + NvHandle hChannel; + NvU32 bufferCount; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_CTX_BUFFER_INFO ctxBufferInfo[NV2080_CTRL_GR_MAX_CTX_BUFFER_COUNT], 8); +} NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS; + +// Aperture flags. The defines should match the defines in mem_desc.h +#define NV2080_CTRL_GR_CTX_BUFFER_INFO_APERTURE_UNKNOWN 0 +#define NV2080_CTRL_GR_CTX_BUFFER_INFO_APERTURE_SYSMEM 1 +#define NV2080_CTRL_GR_CTX_BUFFER_INFO_APERTURE_FBMEM 2 + +/* + * NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER + * This command returns the global logical ordering of SM w.r.t GPCs/TPCs. + * + * NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS + * This structure holds the TPC/SM ordering info. + * + * gpcId + * Logical GPC Id. + * This is the ordering of enabled GPCs post floor sweeping. + * The GPCs are numbered from 0 to N-1, where N is the enabled GPC count. + * + * localTpcId + * Local Logical TPC Id. + * This is the ordering of enabled TPCs within a GPC post floor sweeping. + * This ID is used in conjunction with the gpcId. + * The TPCs are numbered from 0 to N-1, where N is the enabled TPC count for the given GPC. + * + * localSmId + * Local Logical SM Id. + * This is the ordering of enabled SMs within a TPC post floor sweeping.
+ * This ID is used in conjunction with the localTpcId. + * The SMs are numbered from 0 to N-1, where N is the enabled SM count for the given TPC. + * + * globalTpcId + * Global Logical TPC Id. + * This is the ordering of all enabled TPCs in the GPU post floor sweeping. + * The TPCs are numbered from 0 to N-1, where N is the enabled TPC count across all GPCs. + * + * globalSmId + * Global Logical SM Id array. + * This is the global ordering of all enabled SMs in the GPU post floor sweeping. + * The SMs are numbered from 0 to N-1, where N is the enabled SM count across all GPCs. + * + * virtualGpcId + * Virtual GPC Id. + * This is the ordering of enabled GPCs post floor sweeping (ordered by + * increasing TPC count). The GPCs are numbered from 0 to N-1, where N is the + * enabled GPC count, and 8-23 for singleton TPC holders. + * + * migratableTpcId + * Migratable TPC Id. + * This is the same as the local TPC Id for virtual GPCs 0-8 (true physical GPCs) and 0 for + * virtual GPCs 8-23 that represent singleton TPCs. + * + * numSm + * Enabled SM count across all GPCs. + * This represents the valid entries in the globalSmId array. + * + * numTpc + * Enabled TPC count across all GPCs. + * + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * + * ugpuId + * Specifies the uGPU ID on Hopper+. + * + */ +#define NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER (0x2080121bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER_MAX_SM_COUNT 512U + +#define NV2080_CTRL_GR_DISABLED_SM_VGPC_ID 0xFFU + +#define NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS_MESSAGE_ID (0x1BU) + +typedef struct NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS { + struct { + NvU16 gpcId; + NvU16 localTpcId; + NvU16 localSmId; + NvU16 globalTpcId; + NvU16 virtualGpcId; + NvU16 migratableTpcId; + NvU16 ugpuId; + NvU16 physicalCpcId; + NvU16 virtualTpcId; + } globalSmId[NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER_MAX_SM_COUNT]; + + NvU16 numSm; + NvU16 numTpc; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS; + +/* +* NV2080_CTRL_CMD_GR_GET_CURRENT_RESIDENT_CHANNEL +* +* This command gives the current resident channel on the GR engine +* +* chID [OUT] +* RM returns the current resident channel on the GR engine +* grRouteInfo [IN] +* This parameter specifies the routing information used to +* disambiguate the target GR engine. +*/ +#define NV2080_CTRL_CMD_GR_GET_CURRENT_RESIDENT_CHANNEL (0x2080121cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_CURRENT_RESIDENT_CHANNEL_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_CURRENT_RESIDENT_CHANNEL_PARAMS_MESSAGE_ID (0x1CU) + +typedef struct NV2080_CTRL_GR_GET_CURRENT_RESIDENT_CHANNEL_PARAMS { + NvU32 chID; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_GET_CURRENT_RESIDENT_CHANNEL_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GET_VAT_ALARM_DATA + * + * This command provides the _VAT_ALARM data, i.e., error and warning counters and + * timestamps, along with the max GPC and TPC-per-GPC counts. + * + * smVatAlarm [OUT] + * VAT Alarm data array per SM containing per-GPC per-TPC counter and + * timestamp values for error and warning alarms. + * maxGpcCount [OUT] + * This parameter returns the max GPC count. + * maxTpcPerGpcCount [OUT] + * This parameter returns the max TPC per GPC count.
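+ *
+ *   A minimal usage sketch (hClient/hSubdevice and the RMAPI-style
+ *   dispatch RmControl() are illustrative placeholders); it walks the
+ *   per-GPC/per-TPC alarm data up to the returned counts:
+ *
+ *     NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS p = { 0 };
+ *     status = RmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GR_GET_VAT_ALARM_DATA,
+ *                        &p, sizeof(p));
+ *     for (NvU32 g = 0; g < p.maxGpcCount; g++)
+ *         for (NvU32 t = 0; t < p.maxTpcPerGpcCount; t++)
+ *             if (p.smVatAlarm.gpc[g].tpc[t].errorCounter != 0)
+ *                 ; // inspect errorTimestamp / warningCounter as needed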
+ */ +#define NV2080_CTRL_CMD_GR_GET_VAT_ALARM_MAX_GPC_COUNT 10U +#define NV2080_CTRL_CMD_GR_GET_VAT_ALARM_MAX_TPC_PER_GPC_COUNT 10U + +#define NV2080_CTRL_CMD_GR_GET_VAT_ALARM_DATA (0x2080121dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS_MESSAGE_ID" */ + +typedef struct NV2080_CTRL_GR_VAT_ALARM_DATA_PER_TPC { + NV_DECLARE_ALIGNED(NvU64 errorCounter, 8); + NV_DECLARE_ALIGNED(NvU64 errorTimestamp, 8); + NV_DECLARE_ALIGNED(NvU64 warningCounter, 8); + NV_DECLARE_ALIGNED(NvU64 warningTimestamp, 8); +} NV2080_CTRL_GR_VAT_ALARM_DATA_PER_TPC; + +typedef struct NV2080_CTRL_GR_VAT_ALARM_DATA_PER_GPC { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_VAT_ALARM_DATA_PER_TPC tpc[NV2080_CTRL_CMD_GR_GET_VAT_ALARM_MAX_TPC_PER_GPC_COUNT], 8); +} NV2080_CTRL_GR_VAT_ALARM_DATA_PER_GPC; + +typedef struct NV2080_CTRL_GR_VAT_ALARM_DATA { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_VAT_ALARM_DATA_PER_GPC gpc[NV2080_CTRL_CMD_GR_GET_VAT_ALARM_MAX_GPC_COUNT], 8); +} NV2080_CTRL_GR_VAT_ALARM_DATA; + +#define NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS_MESSAGE_ID (0x1DU) + +typedef struct NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_VAT_ALARM_DATA smVatAlarm, 8); + NvU32 maxGpcCount; + NvU32 maxTpcPerGpcCount; +} NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS; +typedef struct NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS *PNV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GET_ATTRIBUTE_BUFFER_SIZE + * + * This command provides the size of GR attribute buffer. + * + * attribBufferSize [OUT] + * This parameter returns the attribute buffer size. + */ +#define NV2080_CTRL_CMD_GR_GET_ATTRIBUTE_BUFFER_SIZE (0x2080121eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS_MESSAGE_ID (0x1EU) + +typedef struct NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS { + NvU32 attribBufferSize; +} NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GFX_POOL_QUERY_SIZE + * + * This API queries size parameters for a requested maximum graphics preemption + * pool size. It is only available to kernel callers. + * + * NV2080_CTRL_GR_GFX_POOL_QUERY_SIZE_PARAMS + * struct to return the size parameters + * + * maxSlots + * Input specifying the maximum number of slots; RM will calculate the output + * parameters based on this. Must be non-zero. + * ctrlStructSize + * Output indicating the required size in bytes of the control structure to + * support a pool of maxSlots size. + * ctrlStructAlign + * Output indicating the required alignment of the control structure. + * poolSize + * Output indicating the required size in bytes of the GfxP Pool.
+ * poolAlign + * Output indicating the required alignment of the GfxP Pool. + * slotStride + * The number of bytes in each slot; i * slotStride gives the offset from the + * base of the pool to slot i. + */ +#define NV2080_CTRL_CMD_GR_GFX_POOL_QUERY_SIZE (0x2080121fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GFX_POOL_QUERY_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GFX_POOL_QUERY_SIZE_PARAMS_MESSAGE_ID (0x1FU) + +typedef struct NV2080_CTRL_GR_GFX_POOL_QUERY_SIZE_PARAMS { + NvU32 maxSlots; + NvU32 slotStride; + NV_DECLARE_ALIGNED(NvU64 ctrlStructSize, 8); + NV_DECLARE_ALIGNED(NvU64 ctrlStructAlign, 8); + NV_DECLARE_ALIGNED(NvU64 poolSize, 8); + NV_DECLARE_ALIGNED(NvU64 poolAlign, 8); +} NV2080_CTRL_GR_GFX_POOL_QUERY_SIZE_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GFX_POOL_INITIALIZE + * + * This API takes a CPU pointer to a GFxP Pool Control Structure and does the + * required onetime initialization. It should be called once and only once + * before a pool is used. It is only accessible to kernel callers. + * + * NV2080_CTRL_GR_GFX_POOL_INITIALIZE_PARAMS + * struct to hand in the required info to RM + * + * maxSlots + * Max pool slots + * hMemory + * Handle to GFX Pool memory + * offset + * Offset of the control structure in GFX Pool memory + * size + * Size of the control structure + */ +#define NV2080_CTRL_CMD_GR_GFX_POOL_INITIALIZE (0x20801220U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GFX_POOL_INITIALIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GFX_POOL_INITIALIZE_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV2080_CTRL_GR_GFX_POOL_INITIALIZE_PARAMS { + NvU32 maxSlots; + NvHandle hMemory; + NvU32 offset; + NvU32 size; +} NV2080_CTRL_GR_GFX_POOL_INITIALIZE_PARAMS; + +#define NV2080_CTRL_GR_GFX_POOL_MAX_SLOTS 64U + +/* + * NV2080_CTRL_CMD_GR_GFX_POOL_ADD_SLOTS + * + * This API adds a list of buffer slots to a given control structure. It can + * only be called when no channel using the given pool is running or may become + * running for the duration of this call. If more slots are added than there + * is room for in the control structure, the behavior is undefined. It is only + * accessible to kernel callers. + * + * NV2080_CTRL_GR_GFX_POOL_ADD_SLOTS_PARAMS + * + * numSlots + * This input indicates how many slots are being added and are contained in the slots parameter + * slots + * This input contains an array of the slots to be added to the control structure + * hMemory + * Handle to GFX Pool memory + * offset + * Offset of the control structure in GFX Pool memory + * size + * Size of the control structure + */ +#define NV2080_CTRL_CMD_GR_GFX_POOL_ADD_SLOTS (0x20801221U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GFX_POOL_ADD_SLOTS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GFX_POOL_ADD_SLOTS_PARAMS_MESSAGE_ID (0x21U) + +typedef struct NV2080_CTRL_GR_GFX_POOL_ADD_SLOTS_PARAMS { + NvU32 numSlots; + NvU32 slots[NV2080_CTRL_GR_GFX_POOL_MAX_SLOTS]; + NvHandle hMemory; + NvU32 offset; + NvU32 size; +} NV2080_CTRL_GR_GFX_POOL_ADD_SLOTS_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GFX_POOL_REMOVE_SLOTS + * + * This API removes buffer slots from a given control structure. It can + * only be called when no channel using the given pool is running or may become + * running for the duration of this call. It can operate in two modes: either + * it will remove a specified number of slots, or remove a specified list of slots.
+ * + * It is only accessible to kernel callers. + * + * NV2080_CTRL_CMD_GR_GFX_POOL_REMOVE_SLOTS_PARAMS + * + * numSlots + * This input indicates how many slots are being removed. If + * bRemoveSpecificSlots is true, then it also indicates how many entries in + * the slots array are populated. + * slots + * This array is either an input or output. If bRemoveSpecificSlots is true, + * then this will contain the list of slots to remove. If it is false, then + * it will be populated by RM with the indexes of the slots that were + * removed. + * bRemoveSpecificSlots + * This input determines which mode the call will run in. If true, the caller + * will specify the list of slots they want removed; if any of those slots + * are not on the freelist, the call will fail. If false, they only specify + * the number of slots they want removed and RM will pick up to that + * many. If there are not enough slots on the freelist to remove the + * requested amount, RM will return the number it was able to remove. + * hMemory + * Handle to GFX Pool memory + * offset + * Offset of the control structure in GFX Pool memory + * size + * Size of the control structure + */ +#define NV2080_CTRL_CMD_GR_GFX_POOL_REMOVE_SLOTS (0x20801222U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GFX_POOL_REMOVE_SLOTS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GFX_POOL_REMOVE_SLOTS_PARAMS_MESSAGE_ID (0x22U) + +typedef struct NV2080_CTRL_GR_GFX_POOL_REMOVE_SLOTS_PARAMS { + NvU32 numSlots; + NvU32 slots[NV2080_CTRL_GR_GFX_POOL_MAX_SLOTS]; + NvBool bRemoveSpecificSlots; + NvHandle hMemory; + NvU32 offset; + NvU32 size; +} NV2080_CTRL_GR_GFX_POOL_REMOVE_SLOTS_PARAMS; + + + +#define NV2080_CTRL_CMD_GR_GET_CAPS_V2 (0x20801227U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x27U) + +typedef NV0080_CTRL_GR_GET_CAPS_V2_PARAMS NV2080_CTRL_GR_GET_CAPS_V2_PARAMS; + +#define NV2080_CTRL_CMD_GR_GET_INFO_V2 (0x20801228U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_INFO_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_INFO_V2_PARAMS_MESSAGE_ID (0x28U) + +typedef NV0080_CTRL_GR_GET_INFO_V2_PARAMS NV2080_CTRL_GR_GET_INFO_V2_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GR_GET_GPC_MASK + * + * This command returns a mask of enabled GPCs for the associated subdevice. + * + * grRouteInfo[IN] + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * + * gpcMask[OUT] + * This parameter returns a mask of enabled GPCs. Each GPC has an ID + * that's equivalent to the corresponding bit position in the mask. + */ +#define NV2080_CTRL_CMD_GR_GET_GPC_MASK (0x2080122aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_GPC_MASK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_GPC_MASK_PARAMS_MESSAGE_ID (0x2AU) + +typedef struct NV2080_CTRL_GR_GET_GPC_MASK_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvU32 gpcMask; +} NV2080_CTRL_GR_GET_GPC_MASK_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GET_TPC_MASK + * + * This command returns a mask of enabled TPCs for a specified GPC. + * + * grRouteInfo[IN] + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * + * gpcId[IN] + * This parameter specifies the GPC for which TPC information is + * to be retrieved.
If the GPC with this ID is not enabled, this command + * will return a tpcMask value of zero. + * + * tpcMask[OUT] + * This parameter returns a mask of enabled TPCs for the specified GPC. + * Each TPC has an ID that's equivalent to the corresponding bit + * position in the mask. + */ +#define NV2080_CTRL_CMD_GR_GET_TPC_MASK (0x2080122bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_TPC_MASK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_TPC_MASK_PARAMS_MESSAGE_ID (0x2BU) + +typedef struct NV2080_CTRL_GR_GET_TPC_MASK_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvU32 gpcId; + NvU32 tpcMask; +} NV2080_CTRL_GR_GET_TPC_MASK_PARAMS; + +#define NV2080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE (0x2080122cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_SET_TPC_PARTITION_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_SET_TPC_PARTITION_MODE_PARAMS_MESSAGE_ID (0x2CU) + +typedef NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS NV2080_CTRL_GR_SET_TPC_PARTITION_MODE_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GET_ENGINE_CONTEXT_PROPERTIES + * + * This command is used to provide the caller with the alignment and size + * of the context save region for an engine + * + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * engineId + * This parameter is an input parameter specifying the engineId for which + * the alignment/size is requested. + * alignment + * This parameter is an output parameter which will be filled in with the + * minimum alignment requirement. + * size + * This parameter is an output parameter which will be filled in with the + * minimum size of the context save region for the engine. + * bInfoPopulated + * This parameter will be set if alignment and size are already set with + * valid values from a previous call. + */ + +#define NV2080_CTRL_CMD_GR_GET_ENGINE_CONTEXT_PROPERTIES (0x2080122dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS_MESSAGE_ID (0x2DU) + +typedef struct NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvU32 engineId; + NvU32 alignment; + NvU32 size; + NvBool bInfoPopulated; +} NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER + * + * This command provides an interface to retrieve the speed select values of + * various instruction types. + * + * grRouteInfo[IN] + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * + * imla0[OUT] + * The current speed select for IMLA0. + * + * fmla16[OUT] + * The current speed select for FMLA16. + * + * dp[OUT] + * The current speed select for DP. + * + * fmla32[OUT] + * The current speed select for FMLA32. + * + * ffma[OUT] + * The current speed select for FFMA. + * + * imla1[OUT] + * The current speed select for IMLA1. + * + * imla2[OUT] + * The current speed select for IMLA2. + * + * imla3[OUT] + * The current speed select for IMLA3. + * + * imla4[OUT] + * The current speed select for IMLA4.
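+ *
+ *   A minimal usage sketch (hClient/hSubdevice and the RMAPI-style
+ *   dispatch RmControl() are illustrative placeholders); grRouteInfo is
+ *   left zeroed, which is sufficient when MIG is disabled:
+ *
+ *     NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS p = { 0 };
+ *     status = RmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER,
+ *                        &p, sizeof(p));
+ *     if (p.ffma == NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_2)
+ *         ; // FFMA issues at half rate on this configuration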
+ */
+#define NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER (0x20801230U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_FULL_SPEED         (0x0U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_2  (0x1U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_4  (0x2U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_8  (0x3U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_16 (0x4U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_32 (0x5U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_64 (0x6U)
+
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_FULL_SPEED         (0x0U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_REDUCED_SPEED_1_2  (0x1U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_REDUCED_SPEED_1_4  (0x2U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_REDUCED_SPEED_1_8  (0x3U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_REDUCED_SPEED_1_16 (0x4U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_REDUCED_SPEED_1_32 (0x5U)
+
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_DP_FULL_SPEED    (0x0U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_DP_REDUCED_SPEED (0x1U)
+
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_FULL_SPEED         (0x0U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_REDUCED_SPEED_1_2  (0x1U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_REDUCED_SPEED_1_4  (0x2U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_REDUCED_SPEED_1_8  (0x3U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_REDUCED_SPEED_1_16 (0x4U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_REDUCED_SPEED_1_32 (0x5U)
+
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_FULL_SPEED         (0x0U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_2  (0x1U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_4  (0x2U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_8  (0x3U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_16 (0x4U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_32 (0x5U)
+
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_FULL_SPEED         (0x0U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_2  (0x1U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_4  (0x2U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_8  (0x3U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_16 (0x4U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_32 (0x5U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_64 (0x6U)
+
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_FULL_SPEED         (0x0U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_2  (0x1U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_4  (0x2U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_8  (0x3U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_16 (0x4U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_32 (0x5U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_64 (0x6U)
+
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_FULL_SPEED         (0x0U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_2  (0x1U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_4  (0x2U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_8  (0x3U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_16 (0x4U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_32 (0x5U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_64 (0x6U)
+
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_FULL_SPEED         (0x0U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_2  (0x1U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_4  (0x2U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_8  (0x3U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_16 (0x4U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_32 (0x5U)
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_64 (0x6U)
+
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS_MESSAGE_ID (0x30U)
+
+typedef struct NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+    NvU8 imla0;
+    NvU8 fmla16;
+    NvU8 dp;
+    NvU8 fmla32;
+    NvU8 ffma;
+    NvU8 imla1;
+    NvU8 imla2;
+    NvU8 imla3;
+    NvU8 imla4;
+} NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS;
+
+#define NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_MAX_LIST_SIZE (0xFFU)
+#define NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_FMLA16        (0x0U)
+#define NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_DP            (0x1U)
+#define NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_FMLA32        (0x2U)
+#define NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_FFMA          (0x3U)
+#define NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_IMLA0         (0x4U)
+#define NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_IMLA1         (0x5U)
+#define NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_IMLA2         (0x6U)
+#define NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_IMLA3         (0x7U)
+#define NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_IMLA4         (0x8U)
+#define NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_FP16          (0x9U)
+#define NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_FP32          (0xAU)
+#define NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_DFMA          (0xBU)
+#define NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_DMLA          (0xCU)
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER_V2
+ *
+ * This command provides an interface to retrieve the speed select values of
+ * various instruction types.
+ *
+ *   smIssueRateModifierListSize
+ *     This field specifies the number of entries on the caller's
+ *     smIssueRateModifierList.
+ *     When the caller passes smIssueRateModifierListSize = 0, all fuse
+ *     values are returned.
+ *   smIssueRateModifierList
+ *     This field specifies a pointer in the caller's address space
+ *     to the buffer into which the speed select values are to be returned.
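+ *
+ * Illustrative example (assuming the standard index/data layout of
+ * NVXXXX_CTRL_XXX_INFO entries): to query only the FFMA speed select, a
+ * caller could set
+ *
+ *   params.smIssueRateModifierListSize = 1;
+ *   params.smIssueRateModifierList[0].index =
+ *       NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_FFMA;
+ *
+ * and read the result back from smIssueRateModifierList[0].data once the
+ * control call completes.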
+ */
+#define NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER_V2 (0x2080123cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_V2_PARAMS_MESSAGE_ID" */
+
+typedef NVXXXX_CTRL_XXX_INFO NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2;
+
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_V2_PARAMS_MESSAGE_ID (0x3CU)
+
+typedef struct NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_V2_PARAMS {
+    NvU32 smIssueRateModifierListSize;
+    NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2 smIssueRateModifierList[NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_MAX_LIST_SIZE];
+} NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_V2_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID
+ *
+ * *DEPRECATED* Use NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID_V2 instead
+ *
+ * This command is used to create a FECS bind-point to an event buffer that
+ * is filtered by UID.
+ *
+ *   hEventBuffer[IN]
+ *     The event buffer to bind to
+ *
+ *   recordSize[IN]
+ *     The size of the FECS record in bytes
+ *
+ *   levelOfDetail[IN]
+ *     One of NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_:
+ *       FULL: Report all CtxSw events
+ *       SIMPLE: Report ACTIVE_REGION_START and ACTIVE_REGION_END only
+ *       COMPAT: Events that KMD is interested in (for backwards compatibility)
+ *       CUSTOM: Report events in the eventFilter field
+ *     NOTE: RM may override the level-of-detail depending on the caller
+ *
+ *   eventFilter[IN]
+ *     Bitmask of events to report if levelOfDetail is CUSTOM
+ *
+ *   bAllUsers[IN]
+ *     Only report FECS CtxSw data for the current user if false, for all users if true
+ */
+
+#define NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID (0x20801231U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS_MESSAGE_ID" */
+
+typedef enum NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD {
+    NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_FULL = 0,
+    NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_SIMPLE = 1,
+    NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_COMPAT = 2,
+    NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_CUSTOM = 3,
+} NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD;
+
+#define NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS_MESSAGE_ID (0x31U)
+
+typedef struct NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS {
+    NvHandle hEventBuffer;
+    NvU32    recordSize;
+    NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD levelOfDetail;
+    NvU32    eventFilter;
+    NvBool   bAllUsers;
+} NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_PHYS_GPC_MASK
+ *
+ * This command returns a mask of physical GPC Ids for the associated syspipe.
+ *
+ *   physSyspipeId[IN]
+ *     This parameter specifies the syspipe for which the physical GPC mask is
+ *     requested.
+ *
+ *   gpcMask[OUT]
+ *     This parameter returns a mask of the GPCs mapped to the provided
+ *     syspipe. Each GPC-ID has a corresponding bit position in the mask.
+ */
+#define NV2080_CTRL_CMD_GR_GET_PHYS_GPC_MASK (0x20801232U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS_MESSAGE_ID (0x32U)
+
+typedef struct NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS {
+    NvU32 physSyspipeId;
+    NvU32 gpcMask;
+} NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_PPC_MASK
+ *
+ * This command returns a mask of enabled PPCs for a specified GPC.
+ *
+ *   grRouteInfo[IN]
+ *     This parameter specifies the routing information used to
+ *     disambiguate the target GR engine.
+ *
+ *   gpcId[IN]
+ *     This parameter specifies the GPC for which PPC information is
+ *     to be retrieved. If the GPC with this ID is not enabled, this command
+ *     will return a ppcMask value of zero.
+ *
+ *   ppcMask[OUT]
+ *     This parameter returns a mask of enabled PPCs for the specified GPC.
+ *     Each PPC has an ID that's equivalent to the corresponding bit
+ *     position in the mask.
+ */
+#define NV2080_CTRL_CMD_GR_GET_PPC_MASK (0x20801233U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_PPC_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_PPC_MASK_PARAMS_MESSAGE_ID (0x33U)
+
+typedef struct NV2080_CTRL_GR_GET_PPC_MASK_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+    NvU32 gpcId;
+    NvU32 ppcMask;
+} NV2080_CTRL_GR_GET_PPC_MASK_PARAMS;
+
+#define NV2080_CTRL_CMD_GR_GET_NUM_TPCS_FOR_GPC (0x20801234U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS_MESSAGE_ID (0x34U)
+
+typedef struct NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS {
+    NvU32 gpcId;
+    NvU32 numTpcs;
+} NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_CTXSW_MODES
+ *
+ * This command is used to get context switch modes for the specified
+ * channel. A value of NV_ERR_NOT_SUPPORTED is returned if the
+ * target channel does not support context switch mode changes.
+ *
+ *   hChannel
+ *     This parameter specifies the channel handle of
+ *     the channel that is to have its context switch modes retrieved.
+ *   zcullMode
+ *     See NV2080_CTRL_CMD_GR_CTXSW_ZCULL_MODE for possible return values
+ *   pmMode
+ *     See NV2080_CTRL_CMD_GR_CTXSW_PM_MODE for possible return values
+ *   smpcMode
+ *     See NV2080_CTRL_CMD_GR_CTXSW_SMPC_MODE for possible return values
+ *   cilpPreemptMode
+ *     See NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE for possible return values
+ *   gfxpPreemptMode
+ *     See NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE for possible return values
+ */
+#define NV2080_CTRL_CMD_GR_GET_CTXSW_MODES (0x20801235U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS_MESSAGE_ID (0x35U)
+
+typedef struct NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS {
+    NvHandle hChannel;
+    NvU32    zcullMode;
+    NvU32    pmMode;
+    NvU32    smpcMode;
+    NvU32    cilpPreemptMode;
+    NvU32    gfxpPreemptMode;
+} NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_GPC_TILE_MAP
+ *
+ * Get a list of values used to describe GPC/TPC tile mapping tables.
+ *
+ *   mapValueCount
+ *     This field specifies the number of actual map entries. This count
+ *     should equal the number of TPCs in the system.
+ *   mapValues
+ *     This field is a pointer to a buffer of NvU8 values representing map
+ *     data.
+ *   grRouteInfo
+ *     This parameter specifies the routing information used to
+ *     disambiguate the target GR engine.
+ */
+#define NV2080_CTRL_CMD_GR_GET_GPC_TILE_MAP (0x20801236U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_GPC_TILE_MAP_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_GPC_TILE_MAP_PARAMS_MESSAGE_ID (0x36U)
+
+typedef NV2080_CTRL_GR_SET_GPC_TILE_MAP_PARAMS NV2080_CTRL_GR_GET_GPC_TILE_MAP_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_ZCULL_MASK
+ *
+ * This command returns a mask of enabled ZCULLs for a specified GPC.
+ *
+ *   gpcId[IN]
+ *     This parameter, a physical GPC index, specifies the GPC for which ZCULL
+ *     information is to be retrieved. If the GPC with this ID is not enabled,
+ *     this command will return a zcullMask value of zero.
+ *
+ *   zcullMask[OUT]
+ *     This parameter returns a mask of enabled ZCULLs for the specified GPC.
+ *     Each ZCULL has an ID that's equivalent to the corresponding bit
+ *     position in the mask.
+ */
+
+#define NV2080_CTRL_CMD_GR_GET_ZCULL_MASK (0x20801237U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS_MESSAGE_ID (0x37U)
+
+typedef struct NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS {
+    NvU32 gpcId;
+    NvU32 zcullMask;
+} NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID_V2
+ *
+ * This command is used to create a FECS bind-point to an event buffer that
+ * is filtered by UID.
+ *
+ *   hEventBuffer[IN]
+ *     The event buffer to bind to
+ *
+ *   recordSize[IN]
+ *     The size of the FECS record in bytes
+ *
+ *   levelOfDetail[IN]
+ *     One of NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_:
+ *       FULL: Report all CtxSw events
+ *       SIMPLE: Report ACTIVE_REGION_START and ACTIVE_REGION_END only
+ *       COMPAT: Events that KMD is interested in (for backwards compatibility)
+ *       CUSTOM: Report events in the eventFilter field
+ *     NOTE: RM may override the level-of-detail depending on the caller
+ *
+ *   eventFilter[IN]
+ *     Bitmask of events to report if levelOfDetail is CUSTOM
+ *
+ *   bAllUsers[IN]
+ *     Only report FECS CtxSw data for the current user if false, for all users if true
+ *
+ *   reasonCode [OUT]
+ *     Reason for failure
+ */
+#define NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID_V2 (0x20801238U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS_MESSAGE_ID" */
+
+typedef enum NV2080_CTRL_GR_FECS_BIND_EVTBUF_REASON_CODE {
+    NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NONE = 0,
+    NV2080_CTRL_GR_FECS_BIND_REASON_CODE_GPU_TOO_OLD = 1,
+    NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NOT_ENABLED_GPU = 2,
+    NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NOT_ENABLED = 3,
+    NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NEED_ADMIN = 4,
+    NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NEED_CAPABILITY = 5,
+} NV2080_CTRL_GR_FECS_BIND_EVTBUF_REASON_CODE;
+
+#define NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS_MESSAGE_ID (0x38U)
+
+typedef struct NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS {
+    NvHandle hEventBuffer;
+    NvU32    recordSize;
+    NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD levelOfDetail;
+    NvU32    eventFilter;
+    NvBool   bAllUsers;
+    NvU32    reasonCode;
+} NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_GFX_GPC_AND_TPC_INFO
+ *
+ * This command retrieves information on GFX-capable GPCs and TPCs for a
+ * specific GR engine.
+ *
+ *   grRouteInfo[IN]
+ *     This parameter specifies the routing information used to
+ *     disambiguate the target GR engine.
+ *
+ *   physGfxGpcMask [OUT]
+ *     Physical mask of Gfx capable GPCs
+ *
+ *   numGfxTpc [OUT]
+ *     Total number of Gfx capable TPCs
+ */
+#define NV2080_CTRL_CMD_GR_GET_GFX_GPC_AND_TPC_INFO (0x20801239U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_GFX_GPC_AND_TPC_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_GFX_GPC_AND_TPC_INFO_PARAMS_MESSAGE_ID (0x39U)
+
+typedef struct NV2080_CTRL_GR_GET_GFX_GPC_AND_TPC_INFO_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+    NvU32 physGfxGpcMask;
+    NvU32 numGfxTpc;
+} NV2080_CTRL_GR_GET_GFX_GPC_AND_TPC_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_TPC_RECONFIG_MASK
+ *
+ * This command returns the TPC reconfig mask for a specific GPC
+ *
+ *   gpc[IN]
+ *     The GPC for which the TPC reconfig mask needs to be queried.
+ *     The GPC should be specified as a logical index.
+ *
+ *   tpcReconfigMask[OUT]
+ *     Mask of reconfigurable TPCs in the specified GPC
+ *
+ *   grRouteInfo[IN]
+ *     This parameter specifies the routing information used to
+ *     disambiguate the target GR engine.
+ */
+#define NV2080_CTRL_CMD_GR_GET_TPC_RECONFIG_MASK (0x2080123bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_TPC_RECONFIG_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_TPC_RECONFIG_MASK_PARAMS_MESSAGE_ID (0x3bU)
+
+typedef struct NV2080_CTRL_GR_GET_TPC_RECONFIG_MASK_PARAMS {
+    NvU32 gpc;
+    NvU32 tpcReconfigMask;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_GR_GET_TPC_RECONFIG_MASK_PARAMS;
+
+/* _ctrl2080gr_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h
new file mode 100644
index 0000000..8d41d3a
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h
@@ -0,0 +1,299 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080grmgr.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/* NV20_SUBDEVICE_XX grmgr control commands and parameters */
+
+//
+// NV2080_CTRL_CMD_GRMGR_GET_GR_FS_INFO
+//
+// This control call works as a batched query interface where we
+// have multiple different queries that can be passed in
+// and RM will return the associated data and status type.
+// If there is any error in NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS,
+// we will immediately fail the call.
+// However, if there is an error in the query-specific calls, we will
+// log the error and march on.
+//
+// NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS
+//   numQueries[in]
+//     - Specifies the number of valid queries that the caller will be passing in
+//
+// Possible status values returned are:
+//   NV_OK
+//   NV_ERR_INVALID_ARGUMENT
+//   NV_ERR_INVALID_STATE
+//
+#define NV2080_CTRL_CMD_GRMGR_GET_GR_FS_INFO (0x20803801) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GRMGR_INTERFACE_ID << 8) | NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_MESSAGE_ID" */
+
+// Max number of queries that can be batched in a single call to NV2080_CTRL_CMD_GRMGR_GET_GR_FS_INFO
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_MAX_QUERIES 96
+
+//
+// Preference is to keep the max. size of the union at 24 bytes (i.e. 6 32-bit
+// members) so that the size of the entire query struct is maintained at
+// 32 bytes, to ensure that the overall params struct does not exceed 4kB
+//
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_MAX_SIZE 32
+#define NV2080_CTRL_GRMGR_MAX_SMC_IDS 8
+
+/*!
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS
+ *   gpcCount[out]
+ *     - Number of logical/local GPCs which the client can use to create the
+ *       logical/local mask, respectively
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS {
+    NvU32 gpcCount;    // param[out] - logical/local GPC count
+} NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS;
+
+/*!
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS
+ *   gpcId[in]
+ *     - Logical/local GPC ID
+ *   chipletGpcMap[out]
+ *     - Returns chiplet GPC ID for legacy case and device monitoring client
+ *     - Returns local GPC ID (== input gpcId) for SMC client
+ *     - Does not support DM attribution case
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS {
+    NvU32 gpcId;           // param[in]  - logical/local GPC ID
+    NvU32 chipletGpcMap;   // param[out] - chiplet GPC ID
+} NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS;
+
+/*!
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS
+ *   gpcId[in]
+ *     - Logical/local GPC ID
+ *   tpcMask[out]
+ *     - Returns physical TPC mask for legacy, DM client and SMC cases
+ *     - Does not support DM attribution case
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS {
+    NvU32 gpcId;     // param[in]  - logical/local GPC ID
+    NvU32 tpcMask;   // param[out] - physical TPC mask
+} NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS;
+
+/*!
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS
+ *   gpcId[in]
+ *     - Logical/local GPC ID
+ *   ppcMask[out]
+ *     - Returns physical PPC mask for legacy, DM client and SMC cases
+ *     - Does not support DM attribution case
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS {
+    NvU32 gpcId;     // param[in]  - logical/local GPC ID
+    NvU32 ppcMask;   // param[out] - physical PPC mask
+} NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS;
+
+/*!
+ * !!! DEPRECATED - This query will return NV_ERR_NOT_SUPPORTED since deleting
+ * it would break driver compatibility !!!
+ *
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS
+ *   swizzId[in]
+ *     - Swizz ID of partition
+ *     - A DM client with an invalid swizz ID will fail this call
+ *     - This parameter is not compulsory for an SMC client; the subscription
+ *       itself will do the necessary validation.
+ *   gpcId[in]
+ *     - Logical/local GPC ID
+ *   chipletGpcMap[out]
+ *     - Returns chiplet GPC ID for legacy case and device monitoring client
+ *     - Returns local GPC ID (== input gpcId) for SMC client
+ *     - Does not support non-attribution case for DM client
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS {
+    NvU32 swizzId;         // param[in]  - swizz ID of partition
+    NvU32 gpcId;           // param[in]  - logical/local GPC ID
+    NvU32 chipletGpcMap;   // param[out] - chiplet GPC ID
+} NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS;
+
+/*!
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS
+ *   gpcId[in]
+ *     - Logical/local GPC ID
+ *   ropMask[out]
+ *     - Returns physical ROP mask for legacy and DM client cases
+ *     - Returns logical ROP mask for SMC
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS {
+    NvU32 gpcId;     // param[in]  - logical/local GPC ID
+    NvU32 ropMask;   // param[out] - physical ROP mask
+} NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS;
+
+/*!
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS
+ *   chipletSyspipeMask [out]
+ *     - Mask of chiplet SMC-IDs for DM client attribution case
+ *     - Mask of local SMC-IDs for SMC client
+ *     - Legacy case returns 1 GR
+ *     - Does not support attribution case for DM client
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS {
+    NvU32 chipletSyspipeMask;   // param[out] - Mask of chiplet SMC IDs
+} NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS;
+
+/*!
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS
+ *   swizzId[in]
+ *     - Swizz ID of partition
+ *     - A DM client with an invalid swizz ID will fail this call
+ *   physSyspipeId[GRMGR_MAX_SMC_IDS] [out]
+ *     - Physical SMC-IDs mapped to partition local idx for DM client attribution case
+ *     - Does not support non-attribution case for DM client, SMC clients, legacy case
+ *   physSyspipeIdCount[out]
+ *     - Valid count of physSmcIds which have been populated in the above array.
+ *     - Failure case will return 0
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS {
+    NvU16 swizzId;                                      // param[in]  - swizz ID of partition
+    NvU16 physSyspipeIdCount;                           // param[out] - Count of physSmcIds in above array
+    NvU8  physSyspipeId[NV2080_CTRL_GRMGR_MAX_SMC_IDS]; // param[out] - physical/local SMC IDs
+} NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS;
+
+/*!
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS
+ *   swizzId[in]
+ *     - Swizz ID of partition
+ *     - Mandatory parameter
+ *     - A DM client with an invalid swizz ID will fail this call
+ *   grIdx[in]
+ *     - Local grIdx for a partition
+ *     - Mandatory parameter
+ *   gpcEnMask[out]
+ *     - Logical enabled GPC mask associated with the requested grIdx of the
+ *       partition, i.e. swizzId->engineId->gpcMask
+ *     - These IDs should be used as inputs to further queries
+ *     - Does not support non-attribution case for DM client, SMC clients, legacy case
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS {
+    NvU32 swizzId;     // param[in]  - swizz ID of partition
+    NvU32 grIdx;       // param[in]  - partition local GR ID
+    NvU32 gpcEnMask;   // param[out] - logical enabled GPC mask
+} NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS;
+
+/*!
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID
+ *   syspipeId[out]
+ *     - Partition-local GR idx for client subscribed to exec partition
+ *     - Does not support legacy case, DM client, or SMC client subscribed only to partition
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID_PARAMS {
+    NvU32 syspipeId;   // param[out] - partition-local Gr idx
+} NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID_PARAMS;
+
+/*!
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GRAPHICS_SYSPIPE_MASK_PARAMS
+ *   chipletSyspipeMask [out]
+ *     - Mask of chiplet GFX capable SMC-IDs for DM client attribution case
+ *     - Mask of local GFX capable SMC-IDs for SMC client
+ *     - Legacy case returns GR0 if GFX capable, else 0
+ *     - Does not support attribution case for DM client
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GRAPHICS_SYSPIPE_MASK_PARAMS {
+    NvU32 chipletSyspipeMask;   // param[out] - Mask of chiplet SMC IDs
+} NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GRAPHICS_SYSPIPE_MASK_PARAMS;
+
+/*!
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_GFX_CAPABLE_GPC_MASK_PARAMS
+ *   swizzId[in]
+ *     - Swizz ID of partition
+ *     - Mandatory parameter
+ *     - A DM client with an invalid swizz ID will fail this call
+ *   grIdx[in]
+ *     - Local grIdx for a partition
+ *     - Mandatory parameter
+ *   gpcEnMask[out]
+ *     - Logical enabled GPC mask associated with the requested grIdx of the
+ *       partition, i.e. swizzId->engineId->gpcMask
+ *     - These IDs should be used as inputs to further queries
+ *     - Does not support non-attribution case for DM client, SMC clients, legacy case
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_GFX_CAPABLE_GPC_MASK_PARAMS {
+    NvU32 swizzId;     // param[in]  - swizz ID of partition
+    NvU32 grIdx;       // param[in]  - partition local GR ID
+    NvU32 gpcEnMask;   // param[out] - logical enabled GPC mask
+} NV2080_CTRL_GRMGR_GR_FS_INFO_GFX_CAPABLE_GPC_MASK_PARAMS;
+
+/*!
+ *   queryType[in]
+ *     - Use queryType defines to specify what information is being requested
+ *   status[out]
+ *     - Possible status values returned are:
+ *       NV_OK
+ *       NV_ERR_INVALID_ARGUMENT
+ *       NV_ERR_INVALID_STATE
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARAMS {
+    NvU16 queryType;
+    NvU8  reserved[2]; // To keep the struct aligned for now and available for future use (if needed)
+    NvU32 status;
+    union {
+        NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS                     gpcCountData;
+        NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS               chipletGpcMapData;
+        NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS                      tpcMaskData;
+        NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS                      ppcMaskData;
+        NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS     partitionGpcMapData;
+        NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS          syspipeMaskData;
+        NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS partitionChipletSyspipeData;
+        NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS         dmGpcMaskData;
+        NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID_PARAMS          partitionSyspipeIdData;
+        NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS                      ropMaskData;
+        NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GRAPHICS_SYSPIPE_MASK_PARAMS gfxSyspipeMaskData;
+        NV2080_CTRL_GRMGR_GR_FS_INFO_GFX_CAPABLE_GPC_MASK_PARAMS          gfxGpcMaskData;
+    } queryData;
+} NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARAMS;
+
+#define NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS {
+    NvU16 numQueries;
+    NvU8  reserved[6]; // To keep the struct aligned for now and available for future use (if needed)
+    NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARAMS queries[NV2080_CTRL_GRMGR_GR_FS_INFO_MAX_QUERIES];
+} NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS;
+
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_INVALID                       0
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_GPC_COUNT                     1
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_GPC_MAP               2
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_TPC_MASK                      3
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PPC_MASK                      4
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_CHIPLET_GPC_MAP     5 /* deprecated */
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_SYSPIPE_MASK          6
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_CHIPLET_SYSPIPE_IDS 7
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PROFILER_MON_GPC_MASK         8
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_SYSPIPE_ID          9
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_ROP_MASK                      10
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_GRAPHICS_SYSPIPE_MASK 11
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_GFX_CAPABLE_GPC_MASK          12
+
+/* _ctrl2080grmgr_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h
new file mode 100644
index 0000000..a5d2e5a
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h
@@ -0,0 +1,193 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080gsp.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/* NV20_SUBDEVICE_XX GSP control commands and parameters */
+
+/*
+ * NV2080_CTRL_CMD_GSP_GET_FEATURES
+ *
+ * This command is used to determine which GSP features are
+ * supported on this GPU.
+ *
+ *   gspFeatures
+ *     Bit mask that specifies GSP features supported.
+ *   bValid
+ *     If this field is set to NV_TRUE, then the above bit mask is
+ *     considered valid. Otherwise, the bit mask should be ignored
+ *     as invalid. bValid will be set to NV_TRUE when RM is a
+ *     GSP client with GPU support offloaded to GSP firmware.
+ *   bDefaultGspRmGpu
+ *     If this field is set to NV_TRUE, it indicates that the
+ *     underlying GPU has GSP-RM enabled by default. If set to NV_FALSE,
+ *     it indicates that the GPU has GSP-RM disabled by default.
+ *   firmwareVersion
+ *     This field contains the buffer into which the firmware build version
+ *     should be returned, if the GPU is offloaded. Otherwise, the buffer
+ *     will remain untouched.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_GSP_GET_FEATURES (0x20803601) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GSP_INTERFACE_ID << 8) | NV2080_CTRL_GSP_GET_FEATURES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_GSP_MAX_BUILD_VERSION_LENGTH (0x0000040)
+
+#define NV2080_CTRL_GSP_GET_FEATURES_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_GSP_GET_FEATURES_PARAMS {
+    NvU32  gspFeatures;
+    NvBool bValid;
+    NvBool bDefaultGspRmGpu;
+    NvU8   firmwareVersion[NV2080_GSP_MAX_BUILD_VERSION_LENGTH];
+} NV2080_CTRL_GSP_GET_FEATURES_PARAMS;
+
+/* Valid feature values */
+#define NV2080_CTRL_GSP_GET_FEATURES_UVM_ENABLED                            0:0
+#define NV2080_CTRL_GSP_GET_FEATURES_UVM_ENABLED_FALSE                      (0x00000000)
+#define NV2080_CTRL_GSP_GET_FEATURES_UVM_ENABLED_TRUE                       (0x00000001)
+#define NV2080_CTRL_GSP_GET_FEATURES_VGPU_GSP_MIG_REFACTORING_ENABLED       1:1
+#define NV2080_CTRL_GSP_GET_FEATURES_VGPU_GSP_MIG_REFACTORING_ENABLED_FALSE (0x00000000)
+#define NV2080_CTRL_GSP_GET_FEATURES_VGPU_GSP_MIG_REFACTORING_ENABLED_TRUE  (0x00000001)
+
+/*
+ * NV2080_CTRL_CMD_GSP_GET_RM_HEAP_STATS
+ *
+ * This command reports the current GSP-RM heap usage statistics.
+ *
+ *   gfid
+ *     The gfid under query: when gfid = 0, this reports the stats of the PF.
+ *     Otherwise, it reports the stats for the RM task's memory consumption
+ *     associated with the given gfid.
+ *   managedSize
+ *     The total size in bytes of the underlying heap. Note that not all memory
+ *     will be allocatable, due to fragmentation and memory allocator/tracking
+ *     overhead.
+ *   current
+ *     An NV2080_CTRL_GSP_RM_HEAP_STATS_SNAPSHOT record corresponding to
+ *     GSP-RM heap usage at the time this command is called.
+ *   peak
+ *     An NV2080_CTRL_GSP_RM_HEAP_STATS_SNAPSHOT record corresponding to
+ *     the "high water mark" of heap usage since GSP-RM was started.
+ */
+#define NV2080_CTRL_CMD_GSP_GET_RM_HEAP_STATS (0x20803602) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GSP_INTERFACE_ID << 8) | NV2080_CTRL_GSP_GET_RM_HEAP_STATS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_GSP_RM_HEAP_STATS_SNAPSHOT
+ *
+ * This record represents a set of heap measurements at a given point in time.
+ *
+ *   allocatedSize
+ *     Allocated memory size, in bytes. This value does not include overhead used
+ *     by the underlying allocator for padding/metadata, but does include the
+ *     NvPort memory tracking overhead.
+ *   usableSize
+ *     Allocated memory size excluding all metadata, in bytes. This value does
+ *     not include the NvPort memory tracking overhead.
+ *   memTrackOverhead
+ *     Allocated memory size used for NvPort memory tracking.
+ */
+typedef struct NV2080_CTRL_GSP_RM_HEAP_STATS_SNAPSHOT {
+    NV_DECLARE_ALIGNED(NvU64 allocatedSize, 8);
+    NV_DECLARE_ALIGNED(NvU64 usableSize, 8);
+    NV_DECLARE_ALIGNED(NvU64 memTrackOverhead, 8);
+    NvU32 allocationCount;
+} NV2080_CTRL_GSP_RM_HEAP_STATS_SNAPSHOT;
+
+#define NV2080_CTRL_GSP_GET_RM_HEAP_STATS_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_GSP_GET_RM_HEAP_STATS_PARAMS {
+    NvU32 gfid;
+    NV_DECLARE_ALIGNED(NvU64 managedSize, 8);
+    NV_DECLARE_ALIGNED(NvU64 largestFreeChunkSize, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GSP_RM_HEAP_STATS_SNAPSHOT current, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GSP_RM_HEAP_STATS_SNAPSHOT peak, 8);
+} NV2080_CTRL_GSP_GET_RM_HEAP_STATS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS
+ *
+ * This command reports the current partition's VGPU-GSP plugin's heap usage statistics.
+ *
+ *   managedSize
+ *     The total size in bytes of the underlying heap. Note that not all memory
+ *     will be allocatable, due to fragmentation and memory allocator/tracking
+ *     overhead.
+ *   allocatedSize
+ *     Allocated memory size, in bytes. This value does not include overhead used
+ *     by the underlying allocator for padding/metadata.
+ *   allocationCount
+ *     The number of active allocations. This count reflects the current number of
+ *     memory blocks that have been allocated but not yet freed.
+ *   peakAllocatedSize
+ *     The highest recorded allocated memory size, in bytes. This value represents the
+ *     maximum amount of memory that has been allocated at any point in time. When a new
+ *     highest allocated size is recorded, the peakAllocatedSize is updated.
+ *   peakAllocationCount
+ *     The number of active allocations corresponding to the highest recorded peakAllocatedSize.
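+ *
+ * Note (illustrative, not a formal guarantee): because of fragmentation, the
+ * amount of memory that can still be allocated may be less than
+ * (managedSize - allocatedSize); largestFreeChunkSize gives an upper bound
+ * on the largest single allocation that can currently succeed.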
+ */
+
+#define NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS (0x20803603) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GSP_INTERFACE_ID << 8) | NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 allocatedSize, 8);
+    NV_DECLARE_ALIGNED(NvU64 peakAllocatedSize, 8);
+    NV_DECLARE_ALIGNED(NvU64 managedSize, 8);
+    NvU32 allocationCount;
+    NvU32 peakAllocationCount;
+    NV_DECLARE_ALIGNED(NvU64 largestFreeChunkSize, 8);
+} NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS;
+
+#define NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS (0x20803604) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GSP_INTERFACE_ID << 8) | NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS_MESSAGE_ID" */
+#define NV2080_CTRL_GSP_LIBOS_POOL_COUNT_MAX     64
+
+typedef struct NV2080_CTRL_GSP_LIBOS_POOL_STATS {
+    NvU32 allocations;
+    NvU32 peakAllocations;
+    NV_DECLARE_ALIGNED(NvU64 objectSize, 8);
+} NV2080_CTRL_GSP_LIBOS_POOL_STATS;
+
+
+#define NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GSP_LIBOS_POOL_STATS poolStats[NV2080_CTRL_GSP_LIBOS_POOL_COUNT_MAX], 8);
+    NV_DECLARE_ALIGNED(NvU64 totalHeapSize, 8);
+    NvU8 poolCount;
+} NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS;
+
+// _ctrl2080gsp_h_
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h
new file mode 100644
index 0000000..0160705
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h
@@ -0,0 +1,73 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080hshub.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/*
+ * NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK
+ *
+ * This command gets the active HSHUB masks.
+ *
+ *   hshubNcisocMask
+ *     NCISOC enabled active HSHUBs
+ *   hshubNvlMask
+ *     NVLINK capable active HSHUBs.
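+ *
+ * Example (illustrative): a returned hshubNvlMask of 0x5 indicates that
+ * HSHUBs 0 and 2 are active and NVLINK capable.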
+ */
+
+#define NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS {
+    NvU32 hshubNcisocMask;
+    NvU32 hshubNvlMask;
+} NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS;
+
+#define NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK (0x20804101) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_HSHUB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_HSHUB_SET_EC_THROTTLE_MODE
+ *
+ * This command sets the EC throttle mode registers.
+ *
+ *   ecMode
+ *     EC mode 0-7 to write to the mode register
+ *   status
+ *     return status
+ */
+#define NV2080_CTRL_CMD_HSHUB_SET_EC_THROTTLE_MODE_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_CMD_HSHUB_SET_EC_THROTTLE_MODE_PARAMS {
+    NvU32 ecMode;
+    NvU32 status;
+} NV2080_CTRL_CMD_HSHUB_SET_EC_THROTTLE_MODE_PARAMS;
+
+#define NV2080_CTRL_CMD_HSHUB_SET_EC_THROTTLE_MODE (0x20804102) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_HSHUB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_HSHUB_SET_EC_THROTTLE_MODE_PARAMS_MESSAGE_ID" */
+/* _ctrl2080hshub_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h
new file mode 100644
index 0000000..99e78c2
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h
@@ -0,0 +1,372 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080i2c.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/* NV20_SUBDEVICE_XX i2c-related control commands and parameters */
+
+/*
+ * NV2080_CTRL_I2C_VERSION
+ *
+ * NV2080_CTRL_I2C_VERSION_0_0:
+ *   This return state specifies that support is only available
+ *   for single subAddr reads.
+ *
+ */
+#define NV2080_CTRL_I2C_VERSION_0 0x00
+
+/* maximum number of i2c entries supported */
+#define NV2080_CTRL_I2C_MAX_ENTRIES      256
+#define NV2080_CTRL_I2C_MAX_REG_LEN      8
+#define NV2080_CTRL_I2C_MAX_ADDR_ENTRIES 20
+
+/*
+ * NV2080_CTRL_I2C_FLAGS
+ *
+ * NV2080_CTRL_I2C_FLAGS_NONSTD_SI1930UC:
+ *   This option specifies that non-compliant i2c for SI1930UC is required
+ *
+ * NV2080_CTRL_I2C_FLAGS_PRIVILEGE
+ *   This option specifies that the i2c access is privileged
+ *
+ * NV2080_CTRL_I2C_FLAGS_PX3540
+ *   This option specifies that the i2c device -PX3540/3544- is accessed
+ */
+#define NV2080_CTRL_I2C_FLAGS_NONSTD_SI1930UC             (0x00000001)
+#define NV2080_CTRL_I2C_FLAGS_PRIVILEGE                   (0x00000002)
+#define NV2080_CTRL_I2C_FLAGS_DATA_ENCRYPTED              (0x00000004)
+#define NV2080_CTRL_I2C_FLAGS_PX3540                      (0x00000010)
+#define NV2080_CTRL_I2C_FLAGS_ADDR_AUTO_INC_NOT_SUPPORTED (0x00000008)
+
+/*
+ * NV2080_CTRL_CMD_I2C_READ_BUFFER
+ *
+ * This command reads a buffer of data from a device on the specified
+ * I2C port.
+ *
+ *   version
+ *     This field is returned to the client and indicates the current
+ *     supported I2C controls available.
+ *
+ *   port
+ *     This field must be specified by the client to indicate the port/bus
+ *     on which i2c access is desired.
+ *
+ *   flags
+ *     This field is specified by the client to request additional options
+ *     as provided by NV2080_CTRL_I2C_FLAGS.
+ *
+ *   inputCount
+ *     This field specifies the total # of elements contained in inputBuffer
+ *
+ *   inputBuffer
+ *     This should contain the chipaddr as the first element, followed by
+ *     each subAddress at which to access the first element of data
+ *     Eg. <chipAddr> <subAddr> ...
+ *     In general, the client will only have 2 elements
+ *
+ *   outputCount
+ *     This field specifies how many registers to read from the start register
+ *     index. The maximum value allowed is NV2080_CTRL_I2C_MAX_ENTRIES.
+ *
+ *   outputBuffer
+ *     This buffer is returned to the client with the data read from
+ *     the start register index.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_STATE_IN_USE
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_NOT_SUPPORTED
+ *
+ */
+#define NV2080_CTRL_I2C_READ_BUFFER_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_I2C_READ_BUFFER_PARAMS {
+    NvU32 version;
+    NvU32 port;
+    NvU32 flags;
+    NvU32 inputCount;
+    // C form: NvU8 inputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES];
+    NvU8  inputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES];
+    NvU32 outputCount;
+    // C form: NvU8 outputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES];
+    NvU8  outputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES];
+} NV2080_CTRL_I2C_READ_BUFFER_PARAMS;
+
+#define NV2080_CTRL_CMD_I2C_READ_BUFFER (0x20800601) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | NV2080_CTRL_I2C_READ_BUFFER_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_I2C_WRITE_BUFFER
+ *
+ * This command writes a buffer of data to a device on the specified
+ * I2C port.
+ *
+ *   version
+ *     This field is returned to the client and indicates the current
+ *     supported I2C controls available.
+ *
+ *   port
+ *     This field must be specified by the client to indicate the port/bus
+ *     on which i2c access is desired.
+ *
+ *   flags
+ *     This field is specified by the client to request additional options.
+ *     NV2080_CTRL_I2C_FLAGS_NONSTD_SI1930UC:
+ *       - Specifies that non-compliant i2c access for SI1930UC is required
+ *
+ *   inputCount
+ *     This field specifies the total # of elements contained in inputBuffer
+ *
+ *   inputBuffer
+ *     This should contain the chipaddr as the first element, followed by
+ *     each subAddress at which to access the first element of data,
+ *     and finally the data to be programmed.
+ *     Eg. <chipAddr> <subAddr> ... <data> ...
+ *     In general, the client will have 2 elements plus the data to be
+ *     programmed.
+ *
+ *   encrClientID
+ *     This field is specified by the client and is used to uniquely access
+ *     the client's encryption context
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_STATE_IN_USE
+ *   NV_ERR_INVALID_STATE
+ *
+ */
+
+#define NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS {
+    NvU32 version;
+    NvU32 port;
+    NvU32 flags;
+    NvU32 inputCount;
+    // C form: NvU8 inputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES];
+    NvU8  inputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES];
+    NvU32 encrClientID;
+} NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS;
+
+#define NV2080_CTRL_CMD_I2C_WRITE_BUFFER (0x20800602) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS_MESSAGE_ID" */
+
+
+/*
+ * NV2080_CTRL_CMD_I2C_READ_REG
+ *
+ * This command reads the contents of a device register on the specified
+ * I2C port.
+ *
+ *   version
+ *     This field is returned to the client and indicates the current
+ *     supported I2C controls available.
+ *
+ *   port
+ *     This field must be specified by the client to indicate the port/bus
+ *     on which i2c access is desired.
+ *
+ *   flags
+ *     This field is specified by the client to request additional options.
+ *     NV2080_CTRL_I2C_FLAGS_NONSTD_SI1930UC:
+ *       - Specifies that non-compliant i2c access for SI1930UC is required
+ *   addr
+ *     This field is specified by the client as the target device address.
+ *   reg
+ *     This field is specified by the client as the target register address.
+ *
+ *   bufsize
+ *     This field specifies the size in bytes of the register data
+ *
+ *   buffer
+ *     When used for a read, this is the buffer that stores the returned
+ *     register contents; when used for a write, it contains the data to be
+ *     written.
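+ *
+ * Illustrative read example (values chosen here for illustration only):
+ * to read two bytes starting at register 0x03 of a device, set addr to the
+ * device's I2C address, reg = 0x03, and bufsize = 2, then issue
+ * NV2080_CTRL_CMD_I2C_READ_REG; the two bytes are returned in buffer[0]
+ * and buffer[1].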
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_STATE_IN_USE
+ *   NV_ERR_INVALID_STATE
+ *
+ */
+typedef struct NV2080_CTRL_I2C_RW_REG_PARAMS {
+    NvU32 version;
+    NvU32 port;
+    NvU32 flags;
+    NvU32 addr;
+    NvU8  reg;
+    NvU8  bufsize;
+    // C form: NvU8 buffer[NV2080_CTRL_I2C_MAX_ENTRIES - 1];
+    NvU8  buffer[(NV2080_CTRL_I2C_MAX_ENTRIES - 1)];
+} NV2080_CTRL_I2C_RW_REG_PARAMS;
+
+#define NV2080_CTRL_CMD_I2C_READ_REG (0x20800603) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | NV2080_CTRL_I2C_READ_REG_PARAMS_MESSAGE_ID" */
+
+// provide NV2080_CTRL_I2C_READ_REG_PARAMS as the historical name
+#define NV2080_CTRL_I2C_READ_REG_PARAMS_MESSAGE_ID (0x3U)
+
+typedef NV2080_CTRL_I2C_RW_REG_PARAMS NV2080_CTRL_I2C_READ_REG_PARAMS;
+
+#define NV2080_CTRL_CMD_I2C_WRITE_REG (0x20800604) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | NV2080_CTRL_I2C_WRITE_REG_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_I2C_WRITE_REG_PARAMS_MESSAGE_ID (0x4U)
+
+typedef NV2080_CTRL_I2C_RW_REG_PARAMS NV2080_CTRL_I2C_WRITE_REG_PARAMS;
+
+/*
+ * NV006F_CTRL_CMD_SYSTEM_I2C_ACCESS
+ *
+ * This command allows clients to read and write data using the I2C ports
+ *
+ *   token [IN]
+ *     This is used in i2cAcquirePort
+ *
+ *   cmd [IN]
+ *     The I2CAccess command
+ *
+ *   port [IN]
+ *     The port ID of the concerned display
+ *
+ *   flags [IN]
+ *     The I2CAccess flags, such as ACK, START, and STOP
+ *
+ *   data [OUT/IN]
+ *     Data to be passed in or read out
+ *
+ *   dataBuffSize [IN]
+ *     Size of the data buffer.
+ *
+ *   speed [IN]
+ *     Speed of transaction.
+ *
+ *   status [OUT]
+ *     The I2CAccess status returned
+ *
+ *   encrClientID [IN]
+ *     This field is specified by the client and is used to uniquely access
+ *     the client's encryption context
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV2080_CTRL_CMD_I2C_ACCESS (0x20800610) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | NV2080_CTRL_I2C_ACCESS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_I2C_ACCESS_PARAMS_MESSAGE_ID (0x10U)
+
+typedef struct NV2080_CTRL_I2C_ACCESS_PARAMS {
+    NvU32 token;
+    NvU32 cmd;
+    NvU32 port;
+    NvU32 flags;
+    NV_DECLARE_ALIGNED(NvP64 data, 8);
+    NvU32 status;
+    NvU32 dataBuffSize;
+    NvU32 speed;
+    NvU32 encrClientID;
+} NV2080_CTRL_I2C_ACCESS_PARAMS;
+
+// commands
+#define NV2080_CTRL_I2C_ACCESS_CMD_ACQUIRE         0x1
+#define NV2080_CTRL_I2C_ACCESS_CMD_RELEASE         0x2
+#define NV2080_CTRL_I2C_ACCESS_CMD_WRITE_BYTE      0x3
+#define NV2080_CTRL_I2C_ACCESS_CMD_READ_BYTE       0x4
+#define NV2080_CTRL_I2C_ACCESS_CMD_NULL            0x5
+#define NV2080_CTRL_I2C_ACCESS_CMD_RESET           0x6
+#define NV2080_CTRL_I2C_ACCESS_CMD_TEST_PORT       0x11
+#define NV2080_CTRL_I2C_ACCESS_CMD_SET_FAST_MODE   0x12
+#define NV2080_CTRL_I2C_ACCESS_CMD_SET_NORMAL_MODE 0x13
+#define NV2080_CTRL_I2C_ACCESS_CMD_WRITE_BUFFER    0x14
+#define NV2080_CTRL_I2C_ACCESS_CMD_READ_BUFFER     0x15
+#define NV2080_CTRL_I2C_ACCESS_CMD_START           0x17
+#define NV2080_CTRL_I2C_ACCESS_CMD_STOP            0x18
+#define NV2080_CTRL_I2C_ACCESS_CMD_SET_SLOW_MODE   0x20
+
+// flags
+#define NV2080_CTRL_I2C_ACCESS_FLAG_START               0x1
+#define NV2080_CTRL_I2C_ACCESS_FLAG_STOP                0x2
+#define NV2080_CTRL_I2C_ACCESS_FLAG_ACK                 0x4
+#define NV2080_CTRL_I2C_ACCESS_FLAG_RAB                 0x8
+#define NV2080_CTRL_I2C_ACCESS_FLAG_ADDR_10BITS         0x10
+#define NV2080_CTRL_I2C_ACCESS_FLAG_PRIVILEGE           0x20
+#define NV2080_CTRL_I2C_ACCESS_FLAG_DATA_ENCRYPTED      0x40
+#define NV2080_CTRL_I2C_ACCESS_FLAG_RESTART             0x80
+#define NV2080_CTRL_I2C_ACCESS_FLAG_SLOW_MODE_33_33PCT  0x100
+#define NV2080_CTRL_I2C_ACCESS_FLAG_SLOW_MODE_33PCT     0x200
+#define NV2080_CTRL_I2C_ACCESS_FLAG_SLOW_MODE_10PCT     0x400
+#define NV2080_CTRL_I2C_ACCESS_FLAG_SLOW_MODE_3_33PCT   0x800
+#define NV2080_CTRL_I2C_ACCESS_FLAG_SLOW_MODE_3PCT      0x1000
+
+// port
+#define NV2080_CTRL_I2C_ACCESS_PORT_DYNAMIC   0x0
+#define NV2080_CTRL_I2C_ACCESS_PORT_PRIMARY   0x1
+#define NV2080_CTRL_I2C_ACCESS_PORT_SECONDARY 0x2
+#define NV2080_CTRL_I2C_ACCESS_PORT_TERTIARY  0x3
+#define NV2080_CTRL_I2C_ACCESS_PORT_QUARTIARY 0x4
+
+// Alternate numeric port designators
+#define NV2080_CTRL_I2C_ACCESS_PORT_1  0x1
+#define NV2080_CTRL_I2C_ACCESS_PORT_2  0x2
+#define NV2080_CTRL_I2C_ACCESS_PORT_3  0x3
+#define NV2080_CTRL_I2C_ACCESS_PORT_4  0x4
+#define NV2080_CTRL_I2C_ACCESS_PORT_5  0x5
+#define NV2080_CTRL_I2C_ACCESS_PORT_6  0x6
+#define NV2080_CTRL_I2C_ACCESS_PORT_7  0x7
+#define NV2080_CTRL_I2C_ACCESS_PORT_8  0x8
+#define NV2080_CTRL_I2C_ACCESS_PORT_9  0x9
+#define NV2080_CTRL_I2C_ACCESS_PORT_10 0x10
+
+// Total ports count
+#define NV2080_CTRL_I2C_ACCESS_NUM_PORTS NV2080_CTRL_I2C_ACCESS_PORT_10
+
+// status
+#define NV2080_CTRL_I2C_ACCESS_STATUS_SUCCESS                0x0
+#define NV2080_CTRL_I2C_ACCESS_STATUS_ERROR                  0x1
+#define NV2080_CTRL_I2C_ACCESS_STATUS_PROTOCOL_ERROR         0x2
+#define NV2080_CTRL_I2C_ACCESS_STATUS_DEVICE_BUSY            0x3
+#define NV2080_CTRL_I2C_ACCESS_STATUS_NACK_AFTER_SEND        0x4
+#define NV2080_CTRL_I2C_ACCESS_STATUS_DP2TMDS_DONGLE_MISSING 0x5
+
+#define NV2080_CTRL_CMD_I2C_ENABLE_MONITOR_3D_MODE (0x20800620) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | NV2080_CTRL_I2C_ENABLE_MONITOR_3D_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_I2C_ENABLE_MONITOR_3D_MODE_PARAMS_MESSAGE_ID (0x20U)
+
+typedef struct NV2080_CTRL_I2C_ENABLE_MONITOR_3D_MODE_PARAMS {
+    NvU32 head;
+    NvU32 authType;
+    NvU32 status;
+} NV2080_CTRL_I2C_ENABLE_MONITOR_3D_MODE_PARAMS;
+
+/* _ctrl2080i2c_h_ */
+
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h
new file mode 100644
index 0000000..ee2ada8
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h
@@ -0,0 +1,31 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h
new file mode 100644
index 0000000..ee2ada8
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h
@@ -0,0 +1,31 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080illum.finn
+//
+
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
new file mode 100644
index 0000000..2467257
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
@@ -0,0 +1,5338 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080internal.finn
+//
+
+#include "nvimpshared.h"
+#include "cc_drv.h"
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+#include "ctrl/ctrl2080/ctrl2080gpu.h"      /* Some controls derivative of 2080gr */
+#include "ctrl/ctrl0080/ctrl0080msenc.h"    /* NV0080_CTRL_MSENC_CAPS_TBL_SIZE */
+#include "ctrl/ctrl0080/ctrl0080bsp.h"      /* NV0080_CTRL_BSP_CAPS_TBL_SIZE */
+#include "ctrl/ctrl2080/ctrl2080fifo.h"     /* NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO */
+#include "ctrl/ctrl2080/ctrl2080mc.h"       /* NV2080_INTR_* */
+#include "ctrl/ctrl0073/ctrl0073system.h"   /* NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS */
+#include "ctrl/ctrl0000/ctrl0000system.h"
+#include "ctrl/ctrl2080/ctrl2080nvlink_common.h" /* NV2080_CTRL_NVLINK_LINK_MASK */
+#include "ctrl/ctrl90f1.h"
+#include "ctrl/ctrl30f1.h"
+#include "nvcfg_sdk.h"
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO
+ *
+ * This command obtains information from physical RM for use by CPU-RM.
+ *
+ *   feHwSysCap
+ *     Display IP v03_00 and later.
+ *     Contents of capability register.
+ *
+ *   windowPresentMask
+ *     Display IP v03_00 and later.
+ *     Mask of the WINDOWs actually present on the current chip.
+ *
+ *   bFbRemapperEnabled
+ *     Display IP v02_01 and later.
+ *     Indicates that the display remapper HW exists and is enabled.
+ *
+ *   numHeads
+ *     Display IP v02_01 and later.
+ *     Provides the number of heads the HW supports.
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+    NvU32  feHwSysCap;
+    NvU32  windowPresentMask;
+    NvBool bFbRemapperEnabled;
+    NvU32  numHeads;
+    NvU32  i2cPort;
+    NvU32  internalDispActiveMask;
+    NvU32  embeddedDisplayPortMask;
+    NvBool bExternalMuxSupported;
+    NvBool bInternalMuxSupported;
+    NvU32  numDispChannels;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+
+
+//
+// MemorySystem settings that are static after GPU state init/load is finished.
+//
+// Fields are shared between the VGPU guest/GSP Client as well as the VGPU
+// host/GSP-RM.
+//
+#define NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS_MESSAGE_ID (0x1CU)
+
+typedef struct NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS {
+    /*! Determines if RM should use 1 to 1 Comptagline allocation policy */
+    NvBool bOneToOneComptagLineAllocation;
+
+    /*! Determines if RM should use 1 to 4 Comptagline allocation policy */
+    NvBool bUseOneToFourComptagLineAllocation;
+
+    /*! Determines if RM should use raw Comptagline allocation policy */
+    NvBool bUseRawModeComptaglineAllocation;
+
+    /*! Has COMPBIT_BACKING_SIZE been overridden to zero (i.e. disabled)? */
+    NvBool bDisableCompbitBacking;
+
+    /*! Determines if we need to disable post-L2 compression */
+    NvBool bDisablePostL2Compression;
+
+    /*! Is the ECC DRAM feature supported? */
+    NvBool bEnabledEccFBPA;
+
+    NvBool bL2PreFill;
+
+    /*! L2 cache size */
+    NV_DECLARE_ALIGNED(NvU64 l2CacheSize, 8);
+
+    /*! Indicates whether FBPA is present or not */
+    NvBool bFbpaPresent;
+
+    /*! Size covered by one comptag */
+    NvU32  comprPageSize;
+
+    /*! log2(comprPageSize) */
+    NvU32  comprPageShift;
+
+    /*! RAM type */
+    NvU32  ramType;
+
+    /*! LTC count */
+    NvU32  ltcCount;
+
+    /*! LTS per LTC count */
+    NvU32  ltsPerLtcCount;
+} NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS;
+
+/*!
+ * Retrieve Memory System Static data.
+ */
+#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_STATIC_CONFIG (0x20800a1c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER
+ *
+ * This command sends access counter buffer pages allocated by CPU-RM
+ * to be set up and enabled in physical RM.
+ *
+ *   accessCounterIndex
+ *     Index of access counter buffer to register.
+ *
+ *   bufferSize
+ *     Size of the access counter buffer to register.
+ *
+ *   bufferPteArray
+ *     Pages of access counter buffer.
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER (0x20800a1d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_UVM_ACCESS_CNTR_BUFFER_MAX_PAGES 64 +#define NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS_MESSAGE_ID (0x1DU) + +typedef struct NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS { + NvU32 accessCounterIndex; + NvU32 bufferSize; + NV_DECLARE_ALIGNED(NvU64 bufferPteArray[NV2080_CTRL_INTERNAL_UVM_ACCESS_CNTR_BUFFER_MAX_PAGES], 8); +} NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_UVM_UNREGISTER_ACCESS_CNTR_BUFFER + * + * This command requests physical RM to disable the access counter buffer. + * + * accessCounterIndex + * Index of access counter buffer to unregister. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_UVM_UNREGISTER_ACCESS_CNTR_BUFFER (0x20800a1e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_UVM_UNREGISTER_ACCESS_CNTR_BUFFER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_UVM_UNREGISTER_ACCESS_CNTR_BUFFER_PARAMS_MESSAGE_ID (0x1EU) + +typedef struct NV2080_CTRL_INTERNAL_UVM_UNREGISTER_ACCESS_CNTR_BUFFER_PARAMS { + NvU32 accessCounterIndex; +} NV2080_CTRL_INTERNAL_UVM_UNREGISTER_ACCESS_CNTR_BUFFER_PARAMS; + +#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8 + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_CAPS_V2 + */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CAPS { + NvU8 capsTbl[NV0080_CTRL_GR_CAPS_TBL_SIZE]; +} NV2080_CTRL_INTERNAL_STATIC_GR_CAPS; + +#define NV2080_CTRL_INTERNAL_STATIC_GR_GET_CAPS_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CAPS_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_CAPS engineCaps[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CAPS_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CAPS (0x20800a1f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CAPS_PARAMS_MESSAGE_ID (0x1FU) + +typedef NV2080_CTRL_INTERNAL_STATIC_GR_GET_CAPS_PARAMS NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CAPS_PARAMS; + +/*! 
+ * @ref NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER + * @ref NV2080_CTRL_CMD_GR_GET_SM_TO_GPC_TPC_MAPPINGS + */ + + + +#define NV2080_CTRL_INTERNAL_GR_MAX_SM 240 + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GLOBAL_SM_ORDER { + struct { + NvU16 gpcId; + NvU16 localTpcId; + NvU16 localSmId; + NvU16 globalTpcId; + NvU16 virtualGpcId; + NvU16 migratableTpcId; + NvU16 ugpuId; + NvU16 physicalCpcId; + NvU16 virtualTpcId; + } globalSmId[NV2080_CTRL_INTERNAL_GR_MAX_SM]; + + NvU16 numSm; + NvU16 numTpc; +} NV2080_CTRL_INTERNAL_STATIC_GR_GLOBAL_SM_ORDER; + +#define NV2080_CTRL_INTERNAL_STATIC_GR_GET_GLOBAL_SM_ORDER_PARAMS_MESSAGE_ID (0x23U) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_GLOBAL_SM_ORDER_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_GLOBAL_SM_ORDER globalSmOrder[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_GLOBAL_SM_ORDER_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_GLOBAL_SM_ORDER (0x20800a22) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_GLOBAL_SM_ORDER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_STATIC_KGR_GET_GLOBAL_SM_ORDER_PARAMS_MESSAGE_ID (0x22U) + +typedef NV2080_CTRL_INTERNAL_STATIC_GR_GET_GLOBAL_SM_ORDER_PARAMS NV2080_CTRL_INTERNAL_STATIC_KGR_GET_GLOBAL_SM_ORDER_PARAMS; + +/*! + * BSP Static data. + */ + +#define NV2080_CTRL_CMD_INTERNAL_MAX_BSPS 8 + +typedef struct NV2080_CTRL_INTERNAL_BSP_CAPS { + NvU8 capsTbl[NV0080_CTRL_BSP_CAPS_TBL_SIZE]; +} NV2080_CTRL_INTERNAL_BSP_CAPS; + +typedef struct NV2080_CTRL_INTERNAL_BSP_GET_CAPS_PARAMS { + NV2080_CTRL_INTERNAL_BSP_CAPS caps[NV2080_CTRL_CMD_INTERNAL_MAX_BSPS]; + NvBool valid[NV2080_CTRL_CMD_INTERNAL_MAX_BSPS]; +} NV2080_CTRL_INTERNAL_BSP_GET_CAPS_PARAMS; + +/*! + * MSENC Static data. + */ + +#define NV2080_CTRL_CMD_INTERNAL_MAX_MSENCS 8 + +typedef struct NV2080_CTRL_INTERNAL_MSENC_CAPS { + NvU8 capsTbl[NV0080_CTRL_MSENC_CAPS_TBL_SIZE]; +} NV2080_CTRL_INTERNAL_MSENC_CAPS; + +typedef struct NV2080_CTRL_INTERNAL_MSENC_GET_CAPS_PARAMS { + NV2080_CTRL_INTERNAL_MSENC_CAPS caps[NV2080_CTRL_CMD_INTERNAL_MAX_MSENCS]; + NvBool valid[NV2080_CTRL_CMD_INTERNAL_MAX_MSENCS]; +} NV2080_CTRL_INTERNAL_MSENC_GET_CAPS_PARAMS; + + +#define NV2080_CTRL_INTERNAL_GR_MAX_GPC 16 +#define NV2080_CTRL_INTERNAL_MAX_TPC_PER_GPC_COUNT 10 + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_GPC_MASK + * @ref NV2080_CTRL_CMD_GR_GET_TPC_MASK + * @ref NV2080_CTRL_CMD_GR_GET_PHYS_GPC_MASK + */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_FLOORSWEEPING_MASKS { + NvU32 gpcMask; + + /*! + * tpcMask is indexed by logical GPC ID for MIG case + * and indexed by physical GPC ID for non-MIG case + */ + NvU32 tpcMask[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; + + /*! + * tpcCount is always indexed by logical GPC ID + */ + NvU32 tpcCount[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; + NvU32 physGpcMask; + NvU32 mmuPerGpc[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; + + NvU32 tpcToPesMap[NV2080_CTRL_INTERNAL_MAX_TPC_PER_GPC_COUNT]; + NvU32 numPesPerGpc[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; + + /*! + * zcullMask is always indexed by physical GPC ID + */ + NvU32 zcullMask[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; + + NvU32 physGfxGpcMask; + NvU32 numGfxTpc; +} NV2080_CTRL_INTERNAL_STATIC_GR_FLOORSWEEPING_MASKS; + +#define NV2080_CTRL_INTERNAL_STATIC_GR_GET_FLOORSWEEPING_MASKS_PARAMS_MESSAGE_ID (0x27U) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_FLOORSWEEPING_MASKS_PARAMS { + /*! 
+     * floorsweeping masks which are indexed via local GR index
+     */
+    NV2080_CTRL_INTERNAL_STATIC_GR_FLOORSWEEPING_MASKS floorsweepingMasks[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_FLOORSWEEPING_MASKS_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FLOORSWEEPING_MASKS (0x20800a26) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_FLOORSWEEPING_MASKS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_STATIC_KGR_GET_FLOORSWEEPING_MASKS_PARAMS_MESSAGE_ID (0x26U)
+
+typedef NV2080_CTRL_INTERNAL_STATIC_GR_GET_FLOORSWEEPING_MASKS_PARAMS NV2080_CTRL_INTERNAL_STATIC_KGR_GET_FLOORSWEEPING_MASKS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_KGR_GET_CTX_BUFFER_PTES
+ *
+ * This command returns the physical addresses of the specified context buffer.
+ * To obtain the addresses of the whole buffer, firstPage has to be advanced on
+ * subsequent invocations of the control until the whole buffer is probed.
+ * If the buffer is contiguous, only a single address will be returned by
+ * this control. (A paging sketch follows this header excerpt.)
+ *
+ *   bufferType[IN]
+ *     Buffer type as returned by GET_CTX_BUFFER_INFO.
+ *
+ *   firstPage[IN]
+ *     Index of the first page to return in 'physAddrs' array.
+ *
+ *   numPages[OUT]
+ *     Number of entries filled in 'physAddrs' array. This will be 0 if
+ *     firstPage is greater than or equal to the number of pages managed
+ *     by 'hBuffer'.
+ *
+ *   physAddrs[OUT]
+ *     Physical addresses of pages comprising the specified buffer.
+ *
+ *   bNoMorePages[OUT]
+ *     End of buffer reached. Either 'physAddrs' contains the last page of
+ *     the buffer or 'firstPage' specifies an index past the buffer.
+ */
+#define NV2080_CTRL_KGR_MAX_BUFFER_PTES 128
+#define NV2080_CTRL_CMD_KGR_GET_CTX_BUFFER_PTES (0x20800a28) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS_MESSAGE_ID (0x28U)
+
+typedef struct NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS {
+    NvHandle hUserClient;
+    NvHandle hChannel;
+    NvU32    bufferType;
+    NvU32    firstPage;
+    NvU32    numPages;
+    NV_DECLARE_ALIGNED(NvU64 physAddrs[NV2080_CTRL_KGR_MAX_BUFFER_PTES], 8);
+    NvBool   bNoMorePages;
+} NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS;
+
+/*!
+ * NV2080_CTRL_INTERNAL_MEMDESC_INFO
+ *
+ * A generic container structure representing a memory region to be used as a
+ * component of other control call parameters.
+ *
+ */
+typedef struct NV2080_CTRL_INTERNAL_MEMDESC_INFO {
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NV_DECLARE_ALIGNED(NvU64 alignment, 8);
+    NvU32 addressSpace;
+    NvU32 cpuCacheAttrib;
+} NV2080_CTRL_INTERNAL_MEMDESC_INFO;
+
+/*!
+ * @ref NV0080_CTRL_CMD_GR_GET_INFO
+ * @ref NV0080_CTRL_CMD_GR_GET_INFO_V2
+ * @ref NV2080_CTRL_CMD_GR_GET_INFO
+ * @ref NV2080_CTRL_CMD_GR_GET_INFO_V2
+ */
+
+
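The firstPage/bNoMorePages contract documented for NV2080_CTRL_CMD_KGR_GET_CTX_BUFFER_PTES above reduces to a simple paging loop. A hedged sketch, reusing the hypothetical issueControl() helper from earlier and assuming caller-provided handles:

    NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS p = { 0 };
    p.hUserClient = hUserClient; /* caller-provided (assumed) */
    p.hChannel    = hChannel;    /* caller-provided (assumed) */
    p.bufferType  = bufferType;  /* from GET_CTX_BUFFER_INFO */

    do
    {
        issueControl(NV2080_CTRL_CMD_KGR_GET_CTX_BUFFER_PTES, &p, sizeof(p));

        /* Consume p.numPages entries of p.physAddrs[] here; a contiguous
         * buffer yields a single entry with bNoMorePages already set. */
        p.firstPage += p.numPages;
    } while (!p.bNoMorePages);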
+/*!
+ * @ref NV2080_CTRL_GR_INFO
+ */
+typedef struct NV2080_CTRL_INTERNAL_GR_INFO {
+    NvU32 index;
+    NvU32 data;
+} NV2080_CTRL_INTERNAL_GR_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_INFO {
+    NV2080_CTRL_INTERNAL_GR_INFO infoList[NV0080_CTRL_GR_INFO_MAX_SIZE];
+} NV2080_CTRL_INTERNAL_STATIC_GR_INFO;
+#define NV2080_CTRL_INTERNAL_STATIC_GR_GET_INFO_PARAMS_MESSAGE_ID (0x2BU)
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_INFO_PARAMS {
+    NV2080_CTRL_INTERNAL_STATIC_GR_INFO engineInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_INFO (0x20800a2a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_STATIC_KGR_GET_INFO_PARAMS_MESSAGE_ID (0x2AU)
+
+typedef NV2080_CTRL_INTERNAL_STATIC_GR_GET_INFO_PARAMS NV2080_CTRL_INTERNAL_STATIC_KGR_GET_INFO_PARAMS;
+
+/*!
+ * @ref NV2080_CTRL_CMD_GR_GET_ZCULL_INFO
+ */
+
+
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_ZCULL_INFO {
+    NvU32 widthAlignPixels;
+    NvU32 heightAlignPixels;
+    NvU32 pixelSquaresByAliquots;
+    NvU32 aliquotTotal;
+    NvU32 zcullRegionByteMultiplier;
+    NvU32 zcullRegionHeaderSize;
+    NvU32 zcullSubregionHeaderSize;
+    NvU32 subregionCount;
+    NvU32 subregionWidthAlignPixels;
+    NvU32 subregionHeightAlignPixels;
+} NV2080_CTRL_INTERNAL_STATIC_GR_ZCULL_INFO;
+
+#define NV2080_CTRL_INTERNAL_STATIC_GR_GET_ZCULL_INFO_PARAMS_MESSAGE_ID (0x2DU)
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_ZCULL_INFO_PARAMS {
+    NV2080_CTRL_INTERNAL_STATIC_GR_ZCULL_INFO engineZcullInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_ZCULL_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_ZCULL_INFO (0x20800a2c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_ZCULL_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_STATIC_KGR_GET_ZCULL_INFO_PARAMS_MESSAGE_ID (0x2CU)
+
+typedef NV2080_CTRL_INTERNAL_STATIC_GR_GET_ZCULL_INFO_PARAMS NV2080_CTRL_INTERNAL_STATIC_KGR_GET_ZCULL_INFO_PARAMS;
+
+/*!
+ * @ref NV2080_CTRL_CMD_GR_GET_ROP_INFO
+ */
+
+
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_ROP_INFO {
+    NvU32 ropUnitCount;
+    NvU32 ropOperationsFactor;
+    NvU32 ropOperationsCount;
+} NV2080_CTRL_INTERNAL_STATIC_GR_ROP_INFO;
+
+#define NV2080_CTRL_INTERNAL_STATIC_GR_GET_ROP_INFO_PARAMS_MESSAGE_ID (0x2FU)
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_ROP_INFO_PARAMS {
+    NV2080_CTRL_INTERNAL_STATIC_GR_ROP_INFO engineRopInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_ROP_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_ROP_INFO (0x20800a2e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_ROP_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_STATIC_KGR_GET_ROP_INFO_PARAMS_MESSAGE_ID (0x2EU)
+
+typedef NV2080_CTRL_INTERNAL_STATIC_GR_GET_ROP_INFO_PARAMS NV2080_CTRL_INTERNAL_STATIC_KGR_GET_ROP_INFO_PARAMS;
+
+/*!
+ * @ref NV2080_CTRL_CMD_GR_GET_PPC_MASK + */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_PPC_MASKS { + NvU32 mask[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; +} NV2080_CTRL_INTERNAL_STATIC_GR_PPC_MASKS; + +#define NV2080_CTRL_INTERNAL_STATIC_GR_GET_PPC_MASKS_PARAMS_MESSAGE_ID (0x31U) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_PPC_MASKS_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_PPC_MASKS enginePpcMasks[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_PPC_MASKS_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_PPC_MASKS (0x20800a30) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_PPC_MASKS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_STATIC_KGR_GET_PPC_MASKS_PARAMS_MESSAGE_ID (0x30U) + +typedef NV2080_CTRL_INTERNAL_STATIC_GR_GET_PPC_MASKS_PARAMS NV2080_CTRL_INTERNAL_STATIC_KGR_GET_PPC_MASKS_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_ENGINE_CONTEXT_PROPERTIES + * @ref NV2080_CTRL_CMD_GR_GET_ATTRIBUTE_BUFFER_SIZE + */ + + + +#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x1a + +typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO { + NvU32 size; + NvU32 alignment; +} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO { + NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT]; +} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO; + +#define NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID (0x33U) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID (0x32U) + +typedef NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS; + +/*! 
+ * @ref NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER + */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_SM_ISSUE_RATE_MODIFIER { + NvU8 imla0; + NvU8 fmla16; + NvU8 dp; + NvU8 fmla32; + NvU8 ffma; + NvU8 imla1; + NvU8 imla2; + NvU8 imla3; + NvU8 imla4; +} NV2080_CTRL_INTERNAL_STATIC_GR_SM_ISSUE_RATE_MODIFIER; + +#define NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS_MESSAGE_ID (0x35U) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_SM_ISSUE_RATE_MODIFIER smIssueRateModifier[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_SM_ISSUE_RATE_MODIFIER (0x20800a34) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_STATIC_KGR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS_MESSAGE_ID (0x34U) + +typedef NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS NV2080_CTRL_INTERNAL_STATIC_KGR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS; + +/* + * NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS + * + * This command obtains information from physical RM for use by CPU-RM. + */ + +#define NV2080_CTRL_CMD_INTERNAL_GPU_GET_CHIP_INFO (0x20800a36) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS_MESSAGE_ID" */ + +/* + * Maximum number of register bases to return. + * These are indexed by NV_REG_BASE_* constants from gpu.h, and this value needs + * to be updated if NV_REG_BASE_LAST ever goes over it. See the ct_assert() in gpu.h + */ +#define NV2080_CTRL_INTERNAL_GET_CHIP_INFO_REG_BASE_MAX 16 +#define NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS_MESSAGE_ID (0x36U) + +typedef struct NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS { + NvU8 chipSubRev; + NvU32 emulationRev1; + NvBool isCmpSku; + NvU32 pciDeviceId; + NvU32 pciSubDeviceId; + NvU32 pciRevisionId; + NvU32 regBases[NV2080_CTRL_INTERNAL_GET_CHIP_INFO_REG_BASE_MAX]; +} NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS; + +/** + * NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE + * + * Set whether or not context switch logging is enabled + * + * bEnable + * Enable/Disable status for context switch logging + */ +#define NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE (0x20800a37) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE_PARAMS_MESSAGE_ID" */ + +/** + * NV2080_CTRL_CMD_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE + * + * Retrieve whether or not context switch logging is enabled + * + * bEnable + * Enable/Disable status for context switch logging + */ +#define NV2080_CTRL_CMD_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE (0x20800a38) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE_PARAMS_MESSAGE_ID" */ + +typedef struct NV2080_CTRL_INTERNAL_GR_FECS_TRACE_HW_ENABLE_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvBool bEnable; +} NV2080_CTRL_INTERNAL_GR_FECS_TRACE_HW_ENABLE_PARAMS; + +#define NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE_PARAMS_MESSAGE_ID (0x37U) + +typedef NV2080_CTRL_INTERNAL_GR_FECS_TRACE_HW_ENABLE_PARAMS NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE_PARAMS; + +#define NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE_PARAMS_MESSAGE_ID (0x38U) + +typedef 
NV2080_CTRL_INTERNAL_GR_FECS_TRACE_HW_ENABLE_PARAMS NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE_PARAMS; + +/** + * NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET + * + * Set read offset into FECS context switch trace record + * + * offset + * Value indicating number of records by which to offset + */ +#define NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET (0x20800a39) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET_PARAMS_MESSAGE_ID" */ + +/** + * NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET + * + * Set write offset into FECS context switch trace record + * + * offset + * Value indicating number of records by which to offset + */ +#define NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET (0x20800a3a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET_PARAMS_MESSAGE_ID" */ + +/** + * NV2080_CTRL_CMD_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET + * + * Get read offset into FECS context switch trace record + * + * offset + * Value indicating number of records by which to offset + */ + +#define NV2080_CTRL_CMD_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET (0x20800a3b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET_PARAMS_MESSAGE_ID" */ + +typedef struct NV2080_CTRL_INTERNAL_GR_FECS_TRACE_OFFSET_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvU32 offset; +} NV2080_CTRL_INTERNAL_GR_FECS_TRACE_OFFSET_PARAMS; + +#define NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET_PARAMS_MESSAGE_ID (0x39U) + +typedef NV2080_CTRL_INTERNAL_GR_FECS_TRACE_OFFSET_PARAMS NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET_PARAMS; + +#define NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET_PARAMS_MESSAGE_ID (0x3AU) + +typedef NV2080_CTRL_INTERNAL_GR_FECS_TRACE_OFFSET_PARAMS NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET_PARAMS; + +#define NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET_PARAMS_MESSAGE_ID (0x3BU) + +typedef NV2080_CTRL_INTERNAL_GR_FECS_TRACE_OFFSET_PARAMS NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET_PARAMS; + +/** + * NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE + * + * Get size of FECS record + * + * fecsRecordSize + * Size of FECS record + */ + + + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE (0x20800a3d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE_PARAMS_MESSAGE_ID" */ + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE { + NvU32 fecsRecordSize; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE; + +#define NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS_MESSAGE_ID (0x3CU) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE fecsRecordSize[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE_PARAMS_MESSAGE_ID (0x3DU) + +typedef NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE_PARAMS; + +/** + * NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_TRACE_DEFINES + * + * Get HW defines used to extract information from FECS records + * + * fecsRecordSize + * Size of FECS record + * + * timestampHiTagMask + * Mask for high 
bits of raw timestamp to extract tag
+ *
+ *   timestampHiTagShift
+ *     Shift for high bits of raw timestamp to extract tag
+ *
+ *   timestampVMask
+ *     Mask to extract timestamp from raw timestamp
+ *
+ *   numLowerBitsZeroShift
+ *     Number of bits timestamp is shifted by
+ */
+
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES {
+    NvU32 fecsRecordSize;
+    NvU32 timestampHiTagMask;
+    NvU8  timestampHiTagShift;
+    NV_DECLARE_ALIGNED(NvU64 timestampVMask, 8);
+    NvU8  numLowerBitsZeroShift;
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES;
+
+#define NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS_MESSAGE_ID (0x3EU)
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES fecsTraceDefines[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES], 8);
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_TRACE_DEFINES (0x20800a3f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_FECS_TRACE_DEFINES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_STATIC_KGR_GET_FECS_TRACE_DEFINES_PARAMS_MESSAGE_ID (0x3FU)
+
+typedef NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS NV2080_CTRL_INTERNAL_STATIC_KGR_GET_FECS_TRACE_DEFINES_PARAMS;
+
+/**
+ * NV2080_CTRL_CMD_INTERNAL_GET_DEVICE_INFO_TABLE
+ *
+ * Parse the DEVICE_INFO2_TABLE on the physical side and return it to the
+ * kernel. (An iteration sketch follows this header excerpt.)
+ */
+typedef struct NV2080_CTRL_INTERNAL_DEVICE_INFO {
+    NvU32 faultId;
+    NvU32 instanceId;
+    NvU32 typeEnum;
+    NvU32 resetId;
+    NvU32 devicePriBase;
+    NvU32 isEngine;
+    NvU32 rlEngId;
+    NvU32 runlistPriBase;
+    NvU32 groupId;
+    NvU32 ginTargetId;
+    NvU32 deviceBroadcastPriBase;
+    NvU32 groupLocalInstanceId;
+} NV2080_CTRL_INTERNAL_DEVICE_INFO;
+#define NV2080_CTRL_CMD_INTERNAL_DEVICE_INFO_MAX_ENTRIES 512
+
+#define NV2080_CTRL_CMD_INTERNAL_GET_DEVICE_INFO_TABLE (0x20800a40) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID (0x40U)
+
+typedef struct NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS {
+    NvU32                            numEntries;
+    NV2080_CTRL_INTERNAL_DEVICE_INFO deviceInfoTable[NV2080_CTRL_CMD_INTERNAL_DEVICE_INFO_MAX_ENTRIES];
+} NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP (0x20800a41) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GPU_USER_REGISTER_ACCESS_MAP_MAX_COMPRESSED_SIZE 4096
+#define NV2080_CTRL_INTERNAL_GPU_USER_REGISTER_ACCESS_MAP_MAX_PROFILING_RANGES 4096
+
+#define NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS_MESSAGE_ID (0x41U)
+
+typedef struct NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS {
+    NvU32 userRegisterAccessMapSize;
+    NvU32 compressedSize;
+    NvU8  compressedData[NV2080_CTRL_INTERNAL_GPU_USER_REGISTER_ACCESS_MAP_MAX_COMPRESSED_SIZE];
+    NvU32 profilingRangesSize;
+    NvU8  profilingRanges[NV2080_CTRL_INTERNAL_GPU_USER_REGISTER_ACCESS_MAP_MAX_PROFILING_RANGES];
+} NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS;
+
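The numEntries/deviceInfoTable pairing of NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS above is consumed by walking only the first numEntries records. A sketch under the same hypothetical issueControl() assumption; the isEngine filter is purely illustrative:

    NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS p = { 0 };
    NvU32 i;

    issueControl(NV2080_CTRL_CMD_INTERNAL_GET_DEVICE_INFO_TABLE, &p, sizeof(p));

    /* Only the first numEntries records are valid; the remainder of the
     * 512-entry array is unused. */
    for (i = 0; i < p.numEntries; i++)
    {
        const NV2080_CTRL_INTERNAL_DEVICE_INFO *pInfo = &p.deviceInfoTable[i];
        if (pInfo->isEngine)
        {
            /* e.g. record pInfo->rlEngId / pInfo->runlistPriBase for engines */
        }
    }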
+/**
+ * Get GR PDB properties synchronized between Kernel and Physical
+ *
+ *   bPerSubCtxheaderSupported
+ *     @ref PDB_PROP_GR_SUPPORTS_PER_SUBCONTEXT_CONTEXT_HEADER
+ */
+
+
+
+typedef struct NV2080_CTRL_INTERNAL_NV_RANGE {
+    NV_DECLARE_ALIGNED(NvU64 lo, 8);
+    NV_DECLARE_ALIGNED(NvU64 hi, 8);
+} NV2080_CTRL_INTERNAL_NV_RANGE;
+
+/*!
+ * NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS
+ *
+ * This structure specifies a target swizz-id and mem_range to update
+ *
+ *   swizzId[IN]
+ *     - Targeted swizz-id for which the memRange is being set
+ *
+ *   memAddrRange[IN]
+ *     - Memory Range for given GPU instance
+ */
+#define NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS_MESSAGE_ID (0x43U)
+
+typedef struct NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS {
+    NvU32 swizzId;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_NV_RANGE memAddrRange, 8);
+} NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE (0x20800a44) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS_MESSAGE_ID (0x44U)
+
+typedef NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE (0x20800a43) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_PDB_PROPERTIES {
+    NvBool bPerSubCtxheaderSupported;
+} NV2080_CTRL_INTERNAL_STATIC_GR_PDB_PROPERTIES;
+
+#define NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS_MESSAGE_ID (0x47U)
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS {
+    NV2080_CTRL_INTERNAL_STATIC_GR_PDB_PROPERTIES pdbTable[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_PDB_PROPERTIES (0x20800a48) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_PDB_PROPERTIES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_STATIC_KGR_GET_PDB_PROPERTIES_PARAMS_MESSAGE_ID (0x48U)
+
+typedef NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS NV2080_CTRL_INTERNAL_STATIC_KGR_GET_PDB_PROPERTIES_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM
+ *
+ * This command writes instance memory information in the display hardware registers.
+ *
+ *   instMemPhysAddr
+ *     GPU physical address or IOVA address of the display instance memory.
+ *
+ *   instMemSize
+ *     Size of the display instance memory.
+ *
+ *   instMemAddrSpace
+ *     Address space of the display instance memory.
+ *
+ *   instMemCpuCacheAttr
+ *     Cache attribute of the display instance memory.
+ */ +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID (0x49U) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS { + NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8); + NV_DECLARE_ALIGNED(NvU64 instMemSize, 8); + NvU32 instMemAddrSpace; + NvU32 instMemCpuCacheAttr; +} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS; + +/* + * NV2080_CTRL_INTERNAL_GPU_RECOVER_ALL_COMPUTE_CONTEXTS + * + * This command issues RC recovery for all compute contexts running on the given GPU. + */ +#define NV2080_CTRL_CMD_INTERNAL_RECOVER_ALL_COMPUTE_CONTEXTS (0x20800a4a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x4A" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_IP_VERSION + * + * This command obtains IP version of display engine for use by Kernel RM. + * + * ipVersion + * IP Version of display engine. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED - DISP has been disabled + */ +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_IP_VERSION (0x20800a4b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS_MESSAGE_ID (0x4BU) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS { + NvU32 ipVersion; +} NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_GPU_GET_SMC_MODE + * + * This command determines the current status of MIG MODE from Physical RM. + * + * smcMode [OUT] + * Current MIG MODE of the GPU. Values range NV2080_CTRL_GPU_INFO_GPU_SMC_MODE* + */ +#define NV2080_CTRL_CMD_INTERNAL_GPU_GET_SMC_MODE (0x20800a4c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS_MESSAGE_ID (0x4CU) + +typedef struct NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS { + NvU32 smcMode; +} NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR + * + * head + * This parameter specifies the head for which the callback is to be registered/unregistered. This value must be + * less than the maximum number of heads supported by the GPU subdevice. + * + * rgLineNum + * This indicates the RG scanout line number on which the callback will be executed. + * + * intrLine + * Enable: [out] Which interrupt line was allocated for this head. + * Disable: [in] Which interrupt line to deallocate. + * + * bEnable + * Should we allocate or deallocate an interrupt line? + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC - There was no available interrupt to allocate. + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR (0x20800a4d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS_MESSAGE_ID (0x4DU) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS { + NvU32 head; + NvU32 rgLineNum; + NvU32 intrLine; + NvBool bEnable; +} NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS; + +/*! 
+ * NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO + * + * Description of a supported GPU instance. + * + * partitionFlag [OUT] + * Allocation flag to be used to allocate a partition with this profile. + * + * grCount [OUT] + * # GR engines, including the GFX capable ones. + * + * gfxGrCount [OUT] + * # GR engines capable of Gfx, which is a subset of the GR engines included in grCount + * + * gpcCount [OUT] + * # total gpcs, including the GFX capable ones. + * + * virtualGpcCount [OUT] + * # virtualized gpcs, including the GFX capable ones. + * + * gfxGpcCount [OUT] + * # total gpcs capable of Gfx. This is a subset of the GPCs included in gpcCount. + * + * veidCount [OUT] + * # total veids + * + * smCount [OUT] + * # total SMs + * + * ceCount [OUT] + * # CE engines + * + * nvEncCount [OUT] + * # NVENC engines + * + * nvDecCount [OUT] + * # NVDEC engines + * + * nvJpgCount [OUT] + * # NVJPG engines + * + * nvOfaCount [OUT] + * # NVOFA engines + * + * validCTSIdMask [OUT] + * # mask of CTS IDs which can be assigned under this profile + * + * validGfxCTSIdMask [OUT] + * # mask of CTS IDs that contain Gfx capable Grs which can be assigned under this profile + */ +#define NV2080_CTRL_INTERNAL_GRMGR_PARTITION_MAX_TYPES 90 + + + +typedef struct NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO { + NvU32 partitionFlag; + NvU32 grCount; + NvU32 gfxGrCount; + NvU32 gpcCount; + NvU32 virtualGpcCount; + NvU32 gfxGpcCount; + NvU32 veidCount; + NvU32 smCount; + NvU32 ceCount; + NvU32 nvEncCount; + NvU32 nvDecCount; + NvU32 nvJpgCount; + NvU32 nvOfaCount; + NV_DECLARE_ALIGNED(NvU64 validCTSIdMask, 8); + NV_DECLARE_ALIGNED(NvU64 validGfxCTSIdMask, 8); +} NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO; + +/*! + * NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS + * + * Returns the list of supported GPU instance profiles. + * + * count [OUT] + * Number of supported profiles. + * + * table [OUT] + * Supported profiles. + */ +#define NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS_MESSAGE_ID (0x4FU) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS { + NvU32 count; + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO table[NV2080_CTRL_INTERNAL_GRMGR_PARTITION_MAX_TYPES], 8); +} NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM + * + * This command instructs the physical MemorySystem to set up memory partitioning + * exterior boundaries in hardware. 
+ * + * partitionableMemSize [input] + * Size of the partitionable memory in bytes + * + * bottomRsvdSize [input] + * Size of the reserved region below partitionable memory in bytes + * + * topRsvdSize [input] + * Size of the reserved region above partitionable memory in bytes + * + * partitionableStartAddr [output] + * Start address of the partitionable memory, aligned to HW constraints + * + * partitionableEndAddr [output] + * End address of the partitionable memory, aligned to HW constraints + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM (0x20800a51) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS_MESSAGE_ID (0x51U) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS { + NV_DECLARE_ALIGNED(NvU64 partitionableMemSize, 8); + NV_DECLARE_ALIGNED(NvU64 bottomRsvdSize, 8); + NV_DECLARE_ALIGNED(NvU64 topRsvdSize, 8); + NV_DECLARE_ALIGNED(NvU64 partitionableStartAddr, 8); + NV_DECLARE_ALIGNED(NvU64 partitionableEndAddr, 8); +} NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS; + + + +#define NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_MAX_ENGINES_MASK_SIZE 4 + +#define NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS_MESSAGE_ID (0x52U) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS { + NV_DECLARE_ALIGNED(NvU64 engineMask[NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_MAX_ENGINES_MASK_SIZE], 8); +} NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS; + +#define NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_BUFFERS 2 +#define NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_ID 64 +/*! 
+ * NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS + * + * Promote a single partition's runlist buffers allocated by kernel Client RM to Physical RM + * + * rlBuffers [IN] + * 2D array of runlist buffers for a single partition + * + * runlistIdMask [IN] + * Mask of runlists belonging to partition + * + */ +#define NV2080_CTRL_CMD_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS (0x20800a53) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS_MESSAGE_ID (0x53U) + +typedef struct NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_MEMDESC_INFO rlBuffers[NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_ID][NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_BUFFERS], 8); + NV_DECLARE_ALIGNED(NvU64 runlistIdMask, 8); + NvU32 swizzId; +} NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_SET_IMP_INIT_INFO (0x20800a54) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS_MESSAGE_ID (0x54U) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS { + TEGRA_IMP_IMPORT_DATA tegraImpImportData; +} NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_GET_EGPU_BRIDGE_INFO (0x20800a55) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_EGPU_BRIDGE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GET_EGPU_BRIDGE_INFO_PARAMS_MESSAGE_ID (0x55U) + +typedef struct NV2080_CTRL_INTERNAL_GET_EGPU_BRIDGE_INFO_PARAMS { + NvU16 pciDeviceId; + NvU16 pciSubDeviceId; + NvBool iseGPUBridge; + NvU8 approvedBusType; +} NV2080_CTRL_INTERNAL_GET_EGPU_BRIDGE_INFO_PARAMS; + +#define NV2080_CTRL_INTERNAL_EGPU_BUS_TYPE_NONE (0x00000000) +#define NV2080_CTRL_INTERNAL_EGPU_BUS_TYPE_CUSTOM (0x00000001) +#define NV2080_CTRL_INTERNAL_EGPU_BUS_TYPE_TB2 (0x00000002) +#define NV2080_CTRL_INTERNAL_EGPU_BUS_TYPE_TB3 (0x00000003) + + +/*! + * NV2080_CTRL_CMD_INTERNAL_BUS_FLUSH_WITH_SYSMEMBAR + * + * This command triggers a sysmembar to flush VIDMEM writes. + * This command accepts no parameters. 
+ * + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_FLUSH_WITH_SYSMEMBAR (0x20800a70) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x70" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL + * + * Setup local PCIE P2P Mailbox + * + * local2Remote[IN] + * Local peer ID of remote gpu on local gpu + * + * remote2Local[IN] + * Remote peer ID of local gpu on remote gpu + * + * localP2PDomainRemoteAddr[IN] + * P2P domain remote address on local gpu + * + * remoteP2PDomainLocalAddr[IN] + * P2P domain local address on remote gpu + * + * remoteWMBoxLocalAddr[IN] + * Local mailbox address on remote gpu + * + * p2pWmbTag[OUT] + * Tag for mailbox to transport from local to remote GPU + * + * bNeedWarBug999673[IN] + * Set to true if WAR for bug 999673 is required + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL (0x20800a71) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS_MESSAGE_ID (0x71U) + +typedef struct NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS { + NvU32 local2Remote; + NvU32 remote2Local; + NV_DECLARE_ALIGNED(NvU64 localP2PDomainRemoteAddr, 8); + NV_DECLARE_ALIGNED(NvU64 remoteP2PDomainLocalAddr, 8); + NV_DECLARE_ALIGNED(NvU64 remoteWMBoxLocalAddr, 8); + NV_DECLARE_ALIGNED(NvU64 p2pWmbTag, 8); + NvBool bNeedWarBug999673; +} NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS; + + /* + * NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE + * + * Setup remote PCIE P2P Mailbox + * + * local2Remote[IN] + * Local peer ID of remote gpu on local gpu + * + * remote2Local[IN] + * Remote peer ID of local gpu on remote gpu + * + * localP2PDomainRemoteAddr[IN] + * P2P domain remote address on local gpu + * + * remoteP2PDomainLocalAddr[IN] + * P2P domain local address on remote gpu + * + * remoteWMBoxAddrU64[IN] + * Mailbox address on remote gpu + * + * p2pWmbTag[IN] + * Tag for mailbox to transport from local to remote GPU + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE (0x20800a72) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS_MESSAGE_ID (0x72U) + +typedef struct NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS { + NvU32 local2Remote; + NvU32 remote2Local; + NV_DECLARE_ALIGNED(NvU64 localP2PDomainRemoteAddr, 8); + NV_DECLARE_ALIGNED(NvU64 remoteP2PDomainLocalAddr, 8); + NV_DECLARE_ALIGNED(NvU64 remoteWMBoxAddrU64, 8); + NV_DECLARE_ALIGNED(NvU64 p2pWmbTag, 8); +} NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_BUS_DESTROY_P2P_MAILBOX + * + * Destroy PCIE P2P Mailbox + * + * peerIdx[IN] + * Peer ID of the P2P destination GPU + * + * bNeedWarBug999673[IN] + * Set to true if WAR for bug 999673 is required + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_DESTROY_P2P_MAILBOX (0x20800a73) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS_MESSAGE_ID" */ + +#define 
NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS_MESSAGE_ID (0x73U)
+
+typedef struct NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS {
+    NvU32  peerIdx;
+    NvBool bNeedWarBug999673;
+} NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING
+ *
+ * Create C2C mapping to a given peer GPU
+ *
+ *   peerId[IN]
+ *     Peer ID for local to remote GPU
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING (0x20800a74) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS_MESSAGE_ID (0x74U)
+
+typedef struct NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS {
+    NvU32 peerId;
+} NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING
+ *
+ * Remove C2C mapping to a given peer GPU
+ *
+ *   peerId[IN]
+ *     Peer ID for local to remote GPU
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING (0x20800a75) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS_MESSAGE_ID (0x75U)
+
+typedef struct NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS {
+    NvU32 peerId;
+} NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES
+ *
+ * Retrieves the corresponding SPAs (per the given GFID's VMMU mappings)
+ * for the given array of GPAs.
+ *
+ *   gfid [IN]
+ *     GFID to translate GPAs for
+ *
+ *   numEntries [IN]
+ *     Number of entries (<= NV2080_CTRL_INTERNAL_VMMU_MAX_SPA_FOR_GPA_ENTRIES)
+ *     to translate (i.e. number of elements in gpaEntries)
+ *
+ *   gpaEntries [IN]
+ *     Array of GPAs to translate
+ *
+ *   spaEntries [OUT]
+ *     Resulting array of SPAs
+ */
+#define NV2080_CTRL_CMD_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES (0x20800a57) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_VMMU_MAX_SPA_FOR_GPA_ENTRIES 128
+
+#define NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS_MESSAGE_ID (0x57U)
+
+typedef struct NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS {
+    NvU32 gfid;
+    NvU32 numEntries;
+    NV_DECLARE_ALIGNED(NvU64 gpaEntries[NV2080_CTRL_INTERNAL_VMMU_MAX_SPA_FOR_GPA_ENTRIES], 8);
+    NV_DECLARE_ALIGNED(NvU64 spaEntries[NV2080_CTRL_INTERNAL_VMMU_MAX_SPA_FOR_GPA_ENTRIES], 8);
+} NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS;
+
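Because the arrays in NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS are fixed at 128 elements, larger translation requests have to be chunked. A hedged sketch (hypothetical issueControl(); gfid, gpas, spas, and count are caller-provided; memcpy from <string.h>):

    NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS p = { 0 };
    NvU32 done = 0;

    p.gfid = gfid; /* caller-provided (assumed) */

    while (done < count)
    {
        NvU32 n = count - done;
        if (n > NV2080_CTRL_INTERNAL_VMMU_MAX_SPA_FOR_GPA_ENTRIES)
            n = NV2080_CTRL_INTERNAL_VMMU_MAX_SPA_FOR_GPA_ENTRIES;

        /* Translate one chunk of GPAs into SPAs. */
        p.numEntries = n;
        memcpy(p.gpaEntries, &gpas[done], n * sizeof(NvU64));
        issueControl(NV2080_CTRL_CMD_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES, &p, sizeof(p));
        memcpy(&spas[done], p.spaEntries, n * sizeof(NvU64));
        done += n;
    }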
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER
+ *
+ * Pass required pushbuffer parameters from kernel RM to Physical RM
+ * (a fill-in sketch follows this header excerpt).
+ *
+ *   addressSpace [IN]
+ *     Address space of the pushbuffer, whether it is ADDR_SYSMEM or ADDR_FBMEM
+ *
+ *   physicalAddr [IN]
+ *     Physical address of the pushbuffer
+ *
+ *   limit [IN]
+ *     Limit of the pushbuffer address; it should be less than 4K
+ *
+ *   cacheSnoop [IN]
+ *     Whether cache snoop is supported or not
+ *
+ *   channelInstance [IN]
+ *     Channel instance passed by the client to get the corresponding display channel
+ *
+ *   hclass [IN]
+ *     External class ID passed by the client to get the channel class
+ *
+ *   valid [IN]
+ *     This bit indicates whether the pushbuffer parameters are valid or not
+ *
+ *   pbTargetAperture [IN]
+ *     Indicates the PushBuffer Target Aperture type (IOVA, PCI, PCI_COHERENT or NVM)
+ *
+ *   channelPBSize [IN]
+ *     Indicates the PushBuffer size requested by the client
+ *
+ *   subDeviceId [IN]
+ *     One-hot encoded subDeviceId (i.e. SDM) that will be used to address the channel
+ *     in the pushbuffer stream (via SSDM method)
+ */
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID (0x58U)
+
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+    NvU32  addressSpace;
+    NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 limit, 8);
+    NvU32  cacheSnoop;
+    NvU32  hclass;
+    NvU32  channelInstance;
+    NvBool valid;
+    NvU32  pbTargetAperture;
+    NvU32  channelPBSize;
+    NvU32  subDeviceId;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_GMMU_GET_STATIC_INFO
+ *
+ * This command obtains information from physical RM for use by CPU-RM.
+ *
+ *   replayableFaultBufferSize
+ *     Default size of replayable fault buffer
+ *
+ *   nonReplayableFaultBufferSize
+ *     Default size of non-replayable fault buffer
+ *
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_GMMU_GET_STATIC_INFO (0x20800a59) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS_MESSAGE_ID (0x59U)
+
+typedef struct NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS {
+    NvU32 replayableFaultBufferSize;
+    NvU32 replayableShadowFaultBufferMetadataSize;
+    NvU32 nonReplayableFaultBufferSize;
+    NvU32 nonReplayableShadowFaultBufferMetadataSize;
+} NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS;
+
+/*!
+ * @ref NV2080_CTRL_CMD_GR_GET_CTXSW_MODES
+ */
+#define NV2080_CTRL_CMD_INTERNAL_GR_GET_CTXSW_MODES (0x20800a5a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_GET_CTXSW_MODES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GR_GET_CTXSW_MODES_PARAMS_MESSAGE_ID (0x5AU)
+
+typedef NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS NV2080_CTRL_INTERNAL_GR_GET_CTXSW_MODES_PARAMS;
+
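As flagged in the NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER description above, a fill-in sketch for its parameter block follows. The address-space constant, the caller-provided values, and the issueControl() helper are assumptions, not definitions from this header:

    NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS p = { 0 };

    p.addressSpace    = addressSpace; /* e.g. an ADDR_FBMEM-style constant (assumed) */
    p.physicalAddr    = pbPhysAddr;   /* caller-provided pushbuffer address */
    p.limit           = pbSize - 1;   /* documented to stay below 4K */
    p.cacheSnoop      = 0;
    p.hclass          = hclass;       /* external display channel class */
    p.channelInstance = 0;
    p.valid           = NV_TRUE;
    issueControl(NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER, &p, sizeof(p));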
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE
+ *
+ * Get the heap reservation size needed by different modules
+ */
+#define NV2080_CTRL_CMD_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE (0x20800a5b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS_MESSAGE_ID (0x5BU)
+
+typedef struct NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS {
+    NvU32 moduleIndex;
+    NvU32 size;
+} NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE
+ *
+ *   tableLen [OUT]
+ *     Number of valid records in table field.
+ *
+ *   table [OUT]
+ *     Interrupt table for Kernel RM.
+ *
+ *   subtreeMap [OUT]
+ *     Subtree mask for each NV2080_INTR_CATEGORY.
+ */
+#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128
+
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY {
+    NvU16 engineIdx;
+    NvU32 pmcIntrMask;
+    NvU32 vectorStall;
+    NvU32 vectorNonStall;
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY;
+
+#define NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID (0x5CU)
+
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS {
+    NvU32                                            tableLen;
+    NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE];
+    NV_DECLARE_ALIGNED(NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT], 8);
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS;
+
+/* Index to retrieve the needed heap space for specific module */
+#define NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_GR (0x00000000)
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK
+ *
+ * Checks whether reservation / release of the PERFMON HW is possible
+ * (a check-then-set sketch follows this header excerpt).
+ *
+ *   bReservation [IN]
+ *     NV_TRUE -> request for reservation, NV_FALSE -> request for release
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK (0x20800a98) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS_MESSAGE_ID" */
+
+
+#define NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS_MESSAGE_ID (0x98U)
+
+typedef struct NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS {
+    NvBool bReservation;
+} NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS;
+
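The CHECK control above pairs with the SET control defined immediately below: a client would typically probe first, then commit. A hedged sketch with the same hypothetical issueControl() helper:

    NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS chk = { 0 };
    NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS   set = { 0 };

    /* Ask whether a reservation is currently possible... */
    chk.bReservation = NV_TRUE;
    issueControl(NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK,
                 &chk, sizeof(chk));

    /* ...then actually take it (release later with bReservation = NV_FALSE). */
    set.bReservation           = NV_TRUE;
    set.bClientHandlesGrGating = NV_FALSE;
    set.bRmHandlesIdleSlow     = NV_FALSE;
    issueControl(NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET,
                 &set, sizeof(set));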
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET
+ *
+ * Reserving / Releasing PERFMON HW
+ *
+ *   bReservation [IN]
+ *     NV_TRUE -> request for reservation, NV_FALSE -> request for release
+ *
+ *   bClientHandlesGrGating [IN]
+ *     DM-TODO: Add comment for this
+ *
+ *   bRmHandlesIdleSlow [IN]
+ *     If the IDLE slowdown is required
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET (0x20800a99) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS_MESSAGE_ID (0x99U)
+
+typedef struct NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS {
+    NvBool bReservation;
+    NvBool bClientHandlesGrGating;
+    NvBool bRmHandlesIdleSlow;
+} NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES
+ *
+ * Get active display devices
+ *
+ *   displayMask [OUT]
+ *     Get the mask of the active display devices in VBIOS
+ *
+ *   numHeads [OUT]
+ *     Number of heads supported by the display.
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES (0x20800a5d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS_MESSAGE_ID (0x5DU)
+
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS {
+    NvU32 displayMask;
+    NvU32 numHeads;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES
+ *
+ * Get FB Mem page ranges for all possible swizzIds
+ *
+ *   fbMemPageRanges [OUT]
+ *     Mem page ranges for each swizzId in the form of {lo, hi}
+ */
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES (0x20800a60) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_MAX_SWIZZ_ID 15
+
+#define NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS_MESSAGE_ID (0x60U)
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_NV_RANGE fbMemPageRanges[NV2080_CTRL_INTERNAL_MAX_SWIZZ_ID], 8);
+} NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_FIFO_GET_NUM_CHANNELS
+ *
+ * This command is an internal command sent from Kernel RM to Physical RM
+ * to get the number of channels for a given runlist ID
+ *
+ *   runlistId [IN]
+ *   numChannels [OUT]
+ */
+#define NV2080_CTRL_CMD_INTERNAL_FIFO_GET_NUM_CHANNELS (0x20800a61) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS_MESSAGE_ID (0x61U)
+
+typedef struct NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS {
+    NvU32 runlistId;
+    NvU32 numChannels;
+} NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS;
+
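Editorial aside: every control value in these headers follows the layout spelled out by the "finn: Evaluated from" comments, with the class in the top 16 bits, the interface ID in bits 15:8, and the message ID in the low byte. A small illustrative reconstruction; the 0x0a internal-interface value is inferred from the evaluated constants in this file, not taken from the FINN sources:

    /* Sketch only: mirrors the "finn: Evaluated from" arithmetic. */
    #define SKETCH_NV2080_INTERNAL_CTRL(msg) \
        ((0x2080u << 16) | (0x0au << 8) | (msg))

    /* SKETCH_NV2080_INTERNAL_CTRL(0x61) == 0x20800a61
     *   == NV2080_CTRL_CMD_INTERNAL_FIFO_GET_NUM_CHANNELS, as defined above. */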
+ * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_PROFILES + * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_PROFILES + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_PROFILES (0x20800a63) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KMIGMGR_GET_PROFILES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_STATIC_KMIGMGR_GET_PROFILES_PARAMS_MESSAGE_ID (0x63U) + +typedef NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS NV2080_CTRL_INTERNAL_STATIC_KMIGMGR_GET_PROFILES_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_PARTITIONABLE_ENGINES + * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_PARTITIONABLE_ENGINES (0x20800a65) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KMIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_STATIC_KMIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS_MESSAGE_ID (0x65U) + +typedef NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS NV2080_CTRL_INTERNAL_STATIC_KMIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES + * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES (0x20800a66) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KMIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_STATIC_KMIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS_MESSAGE_ID (0x66U) + +typedef NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS NV2080_CTRL_INTERNAL_STATIC_KMIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS;
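+ +/* + * Usage sketch (illustrative only, not part of the original header): fetching + * the per-swizzId FB page ranges via the Kernel-RM alias, assuming the RM_API + * Control idiom; pRmApi, hClient and hSubdevice are placeholder handles. + * + *   NV2080_CTRL_INTERNAL_STATIC_KMIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS params = {0}; + *   NV_STATUS status = pRmApi->Control(pRmApi, hClient, hSubdevice, + *       NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES, + *       &params, sizeof(params)); + *   // On NV_OK, params.fbMemPageRanges[i] is the {lo, hi} page range for swizzId i. + */ + +/*!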
+ * NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG + * NV2080_CTRL_CMD_INTERNAL_KMEMSYS_GET_MIG_MEMORY_CONFIG + * + * This command retrieves the memory config from HW + * + * memBoundaryCfgA [OUT] + * Memory boundary config A (4KB aligned) + * + * memBoundaryCfgB [OUT] + * Memory boundary config B (4KB aligned) + * + * memBoundaryCfgC [OUT] + * Memory boundary config C (64KB aligned) + * + * memBoundaryCfg [OUT] + * Memory boundary config (64KB aligned) + * + * memBoundaryCfgValInit [OUT] + * Memory boundary config initial value (64KB aligned) + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG (0x20800a68) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS_MESSAGE_ID (0x68U) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS { + NV_DECLARE_ALIGNED(NvU64 memBoundaryCfgA, 8); + NV_DECLARE_ALIGNED(NvU64 memBoundaryCfgB, 8); + NvU32 memBoundaryCfgC; + NvU32 memBoundaryCfg; + NvU32 memBoundaryCfgValInit; +} NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_KMEMSYS_GET_MIG_MEMORY_CONFIG (0x20800a67) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_KMEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_KMEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS_MESSAGE_ID (0x67U) + +typedef NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS NV2080_CTRL_INTERNAL_KMEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE (0x20800a6b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_SIZE 8 + +#define NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS_MESSAGE_ID (0x6BU) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS { + NvU32 data[NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_SIZE]; +} NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_RC_WATCHDOG_TIMEOUT + * + * Invoke RC recovery after the watchdog timeout is hit. + */ +#define NV2080_CTRL_CMD_INTERNAL_RC_WATCHDOG_TIMEOUT (0x20800a6a) /* finn: Evaluated from "((FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x6a)" */ + +/*! + * This command is used to get the current AUX power state of the sub-device. + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_GET_AUX_POWER_STATE (0x20800a81) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_GET_AUX_POWER_STATE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_GET_AUX_POWER_STATE_PARAMS_MESSAGE_ID (0x81U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_GET_AUX_POWER_STATE_PARAMS { + NvU32 powerState; +} NV2080_CTRL_INTERNAL_PERF_GET_AUX_POWER_STATE_PARAMS; + +/*! + * This command can be used to boost P-State up one level or to the highest for a limited + * duration for the associated subdevice. Boosts from different clients are tracked + * independently. Note that there are other factors that can limit P-States, so the resulting + * P-State may differ from expectation. + * + * flags + * This parameter specifies the actual command. _CLEAR is to clear the existing boost.
+ * _BOOST_1LEVEL is to boost P-State one level higher. _BOOST_TO_MAX is to boost + * to the highest P-State. + * duration + * This parameter specifies the duration of the boost in seconds. This has to be less + * than NV2080_CTRL_PERF_BOOST_DURATION_MAX. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_SET_2X (0x20800a9a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X_MESSAGE_ID (0x9AU) + +typedef struct NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X { + NvBool flags; + NvU32 duration; +} NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X; + +#define NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_PSTATE 0U +#define NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_GPCCLK 1U +#define NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_LAST NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_GPCCLK +#define NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM (0x2U) /* finn: Evaluated from "NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_LAST + 1" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_SET_CONTROL + * + * Activate/deactivate the GPU Boost Sync algorithm + * + * bActivate [IN] + * GPU Boost Sync algorithm: + * NV_TRUE -> activate + * NV_FALSE -> deactivate + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_SET_CONTROL (0x20800a7e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS_MESSAGE_ID (0x7EU) + +typedef struct NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS { + NvBool bActivate; +} NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS + * + * Apply the given limits to a specific subdevice + * + * flags [IN] + * DM-TODO: write description here + * + * bBridgeless [IN] + * Bridgeless information, for now supporting only MIO bridges + * + * currLimits + * Array of limits that will be applied + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS (0x20800a7f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_MESSAGE_ID (0x7FU) + +typedef struct NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS { + NvU32 flags; + NvBool bBridgeless; + NvU32 currLimits[NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM]; +} NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO + * + * Data for GPU Boost Sync structure initialization + * + * hysteresisus [OUT] + * Hysteresis value for the GPU Boost synchronization hysteresis algorithm.
+ * + * bHystersisEnable [OUT] + * Hysteresis algorithm for SLI GPU Boost synchronization: + * NV_TRUE -> enabled, + * NV_FALSE -> disabled + * + * bSliGpuBoostSyncEnable [OUT] + * SLI GPU Boost feature is: + * NV_TRUE -> enabled, + * NV_FALSE -> disabled + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO (0x20800a80) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS_MESSAGE_ID (0x80U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS { + NV_DECLARE_ALIGNED(NvU64 hysteresisus, 8); + NvBool bHystersisEnable; + NvBool bSliGpuBoostSyncEnable; +} NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_GMMU_REGISTER_FAULT_BUFFER + * + * This command sends replayable fault buffer pages allocated by CPU-RM + * to be set up and enabled in physical RM. + * + * hClient + * Client handle. + * + * hObject + * Object handle. + * + * faultBufferSize + * Size of the replayable fault buffer to register. + * + * faultBufferPteArray + * Pages of the replayable fault buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_GMMU_REGISTER_FAULT_BUFFER (0x20800a9b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GMMU_FAULT_BUFFER_MAX_PAGES 256 +#define NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS_MESSAGE_ID (0x9BU) + +typedef struct NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS { + NvHandle hClient; + NvHandle hObject; + NvU32 faultBufferSize; + NV_DECLARE_ALIGNED(NvU64 faultBufferPteArray[NV2080_CTRL_INTERNAL_GMMU_FAULT_BUFFER_MAX_PAGES], 8); +} NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_GMMU_UNREGISTER_FAULT_BUFFER + * + * This command requests physical RM to disable the replayable fault buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_GMMU_UNREGISTER_FAULT_BUFFER (0x20800a9c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x9C" */ + +// Valid fault buffer types +#define NV2080_CTRL_FAULT_BUFFER_NON_REPLAYABLE (0x00000000) +#define NV2080_CTRL_FAULT_BUFFER_REPLAYABLE (0x00000001) + +/* + * NV2080_CTRL_CMD_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER + * + * This command sends client shadow fault buffer pages allocated by CPU-RM + * to be set up and enabled in physical RM. + * + * shadowFaultBufferQueuePhysAddr + * Physical address of the shadow fault buffer queue. + * + * shadowFaultBufferSize + * Size of the client shadow fault buffer to register. + * + * shadowFaultBufferMetadataSize + * Size of the client shadow fault buffer metadata. + * + * shadowFaultBufferPteArray + * Pages of the client shadow fault buffer. + * + * shadowFaultBufferType + * Replayable or non-replayable fault buffer + * + * faultBufferSharedMemoryPhysAddr + * Fault buffer shared memory address. Used only by the replayable fault buffer.
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER (0x20800a9d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GMMU_CLIENT_SHADOW_FAULT_BUFFER_MAX_PAGES 3000 +#define NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS_MESSAGE_ID (0x9DU) + +typedef struct NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS { + NV_DECLARE_ALIGNED(NvU64 shadowFaultBufferQueuePhysAddr, 8); + NvU32 shadowFaultBufferSize; + NvU32 shadowFaultBufferMetadataSize; + NV_DECLARE_ALIGNED(NvU64 shadowFaultBufferPteArray[NV2080_CTRL_INTERNAL_GMMU_CLIENT_SHADOW_FAULT_BUFFER_MAX_PAGES], 8); + NvU32 shadowFaultBufferType; + NV_DECLARE_ALIGNED(NvU64 faultBufferSharedMemoryPhysAddr, 8); +} NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER + * + * This command requests physical RM to disable the client shadow fault buffer. + * + * shadowFaultBufferType + * Replayable or non-replayable fault buffer + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER (0x20800a9e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS_MESSAGE_ID (0x9EU) + +typedef struct NV2080_CTRL_INTERNAL_GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS { + NvU32 shadowFaultBufferType; +} NV2080_CTRL_INTERNAL_GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_GMMU_COPY_RESERVED_SPLIT_GVASPACE_PDES_TO_SERVER + * + * Pin PDEs for the Global VA range on the server RM and then mirror the client's page + * directory/tables on the server. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_GMMU_COPY_RESERVED_SPLIT_GVASPACE_PDES_TO_SERVER (0x20800a9f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GMMU_COPY_RESERVED_SPLIT_GVASPACE_PDES_TO_SERVER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GMMU_COPY_RESERVED_SPLIT_GVASPACE_PDES_TO_SERVER_PARAMS_MESSAGE_ID (0x9FU) + +typedef struct NV2080_CTRL_INTERNAL_GMMU_COPY_RESERVED_SPLIT_GVASPACE_PDES_TO_SERVER_PARAMS { + NV_DECLARE_ALIGNED(NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS PdeCopyParams, 8); +} NV2080_CTRL_INTERNAL_GMMU_COPY_RESERVED_SPLIT_GVASPACE_PDES_TO_SERVER_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_SET_3X + * + * This command can be used to boost the P-State to the highest for a limited + * duration for the associated subdevice. Boosts from different clients are tracked + * independently. Note that there are other factors that can limit P-States, so the resulting + * P-State may differ from expectation. + * + * flags [IN] + * This parameter specifies the actual command. _CLEAR is to clear the existing boost, + * and _BOOST_TO_MAX is to boost to the highest P-State. + * + * boostDuration [IN] + * This parameter specifies the duration of the boost in seconds. This has to be less + * than NV2080_CTRL_PERF_BOOST_DURATION_MAX.
+ * + * gfId [IN] + * This specifies the ID of the Kernel RM that is requesting the boost + * + * bOverrideInfinite [IN] + * This parameter specifies whether to override an already registered infinite boost for the specific Kernel RM. + * This should be NV_TRUE only when removing the current infinite boost for a specific Kernel RM + * and setting the boost duration to the next maximum duration registered for the Kernel RM in question. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_SET_3X (0x20800aa0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X_MESSAGE_ID (0xA0U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X { + NvU32 flags; + NvU32 boostDuration; + NvU32 gfId; + NvBool bOverrideInfinite; +} NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X; + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_CLEAR_3X + * + * Clear the boost for a specific Kernel RM + * + * bIsCudaClient [IN] + * Specifies whether the request is for clearing the CUDA boost or the regular boost + * NV_TRUE -> CUDA boost, NV_FALSE otherwise + * + * gfId [IN] + * Specifies the ID of the Kernel RM that is requesting the boost clear + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_CLEAR_3X (0x20800aa1) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X_MESSAGE_ID (0xA1U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X { + NvBool bIsCudaClient; + NvU32 gfId; +} NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X; + +/* + * NV2080_CTRL_CMD_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO + * + * Retrieves skyline information about the GPU. Params are sized to currently known max + * values, but will need to be modified in the future should that change. + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO (0x20800aa2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO_MAX_SKYLINES 9 +#define NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO_MAX_NON_SINGLETON_VGPCS 32 +/*! + * NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO + * skylineVgpcSize[OUT] + * - TPC count of non-singleton VGPCs + * singletonVgpcMask[OUT] + * - Mask of active Singletons + * maxInstances[OUT] + * - Max allowed instances of this skyline concurrently on a GPU + * computeSizeFlag[OUT] + * - One of NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_* flags which is associated with this skyline + * numNonSingletonVgpcs[OUT] + * - Number of VGPCs with non-zero TPC counts which are not singletons + */ +typedef struct NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO { + NvU8 skylineVgpcSize[NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO_MAX_NON_SINGLETON_VGPCS]; + NvU32 singletonVgpcMask; + NvU32 maxInstances; + NvU32 computeSizeFlag; + NvU32 numNonSingletonVgpcs; +} NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO;
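+ +/* + * Usage sketch (illustrative only, not part of the original header): clearing a + * previously requested boost for one Kernel RM, assuming the RM_API Control + * idiom; pRmApi, hClient and hSubdevice are placeholder handles. + * + *   NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X params = {0}; + *   params.bIsCudaClient = NV_FALSE; // clear the regular (non-CUDA) boost + *   params.gfId = 0;                 // Kernel RM ID (placeholder) + *   NV_STATUS status = pRmApi->Control(pRmApi, hClient, hSubdevice, + *       NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_CLEAR_3X, + *       &params, sizeof(params)); + */ + +/*!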
+ * NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS + * skylineTable[OUT] + * - TPC count of non-singleton VGPCs + * - Mask of singleton vGPC IDs active + * - Max Instances of this skyline possible concurrently + * - Associated compute size with the indexed skyline + * - Number of VGPCs with non-zero TPC counts which are not singletons + * validEntries[OUT] + * - Number of entries which contain valid info in skylineTable + */ +#define NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS_MESSAGE_ID (0xA2U) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS { + NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO skylineTable[NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO_MAX_SKYLINES]; + NvU32 validEntries; +} NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GPU_SET_PARTITIONING_MODE + */ +#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_SET_PARTITIONING_MODE (0x20800aa3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MIGMGR_SET_PARTITIONING_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MIGMGR_SET_PARTITIONING_MODE_PARAMS_MESSAGE_ID (0xA3U) + +typedef NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS NV2080_CTRL_INTERNAL_MIGMGR_SET_PARTITIONING_MODE_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GPU_CONFIGURE_PARTITION + */ +#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_CONFIGURE_GPU_INSTANCE (0x20800aa4) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MIGMGR_CONFIGURE_GPU_INSTANCE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MIGMGR_CONFIGURE_GPU_INSTANCE_PARAMS_MESSAGE_ID (0xA4U) + +typedef NV2080_CTRL_GPU_CONFIGURE_PARTITION_PARAMS NV2080_CTRL_INTERNAL_MIGMGR_CONFIGURE_GPU_INSTANCE_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GPU_SET_PARTITIONS + */ +#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_SET_GPU_INSTANCES (0x20800aa5) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MIGMGR_SET_GPU_INSTANCES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MIGMGR_SET_GPU_INSTANCES_PARAMS_MESSAGE_ID (0xA5U) + +typedef NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS NV2080_CTRL_INTERNAL_MIGMGR_SET_GPU_INSTANCES_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GPU_GET_PARTITIONS + */ +#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_GET_GPU_INSTANCES (0x20800aa6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MIGMGR_GET_GPU_INSTANCES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MIGMGR_GET_GPU_INSTANCES_PARAMS_MESSAGE_ID (0xA6U) + +typedef NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS NV2080_CTRL_INTERNAL_MIGMGR_GET_GPU_INSTANCES_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED + * + * Tell Physical RM whether any ZBC-kind surfaces are allocated. + * If PF and all VFs report false, the ZBC table can be flushed by Physical RM. + * + * bZbcSurfacesExist [IN] + * NV_TRUE -> ZBC-kind surfaces (without the _SKIP_ZBCREFCOUNT flag) are allocated in Kernel RM + * + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED (0x20800a69) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS_MESSAGE_ID (0x69U) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS { + NvBool bZbcSurfacesExist; +} NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS;
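+ +/* + * Usage sketch (illustrative only, not part of the original header): reporting + * that no ZBC-kind surfaces remain, so Physical RM may flush the ZBC table, + * assuming the RM_API Control idiom; pRmApi, hClient and hSubdevice are + * placeholder handles. + * + *   NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS params = {0}; + *   params.bZbcSurfacesExist = NV_FALSE; // no ZBC-kind surfaces allocated here + *   NV_STATUS status = pRmApi->Control(pRmApi, hClient, hSubdevice, + *       NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED, + *       &params, sizeof(params)); + */ + +/*!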
+ * NV2080_CTRL_CMD_INTERNAL_KMIGMGR_EXPORT_GPU_INSTANCE + * + * Export the resource and placement information about a GPU instance such that a + * similar GPU instance can be recreated from scratch in the same position on the + * same or similar GPU. Note that different GPUs may have different physical + * resources due to floorsweeping, and an imported GPU instance is not guaranteed + * to get the exact same resources as the exported GPU instance, but the imported + * GPU instance should behave identically with respect to fragmentation and + * placement / span positioning. + */ +#define NV2080_CTRL_CMD_INTERNAL_KMIGMGR_EXPORT_GPU_INSTANCE (0x20800aa7) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_KMIGMGR_EXPORT_GPU_INSTANCE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_EXPORT_GPU_INSTANCE (0x20800aa8) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MIGMGR_EXPORT_GPU_INSTANCE_PARAMS_MESSAGE_ID" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_KMIGMGR_IMPORT_GPU_INSTANCE + * + * Create a GPU instance resembling the exported instance info. Note that + * different GPUs may have different physical resources due to floorsweeping, + * and an imported GPU instance is not guaranteed to get the exact same resources + * as the exported GPU instance, but the imported GPU instance should behave + * identically with respect to fragmentation and placement / span positioning. + */ +#define NV2080_CTRL_CMD_INTERNAL_KMIGMGR_IMPORT_GPU_INSTANCE (0x20800aa9) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_GPU_INSTANCE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_IMPORT_GPU_INSTANCE (0x20800aaa) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MIGMGR_IMPORT_GPU_INSTANCE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_MAX_ENGINES_MASK_SIZE 4 +typedef struct NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_INFO { + NV_DECLARE_ALIGNED(NvU64 enginesMask[NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_MAX_ENGINES_MASK_SIZE], 8); + NvU32 partitionFlags; + NvU32 gpcMask; + NvU32 virtualGpcCount; + NvU32 veidOffset; + NvU32 veidCount; +} NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_INFO; + +typedef struct NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS { + NvU32 swizzId; + NvU8 uuid[NV_GI_UUID_LEN]; + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_INFO info, 8); +} NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS; + +#define NV2080_CTRL_INTERNAL_KMIGMGR_EXPORT_GPU_INSTANCE_PARAMS_MESSAGE_ID (0xA7U) + +typedef NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS NV2080_CTRL_INTERNAL_KMIGMGR_EXPORT_GPU_INSTANCE_PARAMS; + +#define NV2080_CTRL_INTERNAL_MIGMGR_EXPORT_GPU_INSTANCE_PARAMS_MESSAGE_ID (0xA8U) + +typedef NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS NV2080_CTRL_INTERNAL_MIGMGR_EXPORT_GPU_INSTANCE_PARAMS; + +#define NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_GPU_INSTANCE_PARAMS_MESSAGE_ID (0xA9U) + +typedef NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_GPU_INSTANCE_PARAMS; + +#define NV2080_CTRL_INTERNAL_MIGMGR_IMPORT_GPU_INSTANCE_PARAMS_MESSAGE_ID (0xAAU) + +typedef NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS NV2080_CTRL_INTERNAL_MIGMGR_IMPORT_GPU_INSTANCE_PARAMS;
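+ +/* + * Usage sketch (illustrative only, not part of the original header): exporting + * a GPU instance description for later re-import, assuming the RM_API Control + * idiom; pRmApi, hClient and hSubdevice are placeholder handles. + * + *   NV2080_CTRL_INTERNAL_KMIGMGR_EXPORT_GPU_INSTANCE_PARAMS params = {0}; + *   params.swizzId = 0; // instance to export (placeholder) + *   NV_STATUS status = pRmApi->Control(pRmApi, hClient, hSubdevice, + *       NV2080_CTRL_CMD_INTERNAL_KMIGMGR_EXPORT_GPU_INSTANCE, + *       &params, sizeof(params)); + *   // On NV_OK, params.uuid and params.info can later be passed back through + *   // NV2080_CTRL_CMD_INTERNAL_KMIGMGR_IMPORT_GPU_INSTANCE. + */ + +/*!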
+ * NV2080_CTRL_CMD_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT + * + * Invalidate and/or evict the L2 cache + * + * flags [IN] + * Flags that specify the required actions + * + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT (0x20800a6c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS_MESSAGE_ID (0x6cU) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS { + NvU32 flags; +} NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS; + +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_ALL (0x00000001) +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_FIRST (0x00000002) +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_LAST (0x00000004) +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_NORMAL (0x00000008) +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_CLEAN (0x00000010) +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_WAIT_FB_PULL (0x00000020) + +/*! + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_FLUSH_L2_ALL_RAMS_AND_CACHES + * + * Flush all L2 RAMs and caches using the ELPG flush + * + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_FLUSH_L2_ALL_RAMS_AND_CACHES (0x20800a6d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x6D" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_BIF_GET_STATIC_INFO + * + * This command obtains information from physical RM for use by CPU-RM + * + * Data fields -> + * bPcieGen4Capable - tells whether PCIe is Gen4 capable + */ +#define NV2080_CTRL_CMD_INTERNAL_BIF_GET_STATIC_INFO (0x20800aac) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS_MESSAGE_ID (0xacU) + +typedef struct NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS { + NvBool bPcieGen4Capable; + NvBool bIsC2CLinkUp; + NvBool bIsDeviceMultiFunction; + NvBool bGcxPmuCfgSpaceRestore; +} NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_HSHUB_PEER_CONN_CONFIG + * + * Program HSHUB Peer Conn Config space. + * + * programPeerMask[IN] + * If nonzero, the peer mask for programming peers based on hshub connectivity. + * + * invalidatePeerMask[IN] + * If nonzero, the peer mask for invalidating peers. + * + * programPciePeerMask[IN] + * If nonzero, the peer mask for programming peers in the PCIe case. + * + * Possible status values returned are: + * NV_OK + * NV_WARN_NOTHING_TO_DO + * If all peer masks are zero. + */ +#define NV2080_CTRL_CMD_INTERNAL_HSHUB_PEER_CONN_CONFIG (0x20800a88) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS_MESSAGE_ID (0x88U) + +typedef struct NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS { + NvU32 programPeerMask; + NvU32 invalidatePeerMask; + NvU32 programPciePeerMask; +} NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS + * + * Get the Hshub Ids connected to the other end of the links. + * + * linkMask[IN] + * A mask of link ids to query. + * + * hshubIds[OUT] + * For each set bit in the link mask, the peer Hshub Id.
+ * + * Possible status values returned are: + * NV_OK + * NV_WARN_NOTHING_TO_DO + * If the mask is zero. + */ +#define NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS (0x20800a8a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS_MESSAGE_ID" */ +#define NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_TABLE_SIZE 32 + +#define NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS_MESSAGE_ID (0x8aU) + +typedef struct NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS { + NvU32 linkMask; + NvU8 hshubIds[NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_TABLE_SIZE]; +} NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_NUM_UNITS + * + * Return the number of HSHUB units. + * + * numHshubs[OUT] + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_NUM_UNITS (0x20800a8b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS_MESSAGE_ID (0x8bU) + +typedef struct NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS { + NvU32 numHshubs; +} NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_HSHUB_NEXT_HSHUB_ID + * + * Return the next hshubId after the given hshubId. + * + * hshubId[IN/OUT] + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_HSHUB_NEXT_HSHUB_ID (0x20800a8c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS_MESSAGE_ID (0x8cU) + +typedef struct NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS { + NvU8 hshubId; +} NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_HSHUB_EGM_CONFIG + * + * Program HSHUB for the EGM peer ID. + * + * egmPeerId[IN] + * EGM peer ID to program in the HSHUB registers. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_HSHUB_EGM_CONFIG (0x20800a8d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_EGM_CONFIG_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_HSHUB_EGM_CONFIG_PARAMS_MESSAGE_ID (0x8dU) + +typedef struct NV2080_CTRL_INTERNAL_HSHUB_EGM_CONFIG_PARAMS { + NvU32 egmPeerId; +} NV2080_CTRL_INTERNAL_HSHUB_EGM_CONFIG_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_ENABLE_COMPUTE_PEER_ADDR + * + * Enable compute peer addressing mode + * This command accepts no parameters. + */ + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_ENABLE_COMPUTE_PEER_ADDR (0x20800aad) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xad" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR + * + * Get/Set the NVSwitch fabric address for FLA + * + * [In] bGet + * Whether to get or set the NVSwitch fabric address + * [In/Out] addr + * Address that is to be set or retrieved.
+ */ +#define NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS_MESSAGE_ID (0xaeU) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS { + NvBool bGet; + NV_DECLARE_ALIGNED(NvU64 addr, 8); +} NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR (0x20800aae) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS_MESSAGE_ID" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_BIF_GET_ASPM_L1_FLAGS + * + * This command obtains information from physical RM for use by CPU-RM + * + * Data fields -> + * bCyaMaskL1 + * bEnableAspmDtL1 + */ +#define NV2080_CTRL_CMD_INTERNAL_BIF_GET_ASPM_L1_FLAGS (0x20800ab0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS_MESSAGE_ID (0xb0U) + +typedef struct NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS { + NvBool bCyaMaskL1; + NvBool bEnableAspmDtL1; +} NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT + * + * Sets the number of VM slots that are active in the vGPU's scheduler + * + * maxActiveVGpuVMCount [IN] + * Number of VM slots that are active in the vGPU's scheduler. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OUT_OF_RANGE - The passed value is out of range + * NV_ERR_NO_MEMORY - Out of memory + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT (0x20800ab1) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS_MESSAGE_ID" */ + +/*! + * Maximum value of VM slots that are active in the vGPU's scheduler. + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_CF_CONTROLLERS_MAX_ACTIVE_VGPU_VM_COUNT_MAX_VALUE 32 + +#define NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS_MESSAGE_ID (0xB1U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS { + NvU8 maxActiveVGpuVMCount; +} NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_DISABLE_NVLINK_PEERS + * + * Disable all NVLINK FB peers + * + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_DISABLE_NVLINK_PEERS (0x20800a6e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x6E" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE + * + * Program the GPU in raw / legacy compression mode + * + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE (0x20800a6f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS_MESSAGE_ID (0x6fU) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS { + NvBool bRawMode; +} NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_CCU_GET_SAMPLE_INFO + * + * This command gets the CCU sample info from physical-RM.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_CCU_GET_SAMPLE_INFO (0x20800ab2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xB2" */ + +typedef struct NV2080_CTRL_INTERNAL_CCU_SAMPLE_INFO_PARAMS { + NvU32 ccuSampleSize; +} NV2080_CTRL_INTERNAL_CCU_SAMPLE_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_CCU_MAP + * + * This command gets the shared buffer memory descriptor from the CPU-RM and maps it + * in physical-RM. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_ADDRESS + */ +#define NV2080_CTRL_CMD_INTERNAL_CCU_MAP (0x20800ab3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_CCU_MAP_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_CCU_DEV_SHRBUF_COUNT_MAX 1 + +typedef struct NV2080_CTRL_INTERNAL_CCU_MAP_INFO { + NV_DECLARE_ALIGNED(NvU64 phyAddr, 8); + NvU32 shrBufSize; + NvU32 cntBlkSize; +} NV2080_CTRL_INTERNAL_CCU_MAP_INFO; + +#define NV2080_CTRL_INTERNAL_CCU_MAP_INFO_PARAMS_MESSAGE_ID (0xB3U) + +typedef struct NV2080_CTRL_INTERNAL_CCU_MAP_INFO_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_CCU_MAP_INFO mapInfo[NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_SIZE + NV2080_CTRL_INTERNAL_CCU_DEV_SHRBUF_COUNT_MAX], 8); +} NV2080_CTRL_INTERNAL_CCU_MAP_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_CCU_UNMAP + * + * This command unmaps the shared buffer memory mapping in physical-RM + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_CCU_UNMAP (0x20800ab4) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_CCU_UNMAP_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_CCU_UNMAP_INFO_PARAMS_MESSAGE_ID (0xB4U) + +typedef struct NV2080_CTRL_INTERNAL_CCU_UNMAP_INFO_PARAMS { + NvBool bDevShrBuf; + NvBool bMigShrBuf; +} NV2080_CTRL_INTERNAL_CCU_UNMAP_INFO_PARAMS; + +/*! + * NV2080_CTRL_INTERNAL_SET_P2P_CAPS_PEER_INFO + * + * [in] gpuId + * GPU ID. + * [in] gpuInstance + * GPU instance. + * [in] p2pCaps + * Peer to peer capabilities discovered between the GPUs. + * See NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 for the list of valid values. + * [in] p2pOptimalReadCEs + * Mask of CEs to use for p2p reads over Nvlink. + * [in] p2pOptimalWriteCEs + * Mask of CEs to use for p2p writes over Nvlink. + * [in] p2pCapsStatus + * Status of all supported p2p capabilities. + * See NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 for the list of valid values. + * [in] busPeerId + * Bus peer ID. For an invalid or a non-existent peer this field + * has the value NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INVALID_PEER. + * [in] busEgmPeerId + * Bus EGM peer ID. For an invalid or a non-existent peer this field + * has the value NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INVALID_PEER. + */ +typedef struct NV2080_CTRL_INTERNAL_SET_P2P_CAPS_PEER_INFO { + NvU32 gpuId; + NvU32 gpuInstance; + NvU32 p2pCaps; + NvU32 p2pOptimalReadCEs; + NvU32 p2pOptimalWriteCEs; + NvU8 p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE]; + NvU32 busPeerId; + NvU32 busEgmPeerId; +} NV2080_CTRL_INTERNAL_SET_P2P_CAPS_PEER_INFO; + +/*! + * NV2080_CTRL_CMD_INTERNAL_SET_P2P_CAPS + * + * An internal call to propagate the peer to peer capabilities of peer GPUs + * to the Physical RM. These capabilities are to be consumed by the vGPU GSP plugin. + * This control is used to both add and update the peer to peer capabilities.
+ * The existing GPU entries will be updated and those which don't exist will be added. + * Use NV2080_CTRL_CMD_INTERNAL_REMOVE_P2P_CAPS to remove the added entries. + * + * [in] peerGpuCount + * The number of the peerGpuInfos entries. + * [in] peerGpuInfos + * The array of NV2080_CTRL_INTERNAL_SET_P2P_CAPS_PEER_INFO entries, describing + * the peer to peer capabilities of the specified GPUs. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT - Invalid peerGpuCount + * NV_ERR_INSUFFICIENT_RESOURCES - Total GPU count exceeds the maximum value + */ +#define NV2080_CTRL_CMD_INTERNAL_SET_P2P_CAPS (0x20800ab5) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_SET_P2P_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_SET_P2P_CAPS_PARAMS_MESSAGE_ID (0xB5U) + +typedef struct NV2080_CTRL_INTERNAL_SET_P2P_CAPS_PARAMS { + NvU32 peerGpuCount; + NV2080_CTRL_INTERNAL_SET_P2P_CAPS_PEER_INFO peerGpuInfos[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS]; +} NV2080_CTRL_INTERNAL_SET_P2P_CAPS_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_REMOVE_P2P_CAPS + * + * An internal call to remove the cached peer to peer capabilities of peer GPUs + * from the Physical RM. + * + * [in] peerGpuIdCount + * The number of the peerGpuIds entries. + * [in] peerGpuIds + * The array of GPU IDs, specifying the GPUs for which the entries need to be removed. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT - Invalid peerGpuIdCount + * NV_ERR_OBJECT_NOT_FOUND - Invalid peerGpuIds[] entry + */ +#define NV2080_CTRL_CMD_INTERNAL_REMOVE_P2P_CAPS (0x20800ab6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_REMOVE_P2P_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_REMOVE_P2P_CAPS_PARAMS_MESSAGE_ID (0xB6U) + +typedef struct NV2080_CTRL_INTERNAL_REMOVE_P2P_CAPS_PARAMS { + NvU32 peerGpuIdCount; + NvU32 peerGpuIds[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS]; +} NV2080_CTRL_INTERNAL_REMOVE_P2P_CAPS_PARAMS; + + + +/*! + * NV2080_CTRL_CMD_INTERNAL_GET_PCIE_P2P_CAPS + * + * This command returns the GPU's PCIE P2P caps + * + * [in] bCommonPciSwitchFound + * Whether all GPUs are under the same PCI switch + * [out] p2pReadCapsStatus + * [out] p2pWriteCapsStatus + * These members return the status of all supported p2p capabilities. Valid + * status values include: + * NV0000_P2P_CAPS_STATUS_OK + * P2P capability is supported. + * NV0000_P2P_CAPS_STATUS_CHIPSET_NOT_SUPPORTED + * Chipset doesn't support p2p capability. + * NV0000_P2P_CAPS_STATUS_GPU_NOT_SUPPORTED + * GPU doesn't support p2p capability. + * NV0000_P2P_CAPS_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED + * IOH topology isn't supported, e.g. root ports are on different + * IOHs. + * NV0000_P2P_CAPS_STATUS_DISABLED_BY_REGKEY + * P2P Capability is disabled by a regkey. + * NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED + * P2P Capability is not supported. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_INTERNAL_GET_PCIE_P2P_CAPS (0x20800ab8) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS_MESSAGE_ID (0xB8U) + +typedef struct NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS { + NvBool bCommonPciSwitchFound; + NvU8 p2pReadCapsStatus; + NvU8 p2pWriteCapsStatus; +} NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS;
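+ +/* + * Usage sketch (illustrative only, not part of the original header): querying + * the PCIe P2P caps, assuming the RM_API Control idiom; pRmApi, hClient and + * hSubdevice are placeholder handles. + * + *   NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS params = {0}; + *   params.bCommonPciSwitchFound = NV_FALSE; // topology hint from the caller + *   NV_STATUS status = pRmApi->Control(pRmApi, hClient, hSubdevice, + *       NV2080_CTRL_CMD_INTERNAL_GET_PCIE_P2P_CAPS, + *       &params, sizeof(params)); + *   // On NV_OK, params.p2pReadCapsStatus / params.p2pWriteCapsStatus carry + *   // NV0000_P2P_CAPS_STATUS_* values. + */ + +/*!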
+ * NV2080_CTRL_CMD_INTERNAL_BIF_SET_PCIE_RO + * + * Enable/disable PCIe Relaxed Ordering. + * + */ +#define NV2080_CTRL_CMD_INTERNAL_BIF_SET_PCIE_RO (0x20800ab9) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BIF_SET_PCIE_RO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_BIF_SET_PCIE_RO_PARAMS_MESSAGE_ID (0xB9U) + +typedef struct NV2080_CTRL_INTERNAL_BIF_SET_PCIE_RO_PARAMS { + // Enable/disable PCIe relaxed ordering + NvBool enableRo; +} NV2080_CTRL_INTERNAL_BIF_SET_PCIE_RO_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_DISPLAY_PRE_UNIX_CONSOLE + * + * An internal call to invoke the sequence of VGA register reads and writes that + * performs save and restore of the VGA state. + * + * [in] bSave + * To indicate whether save or restore needs to be performed. + * [in] bUseVbios + * Primary VGA indication from OS. + * [out] bReturnEarly + * Indicates that the caller should return after this call. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_OPERATION + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_PRE_UNIX_CONSOLE (0x20800a76) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_DISPLAY_PRE_UNIX_CONSOLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_PRE_UNIX_CONSOLE_PARAMS_MESSAGE_ID (0x76U) + +typedef struct NV2080_CTRL_CMD_INTERNAL_DISPLAY_PRE_UNIX_CONSOLE_PARAMS { + NvBool bSave; + NvBool bUseVbios; + NvBool bReturnEarly; +} NV2080_CTRL_CMD_INTERNAL_DISPLAY_PRE_UNIX_CONSOLE_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_UNIX_CONSOLE + * + * To perform a save or restore operation to/from the saved fonts. + * + * [in] bSave + * To indicate whether save or restore needs to be performed. + * [in] bUseVbios + * Primary VGA indication from OS. + * [in] bVbiosCallSuccessful + * Indicates whether the VBIOS invocation was successful. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_UNIX_CONSOLE (0x20800a77) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_UNIX_CONSOLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_UNIX_CONSOLE_PARAMS_MESSAGE_ID (0x77U) + +typedef struct NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_UNIX_CONSOLE_PARAMS { + NvBool bSave; + NvBool bUseVbios; + NvBool bVbiosCallSuccessful; +} NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_UNIX_CONSOLE_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_COMPUTE_PROFILES + * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_COMPUTE_PROFILES + */ + +/*! + * NV2080_CTRL_INTERNAL_MIGMGR_COMPUTE_PROFILE + * + * This structure specifies resources in an execution partition + * + * computeSize[OUT] + * - NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_* associated with this profile + * + * gfxGpcCount[OUT] + * - Total number of GFX-capable GPCs in this partition + * + * gpcCount[OUT] + * - Total number of GPCs in this partition (including GFX-capable GPCs) + * + * veidCount[OUT] + * - Number of VEIDs allocated to this profile + * + * smCount[OUT] + * - Number of SMs usable in this profile + */ +typedef struct NV2080_CTRL_INTERNAL_MIGMGR_COMPUTE_PROFILE { + NvU8 computeSize; + NvU32 gfxGpcCount; + NvU32 gpcCount; + NvU32 veidCount; + NvU32 smCount; + NvU32 physicalSlots; +} NV2080_CTRL_INTERNAL_MIGMGR_COMPUTE_PROFILE;
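+ +/* + * Usage sketch (illustrative only, not part of the original header): the save + * path of the VGA console sequence, assuming the RM_API Control idiom; pRmApi, + * hClient and hSubdevice are placeholder handles. + * + *   NV2080_CTRL_CMD_INTERNAL_DISPLAY_PRE_UNIX_CONSOLE_PARAMS pre = {0}; + *   pre.bSave = NV_TRUE;     // save, not restore + *   pre.bUseVbios = NV_TRUE; // primary-VGA hint (placeholder) + *   NV_STATUS status = pRmApi->Control(pRmApi, hClient, hSubdevice, + *       NV2080_CTRL_CMD_INTERNAL_DISPLAY_PRE_UNIX_CONSOLE, + *       &pre, sizeof(pre)); + *   // If pre.bReturnEarly is set, the caller returns here; otherwise it later + *   // issues NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_UNIX_CONSOLE with + *   // bVbiosCallSuccessful filled in. + */ + +/*!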
+ * NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_COMPUTE_PROFILES_PARAMS + * + * This structure returns the set of valid compute instance profiles + * + * profileCount[OUT] + * - Total number of profiles filled + * + * profiles[OUT] + * - Array of NV2080_CTRL_INTERNAL_MIGMGR_COMPUTE_PROFILE entries filled with valid compute instance profiles + */ +#define NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_COMPUTE_PROFILES_PARAMS_MESSAGE_ID (0xBBU) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_COMPUTE_PROFILES_PARAMS { + NvU32 profileCount; + NV2080_CTRL_INTERNAL_MIGMGR_COMPUTE_PROFILE profiles[NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE__SIZE]; +} NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_COMPUTE_PROFILES_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_COMPUTE_PROFILES (0x20800aba) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KMIGMGR_GET_COMPUTE_PROFILES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_STATIC_KMIGMGR_GET_COMPUTE_PROFILES_PARAMS_MESSAGE_ID (0xBAU) + +typedef NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_COMPUTE_PROFILES_PARAMS NV2080_CTRL_INTERNAL_STATIC_KMIGMGR_GET_COMPUTE_PROFILES_PARAMS; + + + +/* + * NV2080_CTRL_CMD_INTERNAL_CCU_SET_STREAM_STATE + * + * This command sets the CCU stream state to enabled/disabled. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_INTERNAL_CCU_SET_STREAM_STATE (0x20800abd) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_CCU_STREAM_STATE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_CCU_STREAM_STATE_PARAMS_MESSAGE_ID (0xBDU) + +typedef struct NV2080_CTRL_INTERNAL_CCU_STREAM_STATE_PARAMS { + NvBool bStreamState; +} NV2080_CTRL_INTERNAL_CCU_STREAM_STATE_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_GSYNC_ATTACH_AND_INIT + * + * Attach the GPU to the external device. + * + * [in] bExtDevFound + * Whether an external device was found, to enable GPIO interrupts. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_GSYNC_ATTACH_AND_INIT (0x20800abe) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GSYNC_ATTACH_AND_INIT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GSYNC_ATTACH_AND_INIT_PARAMS_MESSAGE_ID (0xBEU) + +typedef struct NV2080_CTRL_INTERNAL_GSYNC_ATTACH_AND_INIT_PARAMS { + NvBool bExtDevFound; +} NV2080_CTRL_INTERNAL_GSYNC_ATTACH_AND_INIT_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_GSYNC_OPTIMIZE_TIMING_PARAMETERS + * + * Optimize the Gsync timing parameters + * + * [in] timingParameters + * Timing parameters passed by the client. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_GSYNC_OPTIMIZE_TIMING_PARAMETERS (0x20800abf) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GSYNC_OPTIMIZE_TIMING_PARAMETERS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GSYNC_OPTIMIZE_TIMING_PARAMETERS_PARAMS_MESSAGE_ID (0xBFU) + +typedef struct NV2080_CTRL_INTERNAL_GSYNC_OPTIMIZE_TIMING_PARAMETERS_PARAMS { + NV_DECLARE_ALIGNED(NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS timingParameters, 8); +} NV2080_CTRL_INTERNAL_GSYNC_OPTIMIZE_TIMING_PARAMETERS_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_GSYNC_GET_DISPLAY_IDS + * + * Get displayIDs supported by the display. + * + * [out] displayIds + * Display ID associated with each head.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_GSYNC_GET_DISPLAY_IDS (0x20800ac0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GSYNC_GET_DISPLAY_IDS_PARAMS_MESSAGE_ID" */ + +#define NV2080_MAX_NUM_HEADS 4 + +#define NV2080_CTRL_INTERNAL_GSYNC_GET_DISPLAY_IDS_PARAMS_MESSAGE_ID (0xC0U) + +typedef struct NV2080_CTRL_INTERNAL_GSYNC_GET_DISPLAY_IDS_PARAMS { + NvU32 displayIds[NV2080_MAX_NUM_HEADS]; +} NV2080_CTRL_INTERNAL_GSYNC_GET_DISPLAY_IDS_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_GSYNC_SET_STREO_SYNC + * + * Set the stereo sync for Gsync + * + * [in] slave + * Slave GPU head status. + * [in] localSlave + * Slave GPU head status for heads that are not coupled. + * [in] master + * Master GPU head status. + * [in] regStatus + * Status of the status1 register. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_GSYNC_SET_STREO_SYNC (0x20800ac1) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GSYNC_SET_STREO_SYNC_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GSYNC_SET_STREO_SYNC_PARAMS_MESSAGE_ID (0xC1U) + +typedef struct NV2080_CTRL_INTERNAL_GSYNC_SET_STREO_SYNC_PARAMS { + NvU32 slave[NV2080_MAX_NUM_HEADS]; + NvU32 localSlave[NV2080_MAX_NUM_HEADS]; + NvU32 master[NV2080_MAX_NUM_HEADS]; + NvU32 regStatus; +} NV2080_CTRL_INTERNAL_GSYNC_SET_STREO_SYNC_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_GSYNC_GET_VERTICAL_ACTIVE_LINES + * + * Get vertical active lines for a given head. + * + * [in] headIdx + * Head index for which the vertical active line count is needed. + * [out] vActiveLines + * Vertical active lines. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_GSYNC_GET_VERTICAL_ACTIVE_LINES (0x20800ac4) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GSYNC_GET_VERTICAL_ACTIVE_LINES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GSYNC_GET_VERTICAL_ACTIVE_LINES_PARAMS_MESSAGE_ID (0xC4U) + +typedef struct NV2080_CTRL_INTERNAL_GSYNC_GET_VERTICAL_ACTIVE_LINES_PARAMS { + NvU32 headIdx; + NvU32 vActiveLines; +} NV2080_CTRL_INTERNAL_GSYNC_GET_VERTICAL_ACTIVE_LINES_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_GSYNC_IS_DISPLAYID_VALID + * + * Verifies whether this displayId is valid. + * + * [in] displays + * Displays given by the client + * + * [out] displayId + * DisplayId for the given display + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_GSYNC_IS_DISPLAYID_VALID (0x20800ac9) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GSYNC_IS_DISPLAYID_VALID_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GSYNC_IS_DISPLAYID_VALID_PARAMS_MESSAGE_ID (0xC9U) + +typedef struct NV2080_CTRL_INTERNAL_GSYNC_IS_DISPLAYID_VALID_PARAMS { + NvU32 displays; + NvU32 displayId; +} NV2080_CTRL_INTERNAL_GSYNC_IS_DISPLAYID_VALID_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_GSYNC_SET_OR_RESTORE_RASTER_SYNC + * + * Disable the raster sync GPIO on the other P2060 GPU + * that is connected to the master over a video bridge. + * + * [in] bEnableMaster + * Whether this is the master GPU. + * + * [out] bRasterSyncGpioSaved + * Whether the raster sync GPIO direction was saved.
+ * + * [in/out] bRasterSyncGpioDirection + * During save, it returns the direction. + * During restore, it sets the direction. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_GSYNC_SET_OR_RESTORE_RASTER_SYNC (0x20800aca) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GSYNC_SET_OR_RESTORE_RASTER_SYNC_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GSYNC_SET_OR_RESTORE_RASTER_SYNC_PARAMS_MESSAGE_ID (0xCAU) + +typedef struct NV2080_CTRL_INTERNAL_GSYNC_SET_OR_RESTORE_RASTER_SYNC_PARAMS { + NvBool bEnableMaster; + NvBool bRasterSyncGpioSaved; + NvU32 bRasterSyncGpioDirection; +} NV2080_CTRL_INTERNAL_GSYNC_SET_OR_RESTORE_RASTER_SYNC_PARAMS; + + + +/*! + * NV2080_CTRL_CMD_INTERNAL_FBSR_INIT + * + * Initialize FBSR on GSP to prepare for suspend-resume + * + * [in] hClient + * Handle to client of the SYSMEM memlist object + * [in] hSysMem + * Handle to the SYSMEM memlist object + * [in] bEnteringGcoffState + * Value of PDB_PROP_GPU_GCOFF_STATE_ENTERING + * [in] sysmemAddrOfSuspendResumeData + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID (0xC2U) + +typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS { + NvHandle hClient; + NvHandle hSysMem; + NvBool bEnteringGcoffState; + NV_DECLARE_ALIGNED(NvU64 sysmemAddrOfSuspendResumeData, 8); +} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING + * + * Disable all active channels during suspend. + * Resume FIFO scheduling from GSP after resume on Kernel-RM. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS_MESSAGE_ID (0xC3U) + +typedef struct NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS { + NvBool bDisableActiveChannels; +} NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_MEMMGR_GET_VGPU_CONFIG_HOST_RESERVED_FB + * + * This command is used to get the amount of host reserved FB + * + * hostReservedFb [OUT] + * Amount of FB reserved for the host + * vgpuTypeId [IN] + * The type ID of the vGPU profile + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMMGR_GET_VGPU_CONFIG_HOST_RESERVED_FB (0x20800ac5) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMMGR_GET_VGPU_CONFIG_HOST_RESERVED_FB_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMMGR_GET_VGPU_CONFIG_HOST_RESERVED_FB_PARAMS_MESSAGE_ID (0xC5U) + +typedef struct NV2080_CTRL_INTERNAL_MEMMGR_GET_VGPU_CONFIG_HOST_RESERVED_FB_PARAMS { + NV_DECLARE_ALIGNED(NvU64 hostReservedFb, 8); + NvU32 vgpuTypeId; +} NV2080_CTRL_INTERNAL_MEMMGR_GET_VGPU_CONFIG_HOST_RESERVED_FB_PARAMS;
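+ +/* + * Usage sketch (illustrative only, not part of the original header): querying + * the host-reserved FB for one vGPU profile, assuming the RM_API Control idiom; + * pRmApi, hClient and hSubdevice are placeholder handles. + * + *   NV2080_CTRL_INTERNAL_MEMMGR_GET_VGPU_CONFIG_HOST_RESERVED_FB_PARAMS params = {0}; + *   params.vgpuTypeId = 1; // vGPU profile type ID being queried (placeholder) + *   NV_STATUS status = pRmApi->Control(pRmApi, hClient, hSubdevice, + *       NV2080_CTRL_CMD_INTERNAL_MEMMGR_GET_VGPU_CONFIG_HOST_RESERVED_FB, + *       &params, sizeof(params)); + *   // On NV_OK, params.hostReservedFb holds the amount of FB reserved for the host. + */ + +/*!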
+ * NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD + * + * This command initiates the brightc module state load. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */ + +#define NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID (0xC6U) + +typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS { + NvU32 status; + NvU16 backLightDataSize; + NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE]; +} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS; + +/* + * NV2080_CTRL_INTERNAL_NVLINK_GET_NUM_ACTIVE_LINK_PER_IOCTRL + * + * Returns the number of active links allowed per IOCTRL + * + * [Out] numActiveLinksPerIoctrl + */ +#define NV2080_CTRL_INTERNAL_NVLINK_GET_NUM_ACTIVE_LINK_PER_IOCTRL_PARAMS_MESSAGE_ID (0xC7U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_GET_NUM_ACTIVE_LINK_PER_IOCTRL_PARAMS { + NvU32 numActiveLinksPerIoctrl; +} NV2080_CTRL_INTERNAL_NVLINK_GET_NUM_ACTIVE_LINK_PER_IOCTRL_PARAMS; +#define NV2080_CTRL_INTERNAL_NVLINK_GET_NUM_ACTIVE_LINK_PER_IOCTRL (0x20800ac7U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_GET_NUM_ACTIVE_LINK_PER_IOCTRL_PARAMS_MESSAGE_ID" */ +/* + * NV2080_CTRL_INTERNAL_NVLINK_GET_TOTAL_NUM_LINK_PER_IOCTRL + * + * Returns the total number of links per IOCTRL + * + * [Out] numLinksPerIoctrl + */ +#define NV2080_CTRL_INTERNAL_NVLINK_GET_TOTAL_NUM_LINK_PER_IOCTRL_PARAMS_MESSAGE_ID (0xC8U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_GET_TOTAL_NUM_LINK_PER_IOCTRL_PARAMS { + NvU32 numLinksPerIoctrl; +} NV2080_CTRL_INTERNAL_NVLINK_GET_TOTAL_NUM_LINK_PER_IOCTRL_PARAMS; +#define NV2080_CTRL_INTERNAL_NVLINK_GET_TOTAL_NUM_LINK_PER_IOCTRL (0x20800ac8U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_GET_TOTAL_NUM_LINK_PER_IOCTRL_PARAMS_MESSAGE_ID" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_SMBPBI_PFM_REQ_HNDLR_CAP_UPDATE + * + * Update the system control capability + * + * bIsSysCtrlSupported [IN] + * If the system control is supported + * bIsPlatformLegacy [OUT] + * If the platform is legacy + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_INTERNAL_SMBPBI_PFM_REQ_HNDLR_CAP_UPDATE (0x20800acb) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_SMBPBI_PFM_REQ_HNDLR_CAP_UPDATE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_SMBPBI_PFM_REQ_HNDLR_CAP_UPDATE_PARAMS_MESSAGE_ID (0xCBU) + +typedef struct NV2080_CTRL_INTERNAL_SMBPBI_PFM_REQ_HNDLR_CAP_UPDATE_PARAMS { + NvBool bIsSysCtrlSupported; + NvBool bIsPlatformLegacy; +} NV2080_CTRL_INTERNAL_SMBPBI_PFM_REQ_HNDLR_CAP_UPDATE_PARAMS; + +/*! + * Macros for PFM_REQ_HNDLR_STATE_SYNC data types. + */ +#define NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA_TYPE_PMGR 0x00U +#define NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA_TYPE_THERM 0x01U +#define NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA_TYPE_SMBPBI 0x02U + +/*! + * Structure representing static data for a PFM_REQ_HNDLR_STATE_SYNC_SMBPBI. + */ +typedef struct NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA_SMBPBI { + /*!
+
+/*!
+ * Structure of static information describing PFM_REQ_HNDLR_STATE_SYNC data types.
+ */
+typedef struct NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA {
+    /*!
+     * @ref NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA_TYPE_
+     */
+    NvU8 type;
+
+    /*!
+     * Type-specific information.
+     */
+    union {
+        NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA_SMBPBI smbpbi;
+    } data;
+} NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA;
+
+/*!
+ * Macros for PFM_REQ_HNDLR_STATE_SYNC flags for specific operations.
+ */
+#define NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_FLAGS_PMGR_LOAD       0x00U
+#define NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_FLAGS_THERM_INIT      0x01U
+#define NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_FLAGS_SMBPBI_OP_CLEAR 0x02U
+#define NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_FLAGS_SMBPBI_OP_SET   0x03U
+
+/*!
+ * Structure of static information describing PFM_REQ_HNDLR_STATE_SYNC params.
+ */
+typedef struct NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_PARAMS {
+    /*!
+     * @ref NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_FLAGS_
+     */
+    NvU8 flags;
+
+    /*!
+     * Type-specific information.
+     */
+    NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_DATA syncData;
+} NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_STATE_LOAD_SYNC
+ *
+ * State sync with platform req handler and SMBPBI
+ *
+ * flags [IN]
+ *   Flags that need a sync operation between physical and kernel
+ *
+ * syncData [IN]
+ *   Sync payload data
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_STATE_LOAD_SYNC (0x20800acc) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PMGR_PFM_REQ_HNDLR_STATE_LOAD_SYNC_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_PMGR_PFM_REQ_HNDLR_STATE_LOAD_SYNC_PARAMS_MESSAGE_ID (0xCCU)
+
+typedef NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_PARAMS NV2080_CTRL_INTERNAL_PMGR_PFM_REQ_HNDLR_STATE_LOAD_SYNC_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_THERM_PFM_REQ_HNDLR_STATE_INIT_SYNC
+ *
+ * State sync with platform req handler and SMBPBI
+ *
+ * flags [IN]
+ *   Flags that need a sync operation between physical and kernel
+ *
+ * syncData [IN]
+ *   Sync payload data
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_THERM_PFM_REQ_HNDLR_STATE_INIT_SYNC (0x20800acd) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_THERM_PFM_REQ_HNDLR_STATE_INIT_SYNC_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_THERM_PFM_REQ_HNDLR_STATE_INIT_SYNC_PARAMS_MESSAGE_ID (0xCDU)
+
+typedef NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_STATE_SYNC_PARAMS NV2080_CTRL_INTERNAL_THERM_PFM_REQ_HNDLR_STATE_INIT_SYNC_PARAMS;
+
+/*!
+ * Macros for NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_PM1_STATE flag
+ */
+#define NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_GET_PM1_FORCED_OFF_STATUS 0x00U
+#define NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_GET_PM1_STATUS            0x01U
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_PM1_STATE
+ *
+ * Queries PM1 Forced off / PM1 Available status
+ *
+ * flag [IN]
+ *   Fetch PM1 Forced off / PM1 Available status based on value.
+ * bStatus [OUT]
+ *   PM1 Forced off / PM1 Available is true or false.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_PM1_STATE (0x20800ace) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_PM1_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_PM1_STATE_PARAMS_MESSAGE_ID (0xCEU)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_PM1_STATE_PARAMS {
+    /*!
+     * Fetch PM1 Forced off / PM1 Available status based on value.
+     */
+    NvU8   flag;
+
+    /*!
+     * PM1 Forced off / PM1 Available status
+     */
+    NvBool bStatus;
+} NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_PM1_STATE_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_SET_PM1_STATE
+ *
+ * Set PM1 state to enabled / disabled (boost clocks).
+ *
+ * bEnable [IN]
+ *   NV_TRUE means enable PM1, NV_FALSE means disable.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_SET_PM1_STATE (0x20800acf) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_SET_PM1_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_SET_PM1_STATE_PARAMS_MESSAGE_ID (0xCFU)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_SET_PM1_STATE_PARAMS {
+    /*!
+     * NV_TRUE means enable PM1, NV_FALSE means disable.
+     */
+    NvBool bEnable;
+} NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_SET_PM1_STATE_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_UPDATE_EDPP_LIMIT
+ *
+ * Updates the EDPpeak limit of the GPU
+ *
+ * bEnable [IN]
+ *   Enable or reset the settings
+ * clientLimit [IN]
+ *   Client requested limit
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_GENERIC
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_NOT_READY
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_UPDATE_EDPP_LIMIT (0x20800ad0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_UPDATE_EDPP_LIMIT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_UPDATE_EDPP_LIMIT_PARAMS_MESSAGE_ID (0xD0U)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_UPDATE_EDPP_LIMIT_PARAMS {
+    NvBool bEnable;
+    NvU32  clientLimit;
+} NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_UPDATE_EDPP_LIMIT_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_THERM_PFM_REQ_HNDLR_UPDATE_TGPU_LIMIT
+ *
+ * Updates the target temperature of the GPU
+ *
+ * targetTemp [IN]
+ *   Target temperature set from SBIOS
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_THERM_PFM_REQ_HNDLR_UPDATE_TGPU_LIMIT (0x20800ad1) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_THERM_PFM_REQ_HNDLR_UPDATE_TGPU_LIMIT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_INTERNAL_THERM_PFM_REQ_HNDLR_UPDATE_TGPU_LIMIT_PARAMS_MESSAGE_ID (0xD1U)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_THERM_PFM_REQ_HNDLR_UPDATE_TGPU_LIMIT_PARAMS {
+    NvS32 targetTemp;
+} NV2080_CTRL_CMD_INTERNAL_THERM_PFM_REQ_HNDLR_UPDATE_TGPU_LIMIT_PARAMS;
+
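+/*
+ * Illustrative sketch (not part of this header): querying PM1 status and then
+ * enabling PM1 with the two controls above. issueInternalCtrl() is a
+ * hypothetical dispatch helper, not a real RM API.
+ *
+ *     NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_PM1_STATE_PARAMS get = { 0 };
+ *     get.flag = NV2080_CTRL_INTERNAL_PFM_REQ_HNDLR_GET_PM1_STATUS;
+ *     status = issueInternalCtrl(pGpu,
+ *                  NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_PM1_STATE,
+ *                  &get, sizeof(get));
+ *
+ *     if (status == NV_OK && !get.bStatus)
+ *     {
+ *         NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_SET_PM1_STATE_PARAMS set = { 0 };
+ *         set.bEnable = NV_TRUE;   // enable PM1 (boost clocks)
+ *         status = issueInternalCtrl(pGpu,
+ *                      NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_SET_PM1_STATE,
+ *                      &set, sizeof(set));
+ *     }
+ */
+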
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_CONFIGURE_TGP_MODE
+ *
+ * Enable / disable CTGP MODE
+ *
+ * bEnable [IN]
+ *   Enable or reset the settings
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_CONFIGURE_TGP_MODE (0x20800ad2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_CONFIGURE_TGP_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_CONFIGURE_TGP_MODE_PARAMS_MESSAGE_ID (0xD2U)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_CONFIGURE_TGP_MODE_PARAMS {
+    NvBool bEnable;
+} NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_CONFIGURE_TGP_MODE_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_CONFIGURE_TURBO_V2
+ *
+ * Configuration of the turbo v2 parameters for NVPCF-Turbo arb control
+ *
+ * ctgpOffsetmW [IN]
+ *   TGP MODE offset in mW
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_CONFIGURE_TURBO_V2 (0x20800ad3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_CONFIGURE_TURBO_V2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_CONFIGURE_TURBO_V2_PARAMS_MESSAGE_ID (0xD3U)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_CONFIGURE_TURBO_V2_PARAMS {
+    NvU32 ctgpOffsetmW;
+} NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_CONFIGURE_TURBO_V2_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_VPSTATE_INFO
+ *
+ * Queries VPstates info: VPstates PS 2.0 support and the highest VPstate Idx.
+ *
+ * bVpsPs20Supported [OUT]
+ *   Reflects VPstates PS 2.0 support
+ * vPstateIdxHighest [OUT]
+ *   Reflects the highest VPstate Idx
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_VPSTATE_INFO (0x20800ad4) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_VPSTATE_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_VPSTATE_INFO_PARAMS_MESSAGE_ID (0xD4U)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_VPSTATE_INFO_PARAMS {
+    /*!
+     * Reflects VPstates PS 2.0 support
+     */
+    NvBool bVpsPs20Supported;
+
+    /*!
+     * The highest VPstate Idx from the VBIOS
+     */
+    NvU32  vPstateIdxHighest;
+} NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_VPSTATE_INFO_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_VPSTATE_MAPPING
+ *
+ * Get the VPstate mapping for the requested pStateIdx
+ *
+ * pStateIdx [IN]
+ *   Requested PState Idx
+ * vPstateIdxMapping [OUT]
+ *   Mapped VPstate Idx
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_VPSTATE_MAPPING (0x20800ad5) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_VPSTATE_MAPPING_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_VPSTATE_MAPPING_PARAMS_MESSAGE_ID (0xD5U)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_VPSTATE_MAPPING_PARAMS {
+    /*!
+     * Requested PState Idx
+     */
+    NvU32 pStateIdx;
+
+    /*!
+     * Mapped VPstate Idx
+     */
+    NvU32 vPstateIdxMapping;
+} NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_VPSTATE_MAPPING_PARAMS;
+
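+/*
+ * Illustrative sketch (not part of this header): translating a PState index
+ * to its VPstate index with GET_VPSTATE_MAPPING. issueInternalCtrl() and
+ * requestedPstateIdx are assumptions used only for illustration.
+ *
+ *     NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_VPSTATE_MAPPING_PARAMS map = { 0 };
+ *     map.pStateIdx = requestedPstateIdx;
+ *     status = issueInternalCtrl(pGpu,
+ *                  NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_GET_VPSTATE_MAPPING,
+ *                  &map, sizeof(map));
+ *     // On NV_OK, map.vPstateIdxMapping holds the VPstate index, which can
+ *     // then be applied with the SET_VPSTATE control defined below.
+ */
+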
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_SET_VPSTATE
+ *
+ * Set the requested VPstate
+ *
+ * vPstateIdx [IN]
+ *   VPstate Idx to be set
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_SET_VPSTATE (0x20800ad6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_SET_VPSTATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_SET_VPSTATE_PARAMS_MESSAGE_ID (0xD6U)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_SET_VPSTATE_PARAMS {
+    /*!
+     * VPstate Idx to be set
+     */
+    NvU32 vPstateIdx;
+} NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_SET_VPSTATE_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_GCX_ENTRY_PREREQUISITE
+ *
+ * This command reports whether the GPU is in a proper state (P8 and engine
+ * idle) to be ready to enter RTD3
+ *
+ * Possible status return values are:
+ *   NV_OK   Success
+ */
+#define NV2080_CTRL_CMD_INTERNAL_GCX_ENTRY_PREREQUISITE (0x2080a7d7) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_POWER_LEGACY_NON_PRIVILEGED_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GCX_ENTRY_PREREQUISITE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GCX_ENTRY_PREREQUISITE_PARAMS_MESSAGE_ID (0xD7U)
+
+typedef struct NV2080_CTRL_INTERNAL_GCX_ENTRY_PREREQUISITE_PARAMS {
+    NvBool bIsGC6Satisfied;
+    NvBool bIsGCOFFSatisfied;
+} NV2080_CTRL_INTERNAL_GCX_ENTRY_PREREQUISITE_PARAMS;
+
+/*
+ * This command unsets the Dynamic Boost limit when nvidia-powerd is terminated unexpectedly.
+ */
+#define NV2080_CTRL_CMD_INTERNAL_PMGR_UNSET_DYNAMIC_BOOST_LIMIT (0x20800a7b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x7B" */
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_FIFO_GET_NUM_SECURE_CHANNELS
+ *
+ * This command is an internal command sent from Kernel RM to Physical RM
+ * to get the number of secure channels supported on SEC2 and CE
+ *
+ * maxSec2SecureChannels [OUT]
+ * maxCeSecureChannels [OUT]
+ */
+#define NV2080_CTRL_CMD_INTERNAL_FIFO_GET_NUM_SECURE_CHANNELS (0x20800ad8) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FIFO_GET_NUM_SECURE_CHANNELS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_FIFO_GET_NUM_SECURE_CHANNELS_PARAMS_MESSAGE_ID (0xD8U)
+
+typedef struct NV2080_CTRL_INTERNAL_FIFO_GET_NUM_SECURE_CHANNELS_PARAMS {
+    NvU32 maxSec2SecureChannels;
+    NvU32 maxCeSecureChannels;
+} NV2080_CTRL_INTERNAL_FIFO_GET_NUM_SECURE_CHANNELS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_DEPENDENCY_CHECK
+ *
+ * This command checks whether all the modules that PRH depends on have been
+ * initialized.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_INTERNAL_PERF_PFM_REQ_HNDLR_DEPENDENCY_CHECK (0x20800a45) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x45" */
+
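+/*
+ * Illustrative sketch (not part of this header): how kernel-RM might query
+ * the number of secure channels before carving out channel IDs.
+ * issueInternalCtrl() is a hypothetical dispatch helper.
+ *
+ *     NV2080_CTRL_INTERNAL_FIFO_GET_NUM_SECURE_CHANNELS_PARAMS sec = { 0 };
+ *     status = issueInternalCtrl(pGpu,
+ *                  NV2080_CTRL_CMD_INTERNAL_FIFO_GET_NUM_SECURE_CHANNELS,
+ *                  &sec, sizeof(sec));
+ *     if (status == NV_OK)
+ *     {
+ *         // Reserve sec.maxSec2SecureChannels + sec.maxCeSecureChannels
+ *         // channel IDs for secure work submission.
+ *     }
+ */
+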
+/*
+ * NV2080_CTRL_CMD_INTERNAL_BIF_DISABLE_SYSTEM_MEMORY_ACCESS
+ *
+ * This command is an internal command sent from Kernel RM to Physical RM
+ * to disable the GPU system memory access after quiescing the GPU, or to
+ * re-enable sysmem access.
+ *
+ * bDisable [IN]
+ *   If NV_TRUE the GPU is quiesced and system memory access is disabled.
+ *   If NV_FALSE the GPU system memory access is re-enabled and the GPU is resumed.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_INTERNAL_BIF_DISABLE_SYSTEM_MEMORY_ACCESS (0x20800adb) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BIF_DISABLE_SYSTEM_MEMORY_ACCESS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_BIF_DISABLE_SYSTEM_MEMORY_ACCESS_PARAMS_MESSAGE_ID (0xDBU)
+
+typedef struct NV2080_CTRL_INTERNAL_BIF_DISABLE_SYSTEM_MEMORY_ACCESS_PARAMS {
+    NvBool bDisable;
+} NV2080_CTRL_INTERNAL_BIF_DISABLE_SYSTEM_MEMORY_ACCESS_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISP_PINSETS_TO_LOCKPINS (0x20800adc) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISP_PINSETS_TO_LOCKPINS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_DISP_PINSETS_TO_LOCKPINS_PARAMS_MESSAGE_ID (0xDCU)
+
+typedef struct NV2080_CTRL_INTERNAL_DISP_PINSETS_TO_LOCKPINS_PARAMS {
+    NvU32  pinSetIn;          // in
+    NvU32  pinSetOut;         // in
+    NvBool bMasterScanLock;   // out
+    NvU32  masterScanLockPin; // out
+    NvBool bSlaveScanLock;    // out
+    NvU32  slaveScanLockPin;  // out
+} NV2080_CTRL_INTERNAL_DISP_PINSETS_TO_LOCKPINS_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DETECT_HS_VIDEO_BRIDGE (0x20800add) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xDD" */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISP_SET_SLI_LINK_GPIO_SW_CONTROL (0x20800ade) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISP_SET_SLI_LINK_GPIO_SW_CONTROL_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_DISP_SET_SLI_LINK_GPIO_SW_CONTROL_PARAMS_MESSAGE_ID (0xDEU)
+
+typedef struct NV2080_CTRL_INTERNAL_DISP_SET_SLI_LINK_GPIO_SW_CONTROL_PARAMS {
+    NvU32  pinSet;        // in
+    NvU32  gpioFunction;  // out
+    NvU32  gpioPin;       // out
+    NvBool gpioDirection; // out
+} NV2080_CTRL_INTERNAL_DISP_SET_SLI_LINK_GPIO_SW_CONTROL_PARAMS;
+
+/* NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA
+ *
+ * This command sets up ACPI DDC EDID data.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA (0x20800adf) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA_PARAMS_MESSAGE_ID" */
+
+/* Per the ACPI 6.5 spec, the max size of EDID data from SBIOS (_DDC) is 512 bytes */
+#define MAX_EDID_SIZE_FROM_SBIOS 512U
+
+typedef struct NV2080_CTRL_INTERNAL_EDID_DATA {
+    NvU32 status;
+    NvU32 acpiId;
+    NvU32 bufferSize;
+    NvU8  edidBuffer[MAX_EDID_SIZE_FROM_SBIOS];
+} NV2080_CTRL_INTERNAL_EDID_DATA;
+
+#define NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA_PARAMS_MESSAGE_ID (0xDFU)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA_PARAMS {
+    NvU32 tableLen;
+    NV2080_CTRL_INTERNAL_EDID_DATA edidTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA_PARAMS;
+
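+/*
+ * Illustrative sketch (not part of this header): populating one entry of the
+ * static EDID table from an SBIOS _DDC read. The edidBytes/edidLen/acpiId
+ * inputs and the issueInternalCtrl() helper are assumptions for illustration.
+ *
+ *     NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA_PARAMS edid = { 0 };
+ *
+ *     edid.tableLen = 1;
+ *     edid.edidTable[0].acpiId     = acpiId;
+ *     edid.edidTable[0].bufferSize = edidLen;  // <= MAX_EDID_SIZE_FROM_SBIOS
+ *     portMemCopy(edid.edidTable[0].edidBuffer,
+ *                 sizeof(edid.edidTable[0].edidBuffer), edidBytes, edidLen);
+ *     status = issueInternalCtrl(pGpu,
+ *                  NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA,
+ *                  &edid, sizeof(edid));
+ */
+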
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_DISPLAY_ACPI_SUBSYSTEM_ACTIVATED
+ *
+ * This command initializes display ACPI child devices.
+ * This command accepts no parameters.
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_ACPI_SUBSYSTEM_ACTIVATED (0x20800af0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xF0" */
+
+/* NV2080_CTRL_CMD_INTERNAL_DISPLAY_PRE_MODESET */
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_PRE_MODESET (0x20800af1) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xF1" */
+
+/* NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_MODESET */
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_MODESET (0x20800af2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xF2" */
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_GET_GPU_FABRIC_PROBE_INFO_PARAMS
+ *
+ * This structure provides the params for getting GPU Fabric Probe Internal
+ * Info from GSP to CPU RM
+ *
+ * numProbes[OUT]
+ *   - Number of probe requests sent
+ */
+#define NV2080_CTRL_CMD_INTERNAL_GET_GPU_FABRIC_PROBE_INFO_PARAMS_MESSAGE_ID (0xF4U)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_GET_GPU_FABRIC_PROBE_INFO_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 numProbes, 8);
+} NV2080_CTRL_CMD_INTERNAL_GET_GPU_FABRIC_PROBE_INFO_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_GPU_GET_FABRIC_PROBE_INFO
+ *
+ * This command is used to get NV2080_CTRL_CMD_INTERNAL_GET_GPU_FABRIC_PROBE_INFO_PARAMS
+ * from GSP to CPU RM.
+ * This command accepts NV2080_CTRL_CMD_INTERNAL_GET_GPU_FABRIC_PROBE_INFO_PARAMS
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_GPU_GET_FABRIC_PROBE_INFO (0x208001f4) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_GET_GPU_FABRIC_PROBE_INFO_PARAMS_MESSAGE_ID" */
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_START_GPU_FABRIC_PROBE_INFO_PARAMS
+ *
+ * This structure provides the params for starting the GPU fabric probe
+ *
+ * bwMode[IN]
+ *   - Nvlink Bandwidth mode
+ *
+ * bLocalEgmEnabled[IN]
+ *   - EGM enablement status that needs to be set in GSP-RM
+ */
+#define NV2080_CTRL_CMD_INTERNAL_START_GPU_FABRIC_PROBE_INFO_PARAMS_MESSAGE_ID (0xF5U)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_START_GPU_FABRIC_PROBE_INFO_PARAMS {
+    NvU8   bwMode;
+    NvBool bLocalEgmEnabled;
+} NV2080_CTRL_CMD_INTERNAL_START_GPU_FABRIC_PROBE_INFO_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE
+ *
+ * This command is used to trigger the start of the GPU fabric probe process on GSP.
+ * This command accepts NV2080_CTRL_CMD_INTERNAL_START_GPU_FABRIC_PROBE_INFO_PARAMS
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE (0x208001f5) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_START_GPU_FABRIC_PROBE_INFO_PARAMS_MESSAGE_ID" */
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_GPU_STOP_FABRIC_PROBE
+ *
+ * This command is used to trigger the stop of the GPU fabric probe process on GSP.
+ * This command accepts no parameters
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_GPU_STOP_FABRIC_PROBE (0x208001f6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0xF6" */
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_GPU_SUSPEND_FABRIC_PROBE
+ *
+ * This command is used to trigger the suspend of the GPU fabric probe process on GSP.
+ * This command accepts no parameters
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_GPU_SUSPEND_FABRIC_PROBE (0x208001f7) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0xF7" */
+
+
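+/*
+ * Illustrative sketch (not part of this header): starting the fabric probe on
+ * GSP and later reading back how many probe requests were sent.
+ * issueInternalCtrl() and bwMode are assumptions used only for illustration.
+ *
+ *     NV2080_CTRL_CMD_INTERNAL_START_GPU_FABRIC_PROBE_INFO_PARAMS start = { 0 };
+ *     start.bwMode           = bwMode;    // NVLink bandwidth mode
+ *     start.bLocalEgmEnabled = NV_FALSE;
+ *     status = issueInternalCtrl(pGpu,
+ *                  NV2080_CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE,
+ *                  &start, sizeof(start));
+ *
+ *     NV2080_CTRL_CMD_INTERNAL_GET_GPU_FABRIC_PROBE_INFO_PARAMS info = { 0 };
+ *     status = issueInternalCtrl(pGpu,
+ *                  NV2080_CTRL_CMD_INTERNAL_GPU_GET_FABRIC_PROBE_INFO,
+ *                  &info, sizeof(info));
+ *     // On NV_OK, info.numProbes holds the number of probe requests sent.
+ */
+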
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_RESUME_GPU_FABRIC_PROBE_INFO_PARAMS
+ *
+ * This structure provides the params for resuming the GPU fabric probe
+ *
+ * bwMode[IN]
+ *   - Nvlink Bandwidth mode
+ */
+#define NV2080_CTRL_CMD_INTERNAL_RESUME_GPU_FABRIC_PROBE_INFO_PARAMS_MESSAGE_ID (0xF8U)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_RESUME_GPU_FABRIC_PROBE_INFO_PARAMS {
+    NvU8 bwMode;
+} NV2080_CTRL_CMD_INTERNAL_RESUME_GPU_FABRIC_PROBE_INFO_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_GPU_RESUME_FABRIC_PROBE
+ *
+ * This command is used to trigger the resume of the GPU fabric probe process on GSP.
+ * This command accepts NV2080_CTRL_CMD_INTERNAL_RESUME_GPU_FABRIC_PROBE_INFO_PARAMS
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_GPU_RESUME_FABRIC_PROBE (0x208001f8) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_RESUME_GPU_FABRIC_PROBE_INFO_PARAMS_MESSAGE_ID" */
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_GPU_INVALIDATE_FABRIC_PROBE
+ *
+ * This command is used to invalidate/reset GPU_FABRIC_PROBE_INFO on GSP.
+ * This command accepts no parameters
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_GPU_INVALIDATE_FABRIC_PROBE (0x208001f9) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0xF9" */
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_GET_STATIC_INFO
+ *
+ * This command is an internal command sent from Kernel RM to Physical RM
+ * to get static conf compute info
+ *
+ * bIsBar1Trusted: [OUT]
+ *   Is BAR1 trusted to access CPR
+ * bIsPcieTrusted: [OUT]
+ *   Is PCIE trusted to access CPR
+ */
+#define NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_GET_STATIC_INFO (0x20800af3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_CONF_COMPUTE_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_GET_STATIC_INFO_PARAMS_MESSAGE_ID (0xF3U)
+
+typedef struct NV2080_CTRL_INTERNAL_CONF_COMPUTE_GET_STATIC_INFO_PARAMS {
+    NvBool bIsBar1Trusted;
+    NvBool bIsPcieTrusted;
+} NV2080_CTRL_INTERNAL_CONF_COMPUTE_GET_STATIC_INFO_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_DERIVE_SWL_KEYS
+ *
+ * This command is an internal command sent from Kernel RM to Physical RM
+ * to derive SWL keys and IV masks for a given engine
+ *
+ * engineId: [IN]
+ *   NV2080_ENGINE_TYPE_* for the engine for which keys and IV masks should be derived
+ * ivMaskSet: [OUT]
+ *   Set of IV masks for the given engine
+ */
+#define NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_DERIVE_SWL_KEYS (0x20800ae1) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_SWL_KEYS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SIZE         3U
+#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SWL_KERNEL   0U
+#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SWL_USER     1U
+#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SWL_SCRUBBER 2U
+#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SWL_COUNT    3U
+#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_LCE_COUNT    6U
+
+typedef struct NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK {
+    NvU32 ivMask[NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SIZE];
+} NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK;
+
+#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_SWL_KEYS_PARAMS_MESSAGE_ID (0xE1U)
+
+typedef struct NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_SWL_KEYS_PARAMS {
+    NvU32 engineId;
+    NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK ivMaskSet[NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SWL_COUNT];
+} NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_SWL_KEYS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS
+ *
+ * This command is an internal command sent from Kernel RM to Physical RM
+ * to derive LCE keys and IV masks for a given engine
+ *
+ * engineId: [IN]
+ *   NV2080_ENGINE_TYPE_* for the engine for which keys and IV masks should be derived
+ * ivMaskSet: [OUT]
+ *   Set of IV masks for the given engine
+ */
+#define NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS (0x20800ae2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS_MESSAGE_ID (0xE2U)
+
+typedef struct NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS {
+    NvU32 engineId;
+    NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK ivMaskSet[NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_LCE_COUNT];
+} NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_ROTATE_KEYS
+ *
+ * This command handles key rotation for a given H2D key (and the corresponding
+ * D2H key) by deriving a new key on GSP and updating the key on the relevant
+ * SEC2 or LCE. It also updates IVs for all channels using the key,
+ * conditionally re-enables them, and notifies clients of the key rotation
+ * status at the end.
+ *
+ * globalH2DKey : [IN]
+ *   Global h2d key to be rotated
+ * updatedEncryptIVMask: [OUT]
+ *   Encrypt IV mask post IV key rotation for a given engine's kernel channel
+ * updatedDecryptIVMask: [OUT]
+ *   Decrypt IV mask post IV key rotation for a given engine's kernel channel
+ */
+#define NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_ROTATE_KEYS (0x20800ae5) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS_MESSAGE_ID (0xE5U)
+
+typedef struct NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS {
+    NvU32 globalH2DKey;
+    NvU32 updatedEncryptIVMask[CC_AES_256_GCM_IV_SIZE_DWORD];
+    NvU32 updatedDecryptIVMask[CC_AES_256_GCM_IV_SIZE_DWORD];
+} NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION
+ *
+ * This command RCs all channels that use the given key and have not reported
+ * idle via NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION yet.
+ * RM needs to RC such channels before going ahead with key rotation.
+ *
+ * globalH2DKey : [IN]
+ *   Global h2d key whose channels will be RCed
+ */
+#define NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION (0x20800ae6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION_PARAMS_MESSAGE_ID (0xE6U)
+
+typedef struct NV2080_CTRL_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION_PARAMS {
+    NvU32 exceptionType;
+    NvU32 globalH2DKey;
+} NV2080_CTRL_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION_PARAMS;
+
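+/*
+ * Illustrative sketch (not part of this header): requesting rotation of one
+ * global H2D key and capturing the updated IV masks. issueInternalCtrl() and
+ * globalH2DKey are assumptions used only for illustration.
+ *
+ *     NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS rot = { 0 };
+ *     rot.globalH2DKey = globalH2DKey;
+ *     status = issueInternalCtrl(pGpu,
+ *                  NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_ROTATE_KEYS,
+ *                  &rot, sizeof(rot));
+ *     // On NV_OK, rot.updatedEncryptIVMask / rot.updatedDecryptIVMask hold
+ *     // the post-rotation IV masks for the engine's kernel channel.
+ */
+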
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_GPU_STATE
+ *
+ * This control call can be used to set the GPU state on GSP to accept client
+ * requests or to block client requests.
+ * This is an internal command sent from Kernel RM to Physical RM.
+ *
+ * bAcceptClientRequest:[IN]
+ *   NV_TRUE : set gpu state to accept client work requests
+ *   NV_FALSE: set gpu state to block client work requests
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_GPU_STATE (0x20800ae7) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_GPU_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_GPU_STATE_PARAMS_MESSAGE_ID (0xE7U)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_GPU_STATE_PARAMS {
+    NvBool bAcceptClientRequest;
+} NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_GPU_STATE_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY
+ *
+ * This control call can be used to set the CC security policy on GSP.
+ * This is an internal command sent from Kernel RM to Physical RM.
+ *
+ * attackerAdvantage [IN]
+ *   The minimum and maximum values for attackerAdvantage.
+ *   The probability of an attacker successfully guessing the contents of
+ *   an encrypted packet goes up ("attacker advantage").
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY (0x20800ae8) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS_MESSAGE_ID (0xE8U)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 attackerAdvantage, 8);
+} NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_MEMMGR_MEMORY_TRANSFER_WITH_GSP
+ *
+ * This command is used by CPU-RM to perform memory operations using GSP
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NVOS_STATUS_TIMEOUT_RETRY
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_MEMMGR_MEMORY_TRANSFER_WITH_GSP (0x20800afa) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMMGR_MEMORY_TRANSFER_WITH_GSP_PARAMS_MESSAGE_ID" */
+
+typedef enum NV2080_CTRL_MEMMGR_MEMORY_OP {
+    NV2080_CTRL_MEMMGR_MEMORY_OP_MEMCPY = 0,
+    NV2080_CTRL_MEMMGR_MEMORY_OP_MEMSET = 1,
+} NV2080_CTRL_MEMMGR_MEMORY_OP;
+
+typedef struct NV2080_CTRL_INTERNAL_TRANSFER_SURFACE_INFO {
+    /*!
+     * Base physical address of the surface
+     */
+    NV_DECLARE_ALIGNED(NvU64 baseAddr, 8);
+
+    /*!
+     * Size of the surface in bytes
+     */
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+
+    /*!
+     * Offset in bytes into the surface where the read/write must happen
+     */
+    NV_DECLARE_ALIGNED(NvU64 offset, 8);
+
+    /*!
+     * Aperture where the surface is allocated
+     */
+    NvU32 aperture;
+
+    /*!
+     * CPU caching attribute of the surface
+     */
+    NvU32 cpuCacheAttrib;
+} NV2080_CTRL_INTERNAL_TRANSFER_SURFACE_INFO;
+
+#define CC_AES_256_GCM_AUTH_TAG_SIZE_BYTES (0x10U) /* finn: Evaluated from "(128 / 8)" */
+
+#define NV2080_CTRL_INTERNAL_MEMMGR_MEMORY_TRANSFER_WITH_GSP_PARAMS_MESSAGE_ID (0xFAU)
+
+typedef struct NV2080_CTRL_INTERNAL_MEMMGR_MEMORY_TRANSFER_WITH_GSP_PARAMS {
+
+    /*!
+     * Source surface info
+     */
+    NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_TRANSFER_SURFACE_INFO src, 8);
+
+    /*!
+     * Authentication tag if data is encrypted
+     */
+    NvU8 authTag[CC_AES_256_GCM_AUTH_TAG_SIZE_BYTES];
+
+    /*!
+     * Destination surface info
+     */
+    NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_TRANSFER_SURFACE_INFO dst, 8);
+
+    /*!
+     * Size of the data to be transferred
+     */
+    NV_DECLARE_ALIGNED(NvU64 transferSize, 8);
+
+    /*!
+     * To be set in case of memset
+     */
+    NvU32 value;
+
+    /*!
+     * Memory op to be performed
+     */
+    NV2080_CTRL_MEMMGR_MEMORY_OP memop;
+} NV2080_CTRL_INTERNAL_MEMMGR_MEMORY_TRANSFER_WITH_GSP_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_LOCAL_ATS_CONFIG
+ *
+ * This command is an internal command sent from Kernel RM to Physical RM
+ * to get the local GPU's ATS config
+ *
+ * addrSysPhys : [OUT]
+ *   System Physical Address
+ * addrWidth : [OUT]
+ *   Address width value
+ * mask : [OUT]
+ *   Mask value
+ * maskWidth : [OUT]
+ *   Mask width value
+ */
+#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_LOCAL_ATS_CONFIG (0x20800afb) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_GET_LOCAL_ATS_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_MEMSYS_GET_LOCAL_ATS_CONFIG_PARAMS_MESSAGE_ID (0xFBU)
+
+typedef struct NV2080_CTRL_INTERNAL_MEMSYS_GET_LOCAL_ATS_CONFIG_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 addrSysPhys, 8);
+    NvU32 addrWidth;
+    NvU32 mask;
+    NvU32 maskWidth;
+} NV2080_CTRL_INTERNAL_MEMSYS_GET_LOCAL_ATS_CONFIG_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_PEER_ATS_CONFIG
+ *
+ * This command is an internal command sent from Kernel RM to Physical RM
+ * to set the peer ATS config using the parameters passed in.
+ *
+ * peerId : [IN]
+ *   Peer ID of the peer for which the ATS config is to be programmed
+ * addrSysPhys : [IN]
+ *   System Physical Address
+ * addrWidth : [IN]
+ *   Address width value
+ * mask : [IN]
+ *   Mask value
+ * maskWidth : [IN]
+ *   Mask width value
+ */
+#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_PEER_ATS_CONFIG (0x20800afc) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_SET_PEER_ATS_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_MEMSYS_SET_PEER_ATS_CONFIG_PARAMS_MESSAGE_ID (0xFCU)
+
+typedef struct NV2080_CTRL_INTERNAL_MEMSYS_SET_PEER_ATS_CONFIG_PARAMS {
+    NvU32 peerId;
+    NV_DECLARE_ALIGNED(NvU64 addrSysPhys, 8);
+    NvU32 addrWidth;
+    NvU32 mask;
+    NvU32 maskWidth;
+} NV2080_CTRL_INTERNAL_MEMSYS_SET_PEER_ATS_CONFIG_PARAMS;
+
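+/*
+ * Illustrative sketch (not part of this header): reading the local ATS config
+ * and programming it on a peer, as a GET_LOCAL/SET_PEER pair. The helpers
+ * issueInternalCtrl()/issuePeerInternalCtrl() and peerId are assumptions.
+ *
+ *     NV2080_CTRL_INTERNAL_MEMSYS_GET_LOCAL_ATS_CONFIG_PARAMS local = { 0 };
+ *     status = issueInternalCtrl(pGpu,
+ *                  NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_LOCAL_ATS_CONFIG,
+ *                  &local, sizeof(local));
+ *
+ *     NV2080_CTRL_INTERNAL_MEMSYS_SET_PEER_ATS_CONFIG_PARAMS peer = { 0 };
+ *     peer.peerId      = peerId;             // peer to program
+ *     peer.addrSysPhys = local.addrSysPhys;
+ *     peer.addrWidth   = local.addrWidth;
+ *     peer.mask        = local.mask;
+ *     peer.maskWidth   = local.maskWidth;
+ *     status = issuePeerInternalCtrl(pPeerGpu,
+ *                  NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_PEER_ATS_CONFIG,
+ *                  &peer, sizeof(peer));
+ */
+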
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_GET_EDPP_LIMIT_INFO
+ *
+ * Get GPU EDPpeak limit information
+ *
+ * limitMin [OUT]
+ *   Minimum allowed limit value on EDPp policy on both AC and DC
+ * limitRated [OUT]
+ *   Rated/default allowed limit value on EDPp policy on AC
+ * limitMax [OUT]
+ *   Maximum allowed limit value on EDPp policy on AC
+ * limitCurr [OUT]
+ *   Current resultant limit effective on EDPp policy on AC and DC
+ * limitBattRated [OUT]
+ *   Default/rated allowed limit on EDPp policy on DC
+ * limitBattMax [OUT]
+ *   Maximum allowed limit on EDPp policy on DC
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_GENERIC
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_GET_EDPP_LIMIT_INFO (0x20800afd) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_GET_EDPP_LIMIT_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_GET_EDPP_LIMIT_INFO_PARAMS_MESSAGE_ID (0xFDU)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_GET_EDPP_LIMIT_INFO_PARAMS {
+    NvU32 limitMin;
+    NvU32 limitRated;
+    NvU32 limitMax;
+    NvU32 limitCurr;
+    NvU32 limitBattRated;
+    NvU32 limitBattMax;
+} NV2080_CTRL_CMD_INTERNAL_PMGR_PFM_REQ_HNDLR_GET_EDPP_LIMIT_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_INIT_USER_SHARED_DATA
+ *
+ * @brief Initialize/Destroy the RM User Shared Data memory mapping on physical RM
+ *
+ * @param[in] bInit    If this is an init or a destroy request
+ * @param[in] physAddr Physical address of the memdesc used to link physical to
+ *                     kernel; 0 to de-initialize
+ *
+ * @return NV_OK on success
+ * @return NV_ERR_ otherwise
+ */
+#define NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS_MESSAGE_ID (0xFEU)
+
+typedef struct NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS {
+    NvBool bInit;
+    NV_DECLARE_ALIGNED(NvU64 physAddr, 8);
+} NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS;
+#define NV2080_CTRL_CMD_INTERNAL_INIT_USER_SHARED_DATA (0x20800afe) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL
+ *
+ * @brief Set the mask of data to be polled on physical for RUSD
+ *
+ * @param[in] polledDataMask  Bitmask of data requested, defined in cl00de
+ * @param[in] pollFrequencyMs Requested polling interval, in ms
+ *
+ * @return NV_OK on success
+ * @return NV_ERR_ otherwise
+ */
+#define NV2080_CTRL_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL_PARAMS_MESSAGE_ID (0xFFU)
+
+typedef struct NV2080_CTRL_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 polledDataMask, 8);
+    NvU32 pollFrequencyMs;
+} NV2080_CTRL_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL_PARAMS;
+#define NV2080_CTRL_CMD_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL (0x20800aff) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL_PARAMS_MESSAGE_ID" */
+
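+/*
+ * Illustrative sketch (not part of this header): linking the RUSD memdesc and
+ * then requesting polled data. The rusdMemdescPhysAddr/polledMask values and
+ * the issueInternalCtrl() helper are assumptions; the real bit definitions
+ * live in cl00de.
+ *
+ *     NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS init = { 0 };
+ *     init.bInit    = NV_TRUE;
+ *     init.physAddr = rusdMemdescPhysAddr;   // 0 would de-initialize
+ *     status = issueInternalCtrl(pGpu,
+ *                  NV2080_CTRL_CMD_INTERNAL_INIT_USER_SHARED_DATA,
+ *                  &init, sizeof(init));
+ *
+ *     NV2080_CTRL_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL_PARAMS poll = { 0 };
+ *     poll.polledDataMask  = polledMask;     // bitmask defined in cl00de
+ *     poll.pollFrequencyMs = 100;
+ *     status = issueInternalCtrl(pGpu,
+ *                  NV2080_CTRL_CMD_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL,
+ *                  &poll, sizeof(poll));
+ */
+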
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE_PARAMS
+ *
+ * This structure provides the params for controlling the GSP-RM trace tool
+ *
+ * tracepointMask[IN]
+ *   - tracepoint selection filter
+ * bufferAddr[IN]
+ *   - physical address of the tracing buffer for VGPU
+ * bufferSize[IN]
+ *   - size of the GSP-side logging buffer
+ * bufferWatermark[IN]
+ *   - entry threshold for GSP to issue an RPC of logged entries to kernel RM
+ * flag[IN]
+ *   - indicates which operation to perform
+ */
+#define NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE_PARAMS_MESSAGE_ID (0xE3U)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 tracepointMask, 8);
+    NV_DECLARE_ALIGNED(NvU64 bufferAddr, 8);
+    NvU32 bufferSize;
+    NvU32 bufferWatermark;
+    NvU8  flag;
+} NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE_PARAMS;
+
+/*!
+ * Macros for INTERNAL_CONTROL_GSP_TRACE flags for specific operations.
+ */
+#define NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE_FLAG_START_KEEP_OLDEST 0x00U
+#define NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE_FLAG_START_KEEP_NEWEST 0x01U
+#define NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE_FLAG_STOP              0x02U
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE
+ *
+ * This command is used to start the GSP-RM trace tool.
+ * This command accepts NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE_PARAMS
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE (0x208001e3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_GET_ENABLED_SEC2_CLASSES
+ *
+ * @brief Get the state (enabled/disabled) of SEC2 classes
+ *
+ * @return NV_OK on success
+ * @return NV_ERR_ otherwise
+ */
+#define NV2080_CTRL_CMD_INTERNAL_GET_ENABLED_SEC2_CLASSES_PARAMS_MESSAGE_ID (0xAFU)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_GET_ENABLED_SEC2_CLASSES_PARAMS {
+    NvBool bMaxwellSec2Enabled;
+    NvBool bNv95A1TsecEnabled;
+    NvBool bHopperSec2WorkLaunchAEnabled;
+} NV2080_CTRL_CMD_INTERNAL_GET_ENABLED_SEC2_CLASSES_PARAMS;
+#define NV2080_CTRL_CMD_INTERNAL_GET_ENABLED_SEC2_CLASSES (0x20800aaf) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_GET_ENABLED_SEC2_CLASSES_PARAMS_MESSAGE_ID" */
+
+
+/*!
+ * @ref NV2080_CTRL_CMD_INTERNAL_GR_CTXSW_SETUP_BIND
+ */
+#define NV2080_CTRL_CMD_INTERNAL_GR_CTXSW_SETUP_BIND (0x20800ae4) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_CTXSW_SETUP_BIND_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GR_CTXSW_SETUP_BIND_PARAMS_MESSAGE_ID (0xE4U)
+
+typedef NV2080_CTRL_GR_CTXSW_SETUP_BIND_PARAMS NV2080_CTRL_INTERNAL_GR_CTXSW_SETUP_BIND_PARAMS;
+
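+/*
+ * Illustrative sketch (not part of this header): starting the GSP-RM trace
+ * tool in keep-newest mode. The buffer address/size values and the
+ * issueInternalCtrl() helper are assumptions used only for illustration.
+ *
+ *     NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE_PARAMS trace = { 0 };
+ *     trace.tracepointMask  = ~0ULL;             // all tracepoints
+ *     trace.bufferAddr      = traceBufPhysAddr;  // VGPU tracing buffer
+ *     trace.bufferSize      = traceBufSize;
+ *     trace.bufferWatermark = traceBufSize / 2;  // RPC at half-full
+ *     trace.flag = NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE_FLAG_START_KEEP_NEWEST;
+ *     status = issueInternalCtrl(pGpu, NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE,
+ *                                &trace, sizeof(trace));
+ */
+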
+/*!
+ * NV2080_CTRL_INTERNAL_GPU_CLIENT_LOW_POWER_MODE_ENTER
+ *
+ * @brief Notify the offloaded RM that CPU-RM enters the power management cycle.
+ *
+ * bInPMTransition : [IN]
+ * newPMLevel : [IN]
+ *   New PM Level : NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_[0-7]
+ *
+ * @return NV_OK on success
+ * @return NV_ERR_ otherwise
+ */
+#define NV2080_CTRL_INTERNAL_GPU_CLIENT_LOW_POWER_MODE_ENTER_PARAMS_MESSAGE_ID (0xE9U)
+
+typedef struct NV2080_CTRL_INTERNAL_GPU_CLIENT_LOW_POWER_MODE_ENTER_PARAMS {
+    NvBool bInPMTransition;
+    NvU32  newPMLevel;
+} NV2080_CTRL_INTERNAL_GPU_CLIENT_LOW_POWER_MODE_ENTER_PARAMS;
+#define NV2080_CTRL_INTERNAL_GPU_CLIENT_LOW_POWER_MODE_ENTER (0x20800ae9) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPU_CLIENT_LOW_POWER_MODE_ENTER_PARAMS_MESSAGE_ID" */
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_NVLINK_POST_FATAL_ERROR_RECOVERY
+ *
+ * This command is used to perform recovery actions after the fabric has been
+ * idled due to a fatal nvlink error.
+ *
+ * bSuccessful
+ *   NV_TRUE if recovery was successful, NV_FALSE otherwise
+ */
+#define NV2080_CTRL_CMD_INTERNAL_NVLINK_POST_FATAL_ERROR_RECOVERY (0x20800aea) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_POST_FATAL_ERROR_RECOVERY_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_NVLINK_POST_FATAL_ERROR_RECOVERY_PARAMS_MESSAGE_ID (0xEAU)
+
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_POST_FATAL_ERROR_RECOVERY_PARAMS {
+    NvBool bSuccessful;
+} NV2080_CTRL_INTERNAL_NVLINK_POST_FATAL_ERROR_RECOVERY_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_GPU_GET_GSP_RM_FREE_HEAP
+ *
+ * @brief Get the free heap size of GSP-RM
+ *
+ * freeHeapSize : [OUT]
+ *
+ * @return NV_OK
+ */
+#define NV2080_CTRL_INTERNAL_GPU_GET_GSP_RM_FREE_HEAP_PARAMS_MESSAGE_ID (0xEBU)
+
+typedef struct NV2080_CTRL_INTERNAL_GPU_GET_GSP_RM_FREE_HEAP_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 freeHeapSize, 8);
+} NV2080_CTRL_INTERNAL_GPU_GET_GSP_RM_FREE_HEAP_PARAMS;
+#define NV2080_CTRL_CMD_INTERNAL_GPU_GET_GSP_RM_FREE_HEAP (0x20800aeb) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPU_GET_GSP_RM_FREE_HEAP_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_GPU_SET_ILLUM
+ *
+ * This command sets a new value for the specified Illumination control attribute.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_INTERNAL_GPU_SET_ILLUM_PARAMS_MESSAGE_ID (0xECU)
+
+typedef struct NV2080_CTRL_INTERNAL_GPU_SET_ILLUM_PARAMS {
+    NvU32 attribute;
+    NvU32 value;
+} NV2080_CTRL_INTERNAL_GPU_SET_ILLUM_PARAMS;
+#define NV2080_CTRL_CMD_INTERNAL_GPU_SET_ILLUM (0x20800aecU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPU_SET_ILLUM_PARAMS_MESSAGE_ID" */
+
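+/*
+ * Illustrative sketch (not part of this header): setting an Illumination
+ * control attribute. The illumAttribute/illumValue pair is a placeholder;
+ * real attribute IDs come from the Illumination control API.
+ * issueInternalCtrl() is a hypothetical dispatch helper.
+ *
+ *     NV2080_CTRL_INTERNAL_GPU_SET_ILLUM_PARAMS illum = { 0 };
+ *     illum.attribute = illumAttribute;   // an ILLUM attribute ID
+ *     illum.value     = illumValue;       // attribute-specific value
+ *     status = issueInternalCtrl(pGpu, NV2080_CTRL_CMD_INTERNAL_GPU_SET_ILLUM,
+ *                                &illum, sizeof(illum));
+ */
+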
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_GSYNC_APPLY_STEREO_PIN_ALWAYS_HI_WAR
+ *
+ * @brief NVIDIA RTX 5000 (GP180 SKU500) Windows-specific WAR
+ * to pull GPIO19 (stereo pin) low for bug 3362661.
+ *
+ * [in] bApplyStereoPinAlwaysHiWar
+ *   Whether to drive the stereo pin (GPIO19) low (_IO_INPUT)
+ *
+ * @return NV_OK on success
+ * @return NV_ERR_ otherwise
+ */
+#define NV2080_CTRL_INTERNAL_GSYNC_APPLY_STEREO_PIN_ALWAYS_HI_WAR_PARAMS_MESSAGE_ID (0xEDU)
+
+typedef struct NV2080_CTRL_INTERNAL_GSYNC_APPLY_STEREO_PIN_ALWAYS_HI_WAR_PARAMS {
+    NvBool bApplyStereoPinAlwaysHiWar;
+} NV2080_CTRL_INTERNAL_GSYNC_APPLY_STEREO_PIN_ALWAYS_HI_WAR_PARAMS;
+#define NV2080_CTRL_CMD_INTERNAL_GSYNC_APPLY_STEREO_PIN_ALWAYS_HI_WAR (0x20800aed) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GSYNC_APPLY_STEREO_PIN_ALWAYS_HI_WAR_PARAMS_MESSAGE_ID" */
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_MAX_HSHUBS_PER_SHIM
+ *
+ * Returns the maximum number of HSHUBs in a shim instance.
+ *
+ * maxHshubs[OUT]
+ *   The maximum number of HSHUBs in a shim instance.
+ *
+ * @return NV_OK
+ */
+#define NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_MAX_HSHUBS_PER_SHIM (0x20800a79) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_GET_MAX_HSHUBS_PER_SHIM_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_HSHUB_GET_MAX_HSHUBS_PER_SHIM_PARAMS_MESSAGE_ID (0x79U)
+
+typedef struct NV2080_CTRL_INTERNAL_HSHUB_GET_MAX_HSHUBS_PER_SHIM_PARAMS {
+    NvU32 maxHshubs;
+} NV2080_CTRL_INTERNAL_HSHUB_GET_MAX_HSHUBS_PER_SHIM_PARAMS;
+
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_GSYNC_GET_RASTER_SYNC_DECODE_MODE
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_INTERNAL_GSYNC_GET_RASTER_SYNC_DECODE_MODE_PARAMS_MESSAGE_ID (0x14U)
+
+typedef struct NV2080_CTRL_INTERNAL_GSYNC_GET_RASTER_SYNC_DECODE_MODE_PARAMS {
+    NvU32 rasterSyncDecodeMode;
+} NV2080_CTRL_INTERNAL_GSYNC_GET_RASTER_SYNC_DECODE_MODE_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_GSYNC_GET_RASTER_SYNC_DECODE_MODE (0x20800a14) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GSYNC_GET_RASTER_SYNC_DECODE_MODE_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_INTERNAL_GPU_GET_PF_BAR1_SPA_PARAMS
+ *
+ * This is an internal command sent from kernel-RM to physical-RM to retrieve
+ * the GPU PF BAR1 SPA. The BAR1 SPA is required for BAR1 mapping in the
+ * Direct NIC case for DMA (Direct Memory Access) of FB.
+ *
+ * spaValue[OUT]
+ *   - BAR1 SPA of the GPU PF
+ */
+#define NV2080_CTRL_INTERNAL_GPU_GET_PF_BAR1_SPA_PARAMS_MESSAGE_ID (0xEEU)
+
+typedef struct NV2080_CTRL_INTERNAL_GPU_GET_PF_BAR1_SPA_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 spaValue, 8);
+} NV2080_CTRL_INTERNAL_GPU_GET_PF_BAR1_SPA_PARAMS;
+#define NV2080_CTRL_CMD_INTERNAL_GPU_GET_PF_BAR1_SPA (0x20800aee) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPU_GET_PF_BAR1_SPA_PARAMS_MESSAGE_ID" */
+
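+/*
+ * Illustrative sketch (not part of this header): fetching the PF BAR1 SPA for
+ * a Direct NIC mapping. issueInternalCtrl() is a hypothetical dispatch helper.
+ *
+ *     NV2080_CTRL_INTERNAL_GPU_GET_PF_BAR1_SPA_PARAMS spa = { 0 };
+ *     status = issueInternalCtrl(pGpu, NV2080_CTRL_CMD_INTERNAL_GPU_GET_PF_BAR1_SPA,
+ *                                &spa, sizeof(spa));
+ *     // On NV_OK, spa.spaValue is the system physical address backing BAR1,
+ *     // usable when setting up DMA to FB.
+ */
+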
+/*
+ * NV2080_CTRL_CMD_INTERNAL_NVLINK_ENABLE_NVLINK_PEER
+ *
+ * This command is used to enable RM NVLink enabled peer state.
+ * Note: This just updates the RM state. To reflect the state in the registers,
+ * use NV2080_CTRL_CMD_NVLINK_SET_NVLINK_PEER
+ *
+ * [in] peerMask
+ *   Mask of Peer IDs for which USE_NVLINK_PEER needs to be enabled
+ * [in] bEnable
+ *   Whether the bit needs to be set or unset
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *     If the USE_NVLINK_PEER bit was enabled successfully
+ *   NV_ERR_NOT_SUPPORTED
+ *     If NVLINK is not supported on the chip, or
+ *     if unsetting the USE_NVLINK_PEER bit is not supported
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_NVLINK_ENABLE_NVLINK_PEER (0x20800a21U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_ENABLE_NVLINK_PEER_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_NVLINK_ENABLE_NVLINK_PEER_PARAMS_MESSAGE_ID (0x21U)
+
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_ENABLE_NVLINK_PEER_PARAMS {
+    NvU32  peerMask;
+    NvBool bEnable;
+} NV2080_CTRL_INTERNAL_NVLINK_ENABLE_NVLINK_PEER_PARAMS;
+
+/*
+ * NVLINK Link states
+ * These should ALWAYS match the nvlink core library defines in nvlink.h
+ */
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_OFF                0x00U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_HS                 0x01U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_SAFE               0x02U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_FAULT              0x03U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_RECOVERY           0x04U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_FAIL               0x05U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_DETECT             0x06U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_RESET              0x07U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_ENABLE_PM          0x08U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_DISABLE_PM         0x09U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_SLEEP              0x0AU
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_SAVE_STATE         0x0BU
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_RESTORE_STATE      0x0CU
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_PRE_HS             0x0EU
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_DISABLE_ERR_DETECT 0x0FU
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_LANE_DISABLE       0x10U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_LANE_SHUTDOWN      0x11U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_TRAFFIC_SETUP      0x12U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_INITPHASE1         0x13U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_INITNEGOTIATE      0x14U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_POST_INITNEGOTIATE 0x15U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_INITOPTIMIZE       0x16U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_POST_INITOPTIMIZE  0x17U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_DISABLE_HEARTBEAT  0x18U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_CONTAIN            0x19U
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_INITTL             0x1AU
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_INITPHASE5         0x1BU
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_ALI                0x1CU
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_ACTIVE_PENDING     0x1DU
+#define NV2080_INTERNAL_NVLINK_CORE_LINK_STATE_INVALID            0xFFU
+
+/*
+ * NVLINK TX Sublink states
+ * These should ALWAYS match the nvlink core library defines in nvlink.h
+ */
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_HS          0x00U
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_SINGLE_LANE 0x04U
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_LOW_POWER   0x04U
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_TRAIN       0x05U
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_SAFE        0x06U
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_OFF         0x07U
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_COMMON_MODE 0x08U
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_COMMON_MODE_DISABLE 0x09U
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_DATA_READY          0x0AU
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_EQ                  0x0BU
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_PRBS_EN             0x0CU
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_POST_HS             0x0DU
+
+/*
+ * NVLINK RX Sublink states
+ * These should ALWAYS match the nvlink core library defines in nvlink.h
+ */
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_RX_HS          0x00U
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_RX_SINGLE_LANE 0x04U
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_RX_LOW_POWER   0x04U
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_RX_TRAIN       0x05U
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_RX_SAFE        0x06U
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_RX_OFF         0x07U
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_RX_RXCAL       0x08U
+#define NV2080_NVLINK_CORE_SUBLINK_STATE_RX_INIT_TERM   0x09U
+
+/*
+ * Link training seed values
+ * These should ALWAYS match the values defined in nvlink.h
+ */
+#define NV2080_CTRL_INTERNAL_NVLINK_MAX_SEED_NUM         6U
+#define NV2080_CTRL_INTERNAL_NVLINK_MAX_SEED_BUFFER_SIZE (0x7U) /* finn: Evaluated from "NV2080_CTRL_INTERNAL_NVLINK_MAX_SEED_NUM + 1" */
+
+// NVLINK callback types
+#define NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE_GET_DL_LINK_MODE      0x00U
+#define NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE_SET_DL_LINK_MODE      0x01U
+#define NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE_GET_TL_LINK_MODE      0x02U
+#define NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE_SET_TL_LINK_MODE      0x03U
+#define NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE_GET_TX_SUBLINK_MODE   0x04U
+#define NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE_SET_TX_SUBLINK_MODE   0x05U
+#define NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE_GET_RX_SUBLINK_MODE   0x06U
+#define NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE_SET_RX_SUBLINK_MODE   0x07U
+#define NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE_GET_RX_SUBLINK_DETECT 0x08U
+#define NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE_SET_RX_SUBLINK_DETECT 0x09U
+#define NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE_WRITE_DISCOVERY_TOKEN 0x0AU
+#define NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE_READ_DISCOVERY_TOKEN  0x0BU
+#define NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE_TRAINING_COMPLETE     0x0CU
+#define NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE_GET_UPHY_LOAD         0x0DU
+
+/*
+ * Structure to store the GET_DL_LINK_MODE callback params.
+ * mode
+ *   The current Nvlink DL mode
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_GET_DL_LINK_MODE_PARAMS {
+    NvU32 mode;
+} NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_GET_DL_LINK_MODE_PARAMS;
+
+/*
+ * Structure to store the SET_DL_LINK_MODE callback OFF params
+ * seedData
+ *   The output seed data
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_SET_DL_LINK_MODE_OFF_PARAMS {
+    NvU32 seedData[NV2080_CTRL_INTERNAL_NVLINK_MAX_SEED_BUFFER_SIZE];
+} NV2080_CTRL_INTERNAL_NVLINK_SET_DL_LINK_MODE_OFF_PARAMS;
+
+/*
+ * Structure to store the SET_DL_LINK_MODE callback PRE_HS params
+ * remoteDeviceType
+ *   The input remote Device Type
+ * ipVerDlPl
+ *   The input DLPL version
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_SET_DL_LINK_MODE_PRE_HS_PARAMS {
+    NvU32 remoteDeviceType;
+    NvU32 ipVerDlPl;
+} NV2080_CTRL_INTERNAL_NVLINK_SET_DL_LINK_MODE_PRE_HS_PARAMS;
+
+/*
+ * Structure to store the SET_DL_LINK_MODE callback INIT_PHASE1 params
+ * seedData[]
+ *   The input seed data
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_SET_DL_LINK_MODE_INIT_PHASE1_PARAMS {
+    NvU32 seedData[NV2080_CTRL_INTERNAL_NVLINK_MAX_SEED_BUFFER_SIZE];
+} NV2080_CTRL_INTERNAL_NVLINK_SET_DL_LINK_MODE_INIT_PHASE1_PARAMS;
+
+/*
+ * Structure to store the Nvlink Remote and Local SID info
+ * remoteSid
+ *   The output remote SID
+ * remoteDeviceType
+ *   The output remote Device Type
+ * remoteLinkId
+ *   The output remote link ID
+ * localSid
+ *   The output local SID
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_REMOTE_LOCAL_SID_INFO {
+    NV_DECLARE_ALIGNED(NvU64 remoteSid, 8);
+    NvU32 remoteDeviceType;
+    NvU32 remoteLinkId;
+    NV_DECLARE_ALIGNED(NvU64 localSid, 8);
+} NV2080_CTRL_INTERNAL_NVLINK_REMOTE_LOCAL_SID_INFO;
+
+/*
+ * Structure to store the SET_DL_LINK_MODE callback POST_INITNEGOTIATE params
+ * bInitnegotiateConfigGood
+ *   The output bool if the config is good
+ * remoteLocalSidInfo
+ *   The output structure containing the Nvlink Remote/Local SID info
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_SET_DL_LINK_MODE_POST_INITNEGOTIATE_PARAMS {
+    NvBool bInitnegotiateConfigGood;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_NVLINK_REMOTE_LOCAL_SID_INFO remoteLocalSidInfo, 8);
+} NV2080_CTRL_INTERNAL_NVLINK_SET_DL_LINK_MODE_POST_INITNEGOTIATE_PARAMS;
+
+/*
+ * Structure to store the SET_DL_LINK_MODE callback POST_INITOPTIMIZE params
+ * bPollDone
+ *   The output bool if the polling has finished
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_SET_DL_LINK_MODE_POST_INITOPTIMIZE_PARAMS {
+    NvBool bPollDone;
+} NV2080_CTRL_INTERNAL_NVLINK_SET_DL_LINK_MODE_POST_INITOPTIMIZE_PARAMS;
+
+/*
+ * Structure to store the SET_DL_LINK_MODE callback params
+ * mode
+ *   The input nvlink state to set
+ * bSync
+ *   The input sync boolean
+ * linkMode
+ *   The input link mode to be set for the callback
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_SET_DL_LINK_MODE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 mode, 8);
+    NvBool bSync;
+    NvU32  linkMode;
+    union {
+        NV2080_CTRL_INTERNAL_NVLINK_SET_DL_LINK_MODE_OFF_PARAMS         linkModeOffParams;
+        NV2080_CTRL_INTERNAL_NVLINK_SET_DL_LINK_MODE_PRE_HS_PARAMS      linkModePreHsParams;
+        NV2080_CTRL_INTERNAL_NVLINK_SET_DL_LINK_MODE_INIT_PHASE1_PARAMS linkModeInitPhase1Params;
+        NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_NVLINK_SET_DL_LINK_MODE_POST_INITNEGOTIATE_PARAMS linkModePostInitNegotiateParams, 8);
+        NV2080_CTRL_INTERNAL_NVLINK_SET_DL_LINK_MODE_POST_INITOPTIMIZE_PARAMS linkModePostInitOptimizeParams;
+    } linkModeParams;
+} NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_SET_DL_LINK_MODE_PARAMS;
+
+/*
+ * Structure to store the GET_TL_LINK_MODE callback params.
+ * mode
+ *   The current Nvlink TL mode
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_GET_TL_LINK_MODE_PARAMS {
+    NvU32 mode;
+} NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_GET_TL_LINK_MODE_PARAMS;
+
+/*
+ * Structure to store the SET_TL_LINK_MODE callback params
+ * mode
+ *   The input nvlink mode to set
+ * bSync
+ *   The input sync boolean
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_SET_TL_LINK_MODE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 mode, 8);
+    NvBool bSync;
+} NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_SET_TL_LINK_MODE_PARAMS;
+
+/*
+ * Structure to store the GET_RX/TX_SUBLINK_MODE callback params
+ * sublinkMode
+ *   The current Sublink mode
+ * sublinkSubMode
+ *   The current Sublink sub mode
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_GET_SUBLINK_MODE_PARAMS {
+    NvU32 sublinkMode;
+    NvU32 sublinkSubMode;
+} NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_GET_SUBLINK_MODE_PARAMS;
+
+/*
+ * Structure to store the SET_TX_SUBLINK_MODE callback params
+ * mode
+ *   The input nvlink mode to set
+ * bSync
+ *   The input sync boolean
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_SET_TX_SUBLINK_MODE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 mode, 8);
+    NvBool bSync;
+} NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_SET_TX_SUBLINK_MODE_PARAMS;
+
+/*
+ * Structure to store the SET_RX_SUBLINK_MODE callback params
+ * mode
+ *   The input nvlink mode to set
+ * bSync
+ *   The input sync boolean
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_SET_RX_SUBLINK_MODE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 mode, 8);
+    NvBool bSync;
+} NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_SET_RX_SUBLINK_MODE_PARAMS;
+
+/*
+ * Structure to store the GET_RX_SUBLINK_DETECT callback params
+ * laneRxdetStatusMask
+ *   The output RXDET per-lane status mask
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_GET_RX_DETECT_PARAMS {
+    NvU32 laneRxdetStatusMask;
+} NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_GET_RX_DETECT_PARAMS;
+
+/*
+ * Structure to store the SET_RX_DETECT callback params
+ * bSync
+ *   The input bSync boolean
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_SET_RX_DETECT_PARAMS {
+    NvBool bSync;
+} NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_SET_RX_DETECT_PARAMS;
+
+/*
+ * Structure to store the RD_WR_DISCOVERY_TOKEN callback params
+ * ipVerDlPl
+ *   The input DLPL version
+ * token
+ *   The output token
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_RD_WR_DISCOVERY_TOKEN_PARAMS {
+    NvU32 ipVerDlPl;
+    NV_DECLARE_ALIGNED(NvU64 token, 8);
+} NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_RD_WR_DISCOVERY_TOKEN_PARAMS;
+
+/*
+ * Structure to store the GET_UPHY_LOAD callback params
+ * bUnlocked
+ *   The output unlocked boolean
+ */
+typedef struct NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_GET_UPHY_LOAD_PARAMS {
+    NvBool bUnlocked;
+} NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_GET_UPHY_LOAD_PARAMS;
NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_SET_TX_SUBLINK_MODE_PARAMS setTxSublinkMode, 8); + NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_GET_SUBLINK_MODE_PARAMS getRxSublinkMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_SET_RX_SUBLINK_MODE_PARAMS setRxSublinkMode, 8); + NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_GET_RX_DETECT_PARAMS getRxSublinkDetect; + NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_SET_RX_DETECT_PARAMS setRxSublinkDetect; + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_RD_WR_DISCOVERY_TOKEN_PARAMS writeDiscoveryToken, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_RD_WR_DISCOVERY_TOKEN_PARAMS readDiscoveryToken, 8); + NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_GET_UPHY_LOAD_PARAMS getUphyLoad; + } callbackParams; +} NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE; + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_CORE_CALLBACK + * + * Generic NvLink callback RPC to route commands to GSP + * + * [In] linkId + * ID of the link to be used + * [In/Out] callbackType + * Callback params + */ +#define NV2080_CTRL_INTERNAL_NVLINK_CORE_CALLBACK_PARAMS_MESSAGE_ID (0x24U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_CORE_CALLBACK_PARAMS { + NvU32 linkId; + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_NVLINK_CALLBACK_TYPE callbackType, 8); +} NV2080_CTRL_INTERNAL_NVLINK_CORE_CALLBACK_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_CORE_CALLBACK (0x20800a24U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_CORE_CALLBACK_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_UPDATE_REMOTE_LOCAL_SID + * + * Update Remote and Local Sid info via GSP + * + * [In] linkId + * ID of the link to be used + * [Out] remoteLocalSidInfo + * The output structure containing the Nvlink Remote/Local SID info + */ +#define NV2080_CTRL_INTERNAL_NVLINK_UPDATE_REMOTE_LOCAL_SID_PARAMS_MESSAGE_ID (0x25U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_UPDATE_REMOTE_LOCAL_SID_PARAMS { + NvU32 linkId; + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_NVLINK_REMOTE_LOCAL_SID_INFO remoteLocalSidInfo, 8); +} NV2080_CTRL_INTERNAL_NVLINK_UPDATE_REMOTE_LOCAL_SID_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_UPDATE_REMOTE_LOCAL_SID (0x20800a25U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_UPDATE_REMOTE_LOCAL_SID_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_ALI_ENABLED + * + * Returns if ALI is enabled + * + * [Out] bEnableAli + * Output boolean for ALI enablement + */ +#define NV2080_CTRL_INTERNAL_NVLINK_GET_ALI_ENABLED_PARAMS_MESSAGE_ID (0x29U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_GET_ALI_ENABLED_PARAMS { + NvBool bEnableAli; +} NV2080_CTRL_INTERNAL_NVLINK_GET_ALI_ENABLED_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_ALI_ENABLED (0x20800a29U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_GET_ALI_ENABLED_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_NVLINK_UPDATE_HSHUB_MUX_TYPE_PROGRAM 0x0U +#define NV2080_CTRL_INTERNAL_NVLINK_UPDATE_HSHUB_MUX_TYPE_RESET 0x1U + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_UPDATE_HSHUB_MUX + * + * Generic Hshub Mux Update RPC to route commands to GSP + * + * [In] updateType + * HSHUB Mux update type to program or reset Mux + * [In] bSysMem + * Boolean to differentiate between sysmem and peer mem + * [In] peerMask + * Mask of peer IDs. 
Only parsed when bSysMem is false + */ +#define NV2080_CTRL_INTERNAL_NVLINK_UPDATE_HSHUB_MUX_PARAMS_MESSAGE_ID (0x42U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_UPDATE_HSHUB_MUX_PARAMS { + NvBool updateType; + NvBool bSysMem; + NvU32 peerMask; +} NV2080_CTRL_INTERNAL_NVLINK_UPDATE_HSHUB_MUX_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_UPDATE_HSHUB_MUX (0x20800a42U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_UPDATE_HSHUB_MUX_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_PRE_SETUP_NVLINK_PEER + * + * Performs all the necessary actions required before setting a peer on NVLink + * + * [In] peerId + * Peer ID which will be set on NVLink + * [In] peerLinkMask + * Mask of links that connect to the given peer + * [In] bEgmPeer + * Whether the given peer is an EGM peer + * [In] bNvswitchConn + * Is the GPU connected to NVSwitch + */ +#define NV2080_CTRL_INTERNAL_NVLINK_PRE_SETUP_NVLINK_PEER_PARAMS_MESSAGE_ID (0x4EU) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_PRE_SETUP_NVLINK_PEER_PARAMS { + NvU32 peerId; + NV_DECLARE_ALIGNED(NvU64 peerLinkMask, 8); + NvBool bEgmPeer; + NvBool bNvswitchConn; +} NV2080_CTRL_INTERNAL_NVLINK_PRE_SETUP_NVLINK_PEER_PARAMS; +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_PRE_SETUP_NVLINK_PEER (0x20800a4eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_PRE_SETUP_NVLINK_PEER_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_POST_SETUP_NVLINK_PEER + * + * Performs all the necessary actions required after setting a peer on NVLink + * + * [In] peerMask + * Mask of Peer IDs which has been set on NVLink + */ +#define NV2080_CTRL_INTERNAL_NVLINK_POST_SETUP_NVLINK_PEER_PARAMS_MESSAGE_ID (0x50U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_POST_SETUP_NVLINK_PEER_PARAMS { + NvU32 peerMask; +} NV2080_CTRL_INTERNAL_NVLINK_POST_SETUP_NVLINK_PEER_PARAMS; +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_POST_SETUP_NVLINK_PEER (0x20800a50U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_POST_SETUP_NVLINK_PEER_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_LOG_OOB_XID + * + * Log an XID message to OOB. + * + * xid [in] + * The XID number of the message. + * + * message [in] + * The text message, including the NULL terminator. + * + * len [in] + * The length, in bytes, of the text message, excluding the NULL terminator. 
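+ *
+ * A population sketch (illustrative only: the XID number and message text
+ * below are made-up examples, <string.h> is assumed for strncpy/strlen, and
+ * issuing the control call itself is outside the scope of this header):
+ *
+ *     NV2080_CTRL_INTERNAL_LOG_OOB_XID_PARAMS p = { 0 };
+ *     const char *msg = "example OOB message";   // hypothetical text
+ *     p.xid = 79;                                // example XID number
+ *     // message carries the NUL terminator; the zero-initialized struct
+ *     // plus the length cap below guarantees one is present.
+ *     strncpy((char *)p.message, msg,
+ *             NV2080_INTERNAL_OOB_XID_MESSAGE_BUFFER_SIZE - 1);
+ *     p.len = (NvU32)strlen((const char *)p.message);  // excludes the NUL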
+ * + * Possible status return values are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_INTERNAL_LOG_OOB_XID (0x20800a56U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_LOG_OOB_XID_PARAMS_MESSAGE_ID" */ + +#define NV2080_INTERNAL_OOB_XID_MESSAGE_BUFFER_SIZE (81U) + +#define NV2080_CTRL_INTERNAL_LOG_OOB_XID_PARAMS_MESSAGE_ID (0x56U) + +typedef struct NV2080_CTRL_INTERNAL_LOG_OOB_XID_PARAMS { + NvU32 xid; + NvU8 message[NV2080_INTERNAL_OOB_XID_MESSAGE_BUFFER_SIZE]; + NvU32 len; +} NV2080_CTRL_INTERNAL_LOG_OOB_XID_PARAMS; + +#define NV2080_CTRL_INTERNAL_NVLINK_REMOVE_NVLINK_MAPPING_TYPE_SYSMEM 0x1U +#define NV2080_CTRL_INTERNAL_NVLINK_REMOVE_NVLINK_MAPPING_TYPE_PEER 0x2U + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_REMOVE_NVLINK_MAPPING + * + * Performs all the necessary actions required to remove NVLink mapping (sysmem or peer or both) + * + * [In] mapTypeMask + * Remove NVLink mapping for the given map types (sysmem or peer or both) + * [In] peerMask + * Mask of Peer IDs which need to be removed on NVLink + * Only parsed if mapTypeMask accounts for peer + * [In] bL2Entry + * Is the peer removal happening because links are entering L2 low power state? + * Only parsed if mapTypeMask accounts for peer + */ +#define NV2080_CTRL_INTERNAL_NVLINK_REMOVE_NVLINK_MAPPING_PARAMS_MESSAGE_ID (0x5FU) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_REMOVE_NVLINK_MAPPING_PARAMS { + NvU32 mapTypeMask; + NvU32 peerMask; + NvBool bL2Entry; +} NV2080_CTRL_INTERNAL_NVLINK_REMOVE_NVLINK_MAPPING_PARAMS; +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_REMOVE_NVLINK_MAPPING (0x20800a5fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_REMOVE_NVLINK_MAPPING_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_SAVE_RESTORE_HSHUB_STATE + * + * Performs all the necessary actions required to save/restore HSHUB state during NVLink L2 entry/exit + * + * [In] bSave + * Whether this is a save/restore operation + * [In] linkMask + * Mask of links for which HSHUB config registers need to be saved/restored + */ +#define NV2080_CTRL_INTERNAL_NVLINK_SAVE_RESTORE_HSHUB_STATE_PARAMS_MESSAGE_ID (0x62U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_SAVE_RESTORE_HSHUB_STATE_PARAMS { + NvBool bSave; + NvU32 linkMask; +} NV2080_CTRL_INTERNAL_NVLINK_SAVE_RESTORE_HSHUB_STATE_PARAMS; +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_SAVE_RESTORE_HSHUB_STATE (0x20800a62U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_SAVE_RESTORE_HSHUB_STATE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_NVLINK_PROGRAM_BUFFERREADY_FLAGS_SET (0x00000000) +#define NV2080_CTRL_INTERNAL_NVLINK_PROGRAM_BUFFERREADY_FLAGS_SAVE (0x00000001) +#define NV2080_CTRL_INTERNAL_NVLINK_PROGRAM_BUFFERREADY_FLAGS_RESTORE (0x00000002) + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_PROGRAM_BUFFERREADY + * + * Performs all the necessary actions required to save/restore bufferready state during NVLink L2 entry/exit + * + * [In] flags + * Whether to set, save or restore bufferready + * [In] bSysmem + * Whether to perform the operation for sysmem links or peer links + * [In] peerLinkMask + * Mask of peer links for which bufferready state needs to be set/saved/restored + */ +#define NV2080_CTRL_INTERNAL_NVLINK_PROGRAM_BUFFERREADY_PARAMS_MESSAGE_ID (0x64U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_PROGRAM_BUFFERREADY_PARAMS { + NvU32 flags; + NvBool bSysmem; + NV_DECLARE_ALIGNED(NvU64 peerLinkMask, 8); 
+} NV2080_CTRL_INTERNAL_NVLINK_PROGRAM_BUFFERREADY_PARAMS; +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_PROGRAM_BUFFERREADY (0x20800a64U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_PROGRAM_BUFFERREADY_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_UPDATE_CURRENT_CONFIG + * + * Performs all the necessary actions required to update the current Nvlink configuration + * + * [out] bNvlinkSysmemEnabled + * Whether sysmem nvlink support was enabled + */ +#define NV2080_CTRL_INTERNAL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS_MESSAGE_ID (0x78U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS { + NvBool bNvlinkSysmemEnabled; +} NV2080_CTRL_INTERNAL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS; +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_UPDATE_CURRENT_CONFIG (0x20800a78U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_UPDATE_PEER_LINK_MASK + * + * Synchronizes the peerLinkMask between CPU-RM and GSP-RM + * + * [In] gpuInst + * Gpu instance + * [In] peerLinkMask + * Mask of links to the given peer GPU + */ +#define NV2080_CTRL_INTERNAL_NVLINK_UPDATE_PEER_LINK_MASK_PARAMS_MESSAGE_ID (0x7DU) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_UPDATE_PEER_LINK_MASK_PARAMS { + NvU32 gpuInst; + NV_DECLARE_ALIGNED(NvU64 peerLinkMask, 8); +} NV2080_CTRL_INTERNAL_NVLINK_UPDATE_PEER_LINK_MASK_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_UPDATE_PEER_LINK_MASK (0x20800a7dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_UPDATE_PEER_LINK_MASK_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_UPDATE_LINK_CONNECTION + * + * Updates the remote connection information for a link + * + * [In] linkId + * Id of the link to be used + * [In] bConnected + * Boolean that tracks whether the link is connected + * [In] remoteDeviceType + * Tracks whether the remote device is switch/gpu/ibmnpu/tegra + * [In] remoteLinkNumber + * Tracks the link number for the connected remote device + */ +#define NV2080_CTRL_INTERNAL_NVLINK_UPDATE_LINK_CONNECTION_PARAMS_MESSAGE_ID (0x82U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_UPDATE_LINK_CONNECTION_PARAMS { + NV_DECLARE_ALIGNED(NvU64 remoteDeviceType, 8); + NV_DECLARE_ALIGNED(NvU64 remoteChipSid, 8); + NvU32 linkId; + NvU32 laneRxdetStatusMask; + NvU32 remoteLinkNumber; + NvU32 remotePciDeviceId; + NvU32 remoteDomain; + NvU8 remoteBus; + NvU8 remoteDevice; + NvU8 remoteFunction; + NvBool bConnected; +} NV2080_CTRL_INTERNAL_NVLINK_UPDATE_LINK_CONNECTION_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_UPDATE_LINK_CONNECTION (0x20800a82U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_UPDATE_LINK_CONNECTION_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_ENABLE_LINKS_POST_TOPOLOGY + * + * Enable links post topology via GSP + * + * [In] linkMask + * Mask of links to enable + * [Out] initializedLinks + * Mask of links that were initialized + */ +#define NV2080_CTRL_INTERNAL_NVLINK_ENABLE_LINKS_POST_TOPOLOGY_PARAMS_MESSAGE_ID (0x83U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_ENABLE_LINKS_POST_TOPOLOGY_PARAMS { + NvU32 linkMask; + NvU32 initializedLinks; +} NV2080_CTRL_INTERNAL_NVLINK_ENABLE_LINKS_POST_TOPOLOGY_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_ENABLE_LINKS_POST_TOPOLOGY (0x20800a83U) /* finn: 
Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_ENABLE_LINKS_POST_TOPOLOGY_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_PRE_LINK_TRAIN_ALI + * + * [In] linkMask + * Mask of enabled links to train + * [In] bSync + * The input sync boolean + */ +#define NV2080_CTRL_INTERNAL_NVLINK_PRE_LINK_TRAIN_ALI_PARAMS_MESSAGE_ID (0x84U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_PRE_LINK_TRAIN_ALI_PARAMS { + NvU32 linkMask; + NvBool bSync; +} NV2080_CTRL_INTERNAL_NVLINK_PRE_LINK_TRAIN_ALI_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_PRE_LINK_TRAIN_ALI (0x20800a84U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_PRE_LINK_TRAIN_ALI_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_NVLINK_MAX_ARR_SIZE 64 + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_LINK_MASK_POST_RX_DET + * + * Get link mask post Rx detection + * + * [Out] postRxDetLinkMask + * Mask of links discovered + * [Out] laneRxdetStatusMask + * RXDET per-lane status mask + */ +#define NV2080_CTRL_INTERNAL_NVLINK_GET_LINK_MASK_POST_RX_DET_PARAMS_MESSAGE_ID (0x85U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_GET_LINK_MASK_POST_RX_DET_PARAMS { + NV_DECLARE_ALIGNED(NvU64 postRxDetLinkMask, 8); + NvU32 laneRxdetStatusMask[NV2080_CTRL_INTERNAL_NVLINK_MAX_ARR_SIZE]; +} NV2080_CTRL_INTERNAL_NVLINK_GET_LINK_MASK_POST_RX_DET_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_LINK_MASK_POST_RX_DET (0x20800a85U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_GET_LINK_MASK_POST_RX_DET_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_LINK_TRAIN_ALI + * + * [In] linkMask + * Mask of enabled links to train + * [In] bSync + * The input sync boolean + */ +#define NV2080_CTRL_INTERNAL_NVLINK_LINK_TRAIN_ALI_PARAMS_MESSAGE_ID (0x86U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_LINK_TRAIN_ALI_PARAMS { + NvU32 linkMask; + NvBool bSync; +} NV2080_CTRL_INTERNAL_NVLINK_LINK_TRAIN_ALI_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_LINK_TRAIN_ALI (0x20800a86U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_LINK_TRAIN_ALI_PARAMS_MESSAGE_ID" */ + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_DEVICE_LINK_VALUES { + NvBool bValid; + NvU8 linkId; + NvU32 ioctrlId; + NvU8 pllMasterLinkId; + NvU8 pllSlaveLinkId; + NvU32 ipVerDlPl; +} NV2080_CTRL_INTERNAL_NVLINK_DEVICE_LINK_VALUES; + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_NVLINK_DEVICE_INFO + * + * [Out] ioctrlMask + * Mask of IOCTRLs discovered from PTOP device info table + * [Out] ioctrlNumEntries + * Number of IOCTRL entries in the PTOP device info table + * [Out] ioctrlSize + * Maximum number of entries in the PTOP device info table + * [Out] discoveredLinks + * Mask of links discovered from all the IOCTRLs + * [Out] ipVerNvlink + * IP revision of the NVLink HW + * [Out] maxSupportedLinks + * Maximum number of links supported for a given arch + * [Out] linkInfo + * Per link information + */ + +#define NV2080_CTRL_INTERNAL_NVLINK_GET_NVLINK_DEVICE_INFO_PARAMS_MESSAGE_ID (0x87U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_GET_NVLINK_DEVICE_INFO_PARAMS { + NvU32 ioctrlMask; + NvU8 ioctrlNumEntries; + NvU32 ioctrlSize; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK discoveredLinks, 8); + NvU32 ipVerNvlink; + NvU32 maxSupportedLinks; + NV2080_CTRL_INTERNAL_NVLINK_DEVICE_LINK_VALUES linkInfo[NV2080_CTRL_INTERNAL_NVLINK_MAX_ARR_SIZE]; +} 
NV2080_CTRL_INTERNAL_NVLINK_GET_NVLINK_DEVICE_INFO_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_NVLINK_DEVICE_INFO (0x20800a87U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_GET_NVLINK_DEVICE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_NVLINK_MAX_LINKS_PER_IOCTRL_SW 6U + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_DEVICE_IP_REVISION_VALUES { + NvU32 ipVerIoctrl; + NvU32 ipVerMinion; +} NV2080_CTRL_INTERNAL_NVLINK_DEVICE_IP_REVISION_VALUES; + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_IOCTRL_DEVICE_INFO + * + * [In] ioctrlIdx + * IOCTRL index + * [Out] PublicId + * PublicId of the IOCTRL discovered + * [Out] localDiscoveredLinks + * Mask of discovered links local to the IOCTRL + * [Out] localGlobalLinkOffset + * Global link offsets for the locally discovered links + * [Out] ioctrlDiscoverySize + * IOCTRL table size + * [Out] numDevices + * Number of devices discovered from the IOCTRL + * [Out] ipRevisions + * IP revisions for the devices discovered in the IOCTRL + */ + +#define NV2080_CTRL_INTERNAL_NVLINK_GET_IOCTRL_DEVICE_INFO_PARAMS_MESSAGE_ID (0x8EU) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_GET_IOCTRL_DEVICE_INFO_PARAMS { + NvU32 ioctrlIdx; + NvU32 PublicId; + NvU32 localDiscoveredLinks; + NvU32 localGlobalLinkOffset; + NvU32 ioctrlDiscoverySize; + NvU8 numDevices; + NV2080_CTRL_INTERNAL_NVLINK_DEVICE_IP_REVISION_VALUES ipRevisions; +} NV2080_CTRL_INTERNAL_NVLINK_GET_IOCTRL_DEVICE_INFO_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_IOCTRL_DEVICE_INFO (0x20800a8eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_GET_IOCTRL_DEVICE_INFO_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_PROGRAM_LINK_SPEED + * + * Program NVLink Speed from OS/VBIOS + * + * [In] bPlatformLinerateDefined + * Whether line rate is defined in the platform + * [In] platformLineRate + * Platform defined line rate + * [Out] nvlinkLinkSpeed + * The line rate that was programmed for the links + */ +#define NV2080_CTRL_INTERNAL_NVLINK_PROGRAM_LINK_SPEED_PARAMS_MESSAGE_ID (0x8FU) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_PROGRAM_LINK_SPEED_PARAMS { + NvBool bPlatformLinerateDefined; + NvU32 platformLineRate; + NvU32 nvlinkLinkSpeed; +} NV2080_CTRL_INTERNAL_NVLINK_PROGRAM_LINK_SPEED_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_PROGRAM_LINK_SPEED (0x20800a8fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_PROGRAM_LINK_SPEED_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_ARE_LINKS_TRAINED + * + * [In] linkMask + * Mask of links whose state will be checked + * [In] bActiveOnly + * The input boolean to check for Link Active state + * [Out] bIsLinkActive + * Boolean array to track if the link is trained + */ +#define NV2080_CTRL_INTERNAL_NVLINK_ARE_LINKS_TRAINED_PARAMS_MESSAGE_ID (0x90U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_ARE_LINKS_TRAINED_PARAMS { + NvU32 linkMask; + NvBool bActiveOnly; + NvBool bIsLinkActive[NV2080_CTRL_INTERNAL_NVLINK_MAX_ARR_SIZE]; +} NV2080_CTRL_INTERNAL_NVLINK_ARE_LINKS_TRAINED_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_ARE_LINKS_TRAINED (0x20800a90U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_ARE_LINKS_TRAINED_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_NVLINK_RESET_FLAGS_ASSERT (0x00000000) +#define NV2080_CTRL_INTERNAL_NVLINK_RESET_FLAGS_DEASSERT 
(0x00000001) +#define NV2080_CTRL_INTERNAL_NVLINK_RESET_FLAGS_TOGGLE (0x00000002) + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_RESET_LINKS + * + * [In] linkMask + * Mask of links which need to be reset + * [In] flags + * Whether to assert, de-assert or toggle the Nvlink reset + */ + +#define NV2080_CTRL_INTERNAL_NVLINK_RESET_LINKS_PARAMS_MESSAGE_ID (0x91U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_RESET_LINKS_PARAMS { + NvU32 linkMask; + NvU32 flags; +} NV2080_CTRL_INTERNAL_NVLINK_RESET_LINKS_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_RESET_LINKS (0x20800a91U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_RESET_LINKS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_DISABLE_DL_INTERRUPTS + * + * [In] linkMask + * Mask of links for which DL interrupts need to be disabled + */ +#define NV2080_CTRL_INTERNAL_NVLINK_DISABLE_DL_INTERRUPTS_PARAMS_MESSAGE_ID (0x92U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_DISABLE_DL_INTERRUPTS_PARAMS { + NvU32 linkMask; +} NV2080_CTRL_INTERNAL_NVLINK_DISABLE_DL_INTERRUPTS_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_DISABLE_DL_INTERRUPTS (0x20800a92U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_DISABLE_DL_INTERRUPTS_PARAMS_MESSAGE_ID" */ + +/* + * Structure to store the GET_LINK_AND_CLOCK_INFO params + * + * [Out] bLinkConnectedToSystem + * Boolean indicating sysmem connection of a link + * [Out] bLinkConnectedToPeer + * Boolean indicating peer connection of a link + * [Out] bLinkReset + * Whether the link is in reset + * [Out] subLinkWidth + * Number of lanes per sublink + * [Out] linkState + * Mode of the link + * [Out] txSublinkState + * Tx sublink state + * [Out] rxSublinkState + * Rx sublink state + * [Out] bLaneReversal + * Boolean indicating if a link's lanes are reversed + * [Out] nvlinkLinkClockKHz + * Link clock value in KHz + * [Out] nvlinkLineRateMbps + * Link line rate in Mbps + * [Out] nvlinkLinkClockMhz + * Link clock in MHz + * [Out] nvlinkLinkDataRateKiBps + * Link Data rate in KiBps + * [Out] nvlinkRefClkType + * Current Nvlink refclk source + * [Out] nvlinkReqLinkClockMhz + * Requested link clock value + * [Out] nvlinkMinL1Threshold + * Requested link Min L1 Threshold + * [Out] nvlinkMaxL1Threshold + * Requested link Max L1 Threshold + * [Out] nvlinkL1ThresholdUnits + * Requested link L1 Threshold Units + */ +typedef struct NV2080_CTRL_INTERNAL_NVLINK_GET_LINK_AND_CLOCK_VALUES { + NvBool bLinkConnectedToSystem; + NvBool bLinkConnectedToPeer; + NvBool bLinkReset; + NvU8 subLinkWidth; + NvU32 linkState; + NvU32 txSublinkState; + NvU32 rxSublinkState; + NvBool bLaneReversal; + NvU32 nvlinkLinkClockKHz; + NvU32 nvlinkLineRateMbps; + NvU32 nvlinkLinkClockMhz; + NvU32 nvlinkLinkDataRateKiBps; + NvU8 nvlinkRefClkType; + NvU32 nvlinkReqLinkClockMhz; + NvU32 nvlinkMinL1Threshold; + NvU32 nvlinkMaxL1Threshold; + NvU32 nvlinkL1ThresholdUnits; +} NV2080_CTRL_INTERNAL_NVLINK_GET_LINK_AND_CLOCK_VALUES; + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_LINK_AND_CLOCK_INFO + * + * [In] linkMask + * Mask of enabled links to loop over + * [Out] nvlinkRefClkSpeedKHz + * Ref clock value in KHz + * [Out] linkInfo + * Per link information + */ +#define NV2080_CTRL_INTERNAL_NVLINK_GET_LINK_AND_CLOCK_INFO_PARAMS_MESSAGE_ID (0x93U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_GET_LINK_AND_CLOCK_INFO_PARAMS { + NV_DECLARE_ALIGNED(NvU64 linkMask, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8); + NvU32 
nvlinkRefClkSpeedKHz; + NvBool bSublinkStateInst; // whether instantaneous sublink state is needed + NV2080_CTRL_INTERNAL_NVLINK_GET_LINK_AND_CLOCK_VALUES linkInfo[NV2080_CTRL_INTERNAL_NVLINK_MAX_ARR_SIZE]; +} NV2080_CTRL_INTERNAL_NVLINK_GET_LINK_AND_CLOCK_INFO_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_LINK_AND_CLOCK_INFO (0x20800a93U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_GET_LINK_AND_CLOCK_INFO_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_SETUP_NVLINK_SYSMEM + * + * Updates the HSHUB sysmem config register state to reflect sysmem NVLinks + * + * [In] sysmemLinkMask + * Mask of discovered sysmem NVLinks + */ +#define NV2080_CTRL_INTERNAL_NVLINK_SETUP_NVLINK_SYSMEM_PARAMS_MESSAGE_ID (0x94U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_SETUP_NVLINK_SYSMEM_PARAMS { + NvU32 sysmemLinkMask; +} NV2080_CTRL_INTERNAL_NVLINK_SETUP_NVLINK_SYSMEM_PARAMS; +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_SETUP_NVLINK_SYSMEM (0x20800a94U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_SETUP_NVLINK_SYSMEM_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_PROCESS_FORCED_CONFIGS + * + * Process NVLink forced configurations, which include setting up HSHUB and the memory system + * + * [In] bLegacyForcedConfig + * Tracks whether the forced config is legacy forced config or chiplib config + * [Out] bOverrideComputePeerMode + * Whether compute peer mode was enabled + * [In] phase + * Only applicable when bLegacyForcedConfig is true + * Tracks the set of registers to program from the NVLink table + * [In] linkConnection + * Array of chiplib configurations + */ +#define NV2080_CTRL_INTERNAL_NVLINK_PROCESS_FORCED_CONFIGS_PARAMS_MESSAGE_ID (0x95U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_PROCESS_FORCED_CONFIGS_PARAMS { + NvBool bLegacyForcedConfig; + NvBool bOverrideComputePeerMode; + NvU32 phase; + NvU32 linkConnection[NV2080_CTRL_INTERNAL_NVLINK_MAX_ARR_SIZE]; +} NV2080_CTRL_INTERNAL_NVLINK_PROCESS_FORCED_CONFIGS_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_PROCESS_FORCED_CONFIGS (0x20800a95U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_PROCESS_FORCED_CONFIGS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS + * + * Sync the NVLink lane shutdown properties with GSP-RM + * + * [In] bLaneShutdownOnUnload + * Whether nvlink shutdown should be triggered on driver unload + */ +#define NV2080_CTRL_INTERNAL_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS_PARAMS_MESSAGE_ID (0x96U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS_PARAMS { + NvBool bLaneShutdownOnUnload; +} NV2080_CTRL_INTERNAL_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS (0x20800a96U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_ENABLE_SYSMEM_NVLINK_ATS + * + * Enable ATS functionality related to NVLink sysmem if hardware support is available + * + * [In] notUsed + */ +#define NV2080_CTRL_INTERNAL_NVLINK_ENABLE_SYSMEM_NVLINK_ATS_PARAMS_MESSAGE_ID (0x97U) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_ENABLE_SYSMEM_NVLINK_ATS_PARAMS { + NvU32 notUsed; +} NV2080_CTRL_INTERNAL_NVLINK_ENABLE_SYSMEM_NVLINK_ATS_PARAMS; + +#define 
NV2080_CTRL_CMD_INTERNAL_NVLINK_ENABLE_SYSMEM_NVLINK_ATS (0x20800a97U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_ENABLE_SYSMEM_NVLINK_ATS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK + * + * Get the mask of Nvlink links connected to system + * + * [Out] sysmemLinkMask + * Mask of Nvlink links connected to system + */ +#define NV2080_CTRL_INTERNAL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS_MESSAGE_ID (0xABU) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS { + NvU32 sysmemLinkMask; +} NV2080_CTRL_INTERNAL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK (0x20800aabU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_SEND_CMC_LIBOS_BUFFER_INFO + * + * Send CMC gsplite the address and size of the log buffer allocated on sysmem + * + * [In] PublicId + * ID of the gsplite engine + * [In] logBufferSize + * Size of the log buffer allocated on sysmem + * [In] logBufferAddr + * Start address of the log buffer + */ +#define NV2080_CTRL_INTERNAL_SEND_CMC_LIBOS_BUFFER_INFO_PARAMS_MESSAGE_ID (0x89U) + +typedef struct NV2080_CTRL_INTERNAL_SEND_CMC_LIBOS_BUFFER_INFO_PARAMS { + NvU32 PublicId; + NV_DECLARE_ALIGNED(NvU64 logBufferSize, 8); + NV_DECLARE_ALIGNED(NvU64 logBufferAddr, 8); +} NV2080_CTRL_INTERNAL_SEND_CMC_LIBOS_BUFFER_INFO_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_SEND_CMC_LIBOS_BUFFER_INFO (0x20800a89U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_SEND_CMC_LIBOS_BUFFER_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_HFRP_INSTANCE_SIZE 5 + +/* + * NV2080_CTRL_CMD_INTERNAL_GPU_GET_HFRP_INFO + * + * This command retrieves HFRP info from physical RM + * + * [Out] hfrpPrivBase + * HFRP device PRIV base + * [Out] hfrpIntrCtrlReg + * HFRP intr control base + * + */ +#define NV2080_CTRL_INTERNAL_GPU_GET_HFRP_INFO_PARAMS_MESSAGE_ID (0x7AU) + +typedef struct NV2080_CTRL_INTERNAL_GPU_GET_HFRP_INFO_PARAMS { + NvU32 hfrpPrivBase[NV2080_CTRL_INTERNAL_HFRP_INSTANCE_SIZE]; + NvU32 hfrpIntrCtrlReg[NV2080_CTRL_INTERNAL_HFRP_INSTANCE_SIZE]; +} NV2080_CTRL_INTERNAL_GPU_GET_HFRP_INFO_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_GPU_GET_HFRP_INFO (0x20800a7aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPU_GET_HFRP_INFO_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_SEND_CMC_UMD_API_OP_PARAMS + * + * Send UMD API operations to CMC + * + * [in] opType + * UMD Task construct/destroy operation to perform on CMC + * [in] cmcHandle + * Handle of CmcUmdApi object for CMC to identify + * [in] ringBufferSize + * Size of ring buffer in bytes + * [in] ringBufferOffset + * Offset of ring buffer + * [in] userdPa + * PA of USERD memory + * [in] userdVa + * VA of USERD memory + * [in] instBlkAddr + * Address of instance block holding VASpace PDB info + * [in] instBlkAperture + * Aperture of instance block + * [in] userdAperture + * Aperture of USERD memory + * [out] workSubmitToken + * WorkSubmitToken generated by CMC for UMD_API object + * + */ +#define NV2080_CTRL_INTERNAL_SEND_CMC_UMD_API_OP_PARAMS_MESSAGE_ID (0x7CU) + +typedef struct NV2080_CTRL_INTERNAL_SEND_CMC_UMD_API_OP_PARAMS { + NvU32 opType; + NvU32 ringBufferSize; + NV_DECLARE_ALIGNED(NvU64 ringBufferOffset, 8); + NV_DECLARE_ALIGNED(NvU64 userdPa, 
8); + NV_DECLARE_ALIGNED(NvU64 userdVa, 8); + NV_DECLARE_ALIGNED(NvU64 instBlkAddr, 8); + NvU8 instBlkAperture; + NvU8 userdAperture; + NvU32 workSubmitToken; + NvU32 cmcHandle; +} NV2080_CTRL_INTERNAL_SEND_CMC_UMD_API_OP_PARAMS; + +#define NV2080_INTERNAL_CMC_UMD_API_TASK_CONSTRUCT 0x0U +#define NV2080_INTERNAL_CMC_UMD_API_TASK_DESTROY 0x1U + +#define NV2080_CTRL_CMD_INTERNAL_SEND_CMC_UMD_API_OP (0x20800a7cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_SEND_CMC_UMD_API_OP_PARAMS_MESSAGE_ID" */ + + +/* +* NV2080_CTRL_CMD_INTERNAL_NVLINK_REPLAY_SUPPRESSED_ERRORS +* +* Request from CPU-RM to process suppressed errors during boot on GSP +* This command accepts no parameters. +*/ +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_REPLAY_SUPPRESSED_ERRORS (0x20800b01U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_2_INTERFACE_ID << 8) | 0x1" */ + + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER_V2 + */ + + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_SM_ISSUE_RATE_MODIFIER_V2 { + NvU32 smIssueRateModifierListSize; + NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2 smIssueRateModifierList[NV2080_CTRL_GR_SM_ISSUE_RATE_MODIFIER_V2_MAX_LIST_SIZE]; +} NV2080_CTRL_INTERNAL_STATIC_GR_SM_ISSUE_RATE_MODIFIER_V2; + +#define NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_V2_PARAMS_MESSAGE_ID (0x02U) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_V2_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_SM_ISSUE_RATE_MODIFIER_V2 smIssueRateModifierV2[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_V2_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_SM_ISSUE_RATE_MODIFIER_V2 (0x20800b03) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_2_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_SM_ISSUE_RATE_MODIFIER_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_STATIC_KGR_GET_SM_ISSUE_RATE_MODIFIER_V2_PARAMS_MESSAGE_ID (0x03U) + +typedef NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_V2_PARAMS NV2080_CTRL_INTERNAL_STATIC_KGR_GET_SM_ISSUE_RATE_MODIFIER_V2_PARAMS; + +/* ctrl2080internal_h */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h new file mode 100644 index 0000000..707a256 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h @@ -0,0 +1,100 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080lpwr.finn +// + + + +/*! + * NV2080_CTRL_CMD_LPWR_DIFR_CTRL + * + * This command is used to control the DIFR + * feature behavior. + * + */ +#define NV2080_CTRL_CMD_LPWR_DIFR_CTRL (0x20802801) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_LPWR_INTERFACE_ID << 8) | NV2080_CTRL_CMD_LPWR_DIFR_CTRL_PARAMS_MESSAGE_ID" */ + +/*! + * @brief Various Values for control + */ +// Disable the DIFR +#define NV2080_CTRL_LPWR_DIFR_CTRL_DISABLE (0x00000001) +// Enable the DIFR +#define NV2080_CTRL_LPWR_DIFR_CTRL_ENABLE (0x00000002) + +// Support status for DIFR +#define NV2080_CTRL_LPWR_DIFR_CTRL_SUPPORT_STATUS (0x00000003) + +/*! + * Structure containing DIFR control call Parameters + */ +#define NV2080_CTRL_CMD_LPWR_DIFR_CTRL_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_CMD_LPWR_DIFR_CTRL_PARAMS { + NvU32 ctrlParamVal; +} NV2080_CTRL_CMD_LPWR_DIFR_CTRL_PARAMS; + +// Values for the SUPPORT Control Status +#define NV2080_CTRL_LPWR_DIFR_SUPPORTED (0x00000001) +#define NV2080_CTRL_LPWR_DIFR_NOT_SUPPORTED (0x00000002) + +/*! + * NV2080_CTRL_CMD_LPWR_DIFR_PREFETCH_RESPONSE + * + * This control call is used to send the prefetch response + * + */ +#define NV2080_CTRL_CMD_LPWR_DIFR_PREFETCH_RESPONSE (0x20802802) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_LPWR_INTERFACE_ID << 8) | NV2080_CTRL_CMD_LPWR_DIFR_PREFETCH_RESPONSE_PARAMS_MESSAGE_ID" */ + +/*! + * @brief Various Values of Responses for Prefetch Status + */ + +// Prefetch is successful. +#define NV2080_CTRL_LPWR_DIFR_PREFETCH_SUCCESS (0x00000001) +// OS flips are enabled, so prefetch cannot be done. +#define NV2080_CTRL_LPWR_DIFR_PREFETCH_FAIL_OS_FLIPS_ENABLED (0x00000002) +// Current Display surface cannot fit in L2 +#define NV2080_CTRL_LPWR_DIFR_PREFETCH_FAIL_INSUFFICIENT_L2_SIZE (0x00000003) +// Fatal and unrecoverable error +#define NV2080_CTRL_LPWR_DIFR_PREFETCH_FAIL_CE_HW_ERROR (0x00000004) + +/*! + * Structure containing DIFR prefetch response control call Parameters + */ +#define NV2080_CTRL_CMD_LPWR_DIFR_PREFETCH_RESPONSE_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_CMD_LPWR_DIFR_PREFETCH_RESPONSE_PARAMS { + NvU32 responseVal; +} NV2080_CTRL_CMD_LPWR_DIFR_PREFETCH_RESPONSE_PARAMS; + + +// _ctrl2080lpwr_h_ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h new file mode 100644 index 0000000..106aff9 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h @@ -0,0 +1,341 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080mc.finn +// + +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "nvcfg_sdk.h" + +/* NV20_SUBDEVICE_XX mc control commands and parameters */ + +/** + * NV2080_CTRL_CMD_MC_GET_ARCH_INFO + * + * This command returns chip architecture information from the + * master control engine in the specified GPU. + * + * architecture + * This parameter specifies the architecture level for the GPU. + * implementation + * This parameter specifies the implementation of the architecture + * for the GPU. + * revision + * This parameter specifies the revision of the mask used to produce + * the GPU. + * subRevision + * This parameter specifies the sub revision of the GPU. 
Value is one of + * NV2080_CTRL_MC_ARCH_INFO_SUBREVISION_* + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_MC_GET_ARCH_INFO (0x20801701) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS { + NvU32 architecture; + NvU32 implementation; + NvU32 revision; + NvU8 subRevision; +} NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS; + +/* valid architecture values */ + +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_T23X (0xE0000023) + + +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_TU100 (0x00000160) +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GA100 (0x00000170) +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GH100 (0x00000180) +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_AD100 (0x00000190) +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GB100 (0x000001A0) + + + +/* valid ARCHITECTURE_T23X implementation values */ + + +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_T234 (0x00000004) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_T234D (0x00000005) + + +/* valid ARCHITECTURE_TU10x implementation values */ +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU100 (0x00000000) + + +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU102 (0x00000002) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU104 (0x00000004) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU106 (0x00000006) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU116 (0x00000008) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU117 (0x00000007) + + +/* valid ARCHITECTURE_GA10x implementation values */ +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA100 (0x00000000) + + +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA102 (0x00000002) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA103 (0x00000003) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA104 (0x00000004) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA106 (0x00000006) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA107 (0x00000007) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA10B (0x0000000B) + + +/* valid ARCHITECTURE_GH10x implementation values */ +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GH100 (0x00000000) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GH100_SOC (0x00000001) + +/* valid ARCHITECTURE_AD10x implementation values */ +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_AD100 (0x00000000) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_AD000 (0x00000001) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_AD101 (0x00000001) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_AD102 (0x00000002) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_AD103 (0x00000003) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_AD104 (0x00000004) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_AD106 (0x00000006) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_AD107 (0x00000007) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_AD10B (0x0000000B) +/* valid ARCHITECTURE_GB10x implementation values */ +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GB100 (0x00000000) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GB102 (0x00000002) + + + +/* Valid Chip sub revisions */ +#define NV2080_CTRL_MC_ARCH_INFO_SUBREVISION_NO_SUBREVISION (0x00000000) +#define NV2080_CTRL_MC_ARCH_INFO_SUBREVISION_P (0x00000001) +#define NV2080_CTRL_MC_ARCH_INFO_SUBREVISION_Q (0x00000002) +#define 
NV2080_CTRL_MC_ARCH_INFO_SUBREVISION_R (0x00000003) + +/* + * NV2080_CTRL_CMD_MC_SERVICE_INTERRUPTS + * + * This command instructs the RM to service interrupts for the specified + * engine(s). + * + * engines + * This parameter specifies which engines should have their interrupts + * serviced. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_MC_SERVICE_INTERRUPTS (0x20801702) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MC_ENGINE_ID_GRAPHICS 0x00000001 +#define NV2080_CTRL_MC_ENGINE_ID_ALL 0xFFFFFFFF + +#define NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS { + NvU32 engines; +} NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS; + + +/* + * NV2080_CTRL_CMD_MC_GET_MANUFACTURER + * + * This command returns the GPU manufacturer information for the associated + * subdevice. + * + * manufacturer + * This parameter returns the manufacturer value for the GPU. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_MC_GET_MANUFACTURER (0x20801703) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS { + NvU32 manufacturer; +} NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS; + + + +/* + * NV2080_CTRL_CMD_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP + * + * This call will set up RM to either service or ignore the + * replayable fault interrupt. + * This is a privileged call that can only be called by the UVM driver + * when it will take ownership of the replayable fault interrupt. + * + * Possible status values returned are: + * NVOS_STATUS_SUCCESS + * NVOS_STATUS_ERROR_INVALID_ARGUMENT + * NVOS_STATUS_ERROR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP (0x2080170c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS { + NvBool bOwnedByRm; +} NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS; + +/* + * NV2080_CTRL_CMD_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS + * + * This command gets the notification interrupt vectors for all VGPU engines from Host RM. + * + * Parameters: + * + * entries [out] + * A buffer to store up to MAX_ENGINES entries of type + * NV2080_CTRL_MC_ENGINE_NOTIFICATION_INTR_VECTOR_ENTRY. + * + * numEntries [out] + * Number of populated entries in the provided buffer. 
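+ *
+ * A consumption sketch (hypothetical caller code; 'params' is assumed to
+ * have been filled in by this control call):
+ *
+ *     NvU32 i;
+ *     for (i = 0; i < params.numEntries; i++)
+ *     {
+ *         NvU32 engineType = params.entries[i].nv2080EngineType;
+ *         NvU32 intrVector = params.entries[i].notificationIntrVector;
+ *         // record the engineType -> intrVector pairing in the guest's
+ *         // interrupt bookkeeping
+ *     }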
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS (0x2080170d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_MAX_ENGINES 256 + +typedef struct NV2080_CTRL_MC_ENGINE_NOTIFICATION_INTR_VECTOR_ENTRY { + NvU32 nv2080EngineType; + NvU32 notificationIntrVector; +} NV2080_CTRL_MC_ENGINE_NOTIFICATION_INTR_VECTOR_ENTRY; + +#define NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_PARAMS_MESSAGE_ID (0xDU) + +typedef struct NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_PARAMS { + NvU32 numEntries; + NV2080_CTRL_MC_ENGINE_NOTIFICATION_INTR_VECTOR_ENTRY entries[NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_MAX_ENGINES]; +} NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_PARAMS; + +/* + * NV2080_CTRL_CMD_MC_GET_STATIC_INTR_TABLE + * + * This command gets the static interrupts needed by VGPU from Host RM. + * + * Parameters: + * + * entries [out] + * A buffer to store up to NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_MAX + * entries of type NV2080_CTRL_MC_STATIC_INTR_ENTRY. + * + * numEntries [out] + * Number of populated entries in the provided buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_MC_GET_STATIC_INTR_TABLE (0x2080170e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_MAX 32 + +// Interface defines for static MC_ENGINE_IDX defines +#define NV2080_INTR_TYPE_NULL (0x00000000) +#define NV2080_INTR_TYPE_NON_REPLAYABLE_FAULT (0x00000001) +#define NV2080_INTR_TYPE_NON_REPLAYABLE_FAULT_ERROR (0x00000002) +#define NV2080_INTR_TYPE_INFO_FAULT (0x00000003) +#define NV2080_INTR_TYPE_REPLAYABLE_FAULT (0x00000004) +#define NV2080_INTR_TYPE_REPLAYABLE_FAULT_ERROR (0x00000005) +#define NV2080_INTR_TYPE_ACCESS_CNTR (0x00000006) +#define NV2080_INTR_TYPE_TMR (0x00000007) +#define NV2080_INTR_TYPE_CPU_DOORBELL (0x00000008) +#define NV2080_INTR_TYPE_GR0_FECS_LOG (0x00000009) +#define NV2080_INTR_TYPE_GR1_FECS_LOG (0x0000000A) +#define NV2080_INTR_TYPE_GR2_FECS_LOG (0x0000000B) +#define NV2080_INTR_TYPE_GR3_FECS_LOG (0x0000000C) +#define NV2080_INTR_TYPE_GR4_FECS_LOG (0x0000000D) +#define NV2080_INTR_TYPE_GR5_FECS_LOG (0x0000000E) +#define NV2080_INTR_TYPE_GR6_FECS_LOG (0x0000000F) +#define NV2080_INTR_TYPE_GR7_FECS_LOG (0x00000010) + +typedef struct NV2080_CTRL_MC_STATIC_INTR_ENTRY { + NvU32 nv2080IntrType; + NvU32 pmcIntrMask; + NvU32 intrVectorStall; + NvU32 intrVectorNonStall; +} NV2080_CTRL_MC_STATIC_INTR_ENTRY; + +#define NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_PARAMS { + NvU32 numEntries; + NV2080_CTRL_MC_STATIC_INTR_ENTRY entries[NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_MAX]; +} NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_PARAMS; + + +/*! + * Categories of interrupts. + * + * Each of these categories gets a separate range of interrupt subtrees (top + * level bits) corresponding to a set of interrupt leaves. + * Interrupt leaves may overlap between two or more categories. + * Interrupt leaves may or may not be contiguous. 
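+ *
+ * A decoding sketch (an assumption-laden illustration: 'map' stands for a
+ * NV2080_CTRL_MC_GET_INTR_CATEGORY_SUBTREE_MAP_PARAMS filled in by the
+ * control call defined below, subtreeMask is read as a plain bitmask of
+ * top-level subtree indices, and __builtin_ctzll is GCC/Clang-specific):
+ *
+ *     NvU64 mask = map.subtreeMap[NV2080_INTR_CATEGORY_UVM_OWNED].subtreeMask;
+ *     while (mask != 0)
+ *     {
+ *         NvU32 subtree = (NvU32)__builtin_ctzll(mask); // lowest set bit
+ *         mask &= (mask - 1);                           // clear that bit
+ *         // top-level subtree 'subtree' falls in the UVM-owned category
+ *     }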
+ */ +typedef enum NV2080_INTR_CATEGORY { + NV2080_INTR_CATEGORY_DEFAULT = 0, + NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1, + NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2, + NV2080_INTR_CATEGORY_RUNLIST = 3, + NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4, + NV2080_INTR_CATEGORY_UVM_OWNED = 5, + NV2080_INTR_CATEGORY_UVM_SHARED = 6, + NV2080_INTR_CATEGORY_ENUM_COUNT = 7, +} NV2080_INTR_CATEGORY; + +#define NV2080_INTR_INVALID_SUBTREE NV_U8_MAX + +typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP { + // Maximum possible 64 subtrees, but 16 is enough for any existing silicon. + NV_DECLARE_ALIGNED(NvU64 subtreeMask, 8); +} NV2080_INTR_CATEGORY_SUBTREE_MAP; + +/* + * NV2080_CTRL_CMD_MC_GET_INTR_CATEGORY_SUBTREE_MAP + * + * This command gets, from Host RM, the mapping from each interrupt category + * to the subtrees it uses. + */ +#define NV2080_CTRL_CMD_MC_GET_INTR_CATEGORY_SUBTREE_MAP (0x2080170f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_GET_INTR_CATEGORY_SUBTREE_MAP_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MC_GET_INTR_CATEGORY_SUBTREE_MAP_PARAMS_MESSAGE_ID (0xFU) + +typedef struct NV2080_CTRL_MC_GET_INTR_CATEGORY_SUBTREE_MAP_PARAMS { + NV_DECLARE_ALIGNED(NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT], 8); +} NV2080_CTRL_MC_GET_INTR_CATEGORY_SUBTREE_MAP_PARAMS; + +/* _ctrl2080mc_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h new file mode 100644 index 0000000..0954acf --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h @@ -0,0 +1,385 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080nvd.finn +// + +#include "ctrl/ctrl2080/ctrl2080base.h" + +#include "ctrl/ctrlxxxx.h" +/* + * NV2080_CTRL_CMD_NVD_GET_DUMP_SIZE + * + * This command gets the expected dump size of a particular GPU dump component. + * Note that events that occur between this command and a later + * NV2080_CTRL_CMD_NVD_GET_DUMP command could alter the size of + * the buffer required. + * + * component + * One of NVDUMP_COMPONENT < 0x400 defined in nvdump.h to estimate + * the size of. + * size + * This parameter returns the expected size. 
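+ *
+ * The intended two-phase pattern, sketched (the allocator and the control
+ * call dispatch are assumptions here, not part of this header):
+ *
+ *     NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS szParams = { 0 };
+ *     szParams.component = myComponent;   // some NVDUMP_COMPONENT < 0x400
+ *     // ...issue NV2080_CTRL_CMD_NVD_GET_DUMP_SIZE...
+ *     void *pBuf = malloc(szParams.size); // then issue the GET_DUMP command
+ *                                         // below with the same component;
+ *                                         // the required size may have
+ *                                         // grown in the meantime.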
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT if component is invalid. + * + */ + +#define NV2080_CTRL_CMD_NVD_GET_DUMP_SIZE (0x20802401) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVD_INTERFACE_ID << 8) | NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS { + NvU32 component; + NvU32 size; +} NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS; + +/* + * NV2080_CTRL_CMD_NVD_GET_DUMP + * + * This command gets a dump of a particular GPU dump component. If triggers + * is non-zero, the command waits for the trigger to occur + * before it returns. + * + * pBuffer + * This parameter points to the buffer for the data. + * component + * One of NVDUMP_COMPONENT < 0x400 defined in nvdump.h to select + * for dumping. + * size + * On entry, this parameter specifies the maximum length for + * the returned data. On exit, it specifies the number of bytes + * returned. + * + * Possible status values returned are: + * NV_OK + * NVOS_ERROR_INVALID_ARGUMENT if component is invalid. + * NVOS_ERROR_INVALID_ADDRESS if pBuffer is invalid + * NVOS_ERROR_INVALID_???? if the buffer was too small + * + * + */ +#define NV2080_CTRL_CMD_NVD_GET_DUMP (0x20802402) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVD_INTERFACE_ID << 8) | NV2080_CTRL_NVD_GET_DUMP_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVD_GET_DUMP_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_NVD_GET_DUMP_PARAMS { + NV_DECLARE_ALIGNED(NvP64 pBuffer, 8); + NvU32 component; + NvU32 size; +} NV2080_CTRL_NVD_GET_DUMP_PARAMS; + +/* + * NV2080_CTRL_CMD_NVD_GET_NOCAT_JOURNAL + * + * This command returns the contents of the Journal used by NOCAT, and + * optionally clears the data + * + * clear: + * [IN] indicates whether the data should be cleared after reporting + * + * JournalRecords : + * [OUT] an array of Journal records reported. + * + * outstandingAssertCount: + * [OUT] number of asserts that remain to be reported on. + * + * reportedAssertCount: + * [OUT] the number of asserts contained in the report + * + * asserts: + * [OUT] an array of up to NV2080_NOCAT_JOURNAL_MAX_ASSERT_RECORDS assert reports + */ + + +#define NV2080_CTRL_CMD_NVD_GET_NOCAT_JOURNAL (0x20802409) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVD_INTERFACE_ID << 8) | NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS_MESSAGE_ID" */ + +#define NV2080_NOCAT_JOURNAL_MAX_DIAG_BUFFER 1024 +#define NV2080_NOCAT_JOURNAL_MAX_STR_LEN 65 +#define NV2080_NOCAT_JOURNAL_MAX_JOURNAL_RECORDS 10 +#define NV2080_NOCAT_JOURNAL_MAX_ASSERT_RECORDS 32 + +// structure to hold clock details. +typedef struct NV2080_NOCAT_JOURNAL_OVERCLOCK_DETAILS { + NvS32 userMinOffset; + NvS32 userMaxOffset; + NvU32 factoryMinOffset; + NvU32 factoryMaxOffset; + NvU32 lastActiveClock; + NvU32 lastActiveVolt; + NvU32 lastActivePoint; + NvU32 kappa; +} NV2080_NOCAT_JOURNAL_OVERCLOCK_DETAILS; + + +// structure to hold clock configuration & state. +typedef struct NV2080_NOCAT_JOURNAL_OVERCLOCK_CFG { + NvU32 pstateVer; + NV2080_NOCAT_JOURNAL_OVERCLOCK_DETAILS gpcOverclock; + NV2080_NOCAT_JOURNAL_OVERCLOCK_DETAILS mclkOverclock; + NvBool bUserOverclocked; + NvBool bFactoryOverclocked; +} NV2080_NOCAT_JOURNAL_OVERCLOCK_CFG; + +// structure to hold the GPU context at the time of the report. 
+typedef struct NV2080_NOCAT_JOURNAL_GPU_STATE { + NvBool bValid; + NvU32 strap; + NvU16 deviceId; + NvU16 vendorId; + NvU16 subsystemVendor; + NvU16 subsystemId; + NvU16 revision; + NvU16 type; + NvU32 vbiosVersion; + NvBool bOptimus; + NvBool bMsHybrid; + NvBool bFullPower; + NvU32 vbiosOemVersion; + NvU16 memoryType; + NvU8 tag[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvU8 vbiosProject[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvBool bInFullchipReset; + NvBool bInSecBusReset; + NvBool bInGc6Reset; + NV2080_NOCAT_JOURNAL_OVERCLOCK_CFG overclockCfg; +} NV2080_NOCAT_JOURNAL_GPU_STATE; + +#define NV2080_NOCAT_JOURNAL_REC_TYPE_UNKNOWN 0 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_BUGCHECK 1 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_ENGINE 2 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_TDR 3 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_RC 4 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_ASSERT 5 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_ANY 6 + +// this should be relative to the highest type value +#define NV2080_NOCAT_JOURNAL_REC_TYPE_COUNT (0x7) /* finn: Evaluated from "NV2080_NOCAT_JOURNAL_REC_TYPE_ANY + 1" */ +typedef struct NV2080_NOCAT_JOURNAL_ENTRY { + NvU8 recType; + NvU32 bugcheck; + NvU32 tdrBucketId; + NvU8 source[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvU32 subsystem; + NV_DECLARE_ALIGNED(NvU64 errorCode, 8); + NvU32 diagBufferLen; + NvU8 diagBuffer[NV2080_NOCAT_JOURNAL_MAX_DIAG_BUFFER]; + NvU8 faultingEngine[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvU32 mmuFaultType; + NvU32 mmuErrorSrc; + NvU8 tdrReason[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; +} NV2080_NOCAT_JOURNAL_ENTRY; + +typedef struct NV2080_NOCAT_JOURNAL_RECORD { + NvU32 GPUTag; + NV_DECLARE_ALIGNED(NvU64 loadAddress, 8); + NV_DECLARE_ALIGNED(NvU64 timeStamp, 8); + NV_DECLARE_ALIGNED(NvU64 stateMask, 8); + NV2080_NOCAT_JOURNAL_GPU_STATE nocatGpuState; + NV_DECLARE_ALIGNED(NV2080_NOCAT_JOURNAL_ENTRY nocatJournalEntry, 8); +} NV2080_NOCAT_JOURNAL_RECORD; + +// NOCAT activity counter indexes +// collection activity +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECT_REQ_IDX 0 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_GRANDFATHERED_RECORD_IDX 1 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_ALLOCATED_IDX 2 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECTED_IDX 3 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_NOTIFICATIONS_IDX 4 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_NOTIFICATION_FAIL_IDX 5 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_ALLOC_FAILED_IDX 6 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECT_FAILED_IDX 7 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECT_LOCKED_OUT_IDX 8 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_CTRL_INSERT_RECORDS_IDX 9 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_RPC_INSERT_RECORDS_IDX 10 + +// Journal Lock activity +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_JOURNAL_LOCKED_IDX 11 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_JOURNAL_LOCK_UPDATED_IDX 12 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_JOURNAL_UNLOCKED_IDX 13 + +// lookup activity +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_NO_RECORDS_IDX 14 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BAD_BUFFER_IDX 15 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_MATCH_FOUND_IDX 16 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_NO_MATCH_IDX 17 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_CLOSEST_FOUND_IDX 18 + +// reporting activity +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_REQUESTED_IDX 19 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_REPORTED_IDX 20 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_DROPPED_IDX 21 + +// update activity +#define 
NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_UPDATE_REQ_IDX 22 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_UPDATED_IDX 23 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_UPDATE_FAILED_IDX 24 + +// general errors +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BUSY_IDX 25 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BAD_PARAM_IDX 26 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BAD_TYPE_IDX 27 + +// reserved entries for temporary use. +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_RES4_IDX 28 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_RES3_IDX 29 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_RES2_IDX 30 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_RES1_IDX 31 + +// this should be relative to the highest counter index +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COUNTER_COUNT (0x20) /* finn: Evaluated from "NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_RES1_IDX + 1" */ + +#define NV2080_CTRL_NOCAT_GET_COUNTERS_ONLY 0:0 +#define NV2080_CTRL_NOCAT_GET_COUNTERS_ONLY_YES 1 +#define NV2080_CTRL_NOCAT_GET_COUNTERS_ONLY_NO 0 + +#define NV2080_CTRL_NOCAT_GET_RESET_COUNTERS 1:1 +#define NV2080_CTRL_NOCAT_GET_RESET_COUNTERS_YES 1 +#define NV2080_CTRL_NOCAT_GET_RESET_COUNTERS_NO 0 + + +#define NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS { + NvU32 flags; + NvU32 nocatRecordCount; + NvU32 nocatOutstandingRecordCount; + NV_DECLARE_ALIGNED(NV2080_NOCAT_JOURNAL_RECORD journalRecords[NV2080_NOCAT_JOURNAL_MAX_JOURNAL_RECORDS], 8); + NvU32 activityCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COUNTER_COUNT]; + NvU8 reserved[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; +} NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS; + + /* + * NV2080_CTRL_CMD_NVD_SET_NOCAT_JOURNAL_DATA + * + * This command reports the TDR data collected by KMD to be added to the + * nocat record + * + * dataType + * [IN] specifies the type of data provided. + * targetRecordType + * [IN] specifies record type the data is intended for. + * nocatJournalData + * [IN] specifies the data to be added. 
+ */
+
+#define NV2080_CTRL_CMD_NVD_SET_NOCAT_JOURNAL_DATA (0x2080240b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVD_INTERFACE_ID << 8) | NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS_MESSAGE_ID" */
+
+// data types & structures
+#define NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_EMPTY      0
+#define NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_TDR_REASON 1
+#define NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_SET_TAG    2
+#define NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_RCLOG      3
+
+#define NV2080_CTRL_NOCAT_TDR_TYPE_NONE             0
+#define NV2080_CTRL_NOCAT_TDR_TYPE_LEGACY           1
+#define NV2080_CTRL_NOCAT_TDR_TYPE_FULLCHIP         2
+#define NV2080_CTRL_NOCAT_TDR_TYPE_BUSRESET         3
+#define NV2080_CTRL_NOCAT_TDR_TYPE_GC6_RESET        4
+#define NV2080_CTRL_NOCAT_TDR_TYPE_SURPRISE_REMOVAL 5
+#define NV2080_CTRL_NOCAT_TDR_TYPE_UCODE_RESET      6
+#define NV2080_CTRL_NOCAT_TDR_TYPE_GPU_RC_RESET     7
+#define NV2080_CTRL_NOCAT_TDR_TYPE_TEST             8
+
+typedef struct NV2080CtrlNocatJournalDataTdrReason {
+    NvU32 flags;
+    NvU8  source[NV2080_NOCAT_JOURNAL_MAX_STR_LEN];
+    NvU32 subsystem;
+    NV_DECLARE_ALIGNED(NvU64 errorCode, 8);
+    NvU32 reasonCode;
+} NV2080CtrlNocatJournalDataTdrReason;
+
+#define NV2080_CTRL_NOCAT_TAG_CLEAR     0:0
+#define NV2080_CTRL_NOCAT_TAG_CLEAR_YES 1
+#define NV2080_CTRL_NOCAT_TAG_CLEAR_NO  0
+typedef struct NV2080CtrlNocatJournalSetTag {
+    NvU32 flags;
+    NvU8  tag[NV2080_NOCAT_JOURNAL_MAX_STR_LEN];
+} NV2080CtrlNocatJournalSetTag;
+
+typedef struct NV2080CtrlNocatJournalRclog {
+    NvU32 flags;
+    NvU32 rclogSize;    // rclog size
+    NvU32 rmGpuId;      // RMGpuId associated with the adapter
+    NvU32 APIType;      // API Type (dx9, dx1x, ogl, etc.)
+    NvU32 contextType;  // Context type (OGL, DX, etc.)
+    NvU32 exceptType;   // ROBUST_CHANNEL_* error identifier
+    NvU8  processImageName[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; // process image name (without path)
+} NV2080CtrlNocatJournalRclog;
+
+#define NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS_MESSAGE_ID (0xBU)
+
+typedef struct NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS {
+    NvU32 dataType;
+    NvU32 targetRecordType;
+    union {
+        NV_DECLARE_ALIGNED(NV2080CtrlNocatJournalDataTdrReason tdrReason, 8);
+        NV2080CtrlNocatJournalSetTag tagData;
+        NV2080CtrlNocatJournalRclog  rclog;
+    } nocatJournalData;
+} NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS;
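+
+/*
+ * Editor's illustrative sketch, not part of the generated header: reporting a
+ * legacy TDR reason through NV2080_CTRL_CMD_NVD_SET_NOCAT_JOURNAL_DATA. Only
+ * the union member selected by dataType is consumed.
+ */
+#if 0
+static void exampleFillTdrReason(NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS *pParams)
+{
+    pParams->dataType         = NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_TDR_REASON;
+    pParams->targetRecordType = NV2080_NOCAT_JOURNAL_REC_TYPE_TDR;
+    pParams->nocatJournalData.tdrReason.reasonCode = NV2080_CTRL_NOCAT_TDR_TYPE_LEGACY;
+    /* source, subsystem and errorCode identify the reporting component */
+    pParams->nocatJournalData.tdrReason.subsystem = 0;
+    pParams->nocatJournalData.tdrReason.errorCode = 0;
+}
+#endif
+
+ /*
+ * NV2080_CTRL_CMD_NVD_INSERT_NOCAT_JOURNAL_RECORD
+ *
+ * This command inserts a NOCAT Journal record from an outside component.
+ *
+ *   nocatJournalData
+ *     [IN] specifies the data to be added.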
+ */
+
+#define NV2080_CTRL_CMD_NVD_INSERT_NOCAT_JOURNAL_RECORD (0x2080240c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVD_INTERFACE_ID << 8) | NV2080_CTRL_CMD_NVD_INSERT_NOCAT_JOURNAL_RECORD_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_NOCAT_INSERT_ALLOW_NULL_STR         0:0
+#define NV2080_CTRL_NOCAT_INSERT_ALLOW_NULL_STR_YES     1
+#define NV2080_CTRL_NOCAT_INSERT_ALLOW_NULL_STR_NO      0
+#define NV2080_CTRL_NOCAT_INSERT_ALLOW_0_LEN_BUFFER     1:1
+#define NV2080_CTRL_NOCAT_INSERT_ALLOW_0_LEN_BUFFER_YES 1
+#define NV2080_CTRL_NOCAT_INSERT_ALLOW_0_LEN_BUFFER_NO  0
+
+typedef struct NV2080CtrlNocatJournalInsertRecord {
+    NvU32 flags;
+    NV_DECLARE_ALIGNED(NvU64 timestamp, 8);
+    NvU8  recType;
+    NvU32 bugcheck;
+    char  source[NV2080_NOCAT_JOURNAL_MAX_STR_LEN];
+    NvU32 subsystem;
+    NV_DECLARE_ALIGNED(NvU64 errorCode, 8);
+    char  faultingEngine[NV2080_NOCAT_JOURNAL_MAX_STR_LEN];
+    NvU32 tdrReason;
+    NvU32 diagBufferLen;
+    NvU8  diagBuffer[NV2080_NOCAT_JOURNAL_MAX_DIAG_BUFFER];
+} NV2080CtrlNocatJournalInsertRecord;
+#define NV2080_CTRL_CMD_NVD_INSERT_NOCAT_JOURNAL_RECORD_PARAMS_MESSAGE_ID (0xCU)
+
+typedef struct NV2080_CTRL_CMD_NVD_INSERT_NOCAT_JOURNAL_RECORD_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080CtrlNocatJournalInsertRecord nocatJournalRecord, 8);
+} NV2080_CTRL_CMD_NVD_INSERT_NOCAT_JOURNAL_RECORD_PARAMS;
+/* _ctrl2080nvd_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h
new file mode 100644
index 0000000..36b365b
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h
@@ -0,0 +1,3820 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file:      ctrl/ctrl2080/ctrl2080nvlink.finn
+//
+
+#include "nvfixedtypes.h"
+#include "ctrl/ctrl2080/ctrl2080base.h"
+#include "ctrl/ctrl2080/ctrl2080nvlink_common.h"
+#include "nvcfg_sdk.h"
+
+/* NV20_SUBDEVICE_XX bus control commands and parameters */
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS
+ *
+ * This command returns the NVLink capabilities supported by the subdevice.
+ *
+ *   capsTbl
+ *     This is a bit field for getting different global caps.
+ *     The individual bitfields are specified by NV2080_CTRL_NVLINK_CAPS_*
+ *   lowestNvlinkVersion
+ *     This field specifies the lowest supported NVLink version for this subdevice.
+ *   highestNvlinkVersion
+ *     This field specifies the highest supported NVLink version for this subdevice.
+ *   lowestNciVersion
+ *     This field specifies the lowest supported NCI version for this subdevice.
+ *   highestNciVersion
+ *     This field specifies the highest supported NCI version for this subdevice.
+ *   discoveredLinkMask // deprecated, use discoveredLinks
+ *     This field provides a bitfield mask of NVLink links discovered on this subdevice.
+ *   enabledLinkMask // deprecated, use enabledLinks
+ *     This field provides a bitfield mask of NVLink links enabled on this subdevice.
+ *   discoveredLinks
+ *     This field provides a bitfield mask of NVLink links discovered on this subdevice.
+ *   enabledLinks
+ *     This field provides a bitfield mask of NVLink links enabled on this subdevice.
+ *
+ */
+#define NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS {
+    NvU32 capsTbl;
+
+    NvU8  lowestNvlinkVersion;
+    NvU8  highestNvlinkVersion;
+    NvU8  lowestNciVersion;
+    NvU8  highestNciVersion;
+
+    NvU32 discoveredLinkMask;
+    NvU32 enabledLinkMask;
+
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK discoveredLinks, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK enabledLinks, 8);
+} NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS;
+
+/* extract cap bit setting from tbl */
+#define NV2080_CTRL_NVLINK_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c))
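+
+/*
+ * Editor's illustrative sketch, not part of the generated header: the
+ * NV2080_CTRL_NVLINK_CAPS_* constants below encode byte_index:bit_mask, so
+ * NV2080_CTRL_NVLINK_GET_CAP() indexes byte (1?c) of a caps table and tests
+ * it against mask (0?c). capsTbl here is assumed to point at
+ * NV2080_CTRL_NVLINK_CAPS_TBL_SIZE bytes.
+ */
+#if 0
+static NvBool exampleNvlinkP2pSupported(const NvU8 *capsTbl)
+{
+    /* NV2080_CTRL_NVLINK_CAPS_P2P_SUPPORTED is byte 0, mask 0x02 */
+    return NV2080_CTRL_NVLINK_GET_CAP(capsTbl,
+                                      NV2080_CTRL_NVLINK_CAPS_P2P_SUPPORTED) != 0U;
+}
+#endif
+
+/*
+ * NV2080_CTRL_NVLINK_CAPS
+ *
+ *   SUPPORTED
+ *     Set if NVLink is present and supported on this subdevice, NV_FALSE otherwise. This field is used for *global* caps only and NOT for per-link caps
+ *   P2P_SUPPORTED
+ *     Set if P2P over NVLink is supported on this subdevice, NV_FALSE otherwise.
+ *   SYSMEM_ACCESS
+ *     Set if sysmem can be accessed over NVLink on this subdevice, NV_FALSE otherwise.
+ *   P2P_ATOMICS
+ *     Set if P2P atomics are supported over NVLink on this subdevice, NV_FALSE otherwise.
+ *   SYSMEM_ATOMICS
+ *     Set if sysmem atomic transactions are supported over NVLink on this subdevice, NV_FALSE otherwise.
+ *   PEX_TUNNELING
+ *     Set if PEX tunneling over NVLink is supported on this subdevice, NV_FALSE otherwise.
+ *   SLI_BRIDGE
+ *     GLOBAL: Set if SLI over NVLink is supported on this subdevice, NV_FALSE otherwise.
+ *     LINK:   Set if SLI over NVLink is supported on a link, NV_FALSE otherwise.
+ *   SLI_BRIDGE_SENSABLE
+ *     GLOBAL: Set if the subdevice is capable of sensing SLI bridges, NV_FALSE otherwise.
+ *     LINK:   Set if the link is capable of sensing an SLI bridge, NV_FALSE otherwise.
+ *   POWER_STATE_L0
+ *     Set if L0 is a supported power state on this subdevice/link, NV_FALSE otherwise.
+ *   POWER_STATE_L1
+ *     Set if L1 is a supported power state on this subdevice/link, NV_FALSE otherwise.
+ *   POWER_STATE_L2
+ *     Set if L2 is a supported power state on this subdevice/link, NV_FALSE otherwise.
+ *   POWER_STATE_L3
+ *     Set if L3 is a supported power state on this subdevice/link, NV_FALSE otherwise.
+ *   VALID
+ *     Set if this link is supported on this subdevice, NV_FALSE otherwise. This field is used for *per-link* caps only and NOT for global caps.
+ *   UNCONTAINED_ERROR_RECOVERY
+ *     Set if this GPU supports resetless recovery from uncontained packet errors.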
+ * + */ + +/* caps format is byte_index:bit_mask */ +#define NV2080_CTRL_NVLINK_CAPS_SUPPORTED 0:0x01 +#define NV2080_CTRL_NVLINK_CAPS_P2P_SUPPORTED 0:0x02 +#define NV2080_CTRL_NVLINK_CAPS_SYSMEM_ACCESS 0:0x04 +#define NV2080_CTRL_NVLINK_CAPS_P2P_ATOMICS 0:0x08 +#define NV2080_CTRL_NVLINK_CAPS_SYSMEM_ATOMICS 0:0x10 +#define NV2080_CTRL_NVLINK_CAPS_PEX_TUNNELING 0:0x20 +#define NV2080_CTRL_NVLINK_CAPS_SLI_BRIDGE 0:0x40 +#define NV2080_CTRL_NVLINK_CAPS_SLI_BRIDGE_SENSABLE 0:0x80 +#define NV2080_CTRL_NVLINK_CAPS_POWER_STATE_L0 1:0x01 +#define NV2080_CTRL_NVLINK_CAPS_POWER_STATE_L1 1:0x02 +#define NV2080_CTRL_NVLINK_CAPS_POWER_STATE_L2 1:0x04 +#define NV2080_CTRL_NVLINK_CAPS_POWER_STATE_L3 1:0x08 +#define NV2080_CTRL_NVLINK_CAPS_VALID 1:0x10 +#define NV2080_CTRL_NVLINK_CAPS_UNCONTAINED_ERROR_RECOVERY 1:0x20 + +/* + * Size in bytes of nvlink caps table. This value should be one greater + * than the largest byte_index value above. + */ +#define NV2080_CTRL_NVLINK_CAPS_TBL_SIZE 2U + +#define NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_INVALID (0x00000000U) +#define NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_1_0 (0x00000001U) +#define NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_2_0 (0x00000002U) +#define NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_2_2 (0x00000004U) +#define NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_3_0 (0x00000005U) +#define NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_3_1 (0x00000006U) +#define NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_4_0 (0x00000007U) +#define NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_5_0 (0x00000008U) +#define NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_INVALID (0x00000000U) +#define NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_1_0 (0x00000001U) +#define NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_2_0 (0x00000002U) +#define NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_2_2 (0x00000004U) +#define NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_3_0 (0x00000005U) +#define NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_3_1 (0x00000006U) +#define NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_4_0 (0x00000007U) +#define NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_5_0 (0x00000008U) +/* + * NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS (0x20803001U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_NVLINK_DEVICE_INFO + * + * This structure stores information about the device to which this link is associated + * + * deviceIdFlags + * Bitmask that specifies which IDs are valid for the device + * Refer NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_* for possible values + * If NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_PCI is set, PCI information is valid + * If NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_UUID is set, UUID is valid + * domain, bus, device, function, pciDeviceId + * PCI information for the device + * deviceType + * Type of the device + * See NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_* for possible values + * deviceUUID + * This field specifies the device UUID of the device. 
Useful for identifying the device (or version)
+ *   fabricRecoveryStatusMask
+ *     This field contains flags which advertise the current GPU-centric health status of the NVLINKs
+ */
+typedef struct NV2080_CTRL_NVLINK_DEVICE_INFO {
+    // ID Flags
+    NvU32 deviceIdFlags;
+
+    // PCI Information
+    NvU32 domain;
+    NvU16 bus;
+    NvU16 device;
+    NvU16 function;
+    NvU32 pciDeviceId;
+
+    // Device Type
+    NV_DECLARE_ALIGNED(NvU64 deviceType, 8);
+
+    // Device UUID
+    NvU8  deviceUUID[16];
+
+    // GPU-centric fabric health
+    NvU32 fabricRecoveryStatusMask;
+} NV2080_CTRL_NVLINK_DEVICE_INFO;
+
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS      31:0
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_NONE (0x00000000U)
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_PCI  (0x00000001U)
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_UUID (0x00000002U)
+
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_EBRIDGE  (0x00000000U)
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_NPU      (0x00000001U)
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_GPU      (0x00000002U)
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_SWITCH   (0x00000003U)
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_TEGRA    (0x00000004U)
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_NONE     (0x000000FFU)
+
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_UUID_INVALID  (0xFFFFFFFFU)
+
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_FABRIC_RECOVERY_STATUS_MASK_GPU_DEGRADED                        0:0
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_FABRIC_RECOVERY_STATUS_MASK_GPU_DEGRADED_FALSE                  (0x00000000U)
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_FABRIC_RECOVERY_STATUS_MASK_GPU_DEGRADED_TRUE                   (0x00000001U)
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_FABRIC_RECOVERY_STATUS_MASK_UNCONTAINED_ERROR_RECOVERY          1:1
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_FABRIC_RECOVERY_STATUS_MASK_UNCONTAINED_ERROR_RECOVERY_INACTIVE (0x00000000U)
+#define NV2080_CTRL_NVLINK_DEVICE_INFO_FABRIC_RECOVERY_STATUS_MASK_UNCONTAINED_ERROR_RECOVERY_ACTIVE   (0x00000001U)
+
+/*
+ * NV2080_CTRL_NVLINK_NVLINK_LINK_STATUS_INFO
+ *
+ * This structure stores the per-link status of different NVLink parameters.
+ *
+ *   capsTbl
+ *     This is a bit field for getting different global caps. The individual bitfields
+ *     are specified by NV2080_CTRL_NVLINK_CAPS_*
+ *   phyType
+ *     This field specifies the type of PHY (NVHS or GRS) being used for this link.
+ *   subLinkWidth
+ *     This field specifies the number of lanes per sublink.
+ *   linkState
+ *     This field specifies the current state of the link.
+ *     See NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_LINK_STATE_* for possible values.
+ *   rxSublinkStatus
+ *     This field specifies the current state of RX sublink.
+ *     See NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_SUBLINK_RX_STATE_* for possible values.
+ *   txSublinkStatus
+ *     This field specifies the current state of TX sublink.
+ *     See NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_SUBLINK_TX_STATE_* for possible values.
+ *   bLaneReversal
+ *     This field indicates that lane reversal is in effect on this link.
+ *   nvlinkVersion
+ *     This field specifies the NVLink version supported by the link.
+ *   nciVersion
+ *     This field specifies the NCI version supported by the link.
+ *   phyVersion
+ *     This field specifies the version of PHY being used by the link.
+ *   nvlinkLineRateMbps
+ *     Bit rate at which bits toggle on wires in megabits per second.
+ *     NOTE: This value is the full speed line rate, not the instantaneous line rate of the link.
+ *   nvlinkLinkClockMhz
+ *     Clock corresponding to link logic in megahertz
+ *   nvlinkRefClkType
+ *     This field specifies whether the link clock is taken from NVHS refclk
+ *     or PEX refclk for the current GPU.
+ *   nvlinkLinkDataRateKiBps
+ *     Effective rate available for transactions after subtracting overhead,
+ *     as seen at Data Layer in kibibytes (1024 bytes) per second.
+ *     Only valid in GA100+, reported as 0 otherwise
+ *     NOTE: Because minion calculates these values, it will only be valid if
+ *     links are in ACTIVE state
+ *   nvlinkRefClkSpeedMhz
+ *     The input reference frequency to the PLL
+ *   connected
+ *     This field specifies if any device is connected on the other end of the link
+ *   loopProperty
+ *     This field specifies if the link is a loopback/loopout link. See NV2080_CTRL_NVLINK_STATUS_LOOP_PROPERTY_* for possible values.
+ *   remoteDeviceLinkNumber
+ *     This field specifies the link number on the remote end of the link
+ *   remoteDeviceInfo
+ *     This field stores the device information for the remote end of the link
+ *   nvlinkMinL1Threshold
+ *     This field stores the Min L1 Threshold of the link
+ *   nvlinkMaxL1Threshold
+ *     This field stores the Max L1 Threshold of the link
+ *   nvlinkL1ThresholdUnits
+ *     This field stores the L1 Threshold Units of the link
+ *
+ */
+typedef struct NV2080_CTRL_NVLINK_LINK_STATUS_INFO {
+    // Top level capabilities
+    NvU32  capsTbl;
+
+    NvU8   phyType;
+    NvU8   subLinkWidth;
+
+    // Link and sublink states
+    NvU32  linkState;
+    NvU8   rxSublinkStatus;
+    NvU8   txSublinkStatus;
+
+    // Indicates that lane reversal is in effect on this link.
+    NvBool bLaneReversal;
+
+    NvU8   nvlinkVersion;
+    NvU8   nciVersion;
+    NvU8   phyVersion;
+
+    // Legacy clock information (to be deprecated)
+    NvU32  nvlinkLinkClockKHz;
+    NvU32  nvlinkCommonClockSpeedKHz;
+    NvU32  nvlinkRefClkSpeedKHz;
+
+    NvU32  nvlinkCommonClockSpeedMhz;
+
+    // Clock Speed and Data Rate Reporting
+    NvU32  nvlinkLineRateMbps;
+    NvU32  nvlinkLinkClockMhz;
+    NvU8   nvlinkRefClkType;
+    NvU32  nvlinkLinkDataRateKiBps;
+    NvU32  nvlinkRefClkSpeedMhz;
+
+    // Connection information
+    NvBool connected;
+    NvU8   loopProperty;
+    NvU8   remoteDeviceLinkNumber;
+    NvU8   localDeviceLinkNumber;
+
+    //
+    // Added as part of NvLink 3.0
+    // Note: SID has link info appended to it when provided by minion
+    //
+    NV_DECLARE_ALIGNED(NvU64 remoteLinkSid, 8);
+    NV_DECLARE_ALIGNED(NvU64 localLinkSid, 8);
+
+    // Ampere+ only
+    NvU32  laneRxdetStatusMask;
+
+    // L1 Threshold Info
+    NvU32  nvlinkMinL1Threshold;
+    NvU32  nvlinkMaxL1Threshold;
+    NvU32  nvlinkL1ThresholdUnits;
+
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_DEVICE_INFO remoteDeviceInfo, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_DEVICE_INFO localDeviceInfo, 8);
+} NV2080_CTRL_NVLINK_LINK_STATUS_INFO;
+
+// NVLink link states
+#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_INIT        (0x00000000U)
+#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_HWCFG       (0x00000001U)
+#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_SWCFG       (0x00000002U)
+#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_ACTIVE      (0x00000003U)
+#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_FAULT       (0x00000004U)
+#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_SLEEP       (0x00000005U)
+#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_RECOVERY    (0x00000006U)
+#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_RECOVERY_AC (0x00000008U)
+#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_RECOVERY_RX (0x0000000aU)
+#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_INVALID     (0xFFFFFFFFU)
+
+// NVLink Rx sublink states
+#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_HIGH_SPEED_1 (0x00000000U)
+// 
TODO: @achaudhry remove SINGLE_LANE define once references switch to LOW_POWER +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_SINGLE_LANE (0x00000004) // Deprecated +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_LOW_POWER (0x00000004) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_TRAINING (0x00000005U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_SAFE_MODE (0x00000006U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_OFF (0x00000007U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_TEST (0x00000008U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_FAULT (0x0000000eU) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_INVALID (0x000000FFU) + +// NVLink Tx sublink states +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_HIGH_SPEED_1 (0x00000000U) +// TODO: @achaudhry remove SINGLE_LANE define once references switch to LOW_POWER +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_SINGLE_LANE (0x00000004) // Deprecated +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_LOW_POWER (0x00000004) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_TRAINING (0x00000005U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_SAFE_MODE (0x00000006U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_OFF (0x00000007U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_TEST (0x00000008U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_FAULT (0x0000000eU) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_INVALID (0x000000FFU) + +#define NV2080_CTRL_NVLINK_STATUS_PHY_NVHS (0x00000001U) +#define NV2080_CTRL_NVLINK_STATUS_PHY_GRS (0x00000002U) +#define NV2080_CTRL_NVLINK_STATUS_PHY_INVALID (0x000000FFU) + +// Version information +#define NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_1_0 (0x00000001U) +#define NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_2_0 (0x00000002U) +#define NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_2_2 (0x00000004U) +#define NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_3_0 (0x00000005U) +#define NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_3_1 (0x00000006U) +#define NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_4_0 (0x00000007U) +#define NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_5_0 (0x00000008U) +#define NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID (0x000000FFU) + +#define NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_1_0 (0x00000001U) +#define NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_2_0 (0x00000002U) +#define NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_2_2 (0x00000004U) +#define NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_3_0 (0x00000005U) +#define NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_3_1 (0x00000006U) +#define NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_4_0 (0x00000007U) +#define NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_5_0 (0x00000008U) +#define NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_INVALID (0x000000FFU) + +#define NV2080_CTRL_NVLINK_STATUS_NVHS_VERSION_1_0 (0x00000001U) +#define NV2080_CTRL_NVLINK_STATUS_NVHS_VERSION_INVALID (0x000000FFU) + +#define NV2080_CTRL_NVLINK_STATUS_GRS_VERSION_1_0 (0x00000001U) +#define NV2080_CTRL_NVLINK_STATUS_GRS_VERSION_INVALID (0x000000FFU) + +// Connection properties +#define NV2080_CTRL_NVLINK_STATUS_CONNECTED_TRUE (0x00000001U) +#define NV2080_CTRL_NVLINK_STATUS_CONNECTED_FALSE (0x00000000U) + +#define NV2080_CTRL_NVLINK_STATUS_LOOP_PROPERTY_LOOPBACK (0x00000001U) +#define NV2080_CTRL_NVLINK_STATUS_LOOP_PROPERTY_LOOPOUT (0x00000002U) +#define NV2080_CTRL_NVLINK_STATUS_LOOP_PROPERTY_NONE (0x00000000U) + +#define NV2080_CTRL_NVLINK_STATUS_REMOTE_LINK_NUMBER_INVALID (0x000000FFU) + +// L1 Threshold Units +typedef enum 
NV2080_CTRL_NVLINK_STATUS_L1_THRESHOLD_UNIT { + NV2080_CTRL_NVLINK_STATUS_L1_THRESHOLD_UNIT_100US = 0, + NV2080_CTRL_NVLINK_STATUS_L1_THRESHOLD_UNIT_50US = 1, +} NV2080_CTRL_NVLINK_STATUS_L1_THRESHOLD_UNIT; + +// NVLink REFCLK types +#define NV2080_CTRL_NVLINK_REFCLK_TYPE_INVALID (0x00U) +#define NV2080_CTRL_NVLINK_REFCLK_TYPE_NVHS (0x01U) +#define NV2080_CTRL_NVLINK_REFCLK_TYPE_PEX (0x02U) + +#define NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS { + NvU32 enabledLinkMask; // (This field will be deprecated in the future, use enabledLinks) + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK enabledLinks, 8); + NvBool bSublinkStateInst; // whether instantaneous sublink state is needed + NvBool bNvleModeEnabled; // whether Nvlink Encryption is enabled or not + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_STATUS_INFO linkInfo[NV2080_CTRL_NVLINK_MAX_ARR_SIZE], 8); +} NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS + * + * enabledLinkMask + * This field specifies the mask of available links on this subdevice. + * bNvleModeEnabled + * This field indicates if Nvlink Encryption is enabled or not. + * linkInfo + * This structure stores the per-link status of different NVLink parameters. The link is identified using an index. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS (0x20803002U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_NVLINK_ERR_INFO + * Error information per link + * + * TLErrlog + * Returns the error mask for NVLINK TL errors + * Used in Pascal + * + * TLIntrEn + * Returns the intr enable mask for NVLINK TL errors + * Used in Pascal + * + * TLCTxErrStatus0 + * Returns the TLC Tx Error Mask 0 + * Used in Volta and later + * + * TLCTxErrStatus1 + * Returns the TLC Tx Error Mask 1 + * Used in Ampere and later + * + * TLCTxSysErrStatus0 + * Returns the TLC Tx Sys Error Mask 0 + * Used in Ampere and later. + * + * TLCRxErrStatus0 + * Returns the TLC Rx Error Mask 0 + * Used in Volta and later + * + * TLCRxErrStatus1 + * Returns the TLC Rx Error Mask 1 + * Used in Volta and later + * + * TLCRxSysErrStatus0 + * Returns the TLC Rx Sys Error Mask 0 + * Used in Ampere and later. 
+ *
+ *   TLCTxErrLogEn0
+ *     Returns the TLC Tx Error Log En 0
+ *     Used in Volta and later
+ *
+ *   TLCTxErrLogEn1
+ *     Returns the TLC Tx Error Log En 1
+ *     Used in Ampere and later
+ *
+ *   TLCTxSysErrLogEn0
+ *     Returns the TLC Tx Sys Error Log En 0
+ *     Used in Ampere and later
+ *
+ *   TLCRxErrLogEn0
+ *     Returns the TLC Rx Error Log En 0
+ *     Used in Volta and later
+ *
+ *   TLCRxErrLogEn1
+ *     Returns the TLC Rx Error Log En 1
+ *     Used in Volta and later
+ *
+ *   TLCRxSysErrLogEn0
+ *     Returns the TLC Rx Sys Error Log En 0
+ *     Used in Ampere and later
+ *
+ *   MIFTxErrStatus0
+ *     Returns the MIF Tx Error Mask 0
+ *     Used in Volta and Turing
+ *
+ *   MIFRxErrStatus0
+ *     Returns the MIF Rx Error Mask 0
+ *     Used in Volta and Turing
+ *
+ *   NVLIPTLnkErrStatus0
+ *     Returns the NVLIPT_LNK Error Mask 0
+ *     Used in Ampere and later
+ *
+ *   NVLIPTLnkErrLogEn0
+ *     Returns the NVLIPT_LNK Log En Mask 0
+ *     Used in Ampere and later
+ *
+ *   NVLIPTLnkCtrlLinkStateRequest
+ *     Returns the NVLIPT_LNK Control Link State Request value
+ *     Used in Ampere and later
+ *
+ *   DLSpeedStatusTx
+ *     Returns the NVLINK DL speed status for sublink Tx
+ *
+ *   DLSpeedStatusRx
+ *     Returns the NVLINK DL speed status for sublink Rx
+ *
+ *   NVLDLRxSlsmErrCntl
+ *     Returns the NVLDL_RXSLSM_ERR_CNTL value
+ *     Used in Hopper and later
+ *
+ *   NVLDLTopLinkState
+ *     Returns the NVLDL_TOP_LINK_STATE value
+ *     Used in Hopper and later
+ *
+ *   NVLDLTopIntr
+ *     Returns the NVLDL_TOP_INTR value
+ *     Used in Hopper and later
+ *
+ *   DLStatMN00
+ *     Returns the DLSTAT MN00 Code and subcode
+ *     Used in Hopper and later
+ *
+ *   DLStatUC01
+ *     Returns the DLSTAT UC01 value
+ *     Used in Hopper and later
+ *
+ *   MinionNvlinkLinkIntr
+ *     Returns the MINION_NVLINK_LINK_INTR code and subcode
+ *     Used in Hopper and later
+ *
+ *   bExcessErrorDL
+ *     Returns true for excessive error rate interrupt from DL
+ */
+typedef struct NV2080_CTRL_NVLINK_ERR_INFO {
+    NvU32  TLErrlog;
+    NvU32  TLIntrEn;
+    NvU32  TLCTxErrStatus0;
+    NvU32  TLCTxErrStatus1;
+    NvU32  TLCTxSysErrStatus0;
+    NvU32  TLCRxErrStatus0;
+    NvU32  TLCRxErrStatus1;
+    NvU32  TLCRxSysErrStatus0;
+    NvU32  TLCTxErrLogEn0;
+    NvU32  TLCTxErrLogEn1;
+    NvU32  TLCTxSysErrLogEn0;
+    NvU32  TLCRxErrLogEn0;
+    NvU32  TLCRxErrLogEn1;
+    NvU32  TLCRxSysErrLogEn0;
+    NvU32  MIFTxErrStatus0;
+    NvU32  MIFRxErrStatus0;
+    NvU32  NVLIPTLnkErrStatus0;
+    NvU32  NVLIPTLnkErrLogEn0;
+    NvU32  NVLIPTLnkCtrlLinkStateRequest;
+    NvU32  DLSpeedStatusTx;
+    NvU32  DLSpeedStatusRx;
+    NvU32  NVLDLRxSlsmErrCntl;
+    NvU32  NVLDLTopLinkState;
+    NvU32  NVLDLTopIntr;
+    NvU32  DLStatMN00;
+    NvU32  DLStatUC01;
+    NvU32  MinionNvlinkLinkIntr;
+    NvBool bExcessErrorDL;
+} NV2080_CTRL_NVLINK_ERR_INFO;
+
+/*
+ * NV2080_CTRL_NVLINK_COMMON_ERR_INFO
+ *   Error information per IOCTRL
+ *
+ *   NVLIPTErrStatus0
+ *     Returns the NVLIPT_COMMON Error Mask 0
+ *     Used in Ampere and later
+ *
+ *   NVLIPTErrLogEn0
+ *     Returns the NVLIPT_COMMON Log En Mask 0
+ *     Used in Ampere and later
+ */
+typedef struct NV2080_CTRL_NVLINK_COMMON_ERR_INFO {
+    NvU32 NVLIPTErrStatus0;
+    NvU32 NVLIPTErrLogEn0;
+} NV2080_CTRL_NVLINK_COMMON_ERR_INFO;
+
+/* Extract the error status bit for a given TL error index i */
+#define NV2080_CTRL_NVLINK_GET_TL_ERRLOG_BIT(intr, i) (((1U << i) & (intr)) >> i)
+
+/* Extract the intr enable bit for a given TL error index i */
+#define NV2080_CTRL_NVLINK_GET_TL_INTEN_BIT(intr, i)  NV2080_CTRL_NVLINK_GET_TL_ERRLOG_BIT(intr, i)
+
+/* Error status values for a given NVLINK TL error */
+#define NV2080_CTRL_NVLINK_TL_ERRLOG_TRUE             (0x00000001U)
+#define NV2080_CTRL_NVLINK_TL_ERRLOG_FALSE 
(0x00000000U) + +/* Intr enable/disable for a given NVLINK TL error */ +#define NV2080_CTRL_NVLINK_TL_INTEN_TRUE (0x00000001U) +#define NV2080_CTRL_NVLINK_TL_INTEN_FALSE (0x00000000U) + +/* NVLINK TL interrupt enable fields for errors */ +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXDLDATAPARITYEN 0U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXDLCTRLPARITYEN 1U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXPROTOCOLEN 2U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXOVERFLOWEN 3U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXRAMDATAPARITYEN 4U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXRAMHDRPARITYEN 5U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXRESPEN 6U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXPOISONEN 7U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_TXRAMDATAPARITYEN 8U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_TXRAMHDRPARITYEN 9U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_DLFLOWPARITYEN 10U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_DLHDRPARITYEN 12U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_TXCREDITEN 13U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_MAX 14U + +/* NVLINK TL error fields */ +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXDLDATAPARITYERR 0U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXDLCTRLPARITYERR 1U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXPROTOCOLERR 2U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXOVERFLOWERR 3U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXRAMDATAPARITYERR 4U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXRAMHDRPARITYERR 5U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXRESPERR 6U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXPOISONERR 7U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_TXRAMDATAPARITYERR 8U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_TXRAMHDRPARITYERR 9U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_DLFLOWPARITYERR 10U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_DLHDRPARITYERR 12U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_TXCREDITERR 13U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_MAX 14U + +/* NVLINK DL speed status for sublink Tx*/ +#define NV2080_CTRL_NVLINK_SL0_SLSM_STATUS_TX_PRIMARY_STATE_HS (0x00000000U) +#define NV2080_CTRL_NVLINK_SL0_SLSM_STATUS_TX_PRIMARY_STATE_SINGLE_LANE (0x00000004U) +#define NV2080_CTRL_NVLINK_SL0_SLSM_STATUS_TX_PRIMARY_STATE_TRAIN (0x00000005U) +#define NV2080_CTRL_NVLINK_SL0_SLSM_STATUS_TX_PRIMARY_STATE_SAFE (0x00000006U) +#define NV2080_CTRL_NVLINK_SL0_SLSM_STATUS_TX_PRIMARY_STATE_OFF (0x00000007U) + +/* NVLINK DL speed status for sublink Rx*/ +#define NV2080_CTRL_NVLINK_SL1_SLSM_STATUS_RX_PRIMARY_STATE_HS (0x00000000U) +#define NV2080_CTRL_NVLINK_SL1_SLSM_STATUS_RX_PRIMARY_STATE_SINGLE_LANE (0x00000004U) +#define NV2080_CTRL_NVLINK_SL1_SLSM_STATUS_RX_PRIMARY_STATE_TRAIN (0x00000005U) +#define NV2080_CTRL_NVLINK_SL1_SLSM_STATUS_RX_PRIMARY_STATE_SAFE (0x00000006U) +#define NV2080_CTRL_NVLINK_SL1_SLSM_STATUS_RX_PRIMARY_STATE_OFF (0x00000007U) + +/* Flags to query different debug registers */ +#define NV2080_CTRL_NVLINK_ERR_INFO_FLAGS_DEFAULT (0x0U) +#define NV2080_CTRL_NVLINK_ERR_INFO_FLAGS_INTR_STATUS (0x1U) +#define NV2080_CTRL_NVLINK_ERR_INFO_FLAGS_ALI_STATUS (0x2U) + +#define NV2080_CTRL_NVLINK_MAX_IOCTRLS 3U +/* + * NV2080_CTRL_NVLINK_GET_ERR_INFO_PARAMS + * + * linkMask (This field will be deprecated in the future, please use links) + * Returns the mask of links enabled + * + * links + * Returns the mask of links enabled + * + * linkErrInfo + * Returns the error information for all the links + * + * ioctrlMask + * Returns the mask of ioctrls + * + * commonErrInfo + * Returns the error information common to each IOCTRL + * + * ErrInfoFlags + * Input 
for determining which values to query. Possible values: + * NV2080_CTRL_NVLINK_ERR_INFO_FLAGS_INTR_STATUS + * NV2080_CTRL_NVLINK_ERR_INFO_FLAGS_ALI_STATUS + * + */ +#define NV2080_CTRL_NVLINK_GET_ERR_INFO_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_NVLINK_GET_ERR_INFO_PARAMS { + NvU32 linkMask; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8); + NV2080_CTRL_NVLINK_ERR_INFO linkErrInfo[NV2080_CTRL_NVLINK_MAX_ARR_SIZE]; + NvU32 ioctrlMask; + NV2080_CTRL_NVLINK_COMMON_ERR_INFO commonErrInfo[NV2080_CTRL_NVLINK_MAX_IOCTRLS]; + NvU8 ErrInfoFlags; +} NV2080_CTRL_NVLINK_GET_ERR_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_GET_ERR_INFO + * This command is used to query the NVLINK error information + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_NVLINK_GET_ERR_INFO (0x20803003U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_ERR_INFO_PARAMS_MESSAGE_ID" */ + +/* + * APIs for getting NVLink counters + */ + +// These are the bitmask definitions for different counter types + +#define NV2080_CTRL_NVLINK_COUNTER_INVALID 0x00000000U + +#define NV2080_CTRL_NVLINK_COUNTER_TL_TX0 0x00000001U +#define NV2080_CTRL_NVLINK_COUNTER_TL_TX1 0x00000002U +#define NV2080_CTRL_NVLINK_COUNTER_TL_RX0 0x00000004U +#define NV2080_CTRL_NVLINK_COUNTER_TL_RX1 0x00000008U + +#define NV2080_CTRL_NVLINK_LP_COUNTERS_DL 0x00000010U + +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_ECC_LANE_L(i) (1 << (i + 8)) +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_ECC_LANE__SIZE 4U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_ECC_LANE_L0 0x00000100U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_ECC_LANE_L1 0x00000200U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_ECC_LANE_L2 0x00000400U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_ECC_LANE_L3 0x00000800U + +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT 0x00010000U + +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L(i) (1 << (i + 17)) +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE__SIZE 8U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0 0x00020000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1 0x00040000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2 0x00080000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3 0x00100000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4 0x00200000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5 0x00400000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6 0x00800000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7 0x01000000U + +#define NV2080_CTRL_NVLINK_COUNTER_DL_TX_ERR_REPLAY 0x02000000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_TX_ERR_RECOVERY 0x04000000U + +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_REPLAY 0x08000000U + +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_MASKED 0x10000000U + +/* + * Note that COUNTER_MAX_TYPES will need to be updated each time + * a new counter type gets added to the list above. + * + */ +#define NV2080_CTRL_NVLINK_COUNTER_MAX_TYPES 32U + +/* + * NV2080_CTRL_CMD_NVLINK_GET_COUNTERS + * This command gets the counts for different counter types. 
+ *
+ * [in] counterMask
+ *     Mask of counter types to be queried, built from the
+ *     NV2080_CTRL_NVLINK_COUNTER_* bit definitions above
+ *
+ * [in] linkMask (This field will be deprecated in the future, please use links)
+ *     Mask of links to be queried
+ *
+ * [in] links
+ *     Mask of links to be queried
+ *
+ * [out] counters
+ *     Counter value returned
+ *
+ * [out] bTx0TlCounterOverflow
+ *     This boolean is set to NV_TRUE if TX Counter 0 has rolled over.
+ *
+ * [out] bTx1TlCounterOverflow
+ *     This boolean is set to NV_TRUE if TX Counter 1 has rolled over.
+ *
+ * [out] bRx0TlCounterOverflow
+ *     This boolean is set to NV_TRUE if RX Counter 0 has rolled over.
+ *
+ * [out] bRx1TlCounterOverflow
+ *     This boolean is set to NV_TRUE if RX Counter 1 has rolled over.
+ *
+ * [out] value
+ *     This array contains the error counts for each error type as requested from
+ *     the counterMask. The array indexes correspond to the mask bits one-to-one.
+ */
+typedef struct NV2080_CTRL_NVLINK_GET_COUNTERS_VALUES {
+    NvBool bTx0TlCounterOverflow;
+    NvBool bTx1TlCounterOverflow;
+    NvBool bRx0TlCounterOverflow;
+    NvBool bRx1TlCounterOverflow;
+    NV_DECLARE_ALIGNED(NvU64 value[NV2080_CTRL_NVLINK_COUNTER_MAX_TYPES], 8);
+} NV2080_CTRL_NVLINK_GET_COUNTERS_VALUES;
+
+#define NV2080_CTRL_NVLINK_GET_COUNTERS_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV2080_CTRL_NVLINK_GET_COUNTERS_PARAMS {
+    NvU32 counterMask;
+    NV_DECLARE_ALIGNED(NvU64 linkMask, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_GET_COUNTERS_VALUES counters[NV2080_CTRL_NVLINK_MAX_ARR_SIZE], 8);
+} NV2080_CTRL_NVLINK_GET_COUNTERS_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_GET_COUNTERS (0x20803004U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_COUNTERS_PARAMS_MESSAGE_ID" */
+
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_CLEAR_COUNTERS
+ *  This command clears/resets the counters for the specified types.
+ *
+ * [in] linkMask (This field will be deprecated in the future, please use links)
+ *     This parameter specifies for which links we want to clear the
+ *     counters.
+ *
+ * [in] links
+ *     This parameter specifies for which links we want to clear the
+ *     counters.
+ *
+ * [in] counterMask
+ *     This parameter specifies the input mask for desired counters to be
+ *     cleared. Note that all counters cannot be cleared.
+ *
+ *  NOTE: Bug# 2098529: On Turing all DL errors and LP counters are cleared
+ *        together. They cannot be cleared individually per error type. 
RM + * would possibly move to a new API on Ampere and beyond + */ + +#define NV2080_CTRL_CMD_NVLINK_CLEAR_COUNTERS (0x20803005U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_CLEAR_COUNTERS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_CLEAR_COUNTERS_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV2080_CTRL_NVLINK_CLEAR_COUNTERS_PARAMS { + NvU32 counterMask; + NV_DECLARE_ALIGNED(NvU64 linkMask, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8); +} NV2080_CTRL_NVLINK_CLEAR_COUNTERS_PARAMS; + + +#define NV2080_CTRL_NVLINK_COUNTER_TP_TL_TX0 0U +#define NV2080_CTRL_NVLINK_COUNTER_TP_TL_TX1 1U +#define NV2080_CTRL_NVLINK_COUNTER_TP_TL_RX0 2U +#define NV2080_CTRL_NVLINK_COUNTER_TP_TL_RX1 3U + +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_ECC_LANE_SIZE 4U +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_ECC_LANE_L0 4U +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_ECC_LANE_L1 5U +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_ECC_LANE_L2 6U +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_ECC_LANE_L3 7U + +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_CRC_LANE_SIZE 8U +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_CRC_LANE_L0 8U +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_CRC_LANE_L1 9U +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_CRC_LANE_L2 10U +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_CRC_LANE_L3 11U +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_CRC_LANE_L4 12U +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_CRC_LANE_L5 13U +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_CRC_LANE_L6 14U +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_CRC_LANE_L7 15U + +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_TX_ERR_RECOVERY 16U + +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_TX_ERR_REPLAY 17U +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_REPLAY 18U + +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_CRC_MASKED 19U +#define NV2080_CTRL_NVLINK_COUNTER_ERR_DL_RX_ERR_CRC_FLIT 20U + +#define NV2080_CTRL_NVLINK_COUNTER_LP_DL 21U + +#define NV2080_CTRL_NVLINK_COUNTER_V1_MAX_COUNTER NV2080_CTRL_NVLINK_COUNTER_LP_DL + +/* Transmit Counters */ +#define NV2080_CTRL_NVLINK_COUNTER_XMIT_PACKETS 22U +#define NV2080_CTRL_NVLINK_COUNTER_XMIT_BYTES 23U +/* Received Counters */ +#define NV2080_CTRL_NVLINK_COUNTER_RCV_PACKETS 24U +#define NV2080_CTRL_NVLINK_COUNTER_RCV_BYTES 25U +/* Link Events */ +#define NV2080_CTRL_NVLINK_COUNTER_LINK_ERROR_RECOVERY_COUNTER 26U +#define NV2080_CTRL_NVLINK_COUNTER_LINK_DOWNED_COUNTER 27U +#define NV2080_CTRL_NVLINK_COUNTER_LINK_RECOVERY_SUCCESSFUL_COUNTER 28U +/* Link Receive Errors */ +#define NV2080_CTRL_NVLINK_COUNTER_RCV_ERRORS 29U +#define NV2080_CTRL_NVLINK_COUNTER_RCV_REMOTE_ERRORS 30U +#define NV2080_CTRL_NVLINK_COUNTER_RCV_GENERAL_ERRORS 31U +/* Link Receive Errors Detail */ +#define NV2080_CTRL_NVLINK_COUNTER_RCV_MALFORMED_PKT_ERROR 32U +#define NV2080_CTRL_NVLINK_COUNTER_RCV_BUFFER_OVERRUN_ERROR 33U +#define NV2080_CTRL_NVLINK_COUNTER_RCV_VL15DROPPED_ERROR 34U +/* Link Other Errors Detail */ +#define NV2080_CTRL_NVLINK_COUNTER_LINK_INTEGRITY_ERRORS 35U +#define NV2080_CTRL_NVLINK_COUNTER_BUFFER_OVERRUN_ERRORS 36U +/* Link Transmit Errors */ +#define NV2080_CTRL_NVLINK_COUNTER_XMIT_WAIT_TIME 37U +#define NV2080_CTRL_NVLINK_COUNTER_XMIT_ERRORS 38U +/* FEC Block Counters */ +#define NV2080_CTRL_NVLINK_COUNTER_SINGLE_ERROR_BLOCKS 39U +#define NV2080_CTRL_NVLINK_COUNTER_CORRECTED_BLOCKS 40U +#define NV2080_CTRL_NVLINK_COUNTER_UNCORRECTED_BLOCKS 41U +/* FEC Symbol Counters */ 
+#define NV2080_CTRL_NVLINK_COUNTER_CORRECTED_SYMBOLS_LANE_0 42U +#define NV2080_CTRL_NVLINK_COUNTER_CORRECTED_SYMBOLS_LANE_1 43U +#define NV2080_CTRL_NVLINK_COUNTER_CORRECTED_SYMBOLS_TOTAL 44U +/* FEC Raw Error Counters */ +#define NV2080_CTRL_NVLINK_COUNTER_RAW_ERRORS_LANE_0 45U +#define NV2080_CTRL_NVLINK_COUNTER_RAW_ERRORS_LANE_1 46U +#define NV2080_CTRL_NVLINK_COUNTER_CORRECTED_BITS 47U +/* FEC Raw BER */ +#define NV2080_CTRL_NVLINK_COUNTER_RAW_BER_LANE_0 48U +#define NV2080_CTRL_NVLINK_COUNTER_RAW_BER_LANE_1 49U +#define NV2080_CTRL_NVLINK_COUNTER_RAW_BER_TOTAL 50U +/* FEC Effective BER */ +#define NV2080_CTRL_NVLINK_COUNTER_NO_ERROR_BLOCKS 51U +#define NV2080_CTRL_NVLINK_COUNTER_EFFECTIVE_ERRORS 52U +#define NV2080_CTRL_NVLINK_COUNTER_EFFECTIVE_BER 53U +/* Phy Symbol Errors Counters */ +#define NV2080_CTRL_NVLINK_COUNTER_SYMBOL_ERRORS 54U +#define NV2080_CTRL_NVLINK_COUNTER_SYMBOL_BER 55U +#define NV2080_CTRL_NVLINK_COUNTER_RECEIVED_BITS 56U +/* Phy Other Errors Counters */ +#define NV2080_CTRL_NVLINK_COUNTER_SYNC_HEADER_ERRORS 57U +#define NV2080_CTRL_NVLINK_COUNTER_TIME_SINCE_LAST_CLEAR 58U +/* PLR Receive Counters */ +#define NV2080_CTRL_NVLINK_COUNTER_PLR_RCV_BLOCKS 59U +#define NV2080_CTRL_NVLINK_COUNTER_PLR_RCV_BLOCKS_WITH_UNCORRECTABLE_ERRORS 60U +#define NV2080_CTRL_NVLINK_COUNTER_PLR_RCV_BLOCKS_WITH_ERRORS 61U +/* PLR Transmit Counters */ +#define NV2080_CTRL_NVLINK_COUNTER_PLR_XMIT_BLOCKS 62U +#define NV2080_CTRL_NVLINK_COUNTER_PLR_XMIT_RETRY_BLOCKS 63U +#define NV2080_CTRL_NVLINK_COUNTER_PLR_XMIT_RETRY_EVENTS 64U +/* PLR BW Loss Counters */ +#define NV2080_CTRL_NVLINK_COUNTER_PLR_BW_LOSS 65U +/* NVLE Rx counters */ +#define NV2080_CTRL_NVLINK_COUNTER_NVLE_RX_GOOD 66U +#define NV2080_CTRL_NVLINK_COUNTER_NVLE_RX_ERROR 67U +#define NV2080_CTRL_NVLINK_COUNTER_NVLE_RX_AUTH 68U +/* NVLE Tx Counters */ +#define NV2080_CTRL_NVLINK_COUNTER_NVLE_TX_GOOD 69U +#define NV2080_CTRL_NVLINK_COUNTER_NVLE_TX_ERROR 70U +/* FEC Histogram */ +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_0 71U +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_1 72U +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_2 73U +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_3 74U +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_4 75U +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_5 76U +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_6 77U +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_7 78U +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_8 79U +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_9 80U +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_10 81U +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_11 82U +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_12 83U +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_13 84U +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_14 85U +#define NV2080_CTRL_NVLINK_COUNTER_HISTORY_15 86U + +/* Throughput counters */ +#define NV2080_CTRL_NVLINK_COUNTER_TP_RX_DATA 87U +#define NV2080_CTRL_NVLINK_COUNTER_TP_TX_DATA 88U +#define NV2080_CTRL_NVLINK_COUNTER_TP_RX_RAW 89U +#define NV2080_CTRL_NVLINK_COUNTER_TP_TX_RAW 90U + +/* Low power counters */ +#define NV2080_CTRL_NVLINK_COUNTER_L1_ENTRY 91U +#define NV2080_CTRL_NVLINK_COUNTER_L1_ENTRY_FORCE 92U +#define NV2080_CTRL_NVLINK_COUNTER_L1_EXIT 93U +#define NV2080_CTRL_NVLINK_COUNTER_L1_EXIT_RECAL 94U +#define NV2080_CTRL_NVLINK_COUNTER_L1_EXIT_REMOTE 95U +#define NV2080_CTRL_NVLINK_COUNTER_L1_LP_STEADY_STATE_TIME 96U +#define NV2080_CTRL_NVLINK_COUNTER_L1_HIGH_SPEED_STEADY_STATE_TIME 97U +#define NV2080_CTRL_NVLINK_COUNTER_L1_OTHER_STATE_TIME 98U +#define NV2080_CTRL_NVLINK_COUNTER_LP_LOCAL_ENTRY_TIME 99U 
+#define NV2080_CTRL_NVLINK_COUNTER_LP_LOCAL_EXIT_TIME 100U
+#define NV2080_CTRL_NVLINK_COUNTER_LP_LOCAL_FULL_BW_ENTRY_TIME 101U
+#define NV2080_CTRL_NVLINK_COUNTER_LP_LOCAL_FULL_BW_EXIT_TIME 102U
+#define NV2080_CTRL_NVLINK_COUNTER_LP_REMOTE_ENTRY_TIME 103U
+#define NV2080_CTRL_NVLINK_COUNTER_LP_REMOTE_EXIT_TIME 104U
+#define NV2080_CTRL_NVLINK_COUNTER_LP_REMOTE_FULL_BW_ENTRY_TIME 105U
+#define NV2080_CTRL_NVLINK_COUNTER_LP_REMOTE_FULL_BW_EXIT_TIME 106U
+
+#define NV2080_CTRL_NVLINK_COUNTER_PLR_LAST_RAW_BER 107U
+#define NV2080_CTRL_NVLINK_COUNTER_PLR_XMIT_RETRY_EVENTS_WITHIN_T_SEC_MAX_LOW 108U
+
+#define NV2080_CTRL_NVLINK_COUNTERS_MAX 109U
+
+#define NV2080_CTRL_NVLINK_COUNTER_MAX_GROUPS 2U
+#define NV2080_CTRL_NVLINK_COUNTER_MAX_COUNTERS_PER_LINK_IN_REQ 28
+#define NV2080_CTRL_NVLINK_COUNTER_V2_GROUP(i)        ((i) / 64)
+#define NV2080_CTRL_NVLINK_COUNTER_V2_COUNTER_MASK(i) ((NvU64)1 << ((i) % 64))
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_GET_COUNTERS_V2
+ *  This command gets the counts for different counter types.
+ *
+ * [in] linkMask (This field will be deprecated in the future, please use links)
+ *     Mask of links to be queried
+ *
+ * [in] links
+ *     Mask of links to be queried
+ *
+ * [in] counterMask
+ *     Mask of counter types to be queried, built from the
+ *     NV2080_CTRL_NVLINK_COUNTER_* IDs above using
+ *     NV2080_CTRL_NVLINK_COUNTER_V2_GROUP() and
+ *     NV2080_CTRL_NVLINK_COUNTER_V2_COUNTER_MASK()
+ *
+ * [out] counter
+ *     This array contains the error counts for each error type as requested from
+ *     the counterMask. The array indexes correspond to the mask bits one-to-one.
+ */
+
+typedef struct NV2080_CTRL_NVLINK_COUNTERS_V2_VALUES {
+    NvBool overFlow;
+    NV_DECLARE_ALIGNED(NvU64 value, 8);
+} NV2080_CTRL_NVLINK_COUNTERS_V2_VALUES;
+
+#define NV2080_CTRL_NVLINK_GET_COUNTERS_V2_PARAMS_MESSAGE_ID (0x50U)
+
+typedef struct NV2080_CTRL_NVLINK_GET_COUNTERS_V2_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 linkMask, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8);
+    NV_DECLARE_ALIGNED(NvU64 counterMask[NV2080_CTRL_NVLINK_COUNTER_MAX_GROUPS], 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_COUNTERS_V2_VALUES counter[NV2080_CTRL_NVLINK_MAX_ARR_SIZE][NV2080_CTRL_NVLINK_COUNTER_MAX_COUNTERS_PER_LINK_IN_REQ], 8);
+} NV2080_CTRL_NVLINK_GET_COUNTERS_V2_PARAMS;
+#define NV2080_CTRL_CMD_NVLINK_GET_COUNTERS_V2 (0x20803050U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8 | NV2080_CTRL_NVLINK_GET_COUNTERS_V2_PARAMS_MESSAGE_ID)" */
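+
+/*
+ * Editor's illustrative sketch, not part of the generated header: counter IDs
+ * 64 and above land in the second counterMask group, so a request places each
+ * bit with NV2080_CTRL_NVLINK_COUNTER_V2_GROUP() and
+ * NV2080_CTRL_NVLINK_COUNTER_V2_COUNTER_MASK().
+ */
+#if 0
+static void exampleRequestCounter(NV2080_CTRL_NVLINK_GET_COUNTERS_V2_PARAMS *pParams,
+                                  NvU32 counterId)
+{
+    /* e.g. NV2080_CTRL_NVLINK_COUNTER_L1_ENTRY (91) -> group 1, bit 27 */
+    pParams->counterMask[NV2080_CTRL_NVLINK_COUNTER_V2_GROUP(counterId)] |=
+        NV2080_CTRL_NVLINK_COUNTER_V2_COUNTER_MASK(counterId);
+}
+#endif
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_CLEAR_COUNTERS_V2
+ *  This command clears/resets the counters for the specified types.
+ *
+ * [in] linkMask (This field will be deprecated in the future, please use links)
+ *     This parameter specifies for which links we want to clear the
+ *     counters.
+ *
+ * [in] links
+ *     This parameter specifies for which links we want to clear the
+ *     counters.
+ *
+ * [in] counterMask
+ *     This parameter specifies the input mask for desired counters to be
+ *     cleared. Note that all counters cannot be cleared.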
+*/
+
+#define NV2080_CTRL_NVLINK_CLEAR_COUNTERS_V2_PARAMS_MESSAGE_ID (0x51U)
+
+typedef struct NV2080_CTRL_NVLINK_CLEAR_COUNTERS_V2_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 linkMask, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8);
+    NV_DECLARE_ALIGNED(NvU64 counterMask[NV2080_CTRL_NVLINK_COUNTER_MAX_GROUPS], 8);
+} NV2080_CTRL_NVLINK_CLEAR_COUNTERS_V2_PARAMS;
+#define NV2080_CTRL_CMD_NVLINK_CLEAR_COUNTERS_V2 (0x20803051U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8 | NV2080_CTRL_NVLINK_CLEAR_COUNTERS_V2_PARAMS_MESSAGE_ID)" */
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_INJECT_ERROR
+ *  This command causes all the same actions to occur as if the related
+ *  error were to occur, either fatal or recoverable.
+ *
+ * [in] linkMask (This field will be deprecated in the future, please use links)
+ *     Controls which links to apply error injection to.
+ *
+ * [in] links
+ *     Controls which links to apply error injection to.
+ *
+ * [in] bFatalError
+ *     This parameter specifies that the error should be fatal.
+ *
+ */
+#define NV2080_CTRL_CMD_NVLINK_INJECT_ERROR (0x20803006U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_INJECT_ERROR_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_NVLINK_INJECT_ERROR_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV2080_CTRL_NVLINK_INJECT_ERROR_PARAMS {
+    NvU32  linkMask;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8);
+    NvBool bFatalError;
+} NV2080_CTRL_NVLINK_INJECT_ERROR_PARAMS;
+
+/* NVLINK unit list - to be used with error notifiers */
+#define NV2080_CTRL_NVLINK_UNIT_DL       0x01U
+#define NV2080_CTRL_NVLINK_UNIT_TL       0x02U
+#define NV2080_CTRL_NVLINK_UNIT_TLC_RX_0 0x03U
+#define NV2080_CTRL_NVLINK_UNIT_TLC_RX_1 0x04U
+#define NV2080_CTRL_NVLINK_UNIT_TLC_TX_0 0x05U
+#define NV2080_CTRL_NVLINK_UNIT_MIF_RX_0 0x06U
+#define NV2080_CTRL_NVLINK_UNIT_MIF_TX_0 0x07U
+#define NV2080_CTRL_NVLINK_UNIT_MINION   0x08U
+
+/*
+ * NV2080_CTRL_NVLINK_HW_ERROR_INJECT_CFG
+ *
+ * [in] errType
+ *     This parameter specifies the type of error injection settings
+ * [in] errSettings
+ *     This parameter specifies the settings for the error type in NVL5
+ */
+
+typedef enum NV2080_CTRL_NVLINK_HW_ERROR_INJECT_ERR_TYPE {
+    NV2080_CTRL_NVLINK_HW_ERROR_INJECT_ERR_TYPE_TX_ERR = 1,
+    NV2080_CTRL_NVLINK_HW_ERROR_INJECT_ERR_TYPE_PKT_ERR = 2,
+    NV2080_CTRL_NVLINK_HW_ERROR_INJECT_ERR_TYPE_AUTH_TAG_ERR = 3,
+    NV2080_CTRL_NVLINK_HW_ERROR_INJECT_ERR_TYPE_LINK_ERR = 4,
+    NV2080_CTRL_NVLINK_HW_ERROR_INJECT_ERR_TYPE_MAX = 5,
+} NV2080_CTRL_NVLINK_HW_ERROR_INJECT_ERR_TYPE;
+
+typedef struct NV2080_CTRL_NVLINK_HW_ERROR_INJECT_CFG {
+    NV2080_CTRL_NVLINK_HW_ERROR_INJECT_ERR_TYPE errType;
+    NV_DECLARE_ALIGNED(NvU64 errSettings, 8);
+} NV2080_CTRL_NVLINK_HW_ERROR_INJECT_CFG;
+
+/*
+ * Tx error type settings
+ */
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_TX_ERR_TYPE                31:28
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_TX_ERR_TYPE_NO_ERROR      0x00000000U
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_TX_ERR_TYPE_RAW_BER       0x00000001U
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_TX_ERR_TYPE_EFFECTIVE_BER 0x00000002U
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_TX_ERR_ERR_INJECT_DURATION 27:12 // Error Injection Duration, in 10ms units.
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_TX_ERR_BER_MANTISSA 11:8
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_TX_ERR_BER_EXPONENT 7:0
+
+/*
+ * Packet error type settings
+ */
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_PKT_ERR_INJECT_COUNT   15:0
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_PKT_ERR_STOMP          16:16
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_PKT_ERR_STOMP_DIS      0x00000000U
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_PKT_ERR_STOMP_EN       0x00000001U
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_PKT_ERR_POISON         17:17
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_PKT_ERR_POISON_DIS     0x00000000U
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_PKT_ERR_POISON_EN      0x00000001U
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_PKT_ERR_CLEAR_COUNTERS 18:18
+
+/*
+ * Authentication error type settings
+ */
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_AUTH_TAG_ERR_PIPE_INDEX   3:0
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_AUTH_TAG_ERR_AUTH_ERR     4:4
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_AUTH_TAG_ERR_AUTH_ERR_DIS 0x00000000U
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_AUTH_TAG_ERR_AUTH_ERR_EN  0x00000001U
+
+/*
+ * Link Error type settings
+ */
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_LINK_ERR_FORCE_LINK_DOWN     0:0
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_LINK_ERR_FORCE_LINK_DOWN_DIS 0x00000000U
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_LINK_ERR_FORCE_LINK_DOWN_EN  0x00000001U
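+
+/*
+ * Editor's illustrative sketch, not part of the generated header: the hi:lo
+ * pairs above name bit ranges inside errSettings. A hand-rolled field insert
+ * is shown here in place of the usual DRF helpers; the exact BER encoding of
+ * the mantissa/exponent fields is not specified here, only their placement.
+ */
+#if 0
+static NvU64 exampleInsertField(NvU64 value, NvU32 hi, NvU32 lo, NvU64 field)
+{
+    NvU64 mask = (((NvU64)1 << (hi - lo + 1)) - 1) << lo;
+    return (value & ~mask) | ((field << lo) & mask);
+}
+
+static NvU64 exampleRawBerSettings(void)
+{
+    NvU64 s = 0;
+    s = exampleInsertField(s, 31, 28, NV2080_CTRL_NVLINK_HW_ERROR_INJECT_TX_ERR_TYPE_RAW_BER);
+    s = exampleInsertField(s, 27, 12, 100); /* duration: 100 * 10 ms */
+    s = exampleInsertField(s, 11, 8, 5);    /* BER mantissa */
+    s = exampleInsertField(s, 7, 0, 9);     /* BER exponent */
+    return s;
+}
+#endif
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_SET_HW_ERROR_INJECT
+ *  This command causes all the same actions to occur as if the related
+ *  error were to occur, either fatal or recoverable.
+ *
+ * [in] linkMask size: 64 bits (This field will be deprecated in the future, please use links)
+ *     Mask of the links to be configured.
+ *
+ * [in] links
+ *     Mask of the links to be configured.
+ *
+ * [in] errCfg
+ *     This parameter specifies the error configurations.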
+ */
+
+#define NV2080_CTRL_NVLINK_SET_HW_ERROR_INJECT_PARAMS_MESSAGE_ID (0x81U)
+
+typedef struct NV2080_CTRL_NVLINK_SET_HW_ERROR_INJECT_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 linkMask, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_HW_ERROR_INJECT_CFG errCfg[NV2080_CTRL_NVLINK_MAX_ARR_SIZE], 8);
+} NV2080_CTRL_NVLINK_SET_HW_ERROR_INJECT_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_SET_HW_ERROR_INJECT (0x20803081U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SET_HW_ERROR_INJECT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_STS_LINK_STATE                 1:0
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_STS_LINK_STATE_UP              0x00000000U
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_STS_LINK_STATE_DOWN_BY_REQUEST 0x00000001U
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_STS_LINK_STATE_DOWN_BY_HW_ERR  0x00000002U
+
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_STS_OPER_STS                       0:0
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_STS_OPER_STS_NO_ERR_INJECT         0x00000000U
+#define NV2080_CTRL_NVLINK_HW_ERROR_INJECT_STS_OPER_STS_PERFORMING_ERR_INJECT 0x00000001U
+
+/*
+ * NV2080_CTRL_NVLINK_HW_ERROR_INJECT_INFO
+ *
+ * [out] txErrInfo
+ *     This info specifies the settings for Tx errs in NVL5
+ * [out] packetErrInfo
+ *     This info specifies the settings for Pkt errs in NVL5
+ * [out] authErrInfo
+ *     This info specifies the settings for NVLE errs in NVL5
+ * [out] linkStatus
+ *     This specifies the status of the link in NVL5
+ * [out] errInjectStatus
+ *     This specifies the status of error injection
+ */
+
+typedef struct NV2080_CTRL_NVLINK_HW_ERROR_INJECT_INFO {
+    NvU32 txErrInfo;
+    NvU32 packetErrInfo;
+    NvU32 authErrInfo;
+    NvU32 linkStatus;
+    NvU32 errInjectStatus;
+} NV2080_CTRL_NVLINK_HW_ERROR_INJECT_INFO;
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_GET_HW_ERROR_INJECT
+ *  This command gets the current NVLink error injection configuration
+ *
+ * [in] linkMask size: 64 bits (This field will be deprecated in the future, please use links)
+ *     Mask of the links to be queried.
+ *
+ * [in] links
+ *     Mask of the links to be queried.
+ *
+ * [out] errInfo
+ *     This parameter returns the current error injection information.
+ */
+
+#define NV2080_CTRL_NVLINK_GET_HW_ERROR_INJECT_PARAMS_MESSAGE_ID (0x82U)
+
+typedef struct NV2080_CTRL_NVLINK_GET_HW_ERROR_INJECT_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 linkMask, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8);
+    NV2080_CTRL_NVLINK_HW_ERROR_INJECT_INFO errInfo[NV2080_CTRL_NVLINK_MAX_ARR_SIZE];
+} NV2080_CTRL_NVLINK_GET_HW_ERROR_INJECT_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_GET_HW_ERROR_INJECT (0x20803082U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_HW_ERROR_INJECT_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES
+ *  This command gets the number of successful error recoveries
+ *
+ * [in] linkMask size: 32 bits (This field will be deprecated in the future, please use links)
+ *     This parameter controls which links to get recoveries for.
+ *
+ * [in] links
+ *     This parameter controls which links to get recoveries for.
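+ *
+ * [out] numRecoveries
+ *     This parameter specifies the number of successful per link error recoveries
+ */
+#define NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES (0x20803007U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES_PARAMS_MESSAGE_ID (0x7U)
+
+typedef struct NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES_PARAMS {
+    NvU32 linkMask;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8);
+    NvU32 numRecoveries[NV2080_CTRL_NVLINK_MAX_ARR_SIZE];
+} NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES_PARAMS;
+
+/*
+ * Editor's illustrative sketch, not part of the generated header: it assumes
+ * (per the description above) that numRecoveries is indexed by link ID and
+ * that only entries whose bit is set in the 32-bit linkMask are meaningful.
+ */
+#if 0
+static NvU32 exampleTotalRecoveries(const NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES_PARAMS *pParams)
+{
+    NvU32 total = 0;
+    NvU32 i;
+
+    for (i = 0; i < 32; i++)
+    {
+        if ((pParams->linkMask & (1U << i)) != 0U)
+            total += pParams->numRecoveries[i];
+    }
+    return total;
+}
+#endif
+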
+/*
+ * NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES
+ * This command gets the number of successful error recoveries
+ *
+ * [in] linkMask size: 32 bits (This field will be deprecated in the future, please use links)
+ *     This parameter controls which links to get recoveries for.
+ *
+ * [in] links
+ *     This parameter controls which links to get recoveries for.
+ *
+ * [out] numRecoveries
+ *     This parameter specifies the number of successful per-link error recoveries
+ */
+#define NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES (0x20803007U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES_PARAMS_MESSAGE_ID (0x7U)
+
+typedef struct NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES_PARAMS {
+    NvU32 linkMask;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8);
+    NvU32 numRecoveries[NV2080_CTRL_NVLINK_MAX_ARR_SIZE];
+} NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_GET_LINK_LAST_ERROR_REMOTE_TYPE
+ *
+ * This command queries the remote endpoint type of the link recorded at the
+ * time the last error occurred on the link.
+ *
+ * [in] linkId
+ *     This parameter specifies the link to get the last remote endpoint type
+ *     recorded for.
+ *
+ * [out] remoteType
+ *     This parameter returns the remote endpoint type of the link recorded at
+ *     the time the last error occurred on the link. Possible values are:
+ *       NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_NONE
+ *         The link is not connected to an active remote endpoint.
+ *       NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_GPU
+ *         The remote endpoint of the link is a peer GPU.
+ *       NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_NPU
+ *         The remote endpoint of the link is the host system (e.g., an NPU
+ *         on IBM POWER platforms).
+ *       NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_TEGRA
+ *         The remote endpoint of the link is a Tegra device.
+ *
+ * Possible return status values are:
+ *   NV_OK
+ *     If the remoteType parameter value is valid upon return.
+ *   NV_ERR_INVALID_ARGUMENT
+ *     If the linkId parameter does not specify a valid link.
+ *   NV_ERR_NOT_SUPPORTED
+ *     If NVLINK is not supported on this GPU or the remote endpoint type is
+ *     not recorded in non-volatile storage.
+ */
+#define NV2080_CTRL_CMD_NVLINK_GET_LINK_LAST_ERROR_REMOTE_TYPE (0x20803008U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_LINK_LAST_ERROR_REMOTE_TYPE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_NVLINK_GET_LINK_LAST_ERROR_REMOTE_TYPE_PARAMS_MESSAGE_ID (0x8U)
+
+typedef struct NV2080_CTRL_NVLINK_GET_LINK_LAST_ERROR_REMOTE_TYPE_PARAMS {
+    NvU32 linkId;
+    NvU32 remoteType;
+} NV2080_CTRL_NVLINK_GET_LINK_LAST_ERROR_REMOTE_TYPE_PARAMS;
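+/*
+ * Illustrative sketch (not part of this interface): interpreting remoteType
+ * after a successful NV2080_CTRL_CMD_NVLINK_GET_LINK_LAST_ERROR_REMOTE_TYPE
+ * call. The NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_* values are assumed
+ * to be defined earlier in this header; rmControl() is a hypothetical wrapper.
+ *
+ *     NV2080_CTRL_NVLINK_GET_LINK_LAST_ERROR_REMOTE_TYPE_PARAMS p = { 0 };
+ *     p.linkId = 3;
+ *     if (rmControl(hSubdevice, NV2080_CTRL_CMD_NVLINK_GET_LINK_LAST_ERROR_REMOTE_TYPE,
+ *                   &p, sizeof(p)) == NV_OK)
+ *     {
+ *         switch (p.remoteType)
+ *         {
+ *             case NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_GPU: break; // peer GPU
+ *             case NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_NPU: break; // host NPU
+ *             default:                                             break; // none, Tegra, etc.
+ *         }
+ *     }
+ */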
+/*
+ * NV2080_CTRL_CMD_NVLINK_GET_LINK_FATAL_ERROR_COUNTS
+ *
+ * This command queries the number of each type of fatal errors that have
+ * occurred on the given link.
+ *
+ * [in] linkId
+ *     This parameter specifies the link to get the fatal error information
+ *     for.
+ *
+ * [out] supportedCounts
+ *     This parameter identifies which counts in the fatalErrorCounts array
+ *     are valid for the given link. A bit set in this field means that the
+ *     corresponding index is valid in the fatalErrorCounts array.
+ *
+ * [out] fatalErrorCounts
+ *     This parameter returns an array of 8-bit counts, one for each type of
+ *     fatal error that can occur on the link. The valid indices of this array
+ *     are:
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL(C)_RX_DL_DATA_PARITY
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL(C)_RX_DL_CTRL_PARITY
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_PROTOCOL
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_OVERFLOW
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL(C)_RX_RAM_DATA_PARITY
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL(C)_RX_RAM_HDR_PARITY
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_RESP
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_POISON
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DATA_POISONED_PKT_RCVD
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL(C)_TX_RAM_DATA_PARITY
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL(C)_TX_RAM_HDR_PARITY
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_TX_CREDIT
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_DL_FLOW_CTRL_PARITY
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DL_FLOW_CTRL_PARITY
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_DL_HDR_PARITY
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_RECOVERY_LONG
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_FAULT_RAM
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_FAULT_INTERFACE
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_FAULT_SUBLINK_CHANGE
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_RX_FAULT_SUBLINK_CHANGE
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_RX_FAULT_DL_PROTOCOL
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_LTSSM_FAULT
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DL_HDR_PARITY
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_AE_FLIT_RCVD
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_BE_FLIT_RCVD
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_ADDR_ALIGN
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_PKT_LEN
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_CMD_ENC
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_DAT_LEN_ENC
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_ADDR_TYPE
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_RSP_STATUS
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_PKT_STATUS
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_CACHE_ATTR_ENC_IN_PROBE_REQ
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_CACHE_ATTR_ENC_IN_PROBE_RESP
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DAT_LEN_GT_ATOMIC_REQ_MAX_SIZE
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DAT_LEN_GT_RMW_REQ_MAX_SIZE
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DAT_LEN_LT_ATR_RESP_MIN_SIZE
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_PO_FOR_CACHE_ATTR
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_COMPRESSED_RESP
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RESP_STATUS_TARGET
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RESP_STATUS_UNSUPPORTED_REQUEST
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_HDR_OVERFLOW
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DATA_OVERFLOW
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_STOMPED_PKT_RCVD
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_CORRECTABLE_INTERNAL
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_UNSUPPORTED_VC_OVERFLOW
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_UNSUPPORTED_NVLINK_CREDIT_RELEASE
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_UNSUPPORTED_NCISOC_CREDIT_RELEASE
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_HDR_CREDIT_OVERFLOW
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DATA_CREDIT_OVERFLOW
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DL_REPLAY_CREDIT_OVERFLOW
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_UNSUPPORTED_VC_OVERFLOW
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_STOMPED_PKT_SENT
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DATA_POISONED_PKT_SENT
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_RESP_STATUS_TARGET
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_RESP_STATUS_UNSUPPORTED_REQUEST
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_RX_RAM_DATA_PARITY
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_RX_RAM_HDR_PARITY
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_TX_RAM_DATA_PARITY
+ *       NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_TX_RAM_HDR_PARITY
+ *
+ * Possible return status values are:
+ *   NV_OK
+ *     If the values in the fatalErrorCounts array are valid upon return.
+ *   NV_ERR_INVALID_ARGUMENT
+ *     If the linkId parameter does not specify a valid link.
+ *   NV_ERR_NOT_SUPPORTED
+ *     If NVLINK is not supported on this GPU or aggregate NVLINK fatal error
+ *     counts are not recorded in non-volatile storage.
+ */
+#define NV2080_CTRL_CMD_NVLINK_GET_LINK_FATAL_ERROR_COUNTS (0x20803009U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_LINK_FATAL_ERROR_COUNTS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NVLink 1 Fatal Error Types
+ */
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_DL_DATA_PARITY 0U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_DL_CTRL_PARITY 1U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_PROTOCOL 2U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_OVERFLOW 3U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_RAM_DATA_PARITY 4U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_RAM_HDR_PARITY 5U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_RESP 6U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_POISON 7U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_TX_RAM_DATA_PARITY 8U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_TX_RAM_HDR_PARITY 9U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_TX_CREDIT 10U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_DL_FLOW_CTRL_PARITY 11U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_DL_HDR_PARITY 12U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_RECOVERY_LONG 13U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_FAULT_RAM 14U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_FAULT_INTERFACE 15U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_FAULT_SUBLINK_CHANGE 16U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_RX_FAULT_SUBLINK_CHANGE 17U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_RX_FAULT_DL_PROTOCOL 18U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_LTSSM_FAULT 19U
+
+/*
+ * NVLink 2 Fatal Error Types
+ */
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DL_DATA_PARITY 0U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DL_CTRL_PARITY 1U
+// No direct equivalent to: TL_RX_PROTOCOL 2
+// No direct equivalent to: TL_RX_OVERFLOW 3
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RAM_DATA_PARITY 4U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RAM_HDR_PARITY 5U
+// No direct equivalent to: TL_RX_RESP 6
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DATA_POISONED_PKT_RCVD 7U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_RAM_DATA_PARITY 8U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_RAM_HDR_PARITY 9U
+// No direct equivalent to: TL_TX_CREDIT 10
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DL_FLOW_CONTROL_PARITY 11U
+// No direct equivalent to: TL_DL_HDR_PARITY 12
+// Identical to NVLink 1: DL_TX_RECOVERY_LONG 13
+// Identical to NVLink 1: DL_TX_FAULT_RAM 14
+// Identical to NVLink 1: DL_TX_FAULT_INTERFACE 15
+// Identical to NVLink 1: DL_TX_FAULT_SUBLINK_CHANGE 16
+// Identical to NVLink 1: DL_RX_FAULT_SUBLINK_CHANGE 17
+// Identical to NVLink 1: DL_RX_FAULT_DL_PROTOCOL 18
+// Identical to NVLink 1: DL_LTSSM_FAULT 19
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DL_HDR_PARITY 20U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_AE_FLIT_RCVD 21U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_BE_FLIT_RCVD 22U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_ADDR_ALIGN 23U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_PKT_LEN 24U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_CMD_ENC 25U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_DAT_LEN_ENC 26U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_ADDR_TYPE 27U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_RSP_STATUS 28U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_PKT_STATUS 29U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_CACHE_ATTR_ENC_IN_PROBE_REQ 30U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_CACHE_ATTR_ENC_IN_PROBE_RESP 31U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DAT_LEN_GT_ATOMIC_REQ_MAX_SIZE 32U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DAT_LEN_GT_RMW_REQ_MAX_SIZE 33U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DAT_LEN_LT_ATR_RESP_MIN_SIZE 34U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_PO_FOR_CACHE_ATTR 35U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_COMPRESSED_RESP 36U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RESP_STATUS_TARGET 37U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RESP_STATUS_UNSUPPORTED_REQUEST 38U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_HDR_OVERFLOW 39U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DATA_OVERFLOW 40U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_STOMPED_PKT_RCVD 41U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_CORRECTABLE_INTERNAL 42U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_UNSUPPORTED_VC_OVERFLOW 43U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_UNSUPPORTED_NVLINK_CREDIT_RELEASE 44U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_UNSUPPORTED_NCISOC_CREDIT_RELEASE 45U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_HDR_CREDIT_OVERFLOW 46U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DATA_CREDIT_OVERFLOW 47U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DL_REPLAY_CREDIT_OVERFLOW 48U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_UNSUPPORTED_VC_OVERFLOW 49U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_STOMPED_PKT_SENT 50U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DATA_POISONED_PKT_SENT 51U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_RESP_STATUS_TARGET 52U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_RESP_STATUS_UNSUPPORTED_REQUEST 53U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_RX_RAM_DATA_PARITY 54U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_RX_RAM_HDR_PARITY 55U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_TX_RAM_DATA_PARITY 56U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_TX_RAM_HDR_PARITY 57U
+
+/*
+ * NVLink 3 Fatal Error Types
+ */
+// Identical to NVLink 2: TLC_RX_DL_DATA_PARITY 0
+// Identical to NVLink 2: TLC_RX_DL_CTRL_PARITY 1
+// No direct equivalent to: TL_RX_PROTOCOL 2
+// No direct equivalent to: TL_RX_OVERFLOW 3
+// No direct equivalent to: TLC_RX_RAM_DATA_PARITY 4
+// No direct equivalent to: RX_RAM_HDR_PARITY 5
+// No direct equivalent to: TL_RX_RESP 6
+// No direct equivalent to: TLC_RX_DATA_POISONED_PKT_RCVD 7
+// No direct equivalent to: TLC_TX_RAM_DATA_PARITY 8
+// No direct equivalent to: TLC_TX_RAM_HDR_PARITY 9
+// No direct equivalent to: TL_TX_CREDIT 10
+// Identical to NVLink 2: TLC_TX_DL_FLOW_CONTROL_PARITY 11
+// No direct equivalent to: TL_DL_HDR_PARITY 12
+// No direct equivalent to: DL_TX_RECOVERY_LONG 13
+// Identical to NVLink 1: DL_TX_FAULT_RAM 14
+// Identical to NVLink 1: DL_TX_FAULT_INTERFACE 15
+// Identical to NVLink 1: DL_TX_FAULT_SUBLINK_CHANGE 16
+// Identical to NVLink 1: DL_RX_FAULT_SUBLINK_CHANGE 17
+// Identical to NVLink 1: DL_RX_FAULT_DL_PROTOCOL 18
+// No direct equivalent to: DL_LTSSM_FAULT 19
+// Identical to NVLink 2: TLC_RX_DL_HDR_PARITY 20
+// Identical to NVLink 2: TLC_RX_INVALID_AE_FLIT_RCVD 21
+// Identical to NVLink 2: TLC_RX_INVALID_BE_FLIT_RCVD 22
+// Identical to NVLink 2: TLC_RX_INVALID_ADDR_ALIGN 23
+// Identical to NVLink 2: TLC_RX_PKT_LEN 24
+// Identical to NVLink 2: TLC_RX_RSVD_CMD_ENC 25
+// Identical to NVLink 2: TLC_RX_RSVD_DAT_LEN_ENC 26
+// No direct equivalent to: TLC_RX_RSVD_ADDR_TYPE 27
+// No direct equivalent to: TLC_RX_RSVD_RSP_STATUS 28
+// Identical to NVLink 2: TLC_RX_RSVD_PKT_STATUS 29
+// Identical to NVLink 2: TLC_RX_RSVD_CACHE_ATTR_ENC_IN_PROBE_REQ 30
+// Identical to NVLink 2: TLC_RX_RSVD_CACHE_ATTR_ENC_IN_PROBE_RESP 31
+// No direct equivalent to: TLC_RX_DAT_LEN_GT_ATOMIC_REQ_MAX_SIZE 32
+// Identical to NVLink 2: TLC_RX_DAT_LEN_GT_RMW_REQ_MAX_SIZE 33
+// Identical to NVLink 2: TLC_RX_DAT_LEN_LT_ATR_RESP_MIN_SIZE 34
+// Identical to NVLink 2: TLC_RX_INVALID_PO_FOR_CACHE_ATTR 35
+// Identical to NVLink 2: TLC_RX_INVALID_COMPRESSED_RESP 36
+// No direct equivalent to: TLC_RX_RESP_STATUS_TARGET 37
+// No direct equivalent to: TLC_RX_RESP_STATUS_UNSUPPORTED_REQUEST 38
+// Identical to NVLink 2: TLC_RX_HDR_OVERFLOW 39
+// Identical to NVLink 2: TLC_RX_DATA_OVERFLOW 40
+// Identical to NVLink 2: TLC_RX_STOMPED_PKT_RCVD 41
+// No direct equivalent to: TLC_RX_CORRECTABLE_INTERNAL 42
+// No direct equivalent to: TLC_RX_UNSUPPORTED_VC_OVERFLOW 43
+// No direct equivalent to: TLC_RX_UNSUPPORTED_NVLINK_CREDIT_RELEASE 44
+// No direct equivalent to: TLC_RX_UNSUPPORTED_NCISOC_CREDIT_RELEASE 45
+// No direct equivalent to: TLC_TX_HDR_CREDIT_OVERFLOW 46
+// No direct equivalent to: TLC_TX_DATA_CREDIT_OVERFLOW 47
+// No direct equivalent to: TLC_TX_DL_REPLAY_CREDIT_OVERFLOW 48
+// No direct equivalent to: TLC_TX_UNSUPPORTED_VC_OVERFLOW 49
+// No direct equivalent to: TLC_TX_STOMPED_PKT_SENT 50
+// No direct equivalent to: TLC_TX_DATA_POISONED_PKT_SENT 51
+// No direct equivalent to: TLC_TX_RESP_STATUS_TARGET 52
+// No direct equivalent to: TLC_TX_RESP_STATUS_UNSUPPORTED_REQUEST 53
+// No direct equivalent to: MIF_RX_RAM_DATA_PARITY 54
+// No direct equivalent to: MIF_RX_RAM_HDR_PARITY 55
+// No direct equivalent to: MIF_TX_RAM_DATA_PARITY 56
+// No direct equivalent to: MIF_TX_RAM_HDR_PARITY 57
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_COLLAPSED_RESPONSE 58U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_NCISOC_HDR_ECC_DBE 59U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_NCISOC_PARITY 60U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_LTSSM_FAULT_UP 61U
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_LTSSM_FAULT_DOWN 62U
+
+#define NV2080_CTRL_NVLINK_NUM_FATAL_ERROR_TYPES 63U
+
+#define NV2080_CTRL_NVLINK_IS_FATAL_ERROR_COUNT_VALID(count, supportedCounts) \
+    (!!((supportedCounts) & NVBIT64(count)))
+
+#define NV2080_CTRL_NVLINK_GET_LINK_FATAL_ERROR_COUNTS_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV2080_CTRL_NVLINK_GET_LINK_FATAL_ERROR_COUNTS_PARAMS {
+    NvU32 linkId;
+    NV_DECLARE_ALIGNED(NvU64 supportedCounts, 8);
+    NvU8  fatalErrorCounts[NV2080_CTRL_NVLINK_NUM_FATAL_ERROR_TYPES];
+} NV2080_CTRL_NVLINK_GET_LINK_FATAL_ERROR_COUNTS_PARAMS;
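+/*
+ * Illustrative sketch (not part of this interface): walking the per-type
+ * fatal error counts and skipping indices the link does not support, using
+ * the NV2080_CTRL_NVLINK_IS_FATAL_ERROR_COUNT_VALID helper above.
+ * rmControl() and reportFatalCount() are hypothetical.
+ *
+ *     NV2080_CTRL_NVLINK_GET_LINK_FATAL_ERROR_COUNTS_PARAMS p = { 0 };
+ *     NvU32 i;
+ *     p.linkId = 0;
+ *     if (rmControl(hSubdevice, NV2080_CTRL_CMD_NVLINK_GET_LINK_FATAL_ERROR_COUNTS,
+ *                   &p, sizeof(p)) == NV_OK)
+ *     {
+ *         for (i = 0; i < NV2080_CTRL_NVLINK_NUM_FATAL_ERROR_TYPES; i++)
+ *         {
+ *             if (!NV2080_CTRL_NVLINK_IS_FATAL_ERROR_COUNT_VALID(i, p.supportedCounts))
+ *                 continue; // this error type is not tracked on this link
+ *             reportFatalCount(i, p.fatalErrorCounts[i]); // hypothetical consumer
+ *         }
+ *     }
+ */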
+/*
+ * NV2080_CTRL_CMD_NVLINK_GET_LINK_NONFATAL_ERROR_RATES
+ *
+ * This command queries recent non-fatal error rates for the given link.
+ *
+ * The error rates specify the maximum number of errors per minute recorded
+ * for the given link within a 24-hour period for daily maximums or a 30-day
+ * period for monthly maximums.
+ *
+ * [in] linkId
+ *     This parameter specifies the link to get the nonfatal error information
+ *     for.
+ *
+ * [out] numDailyMaxNonfatalErrorRates
+ *     This parameter returns the number of valid nonfatal error rate entries
+ *     in the dailyMaxNonfatalErrorRates parameter.
+ *
+ * [out] dailyMaxNonfatalErrorRates
+ *     This parameter returns maximum nonfatal error rate entries recorded
+ *     over the last few 24-hour periods. For example, index 0 contains the
+ *     maximum nonfatal error rate recorded in the current day, index 1
+ *     contains the maximum nonfatal error rate recorded yesterday, etc.
+ *
+ * [out] numMonthlyMaxNonfatalErrorRates
+ *     This parameter returns the number of valid nonfatal error rate entries
+ *     in the monthlyMaxNonfatalErrorRates parameter.
+ *
+ * [out] monthlyMaxNonfatalErrorRates
+ *     This parameter returns maximum nonfatal error rate entries recorded
+ *     over the last few 30-day periods. For example, index 0 contains the
+ *     maximum nonfatal error rate recorded in the current month, index 1
+ *     contains the maximum nonfatal error rate recorded last month, etc.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *     If any nonfatal error rates are valid upon return.
+ *   NV_ERR_INVALID_ARGUMENT
+ *     If the linkId parameter does not specify a valid link.
+ *   NV_ERR_NOT_SUPPORTED
+ *     If NVLINK is not supported on this GPU or NVLINK nonfatal error rates
+ *     are not recorded in non-volatile storage.
+ */
+#define NV2080_CTRL_CMD_NVLINK_GET_LINK_NONFATAL_ERROR_RATES (0x2080300aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_LINK_NONFATAL_ERROR_RATES_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_NVLINK_NONFATAL_ERROR_RATE {
+    NvU32 errorsPerMinute;
+    NvU32 timestamp;
+} NV2080_CTRL_NVLINK_NONFATAL_ERROR_RATE;
+
+#define NV2080_CTRL_NVLINK_NONFATAL_ERROR_RATE_ENTRIES 5U
+
+#define NV2080_CTRL_NVLINK_GET_LINK_NONFATAL_ERROR_RATES_PARAMS_MESSAGE_ID (0xAU)
+
+typedef struct NV2080_CTRL_NVLINK_GET_LINK_NONFATAL_ERROR_RATES_PARAMS {
+    NvU32 linkId;
+    NvU32 numDailyMaxNonfatalErrorRates;
+    NV2080_CTRL_NVLINK_NONFATAL_ERROR_RATE dailyMaxNonfatalErrorRates[NV2080_CTRL_NVLINK_NONFATAL_ERROR_RATE_ENTRIES];
+    NvU32 numMonthlyMaxNonfatalErrorRates;
+    NV2080_CTRL_NVLINK_NONFATAL_ERROR_RATE monthlyMaxNonfatalErrorRates[NV2080_CTRL_NVLINK_NONFATAL_ERROR_RATE_ENTRIES];
+} NV2080_CTRL_NVLINK_GET_LINK_NONFATAL_ERROR_RATES_PARAMS;
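+/*
+ * Illustrative sketch (not part of this interface): only the first
+ * numDailyMaxNonfatalErrorRates / numMonthlyMaxNonfatalErrorRates entries of
+ * the returned arrays are meaningful. rmControl() and logRate() are
+ * hypothetical.
+ *
+ *     NV2080_CTRL_NVLINK_GET_LINK_NONFATAL_ERROR_RATES_PARAMS p = { 0 };
+ *     NvU32 i;
+ *     p.linkId = 0;
+ *     if (rmControl(hSubdevice, NV2080_CTRL_CMD_NVLINK_GET_LINK_NONFATAL_ERROR_RATES,
+ *                   &p, sizeof(p)) == NV_OK)
+ *     {
+ *         // index 0 is the current day, index 1 is yesterday, and so on
+ *         for (i = 0; i < p.numDailyMaxNonfatalErrorRates; i++)
+ *             logRate(p.dailyMaxNonfatalErrorRates[i].errorsPerMinute,
+ *                     p.dailyMaxNonfatalErrorRates[i].timestamp);
+ *     }
+ */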
+/*
+ * NV2080_CTRL_CMD_NVLINK_SET_ERROR_INJECTION_MODE
+ *
+ * This command sets the injection mode so that error handling and error
+ * logging software can be aware that errors cropping up on links are
+ * intentional and not due to HW failures.
+ *
+ * [in] bEnabled
+ *     This parameter specifies whether injection mode should be enabled or
+ *     disabled.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *     If injection mode is enabled or disabled according to the parameters.
+ *   NV_ERR_NOT_SUPPORTED
+ *     If NVLINK is not supported on this GPU.
+ */
+#define NV2080_CTRL_CMD_NVLINK_SET_ERROR_INJECTION_MODE (0x2080300bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SET_ERROR_INJECTION_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_NVLINK_SET_ERROR_INJECTION_MODE_PARAMS_MESSAGE_ID (0xBU)
+
+typedef struct NV2080_CTRL_NVLINK_SET_ERROR_INJECTION_MODE_PARAMS {
+    NvBool bEnabled;
+} NV2080_CTRL_NVLINK_SET_ERROR_INJECTION_MODE_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_SETUP_EOM
+ *
+ * This command passes a packed 32-bit params value to NV_PMINION_MISC_0_SCRATCH_SWRW_0
+ * and then issues an EOM DLCMD to minion for the desired link. Only one DLCMD
+ * at a time can be issued to any given link.
+ *
+ * Params Packing is specified in Minion IAS
+ */
+#define NV2080_CTRL_CMD_NVLINK_SETUP_EOM (0x2080300cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_CMD_NVLINK_SETUP_EOM_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_NVLINK_SETUP_EOM_PARAMS_MESSAGE_ID (0xCU)
+
+typedef struct NV2080_CTRL_CMD_NVLINK_SETUP_EOM_PARAMS {
+    NvU8  linkId;
+    NvU32 params;
+} NV2080_CTRL_CMD_NVLINK_SETUP_EOM_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_SET_POWER_STATE
+ *
+ * This command sets the mask of links associated with the GPU
+ * to a target power state
+ *
+ * [in] linkMask (This field will be deprecated in the future, please use links)
+ *     Mask of links that will be put to desired power state
+ *     Note: In Turing RM supports only transitions into/out of L2
+ *
+ * [in] links
+ *     Mask of links that will be put to desired power state
+ *     Note: In Turing RM supports only transitions into/out of L2
+ *
+ * [in] powerState
+ *     Target power state to which the links will transition
+ *     This can be any one of NV2080_CTRL_NVLINK_POWER_STATE_* states
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *     If all links transitioned successfully to the target state
+ *   NV_ERR_NOT_SUPPORTED
+ *     If NVLINK is not supported on the chip or if the power state
+ *     is not enabled on the chip
+ *   NV_ERR_INVALID_ARGUMENT
+ *     If any of the links in the mask is not enabled
+ *   NV_ERR_INVALID_REQUEST
+ *     If the power state transition is not supported
+ *   NV_WARN_MORE_PROCESSING_REQUIRED
+ *     Link has received the request for the power transition
+ *     The transition will happen when the remote end also agrees
+ *
+ * Note: Currently only L0->L2 and L2->L0 are supported
+ */
+#define NV2080_CTRL_CMD_NVLINK_SET_POWER_STATE (0x2080300dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SET_POWER_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_NVLINK_SET_POWER_STATE_PARAMS_MESSAGE_ID (0xDU)
+
+typedef struct NV2080_CTRL_NVLINK_SET_POWER_STATE_PARAMS {
+    NvU32 linkMask;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8);
+    NvU32 powerState;
+} NV2080_CTRL_NVLINK_SET_POWER_STATE_PARAMS;
+
+// NVLink Power States
+#define NV2080_CTRL_NVLINK_POWER_STATE_L0 (0x00U)
+#define NV2080_CTRL_NVLINK_POWER_STATE_L1 (0x01U)
+#define NV2080_CTRL_NVLINK_POWER_STATE_L2 (0x02U)
+#define NV2080_CTRL_NVLINK_POWER_STATE_L3 (0x03U)
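+/*
+ * Illustrative sketch (not part of this interface): requesting an L2 entry on
+ * links 0 and 1. Per the notes above, NV_WARN_MORE_PROCESSING_REQUIRED means
+ * the transition completes only once the remote end agrees. rmControl() is a
+ * hypothetical wrapper.
+ *
+ *     NV2080_CTRL_NVLINK_SET_POWER_STATE_PARAMS p = { 0 };
+ *     NV_STATUS status;
+ *     p.linkMask   = NVBIT(0) | NVBIT(1);
+ *     p.powerState = NV2080_CTRL_NVLINK_POWER_STATE_L2;
+ *     status = rmControl(hSubdevice, NV2080_CTRL_CMD_NVLINK_SET_POWER_STATE,
+ *                        &p, sizeof(p));
+ *     if (status == NV_WARN_MORE_PROCESSING_REQUIRED)
+ *         ; // request accepted; waiting on the remote endpoint
+ */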
+/*
+ * NV2080_CTRL_CMD_NVLINK_GET_POWER_STATE
+ *
+ * This command gets the power state of a link associated
+ * with the GPU
+ *
+ * [in] linkId
+ *     Link whose power state is being requested
+ * [out] powerState
+ *     Current power state of the link
+ *     Is any one of the NV2080_CTRL_NVLINK_POWER_STATE_* states
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *     If the power state is retrieved successfully
+ *   NV_ERR_NOT_SUPPORTED
+ *     If NVLINK is not supported on the chip
+ *   NV_ERR_INVALID_ARGUMENT
+ *     If the link is not enabled on the GPU
+ *   NV_ERR_INVALID_STATE
+ *     If the link is in an invalid state
+ */
+#define NV2080_CTRL_CMD_NVLINK_GET_POWER_STATE (0x2080300eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_POWER_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_NVLINK_GET_POWER_STATE_PARAMS_MESSAGE_ID (0xEU)
+
+typedef struct NV2080_CTRL_NVLINK_GET_POWER_STATE_PARAMS {
+    NvU32 linkId;
+    NvU32 powerState;
+} NV2080_CTRL_NVLINK_GET_POWER_STATE_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_INJECT_TLC_ERROR
+ *
+ * This command injects TLC_*_REPORT_INJECT error. An RM interrupt
+ * will be triggered after injection. Currently the injection call
+ * only deals with HW_ERR, UR_ERR, PRIV_ERR in TX_SYS and RX_LNK devices
+ *
+ * [in] linkId
+ *     Link on which the error should be injected.
+ * [in] errorType
+ *     Error type that needs to be injected.
+ * [in] device
+ *     The device this injection is intended for.
+ * [in] bBroadcast
+ *     Whether the link report error should be fired on multiple links.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *     If the injection succeeds.
+ *   NV_ERR_NOT_SUPPORTED
+ *     If the error type of NVLINK is not supported on the chip
+ */
+#define NV2080_CTRL_CMD_NVLINK_INJECT_TLC_ERROR (0x2080300fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_PARAMS_MESSAGE_ID" */
+
+typedef enum NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_DEVICE {
+    TLC_RX_LNK = 0,
+    TLC_TX_SYS = 1,
+} NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_DEVICE;
+
+typedef enum NV2080_CTRL_NVLINK_INJECT_TLC_TX_SYS_REPORT_ERROR_TYPE {
+    TX_SYS_TX_RSP_STATUS_HW_ERR = 0,
+    TX_SYS_TX_RSP_STATUS_UR_ERR = 1,
+    TX_SYS_TX_RSP_STATUS_PRIV_ERR = 2,
+} NV2080_CTRL_NVLINK_INJECT_TLC_TX_SYS_REPORT_ERROR_TYPE;
+
+typedef enum NV2080_CTRL_NVLINK_INJECT_TLC_RX_LNK_REPORT_ERROR_TYPE {
+    RX_LNK_RX_RSP_STATUS_HW_ERR = 0,
+    RX_LNK_RX_RSP_STATUS_UR_ERR = 1,
+    RX_LNK_RX_RSP_STATUS_PRIV_ERR = 2,
+} NV2080_CTRL_NVLINK_INJECT_TLC_RX_LNK_REPORT_ERROR_TYPE;
+
+typedef union NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_TYPE {
+    NV2080_CTRL_NVLINK_INJECT_TLC_TX_SYS_REPORT_ERROR_TYPE txSysErrorType;
+    NV2080_CTRL_NVLINK_INJECT_TLC_RX_LNK_REPORT_ERROR_TYPE rxLnkErrorType;
+} NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_TYPE;
+
+#define NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_PARAMS_MESSAGE_ID (0xFU)
+
+typedef struct NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_PARAMS {
+    NvU32 linkId;
+    NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_DEVICE device;
+    NvBool bBroadcast;
+    NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_TYPE errorType;
+} NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_PARAMS;
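+/*
+ * Illustrative sketch (not part of this interface): injecting an
+ * unsupported-request (UR) response-status error into the RX_LNK device on
+ * link 2; the union member used should match the device selector.
+ * rmControl() is a hypothetical wrapper.
+ *
+ *     NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_PARAMS p = { 0 };
+ *     NV_STATUS status;
+ *     p.linkId     = 2;
+ *     p.device     = TLC_RX_LNK;
+ *     p.bBroadcast = NV_FALSE;
+ *     p.errorType.rxLnkErrorType = RX_LNK_RX_RSP_STATUS_UR_ERR;
+ *     status = rmControl(hSubdevice, NV2080_CTRL_CMD_NVLINK_INJECT_TLC_ERROR,
+ *                        &p, sizeof(p));
+ */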
+/*
+ * NV2080_CTRL_CMD_NVLINK_GET_LINK_FOM_VALUES
+ *
+ * This command returns the per-lane Figure Of Merit (FOM) Values from a link
+ *
+ * [in] linkId
+ *     The NVLink link ID to report FOM values for
+ * [out] numLanes
+ *     This field specifies the number of lanes per link
+ * [out] figureOfMeritValues
+ *     This field contains the FOM values per lane
+ *
+ */
+#define NV2080_CTRL_CMD_NVLINK_GET_LINK_FOM_VALUES (0x20803011U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_CMD_NVLINK_GET_LINK_FOM_VALUES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_NVLINK_MAX_LANES 4U
+
+#define NV2080_CTRL_CMD_NVLINK_GET_LINK_FOM_VALUES_PARAMS_MESSAGE_ID (0x11U)
+
+typedef struct NV2080_CTRL_CMD_NVLINK_GET_LINK_FOM_VALUES_PARAMS {
+    NvU32 linkId;
+    NvU8  numLanes;
+    NvU16 figureOfMeritValues[NV2080_CTRL_NVLINK_MAX_LANES];
+} NV2080_CTRL_CMD_NVLINK_GET_LINK_FOM_VALUES_PARAMS;
+
+/*
+ * NV2080_CTRL_NVLINK_SET_NVLINK_PEER
+ *
+ * This command sets/unsets the USE_NVLINK_PEER bit for a given
+ * mask of peers
+ *
+ * [in] peerMask
+ *     Mask of Peer IDs for which USE_NVLINK_PEER needs to be updated
+ * [in] bEnable
+ *     Whether the bit needs to be set or unset
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *     If the USE_NVLINK_PEER bit was updated successfully
+ *   NV_ERR_NOT_SUPPORTED
+ *     If NVLINK is not supported on the chip, or
+ *     If unsetting USE_NVLINK_PEER bit is not supported
+ *
+ * NOTE: This is only supported on Windows
+ *
+ */
+#define NV2080_CTRL_CMD_NVLINK_SET_NVLINK_PEER (0x20803012U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SET_NVLINK_PEER_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_NVLINK_SET_NVLINK_PEER_PARAMS_MESSAGE_ID (0x12U)
+
+typedef struct NV2080_CTRL_NVLINK_SET_NVLINK_PEER_PARAMS {
+    NvU32  peerMask;
+    NvBool bEnable;
+} NV2080_CTRL_NVLINK_SET_NVLINK_PEER_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_READ_UPHY_PAD_LANE_REG
+ *
+ * This command packs the lane and addr values into NV_PMINION_MISC_0_SCRATCH_SWRW_0
+ * and then issues a READPADLANEREG DLCMD to minion for the desired link. Only one DLCMD
+ * at a time can be issued to any given link.
+ *
+ * After this command completes it is necessary to read the appropriate
+ * NV_PNVL_BR0_PAD_CTL_7_CFG_RDATA register to retrieve the results of the read.
+ * Only GV100 should read NV_PNVL_BR0_PAD_CTL_7_CFG_RDATA.
+ * From TU102 onwards, the required data is returned in phyConfigData.
+ *
+ * [in] linkId
+ *     Link whose pad lane register is being read
+ * [in] lane
+ *     Lane whose pad lane register is being read
+ * [in] addr
+ *     Address of the pad lane register to read
+ * [out] phyConfigData
+ *     Provides phyConfigAddr and laneId
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *     If the minion command completed successfully
+ *   NV_ERR_NOT_SUPPORTED
+ *     If NVLINK is not supported on the chip
+ *   NV_ERR_INVALID_ARGUMENT
+ *     If the link is not enabled on the GPU or the lane is invalid
+ *   NV_ERR_TIMEOUT
+ *     If a timeout occurred waiting for minion response
+ */
+#define NV2080_CTRL_CMD_NVLINK_READ_UPHY_PAD_LANE_REG (0x20803013U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_READ_UPHY_PAD_LANE_REG_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_NVLINK_READ_UPHY_PAD_LANE_REG_PARAMS_MESSAGE_ID (0x13U)
+
+typedef struct NV2080_CTRL_NVLINK_READ_UPHY_PAD_LANE_REG_PARAMS {
+    NvU8  linkId;
+    NvU8  lane;
+    NvU16 addr;
+    NvU32 phyConfigData;
+} NV2080_CTRL_NVLINK_READ_UPHY_PAD_LANE_REG_PARAMS;
+/*
+ * Structure to store the ECC error data.
+ * bValid
+ *     Whether the lane is valid or not
+ * eccErrorValue
+ *     Value of the error.
+ * overflowed
+ *     Whether the error counter overflowed or not
+ */
+typedef struct NV2080_CTRL_NVLINK_LANE_ERROR {
+    NvBool bValid;
+    NvU32  eccErrorValue;
+    NvBool overflowed;
+} NV2080_CTRL_NVLINK_LANE_ERROR;
+
+/*
+ * Structure to store ECC error data for Links
+ * errorLane array index corresponds to the lane number.
+ *
+ * errorLane[]
+ *     Stores the ECC error data per lane.
+ */
+typedef struct NV2080_CTRL_NVLINK_LINK_ECC_ERROR {
+    NV2080_CTRL_NVLINK_LANE_ERROR errorLane[NV2080_CTRL_NVLINK_MAX_LANES];
+    NvU32  eccDecFailed;
+    NvBool eccDecFailedOverflowed;
+} NV2080_CTRL_NVLINK_LINK_ECC_ERROR;
+
+/*
+ * NV2080_CTRL_NVLINK_GET_NVLINK_ECC_ERRORS
+ *
+ * Control to get the values of ECC ERRORS
+ *
+ * Parameters:
+ *   linkMask [IN] (This field will be deprecated in the future, please use links)
+ *     Links on which the ECC error data is requested.
+ *     A valid link/port mask, as returned by NVSWITCH_GET_INFO
+ *   links [IN]
+ *     Links on which the ECC error data is requested.
+ *     A valid link/port mask, as returned by NVSWITCH_GET_INFO
+ *   errorLink[] [OUT]
+ *     Stores the ECC error related information for each link.
+ *     errorLink array index corresponds to the link Number.
+ */
+
+#define NV2080_CTRL_NVLINK_GET_NVLINK_ECC_ERRORS_PARAMS_MESSAGE_ID (0x14U)
+
+typedef struct NV2080_CTRL_NVLINK_GET_NVLINK_ECC_ERRORS_PARAMS {
+    NvU32 linkMask;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8);
+    NV2080_CTRL_NVLINK_LINK_ECC_ERROR errorLink[NV2080_CTRL_NVLINK_MAX_ARR_SIZE];
+} NV2080_CTRL_NVLINK_GET_NVLINK_ECC_ERRORS_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_GET_NVLINK_ECC_ERRORS (0x20803014U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_NVLINK_ECC_ERRORS_PARAMS_MESSAGE_ID" */
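+/*
+ * Illustrative sketch (not part of this interface): reading per-lane ECC
+ * error data for link 0 and honoring the per-lane bValid flags. rmControl()
+ * and logEccError() are hypothetical, and errorLink[0] is assumed here to
+ * correspond to the first requested link.
+ *
+ *     NV2080_CTRL_NVLINK_GET_NVLINK_ECC_ERRORS_PARAMS p = { 0 };
+ *     NvU32 lane;
+ *     p.linkMask = NVBIT(0);
+ *     if (rmControl(hSubdevice, NV2080_CTRL_CMD_NVLINK_GET_NVLINK_ECC_ERRORS,
+ *                   &p, sizeof(p)) == NV_OK)
+ *     {
+ *         for (lane = 0; lane < NV2080_CTRL_NVLINK_MAX_LANES; lane++)
+ *         {
+ *             if (!p.errorLink[0].errorLane[lane].bValid)
+ *                 continue;
+ *             // the count may have saturated if 'overflowed' is true
+ *             logEccError(lane, p.errorLink[0].errorLane[lane].eccErrorValue);
+ *         }
+ *     }
+ */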
+// Nvlink throughput counters reading data flits in TX
+#define NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_DATA_TX 0U
+
+// Nvlink throughput counters reading data flits in RX
+#define NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_DATA_RX 1U
+
+// Nvlink throughput counters reading all flits in TX
+#define NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_RAW_TX 2U
+
+// Nvlink throughput counters reading all flits in RX
+#define NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_RAW_RX 3U
+
+#define NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_MAX 4U
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_READ_TP_COUNTERS
+ *
+ * Reads reserved monotonically increasing NVLINK throughput counters for given linkIds
+ *
+ * [in] counterMask
+ *     Mask of counter types to be queried
+ *     Built from the NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_* macros
+ * [in] linkMask (This field will be deprecated in the future, please use links)
+ *     Mask of links to be queried
+ * [in] links
+ *     Mask of links to be queried
+ * [out] value
+ *     Throughput counter value returned
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *     If command completed successfully
+ *   NV_ERR_NOT_SUPPORTED
+ *     If NVLINK is not supported on the chip
+ *   NV_ERR_INVALID_ARGUMENT
+ *     If numLinks is out-of-range or requested link is inactive
+ *
+ * Note:
+ * The following commands will be deprecated in favor of NV2080_CTRL_CMD_NVLINK_READ_TP_COUNTERS:
+ *   NV90CC_CTRL_CMD_NVLINK_GET_COUNTERS
+ *   NV2080_CTRL_CMD_NVLINK_GET_COUNTERS
+ * Other commands that will be deprecated due to the change in design:
+ *   NV90CC_CTRL_CMD_NVLINK_RESERVE_COUNTERS
+ *   NV90CC_CTRL_CMD_NVLINK_RELEASE_COUNTERS
+ *   NV90CC_CTRL_CMD_NVLINK_SET_COUNTERS_FROZEN
+ *   NV90CC_CTRL_CMD_NVLINK_GET_TL_COUNTER_CFG
+ *   NV90CC_CTRL_CMD_NVLINK_SET_TL_COUNTER_CFG
+ *   NV90CC_CTRL_CMD_NVLINK_CLEAR_COUNTERS
+ *
+ * Also, note that there is no counter overflow handling for these calls.
+ * These counters count flits; assuming 25 GB/s of bandwidth per link with
+ * traffic flowing continuously, an overflow would take 174 years. It is
+ * therefore reasonable to assume an overflow will not occur during GPU
+ * operation, given that the counters are reset at system reboot or GPU
+ * reset. Counters are 63-bit.
+ */
+
+typedef struct NV2080_CTRL_NVLINK_READ_TP_COUNTERS_VALUES {
+    NV_DECLARE_ALIGNED(NvU64 value[NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_MAX], 8);
+} NV2080_CTRL_NVLINK_READ_TP_COUNTERS_VALUES;
+
+#define NV2080_CTRL_NVLINK_READ_TP_COUNTERS_PARAMS_MESSAGE_ID (0x15U)
+
+typedef struct NV2080_CTRL_NVLINK_READ_TP_COUNTERS_PARAMS {
+    NvU16 counterMask;
+    NV_DECLARE_ALIGNED(NvU64 linkMask, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_READ_TP_COUNTERS_VALUES counters[NV2080_CTRL_NVLINK_MAX_ARR_SIZE], 8);
+} NV2080_CTRL_NVLINK_READ_TP_COUNTERS_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_READ_TP_COUNTERS (0x20803015U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_READ_TP_COUNTERS_PARAMS_MESSAGE_ID" */
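+/*
+ * Illustrative sketch (not part of this interface): because the counters are
+ * monotonic flit counts, average throughput is computed from the delta of two
+ * reads over a known interval. rmControl() and sleepMs() are hypothetical,
+ * and counters[0] is assumed here to correspond to the first requested link.
+ *
+ *     NV2080_CTRL_NVLINK_READ_TP_COUNTERS_PARAMS a = { 0 }, b;
+ *     NvU64 flitsPerSec;
+ *     a.counterMask = NVBIT(NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_DATA_TX);
+ *     a.linkMask    = NVBIT64(0);
+ *     b = a;
+ *     rmControl(hSubdevice, NV2080_CTRL_CMD_NVLINK_READ_TP_COUNTERS, &a, sizeof(a));
+ *     sleepMs(1000);
+ *     rmControl(hSubdevice, NV2080_CTRL_CMD_NVLINK_READ_TP_COUNTERS, &b, sizeof(b));
+ *     flitsPerSec = b.counters[0].value[NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_DATA_TX] -
+ *                   a.counters[0].value[NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_DATA_TX];
+ */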
+/*
+ * NV2080_CTRL_CMD_NVLINK_LOCK_LINK_POWER_STATE
+ *
+ * This command locks the link power state so that RM doesn't modify the state
+ * of the link during pstate switch.
+ *
+ * [in] linkMask
+ *     Links for which power mode needs to be locked.
+ */
+#define NV2080_CTRL_CMD_NVLINK_LOCK_LINK_POWER_STATE (0x20803016U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_CMD_NVLINK_LOCK_LINK_POWER_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_NVLINK_LOCK_LINK_POWER_STATE_PARAMS_MESSAGE_ID (0x16U)
+
+typedef struct NV2080_CTRL_CMD_NVLINK_LOCK_LINK_POWER_STATE_PARAMS {
+    NvBool bLockPowerMode;
+} NV2080_CTRL_CMD_NVLINK_LOCK_LINK_POWER_STATE_PARAMS;
+
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_COUNT_TX_NVHS 0U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_COUNT_TX_EIGHTH 1U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_COUNT_TX_OTHER 2U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_NUM_TX_LP_ENTER 3U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_NUM_TX_LP_EXIT 4U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_COUNT_TX_SLEEP 5U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_HS_TIME 6U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_LOCAL_FULL_BW_EXIT_TIME 7U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_LOCAL_LP_ENTRY_TIME 8U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_LOCAL_LP_EXIT_TIME 9U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_LOCAL_FULL_BW_ENTRY_TIME 10U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_REMOTE_FULL_BW_EXIT_TIME 11U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_REMOTE_LP_ENTRY_TIME 12U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_REMOTE_LP_EXIT_TIME 13U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_REMOTE_FULL_BW_ENTRY_TIME 14U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_OTHER_STATE_TIME 15U
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_MAX_COUNTERS 16U
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_GET_LP_COUNTERS
+ *
+ * Reads NVLINK low power counters for given linkId
+ *
+ * [in] linkId
+ *     ID of the link to be queried
+ * [in,out] counterValidMask
+ *     Mask of valid counters
+ * [out] counterValues
+ *     Low power counter values returned
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *     If command completed successfully
+ *   NV_ERR_NOT_SUPPORTED
+ *     If NVLINK is not supported on the chip
+ *   NV_ERR_INVALID_ARGUMENT
+ *     If linkId is out-of-range or requested link is inactive
+ */
+
+#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_PARAMS_MESSAGE_ID (0x18U)
+
+typedef struct NV2080_CTRL_NVLINK_GET_LP_COUNTERS_PARAMS {
+    NvU32 linkId;
+    NvU32 counterValidMask;
+    NvU32 counterValues[NV2080_CTRL_NVLINK_GET_LP_COUNTERS_MAX_COUNTERS];
+} NV2080_CTRL_NVLINK_GET_LP_COUNTERS_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_GET_LP_COUNTERS (0x20803018U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_LP_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_NVLINK_CLEAR_LP_COUNTERS (0x20803052U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8 | 0x52)" */
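+/*
+ * Illustrative sketch (not part of this interface): counterValidMask is
+ * in/out, so a counter value should be consumed only when its bit survives
+ * in the returned mask. rmControl() is a hypothetical wrapper.
+ *
+ *     NV2080_CTRL_NVLINK_GET_LP_COUNTERS_PARAMS p = { 0 };
+ *     NvU32 lpEntries;
+ *     p.linkId = 0;
+ *     p.counterValidMask = NVBIT(NV2080_CTRL_NVLINK_GET_LP_COUNTERS_NUM_TX_LP_ENTER) |
+ *                          NVBIT(NV2080_CTRL_NVLINK_GET_LP_COUNTERS_NUM_TX_LP_EXIT);
+ *     if (rmControl(hSubdevice, NV2080_CTRL_CMD_NVLINK_GET_LP_COUNTERS,
+ *                   &p, sizeof(p)) == NV_OK &&
+ *         (p.counterValidMask & NVBIT(NV2080_CTRL_NVLINK_GET_LP_COUNTERS_NUM_TX_LP_ENTER)))
+ *     {
+ *         lpEntries = p.counterValues[NV2080_CTRL_NVLINK_GET_LP_COUNTERS_NUM_TX_LP_ENTER];
+ *     }
+ */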
+//
+// Set the near end loopback mode using the following
+// Currently, three modes - NEA, NEDR, NEDW
+//
+#define NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_DEFAULT (0x00000000)
+#define NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_NEA (0x00000001)
+#define NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_NEDR (0x00000002)
+#define NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_NEDW (0x00000003)
+#define NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_PHY_REMOTE (0x00000004)
+#define NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_PHY_LOCAL (0x00000005)
+#define NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_EXT_LOCAL (0x00000006)
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_SET_LOOPBACK_MODE
+ *
+ * Generic NvLink callback for MODS
+ *
+ * [in] linkId
+ *     ID of the link to be used
+ * [in] loopbackMode
+ *     This value decides which loopback mode needs to be
+ *     set on the specified link.
+ *     Modes are NEA / NEDR / NEDW
+ */
+#define NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_PARAMS_MESSAGE_ID (0x23U)
+
+typedef struct NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_PARAMS {
+    NvU32 linkId;
+    NvU8  loopbackMode;
+} NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_SET_LOOPBACK_MODE (0x20803023U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_PARAMS_MESSAGE_ID" */
+
+//
+// Read Refresh counter - the pass/fail occurrences
+//
+
+typedef struct NV2080_CTRL_NVLINK_PHY_REFRESH_STATUS_INFO {
+    // requested links or not
+    NvBool bValid;
+
+    // counters
+    NvU16 passCount;
+    NvU16 failCount;
+} NV2080_CTRL_NVLINK_PHY_REFRESH_STATUS_INFO;
+
+#define NV2080_CTRL_NVLINK_MAX_LINK_COUNT 32
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_GET_REFRESH_COUNTERS
+ *
+ *
+ * [in] linkMask (This field will be deprecated in the future, please use links)
+ *     Specifies for which links we want to read the counters
+ * [in] links
+ *     Specifies for which links we want to read the counters
+ * [out] refreshCountPass
+ *     Count of the number of times PHY refresh passed
+ * [out] refreshCountFail
+ *     Count of the number of times PHY refresh failed
+ */
+#define NV2080_CTRL_NVLINK_GET_REFRESH_COUNTERS_PARAMS_MESSAGE_ID (0x28U)
+
+typedef struct NV2080_CTRL_NVLINK_GET_REFRESH_COUNTERS_PARAMS {
+    NvU32 linkMask;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8);
+    NV2080_CTRL_NVLINK_PHY_REFRESH_STATUS_INFO refreshCount[NV2080_CTRL_NVLINK_MAX_LINK_COUNT];
+} NV2080_CTRL_NVLINK_GET_REFRESH_COUNTERS_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_GET_REFRESH_COUNTERS (0x20803028U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_REFRESH_COUNTERS_PARAMS_MESSAGE_ID" */
+
+//
+// Clear Refresh counter - the pass/fail occurrences
+//
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_CLEAR_REFRESH_COUNTERS
+ *
+ *
+ * [in] linkMask (This field will be deprecated in the future, please use links)
+ *     Specifies for which links we want to clear the counters
+ * [in] links
+ *     Specifies for which links we want to clear the counters
+ */
+#define NV2080_CTRL_NVLINK_CLEAR_REFRESH_COUNTERS_PARAMS_MESSAGE_ID (0x29U)
+
+typedef struct NV2080_CTRL_NVLINK_CLEAR_REFRESH_COUNTERS_PARAMS {
+    NvU32 linkMask;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8);
+} NV2080_CTRL_NVLINK_CLEAR_REFRESH_COUNTERS_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_CLEAR_REFRESH_COUNTERS (0x20803029U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_CLEAR_REFRESH_COUNTERS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_GET_SET_NVSWITCH_FLA_ADDR
+ *
+ * Get/Set NVSwitch FLA address
+ *
+ * [in] bGet
+ *     Whether to get or set the NVSwitch FLA address
+ * [in/out] addr
+ *     Address that is to be set or retrieved.
+ */
+#define NV2080_CTRL_NVLINK_GET_SET_NVSWITCH_FLA_ADDR_PARAMS_MESSAGE_ID (0x38U)
+
+typedef struct NV2080_CTRL_NVLINK_GET_SET_NVSWITCH_FLA_ADDR_PARAMS {
+    NvBool bGet;
+    NV_DECLARE_ALIGNED(NvU64 addr, 8);
+} NV2080_CTRL_NVLINK_GET_SET_NVSWITCH_FLA_ADDR_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_GET_SET_NVSWITCH_FLA_ADDR (0x20803038) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_SET_NVSWITCH_FLA_ADDR_PARAMS_MESSAGE_ID" */
+/*
+ * NV2080_CTRL_CMD_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO
+ *
+ * Syncs the different link masks and vbios defined values between CPU-RM and GSP-RM
+ *
+ * [in] discoveredLinks (This field will be deprecated in the future, please use discoveredLinkMasks)
+ *     Mask of links discovered from IOCTRLs
+ *
+ * [in] discoveredLinkMasks
+ *     Mask of links discovered from IOCTRLs
+ *
+ * [in] connectedLinksMask (This field will be deprecated in the future, please use connectedLinks)
+ *     Mask of links which are connected (remote present)
+ *
+ * [in] connectedLinks
+ *     Mask of links which are connected (remote present)
+ *
+ * [in] bridgeSensableLinks (This field will be deprecated in the future, please use bridgeSensableLinkMasks)
+ *     Mask of links whose remote endpoint presence can be sensed
+ *
+ * [in] bridgeSensableLinkMasks
+ *     Mask of links whose remote endpoint presence can be sensed
+ *
+ * [in] bridgedLinks (This field will be deprecated in the future, please use bridgedLinkMasks)
+ *     Mask of links which are connected (remote present)
+ *     Same as connectedLinksMask, but also tracks the case where link
+ *     is connected but marginal and could not initialize
+ *
+ * [in] bridgedLinkMasks
+ *     Mask of links which are connected (remote present)
+ *     Same as connectedLinks, but also tracks the case where link
+ *     is connected but marginal and could not initialize
+ *
+ * [out] initDisabledLinksMask (This field will be deprecated in the future, please use initDisabledLinks)
+ *     Mask of links for which initialization is disabled
+ *
+ * [out] initDisabledLinks
+ *     Mask of links for which initialization is disabled
+ *
+ * [out] vbiosDisabledLinkMask (This field will be deprecated in the future, please use vbiosDisabledLinks)
+ *     Mask of links disabled in the VBIOS
+ *
+ * [out] vbiosDisabledLinks
+ *     Mask of links disabled in the VBIOS
+ *
+ * [out] initializedLinks (This field will be deprecated in the future, please use initializedLinkMasks)
+ *     Mask of initialized links
+ *
+ * [out] initializedLinkMasks
+ *     Mask of initialized links
+ *
+ * [out] bEnableTrainingAtLoad
+ *     Whether the links should be trained to active during driver load
+ *
+ * [out] bEnableSafeModeAtLoad
+ *     Whether the links should be initialized to swcfg during driver load
+ */
+
+#define NV2080_CTRL_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO_PARAMS_MESSAGE_ID (0x39U)
+
+typedef struct NV2080_CTRL_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 discoveredLinks, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK discoveredLinkMasks, 8);
+    NvU32 connectedLinksMask;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK connectedLinks, 8);
+    NV_DECLARE_ALIGNED(NvU64 bridgeSensableLinks, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK bridgeSensableLinkMasks, 8);
+    NvU32 bridgedLinks;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK bridgedLinkMasks, 8);
+    NvU32 initDisabledLinksMask;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK initDisabledLinks, 8);
+    NV_DECLARE_ALIGNED(NvU64 vbiosDisabledLinkMask, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK vbiosDisabledLinks, 8);
+    NvU32 initializedLinks;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK initializedLinkMasks, 8);
+    NvBool bEnableTrainingAtLoad;
+    NvBool bEnableSafeModeAtLoad;
+} NV2080_CTRL_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO (0x20803039U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_ENABLE_LINKS
+ *
+ * Enable pre-topology setup on the mask of enabled links
+ * This command accepts no parameters.
+ */
+#define NV2080_CTRL_CMD_NVLINK_ENABLE_LINKS (0x2080303aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | 0x3a" */
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_PROCESS_INIT_DISABLED_LINKS
+ *
+ * Process the init disabled NVLinks and filter those out
+ *
+ * [in/out] initDisabledLinksMask (This field will be deprecated in the future, please use initDisabledLinks)
+ *     Mask of links initdisabled on a given GPU
+ *
+ * [in/out] initDisabledLinks
+ *     Mask of links initdisabled on a given GPU
+ *
+ * [in] bSkipHwNvlinkDisable
+ *     Whether to consider skipping the HW initdisable links
+ */
+#define NV2080_CTRL_NVLINK_PROCESS_INIT_DISABLED_LINKS_PARAMS_MESSAGE_ID (0x3bU)
+
+typedef struct NV2080_CTRL_NVLINK_PROCESS_INIT_DISABLED_LINKS_PARAMS {
+    NvU32 initDisabledLinksMask;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK initDisabledLinks, 8);
+    NvBool bSkipHwNvlinkDisable;
+} NV2080_CTRL_NVLINK_PROCESS_INIT_DISABLED_LINKS_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_PROCESS_INIT_DISABLED_LINKS (0x2080303bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PROCESS_INIT_DISABLED_LINKS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_EOM_CONTROL
+ *
+ * cmd [IN] enum identifying the EOM related command for the driver to process
+ * link [IN] linkId
+ * params [IN] NvU32 word that is written into NV_PMINION_SCRATCH_SWRW_0 before calling CONFIGEOM dlcmd
+ * measurements [OUT] output of EOM
+ *
+ * Params Packing is specified in Minion IAS
+ */
+
+typedef enum NV2080_CTRL_CMD_NVLINK_EOM_CONTROL_PARAMS_COMMAND {
+    NVLINK_EOM_CONTROL_START_EOM = 0,
+    NVLINK_EOM_CONTROL_END_EOM = 1,
+    NVLINK_EOM_CONTROL_CONFIG_EOM = 2,
+    NVLINK_EOM_CONTROL_FULL_EOM_SEQUENCE = 3,
+} NV2080_CTRL_CMD_NVLINK_EOM_CONTROL_PARAMS_COMMAND;
+
+typedef struct NV2080_CTRL_NVLINK_EOM_MEASUREMENT {
+    NvU8 upper;
+    NvU8 middle;
+    NvU8 lower;
+    NvU8 composite;
+} NV2080_CTRL_NVLINK_EOM_MEASUREMENT;
+
+#define NV2080_CTRL_NVLINK_EOM_CONTROL_PARAMS_MESSAGE_ID (0x3cU)
+typedef struct NV2080_CTRL_NVLINK_EOM_CONTROL_PARAMS {
+    NV2080_CTRL_CMD_NVLINK_EOM_CONTROL_PARAMS_COMMAND cmd;
+    NvU32 linkId;
+    NvU32 params;
+    NV2080_CTRL_NVLINK_EOM_MEASUREMENT measurements[NV2080_CTRL_NVLINK_MAX_LANES];
+} NV2080_CTRL_NVLINK_EOM_CONTROL_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_EOM_CONTROL (0x2080303c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_EOM_CONTROL_PARAMS_MESSAGE_ID" */
+
+/*!
+ * Inband Received Data
+ */
+#define NV2080_CTRL_NVLINK_INBAND_MAX_DATA_SIZE 5120
+#define NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_PARAMS_MESSAGE_ID (0x3dU)
+
+typedef struct NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_PARAMS {
+    NvU8  data[NV2080_CTRL_NVLINK_INBAND_MAX_DATA_SIZE];
+    NvU32 dataSize;
+} NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_PARAMS;
+
+#define NV2080_CTRL_CMD_READ_NVLINK_INBAND_RESPONSE (0x2080303d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_NVLINK_L1_THRESHOLD_VALUE_DEFAULT (0xFFFFFFFF)
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_SET_L1_THRESHOLD
+ *
+ * This command is used to set the L1 threshold value.
+ * A value of NV2080_CTRL_NVLINK_L1_THRESHOLD_VALUE_DEFAULT
+ * will reset the L1 Threshold to the default values.
+ *
+ * [in] l1Threshold
+ *     Used to set the L1 threshold value
+ *
+ * [in] l1ExitThreshold
+ *     Used to set the L1 Exit threshold value
+ */
+#define NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS_MESSAGE_ID (0x3eU)
+
+typedef struct NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS {
+    NvU32 l1Threshold;
+    NvU32 l1ExitThreshold;
+} NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_SET_L1_THRESHOLD (0x2080303eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_GET_L1_THRESHOLD
+ *
+ * This command is used to get the L1 threshold value
+ *
+ * [out] l1Threshold
+ *     Used to get the L1 threshold value
+ *
+ * [out] l1ExitThreshold
+ *     Used to get the L1 Exit threshold value
+ */
+#define NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS_MESSAGE_ID (0x3fU)
+
+typedef struct NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS {
+    NvU32 l1Threshold;
+    NvU32 l1ExitThreshold;
+} NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_GET_L1_THRESHOLD (0x2080303fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS_MESSAGE_ID" */
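+/*
+ * Illustrative sketch (not part of this interface): resetting both L1
+ * thresholds to their defaults, then reading the resulting values back.
+ * rmControl() is a hypothetical wrapper.
+ *
+ *     NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS setP = { 0 };
+ *     NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS getP = { 0 };
+ *     setP.l1Threshold     = NV2080_CTRL_NVLINK_L1_THRESHOLD_VALUE_DEFAULT;
+ *     setP.l1ExitThreshold = NV2080_CTRL_NVLINK_L1_THRESHOLD_VALUE_DEFAULT;
+ *     rmControl(hSubdevice, NV2080_CTRL_CMD_NVLINK_SET_L1_THRESHOLD, &setP, sizeof(setP));
+ *     rmControl(hSubdevice, NV2080_CTRL_CMD_NVLINK_GET_L1_THRESHOLD, &getP, sizeof(getP));
+ *     // getP.l1Threshold / getP.l1ExitThreshold now hold the effective values
+ */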
+/*
+ * NV2080_CTRL_CMD_NVLINK_INBAND_SEND_DATA
+ *
+ * RPC for sending Inband data
+ *
+ * [in] buffer[]
+ *     Data to be sent over inband
+ * [in] dataSize
+ *     Size of valid data in the buffer array
+ */
+#define NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS_MESSAGE_ID (0x40U)
+
+typedef struct NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS {
+    NvU8  buffer[NV2080_CTRL_NVLINK_INBAND_MAX_DATA_SIZE];
+    NvU32 dataSize;
+} NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_INBAND_SEND_DATA (0x20803040U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS_MESSAGE_ID" */
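+/*
+ * Illustrative sketch (not part of this interface): sending an inband message
+ * and later draining a received response via
+ * NV2080_CTRL_CMD_READ_NVLINK_INBAND_RESPONSE. msg/msgLen are caller-supplied
+ * and rmControl() is a hypothetical wrapper.
+ *
+ *     NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS tx = { 0 };
+ *     NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_PARAMS rx = { 0 };
+ *     if (msgLen <= NV2080_CTRL_NVLINK_INBAND_MAX_DATA_SIZE)
+ *     {
+ *         memcpy(tx.buffer, msg, msgLen);   // or the platform's portMemCopy
+ *         tx.dataSize = msgLen;
+ *         rmControl(hSubdevice, NV2080_CTRL_CMD_NVLINK_INBAND_SEND_DATA, &tx, sizeof(tx));
+ *     }
+ *     // later, typically after a notification that a response arrived:
+ *     rmControl(hSubdevice, NV2080_CTRL_CMD_READ_NVLINK_INBAND_RESPONSE, &rx, sizeof(rx));
+ *     // rx.data[0 .. rx.dataSize-1] holds the received payload
+ */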
+/*
+ * NV2080_CTRL_CMD_NVLINK_IS_GPU_DEGRADED
+ *
+ * RPC for getting the GPU degraded status upon link error
+ *
+ * [in] linkId
+ *     Id of the link on which the error occurred
+ * [out] bIsGpuDegraded
+ *     Boolean tracking whether the corresponding GPU is degraded or not
+ */
+#define NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS_MESSAGE_ID (0x41U)
+
+typedef struct NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS {
+    NvU32  linkId;
+    NvBool bIsGpuDegraded;
+} NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_IS_GPU_DEGRADED (0x20803041U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_DIRECT_CONNECT_CHECK
+ *
+ * This command is used to check for missing
+ * bridge/nvlink for direct connect GPU
+ *
+ * [out] bIsEnoughNvLink
+ *     Checks whether the number of active NVLinks meets the minimum requirements.
+ * [out] numBridge
+ *     Number of NVLink bridges
+ * [out] bridgePresenceMask
+ *     Bit mask of NVLink bridges' presence
+ */
+#define NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS_MESSAGE_ID (0x42U)
+
+typedef struct NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS {
+    NvBool bIsEnoughNvLink;
+    NvU32  numBridge;
+    NvU32  bridgePresenceMask;
+} NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_DIRECT_CONNECT_CHECK (0x20803042U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_POST_FAULT_UP
+ *
+ * This command triggers the next sequence after the 10 sec delay
+ *
+ * [out] linkId
+ *     Link number on which the sequence should be triggered
+ */
+#define NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS_MESSAGE_ID (0x43U)
+
+typedef struct NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS {
+    NvU32 linkId;
+} NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_POST_FAULT_UP (0x20803043U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_NVLINK_PORT_EVENT_COUNT_SIZE 64U
+
+/*
+ * Structure to store port event information
+ *
+ * portEventType
+ *     Type of port event that occurred: NVLINK_PORT_EVENT_TYPE*
+ *
+ * gpuId
+ *     GPU that the port event occurred on
+ *
+ * linkId
+ *     Link id that the port event occurred on
+ *
+ * time
+ *     Platform time (nsec) when the event occurred
+ */
+typedef struct NV2080_CTRL_NVLINK_PORT_EVENT {
+    NvU32 portEventType;
+    NvU32 gpuId;
+    NvU32 linkId;
+    NV_DECLARE_ALIGNED(NvU64 time, 8);
+} NV2080_CTRL_NVLINK_PORT_EVENT;
+
+/*
+ * NV2080_CTRL_CMD_NVLINK_GET_PORT_EVENTS
+ *
+ * This command returns the port up and port down events that have occurred
+ *
+ * Parameters:
+ *
+ * portEventIndex [IN/OUT]
+ *     On input: the index of the first port event at which to start reading out of the driver.
+ *     On output: the index of the first port event that wasn't reported through the 'portEvent' array
+ *     in this call to NV2080_CTRL_CMD_NVLINK_GET_PORT_EVENTS.
+ *
+ * nextPortEventIndex [OUT]
+ *     The index that will be assigned to the next port event that occurs.
+ *     Users of the GET_PORT_EVENTS control call may set 'portEventIndex' to this field on initialization
+ *     to bypass port events that have already occurred without making multiple control calls.
+ *
+ * portEventCount [OUT]
+ *     Number of port events returned by the call. Currently, portEventCount is limited
+ *     by NV2080_CTRL_NVLINK_PORT_EVENT_COUNT_SIZE. In order to query all the port events, a
+ *     client needs to keep calling the control until portEventCount is zero.
+ *
+ * bOverflow [OUT]
+ *     True when the port event log has overflowed and no longer contains all the port
+ *     events that have occurred, false otherwise.
+ *
+ * portEvent [OUT]
+ *     The port event entries.
+ */
+#define NV2080_CTRL_NVLINK_GET_PORT_EVENTS_PARAMS_MESSAGE_ID (0x44U)
+
+typedef struct NV2080_CTRL_NVLINK_GET_PORT_EVENTS_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 portEventIndex, 8);
+    NV_DECLARE_ALIGNED(NvU64 nextPortEventIndex, 8);
+    NvU32  portEventCount;
+    NvBool bOverflow;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_PORT_EVENT portEvent[NV2080_CTRL_NVLINK_PORT_EVENT_COUNT_SIZE], 8);
+} NV2080_CTRL_NVLINK_GET_PORT_EVENTS_PARAMS;
+
+#define NV2080_CTRL_CMD_NVLINK_GET_PORT_EVENTS (0x20803044U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_PORT_EVENTS_PARAMS_MESSAGE_ID" */
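+/*
+ * Illustrative sketch (not part of this interface): draining the port event
+ * log. Per the description above, the control is called repeatedly, resuming
+ * at the returned portEventIndex, until portEventCount comes back zero.
+ * rmControl() and handlePortEvent() are hypothetical.
+ *
+ *     NV2080_CTRL_NVLINK_GET_PORT_EVENTS_PARAMS p = { 0 };
+ *     NvU64 start = 0;
+ *     NvU32 i;
+ *     do
+ *     {
+ *         p.portEventIndex = start;
+ *         if (rmControl(hSubdevice, NV2080_CTRL_CMD_NVLINK_GET_PORT_EVENTS,
+ *                       &p, sizeof(p)) != NV_OK)
+ *             break;
+ *         for (i = 0; i < p.portEventCount; i++)
+ *             handlePortEvent(&p.portEvent[i]);
+ *         start = p.portEventIndex; // resume where this call stopped
+ *     } while (p.portEventCount != 0);
+ *     // p.bOverflow indicates whether older events were lost
+ */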
NV2080_CTRL_NVLINK_PRM_ACCESS_PPLM_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvBool test_mode; + NvBool plr_vld; + NvU8 plane_ind; + NvU8 port_type; + NvU8 lp_msb; + NvU8 pnat; + NvU8 local_port; + NvBool plr_reject_mode_vld; + NvBool plr_margin_th_override_to_default; + NvU8 plr_reject_mode; + NvU8 tx_crc_plr; + NvU8 plr_margin_th; + NvU8 fec_override_admin_10g_40g; + NvU8 fec_override_admin_25g; + NvU8 fec_override_admin_50g; + NvU8 fec_override_admin_100g; + NvU8 fec_override_admin_56g; + NvU8 rs_fec_correction_bypass_admin; + NvU16 fec_override_admin_200g_4x; + NvU16 fec_override_admin_400g_8x; + NvU16 fec_override_admin_50g_1x; + NvU16 fec_override_admin_100g_2x; + NvU16 fec_override_admin_400g_4x; + NvU16 fec_override_admin_800g_8x; + NvU16 fec_override_admin_100g_1x; + NvU16 fec_override_admin_200g_2x; + NvBool tx_crc_plr_vld; + NvBool tx_crc_plr_override_to_default; + NvBool plr_reject_mode_override_to_default; + NvU16 nvlink_fec_override_admin_nvl_phy6; + NvU16 fec_override_admin_800g_4x; + NvU16 fec_override_admin_1600g_8x; + NvU16 fec_override_admin_200g_1x; + NvU16 fec_override_admin_400g_2x; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PPLM_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PPSLC (0x20803055U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PPSLC_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PPSLC_PARAMS_MESSAGE_ID (0x55U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PPSLC_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 lp_msb; + NvU8 local_port; + NvBool l1_req_en; + NvBool l1_fw_req_en; + NvBool l1_cap_adv; + NvBool l1_fw_cap_adv; + NvU32 hp_queues_bitmap; + NvU16 l1_hw_active_time; + NvU16 l1_hw_inactive_time; + NvU8 qem[8]; + NvBool l0_rx_cap_adv; + NvBool l0_rx_req_en; + NvBool l0_tx_cap_adv; + NvBool l0_tx_req_en; + NvBool l0_all_queues_are_import; + NvU16 l0_hw_inactive_time; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PPSLC_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MCAM (0x20803056U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_MCAM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MCAM_PARAMS_MESSAGE_ID (0x56U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MCAM_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 access_reg_group; + NvU8 feature_group; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MCAM_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MTECR (0x2080305cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_MTECR_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MTECR_PARAMS_MESSAGE_ID (0x5cU) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MTECR_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 slot_index; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MTECR_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MTEWE (0x2080305eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_MTEWE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MTEWE_PARAMS_MESSAGE_ID (0x5eU) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MTEWE_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 slot_index; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MTEWE_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MTSDE (0x2080305fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | 
NV2080_CTRL_NVLINK_PRM_ACCESS_MTSDE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MTSDE_PARAMS_MESSAGE_ID (0x5fU) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MTSDE_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 slot_index; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MTSDE_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MTCAP (0x20803061U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_MTCAP_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MTCAP_PARAMS_MESSAGE_ID (0x61U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MTCAP_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 slot_index; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MTCAP_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PMTU (0x20803062U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PMTU_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PMTU_PARAMS_MESSAGE_ID (0x62U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PMTU_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvBool itre; + NvU8 i_e; + NvU8 lp_msb; + NvU8 local_port; + NvU8 protocol; + NvU16 admin_mtu; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PMTU_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PMLP (0x20803064U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PMLP_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PMLP_PARAMS_MESSAGE_ID (0x64U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PMLP_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 width; + NvU8 plane_ind; + NvU8 lp_msb; + NvU8 local_port; + NvBool m_lane_m; + NvBool rxtx; + NvBool mod_lab_map; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PMLP_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_GHPKT (0x20803065U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_GHPKT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_GHPKT_PARAMS_MESSAGE_ID (0x65U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_GHPKT_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU16 trap_id; + NvU8 action; +} NV2080_CTRL_NVLINK_PRM_ACCESS_GHPKT_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PDDR (0x20803066U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PDDR_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PDDR_PARAMS_MESSAGE_ID (0x66U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PDDR_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 port_type; + NvU8 plane_ind; + NvU8 lp_msb; + NvU8 pnat; + NvU8 local_port; + NvU8 page_select; + NvU8 module_info_ext; + NvU8 module_ind_type; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PDDR_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PPTT (0x20803068U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PPTT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PPTT_PARAMS_MESSAGE_ID (0x68U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PPTT_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvBool le; + NvU8 port_type; + NvU8 lane; + NvU8 lp_msb; + NvU8 pnat; + NvU8 local_port; + NvBool sw; + NvBool dm_ig; + NvBool p; + NvBool e; + NvU8 modulation; + NvU8 prbs_mode_admin; + NvBool prbs_fec_admin; + NvU16 lane_rate_admin; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PPTT_PARAMS; 
+ +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PPCNT (0x20803069U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PPCNT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PPCNT_PARAMS_MESSAGE_ID (0x69U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PPCNT_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 grp; + NvU8 port_type; + NvU8 lp_msb; + NvU8 pnat; + NvU8 local_port; + NvU8 swid; + NvU8 prio_tc; + NvU8 grp_profile; + NvU8 plane_ind; + NvBool counters_cap; + NvBool lp_gl; + NvBool clr; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PPCNT_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MGIR (0x2080306aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_MGIR_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MGIR_PARAMS_MESSAGE_ID (0x6aU) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MGIR_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MGIR_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PPAOS (0x2080306bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PPAOS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PPAOS_PARAMS_MESSAGE_ID (0x6bU) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PPAOS_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 port_type; + NvU8 phy_test_mode_admin; + NvU8 lp_msb; + NvU8 local_port; + NvU8 swid; + NvU8 plane_ind; + NvU8 phy_status_admin; + NvBool ee_nmxas; + NvU8 nmxas_e; + NvU8 ps_e_ext; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PPAOS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PPHCR (0x2080306cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PPHCR_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PPHCR_PARAMS_MESSAGE_ID (0x6cU) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PPHCR_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 plane_ind; + NvU8 port_type; + NvU8 lp_msb; + NvU8 pnat; + NvU8 local_port; + NvU8 hist_type; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PPHCR_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_SLTP (0x2080306dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_SLTP_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_SLTP_PARAMS_MESSAGE_ID (0x6dU) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_SLTP_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvBool c_db; + NvU8 port_type; + NvU8 lane_speed; + NvU8 lane; + NvBool tx_policy; + NvU8 pnat; + NvU8 local_port; + NvU8 lp_msb; + NvBool conf_mod; +} NV2080_CTRL_NVLINK_PRM_ACCESS_SLTP_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PGUID (0x2080306eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PGUID_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PGUID_PARAMS_MESSAGE_ID (0x6eU) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PGUID_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 lp_msb; + NvU8 pnat; + NvU8 local_port; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PGUID_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PPRT (0x2080306fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PPRT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PPRT_PARAMS_MESSAGE_ID (0x6fU) + 
+typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PPRT_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvBool le; + NvU8 port_type; + NvU8 lane; + NvU8 lp_msb; + NvU8 pnat; + NvU8 local_port; + NvBool sw; + NvBool dm_ig; + NvBool p; + NvBool s; + NvBool e; + NvU8 modulation; + NvU8 prbs_mode_admin; + NvU16 lane_rate_oper; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PPRT_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PTYS (0x20803070U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PTYS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PTYS_PARAMS_MESSAGE_ID (0x70U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PTYS_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 proto_mask; + NvBool transmit_allowed; + NvU8 plane_ind; + NvU8 port_type; + NvU8 lp_msb; + NvU8 local_port; + NvU8 tx_ready_e; + NvBool ee_tx_ready; + NvBool an_disable_admin; + NvU32 ext_eth_proto_admin; + NvU32 eth_proto_admin; + NvU16 ib_proto_admin; + NvU16 ib_link_width_admin; + NvBool xdr_2x_slow_admin; + NvU8 force_lt_frames_admin; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PTYS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_SLRG (0x20803071U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_SLRG_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_SLRG_PARAMS_MESSAGE_ID (0x71U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_SLRG_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 port_type; + NvU8 lane; + NvU8 lp_msb; + NvU8 pnat; + NvU8 local_port; +} NV2080_CTRL_NVLINK_PRM_ACCESS_SLRG_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PMAOS (0x20803072U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PMAOS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PMAOS_PARAMS_MESSAGE_ID (0x72U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PMAOS_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 admin_status; + NvU8 module; + NvU8 slot_index; + NvBool rst; + NvU8 e; + NvBool ee; + NvBool ase; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PMAOS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PPLR (0x20803073U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PPLR_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PPLR_PARAMS_MESSAGE_ID (0x73U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PPLR_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 plane_ind; + NvU8 port_type; + NvBool op_mod; + NvBool apply_im; + NvU8 lp_msb; + NvU8 local_port; + NvU16 lb_en; + NvBool lb_cap_mode_idx; + NvBool lb_link_mode_idx; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PPLR_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_GET_SUPPORTED_COUNTERS (0x20803074U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_SUPPORTED_COUNTERS_PARAMS_MESSAGE_ID" */ +#define NV2080_CTRL_NVLINK_GET_SUPPORTED_COUNTERS_PARAMS_MESSAGE_ID (0x74U) + +typedef struct NV2080_CTRL_NVLINK_GET_SUPPORTED_COUNTERS_PARAMS { + NV_DECLARE_ALIGNED(NvU64 counterMask[NV2080_CTRL_NVLINK_COUNTER_MAX_GROUPS], 8); +} NV2080_CTRL_NVLINK_GET_SUPPORTED_COUNTERS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MORD (0x20803075U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_MORD_PARAMS_MESSAGE_ID" */ + +#define 
NV2080_CTRL_NVLINK_PRM_ACCESS_MORD_PARAMS_MESSAGE_ID (0x75U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MORD_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU16 segment_type; + NvU8 seq_num; + NvBool vhca_id_valid; + NvBool inline_dump; + NvU16 vhca_id; + NvU32 index1; + NvU32 index2; + NvU16 num_of_obj2; + NvU16 num_of_obj1; + NV_DECLARE_ALIGNED(NvU64 device_opaque, 8); + NvU32 mkey; + NV_DECLARE_ALIGNED(NvU64 address, 8); +} NV2080_CTRL_NVLINK_PRM_ACCESS_MORD_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MTRC_CAP (0x20803076U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_MTRC_CAP_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MTRC_CAP_PARAMS_MESSAGE_ID (0x76U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MTRC_CAP_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvBool trace_owner; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MTRC_CAP_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MTRC_CONF (0x20803077U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_MTRC_CONF_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MTRC_CONF_PARAMS_MESSAGE_ID (0x77U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MTRC_CONF_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 trace_mode; + NvU8 log_trace_buffer_size; + NvU8 trace_mkey; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MTRC_CONF_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MTRC_CTRL (0x20803078U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_MTRC_CTRL_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MTRC_CTRL_PARAMS_MESSAGE_ID (0x78U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MTRC_CTRL_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU16 modify_field_select; + NvBool arm_event; + NvU8 trace_status; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MTRC_CTRL_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MTEIM (0x20803079U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_MTEIM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MTEIM_PARAMS_MESSAGE_ID (0x79U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MTEIM_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MTEIM_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MTIE (0x2080307aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_MTIE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MTIE_PARAMS_MESSAGE_ID (0x7aU) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MTIE_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 enable_all; + NvU8 log_delay; + NvU32 source_id_bitmask[8]; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MTIE_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MTIM (0x2080307bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_MTIM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MTIM_PARAMS_MESSAGE_ID (0x7bU) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MTIM_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 log_level; + NvU32 log_bit_mask; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MTIM_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MPSCR (0x2080307cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | 
NV2080_CTRL_NVLINK_PRM_ACCESS_MPSCR_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MPSCR_PARAMS_MESSAGE_ID (0x7cU) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MPSCR_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 warning_inactive_time; + NvU8 warning_active_time; + NvU8 critical_inactive_time; + NvU8 critical_active_time; + NvBool cc; + NvBool l0_all_queues_are_import; + NvBool ge; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MPSCR_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MTSR (0x2080307dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_MTSR_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MTSR_PARAMS_MESSAGE_ID (0x7dU) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MTSR_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MTSR_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PPSLS (0x2080307eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PPSLS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PPSLS_PARAMS_MESSAGE_ID (0x7eU) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PPSLS_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 lp_msb; + NvU8 local_port; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PPSLS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MLPC (0x2080307fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_MLPC_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MLPC_PARAMS_MESSAGE_ID (0x7fU) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MLPC_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 lp_msb; + NvU8 local_port; + NvU8 cnt_64bit; + NvBool stop_at_ff; + NvBool counter_rst; + NvBool counter_en; + NvU8 force_count_mask; + NvU8 cnt_type[8]; + NvU8 cnt_val[8]; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MLPC_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PLIB (0x20803080U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PLIB_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PLIB_PARAMS_MESSAGE_ID (0x80U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PLIB_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU16 ib_port; + NvU8 lp_msb; + NvU8 local_port; + NvU8 split_num; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PLIB_PARAMS; + +/* + * NV2080_CTRL_NVLINK_GET_PLATFORM_INFO_PARAMS + * + * This command returns platform-specific information related to the GPU's NVLink setup. + * + * ibGuid + * Infiniband GUID reported by platform (for Blackwell, ibGuid is 8 bytes so indices 8-15 are zero) + * rackGuid + * GUID of the rack containing this GPU (for Blackwell rackGuid is 13 bytes so indices 13-15 are zero) + * chassisPhysicalSlotNumber + * The slot number in the rack containing this GPU (includes switches) + * computeSlotIndex + * The index within the compute slots in the rack containing this GPU (does not include switches) + * nodeIndex + * Index of the node within the slot containing this GPU + * peerType + * Platform indicated NVLink-peer type (e.g. 
switch present or not) + * moduleId + * ID of this GPU within the node + */ + +#define NV2080_CTRL_CMD_NVLINK_GET_PLATFORM_INFO (0x20803083U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_PLATFORM_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_GET_PLATFORM_INFO_PARAMS_MESSAGE_ID (0x83U) + +typedef struct NV2080_CTRL_NVLINK_GET_PLATFORM_INFO_PARAMS { + NvU8 ibGuid[16]; + NvU8 rackGuid[16]; + NvU8 chassisPhysicalSlotNumber; + NvU8 computeSlotIndex; + NvU8 nodeIndex; + NvU8 peerType; + NvU8 moduleId; + NvU8 nvlinkSignalingProtocol; +} NV2080_CTRL_NVLINK_GET_PLATFORM_INFO_PARAMS; + + + +/* + * Structure to store UPHY cmd data. + * pllIndex + * PLL index for the specified link + * address + * Desired address for read + */ +typedef struct NV2080_CTRL_NVLINK_UPHY_CLN_CMD { + NvU8 pllIndex; + NvU16 address; +} NV2080_CTRL_NVLINK_UPHY_CLN_CMD; + +/* + * NV2080_CTRL_CMD_NVLINK_READ_UPHY_CLN + * + * + * This command reads the UPHY CLN register selected by the given PLL index and address for each specified link. + * + * [in] linkMask (This field will be deprecated in the future, please use links) + * Mask of links whose uphy should be read + * [in] links + * Mask of links whose uphy should be read + * [in] uphyCmd + * Array of input data (pll index and address) for each link, + * where index 0 represents link 0's pll index + * and index 16 represents link 16's pll index. + * [out] data + * Data from uphy cln for each link where index 0 represents link 0's pll index + * and index 16 represents link 16's pll index. + * + * Possible status values returned are: + * NV_OK + * If the minion command completed successfully + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on the chip + * NV_ERR_INVALID_ARGUMENT + * If the link is not enabled on the GPU or the lane is invalid + * NV_ERR_TIMEOUT + * If a timeout occurred waiting for minion response + */ +#define NV2080_CTRL_CMD_NVLINK_READ_UPHY_CLN (0x20803084U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_READ_UPHY_CLN_REG_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_READ_UPHY_CLN_REG_PARAMS_MESSAGE_ID (0x84U) + +typedef struct NV2080_CTRL_NVLINK_READ_UPHY_CLN_REG_PARAMS { + NvU32 linkMask; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8); + NV2080_CTRL_NVLINK_UPHY_CLN_CMD uphyCmd[NV2080_CTRL_NVLINK_MAX_LINKS]; + NvU32 data[NV2080_CTRL_NVLINK_MAX_LINKS]; +} NV2080_CTRL_NVLINK_READ_UPHY_CLN_REG_PARAMS; + +#define NV2080_CTRL_NVLINK_SUPPORTED_MAX_BW_MODE_COUNT 23U + +/* + * NV2080_CTRL_CMD_NVLINK_GET_SUPPORTED_BW_MODE + * + * This command gets the supported RBMs of the GPU + * + * [out] rbmModesList + * List of supported RBM modes + * [out] rbmTotalModes + * Total RBM modes supported + * Possible status values returned are: TODO: Update this + * NV_OK + * If the BW mode is retrieved successfully + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on the chip + * NV_ERR_INVALID_ARGUMENT + * If the link is not enabled on the GPU + * NV_ERR_INVALID_STATE + * If the link is in an invalid state + */ +#define NV2080_CTRL_CMD_NVLINK_GET_SUPPORTED_BW_MODE (0x20803085U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_SUPPORTED_BW_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_GET_SUPPORTED_BW_MODE_PARAMS_MESSAGE_ID (0x85U) + +typedef struct NV2080_CTRL_NVLINK_GET_SUPPORTED_BW_MODE_PARAMS { + NvU8 rbmModesList[NV2080_CTRL_NVLINK_SUPPORTED_MAX_BW_MODE_COUNT]; + NvU8 rbmTotalModes; +} NV2080_CTRL_NVLINK_GET_SUPPORTED_BW_MODE_PARAMS;
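A typical RBM flow queries the supported modes with the control above and then requests one of them with NV2080_CTRL_CMD_NVLINK_SET_BW_MODE, defined just below. A minimal sketch, again with the hypothetical rmControl() helper:

    /* Sketch only: pick the first advertised RBM mode and request it. */
    extern NV_STATUS rmControl(NvU32 cmd, void *pParams, NvU32 paramsSize); /* hypothetical */

    static NV_STATUS selectFirstSupportedRbmMode(void)
    {
        NV2080_CTRL_NVLINK_GET_SUPPORTED_BW_MODE_PARAMS supported = { 0 };
        NV2080_CTRL_NVLINK_SET_BW_MODE_PARAMS set = { 0 };
        NV_STATUS status;

        status = rmControl(NV2080_CTRL_CMD_NVLINK_GET_SUPPORTED_BW_MODE,
                           &supported, sizeof(supported));
        if (status != NV_OK)
            return status;

        if (supported.rbmTotalModes == 0)
            return NV_ERR_NOT_SUPPORTED;

        /* Take the first mode here; a real client would choose by policy. */
        set.rbmMode = supported.rbmModesList[0];
        return rmControl(NV2080_CTRL_CMD_NVLINK_SET_BW_MODE, &set, sizeof(set));
    }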
+ +/* + * NV2080_CTRL_CMD_NVLINK_SET_BW_MODE + * + * This command sets the requested RBM of the GPU + * + * [in] rbmMode + * Requested RBM mode + * + * Possible status values returned are: TODO: Update this + * NV_OK + * If the BW mode is set successfully + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on the chip + * NV_ERR_INVALID_ARGUMENT + * If the link is not enabled on the GPU + * NV_ERR_INVALID_STATE + * If the link is in an invalid state + */ +#define NV2080_CTRL_CMD_NVLINK_SET_BW_MODE (0x20803086U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SET_BW_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_SET_BW_MODE_PARAMS_MESSAGE_ID (0x86U) + +typedef struct NV2080_CTRL_NVLINK_SET_BW_MODE_PARAMS { + NvU8 rbmMode; +} NV2080_CTRL_NVLINK_SET_BW_MODE_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_GET_BW_MODE + * + * This command gets the currently set RBM of the GPU + * + * [out] rbmMode + * RBM mode currently set + * + * Possible status values returned are: TODO: Update this + * NV_OK + * If the BW mode is retrieved successfully + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on the chip + * NV_ERR_INVALID_ARGUMENT + * If the link is not enabled on the GPU + * NV_ERR_INVALID_STATE + * If the link is in an invalid state + */ +#define NV2080_CTRL_CMD_NVLINK_GET_BW_MODE (0x20803087U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_BW_MODE_PARAMS_MESSAGE_ID" */ +#define NV2080_CTRL_NVLINK_GET_BW_MODE_PARAMS_MESSAGE_ID (0x87U) + +typedef struct NV2080_CTRL_NVLINK_GET_BW_MODE_PARAMS { + NvU8 rbmMode; +} NV2080_CTRL_NVLINK_GET_BW_MODE_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_GET_LOCAL_DEVICE_INFO + * + * localDeviceInfo + * NVLINK-relevant information about the device + */ + +#define NV2080_CTRL_CMD_NVLINK_GET_LOCAL_DEVICE_INFO (0x20803088U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_LOCAL_DEVICE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_GET_LOCAL_DEVICE_INFO_PARAMS_MESSAGE_ID (0x88U) + +typedef struct NV2080_CTRL_NVLINK_GET_LOCAL_DEVICE_INFO_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_DEVICE_INFO localDeviceInfo, 8); +} NV2080_CTRL_NVLINK_GET_LOCAL_DEVICE_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_INJECT_SW_ERROR + * + * This command is used to inject NVL5 ERROR_INJECT_V2 commands + * + * [in] links + * Link mask of the links on which to inject the error + * [in] severity + * Error type to be injected + * + * Possible status values returned are: + * NV_OK + * If the Error is injected successfully + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on the chip + * NV_ERR_INVALID_ARGUMENT + * If the link is not enabled on the GPU + * NV_ERR_INVALID_STATE + * If the link is in an invalid state + */ +#define NV2080_CTRL_CMD_NVLINK_INJECT_SW_ERROR (0x20803089U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_INJECT_SW_ERROR_PARAMS_MESSAGE_ID" */ +typedef enum NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY { + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_NONFATAL = 0, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_APP_FATAL = 1, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_FATAL = 2, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_DEGRADATION = 3, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_WATCHDOG_TIMEOUT = 4, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_GIN_SAW_MVB_NON_FATAL = 5, +
NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_GIN_SAW_MSE = 6, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_GIN_RLW_NON_FATAL = 7, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_GIN_RLW_FATAL = 8, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_GIN_RLW_PRIV_ERR = 9, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_GIN_TLW_NON_FATAL = 10, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_GIN_TLW_FATAL = 11, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_GIN_TREX_NON_FATAL = 12, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_GIN_NETIR_NON_FATAL = 13, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_GIN_NETIR_FATAL = 14, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_ECC_INJECT_SAW_MVB_NON_FATAL = 15, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_ECC_INJECT_SAW_MVB_FATAL = 16, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_ECC_INJECT_RLW_NON_FATAL = 17, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_ECC_INJECT_RLW_FATAL = 18, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_ECC_INJECT_TLW_NON_FATAL = 19, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_ECC_INJECT_TLW_FATAL = 20, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MSE_GIN_SAW_MVB_FATAL = 21, + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY_MAX = 22, +} NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY; + +#define NV2080_CTRL_NVLINK_INJECT_SW_ERROR_PARAMS_MESSAGE_ID (0x89U) + +typedef struct NV2080_CTRL_NVLINK_INJECT_SW_ERROR_PARAMS { + NvU32 linkMask; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8); + NV2080_CTRL_NVLINK_INJECT_SW_ERROR_SEVERITY severity; +} NV2080_CTRL_NVLINK_INJECT_SW_ERROR_PARAMS; + +/* + * NV2080_CTRL_NVLINK_UPDATE_NVLE_TOPOLOGY + * + * This command is used to update the NVLE topology in GSP RM + * + * [in] localGpuAlid + * ALID of local GPU in P2P object + * [in] localGpuClid + * CLID of local GPU in P2P object + * [in] remoteGpuAlid + * ALID of remote GPU in P2P object + * [in] remoteGpuClid + * CLID of remote GPU in P2P object + */ +#define NV2080_CTRL_NVLINK_UPDATE_NVLE_TOPOLOGY (0x2080308cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_UPDATE_NVLE_TOPOLOGY_PARAMS_MESSAGE_ID" */ +#define NV2080_CTRL_NVLINK_UPDATE_NVLE_TOPOLOGY_PARAMS_MESSAGE_ID (0x8cU) + +typedef struct NV2080_CTRL_NVLINK_UPDATE_NVLE_TOPOLOGY_PARAMS { + NvU32 localGpuAlid; + NvU32 localGpuClid; + NvU32 remoteGpuAlid; + NvU32 remoteGpuClid; +} NV2080_CTRL_NVLINK_UPDATE_NVLE_TOPOLOGY_PARAMS; + +/* + * NV2080_CTRL_NVLINK_GET_NVLE_LIDS + * + * This command is used to get the ALID and CLID of a GPU from the remap table. + * + * [in] probeClid + * CLID from probe response + * [out] clid + * CLID of the GPU from the remap table + * [out] alid + * ALID of the GPU + */ +#define NV2080_CTRL_NVLINK_GET_NVLE_LIDS (0x2080308dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_NVLE_LIDS_PARAMS_MESSAGE_ID" */ +#define NV2080_CTRL_NVLINK_GET_NVLE_LIDS_PARAMS_MESSAGE_ID (0x8dU) + +typedef struct NV2080_CTRL_NVLINK_GET_NVLE_LIDS_PARAMS { + NvU32 probeClid; + NvU32 clid; + NvU32 alid; +} NV2080_CTRL_NVLINK_GET_NVLE_LIDS_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_POST_LAZY_ERROR_RECOVERY + * + * Signal to GSP that lazy error recovery can proceed.
+ */ + +#define NV2080_CTRL_CMD_NVLINK_POST_LAZY_ERROR_RECOVERY (0x2080308aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | 0x8A" */ + +/* + * NV2080_CTRL_CMD_NVLINK_CONFIGURE_L1_TOGGLE + * + * This command configures the NVLink L1 toggle pattern + * + * The command returns NV_OK only when it successfully sets the values of all + * parameters in the list. + * + * Possible status return values are: + * NV_OK + * + * Reference: + * + */ +#define NV2080_CTRL_CMD_NVLINK_CONFIGURE_L1_TOGGLE (0x2080308eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_CONFIGURE_L1_TOGGLE_PARAMS_MESSAGE_ID" */ + +typedef enum NV2080_CTRL_NVLINK_CONFIGURE_L1_TOGGLE_MODE { + NV2080_CTRL_NVLINK_CONFIGURE_L1_TOGGLE_MODE_DISABLED = 0, + NV2080_CTRL_NVLINK_CONFIGURE_L1_TOGGLE_MODE_TRIGGER_ONCE = 1, + NV2080_CTRL_NVLINK_CONFIGURE_L1_TOGGLE_MODE_FORCE_EXITED = 2, + NV2080_CTRL_NVLINK_CONFIGURE_L1_TOGGLE_MODE_FORCE_ENTERED = 3, + NV2080_CTRL_NVLINK_CONFIGURE_L1_TOGGLE_MODE_DUTY_CYCLE = 4, +} NV2080_CTRL_NVLINK_CONFIGURE_L1_TOGGLE_MODE; + +typedef struct NV2080_CTRL_NVLINK_L1_FORCE_CONFIG { + NV2080_CTRL_NVLINK_CONFIGURE_L1_TOGGLE_MODE mode; + NvU8 toggleActiveTime; + NvU8 toggleInactiveTime; + NvBool bTrigger; +} NV2080_CTRL_NVLINK_L1_FORCE_CONFIG; + +#define NV2080_CTRL_NVLINK_CONFIGURE_L1_TOGGLE_PARAMS_MESSAGE_ID (0x8EU) + +typedef struct NV2080_CTRL_NVLINK_CONFIGURE_L1_TOGGLE_PARAMS { + NvU32 linkMask; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8); + NV2080_CTRL_NVLINK_L1_FORCE_CONFIG config; +} NV2080_CTRL_NVLINK_CONFIGURE_L1_TOGGLE_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_GET_L1_TOGGLE (0x2080308fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_L1_TOGGLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_GET_L1_TOGGLE_PARAMS_MESSAGE_ID (0x8FU) + +typedef struct NV2080_CTRL_NVLINK_GET_L1_TOGGLE_PARAMS { + NvU32 linkMask; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_MASK links, 8); + NV2080_CTRL_NVLINK_L1_FORCE_CONFIG config[NV2080_CTRL_NVLINK_MAX_LINKS]; +} NV2080_CTRL_NVLINK_GET_L1_TOGGLE_PARAMS;
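As a usage illustration of the structures above, the sketch below puts link 0 into duty-cycle mode. rmControl() is again a hypothetical dispatch helper, NVBIT/NVBIT64 come from nvmisc.h, and the values chosen for the time fields are an assumption since the header does not specify their units.

    /* Sketch only: configure an L1 duty cycle on link 0. */
    extern NV_STATUS rmControl(NvU32 cmd, void *pParams, NvU32 paramsSize); /* hypothetical */

    static NV_STATUS dutyCycleLinkZero(void)
    {
        NV2080_CTRL_NVLINK_CONFIGURE_L1_TOGGLE_PARAMS params = { 0 };

        params.linkMask       = NVBIT(0);   /* legacy 32-bit mask */
        params.links.lenMasks = 1;          /* newer clients should fill 'links' */
        params.links.masks[0] = NVBIT64(0);

        params.config.mode               = NV2080_CTRL_NVLINK_CONFIGURE_L1_TOGGLE_MODE_DUTY_CYCLE;
        params.config.toggleActiveTime   = 10; /* units unspecified by the header */
        params.config.toggleInactiveTime = 10;
        params.config.bTrigger           = NV_TRUE;

        return rmControl(NV2080_CTRL_CMD_NVLINK_CONFIGURE_L1_TOGGLE,
                         &params, sizeof(params));
    }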
/* + * NV2080_CTRL_CMD_NVLINK_GET_NVLE_ENCRYPT_EN_INFO + * + * This command is used to get the ENCRYPT_EN register info + * + * [out] bEncryptEnSet + * Boolean that shows whether ENCRYPT_EN is enabled. + */ + +#define NV2080_CTRL_CMD_NVLINK_GET_NVLE_ENCRYPT_EN_INFO (0x2080308bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_NVLE_ENCRYPT_EN_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_GET_NVLE_ENCRYPT_EN_INFO_PARAMS_MESSAGE_ID (0x8bU) + +typedef struct NV2080_CTRL_NVLINK_GET_NVLE_ENCRYPT_EN_INFO_PARAMS { + NvBool bEncryptEnSet; +} NV2080_CTRL_NVLINK_GET_NVLE_ENCRYPT_EN_INFO_PARAMS; + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MCSR_DATA_SIZE (0x10U) + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_MCSR (0x20803090U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_MCSR_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_MCSR_PARAMS_MESSAGE_ID (0x90U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_MCSR_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU32 base_address; + NvU16 num_addresses; + NvU32 data[NV2080_CTRL_NVLINK_PRM_ACCESS_MCSR_DATA_SIZE]; +} NV2080_CTRL_NVLINK_PRM_ACCESS_MCSR_PARAMS; + + +/* + * NV2080_CTRL_NVLINK_GET_FIRMWARE_VERSION_INFO + * + * This command is used to get the firmware version info + * + * [out] firmwareVersion + * Array of firmware versions specifying their ucode type and major/minor/subminor version + * [out] chipTypeArch + * Chip type arch + * [out] numValidEntries + * Number of valid entries in firmwareVersion array + */ + +#define NV2080_CTRL_NVLINK_FIRMWARE_VERSION_LENGTH (0x10U) +#define NV2080_CTRL_NVLINK_SEMANTIC_VERSION_UCODE_TYPE_MSE (0x01U) +#define NV2080_CTRL_NVLINK_SEMANTIC_VERSION_UCODE_TYPE_NETIR (0x02U) +#define NV2080_CTRL_NVLINK_SEMANTIC_VERSION_UCODE_TYPE_NETIR_UPHY (0x03U) +#define NV2080_CTRL_NVLINK_SEMANTIC_VERSION_UCODE_TYPE_NETIR_CLN (0x04U) +#define NV2080_CTRL_NVLINK_SEMANTIC_VERSION_UCODE_TYPE_NETIR_DLN (0x05U) + +typedef struct NV2080_CTRL_NVLINK_SEMANTIC_VERSION { + NvU8 ucodeType; + NvU32 major; + NvU32 minor; + NvU32 subMinor; +} NV2080_CTRL_NVLINK_SEMANTIC_VERSION; + +#define NV2080_CTRL_NVLINK_GET_FIRMWARE_VERSION_INFO (0x20803091U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_FIRMWARE_VERSION_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_GET_FIRMWARE_VERSION_INFO_PARAMS_MESSAGE_ID (0x91U) + +typedef struct NV2080_CTRL_NVLINK_GET_FIRMWARE_VERSION_INFO_PARAMS { + NV2080_CTRL_NVLINK_SEMANTIC_VERSION firmwareVersion[NV2080_CTRL_NVLINK_FIRMWARE_VERSION_LENGTH]; + NvU8 chipTypeArch; + NvU32 numValidEntries; +} NV2080_CTRL_NVLINK_GET_FIRMWARE_VERSION_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_SET_NVLE_ENABLED_STATE + * + * This command is used to set the NVLE enablement status in GSP-RM + * + * [in] bIsNvleEnabled + * Boolean indicating whether NVLink Encryption is enabled + */ + +#define NV2080_CTRL_CMD_NVLINK_SET_NVLE_ENABLED_STATE (0x20803092U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SET_NVLE_ENABLED_STATE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_SET_NVLE_ENABLED_STATE_PARAMS_MESSAGE_ID (0x92U) + +typedef struct NV2080_CTRL_NVLINK_SET_NVLE_ENABLED_STATE_PARAMS { + NvBool bIsNvleEnabled; +} NV2080_CTRL_NVLINK_SET_NVLE_ENABLED_STATE_PARAMS;
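For the NV2080_CTRL_NVLINK_GET_FIRMWARE_VERSION_INFO control above, a caller only inspects the first numValidEntries slots of the version array. A short sketch, assuming the hypothetical rmControl() helper and <stdio.h>:

    /* Sketch only: dump each reported ucode version. */
    extern NV_STATUS rmControl(NvU32 cmd, void *pParams, NvU32 paramsSize); /* hypothetical */

    static void printNvlinkFirmwareVersions(void)
    {
        NV2080_CTRL_NVLINK_GET_FIRMWARE_VERSION_INFO_PARAMS fw = { 0 };

        if (rmControl(NV2080_CTRL_NVLINK_GET_FIRMWARE_VERSION_INFO,
                      &fw, sizeof(fw)) != NV_OK)
            return;

        for (NvU32 i = 0; i < fw.numValidEntries; i++)
        {
            const NV2080_CTRL_NVLINK_SEMANTIC_VERSION *v = &fw.firmwareVersion[i];
            printf("ucode type %u: %u.%u.%u\n",
                   v->ucodeType, v->major, v->minor, v->subMinor);
        }
    }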
#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PTASV2 (0x20803093U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PTASV2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PTASV2_PARAMS_MESSAGE_ID (0x93U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PTASV2_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 lp_msb; + NvU8 pnat; + NvU8 local_port; + NvU8 xdr_lt_c2c_en; + NvU8 xdr_lt_c2m_en; + NvU8 kr_ext_req; + NvU8 lt_ext_neg_type; + NvU8 lt_ext_timeout_admin; + NvU8 prbs_type_admin; + NvBool ber_cnt_mlsd_dis; + NvU8 num_of_iter_admin; + NvU16 iter_time_admin; + NvU8 ber_target_coef_admin; + NvU8 ber_target_magnitude_admin; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PTASV2_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_SLLM_5NM (0x20803094U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_SLLM_5NM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_SLLM_5NM_PARAMS_MESSAGE_ID (0x94U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_SLLM_5NM_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvBool c_db; + NvBool br_lanes; + NvU8 port_type; + NvU8 lane; + NvU8 lp_msb; + NvU8 pnat; + NvU8 local_port; + NvBool peq_cap; + NvU16 peq_interval_period; +} NV2080_CTRL_NVLINK_PRM_ACCESS_SLLM_5NM_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS (0x20803095U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PARAMS_MESSAGE_ID (0x95U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PARAMS { + NV2080_CTRL_NVLINK_PRM_DATA prm; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PRM_ACCESS_PPRM (0x20803096U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRM_ACCESS_PPRM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PRM_ACCESS_PPRM_PARAMS_MESSAGE_ID (0x96U) + +typedef struct NV2080_CTRL_NVLINK_PRM_ACCESS_PPRM_PARAMS { + NvBool bWrite; + NV2080_CTRL_NVLINK_PRM_DATA prm; + NvU8 ovrd_no_neg_bhvr; + NvU8 plane_ind; + NvU8 lp_msb; + NvU8 pnat; + NvU8 local_port; + NvU8 no_neg_bhvr; + NvU8 wd_logic_re_lock_res; + NvU8 module_datapath_full_toggle; + NvU8 module_tx_disable; + NvU8 host_serdes_feq; + NvU8 host_logic_re_lock; + NvU16 link_down_timeout; + NvU8 draining_timeout; + NvU8 wd_module_full_toggle; + NvU8 wd_module_tx_disable; + NvU8 wd_host_serdes_feq; + NvU8 wd_host_logic_re_lock; +} NV2080_CTRL_NVLINK_PRM_ACCESS_PPRM_PARAMS; + +#define NV2080_CTRL_NVLINK_FIXED_POINT_HISTOGRAM_MAX_SIZE 32U + +typedef struct NV2080_CTRL_NVLINK_FIXED_POINT_HISTOGRAM { + NvUFXP0_8 data[NV2080_CTRL_NVLINK_FIXED_POINT_HISTOGRAM_MAX_SIZE]; + NvU32 size; +} NV2080_CTRL_NVLINK_FIXED_POINT_HISTOGRAM;
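The histogram above stores unsigned 0.8 fixed-point weights (NvUFXP0_8), and the metric controls that follow report BER values as base-10 exponents only. A worked interpretation, under the assumptions that NvUFXP0_8 is a single byte whose fraction is value/256 and that <math.h> is available; this is illustrative, not an API:

    /* Worked interpretation: exponent 8 corresponds to a BER on the order
     * of 1e-8, and a histogram bin of 0x80 carries a weight of 128/256 = 0.5. */
    static double berFromExponent(NvU8 exponent)
    {
        return pow(10.0, -(double)exponent);
    }

    static double histogramBinWeight(NvUFXP0_8 bin)
    {
        return (double)bin / 256.0; /* assumes 0 integer bits, 8 fraction bits */
    }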
/* + * NV2080_CTRL_NVLINK_GET_LINK_ACCUMULATIVE_METRIC_DATA + * + * This command is used to retrieve persistent per-link metrics from beginning of life. + * This command is supported for NVL IP version 5.0 + * + * [in] linkId + * link ID to query + * [out] globalMaxRawBERExp + * Largest lifetime RAW BER (exponent only) + * [out] globalMaxEffBERExp + * Largest lifetime EFF BER (exponent only) + * [out] linkDownCount + * Lifetime count of link down events (saturates) + * [out] downReasonHistogram + * Fixed-point histogram of unintentional link down reason codes with + * recency bias + */ + +#define NV2080_CTRL_CMD_NVLINK_GET_LINK_ACCUMULATIVE_METRIC_DATA (0x20803097U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_LINK_ACCUMULATIVE_METRIC_DATA_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_GET_LINK_ACCUMULATIVE_METRIC_DATA_PARAMS_MESSAGE_ID (0x97U) + +typedef struct NV2080_CTRL_NVLINK_GET_LINK_ACCUMULATIVE_METRIC_DATA_PARAMS { + NvU32 linkId; + NvU8 globalMaxRawBERExp; + NvU8 globalMaxEffBERExp; + NvU32 linkDownCount; + NV2080_CTRL_NVLINK_FIXED_POINT_HISTOGRAM downReasonHistogram; +} NV2080_CTRL_NVLINK_GET_LINK_ACCUMULATIVE_METRIC_DATA_PARAMS; + +/* + * NV2080_CTRL_NVLINK_GET_LINK_RECORD_METRIC_DATA + * + * This command is used to retrieve most recent persistent per-link metrics. + * This command is supported for NVL IP version 5.0 + * + * [in] linkId + * link ID to query + * [out] initialRawBERExp + * Raw BER from most recent linkup (exponent only) + * [out] initialEffBERExp + * Eff BER from most recent linkup (exponent only) + * [out] lastDownReason + * Port down reason code from most recent linkdown + * [out] uptime + * Total time port has been up since most recent linkup (floating point msec) + * [out] localMaxRawBERExp + * Largest Raw BER reading since most recent linkup (exponent only) + * [out] localMaxEffBERExp + * Largest Eff BER reading since most recent linkup (exponent only) + * [out] currEffBERMonitor + * Current Eff BER monitor from most recent linkup + * [out] plrXmitRetryWithinTSecMaxLo + * PLR retransmissions in time window + * [out] totalSuccessfulRecoveryEvents + * Total number of times the Port Training state machine has + * successfully completed the link error recovery process + */ +#define NV2080_CTRL_CMD_NVLINK_GET_LINK_RECORD_METRIC_DATA (0x20803098U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_LINK_RECORD_METRIC_DATA_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_GET_LINK_RECORD_METRIC_DATA_PARAMS_MESSAGE_ID (0x98U) + +typedef struct NV2080_CTRL_NVLINK_GET_LINK_RECORD_METRIC_DATA_PARAMS { + NvU32 linkId; + NvU8 initialRawBERExp; + NvU8 initialEffBERExp; + NvU8 lastDownReason; + NvU32 uptime; + NvU8 localMaxRawBERExp; + NvU8 localMaxEffBERExp; + NvU16 currEffBERMonitor; + NvU32 plrXmitRetryWithinTSecMaxLo; + NvU32 totalSuccessfulRecoveryEvents; +} NV2080_CTRL_NVLINK_GET_LINK_RECORD_METRIC_DATA_PARAMS;
/* + * NV2080_CTRL_NVLINK_GET_DEVICE_RECORD_METRIC_DATA + * + * This command is used to retrieve most recent persistent per-device metrics. + * This command is supported for NVL IP version 5.0 + * + * [out] MFDE + * PRM payload of most recent NETIR_MFDE_EVENT error + */ + +#define NV2080_CTRL_CMD_NVLINK_GET_DEVICE_RECORD_METRIC_DATA (0x20803099U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_DEVICE_RECORD_METRIC_DATA_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_GET_DEVICE_RECORD_METRIC_DATA_PARAMS_MESSAGE_ID (0x99U) + +typedef struct NV2080_CTRL_NVLINK_GET_DEVICE_RECORD_METRIC_DATA_PARAMS { + NV2080_CTRL_NVLINK_PRM_DATA MFDE; +} NV2080_CTRL_NVLINK_GET_DEVICE_RECORD_METRIC_DATA_PARAMS; + + +/* _ctrl2080nvlink_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink_common.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink_common.h new file mode 100644 index 0000000..3c51f0e --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink_common.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080nvlink_common.finn +// + + + +#define NV2080_CTRL_NVLINK_MAX_LINKS 64 +#define NV2080_CTRL_NVLINK_MAX_ARR_SIZE 64 +#define NV2080_CTRL_NVLINK_MAX_MASK_SIZE (0x1) /* finn: Evaluated from "((NV2080_CTRL_NVLINK_MAX_LINKS + 63) / 64)" */ + +typedef struct NV2080_CTRL_NVLINK_LINK_MASK { + NvU8 lenMasks; + NV_DECLARE_ALIGNED(NvU64 masks[NV2080_CTRL_NVLINK_MAX_MASK_SIZE], 8); +} NV2080_CTRL_NVLINK_LINK_MASK;
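Many controls in ctrl2080nvlink.h above take this structure alongside the legacy 32-bit linkMask. A small sketch of how a link ID maps into the wider mask (NVBIT64 comes from nvmisc.h; treating lenMasks as the count of valid mask words is an assumption, since the header does not document it):

    /* Sketch only: set and test link 'linkId' in an NV2080_CTRL_NVLINK_LINK_MASK. */
    static void linkMaskSet(NV2080_CTRL_NVLINK_LINK_MASK *pMask, NvU32 linkId)
    {
        NvU32 word = linkId / 64;

        pMask->masks[word] |= NVBIT64(linkId % 64);
        if (pMask->lenMasks < (NvU8)(word + 1))
            pMask->lenMasks = (NvU8)(word + 1);
    }

    static NvBool linkMaskTest(const NV2080_CTRL_NVLINK_LINK_MASK *pMask, NvU32 linkId)
    {
        return (pMask->masks[linkId / 64] & NVBIT64(linkId % 64)) != 0;
    }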
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h new file mode 100644 index 0000000..93ce943 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h @@ -0,0 +1,923 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080perf.finn +// + +#include "nvfixedtypes.h" +#include "ctrl/ctrl2080/ctrl2080base.h" + +#define NV_SUBPROC_NAME_MAX_LENGTH 100 + +#include "nvmisc.h" + +#include "ctrl/ctrl2080/ctrl2080clk.h" +#include "ctrl/ctrl2080/ctrl2080gpumon.h" +#include "ctrl/ctrl2080/ctrl2080volt.h" +#include "ctrl/ctrl2080/ctrl2080vfe.h" +#include "ctrl/ctrl2080/ctrl2080pmumon.h" +#include "ctrl/ctrl0080/ctrl0080perf.h" +// +// XAPICHK/XAPI_TEST chokes on the "static NVINLINE" defines in nvmisc.h. +// However, we don't need any of those definitions for those tests (XAPICHK is a +// syntactical check, not a functional test). So, instead, just #define out the +// macros referenced below. +// + +/* + * NV2080_CTRL_CMD_PERF_BOOST + * + * This command can be used to boost P-State up one level or to the highest for a limited + * duration for the associated subdevice. Boosts from different clients are tracked + * independently. Note that there are other factors that can limit P-States so the resulting + * P-State may differ from expectation. + * + * flags + * This parameter specifies the actual command. _CLEAR is to clear existing boost. + * _BOOST_1LEVEL is to boost P-State one level higher. _BOOST_TO_MAX is to boost + * to the highest P-State. + * duration + * This parameter specifies the duration of the boost in seconds. This has to be less + * than NV2080_CTRL_PERF_BOOST_DURATION_MAX. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_PERF_BOOST_FLAGS_CMD 1:0 +#define NV2080_CTRL_PERF_BOOST_FLAGS_CMD_CLEAR (0x00000000) +#define NV2080_CTRL_PERF_BOOST_FLAGS_CMD_BOOST_1LEVEL (0x00000001) +#define NV2080_CTRL_PERF_BOOST_FLAGS_CMD_BOOST_TO_MAX (0x00000002) + +#define NV2080_CTRL_PERF_BOOST_FLAGS_CUDA 4:4 +#define NV2080_CTRL_PERF_BOOST_FLAGS_CUDA_NO (0x00000000) +#define NV2080_CTRL_PERF_BOOST_FLAGS_CUDA_YES (0x00000001) + +#define NV2080_CTRL_PERF_BOOST_FLAGS_ASYNC 5:5 +#define NV2080_CTRL_PERF_BOOST_FLAGS_ASYNC_NO (0x00000000) +#define NV2080_CTRL_PERF_BOOST_FLAGS_ASYNC_YES (0x00000001) + +#define NV2080_CTRL_PERF_BOOST_FLAGS_CUDA_PRIORITY 6:6 +#define NV2080_CTRL_PERF_BOOST_FLAGS_CUDA_PRIORITY_DEFAULT (0x00000000) +#define NV2080_CTRL_PERF_BOOST_FLAGS_CUDA_PRIORITY_HIGH (0x00000001) + +#define NV2080_CTRL_PERF_BOOST_DURATION_MAX 3600 // The duration can be specified up to 1 hour +#define NV2080_CTRL_PERF_BOOST_DURATION_INFINITE 0xffffffff // If set this way, the boost will last until cleared. + +#define NV2080_CTRL_CMD_PERF_BOOST (0x2080200a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_BOOST_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_PERF_BOOST_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV2080_CTRL_PERF_BOOST_PARAMS { + NvU32 flags; + NvU32 duration; +} NV2080_CTRL_PERF_BOOST_PARAMS;
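Since the flag fields above are DRF-style bit ranges, they compose with the DRF macros from the already-included nvmisc.h. A minimal sketch of a 10 second boost-to-max request and its matching clear, with the hypothetical rmControl() helper:

    /* Sketch only: request a 10 second boost to the highest P-State, then clear it. */
    extern NV_STATUS rmControl(NvU32 cmd, void *pParams, NvU32 paramsSize); /* hypothetical */

    static NV_STATUS boostToMaxFor10s(void)
    {
        NV2080_CTRL_PERF_BOOST_PARAMS boost = { 0 };
        NV_STATUS status;

        boost.flags = DRF_DEF(2080_CTRL_PERF, _BOOST_FLAGS, _CMD, _BOOST_TO_MAX) |
                      DRF_DEF(2080_CTRL_PERF, _BOOST_FLAGS, _ASYNC, _YES);
        boost.duration = 10; /* seconds; must stay below NV2080_CTRL_PERF_BOOST_DURATION_MAX */
        status = rmControl(NV2080_CTRL_CMD_PERF_BOOST, &boost, sizeof(boost));
        if (status != NV_OK)
            return status;

        /* Later, drop the boost explicitly rather than waiting it out. */
        boost.flags    = DRF_DEF(2080_CTRL_PERF, _BOOST_FLAGS, _CMD, _CLEAR);
        boost.duration = 0;
        return rmControl(NV2080_CTRL_CMD_PERF_BOOST, &boost, sizeof(boost));
    }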
+ +/* + * NV2080_CTRL_CMD_PERF_RESERVE_PERFMON_HW + * + * This command reserves HW Performance Monitoring capabilities for exclusive + * use by the requester. If the HW Performance Monitoring capabilities are + * currently in use then NVOS_STATUS_ERROR_STATE_IN_USE is returned. + * + * bAcquire + * When set to TRUE this parameter indicates that the client wants to + * acquire the Performance Monitoring capabilities on the subdevice. + * When set to FALSE this parameter releases the Performance Monitoring + * capabilities on the subdevice. + * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_ERROR_STATE_IN_USE + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_PERF_RESERVE_PERFMON_HW (0x20802093) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS_MESSAGE_ID (0x93U) + +typedef struct NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS { + NvBool bAcquire; +} NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS; + +/* + * NV2080_CTRL_PERF_POWERSTATE + * + * This structure describes power state information. + * + * powerState + * This parameter specifies the type of power source. + * Legal values for this parameter include: + * NV2080_CTRL_PERF_POWER_SOURCE_AC + * This value indicates that the power state is AC. + * NV2080_CTRL_PERF_POWER_SOURCE_BATTERY + * This value indicates that the power state is battery. + */ +#define NV2080_CTRL_PERF_POWER_SOURCE_AC (0x00000000) +#define NV2080_CTRL_PERF_POWER_SOURCE_BATTERY (0x00000001) + +typedef struct NV2080_CTRL_PERF_POWERSTATE_PARAMS { + NvU32 powerState; +} NV2080_CTRL_PERF_POWERSTATE_PARAMS; + +/* + * NV2080_CTRL_CMD_PERF_SET_POWERSTATE + * + * This command can be used to set the perf power state as AC or battery. + * + * powerStateInfo + * This parameter specifies the power source type to set.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_PERF_SET_POWERSTATE (0x2080205b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_SET_POWERSTATE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_PERF_SET_POWERSTATE_PARAMS_MESSAGE_ID (0x5BU) + +typedef struct NV2080_CTRL_PERF_SET_POWERSTATE_PARAMS { + NV2080_CTRL_PERF_POWERSTATE_PARAMS powerStateInfo; +} NV2080_CTRL_PERF_SET_POWERSTATE_PARAMS; + +/* + * NV2080_CTRL_CMD_PERF_SET_AUX_POWER_STATE + * + * This command allows the forcing of a performance level based on auxiliary + * power-states. + * + * powerState + * This parameter specifies the target auxiliary Power state. Legal aux + * power-states for this parameter are defined by the + * NV2080_CTRL_PERF_AUX_POWER_STATE_P* definitions that follow. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_PERF_SET_AUX_POWER_STATE (0x20802092) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_SET_AUX_POWER_STATE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_PERF_SET_AUX_POWER_STATE_PARAMS_MESSAGE_ID (0x92U) + +typedef struct NV2080_CTRL_PERF_SET_AUX_POWER_STATE_PARAMS { + NvU32 powerState; +} NV2080_CTRL_PERF_SET_AUX_POWER_STATE_PARAMS; + +#define NV2080_CTRL_PERF_AUX_POWER_STATE_P0 (0x00000000) +#define NV2080_CTRL_PERF_AUX_POWER_STATE_P1 (0x00000001) +#define NV2080_CTRL_PERF_AUX_POWER_STATE_P2 (0x00000002) +#define NV2080_CTRL_PERF_AUX_POWER_STATE_P3 (0x00000003) +#define NV2080_CTRL_PERF_AUX_POWER_STATE_P4 (0x00000004) +#define NV2080_CTRL_PERF_AUX_POWER_STATE_COUNT (0x00000005) + +/*! + * Enumeration of the RATED_TDP arbitration clients which make requests to force + * enable/disable VF points above the RATED_TDP point. + * + * These clients are sorted in descending priority - the RM will arbitrate + * between all clients in order of priority, taking as output the first client + * whose input action != @ref NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT. + */ +typedef enum NV2080_CTRL_PERF_RATED_TDP_CLIENT { + /*! + * Internal RM client corresponding to the RM's internal state and features. + * The RM client will either allow default behavior (@ref + * NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT) or will limit to RATED_TDP + * (@ref NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT) when no power + * controllers are active. + */ + NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM = 0, + /*! + * This client is specifically for Bug 1785342, where we need to limit the TDP + * to the Min value on boot and clear the Max TDP limit. + */ + NV2080_CTRL_PERF_RATED_TDP_CLIENT_WAR_BUG_1785342 = 1, + /*! + * Global client request. This client is expected to be used by a global + * switch functionality in an end-user tool, such as EVGA Precision, to + * either force enabling boost above RATED_TDP (@ref + * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED) or to force limiting to + * RATED_TDP (@ref NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT) across the + * board, regardless of any app-profile settings. + */ + NV2080_CTRL_PERF_RATED_TDP_CLIENT_GLOBAL = 2, + /*! + * Operating system request. This client is expected to be used by the + * operating system to set @ref NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LOCK + * for performance profiling. + */ + NV2080_CTRL_PERF_RATED_TDP_CLIENT_OS = 3, + /*! + * App profile client requests.
This client is expected to be used by the + * app-profile settings to either default to whatever was requested by + * higher-priority clients (@ref NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT) + * or to limit to RATED_TDP (@ref + * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT) for apps which have shown + * bad behavior when boosting. + */ + NV2080_CTRL_PERF_RATED_TDP_CLIENT_PROFILE = 4, + /*! + * Number of supported clients. + * + * @Note MUST ALWAYS BE LAST! + */ + NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS = 5, +} NV2080_CTRL_PERF_RATED_TDP_CLIENT; + +/*! + * Enumeration RATED_TDP actions - these are the requested actions clients can + * make to change the behavior of the RATED_TDP functionality. + */ +typedef enum NV2080_CTRL_PERF_RATED_TDP_ACTION { + /*! + * The default action - meaning no explicit request from the client other + * than to take the default behavior (allowing boosting above RATED_TDP) or + * any explicit actions from lower priority clients. + */ + NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT = 0, + /*! + * Force allow boosting above RATED_TDP - this action explicitly requests + * boosting above RATED_TDP, preventing lower priority clients from limiting + * to RATED_TDP. + */ + NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED = 1, + /*! + * Force limit to RATED_TDP - this action explicitly requests limiting + * to RATED_TDP. This is the opposite of the default behavior to allow + * boosting above RATED_TDP. Clients specify this action when they + * explicitly need boost to be disabled (e.g. eliminating perf variation, + * special apps which exhibit bad behavior, etc.). + */ + NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT = 2, + /*! + * Lock to RATED_TDP - this action requests the clocks to be fixed at the + * RATED_TDP. Used for achieving stable clocks required for profiling. + */ + NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LOCK = 3, + /*! + * Lock to Min TDP - This requests min to be fixed at RATED_TDP but allows + * boosting for max + */ + NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_FLOOR = 4, +} NV2080_CTRL_PERF_RATED_TDP_ACTION; + +/*! + * Enumeration VPstates - these are possible VPStates that clients can + * request + */ +typedef enum NV2080_CTRL_PERF_RATED_TDP_VPSTATE_TYPE { + /*! + * Choice of the RATED_TDP VPstate + */ + NV2080_CTRL_PERF_VPSTATE_RATED_TDP = 0, + /*! + * Choice of the TURBO_BOOST VPstate + */ + NV2080_CTRL_PERF_VPSTATE_TURBO_BOOST = 1, + /*! + * Number of supported vpstates. + * + * @Note MUST ALWAYS BE LAST! + */ + NV2080_CTRL_PERF_VPSTATE_NUM_VPSTATES = 2, +} NV2080_CTRL_PERF_RATED_TDP_VPSTATE_TYPE; + +/*! + * Structure containing a client's requested RATED_TDP action and VPstate type. + */ +typedef struct NV2080_CTRL_PERF_RATED_TDP_CLIENT_REQUEST { + /*! + * [in] - Client's requested action. + */ + NV2080_CTRL_PERF_RATED_TDP_ACTION action; + /*! + * [in] - Client's requested VPstate type. + */ + NV2080_CTRL_PERF_RATED_TDP_VPSTATE_TYPE vPstateType; +} NV2080_CTRL_PERF_RATED_TDP_CLIENT_REQUEST;
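The arbitration rule documented above (walk the clients in descending priority and take the first action that is not DEFAULT, else allow boosting) is small enough to sketch directly; this mirrors the documented behavior rather than quoting RM's implementation:

    /* Sketch of the documented arbitration over the per-client action inputs. */
    static NV2080_CTRL_PERF_RATED_TDP_ACTION
    ratedTdpArbitrate(const NV2080_CTRL_PERF_RATED_TDP_ACTION
                          inputs[NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS])
    {
        for (NvU32 i = 0; i < NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS; i++)
        {
            if (inputs[i] != NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT)
                return inputs[i]; /* highest-priority explicit request wins */
        }
        /* No explicit requests: boosting above RATED_TDP stays allowed. */
        return NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED;
    }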
+
+/*!
+ * Structure describing dynamic state of the RATED_TDP feature.
+ */
+#define NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS_MESSAGE_ID (0x6DU)
+
+typedef struct NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS {
+    /*!
+     * Structure of internal RM state - these values are used to determine the
+     * behavior of NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM per the RM's @ref
+     * perfPwrRatedTdpLimitRegisterClientActive() interface.
+     */
+    struct {
+        /*!
+         * [out] - Mask of active client controllers (@ref
+         * PERF_PWR_RATED_TDP_CLIENT) which are currently regulating TDP. When
+         * this mask is zero, NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM will request
+         * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT.
+         */
+        NvU32 clientActiveMask;
+        /*!
+         * [out] - Boolean indicating that the user has requested locking to
+         * the RATED_TDP vPstate via the corresponding regkey
+         * NV_REG_STR_RM_PERF_RATED_TDP_LIMIT. When the boolean value is true,
+         * NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM will request
+         * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT.
+         */
+        NvU8  bRegkeyLimitRatedTdp;
+    } rm;
+
+    /*!
+     * [out] - Arbitrated output action of all client requests (@ref inputs).
+     * This is the current state of the RATED_TDP feature. Will only be @ref
+     * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED or @ref
+     * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_ACTION       output;
+    /*
+     * [out] - Arbitrated output VPstate of all client requests (@ref inputs).
+     * This is the current VPstate of the RATED_TDP feature.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_VPSTATE_TYPE outputVPstate;
+    /*!
+     * [out] - Array of input client request actions, indexed via @ref
+     * NV2080_CTRL_PERF_RATED_TDP_CLIENT_. RM will arbitrate between these
+     * requests, choosing the highest priority request != @ref
+     * NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT, or falling back to choosing
+     * @ref NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_ACTION       inputs[NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS];
+    /*!
+     * [out] - Array of input client request VPstates, indexed via @ref
+     * NV2080_CTRL_PERF_RATED_TDP_CLIENT_. RM will arbitrate between these
+     * requests, choosing the highest priority request != @ref
+     * NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT, or falling back to choosing
+     * @ref NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_VPSTATE_TYPE vPstateTypes[NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS];
+} NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_PERF_RATED_TDP_GET_CONTROL
+ *
+ * This command retrieves the current requested RATED_TDP action corresponding
+ * to the specified client.
+ *
+ * See @ref NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS for documentation of
+ * parameters.
+ *
+ * Possible status values returned are
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_PERF_RATED_TDP_GET_CONTROL (0x2080206e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_RATED_TDP_GET_CONTROL_PARAMS_MESSAGE_ID" */
+
+/*!
+ * Structure containing the requested action for a RATED_TDP client (@ref
+ * NV2080_CTRL_PERF_RATED_TDP_CLIENT).
+ */
+typedef struct NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS {
+    /*!
+     * [in] - Specified client for request.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_CLIENT       client;
+    /*!
+     * [in/out] - Client's requested action.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_ACTION       input;
+    /*
+     * [in] - Specified VPstate of the request.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_VPSTATE_TYPE vPstateType;
+} NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS;
+
+#define NV2080_CTRL_PERF_RATED_TDP_GET_CONTROL_PARAMS_MESSAGE_ID (0x6EU)
+
+typedef NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS NV2080_CTRL_PERF_RATED_TDP_GET_CONTROL_PARAMS;
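+
+/*
+ * Usage sketch (editor's illustration): forcing the GLOBAL client to limit to
+ * RATED_TDP via NV2080_CTRL_CMD_PERF_RATED_TDP_SET_CONTROL (defined below).
+ * The rmControl() wrapper and the handles are the assumed client-side helpers
+ * from the first sketch in this file.
+ *
+ *   NV2080_CTRL_PERF_RATED_TDP_SET_CONTROL_PARAMS params = { 0 };
+ *   NV_STATUS status;
+ *   params.client      = NV2080_CTRL_PERF_RATED_TDP_CLIENT_GLOBAL;
+ *   params.input       = NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT;
+ *   params.vPstateType = NV2080_CTRL_PERF_VPSTATE_RATED_TDP;
+ *   status = rmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_CMD_PERF_RATED_TDP_SET_CONTROL,
+ *                      &params, sizeof(params));
+ */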
+
+/*!
+ * NV2080_CTRL_CMD_PERF_RATED_TDP_SET_CONTROL
+ *
+ * This command sets the requested RATED_TDP action corresponding to the
+ * specified client. @Note, however, that this command is unable to set @ref
+ * NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM.
+ *
+ * See @ref NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS for documentation of
+ * parameters.
+ *
+ * Possible status values returned are
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_PERF_RATED_TDP_SET_CONTROL (0x2080206f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_RATED_TDP_SET_CONTROL_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PERF_RATED_TDP_SET_CONTROL_PARAMS_MESSAGE_ID (0x6FU)
+
+typedef NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS NV2080_CTRL_PERF_RATED_TDP_SET_CONTROL_PARAMS;
+
+/*!
+ * This struct represents the GPU monitoring perfmon sample for an engine.
+ */
+typedef struct NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE {
+    /*!
+     * Percentage during the sample that the engine remains busy. This
+     * is in units of pct*100.
+     */
+    NvU32 util;
+    /*!
+     * Scaling factor to convert utilization from full GPU to per vGPU.
+     */
+    NvU32 vgpuScale;
+    /*!
+     * Process ID of the process that was active on the engine when the
+     * sample was taken. If no process is active then NV2080_GPUMON_PID_INVALID
+     * will be returned.
+     */
+    NvU32 procId;
+    /*!
+     * Process ID of the process in the vGPU VM that was active on the engine
+     * when the sample was taken. If no process is active then
+     * NV2080_GPUMON_PID_INVALID will be returned.
+     */
+    NvU32 subProcessID;
+    /*!
+     * Process name of the process in the vGPU VM that was active on the engine
+     * when the sample was taken. If no process is active then an empty string
+     * will be returned.
+     */
+    char  subProcessName[NV_SUBPROC_NAME_MAX_LENGTH];
+    /*!
+     * PID struct pointer of the process that was active on the engine when
+     * the sample was taken. If no process is active then a NULL pointer
+     * will be returned.
+     */
+    NV_DECLARE_ALIGNED(NvU64 pOsPidInfo, 8);
+} NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE;
+
+/*!
+ * This struct represents the GPU monitoring perfmon sample.
+ */
+typedef struct NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE {
+    /*!
+     * Base GPU monitoring sample.
+     */
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPUMON_SAMPLE base, 8);
+    /*!
+     * FB bandwidth utilization sample.
+     */
+    NV_DECLARE_ALIGNED(NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE fb, 8);
+    /*!
+     * GR utilization sample.
+     */
+    NV_DECLARE_ALIGNED(NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE gr, 8);
+    /*!
+     * NV ENCODER utilization sample.
+     */
+    NV_DECLARE_ALIGNED(NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE nvenc, 8);
+    /*!
+     * NV DECODER utilization sample.
+     */
+    NV_DECLARE_ALIGNED(NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE nvdec, 8);
+    /*!
+     * NV JPEG utilization sample.
+     */
+    NV_DECLARE_ALIGNED(NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE nvjpg, 8);
+    /*!
+     * NV OFA utilization sample.
+     */
+    NV_DECLARE_ALIGNED(NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE nvofa, 8);
+} NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE;
+
+/*!
+ * This struct represents the GPU monitoring samples of perfmon values that
+ * the client wants access to.
+ */
+#define NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM_MESSAGE_ID (0x83U)
+
+typedef NV2080_CTRL_GPUMON_SAMPLES NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM;
+
+/*!
+ * Number of GPU monitoring samples in their respective buffers.
+ */
+#define NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL 72
+
+#define NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_BUFFER_SIZE   \
+    sizeof(NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE) *  \
+    NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL
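+
+/*
+ * Sizing sketch (editor's illustration): the sample buffer is simply the
+ * per-sample struct size times the fixed sample count, so a client-side
+ * compile-time check could look like the following.
+ *
+ *   typedef char gpumonBufferSizeCheck[
+ *       (NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_BUFFER_SIZE ==
+ *        sizeof(NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE) *
+ *        NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL) ? 1 : -1];
+ */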
+
+/*!
+ * NV2080_CTRL_CMD_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2
+ *
+ * This command returns perfmon gpu monitoring utilization samples.
+ * This command is not supported with SMC enabled.
+ *
+ * See NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS for
+ * documentation on the parameters.
+ *
+ * Possible status values returned are
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ * Note this is the same as NV2080_CTRL_CMD_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES
+ * but without the embedded pointer.
+ *
+ */
+#define NV2080_CTRL_CMD_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2 (0x20802096) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_MESSAGE_ID" */
+
+/*!
+ * This structure represents the GPU monitoring samples of utilization values
+ * that the client wants access to.
+ */
+#define NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_MESSAGE_ID (0x96U)
+
+typedef struct NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS {
+    /*!
+     * Type of the sample, see NV2080_CTRL_GPUMON_SAMPLE_TYPE_* for reference.
+     */
+    NvU8  type;
+    /*!
+     * Size of the buffer; this should be
+     *   bufSize == NV2080_CTRL_*_GPUMON_SAMPLE_COUNT_* *
+     *              sizeof(derived type of NV2080_CTRL_GPUMON_SAMPLE).
+     */
+    NvU32 bufSize;
+    /*!
+     * Number of samples in ring buffer.
+     */
+    NvU32 count;
+    /*!
+     * Tracks the offset of the tail in the circular queue array pSamples.
+     */
+    NvU32 tracker;
+    /*!
+     * A circular queue with size == bufSize.
+     *
+     * @note This circular queue wraps around after 10 seconds of sampling,
+     * and it is the clients' responsibility to query within this time frame in
+     * order to avoid losing samples.
+     * @note With one exception, this queue contains the last 10 seconds of
+     * samples, with tracker pointing to the oldest entry and the entry before
+     * tracker as the newest entry. The exception is when the queue is not full
+     * (i.e. tracker is pointing to a zeroed out entry); in that case valid
+     * entries are between 0 and tracker.
+     * @note Clients can store the tracker from a previous query in order to
+     * retrieve the samples since the last read.
+     */
+    NV_DECLARE_ALIGNED(NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE samples[NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL], 8);
+} NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS;
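+
+/*
+ * Traversal sketch (editor's illustration): walking the circular queue from
+ * the oldest to the newest sample after a successful ..._V2 query, for the
+ * wrapped-queue case documented above. `p` is assumed to point at the
+ * returned params.
+ *
+ *   NvU32 n = NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL;
+ *   NvU32 i;
+ *   // When the queue has wrapped, p->tracker is the oldest entry; when it
+ *   // has not, entries 0..p->tracker-1 are valid and entry 0 is the oldest.
+ *   for (i = 0; i < n; i++)
+ *   {
+ *       const NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE *s =
+ *           &p->samples[(p->tracker + i) % n];
+ *       // consume s->gr.util, s->fb.util, ... (units of pct*100)
+ *   }
+ */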
+
+/*
+ * NV2080_CTRL_CMD_PERF_GPU_IS_IDLE
+ *
+ * This command notifies RM to make P-state switching aggressive by setting
+ * required limiting factors to speed up GC6 entry initiation.
+ *
+ *   prevPstate [out]
+ *     This parameter will contain the P-state before the switch was initiated.
+ *
+ * Possible status return values are:
+ *   NV_OK             : If the P-state switch is successful
+ *   NV_INVALID_STATE  : If unable to access the P-state structure
+ *   NVOS_STATUS_ERROR : If the P-state switch is unsuccessful
+ */
+#define NV2080_CTRL_CMD_PERF_GPU_IS_IDLE (0x20802089) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_GPU_IS_IDLE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PERF_GPU_IS_IDLE_PARAMS_MESSAGE_ID (0x89U)
+
+typedef struct NV2080_CTRL_PERF_GPU_IS_IDLE_PARAMS {
+    NvU32 prevPstate;
+    NvU32 action;
+} NV2080_CTRL_PERF_GPU_IS_IDLE_PARAMS;
+
+#define NV2080_CTRL_PERF_GPU_IS_IDLE_TRUE  (0x00000001)
+#define NV2080_CTRL_PERF_GPU_IS_IDLE_FALSE (0x00000002)
+
+/*
+ * NV2080_CTRL_CMD_PERF_AGGRESSIVE_PSTATE_NOTIFY
+ *
+ * This command is for the KMD Aggressive P-state feature.
+ *
+ *   bGpuIsIdle [in]
+ *     When true, applies a cap to the lowest P-state/GPCCLK. When false,
+ *     releases the cap.
+ *   idleTimeUs [in]
+ *     The amount of time (in microseconds) the GPU was idle since the
+ *     previous call, part of the GPU utilization data from KMD.
+ *   busyTimeUs [in]
+ *     The amount of time (in microseconds) the GPU was not idle since the
+ *     previous call, part of the GPU utilization data from KMD.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+
+#define NV2080_CTRL_CMD_PERF_AGGRESSIVE_PSTATE_NOTIFY (0x2080208f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_AGGRESSIVE_PSTATE_NOTIFY_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PERF_AGGRESSIVE_PSTATE_NOTIFY_PARAMS_MESSAGE_ID (0x8FU)
+
+typedef struct NV2080_CTRL_PERF_AGGRESSIVE_PSTATE_NOTIFY_PARAMS {
+    NvBool bGpuIsIdle;
+    NvBool bRestoreToMax;
+    NV_DECLARE_ALIGNED(NvU64 idleTimeUs, 8);
+    NV_DECLARE_ALIGNED(NvU64 busyTimeUs, 8);
+} NV2080_CTRL_PERF_AGGRESSIVE_PSTATE_NOTIFY_PARAMS;
+
+
+typedef struct NV2080_CTRL_PERF_GET_CLK_INFO {
+    NvU32 flags;
+    NvU32 domain;
+    NvU32 currentFreq;
+    NvU32 defaultFreq;
+    NvU32 minFreq;
+    NvU32 maxFreq;
+} NV2080_CTRL_PERF_GET_CLK_INFO;
+
+
+#define NV2080_CTRL_PERF_CLK_MAX_DOMAINS 32U
+
+#define NV2080_CTRL_CMD_PERF_GET_LEVEL_INFO (0x20802002) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_GET_LEVEL_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PERF_GET_LEVEL_INFO_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_PERF_GET_LEVEL_INFO_PARAMS {
+    NvU32 level;
+    NvU32 flags;
+    NV_DECLARE_ALIGNED(NvP64 perfGetClkInfoList, 8);
+    NvU32 perfGetClkInfoListSize;
+} NV2080_CTRL_PERF_GET_LEVEL_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_PERF_GET_LEVEL_INFO_V2 (0x2080200b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_GET_LEVEL_INFO_V2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PERF_GET_LEVEL_INFO_V2_PARAMS_MESSAGE_ID (0xBU)
+
+typedef struct NV2080_CTRL_PERF_GET_LEVEL_INFO_V2_PARAMS {
+    NvU32                         level;
+    NvU32                         flags;
+    NV2080_CTRL_PERF_GET_CLK_INFO perfGetClkInfoList[NV2080_CTRL_PERF_CLK_MAX_DOMAINS];
+    NvU32                         perfGetClkInfoListSize;
+} NV2080_CTRL_PERF_GET_LEVEL_INFO_V2_PARAMS;
+
+#define NV2080_CTRL_PERF_GET_LEVEL_INFO_FLAGS_TYPE                 0:0
+#define NV2080_CTRL_PERF_GET_LEVEL_INFO_FLAGS_TYPE_DEFAULT   (0x00000000)
+#define NV2080_CTRL_PERF_GET_LEVEL_INFO_FLAGS_TYPE_OVERCLOCK (0x00000001)
+#define NV2080_CTRL_PERF_GET_LEVEL_INFO_FLAGS_MODE                 2:1
+#define NV2080_CTRL_PERF_GET_LEVEL_INFO_FLAGS_MODE_NONE      (0x00000000)
+#define NV2080_CTRL_PERF_GET_LEVEL_INFO_FLAGS_MODE_DESKTOP   (0x00000001)
+#define NV2080_CTRL_PERF_GET_LEVEL_INFO_FLAGS_MODE_MAXPERF   (0x00000002)
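+
+/*
+ * Usage sketch (editor's illustration): querying clock info for performance
+ * level 0 with the V2 (embedded-array) variant. rmControl() and the handles
+ * are the assumed client-side helpers from the first sketch; the exact in/out
+ * semantics of perfGetClkInfoListSize are not documented in this header.
+ *
+ *   NV2080_CTRL_PERF_GET_LEVEL_INFO_V2_PARAMS params = { 0 };
+ *   NV_STATUS status;
+ *   params.level = 0;
+ *   status = rmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_CMD_PERF_GET_LEVEL_INFO_V2,
+ *                      &params, sizeof(params));
+ *   // On NV_OK, params.perfGetClkInfoList[] holds per-domain frequencies
+ *   // (currentFreq, defaultFreq, minFreq, maxFreq).
+ */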
+
+/*
+ * NV2080_CTRL_CMD_PERF_GET_VID_ENG_PERFMON_SAMPLE
+ *
+ * This command can be used to obtain the video engine utilization of
+ * the associated subdevice.
+ * This command is not supported with SMC enabled.
+ *
+ *   engineType
+ *     This parameter allows clients to specify the type of video
+ *     engine in question. It can be NVENC or NVDEC.
+ *   clkPercentBusy
+ *     This parameter contains the percentage during the sample that
+ *     the clock remains busy.
+ *   samplingPeriodUs
+ *     This field returns the sampling period in microseconds.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_PERF_GET_VID_ENG_PERFMON_SAMPLE (0x20802087) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_GET_VID_ENG_PERFMON_SAMPLE_PARAMS_MESSAGE_ID" */
+
+typedef enum NV2080_CTRL_CMD_PERF_VID_ENG {
+    /*!
+     * GPU video encoder engine.
+     */
+    NV2080_CTRL_CMD_PERF_VID_ENG_NVENC = 1,
+
+    /*!
+     * GPU video decoder engine.
+     */
+    NV2080_CTRL_CMD_PERF_VID_ENG_NVDEC = 2,
+
+    /*!
+     * GPU JPEG engine.
+     */
+    NV2080_CTRL_CMD_PERF_VID_ENG_NVJPG = 3,
+
+    /*!
+     * GPU OFA engine.
+     */
+    NV2080_CTRL_CMD_PERF_VID_ENG_NVOFA = 4,
+} NV2080_CTRL_CMD_PERF_VID_ENG;
+
+#define NV2080_CTRL_PERF_GET_VID_ENG_PERFMON_SAMPLE_PARAMS_MESSAGE_ID (0x87U)
+
+typedef struct NV2080_CTRL_PERF_GET_VID_ENG_PERFMON_SAMPLE_PARAMS {
+    NV2080_CTRL_CMD_PERF_VID_ENG engineType;
+    NvU32                        clkPercentBusy;
+    NvU32                        samplingPeriodUs;
+} NV2080_CTRL_PERF_GET_VID_ENG_PERFMON_SAMPLE_PARAMS;
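+
+/*
+ * Usage sketch (editor's illustration): sampling NVDEC utilization.
+ * rmControl() and the handles are the assumed client-side helpers from the
+ * first sketch in this file.
+ *
+ *   NV2080_CTRL_PERF_GET_VID_ENG_PERFMON_SAMPLE_PARAMS params = { 0 };
+ *   NV_STATUS status;
+ *   params.engineType = NV2080_CTRL_CMD_PERF_VID_ENG_NVDEC;
+ *   status = rmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_CMD_PERF_GET_VID_ENG_PERFMON_SAMPLE,
+ *                      &params, sizeof(params));
+ *   // On NV_OK, params.clkPercentBusy holds the engine busy percentage over
+ *   // the last params.samplingPeriodUs microseconds.
+ */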
+
+/*
+ * NV2080_CTRL_CMD_PERF_GET_POWERSTATE
+ *
+ * This command can be used to find out whether the perf power state is AC or
+ * battery.
+ *
+ *   powerStateInfo
+ *     This parameter specifies the power source type.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_PERF_GET_POWERSTATE (0x2080205a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_GET_POWERSTATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PERF_GET_POWERSTATE_PARAMS_MESSAGE_ID (0x5AU)
+
+typedef struct NV2080_CTRL_PERF_GET_POWERSTATE_PARAMS {
+    NV2080_CTRL_PERF_POWERSTATE_PARAMS powerStateInfo;
+} NV2080_CTRL_PERF_GET_POWERSTATE_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_PERF_NOTIFY_VIDEOEVENT
+ *
+ * This command can be used by the video driver to notify RM of
+ * performance-related events.
+ *
+ *   videoEvent
+ *     This parameter specifies the video event to notify.
+ *     Legal values for this parameter include:
+ *       NV2080_CTRL_PERF_VIDEOEVENT_STREAM_HD_START
+ *       NV2080_CTRL_PERF_VIDEOEVENT_STREAM_HD_STOP
+ *         These values indicate that an HD video stream (less than 4K)
+ *         has started/stopped.
+ *       NV2080_CTRL_PERF_VIDEOEVENT_STREAM_SD_START
+ *       NV2080_CTRL_PERF_VIDEOEVENT_STREAM_SD_STOP
+ *         These are now obsolete in new products as we no longer
+ *         need to differentiate between SD and HD.
+ *       NV2080_CTRL_PERF_VIDEOEVENT_STREAM_4K_START
+ *       NV2080_CTRL_PERF_VIDEOEVENT_STREAM_4K_STOP
+ *         These values indicate that a 4K video stream (3840x2160 pixels
+ *         or higher) has started/stopped.
+ *       NV2080_CTRL_PERF_VIDEOEVENT_OFA_START
+ *       NV2080_CTRL_PERF_VIDEOEVENT_OFA_STOP
+ *         These values indicate that Optical Flow Accelerator usage has
+ *         started/stopped.
+ *     The following flags may be or'd into the event value:
+ *       NV2080_CTRL_PERF_VIDEOEVENT_FLAG_LINEAR_MODE
+ *         The stream operates BSP/VP2 or MSVLD/MSPDEC communication in
+ *         linear mode (default is ring mode).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_PERF_NOTIFY_VIDEOEVENT (0x2080205d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_NOTIFY_VIDEOEVENT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PERF_NOTIFY_VIDEOEVENT_PARAMS_MESSAGE_ID (0x5DU)
+
+typedef struct NV2080_CTRL_PERF_NOTIFY_VIDEOEVENT_PARAMS {
+    NvU32 videoEvent;
+} NV2080_CTRL_PERF_NOTIFY_VIDEOEVENT_PARAMS;
+
+#define NV2080_CTRL_PERF_VIDEOEVENT_EVENT_MASK       (0x0000ffff)
+#define NV2080_CTRL_PERF_VIDEOEVENT_STREAM_HD_START  (0x00000001)
+#define NV2080_CTRL_PERF_VIDEOEVENT_STREAM_HD_STOP   (0x00000002)
+#define NV2080_CTRL_PERF_VIDEOEVENT_STREAM_START     NV2080_CTRL_PERF_VIDEOEVENT_STREAM_HD_START
+#define NV2080_CTRL_PERF_VIDEOEVENT_STREAM_STOP      NV2080_CTRL_PERF_VIDEOEVENT_STREAM_HD_STOP
+#define NV2080_CTRL_PERF_VIDEOEVENT_STREAM_SD_START  (0x00000003)
+#define NV2080_CTRL_PERF_VIDEOEVENT_STREAM_SD_STOP   (0x00000004)
+#define NV2080_CTRL_PERF_VIDEOEVENT_STREAM_4K_START  (0x00000005)
+#define NV2080_CTRL_PERF_VIDEOEVENT_STREAM_4K_STOP   (0x00000006)
+#define NV2080_CTRL_PERF_VIDEOEVENT_OFA_START        (0x00000007)
+#define NV2080_CTRL_PERF_VIDEOEVENT_OFA_STOP         (0x00000008)
+#define NV2080_CTRL_PERF_VIDEOEVENT_FLAG_LINEAR_MODE (0x00010000)
+
+/*!
+ * @defgroup NV2080_CTRL_PERF_PSTATES
+ *
+ * These are definitions of performance state (P-state) values.
+ * P0 has the maximum performance capability and consumes maximum
+ * power. P1 has a lower perf and power than P0, and so on.
+ * For NVIDIA GPUs, the following definitions are made:
+ *   P0    - maximum 3D performance
+ *   P1    - original P0 when active clocked
+ *   P2-P3 - balanced 3D performance-power
+ *   P8    - basic HD video playback
+ *   P10   - SD video playback
+ *   P12   - minimum idle power
+ *   P15   - max possible P-state under current scheme (currently not used)
+ * Not all P-states are available on a given system.
+ *
+ * @note The @ref NV2080_CTRL_PERF_PSTATES_ID was introduced after the
+ * original constants were added, so not all places that intend to use
+ * these values are using the type. They should be updated to do so.
+ * @{
+ */
+typedef NvU32 NV2080_CTRL_PERF_PSTATES_ID;
+#define NV2080_CTRL_PERF_PSTATES_UNDEFINED     (0x00000000U)
+#define NV2080_CTRL_PERF_PSTATES_CLEAR_FORCED  (0x00000000U)
+#define NV2080_CTRL_PERF_PSTATES_MIN           (0x00000001U)
+#define NV2080_CTRL_PERF_PSTATES_P0            (0x00000001U)
+#define NV2080_CTRL_PERF_PSTATES_P1            (0x00000002U)
+#define NV2080_CTRL_PERF_PSTATES_P2            (0x00000004U)
+#define NV2080_CTRL_PERF_PSTATES_P3            (0x00000008U)
+#define NV2080_CTRL_PERF_PSTATES_P4            (0x00000010U)
+#define NV2080_CTRL_PERF_PSTATES_P5            (0x00000020U)
+#define NV2080_CTRL_PERF_PSTATES_P6            (0x00000040U)
+#define NV2080_CTRL_PERF_PSTATES_P7            (0x00000080U)
+#define NV2080_CTRL_PERF_PSTATES_P8            (0x00000100U)
+#define NV2080_CTRL_PERF_PSTATES_P9            (0x00000200U)
+#define NV2080_CTRL_PERF_PSTATES_P10           (0x00000400U)
+#define NV2080_CTRL_PERF_PSTATES_P11           (0x00000800U)
+#define NV2080_CTRL_PERF_PSTATES_P12           (0x00001000U)
+#define NV2080_CTRL_PERF_PSTATES_P13           (0x00002000U)
+#define NV2080_CTRL_PERF_PSTATES_P14           (0x00004000U)
+#define NV2080_CTRL_PERF_PSTATES_P15           (0x00008000U)
+#define NV2080_CTRL_PERF_PSTATES_MAX           NV2080_CTRL_PERF_PSTATES_P15
+#define NV2080_CTRL_PERF_PSTATES_SKIP_ENTRY    (0x10000U) /* finn: Evaluated from "(NV2080_CTRL_PERF_PSTATES_MAX << 1)" */
+#define NV2080_CTRL_PERF_PSTATES_ALL           (0xffffU) /* finn: Evaluated from "(NV2080_CTRL_PERF_PSTATES_MAX | (NV2080_CTRL_PERF_PSTATES_MAX - 1))" */
+/*!@}*/
+
+/*
+ * NV2080_CTRL_CMD_PERF_GET_CURRENT_PSTATE
+ *
+ * This command returns the current performance state of the GPU.
+ *
+ *   currPstate
+ *     This parameter returns the current P-state, as defined in the
+ *     NV2080_CTRL_PERF_PSTATES values.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV2080_CTRL_CMD_PERF_GET_CURRENT_PSTATE (0x20802068) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_GET_CURRENT_PSTATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PERF_GET_CURRENT_PSTATE_PARAMS_MESSAGE_ID (0x68U)
+
+typedef struct NV2080_CTRL_PERF_GET_CURRENT_PSTATE_PARAMS {
+    NvU32 currPstate;
+} NV2080_CTRL_PERF_GET_CURRENT_PSTATE_PARAMS;
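+
+/*
+ * Usage sketch (editor's illustration): reading the current P-state. The
+ * returned value uses the NV2080_CTRL_PERF_PSTATES_P* mask bits above.
+ * rmControl() and the handles are the assumed client-side helpers from the
+ * first sketch in this file.
+ *
+ *   NV2080_CTRL_PERF_GET_CURRENT_PSTATE_PARAMS params = { 0 };
+ *   NV_STATUS status;
+ *   status = rmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_CMD_PERF_GET_CURRENT_PSTATE,
+ *                      &params, sizeof(params));
+ *   if (status == NV_OK && (params.currPstate & NV2080_CTRL_PERF_PSTATES_P0))
+ *   {
+ *       // GPU is at maximum 3D performance.
+ *   }
+ */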
+
+/*
+ * NV2080_CTRL_CMD_PERF_GET_TEGRA_PERFMON_SAMPLE
+ *
+ * Fetch the busyness of the specified clock domain in percentage
+ * for Tegra chips.
+ *
+ *   clkDomain
+ *     Clock domain identifier for Tegra platforms.
+ *
+ *   clkPercentBusy
+ *     Busyness of the specified clock domain in percentage units,
+ *     ranging from 0 to 100.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_PERF_GET_TEGRA_PERFMON_SAMPLE (0x20802069) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_GET_TEGRA_PERFMON_SAMPLE_PARAMS_MESSAGE_ID" */
+
+typedef NvU32 NV2080_CTRL_CLK_DOMAIN_TEGRA;
+
+#define NV2080_CTRL_PERF_GET_TEGRA_PERFMON_SAMPLE_PARAMS_MESSAGE_ID (0x69U)
+
+typedef struct NV2080_CTRL_PERF_GET_TEGRA_PERFMON_SAMPLE_PARAMS {
+    NV2080_CTRL_CLK_DOMAIN_TEGRA clkDomain;
+    NvU32                        clkPercentBusy;
+} NV2080_CTRL_PERF_GET_TEGRA_PERFMON_SAMPLE_PARAMS;
+
+
+/* _ctrl2080perf_h_ */
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h
new file mode 100644
index 0000000..137549f
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h
@@ -0,0 +1,33 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080perf_cf.finn
+//
+
+
+/* _ctrl2080perf_cf_h_ */
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h
new file mode 100644
index 0000000..a0b1190
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h
@@ -0,0 +1,32 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.finn
+//
+
+
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h
new file mode 100644
index 0000000..6fcd9ac
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h
@@ -0,0 +1,67 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080pmgr.finn +// + + + +#define NV2080_CTRL_PMGR_MODULE_INFO_NVSWITCH_NOT_SUPPORTED 0U +#define NV2080_CTRL_PMGR_MODULE_INFO_NVSWITCH_SUPPORTED 1U +#define NV2080_CTRL_PMGR_MODULE_INFO_NVSWITCH_NOT_APPLICABLE 2U + +/*! + * NV2080_CTRL_PMGR_MODULE_INFO_PARAMS + * + * This provides information about different module properties + * + * moduleId[OUT] + * - This is a static HW identifier that is unique for each module on a given baseboard. + * For non-baseboard products this would always be 0. + * nvswitchSupport[OUT] + * - NVSwitch present or not. Possible values are + * NV2080_CTRL_PMGR_MODULE_INFO_NVSWITCH_NOT_SUPPORTED + * NV2080_CTRL_PMGR_MODULE_INFO_NVSWITCH_SUPPORTED + * NV2080_CTRL_PMGR_MODULE_INFO_NVSWITCH_NOT_APPLICABLE + */ +#define NV2080_CTRL_PMGR_MODULE_INFO_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_PMGR_MODULE_INFO_PARAMS { + NvU32 moduleId; + NvU8 nvswitchSupport; +} NV2080_CTRL_PMGR_MODULE_INFO_PARAMS; + +/*! + * NV2080_CTRL_CMD_PMGR_GET_MODULE_INFO + * + * Control call to query the subdevice module INFO. + * + */ +#define NV2080_CTRL_CMD_PMGR_GET_MODULE_INFO (0x20802609) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PMGR_INTERFACE_ID << 8) | NV2080_CTRL_PMGR_MODULE_INFO_PARAMS_MESSAGE_ID" */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmu.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmu.h new file mode 100644 index 0000000..d4f9bb3 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmu.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080pmu.finn +// + + + +/*! + * @file + * + * @brief Enumeration of all PMU RMCTRL identifiers. + */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h new file mode 100644 index 0000000..a68cfaa --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080pmumon.finn +// + + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h new file mode 100644 index 0000000..2f0abcb --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h @@ -0,0 +1,123 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080power.finn +// + +#include "ctrl/ctrl2080/ctrl2080base.h" + + + +/*! + * @brief GC6 flavor ids + */ +typedef enum NV2080_CTRL_GC6_FLAVOR_ID { + NV2080_CTRL_GC6_FLAVOR_ID_MSHYBRID = 0, + NV2080_CTRL_GC6_FLAVOR_ID_OPTIMUS = 1, + + + NV2080_CTRL_GC6_FLAVOR_ID_MAX = 4, +} NV2080_CTRL_GC6_FLAVOR_ID; + +/* + * NV2080_CTRL_CMD_GC6_ENTRY + * + * This command executes the steps of GC6 entry sequence + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED (non-fatal) + * NV_ERR_INVALID_STATE (non-fatal) + * NV_ERR_INVALID_ARGUMENT (non-fatal) + * NV_ERR_NOT_READY (non-fatal) + * NV_ERR_TIMEOUT + * NV_ERR_GENERIC + */ +#define NV2080_CTRL_CMD_GC6_ENTRY (0x2080270d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_POWER_INTERFACE_ID << 8) | NV2080_CTRL_GC6_ENTRY_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GC6_ENTRY_PARAMS_MESSAGE_ID (0xDU) + +typedef struct NV2080_CTRL_GC6_ENTRY_PARAMS { + NV2080_CTRL_GC6_FLAVOR_ID flavorId; + NvU32 stepMask; + struct { + + + NvBool bIsRTD3Transition; + NvBool bIsRTD3CoreRailPowerCut; + + + NvBool bSkipPstateSanity; + } params; +} NV2080_CTRL_GC6_ENTRY_PARAMS; + +/* + * NV2080_CTRL_CMD_GC6_EXIT + * + * This command executes the steps of GC6 exit sequence + * + * Possible status return values are: + * NV_OK + * NV_ERR_GENERIC + */ +#define NV2080_CTRL_CMD_GC6_EXIT (0x2080270e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_POWER_INTERFACE_ID << 8) | NV2080_CTRL_GC6_EXIT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GC6_EXIT_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NV2080_CTRL_GC6_EXIT_PARAMS { + NV2080_CTRL_GC6_FLAVOR_ID flavorId; + struct { + NvBool bIsGpuSelfWake; + NvBool bIsRTD3Transition; + + + NvBool bIsRTD3HotTransition; //output + } params; +} NV2080_CTRL_GC6_EXIT_PARAMS; + +/*! + * @brief GC6 step ids + */ +typedef enum NV2080_CTRL_GC6_STEP_ID { + NV2080_CTRL_GC6_STEP_ID_SR_ENTRY = 0, + NV2080_CTRL_GC6_STEP_ID_GPU_OFF = 1, + NV2080_CTRL_GC6_STEP_ID_MAX = 2, +} NV2080_CTRL_GC6_STEP_ID; + +typedef struct NV2080_CTRL_GC6_FLAVOR_INFO { + NV2080_CTRL_GC6_FLAVOR_ID flavorId; + NvU32 stepMask; +} NV2080_CTRL_GC6_FLAVOR_INFO; + + + +/* _ctrl2080power_h_ */ + + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h new file mode 100644 index 0000000..86bf7e9 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h @@ -0,0 +1,384 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080rc.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/*
+ * NV2080_CTRL_CMD_RC_READ_VIRTUAL_MEM
+ *
+ * This command returns data read from the specified virtual memory address for
+ * the associated subdevice.
+ *
+ *   hChannel
+ *     This parameter specifies the channel object handle from which the
+ *     virtual memory range applies.
+ *   virtAddress
+ *     This parameter specifies the GPU base virtual memory address from which
+ *     data should be read. The amount of data read is specified by the
+ *     bufferSize parameter.
+ *   bufferPtr
+ *     This parameter specifies the buffer address in the caller's address
+ *     space into which the data is to be returned. The address must be aligned
+ *     on an 8-byte boundary. The buffer must be at least as big as the value
+ *     specified by the bufferSize parameter (in bytes).
+ *   bufferSize
+ *     This parameter specifies the size of the buffer referenced by the
+ *     bufferPtr parameter. This parameter also indicates the total number of
+ *     bytes to be returned.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_XLATE
+ */
+#define NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS {
+    NvHandle hChannel;
+    NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
+    NV_DECLARE_ALIGNED(NvP64 bufferPtr, 8);
+    NvU32    bufferSize;
+} NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS;
+
+#define NV2080_CTRL_CMD_RC_READ_VIRTUAL_MEM (0x20802204) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS_MESSAGE_ID" */
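+
+/*
+ * Usage sketch (editor's illustration): reading 256 bytes from a channel's
+ * GPU virtual address into an 8-byte-aligned local buffer. rmControl(), the
+ * handles, and gpuVa are assumed client-side values, and NV_PTR_TO_NvP64 is
+ * assumed as the usual NvP64 conversion helper from nvtypes.h.
+ *
+ *   NvU64 buffer[32]; // 256 bytes, naturally 8-byte aligned
+ *   NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS params = { 0 };
+ *   NV_STATUS status;
+ *   params.hChannel    = hChannel;
+ *   params.virtAddress = gpuVa;
+ *   params.bufferPtr   = NV_PTR_TO_NvP64(buffer);
+ *   params.bufferSize  = sizeof(buffer);
+ *   status = rmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_CMD_RC_READ_VIRTUAL_MEM,
+ *                      &params, sizeof(params));
+ */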
+
+/*
+ * NV2080_CTRL_CMD_RC_GET_ERROR_COUNT
+ *
+ * This command returns the number of RC errors.
+ *
+ *   errorCount
+ *     Number of RC errors.
+ *
+ * Note: If SMC is enabled, mig/monitor capability must be acquired to query
+ * aggregate information. Otherwise, the control call returns
+ * NV_ERR_INSUFFICIENT_PERMISSIONS.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ */
+#define NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS {
+    NvU32 errorCount;
+} NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS;
+
+#define NV2080_CTRL_CMD_RC_GET_ERROR_COUNT (0x20802205) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_RC_ERROR_PARAMS_BUFFER_SIZE (0x2000) /* finn: Evaluated from "(8 * 1024)" */
+
+#define NV2080_CTRL_CMD_RC_GET_ERROR (0x20802206) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0x6" */
+
+/*
+ * NV2080_CTRL_CMD_RC_GET_ERROR_V2
+ *
+ * This command returns an error element in the RC error list.
+ *
+ *   whichBuffer
+ *     Which error to return (0 is oldest)
+ *   outputRecordSize
+ *     Output size of the record -- zero if the error record doesn't exist
+ *   recordBuffer
+ *     Buffer receiving the error record
+ *
+ * Note: If SMC is enabled, mig/monitor capability must be acquired to query
+ * aggregate information. Otherwise, the control call returns
+ * NV_ERR_INSUFFICIENT_PERMISSIONS.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *
+ */
+
+#define NV2080_CTRL_RC_GET_ERROR_V2_PARAMS_MESSAGE_ID (0x13U)
+
+typedef struct NV2080_CTRL_RC_GET_ERROR_V2_PARAMS {
+
+    NvU32 whichBuffer;      // [IN]  - which error to return (0 is oldest)
+    NvU32 outputRecordSize; // [OUT]
+    NvU8  recordBuffer[NV2080_CTRL_RC_ERROR_PARAMS_BUFFER_SIZE];
+} NV2080_CTRL_RC_GET_ERROR_V2_PARAMS;
+
+#define NV2080_CTRL_CMD_RC_GET_ERROR_V2 (0x20802213) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_RC_GET_ERROR_V2_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_RC_SET_CLEAN_ERROR_HISTORY
+ *
+ * This command cleans the error history.
+ *
+ * This command has no input parameters.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+
+#define NV2080_CTRL_CMD_RC_SET_CLEAN_ERROR_HISTORY (0x20802207) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0x7" */
+
+/*
+ * NV2080_CTRL_CMD_RC_GET_WATCHDOG_INFO
+ *
+ * This command returns information about the RC watchdog.
+ *
+ *   watchdogStatusFlags
+ *     This output parameter is a combination of one or more of the following:
+ *
+ *     NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_NONE
+ *       This is the value of watchdogStatusFlags if no flags are set.
+ *
+ *     NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_DISABLED
+ *       This means that the watchdog is disabled.
+ *
+ *     NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_RUNNING
+ *       This means that the watchdog is running.
+ *
+ *     NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_INITIALIZED
+ *       This means that the watchdog has been initialized.
+ *
+ * A typical result would be either "running and initialized", or
+ * "disabled". However, "initialized, but not running, and not disabled"
+ * is also quite reasonable (if the computer is hibernating, for example).
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS { + NvU32 watchdogStatusFlags; +} NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS; + +#define NV2080_CTRL_CMD_RC_GET_WATCHDOG_INFO (0x20802209) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS_MESSAGE_ID" */ + +/* valid values for watchdogStatusFlags */ +#define NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_NONE (0x00000000) +#define NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_DISABLED (0x00000001) +#define NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_RUNNING (0x00000002) +#define NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_INITIALIZED (0x00000004) + +/* + * NV2080_CTRL_CMD_RC_DISABLE_WATCHDOG + * + * This command disables the RC watchdog, if possible. + * If, however, another RM client has already explicitly (via NV2080 call) enabled + * the RC watchdog, then this method returns NV_ERR_STATE_IN_USE. + * + * This command, if successful, will prevent other clients from enabling the + * watchdog until the calling RM client releases its request with + * NV2080_CTRL_CMD_RC_RELEASE_WATCHDOG_REQUESTS or frees its NV20_SUBDEVICE. + * + * See NV2080_CTRL_CMD_RC_SOFT_DISABLE_WATCHDOG for disabling the watchdog + * without preventing other clients from enabling it. + * + * Possible status return values are: + * NV_OK + * NV_ERR_STATE_IN_USE + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_RC_DISABLE_WATCHDOG (0x2080220a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0xA" */ + +/* + * NV2080_CTRL_CMD_RC_ENABLE_WATCHDOG + * + * This command enables the RC watchdog, if possible. + * If, however, another RM client has already explicitly (via NV2080 call) disabled + * the RC watchdog, then this method returns NV_ERR_STATE_IN_USE. + * + * Possible status return values are: + * NV_OK + * NV_ERR_STATE_IN_USE + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_RC_ENABLE_WATCHDOG (0x2080220b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0xB" */ + +/* + * NV2080_CTRL_CMD_RC_RELEASE_WATCHDOG_REQUESTS + * + * This command releases all of the RM client's outstanding requests to enable + * or disable the watchdog. + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_RC_RELEASE_WATCHDOG_REQUESTS (0x2080220c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0xC" */ + +/* + * NV2080_CTRL_CMD_SET_RC_RECOVERY/NV2080_CTRL_CMD_GET_RC_RECOVERY + * + * This command disables/enables RC recovery. + * + * rcEnable + * NV2080_CTRL_CMD_SET_RC_RECOVERY_DISABLED + * Disable robust channel recovery. + * + * NV2080_CTRL_CMD_SET_RC_RECOVERY_ENABLED + * Enable robust channel recovery with default breakpoint handling. 
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+typedef struct NV2080_CTRL_CMD_RC_RECOVERY_PARAMS {
+    NvU32 rcEnable;
+} NV2080_CTRL_CMD_RC_RECOVERY_PARAMS;
+
+#define NV2080_CTRL_CMD_SET_RC_RECOVERY (0x2080220d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_SET_RC_RECOVERY_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_SET_RC_RECOVERY_PARAMS_MESSAGE_ID (0xDU)
+
+typedef NV2080_CTRL_CMD_RC_RECOVERY_PARAMS NV2080_CTRL_SET_RC_RECOVERY_PARAMS;
+
+#define NV2080_CTRL_CMD_GET_RC_RECOVERY (0x2080220e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_GET_RC_RECOVERY_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GET_RC_RECOVERY_PARAMS_MESSAGE_ID (0xEU)
+
+typedef NV2080_CTRL_CMD_RC_RECOVERY_PARAMS NV2080_CTRL_GET_RC_RECOVERY_PARAMS;
+
+/* valid values for rcEnable */
+#define NV2080_CTRL_CMD_RC_RECOVERY_DISABLED (0x00000000)
+#define NV2080_CTRL_CMD_RC_RECOVERY_ENABLED  (0x00000001)
+
+/*
+ * NV2080_CTRL_CMD_TDR_SET_TIMEOUT_STATE
+ *
+ * This command can be used to set the TDR timeout state.
+ *
+ * It can be used to indicate that a timeout has occurred and that a GPU
+ * reset will start. It can also be used to indicate that the reset has
+ * completed, along with the corresponding completion status.
+ *
+ *   cmd
+ *     This parameter is used to indicate the stage of the TDR recovery
+ *     process. Legal values for this parameter are:
+ *       NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_BEGIN
+ *         This value indicates that TDR recovery is about to begin.
+ *       NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_END
+ *         This value indicates that TDR recovery has completed.
+ *
+ *   status
+ *     This parameter is valid when the cmd parameter is set to
+ *     NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_END. It is used
+ *     to specify the completion status of the TDR recovery. Legal
+ *     values for this parameter include:
+ *       NV2080_CTRL_TDR_SET_TIMEOUT_STATE_STATUS_FAIL
+ *         This value indicates the recovery failed.
+ *       NV2080_CTRL_TDR_SET_TIMEOUT_STATE_STATUS_SUCCESS
+ *         This value indicates the recovery succeeded.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_COMMAND
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV2080_CTRL_CMD_TDR_SET_TIMEOUT_STATE (0x2080220f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_TDR_SET_TIMEOUT_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_TDR_SET_TIMEOUT_STATE_PARAMS_MESSAGE_ID (0xFU)
+
+typedef struct NV2080_CTRL_TDR_SET_TIMEOUT_STATE_PARAMS {
+    NvU32 cmd;
+    NvS32 status;
+} NV2080_CTRL_TDR_SET_TIMEOUT_STATE_PARAMS;
+
+/* valid cmd values */
+#define NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_BEGIN (0x00000000)
+#define NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_END   (0x00000001)
+
+/* valid status values */
+#define NV2080_CTRL_TDR_SET_TIMEOUT_STATE_STATUS_SUCCESS (0x00000000)
+#define NV2080_CTRL_TDR_SET_TIMEOUT_STATE_STATUS_FAIL    (0x00000001)
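+
+/*
+ * Usage sketch (editor's illustration): reporting a completed, successful TDR
+ * recovery. rmControl() and the handles are the assumed client-side helpers
+ * from the earlier sketches.
+ *
+ *   NV2080_CTRL_TDR_SET_TIMEOUT_STATE_PARAMS params = { 0 };
+ *   NV_STATUS status;
+ *   params.cmd    = NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_END;
+ *   params.status = NV2080_CTRL_TDR_SET_TIMEOUT_STATE_STATUS_SUCCESS;
+ *   status = rmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_CMD_TDR_SET_TIMEOUT_STATE,
+ *                      &params, sizeof(params));
+ */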
+
+/*
+ * NV2080_CTRL_CMD_RC_SOFT_DISABLE_WATCHDOG
+ *
+ * This command disables the RC watchdog, similarly to how
+ * NV2080_CTRL_CMD_RC_DISABLE_WATCHDOG does. However, unlike that command, this
+ * command will not prevent another RM client from explicitly enabling the RC
+ * watchdog with NV2080_CTRL_CMD_RC_ENABLE_WATCHDOG.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_STATE_IN_USE
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_RC_SOFT_DISABLE_WATCHDOG (0x20802210) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0x10" */
+
+/*
+ * NV2080_CTRL_CMD_GET_RC_INFO/NV2080_CTRL_CMD_SET_RC_INFO
+ *
+ * These commands can be used to get and set robust channel parameters.
+ *
+ *   rcMode
+ *     NV2080_CTRL_CMD_SET_RC_INFO_MODE_DISABLE
+ *       Disable robust channel operation.
+ *
+ *     NV2080_CTRL_CMD_SET_RC_INFO_MODE_ENABLE
+ *       Enable robust channel operation.
+ *
+ *   rcBreak
+ *     NV2080_CTRL_CMD_SET_RC_INFO_BREAK_DISABLE
+ *       Disable breakpoint handling during robust channel operation.
+ *
+ *     NV2080_CTRL_CMD_SET_RC_INFO_BREAK_ENABLE
+ *       Enable breakpoint handling during robust channel operation.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+typedef struct NV2080_CTRL_CMD_RC_INFO_PARAMS {
+    NvU32 rcMode;
+    NvU32 rcBreak;
+} NV2080_CTRL_CMD_RC_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_SET_RC_INFO (0x20802211) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_SET_RC_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_SET_RC_INFO_PARAMS_MESSAGE_ID (0x11U)
+
+typedef NV2080_CTRL_CMD_RC_INFO_PARAMS NV2080_CTRL_SET_RC_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_GET_RC_INFO (0x20802212) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_GET_RC_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GET_RC_INFO_PARAMS_MESSAGE_ID (0x12U)
+
+typedef NV2080_CTRL_CMD_RC_INFO_PARAMS NV2080_CTRL_GET_RC_INFO_PARAMS;
+
+/* valid rcMode values */
+#define NV2080_CTRL_CMD_RC_INFO_MODE_DISABLE  (0x00000000)
+#define NV2080_CTRL_CMD_RC_INFO_MODE_ENABLE   (0x00000001)
+
+/* valid rcBreak values */
+#define NV2080_CTRL_CMD_RC_INFO_BREAK_DISABLE (0x00000000)
+#define NV2080_CTRL_CMD_RC_INFO_BREAK_ENABLE  (0x00000001)
+
+/* _ctrl2080rc_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spdm.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spdm.h
new file mode 100644
index 0000000..4c25165
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spdm.h
@@ -0,0 +1,248 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080spdm.finn
+//
+
+
+
+/*************************** SPDM COMMANDS ************************************/
+
+#include "cc_drv.h"
+
+/*!
+ * @brief SPDM Command Types
+ *
+ */
+#define RM_GSP_SPDM_CMD_ID_CC_INIT           (0x1)
+#define RM_GSP_SPDM_CMD_ID_CC_DEINIT         (0x2)
+#define RM_GSP_SPDM_CMD_ID_CC_CTRL           (0x3)
+#define RM_GSP_SPDM_CMD_ID_CC_INIT_RM_DATA   (0x4)
+#define RM_GSP_SPDM_CMD_ID_CC_HEARTBEAT_CTRL (0x5)
+#define RM_GSP_SPDM_CMD_ID_FIPS_SELFTEST     (0x6)
+
+
+#define RM_GSP_SPDM_CMD_ID_INVALID_COMMAND   (0xFF)
+
+#define SPDM_SESSION_ESTABLISHMENT_TRANSCRIPT_BUFFER_SIZE 0x2400
+
+
+
+#define RSVD7_SIZE 16
+
+#define RSVD8_SIZE 2
+
+/*!
+ * Guest RM provides INIT context
+ */
+typedef struct RM_GSP_SPDM_CMD_CC_INIT {
+    // Command must be first as this struct is part of the union
+    NvU8 cmdType;
+} RM_GSP_SPDM_CMD_CC_INIT;
+typedef struct RM_GSP_SPDM_CMD_CC_INIT *PRM_GSP_SPDM_CMD_CC_INIT;
+
+/*!
+ * Guest RM provides DEINIT context
+ */
+typedef struct RM_GSP_SPDM_CMD_CC_DEINIT {
+    // Command must be first as this struct is part of the union
+    NvU8 cmdType;
+} RM_GSP_SPDM_CMD_CC_DEINIT;
+typedef struct RM_GSP_SPDM_CMD_CC_DEINIT *PRM_GSP_SPDM_CMD_CC_DEINIT;
+
+/*!
+ * RM provides the SPDM request info to GSP
+ */
+typedef struct RM_GSP_SPDM_CMD_CC_CTRL {
+    // Command must be first as this struct is part of the union
+    NvU8 cmdType;
+} RM_GSP_SPDM_CMD_CC_CTRL;
+typedef struct RM_GSP_SPDM_CMD_CC_CTRL *PRM_GSP_SPDM_CMD_CC_CTRL;
+
+typedef struct RM_GSP_SPDM_CMD_CC_INIT_RM_DATA {
+    // Command must be first as this struct is part of the union
+    NvU8  cmdType;
+
+    NvU32 rsvd0[2];
+
+    NvU32 rsvd1;
+
+    char  rsvd2[9];
+
+    char  rsvd3[5];
+
+    char  rsvd4[5];
+
+    char  rsvd5[5];
+
+    char  rsvd6[2];
+
+    char  rsvd7[RSVD7_SIZE];
+
+    NvU32 rsvd8[RSVD8_SIZE];
+} RM_GSP_SPDM_CMD_CC_INIT_RM_DATA;
+typedef struct RM_GSP_SPDM_CMD_CC_INIT_RM_DATA *PRM_GSP_SPDM_CMD_CC_INIT_RM_DATA;
+
+typedef struct RM_GSP_SPDM_CMD_CC_HEARTBEAT_CTRL {
+    // Command must be first as this struct is part of the union
+    NvU8   cmdType;
+
+    // Whether to enable or disable heartbeats
+    NvBool bEnable;
+} RM_GSP_SPDM_CMD_CC_HEARTBEAT_CTRL;
+typedef struct RM_GSP_SPDM_CMD_CC_HEARTBEAT_CTRL *PRM_GSP_SPDM_CMD_CC_HEARTBEAT_CTRL;
+
+
+
+/*!
+ * HCC FIPS Self-test.
+ */
+#define CE_FIPS_SELF_TEST_DATA_SIZE     16
+#define CE_FIPS_SELF_TEST_AUTH_TAG_SIZE 16
+#define CE_FIPS_SELF_TEST_IV_SIZE       12
+
+typedef struct RM_GSP_SPDM_CMD_FIPS_SELFTEST {
+    NvU8   cmdType;
+    NvU8   isEnc;
+    CC_KMB kmb;
+    NvU8   text[CE_FIPS_SELF_TEST_DATA_SIZE];
+    NvU8   authTag[CE_FIPS_SELF_TEST_AUTH_TAG_SIZE];
+} RM_GSP_SPDM_CMD_FIPS_SELFTEST;
+typedef struct RM_GSP_SPDM_CMD_FIPS_SELFTEST *PRM_GSP_SPDM_CMD_FIPS_SELFTEST;
+
+
+
+/*!
+ * NOTE : Do not include structure members that have alignment requirement >= 8 to avoid alignment directives
+ * getting added in FINN generated structures / unions as RM_GSP_SPDM_CMD / RM_GSP_SPDM_MSG are pragma packed in
+ * other structures like RM_FLCN_CMD_GSP / RM_FLCN_MSG_GSP and pragma pack does not produce consistent behavior
+ * when paired with alignment directives on Linux and Windows.
+ */
+
+/*!
+ * A union of all SPDM Commands.
+ */
+typedef union RM_GSP_SPDM_CMD {
+    NvU8                              cmdType;
+    RM_GSP_SPDM_CMD_CC_INIT           ccInit;
+    RM_GSP_SPDM_CMD_CC_DEINIT         ccDeinit;
+    RM_GSP_SPDM_CMD_CC_CTRL           ccCtrl;
+    RM_GSP_SPDM_CMD_CC_INIT_RM_DATA   rmDataInitCmd;
+    RM_GSP_SPDM_CMD_CC_HEARTBEAT_CTRL ccHeartbeatCtrl;
+
+
+    RM_GSP_SPDM_CMD_FIPS_SELFTEST     ccFipsTest;
+
+} RM_GSP_SPDM_CMD;
+typedef union RM_GSP_SPDM_CMD *PRM_GSP_SPDM_CMD;
+
+/***************************** SPDM MESSAGES *********************************/
+
+/*!
+ * SPDM Message Status
+ */
+
+/*!
+ * Returns the status for programming CE keys to RM
+ */
+#define RM_GSP_SPDM_MSG_ID_CC_INIT           (0x1)
+#define RM_GSP_SPDM_MSG_ID_CC_DEINIT         (0x2)
+#define RM_GSP_SPDM_MSG_ID_CC_CTRL           (0x3)
+#define RM_GSP_SPDM_MSG_ID_CC_INIT_RM_DATA   (0x4)
+#define RM_GSP_SPDM_MSG_ID_CC_HEARTBEAT_CTRL (0x5)
+#define RM_GSP_SPDM_MSG_ID_FIPS_SELFTEST     (0x6)
+
+
+
+/*!
+ * Returns the Error Status for Invalid Command
+ */
+#define RM_GSP_SPDM_MSG_ID_INVALID_COMMAND   (0xFF)
+
+/*!
+ * NOTE : Do not include structure members that have alignment requirement >= 8 to avoid alignment directives
+ * getting added in FINN generated structures / unions as RM_GSP_SPDM_CMD / RM_GSP_SPDM_MSG are pragma packed in
+ * other structures like RM_FLCN_CMD_GSP / RM_FLCN_MSG_GSP and pragma pack does not produce consistent behavior
+ * when paired with alignment directives on Linux and Windows.
+ */
+
+/*!
+ * SPDM message structure.
+ */
+typedef struct RM_GSP_SPDM_MSG {
+    NvU8  msgType;
+
+    // status returned from GSP message infrastructure.
+    NvU32 status;
+
+    NvU32 rsvd1;
+
+    NvU32 rsvd2;
+
+    NvU32 rsvd3;
+
+    NvU32 rsvd4;
+
+    NvU32 rsvd5;
+} RM_GSP_SPDM_MSG;
+typedef struct RM_GSP_SPDM_MSG *PRM_GSP_SPDM_MSG;
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_SPDM_PARTITION
+ *
+ * This command does a partition switch to the SPDM partition.
+ *
+ */
+#define NV2080_CTRL_INTERNAL_SPDM_PARTITION (0x20800ad9) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_SPDM_PARTITION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_SPDM_PARTITION_PARAMS_MESSAGE_ID (0xD9U)
+
+typedef struct NV2080_CTRL_INTERNAL_SPDM_PARTITION_PARAMS {
+    NvU8            index;
+    RM_GSP_SPDM_CMD cmd;
+    RM_GSP_SPDM_MSG msg;
+} NV2080_CTRL_INTERNAL_SPDM_PARTITION_PARAMS;
+
+/*
+ * NV2080_CTRL_INTERNAL_SPDM_RETRIEVE_TRANSCRIPT
+ *
+ * This command retrieves the transcript of SPDM session establishment messages.
+ *
+ */
+#define NV2080_CTRL_INTERNAL_SPDM_RETRIEVE_TRANSCRIPT (0x20800ada) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_SPDM_RETRIEVE_TRANSCRIPT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_SPDM_RETRIEVE_TRANSCRIPT_PARAMS_MESSAGE_ID (0xDAU)
+
+typedef struct NV2080_CTRL_INTERNAL_SPDM_RETRIEVE_TRANSCRIPT_PARAMS {
+    NvU8  transcript[SPDM_SESSION_ESTABLISHMENT_TRANSCRIPT_BUFFER_SIZE];
+    NvU32 transcriptSize;
+} NV2080_CTRL_INTERNAL_SPDM_RETRIEVE_TRANSCRIPT_PARAMS;
+
+
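+
+/*
+ * Usage sketch (editor's illustration): retrieving the SPDM session
+ * establishment transcript. rmControl() and the handles are the assumed
+ * client-side helpers from earlier sketches; these _INTERNAL_ controls are
+ * normally issued within RM itself rather than by end-user clients.
+ *
+ *   NV2080_CTRL_INTERNAL_SPDM_RETRIEVE_TRANSCRIPT_PARAMS params = { 0 };
+ *   NV_STATUS status;
+ *   status = rmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_INTERNAL_SPDM_RETRIEVE_TRANSCRIPT,
+ *                      &params, sizeof(params));
+ *   // On NV_OK, the first params.transcriptSize bytes of params.transcript
+ *   // hold the raw SPDM session-establishment messages.
+ */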
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080spi.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h new file mode 100644 index 0000000..25d6c42 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h @@ -0,0 +1,417 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080thermal.finn +// + + + +/* + * Thermal System rmcontrol api versioning + */ +#define THERMAL_SYSTEM_API_VER 1U +#define THERMAL_SYSTEM_API_REV 0U + +/* + * NV2080_CTRL_THERMAL_SYSTEM constants + * + */ + +/* + * NV2080_CTRL_THERMAL_SYSTEM_TARGET + * + * Targets (ie the things the thermal system can observe). Target mask + * have to be in sync with corresponding element of NVAPI_THERMAL_TARGET + * enum, until there is a translation layer between these two. + * + * NV2080_CTRL_THERMAL_SYSTEM_TARGET_NONE + * There is no target. + * + * NV2080_CTRL_THERMAL_SYSTEM_TARGET_GPU + * The GPU is the target. 
+ *
+ * NV2080_CTRL_THERMAL_SYSTEM_TARGET_MEMORY
+ *   The memory is the target.
+ *
+ * NV2080_CTRL_THERMAL_SYSTEM_TARGET_POWER_SUPPLY
+ *   The power supply is the target.
+ *
+ * NV2080_CTRL_THERMAL_SYSTEM_TARGET_BOARD
+ *   The board (PCB) is the target.
+ */
+
+
+ /* NV2080_CTRL_THERMAL_SYSTEM_TARGET_UNKNOWN
+  * The target is unknown.
+  */
+#define NV2080_CTRL_THERMAL_SYSTEM_TARGET_NONE (0x00000000U)
+#define NV2080_CTRL_THERMAL_SYSTEM_TARGET_GPU (0x00000001U)
+#define NV2080_CTRL_THERMAL_SYSTEM_TARGET_MEMORY (0x00000002U)
+#define NV2080_CTRL_THERMAL_SYSTEM_TARGET_POWER_SUPPLY (0x00000004U)
+#define NV2080_CTRL_THERMAL_SYSTEM_TARGET_BOARD (0x00000008U)
+
+
+#define NV2080_CTRL_THERMAL_SYSTEM_TARGET_UNKNOWN (0xFFFFFFFFU)
+
+/*
+ * executeFlags values
+ */
+#define NV2080_CTRL_THERMAL_SYSTEM_EXECUTE_FLAGS_DEFAULT (0x00000000U)
+#define NV2080_CTRL_THERMAL_SYSTEM_EXECUTE_FLAGS_IGNORE_FAIL (0x00000001U)
+
+
+/*
+ * NV2080_CTRL_CMD_THERMAL_SYSTEM_EXECUTE_V2
+ *
+ * This command will execute a list of thermal system instructions:
+ *
+ * clientAPIVersion
+ *   This field must be set by the client to THERMAL_SYSTEM_API_VER,
+ *   which allows the driver to determine API compatibility.
+ *
+ * clientAPIRevision
+ *   This field must be set by the client to THERMAL_SYSTEM_API_REV,
+ *   which allows the driver to determine API compatibility.
+ *
+ * clientInstructionSizeOf
+ *   This field must be set by the client to
+ *   sizeof(NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION), which allows the
+ *   driver to determine API compatibility.
+ *
+ * executeFlags
+ *   This field is set by the client to control instruction execution.
+ *     NV2080_CTRL_THERMAL_SYSTEM_EXECUTE_FLAGS_DEFAULT
+ *       Execute instructions normally. The first instruction
+ *       failure will cause execution to stop.
+ *     NV2080_CTRL_THERMAL_SYSTEM_EXECUTE_FLAGS_IGNORE_FAIL
+ *       Execute all instructions, ignoring individual instruction failures.
+ *
+ * successfulInstructions
+ *   This field is set by the driver and is the number of instructions
+ *   that returned NV_OK on execution. If this field
+ *   matches instructionListSize, all instructions executed successfully.
+ *
+ * instructionListSize
+ *   This field is set by the client to the number of instructions in the
+ *   instruction list.
+ *
+ * instructionList
+ *   This field is set by the client to an array of thermal system
+ *   instructions (NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION) to execute.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+
+#define NV2080_CTRL_CMD_THERMAL_SYSTEM_EXECUTE_V2 (0x20800513U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_THERMAL_INTERFACE_ID << 8) | NV2080_CTRL_THERMAL_SYSTEM_EXECUTE_V2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_THERMAL_SYSTEM_EXECUTE_V2_PHYSICAL (0x20808513U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_THERMAL_LEGACY_NON_PRIVILEGED_INTERFACE_ID << 8) | NV2080_CTRL_THERMAL_SYSTEM_EXECUTE_V2_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO instructions...
+ *
+ */
+
+/*
+ * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGETS_AVAILABLE instruction
+ *
+ * Get the number of available targets.
+ *
+ * availableTargets
+ *   Returns the number of available targets. Targets are
+ *   identified by an index, starting with 0.
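+ *
+ * As a hedged usage sketch (instruction and field names as defined in this
+ * header; the control-call dispatch plumbing is assumed and not shown), a
+ * client could query the target count with a single EXECUTE_V2 instruction:
+ *
+ *     NV2080_CTRL_THERMAL_SYSTEM_EXECUTE_V2_PARAMS p;   // illustrative local
+ *     memset(&p, 0, sizeof(p));
+ *     p.clientAPIVersion        = THERMAL_SYSTEM_API_VER;
+ *     p.clientAPIRevision       = THERMAL_SYSTEM_API_REV;
+ *     p.clientInstructionSizeOf = sizeof(NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION);
+ *     p.instructionListSize     = 1;
+ *     p.instructionList[0].opcode =
+ *         NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGETS_AVAILABLE_OPCODE;
+ *     // issue NV2080_CTRL_CMD_THERMAL_SYSTEM_EXECUTE_V2, then read
+ *     // p.instructionList[0].operands.getInfoTargetsAvailable.availableTargets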
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGETS_AVAILABLE_OPCODE (0x00000100U) +typedef struct NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGETS_AVAILABLE_OPERANDS { + NvU32 availableTargets; +} NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGETS_AVAILABLE_OPERANDS; + +/* + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGET_TYPE instruction + * + * Get a target's type. + * + * targetIndex + * Set by the client to the desired target index. + * + * type + * Returns a target's type. + * Possible values returned are: + * NV2080_CTRL_THERMAL_SYSTEM_TARGET_NONE + * NV2080_CTRL_THERMAL_SYSTEM_TARGET_GPU + * NV2080_CTRL_THERMAL_SYSTEM_TARGET_MEMORY + * NV2080_CTRL_THERMAL_SYSTEM_TARGET_POWER_SUPPLY + * NV2080_CTRL_THERMAL_SYSTEM_TARGET_BOARD + * NV2080_CTRL_THERMAL_SYSTEM_TARGET_UNKNOWN + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGET_TYPE_OPCODE (0x00000101U) +typedef struct NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGET_TYPE_OPERANDS { + NvU32 targetIndex; + NvU32 type; +} NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGET_TYPE_OPERANDS; + +/* + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_PROVIDER_TYPE instruction + * + * Get a providers's type. + * + * providerIndex + * Set by the client to the desired provider index. + * + * type + * Returns a provider's type. + */ + + + /* + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_PROVIDER_TYPE_OPCODE (0x00000301U) +typedef struct NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_PROVIDER_TYPE_OPERANDS { + NvU32 providerIndex; + NvU32 type; +} NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_PROVIDER_TYPE_OPERANDS; + +/* + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSORS_AVAILABLE instruction + * + * Get the number of available sensors. + * + * availableSensors + * Returns the number of available sensors. Sensors are + * identified by an index, starting with 0. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSORS_AVAILABLE_OPCODE (0x00000500U) +typedef struct NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSORS_AVAILABLE_OPERANDS { + NvU32 availableSensors; +} NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSORS_AVAILABLE_OPERANDS; + +/* + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_PROVIDER instruction + * + * Get a sensor's provider index. + * + * sensorIndex + * Set by the client to the desired sensor index. + * + * providerIndex + * Returns a sensor's provider index. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_PROVIDER_OPCODE (0x00000510U) +typedef struct NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_PROVIDER_OPERANDS { + NvU32 sensorIndex; + NvU32 providerIndex; +} NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_PROVIDER_OPERANDS; + +/*! + * Union of mode-specific arguments. + */ + + +/* + * NV2080_CTRL_THERMAL_SYSTEM_GET_STATUS_SENSOR_READING instruction + * + * Get a sensor's current reading. + * + * sensorIndex + * Set by the client to the desired sensor index. + * + * value + * Returns a sensor's current reading. 
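+ *
+ * Minimal sketch, reusing the hypothetical EXECUTE_V2 setup shown earlier
+ * (illustrative only; p is the same params struct):
+ *
+ *     p.instructionList[0].opcode =
+ *         NV2080_CTRL_THERMAL_SYSTEM_GET_STATUS_SENSOR_READING_OPCODE;
+ *     p.instructionList[0].operands.getStatusSensorReading.sensorIndex = 0;
+ *     // after EXECUTE_V2 returns, check executed != 0 and result == NV_OK,
+ *     // then read operands.getStatusSensorReading.value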
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_THERMAL_SYSTEM_GET_STATUS_SENSOR_READING_OPCODE (0x00001500U) +typedef struct NV2080_CTRL_THERMAL_SYSTEM_GET_STATUS_SENSOR_READING_OPERANDS { + NvU32 sensorIndex; + NvS32 value; +} NV2080_CTRL_THERMAL_SYSTEM_GET_STATUS_SENSOR_READING_OPERANDS; + +/* + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_TARGET instruction + * + * Get a sensor's target index. + * + * sensorIndex + * Set by the client to the desired sensor index. + * + * targetIndex + * Returns a sensor's target index. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_TARGET_OPCODE (0x00000520U) +typedef struct NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_TARGET_OPERANDS { + NvU32 sensorIndex; + NvU32 targetIndex; +} NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_TARGET_OPERANDS; + +/* + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_READING_RANGE instruction + * + * Get a sensor's readings range (ie min, max). + * + * sensorIndex + * Set by the client to the desired sensor index. + * + * minimum + * Returns a sensor's range minimum. + * + * maximum + * Returns a sensor's range maximum. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_READING_RANGE_OPCODE (0x00000540U) +typedef struct NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_READING_RANGE_OPERANDS { + NvU32 sensorIndex; + NvS32 minimum; + NvS32 maximum; +} NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_READING_RANGE_OPERANDS; + +/* + * Thermal System instruction operand + */ +typedef union NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION_OPERANDS { + + NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGETS_AVAILABLE_OPERANDS getInfoTargetsAvailable; + + NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGET_TYPE_OPERANDS getInfoTargetType; + + NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_PROVIDER_TYPE_OPERANDS getInfoProviderType; + + NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSORS_AVAILABLE_OPERANDS getInfoSensorsAvailable; + + NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_PROVIDER_OPERANDS getInfoSensorProvider; + + NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_TARGET_OPERANDS getInfoSensorTarget; + + NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_READING_RANGE_OPERANDS getInfoSensorReadingRange; + + NV2080_CTRL_THERMAL_SYSTEM_GET_STATUS_SENSOR_READING_OPERANDS getStatusSensorReading; + + + NvU32 space[8]; +} NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION_OPERANDS; + +/* + * NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION + * + * All thermal system instructions have the following layout: + * + * result + * This field is set by the driver, and is the result of the + * instruction's execution. This value is only valid if the + * executed field is not 0 upon return. + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + * + * executed + * This field is set by the driver, and + * indicates if the instruction was executed. + * Possible status values returned are: + * 0: Not executed + * 1: Executed + * + * opcode + * This field is set by the client to the desired instruction opcode. 
+ * Possible values are: + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGET_TYPE_OPCODE + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_PROVIDER_TYPE_OPCODE + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSORS_AVAILABLE_OPCODE + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_PROVIDER_OPCODE + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_TARGET_OPCODE + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_READING_RANGE_OPCODE + * NV2080_CTRL_THERMAL_SYSTEM_GET_STATUS_SENSOR_READING_OPCODE + */ + + + /* + * operands + * This field is actually a union of all of the available operands. + * The interpretation of this field is opcode context dependent. + * Possible values are: + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGET_TYPE_OPERANDS + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_PROVIDER_TYPE_OPERANDS + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSORS_AVAILABLE_OPERANDS + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_PROVIDER_OPERANDS + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_TARGET_OPERANDS + * NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_READING_RANGE_OPERANDS + * NV2080_CTRL_THERMAL_SYSTEM_GET_STATUS_SENSOR_READING_OPERANDS + */ + + +typedef struct NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION { + NvU32 result; + NvU32 executed; + NvU32 opcode; + NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION_OPERANDS operands; +} NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION; + +#define NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION_MAX_COUNT 0x20U +#define NV2080_CTRL_THERMAL_SYSTEM_EXECUTE_V2_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV2080_CTRL_THERMAL_SYSTEM_EXECUTE_V2_PARAMS { + NvU32 clientAPIVersion; + NvU32 clientAPIRevision; + NvU32 clientInstructionSizeOf; + NvU32 executeFlags; + NvU32 successfulInstructions; + NvU32 instructionListSize; + NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION instructionList[NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION_MAX_COUNT]; +} NV2080_CTRL_THERMAL_SYSTEM_EXECUTE_V2_PARAMS; + +/* _ctrl2080thermal_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h new file mode 100644 index 0000000..4d51f63 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h @@ -0,0 +1,260 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080/ctrl2080tmr.finn +// + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_TIMER related control commands and parameters */ + +/* + * NV2080_CTRL_CMD_TIMER_SCHEDULE + * + * This command schedules a GPU timer event to fire at the specified time interval. + * Can be called without API & GPU locks if NVOS54_FLAGS_IRQL_RAISED and + * NVOS54_FLAGS_LOCK_BYPASS are set in NVOS54_PARAMETERS.flags + * + * time_nsec + * This parameter specifies the time in nanoseconds at which the GPU timer + * event is to fire. + * flags + * This parameter determines the interpretation of the value specified by + * the time_nsec parameter: + * NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_ABS + * This flag indicates that time_nsec is in absolute time. + * NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_REL + * This flag indicates that time_nsec is in relative time. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV2080_CTRL_CMD_TIMER_SCHEDULE (0x20800401) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 time_nsec, 8); + NvU32 flags; +} NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS; + +/* valid flag values */ +#define NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME 0:0 +#define NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_ABS (0x00000000) +#define NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_REL (0x00000001) + +/* + * NV2080_CTRL_CMD_TIMER_CANCEL + * + * This command cancels any pending timer events initiated with the + * NV2080_CTRL_CMD_TIMER_SCHEDULE command. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_TIMER_CANCEL (0x20800402) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | 0x2" */ + +/* + * NV2080_CTRL_CMD_TIMER_GET_TIME + * + * This command returns the current GPU timer value. The current time is + * expressed in elapsed nanoseconds since 00:00 GMT, January 1, 1970 + * (zero hour) with a resolution of 32 nanoseconds. + * + * Can be called without API & GPU locks if NVOS54_FLAGS_IRQL_RAISED and + * NVOS54_FLAGS_LOCK_BYPASS are set in NVOS54_PARAMETERS.flags + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_TIMER_GET_TIME (0x20800403) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | NV2080_CTRL_TIMER_GET_TIME_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_TIMER_GET_TIME_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_TIMER_GET_TIME_PARAMS { + NV_DECLARE_ALIGNED(NvU64 time_nsec, 8); +} NV2080_CTRL_TIMER_GET_TIME_PARAMS; + +/* + * NV2080_CTRL_CMD_TIMER_GET_REGISTER_OFFSET + * + * The command returns the offset of the timer registers, so that clients may + * map them directly. 
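+ *
+ * (Hedged usage sketch; the register mapping is assumed to already exist and
+ * is not part of this API: a client would read the timer registers at the
+ * returned offset within its mapping of the GPU register space, e.g.
+ *
+ *     NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS op;   // illustrative
+ *     // ...issue NV2080_CTRL_CMD_TIMER_GET_REGISTER_OFFSET with &op
+ *     volatile NvU32 *tmr = (volatile NvU32 *)(regsBase + op.tmr_offset);
+ *
+ * where regsBase is the client's existing register mapping, a hypothetical
+ * name used here for illustration.)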
+ * + * Possible status values returned are: + * NV_OK + */ + +#define NV2080_CTRL_CMD_TIMER_GET_REGISTER_OFFSET (0x20800404) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS { + NvU32 tmr_offset; +} NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS; + +/* + * NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE + * + * This structure describes the information obtained with + * NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO command. + * + * gpuTime + * GPU time is the value of GPU global timer (PTIMER) with a resolution + * of 32 nano seconds. + * cpuTime + * CPU time. Resolution of the cpu time depends on its source. Refer to + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_* for more information. + + */ +typedef struct NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE { + NV_DECLARE_ALIGNED(NvU64 cpuTime, 8); + NV_DECLARE_ALIGNED(NvU64 gpuTime, 8); +} NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE; + + +/* + * NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO + * + * This command returns correlation information between GPU time and CPU time + * for a given CPU clock type. + * + * cpuClkId + * This parameter specifies the source of the CPU clock. This parameter is + * composed of two fields: + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_SOURCE + * This field specifies source ID of the CPU clock in question. Legal + * values for this parameter include: + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_OSTIME + * This clock id will provide real time in microseconds since + * 00:00:00 UTC on January 1, 1970, as reported by the host OS. + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PLATFORM_API + * This clock id will provide time stamp that is constant-rate, high + * precision using platform API that is also available in the user + * mode. + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_TSC + * This clock id will provide time stamp using CPU's time stamp + * counter. + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PROCESSOR + * This field specifies the processor whose clock should be used for the + * source. The control call and cpuClkId parameter remain named for the + * CPU specifically for legacy reasons. Not all processors will support + * all clock sources. Legal values for this parameter include: + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PROCESSOR_CPU + * The clock information will be fulfilled by the CPU. This value + * is defined to be 0 so that it is the default for backwards + * compatibility. + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PROCESSOR_GSP + * The clock information will be fulfilled by the GSP. + * + * sampleCount + * This field specifies the number of clock samples to be taken. + * This value cannot exceed NV2080_CTRL_TIMER_GPU_CPU_TIME_MAX_SAMPLES. + * + * samples + * This field returns an array of requested samples. Refer to + * NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE to get details about each entry + * in the array. 
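+ *
+ * Illustrative post-processing of the returned samples (note that the units
+ * of cpuTime depend on cpuClkId; OSTIME, for example, reports microseconds
+ * as described above, so convert to a common unit before differencing):
+ *
+ *     // offset of the GPU timebase relative to the CPU timebase
+ *     NvS64 delta = (NvS64)cp.samples[0].gpuTime - (NvS64)cp.samples[0].cpuTime;
+ *     // a later CPU timestamp t then maps to GPU time of roughly t + delta
+ *
+ * where cp is a hypothetical NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS
+ * already filled in by the control call.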
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO (0x20800406) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_TIMER_GPU_CPU_TIME_MAX_SAMPLES 16 + +#define NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS { + NvU8 cpuClkId; + NvU8 sampleCount; + NV_DECLARE_ALIGNED(NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE samples[NV2080_CTRL_TIMER_GPU_CPU_TIME_MAX_SAMPLES], 8); +} NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS; + +#define NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_SOURCE 3:0 + +/* Legal NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_SOURCE values */ +#define NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_OSTIME (0x00000001) +#define NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_TSC (0x00000002) +#define NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PLATFORM_API (0x00000003) +#define NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_GSP_OS (0x00000004) + +#define NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PROCESSOR 7:4 + +/* Legal NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PROCESSOR values */ +#define NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PROCESSOR_CPU (0x00000000) +#define NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PROCESSOR_GSP (0x00000001) + + +/*! + * NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ + * + * This command changes the frequency at which Graphics Engine time stamp is + * updated. Frequency can either be set to max or restored to default. + * Clients can independently use this call to increase the timer frequency + * as a global reference count is maintained for requests to Max frequency. + * Client is assured that the system stays in this state till the requested + * client releases the state or is killed. Timer frequency will automatically + * be restored to default when there is no pending request to increase. + * + * Note that recursive requests for the same state from the same client + * are considered invalid. + * + * bSetMaxFreq + * Set to NV_TRUE if GR tick frequency needs to be set to Max. + * + * See @ref NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS for + * documentation of parameters. + * + * Possible status values returned are + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_STATE_IN_USE + * NV_ERR_INVALID_OPERATION + * NV_ERR_INVALID_STATE + */ +#define NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ (0x20800407) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS_MESSAGE_ID" */ + +/*! + * This struct contains bSetMaxFreq flag. + */ +#define NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS { + NvBool bSetMaxFreq; +} NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS; + +/* _ctrl2080tmr_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h new file mode 100644 index 0000000..56463fb --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080ucodefuzzer.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h new file mode 100644 index 0000000..b058211 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h @@ -0,0 +1,190 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080unix.finn +// + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_XX OS control commands and parameters */ + +/* + * NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT + * + * This command increases or decreases the value of the per-GPU GC6 blocker + * refCount used by Linux kernel clients to prevent the GPU from entering GC6. + * + * When the refCount is non-zero, the GPU cannot enter GC6. When the refCount + * transitions from zero to non-zero as a result of this command, the GPU will + * automatically come out of GC6. + * + * action Whether to increment or decrement the value of the refCount. 
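+ *
+ * Typical (illustrative) pairing: a kernel client increments the refCount
+ * before touching the GPU and decrements it when done, e.g.
+ *
+ *     NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS rp;   // illustrative
+ *     rp.action = NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC;
+ *     // ...issue the control, access the GPU, then repeat with ..._DEC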
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT (0x20803d01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS {
+    NvU32 action;
+} NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS;
+
+// Possible values for action
+#define NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC (0x00000001)
+#define NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC (0x00000002)
+
+/*
+ * NV2080_CTRL_CMD_OS_UNIX_ALLOW_DISALLOW_GCOFF
+ *
+ * By default, RM allows GCOFF. When the X driver disallows entering GCOFF,
+ * this rmcontrol sets the flag to NV_FALSE; when it allows entering GCOFF,
+ * the flag is set to NV_TRUE.
+ *
+ * action Whether to allow or disallow user mode clients to enter GCOFF.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_OS_UNIX_ALLOW_DISALLOW_GCOFF (0x20803d02) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS {
+    NvU32 action;
+} NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS;
+
+// Possible values for action
+#define NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_ALLOW (0x00000001)
+#define NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_DISALLOW (0x00000002)
+
+/*
+ * NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER
+ *
+ * The GPU can have an integrated HDA (High Definition Audio) controller,
+ * which can be in an active or suspended state during dynamic power
+ * management. This command will perform HDA controller wakeup (if bEnter is
+ * false) or suspend (if bEnter is true).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER (0x20803d03) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS {
+    NvBool bEnter;
+} NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_OS_UNIX_INSTALL_PROFILER_HOOKS
+ *
+ * Initialize cyclestats HWPM support in the kernel. This will set up a callback
+ * event for the channel indicated by hNotifierResource. This callback will execute
+ * perf register read / write commands enqueued in the shared buffer indicated by
+ * hNotifyDataMemory. Only one client may use HWPM functionality at a time.
+ *
+ * Additionally, if perfmonIdCount is greater than zero, mode-e HWPM streaming into
+ * the buffer indicated by hSnapshotMemory will be initialized (but not turned on).
+ * Data will be copied into the provided buffer every 10ms, or whenever a
+ * NV2080_CTRL_CMD_OS_UNIX_FLUSH_SNAPSHOT_BUFFER command is issued.
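+ *
+ * (Expected call ordering, inferred from the commands defined in this file;
+ * illustrative only: INSTALL_PROFILER_HOOKS once per profiling session, any
+ * number of FLUSH_SNAPSHOT_BUFFER calls while streaming, then STOP_PROFILER
+ * before the snapshot buffer is freed, as required by the STOP_PROFILER
+ * documentation below.)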
+ */ +#define NV2080_CTRL_CMD_OS_UNIX_INSTALL_PROFILER_HOOKS (0x20803d04) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_INSTALL_PROFILER_HOOKS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_OS_UNIX_INSTALL_PROFILER_HOOKS_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_OS_UNIX_INSTALL_PROFILER_HOOKS_PARAMS { + NvHandle hNotifierResource; + NvU32 notifyDataSize; + NvHandle hNotifyDataMemory; + NvU32 perfmonIdCount; + NvU32 snapshotBufferSize; + NvHandle hSnapshotMemory; +} NV2080_CTRL_OS_UNIX_INSTALL_PROFILER_HOOKS_PARAMS; + +/* + * NV2080_CTRL_CMD_OS_UNIX_FLUSH_SNAPSHOT_BUFFER + * + * Immediately copies any pending mode-e HWPM data into the previously + * installed snapshot buffer instead of waiting for the timer. + */ +#define NV2080_CTRL_CMD_OS_UNIX_FLUSH_SNAPSHOT_BUFFER (0x20803d05) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | 0x5" */ + +/* + * NV2080_CTRL_CMD_OS_UNIX_STOP_PROFILER + * + * Stop the timer responsible for copying mode-e HWPM data to the snapshot buffer. + * The snapshot buffer must not be freed by the client before this command is issued. + */ +#define NV2080_CTRL_CMD_OS_UNIX_STOP_PROFILER (0x20803d06) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | 0x6" */ + +/* + * NV2080_CTRL_CMD_OS_UNIX_VIDMEM_PERSISTENCE_STATUS + * + * This command will be used by clients to check if the GPU video memory will + * be persistent during system suspend/resume cycle. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_OS_UNIX_VIDMEM_PERSISTENCE_STATUS (0x20803d07) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS { + NvBool bVidmemPersistent; +} NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS; + +/* + * NV2080_CTRL_CMD_OS_UNIX_UPDATE_TGP_STATUS + * + * This command will be used by clients to set restore TGP flag which will + * help to restore TGP limits when clients are destroyed. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_OS_UNIX_UPDATE_TGP_STATUS (0x20803d08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS { + NvBool bUpdateTGP; +} NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS; +/* _ctrl2080unix_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h new file mode 100644 index 0000000..5742c5e --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080vfe.finn +// + + + +/* _ctrl2080vfe_h_ */ +#include "nvfixedtypes.h" +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrl2080/ctrl2080boardobj.h" +#include "ctrl/ctrl2080/ctrl2080bios.h" + +/* --------------------------- Forward Defines ----------------------------- */ +/* --------------------------- VFE Variable -------------------------------- */ +/* --------------------------- VFE Equation -------------------------------- */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vgpumgrinternal.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vgpumgrinternal.h new file mode 100644 index 0000000..4b63733 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vgpumgrinternal.h @@ -0,0 +1,522 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080/ctrl2080vgpumgrinternal.finn +// + +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrla081.h" + +/* + * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_BOOTLOAD_GSP_VGPU_PLUGIN_TASK + * + * This command is used to bootload GSP VGPU plugin task. + * Can be called only with SR-IOV and with VGPU_GSP_PLUGIN_OFFLOAD feature. + * + * dbdf - domain (31:16), bus (15:8), device (7:3), function (2:0) + * gfid - Gfid + * vgpuType - The Type ID for VGPU profile + * vmPid - Plugin process ID of vGPU guest instance + * swizzId - SwizzId + * numChannels - Number of channels + * numPluginChannels - Number of plugin channels + * bDisableSmcPartitionRestore - If set to true, SMC default execution partition + * save/restore will not be done in host-RM + * guestFbPhysAddrList - list of VMMU segment aligned physical address of guest FB memory + * guestFbLengthList - list of guest FB memory length in bytes + * pluginHeapMemoryPhysAddr - plugin heap memory offset + * pluginHeapMemoryLength - plugin heap memory length in bytes + * migRmHeapMemoryPhysAddr - Mig rm heap memory region's physical offset. + * migRmHeapMemoryLength - Mig rm heap memory length in bytes + * bDeviceProfilingEnabled - If set to true, profiling is allowed + */ +#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_BOOTLOAD_GSP_VGPU_PLUGIN_TASK (0x20804001) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_VGPU_MGR_INTERNAL_BOOTLOAD_GSP_VGPU_PLUGIN_TASK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MAX_VMMU_SEGMENTS 384 + +/* Must match NV2080_ENGINE_TYPE_LAST from cl2080.h */ +#define NV2080_GPU_MAX_ENGINES 0x54 + +#define NV2080_CTRL_VGPU_MGR_INTERNAL_BOOTLOAD_GSP_VGPU_PLUGIN_TASK_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_VGPU_MGR_INTERNAL_BOOTLOAD_GSP_VGPU_PLUGIN_TASK_PARAMS { + NvU32 dbdf; + NvU32 gfid; + NvU32 vgpuType; + NvU32 vmPid; + NvU32 swizzId; + NvU32 numChannels; + NvU32 numPluginChannels; + NvU32 chidOffset[NV2080_GPU_MAX_ENGINES]; + NvBool bDisableDefaultSmcExecPartRestore; + NvU32 numGuestFbSegments; + NV_DECLARE_ALIGNED(NvU64 guestFbPhysAddrList[NV2080_CTRL_MAX_VMMU_SEGMENTS], 8); + NV_DECLARE_ALIGNED(NvU64 guestFbLengthList[NV2080_CTRL_MAX_VMMU_SEGMENTS], 8); + NV_DECLARE_ALIGNED(NvU64 pluginHeapMemoryPhysAddr, 8); + NV_DECLARE_ALIGNED(NvU64 pluginHeapMemoryLength, 8); + NV_DECLARE_ALIGNED(NvU64 ctrlBuffOffset, 8); + NV_DECLARE_ALIGNED(NvU64 initTaskLogBuffOffset, 8); + NV_DECLARE_ALIGNED(NvU64 initTaskLogBuffSize, 8); + NV_DECLARE_ALIGNED(NvU64 vgpuTaskLogBuffOffset, 8); + NV_DECLARE_ALIGNED(NvU64 vgpuTaskLogBuffSize, 8); + NV_DECLARE_ALIGNED(NvU64 kernelLogBuffOffset, 8); + NV_DECLARE_ALIGNED(NvU64 kernelLogBuffSize, 8); + NV_DECLARE_ALIGNED(NvU64 migRmHeapMemoryPhysAddr, 8); + NV_DECLARE_ALIGNED(NvU64 migRmHeapMemoryLength, 8); + NvBool bDeviceProfilingEnabled; +} NV2080_CTRL_VGPU_MGR_INTERNAL_BOOTLOAD_GSP_VGPU_PLUGIN_TASK_PARAMS; + +/* + * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SHUTDOWN_GSP_VGPU_PLUGIN_TASK + * + * This command is used to shutdown GSP VGPU plugin task. + * Can be called only with SR-IOV and with VGPU_GSP_PLUGIN_OFFLOAD feature. 
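+ *
+ * (Lifecycle note, inferred from the commands in this file: a plugin task is
+ * started with BOOTLOAD_GSP_VGPU_PLUGIN_TASK, stopped with this command, and
+ * its resources are then reclaimed with VGPU_PLUGIN_CLEANUP, all keyed by
+ * the same gfid.)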
+ * + * gfid - Gfid + */ +#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SHUTDOWN_GSP_VGPU_PLUGIN_TASK (0x20804002) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_VGPU_MGR_INTERNAL_SHUTDOWN_GSP_VGPU_PLUGIN_TASK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_VGPU_MGR_INTERNAL_SHUTDOWN_GSP_VGPU_PLUGIN_TASK_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_VGPU_MGR_INTERNAL_SHUTDOWN_GSP_VGPU_PLUGIN_TASK_PARAMS { + NvU32 gfid; +} NV2080_CTRL_VGPU_MGR_INTERNAL_SHUTDOWN_GSP_VGPU_PLUGIN_TASK_PARAMS; + +/* + * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_PGPU_ADD_VGPU_TYPE + * + * This command is used to add a new vGPU config to the pGPU in physical RM. + * Unlike NVA081_CTRL_CMD_VGPU_CONFIG_SET_INFO, it does no validation + * and is only to be used internally. + * + * discardVgpuTypes [IN] + * This parameter specifies if existing vGPU configuration should be + * discarded for given pGPU + * + * vgpuInfoCount [IN] + * This parameter specifies the number of entries of virtual GPU type +* information + * + * vgpuInfo [IN] + * This parameter specifies virtual GPU type information + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_PGPU_ADD_VGPU_TYPE (0x20804003) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_VGPU_MGR_INTERNAL_PGPU_ADD_VGPU_TYPE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_VGPU_MGR_INTERNAL_PGPU_ADD_VGPU_TYPE_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_VGPU_MGR_INTERNAL_PGPU_ADD_VGPU_TYPE_PARAMS { + NvBool discardVgpuTypes; + NvU32 vgpuInfoCount; + NV_DECLARE_ALIGNED(NVA081_CTRL_VGPU_INFO vgpuInfo[NVA081_MAX_VGPU_TYPES_PER_PGPU], 8); +} NV2080_CTRL_VGPU_MGR_INTERNAL_PGPU_ADD_VGPU_TYPE_PARAMS; + +/* + * NV2080_GUEST_VM_INFO + * + * This structure represents vGPU guest's (VM's) information + * + * vmPid [OUT] + * This param specifies the vGPU plugin process ID + * guestOs [OUT] + * This param specifies the vGPU guest OS type + * migrationProhibited [OUT] + * This flag indicates whether migration is prohibited for VM or not + * guestNegotiatedVgpuVersion [OUT] + * This param specifies the vGPU version of guest driver after negotiation + * frameRateLimit [OUT] + * This param specifies the current value of FRL set for guest + * licensed [OUT] + * This param specifies whether the VM is Unlicensed/Licensed + * licenseState [OUT] + * This param specifies the current state of the GRID license state machine + * licenseExpiryTimestamp [OUT] + * License expiry time in seconds since UNIX epoch + * licenseExpiryStatus [OUT] + * License expiry status + * guestDriverVersion [OUT] + * This param specifies the driver version of the driver installed on the VM + * guestDriverBranch [OUT] + * This param specifies the driver branch of the driver installed on the VM + * guestVmInfoState [OUT] + * This param stores the current state of guest dependent fields + * + */ +typedef struct NV2080_GUEST_VM_INFO { + NvU32 vmPid; + NvU32 guestOs; + NvU32 migrationProhibited; + NvU32 guestNegotiatedVgpuVersion; + NvU32 frameRateLimit; + NvBool licensed; + NvU32 licenseState; + NvU32 licenseExpiryTimestamp; + NvU8 licenseExpiryStatus; + NvU8 guestDriverVersion[NVA081_VGPU_STRING_BUFFER_SIZE]; + NvU8 guestDriverBranch[NVA081_VGPU_STRING_BUFFER_SIZE]; + GUEST_VM_INFO_STATE guestVmInfoState; +} NV2080_GUEST_VM_INFO; + +/* + * NV2080_GUEST_VGPU_DEVICE + * + * This structure represents host vgpu device's (assigned to VM) 
information
+ *
+ * gfid [OUT]
+ *   This parameter specifies the gfid of vGPU assigned to VM.
+ * vgpuPciId [OUT]
+ *   This parameter specifies vGPU PCI ID
+ * vgpuDeviceInstanceId [OUT]
+ *   This parameter specifies the vGPU device instance per VM to be used for supporting
+ *   multiple vGPUs per VM.
+ * fbUsed [OUT]
+ *   This parameter specifies FB usage in bytes
+ * eccState [OUT]
+ *   This parameter specifies the ECC state of the virtual GPU.
+ *   One of NVA081_CTRL_ECC_STATE_xxx values.
+ * bDriverLoaded [OUT]
+ *   This parameter specifies whether the driver is loaded on this particular vGPU.
+ *
+ */
+typedef struct NV2080_HOST_VGPU_DEVICE {
+    NvU32 gfid;
+    NV_DECLARE_ALIGNED(NvU64 vgpuPciId, 8);
+    NvU32 vgpuDeviceInstanceId;
+    NvU32 accountingPid;
+    NV_DECLARE_ALIGNED(NvU64 fbUsed, 8);
+    NvU32 encoderCapacity;
+    NvU32 eccState;
+    NvBool bDriverLoaded;
+} NV2080_HOST_VGPU_DEVICE;
+
+/*
+ * NV2080_VGPU_GUEST
+ *
+ * This structure represents a vGPU guest
+ *
+ */
+typedef struct NV2080_VGPU_GUEST {
+    NV2080_GUEST_VM_INFO guestVmInfo;
+    NV_DECLARE_ALIGNED(NV2080_HOST_VGPU_DEVICE vgpuDevice, 8);
+} NV2080_VGPU_GUEST;
+
+/*
+ * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_ENUMERATE_VGPU_PER_PGPU
+ *
+ * This command enumerates the list of vGPU guest instances per pGpu
+ *
+ * numVgpu [OUT]
+ *   This parameter specifies the number of virtual GPUs created on this physical GPU
+ *
+ * vgpuGuest [OUT]
+ *   This parameter specifies an array containing guest vgpu's information for
+ *   all vGPUs created on this physical GPU
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OBJECT_NOT_FOUND
+ */
+#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_ENUMERATE_VGPU_PER_PGPU (0x20804004) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_VGPU_MGR_INTERNAL_ENUMERATE_VGPU_PER_PGPU_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_VGPU_MGR_INTERNAL_ENUMERATE_VGPU_PER_PGPU_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV2080_CTRL_VGPU_MGR_INTERNAL_ENUMERATE_VGPU_PER_PGPU_PARAMS {
+    NvU32 numVgpu;
+    NV_DECLARE_ALIGNED(NV2080_VGPU_GUEST vgpuGuest[NVA081_MAX_VGPU_PER_PGPU], 8);
+} NV2080_CTRL_VGPU_MGR_INTERNAL_ENUMERATE_VGPU_PER_PGPU_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_CLEAR_GUEST_VM_INFO
+ *
+ * This command is used to clear guest vm info. It should be used when
+ * NVA084_CTRL_CMD_KERNEL_HOST_VGPU_DEVICE_SET_VGPU_GUEST_LIFE_CYCLE_STATE
+ * is called with NVA081_NOTIFIERS_EVENT_VGPU_GUEST_DESTROYED state.
+ *
+ * gfid [IN]
+ *   This parameter specifies the gfid of vGPU assigned to VM.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OBJECT_NOT_FOUND
+ */
+#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_CLEAR_GUEST_VM_INFO (0x20804005) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_VGPU_MGR_INTERNAL_CLEAR_GUEST_VM_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_VGPU_MGR_INTERNAL_CLEAR_GUEST_VM_INFO_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV2080_CTRL_VGPU_MGR_INTERNAL_CLEAR_GUEST_VM_INFO_PARAMS {
+    NvU32 gfid;
+} NV2080_CTRL_VGPU_MGR_INTERNAL_CLEAR_GUEST_VM_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_GET_VGPU_FB_USAGE
+ *
+ * This command is used to get the FB usage of all vGPU instances running on a GPU.
+ *
+ * vgpuCount [OUT]
+ *   This field specifies the number of vGPU devices for which FB usage is returned.
+ * vgpuFbUsage [OUT]
+ *   This is an array of type NV2080_VGPU_FB_USAGE, which contains a list of vGPU gfid
+ *   and their corresponding FB usage in bytes.
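+ *
+ * Illustrative consumption of the result, with fu being a filled-in
+ * NV2080_CTRL_VGPU_MGR_INTERNAL_GET_VGPU_FB_USAGE_PARAMS and report() a
+ * hypothetical caller-side helper:
+ *
+ *     NvU32 i;
+ *     for (i = 0; i < fu.vgpuCount; i++)
+ *         report(fu.vgpuFbUsage[i].gfid, fu.vgpuFbUsage[i].fbUsed);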
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_GET_VGPU_FB_USAGE (0x20804006) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_VGPU_MGR_INTERNAL_GET_VGPU_FB_USAGE_PARAMS_MESSAGE_ID" */ + +typedef struct NV2080_VGPU_FB_USAGE { + NvU32 gfid; + NV_DECLARE_ALIGNED(NvU64 fbUsed, 8); +} NV2080_VGPU_FB_USAGE; + +#define NV2080_CTRL_VGPU_MGR_INTERNAL_GET_VGPU_FB_USAGE_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV2080_CTRL_VGPU_MGR_INTERNAL_GET_VGPU_FB_USAGE_PARAMS { + NvU32 vgpuCount; + NV_DECLARE_ALIGNED(NV2080_VGPU_FB_USAGE vgpuFbUsage[NVA081_MAX_VGPU_PER_PGPU], 8); +} NV2080_CTRL_VGPU_MGR_INTERNAL_GET_VGPU_FB_USAGE_PARAMS; + +/* + * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SET_VGPU_ENCODER_CAPACITY + * + * This command is used to set vGPU instance's (represented by gfid) encoder Capacity. + * + * gfid [IN] + * This parameter specifies the gfid of vGPU assigned to VM. + * encoderCapacity [IN] + * Encoder capacity value from 0 to 100. Value of 0x00 indicates encoder performance + * may be minimal for this GPU and software should fall back to CPU-based encode. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SET_VGPU_ENCODER_CAPACITY (0x20804007) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_ENCODER_CAPACITY_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_ENCODER_CAPACITY_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_ENCODER_CAPACITY_PARAMS { + NvU32 gfid; + NvU32 encoderCapacity; +} NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_ENCODER_CAPACITY_PARAMS; + +/* + * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_VGPU_PLUGIN_CLEANUP + * + * This command is used to cleanup all the GSP VGPU plugin task allocated resources after its shutdown. + * Can be called only with SR-IOV and with VGPU_GSP_PLUGIN_OFFLOAD feature. + * + * gfid [IN] + * This parameter specifies the gfid of vGPU assigned to VM. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_VGPU_PLUGIN_CLEANUP (0x20804008) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_VGPU_MGR_INTERNAL_VGPU_PLUGIN_CLEANUP_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_VGPU_MGR_INTERNAL_VGPU_PLUGIN_CLEANUP_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV2080_CTRL_VGPU_MGR_INTERNAL_VGPU_PLUGIN_CLEANUP_PARAMS { + NvU32 gfid; +} NV2080_CTRL_VGPU_MGR_INTERNAL_VGPU_PLUGIN_CLEANUP_PARAMS; + +#define NV2080_CTRL_MAX_NVU32_TO_CONVERTED_STR_LEN 8 +#define NV2080_CTRL_MAX_GPC_COUNT 32 + +/* + * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_GET_PGPU_FS_ENCODING + * + * Reroutes kvgpumgrGetPgpuFSEncoding to vgpuMgrGetPgpuFSEncoding. 
+ *
+ * pgpuString [OUT]
+ *   Resulting PGPU string
+ * pgpuStringSize
+ *   PGPU string size
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_GET_PGPU_FS_ENCODING (0x20804009) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_VGPU_MGR_INTERNAL_GET_PGPU_FS_ENCODING_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_VGPU_MGR_INTERNAL_GET_PGPU_FS_ENCODING_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV2080_CTRL_VGPU_MGR_INTERNAL_GET_PGPU_FS_ENCODING_PARAMS {
+    NvU8 pgpuString[NVA081_PGPU_METADATA_STRING_SIZE];
+    NvU32 pgpuStringSize;
+} NV2080_CTRL_VGPU_MGR_INTERNAL_GET_PGPU_FS_ENCODING_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_GET_PGPU_MIGRATION_SUPPORT
+ *
+ * Reroutes kvgpumgrCheckPgpuMigrationSupport to vgpuMgrCheckPgpuMigrationSupport.
+ *
+ * bIsMigrationSupported [OUT]
+ *   Resulting status of the migration support
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_GET_PGPU_MIGRATION_SUPPORT (0x2080400a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_VGPU_MGR_INTERNAL_GET_PGPU_MIGRATION_SUPPORT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_VGPU_MGR_INTERNAL_GET_PGPU_MIGRATION_SUPPORT_PARAMS_MESSAGE_ID (0xAU)
+
+typedef struct NV2080_CTRL_VGPU_MGR_INTERNAL_GET_PGPU_MIGRATION_SUPPORT_PARAMS {
+    NvBool bIsMigrationSupported;
+} NV2080_CTRL_VGPU_MGR_INTERNAL_GET_PGPU_MIGRATION_SUPPORT_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SET_VGPU_MGR_CONFIG
+ *
+ * Sets vgpu manager parameters. This control is used after GSP initialization.
+ *
+ * bSupportHeterogeneousTimeSlicedVgpuTypes [IN]
+ *   Enable/disable heterogeneous time-sliced vgpu types
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SET_VGPU_MGR_CONFIG (0x2080400b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_MGR_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_MGR_CONFIG_PARAMS_MESSAGE_ID (0xBU)
+
+typedef struct NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_MGR_CONFIG_PARAMS {
+    NvBool bSupportHeterogeneousTimeSlicedVgpuTypes;
+} NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_MGR_CONFIG_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_FREE_STATES
+ *
+ * Reroutes NVA084_CTRL_CMD_KERNEL_HOST_VGPU_DEVICE_FREE_STATES to GSP RM to enable
+ * kernel clients to utilize NVA082_CTRL_CMD_HOST_VGPU_DEVICE_FREE_STATES.
+ *
+ * gfid [IN]
+ *   This parameter specifies the gfid of vGPU assigned to VM.
+ * flags [IN]
+ *   Specifies what component of HostVgpuDevice to free.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_FREE_STATES (0x2080400c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_VGPU_MGR_INTERNAL_FREE_STATES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_VGPU_MGR_INTERNAL_FREE_STATES_PARAMS_MESSAGE_ID (0xCU)
+
+typedef struct NV2080_CTRL_VGPU_MGR_INTERNAL_FREE_STATES_PARAMS {
+    NvU32 gfid;
+    NvU32 flags;
+} NV2080_CTRL_VGPU_MGR_INTERNAL_FREE_STATES_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_GET_FRAME_RATE_LIMITER_STATUS
+ *
+ * Returns whether the frame rate limiter is disabled.
+ *
+ * bFlrDisabled [OUT]
+ *   True, if frame rate limiter is disabled.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_GET_FRAME_RATE_LIMITER_STATUS (0x2080400d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_VGPU_MGR_GET_FRAME_RATE_LIMITER_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_VGPU_MGR_GET_FRAME_RATE_LIMITER_STATUS_PARAMS_MESSAGE_ID (0xDU)
+
+typedef struct NV2080_CTRL_VGPU_MGR_GET_FRAME_RATE_LIMITER_STATUS_PARAMS {
+    NvBool bFlrDisabled;
+} NV2080_CTRL_VGPU_MGR_GET_FRAME_RATE_LIMITER_STATUS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SET_VGPU_HETEROGENEOUS_MODE
+ *
+ * This command will set heterogeneous mode in GSP RM
+ *
+ * bHeterogeneousMode
+ *   The heterogeneous mode to set
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SET_VGPU_HETEROGENEOUS_MODE (0x2080400e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_HETEROGENEOUS_MODE_PARAMS_MESSAGE_ID" */
+#define NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_HETEROGENEOUS_MODE_PARAMS_MESSAGE_ID (0xEU)
+
+typedef struct NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_HETEROGENEOUS_MODE_PARAMS {
+    NvBool bHeterogeneousMode;
+} NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_HETEROGENEOUS_MODE_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SET_VGPU_MIG_TIMESLICE_MODE
+ *
+ * This command will set MIG timeslice mode in GSP RM
+ *
+ * bMigTimeslicingModeEnabled
+ *   The MIG timeslice mode to set
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OBJECT_NOT_FOUND
+ */
+#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SET_VGPU_MIG_TIMESLICE_MODE (0x2080400f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_MIG_TIMESLICE_MODE_PARAMS_MESSAGE_ID" */
+#define NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_MIG_TIMESLICE_MODE_PARAMS_MESSAGE_ID (0xFU)
+
+typedef struct NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_MIG_TIMESLICE_MODE_PARAMS {
+    NvBool bMigTimeslicingModeEnabled;
+} NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_MIG_TIMESLICE_MODE_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SET_POWER_STATE
+ *
+ * This command is used to share the Power State from the KMD side to GSP-RM.
+ *
+ * state
+ *   This parameter contains Power State Information.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SET_POWER_STATE (0x20804010) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SET_POWER_STATE_PARAMS_MESSAGE_ID" */
+#define NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SET_POWER_STATE_PARAMS_MESSAGE_ID (0x10U)
+
+typedef struct NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SET_POWER_STATE_PARAMS {
+    NvU32 state;
+} NV2080_CTRL_CMD_VGPU_MGR_INTERNAL_SET_POWER_STATE_PARAMS;
+
+/* _ctrl2080vgpumgrinternal_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h
new file mode 100644
index 0000000..a136214
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h
@@ -0,0 +1,38 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080volt.finn +// + + + +/* _ctrl2080volt_h_ */ + +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrl2080/ctrl2080boardobj.h" +#include "ctrl/ctrl2080/ctrl2080pmumon.h" + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbase.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbase.h new file mode 100644 index 0000000..89feff4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbase.h @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl208f/ctrl208fbase.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NV20_SUBDEVICE_DIAG: diagnostic class control commands and parameters */ + +#define NV208F_CTRL_CMD(cat, idx) NVXXXX_CTRL_CMD(0x208F, NV208F_CTRL_##cat, idx) + +/* Subdevice diag command categories (6 bits) */ +#define NV208F_CTRL_RESERVED (0x00) +#define NV208F_CTRL_POWER (0x01) +#define NV208F_CTRL_THERMAL (0x02) +#define NV208F_CTRL_SEQ (0x03) +#define NV208F_CTRL_FIFO (0x04) +#define NV208F_CTRL_FB (0x05) +#define NV208F_CTRL_MC (0x06) +#define NV208F_CTRL_BIF (0x07) +#define NV208F_CTRL_CLK (0x08) +#define NV208F_CTRL_PERF (0x09) +#define NV208F_CTRL_FBIO (0x0A) +#define NV208F_CTRL_MMU (0x0B) +#define NV208F_CTRL_PMU (0x0C) +#define NV208F_CTRL_EVENT (0x10) +#define NV208F_CTRL_GPU (0x11) +#define NV208F_CTRL_GR (0x12) +#define NV208F_CTRL_PMGR (0x13) +#define NV208F_CTRL_DMA (0x14) +// const NV208F_CTRL_TMR = (0x15); // not supported +#define NV208F_CTRL_SEC2 (0x16) +#define NV208F_CTRL_GSPMSGTIMING (0x17) +#define NV208F_CTRL_BUS (0x18) +#define NV208F_CTRL_UCODE_COVERAGE (0x19) +#define NV208F_CTRL_NVLINK (0x1A) + +/* + * NV208F_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV208F_CTRL_CMD_NULL (0x208f0000) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* _ctrl208fbase_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgpu.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgpu.h new file mode 100644 index 0000000..2edc89b --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgpu.h @@ -0,0 +1,169 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f/ctrl208fgpu.finn +// + +#include "ctrl/ctrl2080/ctrl2080gr.h" /* 208F is partially derivative of 2080 */ +#include "ctrl/ctrl208f/ctrl208fbase.h" + +/* + * NV208F_CTRL_CMD_GPU_GET_RAM_SVOP_VALUES + * + * This command can be used to get the RAM SVOP values.
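+ * + * Illustrative sketch (the dispatch call and the hClient/hSubDevDiag handles + * are placeholders, not defined by this header): + * + * NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS svop = { 0 }; + * status = pRmApi->Control(pRmApi, hClient, hSubDevDiag, + * NV208F_CTRL_CMD_GPU_GET_RAM_SVOP_VALUES, + * &svop, sizeof(svop)); + * // On NV_OK, svop.sp, svop.rg, svop.pdp and svop.dp hold the current values.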
+ * + * sp + * This field outputs RAM_SVOP_SP + * rg + * This field outputs RAM_SVOP_REG + * pdp + * This field outputs RAM_SVOP_PDP + * dp + * This field outputs RAM_SVOP_DP + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV208F_CTRL_CMD_GPU_GET_RAM_SVOP_VALUES (0x208f1101) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_GPU_INTERFACE_ID << 8) | NV208F_CTRL_GPU_GET_RAM_SVOP_VALUES_PARAMS_MESSAGE_ID" */ + +typedef struct NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS { + NvU32 sp; + NvU32 rg; + NvU32 pdp; + NvU32 dp; +} NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS; + +#define NV208F_CTRL_GPU_GET_RAM_SVOP_VALUES_PARAMS_MESSAGE_ID (0x1U) + +typedef NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS NV208F_CTRL_GPU_GET_RAM_SVOP_VALUES_PARAMS; + +/* + * NV208F_CTRL_CMD_GPU_SET_RAM_SVOP_VALUES + * + * This command can be used to set the RAM SVOP values. + * + * sp + * Input for RAM_SVOP_SP + * rg + * Input for RAM_SVOP_REG + * pdp + * Input for RAM_SVOP_PDP + * dp + * Input for RAM_SVOP_DP + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV208F_CTRL_CMD_GPU_SET_RAM_SVOP_VALUES (0x208f1102) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_GPU_INTERFACE_ID << 8) | NV208F_CTRL_GPU_SET_RAM_SVOP_VALUES_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_GPU_SET_RAM_SVOP_VALUES_PARAMS_MESSAGE_ID (0x2U) + +typedef NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS NV208F_CTRL_GPU_SET_RAM_SVOP_VALUES_PARAMS; + + + +/* + * NV208F_CTRL_CMD_GPU_VERIFY_INFOROM + * + * This command can be used by clients to determine if an InfoROM + * with a valid image is present. If the SKU in question does + * not feature an InfoROM, the NV_ERR_NOT_SUPPORTED + * error is returned. Otherwise the RM attempts to read the ROM object + * and any objects listed in the ROM object. The checksum of + * each object read is verified. If all checksums are valid, the + * RM will report the InfoROM as being valid. If the image is valid, + * RM will return a checksum for all of the dynamically configurable + * data in the InfoROM. This checksum is expected to be the same for + * all boards with an identical InfoROM version and similar configuration. + * + * result + * The result of the InfoROM verification attempt. Possible + * values are: + * NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULT_NONE + * This value indicates that a validation couldn't be done + * due to some software/OS related error. + * NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULT_IO_ERROR + * This value indicates that a validation couldn't be done + * due to some IO error. + * NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULT_VALID + * This value indicates that all InfoROM objects have valid + * checksums. + * NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULT_INVALID + * This value indicates that some InfoROM objects have invalid + * checksums. + * checksum + * Checksum for all of the dynamically configurable data + * in the InfoROM, e.g. the PWR and CFG objects.
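+ * + * Example check (illustrative sketch; handles and dispatch are placeholders): + * + * NV208F_CTRL_GPU_VERIFY_INFOROM_PARAMS vp = { 0 }; + * status = pRmApi->Control(pRmApi, hClient, hSubDevDiag, + * NV208F_CTRL_CMD_GPU_VERIFY_INFOROM, + * &vp, sizeof(vp)); + * // On NV_OK, vp.result == NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULT_VALID + * // means every object checksum verified; vp.checksum may then be compared + * // across identically configured boards.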
+ * + * NOTE: For the result values to be valid, return status should be: + * NV_OK + * + * Possible return status values: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * + */ +#define NV208F_CTRL_CMD_GPU_VERIFY_INFOROM (0x208f1105) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_GPU_INTERFACE_ID << 8) | NV208F_CTRL_GPU_VERIFY_INFOROM_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_GPU_VERIFY_INFOROM_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV208F_CTRL_GPU_VERIFY_INFOROM_PARAMS { + NvU32 result; + NvU32 checksum; +} NV208F_CTRL_GPU_VERIFY_INFOROM_PARAMS; + +/* valid result values */ +#define NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULT_NONE (0x00000000) +#define NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULT_IO_ERROR (0x00000001) +#define NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULT_VALID (0x00000002) +#define NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULT_INVALID (0x00000003) + +/* + * NV208F_CTRL_CMD_GPU_DISABLE_ECC_INFOROM_REPORTING + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + + + +#define NV208F_CTRL_CMD_GPU_DISABLE_ECC_INFOROM_REPORTING (0x208f1107) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_GPU_INTERFACE_ID << 8) | 0x7" */ + + + +/* _ctrl208fgpu_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fucodecoverage.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fucodecoverage.h new file mode 100644 index 0000000..29d61c1 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fucodecoverage.h @@ -0,0 +1,145 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f/ctrl208fucodecoverage.finn +// + +#include "ctrl/ctrl208f/ctrl208fbase.h" + +/* Numerical ID for each ucode */ +#define NV208F_UCODE_COVERAGE_SEC2 1 +#define NV208F_UCODE_COVERAGE_PMU 2 +#define NV208F_UCODE_COVERAGE_GSP_TASK_RM 3 +#define NV208F_UCODE_COVERAGE_GSP_TASK_VGPU 4 + +/* Coverage Type */ +#define NV208F_SANITIZER_COVERAGE_TYPE 0 +#define NV208F_BULLSEYE_COVERAGE_TYPE 1 + +/* + * NV208F_CTRL_UCODE_COVERAGE_STATE_PARAMS + * + * Parameters struct shared by the control calls + * NV208F_CTRL_CMD_UCODE_COVERAGE_GET_STATE and + * NV208F_CTRL_CMD_UCODE_COVERAGE_SET_STATE.
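+ * + * For example (illustrative sketch), a client might enable PMU coverage and + * clear previously gathered data before a test run: + * + * NV208F_CTRL_UCODE_COVERAGE_STATE_PARAMS cov = { 0 }; + * cov.ucode = NV208F_UCODE_COVERAGE_PMU; + * cov.covType = NV208F_SANITIZER_COVERAGE_TYPE; + * cov.bEnabled = NV_TRUE; + * cov.bClear = NV_TRUE; + * // issue NV208F_CTRL_CMD_UCODE_COVERAGE_SET_STATE with &cov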
+ */ +typedef struct NV208F_CTRL_UCODE_COVERAGE_STATE_PARAMS { + NvU32 ucode; + NvU32 gfid; + NvBool bEnabled; + NvBool bClear; + NvU32 covType; +} NV208F_CTRL_UCODE_COVERAGE_STATE_PARAMS; + +/* + * NV208F_CTRL_CMD_UCODE_COVERAGE_GET_STATE + * + * Retrieves the status of the target ucode's coverage + * + * ucode numeric id of the desired ucode to target + * gfid specifies which partition to send the command to + * (applies to ucode=NV208F_UCODE_COVERAGE_GSP_TASK_VGPU only) + * bEnabled whether the ucode's SanitizerCoverage run-time data gathering + * is enabled (NV_TRUE) or not (NV_FALSE) + * covType determines if using SanitizerCoverage or BullseyeCoverage + * + * Possible status values returned are + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV208F_CTRL_CMD_UCODE_COVERAGE_GET_STATE (0x208f1901) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_UCODE_COVERAGE_INTERFACE_ID << 8) | NV208F_CTRL_UCODE_COVERAGE_GET_STATE_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_UCODE_COVERAGE_GET_STATE_PARAMS_MESSAGE_ID (0x1U) + +typedef NV208F_CTRL_UCODE_COVERAGE_STATE_PARAMS NV208F_CTRL_UCODE_COVERAGE_GET_STATE_PARAMS; + +/* + * NV208F_CTRL_CMD_UCODE_COVERAGE_SET_STATE + * + * Enables/disables and optionally clears coverage data of the target ucode + * + * ucode numeric id of the desired ucode to target + * gfid specifies which partition to send the command to + * (applies to ucode=NV208F_UCODE_COVERAGE_GSP_TASK_VGPU only) + * bEnabled whether to enable (NV_TRUE) or disable (NV_FALSE) coverage for the given ucode + * bClear whether to clear coverage prior to enabling coverage collection + * covType determines if using SanitizerCoverage or BullseyeCoverage + * + * Possible status values returned are + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV208F_CTRL_CMD_UCODE_COVERAGE_SET_STATE (0x208f1902) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_UCODE_COVERAGE_INTERFACE_ID << 8) | NV208F_CTRL_UCODE_COVERAGE_SET_STATE_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_UCODE_COVERAGE_SET_STATE_PARAMS_MESSAGE_ID (0x2U) + +typedef NV208F_CTRL_UCODE_COVERAGE_STATE_PARAMS NV208F_CTRL_UCODE_COVERAGE_SET_STATE_PARAMS; + +/* + * NV208F_UCODE_COVERAGE_RPC_MAX_BYTES_* + * + * Maximum number of bytes that can be retrieved by one RPC call + * (potentially differs per-ucode). These are used to size the buffer in the + * respective ucode RPC parameters.
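+ * + * For example (sketch; how the total coverage size is discovered is assumed + * client logic), GSP-RM data can be read in chunks by advancing 'offset' + * across successive NV208F_CTRL_CMD_UCODE_COVERAGE_GET_DATA calls: + * + * for (offset = 0; offset < totalBytes; + * offset += NV208F_UCODE_COVERAGE_RPC_MAX_BYTES_GSPRM) + * // issue GET_DATA with params.offset = offset and consume params.data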
+ */ +#define NV208F_UCODE_COVERAGE_RPC_MAX_BYTES_PMU 128 +#define NV208F_UCODE_COVERAGE_RPC_MAX_BYTES_GSPRM 65536 + +/* + * NV208F_CTRL_CMD_UCODE_COVERAGE_GET_DATA + * + * Retrieves coverage data of the target ucode + * + * data buffer to retrieve data into + * ucode numeric id of the desired ucode to retrieve data from + * gfid specifies which partition to send the command to + * (applies to ucode=NV208F_UCODE_COVERAGE_GSP_TASK_VGPU only) + * offset offset of internal buffer to copy from + * covType determines if using SanitizerCoverage or BullseyeCoverage + * + * Possible status values returned are + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV208F_CTRL_CMD_UCODE_COVERAGE_GET_DATA (0x208f1903) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_UCODE_COVERAGE_INTERFACE_ID << 8) | NV208F_CTRL_UCODE_COVERAGE_GET_DATA_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_UCODE_COVERAGE_GET_DATA_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV208F_CTRL_UCODE_COVERAGE_GET_DATA_PARAMS { + NvU8 data[NV208F_UCODE_COVERAGE_RPC_MAX_BYTES_GSPRM]; + NvU32 ucode; + NvU32 gfid; + NvU32 offset; + NvU32 covType; +} NV208F_CTRL_UCODE_COVERAGE_GET_DATA_PARAMS; + +/* _ctrl208fucodecoverage_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h b/src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h new file mode 100644 index 0000000..b29b9a2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h @@ -0,0 +1,1560 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl30f1.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NV30_GSYNC_CTRL control commands and parameters */ + +#define NV30F1_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x30F1, NV30F1_CTRL_##cat, idx) + +/* Command categories (6 bits) */ +#define NV30F1_CTRL_RESERVED (0x00) +#define NV30F1_CTRL_GSYNC (0x01) + +/* + * NV30F1_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV30F1_CTRL_CMD_NULL (0x30f10000) /* finn: Evaluated from "(FINN_NV30_GSYNC_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* NV30F1_CTRL_GSYNC + * + * Gsync board access/control functionality.
+ * + */ + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_VERSION + * + * This command gets the current gsync API version info. + * + * version + * The API's major version. Does not change often. + * + * revision + * The API's minor version. + * Bumped with each change, no matter how minor. + * + * Possible status values returned are: + * NV_OK + * + */ +#define NV30F1_CTRL_CMD_GSYNC_GET_VERSION (0x30f10101) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_VERSION_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_GET_VERSION_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV30F1_CTRL_GSYNC_GET_VERSION_PARAMS { + NvU32 version; + NvU32 revision; +} NV30F1_CTRL_GSYNC_GET_VERSION_PARAMS; + +#define NV30F1_CTRL_GSYNC_API_VER 1 +#define NV30F1_CTRL_GSYNC_API_REV 0 + +/* + * NV30F1_CTRL_GSYNC api + * + * The gsync interface provides access to gsync devices in the system. + * + * The available commands are: + * NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SIGNALS + * Status on input sync signals. + * NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_PARAMS + * Get gsync parameters. + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS + * Set gsync parameters. + * NV30F1_CTRL_CMD_GSYNC_GET_INFO_CAPS + * Get basic info about the device and its connected displays + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC + * Enable frame sync. + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_UNSYNC + * Disable frame sync. + * NV30F1_CTRL_CMD_GSYNC_GET_STATUS + * Get status info relevant for the control panel + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_TESTING + * Test signal enabling/disabling + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_WATCHDOG + * Control the gsync watchdog + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_INTERLACE_MODE + * Set the interlace mode + * + */ + + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SIGNALS + * + * This command returns information associated with incoming signals to the + * gsync device. + * + * RJ45 + * This parameter contains the signal information for each of the two RJ45 + * ports on the gsync device. A value of ~0 indicates that a signal is + * detected, but no rate information is available. Anything else is a rate + * in units of 10e-4 Hz. + * house + * This parameter contains the signal information for the house sync signal + * (i.e. the bnc port). A value of 0 means that no signal is present. A value + * of ~0 indicates that a signal is detected, but no rate information is + * available. Anything else is a rate in units of 10e-4 Hz. + * rate + * A mask representing signals for which we would like rate information (if + * available). + * + * Possible status values returned are + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + * + */ + +#define NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SIGNALS (0x30f10102) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_PARAMS { + NvU32 RJ45[2]; + NvU32 house; + NvU32 rate; +} NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_PARAMS; + +/* + * rate values + * + */ + +#define NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_RJ45_0 (0x00000001) +#define NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_RJ45_1 (0x00000002) +#define NV30F1_CTRL_GSYNC_GET_SIGNALS_HOUSE (0x00000004) + + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_PARAMS + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS + * + * These commands respectively return and set state related to the operation + * of the gsync device.
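+ * + * A typical SET sequence (illustrative sketch; only the fields selected by + * 'which' are applied, as described below): + * + * NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS cp = { 0 }; + * cp.which = NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_SKEW; + * cp.syncSkew = 10; // units of 0.977 us, see syncSkew below + * // issue NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS with &cp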
+ * + * which + * This value is a mask set by the client representing which parameters are + * to be updated. In the case of a SET command, these parameters will + * be used to change the state of the hardware. For both a GET + * command and a SET command, the hardware state will be returned by + * the RM in the respective parameter. All other parameters are untouched. + * syncPolarity + * This parameter specifies which edge of the house sync signal to sync with. + * videoMode + * This parameter specifies which video mode to use to decode the house sync + * signal. + * nSync + * This parameter specifies the number of pulses to wait between frame lock + * signal generation. 0 indicates that every incoming pulse should result in + * a frame lock sync pulse being generated (i.e. the input and output rates + * match). + * syncSkew + * This parameter specifies the time delay between the frame sync signal and + * the GPU's signal in units of 0.977 us. The maximum value for syncSkew is + * defined in the respective header files of gsync boards, e.g. for the P2060 + * board the value is defined in drivers/resman/kernel/inc/dacp2060.h + * syncStartDelay + * In master mode, the amount of time to wait before generating the first + * sync pulse in units of 7.81 us, max 512 ms (i.e. 65535 units). + * useHouseSync + * When a house sync signal is detected, this parameter indicates that it + * should be used as the reference to generate the frame sync signal. + * syncMulDiv + * Enables multiply/divide of the frequency of the house sync signal by an + * integer. Only supported if the + * NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_MULTIPLY_DIVIDE_SYNC bit is set. The + * maximum value of multiplyDivideValue is given by + * NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS.maxMulDivValue. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ + +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_PARAMS (0x30f10103) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS (0x30f10104) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS_MESSAGE_ID" */ + +typedef struct NV30F1_CTRL_GSYNC_MULTIPLY_DIVIDE_SETTINGS { + NvU8 multiplyDivideValue; + NvU8 multiplyDivideMode; + NvU16 rsvd; +} NV30F1_CTRL_GSYNC_MULTIPLY_DIVIDE_SETTINGS; + +typedef struct NV30F1_CTRL_GSYNC_CONTROL_PARAMS_PARAMS { + NvU32 which; + NvU32 syncPolarity; + NvU32 syncVideoMode; + NvU32 nSync; + NvU32 syncSkew; + NvU32 syncStartDelay; + NvU32 useHouseSync; + NV30F1_CTRL_GSYNC_MULTIPLY_DIVIDE_SETTINGS syncMulDiv; + NvU32 syncVRR; +} NV30F1_CTRL_GSYNC_CONTROL_PARAMS_PARAMS; + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS_MESSAGE_ID (0x3U) + +typedef NV30F1_CTRL_GSYNC_CONTROL_PARAMS_PARAMS NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS; + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS_MESSAGE_ID (0x4U) + +typedef NV30F1_CTRL_GSYNC_CONTROL_PARAMS_PARAMS NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS; + +/* + * which values + * + */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY 0x0001 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE 0x0002 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_NSYNC 0x0004 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_SKEW 0x0008 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_START_DELAY 0x0010 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_USE_HOUSE 0x0020 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_MULTIPLY_DIVIDE 0x0040 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_VRR 0x0080 + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_POLARITY NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE +#define NV30F1_CTRL_GSYNC_GET_CONTROL_NSYNC NV30F1_CTRL_GSYNC_SET_CONTROL_NSYNC +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_SKEW NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_SKEW +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_START_DELAY NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_START_DELAY +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_USE_HOUSE NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_USE_HOUSE +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_MULTIPLY_DIVIDE NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_MULTIPLY_DIVIDE +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_VRR NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_VRR + +/* + * syncPolarity values + * + */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_RISING_EDGE 0 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_FALLING_EDGE 1 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_BOTH_EDGES 2 + +/* + * syncVideoMode values + * Video_Mode_Composite is valid for P2060 only. + * + */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NONE 0 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_TTL 1 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NTSCPALSECAM 2 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_HDTV 3 + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE_NONE NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NONE +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE_TTL NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_TTL +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE_NTSCPALSECAM NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NTSCPALSECAM +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE_HDTV NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_HDTV +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE_COMPOSITE 4 + +/* + * multiplyDivide values + * + */ +#define NV30F1_CTRL_GSYNC_SET_CONTROL_MULTIPLY_DIVIDE_MODE_MULTIPLY 0 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_MULTIPLY_DIVIDE_MODE_DIVIDE 1 + +/* + * VRR values + * + */ +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_MODE_FIXED_REFRESH_RATE 0 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_MODE_VARIABLE_REFRESH_RATE 1 + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_CAPS + * + * This command returns the capabilities of this gsync device. + * + * revId + * This parameter is set by the RM to indicate the combined + * FPGA revision (low 4 bits) and board ID (high 4 bits). + * + * boardId + * This parameter is set by the RM to indicate the board ID, + * allowing disambiguation of P2060 and so forth. + * + * minRevRequired + * This parameter is set by the RM to indicate the minimum + * Qsync FPGA revision required for a specific chip family + * + * isFirmwareRevMismatch + * This parameter is set to TRUE by RM when the Qsync Firmware + * Revision is incompatible with the connected GPU chip family. + * + * revision + * This parameter is set by the RM to indicate the device revision, + * also known as major version. + * + * extendedRevision + * This parameter is set by the RM to indicate the device extended + * revision, also known as minor version. + * + * capFlags + * This parameter is set by the RM to indicate capabilities of + * the board, preventing the client from needing to keep track + * of the feature lists supported by each revision of each board. + * + * maxSyncSkew + * This parameter returns the maximum units of sync skew the + * board supports. The value programmed into the board has to be + * between 0 and maxSyncSkew, inclusive.
The value of each unit + * can be learned from the syncSkewResolution parameter. + * + * syncSkewResolution + * This parameter returns the number of nanoseconds that one unit + * of sync skew corresponds to. + * + * maxStartDelay + * This parameter returns the maximum units of sync start delay + * the board supports. The value programmed into the board has to be + * between 0 and maxStartDelay, inclusive. The value of each unit + * can be learned from the startDelayResolution parameter. + * + * startDelayResolution + * This parameter returns the number of nanoseconds that one unit + * of sync start delay corresponds to. + * + * maxSyncInterval + * This parameter returns the maximum duration of house sync interval + * between frame lock sync cycles that the board supports. The value + * programmed into the board has to be between 0 and maxSyncInterval, + * inclusive. + * + * maxMulDivValue + * This parameter returns the maximum possible value that can be + * programmed for multiplying / dividing house sync. Only valid if + * NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_MULTIPLY_DIVIDE_SYNC is set. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV30F1_CTRL_CMD_GSYNC_GET_CAPS (0x30f10105) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS { + NvU32 revId; + NvU32 boardId; + NvU32 minRevRequired; + NvBool isFirmwareRevMismatch; + NvU32 revision; + NvU32 extendedRevision; + NvU32 capFlags; + NvU32 maxSyncSkew; + NvU32 syncSkewResolution; + NvU32 maxStartDelay; + NvU32 startDelayResolution; + NvU32 maxSyncInterval; + NvU32 maxMulDivValue; +} NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS; + +#define NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2060 (0x00002060) +#define NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2061 (0x00002061) + +#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_2DPS (0x00000001) +#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_3DPS (0x00000002) +#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_4DPS (0x00000004) + +#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_NEED_MASTER_BARRIER_WAR (0x00000010) +#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_MULTIPLY_DIVIDE_SYNC (0x00000020) + +#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_SYNC_LOCK_EVENT (0x10000000) +#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_HOUSE_SYNC_EVENT (0x20000000) +#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FRAME_COUNT_EVENT (0x40000000) + +#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_ONLY_PRIMARY_CONNECTOR_EVENT (0x01000000) +#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_ALL_CONNECTOR_EVENT (0x02000000) + +// For P2060, clients can only request for video modes at BNC connector +// e.g. NO HS, TTL and Composite etc. +#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_ONLY_GET_VIDEO_MODE (0x00100000) + +/* + * NV30F1_CTRL_CMD_GET_GSYNC_GPU_TOPOLOGY + * + * This command returns the list of GPU IDs connected with the associated + * gsync device. + * + * gpus + * This array is set by RM to contain the gpu connection information + * for gpus attached to the gsync device. Valid entries are contiguous, + * beginning with the first entry in the list. The elements of this array contain + * the following fields: + * gpuId + * This field contains the ID of the connected GPU. If the entry in the + * table is invalid, this field contains NV30F1_CTRL_GPU_INVALID_ID.
+ * connector + * This field indicates which connector on the device the GPU is connected + * to (i.e. the primary or secondary connector), if any. + * proxyGpuId + * If the 'connector' field indicates that the GPU is not connected to + * a G-Sync device directly, then this field contains the ID of the + * GPU that acts as a proxy, i.e. the GPU to which this GPU should be + * a RasterLock slave. + * connectorCount + * This parameter indicates the number of GPU connectors available on + * the gsync device. The connector count of the gsync device may be + * less than NV30F1_CTRL_MAX_GPUS_PER_GSYNC. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV30F1_CTRL_CMD_GET_GSYNC_GPU_TOPOLOGY (0x30f10106) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_MAX_GPUS_PER_GSYNC 4 +#define NV30F1_CTRL_GPU_INVALID_ID (0xffffffff) + +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PARAMS { + struct { + NvU32 gpuId; + NvU32 connector; + NvU32 proxyGpuId; + } gpus[NV30F1_CTRL_MAX_GPUS_PER_GSYNC]; + NvU32 connectorCount; +} NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PARAMS; + +/* + * connector values + * + */ +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_ONE 1 +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_TWO 2 +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_THREE 3 +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_FOUR 4 + +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_NONE 0 +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PRIMARY NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_ONE +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_SECONDARY NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_TWO + + + + +/* + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC + * + * This command enables frame sync on displays. + * + * gpuId + * The parameter is set by the client to indicate the gpuId on which + * frame lock will be enabled. + * master + * This parameter is set by the client to specify whether this/these + * displays should be set as the master or as slaves. If this is a GET + * and displays is not 0, this will be set by the RM to indicate if + * the display can be the master. + * displays + * This is a device mask set by the client to indicate which display(s) + * are to be synched. Note that only one display may be set as master. + * If this is a GET, this is set by the client to indicate which display + * is to be queried. If the display cannot be synched to this device, + * the RM will overwrite the mask with a 0. + * validateExternal + * This parameter is set by the client to tell the RM to validate the + * presence of an external sync source when enabling a master. + * refresh + * This parameter is set by the client to indicate the desired refresh rate. + * The value is in 0.0001 Hertz (i.e. it has been multiplied by 10000). + * configFlags + * Contains flags for specific options. So far only + * NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_CONFIG_FLAGS_KEEP_MASTER_SWAPBARRIER_DISABLED + * is supported, which allows the caller to prevent the RM from automatically + * enabling the swap barrier on framelock masters on FPGA revisions <= 5.
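+ * + * Illustrative master-enable sketch (gpuId and displayMask are placeholders): + * + * NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS sync = { 0 }; + * sync.gpuId = gpuId; + * sync.master = 1; + * sync.displays = displayMask; // exactly one display as master + * sync.validateExternal = 0; + * // issue NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC with &sync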
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SYNC (0x30f10110) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC (0x30f10111) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS_MESSAGE_ID" */ + +// If set, the swap barrier is not enabled automatically when enabling a framelock master on FPGA revs <= 5. +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_CONFIG_FLAGS_KEEP_MASTER_SWAPBARRIER_DISABLED (0x00000001) + +typedef struct NV30F1_CTRL_GSYNC_CONTROL_SYNC_PARAMS { + NvU32 gpuId; + NvU32 master; + NvU32 displays; + NvU32 validateExternal; + NvU32 refresh; + NvU32 configFlags; +} NV30F1_CTRL_GSYNC_CONTROL_SYNC_PARAMS; + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS_MESSAGE_ID (0x10U) + +typedef NV30F1_CTRL_GSYNC_CONTROL_SYNC_PARAMS NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS; + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS_MESSAGE_ID (0x11U) + +typedef NV30F1_CTRL_GSYNC_CONTROL_SYNC_PARAMS NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS; + +/* + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_UNSYNC + * + * This command disables frame sync on displays + * + * gpuId + * The parameter is set by the client to indicate the gpuId on which + * frame lock will be disabled. + * master + * This parameter is set by the client to specify whether the display(s) + * to be unset are the master or slaves. + * displays + * This is a device mask set by the client to indicate which display(s) + * are to be unsynched. + * retainMaster + * Retain the designation as master, but unsync the displays. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + */ +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_UNSYNC (0x30f10112) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS { + NvU32 gpuId; + NvU32 master; + NvU32 displays; + NvU32 retainMaster; +} NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS; + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC + * + * This command gets the sync state for the gpus attached to the + * framelock device. Note that the frame lock device only has + * knowledge of sync status at the gpu level, not the display + * device level. + * + * gpuId + * The parameter is set by the client to indicate which gpuId is to be + * queried. + * bTiming + * This parameter is set by the RM to indicate that timing on the GPU is + * in sync with the master sync signal. + * bStereoSync + * This parameter is set by the RM to indicate whether the phase of the + * timing signal coming from the GPU is the same as the phase of the + * master sync signal. + * bSyncReady + * This parameter is set by the RM to indicate if a sync signal has + * been detected.
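+ * + * Illustrative sketch (gpuId is a placeholder): + * + * NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS ss = { 0 }; + * ss.gpuId = gpuId; + * // issue NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC with &ss; + * // nonzero ss.bTiming and ss.bStereoSync together indicate a full lock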
+ * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * + */ + +#define NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC (0x30f10113) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS { + NvU32 gpuId; + NvU32 bTiming; + NvU32 bStereoSync; + NvU32 bSyncReady; +} NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS; + + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_STATUS + * + * This command gets status information for the frame lock device + * relevant to a control panel. + * + * which + * This is a mask set by the client describing which of the other + * parameters we should collect status information for. + * bLeadingEdge + * This parameter is set by the RM to indicate that the gsync device is + * set to sync to the leading edge of a house sync signal. Note that + * this does not mean that house sync is the signal source. + * bFallingEdge + * This parameter is set by the RM to indicate that the gsync device is + * set to sync to the falling edge of a house sync signal. Note that + * this does not mean that house sync is the signal source. + * syncDelay + * This parameter is set by the RM to indicate the sync delay in + * microseconds. + * refresh + * This parameter is set by the RM to indicate the rate of the frame sync + * pulse in 0.0001 Hertz (i.e. it has been multiplied by 10000). This is not + * the refresh rate of the display device. This is the same as the incoming + * house sync rate if framelocked to an external house sync signal. + * Otherwise, it is the same as the refresh rate of the master display + * device. + * houseSyncIncoming + * This parameter is set by the RM to indicate the rate of an incoming + * house sync signal in 0.0001 Hertz (i.e. it has been multiplied by 10000). + * syncInterval + * This parameter is set by the RM to indicate the number of incoming + * sync pulses to wait before the generation of the frame sync pulse. + * bSyncReady + * This parameter is set by the RM to indicate if a sync signal has + * been detected (this parameter is also available from the + * NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC method). + * bSwapReady + * This parameter is set by the RM to indicate if the hardware is + * ready to swap. + * bHouseSync + * This parameter is set by the RM to indicate that a house sync signal + * should be used as the source signal if it is available. + * bPort0Input + * This parameter is set by the RM to indicate that RJ45 port 0 is + * configured as an input. + * bPort1Input + * This parameter is set by the RM to indicate that RJ45 port 1 is + * configured as an input + * bPort0Ethernet + * This parameter is set by the RM to indicate that RJ45 port 0 has + * been connected to an ethernet hub (this is not the right thing to do). + * bPort1Ethernet + * This parameter is set by the RM to indicate that RJ45 port 1 has + * been connected to an ethernet hub (this is not the right thing to do). + * universalFrameCount + * This parameter is set by the RM to indicate the value of the + * Universal frame counter. + * bInternalSlave + * This parameter is set by the RM to indicate that a p2061 has been + * configured as internal slave.
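+ * + * Example (illustrative sketch): querying sync-ready and swap-ready state in + * a single call: + * + * NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS gs = { 0 }; + * gs.which = NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_READY | + * NV30F1_CTRL_GSYNC_GET_STATUS_SWAP_READY; + * // issue NV30F1_CTRL_CMD_GSYNC_GET_STATUS with &gs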
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV30F1_CTRL_CMD_GSYNC_GET_STATUS (0x30f10114) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS_MESSAGE_ID (0x14U) + +typedef struct NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS { + NvU32 which; + NvU32 bLeadingEdge; + NvU32 bFallingEdge; + NvU32 syncDelay; + NvU32 refresh; + NvU32 houseSyncIncoming; + NvU32 syncInterval; + NvU32 bSyncReady; + NvU32 bSwapReady; + NvU32 bHouseSync; + NvU32 bPort0Input; + NvU32 bPort1Input; + NvU32 bPort0Ethernet; + NvU32 bPort1Ethernet; + NvU32 universalFrameCount; + NvU32 bInternalSlave; +} NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS; + +/* + * which values + * + */ + +#define NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_POLARITY (0x00000001) +#define NV30F1_CTRL_GSYNC_GET_STATUS_LEADING_EDGE (0x00000001) +#define NV30F1_CTRL_GSYNC_GET_STATUS_FALLING_EDGE (0x00000001) +#define NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_DELAY (0x00000002) +#define NV30F1_CTRL_GSYNC_GET_STATUS_REFRESH (0x00000004) +#define NV30F1_CTRL_GSYNC_GET_STATUS_HOUSE_SYNC_INCOMING (0x00000008) +#define NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_INTERVAL (0x00000010) +#define NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_READY (0x00000020) +#define NV30F1_CTRL_GSYNC_GET_STATUS_SWAP_READY (0x00000040) +#define NV30F1_CTRL_GSYNC_GET_STATUS_TIMING (0x00000080) +#define NV30F1_CTRL_GSYNC_GET_STATUS_STEREO_SYNC (0x00000100) +#define NV30F1_CTRL_GSYNC_GET_STATUS_HOUSE_SYNC (0x00000200) +#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT_INPUT (0x00000400) +#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT0_INPUT (0x00000400) +#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT1_INPUT (0x00000400) +#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT_ETHERNET (0x00000800) +#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT0_ETHERNET (0x00000800) +#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT1_ETHERNET (0x00000800) +#define NV30F1_CTRL_GSYNC_GET_STATUS_UNIVERSAL_FRAME_COUNT (0x00001000) +#define NV30F1_CTRL_GSYNC_GET_STATUS_INTERNAL_SLAVE (0x00002000) + +/* + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_TESTING + * + * This command controls the test signal on the gsync device. + * + * bEmitTestSignal + * This parameter is set by the client to emit or stop emitting the test + * signal. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + */ +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_TESTING (0x30f10120) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_TESTING_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_TESTING (0x30f10121) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS_MESSAGE_ID" */ + +typedef struct NV30F1_CTRL_GSYNC_CONTROL_TESTING_PARAMS { + NvU32 bEmitTestSignal; +} NV30F1_CTRL_GSYNC_CONTROL_TESTING_PARAMS; + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_TESTING_PARAMS_MESSAGE_ID (0x20U) + +typedef NV30F1_CTRL_GSYNC_CONTROL_TESTING_PARAMS NV30F1_CTRL_GSYNC_GET_CONTROL_TESTING_PARAMS; + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS_MESSAGE_ID (0x21U) + +typedef NV30F1_CTRL_GSYNC_CONTROL_TESTING_PARAMS NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS; + +/* + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_WATCHDOG + * + * This command enables and disables the gsync watchdog + * + * enable + * This parameter is set by the client to enable or disable the + * gsync watchdog. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + */ +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_WATCHDOG (0x30f10130) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS_MESSAGE_ID (0x30U) + +typedef struct NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS { + NvU32 enable; +} NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS; + + +/* + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_INTERLACE_MODE + * + * This command enables or disables interlace mode. + * + * enable + * This parameter is set by the client to enable or disable + * interlace mode + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_INTERLACE_MODE (0x30f10140) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_INTERLACE_MODE_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_INTERLACE_MODE (0x30f10141) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_INTERLACE_MODE_PARAMS_MESSAGE_ID" */ + +typedef struct NV30F1_CTRL_GSYNC_CONTROL_INTERLACE_MODE_PARAMS { + NvU32 enable; +} NV30F1_CTRL_GSYNC_CONTROL_INTERLACE_MODE_PARAMS; + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_INTERLACE_MODE_PARAMS_MESSAGE_ID (0x40U) + +typedef NV30F1_CTRL_GSYNC_CONTROL_INTERLACE_MODE_PARAMS NV30F1_CTRL_GSYNC_GET_CONTROL_INTERLACE_MODE_PARAMS; + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_INTERLACE_MODE_PARAMS_MESSAGE_ID (0x41U) + +typedef NV30F1_CTRL_GSYNC_CONTROL_INTERLACE_MODE_PARAMS NV30F1_CTRL_GSYNC_SET_CONTROL_INTERLACE_MODE_PARAMS; + +/* + * + * NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_BARRIER + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SWAP_BARRIER + * + * These commands enable or disable the swap barrier + * connection between a GPU and the rest of the gsync + * network. + * + * gpuId + * The parameter is set by the client to indicate which gpuId is to be + * queried. + * enable + * In a set command, this parameter is set by the client to + * indicate if the barrier should be enabled (i.e. connected + * to the rest of the network) or disabled (disconnected). + * In both a set and a get command, if successful, the RM + * uses this parameter to return the current (i.e. post-set) + * value.
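+ * + * Illustrative sketch, connecting a GPU to the swap barrier (gpuId is a + * placeholder): + * + * NV30F1_CTRL_GSYNC_CONTROL_SWAP_BARRIER_PARAMS sb = { 0 }; + * sb.gpuId = gpuId; + * sb.enable = NV_TRUE; + * // issue NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SWAP_BARRIER with &sb; + * // sb.enable then reflects the post-set state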
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_BARRIER (0x30f10150) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_BARRIER_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SWAP_BARRIER (0x30f10151) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS_MESSAGE_ID" */ + +typedef struct NV30F1_CTRL_GSYNC_CONTROL_SWAP_BARRIER_PARAMS { + NvU32 gpuId; + NvBool enable; +} NV30F1_CTRL_GSYNC_CONTROL_SWAP_BARRIER_PARAMS; + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_BARRIER_PARAMS_MESSAGE_ID (0x50U) + +typedef NV30F1_CTRL_GSYNC_CONTROL_SWAP_BARRIER_PARAMS NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_BARRIER_PARAMS; + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS_MESSAGE_ID (0x51U) + +typedef NV30F1_CTRL_GSYNC_CONTROL_SWAP_BARRIER_PARAMS NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS; + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW + * + * This command allows clients to obtain the time period for which the SwapLock + * window will remain HIGH for GSYNC III (P2060), i.e. TswapRdyHi. RM clients + * will use this value for programming SWAP_LOCKOUT_START on all heads of a GPU + * connected to a P2060. + * + * tSwapRdyHi + * RM will return the swap lock window high time period in this variable. + * By default tSwapRdyHi is 250 microseconds. RM also provides a regkey to + * change this value. tSwapRdyHi is also used by RM to configure the value of + * LSR_MIN_TIME while programming the swap barrier. + * Clients should consider tSwapRdyHi only for a Gsync III (P2060) network. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + */ +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW (0x30f10153) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS_MESSAGE_ID (0x53U) + +typedef struct NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS { + NvU32 tSwapRdyHi; +} NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS; + + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_OPTIMIZED_TIMING + * + * This command allows the client to obtain suggested + * adjustments to vertical and horizontal timing values + * that will improve the ability of gsync to lock. + * + * gpuId + * This parameter is set by the client to indicate the + * gpuId of the GPU to which the display to be optimized + * is attached. + * output + * This parameter is set by the client to indicate the + * output resource type of the display to be optimized. + * For example, CRTs use DAC output, while DFPs use SOR + * (Serial Output Resource) type. + * protocol + * This parameter is set by the client to indicate the + * data protocol of output resource. For DAC displays, + * the format of the standard mode most closely matching + * the desired mode is used. For SOR display devices, + * the LVDS/TMDS/etc format is the protocol. + * structure + * This parameter is set by the client to indicate the + * raster structure of the mode, either progressive or + * interlaced. Diagrams of the raster structures are + * provided below. + * adjust + * This parameter is set by the client to specify which + * of the timing values, other than hTotal and vTotal, + * may be adjusted during optimization.
+ * If the client does not obtain instructions from the + * user about where adjustments should be applied, safe + * default values for progressive/interlaced modes are + * provided below. + * hTotal + * This parameter is set by the client to specify the + * initial Horizontal Pixel Total, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * vTotal + * This parameter is set by the client to specify the + * initial Vertical Pixel Total, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * hBlankStart + * This parameter is set by the client to specify the + * initial Horizontal Blanking Start, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * vBlankStart + * This parameter is set by the client to specify the + * initial Vertical Blanking Start, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * hBlankEnd + * This parameter is set by the client to specify the + * initial Horizontal Blanking End, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * vBlankEnd + * This parameter is set by the client to specify the + * initial Vertical Blanking End, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * vInterlacedBlankStart + * This parameter is set by the client to specify the + * initial Interlaced Vertical Blanking Start, from + * which the RM will begin optimizing. The RM will + * ignore this parameter for non-interlaced modes, as + * it has no meaning in those modes. In modes where + * it is meaningful, the RM also uses the parameter + * to return the optimized value. + * vInterlacedBlankEnd + * This parameter is set by the client to specify the + * initial Interlaced Vertical Blanking End, from + * which the RM will begin optimizing. The RM will + * ignore this parameter for non-interlaced modes, as + * it has no meaning in those modes. In modes where + * it is meaningful, the RM also uses the parameter + * to return the optimized value. + * hSyncEnd + * This parameter is set by the client to specify the + * initial Horizontal Raster Sync End, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * vSyncEnd + * This parameter is set by the client to specify the + * initial Vertical Raster Sync End, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * hDeltaStep + * This parameter is set by the client to specify the + * increments by which the Horizontal Pixel Total may + * be adjusted by the RM, during optimization. + * If the client does not obtain a custom value for + * this parameter from the user, setting all four of + * hDeltaStep, vDeltaStep, hDeltaMax, and vDeltaMax + * to zero will result in a safe default for all four. + * vDeltaStep + * This parameter is set by the client to specify the + * increments by which the vertical timings of each + * frame (in interlaced modes, each field) may be + * adjusted by the RM, during optimization. + * In interlaced modes, the adjustments to vTotal, + * vInterlacedBlankStart, and vInterlacedBlankEnd may + * be in increments of vDeltaStep or twice vDeltaStep, + * depending on where adjustments are made. 
+ * In progressive modes, the adjustment to the vTotal + * will simply be in increments of vDeltaStep. + * If the client does not obtain a custom value for + * this parameter from the user, setting all four of + * hDeltaStep, vDeltaStep, hDeltaMax, and vDeltaMax + * to zero will result in a safe default for all four. + * hDeltaMax + * This parameter is set by the client to specify the + * maximum amount that the Horizontal Pixel Total may + * be adjusted by the RM, during optimization. + * If the client does not obtain a custom value for + * this parameter from the user, setting all four of + * hDeltaStep, vDeltaStep, hDeltaMax, and vDeltaMax + * to zero will result in a safe default for all four. + * vDeltaMax + * This parameter is set by the client to specify the + * maximum amount that vertical timings of each frame + * (in interlaced modes, each field) may be adjusted + * by the RM, during optimization. + * In interlaced modes, the adjustments to vTotal, + * vInterlacedBlankStart, and vInterlacedBlankEnd may + * be up to twice vDeltaMax. + * In progressive modes, the adjustment to the vTotal + * may simply be up to vDeltaMax. + * If the client does not obtain a custom value for + * this parameter from the user, setting all four of + * hDeltaStep, vDeltaStep, hDeltaMax, and vDeltaMax + * to zero will result in a safe default for all four. + * refreshX10K + * This parameter is set by the client to specify the + * desired refresh rate, multiplied by 10000. This + * allows refresh rate to be set in units of 0.0001 Hz. + * For example, a 59.94 Hz rate would be set as 599400. + * The client can alternatively specify the + * pixelClockHz parameter (if the passed in refreshX10K + * parameter is set to 0, the pixelClockHz parameter + * will be used). + * pixelClockHz + * This parameter is set by the client to specify the + * desired pixel clock frequency in units of Hz. The + * client can alternatively specify the refreshX10K parameter. + * This parameter is returned by the RM to report the + * optimal pixel clock to use with the adjusted mode, + * in units of Hz. + * + * + * bOptimized[out] + * This is set to NV_TRUE if the timings were successfully optimized, and + * NV_FALSE otherwise.
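+ * + * Illustrative call sequence (sketch; the timing values are placeholders for + * a real mode, and the dispatch path is assumed): + * + * NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS ot = { 0 }; + * ot.gpuId = gpuId; + * ot.output = NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_SOR; + * ot.protocol = NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_A; + * ot.structure = NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_PROGRESSIVE; + * ot.adjust = NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_DFP; + * ot.hTotal = 2200; ot.vTotal = 1125; // initial totals of the mode + * ot.refreshX10K = 599400; // 59.94 Hz + * // issue NV30F1_CTRL_CMD_GSYNC_GET_OPTIMIZED_TIMING with &ot; + * // on NV_OK, check ot.bOptimized before applying the returned timings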
+ * + * + * Progressive Raster Structure + * + * hSyncEnd hTotal + * 0 | hBlankEnd hBlankStart | + * | | | | | vSync vBlank + * 0--+--------------------------------------------+ +-+ | + * | Sync | | | + * vSyncEnd--| +----------------------------------------+ +-+ | + * | | Back Porch | | | + * vBlankEnd--| | +--------------------------------+ | | +-+ + * | | | Active Area | | | | + * | | | +------------------------+ | | | | + * | | | | | | | | | + * | S | B | A | | A | F | | | + * | y | a | c | | c | r | | | + * | n | c | t | | t | o | | | + * | c | k | i | | i | n | | | + * | | | v | | v | t | | | + * | | P | e | Output Viewport | e | | | | + * | | o | | | | P | | | + * | | r | A | | A | o | | | + * | | c | r | | r | r | | | + * | | h | e | | e | c | | | + * | | | a | | a | h | | | + * | | | | | | | | | + * | | | +------------------------+ | | | | + * | | | Active Area | | | | + * vBlankStart-| | +--------------------------------+ | | +-+ + * | | Front Porch | | | + * vTotal--+---+----------------------------------------+ +-+ | + * ___ + * / \________________________________________/ hSync + * ________ ____ + * \________________________________/ hBlank + * + * + * + * Interlaced Raster Structure + * + * hSyncEnd hTotal + * 0 | hBlankEnd hBlankStart | + * | | | | | vSync vBlank + * 0--+--------------------------------------------+ +-+ | + * | Sync | | | + * vSyncEnd--| +----------------------------------------+ +-+ | + * | | Back Porch | | | + * vBlankEnd--| | +--------------------------------+ | | +-+ + * | | | Active Area | | | | + * | | | +------------------------+ | | | | + * | | | | | | | | | + * | S | B | A | | A | F | | | + * | y | a | c | | c | r | | | + * | n | c | t | | t | o | | | + * | c | k | i | | i | n | | | + * | | | v | | v | t | | | + * | | P | e | Output Viewport | e | | | | + * | | o | | | | P | | | + * | | r | A | | A | o | | | + * | | c | r | | r | r | | | + * | | h | e | | e | c | | | + * | | | a | | a | h | | | + * | | | | | | | | | + * | | | +------------------------+ | | | | + * | | | Active Area | | | | + * vBlankStart-| | +--------------------------------+ | | +-+ + * | | | | | + * | | Front Porch +--------------------+ | | + * | | | | +-+ | + * | +-------------------+ | | | + * | | | | + * | Sync +--------------------+ | | + * | | | +-+ | + * | +-------------------+ | | | + * vInterlaced | | Back Porch | | | + * BlankEnd--| | +--------------------------------+ | | +-+ + * | | | Active Area | | | | + * | | | +------------------------+ | | | | + * | | | | | | | | | + * | S | B | A | | A | F | | | + * | y | a | c | | c | r | | | + * | n | c | t | | t | o | | | + * | c | k | i | | i | n | | | + * | | | v | | v | t | | | + * | | P | e | Output Viewport | e | | | | + * | | o | | | | P | | | + * | | r | A | | A | o | | | + * | | c | r | | r | r | | | + * | | h | e | | e | c | | | + * | | | a | | a | h | | | + * | | | | | | | | | + * | | | +------------------------+ | | | | + * vInterlaced | | | Active Area | | | | + * BlankStart-| | +--------------------------------+ | | +-+ + * | | Front Porch | | | + * vTotal--+---+----------------------------------------+ +-+ | + * ___ + * / \________________________________________/ hSync + * ________ ____ + * \________________________________/ hBlank + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * +*/ + +#define NV30F1_CTRL_CMD_GSYNC_GET_OPTIMIZED_TIMING (0x30f10160) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | 
NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS_MESSAGE_ID (0x60U) + +typedef struct NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS { + NvU32 gpuId; + NvU32 output; + NvU32 protocol; + NvU32 structure; + NvU32 adjust; + NvU32 hDeltaStep; + NvU32 hDeltaMax; + NvU32 vDeltaStep; + NvU32 vDeltaMax; + NvU32 hSyncEnd; + NvU32 hBlankEnd; + NvU32 hBlankStart; + NvU32 hTotal; + NvU32 vSyncEnd; + NvU32 vBlankEnd; + NvU32 vBlankStart; + NvU32 vInterlacedBlankEnd; + NvU32 vInterlacedBlankStart; + NvU32 vTotal; + NvU32 refreshX10K; + NV_DECLARE_ALIGNED(NvU64 pixelClockHz, 8); + + NvBool bOptimized; +} NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS; + +/* output values */ +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_DAC (0x00000001) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_SOR (0x00000004) + +/* protocol values for DAC displays (e.g. CRTs) */ +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_DAC_RGB_CRT (0x00000000) + +/* protocol values for SOR displays (e.g. DFPs) */ +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DUAL_TMDS (0x00000005) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_A (0x00000008) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_B (0x00000009) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_HDMI_FRL (0x0000000C) + +/* structure values */ +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_PROGRESSIVE (0x00000000) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_INTERLACED (0x00000001) + +/* adjust values */ +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_H_FRONT_PORCH (0x00000001) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_V_FRONT_PORCH (0x00000002) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_H_ACTIVE_AREA (0x00000004) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_V_ACTIVE_AREA (0x00000008) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_H_BACK_PORCH (0x00000010) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_V_BACK_PORCH (0x00000020) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_H_RASTER_SYNC (0x00000040) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_V_RASTER_SYNC (0x00000080) + +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_CRT (0x00000030) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_DFP (0x00000020) + +/* DeltaStep and DeltaMax values to trigger default settings */ +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_STEP_USE_DEFAULTS (0x00000000) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_MAX_USE_DEFAULTS (0x00000000) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_STEP_USE_DEFAULTS (0x00000000) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_MAX_USE_DEFAULTS (0x00000000) + +/* + * NV30F1_CTRL_CMD_GSYNC_SET_EVENT_NOTIFICATION + * + * This command sets event notification state for the associated Gsync + * object. This command requires that an instance of NV01_EVENT has + * been previously bound to the associated Gsync object. 
+ * + * If one or more of the "smart event notification" options are set in the + * action parameter, multiple sequential events of the same type will only + * trigger one notification. After that, only an event of a different type + * will trigger a new notification. + * + * action + * This member specifies the desired event notification action. + * Valid notification actions include: + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_DISABLE + * This action disables event notification for the associated + * Gsync object. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_LOSS + * This action enables smart event notification for the + * associated Gsync object, for "sync loss" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_GAIN + * This action enables smart event notification for the + * associated Gsync object, for "sync gained" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_STEREO_LOSS + * This action enables smart event notification for the + * associated Gsync object, for "stereo lost" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_STEREO_GAIN + * This action enables smart event notification for the + * associated Gsync object, for "stereo gained" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_HOUSE_GAIN + * This action enables smart event notification for the + * associated Gsync object, for "house sync (BNC) plug in" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_HOUSE_LOSS + * This action enables smart event notification for the + * associated Gsync object, for "house sync (BNC) plug out" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_RJ45_GAIN + * This action enables smart event notification for the + * associated Gsync object, for "ethernet (RJ45) plug in" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_RJ45_LOSS + * This action enables smart event notification for the + * associated Gsync object, for "ethernet (RJ45) plug out" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_COUNT_MATCH + * This action enables smart event notification for the + * associated Gsync object, for "frame counter match" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_ALL + * This action enables smart event notification for the + * associated Gsync object, for any type of event. 
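+ *
+ *       The action values defined below occupy distinct bits (their OR
+ *       equals ACTION_SMART_ALL), so, as an illustration rather than a
+ *       statement of the interface contract, a client interested in both
+ *       sync and stereo transitions could plausibly combine them:
+ *
+ *         action = NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_LOSS |
+ *                  NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_GAIN |
+ *                  NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_STEREO_LOSS |
+ *                  NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_STEREO_GAIN;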
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV30F1_CTRL_CMD_GSYNC_SET_EVENT_NOTIFICATION (0x30f10170) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_PARAMS_MESSAGE_ID (0x70U)
+
+typedef struct NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_PARAMS {
+    NvU32 action;
+} NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_PARAMS;
+
+/* valid action values */
+
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_DISABLE           (0x00000000)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_LOSS   (0x00000001)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_GAIN   (0x00000002)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_STEREO_LOSS (0x00000004)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_STEREO_GAIN (0x00000008)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_HOUSE_GAIN  (0x00000010)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_HOUSE_LOSS  (0x00000020)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_RJ45_GAIN   (0x00000040)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_RJ45_LOSS   (0x00000080)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_COUNT_MATCH (0x00000100)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_ALL         (0x000001FF)
+
+#define NV30F1_CTRL_GSYNC_EVENT_TYPES 9
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE
+ * NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_STEREO_LOCK_MODE
+ *
+ * These commands can be used to get/set the stereo lock assistance mode of
+ * the GSYNC device. This is supported by GSYNC III devices only.
+ * 1] In this mode the GSYNC recreates the hidden VS either by
+ *    (a) using the local stereo edge, if stereo is toggling, or
+ *    (b) counting lines and generating the missing VS.
+ * 2] The master GSYNC card recreates the stereo and passes it along to
+ *    the slave GSYNC cards.
+ * 3] The slave GSYNC cards generate the stereo raster sync structure to
+ *    synchronize the GPU.
+ * 4] For stereo sync status reporting, under this mode, the GSYNC automatically
+ *    reports stereo lock whenever it gets the master stereo signal. The
+ *    assumption is that local stereo will be in sync with the new structure.
+ * 5] If a slave GSYNC card does not observe master stereo for any reason,
+ *    (a) it clears the stereo sync bit and
+ *    (b) it generates its own version of stereo and syncs the GPU.
+ *
+ * Parameters:
+ *   gpuId
+ *     This parameter is set by the client to indicate the gpuId on which
+ *     the stereo lock mode should be enabled/disabled.
+ *
+ *   enable
+ *     In a SET query, this parameter is set by the client to indicate whether
+ *     RM should enable or disable stereo lock mode for the GPU specified in
+ *     gpuId; 1 enables and 0 disables stereo lock mode. In a GET query, RM
+ *     sets this parameter to 1 or 0 depending on whether stereo lock mode is
+ *     enabled for the specified GPU.
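+ *
+ * A minimal usage sketch (illustrative only; the NvRmControl() wrapper
+ * and handle names are hypothetical placeholders):
+ *
+ *   NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS p = { 0 };
+ *   p.gpuId  = gpuId;
+ *   p.enable = 1;   // 1 = enable stereo lock mode, 0 = disable
+ *   status = NvRmControl(hClient, hGsync,
+ *                        NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE,
+ *                        &p, sizeof(p));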
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE (0x30f10172) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_STEREO_LOCK_MODE (0x30f10173) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_STEREO_LOCK_MODE_PARAMS_MESSAGE_ID" */ + +typedef struct NV30F1_CTRL_CMD_GSYNC_CONTROL_STEREO_LOCK_MODE_PARAMS { + NvU32 gpuId; + NvU32 enable; +} NV30F1_CTRL_CMD_GSYNC_CONTROL_STEREO_LOCK_MODE_PARAMS; + +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS_MESSAGE_ID (0x72U) + +typedef NV30F1_CTRL_CMD_GSYNC_CONTROL_STEREO_LOCK_MODE_PARAMS NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS; + +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_STEREO_LOCK_MODE_PARAMS_MESSAGE_ID (0x73U) + +typedef NV30F1_CTRL_CMD_GSYNC_CONTROL_STEREO_LOCK_MODE_PARAMS NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_STEREO_LOCK_MODE_PARAMS; + +/* + * NV30F1_CTRL_CMD_GSYNC_READ_REGISTER + * + * This command is used to read raw i2c registers from the gsync device, via + * the given GPU (registers on the same gsync device may have different values + * depending on which GPU is used to do the read). + * + * This may only be used by a privileged client. + * + * Parameters: + * gpuId + * This parameter is set by the client to specify which GPU to use to + * perform the read. + * + * reg + * This parameter is set by the client to specify which i2c register to + * read. + * + * data + * This parameter is written by the RM and returned to the client upon a + * successful read. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INSUFFICIENT_PERMISSIONS + */ +#define NV30F1_CTRL_CMD_GSYNC_READ_REGISTER (0x30f10180) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_READ_REGISTER_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_READ_REGISTER_PARAMS_MESSAGE_ID (0x80U) + +typedef struct NV30F1_CTRL_GSYNC_READ_REGISTER_PARAMS { + NvU32 gpuId; + NvU8 reg; + NvU8 data; +} NV30F1_CTRL_GSYNC_READ_REGISTER_PARAMS; + +/* + * NV30F1_CTRL_CMD_GSYNC_WRITE_REGISTER + * + * This command is used to write raw i2c registers on the gsync device, via the + * given GPU (registers on the same gsync device may have different values + * depending on which GPU is used to do the write). + * + * This may only be used by a privileged client. + * + * Parameters: + * gpuId + * This parameter is set by the client to specify which GPU to use to + * perform the write. + * + * reg + * This parameter is set by the client to specify which i2c register to + * write. + * + * data + * This parameter is set by the client to specify what data to write to the + * given i2c register. 
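+ *
+ * Illustrative read-modify-write sketch spanning the read and write
+ * commands (the NvRmControl() wrapper and handles are hypothetical
+ * placeholders, and register 0x2A is an arbitrary example; the write
+ * params structure is defined below):
+ *
+ *   NV30F1_CTRL_GSYNC_READ_REGISTER_PARAMS  rd = { .gpuId = gpuId,
+ *                                                  .reg   = 0x2A };
+ *   NvRmControl(hClient, hGsync, NV30F1_CTRL_CMD_GSYNC_READ_REGISTER,
+ *               &rd, sizeof(rd));
+ *   NV30F1_CTRL_GSYNC_WRITE_REGISTER_PARAMS wr = { .gpuId = gpuId,
+ *                                                  .reg   = 0x2A,
+ *                                                  .data  = rd.data | 0x01 };
+ *   NvRmControl(hClient, hGsync, NV30F1_CTRL_CMD_GSYNC_WRITE_REGISTER,
+ *               &wr, sizeof(wr));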
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_GENERIC
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ */
+
+#define NV30F1_CTRL_CMD_GSYNC_WRITE_REGISTER (0x30f10181) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_WRITE_REGISTER_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_WRITE_REGISTER_PARAMS_MESSAGE_ID (0x81U)
+
+typedef struct NV30F1_CTRL_GSYNC_WRITE_REGISTER_PARAMS {
+    NvU32 gpuId;
+    NvU8  reg;
+    NvU8  data;
+} NV30F1_CTRL_GSYNC_WRITE_REGISTER_PARAMS;
+
+
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_SET_LOCAL_SYNC
+ *
+ * This command enables/disables raster sync on displays, i.e. on mosaic
+ * groups between GPUs.
+ *
+ *   gpuTimingSource
+ *     This parameter is set by the client to indicate the gpuId of the
+ *     Timing Source GPU for the specified mosaic group.
+ *   gpuTimingSlaves[]
+ *     This parameter is set by the client to indicate the gpuIds of the
+ *     timing slave GPUs for the specified mosaic group. It should not contain
+ *     more gpuIds than slaveGpuCount.
+ *   slaveGpuCount
+ *     This parameter is set by the client to indicate the count of timing
+ *     slave GPUs under the specified group.
+ *     For gsync3-P2060, slaveGpuCount can vary from 0x01 to 0x03, as at most
+ *     four GPUs can be connected and one GPU must be the timing master for
+ *     the mosaic group.
+ *   mosaicGroupNumber
+ *     This parameter is set by the client to tell the RM which mosaic
+ *     group it should refer to.
+ *     For gsync3-P2060, mosaicGroupNumber can contain 0x00 or 0x01, as only
+ *     two mosaic groups are possible.
+ *   enableMosaic
+ *     This parameter is set by the client to indicate whether RM should
+ *     enable or disable mosaic.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_GENERIC
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV30F1_CTRL_CMD_GSYNC_SET_LOCAL_SYNC (0x30f10185) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_LOCAL_SYNC_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_SET_LOCAL_SYNC_PARAMS_MESSAGE_ID (0x85U)
+
+typedef struct NV30F1_CTRL_GSYNC_SET_LOCAL_SYNC_PARAMS {
+    NvU32  gpuTimingSource;
+    NvU32  gpuTimingSlaves[NV30F1_CTRL_MAX_GPUS_PER_GSYNC];
+    NvU32  slaveGpuCount;
+    NvU32  mosaicGroupNumber;
+    NvBool enableMosaic;
+} NV30F1_CTRL_GSYNC_SET_LOCAL_SYNC_PARAMS;
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH
+ *
+ * This command configures GSYNC registers for pre-flash and post-flash
+ * operations. It is currently used for GSYNC-3 (P2060) only. RM clients
+ * have to make sure that they perform both the pre-flash and post-flash
+ * operations on the GSYNC board. Skipping post-flash will cause a mismatch
+ * between RM cached data and GSYNC register values.
+ *
+ * Parameters:
+ *   gpuId
+ *     This parameter is set by the client to indicate the gpuId; the GSYNC
+ *     board connected to that GPU will be configured for the pre-flash or
+ *     post-flash operation, depending on the preFlash value.
+ *
+ *   preFlash
+ *     This parameter is set by the client to indicate whether RM has to
+ *     configure GSYNC registers and SW state for the pre-flash or post-flash
+ *     operation. Values 1 and 0 indicate that RM will configure the GSYNC
+ *     board for the pre-flash and post-flash operations, respectively.
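+ *
+ * Illustrative call sequence (the NvRmControl() wrapper, handles, and
+ * the flashFirmware() step are hypothetical placeholders):
+ *
+ *   NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH_PARAMS cf = { .gpuId = gpuId };
+ *   cf.preFlash = 1;   // configure for pre-flash
+ *   NvRmControl(hClient, hGsync, NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH,
+ *               &cf, sizeof(cf));
+ *   flashFirmware();   // client-specific firmware update step
+ *   cf.preFlash = 0;   // always restore with post-flash afterwards
+ *   NvRmControl(hClient, hGsync, NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH,
+ *               &cf, sizeof(cf));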
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_GENERIC
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH (0x30f10186) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH_PARAMS_MESSAGE_ID (0x86U)
+
+typedef struct NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH_PARAMS {
+    NvU32 gpuId;
+    NvU32 preFlash;
+} NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH_PARAMS;
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_GET_HOUSE_SYNC_MODE
+ * NV30F1_CTRL_CMD_GSYNC_SET_HOUSE_SYNC_MODE
+ *
+ * These two commands get/set the house sync mode as input or output.
+ *
+ * Parameters:
+ *   houseSyncMode
+ *     This parameter indicates whether the house sync mode is input or
+ *     output. For GET_HOUSE_SYNC_MODE, the current mode will be written
+ *     by RM and returned to the client; for SET_HOUSE_SYNC_MODE, the client
+ *     will write the new mode value to this parameter and pass it to RM
+ *     for execution.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_GENERIC
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INVALID_STATE
+ *
+ */
+#define NV30F1_CTRL_CMD_GSYNC_GET_HOUSE_SYNC_MODE (0x30f10187) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_HOUSE_SYNC_MODE_PARAMS_MESSAGE_ID" */
+
+typedef struct NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_PARAMS {
+    NvU8 houseSyncMode;
+} NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_PARAMS;
+
+#define NV30F1_CTRL_GSYNC_GET_HOUSE_SYNC_MODE_PARAMS_MESSAGE_ID (0x87U)
+
+typedef NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_PARAMS NV30F1_CTRL_GSYNC_GET_HOUSE_SYNC_MODE_PARAMS;
+
+#define NV30F1_CTRL_CMD_GSYNC_SET_HOUSE_SYNC_MODE (0x30f10188) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_HOUSE_SYNC_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_SET_HOUSE_SYNC_MODE_PARAMS_MESSAGE_ID (0x88U)
+
+typedef NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_PARAMS NV30F1_CTRL_GSYNC_SET_HOUSE_SYNC_MODE_PARAMS;
+
+#define NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_INPUT  (0x00)
+#define NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_OUTPUT (0x01)
+
+/* _ctrl30f1_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl402c.h b/src/common/sdk/nvidia/inc/ctrl/ctrl402c.h
new file mode 100644
index 0000000..3d2bb0e
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl402c.h
@@ -0,0 +1,971 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl402c.finn
+//
+
+#include "ctrl/ctrlxxxx.h"
+/* NV40_I2C control commands and parameters */
+#define NV402C_CTRL_CMD(cat,idx) \
+    NVXXXX_CTRL_CMD(0x402C, NV402C_CTRL_##cat, idx)
+
+/* I2C command categories (6 bits) */
+#define NV402C_CTRL_RESERVED (0x00)
+#define NV402C_CTRL_I2C      (0x01)
+
+
+/* This field specifies the maximum regular port identifier allowed. */
+#define NV402C_CTRL_NUM_I2C_PORTS 16
+/* This temporary field specifies the dynamic port identifier. */
+#define NV402C_CTRL_DYNAMIC_PORT  NV_U8_MAX
+
+/*
+ * NV402C_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV402C_CTRL_CMD_NULL (0x402c0000) /* finn: Evaluated from "(FINN_NV40_I2C_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+
+/*
+ * NV402C_CTRL_I2C_GET_PORT_INFO_IMPLEMENTED
+ *   The port exists on this hardware.
+ * NV402C_CTRL_I2C_GET_PORT_INFO_DCB_DECLARED
+ *   The port has an entry in the DCB.
+ * NV402C_CTRL_I2C_GET_PORT_INFO_DDC_CHANNEL
+ *   The port is used to read EDIDs via DDC.
+ * NV402C_CTRL_I2C_GET_PORT_INFO_CRTC_MAPPED
+ *   The port is accessible via the CRTC register space.
+ * NV402C_CTRL_I2C_GET_PORT_INFO_VALID
+ *   The port is validated using an I2C device.
+ */
+#define NV402C_CTRL_I2C_GET_PORT_INFO_IMPLEMENTED         0:0
+#define NV402C_CTRL_I2C_GET_PORT_INFO_IMPLEMENTED_NO      0x00
+#define NV402C_CTRL_I2C_GET_PORT_INFO_IMPLEMENTED_YES     0x01
+#define NV402C_CTRL_I2C_GET_PORT_INFO_DCB_DECLARED        1:1
+#define NV402C_CTRL_I2C_GET_PORT_INFO_DCB_DECLARED_NO     0x00
+#define NV402C_CTRL_I2C_GET_PORT_INFO_DCB_DECLARED_YES    0x01
+#define NV402C_CTRL_I2C_GET_PORT_INFO_DDC_CHANNEL         2:2
+#define NV402C_CTRL_I2C_GET_PORT_INFO_DDC_CHANNEL_ABSENT  0x00
+#define NV402C_CTRL_I2C_GET_PORT_INFO_DDC_CHANNEL_PRESENT 0x01
+#define NV402C_CTRL_I2C_GET_PORT_INFO_CRTC_MAPPED         3:3
+#define NV402C_CTRL_I2C_GET_PORT_INFO_CRTC_MAPPED_NO      0x00
+#define NV402C_CTRL_I2C_GET_PORT_INFO_CRTC_MAPPED_YES     0x01
+#define NV402C_CTRL_I2C_GET_PORT_INFO_VALID               4:4
+#define NV402C_CTRL_I2C_GET_PORT_INFO_VALID_NO            0x00
+#define NV402C_CTRL_I2C_GET_PORT_INFO_VALID_YES           0x01
+#define NV402C_CTRL_I2C_GET_PORT_INFO_ALL                 4:0
+#define NV402C_CTRL_I2C_GET_PORT_INFO_ALL_DEFAULT         0x00
+
+/*
+ * NV402C_CTRL_CMD_I2C_GET_PORT_INFO
+ *
+ * Returns information for the I2C ports; the info array covers
+ * NV402C_CTRL_NUM_I2C_PORTS ports.
+ *
+ *   info
+ *     This parameter is an output from the command and is ignored as an
+ *     input. Each element contains the flags described previously, named
+ *     NV402C_CTRL_I2C_GET_PORT_INFO*. Note that the index into the info
+ *     array is one less than the port identifier that would be returned from
+ *     NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID; the port numbers here are
+ *     0-indexed as opposed to 1-indexed.
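+ *
+ * Illustrative decoding sketch (the NvRmControl() wrapper and handle
+ * names are hypothetical placeholders; NVBIT() is assumed available to
+ * the client, and bit 0 corresponds to the _IMPLEMENTED field):
+ *
+ *   NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS pi = { { 0 } };
+ *   NvRmControl(hClient, hI2c, NV402C_CTRL_CMD_I2C_GET_PORT_INFO,
+ *               &pi, sizeof(pi));
+ *   for (port = 0; port < NV402C_CTRL_NUM_I2C_PORTS; port++)
+ *   {
+ *       if (pi.info[port] & NVBIT(0))   // _IMPLEMENTED_YES
+ *           implementedCount++;
+ *   }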
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+
+#define NV402C_CTRL_CMD_I2C_GET_PORT_INFO (0x402c0101) /* finn: Evaluated from "(FINN_NV40_I2C_I2C_INTERFACE_ID << 8) | NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS_MESSAGE_ID" */
+
+
+
+#define NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS {
+    NvU8 info[NV402C_CTRL_NUM_I2C_PORTS];
+} NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS;
+#define NV402C_CTRL_I2C_INDEX_LENGTH_MAX   4
+#define NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX 4096
+
+//! Minimum and maximum valid read/write message length for block process protocol.
+#define NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MIN 3
+#define NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX 32
+
+/*
+ * NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE
+ *   A client uses this field to indicate the I2C addressing mode to be
+ *   used.
+ *   Possible values are:
+ *     NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_7BIT
+ *       The default, this value specifies the master to operate in the
+ *       basic 7-bit addressing mode, which is available on all
+ *       implementations.
+ *     NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_10BIT
+ *       This I2C mode allows for 10 bits of addressing space and is
+ *       backward compatible with 7-bit addressing.
+ */
+#define NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE         0:0
+#define NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_7BIT    (0x00000000)
+#define NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_10BIT   (0x00000001)
+#define NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_DEFAULT NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_7BIT
+/*
+ * NV402C_CTRL_I2C_FLAGS_SPEED_MODE
+ *   A client uses this field to indicate the target speed at which the
+ *   I2C master should attempt to drive the bus. The master may throttle
+ *   its own speed for various reasons, and devices may slow the bus
+ *   using clock-stretching. Neither of these possibilities is
+ *   considered a failure.
+ */
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE         4:1
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_100KHZ  (0x00000000)
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_200KHZ  (0x00000001)
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_400KHZ  (0x00000002)
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_33KHZ   (0x00000003)
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_10KHZ   (0x00000004)
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_3KHZ    (0x00000005)
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_DEFAULT (0x00000006)
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_300KHZ  (0x00000007)
+
+
+/*
+ * NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE
+ *   A client uses this field to specify a transaction mode.
+ *   Possible values are:
+ *     NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_NORMAL
+ *       The default, this value indicates to use the normal I2C transaction
+ *       mode, which will involve read/write operations depending on the
+ *       client's needs.
+ *     NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_PING
+ *       This value specifies that the device only needs to be pinged; there
+ *       is no need to perform a complete read/write transaction. A single
+ *       byte is sent to the device to be pinged, and receiving an ACK
+ *       confirms the device's availability.
+ */
+#define NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE         11:10
+#define NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_NORMAL  (0x00000000)
+#define NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_PING    (0x00000001)
+#define NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_DEFAULT NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_NORMAL
+/*!
+ * NV402C_CTRL_I2C_FLAGS_RESERVED
+ *   A client must leave this field as 0, as it is reserved for future use.
+ */
+#define NV402C_CTRL_I2C_FLAGS_RESERVED 31:12
+
+/*!
+ * The following defines specify WAR flags that can be specified during
+ * an I2C Quick Read or Write command (refer to
+ * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW).
+ *
+ * _NONE
+ *   No workaround is needed.
+ *
+ * _TEST_PORT
+ *   Use this flag to have the client send a request to test a port instead
+ *   of performing any transaction on it. The transaction type has to be
+ *   NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW.
+ */
+#define NV402C_CTRL_I2C_SMBUS_QUICK_RW_WAR_FLAGS           0:0
+#define NV402C_CTRL_I2C_SMBUS_QUICK_RW_WAR_FLAGS_NONE      0x00000000
+#define NV402C_CTRL_I2C_SMBUS_QUICK_RW_WAR_FLAGS_TEST_PORT 0x00000001
+
+/*!
+ * The following defines specify WAR flags that can be specified during
+ * an I2C Register Read or Write buffer command
+ * (refer to NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW).
+ *
+ * _NONE
+ *   No workaround is needed.
+ *
+ * _SI1930
+ *   SI1930 microcontroller register read or write requested by a client.
+ *   The transaction type has to be NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW.
+ *
+ * _PX3540
+ *   Register read from a PX3540 or PX3544 device. The transaction type has to
+ *   be NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW, and bWrite must be TRUE
+ *   to indicate a READ operation.
+ */
+#define NV402C_CTRL_I2C_BUFFER_RW_WAR_FLAGS        1:0
+#define NV402C_CTRL_I2C_BUFFER_RW_WAR_FLAGS_NONE   0x00000000
+#define NV402C_CTRL_I2C_BUFFER_RW_WAR_FLAGS_SI1930 0x00000001
+#define NV402C_CTRL_I2C_BUFFER_RW_WAR_FLAGS_PX3540 0x00000002
+
+/*!
+ * The following defines specify WAR flags that can be specified during
+ * an I2C buffer Read or Write to a Multibyte Register
+ * (refer to NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW).
+ *
+ * _NONE
+ *   No workaround is needed.
+ *
+ * _NO_AUTO_INC
+ *   This value specifies that the device does not support auto-increment.
+ *   Most devices allow you to write multiple bytes after specifying a
+ *   register address, and the subsequent bytes will go to incremented
+ *   addresses. Without auto-increment, we write a buffer of data as a
+ *   sequence of address-register-value triplets separated by starts.
+ */
+#define NV402C_CTRL_I2C_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW_WAR_FLAGS             0:0
+#define NV402C_CTRL_I2C_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW_WAR_FLAGS_NONE        0x00000000
+#define NV402C_CTRL_I2C_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW_WAR_FLAGS_NO_AUTO_INC 0x00000001
+
+/*
+ * NV402C_CTRL_CMD_I2C_INDEXED
+ *
+ * Perform a basic I2C transaction synchronously.
+ *
+ *   portId
+ *     This field must be specified by the client to indicate the logical
+ *     port/bus for which the transaction is requested. The port identifier
+ *     is one less than the value returned by
+ *     NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID unless that value was 0 (the
+ *     'dynamic' port). For the 'dynamic' port, this should be 0xFF. Note
+ *     that future versions of the API may obsolete use of the 'dynamic' port;
+ *     please contact the RM if you begin using this portion of the API so we
+ *     can help you migrate when the time comes.
+ *
+ *   bIsWrite
+ *     This field must be specified by the client to indicate whether the
+ *     command is a write (TRUE) or a read (FALSE).
+ *
+ *   flags
+ *     This parameter specifies optional flags used to control certain modal
+ *     features such as target speed and addressing mode. The currently
+ *     defined fields are described previously; see NV402C_CTRL_I2C_FLAGS_*.
+ *
+ *   address
+ *     The address of the I2C slave. The address should be shifted left by
+ *     one. For example, the I2C address 0x50, often used for reading EDIDs,
+ *     would be stored here as 0xA0.
+ *     This matches the position within the
+ *     byte sent by the master, as the last bit is reserved to specify the
+ *     read or write direction.
+ *
+ *   indexLength
+ *     This required parameter specifies how many bytes to write as part of the
+ *     first index. If zero is specified, then no index will be sent.
+ *
+ *   index
+ *     This parameter, required of the client if indexLength is one or more,
+ *     specifies the index to be written. The buffer should be arranged such
+ *     that index[0] will be the first byte sent.
+ *
+ *   messageLength
+ *     This parameter, required of the client, specifies the number of bytes to
+ *     read or write from the slave after the index is written.
+ *
+ *   pMessage
+ *     This parameter, required of the client, specifies the data to be written
+ *     to the slave. The buffer should be arranged such that pMessage[0] will
+ *     be the first byte read or written. If the transaction is a read, then
+ *     it will follow the combined format described in the I2C specification.
+ *     If the transaction is a write, the message will immediately follow the
+ *     index without a restart.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_STATE_IN_USE
+ *   NV_ERR_GENERIC, if the I2C transaction fails.
+ */
+#define NV402C_CTRL_I2C_INDEXED_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV402C_CTRL_I2C_INDEXED_PARAMS {
+    NvU8  portId;
+    NvU8  bIsWrite;
+    NvU16 address;
+    NvU32 flags;
+
+    NvU32 indexLength;
+    NvU8  index[NV402C_CTRL_I2C_INDEX_LENGTH_MAX];
+
+    NvU32 messageLength;
+    NV_DECLARE_ALIGNED(NvP64 pMessage, 8);
+} NV402C_CTRL_I2C_INDEXED_PARAMS;
+
+#define NV402C_CTRL_CMD_I2C_INDEXED (0x402c0102) /* finn: Evaluated from "(FINN_NV40_I2C_I2C_INTERFACE_ID << 8) | NV402C_CTRL_I2C_INDEXED_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV402C_CTRL_CMD_I2C_GET_PORT_SPEED
+ *
+ * Returns information for the I2C ports.
+ *
+ *   portSpeed
+ *     This parameter is an output from the command and is ignored as an
+ *     input. Each element contains the current I2C speed of the port.
+ *     Note that the index into the portSpeed array is one less than the
+ *     port identifier that would be returned from
+ *     NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID; the port numbers here are
+ *     0-indexed as opposed to 1-indexed.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV402C_CTRL_I2C_GET_PORT_SPEED_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV402C_CTRL_I2C_GET_PORT_SPEED_PARAMS {
+    NvU32 portSpeed[NV402C_CTRL_NUM_I2C_PORTS];
+} NV402C_CTRL_I2C_GET_PORT_SPEED_PARAMS;
+
+#define NV402C_CTRL_CMD_I2C_GET_PORT_SPEED (0x402c0103) /* finn: Evaluated from "(FINN_NV40_I2C_I2C_INTERFACE_ID << 8) | NV402C_CTRL_I2C_GET_PORT_SPEED_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV402C_CTRL_I2C_DEVICE_INFO
+ *
+ * This structure describes the basic I2C Device information.
+ *
+ *   type
+ *     This field returns the type of the device (NV_DCB4X_I2C_DEVICE_TYPE_*).
+ *   i2cAddress
+ *     This field contains the 7-bit/10-bit address of the I2C device.
+ *   i2cLogicalPort
+ *     This field contains the logical port of the I2C device.
+ */
+typedef struct NV402C_CTRL_I2C_DEVICE_INFO {
+    NvU8  type;
+    NvU16 i2cAddress;
+    NvU8  i2cLogicalPort;
+    NvU8  i2cDevIdx;
+} NV402C_CTRL_I2C_DEVICE_INFO;
+
+/* Maximum number of I2C devices in DCB */
+#define NV402C_CTRL_I2C_MAX_DEVICES 32
+
+/*
+ * NV402C_CTRL_CMD_I2C_TABLE_GET_DEV_INFO
+ *
+ * RM Control to get I2C device info from the DCB I2C Devices Table.
+ *
+ *   i2cDevCount
+ *     The value of this parameter will give the number of
+ *     I2C devices found in DCB.
+ *
+ *   i2cDevInfo
+ *     For each device found in DCB, the control call will write the info
+ *     in this parameter.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS {
+    NvU8                        i2cDevCount;
+    NV402C_CTRL_I2C_DEVICE_INFO i2cDevInfo[NV402C_CTRL_I2C_MAX_DEVICES];
+} NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS;
+
+#define NV402C_CTRL_CMD_I2C_TABLE_GET_DEV_INFO (0x402c0104) /* finn: Evaluated from "(FINN_NV40_I2C_I2C_INTERFACE_ID << 8) | NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS_MESSAGE_ID" */
+
+/*!
+ * The IDs of each type of I2C command available.
+ */
+typedef enum NV402C_CTRL_I2C_TRANSACTION_TYPE {
+    /*!
+     * This transaction type is used to perform the Quick SMBus Read/Write
+     * command on a slave device. No data is sent or received; it is just used
+     * to verify the presence of the device.
+     * Refer SMBus spec 2.0 (section 5.5.1 Quick Command)
+     * SMBus Quick Write : S Addr|Wr [A] P
+     * SMBus Quick Read  : S Addr|Rd [A] P
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW = 0,
+    /*!
+     * This transaction type is used to perform the I2C byte read/write from/to
+     * a slave device. As per the spec, the last byte should be NA (Not
+     * Acknowledged) by the slave.
+     * Refer I2C Bus spec 3.0 (section 9 Fig 11 and Fig 12) or refer SMBus spec
+     * 2.0 (section 5.5.2 Send Byte and 5.5.3 Receive Byte).
+     * I2C Byte Write : S Addr|Wr [A] Data [NA] P
+     * I2C Byte Read  : S Addr|Rd [A] Data NA P
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW = 1,
+    /*!
+     * This transaction type is used to perform the I2C block (buffer)
+     * read/write from/to a slave device. As per the spec, the last byte should
+     * be NA (Not Acknowledged) by the slave.
+     * Refer I2C Bus spec 3.0 (section 9 Fig 11 and Fig 12)
+     * I2C Block Write : S Addr|Wr [A] Data1 [A]...Data(N-1) [A] DataN [NA] P
+     * I2C Block Read  : S Addr|Rd [A] Data1 A...Data(N-1) A DataN NA P
+     *
+     * Distinction between I2C_BLOCK and SMBUS_BLOCK protocol:
+     * In I2C Block write it is the slave device (and in I2C Block read it's
+     * the master device) that determines the number of bytes to transfer by
+     * asserting the NAK at the last bit before stop. This differs from the
+     * SMBus block mode write command in which the master determines the block
+     * write transfer size. In I2C Block read there is no limit to the maximum
+     * size of data that could be transferred, whereas in SMBus block it is
+     * restricted to 255 bytes (0xFF).
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW = 2,
+    /*!
+     * This transaction type is used to perform the I2C Buffer read/write
+     * from/to a register of a slave device. It does not send a byte count as
+     * part of the data buffer.
+     * Not a part of the SMBus spec.
+     * I2C Buffer Write : S Addr|Wr [A] cmd [A] Data1 [A]...DataN[A] P
+     * I2C Buffer Read  : S Addr|Wr [A] cmd [A] Sr Addr|Rd [A] Data1 A...
+     *                    DataN-1 A DataN A P
+     *
+     * Distinction between SMBUS_BLOCK and I2C_BUFFER protocol:
+     * In SMBUS_BLOCK read/write, the first byte of the data buffer contains
+     * the count size (the number of bytes to be transferred), and it is
+     * restricted to 255 bytes, whereas in I2C_BUFFER, the count size is not
+     * sent during the transfer and there is no restriction in terms of size.
+     *
+     * Distinction between I2C_BLOCK and I2C_BUFFER protocol:
+     * I2C_BUFFER takes the register address as an argument, whereas I2C_BLOCK
+     * does not have any register or command provision.
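+     *
+     * As a rough illustration (buf is a hypothetical client buffer, and
+     * NV_PTR_TO_NvP64 is assumed to be available from nvtypes.h), reading
+     * 16 bytes from register 0x10 with this protocol via the
+     * NV402C_CTRL_CMD_I2C_TRANSACTION command defined later in this file
+     * could look like:
+     *
+     *   NV402C_CTRL_I2C_TRANSACTION_PARAMS t = { 0 };
+     *   t.portId        = portId;
+     *   t.deviceAddress = 0xA0;    // left-shifted I2C address
+     *   t.transType     = NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW;
+     *   t.transData.i2cBufferData.bWrite          = NV_FALSE;
+     *   t.transData.i2cBufferData.registerAddress = 0x10;
+     *   t.transData.i2cBufferData.messageLength   = 16;
+     *   t.transData.i2cBufferData.pMessage        = NV_PTR_TO_NvP64(buf);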
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW = 3,
+    /*!
+     * This transaction type is used to perform the SMBus byte read/write
+     * from/to a slave device.
+     * Refer SMBus spec 2.0 (section 5.5.4 Write Byte and 5.5.5 Read Byte)
+     * SMBus Byte Write : S Addr|Wr [A] cmd [A] Data [A] P
+     * SMBus Byte Read  : S Addr|Wr [A] cmd [A] Sr Addr|Rd [A] Data A P
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW = 4,
+    /*!
+     * This transaction type is used to perform the SMBus word read/write
+     * from/to a register of a slave device.
+     * Refer SMBus spec 2.0 (section 5.5.4 Write Word and 5.5.5 Read Word)
+     * SMBus Word Write : S Addr|Wr [A] cmd [A] DataLow [A] DataHigh [A] P
+     * SMBus Word Read  : S Addr|Wr [A] cmd [A] Sr Addr|Rd [A] DataLow A
+     *                    DataHigh A P
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW = 5,
+    /*!
+     * This transaction type is used to perform the SMBus Block read/write
+     * from/to a register of a slave device.
+     * Refer SMBus spec 2.0 (section 5.5.7 Block Write/Read)
+     * SMBus Block Write : S Addr|Wr [A] cmd [A] ByteCount [A] Data1 [A]...
+     *                     DataN-1 [A] DataN[A] P
+     * SMBus Block Read  : S Addr|Wr [A] cmd [A] Sr Addr|Rd [A] ByteCount A
+     *                     Data1 A...DataN-1 A DataN A P
+     *
+     * Distinction between I2C_BLOCK and SMBUS_BLOCK protocol:
+     * In I2C Block write it is the slave device (and in I2C Block read it's
+     * the master device) that determines the number of bytes to transfer by
+     * asserting the NAK at the last bit before stop. This differs from the
+     * SMBus block mode write/read command in which the master determines the
+     * block write transfer size. In I2C Block read/write there is no limit to
+     * the maximum size of data that could be transferred, whereas in SMBus
+     * block it is restricted to 255 bytes (0xFF).
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW = 6,
+    /*!
+     * This transaction type is used to perform the SMBus process call. It
+     * sends data and waits for the slave to return a value dependent on that
+     * data. The protocol is simply an SMBus Write Word followed by an SMBus
+     * Read Word without the Read-Word command field and the Write-Word STOP
+     * bit. Note that there is no STOP condition before the repeated START
+     * condition, and that a NACK signifies the end of the read transfer.
+     *
+     * Refer SMBus spec 2.0 (section 5.5.6 Process Call)
+     * SMBus Process Call : S Addr|Wr [A] cmd [A] DataLow [A] DataHigh [A]
+     *                      Sr Addr|Rd [A] DataLow [A] DataHigh [NA] P
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL = 7,
+    /*!
+     * This transaction type is used to perform the SMBus Block Write Block
+     * Read process call.
+     * The block write-block read process call is a two-part message. The call
+     * begins with a slave address and a write condition. After the command code
+     * the host issues a write byte count (M) that describes how many more bytes
+     * will be written in the first part of the message.
+     * If a master has 6 bytes to send, the byte count field will have the value
+     * 6 (0000 0110b), followed by the 6 bytes of data. The write byte count (M)
+     * cannot be zero.
+     * The second part of the message is a block of read data beginning with a
+     * repeated start condition followed by the slave address and a Read bit.
+     * The next byte is the read byte count (N), which may differ from the write
+     * byte count (M). The read byte count (N) cannot be zero. The combined data
+     * payload must not exceed 32 bytes.
+     * The byte length restrictions of this process call are summarized as
+     * follows:
+     *   M >= 1 byte
+     *   N >= 1 byte
+     *   M + N <= 32 bytes
+     * Note that there is no STOP condition before the repeated START condition,
+     * and that a NACK signifies the end of the read transfer.
+     *
+     * Refer SMBus spec 2.0 (section 5.5.8 Block Write Block Read Process Call)
+     * SMBus Process Call : S Addr|Wr [A] cmd [A] ByteCount=M [A] Data1 [A]...
+     *                      DataM-1 [A] DataM [A] Sr Addr|Rd [A] ByteCount=N [A]
+     *                      Data1 [A]...DataN [NA] P
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL = 8,
+    /*!
+     * This transaction type is used to perform SMBus buffer read/write
+     * from/to multiple registers of a slave device, known as Auto Increment.
+     * It is not a part of any standard I2C/SMBus spec but a feature of many
+     * SMBus devices like EEPROMs.
+     * It is also used for reading a block of bytes from a designated register
+     * of a slave device that is specified through the two Comm bytes, or
+     * writing a block of bytes to a designated register of a slave device.
+     * (Note: the command byte in this case could be 0, 2 or 4 bytes.)
+     * SMBus Multi-Byte Register Block Write : S Addr|Wr [A] cmd1 A cmd 2 [A]...
+     *     cmdN [A] data1 [A] Data2 [A].....DataN [A] P
+     * SMBus Multi-Byte Register Block Read  : S Addr|Rd [A] cmd1 A cmd 2 [A]...
+     *     cmdN [A] data1 [A] Sr Addr [A] Data1 A Data2 A...DataN A P
+     *
+     * This transaction type could also be used for those devices which support
+     * AUTO_INC. Even though it is frequently related to I2C/SMBus, automatic
+     * incrementation is not part of any I2C standard but rather a common
+     * feature found in many I2C devices. What it means is that the device
+     * maintains an internal pointer which is automatically incremented upon
+     * data read or write activities and which can be manually set to a fixed
+     * value. This comes in handy when storing larger amounts of data, for
+     * instance in an ordinary I2C RAM or EEPROM.
+     * SMBus AUTO_INC Write : S Addr|Wr [A] cmd1 A Data1 [A] Data2 [A]...
+     *                        DataN [A] P
+     * SMBus AUTO_INC Read  : S Addr|Rd [A] cmd1 A data1 [A] Sr Addr [A] Data1 A
+     *                        Data2 A...DataN A P
+     * If the device does not support AUTO_INC, set warFlags of
+     * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW to
+     * NV402C_CTRL_I2C_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW_WAR_FLAGS_NO_AUTO_INC.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW = 9,
+    /*!
+     * This transaction type is used to perform the EDID read via DDC.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC = 10,
+} NV402C_CTRL_I2C_TRANSACTION_TYPE;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW.
+ *
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * Transaction specific flags
+     * (see NV402C_CTRL_I2C_SMBUS_QUICK_RW_WAR_FLAGS_*).
+     */
+    NvU32  warFlags;
+} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW.
+ *
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * The main message data.
+     */
+    NvU8   message;
+} NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW.
+ *
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * This parameter specifies the number of bytes to read or
+     * write from the slave after the register address is written.
+     */
+    NvU32  messageLength;
+    /*!
+     * The main message data.
+     */
+    NV_DECLARE_ALIGNED(NvP64 pMessage, 8);
+} NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW.
+ *
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * The address of the register.
+     */
+    NvU8   registerAddress;
+    /*!
+     * The main message data.
+     */
+    NvU8   message;
+} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW.
+ *
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * The address of the register.
+     */
+    NvU8   registerAddress;
+    /*!
+     * The main message data.
+     */
+    NvU16  message;
+} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is
+ * NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW.
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * Transaction specific flags to be set (see
+     * NV402C_CTRL_I2C_BUFFER_RW_WAR_FLAGS_*)
+     */
+    NvU32  warFlags;
+    /*!
+     * The address of the register.
+     */
+    NvU8   registerAddress;
+    /*!
+     * This parameter specifies the number of bytes to read or
+     * write from the slave after the register address is written.
+     */
+    NvU32  messageLength;
+    /*!
+     * The main message data.
+     * C form: NvU8 message[NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX]
+     */
+    NV_DECLARE_ALIGNED(NvP64 pMessage, 8);
+} NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is
+ * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW.
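+ *
+ * A fill-in sketch (illustrative only; buf is a hypothetical client
+ * buffer and NV_PTR_TO_NvP64 is assumed from nvtypes.h):
+ *
+ *   NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW d = { 0 };
+ *   d.bWrite          = NV_FALSE;              // SMBus Block Read
+ *   d.registerAddress = 0x00;                  // command code
+ *   d.messageLength   = 32;                    // bytes to transfer
+ *   d.pMessage        = NV_PTR_TO_NvP64(buf);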
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * The address of the register.
+     */
+    NvU8   registerAddress;
+    /*!
+     * This parameter specifies the number of bytes to read or
+     * write from the slave after the register address is written.
+     */
+    NvU32  messageLength;
+    /*!
+     * The main message data.
+     * C form: NvU8 message[NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX]
+     */
+    NV_DECLARE_ALIGNED(NvP64 pMessage, 8);
+} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is
+ * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL.
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL {
+    /*!
+     * The address of the register.
+     */
+    NvU8  registerAddress;
+    /*!
+     * The message data to be written to the slave.
+     */
+    NvU16 writeMessage;
+    /*!
+     * The message data to be read from the slave.
+     */
+    NvU16 readMessage;
+} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is
+ * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL.
+ *
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL {
+    /*!
+     * The address of the register.
+     */
+    NvU8  registerAddress;
+    /*!
+     * This parameter specifies the number of bytes to write to the slave
+     * after the writeByteCount is sent to the slave.
+     */
+    NvU32 writeMessageLength;
+    /*!
+     * The message buffer to be written to the slave.
+     * C form: NvU8 writeMessage[NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX]
+     */
+    NvU8  writeMessage[NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX];
+    /*!
+     * This parameter specifies the number of bytes to read from the slave
+     * after the readByteCount is sent to the slave.
+     */
+    NvU32 readMessageLength;
+    /*!
+     * The message buffer to be read from the slave.
+     * C form: NvU8 readMessage[NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX]
+     */
+    NvU8  readMessage[NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX];
+} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is
+ * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW.
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * Transaction specific flags (see
+     * NV402C_CTRL_I2C_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW_WAR_FLAGS_*)
+     */
+    NvU32  warFlags;
+    /*!
+     * This parameter specifies how many bytes to write as part of the
+     * register address. If zero is specified, then no index will be sent.
+     */
+    NvU32  indexLength;
+    /*!
+     * Optional indexing data; aka the register address.
+     * C form: NvU8 index[NV402C_CTRL_I2C_INDEX_LENGTH_MAX]
+     */
+    NvU8   index[NV402C_CTRL_I2C_INDEX_LENGTH_MAX];
+    /*!
+     * This parameter specifies the number of bytes to read or
+     * write from the slave after the register address is written.
+     */
+    NvU32  messageLength;
+    /*!
+     * The main message data.
+     * C form: NvU8 message[NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX]
+     */
+    NV_DECLARE_ALIGNED(NvP64 pMessage, 8);
+} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC.
+ *
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC {
+    /*!
+     * The segment number of the EDID block which is to be read.
+     */
+    NvU8  segmentNumber;
+    /*!
+     * The address of the register.
+     */
+    NvU8  registerAddress;
+    /*!
+     * This parameter specifies the number of bytes to read or
+     * write from the slave after the register address is written.
+     */
+    NvU32 messageLength;
+    /*!
+     * The main message data.
+     * C form: NvU8 message[NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX]
+     */
+    NV_DECLARE_ALIGNED(NvP64 pMessage, 8);
+} NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA
+ *
+ * This union encapsulates the transaction data corresponding to the
+ * transaction types listed above.
+ */
+typedef union NV402C_CTRL_I2C_TRANSACTION_DATA {
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW smbusQuickData;
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW i2cByteData;
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW.
+     */
+    NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW i2cBlockData, 8);
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW.
+     */
+    NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW i2cBufferData, 8);
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW smbusByteData;
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW smbusWordData;
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW.
+     */
+    NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW smbusBlockData, 8);
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL smbusProcessData;
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL smbusBlockProcessData;
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW.
+     */
+    NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW smbusMultibyteRegisterData, 8);
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC.
+     */
+    NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC edidData, 8);
+} NV402C_CTRL_I2C_TRANSACTION_DATA;
+
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_PARAMS
+ *
+ * The params data structure for NV402C_CTRL_CMD_I2C_TRANSACTION.
+ */
+#define NV402C_CTRL_I2C_TRANSACTION_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV402C_CTRL_I2C_TRANSACTION_PARAMS {
+    /*!
+     * The logical port ID.
+     */
+    NvU8                             portId;
+    /*!
+     * This parameter specifies optional flags used to control certain modal
+     * features such as target speed and addressing mode. The currently
+     * defined fields are described previously; see NV402C_CTRL_I2C_FLAGS_*
+     */
+    NvU32                            flags;
+    /*!
+     * The address of the I2C slave.
+     */
+    NvU16                            deviceAddress;
+    /*!
+     * The transaction type.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE transType;
+    /*!
+     * The transaction data corresponding to the transaction type.
+     */
+    NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA transData, 8);
+} NV402C_CTRL_I2C_TRANSACTION_PARAMS;
+
+#define NV402C_CTRL_CMD_I2C_TRANSACTION (0x402c0105) /* finn: Evaluated from "(FINN_NV40_I2C_I2C_INTERFACE_ID << 8) | NV402C_CTRL_I2C_TRANSACTION_PARAMS_MESSAGE_ID" */
+
+
+/* _ctrl402c_h_ */
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h
new file mode 100644
index 0000000..fe28542
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h
@@ -0,0 +1,66 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl5070/ctrl5070base.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NV5070_DISPLAY control commands and parameters */ + +#define NV5070_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x5070, NV5070_CTRL_##cat, idx) + +/* Display command categories (6 bits) */ +#define NV5070_CTRL_RESERVED (0x00) +#define NV5070_CTRL_CHNCTL (0x01) +#define NV5070_CTRL_RG (0x02) +#define NV5070_CTRL_OR (0x04) +#define NV5070_CTRL_INST (0x05) +#define NV5070_CTRL_VERIF (0x06) +#define NV5070_CTRL_SYSTEM (0x07) +#define NV5070_CTRL_EVENT (0x09) + +/* + * NV5070_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV5070_CTRL_CMD_NULL (0x50700000) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + +// This struct must be the first member of all +// 5070 control calls +typedef struct NV5070_CTRL_CMD_BASE_PARAMS { + NvU32 subdeviceIndex; +} NV5070_CTRL_CMD_BASE_PARAMS; + +/* _ctrl5070base_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h new file mode 100644 index 0000000..ef012c7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h @@ -0,0 +1,1179 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl5070/ctrl5070chnc.finn +// + +#include "ctrl/ctrl5070/ctrl5070base.h" +#include "ctrl5070common.h" +#include "nvdisptypes.h" + +#define NV5070_CTRL_CMD_NUM_DISPLAY_ID_DWORDS_PER_HEAD 2 + + + +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_NONE (0x00000000) +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_PI (NVBIT(0)) +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_SKIP_NOTIF (NVBIT(1)) +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_SKIP_SEMA (NVBIT(2)) +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_INTERLOCK (NVBIT(3)) +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_FLIPLOCK (NVBIT(4)) +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_TRASH_ONLY (NVBIT(5)) +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_TRASH_AND_ABORT (NVBIT(6)) + +#define NV5070_CTRL_IDLE_CHANNEL_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV5070_CTRL_IDLE_CHANNEL_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 channelClass; + NvU32 channelInstance; + + NvU32 desiredChannelStateMask; + NvU32 accelerators; // For future expansion. Not yet implemented + NvU32 timeout; // For future expansion. Not yet implemented + NvBool restoreDebugMode; +} NV5070_CTRL_IDLE_CHANNEL_PARAMS; + +/* + * NV5070_CTRL_CMD_STOP_OVERLAY + * + * This command tries to turn the overlay off ASAP. + * + * channelInstance + * This field indicates which of the two instances of the overlay + * channel the cmd is meant for. + * + * notifyMode + * This field indicates the action RM should take once the overlay has + * been successfully stopped. The options are (1) Set a notifier + * (2) Set the notifier and generate an OS event + * + * hNotifierCtxDma + * Handle to the ctx dma for the notifier that must be written once + * overlay is stopped. The standard NvNotification notifier structure + * is used. + * + * offset + * Offset within the notifier context dma where the notifier begins. + * The offset must be 16-byte aligned. + * + * hEvent + * Handle to the event that RM must use to awaken the client when + * notifyMode is WRITE_AWAKEN. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT: Invalid notify mode + * NV_ERR_INVALID_CHANNEL: When the overlay is unallocated + * NV_ERR_INVALID_OWNER: Callee isn't the owner of the channel + * NV_ERR_INVALID_OBJECT_HANDLE: Notif ctx dma not found + * NV_ERR_INVALID_OFFSET: Bad offset within notif ctx dma + * NV_ERR_INSUFFICIENT_RESOURCES + * NV_ERR_TIMEOUT: RM timed out waiting to inject methods + */ +#define NV5070_CTRL_CMD_STOP_OVERLAY (0x50700102) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_CMD_STOP_OVERLAY_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_STOP_OVERLAY_NOTIFY_MODE_WRITE (0x00000000) +#define NV5070_CTRL_CMD_STOP_OVERLAY_NOTIFY_MODE_WRITE_AWAKEN (0x00000001) + +#define NV5070_CTRL_CMD_STOP_OVERLAY_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV5070_CTRL_CMD_STOP_OVERLAY_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 channelInstance; + NvU32 notifyMode; + NvHandle hNotifierCtxDma; + NvU32 offset; + NV_DECLARE_ALIGNED(NvP64 hEvent, 8); +} NV5070_CTRL_CMD_STOP_OVERLAY_PARAMS; + + + +/* + * NV5070_CTRL_CMD_IS_MODE_POSSIBLE + * + * This command is used by DD to determine whether or not a given mode + * is possible given the current nvclk, mclk, dispclk and potentially some + * other parameters that are normally hidden from it.
All the parameters + * except IsPossible (output), Force422 (output), MinPState (input/output), + * minPerfLevel (output), CriticalWatermark (output), worstCaseMargin (output), + * and worstCaseDomain (output) params are supplied by the caller. + * + * HeadActive + * Whether or not the params for this head are relevant. + * + * PixelClock + * Frequency: Pixel clk frequency in KHz. + * Adj1000Div1001: 1000/1001 multiplier for pixel clock. + * + * RasterSize + * Width: Total width of the raster. Also referred to as HTotal. + * Height: Total height of the raster. Also referred to as VTotal. + * + * RasterBlankStart + * X: Start of horizontal blanking for the raster. + * Y: Start of vertical blanking for the raster. + * + * RasterBlankEnd + * X: End of horizontal blanking for the raster. + * Y: End of vertical blanking for the raster. + * + * RasterVertBlank2 + * YStart: Start of second blanking for second field for an + * interlaced raster. This field is irrelevant when raster is + * progressive. + * YEnd: End of second blanking for second field for an + * interlaced raster. This field is irrelevant when raster is + * progressive. + * + * Control + * RasterStructure: Whether the raster is progressive or interlaced. + * + * OutputScaler + * VerticalTaps: Vertical scaler taps. + * HorizontalTaps: Horizontal scaler taps. + * Force422: Whether OutputScaler is operating in 422 mode or not. + * + * ViewportSizeOut + * Width: Width of output viewport. + * Height: Height of output viewport. + * Both the above fields are irrelevant for G80. + * + * ViewportSizeOutMin + * Width: Minimum possible/expected width of output viewport. + * Height: Minimum possible/expected height of output viewport. + * + * ViewportSizeIn + * Width: Width of input viewport. + * Height: Height of input viewport. + * + * Params + * Format: Core channel's pixel format. See the enumerants following + * the variable declaration for possible options. + * SuperSample: Whether to use X1AA or X4AA in core channel. + * This parameter is ignored for G80. + * + * BaseUsageBounds + * Usable: Whether or not the base channel is expected to be used. + * PixelDepth: Maximum pixel depth allowed in base channel. + * SuperSample: Whether or not X4AA is allowed in base channel. + * BaseLutUsage: Base LUT Size + * OutputLutUsage: Output LUT size + * + * OverlayUsageBounds + * Usable: Whether or not the overlay channel is expected to be used. + * PixelDepth: Maximum pixel depth allowed in overlay channel. + * OverlayLutUsage: Overlay LUT Size + * + * BaseLutLo + * Enable: Specifies whether the core channel's base LUT is enabled or not. + * Mode: Specifies the LUT Mode. + * NeverYieldToBase: Specifies whether NEVER_YIELD_TO_BASE is enabled or not. + * + * OutputLutLo + * Enable: Specifies whether the core channel's output LUT is enabled or not. + * Mode: Specifies the LUT Mode. + * NeverYieldToBase: Specifies whether NEVER_YIELD_TO_BASE is enabled or not. + * + * outputResourcePixelDepthBPP + * Specifies the output pixel depth with scaler mode. + * + * CriticalWatermark + * If MinPState is set to _NEED_MIN_PSTATE, this will return the critical + * watermark level at the minimum Pstate. Otherwise, this will return + * the critical watermark at the level that the IMP calculations are + * otherwise performed at. + * + * pixelReplicateMode + * Specifies the replication mode, whether X2 or X4. Set this parameter + * to OFF if there is no pixel replication. + * + * numSSTLinks + * Number of Single Stream Transport links which will be used by the + * SOR.
"0" means to use the number indicated by the most recent + * NV0073_CTRL_CMD_DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST call. + * + * RequestedOperation + * This parameter is used to determine whether + * 1. DD is simplying querying whether or not the specified mode is + * possible (REQUESTED_OPER = _QUERY) or + * 2. DD is about to set the specified mode and RM should make + * appropriate preparations to make the mode possible. DD should + * never pass in a mode that was never indicated by RM as possible + * when DD queried for the possibility of the mode. This + * corresponds to REQUESTED_OPER = _PRE_MODESET. + * 3. DD just finished setting the specified mode. RM can go ahead + * and make changes like lowering the perf level if desired. This + * corresponds to REQUESTED_OPER = _POST_MODESET. This parameter is + * useful when we are at a higher perf level in a mode that's not + * possible at a lower perf level and want to go to a mode that is + * possible even at a lower perf level. In such cases, lowering + * perf level before modeset is complete is dangerous as it will + * cause underflow. RM will wait until the end of modeset to lower + * the perf level. + * + * options + * Specifies a bitmask for options. + * NV5070_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN + * Tells IMP to calculate worstCaseMargin and worstCaseDomain. + * + * IsPossible + * This is the first OUT param for this call. It indicates whether + * or not the current mode is possible. + * + * MinPState + * MinPState is an IO (in/out) variable; it gives the minimum p-state + * value at which the mode is possible on a PStates 2.0 system if the + * parameter is initialized by the caller with _NEED_MIN_PSTATE. If + * _NEED_MIN_PSTATE is not specified, IMP query will just run at the + * max available perf level and return results for that pstate. + * + * If the minimum pstate is required, then MasterLockMode, + * MasterLockPin, SlaveLockMode, and SlaveLockPin must all be + * initialized. + * + * On a PStates 3.0 system, the return value for MinPState is + * undefined, but minPerfLevel can return the minimum IMP v-pstate. + * + * minPerfLevel + * On a PStates 3.0 system, minPerfLevel returns the minimum IMP + * v-pstate at which the mode is possible. On a PStates 2.0 system, + * minPerfLevel returns the minimum perf level at which the mode is + * possible. + * + * minPerfLevel is valid only if MinPState is initialized to + * _NEED_MIN_PSTATE. + * + * worstCaseMargin + * Returns the ratio of available bandwidth to required bandwidth, + * multiplied by NV5070_CTRL_IMP_MARGIN_MULTIPLIER. Available + * bandwidth is calculated in the worst case bandwidth domain, i.e., + * the domain with the least available margin. Bandwidth domains + * include the IMP-relevant clock domains, and possibly other virtual + * bandwidth domains such as AWP. + * + * Note that IMP checks additional parameters besides the bandwidth + * margins, but only the bandwidth margin is reported here, so it is + * possible for a mode to have a more restrictive domain that is not + * reflected in the reported margin result. + * + * This result is not guaranteed to be valid if the mode is not + * possible. + * + * Note also that the result is generally calculated for the highest + * pstate possible (usually P0). But if _NEED_MIN_PSTATE is specified + * with the MinPState parameter, the result will be calculated for the + * min possible pstate (or the highest possible pstate, if the mode is + * not possible). 
+ * + * The result is valid only if + * NV5070_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN is set in + * "options". + * + * worstCaseDomain + * Returns a short text string naming the domain for the margin + * returned in "worstCaseMargin". See "worstCaseMargin" for more + * information. + * + * bUseCachedPerfState + * Indicates that RM should use cached values for the fastest + * available perf level (v-pstate for PStates 3.0 or pstate for + * PStates 2.0) and dispclk. This feature allows the query call to + * execute faster, and is intended to be used, for example, during + * mode enumeration, when many IMP query calls are made in close + * succession, and perf conditions are not expected to change between + * query calls. When IMP has not been queried recently, it is + * recommended to NOT use cached values, in case perf conditions have + * changed and the cached values no longer reflect the current + * conditions. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * + * Assumptions/Limitations: + * - If the caller sends any methods that alter the State Cache before calling + * any of the following functions: + * NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_QUERY_USE_SC + * NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_PRE_MODESET_USE_SC + * NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_POST_MODESET_USE_SC + * the caller must repeatedly issue NV5070_CTRL_CMD_GET_CHANNEL_INFO, and delay until the + * returned channelState is one of: + * NV5070_CTRL_CMD_GET_CHANNEL_INFO_STATE_IDLE, + * NV5070_CTRL_CMD_GET_CHANNEL_INFO_STATE_WRTIDLE, or + * NV5070_CTRL_CMD_GET_CHANNEL_INFO_STATE_EMPTY. + * This ensures that all commands have reached the State Cache before RM reads + * them. + * + * + */ +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE (0x50700109) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_QUERY (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_PRE_MODESET (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_POST_MODESET (0x00000002) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_QUERY_USE_SC (0x00000003) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_PRE_MODESET_USE_SC (0x00000004) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_POST_MODESET_USE_SC (0x00000005) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_SUPERVISOR (0x00000007) + +#define NV5070_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN (0x00000001) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_IS_POSSIBLE_NO (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_IS_POSSIBLE_YES (0x00000001) + +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_UNDEFINED (0x00000000) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P0 (0x00000001) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P1 (0x00000002) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P2 (0x00000004) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P3 (0x00000008) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P8 (0x00000100) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P10 (0x00000400) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P12 (0x00001000) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P15 (0x00008000) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_MAX NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P15 +#define NV5070_CTRL_IS_MODE_POSSIBLE_NEED_MIN_PSTATE (0x10101010) +#define
NV5070_CTRL_IS_MODE_POSSIBLE_NEED_MIN_PSTATE_DEFAULT (0x00000000) + +#define NV5070_CTRL_IMP_MARGIN_MULTIPLIER (0x00000400) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_HEAD_ACTIVE_NO (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_HEAD_ACTIVE_YES (0x00000001) + +#define NV5070_CTRL_IS_MODE_POSSIBLE_DISPLAY_ID_SKIP_IMP_OUTPUT_CHECK (0xAAAAAAAA) + +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PIXEL_CLOCK_ADJ1000DIV1001_NO (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PIXEL_CLOCK_ADJ1000DIV1001_YES (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_CONTROL_STRUCTURE_INTERLACED (0x00000001) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_VERTICAL_TAPS_1 (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_VERTICAL_TAPS_2 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_VERTICAL_TAPS_3 (0x00000002) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_VERTICAL_TAPS_3_ADAPTIVE (0x00000003) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_VERTICAL_TAPS_5 (0x00000004) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_HORIZONTAL_TAPS_1 (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_HORIZONTAL_TAPS_2 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_HORIZONTAL_TAPS_8 (0x00000002) + +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_SCALER_FORCE422_MODE_DISABLE (0x00000000) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_SCALER_FORCE422_MODE_ENABLE (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_I8 (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_VOID16 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_VOID32 (0x00000002) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x00000003) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8R8G8B8 (0x00000004) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A2B10G10R10 (0x00000005) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8B8G8R8 (0x00000006) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_R5G6B5 (0x00000007) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A1R5G5B5 (0x00000008) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_SUPER_SAMPLE_X1AA (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_SUPER_SAMPLE_X4AA (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_USE_CURRENT (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_NO (0x00000001) +#define 
NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_YES (0x00000002) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_USE_CURRENT (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_8 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_16 (0x00000002) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_32 (0x00000003) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_64 (0x00000004) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_SUPER_SAMPLE_USE_CURRENT (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_SUPER_SAMPLE_X1AA (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_SUPER_SAMPLE_X4AA (0x00000002) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_USE_CURRENT (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_NO (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_YES (0x00000002) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_USE_CURRENT (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_16 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_32 (0x00000002) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_64 (0x00000003) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_ENABLE_ENABLE (0x00000001) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000002) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000003) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000004) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000005) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000006) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000007) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define 
NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000002) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000003) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000004) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000005) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000006) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000007) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_X4 (0x00000002) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + struct { + NvU32 HeadActive; + struct { + NvU32 Frequency; + + NvU32 Adj1000Div1001; + } PixelClock; + + struct { + NvU32 Width; + NvU32 Height; + } RasterSize; + + struct { + NvU32 X; + NvU32 Y; + } RasterBlankStart; + + struct { + NvU32 X; + NvU32 Y; + } RasterBlankEnd; + + struct { + NvU32 YStart; + NvU32 YEnd; + } RasterVertBlank2; + + struct { + NvU32 Structure; +/* + * Note: For query calls, the lock modes and lock pins are used only if the min + * pstate is required (i.e., if MinPState is set to + * NV5070_CTRL_IS_MODE_POSSIBLE_NEED_MIN_PSTATE). + */ + NV_DISP_LOCK_MODE MasterLockMode; + NV_DISP_LOCK_PIN MasterLockPin; + NV_DISP_LOCK_MODE SlaveLockMode; + NV_DISP_LOCK_PIN SlaveLockPin; + } Control; + + struct { + NvU32 VerticalTaps; + NvU32 HorizontalTaps; + NvBool Force422; + } OutputScaler; + + struct { + NvU32 Width; + NvU32 Height; + } ViewportSizeOut; + + struct { + NvU32 Width; + NvU32 Height; + } ViewportSizeOutMin; + + struct { + NvU32 Width; + NvU32 Height; + } ViewportSizeOutMax; + + struct { + NvU32 Width; + NvU32 Height; + } ViewportSizeIn; + + struct { + NvU32 Format; + NvU32 SuperSample; + } Params; + + struct { + NvU32 Usable; + NvU32 PixelDepth; + NvU32 SuperSample; + NvU32 BaseLutUsage; + NvU32 OutputLutUsage; + } BaseUsageBounds; + + struct { + NvU32 Usable; + NvU32 PixelDepth; + NvU32 OverlayLutUsage; + } OverlayUsageBounds; + + struct { + NvBool Enable; + NvU32 Mode; + NvBool NeverYieldToBase; + } BaseLutLo; + + struct { + NvBool Enable; + NvU32 Mode; + NvBool NeverYieldToBase; + } OutputLutLo; + + NvU32 displayId[NV5070_CTRL_CMD_NUM_DISPLAY_ID_DWORDS_PER_HEAD]; + NvU32 outputResourcePixelDepthBPP; + + NvU32 CriticalWatermark; // in pixels + + } Head[NV5070_CTRL_CMD_MAX_HEADS]; + + struct { + NvU32 owner; + NvU32 protocol; + } Dac[NV5070_CTRL_CMD_MAX_DACS]; + + struct { +// +// owner field is deprecated. In the future, all client calls should set +// ownerMask and bUseSorOwnerMask. bUseSorOwnerMask must be set in order +// to use ownerMask. 
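+// For example (an illustrative sketch, not normative): for an SOR driven
+// by head 0 alone, a client would set bUseSorOwnerMask = NV_TRUE and
+// ownerMask = NV5070_CTRL_CMD_SOR_OWNER_MASK_HEAD(0).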
+// + NvU32 owner; + NvU32 ownerMask; // Mask of heads that own this SOR + + NvU32 protocol; + NvU32 pixelReplicateMode; + + NvU8 numSSTLinks; + } Sor[NV5070_CTRL_CMD_MAX_SORS]; + + NvBool bUseSorOwnerMask; + + struct { + NvU32 owner; + NvU32 protocol; + } Pior[NV5070_CTRL_CMD_MAX_PIORS]; + + + NvU32 RequestedOperation; +// This argument is for VERIF and INTERNAL use only + NvU32 options; + NvU32 IsPossible; + NvU32 MinPState; + + NvU32 minPerfLevel; +// +// Below are the possible output values for the MinPState variable. The lower +// the p-state value, the higher the power consumption; if no p-states are +// defined on the chip, zero is returned. +// + +// +// Below are the possible input values for the MinPState variable. By default, +// IMP calculates whether the mode is possible at the maximum available p-state +// and returns that state in this variable. +// + NvU32 worstCaseMargin; + +// +// The calculated margin is multiplied by a constant, so that it can be +// represented as an integer with reasonable precision. "0x400" was chosen +// because it is a power of two, which might allow some compilers/CPUs to +// simplify the calculation by doing a shift instead of a multiply/divide. +// (And 0x400 is 1024, which is close to 1000, so that may simplify visual +// interpretation of the raw margin value.) +// + char worstCaseDomain[8]; + + NvBool bUseCachedPerfState; +} NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_CHANNEL_INFO + * + * This command returns the current channel state. + * + * channelClass + * This field indicates the hw class number (507A-507E) + * + * channelInstance + * This field indicates which of the two instances of the channel + * (in case there are two. ex: base, overlay etc) the cmd is meant for. + * Note that core channel has only one instance and the field should + * be set to 0 for core channel. + * + * channelState + * This field indicates the current channel state in a mask form that + * is compatible with NV5070_CTRL_CMD_IDLE_CHANNEL. A mask format + * allows clients to check for one from a group of states. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + * + * The display driver uses this call to ensure that all its methods have + * propagated through the hardware's internal FIFO + * (NV5070_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING) before it calls + * RM to check whether or not the mode it set up in Assembly State Cache will + * be possible. Note that the display driver cannot use a completion notifier + * in this case: the completion notifier is associated with Update, and Update + * propagates state from Assembly to Armed, which the display driver does not + * want to affect when merely checking whether a mode is possible.
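+ * + * A minimal polling sketch (hypothetical client code; the hClient/hDisplay + * handles, the NvRmControl entry point, and the 0x507D core channel class + * are assumptions, not defined in this header): + * + * NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS info = { 0 }; + * info.channelClass = 0x507D; // core channel + * info.channelInstance = 0; // core channel has a single instance + * do { + * status = NvRmControl(hClient, hDisplay, + * NV5070_CTRL_CMD_GET_CHANNEL_INFO, + * &info, sizeof(info)); + * } while ((status == NV_OK) && + * !(info.channelState & + * NV5070_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING));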
+ */ +#define NV5070_CTRL_CMD_GET_CHANNEL_INFO (0x5070010b) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_IDLE NV5070_CTRL_CMD_CHANNEL_STATE_IDLE +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_WRTIDLE NV5070_CTRL_CMD_CHANNEL_STATE_WRTIDLE +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_EMPTY NV5070_CTRL_CMD_CHANNEL_STATE_EMPTY +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_FLUSHED NV5070_CTRL_CMD_CHANNEL_STATE_FLUSHED +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_BUSY NV5070_CTRL_CMD_CHANNEL_STATE_BUSY +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_DEALLOC NV5070_CTRL_CMD_CHANNEL_STATE_DEALLOC +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_DEALLOC_LIMBO NV5070_CTRL_CMD_CHANNEL_STATE_DEALLOC_LIMBO +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_LIMBO1 NV5070_CTRL_CMD_CHANNEL_STATE_LIMBO1 +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_LIMBO2 NV5070_CTRL_CMD_CHANNEL_STATE_LIMBO2 +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_FCODEINIT NV5070_CTRL_CMD_CHANNEL_STATE_FCODEINIT +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_FCODE NV5070_CTRL_CMD_CHANNEL_STATE_FCODE +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_VBIOSINIT NV5070_CTRL_CMD_CHANNEL_STATE_VBIOSINIT +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_VBIOSOPER NV5070_CTRL_CMD_CHANNEL_STATE_VBIOSOPER +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_UNCONNECTED NV5070_CTRL_CMD_CHANNEL_STATE_UNCONNECTED +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_INITIALIZE NV5070_CTRL_CMD_CHANNEL_STATE_INITIALIZE +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_SHUTDOWN1 NV5070_CTRL_CMD_CHANNEL_STATE_SHUTDOWN1 +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_SHUTDOWN2 NV5070_CTRL_CMD_CHANNEL_STATE_SHUTDOWN2 +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING (NV5070_CTRL_GET_CHANNEL_INFO_STATE_EMPTY | NV5070_CTRL_GET_CHANNEL_INFO_STATE_WRTIDLE | NV5070_CTRL_GET_CHANNEL_INFO_STATE_IDLE) +#define NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 channelClass; + NvU32 channelInstance; + NvBool IsChannelInDebugMode; + + NvU32 channelState; +} NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS; + + + +/* + * NV5070_CTRL_CMD_SET_ACCL + * + * This command turns accelerators on and off. The use of this command + * should be restricted as it may have undesirable effects. Its + * purpose is to provide a mechanism for clients to use the + * accelerator bits to get into states that are either not detectable + * by the RM or may take longer to reach than we think is reasonable + * to wait in the RM. + * + * NV5070_CTRL_CMD_GET_ACCL + * + * This command queries the current state of the accelerators. + * + * channelClass + * This field indicates the hw class number (507A-507E) + * + * channelInstance + * This field indicates which of the two instances of the channel + * (in case there are two. ex: base, overlay etc) the cmd is meant for. + * Note that core channel has only one instance and the field should + * be set to 0 for core channel. + * + * accelerators + * Accelerators to be set in the SET_ACCEL command. Returns the + * currently set accelerators on the GET_ACCEL command. + * + * accelMask + * A mask to specify which accelerators to change with the + * SET_ACCEL command. This field does nothing in the GET_ACCEL + * command.
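+ * + * For example (illustrative, not normative): to turn on only the + * TRASH_ONLY accelerator while leaving every other accelerator bit + * unchanged, a client would issue SET_ACCL with + * accelerators = NV5070_CTRL_ACCL_TRASH_ONLY and + * accelMask = NV5070_CTRL_ACCL_TRASH_ONLY.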
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_CHANNEL + * NV_ERR_INVALID_OWNER + * NV_ERR_GENERIC + * + */ + +#define NV5070_CTRL_CMD_SET_ACCL (0x5070010c) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_SET_ACCL_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_ACCL (0x5070010d) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_GET_ACCL_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_ACCL_NONE NV5070_CTRL_IDLE_CHANNEL_ACCL_NONE +#define NV5070_CTRL_ACCL_IGNORE_PI NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_PI +#define NV5070_CTRL_ACCL_SKIP_NOTIF NV5070_CTRL_IDLE_CHANNEL_ACCL_SKIP_NOTIF +#define NV5070_CTRL_ACCL_SKIP_SEMA NV5070_CTRL_IDLE_CHANNEL_ACCL_SKIP_SEMA +#define NV5070_CTRL_ACCL_IGNORE_INTERLOCK NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_INTERLOCK +#define NV5070_CTRL_ACCL_IGNORE_FLIPLOCK NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_FLIPLOCK +#define NV5070_CTRL_ACCL_TRASH_ONLY NV5070_CTRL_IDLE_CHANNEL_ACCL_TRASH_ONLY +#define NV5070_CTRL_ACCL_TRASH_AND_ABORT NV5070_CTRL_IDLE_CHANNEL_ACCL_TRASH_AND_ABORT +#define NV5070_CTRL_SET_ACCL_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV5070_CTRL_SET_ACCL_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 channelClass; + NvU32 channelInstance; + + NvU32 accelerators; + NvU32 accelMask; +} NV5070_CTRL_SET_ACCL_PARAMS; +#define NV5070_CTRL_GET_ACCL_PARAMS_MESSAGE_ID (0xDU) + +typedef NV5070_CTRL_SET_ACCL_PARAMS NV5070_CTRL_GET_ACCL_PARAMS; + +/* + * NV5070_CTRL_CMD_STOP_BASE + * + * This command tries to turn the base channel off ASAP. + * + * channelInstance + * This field indicates which of the two instances of the base + * channel the cmd is meant for. + * + * notifyMode + * This field indicates the action RM should take once the base + * channel has been successfully stopped. The options are (1) Set a + * notifier (2) Set the notifier and generate an OS event + * + * hNotifierCtxDma + * Handle to the ctx dma for the notifier that must be written once + * base channel is stopped. The standard NvNotification notifier + * structure is used. + * + * offset + * Offset within the notifier context dma where the notifier begins. + * The offset must be 16-byte aligned. + * + * hEvent + * Handle to the event that RM must use to awaken the client when + * notifyMode is WRITE_AWAKEN. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT: Invalid notify mode + * NV_ERR_INVALID_CHANNEL: When the base channel is unallocated + * NV_ERR_INVALID_OWNER: Callee isn't the owner of the channel + * NV_ERR_INVALID_OBJECT_HANDLE: Notif ctx dma not found + * NV_ERR_INVALID_OFFSET: Bad offset within notif ctx dma + * NV_ERR_INSUFFICIENT_RESOURCES + * NV_ERR_TIMEOUT: RM timed out waiting to inject methods + */ +#define NV5070_CTRL_CMD_STOP_BASE (0x5070010e) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_CMD_STOP_BASE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_STOP_BASE_NOTIFY_MODE_WRITE (0x00000000) +#define NV5070_CTRL_CMD_STOP_BASE_NOTIFY_MODE_WRITE_AWAKEN (0x00000001) + +#define NV5070_CTRL_CMD_STOP_BASE_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NV5070_CTRL_CMD_STOP_BASE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 channelInstance; + NvU32 notifyMode; + NvHandle hNotifierCtxDma; + NvU32 offset; + NV_DECLARE_ALIGNED(NvP64 hEvent, 8); +} NV5070_CTRL_CMD_STOP_BASE_PARAMS; + + + +/* + * NV5070_CTRL_CMD_GET_PINSET_COUNT + * + * Get number of pinsets on this GPU.
+ * + * pinsetCount [out] + * Number of pinsets on this GPU is returned in this parameter. + * This count includes pinsets that are not connected. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV5070_CTRL_CMD_GET_PINSET_COUNT (0x50700115) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_GET_PINSET_COUNT_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_GET_PINSET_COUNT_PARAMS_MESSAGE_ID (0x15U) + +typedef struct NV5070_CTRL_GET_PINSET_COUNT_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 pinsetCount; +} NV5070_CTRL_GET_PINSET_COUNT_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_PINSET_PEER + * + * Retrieve the pinset/GPU that is connected to the specified pinset on + * this GPU. + * + * pinset [in] + * Pinset on this GPU for which peer info is to be returned must be + * specified in this parameter. + * + * peerGpuInstance [out] + * Instance of the GPU on the other side of the connection is + * returned in this parameter. + * + * peerPinset [out] + * Pinset on the other side of the connection is returned in this + * parameter. If there is no connection then the value is + * NV5070_CTRL_CMD_GET_PINSET_PEER_PEER_PINSET_NONE. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV5070_CTRL_CMD_GET_PINSET_PEER (0x50700116) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_GET_PINSET_PEER_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_PINSET_PEER_PEER_GPUINSTANCE_NONE (0xffffffff) + +#define NV5070_CTRL_CMD_GET_PINSET_PEER_PEER_PINSET_NONE (0xffffffff) + +#define NV5070_CTRL_GET_PINSET_PEER_PARAMS_MESSAGE_ID (0x16U) + +typedef struct NV5070_CTRL_GET_PINSET_PEER_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 pinset; + + NvU32 peerGpuInstance; + NvU32 peerPinset; +} NV5070_CTRL_GET_PINSET_PEER_PARAMS; + +/* + * NV5070_CTRL_CMD_SET_RMFREE_FLAGS + * + * This command sets the flags for an upcoming call to RmFree(). + * After the RmFree() call completes, whether it succeeded or not, the + * flags are cleared. + * + * flags + * This parameter holds the NV0000_CTRL_GPU_SET_RMFREE_FLAGS_* + * flags to be passed for the next RmFree() command only. + * The flags can be one of the following: + * - NV0000_CTRL_GPU_SET_RMFREE_FLAGS_NONE: + * explicitly clears the flags + * - NV0000_CTRL_GPU_SET_RMFREE_FLAGS_FREE_PRESERVES_HW: + * instructs RmFree() to preserve the HW configuration. After + * RmFree() is run this flag is cleared. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV5070_CTRL_CMD_SET_RMFREE_FLAGS (0x50700117) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_SET_RMFREE_FLAGS_NONE 0x00000000 +#define NV5070_CTRL_SET_RMFREE_FLAGS_PRESERVE_HW 0x00000001 +#define NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS_MESSAGE_ID (0x17U) + +typedef struct NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 flags; +} NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS; + + +/* + * NV5070_CTRL_CMD_IMP_SET_GET_PARAMETER + * + * This command allows the caller to set or get certain IMP parameters. + * Changes take effect on the next modeset and persist across modesets + * until the driver is unloaded or the user changes the override.
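+ * + * For example (an illustrative sketch; the chosen values are assumptions): + * to disable glitchless modesets, a client would issue this control with + * index = NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_GLITCHLESS_MODESET_ENABLE, + * operation = NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_SET, and + * value = 0, leaving pstateApi = NV2080_CTRL_PERF_PSTATES_UNDEFINED.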
+ * index + * One of NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_XXX defines - + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IMP_ENABLE + * Only supports "get" operation. If FALSE, IMP is being bypassed and + * all Is Mode Possible queries are answered with "mode is possible" + * and registers normally set by IMP are not changed from their defaults. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED + * Should IMP consider using ASR. ASR won't be allowed unless it is set to + * "allowed" through both _IS_ASR_ALLOWED and _IS_ASR_ALLOWED_PER_PSTATE. + * Note that IMP will not run ASR and MSCG at the same time. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED_PER_PSTATE + * Should IMP consider using ASR when this pstate is being used. ASR won't + * be allowed unless it is set to "allowed" through both + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED and + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED_PER_PSTATE. + * So when NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED + * returns FALSE, IMP won't consider ASR for any p-state. Note that IMP + * will not run ASR and MSCG at the same time. This function is valid + * only on PStates 2.0 systems. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_MSCG_ALLOWED_PER_PSTATE + * Should IMP consider using MSCG when this pstate is being used. MSCG + * won't be allowed if the MSCG feature isn't enabled, even if it is set + * to "allowed" through + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_MSCG_ALLOWED_PER_PSTATE. + * Use NV2080_CTRL_CMD_MC_QUERY_POWERGATING_PARAMETER to query if MSCG is + * supported and enabled. Note that IMP will not run ASR and MSCG at the + * same time. This function is valid only on PStates 2.0 systems. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PER_PSTATE + * Only supports "get" operation. Returns which stutter feature is being + * engaged in hardware when running on the given pstate. Valid values are: + * NV5070_CTRL_IMP_STUTTER_FEATURE_NONE + * This value indicates no stutter feature is enabled. + * NV5070_CTRL_IMP_STUTTER_FEATURE_ASR + * This value indicates ASR is the current enabled stutter feature. + * NV5070_CTRL_IMP_STUTTER_FEATURE_MSCG + * This value indicates MSCG is the current enabled stutter feature. + * Note that the system will not run ASR and MSCG at the same time. This + * function is valid only on PStates 2.0 systems. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PREDICTED_EFFICIENCY_PER_PSTATE + * Only supports "get" operation. Returns the efficiency which IMP + * predicted for the engaged stutter feature (ASR or MSCG) when running + * on the given pstate. Normally, the actual efficiency should be higher + * than the calculated predicted efficiency. For MSCG, the predicted + * efficiency assumes no mempool compression. If compression is enabled + * with MSCG, the actual efficiency may be significantly higher. Returns + * 0 if no stutter feature is running. On PStates 3.0 systems, the + * pstateApi parameter is ignored, and the result is returned for the min + * IMP v-pstate possible. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS + * Only supports "get" operation. Returns information about whether and + * how an mclk switch is possible. Valid fields are: + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_POSSIBLE + * This field is not head-specific and indicates if mclk switch is + * possible with the current mode.
+ * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_OVERRIDE_MEMPOOL + * This field is not head-specific and indicates if mclk switch is + * possible with the nominal mempool settings (_NO) or if special + * settings are required in order for mclk switch to be possible (_YES). + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_MID_WATERMARK + * Each head has its own setting for this field. If this field is + * set to _YES, then the specified head will allow mclk switch to + * begin if mempool occupancy exceeds the MID_WATERMARK setting. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_DWCF + * Each head has its own setting for this field. If this field is + * set to _YES, then the specified head will allow mclk switch to + * begin if the head is in its DWCF interval, and the mempool + * occupancy is greater than or equal to the DWCF watermark. + * Note: If neither _MID_WATERMARK nor _DWCF is set to _YES, then the + * specified head is ignored when determining when it is OK to start an + * mclk switch. Mclk switch must be allowed (or ignored) by all heads + * before an mclk switch will actually begin. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_FORCE_MIN_MEMPOOL + * Should min mempool be forced. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MEMPOOL_COMPRESSION + * Should mempool compression be enabled. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_CURSOR_SIZE + * The cursor size (in horizontal pixels) used by IMP (rather than the + * actual cursor size) for its computation. + * A maximum value is in place for what can be set. It can be queried + * after resetting the value - it gets reset to the maximum possible + * value. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_ENABLE + * Enables or disables the ISO FB latency test. + * The test records the max ISO FB latency for all heads during the test period (excluding modeset time). + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_WC_TOTAL_LATENCY + * Retrieves the calculated wcTotalLatency of the ISO FB latency test. + * wcTotalLatency is the worst case time for a request's data to come back after the request is issued. + * It is the sum of IMP calculated FbLatency and stream delay. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_MAX_LATENCY + * Retrieves the max latency among all heads during the whole ISO FB latency test. + * The max latency is compared with the calculated wcTotalLatency to decide + * whether the ISO FB latency test passes. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_MAX_TEST_PERIOD + * Retrieves the max test period used during the whole ISO FB latency test. + * Experimental results show that the test period should be at least 10 seconds + * to approximate the worst-case FB latency in a real situation. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_GLITCHLESS_MODESET_ENABLE + * This enables or disables glitchless modesets. Modesets can be + * glitchless if: + * (1) There are no raster timing changes, and + * (2) The resource requirements of all bandwidth clients are either not + * changing, or they are all changing in the same direction (all + * increasing or all decreasing). + * If glitchless modeset is disabled, or is not possible, heads will be + * blanked during the modeset transition. + * pstateApi + * NV2080_CTRL_PERF_PSTATES_PXXX value.
+ * Required for NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED_PER_PSTATE, + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_MSCG_ALLOWED_PER_PSTATE, + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PER_PSTATE and + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PREDICTED_EFFICIENCY_PER_PSTATE + * on PStates 2.0 systems. For other indices it must be + * NV2080_CTRL_PERF_PSTATES_UNDEFINED. Not used on PStates 3.0 systems. + * head + * Head index, which is required when querying the mclk switch feature. + * (index = NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS) + * operation + * NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_GET + * Indicates a "get" operation. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_SET + * Indicates a "set" operation. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_RESET + * Indicates a "reset" operation. This operation will reset the values for + * all indices to their RM defaults. + * value + * Value for new setting of a "set" operation, or the returned value of a + * "get" operation; for enable/disable operations, "enable" is non-zero, + * and "disable" is zero. + * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_POINTER + * NV_ERR_INVALID_INDEX specified index is not supported + * NV_ERR_INSUFFICIENT_RESOURCES cannot handle any more overrides + * NV_ERR_INVALID_OBJECT the struct needed to get the specified information + * is not marked as valid + * NV_ERR_INVALID_STATE the parameter has been set but resetting will + * not be possible + */ +#define NV5070_CTRL_CMD_IMP_SET_GET_PARAMETER (0x50700118) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS_MESSAGE_ID (0x18U) + +typedef struct NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 index; + NvU32 pstateApi; + NvU32 head; + NvU32 operation; + NvU32 value; +} NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS; + +/* valid operation values */ +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_GET 0 +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_SET 1 +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_RESET 2 + +/* valid index value */ +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_NONE (0x00000000) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IMP_ENABLE (0x00000001) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED (0x00000002) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED_PER_PSTATE (0x00000003) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_MSCG_ALLOWED_PER_PSTATE (0x00000004) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PER_PSTATE (0x00000005) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PREDICTED_EFFICIENCY_PER_PSTATE (0x00000006) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS (0x00000007) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_FORCE_MIN_MEMPOOL (0x00000008) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MEMPOOL_COMPRESSION (0x00000009) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_CURSOR_SIZE (0x0000000A) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_ENABLE (0x0000000B) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_WC_TOTAL_LATENCY (0x0000000C) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_MAX_LATENCY (0x0000000D) +#define
NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_MAX_TEST_PERIOD (0x0000000E) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_GLITCHLESS_MODESET_ENABLE (0x0000000F) + +/* valid NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOHUB_STUTTER_FEATURE values */ +#define NV5070_CTRL_IMP_STUTTER_FEATURE_NONE 0 +#define NV5070_CTRL_IMP_STUTTER_FEATURE_ASR 1 +#define NV5070_CTRL_IMP_STUTTER_FEATURE_MSCG 2 + +/* valid NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE values */ +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_POSSIBLE 0:0 +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_POSSIBLE_NO (0x00000000) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_POSSIBLE_YES (0x00000001) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_OVERRIDE_MEMPOOL 1:1 +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_OVERRIDE_MEMPOOL_NO (0x00000000) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_OVERRIDE_MEMPOOL_YES (0x00000001) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_MID_WATERMARK 2:2 +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_MID_WATERMARK_NO (0x00000000) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_MID_WATERMARK_YES (0x00000001) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_DWCF 3:3 +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_DWCF_NO (0x00000000) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_DWCF_YES (0x00000001) + +/* + * NV5070_CTRL_CMD_SET_MEMPOOL_WAR_FOR_BLIT_TEARING + * + * This command engages the WAR for blit tearing caused by huge mempool size and + * mempool compression. The EVR in aero off mode uses scanline info to predict + * where the scanline will be at a later time. Since the RG scanline is used to + * perform front-buffer blits, and isohub buffers a large amount of display data, + * isohub may have fetched several lines of data ahead of where the RG is scanning + * out, leading to video tearing. The WAR for this problem is to reduce the amount + * of data fetched. + * + * base + * This struct must be the first member of all 5070 control calls containing + * the subdeviceIndex. + * bEngageWAR + * Indicates whether the mempool WAR is to be engaged or disengaged.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ + +#define NV5070_CTRL_CMD_SET_MEMPOOL_WAR_FOR_BLIT_TEARING (0x50700119) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS_MESSAGE_ID (0x19U) + +typedef struct NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvBool bEngageWAR; +} NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS; +typedef struct NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS *PNV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS; + +#define NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE (0x50700120) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + NvU32 activeViewportBase; +} NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS; +typedef struct NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS *PNV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS; + +/* _ctrl5070chnc_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h new file mode 100644 index 0000000..75d7295 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl5070/ctrl5070common.finn +// + + + +#define NV5070_CTRL_CMD_CHANNEL_STATE_IDLE NVBIT(0) +#define NV5070_CTRL_CMD_CHANNEL_STATE_WRTIDLE NVBIT(1) +#define NV5070_CTRL_CMD_CHANNEL_STATE_QUIESCENT1 NVBIT(2) +#define NV5070_CTRL_CMD_CHANNEL_STATE_QUIESCENT2 NVBIT(3) +#define NV5070_CTRL_CMD_CHANNEL_STATE_EMPTY NVBIT(4) +#define NV5070_CTRL_CMD_CHANNEL_STATE_FLUSHED NVBIT(5) +#define NV5070_CTRL_CMD_CHANNEL_STATE_BUSY NVBIT(6) +#define NV5070_CTRL_CMD_CHANNEL_STATE_DEALLOC NVBIT(7) +#define NV5070_CTRL_CMD_CHANNEL_STATE_DEALLOC_LIMBO NVBIT(8) +#define NV5070_CTRL_CMD_CHANNEL_STATE_LIMBO1 NVBIT(9) +#define NV5070_CTRL_CMD_CHANNEL_STATE_LIMBO2 NVBIT(10) +#define NV5070_CTRL_CMD_CHANNEL_STATE_FCODEINIT NVBIT(11) +#define NV5070_CTRL_CMD_CHANNEL_STATE_FCODE NVBIT(12) +#define NV5070_CTRL_CMD_CHANNEL_STATE_VBIOSINIT NVBIT(13) +#define NV5070_CTRL_CMD_CHANNEL_STATE_VBIOSOPER NVBIT(14) +#define NV5070_CTRL_CMD_CHANNEL_STATE_UNCONNECTED NVBIT(15) +#define NV5070_CTRL_CMD_CHANNEL_STATE_INITIALIZE NVBIT(16) +#define NV5070_CTRL_CMD_CHANNEL_STATE_SHUTDOWN1 NVBIT(17) +#define NV5070_CTRL_CMD_CHANNEL_STATE_SHUTDOWN2 NVBIT(18) +#define NV5070_CTRL_CMD_CHANNEL_STATE_INIT NVBIT(19) + +#define NV5070_CTRL_CMD_MAX_HEADS 4U +#define NV5070_CTRL_CMD_MAX_DACS 4U +#define NV5070_CTRL_CMD_MAX_SORS 8U +#define NV5070_CTRL_CMD_MAX_PIORS 4U + +#define NV5070_CTRL_CMD_OR_OWNER_NONE (0xFFFFFFFFU) +#define NV5070_CTRL_CMD_OR_OWNER_HEAD(i) (i) +#define NV5070_CTRL_CMD_OR_OWNER_HEAD__SIZE_1 NV5070_CTRL_CMD_MAX_HEADS + +#define NV5070_CTRL_CMD_SOR_OWNER_MASK_NONE (0x00000000U) +#define NV5070_CTRL_CMD_SOR_OWNER_MASK_HEAD(i) (1 << i) + +#define NV5070_CTRL_CMD_DAC_PROTOCOL_RGB_CRT (0x00000000U) + + + +#define NV5070_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_A (0x00000000U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_B (0x00000001U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_DUAL_TMDS (0x00000002U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_LVDS_CUSTOM (0x00000003U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_DP_A (0x00000004U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_DP_B (0x00000005U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_SUPPORTED (0xFFFFFFFFU) + +#define NV5070_CTRL_CMD_PIOR_PROTOCOL_EXT_TMDS_ENC (0x00000000U) + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h new file mode 100644 index 0000000..0f2864a --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h @@ -0,0 +1,33 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl5070/ctrl5070impoverrides.finn +// + + + + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h new file mode 100644 index 0000000..93d31dd --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h @@ -0,0 +1,564 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl5070/ctrl5070or.finn +// + +#include "ctrl5070common.h" +#include "ctrl/ctrl5070/ctrl5070base.h" + +/* + * NV5070_CTRL_CMD_SET_DAC_PWR + * + * This command sets the DAC power control register. orNumber, normalPower, + * and safePower will always have to be specified. However, HSync, VSync, + * and data for normal and/or safe mode can be empty, leaving the current + * values intact. + * + * orNumber + * The dac for which the settings need to be programmed. + * + * normalHSync + * The normal operating state for the H sync signal. + * + * normalVSync + * The normal operating state for the V sync signal. + * + * normalData + * The normal video data input pin of the d/a converter. + * + * normalPower + * The normal state of the dac macro power. + * + * safeHSync + * The safe operating state for the H sync signal. + * + * safeVSync + * The safe operating state for the V sync signal. + * + * safeData + * The safe video data input pin of the d/a converter. + * + * safePower + * The safe state of the dac macro power. + * + * flags + * The following flags have been defined: + * (1) SPECIFIED_NORMAL: Indicates whether HSync, VSync, and data + * for the normal state have been specified in the parameters. + * (2) SPECIFIED_SAFE: Indicates whether HSync, VSync, and data + * for the safe state have been specified in the parameters.
+ * (3) SPECIFIED_FORCE_SWITCH: Indicates whether to force the + * change immediately instead of waiting for VSync + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + * NV_ERR_TIMEOUT + */ +#define NV5070_CTRL_CMD_SET_DAC_PWR (0x50700404) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_HSYNC 1:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_HSYNC_ENABLE (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_HSYNC_LO (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_HSYNC_HI (0x00000002) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_VSYNC 1:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_VSYNC_ENABLE (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_VSYNC_LO (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_VSYNC_HI (0x00000002) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_DATA 1:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_DATA_ENABLE (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_DATA_LO (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_DATA_HI (0x00000002) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_PWR 0:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_PWR_OFF (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_PWR_ON (0x00000001) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_HSYNC 1:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_HSYNC_ENABLE (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_HSYNC_LO (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_HSYNC_HI (0x00000002) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_VSYNC 1:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_VSYNC_ENABLE (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_VSYNC_LO (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_VSYNC_HI (0x00000002) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_DATA 1:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_DATA_ENABLE (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_DATA_LO (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_DATA_HI (0x00000002) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_PWR 0:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_PWR_OFF (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_PWR_ON (0x00000001) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_NORMAL 0:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_NORMAL_NO (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_NORMAL_YES (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_SAFE 1:1 +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_SAFE_NO (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_SAFE_YES (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_FORCE_SWITCH 2:2 +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_FORCE_SWITCH_NO (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_FORCE_SWITCH_YES (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + + NvU32 normalHSync; + NvU32 normalVSync; + NvU32 normalData; + NvU32 normalPower; + NvU32 safeHSync; + NvU32 safeVSync; + NvU32 safeData; + NvU32 safePower; + NvU32 flags; +} NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_SOR_OP_MODE + * + * This command returns current settings for the specified SOR. + * + * orNumber + * The OR number for which the operating mode needs to be read. 
+ * + * category + * Whether LVDS or CSTM settings are desired. + * + * puTxda + * Status of data pins of link A. + * + * puTxdb + * Status of data pins of link B. + * + * puTxca + * Status of link A clock. + * + * puTxcb + * Status of link B clock. + * + * upper + * Whether LVDS bank A is the upper, odd, or first pixel. + * + * mode + * Current protocol. + * + * linkActA + * Status of the digital logic of link A. + * + * linkActB + * Status of the digital logic of link B. + * + * lvdsEn + * Output driver configuration. + * + * lvdsDual + * Whether LVDS dual-link mode is turned on or not. + * + * dupSync + * Whether DE, HSYNC, and VSYNC are used for encoding instead of + * RES, CNTLE, and CNTLF. + * + * newMode + * Whether new or old mode is being used. + * + * balanced + * Whether balanced encoding is enabled. + * + * plldiv + * Feedback divider for the hi-speed pll. + * + * rotClk + * Skew of TXC clock. + * + * rotDat + * How much the 8 bits of each color channel are rotated by. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE (0x50700422) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_CATEGORY 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_CATEGORY_LVDS 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_CATEGORY_CUSTOM 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_0 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_0_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_0_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_1 1:1 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_1_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_1_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_2 2:2 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_2_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_2_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_3 3:3 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_3_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_3_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_0 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_0_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_0_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_1 1:1 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_1_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_1_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_2 2:2 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_2_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_2_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_3 3:3 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_3_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_3_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCA 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCA_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCA_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCB 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCB_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCB_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_UPPER 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_UPPER_UPPER_RESET 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_MODE 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_MODE_LVDS 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_MODE_TMDS 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTA 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTA_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTA_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTB 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTB_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTB_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_EN 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_EN_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_EN_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_DUAL 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_DUAL_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_DUAL_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_DUP_SYNC 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_DUP_SYNC_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_DUP_SYNC_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_NEW_MODE 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_NEW_MODE_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_NEW_MODE_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_BALANCED 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_BALANCED_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_BALANCED_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PLLDIV 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PLLDIV_BY_7 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PLLDIV_BY_10 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_ROTCLK 3:0 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_ROTDAT 2:0 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS_MESSAGE_ID (0x22U) + +typedef struct NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + + NvU32 category; + NvU32 puTxda; + NvU32 puTxdb; + NvU32 puTxca; + NvU32 puTxcb; + NvU32 upper; + NvU32 mode; + NvU32 linkActA; + NvU32 linkActB; + NvU32 lvdsEn; + NvU32 lvdsDual; + NvU32 dupSync; + NvU32 newMode; + NvU32 balanced; + NvU32 plldiv; + NvU32 rotClk; + NvU32 rotDat; +} NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS; + + +/* + * NV5070_CTRL_CMD_SET_SOR_OP_MODE + * + * This command applies the specified settings to the specified SOR. + * + * orNumber + * The OR number for which the operating mode needs to be programmed. + * Note that if DCB doesn't report LVDS for the specified orNumber, + * the call will return failure. + * + * category + * Whether LVDS or CSTM settings are specified. + * + * puTxda + * Used to enable or disable the data pins of link A. + * + * puTxdb + * Used to enable or disable the data pins of link B. + * + * puTxca + * Used to enable or disable link A clock. + * + * puTxcb + * Used to enable or disable link B clock. + * + * upper + * Whether LVDS bank A should be the upper, odd, or first pixel. + * + * mode + * What protocol (LVDS/TMDS) to use. + * + * linkActA + * Used to enable or disable the digital logic of link A. + * + * linkActB + * Used to enable or disable the digital logic of link B. + * + * lvdsEn + * Output driver configuration. + * + * lvdsDual + * Whether to turn on LVDS dual-link mode. + * + * dupSync + * Whether to use DE, HSYNC, and VSYNC for encoding instead of + * RES, CNTLE, and CNTLF. + * + * newMode + * Whether to use new or old mode. + * + * balanced + * Whether or not to use balanced encoding.
+ * + * plldiv + * Feedback divider to use for the hi-speed pll. + * + * rotClk + * How much to skew TXC clock. + * + * rotDat + * How much to rotate the 8 bits of each color channel by. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE (0x50700423) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_CATEGORY 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_CATEGORY_LVDS 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_CATEGORY_CUSTOM 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_0 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_0_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_0_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_1 1:1 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_1_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_1_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_2 2:2 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_2_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_2_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_3 3:3 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_3_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_3_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_0 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_0_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_0_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_1 1:1 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_1_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_1_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_2 2:2 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_2_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_2_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_3 3:3 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_3_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_3_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCA 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCA_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCA_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCB 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCB_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCB_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_UPPER 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_UPPER_UPPER_RESET 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_MODE 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_MODE_LVDS 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_MODE_TMDS 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTA 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTA_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTA_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTB 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTB_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTB_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_EN 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_EN_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_EN_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_DUAL 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_DUAL_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_DUAL_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_DUP_SYNC 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_DUP_SYNC_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_DUP_SYNC_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_NEW_MODE 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_NEW_MODE_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_NEW_MODE_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_BALANCED 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_BALANCED_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_BALANCED_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PLLDIV 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PLLDIV_BY_7 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PLLDIV_BY_10 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_ROTCLK 3:0 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_ROTDAT 2:0 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS_MESSAGE_ID (0x23U) + +typedef struct NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + + NvU32 category; + NvU32 puTxda; + NvU32 puTxdb; + NvU32 puTxca; + NvU32 puTxcb; + NvU32 upper; + NvU32 mode; + NvU32 linkActA; + NvU32 linkActB; + NvU32 lvdsEn; + NvU32 lvdsDual; + NvU32 dupSync; + NvU32 newMode; + NvU32 balanced; + NvU32 plldiv; + NvU32 rotClk; + NvU32 rotDat; +} NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS; + +/* + * NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE + * + * Set the given SOR number into flush mode in preparation for DP link training. + * + * orNumber [in] + * The SOR number to set into flush mode. + * + * bEnable [in] + * Whether to enable or disable flush mode on this SOR. + * + * bImmediate [in] + * If set to true, flush will be enabled in immediate mode. + * If not, flush will be enabled in loadv mode. + * NOTE: We do not support exiting flush in loadv mode. + * + * headMask [in] + * Optional. If set, only the heads in the head mask are brought out of + * flush; the SOR will stay in flush mode until the last head is out of + * flush mode. The caller can use _HEAD_ALL to specify that all heads are + * to be brought out. + * NOTE: headMask is considered only while exiting from flush mode. + * + * bForceRgDiv [in] + * If set, forces RgDiv. Should be used only for HW/SW testing. + * + * bUseBFM [in] + * If set, we are using BFM; otherwise we are executing on non-BFM platforms. + * + * bFireAndForget [in] + * Fire the flush mode and perform post-processing without waiting for it + * to be done. This is required for special cases like GC5, where we have + * ELV blocked and RG stalled: we trigger flush for one-shot mode and then + * do a modeset by disabling it, without actually waiting for it to get + * disabled. We will not get any vblank interrupt in this case as we have + * stalled RG.
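+ * + * Minimal usage sketch (an illustration only, assuming hClient/hDisplay + * handles and the NvRmControl() signature shown here; neither is defined by + * this header): entering flush mode immediately on one SOR before DP link + * training, using the parameter structure defined below. + * + * NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS params = { 0 }; + * params.base.subdeviceIndex = 0; // assumed base-params field + * params.sorNumber = 2; // SOR to place into flush mode + * params.bEnable = NV_TRUE; // enter flush mode + * params.bImmediate = NV_TRUE; // immediate rather than loadv mode + * status = NvRmControl(hClient, hDisplay, NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE, + * &params, sizeof(params));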
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE (0x50700457) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS_MESSAGE_ID (0x57U) + +typedef struct NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 sorNumber; + NvBool bEnable; + NvBool bImmediate; + NvU32 headMask; + NvBool bForceRgDiv; + NvBool bUseBFM; + NvBool bFireAndForget; +} NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS; + +#define NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS_HEADMASK_HEAD(i) (i):(i) +#define NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS_HEADMASK_HEAD__SIZE_1 NV5070_CTRL_CMD_MAX_HEADS +#define NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS_HEADMASK_HEAD_ALL 0xFFFFFFFF + + + +/* _ctrl5070or_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h new file mode 100644 index 0000000..88f66ab --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h @@ -0,0 +1,437 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl5070/ctrl5070rg.finn +// + +#include "ctrl/ctrl5070/ctrl5070base.h" + + + +/* + * NV5070_CTRL_CMD_GET_RG_STATUS + * + * This 'get' command returns the status of the raster generator. + * + * head + * The head for which RG status is desired. + * + * scanLocked + * Whether or not RG is scan (raster or frame) locked. + * + * flipLocked + * Whether or not RG is flip locked.
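+ * + * rgStalled + * Whether or not RG is stalled (this field is not described in the + * original comment; the wording here is inferred from the parameter + * structure and the _STALLED_NO/_YES values below).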
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_RG_STATUS (0x50700202) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_YES (0x00000001) + +#define NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_YES (0x00000001) + +#define NV5070_CTRL_CMD_GET_RG_STATUS_STALLED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_STATUS_STALLED_YES (0x00000001) + +#define NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + + NvU32 scanLocked; // [OUT] + NvU32 flipLocked; // [OUT] + NvU32 rgStalled; +} NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS; + +/* + * NV5070_CTRL_CMD_UNDERFLOW_PARAMS + * + * This structure contains data for + * NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP and + * NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP CTRL calls + * + * head + * The head for which RG underflow properties need to be set or retrieved. + * Valid values for this parameter are 0 to NV5070_CTRL_CMD_MAX_HEADS. + * enable + * _SET_RG_: Whether to enable or disable RG underflow reporting. + * _GET_RG_: Whether or not RG underflow reporting is enabled. + * underflow + * _SET_RG_: Clear underflow (CLEAR_UNDERFLOW_YES) or leave it alone + * (CLEAR_UNDERFLOW_NO). + * Note: The GET_RG function automatically clears the underflow. + * It is recommended that GET_RG be used to clear any initial + * underflows, and that the "underflow" field be set to + * CLEAR_UNDERFLOW_NO in any SET_RG calls. This field may be + * deprecated in the future, for SET_RG calls. + * _GET_RG_: UNDERFLOWED_YES if an RG underflow occurred since the most + * recent prior call to NV5070_CTRL_CMD_GET_RG_STATUS. + * epfifoUnderflow + * _SET_RG_: Not used. + * _GET_RG_: EPFIFO_UNDERFLOWED_YES if an EPFIFO underflow occurred since + * the most recent prior call to NV5070_CTRL_CMD_GET_RG_STATUS. + * mode + * _SET_RG_: What mode to use when underflow occurs. This is + * independent of the enable field. This is always active. + * _GET_RG_: What mode is used when underflow occurs. This is + * independent of the enable field. This is always active.
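+ * + * Minimal usage sketch (an illustration only, assuming hClient/hDisplay + * handles and the NvRmControl() signature; neither is defined by this + * header): enabling underflow reporting on head 0 with the RED underflow + * display mode, using the SET params structure defined further below. + * + * NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS setParams = { 0 }; + * setParams.underflowParams.head = 0; + * setParams.underflowParams.enable = NV5070_CTRL_CMD_UNDERFLOW_PROP_ENABLE_YES; + * setParams.underflowParams.underflow = NV5070_CTRL_CMD_UNDERFLOW_PROP_CLEAR_UNDERFLOW_NO; + * setParams.underflowParams.mode = NV5070_CTRL_CMD_UNDERFLOW_PROP_MODE_RED; + * status = NvRmControl(hClient, hDisplay, NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP, + * &setParams, sizeof(setParams));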
+ */ +typedef struct NV5070_CTRL_CMD_UNDERFLOW_PARAMS { + NvU32 head; + NvU32 enable; + NvU32 underflow; + NvU32 epfifoUnderflow; + NvU32 mode; +} NV5070_CTRL_CMD_UNDERFLOW_PARAMS; + +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_ENABLED_NO (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_ENABLED_YES (0x00000001) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_UNDERFLOWED_NO (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_UNDERFLOWED_YES (0x00000001) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_EPFIFO_UNDERFLOWED_NO (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_EPFIFO_UNDERFLOWED_YES (0x00000001) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_MODE_REPEAT (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_MODE_RED (0x00000001) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_ENABLE_NO (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_ENABLE_YES (0x00000001) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_CLEAR_UNDERFLOW_NO (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_CLEAR_UNDERFLOW_YES (0x00000001) + +/* + * NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP + * + * This command returns the underflow reporting parameters inside the + * NV5070_CTRL_CMD_UNDERFLOW_PARAMS structure. + * + * underflowParams + * Contains data for underflow logging. + * See the NV5070_CTRL_CMD_UNDERFLOW_PARAMS structure. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP (0x50700203) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NV5070_CTRL_CMD_UNDERFLOW_PARAMS underflowParams; +} NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS; + + +/* + * NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP + * + * This command sets up the underflow parameters using the + * NV5070_CTRL_CMD_UNDERFLOW_PARAMS structure. + * + * underflowParams + * Contains data for underflow logging. + * See the NV5070_CTRL_CMD_UNDERFLOW_PARAMS structure. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP (0x50700204) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NV5070_CTRL_CMD_UNDERFLOW_PARAMS underflowParams; +} NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS; + + +/* + * NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP + * + * This command gets the timing parameters associated with the lockout period. + * + * head + * The head for which RG fliplock properties are desired. + * + * maxSwapLockoutSkew + * The maximum possible skew between the swap lockout signals for all + * heads which are fliplocked to this head. + * + * swapLockoutStart + * Determines the start of the swap lockout period, expressed as the + * number of lines before the end of the frame. The minimum allowed + * value is 1.
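+ * + * Minimal usage sketch (an illustration only; hClient/hDisplay and the + * NvRmControl() signature are assumed placeholders): reading the current + * fliplock timing properties for head 0. + * + * NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS flParams = { 0 }; + * flParams.head = 0; + * status = NvRmControl(hClient, hDisplay, NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP, + * &flParams, sizeof(flParams)); + * // On NV_OK, flParams.maxSwapLockoutSkew and flParams.swapLockoutStart + * // hold the current lockout timing values.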
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP (0x50700205) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_MAX_SWAP_LOCKOUT_SKEW 9:0 + +#define NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_SWAP_LOCKOUT_START 15:0 + +#define NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + + NvU32 maxSwapLockoutSkew; + NvU32 swapLockoutStart; +} NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS; + +/* + * NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP + * + * This command sets the timing parameters associated with the lockout period. + * + * head + * The head for which RG fliplock properties are to be set. + * + * maxSwapLockoutSkew + * The maximum possible skew between the swap lockout signals for all + * heads which are fliplocked to this head. + * + * swapLockoutStart + * Determines the start of the swap lockout period, expressed as the + * number of lines before the end of the frame. The minimum allowed + * value is 1. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP (0x50700206) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_MAX_SWAP_LOCKOUT_SKEW 9:0 +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_MAX_SWAP_LOCKOUT_SKEW_INIT (0x00000000) + +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_SWAP_LOCKOUT_START 15:0 +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_SWAP_LOCKOUT_START_INIT (0x00000000) + +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + + NvU32 maxSwapLockoutSkew; + NvU32 swapLockoutStart; +} NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS + * + * This command returns which set of lockpins needs to be used in order to + * successfully raster lock two heads on different GPUs together. The + * second GPU is not inferred from linked SLI state, if any, and needs to + * be specified explicitly. + * + * head + * The local head to be locked with the peer head. + * + * peer.hDisplay + * The handle identifying a display object allocated on another + * GPU. It specifies the peer of interest with a subdevice + * index (see below) and needs to be distinct from the handle + * supplied directly to NvRmControl(). + * + * peer.subdeviceIndex + * The index of the peer subdevice of interest. + * + * peer.head + * The peer head to be locked with the local head. + * + * masterScanLockPin + * slaveScanLockPin + * Returns the master and slave scanlock pins that would need to + * be used to lock the specified heads together, if any.
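+ * + * Minimal usage sketch (an illustration only; hClient, hDisplay, and + * hPeerDisplay are assumed placeholder handles and NvRmControl() is shown + * with an assumed signature): querying the lockpins needed to rasterlock + * local head 0 with head 0 on a peer GPU. + * + * NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS lockParams = { 0 }; + * lockParams.head = 0; // local head + * lockParams.peer.hDisplay = hPeerDisplay; // display object on the other GPU + * lockParams.peer.subdeviceIndex = 0; // peer subdevice of interest + * lockParams.peer.head = 0; // peer head + * status = NvRmControl(hClient, hDisplay, + * NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS, + * &lockParams, sizeof(lockParams));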
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_OBJECT_PARENT + */ +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS (0x5070020a) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_MASTER_SCAN_LOCK_CONNECTED 0:0 +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_MASTER_SCAN_LOCK_CONNECTED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_MASTER_SCAN_LOCK_CONNECTED_YES (0x00000001) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_MASTER_SCAN_LOCK_PIN 2:1 + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_SLAVE_SCAN_LOCK_CONNECTED 0:0 +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_SLAVE_SCAN_LOCK_CONNECTED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_SLAVE_SCAN_LOCK_CONNECTED_YES (0x00000001) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_SLAVE_SCAN_LOCK_PIN 2:1 + +#define NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + + struct { + NvHandle hDisplay; + NvU32 subdeviceIndex; + NvU32 head; + } peer; + + NvU32 masterScanLock; + NvU32 slaveScanLock; +} NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_PINSET_LOCKPINS + * + * Get the lockpins for the specified pinset. + * + * pinset [in] + * The pinset whose corresponding lockpin numbers need to be determined + * must be specified with this parameter. + * + * scanLockPin [out] + * The scanlock lockpin (rasterlock or framelock) index, which can be + * either master or slave, is returned in this parameter. + * + * flipLockPin [out] + * The fliplock lockpin index is returned in this parameter. + * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV5070_CTRL_CMD_GET_PINSET_LOCKPINS (0x5070020b) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_GET_PINSET_LOCKPINS_SCAN_LOCK_PIN_NONE 0xffffffff + +#define NV5070_CTRL_GET_PINSET_LOCKPINS_FLIP_LOCK_PIN_NONE 0xffffffff + +#define NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 pinset; + NvU32 scanLockPin; + NvU32 flipLockPin; +} NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_RG_SCAN_LINE + * + * This 'get' command returns the current scan line value from the raster + * generator. + * + * head + * The head for which current scan line number is desired. + * + * scanLine + * Current scan line number. + * + * inVblank + * Whether or not in vblank.
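+ * + * Minimal usage sketch (an illustration only; hClient/hDisplay and the + * NvRmControl() signature are assumed placeholders): polling the current + * scan line on head 0. + * + * NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS slParams = { 0 }; + * slParams.head = 0; + * status = NvRmControl(hClient, hDisplay, NV5070_CTRL_CMD_GET_RG_SCAN_LINE, + * &slParams, sizeof(slParams)); + * // On NV_OK, slParams.scanLine holds the current line and slParams.inVblank + * // reports whether the raster generator is currently in vblank.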
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_RG_SCAN_LINE (0x5070020c) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_SCAN_LINE_IN_VBLANK_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_SCAN_LINE_IN_VBLANK_YES (0x00000001) + +#define NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + NvU32 scanLine; // [OUT] + NvU32 inVblank; // [OUT] +} NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS + * + * This command returns FrameLock header lock pin information. + * The lock pin index returned by this command corresponds to the + * EVO lock pin number; for example, lock pin index 0 means + * LOCKPIN_0. + * + * frameLockPin [out] + * This parameter returns the FrameLock pin index + * connected to the FrameLock header. + * + * rasterLockPin [out] + * This parameter returns the RasterLock pin index + * connected to the FrameLock header. + * + * flipLockPin [out] + * This parameter returns the FlipLock pin index + * connected to the FrameLock header. + * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS (0x5070020d) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_FRAME_LOCK_PIN_NONE (0xffffffff) +#define NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_RASTER_LOCK_PIN_NONE (0xffffffff) +#define NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_FLIP_LOCK_PIN_NONE (0xffffffff) +#define NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS_MESSAGE_ID (0xDU) + +typedef struct NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 frameLockPin; + NvU32 rasterLockPin; + NvU32 flipLockPin; +} NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS; + + + +/* _ctrl5070rg_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h new file mode 100644 index 0000000..c222907 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h @@ -0,0 +1,81 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl5070/ctrl5070system.finn +// + +#include "ctrl/ctrl5070/ctrl5070base.h" +#include "ctrl/ctrl5070/ctrl5070common.h" // NV5070_CTRL_CMD_MAX_HEADS + +/* extract cap bit setting from tbl */ +#define NV5070_CTRL_SYSTEM_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* caps format is byte_index:bit_mask */ +#define NV5070_CTRL_SYSTEM_CAPS_BUG_237734_REQUIRES_DMI_WAR 0:0x01 // Deprecated +#define NV5070_CTRL_SYSTEM_CAPS_STEREO_DIN_AVAILABLE 0:0x02 +#define NV5070_CTRL_SYSTEM_CAPS_BUG_381003_MULTIWAY_AFR_WAR 0:0x04 +#define NV5070_CTRL_SYSTEM_CAPS_BUG_538079_COLOR_COMPRESSION_SUPPORTED 0:0x08 // Deprecated +#define NV5070_CTRL_SYSTEM_CAPS_BUG_2052012_GLITCHY_MCLK_SWITCH 0:0x10 +#define NV5070_CTRL_SYSTEM_CAPS_DEEP_COLOR_SUPPORT 0:0x20 +#define NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY 0:0x40 + + +/* size in bytes of display caps table */ +#define NV5070_CTRL_SYSTEM_CAPS_TBL_SIZE 1 + +/* + * NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2 + * + * This command returns the set of display capabilities for the parent device + * in the form of an array of unsigned bytes. Display capabilities + * include supported features and required workarounds for the display + * engine(s) within the device, each represented by a byte offset into the + * table and a bit position within that byte. The set of display capabilities + * will be normalized across all GPUs within the device (a feature capability + * will be set only if it's supported on all GPUs while a required workaround + * capability will be set if any of the GPUs require it). + * + * [out] capsTbl + * This caps table array is where the display cap bits will be transferred + * by the RM. The caps table is an array of unsigned bytes. + * + * Possible status values returned are: + * NV_OK + */ +#define NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2 (0x50700709) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SYSTEM_INTERFACE_ID << 8) | NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + + NvU8 capsTbl[NV5070_CTRL_SYSTEM_CAPS_TBL_SIZE]; +} NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS; + +/* _ctrl5070system_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h new file mode 100644 index 0000000..ab31072 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl5070/ctrl5070verif.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83debase.h b/src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83debase.h new file mode 100644 index 0000000..80b69c4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83debase.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl83de/ctrl83debase.finn +// + +#include "ctrl/ctrlxxxx.h" +/* GT200_DEBUG control commands and parameters */ + +#define NV83DE_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x83DEU, NV83DE_CTRL_##cat, idx) + +/* Command categories (6 bits) */ +#define NV83DE_CTRL_RESERVED (0x00) +#define NV83DE_CTRL_GR (0x01) +#define NV83DE_CTRL_FIFO (0x02) +#define NV83DE_CTRL_DEBUG (0x03) +#define NV83DE_CTRL_INTERNAL (0x04) + + +/* + * NV83DE_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters.
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV83DE_CTRL_CMD_NULL (0x83de0000) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* _ctrl83debase_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h b/src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h new file mode 100644 index 0000000..c69f62b --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h @@ -0,0 +1,1234 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl83de/ctrl83dedebug.finn +// + +#include "ctrl/ctrl83de/ctrl83debase.h" +#include "nvstatus.h" + +#include "ctrl/ctrl2080/ctrl2080gpu.h" + +/* + * NV83DE_CTRL_CMD_SM_DEBUG_MODE_ENABLE + * + * The RmCtrl enables the debug mode for a given context. + * When enabled: + * - The program execution on an SM stops at breakpoints. + * - It allows the user to handle the RC recovery process and + * exceptions. (Yet to be supported) + * - It allows the user to suspend and resume the context. (Yet to be supported) + * + * This command accepts no parameters. + * + * Possible return values: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + * + */ +#define NV83DE_CTRL_CMD_SM_DEBUG_MODE_ENABLE (0x83de0301) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x1" */ + +/* + * NV83DE_CTRL_CMD_SM_DEBUG_MODE_DISABLE + * + * The RmCtrl disables the debug mode for a given context. + * When disabled: + * - The program execution on an SM ignores the breakpoints. + * - RC recovery process and exceptions are handled in the usual way. + * - A request to suspend or resume the context will return the error + * NV_ERR_INVALID_COMMAND. + * + * This command accepts no parameters. + * + * Possible return values: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV83DE_CTRL_CMD_SM_DEBUG_MODE_DISABLE (0x83de0302) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x2" */ + +/* + * NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG + * + * This command sets the MMU DEBUG mode. This is a Fermi-onwards feature.
+ * If the query is made on an incorrect platform (for example, pre-Fermi), + * the call will return with an NV_ERR_NOT_SUPPORTED error. + * + * action + * The possible action values are: + * - NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG_ENABLE + * This enables the MMU debug mode if possible. If, however, another + * client has already disabled the mode (via an NV83DE call), then this + * operation returns NV_ERR_STATE_IN_USE. + * + * - NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG_DISABLE + * This disables the MMU debug mode if possible. If, however, another + * client has already enabled the mode (via an NV83DE call), then this + * operation returns NV_ERR_STATE_IN_USE. + * + * - NV83DE_CTRL_CMD_DEBUG_RELEASE_MMU_DEBUG_REQUESTS + * This operation releases all the client's outstanding requests to enable + * or disable the MMU debug mode. + * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG (0x83de0307) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS { + NvU32 action; +} NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS; + +#define NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG_ENABLE (0x00000001) +#define NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG_DISABLE (0x00000002) +#define NV83DE_CTRL_CMD_DEBUG_RELEASE_MMU_DEBUG_REQUESTS (0x00000003) + +/* + * NV83DE_CTRL_CMD_DEBUG_GET_MODE_MMU_DEBUG + * + * This command gets the value of the currently configured MMU DEBUG mode. + * This is a Fermi-onwards feature. If the query is made on an incorrect + * platform (for example, pre-Fermi), the call will return with an + * NV_ERR_NOT_SUPPORTED error. + * + * value + * This parameter returns the configured value. + * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV83DE_CTRL_CMD_DEBUG_GET_MODE_MMU_DEBUG (0x83de0308) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS { + NvU32 value; +} NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS; + +#define NV83DE_CTRL_CMD_DEBUG_GET_MODE_MMU_DEBUG_ENABLED (0x00000001) +#define NV83DE_CTRL_CMD_DEBUG_GET_MODE_MMU_DEBUG_DISABLED (0x00000002) + +/* + * NV83DE_CTRL_CMD_DEBUG_SET_EXCEPTION_MASK + * + * This command allows the caller to filter events (which are also referred to + * as "notifications", not to be confused with true notifiers), in the RM, + * fairly close to the source of the events. In other words, depending on the + * value of the exceptionMask, some events may not be raised. + * + * The original reason for creating this command is that the CUDA driver needs + * to place the RM and the GPU(s) into SM debug mode, for some GPUs, in order to + * activate various features and HW bug WARs. Being in SM debug mode has the + * side effect of exposing the caller to debug events, which are generally + * undesirable for the CUDA driver, but desirable for the CUDA debugger.
This + * command allows each client to receive only the events that it is + * specifically interested in. + * + * If this command is never invoked, then the RM will behave as if + * exceptionMask==NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_ALL. + * + * As with many of the debugger features, this is a Fermi-onwards feature. If this + * API call is issued on an earlier platform, it will return an + * NV_ERR_NOT_SUPPORTED error. + * + * exceptionMask + * This identifies the category of notifications that the debug client + * is interested in. + * + * Here are the allowed values for exceptionMask: + * + * - NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_FATAL + * This means that the caller wishes to receive events for any exceptions + * that are classified as fatal. For example, + * HWW_WARP_ESR_ERROR_PC_OVERFLOW is one such exception. + * + * If any debug object, in any channel, has registered to receive events + * for _FATAL exceptions, then RC recovery will be deferred if such an + * exception occurs. + * + * Also, if a client is registered for fatal exceptions, RC error recovery + * will be deferred. If not registered for fatal exceptions, then fatal + * errors will (as usual) cause RC recovery to run immediately. + * + * - NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_TRAP + * This means that an event will be raised when an SM executes a bpt.pause + * instruction. Note that on Fermi, the SM raises HWW when bpt.trap is + * executed as well, so this event will also be raised in that situation. + * + * - NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_SINGLE_STEP + * This means that an event will be raised when a single-step completion + * interrupt is received. + * + * - NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_INT + * This means that an event will be raised when an SM executes a bpt.int + * instruction. + * + * - NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_NONE + * This means that no debug events will be raised. + * + * - NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_ALL + * This means that an event will be raised for any and all debug + * exceptions. This is the default behavior. + * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV83DE_CTRL_CMD_DEBUG_SET_EXCEPTION_MASK (0x83de0309) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS { + NvU32 exceptionMask; +} NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS; + +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_FATAL (0x00000001) +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_TRAP (0x00000002) +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_SINGLE_STEP (0x00000004) +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_INT (0x00000008) +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_CILP (0x00000010) +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PREEMPTION_STARTED (0x00000020) +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_NONE (0x00000000) +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_ALL (0x0000FFFF) + + + +/* + * NV83DE_CTRL_CMD_READ_SINGLE_SM_ERROR_STATE + * + * This command reads the SM error state of a single SM. The error state + * consists of several 32-bit values. + * + * Note that this acts upon the currently resident GR (graphics) context. It is + * up to the RM client to ensure that the desired GR context is resident, before + * making this API call.
+ * + * See also: NV83DE_CTRL_CMD_READ_ALL_SM_ERROR_STATES. + * + * This is a Fermi-and-later feature. If this API call is issued on an earlier + * platform, it will return an NV_ERR_NOT_SUPPORTED error. + * + * Parameters: + * + * hTargetChannel (input) + * This identifies the channel. + * + * smID (input) + * This identifies the SM. Allowed values are any valid SM ID. The RM + * grProgramSmIdNumbering_HAL() routines are a good place to look, in order + * to see how SM IDs are set up. The main idea is that the RM chooses a + * numbering scheme, and then informs the GPU hardware of that scheme, by + * actually recording each SM ID into the GPU, via a series of PRI (GPU + * register) writes. + * + * smErrorState.hwwGlobalEsr (output) + * Value of the Global Error Status Register. + * + * smErrorState.hwwWarpEsr (output) + * Value of the Warp Error Status Register. + * + * smErrorState.hwwWarpEsrPc (output) : DEPRECATED for 64b PC below, will hold low 32b for now + * Value of the Warp Error Status Register Program Counter. + * + * smErrorState.hwwGlobalEsrReportMask (output) + * Value of the Global Error Status Register Report Mask. + * + * smErrorState.hwwWarpEsrReportMask (output) + * Value of the Warp Error Status Register Report Mask. + * + * smErrorState.hwwWarpEsrPc64 (output) + * Value of the 64b Warp Error Status Register Program Counter. + * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV83DE_CTRL_CMD_DEBUG_READ_SINGLE_SM_ERROR_STATE (0x83de030b) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS_MESSAGE_ID" */ + +typedef struct NV83DE_SM_ERROR_STATE_REGISTERS { + NvU32 hwwGlobalEsr; + NvU32 hwwWarpEsr; + NvU32 hwwWarpEsrPc; + NvU32 hwwGlobalEsrReportMask; + NvU32 hwwWarpEsrReportMask; + NV_DECLARE_ALIGNED(NvU64 hwwEsrAddr, 8); + NV_DECLARE_ALIGNED(NvU64 hwwWarpEsrPc64, 8); + NvU32 hwwCgaEsr; + NvU32 hwwCgaEsrReportMask; +} NV83DE_SM_ERROR_STATE_REGISTERS; + +#define NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS { + NvHandle hTargetChannel; + NvU32 smID; + NV_DECLARE_ALIGNED(NV83DE_SM_ERROR_STATE_REGISTERS smErrorState, 8); +} NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS; + +/* + * NV83DE_CTRL_CMD_READ_ALL_SM_ERROR_STATES + * + * This command reads the SM error state of all SMs. + * + * Note that this acts upon the currently resident GR (graphics) context. It is + * up to the RM client to ensure that the desired GR context is resident, before + * making this API call. + * + * Parameters: + * + * hTargetChannel (input) + * This identifies the channel. + * + * numSMsToRead (input) + * This should be set to the number of SMs that the RM is supposed to read. + * It will typically be the total number of SMs in the GPU. For best + * results, you should not pass in a value that is greater than the number + * of SMs that the GPU actually contains. + * + * startingSM (input) + * This should be set to the starting index of the first SM to read. + * Clients may use this to read data from SMs beyond the maximum specified + * in NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL. + * + * smErrorStateArray (output) + * This is an array of NV83DE_SM_ERROR_STATE_REGISTERS structs.
+ *     the description of NV83DE_CTRL_CMD_READ_SINGLE_SM_ERROR_STATE, above,
+ *     for a description of the individual fields.
+ *
+ * mmuFault.valid (output)
+ *     This is NV_TRUE if an MMU fault occurred on the target channel since the
+ *     last call to NV83DE_CTRL_CMD_CLEAR_ALL_SM_ERROR_STATES on this channel.
+ *
+ * mmuFault.faultInfo (output)
+ *     This is the value of the first NV_PFIFO_INTR_MMU_FAULT_INFO that caused
+ *     the MMU fault.
+ *
+ * mmuFaultInfo (output)
+ *     Deprecated field; see mmuFault.faultInfo.
+ *
+ * This is a Fermi-and-later feature. If this API call is issued on an earlier
+ * platform, it will return an NV_ERR_NOT_SUPPORTED error.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_CLIENT
+ *   NV_ERR_OBJECT_NOT_FOUND
+ */
+#define NV83DE_CTRL_CMD_DEBUG_READ_ALL_SM_ERROR_STATES (0x83de030c) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL 100
+
+typedef struct NV83DE_MMU_FAULT_INFO {
+    NvBool valid;
+    NvU32  faultInfo;
+} NV83DE_MMU_FAULT_INFO;
+
+#define NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS_MESSAGE_ID (0xCU)
+
+typedef struct NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS {
+    NvHandle hTargetChannel;
+    NvU32    numSMsToRead;
+    NV_DECLARE_ALIGNED(NV83DE_SM_ERROR_STATE_REGISTERS smErrorStateArray[NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL], 8);
+    NvU32    mmuFaultInfo;    // Deprecated, use mmuFault field instead
+    NV83DE_MMU_FAULT_INFO mmuFault;
+    NvU32    startingSM;
+} NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS;
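+
+/*
+ * Example (illustrative sketch, not part of this header): reading the error
+ * state of every SM on a GPU with more SMs than fit in one call, using
+ * startingSM to page through. gpuSmCount, the handles, and the NvRmControl()
+ * entry point are assumed to be provided by the caller.
+ *
+ *     NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS readParams;
+ *     NvU32 sm, remaining;
+ *     for (sm = 0; sm < gpuSmCount; sm += NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL)
+ *     {
+ *         memset(&readParams, 0, sizeof(readParams));
+ *         readParams.hTargetChannel = hTargetChannel;
+ *         readParams.startingSM     = sm;
+ *         remaining                 = gpuSmCount - sm;
+ *         readParams.numSMsToRead   =
+ *             (remaining < NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL) ?
+ *                 remaining : NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL;
+ *         status = NvRmControl(hClient, hDebugger,
+ *                              NV83DE_CTRL_CMD_DEBUG_READ_ALL_SM_ERROR_STATES,
+ *                              &readParams, sizeof(readParams));
+ *         if (status != NV_OK)
+ *             break;
+ *     }
+ */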
+
+/*
+ * NV83DE_CTRL_CMD_CLEAR_SINGLE_SM_ERROR_STATE
+ *
+ * This command clears the SM error state of a single SM. The error state
+ * consists of several 32-bit values.
+ *
+ * Note that this acts upon the currently resident GR (graphics) context. It is
+ * up to the RM client to ensure that the desired GR context is resident, before
+ * making this API call.
+ *
+ * See also: NV83DE_CTRL_CMD_CLEAR_ALL_SM_ERROR_STATES.
+ *
+ * This API call has a slightly different effect than what would occur as a
+ * result of issuing a read-modify-write via _READ_SINGLE_SM_ERROR_STATE and
+ * _WRITE_SINGLE_SM_ERROR_STATE. The difference arises because the RM caches
+ * the error state, to compensate for the fact that the real GPU error state
+ * must be cleared very early in the exception handling routine.
+ *
+ * In other words, the _READ data is stale by design, and cannot be used in a
+ * read-modify-write routine from user space. Therefore, in order to clear the
+ * SM error state, a separate RM API call is required.
+ *
+ * This is a Fermi-and-later feature. If this API call is issued on an earlier
+ * platform, it will return an NV_ERR_NOT_SUPPORTED error.
+ *
+ * Parameters:
+ *
+ * hTargetChannel (input)
+ *     This identifies the channel.
+ *
+ * smID (input)
+ *     This identifies the SM. Allowed values are any valid SM ID. Please see
+ *     NV83DE_CTRL_CMD_READ_SINGLE_SM_ERROR_STATE for further details.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_CLIENT
+ *   NV_ERR_OBJECT_NOT_FOUND
+ */
+#define NV83DE_CTRL_CMD_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE (0x83de030f) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS_MESSAGE_ID (0xFU)
+
+typedef struct NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS {
+    NvHandle hTargetChannel;
+    NvU32    smID;
+} NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS;
+
+/*
+ * NV83DE_CTRL_CMD_CLEAR_ALL_SM_ERROR_STATES
+ *
+ * This command clears the SM error state of all SMs.
+ *
+ * Note that this acts upon the currently resident GR (graphics) context. It is
+ * up to the RM client to ensure that the desired GR context is resident, before
+ * making this API call.
+ *
+ * Parameters:
+ *
+ * hTargetChannel (input)
+ *     This identifies the channel.
+ *
+ * numSMsToClear (input)
+ *     This should be set to the number of SMs that the RM is supposed to
+ *     write. It will typically be the total number of SMs in the GPU. For best
+ *     results, you should not pass in a value that is greater than the number
+ *     of SMs that the GPU actually contains.
+ *
+ * Please see the description of
+ * NV83DE_CTRL_CMD_CLEAR_SINGLE_SM_ERROR_STATE, above, for a description of
+ * why these two _CLEAR API calls are required.
+ *
+ * This is a Fermi-and-later feature. If this API call is issued on an earlier
+ * platform, it will return an NV_ERR_NOT_SUPPORTED error.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_CLIENT
+ *   NV_ERR_OBJECT_NOT_FOUND
+ */
+#define NV83DE_CTRL_CMD_DEBUG_CLEAR_ALL_SM_ERROR_STATES (0x83de0310) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS_MESSAGE_ID (0x10U)
+
+typedef struct NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS {
+    NvHandle hTargetChannel;
+    NvU32    numSMsToClear;
+} NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS;
+
+
+
+#define NV83DE_CTRL_CMD_DEBUG_SUSPEND_ALL_CONTEXTS_FOR_CLIENT_PARAMS_DEFINED 1
+#define NV83DE_CTRL_CMD_DEBUG_SUSPEND_ALL_CONTEXTS_FOR_CLIENT_HAS_RESIDENT_CHANNEL 1
+typedef struct NV83DE_CTRL_CMD_DEBUG_SUSPEND_ALL_CONTEXTS_FOR_CLIENT_PARAMS {
+    NvU32    waitForEvent;
+    NvHandle hResidentChannel;
+} NV83DE_CTRL_CMD_DEBUG_SUSPEND_ALL_CONTEXTS_FOR_CLIENT_PARAMS;
+
+
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE
+ *
+ * This command sets the type of action we want when RM encounters an error
+ * and issues a STOP_TRIGGER. The action will be to either broadcast the
+ * STOP_TRIGGER to all SMs, or send it only to the SM hitting an exception.
+ *
+ * stopTriggerType
+ *     This identifies the trigger type to initiate.
+ *
+ * Here are the allowed values for stopTriggerType:
+ *
+ * - NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_SINGLE_SM
+ *     This means that we will issue STOP_TRIGGER only to the single SM
+ *     noted in the exception.
+ *
+ * - NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_BROADCAST
+ *     This means that we will issue STOP_TRIGGER to all SMs.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_CLIENT
+ *   NV_ERR_OBJECT_NOT_FOUND
+ */
+#define NV83DE_CTRL_CMD_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE (0x83de0313) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS_MESSAGE_ID (0x13U)
+
+typedef struct NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS {
+    NvU32 stopTriggerType;
+} NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS;
+
+#define NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_SINGLE_SM (0x00000001)
+#define NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_BROADCAST (0x00000002)
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING
+ *
+ * This command sets the type of action we want when RM encounters a
+ * SINGLE_STEP exception while in CILP debug mode. In the normal,
+ * non-pausing case, we ignore these exceptions as on prior chips. When the
+ * user selects pausing, the exception will be treated just as if we had
+ * seen an SM error or BPT_PAUSE.
+ *
+ * singleStepHandling
+ *     This identifies the single step handling type to use.
+ *
+ * Here are the allowed values for singleStepHandling:
+ *
+ * - NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_NONPAUSING
+ *     Treat SINGLE_STEP exceptions while in debug mode as non-pausing,
+ *     which is the default/normal mode in the interrupt pre-process
+ *     function, where they are ignored.
+ *
+ * - NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PAUSING
+ *     Treat SINGLE_STEP exceptions while in debug mode as pausing,
+ *     which means in the interrupt pre-process function they will
+ *     be treated like BPT_PAUSE and SM error exceptions.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_CLIENT
+ *   NV_ERR_OBJECT_NOT_FOUND
+ */
+#define NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING (0x83de0314) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PARAMS_MESSAGE_ID (0x14U)
+
+typedef struct NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PARAMS {
+    NvU32 singleStepHandling;
+} NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PARAMS;
+
+#define NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_NONPAUSING (0x00000001)
+#define NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PAUSING    (0x00000002)
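+
+/*
+ * Example (illustrative sketch, not part of this header): a debugger that
+ * wants single-step completions in CILP debug mode to halt the context just
+ * like BPT_PAUSE might issue the following. The handles and NvRmControl()
+ * entry point are assumed from the caller's setup.
+ *
+ *     NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PARAMS ssParams = { 0 };
+ *     ssParams.singleStepHandling =
+ *         NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PAUSING;
+ *     status = NvRmControl(hClient, hDebugger,
+ *                          NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING,
+ *                          &ssParams, sizeof(ssParams));
+ */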
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_READ_MEMORY
+ *
+ * This command reads a block of memory.
+ * This command is deprecated in favor of NV83DE_CTRL_CMD_DEBUG_READ_BATCH_MEMORY.
+ *
+ * hMemory [IN]
+ *     The handle to the memory being accessed. If hMemory is not accessible
+ *     from the caller's address space, NV_ERR_INSUFFICIENT_PERMISSIONS
+ *     is returned.
+ *
+ * length [IN/OUT]
+ *     Number of bytes to read; on return, the number of bytes actually read.
+ *
+ * offset [IN]
+ *     The offset into the physical memory region given by the handle above.
+ *
+ * buffer [OUT]
+ *     The data read is returned in this buffer.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ACCESS_TYPE
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ */
+#define NV83DE_CTRL_CMD_DEBUG_READ_MEMORY (0x83de0315) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS_MESSAGE_ID (0x15U)
+
+typedef struct NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS {
+    NvU32 hMemory;
+    NvU32 length;
+    NV_DECLARE_ALIGNED(NvU64 offset, 8);
+    NV_DECLARE_ALIGNED(NvP64 buffer, 8);
+} NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS;
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_WRITE_MEMORY
+ *
+ * This command writes a block of memory.
+ * This command is deprecated in favor of NV83DE_CTRL_CMD_DEBUG_WRITE_BATCH_MEMORY.
+ *
+ * hMemory [IN]
+ *     The handle to the memory being accessed. If hMemory is not accessible
+ *     from the caller's address space, NV_ERR_INSUFFICIENT_PERMISSIONS
+ *     is returned.
+ *
+ * length [IN/OUT]
+ *     Number of bytes to write; on return, the number of bytes actually
+ *     written.
+ *
+ * offset [IN]
+ *     The offset into the physical memory region given by the handle above.
+ *
+ * buffer [IN]
+ *     The data to be written is sent in this buffer.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ACCESS_TYPE
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ */
+#define NV83DE_CTRL_CMD_DEBUG_WRITE_MEMORY (0x83de0316) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS_MESSAGE_ID (0x16U)
+
+typedef struct NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS {
+    NvU32 hMemory;
+    NvU32 length;
+    NV_DECLARE_ALIGNED(NvU64 offset, 8);
+    NV_DECLARE_ALIGNED(NvP64 buffer, 8);
+} NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS;
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT
+ *
+ * This command suspends an SM context associated with the debugger object.
+ *
+ * When the suspend call returns, the context associated with the debugger
+ * object should not be actively executing any code on any SM. The channel will
+ * have been disabled if it was not resident on GR, or will have its SMs
+ * suspended if it was resident.
+ *
+ * This is a Fermi-and-later feature. If this API call is issued on an earlier
+ * platform, it will return an NV_ERR_NOT_SUPPORTED error.
+ *
+ * waitForEvent
+ *     This return parameter indicates that the call had to issue a preempt,
+ *     which is still in progress; the user may need to wait for it to
+ *     complete.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_CLIENT
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT (0x83de0317) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS_MESSAGE_ID (0x17U)
+
+typedef NV83DE_CTRL_CMD_DEBUG_SUSPEND_ALL_CONTEXTS_FOR_CLIENT_PARAMS NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS;
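+
+/*
+ * Example (illustrative sketch, not part of this header): suspending and
+ * later resuming the debugged context. When waitForEvent is returned nonzero,
+ * a preempt was issued and the caller is expected to wait for its completion
+ * notification before treating the context as suspended; the wait mechanism
+ * is client-specific and elided here. Handles are assumptions.
+ *
+ *     NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS suspendParams = { 0 };
+ *     status = NvRmControl(hClient, hDebugger,
+ *                          NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT,
+ *                          &suspendParams, sizeof(suspendParams));
+ *     if (status == NV_OK && suspendParams.waitForEvent)
+ *         ... wait for the preempt-complete event here ...
+ *
+ *     status = NvRmControl(hClient, hDebugger,
+ *                          NV83DE_CTRL_CMD_DEBUG_RESUME_CONTEXT, NULL, 0);
+ */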
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_RESUME_CONTEXT
+ *
+ * This command safely resumes the SM context associated with the debugger
+ * object.
+ *
+ * This is a Fermi-and-later feature. If this API call is issued on an earlier
+ * platform, it will return an NV_ERR_NOT_SUPPORTED error.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_CLIENT
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV83DE_CTRL_CMD_DEBUG_RESUME_CONTEXT (0x83de0318) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x18" */
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_GET_HANDLES
+ *
+ * This command returns relevant handles for the debug object.
+ * This command is only available on debug and develop builds.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_CLIENT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ */
+#define NV83DE_CTRL_CMD_DEBUG_GET_HANDLES (0x83de0319) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x19" */
+
+typedef struct NV83DE_CTRL_DEBUG_GET_HANDLES_PARAMS {
+    NvHandle hChannel;
+    NvHandle hSubdevice;
+} NV83DE_CTRL_DEBUG_GET_HANDLES_PARAMS;
+
+/*
+ * NV83DE_CTRL_CMD_READ_SURFACE
+ *
+ * This command allows the caller to copy the data from a specified GPU VA
+ * to a usermode buffer. Before copying, this command validates whether or
+ * not the virtual address (VA) range provided as input has valid and allocated
+ * pages mapped to it in its entirety.
+ *
+ * This command's input is NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS, which
+ * contains a buffer of NV83DE_CTRL_DEBUG_ACCESS_OPs.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_XLATE
+ */
+#define NV83DE_CTRL_CMD_READ_SURFACE (0x83de031a) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x1A" */
+
+/*
+ * NV83DE_CTRL_CMD_WRITE_SURFACE
+ *
+ * This command allows the caller to copy the data from a provided usermode
+ * buffer to a specified GPU VA. Before copying, this command validates whether
+ * or not the virtual address (VA) range provided as input has valid and
+ * allocated pages mapped to it in its entirety.
+ *
+ * This command's input is NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS, which
+ * contains a buffer of NV83DE_CTRL_DEBUG_ACCESS_OPs.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_XLATE
+ */
+#define NV83DE_CTRL_CMD_WRITE_SURFACE (0x83de031b) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x1B" */
+
+#define MAX_ACCESS_OPS 64
+
+typedef struct NV83DE_CTRL_DEBUG_ACCESS_OP {
+    NV_DECLARE_ALIGNED(NvU64 gpuVA, 8);   // IN
+    NV_DECLARE_ALIGNED(NvP64 pCpuVA, 8);  // IN/OUT Debugger CPU pointer of buffer
+    NvU32 size;                           // IN     Size in bytes
+    NvU32 valid;                          // OUT    Whether the GpuVA is accessible
+} NV83DE_CTRL_DEBUG_ACCESS_OP;
+
+typedef struct NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS {
+    NvU32 count;                          // IN     Number of ops in this call
+    NV_DECLARE_ALIGNED(NV83DE_CTRL_DEBUG_ACCESS_OP opsBuffer[MAX_ACCESS_OPS], 8);
+} NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS;
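+
+/*
+ * Example (illustrative sketch, not part of this header): reading one block
+ * of GPU VA into a user buffer via the ops list, assuming the
+ * NV_PTR_TO_NvP64() helper from nvtypes.h. gpuVA, userBuffer, readSize, and
+ * the handles are assumptions supplied by the caller.
+ *
+ *     NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS surfParams = { 0 };
+ *     surfParams.count               = 1;
+ *     surfParams.opsBuffer[0].gpuVA  = gpuVA;
+ *     surfParams.opsBuffer[0].pCpuVA = NV_PTR_TO_NvP64(userBuffer);
+ *     surfParams.opsBuffer[0].size   = readSize;
+ *     status = NvRmControl(hClient, hDebugger,
+ *                          NV83DE_CTRL_CMD_READ_SURFACE,
+ *                          &surfParams, sizeof(surfParams));
+ *     if (status == NV_OK && !surfParams.opsBuffer[0].valid)
+ *         ... the VA range was not fully mapped ...
+ */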
+
+/*
+ * NV83DE_CTRL_CMD_GET_MAPPINGS
+ *
+ * This command traverses the virtual memory page hierarchy and fetches the
+ * valid virtual mappings and their sizes for a provided virtual address (VA)
+ * range.
+ * If a given VA range has more than MAX_GET_MAPPINGS_OPS valid mappings,
+ * hasMore is set to 1, and opsBuffer is still filled with MAX_GET_MAPPINGS_OPS
+ * valid mappings. In this case, this command should be called again with
+ *     vaLo = opsBuffer[MAX_GET_MAPPINGS_OPS - 1].gpuVA +
+ *            opsBuffer[MAX_GET_MAPPINGS_OPS - 1].size;
+ * and vaHi set to the next desired upper limit.
+ *
+ * This command's input is NV83DE_CTRL_DEBUG_GET_MAPPINGS_PARAMETERS, which
+ * contains a buffer of NV83DE_CTRL_DEBUG_GET_MAPPINGS_OPs.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_XLATE
+ */
+#define NV83DE_CTRL_CMD_GET_MAPPINGS (0x83de031c) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x1C" */
+
+#define MAX_GET_MAPPINGS_OPS 64
+
+typedef struct NV83DE_CTRL_DEBUG_GET_MAPPINGS_OP {
+    NV_DECLARE_ALIGNED(NvU64 gpuVA, 8);  // OUT Start of GPU VA for this mapping
+    NvU32 size;                          // OUT Size in bytes of this mapping
+} NV83DE_CTRL_DEBUG_GET_MAPPINGS_OP;
+
+typedef struct NV83DE_CTRL_DEBUG_GET_MAPPINGS_PARAMETERS {
+    NV_DECLARE_ALIGNED(NvU64 vaLo, 8);   // IN  Lower VA range, inclusive
+    NV_DECLARE_ALIGNED(NvU64 vaHi, 8);   // IN  Upper VA range, inclusive
+    NvU32 count;                         // OUT Number of ops in this call
+    NvU32 hasMore;                       // OUT Whether there are more valid mappings in this range than MAX_GET_MAPPINGS_OPS
+    NV_DECLARE_ALIGNED(NV83DE_CTRL_DEBUG_GET_MAPPINGS_OP opsBuffer[MAX_GET_MAPPINGS_OPS], 8);
+} NV83DE_CTRL_DEBUG_GET_MAPPINGS_PARAMETERS;
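+
+/*
+ * Example (illustrative sketch, not part of this header): enumerating all
+ * valid mappings in [vaLo, vaHi] using the hasMore re-invocation protocol
+ * described above. Handles and the VA bounds are assumptions supplied by the
+ * caller.
+ *
+ *     NV83DE_CTRL_DEBUG_GET_MAPPINGS_PARAMETERS mapParams = { 0 };
+ *     mapParams.vaLo = vaLo;
+ *     mapParams.vaHi = vaHi;
+ *     do
+ *     {
+ *         status = NvRmControl(hClient, hDebugger,
+ *                              NV83DE_CTRL_CMD_GET_MAPPINGS,
+ *                              &mapParams, sizeof(mapParams));
+ *         if (status != NV_OK || mapParams.count == 0)
+ *             break;
+ *         ... consume mapParams.opsBuffer[0 .. mapParams.count - 1] ...
+ *         mapParams.vaLo = mapParams.opsBuffer[mapParams.count - 1].gpuVA +
+ *                          mapParams.opsBuffer[mapParams.count - 1].size;
+ *     } while (mapParams.hasMore);
+ */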
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_EXEC_REG_OPS
+ *
+ * This command is used to submit a buffer containing one or more
+ * NV2080_CTRL_GPU_REG_OP structures for processing. Each entry in the
+ * buffer specifies a single read or write operation. Each entry is checked
+ * for validity in an initial pass over the buffer, with the results for
+ * each operation stored in the corresponding regStatus field. Unless the
+ * bNonTransactional flag is set to NV_TRUE, if any invalid entries are found
+ * during this initial pass then none of the operations are executed. Entries
+ * are processed in order within each regType, with NV2080_CTRL_GPU_REG_OP_TYPE_GLOBAL
+ * entries processed first, followed by NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX entries.
+ *
+ * [IN] bNonTransactional
+ *     This field specifies if the command is non-transactional, i.e. if set
+ *     to NV_TRUE, all the valid operations will be executed.
+ *
+ * [IN] regOpCount
+ *     This field specifies the number of valid entries in the regOps list.
+ *
+ * [IN/OUT] regOps
+ *     This field is to be filled with the register operations to perform;
+ *     the results are returned in place.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV83DE_CTRL_CMD_DEBUG_EXEC_REG_OPS (0x83de031d) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_GPU_EXEC_REG_OPS_MAX_OPS 100
+#define NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS_MESSAGE_ID (0x1DU)
+
+typedef struct NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS {
+    NvBool bNonTransactional;
+    NvU32  regOpCount;
+    // C form: NV2080_CTRL_GPU_REG_OP regOps[NV2080_CTRL_GPU_EXEC_REG_OPS_MAX_OPS]
+    NV2080_CTRL_GPU_REG_OP regOps[NV83DE_CTRL_GPU_EXEC_REG_OPS_MAX_OPS];
+} NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS;
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_SET_MODE_ERRBAR_DEBUG
+ *
+ * This command sets the Errbar debug mode. This is a Volta-onwards feature.
+ * If the call is made on an incorrect platform (for example, pre-Volta)
+ * it will return an NV_ERR_NOT_SUPPORTED error.
+ *
+ * action
+ *     The possible action values are:
+ *     - NV83DE_CTRL_CMD_DEBUG_SET_MODE_ERRBAR_DEBUG_ENABLE
+ *         This enables the Errbar debug mode.
+ *
+ *     - NV83DE_CTRL_CMD_DEBUG_SET_MODE_ERRBAR_DEBUG_DISABLE
+ *         This disables the Errbar debug mode.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_WRITE
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV83DE_CTRL_CMD_DEBUG_SET_MODE_ERRBAR_DEBUG (0x83de031f) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS_MESSAGE_ID (0x1FU)
+
+typedef struct NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS {
+    NvU32 action;
+} NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS;
+
+#define NV83DE_CTRL_CMD_DEBUG_SET_MODE_ERRBAR_DEBUG_DISABLE (0x00000000)
+#define NV83DE_CTRL_CMD_DEBUG_SET_MODE_ERRBAR_DEBUG_ENABLE  (0x00000001)
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_GET_MODE_ERRBAR_DEBUG
+ *
+ * This command gets the value of the currently configured Errbar debug mode.
+ * This is a Volta-onwards feature. If the query is made on an incorrect
+ * platform (for example, pre-Volta) the call will return an
+ * NV_ERR_NOT_SUPPORTED error.
+ *
+ * value
+ *     This parameter returns the configured value.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_READ
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV83DE_CTRL_CMD_DEBUG_GET_MODE_ERRBAR_DEBUG (0x83de0320) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_GET_MODE_ERRBAR_DEBUG_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_GET_MODE_ERRBAR_DEBUG_PARAMS_MESSAGE_ID (0x20U)
+
+typedef struct NV83DE_CTRL_DEBUG_GET_MODE_ERRBAR_DEBUG_PARAMS {
+    NvU32 value;
+} NV83DE_CTRL_DEBUG_GET_MODE_ERRBAR_DEBUG_PARAMS;
+
+#define NV83DE_CTRL_CMD_DEBUG_GET_MODE_ERRBAR_DEBUG_DISABLED (0x00000000)
+#define NV83DE_CTRL_CMD_DEBUG_GET_MODE_ERRBAR_DEBUG_ENABLED  (0x00000001)
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_SINGLE_STEP
+ *
+ * This command either enables or disables single-step mode for the given SM.
+ *
+ * smID (input)
+ *     This identifies the SM.
+ * bSingleStep (input)
+ *     This indicates the single-step mode. NV_TRUE for ENABLED.
+ */
+#define NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_SINGLE_STEP (0x83de0321) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS_MESSAGE_ID (0x21U)
+
+typedef struct NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS {
+    NvU32  smID;
+    NvBool bSingleStep;
+} NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS;
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_STOP_TRIGGER
+ *
+ * This command sets or clears the stop trigger for the given SM.
+ *
+ * smID (input)
+ *     This identifies the SM.
+ * bStopTrigger (input)
+ *     This indicates whether to set or clear the trigger. NV_TRUE for ENABLED.
+ */
+#define NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_STOP_TRIGGER (0x83de0322) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_SINGLE_SM_STOP_TRIGGER_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_SET_SINGLE_SM_STOP_TRIGGER_BROADCAST ((NvU32)~0)
+
+#define NV83DE_CTRL_DEBUG_SET_SINGLE_SM_STOP_TRIGGER_PARAMS_MESSAGE_ID (0x22U)
+
+typedef struct NV83DE_CTRL_DEBUG_SET_SINGLE_SM_STOP_TRIGGER_PARAMS {
+    NvU32  smID;
+    NvBool bStopTrigger;
+} NV83DE_CTRL_DEBUG_SET_SINGLE_SM_STOP_TRIGGER_PARAMS;
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_RUN_TRIGGER
+ *
+ * This command sets or clears the run trigger for the given SM.
+ *
+ * smID (input)
+ *     This identifies the SM.
+ * bRunTrigger (input)
+ *     This indicates whether to set or clear the trigger. NV_TRUE for ENABLED.
+ */
+#define NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_RUN_TRIGGER (0x83de0323) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_SINGLE_SM_RUN_TRIGGER_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_SET_SINGLE_SM_RUN_TRIGGER_PARAMS_MESSAGE_ID (0x23U)
+
+typedef struct NV83DE_CTRL_DEBUG_SET_SINGLE_SM_RUN_TRIGGER_PARAMS {
+    NvU32  smID;
+    NvBool bRunTrigger;
+} NV83DE_CTRL_DEBUG_SET_SINGLE_SM_RUN_TRIGGER_PARAMS;
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT
+ *
+ * This command enables or disables skip idle warp detect for the given SM.
+ *
+ * smID (input)
+ *     This identifies the SM.
+ * bSkipIdleWarpDetect (input)
+ *     This indicates whether to enable or disable the mode. NV_TRUE for ENABLED.
+ */
+#define NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT (0x83de0324) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT_PARAMS_MESSAGE_ID (0x24U)
+
+typedef struct NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT_PARAMS {
+    NvU32  smID;
+    NvBool bSkipIdleWarpDetect;
+} NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT_PARAMS;
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS
+ *
+ * This command retrieves the debugger status states of the given SM.
+ *
+ * smID (input)
+ *     This identifies the SM.
+ * bInTrapMode (output)
+ *     This indicates whether the SM is in trap mode.
+ * bCrsFlushDone (output)
+ *     Deprecated GK110+. Always 0 Volta+.
+ * bRunTriggerInProgress (output)
+ *     Deprecated GM10X+. Always 0 Volta+.
+ * bComputeContext (output)
+ *     Deprecated GM10X+. Always 0 Volta+.
+ * bLockedDown (output)
+ *     This indicates whether the SM is locked down.
+ */
+#define NV83DE_CTRL_CMD_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS (0x83de0325) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV83DE_CTRL_DEBUG_SINGLE_SM_DEBUGGER_STATUS {
+    NvBool bInTrapMode;
+    NvBool bCrsFlushDone;
+    NvBool bRunTriggerInProgress;
+    NvBool bComputeContext;
+    NvBool bLockedDown;
+} NV83DE_CTRL_DEBUG_SINGLE_SM_DEBUGGER_STATUS;
+
+#define NV83DE_CTRL_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS_PARAMS_MESSAGE_ID (0x25U)
+
+typedef struct NV83DE_CTRL_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS_PARAMS {
+    NvU32 smID;
+    NV83DE_CTRL_DEBUG_SINGLE_SM_DEBUGGER_STATUS smDebuggerStatus;
+} NV83DE_CTRL_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS_PARAMS;
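+
+/*
+ * Example (illustrative sketch, not part of this header): polling whether a
+ * given SM has locked down after a stop trigger. Handles and smID are
+ * assumptions supplied by the caller.
+ *
+ *     NV83DE_CTRL_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS_PARAMS stParams = { 0 };
+ *     stParams.smID = smID;
+ *     status = NvRmControl(hClient, hDebugger,
+ *                          NV83DE_CTRL_CMD_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS,
+ *                          &stParams, sizeof(stParams));
+ *     if (status == NV_OK && stParams.smDebuggerStatus.bLockedDown)
+ *         ... the SM is stopped and safe to inspect ...
+ */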
+
+/*!
+ * NV83DE_CTRL_CMD_DEBUG_ACCESS_MEMORY_ENTRY
+ *
+ * This struct represents a request to read/write a block of memory.
+ *
+ * hMemory [IN]
+ *     The handle to the memory being accessed. If hMemory is not accessible
+ *     from the caller's address space, NV_ERR_INSUFFICIENT_PERMISSIONS
+ *     is returned.
+ *
+ * length [IN]
+ *     Number of bytes to read/write.
+ *
+ * memOffset [IN]
+ *     The offset into the physical memory region given by the handle above.
+ *
+ * dataOffset [IN]
+ *     An offset into the usermode memory region provided by the enclosing
+ *     params indicating where to read/write data from/to.
+ *
+ * status [OUT]
+ *     The result status of the operation. If NV_OK, the given operation was
+ *     successful, even if the command itself returned an error status. If not
+ *     NV_OK, it is guaranteed that the command will return an error status.
+ */
+typedef struct NV83DE_CTRL_DEBUG_ACCESS_MEMORY_ENTRY {
+    NvHandle  hMemory;
+    NvU32     length;
+    NV_DECLARE_ALIGNED(NvU64 memOffset, 8);
+    NvU32     dataOffset;
+    NV_STATUS status;
+} NV83DE_CTRL_DEBUG_ACCESS_MEMORY_ENTRY;
+
+/*!
+ * NV83DE_CTRL_CMD_DEBUG_READ_BATCH_MEMORY
+ *
+ * Execute a batch of read memory operations.
+ *
+ * count [IN]
+ *     Number of read/write operations to perform.
+ *
+ * dataLength [IN]
+ *     Length of the usermode buffer passed in, in bytes.
+ *
+ * pData [OUT]
+ *     Usermode buffer to store the output of the read operations. Each
+ *     operation is expected to provide an offset into this buffer.
+ *
+ * entries [IN]
+ *     List of operations to perform. The first `count` entries are used.
+ */
+#define NV83DE_CTRL_CMD_DEBUG_READ_BATCH_MEMORY (0x83de0326) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x26" */
+
+/*!
+ * NV83DE_CTRL_CMD_DEBUG_WRITE_BATCH_MEMORY
+ *
+ * Execute a batch of write memory operations.
+ *
+ * count [IN]
+ *     Number of read/write operations to perform.
+ *
+ * dataLength [IN]
+ *     Length of the usermode buffer passed in, in bytes.
+ *
+ * pData [IN]
+ *     Usermode buffer holding the input of the write operations. Each
+ *     operation is expected to provide an offset into this buffer.
+ *
+ * entries [IN]
+ *     List of operations to perform. The first `count` entries are used.
+ */
+#define NV83DE_CTRL_CMD_DEBUG_WRITE_BATCH_MEMORY (0x83de0327) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x27" */
+
+#define MAX_ACCESS_MEMORY_OPS 150
+typedef struct NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pData, 8);
+    NvU32 dataLength;
+    NvU32 count;
+    NV_DECLARE_ALIGNED(NV83DE_CTRL_DEBUG_ACCESS_MEMORY_ENTRY entries[MAX_ACCESS_MEMORY_OPS], 8);
+} NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS;
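+
+/*
+ * Example (illustrative sketch, not part of this header): reading two blocks
+ * of memory in one batch. Each entry's dataOffset places its bytes within the
+ * single user buffer, assuming the NV_PTR_TO_NvP64() helper from nvtypes.h;
+ * hMemA, hMemB, and the handles are assumptions supplied by the caller.
+ *
+ *     NvU8 data[0x300];
+ *     NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS batch = { 0 };
+ *     batch.pData      = NV_PTR_TO_NvP64(data);
+ *     batch.dataLength = sizeof(data);
+ *     batch.count      = 2;
+ *     batch.entries[0].hMemory    = hMemA;
+ *     batch.entries[0].length     = 0x100;
+ *     batch.entries[0].memOffset  = 0;
+ *     batch.entries[0].dataOffset = 0;
+ *     batch.entries[1].hMemory    = hMemB;
+ *     batch.entries[1].length     = 0x200;
+ *     batch.entries[1].memOffset  = 0x1000;
+ *     batch.entries[1].dataOffset = 0x100;
+ *     status = NvRmControl(hClient, hDebugger,
+ *                          NV83DE_CTRL_CMD_DEBUG_READ_BATCH_MEMORY,
+ *                          &batch, sizeof(batch));
+ *     (on return, check batch.entries[i].status per operation)
+ */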
+
+/*
+ * NV83DE_CTRL_DEBUG_READ_MMU_FAULT_INFO_ENTRY
+ *
+ * faultAddress (OUT)
+ *     Faulting address
+ * faultType (OUT)
+ *     Type of MMU fault
+ * accessType (OUT)
+ *     Type of access that caused this fault
+ */
+#define NV83DE_CTRL_DEBUG_READ_MMU_FAULT_INFO_MAX_ENTRIES 4
+typedef struct NV83DE_CTRL_DEBUG_READ_MMU_FAULT_INFO_ENTRY {
+    NV_DECLARE_ALIGNED(NvU64 faultAddress, 8);
+    NvU32 faultType;
+    NvU32 accessType;
+} NV83DE_CTRL_DEBUG_READ_MMU_FAULT_INFO_ENTRY;
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_READ_MMU_FAULT_INFO
+ *
+ * mmuFaultInfoList (OUT)
+ *     Entries of MMU faults recorded for the attached context. The cached data
+ *     is only cleared via an LRU policy when new MMU faults arrive, so repeat
+ *     data may be returned by this command.
+ * count (OUT)
+ *     Number of MMU fault entries that contain valid data.
+ */
+#define NV83DE_CTRL_CMD_DEBUG_READ_MMU_FAULT_INFO (0x83de0328) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_READ_MMU_FAULT_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_READ_MMU_FAULT_INFO_PARAMS_MESSAGE_ID (0x28U)
+
+typedef struct NV83DE_CTRL_DEBUG_READ_MMU_FAULT_INFO_PARAMS {
+    NV_DECLARE_ALIGNED(NV83DE_CTRL_DEBUG_READ_MMU_FAULT_INFO_ENTRY mmuFaultInfoList[NV83DE_CTRL_DEBUG_READ_MMU_FAULT_INFO_MAX_ENTRIES], 8);
+    NvU32 count;
+} NV83DE_CTRL_DEBUG_READ_MMU_FAULT_INFO_PARAMS;
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_SET_DROP_DEFERRED_RC
+ *
+ * bDropDeferredRc (IN)
+ *     This indicates whether the debugger wants a fault to eventually trigger
+ *     RC on teardown, or to be dropped.
+ */
+#define NV83DE_CTRL_CMD_DEBUG_SET_DROP_DEFERRED_RC (0x83de0329) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_DROP_DEFERRED_RC_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_SET_DROP_DEFERRED_RC_PARAMS_MESSAGE_ID (0x29U)
+
+typedef struct NV83DE_CTRL_DEBUG_SET_DROP_DEFERRED_RC_PARAMS {
+    NvBool bDropDeferredRc;
+} NV83DE_CTRL_DEBUG_SET_DROP_DEFERRED_RC_PARAMS;
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_GCC_DEBUG
+ *
+ * This command sets the MMU GCC debug mode. This is a Blackwell-onwards
+ * feature. If the call is made on an incorrect platform (for example,
+ * pre-Blackwell) it will return an NV_ERR_NOT_SUPPORTED error.
+ *
+ * action
+ *     The possible action values are:
+ *     - NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_GCC_DEBUG_ENABLE
+ *         This enables the MMU GCC debug mode if possible. If, however, any
+ *         other client has already disabled the mode (via an NV83DE call),
+ *         then this operation returns NV_ERR_STATE_IN_USE.
+ *
+ *     - NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_GCC_DEBUG_DISABLE
+ *         This disables the MMU GCC debug mode if possible. If, however, any
+ *         other client has already enabled the mode (via an NV83DE call),
+ *         then this operation returns NV_ERR_STATE_IN_USE.
+ *
+ *     - NV83DE_CTRL_CMD_DEBUG_RELEASE_MMU_GCC_DEBUG_REQUESTS
+ *         This operation releases all the client's outstanding requests to
+ *         enable or disable the MMU debug mode.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_CLIENT
+ *   NV_ERR_OBJECT_NOT_FOUND
+ */
+#define NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_GCC_DEBUG (0x83de032a) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS_MESSAGE_ID (0x2AU)
+
+typedef struct NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS {
+    NvU32 action;
+} NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS;
+
+#define NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_GCC_DEBUG_ENABLE  (0x00000001)
+#define NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_GCC_DEBUG_DISABLE (0x00000002)
+#define NV83DE_CTRL_CMD_DEBUG_RELEASE_MMU_GCC_DEBUG_REQUESTS (0x00000003)
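+
+/*
+ * Example (illustrative sketch, not part of this header): requesting the MMU
+ * GCC debug mode and backing out if another client holds the opposite
+ * setting. Handles are assumptions supplied by the caller.
+ *
+ *     NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS gccParams = { 0 };
+ *     gccParams.action = NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_GCC_DEBUG_ENABLE;
+ *     status = NvRmControl(hClient, hDebugger,
+ *                          NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_GCC_DEBUG,
+ *                          &gccParams, sizeof(gccParams));
+ *     if (status == NV_ERR_STATE_IN_USE)
+ *         ... another client pinned the opposite mode; handle gracefully ...
+ */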
+
+/*
+ * NV83DE_CTRL_CMD_DEBUG_GET_MODE_MMU_GCC_DEBUG
+ *
+ * This command gets the value of the currently configured MMU GCC debug mode.
+ * This is a Blackwell-onwards feature. If the query is made on an incorrect
+ * platform (for example, pre-Blackwell) the call will return an
+ * NV_ERR_NOT_SUPPORTED error.
+ *
+ * value
+ *     This parameter returns the configured value.
+ *
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_CLIENT
+ *   NV_ERR_OBJECT_NOT_FOUND
+ */
+#define NV83DE_CTRL_CMD_DEBUG_GET_MODE_MMU_GCC_DEBUG (0x83de032b) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS_MESSAGE_ID" */
+
+#define NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS_MESSAGE_ID (0x2BU)
+
+typedef struct NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS {
+    NvU32 value;
+} NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS;
+
+#define NV83DE_CTRL_CMD_DEBUG_GET_MODE_MMU_GCC_DEBUG_ENABLED  (0x00000001)
+#define NV83DE_CTRL_CMD_DEBUG_GET_MODE_MMU_GCC_DEBUG_DISABLED (0x00000002)
+
+/* _ctrl83dedebug_h_ */
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl906f.h b/src/common/sdk/nvidia/inc/ctrl/ctrl906f.h
new file mode 100644
index 0000000..2e9c9b9
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl906f.h
@@ -0,0 +1,222 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2007-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl906f.finn
+//
+
+
+
+
+/* GF100_GPFIFO control commands and parameters */
+
+#include "ctrl/ctrlxxxx.h"
+#define NV906F_CTRL_CMD(cat,idx)  \
+    NVXXXX_CTRL_CMD(0x906F, NV906F_CTRL_##cat, idx)
+
+/* GF100_GPFIFO command categories (6bits) */
+#define NV906F_CTRL_RESERVED (0x00)
+#define NV906F_CTRL_GPFIFO   (0x01)
+
+
+/*
+ * NV906F_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV906F_CTRL_CMD_NULL (0x906f0000) /* finn: Evaluated from "(FINN_GF100_CHANNEL_GPFIFO_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+
+
+
+
+/*
+ * NV906F_CTRL_GET_CLASS_ENGINEID
+ *
+ * Takes an object handle as input and returns
+ * the class and engine that this object uses.
+ *
+ * hObject
+ *     Handle to an object created by the client; for example, a
+ *     handle to an object of type FERMI_A. This is supplied by the
+ *     client of this call.
+ *
+ * classEngineID
+ *     A concatenation of the class and engine ID
+ *     that the object with handle hObject
+ *     belongs to. This is returned by RM. The internal
+ *     format of this data structure is opaque to clients.
+ *
+ * classID
+ *     ClassID for the object represented by hObject.
+ *
+ * engineID
+ *     EngineID for the object represented by hObject.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *     If the call was successful.
+ *
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *     No object of handle hObject was found.
+ */
+#define NV906F_CTRL_GET_CLASS_ENGINEID (0x906f0101) /* finn: Evaluated from "(FINN_GF100_CHANNEL_GPFIFO_GPFIFO_INTERFACE_ID << 8) | NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS_MESSAGE_ID" */
+
+#define NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS {
+    NvHandle hObject;
+    NvU32    classEngineID;
+    NvU32    classID;
+    NvU32    engineID;
+} NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS;
+
+/*
+ * NV906F_CTRL_RESET_CHANNEL
+ *
+ * This command resets the channel corresponding to the specified engine and
+ * also resets the specified engine.
+ *
+ * Takes an engine ID as input.
+ *
+ * engineID
+ *     This parameter specifies the engine to be reset. See the description of
+ *     the NV2080_ENGINE_TYPE values in cl2080.h for more information.
+ * subdeviceInstance
+ *     This parameter specifies the subdevice to be reset when in SLI.
+ * resetReason
+ *     Specifies the reason for resetting the channel.
+ * bIsRcPending
+ *     Specifies if an RC is pending on the channel.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV906F_CTRL_CMD_RESET_CHANNEL_REASON_DEFAULT  0
+#define NV906F_CTRL_CMD_RESET_CHANNEL_REASON_VERIF    1
+#define NV906F_CTRL_CMD_RESET_CHANNEL_REASON_MMU_FLT  2
+#define NV906F_CTRL_CMD_RESET_CHANNEL_REASON_ENUM_MAX 3
+/*
+ * Internal values for NV906F_CTRL_CMD_RESET_REASON. External values will be
+ * checked and enforced to be < NV906F_CTRL_CMD_RESET_CHANNEL_REASON_ENUM_MAX.
+ */
+#define NV906F_CTRL_CMD_INTERNAL_RESET_CHANNEL_REASON_FAKE_ERROR (0x4) /* finn: Evaluated from "NV906F_CTRL_CMD_RESET_CHANNEL_REASON_ENUM_MAX + 1" */
+
+
+#define NV906F_CTRL_CMD_RESET_CHANNEL (0x906f0102) /* finn: Evaluated from "((FINN_GF100_CHANNEL_GPFIFO_GPFIFO_INTERFACE_ID << 8) | NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS_MESSAGE_ID)" */
+
+#define NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS {
+    NvU32  engineID;
+    NvU32  subdeviceInstance;
+    NvU32  resetReason;
+    NvBool bIsRcPending;
+} NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS;
+
+
+
+/*
+ * NV906F_CTRL_CMD_GET_DEFER_RC_STATE
+ *
+ * If the SM Debugger is attached, then on an MMU fault, RM defers the RC error
+ * recovery and keeps a flag indicating that RC is deferred. This command
+ * checks whether or not deferred RC is pending in RM for the associated
+ * channel.
+ *
+ * bDeferRCPending
+ *     The output is NV_TRUE if a deferred RC is pending, and NV_FALSE
+ *     otherwise.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+
+#define NV906F_CTRL_CMD_GET_DEFER_RC_STATE (0x906f0105) /* finn: Evaluated from "(FINN_GF100_CHANNEL_GPFIFO_GPFIFO_INTERFACE_ID << 8) | NV906F_CTRL_CMD_GET_DEFER_RC_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV906F_CTRL_CMD_GET_DEFER_RC_STATE_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV906F_CTRL_CMD_GET_DEFER_RC_STATE_PARAMS {
+    NvBool bDeferRCPending;
+} NV906F_CTRL_CMD_GET_DEFER_RC_STATE_PARAMS;
+
+#define NV906F_CTRL_CMD_GET_MMU_FAULT_INFO (0x906f0106) /* finn: Evaluated from "(FINN_GF100_CHANNEL_GPFIFO_GPFIFO_INTERFACE_ID << 8) | NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS_MESSAGE_ID" */
+
+/*
+ * Shader types supported by MMU fault info.
+ * The types before compute shader refer to NV9097_SET_PIPELINE_SHADER_TYPE.
+ */
+#define NV906F_CTRL_MMU_FAULT_SHADER_TYPE_VERTEX_CULL_BEFORE_FETCH 0x00000000
+#define NV906F_CTRL_MMU_FAULT_SHADER_TYPE_VERTEX                   0x00000001
+#define NV906F_CTRL_MMU_FAULT_SHADER_TYPE_TESSELLATION_INIT        0x00000002
+#define NV906F_CTRL_MMU_FAULT_SHADER_TYPE_TESSELLATION             0x00000003
+#define NV906F_CTRL_MMU_FAULT_SHADER_TYPE_GEOMETRY                 0x00000004
+#define NV906F_CTRL_MMU_FAULT_SHADER_TYPE_PIXEL                    0x00000005
+#define NV906F_CTRL_MMU_FAULT_SHADER_TYPE_COMPUTE                  0x00000006
+#define NV906F_CTRL_MMU_FAULT_SHADER_TYPES                         7
+
+/*
+ * NV906F_CTRL_CMD_GET_MMU_FAULT_INFO
+ *
+ * This command returns MMU fault information for a given channel. The MMU
+ * fault information will be cleared once this command is executed.
+ *
+ * addrHi [out]
+ *     Upper 32 bits of the faulting address.
+ * addrLo [out]
+ *     Lower 32 bits of the faulting address.
+ * faultType [out]
+ *     MMU fault type. Please see NV_PFIFO_INTR_MMU_FAULT_INFO_TYPE_* in
+ *     dev_fifo.h for details about MMU fault types.
+ * faultString [out]
+ *     String indicating the MMU fault type.
+ * shaderProgramVA [out]
+ *     An array of shader program virtual addresses indicating the faulted
+ *     shaders in the pipeline.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV906F_CTRL_MMU_FAULT_STRING_LEN 32
+#define NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS {
+    NvU32 addrHi;
+    NvU32 addrLo;
+    NvU32 faultType;
+    char  faultString[NV906F_CTRL_MMU_FAULT_STRING_LEN];
+    NV_DECLARE_ALIGNED(NvU64 shaderProgramVA[NV906F_CTRL_MMU_FAULT_SHADER_TYPES], 8);
+} NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS;
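+
+/*
+ * Example (illustrative sketch, not part of this header): retrieving and
+ * assembling the 64-bit faulting address after an MMU fault. hClient,
+ * hChannel, and the NvRmControl() entry point are assumed to come from the
+ * caller's existing RM client setup.
+ *
+ *     NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS faultParams = { 0 };
+ *     status = NvRmControl(hClient, hChannel,
+ *                          NV906F_CTRL_CMD_GET_MMU_FAULT_INFO,
+ *                          &faultParams, sizeof(faultParams));
+ *     if (status == NV_OK)
+ *     {
+ *         NvU64 faultAddr = ((NvU64)faultParams.addrHi << 32) |
+ *                           faultParams.addrLo;
+ *         ... report faultAddr and faultParams.faultString ...
+ *     }
+ */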
+
+/* _ctrl906f.h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h b/src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h
new file mode 100644
index 0000000..6113271
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h
@@ -0,0 +1,174 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl90cd.finn
+//
+
+#include "ctrl/ctrlxxxx.h"
+/* NV_EVENT_BUFFER control commands and parameters */
+
+#define NV_EVENT_BUFFER_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x90CD, NV90CD_CTRL_##cat, idx)
+
+#define NV90CD_CTRL_RESERVED (0x00)
+#define NV90CD_CTRL_EVENT    (0x01)
+
+/*
+* NV_EVENT_BUFFER_CTRL_CMD_NULL
+*
+* This command does nothing.
+* This command does not take any parameters.
+*
+* Possible status values returned are:
+*   NV_OK
+*/
+#define NV_EVENT_BUFFER_CTRL_CMD_NULL (0x90cd0000) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+
+
+
+/*
+* NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS
+* This interface enables all the events that are associated with the event buffer.
+*/
+#define NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS (0x90cd0101) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID << 8) | 0x1" */
+
+#define NV_EVENT_BUFFER_FLAG 0:32
+
+/*
+* NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY
+* This flag defines the kernel behavior when the buffer is full.
+*
+* DEFAULT/DISABLED: By default the kernel doesn't assume any policy. To enable
+* events, an overflow policy has to be set to retain either older or newer
+* events.
+*
+* KEEP_OLDEST: the kernel retains older events and drops newer events if the
+* buffer is full.
+*
+* KEEP_NEWEST: the kernel retains newer events and drops older events if the
+* buffer is full.
+*
+*/
+#define NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY             0:1
+#define NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_DISABLED    0
+#define NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_OLDEST 1
+#define NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_NEWEST 2
+#define NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_DEFAULT     NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_DISABLED
+
+/*
+* NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS
+*
+* enable [IN]
+*     This field is used to enable or disable events.
+*
+* flags [IN]
+*     This field sets the NV_EVENT_BUFFER_FLAG parameter used to configure
+*     event buffer overflow options.
+*
+* Possible status values returned are:
+*   NV_OK
+*   NV_ERR_INVALID_ARGUMENT
+*/
typedef struct NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS {
+    NvBool enable;
+    NvU32  flags;
+} NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS;
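+
+/*
+* Example (illustrative sketch, not part of this header): enabling events with
+* the keep-oldest overflow policy, assuming the policy field occupies the low
+* bits of flags per the range above. hClient, hEventBuffer, and the
+* NvRmControl() entry point are assumptions supplied by the caller.
+*
+*     NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS enableParams = { 0 };
+*     enableParams.enable = NV_TRUE;
+*     enableParams.flags  = NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_OLDEST;
+*     status = NvRmControl(hClient, hEventBuffer,
+*                          NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS,
+*                          &enableParams, sizeof(enableParams));
+*/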
+
+/*
+* NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET
+* This interface allows the user to update the get pointers.
+* This call is useful in the KEEP_OLDEST policy to update the free space
+* available in the buffer.
+* In the keep-oldest policy, the kernel adds new entries in the buffer only if
+* there is free space.
+* The full/empty decision is made as follows:
+*   - when GET==PUT, the fifo is empty
+*   - when GET==PUT+1, the fifo is full
+*/
+#define NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET (0x90cd0102) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID << 8) | 0x2" */
+
+/*
+* NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS
+*
+* recordBufferGet [IN]
+*     Value to be used to update the get offset of the record buffer.
+*
+* varDataBufferGet [IN]
+*     This is the buffer offset up to which the user has consumed the varData
+*     buffer.
+*
+* Possible status values returned are:
+*   NV_OK
+*   NV_ERR_INVALID_ARGUMENT: if any of the get offsets is greater than the
+*   respective buffer size.
+*/
typedef struct NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS {
+    NvU32 recordBufferGet;
+    NvU32 varDataBufferGet;
+} NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS;
+
+/*
+ * Send a test event-buffer notification (verification-only)
+ */
+#define NV_EVENT_BUFFER_CTRL_CMD_VERIF_NOTIFY (0x90cd0103) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID << 8) | 0x3" */
+
+/*
+ * Synchronous flush
+ */
+#define NV_EVENT_BUFFER_CTRL_CMD_FLUSH (0x90cd0104) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID << 8) | 0x4" */
+
+/*
+ * Post event
+ */
+#define NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT (0x90cd0105) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID << 8) | 0x5" */
+
+/*
+ * NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS
+ *
+ * eventType [IN]
+ *     the NvTelemetry event type.
+ * typeVersion [IN]
+ *     the version of the event structure.
+ * eventData [IN]
+ *     an array of 256 bytes used to hold the event data.
+ * eventDataSz [IN]
+ *     the amount of valid data in the eventData buffer.
+ * varData [IN]
+ *     an array of 256 bytes used to hold the var data.
+ * varDataSz [IN]
+ *     the amount of valid data in the varData buffer.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
typedef struct NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS {
+    NvU32 eventType;
+    NvU16 typeVersion;
+    NvU8  eventData[256];
+    NvU16 eventDataSz;
+    NvU8  varData[256];
+    NvU16 varDataSz;
+} NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS;
+
+/* _ctrl90cd_h_ */
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl90e7.h b/src/common/sdk/nvidia/inc/ctrl/ctrl90e7.h
new file mode 100644
index 0000000..b9a8cf7
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl90e7.h
@@ -0,0 +1,34 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl90e7.finn +// + + + +#include "ctrl90e7/ctrl90e7bbx.h" + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7base.h b/src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7base.h new file mode 100644 index 0000000..a177cf1 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7base.h @@ -0,0 +1,57 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl90e7/ctrl90e7base.finn +// + +#include "ctrl/ctrlxxxx.h" + +/* GF100_SUBDEVICE_INFOROM control commands and parameters */ + +#define NV90E7_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x90E7, NV90E7_CTRL_##cat, idx) + +/* Command categories (6 bits) */ +#define NV90E7_CTRL_RESERVED (0x00) +#define NV90E7_CTRL_BBX_LEGACY_PRIVILEGED (0xc1) /* finn: Evaluated from "(NV90E7_CTRL_BBX | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV90E7_CTRL_BBX (0x01) +#define NV90E7_CTRL_RPR (0x02) +#define NV90E7_CTRL_PREDICTIVE (0x03) + +/* + * NV90E7_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ + +#define NV90E7_CTRL_CMD_NULL (0x90e70000) /* finn: Evaluated from "(FINN_GF100_SUBDEVICE_INFOROM_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7bbx.h b/src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7bbx.h new file mode 100644 index 0000000..5563a66 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7bbx.h @@ -0,0 +1,92 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl90e7/ctrl90e7bbx.finn
+//
+
+#include "nvfixedtypes.h"
+#include "ctrl/ctrl90e7/ctrl90e7base.h"
+
+
+
+/*
+ * NV90E7_CTRL_CMD_BBX_GET_LAST_FLUSH_TIME
+ *
+ * This command is used to query the last BBX flush timestamp and duration. If
+ * BBX has not yet been flushed, the status returned is NV_ERR_NOT_READY.
+ *
+ * timestamp
+ *     This parameter specifies the start timestamp of the last BBX flush.
+ *
+ * durationUs
+ *     This parameter specifies the duration (us) of the last BBX flush.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_READY
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV90E7_CTRL_CMD_BBX_GET_LAST_FLUSH_TIME (0x90e70113) /* finn: Evaluated from "(FINN_GF100_SUBDEVICE_INFOROM_BBX_INTERFACE_ID << 8) | NV90E7_CTRL_BBX_GET_LAST_FLUSH_TIME_PARAMS_MESSAGE_ID" */
+
+#define NV90E7_CTRL_BBX_GET_LAST_FLUSH_TIME_PARAMS_MESSAGE_ID (0x13U)
+
+typedef struct NV90E7_CTRL_BBX_GET_LAST_FLUSH_TIME_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 timestamp, 8);
+    NvU32 durationUs;
+} NV90E7_CTRL_BBX_GET_LAST_FLUSH_TIME_PARAMS;
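+
+/*
+ * Example (illustrative sketch, not part of this header): querying the last
+ * BBX flush and tolerating the not-yet-flushed case. hClient, hInforom, and
+ * the NvRmControl() entry point are assumptions supplied by the caller.
+ *
+ *     NV90E7_CTRL_BBX_GET_LAST_FLUSH_TIME_PARAMS flushParams = { 0 };
+ *     status = NvRmControl(hClient, hInforom,
+ *                          NV90E7_CTRL_CMD_BBX_GET_LAST_FLUSH_TIME,
+ *                          &flushParams, sizeof(flushParams));
+ *     if (status == NV_ERR_NOT_READY)
+ *         ... BBX has not been flushed yet; try again later ...
+ */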
+
+/*
+ * NV90E7_CTRL_CMD_BBX_IS_NVM_FLUSH_ENABLED
+ *
+ * This command is used to query whether BBX flushing to non-volatile memory
+ * is enabled.
+ *
+ * bIsEnabled
+ *     Whether BBX flushing to non-volatile memory is enabled.
+ * bPeriodicFlush
+ *     Whether BBX periodically flushes to non-volatile memory.
+ * periodicFlushIntervalSec
+ *     The minimum interval (in seconds) between two consecutive periodic BBX
+ *     flushes, if periodic flush is enabled.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV90E7_CTRL_CMD_BBX_IS_NVM_FLUSH_ENABLED (0x90e70119) /* finn: Evaluated from "(FINN_GF100_SUBDEVICE_INFOROM_BBX_INTERFACE_ID << 8) | NV90E7_CTRL_BBX_IS_NVM_FLUSH_ENABLED_PARAMS_MESSAGE_ID" */
+
+#define NV90E7_CTRL_BBX_IS_NVM_FLUSH_ENABLED_PARAMS_MESSAGE_ID (0x19U)
+
+typedef struct NV90E7_CTRL_BBX_IS_NVM_FLUSH_ENABLED_PARAMS {
+    NvBool bIsEnabled;
+    NvBool bPeriodicFlush;
+    NvU32  periodicFlushIntervalSec;
+} NV90E7_CTRL_BBX_IS_NVM_FLUSH_ENABLED_PARAMS;
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h b/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h
new file mode 100644
index 0000000..1ee51f7
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h
@@ -0,0 +1,124 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl90ec.finn
+//
+
+#include "ctrl/ctrlxxxx.h"
+/* GK104 HDACODEC control commands and parameters */
+
+#define NV90EC_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x90EC, NV90EC_CTRL_##cat, idx)
+
+/* NV04_DISPLAY_COMMON command categories (6bits) */
+#define NV90EC_CTRL_RESERVED (0x00)
+#define NV90EC_CTRL_HDACODEC (0x01)
+
+/*
+ * NV90EC_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV90EC_CTRL_CMD_NULL (0x90ec0000) /* finn: Evaluated from "(FINN_GF100_HDACODEC_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+
+
+
+/*
+ * NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE
+ *
+ * This command sets the CP_READY bit. It informs RM whether or not the DD has
+ * acted upon the HDCP request issued by the audio driver. The DD asks RM to
+ * enable the CP_READY bit (by setting bCpReadyEnable to NV_TRUE) once it is
+ * done honouring/dishonouring the request.
+ *
+ * subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h b/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h
new file mode 100644
index 0000000..1ee51f7
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h
@@ -0,0 +1,124 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl90ec.finn
+//
+
+#include "ctrl/ctrlxxxx.h"
+/* GK104 HDACODEC control commands and parameters */
+
+#define NV90EC_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x90EC, NV90EC_CTRL_##cat, idx)
+
+/* NV04_DISPLAY_COMMON command categories (6bits) */
+#define NV90EC_CTRL_RESERVED (0x00)
+#define NV90EC_CTRL_HDACODEC (0x01)
+
+/*
+ * NV90EC_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV90EC_CTRL_CMD_NULL (0x90ec0000) /* finn: Evaluated from "(FINN_GF100_HDACODEC_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+
+
+
+/*
+ * NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE
+ *
+ * This command sets the CP_READY bit. It informs RM whether or not
+ * the DD has worked upon the HDCP request raised by the Audio driver.
+ * The DD asks RM to enable the CP_READY bit (by setting CpReadyEnable to NV_TRUE)
+ * once it is done honouring/dishonouring the request.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayId
+ *   This parameter specifies the ID of the display for which the cp ready
+ *   bit should be enabled. The display ID must be a dfp display.
+ *   If the displayId is not a dfp, this call will return
+ *   NV_ERR_INVALID_ARGUMENT.
+ * CpReadyEnable
+ *   This parameter specifies whether to enable (NV_TRUE) the CP_READY bit or not.
+ *   If CpReady is enabled then AudioCodec can send more HDCP requests.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ *
+ */
+#define NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE (0x90ec0101) /* finn: Evaluated from "(FINN_GF100_HDACODEC_HDACODEC_INTERFACE_ID << 8) | NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE_PARAMS_MESSAGE_ID" */
+
+#define NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bCpReadyEnable;
+} NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE_PARAMS;
+
+/*
+ * NV90EC_CTRL_CMD_HDACODEC_NOTIFY_AUDIO_EVENT
+ *
+ * This command notifies the Audio driver of audio-related events,
+ * such as notification of the PD bit being set.
+ *
+ * audioEvent
+ *   This parameter specifies the event type.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ *
+ */
+#define NV90EC_CTRL_CMD_HDACODEC_NOTIFY_AUDIO_EVENT (0x90ec0102) /* finn: Evaluated from "(FINN_GF100_HDACODEC_HDACODEC_INTERFACE_ID << 8) | NV90EC_CTRL_HDACODEC_NOTIFY_AUDIO_EVENT_PARAMS_MESSAGE_ID" */
+
+#define NV90EC_CTRL_HDACODEC_NOTIFY_AUDIO_EVENT_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV90EC_CTRL_HDACODEC_NOTIFY_AUDIO_EVENT_PARAMS {
+    NvU32 audioEvent;
+} NV90EC_CTRL_HDACODEC_NOTIFY_AUDIO_EVENT_PARAMS;
+
+/*
+ * This event notifies the audio driver that the PD bit is set by the DD, by writing to a scratch register
+ */
+#define NV90EC_CTRL_HDACODEC_AUDIOEVENT_PD_BIT_SET (0x00000001)
+
+/* _ctrl90ec_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl90f1.h b/src/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
new file mode 100644
index 0000000..73555b5
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
@@ -0,0 +1,407 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl90f1.finn +// + +#include "ctrl/ctrlxxxx.h" +#include "mmu_fmt_types.h" +#include "nvcfg_sdk.h" + +#define GMMU_FMT_MAX_LEVELS 6U + +/* Fermi+ GPU VASpace control commands and parameters */ +#define NV90F1_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x90F1, NV90F1_CTRL_##cat, idx) + +/* Command categories (6bits) */ +#define NV90F1_CTRL_RESERVED (0x00U) +#define NV90F1_CTRL_VASPACE (0x01U) + +/*! + * Does nothing. + */ +#define NV90F1_CTRL_CMD_NULL (0x90f10000U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/*! + * Get VAS GPU MMU format. + */ +#define NV90F1_CTRL_CMD_VASPACE_GET_GMMU_FORMAT (0x90f10101U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_GET_GMMU_FORMAT_PARAMS_MESSAGE_ID" */ + +#define NV90F1_CTRL_VASPACE_GET_GMMU_FORMAT_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV90F1_CTRL_VASPACE_GET_GMMU_FORMAT_PARAMS { + /*! + * [in] GPU sub-device handle - this API only supports unicast. + * Pass 0 to use subDeviceId instead. + */ + NvHandle hSubDevice; + + /*! + * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero. + */ + NvU32 subDeviceId; + + /*! + * [out] GMMU format struct. This is of RM-internal type "struct GMMU_FMT*" + * which can only be accessed by kernel builds since this is a kernel + * only API. + */ + NV_DECLARE_ALIGNED(NvP64 pFmt, 8); +} NV90F1_CTRL_VASPACE_GET_GMMU_FORMAT_PARAMS; + +/*! + * Get VAS page level information. + */ +#define NV90F1_CTRL_CMD_VASPACE_GET_PAGE_LEVEL_INFO (0x90f10102U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS_MESSAGE_ID" */ + +typedef struct NV_CTRL_VASPACE_PAGE_LEVEL { + /*! + * Format of this level. + */ + NV_DECLARE_ALIGNED(struct MMU_FMT_LEVEL *pFmt, 8); + + /*! + * Level/Sublevel Formats flattened + */ + NV_DECLARE_ALIGNED(MMU_FMT_LEVEL levelFmt, 8); + NV_DECLARE_ALIGNED(MMU_FMT_LEVEL sublevelFmt[MMU_FMT_MAX_SUB_LEVELS], 8); + + /*! + * Physical address of this page level instance. + */ + NV_DECLARE_ALIGNED(NvU64 physAddress, 8); + + /*! + * Aperture in which this page level instance resides. + */ + NvU32 aperture; + + /*! + * Size in bytes allocated for this level instance. + */ + NV_DECLARE_ALIGNED(NvU64 size, 8); + + /*! + * Entry Index for this offset. + */ + NvU32 entryIndex; +} NV_CTRL_VASPACE_PAGE_LEVEL; + +#define NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS { + /*! + * [in] GPU sub-device handle - this API only supports unicast. + * Pass 0 to use subDeviceId instead. + */ + NvHandle hSubDevice; + + /*! + * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero. + */ + NvU32 subDeviceId; + + /*! + * [in] GPU virtual address to query. + */ + NV_DECLARE_ALIGNED(NvU64 virtAddress, 8); + + /*! + * [in] Page size to query. + */ + NV_DECLARE_ALIGNED(NvU64 pageSize, 8); + + /*! + * [in] Flags + * Contains flags to control various aspects of page level info. + */ + NV_DECLARE_ALIGNED(NvU64 flags, 8); + + /*! + * [out] Number of levels populated. + */ + NvU32 numLevels; + + /*! + * [out] Per-level information. + */ + NV_DECLARE_ALIGNED(NV_CTRL_VASPACE_PAGE_LEVEL levels[GMMU_FMT_MAX_LEVELS], 8); +} NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS; + +/* valid flags parameter values */ +#define NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_FLAG_NONE 0x0ULL +#define NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_FLAG_BAR1 NVBIT64(0) + + +/*! 
+ * Reserve (allocate and bind) page directory/table entries up to
+ * a given level of the MMU format. Also referred to as "lock-down".
+ *
+ * Each range that has been reserved must eventually be released
+ * with @ref NV90F1_CTRL_CMD_VASPACE_RELEASE_ENTRIES.
+ * A particular VA range and level (page size) combination may only be
+ * locked down once at a given time, but each level is independent.
+ */
+#define NV90F1_CTRL_CMD_VASPACE_RESERVE_ENTRIES (0x90f10103U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_RESERVE_ENTRIES_PARAMS_MESSAGE_ID" */
+
+#define NV90F1_CTRL_VASPACE_RESERVE_ENTRIES_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV90F1_CTRL_VASPACE_RESERVE_ENTRIES_PARAMS {
+    /*!
+     * [in] GPU sub-device handle - this API only supports unicast.
+     *      Pass 0 to use subDeviceId instead.
+     */
+    NvHandle hSubDevice;
+
+    /*!
+     * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
+     */
+    NvU32    subDeviceId;
+
+    /*!
+     * [in] Page size (VA coverage) of the level to reserve.
+     *      This need not be a leaf (page table) page size - it can be
+     *      the coverage of an arbitrary level (including root page directory).
+     */
+    NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
+
+    /*!
+     * [in] First GPU virtual address of the range to reserve.
+     *      This must be aligned to pageSize.
+     */
+    NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
+
+    /*!
+     * [in] Last GPU virtual address of the range to reserve.
+     *      This (+1) must be aligned to pageSize.
+     */
+    NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
+} NV90F1_CTRL_VASPACE_RESERVE_ENTRIES_PARAMS;
+
+/*!
+ * Release (unbind and free) page directory/table entries up to
+ * a given level of the MMU format that has been reserved through a call to
+ * @ref NV90F1_CTRL_CMD_VASPACE_RESERVE_ENTRIES. Also referred to as "unlock".
+ */
+#define NV90F1_CTRL_CMD_VASPACE_RELEASE_ENTRIES (0x90f10104U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_RELEASE_ENTRIES_PARAMS_MESSAGE_ID" */
+
+#define NV90F1_CTRL_VASPACE_RELEASE_ENTRIES_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV90F1_CTRL_VASPACE_RELEASE_ENTRIES_PARAMS {
+    /*!
+     * [in] GPU sub-device handle - this API only supports unicast.
+     *      Pass 0 to use subDeviceId instead.
+     */
+    NvHandle hSubDevice;
+
+    /*!
+     * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
+     */
+    NvU32    subDeviceId;
+
+    /*!
+     * [in] Page size (VA coverage) of the level to release.
+     *      This need not be a leaf (page table) page size - it can be
+     *      the coverage of an arbitrary level (including root page directory).
+     */
+    NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
+
+    /*!
+     * [in] First GPU virtual address of the range to release.
+     *      This must be aligned to pageSize.
+     */
+    NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
+
+    /*!
+     * [in] Last GPU virtual address of the range to release.
+     *      This (+1) must be aligned to pageSize.
+     */
+    NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
+} NV90F1_CTRL_VASPACE_RELEASE_ENTRIES_PARAMS;
+
+/*!
+ * Get VAS page level information without kernel privilege. This will internally call
+ * NV90F1_CTRL_CMD_VASPACE_GET_PAGE_LEVEL_INFO.
+ */
+#define NV90F1_CTRL_CMD_VASPACE_GET_PAGE_LEVEL_INFO_VERIF (0x90f10105U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_VERIF_PARAMS_MESSAGE_ID" */
+
+#define NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_VERIF_PARAMS_MESSAGE_ID (0x5U)
+
+typedef NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_VERIF_PARAMS;
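A usage sketch for the reserve/release pair, again assuming the hypothetical rmControl() helper from the first example; the 2 MB pageSize is an arbitrary illustrative value, not a requirement of the API.

static NV_STATUS lockDownRange(NvHandle hClient, NvHandle hVASpace,
                               NvU64 vaLo, NvU64 vaHi)
{
    NV90F1_CTRL_VASPACE_RESERVE_ENTRIES_PARAMS params = { 0 };
    params.hSubDevice  = 0;          // select unicast target via subDeviceId
    params.subDeviceId = 0;
    params.pageSize    = 2 << 20;    // VA coverage of the level (illustrative)
    params.virtAddrLo  = vaLo;       // must be pageSize-aligned
    params.virtAddrHi  = vaHi;       // (vaHi + 1) must be pageSize-aligned
    return rmControl(hClient, hVASpace,
                     NV90F1_CTRL_CMD_VASPACE_RESERVE_ENTRIES,
                     &params, sizeof(params));
}
/* Release later with NV90F1_CTRL_CMD_VASPACE_RELEASE_ENTRIES and the same
 * pageSize/virtAddrLo/virtAddrHi; the two params structs are field-identical. */

+
+/*!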
+ * Pin PDEs for a given VA range on the server RM and then mirror the client's page + * directory/tables in the server. + * + * @ref + */ +#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */ + +#define NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS { + /*! + * [in] GPU sub-device handle - this API only supports unicast. + * Pass 0 to use subDeviceId instead. + */ + NvHandle hSubDevice; + + /*! + * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero. + */ + NvU32 subDeviceId; + + /*! + * [in] Page size (VA coverage) of the level to reserve. + * This need not be a leaf (page table) page size - it can be + * the coverage of an arbitrary level (including root page directory). + */ + NV_DECLARE_ALIGNED(NvU64 pageSize, 8); + + /*! + * [in] First GPU virtual address of the range to reserve. + * This must be aligned to pageSize. + */ + NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8); + + /*! + * [in] Last GPU virtual address of the range to reserve. + * This (+1) must be aligned to pageSize. + */ + NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8); + + /*! + * [in] Number of PDE levels to copy. + */ + NvU32 numLevelsToCopy; + + /*! + * [in] Per-level information. + */ + struct { + /*! + * Physical address of this page level instance. + */ + NV_DECLARE_ALIGNED(NvU64 physAddress, 8); + + /*! + * Size in bytes allocated for this level instance. + */ + NV_DECLARE_ALIGNED(NvU64 size, 8); + + /*! + * Aperture in which this page level instance resides. + */ + NvU32 aperture; + + /*! + * Page shift corresponding to the level + */ + NvU8 pageShift; + } levels[GMMU_FMT_MAX_LEVELS]; +} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS; + +/*! + * Retrieve extra VA range that RM needs to reserve from the OS + */ +#define NV90F1_CTRL_CMD_VASPACE_GET_HOST_RM_MANAGED_SIZE (0x90f10107U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_GET_HOST_RM_MANAGED_SIZE_PARAMS_MESSAGE_ID" */ +#define NV90F1_CTRL_VASPACE_GET_HOST_RM_MANAGED_SIZE_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV90F1_CTRL_VASPACE_GET_HOST_RM_MANAGED_SIZE_PARAMS { + /*! + * [in] GPU sub-device handle - this API only supports unicast. + * Pass 0 to use subDeviceId instead. + */ + NvHandle hSubDevice; + + /*! + * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero. + */ + NvU32 subDeviceId; + + /*! + * [out] The required VA range, in Megabytes + */ + NV_DECLARE_ALIGNED(NvU64 requiredVaRange, 8); +} NV90F1_CTRL_VASPACE_GET_HOST_RM_MANAGED_SIZE_PARAMS; + +/*! + * Retrieve info on a VAS heap - used only for the MODS test RandomVATest + */ +#define NV90F1_CTRL_CMD_VASPACE_GET_VAS_HEAP_INFO (0x90f10108U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_GET_VAS_HEAP_INFO_PARAMS_MESSAGE_ID" */ +#define NV90F1_CTRL_VASPACE_GET_VAS_HEAP_INFO_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV90F1_CTRL_VASPACE_GET_VAS_HEAP_INFO_PARAMS { + /*! + * [in] GPU sub-device handle - this API only supports unicast. + * Pass 0 to use subDeviceId instead. + */ + NvHandle hSubDevice; + + /*! + * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero. + */ + NvU32 subDeviceId; + + /*! + * [out] Number of free bytes in the heap + */ + NV_DECLARE_ALIGNED(NvU64 bytesFree, 8); + + /*! 
+ * [out] Number of bytes in the heap + */ + NV_DECLARE_ALIGNED(NvU64 bytesTotal, 8); + + /*! + * [out] Offset of largest free block + */ + NV_DECLARE_ALIGNED(NvU64 largestFreeOffset, 8); + + /*! + * [out] Size of the largest free block + */ + NV_DECLARE_ALIGNED(NvU64 largestFreeSize, 8); + + /*! + * [out] Number of usable free bytes + */ + NV_DECLARE_ALIGNED(NvU64 usableBytesFree, 8); + + /*! + * [out] Number of free blocks + */ + NvU32 numFreeBlocks; +} NV90F1_CTRL_VASPACE_GET_VAS_HEAP_INFO_PARAMS; + +/* _ctrl90f1_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrla06f.h b/src/common/sdk/nvidia/inc/ctrl/ctrla06f.h new file mode 100644 index 0000000..c3a0356 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrla06f.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrla06f.finn +// + + + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrla06f/ctrla06fbase.h" +#include "ctrl/ctrla06f/ctrla06fgpfifo.h" +#include "ctrl/ctrla06f/ctrla06finternal.h" diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fbase.h b/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fbase.h new file mode 100644 index 0000000..521132e --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fbase.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrla06f/ctrla06fbase.finn
+//
+
+
+
+
+/* GK100_GPFIFO control commands and parameters */
+
+#include "ctrl/ctrlxxxx.h"
+#include "ctrl/ctrl906f.h" /* A06F is partially derived from 906F */
+
+#define NVA06F_CTRL_CMD(cat,idx) \
+    NVXXXX_CTRL_CMD(0xA06F, NVA06F_CTRL_##cat, idx)
+
+/* GK100_GPFIFO command categories (6bits) */
+#define NVA06F_CTRL_RESERVED (0x00)
+#define NVA06F_CTRL_GPFIFO   (0x01)
+#define NVA06F_CTRL_INTERNAL (0x03)
+
+/*
+ * NVA06F_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *
+ */
+#define NVA06F_CTRL_CMD_NULL (0xa06f0000) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+/* _ctrla06fbase_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h b/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
new file mode 100644
index 0000000..65b82d5
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
@@ -0,0 +1,261 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2007-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrla06f/ctrla06fgpfifo.finn
+//
+
+#include "ctrl/ctrla06f/ctrla06fbase.h"
+
+/*
+ * NVA06F_CTRL_CMD_GPFIFO_SCHEDULE
+ *
+ * This command schedules a channel in hardware. This command should be called
+ * after objects have been allocated on the channel or a call to
+ * NVA06F_CTRL_CMD_BIND has been made.
+ *
+ * bEnable
+ *   This parameter indicates whether or not the channel should be scheduled in hardware.
+ *   When set, the channel will be enabled in addition to being added to the appropriate runlist.
+ *   When not set, the channel will be disabled and removed from the runlist.
+ *
+ * bSkipSubmit
+ *   This parameter indicates whether this channel is not to be scheduled, even though it is enabled.
+ *   When set, the channel will not be scheduled.
+ *   When not set, the channel will be scheduled.
+ *
+ * bSkipEnable
+ *   This parameter indicates whether this channel is not to be enabled, even though it is scheduled.
+ *   When set, the channel will not be enabled.
+ *   When not set, the channel will be enabled.
+ *
+ * When bEnable is set, bSkipSubmit and bSkipEnable cannot both be set.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_OPERATION
+ *
+ */
+#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */
+#define NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS {
+    NvBool bEnable;
+    NvBool bSkipSubmit;
+    NvBool bSkipEnable;
+} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS;
+
+/*
+ * NVA06F_CTRL_CMD_BIND
+ *
+ * This command uses the given engine to configure the channel for scheduling.
+ * It alleviates the need to call NVA06F_CTRL_CMD_GPFIFO_SCHEDULE after objects
+ * have been allocated. However, it requires that the caller know which engine
+ * they want to be able to execute on the channel. Once this has been called,
+ * only objects that can be allocated on the specified engine or other engines
+ * allowed to coexist on the channel will be allowed. See
+ * NV2080_CTRL_CMD_GPU_GET_ENGINE_PARTNERLIST to determine which engines can
+ * share a parent.
+ *
+ * engineType
+ *   This parameter specifies an NV2080_ENGINE_TYPE value indicating the
+ *   engine to which this channel should be bound.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */
+
+#define NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NVA06F_CTRL_BIND_PARAMS {
+    NvU32 engineType;
+} NVA06F_CTRL_BIND_PARAMS;
+
+/*
+ * NVA06F_CTRL_CMD_SET_ERROR_NOTIFIER
+ *
+ * This command sets the channel error notifier of the target channel.
+ * bNotifyEachChannelInTSG
+ *   When true, the error notifier will be set on every channel in
+ *   the TSG that contains the channel.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NVA06F_CTRL_CMD_SET_ERROR_NOTIFIER (0xa06f0108) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS_MESSAGE_ID" */
+
+#define NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS_MESSAGE_ID (0x8U)
+
+typedef struct NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS {
+    NvBool bNotifyEachChannelInTSG;
+} NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS;
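A sketch of the bring-up order described above, assuming the hypothetical rmControl() helper: bind the channel to an engine, then enable and schedule it.

static NV_STATUS bindAndSchedule(NvHandle hClient, NvHandle hChannel,
                                 NvU32 engineType)
{
    NVA06F_CTRL_BIND_PARAMS bind = { 0 };
    NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS sched = { 0 };
    NV_STATUS status;

    bind.engineType = engineType;   // an NV2080_ENGINE_TYPE value
    status = rmControl(hClient, hChannel, NVA06F_CTRL_CMD_BIND,
                       &bind, sizeof(bind));
    if (status != NV_OK)
        return status;

    sched.bEnable = NV_TRUE;        // add to the runlist and enable
    // bSkipSubmit/bSkipEnable stay NV_FALSE: they cannot both be set
    // together with bEnable.
    return rmControl(hClient, hChannel, NVA06F_CTRL_CMD_GPFIFO_SCHEDULE,
                     &sched, sizeof(sched));
}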
+/*
+ * NVA06F_CTRL_CMD_SET_INTERLEAVE_LEVEL
+ *
+ * Symmetric to NVA06C_CTRL_CMD_SET_INTERLEAVE_LEVEL, applied to the individual
+ * target channel.
+ *
+ * When belonging to a TSG, the same interleave level will be set on every channel
+ * in the TSG.
+ *
+ * channelInterleaveLevel
+ *   Input parameter. One of:
+ *   - NVA06C_CTRL_INTERLEAVE_LEVEL_LOW
+ *   - NVA06C_CTRL_INTERLEAVE_LEVEL_MEDIUM
+ *   - NVA06C_CTRL_INTERLEAVE_LEVEL_HIGH
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ */
+#define NVA06F_CTRL_CMD_SET_INTERLEAVE_LEVEL (0xa06f0109) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_SET_INTERLEAVE_LEVEL_PARAMS_MESSAGE_ID" */
+
+typedef struct NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS {
+    NvU32 channelInterleaveLevel;
+} NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS;
+
+#define NVA06F_CTRL_SET_INTERLEAVE_LEVEL_PARAMS_MESSAGE_ID (0x9U)
+
+typedef NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS NVA06F_CTRL_SET_INTERLEAVE_LEVEL_PARAMS;
+
+/*
+ * NVA06F_CTRL_CMD_GET_INTERLEAVE_LEVEL
+ *
+ * Returns the target channel's interleave level.
+ *
+ * channelInterleaveLevel
+ *   Output parameter. One of:
+ *   - NVA06C_CTRL_INTERLEAVE_LEVEL_LOW
+ *   - NVA06C_CTRL_INTERLEAVE_LEVEL_MEDIUM
+ *   - NVA06C_CTRL_INTERLEAVE_LEVEL_HIGH
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NVA06F_CTRL_CMD_GET_INTERLEAVE_LEVEL (0xa06f0110) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GET_INTERLEAVE_LEVEL_PARAMS_MESSAGE_ID" */
+
+#define NVA06F_CTRL_GET_INTERLEAVE_LEVEL_PARAMS_MESSAGE_ID (0x10U)
+
+typedef NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS NVA06F_CTRL_GET_INTERLEAVE_LEVEL_PARAMS;
+
+/*
+ * NVA06F_CTRL_CMD_RESTART_RUNLIST
+ *
+ * This command expires the current timeslice and restarts the runlist the given
+ * channel belongs to. This effectively preempts the current channel on the
+ * corresponding engine.
+ *
+ * This is useful for clients to trigger preemption manually and reduce start
+ * latency for higher priority channels, as they are added first to the runlist
+ * if the NV0080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED policy is
+ * configured.
+ *
+ * This command interacts with the scheduler and may cause certain low priority
+ * channels to starve under certain circumstances. Therefore, it is only
+ * available to privileged clients.
+ *
+ * bForceRestart
+ *   Input parameter. If NV_FALSE, the runlist restart will be skipped
+ *   whenever the given channel (or its group) is already running on the
+ *   corresponding engine.
+ *
+ * bBypassWait
+ *   Input parameter. If NV_TRUE, the command will return immediately after
+ *   issuing the hardware preemption request, without actually waiting for the
+ *   context switch to complete.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NVA06F_CTRL_CMD_RESTART_RUNLIST (0xa06f0111) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_RESTART_RUNLIST_PARAMS_MESSAGE_ID" */
+
+#define NVA06F_CTRL_RESTART_RUNLIST_PARAMS_MESSAGE_ID (0x11U)
+
+typedef struct NVA06F_CTRL_RESTART_RUNLIST_PARAMS {
+    NvBool bForceRestart;
+    NvBool bBypassWait;
+} NVA06F_CTRL_RESTART_RUNLIST_PARAMS;
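A manual-preemption sketch (privileged clients only), assuming the hypothetical rmControl() helper.

static NV_STATUS preemptViaRunlistRestart(NvHandle hClient, NvHandle hChannel)
{
    NVA06F_CTRL_RESTART_RUNLIST_PARAMS params = { 0 };
    params.bForceRestart = NV_TRUE;  // restart even if the channel is running
    params.bBypassWait   = NV_FALSE; // block until the context switch completes
    return rmControl(hClient, hChannel, NVA06F_CTRL_CMD_RESTART_RUNLIST,
                     &params, sizeof(params));
}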
+/*
+ * NVA06F_CTRL_CMD_STOP_CHANNEL
+ *
+ * This command is used to stop the channel.
+ *
+ * Stopping the channel here means disabling and unbinding the channel and removing it from the runlist.
+ * So, if the channel needs to run again, it has to be scheduled, bound and enabled again.
+ * If we fail to preempt the channel or remove it from the runlist, then we RC the channel.
+ * An error notifier is also set to notify user space that the channel is stopped.
+ *
+ * bImmediate
+ *   Input parameter. If NV_FALSE, we will wait for the default RM timeout
+ *   for the channel to idle. If NV_TRUE, we don't wait for the channel to idle.
+ *   If the channel is not idle, we forcefully preempt it off the runlist.
+ *   If the preempt times out, we will RC the channel.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NVA06F_CTRL_CMD_STOP_CHANNEL (0xa06f0112) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_STOP_CHANNEL_PARAMS_MESSAGE_ID" */
+
+#define NVA06F_CTRL_STOP_CHANNEL_PARAMS_MESSAGE_ID (0x12U)
+
+typedef struct NVA06F_CTRL_STOP_CHANNEL_PARAMS {
+    NvBool bImmediate;
+} NVA06F_CTRL_STOP_CHANNEL_PARAMS;
+
+/*
+ * NVA06F_CTRL_CMD_GET_CONTEXT_ID
+ *
+ * This command returns the context ID of a given channel.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NVA06F_CTRL_CMD_GET_CONTEXT_ID (0xa06f0113) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GET_CONTEXT_ID_PARAMS_MESSAGE_ID" */
+
+#define NVA06F_CTRL_GET_CONTEXT_ID_PARAMS_MESSAGE_ID (0x13U)
+
+typedef struct NVA06F_CTRL_GET_CONTEXT_ID_PARAMS {
+    NvU32 contextId;
+} NVA06F_CTRL_GET_CONTEXT_ID_PARAMS;
+
+/* _ctrla06fgpfifo_h_ */
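A stop-channel sketch, assuming the hypothetical rmControl() helper.

static NV_STATUS stopChannelGracefully(NvHandle hClient, NvHandle hChannel)
{
    NVA06F_CTRL_STOP_CHANNEL_PARAMS params = { 0 };
    params.bImmediate = NV_FALSE; // wait up to the default RM timeout to idle;
                                  // NV_TRUE would preempt without waiting
    return rmControl(hClient, hChannel, NVA06F_CTRL_CMD_STOP_CHANNEL,
                     &params, sizeof(params));
}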
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06finternal.h b/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06finternal.h
new file mode 100644
index 0000000..354a750
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06finternal.h
@@ -0,0 +1,64 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2007-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrla06f/ctrla06finternal.finn
+//
+
+#include "ctrl/ctrla06f/ctrla06fbase.h"
+#include "ctrl/ctrla06f/ctrla06fgpfifo.h"
+
+/*
+ * NVA06F_CTRL_CMD_INTERNAL_STOP_CHANNEL
+ *
+ * This command is an internal command sent from Kernel RM to Physical RM
+ * to stop the channel in hardware.
+ *
+ * Please see the description of NVA06F_CTRL_CMD_STOP_CHANNEL for more information.
+ *
+ */
+#define NVA06F_CTRL_CMD_INTERNAL_STOP_CHANNEL (0xa06f0301) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_INTERNAL_INTERFACE_ID << 8) | NVA06F_CTRL_INTERNAL_STOP_CHANNEL_PARAMS_MESSAGE_ID" */
+
+#define NVA06F_CTRL_INTERNAL_STOP_CHANNEL_PARAMS_MESSAGE_ID (0x1U)
+
+typedef NVA06F_CTRL_STOP_CHANNEL_PARAMS NVA06F_CTRL_INTERNAL_STOP_CHANNEL_PARAMS;
+
+/*
+ * NVA06F_CTRL_CMD_INTERNAL_GPFIFO_SCHEDULE
+ *
+ * This command is an internal command sent from Kernel RM to Physical RM
+ * to schedule the channel in hardware.
+ *
+ * Please see the description of NVA06F_CTRL_CMD_GPFIFO_SCHEDULE for more information.
+ *
+ */
+#define NVA06F_CTRL_CMD_INTERNAL_GPFIFO_SCHEDULE (0xa06f0303) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_INTERNAL_INTERFACE_ID << 8) | NVA06F_CTRL_INTERNAL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */
+
+#define NVA06F_CTRL_INTERNAL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID (0x3U)
+
+typedef NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS NVA06F_CTRL_INTERNAL_GPFIFO_SCHEDULE_PARAMS;
+
+/* ctrla06finternal_h */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrla081.h b/src/common/sdk/nvidia/inc/ctrl/ctrla081.h
new file mode 100644
index 0000000..de48120
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrla081.h
@@ -0,0 +1,1076 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrla081.finn +// + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrl2080/ctrl2080gpu.h" +#include "nv_vgpu_types.h" +#include "nvcfg_sdk.h" +/* NVA081_VGPU_CONFIG control commands and parameters */ + +#define NVA081_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0xA081, NVA081_CTRL_##cat, idx) + +/* Command categories (6bits) */ +#define NVA081_CTRL_RESERVED (0x00) +#define NVA081_CTRL_VGPU_CONFIG (0x01) + +#define NVA081_CTRL_VGPU_CONFIG_INVALID_TYPE 0x00 +#define NVA081_MAX_VGPU_TYPES_PER_PGPU 0x64 +#define NVA081_MAX_VGPU_PER_PGPU 48 +#define NVA081_MAX_VGPU_PER_PGPU_NON_MIG 32 +#define NVA081_MAX_VGPU_PER_GI 12 +#define NVA081_VM_UUID_SIZE 16 +#define NVA081_VGPU_STRING_BUFFER_SIZE 64 +#define NVA081_VGPU_SIGNATURE_SIZE 128 +#define NVA081_VM_NAME_SIZE 128 +#define NVA081_PCI_CONFIG_SPACE_SIZE 0x100 +#define NVA081_PGPU_METADATA_STRING_SIZE 256 +#define NVA081_EXTRA_PARAMETERS_SIZE 1024 +#define NVA081_PLACEMENT_ID_INVALID 0xFFFFU +#define NVA081_CONFIG_PARAMS_MAX_LENGTH 1024 + +#define NVA081_MAX_BAR_REGION_COUNT 4 +#define NVA081_MAX_SPARSE_REGION_COUNT 5 + +/* + * NVA081_CTRL_CMD_VGPU_CONFIG_SET_INFO + * + * This command sets the vGPU config information in RM + * + * Parameters: + * + * discardVgpuTypes [IN] + * This parameter specifies if existing vGPU configuration should be + * discarded for given pGPU + * + * vgpuInfo [IN] + * This parameter specifies virtual GPU type information + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NVA081_CTRL_CMD_VGPU_CONFIG_SET_INFO (0xa0810101) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_INFO_PARAMS_MESSAGE_ID" */ + +/* + * NVA081_CTRL_VGPU_CONFIG_INFO + * + * This structure represents the per vGPU information + * + */ +typedef struct NVA081_CTRL_VGPU_INFO { + // This structure should be in sync with NVA082_CTRL_CMD_HOST_VGPU_DEVICE_GET_VGPU_TYPE_INFO_PARAMS + NvU32 vgpuType; + NvU8 vgpuName[NVA081_VGPU_STRING_BUFFER_SIZE]; + NvU8 vgpuClass[NVA081_VGPU_STRING_BUFFER_SIZE]; + NvU8 vgpuSignature[NVA081_VGPU_SIGNATURE_SIZE]; + NvU8 license[NV_GRID_LICENSE_INFO_MAX_LENGTH]; + NvU32 maxInstance; + NvU32 numHeads; + NvU32 maxResolutionX; + NvU32 maxResolutionY; + NvU32 maxPixels; + NvU32 frlConfig; + NvU32 cudaEnabled; + NvU32 eccSupported; + NvU32 gpuInstanceSize; + NvU32 multiVgpuSupported; + NV_DECLARE_ALIGNED(NvU64 vdevId, 8); + NV_DECLARE_ALIGNED(NvU64 pdevId, 8); + NV_DECLARE_ALIGNED(NvU64 profileSize, 8); + NV_DECLARE_ALIGNED(NvU64 fbLength, 8); + NV_DECLARE_ALIGNED(NvU64 gspHeapSize, 8); + NV_DECLARE_ALIGNED(NvU64 fbReservation, 8); + NV_DECLARE_ALIGNED(NvU64 mappableVideoSize, 8); + NvU32 encoderCapacity; + NV_DECLARE_ALIGNED(NvU64 bar1Length, 8); + NvU32 frlEnable; + NvU8 adapterName[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU16 adapterName_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU8 shortGpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU8 licensedProductName[NV_GRID_LICENSE_INFO_MAX_LENGTH]; + NvU32 vgpuExtraParams[NVA081_EXTRA_PARAMETERS_SIZE]; + NvU32 ftraceEnable; + NvU32 gpuDirectSupported; + NvU32 nvlinkP2PSupported; + NvU32 maxInstancePerGI; + NvU32 multiVgpuExclusive; + NvU32 exclusiveType; + NvU32 exclusiveSize; + // used only by NVML + NvU32 gpuInstanceProfileId; + NvU32 placementSize; + NvU32 homogeneousPlacementCount; + NvU32 homogeneousPlacementIds[NVA081_MAX_VGPU_PER_PGPU]; + NvU32 heterogeneousPlacementCount; + NvU32 heterogeneousPlacementIds[NVA081_MAX_VGPU_PER_PGPU]; +} NVA081_CTRL_VGPU_INFO; + +/* + 
* NVA081_CTRL_VGPU_CONFIG_INFO_PARAMS + * + * This structure represents the vGPU configuration information + * + */ +#define NVA081_CTRL_VGPU_CONFIG_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVA081_CTRL_VGPU_CONFIG_INFO_PARAMS { + NvBool discardVgpuTypes; + NV_DECLARE_ALIGNED(NVA081_CTRL_VGPU_INFO vgpuInfo, 8); + NvU32 vgpuConfigState; +} NVA081_CTRL_VGPU_CONFIG_INFO_PARAMS; + +/* VGPU Config state values */ +#define NVA081_CTRL_VGPU_CONFIG_STATE_UNINITIALIZED 0 +#define NVA081_CTRL_VGPU_CONFIG_STATE_IN_PROGRESS 1 +#define NVA081_CTRL_VGPU_CONFIG_STATE_READY 2 + +/* + * NVA081_CTRL_VGPU_CONFIG_ENUMERATE_VGPU_PER_PGPU + * + * This command enumerates list of vGPU guest instances per pGpu + * + * Parameters: + * + * vgpuType [OUT] + * This parameter specifies the virtual GPU type for this physical GPU + * + * numVgpu [OUT] + * This parameter specifies the number of virtual GPUs created on this physical GPU + * + * guestInstanceInfo [OUT] + * This parameter specifies an array containing guest instance's information for + * all instances created on this physical GPU + * + * guestVgpuInfo [OUT] + * This parameter specifies an array containing guest vgpu's information for + * all vGPUs created on this physical GPU + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NVA081_CTRL_CMD_VGPU_CONFIG_ENUMERATE_VGPU_PER_PGPU (0xa0810102) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_ENUMERATE_VGPU_PER_PGPU_PARAMS_MESSAGE_ID" */ + +/* + * NVA081_GUEST_VM_INFO + * + * This structure represents vGPU guest's (VM's) information + * + * vmPid [OUT] + * This param specifies the vGPU plugin process ID + * vmIdType [OUT] + * This param specifies the VM ID type, i.e. 
DOMAIN_ID or UUID + * guestOs [OUT] + * This param specifies the vGPU guest OS type + * migrationProhibited [OUT] + * This flag indicates whether migration is prohibited for VM or not + * guestNegotiatedVgpuVersion [OUT] + * This param specifies the vGPU version of guest driver after negotiation + * frameRateLimit [OUT] + * This param specifies the current value of FRL set for guest + * licensed [OUT] + * This param specifies whether the VM is Unlicensed/Licensed + * licenseState [OUT] + * This param specifies the current state of the GRID license state machine + * licenseExpiryTimestamp [OUT] + * License expiry time in seconds since UNIX epoch + * licenseExpiryStatus [OUT] + * License expiry status + * guestDriverVersion [OUT] + * This param specifies the driver version of the driver installed on the VM + * guestDriverBranch [OUT] + * This param specifies the driver branch of the driver installed on the VM + * vmName [OUT] + * This param stores the name assigned to VM (KVM only) + * guestVmInfoState [OUT] + * This param stores the current state of guest dependent fields + * + */ +typedef struct NVA081_GUEST_VM_INFO { + NvU32 vmPid; + VM_ID_TYPE vmIdType; + NvU32 guestOs; + NvU32 migrationProhibited; + NvU32 guestNegotiatedVgpuVersion; + NvU32 frameRateLimit; + NvBool licensed; + NvU32 licenseState; + NvU32 licenseExpiryTimestamp; + NvU8 licenseExpiryStatus; + NV_DECLARE_ALIGNED(VM_ID guestVmId, 8); + NvU8 guestDriverVersion[NVA081_VGPU_STRING_BUFFER_SIZE]; + NvU8 guestDriverBranch[NVA081_VGPU_STRING_BUFFER_SIZE]; + NvU8 vmName[NVA081_VM_NAME_SIZE]; + GUEST_VM_INFO_STATE guestVmInfoState; +} NVA081_GUEST_VM_INFO; + +/* + * NVA081_GUEST_VGPU_DEVICE + * + * This structure represents host vgpu device's (assigned to VM) information + * + * eccState [OUT] + * This parameter specifies the ECC state of the virtual GPU. + * One of NVA081_CTRL_ECC_STATE_xxx values. + * bDriverLoaded [OUT] + * This parameter specifies whether driver is loaded on this particular vGPU. + * swizzId [OUT] + * This param specifies the GPU Instance ID or Swizz ID + * placementId [OUT] + * This param specifies the placement ID of heterogeneous timesliced vGPU instance. + * Otherwise it is NVA081_PLACEMENT_ID_INVALID. + * vgpuDevName [OUT] + * This param specifies the VF BDF of the virtual GPU. 
+ *
+ */
+typedef struct NVA081_HOST_VGPU_DEVICE {
+    NvU32  vgpuType;
+    NvU32  vgpuDeviceInstanceId;
+    NV_DECLARE_ALIGNED(NvU64 vgpuPciId, 8);
+    NvU8   vgpuUuid[VM_UUID_SIZE];
+    NvU8   vgpuDevName[VM_UUID_SIZE];
+    NvU32  encoderCapacity;
+    NV_DECLARE_ALIGNED(NvU64 fbUsed, 8);
+    NvU32  eccState;
+    NvBool bDriverLoaded;
+    NvU32  swizzId;
+    NvU32  placementId;
+    NvU32  accountingPid;
+} NVA081_HOST_VGPU_DEVICE;
+
+/* ECC state values */
+#define NVA081_CTRL_ECC_STATE_UNKNOWN       0
+#define NVA081_CTRL_ECC_STATE_NOT_SUPPORTED 1
+#define NVA081_CTRL_ECC_STATE_DISABLED      2
+#define NVA081_CTRL_ECC_STATE_ENABLED       3
+
+/*
+ * NVA081_VGPU_GUEST
+ *
+ * This structure represents a vGPU guest
+ *
+ */
+typedef struct NVA081_VGPU_GUEST {
+    NV_DECLARE_ALIGNED(NVA081_GUEST_VM_INFO guestVmInfo, 8);
+    NV_DECLARE_ALIGNED(NVA081_HOST_VGPU_DEVICE vgpuDevice, 8);
+} NVA081_VGPU_GUEST;
+
+/*
+ * NVA081_CTRL_VGPU_CONFIG_ENUMERATE_VGPU_PER_PGPU_PARAMS
+ *
+ * This structure represents the information of vGPU guest instances per pGpu
+ *
+ */
+#define NVA081_CTRL_VGPU_CONFIG_ENUMERATE_VGPU_PER_PGPU_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NVA081_CTRL_VGPU_CONFIG_ENUMERATE_VGPU_PER_PGPU_PARAMS {
+    NvU32 vgpuType;
+    NvU32 numVgpu;
+    NV_DECLARE_ALIGNED(NVA081_VGPU_GUEST vgpuGuest[NVA081_MAX_VGPU_PER_PGPU], 8);
+} NVA081_CTRL_VGPU_CONFIG_ENUMERATE_VGPU_PER_PGPU_PARAMS;
+
+/*
+ * NVA081_CTRL_CMD_VGPU_CONFIG_GET_VGPU_TYPE_INFO
+ *
+ * This command fetches vGPU type info from RM.
+ *
+ * Parameters:
+ *
+ * vgpuType [IN]
+ *   This parameter specifies the virtual GPU type for which vGPU info should be returned.
+ *
+ * vgpuTypeInfo [OUT]
+ *   This parameter returns NVA081_CTRL_VGPU_INFO data for the vGPU type specified by vgpuType.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OBJECT_NOT_FOUND
+ */
+
+#define NVA081_CTRL_CMD_VGPU_CONFIG_GET_VGPU_TYPE_INFO (0xa0810103) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPE_INFO_PARAMS_MESSAGE_ID" */
+
+/*
+ * NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPE_INFO_PARAMS
+ *
+ */
+#define NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPE_INFO_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPE_INFO_PARAMS {
+    NvU32 vgpuType;
+    NV_DECLARE_ALIGNED(NVA081_CTRL_VGPU_INFO vgpuTypeInfo, 8);
+} NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPE_INFO_PARAMS;
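A sketch that reads one vGPU type's profile data through the hypothetical rmControl() helper; treating hVgpuConfig as an NVA081 vGPU-config object handle is an assumption.

static NV_STATUS getVgpuTypeInfo(NvHandle hClient, NvHandle hVgpuConfig,
                                 NvU32 vgpuType,
                                 NVA081_CTRL_VGPU_INFO *pInfo)
{
    NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPE_INFO_PARAMS params = { 0 };
    NV_STATUS status;

    params.vgpuType = vgpuType;
    status = rmControl(hClient, hVgpuConfig,
                       NVA081_CTRL_CMD_VGPU_CONFIG_GET_VGPU_TYPE_INFO,
                       &params, sizeof(params));
    if (status == NV_OK)
        *pInfo = params.vgpuTypeInfo; // name, FB length, max instances, ...
    return status;                    // NV_ERR_OBJECT_NOT_FOUND: unknown type
}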
+/*
+ * NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPES_PARAMS
+ * This structure represents supported/creatable vGPU types on a pGPU
+ */
+typedef struct NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPES_PARAMS {
+    /*
+     * [IN] GPU Instance ID or Swizz ID
+     */
+    NvU32 gpuInstanceId;
+
+    /*
+     * [OUT] vGPU config state on a pGPU
+     */
+    NvU32 vgpuConfigState;
+
+    /*
+     * [OUT] Count of supported/creatable vGPU types on a pGPU
+     */
+    NvU32 numVgpuTypes;
+
+    /*
+     * [OUT] - Array of vGPU type ids supported/creatable on a pGPU
+     */
+    NvU32 vgpuTypes[NVA081_MAX_VGPU_TYPES_PER_PGPU];
+} NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPES_PARAMS;
+
+
+/*
+ * NVA081_CTRL_CMD_VGPU_CONFIG_GET_SUPPORTED_VGPU_TYPES
+ *
+ * This command fetches the count and list of vGPU types supported on a pGpu from RM
+ *
+ * Parameters:
+ *
+ * numVgpuTypes [OUT]
+ *   This parameter returns the number of vGPU types supported on this pGPU
+ *
+ * vgpuTypes [OUT]
+ *   This parameter returns the list of supported vGPU types on this pGPU
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NVA081_CTRL_CMD_VGPU_CONFIG_GET_SUPPORTED_VGPU_TYPES (0xa0810104) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_GET_SUPPORTED_VGPU_TYPES_PARAMS_MESSAGE_ID" */
+
+#define NVA081_CTRL_VGPU_CONFIG_GET_SUPPORTED_VGPU_TYPES_PARAMS_MESSAGE_ID (0x4U)
+
+typedef NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPES_PARAMS NVA081_CTRL_VGPU_CONFIG_GET_SUPPORTED_VGPU_TYPES_PARAMS;
+
+/*
+ * NVA081_CTRL_CMD_VGPU_CONFIG_GET_CREATABLE_VGPU_TYPES
+ *
+ * This command fetches the count and list of vGPU types creatable on a pGpu from RM
+ *
+ * Parameters:
+ *
+ * numVgpuTypes [OUT]
+ *   This parameter returns the number of vGPU types creatable on this pGPU
+ *
+ * vgpuTypes [OUT]
+ *   This parameter returns the list of creatable vGPU types on this pGPU
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NVA081_CTRL_CMD_VGPU_CONFIG_GET_CREATABLE_VGPU_TYPES (0xa0810105) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_GET_CREATABLE_VGPU_TYPES_PARAMS_MESSAGE_ID" */
+
+#define NVA081_CTRL_VGPU_CONFIG_GET_CREATABLE_VGPU_TYPES_PARAMS_MESSAGE_ID (0x5U)
+
+typedef NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPES_PARAMS NVA081_CTRL_VGPU_CONFIG_GET_CREATABLE_VGPU_TYPES_PARAMS;
+
+/*
+ * NVA081_CTRL_CMD_VGPU_CONFIG_EVENT_SET_NOTIFICATION
+ *
+ * This command sets event notification state for the associated subdevice/pGPU.
+ * This command requires that an instance of NV01_EVENT has been previously
+ * bound to the associated subdevice object.
+ *
+ * event
+ *   This parameter specifies the type of event to which the specified
+ *   action is to be applied. This parameter must specify a valid
+ *   NVA081_NOTIFIERS value (see cla081.h for more details) and should
+ *   not exceed one less than NVA081_NOTIFIERS_MAXCOUNT.
+ * action
+ *   This parameter specifies the desired event notification action.
+ *   Valid notification actions include:
+ *     NVA081_CTRL_SET_EVENT_NOTIFICATION_DISABLE
+ *       This action disables event notification for the specified
+ *       event for the associated subdevice object.
+ *     NVA081_CTRL_SET_EVENT_NOTIFICATION_SINGLE
+ *       This action enables single-shot event notification for the
+ *       specified event for the associated subdevice object.
+ *     NVA081_CTRL_SET_EVENT_NOTIFICATION_REPEAT
+ *       This action enables repeated event notification for the specified
+ *       event for the associated system controller object.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NVA081_CTRL_CMD_VGPU_CONFIG_EVENT_SET_NOTIFICATION (0xa0810106) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
+
+#define NVA081_CTRL_VGPU_CONFIG_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NVA081_CTRL_VGPU_CONFIG_EVENT_SET_NOTIFICATION_PARAMS {
+    NvU32 event;
+    NvU32 action;
+} NVA081_CTRL_VGPU_CONFIG_EVENT_SET_NOTIFICATION_PARAMS;
+
+
+/* valid event action values */
+#define NVA081_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE (0x00000000)
+#define NVA081_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE  (0x00000001)
+#define NVA081_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT  (0x00000002)
+
+/*
+ * NVA081_CTRL_CMD_VGPU_CONFIG_UPDATE_PGPU_INFO
+ *
+ * This command registers the GPU with the Linux kernel's mdev module for vGPU on KVM.
+ */ +#define NVA081_CTRL_CMD_VGPU_CONFIG_UPDATE_PGPU_INFO (0xa0810109) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | 0x9" */ + +/* + * NVA081_CTRL_CMD_VGPU_CONFIG_SET_VGPU_INSTANCE_ENCODER_CAPACITY + * + * This command is used to set vGPU instance's (represented by vgpuUuid) encoder Capacity. + * + * vgpuUuid + * This parameter specifies the uuid of vGPU assigned to VM. + * encoderCapacity + * Encoder capacity value from 0 to 100. Value of 0x00 indicates encoder performance + * may be minimal for this GPU and software should fall back to CPU-based encode. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NVA081_CTRL_CMD_VGPU_CONFIG_SET_VGPU_INSTANCE_ENCODER_CAPACITY (0xa0810110) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_VGPU_INSTANCE_ENCODER_CAPACITY_PARAMS_MESSAGE_ID" */ + +/* + * NVA081_CTRL_VGPU_CONFIG_VGPU_INSTANCE_ENCODER_CAPACITY_PARAMS + * + * This structure represents encoder capacity for vgpu instance. + */ +#define NVA081_CTRL_VGPU_CONFIG_VGPU_INSTANCE_ENCODER_CAPACITY_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NVA081_CTRL_VGPU_CONFIG_VGPU_INSTANCE_ENCODER_CAPACITY_PARAMS { + NvU8 vgpuUuid[VM_UUID_SIZE]; + NvU32 encoderCapacity; +} NVA081_CTRL_VGPU_CONFIG_VGPU_INSTANCE_ENCODER_CAPACITY_PARAMS; + +/* + * NVA081_CTRL_CMD_VGPU_CONFIG_GET_VGPU_FB_USAGE + * + * This command is used to get the FB usage of all vGPU instances running on a GPU. + * + * vgpuCount + * This field specifies the number of vGPU devices for which FB usage is returned. + * vgpuFbUsage + * This is an array of type NVA081_VGPU_FB_USAGE, which contains a list of vGPUs + * and their corresponding FB usage in bytes; + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NVA081_CTRL_CMD_VGPU_CONFIG_GET_VGPU_FB_USAGE (0xa0810111) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_GET_VGPU_FB_USAGE_PARAMS_MESSAGE_ID" */ + +typedef struct NVA081_VGPU_FB_USAGE { + NvU8 vgpuUuid[VM_UUID_SIZE]; + NV_DECLARE_ALIGNED(NvU64 fbUsed, 8); +} NVA081_VGPU_FB_USAGE; + +/* + * NVA081_CTRL_VGPU_CONFIG_GET_VGPU_FB_USAGE_PARAMS + * + * This structure represents the FB usage information of vGPU instances running on a GPU. + */ +#define NVA081_CTRL_VGPU_CONFIG_GET_VGPU_FB_USAGE_PARAMS_MESSAGE_ID (0x11U) + +typedef struct NVA081_CTRL_VGPU_CONFIG_GET_VGPU_FB_USAGE_PARAMS { + NvU32 vgpuCount; + NV_DECLARE_ALIGNED(NVA081_VGPU_FB_USAGE vgpuFbUsage[NVA081_MAX_VGPU_PER_PGPU], 8); +} NVA081_CTRL_VGPU_CONFIG_GET_VGPU_FB_USAGE_PARAMS; + +/* + * NVA081_CTRL_CMD_VGPU_CONFIG_GET_MIGRATION_CAP + * + * This command is used to query whether pGPU is live migration capable or not. + * + * bMigrationCap + * Set to NV_TRUE if pGPU is migration capable. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_REQUEST + */ +#define NVA081_CTRL_CMD_VGPU_CONFIG_GET_MIGRATION_CAP (0xa0810112) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_CMD_VGPU_CONFIG_GET_MIGRATION_CAP_PARAMS_MESSAGE_ID" */ + +/* + * NVA081_CTRL_CMD_VGPU_CONFIG_GET_MIGRATION_CAP_PARAMS + */ +#define NVA081_CTRL_CMD_VGPU_CONFIG_GET_MIGRATION_CAP_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NVA081_CTRL_CMD_VGPU_CONFIG_GET_MIGRATION_CAP_PARAMS { + NvBool bMigrationCap; +} NVA081_CTRL_CMD_VGPU_CONFIG_GET_MIGRATION_CAP_PARAMS; + +/* + * NVA081_CTRL_CMD_VGPU_CONFIG_GET_HOST_FB_RESERVATION + * + * This command is used to get the host FB requirements + * + * hostReservedFb [OUT] + * Amount of FB reserved for the host + * eccAndPrReservedFb [OUT] + * Amount of FB reserved for the ecc and page retirement + * totalReservedFb [OUT] + * Total FB reservation + * vgpuTypeId [IN] + * The Type ID for VGPU profile + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NVA081_CTRL_CMD_VGPU_CONFIG_GET_HOST_FB_RESERVATION (0xa0810113) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_GET_HOST_FB_RESERVATION_PARAMS_MESSAGE_ID" */ + +#define NVA081_CTRL_VGPU_CONFIG_GET_HOST_FB_RESERVATION_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NVA081_CTRL_VGPU_CONFIG_GET_HOST_FB_RESERVATION_PARAMS { + NV_DECLARE_ALIGNED(NvU64 hostReservedFb, 8); + NV_DECLARE_ALIGNED(NvU64 eccAndPrReservedFb, 8); + NV_DECLARE_ALIGNED(NvU64 totalReservedFb, 8); + NvU32 vgpuTypeId; +} NVA081_CTRL_VGPU_CONFIG_GET_HOST_FB_RESERVATION_PARAMS; + +/* + * NVA081_CTRL_CMD_VGPU_CONFIG_GET_PGPU_METADATA_STRING + * + * This command is used to get the pGpu metadata string. 
+ * + * pGpuString + * String holding pGpu Metadata + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_REQUEST + * NV_ERR_INVALID_STATE + * NV_ERR_INSUFFICIENT_RESOURCES + */ +#define NVA081_CTRL_CMD_VGPU_CONFIG_GET_PGPU_METADATA_STRING (0xa0810114) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_GET_PGPU_METADATA_STRING_PARAMS_MESSAGE_ID" */ + +#define NVA081_CTRL_VGPU_CONFIG_GET_PGPU_METADATA_STRING_PARAMS_MESSAGE_ID (0x14U) + +typedef struct NVA081_CTRL_VGPU_CONFIG_GET_PGPU_METADATA_STRING_PARAMS { + NvU8 pGpuString[NVA081_PGPU_METADATA_STRING_SIZE]; +} NVA081_CTRL_VGPU_CONFIG_GET_PGPU_METADATA_STRING_PARAMS; + +#define NVA081_CTRL_CMD_VGPU_CONFIG_GET_DOORBELL_EMULATION_SUPPORT (0xa0810115) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_GET_DOORBELL_EMULATION_SUPPORT_PARAMS_MESSAGE_ID" */ + +#define NVA081_CTRL_VGPU_CONFIG_GET_DOORBELL_EMULATION_SUPPORT_PARAMS_MESSAGE_ID (0x15U) + +typedef struct NVA081_CTRL_VGPU_CONFIG_GET_DOORBELL_EMULATION_SUPPORT_PARAMS { + NvBool doorbellEmulationEnabled; +} NVA081_CTRL_VGPU_CONFIG_GET_DOORBELL_EMULATION_SUPPORT_PARAMS; + +/* + * NVA081_CTRL_CMD_VGPU_CONFIG_GET_FREE_SWIZZID + * + * This command is used to get free swizzid from RM + * + * gpuPciId [IN] + * This param specifies the PCI device ID of VF on which VM is running + * + * vgpuTypeId [IN] + * This param specifies the Type ID for VGPU profile + * + * swizzId [OUT] + * This param specifies the GPU Instance ID or Swizz ID + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_REQUEST + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_ARGUMENT + */ +#define NVA081_CTRL_CMD_VGPU_CONFIG_GET_FREE_SWIZZID (0xa0810116) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_GET_FREE_SWIZZID_PARAMS_MESSAGE_ID" */ + +#define NVA081_CTRL_VGPU_CONFIG_GET_FREE_SWIZZID_PARAMS_MESSAGE_ID (0x16U) + +typedef struct NVA081_CTRL_VGPU_CONFIG_GET_FREE_SWIZZID_PARAMS { + NvU32 gpuPciId; + NvU32 vgpuTypeId; + NvU32 swizzId; +} NVA081_CTRL_VGPU_CONFIG_GET_FREE_SWIZZID_PARAMS; + +/* + * NVA081_CTRL_CMD_PGPU_GET_MULTI_VGPU_SUPPORT_INFO + * + * This command is used to get multi vGPU related info for the physical GPU. 
+ *
+ * fractionalmultiVgpuSupported [OUT]
+ *   This param specifies whether fractional multi-vGPU is supported
+ *
+ * heterogeneousTimesliceProfilesSupported [OUT]
+ *   This param specifies whether concurrent execution of timesliced vGPU profiles of differing types is supported
+ *
+ * heterogeneousTimesliceSizesSupported [OUT]
+ *   This param specifies whether concurrent execution of timesliced vGPU profiles of differing framebuffer sizes is supported
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_REQUEST
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NVA081_CTRL_CMD_PGPU_GET_MULTI_VGPU_SUPPORT_INFO (0xa0810117) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_PGPU_GET_MULTI_VGPU_SUPPORT_INFO_PARAMS_MESSAGE_ID" */
+
+#define NVA081_CTRL_PGPU_GET_MULTI_VGPU_SUPPORT_INFO_PARAMS_MESSAGE_ID (0x17U)
+
+typedef struct NVA081_CTRL_PGPU_GET_MULTI_VGPU_SUPPORT_INFO_PARAMS {
+    NvU32 fractionalmultiVgpuSupported;
+    NvU32 heterogeneousTimesliceProfilesSupported;
+    NvU32 heterogeneousTimesliceSizesSupported;
+} NVA081_CTRL_PGPU_GET_MULTI_VGPU_SUPPORT_INFO_PARAMS;
+
+/*
+ * NVA081_CTRL_CMD_GET_VGPU_DRIVER_CAPS
+ *
+ * This command is used to get vGPU driver capabilities.
+ *
+ * heterogeneousMultiVgpuSupported [OUT]
+ *   This param specifies whether heterogeneous multi-vGPU is supported
+ * warmUpdateSupported [OUT]
+ *   This param specifies whether the FSR / warm driver update operation is supported,
+ *   i.e. FSR and warm update of the vGPU host driver without terminating the running guest VM
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_REQUEST
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NVA081_CTRL_CMD_GET_VGPU_DRIVER_CAPS (0xa0810118) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_GET_VGPU_DRIVER_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NVA081_CTRL_GET_VGPU_DRIVER_CAPS_PARAMS_MESSAGE_ID (0x18U)
+
+typedef struct NVA081_CTRL_GET_VGPU_DRIVER_CAPS_PARAMS {
+    NvU32 heterogeneousMultiVgpuSupported;
+    NvU32 warmUpdateSupported;
+} NVA081_CTRL_GET_VGPU_DRIVER_CAPS_PARAMS;
+
+/*
+ * NVA081_CTRL_CMD_VGPU_CONFIG_SET_PGPU_INFO
+ *
+ * This command is used to set pGPU info
+ *
+ * fractionalMultiVgpu [IN]
+ *   This param specifies whether fractional multi-vGPU is enabled or disabled on the GPU
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OBJECT_NOT_FOUND
+ */
+
+#define NVA081_CTRL_CMD_VGPU_CONFIG_SET_PGPU_INFO (0xa0810119) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_SET_PGPU_INFO_PARAMS_MESSAGE_ID" */
+
+#define NVA081_CTRL_VGPU_CONFIG_SET_PGPU_INFO_PARAMS_MESSAGE_ID (0x19U)
+
+typedef struct NVA081_CTRL_VGPU_CONFIG_SET_PGPU_INFO_PARAMS {
+    NvU32 fractionalMultiVgpu;
+} NVA081_CTRL_VGPU_CONFIG_SET_PGPU_INFO_PARAMS;
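A capability-query sketch, assuming the hypothetical rmControl() helper.

static NvBool hostSupportsWarmUpdate(NvHandle hClient, NvHandle hVgpuConfig)
{
    NVA081_CTRL_GET_VGPU_DRIVER_CAPS_PARAMS params = { 0 };
    if (rmControl(hClient, hVgpuConfig, NVA081_CTRL_CMD_GET_VGPU_DRIVER_CAPS,
                  &params, sizeof(params)) != NV_OK)
        return NV_FALSE;
    // Non-zero means the host driver can be warm-updated (FSR) without
    // terminating running guest VMs.
    return params.warmUpdateSupported != 0;
}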
NVA081_CTRL_VGPU_CONFIG_VALIDATE_SWIZZID_PARAMS_MESSAGE_ID (0x1aU) + +typedef struct NVA081_CTRL_VGPU_CONFIG_VALIDATE_SWIZZID_PARAMS { + NvU32 vgpuTypeId; + NvU32 swizzId; +} NVA081_CTRL_VGPU_CONFIG_VALIDATE_SWIZZID_PARAMS; + +/* + * NVA081_CTRL_CMD_VGPU_CONFIG_UPDATE_PLACEMENT_INFO + * + * This command is used to get a placement ID from RM for timesliced + * heterogeneous or homogeneous vGPUs. The physical GPU operates in either + * heterogeneous or homogeneous mode at any given time. + * + * isHeterogeneousEnabled [OUT] + * This param specifies whether timesliced heterogeneous vGPU is enabled + * for profiles of different FB sizes. + * + * placementId [IN / OUT] + * This param specifies the input placement ID provided by the hypervisor + * or the output placement ID reserved by RM for the vGPU type ID. + * + * vgpuTypeId [IN] + * This param specifies the Type ID for the vGPU profile + * + * vgpuDevName [IN] + * This param specifies the VF BDF of the virtual GPU. + * + * guestFbLength [OUT] + * This param specifies the FB size assigned to the VM. + * + * guestFbOffset [OUT] + * This param specifies the starting FB offset assigned to the VM. + * + * gspHeapOffset [OUT] + * This param specifies the heap offset of the GSP vGPU task. + * + * guestBar1PFOffset [OUT] + * This param specifies the starting PF BAR1 offset assigned to the VM. + * Only applicable to legacy GPUs. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_REQUEST + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_ARGUMENT + */ +#define NVA081_CTRL_CMD_VGPU_CONFIG_UPDATE_PLACEMENT_INFO (0xa081011b) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_UPDATE_PLACEMENT_INFO_PARAMS_MESSAGE_ID" */ + +#define NVA081_CTRL_VGPU_CONFIG_UPDATE_PLACEMENT_INFO_PARAMS_MESSAGE_ID (0x1bU) + +typedef struct NVA081_CTRL_VGPU_CONFIG_UPDATE_PLACEMENT_INFO_PARAMS { + NvBool isHeterogeneousEnabled; + NvU16 placementId; + NvU32 vgpuTypeId; + NvU8 vgpuDevName[VM_UUID_SIZE]; + NV_DECLARE_ALIGNED(NvU64 guestFbLength, 8); + NV_DECLARE_ALIGNED(NvU64 guestFbOffset, 8); + NV_DECLARE_ALIGNED(NvU64 gspHeapOffset, 8); + NV_DECLARE_ALIGNED(NvU64 guestBar1PFOffset, 8); +} NVA081_CTRL_VGPU_CONFIG_UPDATE_PLACEMENT_INFO_PARAMS; + +/* + * NVA081_CTRL_CMD_VGPU_CONFIG_GET_CREATABLE_PLACEMENTS + * + * This command fetches the count and list of creatable vGPU placements + * + * Parameters: + * + * gpuInstanceId [IN] + * GPU Instance ID or Swizz ID + * + * vgpuTypeId [IN] + * The client-provided vGPU type ID + * + * placementSize [OUT] + * The number of placement slots occupied by the vGPU type + * + * count [OUT] + * This parameter returns the number of placements supported for the vGPU + * + * placementIds [OUT] + * Array of creatable placement IDs + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_NOT_SUPPORTED + */ + +#define NVA081_CTRL_CMD_VGPU_CONFIG_GET_CREATABLE_PLACEMENTS (0xa081011c) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_GET_CREATABLE_PLACEMENTS_PARAMS_MESSAGE_ID" */ + +#define NVA081_CTRL_VGPU_CONFIG_GET_CREATABLE_PLACEMENTS_PARAMS_MESSAGE_ID (0x1cU) + +typedef struct NVA081_CTRL_VGPU_CONFIG_GET_CREATABLE_PLACEMENTS_PARAMS { + NvU32 gpuInstanceId; + NvU32 vgpuTypeId; + NvU32 placementSize; + NvU32 count; + NvU32 placementIds[NVA081_MAX_VGPU_PER_PGPU]; +} NVA081_CTRL_VGPU_CONFIG_GET_CREATABLE_PLACEMENTS_PARAMS; + +/* + * NVA081_CTRL_CMD_PGPU_GET_VGPU_STREAMING_CAPABILITY + * + * This command is used to
get the streaming capability for the physical GPU. + * + * streamingCapability [OUT] + * This param specifies whether vGPU profiles on the GPU support migration data streaming + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_REQUEST + * NV_ERR_INVALID_ARGUMENT + */ +#define NVA081_CTRL_CMD_PGPU_GET_VGPU_STREAMING_CAPABILITY (0xa081011d) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_PGPU_GET_VGPU_STREAMING_CAPABILITY_PARAMS_MESSAGE_ID" */ + +#define NVA081_CTRL_PGPU_GET_VGPU_STREAMING_CAPABILITY_PARAMS_MESSAGE_ID (0x1dU) + +typedef struct NVA081_CTRL_PGPU_GET_VGPU_STREAMING_CAPABILITY_PARAMS { + NvBool streamingCapability; +} NVA081_CTRL_PGPU_GET_VGPU_STREAMING_CAPABILITY_PARAMS; + +/* vGPU capabilities */ +#define NVA081_CTRL_VGPU_CAPABILITY_MINI_QUARTER_GPU 0 +#define NVA081_CTRL_VGPU_CAPABILITY_COMPUTE_MEDIA_ENGINE_GPU 1 +#define NVA081_CTRL_VGPU_CAPABILITY_WARM_UPDATE 2 +#define NVA081_CTRL_VGPU_CAPABILITY_DEVICE_STREAMING 3 +#define NVA081_CTRL_VGPU_CAPABILITY_READ_DEVICE_BUFFER_BW 4 +#define NVA081_CTRL_VGPU_CAPABILITY_WRITE_DEVICE_BUFFER_BW 5 +#define NVA081_CTRL_VGPU_CAPABILITY_HETEROGENEOUS_TIMESLICE_SIZES 6 +#define NVA081_CTRL_VGPU_CAPABILITY_HETEROGENEOUS_TIMESLICE_PROFILES 7 +#define NVA081_CTRL_VGPU_CAPABILITY_FRACTIONAL_MULTI_VGPU 8 +#define NVA081_CTRL_VGPU_CAPABILITY_HOMOGENEOUS_PLACEMENT_ID 9 + +#define NVA081_CTRL_VGPU_CAPABILITY_MIG_TIMESLICING_SUPPORTED 10 +#define NVA081_CTRL_VGPU_CAPABILITY_MIG_TIMESLICING_MODE_ENABLED 11 + + + +/* + * NVA081_CTRL_CMD_VGPU_SET_CAPABILITY + * + * This command is used to set a vGPU capability for the physical GPU. + * + * capability [IN] + * This param specifies the requested capability of the device that is to be set. + * One of the NVA081_CTRL_VGPU_CAPABILITY* values + * + * state [IN] + * This param specifies the state of the capability + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_INVALID_ARGUMENT + */ +#define NVA081_CTRL_CMD_VGPU_SET_CAPABILITY (0xa081011e) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_SET_CAPABILITY_PARAMS_MESSAGE_ID" */ + +#define NVA081_CTRL_VGPU_SET_CAPABILITY_PARAMS_MESSAGE_ID (0x1eU) + +typedef struct NVA081_CTRL_VGPU_SET_CAPABILITY_PARAMS { + NvU32 capability; + NvBool state; +} NVA081_CTRL_VGPU_SET_CAPABILITY_PARAMS; + +/* + * NVA081_CTRL_CMD_VGPU_GET_CAPABILITY + * + * This command is used to get the state of a vGPU capability for the physical GPU. + * + * capability [IN] + * This param specifies the requested capability of the device that is to be queried. + * One of the NVA081_CTRL_VGPU_CAPABILITY* values + * + * state [OUT] + * This param specifies the state of the capability + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_INVALID_ARGUMENT + */ +#define NVA081_CTRL_CMD_VGPU_GET_CAPABILITY (0xa081011f) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_GET_CAPABILITY_PARAMS_MESSAGE_ID" */ + +#define NVA081_CTRL_VGPU_GET_CAPABILITY_PARAMS_MESSAGE_ID (0x1fU) + +typedef struct NVA081_CTRL_VGPU_GET_CAPABILITY_PARAMS { + NvU32 capability; + NvU32 state; +} NVA081_CTRL_VGPU_GET_CAPABILITY_PARAMS; + +/* + * NVA081_CTRL_CMD_VGPU_SET_VM_NAME + * + * This command is used to set the VM name for the host. + * + * vmIdType [IN] + * This param provides the guest VM ID type based on the host. + * + * guestVmId [IN] + * This param provides the guest VM identifier to RM based on the host.
+ * + * vmName [IN] + * This param provides the VM name of the vGPU device attached + * to the above-mentioned host VM. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_INVALID_ARGUMENT + */ + +#define NVA081_CTRL_CMD_VGPU_SET_VM_NAME (0xa0810120) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_SET_VM_NAME_PARAMS_MESSAGE_ID" */ + +#define NVA081_CTRL_VGPU_SET_VM_NAME_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NVA081_CTRL_VGPU_SET_VM_NAME_PARAMS { + VM_ID_TYPE vmIdType; + NV_DECLARE_ALIGNED(VM_ID guestVmId, 8); + NvU8 vmName[NVA081_VM_NAME_SIZE]; +} NVA081_CTRL_VGPU_SET_VM_NAME_PARAMS; + +/* + * NVA081_CTRL_CMD_VGPU_GET_BAR_INFO + * + * This command is used to get the BAR info for a vGPU. + * + * gpuPciId [IN] + * This param specifies the PCI device ID of the VF on which the VM is running + * + * vgpuName [IN] + * This param provides the vGPU device name to RM. + * + * configParams [IN] + * This param provides the vGPU config params to RM + * + * barSizes [OUT] + * This param provides the BAR size for each region index of the device + * + * sparseOffsets [OUT] + * This param provides the offset of each sparse mmap region in BAR0 + * + * sparseSizes [OUT] + * This param provides the size of each sparse mmap region in BAR0 + * + * sparseCount [OUT] + * This param provides the number of sparse mmap regions in BAR0 + * + * isBar064bit [OUT] + * This param indicates whether BAR0 of the vGPU device is 64-bit + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_INVALID_ARGUMENT + */ + +#define NVA081_CTRL_CMD_VGPU_GET_BAR_INFO (0xa0810121) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_GET_BAR_INFO_PARAMS_MESSAGE_ID" */ + +#define NVA081_CTRL_VGPU_GET_BAR_INFO_PARAMS_MESSAGE_ID (0x21U) + +typedef struct NVA081_CTRL_VGPU_GET_BAR_INFO_PARAMS { + NvU32 gpuPciId; + NvU8 vgpuName[VM_UUID_SIZE]; + NvU8 configParams[NVA081_CONFIG_PARAMS_MAX_LENGTH]; + NV_DECLARE_ALIGNED(NvU64 barSizes[NVA081_MAX_BAR_REGION_COUNT], 8); + NV_DECLARE_ALIGNED(NvU64 sparseOffsets[NVA081_MAX_SPARSE_REGION_COUNT], 8); + NV_DECLARE_ALIGNED(NvU64 sparseSizes[NVA081_MAX_SPARSE_REGION_COUNT], 8); + NvU32 sparseCount; + NvBool isBar064bit; +} NVA081_CTRL_VGPU_GET_BAR_INFO_PARAMS; + +/* + * NVA081_CTRL_CMD_VGPU_CONFIG_GET_MIGRATION_BANDWIDTH + * + * This command is used to get the migration bandwidth of the physical GPU.
+ * + * migrationBandwidth [OUT] + * This param specifies the migration bandwidth of the GPU + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_REQUEST + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_ARGUMENT + */ +#define NVA081_CTRL_CMD_VGPU_CONFIG_GET_MIGRATION_BANDWIDTH (0xa0810122) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_GET_MIGRATION_BANDWIDTH_PARAMS_MESSAGE_ID" */ + +#define NVA081_CTRL_VGPU_CONFIG_GET_MIGRATION_BANDWIDTH_PARAMS_MESSAGE_ID (0x22U) + +typedef struct NVA081_CTRL_VGPU_CONFIG_GET_MIGRATION_BANDWIDTH_PARAMS { + NvU32 migrationBandwidth; +} NVA081_CTRL_VGPU_CONFIG_GET_MIGRATION_BANDWIDTH_PARAMS; + +/* + * NVA081_CTRL_CMD_VGPU_CONFIG_ENUMERATE_VGPU_PER_GPU_INSTANCE + * + * This command enumerates the list of vGPU guest instances per GPU instance + * + * gpuInstanceId [IN] + * This parameter specifies the GPU Instance Id or Swizz Id + * + * numVgpu [OUT] + * This parameter specifies the number of virtual GPUs created on a GPU instance + * + * vgpuInstanceIds [OUT] + * This parameter specifies an array of vGPU type IDs active on a GPU instance + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_NOT_SUPPORTED + */ +#define NVA081_CTRL_CMD_VGPU_CONFIG_ENUMERATE_VGPU_PER_GPU_INSTANCE (0xa0810124) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_ENUMERATE_VGPU_PER_GPU_INSTANCE_PARAMS_MESSAGE_ID" */ + +#define NVA081_CTRL_VGPU_CONFIG_ENUMERATE_VGPU_PER_GPU_INSTANCE_PARAMS_MESSAGE_ID (0x24U) + +typedef struct NVA081_CTRL_VGPU_CONFIG_ENUMERATE_VGPU_PER_GPU_INSTANCE_PARAMS { + NvU32 gpuInstanceId; + NvU32 numVgpu; + NvU32 vgpuInstanceIds[NVA081_MAX_VGPU_PER_GI]; +} NVA081_CTRL_VGPU_CONFIG_ENUMERATE_VGPU_PER_GPU_INSTANCE_PARAMS; + +/* + * NVA081_CTRL_CMD_VGPU_CONFIG_CLEAR_SWIZZID_MASK + * + * This command clears the assigned swizzId mask during vGPU destroy + * + * swizzId [IN] + * This parameter specifies the SwizzId of the vGPU device + */ +#define NVA081_CTRL_CMD_VGPU_CONFIG_CLEAR_SWIZZID_MASK (0xa0810125) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_CMD_VGPU_CONFIG_CLEAR_SWIZZID_MASK_PARAMS_MESSAGE_ID" */ +#define NVA081_CTRL_CMD_VGPU_CONFIG_CLEAR_SWIZZID_MASK_PARAMS_MESSAGE_ID (0x25U) + +typedef struct NVA081_CTRL_CMD_VGPU_CONFIG_CLEAR_SWIZZID_MASK_PARAMS { + NvU32 swizzId; +} NVA081_CTRL_CMD_VGPU_CONFIG_CLEAR_SWIZZID_MASK_PARAMS; + +/* _ctrlA081vgpuconfig_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlb06f.h b/src/common/sdk/nvidia/inc/ctrl/ctrlb06f.h new file mode 100644 index 0000000..9b24b20 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlb06f.h @@ -0,0 +1,365 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlb06f.finn +// + + + + +/* MAXWELL_CHANNEL_GPFIFO_A control commands and parameters */ + +#include "ctrl/ctrlxxxx.h" +#define NVB06F_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0xB06F, NVB06F_CTRL_##cat, idx) + +/* MAXWELL_CHANNEL_GPFIFO_A command categories (6bits) */ +#define NVB06F_CTRL_RESERVED (0x00) +#define NVB06F_CTRL_GPFIFO (0x01) + +/* + * NVB06F_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NVB06F_CTRL_CMD_NULL (0xb06f0000) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + +/* + * NVB06F_CTRL_CMD_GET_ENGINE_CTX_SIZE + * + * This command returns the size of the engine context. + * + * engineID + * This parameter specifies the engine whose context size is to be retrieved. + * + * See the description of the NV2080_ENGINE_TYPE values in cl2080.h for more + * information + * + * size + * This parameter returns the size of the engine context + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NVB06F_CTRL_CMD_GET_ENGINE_CTX_SIZE (0xb06f010b) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS_MESSAGE_ID" */ + +#define NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS { + NvU32 engineID; + NvU32 size; +} NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS; + +/* + * NVB06F_CTRL_CMD_GET_ENGINE_CTX_DATA + * + * This command returns the context buffer data for the given engine for vGPU motion. + * + * engineID + * This parameter specifies the engine context to be retrieved. + * + * See the description of the NV2080_ENGINE_TYPE values in cl2080.h for + * more information + * + * size + * This parameter specifies the size of the context buffer. + * + * pEngineCtxBuff + * This parameter specifies the context buffer for motion operation to be filled in. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + */ + +#define NVB06F_CTRL_CMD_GET_ENGINE_CTX_DATA (0xb06f010c) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS_MESSAGE_ID" */ + +typedef struct SW_OBJECT_ENGINE_CTX { + NvU32 hObject; + NvU32 subCh; +} SW_OBJECT_ENGINE_CTX; + +#define NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS { + NvU32 engineID; + NvU32 size; + NV_DECLARE_ALIGNED(NvP64 pEngineCtxBuff, 8); +} NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS; + +/* + * NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA + * + * This command restores the context buffer for the given engine for vGPU motion. + * + * engineID + * This parameter specifies the engine context to be restored.
+ * + * See the description of the NV2080_ENGINE_TYPE values in cl2080.h for + * more information + * + * size + * This parameter specifies the size of the context buffer. + * + * pEngineCtxBuff + * This parameter specifies the context buffer for motion operation restore. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + */ + +#define NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA (0xb06f010d) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVB06F_CTRL_MIGRATE_ENGINE_CTX_DATA_PARAMS_MESSAGE_ID" */ + +#define NVB06F_CTRL_MIGRATE_ENGINE_CTX_DATA_PARAMS_MESSAGE_ID (0xDU) + +typedef NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS NVB06F_CTRL_MIGRATE_ENGINE_CTX_DATA_PARAMS; + +/* + * NVB06F_CTRL_CMD_GET_ENGINE_CTX_STATE + * + * This command returns the context buffer state of the given engine for vGPU motion. + * + * engineID + * This input parameter specifies the engine context to be queried. + * + * See the description of the NV2080_ENGINE_TYPE values in cl2080.h for + * more information + * + * hObject + * This parameter specifies the channel object that is running on the SW engine. + * + * engineCtxState + * This parameter specifies the engine context state. For a SW engine, the only meaningful + * fields are INVALID, INITIALIZED and the subch. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + */ + +#define NVB06F_CTRL_CMD_GET_ENGINE_CTX_STATE (0xb06f010e) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVB06F_CTRL_GET_ENGINE_CTX_STATE_PARAMS_MESSAGE_ID" */ + +#define NVB06F_CTRL_GET_ENGINE_CTX_STATE_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NVB06F_CTRL_GET_ENGINE_CTX_STATE_PARAMS { + NvU32 engineID; + NvU32 hObject; + NvU32 engineCtxState; +} NVB06F_CTRL_GET_ENGINE_CTX_STATE_PARAMS; + +#define NVB06F_CTRL_GET_ENGINE_CTX_VIRTUAL_CONTEXT 0:0 +#define NVB06F_CTRL_GET_ENGINE_CTX_VIRTUAL_CONTEXT_DISABLED (0x00000000) +#define NVB06F_CTRL_GET_ENGINE_CTX_VIRTUAL_CONTEXT_ENABLED (0x00000001) +#define NVB06F_CTRL_GET_ENGINE_CTX_STATE 2:1 +#define NVB06F_CTRL_GET_ENGINE_CTX_STATE_INVALID (0x00000000) +#define NVB06F_CTRL_GET_ENGINE_CTX_STATE_INITIALIZED (0x00000001) +#define NVB06F_CTRL_GET_ENGINE_CTX_STATE_PROMOTED (0x00000002) +#define NVB06F_CTRL_GET_ENGINE_CTX_STATE_EVICTED (0x00000003) +#define NVB06F_CTRL_GET_ENGINE_CTX_SUBCH 6:4 + +/* + * NVB06F_CTRL_CMD_GET_CHANNEL_HW_STATE + * + * This command returns the channel HW state. + * + * state + * This parameter stores single bit-fields corresponding to the following + * channel HW states: + * NEXT + * A value of NV_TRUE indicates that this channel should be scheduled + * first when GPU Host chooses this TSG to run next on the runlist. + * + * CTX_RELOAD + * A value of NV_TRUE indicates that this channel's context was + * preempted and needs to be reloaded. + * + * PENDING + * A value of NV_TRUE indicates that this channel is not loaded on the + * PBDMA but methods still remain. This includes the completion of + * semaphore acquires and WFI methods. This field is deprecated + * from Volta onwards, and can be ignored. + * + * ENG_FAULTED + * A value of NV_TRUE indicates that the channel's engine has faulted, + * and the channel will not be rescheduled until the fault has been + * cleared. This bit should only be set as part of migration, and will + * not necessarily cause the channel to be prevented from being + * scheduled.
+ * + * PBDMA_FAULTED + * A value of NV_TRUE indicates that the channel's PBDMA has faulted, + * and the channel will not be rescheduled until the fault has been + * cleared. This bit should only be set as part of migration, and will + * not necessarily cause the channel to be prevented from being + * scheduled. + * + * ACQUIRE_FAIL + * A value of NV_TRUE indicates that the engine scheduler failed to + * acquire a semaphore for this channel. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + */ +#define NVB06F_CTRL_CMD_GET_CHANNEL_HW_STATE (0xb06f010f) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVB06F_CTRL_GET_CHANNEL_HW_STATE_PARAMS_MESSAGE_ID" */ + +#define NVB06F_CTRL_CMD_CHANNEL_HW_STATE_NEXT 0:0 +#define NVB06F_CTRL_CMD_CHANNEL_HW_STATE_CTX_RELOAD 1:1 +#define NVB06F_CTRL_CMD_CHANNEL_HW_STATE_PENDING 2:2 +#define NVB06F_CTRL_CMD_CHANNEL_HW_STATE_ENG_FAULTED 3:3 +#define NVB06F_CTRL_CMD_CHANNEL_HW_STATE_PBDMA_FAULTED 4:4 +#define NVB06F_CTRL_CMD_CHANNEL_HW_STATE_ACQUIRE_FAIL 5:5 + +typedef struct NVB06F_CTRL_CHANNEL_HW_STATE_PARAMS { + NvU32 state; +} NVB06F_CTRL_CHANNEL_HW_STATE_PARAMS; + +#define NVB06F_CTRL_GET_CHANNEL_HW_STATE_PARAMS_MESSAGE_ID (0xFU) + +typedef NVB06F_CTRL_CHANNEL_HW_STATE_PARAMS NVB06F_CTRL_GET_CHANNEL_HW_STATE_PARAMS; + +/* + * NVB06F_CTRL_CMD_SET_CHANNEL_HW_STATE + * + * This command restores the channel HW state. + * + * state + * This parameter stores single bit-fields corresponding to the following + * channel HW states: + * NEXT + * A value of NV_TRUE indicates that this channel should be scheduled + * first when GPU Host chooses this TSG to run next on the runlist. + * + * CTX_RELOAD + * A value of NV_TRUE indicates that this channel's context was + * preempted and needs to be reloaded. + * + * PENDING + * A value of NV_TRUE indicates that this channel is not loaded on the + * PBDMA but methods still remain. This includes the completion of + * semaphore acquires and WFI methods. This field is deprecated + * from Volta onwards, and can be ignored. + * + * ENG_FAULTED + * A value of NV_TRUE indicates that the channel's engine has faulted, + * and the channel will not be rescheduled until the fault has been + * cleared. This bit should only be set as part of migration, and will + * not necessarily cause the channel to be prevented from being + * scheduled. + * + * PBDMA_FAULTED + * A value of NV_TRUE indicates that the channel's PBDMA has faulted, + * and the channel will not be rescheduled until the fault has been + * cleared. This bit should only be set as part of migration, and will + * not necessarily cause the channel to be prevented from being + * scheduled. + * + * ACQUIRE_FAIL + * A value of NV_TRUE indicates that the engine scheduler failed to + * acquire a semaphore for this channel. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + */ +#define NVB06F_CTRL_CMD_SET_CHANNEL_HW_STATE (0xb06f0110) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVB06F_CTRL_SET_CHANNEL_HW_STATE_PARAMS_MESSAGE_ID" */ + +#define NVB06F_CTRL_SET_CHANNEL_HW_STATE_PARAMS_MESSAGE_ID (0x10U) + +typedef NVB06F_CTRL_CHANNEL_HW_STATE_PARAMS NVB06F_CTRL_SET_CHANNEL_HW_STATE_PARAMS; + +/* + * NVB06F_CTRL_CMD_SAVE_ENGINE_CTX_DATA + * + * This command returns the context buffer data for the given engine for vGPU migration. + * + * engineID + * This parameter specifies the engine context to be retrieved.
+ * + * See the description of the NV2080_ENGINE_TYPE values in cl2080.h for + * more information + * + * size + * This parameter specifies the size of the context buffer. The maximum size + * of the engine context buffer is chosen to support only the RAMFC and instance + * memory block. To use this RmCtrl for a larger buffer, like the GR + * context, the max size would have to be increased. + * + * engineCtxBuff + * This parameter specifies the context buffer data. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + */ + +#define NVB06F_CTRL_CMD_SAVE_ENGINE_CTX_DATA (0xb06f0111) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS_MESSAGE_ID" */ + +#define NVB06F_CTRL_ENGINE_CTX_BUFFER_SIZE_MAX 4096 + +#define NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS_MESSAGE_ID (0x11U) + +typedef struct NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS { + NvU32 engineID; + NvU32 size; + NvU8 engineCtxBuff[NVB06F_CTRL_ENGINE_CTX_BUFFER_SIZE_MAX]; +} NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS; + +/* + * NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA + * + * This command restores the context buffer for the given engine for vGPU migration. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + */ + +#define NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA (0xb06f0112) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS_MESSAGE_ID" */ + +typedef NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS NVB06F_CTRL_RESTORE_ENGINE_CTX_DATA_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all APIs have a unique structure associated +// with them! +#define NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS { + NVB06F_CTRL_RESTORE_ENGINE_CTX_DATA_PARAMS params; +} NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS; + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc36f.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc36f.h new file mode 100644 index 0000000..c4dee8f --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc36f.h @@ -0,0 +1,159 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc36f.finn +// + + + + +/* VOLTA_CHANNEL_GPFIFO_A control commands and parameters */ + +#include "ctrl/ctrlxxxx.h" +#define NVC36F_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0xC36F, NVC36F_CTRL_##cat, idx) + +/* VOLTA_CHANNEL_GPFIFO_A command categories (6bits) */ +#define NVC36F_CTRL_RESERVED (0x00) +#define NVC36F_CTRL_GPFIFO (0x01) +#define NVC36F_CTRL_INTERNAL (0x03) + +/* + * NVC36F_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: NV_OK +*/ +#define NVC36F_CTRL_CMD_NULL (0xc36f0000) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + +/* + * NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN + * + * This command returns an opaque work submit token to the caller which + * can be used to write to the doorbell register to finish submitting work. If + * the client has provided an error context handle during channel allocation, + * and the error context is large enough to write the doorbell token, a + * notification at index NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN + * will be sent with the work submit token value. This index may be modified + * by NVC36F_CTRL_CMD_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX. + * + * workSubmitToken The 32-bit work submit token + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_ARGUMENT + * + */ + +#define NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN (0xc36f0108) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS_MESSAGE_ID" */ + +#define NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS { + NvU32 workSubmitToken; +} NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS; + +/** + * NVC36F_CTRL_CMD_GPFIFO_UPDATE_FAULT_METHOD_BUFFER + * + * This command updates the HOST CE Fault method buffer + * data structure of the virtual channel created for an SR-IOV guest. It will also + * update the fault method buffer address in the instance block of the channel + * + * bar2Addr[] + * This array contains the virtual BAR2 addresses mapped by the guest during + * channel creation. + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_INSUFFICIENT_RESOURCES + * + */ +#define NVC36F_CTRL_CMD_GPFIFO_FAULT_METHOD_BUFFER_MAX_RUNQUEUES 0x2 +#define NVC36F_CTRL_CMD_GPFIFO_UPDATE_FAULT_METHOD_BUFFER (0xc36f0109) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVC36F_CTRL_GPFIFO_UPDATE_FAULT_METHOD_BUFFER_PARAMS_MESSAGE_ID" */ + +#define NVC36F_CTRL_GPFIFO_UPDATE_FAULT_METHOD_BUFFER_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NVC36F_CTRL_GPFIFO_UPDATE_FAULT_METHOD_BUFFER_PARAMS { + NV_DECLARE_ALIGNED(NvU64 bar2Addr[NVC36F_CTRL_CMD_GPFIFO_FAULT_METHOD_BUFFER_MAX_RUNQUEUES], 8); +} NVC36F_CTRL_GPFIFO_UPDATE_FAULT_METHOD_BUFFER_PARAMS; + +/* + * NVC36F_CTRL_CMD_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX + * + * This command updates the notifier index within the error context notifier + * to write the doorbell token to. This interface cannot be used to cause the + * token to overwrite the RC notification slot. The notification slot + * referred to by the passed index must be within the bounds of the error + * context notifier object.
When multiple channels share the same error context, the client may + * legally set the same index for multiple channels, although this is not + * recommended. + * + * [IN] index + * Notification slot to write the doorbell token. The initial value of this + * index is NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_ARGUMENT + */ + +#define NVC36F_CTRL_CMD_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX (0xc36f010a) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS_MESSAGE_ID" */ + +#define NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS { + NvU32 index; +} NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS; + + + +/* + * NVC36F_CTRL_CMD_INTERNAL_GPFIFO_GET_WORK_SUBMIT_TOKEN + * + * This command is an internal command sent from Kernel RM to Physical RM + * to get the work submit token + * + * Please see the description of NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN for more information. + * + */ + +#define NVC36F_CTRL_CMD_INTERNAL_GPFIFO_GET_WORK_SUBMIT_TOKEN (0xc36f0301) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_INTERNAL_INTERFACE_ID << 8) | NVC36F_CTRL_INTERNAL_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS_MESSAGE_ID" */ + +#define NVC36F_CTRL_INTERNAL_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS_MESSAGE_ID (0x1U) + +typedef NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS NVC36F_CTRL_INTERNAL_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS; + +/* _ctrlc36f.h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h new file mode 100644 index 0000000..99612e8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h @@ -0,0 +1,66 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrlc370/ctrlc370base.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NVC370_DISPLAY control commands and parameters */ + +#define NVC370_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0XC370, NVC370_CTRL_##cat, idx) + +/* NVC370_DISPLAY command categories (6bits) */ +#define NVC370_CTRL_RESERVED (0x00) +#define NVC370_CTRL_CHNCTL (0x01) +#define NVC370_CTRL_RG (0x02) +#define NVC370_CTRL_OR (0x04) +#define NVC370_CTRL_INST (0x05) +#define NVC370_CTRL_VERIF (0x06) +#define NVC370_CTRL_SYSTEM (0x07) +#define NVC370_CTRL_EVENT (0x09) + +// This struct must be the first member of all C370 control calls +typedef struct NVC370_CTRL_CMD_BASE_PARAMS { + NvU32 subdeviceIndex; +} NVC370_CTRL_CMD_BASE_PARAMS; + + +/* + * NVC370_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NVC370_CTRL_CMD_NULL (0xc3700000) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + +/* _ctrlc370base_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h new file mode 100644 index 0000000..ef2b3ad --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h @@ -0,0 +1,303 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrlc370/ctrlc370chnc.finn +// + +#include "ctrl/ctrlc370/ctrlc370base.h" +/* C370 is partially derived from 5070 */ +#include "ctrl/ctrl5070/ctrl5070chnc.h" + + + + +#define NVC370_CTRL_CMD_CHANNEL_STATE_IDLE NVBIT(0) +#define NVC370_CTRL_CMD_CHANNEL_STATE_QUIESCENT1 NVBIT(2) +#define NVC370_CTRL_CMD_CHANNEL_STATE_QUIESCENT2 NVBIT(3) +#define NVC370_CTRL_CMD_CHANNEL_STATE_BUSY NVBIT(6) +#define NVC370_CTRL_CMD_CHANNEL_STATE_DEALLOC NVBIT(7) +#define NVC370_CTRL_CMD_CHANNEL_STATE_DEALLOC_LIMBO NVBIT(8) +#define NVC370_CTRL_CMD_CHANNEL_STATE_EFI_INIT1 NVBIT(11) +#define NVC370_CTRL_CMD_CHANNEL_STATE_EFI_INIT2 NVBIT(12) +#define NVC370_CTRL_CMD_CHANNEL_STATE_EFI_OPERATION NVBIT(13) +#define NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_INIT1 NVBIT(14) +#define NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_INIT2 NVBIT(15) +#define NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_OPERATION NVBIT(16) +#define NVC370_CTRL_CMD_CHANNEL_STATE_UNCONNECTED NVBIT(17) +#define NVC370_CTRL_CMD_CHANNEL_STATE_INIT1 NVBIT(18) +#define NVC370_CTRL_CMD_CHANNEL_STATE_INIT2 NVBIT(19) +#define NVC370_CTRL_CMD_CHANNEL_STATE_SHUTDOWN1 NVBIT(20) +#define NVC370_CTRL_CMD_CHANNEL_STATE_SHUTDOWN2 NVBIT(21) + +#define NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CORE 1 +#define NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW 32 +#define NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW_IMM 32 +#define NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WRITEBACK 8 +#define NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CURSOR 8 + +/* + * NVC370_CTRL_CMD_IDLE_CHANNEL + * + * This command waits for, or forces, the desired channel state. + * + * channelClass + * This field indicates the hw class number (0xC378 - 0xC37E). + * It's defined in the h/w header (e.g. clc37d.h, etc.). + * + * channelInstance + * This field indicates which instance of the channelClass the cmd is + * meant for. (zero-based) + * + * desiredChannelStateMask + * This field indicates the desired channel states. When more than + * one bit is set, RM will return whenever it finds hardware on one + * of the states in the bitmask. + * Normal options are IDLE, QUIESCENT1 and QUIESCENT2. + * Verif only option includes BUSY as well. + * Note: + * (1) When QUIESCENT1 or QUIESCENT2 is chosen only one bit should + * be set in the bitmask. RM will ignore any other state. + * (2) Accelerators should not be required for QUIESCENT states as + * RM tries to ensure QUIESCENT forcibly on its own. + * + * accelerators + * What accelerator bits should be used if RM times out trying to + * wait for the desired state. This is not yet implemented since it + * should normally not be required to use these. Usage of accelerators + * should be restricted and be done very carefully as they may have + * undesirable effects. + * NOTE: accelerators should not be used directly in production code. + * + * timeout + * Timeout to use when waiting for the desired state. This is also for + * future expansion and not yet implemented.
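+ * + * Illustrative usage sketch (not part of the original header; rmControl() + * is a placeholder for the client's RM control dispatch, and hClient / + * hDisplay are assumed to be pre-allocated handles): + * + * NVC370_CTRL_IDLE_CHANNEL_PARAMS params = { 0 }; + * params.base.subdeviceIndex = 0; + * params.channelClass = 0xC37E; // hypothetical window channel class + * params.channelInstance = 0; + * params.desiredChannelStateMask = NVC370_CTRL_IDLE_CHANNEL_STATE_IDLE; + * status = rmControl(hClient, hDisplay, NVC370_CTRL_CMD_IDLE_CHANNEL, + * &params, sizeof(params));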
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_TIMEOUT + */ +#define NVC370_CTRL_CMD_IDLE_CHANNEL (0xc3700101) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NVC370_CTRL_IDLE_CHANNEL_PARAMS_MESSAGE_ID" */ + +#define NVC370_CTRL_IDLE_CHANNEL_MAX_INSTANCE_CORE NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CORE +#define NVC370_CTRL_IDLE_CHANNEL_MAX_INSTANCE_WINDOW NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW +#define NVC370_CTRL_IDLE_CHANNEL_MAX_INSTANCE_WINDOW_IMM NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW_IMM +#define NVC370_CTRL_IDLE_CHANNEL_MAX_INSTANCE_WRITEBACK NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WRITEBACK +#define NVC370_CTRL_IDLE_CHANNEL_MAX_INSTANCE_CURSOR NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CURSOR + +#define NVC370_CTRL_IDLE_CHANNEL_STATE_IDLE NVC370_CTRL_CMD_CHANNEL_STATE_IDLE +#define NVC370_CTRL_IDLE_CHANNEL_STATE_QUIESCENT1 NVC370_CTRL_CMD_CHANNEL_STATE_QUIESCENT1 +#define NVC370_CTRL_IDLE_CHANNEL_STATE_QUIESCENT2 NVC370_CTRL_CMD_CHANNEL_STATE_QUIESCENT2 + +#define NVC370_CTRL_IDLE_CHANNEL_STATE_BUSY NVC370_CTRL_CMD_CHANNEL_STATE_BUSY + +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_NONE (0x00000000) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_PI (NVBIT(0)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_NOTIF (NVBIT(1)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_SEMA (NVBIT(2)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_INTERLOCK (NVBIT(3)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_FLIPLOCK (NVBIT(4)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_TRASH_ONLY (NVBIT(5)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_TRASH_AND_ABORT (NVBIT(6)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_SYNCPOINT (NVBIT(7)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_TIMESTAMP (NVBIT(8)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_MGI (NVBIT(9)) + +#define NVC370_CTRL_IDLE_CHANNEL_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC370_CTRL_IDLE_CHANNEL_PARAMS { + NVC370_CTRL_CMD_BASE_PARAMS base; + NvU32 channelClass; + NvU32 channelInstance; + NvU32 desiredChannelStateMask; + NvU32 accelerators; // For future expansion. Not yet implemented + NvU32 timeout; // For future expansion. Not yet implemented + NvBool restoreDebugMode; +} NVC370_CTRL_IDLE_CHANNEL_PARAMS; + +/* + * NVC370_CTRL_CMD_SET_ACCL + * + * This command turns accelerators on and off. The use of this command + * should be restricted as it may have undesirable effects. Its + * purpose is to provide a mechanism for clients to use the + * accelerator bits to get into states that are either not detectable + * by the RM or may take longer to reach than we think is reasonable + * to wait in the RM. + * + * NVC370_CTRL_CMD_GET_ACCL + * + * This command queries the current state of the accelerators. + * + * channelClass + * This field indicates the hw class number (0xC378 - 0xC37E). + * It's defined in the h/w header (e.g. clc37d.h, etc.). + * + * channelInstance + * This field indicates which instance of the channelClass the cmd is + * meant for. (zero-based) + * + * accelerators + * Accelerators to be set in the SET_ACCEL command. Returns the + * currently set accelerators on the GET_ACCEL command. + */ + + +/* + * + * accelMask + * A mask to specify which accelerators to change with the + * SET_ACCEL command. This field does nothing in the GET_ACCEL + * command.
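+ * + * Illustrative usage sketch (not part of the original header; rmControl() + * is a placeholder for the client's RM control dispatch): set only the + * IGNORE_PI accelerator while leaving all other accelerator bits untouched: + * + * NVC370_CTRL_SET_ACCL_PARAMS params = { 0 }; + * params.channelClass = 0xC37E; // hypothetical channel class + * params.channelInstance = 0; + * params.accelerators = NVC370_CTRL_ACCL_IGNORE_PI; + * params.accelMask = NVC370_CTRL_ACCL_IGNORE_PI; // change only this bit + * status = rmControl(hClient, hDisplay, NVC370_CTRL_CMD_SET_ACCL, + * &params, sizeof(params));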
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_CHANNEL + * NV_ERR_INVALID_OWNER + * NV_ERR_GENERIC + * + */ + +#define NVC370_CTRL_CMD_SET_ACCL (0xc3700102) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NVC370_CTRL_SET_ACCL_PARAMS_MESSAGE_ID" */ + +#define NVC370_CTRL_CMD_GET_ACCL (0xc3700103) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NVC370_CTRL_GET_ACCL_PARAMS_MESSAGE_ID" */ + +#define NVC370_CTRL_CMD_CHANNEL_CANCEL_FLIP (0xc3700105) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NVC370_CTRL_CHANNEL_CANCEL_FLIP_PARAMS_MESSAGE_ID" */ + +#define NVC370_CTRL_ACCL_MAX_INSTANCE_CORE NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CORE +#define NVC370_CTRL_ACCL_MAX_INSTANCE_WINDOW NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW +#define NVC370_CTRL_ACCL_MAX_INSTANCE_WINDOW_IMM NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW_IMM +#define NVC370_CTRL_ACCL_MAX_INSTANCE_WRITEBACK NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WRITEBACK +#define NVC370_CTRL_ACCL_MAX_INSTANCE_CURSOR NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CURSOR + +#define NVC370_CTRL_ACCL_NONE NVC370_CTRL_IDLE_CHANNEL_ACCL_NONE +#define NVC370_CTRL_ACCL_IGNORE_PI NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_PI +#define NVC370_CTRL_ACCL_SKIP_NOTIF NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_NOTIF +#define NVC370_CTRL_ACCL_SKIP_SEMA NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_SEMA +#define NVC370_CTRL_ACCL_IGNORE_INTERLOCK NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_INTERLOCK +#define NVC370_CTRL_ACCL_IGNORE_FLIPLOCK NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_FLIPLOCK +#define NVC370_CTRL_ACCL_TRASH_ONLY NVC370_CTRL_IDLE_CHANNEL_ACCL_TRASH_ONLY +#define NVC370_CTRL_ACCL_TRASH_AND_ABORT NVC370_CTRL_IDLE_CHANNEL_ACCL_TRASH_AND_ABORT +#define NVC370_CTRL_ACCL_SKIP_SYNCPOINT NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_SYNCPOINT +#define NVC370_CTRL_ACCL_IGNORE_TIMESTAMP NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_TIMESTAMP +#define NVC370_CTRL_ACCL_IGNORE_MGI NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_MGI +typedef struct NVC370_CTRL_ACCL_PARAMS { + NVC370_CTRL_CMD_BASE_PARAMS base; + NvU32 channelClass; + NvU32 channelInstance; + NvU32 accelerators; + NvU32 accelMask; +} NVC370_CTRL_ACCL_PARAMS; + +#define NVC370_CTRL_SET_ACCL_PARAMS_MESSAGE_ID (0x2U) + +typedef NVC370_CTRL_ACCL_PARAMS NVC370_CTRL_SET_ACCL_PARAMS; + +#define NVC370_CTRL_GET_ACCL_PARAMS_MESSAGE_ID (0x3U) + +typedef NVC370_CTRL_ACCL_PARAMS NVC370_CTRL_GET_ACCL_PARAMS; + +#define NVC370_CTRL_CHANNEL_CANCEL_FLIP_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NVC370_CTRL_CHANNEL_CANCEL_FLIP_PARAMS { + NVC370_CTRL_CMD_BASE_PARAMS base; + NvU32 channelClass; + NvU32 channelInstance; +} NVC370_CTRL_CHANNEL_CANCEL_FLIP_PARAMS; + +/* + * NVC370_CTRL_CMD_GET_CHANNEL_INFO + * + * This command returns the current channel state. + * + * channelClass + * This field indicates the hw class number (0xC378 - 0xC37E). + * It's defined in the h/w header (e.g. clc37d.h, etc.). + * + * channelInstance + * This field indicates which instance of the channelClass the cmd is + * meant for. (zero-based) + * + * channelState + * This field returns the current channel state in a mask form that + * is compatible with NVC370_CTRL_CMD_IDLE_CHANNEL. A mask format + * allows clients to check for one from a group of states.
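+ * + * Illustrative usage sketch (not part of the original header; rmControl() + * is a placeholder for the client's RM control dispatch): poll until the + * channel reports that no methods are pending: + * + * NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS params = { 0 }; + * params.channelClass = 0xC37D; // hypothetical core channel class + * params.channelInstance = 0; + * do { + * status = rmControl(hClient, hDisplay, NVC370_CTRL_CMD_GET_CHANNEL_INFO, + * &params, sizeof(params)); + * } while ((status == NV_OK) && + * !(params.channelState & NVC370_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING));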
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + * + * The display driver uses this call to ensure that all its methods have + * propagated through the hardware's internal fifo + * (NVC370_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING) before it calls + * RM to check whether or not the mode it set up in Assembly State Cache will + * be possible. Note that the display driver cannot use a completion notifier in + * this case because the completion notifier is associated with Update, and Update + * propagates the state from Assembly to Armed; when checking the + * possibility of a mode, the display driver does not want the Armed state to be + * affected. + */ + + + +#define NVC370_CTRL_CMD_GET_CHANNEL_INFO (0xc3700104) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS_MESSAGE_ID" */ + +#define NVC370_CTRL_GET_CHANNEL_INFO_MAX_INSTANCE_CORE NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CORE +#define NVC370_CTRL_GET_CHANNEL_INFO_MAX_INSTANCE_WINDOW NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW +#define NVC370_CTRL_GET_CHANNEL_INFO_MAX_INSTANCE_WINDOW_IMM NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW_IMM +#define NVC370_CTRL_GET_CHANNEL_INFO_MAX_INSTANCE_WRITEBACK NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WRITEBACK +#define NVC370_CTRL_GET_CHANNEL_INFO_MAX_INSTANCE_CURSOR NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CURSOR + +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_IDLE NVC370_CTRL_CMD_CHANNEL_STATE_IDLE +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_BUSY NVC370_CTRL_CMD_CHANNEL_STATE_BUSY +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_DEALLOC NVC370_CTRL_CMD_CHANNEL_STATE_DEALLOC +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_DEALLOC_LIMBO NVC370_CTRL_CMD_CHANNEL_STATE_DEALLOC_LIMBO +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_EFI_INIT1 NVC370_CTRL_CMD_CHANNEL_STATE_EFI_INIT1 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_EFI_INIT2 NVC370_CTRL_CMD_CHANNEL_STATE_EFI_INIT2 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_EFI_OPERATION NVC370_CTRL_CMD_CHANNEL_STATE_EFI_OPERATION +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_VBIOS_INIT1 NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_INIT1 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_VBIOS_INIT2 NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_INIT2 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_VBIOS_OPERATION NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_OPERATION +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_UNCONNECTED NVC370_CTRL_CMD_CHANNEL_STATE_UNCONNECTED +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_INIT1 NVC370_CTRL_CMD_CHANNEL_STATE_INIT1 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_INIT2 NVC370_CTRL_CMD_CHANNEL_STATE_INIT2 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_SHUTDOWN1 NVC370_CTRL_CMD_CHANNEL_STATE_SHUTDOWN1 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_SHUTDOWN2 NVC370_CTRL_CMD_CHANNEL_STATE_SHUTDOWN2 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING NVC370_CTRL_GET_CHANNEL_INFO_STATE_IDLE +#define NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS { + NVC370_CTRL_CMD_BASE_PARAMS base; + NvU32 channelClass; + NvU32 channelInstance; + NvBool IsChannelInDebugMode; + NvU32 channelState; +} NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS; + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h new file mode 100644 index 0000000..72e8bef --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h @@ -0,0 +1,56 @@ +/* + * SPDX-FileCopyrightText: Copyright (c)
2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc370/ctrlc370event.finn +// + +#include "ctrl/ctrlc370/ctrlc370base.h" +/* C370 is partially derived from 0073 */ +#include "ctrl/ctrl0073/ctrl0073event.h" + +/* +* headId +* This parameter indicates the ID of the head on which the interrupt was received +* rgSemId +* This parameter indicates the RG Semaphore Index for the given head +*/ +typedef struct NVC370_RG_SEM_NOTIFICATION_PARAMS { + NvU32 headId; + NvU32 rgSemId; +} NVC370_RG_SEM_NOTIFICATION_PARAMS; + + + + +/* valid action values */ +#define NVC370_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE +#define NVC370_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE +#define NVC370_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT + +/* _ctrlc370event_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370or.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370or.h new file mode 100644 index 0000000..6f45385 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370or.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc370/ctrlc370or.finn +// + + + +#include "ctrl/ctrlc370/ctrlc370base.h" +/* C370 is partially derived from 5070 */ +#include "ctrl/ctrl5070/ctrl5070or.h" + + +/* _ctrlc370or_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h new file mode 100644 index 0000000..a7e4a79 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h @@ -0,0 +1,123 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc370/ctrlc370rg.finn +// + +#include "ctrl/ctrlc370/ctrlc370base.h" +/* C370 is partially derived from 5070 */ +#include "ctrl/ctrl5070/ctrl5070rg.h" + + + + +/* + * NVC370_CTRL_CMD_GET_LOCKPINS_CAPS + * + * This command returns lockpins for the specified pinset, + * as well as lockpins' HW capabilities. + * + * pinset [in] + * This parameter takes the pinset whose corresponding + * lockpin numbers need to be determined. This only affects + * the return value for the RasterLock and FlipLock pins. + * + * frameLockPin [out] + * This parameter returns the FrameLock pin index. + * + * rasterLockPin [out] + * This parameter returns the RasterLock pin index. + * + * flipLockPin [out] + * This parameter returns the FlipLock pin index. + * + * stereoPin [out] + * This parameter returns the Stereo pin index. + * + * numScanLockPins [out] + * This parameter returns the HW capability of ScanLock pins. + * + * numFlipLockPins [out] + * This parameter returns the HW capability of FlipLock pins. + * + * numStereoPins [out] + * This parameter returns the HW capability of Stereo pins.
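+ * + * Illustrative usage sketch (not part of the original header; rmControl() + * is a placeholder for the client's RM control dispatch): + * + * NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS params = { 0 }; + * params.pinset = 0; // hypothetical pinset index + * status = rmControl(hClient, hDisplay, NVC370_CTRL_CMD_GET_LOCKPINS_CAPS, + * &params, sizeof(params)); + * if ((status == NV_OK) && + * (params.frameLockPin != NVC370_CTRL_GET_LOCKPINS_CAPS_FRAME_LOCK_PIN_NONE)) + * { + * // params.frameLockPin now holds a usable FrameLock pin index + * }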
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NVC370_CTRL_CMD_GET_LOCKPINS_CAPS (0xc3700201) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_RG_INTERFACE_ID << 8) | NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS_MESSAGE_ID" */ + +#define NVC370_CTRL_GET_LOCKPINS_CAPS_FRAME_LOCK_PIN_NONE (0xffffffff) +#define NVC370_CTRL_GET_LOCKPINS_CAPS_RASTER_LOCK_PIN_NONE (0xffffffff) +#define NVC370_CTRL_GET_LOCKPINS_CAPS_FLIP_LOCK_PIN_NONE (0xffffffff) +#define NVC370_CTRL_GET_LOCKPINS_CAPS_STEREO_PIN_NONE (0xffffffff) +#define NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS { + NVC370_CTRL_CMD_BASE_PARAMS base; + NvU32 pinset; + NvU32 frameLockPin; + NvU32 rasterLockPin; + NvU32 flipLockPin; + NvU32 stereoPin; + NvU32 numScanLockPins; + NvU32 numFlipLockPins; + NvU32 numStereoPins; +} NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS; + +/* + * NVC370_CTRL_CMD_SET_SWAPRDY_GPIO_WAR + * + * This command switches SWAP_READY_OUT GPIO between SW + * and HW control to WAR bug 200374184 + * + * bEnable [in]: + * This parameter indicates whether to enable or disable external fliplock + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ + +#define NVC370_CTRL_CMD_SET_SWAPRDY_GPIO_WAR (0xc3700202) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_RG_INTERFACE_ID << 8) | NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS_MESSAGE_ID" */ + +#define NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS { + NVC370_CTRL_CMD_BASE_PARAMS base; + NvBool bEnable; +} NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS; + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h new file mode 100644 index 0000000..bb58fae --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h @@ -0,0 +1,133 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrlc370/ctrlc370verif.finn
+//
+
+#include "ctrl/ctrlc370/ctrlc370base.h"
+/* C370 is partially derived from 5070 */
+#include "ctrl/ctrl5070/ctrl5070verif.h"
+
+
+
+
+#define NVC370_CTRL_CMD_GET_FORCE_MODESWITCH_FLAGS_OVERRIDES (0xc3700601) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_VERIF_INTERFACE_ID << 8) | NVC370_CTRL_CMD_GET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS_MESSAGE_ID" */
+
+#define NVC370_CTRL_CMD_GET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NVC370_CTRL_CMD_GET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS {
+    NVC370_CTRL_CMD_BASE_PARAMS base;
+    NvU32                       headInstance;
+    NvU32                       modesetValue;
+} NVC370_CTRL_CMD_GET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS;
+
+/*
+ * NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES
+ *
+ * This command sets modeswitch flags prior to a modeset. The flags are
+ * automatically cleared at the end of each modeset, so this command must be
+ * called for each modeset where overrides are desired.
+ *
+ *   headInstance
+ *     This field specifies the head for which modeswitch flags will be
+ *     overridden.
+ *
+ *   modesetMaskValid
+ *     This field specifies the mask of modeswitch flags that will be
+ *     overridden.
+ *
+ *   modesetValue
+ *     This field specifies the override value.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES (0xc3700602) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_VERIF_INTERFACE_ID << 8) | NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS_MESSAGE_ID" */
+
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS {
+    NVC370_CTRL_CMD_BASE_PARAMS base;
+    NvU32                       headInstance;
+    NvU32                       modesetMaskValid;
+    NvU32                       modesetValue;
+} NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS;
+
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_BLANK                 0:0
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_BLANK_VALID           0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_BLANK_INVALID         0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK               1:1
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_VALID         0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_INVALID       0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_SHUTDOWN              2:2
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_SHUTDOWN_VALID        0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_SHUTDOWN_INVALID      0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOSHUTDOWN            3:3
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOSHUTDOWN_VALID      0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOSHUTDOWN_INVALID    0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_CHANGE_VPLL           4:4
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_CHANGE_VPLL_VALID     0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_CHANGE_VPLL_INVALID   0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOCHANGE_VPLL         5:5
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOCHANGE_VPLL_VALID   0x00000001
+#define
NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOCHANGE_VPLL_INVALID 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_WAKEUP 6:6 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_WAKEUP_VALID 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_WAKEUP_INVALID 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_SHUTDOWN 7:7 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_SHUTDOWN_VALID 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_SHUTDOWN_INVALID 0x00000000 + + +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_BLANK 0:0 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_BLANK_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_BLANK_NO 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK 1:1 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_NO 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_SHUTDOWN 2:2 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_SHUTDOWN_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_SHUTDOWN_NO 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOSHUTDOWN 3:3 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOSHUTDOWN_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOSHUTDOWN_NO 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_CHANGE_VPLL 4:4 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_CHANGE_VPLL_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_CHANGE_VPLL_NO 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOCHANGE_VPLL 5:5 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOCHANGE_VPLL_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOCHANGE_VPLL_NO 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_WAKEUP 6:6 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_WAKEUP_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_WAKEUP_NO 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_SHUTDOWN 7:7 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_SHUTDOWN_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_SHUTDOWN_NO 0x00000000 + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h new file mode 100644 index 0000000..d8423a2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrlc372/ctrlc372base.finn
+//
+
+#include "ctrl/ctrlxxxx.h"
+/* NVC372_DISPLAY_SW control commands and parameters */
+
+#define NVC372_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0xC372, NVC372_CTRL_##cat, idx)
+
+/* NVC372_DISPLAY_SW command categories (6 bits) */
+#define NVC372_CTRL_RESERVED (0x00)
+#define NVC372_CTRL_CHNCTL   (0x01)
+#define NVC372_CTRL_VERIF    (0x02)
+
+/*
+ * NVC372_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NVC372_CTRL_CMD_NULL (0xc3720000) /* finn: Evaluated from "(FINN_NVC372_DISPLAY_SW_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+// This struct must be the first member of all
+// C372 control calls
+typedef struct NVC372_CTRL_CMD_BASE_PARAMS {
+    NvU32 subdeviceIndex;
+} NVC372_CTRL_CMD_BASE_PARAMS;
+
+/* _ctrlc372base_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h
new file mode 100644
index 0000000..1e27525
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h
@@ -0,0 +1,934 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrlc372/ctrlc372chnc.finn
+//
+
+#include "nvdisptypes.h"
+#include "ctrl/ctrlc372/ctrlc372base.h"
+
+#define NVC372_CTRL_MAX_POSSIBLE_HEADS   8
+#define NVC372_CTRL_MAX_POSSIBLE_WINDOWS 32
+#define NVC372_CTRL_MAX_POSSIBLE_TILES   8
+
+#define NVC372_CTRL_CMD_IS_MODE_POSSIBLE (0xc3720101) /* finn: Evaluated from "(FINN_NVC372_DISPLAY_SW_CHNCTL_INTERFACE_ID << 8) | NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS_MESSAGE_ID" */
+
+/*
+ * NVC372_CTRL_CMD_IS_MODE_POSSIBLE
+ *
+ * This command tells whether or not the specified display config is possible.
+ * A config might not be possible if the display requirements exceed the GPU
+ * capabilities. Display requirements will be higher with more display
+ * surfaces, higher resolutions, higher downscaling factors, etc. GPU
+ * capabilities are determined by clock frequencies, the width of data pipes,
+ * amount of mempool available, number of thread groups available, etc.
+ *
+ * Inputs:
+ *   head.headIndex
+ *     This is the hardware index number for the head. Only active heads
+ *     should be included in the input structure.
+ *
+ *   head.maxPixelClkKHz
+ *     This parameter specifies the pixel scanout rate (in KHz).
+ *
+ *   head.rasterSize
+ *     This structure specifies the total width and height of the raster that
+ *     is sent to the display. (The width and height are also referred to as
+ *     HTotal and VTotal, respectively.)
+ *
+ *   head.rasterBlankStart
+ *     X specifies the pixel column where horizontal blanking begins;
+ *     Y specifies the pixel row where vertical blanking begins.
+ *
+ *   head.rasterBlankEnd
+ *     X specifies the pixel column where horizontal blanking ends;
+ *     Y specifies the pixel row where vertical blanking ends.
+ *
+ *   head.rasterVertBlank2
+ *     X and Y specify the pixel column/row where horizontal/vertical blanking
+ *     ends on the second field of every pair for an interlaced raster. This
+ *     field is not used when the raster is progressive.
+ *
+ *   head.control.masterLockMode
+ *   head.control.masterLockPin
+ *   head.control.slaveLockMode
+ *   head.control.slaveLockPin
+ *     Heads that are raster locked or frame locked together will have
+ *     synchronized timing. For example, vblank will occur at the same time on
+ *     all of the heads that are locked together.
+ *
+ *     "LockMode" tells if a head is raster locked, frame locked, or not
+ *     locked.
+ *
+ *     "LockPin" tells which heads are in a group of locked heads. There
+ *     should be one master per group, and all slave heads that are locked to
+ *     that master should have the same slaveLockPin number as the master's
+ *     masterLockPin number.
+ *
+ *     Note: The LockModes and LockPins are used only if the min v-pstate is
+ *     required (i.e., if NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE
+ *     is set).
+ *
+ *   head.maxDownscaleFactorH
+ *   head.maxDownscaleFactorV
+ *     maxDownscaleFactorH and maxDownscaleFactorV represent the maximum amount
+ *     by which the composited image can be reduced in size, horizontally
+ *     and vertically, respectively, multiplied by 0x400.
For example, if the + * scaler input width is 1024, and the scaler output width is 2048, the + * downscale factor would be 1024 / 2048 = 0.5, and multiplying by 0x400 + * would give 512. + * + * head.outputScalerVerticalTaps + * scalerVerticalTaps indicates the maximum number of vertical taps + * allowed in the output scaler. + * + * Note that there are no #defines for tap values; the parameter is simply + * the number of taps (e.g., "2" for 2 taps). + * + * head.bUpscalingAllowedV + * bUpscalingAllowed indicates whether or not the composited image can be + * increased in size, vertically. + * + * head.bOverfetchEnabled + * bOverfetchEnabled indicates whether or not the vertical overfetch is + * enabled in postcomp scaler. + * + * head.bLtmAllowed + * bLtmAllowed indicates whether or not the Local Tone Mapping (LTM) is + * enabled in postcomp. + * + * head.minFrameIdle.leadingRasterLines + * leadingRasterLines defines the number of lines between the start of the + * frame (vsync) and the start of the active region. This includes Vsync, + * Vertical Back Porch, and the top part of the overscan border. The + * minimum value is 2 because vsync and VBP must be at least 1 line each. + * + * head.minFrameIdle.trailingRasterLines + * trailingRasterLines defines the number of lines between the end of the + * active region and the end of the frame. This includes the bottom part + * of the overscan border and the Vertical Front Porch. + * + * head.lut + * This parameter specifies whether or not the output LUT is enabled, and + * the size of the LUT. The parameter should be an + * NVC372_CTRL_IMP_LUT_USAGE_xxx value. + * On Volta, the value should be one of these: + * NVC372_CTRL_IMP_LUT_USAGE_1025, NVC372_CTRL_IMP_LUT_USAGE_257 or + * NVC372_CTRL_IMP_LUT_USAGE_NONE + * + * After Volta, the value should be one of these: + * NVC372_CTRL_IMP_LUT_USAGE_HW_MAX (this indicates that LUT is allowed) or + * NVC372_CTRL_IMP_LUT_USAGE_NONE + * (On older post-Volta products, clients may set other + * NVC372_CTRL_IMP_LUT_USAGE_xxx values, but they map to + * NVC372_CTRL_IMP_LUT_USAGE_HW_MAX in RM-SW.) + * + * head.cursorSize32p + * This parameter specifies the width of the cursor, in units of 32 pixels. + * So, for example, "8" would mean 8 * 32 = 256, for a 256x256 cursor. Zero + * means the cursor is disabled. + * + * head.tileMask + * This parameter contains a bitmask specifying which tiles must be + * assigned to the head. Normally, this parameter is set to zero, allowing + * IMP to calculate the number of tiles, but the tiles may be specified + * explicitly for test or debug. If the mode is not possible with the + * specified number of tiles, IMP will report the result as such; the + * number of tiles will not be adjusted. + * + * If tiles are forced for only a subset of active heads, IMP will + * calculate the tiles for the remaining heads (if possible). + * + * If head.bEnableDsc is enabled, head.possibleDscSliceCountMask may + * optionally be used to force the number of DSC slices. + * + * head.bEnableDsc + * bEnableDsc indicates whether or not DSC is enabled, by default. If it + * is disabled by default, but head.possibleDscSliceCountMask is non-zero, + * IMP may still present tiling solutions with DSC enabled, but only if the + * mode is not possible otherwise. (This will be indicated by a non-zero + * tileList.headDscSlices output.) + * + * head.dscTargetBppX16 + * dscTargetBppX16 is the DSC encoder's target bits per pixel, multiplied + * by 16. 
+ *
+ *     This field is required only on systems that support tiling, and only if
+ *     head.possibleDscSliceCountMask is non-zero.
+ *
+ *   head.possibleDscSliceCountMask
+ *     This is a bit mask indicating how many DSC slices are allowed in a
+ *     scanline. If a bit n is set in the bit mask, it means that one possible
+ *     configuration has n+1 DSC slices per scanline.
+ *
+ *     This field is required only on systems that support tiling.
+ *
+ *   head.maxDscSliceWidth
+ *     The maximum allowed DSC slice width is determined by spec restrictions
+ *     and monitor capabilities.
+ *
+ *     This field is required only on systems that support tiling, and only if
+ *     head.possibleDscSliceCountMask is non-zero.
+ *
+ *   head.bYUV420Format
+ *     This parameter indicates that the output format is YUV420.
+ *     Refer to NVD_YUV420_Output_Functional_Description.docx for more details.
+ *
+ *   head.bIs2Head1Or
+ *     This parameter specifies if the head operates in 2Head1Or mode.
+ *     Refer to NVD_2_Heads_Driving_1_OR_Functional_Description.docx for more
+ *     details.
+ *
+ *   head.bGetOSLDOutput
+ *     This parameter specifies if the client requires output for the OSLD
+ *     (One Shot Mode with Stall Lock Disabled) mode along with legacy outputs.
+ *
+ *   head.bDisableMidFrameAndDWCFWatermark
+ *     WAR for bug 200508242.
+ *     In Linux it is possible that there will be no fullscreen window visible
+ *     for a head. For these cases we would not hit dwcf or midframe
+ *     watermarks, leading to fbflcn timing out waiting on ok_to_switch and
+ *     forcing mclk switch. This could lead to underflows. So if that scenario
+ *     is caught (by the Display Driver), bDisableMidFrameAndDWCFWatermark will
+ *     be set to true and IMP will exclude dwcf and midframe contribution from
+ *     the "is mclk switch guaranteed" calculation for the bandwidth clients of
+ *     that head.
+ *
+ *   window.windowIndex
+ *     This is the hardware index number for the window. Only active windows
+ *     should be included in the input structure.
+ *
+ *   window.owningHead
+ *     This is the hardware index of the head that will receive the window's
+ *     output.
+ *
+ *   window.formatUsageBound
+ *     This parameter is a bitmask of all possible non-rotated mode data
+ *     formats (NVC372_CTRL_FORMAT_xxx values).
+ *
+ *   window.rotatedFormatUsageBound
+ *     This parameter is a bitmask of all possible rotated mode data formats
+ *     (NVC372_CTRL_FORMAT_xxx values).
+ *
+ *   window.surfaceLayout
+ *     This parameter is the surface layout of the window. It is one of the
+ *     NVC372_CTRL_LAYOUT_xxx values.
+ *     The default value of 0U would imply that SW uses legacy equations
+ *     (pre NVD5.0) in its computation for fetch BW.
+ *
+ *   window.maxPixelsFetchedPerLine
+ *     This parameter defines the maximum number of pixels that may need to be
+ *     fetched in a single line for this window. Often, this can be set to the
+ *     viewportSizeIn.Width. But if the window is known to be clipped, such
+ *     that an entire line will never be fetched, then this parameter can be
+ *     set to the clipped size (to improve the chances of the mode being
+ *     possible, or possible at a lower v-pstate).
+ *
+ *     In some cases, the value of this parameter must be increased by a few
+ *     pixels in order to account for scaling overfetch, input chroma overfetch
+ *     (420/422->444), and/or chroma output low pass filter overfetch
+ *     (444->422/420). This value is chip dependent; refer to the
+ *     MaxPixelsFetchedPerLine parameter in nvdClass_01.mfs for the exact
+ *     value. In no case does the maxPixelsFetchedPerLine value need to exceed
+ *     the surface width.
+ *
+ *   window.maxDownscaleFactorH
+ *   window.maxDownscaleFactorV
+ *     maxDownscaleFactorH and maxDownscaleFactorV represent the maximum amount
+ *     by which the window image can be reduced in size, horizontally and
+ *     vertically, respectively, multiplied by
+ *     NVC372_CTRL_SCALING_FACTOR_MULTIPLIER. For example,
+ *     if the scaler input width is 1024, and the scaler output width is 2048,
+ *     the downscale factor would be 1024 / 2048 = 0.5, and multiplying by
+ *     NVC372_CTRL_SCALING_FACTOR_MULTIPLIER (0x400) would give 512.
+ *
+ *   window.inputScalerVerticalTaps
+ *     scalerVerticalTaps indicates the maximum number of vertical taps
+ *     allowed in the input scaler.
+ *
+ *     Note that there are no #defines for tap values; the parameter is simply
+ *     the number of taps (e.g., "2" for 2 taps).
+ *
+ *   window.bUpscalingAllowedV
+ *     bUpscalingAllowed indicates whether or not the composited image can be
+ *     increased in size, vertically.
+ *
+ *   window.bOverfetchEnabled
+ *     bOverfetchEnabled indicates whether or not the vertical overfetch is
+ *     enabled in the precomp scaler.
+ *
+ *   window.lut
+ *     This parameter specifies whether or not the input LUT is enabled, and
+ *     the size of the LUT. The parameter should be an
+ *     NVC372_CTRL_IMP_LUT_USAGE_xxx value.
+ *     On Volta, the value should be one of these:
+ *       NVC372_CTRL_IMP_LUT_USAGE_1025, NVC372_CTRL_IMP_LUT_USAGE_257 or
+ *       NVC372_CTRL_IMP_LUT_USAGE_NONE
+ *
+ *     After Volta, the value should be one of these:
+ *       NVC372_CTRL_IMP_LUT_USAGE_HW_MAX (this indicates that LUT is allowed)
+ *       or NVC372_CTRL_IMP_LUT_USAGE_NONE
+ *     (On older post-Volta products, clients may set other
+ *     NVC372_CTRL_IMP_LUT_USAGE_xxx values, but they map to
+ *     NVC372_CTRL_IMP_LUT_USAGE_HW_MAX in RM-SW.)
+ *
+ *   window.tmoLut
+ *     This parameter specifies whether or not the TMO LUT is enabled, and
+ *     the size of the LUT. This LUT is used for HDR. The parameter should be
+ *     an NVC372_CTRL_IMP_LUT_USAGE_xxx value.
+ *     On Volta, the value should be one of these:
+ *       NVC372_CTRL_IMP_LUT_USAGE_1025, NVC372_CTRL_IMP_LUT_USAGE_257 or
+ *       NVC372_CTRL_IMP_LUT_USAGE_NONE
+ *
+ *     After Volta, the value should be one of these:
+ *       NVC372_CTRL_IMP_LUT_USAGE_HW_MAX (this indicates that LUT is allowed)
+ *       or NVC372_CTRL_IMP_LUT_USAGE_NONE
+ *     (On older post-Volta products, clients may set other
+ *     NVC372_CTRL_IMP_LUT_USAGE_xxx values, but they map to
+ *     NVC372_CTRL_IMP_LUT_USAGE_HW_MAX in RM-SW.)
+ *
+ *   numHeads
+ *     This is the number of heads in the "head" array of the
+ *     NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS struct. Only active heads should be
+ *     included in the struct.
+ *
+ *   numWindows
+ *     This is the number of windows in the "window" array of the
+ *     NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS struct. Only active windows should
+ *     be included in the struct.
+ *
+ *   options
+ *     This parameter specifies a bitmask for options.
+ *
+ *     NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN
+ *       tells IMP to calculate worstCaseMargin and worstCaseDomain.
+ *     NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE
+ *       tells IMP to calculate and report the minimum v-pstate at which the
+ *       mode is possible.
+ *
+ *   bUseCachedPerfState
+ *     Indicates that RM should use cached values for the fastest
+ *     available perf level (v-pstate for PStates 3.0 or pstate for
+ *     PStates 2.0) and dispclk. This feature allows the query call to
+ *     execute faster, and is intended to be used, for example, during
+ *     mode enumeration, when many IMP query calls are made in close
+ *     succession, and perf conditions are not expected to change between
+ *     query calls. When IMP has not been queried recently, it is
+ *     recommended to NOT use cached values, in case perf conditions have
+ *     changed and the cached values no longer reflect the current
+ *     conditions.
+ *
+ *   testMclkFreqKHz
+ *     This is the mclk frequency specified by the client, in KHz. RM will
+ *     use this value to compare with the minimum dramclk required by the
+ *     given mode. The parameter will have value 0 if the client doesn't want
+ *     the IMP query to consider this. This input is valid only on Tegra and
+ *     only for verification purposes on internal builds.
+ *     For this input to work, the client must set
+ *     NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE in the
+ *     "options" field.
+ *
+ * Outputs:
+ *   bIsPossible
+ *     This output tells if the specified mode can be supported. To know if
+ *     the mode is possible with OSLD, the bIsOSLDPossible result must
+ *     be checked.
+ *
+ *   bIsOSLDPossible
+ *     This output is returned for each head and suggests to the clients
+ *     if the mode will be possible or not on that head when OSLD is enabled.
+ *     The output is only valid if bGetOSLDOutput is set in the head input.
+ *     Note that bIsOSLDPossible is only valid if bIsPossible is true.
+ *
+ *   minImpVPState
+ *     minImpVPState returns the minimum v-pstate at which the mode is possible
+ *     (assuming bIsPossible is TRUE). This output is valid only on dGPU, and
+ *     only if NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set in
+ *     the "options" field.
+ *
+ *     If the minimum v-pstate is required for a multi-head config, then
+ *     masterLockMode, masterLockPin, slaveLockMode, and slaveLockPin must all
+ *     be initialized.
+ *
+ *   minPState
+ *     minPState returns the pstate value corresponding to minImpVPState. It
+ *     is returned as the numeric value of the pstate (P0 -> 0, P1 -> 1, etc.).
+ *     This output is valid only on dGPU, and only if
+ *     NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set
+ *     in the "options" field.
+ *
+ *     Note that the pstate returned in minPState is not necessarily
+ *     sufficient to meet IMP requirements. The pstate corresponds
+ *     to the vpstate returned by minImpVPState, and this vpstate represents
+ *     clocks that are sufficient for IMP requirements, but the pstate
+ *     typically covers a range of frequencies (depending on the clock), and it
+ *     is possible that only part of the range is sufficient for IMP.
+ *
+ *   minRequiredBandwidthKBPS
+ *     minRequiredBandwidthKBPS returns the minimum bandwidth that must be
+ *     allocated to display in order to make the mode possible (assuming
+ *     bIsPossible is TRUE). This output is valid only on Tegra, and only if
+ *     NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set in the
+ *     "options" field.
+ *
+ *   floorBandwidthKBPS
+ *     floorBandwidthKBPS returns the minimum mclk frequency that can support
+ *     the mode, and allow glitchless mclk switch, multiplied by the width of
+ *     the data pipe. (This is an approximation of the bandwidth that can be
+ *     provided by the min required mclk frequency, ignoring overhead.) If the
+ *     mode is possible, but glitchless mclk switch is not, floorBandwidthKBPS
+ *     will be calculated based on the maximum possible mclk frequency. This
+ *     output is valid only on Tegra, and only if
+ *     NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set in the
+ *     "options" field.
+ *
+ *   minRequiredHubclkKHz
+ *     minRequiredHubclkKHz returns the minimum hubclk frequency that can
+ *     support the mode. This output is valid only on Tegra, and only if
+ *     NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set in the
+ *     "options" field.
+ *
+ *   vblankIncreaseInLinesForOSLDMode
+ *     vblankIncreaseInLinesForOSLDMode returns the amount, in lines, by
+ *     which vblank needs to be extended to achieve optimized MSCG when OSLD
+ *     Mode is enabled (assuming bIsPossible is TRUE). Features like Panel
+ *     Replay and Panel Self Refresh enable OSLD mode. This value is 0 if
+ *     the vblank is large enough to accommodate spool up and MSCG latencies.
+ *     This output is valid only if
+ *     NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set in the
+ *     "options" field and bGetOSLDOutput was specified in the head input.
+ *
+ *   wakeUpRgLineForOSLDMode
+ *     wakeUpRgLineForOSLDMode returns the rg line in the vblank region at
+ *     which the clients will be required to send a timestamped update to
+ *     achieve optimized MSCG when OSLD Mode is enabled (assuming bIsPossible
+ *     is TRUE). Features like Panel Replay and Panel Self Refresh enable
+ *     OSLD mode. This output is valid only if
+ *     NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set in the
+ *     "options" field and bGetOSLDOutput was specified in the head input.
+ *
+ *   worstCaseMargin
+ *     worstCaseMargin returns the ratio of available bandwidth to required
+ *     bandwidth, multiplied by NV5070_CTRL_IMP_MARGIN_MULTIPLIER. Available
+ *     bandwidth is calculated in the worst case bandwidth domain, i.e., the
+ *     domain with the least available margin. Bandwidth domains include the
+ *     IMP-relevant clock domains, and possibly other virtual bandwidth
+ *     domains such as AWP.
+ *
+ *     Note that IMP checks additional parameters besides the bandwidth
+ *     margins, but only the bandwidth margin is reported here, so it is
+ *     possible for a mode to have a more restrictive domain that is not
+ *     reflected in the reported margin result.
+ *
+ *     This result is not guaranteed to be valid if the mode is not possible.
+ *
+ *     Note also that the result is generally calculated for the highest
+ *     v-pstate possible (usually P0). But if _NEED_MIN_VPSTATE is
+ *     specified, the result will be calculated for the min possible v-pstate
+ *     (or the highest possible v-pstate, if the mode is not possible).
+ *
+ *     The result is valid only if
+ *     NV5070_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN is set in "options".
+ *
+ *   dispClkKHz
+ *     This is the dispclk frequency selected by IMP for this mode. For dGPU,
+ *     it will be one of the fixed frequencies from the list of frequencies
+ *     supported by the vbios.
+ *
+ *   worstCaseDomain
+ *     Returns a short text string naming the domain for the margin returned in
+ *     "worstCaseMargin". See "worstCaseMargin" for more information.
+ *
+ *   numTilingAssignments
+ *     This is the number of entries in the tilingAssignments array, each of
+ *     which represents a possible distinct tiling configuration. The client
+ *     will do the actual assignment of tiles during the modeset, but the
+ *     information provided here will help the client to know how many tiles
+ *     to assign to each head.
+ *
+ *     The first tiling assignment (tilingAssignments[0]) is required; it will
+ *     specify that one or more tiles must be assigned to each active head in
+ *     order for the mode to be possible. Subsequent tiling assignments are
+ *     optional; each higher assignment will reduce the required dispclk to a
+ *     lower frequency, so the client may choose to use some or all of these
+ *     assignments because they might reduce power consumption.
+ *
+ *     The tiling assignments are incremental; the client may choose how many
+ *     assignments to use (after the first one, which is required), but they
+ *     must be used in sequence. For example, if there are five possible
+ *     assignments (numTilingAssignments = 5), and the client wishes to apply
+ *     the third assignment, the tiles from the first two assignments must also
+ *     be allocated.
+ *
+ *     If the client decides to use a particular tiling assignment, it should
+ *     assign all tiles specified in that assignment. (Otherwise, there will
+ *     be no benefit from that assignment for reducing dispclk.)
+ *
+ *     A 3-head example of a set of tiling assignments is:
+ *       numTilingAssignments = 3
+ *       tilingAssignments[0].numTiles = 4
+ *       tilingAssignments[1].numTiles = 2
+ *       tilingAssignments[2].numTiles = 1
+ *       tileList[0].head = 0 (first tile for tilingAssignments[0])
+ *       tileList[0].headDscSlices = 2
+ *       tileList[1].head = 0 (second tile for tilingAssignments[0])
+ *       tileList[1].headDscSlices = xxx (not used, because it is specified in
+ *                                        tileList[0].headDscSlices)
+ *       tileList[2].head = 1 (third tile for tilingAssignments[0])
+ *       tileList[2].headDscSlices = 1
+ *       tileList[3].head = 2 (fourth tile for tilingAssignments[0])
+ *       tileList[3].headDscSlices = 1
+ *       tileList[4].head = 1 (first tile for tilingAssignments[1])
+ *       tileList[4].headDscSlices = 2
+ *       tileList[5].head = 2 (second tile for tilingAssignments[1])
+ *       tileList[5].headDscSlices = 2
+ *       tileList[6].head = 0 (tile for tilingAssignments[2])
+ *       tileList[6].headDscSlices = 3
+ *
+ *     tilingAssignments[0] always specifies the minimum tiling assignment
+ *     necessary to make the mode possible. In this example, two tiles are
+ *     required on head 0, but heads 1 and 2 can work with a single tile each.
+ *
+ *     After the four required tiles are assigned for tilingAssignments[0], the
+ *     client may choose to apply tilingAssignments[1] as well, to reduce
+ *     dispclk further. Two additional tiles would be required for this, one
+ *     on head 1 and one on head 2. Note that there would be no benefit to
+ *     assigning a tile to only one of these two heads; all heads specified in
+ *     the tilingAssignment must be assigned (if the tilingAssignment is to be
+ *     used). After this assignment, head 1 and head 2 would each have two
+ *     tiles assigned (one from tilingAssignments[0] and one from
+ *     tilingAssignments[1]). Head 0 would still have 2 tiles assigned.
+ *
+ *     If tilingAssignments[2] is also used, an additional tile would be
+ *     assigned to head 0, bringing the tile total to three for that head. The
+ *     number of DSC slices required for that head would be increased to three.
+ *
+ *     Note that the tiling assignments do not specify which tiles to use; they
+ *     only specify how many tiles to assign to each head. The client must
+ *     choose which tiles to assign, based on their capabilities.
+ *
+ *   tilingAssignments.numTiles
+ *     This is the number of additional tiles required for the indexed tiling
+ *     assignment. The tilingAssignment does not provide any benefit unless
+ *     all of its specified tiles are assigned.
+ *
+ *   tileList.head
+ *     This specifies the head to which a tile must be assigned, to receive a
+ *     benefit (dispclk reduction) for a given tiling assignment.
+ *     tileList entries (head indexes) are assigned consecutively, based on the
+ *     tilingAssignments.numTiles entries. For example, if
+ *     tilingAssignments[0].numTiles = 3 and tilingAssignments[1].numTiles = 2,
+ *     then the first three tileList entries (indexes 0, 1, and 2) would be for
+ *     tiling assignment 0 and the next two entries (indexes 3 and 4) would be
+ *     for tiling assignment 1.
+ *
+ *     A single assignment may have multiple tileList.head entries for the same
+ *     head (if a single head requires that more than one additional tile be
+ *     assigned).
+ *
+ *     tileList.head indexes heads as they are indexed in the
+ *     NVC372_CTRL_IMP_HEAD array within the IMP input data structure. (These
+ *     do not necessarily correspond to physical head indexes.)
+ *
+ *   tileList.headDscSlices
+ *     headDscSlices gives the recommended number of DSC slices for each
+ *     scanline for the head specified in tileList.head. If a specific tiling
+ *     assignment has multiple tiles assigned to the same head, the
+ *     headDscSlices value for the first tileList entry should be used;
+ *     subsequent entries may be ignored. If multiple tilingAssignments are
+ *     applied, the headDscSlices entry for the highest indexed
+ *     tilingAssignment takes precedence over any entries from lower indexed
+ *     assignments, for the same head.
+ *
+ *     This field is relevant only if head.bEnableDsc is true.
+ *
+ * Possible status values returned are:
+ *   NVOS_STATUS_SUCCESS
+ *   NVOS_STATUS_ERROR_GENERIC
+ */
+#define NVC372_CTRL_IMP_LUT_USAGE_NONE   0
+#define NVC372_CTRL_IMP_LUT_USAGE_257    1
+#define NVC372_CTRL_IMP_LUT_USAGE_1025   2
+#define NVC372_CTRL_IMP_LUT_USAGE_HW_MAX 3
+
+typedef struct NVC372_CTRL_IMP_HEAD {
+    NvU8  headIndex;
+
+    NvU32 maxPixelClkKHz;
+
+    struct {
+        NvU32 width;
+        NvU32 height;
+    } rasterSize;
+
+    struct {
+        NvU32 X;
+        NvU32 Y;
+    } rasterBlankStart;
+
+    struct {
+        NvU32 X;
+        NvU32 Y;
+    } rasterBlankEnd;
+
+    struct {
+        NvU32 yStart;
+        NvU32 yEnd;
+    } rasterVertBlank2;
+
+    struct {
+        NV_DISP_LOCK_MODE masterLockMode;
+        NV_DISP_LOCK_PIN  masterLockPin;
+        NV_DISP_LOCK_MODE slaveLockMode;
+        NV_DISP_LOCK_PIN  slaveLockPin;
+    } control;
+
+    NvU32  maxDownscaleFactorH;
+    NvU32  maxDownscaleFactorV;
+    NvU8   outputScalerVerticalTaps;
+    NvBool bUpscalingAllowedV;
+    NvBool bOverfetchEnabled;
+    NvBool bLtmAllowed;
+
+    struct {
+        NvU16 leadingRasterLines;
+        NvU16 trailingRasterLines;
+    } minFrameIdle;
+
+    NvU8   lut;
+    NvU8   cursorSize32p;
+
+    NvU8   tileMask;
+
+    NvBool bEnableDsc;
+
+    NvU16  dscTargetBppX16;
+
+    NvU32  possibleDscSliceCountMask;
+
+    NvU32  maxDscSliceWidth;
+
+    NvBool bYUV420Format;
+
+    NvBool bIs2Head1Or;
+
+    NvBool bGetOSLDOutput;
+
+    NvBool bDisableMidFrameAndDWCFWatermark;
+} NVC372_CTRL_IMP_HEAD;
+typedef struct NVC372_CTRL_IMP_HEAD *PNVC372_CTRL_IMP_HEAD;
+
+typedef struct NVC372_CTRL_IMP_WINDOW {
+    NvU32  windowIndex;
+    NvU32  owningHead;
+    NvU32  formatUsageBound;
+    NvU32  rotatedFormatUsageBound;
+    NvU32  maxPixelsFetchedPerLine;
+    NvU32  maxDownscaleFactorH;
+    NvU32  maxDownscaleFactorV;
+    NvU8   inputScalerVerticalTaps;
+    NvBool bUpscalingAllowedV;
+    NvBool bOverfetchEnabled;
+    NvU8   lut;
+    NvU8   tmoLut;
+    NvU8   surfaceLayout;
+} NVC372_CTRL_IMP_WINDOW;
+typedef struct NVC372_CTRL_IMP_WINDOW *PNVC372_CTRL_IMP_WINDOW;
+
+typedef struct NVC372_TILING_ASSIGNMENT {
+    NvU8 numTiles;
+} NVC372_TILING_ASSIGNMENT;
+
+typedef struct NVC372_TILE_ENTRY {
+    NvU8 head;
+    NvU8 headDscSlices;
+} NVC372_TILE_ENTRY;
+
+#define NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN       (0x00000001)
+#define
NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE (0x00000002) + +#define NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS { + NVC372_CTRL_CMD_BASE_PARAMS base; + + NvU8 numHeads; + NvU8 numWindows; + + NVC372_CTRL_IMP_HEAD head[NVC372_CTRL_MAX_POSSIBLE_HEADS]; + + // C form: NVC372_CTRL_IMP_WINDOW window[NVC372_CTRL_MAX_POSSIBLE_WINDOWS]; + NVC372_CTRL_IMP_WINDOW window[NVC372_CTRL_MAX_POSSIBLE_WINDOWS]; + + NvU32 options; + + NvU32 testMclkFreqKHz; + + NvBool bIsPossible; + + NvBool bIsOSLDPossible[NVC372_CTRL_MAX_POSSIBLE_HEADS]; + + NvU32 minImpVPState; + + NvU32 minPState; + + NvU32 minRequiredBandwidthKBPS; + + NvU32 floorBandwidthKBPS; + + NvU32 minRequiredHubclkKHz; + + NvU32 vblankIncreaseInLinesForOSLDMode[NVC372_CTRL_MAX_POSSIBLE_HEADS]; + + NvU32 wakeUpRgLineForOSLDMode[NVC372_CTRL_MAX_POSSIBLE_HEADS]; + + NvU32 worstCaseMargin; + + NvU32 dispClkKHz; + + NvU32 numTilingAssignments; + + NVC372_TILING_ASSIGNMENT tilingAssignments[NVC372_CTRL_MAX_POSSIBLE_TILES]; + + NVC372_TILE_ENTRY tileList[NVC372_CTRL_MAX_POSSIBLE_TILES]; + + char worstCaseDomain[8]; + + NvBool bUseCachedPerfState; +} NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS; +typedef struct NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *PNVC372_CTRL_IS_MODE_POSSIBLE_PARAMS; + +/* valid format values */ +#define NVC372_CTRL_FORMAT_RGB_PACKED_1_BPP (0x00000001) +#define NVC372_CTRL_FORMAT_RGB_PACKED_2_BPP (0x00000002) +#define NVC372_CTRL_FORMAT_RGB_PACKED_4_BPP (0x00000004) +#define NVC372_CTRL_FORMAT_RGB_PACKED_8_BPP (0x00000008) +#define NVC372_CTRL_FORMAT_YUV_PACKED_422 (0x00000010) +#define NVC372_CTRL_FORMAT_YUV_PLANAR_420 (0x00000020) +#define NVC372_CTRL_FORMAT_YUV_PLANAR_444 (0x00000040) +#define NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_420 (0x00000080) +#define NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_422 (0x00000100) +#define NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_422R (0x00000200) +#define NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_444 (0x00000400) +#define NVC372_CTRL_FORMAT_EXT_YUV_PLANAR_420 (0x00000800) +#define NVC372_CTRL_FORMAT_EXT_YUV_PLANAR_444 (0x00001000) +#define NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_420 (0x00002000) +#define NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_422 (0x00004000) +#define NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_422R (0x00008000) +#define NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_444 (0x00010000) + +/* valid layout values */ +#define NVC372_CTRL_LAYOUT_PITCH_BLOCKLINEAR 0 +#define NVC372_CTRL_LAYOUT_PITCH 1 +#define NVC372_CTRL_LAYOUT_BLOCKLINEAR 2 + +/* valid impResult values */ +#define NVC372_CTRL_IMP_MODE_POSSIBLE 0 +#define NVC372_CTRL_IMP_NOT_ENOUGH_MEMPOOL 1 +#define NVC372_CTRL_IMP_REQ_LIMIT_TOO_HIGH 2 +#define NVC372_CTRL_IMP_VBLANK_TOO_SMALL 3 +#define NVC372_CTRL_IMP_HUBCLK_TOO_LOW 4 +#define NVC372_CTRL_IMP_INSUFFICIENT_BANDWIDTH 5 +#define NVC372_CTRL_IMP_DISPCLK_TOO_LOW 6 +#define NVC372_CTRL_IMP_ELV_START_TOO_HIGH 7 +#define NVC372_CTRL_IMP_OSLD_ELV_START_TOO_HIGH 8 +#define NVC372_CTRL_IMP_INSUFFICIENT_THREAD_GROUPS 9 +#define NVC372_CTRL_IMP_INVALID_PARAMETER 10 +#define NVC372_CTRL_IMP_UNRECOGNIZED_FORMAT 11 +#define NVC372_CTRL_IMP_UNSPECIFIED 12 + +/* + * The calculated margin is multiplied by a constant, so that it can be + * represented as an integer with reasonable precision. "0x400" was chosen + * because it is a power of two, which might allow some compilers/CPUs to + * simplify the calculation by doing a shift instead of a multiply/divide. 
+ * (And 0x400 is 1024, which is close to 1000, so that may simplify visual
+ * interpretation of the raw margin value.)
+ */
+#define NVC372_CTRL_IMP_MARGIN_MULTIPLIER (0x00000400)
+
+/* scaling factor */
+#define NVC372_CTRL_SCALING_FACTOR_MULTIPLIER (0x00000400)
+
+#define NVC372_CTRL_CMD_NUM_DISPLAY_ID_DWORDS_PER_HEAD 2
+#define NVC372_CTRL_CMD_MAX_SORS                       4
+
+#define NVC372_CTRL_CMD_IS_MODE_POSSIBLE_OR_SETTINGS (0xc3720102) /* finn: Evaluated from "(FINN_NVC372_DISPLAY_SW_CHNCTL_INTERFACE_ID << 8) | NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NVC372_CTRL_CMD_IS_MODE_POSSIBLE_OR_SETTINGS
+ *
+ * This command reports whether the output resource pixel clocks requested by
+ * the client are possible. Note that this command is not used for a
+ * DisplayPort SOR, as that case is handled by the DisplayPort library.
+ *
+ * Inputs:
+ *   numHeads
+ *     This is the number of heads in the "head" array of the
+ *     NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS struct. Only active
+ *     heads should be included in the struct.
+ *
+ *   head.headIndex
+ *     This is the hardware index number for the head. Only an active head
+ *     should be included in the input structure.
+ *
+ *   head.maxPixelClkKhz
+ *     This parameter specifies the pixel scanout rate (in KHz).
+ *
+ *   head.displayId
+ *     Array of displayId's associated with the head. This is limited by
+ *     NVC372_CTRL_CMD_NUM_DISPLAY_ID_DWORDS_PER_HEAD.
+ *
+ *   sor.ownerMask
+ *     Consists of a mask of all heads that drive this sor.
+ *
+ *   sor.protocol
+ *     Defines the protocol of the sor in question.
+ *
+ *   sor.pixelReplicateMode
+ *     Defines which pixel replication mode is requested. This can be off,
+ *     X2, or X4 mode.
+ *
+ * Outputs:
+ *   bIsPossible
+ *     This output reports whether the requested pixel clocks can be supported.
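+ *
+ *   A minimal call sketch (illustrative only, not part of the generated
+ *   header; it assumes an NvRmControl-style dispatch helper, which this
+ *   header does not define):
+ *
+ *     NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS p = { 0 };
+ *     p.numHeads = 1;
+ *     p.head[0].headIndex = 0;
+ *     p.head[0].maxPixelClkKhz = 594000;  // e.g., 4K@60 over TMDS
+ *     p.sor[0].ownerMask = NVC372_CTRL_CMD_SOR_OWNER_MASK_HEAD(0);
+ *     p.sor[0].protocol = NVC372_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_A;
+ *     p.sor[0].pixelReplicateMode =
+ *         NVC372_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_OFF;
+ *     status = NvRmControl(hClient, hDispSw,
+ *                          NVC372_CTRL_CMD_IS_MODE_POSSIBLE_OR_SETTINGS,
+ *                          &p, sizeof(p));
+ *     // On NV_OK, p.bIsPossible reports whether the OR can drive this clock.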
+ */ + + +#define NVC372_CTRL_IS_MODE_POSSIBLE_DISPLAY_ID_SKIP_IMP_OUTPUT_CHECK (0xAAAAAAAA) + +typedef struct NVC372_CTRL_IMP_OR_SETTINGS_HEAD { + NvU8 headIndex; + NvU32 maxPixelClkKhz; + + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP outputResourcePixelDepthBPP; + + NvU32 displayId[NVC372_CTRL_CMD_NUM_DISPLAY_ID_DWORDS_PER_HEAD]; +} NVC372_CTRL_IMP_OR_SETTINGS_HEAD; +typedef struct NVC372_CTRL_IMP_OR_SETTINGS_HEAD *PNVC372_CTRL_IMP_OR_SETTINGS_HEAD; + +#define NVC372_CTRL_CMD_SOR_OWNER_MASK_NONE (0x00000000) +#define NVC372_CTRL_CMD_SOR_OWNER_MASK_HEAD(i) (1 << i) + +#define NVC372_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_A (0x00000000) +#define NVC372_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_B (0x00000001) +#define NVC372_CTRL_CMD_SOR_PROTOCOL_DUAL_TMDS (0x00000002) +#define NVC372_CTRL_CMD_SOR_PROTOCOL_SUPPORTED (0xFFFFFFFF) + +#define NVC372_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NVC372_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NVC372_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_X4 (0x00000002) + +typedef struct NVC372_CTRL_IMP_OR_SETTINGS_SOR { + NvU32 ownerMask; + NvU32 protocol; + NvU32 pixelReplicateMode; +} NVC372_CTRL_IMP_OR_SETTINGS_SOR; +typedef struct NVC372_CTRL_IMP_OR_SETTINGS_SOR *PNVC372_CTRL_IMP_OR_SETTINGS_SOR; + +#define NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS { + NVC372_CTRL_CMD_BASE_PARAMS base; + + NvU32 numHeads; + + NVC372_CTRL_IMP_OR_SETTINGS_HEAD head[NVC372_CTRL_MAX_POSSIBLE_HEADS]; + + NVC372_CTRL_IMP_OR_SETTINGS_SOR sor[NVC372_CTRL_CMD_MAX_SORS]; + + NvBool bIsPossible; +} NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS; +typedef struct NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS *PNVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS; + +#define NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE (0xc3720103) /* finn: Evaluated from "(FINN_NVC372_DISPLAY_SW_CHNCTL_INTERFACE_ID << 8) | NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS_MESSAGE_ID" */ + +/* + * NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE + * + * This control call is used by clients to inform RM about video adaptive refresh rate enable/disable. + * Based on the state, RM will enable/disable supported low power features. + * + * Inputs: + * displayID + * displayId of panel on which video adaptive refresh rate is enabled/disabled. + * + * bEnable + * NV_TRUE to enable video adaptive refresh rate mode. + * NV_FALSE to disable video adaptive refresh rate mode. + * + * Outputs: + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS { + NvU32 displayID; + NvBool bEnable; +} NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS; +typedef struct NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS *PNVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS; + + +#define NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN (0xc3720104) /* finn: Evaluated from "(FINN_NVC372_DISPLAY_SW_CHNCTL_INTERFACE_ID << 8) | NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS_MESSAGE_ID" */ + +/* + * NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN + * + * This control call is used by clients to query the active viewport for the + * provided window precalculated at the beginning of each frame. + * + * Inputs: + * windowIndex + * Index of the window to be queried. Must be connected to an active head. 
+ *
+ * Outputs:
+ *   activeViewportPointIn
+ *     X and Y coordinates of the active viewport on the provided window for
+ *     the most recent frame.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT if the window index is invalid
+ *   NV_ERR_INVALID_STATE if the window index isn't connected to a head
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS {
+    NVC372_CTRL_CMD_BASE_PARAMS base;
+
+    NvU32 windowIndex;
+
+    struct {
+        NvU32 x;
+        NvU32 y;
+    } activeViewportPointIn;
+} NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS;
+typedef struct NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS *PNVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS;
+
+/* _ctrlc372chnc_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc637.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc637.h
new file mode 100644
index 0000000..f480c0e
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc637.h
@@ -0,0 +1,407 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrlc637.finn
+//
+
+#include "nvlimits.h"
+#include "ctrl/ctrlxxxx.h"
+/* AMPERE_SMC_PARTITION_REF commands and parameters */
+
+#define NVC637_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0xC637, NVC637_CTRL_##cat, idx)
+
+/* Command categories (6bits) */
+#define NVC637_CTRL_RESERVED        (0x00)
+#define NVC637_CTRL_EXEC_PARTITIONS (0x01)
+
+
+/*!
+ * NVC637_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NVC637_CTRL_CMD_NULL (0xc6370000) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+/*!
+ * NVC637_CTRL_EXEC_PARTITIONS_INFO
+ *
+ * This structure specifies resources in an execution partition
+ *
+ *   gpcCount[IN/OUT]
+ *     - Total number of GPCs in this partition (including GFX-supported GPCs)
+ *
+ *   gfxGpcCount[IN/OUT]
+ *     - Number of GFX GPCs in this partition. This should be a subset of the
+ *       GPCs included in gpcCount
+ *
+ *   veidCount[OUT]
+ *     - Number of VEIDs available in this partition.
+ *
+ *   ceCount[IN/OUT]
+ *     - Copy Engines in this partition
+ *
+ *   nvEncCount[IN/OUT]
+ *     - Encoder Engines in this partition
+ *
+ *   nvDecCount[IN/OUT]
+ *     - Decoder Engines in this partition
+ *
+ *   nvJpgCount[IN/OUT]
+ *     - Jpg Engines in this partition
+ *
+ *   ofaCount[IN/OUT]
+ *     - Ofa engines in this partition
+ *
+ *   sharedEngFlag[IN/OUT]
+ *     - Flags determining whether the above engines are shared with other
+ *       execution partitions
+ *
+ *   veidStartOffset[OUT]
+ *     - VEID start offset within GPU partition
+ *
+ *   smCount[IN/OUT]
+ *     - Number of active SMs in this partition
+ *
+ *   spanStart[IN/OUT]
+ *     - First slot in the span for an execution partition placement
+ *
+ *   computeSize[IN/OUT]
+ *     - Flag corresponding to the compute profile used
+ *
+ */
+typedef struct NVC637_CTRL_EXEC_PARTITIONS_INFO {
+    NvU32 gpcCount;
+    NvU32 gfxGpcCount;
+    NvU32 veidCount;
+    NvU32 ceCount;
+    NvU32 nvEncCount;
+    NvU32 nvDecCount;
+    NvU32 nvJpgCount;
+    NvU32 ofaCount;
+    NvU32 sharedEngFlag;
+    NvU32 veidStartOffset;
+    NvU32 smCount;
+    NvU32 spanStart;
+    NvU32 computeSize;
+} NVC637_CTRL_EXEC_PARTITIONS_INFO;
+
+#define NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG       31:0
+#define NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NONE  0x0
+#define NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_CE    NVBIT(0)
+#define NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVDEC NVBIT(1)
+#define NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVENC NVBIT(2)
+#define NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_OFA   NVBIT(3)
+#define NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVJPG NVBIT(4)
+
+#define NVC637_CTRL_MAX_EXEC_PARTITIONS        8
+#define NVC637_CTRL_EXEC_PARTITIONS_ID_INVALID 0xFFFFFFFF
+
+/*!
+ * NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS
+ *
+ * This command will create the requested execution partitions under the
+ * subscribed memory partition. The memory partition is expected to be
+ * configured before execution partition creation.
+ *
+ *   bQuery[IN]
+ *     - If NV_TRUE, execution partitions will not be created, but a return
+ *       status of NV_OK will indicate that the request is valid and can
+ *       currently be fulfilled
+ *
+ *   flags [IN]
+ *     REQUEST_WITH_PART_ID
+ *     - If set, RM will try to assign the execution partition ID requested by
+ *       clients. This flag is only supported on vGPU-enabled RM builds and
+ *       will be removed when the vGPU plugin implements virtualized execution
+ *       partition ID support. (bug 2938187)
+ *
+ *     REQUEST_AT_SPAN
+ *     - If set, RM will try to assign execution partition resources at the
+ *       specified span. This flag currently is only useful for chips in which
+ *       CTS IDs are mandatory in RM, as it allows the requester to position
+ *       compute instances without using RM best-fit allocation.
+ * + * execPartCount[IN] + * - Number of execution partitions requested + * + * execPartInfo[IN] + * - Requested execution partition resources for each requested partition + * + * execPartId[OUT] + * - ID of each requested execution partition + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NVC637_CTRL_DMA_EXEC_PARTITIONS_CREATE_REQUEST_WITH_PART_ID 0:0 +#define NVC637_CTRL_DMA_EXEC_PARTITIONS_CREATE_REQUEST_WITH_PART_ID_FALSE (0x00000000) +#define NVC637_CTRL_DMA_EXEC_PARTITIONS_CREATE_REQUEST_WITH_PART_ID_TRUE (0x00000001) +#define NVC637_CTRL_DMA_EXEC_PARTITIONS_CREATE_REQUEST_AT_SPAN 1:1 +#define NVC637_CTRL_DMA_EXEC_PARTITIONS_CREATE_REQUEST_AT_SPAN_FALSE (0x00000000) +#define NVC637_CTRL_DMA_EXEC_PARTITIONS_CREATE_REQUEST_AT_SPAN_TRUE (0x00000001) + +#define NVC637_CTRL_CMD_EXEC_PARTITIONS_CREATE (0xc6370101) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID << 8) | NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS_MESSAGE_ID" */ + +/*! + * NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS + * + * This command will delete requested execution partitions. + * + * execPartCount[IN] + * - Number of execution partitions to delete. + * + * execPartId[IN] + * - Execution partition IDs to delete + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS { + NvBool bQuery; + NvU32 flags; + NvU32 execPartCount; + // C form: NVC637_CTRL_EXEC_PARTITIONS_INFO execPartInfo[NVC637_CTRL_MAX_EXEC_PARTITIONS]; + NVC637_CTRL_EXEC_PARTITIONS_INFO execPartInfo[NVC637_CTRL_MAX_EXEC_PARTITIONS]; + // C form: NvU32 execPartId[NVC637_CTRL_MAX_EXECUTION_PARTITIONS]; + NvU32 execPartId[NVC637_CTRL_MAX_EXEC_PARTITIONS]; +} NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS; +#define NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS { + NvU32 execPartCount; + NvU32 execPartId[NVC637_CTRL_MAX_EXEC_PARTITIONS]; +} NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS; + +#define NVC637_CTRL_CMD_EXEC_PARTITIONS_DELETE (0xc6370102) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID << 8) | NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS_MESSAGE_ID" */ + +/*! + * NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS + * + * This command will return information about execution partitions which + * currently exist within the subscribed memory partition. 
+ * + * execPartCount[OUT] + * - Number of existing execution partitions + * + * execPartId[OUT] + * - ID of existing execution partitions + * + * execPartInfo[OUT] + * - Resources within each existing execution partition + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS { + NvU32 execPartCount; + // C form: NvU32 execPartId[NVC637_CTRL_MAX_EXECUTION_PARTITIONS]; + NvU32 execPartId[NVC637_CTRL_MAX_EXEC_PARTITIONS]; + // C form: NVC637_CTRL_EXEC_PARTITIONS_INFO execPartInfo[NVC637_CTRL_MAX_EXEC_PARTITIONS]; + NVC637_CTRL_EXEC_PARTITIONS_INFO execPartInfo[NVC637_CTRL_MAX_EXEC_PARTITIONS]; +} NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS; + +#define NVC637_CTRL_CMD_EXEC_PARTITIONS_GET (0xc6370103) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID << 8) | NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS_MESSAGE_ID" */ + +/*! + * NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS + * + * This command will return IDs of all active execution partitions in a memory + * partition + * + * execPartCount[OUT] + * - Number of existing execution partitions + * + * execPartId[OUT] + * - ID of existing execution partitions + * + * execPartUuid[OUT] + * - ASCII UUID string of existing execution partitions + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NVC637_UUID_LEN 16 +#define NVC637_UUID_STR_LEN NV_MIG_DEVICE_UUID_STR_LENGTH + +typedef struct NVC637_EXEC_PARTITION_UUID { + // C form: char str[NVC638_UUID_STR_LEN]; + char str[NVC637_UUID_STR_LEN]; +} NVC637_EXEC_PARTITION_UUID; + +#define NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS_PARAMS { + NvU32 execPartCount; + + // C form: NvU32 execPartId[NVC637_CTRL_MAX_EXECUTION_PARTITIONS]; + NvU32 execPartId[NVC637_CTRL_MAX_EXEC_PARTITIONS]; + + // C form: NVC637_EXEC_PARTITION_UUID execPartUuid[NVC637_CTRL_MAX_EXEC_PARTITIONS]; + NVC637_EXEC_PARTITION_UUID execPartUuid[NVC637_CTRL_MAX_EXEC_PARTITIONS]; +} NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS_PARAMS; + +#define NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS (0xc6370104) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID << 8) | NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS_PARAMS_MESSAGE_ID" */ + +/* + * NVC637_CTRL_CMD_EXEC_PARTITIONS_EXPORT + * + * Export the resource and placement information about an exec partition such + * that a similar partition can be recreated from scratch in the same position. + */ +#define NVC637_CTRL_CMD_EXEC_PARTITIONS_EXPORT (0xc6370105) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID << 8) | 0x5" */ + +/* + * NVC637_CTRL_CMD_EXEC_PARTITIONS_IMPORT + * + * Create an exec partition resembling the exported partition info. The imported + * partition should behave identically with respect to fragmentation. 
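+ *
+ * A minimal export/import round trip (illustrative only, not part of the
+ * generated header; it assumes an NvRmControl-style dispatch helper and
+ * subscribed partition handles, none of which are defined here):
+ *
+ *   NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS xfer = { 0 };
+ *   xfer.id = execPartId;  // partition to snapshot
+ *   status = NvRmControl(hClient, hGpuInstance,
+ *                        NVC637_CTRL_CMD_EXEC_PARTITIONS_EXPORT,
+ *                        &xfer, sizeof(xfer));
+ *   // ... later, against the same (or an identically configured) instance:
+ *   status = NvRmControl(hClient, hGpuInstance,
+ *                        NVC637_CTRL_CMD_EXEC_PARTITIONS_IMPORT,
+ *                        &xfer, sizeof(xfer));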
+ */
+#define NVC637_CTRL_CMD_EXEC_PARTITIONS_IMPORT (0xc6370106) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID << 8) | 0x6" */
+
+#define NVC637_CTRL_EXEC_PARTITIONS_EXPORT_MAX_ENGINES_MASK_SIZE 4
+typedef struct NVC637_CTRL_EXEC_PARTITIONS_EXPORTED_INFO {
+    NV_DECLARE_ALIGNED(NvU64 enginesMask[NVC637_CTRL_EXEC_PARTITIONS_EXPORT_MAX_ENGINES_MASK_SIZE], 8);
+    NvU8  uuid[NVC637_UUID_LEN];
+    NvU32 sharedEngFlags;
+    NvU32 gpcMask;
+    NvU32 gfxGpcCount;
+    NvU32 veidOffset;
+    NvU32 veidCount;
+    NvU32 smCount;
+    NvU32 spanStart;
+    NvU32 computeSize;
+} NVC637_CTRL_EXEC_PARTITIONS_EXPORTED_INFO;
+
+typedef struct NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS {
+    NvU32 id;
+    NV_DECLARE_ALIGNED(NVC637_CTRL_EXEC_PARTITIONS_EXPORTED_INFO info, 8);
+    NvBool bCreateCap;
+} NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS;
+
+/*
+ * NVC637_CTRL_EXEC_PARTITION_PARTITION_SPAN
+ *
+ * This struct represents the span of a compute instance: the resource slots
+ * the partition occupies (or may occupy) within a fixed range which is
+ * defined per-chip. A partition containing more resources will cover more
+ * resource slots and therefore cover a larger span.
+ *
+ * lo
+ *  - The starting unit of this span, inclusive
+ *
+ * hi
+ *  - The ending unit of this span, inclusive
+ *
+ */
+typedef struct NVC637_CTRL_EXEC_PARTITION_PARTITION_SPAN {
+    NV_DECLARE_ALIGNED(NvU64 lo, 8);
+    NV_DECLARE_ALIGNED(NvU64 hi, 8);
+} NVC637_CTRL_EXEC_PARTITION_PARTITION_SPAN;
+
+/*
+ * NVC637_CTRL_EXEC_PARTITIONS_GET_PROFILE_CAPACITY
+ *
+ * This command returns the number of compute instances of the given compute
+ * profile size which can currently be created. Profiles (and their
+ * computeSize field) can be queried via
+ * NV2080_CTRL_CMD_GPU_GET_COMPUTE_PROFILES.
+ * Note that this API does not "reserve" any partitions, and there is no
+ * guarantee that the reported count of available partitions of a given size
+ * will remain consistent following creation of partitions of different sizes
+ * through NV2080_CTRL_GPU_SET_PARTITIONS.
+ * Note that this API is unsupported if SMC is feature-disabled.
+ * Note that the caller of this CTRL must be subscribed to a valid GPU
+ * instance.
+ *
+ * computeSize[IN]
+ *  - Partition flag indicating size of requested profile
+ *
+ * profileCount[OUT]
+ *  - Available number of profiles of the given size which can currently be
+ *    created
+ *
+ * availableSpans[OUT]
+ *  - For each profile able to be created of the specified size, the span
+ *    it could occupy
+ *
+ * availableSpansCount[OUT]
+ *  - Number of valid entries in availableSpans
+ *
+ * totalProfileCount[OUT]
+ *  - Total number of profiles of the given size which can be created
+ *
+ * totalSpans[OUT]
+ *  - List of spans which can possibly be occupied by profiles of the
+ *    given type
+ *
+ * totalSpansCount[OUT]
+ *  - Number of valid entries in totalSpans
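+ *
+ * As a non-normative sketch (again assuming an NvRmControl-style entry
+ * point), a client could probe capacity before attempting a create:
+ *
+ *   NVC637_CTRL_EXEC_PARTITIONS_GET_PROFILE_CAPACITY_PARAMS cap = { 0 };
+ *   cap.computeSize = computeSize; // from a queried compute profile
+ *   status = NvRmControl(hClient, hPartitionRef,
+ *                        NVC637_CTRL_CMD_EXEC_PARTITIONS_GET_PROFILE_CAPACITY,
+ *                        &cap, sizeof(cap));
+ *   if (status == NV_OK && cap.profileCount > 0)
+ *       ; // at least one instance of this size fits right now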
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NVC637_CTRL_CMD_EXEC_PARTITIONS_GET_PROFILE_CAPACITY (0xc63701a9U) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID << 8) | NVC637_CTRL_EXEC_PARTITIONS_GET_PROFILE_CAPACITY_PARAMS_MESSAGE_ID" */ + +#define NVC637_CTRL_EXEC_PARTITIONS_GET_PROFILE_CAPACITY_PARAMS_MESSAGE_ID (0xA9U) + +typedef struct NVC637_CTRL_EXEC_PARTITIONS_GET_PROFILE_CAPACITY_PARAMS { + NvU32 computeSize; + NvU32 profileCount; + NV_DECLARE_ALIGNED(NVC637_CTRL_EXEC_PARTITION_PARTITION_SPAN availableSpans[NVC637_CTRL_MAX_EXEC_PARTITIONS], 8); + NvU32 availableSpansCount; + NvU32 totalProfileCount; + NV_DECLARE_ALIGNED(NVC637_CTRL_EXEC_PARTITION_PARTITION_SPAN totalSpans[NVC637_CTRL_MAX_EXEC_PARTITIONS], 8); + NvU32 totalSpansCount; +} NVC637_CTRL_EXEC_PARTITIONS_GET_PROFILE_CAPACITY_PARAMS; + +/* + * NVC637_CTRL_CMD_GET_UUID + */ +#define NVC637_CTRL_CMD_GET_UUID (0xc63701aaU) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID << 8) | NVC637_CTRL_GET_UUID_PARAMS_MESSAGE_ID" */ + +#define NVC637_CTRL_GET_UUID_PARAMS_MESSAGE_ID (0xAAU) + +typedef struct NVC637_CTRL_GET_UUID_PARAMS { + NvU8 uuid[NVC637_UUID_LEN]; + + char uuidStr[NVC637_UUID_STR_LEN]; +} NVC637_CTRL_GET_UUID_PARAMS; + +// _ctrlc637_h_ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc638.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc638.h new file mode 100644 index 0000000..5e848a9 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc638.h @@ -0,0 +1,91 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc638.finn +// + +#include "ctrl/ctrlxxxx.h" +/* AMPERE_SMC_EXEC_PARTITION_REF commands and parameters */ + +#define NVC638_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0xC638, NVC638_CTRL_##cat, idx) + +/* Command categories (6bits) */ +#define NVC638_CTRL_RESERVED (0x00) +#define NVC638_CTRL_EXEC_PARTITION (0x01) + +/*! + * NVC638_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. 
+ *
+ * Possible status values returned are:
+ *    NV_OK
+ */
+#define NVC638_CTRL_CMD_NULL (0xc6380000) /* finn: Evaluated from "(FINN_AMPERE_SMC_EXEC_PARTITION_REF_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+/*!
+ * NVC638_CTRL_CMD_GET_UUID
+ *
+ * This command returns the SHA1 ASCII UUID string as well as the binary UUID
+ * for the execution partition. The ASCII string format is,
+ * "MIG-%16x-%08x-%08x-%08x-%024x" (the canonical format of a UUID)
+ *
+ * uuid[OUT]
+ *  - Raw UUID bytes
+ *
+ * uuidStr[OUT]
+ *  - ASCII UUID string
+ *
+ * Possible status values returned are:
+ *    NV_OK
+ *    NV_ERR_NOT_SUPPORTED
+ *    NV_ERR_INVALID_STATE
+ */
+
+#define NVC638_UUID_LEN 16
+
+/* 'M', 'I', 'G', 5 x '-', and a terminating '\0': extra = 9 */
+#define NVC638_UUID_STR_LEN (0x29) /* finn: Evaluated from "((NVC638_UUID_LEN << 1) + 9)" */
+
+
+
+#define NVC638_CTRL_CMD_GET_UUID (0xc6380101) /* finn: Evaluated from "(FINN_AMPERE_SMC_EXEC_PARTITION_REF_EXEC_PARTITION_INTERFACE_ID << 8) | NVC638_CTRL_GET_UUID_PARAMS_MESSAGE_ID" */
+
+
+
+#define NVC638_CTRL_GET_UUID_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NVC638_CTRL_GET_UUID_PARAMS {
+    // C form: NvU8 uuid[NVC638_UUID_LEN];
+    NvU8 uuid[NVC638_UUID_LEN];
+
+    // C form: char uuidStr[NVC638_UUID_STR_LEN];
+    char uuidStr[NVC638_UUID_STR_LEN];
+} NVC638_CTRL_GET_UUID_PARAMS;
+
+// _ctrlc638_h_
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h b/src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h
new file mode 100644
index 0000000..c51ea2b
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h
@@ -0,0 +1,74 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrlxxxx.finn
+//
+
+
+
+#include "nvtypes.h"
+
+/* definitions shared by all CTRL interfaces */
+
+/* Basic command format:
+*   cmd_class    [31:16],
+*   cmd_reserved [15:15],
+*   cmd_reserved [14:14],
+*   cmd_category [13:8],
+*   cmd_index    [7:0]
+*/
+
+#define NVXXXX_CTRL_CMD_CLASS    31:16
+
+#define NVXXXX_CTRL_CMD_CATEGORY 13:8
+#define NVXXXX_CTRL_CMD_INDEX    7:0
+
+/* don't use DRF_NUM - not always available */
+# define NVXXXX_CTRL_CMD(cls,cat,idx) \
+    (((cls) << 16) | ((0) << 15) | ((0) << 14) \
+     | ((cat) << 8) | ((idx) & 0xFF))
+/*
+ * NVXXXX_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ * This command is valid for all classes. + * + * Possible status values returned are: + * NV_OK + */ +#define NVXXXX_CTRL_CMD_NULL (0x00000000) + +#define NVxxxx_CTRL_LEGACY_PRIVILEGED (0xC0) +#define NVxxxx_CTRL_LEGACY_NON_PRIVILEGED (0x80) + +typedef struct NVXXXX_CTRL_XXX_INFO { + NvU32 index; + NvU32 data; +} NVXXXX_CTRL_XXX_INFO; diff --git a/src/common/sdk/nvidia/inc/dpringbuffertypes.h b/src/common/sdk/nvidia/inc/dpringbuffertypes.h new file mode 100644 index 0000000..b61abaf --- /dev/null +++ b/src/common/sdk/nvidia/inc/dpringbuffertypes.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef INCLUDED_DPRINGBUFFERTYPES_H +#define INCLUDED_DPRINGBUFFERTYPES_H + +#define MAX_MESSAGE_LEN 100 +#define MAX_RECORD_COUNT 15 + +typedef enum _DP_RECORD_TYPE +{ + ASSERT_HIT = 135, + LOG_CALL = 136, +} DP_RECORD_TYPE; + +typedef struct _DpAssertHitRecord +{ + NvU64 breakpointAddr; +} DpAssertHitRecord, *PDpAssertHitRecord; + +typedef struct _DpLogCallRecord +{ + char msg[MAX_MESSAGE_LEN]; + NvU64 addr; +} DpLogCallRecord, *PDpLogCallRecord; + +typedef union _DpLogRecord +{ + DpAssertHitRecord dpAssertHitRecord; + DpLogCallRecord dpLogCallRecord; +} DpLogRecord, *PDpLogRecord; + +typedef enum _DpLogQueryType +{ + DpLogQueryTypeAssert = 1, + DpLogQueryTypeCallLog = 2, +} DpLogQueryType, *PDpLogQueryType; + +#endif //INCLUDED_DPRINGBUFFERTYPES_H diff --git a/src/common/sdk/nvidia/inc/g_finn_rm_api.h b/src/common/sdk/nvidia/inc/g_finn_rm_api.h new file mode 100644 index 0000000..1972d94 --- /dev/null +++ b/src/common/sdk/nvidia/inc/g_finn_rm_api.h @@ -0,0 +1,717 @@ +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// + + +#include + + +#include +#include + +#define FINN_INTERFACE_ID(T) (T ## _INTERFACE_ID) +#define FINN_MESSAGE_ID(T) (T ## _MESSAGE_ID) + +#if (defined(__cplusplus) && __cplusplus >= 201103L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L) +#define FINN_OFFSETOF(T,f) (offsetof(T, f)) +#else +#define FINN_OFFSETOF(T,f) ((NvU64)&((T*)0)->f) +#endif + +#if !defined(_MSC_VER) && (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) && !defined(__arm) +#define FINN_PACK_COMPOSITE(b) b __attribute__ ((packed)) +#else +#define FINN_PACK_COMPOSITE(b) b +#endif + +/* + * FINN serialization version. 
A version mismatch indicates incompatibility + * between the serializer and the deserializer. + * + * WARNING: Current serialization version is 0. This is a pre-release version of + * serialization and is only intended to be used in a driver and client compiled + * together. DO NOT use this in firmware or versioned clients. + */ +#define FINN_SERIALIZATION_VERSION 0 + +/* + * FINN compiler version + */ +#define FINN_VERSION_MAJOR 1 +#define FINN_VERSION_MINOR 23 +#define FINN_VERSION_PATCH 0 + +typedef struct FINN_RM_API +{ + NV_DECLARE_ALIGNED(NvU64 version, 8); + NV_DECLARE_ALIGNED(NvU64 payloadSize, 8); + NV_DECLARE_ALIGNED(NvU64 interface, 8); + NV_DECLARE_ALIGNED(NvU64 message, 8); +} FINN_RM_API; + + + + +/*! + * @brief Private functions not to be called directly + */ +/**@{*/ +NV_STATUS finnSerializeInternal_FINN_RM_API(NvU64 interface, NvU64 message, const char *api, char *dst, NvLength dst_size, NvBool seri_up); +NV_STATUS finnDeserializeInternal_FINN_RM_API(const char *src, NvLength src_size, char *api, NvLength api_size, NvBool deser_up); +/**@}*/ + + +/*! + * @brief Serializes an RM API control params struct and copies it into the + * serialization buffer as a FINN message. + * + * @note FinnRmApiSerializeDown is for serializing down the call stack. + * + * FinnRmApiSerializeUp is for serializing up the call stack. It + * frees memory allocated by FinnRmApiDeserializeDown. Use only + * when handling RM API control requests. + * + * @warning One of these may be unimplemented depending on platform. If both + * are implemented, misuse causes memory corruption and memory leaks. + * + * @param[in] interface FINN interface ID of the param struct. + * @param[in] message FINN message ID of the param struct. + * @param[in] api Source param struct from which to copy the data. + * @param[in] dst Destination buffer into which to copy the data. + * @param[in] dst_size Maximum size of the destination buffer measured in + * `NvU8` units. + * + * @retval NV_OK Serialization successful. + * @retval NV_ERR_INVALID_ARGUMENT Bad function arguments, invalid union + * selector, or invalid enum value. + * @retval NV_ERR_NOT_SUPPORTED Unserializable or nonexistent ID. + * @retval NV_ERR_NOT_COMPATIBLE Container count too large. + * @retval NV_ERR_OUT_OF_RANGE Ranged field exceeded bounds. + * @retval NV_ERR_BUFFER_TOO_SMALL Destination buffer size too small. + */ +/**@{*/ +static NV_INLINE NV_STATUS FinnRmApiSerializeUp(NvU64 interface, NvU64 message, const void *api, NvU8 *dst, NvLength dst_size) +{ + return finnSerializeInternal_FINN_RM_API(interface, message, (const char *) api, (char *) dst, dst_size, NV_TRUE); +} +static NV_INLINE NV_STATUS FinnRmApiSerializeDown(NvU64 interface, NvU64 message, const void *api, NvU8 *dst, NvLength dst_size) +{ + return finnSerializeInternal_FINN_RM_API(interface, message, (const char *) api, (char *) dst, dst_size, NV_FALSE); +} +/**@}*/ + + +/*! + * @brief The following APIs deserialize a FINN message from the serialization + * buffer and copy it into an RM API control params struct. + * + * @note FinnRmApiDeserializeDown is for deserializing down the call stack. It + * allocates deep buffers for primitive pointers in the serialization + * buffer, assuming that it remains in memory. Use only when handling + * RM API control requests. + * + * FinnRmApiDeserializeUp is for deserializing up the call stack. It + * copies deep buffers of primitive pointers into the params struct, + * assuming that memory is already allocated for them. 
Use only when + * receiving RM API control results. + * + * @warning One of these may be unimplemented depending on platform. If both + * are implemented, misuse causes memory corruption and memory leaks. + * + * @param[in] src Source buffer from which to copy the data. + * @param[in] src_size Maximum size of the source buffer measured in + * `NvU8` units. + * @param[in, out] api Destination param struct into which to copy the data. + * @param[in] api_size Size of the destination param struct measured in + * `char` units per `sizeof` operator. + * + * @retval NV_OK Deserialization successful. + * @retval NV_ERR_INVALID_ARGUMENT Bad function arguments, invalid union + * selector, invalid enum value, or + * mismatch between expected and actual + * serialized size. + * @retval NV_ERR_NOT_SUPPORTED Unserializable or nonexistent ID. + * @retval NV_ERR_OUT_OF_RANGE Ranged field exceeded bounds. + * @retval NV_ERR_BUFFER_TOO_SMALL Source/destination buffer too small. + * @retval NV_ERR_LIB_RM_VERSION_MISMATCH Version mismatch. + */ +/**@{*/ +static NV_INLINE NV_STATUS FinnRmApiDeserializeDown(NvU8 *src, NvLength src_size, void *api, NvLength api_size) +{ + return finnDeserializeInternal_FINN_RM_API((const char *) src, src_size / sizeof(NvU8), (char *) api, api_size, NV_FALSE); +} +static NV_INLINE NV_STATUS FinnRmApiDeserializeUp(NvU8 *src, NvLength src_size, void *api, NvLength api_size) +{ + return finnDeserializeInternal_FINN_RM_API((const char *) src, src_size / sizeof(NvU8), (char *) api, api_size, NV_TRUE); +} +/**@}*/ + + +/*! + * @brief Calculates the serialized size of an RM API param struct. + * + * @param[in] interface FINN interface ID of the param struct. + * @param[in] message FINN message ID of the param struct. + * @param[in] src Pointer to the param struct. + * + * @retval Non-zero serialized size of param struct on success. + * @retval 0 if the API is unsupported by serialization or src is NULL. + */ +NvU64 FinnRmApiGetSerializedSize(NvU64 interface, NvU64 message, const NvP64 src); + + +/*! + * @brief Fetches the unserialized size of an API param struct. + * + * @note The size is measured in `char` units like the `sizeof` operator. + * + * @param[in] interface FINN interface ID of the param struct. + * @param[in] message FINN message ID of the param struct. + * + * @retval Non-zero sizeof param struct on success. + * @retval 0 if the API is unsupported by serialization. 
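+ *
+ * Together with FinnRmApiGetSerializedSize, this supports sizing both ends
+ * of a round trip. A minimal sketch (allocFn is a hypothetical,
+ * caller-supplied allocator):
+ *
+ *   NvU64 apiSize = FinnRmApiGetUnserializedSize(interface, message);
+ *   void *pApi = (apiSize != 0) ? allocFn((NvLength)apiSize) : NULL;
+ *   if (pApi != NULL)
+ *       status = FinnRmApiDeserializeDown(src, srcSize, pApi, (NvLength)apiSize);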
+ */ +NvU64 FinnRmApiGetUnserializedSize(NvU64 interface, NvU64 message); + + +#define NV_RM_ALLOC_INTERFACE_INTERFACE_ID (0xA000U) +typedef FINN_RM_API NV_RM_ALLOC_INTERFACE; +#define FINN_NV01_ROOT_RESERVED_INTERFACE_ID (0x0U) +typedef FINN_RM_API FINN_NV01_ROOT_RESERVED; +#define FINN_NV01_ROOT_CLIENT_INTERFACE_ID (0xdU) +typedef FINN_RM_API FINN_NV01_ROOT_CLIENT; +#define FINN_NV01_ROOT_DIAG_INTERFACE_ID (0x4U) +typedef FINN_RM_API FINN_NV01_ROOT_DIAG; +#define FINN_NV01_ROOT_EVENT_INTERFACE_ID (0x5U) +typedef FINN_RM_API FINN_NV01_ROOT_EVENT; +#define FINN_NV01_ROOT_GPU_INTERFACE_ID (0x2U) +typedef FINN_RM_API FINN_NV01_ROOT_GPU; +#define FINN_NV01_ROOT_GPUACCT_INTERFACE_ID (0xbU) +typedef FINN_RM_API FINN_NV01_ROOT_GPUACCT; +#define FINN_NV01_ROOT_GSYNC_INTERFACE_ID (0x3U) +typedef FINN_RM_API FINN_NV01_ROOT_GSYNC; +#define FINN_NV01_ROOT_NVD_INTERFACE_ID (0x6U) +typedef FINN_RM_API FINN_NV01_ROOT_NVD; +#define FINN_NV01_ROOT_PROC_INTERFACE_ID (0x9U) +typedef FINN_RM_API FINN_NV01_ROOT_PROC; + +#define FINN_NV01_ROOT_SYNC_GPU_BOOST_INTERFACE_ID (0xaU) +typedef FINN_RM_API FINN_NV01_ROOT_SYNC_GPU_BOOST; +#define FINN_NV01_ROOT_SYSTEM_INTERFACE_ID (0x1U) +typedef FINN_RM_API FINN_NV01_ROOT_SYSTEM; +#define FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID (0x3dU) +typedef FINN_RM_API FINN_NV01_ROOT_OS_UNIX; +#define FINN_NV01_ROOT_VGPU_INTERFACE_ID (0xcU) +typedef FINN_RM_API FINN_NV01_ROOT_VGPU; +#define FINN_NV01_ROOT_OS_WINDOWS_INTERFACE_ID (0x3fU) +typedef FINN_RM_API FINN_NV01_ROOT_OS_WINDOWS; +#define FINN_NV01_CONTEXT_DMA_RESERVED_INTERFACE_ID (0x200U) +typedef FINN_RM_API FINN_NV01_CONTEXT_DMA_RESERVED; +#define FINN_NV01_CONTEXT_DMA_DMA_INTERFACE_ID (0x201U) +typedef FINN_RM_API FINN_NV01_CONTEXT_DMA_DMA; +#define FINN_NV01_TIMER_RESERVED_INTERFACE_ID (0x400U) +typedef FINN_RM_API FINN_NV01_TIMER_RESERVED; +#define FINN_NV01_TIMER_TMR_INTERFACE_ID (0x401U) +typedef FINN_RM_API FINN_NV01_TIMER_TMR; +#define FINN_FABRIC_MANAGER_SESSION_RESERVED_INTERFACE_ID (0xf00U) +typedef FINN_RM_API FINN_FABRIC_MANAGER_SESSION_RESERVED; +#define FINN_FABRIC_MANAGER_SESSION_FM_INTERFACE_ID (0xf01U) +typedef FINN_RM_API FINN_FABRIC_MANAGER_SESSION_FM; +#define FINN_NV0020_GPU_MANAGEMENT_RESERVED_INTERFACE_ID (0x2000U) +typedef FINN_RM_API FINN_NV0020_GPU_MANAGEMENT_RESERVED; +#define FINN_NV0020_GPU_MANAGEMENT_GPU_MGMT_INTERFACE_ID (0x2001U) +typedef FINN_RM_API FINN_NV0020_GPU_MANAGEMENT_GPU_MGMT; +#define FINN_NV01_MEMORY_SYSTEM_RESERVED_INTERFACE_ID (0x3e00U) +typedef FINN_RM_API FINN_NV01_MEMORY_SYSTEM_RESERVED; +#define FINN_NV01_MEMORY_SYSTEM_MEMORY_INTERFACE_ID (0x3e01U) +typedef FINN_RM_API FINN_NV01_MEMORY_SYSTEM_MEMORY; +#define FINN_NV01_ROOT_USER_RESERVED_INTERFACE_ID (0x4100U) +typedef FINN_RM_API FINN_NV01_ROOT_USER_RESERVED; +#define FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID (0x4101U) +typedef FINN_RM_API FINN_NV01_ROOT_USER_MEMORY; +#define FINN_NV_CE_UTILS_RESERVED_INTERFACE_ID (0x0050U) +typedef FINN_RM_API FINN_NV_CE_UTILS_RESERVED; +#define FINN_NV_CE_UTILS_UTILS_INTERFACE_ID (0x5001U) +typedef FINN_RM_API FINN_NV_CE_UTILS_UTILS; +#define FINN_NV04_DISPLAY_COMMON_RESERVED_INTERFACE_ID (0x7300U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_RESERVED; +#define FINN_NV04_DISPLAY_COMMON_COMMON_INTERFACE_ID (0x7305U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_COMMON; +#define FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID (0x7311U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_DFP; +#define FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID (0x7313U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_DP; + 
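+//
+// Note: each interface ID above and below is the concatenation of the class
+// and category fields from the basic command format in ctrlxxxx.h, so a full
+// control command value is (interfaceId << 8) | messageId. For example:
+//   FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID = 0xc63701
+//   (0xc63701 << 8) | 0x1 = 0xc6370101 = NVC637_CTRL_CMD_EXEC_PARTITIONS_CREATE
+//
+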
+#define FINN_NV04_DISPLAY_COMMON_EVENT_INTERFACE_ID (0x7303U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_EVENT; +#define FINN_NV04_DISPLAY_COMMON_INTERNAL_INTERFACE_ID (0x7304U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_INTERNAL; +#define FINN_NV04_DISPLAY_COMMON_PSR_INTERFACE_ID (0x7316U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_PSR; +#define FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID (0x7302U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_SPECIFIC; +#define FINN_NV04_DISPLAY_COMMON_STEREO_INTERFACE_ID (0x7317U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_STEREO; + +#define FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID (0x7301U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_SYSTEM; +#define FINN_NV01_FRAMEBUFFER_CONSOLE_INTERFACE_ID (0x007601U) +typedef FINN_RM_API FINN_NV01_FRAMEBUFFER_CONSOLE; +#define FINN_NV01_DEVICE_0_RESERVED_INTERFACE_ID (0x8000U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_RESERVED; +#define FINN_NV01_DEVICE_0_BIF_INTERFACE_ID (0x8001U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_BIF; +#define FINN_NV01_DEVICE_0_BSP_INTERFACE_ID (0x801cU) +typedef FINN_RM_API FINN_NV01_DEVICE_0_BSP; + +#define FINN_NV01_DEVICE_0_DMA_INTERFACE_ID (0x8018U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_DMA; +#define FINN_NV01_DEVICE_0_FB_INTERFACE_ID (0x8013U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_FB; +#define FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID (0x8017U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_FIFO; +#define FINN_NV01_DEVICE_0_GPU_INTERFACE_ID (0x8002U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_GPU; +#define FINN_NV01_DEVICE_0_GR_INTERFACE_ID (0x8011U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_GR; +#define FINN_NV01_DEVICE_0_HOST_INTERFACE_ID (0x8014U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_HOST; +#define FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID (0x8020U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_INTERNAL; +#define FINN_NV01_DEVICE_0_MSENC_INTERFACE_ID (0x801bU) +typedef FINN_RM_API FINN_NV01_DEVICE_0_MSENC; +#define FINN_NV01_DEVICE_0_NVJPG_INTERFACE_ID (0x801fU) +typedef FINN_RM_API FINN_NV01_DEVICE_0_NVJPG; +#define FINN_NV01_DEVICE_0_PERF_INTERFACE_ID (0x8019U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_PERF; + +#define FINN_NV01_DEVICE_0_OS_UNIX_INTERFACE_ID (0x801eU) +typedef FINN_RM_API FINN_NV01_DEVICE_0_OS_UNIX; + +#define FINN_NV0090_KERNEL_GRAPHICS_CONTEXT_INTERFACE_ID (0x9001U) +typedef FINN_RM_API FINN_NV0090_KERNEL_GRAPHICS_CONTEXT; +#define FINN_NV_SEMAPHORE_SURFACE_INTERFACE_ID (0x00da00U) +typedef FINN_RM_API FINN_NV_SEMAPHORE_SURFACE; +#define FINN_RM_USER_SHARED_DATA_INTERFACE_ID (0xde00U) +typedef FINN_RM_API FINN_RM_USER_SHARED_DATA; +#define FINN_NV_MEMORY_EXPORT_RESERVED_INTERFACE_ID (0xE000U) +typedef FINN_RM_API FINN_NV_MEMORY_EXPORT_RESERVED; +#define FINN_NV_MEMORY_EXPORT_INTERFACE_ID (0xE001U) +typedef FINN_RM_API FINN_NV_MEMORY_EXPORT; +#define FINN_IMEX_SESSION_INTERFACE_ID (0xf100U) +typedef FINN_RM_API FINN_IMEX_SESSION; +#define FINN_NV_MEMORY_FABRIC_RESERVED_INTERFACE_ID (0xf800U) +typedef FINN_RM_API FINN_NV_MEMORY_FABRIC_RESERVED; +#define FINN_NV_MEMORY_FABRIC_FABRIC_INTERFACE_ID (0xf801U) +typedef FINN_RM_API FINN_NV_MEMORY_FABRIC_FABRIC; +#define FINN_NV_MEMORY_FABRIC_IMPORT_V2_RESERVED_INTERFACE_ID (0xf900U) +typedef FINN_RM_API FINN_NV_MEMORY_FABRIC_IMPORT_V2_RESERVED; +#define FINN_NV_MEMORY_FABRIC_IMPORT_V2_IMPORT_INTERFACE_ID (0xf901U) +typedef FINN_RM_API FINN_NV_MEMORY_FABRIC_IMPORT_V2_IMPORT; +#define FINN_NV_MEMORY_FABRIC_IMPORTED_REF_RESERVED_INTERFACE_ID (0xfb00U) +typedef FINN_RM_API FINN_NV_MEMORY_FABRIC_IMPORTED_REF_RESERVED; 
+#define FINN_NV_MEMORY_FABRIC_IMPORTED_REF_IMPORT_REF_INTERFACE_ID (0xfb01U) +typedef FINN_RM_API FINN_NV_MEMORY_FABRIC_IMPORTED_REF_IMPORT_REF; +#define FINN_NV_MEMORY_MULTICAST_FABRIC_RESERVED_INTERFACE_ID (0xfd00U) +typedef FINN_RM_API FINN_NV_MEMORY_MULTICAST_FABRIC_RESERVED; +#define FINN_NV_MEMORY_MULTICAST_FABRIC_FABRIC_INTERFACE_ID (0xfd01U) +typedef FINN_RM_API FINN_NV_MEMORY_MULTICAST_FABRIC_FABRIC; +#define FINN_NV_MEMORY_MAPPER_INTERFACE_ID (0xfe01U) +typedef FINN_RM_API FINN_NV_MEMORY_MAPPER; +#define FINN_LOCK_STRESS_OBJECT_RESERVED_INTERFACE_ID (0x10000U) +typedef FINN_RM_API FINN_LOCK_STRESS_OBJECT_RESERVED; +#define FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID (0x10001U) +typedef FINN_RM_API FINN_LOCK_STRESS_OBJECT_LOCK_STRESS; + +#define FINN_NV20_SUBDEVICE_0_RESERVED_INTERFACE_ID (0x208000U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_RESERVED; + +#define FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID (0x208008U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_BIOS; +#define FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID (0x208018U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_BUS; +#define FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID (0x20802aU) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_CE; + +#define FINN_NV20_SUBDEVICE_0_CLK_INTERFACE_ID (0x208010U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_CLK; + +#define FINN_NV20_SUBDEVICE_0_DMA_INTERFACE_ID (0x208025U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_DMA; +#define FINN_NV20_SUBDEVICE_0_DMABUF_INTERFACE_ID (0x20803AU) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_DMABUF; +#define FINN_NV20_SUBDEVICE_0_ECC_INTERFACE_ID (0x208034U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_ECC; + +#define FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID (0x208003U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_EVENT; +#define FINN_NV20_SUBDEVICE_0_THERMAL_INTERFACE_ID (0x208005U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_THERMAL; + +#define FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID (0x208013U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_FB; +#define FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID (0x208011U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_FIFO; +#define FINN_NV20_SUBDEVICE_0_FLA_INTERFACE_ID (0x208035U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_FLA; +#define FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID (0x208031U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_FLCN; +#define FINN_NV20_SUBDEVICE_0_FUSE_INTERFACE_ID (0x208002U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_FUSE; + +#define FINN_NV20_SUBDEVICE_0_GPIO_INTERFACE_ID (0x208023U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_GPIO; + +#define FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID (0x208001U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_GPU; + +#define FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID (0x208012U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_GR; +#define FINN_NV20_SUBDEVICE_0_GRMGR_INTERFACE_ID (0x208038U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_GRMGR; +#define FINN_NV20_SUBDEVICE_0_GSP_INTERFACE_ID (0x208036U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_GSP; +#define FINN_NV20_SUBDEVICE_0_HSHUB_INTERFACE_ID (0x208041U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_HSHUB; +#define FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID (0x208006U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_I2C; +#define FINN_NV20_SUBDEVICE_0_PMGR_INTERFACE_ID (0x208026U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_PMGR; + +#define FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID (0x20800aU) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_INTERNAL; +#define FINN_NV20_SUBDEVICE_0_INTERNAL_2_INTERFACE_ID (0x20800bU) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_INTERNAL_2; +#define 
FINN_NV20_SUBDEVICE_0_LPWR_INTERFACE_ID (0x208028U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_LPWR; + +#define FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID (0x208017U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_MC; +#define FINN_NV20_SUBDEVICE_0_NNE_INTERFACE_ID (0x208037U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_NNE; + +#define FINN_NV20_SUBDEVICE_0_NVD_INTERFACE_ID (0x208024U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_NVD; + +#define FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID (0x208030U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_NVLINK; + +#define FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID (0x208020U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_PERF; + +#define FINN_NV20_SUBDEVICE_0_POWER_INTERFACE_ID (0x208027U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_POWER; + +#define FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID (0x208022U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_RC; + +#define FINN_NV20_SUBDEVICE_0_SEC2_INTERFACE_ID (0x208042U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_SEC2; + +#define FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID (0x208004U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_TIMER; + +#define FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID (0x20803dU) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_OS_UNIX; +#define FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID (0x208040U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL; +#define FINN_NV20_SUBDEVICE_0_VOLT_INTERFACE_ID (0x208032U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_VOLT; + +#define FINN_NV2081_BINAPI_RESERVED_INTERFACE_ID (0x208100U) +typedef FINN_RM_API FINN_NV2081_BINAPI_RESERVED; +#define FINN_NV2081_BINAPI_INTERFACE_ID (0x208101U) +typedef FINN_RM_API FINN_NV2081_BINAPI; +#define FINN_NV2082_BINAPI_RESERVED_INTERFACE_ID (0x208200U) +typedef FINN_RM_API FINN_NV2082_BINAPI_RESERVED; +#define FINN_NV2082_BINAPI_INTERFACE_ID (0x208201U) +typedef FINN_RM_API FINN_NV2082_BINAPI; +#define FINN_NV20_SUBDEVICE_DIAG_RESERVED_INTERFACE_ID (0x208f00U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_RESERVED; +#define FINN_NV20_SUBDEVICE_DIAG_BIF_INTERFACE_ID (0x208f07U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_BIF; +#define FINN_NV20_SUBDEVICE_DIAG_BUS_INTERFACE_ID (0x208f18U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_BUS; + +#define FINN_NV20_SUBDEVICE_DIAG_DMA_INTERFACE_ID (0x208f14U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_DMA; +#define FINN_NV20_SUBDEVICE_DIAG_EVENT_INTERFACE_ID (0x208f10U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_EVENT; +#define FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID (0x208f05U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_FB; +#define FINN_NV20_SUBDEVICE_DIAG_FBIO_INTERFACE_ID (0x208f0aU) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_FBIO; +#define FINN_NV20_SUBDEVICE_DIAG_FIFO_INTERFACE_ID (0x208f04U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_FIFO; +#define FINN_NV20_SUBDEVICE_DIAG_GPU_INTERFACE_ID (0x208f11U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_GPU; +#define FINN_NV20_SUBDEVICE_DIAG_GR_INTERFACE_ID (0x208f12U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_GR; + +#define FINN_NV20_SUBDEVICE_DIAG_MMU_INTERFACE_ID (0x208f0bU) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_MMU; +#define FINN_NV20_SUBDEVICE_DIAG_NVLINK_INTERFACE_ID (0x208f1AU) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_NVLINK; + +#define FINN_NV20_SUBDEVICE_DIAG_PMU_INTERFACE_ID (0x208f0cU) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_PMU; + +#define FINN_NV20_SUBDEVICE_DIAG_UCODE_COVERAGE_INTERFACE_ID (0x208f19U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_UCODE_COVERAGE; +#define 
FINN_NV30_GSYNC_RESERVED_INTERFACE_ID (0x30f100U) +typedef FINN_RM_API FINN_NV30_GSYNC_RESERVED; +#define FINN_NV30_GSYNC_GSYNC_INTERFACE_ID (0x30f101U) +typedef FINN_RM_API FINN_NV30_GSYNC_GSYNC; +#define FINN_NV40_I2C_RESERVED_INTERFACE_ID (0x402c00U) +typedef FINN_RM_API FINN_NV40_I2C_RESERVED; +#define FINN_NV40_I2C_I2C_INTERFACE_ID (0x402c01U) +typedef FINN_RM_API FINN_NV40_I2C_I2C; +#define FINN_NV50_THIRD_PARTY_P2P_P2P_INTERFACE_ID (0x503c01U) +typedef FINN_RM_API FINN_NV50_THIRD_PARTY_P2P_P2P; +#define FINN_NV50_THIRD_PARTY_P2P_RESERVED_INTERFACE_ID (0x503c00U) +typedef FINN_RM_API FINN_NV50_THIRD_PARTY_P2P_RESERVED; +#define FINN_NV50_CHANNEL_GPFIFO_RESERVED_INTERFACE_ID (0x506f00U) +typedef FINN_RM_API FINN_NV50_CHANNEL_GPFIFO_RESERVED; +#define FINN_NV50_CHANNEL_GPFIFO_GPFIFO_INTERFACE_ID (0x506f01U) +typedef FINN_RM_API FINN_NV50_CHANNEL_GPFIFO_GPFIFO; +#define FINN_NV50_DISPLAY_RESERVED_INTERFACE_ID (0x507000U) +typedef FINN_RM_API FINN_NV50_DISPLAY_RESERVED; +#define FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID (0x507001U) +typedef FINN_RM_API FINN_NV50_DISPLAY_CHNCTL; +#define FINN_NV50_DISPLAY_OR_INTERFACE_ID (0x507004U) +typedef FINN_RM_API FINN_NV50_DISPLAY_OR; +#define FINN_NV50_DISPLAY_RG_INTERFACE_ID (0x507002U) +typedef FINN_RM_API FINN_NV50_DISPLAY_RG; +#define FINN_NV50_DISPLAY_SYSTEM_INTERFACE_ID (0x507007U) +typedef FINN_RM_API FINN_NV50_DISPLAY_SYSTEM; + +#define FINN_NV50_DEFERRED_API_CLASS_RESERVED_INTERFACE_ID (0x508000U) +typedef FINN_RM_API FINN_NV50_DEFERRED_API_CLASS_RESERVED; +#define FINN_NV50_DEFERRED_API_CLASS_DEFERRED_INTERFACE_ID (0x508001U) +typedef FINN_RM_API FINN_NV50_DEFERRED_API_CLASS_DEFERRED; +#define FINN_GT200_DEBUGGER_RESERVED_INTERFACE_ID (0x83de00U) +typedef FINN_RM_API FINN_GT200_DEBUGGER_RESERVED; +#define FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID (0x83de03U) +typedef FINN_RM_API FINN_GT200_DEBUGGER_DEBUG; +#define FINN_GT200_DEBUGGER_FIFO_INTERFACE_ID (0x83de02U) +typedef FINN_RM_API FINN_GT200_DEBUGGER_FIFO; +#define FINN_GT200_DEBUGGER_INTERNAL_INTERFACE_ID (0x83de04U) +typedef FINN_RM_API FINN_GT200_DEBUGGER_INTERNAL; + +#define FINN_NV9010_VBLANK_CALLBACK_RESERVED_INTERFACE_ID (0x901000U) +typedef FINN_RM_API FINN_NV9010_VBLANK_CALLBACK_RESERVED; +#define FINN_NV9010_VBLANK_CALLBACK_INTERFACE_ID (0x901001U) +typedef FINN_RM_API FINN_NV9010_VBLANK_CALLBACK; +#define FINN_FERMI_CONTEXT_SHARE_A_RESERVED_INTERFACE_ID (0x906700U) +typedef FINN_RM_API FINN_FERMI_CONTEXT_SHARE_A_RESERVED; +#define FINN_FERMI_CONTEXT_SHARE_A_TPC_PARTITION_INTERFACE_ID (0x906701U) +typedef FINN_RM_API FINN_FERMI_CONTEXT_SHARE_A_TPC_PARTITION; +#define FINN_FERMI_CONTEXT_SHARE_A_CWD_WATERMARK_INTERFACE_ID (0x906702U) +typedef FINN_RM_API FINN_FERMI_CONTEXT_SHARE_A_CWD_WATERMARK; +#define FINN_GF100_CHANNEL_GPFIFO_RESERVED_INTERFACE_ID (0x906f00U) +typedef FINN_RM_API FINN_GF100_CHANNEL_GPFIFO_RESERVED; +#define FINN_GF100_CHANNEL_GPFIFO_GPFIFO_INTERFACE_ID (0x906f01U) +typedef FINN_RM_API FINN_GF100_CHANNEL_GPFIFO_GPFIFO; +#define FINN_GF100_DISP_SW_RESERVED_INTERFACE_ID (0x907200U) +typedef FINN_RM_API FINN_GF100_DISP_SW_RESERVED; +#define FINN_GF100_DISP_SW_DISP_SW_INTERFACE_ID (0x907201U) +typedef FINN_RM_API FINN_GF100_DISP_SW_DISP_SW; +#define FINN_GF100_TIMED_SEMAPHORE_SW_RESERVED_INTERFACE_ID (0x907400U) +typedef FINN_RM_API FINN_GF100_TIMED_SEMAPHORE_SW_RESERVED; +#define FINN_GF100_TIMED_SEMAPHORE_SW_SEM_INTERFACE_ID (0x907401U) +typedef FINN_RM_API FINN_GF100_TIMED_SEMAPHORE_SW_SEM; +#define FINN_GF100_REMAPPER_RESERVED_INTERFACE_ID (0x907f00U) +typedef 
FINN_RM_API FINN_GF100_REMAPPER_RESERVED; +#define FINN_GF100_REMAPPER_REMAPPER_INTERFACE_ID (0x907f01U) +typedef FINN_RM_API FINN_GF100_REMAPPER_REMAPPER; +#define FINN_GF100_ZBC_CLEAR_RESERVED_INTERFACE_ID (0x909600U) +typedef FINN_RM_API FINN_GF100_ZBC_CLEAR_RESERVED; +#define FINN_GF100_ZBC_CLEAR_ZBC_INTERFACE_ID (0x909601U) +typedef FINN_RM_API FINN_GF100_ZBC_CLEAR_ZBC; +#define FINN_GF100_PROFILER_RESERVED_INTERFACE_ID (0x90cc00U) +typedef FINN_RM_API FINN_GF100_PROFILER_RESERVED; +#define FINN_GF100_PROFILER_HWPM_INTERFACE_ID (0x90cc01U) +typedef FINN_RM_API FINN_GF100_PROFILER_HWPM; +#define FINN_GF100_PROFILER_NVLINK_INTERFACE_ID (0x90cc02U) +typedef FINN_RM_API FINN_GF100_PROFILER_NVLINK; +#define FINN_GF100_PROFILER_POWER_INTERFACE_ID (0x90cc03U) +typedef FINN_RM_API FINN_GF100_PROFILER_POWER; +#define FINN_NV_EVENT_BUFFER_RESERVED_INTERFACE_ID (0x90cd00U) +typedef FINN_RM_API FINN_NV_EVENT_BUFFER_RESERVED; +#define FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID (0x90cd01U) +typedef FINN_RM_API FINN_NV_EVENT_BUFFER_EVENT; +#define FINN_GF100_SUBDEVICE_GRAPHICS_RESERVED_INTERFACE_ID (0x90e000U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_GRAPHICS_RESERVED; +#define FINN_GF100_SUBDEVICE_GRAPHICS_GRAPHICS_INTERFACE_ID (0x90e001U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_GRAPHICS_GRAPHICS; +#define FINN_GF100_SUBDEVICE_FB_RESERVED_INTERFACE_ID (0x90e100U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_FB_RESERVED; +#define FINN_GF100_SUBDEVICE_FB_FB_INTERFACE_ID (0x90e101U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_FB_FB; +#define FINN_GF100_SUBDEVICE_MASTER_RESERVED_INTERFACE_ID (0x90e600U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_MASTER_RESERVED; +#define FINN_GF100_SUBDEVICE_MASTER_MASTER_INTERFACE_ID (0x90e601U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_MASTER_MASTER; +#define FINN_GF100_SUBDEVICE_INFOROM_RESERVED_INTERFACE_ID (0x90e700U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_INFOROM_RESERVED; +#define FINN_GF100_SUBDEVICE_INFOROM_BBX_INTERFACE_ID (0x90e701U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_INFOROM_BBX; + +#define FINN_GF100_HDACODEC_RESERVED_INTERFACE_ID (0x90ec00U) +typedef FINN_RM_API FINN_GF100_HDACODEC_RESERVED; +#define FINN_GF100_HDACODEC_HDACODEC_INTERFACE_ID (0x90ec01U) +typedef FINN_RM_API FINN_GF100_HDACODEC_HDACODEC; +#define FINN_FERMI_VASPACE_A_RESERVED_INTERFACE_ID (0x90f100U) +typedef FINN_RM_API FINN_FERMI_VASPACE_A_RESERVED; +#define FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID (0x90f101U) +typedef FINN_RM_API FINN_FERMI_VASPACE_A_VASPACE; +#define FINN_KEPLER_CHANNEL_GROUP_A_RESERVED_INTERFACE_ID (0xa06c00U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GROUP_A_RESERVED; +#define FINN_KEPLER_CHANNEL_GROUP_A_GPFIFO_INTERFACE_ID (0xa06c01U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GROUP_A_GPFIFO; +#define FINN_KEPLER_CHANNEL_GROUP_A_INTERNAL_INTERFACE_ID (0xa06c02U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GROUP_A_INTERNAL; +#define FINN_KEPLER_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID (0xa06f00U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GPFIFO_A_RESERVED; +#define FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID (0xa06f01U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO; +#define FINN_KEPLER_CHANNEL_GPFIFO_A_INTERNAL_INTERFACE_ID (0xa06f03U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GPFIFO_A_INTERNAL; +#define FINN_KEPLER_DEVICE_VGPU_RESERVED_INTERFACE_ID (0xa08000U) +typedef FINN_RM_API FINN_KEPLER_DEVICE_VGPU_RESERVED; +#define FINN_KEPLER_DEVICE_VGPU_VGPU_DISPLAY_INTERFACE_ID (0xa08001U) +typedef FINN_RM_API FINN_KEPLER_DEVICE_VGPU_VGPU_DISPLAY; +#define 
FINN_KEPLER_DEVICE_VGPU_VGPU_MEMORY_INTERFACE_ID (0xa08002U) +typedef FINN_RM_API FINN_KEPLER_DEVICE_VGPU_VGPU_MEMORY; +#define FINN_KEPLER_DEVICE_VGPU_VGPU_OTHERS_INTERFACE_ID (0xa08003U) +typedef FINN_RM_API FINN_KEPLER_DEVICE_VGPU_VGPU_OTHERS; +#define FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID (0xa08101U) +typedef FINN_RM_API FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG; + +#define FINN_NVA083_GRID_DISPLAYLESS_RESERVED_INTERFACE_ID (0xa08300U) +typedef FINN_RM_API FINN_NVA083_GRID_DISPLAYLESS_RESERVED; +#define FINN_NVA083_GRID_DISPLAYLESS_VIRTUAL_DISPLAY_INTERFACE_ID (0xa08301U) +typedef FINN_RM_API FINN_NVA083_GRID_DISPLAYLESS_VIRTUAL_DISPLAY; +#define FINN_NVA084_KERNEL_HOST_VGPU_DEVICE_KERNEL_HOST_VGPU_DEVICE_INTERFACE_ID (0xa08401U) +typedef FINN_RM_API FINN_NVA084_KERNEL_HOST_VGPU_DEVICE_KERNEL_HOST_VGPU_DEVICE; +#define FINN_NVENC_SW_SESSION_NVENC_SW_SESSION_INTERFACE_ID (0xa0bc01U) +typedef FINN_RM_API FINN_NVENC_SW_SESSION_NVENC_SW_SESSION; +#define FINN_NVFBC_SW_SESSION_NVFBC_SW_SESSION_INTERFACE_ID (0xa0bd01U) +typedef FINN_RM_API FINN_NVFBC_SW_SESSION_NVFBC_SW_SESSION; +#define FINN_GK110_SUBDEVICE_GRAPHICS_RESERVED_INTERFACE_ID (0xa0e000U) +typedef FINN_RM_API FINN_GK110_SUBDEVICE_GRAPHICS_RESERVED; +#define FINN_GK110_SUBDEVICE_GRAPHICS_GRAPHICS_INTERFACE_ID (0xa0e001U) +typedef FINN_RM_API FINN_GK110_SUBDEVICE_GRAPHICS_GRAPHICS; +#define FINN_GK110_SUBDEVICE_FB_RESERVED_INTERFACE_ID (0xa0e100U) +typedef FINN_RM_API FINN_GK110_SUBDEVICE_FB_RESERVED; +#define FINN_GK110_SUBDEVICE_FB_FB_INTERFACE_ID (0xa0e101U) +typedef FINN_RM_API FINN_GK110_SUBDEVICE_FB_FB; +#define FINN_MAXWELL_FAULT_BUFFER_A_RESERVED_INTERFACE_ID (0xb06900U) +typedef FINN_RM_API FINN_MAXWELL_FAULT_BUFFER_A_RESERVED; +#define FINN_MAXWELL_FAULT_BUFFER_A_FAULTBUFFER_INTERFACE_ID (0xb06901U) +typedef FINN_RM_API FINN_MAXWELL_FAULT_BUFFER_A_FAULTBUFFER; +#define FINN_MAXWELL_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID (0xb06f00U) +typedef FINN_RM_API FINN_MAXWELL_CHANNEL_GPFIFO_A_RESERVED; +#define FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID (0xb06f01U) +typedef FINN_RM_API FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO; +#define FINN_MAXWELL_PROFILER_RESERVED_INTERFACE_ID (0xb0cc00U) +typedef FINN_RM_API FINN_MAXWELL_PROFILER_RESERVED; +#define FINN_MAXWELL_PROFILER_INTERNAL_INTERFACE_ID (0xb0cc02U) +typedef FINN_RM_API FINN_MAXWELL_PROFILER_INTERNAL; +#define FINN_MAXWELL_PROFILER_POWER_INTERFACE_ID (0xb0cc03U) +typedef FINN_RM_API FINN_MAXWELL_PROFILER_POWER; +#define FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID (0xb0cc01U) +typedef FINN_RM_API FINN_MAXWELL_PROFILER_PROFILER; +#define FINN_MAXWELL_PROFILER_CONTEXT_RESERVED_INTERFACE_ID (0xb1cc00U) +typedef FINN_RM_API FINN_MAXWELL_PROFILER_CONTEXT_RESERVED; +#define FINN_MAXWELL_PROFILER_CONTEXT_PROFILER_INTERFACE_ID (0xb1cc01U) +typedef FINN_RM_API FINN_MAXWELL_PROFILER_CONTEXT_PROFILER; +#define FINN_MAXWELL_PROFILER_DEVICE_RESERVED_INTERFACE_ID (0xb2cc00U) +typedef FINN_RM_API FINN_MAXWELL_PROFILER_DEVICE_RESERVED; + +#define FINN_MAXWELL_SEC2_SEC2_INTERFACE_ID (0xb6b901U) +typedef FINN_RM_API FINN_MAXWELL_SEC2_SEC2; +#define FINN_GP100_SUBDEVICE_GRAPHICS_RESERVED_INTERFACE_ID (0xc0e000U) +typedef FINN_RM_API FINN_GP100_SUBDEVICE_GRAPHICS_RESERVED; +#define FINN_GP100_SUBDEVICE_GRAPHICS_GRAPHICS_INTERFACE_ID (0xc0e001U) +typedef FINN_RM_API FINN_GP100_SUBDEVICE_GRAPHICS_GRAPHICS; +#define FINN_GP100_SUBDEVICE_FB_RESERVED_INTERFACE_ID (0xc0e100U) +typedef FINN_RM_API FINN_GP100_SUBDEVICE_FB_RESERVED; +#define FINN_GP100_SUBDEVICE_FB_FB_INTERFACE_ID (0xc0e101U) 
+typedef FINN_RM_API FINN_GP100_SUBDEVICE_FB_FB; +#define FINN_VOLTA_GSP_GSP_INTERFACE_ID (0xc31001U) +typedef FINN_RM_API FINN_VOLTA_GSP_GSP; +#define FINN_ACCESS_COUNTER_NOTIFY_BUFFER_RESERVED_INTERFACE_ID (0xc36500U) +typedef FINN_RM_API FINN_ACCESS_COUNTER_NOTIFY_BUFFER_RESERVED; +#define FINN_ACCESS_COUNTER_NOTIFY_BUFFER_ACCESS_CNTR_BUFFER_INTERFACE_ID (0xc36501U) +typedef FINN_RM_API FINN_ACCESS_COUNTER_NOTIFY_BUFFER_ACCESS_CNTR_BUFFER; +#define FINN_MMU_FAULT_BUFFER_RESERVED_INTERFACE_ID (0xc36900U) +typedef FINN_RM_API FINN_MMU_FAULT_BUFFER_RESERVED; +#define FINN_MMU_FAULT_BUFFER_MMU_FAULT_BUFFER_INTERFACE_ID (0xc36901U) +typedef FINN_RM_API FINN_MMU_FAULT_BUFFER_MMU_FAULT_BUFFER; +#define FINN_VOLTA_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID (0xc36f00U) +typedef FINN_RM_API FINN_VOLTA_CHANNEL_GPFIFO_A_RESERVED; +#define FINN_VOLTA_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID (0xc36f01U) +typedef FINN_RM_API FINN_VOLTA_CHANNEL_GPFIFO_A_GPFIFO; +#define FINN_VOLTA_CHANNEL_GPFIFO_A_INTERNAL_INTERFACE_ID (0xc36f03U) +typedef FINN_RM_API FINN_VOLTA_CHANNEL_GPFIFO_A_INTERNAL; +#define FINN_NVC370_DISPLAY_RESERVED_INTERFACE_ID (0xc37000U) +typedef FINN_RM_API FINN_NVC370_DISPLAY_RESERVED; +#define FINN_NVC370_DISPLAY_CHNCTL_INTERFACE_ID (0xc37001U) +typedef FINN_RM_API FINN_NVC370_DISPLAY_CHNCTL; +#define FINN_NVC370_DISPLAY_EVENT_INTERFACE_ID (0xc37009U) +typedef FINN_RM_API FINN_NVC370_DISPLAY_EVENT; +#define FINN_NVC370_DISPLAY_OR_INTERFACE_ID (0xc37004U) +typedef FINN_RM_API FINN_NVC370_DISPLAY_OR; +#define FINN_NVC370_DISPLAY_RG_INTERFACE_ID (0xc37002U) +typedef FINN_RM_API FINN_NVC370_DISPLAY_RG; +#define FINN_NVC370_DISPLAY_VERIF_INTERFACE_ID (0xc37006U) +typedef FINN_RM_API FINN_NVC370_DISPLAY_VERIF; +#define FINN_NVC372_DISPLAY_SW_RESERVED_INTERFACE_ID (0xc37200U) +typedef FINN_RM_API FINN_NVC372_DISPLAY_SW_RESERVED; +#define FINN_NVC372_DISPLAY_SW_CHNCTL_INTERFACE_ID (0xc37201U) +typedef FINN_RM_API FINN_NVC372_DISPLAY_SW_CHNCTL; + +#define FINN_GV100_SUBDEVICE_GRAPHICS_RESERVED_INTERFACE_ID (0xc3e000U) +typedef FINN_RM_API FINN_GV100_SUBDEVICE_GRAPHICS_RESERVED; +#define FINN_GV100_SUBDEVICE_GRAPHICS_GRAPHICS_INTERFACE_ID (0xc3e001U) +typedef FINN_RM_API FINN_GV100_SUBDEVICE_GRAPHICS_GRAPHICS; +#define FINN_GV100_SUBDEVICE_FB_RESERVED_INTERFACE_ID (0xc3e100U) +typedef FINN_RM_API FINN_GV100_SUBDEVICE_FB_RESERVED; +#define FINN_GV100_SUBDEVICE_FB_FB_INTERFACE_ID (0xc3e101U) +typedef FINN_RM_API FINN_GV100_SUBDEVICE_FB_FB; +#define FINN_AMPERE_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID (0xc56f00U) +typedef FINN_RM_API FINN_AMPERE_CHANNEL_GPFIFO_A_RESERVED; +#define FINN_AMPERE_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID (0xc56f01U) +typedef FINN_RM_API FINN_AMPERE_CHANNEL_GPFIFO_A_GPFIFO; +#define FINN_AMPERE_SMC_PARTITION_REF_RESERVED_INTERFACE_ID (0xc63700U) +typedef FINN_RM_API FINN_AMPERE_SMC_PARTITION_REF_RESERVED; +#define FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID (0xc63701U) +typedef FINN_RM_API FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS; +#define FINN_AMPERE_SMC_EXEC_PARTITION_REF_RESERVED_INTERFACE_ID (0xc63800U) +typedef FINN_RM_API FINN_AMPERE_SMC_EXEC_PARTITION_REF_RESERVED; +#define FINN_AMPERE_SMC_EXEC_PARTITION_REF_EXEC_PARTITION_INTERFACE_ID (0xc63801U) +typedef FINN_RM_API FINN_AMPERE_SMC_EXEC_PARTITION_REF_EXEC_PARTITION; +#define FINN_MMU_VIDMEM_ACCESS_BIT_BUFFER_RESERVED_INTERFACE_ID (0xc76300U) +typedef FINN_RM_API FINN_MMU_VIDMEM_ACCESS_BIT_BUFFER_RESERVED; +#define FINN_MMU_VIDMEM_ACCESS_BIT_BUFFER_VIDMEM_ACCESS_BIT_BUFFER_INTERFACE_ID (0xc76301U) +typedef 
FINN_RM_API FINN_MMU_VIDMEM_ACCESS_BIT_BUFFER_VIDMEM_ACCESS_BIT_BUFFER;
+#define FINN_NV_CONFIDENTIAL_COMPUTE_RESERVED_INTERFACE_ID (0xcb3300U)
+typedef FINN_RM_API FINN_NV_CONFIDENTIAL_COMPUTE_RESERVED;
+#define FINN_NV_CONFIDENTIAL_COMPUTE_CONF_COMPUTE_INTERFACE_ID (0xcb3301U)
+typedef FINN_RM_API FINN_NV_CONFIDENTIAL_COMPUTE_CONF_COMPUTE;
+
+#define FINN_NV_COUNTER_COLLECTION_UNIT_RESERVED_INTERFACE_ID (0xcbca00U)
+typedef FINN_RM_API FINN_NV_COUNTER_COLLECTION_UNIT_RESERVED;
+#define FINN_NV_COUNTER_COLLECTION_UNIT_CCU_INTERFACE_ID (0xcbca01U)
+typedef FINN_RM_API FINN_NV_COUNTER_COLLECTION_UNIT_CCU;
+#define FINN_NV_SCHEDULER_RESERVED_INTERFACE_ID (0xcbcb00U)
+typedef FINN_RM_API FINN_NV_SCHEDULER_RESERVED;
+#define FINN_NV_SCHEDULER_SCHEDULER_INTERFACE_ID (0xcbcb01U)
+typedef FINN_RM_API FINN_NV_SCHEDULER_SCHEDULER;
+#define FINN_NVE2_SYNCPOINT_BASE_RESERVED_INTERFACE_ID (0xe2ad00U)
+typedef FINN_RM_API FINN_NVE2_SYNCPOINT_BASE_RESERVED;
+#define FINN_NVE2_SYNCPOINT_BASE_SYNCPOINT_BASE_INTERFACE_ID (0xe2ad01U)
+typedef FINN_RM_API FINN_NVE2_SYNCPOINT_BASE_SYNCPOINT_BASE;
diff --git a/src/common/sdk/nvidia/inc/mmu_fmt_types.h b/src/common/sdk/nvidia/inc/mmu_fmt_types.h
new file mode 100644
index 0000000..13bb88f
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/mmu_fmt_types.h
@@ -0,0 +1,151 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: mmu_fmt_types.finn
+//
+
+
+
+
+/*!
+ * @file mmu_fmt_types.h
+ *
+ * @brief Types used to describe MMU HW formats.
+ */
+#include "nvtypes.h"
+
+// Forward declarations.
+
+
+/*!
+ * Generic MMU page directory/table level format description.
+ *
+ * Since the terminology of page directories and tables varies,
+ * the following describes the interpretation assumed here.
+ *
+ * Each level of virtual address translation is described by a range of
+ * virtual address bits.
+ * These bits index into a contiguous range of physical memory referred to
+ * generally as a "page level."
+ * Page level memory is interpreted as an array of entries, with each entry
+ * describing the next step of virtual to physical translation.
+ *
+ * Each entry in a given level may be interpreted as either a PDE or PTE.
+ * 1. A PDE (page directory entry) points to one or more "sub-levels" that
+ *    continue the VA translation recursively.
+ * 2. A PTE (page table entry) is the base case, pointing to a physical page.
+ *
+ * The decision to treat an entry as a PDE or PTE may be static for a level.
+ * Levels that only contain PDEs are referred to as page directories.
+ * Levels that only contain PTEs are referred to as page tables.
+ *
+ * However, some formats have levels that may contain a mix of PDEs and PTEs,
+ * with the interpretation based on a "cutoff" bit in each entry (e.g. PTE
+ * valid bit). Such levels are referred to as "polymorphic page levels" since
+ * they can be viewed as both a page directory and a page table.
+ */
+typedef struct MMU_FMT_LEVEL {
+    /*!
+     * First virtual address bit that this page level covers.
+     */
+    NvU8 virtAddrBitLo;
+
+    /*!
+     * Last virtual address bit that this page level covers.
+     */
+    NvU8 virtAddrBitHi;
+
+    /*!
+     * Size in bytes of each entry within a level instance.
+     */
+    NvU8 entrySize;
+
+    /*!
+     * Indicates if this level can contain PTEs.
+     */
+    NvBool bPageTable;
+
+    /*!
+     * Number of sub-levels pointed to by PDEs in this level in
+     * range [0, MMU_FMT_MAX_SUB_LEVELS].
+     * 0 indicates this level cannot contain PDEs.
+     */
+    NvU8 numSubLevels;
+
+    /*!
+     * Information tag used to decode internal level naming.
+     * Used for verif.
+     */
+    NvU32 pageLevelIdTag;
+
+
+    /*!
+     * Array of sub-level formats of length numSubLevels.
+     *
+     * @warning This array results in a circular reference to MMU_FMT_LEVEL.
+     *          This can present an issue for FINN serialization and may have
+     *          to be refactored before MMU_FMT_LEVEL can be serialized.
+     */
+    NV_DECLARE_ALIGNED(struct MMU_FMT_LEVEL *subLevels, 8);
+} MMU_FMT_LEVEL;
+
+/*!
+ * Maximum number of pointers to sub-levels within a page directory entry.
+ *
+ * Standard page directory entries (PDEs) point to a single sub-level,
+ * either the next page directory level in the topology or a leaf page table.
+ *
+ * However, some formats contain PDEs that point to more than one sub-level.
+ * These sub-levels are translated by HW in parallel to support multiple
+ * page sizes at a higher granularity (e.g. for migration between
+ * 4K system memory pages and big video memory pages for GPU MMU).
+ *
+ * The currently supported formats have a maximum of 2 parallel sub-levels,
+ * often referred to as "dual PDE" or "dual page table" support.
+ *
+ * Example for Fermi GPU HW:
+ *   Sub-level 0 corresponds to the big page table pointer.
+ *   Sub-level 1 corresponds to the small page table pointer.
+ *
+ * This number is very unlikely to change, but it is defined to
+ * simplify SW handling, encouraging loops over "dual copy-paste."
+ */
+#define MMU_FMT_MAX_SUB_LEVELS 2
+
+/*!
+ * Valid values for the pageLevelIdTag field.
+ * These values are used to identify the internal naming of the page level.
+ * Used for verif purposes.
+ */
+#define MMU_FMT_PT_SURF_ID_PD0    0
+#define MMU_FMT_PT_SURF_ID_PD1    1
+#define MMU_FMT_PT_SURF_ID_PD2    2
+#define MMU_FMT_PT_SURF_ID_PD3    3
+#define MMU_FMT_PT_SURF_ID_PD4    4
+#define MMU_FMT_PT_SURF_ID_PT_4K  5
+#define MMU_FMT_PT_SURF_ID_PT_BIG 6
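+
+// For illustration, a minimal sketch of walking the level topology described
+// above (assumes the subLevels array is fully populated; recursion ends
+// where numSubLevels is 0):
+//
+//   static NvU32 mmuFmtCountLevels(const MMU_FMT_LEVEL *pLevel)
+//   {
+//       NvU32 count = 1; // count this level itself
+//       NvU8  i;
+//       for (i = 0; i < pLevel->numSubLevels; i++)
+//       {
+//           count += mmuFmtCountLevels(&pLevel->subLevels[i]);
+//       }
+//       return count;
+//   }
diff --git a/src/common/sdk/nvidia/inc/nv-hypervisor.h b/src/common/sdk/nvidia/inc/nv-hypervisor.h
new file mode 100644
index 0000000..a2b2649
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/nv-hypervisor.h
@@ -0,0 +1,93 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.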
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_HYPERVISOR_H_ +#define _NV_HYPERVISOR_H_ + +#include + +// Enums for supported hypervisor types. +// New hypervisor type should be added before OS_HYPERVISOR_UNKNOWN +typedef enum _HYPERVISOR_TYPE +{ + OS_HYPERVISOR_XEN = 0, + OS_HYPERVISOR_VMWARE, + OS_HYPERVISOR_HYPERV, + OS_HYPERVISOR_KVM, + OS_HYPERVISOR_UNKNOWN +} HYPERVISOR_TYPE; + +#define CMD_VFIO_WAKE_REMOVE_GPU 1 +#define CMD_VGPU_VFIO_PRESENT 2 +#define CMD_VFIO_PCI_CORE_PRESENT 3 + +#define MAX_VF_COUNT_PER_GPU 64 + +typedef enum _VGPU_TYPE_INFO +{ + VGPU_TYPE_NAME = 0, + VGPU_TYPE_DESCRIPTION, + VGPU_TYPE_INSTANCES, +} VGPU_TYPE_INFO; + +typedef struct +{ + void *nv; + NvU32 domain; + NvU32 bus; + NvU32 device; + NvU32 return_status; +} vgpu_vfio_info; + +typedef struct +{ + NvU32 domain; + NvU8 bus; + NvU8 slot; + NvU8 function; + NvBool isNvidiaAttached; + NvBool isMdevAttached; +} vgpu_vf_pci_info; + +typedef enum VGPU_CMD_PROCESS_VF_INFO_E +{ + NV_VGPU_SAVE_VF_INFO = 0, + NV_VGPU_REMOVE_VF_PCI_INFO = 1, + NV_VGPU_REMOVE_VF_MDEV_INFO = 2, + NV_VGPU_GET_VF_INFO = 3 +} VGPU_CMD_PROCESS_VF_INFO; + +typedef enum VGPU_DEVICE_STATE_E +{ + NV_VGPU_DEV_UNUSED = 0, + NV_VGPU_DEV_OPENED = 1, + NV_VGPU_DEV_IN_USE = 2 +} VGPU_DEVICE_STATE; + +/* + * Function prototypes + */ + +HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void); + +#endif // _NV_HYPERVISOR_H_ diff --git a/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h b/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h new file mode 100644 index 0000000..183f9b4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+
+#endif // _NV_HYPERVISOR_H_
diff --git a/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h b/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h
new file mode 100644
index 0000000..183f9b4
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h
@@ -0,0 +1,41 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_KERNEL_INTERFACE_API_H
+#define _NV_KERNEL_INTERFACE_API_H
+/**************************************************************************************************************
+*
+*   File:  nv-kernel-interface-api.h
+*
+*   Description:
+*       Defines the NV API related macros.
+*
+**************************************************************************************************************/
+
+#if NVOS_IS_UNIX && NVCPU_IS_X86_64 && defined(__use_altstack__)
+#define NV_API_CALL __attribute__((altstack(0)))
+#else
+#define NV_API_CALL
+#endif
+
+#endif /* _NV_KERNEL_INTERFACE_API_H */
diff --git a/src/common/sdk/nvidia/inc/nv_stdarg.h b/src/common/sdk/nvidia/inc/nv_stdarg.h
new file mode 100644
index 0000000..b23f7f7
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/nv_stdarg.h
@@ -0,0 +1,39 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _NV_STDARG_H_
+#define _NV_STDARG_H_
+
+#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
+    #include "conftest.h"
+    #if defined(NV_LINUX_STDARG_H_PRESENT)
+        #include <linux/stdarg.h>
+    #else
+        #include <stdarg.h>
+    #endif
+#else
+    #include <stdarg.h>
+#endif
+
+#endif // _NV_STDARG_H_
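+
+/*
+ * Editor's illustration (not part of the original header): a hypothetical
+ * variadic function showing the two headers above working together --
+ * NV_API_CALL from nv-kernel-interface-api.h on the declaration, and
+ * va_list/va_start/va_arg/va_end supplied via nv_stdarg.h.
+ */
+static int NV_API_CALL nv_sum_args_sketch(int count, ...)
+{
+    va_list args;
+    int total = 0;
+
+    va_start(args, count);
+    while (count-- > 0)
+    {
+        total += va_arg(args, int);
+    }
+    va_end(args);
+
+    return total;
+}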
diff --git a/src/common/sdk/nvidia/inc/nv_vgpu_types.h b/src/common/sdk/nvidia/inc/nv_vgpu_types.h
new file mode 100644
index 0000000..eea5638
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/nv_vgpu_types.h
@@ -0,0 +1,61 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: nv_vgpu_types.finn
+//
+
+
+
+/* XAPIGEN - this file is not suitable for (nor needed by) xapigen. */
+/*           Rather than #ifdef out every such include in every sdk */
+/*           file, punt here.                                       */
+#include "nvtypes.h"
+                                                     /* ! XAPIGEN */
+
+#define VM_UUID_SIZE            16
+#define INVALID_VGPU_DEV_INST   0xFFFFFFFFU
+#define MAX_VGPU_DEVICES_PER_VM 16U
+
+/* This enum represents the current state of guest dependent fields */
+typedef enum GUEST_VM_INFO_STATE {
+    GUEST_VM_INFO_STATE_UNINITIALIZED = 0,
+    GUEST_VM_INFO_STATE_INITIALIZED = 1,
+} GUEST_VM_INFO_STATE;
+
+/* This enum represents types of VM identifiers */
+typedef enum VM_ID_TYPE {
+    VM_ID_DOMAIN_ID = 0,
+    VM_ID_UUID = 1,
+} VM_ID_TYPE;
+
+/* This structure represents VM identifier */
+typedef union VM_ID {
+    NvU8 vmUuid[VM_UUID_SIZE];
+    NV_DECLARE_ALIGNED(NvU64 vmId, 8);
+} VM_ID;
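+
+/*
+ * Editor's illustration (not part of the original header): VM_ID acts as a
+ * tagged union, with VM_ID_TYPE selecting the valid member. The helper is
+ * hypothetical; only the definitions above are assumed.
+ */
+static inline void vm_id_set_sketch(VM_ID *pVmId, VM_ID_TYPE idType,
+                                    NvU64 domainId, const NvU8 *pUuid)
+{
+    if (idType == VM_ID_DOMAIN_ID)
+    {
+        pVmId->vmId = domainId;
+    }
+    else // VM_ID_UUID
+    {
+        NvU32 i;
+        for (i = 0; i < VM_UUID_SIZE; i++)
+        {
+            pVmId->vmUuid[i] = pUuid[i];
+        }
+    }
+}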
diff --git a/src/common/sdk/nvidia/inc/nvcd.h b/src/common/sdk/nvidia/inc/nvcd.h
new file mode 100644
index 0000000..6c462eb
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/nvcd.h
@@ -0,0 +1,159 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2002-2002 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVCD_H
+#define NVCD_H
+
+//******************************************************************************
+//
+// Module Name: NVCD.H
+//
+// This file contains structures and constants that define the NV specific
+// data to be returned by the miniport's new VideoBugCheckCallback. The callback
+// can return up to 4k bytes of data that will be appended to the dump file.
+// The bugcheck callback is currently only invoked for bugcheck 0xEA failures.
+// The buffer returned contains a top level header, followed by a variable
+// number of data records. The top level header contains an ASCII signature
+// that can be located with a search as well as a GUID for unique identification
+// of the crash dump layout, i.e. future bugcheck callbacks can define a new
+// GUID to redefine the entire crash dump layout. A checksum and crash dump
+// size values are also included to ensure crash dump data integrity. The
+// data records each contain a header indicating what group the data belongs to
+// as well as the actual record type and size. This flexibility allows groups
+// to define and extend the information in their records without adversely
+// affecting the code in the debugger extension that has to parse and display
+// this information. The structures for these individual data records are
+// contained in separate header files for each group.
+//
+//******************************************************************************
+#include "nvtypes.h"
+
+// Define the GUID type for non-Windows OSes
+
+#ifndef GUID_DEFINED
+#define GUID_DEFINED
+typedef struct _GUID {
+    NvU32 Data1;
+    NvU16 Data2;
+    NvU16 Data3;
+    NvU8  Data4[8];
+} GUID, *LPGUID;
+#endif
+
+// Define the crash dump ASCII tag value and the dump format GUIDs
+#define NVCD_SIGNATURE 0x4443564E /* ASCII crash dump signature "NVCD" */
+
+#define GUID_NVCD_DUMP_V1 { /* e3d5dc6e-db7d-4e28-b09e-f59a942f4a24 */ \
+    0xe3d5dc6e, 0xdb7d, 0x4e28,                                        \
+    {0xb0, 0x9e, 0xf5, 0x9a, 0x94, 0x2f, 0x4a, 0x24}                   \
+};
+#define GUID_NVCD_DUMP_V2 { /* cd978ac1-3aa1-494b-bb5b-e93daf2b0536 */ \
+    0xcd978ac1, 0x3aa1, 0x494b,                                        \
+    {0xbb, 0x5b, 0xe9, 0x3d, 0xaf, 0x2b, 0x05, 0x36}                   \
+};
+#define GUID_NVCDMP_RSVD1 { /* 391fc656-a37c-4574-8d57-b29a562f909b */ \
+    0x391fc656, 0xa37c, 0x4574,                                        \
+    {0x8d, 0x57, 0xb2, 0x9a, 0x56, 0x2f, 0x90, 0x9b}                   \
+};
+#define GUID_NVCDMP_RSVD2 { /* c6d9982d-1ba9-4f80-badd-3dc992d41b46 */ \
+    0xc6d9982d, 0x1ba9, 0x4f80,                                        \
+    {0xba, 0xdd, 0x3d, 0xc9, 0x92, 0xd4, 0x1b, 0x46}                   \
+};
+
+// RC 2.0 NVCD (NV crash dump) GUID
+#define GUID_NVCD_RC2_V1 { /* d3793533-a4a6-46d3-97f2-1446cfdc1ee7 */  \
+    0xd3793533, 0xa4a6, 0x46d3,                                        \
+    {0x97, 0xf2, 0x14, 0x46, 0xcf, 0xdc, 0x1e, 0xe7}                   \
+};
+
+
+// Define NVIDIA crash dump header structure (First data block in crash dump)
+typedef struct
+{
+    NvU32 dwSignature;  // ASCII crash dump signature "NVCD"
+    GUID  gVersion;     // GUID for crashdump file (Version)
+    NvU32 dwSize;       // Size of the crash dump data
+    NvU8  cCheckSum;    // Crash dump checksum (Zero = ignore)
+    NvU8  cFiller[3];   // Filler (Possible CRC value)
+} NVCD_HEADER;
+typedef NVCD_HEADER *PNVCD_HEADER;
+
+// Define the crash dump record groups
+typedef enum
+{
+    NvcdGroup            = 0,   // NVIDIA crash dump group (System NVCD records)
+    RmGroup              = 1,   // Resource manager group (RM records)
+    DriverGroup          = 2,   // Driver group (Driver/miniport records)
+    HardwareGroup        = 3,   // Hardware group (Hardware records)
+    InstrumentationGroup = 4,   // Instrumentation group (Special records)
+} NVCD_GROUP_TYPE;
+
+// Define the crash dump group record types (Single end of data record type)
+typedef enum
+{
+    EndOfData             = 0,  // End of crash dump data record
+    CompressedDataHuffman = 1,  // Compressed Huffman data
+} NVCD_RECORD_TYPE;
+
+// Define the crash dump data record header
+typedef struct
+{
+    NvU8  cRecordGroup;     // Data record group (NVCD_GROUP_TYPE)
+    NvU8  cRecordType;      // Data record type (See group header)
+    NvU16 wRecordSize;      // Size of the data record in bytes
+} NVCD_RECORD;
+typedef NVCD_RECORD *PNVCD_RECORD;
+
+// Define the EndOfData record structure
+typedef struct
+{
+    NVCD_RECORD Header;     // End of data record header
+} EndOfData_RECORD;
+typedef EndOfData_RECORD *PEndOfData_RECORD;
+
+//
+// Generic mini-record type (keep the size at 64bits)
+//
+typedef struct
+{
+    NVCD_RECORD Header;     // header for mini record
+    NvU32       Payload;    // 32 bit payload value
+} NVCDMiniRecord;
+typedef NVCDMiniRecord *PNVCDMiniRecord;
+
+//
+// Generic record collection type
+//
+typedef struct
+{
+    NVCD_RECORD Header;      // generic header identifying this binary type in the OCA buffer;
+                             // its size is the actual size of this struct + all items in the collection
+    NvU32       NumRecords;  // number of records this collection contains
+    NVCD_RECORD FirstRecord; // first record; its data follows
+} NVCDRecordCollection;
+typedef NVCDRecordCollection *PNVCDRecordCollection;
+
+#define COLL_HEADER_SIZEOF (sizeof(NVCDRecordCollection) - sizeof(NVCD_RECORD))
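+
+/*
+ * Editor's illustration (not part of the original header): a minimal sketch
+ * of stepping through the packed record stream that follows an NVCD_HEADER.
+ * It assumes wRecordSize covers the full record (header plus payload) and
+ * omits the bounds checking against NVCD_HEADER::dwSize that real parsing
+ * would need.
+ */
+static const NVCD_RECORD *nvcdNextRecordSketch(const NVCD_RECORD *pRecord)
+{
+    // The stream is terminated by an EndOfData record in the NVCD group.
+    if ((pRecord->cRecordGroup == NvcdGroup) &&
+        (pRecord->cRecordType == EndOfData))
+    {
+        return (const NVCD_RECORD *)0;
+    }
+
+    // Records are packed back to back.
+    return (const NVCD_RECORD *)(((const NvU8 *)pRecord) + pRecord->wRecordSize);
+}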
+
+
+#endif // NVCD_H
diff --git a/src/common/sdk/nvidia/inc/nvcfg_sdk.h b/src/common/sdk/nvidia/inc/nvcfg_sdk.h
new file mode 100644
index 0000000..4ce8d4b
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/nvcfg_sdk.h
@@ -0,0 +1,29 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef NV_CFG_SDK_INCLUDED
+#define NV_CFG_SDK_INCLUDED
+
+
+#endif // NV_CFG_SDK_INCLUDED
diff --git a/src/common/sdk/nvidia/inc/nvdisptypes.h b/src/common/sdk/nvidia/inc/nvdisptypes.h
new file mode 100644
index 0000000..adcfb98
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/nvdisptypes.h
@@ -0,0 +1,92 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2016,2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+ /***************************************************************************\
+|*                                                                           *|
+|*                         NV Display Common Types                           *|
+|*                                                                           *|
+|*  defines the common display types.                                        *|
+|*                                                                           *|
+ \***************************************************************************/
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: nvdisptypes.finn
+//
+
+
+
+
+#include "nvtypes.h"
+
+
+
+typedef enum NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP {
+    NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_DEFAULT = 0,
+    NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 = 1,
+    NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 = 2,
+    NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 = 3,
+    NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 = 4,
+    NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 = 5,
+    NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 = 6,
+    NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 = 7,
+    NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 = 8,
+    NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 = 9,
+} NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP;
+
+
+
+typedef NvU32 NV_DISP_LOCK_PIN;
+
+#define NV_DISP_LOCK_PIN_0 0x0
+#define NV_DISP_LOCK_PIN_1 0x1
+#define NV_DISP_LOCK_PIN_2 0x2
+#define NV_DISP_LOCK_PIN_3 0x3
+#define NV_DISP_LOCK_PIN_4 0x4
+#define NV_DISP_LOCK_PIN_5 0x5
+#define NV_DISP_LOCK_PIN_6 0x6
+#define NV_DISP_LOCK_PIN_7 0x7
+#define NV_DISP_LOCK_PIN_8 0x8
+#define NV_DISP_LOCK_PIN_9 0x9
+#define NV_DISP_LOCK_PIN_A 0xA
+#define NV_DISP_LOCK_PIN_B 0xB
+#define NV_DISP_LOCK_PIN_C 0xC
+#define NV_DISP_LOCK_PIN_D 0xD
+#define NV_DISP_LOCK_PIN_E 0xE
+#define NV_DISP_LOCK_PIN_F 0xF
+
+// Value used solely for HW initialization
+#define NV_DISP_LOCK_PIN_UNSPECIFIED 0x10
+
+
+
+typedef NvU32 NV_DISP_LOCK_MODE;
+
+#define NV_DISP_LOCK_MODE_NO_LOCK     0x0
+#define NV_DISP_LOCK_MODE_FRAME_LOCK  0x1
+#define NV_DISP_LOCK_MODE_RASTER_LOCK 0x3
+
diff --git a/src/common/sdk/nvidia/inc/nverror.h b/src/common/sdk/nvidia/inc/nverror.h
new file mode 100644
index 0000000..7c48921
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/nverror.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 1993-2025, NVIDIA CORPORATION. All rights reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVERROR_H +#define NVERROR_H +/****************************************************************************** +* +* File: nverror.h +* +* Description: +* This file contains the error codes set when the error notifier +* is signaled. +* +******************************************************************************/ + +#include "nvcfg_sdk.h" + +#define ROBUST_CHANNEL_GR_EXCEPTION (13) +#define ROBUST_CHANNEL_FAKE_ERROR (14) +#define ROBUST_CHANNEL_VBLANK_CALLBACK_TIMEOUT (16) +#define ROBUST_CHANNEL_DISP_MISSED_NOTIFIER (19) +#define ROBUST_CHANNEL_MPEG_ERROR_SW_METHOD (20) +#define ROBUST_CHANNEL_ME_ERROR_SW_METHOD (21) +#define ROBUST_CHANNEL_VP_ERROR_SW_METHOD (22) +#define ROBUST_CHANNEL_RC_LOGGING_ENABLED (23) +#define ROBUST_CHANNEL_VP_ERROR (27) +#define ROBUST_CHANNEL_VP2_ERROR (28) +#define ROBUST_CHANNEL_BSP_ERROR (29) +#define ROBUST_CHANNEL_UNUSED_ERROR_30 (30) +#define ROBUST_CHANNEL_FIFO_ERROR_MMU_ERR_FLT (31) +#define ROBUST_CHANNEL_PBDMA_ERROR (32) +#define ROBUST_CHANNEL_SEC_ERROR (33) +#define ROBUST_CHANNEL_MSVLD_ERROR (34) +#define ROBUST_CHANNEL_MSPDEC_ERROR (35) +#define ROBUST_CHANNEL_MSPPP_ERROR (36) +#define ROBUST_CHANNEL_CE0_ERROR (39) +#define ROBUST_CHANNEL_CE1_ERROR (40) +#define ROBUST_CHANNEL_CE2_ERROR (41) +#define ROBUST_CHANNEL_VIC_ERROR (42) +#define ROBUST_CHANNEL_RESETCHANNEL_VERIF_ERROR (43) +#define ROBUST_CHANNEL_GR_FAULT_DURING_CTXSW (44) +#define ROBUST_CHANNEL_PREEMPTIVE_REMOVAL (45) +#define ROBUST_CHANNEL_NVENC0_ERROR (47) +#define ROBUST_CHANNEL_GPU_ECC_DBE (48) +#define FB_MEMORY_ERROR (58) +#define PMU_ERROR (59) +#define ROBUST_CHANNEL_SEC2_ERROR (60) +#define PMU_BREAKPOINT (61) +#define PMU_HALT_ERROR (62) +#define INFOROM_PAGE_RETIREMENT_EVENT (63) +#define INFOROM_DRAM_RETIREMENT_EVENT INFOROM_PAGE_RETIREMENT_EVENT +#define INFOROM_PAGE_RETIREMENT_FAILURE (64) +#define INFOROM_DRAM_RETIREMENT_FAILURE INFOROM_PAGE_RETIREMENT_FAILURE +#define ROBUST_CHANNEL_NVENC1_ERROR (65) +#define ROBUST_CHANNEL_NVDEC0_ERROR (68) +#define ROBUST_CHANNEL_GR_CLASS_ERROR (69) +#define ROBUST_CHANNEL_CE3_ERROR (70) +#define ROBUST_CHANNEL_CE4_ERROR (71) +#define ROBUST_CHANNEL_CE5_ERROR (72) +#define ROBUST_CHANNEL_NVENC2_ERROR (73) +#define NVLINK_ERROR (74) +#define ROBUST_CHANNEL_CE6_ERROR (75) +#define ROBUST_CHANNEL_CE7_ERROR (76) +#define ROBUST_CHANNEL_CE8_ERROR (77) +#define VGPU_START_ERROR (78) +#define ROBUST_CHANNEL_GPU_HAS_FALLEN_OFF_THE_BUS (79) +#define 
PBDMA_PUSHBUFFER_CRC_MISMATCH (80) +#define ROBUST_CHANNEL_VGA_SUBSYSTEM_ERROR (81) +#define ROBUST_CHANNEL_NVJPG0_ERROR (82) +#define ROBUST_CHANNEL_NVDEC1_ERROR (83) +#define ROBUST_CHANNEL_NVDEC2_ERROR (84) +#define ROBUST_CHANNEL_CE9_ERROR (85) +#define ROBUST_CHANNEL_OFA0_ERROR (86) +#define NVTELEMETRY_DRIVER_REPORT (87) +#define ROBUST_CHANNEL_NVDEC3_ERROR (88) +#define ROBUST_CHANNEL_NVDEC4_ERROR (89) +#define LTC_ERROR (90) +#define RESERVED_XID (91) +#define EXCESSIVE_SBE_INTERRUPTS (92) +#define INFOROM_ERASE_LIMIT_EXCEEDED (93) +#define ROBUST_CHANNEL_CONTAINED_ERROR (94) +#define ROBUST_CHANNEL_UNCONTAINED_ERROR (95) +#define ROBUST_CHANNEL_NVDEC5_ERROR (96) +#define ROBUST_CHANNEL_NVDEC6_ERROR (97) +#define ROBUST_CHANNEL_NVDEC7_ERROR (98) +#define ROBUST_CHANNEL_NVJPG1_ERROR (99) +#define ROBUST_CHANNEL_NVJPG2_ERROR (100) +#define ROBUST_CHANNEL_NVJPG3_ERROR (101) +#define ROBUST_CHANNEL_NVJPG4_ERROR (102) +#define ROBUST_CHANNEL_NVJPG5_ERROR (103) +#define ROBUST_CHANNEL_NVJPG6_ERROR (104) +#define ROBUST_CHANNEL_NVJPG7_ERROR (105) +#define DESTINATION_FLA_TRANSLATION_ERROR (108) +#define SEC_FAULT_ERROR (110) +#define GSP_RPC_TIMEOUT (119) +#define GSP_ERROR (120) +#define C2C_ERROR (121) +#define SPI_PMU_RPC_READ_FAIL (122) +#define SPI_PMU_RPC_WRITE_FAIL (123) +#define SPI_PMU_RPC_ERASE_FAIL (124) +#define INFOROM_FS_ERROR (125) +#define ROBUST_CHANNEL_CE10_ERROR (126) +#define ROBUST_CHANNEL_CE11_ERROR (127) +#define ROBUST_CHANNEL_CE12_ERROR (128) +#define ROBUST_CHANNEL_CE13_ERROR (129) +#define ROBUST_CHANNEL_CE14_ERROR (130) +#define ROBUST_CHANNEL_CE15_ERROR (131) +#define ROBUST_CHANNEL_CE16_ERROR (132) +#define ROBUST_CHANNEL_CE17_ERROR (133) +#define ROBUST_CHANNEL_CE18_ERROR (134) +#define ROBUST_CHANNEL_CE19_ERROR (135) +#define ALI_TRAINING_FAIL (136) +#define NVLINK_FLA_PRIV_ERR (137) +#define ROBUST_CHANNEL_DLA_ERROR (138) +#define ROBUST_CHANNEL_OFA1_ERROR (139) +#define UNRECOVERABLE_ECC_ERROR_ESCAPE (140) +#define ROBUST_CHANNEL_FAST_PATH_ERROR (141) +#define ROBUST_CHANNEL_NVENC3_ERROR (142) +#define GPU_INIT_ERROR (143) +#define NVLINK_SAW_ERROR (144) +#define NVLINK_RLW_ERROR (145) +#define NVLINK_TLW_ERROR (146) +#define NVLINK_TREX_ERROR (147) +#define NVLINK_NVLPW_CTRL_ERROR (148) +#define NVLINK_NETIR_ERROR (149) +#define NVLINK_MSE_ERROR (150) +#define ROBUST_CHANNEL_KEY_ROTATION_ERROR (151) +#define ROBUST_CHANNEL_DLA_SMMU_ERROR (152) +#define ROBUST_CHANNEL_DLA_TIMEOUT (153) +#define GPU_RECOVERY_ACTION_CHANGED (154) +#define NVLINK_SW_DEFINED_ERROR (155) +#define RESOURCE_RETIREMENT_EVENT (156) +#define RESOURCE_RETIREMENT_FAILURE (157) +#define GPU_FATAL_TIMEOUT (158) +#define ROBUST_CHANNEL_CHI_NON_DATA_ERROR (159) +#define CHANNEL_RETIREMENT_EVENT (160) +#define CHANNEL_RETIREMENT_FAILURE (161) +#define ISINK_REENGAGED (162) +#define ISINK_DISENGAGED (163) +#define ISINK_LOW_LIFETIME (164) +#define ISINK_ZERO_LIFETIME (165) +#define NVLINK_SECURE_NVLE_ERROR (166) +#define PCIE_FATAL_TIMEOUT (167) +#define REDUCED_GPU_MEMORY_CAPACITY (168) +#define SEC2_HALT_ERROR (169) +#define NVLINK_SECURE_NON_NVLE_ERROR (170) +#define UNCORRECTABLE_DRAM_ERROR (171) +#define UNCORRECTABLE_SRAM_ERROR (172) +#define ROBUST_CHANNEL_LAST_ERROR (172) + +// Indexed CE reference +#define ROBUST_CHANNEL_CE_ERROR(x) \ + ((x < 3) ? \ + (ROBUST_CHANNEL_CE0_ERROR + (x)) : \ + ((x < 6) ? \ + (ROBUST_CHANNEL_CE3_ERROR + (x - 3)) : \ + ((x < 9) ? \ + (ROBUST_CHANNEL_CE6_ERROR + (x - 6)) : \ + ((x == 9) ? 
\ + (ROBUST_CHANNEL_CE9_ERROR) : \ + (ROBUST_CHANNEL_CE10_ERROR + (x - 10)))))) + +#define ROBUST_CHANNEL_IS_CE_ERROR(x) \ + ((x == ROBUST_CHANNEL_CE0_ERROR) || (x == ROBUST_CHANNEL_CE1_ERROR) || \ + (x == ROBUST_CHANNEL_CE2_ERROR) || (x == ROBUST_CHANNEL_CE3_ERROR) || \ + (x == ROBUST_CHANNEL_CE4_ERROR) || (x == ROBUST_CHANNEL_CE5_ERROR) || \ + (x == ROBUST_CHANNEL_CE6_ERROR) || (x == ROBUST_CHANNEL_CE7_ERROR) || \ + (x == ROBUST_CHANNEL_CE8_ERROR) || (x == ROBUST_CHANNEL_CE9_ERROR) || \ + (x == ROBUST_CHANNEL_CE10_ERROR) || (x == ROBUST_CHANNEL_CE11_ERROR) || \ + (x == ROBUST_CHANNEL_CE12_ERROR) || (x == ROBUST_CHANNEL_CE13_ERROR) || \ + (x == ROBUST_CHANNEL_CE14_ERROR) || (x == ROBUST_CHANNEL_CE15_ERROR) || \ + (x == ROBUST_CHANNEL_CE16_ERROR) || (x == ROBUST_CHANNEL_CE17_ERROR) || \ + (x == ROBUST_CHANNEL_CE18_ERROR) || (x == ROBUST_CHANNEL_CE19_ERROR)) + +#define ROBUST_CHANNEL_CE_ERROR_IDX(x) \ + (((x >= ROBUST_CHANNEL_CE0_ERROR) && (x <= ROBUST_CHANNEL_CE2_ERROR)) ? \ + (x - ROBUST_CHANNEL_CE0_ERROR) : \ + (((x >= ROBUST_CHANNEL_CE3_ERROR) && (x <= ROBUST_CHANNEL_CE5_ERROR)) ? \ + (x - ROBUST_CHANNEL_CE3_ERROR + 3) : \ + (((x >= ROBUST_CHANNEL_CE6_ERROR) && (x <= ROBUST_CHANNEL_CE8_ERROR)) ? \ + (x - ROBUST_CHANNEL_CE6_ERROR + 6) : \ + ((x == ROBUST_CHANNEL_CE9_ERROR) ? \ + (x - ROBUST_CHANNEL_CE9_ERROR + 9) : \ + (x - ROBUST_CHANNEL_CE10_ERROR + 10))))) + +// Indexed NVDEC reference +#define ROBUST_CHANNEL_NVDEC_ERROR(x) \ + ((x < 1) ? \ + (ROBUST_CHANNEL_NVDEC0_ERROR) : \ + ((x < 3) ? \ + (ROBUST_CHANNEL_NVDEC1_ERROR + (x - 1)) : \ + ((x < 5) ? \ + (ROBUST_CHANNEL_NVDEC3_ERROR + (x - 3)): \ + (ROBUST_CHANNEL_NVDEC5_ERROR + (x - 5))))) + +#define ROBUST_CHANNEL_IS_NVDEC_ERROR(x) \ + ((x == ROBUST_CHANNEL_NVDEC0_ERROR) || \ + (x == ROBUST_CHANNEL_NVDEC1_ERROR) || \ + (x == ROBUST_CHANNEL_NVDEC2_ERROR) || \ + (x == ROBUST_CHANNEL_NVDEC3_ERROR) || \ + (x == ROBUST_CHANNEL_NVDEC4_ERROR) || \ + (x == ROBUST_CHANNEL_NVDEC5_ERROR) || \ + (x == ROBUST_CHANNEL_NVDEC6_ERROR) || \ + (x == ROBUST_CHANNEL_NVDEC7_ERROR)) + +#define ROBUST_CHANNEL_NVDEC_ERROR_IDX(x) \ + ((x == ROBUST_CHANNEL_NVDEC0_ERROR) ? \ + (x - ROBUST_CHANNEL_NVDEC0_ERROR) : \ + (((x >= ROBUST_CHANNEL_NVDEC1_ERROR) && (x <= ROBUST_CHANNEL_NVDEC2_ERROR)) ? \ + (x - ROBUST_CHANNEL_NVDEC1_ERROR + 1) : \ + (((x >= ROBUST_CHANNEL_NVDEC3_ERROR) && (x <= ROBUST_CHANNEL_NVDEC4_ERROR)) ? \ + (x - ROBUST_CHANNEL_NVDEC3_ERROR + 3) : \ + (x - ROBUST_CHANNEL_NVDEC5_ERROR + 5)))) + +// Indexed NVENC reference +#define ROBUST_CHANNEL_NVENC_ERROR(x) \ + ((x == 0) ? (ROBUST_CHANNEL_NVENC0_ERROR) : \ + ((x == 1) ? (ROBUST_CHANNEL_NVENC1_ERROR) : \ + ((x == 2) ? (ROBUST_CHANNEL_NVENC2_ERROR) : \ + (ROBUST_CHANNEL_NVENC3_ERROR)))) + +#define ROBUST_CHANNEL_IS_NVENC_ERROR(x) \ + ((x == ROBUST_CHANNEL_NVENC0_ERROR) || \ + (x == ROBUST_CHANNEL_NVENC1_ERROR) || \ + (x == ROBUST_CHANNEL_NVENC2_ERROR) || \ + (x == ROBUST_CHANNEL_NVENC3_ERROR)) + +#define ROBUST_CHANNEL_NVENC_ERROR_IDX(x) \ + ((x == ROBUST_CHANNEL_NVENC0_ERROR) ? \ + (x - ROBUST_CHANNEL_NVENC0_ERROR) : \ + ((x == ROBUST_CHANNEL_NVENC1_ERROR) ? \ + (x - ROBUST_CHANNEL_NVENC1_ERROR + 1) : \ + ((x == ROBUST_CHANNEL_NVENC2_ERROR) ? \ + (x - ROBUST_CHANNEL_NVENC2_ERROR + 2) : \ + (x - ROBUST_CHANNEL_NVENC3_ERROR + 3)))) + +// Indexed NVJPG reference +#define ROBUST_CHANNEL_NVJPG_ERROR(x) \ + ((x < 1) ? 
\
+     (ROBUST_CHANNEL_NVJPG0_ERROR) :                  \
+     (ROBUST_CHANNEL_NVJPG1_ERROR + (x - 1)))
+
+#define ROBUST_CHANNEL_IS_NVJPG_ERROR(x)              \
+    ((x == ROBUST_CHANNEL_NVJPG0_ERROR) ||            \
+     (x == ROBUST_CHANNEL_NVJPG1_ERROR) ||            \
+     (x == ROBUST_CHANNEL_NVJPG2_ERROR) ||            \
+     (x == ROBUST_CHANNEL_NVJPG3_ERROR) ||            \
+     (x == ROBUST_CHANNEL_NVJPG4_ERROR) ||            \
+     (x == ROBUST_CHANNEL_NVJPG5_ERROR) ||            \
+     (x == ROBUST_CHANNEL_NVJPG6_ERROR) ||            \
+     (x == ROBUST_CHANNEL_NVJPG7_ERROR))
+
+#define ROBUST_CHANNEL_NVJPG_ERROR_IDX(x)             \
+    ((x == ROBUST_CHANNEL_NVJPG0_ERROR) ?             \
+        (x - ROBUST_CHANNEL_NVJPG0_ERROR) :           \
+        (x - ROBUST_CHANNEL_NVJPG1_ERROR + 1))
+
+// Indexed OFA reference
+#define ROBUST_CHANNEL_OFA_ERROR(x)                   \
+    ((x == 0) ?                                       \
+        (ROBUST_CHANNEL_OFA0_ERROR) :                 \
+        (ROBUST_CHANNEL_OFA1_ERROR))
+
+#define ROBUST_CHANNEL_IS_OFA_ERROR(x)                \
+    ((x == ROBUST_CHANNEL_OFA0_ERROR) ||              \
+     (x == ROBUST_CHANNEL_OFA1_ERROR))
+
+#define ROBUST_CHANNEL_OFA_ERROR_IDX(x)               \
+    ((x == ROBUST_CHANNEL_OFA0_ERROR) ?               \
+        (x - ROBUST_CHANNEL_OFA0_ERROR) :             \
+        (x - ROBUST_CHANNEL_OFA1_ERROR + 1))
+
+// Error Levels
+#define ROBUST_CHANNEL_ERROR_RECOVERY_LEVEL_INFO      (0)
+#define ROBUST_CHANNEL_ERROR_RECOVERY_LEVEL_NON_FATAL (1)
+#define ROBUST_CHANNEL_ERROR_RECOVERY_LEVEL_FATAL     (2)
+
+#endif // NVERROR_H
diff --git a/src/common/sdk/nvidia/inc/nvfixedtypes.h b/src/common/sdk/nvidia/inc/nvfixedtypes.h
new file mode 100644
index 0000000..f8c51ab
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/nvfixedtypes.h
@@ -0,0 +1,408 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVFIXEDTYPES_INCLUDED
+#define NVFIXEDTYPES_INCLUDED
+
+#include "nvtypes.h"
+
+/*!
+ * Fixed-point master data types.
+ *
+ * These master types represent the total number of bits contained within
+ * the FXP type. All FXP types below should be based on one of these master
+ * types.
+ */
+typedef NvS16 NvSFXP16;
+typedef NvS32 NvSFXP32;
+typedef NvS64 NvSFXP64;
+typedef NvU8  NvUFXP8;
+typedef NvU16 NvUFXP16;
+typedef NvU32 NvUFXP32;
+typedef NvU64 NvUFXP64;
+
+
+/*!
+ * Fixed-point data types.
+ *
+ * These are all integer types with precision indicated in the naming of the
+ * form: Nv<S|U>FXP<num_bits_above_radix>_<num_bits_below_radix>. The actual
+ * size of the data type is calculated as num_bits_above_radix +
+ * num_bits_below_radix.
+ *
+ * All of these FXP types should be based on one of the master types above.
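+ *
+ * Worked example (editor's note, not in the original header): NvSFXP16_16
+ * carries 16 bits above and 16 bits below the radix point, so the real
+ * value 1.5 is stored as 0x00018000.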
+ */ +typedef NvSFXP16 NvSFXP11_5; +typedef NvSFXP16 NvSFXP4_12; +typedef NvSFXP16 NvSFXP8_8; +typedef NvSFXP32 NvSFXP8_24; +typedef NvSFXP32 NvSFXP10_22; +typedef NvSFXP32 NvSFXP16_16; +typedef NvSFXP32 NvSFXP18_14; +typedef NvSFXP32 NvSFXP20_12; +typedef NvSFXP32 NvSFXP24_8; +typedef NvSFXP32 NvSFXP27_5; +typedef NvSFXP32 NvSFXP28_4; +typedef NvSFXP32 NvSFXP29_3; +typedef NvSFXP32 NvSFXP31_1; +typedef NvSFXP64 NvSFXP52_12; + +typedef NvUFXP8 NvUFXP0_8; +typedef NvUFXP16 NvUFXP0_16; +typedef NvUFXP16 NvUFXP4_12; +typedef NvUFXP16 NvUFXP8_8; +typedef NvUFXP32 NvUFXP3_29; +typedef NvUFXP32 NvUFXP4_28; +typedef NvUFXP32 NvUFXP7_25; +typedef NvUFXP32 NvUFXP8_24; +typedef NvUFXP32 NvUFXP9_23; +typedef NvUFXP32 NvUFXP10_22; +typedef NvUFXP32 NvUFXP15_17; +typedef NvUFXP32 NvUFXP16_16; +typedef NvUFXP32 NvUFXP18_14; +typedef NvUFXP32 NvUFXP20_12; +typedef NvUFXP32 NvUFXP24_8; +typedef NvUFXP32 NvUFXP25_7; +typedef NvUFXP32 NvUFXP26_6; +typedef NvUFXP32 NvUFXP28_4; + +typedef NvUFXP64 NvUFXP37_27; +typedef NvUFXP64 NvUFXP40_24; +typedef NvUFXP64 NvUFXP48_16; +typedef NvUFXP64 NvUFXP52_12; +typedef NvUFXP64 NvUFXP54_10; +typedef NvUFXP64 NvUFXP60_4; + +/*! + * Utility macros used in converting between signed integers and fixed-point + * notation. + * + * - COMMON - These are used by both signed and unsigned. + */ +#define NV_TYPES_FXP_INTEGER(x, y) ((x)+(y)-1):(y) +#define NV_TYPES_FXP_FRACTIONAL(x, y) ((y)-1):0 +#define NV_TYPES_FXP_FRACTIONAL_MSB(x, y) ((y)-1):((y)-1) +#define NV_TYPES_FXP_FRACTIONAL_MSB_ONE 0x00000001 +#define NV_TYPES_FXP_FRACTIONAL_MSB_ZERO 0x00000000 +#define NV_TYPES_FXP_ZERO (0) + +/*! + * - UNSIGNED - These are only used for unsigned. + */ +#define NV_TYPES_UFXP_INTEGER_MAX(x, y) (~(NVBIT((y))-1U)) +#define NV_TYPES_UFXP_INTEGER_MIN(x, y) (0U) + +/*! + * - SIGNED - These are only used for signed. + */ +#define NV_TYPES_SFXP_INTEGER_SIGN(x, y) ((x)+(y)-1):((x)+(y)-1) +#define NV_TYPES_SFXP_INTEGER_SIGN_NEGATIVE 0x00000001 +#define NV_TYPES_SFXP_INTEGER_SIGN_POSITIVE 0x00000000 +#define NV_TYPES_SFXP_S32_SIGN_EXTENSION(x, y) 31:(x) +#define NV_TYPES_SFXP_S32_SIGN_EXTENSION_POSITIVE(x, y) 0x00000000 +#define NV_TYPES_SFXP_S32_SIGN_EXTENSION_NEGATIVE(x, y) (NVBIT(32-(x))-1U) +#define NV_TYPES_SFXP_INTEGER_MAX(x, y) (NVBIT((x))-1U) +#define NV_TYPES_SFXP_INTEGER_MIN(x, y) (~(NVBIT((x))-1U)) +#define NV_TYPES_SFXP_S64_SIGN_EXTENSION(x, y) 63:(x) +#define NV_TYPES_SFXP_S64_SIGN_EXTENSION_POSITIVE(x, y) 0x0000000000000000 +#define NV_TYPES_SFXP_S64_SIGN_EXTENSION_NEGATIVE(x, y) (NVBIT64(64-(x))-1U) +#define NV_TYPES_SFXP_S64_INTEGER_MAX(x, y) (NVBIT64((x)-1)-1U) +#define NV_TYPES_SFXP_S64_INTEGER_MIN(x, y) (~(NVBIT64((x)-1)-1U)) + +/*! + * Conversion macros used for converting between integer and fixed point + * representations. Both signed and unsigned variants. + * + * Warning: + * Note that most of the macros below can overflow if applied on values that can + * not fit the destination type. It's caller responsibility to ensure that such + * situations will not occur. + * + * Some conversions perform some commonly performed tasks other than just + * bit-shifting: + * + * - _SCALED: + * For integer -> fixed-point we add handling divisors to represent + * non-integer values. + * + * - _ROUNDED: + * For fixed-point -> integer we add rounding to integer values. 
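+ *
+ * Worked example (editor's note, not in the original header):
+ *   NV_TYPES_U32_TO_UFXP_X_Y_SCALED(16, 16, 1500, 1000) == 0x00018000,
+ *   i.e. 1.5 in UFXP16.16, and
+ *   NV_TYPES_UFXP_X_Y_TO_U32_ROUNDED(16, 16, 0x00018000) == 2, since the
+ *   fractional MSB of .5 rounds up.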
+ */ + +// 32-bit Unsigned FXP: +#define NV_TYPES_U32_TO_UFXP_X_Y(x, y, integer) \ + ((NvUFXP##x##_##y) (((NvU32) (integer)) << \ + DRF_SHIFT(NV_TYPES_FXP_INTEGER((x), (y))))) + +#define NV_TYPES_U32_TO_UFXP_X_Y_SCALED(x, y, integer, scale) \ + ((NvUFXP##x##_##y) ((((((NvU32) (integer)) << \ + DRF_SHIFT(NV_TYPES_FXP_INTEGER((x), (y))))) / \ + (scale)) + \ + ((((((NvU32) (integer)) << \ + DRF_SHIFT(NV_TYPES_FXP_INTEGER((x), (y)))) % \ + (scale)) > ((scale) >> 1)) ? 1U : 0U))) + +#define NV_TYPES_UFXP_X_Y_TO_U32(x, y, fxp) \ + ((NvU32) (DRF_VAL(_TYPES, _FXP, _INTEGER((x), (y)), \ + ((NvUFXP##x##_##y) (fxp))))) + +#define NV_TYPES_UFXP_X_Y_TO_U32_ROUNDED(x, y, fxp) \ + (NV_TYPES_UFXP_X_Y_TO_U32(x, y, (fxp)) + \ + (FLD_TEST_DRF_NUM(_TYPES, _FXP, _FRACTIONAL_MSB((x), (y)), \ + NV_TYPES_FXP_FRACTIONAL_MSB_ONE, ((NvUFXP##x##_##y) (fxp))) ? \ + 1U : 0U)) + +// 64-bit Unsigned FXP +#define NV_TYPES_U64_TO_UFXP_X_Y(x, y, integer) \ + ((NvUFXP##x##_##y) (((NvU64) (integer)) << \ + DRF_SHIFT64(NV_TYPES_FXP_INTEGER((x), (y))))) + +#define NV_TYPES_U64_TO_UFXP_X_Y_SCALED(x, y, integer, scale) \ + ((NvUFXP##x##_##y) (((((NvU64) (integer)) << \ + DRF_SHIFT64(NV_TYPES_FXP_INTEGER((x), (y)))) + \ + ((scale) >> 1)) / \ + (scale))) + +#define NV_TYPES_UFXP_X_Y_TO_U64(x, y, fxp) \ + ((NvU64) (DRF_VAL64(_TYPES, _FXP, _INTEGER((x), (y)), \ + ((NvUFXP##x##_##y) (fxp))))) + +#define NV_TYPES_UFXP_X_Y_TO_U64_ROUNDED(x, y, fxp) \ + (NV_TYPES_UFXP_X_Y_TO_U64(x, y, (fxp)) + \ + (FLD_TEST_DRF_NUM64(_TYPES, _FXP, _FRACTIONAL_MSB((x), (y)), \ + NV_TYPES_FXP_FRACTIONAL_MSB_ONE, ((NvUFXP##x##_##y) (fxp))) ? \ + 1U : 0U)) + +// +// 32-bit Signed FXP: +// Some compilers do not support left shift negative values +// so typecast integer to NvU32 instead of NvS32 +// +// Note that there is an issue with the rounding in +// NV_TYPES_S32_TO_SFXP_X_Y_SCALED. In particular, when the signs of the +// numerator and denominator don't match, the rounding is done towards positive +// infinity, rather than away from 0. This will need to be fixed in a follow-up +// change. +// +#define NV_TYPES_S32_TO_SFXP_X_Y(x, y, integer) \ + ((NvSFXP##x##_##y) (((NvU32) (integer)) << \ + DRF_SHIFT(NV_TYPES_FXP_INTEGER((x), (y))))) + +#define NV_TYPES_S32_TO_SFXP_X_Y_SCALED(x, y, integer, scale) \ + ((NvSFXP##x##_##y) (((((NvS32) (integer)) << \ + DRF_SHIFT(NV_TYPES_FXP_INTEGER((x), (y)))) + \ + ((scale) >> 1)) / \ + (scale))) + +#define NV_TYPES_SFXP_X_Y_TO_S32(x, y, fxp) \ + ((NvS32) ((DRF_VAL(_TYPES, _FXP, _INTEGER((x), (y)), \ + ((NvSFXP##x##_##y) (fxp)))) | \ + ((DRF_VAL(_TYPES, _SFXP, _INTEGER_SIGN((x), (y)), (fxp)) == \ + NV_TYPES_SFXP_INTEGER_SIGN_NEGATIVE) ? \ + DRF_NUM(_TYPES, _SFXP, _S32_SIGN_EXTENSION((x), (y)), \ + NV_TYPES_SFXP_S32_SIGN_EXTENSION_NEGATIVE((x), (y))) : \ + DRF_NUM(_TYPES, _SFXP, _S32_SIGN_EXTENSION((x), (y)), \ + NV_TYPES_SFXP_S32_SIGN_EXTENSION_POSITIVE((x), (y)))))) + +/*! + * Note: The rounding action for signed numbers should ideally round away from + * 0 in both the positive and the negative regions. + * For positive numbers, we add 1 if the fractional MSb is 1. + * For negative numbers, we add -1 (equivalent to subtracting 1) if the + * fractional MSb is 1. + */ +#define NV_TYPES_SFXP_X_Y_TO_S32_ROUNDED(x, y, fxp) \ + (NV_TYPES_SFXP_X_Y_TO_S32(x, y, (fxp)) + \ + (FLD_TEST_DRF_NUM(_TYPES, _FXP, _FRACTIONAL_MSB((x), (y)), \ + NV_TYPES_FXP_FRACTIONAL_MSB_ONE, ((NvSFXP##x##_##y) (fxp))) ? \ + ((DRF_VAL(_TYPES, _SFXP, _INTEGER_SIGN((x), (y)), (fxp)) == \ + NV_TYPES_SFXP_INTEGER_SIGN_POSITIVE) ? 
1 : -1) : 0)) + +#define NV_TYPES_SFXP_X_Y_TO_FLOAT32(x, y, fxp) \ + ((NvF32) NV_TYPES_SFXP_X_Y_TO_S32(x, y, (fxp)) + \ + ((NvF32) DRF_NUM(_TYPES, _FXP, _FRACTIONAL((x), (y)), \ + ((NvSFXP##x##_##y) (fxp))) / (NvF32) (1 << (y)))) + +// +// 64-bit Signed FXP: +// Some compilers do not support left shift negative values +// so typecast integer to NvU64 instead of NvS64 +// +// Note that there is an issue with the rounding in +// NV_TYPES_S64_TO_SFXP_X_Y_SCALED. In particular, when the signs of the +// numerator and denominator don't match, the rounding is done towards positive +// infinity, rather than away from 0. This will need to be fixed in a follow-up +// change. +// +#define NV_TYPES_S64_TO_SFXP_X_Y(x, y, integer) \ + ((NvSFXP##x##_##y) (((NvU64) (integer)) << \ + DRF_SHIFT64(NV_TYPES_FXP_INTEGER((x), (y))))) + +#define NV_TYPES_S64_TO_SFXP_X_Y_SCALED(x, y, integer, scale) \ + ((NvSFXP##x##_##y) (((((NvS64) (integer)) << \ + DRF_SHIFT64(NV_TYPES_FXP_INTEGER((x), (y)))) + \ + ((scale) >> 1)) / \ + (scale))) + +#define NV_TYPES_SFXP_X_Y_TO_S64(x, y, fxp) \ + ((NvS64) ((DRF_VAL64(_TYPES, _FXP, _INTEGER((x), (y)), \ + ((NvSFXP##x##_##y) (fxp)))) | \ + ((DRF_VAL64(_TYPES, _SFXP, _INTEGER_SIGN((x), (y)), (fxp)) == \ + NV_TYPES_SFXP_INTEGER_SIGN_NEGATIVE) ? \ + DRF_NUM64(_TYPES, _SFXP, _S64_SIGN_EXTENSION((x), (y)), \ + NV_TYPES_SFXP_S64_SIGN_EXTENSION_NEGATIVE((x), (y))) : \ + DRF_NUM64(_TYPES, _SFXP, _S64_SIGN_EXTENSION((x), (y)), \ + NV_TYPES_SFXP_S64_SIGN_EXTENSION_POSITIVE((x), (y)))))) + +/*! + * Note: The rounding action for signed numbers should ideally round away from + * 0 in both the positive and the negative regions. + * For positive numbers, we add 1 if the fractional MSb is 1. + * For negative numbers, we add -1 (equivalent to subtracting 1) if the + * fractional MSb is 1. + */ +#define NV_TYPES_SFXP_X_Y_TO_S64_ROUNDED(x, y, fxp) \ + (NV_TYPES_SFXP_X_Y_TO_S64(x, y, (fxp)) + \ + (FLD_TEST_DRF_NUM64(_TYPES, _FXP, _FRACTIONAL_MSB((x), (y)), \ + NV_TYPES_FXP_FRACTIONAL_MSB_ONE, ((NvSFXP##x##_##y) (fxp))) ? \ + ((DRF_VAL64(_TYPES, _SFXP, _INTEGER_SIGN((x), (y)), (fxp)) == \ + NV_TYPES_SFXP_INTEGER_SIGN_POSITIVE) ? 1 : -1) : 0)) + +/*! + * Macros representing the single-precision IEEE 754 floating point format for + * "binary32", also known as "single" and "float". + * + * Single precision floating point format wiki [1] + * + * _SIGN + * Single bit representing the sign of the number. + * _EXPONENT + * Unsigned 8-bit number representing the exponent value by which to scale + * the mantissa. + * _BIAS - The value by which to offset the exponent to account for sign. + * _MANTISSA + * Explicit 23-bit significand of the value. When exponent != 0, this is an + * implicitly 24-bit number with a leading 1 prepended. This 24-bit number + * can be conceptualized as FXP 9.23. + * + * With these definitions, the value of a floating point number can be + * calculated as: + * (-1)^(_SIGN) * + * 2^(_EXPONENT - _EXPONENT_BIAS) * + * (1 + _MANTISSA / (1 << 23)) + */ +// [1] : https://en.wikipedia.org/wiki/Single_precision_floating-point_format +#define NV_TYPES_SINGLE_SIGN 31:31 +#define NV_TYPES_SINGLE_SIGN_POSITIVE 0x00000000 +#define NV_TYPES_SINGLE_SIGN_NEGATIVE 0x00000001 +#define NV_TYPES_SINGLE_EXPONENT 30:23 +#define NV_TYPES_SINGLE_EXPONENT_ZERO 0x00000000 +#define NV_TYPES_SINGLE_EXPONENT_MAX 0x000000FE +#define NV_TYPES_SINGLE_EXPONENT_BIAS 0x0000007F +#define NV_TYPES_SINGLE_MANTISSA 22:0 + + +/*! 
+ * Helper macro to return an IEEE 754 single-precision value's mantissa as an
+ * unsigned FXP 9.23 value.
+ *
+ * @param[in] single IEEE 754 single-precision value to manipulate.
+ *
+ * @return IEEE 754 single-precision value's mantissa represented as an
+ *         unsigned FXP 9.23 value.
+ */
+#define NV_TYPES_SINGLE_MANTISSA_TO_UFXP9_23(single)                          \
+    ((NvUFXP9_23)(FLD_TEST_DRF(_TYPES, _SINGLE, _EXPONENT, _ZERO, single) ?   \
+                    NV_TYPES_U32_TO_UFXP_X_Y(9, 23, 0) :                      \
+                    (NV_TYPES_U32_TO_UFXP_X_Y(9, 23, 1) +                     \
+                        DRF_VAL(_TYPES, _SINGLE, _MANTISSA, single))))
+
+/*!
+ * Helper macro to return an IEEE 754 single-precision value's exponent,
+ * including the bias.
+ *
+ * @param[in] single IEEE 754 single-precision value to manipulate.
+ *
+ * @return Signed exponent value for IEEE 754 single-precision.
+ */
+#define NV_TYPES_SINGLE_EXPONENT_BIASED(single)                               \
+    ((NvS32)(DRF_VAL(_TYPES, _SINGLE, _EXPONENT, single) -                    \
+        NV_TYPES_SINGLE_EXPONENT_BIAS))
+
+/*!
+ * Helper macro to convert an NvS8 unbiased exponent value to an IEEE 754
+ * single-precision value's exponent, by adding the bias.
+ * Input exponent can range from -127 to 127, which is stored in the range
+ * [0, 254].
+ *
+ * @param[in] exponent Unbiased exponent value to manipulate.
+ *
+ * @return Biased exponent value for IEEE 754 single-precision.
+ */
+#define NV_TYPES_NvS32_TO_SINGLE_EXPONENT_BIASED(exponent)                    \
+    ((NvU32)((exponent) + NV_TYPES_SINGLE_EXPONENT_BIAS))
+
+/*!
+ * NvTemp - temperature data type introduced to avoid bugs in conversion between
+ * various existing notations.
+ */
+typedef NvSFXP24_8 NvTemp;
+
+/*!
+ * Macros for NvType <-> Celsius temperature conversion.
+ */
+#define NV_TYPES_CELSIUS_TO_NV_TEMP(cel)                                      \
+    NV_TYPES_S32_TO_SFXP_X_Y(24,8,(cel))
+#define NV_TYPES_NV_TEMP_TO_CELSIUS_TRUNCED(nvt)                              \
+    NV_TYPES_SFXP_X_Y_TO_S32(24,8,(nvt))
+#define NV_TYPES_NV_TEMP_TO_CELSIUS_ROUNDED(nvt)                              \
+    NV_TYPES_SFXP_X_Y_TO_S32_ROUNDED(24,8,(nvt))
+#define NV_TYPES_NV_TEMP_TO_CELSIUS_FLOAT(nvt)                                \
+    NV_TYPES_SFXP_X_Y_TO_FLOAT32(24,8,(nvt))
+
+/*!
+ * Macro for NvType -> number of bits conversion
+ */
+#define NV_NBITS_IN_TYPE(type) (8 * sizeof(type))
+
+/*!
+ * Macro to convert SFXP 11.5 to NvTemp.
+ */
+#define NV_TYPES_NVSFXP11_5_TO_NV_TEMP(x) ((NvTemp)(x) << 3)
+
+/*!
+ * Macro to convert NvTemp to SFXP 11.5.
+ */
+#define NV_TYPES_NV_TEMP_TO_NVSFXP11_5(x) ((NvSFXP11_5)(x) >> 3)
+
+/*!
+ * Macro to convert UFXP 5.3 to NvTemp.
+ */
+#define NV_TYPES_NVUFXP5_3_TO_NV_TEMP(x) ((NvTemp)(x) << 5)
+
+/*!
+ * Macro to convert UFXP11.5 Watts to NvU32 milli-Watts.
+ */
+#define NV_TYPES_NVUFXP11_5_WATTS_TO_NVU32_MILLI_WATTS(x) ((((NvU32)(x)) * ((NvU32)1000)) >> 5)
+
+#endif /* NVFIXEDTYPES_INCLUDED */
diff --git a/src/common/sdk/nvidia/inc/nvgputypes.h b/src/common/sdk/nvidia/inc/nvgputypes.h
new file mode 100644
index 0000000..d018414
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/nvgputypes.h
@@ -0,0 +1,177 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2006 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + + /***************************************************************************\ +|* *| +|* NV GPU Types *| +|* *| +|* This header contains definitions describing NVIDIA's GPU hardware state. *| +|* *| + \***************************************************************************/ + + +#ifndef NVGPUTYPES_INCLUDED +#define NVGPUTYPES_INCLUDED +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + + /***************************************************************************\ +|* NvNotification *| + \***************************************************************************/ + +/***** NvNotification Structure *****/ +/* + * NV objects return information about method completion to clients via an + * array of notification structures in main memory. + * + * The client sets the status field to NV???_NOTIFICATION_STATUS_IN_PROGRESS. + * NV fills in the NvNotification[] data structure in the following order: + * timeStamp, otherInfo32, otherInfo16, and then status. + */ + +/* memory data structures */ +typedef volatile struct NvNotificationRec { + struct { /* 0000- */ + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvV32 info32; /* info returned depends on method 0008-000b*/ + NvV16 info16; /* info returned depends on method 000c-000d*/ + NvV16 status; /* user sets bit 15, NV sets status 000e-000f*/ +} NvNotification; + + /***************************************************************************\ +|* NvGpuSemaphore *| + \***************************************************************************/ + +/***** NvGpuSemaphore Structure *****/ +/* + * NvGpuSemaphore objects are used by the GPU to synchronize multiple + * command-streams. + * + * Please refer to class documentation for details regarding the content of + * the data[] field. + */ + +/* memory data structures */ +typedef volatile struct NvGpuSemaphoreRec { + NvV32 data[2]; /* Payload/Report data 0000-0007*/ + struct { /* 0008- */ + NvV32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 8- f*/ + } timeStamp; /* -000f*/ +} NvGpuSemaphore; + + /***************************************************************************\ +|* NvGetReport *| + \***************************************************************************/ + +/* + * NV objects, starting with Kelvin, return information such as pixel counts to + * the user via the NV*_GET_REPORT method. 
+ * + * The client fills in the "zero" field to any nonzero value and waits until it + * becomes zero. NV fills in the timeStamp, value, and zero fields. + */ +typedef volatile struct NVGetReportRec { + struct { /* 0000- */ + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvU32 value; /* info returned depends on method 0008-000b*/ + NvU32 zero; /* always written to zero 000c-000f*/ +} NvGetReport; + + /***************************************************************************\ +|* NvRcNotification *| + \***************************************************************************/ + +/* + * NV robust channel notification information is reported to clients via + * standard NV01_EVENT objects bound to instance of the NV*_CHANNEL_DMA and + * NV*_CHANNEL_GPFIFO objects. + */ +typedef struct NvRcNotificationRec { + struct { + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvU32 exceptLevel; /* exception level 000c-000f*/ + NvU32 exceptType; /* exception type 0010-0013*/ +} NvRcNotification; + + /***************************************************************************\ +|* NvSyncPointFence *| + \***************************************************************************/ + +/***** NvSyncPointFence Structure *****/ +/* + * NvSyncPointFence objects represent a syncpoint event. The syncPointID + * identifies the syncpoint register and the value is the value that the + * register will contain right after the event occurs. + * + * If syncPointID contains NV_INVALID_SYNCPOINT_ID then this is an invalid + * event. This is often used to indicate an event in the past (i.e. no need to + * wait). + * + * For more info on syncpoints refer to Mobile channel and syncpoint + * documentation. + */ +typedef struct NvSyncPointFenceRec { + NvU32 syncPointID; + NvU32 value; +} NvSyncPointFence; + +#define NV_INVALID_SYNCPOINT_ID ((NvU32)-1) + + /***************************************************************************\ +|* *| +|* 64 bit type definitions for use in interface structures. *| +|* *| + \***************************************************************************/ + +typedef NvU64 NvOffset; /* GPU address */ + +#define NvOffset_HI32(n) ((NvU32)(((NvU64)(n)) >> 32)) +#define NvOffset_LO32(n) ((NvU32)((NvU64)(n))) + +/* +* There are two types of GPU-UUIDs available: +* +* (1) a SHA-256 based 32 byte ID, formatted as a 64 character +* hexadecimal string as "GPU-%16x-%08x-%08x-%08x-%024x"; this is +* deprecated. +* +* (2) a SHA-1 based 16 byte ID, formatted as a 32 character +* hexadecimal string as "GPU-%08x-%04x-%04x-%04x-%012x" (the +* canonical format of a UUID); this is the default. +*/ +#define NV_GPU_UUID_SHA1_LEN (16) +#define NV_GPU_UUID_SHA256_LEN (32) +#define NV_GPU_UUID_LEN NV_GPU_UUID_SHA1_LEN + +#ifdef __cplusplus +}; +#endif + +#endif /* NVGPUTYPES_INCLUDED */ diff --git a/src/common/sdk/nvidia/inc/nvi2c.h b/src/common/sdk/nvidia/inc/nvi2c.h new file mode 100644 index 0000000..28c1ba5 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvi2c.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _NV_I2C_H_
+#define _NV_I2C_H_
+
+#define NV_I2C_MSG_WR 0x0000
+#define NV_I2C_MSG_RD 0x0001
+
+typedef struct nv_i2c_msg_s
+{
+    NvU16 addr;
+    NvU16 flags;
+    NvU16 len;
+    NvU8* buf;
+} nv_i2c_msg_t;
+
+#endif
diff --git a/src/common/sdk/nvidia/inc/nvimpshared.h b/src/common/sdk/nvidia/inc/nvimpshared.h
new file mode 100644
index 0000000..7b821ff
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/nvimpshared.h
@@ -0,0 +1,93 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************************************************************\
+*                                                                              *
+* Description:                                                                 *
+*    Accommodates sharing of IMP-related structures between kernel interface   *
+*    files and core RM.                                                        *
+*                                                                              *
+\******************************************************************************/
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: nvimpshared.finn
+//
+
+
+
+
+//
+// There are only a small number of discrete dramclk frequencies available on
+// the system. This structure contains IMP-relevant information associated
+// with a specific dramclk frequency.
+// +typedef struct DRAM_CLK_INSTANCE { + NvU32 dram_clk_freq_khz; + + NvU32 mchub_clk_khz; + + NvU32 mc_clk_khz; + + NvU32 max_iso_bw_kbps; + + // + // switch_latency_ns is the maximum time required to switch the dramclk + // frequency to the frequency specified in dram_clk_freq_khz. + // + NvU32 switch_latency_ns; +} DRAM_CLK_INSTANCE; + +// +// This table is used to collect information from other modules that is needed +// for RM IMP calculations. (Used on Tegra only.) +// +typedef struct TEGRA_IMP_IMPORT_DATA { + // + // max_iso_bw_kbps stores the maximum possible ISO bandwidth available to + // display, assuming display is the only active ISO client. (Note that ISO + // bandwidth will typically be allocated to multiple clients, so display + // will generally not have access to the maximum possible bandwidth.) + // + NvU32 max_iso_bw_kbps; + + // On Orin, each dram channel is 16 bits wide. + NvU32 num_dram_channels; + + // + // dram_clk_instance stores entries for all possible dramclk frequencies, + // sorted by dramclk frequency in increasing order. + // + // "24" is expected to be larger than the actual number of required entries + // (which is provided by a BPMP API), but it can be increased if necessary. + // + // num_dram_clk_entries is filled in with the actual number of distinct + // dramclk entries. + // + NvU32 num_dram_clk_entries; + DRAM_CLK_INSTANCE dram_clk_instance[24]; +} TEGRA_IMP_IMPORT_DATA; diff --git a/src/common/sdk/nvidia/inc/nvlimits.h b/src/common/sdk/nvidia/inc/nvlimits.h new file mode 100644 index 0000000..e7fad3f --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvlimits.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: nvlimits.finn +// + + + + +/* + * This is the maximum number of GPUs supported in a single system. + */ +#define NV_MAX_DEVICES 32 + +/* + * This is the maximum number of subdevices within a single device. + */ +#define NV_MAX_SUBDEVICES 8 + +/* + * This is the maximum length of the process name string. + */ +#define NV_PROC_NAME_MAX_LENGTH 100U + +/* + * This is the maximum number of heads per GPU. + */ +#define NV_MAX_HEADS 4 + +/* + * Maximum length of a MIG device UUID. 
It is a 36-byte UUID string plus a
+ * 4-byte prefix and a NUL terminator: 'M' 'I' 'G' '-' UUID '\0'
+ */
+#define NV_MIG_DEVICE_UUID_STR_LENGTH 41U
diff --git a/src/common/sdk/nvidia/inc/nvmisc.h b/src/common/sdk/nvidia/inc/nvmisc.h
new file mode 100644
index 0000000..4407cdb
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/nvmisc.h
@@ -0,0 +1,1000 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * nvmisc.h
+ */
+#ifndef __NV_MISC_H
+#define __NV_MISC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif //__cplusplus
+
+#include "nvtypes.h"
+
+// Miscellaneous macros useful for bit field manipulations.
+#ifndef NVBIT
+#define NVBIT(b) (1U<<(b))
+#endif
+#ifndef NVBIT_TYPE
+#define NVBIT_TYPE(b, t) (((t)1U)<<(b))
+#endif
+#ifndef NVBIT32
+#define NVBIT32(b) NVBIT_TYPE(b, NvU32)
+#endif
+#ifndef NVBIT64
+#define NVBIT64(b) NVBIT_TYPE(b, NvU64)
+#endif
+
+// Concatenate two 32-bit values into a 64-bit value
+#define NV_CONCAT_32_TO_64(hi, lo) ((((NvU64)hi) << 32) | ((NvU64)lo))
+
+// Helper macros for 32-bit bitmasks
+#define NV_BITMASK32_ELEMENT_SIZE (sizeof(NvU32) << 3)
+#define NV_BITMASK32_IDX(chId) (((chId) & ~(0x1F)) >> 5)
+#define NV_BITMASK32_OFFSET(chId) ((chId) & (0x1F))
+#define NV_BITMASK32_SET(pChannelMask, chId) \
+    (pChannelMask)[NV_BITMASK32_IDX(chId)] |= NVBIT(NV_BITMASK32_OFFSET(chId))
+#define NV_BITMASK32_GET(pChannelMask, chId) \
+    ((pChannelMask)[NV_BITMASK32_IDX(chId)] & NVBIT(NV_BITMASK32_OFFSET(chId)))
+
+
+// Index of the 'on' bit (assuming that there is only one).
+// Even if multiple bits are 'on', result is in range of 0-31.
+#define BIT_IDX_32(n)                            \
+    (((((n) & 0xFFFF0000U) != 0U) ? 0x10U: 0U) | \
+     ((((n) & 0xFF00FF00U) != 0U) ? 0x08U: 0U) | \
+     ((((n) & 0xF0F0F0F0U) != 0U) ? 0x04U: 0U) | \
+     ((((n) & 0xCCCCCCCCU) != 0U) ? 0x02U: 0U) | \
+     ((((n) & 0xAAAAAAAAU) != 0U) ? 0x01U: 0U) )
+
+// Index of the 'on' bit (assuming that there is only one).
+// Even if multiple bits are 'on', result is in range of 0-63.
+#define BIT_IDX_64(n)                                        \
+    (((((n) & 0xFFFFFFFF00000000ULL) != 0U) ? 0x20U: 0U) |   \
+     ((((n) & 0xFFFF0000FFFF0000ULL) != 0U) ? 0x10U: 0U) |   \
+     ((((n) & 0xFF00FF00FF00FF00ULL) != 0U) ? 0x08U: 0U) |   \
+     ((((n) & 0xF0F0F0F0F0F0F0F0ULL) != 0U) ? 0x04U: 0U) |   \
+     ((((n) & 0xCCCCCCCCCCCCCCCCULL) != 0U) ? 0x02U: 0U) |   \
+     ((((n) & 0xAAAAAAAAAAAAAAAAULL) != 0U) ? 0x01U: 0U) )
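+
+// Usage sketch (illustrative only; all identifiers are defined above, and the
+// values are worked by hand):
+//
+//     NVBIT32(5)              == 0x00000020
+//     BIT_IDX_32(0x00000020)  == 5    // inverse of NVBIT32 for one-hot input
+//     NV_BITMASK32_IDX(37)    == 1    // channel 37 lives in word 1...
+//     NV_BITMASK32_OFFSET(37) == 5    // ...at bit 5 of that word
+//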
+
+/*!
+ * DRF MACRO README:
+ *
+ * Glossary:
+ *     DRF: Device, Register, Field
+ *     FLD: Field
+ *     REF: Reference
+ *
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA 0xDEADBEEF
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_GAMMA 27:0
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA 31:28
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_ZERO 0x00000000
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_ONE 0x00000001
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_TWO 0x00000002
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_THREE 0x00000003
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_FOUR 0x00000004
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_FIVE 0x00000005
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_SIX 0x00000006
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_SEVEN 0x00000007
+ *
+ *
+ * Device = _DEVICE_OMEGA
+ *     This is the common "base" that a group of registers in a manual share
+ *
+ * Register = _REGISTER_ALPHA
+ *     Register for a given block of defines is the common root for one or more fields and constants
+ *
+ * Field(s) = _FIELD_GAMMA, _FIELD_ZETA
+ *     These are the bit ranges for a given field within the register
+ *     Fields are not required to have defined constant values (enumerations)
+ *
+ * Constant(s) = _ZERO, _ONE, _TWO, ...
+ *     These are named values (enums) a field can contain; the width of the constants should not be larger than the field width
+ *
+ * MACROS:
+ *
+ * DRF_SHIFT:
+ *     Bit index of the lower bound of a field
+ *     DRF_SHIFT(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 28
+ *
+ * DRF_SHIFT_RT:
+ *     Bit index of the higher bound of a field
+ *     DRF_SHIFT_RT(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 31
+ *
+ * DRF_MASK:
+ *     Produces a mask of 1s equal to the width of a field
+ *     DRF_MASK(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 0xF (four 1s starting at bit 0)
+ *
+ * DRF_SHIFTMASK:
+ *     Produces a mask of 1s equal to the width of a field at the location of the field
+ *     DRF_SHIFTMASK(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 0xF0000000
+ *
+ * DRF_DEF:
+ *     Shifts a field constant's value to the correct field offset
+ *     DRF_DEF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE) == 0x30000000
+ *
+ * DRF_NUM:
+ *     Shifts a number to the location of a particular field
+ *     DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 3) == 0x30000000
+ *     NOTE: If the value passed in is wider than the field, the value's high bits will be truncated
+ *
+ * DRF_SIZE:
+ *     Provides the width of the field in bits
+ *     DRF_SIZE(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 4
+ *
+ * DRF_VAL:
+ *     Provides the value of an input within the field specified
+ *     DRF_VAL(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0xABCD1234) == 0xA
+ *     This is sort of like the inverse of DRF_NUM
+ *
+ * DRF_IDX...:
+ *     These macros are similar to the above but for fields that accept an index argument
+ *
+ * FLD_SET_DRF:
+ *     Set the field bits in a given value with the given field constant
+ *     NvU32 x = 0x00001234;
+ *     x = FLD_SET_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, x);
+ *     x == 0x30001234;
+ *
+ * FLD_SET_DRF_NUM:
+ *     Same as FLD_SET_DRF but instead of using a field constant a literal/variable is passed in
+ *     NvU32 x = 0x00001234;
+ *     x = FLD_SET_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0xF, x);
+ *     x == 0xF0001234;
+ *
+ * FLD_IDX...:
+ *     These macros are similar to the above but for fields that accept an index argument
+ *
+ * FLD_TEST_DRF:
+ *     Test if the location specified by drf in 'v' has the same value as NV_drfc
+ *     FLD_TEST_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, 0x3000ABCD) == NV_TRUE
+ *
+ * FLD_TEST_DRF_NUM:
+ *     Test if the locations specified by drf in 'v' have the same value as n
+ *     FLD_TEST_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0x3, 0x3000ABCD) == NV_TRUE
+ *
+ * REF_DEF:
+ *     Like DRF_DEF but maintains the full symbol name (use in cases where "NV" is not prefixed to the field)
+ *     REF_DEF(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, _THREE) == 0x30000000
+ *
+ * REF_VAL:
+ *     Like DRF_VAL but maintains the full symbol name (use in cases where "NV" is not prefixed to the field)
+ *     REF_VAL(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xABCD1234) == 0xA
+ *
+ * REF_NUM:
+ *     Like DRF_NUM but maintains the full symbol name (use in cases where "NV" is not prefixed to the field)
+ *     REF_NUM(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xA) == 0xA0000000
+ *
+ * FLD_SET_REF_NUM:
+ *     Like FLD_SET_DRF_NUM but maintains the full symbol name (use in cases where "NV" is not prefixed to the field)
+ *     NvU32 x = 0x00001234;
+ *     x = FLD_SET_REF_NUM(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xF, x);
+ *     x == 0xF0001234;
+ *
+ * FLD_TEST_REF:
+ *     Like FLD_TEST_DRF but maintains the full symbol name (use in cases where "NV" is not prefixed to the field)
+ *     FLD_TEST_REF(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, _THREE, 0x3000ABCD) == NV_TRUE
+ *
+ * Other macros:
+ *     There is a plethora of other macros below that extend the above (notably Multi-Word (MW), 64-bit, and some
+ *     reg read/write variations). I hope these are self-explanatory. If you have a need to use them, you
+ *     probably have some knowledge of how they work.
+ */
+
+// tegra mobile uses nvmisc_macros.h and can't access nvmisc.h... and sometimes both get included.
+#ifndef _NVMISC_MACROS_H
+// Use Coverity Annotation to mark issues as false positives/ignore when using single bit defines.
+#define DRF_ISBIT(bitval,drf)                \
+        ( /* coverity[identical_branches] */ \
+          (bitval != 0) ? drf )
+#define DEVICE_BASE(d) (0?d) // what's up with this name? totally non-parallel to the macros below
+#define DEVICE_EXTENT(d) (1?d) // what's up with this name? totally non-parallel to the macros below
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+#ifdef MISRA_14_3
+#define DRF_BASE(drf) (drf##_LOW_FIELD)
+#define DRF_EXTENT(drf) (drf##_HIGH_FIELD)
+#define DRF_SHIFT(drf) ((drf##_LOW_FIELD) % 32U)
+#define DRF_SHIFT_RT(drf) ((drf##_HIGH_FIELD) % 32U)
+#define DRF_SIZE(drf) ((drf##_HIGH_FIELD)-(drf##_LOW_FIELD)+1U)
+#define DRF_MASK(drf) (0xFFFFFFFFU >> (31U - ((drf##_HIGH_FIELD) % 32U) + ((drf##_LOW_FIELD) % 32U)))
+#else
+#define DRF_BASE(drf) (NV_FALSE?drf) // much better
+#define DRF_EXTENT(drf) (NV_TRUE?drf) // much better
+#define DRF_SHIFT(drf) (((NvU32)DRF_BASE(drf)) % 32U)
+#define DRF_SHIFT_RT(drf) (((NvU32)DRF_EXTENT(drf)) % 32U)
+#define DRF_SIZE(drf) (DRF_EXTENT(drf)-DRF_BASE(drf)+1U)
+#define DRF_MASK(drf) (0xFFFFFFFFU>>(31U - DRF_SHIFT_RT(drf) + DRF_SHIFT(drf)))
+#endif
+#define DRF_DEF(d,r,f,c) (((NvU32)(NV ## d ## r ## f ## c))<<DRF_SHIFT(NV ## d ## r ## f))
+#define DRF_NUM(d,r,f,n) ((((NvU32)(n))&DRF_MASK(NV ## d ## r ## f))<<DRF_SHIFT(NV ## d ## r ## f))
+#else
+#define DRF_BASE(drf) (0?drf) // much better
+#define DRF_EXTENT(drf) (1?drf) // much better
+#define DRF_SHIFT(drf) ((DRF_ISBIT(0,drf)) % 32)
+#define DRF_SHIFT_RT(drf) ((DRF_ISBIT(1,drf)) % 32)
+#define DRF_SIZE(drf) (DRF_EXTENT(drf)-DRF_BASE(drf)+1)
+#define DRF_MASK(drf) (0xFFFFFFFFU>>(31-((DRF_ISBIT(1,drf)) % 32)+((DRF_ISBIT(0,drf)) % 32)))
+#define DRF_DEF(d,r,f,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV ## d ## r ## f))
+#define DRF_NUM(d,r,f,n) (((n)&DRF_MASK(NV ## d ## r ## f))<<DRF_SHIFT(NV ## d ## r ## f))
+#endif
+#define DRF_SHIFTMASK(drf) (DRF_MASK(drf)<<(DRF_SHIFT(drf)))
+#define DRF_VAL(d,r,f,v) (((v)>>DRF_SHIFT(NV ## d ## r ## f))&DRF_MASK(NV ## d ## r ## f))
+#endif
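+
+// Usage sketch (illustrative only), reusing the hypothetical NV_DEVICE_OMEGA
+// register layout from the README above:
+//
+//     NvU32 reg = 0;
+//     reg |= DRF_DEF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE);
+//     reg |= DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_GAMMA, 0x1234);
+//     // reg == 0x30001234
+//     // DRF_VAL(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, reg) == 0x3
+//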
+// Signed version of DRF_VAL, which takes care of extending sign bit.
+#define DRF_VAL_SIGNED(d,r,f,v) (((DRF_VAL(d,r,f,(v)) ^ (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1U)))) - (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1U)))
+#define DRF_IDX_DEF(d,r,f,i,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV##d##r##f(i)))
+#define DRF_IDX_OFFSET_DEF(d,r,f,i,o,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV##d##r##f(i,o)))
+#define DRF_IDX_NUM(d,r,f,i,n) (((n)&DRF_MASK(NV##d##r##f(i)))<<DRF_SHIFT(NV##d##r##f(i)))
+#define DRF_IDX_VAL(d,r,f,i,v) (((v)>>DRF_SHIFT(NV##d##r##f(i)))&DRF_MASK(NV##d##r##f(i)))
+#define DRF_IDX_OFFSET_VAL(d,r,f,i,o,v) (((v)>>DRF_SHIFT(NV##d##r##f(i,o)))&DRF_MASK(NV##d##r##f(i,o)))
+// Fractional version of DRF_VAL which reads Fx.y fixed point number (x.y)*z
+#define DRF_VAL_FRAC(d,r,x,y,v,z) ((DRF_VAL(d,r,x,(v))*z) + ((DRF_VAL(d,r,y,v)*z) / (1<<DRF_SIZE(NV##d##r##y))))
+
+//
+// 64 Bit Versions
+//
+#define DRF_SHIFT64(drf) ((DRF_ISBIT(0,drf)) % 64)
+#define DRF_MASK64(drf) (NV_U64_MAX>>(63-((DRF_ISBIT(1,drf)) % 64)+((DRF_ISBIT(0,drf)) % 64)))
+#define DRF_SHIFTMASK64(drf) (DRF_MASK64(drf)<<(DRF_SHIFT64(drf)))
+
+#define DRF_DEF64(d,r,f,c) (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV ## d ## r ## f))
+#define DRF_NUM64(d,r,f,n) ((((NvU64)(n))&DRF_MASK64(NV ## d ## r ## f))<<DRF_SHIFT64(NV ## d ## r ## f))
+#define DRF_VAL64(d,r,f,v) ((((NvU64)(v))>>DRF_SHIFT64(NV ## d ## r ## f))&DRF_MASK64(NV ## d ## r ## f))
+
+#define DRF_VAL_SIGNED64(d,r,f,v) (((DRF_VAL64(d,r,f,(v)) ^ (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1)))) - (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1)))
+#define DRF_IDX_DEF64(d,r,f,i,c) (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV##d##r##f(i)))
+#define DRF_IDX_OFFSET_DEF64(d,r,f,i,o,c) (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV##d##r##f(i,o)))
+#define DRF_IDX_NUM64(d,r,f,i,n) ((((NvU64)(n))&DRF_MASK64(NV##d##r##f(i)))<<DRF_SHIFT64(NV##d##r##f(i)))
+#define DRF_IDX_VAL64(d,r,f,i,v) ((((NvU64)(v))>>DRF_SHIFT64(NV##d##r##f(i)))&DRF_MASK64(NV##d##r##f(i)))
+#define DRF_IDX_OFFSET_VAL64(d,r,f,i,o,v) (((NvU64)(v)>>DRF_SHIFT64(NV##d##r##f(i,o)))&DRF_MASK64(NV##d##r##f(i,o)))
+
+#define FLD_SET_DRF64(d,r,f,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c))
+#define FLD_SET_DRF_NUM64(d,r,f,n,v) ((((NvU64)(v)) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_NUM64(d,r,f,n))
+#define FLD_IDX_SET_DRF64(d,r,f,i,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c))
+#define FLD_IDX_OFFSET_SET_DRF64(d,r,f,i,o,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i,o))) | DRF_IDX_OFFSET_DEF64(d,r,f,i,o,c))
+#define FLD_IDX_SET_DRF_DEF64(d,r,f,i,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c))
+#define FLD_IDX_SET_DRF_NUM64(d,r,f,i,n,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_NUM64(d,r,f,i,n))
+#define FLD_SET_DRF_IDX64(d,r,f,c,i,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c(i)))
+
+#define FLD_TEST_DRF64(d,r,f,c,v) (DRF_VAL64(d, r, f, (v)) == NV##d##r##f##c)
+#define FLD_TEST_DRF_AND64(d,r,f,c,v) (DRF_VAL64(d, r, f, (v)) & NV##d##r##f##c)
+#define FLD_TEST_DRF_NUM64(d,r,f,n,v) (DRF_VAL64(d, r, f, (v)) == (n))
+#define FLD_IDX_TEST_DRF64(d,r,f,i,c,v) (DRF_IDX_VAL64(d, r, f, i, (v)) == NV##d##r##f##c)
+#define FLD_IDX_OFFSET_TEST_DRF64(d,r,f,i,o,c,v) (DRF_IDX_OFFSET_VAL64(d, r, f, i, o, (v)) == NV##d##r##f##c)
+
+#define REF_DEF64(drf,d) (((drf ## d)&DRF_MASK64(drf))<<DRF_SHIFT64(drf))
+#define REF_VAL64(drf,v) (((v)>>DRF_SHIFT64(drf))&DRF_MASK64(drf))
+#if defined(NV_MISRA_COMPLIANCE_REQUIRED) && defined(MISRA_14_3)
+#define REF_NUM64(drf,n) (((NvU64)(n)&(0xFFFFFFFFFFFFFFFFU>>(63U-((drf##_HIGH_FIELD) % 63U)+((drf##_LOW_FIELD) % 63U)))) << ((drf##_LOW_FIELD) % 63U))
+#else
+#define REF_NUM64(drf,n) (((NvU64)(n)&DRF_MASK64(drf))<<DRF_SHIFT64(drf))
+#endif
+
+#define REF_DEF(drf,d) (((drf ## d)&DRF_MASK(drf))<<DRF_SHIFT(drf))
+#define REF_VAL(drf,v) (((v)>>DRF_SHIFT(drf))&DRF_MASK(drf))
+#if defined(NV_MISRA_COMPLIANCE_REQUIRED) && defined(MISRA_14_3)
+#define REF_NUM(drf,n) (((n)&(0xFFFFFFFFU>>(31U-((drf##_HIGH_FIELD) % 32U)+((drf##_LOW_FIELD) % 32U)))) << ((drf##_LOW_FIELD) % 32U))
+#else
+#define REF_NUM(drf,n) (((n)&DRF_MASK(drf))<<DRF_SHIFT(drf))
+#endif
+
+#define FLD_SET_REF_NUM(drf,n,v) (((v) & ~DRF_SHIFTMASK(drf)) | REF_NUM(drf,n))
+#define FLD_TEST_REF(drf,c,v) (REF_VAL(drf, (v)) == (drf##c))
+
+#define FLD_SET_DRF(d,r,f,c,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_DEF(d,r,f,c))
+#define FLD_SET_DRF_NUM(d,r,f,n,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_NUM(d,r,f,n))
+#define FLD_IDX_SET_DRF(d,r,f,i,c,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f(i))) | DRF_IDX_DEF(d,r,f,i,c))
+#define FLD_IDX_OFFSET_SET_DRF(d,r,f,i,o,c,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f(i,o))) | DRF_IDX_OFFSET_DEF(d,r,f,i,o,c))
+#define FLD_IDX_SET_DRF_NUM(d,r,f,i,n,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f(i))) | DRF_IDX_NUM(d,r,f,i,n))
+
+#define FLD_TEST_DRF(d,r,f,c,v) (DRF_VAL(d, r, f, (v)) == NV##d##r##f##c)
+#define FLD_TEST_DRF_AND(d,r,f,c,v) (DRF_VAL(d, r, f, (v)) & NV##d##r##f##c)
+#define FLD_TEST_DRF_NUM(d,r,f,n,v) (DRF_VAL(d, r, f, (v)) == (n))
+#define FLD_IDX_TEST_DRF(d,r,f,i,c,v) (DRF_IDX_VAL(d, r, f, i, (v)) == NV##d##r##f##c)
+#define FLD_IDX_OFFSET_TEST_DRF(d,r,f,i,o,c,v) (DRF_IDX_OFFSET_VAL(d, r, f, i, o, (v)) == NV##d##r##f##c)
+
+#define CR_DRF_DEF(d,r,f,c) ((CR ## d ## r ## f ## c)<<DRF_SHIFT(CR ## d ## r ## f))
+#define CR_DRF_NUM(d,r,f,n) (((n)&DRF_MASK(CR ## d ## r ## f))<<DRF_SHIFT(CR ## d ## r ## f))
+#define CR_DRF_VAL(d,r,f,v) (((v)>>DRF_SHIFT(CR ## d ## r ## f))&DRF_MASK(CR ## d ## r ## f))
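+
+// Usage sketch (illustrative only): the 64-bit variants compose exactly like
+// their 32-bit counterparts. Assuming a hypothetical field definition
+// NV_DEVICE_OMEGA_REGISTER_BETA_FIELD_ADDR 39:8 (not defined anywhere; for
+// illustration only):
+//
+//     NvU64 reg64 = 0;
+//     reg64 = FLD_SET_DRF_NUM64(_DEVICE_OMEGA, _REGISTER_BETA, _FIELD_ADDR,
+//                               0xABCD1234ULL, reg64);
+//     // DRF_VAL64(_DEVICE_OMEGA, _REGISTER_BETA, _FIELD_ADDR, reg64)
+//     //     == 0xABCD1234ULL
+//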
+// Multi-word (MW) field manipulations. For multi-word structures (e.g., Fermi SPH),
+// fields may have bit numbers beyond 32. To avoid errors using "classic" multi-word macros,
+// all the field extents are defined as "MW(X)". For example, MW(127:96) means
+// the field is in bits 0-31 of word number 3 of the structure.
+//
+// DRF_VAL_MW() macro is meant to be used for native endian 32-bit aligned 32-bit word data,
+// not for byte stream data.
+//
+// DRF_VAL_BS() macro is for byte stream data used in fbQueryBIOS_XXX().
+//
+#define DRF_EXPAND_MW(drf) drf // used to turn "MW(a:b)" into "a:b"
+#define DRF_PICK_MW(drf,v) ((v)? DRF_EXPAND_##drf) // picks low or high bits
+#define DRF_WORD_MW(drf) (DRF_PICK_MW(drf,0)/32) // which word in a multi-word array
+#define DRF_BASE_MW(drf) (DRF_PICK_MW(drf,0)%32) // which start bit in the selected word?
+#define DRF_EXTENT_MW(drf) (DRF_PICK_MW(drf,1)%32) // which end bit in the selected word
+#define DRF_SHIFT_MW(drf) (DRF_PICK_MW(drf,0)%32)
+#define DRF_MASK_MW(drf) (0xFFFFFFFFU>>((31-(DRF_EXTENT_MW(drf))+(DRF_BASE_MW(drf)))%32))
+#define DRF_SHIFTMASK_MW(drf) ((DRF_MASK_MW(drf))<<(DRF_SHIFT_MW(drf)))
+#define DRF_SIZE_MW(drf) (DRF_EXTENT_MW(drf)-DRF_BASE_MW(drf)+1)
+
+#define DRF_DEF_MW(d,r,f,c) ((NV##d##r##f##c) << DRF_SHIFT_MW(NV##d##r##f))
+#define DRF_NUM_MW(d,r,f,n) (((n)&DRF_MASK_MW(NV##d##r##f))<<DRF_SHIFT_MW(NV##d##r##f))
+#define DRF_VAL_MW_1WORD(d,r,f,v) ((((v)[DRF_WORD_MW(NV##d##r##f)])>>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f))
+#define DRF_SPANS(drf) ((DRF_PICK_MW(drf,0)/32) != (DRF_PICK_MW(drf,1)/32))
+#define DRF_WORD_MW_LOW(drf) (DRF_PICK_MW(drf,0)/32)
+#define DRF_WORD_MW_HIGH(drf) (DRF_PICK_MW(drf,1)/32)
+#define DRF_MASK_MW_LOW(drf) (0xFFFFFFFFU)
+#define DRF_MASK_MW_HIGH(drf) (0xFFFFFFFFU>>(31-(DRF_EXTENT_MW(drf))))
+#define DRF_SHIFT_MW_LOW(drf) (DRF_PICK_MW(drf,0)%32)
+#define DRF_SHIFT_MW_HIGH(drf) (0)
+#define DRF_MERGE_SHIFT(drf) ((32-((DRF_PICK_MW(drf,0)%32)))%32)
+#define DRF_VAL_MW_2WORD(d,r,f,v) (((((v)[DRF_WORD_MW_LOW(NV##d##r##f)])>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \
+                                   (((((v)[DRF_WORD_MW_HIGH(NV##d##r##f)])>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f)))
+#define DRF_VAL_MW(d,r,f,v) ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_MW_2WORD(d,r,f,v) : DRF_VAL_MW_1WORD(d,r,f,v) )
+
+#define DRF_IDX_DEF_MW(d,r,f,i,c) ((NV##d##r##f##c)<<DRF_SHIFT_MW(NV##d##r##f(i)))
+#define DRF_IDX_NUM_MW(d,r,f,i,n) (((n)&DRF_MASK_MW(NV##d##r##f(i)))<<DRF_SHIFT_MW(NV##d##r##f(i)))
+#define DRF_IDX_VAL_MW(d,r,f,i,v) ((((v)[DRF_WORD_MW(NV##d##r##f(i))])>>DRF_SHIFT_MW(NV##d##r##f(i)))&DRF_MASK_MW(NV##d##r##f(i)))
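+
+// Usage sketch (illustrative only): given a hypothetical multi-word field
+// NV_FOO_SPH_FIELD_X defined as MW(127:96) (for illustration only), the field
+// occupies bits 31:0 of word 3, so for an NvU32 sph[4] array:
+//
+//     DRF_WORD_MW(NV_FOO_SPH_FIELD_X) == 3
+//     DRF_BASE_MW(NV_FOO_SPH_FIELD_X) == 0
+//     // Reads sph[3] bits 31:0; the field does not span words, so the
+//     // DRF_VAL_MW_1WORD() path is taken:
+//     DRF_VAL_MW(_FOO, _SPH, _FIELD_X, sph)
+//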
+//
+// Logically OR all DRF_DEF constants indexed from zero to s (semi-inclusive).
+// Caution: Target variable v must be pre-initialized.
+//
+#define FLD_IDX_OR_DRF_DEF(d,r,f,c,s,v)                 \
+do                                                      \
+{   NvU32 idx;                                          \
+    for (idx = 0; idx < (NV ## d ## r ## f ## s); ++idx)\
+    {                                                   \
+        v |= DRF_IDX_DEF(d,r,f,idx,c);                  \
+    }                                                   \
+} while(0)
+
+
+#define FLD_MERGE_MW(drf,n,v) (((v)[DRF_WORD_MW(drf)] & ~DRF_SHIFTMASK_MW(drf)) | n)
+#define FLD_ASSIGN_MW(drf,n,v) ((v)[DRF_WORD_MW(drf)] = FLD_MERGE_MW(drf, n, v))
+#define FLD_IDX_MERGE_MW(drf,i,n,v) (((v)[DRF_WORD_MW(drf(i))] & ~DRF_SHIFTMASK_MW(drf(i))) | n)
+#define FLD_IDX_ASSIGN_MW(drf,i,n,v) ((v)[DRF_WORD_MW(drf(i))] = FLD_MERGE_MW(drf(i), n, v))
+
+#define FLD_SET_DRF_MW(d,r,f,c,v) FLD_MERGE_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v)
+#define FLD_SET_DRF_NUM_MW(d,r,f,n,v) FLD_ASSIGN_MW(NV##d##r##f, DRF_NUM_MW(d,r,f,n), v)
+#define FLD_SET_DRF_DEF_MW(d,r,f,c,v) FLD_ASSIGN_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v)
+#define FLD_IDX_SET_DRF_MW(d,r,f,i,c,v) FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v)
+#define FLD_IDX_SET_DRF_DEF_MW(d,r,f,i,c,v) FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v)
+#define FLD_IDX_SET_DRF_NUM_MW(d,r,f,i,n,v) FLD_IDX_ASSIGN_MW(NV##d##r##f, i, DRF_IDX_NUM_MW(d,r,f,i,n), v)
+
+#define FLD_TEST_DRF_MW(d,r,f,c,v) ((DRF_VAL_MW(d, r, f, (v)) == NV##d##r##f##c))
+#define FLD_TEST_DRF_NUM_MW(d,r,f,n,v) ((DRF_VAL_MW(d, r, f, (v)) == n))
+#define FLD_IDX_TEST_DRF_MW(d,r,f,i,c,v) ((DRF_IDX_VAL_MW(d, r, f, i, (v)) == NV##d##r##f##c))
+
+#define DRF_VAL_BS(d,r,f,v) ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_BS_2WORD(d,r,f,(v)) : DRF_VAL_BS_1WORD(d,r,f,(v)) )
+
+//------------------------------------------------------------------------//
+//                                                                        //
+//  Common defines for engine register reference wrappers                 //
+//                                                                        //
+//  New engine addressing can be created like:                            //
+//  \#define ENG_REG_PMC(o,d,r)         NV##d##r                          //
+//  \#define ENG_IDX_REG_CE(o,d,i,r)    CE_MAP(o,r,i)                     //
+//                                                                        //
+//  See FB_FBPA* for more examples                                        //
+//------------------------------------------------------------------------//
+
+#define ENG_RD_REG(g,o,d,r) GPU_REG_RD32(g, ENG_REG##d(o,d,r))
+#define ENG_WR_REG(g,o,d,r,v) GPU_REG_WR32(g, ENG_REG##d(o,d,r), (v))
+#define ENG_RD_DRF(g,o,d,r,f) ((GPU_REG_RD32(g, ENG_REG##d(o,d,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define ENG_WR_DRF_DEF(g,o,d,r,f,c) GPU_REG_WR32(g, ENG_REG##d(o,d,r),(GPU_REG_RD32(g,ENG_REG##d(o,d,r))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
+#define ENG_TEST_DRF_DEF(g,o,d,r,f,c) (ENG_RD_DRF(g, o, d, r, f) == NV##d##r##f##c)
+
+#define ENG_RD_IDX_DRF(g,o,d,r,f,i) ((GPU_REG_RD32(g, ENG_REG##d(o,d,r(i)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define ENG_TEST_IDX_DRF_DEF(g,o,d,r,f,c,i) (ENG_RD_IDX_DRF(g, o, d, r, f, (i)) == NV##d##r##f##c)
+
+#define ENG_IDX_RD_REG(g,o,d,i,r) GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r))
+#define ENG_IDX_WR_REG(g,o,d,i,r,v) GPU_REG_WR32(g, ENG_IDX_REG##d(o,d,i,r), (v))
+
+#define ENG_IDX_RD_DRF(g,o,d,i,r,f) ((GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
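+
+// Usage sketch (illustrative only): writing a multi-word field in place,
+// again with the hypothetical NV_FOO_SPH_FIELD_X MW(127:96) used above:
+//
+//     NvU32 sph[4] = {0};
+//     FLD_SET_DRF_NUM_MW(_FOO, _SPH, _FIELD_X, 0xDEADBEEF, sph);
+//     // sph[3] == 0xDEADBEEF; sph[0..2] are unchanged
+//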
+//
+// DRF_READ_1WORD_BS() and DRF_READ_1WORD_BS_HIGH() do not read beyond the bytes that contain
+// the requested value. Reading beyond the actual data causes a page fault panic when the
+// immediately following page happened to be protected or not mapped.
+//
+#define DRF_VAL_BS_1WORD(d,r,f,v) ((DRF_READ_1WORD_BS(d,r,f,v)>>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f))
+#define DRF_VAL_BS_2WORD(d,r,f,v) (((DRF_READ_4BYTE_BS(NV##d##r##f,v)>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \
+                                   (((DRF_READ_1WORD_BS_HIGH(d,r,f,v)>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f)))
+
+#define DRF_READ_1BYTE_BS(drf,v) ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4]))
+#define DRF_READ_2BYTE_BS(drf,v) (DRF_READ_1BYTE_BS(drf,v)| \
+                                  ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+1])<<8))
+#define DRF_READ_3BYTE_BS(drf,v) (DRF_READ_2BYTE_BS(drf,v)| \
+                                  ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+2])<<16))
+#define DRF_READ_4BYTE_BS(drf,v) (DRF_READ_3BYTE_BS(drf,v)| \
+                                  ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+3])<<24))
+
+#define DRF_READ_1BYTE_BS_HIGH(drf,v) ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4]))
+#define DRF_READ_2BYTE_BS_HIGH(drf,v) (DRF_READ_1BYTE_BS_HIGH(drf,v)| \
+                                       ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+1])<<8))
+#define DRF_READ_3BYTE_BS_HIGH(drf,v) (DRF_READ_2BYTE_BS_HIGH(drf,v)| \
+                                       ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+2])<<16))
+#define DRF_READ_4BYTE_BS_HIGH(drf,v) (DRF_READ_3BYTE_BS_HIGH(drf,v)| \
+                                       ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+3])<<24))
+
+// Calculate 2^n - 1 and avoid shift counter overflow
+//
+// On Windows amd64, 64 << 64 => 1
+//
+#define NV_TWO_N_MINUS_ONE(n) (((1ULL<<(n/2))<<((n+1)/2))-1)
+
+//
+// Create a 64b bitmask with n bits set
+// This is the same as ((1ULL<<n) - 1), but avoids shift counter overflow for n == 64
+//
+#define NV_BITMASK64(n) ((NV_U64_MAX>>((n>64) ? 0 : (64-n))))
+
+#define DRF_READ_1WORD_BS(d,r,f,v) \
+    ((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS(NV##d##r##f,(v)): \
+     ((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS(NV##d##r##f,(v)): \
+      ((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS(NV##d##r##f,(v)): \
+       DRF_READ_4BYTE_BS(NV##d##r##f,(v)))))
+
+#define DRF_READ_1WORD_BS_HIGH(d,r,f,v) \
+    ((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS_HIGH(NV##d##r##f,(v)): \
+     ((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS_HIGH(NV##d##r##f,(v)): \
+      ((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS_HIGH(NV##d##r##f,(v)): \
+       DRF_READ_4BYTE_BS_HIGH(NV##d##r##f,(v)))))
+
+#define LOWESTBIT(x) ( (x) & (((x) - 1U) ^ (x)) )
+// Destructive operation on n32
+#define HIGHESTBIT(n32)     \
+{                           \
+    HIGHESTBITIDX_32(n32);  \
+    n32 = NVBIT(n32);       \
+}
+#define ONEBITSET(x) ( ((x) != 0U) && (((x) & ((x) - 1U)) == 0U) )
+
+// Destructive operation on n32
+#define NUMSETBITS_32(n32)                                        \
+{                                                                 \
+    n32 = n32 - ((n32 >> 1) & 0x55555555);                        \
+    n32 = (n32 & 0x33333333) + ((n32 >> 2) & 0x33333333);         \
+    n32 = (((n32 + (n32 >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; \
+}
+
+/*!
+ * Calculate number of bits set in a 32-bit unsigned integer.
+ * Pure typesafe alternative to @ref NUMSETBITS_32.
+ */
+static NV_FORCEINLINE NvU32
+nvPopCount32(const NvU32 x)
+{
+    NvU32 temp = x;
+    temp = temp - ((temp >> 1) & 0x55555555U);
+    temp = (temp & 0x33333333U) + ((temp >> 2) & 0x33333333U);
+    temp = (((temp + (temp >> 4)) & 0x0F0F0F0FU) * 0x01010101U) >> 24;
+    return temp;
+}
+
+/*!
+ * Calculate number of bits set in a 64-bit unsigned integer.
+ */
+static NV_FORCEINLINE NvU32
+nvPopCount64(const NvU64 x)
+{
+    NvU64 temp = x;
+    temp = temp - ((temp >> 1) & 0x5555555555555555ULL);
+    temp = (temp & 0x3333333333333333ULL) + ((temp >> 2) & 0x3333333333333333ULL);
+    temp = (temp + (temp >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
+    temp = (temp * 0x0101010101010101ULL) >> 56;
+    return (NvU32)temp;
+}
+
+/*!
+ * Determine how many bits are set below a bit index within a mask. + * This assigns a dense ordering to the set bits in the mask. + * + * For example the mask 0xCD contains 5 set bits: + * nvMaskPos32(0xCD, 0) == 0 + * nvMaskPos32(0xCD, 2) == 1 + * nvMaskPos32(0xCD, 3) == 2 + * nvMaskPos32(0xCD, 6) == 3 + * nvMaskPos32(0xCD, 7) == 4 + */ +static NV_FORCEINLINE NvU32 +nvMaskPos32(const NvU32 mask, const NvU32 bitIdx) +{ + return nvPopCount32(mask & (NVBIT32(bitIdx) - 1U)); +} + +// Destructive operation on n32 +#define LOWESTBITIDX_32(n32) \ +{ \ + n32 = BIT_IDX_32(LOWESTBIT(n32));\ +} + +// Destructive operation on n64 +#define LOWESTBITIDX_64(n64) \ +{ \ + n64 = BIT_IDX_64(LOWESTBIT(n64));\ +} + +// Destructive operation on n32 +#define HIGHESTBITIDX_32(n32) \ +{ \ + NvU32 count = 0; \ + while (n32 >>= 1) \ + { \ + count++; \ + } \ + n32 = count; \ +} + +// Destructive operation on n64 +#define HIGHESTBITIDX_64(n64) \ +{ \ + NvU64 count = 0; \ + while (n64 >>= 1) \ + { \ + count++; \ + } \ + n64 = count; \ +} + +// Destructive operation on n32 +#define ROUNDUP_POW2(n32) \ +{ \ + n32--; \ + n32 |= n32 >> 1; \ + n32 |= n32 >> 2; \ + n32 |= n32 >> 4; \ + n32 |= n32 >> 8; \ + n32 |= n32 >> 16; \ + n32++; \ +} + +/*! + * Round up a 32-bit unsigned integer to the next power of 2. + * Pure typesafe alternative to @ref ROUNDUP_POW2. + * + * param[in] x must be in range [0, 2^31] to avoid overflow. + */ +static NV_FORCEINLINE NvU32 +nvNextPow2_U32(const NvU32 x) +{ + NvU32 y = x; + y--; + y |= y >> 1; + y |= y >> 2; + y |= y >> 4; + y |= y >> 8; + y |= y >> 16; + y++; + return y; +} + + +static NV_FORCEINLINE NvU32 +nvPrevPow2_U32(const NvU32 x ) +{ + NvU32 y = x; + y |= (y >> 1); + y |= (y >> 2); + y |= (y >> 4); + y |= (y >> 8); + y |= (y >> 16); + return y - (y >> 1); +} + +static NV_FORCEINLINE NvU64 +nvPrevPow2_U64(const NvU64 x ) +{ + NvU64 y = x; + y |= (y >> 1); + y |= (y >> 2); + y |= (y >> 4); + y |= (y >> 8); + y |= (y >> 16); + y |= (y >> 32); + return y - (y >> 1); +} + +// Destructive operation on n64 +#define ROUNDUP_POW2_U64(n64) \ +{ \ + n64--; \ + n64 |= n64 >> 1; \ + n64 |= n64 >> 2; \ + n64 |= n64 >> 4; \ + n64 |= n64 >> 8; \ + n64 |= n64 >> 16; \ + n64 |= n64 >> 32; \ + n64++; \ +} + +#define NV_SWAP_U8(a,b) \ +{ \ + NvU8 temp; \ + temp = a; \ + a = b; \ + b = temp; \ +} + +#define NV_SWAP_U32(a,b) \ +{ \ + NvU32 temp; \ + temp = a; \ + a = b; \ + b = temp; \ +} + +/*! + * @brief Macros allowing simple iteration over bits set in a given mask. + * + * @param[in] maskWidth bit-width of the mask (allowed: 8, 16, 32, 64) + * + * @param[in,out] index lvalue that is used as a bit index in the loop + * (can be declared as any NvU* or NvS* variable) + * @param[in] mask expression, loop will iterate over set bits only + */ +#define FOR_EACH_INDEX_IN_MASK(maskWidth,index,mask) \ +{ \ + NvU##maskWidth lclMsk = (NvU##maskWidth)(mask); \ + for ((index) = 0U; lclMsk != 0U; (index)++, lclMsk >>= 1U)\ + { \ + if (((NvU##maskWidth)NVBIT64(0) & lclMsk) == 0U) \ + { \ + continue; \ + } +#define FOR_EACH_INDEX_IN_MASK_END \ + } \ +} + +/*! + * Returns the position of nth set bit in the given mask. + * + * Returns -1 if mask has fewer than n bits set. + * + * n is 0 indexed and has valid values 0..31 inclusive, so "zeroth" set bit is + * the first set LSB. + * + * Example, if mask = 0x000000F0u and n = 1, the return value will be 5. + * Example, if mask = 0x000000F0u and n = 4, the return value will be -1. 
+ */
+static NV_FORCEINLINE NvS32
+nvGetNthSetBitIndex32(NvU32 mask, NvU32 n)
+{
+    NvU32 seenSetBitsCount = 0;
+    NvS32 index;
+    FOR_EACH_INDEX_IN_MASK(32, index, mask)
+    {
+        if (seenSetBitsCount == n)
+        {
+            return index;
+        }
+        ++seenSetBitsCount;
+    }
+    FOR_EACH_INDEX_IN_MASK_END;
+
+    return -1;
+}
+
+//
+// Size to use when declaring variable-sized arrays
+//
+#define NV_ANYSIZE_ARRAY 1
+
+//
+// Returns ceil(a/b)
+//
+#define NV_CEIL(a,b) (((a)+(b)-1)/(b))
+
+// Clearer name for NV_CEIL
+#ifndef NV_DIV_AND_CEIL
+#define NV_DIV_AND_CEIL(a, b) NV_CEIL(a,b)
+#endif
+
+#ifndef NV_MIN
+#define NV_MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef NV_MAX
+#define NV_MAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+//
+// Returns the absolute value of the provided integer expression
+//
+#define NV_ABS(a) ((a)>=0?(a):(-(a)))
+
+//
+// Returns 1 if the input number is positive, 0 if it is zero, and -1 if it is
+// negative. Avoid passing a function call as the macro parameter, since it
+// will be evaluated more than once and its side effects will repeat.
+//
+#define NV_SIGN(s) ((NvS8)(((s) > 0) - ((s) < 0)))
+
+//
+// Returns 1 if the input number is >= 0, or -1 otherwise. This assumes 0 has a
+// positive sign.
+//
+#define NV_ZERO_SIGN(s) ((NvS8)((((s) >= 0) * 2) - 1))
+
+// Returns the offset (in bytes) of 'member' in struct 'type'.
+#ifndef NV_OFFSETOF
+    #if defined(__GNUC__) && (__GNUC__ > 3)
+        #define NV_OFFSETOF(type, member) ((NvUPtr) __builtin_offsetof(type, member))
+    #else
+        #define NV_OFFSETOF(type, member) ((NvUPtr) &(((type *)0)->member))
+    #endif
+#endif
+
+// Given a pointer to a member of a struct and that member's name within the
+// parent struct type, return a pointer to the parent struct.
+#define NV_CONTAINEROF(ptr, type, member) ((type *) (((NvUPtr) ptr) - NV_OFFSETOF(type, member)))
+
+//
+// Performs a rounded division of b into a (unsigned). For the SIGNED version
+// of the NV_ROUNDED_DIV() macro check the comments in bug 769777.
+//
+#define NV_UNSIGNED_ROUNDED_DIV(a,b) (((a) + ((b) / 2U)) / (b))
+
+/*!
+ * Performs a ceiling division of b into a (unsigned). A "ceiling" division is
+ * a division that rounds the result up if a % b != 0.
+ *
+ * @param[in] a Numerator
+ * @param[in] b Denominator
+ *
+ * @return a / b + (a % b != 0 ? 1 : 0).
+ */
+#define NV_UNSIGNED_DIV_CEIL(a, b) (((a) + ((b) - 1)) / (b))
+
+/*!
+ * Performs subtraction where a negative difference is raised to zero.
+ * Can be used to avoid underflowing an unsigned subtraction.
+ *
+ * @param[in] a Minuend
+ * @param[in] b Subtrahend
+ *
+ * @return a > b ? a - b : 0.
+ */
+#define NV_SUBTRACT_NO_UNDERFLOW(a, b) ((a)>(b) ? (a)-(b) : 0)
+
+/*!
+ * Performs a rounded right-shift of 32-bit unsigned value "a" by "shift" bits.
+ * Will round the result away from zero.
+ *
+ * @param[in] a     32-bit unsigned value to shift.
+ * @param[in] shift Number of bits by which to shift.
+ *
+ * @return Resulting shifted value rounded away from zero.
+ */
+#define NV_RIGHT_SHIFT_ROUNDED(a, shift) \
+    (((a) >> (shift)) + !!((NVBIT((shift) - 1) & (a)) == NVBIT((shift) - 1)))
+
+//
+// Power of 2 alignment.
+// (Will give unexpected results if 'gran' is not a power of 2.)
+//
+#ifndef NV_ALIGN_DOWN
+//
+// Notably using v - v + gran ensures gran gets promoted to the same type as v if gran has a smaller type.
+// Otherwise, if aligning an NvU64 with NvU32 granularity, the top 4 bytes get zeroed.
+//
+#define NV_ALIGN_DOWN(v, gran) ((v) & ~((v) - (v) + (gran) - 1))
+#endif
+
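+// Usage sketch (illustrative only; every macro is defined above, and the
+// arithmetic is worked by hand):
+//
+//     NV_UNSIGNED_DIV_CEIL(7U, 2U)     == 4U       // 7/2 rounded up
+//     NV_UNSIGNED_ROUNDED_DIV(7U, 2U)  == 4U       // (7 + 1) / 2
+//     NV_SUBTRACT_NO_UNDERFLOW(2U, 7U) == 0U       // clamped, no wraparound
+//     NV_ALIGN_DOWN(0x1234U, 0x1000U)  == 0x1000U
+//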
+#ifndef NV_ALIGN_UP
+//
+// Notably using v - v + gran ensures gran gets promoted to the same type as v if gran has a smaller type.
+// Otherwise, if aligning an NvU64 with NvU32 granularity, the top 4 bytes get zeroed.
+//
+#define NV_ALIGN_UP(v, gran) (((v) + ((gran) - 1)) & ~((v) - (v) + (gran) - 1))
+#endif
+
+#ifndef NV_ALIGN_DOWN64
+#define NV_ALIGN_DOWN64(v, gran) ((v) & ~(((NvU64)gran) - 1))
+#endif
+
+#ifndef NV_ALIGN_UP64
+#define NV_ALIGN_UP64(v, gran) (((v) + ((gran) - 1)) & ~(((NvU64)gran)-1))
+#endif
+
+#ifndef NV_IS_ALIGNED
+#define NV_IS_ALIGNED(v, gran) (0U == ((v) & ((gran) - 1U)))
+#endif
+
+#ifndef NV_IS_ALIGNED64
+#define NV_IS_ALIGNED64(v, gran) (0U == ((v) & (((NvU64)gran) - 1U)))
+#endif
+
+#ifndef NVMISC_MEMSET
+static NV_FORCEINLINE void *NVMISC_MEMSET(void *s, NvU8 c, NvLength n)
+{
+    NvU8 *b = (NvU8 *) s;
+    NvLength i;
+
+    for (i = 0; i < n; i++)
+    {
+        b[i] = c;
+    }
+
+    return s;
+}
+#endif
+
+#ifndef NVMISC_MEMCPY
+static NV_FORCEINLINE void *NVMISC_MEMCPY(void *dest, const void *src, NvLength n)
+{
+    NvU8 *destByte = (NvU8 *) dest;
+    const NvU8 *srcByte = (const NvU8 *) src;
+    NvLength i;
+
+    for (i = 0; i < n; i++)
+    {
+        destByte[i] = srcByte[i];
+    }
+
+    return dest;
+}
+#endif
+
+static NV_FORCEINLINE char *NVMISC_STRNCPY(char *dest, const char *src, NvLength n)
+{
+    NvLength i;
+
+    for (i = 0; i < n; i++)
+    {
+        dest[i] = src[i];
+        if (src[i] == '\0')
+        {
+            break;
+        }
+    }
+
+    for (; i < n; i++)
+    {
+        dest[i] = '\0';
+    }
+
+    return dest;
+}
+
+/*!
+ * Convert a void* to an NvUPtr. This is used when MISRA forbids us from doing a direct cast.
+ *
+ * @param[in] ptr Pointer to be converted
+ *
+ * @return Resulting NvUPtr
+ */
+static NV_FORCEINLINE NvUPtr NV_PTR_TO_NVUPTR(void *ptr)
+{
+    union
+    {
+        NvUPtr v;
+        void *p;
+    } uAddr;
+
+    uAddr.p = ptr;
+    return uAddr.v;
+}
+
+/*!
+ * Convert an NvUPtr to a void*. This is used when MISRA forbids us from doing a direct cast.
+ *
+ * @param[in] address Address to be converted
+ *
+ * @return Resulting void *
+ */
+static NV_FORCEINLINE void *NV_NVUPTR_TO_PTR(NvUPtr address)
+{
+    union
+    {
+        NvUPtr v;
+        void *p;
+    } uAddr;
+
+    uAddr.v = address;
+    return uAddr.p;
+}
+
+// Get bit at pos (k) from x
+#define NV_BIT_GET(k, x) (((x) >> (k)) & 1)
+// Get bit at pos (n) from (hi) if (n) >= 64, otherwise from (lo). This is paired with NV_BIT_SET_128, which sets the bit.
+#define NV_BIT_GET_128(n, lo, hi) (((n) < 64) ? NV_BIT_GET((n), (lo)) : NV_BIT_GET((n) - 64, (hi)))
+//
+// Set the bit at pos (b), where (b) is < 128. Since (b) can be >= 64, two NvU64
+// values are needed to store the bits. Use (lo) if (b) is less than 64, and (hi) if (b) is >= 64.
+//
+#define NV_BIT_SET_128(b, lo, hi) { nvAssert( (b) < 128 ); if ( (b) < 64 ) (lo) |= NVBIT64(b); else (hi) |= NVBIT64( b & 0x3F ); }
+//
+// Clear the bit at pos (b), where (b) is < 128.
+// Use (lo) if (b) is less than 64, and (hi) if (b) is >= 64.
+//
+#define NV_BIT_CLEAR_128(b, lo, hi) { nvAssert( (b) < 128 ); if ( (b) < 64 ) (lo) &= ~NVBIT64(b); else (hi) &= ~NVBIT64( b & 0x3F ); }
+
+// Get the number of elements in the specified fixed-size array
+#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0])))
+
+#if !defined(NVIDIA_UNDEF_LEGACY_BIT_MACROS)
+//
+// Deprecated macros whose definitions can be removed once the code base no longer references them.
+// Use the NVBIT* macros instead of these macros.
+// +#ifndef BIT +#define BIT(b) (1U<<(b)) +#endif +#ifndef BIT32 +#define BIT32(b) ((NvU32)1U<<(b)) +#endif +#ifndef BIT64 +#define BIT64(b) ((NvU64)1U<<(b)) +#endif +#endif + +#ifdef __cplusplus +} +#endif //__cplusplus + +#endif // __NV_MISC_H + diff --git a/src/common/sdk/nvidia/inc/nvos.h b/src/common/sdk/nvidia/inc/nvos.h new file mode 100644 index 0000000..7c18579 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvos.h @@ -0,0 +1,2955 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /***************************************************************************\ +|* *| +|* NV Architecture Interface *| +|* *| +|* defines the Operating System function and ioctl interfaces to *| +|* NVIDIA's Unified Media Architecture (TM). *| +|* *| + \***************************************************************************/ + +#ifndef NVOS_INCLUDED +#define NVOS_INCLUDED +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvstatus.h" + +#include "nvgputypes.h" +#include "rs_access.h" +#include "nvcfg_sdk.h" + + +// Temporary include. Please include this directly instead of nvos.h +#include "alloc/alloc_channel.h" + +/* local defines here */ +#define FILE_DEVICE_NV 0x00008000 +#define NV_IOCTL_FCT_BASE 0x00000800 + +// This is the maximum number of subdevices supported in an SLI +// configuration. +#define NVOS_MAX_SUBDEVICES 8 + +/* Define to indicate the use of Unified status codes - bug 200043705*/ +#define UNIFIED_NV_STATUS 1 + + /***************************************************************************\ +|* NV OS Functions *| + \***************************************************************************/ + +/* + Result codes for RM APIs, shared for all the APIs + + *** IMPORTANT *** + + Ensure that no NVOS_STATUS value has the highest bit set. That bit + is used to passthrough the NVOS_STATUS on code expecting an RM_STATUS. 
+*/ +#define NVOS_STATUS NV_STATUS + +#define NVOS_STATUS_SUCCESS NV_OK +#define NVOS_STATUS_ERROR_CARD_NOT_PRESENT NV_ERR_CARD_NOT_PRESENT +#define NVOS_STATUS_ERROR_DUAL_LINK_INUSE NV_ERR_DUAL_LINK_INUSE +#define NVOS_STATUS_ERROR_GENERIC NV_ERR_GENERIC +#define NVOS_STATUS_ERROR_GPU_NOT_FULL_POWER NV_ERR_GPU_NOT_FULL_POWER +#define NVOS_STATUS_ERROR_ILLEGAL_ACTION NV_ERR_ILLEGAL_ACTION +#define NVOS_STATUS_ERROR_IN_USE NV_ERR_STATE_IN_USE +#define NVOS_STATUS_ERROR_INSUFFICIENT_RESOURCES NV_ERR_INSUFFICIENT_RESOURCES +#define NVOS_STATUS_ERROR_INSUFFICIENT_ZBC_ENTRY NV_ERR_INSUFFICIENT_ZBC_ENTRY +#define NVOS_STATUS_ERROR_INVALID_ACCESS_TYPE NV_ERR_INVALID_ACCESS_TYPE +#define NVOS_STATUS_ERROR_INVALID_ARGUMENT NV_ERR_INVALID_ARGUMENT +#define NVOS_STATUS_ERROR_INVALID_BASE NV_ERR_INVALID_BASE +#define NVOS_STATUS_ERROR_INVALID_CHANNEL NV_ERR_INVALID_CHANNEL +#define NVOS_STATUS_ERROR_INVALID_CLASS NV_ERR_INVALID_CLASS +#define NVOS_STATUS_ERROR_INVALID_CLIENT NV_ERR_INVALID_CLIENT +#define NVOS_STATUS_ERROR_INVALID_COMMAND NV_ERR_INVALID_COMMAND +#define NVOS_STATUS_ERROR_INVALID_DATA NV_ERR_INVALID_DATA +#define NVOS_STATUS_ERROR_INVALID_DEVICE NV_ERR_INVALID_DEVICE +#define NVOS_STATUS_ERROR_INVALID_DMA_SPECIFIER NV_ERR_INVALID_DMA_SPECIFIER +#define NVOS_STATUS_ERROR_INVALID_EVENT NV_ERR_INVALID_EVENT +#define NVOS_STATUS_ERROR_INVALID_FLAGS NV_ERR_INVALID_FLAGS +#define NVOS_STATUS_ERROR_INVALID_FUNCTION NV_ERR_INVALID_FUNCTION +#define NVOS_STATUS_ERROR_INVALID_HEAP NV_ERR_INVALID_HEAP +#define NVOS_STATUS_ERROR_INVALID_INDEX NV_ERR_INVALID_INDEX +#define NVOS_STATUS_ERROR_INVALID_LIMIT NV_ERR_INVALID_LIMIT +#define NVOS_STATUS_ERROR_INVALID_METHOD NV_ERR_INVALID_METHOD +#define NVOS_STATUS_ERROR_INVALID_OBJECT_BUFFER NV_ERR_BUFFER_TOO_SMALL +#define NVOS_STATUS_ERROR_INVALID_OBJECT_ERROR NV_ERR_INVALID_OBJECT +#define NVOS_STATUS_ERROR_INVALID_OBJECT_HANDLE NV_ERR_INVALID_OBJECT_HANDLE +#define NVOS_STATUS_ERROR_INVALID_OBJECT_NEW NV_ERR_INVALID_OBJECT_NEW +#define NVOS_STATUS_ERROR_INVALID_OBJECT_OLD NV_ERR_INVALID_OBJECT_OLD +#define NVOS_STATUS_ERROR_INVALID_OBJECT_PARENT NV_ERR_INVALID_OBJECT_PARENT +#define NVOS_STATUS_ERROR_INVALID_OFFSET NV_ERR_INVALID_OFFSET +#define NVOS_STATUS_ERROR_INVALID_OWNER NV_ERR_INVALID_OWNER +#define NVOS_STATUS_ERROR_INVALID_PARAM_STRUCT NV_ERR_INVALID_PARAM_STRUCT +#define NVOS_STATUS_ERROR_INVALID_PARAMETER NV_ERR_INVALID_PARAMETER +#define NVOS_STATUS_ERROR_INVALID_POINTER NV_ERR_INVALID_POINTER +#define NVOS_STATUS_ERROR_INVALID_REGISTRY_KEY NV_ERR_INVALID_REGISTRY_KEY +#define NVOS_STATUS_ERROR_INVALID_STATE NV_ERR_INVALID_STATE +#define NVOS_STATUS_ERROR_INVALID_STRING_LENGTH NV_ERR_INVALID_STRING_LENGTH +#define NVOS_STATUS_ERROR_INVALID_XLATE NV_ERR_INVALID_XLATE +#define NVOS_STATUS_ERROR_IRQ_NOT_FIRING NV_ERR_IRQ_NOT_FIRING +#define NVOS_STATUS_ERROR_MULTIPLE_MEMORY_TYPES NV_ERR_MULTIPLE_MEMORY_TYPES +#define NVOS_STATUS_ERROR_NOT_SUPPORTED NV_ERR_NOT_SUPPORTED +#define NVOS_STATUS_ERROR_OPERATING_SYSTEM NV_ERR_OPERATING_SYSTEM +#define NVOS_STATUS_ERROR_LIB_RM_VERSION_MISMATCH NV_ERR_LIB_RM_VERSION_MISMATCH +#define NVOS_STATUS_ERROR_PROTECTION_FAULT NV_ERR_PROTECTION_FAULT +#define NVOS_STATUS_ERROR_TIMEOUT NV_ERR_TIMEOUT +#define NVOS_STATUS_ERROR_TOO_MANY_PRIMARIES NV_ERR_TOO_MANY_PRIMARIES +#define NVOS_STATUS_ERROR_IRQ_EDGE_TRIGGERED NV_ERR_IRQ_EDGE_TRIGGERED +#define NVOS_STATUS_ERROR_INVALID_OPERATION NV_ERR_INVALID_OPERATION +#define NVOS_STATUS_ERROR_NOT_COMPATIBLE NV_ERR_NOT_COMPATIBLE +#define 
NVOS_STATUS_ERROR_MORE_PROCESSING_REQUIRED NV_WARN_MORE_PROCESSING_REQUIRED
+#define NVOS_STATUS_ERROR_INSUFFICIENT_PERMISSIONS NV_ERR_INSUFFICIENT_PERMISSIONS
+#define NVOS_STATUS_ERROR_TIMEOUT_RETRY NV_ERR_TIMEOUT_RETRY
+#define NVOS_STATUS_ERROR_NOT_READY NV_ERR_NOT_READY
+#define NVOS_STATUS_ERROR_GPU_IS_LOST NV_ERR_GPU_IS_LOST
+#define NVOS_STATUS_ERROR_IN_FULLCHIP_RESET NV_ERR_GPU_IN_FULLCHIP_RESET
+#define NVOS_STATUS_ERROR_INVALID_LOCK_STATE NV_ERR_INVALID_LOCK_STATE
+#define NVOS_STATUS_ERROR_INVALID_ADDRESS NV_ERR_INVALID_ADDRESS
+#define NVOS_STATUS_ERROR_INVALID_IRQ_LEVEL NV_ERR_INVALID_IRQ_LEVEL
+#define NVOS_STATUS_ERROR_MEMORY_TRAINING_FAILED NV_ERR_MEMORY_TRAINING_FAILED
+#define NVOS_STATUS_ERROR_BUSY_RETRY NV_ERR_BUSY_RETRY
+#define NVOS_STATUS_ERROR_INSUFFICIENT_POWER NV_ERR_INSUFFICIENT_POWER
+#define NVOS_STATUS_ERROR_OBJECT_NOT_FOUND NV_ERR_OBJECT_NOT_FOUND
+#define NVOS_STATUS_ERROR_RESOURCE_LOST NV_ERR_RESOURCE_LOST
+#define NVOS_STATUS_ERROR_BUFFER_TOO_SMALL NV_ERR_BUFFER_TOO_SMALL
+#define NVOS_STATUS_ERROR_RESET_REQUIRED NV_ERR_RESET_REQUIRED
+#define NVOS_STATUS_ERROR_INVALID_REQUEST NV_ERR_INVALID_REQUEST
+
+#define NVOS_STATUS_ERROR_PRIV_SEC_VIOLATION NV_ERR_PRIV_SEC_VIOLATION
+#define NVOS_STATUS_ERROR_GPU_IN_DEBUG_MODE NV_ERR_GPU_IN_DEBUG_MODE
+#define NVOS_STATUS_ERROR_ALREADY_SIGNALLED NV_ERR_ALREADY_SIGNALLED
+
+/*
+  Note:
+    This version of the architecture has been changed to allow the
+    RM to return a client handle that will subsequently be used to
+    identify the client. NvAllocRoot() returns the handle. All
+    other functions must specify this client handle.
+
+*/
+/* macro NV01_FREE */
+#define NV01_FREE (0x00000000)
+
+/* NT ioctl data structure */
+typedef struct
+{
+    NvHandle hRoot;
+    NvHandle hObjectParent;
+    NvHandle hObjectOld;
+    NvV32    status;
+} NVOS00_PARAMETERS;
+
+/* valid hClass values. */
+#define NV01_ROOT (0x0U)
+//
+// Redefining it here to maintain consistency with current code
+// This is also defined in class cl0001.h
+//
+#define NV01_ROOT_NON_PRIV (0x00000001)
+
+// Deprecated, please use NV01_ROOT_CLIENT
+#define NV01_ROOT_USER NV01_ROOT_CLIENT
+
+//
+// This will eventually replace NV01_ROOT_USER in RM client code. Please use this
+// RM client object type for any new RM client object allocations that are being
+// added.
+//
+#define NV01_ROOT_CLIENT (0x00000041)
+
+/* macro NV01_ALLOC_MEMORY */
+#define NV01_ALLOC_MEMORY (0x00000002)
+
+/* parameter values */
+#define NVOS02_FLAGS_PHYSICALITY 7:4
+#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000)
+#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001)
+#define NVOS02_FLAGS_LOCATION 11:8
+#define NVOS02_FLAGS_LOCATION_PCI (0x00000000)
+#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002)
+#define NVOS02_FLAGS_COHERENCY 15:12
+#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000)
+#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001)
+#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002)
+#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003)
+#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004)
+#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005)
+#define NVOS02_FLAGS_ALLOC 17:16
+#define NVOS02_FLAGS_ALLOC_NONE (0x00000001)
+#define NVOS02_FLAGS_GPU_CACHEABLE 18:18
+#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000)
+#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001)
+// If requested, RM will create a kernel mapping of this memory.
+// Default is no map.
+#define NVOS02_FLAGS_KERNEL_MAPPING 19:19 +#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000) +#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001) +#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20 +#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000) +#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001) + +// +// If the flag is set, the RM will only allow read-only CPU user mappings to the +// allocation. +// +#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21 +#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000) +#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001) + +// +// If the flag is set, the RM will only allow read-only DMA mappings to the +// allocation. +// +#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22 +#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000) +#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001) + +// +// If the flag is set, the IO memory allocation can be registered with the RM if +// the RM regkey peerMappingOverride is set or the client is privileged. +// +// See Bug 1630288 "[PeerSync] threat related to GPU.." for more details. +// +#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23 +#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000) +#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001) + +// If the flag is set RM will assume the memory pages are of type syncpoint. +#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24 +#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001) + +// +// Allow client allocations to go to protected/unprotected video/system memory. +// When Ampere Protected Model aka APM or Confidential Compute is enabled and +// DEFAULT flag is set by client, allocations go to protected memory. When +// protected memory is not enabled, allocations go to unprotected memory. +// If APM or CC is not enabled, it is a bug for a client to set the PROTECTED +// flag to YES +// +#define NVOS02_FLAGS_MEMORY_PROTECTION 26:25 +#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT (0x00000000) +#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED (0x00000001) +#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED (0x00000002) + +// +// When allocating memory, register the memory descriptor to GSP-RM +// so that GSP-RM is aware of and can access it +// +#define NVOS02_FLAGS_REGISTER_MEMDESC_TO_PHYS_RM 27:27 +#define NVOS02_FLAGS_REGISTER_MEMDESC_TO_PHYS_RM_FALSE (0x00000000) +#define NVOS02_FLAGS_REGISTER_MEMDESC_TO_PHYS_RM_TRUE (0x00000001) + +// +// If _NO_MAP is requested, the RM in supported platforms will not map the +// allocated system or IO memory into user space. The client can later map +// memory through the RmMapMemory() interface. 
+// If _NEVER_MAP is requested, the RM will never map the allocated system or
+// IO memory into user space
+//
+#define NVOS02_FLAGS_MAPPING 31:30
+#define NVOS02_FLAGS_MAPPING_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001)
+#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002)
+
+// -------------------------------------------------------------------------------------
+
+/* parameters */
+typedef struct
+{
+    NvHandle hRoot;
+    NvHandle hObjectParent;
+    NvHandle hObjectNew;
+    NvV32    hClass;
+    NvV32    flags;
+    NvP64    pMemory NV_ALIGN_BYTES(8);
+    NvU64    limit NV_ALIGN_BYTES(8);
+    NvV32    status;
+} NVOS02_PARAMETERS;
+
+/* parameter values */
+#define NVOS03_FLAGS_ACCESS 1:0
+#define NVOS03_FLAGS_ACCESS_READ_WRITE (0x00000000)
+#define NVOS03_FLAGS_ACCESS_READ_ONLY (0x00000001)
+#define NVOS03_FLAGS_ACCESS_WRITE_ONLY (0x00000002)
+
+#define NVOS03_FLAGS_PREALLOCATE 2:2
+#define NVOS03_FLAGS_PREALLOCATE_DISABLE (0x00000000)
+#define NVOS03_FLAGS_PREALLOCATE_ENABLE (0x00000001)
+
+#define NVOS03_FLAGS_GPU_MAPPABLE 15:15
+#define NVOS03_FLAGS_GPU_MAPPABLE_DISABLE (0x00000000)
+#define NVOS03_FLAGS_GPU_MAPPABLE_ENABLE (0x00000001)
+
+// ------------------------------------------------------------------------------------
+// This flag is required for a hack to be placed inside DD that allows it to
+// access a dummy ctxdma as a block linear surface. Refer to bug 1562766 for details.
+//
+// This flag is deprecated, use NVOS03_FLAGS_PTE_KIND.
+//
+#define NVOS03_FLAGS_PTE_KIND_BL_OVERRIDE 16:16
+#define NVOS03_FLAGS_PTE_KIND_BL_OVERRIDE_FALSE (0x00000000)
+#define NVOS03_FLAGS_PTE_KIND_BL_OVERRIDE_TRUE (0x00000001)
+
+/*
+ * This field allows the caller to specify the page kind. If the page kind
+ * is not specified, then the page kind associated with the memory will be used.
+ *
+ * In the Tegra display driver stack, the page kind remains unknown at the time
+ * of memory allocation/import; the page kind can only be known when the display
+ * driver client creates a framebuffer from the allocated/imported memory.
+ *
+ * This field is compatible with the NVOS03_FLAGS_PTE_KIND_BL_OVERRIDE flag.
+ */
+#define NVOS03_FLAGS_PTE_KIND 17:16
+#define NVOS03_FLAGS_PTE_KIND_NONE (0x00000000)
+#define NVOS03_FLAGS_PTE_KIND_BL (0x00000001)
+#define NVOS03_FLAGS_PTE_KIND_PITCH (0x00000002)
+
+#define NVOS03_FLAGS_TYPE 23:20
+#define NVOS03_FLAGS_TYPE_NOTIFIER (0x00000001)
+
+/*
+ * This is an alias into the LSB of the TYPE field, which
+ * actually indicates whether a kernel mapping should be created.
+ * If the RM should have access to the memory, then enable this
+ * flag.
+ *
+ * Note that NV_OS03_FLAGS_MAPPING is an alias to
+ * the LSB of NV_OS03_FLAGS_TYPE. And in fact, if
+ * type is NV_OS03_FLAGS_TYPE_NOTIFIER (bit 20 set),
+ * then it implicitly means that NV_OS03_FLAGS_MAPPING
+ * is _MAPPING_KERNEL. If the client wants to have a
+ * kernel mapping, it should use the _MAPPING_KERNEL
+ * flag set, and _TYPE_NOTIFIER should be used only
+ * with NOTIFIERS.
+ */
+
+#define NVOS03_FLAGS_MAPPING 20:20
+#define NVOS03_FLAGS_MAPPING_NONE (0x00000000)
+#define NVOS03_FLAGS_MAPPING_KERNEL (0x00000001)
+
+#define NVOS03_FLAGS_CACHE_SNOOP 28:28
+#define NVOS03_FLAGS_CACHE_SNOOP_ENABLE (0x00000000)
+#define NVOS03_FLAGS_CACHE_SNOOP_DISABLE (0x00000001)
+
+// HASH_TABLE:ENABLE means that the context DMA is automatically bound into all
+// channels in the client. This can lead to excessive hash table usage.
+// HASH_TABLE:DISABLE means that the context DMA must be explicitly bound into +// any channel that needs to use it via NvRmBindContextDma. +// HASH_TABLE:ENABLE is not supported on NV50 and up, and HASH_TABLE:DISABLE should +// be preferred for all new code. +#define NVOS03_FLAGS_HASH_TABLE 29:29 +#define NVOS03_FLAGS_HASH_TABLE_ENABLE (0x00000000) +#define NVOS03_FLAGS_HASH_TABLE_DISABLE (0x00000001) + +/* macro NV01_ALLOC_OBJECT */ +#define NV01_ALLOC_OBJECT (0x00000005) + +/* parameters */ +typedef struct +{ + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectNew; + NvV32 hClass; + NvV32 status; +} NVOS05_PARAMETERS; + +/* Valid values for hClass in Nv01AllocEvent */ +/* Note that NV01_EVENT_OS_EVENT is same as NV01_EVENT_WIN32_EVENT */ +/* TODO: delete the WIN32 name */ +#define NV01_EVENT_KERNEL_CALLBACK (0x00000078) +#define NV01_EVENT_OS_EVENT (0x00000079) +#define NV01_EVENT_WIN32_EVENT NV01_EVENT_OS_EVENT +#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007E) + +/* NOTE: NV01_EVENT_KERNEL_CALLBACK is deprecated. Please use NV01_EVENT_KERNEL_CALLBACK_EX. */ +/* For use with NV01_EVENT_KERNEL_CALLBACK. */ +/* NVOS10_EVENT_KERNEL_CALLBACK data structure storage needs to be retained by the caller. */ +typedef void (*Callback1ArgVoidReturn)(void *arg); +typedef void (*Callback5ArgVoidReturn)(void *arg1, void *arg2, NvHandle hEvent, NvU32 data, NvU32 status); + +/* NOTE: the 'void* arg' below is ok (but unfortunate) since this interface + can only be used by other kernel drivers which must share the same ptr-size */ +typedef struct +{ + Callback1ArgVoidReturn func; + void *arg; +} NVOS10_EVENT_KERNEL_CALLBACK; + +/* For use with NV01_EVENT_KERNEL_CALLBACK_EX. */ +/* NVOS10_EVENT_KERNEL_CALLBACK_EX data structure storage needs to be retained by the caller. */ +/* NOTE: the 'void* arg' below is ok (but unfortunate) since this interface + can only be used by other kernel drivers which must share the same ptr-size */ +typedef struct +{ + Callback5ArgVoidReturn func; + void *arg; +} NVOS10_EVENT_KERNEL_CALLBACK_EX; + +/* Setting this bit in index will set the Event to a Broadcast type */ +/* i.e. each subdevice under a device needs to see the Event before it's signaled */ +#define NV01_EVENT_BROADCAST (0x80000000) + +/* allow non-root resman client to create NV01_EVENT_KERNEL_CALLBACK events */ +/* -- this works in debug/develop drivers only (for security reasons)*/ +#define NV01_EVENT_PERMIT_NON_ROOT_EVENT_KERNEL_CALLBACK_CREATION (0x40000000) + +/* RM event should be triggered only by the specified subdevice; see cl0005.h + * for details re: how to specify subdevice. 
*/ +#define NV01_EVENT_SUBDEVICE_SPECIFIC (0x20000000) + +/* RM should trigger the event but shouldn't do the book-keeping of data + * associated with that event */ +#define NV01_EVENT_WITHOUT_EVENT_DATA (0x10000000) + +/* RM event should be triggered only by the non-stall interrupt */ +#define NV01_EVENT_NONSTALL_INTR (0x08000000) + +/* RM event was allocated from client RM, post events back to client RM */ +#define NV01_EVENT_CLIENT_RM (0x04000000) + +/* function OS19 */ +#define NV04_I2C_ACCESS (0x00000013) + +#define NVOS_I2C_ACCESS_MAX_BUFFER_SIZE 2048 + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hDevice; + NvU32 paramSize; + NvP64 paramStructPtr NV_ALIGN_BYTES(8); + NvV32 status; +} NVOS_I2C_ACCESS_PARAMS; + +/* current values for command */ +#define NVOS20_COMMAND_unused0001 0x0001 +#define NVOS20_COMMAND_unused0002 0x0002 +#define NVOS20_COMMAND_STRING_PRINT 0x0003 + +/* function OS21 */ +#define NV04_ALLOC (0x00000015) + +/* parameters */ +typedef struct +{ + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectNew; + NvV32 hClass; + NvP64 pAllocParms NV_ALIGN_BYTES(8); + NvU32 paramsSize; + NvV32 status; +} NVOS21_PARAMETERS; + +#define NVOS64_FLAGS_NONE (0x00000000) +#define NVOS64_FLAGS_FINN_SERIALIZED (0x00000001) + +/* New struct with rights requested */ +typedef struct +{ + NvHandle hRoot; // [IN] client handle + NvHandle hObjectParent; // [IN] parent handle of new object + NvHandle hObjectNew; // [INOUT] new object handle, 0 to generate + NvV32 hClass; // [in] class num of new object + NvP64 pAllocParms NV_ALIGN_BYTES(8); // [IN] class-specific alloc parameters + NvP64 pRightsRequested NV_ALIGN_BYTES(8); // [IN] RS_ACCESS_MASK to request rights, or NULL + NvU32 paramsSize; // [IN] Size of alloc params + NvU32 flags; // [IN] flags for FINN serialization + NvV32 status; // [OUT] status +} NVOS64_PARAMETERS; + +/* RM Alloc header + * + * Replacement for NVOS21/64_PARAMETERS where embedded pointers are not allowed. + * Input layout for RM Alloc user space calls should be + * + * +--- NVOS62_PARAMETERS ---+--- RM Alloc parameters ---+ + * +--- NVOS65_PARAMETERS ---+--- Rights Requested ---+--- RM Alloc parameters ---+ + * + * NVOS62_PARAMETERS::paramsSize is the size of RM Alloc parameters + * If NVOS65_PARAMETERS::maskSize is 0, Rights Requested will not be present in memory. 
+ *
+ */
+typedef struct
+{
+    NvHandle hRoot;          // [IN] client handle
+    NvHandle hObjectParent;  // [IN] parent handle of the new object
+    NvHandle hObjectNew;     // [IN] new object handle
+    NvV32    hClass;         // [IN] class num of the new object
+    NvU32    paramSize;      // [IN] size in bytes of the RM alloc parameters
+    NvV32    status;         // [OUT] status
+} NVOS62_PARAMETERS;
+
+#define NVOS65_PARAMETERS_VERSION_MAGIC 0x77FEF81E
+
+typedef struct
+{
+    NvHandle hRoot;          // [IN] client handle
+    NvHandle hObjectParent;  // [IN] parent handle of the new object
+    NvHandle hObjectNew;     // [INOUT] new object handle, 0 to generate
+    NvV32    hClass;         // [IN] class num of the new object
+    NvU32    paramSize;      // [IN] size in bytes of the RM alloc parameters
+    NvU32    versionMagic;   // [IN] NVOS65_PARAMETERS_VERSION_MAGIC
+    NvU32    maskSize;       // [IN] size in bytes of access mask, or 0 if NULL
+    NvV32    status;         // [OUT] status
+} NVOS65_PARAMETERS;
+
+/* function OS30 */
+#define NV04_IDLE_CHANNELS (0x0000001E)
+
+/* parameter values */
+#define NVOS30_FLAGS_BEHAVIOR 3:0
+#define NVOS30_FLAGS_BEHAVIOR_SPIN (0x00000000)
+#define NVOS30_FLAGS_BEHAVIOR_SLEEP (0x00000001)
+#define NVOS30_FLAGS_BEHAVIOR_QUERY (0x00000002)
+#define NVOS30_FLAGS_BEHAVIOR_FORCE_BUSY_CHECK (0x00000003)
+#define NVOS30_FLAGS_CHANNEL 7:4
+#define NVOS30_FLAGS_CHANNEL_LIST (0x00000000)
+#define NVOS30_FLAGS_CHANNEL_SINGLE (0x00000001)
+#define NVOS30_FLAGS_IDLE 30:8
+#define NVOS30_FLAGS_IDLE_PUSH_BUFFER (0x00000001)
+#define NVOS30_FLAGS_IDLE_CACHE1 (0x00000002)
+#define NVOS30_FLAGS_IDLE_GRAPHICS (0x00000004)
+#define NVOS30_FLAGS_IDLE_MPEG (0x00000008)
+#define NVOS30_FLAGS_IDLE_MOTION_ESTIMATION (0x00000010)
+#define NVOS30_FLAGS_IDLE_VIDEO_PROCESSOR (0x00000020)
+#define NVOS30_FLAGS_IDLE_MSPDEC (0x00000020)
+#define NVOS30_FLAGS_IDLE_BITSTREAM_PROCESSOR (0x00000040)
+#define NVOS30_FLAGS_IDLE_MSVLD (0x00000040)
+#define NVOS30_FLAGS_IDLE_NVDEC0 NVOS30_FLAGS_IDLE_MSVLD
+#define NVOS30_FLAGS_IDLE_CIPHER_DMA (0x00000080)
+#define NVOS30_FLAGS_IDLE_SEC (0x00000080)
+#define NVOS30_FLAGS_IDLE_CALLBACKS (0x00000100)
+#define NVOS30_FLAGS_IDLE_MSPPP (0x00000200)
+#define NVOS30_FLAGS_IDLE_CE0 (0x00000400)
+#define NVOS30_FLAGS_IDLE_CE1 (0x00000800)
+#define NVOS30_FLAGS_IDLE_CE2 (0x00001000)
+#define NVOS30_FLAGS_IDLE_CE3 (0x00002000)
+#define NVOS30_FLAGS_IDLE_CE4 (0x00004000)
+#define NVOS30_FLAGS_IDLE_CE5 (0x00008000)
+#define NVOS30_FLAGS_IDLE_VIC (0x00010000)
+#define NVOS30_FLAGS_IDLE_MSENC (0x00020000)
+#define NVOS30_FLAGS_IDLE_NVENC0 NVOS30_FLAGS_IDLE_MSENC
+#define NVOS30_FLAGS_IDLE_NVENC1 (0x00040000)
+#define NVOS30_FLAGS_IDLE_NVENC2 (0x00080000)
+#define NVOS30_FLAGS_IDLE_NVJPG (0x00100000)
+#define NVOS30_FLAGS_IDLE_NVDEC1 (0x00200000)
+#define NVOS30_FLAGS_IDLE_NVDEC2 (0x00400000)
+#define NVOS30_FLAGS_IDLE_ACTIVECHANNELS (0x00800000)
+#define NVOS30_FLAGS_IDLE_ALL_ENGINES (NVOS30_FLAGS_IDLE_GRAPHICS | \
+                                       NVOS30_FLAGS_IDLE_MPEG | \
+                                       NVOS30_FLAGS_IDLE_MOTION_ESTIMATION | \
+                                       NVOS30_FLAGS_IDLE_VIDEO_PROCESSOR | \
+                                       NVOS30_FLAGS_IDLE_BITSTREAM_PROCESSOR | \
+                                       NVOS30_FLAGS_IDLE_CIPHER_DMA | \
+                                       NVOS30_FLAGS_IDLE_MSPDEC | \
+                                       NVOS30_FLAGS_IDLE_NVDEC0 | \
+                                       NVOS30_FLAGS_IDLE_SEC | \
+                                       NVOS30_FLAGS_IDLE_MSPPP | \
+                                       NVOS30_FLAGS_IDLE_CE0 | \
+                                       NVOS30_FLAGS_IDLE_CE1 | \
+                                       NVOS30_FLAGS_IDLE_CE2 | \
+                                       NVOS30_FLAGS_IDLE_CE3 | \
+                                       NVOS30_FLAGS_IDLE_CE4 | \
+                                       NVOS30_FLAGS_IDLE_CE5 | \
+                                       NVOS30_FLAGS_IDLE_NVENC0 | \
+                                       NVOS30_FLAGS_IDLE_NVENC1 | \
+                                       NVOS30_FLAGS_IDLE_NVENC2 | \
+                                       NVOS30_FLAGS_IDLE_VIC | \
+                                       NVOS30_FLAGS_IDLE_NVJPG | \
+                                       NVOS30_FLAGS_IDLE_NVDEC1 | \
+                                       NVOS30_FLAGS_IDLE_NVDEC2)
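+
+/*
+ * Usage sketch (illustrative only): flag fields declared in the bit-range
+ * style above (e.g. "3:0") are meant to be composed with the DRF macros from
+ * nvmisc.h, e.g.:
+ *
+ *     NvV32 flags = 0;
+ *     flags = FLD_SET_DRF(OS30, _FLAGS, _BEHAVIOR, _SLEEP, flags);
+ *     flags = FLD_SET_DRF(OS30, _FLAGS, _CHANNEL, _SINGLE, flags);
+ */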
NVOS30_FLAGS_IDLE_NVDEC2) +#define NVOS30_FLAGS_WAIT_FOR_ELPG_ON 31:31 +#define NVOS30_FLAGS_WAIT_FOR_ELPG_ON_NO (0x00000000) +#define NVOS30_FLAGS_WAIT_FOR_ELPG_ON_YES (0x00000001) + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hChannel; + NvV32 numChannels; + + NvP64 phClients NV_ALIGN_BYTES(8); + NvP64 phDevices NV_ALIGN_BYTES(8); + NvP64 phChannels NV_ALIGN_BYTES(8); + + NvV32 flags; + NvV32 timeout; + NvV32 status; +} NVOS30_PARAMETERS; + +/* function OS32 */ +typedef void (*BindResultFunc)(void * pVoid, NvU32 gpuMask, NvU32 bState, NvU32 bResult); + +#define NV04_VID_HEAP_CONTROL (0x00000020) +/************************************************************************* +************************ New Heap Interface ****************************** +*************************************************************************/ +// NVOS32 Descriptor types +// +// NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR: The dma-buf object +// pointer, provided by the linux kernel buffer sharing sub-system. +// This descriptor can only be used by kernel space rm-clients. +// +#define NVOS32_DESCRIPTOR_TYPE_VIRTUAL_ADDRESS 0 +#define NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY 1 +#define NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY 2 +#define NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR 3 +#define NVOS32_DESCRIPTOR_TYPE_OS_FILE_HANDLE 4 +#define NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR 5 +#define NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR 6 +#define NVOS32_DESCRIPTOR_TYPE_KERNEL_VIRTUAL_ADDRESS 7 +// NVOS32 function +#define NVOS32_FUNCTION_ALLOC_SIZE 2 +#define NVOS32_FUNCTION_FREE 3 +#define NVOS32_FUNCTION_INFO 5 +#define NVOS32_FUNCTION_ALLOC_TILED_PITCH_HEIGHT 6 +#define NVOS32_FUNCTION_ALLOC_SIZE_RANGE 14 +#define NVOS32_FUNCTION_REACQUIRE_COMPR 15 +#define NVOS32_FUNCTION_RELEASE_COMPR 16 +#define NVOS32_FUNCTION_GET_MEM_ALIGNMENT 18 +#define NVOS32_FUNCTION_HW_ALLOC 19 +#define NVOS32_FUNCTION_HW_FREE 20 +#define NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR 27 + +typedef struct +{ + NvP64 sgt NV_ALIGN_BYTES(8); + NvP64 gem NV_ALIGN_BYTES(8); +} NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR_PARAMETERS; + +// NVOS32 IVC-heap number delimiting value +#define NVOS32_IVC_HEAP_NUMBER_DONT_ALLOCATE_ON_IVC_HEAP 0 // When IVC heaps are present, + // IVC-heap number specified + // as part of 'NVOS32_PARAMETERS' + // which is less than or equal to this + // constant indicates that allocation + // should not be done on IVC heap. + // Explanation of IVC-heap number is + // under 'AllocSize' structure below. + +typedef struct +{ + NvHandle hRoot; // [IN] - root object handle + NvHandle hObjectParent; // [IN] - device handle + NvU32 function; // [IN] - heap function, see below FUNCTION* defines + NvHandle hVASpace; // [IN] - VASpace handle + NvS16 ivcHeapNumber; // [IN] - When IVC heaps are present: either 1) number of the IVC heap + // shared between two VMs or 2) number indicating that allocation + // should not be done on an IVC heap. Values greater than constant + // 'NVOS32_IVC_HEAP_NUMBER_DONT_ALLOCATE_ON_IVC_HEAP' define set 1) + // and values less than or equal to that constant define set 2). + // When IVC heaps are present, a correct IVC-heap number must be specified. + // When IVC heaps are absent, IVC-heap number is disregarded. + // RM provides for each VM a bitmask of heaps with each bit + // specifying the other peer that can use the partition. + // Each bit set to one can be enumerated, such that the bit + // with lowest significance is enumerated with one. + // 'ivcHeapNumber' parameter specifies this enumeration value.
+ // This value is used to uniquely identify a heap shared between + // two particular VMs. + // Illustration: + // bitmask: 1 1 0 1 0 = 0x1A + // possible 'ivcHeapNumber' values: 3, 2, 1 + NvV32 status; // [OUT] - returned NVOS32* status code, see below STATUS* defines + NvU64 total NV_ALIGN_BYTES(8); // [OUT] - returned total size of heap + NvU64 free NV_ALIGN_BYTES(8); // [OUT] - returned free space available in heap + + union + { + // NVOS32_FUNCTION_ALLOC_SIZE + struct + { + NvU32 owner; // [IN] - memory owner ID + NvHandle hMemory; // [IN/OUT] - unique memory handle - IN only if MEMORY_HANDLE_PROVIDED is set (otherwise generated) + NvU32 type; // [IN] - surface type, see below TYPE* defines + NvU32 flags; // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines + NvU32 attr; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 format; // [IN/OUT] - format requested, and format allocated + NvU32 comprCovg; // [IN/OUT] - compr covg requested, and allocated + NvU32 zcullCovg; // [OUT] - zcull covg allocated + NvU32 partitionStride; // [IN/OUT] - 0 means "RM" chooses + NvU32 width; // [IN] - width "hint" used for zcull region allocations + NvU32 height; // [IN] - height "hint" used for zcull region allocations + NvU64 size NV_ALIGN_BYTES(8); // [IN/OUT] - size of allocation - also returns the actual size allocated + NvU64 alignment NV_ALIGN_BYTES(8); // [IN] - requested alignment - NVOS32_ALLOC_FLAGS_ALIGNMENT* must be on + NvU64 offset NV_ALIGN_BYTES(8); // [IN/OUT] - desired offset if NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE is on AND returned offset + NvU64 limit NV_ALIGN_BYTES(8); // [OUT] - returned surface limit + NvP64 address NV_ALIGN_BYTES(8);// [OUT] - returned address + NvU64 rangeBegin NV_ALIGN_BYTES(8); // [IN] - allocated memory will be limited to the range + NvU64 rangeEnd NV_ALIGN_BYTES(8); // [IN] - from rangeBegin to rangeEnd, inclusive. 
+ NvU32 attr2; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 ctagOffset; // [IN] - comptag offset for this surface (see NVOS32_ALLOC_COMPTAG_OFFSET) + NvS32 numaNode; // [IN] - NUMA node from which memory should be allocated + } AllocSize; + + // NVOS32_FUNCTION_ALLOC_TILED_PITCH_HEIGHT + struct + { + NvU32 owner; // [IN] - memory owner ID + NvHandle hMemory; // [IN/OUT] - unique memory handle - IN only if MEMORY_HANDLE_PROVIDED is set (otherwise generated) + NvU32 type; // [IN] - surface type, see below TYPE* defines + NvU32 flags; // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines + NvU32 height; // [IN] - height of surface in pixels + NvS32 pitch; // [IN/OUT] - desired pitch AND returned actual pitch allocated + NvU32 attr; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 width; //[IN] - width of surface in pixels + NvU32 format; // [IN/OUT] - format requested, and format allocated + NvU32 comprCovg; // [IN/OUT] - compr covg requested, and allocated + NvU32 zcullCovg; // [OUT] - zcull covg allocated + NvU32 partitionStride; // [IN/OUT] - 0 means "RM" chooses + NvU64 size NV_ALIGN_BYTES(8); // [IN/OUT] - size of allocation - also returns the actual size allocated + NvU64 alignment NV_ALIGN_BYTES(8); // [IN] - requested alignment - NVOS32_ALLOC_FLAGS_ALIGNMENT* must be on + NvU64 offset NV_ALIGN_BYTES(8); // [IN/OUT] - desired offset if NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE is on AND returned offset + NvU64 limit NV_ALIGN_BYTES(8); // [OUT] - returned surface limit + NvP64 address NV_ALIGN_BYTES(8);// [OUT] - returned address + NvU64 rangeBegin NV_ALIGN_BYTES(8); // [IN] - allocated memory will be limited to the range + NvU64 rangeEnd NV_ALIGN_BYTES(8); // [IN] - from rangeBegin to rangeEnd, inclusive. 
+ NvU32 attr2; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 ctagOffset; // [IN] - comptag offset for this surface (see NVOS32_ALLOC_COMPTAG_OFFSET) + NvS32 numaNode; // [IN] - NUMA node from which memory should be allocated + } AllocTiledPitchHeight; + + // NVOS32_FUNCTION_FREE + struct + { + NvU32 owner; // [IN] - memory owner ID + NvHandle hMemory; // [IN] - unique memory handle + NvU32 flags; // [IN] - heap free flags (must be NVOS32_FREE_FLAGS_MEMORY_HANDLE_PROVIDED) + } Free; + + // NVOS32_FUNCTION_RELEASE_COMPR + struct + { + NvU32 owner; // [IN] - memory owner ID + NvU32 flags; // [IN] - must be NVOS32_RELEASE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED + NvHandle hMemory; // [IN] - unique memory handle (valid if _RELEASE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED) + } ReleaseCompr; + + // NVOS32_FUNCTION_REACQUIRE_COMPR + struct + { + NvU32 owner; // [IN] - memory owner ID + NvU32 flags; // [IN] - must be NVOS32_REACQUIRE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED + NvHandle hMemory; // [IN] - unique memory handle (valid if _REACQUIRE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED) + } ReacquireCompr; + + // NVOS32_FUNCTION_INFO + struct + { + NvU32 attr; // [IN] - memory heap attributes requested + NvU64 offset NV_ALIGN_BYTES(8); // [OUT] - base of largest free block + NvU64 size NV_ALIGN_BYTES(8); // [OUT] - size of largest free block + NvU64 base NV_ALIGN_BYTES(8); // [OUT] - returned heap phys base + } Info; + + // NVOS32_FUNCTION_ALLOC_SIZE_RANGE + struct + { + NvU32 owner; // [IN] - memory owner ID + NvHandle hMemory; // [IN] - unique memory handle + NvU32 type; // [IN] - surface type, see below TYPE* defines + NvU32 flags; // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines + NvU32 attr; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 format; // [IN/OUT] - format requested, and format allocated + NvU32 comprCovg; // [IN/OUT] - compr covg requested, and allocated + NvU32 zcullCovg; // [OUT] - zcull covg allocated + NvU32 partitionStride; // [IN/OUT] - 0 means "RM" chooses + NvU64 size NV_ALIGN_BYTES(8); // [IN/OUT] - size of allocation - also returns the actual size allocated + NvU64 alignment NV_ALIGN_BYTES(8); // [IN] - requested alignment - NVOS32_ALLOC_FLAGS_ALIGNMENT* must be on + NvU64 offset NV_ALIGN_BYTES(8); // [IN/OUT] - desired offset if NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE is on AND returned offset + NvU64 limit NV_ALIGN_BYTES(8); // [OUT] - returned surface limit + NvU64 rangeBegin NV_ALIGN_BYTES(8); // [IN] - allocated memory will be limited to the range + NvU64 rangeEnd NV_ALIGN_BYTES(8); // [IN] - from rangeBegin to rangeEnd, inclusive.
+ NvP64 address NV_ALIGN_BYTES(8);// [OUT] - returned address + NvU32 attr2; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 ctagOffset; // [IN] - comptag offset for this surface (see NVOS32_ALLOC_COMPTAG_OFFSET) + NvS32 numaNode; // [IN] - NUMA node from which memory should be allocated + } AllocSizeRange; + + // NVOS32_FUNCTION_GET_MEM_ALIGNMENT + struct + { + NvU32 alignType; // Input + NvU32 alignAttr; + NvU32 alignInputFlags; + NvU64 alignSize NV_ALIGN_BYTES(8); + NvU32 alignHeight; + NvU32 alignWidth; + NvU32 alignPitch; + NvU32 alignPad; + NvU32 alignMask; + NvU32 alignKind; + NvU32 alignAdjust; // Output -- If non-zero the amount we need to adjust the offset + NvU32 alignAttr2; + } AllocHintAlignment; + + struct + { + NvU32 allocOwner; // [IN] - memory owner ID + NvHandle allochMemory; // [IN/OUT] - unique memory handle - IN only if MEMORY_HANDLE_PROVIDED is set (otherwise generated) + NvU32 flags; + NvU32 allocType; // Input + NvU32 allocAttr; + NvU32 allocInputFlags; + NvU64 allocSize NV_ALIGN_BYTES(8); + NvU32 allocHeight; + NvU32 allocWidth; + NvU32 allocPitch; + NvU32 allocMask; + NvU32 allocComprCovg; + NvU32 allocZcullCovg; + NvP64 bindResultFunc NV_ALIGN_BYTES(8); // BindResultFunc + NvP64 pHandle NV_ALIGN_BYTES(8); + NvHandle hResourceHandle; // Handle to RM container + NvU32 retAttr; // Output Indicates the resources that we allocated + NvU32 kind; + NvU64 osDeviceHandle NV_ALIGN_BYTES(8); + NvU32 allocAttr2; + NvU32 retAttr2; // Output Indicates the resources that we allocated + NvU64 allocAddr NV_ALIGN_BYTES(8); + // [out] from GMMU_COMPR_INFO in drivers/common/shared/inc/mmu/gmmu_fmt.h + struct + { + NvU32 compPageShift; + NvU32 compressedKind; + NvU32 compTagLineMin; + NvU32 compPageIndexLo; + NvU32 compPageIndexHi; + NvU32 compTagLineMultiplier; + } comprInfo; + // [out] fallback uncompressed kind. + NvU32 uncompressedKind; + } HwAlloc; + + // NVOS32_FUNCTION_HW_FREE + struct + { + NvHandle hResourceHandle; // Handle to RM Resource Info + NvU32 flags; // Indicate if HW Resources and/or Memory + } HwFree; +// Updated interface check. 
+#define NV_RM_OS32_ALLOC_OS_DESCRIPTOR_WITH_OS32_ATTR 1 + + // NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR + struct + { + NvHandle hMemory; // [IN/OUT] - unique memory handle - IN only if MEMORY_HANDLE_PROVIDED is set (otherwise generated) + NvU32 type; // [IN] - surface type, see below TYPE* defines + NvU32 flags; // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines + NvU32 attr; // [IN] - attributes for memory placement/properties, see below + NvU32 attr2; // [IN] - attributes GPU_CACHEABLE + NvP64 descriptor NV_ALIGN_BYTES(8); // [IN] - descriptor address + NvU64 limit NV_ALIGN_BYTES(8); // [IN] - allocated size -1 + NvU32 descriptorType; // [IN] - descriptor type(Virtual | nvmap Handle) + } AllocOsDesc; + + } data; +} NVOS32_PARAMETERS; + +#define NVOS32_DELETE_RESOURCES_ALL 0 + +// type field +#define NVOS32_TYPE_IMAGE 0 +#define NVOS32_TYPE_DEPTH 1 +#define NVOS32_TYPE_TEXTURE 2 +#define NVOS32_TYPE_VIDEO 3 +#define NVOS32_TYPE_FONT 4 +#define NVOS32_TYPE_CURSOR 5 +#define NVOS32_TYPE_DMA 6 +#define NVOS32_TYPE_INSTANCE 7 +#define NVOS32_TYPE_PRIMARY 8 +#define NVOS32_TYPE_ZCULL 9 +#define NVOS32_TYPE_UNUSED 10 +#define NVOS32_TYPE_SHADER_PROGRAM 11 +#define NVOS32_TYPE_OWNER_RM 12 +#define NVOS32_TYPE_NOTIFIER 13 +#define NVOS32_TYPE_RESERVED 14 +#define NVOS32_TYPE_PMA 15 +#define NVOS32_TYPE_STENCIL 16 +#define NVOS32_TYPE_SYNCPOINT 17 +#define NVOS32_NUM_MEM_TYPES 18 + +// Surface attribute field - bitmask of requested attributes the surface +// should have. +// This value is updated to reflect what was actually allocated, and so this +// field must be checked after every allocation to determine what was +// allocated. Pass in the ANY tags to indicate that RM should fall back but +// still succeed the alloc. +// For example, if tiled_any is passed in, but no tile ranges are available, +// RM will allocate normal memory and indicate that in the returned attr field. +// Each returned attribute will have the REQUIRED field set if that attribute +// applies to the allocated surface. + +#define NVOS32_ATTR_NONE 0x00000000 + +#define NVOS32_ATTR_DEPTH 2:0 +#define NVOS32_ATTR_DEPTH_UNKNOWN 0x00000000 +#define NVOS32_ATTR_DEPTH_8 0x00000001 +#define NVOS32_ATTR_DEPTH_16 0x00000002 +#define NVOS32_ATTR_DEPTH_24 0x00000003 +#define NVOS32_ATTR_DEPTH_32 0x00000004 +#define NVOS32_ATTR_DEPTH_64 0x00000005 +#define NVOS32_ATTR_DEPTH_128 0x00000006 + +#define NVOS32_ATTR_COMPR_COVG 3:3 +#define NVOS32_ATTR_COMPR_COVG_DEFAULT 0x00000000 +#define NVOS32_ATTR_COMPR_COVG_PROVIDED 0x00000001 + +// Surface description - number of AA samples +// This number should only reflect AA done in hardware, not in software. For +// example, OpenGL's 8x AA mode is a mix of 2x hardware multisample and 2x2 +// software supersample. +// OpenGL should specify ATTR_AA_SAMPLES of 2 in this case, not 8, because +// the hardware will be programmed to run in 2x AA mode. +// Note that X_VIRTUAL_Y means X real samples with Y samples total (i.e. Y +// does not indicate the number of virtual samples). For instance, what +// arch and HW describe as NV_PGRAPH_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_12 +// corresponds to NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_16 here.
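The attribute fields above use the same DRF-style high:low bit-range notation as the rest of this header (e.g. NVOS32_ATTR_DEPTH covers bits 2:0). Such a range can be unpacked with the C ternary operator, since (1 ? 2:0) evaluates to 2 and (0 ? 2:0) to 0. A minimal, self-contained sketch of the technique follows; the FLD_* names are illustrative stand-ins, not macros from this header, and later sketches in this section reuse them:

    /* Illustrative helpers (stand-ins for the driver's DRF-style macros): */
    #define FLD_LOW(range) (0 ? range) /* low bit index of a high:low pair */
    #define FLD_HIGH(range) (1 ? range) /* high bit index of a high:low pair */
    #define FLD_MASK(range) (0xFFFFFFFFu >> (31 - FLD_HIGH(range) + FLD_LOW(range)))
    #define FLD_SET(range, value) (((NvU32)(value) & FLD_MASK(range)) << FLD_LOW(range))
    #define FLD_GET(range, word) (((NvU32)(word) >> FLD_LOW(range)) & FLD_MASK(range))

    /* Request a 32-bit depth surface with caller-provided compression coverage. */
    static NvU32 buildAttr(void)
    {
        NvU32 attr = NVOS32_ATTR_NONE;
        attr |= FLD_SET(NVOS32_ATTR_DEPTH, NVOS32_ATTR_DEPTH_32);
        attr |= FLD_SET(NVOS32_ATTR_COMPR_COVG, NVOS32_ATTR_COMPR_COVG_PROVIDED);
        return attr;
    }

    /* attr is IN/OUT: after the alloc call returns, re-read each field, since
     * RM may have substituted a fallback instead of failing the allocation. */
    static NvBool gotRequestedDepth(NvU32 attrReturned)
    {
        return FLD_GET(NVOS32_ATTR_DEPTH, attrReturned) == NVOS32_ATTR_DEPTH_32;
    }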
+ +#define NVOS32_ATTR_AA_SAMPLES 7:4 +#define NVOS32_ATTR_AA_SAMPLES_1 0x00000000 +#define NVOS32_ATTR_AA_SAMPLES_2 0x00000001 +#define NVOS32_ATTR_AA_SAMPLES_4 0x00000002 +#define NVOS32_ATTR_AA_SAMPLES_4_ROTATED 0x00000003 +#define NVOS32_ATTR_AA_SAMPLES_6 0x00000004 +#define NVOS32_ATTR_AA_SAMPLES_8 0x00000005 +#define NVOS32_ATTR_AA_SAMPLES_16 0x00000006 +#define NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_8 0x00000007 +#define NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_16 0x00000008 +#define NVOS32_ATTR_AA_SAMPLES_8_VIRTUAL_16 0x00000009 +#define NVOS32_ATTR_AA_SAMPLES_8_VIRTUAL_32 0x0000000A + +// +// +// GPU_CACHE_SNOOPABLE_ON signals to RM that CPU (or other IO device) +// accesses to this surface will snoop the GPU cache. +// _OFF indicates no GPU cache snooping will take place. +// _MAPPING defers the decision to mapping time. +// +// Only applies to fully coherent platforms. +// +// +#define NVOS32_ATTR_GPU_CACHE_SNOOPABLE 9:8 +#define NVOS32_ATTR_GPU_CACHE_SNOOPABLE_MAPPING 0x00000000 +#define NVOS32_ATTR_GPU_CACHE_SNOOPABLE_OFF 0x00000001 +#define NVOS32_ATTR_GPU_CACHE_SNOOPABLE_ON 0x00000002 +#define NVOS32_ATTR_GPU_CACHE_SNOOPABLE_INVALID 0x00000003 + +// Zcull region (NV40 and up) +// If ATTR_ZCULL is REQUIRED or ANY and ATTR_DEPTH is UNKNOWN, the +// allocation will fail. +// If ATTR_DEPTH or ATTR_AA_SAMPLES is not accurate, erroneous rendering +// may result. +#define NVOS32_ATTR_ZCULL 11:10 +#define NVOS32_ATTR_ZCULL_NONE 0x00000000 +#define NVOS32_ATTR_ZCULL_REQUIRED 0x00000001 +#define NVOS32_ATTR_ZCULL_ANY 0x00000002 +#define NVOS32_ATTR_ZCULL_SHARED 0x00000003 + +// Compression (NV20 and up) +// If ATTR_COMPR is REQUIRED or ANY and ATTR_DEPTH is UNKNOWN, the +// allocation will fail. +// If ATTR_DEPTH or ATTR_AA_SAMPLES is not accurate, performance will +// suffer heavily +#define NVOS32_ATTR_COMPR 13:12 +#define NVOS32_ATTR_COMPR_NONE 0x00000000 +#define NVOS32_ATTR_COMPR_REQUIRED 0x00000001 +#define NVOS32_ATTR_COMPR_ANY 0x00000002 +#define NVOS32_ATTR_COMPR_PLC_REQUIRED NVOS32_ATTR_COMPR_REQUIRED +#define NVOS32_ATTR_COMPR_PLC_ANY NVOS32_ATTR_COMPR_ANY +#define NVOS32_ATTR_COMPR_DISABLE_PLC_ANY 0x00000003 + +// +// Force the allocation to go to the reserved heap. +// This flag is used for KMD allocations when MIG is enabled. +// +#define NVOS32_ATTR_ALLOCATE_FROM_RESERVED_HEAP 14:14 +#define NVOS32_ATTR_ALLOCATE_FROM_RESERVED_HEAP_NO 0x00000000 +#define NVOS32_ATTR_ALLOCATE_FROM_RESERVED_HEAP_YES 0x00000001 + +// Format +// _BLOCK_LINEAR is only available for nv50+. +#define NVOS32_ATTR_FORMAT 17:16 +// Macros representing the low/high bits of NVOS32_ATTR_FORMAT +// bit range. These provide direct access to the range limits +// without needing to split the low:high representation via +// ternary operator, thereby avoiding MISRA 14.3 violation. 
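Per the MISRA note just above, the _LOW_FIELD/_HIGH_FIELD constants defined below let the FORMAT field be masked out with plain integer arithmetic instead of the ternary trick; a sketch under the same illustrative conventions:

    /* Extract NVOS32_ATTR_FORMAT (bits 17:16) with plain integer constants. */
    static NvU32 getFormat(NvU32 attr)
    {
        NvU32 mask = (1u << (NVOS32_ATTR_FORMAT_HIGH_FIELD -
                             NVOS32_ATTR_FORMAT_LOW_FIELD + 1u)) - 1u;
        return (attr >> NVOS32_ATTR_FORMAT_LOW_FIELD) & mask;
    }
    /* getFormat(attr) == NVOS32_ATTR_FORMAT_BLOCK_LINEAR for a block-linear
     * surface (nv50+). */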
+#define NVOS32_ATTR_FORMAT_LOW_FIELD 16 +#define NVOS32_ATTR_FORMAT_HIGH_FIELD 17 +#define NVOS32_ATTR_FORMAT_PITCH 0x00000000 +#define NVOS32_ATTR_FORMAT_SWIZZLED 0x00000001 +#define NVOS32_ATTR_FORMAT_BLOCK_LINEAR 0x00000002 + +#define NVOS32_ATTR_Z_TYPE 18:18 +#define NVOS32_ATTR_Z_TYPE_FIXED 0x00000000 +#define NVOS32_ATTR_Z_TYPE_FLOAT 0x00000001 + +#define NVOS32_ATTR_ZS_PACKING 21:19 +#define NVOS32_ATTR_ZS_PACKING_S8 0x00000000 // Z24S8 and S8 share definition +#define NVOS32_ATTR_ZS_PACKING_Z24S8 0x00000000 +#define NVOS32_ATTR_ZS_PACKING_S8Z24 0x00000001 +#define NVOS32_ATTR_ZS_PACKING_Z32 0x00000002 +#define NVOS32_ATTR_ZS_PACKING_Z24X8 0x00000003 +#define NVOS32_ATTR_ZS_PACKING_X8Z24 0x00000004 +#define NVOS32_ATTR_ZS_PACKING_Z32_X24S8 0x00000005 +#define NVOS32_ATTR_ZS_PACKING_X8Z24_X24S8 0x00000006 +#define NVOS32_ATTR_ZS_PACKING_Z16 0x00000007 +// NOTE: ZS packing and color packing fields are overlaid +#define NVOS32_ATTR_COLOR_PACKING NVOS32_ATTR_ZS_PACKING +#define NVOS32_ATTR_COLOR_PACKING_A8R8G8B8 0x00000000 +#define NVOS32_ATTR_COLOR_PACKING_X8R8G8B8 0x00000001 + + + +// +// For virtual allocs to choose page size for the region. Specifying +// _DEFAULT will select a virtual page size that allows for a surface +// to be mixed between video and system memory and allow the surface +// to be migrated between video and system memory. For tesla chips, +// 4KB will be used. For fermi chips with dual page tables, a virtual +// address with both page tables will be used. +// +// For physical allocation on chips with page swizzle this field is +// used to select the page swizzle. The latter also sets the virtual +// page size, but does not have influence over selecting a migratable +// virtual address. That must be selected when mapping the physical +// memory. +// +// BIG_PAGE = 64 KB on PASCAL +// = 64 KB or 128 KB on pre_PASCAL chips +// +// HUGE_PAGE = 2 MB on PASCAL+ +// = 2 MB or 512 MB on AMPERE+ +// = not supported on pre_PASCAL chips. +// +// To request a HUGE page size, +// set NVOS32_ATTR_PAGE_SIZE to _HUGE and NVOS32_ATTR2_PAGE_SIZE_HUGE to +// the desired size. +// +#define NVOS32_ATTR_PAGE_SIZE 24:23 +#define NVOS32_ATTR_PAGE_SIZE_DEFAULT 0x00000000 +#define NVOS32_ATTR_PAGE_SIZE_4KB 0x00000001 +#define NVOS32_ATTR_PAGE_SIZE_BIG 0x00000002 +#define NVOS32_ATTR_PAGE_SIZE_HUGE 0x00000003 + +#define NVOS32_ATTR_LOCATION 26:25 +#define NVOS32_ATTR_LOCATION_VIDMEM 0x00000000 +#define NVOS32_ATTR_LOCATION_PCI 0x00000001 +#define NVOS32_ATTR_LOCATION_ANY 0x00000003 + +// +// _DEFAULT implies _CONTIGUOUS for video memory currently, but +// may be changed to imply _NONCONTIGUOUS in the future. +// _ALLOW_NONCONTIGUOUS enables falling back to the noncontiguous +// vidmem allocator if contig allocation fails. +// +#define NVOS32_ATTR_PHYSICALITY 28:27 +#define NVOS32_ATTR_PHYSICALITY_DEFAULT 0x00000000 +#define NVOS32_ATTR_PHYSICALITY_NONCONTIGUOUS 0x00000001 +#define NVOS32_ATTR_PHYSICALITY_CONTIGUOUS 0x00000002 +#define NVOS32_ATTR_PHYSICALITY_ALLOW_NONCONTIGUOUS 0x00000003 + +#define NVOS32_ATTR_COHERENCY 31:29 +#define NVOS32_ATTR_COHERENCY_UNCACHED 0x00000000 +#define NVOS32_ATTR_COHERENCY_CACHED 0x00000001 +#define NVOS32_ATTR_COHERENCY_WRITE_COMBINE 0x00000002 +#define NVOS32_ATTR_COHERENCY_WRITE_THROUGH 0x00000003 +#define NVOS32_ATTR_COHERENCY_WRITE_PROTECT 0x00000004 +#define NVOS32_ATTR_COHERENCY_WRITE_BACK 0x00000005 + +// ATTR2 fields +#define NVOS32_ATTR2_NONE 0x00000000 + +// +// DEFAULT - Let lower level drivers pick optimal page kind.
+// PREFER_NO_ZBC - Prefer other types of compression over ZBC when +// selecting page kind. +// PREFER_ZBC - Prefer ZBC over other types of compression when +// selecting page kind. +// REQUIRE_ONLY_ZBC - Require a page kind that enables ZBC but disables +// other types of compression (i.e. 2C page kind). +// INVALID - Aliases REQUIRE_ONLY_ZBC, which is not supported +// by all RM implementations. +// +#define NVOS32_ATTR2_ZBC 1:0 +#define NVOS32_ATTR2_ZBC_DEFAULT 0x00000000 +#define NVOS32_ATTR2_ZBC_PREFER_NO_ZBC 0x00000001 +#define NVOS32_ATTR2_ZBC_PREFER_ZBC 0x00000002 +#define NVOS32_ATTR2_ZBC_REQUIRE_ONLY_ZBC 0x00000003 +#define NVOS32_ATTR2_ZBC_INVALID 0x00000003 + +// +// DEFAULT - Highest performance cache policy that is coherent with the highest +// performance CPU mapping. Typically this is gpu cached for video +// memory and gpu uncached for system memory. +// YES - Enable gpu caching if supported on this surface type. For system +// memory this will not be coherent with direct CPU mappings. +// NO - Disable gpu caching if supported on this surface type. +// INVALID - Clients should never set YES and NO simultaneously. +// +#define NVOS32_ATTR2_GPU_CACHEABLE 3:2 +#define NVOS32_ATTR2_GPU_CACHEABLE_DEFAULT 0x00000000 +#define NVOS32_ATTR2_GPU_CACHEABLE_YES 0x00000001 +#define NVOS32_ATTR2_GPU_CACHEABLE_NO 0x00000002 +#define NVOS32_ATTR2_GPU_CACHEABLE_INVALID 0x00000003 + +// +// DEFAULT - GPU-dependent cache policy +// YES - Enable gpu caching for p2p mem +// NO - Disable gpu caching for p2p mem +// +#define NVOS32_ATTR2_P2P_GPU_CACHEABLE 5:4 +#define NVOS32_ATTR2_P2P_GPU_CACHEABLE_DEFAULT 0x00000000 +#define NVOS32_ATTR2_P2P_GPU_CACHEABLE_YES 0x00000001 +#define NVOS32_ATTR2_P2P_GPU_CACHEABLE_NO 0x00000002 + +// This applies to virtual allocs only. See NVOS46_FLAGS_32BIT_POINTER. +#define NVOS32_ATTR2_32BIT_POINTER 6:6 +#define NVOS32_ATTR2_32BIT_POINTER_DISABLE 0x00000000 +#define NVOS32_ATTR2_32BIT_POINTER_ENABLE 0x00000001 + +// +// Whether or not a NUMA Node ID has been specified. +// If yes, the NUMA node ID specified in numaNode will be used. +// If no, memory can be allocated from any socket (numaNode will be ignored). +// The specified numaNode must be a CPU memory node (see the sketch below). +// + +#define NVOS32_ATTR2_FIXED_NUMA_NODE_ID 7:7 +#define NVOS32_ATTR2_FIXED_NUMA_NODE_ID_NO 0x00000000 +#define NVOS32_ATTR2_FIXED_NUMA_NODE_ID_YES 0x00000001 + +// +// Force SMMU mapping on GPU physical allocation in Tegra. +// SMMU mapping for GPU physical allocation is decided internally by RM; +// this attribute provides an override to RM policy for verification purposes. +// +#define NVOS32_ATTR2_SMMU_ON_GPU 9:8 +#define NVOS32_ATTR2_SMMU_ON_GPU_DEFAULT 0x00000000 +#define NVOS32_ATTR2_SMMU_ON_GPU_DISABLE 0x00000001 +#define NVOS32_ATTR2_SMMU_ON_GPU_ENABLE 0x00000002 + +// Used for allocating the memory from scanout carveout. +#define NVOS32_ATTR2_USE_SCANOUT_CARVEOUT 10:10 +#define NVOS32_ATTR2_USE_SCANOUT_CARVEOUT_FALSE 0x00000000 +#define NVOS32_ATTR2_USE_SCANOUT_CARVEOUT_TRUE 0x00000001 + +// +// Make comptag allocation aligned to compression cacheline size. +// Specifying this attribute will make RM allocate comptags worth an entire +// comp cacheline. The allocation will be offset-aligned to the number of comptags per comp cacheline.
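A fragment illustrating the NUMA attribute described above: the attr2 bit arms the numaNode field of the alloc request. The values are made up, and FLD_SET is the illustrative helper from the earlier sketch:

    /* Pin a system-memory allocation to NUMA node 1 (illustrative values). */
    NVOS32_PARAMETERS p = {0};
    p.function = NVOS32_FUNCTION_ALLOC_SIZE;
    p.data.AllocSize.attr = FLD_SET(NVOS32_ATTR_LOCATION, NVOS32_ATTR_LOCATION_PCI);
    p.data.AllocSize.attr2 = FLD_SET(NVOS32_ATTR2_FIXED_NUMA_NODE_ID,
                                     NVOS32_ATTR2_FIXED_NUMA_NODE_ID_YES);
    p.data.AllocSize.numaNode = 1; /* honored only because the attr2 bit is _YES */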
+// +#define NVOS32_ATTR2_ALLOC_COMPCACHELINE_ALIGN 11:11 +#define NVOS32_ATTR2_ALLOC_COMPCACHELINE_ALIGN_OFF 0x0 +#define NVOS32_ATTR2_ALLOC_COMPCACHELINE_ALIGN_ON 0x1 +#define NVOS32_ATTR2_ALLOC_COMPCACHELINE_ALIGN_DEFAULT \ + NVOS32_ATTR2_ALLOC_COMPCACHELINE_ALIGN_OFF + +// Allocation preferred in high or low priority memory +#define NVOS32_ATTR2_PRIORITY 13:12 +#define NVOS32_ATTR2_PRIORITY_DEFAULT 0x0 +#define NVOS32_ATTR2_PRIORITY_HIGH 0x1 +#define NVOS32_ATTR2_PRIORITY_LOW 0x2 + +// PMA: Allocation is an RM internal allocation (RM-only) +#define NVOS32_ATTR2_INTERNAL 14:14 +#define NVOS32_ATTR2_INTERNAL_NO 0x0 +#define NVOS32_ATTR2_INTERNAL_YES 0x1 + +// Allocate 2C instead of 2CZ +#define NVOS32_ATTR2_PREFER_2C 15:15 +#define NVOS32_ATTR2_PREFER_2C_NO 0x00000000 +#define NVOS32_ATTR2_PREFER_2C_YES 0x00000001 + +// Allocation used by display engine; RM verifies display engine has enough +// address bits or remapper available. +#define NVOS32_ATTR2_NISO_DISPLAY 16:16 +#define NVOS32_ATTR2_NISO_DISPLAY_NO 0x00000000 +#define NVOS32_ATTR2_NISO_DISPLAY_YES 0x00000001 + +// +// !!WARNING!!! +// +// This flag is introduced as a temporary WAR to enable color compression +// without ZBC. +// +// This dangerous flag can be used by UMDs to instruct RM to skip the zbc +// table refcounting that RM does today, when the chosen PTE kind has ZBC +// support. +// +// Currently we do not have a safe per process zbc slot management and +// refcounting mechanism between RM and UMD and hence, any process can +// access any other process's zbc entry in the global zbc table (without a mask). +// In order to flush the ZBC table for slot reuse, RM cannot track which +// process is using which zbc slot. Hence RM has a global refcount for the +// zbc table to flush and reuse the entries if the PTE kind supports zbc. +// +// This scheme poses a problem if there are apps that are persistent such as +// the desktop components that can have color compression enabled which will +// always keep the refcount active. Since these apps can live without +// ZBC, UMD can disable ZBC using masks. +// +// In such a case, if UMD so chooses to disable ZBC, this flag should be used +// to skip refcounting as by default RM would refcount the ZBC table. +// +// NOTE: There is no way for RM to enforce/police this, and we totally rely +// on UMD to use a zbc mask in the pushbuffer method to prevent apps from +// accessing the ZBC table. +// +#define NVOS32_ATTR2_ZBC_SKIP_ZBCREFCOUNT 17:17 +#define NVOS32_ATTR2_ZBC_SKIP_ZBCREFCOUNT_NO 0x00000000 +#define NVOS32_ATTR2_ZBC_SKIP_ZBCREFCOUNT_YES 0x00000001 + +// Allocation requires ISO bandwidth guarantees +#define NVOS32_ATTR2_ISO 18:18 +#define NVOS32_ATTR2_ISO_NO 0x00000000 +#define NVOS32_ATTR2_ISO_YES 0x00000001 + +// +// Turn off the blacklist feature for video memory allocation. +// This attribute should be used only by a kernel client (KMD), to mask +// the blacklisted pages for the allocation. This is done so that the clients +// will manage the masked blacklisted pages after the allocation; the pages +// return to RM's pool after the allocation is freed. RmVidHeapCtrl returns +// NV_ERR_INSUFFICIENT_PERMISSIONS if it is called by non-kernel clients. +// + +// TODO: Project ReLingo - This term is marked for deletion. Use PAGE_OFFLINING.
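For the page-offlining (blacklist) behavior described above, whose defines follow just below, a kernel client opting out of RM's masking might set the attribute as in this fragment; per the comment above, a non-kernel client attempting the same would get NV_ERR_INSUFFICIENT_PERMISSIONS:

    /* KMD-only: take over management of offlined pages for this allocation. */
    attr2 |= FLD_SET(NVOS32_ATTR2_PAGE_OFFLINING, NVOS32_ATTR2_PAGE_OFFLINING_OFF);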
+#define NVOS32_ATTR2_BLACKLIST 19:19 +#define NVOS32_ATTR2_BLACKLIST_ON 0x00000000 +#define NVOS32_ATTR2_BLACKLIST_OFF 0x00000001 +#define NVOS32_ATTR2_PAGE_OFFLINING 19:19 +#define NVOS32_ATTR2_PAGE_OFFLINING_ON 0x00000000 +#define NVOS32_ATTR2_PAGE_OFFLINING_OFF 0x00000001 + +// +// For virtual allocs to choose the HUGE page size for the region. +// NVOS32_ATTR_PAGE_SIZE must be set to _HUGE to use this. +// Currently, the default huge page is 2MB, so a request with _DEFAULT +// set will always be interpreted as 2MB. +// Not supported on pre_AMPERE chips. +// +#define NVOS32_ATTR2_PAGE_SIZE_HUGE 21:20 +#define NVOS32_ATTR2_PAGE_SIZE_HUGE_DEFAULT 0x00000000 +#define NVOS32_ATTR2_PAGE_SIZE_HUGE_2MB 0x00000001 +#define NVOS32_ATTR2_PAGE_SIZE_HUGE_512MB 0x00000002 +#define NVOS32_ATTR2_PAGE_SIZE_HUGE_256GB 0x00000003 + +// Allow read-only or read-write user CPU mappings +#define NVOS32_ATTR2_PROTECTION_USER 22:22 +#define NVOS32_ATTR2_PROTECTION_USER_READ_WRITE 0x00000000 +#define NVOS32_ATTR2_PROTECTION_USER_READ_ONLY 0x00000001 + +// Allow read-only or read-write device mappings +#define NVOS32_ATTR2_PROTECTION_DEVICE 23:23 +#define NVOS32_ATTR2_PROTECTION_DEVICE_READ_WRITE 0x00000000 +#define NVOS32_ATTR2_PROTECTION_DEVICE_READ_ONLY 0x00000001 + +// Deprecated. To be deleted once client code has removed references. +#define NVOS32_ATTR2_USE_EGM 24:24 +#define NVOS32_ATTR2_USE_EGM_FALSE 0x00000000 +#define NVOS32_ATTR2_USE_EGM_TRUE 0x00000001 + +// +// Allow client allocations to go to protected/unprotected video/system memory. +// When Ampere Protected Model aka APM or Confidential Compute is enabled and +// DEFAULT flag is set by client, allocations go to protected memory. When +// protected memory is not enabled, allocations go to unprotected memory. +// If APM or CC is not enabled, it is a bug for a client to set the PROTECTED +// flag to YES +// +#define NVOS32_ATTR2_MEMORY_PROTECTION 26:25 +#define NVOS32_ATTR2_MEMORY_PROTECTION_DEFAULT 0x00000000 +#define NVOS32_ATTR2_MEMORY_PROTECTION_PROTECTED 0x00000001 +#define NVOS32_ATTR2_MEMORY_PROTECTION_UNPROTECTED 0x00000002 +// +// Force the allocation to go to guest subheap. +// This flag is used by the vmiop plugin to allocate from GPA +// +#define NVOS32_ATTR2_ALLOCATE_FROM_SUBHEAP 27:27 +#define NVOS32_ATTR2_ALLOCATE_FROM_SUBHEAP_NO 0x00000000 +#define NVOS32_ATTR2_ALLOCATE_FROM_SUBHEAP_YES 0x00000001 + +// +// Force the video memory allocation to be a localized allocation. +// The same attribute can be used to choose between uGPU0 and uGPU1. +// If set to default, RM will choose the next available uGPU memory. +// If set to _UGPU0, RM will choose the memory from uGPU0. +// If set to _UGPU1, RM will choose the memory from uGPU1.
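Tying the two page-size fields together, per the NVOS32_ATTR_PAGE_SIZE comment earlier: a 512 MB huge-page request names _HUGE in attr and the concrete size in attr2. A fragment with the illustrative FLD_SET helper:

    /* Request 512 MB huge pages (AMPERE+); the two fields must agree. */
    attr |= FLD_SET(NVOS32_ATTR_PAGE_SIZE, NVOS32_ATTR_PAGE_SIZE_HUGE);
    attr2 |= FLD_SET(NVOS32_ATTR2_PAGE_SIZE_HUGE, NVOS32_ATTR2_PAGE_SIZE_HUGE_512MB);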
+// +#define NVOS32_ATTR2_ENABLE_LOCALIZED_MEMORY 30:29 +#define NVOS32_ATTR2_ENABLE_LOCALIZED_MEMORY_DEFAULT 0x00000000 +#define NVOS32_ATTR2_ENABLE_LOCALIZED_MEMORY_UGPU0 0x00000001 +#define NVOS32_ATTR2_ENABLE_LOCALIZED_MEMORY_UGPU1 0x00000002 + +// +// When allocating memory, register the memory descriptor to GSP-RM +// so that GSP-RM is aware of and can access it +// +#define NVOS32_ATTR2_REGISTER_MEMDESC_TO_PHYS_RM 31:31 +#define NVOS32_ATTR2_REGISTER_MEMDESC_TO_PHYS_RM_FALSE 0x00000000 +#define NVOS32_ATTR2_REGISTER_MEMDESC_TO_PHYS_RM_TRUE 0x00000001 + +/** + * NVOS32 ALLOC_FLAGS + * + * NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT + * + * NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP + * + * NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN + * + * NVOS32_ALLOC_FLAGS_FORCE_ALIGN_HOST_PAGE + * + * NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE + * + * NVOS32_ALLOC_FLAGS_BANK_HINT + * + * NVOS32_ALLOC_FLAGS_BANK_FORCE + * + * NVOS32_ALLOC_FLAGS_ALIGNMENT_HINT + * + * NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE + * + * NVOS32_ALLOC_FLAGS_BANK_GROW_UP + * Only relevant if bank_hint or bank_force are set + * + * NVOS32_ALLOC_FLAGS_BANK_GROW_DOWN + * Only relevant if bank_hint or bank_force are set + * + * NVOS32_ALLOC_FLAGS_LAZY + * Lazy allocation (deferred pde, pagetable creation) + * + * NVOS32_ALLOC_FLAGS_NO_SCANOUT + * Set if surface will never be scanned out + * + * NVOS32_ALLOC_FLAGS_PITCH_FORCE + * Fail alloc if supplied pitch is not aligned + * + * NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED + * Memory handle provided to be associated with this allocation + * + * NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED + * By default memory is mapped into the CPU address space + * + * NVOS32_ALLOC_FLAGS_PERSISTENT_VIDMEM + * Allocate persistent video memory + * + * NVOS32_ALLOC_FLAGS_USE_BEGIN_END + * Use rangeBegin & rangeEnd fields in allocs other than size/range + * + * NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED + * Allocate TurboCipher encrypted region + * + * NVOS32_ALLOC_FLAGS_VIRTUAL + * Allocate virtual memory address space + * + * NVOS32_ALLOC_FLAGS_FORCE_INTERNAL_INDEX + * Force allocation internal index + * + * NVOS32_ALLOC_FLAGS_ZCULL_COVG_SPECIFIED + * This flag is deprecated and allocations will fail. + * + * NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED + * Must be used with NVOS32_ALLOC_FLAGS_VIRTUAL. + * Page tables for this allocation will be managed outside of RM. + * + * NVOS32_ALLOC_FLAGS_FORCE_DEDICATED_PDE + * + * NVOS32_ALLOC_FLAGS_PROTECTED + * Allocate in a protected memory region if available + * + * NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP + * Map kernel os descriptor + * + * NVOS32_ALLOC_FLAGS_MAXIMIZE_ADDRESS_SPACE + * On WDDM all address spaces are created with MINIMIZE_PTETABLE_SIZE + * to reduce the overhead of private address spaces per application, + * at the cost of holes in the virtual address space. + * + * Shaders have short pointers that are required to be within a + * GPU dependent 32b range. + * + * MAXIMIZE_ADDRESS_SPACE will reverse the MINIMIZE_PTETABLE_SIZE + * flag with certain restrictions: + * - This flag only has an effect when the allocation has the side + * effect of creating a new PDE. It does not affect existing PDEs. + * - The first few PDEs of the address space are kept minimum to allow + * small applications to use fewer resources. + * - By default this operates on the 0-4GB address range. + * - If USE_BEGIN_END is specified the setting will apply to the + * specified range instead of the first 4GB. + * + * NVOS32_ALLOC_FLAGS_SPARSE + * Denote that a virtual address range is "sparse".
Must be used with + * NVOS32_ALLOC_FLAGS_VIRTUAL. Creation of a "sparse" virtual address range + * denotes that an unmapped virtual address range should "not" fault but simply + * return 0's. + * + * NVOS32_ALLOC_FLAGS_ALLOCATE_KERNEL_PRIVILEGED + * This is a special flag that can be used only by kernel (root) clients + * to allocate memory out of a protected region of the address space. + * If this flag is set by non-kernel clients then the allocation will + * fail. + * + * NVOS32_ALLOC_FLAGS_SKIP_RESOURCE_ALLOC + * + * NVOS32_ALLOC_FLAGS_PREFER_PTES_IN_SYSMEMORY + * If new pagetables need to be allocated, prefer them in sysmem (if supported by the gpu) + * + * NVOS32_ALLOC_FLAGS_SKIP_ALIGN_PAD + * As per KMD request to eliminate extra allocation + * + * NVOS32_ALLOC_FLAGS_WPR1 + * Allocate in a WPR1 region if available + * + * NVOS32_ALLOC_FLAGS_ZCULL_DONT_ALLOCATE_SHARED_1X + * If using zcull sharing and this surface is fsaa, then don't allocate an additional non-FSAA region. + * + * NVOS32_ALLOC_FLAGS_WPR2 + * Allocate in a WPR2 region if available + */ +#define NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT 0x00000001 +#define NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP 0x00000002 +#define NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN 0x00000004 +#define NVOS32_ALLOC_FLAGS_FORCE_ALIGN_HOST_PAGE 0x00000008 +#define NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE 0x00000010 +#define NVOS32_ALLOC_FLAGS_BANK_HINT 0x00000020 +#define NVOS32_ALLOC_FLAGS_BANK_FORCE 0x00000040 +#define NVOS32_ALLOC_FLAGS_ALIGNMENT_HINT 0x00000080 +#define NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE 0x00000100 +#define NVOS32_ALLOC_FLAGS_BANK_GROW_UP 0x00000000 +#define NVOS32_ALLOC_FLAGS_BANK_GROW_DOWN 0x00000200 +#define NVOS32_ALLOC_FLAGS_LAZY 0x00000400 +#define NVOS32_ALLOC_FLAGS_FORCE_REVERSE_ALLOC 0x00000800 +#define NVOS32_ALLOC_FLAGS_NO_SCANOUT 0x00001000 +#define NVOS32_ALLOC_FLAGS_PITCH_FORCE 0x00002000 +#define NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED 0x00004000 +#define NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED 0x00008000 +#define NVOS32_ALLOC_FLAGS_PERSISTENT_VIDMEM 0x00010000 +#define NVOS32_ALLOC_FLAGS_USE_BEGIN_END 0x00020000 +#define NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED 0x00040000 +#define NVOS32_ALLOC_FLAGS_VIRTUAL 0x00080000 +#define NVOS32_ALLOC_FLAGS_FORCE_INTERNAL_INDEX 0x00100000 +#define NVOS32_ALLOC_FLAGS_ZCULL_COVG_SPECIFIED 0x00200000 +#define NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED 0x00400000 +#define NVOS32_ALLOC_FLAGS_FORCE_DEDICATED_PDE 0x00800000 +#define NVOS32_ALLOC_FLAGS_PROTECTED 0x01000000 +#define NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP 0x02000000 // TODO BUG 2488679: fix alloc flag aliasing +#define NVOS32_ALLOC_FLAGS_MAXIMIZE_ADDRESS_SPACE 0x02000000 +#define NVOS32_ALLOC_FLAGS_SPARSE 0x04000000 +#define NVOS32_ALLOC_FLAGS_USER_READ_ONLY 0x04000000 // TODO BUG 2488682: remove this after KMD transition +#define NVOS32_ALLOC_FLAGS_DEVICE_READ_ONLY 0x08000000 // TODO BUG 2488682: remove this after KMD transition +#define NVOS32_ALLOC_FLAGS_ALLOCATE_KERNEL_PRIVILEGED 0x08000000 +#define NVOS32_ALLOC_FLAGS_SKIP_RESOURCE_ALLOC 0x10000000 +#define NVOS32_ALLOC_FLAGS_PREFER_PTES_IN_SYSMEMORY 0x20000000 +#define NVOS32_ALLOC_FLAGS_SKIP_ALIGN_PAD 0x40000000 +#define NVOS32_ALLOC_FLAGS_WPR1 0x40000000 // TODO BUG 2488672: fix alloc flag aliasing +#define NVOS32_ALLOC_FLAGS_ZCULL_DONT_ALLOCATE_SHARED_1X 0x80000000 +#define NVOS32_ALLOC_FLAGS_WPR2 0x80000000 // TODO BUG 2488672: fix alloc flag aliasing + +// Internal flags used for RM's allocation paths +#define NVOS32_ALLOC_INTERNAL_FLAGS_CLIENTALLOC 0x00000001 // RM
internal flags - not sure if this should be exposed even. Keeping it here. +#define NVOS32_ALLOC_INTERNAL_FLAGS_SKIP_SCRUB 0x00000004 // RM internal flags - not sure if this should be exposed even. Keeping it here. +#define NVOS32_ALLOC_FLAGS_MAXIMIZE_4GB_ADDRESS_SPACE NVOS32_ALLOC_FLAGS_MAXIMIZE_ADDRESS_SPACE // Legacy name + +// +// Bitmask of flags that are only valid for virtual allocations. +// +#define NVOS32_ALLOC_FLAGS_VIRTUAL_ONLY ( \ + NVOS32_ALLOC_FLAGS_VIRTUAL | \ + NVOS32_ALLOC_FLAGS_LAZY | \ + NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED | \ + NVOS32_ALLOC_FLAGS_SPARSE | \ + NVOS32_ALLOC_FLAGS_MAXIMIZE_ADDRESS_SPACE | \ + NVOS32_ALLOC_FLAGS_PREFER_PTES_IN_SYSMEMORY ) + +// COMPR_COVG_* allows for specification of what compression resources +// are required (_MIN) and necessary (_MAX). Default behavior is for +// RM to provide as much as possible, including none if _ANY is allowed. +// Values for min/max are (0-100, a %) * _COVG_SCALE (so max value is +// 100*10==1000). _START is used to specify the % offset into the +// region to begin the requested coverage. +// _COVG_BITS allows specification of the number of comptags per ROP tile. +// A value of 0 is default and allows RM to choose based upon MMU/FB rules. +// All other values for _COVG_BITS are arch-specific. +// Note: NVOS32_ATTR_COMPR_COVG_PROVIDED must be set for this feature +// to be available (verif-only). +#define NVOS32_ALLOC_COMPR_COVG_SCALE 10 +#define NVOS32_ALLOC_COMPR_COVG_BITS 1:0 +#define NVOS32_ALLOC_COMPR_COVG_BITS_DEFAULT 0x00000000 +#define NVOS32_ALLOC_COMPR_COVG_BITS_1 0x00000001 +#define NVOS32_ALLOC_COMPR_COVG_BITS_2 0x00000002 +#define NVOS32_ALLOC_COMPR_COVG_BITS_4 0x00000003 +#define NVOS32_ALLOC_COMPR_COVG_MAX 11:2 +#define NVOS32_ALLOC_COMPR_COVG_MIN 21:12 +#define NVOS32_ALLOC_COMPR_COVG_START 31:22 + + +// Note: NVOS32_ALLOC_FLAGS_ZCULL_COVG_SPECIFIED must be set for this feature +// to be enabled. +// If FALLBACK_ALLOW is set, a fallback from LOW_RES_Z or LOW_RES_ZS +// to HIGH_RES_Z is allowed if the surface can't be fully covered. +#define NVOS32_ALLOC_ZCULL_COVG_FORMAT 3:0 +#define NVOS32_ALLOC_ZCULL_COVG_FORMAT_LOW_RES_Z 0x00000000 +#define NVOS32_ALLOC_ZCULL_COVG_FORMAT_HIGH_RES_Z 0x00000002 +#define NVOS32_ALLOC_ZCULL_COVG_FORMAT_LOW_RES_ZS 0x00000003 +#define NVOS32_ALLOC_ZCULL_COVG_FALLBACK 4:4 +#define NVOS32_ALLOC_ZCULL_COVG_FALLBACK_DISALLOW 0x00000000 +#define NVOS32_ALLOC_ZCULL_COVG_FALLBACK_ALLOW 0x00000001 + + +// _ALLOC_COMPTAG_OFFSET allows the caller to specify the starting +// offset for the comptags for a given surface, primarily for test only. +// To specify an offset, set _USAGE_FIXED or _USAGE_MIN in conjunction +// with _START. +// +// _USAGE_FIXED sets a surface's comptagline to start at the given +// starting value. If the offset has already been assigned, then +// the alloc call fails. +// +// _USAGE_MIN sets a surface's comptagline to start at the given +// starting value or higher, depending on comptagline availability. +// In this case, if the offset has already been assigned, the next +// available comptagline (in increasing order) will be assigned. +// +// For Fermi, up to 2^17 comptags may be allowed, but the actual, +// usable limit depends on the size of the compbit backing store. +// +// For Pascal, up to 2^18 comptags may be allowed +// From Turing, up to 2^20 comptags may be allowed +// +// See also field ctagOffset in struct NVOS32_PARAMETERS.
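A fragment for the _USAGE_MIN case described above (the starting line is a made-up value, and FLD_SET is the illustrative helper from earlier; the defines follow just below):

    /* Start comptaglines at line 0x100, or the next free line above it. */
    NvU32 ctagOffset = FLD_SET(NVOS32_ALLOC_COMPTAG_OFFSET_START, 0x100) |
                       FLD_SET(NVOS32_ALLOC_COMPTAG_OFFSET_USAGE,
                               NVOS32_ALLOC_COMPTAG_OFFSET_USAGE_MIN);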
+#define NVOS32_ALLOC_COMPTAG_OFFSET_START 19:0 +#define NVOS32_ALLOC_COMPTAG_OFFSET_START_DEFAULT 0x00000000 +#define NVOS32_ALLOC_COMPTAG_OFFSET_USAGE 31:30 +#define NVOS32_ALLOC_COMPTAG_OFFSET_USAGE_DEFAULT 0x00000000 +#define NVOS32_ALLOC_COMPTAG_OFFSET_USAGE_OFF 0x00000000 +#define NVOS32_ALLOC_COMPTAG_OFFSET_USAGE_FIXED 0x00000001 +#define NVOS32_ALLOC_COMPTAG_OFFSET_USAGE_MIN 0x00000002 + + +// REALLOC flags field +#define NVOS32_REALLOC_FLAGS_GROW_ALLOCATION 0x00000000 +#define NVOS32_REALLOC_FLAGS_SHRINK_ALLOCATION 0x00000001 +#define NVOS32_REALLOC_FLAGS_REALLOC_UP 0x00000000 // towards/from high memory addresses +#define NVOS32_REALLOC_FLAGS_REALLOC_DOWN 0x00000002 // towards/from memory address 0 + +// RELEASE_COMPR, REACQUIRE_COMPR flags field +#define NVOS32_RELEASE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED 0x000000001 + +#define NVOS32_REACQUIRE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED 0x000000001 + + +// FREE flags field +#define NVOS32_FREE_FLAGS_MEMORY_HANDLE_PROVIDED 0x00000001 + +// DUMP flags field +#define NVOS32_DUMP_FLAGS_TYPE 1:0 +#define NVOS32_DUMP_FLAGS_TYPE_FB 0x00000000 +#define NVOS32_DUMP_FLAGS_TYPE_CLIENT_PD 0x00000001 +#define NVOS32_DUMP_FLAGS_TYPE_CLIENT_VA 0x00000002 +#define NVOS32_DUMP_FLAGS_TYPE_CLIENT_VAPTE 0x00000003 + +#define NVOS32_BLOCK_TYPE_FREE 0xFFFFFFFF +#define NVOS32_INVALID_BLOCK_FREE_OFFSET 0xFFFFFFFF + +#define NVOS32_MEM_TAG_NONE 0x00000000 + +/* + * NV_CONTEXT_DMA_ALLOCATION_PARAMS - Allocation params to create context dma + * through NvRmAlloc. + */ +typedef struct +{ + NvHandle hSubDevice; + NvV32 flags; + NvHandle hMemory; + NvU64 offset NV_ALIGN_BYTES(8); + NvU64 limit NV_ALIGN_BYTES(8); +} NV_CONTEXT_DMA_ALLOCATION_PARAMS; + +/* + * NV_MEMORY_ALLOCATION_PARAMS - Allocation params to create memory through + * NvRmAlloc. Flags are populated with NVOS32_ defines. + */ +typedef struct +{ + NvU32 owner; // [IN] - memory owner ID + NvU32 type; // [IN] - surface type, see below TYPE* defines + NvU32 flags; // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines + + NvU32 width; // [IN] - width of surface in pixels + NvU32 height; // [IN] - height of surface in pixels + NvS32 pitch; // [IN/OUT] - desired pitch AND returned actual pitch allocated + + NvU32 attr; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 attr2; // [IN/OUT] - surface attributes requested, and surface attributes allocated + + NvU32 format; // [IN/OUT] - format requested, and format allocated + NvU32 comprCovg; // [IN/OUT] - compr covg requested, and allocated + NvU32 zcullCovg; // [OUT] - zcull covg allocated + + NvU64 rangeLo NV_ALIGN_BYTES(8); // [IN] - allocated memory will be limited to the range + NvU64 rangeHi NV_ALIGN_BYTES(8); // [IN] - from rangeLo to rangeHi, inclusive. + + NvU64 size NV_ALIGN_BYTES(8); // [IN/OUT] - size of allocation - also returns the actual size allocated + NvU64 alignment NV_ALIGN_BYTES(8); // [IN] - requested alignment - NVOS32_ALLOC_FLAGS_ALIGNMENT* must be on + NvU64 offset NV_ALIGN_BYTES(8); // [IN/OUT] - desired offset if NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE is on AND returned offset + NvU64 limit NV_ALIGN_BYTES(8); // [OUT] - returned surface limit + NvP64 address NV_ALIGN_BYTES(8); // [OUT] - returned address + + NvU32 ctagOffset; // [IN] - comptag offset for this surface (see NVOS32_ALLOC_COMPTAG_OFFSET) + NvHandle hVASpace; // [IN] - VASpace handle. Used when flag is VIRTUAL.
+ + NvU32 internalflags; // [IN] - internal flags to change allocation behaviors from internal paths + + NvU32 tag; // [IN] - memory tag used for debugging + + NvS32 numaNode; // [IN] - CPU NUMA node from which memory should be allocated +} NV_MEMORY_ALLOCATION_PARAMS; + +/* + * NV_OS_DESC_MEMORY_ALLOCATION_PARAMS - Allocation params to create OS + * described memory through NvRmAlloc. Flags are populated with NVOS32_ defines. + */ +typedef struct +{ + NvU32 type; // [IN] - surface type, see below TYPE* defines + NvU32 flags; // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines + NvU32 attr; // [IN] - attributes for memory placement/properties, see below + NvU32 attr2; // [IN] - attributes GPU_CACHEABLE + NvP64 descriptor NV_ALIGN_BYTES(8); // [IN] - descriptor address + NvU64 limit NV_ALIGN_BYTES(8); // [IN] - allocated size -1 + NvU32 descriptorType; // [IN] - descriptor type(Virtual | nvmap Handle) + NvU32 tag; // [IN] - memory tag used for debugging +} NV_OS_DESC_MEMORY_ALLOCATION_PARAMS; + +/* + * NV_USER_LOCAL_DESC_MEMORY_ALLOCATION_PARAMS - Allocation params to create a memory + * object from user allocated video memory. Flags are populated with NVOS32_* + * defines. + */ +typedef struct +{ + NvU32 flags; // [IN] - allocation modifier flags, see NVOS02_FLAGS* defines + NvU64 physAddr NV_ALIGN_BYTES(8); // [IN] - physical address + NvU64 size NV_ALIGN_BYTES(8); // [IN] - mem size + NvU32 tag; // [IN] - memory tag used for debugging + NvBool bGuestAllocated; // [IN] - Set if memory is guest allocated (mapped by VMMU) +} NV_USER_LOCAL_DESC_MEMORY_ALLOCATION_PARAMS; + +/* + * NV_MEMORY_HW_RESOURCES_ALLOCATION_PARAMS - Allocation params to create + * memory HW resources through NvRmAlloc. Flags are populated with NVOS32_ + * defines. + */ +typedef struct +{ + NvU32 owner; // [IN] - memory owner ID + NvU32 flags; // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines + NvU32 type; // [IN] - surface type, see below TYPE* defines + + NvU32 attr; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 attr2; // [IN/OUT] - surface attributes requested, and surface attributes allocated + + NvU32 height; + NvU32 width; + NvU32 pitch; + NvU32 alignment; + NvU32 comprCovg; + NvU32 zcullCovg; + + NvU32 kind; + + NvP64 bindResultFunc NV_ALIGN_BYTES(8); // BindResultFunc + NvP64 pHandle NV_ALIGN_BYTES(8); + NvU64 osDeviceHandle NV_ALIGN_BYTES(8); + NvU64 size NV_ALIGN_BYTES(8); + NvU64 allocAddr NV_ALIGN_BYTES(8); + + // [out] from GMMU_COMPR_INFO in drivers/common/shared/inc/mmu/gmmu_fmt.h + NvU32 compPageShift; + NvU32 compressedKind; + NvU32 compTagLineMin; + NvU32 compPageIndexLo; + NvU32 compPageIndexHi; + NvU32 compTagLineMultiplier; + + // [out] fallback uncompressed kind. + NvU32 uncompressedKind; + + NvU32 tag; // [IN] - memory tag used for debugging +} NV_MEMORY_HW_RESOURCES_ALLOCATION_PARAMS; + +/* function OS33 */ +#define NV04_MAP_MEMORY (0x00000021) + +// Legacy map and unmap memory flags that don't use DRF_DEF scheme +#define NV04_MAP_MEMORY_FLAGS_NONE (0x00000000) +#define NV04_MAP_MEMORY_FLAGS_USER (0x00004000) + +// New map and unmap memory flags. These flags are used for both NvRmMapMemory +// and for NvRmUnmapMemory. + +// Mappings can have restricted permissions (read-only, write-only). 
Some +// RM implementations may choose to ignore these flags, or they may work +// only for certain memory spaces (system, video memory); in such cases, +// you may get a read/write mapping even if you asked for a read-only or +// write-only mapping. +#define NVOS33_FLAGS_ACCESS 1:0 +#define NVOS33_FLAGS_ACCESS_READ_WRITE (0x00000000) +#define NVOS33_FLAGS_ACCESS_READ_ONLY (0x00000001) +#define NVOS33_FLAGS_ACCESS_WRITE_ONLY (0x00000002) + +// Persistent mappings are no longer supported +#define NVOS33_FLAGS_PERSISTENT 4:4 +#define NVOS33_FLAGS_PERSISTENT_DISABLE (0x00000000) +#define NVOS33_FLAGS_PERSISTENT_ENABLE (0x00000001) + +// This flag is a hack to work around bug 150889. It disables the error +// checking in the RM that verifies that the client is not trying to map +// memory past the end of the memory object. This error checking needs to +// be shut off in some cases for a PAE bug workaround in certain kernels. +#define NVOS33_FLAGS_SKIP_SIZE_CHECK 8:8 +#define NVOS33_FLAGS_SKIP_SIZE_CHECK_DISABLE (0x00000000) +#define NVOS33_FLAGS_SKIP_SIZE_CHECK_ENABLE (0x00000001) + +// Normally, a mapping is created in the same memory space as the client -- in +// kernel space for a kernel RM client, or in user space for a user RM client. +// However, a kernel RM client can specify MEM_SPACE:USER to create a user-space +// mapping in the current RM client. +#define NVOS33_FLAGS_MEM_SPACE 14:14 +#define NVOS33_FLAGS_MEM_SPACE_CLIENT (0x00000000) +#define NVOS33_FLAGS_MEM_SPACE_USER (0x00000001) + +// The client can ask for direct memory mapping (i.e. no BAR1) if remappers and +// blocklinear are not required. RM can do direct mapping in this case if +// carveout is available. +// DEFAULT: Use direct mapping if available and no address/data translation +// is necessary; reflected otherwise +// DIRECT: Use direct mapping if available, even if some translation is +// necessary (the client is responsible for translation) +// REFLECTED: Always use reflected mapping +#define NVOS33_FLAGS_MAPPING 16:15 +#define NVOS33_FLAGS_MAPPING_DEFAULT (0x00000000) +#define NVOS33_FLAGS_MAPPING_DIRECT (0x00000001) +#define NVOS33_FLAGS_MAPPING_REFLECTED (0x00000002) + +// The client requests a fifo mapping but doesn't know the offset or length +// DEFAULT: Do error check length and offset +// ENABLE: Don't error check length and offset but have the RM fill them in +#define NVOS33_FLAGS_FIFO_MAPPING 17:17 +#define NVOS33_FLAGS_FIFO_MAPPING_DEFAULT (0x00000000) +#define NVOS33_FLAGS_FIFO_MAPPING_ENABLE (0x00000001) + +// The client can require that the CPU mapping be to a specific CPU address +// (akin to MAP_FIXED for mmap). +// DISABLED: RM will map the allocation at a CPU VA that RM selects. +// ENABLED: RM will map the allocation at the CPU VA specified by the address +// pass-back parameter to NvRmMapMemory +// NOTES: +// - Used for controlling CPU addresses in CUDA's unified CPU+GPU virtual +// address space +// - Only valid on NvRmMapMemory +// - Implemented on Unix but not VMware +#define NVOS33_FLAGS_MAP_FIXED 18:18 +#define NVOS33_FLAGS_MAP_FIXED_DISABLE (0x00000000) +#define NVOS33_FLAGS_MAP_FIXED_ENABLE (0x00000001) + +// The client can specify to the RM that the CPU virtual address range for an +// allocation should remain reserved after the allocation is unmapped. +// DISABLE: When this mapping is destroyed, RM will unmap the CPU virtual +// address space used by this allocation. On Linux this corresponds +// to calling munmap on the CPU VA region. 
+// ENABLE: When the map object is freed, RM will leave the CPU virtual +// address space used by the allocation reserved. On Linux this means +// that RM will overwrite the previous mapping with an anonymous +// mapping instead of calling munmap. +// NOTES: +// - When combined with MAP_FIXED, this allows the client to exert +// significant control over the CPU heap +// - Used in CUDA's unified CPU+GPU virtual address space +// - Valid in NvRmUnmapMemory +// - Valid on NvRmMapMemory (specifies RM's behavior whenever the +// mapping is destroyed, regardless of mechanism) +// - Implemented on Unix but not VMware +#define NVOS33_FLAGS_RESERVE_ON_UNMAP 19:19 +#define NVOS33_FLAGS_RESERVE_ON_UNMAP_DISABLE (0x00000000) +#define NVOS33_FLAGS_RESERVE_ON_UNMAP_ENABLE (0x00000001) + +// Internal use only +#define NVOS33_FLAGS_OS_DESCRIPTOR 22:22 +#define NVOS33_FLAGS_OS_DESCRIPTOR_DISABLE (0x00000000) +#define NVOS33_FLAGS_OS_DESCRIPTOR_ENABLE (0x00000001) + +// +// For use in the linux mapping path. This flag sets the +// caching mode for pcie BAR mappings (from nv_memory_type.h). +// Internal use only. +// +#define NVOS33_FLAGS_CACHING_TYPE 25:23 +#define NVOS33_FLAGS_CACHING_TYPE_CACHED 0 +#define NVOS33_FLAGS_CACHING_TYPE_UNCACHED 1 +#define NVOS33_FLAGS_CACHING_TYPE_WRITECOMBINED 2 +#define NVOS33_FLAGS_CACHING_TYPE_WRITEBACK 5 +#define NVOS33_FLAGS_CACHING_TYPE_DEFAULT 6 +#define NVOS33_FLAGS_CACHING_TYPE_UNCACHED_WEAK 7 + +// +// For use when Hopper Confidential Compute is operating in devtools mode. +// BAR1 access to CPR vidmem is blocked to CPU-RM by default when HCC is +// enabled in both devtools and prod modes. However, certain mappings are +// allowed to go through successfully only in devtools mode. For example, +// CPU mappings made on behalf of devtools and event buffer mappings are +// allowed to happen in devtools mode +// +#define NVOS33_FLAGS_ALLOW_MAPPING_ON_HCC 26:26 +#define NVOS33_FLAGS_ALLOW_MAPPING_ON_HCC_NO (0x00000000) +#define NVOS33_FLAGS_ALLOW_MAPPING_ON_HCC_YES (0x00000001) + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hDevice; // device or sub-device handle + NvHandle hMemory; // handle to memory object if provided -- NULL if not + NvU64 offset NV_ALIGN_BYTES(8); + NvU64 length NV_ALIGN_BYTES(8); + NvP64 pLinearAddress NV_ALIGN_BYTES(8); // pointer for returned address + NvU32 status; + NvU32 flags; +} NVOS33_PARAMETERS; + + +/* function OS34 */ +#define NV04_UNMAP_MEMORY (0x00000022) + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvP64 pLinearAddress NV_ALIGN_BYTES(8); // ptr to virtual address of mapped memory + NvU32 status; + NvU32 flags; +} NVOS34_PARAMETERS; + +/* function OS38 */ +#define NV04_ACCESS_REGISTRY (0x00000026) + +/* parameter values */ +#define NVOS38_ACCESS_TYPE_READ_DWORD 1 +#define NVOS38_ACCESS_TYPE_WRITE_DWORD 2 +#define NVOS38_ACCESS_TYPE_READ_BINARY 6 +#define NVOS38_ACCESS_TYPE_WRITE_BINARY 7 + +#define NVOS38_MAX_REGISTRY_STRING_LENGTH 256 +#define NVOS38_MAX_REGISTRY_BINARY_LENGTH 256 + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hObject; + NvV32 AccessType; + + NvV32 DevNodeLength; + NvP64 pDevNode NV_ALIGN_BYTES(8); + + NvV32 ParmStrLength; + NvP64 pParmStr NV_ALIGN_BYTES(8); + + NvV32 BinaryDataLength; + NvP64 pBinaryData NV_ALIGN_BYTES(8); + + NvV32 Data; + NvV32 Entry; + NvV32 status; +} NVOS38_PARAMETERS; + +#define NV04_ALLOC_CONTEXT_DMA (0x00000027) + +/* parameter values are the same as NVOS03 -- not repeated here */ + +/* parameters */
+typedef struct +{ + NvHandle hObjectParent; + NvHandle hSubDevice; + NvHandle hObjectNew; + NvV32 hClass; + NvV32 flags; + NvU32 selector; + NvHandle hMemory; + NvU64 offset NV_ALIGN_BYTES(8); + NvU64 limit NV_ALIGN_BYTES(8); + NvV32 status; +} NVOS39_PARAMETERS; + + +#define NV04_GET_EVENT_DATA (0x00000028) + +typedef struct +{ + NvHandle hObject; + NvV32 NotifyIndex; + + // + // Holds same information as that of nvgputypes.h::NvNotification's + // info32 and info16. + // + NvV32 info32; + NvU16 info16; +} NvUnixEvent; + +/* parameters */ +typedef struct +{ + NvP64 pEvent NV_ALIGN_BYTES(8); + NvV32 MoreEvents; + NvV32 status; +} NVOS41_PARAMETERS; + +/* function NVOS43 -- deleted 4/09 */ +/* #define NV04_UNIFIED_FREE (0x0000002B) */ + + +#define NVSIM01_BUS_XACT (0x0000002C) + +/* parameters */ +typedef struct +{ + NvHandle hClient; // n/a currently + NvHandle hDevice; // n/a currently + NvU32 offset; // phy bus offset + NvU32 bar; // ~0 := phy addr, {0..2} specify gpu bar + NvU32 bytes; // # of bytes + NvU32 write; // 0 := read request + NvU32 data; // in/out based upon 'write' + NvU32 status; +} NVOS2C_PARAMETERS; + +/* function NVOS2D -- deleted 4/09 */ +/* #define NVSIM01_BUS_GET_IFACES (0x0000002D) */ + + +/* function OS46 */ +#define NV04_MAP_MEMORY_DMA (0x0000002E) + +/* parameter values */ +#define NVOS46_FLAGS_ACCESS 1:0 +#define NVOS46_FLAGS_ACCESS_READ_WRITE (0x00000000) +#define NVOS46_FLAGS_ACCESS_READ_ONLY (0x00000001) +#define NVOS46_FLAGS_ACCESS_WRITE_ONLY (0x00000002) + +// +// Compute shaders support both 32b and 64b pointers. This allows mappings +// to be restricted to the bottom 4GB of the address space. How _DISABLE +// is handled is chip specific and may force a pointer above 4GB. +// +#define NVOS46_FLAGS_32BIT_POINTER 2:2 +#define NVOS46_FLAGS_32BIT_POINTER_DISABLE (0x00000000) +#define NVOS46_FLAGS_32BIT_POINTER_ENABLE (0x00000001) + +#define NVOS46_FLAGS_PAGE_KIND 3:3 +#define NVOS46_FLAGS_PAGE_KIND_PHYSICAL (0x00000000) +#define NVOS46_FLAGS_PAGE_KIND_VIRTUAL (0x00000001) + +#define NVOS46_FLAGS_CACHE_SNOOP 4:4 +#define NVOS46_FLAGS_CACHE_SNOOP_DISABLE (0x00000000) +#define NVOS46_FLAGS_CACHE_SNOOP_ENABLE (0x00000001) + +// The client requests a CPU kernel mapping so that SW class could use it +// DEFAULT: Don't map CPU address +// ENABLE: Map CPU address +#define NVOS46_FLAGS_KERNEL_MAPPING 5:5 +#define NVOS46_FLAGS_KERNEL_MAPPING_NONE (0x00000000) +#define NVOS46_FLAGS_KERNEL_MAPPING_ENABLE (0x00000001) + +// +// Compute shader access control. +// GPUs that support this feature set the NV0080_CTRL_DMA_CAPS_SHADER_ACCESS_SUPPORTED +// property. These were first supported in Kepler. _DEFAULT will match the ACCESS field. +// +#define NVOS46_FLAGS_SHADER_ACCESS 7:6 +#define NVOS46_FLAGS_SHADER_ACCESS_DEFAULT (0x00000000) +#define NVOS46_FLAGS_SHADER_ACCESS_READ_ONLY (0x00000001) +#define NVOS46_FLAGS_SHADER_ACCESS_WRITE_ONLY (0x00000002) +#define NVOS46_FLAGS_SHADER_ACCESS_READ_WRITE (0x00000003) + +// +// How the PAGE_SIZE field is interpreted is architecture specific. +// +// On Curie chips it is ignored. +// +// On Tesla it is used to select which type of PDE +// to use. By default the RM will select 4KB for system memory +// and BIG (64KB) for video memory. BOTH is not supported. +// +// Likewise on Fermi this is used to select the PDE type. Fermi cannot +// mix page sizes within a single mapping so the page size is determined +// at surface allocation time. 4KB or BIG may be specified but they +// must match the page size selected at allocation time.
+// allows the RM to select either a single page size or both PDE,
+// while BOTH forces the RM to select a dual page size PDE.
+//
+// BIG_PAGE  = 64 KB on PASCAL
+//           = 64 KB or 128 KB on pre_PASCAL chips
+//
+// HUGE_PAGE = 2 MB on PASCAL
+//           = not supported on pre_PASCAL chips.
+//
+#define NVOS46_FLAGS_PAGE_SIZE                     11:8
+#define NVOS46_FLAGS_PAGE_SIZE_DEFAULT             (0x00000000)
+#define NVOS46_FLAGS_PAGE_SIZE_4KB                 (0x00000001)
+#define NVOS46_FLAGS_PAGE_SIZE_BIG                 (0x00000002)
+#define NVOS46_FLAGS_PAGE_SIZE_BOTH                (0x00000003)
+#define NVOS46_FLAGS_PAGE_SIZE_HUGE                (0x00000004)
+#define NVOS46_FLAGS_PAGE_SIZE_512M                (0x00000005)
+
+// Some systems allow the device to use the system L3 cache when accessing the
+// system memory. For example, the iGPU on T19X can allocate from the system L3
+// provided the SoC L3 cache is configured for device allocation.
+//
+// NVOS46_FLAGS_SYSTEM_L3_ALLOC_DEFAULT - Use the default L3 allocation
+// policy. When using this policy, device memory access will be coherent with
+// non-snooping devices such as the display on Tegra.
+//
+// NVOS46_FLAGS_SYSTEM_L3_ALLOC_ENABLE_HINT - Enable L3 allocation if possible.
+// When L3 allocation is enabled, device memory access may be cached, and the
+// memory access will be coherent only with other snoop-enabled access. This
+// flag is a hint and will be ignored if the system does not support L3
+// allocation for the device. NVOS46_FLAGS_CACHE_SNOOP_ENABLE must also be set
+// for this flag to be effective.
+//
+// Note: This flag is implemented only by rmapi_tegra. It is not implemented by
+// Resman.
+//
+#define NVOS46_FLAGS_SYSTEM_L3_ALLOC               13:13
+#define NVOS46_FLAGS_SYSTEM_L3_ALLOC_DEFAULT       (0x00000000)
+#define NVOS46_FLAGS_SYSTEM_L3_ALLOC_ENABLE_HINT   (0x00000001)
+
+#define NVOS46_FLAGS_DMA_OFFSET_GROWS              14:14
+#define NVOS46_FLAGS_DMA_OFFSET_GROWS_UP           (0x00000000)
+#define NVOS46_FLAGS_DMA_OFFSET_GROWS_DOWN         (0x00000001)
+
+//
+// DMA_OFFSET_FIXED is overloaded for two purposes.
+//
+// 1. For CTXDMA mappings that use DMA_UNICAST_REUSE_ALLOC_FALSE,
+//    DMA_OFFSET_FIXED_TRUE indicates to use the dmaOffset parameter
+//    for a fixed address allocation out of the VA space heap.
+//    DMA_OFFSET_FIXED_FALSE indicates dmaOffset input will be ignored.
+//
+// 2. For CTXDMA mappings that use DMA_UNICAST_REUSE_ALLOC_TRUE and
+//    for *ALL* non-CTXDMA mappings, DMA_OFFSET_FIXED_TRUE indicates
+//    to treat the input dmaOffset as an absolute virtual address
+//    instead of an offset relative to the virtual allocation being
+//    mapped into. Whether relative or absolute, the resulting
+//    virtual address *must* be contained within the specified
+//    virtual allocation.
+//
+//    Internally, it is also required that the virtual address be aligned
+//    to the page size of the mapping (obviously cannot map sub-pages).
+//    For client flexibility the physical offset does not require page alignment.
+//    This is handled by adding the physical misalignment
+//    (internally called pteAdjust) to the returned virtual address.
+//    The *input* dmaOffset can account for this pteAdjust (or not),
+//    but the returned virtual address always will.
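+//
+//    Worked example (illustrative, not from the original header, but it
+//    follows directly from the rules above): with a 4 KB page size, a
+//    physical offset of 0x1080 has a pteAdjust of 0x080. If the VA heap
+//    places the mapping at 0x20000, the returned dmaOffset is 0x20080.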
+//
+#define NVOS46_FLAGS_DMA_OFFSET_FIXED              15:15
+#define NVOS46_FLAGS_DMA_OFFSET_FIXED_FALSE        (0x00000000)
+#define NVOS46_FLAGS_DMA_OFFSET_FIXED_TRUE         (0x00000001)
+
+#define NVOS46_FLAGS_DISABLE_ENCRYPTION            16:16
+#define NVOS46_FLAGS_DISABLE_ENCRYPTION_FALSE      (0x00000000)
+#define NVOS46_FLAGS_DISABLE_ENCRYPTION_TRUE       (0x00000001)
+
+//
+// DEFAULT - Use the cache policy set at allocation
+// YES     - Enable gpu caching
+// NO      - Disable gpu caching
+//
+#define NVOS46_FLAGS_GPU_CACHEABLE                 18:17
+#define NVOS46_FLAGS_GPU_CACHEABLE_DEFAULT         (0x00000000)
+#define NVOS46_FLAGS_GPU_CACHEABLE_YES             (0x00000001)
+#define NVOS46_FLAGS_GPU_CACHEABLE_NO              (0x00000002)
+#define NVOS46_FLAGS_GPU_CACHEABLE_INVALID         (0x00000003)
+
+#define NVOS46_FLAGS_PAGE_KIND_OVERRIDE            19:19
+#define NVOS46_FLAGS_PAGE_KIND_OVERRIDE_NO         (0x00000000)
+#define NVOS46_FLAGS_PAGE_KIND_OVERRIDE_YES        (0x00000001)
+
+#define NVOS46_FLAGS_P2P                           27:20
+
+#define NVOS46_FLAGS_P2P_ENABLE                    21:20
+#define NVOS46_FLAGS_P2P_ENABLE_NO                 (0x00000000)
+#define NVOS46_FLAGS_P2P_ENABLE_YES                (0x00000001)
+#define NVOS46_FLAGS_P2P_ENABLE_NONE               NVOS46_FLAGS_P2P_ENABLE_NO
+#define NVOS46_FLAGS_P2P_ENABLE_SLI                NVOS46_FLAGS_P2P_ENABLE_YES
+#define NVOS46_FLAGS_P2P_ENABLE_NOSLI              (0x00000002)
+// Subdevice ID. Reserved 3 bits for the possibility of 8-way SLI
+#define NVOS46_FLAGS_P2P_SUBDEVICE_ID              24:22
+#define NVOS46_FLAGS_P2P_SUBDEV_ID_SRC             NVOS46_FLAGS_P2P_SUBDEVICE_ID
+#define NVOS46_FLAGS_P2P_SUBDEV_ID_TGT             27:25
+#define NVOS46_FLAGS_TLB_LOCK                      28:28
+#define NVOS46_FLAGS_TLB_LOCK_DISABLE              (0x00000000)
+#define NVOS46_FLAGS_TLB_LOCK_ENABLE               (0x00000001)
+#define NVOS46_FLAGS_DMA_UNICAST_REUSE_ALLOC       29:29
+#define NVOS46_FLAGS_DMA_UNICAST_REUSE_ALLOC_FALSE (0x00000000)
+#define NVOS46_FLAGS_DMA_UNICAST_REUSE_ALLOC_TRUE  (0x00000001)
+//
+// Force pte kind to compressed for this and future mappings of the memory object.
+// Only affects mappings using NVOS46_FLAGS_PAGE_KIND_VIRTUAL.
+// Only has effect when the physical allocation is compressed.
+//
+#define NVOS46_FLAGS_ENABLE_FORCE_COMPRESSED_MAP       30:30
+#define NVOS46_FLAGS_ENABLE_FORCE_COMPRESSED_MAP_FALSE (0x00000000)
+#define NVOS46_FLAGS_ENABLE_FORCE_COMPRESSED_MAP_TRUE  (0x00000001)
+
+//
+// This flag must be used with caution. Improper use can leave stale entries in the TLB,
+// and allow access to memory no longer owned by the RM client or cause page faults.
+// Also see corresponding flag for NvUnmapMemoryDma.
+//
+#define NVOS46_FLAGS_DEFER_TLB_INVALIDATION        31:31
+#define NVOS46_FLAGS_DEFER_TLB_INVALIDATION_FALSE  (0x00000000)
+#define NVOS46_FLAGS_DEFER_TLB_INVALIDATION_TRUE   (0x00000001)
+
+//
+// This flag is used on fully coherent platforms to specify whether the GPU cache
+// should be snooped by the CPU or other IO devices for this mapping.
+//
+// Only takes effect if the physical memory is allocated with
+// _ATTR_GPU_CACHE_SNOOPABLE_MAPPING indicating the decision
+// should be deferred to map time.
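+//
+// As with the other field ranges in this header, the FLAGS2 fields below are
+// laid out for the DRF field macros from nvmisc.h; for example (illustrative
+// only):
+//
+//     params.flags2 = DRF_DEF(OS46, _FLAGS2, _GPU_CACHE_SNOOP, _ENABLE);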
+//
+#define NVOS46_FLAGS2_GPU_CACHE_SNOOP              1:0
+#define NVOS46_FLAGS2_GPU_CACHE_SNOOP_DEFAULT      (0x00000000)
+#define NVOS46_FLAGS2_GPU_CACHE_SNOOP_ENABLE       (0x00000001)
+#define NVOS46_FLAGS2_GPU_CACHE_SNOOP_DISABLE      (0x00000002)
+
+/* parameters */
+typedef struct
+{
+    NvHandle hClient;                     // [IN] client handle
+    NvHandle hDevice;                     // [IN] device handle for mapping
+    NvHandle hDma;                        // [IN] dma handle for mapping
+    NvHandle hMemory;                     // [IN] memory handle for mapping
+    NvU64    offset NV_ALIGN_BYTES(8);    // [IN] offset of region
+    NvU64    length NV_ALIGN_BYTES(8);    // [IN] limit of region
+    NvV32    flags;                       // [IN] flags
+    NvV32    flags2;                      // [IN] flags2
+    NvV32    kindOverride;                // [IN] page kind - applied if NVOS46_FLAGS_PAGE_KIND_OVERRIDE is YES
+    NvU64    dmaOffset NV_ALIGN_BYTES(8); // [OUT] offset of mapping
+                                          // [IN]  if FLAGS_DMA_OFFSET_FIXED_TRUE
+                                          //       *OR* hDma is NOT a CTXDMA handle
+                                          //       (see NVOS46_FLAGS_DMA_OFFSET_FIXED)
+    NvV32    status;                      // [OUT] status
+} NVOS46_PARAMETERS;
+
+typedef NVOS46_PARAMETERS NV_MAP_MEMORY_DMA_PARAMETERS;
+
+/* function OS47 */
+#define NV04_UNMAP_MEMORY_DMA         (0x0000002F)
+
+#define NVOS47_FLAGS_DEFER_TLB_INVALIDATION        0:0
+#define NVOS47_FLAGS_DEFER_TLB_INVALIDATION_FALSE  (0x00000000)
+#define NVOS47_FLAGS_DEFER_TLB_INVALIDATION_TRUE   (0x00000001)
+
+/* parameters */
+typedef struct
+{
+    NvHandle hClient;                     // [IN] client handle
+    NvHandle hDevice;                     // [IN] device handle for mapping
+    NvHandle hDma;                        // [IN] dma handle for mapping
+    NvHandle hMemory;                     // [IN] memory handle for mapping
+    NvV32    flags;                       // [IN] flags
+    NvU64    dmaOffset NV_ALIGN_BYTES(8); // [IN] dma offset from NV04_MAP_MEMORY_DMA
+    NvU64    size NV_ALIGN_BYTES(8);      // [IN] size to unmap, 0 to unmap entire mapping
+    NvV32    status;                      // [OUT] status
+} NVOS47_PARAMETERS;
+
+typedef NVOS47_PARAMETERS NV_UNMAP_MEMORY_DMA_PARAMETERS;
+
+#define NV04_BIND_CONTEXT_DMA         (0x00000031)
+/* parameters */
+typedef struct
+{
+    NvHandle hClient;                // [IN] client handle
+    NvHandle hChannel;               // [IN] channel handle for binding
+    NvHandle hCtxDma;                // [IN] ctx dma handle for binding
+    NvV32    status;                 // [OUT] status
+} NVOS49_PARAMETERS;
+
+
+/* function OS54 */
+#define NV04_CONTROL                  (0x00000036)
+
+#define NVOS54_FLAGS_NONE             (0x00000000)
+#define NVOS54_FLAGS_IRQL_RAISED      (0x00000001)
+#define NVOS54_FLAGS_LOCK_BYPASS      (0x00000002)
+#define NVOS54_FLAGS_FINN_SERIALIZED  (0x00000004)
+
+/* parameters */
+typedef struct
+{
+    NvHandle hClient;
+    NvHandle hObject;
+    NvV32    cmd;
+    NvU32    flags;
+    NvP64    params NV_ALIGN_BYTES(8);
+    NvU32    paramsSize;
+    NvV32    status;
+} NVOS54_PARAMETERS;
+
+/* RM Control header
+ *
+ * Replacement for NVOS54_PARAMETERS where embedded pointers are not allowed.
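+ *
+ * For contrast, a classic NVOS54-based control call passes its parameter
+ * struct by embedded pointer. A minimal illustrative sketch (not from the
+ * original header; the handle values, the SOME_CTRL_* names, and the OS
+ * escape used to submit the struct are hypothetical):
+ *
+ *     SOME_CTRL_PARAMS  info = {0};
+ *     NVOS54_PARAMETERS ctl  = {0};
+ *     ctl.hClient    = hClient;
+ *     ctl.hObject    = hObject;     // object the command is issued against
+ *     ctl.cmd        = SOME_CTRL_CMD;
+ *     ctl.params     = NV_PTR_TO_NvP64(&info);
+ *     ctl.paramsSize = sizeof(info);
+ *     // submit via the platform's RM ioctl/escape, then check ctl.status
+ *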
+ * Input layout for user space RM Control calls should be:
+ *
+ * +--- NVOS63_PARAMETERS ---+--- RM Control parameters ---+
+ *
+ * NVOS63_PARAMETERS::paramsSize is the size of RM Control parameters
+ *
+ */
+typedef struct
+{
+    NvHandle hClient;       // [IN]  client handle
+    NvHandle hObject;       // [IN]  object handle
+    NvV32    cmd;           // [IN]  control command ID
+    NvU32    paramsSize;    // [IN]  size in bytes of the RM Control parameters
+    NvV32    status;        // [OUT] status
+} NVOS63_PARAMETERS;
+
+
+/* function OS55 */
+#define NV04_DUP_OBJECT               (0x00000037)
+
+/* parameters */
+typedef struct
+{
+    NvHandle hClient;    // [IN]    destination client handle
+    NvHandle hParent;    // [IN]    parent of new object
+    NvHandle hObject;    // [INOUT] destination (new) object handle
+    NvHandle hClientSrc; // [IN]    source client handle
+    NvHandle hObjectSrc; // [IN]    source (old) object handle
+    NvU32    flags;      // [IN]    flags
+    NvU32    status;     // [OUT]   status
+} NVOS55_PARAMETERS;
+
+#define NV04_DUP_HANDLE_FLAGS_NONE                        (0x00000000)
+#define NV04_DUP_HANDLE_FLAGS_REJECT_KERNEL_DUP_PRIVILEGE (0x00000001) // If set, prevents an RM kernel client from duping unconditionally
+    // NOTE: Do not declare a NV04_DUP_HANDLE_FLAGS_* value of 0x00000008
+    // until Bug 2859347 is resolved! This is due to conflicting usage
+    // of RS_RES_DUP_PARAMS_INTERNAL.flags to pass
+    // NVOS32_ALLOC_INTERNAL_FLAGS_FLA_MEMORY to an object constructor.
+
+/* function OS56 */
+#define NV04_UPDATE_DEVICE_MAPPING_INFO (0x00000038)
+
+/* parameters */
+typedef struct
+{
+    NvHandle hClient;
+    NvHandle hDevice;
+    NvHandle hMemory;
+    NvP64    pOldCpuAddress NV_ALIGN_BYTES(8);
+    NvP64    pNewCpuAddress NV_ALIGN_BYTES(8);
+    NvV32    status;
+} NVOS56_PARAMETERS;
+
+/* function OS57 */
+#define NV04_SHARE                    (0x0000003E)
+
+/* parameters */
+typedef struct
+{
+    NvHandle        hClient;     // [IN]  owner client handle
+    NvHandle        hObject;     // [IN]  resource to share
+    RS_SHARE_POLICY sharePolicy; // [IN]  share policy entry
+    NvU32           status;      // [OUT] status
+} NVOS57_PARAMETERS;
+
+/* parameters */
+typedef struct
+{
+    NvU32 deviceReference;
+    NvU32 head;
+    NvU32 state;
+    NvU8  forceMonitorState;
+    NvU8  bForcePerfBiosLevel;
+    NvU8  bIsD3HotTransition;  // [OUT] To tell client if it's a D3Hot transition
+    NvU8  bForcePowerStateFail;
+    NvU32 errorStatus;         // [OUT] To tell client if there are bubbled-up errors
+    NvU32 fastBootPowerState;
+    NvU8  reserved;
+    NvU8  reserved2;
+} NVPOWERSTATE_PARAMETERS, *PNVPOWERSTATE_PARAMETERS;
+
+ /***************************************************************************\
+|*                       Object Allocation Parameters                        *|
+ \***************************************************************************/
+
+// GR engine creation parameters
+typedef struct {
+    NvU32 version;    // set to 0x2
+    NvU32 flags;      // input param from a rm client (no flags are currently defined)
+    NvU32 size;       // sizeof(NV_GR_ALLOCATION_PARAMETERS)
+    NvU32 caps;       // output param for a rm client - class dependent
+} NV_GR_ALLOCATION_PARAMETERS;
+
+//
+// NvAlloc parameters for NV03_DEVICE_XX class
+//  hClientShare
+//    For NV50+ this can be set to virtual address space for this
+//    device. On previous chips this field is ignored. There are
+//    three possible settings:
+//          NV01_NULL_OBJECT - Use the default global VA space
+//          Handle to current client - Create a new private address space
+//          Handle to another client - Attach to another client's address space
+//  flags
+//          MAP_PTE_GLOBALLY           Deprecated.
+//          MINIMIZE_PTETABLE_SIZE     Pass hint to DMA HAL to use partial page
+//                                     tables. Depending on allocation pattern
+//                                     this may actually use more instance memory.
+//          RETRY_PTE_ALLOC_IN_SYS     Fallback to PTE allocation in sysmem. This
+//                                     is now enabled by default.
+//          VASPACE_SIZE               Honor vaSpaceSize field.
+//
+//          MAP_PTE                    Deprecated.
+//
+//          VASPACE_IS_MIRRORED        This flag will tell RM to create a mirrored
+//                                     kernel PDB for the address space associated
+//                                     with this device. When this flag is set
+//                                     the address space covered by the top PDE
+//                                     is restricted and cannot be allocated out of.
+//
+//          VASPACE_BIG_PAGE_SIZE_64k  ***Warning: this flag will be deprecated, do not use***
+//          VASPACE_BIG_PAGE_SIZE_128k These flags set the big page size of the VASpace
+//                                     to 64k/128k if the system supports a configurable size.
+//                                     If the system does not support a configurable size then
+//                                     defaults will be chosen.
+//                                     If the user sets both of these bits then this API will fail.
+//
+//          SHARED_MANAGEMENT
+//            *** Warning: This will be deprecated - see NV_VASPACE_ALLOCATION_PARAMETERS. ***
+//
+//
+//  hTargetClient/hTargetDevice
+//    Deprecated. Can be deleted once client code has removed references.
+//
+//  vaBase
+//    *** Warning: This will be deprecated - see NV_VASPACE_ALLOCATION_PARAMETERS. ***
+//
+//  vaSpaceSize
+//    Set the size of the VA space used for this client if allocating
+//    a new private address space. It is expressed as a size, such as
+//    (1<<32) for a 32b address space. Reducing the size of the address
+//    space allows the dma chip specific code to reduce the instance memory
+//    used for page tables.
+//
+//  vaMode
+//    The vaspace allocation mode. There are three modes supported:
+//    1. SINGLE_VASPACE
+//       An old abstraction that provides a single VA space under a
+//       device, allocated implicitly when an object requires a VA
+//       space. Typically, this VA space is also shared across clients.
+//
+//    2. OPTIONAL_MULTIPLE_VASPACES
+//       Global + multiple private VA spaces. In this mode, the old abstraction,
+//       a single vaspace under a device that is allocated implicitly, is still
+//       supported. A private VA space is an entity under a device, which
+//       cannot be shared with other clients, but multiple channels under the
+//       same device can still share a private VA space.
+//       Private VA spaces (class:90f1,FERMI_VASPACE_A) can be allocated as
+//       objects through RM APIs. This mode requires the users to know what they
+//       are doing in terms of using VA spaces. Page faults can easily occur if
+//       one is not careful with a mix of an implicit VA space and multiple
+//       VA spaces.
+//
+//    3. MULTIPLE_VASPACES
+//       In this mode, all VA spaces have to be allocated explicitly through RM
+//       APIs and users have to specify which VA space to use for each object.
+//       This mode prevents users from using context dma, which is not supported
+//       and can be misleading if used. Therefore, it's more a safeguard mode to
+//       prevent people from making mistakes that are hard to debug.
+//
+//    DEFAULT MODE: 2. OPTIONAL_MULTIPLE_VASPACES
+//
+// See NV0080_ALLOC_PARAMETERS for allocation parameter structure.
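+//
+// For example (illustrative, not part of the original comment): to request a
+// 1 TB private VA space, a client could set
+//     flags       |= NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SIZE;
+//     vaSpaceSize  = (1ULL << 40);
+// in NV0080_ALLOC_PARAMETERS before allocating the device.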
+//
+
+#define NV_DEVICE_ALLOCATION_SZNAME_MAXLEN                      128
+#define NV_DEVICE_ALLOCATION_FLAGS_NONE                         (0x00000000)
+#define NV_DEVICE_ALLOCATION_FLAGS_MAP_PTE_GLOBALLY             (0x00000001)
+#define NV_DEVICE_ALLOCATION_FLAGS_MINIMIZE_PTETABLE_SIZE       (0x00000002)
+#define NV_DEVICE_ALLOCATION_FLAGS_RETRY_PTE_ALLOC_IN_SYS       (0x00000004)
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SIZE                 (0x00000008)
+#define NV_DEVICE_ALLOCATION_FLAGS_MAP_PTE                      (0x00000010)
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_IS_TARGET            (0x00000020)
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SHARED_MANAGEMENT    (0x00000100)
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_64k    (0x00000200)
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_128k   (0x00000400)
+#define NV_DEVICE_ALLOCATION_FLAGS_RESTRICT_RESERVED_VALIMITS   (0x00000800)
+
+/*
+ * TODO: Delete this flag once CUDA moves to the ctrl call
+ */
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_IS_MIRRORED          (0x00000040)
+
+// XXX NV_DEVICE_ALLOCATION_FLAGS_VASPACE_PTABLE_PMA_MANAGED should not
+// be exposed to clients. It should be the default RM behavior.
+//
+// Until it is made the default, certain clients such as OpenGL
+// might still need PTABLE allocations to go through PMA, so this
+// flag has been temporarily exposed.
+//
+// See bug 1880192
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_PTABLE_PMA_MANAGED   (0x00001000)
+
+//
+// Indicates this device is being created by guest and requires a
+// KernelHostVgpuDeviceApi creation in client.
+//
+#define NV_DEVICE_ALLOCATION_FLAGS_HOST_VGPU_DEVICE             (0x00002000)
+
+//
+// Indicates this device is being created for VGPU plugin use.
+// Requires a HostVgpuDevice handle to indicate the guest on which
+// this plugin operates.
+//
+#define NV_DEVICE_ALLOCATION_FLAGS_PLUGIN_CONTEXT               (0x00004000)
+
+//
+// For clients using unlinked SLI to catch allocation attempts on secondary GPUs
+// not accompanied by a fixed offset.
+//
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_REQUIRE_FIXED_OFFSET (0x00008000)
+
+#define NV_DEVICE_ALLOCATION_VAMODE_OPTIONAL_MULTIPLE_VASPACES  (0x00000000)
+#define NV_DEVICE_ALLOCATION_VAMODE_SINGLE_VASPACE              (0x00000001)
+#define NV_DEVICE_ALLOCATION_VAMODE_MULTIPLE_VASPACES           (0x00000002)
+
+
+#define NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR                0x00000000
+#define NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN    0x00000001
+#define NV_CHANNELGPFIFO_NOTIFICATION_TYPE_KEY_ROTATION_STATUS  0x00000002
+#define NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1              3
+#define NV_CHANNELGPFIFO_NOTIFICATION_STATUS_VALUE              14:0
+#define NV_CHANNELGPFIFO_NOTIFICATION_STATUS_IN_PROGRESS        15:15
+#define NV_CHANNELGPFIFO_NOTIFICATION_STATUS_IN_PROGRESS_TRUE   0x1
+#define NV_CHANNELGPFIFO_NOTIFICATION_STATUS_IN_PROGRESS_FALSE  0x0
+
+typedef enum
+{
+    PB_SIZE_4KB = 0,
+    PB_SIZE_8KB,
+    PB_SIZE_16KB,
+    PB_SIZE_32KB,
+    PB_SIZE_64KB
+} ChannelPBSize;
+
+typedef struct
+{
+    NvV32    channelInstance;  // One of the n channel instances of a given channel type.
+                               // Note that core channel has only one instance
+                               // while all others have two (one per head).
+    NvHandle hObjectBuffer;    // ctx dma handle for DMA push buffer
+    NvHandle hObjectNotify;    // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+    NvU32    offset;           // Initial offset for put/get, usually zero.
+    NvP64    pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
+
+    NvU32    flags;
+    ChannelPBSize channelPBSize; // Size of Push Buffer requested by client (allowed values in enum)
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB     1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO  0x00000001
+
+    NvU32    subDeviceId; // One-hot encoded subDeviceId (i.e. SDM) that will be used to address the channel in the pushbuffer stream (via SSDM method)
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvV32    channelInstance;  // One of the n channel instances of a given channel type.
+                               // All PIO channels have two instances (one per head).
+    NvHandle hObjectNotify;    // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
+    NvP64    pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel
+} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
+
+// Used for allocating a channel group
+typedef struct
+{
+    NvHandle hObjectError;     // Error notifier for TSG
+    NvHandle hObjectEccError;  // ECC Error notifier for TSG
+    NvHandle hVASpace;         // VA space handle for TSG
+    NvU32    engineType;       // Engine with which all channels in this TSG are associated
+    NvBool   bIsCallingContextVgpuPlugin;
+} NV_CHANNEL_GROUP_ALLOCATION_PARAMETERS;
+
+/*
+ * @params:
+ * @engineId          : Engine with which the software runlist is associated.
+ * @maxTSGs           : Maximum number of TSG entries that will be submitted in this software runlist.
+ *                      The size of the runlist buffer will be determined by
+ *                          2 *                    // double buffer
+ *                          maxTSGs *              // determined by KMD
+ *                          maxChannelPerTSG *     // determined by RM
+ *                          sizeof(RunlistEntry)   // determined by HW format
+ * @qosIntrEnableMask : QOS interrupt bitmask that needs to be enabled for the SW runlist, defined below.
+ */
+typedef struct
+{
+    NvU32 engineId;          //(IN)
+    NvU32 maxTSGs;           //(IN) Sizes the runlist buffer; RM could return an error if the request cannot be accommodated.
+    NvU32 qosIntrEnableMask; //(IN) Bitmask of QOS interrupts that need to be enabled
+} NV_SWRUNLIST_ALLOCATION_PARAMS;
+
+#define NV_SWRUNLIST_QOS_INTR_NONE                                0x00000000
+#define NV_SWRUNLIST_QOS_INTR_RUNLIST_AND_ENG_IDLE_ENABLE         NVBIT32(0)
+#define NV_SWRUNLIST_QOS_INTR_RUNLIST_IDLE_ENABLE                 NVBIT32(1)
+#define NV_SWRUNLIST_QOS_INTR_RUNLIST_ACQUIRE_ENABLE              NVBIT32(2)
+#define NV_SWRUNLIST_QOS_INTR_RUNLIST_ACQUIRE_AND_ENG_IDLE_ENABLE NVBIT32(3)
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 caps;
+} NV_ME_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;
+    NvU32 engineInstance;            // Select NVDEC0 or NVDEC1 or NVDEC2
+} NV_BSP_ALLOCATION_PARAMETERS;
+
+//
+// These are referenced by mdiag mods tests, but do not appear to be used
+// in the RM any longer.
+//
+#define NV_VP_ALLOCATION_FLAGS_STANDARD_UCODE                   (0x00000000)
+#define NV_VP_ALLOCATION_FLAGS_STATIC_UCODE                     (0x00000001)
+#define NV_VP_ALLOCATION_FLAGS_DYNAMIC_UCODE                    (0x00000002)
+
+//
+// NV_VP_ALLOCATION_PARAMETERS.flags
+//
+// NV_VP_ALLOCATION_FLAGS_AVP_CLIENT are used by Tegra to specify if
+// the current allocation will be used by Video or Audio
+//
+#define NV_VP_ALLOCATION_FLAGS_AVP_CLIENT_VIDEO                 (0x00000000)
+#define NV_VP_ALLOCATION_FLAGS_AVP_CLIENT_AUDIO                 (0x00000001)
+
+typedef struct
+{
+    NvU32    size;
+    NvU32    caps;
+    NvU32    flags;
+    NvU32    altUcode;
+    NvP64    rawUcode NV_ALIGN_BYTES(8);
+    NvU32    rawUcodeSize;
+    NvU32    numSubClasses;
+    NvU32    numSubSets;
+    NvP64    subClasses NV_ALIGN_BYTES(8);
+    NvU32    prohibitMultipleInstances;
+    NvP64    pControl NV_ALIGN_BYTES(8);         // Used by Tegra to return a mapping to NvE276Control
+    NvHandle hMemoryCmdBuffer NV_ALIGN_BYTES(8); // Used by Tegra to specify cmd buffer
+    NvU64    offset NV_ALIGN_BYTES(8);           // Used by Tegra to specify an offset into the cmd buffer
+
+} NV_VP_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;
+} NV_PPP_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;  // Prohibit multiple allocations of MSENC?
+    NvU32 engineInstance;             // Select MSENC/NVENC0 or NVENC1 or NVENC2
+} NV_MSENC_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;  // Prohibit multiple allocations of SEC2?
+} NV_SEC2_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;  // Prohibit multiple allocations of NVJPG?
+    NvU32 engineInstance;
+} NV_NVJPG_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;  // Prohibit multiple allocations of OFA?
+    NvU32 engineInstance;
+} NV_OFA_ALLOCATION_PARAMETERS;
+
+#define NV04_ADD_VBLANK_CALLBACK      (0x0000003D)
+
+#include "class/cl9010.h" // for OSVBLANKCALLBACKPROC
+
+/* parameters */
+/* NOTE: the "void* pParm's" below are ok (but unfortunate) since this interface
+   can only be used by other kernel drivers which must share the same ptr-size */
+typedef struct
+{
+    NvHandle             hClient;     // [IN] client handle
+    NvHandle             hDevice;     // [IN] device handle for mapping
+    NvHandle             hVblank;     // [IN] Vblank handle for control
+    OSVBLANKCALLBACKPROC pProc;       // Routine to call at vblank time
+
+    NvV32                LogicalHead; // Logical Head
+    void                 *pParm1;
+    void                 *pParm2;
+    NvU32                bAdd;        // Add or Delete
+    NvV32                status;      // [OUT] status
+} NVOS61_PARAMETERS;
+
+/**
+ * @brief NvAlloc parameters for VASPACE classes
+ *
+ * Used to create a new private virtual address space.
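+ *
+ * A minimal illustrative setup (not from the original header; the size value
+ * is hypothetical):
+ *
+ *     NV_VASPACE_ALLOCATION_PARAMETERS va = {0};
+ *     va.index  = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;
+ *     va.flags  = NV_VASPACE_ALLOCATION_FLAGS_RETRY_PTE_ALLOC_IN_SYS;
+ *     va.vaSize = (1ULL << 37);   // request a 128 GB VA space
+ *     // ...then allocate a FERMI_VASPACE_A (class 0x90f1) object with these
+ *     // parameters.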
+ *
+ * index
+ *       Tegra: With TEGRA_VASPACE_A, index specifies the IOMMU
+ *       virtual address space to be created. Based on the
+ *       index, RM/NVMEM will decide the HW ASID to be used with
+ *       this VA Space. "index" takes values from the
+ *       NVMEM_CLIENT_* defines in
+ *       "drivers/common/inc/tegra/memory/ioctl.h".
+ *
+ *       Big GPU: With FERMI_VASPACE_A, see NV_VASPACE_ALLOCATION_INDEX_GPU_*.
+ *
+ * flags
+ *       MINIMIZE_PTETABLE_SIZE Pass hint to DMA HAL to use partial page tables.
+ *                              Depending on allocation pattern this may actually
+ *                              use more instance memory.
+ *
+ *       RETRY_PTE_ALLOC_IN_SYS Fallback to PTE allocation in sysmem. This is now
+ *                              enabled by default.
+ *
+ *       SHARED_MANAGEMENT
+ *          Indicates management of the VA space is shared with another
+ *          component (e.g. driver layer, OS, etc.).
+ *
+ *          The initial VA range from vaBase (inclusive) through vaSize (exclusive)
+ *          is managed by RM. The range must be aligned to a top-level PDE's VA
+ *          coverage since backing page table levels for this range are managed by RM.
+ *          All normal RM virtual memory management APIs work within this range.
+ *
+ *          An external component can manage the remaining VA ranges,
+ *          from 0 (inclusive) to vaBase (exclusive) and from vaSize (inclusive) up to the
+ *          maximum VA limit supported by HW.
+ *          Management of these ranges includes VA sub-allocation and the
+ *          backing lower page table levels.
+ *
+ *          The top-level page directory is special since it is a shared resource.
+ *          Management of the page directory is as follows:
+ *          1. Initially RM allocates a page directory for RM-managed PDEs.
+ *          2. The external component may create a full page directory and commit it
+ *             with NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY.
+ *             This will copy the RM-managed PDEs from the RM-managed page directory
+ *             into the external page directory and commit channels to the external page directory.
+ *             After this point RM will update the external page directory directly for
+ *             operations that modify RM-managed PDEs.
+ *          3. The external component may use NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY repeatedly
+ *             if it needs to update the page directory again (e.g. to resize or migrate).
+ *             This will copy the RM-managed PDEs from the old external page directory
+ *             into the new external page directory and commit channels to the new page directory.
+ *          4. The external component may restore management of the page directory back to
+ *             RM with NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY.
+ *             This will copy the RM-managed PDEs from the external page directory
+ *             into the RM-managed page directory and commit channels to the RM-managed page directory.
+ *             After this point RM will update the RM-managed page directory for
+ *             operations that modify RM-managed PDEs.
+ *          Note that operations (2) and (4) are symmetric - the RM perspective of management is identical
+ *          before and after a sequence of SET => ... => UNSET.
+ *
+ *       IS_MIRRORED
+ *          This flag will tell RM to create a mirrored
+ *          kernel PDB for the address space associated
+ *          with this device. When this flag is set
+ *          the address space covered by the top PDE
+ *          is restricted and cannot be allocated out of.
+ *       ENABLE_PAGE_FAULTING
+ *          Enable page faulting if the architecture supports it.
+ *          As of now page faulting is only supported for compute on pascal+.
+ *       IS_EXTERNALLY_OWNED
+ *          The vaspace that has been allocated will be managed by
+ *          an external driver. RM will not own the pagetables for this vaspace.
+ *
+ *       ENABLE_NVLINK_ATS
+ *          Enables VA translation for this address space using NVLINK ATS.
+ *          Note, the GMMU page tables still exist and take priority over NVLINK ATS.
+ *          VA space object creation will fail if:
+ *          - hardware support is not available (NV_ERR_NOT_SUPPORTED)
+ *          - incompatible options IS_MIRRORED or IS_EXTERNALLY_OWNED are set (NV_ERR_INVALID_ARGUMENT)
+ *       IS_FLA
+ *          Sets FLA flag for this VASPACE
+ *
+ *       ALLOW_ZERO_ADDRESS
+ *          Allows VASPACE Range to start from zero
+ *       SKIP_SCRUB_MEMPOOL
+ *          Skip scrubbing in MemPool
+ *
+ * vaBase [in, out]
+ *       On input, the lowest usable base address of the VA space.
+ *       If 0, RM will pick a default value - 0 is always reserved to represent NULL pointers.
+ *       The value must be aligned to the largest page size of the VA space.
+ *       Larger values aid in debug since offsets added to NULL pointers will still fault.
+ *
+ *       On output, the actual usable base address is returned.
+ *
+ * vaSize [in,out]
+ *       On input, requested size of the virtual address space in bytes.
+ *       Requesting a smaller size reduces the memory required for the initial
+ *       page directory, but the VAS may be resized later (NV0080_CTRL_DMA_SET_VA_SPACE_SIZE).
+ *       If 0, the default VA space size will be used.
+ *
+ *       On output, the actual size of the VAS in bytes.
+ *       NOTE: This corresponds to the VA_LIMIT + 1, so the usable size is (vaSize - vaBase).
+ *
+ * bigPageSize
+ *       Set the size of the big page in this address space object. Current HW supports
+ *       either 64k or 128k as the size of the big page. HW that supports multiple big
+ *       page sizes per address space will use this size. HW that does not support this
+ *       feature will override it with the default big page size that is supported by the
+ *       system. If the big page size value is set to ZERO then we will pick the default
+ *       page size of the system.
+ * pasid
+ *       Process Address Space Identifier. Used by RM internally when
+ *       NV_VASPACE_ALLOCATION_FLAGS_ENABLE_NVLINK_ATS_TEST is set
+ **/
+typedef struct
+{
+    NvU32 index;
+    NvV32 flags;
+    NvU64 vaSize NV_ALIGN_BYTES(8);
+    NvU64 vaStartInternal NV_ALIGN_BYTES(8);
+    NvU64 vaLimitInternal NV_ALIGN_BYTES(8);
+    NvU32 bigPageSize;
+    NvU64 vaBase NV_ALIGN_BYTES(8);
+    NvU32 pasid;
+} NV_VASPACE_ALLOCATION_PARAMETERS;
+
+#define NV_VASPACE_ALLOCATION_FLAGS_NONE                            (0x00000000)
+#define NV_VASPACE_ALLOCATION_FLAGS_MINIMIZE_PTETABLE_SIZE          BIT(0)
+#define NV_VASPACE_ALLOCATION_FLAGS_RETRY_PTE_ALLOC_IN_SYS          BIT(1)
+#define NV_VASPACE_ALLOCATION_FLAGS_SHARED_MANAGEMENT               BIT(2)
+#define NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED             BIT(3)
+#define NV_VASPACE_ALLOCATION_FLAGS_ENABLE_NVLINK_ATS               BIT(4)
+#define NV_VASPACE_ALLOCATION_FLAGS_IS_MIRRORED                     BIT(5)
+#define NV_VASPACE_ALLOCATION_FLAGS_ENABLE_PAGE_FAULTING            BIT(6)
+#define NV_VASPACE_ALLOCATION_FLAGS_VA_INTERNAL_LIMIT               BIT(7)
+#define NV_VASPACE_ALLOCATION_FLAGS_ALLOW_ZERO_ADDRESS              BIT(8)
+#define NV_VASPACE_ALLOCATION_FLAGS_IS_FLA                          BIT(9)
+#define NV_VASPACE_ALLOCATION_FLAGS_SKIP_SCRUB_MEMPOOL              BIT(10)
+#define NV_VASPACE_ALLOCATION_FLAGS_OPTIMIZE_PTETABLE_MEMPOOL_USAGE BIT(11)
+#define NV_VASPACE_ALLOCATION_FLAGS_REQUIRE_FIXED_OFFSET            BIT(12)
+#define NV_VASPACE_ALLOCATION_FLAGS_PTETABLE_HEAP_MANAGED           BIT(13)
+// To be used only by SRT for testing ATS within RM
+#define NV_VASPACE_ALLOCATION_FLAGS_ENABLE_NVLINK_ATS_TEST          BIT(14)
+
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW                         0x00 // NV_STATUS_LEVEL_WARN
+ * to determine success v. failure of a call.
+ */
+#define NV_STATUS_LEVEL_WARN 1
+
+/*!
+ * @def   NV_STATUS_LEVEL_ERR
+ * @see   NV_STATUS_LEVEL
+ * @brief Unrecoverable error condition
+ */
+#define NV_STATUS_LEVEL_ERR 3
+
+/*!
+ * @def   NV_STATUS_LEVEL
+ * @see   NV_STATUS_LEVEL_OK
+ * @see   NV_STATUS_LEVEL_WARN
+ * @see   NV_STATUS_LEVEL_ERR
+ * @brief Level of the status code
+ *
+ * @warning IMPORTANT: When comparing NV_STATUS_LEVEL(_S) against one of
+ *          these constants, it is important to use '<=' or '>' (rather
+ *          than '<' or '>=').
+ *
+ *          For example, do:
+ *              if (NV_STATUS_LEVEL(status) <= NV_STATUS_LEVEL_WARN)
+ *          rather than:
+ *              if (NV_STATUS_LEVEL(status) < NV_STATUS_LEVEL_ERR)
+ *
+ *          By being consistent in this manner, it is easier to systematically
+ *          add additional level constants. New levels are likely to lower
+ *          (rather than raise) the severity of _ERR codes. For example,
+ *          if we were to add NV_STATUS_LEVEL_RETRY to indicate hardware
+ *          failures that may be recoverable (e.g. RM_ERR_TIMEOUT_RETRY
+ *          or RM_ERR_BUSY_RETRY), it would be less severe than
+ *          NV_STATUS_LEVEL_ERR, the level to which these status codes now
+ *          belong. Using '<=' and '>' ensures your code is not broken in
+ *          cases like this.
+ */
+#define NV_STATUS_LEVEL(_S)                                              \
+    ((_S) == NV_OK?                               NV_STATUS_LEVEL_OK:    \
+    ((_S) != NV_ERR_GENERIC && (_S) & 0x00010000? NV_STATUS_LEVEL_WARN:  \
+                                                  NV_STATUS_LEVEL_ERR))
+
+/*!
+ * @def   NV_STATUS_LEVEL_CHAR
+ * @see   NV_STATUS_LEVEL_OK
+ * @see   NV_STATUS_LEVEL_WARN
+ * @see   NV_STATUS_LEVEL_ERR
+ * @brief Character representing status code level
+ */
+#define NV_STATUS_LEVEL_CHAR(_S)                        \
+    ((_S) == NV_OK?                               '0':  \
+    ((_S) != NV_ERR_GENERIC && (_S) & 0x00010000? 'W':  \
+                                                  'E'))
+
+// Function definitions
+const char *nvstatusToString(NV_STATUS nvStatusIn);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SDK_NVSTATUS_H */
diff --git a/src/common/sdk/nvidia/inc/nvstatuscodes.h b/src/common/sdk/nvidia/inc/nvstatuscodes.h
new file mode 100644
index 0000000..98ebb7b
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/nvstatuscodes.h
@@ -0,0 +1,180 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SDK_NVSTATUSCODES_H
+#define SDK_NVSTATUSCODES_H
+
+NV_STATUS_CODE(NV_OK,                                  0x00000000, "Success")
+NV_STATUS_CODE(NV_ERR_GENERIC,                         0x0000FFFF, "Failure: Generic Error")
+
+NV_STATUS_CODE(NV_ERR_BROKEN_FB,                       0x00000001, "Frame-Buffer broken")
+NV_STATUS_CODE(NV_ERR_BUFFER_TOO_SMALL,                0x00000002, "Buffer passed in is too small")
+NV_STATUS_CODE(NV_ERR_BUSY_RETRY,                      0x00000003, "System is busy, retry later")
+NV_STATUS_CODE(NV_ERR_CALLBACK_NOT_SCHEDULED,          0x00000004, "The requested callback API not scheduled")
+NV_STATUS_CODE(NV_ERR_CARD_NOT_PRESENT,                0x00000005, "Card not detected")
+NV_STATUS_CODE(NV_ERR_CYCLE_DETECTED,                  0x00000006, "Call cycle detected")
+NV_STATUS_CODE(NV_ERR_DMA_IN_USE,                      0x00000007, "Requested DMA is in use")
+NV_STATUS_CODE(NV_ERR_DMA_MEM_NOT_LOCKED,              0x00000008, "Requested DMA memory is not locked")
+NV_STATUS_CODE(NV_ERR_DMA_MEM_NOT_UNLOCKED,            0x00000009, "Requested DMA memory is not unlocked")
+NV_STATUS_CODE(NV_ERR_DUAL_LINK_INUSE,                 0x0000000A, "Dual-Link is in use")
+NV_STATUS_CODE(NV_ERR_ECC_ERROR,                       0x0000000B, "Generic ECC error")
+NV_STATUS_CODE(NV_ERR_FIFO_BAD_ACCESS,                 0x0000000C, "FIFO: Invalid access")
+NV_STATUS_CODE(NV_ERR_FREQ_NOT_SUPPORTED,              0x0000000D, "Requested frequency is not supported")
+NV_STATUS_CODE(NV_ERR_GPU_DMA_NOT_INITIALIZED,         0x0000000E, "Requested DMA not initialized")
+NV_STATUS_CODE(NV_ERR_GPU_IS_LOST,                     0x0000000F, "GPU lost from the bus")
+NV_STATUS_CODE(NV_ERR_GPU_IN_FULLCHIP_RESET,           0x00000010, "GPU currently in full-chip reset")
+NV_STATUS_CODE(NV_ERR_GPU_NOT_FULL_POWER,              0x00000011, "GPU not in full power")
+NV_STATUS_CODE(NV_ERR_GPU_UUID_NOT_FOUND,              0x00000012, "GPU UUID not found")
+NV_STATUS_CODE(NV_ERR_HOT_SWITCH,                      0x00000013, "System in hot switch")
+NV_STATUS_CODE(NV_ERR_I2C_ERROR,                       0x00000014, "I2C Error")
+NV_STATUS_CODE(NV_ERR_I2C_SPEED_TOO_HIGH,              0x00000015, "I2C Error: Speed too high")
+NV_STATUS_CODE(NV_ERR_ILLEGAL_ACTION,                  0x00000016, "Current action is not allowed")
+NV_STATUS_CODE(NV_ERR_IN_USE,                          0x00000017, "Generic busy error")
+NV_STATUS_CODE(NV_ERR_INFLATE_COMPRESSED_DATA_FAILED,  0x00000018, "Failed to inflate compressed data")
+NV_STATUS_CODE(NV_ERR_INSERT_DUPLICATE_NAME,           0x00000019, "Found a duplicate entry in the requested btree")
+NV_STATUS_CODE(NV_ERR_INSUFFICIENT_RESOURCES,          0x0000001A, "Ran out of a critical resource, other than memory")
+NV_STATUS_CODE(NV_ERR_INSUFFICIENT_PERMISSIONS,        0x0000001B, "The requester does not have sufficient permissions")
+NV_STATUS_CODE(NV_ERR_INSUFFICIENT_POWER,              0x0000001C, "Generic Error: Low power")
+NV_STATUS_CODE(NV_ERR_INVALID_ACCESS_TYPE,             0x0000001D, "This type of access is not allowed")
+NV_STATUS_CODE(NV_ERR_INVALID_ADDRESS,                 0x0000001E, "Address not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_ARGUMENT,                0x0000001F, "Invalid argument to call")
+NV_STATUS_CODE(NV_ERR_INVALID_BASE,                    0x00000020, "Invalid base")
+NV_STATUS_CODE(NV_ERR_INVALID_CHANNEL,                 0x00000021, "Given channel-id not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_CLASS,                   0x00000022, "Given class-id not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_CLIENT,                  0x00000023, "Given client not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_COMMAND,                 0x00000024, "Command passed is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_DATA,                    0x00000025, "Invalid data passed")
+NV_STATUS_CODE(NV_ERR_INVALID_DEVICE,                  0x00000026, "Current device is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_DMA_SPECIFIER,           0x00000027, "The requested DMA specifier is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_EVENT,                   0x00000028, "Invalid event occurred")
+NV_STATUS_CODE(NV_ERR_INVALID_FLAGS,                   0x00000029, "Invalid flags passed")
+NV_STATUS_CODE(NV_ERR_INVALID_FUNCTION,                0x0000002A, "Called function is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_HEAP,                    0x0000002B, "Heap corrupted")
+NV_STATUS_CODE(NV_ERR_INVALID_INDEX,                   0x0000002C, "Index invalid")
+NV_STATUS_CODE(NV_ERR_INVALID_IRQ_LEVEL,               0x0000002D, "Requested IRQ level is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_LIMIT,                   0x0000002E, "Generic Error: Invalid limit")
+NV_STATUS_CODE(NV_ERR_INVALID_LOCK_STATE,              0x0000002F, "Requested lock state not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_METHOD,                  0x00000030, "Requested method not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OBJECT,                  0x00000031, "Object not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_BUFFER,           0x00000032, "Object buffer passed is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_HANDLE,           0x00000033, "Object handle is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_NEW,              0x00000034, "New object is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_OLD,              0x00000035, "Old object is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_PARENT,           0x00000036, "Object parent is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OFFSET,                  0x00000037, "The offset passed is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OPERATION,               0x00000038, "Requested operation is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_OWNER,                   0x00000039, "Owner not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_PARAM_STRUCT,            0x0000003A, "Invalid structure parameter")
+NV_STATUS_CODE(NV_ERR_INVALID_PARAMETER,               0x0000003B, "At least one of the parameters passed is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_PATH,                    0x0000003C, "The requested path is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_POINTER,                 0x0000003D, "Pointer not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_REGISTRY_KEY,            0x0000003E, "Found an invalid registry key")
+NV_STATUS_CODE(NV_ERR_INVALID_REQUEST,                 0x0000003F, "Generic Error: Invalid request")
+NV_STATUS_CODE(NV_ERR_INVALID_STATE,                   0x00000040, "Generic Error: Invalid state")
+NV_STATUS_CODE(NV_ERR_INVALID_STRING_LENGTH,           0x00000041, "The string length is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_READ,                    0x00000042, "The requested read operation is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_WRITE,                   0x00000043, "The requested write operation is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_XLATE,                   0x00000044, "The requested translate operation is not valid")
+NV_STATUS_CODE(NV_ERR_IRQ_NOT_FIRING,                  0x00000045, "Requested IRQ is not firing")
+NV_STATUS_CODE(NV_ERR_IRQ_EDGE_TRIGGERED,              0x00000046, "IRQ is edge triggered")
+NV_STATUS_CODE(NV_ERR_MEMORY_TRAINING_FAILED,          0x00000047, "Failed memory training sequence")
+NV_STATUS_CODE(NV_ERR_MISMATCHED_SLAVE,                0x00000048, "Slave mismatch")
+NV_STATUS_CODE(NV_ERR_MISMATCHED_TARGET,               0x00000049, "Target mismatch")
+NV_STATUS_CODE(NV_ERR_MISSING_TABLE_ENTRY,             0x0000004A, "Requested entry not found in the table")
+NV_STATUS_CODE(NV_ERR_MODULE_LOAD_FAILED,              0x0000004B, "Failed to load the requested module")
+NV_STATUS_CODE(NV_ERR_MORE_DATA_AVAILABLE,             0x0000004C, "There is more data available")
+NV_STATUS_CODE(NV_ERR_MORE_PROCESSING_REQUIRED,        0x0000004D, "More processing required for the given call")
+NV_STATUS_CODE(NV_ERR_MULTIPLE_MEMORY_TYPES,           0x0000004E, "Multiple memory types found")
+NV_STATUS_CODE(NV_ERR_NO_FREE_FIFOS,                   0x0000004F, "No more free FIFOs found")
+NV_STATUS_CODE(NV_ERR_NO_INTR_PENDING,                 0x00000050, "No interrupt pending")
+NV_STATUS_CODE(NV_ERR_NO_MEMORY,                       0x00000051, "Out of memory")
+NV_STATUS_CODE(NV_ERR_NO_SUCH_DOMAIN,                  0x00000052, "Requested domain does not exist")
+NV_STATUS_CODE(NV_ERR_NO_VALID_PATH,                   0x00000053, "Caller did not specify a valid path")
+NV_STATUS_CODE(NV_ERR_NOT_COMPATIBLE,                  0x00000054, "Generic Error: Incompatible types")
+NV_STATUS_CODE(NV_ERR_NOT_READY,                       0x00000055, "Generic Error: Not ready")
+NV_STATUS_CODE(NV_ERR_NOT_SUPPORTED,                   0x00000056, "Call not supported")
+NV_STATUS_CODE(NV_ERR_OBJECT_NOT_FOUND,                0x00000057, "Requested object not found")
+NV_STATUS_CODE(NV_ERR_OBJECT_TYPE_MISMATCH,            0x00000058, "Specified objects do not match")
+NV_STATUS_CODE(NV_ERR_OPERATING_SYSTEM,                0x00000059, "Generic operating system error")
+NV_STATUS_CODE(NV_ERR_OTHER_DEVICE_FOUND,              0x0000005A, "Found other device instead of the requested one")
+NV_STATUS_CODE(NV_ERR_OUT_OF_RANGE,                    0x0000005B, "The specified value is out of bounds")
+NV_STATUS_CODE(NV_ERR_OVERLAPPING_UVM_COMMIT,          0x0000005C, "Overlapping unified virtual memory commit")
+NV_STATUS_CODE(NV_ERR_PAGE_TABLE_NOT_AVAIL,            0x0000005D, "Requested page table not available")
+NV_STATUS_CODE(NV_ERR_PID_NOT_FOUND,                   0x0000005E, "Process-Id not found")
+NV_STATUS_CODE(NV_ERR_PROTECTION_FAULT,                0x0000005F, "Protection fault")
+NV_STATUS_CODE(NV_ERR_RC_ERROR,                        0x00000060, "Generic RC error")
+NV_STATUS_CODE(NV_ERR_REJECTED_VBIOS,                  0x00000061, "Given Video BIOS rejected/invalid")
+NV_STATUS_CODE(NV_ERR_RESET_REQUIRED,                  0x00000062, "Reset required")
+NV_STATUS_CODE(NV_ERR_STATE_IN_USE,                    0x00000063, "State in use")
+NV_STATUS_CODE(NV_ERR_SIGNAL_PENDING,                  0x00000064, "Signal pending")
+NV_STATUS_CODE(NV_ERR_TIMEOUT,                         0x00000065, "Call timed out")
+NV_STATUS_CODE(NV_ERR_TIMEOUT_RETRY,                   0x00000066, "Call timed out, please retry later")
+NV_STATUS_CODE(NV_ERR_TOO_MANY_PRIMARIES,              0x00000067, "Too many primaries")
+NV_STATUS_CODE(NV_ERR_UVM_ADDRESS_IN_USE,              0x00000068, "Unified virtual memory requested address already in use")
+NV_STATUS_CODE(NV_ERR_MAX_SESSION_LIMIT_REACHED,       0x00000069, "Maximum number of sessions reached")
+NV_STATUS_CODE(NV_ERR_LIB_RM_VERSION_MISMATCH,         0x0000006A, "Library version doesn't match driver version") // Contained within the RMAPI library
+NV_STATUS_CODE(NV_ERR_PRIV_SEC_VIOLATION,              0x0000006B, "Priv security violation")
+NV_STATUS_CODE(NV_ERR_GPU_IN_DEBUG_MODE,               0x0000006C, "GPU currently in debug mode")
+NV_STATUS_CODE(NV_ERR_FEATURE_NOT_ENABLED,             0x0000006D, "Requested Feature functionality is not enabled")
+NV_STATUS_CODE(NV_ERR_RESOURCE_LOST,                   0x0000006E, "Requested resource has been destroyed")
+NV_STATUS_CODE(NV_ERR_PMU_NOT_READY,                   0x0000006F, "PMU is not ready or has not yet been initialized")
+NV_STATUS_CODE(NV_ERR_FLCN_ERROR,                      0x00000070, "Generic falcon assert or halt")
+NV_STATUS_CODE(NV_ERR_FATAL_ERROR,                     0x00000071, "Fatal/unrecoverable error")
+NV_STATUS_CODE(NV_ERR_MEMORY_ERROR,                    0x00000072, "Generic memory error")
+NV_STATUS_CODE(NV_ERR_INVALID_LICENSE,                 0x00000073, "License provided is rejected or invalid")
+NV_STATUS_CODE(NV_ERR_NVLINK_INIT_ERROR,               0x00000074, "Nvlink Init Error")
+NV_STATUS_CODE(NV_ERR_NVLINK_MINION_ERROR,             0x00000075, "Nvlink Minion Error")
+NV_STATUS_CODE(NV_ERR_NVLINK_CLOCK_ERROR,              0x00000076, "Nvlink Clock Error")
+NV_STATUS_CODE(NV_ERR_NVLINK_TRAINING_ERROR,           0x00000077, "Nvlink Training Error")
+NV_STATUS_CODE(NV_ERR_NVLINK_CONFIGURATION_ERROR,      0x00000078, "Nvlink Configuration Error")
+NV_STATUS_CODE(NV_ERR_RISCV_ERROR,                     0x00000079, "Generic RISC-V assert or halt")
+NV_STATUS_CODE(NV_ERR_FABRIC_MANAGER_NOT_PRESENT,      0x0000007A, "Fabric Manager is not loaded")
+NV_STATUS_CODE(NV_ERR_ALREADY_SIGNALLED,               0x0000007B, "Semaphore Surface value already >= requested wait value")
+NV_STATUS_CODE(NV_ERR_QUEUE_TASK_SLOT_NOT_AVAILABLE,   0x0000007C, "PMU RPC error due to no queue slot available for this event")
+NV_STATUS_CODE(NV_ERR_KEY_ROTATION_IN_PROGRESS,        0x0000007D, "Operation not allowed as key rotation is in progress")
+NV_STATUS_CODE(NV_ERR_TEST_ONLY_CODE_NOT_ENABLED,      0x0000007E, "Test-only code path not enabled")
+NV_STATUS_CODE(NV_ERR_SECURE_BOOT_FAILED,              0x0000007F, "GFW secure boot failed")
+NV_STATUS_CODE(NV_ERR_INSUFFICIENT_ZBC_ENTRY,          0x00000080, "No more ZBC entry for the client")
+NV_STATUS_CODE(NV_ERR_NVLINK_FABRIC_NOT_READY,         0x00000081, "Nvlink Fabric Status or Fabric Probe is not yet complete, caller needs to retry")
+NV_STATUS_CODE(NV_ERR_NVLINK_FABRIC_FAILURE,           0x00000082, "Nvlink Fabric Probe failed")
+NV_STATUS_CODE(NV_ERR_GPU_MEMORY_ONLINING_FAILURE,     0x00000083, "GPU Memory Onlining failed")
+NV_STATUS_CODE(NV_ERR_REDUCTION_MANAGER_NOT_AVAILABLE, 0x00000084, "Reduction Manager is not available")
+NV_STATUS_CODE(NV_ERR_THRESHOLD_CROSSED,               0x00000085, "A fatal threshold has been crossed")
+NV_STATUS_CODE(NV_ERR_RESOURCE_RETIREMENT_ERROR,       0x00000086, "An error occurred while trying to retire a resource")
+NV_STATUS_CODE(NV_ERR_FABRIC_STATE_OUT_OF_SYNC,        0x00000087, "NVLink fabric state cached by the driver is out of sync")
+NV_STATUS_CODE(NV_ERR_BUFFER_FULL,                     0x00000088, "Buffer is full")
+NV_STATUS_CODE(NV_ERR_BUFFER_EMPTY,                    0x00000089, "Buffer is empty")
+NV_STATUS_CODE(NV_ERR_MC_FLA_OFFSET_TABLE_FULL,        0x0000008A, "Multicast FLA offset table has no available slots")
+
+// Warnings:
+NV_STATUS_CODE(NV_WARN_HOT_SWITCH,                     0x00010001, "WARNING Hot switch")
+NV_STATUS_CODE(NV_WARN_INCORRECT_PERFMON_DATA,         0x00010002, "WARNING Incorrect performance monitor data")
+NV_STATUS_CODE(NV_WARN_MISMATCHED_SLAVE,               0x00010003, "WARNING Slave mismatch")
+NV_STATUS_CODE(NV_WARN_MISMATCHED_TARGET,              0x00010004, "WARNING Target mismatch")
+NV_STATUS_CODE(NV_WARN_MORE_PROCESSING_REQUIRED,       0x00010005, "WARNING More processing required for the call")
+NV_STATUS_CODE(NV_WARN_NOTHING_TO_DO,                  0x00010006, "WARNING Nothing to do")
+NV_STATUS_CODE(NV_WARN_NULL_OBJECT,                    0x00010007, "WARNING NULL object found")
+NV_STATUS_CODE(NV_WARN_OUT_OF_RANGE,                   0x00010008, "WARNING value out of range")
+NV_STATUS_CODE(NV_WARN_THRESHOLD_CROSSED,              0x00010009, "WARNING Threshold has been crossed")
+
+#endif /* SDK_NVSTATUSCODES_H */
diff --git a/src/common/sdk/nvidia/inc/nvtypes.h b/src/common/sdk/nvidia/inc/nvtypes.h
new file mode 100644
index 0000000..68fd32c
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/nvtypes.h
@@ -0,0 +1,636 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVTYPES_INCLUDED
+#define NVTYPES_INCLUDED
+
+#include "cpuopsys.h"
+
+#ifndef NVTYPES_USE_STDINT
+#define NVTYPES_USE_STDINT 0
+#endif
+
+#if NVTYPES_USE_STDINT
+#ifdef __cplusplus
+#include <cstdint>
+#include <cinttypes>
+#else
+#include <stdint.h>
+#include <inttypes.h>
+#endif // __cplusplus
+#endif // NVTYPES_USE_STDINT
+
+#ifndef __cplusplus
+// Header includes to make sure wchar_t is defined for C-file compilation
+// (C++ is not affected as it is a fundamental type there)
+// _MSC_VER is a hack to avoid failures for old setup of UEFI builds which are
+// currently set to msvc100 but do not properly set the include paths
+#endif // __cplusplus
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(MAKE_NV64TYPES_8BYTES_ALIGNED) && defined(__i386__)
+// ensure or force 8-bytes alignment of NV 64-bit types
+#define OPTIONAL_ALIGN8_ATTR __attribute__((aligned(8)))
+#else
+// nothing needed
+#define OPTIONAL_ALIGN8_ATTR
+#endif // MAKE_NV64TYPES_8BYTES_ALIGNED && i386
+
+ /***************************************************************************\
+|*                                 Typedefs                                  *|
+ \***************************************************************************/
+
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+//Typedefs for MISRA COMPLIANCE
+typedef unsigned long long UInt64;
+typedef   signed long long  Int64;
+typedef unsigned int       UInt32;
+typedef   signed int        Int32;
+typedef unsigned short     UInt16;
+typedef   signed short      Int16;
+typedef unsigned char      UInt8;
+typedef   signed char       Int8;
+
+typedef void               Void;
+typedef float              float32_t;
+typedef double             float64_t;
+#endif
+
+
+// Floating point types
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+typedef float32_t          NvF32; /* IEEE Single Precision (S1E8M23) */
+typedef float64_t          NvF64 OPTIONAL_ALIGN8_ATTR; /* IEEE Double Precision (S1E11M52) */
+#else
+typedef float              NvF32; /* IEEE Single Precision (S1E8M23) */
+typedef double             NvF64 OPTIONAL_ALIGN8_ATTR; /* IEEE Double Precision (S1E11M52) */
+#endif
+
+
+// 8-bit: 'char' is the only 8-bit in the C89 standard and after.
+#if NVTYPES_USE_STDINT
+typedef uint8_t            NvV8; /* "void": enumerated or multiple fields */
+typedef uint8_t            NvU8; /* 0 to 255 */
+typedef  int8_t            NvS8; /* -128 to 127 */
+#else
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+typedef UInt8              NvV8; /* "void": enumerated or multiple fields */
+typedef UInt8              NvU8; /* 0 to 255 */
+typedef  Int8              NvS8; /* -128 to 127 */
+#else
+typedef unsigned char      NvV8; /* "void": enumerated or multiple fields */
+typedef unsigned char      NvU8; /* 0 to 255 */
+typedef   signed char      NvS8; /* -128 to 127 */
+#endif
+#endif // NVTYPES_USE_STDINT
+
+
+#if NVTYPES_USE_STDINT
+typedef uint16_t           NvV16; /* "void": enumerated or multiple fields */
+typedef uint16_t           NvU16; /* 0 to 65535 */
+typedef  int16_t           NvS16; /* -32768 to 32767 */
+#else
+// 16-bit: If the compiler tells us what we can use, then use it.
+#ifdef __INT16_TYPE__
+typedef unsigned __INT16_TYPE__ NvV16; /* "void": enumerated or multiple fields */
+typedef unsigned __INT16_TYPE__ NvU16; /* 0 to 65535 */
+typedef   signed __INT16_TYPE__ NvS16; /* -32768 to 32767 */
+
+// The minimal standard for C89 and after
+#else // __INT16_TYPE__
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+typedef UInt16             NvV16; /* "void": enumerated or multiple fields */
+typedef UInt16             NvU16; /* 0 to 65535 */
+typedef  Int16             NvS16; /* -32768 to 32767 */
+#else
+typedef unsigned short     NvV16; /* "void": enumerated or multiple fields */
+typedef unsigned short     NvU16; /* 0 to 65535 */
+typedef   signed short     NvS16; /* -32768 to 32767 */
+#endif
+#endif // __INT16_TYPE__
+#endif // NVTYPES_USE_STDINT
+
+// wchar type (fixed size types consistent across Linux/Windows boundaries)
+#if defined(NV_HAS_WCHAR_T_TYPEDEF)
+    typedef wchar_t NvWchar;
+#else
+    typedef NvV16   NvWchar;
+#endif
+
+// Macro to build an NvU32 from four bytes, listed from msb to lsb
+#define NvU32_BUILD(a, b, c, d)           \
+    ((NvU32)(                             \
+        (((NvU32)(a) & 0xff) << 24) |     \
+        (((NvU32)(b) & 0xff) << 16) |     \
+        (((NvU32)(c) & 0xff) << 8)  |     \
+        (((NvU32)(d) & 0xff))))
+
+// Macro to build an NvU64 from two DWORDS, listed from msb to lsb
+#define NvU64_BUILD(a, b)                 \
+    ((NvU64)(                             \
+        (((NvU64)(a) & ~0U) << 32) |      \
+        (((NvU64)(b) & ~0U))))
+
+#if NVTYPES_USE_STDINT
+typedef uint32_t           NvV32; /* "void": enumerated or multiple fields */
+typedef uint32_t           NvU32; /* 0 to 4294967295 */
+typedef  int32_t           NvS32; /* -2147483648 to 2147483647 */
+#else
+// 32-bit: If the compiler tells us what we can use, then use it.
+#ifdef __INT32_TYPE__
+typedef unsigned __INT32_TYPE__ NvV32; /* "void": enumerated or multiple fields */
+typedef unsigned __INT32_TYPE__ NvU32; /* 0 to 4294967295 */
+typedef   signed __INT32_TYPE__ NvS32; /* -2147483648 to 2147483647 */
+
+// Older compilers
+#else // __INT32_TYPE__
+
+// For historical reasons, NvU32/NvV32 are defined to different base intrinsic
+// types than NvS32 on some platforms.
+// Mainly for 64-bit linux, where long is 64 bits and win9x, where int is 16 bit.
+#if (defined(NV_UNIX) || defined(vxworks) || defined(NV_WINDOWS_CE) ||       \
+     defined(__arm) || defined(__IAR_SYSTEMS_ICC__) || defined(NV_QNX) ||    \
+     defined(NV_INTEGRITY) || defined(NV_MODS) ||                            \
+     defined(__GNUC__) || defined(__clang__) || defined(NV_MACINTOSH_64)) && \
+    (!defined(NV_MACINTOSH) || defined(NV_MACINTOSH_64))
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+typedef UInt32             NvV32; /* "void": enumerated or multiple fields */
+typedef UInt32             NvU32; /* 0 to 4294967295 */
+#else
+typedef unsigned int       NvV32; /* "void": enumerated or multiple fields */
+typedef unsigned int       NvU32; /* 0 to 4294967295 */
+#endif
+
+// The minimal standard for C89 and after
+#else // (defined(NV_UNIX) || defined(vxworks) || ...
+typedef unsigned long      NvV32; /* "void": enumerated or multiple fields */
+typedef unsigned long      NvU32; /* 0 to 4294967295 */
+#endif // (defined(NV_UNIX) || defined(vxworks) || ...
+
+// Mac OS 32-bit still needs this
+#if defined(NV_MACINTOSH) && !defined(NV_MACINTOSH_64)
+typedef   signed long      NvS32; /* -2147483648 to 2147483647 */
+#else
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+typedef  Int32             NvS32; /* -2147483648 to 2147483647 */
+#else
+typedef   signed int       NvS32; /* -2147483648 to 2147483647 */
+#endif
+#endif // defined(NV_MACINTOSH) && !defined(NV_MACINTOSH_64)
+#endif // __INT32_TYPE__
+#endif // NVTYPES_USE_STDINT
+
+
+
+#if NVTYPES_USE_STDINT
+typedef uint64_t           NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */
+typedef  int64_t           NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */
+
+#define NvU64_fmtX PRIX64
+#define NvU64_fmtx PRIx64
+#define NvU64_fmtu PRIu64
+#define NvU64_fmto PRIo64
+#define NvS64_fmtd PRId64
+#define NvS64_fmti PRIi64
+#else
+// 64-bit types for compilers that support them, plus some obsolete variants
+#if defined(__GNUC__) || defined(__clang__) || defined(__arm) ||           \
+    defined(__IAR_SYSTEMS_ICC__) || defined(__ghs__) || defined(_WIN64) || \
+    defined(__SUNPRO_C) || defined(__SUNPRO_CC) || defined (__xlC__)
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+typedef UInt64             NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */
+typedef  Int64             NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */
+#else
+typedef unsigned long long NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */
+typedef          long long NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */
+#endif
+
+#define NvU64_fmtX "llX"
+#define NvU64_fmtx "llx"
+#define NvU64_fmtu "llu"
+#define NvU64_fmto "llo"
+#define NvS64_fmtd "lld"
+#define NvS64_fmti "lli"
+
+// Microsoft since 2003 -- https://msdn.microsoft.com/en-us/library/29dh1w7z.aspx
+#else
+typedef unsigned __int64   NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */
+typedef          __int64   NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */
+
+#define NvU64_fmtX "I64X"
+#define NvU64_fmtx "I64x"
+#define NvU64_fmtu "I64u"
+#define NvU64_fmto "I64o"
+#define NvS64_fmtd "I64d"
+#define NvS64_fmti "I64i"
+
+#endif
+#endif // NVTYPES_USE_STDINT
+
+#ifdef NV_TYPESAFE_HANDLES
+/*
+ * Can't use opaque pointer as clients might be compiled with mismatched
+ * pointer sizes. TYPESAFE check will eventually be removed once all clients
+ * have transitioned safely to NvHandle.
+ * The plan is to then eventually scale up the handle to be 64-bits.
+ */
+typedef struct
+{
+    NvU32 val;
+} NvHandle;
+#else
+/*
+ * For compatibility with modules that haven't moved to typesafe handles.
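+ *
+ * Illustrative note (not part of the original header): code that must build
+ * under both configurations can funnel raw-value access through a small
+ * helper of its own, e.g. a hypothetical NV_HANDLE_VAL:
+ *
+ *   #ifdef NV_TYPESAFE_HANDLES
+ *   #define NV_HANDLE_VAL(h) ((h).val)
+ *   #else
+ *   #define NV_HANDLE_VAL(h) (h)
+ *   #endif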
+
+/* Boolean type */
+typedef NvU8 NvBool;
+#define NV_TRUE ((NvBool)(0 == 0))
+#define NV_FALSE ((NvBool)(0 != 0))
+
+/* Tristate type: NV_TRISTATE_FALSE, NV_TRISTATE_TRUE, NV_TRISTATE_INDETERMINATE */
+typedef NvU8 NvTristate;
+#define NV_TRISTATE_FALSE ((NvTristate) 0)
+#define NV_TRISTATE_TRUE ((NvTristate) 1)
+#define NV_TRISTATE_INDETERMINATE ((NvTristate) 2)
+
+/* Macros to extract the low and high parts of a 64-bit unsigned integer */
+/* Also designed to work if someone happens to pass in a 32-bit integer */
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+#define NvU64_HI32(n) ((NvU32)((((NvU64)(n)) >> 32) & 0xffffffffU))
+#define NvU64_LO32(n) ((NvU32)(( (NvU64)(n)) & 0xffffffffU))
+#else
+#define NvU64_HI32(n) ((NvU32)((((NvU64)(n)) >> 32) & 0xffffffff))
+#define NvU64_LO32(n) ((NvU32)(( (NvU64)(n)) & 0xffffffff))
+#endif
+#define NvU40_HI32(n) ((NvU32)((((NvU64)(n)) >> 8) & 0xffffffffU))
+#define NvU40_HI24of32(n) ((NvU32)( (NvU64)(n) & 0xffffff00U))
+
+/* Macros to extract the high and low 16 bits of a 32-bit unsigned integer */
+#define NvU32_HI16(n) ((NvU16)((((NvU32)(n)) >> 16) & 0xffffU))
+#define NvU32_LO16(n) ((NvU16)(( (NvU32)(n)) & 0xffffU))
+
+ /***************************************************************************\
+|* *|
+|* 64 bit type definitions for use in interface structures. *|
+|* *|
+ \***************************************************************************/
+
+#if defined(NV_64_BITS)
+
+typedef void* NvP64; /* 64 bit void pointer */
+typedef NvU64 NvUPtr; /* pointer sized unsigned int */
+typedef NvS64 NvSPtr; /* pointer sized signed int */
+typedef NvU64 NvLength; /* length to agree with sizeof */
+
+#define NvP64_VALUE(n) (n)
+#define NvP64_fmt "%p"
+
+#define KERNEL_POINTER_FROM_NvP64(p,v) ((p)(v))
+#define NvP64_PLUS_OFFSET(p,o) (NvP64)((NvU64)(p) + (NvU64)(o))
+
+#define NvUPtr_fmtX NvU64_fmtX
+#define NvUPtr_fmtx NvU64_fmtx
+#define NvUPtr_fmtu NvU64_fmtu
+#define NvUPtr_fmto NvU64_fmto
+#define NvSPtr_fmtd NvS64_fmtd
+#define NvSPtr_fmti NvS64_fmti
+
+#else
+
+typedef NvU64 NvP64; /* 64 bit void pointer */
+typedef NvU32 NvUPtr; /* pointer sized unsigned int */
+typedef NvS32 NvSPtr; /* pointer sized signed int */
+typedef NvU32 NvLength; /* length to agree with sizeof */
+
+#define NvP64_VALUE(n) ((void *)(NvUPtr)(n))
+#define NvP64_fmt "0x%llx"
+
+#define KERNEL_POINTER_FROM_NvP64(p,v) ((p)(NvUPtr)(v))
+#define NvP64_PLUS_OFFSET(p,o) ((p) + (NvU64)(o))
+
+#define NvUPtr_fmtX "X"
+#define NvUPtr_fmtx "x"
+#define NvUPtr_fmtu "u"
+#define NvUPtr_fmto "o"
+#define NvSPtr_fmtd "d"
+#define NvSPtr_fmti "i"
+
+#endif
+
+#define NvP64_NULL (NvP64)0
+
+/*!
+ * Helper macro to pack an @ref NvU64_ALIGN32 structure from a @ref NvU64.
+ *
+ * @param[out] pDst Pointer to NvU64_ALIGN32 structure to pack
+ * @param[in] pSrc Pointer to NvU64 with which to pack
+ */
+#define NvU64_ALIGN32_PACK(pDst, pSrc) \
+do { \
+ (pDst)->lo = NvU64_LO32(*(pSrc)); \
+ (pDst)->hi = NvU64_HI32(*(pSrc)); \
+} while (NV_FALSE)
+
+/*!
+ * Helper macro to unpack a @ref NvU64_ALIGN32 structure into a @ref NvU64.
+ *
+ * @param[out] pDst Pointer to NvU64 in which to unpack
+ * @param[in] pSrc Pointer to NvU64_ALIGN32 structure from which to unpack
+ */
+#define NvU64_ALIGN32_UNPACK(pDst, pSrc) \
+do { \
+ (*(pDst)) = NvU64_ALIGN32_VAL(pSrc); \
+} while (NV_FALSE)
+
+/*!
+ * Helper macro to unpack a @ref NvU64_ALIGN32 structure as a @ref NvU64.
+ *
+ * @param[in] pSrc Pointer to NvU64_ALIGN32 structure to unpack
+ */
+#define NvU64_ALIGN32_VAL(pSrc) \
+ ((NvU64) ((NvU64)((pSrc)->lo) | (((NvU64)(pSrc)->hi) << 32U)))
+
+/*!
+ * Helper macro to check whether the 32 bit aligned 64 bit number is zero.
+ *
+ * @param[in] _pU64 Pointer to NvU64_ALIGN32 structure.
+ *
+ * @return
+ * NV_TRUE _pU64 is zero.
+ * NV_FALSE otherwise.
+ */
+#define NvU64_ALIGN32_IS_ZERO(_pU64) \
+ (((_pU64)->lo == 0U) && ((_pU64)->hi == 0U))
+
+/*!
+ * Helper macro to add two 32-bit aligned 64 bit numbers on 64 bit processor.
+ *
+ * @param[in] pSrc1 Pointer to NvU64_ALIGN32 source 1 structure.
+ * @param[in] pSrc2 Pointer to NvU64_ALIGN32 source 2 structure.
+ * @param[in,out] pDst Pointer to NvU64_ALIGN32 dest. structure.
+ */
+#define NvU64_ALIGN32_ADD(pDst, pSrc1, pSrc2) \
+do { \
+ NvU64 __dst, __src1, __src2; \
+ \
+ NvU64_ALIGN32_UNPACK(&__src1, (pSrc1)); \
+ NvU64_ALIGN32_UNPACK(&__src2, (pSrc2)); \
+ __dst = __src1 + __src2; \
+ NvU64_ALIGN32_PACK((pDst), &__dst); \
+} while (NV_FALSE)
+
+/*!
+ * Helper macro to subtract two 32-bit aligned 64 bit numbers on 64 bit processor.
+ *
+ * @param[in] pSrc1 Pointer to NvU64_ALIGN32 source 1 structure.
+ * @param[in] pSrc2 Pointer to NvU64_ALIGN32 source 2 structure.
+ * @param[in,out] pDst Pointer to NvU64_ALIGN32 dest. structure.
+ */
+#define NvU64_ALIGN32_SUB(pDst, pSrc1, pSrc2) \
+do { \
+ NvU64 __dst, __src1, __src2; \
+ \
+ NvU64_ALIGN32_UNPACK(&__src1, (pSrc1)); \
+ NvU64_ALIGN32_UNPACK(&__src2, (pSrc2)); \
+ __dst = __src1 - __src2; \
+ NvU64_ALIGN32_PACK((pDst), &__dst); \
+} while (NV_FALSE)
+
+/*!
+ * Structure for representing 32 bit aligned NvU64 (64-bit unsigned integer)
+ * structures. This structure must be used because the 32 bit processor and
+ * 64 bit processor compilers will pack/align NvU64 differently.
+ *
+ * One use case is RM being 64 bit proc whereas PMU being 32 bit proc, this
+ * alignment difference will result in corrupted transactions between the RM
+ * and PMU.
+ *
+ * See the @ref NvU64_ALIGN32_PACK and @ref NvU64_ALIGN32_UNPACK macros for
+ * packing and unpacking these structures.
+ *
+ * @note The intention of this structure is to provide a datatype which will
+ * be packed/aligned consistently and efficiently across all platforms.
+ * We don't want to use "NV_DECLARE_ALIGNED(NvU64, 8)" because that
+ * leads to memory waste on our 32-bit uprocessors (e.g. FALCONs) where
+ * DMEM efficiency is vital.
+ */
+typedef struct
+{
+    /*!
+     * Low 32 bits.
+     */
+    NvU32 lo;
+    /*!
+     * High 32 bits.
+     */
+    NvU32 hi;
+} NvU64_ALIGN32;
+
+/* Useful macro to hide required double cast */
+#define NV_PTR_TO_NvP64(n) (NvP64)(NvUPtr)(n)
+#define NV_SIGN_EXT_PTR_TO_NvP64(p) ((NvP64)(NvS64)(NvSPtr)(p))
+#define KERNEL_POINTER_TO_NvP64(p) ((NvP64)(uintptr_t)(p))
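
The pack/unpack flow is easiest to see end to end; the following is an illustrative editorial sketch using only the macros defined above:

    static void example(void)
    {
        NvU64 before = 0x123456789ABCDEF0ULL;
        NvU64 one64 = 1ULL;
        NvU64 after;
        NvU64_ALIGN32 packed, one, sum;

        NvU64_ALIGN32_PACK(&packed, &before); /* lo = 0x9ABCDEF0, hi = 0x12345678 */
        NvU64_ALIGN32_PACK(&one, &one64);

        /* ADD unpacks both operands, adds them as NvU64, and repacks the result */
        NvU64_ALIGN32_ADD(&sum, &packed, &one);

        NvU64_ALIGN32_UNPACK(&after, &sum); /* after == before + 1 */
    }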
+
+ /***************************************************************************\
+|* *|
+|* Limits for common types. *|
+|* *|
+ \***************************************************************************/
+
+/* Explanation of the current form of these limits:
+ *
+ * - Decimal is used, as hex values are by default positive.
+ * - Casts are not used, as usage in the preprocessor itself (#if) ends poorly.
+ * - The subtraction of 1 for some MIN values is used to get around the fact
+ * that the C syntax actually treats -x as NEGATE(x) instead of a distinct
+ * number. Since 2147483648 isn't a valid positive 32-bit signed value, we
+ * take the largest valid positive signed number, negate it, and subtract 1.
+ */
+#define NV_S8_MIN (-128)
+#define NV_S8_MAX (+127)
+#define NV_U8_MIN (0U)
+#define NV_U8_MAX (+255U)
+#define NV_S16_MIN (-32768)
+#define NV_S16_MAX (+32767)
+#define NV_U16_MIN (0U)
+#define NV_U16_MAX (+65535U)
+#define NV_S32_MIN (-2147483647 - 1)
+#define NV_S32_MAX (+2147483647)
+#define NV_U32_MIN (0U)
+#define NV_U32_MAX (+4294967295U)
+#define NV_S64_MIN (-9223372036854775807LL - 1LL)
+#define NV_S64_MAX (+9223372036854775807LL)
+#define NV_U64_MIN (0ULL)
+#define NV_U64_MAX (+18446744073709551615ULL)
+
+/* Aligns fields in structs so they match up between 32 and 64 bit builds */
+#if defined(__GNUC__) || defined(__clang__) || defined(NV_QNX)
+#define NV_ALIGN_BYTES(size) __attribute__ ((aligned (size)))
+#elif defined(__arm)
+#define NV_ALIGN_BYTES(size) __align(size)
+#else
+// XXX This is dangerously nonportable! We really shouldn't provide a default
+// version of this that doesn't do anything.
+#define NV_ALIGN_BYTES(size)
+#endif
+
+// NV_DECLARE_ALIGNED() can be used on all platforms.
+// This macro form accounts for the fact that __declspec on Windows is required
+// before the variable type,
+// and NV_ALIGN_BYTES is required after the variable name.
+#if defined(__GNUC__) || defined(__clang__) || defined(NV_QNX)
+#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) TYPE_VAR __attribute__ ((aligned (ALIGN)))
+#elif defined(__arm)
+#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) __align(ALIGN) TYPE_VAR
+#endif
+
+ /***************************************************************************\
+|* Function Declaration Types *|
+ \***************************************************************************/
+
+// stretching the meaning of "nvtypes", but this seems the least offensive
+// place to re-locate these from nvos.h which cannot be included by a number
+// of builds that need them
+
+#if defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER)
+ #define NV_ATTRIBUTE_UNUSED __attribute__((__unused__))
+#else
+ #define NV_ATTRIBUTE_UNUSED
+#endif
+
+ #if defined(__GNUC__)
+ #if (__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) && (__GNUC_PATCHLEVEL__ >= 1))
+ #define NV_NOINLINE __attribute__((__noinline__))
+ #endif
+ #elif defined(__clang__)
+ #if __has_attribute(noinline)
+ #define NV_NOINLINE __attribute__((__noinline__))
+ #endif
+ #elif defined(__arm) && (__ARMCC_VERSION >= 300000)
+ #define NV_NOINLINE __attribute__((__noinline__))
+ #elif (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)) ||\
+ (defined(__SUNPRO_CC) && (__SUNPRO_CC >= 0x590))
+ #define NV_NOINLINE __attribute__((__noinline__))
+ #elif defined (__INTEL_COMPILER)
+ #define NV_NOINLINE __attribute__((__noinline__))
+ #endif
+
+ #if !defined(NV_NOINLINE)
+ #define NV_NOINLINE
+ #endif
+
+ /* GreenHills compiler defines __GNUC__, but doesn't support
+ * __inline__ keyword. */
+ #if defined(__ghs__)
+ #define NV_INLINE inline
+ #elif defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER)
+ #define NV_INLINE __inline__
+ #elif defined (macintosh) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+ #define NV_INLINE inline
+ #elif defined(__arm)
+ #define NV_INLINE __inline
+ #else
+ #define NV_INLINE
+ #endif
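
An illustrative use of the alignment wrapper above (an editorial sketch, not from the header): the point is that the 64-bit field keeps the same offset in 32-bit and 64-bit builds.

    typedef struct
    {
        NvU32 count;
        /* expands per-compiler, e.g. to
         *   NvU64 base __attribute__ ((aligned (8)));
         * so 'base' sits at offset 8 in both 32- and 64-bit builds */
        NV_DECLARE_ALIGNED(NvU64 base, 8);
    } EXAMPLE_PARAMS;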
+
+ /* Don't force inline on DEBUG builds -- it's annoying for debuggers. */
+ #if !defined(DEBUG)
+ /* GreenHills compiler defines __GNUC__, but doesn't support
+ * __attribute__ or __inline__ keyword. */
+ #if defined(__ghs__)
+ #define NV_FORCEINLINE inline
+ #elif defined(__GNUC__)
+ // GCC 3.1 and beyond support the always_inline function attribute.
+ #if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1))
+ #define NV_FORCEINLINE __attribute__((__always_inline__)) __inline__
+ #else
+ #define NV_FORCEINLINE __inline__
+ #endif
+ #elif defined(__clang__)
+ #if __has_attribute(always_inline)
+ #define NV_FORCEINLINE __attribute__((__always_inline__)) __inline__
+ #else
+ #define NV_FORCEINLINE __inline__
+ #endif
+ #elif defined(__arm) && (__ARMCC_VERSION >= 220000)
+ // RVDS 2.2 also supports forceinline, but ADS 1.2 does not
+ #define NV_FORCEINLINE __forceinline
+ #else /* defined(__GNUC__) */
+ #define NV_FORCEINLINE NV_INLINE
+ #endif
+ #else
+ #define NV_FORCEINLINE NV_INLINE
+ #endif
+
+ #define NV_APIENTRY
+ #define NV_FASTCALL
+ #define NV_CDECLCALL
+ #define NV_STDCALL
+
+ /*
+ * The 'warn_unused_result' function attribute prompts GCC to issue a
+ * warning if the result of a function tagged with this attribute
+ * is ignored by a caller. In combination with '-Werror', it can be
+ * used to enforce result checking in RM code; at this point, this
+ * is only done on UNIX.
+ */
+ #if defined(__GNUC__) && defined(NV_UNIX)
+ #if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))
+ #define NV_FORCERESULTCHECK __attribute__((__warn_unused_result__))
+ #else
+ #define NV_FORCERESULTCHECK
+ #endif
+ #elif defined(__clang__)
+ #if __has_attribute(warn_unused_result)
+ #define NV_FORCERESULTCHECK __attribute__((__warn_unused_result__))
+ #else
+ #define NV_FORCERESULTCHECK
+ #endif
+ #else /* defined(__GNUC__) */
+ #define NV_FORCERESULTCHECK
+ #endif
+
+ /*
+ * Functions decorated with NV_FORMAT_PRINTF(f, a) have a format string at
+ * parameter number 'f' and variadic arguments start at parameter number 'a'.
+ * (Note that for C++ methods, there is an implicit 'this' parameter so
+ * explicit parameters are numbered from 2.)
+ */
+ #if defined(__GNUC__)
+ #define NV_FORMAT_PRINTF(_f, _a) __attribute__((format(printf, _f, _a)))
+ #else
+ #define NV_FORMAT_PRINTF(_f, _a)
+ #endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* NVTYPES_INCLUDED */
diff --git a/src/common/sdk/nvidia/inc/rs_access.h b/src/common/sdk/nvidia/inc/rs_access.h
new file mode 100644
index 0000000..7020677
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/rs_access.h
@@ -0,0 +1,273 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: rs_access.finn
+//
+
+
+
+
+#include "nvtypes.h"
+#include "nvmisc.h"
+
+
+/****************************************************************************/
+/* Access right definitions */
+/****************************************************************************/
+
+//
+// The meaning of each access right is documented in
+// resman/docs/rmapi/resource_server/rm_capabilities.adoc
+//
+// RS_ACCESS_COUNT is the number of access rights that have been defined
+// and are in use. All integers in the range [0, RS_ACCESS_COUNT) should
+// represent valid access rights.
+//
+// When adding a new access right, don't forget to update
+// 1) The descriptions in the resman/docs/rmapi/resource_server/rm_capabilities.adoc
+// 2) RS_ACCESS_COUNT, defined below
+// 3) The declaration of g_rsAccessMetadata in rs_access_rights.c
+// 4) The list of access rights in drivers/common/chip-config/Chipcontrols.pm
+// 5) Any relevant access right callbacks
+//
+
+#define RS_ACCESS_DUP_OBJECT 0U
+#define RS_ACCESS_NICE 1U
+#define RS_ACCESS_DEBUG 2U
+#define RS_ACCESS_PERFMON 3U
+#define RS_ACCESS_COUNT 4U
+
+
+/****************************************************************************/
+/* Access right data structures */
+/****************************************************************************/
+
+/*!
+ * @brief A type that can be used to represent any access right.
+ */
+typedef NvU16 RsAccessRight;
+
+/*!
+ * @brief An internal type used to represent one limb in an access right mask.
+ */
+typedef NvU32 RsAccessLimb;
+#define SDK_RS_ACCESS_LIMB_BITS 32
+
+/*!
+ * @brief The number of limbs in the RS_ACCESS_MASK struct.
+ */
+#define SDK_RS_ACCESS_MAX_LIMBS 1
+
+/*!
+ * @brief The maximum number of possible access rights supported by the
+ * current data structure definition.
+ *
+ * You probably want RS_ACCESS_COUNT instead, which is the number of actual
+ * access rights defined.
+ */
+#define SDK_RS_ACCESS_MAX_COUNT (0x20) /* finn: Evaluated from "(SDK_RS_ACCESS_LIMB_BITS * SDK_RS_ACCESS_MAX_LIMBS)" */
+
+/**
+ * @brief A struct representing a set of access rights.
+ *
+ * Note that the values of bit positions larger than RS_ACCESS_COUNT are
+ * undefined, and should not be assumed to be 0 (see RS_ACCESS_MASK_FILL).
+ */
+typedef struct RS_ACCESS_MASK {
+    RsAccessLimb limbs[SDK_RS_ACCESS_MAX_LIMBS];
+} RS_ACCESS_MASK;
+
+/**
+ * @brief A struct representing auxiliary information about each access right.
+ */
+typedef struct RS_ACCESS_INFO {
+    NvU32 flags;
+} RS_ACCESS_INFO;
+
+
+/****************************************************************************/
+/* Access right macros */
+/****************************************************************************/
+
+#define SDK_RS_ACCESS_LIMB_INDEX(index) ((index) / SDK_RS_ACCESS_LIMB_BITS)
+#define SDK_RS_ACCESS_LIMB_POS(index) ((index) % SDK_RS_ACCESS_LIMB_BITS)
+
+#define SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) \
+ ((pAccessMask)->limbs[SDK_RS_ACCESS_LIMB_INDEX(index)])
+#define SDK_RS_ACCESS_OFFSET_MASK(index) \
+ NVBIT_TYPE(SDK_RS_ACCESS_LIMB_POS(index), RsAccessLimb)
+
+/*!
+ * @brief Checks that accessRight represents a valid access right.
+ *
+ * The valid range of access rights is [0, RS_ACCESS_COUNT).
+ *
+ * @param[in] accessRight The access right value to check
+ *
+ * @return true if accessRight is valid
+ * @return false otherwise
+ */
+#define RS_ACCESS_BOUNDS_CHECK(accessRight) \
+ (accessRight < RS_ACCESS_COUNT)
+
+/*!
+ * @brief Test whether an access right is present in a set
+ *
+ * @param[in] pAccessMask The set of access rights to read
+ * @param[in] index The access right to examine
+ *
+ * @return NV_TRUE if the access right specified by index was present in the set,
+ * and NV_FALSE otherwise
+ */
+#define RS_ACCESS_MASK_TEST(pAccessMask, index) \
+ (RS_ACCESS_BOUNDS_CHECK(index) && \
+ (SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) & SDK_RS_ACCESS_OFFSET_MASK(index)) != 0)
+
+/*!
+ * @brief Add an access right to a mask
+ *
+ * @param[in] pAccessMask The set of access rights to modify
+ * @param[in] index The access right to set
+ */
+#define RS_ACCESS_MASK_ADD(pAccessMask, index) \
+ do \
+ { \
+ if (RS_ACCESS_BOUNDS_CHECK(index)) { \
+ SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) |= SDK_RS_ACCESS_OFFSET_MASK(index); \
+ } \
+ } while (NV_FALSE)
+
+/*!
+ * @brief Remove an access right from a mask
+ *
+ * @param[in] pAccessMask The set of access rights to modify
+ * @param[in] index The access right to unset
+ */
+#define RS_ACCESS_MASK_REMOVE(pAccessMask, index) \
+ do \
+ { \
+ if (RS_ACCESS_BOUNDS_CHECK(index)) { \
+ SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) &= ~SDK_RS_ACCESS_OFFSET_MASK(index); \
+ } \
+ } while (NV_FALSE)
+
+/*!
+ * @brief Performs an in-place union between two access right masks
+ *
+ * @param[in,out] pMaskOut The access rights mask to be updated
+ * @param[in] pMaskIn The set of access rights to be added to pMaskOut
+ */
+#define RS_ACCESS_MASK_UNION(pMaskOut, pMaskIn) \
+ do \
+ { \
+ NvLength limb; \
+ for (limb = 0; limb < SDK_RS_ACCESS_MAX_LIMBS; limb++) \
+ { \
+ SDK_RS_ACCESS_LIMB_ELT(pMaskOut, limb) |= SDK_RS_ACCESS_LIMB_ELT(pMaskIn, limb); \
+ } \
+ } while (NV_FALSE)
+
+/*!
+ * @brief Performs an in-place subtract of one mask's rights from another
+ *
+ * @param[in,out] pMaskOut The access rights mask to be updated
+ * @param[in] pMaskIn The set of access rights to be removed from pMaskOut
+ */
+#define RS_ACCESS_MASK_SUBTRACT(pMaskOut, pMaskIn) \
+ do \
+ { \
+ NvLength limb; \
+ for (limb = 0; limb < SDK_RS_ACCESS_MAX_LIMBS; limb++) \
+ { \
+ SDK_RS_ACCESS_LIMB_ELT(pMaskOut, limb) &= ~SDK_RS_ACCESS_LIMB_ELT(pMaskIn, limb); \
+ } \
+ } while (NV_FALSE)
+
+/*!
+ * @brief Removes all rights from an access rights mask
+ *
+ * @param[in,out] pAccessMask The access rights mask to be updated
+ */
+#define RS_ACCESS_MASK_CLEAR(pAccessMask) \
+ do \
+ { \
+ portMemSet(pAccessMask, 0, sizeof(*pAccessMask)); \
+ } while (NV_FALSE)
+
+/*!
+ * @brief Adds all rights to an access rights mask
+ *
+ * @param[in,out] pAccessMask The access rights mask to be updated
+ */
+#define RS_ACCESS_MASK_FILL(pAccessMask) \
+ do \
+ { \
+ portMemSet(pAccessMask, 0xff, sizeof(*pAccessMask)); \
+ } while (NV_FALSE)
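
A short sketch of the intended call pattern (illustrative; RS_ACCESS_MASK_CLEAR relies on portMemSet from NvPort being available, as the macro above assumes):

    static NvBool exampleHasDebug(void)
    {
        RS_ACCESS_MASK mask;

        RS_ACCESS_MASK_CLEAR(&mask);                /* start with no rights */
        RS_ACCESS_MASK_ADD(&mask, RS_ACCESS_DEBUG); /* grant one right      */

        /* out-of-range indices are rejected by RS_ACCESS_BOUNDS_CHECK and
         * simply test false */
        return RS_ACCESS_MASK_TEST(&mask, RS_ACCESS_DEBUG) ? NV_TRUE : NV_FALSE;
    }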
+
+
+/****************************************************************************/
+/* Share definitions */
+/****************************************************************************/
+
+//
+// The usage of Share Policy and the meaning of each share type is documented in
+// resman/docs/rmapi/resource_server/rm_capabilities.adoc
+//
+#define RS_SHARE_TYPE_NONE (0U)
+#define RS_SHARE_TYPE_ALL (1U)
+#define RS_SHARE_TYPE_OS_SECURITY_TOKEN (2U)
+#define RS_SHARE_TYPE_CLIENT (3U)
+#define RS_SHARE_TYPE_PID (4U)
+#define RS_SHARE_TYPE_SMC_PARTITION (5U)
+#define RS_SHARE_TYPE_GPU (6U)
+#define RS_SHARE_TYPE_FM_CLIENT (7U)
+// Must be last. Update when a new SHARE_TYPE is added
+#define RS_SHARE_TYPE_MAX (8U)
+
+
+//
+// Use Revoke to remove an existing policy from the list.
+// Allow is based on OR logic, Require is based on AND logic.
+// To share a right, at least one Allow (non-Require) must match, and all Require must pass.
+// If Compose is specified, policies will be added to the list. Otherwise, they will replace the list.
+//
+#define RS_SHARE_ACTION_FLAG_REVOKE NVBIT(0)
+#define RS_SHARE_ACTION_FLAG_REQUIRE NVBIT(1)
+#define RS_SHARE_ACTION_FLAG_COMPOSE NVBIT(2)
+
+/****************************************************************************/
+/* Share flag data structures */
+/****************************************************************************/
+
+typedef struct RS_SHARE_POLICY {
+    NvU32 target;
+    RS_ACCESS_MASK accessMask;
+    NvU16 type; ///< RS_SHARE_TYPE_
+    NvU8 action; ///< RS_SHARE_ACTION_
+} RS_SHARE_POLICY;
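
And a sketch of filling in the share policy type above (illustrative only; the field values are hypothetical):

    static RS_SHARE_POLICY exampleSharePolicy(void)
    {
        RS_SHARE_POLICY policy;

        policy.target = 0;                  /* interpreted per share type */
        RS_ACCESS_MASK_CLEAR(&policy.accessMask);
        RS_ACCESS_MASK_ADD(&policy.accessMask, RS_ACCESS_DUP_OBJECT);
        policy.type = RS_SHARE_TYPE_CLIENT; /* share with a single client */
        policy.action = (NvU8)RS_SHARE_ACTION_FLAG_COMPOSE; /* append to the list */

        return policy;
    }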
diff --git a/src/common/shared/inc/compat.h b/src/common/shared/inc/compat.h
new file mode 100644
index 0000000..655e40f
--- /dev/null
+++ b/src/common/shared/inc/compat.h
@@ -0,0 +1,50 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __COMPAT_H__
+#define __COMPAT_H__
+
+#include "nvtypes.h"
+#include "nvmisc.h"
+#include "nvstatus.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(PORT_IS_KERNEL_BUILD)
+
+#include "utils/nvassert.h"
+#include "nvport/nvport.h"
+
+#else
+
+#error NvPort must be enabled to use compat.h
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //__COMPAT_H__
diff --git a/src/common/shared/inc/nvdevid.h b/src/common/shared/inc/nvdevid.h
new file mode 100644
index 0000000..514825e
--- /dev/null
+++ b/src/common/shared/inc/nvdevid.h
@@ -0,0 +1,709 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVDEVID_H
+#define NVDEVID_H
+
+
+
+/**************** Resource Manager Defines and Structures ******************\
+* *
+* Private device ID defines - only defines! *
+* *
+\***************************************************************************/
+
+///////////////////////////////////////////////////////////////////////////////////////////
+//
+// VENDOR/SUBVENDOR IDS
+// XXX Cleanup to do: change PCI_VENDOR_* to NV_PCI_SUBID_VENDOR_*
+//
+///////////////////////////////////////////////////////////////////////////////////////////
+#define NV_PCI_SUBID_VENDOR 15:0 /* RW--F */
+#define NV_PCI_SUBID_VENDOR_AMD 0x1022
+#define NV_PCI_SUBID_VENDOR_ALI 0x10B9
+#define NV_PCI_SUBID_VENDOR_NVIDIA 0x10DE
+#define NV_PCI_SUBID_VENDOR_INTEL 0x8086
+#define NV_PCI_SUBID_VENDOR_VIA 0x1106
+#define NV_PCI_SUBID_VENDOR_RCC 0x1166
+#define NV_PCI_SUBID_VENDOR_MICRON_1 0x1042
+#define NV_PCI_SUBID_VENDOR_MICRON_2 0x1344
+#define NV_PCI_SUBID_VENDOR_APPLE 0x106B
+#define NV_PCI_SUBID_VENDOR_SIS 0x1039
+#define NV_PCI_SUBID_VENDOR_ATI 0x1002
+#define NV_PCI_SUBID_VENDOR_TRANSMETA 0x1279
+#define NV_PCI_SUBID_VENDOR_HP 0x103C
+#define NV_PCI_SUBID_VENDOR_DELL 0x1028
+#define NV_PCI_SUBID_VENDOR_FUJITSU 0x10cf
+#define NV_PCI_SUBID_VENDOR_ASUS 0x1043
+#define NV_PCI_SUBID_VENDOR_MSI 0x1462
+#define NV_PCI_SUBID_VENDOR_FOXCONN 0x105B
+#define NV_PCI_SUBID_VENDOR_ECS 0x1019
+#define NV_PCI_SUBID_VENDOR_DFI_1 0x106E
+#define NV_PCI_SUBID_VENDOR_TOSHIBA 0x1179
+#define NV_PCI_SUBID_VENDOR_DFI_2 0x15BD
+#define NV_PCI_SUBID_VENDOR_ACER 0x1025
+#define NV_PCI_SUBID_VENDOR_GIGABYTE 0x1458
+#define NV_PCI_SUBID_VENDOR_EVGA 0x3842
+#define NV_PCI_SUBID_VENDOR_BROADCOM 0x1166
+#define NV_PCI_SUBID_VENDOR_SUPERMICRO 0x15D9
+#define NV_PCI_SUBID_VENDOR_BIOSTAR 0x1565
+#define NV_PCI_SUBID_VENDOR_XFX 0x1682
+#define NV_PCI_SUBID_VENDOR_PCPARTNER 0x19DA
+#define NV_PCI_SUBID_VENDOR_LENOVO 0x17AA
+#define NV_PCI_SUBID_VENDOR_FSC 0x1734
+#define NV_PCI_SUBID_VENDOR_FTS 0x1734
+#define NV_PCI_SUBID_VENDOR_COLORFUL 0x7377
+#define NV_PCI_SUBID_VENDOR_ASROCK 0x1849
+#define NV_PCI_SUBID_VENDOR_SHUTTLE 0x1297
+#define NV_PCI_SUBID_VENDOR_CLEVO 0x1558
+#define NV_PCI_SUBID_VENDOR_PEGATRON 0x1B0A
+#define NV_PCI_SUBID_VENDOR_JETWAY 0x16F3
+#define NV_PCI_SUBID_VENDOR_HIGHGRADE 0x1C6C
+#define NV_PCI_SUBID_VENDOR_GALAXY 0x1B4C
+#define NV_PCI_SUBID_VENDOR_ZOTAC 0x19DA
+#define NV_PCI_SUBID_VENDOR_ARIMA 0x161F
+#define NV_PCI_SUBID_VENDOR_BFG 0x19F1
+#define NV_PCI_SUBID_VENDOR_SONY 0x104D
+#define NV_PCI_SUBID_VENDOR_BITLAND 0x1642
+#define NV_PCI_SUBID_VENDOR_PC_PARTNER 0x174B
+#define NV_PCI_SUBID_VENDOR_CAVIUM 0x177D
+#define NV_PCI_SUBID_VENDOR_NZXT 0x1D96
+
+// XXX CLEANUP TO REMOVE IN FAVOR OF NV_PCI_SUBID_VENDOR_*
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_VENDOR_ID_ALI 0x10B9
+#define PCI_VENDOR_ID_NVIDIA 0x10DE
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define PCI_VENDOR_ID_VIA 0x1106
+#define PCI_VENDOR_ID_RCC 0x1166
+#define PCI_VENDOR_ID_MICRON_1 0x1042
+#define PCI_VENDOR_ID_MICRON_2 0x1344
+#define PCI_VENDOR_ID_APPLE 0x106B
+#define PCI_VENDOR_ID_SIS 0x1039
+#define PCI_VENDOR_ID_ATI 0x1002
+#define PCI_VENDOR_ID_TRANSMETA 0x1279
+#define PCI_VENDOR_ID_HP 0x103C
+#define PCI_VENDOR_ID_DELL 0x1028
+#define PCI_VENDOR_ID_FUJITSU 0x10cf
+#define PCI_VENDOR_ID_ASUS 0x1043
+#define PCI_VENDOR_ID_MSI 0x1462
+#define PCI_VENDOR_ID_FOXCONN 0x105B
+#define PCI_VENDOR_ID_ECS 0x1019
+#define PCI_VENDOR_ID_DFI_1 0x106E
+#define PCI_VENDOR_ID_TOSHIBA 0x1179
+#define PCI_VENDOR_ID_DFI_2 0x15BD
+#define PCI_VENDOR_ID_ACER 0x1025
+#define PCI_VENDOR_ID_GIGABYTE 0x1458
+#define PCI_VENDOR_ID_EVGA 0x3842
+#define PCI_VENDOR_ID_BROADCOM 0x1166
+#define PCI_VENDOR_ID_SUPERMICRO 0x15D9
+#define PCI_VENDOR_ID_BIOSTAR 0x1565
+#define PCI_VENDOR_ID_XFX 0x1682
+#define PCI_VENDOR_ID_PCPARTNER 0x19DA
+#define PCI_VENDOR_ID_LENOVO 0x17AA
+#define PCI_VENDOR_ID_FSC 0x1734
+#define PCI_VENDOR_ID_FTS 0x1734
+#define PCI_VENDOR_ID_COLORFUL 0x7377
+#define PCI_VENDOR_ID_ASROCK 0x1849
+#define PCI_VENDOR_ID_SHUTTLE 0x1297
+#define PCI_VENDOR_ID_CLEVO 0x1558
+#define PCI_VENDOR_ID_PEGATRON 0x1B0A
+#define PCI_VENDOR_ID_JETWAY 0x16F3
+#define PCI_VENDOR_ID_HIGHGRADE 0x1C6C
+#define PCI_VENDOR_ID_GALAXY 0x1B4C
+#define PCI_VENDOR_ID_ZOTAC 0x19DA
+#define PCI_VENDOR_ID_ARIMA 0x161F
+#define PCI_VENDOR_ID_PC_PARTNER 0x174B
+#define PCI_VENDOR_ID_APM 0x10E8
+#define PCI_VENDOR_ID_IBM 0x1014
+#define PCI_VENDOR_ID_NZXT 0x1D96
+#define PCI_VENDOR_ID_MARVELL 0x177D
+#define PCI_VENDOR_ID_REDHAT 0x1B36
+#define PCI_VENDOR_ID_AMPERE 0x1DEF
+#define PCI_VENDOR_ID_HUAWEI 0x19E5
+#define PCI_VENDOR_ID_MELLANOX 0x15B3
+#define PCI_VENDOR_ID_AMAZON 0x1D0F
+#define PCI_VENDOR_ID_CADENCE 0x17CD
+#define PCI_VENDOR_ID_ARM 0x13B5
+#define PCI_VENDOR_ID_HYGON 0x1D94
+#define PCI_VENDOR_ID_ALIBABA 0x1DED
+#define PCI_VENDOR_ID_SIFIVE 0xF15E
+#define PCI_VENDOR_ID_PLDA 0x1556
+#define PCI_VENDOR_ID_PHYTIUM 0x1DB7
+
+#define NV_PCI_DEVID_DEVICE 31:16 /* RW--F */
+#define NV_PCI_SUBID_DEVICE 31:16 /* RW--F */
+
+///////////////////////////////////////////////////////////////////////////////////////////
+//
+// GPU DEVICE IDS
+//
+///////////////////////////////////////////////////////////////////////////////////////////
+
+#define NV_PCI_DEVID_DEVICE_PG171_SKU200_PG179_SKU220 0x25B6 /* NVIDIA A16 / NVIDIA A2 */
+#define NV_PCI_DEVID_DEVICE_PG189_SKU600 0x1EBA
+
+///////////////////////////////////////////////////////////////////////////////////////////
+//
+// SUBDEVICE IDs
+//
+///////////////////////////////////////////////////////////////////////////////////////////
+
+// A16
+#define NV_PCI_SUBID_DEVICE_PG171_SKU200 0x14A9
+
+// NVIDIA B200
+#define NV_PCI_SUBID_DEVICE_PG525_SKU220 0x1999
+#define NV_PCI_SUBID_DEVICE_PG525_SKU225 0x199B
+#define NV_PCI_SUBID_DEVICE_PG525_SKU230 0x20DA
+
+///////////////////////////////////////////////////////////////////////////////////////////
+//
+// CHIPSET IDs
+//
+///////////////////////////////////////////////////////////////////////////////////////////
+// Desktop flavor of X58
+#define X58_DESKTOP_DEVIDS 0x3400, 0x3405
+// Mobile version of X58
+#define X58_MOBILE_DEVID 0x3405
+#define X58_MOBILE_CLEVO_7200_SSDEVID 0x7200
+
+// Sandy bridge CLEVO platform
+#define SANDYBRIDGE_P180HM_SSDEVID 0x8000
+#define SandyBridge_E_X79_P270WM_SSDEVID 0x270
+#define IvyBridge_Z75_P370EM_SSDEVID 0x371
+
+// Device IDs of devices present on Patsburg's PCIE bus.
+#define PATSBURG_PCIE_DEVICE_MIN_DEVID 0x1D10
+#define PATSBURG_PCIE_DEVICE_MAX_DEVID 0x1D1F
+#define PATSBURG_PCIE_DEVICE_DEVID 0x244E
+
+// Tylersburg Configurations
+#define TYLERSBURG_DEVID 0x3406
+
+// Intel Barlow Ridge TB5 device IDs
+#define DEVICE_ID_INTEL_BARLOW_RIDGE_5786_EGPU 0x5786
+#define DEVICE_ID_INTEL_BARLOW_RIDGE_57A4_EGPU 0x57A4
+
+// Intel Grantsdale definitions
+#define DEVICE_ID_INTEL_2580_HOST_BRIDGE 0x2580
+#define DEVICE_ID_INTEL_2581_ROOT_PORT 0x2581
+
+// Intel Alderwood definitions
+#define DEVICE_ID_INTEL_2584_HOST_BRIDGE 0x2584
+#define DEVICE_ID_INTEL_2585_ROOT_PORT 0x2585
+
+// Intel Alviso definitions
+#define DEVICE_ID_INTEL_2590_HOST_BRIDGE 0x2590
+#define DEVICE_ID_INTEL_2591_ROOT_PORT 0x2591
+
+// Intel Tumwater definitions
+#define DEVICE_ID_INTEL_359E_HOST_BRIDGE 0x359E
+#define DEVICE_ID_INTEL_3597_ROOT_PORT 0x3597
+
+// Intel Stoakley definitions
+#define INTEL_4000_SUBDEVICE_ID 0x021D
+
+// Intel SkullTrail definitions
+#define INTEL_4003_SUBDEVICE_ID 0x5358
+
+// Intel Core I7 CPU
+#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I7 0x2C01
+
+// Intel Core I5 CPU Lynnfield
+#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_L 0x2C81
+#define INTEL_LYNNFIELD_ROOTPORT_CPU1 0xD138
+#define INTEL_LYNNFIELD_ROOTPORT_CPU2 0xD13A
+
+// Intel Core I5 CPU Auburndale
+#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_A 0x2D41
+
+// Intel Core I5 CPU 650
+#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_6 0x2D01
+
+// Intel Poulsbo definitions
+#define DEVICE_ID_INTEL_8100_HOST_BRIDGE 0x8100
+#define DEVICE_ID_INTEL_8110_ROOT_PORT 0x8110
+#define DEVICE_ID_INTEL_8112_ROOT_PORT 0x8112
+
+// Intel TunnelCreek definitions
+#define DEVICE_ID_INTEL_8180_ROOT_PORT 0x8180
+#define DEVICE_ID_INTEL_8181_ROOT_PORT 0x8181
+#define DEVICE_ID_INTEL_8184_ROOT_PORT 0x8184
+#define DEVICE_ID_INTEL_8185_ROOT_PORT 0x8185
+
+// Intel I/O Hub definitions
+#define DEVICE_ID_INTEL_3408_ROOT_PORT 0x3408
+#define DEVICE_ID_INTEL_3411_ROOT_PORT 0x3411
+#define DEVICE_ID_INTEL_3420_ROOT_PORT 0x3420
+#define DEVICE_ID_INTEL_3421_ROOT_PORT 0x3421
+
+// Intel SandyBridge IIO definitions
+#define DEVICE_ID_INTEL_3C02_ROOT_PORT 0x3c02
+#define DEVICE_ID_INTEL_3C03_ROOT_PORT 0x3c03
+#define DEVICE_ID_INTEL_3C04_ROOT_PORT 0x3c04
+#define DEVICE_ID_INTEL_3C05_ROOT_PORT 0x3c05
+#define DEVICE_ID_INTEL_3C06_ROOT_PORT 0x3c06
+#define DEVICE_ID_INTEL_3C07_ROOT_PORT 0x3c07
+#define DEVICE_ID_INTEL_3C08_ROOT_PORT 0x3c08
+#define DEVICE_ID_INTEL_3C09_ROOT_PORT 0x3c09
+#define DEVICE_ID_INTEL_3C0A_ROOT_PORT 0x3c0a
+#define DEVICE_ID_INTEL_3C0B_ROOT_PORT 0x3c0b
+
+// Intel Haswell-E definitions
+#define DEVICE_ID_INTEL_2F00_HOST_BRIDGE 0x2f00
+#define DEVICE_ID_INTEL_2F01_ROOT_PORT 0x2f01
+#define DEVICE_ID_INTEL_2F02_ROOT_PORT 0x2f02
+#define DEVICE_ID_INTEL_2F03_ROOT_PORT 0x2f03
+#define DEVICE_ID_INTEL_2F04_ROOT_PORT 0x2f04
+#define DEVICE_ID_INTEL_2F05_ROOT_PORT 0x2f05
+#define DEVICE_ID_INTEL_2F06_ROOT_PORT 0x2f06
+#define DEVICE_ID_INTEL_2F07_ROOT_PORT 0x2f07
+#define DEVICE_ID_INTEL_2F08_ROOT_PORT 0x2f08
+#define DEVICE_ID_INTEL_2F09_ROOT_PORT 0x2f09
+#define DEVICE_ID_INTEL_2F0A_ROOT_PORT 0x2f0a
+#define DEVICE_ID_INTEL_2F0B_ROOT_PORT 0x2f0b
+
+#define DEVICE_ID_INTEL_0C01_ROOT_PORT 0x0c01
+
+// Intel IvyTown definitions
+
+#define DEVICE_ID_INTEL_0E02_ROOT_PORT 0x0e02
+#define DEVICE_ID_INTEL_0E03_ROOT_PORT 0x0e03
+#define DEVICE_ID_INTEL_0E04_ROOT_PORT 0x0e04
+#define DEVICE_ID_INTEL_0E05_ROOT_PORT 0x0e05
+#define DEVICE_ID_INTEL_0E06_ROOT_PORT 0x0e06
+#define DEVICE_ID_INTEL_0E07_ROOT_PORT 0x0e07
+#define DEVICE_ID_INTEL_0E08_ROOT_PORT 0x0e08
+#define DEVICE_ID_INTEL_0E09_ROOT_PORT 0x0e09
+#define DEVICE_ID_INTEL_0E0A_ROOT_PORT 0x0e0a
+#define DEVICE_ID_INTEL_0E0B_ROOT_PORT 0x0e0b
+// Intel Ivy Bridge E definitions
+#define DEVICE_ID_INTEL_0E00_HOST_BRIDGE 0x0E00
+
+// Intel PCH definitions
+#define DEVICE_ID_INTEL_9D10_PCH_BRIDGE 0x9d10
+#define DEVICE_ID_INTEL_9D18_PCH_BRIDGE 0x9d18
+#define DEVICE_ID_INTEL_A117_PCH_BRIDGE 0xa117
+#define DEVICE_ID_INTEL_A118_PCH_BRIDGE 0xa118
+#define DEVICE_ID_INTEL_9C98_PCH_BRIDGE 0x9c98
+
+// Intel Broadwell definitions
+#define DEVICE_ID_INTEL_6F00_HOST_BRIDGE 0x6f00
+#define DEVICE_ID_INTEL_6F01_ROOT_PORT 0x6f01
+#define DEVICE_ID_INTEL_6F02_ROOT_PORT 0x6f02
+#define DEVICE_ID_INTEL_6F03_ROOT_PORT 0x6f03
+#define DEVICE_ID_INTEL_6F04_ROOT_PORT 0x6f04
+#define DEVICE_ID_INTEL_6F05_ROOT_PORT 0x6f05
+#define DEVICE_ID_INTEL_6F06_ROOT_PORT 0x6f06
+#define DEVICE_ID_INTEL_6F07_ROOT_PORT 0x6f07
+#define DEVICE_ID_INTEL_6F08_ROOT_PORT 0x6f08
+#define DEVICE_ID_INTEL_6F09_ROOT_PORT 0x6f09
+#define DEVICE_ID_INTEL_6F0A_ROOT_PORT 0x6f0A
+#define DEVICE_ID_INTEL_6F0B_ROOT_PORT 0x6f0B
+#define DEVICE_ID_INTEL_1601_ROOT_PORT 0x1601
+#define DEVICE_ID_INTEL_1605_ROOT_PORT 0x1605
+#define DEVICE_ID_INTEL_1609_ROOT_PORT 0x1609
+#define DEVICE_ID_INTEL_BROADWELL_U_HOST_BRIDGE 0x1604
+#define DEVICE_ID_INTEL_BROADWELL_H_HOST_BRIDGE 0x1614
+
+// Intel Skylake definitions
+#define DEVICE_ID_INTEL_1901_ROOT_PORT 0x1901
+#define DEVICE_ID_INTEL_1905_ROOT_PORT 0x1905
+#define DEVICE_ID_INTEL_1909_ROOT_PORT 0x1909
+#define DEVICE_ID_INTEL_SKYLAKE_U_HOST_BRIDGE 0x1904
+#define DEVICE_ID_INTEL_SKYLAKE_S_HOST_BRIDGE 0x191F
+#define DEVICE_ID_INTEL_SKYLAKE_H_HOST_BRIDGE 0x1910
+
+// Intel Skylake-E definitions
+#define DEVICE_ID_INTEL_2030_ROOT_PORT 0x2030
+#define DEVICE_ID_INTEL_2033_ROOT_PORT 0x2033
+
+// Intel Kabylake definitions
+#define DEVICE_ID_INTEL_KABYLAKE_U_HOST_BRIDGE 0x5904
+#define DEVICE_ID_INTEL_KABYLAKE_H_HOST_BRIDGE 0x5910
+
+// AMD Matisse, Rome definitions
+#define DEVICE_ID_AMD_1483_ROOT_PORT 0x1483
+// AMD Castle Peak definition
+#define DEVICE_ID_AMD_1480_ROOT_PORT 0x1480
+// AMD Renoir-H definition
+#define DEVICE_ID_AMD_1630_ROOT_PORT 0x1630
+
+// Dell SkullTrail definitions
+#define DELL_4003_SUBDEVICE_ID 0x021D
+
+// Dell Quicksilver MLK definitions
+#define DELL_0040_SUBDEVICE_ID 0x043a
+
+// HP Tylersburg definitions
+#define TYLERSBURG_Z800_SSDEVID 0x130B
+
+// HP Romley definitions
+#define ROMLEY_Z820_SSDEVID 0x158B
+#define ROMLEY_Z620_SSDEVID 0x158A
+#define ROMLEY_Z420_SSDEVID 0x1589
+
+// HP Grantley definitions
+#define GRANTLEY_Z840_SSDEVID 0x2129
+#define GRANTLEY_Z640_SSDEVID 0x212A
+#define GRANTLEY_Z440_SSDEVID 0x212B
+
+// HP Purley definitions
+#define HP_QUADRO_Z4GEN4_DEVID 0xA2D2
+#define PURLEY_Z8GEN4_SSDEVID 0x81C7
+#define PURLEY_Z6GEN4_SSDEVID 0x81C6
+#define PURLEY_Z4GEN4_SSDEVID 0x81C5
+
+// Lenovo Romley definitions
+#define ROMLEY_C30_SSDEVID 0x1028
+#define ROMLEY_D30_SSDEVID 0x1027
+#define ROMLEY_S30_SSDEVID 0x1026
+
+// Dell Romley definitions
+#define ROMLEY_T7600_SSDEVID 0x0495
+#define ROMLEY_T5600_SSDEVID 0x0496
+#define ROMLEY_T3600_SSDEVID 0x0497
+
+// Dell Romley + IVB-EP CPU Refresh
+#define IVYTOWN_T7610_SSDEVID 0x05D4
+#define IVYTOWN_T5610_SSDEVID 0x05D3
+
+// Dell Romley (Ipanema)
+#define ROMLEY_R7610_SSDEVID 0x05A1
+
+// FTS Romley definitions
+#define ROMLEY_R920_SSDEVID 0x11B6
+
+// Lenovo Grantley (Messi, Pele, Ronaldo)
+#define GRANTLEY_V40_SSDEVID 0x1031
+#define GRANTLEY_D40_SSDEVID 0x1030
+#define GRANTLEY_S40_SSDEVID 0x102F
+
+// Dell Grantley (Avalon)
+#define GRANTLEY_T7810_SSDEVID 0x0618
+#define GRANTLEY_T7910_SSDEVID 0x0619
+
+// Lenovo Purley (Nile, Volga)
+#define PURLEY_P920_SSDEVID 0x1038
+#define PURLEY_P720_SSDEVID 0x1037
+#define PURLEY_P520_SSDEVID 0x1036
+
+// Dell Purley (Matira)
+#define PURLEY_MATIRA3X_DEVID 0xA2D2
+#define PURLEY_MATIRA3X_SSDEVID 0x08B1
+#define PURLEY_MATIRA3_SSDEVID 0x0738
+#define PURLEY_MATIRA5_SSDEVID 0x0739
+#define PURLEY_MATIRA7_SSDEVID 0x073A
+
+// FTS Purley
+#define PURLEY_R970_SSDEVID 0x1230
+#define PURLEY_M770_SSDEVID 0x1231
+
+// HP Arrandale, Clarksfield, X58 workstation definitions
+#define ARRANDALE_Z200SFF_SSDEVID 0x304A
+#define CLARKSFIELD_Z200_SSDEVID 0x170B
+#define X58_Z400_SSDEVID 0x1309
+
+// GIGABYTE Sniper 3 (Z77)
+#define GIGABYTE_SNIPER_3_SSDEVID_1 0x5000
+#define GIGABYTE_SNIPER_3_SSDEVID_2 0x5001
+
+// Supermicro Quadro VCA definitions
+#define SUPERMICRO_QUADRO_VCA_DEVID 0x8D44
+#define SUPERMICRO_QUADRO_VCA_SSDEVID 0x7270
+
+// Asus Quadro BOXX definitions
+#define ASUS_QUADRO_BOXX_DEVID 0x8D44
+#define ASUS_QUADRO_BOXX_SSDEVID 0x85F6
+
+// APEXX8 Quadro BOXX definitions
+#define APEXX8_QUADRO_BOXX_DEVID 0xA2D3
+#define APEXX8_QUADRO_BOXX_SSDEVID 0x098e
+
+// APEXX5 Quadro BOXX definitions
+#define APEXX5_QUADRO_BOXX_DEVID 0xA2D3
+#define APEXX5_QUADRO_BOXX_SSDEVID 0x1000
+
+// ASUS X99-E-10G
+#define ASUS_X99_E_10G_SSDEVID 0x8600
+
+// VIA definitions
+#define DEVICE_ID_VIA_VT8369B_HOST_BRIDGE 0x0308
+
+// Foxconn Einstein 64 [8086:a1c1][105b:7270]
+#define FOXCONN_EINSTEIN_64_DEVID 0xA1C1
+#define FOXCONN_EINSTEIN_64_SSDEVID 0x7270
+
+// Cavium, Inc. CN99xx [ThunderX2] [177d:af00]
+#define CAVIUM_X2_DEVID 0xAF00
+
+// Lenovo Tomcat/Falcon/Hornet Workstations
+#define LENOVO_TOMCAT_DEVID 0x1B81
+#define LENOVO_TOMCAT_SSDEVID 0x104e
+#define LENOVO_FALCON_DEVID 0x7A8A
+#define LENOVO_FALCON_SSDEVID 0x1055
+#define LENOVO_HORNET_DEVID 0x7A8A
+#define LENOVO_HORNET_SSDEVID 0x1056
+
+// NVIDIA C51
+#define NVIDIA_C51_DEVICE_ID_MIN 0x2F0
+#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_0 0x2F0
+#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_1 0x2F1
+#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_2 0x2F2
+#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_3 0x2F3
+#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_0 0x2F4
+#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_1 0x2F5
+#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_2 0x2F6
+#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_3 0x2F7
+#define NVIDIA_C51_DEVICE_ID_MAX 0x2F7
+
+// NVIDIA MCP55
+#define NVIDIA_MCP55_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0369
+
+// NVIDIA MCP61
+#define NVIDIA_MCP61_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x03EA
+#define NVIDIA_MCP61_ULDT_CFG_0_DEVICE_ID_PA 0x03E2
+
+// NVIDIA C55
+#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_PRO 0x03A0
+#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_SLIX16 0x03A1
+#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_SLI 0x03A3
+#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_U 0x03A2
+
+// NVIDIA MCP65
+#define NVIDIA_MCP65_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0444
+
+// NVIDIA MCP67/MCP68
+#define NVIDIA_MCP67_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0547
+
+// NVIDIA MCP73
+#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_PV 0x07C0
+#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_O 0x07C1
+#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_S 0x07C2
+#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_V 0x07C3
+#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_RSVD_0 0x07C4
+#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_RSVD_1 0x07C5
+#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_RSVD_2 0x07C6
+#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_D 0x07C7
+
+// NVIDIA C73
+#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_SLI2 0x0800
+#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_SLI_ALL 0x0801
+#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_SLIX8 0x0802
+#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_U 0x0803
+#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_0 0x0804
+#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_1 0x0805
+#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_2 0x0806
+#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_3 0x0807
+
+// NVIDIA MCP77/78
+#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0754
+#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_1 0x0755
+#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_2 0x0756
+#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_3 0x0757
+#define NVIDIA_MCP77_MCP_SM_CFG_0_DEVICE_ID_UNIT_SM 0x0752
+
+// NVIDIA MCP79/7A
+#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_DEFAULT 0x0A80
+#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_SLIX16 0x0A81
+#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_SLI 0x0A82
+#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_U 0x0A83
+#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_GM 0x0A84
+#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_GVM 0x0A85
+#define NVIDIA_MCP79_MCP_SM_CFG_0_DEVICE_ID_UNIT_SM 0x0AA2
+
+// NVIDIA MCP89/P83
+#define NVIDIA_MCP89_CPU_PCI_0_DEVICE_ID_DEFAULT 0x00000D60
+
+///////////////////////////////////////////////////////////////////////////////////////////
+//
+// enumeration of chipset families
+//
+///////////////////////////////////////////////////////////////////////////////////////////
+
+enum {
+ CS_UNKNOWN = 0x0000,
+ CS_UNKNOWN_PCIE = 0x1000
+, CS_INTEL_2580
+, CS_INTEL_2584
+, CS_INTEL_2588
+, CS_INTEL_2590
+, CS_INTEL_25E0
+, CS_INTEL_29X0
+, CS_INTEL_29E0
+, CS_INTEL_359E
+, CS_INTEL_4000
+, CS_INTEL_4003
+, CS_INTEL_3400
+, CS_INTEL_3B42
+, CS_INTEL_2770
+, CS_INTEL_2774
+, CS_INTEL_277C
+, CS_INTEL_2A40
+, CS_INTEL_2E00
+, CS_INTEL_0040
+, CS_INTEL_1C10
+, CS_INTEL_1C46
+, CS_INTEL_1C49
+, CS_INTEL_1D40
+, CS_INTEL_8D47
+, CS_INTEL_1E10
+, CS_INTEL_8C4B
+, CS_INTEL_8CC4
+, CS_INTEL_A145
+, CS_INTEL_A2C5
+, CS_INTEL_A242
+, CS_INTEL_A2D2
+, CS_INTEL_A2C9
+, CS_INTEL_A301
+, CS_INTEL_0685
+, CS_INTEL_4381
+, CS_INTEL_7A82
+, CS_NVIDIA_CK804
+, CS_NVIDIA_C19
+, CS_NVIDIA_C51
+, CS_NVIDIA_MCP55
+, CS_NVIDIA_MCP61
+, CS_NVIDIA_C55
+, CS_NVIDIA_MCP65
+, CS_NVIDIA_MCP67
+, CS_NVIDIA_MCP73
+, CS_NVIDIA_C73
+, CS_NVIDIA_MCP77
+, CS_NVIDIA_MCP79
+, CS_NVIDIA_MCP89
+, CS_NVIDIA_TEGRA3
+, CS_SIS_649
+, CS_SIS_656
+, CS_ATI_RS400
+, CS_ATI_RS400_A21
+, CS_ATI_RS480
+, CS_ATI_RS480_A21
+, CS_AMD_RS780
+, CS_VIA_VT8369B
+, CS_ATI_FX790
+, CS_ATI_RD850
+, CS_ATI_RD870
+, CS_ATI_RD890
+, CS_ATI_FX890
+, CS_ATI_RX780
+, CS_ATI_FX990
+, CS_AMD_GX890
+, CS_AMD_X370
+, CS_VIA_VX900
+, CS_APM_STORM
+, CS_IBM_VENICE
+, CS_NVIDIA_T124
+, CS_NVIDIA_T210
+, CS_NVIDIA_T186
+, CS_NVIDIA_T194
+, CS_NVIDIA_T234
+, CS_NVIDIA_T23x
+, CS_NVIDIA_TH500
+, CS_NVIDIA_T264
+, CS_MARVELL_THUNDERX2
+, CS_REDHAT_QEMU
+, CS_AMPERE_EMAG
+, CS_HUAWEI_KUNPENG920
+, CS_MELLANOX_BLUEFIELD
+, CS_AMAZON_GRAVITRON2
+, CS_FUJITSU_A64FX
+, CS_AMPERE_ALTRA
+, CS_ARM_NEOVERSEN1
+, CS_MARVELL_OCTEON_CN96XX
+, CS_MARVELL_OCTEON_CN98XX
+, CS_INTEL_C620
+, CS_HYGON_C86
+, CS_PHYTIUM_S2500
+, CS_MELLANOX_BLUEFIELD2
+, CS_MELLANOX_BLUEFIELD3
+, CS_ALIBABA_YITIAN
+, CS_INTEL_1B81
+, CS_INTEL_18DC
+, CS_INTEL_7A04
+, CS_INTEL_5795
+, CS_RESERVED_1
+, CS_SIFIVE_FU740_C000
+, CS_PLDA_XPRESSRICH_AXI_REF
+, CS_AMPERE_AMPEREONE160
+, CS_PHYTIUM_S5000
+, CS_RESERVED_2
+, CS_RESERVED_3
+, CS_AMD_RPH
+, CS_INTEL_B660
+, CS_AMPERE_AMPEREONE192
+, CS_MAX_PCIE
+};
+
+// Chip IDs for Tegra SoCs
+#define NV_CHIP_ID_T234 0x2350
+
+enum {
+ RP_UNKNOWN = 0
+, RP_BROADCOM_HT2100
+, RP_INTEL_2581
+, RP_INTEL_2585
+, RP_INTEL_2589
+, RP_INTEL_2591
+, RP_INTEL_3597
+, RP_INTEL_2775
+, RP_INTEL_2771
+, RP_INTEL_8110
+, RP_INTEL_8112
+, RP_INTEL_8180
+, RP_INTEL_8181
+, RP_INTEL_8184
+, RP_INTEL_8185
+, RP_INTEL_3C02
+, RP_INTEL_3C03
+, RP_INTEL_3C04
+, RP_INTEL_3C05
+, RP_INTEL_3C06
+, RP_INTEL_3C07
+, RP_INTEL_3C08
+, RP_INTEL_3C09
+, RP_INTEL_3C0A
+, RP_INTEL_3C0B
+, RP_INTEL_2F04
+, RP_INTEL_2F08
+, RP_INTEL_0C01
+, RP_INTEL_1601
+, RP_INTEL_1605
+, RP_INTEL_1609
+, RP_INTEL_1901
+, RP_INTEL_1905
+, RP_INTEL_1909
+, RP_INTEL_5904
+, RP_NVIDIA_CK804
+, RP_NVIDIA_C19
+, RP_NVIDIA_C51
+, RP_NVIDIA_MCP55
+, RP_NVIDIA_MCP61
+, RP_NVIDIA_C55
+, RP_NVIDIA_MCP65
+};
+
+#endif //NVDEVID_H
+
diff --git a/src/common/shared/nvstatus/nvstatus.c b/src/common/shared/nvstatus/nvstatus.c
new file mode 100644
index 0000000..c01f4ea
--- /dev/null
+++ b/src/common/shared/nvstatus/nvstatus.c
@@ -0,0 +1,83 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvstatus.h"
+
+#if !defined(NV_PRINTF_STRING_SECTION)
+#if defined(NVRM) && NVOS_IS_LIBOS
+#include "libos_log.h"
+#define NV_PRINTF_STRING_SECTION LIBOS_SECTION_LOGGING
+#else // defined(NVRM) && NVOS_IS_LIBOS
+#define NV_PRINTF_STRING_SECTION
+#endif // defined(NVRM) && NVOS_IS_LIBOS
+#endif // !defined(NV_PRINTF_STRING_SECTION)
+
+/*
+ * Include nvstatuscodes.h twice. Once for creating constant strings in the
+ * NV_PRINTF_STRING_SECTION section of the executable, and once to build
+ * the g_StatusCodeList table.
+ */
+#undef NV_STATUS_CODE
+#undef SDK_NVSTATUSCODES_H
+#define NV_STATUS_CODE( name, code, string ) static NV_PRINTF_STRING_SECTION \
+ const char rm_pvt_##name##_str[] = string " [" #name "]";
+#include "nvstatuscodes.h"
+
+#undef NV_STATUS_CODE
+#undef SDK_NVSTATUSCODES_H
+#define NV_STATUS_CODE( name, code, string ) { name, rm_pvt_##name##_str },
+static struct NvStatusCodeString
+{
+    NV_STATUS statusCode;
+    const char *statusString;
+} g_StatusCodeList[] = {
+    #include "nvstatuscodes.h"
+    { 0xffffffff, "Unknown error code!" } // Some compilers don't like the trailing ','
+};
+#undef NV_STATUS_CODE
+
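The double include above is the classic X-macro pattern; here is a condensed, self-contained sketch of the same idea (hypothetical MY_CODE entries and "codes.h" file, not the real nvstatuscodes.h):

    /* Imagine a list file "codes.h" containing only entries like:
     *   MY_CODE(MY_OK,      0, "success")
     *   MY_CODE(MY_GENERIC, 1, "generic failure")
     * Expansion 1: define one string constant per entry. */
    #define MY_CODE(name, code, str) static const char name##_str[] = str;
    #include "codes.h"
    #undef MY_CODE

    /* Expansion 2: build a lookup table referencing those strings. */
    #define MY_CODE(name, code, str) { code, name##_str },
    static const struct { int code; const char *str; } myTable[] = {
        #include "codes.h"
        { -1, "unknown" }
    };
    #undef MY_CODE
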
+/*!
+ * @brief Given an NV_STATUS code, returns the corresponding status string.
+ *
+ * @param[in] nvStatusIn NV_STATUS code for which the string is required
+ *
+ * @returns Corresponding status string from nvstatuscodes.h
+ *
+ * TODO: Bug 200025711: convert this to an array-indexed lookup, instead of a linear search
+ *
+*/
+const char *nvstatusToString(NV_STATUS nvStatusIn)
+{
+    static NV_PRINTF_STRING_SECTION const char rm_pvt_UNKNOWN_str[] = "Unknown error code!";
+    NvU32 i;
+    NvU32 n = ((NvU32)(sizeof(g_StatusCodeList))/(NvU32)(sizeof(g_StatusCodeList[0])));
+    for (i = 0; i < n; i++)
+    {
+        if (g_StatusCodeList[i].statusCode == nvStatusIn)
+        {
+            return g_StatusCodeList[i].statusString;
+        }
+    }
+
+    return rm_pvt_UNKNOWN_str;
+}
diff --git a/src/common/softfloat/COPYING.txt b/src/common/softfloat/COPYING.txt
new file mode 100644
index 0000000..b577946
--- /dev/null
+++ b/src/common/softfloat/COPYING.txt
@@ -0,0 +1,37 @@
+
+License for Berkeley SoftFloat Release 3d
+
+John R. Hauser
+2017 August 10
+
+The following applies to the whole of SoftFloat Release 3d as well as to
+each source file individually.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions, and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/src/common/softfloat/nvidia/nv-softfloat.h b/src/common/softfloat/nvidia/nv-softfloat.h
new file mode 100644
index 0000000..51680ab
--- /dev/null
+++ b/src/common/softfloat/nvidia/nv-softfloat.h
@@ -0,0 +1,163 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NV_SOFTFLOAT_H__
+#define __NV_SOFTFLOAT_H__
+
+/*
+ * This header file provides utility code built on top of the softfloat floating
+ * point emulation library.
+ */
+
+#include "softfloat.h"
+#include "nvtypes.h"
+#include "platform.h"
+
+/*
+ * float32_t stores the bit pattern for a 32-bit single-precision IEEE floating
+ * point value in a structure containing a uint32_t:
+ *
+ * typedef struct { uint32_t v; } float32_t;
+ *
+ * In some cases, clients pass in a 32-bit single-precision IEEE floating
+ * point value in an NvU32.
+ *
+ * Define functions to change the "view" between an NvU32 and a float32_t.
+ */
+INLINE float32_t NvU32viewAsF32(NvU32 u)
+{
+    float32_t f = { .v = u };
+    return f;
+}
+
+INLINE NvU32 F32viewAsNvU32(float32_t f)
+{
+    return f.v;
+}
+
+/*
+ * Convert the value of a float32_t to an NvU16.
+ *
+ * The conversion requires several steps:
+ *
+ * - Clamp the float32_t value to the [0,NV_U16_MAX] range of NvU16.
+ *
+ * - Use softfloat to convert the float32_t to ui32, with appropriate rounding.
+ *
+ * - Due to the clamping and rounding above, the value in the ui32 should be in
+ * the range of NvU16 and can be safely returned as NvU16.
+ */
+INLINE NvU16 F32toNvU16(float32_t f)
+{
+    const float32_t minF32 = NvU32viewAsF32(0);
+    const float32_t maxF32 = ui32_to_f32(NV_U16_MAX);
+    NvU32 u;
+
+    /* clamp to zero: f = (f < minF32) ? minF32 : f */
+    f = f32_lt(f, minF32) ? minF32 : f;
+
+    /* clamp to NV_U16_MAX: f = (maxF32 < f) ? maxF32 : f */
+    f = f32_lt(maxF32, f) ? maxF32 : f;
+
+    /*
+     * The "_r_minMag" in "f32_to_ui32_r_minMag" means round "to minimum
+     * magnitude" (i.e., round towards zero).
+     *
+     * The "exact = FALSE" argument means do not raise the inexact exception
+     * flag, even if the conversion is inexact.
+     *
+     * For more on f32_to_ui32_r_minMag() semantics, see
+     * drivers/common/softfloat/doc/SoftFloat.html
+     */
+    u = f32_to_ui32_r_minMag(f, NV_FALSE /* exact */);
+    nvAssert(u <= NV_U16_MAX);
+
+    return (NvU16) u;
+}
+
+/*
+ * Perform the following with float32_t: (a * b) + (c * d) + e
+ */
+INLINE float32_t F32_AxB_plus_CxD_plus_E(
+    float32_t a,
+    float32_t b,
+    float32_t c,
+    float32_t d,
+    float32_t e)
+{
+    const float32_t tmpA = f32_mul(a, b);
+    const float32_t tmpB = f32_mul(c, d);
+    const float32_t tmpC = f32_add(tmpA, tmpB);
+
+    return f32_add(tmpC, e);
+}
+
+/*
+ * Perform the following with float32_t: (a * b) - (c * d)
+ */
+INLINE float32_t F32_AxB_minus_CxD(
+    float32_t a,
+    float32_t b,
+    float32_t c,
+    float32_t d)
+{
+    const float32_t tmpA = f32_mul(a, b);
+    const float32_t tmpB = f32_mul(c, d);
+
+    return f32_sub(tmpA, tmpB);
+}
+
+/*
+ * Perform the following with float64_t: a * -1
+ */
+INLINE float64_t F64_negate(float64_t a)
+{
+    const float64_t negOneF64 = i32_to_f64(-1);
+    return f64_mul(negOneF64, a);
+}
+
+INLINE float16_t nvUnormToFp16(NvU16 unorm, float32_t maxf)
+{
+    const float32_t unormf = ui32_to_f32(unorm);
+    const float32_t normf = f32_div(unormf, maxf);
+
+    return f32_to_f16(normf);
+}
+
+INLINE float16_t nvUnorm10ToFp16(NvU16 unorm10)
+{
+    const float32_t maxf = NvU32viewAsF32(0x44800000U); // 1024.0f
+    return nvUnormToFp16(unorm10, maxf);
+}
+
+INLINE float32_t f32_min(float32_t a, float32_t b)
+{
+    return (f32_lt(a, b)) ? a : b;
+}
+
+INLINE float32_t f32_max(float32_t a, float32_t b)
+{
+    return (f32_lt(a, b)) ? b : a;
+}
+
+#endif /* __NV_SOFTFLOAT_H__ */
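
To make the helpers above concrete, an illustrative editorial sketch (values chosen so the arithmetic is easy to check by hand):

    static void exampleSoftfloat(void)
    {
        const float32_t twoF32 = NvU32viewAsF32(0x40000000U);   /* 2.0f */
        const float32_t threeF32 = NvU32viewAsF32(0x40400000U); /* 3.0f */

        /* (2*3) + (2*3) + 2 = 14.0f, computed entirely in emulation */
        float32_t r = F32_AxB_plus_CxD_plus_E(twoF32, threeF32,
                                              twoF32, threeF32, twoF32);

        NvU16 asU16 = F32toNvU16(r);        /* clamps, rounds toward zero: 14 */
        float16_t h = nvUnorm10ToFp16(512); /* 512/1024 -> 0.5 in fp16 */

        (void)asU16; (void)h;
    }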
diff --git a/src/common/softfloat/nvidia/platform.h b/src/common/softfloat/nvidia/platform.h
new file mode 100644
index 0000000..f6db383
--- /dev/null
+++ b/src/common/softfloat/nvidia/platform.h
@@ -0,0 +1,56 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef nvidia_softfloat_platform_h
+#define nvidia_softfloat_platform_h 1
+
+#include "nvtypes.h"
+
+/*
+ * Build softfloat for little endian CPUs: all NVIDIA target platforms are
+ * little endian.
+ */
+#define LITTLEENDIAN 1
+
+/*
+ * "INLINE" is used by softfloat like this:
+ *
+ * INLINE uint32_t softfloat_foo(...)
+ * {
+ * ...
diff --git a/src/common/softfloat/nvidia/platform.h b/src/common/softfloat/nvidia/platform.h
new file mode 100644
index 0000000..f6db383
--- /dev/null
+++ b/src/common/softfloat/nvidia/platform.h
@@ -0,0 +1,56 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef nvidia_softfloat_platform_h
+#define nvidia_softfloat_platform_h 1
+
+#include "nvtypes.h"
+
+/*
+ * Build softfloat for little endian CPUs: all NVIDIA target platforms are
+ * little endian.
+ */
+#define LITTLEENDIAN 1
+
+/*
+ * "INLINE" is used by softfloat like this:
+ *
+ *   INLINE uint32_t softfloat_foo(...)
+ *   {
+ *       ...
+ *   }
+ */
+#define INLINE static NV_INLINE
+
+#if !defined(nvAssert)
+#define nvAssert(x)
+#endif
+
+/*
+ * softfloat will use THREAD_LOCAL to tag variables that should be per-thread;
+ * it could be set to, e.g., gcc's "__thread" keyword. If THREAD_LOCAL is left
+ * undefined, these variables will default to being ordinary global variables.
+ */
+#undef THREAD_LOCAL
+
+#endif /* nvidia_softfloat_platform_h */
diff --git a/src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c b/src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c
new file mode 100644
index 0000000..cc73833
--- /dev/null
+++ b/src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c
@@ -0,0 +1,51 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by `aPtr' into a 16-bit floating-point
+| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/
+uint_fast16_t softfloat_commonNaNToF16UI( const struct commonNaN *aPtr )
+{
+
+    return (uint_fast16_t) aPtr->sign<<15 | 0x7E00 | aPtr->v64>>54;
+
+}
+
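The shift constant encodes where binary16 keeps its NaN payload: the common
NaN form stores the payload left-justified in v64, so v64>>54 drops the top
ten bits back into the f16 fraction field, while 0x7E00 contributes the
all-ones exponent plus the quiet bit. A worked editorial example (not part of
the patch; assumes <assert.h> in addition to the headers above):

#include <assert.h>

/* Editorial sketch: pack a 10-bit payload into a quiet f16 NaN. */
static void commonNaNToF16UI_example(void)
{
    struct commonNaN n;

    n.sign = 1;
    n.v64  = (uint64_t) 0x155 << 54;  /* payload, left-justified in v64 */
    n.v0   = 0;

    /* sign<<15 | 0x7E00 | v64>>54 == 0x8000 | 0x7E00 | 0x0155 == 0xFF55 */
    assert( softfloat_commonNaNToF16UI( &n ) == 0xFF55 );
}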
diff --git a/src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c b/src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c
new file mode 100644
index 0000000..278cdcf
--- /dev/null
+++ b/src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c
@@ -0,0 +1,51 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by `aPtr' into a 32-bit floating-point
+| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/
+uint_fast32_t softfloat_commonNaNToF32UI( const struct commonNaN *aPtr )
+{
+
+    return (uint_fast32_t) aPtr->sign<<31 | 0x7FC00000 | aPtr->v64>>41;
+
+}
+
diff --git a/src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c b/src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
new file mode 100644
index 0000000..2346b06
--- /dev/null
+++ b/src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
@@ -0,0 +1,53 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by `aPtr' into a 64-bit floating-point
+| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/
+uint_fast64_t softfloat_commonNaNToF64UI( const struct commonNaN *aPtr )
+{
+
+    return
+        (uint_fast64_t) aPtr->sign<<63 | UINT64_C( 0x7FF8000000000000 )
+            | aPtr->v64>>12;
+
+}
+
diff --git a/src/common/softfloat/source/8086-SSE/s_f16UIToCommonNaN.c b/src/common/softfloat/source/8086-SSE/s_f16UIToCommonNaN.c
new file mode 100644
index 0000000..18b6dfd
--- /dev/null
+++ b/src/common/softfloat/source/8086-SSE/s_f16UIToCommonNaN.c
@@ -0,0 +1,59 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Assuming `uiA' has the bit pattern of a 16-bit floating-point NaN, converts
+| this NaN to the common NaN form, and stores the resulting common NaN at the
+| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+void softfloat_f16UIToCommonNaN( uint_fast16_t uiA, struct commonNaN *zPtr )
+{
+
+    if ( softfloat_isSigNaNF16UI( uiA ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    zPtr->sign = uiA>>15;
+    zPtr->v64  = (uint_fast64_t) uiA<<54;
+    zPtr->v0   = 0;
+
+}
+
diff --git a/src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c b/src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
new file mode 100644
index 0000000..0c6e610
--- /dev/null
+++ b/src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
@@ -0,0 +1,59 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Assuming `uiA' has the bit pattern of a 32-bit floating-point NaN, converts
+| this NaN to the common NaN form, and stores the resulting common NaN at the
+| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+void softfloat_f32UIToCommonNaN( uint_fast32_t uiA, struct commonNaN *zPtr )
+{
+
+    if ( softfloat_isSigNaNF32UI( uiA ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    zPtr->sign = uiA>>31;
+    zPtr->v64  = (uint_fast64_t) uiA<<41;
+    zPtr->v0   = 0;
+
+}
+
diff --git a/src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c b/src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
new file mode 100644
index 0000000..c81dfa9
--- /dev/null
+++ b/src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
@@ -0,0 +1,59 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Assuming `uiA' has the bit pattern of a 64-bit floating-point NaN, converts
+| this NaN to the common NaN form, and stores the resulting common NaN at the
+| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+void softfloat_f64UIToCommonNaN( uint_fast64_t uiA, struct commonNaN *zPtr )
+{
+
+    if ( softfloat_isSigNaNF64UI( uiA ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    zPtr->sign = uiA>>63;
+    zPtr->v64  = uiA<<12;
+    zPtr->v0   = 0;
+
+}
+
diff --git a/src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c b/src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
new file mode 100644
index 0000000..daaa31d
--- /dev/null
+++ b/src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
@@ -0,0 +1,63 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Interpreting `uiA' and `uiB' as the bit patterns of two 32-bit floating-
+| point values, at least one of which is a NaN, returns the bit pattern of
+| the combined NaN result. If either `uiA' or `uiB' has the pattern of a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast32_t
+ softfloat_propagateNaNF32UI( uint_fast32_t uiA, uint_fast32_t uiB )
+{
+    bool isSigNaNA;
+
+    isSigNaNA = softfloat_isSigNaNF32UI( uiA );
+    if ( isSigNaNA || softfloat_isSigNaNF32UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        if ( isSigNaNA ) return uiA | 0x00400000;
+    }
+    return (isNaNF32UI( uiA ) ? uiA : uiB) | 0x00400000;
+
+}
+
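In other words: any signaling NaN raises the invalid flag, operand A's payload
wins whenever A is a NaN, and the result is always quieted by ORing in the f32
quiet bit (0x00400000). A brief editorial sketch (not part of the patch;
assumes <assert.h>):

#include <assert.h>

/* Editorial sketch: a signaling NaN in A is quieted and raises invalid. */
static void propagateNaNF32UI_example(void)
{
    uint_fast32_t sNaN = 0x7F800001;  /* exponent all ones, quiet bit clear */
    uint_fast32_t qNaN = 0x7FC00002;  /* quiet NaN */

    softfloat_exceptionFlags = 0;
    assert( softfloat_propagateNaNF32UI( sNaN, qNaN ) == 0x7FC00001 );
    assert( softfloat_exceptionFlags & softfloat_flag_invalid );
}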
diff --git a/src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c b/src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
new file mode 100644
index 0000000..78a29da
--- /dev/null
+++ b/src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
@@ -0,0 +1,63 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Interpreting `uiA' and `uiB' as the bit patterns of two 64-bit floating-
+| point values, at least one of which is a NaN, returns the bit pattern of
+| the combined NaN result. If either `uiA' or `uiB' has the pattern of a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast64_t
+ softfloat_propagateNaNF64UI( uint_fast64_t uiA, uint_fast64_t uiB )
+{
+    bool isSigNaNA;
+
+    isSigNaNA = softfloat_isSigNaNF64UI( uiA );
+    if ( isSigNaNA || softfloat_isSigNaNF64UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        if ( isSigNaNA ) return uiA | UINT64_C( 0x0008000000000000 );
+    }
+    return (isNaNF64UI( uiA ) ? uiA : uiB) | UINT64_C( 0x0008000000000000 );
+
+}
+
diff --git a/src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c b/src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
new file mode 100644
index 0000000..f2c25ad
--- /dev/null
+++ b/src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
@@ -0,0 +1,52 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include "platform.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Raises the exceptions specified by `flags'. Floating-point traps can be
+| defined here if desired. It is currently not possible for such a trap
+| to substitute a result value. If traps are not implemented, this routine
+| should be simply `softfloat_exceptionFlags |= flags;'.
+*----------------------------------------------------------------------------*/
+void softfloat_raiseFlags( uint_fast8_t flags )
+{
+
+    softfloat_exceptionFlags |= flags;
+
+}
+
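Since softfloat_raiseFlags() only accumulates bits into the global
softfloat_exceptionFlags, callers can bracket any sequence of operations with
a clear-then-test pattern. An editorial sketch (not part of the patch;
divideRaisedInvalid is a hypothetical helper; assumes <stdbool.h> and
softfloat.h):

/* Editorial sketch: detect whether a division raised the invalid exception. */
static bool divideRaisedInvalid(float32_t a, float32_t b, float32_t *out)
{
    softfloat_exceptionFlags = 0;   /* clear previously accumulated flags */
    *out = f32_div(a, b);           /* f32_div() is added later in this patch */
    return (softfloat_exceptionFlags & softfloat_flag_invalid) != 0;
}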
diff --git a/src/common/softfloat/source/8086-SSE/specialize.h b/src/common/softfloat/source/8086-SSE/specialize.h
new file mode 100644
index 0000000..abf8150
--- /dev/null
+++ b/src/common/softfloat/source/8086-SSE/specialize.h
@@ -0,0 +1,216 @@
+
+/*============================================================================
+
+This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#ifndef specialize_h
+#define specialize_h 1
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "softfloat_types.h"
+
+/*----------------------------------------------------------------------------
+| Default value for `softfloat_detectTininess'.
+*----------------------------------------------------------------------------*/
+#define init_detectTininess softfloat_tininess_afterRounding
+
+/*----------------------------------------------------------------------------
+| The values to return on conversions to 32-bit integer formats that raise an
+| invalid exception.
+*----------------------------------------------------------------------------*/
+#define ui32_fromPosOverflow 0xFFFFFFFF
+#define ui32_fromNegOverflow 0
+#define ui32_fromNaN         0xFFFFFFFF
+#define i32_fromPosOverflow  0x7FFFFFFF
+#define i32_fromNegOverflow  (-0x7FFFFFFF - 1)
+#define i32_fromNaN          0x7FFFFFFF
+
+/*----------------------------------------------------------------------------
+| The values to return on conversions to 64-bit integer formats that raise an
+| invalid exception.
+*----------------------------------------------------------------------------*/
+#define ui64_fromPosOverflow UINT64_C( 0xFFFFFFFFFFFFFFFF )
+#define ui64_fromNegOverflow 0
+#define ui64_fromNaN         UINT64_C( 0xFFFFFFFFFFFFFFFF )
+#define i64_fromPosOverflow  UINT64_C( 0x7FFFFFFFFFFFFFFF )
+#define i64_fromNegOverflow  (-UINT64_C( 0x7FFFFFFFFFFFFFFF ) - 1)
+#define i64_fromNaN          UINT64_C( 0x7FFFFFFFFFFFFFFF )
+
+/*----------------------------------------------------------------------------
+| "Common NaN" structure, used to transfer NaN representations from one format
+| to another.
+*----------------------------------------------------------------------------*/
+struct commonNaN {
+    bool sign;
+#ifdef LITTLEENDIAN
+    uint64_t v0, v64;
+#else
+    uint64_t v64, v0;
+#endif
+};
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 16-bit floating-point NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF16UI 0xFE00
+
+/*----------------------------------------------------------------------------
+| Returns true when 16-bit unsigned integer `uiA' has the bit pattern of a
+| 16-bit floating-point signaling NaN.
+| Note: This macro evaluates its argument more than once.
+*----------------------------------------------------------------------------*/
+#define softfloat_isSigNaNF16UI( uiA ) ((((uiA) & 0x7E00) == 0x7C00) && ((uiA) & 0x01FF))
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by `aPtr' into a 16-bit floating-point
+| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/
+uint_fast16_t softfloat_commonNaNToF16UI( const struct commonNaN *aPtr );
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 32-bit floating-point NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF32UI 0xFFC00000
+
+/*----------------------------------------------------------------------------
+| Returns true when 32-bit unsigned integer `uiA' has the bit pattern of a
+| 32-bit floating-point signaling NaN.
+| Note: This macro evaluates its argument more than once.
+*----------------------------------------------------------------------------*/
+#define softfloat_isSigNaNF32UI( uiA ) ((((uiA) & 0x7FC00000) == 0x7F800000) && ((uiA) & 0x003FFFFF))
+
+/*----------------------------------------------------------------------------
+| Assuming `uiA' has the bit pattern of a 16-bit floating-point NaN, converts
+| this NaN to the common NaN form, and stores the resulting common NaN at the
+| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+void softfloat_f16UIToCommonNaN( uint_fast16_t uiA, struct commonNaN *zPtr );
+
+/*----------------------------------------------------------------------------
+| Assuming `uiA' has the bit pattern of a 32-bit floating-point NaN, converts
+| this NaN to the common NaN form, and stores the resulting common NaN at the
+| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+void softfloat_f32UIToCommonNaN( uint_fast32_t uiA, struct commonNaN *zPtr );
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by `aPtr' into a 32-bit floating-point
+| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/
+uint_fast32_t softfloat_commonNaNToF32UI( const struct commonNaN *aPtr );
+
+/*----------------------------------------------------------------------------
+| Interpreting `uiA' and `uiB' as the bit patterns of two 32-bit floating-
+| point values, at least one of which is a NaN, returns the bit pattern of
+| the combined NaN result. If either `uiA' or `uiB' has the pattern of a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast32_t
+ softfloat_propagateNaNF32UI( uint_fast32_t uiA, uint_fast32_t uiB );
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 64-bit floating-point NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF64UI UINT64_C( 0xFFF8000000000000 )
+
+/*----------------------------------------------------------------------------
+| Returns true when 64-bit unsigned integer `uiA' has the bit pattern of a
+| 64-bit floating-point signaling NaN.
+| Note: This macro evaluates its argument more than once.
+*----------------------------------------------------------------------------*/
+#define softfloat_isSigNaNF64UI( uiA ) ((((uiA) & UINT64_C( 0x7FF8000000000000 )) == UINT64_C( 0x7FF0000000000000 )) && ((uiA) & UINT64_C( 0x0007FFFFFFFFFFFF )))
+
+/*----------------------------------------------------------------------------
+| Assuming `uiA' has the bit pattern of a 64-bit floating-point NaN, converts
+| this NaN to the common NaN form, and stores the resulting common NaN at the
+| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+void softfloat_f64UIToCommonNaN( uint_fast64_t uiA, struct commonNaN *zPtr );
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by `aPtr' into a 64-bit floating-point
+| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/
+uint_fast64_t softfloat_commonNaNToF64UI( const struct commonNaN *aPtr );
+
+/*----------------------------------------------------------------------------
+| Interpreting `uiA' and `uiB' as the bit patterns of two 64-bit floating-
+| point values, at least one of which is a NaN, returns the bit pattern of
+| the combined NaN result. If either `uiA' or `uiB' has the pattern of a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast64_t
+ softfloat_propagateNaNF64UI( uint_fast64_t uiA, uint_fast64_t uiB );
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 80-bit extended floating-point NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNExtF80UI64 0xFFFF
+#define defaultNaNExtF80UI0  UINT64_C( 0xC000000000000000 )
+
+/*----------------------------------------------------------------------------
+| Returns true when the 80-bit unsigned integer formed from concatenating
+| 16-bit `uiA64' and 64-bit `uiA0' has the bit pattern of an 80-bit extended
+| floating-point signaling NaN.
+| Note: This macro evaluates its arguments more than once.
+*----------------------------------------------------------------------------*/
+#define softfloat_isSigNaNExtF80UI( uiA64, uiA0 ) ((((uiA64) & 0x7FFF) == 0x7FFF) && ! ((uiA0) & UINT64_C( 0x4000000000000000 )) && ((uiA0) & UINT64_C( 0x3FFFFFFFFFFFFFFF )))
+
+
+/*----------------------------------------------------------------------------
+| The following functions are needed only when `SOFTFLOAT_FAST_INT64' is
+| defined.
+*----------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 128-bit floating-point NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF128UI64 UINT64_C( 0xFFFF800000000000 )
+#define defaultNaNF128UI0  UINT64_C( 0 )
+
+/*----------------------------------------------------------------------------
+| Returns true when the 128-bit unsigned integer formed from concatenating
+| 64-bit `uiA64' and 64-bit `uiA0' has the bit pattern of a 128-bit floating-
+| point signaling NaN.
+| Note: This macro evaluates its arguments more than once.
+*----------------------------------------------------------------------------*/
+#define softfloat_isSigNaNF128UI( uiA64, uiA0 ) ((((uiA64) & UINT64_C( 0x7FFF800000000000 )) == UINT64_C( 0x7FFF000000000000 )) && ((uiA0) || ((uiA64) & UINT64_C( 0x00007FFFFFFFFFFF ))))
+
+
+#endif
+
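The signaling-NaN tests above all follow the same shape: the exponent field
must be all ones, the quiet bit must be clear, and at least one remaining
fraction bit must be set (otherwise the pattern is an infinity). A small
editorial sketch for the f32 case (not part of the patch; assumes <assert.h>):

#include <assert.h>

/* Editorial sketch: classify a few well-known f32 bit patterns. */
static void sigNaNMacroExample(void)
{
    assert(  softfloat_isSigNaNF32UI( 0x7F800001 ) );  /* signaling NaN       */
    assert( !softfloat_isSigNaNF32UI( 0x7FC00000 ) );  /* quiet (default) NaN */
    assert( !softfloat_isSigNaNF32UI( 0x7F800000 ) );  /* +infinity           */
}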
diff --git a/src/common/softfloat/source/f16_to_f32.c b/src/common/softfloat/source/f16_to_f32.c
new file mode 100644
index 0000000..fb8b381
--- /dev/null
+++ b/src/common/softfloat/source/f16_to_f32.c
@@ -0,0 +1,93 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t f16_to_f32( float16_t a )
+{
+    union ui16_f16 uA;
+    uint_fast16_t uiA;
+    bool sign;
+    int_fast8_t exp;
+    uint_fast16_t frac;
+    struct commonNaN commonNaN;
+    uint_fast32_t uiZ;
+    struct exp8_sig16 normExpSig;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF16UI( uiA );
+    exp  = expF16UI( uiA );
+    frac = fracF16UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp == 0x1F ) {
+        if ( frac ) {
+            softfloat_f16UIToCommonNaN( uiA, &commonNaN );
+            uiZ = softfloat_commonNaNToF32UI( &commonNaN );
+        } else {
+            uiZ = packToF32UI( sign, 0xFF, 0 );
+        }
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! exp ) {
+        if ( ! frac ) {
+            uiZ = packToF32UI( sign, 0, 0 );
+            goto uiZ;
+        }
+        normExpSig = softfloat_normSubnormalF16Sig( frac );
+        exp = normExpSig.exp - 1;
+        frac = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uiZ = packToF32UI( sign, exp + 0x70, (uint_fast32_t) frac<<13 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
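A worked editorial example of the normal-number path above: f16 1.0 is 0x3C00
(sign 0, exponent 0x0F, fraction 0); rebiasing from the f16 bias (15) to the
f32 bias (127) is the exp + 0x70 term, and frac<<13 moves the 10 fraction bits
to the top of the 23-bit f32 fraction, giving 0x3F800000, i.e. 1.0f. (Not part
of the patch; assumes <assert.h>; the ui16_f16/ui32_f32 unions come from
internals.h.)

#include <assert.h>

/* Editorial sketch: 1.0 converts exactly from binary16 to binary32. */
static void f16ToF32Example(void)
{
    union ui16_f16 uA;
    union ui32_f32 uZ;

    uA.ui = 0x3C00;                 /* 1.0 in binary16 */
    uZ.f  = f16_to_f32( uA.f );
    assert( uZ.ui == 0x3F800000 );  /* 1.0 in binary32 */
}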
diff --git a/src/common/softfloat/source/f32_add.c b/src/common/softfloat/source/f32_add.c
new file mode 100644
index 0000000..314c76e
--- /dev/null
+++ b/src/common/softfloat/source/f32_add.c
@@ -0,0 +1,61 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t f32_add( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( signF32UI( uiA ^ uiB ) ) {
+        return softfloat_subMagsF32( uiA, uiB );
+    } else {
+        return softfloat_addMagsF32( uiA, uiB );
+    }
+
+}
+
diff --git a/src/common/softfloat/source/f32_div.c b/src/common/softfloat/source/f32_div.c
new file mode 100644
index 0000000..d817bc0
--- /dev/null
+++ b/src/common/softfloat/source/f32_div.c
@@ -0,0 +1,176 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t f32_div( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    bool signZ;
+    struct exp16_sig32 normExpSig;
+    int_fast16_t expZ;
+#ifdef SOFTFLOAT_FAST_DIV64TO32
+    uint_fast64_t sig64A;
+    uint_fast32_t sigZ;
+#else
+    uint_fast32_t sigZ;
+    uint_fast64_t rem;
+#endif
+    uint_fast32_t uiZ;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF32UI( uiA );
+    expA  = expF32UI( uiA );
+    sigA  = fracF32UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF32UI( uiB );
+    expB  = expF32UI( uiB );
+    sigB  = fracF32UI( uiB );
+    signZ = signA ^ signB;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0xFF ) {
+        if ( sigA ) goto propagateNaN;
+        if ( expB == 0xFF ) {
+            if ( sigB ) goto propagateNaN;
+            goto invalid;
+        }
+        goto infinity;
+    }
+    if ( expB == 0xFF ) {
+        if ( sigB ) goto propagateNaN;
+        goto zero;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expB ) {
+        if ( ! sigB ) {
+            if ( ! (expA | sigA) ) goto invalid;
+            softfloat_raiseFlags( softfloat_flag_infinite );
+            goto infinity;
+        }
+        normExpSig = softfloat_normSubnormalF32Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    if ( ! expA ) {
+        if ( ! sigA ) goto zero;
+        normExpSig = softfloat_normSubnormalF32Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expZ = expA - expB + 0x7E;
+    sigA |= 0x00800000;
+    sigB |= 0x00800000;
+#ifdef SOFTFLOAT_FAST_DIV64TO32
+    if ( sigA < sigB ) {
+        --expZ;
+        sig64A = (uint_fast64_t) sigA<<31;
+    } else {
+        sig64A = (uint_fast64_t) sigA<<30;
+    }
+    sigZ = sig64A / sigB;
+    if ( ! (sigZ & 0x3F) ) sigZ |= ((uint_fast64_t) sigB * sigZ != sig64A);
+#else
+    if ( sigA < sigB ) {
+        --expZ;
+        sigA <<= 8;
+    } else {
+        sigA <<= 7;
+    }
+    sigB <<= 8;
+    sigZ = ((uint_fast64_t) sigA * softfloat_approxRecip32_1( sigB ))>>32;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sigZ += 2;
+    if ( (sigZ & 0x3F) < 2 ) {
+        sigZ &= ~3;
+        rem = ((uint_fast64_t) sigA<<31) - (uint_fast64_t) sigZ * sigB;
+        if ( rem & UINT64_C( 0x8000000000000000 ) ) {
+            sigZ -= 4;
+        } else {
+            if ( rem ) sigZ |= 1;
+        }
+    }
+#endif
+    return softfloat_roundPackToF32( signZ, expZ, sigZ );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF32UI;
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ infinity:
+    uiZ = packToF32UI( signZ, 0xFF, 0 );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ zero:
+    uiZ = packToF32UI( signZ, 0, 0 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
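The special-case ladder at the top of f32_div() handles NaN, infinity, and
zero operands before any significand arithmetic: 0/0 and inf/inf are invalid
and yield the default NaN, finite/0 raises the infinite flag and returns a
signed infinity, and finite/inf returns a signed zero. An editorial sketch of
one such case (not part of the patch; assumes <assert.h>):

#include <assert.h>

/* Editorial sketch: 1.0 / +0.0 returns +infinity and raises 'infinite'. */
static void f32DivByZeroExample(void)
{
    union ui32_f32 one  = { .ui = 0x3F800000 };  /* 1.0f  */
    union ui32_f32 zero = { .ui = 0x00000000 };  /* +0.0f */
    union ui32_f32 r;

    softfloat_exceptionFlags = 0;
    r.f = f32_div( one.f, zero.f );
    assert( r.ui == 0x7F800000 );                /* +infinity */
    assert( softfloat_exceptionFlags & softfloat_flag_infinite );
}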
diff --git a/src/common/softfloat/source/f32_eq.c b/src/common/softfloat/source/f32_eq.c
new file mode 100644
index 0000000..5f07eee
--- /dev/null
+++ b/src/common/softfloat/source/f32_eq.c
@@ -0,0 +1,66 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f32_eq( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) {
+        if (
+            softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    return (uiA == uiB) || ! (uint32_t) ((uiA | uiB)<<1);
+
+}
+
diff --git a/src/common/softfloat/source/f32_eq_signaling.c b/src/common/softfloat/source/f32_eq_signaling.c
new file mode 100644
index 0000000..f5fcc82
--- /dev/null
+++ b/src/common/softfloat/source/f32_eq_signaling.c
@@ -0,0 +1,61 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f32_eq_signaling( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    return (uiA == uiB) || ! (uint32_t) ((uiA | uiB)<<1);
+
+}
+
diff --git a/src/common/softfloat/source/f32_isSignalingNaN.c b/src/common/softfloat/source/f32_isSignalingNaN.c
new file mode 100644
index 0000000..5004a5a
--- /dev/null
+++ b/src/common/softfloat/source/f32_isSignalingNaN.c
@@ -0,0 +1,51 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f32_isSignalingNaN( float32_t a )
+{
+    union ui32_f32 uA;
+
+    uA.f = a;
+    return softfloat_isSigNaNF32UI( uA.ui );
+
+}
+
diff --git a/src/common/softfloat/source/f32_le.c b/src/common/softfloat/source/f32_le.c
new file mode 100644
index 0000000..77595fb
--- /dev/null
+++ b/src/common/softfloat/source/f32_le.c
@@ -0,0 +1,66 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
diff --git a/src/common/softfloat/source/f32_le.c b/src/common/softfloat/source/f32_le.c
new file mode 100644
index 0000000..77595fb
--- /dev/null
+++ b/src/common/softfloat/source/f32_le.c
@@ -0,0 +1,66 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f32_le( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    signA = signF32UI( uiA );
+    signB = signF32UI( uiB );
+    return
+        (signA != signB) ? signA || ! (uint32_t) ((uiA | uiB)<<1)
+            : (uiA == uiB) || (signA ^ (uiA < uiB));
+
+}
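The ordering logic in f32_le works directly on the encodings: bit patterns of
the same sign order like unsigned integers for positive values and in reverse
for negative ones, which signA ^ (uiA < uiB) captures, while the mixed-sign
case only needs the +0/-0 exception. A standalone sketch (le_bits is a
hypothetical helper that assumes NaNs were already screened out):

    #include <stdbool.h>
    #include <stdint.h>

    static bool le_bits( uint32_t uiA, uint32_t uiB )
    {
        bool signA = uiA>>31, signB = uiB>>31;
        return
            (signA != signB) ? signA || ! (uint32_t) ((uiA | uiB)<<1)
                : (uiA == uiB) || (signA ^ (uiA < uiB));
    }

    /* le_bits( 0x3F800000, 0x40000000 ) -> true  ( 1.0 <= 2.0 )
       le_bits( 0xC0000000, 0xBF800000 ) -> true  (-2.0 <= -1.0, order reversed)
       le_bits( 0x80000000, 0x00000000 ) -> true  (-0.0 <= +0.0)               */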
diff --git a/src/common/softfloat/source/f32_le_quiet.c b/src/common/softfloat/source/f32_le_quiet.c
new file mode 100644
index 0000000..1ec9101
--- /dev/null
+++ b/src/common/softfloat/source/f32_le_quiet.c
@@ -0,0 +1,71 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f32_le_quiet( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) {
+        if (
+            softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    signA = signF32UI( uiA );
+    signB = signF32UI( uiB );
+    return
+        (signA != signB) ? signA || ! (uint32_t) ((uiA | uiB)<<1)
+            : (uiA == uiB) || (signA ^ (uiA < uiB));
+
+}
diff --git a/src/common/softfloat/source/f32_lt.c b/src/common/softfloat/source/f32_lt.c
new file mode 100644
index 0000000..9e12843
--- /dev/null
+++ b/src/common/softfloat/source/f32_lt.c
@@ -0,0 +1,66 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f32_lt( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    signA = signF32UI( uiA );
+    signB = signF32UI( uiB );
+    return
+        (signA != signB) ? signA && ((uint32_t) ((uiA | uiB)<<1) != 0)
+            : (uiA != uiB) && (signA ^ (uiA < uiB));
+
+}
diff --git a/src/common/softfloat/source/f32_lt_quiet.c b/src/common/softfloat/source/f32_lt_quiet.c
new file mode 100644
index 0000000..9f83b81
--- /dev/null
+++ b/src/common/softfloat/source/f32_lt_quiet.c
@@ -0,0 +1,71 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f32_lt_quiet( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) {
+        if (
+            softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    signA = signF32UI( uiA );
+    signB = signF32UI( uiB );
+    return
+        (signA != signB) ? signA && ((uint32_t) ((uiA | uiB)<<1) != 0)
+            : (uiA != uiB) && (signA ^ (uiA < uiB));
+
+}
diff --git a/src/common/softfloat/source/f32_mul.c b/src/common/softfloat/source/f32_mul.c
new file mode 100644
index 0000000..a2a673f
--- /dev/null
+++ b/src/common/softfloat/source/f32_mul.c
@@ -0,0 +1,137 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t f32_mul( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    bool signZ;
+    uint_fast32_t magBits;
+    struct exp16_sig32 normExpSig;
+    int_fast16_t expZ;
+    uint_fast32_t sigZ, uiZ;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF32UI( uiA );
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF32UI( uiB );
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    signZ = signA ^ signB;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0xFF ) {
+        if ( sigA || ((expB == 0xFF) && sigB) ) goto propagateNaN;
+        magBits = expB | sigB;
+        goto infArg;
+    }
+    if ( expB == 0xFF ) {
+        if ( sigB ) goto propagateNaN;
+        magBits = expA | sigA;
+        goto infArg;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expA ) {
+        if ( ! sigA ) goto zero;
+        normExpSig = softfloat_normSubnormalF32Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    if ( ! expB ) {
+        if ( ! sigB ) goto zero;
+        normExpSig = softfloat_normSubnormalF32Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expZ = expA + expB - 0x7F;
+    sigA = (sigA | 0x00800000)<<7;
+    sigB = (sigB | 0x00800000)<<8;
+    sigZ = softfloat_shortShiftRightJam64( (uint_fast64_t) sigA * sigB, 32 );
+    if ( sigZ < 0x40000000 ) {
+        --expZ;
+        sigZ <<= 1;
+    }
+    return softfloat_roundPackToF32( signZ, expZ, sigZ );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ infArg:
+    if ( ! magBits ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        uiZ = defaultNaNF32UI;
+    } else {
+        uiZ = packToF32UI( signZ, 0xFF, 0 );
+    }
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ zero:
+    uiZ = packToF32UI( signZ, 0, 0 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
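In f32_mul the biased exponents simply add: a carries 2^(expA-127) and b
carries 2^(expB-127), so the product's true exponent is the sum of the true
exponents, and re-biasing gives expZ = expA + expB - 0x7F (the normalization
step may still adjust it by one). A small worked check, standalone and not
part of the patch:

    #include <stdio.h>

    int main( void )
    {
        int expA = 0x7F, expB = 0x80;    /* biased exponents of 1.5f and 2.5f */
        int trueExp = (expA - 0x7F) + (expB - 0x7F);
        int expZ = expA + expB - 0x7F;   /* as computed in f32_mul */
        printf( "true %d, biased %d\n", trueExp, expZ );  /* true 1, biased 128 */
        return 0;
    }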
diff --git a/src/common/softfloat/source/f32_mulAdd.c b/src/common/softfloat/source/f32_mulAdd.c
new file mode 100644
index 0000000..e98021b
--- /dev/null
+++ b/src/common/softfloat/source/f32_mulAdd.c
@@ -0,0 +1,60 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t f32_mulAdd( float32_t a, float32_t b, float32_t c )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    union ui32_f32 uC;
+    uint_fast32_t uiC;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    uC.f = c;
+    uiC = uC.ui;
+    return softfloat_mulAddF32( uiA, uiB, uiC, 0 );
+
+}
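The point of f32_mulAdd is that a*b + c is rounded once, the same contract
C99 gives fmaf for native floats; a separate multiply and add rounds twice
and can lose the low bit. A standalone native-float illustration (assumes
strict binary32 evaluation, FLT_EVAL_METHOD == 0, and that the compiler does
not contract the expression into an FMA on its own, e.g. -ffp-contract=off):

    #include <math.h>
    #include <stdio.h>

    int main( void )
    {
        float a = 1.0f + 0x1p-12f;       /* exact a*a = 1 + 2^-11 + 2^-24 */
        float c = -(1.0f + 0x1p-11f);
        /* a*a rounds (ties-to-even) to 1 + 2^-11, so the sum collapses to 0, */
        /* while the fused version keeps the 2^-24 tail.                      */
        printf( "%g\n", (double) (a*a + c) );        /* 0                   */
        printf( "%g\n", (double) fmaf( a, a, c ) );  /* 5.96046e-08 = 2^-24 */
        return 0;
    }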
diff --git a/src/common/softfloat/source/f32_rem.c b/src/common/softfloat/source/f32_rem.c
new file mode 100644
index 0000000..771b1b9
--- /dev/null
+++ b/src/common/softfloat/source/f32_rem.c
@@ -0,0 +1,168 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t f32_rem( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    struct exp16_sig32 normExpSig;
+    uint32_t rem;
+    int_fast16_t expDiff;
+    uint32_t q, recip32, altRem, meanRem;
+    bool signRem;
+    uint_fast32_t uiZ;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF32UI( uiA );
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0xFF ) {
+        if ( sigA || ((expB == 0xFF) && sigB) ) goto propagateNaN;
+        goto invalid;
+    }
+    if ( expB == 0xFF ) {
+        if ( sigB ) goto propagateNaN;
+        return a;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expB ) {
+        if ( ! sigB ) goto invalid;
+        normExpSig = softfloat_normSubnormalF32Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    if ( ! expA ) {
+        if ( ! sigA ) return a;
+        normExpSig = softfloat_normSubnormalF32Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    rem = sigA | 0x00800000;
+    sigB |= 0x00800000;
+    expDiff = expA - expB;
+    if ( expDiff < 1 ) {
+        if ( expDiff < -1 ) return a;
+        sigB <<= 6;
+        if ( expDiff ) {
+            rem <<= 5;
+            q = 0;
+        } else {
+            rem <<= 6;
+            q = (sigB <= rem);
+            if ( q ) rem -= sigB;
+        }
+    } else {
+        recip32 = softfloat_approxRecip32_1( sigB<<8 );
+        /*--------------------------------------------------------------------
+        | Changing the shift of `rem' here requires also changing the initial
+        | subtraction from `expDiff'.
+        *--------------------------------------------------------------------*/
+        rem <<= 7;
+        expDiff -= 31;
+        /*--------------------------------------------------------------------
+        | The scale of `sigB' affects how many bits are obtained during each
+        | cycle of the loop.  Currently this is 29 bits per loop iteration,
+        | which is believed to be the maximum possible.
+        *--------------------------------------------------------------------*/
+        sigB <<= 6;
+        for (;;) {
+            q = (rem * (uint_fast64_t) recip32)>>32;
+            if ( expDiff < 0 ) break;
+            rem = -(q * (uint32_t) sigB);
+            expDiff -= 29;
+        }
+        /*--------------------------------------------------------------------
+        | (`expDiff' cannot be less than -30 here.)
+        *--------------------------------------------------------------------*/
+        q >>= ~expDiff & 31;
+        rem = (rem<<(expDiff + 30)) - q * (uint32_t) sigB;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    do {
+        altRem = rem;
+        ++q;
+        rem -= sigB;
+    } while ( ! (rem & 0x80000000) );
+    meanRem = rem + altRem;
+    if ( (meanRem & 0x80000000) || (! meanRem && (q & 1)) ) rem = altRem;
+    signRem = signA;
+    if ( 0x80000000 <= rem ) {
+        signRem = ! signRem;
+        rem = -rem;
+    }
+    return softfloat_normRoundPackToF32( signRem, expB, rem );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+    goto uiZ;
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF32UI;
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
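Semantically, f32_rem is the IEEE remainder operation, the one C99 exposes as
remainderf: r = a - n*b where n is the integer nearest a/b, ties going to the
even n, so |r| <= |b|/2 and the result is exact. That differs from fmod,
which truncates the quotient. A native-float illustration of the contract:

    #include <math.h>
    #include <stdio.h>

    int main( void )
    {
        printf( "%g\n", (double) remainderf( 5.0f, 2.0f ) ); /*  1: n = 2              */
        printf( "%g\n", (double) remainderf( 7.0f, 2.0f ) ); /* -1: n = 4, tie to even */
        printf( "%g\n", (double) fmodf( 7.0f, 2.0f ) );      /*  1: truncated quotient */
        return 0;
    }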
diff --git a/src/common/softfloat/source/f32_roundToInt.c b/src/common/softfloat/source/f32_roundToInt.c
new file mode 100644
index 0000000..84e3c62
--- /dev/null
+++ b/src/common/softfloat/source/f32_roundToInt.c
@@ -0,0 +1,113 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t f32_roundToInt( float32_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    int_fast16_t exp;
+    uint_fast32_t uiZ, lastBitMask, roundBitsMask;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp <= 0x7E ) {
+        if ( ! (uint32_t) (uiA<<1) ) return a;
+        if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact;
+        uiZ = uiA & packToF32UI( 1, 0, 0 );
+        switch ( roundingMode ) {
+         case softfloat_round_near_even:
+            if ( ! fracF32UI( uiA ) ) break;
+            /* fall through */
+         case softfloat_round_near_maxMag:
+            if ( exp == 0x7E ) uiZ |= packToF32UI( 0, 0x7F, 0 );
+            break;
+         case softfloat_round_min:
+            if ( uiZ ) uiZ = packToF32UI( 1, 0x7F, 0 );
+            break;
+         case softfloat_round_max:
+            if ( ! uiZ ) uiZ = packToF32UI( 0, 0x7F, 0 );
+            break;
+        }
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( 0x96 <= exp ) {
+        if ( (exp == 0xFF) && fracF32UI( uiA ) ) {
+            uiZ = softfloat_propagateNaNF32UI( uiA, 0 );
+            goto uiZ;
+        }
+        return a;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uiZ = uiA;
+    lastBitMask = (uint_fast32_t) 1<<(0x96 - exp);
+    roundBitsMask = lastBitMask - 1;
+    if ( roundingMode == softfloat_round_near_maxMag ) {
+        uiZ += lastBitMask>>1;
+    } else if ( roundingMode == softfloat_round_near_even ) {
+        uiZ += lastBitMask>>1;
+        if ( ! (uiZ & roundBitsMask) ) uiZ &= ~lastBitMask;
+    } else if (
+        roundingMode
+            == (signF32UI( uiZ ) ? softfloat_round_min : softfloat_round_max)
+    ) {
+        uiZ += roundBitsMask;
+    }
+    uiZ &= ~roundBitsMask;
+    if ( exact && (uiZ != uiA) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
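The masking scheme above exploits the encoding directly: for a biased
exponent exp in the integer range, the bit of weight one sits at position
0x96 - exp, so adding half of lastBitMask and clearing roundBitsMask rounds
to an integer without ever unpacking the value. A standalone replay of the
round-to-nearest-even path on 2.5f (not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        uint32_t uiA = 0x40200000;                          /* 2.5f, exp = 0x80 */
        int exp = (uiA>>23) & 0xFF;
        uint32_t lastBitMask = (uint32_t) 1<<(0x96 - exp);  /* bit of weight 1 */
        uint32_t roundBitsMask = lastBitMask - 1;
        uint32_t uiZ = uiA + (lastBitMask>>1);
        if ( ! (uiZ & roundBitsMask) ) uiZ &= ~lastBitMask; /* tie: go to even */
        uiZ &= ~roundBitsMask;
        printf( "0x%08X\n", uiZ );                          /* 0x40000000 = 2.0f */
        return 0;
    }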
diff --git a/src/common/softfloat/source/f32_sqrt.c b/src/common/softfloat/source/f32_sqrt.c
new file mode 100644
index 0000000..5ef659e
--- /dev/null
+++ b/src/common/softfloat/source/f32_sqrt.c
@@ -0,0 +1,121 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t f32_sqrt( float32_t a )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast32_t sigA, uiZ;
+    struct exp16_sig32 normExpSig;
+    int_fast16_t expZ;
+    uint_fast32_t sigZ, shiftedSigZ;
+    uint32_t negRem;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF32UI( uiA );
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0xFF ) {
+        if ( sigA ) {
+            uiZ = softfloat_propagateNaNF32UI( uiA, 0 );
+            goto uiZ;
+        }
+        if ( ! signA ) return a;
+        goto invalid;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( signA ) {
+        if ( ! (expA | sigA) ) return a;
+        goto invalid;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expA ) {
+        if ( ! sigA ) return a;
+        normExpSig = softfloat_normSubnormalF32Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expZ = ((expA - 0x7F)>>1) + 0x7E;
+    expA &= 1;
+    sigA = (sigA | 0x00800000)<<8;
+    sigZ =
+        ((uint_fast64_t) sigA * softfloat_approxRecipSqrt32_1( expA, sigA ))
+            >>32;
+    if ( expA ) sigZ >>= 1;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sigZ += 2;
+    if ( (sigZ & 0x3F) < 2 ) {
+        shiftedSigZ = sigZ>>2;
+        negRem = shiftedSigZ * shiftedSigZ;
+        sigZ &= ~3;
+        if ( negRem & 0x80000000 ) {
+            sigZ |= 1;
+        } else {
+            if ( negRem ) --sigZ;
+        }
+    }
+    return softfloat_roundPackToF32( 0, expZ, sigZ );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF32UI;
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
diff --git a/src/common/softfloat/source/f32_sub.c b/src/common/softfloat/source/f32_sub.c
new file mode 100644
index 0000000..604d3bd
--- /dev/null
+++ b/src/common/softfloat/source/f32_sub.c
@@ -0,0 +1,61 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t f32_sub( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( signF32UI( uiA ^ uiB ) ) {
+        return softfloat_addMagsF32( uiA, uiB );
+    } else {
+        return softfloat_subMagsF32( uiA, uiB );
+    }
+
+}
diff --git a/src/common/softfloat/source/f32_to_f16.c b/src/common/softfloat/source/f32_to_f16.c
new file mode 100644
index 0000000..7a97158
--- /dev/null
+++ b/src/common/softfloat/source/f32_to_f16.c
@@ -0,0 +1,88 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float16_t f32_to_f16( float32_t a )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast32_t frac;
+    struct commonNaN commonNaN;
+    uint_fast16_t uiZ, frac16;
+    union ui16_f16 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp = expF32UI( uiA );
+    frac = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp == 0xFF ) {
+        if ( frac ) {
+            softfloat_f32UIToCommonNaN( uiA, &commonNaN );
+            uiZ = softfloat_commonNaNToF16UI( &commonNaN );
+        } else {
+            uiZ = packToF16UI( sign, 0x1F, 0 );
+        }
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    frac16 = frac>>9 | ((frac & 0x1FF) != 0);
+    if ( ! (exp | frac16) ) {
+        uiZ = packToF16UI( sign, 0, 0 );
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    return softfloat_roundPackToF16( sign, exp - 0x71, frac16 | 0x4000 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
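The line frac16 = frac>>9 | ((frac & 0x1FF) != 0) is a shift-right-jam: the
nine fraction bits that cannot survive the narrowing are ORed into the lowest
kept bit, so softfloat_roundPackToF16 can still tell an exact value from one
that lost nonzero bits. A standalone sketch of the jam (hypothetical helper):

    #include <stdint.h>

    static uint32_t shift_right_jam32( uint32_t x, int n )
    {
        return x>>n | ((x & (((uint32_t) 1<<n) - 1)) != 0);
    }

    /* shift_right_jam32( 0xC01, 9 ) -> 0x7 (0x6 plus sticky bit)
       shift_right_jam32( 0xC00, 9 ) -> 0x6 (exact, no sticky)    */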
diff --git a/src/common/softfloat/source/f32_to_f64.c b/src/common/softfloat/source/f32_to_f64.c
new file mode 100644
index 0000000..f9e02f2
--- /dev/null
+++ b/src/common/softfloat/source/f32_to_f64.c
@@ -0,0 +1,93 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t f32_to_f64( float32_t a )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast32_t frac;
+    struct commonNaN commonNaN;
+    uint_fast64_t uiZ;
+    struct exp16_sig32 normExpSig;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp = expF32UI( uiA );
+    frac = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp == 0xFF ) {
+        if ( frac ) {
+            softfloat_f32UIToCommonNaN( uiA, &commonNaN );
+            uiZ = softfloat_commonNaNToF64UI( &commonNaN );
+        } else {
+            uiZ = packToF64UI( sign, 0x7FF, 0 );
+        }
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! exp ) {
+        if ( ! frac ) {
+            uiZ = packToF64UI( sign, 0, 0 );
+            goto uiZ;
+        }
+        normExpSig = softfloat_normSubnormalF32Sig( frac );
+        exp = normExpSig.exp - 1;
+        frac = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uiZ = packToF64UI( sign, exp + 0x380, (uint_fast64_t) frac<<29 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
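The widening constants follow from the two formats: double's exponent bias
1023 exceeds float's 127 by 0x380, and the fraction grows from 23 to 52 bits,
a left shift of 29. A standalone check on 1.5f (sign bit zero; hypothetical
repacking done outside the SoftFloat helpers):

    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        uint32_t uiA = 0x3FC00000;            /* 1.5f: exp 0x7F, frac 0x400000 */
        uint64_t exp = (uiA>>23) & 0xFF;
        uint64_t frac = uiA & 0x007FFFFF;
        uint64_t uiZ = (exp + 0x380)<<52 | frac<<29;
        printf( "0x%016llX\n", (unsigned long long) uiZ );
        /* 0x3FF8000000000000, the double encoding of 1.5 */
        return 0;
    }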
diff --git a/src/common/softfloat/source/f32_to_i32.c b/src/common/softfloat/source/f32_to_i32.c
new file mode 100644
index 0000000..c9f2cf9
--- /dev/null
+++ b/src/common/softfloat/source/f32_to_i32.c
@@ -0,0 +1,84 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast32_t f32_to_i32( float32_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    uint_fast64_t sig64;
+    int_fast16_t shiftDist;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+#if (i32_fromNaN != i32_fromPosOverflow) || (i32_fromNaN != i32_fromNegOverflow)
+    if ( (exp == 0xFF) && sig ) {
+#if (i32_fromNaN == i32_fromPosOverflow)
+        sign = 0;
+#elif (i32_fromNaN == i32_fromNegOverflow)
+        sign = 1;
+#else
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return i32_fromNaN;
+#endif
+    }
+#endif
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp ) sig |= 0x00800000;
+    sig64 = (uint_fast64_t) sig<<32;
+    shiftDist = 0xAA - exp;
+    if ( 0 < shiftDist ) sig64 = softfloat_shiftRightJam64( sig64, shiftDist );
+    return softfloat_roundToI32( sign, sig64, roundingMode, exact );
+
+}
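A short usage sketch for the conversion above (assumes this SoftFloat build
is linked in and that float32_t exposes its raw bits as the .v member defined
in softfloat_types.h): the rounding mode argument decides how 2.5 maps to an
integer, and exact = true additionally raises the inexact flag.

    #include <stdbool.h>
    #include <stdio.h>
    #include "softfloat.h"

    int main( void )
    {
        float32_t a;
        a.v = 0x40200000;  /* 2.5f */
        printf( "%ld\n", (long) f32_to_i32( a, softfloat_round_near_even, true ) ); /* 2 */
        printf( "%ld\n", (long) f32_to_i32( a, softfloat_round_max, true ) );       /* 3 */
        printf( "%ld\n", (long) f32_to_i32( a, softfloat_round_min, true ) );       /* 2 */
        return 0;
    }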
diff --git a/src/common/softfloat/source/f32_to_i32_r_minMag.c b/src/common/softfloat/source/f32_to_i32_r_minMag.c
new file mode 100644
index 0000000..1a94dcc
--- /dev/null
+++ b/src/common/softfloat/source/f32_to_i32_r_minMag.c
@@ -0,0 +1,89 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast32_t f32_to_i32_r_minMag( float32_t a, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftDist;
+    bool sign;
+    int_fast32_t absZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0x9E - exp;
+    if ( 32 <= shiftDist ) {
+        if ( exact && (exp | sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF32UI( uiA );
+    if ( shiftDist <= 0 ) {
+        if ( uiA == packToF32UI( 1, 0x9E, 0 ) ) return -0x7FFFFFFF - 1;
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0xFF) && sig ? i32_fromNaN
+                : sign ? i32_fromNegOverflow : i32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig = (sig | 0x00800000)<<8;
+    absZ = sig>>shiftDist;
+    if ( exact && ((uint_fast32_t) absZ<<shiftDist != sig) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return sign ? -absZ : absZ;
+
+}
diff --git a/src/common/softfloat/source/f32_to_i64.c b/src/common/softfloat/source/f32_to_i64.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/f32_to_i64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast64_t f32_to_i64( float32_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftDist;
+    uint_fast64_t sig64, extra;
+    struct uint64_extra sig64Extra;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0xBE - exp;
+    if ( shiftDist < 0 ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0xFF) && sig ? i64_fromNaN
+                : sign ? i64_fromNegOverflow : i64_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp ) sig |= 0x00800000;
+    sig64 = (uint_fast64_t) sig<<40;
+    extra = 0;
+    if ( shiftDist ) {
+        sig64Extra = softfloat_shiftRightJam64Extra( sig64, 0, shiftDist );
+        sig64 = sig64Extra.v;
+        extra = sig64Extra.extra;
+    }
+    return softfloat_roundToI64( sign, sig64, extra, roundingMode, exact );
+
+}
diff --git a/src/common/softfloat/source/f32_to_i64_r_minMag.c b/src/common/softfloat/source/f32_to_i64_r_minMag.c
new file mode 100644
index 0000000..7d336a4
--- /dev/null
+++ b/src/common/softfloat/source/f32_to_i64_r_minMag.c
@@ -0,0 +1,94 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast64_t f32_to_i64_r_minMag( float32_t a, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftDist;
+    bool sign;
+    uint_fast64_t sig64;
+    int_fast64_t absZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0xBE - exp;
+    if ( 64 <= shiftDist ) {
+        if ( exact && (exp | sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF32UI( uiA );
+    if ( shiftDist <= 0 ) {
+        if ( uiA == packToF32UI( 1, 0xBE, 0 ) ) {
+            return -INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1;
+        }
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0xFF) && sig ? i64_fromNaN
+                : sign ? i64_fromNegOverflow : i64_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig |= 0x00800000;
+    sig64 = (uint_fast64_t) sig<<40;
+    absZ = sig64>>shiftDist;
+    shiftDist = 40 - shiftDist;
+    if ( exact && (shiftDist < 0) && (uint32_t) (sig<<(shiftDist & 31)) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return sign ? -absZ : absZ;
+
+}
diff --git a/src/common/softfloat/source/f32_to_ui32.c b/src/common/softfloat/source/f32_to_ui32.c
new file mode 100644
index 0000000..5ec279b
--- /dev/null
+++ b/src/common/softfloat/source/f32_to_ui32.c
@@ -0,0 +1,84 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast32_t f32_to_ui32( float32_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    uint_fast64_t sig64;
+    int_fast16_t shiftDist;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+#if (ui32_fromNaN != ui32_fromPosOverflow) || (ui32_fromNaN != ui32_fromNegOverflow)
+    if ( (exp == 0xFF) && sig ) {
+#if (ui32_fromNaN == ui32_fromPosOverflow)
+        sign = 0;
+#elif (ui32_fromNaN == ui32_fromNegOverflow)
+        sign = 1;
+#else
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return ui32_fromNaN;
+#endif
+    }
+#endif
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp ) sig |= 0x00800000;
+    sig64 = (uint_fast64_t) sig<<32;
+    shiftDist = 0xAA - exp;
+    if ( 0 < shiftDist ) sig64 = softfloat_shiftRightJam64( sig64, shiftDist );
+    return softfloat_roundToUI32( sign, sig64, roundingMode, exact );
+
+}
diff --git a/src/common/softfloat/source/f32_to_ui32_r_minMag.c b/src/common/softfloat/source/f32_to_ui32_r_minMag.c
new file mode 100644
index 0000000..12f7261
--- /dev/null
+++ b/src/common/softfloat/source/f32_to_ui32_r_minMag.c
@@ -0,0 +1,88 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
diff --git a/src/common/softfloat/source/f32_to_ui32_r_minMag.c b/src/common/softfloat/source/f32_to_ui32_r_minMag.c
new file mode 100644
index 0000000..12f7261
--- /dev/null
+++ b/src/common/softfloat/source/f32_to_ui32_r_minMag.c
@@ -0,0 +1,88 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast32_t f32_to_ui32_r_minMag( float32_t a, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftDist;
+    bool sign;
+    uint_fast32_t z;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0x9E - exp;
+    if ( 32 <= shiftDist ) {
+        if ( exact && (exp | sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF32UI( uiA );
+    if ( sign || (shiftDist < 0) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0xFF) && sig ? ui32_fromNaN
+                : sign ? ui32_fromNegOverflow : ui32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig = (sig | 0x00800000)<<8;
+    z = sig>>shiftDist;
+    if ( exact && (z<<shiftDist != sig) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+
+}
+
diff --git a/src/common/softfloat/source/f32_to_ui64.c b/src/common/softfloat/source/f32_to_ui64.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/f32_to_ui64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast64_t f32_to_ui64( float32_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftDist;
+    uint_fast64_t sig64, extra;
+    struct uint64_extra sig64Extra;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0xBE - exp;
+    if ( shiftDist < 0 ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0xFF) && sig ? ui64_fromNaN
+                : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp ) sig |= 0x00800000;
+    sig64 = (uint_fast64_t) sig<<40;
+    extra = 0;
+    if ( shiftDist ) {
+        sig64Extra = softfloat_shiftRightJam64Extra( sig64, 0, shiftDist );
+        sig64 = sig64Extra.v;
+        extra = sig64Extra.extra;
+    }
+    return softfloat_roundToUI64( sign, sig64, extra, roundingMode, exact );
+
+}
+
diff --git a/src/common/softfloat/source/f32_to_ui64_r_minMag.c b/src/common/softfloat/source/f32_to_ui64_r_minMag.c
new file mode 100644
index 0000000..f96f3e1
--- /dev/null
+++ b/src/common/softfloat/source/f32_to_ui64_r_minMag.c
@@ -0,0 +1,90 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast64_t f32_to_ui64_r_minMag( float32_t a, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftDist;
+    bool sign;
+    uint_fast64_t sig64, z;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0xBE - exp;
+    if ( 64 <= shiftDist ) {
+        if ( exact && (exp | sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF32UI( uiA );
+    if ( sign || (shiftDist < 0) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0xFF) && sig ? ui64_fromNaN
+                : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig |= 0x00800000;
+    sig64 = (uint_fast64_t) sig<<40;
+    z = sig64>>shiftDist;
+    shiftDist = 40 - shiftDist;
+    if ( exact && (shiftDist < 0) && (uint32_t) (sig<<(shiftDist & 31)) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+
+}
+
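The `r_minMag` variants above implement round-to-minimum-magnitude (truncation toward zero) and take no rounding-mode argument; besides invalid, the only flag they can raise is inexact, and only when `exact` is true. A small sketch under the same assumptions as the earlier example:

    float32_t a = { 0x40700000 };  /* 3.75f */
    softfloat_exceptionFlags = 0;
    uint_fast32_t z = f32_to_ui32_r_minMag( a, true );
    /* z == 3: the fraction is discarded and softfloat_flag_inexact is set.
       A negative or too-large input would instead raise invalid and return
       ui32_fromNegOverflow, ui32_fromPosOverflow, or ui32_fromNaN. */
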
diff --git a/src/common/softfloat/source/f64_add.c b/src/common/softfloat/source/f64_add.c
new file mode 100644
index 0000000..b1969ca
--- /dev/null
+++ b/src/common/softfloat/source/f64_add.c
@@ -0,0 +1,65 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t f64_add( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool signA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF64UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF64UI( uiB );
+    if ( signA == signB ) {
+        return softfloat_addMagsF64( uiA, uiB, signA );
+    } else {
+        return softfloat_subMagsF64( uiA, uiB, signA );
+    }
+
+}
+
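f64_add only inspects the two sign bits and then dispatches: equal signs go to softfloat_addMagsF64, opposite signs to softfloat_subMagsF64, so all rounding and special-case handling lives in those shared helpers. A caller-level sketch (illustrative; same softfloat.h and raw-bits assumptions as above):

    float64_t x = { UINT64_C( 0x3FF0000000000000 ) };  /* 1.0  */
    float64_t y = { UINT64_C( 0xBFD0000000000000 ) };  /* -0.25 */
    float64_t s = f64_add( x, y );
    /* Signs differ, so this takes the subMagsF64 path;
       s.v == UINT64_C( 0x3FE8000000000000 ), i.e. 0.75. */
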
diff --git a/src/common/softfloat/source/f64_div.c b/src/common/softfloat/source/f64_div.c
new file mode 100644
index 0000000..c5a2d4f
--- /dev/null
+++ b/src/common/softfloat/source/f64_div.c
@@ -0,0 +1,172 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t f64_div( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    bool signZ;
+    struct exp16_sig64 normExpSig;
+    int_fast16_t expZ;
+    uint32_t recip32, sig32Z, doubleTerm;
+    uint_fast64_t rem;
+    uint32_t q;
+    uint_fast64_t sigZ;
+    uint_fast64_t uiZ;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF64UI( uiA );
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF64UI( uiB );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    signZ = signA ^ signB;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0x7FF ) {
+        if ( sigA ) goto propagateNaN;
+        if ( expB == 0x7FF ) {
+            if ( sigB ) goto propagateNaN;
+            goto invalid;
+        }
+        goto infinity;
+    }
+    if ( expB == 0x7FF ) {
+        if ( sigB ) goto propagateNaN;
+        goto zero;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expB ) {
+        if ( ! sigB ) {
+            if ( ! (expA | sigA) ) goto invalid;
+            softfloat_raiseFlags( softfloat_flag_infinite );
+            goto infinity;
+        }
+        normExpSig = softfloat_normSubnormalF64Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    if ( ! expA ) {
+        if ( ! sigA ) goto zero;
+        normExpSig = softfloat_normSubnormalF64Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expZ = expA - expB + 0x3FE;
+    sigA |= UINT64_C( 0x0010000000000000 );
+    sigB |= UINT64_C( 0x0010000000000000 );
+    if ( sigA < sigB ) {
+        --expZ;
+        sigA <<= 11;
+    } else {
+        sigA <<= 10;
+    }
+    sigB <<= 11;
+    recip32 = softfloat_approxRecip32_1( sigB>>32 ) - 2;
+    sig32Z = ((uint32_t) (sigA>>32) * (uint_fast64_t) recip32)>>32;
+    doubleTerm = sig32Z<<1;
+    rem =
+        ((sigA - (uint_fast64_t) doubleTerm * (uint32_t) (sigB>>32))<<28)
+            - (uint_fast64_t) doubleTerm * ((uint32_t) sigB>>4);
+    q = (((uint32_t) (rem>>32) * (uint_fast64_t) recip32)>>32) + 4;
+    sigZ = ((uint_fast64_t) sig32Z<<32) + ((uint_fast64_t) q<<4);
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( (sigZ & 0x1FF) < 4<<4 ) {
+        q &= ~7;
+        sigZ &= ~(uint_fast64_t) 0x7F;
+        doubleTerm = q<<1;
+        rem =
+            ((rem - (uint_fast64_t) doubleTerm * (uint32_t) (sigB>>32))<<28)
+                - (uint_fast64_t) doubleTerm * ((uint32_t) sigB>>4);
+        if ( rem & UINT64_C( 0x8000000000000000 ) ) {
+            sigZ -= 1<<7;
+        } else {
+            if ( rem ) sigZ |= 1;
+        }
+    }
+    return softfloat_roundPackToF64( signZ, expZ, sigZ );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF64UI;
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ infinity:
+    uiZ = packToF64UI( signZ, 0x7FF, 0 );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ zero:
+    uiZ = packToF64UI( signZ, 0, 0 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/src/common/softfloat/source/f64_eq.c b/src/common/softfloat/source/f64_eq.c
new file mode 100644
index 0000000..ccb602a
--- /dev/null
+++ b/src/common/softfloat/source/f64_eq.c
@@ -0,0 +1,66 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f64_eq( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) {
+        if (
+            softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    return (uiA == uiB) || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF ));
+
+}
+
diff --git a/src/common/softfloat/source/f64_eq_signaling.c b/src/common/softfloat/source/f64_eq_signaling.c
new file mode 100644
index 0000000..ee5a441
--- /dev/null
+++ b/src/common/softfloat/source/f64_eq_signaling.c
@@ -0,0 +1,61 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f64_eq_signaling( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    return (uiA == uiB) || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF ));
+
+}
+
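As the two functions above show, f64_eq treats NaN operands quietly (it returns false, raising invalid only for a signaling NaN), while f64_eq_signaling raises invalid for any NaN operand; both treat +0 and -0 as equal via the `! ((uiA | uiB) & 0x7FFFFFFFFFFFFFFF)` test. A sketch under the same assumptions as the earlier examples:

    float64_t qnan = { UINT64_C( 0x7FF8000000000000 ) };  /* quiet NaN */
    softfloat_exceptionFlags = 0;
    bool e1 = f64_eq( qnan, qnan );            /* false; no flag raised   */
    bool e2 = f64_eq_signaling( qnan, qnan );  /* false; invalid raised   */
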
diff --git a/src/common/softfloat/source/f64_isSignalingNaN.c b/src/common/softfloat/source/f64_isSignalingNaN.c
new file mode 100644
index 0000000..f55acb4
--- /dev/null
+++ b/src/common/softfloat/source/f64_isSignalingNaN.c
@@ -0,0 +1,51 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f64_isSignalingNaN( float64_t a )
+{
+    union ui64_f64 uA;
+
+    uA.f = a;
+    return softfloat_isSigNaNF64UI( uA.ui );
+
+}
+
diff --git a/src/common/softfloat/source/f64_le.c b/src/common/softfloat/source/f64_le.c
new file mode 100644
index 0000000..91fc994
--- /dev/null
+++ b/src/common/softfloat/source/f64_le.c
@@ -0,0 +1,67 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f64_le( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    signA = signF64UI( uiA );
+    signB = signF64UI( uiB );
+    return
+        (signA != signB)
+            ? signA || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+            : (uiA == uiB) || (signA ^ (uiA < uiB));
+
+}
+
diff --git a/src/common/softfloat/source/f64_le_quiet.c b/src/common/softfloat/source/f64_le_quiet.c
new file mode 100644
index 0000000..a5d332a
--- /dev/null
+++ b/src/common/softfloat/source/f64_le_quiet.c
@@ -0,0 +1,72 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f64_le_quiet( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) {
+        if (
+            softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    signA = signF64UI( uiA );
+    signB = signF64UI( uiB );
+    return
+        (signA != signB)
+            ? signA || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+            : (uiA == uiB) || (signA ^ (uiA < uiB));
+
+}
+
diff --git a/src/common/softfloat/source/f64_lt.c b/src/common/softfloat/source/f64_lt.c
new file mode 100644
index 0000000..abf62fd
--- /dev/null
+++ b/src/common/softfloat/source/f64_lt.c
@@ -0,0 +1,67 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f64_lt( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    signA = signF64UI( uiA );
+    signB = signF64UI( uiB );
+    return
+        (signA != signB)
+            ? signA && ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+            : (uiA != uiB) && (signA ^ (uiA < uiB));
+
+}
+
diff --git a/src/common/softfloat/source/f64_lt_quiet.c b/src/common/softfloat/source/f64_lt_quiet.c
new file mode 100644
index 0000000..6531f57
--- /dev/null
+++ b/src/common/softfloat/source/f64_lt_quiet.c
@@ -0,0 +1,72 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f64_lt_quiet( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) {
+        if (
+            softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    signA = signF64UI( uiA );
+    signB = signF64UI( uiB );
+    return
+        (signA != signB)
+            ? signA && ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+            : (uiA != uiB) && (signA ^ (uiA < uiB));
+
+}
+
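All four ordering predicates above return false when either operand is NaN (the quiet variants raising invalid only for signaling NaNs), and the sign-dispatch expressions make -0 and +0 compare as equal. For example, under the same assumptions as the earlier sketches:

    float64_t pz = { UINT64_C( 0x0000000000000000 ) };  /* +0.0 */
    float64_t nz = { UINT64_C( 0x8000000000000000 ) };  /* -0.0 */
    bool le = f64_le( nz, pz );  /* true: zeros compare equal regardless of sign */
    bool lt = f64_lt( nz, pz );  /* false for the same reason */
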
diff --git a/src/common/softfloat/source/f64_mul.c b/src/common/softfloat/source/f64_mul.c
new file mode 100644
index 0000000..caac424
--- /dev/null
+++ b/src/common/softfloat/source/f64_mul.c
@@ -0,0 +1,139 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t f64_mul( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    bool signZ;
+    uint_fast64_t magBits;
+    struct exp16_sig64 normExpSig;
+    int_fast16_t expZ;
+    struct uint128 sig128Z;
+    uint_fast64_t sigZ, uiZ;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF64UI( uiA );
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF64UI( uiB );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    signZ = signA ^ signB;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0x7FF ) {
+        if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN;
+        magBits = expB | sigB;
+        goto infArg;
+    }
+    if ( expB == 0x7FF ) {
+        if ( sigB ) goto propagateNaN;
+        magBits = expA | sigA;
+        goto infArg;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expA ) {
+        if ( ! sigA ) goto zero;
+        normExpSig = softfloat_normSubnormalF64Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    if ( ! expB ) {
+        if ( ! sigB ) goto zero;
+        normExpSig = softfloat_normSubnormalF64Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expZ = expA + expB - 0x3FF;
+    sigA = (sigA | UINT64_C( 0x0010000000000000 ))<<10;
+    sigB = (sigB | UINT64_C( 0x0010000000000000 ))<<11;
+    sig128Z = softfloat_mul64To128( sigA, sigB );
+    sigZ = sig128Z.v64 | (sig128Z.v0 != 0);
+    if ( sigZ < UINT64_C( 0x4000000000000000 ) ) {
+        --expZ;
+        sigZ <<= 1;
+    }
+    return softfloat_roundPackToF64( signZ, expZ, sigZ );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ infArg:
+    if ( ! magBits ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        uiZ = defaultNaNF64UI;
+    } else {
+        uiZ = packToF64UI( signZ, 0x7FF, 0 );
+    }
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ zero:
+    uiZ = packToF64UI( signZ, 0, 0 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/src/common/softfloat/source/f64_mulAdd.c b/src/common/softfloat/source/f64_mulAdd.c
new file mode 100644
index 0000000..67fc44d
--- /dev/null
+++ b/src/common/softfloat/source/f64_mulAdd.c
@@ -0,0 +1,60 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t f64_mulAdd( float64_t a, float64_t b, float64_t c )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    union ui64_f64 uC;
+    uint_fast64_t uiC;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    uC.f = c;
+    uiC = uC.ui;
+    return softfloat_mulAddF64( uiA, uiB, uiC, 0 );
+
+}
+
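f64_mulAdd defers to the internal softfloat_mulAddF64 helper, which computes a*b + c as a fused operation, that is, with a single rounding at the end rather than one after the multiply and one after the add (the trailing 0 argument appears to select the plain, non-negated form of the operation). A sketch of the difference versus the two-step sequence, under the same assumptions as the earlier examples:

    float64_t p1 = f64_mulAdd( a, b, c );          /* one rounding  */
    float64_t p2 = f64_add( f64_mul( a, b ), c );  /* two roundings */
    /* p1 follows IEEE 754 fusedMultiplyAdd semantics; for some inputs
       p1.v and p2.v differ in the least significant bit. */
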
diff --git a/src/common/softfloat/source/f64_rem.c b/src/common/softfloat/source/f64_rem.c
new file mode 100644
index 0000000..79d4105
--- /dev/null
+++ b/src/common/softfloat/source/f64_rem.c
@@ -0,0 +1,185 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t f64_rem( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    struct exp16_sig64 normExpSig;
+    uint64_t rem;
+    int_fast16_t expDiff;
+    uint32_t q, recip32;
+    uint_fast64_t q64;
+    uint64_t altRem, meanRem;
+    bool signRem;
+    uint_fast64_t uiZ;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF64UI( uiA );
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0x7FF ) {
+        if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN;
+        goto invalid;
+    }
+    if ( expB == 0x7FF ) {
+        if ( sigB ) goto propagateNaN;
+        return a;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA < expB - 1 ) return a;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expB ) {
+        if ( ! sigB ) goto invalid;
+        normExpSig = softfloat_normSubnormalF64Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    if ( ! expA ) {
+        if ( ! sigA ) return a;
+        normExpSig = softfloat_normSubnormalF64Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    rem = sigA | UINT64_C( 0x0010000000000000 );
+    sigB |= UINT64_C( 0x0010000000000000 );
+    expDiff = expA - expB;
+    if ( expDiff < 1 ) {
+        if ( expDiff < -1 ) return a;
+        sigB <<= 9;
+        if ( expDiff ) {
+            rem <<= 8;
+            q = 0;
+        } else {
+            rem <<= 9;
+            q = (sigB <= rem);
+            if ( q ) rem -= sigB;
+        }
+    } else {
+        recip32 = softfloat_approxRecip32_1( sigB>>21 );
+        /*--------------------------------------------------------------------
+        | Changing the shift of `rem' here requires also changing the initial
+        | subtraction from `expDiff'.
+        *--------------------------------------------------------------------*/
+        rem <<= 9;
+        expDiff -= 30;
+        /*--------------------------------------------------------------------
+        | The scale of `sigB' affects how many bits are obtained during each
+        | cycle of the loop. Currently this is 29 bits per loop iteration,
+        | the maximum possible.
+        *--------------------------------------------------------------------*/
+        sigB <<= 9;
+        for (;;) {
+            q64 = (uint32_t) (rem>>32) * (uint_fast64_t) recip32;
+            if ( expDiff < 0 ) break;
+            q = (q64 + 0x80000000)>>32;
+            rem <<= 29;
+            rem -= q * (uint64_t) sigB;
+            if ( rem & UINT64_C( 0x8000000000000000 ) ) rem += sigB;
+            expDiff -= 29;
+        }
+        /*--------------------------------------------------------------------
+        | (`expDiff' cannot be less than -29 here.)
+        *--------------------------------------------------------------------*/
+        q = (uint32_t) (q64>>32)>>(~expDiff & 31);
+        rem = (rem<<(expDiff + 30)) - q * (uint64_t) sigB;
+        if ( rem & UINT64_C( 0x8000000000000000 ) ) {
+            altRem = rem + sigB;
+            goto selectRem;
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    do {
+        altRem = rem;
+        ++q;
+        rem -= sigB;
+    } while ( ! (rem & UINT64_C( 0x8000000000000000 )) );
+ selectRem:
+    meanRem = rem + altRem;
+    if (
+        (meanRem & UINT64_C( 0x8000000000000000 )) || (! meanRem && (q & 1))
+    ) {
+        rem = altRem;
+    }
+    signRem = signA;
+    if ( rem & UINT64_C( 0x8000000000000000 ) ) {
+        signRem = ! signRem;
+        rem = -rem;
+    }
+    return softfloat_normRoundPackToF64( signRem, expB, rem );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+    goto uiZ;
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF64UI;
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/src/common/softfloat/source/f64_roundToInt.c b/src/common/softfloat/source/f64_roundToInt.c
new file mode 100644
index 0000000..3129a55
--- /dev/null
+++ b/src/common/softfloat/source/f64_roundToInt.c
@@ -0,0 +1,113 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t f64_roundToInt( float64_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    int_fast16_t exp;
+    uint_fast64_t uiZ, lastBitMask, roundBitsMask;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp <= 0x3FE ) {
+        if ( ! (uiA & UINT64_C( 0x7FFFFFFFFFFFFFFF )) ) return a;
+        if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact;
+        uiZ = uiA & packToF64UI( 1, 0, 0 );
+        switch ( roundingMode ) {
+         case softfloat_round_near_even:
+            if ( ! fracF64UI( uiA ) ) break;
+            /* fall through */
+         case softfloat_round_near_maxMag:
+            if ( exp == 0x3FE ) uiZ |= packToF64UI( 0, 0x3FF, 0 );
+            break;
+         case softfloat_round_min:
+            if ( uiZ ) uiZ = packToF64UI( 1, 0x3FF, 0 );
+            break;
+         case softfloat_round_max:
+            if ( ! uiZ ) uiZ = packToF64UI( 0, 0x3FF, 0 );
+            break;
+        }
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( 0x433 <= exp ) {
+        if ( (exp == 0x7FF) && fracF64UI( uiA ) ) {
+            uiZ = softfloat_propagateNaNF64UI( uiA, 0 );
+            goto uiZ;
+        }
+        return a;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uiZ = uiA;
+    lastBitMask = (uint_fast64_t) 1<<(0x433 - exp);
+    roundBitsMask = lastBitMask - 1;
+    if ( roundingMode == softfloat_round_near_maxMag ) {
+        uiZ += lastBitMask>>1;
+    } else if ( roundingMode == softfloat_round_near_even ) {
+        uiZ += lastBitMask>>1;
+        if ( ! (uiZ & roundBitsMask) ) uiZ &= ~lastBitMask;
+    } else if (
+        roundingMode
+            == (signF64UI( uiZ ) ? softfloat_round_min : softfloat_round_max)
+    ) {
+        uiZ += roundBitsMask;
+    }
+    uiZ &= ~roundBitsMask;
+    if ( exact && (uiZ != uiA) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
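f64_roundToInt rounds to an integral value in the same floating-point format (it does not convert to an integer type); exponents at or above 0x433 are already integral, and the mode argument selects direction and tie-breaking. A sketch with 2.5 (bits 0x4004000000000000), same assumptions as the earlier examples:

    float64_t a = { UINT64_C( 0x4004000000000000 ) };  /* 2.5 */
    f64_roundToInt( a, softfloat_round_near_even, false );   /* -> 2.0 */
    f64_roundToInt( a, softfloat_round_near_maxMag, false ); /* -> 3.0 */
    f64_roundToInt( a, softfloat_round_max, false );         /* -> 3.0 */
    f64_roundToInt( a, softfloat_round_min, false );         /* -> 2.0 */
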
softfloat_round_min : softfloat_round_max) + ) { + uiZ += roundBitsMask; + } + uiZ &= ~roundBitsMask; + if ( exact && (uiZ != uiA) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/f64_sqrt.c b/src/common/softfloat/source/f64_sqrt.c new file mode 100644 index 0000000..9a06cfa --- /dev/null +++ b/src/common/softfloat/source/f64_sqrt.c @@ -0,0 +1,133 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_sqrt( float64_t a ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA, uiZ; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + uint32_t sig32A, recipSqrt32, sig32Z; + uint_fast64_t rem; + uint32_t q; + uint_fast64_t sigZ, shiftedSigZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA ) { + uiZ = softfloat_propagateNaNF64UI( uiA, 0 ); + goto uiZ; + } + if ( ! signA ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signA ) { + if ( ! 
(expA | sigA) ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) return a; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + | (`sig32Z' is guaranteed to be a lower bound on the square root of + | `sig32A', which makes `sig32Z' also a lower bound on the square root of + | `sigA'.) + *------------------------------------------------------------------------*/ + expZ = ((expA - 0x3FF)>>1) + 0x3FE; + expA &= 1; + sigA |= UINT64_C( 0x0010000000000000 ); + sig32A = sigA>>21; + recipSqrt32 = softfloat_approxRecipSqrt32_1( expA, sig32A ); + sig32Z = ((uint_fast64_t) sig32A * recipSqrt32)>>32; + if ( expA ) { + sigA <<= 8; + sig32Z >>= 1; + } else { + sigA <<= 9; + } + rem = sigA - (uint_fast64_t) sig32Z * sig32Z; + q = ((uint32_t) (rem>>2) * (uint_fast64_t) recipSqrt32)>>32; + sigZ = ((uint_fast64_t) sig32Z<<32 | 1<<5) + ((uint_fast64_t) q<<3); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( (sigZ & 0x1FF) < 0x22 ) { + sigZ &= ~(uint_fast64_t) 0x3F; + shiftedSigZ = sigZ>>6; + rem = (sigA<<52) - shiftedSigZ * shiftedSigZ; + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + --sigZ; + } else { + if ( rem ) sigZ |= 1; + } + } + return softfloat_roundPackToF64( 0, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/f64_sub.c b/src/common/softfloat/source/f64_sub.c new file mode 100644 index 0000000..14ea575 --- /dev/null +++ b/src/common/softfloat/source/f64_sub.c @@ -0,0 +1,65 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t f64_sub( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool signA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF64UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF64UI( uiB );
+    if ( signA == signB ) {
+        return softfloat_subMagsF64( uiA, uiB, signA );
+    } else {
+        return softfloat_addMagsF64( uiA, uiB, signA );
+    }
+
+}
+
diff --git a/src/common/softfloat/source/f64_to_f32.c b/src/common/softfloat/source/f64_to_f32.c
new file mode 100644
index 0000000..99b13dd
--- /dev/null
+++ b/src/common/softfloat/source/f64_to_f32.c
@@ -0,0 +1,88 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t f64_to_f32( float64_t a )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t frac;
+    struct commonNaN commonNaN;
+    uint_fast32_t uiZ, frac32;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    frac = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp == 0x7FF ) {
+        if ( frac ) {
+            softfloat_f64UIToCommonNaN( uiA, &commonNaN );
+            uiZ = softfloat_commonNaNToF32UI( &commonNaN );
+        } else {
+            uiZ = packToF32UI( sign, 0xFF, 0 );
+        }
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    frac32 = softfloat_shortShiftRightJam64( frac, 22 );
+    if ( ! (exp | frac32) ) {
+        uiZ = packToF32UI( sign, 0, 0 );
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    return softfloat_roundPackToF32( sign, exp - 0x381, frac32 | 0x40000000 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
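The 22-bit jammed shift above is what makes the narrowing conversion round once, correctly: any nonzero bits discarded from the 52-bit fraction are folded into the sticky bit before softfloat_roundPackToF32 runs. A minimal usage sketch of f64_to_f32, assuming the struct-wrapped float32_t/float64_t from softfloat_types.h (raw IEEE bits in field .v):

#include <stdint.h>
#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    float64_t d;
    /* 1 + 2^-52: the discarded fraction bits are nonzero, so the result
     * rounds to 1.0f (0x3F800000) and the inexact flag is raised. */
    d.v = UINT64_C(0x3FF0000000000001);
    softfloat_exceptionFlags = 0;
    float32_t s = f64_to_f32(d);
    printf("0x%08lX inexact=%d\n", (unsigned long) s.v,
           (softfloat_exceptionFlags & softfloat_flag_inexact) != 0);
    return 0;
}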
diff --git a/src/common/softfloat/source/f64_to_i32.c b/src/common/softfloat/source/f64_to_i32.c
new file mode 100644
index 0000000..8712c0a
--- /dev/null
+++ b/src/common/softfloat/source/f64_to_i32.c
@@ -0,0 +1,82 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast32_t f64_to_i32( float64_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+#if (i32_fromNaN != i32_fromPosOverflow) || (i32_fromNaN != i32_fromNegOverflow)
+    if ( (exp == 0x7FF) && sig ) {
+#if (i32_fromNaN == i32_fromPosOverflow)
+        sign = 0;
+#elif (i32_fromNaN == i32_fromNegOverflow)
+        sign = 1;
+#else
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return i32_fromNaN;
+#endif
+    }
+#endif
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp ) sig |= UINT64_C( 0x0010000000000000 );
+    shiftDist = 0x427 - exp;
+    if ( 0 < shiftDist ) sig = softfloat_shiftRightJam64( sig, shiftDist );
+    return softfloat_roundToI32( sign, sig, roundingMode, exact );
+
+}
+
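The conversion leaves all tie-breaking to the caller-supplied rounding mode. A minimal sketch, again assuming the struct-wrapped float64_t (.v holds the raw bits): 2.5 rounds to 2 under round-to-nearest-even but to 3 under round-toward-max, and both conversions are inexact.

#include <stdint.h>
#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    float64_t a;
    a.v = UINT64_C(0x4004000000000000);   /* 2.5 in IEEE binary64 */
    softfloat_exceptionFlags = 0;
    int_fast32_t nearEven  = f64_to_i32(a, softfloat_round_near_even, true);
    int_fast32_t towardMax = f64_to_i32(a, softfloat_round_max, true);
    printf("near_even=%d max=%d inexact=%d\n",
           (int) nearEven, (int) towardMax,
           (softfloat_exceptionFlags & softfloat_flag_inexact) != 0);
    return 0;   /* prints: near_even=2 max=3 inexact=1 */
}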
diff --git a/src/common/softfloat/source/f64_to_i32_r_minMag.c b/src/common/softfloat/source/f64_to_i32_r_minMag.c
new file mode 100644
index 0000000..b7e1e03
--- /dev/null
+++ b/src/common/softfloat/source/f64_to_i32_r_minMag.c
@@ -0,0 +1,96 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast32_t f64_to_i32_r_minMag( float64_t a, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+    bool sign;
+    int_fast32_t absZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0x433 - exp;
+    if ( 53 <= shiftDist ) {
+        if ( exact && (exp | sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF64UI( uiA );
+    if ( shiftDist < 22 ) {
+        if (
+            sign && (exp == 0x41E) && (sig < UINT64_C( 0x0000000000200000 ))
+        ) {
+            if ( exact && sig ) {
+                softfloat_exceptionFlags |= softfloat_flag_inexact;
+            }
+            return -0x7FFFFFFF - 1;
+        }
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0x7FF) && sig ? i32_fromNaN
+                : sign ? i32_fromNegOverflow : i32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig |= UINT64_C( 0x0010000000000000 );
+    absZ = sig>>shiftDist;
+    if ( exact && ((uint_fast64_t) (uint_fast32_t) absZ<<shiftDist != sig) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return sign ? -absZ : absZ;
+
+}
+
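The exp == 0x41E branch exists because -2^31 is the one value whose magnitude cannot be negated inside int32 arithmetic; it must be returned directly rather than via the sign ? -absZ : absZ path. A sketch of that edge case, assuming the struct-wrapped float64_t (.v raw bits):

#include <stdint.h>
#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    float64_t x;
    x.v = UINT64_C(0xC1E0000000000000);   /* exactly -2^31 */
    softfloat_exceptionFlags = 0;
    int_fast32_t z = f64_to_i32_r_minMag(x, true);
    /* representable exactly: no invalid flag, no inexact flag */
    printf("%ld flags=%u\n", (long) z, (unsigned) softfloat_exceptionFlags);
    return 0;   /* prints: -2147483648 flags=0 */
}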
diff --git a/src/common/softfloat/source/f64_to_i64.c b/src/common/softfloat/source/f64_to_i64.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/f64_to_i64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast64_t f64_to_i64( float64_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+    struct uint64_extra sigExtra;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp ) sig |= UINT64_C( 0x0010000000000000 );
+    shiftDist = 0x433 - exp;
+    if ( shiftDist <= 0 ) {
+        if ( shiftDist < -11 ) goto invalid;
+        sigExtra.v = sig<<-shiftDist;
+        sigExtra.extra = 0;
+    } else {
+        sigExtra = softfloat_shiftRightJam64Extra( sig, 0, shiftDist );
+    }
+    return
+        softfloat_roundToI64(
+            sign, sigExtra.v, sigExtra.extra, roundingMode, exact );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return
+        (exp == 0x7FF) && fracF64UI( uiA ) ? i64_fromNaN
+            : sign ? i64_fromNegOverflow : i64_fromPosOverflow;
+
+}
+
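The constant 0x433 is 1075 = 1023 + 52: a finite double is (-1)^s * (2^52 + frac) * 2^(exp - 0x433), so shiftDist = 0x433 - exp is exactly the right shift that turns the 53-bit significand into the integer value. A hypothetical standalone check of that identity, independent of SoftFloat:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t bits = UINT64_C(0x4037000000000000);       /* 23.0 */
    int exp = (int) ((bits >> 52) & 0x7FF);             /* 0x403 */
    uint64_t sig = (bits & UINT64_C(0x000FFFFFFFFFFFFF))
                 | UINT64_C(0x0010000000000000);        /* hidden bit */
    int shiftDist = 0x433 - exp;                        /* 48 */
    printf("%llu\n", (unsigned long long) (sig >> shiftDist)); /* 23 */
    return 0;
}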
diff --git a/src/common/softfloat/source/f64_to_i64_r_minMag.c b/src/common/softfloat/source/f64_to_i64_r_minMag.c
new file mode 100644
index 0000000..6cca23f
--- /dev/null
+++ b/src/common/softfloat/source/f64_to_i64_r_minMag.c
@@ -0,0 +1,100 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast64_t f64_to_i64_r_minMag( float64_t a, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+    int_fast64_t absZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0x433 - exp;
+    if ( shiftDist <= 0 ) {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( shiftDist < -10 ) {
+            if ( uiA == packToF64UI( 1, 0x43E, 0 ) ) {
+                return -INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1;
+            }
+            softfloat_raiseFlags( softfloat_flag_invalid );
+            return
+                (exp == 0x7FF) && sig ? i64_fromNaN
+                    : sign ? i64_fromNegOverflow : i64_fromPosOverflow;
+        }
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        sig |= UINT64_C( 0x0010000000000000 );
+        absZ = sig<<-shiftDist;
+    } else {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( 53 <= shiftDist ) {
+            if ( exact && (exp | sig) ) {
+                softfloat_exceptionFlags |= softfloat_flag_inexact;
+            }
+            return 0;
+        }
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        sig |= UINT64_C( 0x0010000000000000 );
+        absZ = sig>>shiftDist;
+        if ( exact && ((uint_fast64_t) (absZ<<shiftDist) != sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+    }
+    return sign ? -absZ : absZ;
+
+}
+
diff --git a/src/common/softfloat/source/f64_to_ui32.c b/src/common/softfloat/source/f64_to_ui32.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/f64_to_ui32.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast32_t f64_to_ui32( float64_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+#if (ui32_fromNaN != ui32_fromPosOverflow) || (ui32_fromNaN != ui32_fromNegOverflow)
+    if ( (exp == 0x7FF) && sig ) {
+#if (ui32_fromNaN == ui32_fromPosOverflow)
+        sign = 0;
+#elif (ui32_fromNaN == ui32_fromNegOverflow)
+        sign = 1;
+#else
softfloat_raiseFlags( softfloat_flag_invalid ); + return ui32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= UINT64_C( 0x0010000000000000 ); + shiftDist = 0x427 - exp; + if ( 0 < shiftDist ) sig = softfloat_shiftRightJam64( sig, shiftDist ); + return softfloat_roundToUI32( sign, sig, roundingMode, exact ); + +} + diff --git a/src/common/softfloat/source/f64_to_ui32_r_minMag.c b/src/common/softfloat/source/f64_to_ui32_r_minMag.c new file mode 100644 index 0000000..11f0b05 --- /dev/null +++ b/src/common/softfloat/source/f64_to_ui32_r_minMag.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast32_t f64_to_ui32_r_minMag( float64_t a, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+    bool sign;
+    uint_fast32_t z;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0x433 - exp;
+    if ( 53 <= shiftDist ) {
+        if ( exact && (exp | sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF64UI( uiA );
+    if ( sign || (shiftDist < 21) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0x7FF) && sig ? ui32_fromNaN
+                : sign ? ui32_fromNegOverflow : ui32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig |= UINT64_C( 0x0010000000000000 );
+    z = sig>>shiftDist;
+    if ( exact && ((uint_fast64_t) z<<shiftDist != sig) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+
+}
+
diff --git a/src/common/softfloat/source/f64_to_ui64.c b/src/common/softfloat/source/f64_to_ui64.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/f64_to_ui64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast64_t f64_to_ui64( float64_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+    struct uint64_extra sigExtra;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp ) sig |= UINT64_C( 0x0010000000000000 );
+    shiftDist = 0x433 - exp;
+    if ( shiftDist <= 0 ) {
+        if ( shiftDist < -11 ) goto invalid;
+        sigExtra.v = sig<<-shiftDist;
+        sigExtra.extra = 0;
+    } else {
+        sigExtra = softfloat_shiftRightJam64Extra( sig, 0, shiftDist );
+    }
+    return
+        softfloat_roundToUI64(
+            sign, sigExtra.v, sigExtra.extra, roundingMode, exact );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return
+        (exp == 0x7FF) && fracF64UI( uiA ) ? ui64_fromNaN
+            : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow;
+
+}
+
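Because the unsigned range extends one bit further, 2^63 converts cleanly here while the signed conversion must signal. A sketch contrasting the two, assuming the struct-wrapped float64_t (.v raw bits) and that the internal rounding routines raise softfloat_flag_invalid on signed overflow, as IEEE 754 requires:

#include <stdint.h>
#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    float64_t x;
    x.v = UINT64_C(0x43E0000000000000);   /* exactly 2^63 */

    softfloat_exceptionFlags = 0;
    uint_fast64_t u = f64_to_ui64(x, softfloat_round_near_even, true);
    printf("ui64=%llu flags=%u\n", (unsigned long long) u,
           (unsigned) softfloat_exceptionFlags);   /* 2^63, flags=0 */

    softfloat_exceptionFlags = 0;
    (void) f64_to_i64(x, softfloat_round_near_even, true);
    printf("i64 flags=%u\n", (unsigned) softfloat_exceptionFlags);
    /* expected: softfloat_flag_invalid (16) is set */
    return 0;
}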
diff --git a/src/common/softfloat/source/f64_to_ui64_r_minMag.c b/src/common/softfloat/source/f64_to_ui64_r_minMag.c
new file mode 100644
index 0000000..25918c4
--- /dev/null
+++ b/src/common/softfloat/source/f64_to_ui64_r_minMag.c
@@ -0,0 +1,93 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast64_t f64_to_ui64_r_minMag( float64_t a, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+    bool sign;
+    uint_fast64_t z;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0x433 - exp;
+    if ( 53 <= shiftDist ) {
+        if ( exact && (exp | sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF64UI( uiA );
+    if ( sign ) goto invalid;
+    if ( shiftDist <= 0 ) {
+        if ( shiftDist < -11 ) goto invalid;
+        z = (sig | UINT64_C( 0x0010000000000000 ))<<-shiftDist;
+    } else {
+        sig |= UINT64_C( 0x0010000000000000 );
+        z = sig>>shiftDist;
+        if ( exact && (uint64_t) (sig<<(-shiftDist & 63)) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+    }
+    return z;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return
+        (exp == 0x7FF) && sig ? ui64_fromNaN
+            : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow;
+
+}
+
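The exactness test above works because shifting sig left by 64 - shiftDist (written -shiftDist & 63) leaves exactly the bits that the right shift discarded; a nonzero result means the truncation was inexact. A hypothetical standalone illustration of the same idiom:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t sig = UINT64_C(0x001AB00000000003);   /* low bits set */
    int shiftDist = 4;
    uint64_t discarded = (uint64_t) (sig << (-shiftDist & 63));
    printf("truncated=0x%llX discarded=%s\n",
           (unsigned long long) (sig >> shiftDist),
           discarded ? "nonzero (inexact)" : "zero (exact)");
    return 0;
}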
diff --git a/src/common/softfloat/source/i32_to_f32.c b/src/common/softfloat/source/i32_to_f32.c
new file mode 100644
index 0000000..b1aedba
--- /dev/null
+++ b/src/common/softfloat/source/i32_to_f32.c
@@ -0,0 +1,58 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t i32_to_f32( int32_t a )
+{
+    bool sign;
+    union ui32_f32 uZ;
+    uint_fast32_t absA;
+
+    sign = (a < 0);
+    if ( ! (a & 0x7FFFFFFF) ) {
+        uZ.ui = sign ? packToF32UI( 1, 0x9E, 0 ) : 0;
+        return uZ.f;
+    }
+    absA = sign ? -(uint_fast32_t) a : (uint_fast32_t) a;
+    return softfloat_normRoundPackToF32( sign, 0x9C, absA );
+
+}
+
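The test ! (a & 0x7FFFFFFF) is true for exactly two inputs, 0 and INT32_MIN, and the INT32_MIN case is answered with packToF32UI(1, 0x9E, 0): biased exponent 0x9E = 127 + 31 encodes 2^31, so the bit pattern 0xCF000000 is exactly -2^31. A hypothetical standalone check against the native encoding, assuming IEEE-754 floats on the host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint32_t bits = ((uint32_t) 1 << 31) | ((uint32_t) 0x9E << 23);
    float f;
    memcpy(&f, &bits, sizeof f);
    printf("0x%08X = %.1f\n", bits, (double) f);
    return 0;   /* prints: 0xCF000000 = -2147483648.0 */
}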
diff --git a/src/common/softfloat/source/i32_to_f64.c b/src/common/softfloat/source/i32_to_f64.c
new file mode 100644
index 0000000..d3901eb
--- /dev/null
+++ b/src/common/softfloat/source/i32_to_f64.c
@@ -0,0 +1,65 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t i32_to_f64( int32_t a )
+{
+    uint_fast64_t uiZ;
+    bool sign;
+    uint_fast32_t absA;
+    int_fast8_t shiftDist;
+    union ui64_f64 uZ;
+
+    if ( ! a ) {
+        uiZ = 0;
+    } else {
+        sign = (a < 0);
+        absA = sign ? -(uint_fast32_t) a : (uint_fast32_t) a;
+        shiftDist = softfloat_countLeadingZeros32( absA ) + 21;
+        uiZ =
+            packToF64UI(
+                sign, 0x432 - shiftDist, (uint_fast64_t) absA<<shiftDist );
+    }
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/src/common/softfloat/source/i64_to_f32.c b/src/common/softfloat/source/i64_to_f32.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/i64_to_f32.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t i64_to_f32( int64_t a )
+{
+    bool sign;
+    uint_fast64_t absA;
+    int_fast8_t shiftDist;
+    union ui32_f32 u;
+    uint_fast32_t sig;
+
+    sign = (a < 0);
+    absA = sign ? -(uint_fast64_t) a : (uint_fast64_t) a;
+    shiftDist = softfloat_countLeadingZeros64( absA ) - 40;
+    if ( 0 <= shiftDist ) {
+        u.ui =
+            a ? packToF32UI(
+                    sign, 0x95 - shiftDist, (uint_fast32_t) absA<<shiftDist )
+                : 0;
+        return u.f;
+    } else {
+        shiftDist += 7;
+        sig =
+            (shiftDist < 0)
+                ? (uint_fast32_t) softfloat_shortShiftRightJam64( absA, -shiftDist )
+                : (uint_fast32_t) absA<<shiftDist;
+        return softfloat_roundPackToF32( sign, 0x9C - shiftDist, sig );
+    }
+
+}
+
diff --git a/src/common/softfloat/source/i64_to_f64.c b/src/common/softfloat/source/i64_to_f64.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/i64_to_f64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t i64_to_f64( int64_t a )
+{
+    bool sign;
+    union ui64_f64 uZ;
+    uint_fast64_t absA;
+
+    sign = (a < 0);
+    if ( ! (a & UINT64_C( 0x7FFFFFFFFFFFFFFF )) ) {
+        uZ.ui = sign ? packToF64UI( 1, 0x43E, 0 ) : 0;
+        return uZ.f;
+    }
+    absA = sign ? -(uint_fast64_t) a : (uint_fast64_t) a;
+    return softfloat_normRoundPackToF64( sign, 0x43C, absA );
+
+}
+
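Unlike the 32-bit case, not every int64 is exactly representable in binary64: the significand holds only 53 bits, so softfloat_normRoundPackToF64 must round. A sketch, assuming the struct-wrapped float64_t (.v raw bits):

#include <stdint.h>
#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    /* 2^63 - 1 needs 63 significant bits; the nearest double is 2^63. */
    float64_t z = i64_to_f64(INT64_MAX);
    printf("0x%016llX\n", (unsigned long long) z.v);
    /* expected: 0x43E0000000000000, the encoding of +2^63 */
    return 0;
}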
diff --git a/src/common/softfloat/source/include/internals.h b/src/common/softfloat/source/include/internals.h
new file mode 100644
index 0000000..20a3017
--- /dev/null
+++ b/src/common/softfloat/source/include/internals.h
@@ -0,0 +1,147 @@
+
+/*============================================================================
+
+This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#ifndef internals_h
+#define internals_h 1
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "primitives.h"
+#include "softfloat_types.h"
+
+union ui16_f16 { uint16_t ui; float16_t f; };
+union ui32_f32 { uint32_t ui; float32_t f; };
+union ui64_f64 { uint64_t ui; float64_t f; };
+
+union extF80M_extF80 { struct extFloat80M fM; extFloat80_t f; };
+union ui128_f128 { struct uint128 ui; float128_t f; };
+
+enum {
+    softfloat_mulAdd_subC    = 1,
+    softfloat_mulAdd_subProd = 2
+};
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+uint_fast32_t softfloat_roundToUI32( bool, uint_fast64_t, uint_fast8_t, bool );
+
+uint_fast64_t
+ softfloat_roundToUI64(
+     bool, uint_fast64_t, uint_fast64_t, uint_fast8_t, bool );
+
+int_fast32_t softfloat_roundToI32( bool, uint_fast64_t, uint_fast8_t, bool );
+
+int_fast64_t
+ softfloat_roundToI64(
+     bool, uint_fast64_t, uint_fast64_t, uint_fast8_t, bool );
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signF16UI( a ) ((bool) ((uint16_t) (a)>>15))
+#define expF16UI( a ) ((int_fast8_t) ((a)>>10) & 0x1F)
+#define fracF16UI( a ) ((a) & 0x03FF)
+#define packToF16UI( sign, exp, sig ) (((uint16_t) (sign)<<15) + ((uint16_t) (exp)<<10) + (sig))
+
+#define isNaNF16UI( a ) (((~(a) & 0x7C00) == 0) && ((a) & 0x03FF))
+
+float16_t softfloat_roundPackToF16( bool, int_fast16_t, uint_fast16_t );
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signF32UI( a ) ((bool) ((uint32_t) (a)>>31))
+#define expF32UI( a ) ((int_fast16_t) ((a)>>23) & 0xFF)
+#define fracF32UI( a ) ((a) & 0x007FFFFF)
+#define packToF32UI( sign, exp, sig ) (((uint32_t) (sign)<<31) + ((uint32_t) (exp)<<23) + (sig))
+
+#define isNaNF32UI( a ) (((~(a) & 0x7F800000) == 0) && ((a) & 0x007FFFFF))
+
+struct exp8_sig16 { int_fast8_t exp; uint_fast16_t sig; };
+struct exp8_sig16 softfloat_normSubnormalF16Sig( uint_fast16_t );
+
+struct exp16_sig32 { int_fast16_t exp; uint_fast32_t sig; };
+struct exp16_sig32 softfloat_normSubnormalF32Sig( uint_fast32_t );
+
+float32_t
softfloat_roundPackToF32( bool, int_fast16_t, uint_fast32_t ); +float32_t softfloat_normRoundPackToF32( bool, int_fast16_t, uint_fast32_t ); + +float32_t softfloat_addMagsF32( uint_fast32_t, uint_fast32_t ); +float32_t softfloat_subMagsF32( uint_fast32_t, uint_fast32_t ); +float32_t + softfloat_mulAddF32( + uint_fast32_t, uint_fast32_t, uint_fast32_t, uint_fast8_t ); + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signF64UI( a ) ((bool) ((uint64_t) (a)>>63)) +#define expF64UI( a ) ((int_fast16_t) ((a)>>52) & 0x7FF) +#define fracF64UI( a ) ((a) & UINT64_C( 0x000FFFFFFFFFFFFF )) +#define packToF64UI( sign, exp, sig ) ((uint64_t) (((uint_fast64_t) (sign)<<63) + ((uint_fast64_t) (exp)<<52) + (sig))) + +#define isNaNF64UI( a ) (((~(a) & UINT64_C( 0x7FF0000000000000 )) == 0) && ((a) & UINT64_C( 0x000FFFFFFFFFFFFF ))) + +struct exp16_sig64 { int_fast16_t exp; uint_fast64_t sig; }; +struct exp16_sig64 softfloat_normSubnormalF64Sig( uint_fast64_t ); + +float64_t softfloat_roundPackToF64( bool, int_fast16_t, uint_fast64_t ); +float64_t softfloat_normRoundPackToF64( bool, int_fast16_t, uint_fast64_t ); + +float64_t softfloat_addMagsF64( uint_fast64_t, uint_fast64_t, bool ); +float64_t softfloat_subMagsF64( uint_fast64_t, uint_fast64_t, bool ); +float64_t + softfloat_mulAddF64( + uint_fast64_t, uint_fast64_t, uint_fast64_t, uint_fast8_t ); + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signExtF80UI64( a64 ) ((bool) ((uint16_t) (a64)>>15)) +#define expExtF80UI64( a64 ) ((a64) & 0x7FFF) +#define packToExtF80UI64( sign, exp ) ((uint_fast16_t) (sign)<<15 | (exp)) + +#define isNaNExtF80UI( a64, a0 ) ((((a64) & 0x7FFF) == 0x7FFF) && ((a0) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))) + + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signF128UI64( a64 ) ((bool) ((uint64_t) (a64)>>63)) +#define expF128UI64( a64 ) ((int_fast32_t) ((a64)>>48) & 0x7FFF) +#define fracF128UI64( a64 ) ((a64) & UINT64_C( 0x0000FFFFFFFFFFFF )) +#define packToF128UI64( sign, exp, sig64 ) (((uint_fast64_t) (sign)<<63) + ((uint_fast64_t) (exp)<<48) + (sig64)) + +#define isNaNF128UI( a64, a0 ) (((~(a64) & UINT64_C( 0x7FFF000000000000 )) == 0) && (a0 || ((a64) & UINT64_C( 0x0000FFFFFFFFFFFF )))) + + +#endif + diff --git a/src/common/softfloat/source/include/primitiveTypes.h b/src/common/softfloat/source/include/primitiveTypes.h new file mode 100644 index 0000000..781d82f --- /dev/null +++ b/src/common/softfloat/source/include/primitiveTypes.h @@ -0,0 +1,83 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#ifndef primitiveTypes_h
+#define primitiveTypes_h 1
+
+#include <stdint.h>
+
+
+#ifdef LITTLEENDIAN
+struct uint128 { uint64_t v0, v64; };
+struct uint64_extra { uint64_t extra, v; };
+struct uint128_extra { uint64_t extra; struct uint128 v; };
+#else
+struct uint128 { uint64_t v64, v0; };
+struct uint64_extra { uint64_t v, extra; };
+struct uint128_extra { struct uint128 v; uint64_t extra; };
+#endif
+
+
+/*----------------------------------------------------------------------------
+| These macros are used to isolate the differences in word order between big-
+| endian and little-endian platforms.
+*----------------------------------------------------------------------------*/
+#ifdef LITTLEENDIAN
+#define wordIncr 1
+#define indexWord( total, n ) (n)
+#define indexWordHi( total ) ((total) - 1)
+#define indexWordLo( total ) 0
+#define indexMultiword( total, m, n ) (n)
+#define indexMultiwordHi( total, n ) ((total) - (n))
+#define indexMultiwordLo( total, n ) 0
+#define indexMultiwordHiBut( total, n ) (n)
+#define indexMultiwordLoBut( total, n ) 0
+#define INIT_UINTM4( v3, v2, v1, v0 ) { v0, v1, v2, v3 }
+#else
+#define wordIncr -1
+#define indexWord( total, n ) ((total) - 1 - (n))
+#define indexWordHi( total ) 0
+#define indexWordLo( total ) ((total) - 1)
+#define indexMultiword( total, m, n ) ((total) - 1 - (m))
+#define indexMultiwordHi( total, n ) 0
+#define indexMultiwordLo( total, n ) ((total) - (n))
+#define indexMultiwordHiBut( total, n ) 0
+#define indexMultiwordLoBut( total, n ) (n)
+#define INIT_UINTM4( v3, v2, v1, v0 ) { v3, v2, v1, v0 }
+#endif
+
+#endif
+
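These index macros let multiword code be written once and compile correctly for either word order: INIT_UINTM4 always lists words from most to least significant, and indexWordHi/indexWordLo recover them regardless of how they are laid out in memory. A sketch, assuming a little-endian build and that the header can be included on its own:

#include <stdint.h>
#include <stdio.h>
#define LITTLEENDIAN 1            /* assumption for this illustration */
#include "primitiveTypes.h"

int main(void)
{
    uint64_t words[4] = INIT_UINTM4(0x3333, 0x2222, 0x1111, 0x0000);
    printf("hi=%llX lo=%llX\n",
           (unsigned long long) words[indexWordHi(4)],
           (unsigned long long) words[indexWordLo(4)]);
    return 0;   /* prints: hi=3333 lo=0 on both endiannesses */
}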
diff --git a/src/common/softfloat/source/include/primitives.h b/src/common/softfloat/source/include/primitives.h
new file mode 100644
index 0000000..11ab0c2
--- /dev/null
+++ b/src/common/softfloat/source/include/primitives.h
@@ -0,0 +1,297 @@
+
+/*============================================================================
+
+This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#ifndef primitives_h
+#define primitives_h 1
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "primitiveTypes.h"
+
+/*----------------------------------------------------------------------------
+| Shifts 'a' right by the number of bits given in 'dist', which must be in
+| the range 1 to 63. If any nonzero bits are shifted off, they are "jammed"
+| into the least-significant bit of the shifted value by setting the least-
+| significant bit to 1. This shifted-and-jammed value is returned.
+*----------------------------------------------------------------------------*/
+INLINE
+uint64_t softfloat_shortShiftRightJam64( uint64_t a, uint_fast8_t dist )
+    { return a>>dist | ((a & (((uint_fast64_t) 1<<dist) - 1)) != 0); }
+
+/*----------------------------------------------------------------------------
+| Shifts 'a' right by the number of bits given in 'dist', which must not
+| be zero. If any nonzero bits are shifted off, they are "jammed" into the
+| least-significant bit of the shifted value by setting the least-significant
+| bit to 1. This shifted-and-jammed value is returned.
+| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is
+| greater than 32, the result will be either 0 or 1, depending on whether 'a'
+| is zero or nonzero.
+*----------------------------------------------------------------------------*/
+INLINE uint32_t softfloat_shiftRightJam32( uint32_t a, uint_fast16_t dist )
+{
+    return
+        (dist < 31) ? a>>dist | ((uint32_t) (a<<(-dist & 31)) != 0) : (a != 0);
+}
+
+/*----------------------------------------------------------------------------
+| Shifts 'a' right by the number of bits given in 'dist', which must not
+| be zero. If any nonzero bits are shifted off, they are "jammed" into the
+| least-significant bit of the shifted value by setting the least-significant
+| bit to 1. This shifted-and-jammed value is returned.
+| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is
+| greater than 64, the result will be either 0 or 1, depending on whether 'a'
+| is zero or nonzero.
+*----------------------------------------------------------------------------*/
+INLINE uint64_t softfloat_shiftRightJam64( uint64_t a, uint_fast32_t dist )
+{
+    return
+        (dist < 63) ? a>>dist | ((uint64_t) (a<<(-dist & 63)) != 0) : (a != 0);
+}
+
+/*----------------------------------------------------------------------------
+| A constant table that translates an 8-bit unsigned integer (the array index)
+| into the number of leading 0 bits before the most-significant 1 of that
+| integer. For integer zero (index 0), the corresponding table element is 8.
+*----------------------------------------------------------------------------*/
+extern const uint_least8_t softfloat_countLeadingZeros8[256];
+
+/*----------------------------------------------------------------------------
+| Returns the number of leading 0 bits before the most-significant 1 bit of
+| 'a'.
If 'a' is zero, 16 is returned. +*----------------------------------------------------------------------------*/ +INLINE uint_fast8_t softfloat_countLeadingZeros16( uint16_t a ) +{ + uint_fast8_t count = 8; + if ( 0x100 <= a ) { + count = 0; + a >>= 8; + } + count += softfloat_countLeadingZeros8[a]; + return count; +} + +/*---------------------------------------------------------------------------- +| Returns the number of leading 0 bits before the most-significant 1 bit of +| 'a'. If 'a' is zero, 32 is returned. +*----------------------------------------------------------------------------*/ +INLINE uint_fast8_t softfloat_countLeadingZeros32( uint32_t a ) +{ + uint_fast8_t count = 0; + if ( a < 0x10000 ) { + count = 16; + a <<= 16; + } + if ( a < 0x1000000 ) { + count += 8; + a <<= 8; + } + count += softfloat_countLeadingZeros8[a>>24]; + return count; +} + +/*---------------------------------------------------------------------------- +| Returns the number of leading 0 bits before the most-significant 1 bit of +| 'a'. If 'a' is zero, 64 is returned. +*----------------------------------------------------------------------------*/ +uint_fast8_t softfloat_countLeadingZeros64( uint64_t a ); + +extern const uint16_t softfloat_approxRecip_1k0s[16]; +extern const uint16_t softfloat_approxRecip_1k1s[16]; + +/*---------------------------------------------------------------------------- +| Returns an approximation to the reciprocal of the number represented by 'a', +| where 'a' is interpreted as an unsigned fixed-point number with one integer +| bit and 31 fraction bits. The 'a' input must be "normalized", meaning that +| its most-significant bit (bit 31) must be 1. Thus, if A is the value of +| the fixed-point interpretation of 'a', then 1 <= A < 2. The returned value +| is interpreted as a pure unsigned fraction, having no integer bits and 32 +| fraction bits. The approximation returned is never greater than the true +| reciprocal 1/A, and it differs from the true reciprocal by at most 2.006 ulp +| (units in the last place). +*----------------------------------------------------------------------------*/ +#ifdef SOFTFLOAT_FAST_DIV64TO32 +#define softfloat_approxRecip32_1( a ) ((uint32_t) (UINT64_C( 0x7FFFFFFFFFFFFFFF ) / (uint32_t) (a))) +#endif + +extern const uint16_t softfloat_approxRecipSqrt_1k0s[16]; +extern const uint16_t softfloat_approxRecipSqrt_1k1s[16]; + +/*---------------------------------------------------------------------------- +| Returns an approximation to the reciprocal of the square root of the number +| represented by 'a', where 'a' is interpreted as an unsigned fixed-point +| number either with one integer bit and 31 fraction bits or with two integer +| bits and 30 fraction bits. The format of 'a' is determined by 'oddExpA', +| which must be either 0 or 1. If 'oddExpA' is 1, 'a' is interpreted as +| having one integer bit, and if 'oddExpA' is 0, 'a' is interpreted as having +| two integer bits. The 'a' input must be "normalized", meaning that its +| most-significant bit (bit 31) must be 1. Thus, if A is the value of the +| fixed-point interpretation of 'a', it follows that 1 <= A < 2 when 'oddExpA' +| is 1, and 2 <= A < 4 when 'oddExpA' is 0. +| The returned value is interpreted as a pure unsigned fraction, having +| no integer bits and 32 fraction bits. The approximation returned is never +| greater than the true reciprocal 1/sqrt(A), and it differs from the true +| reciprocal by at most 2.06 ulp (units in the last place). 
The approximation +| returned is also always within the range 0.5 to 1; thus, the most- +| significant bit of the result is always set. +*----------------------------------------------------------------------------*/ +uint32_t softfloat_approxRecipSqrt32_1( unsigned int oddExpA, uint32_t a ); + + +/*---------------------------------------------------------------------------- +| The following functions are needed only when 'SOFTFLOAT_FAST_INT64' is +| defined. +*----------------------------------------------------------------------------*/ + +/*---------------------------------------------------------------------------- +| Shifts the 128 bits formed by concatenating 'a64' and 'a0' left by the +| number of bits given in 'dist', which must be in the range 1 to 63. +*----------------------------------------------------------------------------*/ +INLINE +struct uint128 + softfloat_shortShiftLeft128( uint64_t a64, uint64_t a0, uint_fast8_t dist ) +{ + struct uint128 z; + z.v64 = a64<>(-dist & 63); + z.v0 = a0<>dist; + z.v0 = + a64<<(negDist & 63) | a0>>dist + | ((uint64_t) (a0<<(negDist & 63)) != 0); + return z; +} + +/*---------------------------------------------------------------------------- +| Shifts the 128 bits formed by concatenating 'a' and 'extra' right by 64 +| _plus_ the number of bits given in 'dist', which must not be zero. This +| shifted value is at most 64 nonzero bits and is returned in the 'v' field +| of the 'struct uint64_extra' result. The 64-bit 'extra' field of the result +| contains a value formed as follows from the bits that were shifted off: The +| _last_ bit shifted off is the most-significant bit of the 'extra' field, and +| the other 63 bits of the 'extra' field are all zero if and only if _all_but_ +| _the_last_ bits shifted off were all zero. +| (This function makes more sense if 'a' and 'extra' are considered to form +| an unsigned fixed-point number with binary point between 'a' and 'extra'. +| This fixed-point value is shifted right by the number of bits given in +| 'dist', and the integer part of this shifted value is returned in the 'v' +| field of the result. The fractional part of the shifted value is modified +| as described above and returned in the 'extra' field of the result.) +*----------------------------------------------------------------------------*/ +INLINE +struct uint64_extra + softfloat_shiftRightJam64Extra( + uint64_t a, uint64_t extra, uint_fast32_t dist ) +{ + struct uint64_extra z; + if ( dist < 64 ) { + z.v = a>>dist; + z.extra = a<<(-dist & 63); + } else { + z.v = 0; + z.extra = (dist == 64) ? a : (a != 0); + } + z.extra |= (extra != 0); + return z; +} + +/*---------------------------------------------------------------------------- +| Shifts the 128 bits formed by concatenating 'a64' and 'a0' right by the +| number of bits given in 'dist', which must not be zero. If any nonzero bits +| are shifted off, they are "jammed" into the least-significant bit of the +| shifted value by setting the least-significant bit to 1. This shifted-and- +| jammed value is returned. +| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is +| greater than 128, the result will be either 0 or 1, depending on whether the +| original 128 bits are all zeros. 
+*----------------------------------------------------------------------------*/ +struct uint128 + softfloat_shiftRightJam128( uint64_t a64, uint64_t a0, uint_fast32_t dist ); + +/*---------------------------------------------------------------------------- +| Returns the sum of the 128-bit integer formed by concatenating 'a64' and +| 'a0' and the 128-bit integer formed by concatenating 'b64' and 'b0'. The +| addition is modulo 2^128, so any carry out is lost. +*----------------------------------------------------------------------------*/ +INLINE +struct uint128 + softfloat_add128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + struct uint128 z; + z.v0 = a0 + b0; + z.v64 = a64 + b64 + (z.v0 < a0); + return z; +} + +/*---------------------------------------------------------------------------- +| Returns the difference of the 128-bit integer formed by concatenating 'a64' +| and 'a0' and the 128-bit integer formed by concatenating 'b64' and 'b0'. +| The subtraction is modulo 2^128, so any borrow out (carry out) is lost. +*----------------------------------------------------------------------------*/ +INLINE +struct uint128 + softfloat_sub128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + struct uint128 z; + z.v0 = a0 - b0; + z.v64 = a64 - b64; + z.v64 -= (a0 < b0); + return z; +} + +/*---------------------------------------------------------------------------- +| Returns the 128-bit product of 'a' and 'b'. +*----------------------------------------------------------------------------*/ +struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b ); + + +#endif + diff --git a/src/common/softfloat/source/include/softfloat.h b/src/common/softfloat/source/include/softfloat.h new file mode 100644 index 0000000..a7b97c5 --- /dev/null +++ b/src/common/softfloat/source/include/softfloat.h @@ -0,0 +1,172 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
diff --git a/src/common/softfloat/source/include/softfloat.h b/src/common/softfloat/source/include/softfloat.h
new file mode 100644
index 0000000..a7b97c5
--- /dev/null
+++ b/src/common/softfloat/source/include/softfloat.h
@@ -0,0 +1,172 @@
+
+/*============================================================================
+
+This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+
+/*============================================================================
+| Note: If SoftFloat is made available as a general library for programs to
+| use, it is strongly recommended that a platform-specific version of this
+| header, "softfloat.h", be created that folds in "softfloat_types.h" and that
+| eliminates all dependencies on compile-time macros.
+*============================================================================*/
+
+
+#ifndef softfloat_h
+#define softfloat_h 1
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "softfloat_types.h"
+
+#ifndef THREAD_LOCAL
+#define THREAD_LOCAL
+#endif
+
+/*----------------------------------------------------------------------------
+| Software floating-point underflow tininess-detection mode.
+*----------------------------------------------------------------------------*/
+extern THREAD_LOCAL uint_fast8_t softfloat_detectTininess;
+enum {
+    softfloat_tininess_beforeRounding = 0,
+    softfloat_tininess_afterRounding  = 1
+};
+
+/*----------------------------------------------------------------------------
+| Software floating-point rounding mode. (Mode "odd" is supported only if
+| SoftFloat is compiled with macro 'SOFTFLOAT_ROUND_ODD' defined.)
+*----------------------------------------------------------------------------*/
+extern THREAD_LOCAL uint_fast8_t softfloat_roundingMode;
+enum {
+    softfloat_round_near_even   = 0,
+    softfloat_round_minMag      = 1,
+    softfloat_round_min         = 2,
+    softfloat_round_max         = 3,
+    softfloat_round_near_maxMag = 4,
+    softfloat_round_odd         = 5
+};
+
+/*----------------------------------------------------------------------------
+| Software floating-point exception flags.
+*----------------------------------------------------------------------------*/
+extern THREAD_LOCAL uint_fast8_t softfloat_exceptionFlags;
+enum {
+    softfloat_flag_inexact   =  1,
+    softfloat_flag_underflow =  2,
+    softfloat_flag_overflow  =  4,
+    softfloat_flag_infinite  =  8,
+    softfloat_flag_invalid   = 16
+};
+
+/*----------------------------------------------------------------------------
+| Routine to raise any or all of the software floating-point exception flags.
+*----------------------------------------------------------------------------*/
+void softfloat_raiseFlags( uint_fast8_t );
+
+/*----------------------------------------------------------------------------
+| Integer-to-floating-point conversion routines.
+*----------------------------------------------------------------------------*/
+float32_t ui32_to_f32( uint32_t );
+float64_t ui32_to_f64( uint32_t );
+float32_t ui64_to_f32( uint64_t );
+float64_t ui64_to_f64( uint64_t );
+float32_t i32_to_f32( int32_t );
+float64_t i32_to_f64( int32_t );
+float32_t i64_to_f32( int64_t );
+float64_t i64_to_f64( int64_t );
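`softfloat_roundingMode` and `softfloat_exceptionFlags` are plain (optionally thread-local) globals, so a caller selects a mode and polls flags directly. A hedged usage sketch against the API declared in this header (assumes the library's objects are linked in; `float32_t` is an opaque bit-pattern struct, so constants are built through the conversion routines):

    #include <stdio.h>
    #include "softfloat.h"

    int main( void )
    {
        float32_t one   = ui32_to_f32( 1 );
        float32_t three = ui32_to_f32( 3 );

        softfloat_roundingMode   = softfloat_round_near_even;
        softfloat_exceptionFlags = 0;

        float32_t third = f32_div( one, three );  /* 1/3 is inexact in binary32 */
        if ( softfloat_exceptionFlags & softfloat_flag_inexact ) {
            printf( "inexact, bits = 0x%08x\n", (unsigned) third.v );
        }
        return 0;
    }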
+
+/*----------------------------------------------------------------------------
+| 16-bit (half-precision) floating-point operations.
+*----------------------------------------------------------------------------*/
+float32_t f16_to_f32( float16_t );
+
+/*----------------------------------------------------------------------------
+| 32-bit (single-precision) floating-point operations.
+*----------------------------------------------------------------------------*/
+uint_fast32_t f32_to_ui32( float32_t, uint_fast8_t, bool );
+uint_fast64_t f32_to_ui64( float32_t, uint_fast8_t, bool );
+int_fast32_t f32_to_i32( float32_t, uint_fast8_t, bool );
+int_fast64_t f32_to_i64( float32_t, uint_fast8_t, bool );
+uint_fast32_t f32_to_ui32_r_minMag( float32_t, bool );
+uint_fast64_t f32_to_ui64_r_minMag( float32_t, bool );
+int_fast32_t f32_to_i32_r_minMag( float32_t, bool );
+int_fast64_t f32_to_i64_r_minMag( float32_t, bool );
+float16_t f32_to_f16( float32_t );
+float64_t f32_to_f64( float32_t );
+float32_t f32_roundToInt( float32_t, uint_fast8_t, bool );
+float32_t f32_add( float32_t, float32_t );
+float32_t f32_sub( float32_t, float32_t );
+float32_t f32_mul( float32_t, float32_t );
+float32_t f32_mulAdd( float32_t, float32_t, float32_t );
+float32_t f32_div( float32_t, float32_t );
+float32_t f32_rem( float32_t, float32_t );
+float32_t f32_sqrt( float32_t );
+bool f32_eq( float32_t, float32_t );
+bool f32_le( float32_t, float32_t );
+bool f32_lt( float32_t, float32_t );
+bool f32_eq_signaling( float32_t, float32_t );
+bool f32_le_quiet( float32_t, float32_t );
+bool f32_lt_quiet( float32_t, float32_t );
+bool f32_isSignalingNaN( float32_t );
+
+/*----------------------------------------------------------------------------
+| 64-bit (double-precision) floating-point operations.
+*----------------------------------------------------------------------------*/
+uint_fast32_t f64_to_ui32( float64_t, uint_fast8_t, bool );
+uint_fast64_t f64_to_ui64( float64_t, uint_fast8_t, bool );
+int_fast32_t f64_to_i32( float64_t, uint_fast8_t, bool );
+int_fast64_t f64_to_i64( float64_t, uint_fast8_t, bool );
+uint_fast32_t f64_to_ui32_r_minMag( float64_t, bool );
+uint_fast64_t f64_to_ui64_r_minMag( float64_t, bool );
+int_fast32_t f64_to_i32_r_minMag( float64_t, bool );
+int_fast64_t f64_to_i64_r_minMag( float64_t, bool );
+float32_t f64_to_f32( float64_t );
+float64_t f64_roundToInt( float64_t, uint_fast8_t, bool );
+float64_t f64_add( float64_t, float64_t );
+float64_t f64_sub( float64_t, float64_t );
+float64_t f64_mul( float64_t, float64_t );
+float64_t f64_mulAdd( float64_t, float64_t, float64_t );
+float64_t f64_div( float64_t, float64_t );
+float64_t f64_rem( float64_t, float64_t );
+float64_t f64_sqrt( float64_t );
+bool f64_eq( float64_t, float64_t );
+bool f64_le( float64_t, float64_t );
+bool f64_lt( float64_t, float64_t );
+bool f64_eq_signaling( float64_t, float64_t );
+bool f64_le_quiet( float64_t, float64_t );
+bool f64_lt_quiet( float64_t, float64_t );
+bool f64_isSignalingNaN( float64_t );
+
+#endif
+
diff --git a/src/common/softfloat/source/include/softfloat_types.h b/src/common/softfloat/source/include/softfloat_types.h
new file mode 100644
index 0000000..af1888f
--- /dev/null
+++ b/src/common/softfloat/source/include/softfloat_types.h
@@ -0,0 +1,81 @@
+
+/*============================================================================
+
+This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#ifndef softfloat_types_h
+#define softfloat_types_h 1
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------------------
+| Types used to pass 16-bit, 32-bit, 64-bit, and 128-bit floating-point
+| arguments and results to/from functions. These types must be exactly
+| 16 bits, 32 bits, 64 bits, and 128 bits in size, respectively. Where a
+| platform has "native" support for IEEE-Standard floating-point formats,
+| the types below may, if desired, be defined as aliases for the native types
+| (typically 'float' and 'double', and possibly 'long double').
+*----------------------------------------------------------------------------*/
+typedef struct { uint16_t v; } float16_t;
+typedef struct { uint32_t v; } float32_t;
+typedef struct { uint64_t v; } float64_t;
+typedef struct { uint64_t v[2]; } float128_t;
+
+/*----------------------------------------------------------------------------
+| The format of an 80-bit extended floating-point number in memory. This
+| structure must contain a 16-bit field named 'signExp' and a 64-bit field
+| named 'signif'.
+*----------------------------------------------------------------------------*/
+#ifdef LITTLEENDIAN
+struct extFloat80M { uint64_t signif; uint16_t signExp; };
+#else
+struct extFloat80M { uint16_t signExp; uint64_t signif; };
+#endif
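Because `float32_t` is just a bit-pattern wrapper, the usual IEEE-754 field extraction applies directly to its `v` member. A hedged stand-alone sketch (it mirrors the `signF32UI`/`expF32UI`/`fracF32UI` helpers that this patch's `internals.h` supplies to the `.c` files below, with the struct redeclared locally so the snippet compiles on its own):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint32_t v; } float32_t;   /* local copy for the demo */

    int main( void )
    {
        float32_t x = { 0xC0490FDB };            /* approximately -pi in binary32 */
        unsigned sign = x.v>>31;                 /* 1 bit   */
        unsigned exp  = x.v>>23 & 0xFF;          /* 8 bits  */
        unsigned frac = x.v & 0x007FFFFF;        /* 23 bits */
        printf( "sign=%u exp=0x%02X frac=0x%06X\n", sign, exp, frac );
        return 0;
    }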
+
+/*----------------------------------------------------------------------------
+| The type used to pass 80-bit extended floating-point arguments and
+| results to/from functions. This type must have size identical to
+| 'struct extFloat80M'. Type 'extFloat80_t' can be defined as an alias for
+| 'struct extFloat80M'. Alternatively, if a platform has "native" support
+| for IEEE-Standard 80-bit extended floating-point, it may be possible,
+| if desired, to define 'extFloat80_t' as an alias for the native type
+| (presumably either 'long double' or a nonstandard compiler-intrinsic type).
+| In that case, the 'signif' and 'signExp' fields of 'struct extFloat80M'
+| must align exactly with the locations in memory of the sign, exponent, and
+| significand of the native type.
+*----------------------------------------------------------------------------*/
+typedef struct extFloat80M extFloat80_t;
+
+#endif
+
diff --git a/src/common/softfloat/source/s_addMagsF32.c b/src/common/softfloat/source/s_addMagsF32.c
new file mode 100644
index 0000000..ba64781
--- /dev/null
+++ b/src/common/softfloat/source/s_addMagsF32.c
@@ -0,0 +1,126 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+
+float32_t softfloat_addMagsF32( uint_fast32_t uiA, uint_fast32_t uiB )
+{
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    int_fast16_t expDiff;
+    uint_fast32_t uiZ;
+    bool signZ;
+    int_fast16_t expZ;
+    uint_fast32_t sigZ;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expDiff = expA - expB;
+    if ( ! expDiff ) {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( ! expA ) {
+            uiZ = uiA + sigB;
+            goto uiZ;
+        }
+        if ( expA == 0xFF ) {
+            if ( sigA | sigB ) goto propagateNaN;
+            uiZ = uiA;
+            goto uiZ;
+        }
+        signZ = signF32UI( uiA );
+        expZ = expA;
+        sigZ = 0x01000000 + sigA + sigB;
+        if ( ! (sigZ & 1) && (expZ < 0xFE) ) {
+            uiZ = packToF32UI( signZ, expZ, sigZ>>1 );
+            goto uiZ;
+        }
+        sigZ <<= 6;
+    } else {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        signZ = signF32UI( uiA );
+        sigA <<= 6;
+        sigB <<= 6;
+        if ( expDiff < 0 ) {
+            if ( expB == 0xFF ) {
+                if ( sigB ) goto propagateNaN;
+                uiZ = packToF32UI( signZ, 0xFF, 0 );
+                goto uiZ;
+            }
+            expZ = expB;
+            sigA += expA ? 0x20000000 : sigA;
+            sigA = softfloat_shiftRightJam32( sigA, -expDiff );
+        } else {
+            if ( expA == 0xFF ) {
+                if ( sigA ) goto propagateNaN;
+                uiZ = uiA;
+                goto uiZ;
+            }
+            expZ = expA;
+            sigB += expB ? 0x20000000 : sigB;
+            sigB = softfloat_shiftRightJam32( sigB, expDiff );
+        }
+        sigZ = 0x20000000 + sigA + sigB;
+        if ( sigZ < 0x40000000 ) {
+            --expZ;
+            sigZ <<= 1;
+        }
+    }
+    return softfloat_roundPackToF32( signZ, expZ, sigZ );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
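Note the equal-exponent subnormal case above: when `expA == expB == 0` the routine returns plain `uiA + sigB`. Two subnormals share the minimum-exponent scale, so integer addition of the raw encodings adds the significands, and a carry out of the fraction field lands exactly in the exponent field, producing the correct smallest normal. A hedged stand-alone check using host `float` reinterpretation (assumes the host uses IEEE-754 binary32):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main( void )
    {
        /* Largest subnormal plus smallest subnormal: the integer sum
           0x007FFFFF + 0x00000001 = 0x00800000 is the smallest normal,
           exactly what 'uiA + sigB' computes. */
        uint32_t uiA = 0x007FFFFF, uiB = 0x00000001;
        uint32_t uiZ = uiA + uiB;
        float a, b, z;
        memcpy( &a, &uiA, 4 );
        memcpy( &b, &uiB, 4 );
        memcpy( &z, &uiZ, 4 );
        printf( "%.9g + %.9g = %.9g (bits 0x%08X)\n",
                (double) a, (double) b, (double) z, (unsigned) uiZ );
        return 0;
    }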
diff --git a/src/common/softfloat/source/s_addMagsF64.c b/src/common/softfloat/source/s_addMagsF64.c
new file mode 100644
index 0000000..63e1afe
--- /dev/null
+++ b/src/common/softfloat/source/s_addMagsF64.c
@@ -0,0 +1,128 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+
+float64_t
+ softfloat_addMagsF64( uint_fast64_t uiA, uint_fast64_t uiB, bool signZ )
+{
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    int_fast16_t expDiff;
+    uint_fast64_t uiZ;
+    int_fast16_t expZ;
+    uint_fast64_t sigZ;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expDiff = expA - expB;
+    if ( ! expDiff ) {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( ! expA ) {
+            uiZ = uiA + sigB;
+            goto uiZ;
+        }
+        if ( expA == 0x7FF ) {
+            if ( sigA | sigB ) goto propagateNaN;
+            uiZ = uiA;
+            goto uiZ;
+        }
+        expZ = expA;
+        sigZ = UINT64_C( 0x0020000000000000 ) + sigA + sigB;
+        sigZ <<= 9;
+    } else {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        sigA <<= 9;
+        sigB <<= 9;
+        if ( expDiff < 0 ) {
+            if ( expB == 0x7FF ) {
+                if ( sigB ) goto propagateNaN;
+                uiZ = packToF64UI( signZ, 0x7FF, 0 );
+                goto uiZ;
+            }
+            expZ = expB;
+            if ( expA ) {
+                sigA += UINT64_C( 0x2000000000000000 );
+            } else {
+                sigA <<= 1;
+            }
+            sigA = softfloat_shiftRightJam64( sigA, -expDiff );
+        } else {
+            if ( expA == 0x7FF ) {
+                if ( sigA ) goto propagateNaN;
+                uiZ = uiA;
+                goto uiZ;
+            }
+            expZ = expA;
+            if ( expB ) {
+                sigB += UINT64_C( 0x2000000000000000 );
+            } else {
+                sigB <<= 1;
+            }
+            sigB = softfloat_shiftRightJam64( sigB, expDiff );
+        }
+        sigZ = UINT64_C( 0x2000000000000000 ) + sigA + sigB;
+        if ( sigZ < UINT64_C( 0x4000000000000000 ) ) {
+            --expZ;
+            sigZ <<= 1;
+        }
+    }
+    return softfloat_roundPackToF64( signZ, expZ, sigZ );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/src/common/softfloat/source/s_approxRecipSqrt32_1.c b/src/common/softfloat/source/s_approxRecipSqrt32_1.c
new file mode 100644
index 0000000..2695f7f
--- /dev/null
+++ b/src/common/softfloat/source/s_approxRecipSqrt32_1.c
@@ -0,0 +1,74 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+#ifndef softfloat_approxRecipSqrt32_1
+
+extern const uint16_t softfloat_approxRecipSqrt_1k0s[];
+extern const uint16_t softfloat_approxRecipSqrt_1k1s[];
+
+uint32_t softfloat_approxRecipSqrt32_1( unsigned int oddExpA, uint32_t a )
+{
+    int index;
+    uint16_t eps, r0;
+    uint_fast32_t ESqrR0;
+    uint32_t sigma0;
+    uint_fast32_t r;
+    uint32_t sqrSigma0;
+
+    index = (a>>27 & 0xE) + oddExpA;
+    eps = (uint16_t) (a>>12);
+    r0 = softfloat_approxRecipSqrt_1k0s[index]
+             - ((softfloat_approxRecipSqrt_1k1s[index] * (uint_fast32_t) eps)
+                    >>20);
+    ESqrR0 = (uint_fast32_t) r0 * r0;
+    if ( ! oddExpA ) ESqrR0 <<= 1;
+    sigma0 = ~(uint_fast32_t) (((uint32_t) ESqrR0 * (uint_fast64_t) a)>>23);
+    r = ((uint_fast32_t) r0<<16) + ((r0 * (uint_fast64_t) sigma0)>>25);
+    sqrSigma0 = ((uint_fast64_t) sigma0 * sigma0)>>32;
+    r += ((uint32_t) ((r>>1) + (r>>3) - ((uint_fast32_t) r0<<14))
+              * (uint_fast64_t) sqrSigma0)
+             >>48;
+    if ( ! (r & 0x80000000) ) r = 0x80000000;
+    return r;
+
+}
+
+#endif
+
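The routine seeds a table lookup (three significand bits plus the exponent's parity select one of 16 entries in each table below) and then refines the estimate toward 1/sqrt(a). A hedged harness for eyeballing the result against the host's `sqrt`; it assumes this patch's `s_approxRecipSqrt32_1.c` and `s_approxRecipSqrt_1Ks.c` objects are linked in, and the fixed-point scalings (`a` as a 2.30 value when `oddExpA` is 0, the result read as a 0.32 fraction in [0.5, 1)) are my reading of the comment above:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    uint32_t softfloat_approxRecipSqrt32_1( unsigned int oddExpA, uint32_t a );

    int main( void )
    {
        /* a = 1.25 in 2.30 fixed point (even exponent case). */
        uint32_t a = (uint32_t) (1.25 * (1u<<30));
        uint32_t r = softfloat_approxRecipSqrt32_1( 0, a );
        printf( "est=%.8f exact=%.8f\n",
                r / 4294967296.0, 1.0 / sqrt( 1.25 ) );
        return 0;
    }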
diff --git a/src/common/softfloat/source/s_approxRecipSqrt_1Ks.c b/src/common/softfloat/source/s_approxRecipSqrt_1Ks.c
new file mode 100644
index 0000000..a60cf82
--- /dev/null
+++ b/src/common/softfloat/source/s_approxRecipSqrt_1Ks.c
@@ -0,0 +1,49 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+const uint16_t softfloat_approxRecipSqrt_1k0s[16] = {
+    0xB4C9, 0xFFAB, 0xAA7D, 0xF11C, 0xA1C5, 0xE4C7, 0x9A43, 0xDA29,
+    0x93B5, 0xD0E5, 0x8DED, 0xC8B7, 0x88C6, 0xC16D, 0x8424, 0xBAE1
+};
+const uint16_t softfloat_approxRecipSqrt_1k1s[16] = {
+    0xA5A5, 0xEA42, 0x8C21, 0xC62D, 0x788F, 0xAA7F, 0x6928, 0x94B6,
+    0x5CC7, 0x8335, 0x52A6, 0x74E2, 0x4A3E, 0x68FE, 0x432B, 0x5EFD
+};
+
diff --git a/src/common/softfloat/source/s_countLeadingZeros64.c b/src/common/softfloat/source/s_countLeadingZeros64.c
new file mode 100644
index 0000000..0045741
--- /dev/null
+++ b/src/common/softfloat/source/s_countLeadingZeros64.c
@@ -0,0 +1,73 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+
+#ifndef softfloat_countLeadingZeros64
+
+#define softfloat_countLeadingZeros64 softfloat_countLeadingZeros64
+#include "primitives.h"
+
+uint_fast8_t softfloat_countLeadingZeros64( uint64_t a )
+{
+    uint_fast8_t count;
+    uint32_t a32;
+
+    count = 0;
+    a32 = a>>32;
+    if ( ! a32 ) {
+        count = 32;
+        a32 = a;
+    }
+    /*------------------------------------------------------------------------
+    | From here, result is current count + count leading zeros of `a32'.
+    *------------------------------------------------------------------------*/
+    if ( a32 < 0x10000 ) {
+        count += 16;
+        a32 <<= 16;
+    }
+    if ( a32 < 0x1000000 ) {
+        count += 8;
+        a32 <<= 8;
+    }
+    count += softfloat_countLeadingZeros8[a32>>24];
+    return count;
+
+}
+
+#endif
+
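The 64-bit count collapses to a 32-bit count, narrows by 16 and then 8 bits, and lets a 256-entry byte table (defined in s_countLeadingZeros8.c below) finish the job. A hedged stand-alone version of the same narrowing idea, using a 16-entry nibble table instead of the library's byte table purely for brevity:

    #include <assert.h>
    #include <stdint.h>

    static unsigned clz32_table_style( uint32_t a )
    {
        static const uint8_t nib[16] =
            { 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 };
        unsigned count = 0;
        if ( a < 0x10000 )    { count += 16; a <<= 16; }
        if ( a < 0x1000000 )  { count += 8;  a <<= 8;  }
        if ( a < 0x10000000 ) { count += 4;  a <<= 4;  }
        return count + nib[a>>28];
    }

    int main( void )
    {
        assert( clz32_table_style( 1 ) == 31 );
        assert( clz32_table_style( 0x80000000u ) == 0 );
        assert( clz32_table_style( 0x00012345 ) == 15 );
        return 0;
    }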
diff --git a/src/common/softfloat/source/s_countLeadingZeros8.c b/src/common/softfloat/source/s_countLeadingZeros8.c
new file mode 100644
index 0000000..1158d01
--- /dev/null
+++ b/src/common/softfloat/source/s_countLeadingZeros8.c
@@ -0,0 +1,59 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+const uint_least8_t softfloat_countLeadingZeros8[256] = {
+    8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
+    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
diff --git a/src/common/softfloat/source/s_mul64To128.c b/src/common/softfloat/source/s_mul64To128.c
new file mode 100644
index 0000000..3b0fb96
--- /dev/null
+++ b/src/common/softfloat/source/s_mul64To128.c
@@ -0,0 +1,67 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+#include "primitives.h"
+
+#ifndef softfloat_mul64To128
+
+struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b )
+{
+    uint32_t a32, a0, b32, b0;
+    struct uint128 z;
+    uint64_t mid1, mid;
+
+    a32 = a>>32;
+    a0 = a;
+    b32 = b>>32;
+    b0 = b;
+    z.v0 = (uint_fast64_t) a0 * b0;
+    mid1 = (uint_fast64_t) a32 * b0;
+    mid = mid1 + (uint_fast64_t) a0 * b32;
+    z.v64 = (uint_fast64_t) a32 * b32;
+    z.v64 += (uint_fast64_t) (mid < mid1)<<32 | mid>>32;
+    mid <<= 32;
+    z.v0 += mid;
+    z.v64 += (z.v0 < mid);
+    return z;
+
+}
+
+#endif
+
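The routine builds the 128-bit product from four 32x32 partial products, folding the two middle terms with explicit carry tests. Where the compiler offers `unsigned __int128` (a GCC/Clang extension on 64-bit targets, so this check is hedged on that), the result can be cross-checked directly; the snippet assumes it is compiled inside this package's include paths and linked with its objects:

    #include <assert.h>
    #include <stdint.h>
    #include "platform.h"
    #include "primitiveTypes.h"   /* struct uint128, from this patch */

    struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b );

    int main( void )
    {
        uint64_t a = UINT64_C( 0xFEDCBA9876543210 );
        uint64_t b = UINT64_C( 0x0123456789ABCDEF );
        struct uint128 z = softfloat_mul64To128( a, b );
        unsigned __int128 ref = (unsigned __int128) a * b;
        assert( z.v64 == (uint64_t) (ref>>64) );
        assert( z.v0  == (uint64_t) ref );
        return 0;
    }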
diff --git a/src/common/softfloat/source/s_mulAddF32.c b/src/common/softfloat/source/s_mulAddF32.c
new file mode 100644
index 0000000..d163ea0
--- /dev/null
+++ b/src/common/softfloat/source/s_mulAddF32.c
@@ -0,0 +1,224 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t
+ softfloat_mulAddF32(
+     uint_fast32_t uiA, uint_fast32_t uiB, uint_fast32_t uiC, uint_fast8_t op )
+{
+    bool signA;
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    bool signC;
+    int_fast16_t expC;
+    uint_fast32_t sigC;
+    bool signProd;
+    uint_fast32_t magBits, uiZ;
+    struct exp16_sig32 normExpSig;
+    int_fast16_t expProd;
+    uint_fast64_t sigProd;
+    bool signZ;
+    int_fast16_t expZ;
+    uint_fast32_t sigZ;
+    int_fast16_t expDiff;
+    uint_fast64_t sig64Z, sig64C;
+    int_fast8_t shiftDist;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    signA = signF32UI( uiA );
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    signB = signF32UI( uiB );
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    signC = signF32UI( uiC ) ^ (op == softfloat_mulAdd_subC);
+    expC = expF32UI( uiC );
+    sigC = fracF32UI( uiC );
+    signProd = signA ^ signB ^ (op == softfloat_mulAdd_subProd);
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0xFF ) {
+        if ( sigA || ((expB == 0xFF) && sigB) ) goto propagateNaN_ABC;
+        magBits = expB | sigB;
+        goto infProdArg;
+    }
+    if ( expB == 0xFF ) {
+        if ( sigB ) goto propagateNaN_ABC;
+        magBits = expA | sigA;
+        goto infProdArg;
+    }
+    if ( expC == 0xFF ) {
+        if ( sigC ) {
+            uiZ = 0;
+            goto propagateNaN_ZC;
+        }
+        uiZ = uiC;
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expA ) {
+        if ( ! sigA ) goto zeroProd;
+        normExpSig = softfloat_normSubnormalF32Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    if ( ! expB ) {
+        if ( ! sigB ) goto zeroProd;
+        normExpSig = softfloat_normSubnormalF32Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expProd = expA + expB - 0x7E;
+    sigA = (sigA | 0x00800000)<<7;
+    sigB = (sigB | 0x00800000)<<7;
+    sigProd = (uint_fast64_t) sigA * sigB;
+    if ( sigProd < UINT64_C( 0x2000000000000000 ) ) {
+        --expProd;
+        sigProd <<= 1;
+    }
+    signZ = signProd;
+    if ( ! expC ) {
+        if ( ! sigC ) {
+            expZ = expProd - 1;
+            sigZ = softfloat_shortShiftRightJam64( sigProd, 31 );
+            goto roundPack;
+        }
+        normExpSig = softfloat_normSubnormalF32Sig( sigC );
+        expC = normExpSig.exp;
+        sigC = normExpSig.sig;
+    }
+    sigC = (sigC | 0x00800000)<<6;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expDiff = expProd - expC;
+    if ( signProd == signC ) {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( expDiff <= 0 ) {
+            expZ = expC;
+            sigZ = sigC + softfloat_shiftRightJam64( sigProd, 32 - expDiff );
+        } else {
+            expZ = expProd;
+            sig64Z =
+                sigProd
+                    + softfloat_shiftRightJam64(
+                          (uint_fast64_t) sigC<<32, expDiff );
+            sigZ = softfloat_shortShiftRightJam64( sig64Z, 32 );
+        }
+        if ( sigZ < 0x40000000 ) {
+            --expZ;
+            sigZ <<= 1;
+        }
+    } else {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        sig64C = (uint_fast64_t) sigC<<32;
+        if ( expDiff < 0 ) {
+            signZ = signC;
+            expZ = expC;
+            sig64Z = sig64C - softfloat_shiftRightJam64( sigProd, -expDiff );
+        } else if ( ! expDiff ) {
+            expZ = expProd;
+            sig64Z = sigProd - sig64C;
+            if ( ! sig64Z ) goto completeCancellation;
+            if ( sig64Z & UINT64_C( 0x8000000000000000 ) ) {
+                signZ = ! signZ;
+                sig64Z = -sig64Z;
+            }
+        } else {
+            expZ = expProd;
+            sig64Z = sigProd - softfloat_shiftRightJam64( sig64C, expDiff );
+        }
+        shiftDist = softfloat_countLeadingZeros64( sig64Z ) - 1;
+        expZ -= shiftDist;
+        shiftDist -= 32;
+        if ( shiftDist < 0 ) {
+            sigZ = softfloat_shortShiftRightJam64( sig64Z, -shiftDist );
+        } else {
+            sigZ = (uint_fast32_t) sig64Z<<shiftDist;
+        }
+    }
+ roundPack:
+    return softfloat_roundPackToF32( signZ, expZ, sigZ );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN_ABC:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+    goto propagateNaN_ZC;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ infProdArg:
+    if ( magBits ) {
+        uiZ = packToF32UI( signProd, 0xFF, 0 );
+        if ( expC != 0xFF ) goto uiZ;
+        if ( sigC ) goto propagateNaN_ZC;
+        if ( signProd == signC ) goto uiZ;
+    }
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF32UI;
+ propagateNaN_ZC:
+    uiZ = softfloat_propagateNaNF32UI( uiZ, uiC );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ zeroProd:
+    uiZ = uiC;
+    if ( ! (expC | sigC) && (signProd != signC) ) {
+ completeCancellation:
+        uiZ =
+            packToF32UI(
+                (softfloat_roundingMode == softfloat_round_min), 0, 0 );
+    }
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/src/common/softfloat/source/s_mulAddF64.c b/src/common/softfloat/source/s_mulAddF64.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/s_mulAddF64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+
+float64_t
+ softfloat_mulAddF64(
+     uint_fast64_t uiA, uint_fast64_t uiB, uint_fast64_t uiC, uint_fast8_t op )
+{
+    bool signA;
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    bool signC;
+    int_fast16_t expC;
+    uint_fast64_t sigC;
+    bool signZ;
+    uint_fast64_t magBits, uiZ;
+    struct exp16_sig64 normExpSig;
+    int_fast16_t expZ;
+    struct uint128 sig128Z;
+    uint_fast64_t sigZ;
+    int_fast16_t expDiff;
+    struct uint128 sig128C;
+    int_fast8_t shiftDist;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    signA = signF64UI( uiA );
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    signB = signF64UI( uiB );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    signC = signF64UI( uiC ) ^ (op == softfloat_mulAdd_subC);
+    expC = expF64UI( uiC );
+    sigC = fracF64UI( uiC );
+    signZ = signA ^ signB ^ (op == softfloat_mulAdd_subProd);
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0x7FF ) {
+        if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN_ABC;
+        magBits = expB | sigB;
+        goto infProdArg;
+    }
+    if ( expB == 0x7FF ) {
+        if ( sigB ) goto propagateNaN_ABC;
+        magBits = expA | sigA;
+        goto infProdArg;
+    }
+    if ( expC == 0x7FF ) {
+        if ( sigC ) {
+            uiZ = 0;
+            goto propagateNaN_ZC;
+        }
+        uiZ = uiC;
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expA ) {
+        if ( ! sigA ) goto zeroProd;
+        normExpSig = softfloat_normSubnormalF64Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    if ( ! expB ) {
+        if ( ! sigB ) goto zeroProd;
+        normExpSig = softfloat_normSubnormalF64Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expZ = expA + expB - 0x3FE;
+    sigA = (sigA | UINT64_C( 0x0010000000000000 ))<<10;
+    sigB = (sigB | UINT64_C( 0x0010000000000000 ))<<10;
+    sig128Z = softfloat_mul64To128( sigA, sigB );
+    if ( sig128Z.v64 < UINT64_C( 0x2000000000000000 ) ) {
+        --expZ;
+        sig128Z =
+            softfloat_add128(
+                sig128Z.v64, sig128Z.v0, sig128Z.v64, sig128Z.v0 );
+    }
+    if ( ! expC ) {
+        if ( ! sigC ) {
+            --expZ;
+            sigZ = sig128Z.v64<<1 | (sig128Z.v0 != 0);
+            goto roundPack;
+        }
+        normExpSig = softfloat_normSubnormalF64Sig( sigC );
+        expC = normExpSig.exp;
+        sigC = normExpSig.sig;
+    }
+    sigC = (sigC | UINT64_C( 0x0010000000000000 ))<<9;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expDiff = expZ - expC;
+    if ( expDiff < 0 ) {
+        expZ = expC;
+        if ( (signZ == signC) || (expDiff < -1) ) {
+            sig128Z.v64 = softfloat_shiftRightJam64( sig128Z.v64, -expDiff );
+        } else {
+            sig128Z =
+                softfloat_shortShiftRightJam128( sig128Z.v64, sig128Z.v0, 1 );
+        }
+    } else if ( expDiff ) {
+        sig128C = softfloat_shiftRightJam128( sigC, 0, expDiff );
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( signZ == signC ) {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( expDiff <= 0 ) {
+            sigZ = (sigC + sig128Z.v64) | (sig128Z.v0 != 0);
+        } else {
+            sig128Z =
+                softfloat_add128(
+                    sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 );
+            sigZ = sig128Z.v64 | (sig128Z.v0 != 0);
+        }
+        if ( sigZ < UINT64_C( 0x4000000000000000 ) ) {
+            --expZ;
+            sigZ <<= 1;
+        }
+    } else {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( expDiff < 0 ) {
+            signZ = signC;
+            sig128Z = softfloat_sub128( sigC, 0, sig128Z.v64, sig128Z.v0 );
+        } else if ( ! expDiff ) {
+            sig128Z.v64 = sig128Z.v64 - sigC;
+            if ( ! (sig128Z.v64 | sig128Z.v0) ) goto completeCancellation;
+            if ( sig128Z.v64 & UINT64_C( 0x8000000000000000 ) ) {
+                signZ = ! signZ;
+                sig128Z = softfloat_sub128( 0, 0, sig128Z.v64, sig128Z.v0 );
+            }
+        } else {
+            sig128Z =
+                softfloat_sub128(
+                    sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 );
+        }
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( ! sig128Z.v64 ) {
+            expZ -= 64;
+            sig128Z.v64 = sig128Z.v0;
+            sig128Z.v0 = 0;
+        }
+        shiftDist = softfloat_countLeadingZeros64( sig128Z.v64 ) - 1;
+        expZ -= shiftDist;
+        if ( shiftDist < 0 ) {
+            sigZ = softfloat_shortShiftRightJam64( sig128Z.v64, -shiftDist );
+        } else {
+            sig128Z =
+                softfloat_shortShiftLeft128(
+                    sig128Z.v64, sig128Z.v0, shiftDist );
+            sigZ = sig128Z.v64;
+        }
+        sigZ |= (sig128Z.v0 != 0);
+    }
+ roundPack:
+    return softfloat_roundPackToF64( signZ, expZ, sigZ );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN_ABC:
+    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+    goto propagateNaN_ZC;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ infProdArg:
+    if ( magBits ) {
+        uiZ = packToF64UI( signZ, 0x7FF, 0 );
+        if ( expC != 0x7FF ) goto uiZ;
+        if ( sigC ) goto propagateNaN_ZC;
+        if ( signZ == signC ) goto uiZ;
+    }
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF64UI;
+ propagateNaN_ZC:
+    uiZ = softfloat_propagateNaNF64UI( uiZ, uiC );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ zeroProd:
+    uiZ = uiC;
+    if ( ! (expC | sigC) && (signZ != signC) ) {
+ completeCancellation:
+        uiZ =
+            packToF64UI(
+                (softfloat_roundingMode == softfloat_round_min), 0, 0 );
+    }
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
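`softfloat_mulAddF64` keeps the full 128-bit product (via `softfloat_mul64To128`) and folds the addend in before a single `roundPack` step; that single rounding is precisely what distinguishes a fused multiply-add from `a*b + c` rounded twice. A hedged host-side illustration (assumes the platform's `fma` is correctly rounded, as C11 intends; link with -lm):

    #include <math.h>
    #include <stdio.h>

    int main( void )
    {
        double a = 1.0 + 0x1p-52;   /* 1 + one ulp */
        double b = 1.0 - 0x1p-53;
        double c = -1.0;
        /* Two roundings: the product rounds to 1.0, so the sum is 0. */
        printf( "a*b+c      = %a\n", a * b + c );
        /* One rounding: the exact residual 0x1.fffffffffffffp-54 survives. */
        printf( "fma(a,b,c) = %a\n", fma( a, b, c ) );
        return 0;
    }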
diff --git a/src/common/softfloat/source/s_normRoundPackToF32.c b/src/common/softfloat/source/s_normRoundPackToF32.c
new file mode 100644
index 0000000..14e0811
--- /dev/null
+++ b/src/common/softfloat/source/s_normRoundPackToF32.c
@@ -0,0 +1,58 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+
+float32_t
+ softfloat_normRoundPackToF32( bool sign, int_fast16_t exp, uint_fast32_t sig )
+{
+    int_fast8_t shiftDist;
+    union ui32_f32 uZ;
+
+    shiftDist = softfloat_countLeadingZeros32( sig ) - 1;
+    exp -= shiftDist;
+    if ( (7 <= shiftDist) && ((unsigned int) exp < 0xFD) ) {
+        uZ.ui = packToF32UI( sign, sig ? exp : 0, sig<<(shiftDist - 7) );
+        return uZ.f;
+    } else {
+        return softfloat_roundPackToF32( sign, exp, sig<<shiftDist );
+    }
+}
+
diff --git a/src/common/softfloat/source/s_normRoundPackToF64.c b/src/common/softfloat/source/s_normRoundPackToF64.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/s_normRoundPackToF64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+
+float64_t
+ softfloat_normRoundPackToF64( bool sign, int_fast16_t exp, uint_fast64_t sig )
+{
+    int_fast8_t shiftDist;
+    union ui64_f64 uZ;
+
+    shiftDist = softfloat_countLeadingZeros64( sig ) - 1;
+    exp -= shiftDist;
+    if ( (10 <= shiftDist) && ((unsigned int) exp < 0x7FD) ) {
+        uZ.ui = packToF64UI( sign, sig ? exp : 0, sig<<(shiftDist - 10) );
+        return uZ.f;
+    } else {
+        return softfloat_roundPackToF64( sign, exp, sig<<shiftDist );
+    }
+}
+
diff --git a/src/common/softfloat/source/s_normSubnormalF16Sig.c b/src/common/softfloat/source/s_normSubnormalF16Sig.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/s_normSubnormalF16Sig.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+
+struct exp8_sig16 softfloat_normSubnormalF16Sig( uint_fast16_t sig )
+{
+    int_fast8_t shiftDist;
+    struct exp8_sig16 z;
+
+    shiftDist = softfloat_countLeadingZeros16( sig ) - 5;
+    z.exp = 1 - shiftDist;
+    z.sig = sig<<shiftDist;
+    return z;
+}
+
diff --git a/src/common/softfloat/source/s_normSubnormalF32Sig.c b/src/common/softfloat/source/s_normSubnormalF32Sig.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/s_normSubnormalF32Sig.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+
+struct exp16_sig32 softfloat_normSubnormalF32Sig( uint_fast32_t sig )
+{
+    int_fast8_t shiftDist;
+    struct exp16_sig32 z;
+
+    shiftDist = softfloat_countLeadingZeros32( sig ) - 8;
+    z.exp = 1 - shiftDist;
+    z.sig = sig<<shiftDist;
+    return z;
+}
+
diff --git a/src/common/softfloat/source/s_normSubnormalF64Sig.c b/src/common/softfloat/source/s_normSubnormalF64Sig.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/s_normSubnormalF64Sig.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+
+struct exp16_sig64 softfloat_normSubnormalF64Sig( uint_fast64_t sig )
+{
+    int_fast8_t shiftDist;
+    struct exp16_sig64 z;
+
+    shiftDist = softfloat_countLeadingZeros64( sig ) - 11;
+    z.exp = 1 - shiftDist;
+    z.sig = sig<<shiftDist;
+    return z;
+}
+
diff --git a/src/common/softfloat/source/s_roundPackToF16.c b/src/common/softfloat/source/s_roundPackToF16.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/s_roundPackToF16.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float16_t
+ softfloat_roundPackToF16( bool sign, int_fast16_t exp, uint_fast16_t sig )
+{
+    uint_fast8_t roundingMode;
+    bool roundNearEven;
+    uint_fast8_t roundIncrement, roundBits;
+    bool isTiny;
+    uint_fast16_t uiZ;
+    union ui16_f16 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundingMode = softfloat_roundingMode;
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    roundIncrement = 0x8;
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        roundIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                ? 0xF
+                : 0;
+    }
+    roundBits = sig & 0xF;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( 0x1D <= (unsigned int) exp ) {
+        if ( exp < 0 ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            isTiny =
+                (softfloat_detectTininess == softfloat_tininess_beforeRounding)
+                    || (exp < -1) || (sig + roundIncrement < 0x8000);
+            sig = softfloat_shiftRightJam32( sig, -exp );
+            exp = 0;
+            roundBits = sig & 0xF;
+            if ( isTiny && roundBits ) {
+                softfloat_raiseFlags( softfloat_flag_underflow );
+            }
+        } else if ( (0x1D < exp) || (0x8000 <= sig + roundIncrement) ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            softfloat_raiseFlags(
+                softfloat_flag_overflow | softfloat_flag_inexact );
+            uiZ = packToF16UI( sign, 0x1F, 0 ) - ! roundIncrement;
+            goto uiZ;
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig = (sig + roundIncrement)>>4;
+    if ( roundBits ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+        if ( roundingMode == softfloat_round_odd ) {
+            sig |= 1;
+            goto packReturn;
+        }
+#endif
+    }
+    sig &= ~(uint_fast16_t) (! (roundBits ^ 8) & roundNearEven);
+    if ( ! sig ) exp = 0;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ packReturn:
+    uiZ = packToF16UI( sign, exp, sig );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
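All three roundPack routines share the same integer idiom: add half an ulp (`roundIncrement`), truncate the round bits, then clear the low result bit when the discarded part was exactly one half (the `roundBits ^ 8`-style test) so ties go to even. A compact hedged model of that idiom on a bare integer:

    #include <assert.h>
    #include <stdint.h>

    /* Round a value to a multiple of 128 (7 round bits), nearest-even --
       the same arithmetic the f32 roundPack below applies to significands. */
    static uint32_t round_rne_128( uint32_t sig )
    {
        uint32_t roundBits = sig & 0x7F;
        sig = (sig + 0x40)>>7;
        sig &= ~(uint32_t) (roundBits == 0x40);  /* exact tie: clear LSB */
        return sig<<7;
    }

    int main( void )
    {
        assert( round_rne_128( 0x1CD ) == 0x200 );  /* 3.6  -> 4        */
        assert( round_rne_128( 0x1C0 ) == 0x200 );  /* 3.5  -> 4 (even) */
        assert( round_rne_128( 0x240 ) == 0x200 );  /* 4.5  -> 4 (even) */
        return 0;
    }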
diff --git a/src/common/softfloat/source/s_roundPackToF32.c b/src/common/softfloat/source/s_roundPackToF32.c
new file mode 100644
index 0000000..cc34508
--- /dev/null
+++ b/src/common/softfloat/source/s_roundPackToF32.c
@@ -0,0 +1,113 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t
+ softfloat_roundPackToF32( bool sign, int_fast16_t exp, uint_fast32_t sig )
+{
+    uint_fast8_t roundingMode;
+    bool roundNearEven;
+    uint_fast8_t roundIncrement, roundBits;
+    bool isTiny;
+    uint_fast32_t uiZ;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundingMode = softfloat_roundingMode;
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    roundIncrement = 0x40;
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        roundIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                ? 0x7F
+                : 0;
+    }
+    roundBits = sig & 0x7F;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( 0xFD <= (unsigned int) exp ) {
+        if ( exp < 0 ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            isTiny =
+                (softfloat_detectTininess == softfloat_tininess_beforeRounding)
+                    || (exp < -1) || (sig + roundIncrement < 0x80000000);
+            sig = softfloat_shiftRightJam32( sig, -exp );
+            exp = 0;
+            roundBits = sig & 0x7F;
+            if ( isTiny && roundBits ) {
+                softfloat_raiseFlags( softfloat_flag_underflow );
+            }
+        } else if ( (0xFD < exp) || (0x80000000 <= sig + roundIncrement) ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            softfloat_raiseFlags(
+                softfloat_flag_overflow | softfloat_flag_inexact );
+            uiZ = packToF32UI( sign, 0xFF, 0 ) - ! roundIncrement;
+            goto uiZ;
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig = (sig + roundIncrement)>>7;
+    if ( roundBits ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+        if ( roundingMode == softfloat_round_odd ) {
+            sig |= 1;
+            goto packReturn;
+        }
+#endif
+    }
+    sig &= ~(uint_fast32_t) (! (roundBits ^ 0x40) & roundNearEven);
diff --git a/src/common/softfloat/source/s_roundPackToF64.c b/src/common/softfloat/source/s_roundPackToF64.c
new file mode 100644
index 0000000..aaff008
--- /dev/null
+++ b/src/common/softfloat/source/s_roundPackToF64.c
@@ -0,0 +1,117 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t
+ softfloat_roundPackToF64( bool sign, int_fast16_t exp, uint_fast64_t sig )
+{
+    uint_fast8_t roundingMode;
+    bool roundNearEven;
+    uint_fast16_t roundIncrement, roundBits;
+    bool isTiny;
+    uint_fast64_t uiZ;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundingMode = softfloat_roundingMode;
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    roundIncrement = 0x200;
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        roundIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                ? 0x3FF
+                : 0;
+    }
+    roundBits = sig & 0x3FF;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( 0x7FD <= (uint16_t) exp ) {
+        if ( exp < 0 ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            isTiny =
+                (softfloat_detectTininess == softfloat_tininess_beforeRounding)
+                    || (exp < -1)
+                    || (sig + roundIncrement < UINT64_C( 0x8000000000000000 ));
+            sig = softfloat_shiftRightJam64( sig, -exp );
+            exp = 0;
+            roundBits = sig & 0x3FF;
+            if ( isTiny && roundBits ) {
+                softfloat_raiseFlags( softfloat_flag_underflow );
+            }
+        } else if (
+            (0x7FD < exp)
+                || (UINT64_C( 0x8000000000000000 ) <= sig + roundIncrement)
+        ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            softfloat_raiseFlags(
+                softfloat_flag_overflow | softfloat_flag_inexact );
+            uiZ = packToF64UI( sign, 0x7FF, 0 ) - ! roundIncrement;
+            goto uiZ;
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig = (sig + roundIncrement)>>10;
+    if ( roundBits ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+        if ( roundingMode == softfloat_round_odd ) {
+            sig |= 1;
+            goto packReturn;
+        }
+#endif
+    }
+    sig &= ~(uint_fast64_t) (! (roundBits ^ 0x200) & roundNearEven);
+    if ( ! sig ) exp = 0;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ packReturn:
+    uiZ = packToF64UI( sign, exp, sig );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/src/common/softfloat/source/s_roundToI32.c b/src/common/softfloat/source/s_roundToI32.c
new file mode 100644
index 0000000..20a3ff4
--- /dev/null
+++ b/src/common/softfloat/source/s_roundToI32.c
@@ -0,0 +1,84 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast32_t
+ softfloat_roundToI32(
+     bool sign, uint_fast64_t sig, uint_fast8_t roundingMode, bool exact )
+{
+    bool roundNearEven;
+    uint_fast16_t roundIncrement, roundBits;
+    uint_fast32_t sig32;
+    union { uint32_t ui; int32_t i; } uZ;
+    int_fast32_t z;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    roundIncrement = 0x800;
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        roundIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                ? 0xFFF
+                : 0;
+    }
+    roundBits = sig & 0xFFF;
+    sig += roundIncrement;
+    if ( sig & UINT64_C( 0xFFFFF00000000000 ) ) goto invalid;
+    sig32 = sig>>12;
+    sig32 &= ~(uint_fast32_t) (! (roundBits ^ 0x800) & roundNearEven);
+    uZ.ui = sign ? -sig32 : sig32;
+    z = uZ.i;
+    if ( z && ((z < 0) ^ sign) ) goto invalid;
+    if ( exact && roundBits ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return sign ? i32_fromNegOverflow : i32_fromPosOverflow;
+
+}
+
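A sketch of how the increment table above plays out: softfloat_roundToI32() receives the magnitude in 52.12 fixed point (12 round bits), and the choice of roundIncrement (0x800 for nearest, 0xFFF or 0 for the directed modes) is the whole rounding decision. Illustrative only, not part of the patch; the sign handling is simplified to a positive operand:

#include <stdint.h>
#include <stdio.h>

/* Minimal re-creation of the rounding arithmetic in softfloat_roundToI32():
 * the integer lives in bits 12..63 of 'sig', the fraction in bits 0..11. */
static uint32_t round_fixed_12(uint64_t sig, int toward_zero, int near_even)
{
    uint32_t roundIncrement = near_even ? 0x800 : (toward_zero ? 0x000 : 0xFFF);
    uint32_t roundBits = sig & 0xFFF;
    uint32_t z = (uint32_t)((sig + roundIncrement) >> 12);
    /* near-even: on an exact tie (roundBits == 0x800) force the result even */
    if (near_even) z &= ~(uint32_t)((roundBits ^ 0x800) == 0);
    return z;
}

int main(void)
{
    uint64_t two_and_half = (2ULL << 12) | 0x800;             /* 2.5 in 52.12 */
    printf("nearest-even: %u\n", round_fixed_12(two_and_half, 0, 1)); /* 2 */
    printf("toward zero:  %u\n", round_fixed_12(two_and_half, 1, 0)); /* 2 */
    printf("away (max):   %u\n", round_fixed_12(two_and_half, 0, 0)); /* 3 */
    return 0;
}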
diff --git a/src/common/softfloat/source/s_roundToI64.c b/src/common/softfloat/source/s_roundToI64.c
new file mode 100644
index 0000000..fcddbc2
--- /dev/null
+++ b/src/common/softfloat/source/s_roundToI64.c
@@ -0,0 +1,89 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast64_t
+ softfloat_roundToI64(
+     bool sign,
+     uint_fast64_t sig,
+     uint_fast64_t sigExtra,
+     uint_fast8_t roundingMode,
+     bool exact
+ )
+{
+    bool roundNearEven, doIncrement;
+    union { uint64_t ui; int64_t i; } uZ;
+    int_fast64_t z;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra);
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        doIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                && sigExtra;
+    }
+    if ( doIncrement ) {
+        ++sig;
+        if ( ! sig ) goto invalid;
+        sig &=
+            ~(uint_fast64_t)
+                 (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+                      & roundNearEven);
+    }
+    uZ.ui = sign ? -sig : sig;
+    z = uZ.i;
+    if ( z && ((z < 0) ^ sign) ) goto invalid;
+    if ( exact && sigExtra ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return sign ? i64_fromNegOverflow : i64_fromPosOverflow;
+
+}
+
diff --git a/src/common/softfloat/source/s_roundToUI32.c b/src/common/softfloat/source/s_roundToUI32.c
new file mode 100644
index 0000000..180899b
--- /dev/null
+++ b/src/common/softfloat/source/s_roundToUI32.c
@@ -0,0 +1,80 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast32_t
+ softfloat_roundToUI32(
+     bool sign, uint_fast64_t sig, uint_fast8_t roundingMode, bool exact )
+{
+    bool roundNearEven;
+    uint_fast16_t roundIncrement, roundBits;
+    uint_fast32_t z;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    roundIncrement = 0x800;
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        roundIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                ? 0xFFF
+                : 0;
+    }
+    roundBits = sig & 0xFFF;
+    sig += roundIncrement;
+    if ( sig & UINT64_C( 0xFFFFF00000000000 ) ) goto invalid;
+    z = sig>>12;
+    z &= ~(uint_fast32_t) (! (roundBits ^ 0x800) & roundNearEven);
+    if ( sign && z ) goto invalid;
+    if ( exact && roundBits ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return sign ? ui32_fromNegOverflow : ui32_fromPosOverflow;
+
+}
+
diff --git a/src/common/softfloat/source/s_roundToUI64.c b/src/common/softfloat/source/s_roundToUI64.c
new file mode 100644
index 0000000..de35b5e
--- /dev/null
+++ b/src/common/softfloat/source/s_roundToUI64.c
@@ -0,0 +1,85 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast64_t
+ softfloat_roundToUI64(
+     bool sign,
+     uint_fast64_t sig,
+     uint_fast64_t sigExtra,
+     uint_fast8_t roundingMode,
+     bool exact
+ )
+{
+    bool roundNearEven, doIncrement;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra);
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        doIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                && sigExtra;
+    }
+    if ( doIncrement ) {
+        ++sig;
+        if ( ! sig ) goto invalid;
+        sig &=
+            ~(uint_fast64_t)
+                 (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+                      & roundNearEven);
+    }
+    if ( sign && sig ) goto invalid;
+    if ( exact && sigExtra ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return sig;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return sign ? ui64_fromNegOverflow : ui64_fromPosOverflow;
+
+}
+
diff --git a/src/common/softfloat/source/s_shiftRightJam128.c b/src/common/softfloat/source/s_shiftRightJam128.c
new file mode 100644
index 0000000..7f3d4c8
--- /dev/null
+++ b/src/common/softfloat/source/s_shiftRightJam128.c
@@ -0,0 +1,70 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+#include "primitives.h"
+
+#ifndef softfloat_shiftRightJam128
+
+struct uint128
+ softfloat_shiftRightJam128( uint64_t a64, uint64_t a0, uint_fast32_t dist )
+{
+    uint_fast8_t u8NegDist;
+    struct uint128 z;
+
+    if ( dist < 64 ) {
+        u8NegDist = -dist;
+        z.v64 = a64>>dist;
+        z.v0 =
+            a64<<(u8NegDist & 63) | a0>>dist
+                | ((uint64_t) (a0<<(u8NegDist & 63)) != 0);
+    } else {
+        z.v64 = 0;
+        z.v0 =
+            (dist < 127)
+                ? a64>>(dist & 63)
+                      | (((a64 & (((uint_fast64_t) 1<<(dist & 63)) - 1)) | a0)
+                             != 0)
+                : ((a64 | a0) != 0);
+    }
+    return z;
+
+}
+
+#endif
+
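The `!= 0` terms above implement the usual sticky ("jam") bit: every bit shifted out is OR-ed into bit 0, so later rounding can still tell the value was inexact. A 64-bit toy version of the same idea (illustrative, not part of the patch; the library's own softfloat_shiftRightJam64, used elsewhere in these files, is the real single-word equivalent):

#include <stdint.h>
#include <stdio.h>

/* Shift right by dist, OR-ing any shifted-out ones into the low bit. */
static uint64_t shift_right_jam64(uint64_t a, unsigned int dist)
{
    if (dist >= 64) return a != 0;
    return (a >> dist) | ((a & ((UINT64_C(1) << dist) - 1)) != 0);
}

int main(void)
{
    printf("0x%llx\n", (unsigned long long)shift_right_jam64(0x23, 4)); /* 0x3: dropped bits stick */
    printf("0x%llx\n", (unsigned long long)shift_right_jam64(0x20, 4)); /* 0x2: exact, nothing sticks */
    return 0;
}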
diff --git a/src/common/softfloat/source/s_subMagsF32.c b/src/common/softfloat/source/s_subMagsF32.c
new file mode 100644
index 0000000..86e89f2
--- /dev/null
+++ b/src/common/softfloat/source/s_subMagsF32.c
@@ -0,0 +1,143 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t softfloat_subMagsF32( uint_fast32_t uiA, uint_fast32_t uiB )
+{
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    int_fast16_t expDiff;
+    uint_fast32_t uiZ;
+    int_fast32_t sigDiff;
+    bool signZ;
+    int_fast8_t shiftDist;
+    int_fast16_t expZ;
+    uint_fast32_t sigX, sigY;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expDiff = expA - expB;
+    if ( ! expDiff ) {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( expA == 0xFF ) {
+            if ( sigA | sigB ) goto propagateNaN;
+            softfloat_raiseFlags( softfloat_flag_invalid );
+            uiZ = defaultNaNF32UI;
+            goto uiZ;
+        }
+        sigDiff = sigA - sigB;
+        if ( ! sigDiff ) {
+            uiZ =
+                packToF32UI(
+                    (softfloat_roundingMode == softfloat_round_min), 0, 0 );
+            goto uiZ;
+        }
+        if ( expA ) --expA;
+        signZ = signF32UI( uiA );
+        if ( sigDiff < 0 ) {
+            signZ = ! signZ;
+            sigDiff = -sigDiff;
+        }
+        shiftDist = softfloat_countLeadingZeros32( sigDiff ) - 8;
+        expZ = expA - shiftDist;
+        if ( expZ < 0 ) {
+            shiftDist = expA;
+            expZ = 0;
+        }
+        uiZ = packToF32UI( signZ, expZ, sigDiff<<shiftDist );
+        goto uiZ;
+    } else {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        sigA <<= 7;
+        sigB <<= 7;
+        signZ = signF32UI( uiA );
+        if ( expDiff < 0 ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            signZ = ! signZ;
+            if ( expB == 0xFF ) {
+                if ( sigB ) goto propagateNaN;
+                uiZ = packToF32UI( signZ, 0xFF, 0 );
+                goto uiZ;
+            }
+            expZ = expB;
+            sigX = sigB | 0x40000000;
+            sigY = sigA + (expA ? 0x40000000 : sigA);
+            expDiff = -expDiff;
+        } else {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            if ( expA == 0xFF ) {
+                if ( sigA ) goto propagateNaN;
+                uiZ = uiA;
+                goto uiZ;
+            }
+            expZ = expA;
+            sigX = sigA | 0x40000000;
+            sigY = sigB + (expB ? 0x40000000 : sigB);
+        }
+        return
+            softfloat_normRoundPackToF32(
+                signZ, expZ - 1, sigX - softfloat_shiftRightJam32( sigY, expDiff )
+            );
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
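One subtle branch above: when the operands are exactly equal, the difference is a signed zero whose sign depends on the rounding mode (IEEE 754 requires -0 under round-toward-negative, +0 otherwise), which is what the `packToF32UI((softfloat_roundingMode == softfloat_round_min), 0, 0)` line encodes. Host FPUs behave the same way, as this standalone check shows (not part of the patch; build with -frounding-math and link with -lm):

#include <fenv.h>
#include <stdio.h>

int main(void)
{
    volatile float x = 1.5f;  /* volatile so x - x is evaluated at run time */

    fesetround(FE_TONEAREST);
    printf("%+f\n", (double)(x - x));   /* prints +0.000000 */

    fesetround(FE_DOWNWARD);
    printf("%+f\n", (double)(x - x));   /* prints -0.000000 */

    return 0;
}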
diff --git a/src/common/softfloat/source/s_subMagsF64.c b/src/common/softfloat/source/s_subMagsF64.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/s_subMagsF64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t
+ softfloat_subMagsF64( uint_fast64_t uiA, uint_fast64_t uiB, bool signZ )
+{
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    int_fast16_t expDiff;
+    uint_fast64_t uiZ;
+    int_fast64_t sigDiff;
+    int_fast8_t shiftDist;
+    int_fast16_t expZ;
+    uint_fast64_t sigZ;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expDiff = expA - expB;
+    if ( ! expDiff ) {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( expA == 0x7FF ) {
+            if ( sigA | sigB ) goto propagateNaN;
+            softfloat_raiseFlags( softfloat_flag_invalid );
+            uiZ = defaultNaNF64UI;
+            goto uiZ;
+        }
+        sigDiff = sigA - sigB;
+        if ( ! sigDiff ) {
+            uiZ =
+                packToF64UI(
+                    (softfloat_roundingMode == softfloat_round_min), 0, 0 );
+            goto uiZ;
+        }
+        if ( expA ) --expA;
+        if ( sigDiff < 0 ) {
+            signZ = ! signZ;
+            sigDiff = -sigDiff;
+        }
+        shiftDist = softfloat_countLeadingZeros64( sigDiff ) - 11;
+        expZ = expA - shiftDist;
+        if ( expZ < 0 ) {
+            shiftDist = expA;
+            expZ = 0;
+        }
+        uiZ = packToF64UI( signZ, expZ, sigDiff<<shiftDist );
+        goto uiZ;
+    } else {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        sigA <<= 10;
+        sigB <<= 10;
+        if ( expDiff < 0 ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            signZ = ! signZ;
+            if ( expB == 0x7FF ) {
+                if ( sigB ) goto propagateNaN;
+                uiZ = packToF64UI( signZ, 0x7FF, 0 );
+                goto uiZ;
+            }
+            sigA += expA ? UINT64_C( 0x4000000000000000 ) : sigA;
+            sigA = softfloat_shiftRightJam64( sigA, -expDiff );
+            sigB |= UINT64_C( 0x4000000000000000 );
+            expZ = expB;
+            sigZ = sigB - sigA;
+        } else {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            if ( expA == 0x7FF ) {
+                if ( sigA ) goto propagateNaN;
+                uiZ = uiA;
+                goto uiZ;
+            }
+            sigB += expB ? UINT64_C( 0x4000000000000000 ) : sigB;
+            sigB = softfloat_shiftRightJam64( sigB, expDiff );
+            sigA |= UINT64_C( 0x4000000000000000 );
+            expZ = expA;
+            sigZ = sigA - sigB;
+        }
+        return softfloat_normRoundPackToF64( signZ, expZ - 1, sigZ );
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/src/common/softfloat/source/softfloat_state.c b/src/common/softfloat/source/softfloat_state.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/softfloat_state.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+#ifndef THREAD_LOCAL
+#define THREAD_LOCAL
+#endif
+
+THREAD_LOCAL uint_fast8_t softfloat_roundingMode = softfloat_round_near_even;
+THREAD_LOCAL uint_fast8_t softfloat_detectTininess = init_detectTininess;
+THREAD_LOCAL uint_fast8_t softfloat_exceptionFlags = 0;
diff --git a/src/common/softfloat/source/ui32_to_f32.c b/src/common/softfloat/source/ui32_to_f32.c
new file mode 100644
index 0000000..7e5ece6
--- /dev/null
+++ b/src/common/softfloat/source/ui32_to_f32.c
@@ -0,0 +1,57 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All Rights Reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t ui32_to_f32( uint32_t a )
+{
+    union ui32_f32 uZ;
+
+    if ( ! a ) {
+        uZ.ui = 0;
+        return uZ.f;
+    }
+    if ( a & 0x80000000 ) {
+        return softfloat_roundPackToF32( 0, 0x9D, a>>1 | (a & 1) );
+    } else {
+        return softfloat_normRoundPackToF32( 0, 0x9C, a );
+    }
+
+}
+
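Worth noting in ui32_to_f32.c above: a 32-bit integer with bit 31 set can carry more significant bits than float's 24-bit significand, so the low bit is folded in as a sticky bit (`a>>1 | (a & 1)`) before rounding. The effect is observable with plain host casts (illustrative check, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Near 2^31 a float's ulp is 256.  0x80000080 sits exactly halfway
     * between 2^31 and 2^31 + 256, so ties-to-even picks 2^31; 0x80000081
     * is just past the halfway point and must round up.  Without the
     * jammed bit the second case would look like a tie too. */
    printf("%.1f\n", (double)(float)0x80000080u);  /* 2147483648.0 */
    printf("%.1f\n", (double)(float)0x80000081u);  /* 2147483904.0 */
    return 0;
}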
diff --git a/src/common/softfloat/source/ui32_to_f64.c b/src/common/softfloat/source/ui32_to_f64.c
new file mode 100644
index 0000000..5e5f843
--- /dev/null
+++ b/src/common/softfloat/source/ui32_to_f64.c
@@ -0,0 +1,59 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All Rights Reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t ui32_to_f64( uint32_t a )
+{
+    uint_fast64_t uiZ;
+    int_fast8_t shiftDist;
+    union ui64_f64 uZ;
+
+    if ( ! a ) {
+        uiZ = 0;
+    } else {
+        shiftDist = softfloat_countLeadingZeros32( a ) + 21;
+        uiZ =
+            packToF64UI( 0, 0x432 - shiftDist, (uint_fast64_t) a<<shiftDist );
+    }
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/src/common/softfloat/source/ui64_to_f32.c b/src/common/softfloat/source/ui64_to_f32.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/ui64_to_f32.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t ui64_to_f32( uint64_t a )
+{
+    int_fast8_t shiftDist;
+    union ui32_f32 u;
+    uint_fast32_t sig;
+
+    shiftDist = softfloat_countLeadingZeros64( a ) - 40;
+    if ( 0 <= shiftDist ) {
+        u.ui =
+            a ? packToF32UI(
+                    0, 0x95 - shiftDist, (uint_fast32_t) a<<shiftDist )
+                : 0;
+        return u.f;
+    } else {
+        shiftDist += 7;
+        sig =
+            (shiftDist < 0)
+                ? softfloat_shortShiftRightJam64( a, -shiftDist )
+                : (uint_fast32_t) a<<shiftDist;
+        return softfloat_roundPackToF32( 0, 0x9C - shiftDist, sig );
+    }
+
+}
+
diff --git a/src/common/softfloat/source/ui64_to_f64.c b/src/common/softfloat/source/ui64_to_f64.c
new file mode 100644
--- /dev/null
+++ b/src/common/softfloat/source/ui64_to_f64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t ui64_to_f64( uint64_t a )
+{
+    union ui64_f64 uZ;
+
+    if ( ! a ) {
+        uZ.ui = 0;
+        return uZ.f;
+    }
+    if ( a & UINT64_C( 0x8000000000000000 ) ) {
+        return
+            softfloat_roundPackToF64(
+                0, 0x43D, softfloat_shortShiftRightJam64( a, 1 ) );
+    } else {
+        return softfloat_normRoundPackToF64( 0, 0x43C, a );
+    }
+
+}
+
diff --git a/src/common/src/nv_smg.c b/src/common/src/nv_smg.c
new file mode 100644
index 0000000..7998f50
--- /dev/null
+++ b/src/common/src/nv_smg.c
@@ -0,0 +1,677 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#if defined(NV_SMG_IN_NVKMS)
+# include "nvkms-utils.h"
+#else
+# include <stddef.h>
+# include <string.h>
+# include <pthread.h>
+#endif
+
+
+#include "nv_smg.h"
+#include "nvmisc.h"
+
+#include "class/cl0000.h"
+#include "class/cl0080.h"
+#include "class/cl2080.h"
+#include "class/clc637.h"
+#include "class/clc638.h"
+#include "ctrl/ctrl0000/ctrl0000gpu.h"
+#include "ctrl/ctrl0080/ctrl0080gpu.h"
+#include "ctrl/ctrl2080/ctrl2080gpu.h"
+#include "ctrl/ctrlc637.h"
+#include "ctrl/ctrlc638.h"
+
+/*
+ * Since we will be compiled both into NVKMS and userspace drivers we do a
+ * little renaming here to make the rest of the code target-agnostic.
+ */
+#if defined(NV_SMG_IN_NVKMS)
+#define smg_memcmp nvkms_memcmp
+#define smg_memcpy nvkms_memcpy
+#define smg_memset nvkms_memset
+#define smg_strcmp nvkms_strcmp
+#else
+#define smg_memcmp memcmp
+#define smg_memcpy memcpy
+#define smg_memset memset
+#define smg_strcmp strcmp
+#endif
+
+/*
+ * Stack is limited in kernelspace, so in NVKMS builds we allocate our
+ * workspace from the heap. RM parameters are huge, so this is needed in
+ * several of the main RM-heavy functions. These macros hide the unavoidable
+ * boilerplate and hopefully make the actual functions read more cleanly.
+ */
+#if defined(NV_SMG_IN_NVKMS)
+#define ENTER_WORKSPACE(ws)                            \
+    (ws) = (struct workspace *)nvAlloc(sizeof(*(ws))); \
+    if (!(ws)) {                                       \
+        return NV_FALSE;                               \
+    }
+#define EXIT_WORKSPACE_AND_RETURN(ws, retval) \
+    nvFree((ws));                             \
+    return (retval)
+
+#else /* !NV_SMG_IN_NVKMS */
+
+#define ENTER_WORKSPACE(ws) \
+    struct workspace _ws;   \
+    (ws) = &_ws
+#define EXIT_WORKSPACE_AND_RETURN(ws, retval) \
+    return (retval)
+#endif
+
+/*
+ * This is a conservative guess. Theoretically, looking at the current
+ * templates, each GPU can be split into at least 8 partitions. We're also
+ * limited by the number of GRCEs: each partition needs one, so realistically
+ * the maximum is more like two partitions per GPU. Then, NV_MAX_DEVICES
+ * itself is 32, which is also a rather high number. I'm not quite sure if
+ * there exists hardware that supports 32 individual GPU cards. We'll just
+ * arbitrarily set the MIG device array size to the same 32: it likely
+ * overshoots conservatively, but if we ever fill them all ListPartitions()
+ * will fail and we can bump the size up.
+ */
+#define NV_MAX_MIG_DEVICES 32
+
+static struct nvGlobalMigMappingRec {
+    NvBool initialized;
+
+    NvU32 deviceCount;
+    nvMIGDeviceDescription deviceList[NV_MAX_MIG_DEVICES];
+} globalMigMapping;
+
+/* MIGDeviceId to index in deviceList */
+#define MIG_DEVICE_ID_TO_INDEX(id) (~(id) & ~MIG_DEVICE_ID_SUBDEV_MASK)
+/* Create a MIGDeviceId based on index and subdev index. */
+#define MIG_DEVICE_INDEX_TO_ID(idx, subdev_idx) ((~(idx) & ~MIG_DEVICE_ID_SUBDEV_MASK) | \
+                                                 ((subdev_idx & 0xf) << MIG_DEVICE_ID_SUBDEV_SHIFT))
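+/*
+ * Worked example for the two macros above (values illustrative: this assumes
+ * MIG_DEVICE_ID_SUBDEV_SHIFT == 28, MIG_DEVICE_ID_SUBDEV_MASK == 0xF0000000,
+ * and NO_MIG_DEVICE == 0, all of which live in nv_smg.h):
+ *
+ *   MIG_DEVICE_INDEX_TO_ID(2, 1) == (~2 & 0x0FFFFFFF) | (1 << 28)
+ *                                == 0x0FFFFFFD | 0x10000000 == 0x1FFFFFFD
+ *   MIG_DEVICE_ID_TO_INDEX(0x1FFFFFFD) == ~0x1FFFFFFD & 0x0FFFFFFF == 2
+ *
+ * The bitwise NOT keeps index 0 from encoding to 0 and colliding with
+ * NO_MIG_DEVICE.
+ */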
+
+static NvBool InitializeGlobalMapping(nvRMContextPtr rmctx);
+static NvBool ListPartitions(nvRMContextPtr rmctx, struct nvGlobalMigMappingRec *mappings);
+#if !defined(NV_MODS) && !defined(NV_RMAPI_TEGRA)
+static NvBool GetGraphicsPartitionUUIDForDevice(nvRMContextPtr rmctx, nvMIGDeviceDescription *migDev);
+#endif
+static NvU32 DeviceInMigMode (nvRMContextPtr rmctx, NvU32 hSubDevice, NvBool *inMigMode);
+static NvBool GpuHasSMGPartitions (NvU32 gpuId);
+
+/*
+ * Set up SMG for the given subdevice. Handles partition allocation and
+ * selection. Returns true if this subdevice is equipped to do graphics.
+ * That is, either when not in MIG mode at all, or when MIG mode is enabled
+ * and we were able to successfully set up an SMG partition for doing
+ * graphics on this subdevice. Returns false when any kind of unrecoverable
+ * error condition that would leave graphics broken is encountered.
+ */
+NvBool nvSMGSubscribeSubDevToPartition(nvRMContextPtr rmctx,
+                                       NvU32 subDevHandle,
+                                       MIGDeviceId migDevice,
+                                       NvU32 hGpuInstSubscription,
+                                       NvU32 hComputeInstSubscription)
+{
+    /*
+     * These RM parameters can be huge: allocate them from the heap to keep
+     * kernel stack usage low. For userspace, allocate on stack.
+     */
+    struct workspace {
+        NV2080_CTRL_GPU_GET_ID_PARAMS getIdParams;
+        NVC637_ALLOCATION_PARAMETERS allocParams;
+        NVC638_ALLOCATION_PARAMETERS execAllocParams;
+        NVC638_CTRL_GET_UUID_PARAMS getUuidParams;
+    } *ws;
+    const nvMIGDeviceDescription *desc = NULL;
+    NvU32 gpuId = NV0000_CTRL_GPU_INVALID_ID;
+    NvBool inMigMode;
+    NvU32 res;
+
+    if (migDevice == NO_MIG_DEVICE) {
+        return NV_FALSE;
+    }
+
+    /* First, make sure we've created the identities for all MIG devices. */
+    if (!InitializeGlobalMapping(rmctx)) {
+        return NV_FALSE;
+    }
+
+    ENTER_WORKSPACE(ws);
+    smg_memset(ws, 0, sizeof(*ws));
+
+    /* Read gpuID for reference and cross-checking. */
+    res = rmctx->control(rmctx,
+                         rmctx->clientHandle,
+                         subDevHandle,
+                         NV2080_CTRL_CMD_GPU_GET_ID,
+                         &ws->getIdParams,
+                         sizeof(ws->getIdParams));
+    if (res != NV_OK) {
+        EXIT_WORKSPACE_AND_RETURN(ws, NV_FALSE);
+    }
+    gpuId = ws->getIdParams.gpuId;
+
+    /*
+     * Before anything, check explicitly whether the GPU is in MIG mode.
+     */
+    if (DeviceInMigMode(rmctx, subDevHandle, &inMigMode) != NV_OK) {
+        EXIT_WORKSPACE_AND_RETURN(ws, NV_FALSE);
+    }
+
+    /*
+     * If not in MIG mode then just return true to indicate we're able to do
+     * graphics. No MIG mode, no partitions, no subscriptions.
+     */
+    if (!inMigMode) {
+        EXIT_WORKSPACE_AND_RETURN(ws, NV_TRUE);
+    }
+
+    /*
+     * However, if we're in MIG mode but there are no (graphics) partitions
+     * this means we won't be able to do graphics. Bail out with false.
+     */
+    if (!GpuHasSMGPartitions(gpuId)) {
+        EXIT_WORKSPACE_AND_RETURN(ws, NV_FALSE);
+    }
+
+    /* Get the device description for exact partition parameters. */
+    if (nvSMGGetDeviceById(rmctx, migDevice, &desc) != NV_OK) {
+        EXIT_WORKSPACE_AND_RETURN(ws, NV_FALSE);
+    }
+
+    /* If the GPU id doesn't match, something is wrong. */
+    if (desc->gpuId != gpuId) {
+        EXIT_WORKSPACE_AND_RETURN(ws, NV_FALSE);
+    }
+
+    /*
+     * Try to subscribe to the graphics partition.
+     */
+    ws->allocParams.swizzId = desc->gpuInstanceId;
+
+    res = rmctx->alloc(rmctx,
+                       rmctx->clientHandle,
+                       subDevHandle,
+                       hGpuInstSubscription,
+                       AMPERE_SMC_PARTITION_REF,
+                       &ws->allocParams);
+
+    if (res != NV_OK) {
+        EXIT_WORKSPACE_AND_RETURN(ws, NV_FALSE);
+    }
+
+    /*
+     * Next try to subscribe to the compute instance (exec partition)
+     * that should be available under our graphics partition.
+     */
+    ws->execAllocParams.execPartitionId = desc->computeInstanceId;
+
+    res = rmctx->alloc(rmctx,
+                       rmctx->clientHandle,
+                       hGpuInstSubscription,
+                       hComputeInstSubscription,
+                       AMPERE_SMC_EXEC_PARTITION_REF,
+                       &ws->execAllocParams);
+
+    if (res == NV_OK) {
+        /*
+         * Ok, found and allocated the desired compute instance (exec
+         * partition). The subDevHandle is now fully subscribed to do SMG on
+         * the requested MIG partition.
+         */
+        EXIT_WORKSPACE_AND_RETURN(ws, NV_TRUE);
+    }
+
+    rmctx->free(rmctx, rmctx->clientHandle, subDevHandle, hGpuInstSubscription);
+
+    EXIT_WORKSPACE_AND_RETURN(ws, NV_FALSE);
+}
+
+
+/*
+ * Get unified device description matching the given MIG UUID.
+ */
+NvU32 nvSMGGetDeviceByUUID(nvRMContextPtr rmctx,
+                           const char *migUuid,
+                           const nvMIGDeviceDescription **uniDev)
+{
+    NvU32 i;
+
+    if (!InitializeGlobalMapping(rmctx)) {
+        return NV_ERR_INVALID_STATE;
+    }
+
+    for (i = 0; i < globalMigMapping.deviceCount; i++) {
+        if (smg_strcmp(globalMigMapping.deviceList[i].migUuid, migUuid) == 0) {
+            *uniDev = &globalMigMapping.deviceList[i];
+            return NV_OK;
+        }
+    }
+
+    return NV_ERR_INVALID_ARGUMENT;
+}
+
+/*
+ * Get unified device description matching a MIG device ID.
+ */
+NvU32 nvSMGGetDeviceById(nvRMContextPtr rmctx,
+                         MIGDeviceId migDevice,
+                         const nvMIGDeviceDescription **uniDev)
+{
+    NvU32 index = MIG_DEVICE_ID_TO_INDEX(migDevice);
+
+    if (!InitializeGlobalMapping(rmctx)) {
+        return NV_ERR_INVALID_STATE;
+    }
+
+    if (index < globalMigMapping.deviceCount &&
+        /* The following condition is to satisfy array bounds checker: */
+        globalMigMapping.deviceCount <= NV_MAX_MIG_DEVICES) {
+        *uniDev = &globalMigMapping.deviceList[index];
+        return NV_OK;
+    }
+
+    return NV_ERR_INVALID_ARGUMENT;
+}
+
+NvU32 nvSMGGetDeviceList(nvRMContextPtr rmctx,
+                         nvMIGDeviceDescription **devices,
+                         NvU32 *deviceCount)
+{
+    if (!InitializeGlobalMapping(rmctx)) {
+        return NV_ERR_INVALID_STATE;
+    }
+
+    *devices = globalMigMapping.deviceList;
+    *deviceCount = globalMigMapping.deviceCount;
+
+    return NV_OK;
+}
+
+NvU32 nvSMGGetDefaultDeviceForDeviceInstance(nvRMContextPtr rmctx,
+                                             NvU32 deviceInstance,
+                                             const nvMIGDeviceDescription **uniDev)
+{
+    NvU32 i;
+
+    if (!InitializeGlobalMapping(rmctx)) {
+        return NV_ERR_INVALID_STATE;
+    }
+
+    for (i = 0; i < globalMigMapping.deviceCount; i++) {
+        if (globalMigMapping.deviceList[i].deviceInstance == deviceInstance &&
+            globalMigMapping.deviceList[i].smgAccessOk) {
+            *uniDev = &globalMigMapping.deviceList[i];
+            return NV_OK;
+        }
+    }
+
+    return NV_ERR_OBJECT_NOT_FOUND;
+}
+
+/*
+ * Do this once.
+ */
+static NvBool InitializeGlobalMapping(nvRMContextPtr rmctx)
+{
+    static NvBool firstRun = NV_TRUE;
+
+    if (firstRun) {
+#if !defined(NV_SMG_IN_NVKMS)
+        static pthread_mutex_t initMutex = PTHREAD_MUTEX_INITIALIZER;
+
+        pthread_mutex_lock(&initMutex);
+#endif
+
+        if (firstRun) {
+            firstRun = NV_FALSE;
+
+            /*
+             * Copy the RMContext and replace the clientHandle with a newly
+             * allocated, dedicated RM client for our one-time probe.
+             */
+            nvRMContext clientctx = *rmctx;
+            NvU32 res;
+
+            if (rmctx->allocRoot) {
+                res = rmctx->allocRoot(rmctx,
+                                       &clientctx.clientHandle);
+            } else {
+                res = rmctx->alloc(rmctx,
+                                   NV01_NULL_OBJECT,
+                                   NV01_NULL_OBJECT,
+                                   NV01_NULL_OBJECT,
+                                   NV01_ROOT,
+                                   &clientctx.clientHandle);
+            }
+
+            if (res == NV_OK) {
+                /* Initialize the global struct. */
+                smg_memset(&globalMigMapping, 0, sizeof(globalMigMapping));
+
+                if (ListPartitions(&clientctx, &globalMigMapping)) {
+                    globalMigMapping.initialized = NV_TRUE;
+                }
+
+                rmctx->free(&clientctx, clientctx.clientHandle, clientctx.clientHandle, clientctx.clientHandle);
+            }
+        }
+#if !defined(NV_SMG_IN_NVKMS)
+        pthread_mutex_unlock(&initMutex);
+#endif
+    }
+
+    return globalMigMapping.initialized;
+}
+
+/*
+ * Query active GPUs from RM and pick all GPUs with MIG partitions into our
+ * own device list for further reference.
+ */
+static NvBool ListPartitions(nvRMContextPtr rmctx, struct nvGlobalMigMappingRec *mapping)
+{
+#if !defined(NV_MODS) && !defined(NV_RMAPI_TEGRA)
+    struct workspace {
+        NV0000_CTRL_GPU_GET_ACTIVE_DEVICE_IDS_PARAMS activeParams;
+        NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS idInfoParams;
+    } *ws;
+
+    NvU32 i;
+    NvU32 res;
+    NvU32 currentGpuId = NV0000_CTRL_GPU_INVALID_ID;
+    NvU32 currentGpuIdSubDevIndex = 0;
+
+    ENTER_WORKSPACE(ws);
+    smg_memset(ws, 0, sizeof(*ws));
+
+    res = rmctx->control(rmctx,
+                         rmctx->clientHandle,
+                         rmctx->clientHandle,
+                         NV0000_CTRL_CMD_GPU_GET_ACTIVE_DEVICE_IDS,
+                         &ws->activeParams,
+                         sizeof(ws->activeParams));
+    if (res != NV_OK) {
+        /* Explicitly not being supported can be considered a non-error. */
+        if (res == NV_ERR_NOT_SUPPORTED) {
+            EXIT_WORKSPACE_AND_RETURN(ws, NV_TRUE);
+        }
+
+        EXIT_WORKSPACE_AND_RETURN(ws, NV_FALSE);
+    }
+
+    /* Add MIGDevice active devices. */
+    for (i = 0; i < ws->activeParams.numDevices; i++) {
+        NV0000_CTRL_GPU_ACTIVE_DEVICE *dev = &ws->activeParams.devices[i];
+        nvMIGDeviceDescription *migDev;
+
+        /* First, skip over any GPU not in MIG mode. */
+        if (dev->gpuInstanceId == NV0000_CTRL_GPU_INVALID_ID) {
+            continue;
+        }
+
+        /* If we ever fill up the device array, bail out for a good reason. */
+        if (mapping->deviceCount == NV_MAX_MIG_DEVICES) {
+            EXIT_WORKSPACE_AND_RETURN(ws, NV_FALSE);
+        }
+
+        /* Add MIG GPUs to our list, start with getting GPU info. */
+        smg_memset(&ws->idInfoParams, 0, sizeof(ws->idInfoParams));
+        ws->idInfoParams.gpuId = dev->gpuId;
+
+        res = rmctx->control(rmctx,
+                             rmctx->clientHandle,
+                             rmctx->clientHandle,
+                             NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2,
+                             &ws->idInfoParams,
+                             sizeof(ws->idInfoParams));
+        if (res != NV_OK) {
+            EXIT_WORKSPACE_AND_RETURN(ws, NV_FALSE);
+        }
+
+        /* Maintain per-GPU index of MIG devices. */
+        if (currentGpuId != dev->gpuId) {
+            currentGpuId = dev->gpuId;
+            currentGpuIdSubDevIndex = 0;
+        } else {
+            currentGpuIdSubDevIndex++;
+        }
+
+        /* Fill in device data. */
+        migDev = &mapping->deviceList[mapping->deviceCount];
+        migDev->migDeviceId = MIG_DEVICE_INDEX_TO_ID(mapping->deviceCount, currentGpuIdSubDevIndex);
+        migDev->deviceInstance = ws->idInfoParams.deviceInstance;
+        migDev->subDeviceInstance = ws->idInfoParams.subDeviceInstance;
+        migDev->gpuId = dev->gpuId;
+        migDev->gpuInstanceId = dev->gpuInstanceId;
+        migDev->computeInstanceId = dev->computeInstanceId;
+        migDev->smgAccessOk = NV_FALSE;
+
+        /* If it's a graphics partition and we can access it, mark valid */
+        if (GetGraphicsPartitionUUIDForDevice(rmctx, migDev)) {
+            migDev->smgAccessOk = NV_TRUE;
+        }
+
+        mapping->deviceCount++;
+    }
+
+    EXIT_WORKSPACE_AND_RETURN(ws, NV_TRUE);
+
+#else /* !defined(NV_MODS) && !defined(NV_RMAPI_TEGRA) */
+    /*
+     * MODS and Tegra builds don't have all the MIG related rmcontrols so
+     * don't do any queries, just leave it with zero SMG partitions.
+     */
+    return NV_TRUE;
+#endif /* !defined(NV_MODS) && !defined(NV_RMAPI_TEGRA) */
+}
+
+/*
+ * Obtain the UUID of the MIG device. This means allocating the devices and
+ * partition refs so that we can access the GET_UUID control.
+ */
+#if !defined(NV_MODS) && !defined(NV_RMAPI_TEGRA)
+static NvBool GetGraphicsPartitionUUIDForDevice(nvRMContextPtr rmctx, nvMIGDeviceDescription *migDev)
+{
+    struct workspace {
+        NV0080_ALLOC_PARAMETERS nv0080Params;
+        NV2080_ALLOC_PARAMETERS nv2080Params;
+        NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS partInfoParams;
+        NVC637_ALLOCATION_PARAMETERS allocParams;
+        NVC638_ALLOCATION_PARAMETERS execAllocParams;
+        NVC638_CTRL_GET_UUID_PARAMS getUuidParams;
+        NV2080_CTRL_SMC_SUBSCRIPTION_INFO subInfoParams;
+    } *ws;
+
+    const NvU32 hDevice = 0x1;
+    const NvU32 hSubDevice = 0x2;
+    const NvU32 hGpuInstSub = 0x3;
+    const NvU32 hCompInstSub = 0x4;
+    NV2080_CTRL_GPU_GET_PARTITION_INFO *partInfo;
+    NvU32 gfxSize;
+    NvBool success = NV_FALSE;
+    NvBool inMigMode;
+    NvU32 i;
+
+    ENTER_WORKSPACE(ws);
+    smg_memset(ws, 0, sizeof(*ws));
+
+    /* Allocate the corresponding device. */
+    ws->nv0080Params.deviceId = migDev->deviceInstance;
+    ws->nv0080Params.hClientShare = rmctx->clientHandle;
+
+    if (rmctx->alloc(rmctx,
+                     rmctx->clientHandle,
+                     rmctx->clientHandle,
+                     hDevice,
+                     NV01_DEVICE_0,
+                     &ws->nv0080Params) != NV_OK) {
+        EXIT_WORKSPACE_AND_RETURN(ws, NV_FALSE);
+    }
+
+    /* And the corresponding subdevice. */
+    ws->nv2080Params.subDeviceId = migDev->subDeviceInstance;
+
+    if (rmctx->alloc(rmctx,
+                     rmctx->clientHandle,
+                     hDevice,
+                     hSubDevice,
+                     NV20_SUBDEVICE_0,
+                     &ws->nv2080Params) != NV_OK) {
+        goto out;
+    }
+
+    /* If not in MIG mode we can't obtain MIG UUID either. */
+    if (DeviceInMigMode(rmctx, hSubDevice, &inMigMode) != NV_OK) {
+        goto out;
+    }
+
+    if (!inMigMode) {
+        goto out;
+    }
+
+    /* Allocate partition ref per supplied id: GPU_GET_PARTITIONS won't work
+     * without a subscription for unprivileged tasks. */
+    ws->allocParams.swizzId = migDev->gpuInstanceId;
+
+    if (rmctx->alloc(rmctx,
+                     rmctx->clientHandle,
+                     hSubDevice,
+                     hGpuInstSub,
+                     AMPERE_SMC_PARTITION_REF,
+                     &ws->allocParams) != NV_OK) {
+        goto out;
+    }
+
+    /* Find info for the partition inferred from the partition ref object. */
+    if (rmctx->control(rmctx,
+                       rmctx->clientHandle,
+                       hSubDevice,
+                       NV2080_CTRL_CMD_GPU_GET_PARTITIONS,
+                       &ws->partInfoParams,
+                       sizeof(ws->partInfoParams)) != NV_OK) {
+        goto out;
+    }
+
+    for (i = 0, partInfo = NULL; i < ws->partInfoParams.validPartitionCount; i++) {
+        if (ws->partInfoParams.queryPartitionInfo[i].bValid &&
+            ws->partInfoParams.queryPartitionInfo[i].swizzId == migDev->gpuInstanceId) {
+            partInfo = &ws->partInfoParams.queryPartitionInfo[i];
+            break;
+        }
+    }
+
+    /* This shouldn't happen but it doesn't hurt to fail instead of asserting. */
+    if (!partInfo) {
+        goto out;
+    }
+
+    /* Check that the referenced partition can actually do graphics */
+    gfxSize = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _GFX_SIZE, partInfo->partitionFlag);
+    if (gfxSize == NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_NONE) {
+        goto out;
+    }
+
+    /* And exec partition. */
+    ws->execAllocParams.execPartitionId = migDev->computeInstanceId;
+
+    if (rmctx->alloc(rmctx,
+                     rmctx->clientHandle,
+                     hGpuInstSub,
+                     hCompInstSub,
+                     AMPERE_SMC_EXEC_PARTITION_REF,
+                     &ws->execAllocParams) != NV_OK) {
+        goto out;
+    }
+
+    /* Query UUID. */
+    if (rmctx->control(rmctx,
+                       rmctx->clientHandle,
+                       hCompInstSub,
+                       NVC638_CTRL_CMD_GET_UUID,
+                       &ws->getUuidParams,
+                       sizeof(ws->getUuidParams)) != NV_OK) {
+        goto out;
+    }
+
+    /* Got one: update migDev and call it a success. */
+    smg_memcpy(migDev->migUuid,
+               ws->getUuidParams.uuidStr,
+               NV_MIG_DEVICE_UUID_STR_LENGTH);
+
+    ct_assert(NVC638_UUID_LEN == NV_GPU_UUID_LEN);
+    smg_memcpy(migDev->migUuidBin,
+               ws->getUuidParams.uuid,
+               NV_GPU_UUID_LEN);
+    success = NV_TRUE;
+
+out:
+    rmctx->free(rmctx, rmctx->clientHandle, rmctx->clientHandle, hDevice);
+    EXIT_WORKSPACE_AND_RETURN(ws, success);
+}
+#endif /* !defined(NV_MODS) && !defined(NV_RMAPI_TEGRA) */
+
+/*
+ * Quick getter for SMC mode.
+ */
+static NvU32 DeviceInMigMode (nvRMContextPtr rmctx,
+                              NvU32 hSubDevice,
+                              NvBool *inMigMode)
+{
+    NV2080_CTRL_GPU_GET_INFO_V2_PARAMS gpuInfoParams;
+    NvU32 res;
+
+    gpuInfoParams.gpuInfoListSize = 1;
+    gpuInfoParams.gpuInfoList[0].index = NV2080_CTRL_GPU_INFO_INDEX_GPU_SMC_MODE;
+
+    res = rmctx->control(rmctx,
+                         rmctx->clientHandle,
+                         hSubDevice,
+                         NV2080_CTRL_CMD_GPU_GET_INFO_V2,
+                         &gpuInfoParams,
+                         sizeof(gpuInfoParams));
+
+    if (res == NV_OK) {
+        *inMigMode = (
+            gpuInfoParams.gpuInfoList[0].data == NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_ENABLED ||
+            gpuInfoParams.gpuInfoList[0].data == NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_DISABLE_PENDING);
+    }
+
+    return res;
+}
+
+/*
+ * This reduces down to a simple search for the gpuId.
+ */
+static NvBool GpuHasSMGPartitions (NvU32 gpuId)
+{
+    NvU32 i;
+
+    for (i = 0; i < globalMigMapping.deviceCount; i++) {
+        if (globalMigMapping.deviceList[i].gpuId == gpuId &&
+            globalMigMapping.deviceList[i].smgAccessOk) {
+            return NV_TRUE;
+        }
+    }
+
+    return NV_FALSE;
+}
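Taken together, the intended calling sequence for the nv_smg.c API is roughly the following. This is a hypothetical caller, not code from the patch: the handle values are illustrative, and the rmctx comes from whatever RM client the embedding driver maintains.

#include "nv_smg.h"

/* Sketch: enumerate MIG devices and subscribe a subdevice to the first
 * graphics-capable partition.  The two subscription handles are
 * illustrative; a real caller allocates them from its own handle space. */
static NvBool pick_and_subscribe(nvRMContextPtr rmctx, NvU32 subDevHandle)
{
    nvMIGDeviceDescription *devices;
    NvU32 count;
    NvU32 i;
    const NvU32 hGpuInstSub  = 0x100;
    const NvU32 hCompInstSub = 0x101;

    if (nvSMGGetDeviceList(rmctx, &devices, &count) != NV_OK) {
        return NV_FALSE;
    }

    for (i = 0; i < count; i++) {
        if (!devices[i].smgAccessOk) {
            continue;
        }
        return nvSMGSubscribeSubDevToPartition(rmctx,
                                               subDevHandle,
                                               devices[i].migDeviceId,
                                               hGpuInstSub,
                                               hCompInstSub);
    }

    /* No accessible graphics partition found. */
    return NV_FALSE;
}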
diff --git a/src/common/unix/common/inc/nv-float.h b/src/common/unix/common/inc/nv-float.h
new file mode 100644
index 0000000..95fc719
--- /dev/null
+++ b/src/common/unix/common/inc/nv-float.h
@@ -0,0 +1,40 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(NV_FLOAT_H)
+#define NV_FLOAT_H
+
+/* Floating-point constants, expressed as integer constants */
+
+#define NV_FLOAT_NEG_ONE     0xBF800000 /*    -1.00f */
+#define NV_FLOAT_NEG_QUARTER 0xBE800000 /*    -0.25f */
+#define NV_FLOAT_ZERO        0x00000000 /*     0.00f */
+#define NV_FLOAT_QUARTER     0x3E800000 /*     0.25f */
+#define NV_FLOAT_HALF        0x3F000000 /*     0.50f */
+#define NV_FLOAT_ONE         0x3F800000 /*     1.00f */
+#define NV_FLOAT_TWO         0x40000000 /*     2.00f */
+#define NV_FLOAT_255         0x437F0000 /*   255.00f */
+#define NV_FLOAT_1024        0x44800000 /*  1024.00f */
+#define NV_FLOAT_65536       0x47800000 /* 65536.00f */
+
+#endif
diff --git a/src/common/unix/common/inc/nv_amodel_enum.h b/src/common/unix/common/inc/nv_amodel_enum.h
new file mode 100644
index 0000000..8f90205
--- /dev/null
+++ b/src/common/unix/common/inc/nv_amodel_enum.h
@@ -0,0 +1,40 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NV_AMODEL_ENUM_H__
+#define __NV_AMODEL_ENUM_H__
+
+
+
+typedef enum _NVAModelConfig {
+    NV_AMODEL_NONE = 0,
+    NV_AMODEL_KEPLER,
+    NV_AMODEL_KEPLER_SM35,
+    NV_AMODEL_MAXWELL,
+    NV_AMODEL_PASCAL,
+    NV_AMODEL_VOLTA,
+    NV_AMODEL_TURING,
+    NV_AMODEL_AMPERE,
+} NVAModelConfig;
+
+#endif /* __NV_AMODEL_ENUM_H__ */
diff --git a/src/common/unix/common/inc/nv_assert.h b/src/common/unix/common/inc/nv_assert.h
new file mode 100644
index 0000000..8c62ef5
--- /dev/null
+++ b/src/common/unix/common/inc/nv_assert.h
@@ -0,0 +1,82 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NV_ASSERT_H__
+#define __NV_ASSERT_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * nvAssert() has three possible configurations: __COVERITY__, DEBUG,
+ * and non-DEBUG. In DEBUG builds, the includer should provide an
+ * implementation of nvDebugAssert().
+ */
+
+#if defined(__COVERITY__)
+    /*
+     * Coverity assert handling -- basically inform coverity that the
+     * condition is verified independently and coverity can assume that
+     * it is true.
+     */
+    void __coverity_panic__(void);
+
+    #define nvAssert(exp)             \
+        do {                          \
+            if (exp) {                \
+            } else {                  \
+                __coverity_panic__(); \
+            }                         \
+        } while (0)
+
+#elif defined(DEBUG)
+
+    void nvDebugAssert(const char *expString, const char *filenameString,
+                       const char *funcString, const unsigned int lineNumber);
+
+    /*
+     * Assert that (exp) is TRUE. We use 'if (exp) { } else { fail }'
+     * instead of 'if (!(exp)) { fail }' to cause warnings when people
+     * accidentally write nvAssert(foo = bar) instead of nvAssert(foo ==
+     * bar).
+     */
+    #define nvAssert(exp)                                              \
+        do {                                                           \
+            if (exp) {                                                 \
+            } else {                                                   \
+                nvDebugAssert(#exp, __FILE__, __FUNCTION__, __LINE__); \
+            }                                                          \
+        } while (0)
+
+#else
+
+    #define nvAssert(exp) {}
+
+#endif
+
+#ifdef __cplusplus
+};
+#endif
+
+#endif /* __NV_ASSERT_H__ */
diff --git a/src/common/unix/common/inc/nv_common_utils.h b/src/common/unix/common/inc/nv_common_utils.h
new file mode 100644
index 0000000..6b10e76
--- /dev/null
+++ b/src/common/unix/common/inc/nv_common_utils.h
@@ -0,0 +1,120 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_COMMON_UTILS_H__ +#define __NV_COMMON_UTILS_H__ + +#include "nvtypes.h" +#include "nvmisc.h" + +#if !defined(TRUE) +#define TRUE NV_TRUE +#endif + +#if !defined(FALSE) +#define FALSE NV_FALSE +#endif + +#define NV_IS_UNSIGNED(x) ((__typeof__(x))-1 > 0) + +/* Get the length of a statically-sized array. */ +#define ARRAY_LEN(_arr) (sizeof(_arr) / sizeof(_arr[0])) + +#define NV_INVALID_HEAD 0xFFFFFFFF + +#define NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION (~0) + +#if !defined(NV_MIN) +# define NV_MIN(a,b) (((a)<(b))?(a):(b)) +#endif + +#define NV_MIN3(a,b,c) NV_MIN(NV_MIN(a, b), c) +#define NV_MIN4(a,b,c,d) NV_MIN3(NV_MIN(a,b),c,d) + +#if !defined(NV_MAX) +# define NV_MAX(a,b) (((a)>(b))?(a):(b)) +#endif + +#define NV_MAX3(a,b,c) NV_MAX(NV_MAX(a, b), c) +#define NV_MAX4(a,b,c,d) NV_MAX3(NV_MAX(a,b),c,d) + +static inline int NV_LIMIT_VAL_TO_MIN_MAX(int val, int min, int max) +{ + if (val < min) { + return min; + } + if (val > max) { + return max; + } + return val; +} + +#define NV_ROUNDUP_DIV(x,y) ((x) / (y) + (((x) % (y)) ? 1 : 0)) + +/* + * Macros used for computing palette entries: + * + * NV_UNDER_REPLICATE(val, source_size, result_size) expands a value + * of source_size bits into a value of target_size bits by shifting + * the source value into the high bits and replicating the high bits + * of the value into the low bits of the result. + * + * PALETTE_DEPTH_SHIFT(val, w) maps a colormap entry for a component + * that has w bits to an appropriate entry in a LUT of 256 entries. + */ +static inline unsigned int NV_UNDER_REPLICATE(unsigned short val, + int source_size, + int result_size) +{ + return (val << (result_size - source_size)) | + (val >> ((source_size << 1) - result_size)); +} + + +static inline unsigned short PALETTE_DEPTH_SHIFT(unsigned short val, int depth) +{ + return NV_UNDER_REPLICATE(val, depth, 8); +} + +/* + * Use __builtin_ffs where it is supported, or provide an equivalent + * implementation for platforms like riscv where it is not. 
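 + * + * As a quick illustration of the intended ffs(3)-style semantics: + * nv_ffs(0) == 0, nv_ffs(0x1) == 1, and nv_ffs(0x80000000) == 32 + * (the return value is the 1-based index of the lowest set bit).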
+ */ +#if defined(__GNUC__) && !NVCPU_IS_RISCV64 +static inline int nv_ffs(int x) +{ + return __builtin_ffs(x); +} +#else +static inline int nv_ffs(int x) +{ + if (x == 0) + return 0; + + LOWESTBITIDX_32(x); + + return 1 + x; +} +#endif + +#endif /* __NV_COMMON_UTILS_H__ */ diff --git a/src/common/unix/common/inc/nv_dpy_id.h b/src/common/unix/common/inc/nv_dpy_id.h new file mode 100644 index 0000000..fe742a5 --- /dev/null +++ b/src/common/unix/common/inc/nv_dpy_id.h @@ -0,0 +1,370 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * This header file defines the types NVDpyId and NVDpyIdList, as well + * as inline functions to manipulate these types. NVDpyId and + * NVDpyIdList should be treated as opaque by includers of this header + * file. + */ + +#ifndef __NV_DPY_ID_H__ +#define __NV_DPY_ID_H__ + +#include "nvtypes.h" +#include "nvmisc.h" +#include "nv_common_utils.h" +#include "nvlimits.h" /* NV_MAX_SUBDEVICES */ + +typedef struct { + NvU32 opaqueDpyId; +} NVDpyId; + +typedef struct { + NvU32 opaqueDpyIdList; +} NVDpyIdList; + +#define NV_DPY_ID_MAX_SUBDEVICES NV_MAX_SUBDEVICES +#define NV_DPY_ID_MAX_DPYS_IN_LIST 32 + +/* + * For use in combination with nvDpyIdToPrintFormat(); e.g., + * + * printf("dpy id: " NV_DPY_ID_PRINT_FORMAT "\n", + * nvDpyIdToPrintFormat(dpyId)); + * + * The includer should not make assumptions about the return type of + * nvDpyIdToPrintFormat().
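 + * (In the current implementation it happens to be the NvU32 value + * returned by nvDpyIdToNvU32(), but that is an implementation detail.)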
+ */ +#define NV_DPY_ID_PRINT_FORMAT "0x%08x" + +/* functions to return an invalid DpyId and empty DpyIdList */ + +static inline NVDpyId nvInvalidDpyId(void) +{ + NVDpyId dpyId = { 0 }; + return dpyId; +} + +static inline NVDpyIdList nvEmptyDpyIdList(void) +{ + NVDpyIdList dpyIdList = { 0 }; + return dpyIdList; +} + +static inline NVDpyIdList nvAllDpyIdList(void) +{ + NVDpyIdList dpyIdList = { ~0U }; + return dpyIdList; +} + +static inline void +nvEmptyDpyIdListSubDeviceArray(NVDpyIdList dpyIdList[NV_DPY_ID_MAX_SUBDEVICES]) +{ + int dispIndex; + for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) { + dpyIdList[dispIndex] = nvEmptyDpyIdList(); + } +} + +/* set operations on DpyIds and DpyIdLists: Add, Subtract, Intersect, Xor */ + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvAddDpyIdToDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList | + dpyId.opaqueDpyId; + return tmpDpyIdList; +} + +/* Passing an invalid display ID makes this function return an empty list. */ +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvAddDpyIdToEmptyDpyIdList(NVDpyId dpyId) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyId.opaqueDpyId; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvAddDpyIdListToDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListB.opaqueDpyIdList | + dpyIdListA.opaqueDpyIdList; + return tmpDpyIdList; +} + +/* Returns: dpyIdList - dpyId */ +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvDpyIdListMinusDpyId(NVDpyIdList dpyIdList, NVDpyId dpyId) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList & + (~dpyId.opaqueDpyId); + return tmpDpyIdList; +} + +/* Returns: dpyIdListA - dpyIdListB */ +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvDpyIdListMinusDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList & + (~dpyIdListB.opaqueDpyIdList); + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvIntersectDpyIdAndDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList & + dpyId.opaqueDpyId; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvIntersectDpyIdListAndDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList & + dpyIdListB.opaqueDpyIdList; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvXorDpyIdAndDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList ^ + dpyId.opaqueDpyId; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvXorDpyIdListAndDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList ^ + dpyIdListB.opaqueDpyIdList; + return tmpDpyIdList; +} + + +/* boolean checks */ + +static inline NvBool nvDpyIdIsInDpyIdList(NVDpyId dpyId, + NVDpyIdList dpyIdList) +{ + return !!(dpyIdList.opaqueDpyIdList & 
dpyId.opaqueDpyId); +} + +static inline NvBool nvDpyIdIsInvalid(NVDpyId dpyId) +{ + return (dpyId.opaqueDpyId == 0); +} + +static inline NvBool nvDpyIdListIsEmpty(NVDpyIdList dpyIdList) +{ + return (dpyIdList.opaqueDpyIdList == 0); +} + +static inline NvBool +nvDpyIdListSubDeviceArrayIsEmpty(NVDpyIdList + dpyIdList[NV_DPY_ID_MAX_SUBDEVICES]) +{ + int dispIndex; + for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) { + if (!nvDpyIdListIsEmpty(dpyIdList[dispIndex])) { + return NV_FALSE; + } + } + return NV_TRUE; +} + + +static inline NvBool nvDpyIdsAreEqual(NVDpyId dpyIdA, NVDpyId dpyIdB) +{ + return (dpyIdA.opaqueDpyId == dpyIdB.opaqueDpyId); +} + +static inline NvBool nvDpyIdListsAreEqual(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + return (dpyIdListA.opaqueDpyIdList == dpyIdListB.opaqueDpyIdList); +} + +static inline NvBool nvDpyIdListIsASubSetofDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList intersectedDpyIdList = + nvIntersectDpyIdListAndDpyIdList(dpyIdListA, dpyIdListB); + + return nvDpyIdListsAreEqual(intersectedDpyIdList, dpyIdListA); +} + + +/* + * retrieve the individual dpyIds from dpyIdList; if dpyId is invalid, + * start at the beginning of the list; otherwise, start at the dpyId + * after the specified dpyId + */ + +static inline __attribute__ ((warn_unused_result)) +NVDpyId nvNextDpyIdInDpyIdListUnsorted(NVDpyId dpyId, NVDpyIdList dpyIdList) +{ + if (nvDpyIdIsInvalid(dpyId)) { + dpyId.opaqueDpyId = 1; + } else { + dpyId.opaqueDpyId <<= 1; + } + + while (dpyId.opaqueDpyId) { + + if (nvDpyIdIsInDpyIdList(dpyId, dpyIdList)) { + return dpyId; + } + + dpyId.opaqueDpyId <<= 1; + } + + /* no dpyIds left in dpyIdlist; return the invalid dpyId */ + + return nvInvalidDpyId(); +} + +#define FOR_ALL_DPY_IDS(_dpyId, _dpyIdList) \ + for ((_dpyId) = nvNextDpyIdInDpyIdListUnsorted(nvInvalidDpyId(), \ + (_dpyIdList)); \ + !nvDpyIdIsInvalid(_dpyId); \ + (_dpyId) = nvNextDpyIdInDpyIdListUnsorted((_dpyId), \ + (_dpyIdList))) + +/* report how many dpyIds are in the dpyIdList */ + +static inline int nvCountDpyIdsInDpyIdList(NVDpyIdList dpyIdList) +{ + return nvPopCount32(dpyIdList.opaqueDpyIdList); +} + +static inline int +nvCountDpyIdsInDpyIdListSubDeviceArray(NVDpyIdList + dpyIdList[NV_DPY_ID_MAX_SUBDEVICES]) +{ + int dispIndex, n = 0; + + for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) { + n += nvCountDpyIdsInDpyIdList(dpyIdList[dispIndex]); + } + + return n; +} + +/* convert between dpyId/dpyIdList and NV-CONTROL values */ + +static inline int nvDpyIdToNvControlVal(NVDpyId dpyId) +{ + return (int) dpyId.opaqueDpyId; +} + +static inline int nvDpyIdListToNvControlVal(NVDpyIdList dpyIdList) +{ + return (int) dpyIdList.opaqueDpyIdList; +} + +static inline NVDpyId nvNvControlValToDpyId(int val) +{ + NVDpyId dpyId; + dpyId.opaqueDpyId = (val == 0) ? 0 : 1 << (nv_ffs(val)-1); + return dpyId; +} + +static inline NVDpyIdList nvNvControlValToDpyIdList(int val) +{ + NVDpyIdList dpyIdList; + dpyIdList.opaqueDpyIdList = val; + return dpyIdList; +} + + +/* convert between dpyId and NvU32 */ + +static inline NVDpyId nvNvU32ToDpyId(NvU32 val) +{ + NVDpyId dpyId; + dpyId.opaqueDpyId = (val == 0) ? 
0 : 1 << (nv_ffs(val)-1); + return dpyId; +} + +static inline NVDpyIdList nvNvU32ToDpyIdList(NvU32 val) +{ + NVDpyIdList dpyIdList; + dpyIdList.opaqueDpyIdList = val; + return dpyIdList; +} + +static inline NvU32 nvDpyIdToNvU32(NVDpyId dpyId) +{ + return dpyId.opaqueDpyId; +} + +static inline NvU32 nvDpyIdListToNvU32(NVDpyIdList dpyIdList) +{ + return dpyIdList.opaqueDpyIdList; +} + +/* Return the bit position of dpyId: a number in the range [0..31]. */ +static inline NvU32 nvDpyIdToIndex(NVDpyId dpyId) +{ + return nv_ffs(dpyId.opaqueDpyId) - 1; +} + +/* Return a display ID that is not in the list passed in. */ + +static inline NVDpyId nvNewDpyId(NVDpyIdList excludeList) +{ + NVDpyId dpyId; + if (~excludeList.opaqueDpyIdList == 0) { + return nvInvalidDpyId(); + } + dpyId.opaqueDpyId = + 1U << (nv_ffs(~excludeList.opaqueDpyIdList) - 1); + return dpyId; +} + +/* See comment for NV_DPY_ID_PRINT_FORMAT. */ +static inline NvU32 nvDpyIdToPrintFormat(NVDpyId dpyId) +{ + return nvDpyIdToNvU32(dpyId); +} + +/* Prevent usage of opaque values. */ +#define opaqueDpyId __ERROR_ACCESS_ME_VIA_NV_DPY_ID_H +#define opaqueDpyIdList __ERROR_ACCESS_ME_VIA_NV_DPY_ID_H + +#endif /* __NV_DPY_ID_H__ */ diff --git a/src/common/unix/common/inc/nv_mode_timings.h b/src/common/unix/common/inc/nv_mode_timings.h new file mode 100644 index 0000000..5c72e3d --- /dev/null +++ b/src/common/unix/common/inc/nv_mode_timings.h @@ -0,0 +1,163 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_MODE_TIMINGS_H__ +#define __NV_MODE_TIMINGS_H__ + +#include "nvtypes.h" + +/* + * NvModeTimings: hardware-independent modetiming structure. + * + * For interlaced modes, the vertical values are stored in frame size, + * rather than field size (e.g., for 1080i modes, vVisible is 1080, + * not 540); similarly, for doublescan modes, the vertical values are + * stored in normal framesize (not doubled vertically). + * + * RRx1k should be field rate for interlaced modes, and should be + * frame rate for doubleScan modes; e.g., 1920x1080@60i and + * 640x480@60d, not 1920x1080@30i or 640x480@120d. + * + * RRx1k is also the "real" refresh rate (time spent displaying one eye) + * for HDMI 3D frame packed modes, e.g. 47940 (2x24hz) for 1920x1080@24 + * HDMI 3D mode. 
This needs to be halved again for all user-visible reported + * refresh rates (which needs to report time spent between each vblank, or + * each pair of eyes). + * + * pixelClock is doubled for doubleScan and HDMI 3D frame packed modes. + * + * The intent is that this structure match the X configuration file + * ModeLine. + * + * hdmi3D reflects whether this mode is a HDMI 3D frame packed mode. True only + * if the user selected HDMI 3D stereo mode and the GPU supports it. If true, + * then pixelClock is doubled. + * + * yuv420Mode reflects whether this mode requires YUV 4:2:0 decimation into a + * half-width output through headsurface (SW YUV420) or >=nvdisplay 4.0 HW CSC + * (HW YUV420). + * + * If a mode requires SW YUV 4:2:0 emulation, the pixelClock and width values + * in NvModeTimings will still be the full width values specified by the mode + * parsed from the EDID (e.g. 3840x2160@60), but the pixelClock and width values + * in NVHwModeTimingsEvo will be the "real" half width values programmed in HW + * and rendered to through a headSurface transform (e.g. 1920x2160@60). If a + * mode requires HW YUV 4:2:0 CSC, the pixelClock and width values in both + * NvModeTimings and NVHwModeTimingsEvo will be full width, and the decimation + * to the half width scanout surface is performed in HW. In both cases, only + * the full width values should ever be reported to the client. + */ + +enum NvYuv420Mode { + NV_YUV420_MODE_NONE = 0, + NV_YUV420_MODE_SW, + NV_YUV420_MODE_HW, +}; + +typedef struct _NvModeTimings { + NvU64 pixelClockHz NV_ALIGN_BYTES(8); /* in Hz units */ + NvU32 RRx1k; + NvU16 hVisible; + NvU16 hSyncStart; + NvU16 hSyncEnd; + NvU16 hTotal; + NvU16 hSkew; /* Just placeholder for XRRModeInfo.hSkew */ + NvU16 vVisible; + NvU16 vSyncStart; + NvU16 vSyncEnd; + NvU16 vTotal; + struct { + NvU16 w; + NvU16 h; + } sizeMM; + NvBool interlaced; + NvBool doubleScan; + /* + * Note: hSyncPos and vSyncPos are ignored, and the polarity is positive if + * [hv]SyncNeg is false. However, X.Org has separate flags for each, and + * treats modes with positive, negative, both, and neither as separate + * modes. + */ + NvBool hSyncPos; + NvBool hSyncNeg; + NvBool vSyncPos; + NvBool vSyncNeg; + NvBool hdmi3D; + enum NvYuv420Mode yuv420Mode; +} NvModeTimings, *NvModeTimingsPtr; + +static inline NvBool NvModeTimingsMatch(const NvModeTimings *pA, + const NvModeTimings *pB, + NvBool ignoreSizeMM, + NvBool ignoreRRx1k) +{ + /* + * Ignore sizeMM and/or RRx1k, if requested. The sizeMM and RRx1k fields + * don't impact hardware modetiming values, so it is reasonable that some + * callers may choose to ignore them when comparing NvModeTimings. 
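 + * + * For example, a caller interested only in whether two modes program + * identical raster timings might pass NV_TRUE for both flags: + * NvModeTimingsMatch(&modeA, &modeB, NV_TRUE, NV_TRUE), where modeA + * and modeB are hypothetical NvModeTimings variables.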
+ */ + NvBool sizeMMmatches = ignoreSizeMM || ((pA->sizeMM.w == pB->sizeMM.w) && + (pA->sizeMM.h == pB->sizeMM.h)); + + NvBool rrx1kMatches = ignoreRRx1k || (pA->RRx1k == pB->RRx1k); + + return (sizeMMmatches && rrx1kMatches && + (pA->pixelClockHz == pB->pixelClockHz) && + (pA->hVisible == pB->hVisible) && + (pA->hSyncStart == pB->hSyncStart) && + (pA->hSyncEnd == pB->hSyncEnd) && + (pA->hTotal == pB->hTotal) && + (pA->hSkew == pB->hSkew) && + (pA->vVisible == pB->vVisible) && + (pA->vSyncStart == pB->vSyncStart) && + (pA->vSyncEnd == pB->vSyncEnd) && + (pA->vTotal == pB->vTotal) && + (pA->interlaced == pB->interlaced) && + (pA->doubleScan == pB->doubleScan) && + (pA->hSyncPos == pB->hSyncPos) && + (pA->hSyncNeg == pB->hSyncNeg) && + (pA->vSyncPos == pB->vSyncPos) && + (pA->vSyncNeg == pB->vSyncNeg) && + (pA->hdmi3D == pB->hdmi3D) && + (pA->yuv420Mode == pB->yuv420Mode)); +} + +/* + * Convert between Hz and kHz. + * + * Note that Hz ==> kHz ==> Hz is lossy. + * + * We do +500 before /1000 in order to round, rather than truncate. + */ +static inline NvU32 HzToKHz(NvU64 hz) +{ + return (hz + 500) / 1000; +} + +static inline NvU64 KHzToHz(NvU32 kHz) +{ + return kHz * (NvU64)1000; +} + + +#endif /* __NV_MODE_TIMINGS_H__ */ diff --git a/src/common/unix/common/utils/interface/nv_memory_tracker.h b/src/common/unix/common/utils/interface/nv_memory_tracker.h new file mode 100644 index 0000000..75f0c03 --- /dev/null +++ b/src/common/unix/common/utils/interface/nv_memory_tracker.h @@ -0,0 +1,62 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_MEMORY_TRACKER_H__ +#define __NV_MEMORY_TRACKER_H__ + +#include "nv_list.h" + +#include <stddef.h> /* size_t */ + +/* + * The following functions allocate and free memory, and track the + * allocations in a linked list, such that the includer can call + * nvMemoryTrackerPrintUnfreedAllocations() to print any leaked + * allocations.
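 + * + * An includer would typically wrap these in macros so that each call + * site records __FILE__/__LINE__ automatically; a minimal sketch, + * assuming a hypothetical per-module list named myModuleAllocList: + * + * #define MY_TRACKED_ALLOC(_sz) \ + * nvMemoryTrackerTrackedAlloc(&myModuleAllocList, (_sz), \ + * __LINE__, __FILE__)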
+ */ + +void *nvMemoryTrackerTrackedAlloc(NVListPtr list, size_t size, + int line, const char *file); + +void *nvMemoryTrackerTrackedCalloc(NVListPtr list, size_t nmemb, size_t size, + int line, const char *file); + +void *nvMemoryTrackerTrackedRealloc(NVListPtr list, void *ptr, size_t size, + int line, const char *file); + +void nvMemoryTrackerTrackedFree(void *ptr); + +void nvMemoryTrackerPrintUnfreedAllocations(NVListPtr list); + +/* + * Users of nv_memory_tracker must provide implementations of the + * following helper functions. + */ +void *nvMemoryTrackerAlloc(size_t size); +void nvMemoryTrackerFree(void *ptr, size_t size); +void nvMemoryTrackerPrintf(const char *format, ...) + __attribute__((format (printf, 1, 2))); +void nvMemoryTrackerMemset(void *s, int c, size_t n); +void nvMemoryTrackerMemcpy(void *dest, const void *src, size_t n); + +#endif /* __NV_MEMORY_TRACKER_H__ */ diff --git a/src/common/unix/common/utils/interface/nv_mode_timings_utils.h b/src/common/unix/common/utils/interface/nv_mode_timings_utils.h new file mode 100644 index 0000000..72deb14 --- /dev/null +++ b/src/common/unix/common/utils/interface/nv_mode_timings_utils.h @@ -0,0 +1,135 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_MODE_TIMINGS_UTILS_H__ +#define __NV_MODE_TIMINGS_UTILS_H__ + +/* + * Utility code to operate on NvModeTimings and NVT_TIMINGs. + */ + +#include "nvtypes.h" + +#include "nv_mode_timings.h" +#include "timing/nvtiming.h" + +#include <stddef.h> /* size_t */ + +/* + * Macros used for printing values divided by 1000 without floating + * point division and printing. + * + * Example: + * printf("pclk is %.2f MHz\n", pclk_khz / 1000.0f); + * becomes: + * printf("pclk is " NV_FMT_DIV_1000_POINT_2 " MHz\n", + * NV_VA_DIV_1000_POINT_2(pclk_khz)); + * + * Different precision controls the number of digits printed after the + * decimal point. Bias is added for correct rounding. + */ +#define NV_FMT_DIV_1000_POINT_1 "%d.%d" +#define NV_FMT_DIV_1000_POINT_2 "%d.%02d" +#define NV_VA_DIV_1000_POINT_1(x) \ + ((x) + 49) / 1000, (((x) + 49) % 1000) / 100 +#define NV_VA_DIV_1000_POINT_2(x) \ + ((x) + 4) / 1000, (((x) + 4) % 1000) / 10 + +/* + * macro to use integer math to convert an NvU32 Hz value to kHz; we + * add 500 Hz before dividing by 1000 to round rather than truncate.
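 + * + * e.g., (25499 + 500) / 1000 == 25 while (25500 + 500) / 1000 == 26; + * plain truncation would map both values to 25.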
+ */ + +#define NV_U32_KHZ_TO_HZ(_x) (((_x) + 500) / 1000) + +/* + * NVT_TIMING stores HVisible multiplied by the horizontal replication + * factor (e.g., a 720 mode with hrep=2 has HVisible of 1440). For + * reporting purposes, divide HVisible by hrep. + */ +static inline NvU16 NV_NVT_TIMING_HVISIBLE(const NVT_TIMING *pTiming) +{ + if (pTiming->etc.rep > 1) { + return pTiming->HVisible / pTiming->etc.rep; + } else { + return pTiming->HVisible; + } +} + +/* + * NVT_TIMING stores VVisible as half height when interlaced (e.g., + * 1920x1080i has VVisible 540). + */ +static inline NvU16 NV_NVT_TIMING_VVISIBLE(const NVT_TIMING *pTiming) +{ + return pTiming->VVisible * (pTiming->interlaced ? 2 : 1); +} + +/* + * When non-zero, NVT_TIMING::etc::aspect contains bytes 12, 13, and + * 14 from the Detailed Timing Definition of the EDID. This contains + * a packed width and height. The width and height is either an + * aspect ratio (16:9 or 4:3), or a physical image size in + * millimeters. See Table 3.21, and the subsequent notes, in the + * E-EDID 1.4 specification. + */ +static inline NvU16 NV_NVT_TIMING_IMAGE_SIZE_WIDTH(const NVT_TIMING *pTiming) +{ + return (pTiming->etc.aspect >> 16) & 0xFFFF; +} + +static inline NvU16 NV_NVT_TIMING_IMAGE_SIZE_HEIGHT(const NVT_TIMING *pTiming) +{ + return pTiming->etc.aspect & 0xFFFF; +} + +static inline NvBool NV_NVT_TIMING_HAS_ASPECT_RATIO(const NVT_TIMING *pTiming) +{ + NvU16 w = NV_NVT_TIMING_IMAGE_SIZE_WIDTH(pTiming); + NvU16 h = NV_NVT_TIMING_IMAGE_SIZE_HEIGHT(pTiming); + + return (((w == 16) && (h == 9)) || + ((w == 4) && (h == 3))); +} + +static inline NvBool NV_NVT_TIMING_HAS_IMAGE_SIZE(const NVT_TIMING *pTiming) +{ + return ((pTiming->etc.aspect != 0) && + !NV_NVT_TIMING_HAS_ASPECT_RATIO(pTiming)); +} + +NvBool IsEdid640x480_60_NVT_TIMING(const NVT_TIMING *pTiming); + +void NVT_TIMINGtoNvModeTimings(const NVT_TIMING *pTiming, + NvModeTimingsPtr pModeTimings); + +void nvBuildModeName(NvU16 width, NvU16 height, char *name, size_t nameLen); + +/* + * Users of nvBuildModeName() should provide an implementation of + * nvBuildModeNameSnprintf(). + */ +int nvBuildModeNameSnprintf(char *str, size_t size, const char *format, ...) + __attribute__((format (printf, 3, 4))); + +#endif /* __NV_MODE_TIMINGS_UTILS_H__ */ diff --git a/src/common/unix/common/utils/interface/nv_vasprintf.h b/src/common/unix/common/utils/interface/nv_vasprintf.h new file mode 100644 index 0000000..ec94beb --- /dev/null +++ b/src/common/unix/common/utils/interface/nv_vasprintf.h @@ -0,0 +1,65 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_VASPRINTF_H__ +#define __NV_VASPRINTF_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include <stdarg.h> +#include <stddef.h> + +/* + * nv_vasprintf() depends on nv_vasprintf_{alloc,free,vsnprintf}(). + * Those functions should be implemented by the user of + * nv_vasprintf(). + */ +void *nv_vasprintf_alloc(size_t size); +void nv_vasprintf_free(void *ptr); +int nv_vasprintf_vsnprintf(char *str, size_t size, + const char *format, va_list ap); + +char* nv_vasprintf(const char *f, va_list ap); + +/* + * NV_VSNPRINTF(): macro that assigns b using nv_vasprintf(); intended to + * be used by vararg printing functions. + * + * This macro allocates memory for b; the caller should free the + * memory when done. + */ + +#define NV_VSNPRINTF(b, f) do { \ + va_list ap; \ + va_start(ap, f); \ + (b) = nv_vasprintf(f, ap); \ + va_end(ap); \ +} while(0) + +#ifdef __cplusplus +}; +#endif + +#endif /* __NV_VASPRINTF_H__ */ diff --git a/src/common/unix/common/utils/interface/unix_rm_handle.h b/src/common/unix/common/utils/interface/unix_rm_handle.h new file mode 100644 index 0000000..d795fda --- /dev/null +++ b/src/common/unix/common/utils/interface/unix_rm_handle.h @@ -0,0 +1,122 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef __UNIX_RM_HANDLE_H__ +#define __UNIX_RM_HANDLE_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV_UNIX_RM_HANDLE_INITIAL_HANDLES 512 +#define NV_UNIX_RM_HANDLE_BITMAP_SIZE(_numHandles) ((_numHandles) >> 5) + +#if defined(DEBUG) +typedef struct _nv_unix_rm_handle_allocation *NVUnixRmHandleAllocationPtr; + +typedef struct _nv_unix_rm_handle_allocation { + const char *file; + int line; +} NVUnixRmHandleAllocationRec; +#endif + +typedef struct _nv_unix_rm_handle_allocator *NVUnixRmHandleAllocatorPtr; + +typedef struct _nv_unix_rm_handle_allocator { + NvU32 rmClient; + NvU32 clientData; + + NvU32 *bitmap; + NvU32 maxHandles; + +#if defined(DEBUG) + NVUnixRmHandleAllocationRec *allocationTable; +#endif +} NVUnixRmHandleAllocatorRec; + +NvBool nvInitUnixRmHandleAllocator(NVUnixRmHandleAllocatorPtr pAllocator, + NvU32 rmClient, NvU32 clientData); + +NvU32 nvGenerateUnixRmHandleInternal(NVUnixRmHandleAllocatorPtr pAllocator); +void nvFreeUnixRmHandleInternal(NVUnixRmHandleAllocatorPtr pAllocator, + NvU32 UnixRmHandle); + +void nvTearDownUnixRmHandleAllocator(NVUnixRmHandleAllocatorPtr pAllocator); + +#if defined(DEBUG) + +#define NV_UNIX_RM_HANDLE_DEBUG_ERROR 0 +#define NV_UNIX_RM_HANDLE_DEBUG_MSG 1 +#define NV_UNIX_RM_HANDLE_DEBUG_VERBOSE 2 + +/* + * Users of the handle generator need to provide implementations + * of nvUnixRmHandleDebugAssert() and nvUnixRmHandleLogMsg() + * in builds where DEBUG is defined. + */ +void nvUnixRmHandleDebugAssert(const char *expString, + const char *filenameString, + const char *funcString, + const unsigned lineNumber); +#define nvUnixRmHandleAssert(_exp) \ + do { \ + if (_exp) { \ + } else { \ + nvUnixRmHandleDebugAssert(#_exp, __FILE__, __FUNCTION__, __LINE__); \ + } \ + } while (0) + +void nvUnixRmHandleLogMsg(NvU32 level, const char *fmt, ...) __attribute__((format (printf, 2, 3))); + +NvU32 nvDebugGenerateUnixRmHandle(NVUnixRmHandleAllocatorPtr pAllocator, + const char *file, int line); +#define nvGenerateUnixRmHandle(s) \ + nvDebugGenerateUnixRmHandle((s), __FILE__, __LINE__) + +void nvDebugFreeUnixRmHandle(NVUnixRmHandleAllocatorPtr pAllocator, NvU32 handle); +#define nvFreeUnixRmHandle(n,s) nvDebugFreeUnixRmHandle((n), (s)) + +#else + +#define nvUnixRmHandleAssert(_exp) do {} while(0) +#define nvUnixRmHandleLogMsg(__fmt, ...) do {} while(0) + +#define nvGenerateUnixRmHandle(s) nvGenerateUnixRmHandleInternal((s)) +#define nvFreeUnixRmHandle(n, s) nvFreeUnixRmHandleInternal((n), (s)) + +#endif /* DEBUG */ + +/* + * Users of the handle generator always need to provide implementations + * of nvUnixRmHandleReallocMem(), and nvUnixRmHandleFreeMem(). + */ +void *nvUnixRmHandleReallocMem(void *oldPtr, NvLength newSize); +void nvUnixRmHandleFreeMem(void *ptr); + +#ifdef __cplusplus +}; +#endif + +#endif diff --git a/src/common/unix/common/utils/nv_memory_tracker.c b/src/common/unix/common/utils/nv_memory_tracker.c new file mode 100644 index 0000000..cecf5d1 --- /dev/null +++ b/src/common/unix/common/utils/nv_memory_tracker.c @@ -0,0 +1,230 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if defined(DEBUG) + +#include "nv_memory_tracker.h" + +/* + * Define NV_MEMORY_TRACKER_BACKTRACES in the including makefile to enable + * backtrace capture/reporting for memory leaks. E.g., + * NV_DEFINES += NV_MEMORY_TRACKER_BACKTRACES + * Note that this probably only works with glibc (backtrace() and friends are + * GNU extensions). + */ + +#if defined(NV_MEMORY_TRACKER_BACKTRACES) + #include <execinfo.h> /* backtrace() and backtrace_symbols() */ + #include <stdlib.h> /* free(3) */ + #define MAX_BACKTRACE_DEPTH 30 +#endif + + +typedef union { + struct { + NVListRec entry; + const char *file; + int line; + size_t size; +#if defined(NV_MEMORY_TRACKER_BACKTRACES) + void *backtrace[MAX_BACKTRACE_DEPTH]; + int backtraceSize; +#endif + } header; + /* + * Unused. For alignment purposes only. Guarantee alignment to + * twice pointer size. That is the alignment guaranteed by glibc: + * https://www.gnu.org/software/libc/manual/html_node/Aligned-Memory-Blocks.html + * which seems reasonable to match here. + */ + NvU8 align __attribute__((aligned(sizeof(void*) * 2))); +} NvMemoryAllocation; + + +static void PrintAllocationBacktrace(const NvMemoryAllocation *alloc) +{ +#if defined(NV_MEMORY_TRACKER_BACKTRACES) + char **symbols; + const int numSymbols = alloc->header.backtraceSize; + int j; + + symbols = backtrace_symbols(alloc->header.backtrace, numSymbols); + + if (symbols == NULL) { + return; + } + + nvMemoryTrackerPrintf("Allocation context:"); + + for (j = 0; j < numSymbols; j++) { + if (symbols[j] == NULL) { + continue; + } + + nvMemoryTrackerPrintf("#%-2d %s", j, symbols[j]); + } + free(symbols); +#endif +} + + +static void RegisterAllocation(NVListPtr list, NvMemoryAllocation *alloc, + const char *file, int line, size_t size) +{ + nvListAdd(&alloc->header.entry, list); + + alloc->header.file = file; + alloc->header.line = line; + alloc->header.size = size; + +#if defined(NV_MEMORY_TRACKER_BACKTRACES) + /* Record the backtrace at this point (only addresses, not symbols) */ + alloc->header.backtraceSize = + backtrace(alloc->header.backtrace, MAX_BACKTRACE_DEPTH); +#endif +} + + +static NvBool IsAllocationSane(NvMemoryAllocation *alloc) +{ + NVListPtr entry = &alloc->header.entry; + if (entry->prev->next != entry || entry->next->prev != entry) { + /* + * This will likely have already crashed, but we might as well + * report it if we can.
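 + * (A sane tracked header has mutually consistent list pointers: + * entry->prev->next == entry and entry->next->prev == entry. A + * pointer that was never registered, or whose header has been + * overwritten, will typically fail this check.)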
+ */ + nvMemoryTrackerPrintf("Attempted to free untracked memory %p!", + alloc + 1); + return NV_FALSE; + } + return NV_TRUE; +} + + +static void UnregisterAllocation(NvMemoryAllocation *alloc) +{ + if (!IsAllocationSane(alloc)) { + return; + } + + nvListDel(&alloc->header.entry); +} + + +void *nvMemoryTrackerTrackedAlloc(NVListPtr list, size_t size, + int line, const char *file) +{ + NvMemoryAllocation *alloc = nvMemoryTrackerAlloc(sizeof(*alloc) + size); + + if (alloc == NULL) { + return NULL; + } + + RegisterAllocation(list, alloc, file, line, size); + + return alloc + 1; +} + + +void *nvMemoryTrackerTrackedCalloc(NVListPtr list, size_t nmemb, size_t size, + int line, const char *file) +{ + size_t totalSize = size * nmemb; + void *ptr = nvMemoryTrackerTrackedAlloc(list, totalSize, line, file); + + if (ptr == NULL) { + return NULL; + } + + nvMemoryTrackerMemset(ptr, 0, totalSize); + + return ptr; +} + + +void *nvMemoryTrackerTrackedRealloc(NVListPtr list, void *ptr, size_t size, + int line, const char *file) +{ + NvMemoryAllocation *oldAlloc = NULL; + void *newptr; + + if (ptr == NULL) { + /* realloc with a ptr of NULL is equivalent to malloc. */ + return nvMemoryTrackerTrackedAlloc(list, size, line, file); + } + + if (size == 0) { + /* realloc with a size of 0 is equivalent to free. */ + nvMemoryTrackerTrackedFree(ptr); + return NULL; + } + + oldAlloc = ((NvMemoryAllocation *) ptr) - 1; + newptr = nvMemoryTrackerTrackedAlloc(list, size, line, file); + + if (newptr != NULL) { + nvMemoryTrackerMemcpy(newptr, ptr, NV_MIN(size, oldAlloc->header.size)); + nvMemoryTrackerTrackedFree(ptr); + } + + return newptr; +} + + +void nvMemoryTrackerTrackedFree(void *ptr) +{ + NvMemoryAllocation *alloc; + size_t size; + + if (ptr == NULL) { + return; + } + + alloc = ((NvMemoryAllocation *) ptr) - 1; + + UnregisterAllocation(alloc); + + size = alloc->header.size + sizeof(NvMemoryAllocation); + + /* Poison the memory. */ + nvMemoryTrackerMemset(alloc, 0x55, size); + + nvMemoryTrackerFree(alloc, size); +} + + +void nvMemoryTrackerPrintUnfreedAllocations(NVListPtr list) +{ + NvMemoryAllocation *iter; + + nvListForEachEntry(iter, list, header.entry) { + nvMemoryTrackerPrintf("Unfreed allocation: %18p (size: %5u) (%s:%d)", + iter + 1, + (unsigned int) iter->header.size, + iter->header.file, + iter->header.line); + PrintAllocationBacktrace(iter); + } +} + +#endif /* defined(DEBUG) */ diff --git a/src/common/unix/common/utils/nv_mode_timings_utils.c b/src/common/unix/common/utils/nv_mode_timings_utils.c new file mode 100644 index 0000000..26c0197 --- /dev/null +++ b/src/common/unix/common/utils/nv_mode_timings_utils.c @@ -0,0 +1,159 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv_mode_timings_utils.h" + +/* + * Check if this NVT_TIMING is the 640x480@60Hz Industry standard + * mode; but due to the lack of precision of the pclk field in the + * NVT_TIMING data structure, is not quite correct: pclk should be + * 2517.5, and rrx1k should be 59940. See bug 263631. + * + * Note that we check if rrx1k is either 60000 or 59940 because we may + * use this function immediately after receiving the NVT_TIMINGs from + * the EDID and patch rrx1k, or we may use this function later from + * NVT_TIMINGtoNvModeTimings(), at which point we'll have already + * patched rrx1k. + */ + +NvBool IsEdid640x480_60_NVT_TIMING(const NVT_TIMING *pTiming) +{ + return ((pTiming->pclk == 2518) && + (pTiming->HVisible == 640) && + (pTiming->VVisible == 480) && + (pTiming->HTotal == 800) && + (pTiming->HFrontPorch == 16) && + (pTiming->HSyncWidth == 96) && + (pTiming->VTotal == 525) && + (pTiming->VFrontPorch == 10) && + (pTiming->VSyncWidth == 2) && + (pTiming->HBorder == 0) && + (pTiming->VBorder == 0) && + (pTiming->HSyncPol == NVT_H_SYNC_NEGATIVE) && + (pTiming->VSyncPol == NVT_V_SYNC_NEGATIVE) && + (pTiming->interlaced == 0) && + ((pTiming->etc.flag & + NVT_FLAG_NV_DOUBLE_SCAN_TIMING) == 0) && + ((pTiming->etc.rrx1k == 60000) || + (pTiming->etc.rrx1k == 59940))); +} + +/* + * Convert from NVT_TIMING to NvModeTimings; this is a safe operation + * to perform because NvModeTimings has higher precision (pixelclockHz + * in Hz, and vertical values doubled for interlaced) than NVT_TIMING + */ + +void NVT_TIMINGtoNvModeTimings(const NVT_TIMING *pTiming, + NvModeTimingsPtr pModeTimings) +{ + char *bytePtr = (char *)pModeTimings; + size_t i; + + for (i = 0; i < sizeof(*pModeTimings); i++) { + bytePtr[i] = 0; + } + + pModeTimings->RRx1k = pTiming->etc.rrx1k; + + /* pTiming->pclk is in 10*kHz; pModeTimings->pixelClockHz is in Hz */ + + pModeTimings->pixelClockHz = KHzToHz(pTiming->pclk) * 10; + + pModeTimings->hVisible = pTiming->HVisible; + pModeTimings->hSyncStart = pTiming->HFrontPorch + pTiming->HVisible; + pModeTimings->hSyncEnd = + pTiming->HFrontPorch + pTiming->HVisible + pTiming->HSyncWidth; + pModeTimings->hTotal = pTiming->HTotal; + + pModeTimings->vVisible = pTiming->VVisible; + pModeTimings->vSyncStart = pTiming->VFrontPorch + pTiming->VVisible; + pModeTimings->vSyncEnd = + pTiming->VFrontPorch + pTiming->VVisible + pTiming->VSyncWidth; + pModeTimings->vTotal = pTiming->VTotal; + + pModeTimings->interlaced = pTiming->interlaced; + pModeTimings->doubleScan = + !!(pTiming->etc.flag & NVT_FLAG_NV_DOUBLE_SCAN_TIMING); + + /* + * pTiming stores vertical values divided by two when interlaced; so + * double the vertical values in pModeTimings + */ + + if (pModeTimings->interlaced) { + pModeTimings->vVisible *= 2; + pModeTimings->vSyncStart *= 2; + pModeTimings->vSyncEnd *= 2; + pModeTimings->vTotal *= 2; + } + + /* + * pTiming: 0 is positive, 1 is negative + * pModeTimings: FALSE is positive, TRUE is negative + */ + + if (pTiming->HSyncPol == NVT_H_SYNC_POSITIVE) { + pModeTimings->hSyncNeg = 
NV_FALSE; + } else { + pModeTimings->hSyncNeg = NV_TRUE; + } + + if (pTiming->VSyncPol == NVT_V_SYNC_POSITIVE) { + pModeTimings->vSyncNeg = NV_FALSE; + } else { + pModeTimings->vSyncNeg = NV_TRUE; + } + + pModeTimings->hSyncPos = !pModeTimings->hSyncNeg; + pModeTimings->vSyncPos = !pModeTimings->vSyncNeg; + + /* + * Save any physical size information for this mode from the + * Detailed Timing Definition of the EDID. + */ + if (NV_NVT_TIMING_HAS_IMAGE_SIZE(pTiming)) { + pModeTimings->sizeMM.w = NV_NVT_TIMING_IMAGE_SIZE_WIDTH(pTiming); + pModeTimings->sizeMM.h = NV_NVT_TIMING_IMAGE_SIZE_HEIGHT(pTiming); + } + + /* + * XXX work around lack of precision in NVT_TIMING: catch the + * 640x480@60Hz EDID mode and patch pixelClockHz and RRx1k. + */ + + if (IsEdid640x480_60_NVT_TIMING(pTiming)) { + pModeTimings->RRx1k = 59940; + pModeTimings->pixelClockHz = 25175000; + } +} + + +/*! + * Build a mode name, of the format 'WIDTHxHEIGHT'. + */ +void nvBuildModeName(NvU16 width, NvU16 height, char *name, size_t nameLen) +{ + nvBuildModeNameSnprintf(name, nameLen, "%dx%d", width, height); + name[nameLen - 1] = '\0'; +} diff --git a/src/common/unix/common/utils/nv_vasprintf.c b/src/common/unix/common/utils/nv_vasprintf.c new file mode 100644 index 0000000..390ad9f --- /dev/null +++ b/src/common/unix/common/utils/nv_vasprintf.c @@ -0,0 +1,74 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv_vasprintf.h" + +/* + * nv_vasprintf(): function that returns a formatted string built with + * nv_vasprintf_vsnprintf(); intended to be used by vararg printing + * functions. It copes with the differing semantics of the return + * value of (v)snprintf() in different versions of glibc: + * + * -1 when the buffer is not long enough (glibc < 2.1) + * + * or + * + * the length the string would have been if the buffer had been large + * enough (glibc >= 2.1) + * + * This function allocates memory for the returned string; the caller + * should free the memory with nv_vasprintf_free() when done. + * + * The includer should implement nv_vasprintf_{alloc,free,vsnprintf}.
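 + * + * A plain user-space includer might implement the three hooks as thin + * wrappers over the C library; an illustrative sketch only, not part + * of this file: + * + * void *nv_vasprintf_alloc(size_t size) { return malloc(size); } + * void nv_vasprintf_free(void *ptr) { free(ptr); } + * int nv_vasprintf_vsnprintf(char *str, size_t size, + * const char *format, va_list ap) + * { return vsnprintf(str, size, format, ap); }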
+ */ + +#define __NV_VASPRINTF_LEN 64 + +char* nv_vasprintf(const char *f, va_list ap) +{ + int len, current_len = __NV_VASPRINTF_LEN; + char *b = (char *)nv_vasprintf_alloc(current_len); + + while (b) { + va_list tmp_ap; + + va_copy(tmp_ap, ap); + len = nv_vasprintf_vsnprintf(b, current_len, f, tmp_ap); + va_end(tmp_ap); + + if ((len > -1) && (len < current_len)) { + break; + } else if (len > -1) { + current_len = len + 1; + } else { + current_len += __NV_VASPRINTF_LEN; + } + + nv_vasprintf_free(b); + b = (char *)nv_vasprintf_alloc(current_len); + } + + return b; +} diff --git a/src/common/unix/common/utils/unix_rm_handle.c b/src/common/unix/common/utils/unix_rm_handle.c new file mode 100644 index 0000000..b24d0e0 --- /dev/null +++ b/src/common/unix/common/utils/unix_rm_handle.c @@ -0,0 +1,385 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file contains functions for dealing with dynamic allocation and + * management of resource handles. + * + * Note that dynamic handles are not suitable for all use cases. If a + * handle is placed in the pushbuffer, and the pushbuffer will be + * replayed during channel recovery, the handle value must be kept + * constant. For such handles, use an invariant handle value. + * + * We keep a bitmap of which handles we've used. 
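 + * The bitmap is scanned from the lowest bit up, so handle IDs are + * handed out densely starting at 1; handle value 0 is reserved to + * mean invalid.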
+ * + * Composition of an object handle: + * [31:16] Client data + * [15:00] Handle constant + */ + +#include + +#include "unix_rm_handle.h" + +#define INVALID_HANDLE 0 +#define UNIX_RM_HANDLE_CLIENT_DATA_SHIFT 16 + +/* Mask to AND only client data */ +#define CLIENT_DATA_MASK ((~(NvU32)0) << UNIX_RM_HANDLE_CLIENT_DATA_SHIFT) +/* Mask to AND off client data */ +#define HANDLE_MASK (~(CLIENT_DATA_MASK)) +/* Handle 0 is reserved, so subtract one from a handle to get its index */ +#define HANDLE_INDEX(_handle) (((_handle) - 1) & HANDLE_MASK) + +/* Bits to OR in for client data */ +#define GET_CLIENT_DATA_BITS(_data) \ + (((_data) << UNIX_RM_HANDLE_CLIENT_DATA_SHIFT)) + +#define DWORD_FROM_HANDLE(_handle) (HANDLE_INDEX(_handle) >> 5) +#define BIT_FROM_HANDLE(_handle) (HANDLE_INDEX(_handle) & 0x1f) + +/* Check if a handle is used */ +#define USED(_bitmap, _handle) \ + ((_bitmap)[DWORD_FROM_HANDLE(_handle)] & (1U << BIT_FROM_HANDLE(_handle))) +/* Reserve a handle in the bitmap */ +#define RESERVE(_bitmap, _handle) \ + ((_bitmap)[DWORD_FROM_HANDLE(_handle)] |= (1U << BIT_FROM_HANDLE(_handle))) +/* Unreserve a handle in the bitmap */ +#define UNRESERVE(_bitmap, _handle) \ + ((_bitmap)[DWORD_FROM_HANDLE(_handle)] &= (~(1U << BIT_FROM_HANDLE(_handle)))) + +#if defined(DEBUG) +static void +nvReportUnfreedUnixRmHandleAllocations(NVUnixRmHandleAllocatorPtr pAllocator); +#endif + + +static void UnixRmHandleMemset(void *ptr, char data, NvLength size) +{ + char *byte = (char *)ptr; + NvLength i; + + for (i = 0; i < size; i++) { + byte[i] = data; + } +} + +static NvBool UnixRmHandleReallocBitmap(NVUnixRmHandleAllocatorPtr pAllocator, + NvU32 newMaxHandles) +{ + NvU32 *newBitmap; +#if defined(DEBUG) + NVUnixRmHandleAllocationPtr newAllocationTable; +#endif /* defined(DEBUG) */ + const NvLength newMemSize = NV_UNIX_RM_HANDLE_BITMAP_SIZE(newMaxHandles) * + sizeof(*newBitmap); + const NvU32 oldBitmapSize = + NV_UNIX_RM_HANDLE_BITMAP_SIZE(pAllocator->maxHandles); + + /* New handle limit must fit in the bitmask */ + if (newMaxHandles > GET_CLIENT_DATA_BITS(1)) { + return NV_FALSE; + } + + /* New handle limit must be a power of 2 */ + nvUnixRmHandleAssert(!(newMaxHandles & (newMaxHandles - 1))); + + newBitmap = (NvU32 *)nvUnixRmHandleReallocMem(pAllocator->bitmap, newMemSize); + + if (!newBitmap) { + return NV_FALSE; + } + + UnixRmHandleMemset(&newBitmap[oldBitmapSize], 0, + newMemSize - (oldBitmapSize * sizeof(*newBitmap))); + pAllocator->bitmap = newBitmap; + +#if defined(DEBUG) + newAllocationTable = + (NVUnixRmHandleAllocationPtr) + nvUnixRmHandleReallocMem(pAllocator->allocationTable, + newMaxHandles * + sizeof(*pAllocator->allocationTable)); + + if (!newAllocationTable) { + /* + * Leave the new bitmap allocation in place. If that realloc + * succeeded, the old bitmap allocation is gone, and it is at + * least big enough to hold the old pAllocator->maxHandles, + * since a shrinking of the allocation table shouldn't have + * failed, and maxHandles currently never decreases anyway. 
+ */ + nvUnixRmHandleAssert(newMaxHandles >= pAllocator->maxHandles); + + return NV_FALSE; + } + + pAllocator->allocationTable = newAllocationTable; +#endif /* defined(DEBUG) */ + + pAllocator->maxHandles = newMaxHandles; + + return NV_TRUE; +} + +NvBool nvInitUnixRmHandleAllocator(NVUnixRmHandleAllocatorPtr pAllocator, + NvU32 rmClient, NvU32 clientData) +{ + nvUnixRmHandleAssert(pAllocator != NULL && + rmClient != 0 && clientData != 0); + nvUnixRmHandleAssert((clientData & 0x0000FFFF) == clientData); + + UnixRmHandleMemset(pAllocator, 0, sizeof(*pAllocator)); + + pAllocator->rmClient = rmClient; + pAllocator->clientData = clientData; + + if (!UnixRmHandleReallocBitmap(pAllocator, + NV_UNIX_RM_HANDLE_INITIAL_HANDLES)) { + nvUnixRmHandleAssert(!"Failed to init RM handle allocator bitmap"); + nvTearDownUnixRmHandleAllocator(pAllocator); + + return NV_FALSE; + } + + /* + * If the RM-provided client handle falls within the allocator range + * then reserve it up-front. + */ + if ((pAllocator->rmClient & CLIENT_DATA_MASK) == + GET_CLIENT_DATA_BITS(pAllocator->clientData)) { + NvU32 handleId = pAllocator->rmClient & HANDLE_MASK; + + if ((handleId <= pAllocator->maxHandles) && + (handleId != INVALID_HANDLE)) { + RESERVE(pAllocator->bitmap, handleId); + } + } + + return NV_TRUE; +} + +/* + * nvGenerateUnixRmHandleInternal() + * Return a unique handle. Be sure to free the handle + * when you're done with it! Returns 0 if we run out of handles. + */ +NvU32 nvGenerateUnixRmHandleInternal(NVUnixRmHandleAllocatorPtr pAllocator) +{ + NvU32 handleId; + NvU32 handle; + + nvUnixRmHandleAssert(pAllocator != NULL && + pAllocator->rmClient != 0 && + pAllocator->clientData != 0); + + /* Find free handle */ + handleId = 1; + while ((handleId <= pAllocator->maxHandles) && + USED(pAllocator->bitmap, handleId)) { + handleId++; + } + + if (handleId > pAllocator->maxHandles) { + if (!UnixRmHandleReallocBitmap(pAllocator, pAllocator->maxHandles * 2)) { + nvUnixRmHandleAssert(!"Failed to grow RM handle allocator bitmap"); + return INVALID_HANDLE; + } + } + + nvUnixRmHandleAssert(!USED(pAllocator->bitmap, handleId)); + + RESERVE(pAllocator->bitmap, handleId); + + handle = GET_CLIENT_DATA_BITS(pAllocator->clientData) | handleId; + + nvUnixRmHandleAssert(handle != pAllocator->rmClient); + + return handle; +} + +/* + * nvFreeUnixRmHandleInternal() + * Mark the handle passed in as free in the bitmap. + */ +void nvFreeUnixRmHandleInternal(NVUnixRmHandleAllocatorPtr pAllocator, + NvU32 unixHandle) +{ + NvU32 handle = unixHandle & HANDLE_MASK; + + if (!unixHandle) { + return; + } + + nvUnixRmHandleAssert(pAllocator != NULL && + pAllocator->rmClient != 0 && pAllocator->clientData != 0); + + nvUnixRmHandleAssert(USED(pAllocator->bitmap, handle)); + + UNRESERVE(pAllocator->bitmap, handle); +} + +/* + * Tear down the allocator: make sure we freed all of the handles we + * allocated (reported in debug builds), and release the allocator's memory. + */ +void nvTearDownUnixRmHandleAllocator(NVUnixRmHandleAllocatorPtr pAllocator) +{ + if (pAllocator == NULL) { + return; + } + + /* + * If the RM-provided client handle falls within the allocator range, + * then it was reserved up-front, so make sure that it gets unreserved + * before teardown.
+     */
+    if ((pAllocator->rmClient & CLIENT_DATA_MASK) ==
+        GET_CLIENT_DATA_BITS(pAllocator->clientData)) {
+        NvU32 handleId = pAllocator->rmClient & HANDLE_MASK;
+
+        if ((handleId <= pAllocator->maxHandles) &&
+            (handleId != INVALID_HANDLE)) {
+            UNRESERVE(pAllocator->bitmap, handleId);
+        }
+    }
+
+#if defined(DEBUG)
+    nvReportUnfreedUnixRmHandleAllocations(pAllocator);
+    nvUnixRmHandleFreeMem(pAllocator->allocationTable);
+#endif
+
+    nvUnixRmHandleFreeMem(pAllocator->bitmap);
+
+    UnixRmHandleMemset(pAllocator, 0, sizeof(*pAllocator));
+}
+
+/*
+ * Handle allocation tracking code; in a debug build, the below
+ * functions wrap the actual allocation functions above.
+ */
+
+#if defined(DEBUG)
+
+#define UNIX_RM_HANDLE_ALLOC_LABEL "NVIDIA UNIX RM HANDLE TRACKER: "
+
+static NVUnixRmHandleAllocationPtr
+FindUnixRmHandleAllocation(NVUnixRmHandleAllocatorPtr pAllocator, NvU32 handle)
+{
+    if (((handle & HANDLE_MASK) == INVALID_HANDLE) ||
+        ((handle & HANDLE_MASK) > pAllocator->maxHandles)) {
+        return NULL;
+    }
+
+    return &pAllocator->allocationTable[HANDLE_INDEX(handle)];
+}
+
+static void RecordUnixRmHandleAllocation(NVUnixRmHandleAllocatorPtr pAllocator,
+                                         NvU32 handle, const char *file, int line)
+{
+    /* Find the allocation table slot for this handle. */
+    NVUnixRmHandleAllocationPtr alloc = FindUnixRmHandleAllocation(pAllocator, handle);
+
+    if (!alloc) {
+        nvUnixRmHandleLogMsg(NV_UNIX_RM_HANDLE_DEBUG_ERROR,
+                             UNIX_RM_HANDLE_ALLOC_LABEL
+                             "NVUnixRmHandleAllocator is corrupted "
+                             "(table entry not found for handle)");
+        return;
+    }
+
+    nvUnixRmHandleLogMsg(NV_UNIX_RM_HANDLE_DEBUG_VERBOSE,
+                         UNIX_RM_HANDLE_ALLOC_LABEL
+                         "Recording handle allocation: 0x%08x (%s:%d)",
+                         handle, file, line);
+
+    alloc->file = file;
+    alloc->line = line;
+}
+
+static void FreeUnixRmHandleAllocation(NVUnixRmHandleAllocatorPtr pAllocator,
+                                       NvU32 handle)
+{
+    NVUnixRmHandleAllocationPtr alloc =
+        FindUnixRmHandleAllocation(pAllocator, handle);
+
+    if (!alloc) {
+        return;
+    }
+
+    nvUnixRmHandleLogMsg(NV_UNIX_RM_HANDLE_DEBUG_VERBOSE,
+                         UNIX_RM_HANDLE_ALLOC_LABEL
+                         "Freeing handle allocation: 0x%08x (%s:%d)",
+                         handle, alloc->file, alloc->line);
+
+    UnixRmHandleMemset(alloc, 0, sizeof(*alloc));
+}
+
+
+NvU32
+nvDebugGenerateUnixRmHandle(NVUnixRmHandleAllocatorPtr pAllocator,
+                            const char *file, int line)
+{
+    NvU32 handle = nvGenerateUnixRmHandleInternal(pAllocator);
+
+    RecordUnixRmHandleAllocation(pAllocator, handle, file, line);
+    return handle;
+}
+
+void nvDebugFreeUnixRmHandle(NVUnixRmHandleAllocatorPtr pAllocator, NvU32 handle)
+{
+    if (!handle) {
+        return;
+    }
+
+    FreeUnixRmHandleAllocation(pAllocator, handle);
+
+    nvFreeUnixRmHandleInternal(pAllocator, handle);
+}
+
+void nvReportUnfreedUnixRmHandleAllocations(NVUnixRmHandleAllocatorPtr pAllocator)
+{
+    NvU32 handleId;
+
+    for (handleId = 1; handleId <= pAllocator->maxHandles; handleId++) {
+        if (USED(pAllocator->bitmap, handleId)) {
+
+            NVUnixRmHandleAllocationPtr alloc =
+                FindUnixRmHandleAllocation(pAllocator, handleId);
+
+            if (alloc == NULL) {
+                continue;
+            }
+
+            nvUnixRmHandleLogMsg(NV_UNIX_RM_HANDLE_DEBUG_MSG,
+                                 UNIX_RM_HANDLE_ALLOC_LABEL
+                                 "Unfreed handle ID allocation: 0x%08x (%s:%d)",
+                                 handleId,
+                                 alloc->file,
+                                 alloc->line);
+        }
+    }
+}
+
+#endif /* DEBUG */
+
diff --git a/src/common/unix/nvidia-3d/include/nv_xz_mem_hooks.h b/src/common/unix/nvidia-3d/include/nv_xz_mem_hooks.h
new file mode 100644
index 0000000..1ff6ac0
--- /dev/null
+++ b/src/common/unix/nvidia-3d/include/nv_xz_mem_hooks.h
@@ -0,0 +1,44 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017
NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_XZ_MEM_HOOKS_H__ +#define __NV_XZ_MEM_HOOKS_H__ + +/* + * This file is included by xz_config.h when NV_XZ_CUSTOM_MEM_HOOKS is defined, + * allowing us to override xzminidec's standard library use. + */ + +#include "nvidia-3d-imports.h" + +#define kmalloc(size, flags) nv3dImportAlloc(size) +#define kfree(ptr) nv3dImportFree(ptr) +#define vmalloc(size) nv3dImportAlloc(size) +#define vfree(ptr) nv3dImportFree(ptr) + +#define memeq(a, b, size) (nv3dImportMemCmp(a, b, size) == 0) +#define memzero(buf, size) nv3dImportMemSet(buf, 0, size) +#define memcpy(a, b, size) nv3dImportMemCpy(a, b, size) +#define memmove(a, b, size) nv3dImportMemMove(a, b, size) + +#endif /* __NV_XZ_MEM_HOOKS_H__ */ diff --git a/src/common/unix/nvidia-3d/include/nvidia-3d-fermi.h b/src/common/unix/nvidia-3d/include/nvidia-3d-fermi.h new file mode 100644 index 0000000..eee4f80 --- /dev/null +++ b/src/common/unix/nvidia-3d/include/nvidia-3d-fermi.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef __NVIDIA_3D_FERMI_H__
+#define __NVIDIA_3D_FERMI_H__
+
+#include "nvidia-3d-types.h"
+
+void _nv3dSetProgramOffsetFermi(
+    Nv3dChannelRec *p3dChannel,
+    NvU32 stage,
+    NvU32 offset);
+void _nv3dInvalidateTexturesFermi(
+    Nv3dChannelRec *p3dChannel);
+void _nv3dSetVertexStreamEndFermi(
+    Nv3dChannelPtr p3dChannel,
+    enum Nv3dVertexAttributeStreamType stream,
+    const Nv3dVertexAttributeStreamRec *pStream);
+
+#endif /* __NVIDIA_3D_FERMI_H__ */
+
diff --git a/src/common/unix/nvidia-3d/include/nvidia-3d-hopper.h b/src/common/unix/nvidia-3d/include/nvidia-3d-hopper.h
new file mode 100644
index 0000000..9b3ce58
--- /dev/null
+++ b/src/common/unix/nvidia-3d/include/nvidia-3d-hopper.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_3D_HOPPER_H__
+#define __NVIDIA_3D_HOPPER_H__
+
+#include "nvidia-3d-types.h"
+
+void _nv3dInitChannelHopper(Nv3dChannelRec *p3dChannel);
+
+void _nv3dAssignNv3dTextureHopper(
+    Nv3dRenderTexInfo info,
+    Nv3dTexture *tex);
+
+#endif /* __NVIDIA_3D_HOPPER_H__ */
diff --git a/src/common/unix/nvidia-3d/include/nvidia-3d-kepler.h b/src/common/unix/nvidia-3d/include/nvidia-3d-kepler.h
new file mode 100644
index 0000000..59c9347
--- /dev/null
+++ b/src/common/unix/nvidia-3d/include/nvidia-3d-kepler.h
@@ -0,0 +1,45 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_3D_KEPLER_H__
+#define __NVIDIA_3D_KEPLER_H__
+
+#include "nvidia-3d-types.h"
+
+void _nv3dSetSpaVersionKepler(Nv3dChannelRec *p3dChannel);
+
+void _nv3dInitChannelKepler(Nv3dChannelRec *p3dChannel);
+
+void _nv3dUploadDataInlineKepler(
+    Nv3dChannelRec *p3dChannel,
+    NvU64 gpuBaseAddress,
+    size_t offset,
+    const void *data,
+    size_t bytes);
+void _nv3dBindTexturesKepler(
+    Nv3dChannelPtr p3dChannel,
+    int programIndex,
+    const int *textureBindingIndices);
+
+#endif /* __NVIDIA_3D_KEPLER_H__ */
+
diff --git a/src/common/unix/nvidia-3d/include/nvidia-3d-maxwell.h b/src/common/unix/nvidia-3d/include/nvidia-3d-maxwell.h
new file mode 100644
index 0000000..cf303b9
--- /dev/null
+++ b/src/common/unix/nvidia-3d/include/nvidia-3d-maxwell.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_3D_MAXWELL_H__
+#define __NVIDIA_3D_MAXWELL_H__
+
+#include "nvidia-3d-types.h"
+
+void _nv3dInitChannelMaxwell(Nv3dChannelRec *p3dChannel);
+
+void _nv3dAssignNv3dTextureMaxwell(
+    Nv3dRenderTexInfo info,
+    Nv3dTexture *tex);
+
+#endif /* __NVIDIA_3D_MAXWELL_H__ */
diff --git a/src/common/unix/nvidia-3d/include/nvidia-3d-pascal.h b/src/common/unix/nvidia-3d/include/nvidia-3d-pascal.h
new file mode 100644
index 0000000..483545b
--- /dev/null
+++ b/src/common/unix/nvidia-3d/include/nvidia-3d-pascal.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_3D_PASCAL_H__
+#define __NVIDIA_3D_PASCAL_H__
+
+#include "nvidia-3d-types.h"
+
+void _nv3dInitChannelPascal(Nv3dChannelRec *p3dChannel);
+
+void _nv3dAssignNv3dTexturePascal(
+    Nv3dRenderTexInfo info,
+    Nv3dTexture *tex);
+
+#endif /* __NVIDIA_3D_PASCAL_H__ */
diff --git a/src/common/unix/nvidia-3d/include/nvidia-3d-surface.h b/src/common/unix/nvidia-3d/include/nvidia-3d-surface.h
new file mode 100644
index 0000000..fba2f5a
--- /dev/null
+++ b/src/common/unix/nvidia-3d/include/nvidia-3d-surface.h
@@ -0,0 +1,33 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_3D_SURFACE_H__
+#define __NVIDIA_3D_SURFACE_H__
+
+#include "nvidia-3d-types.h"
+
+void _nv3dAssignSurfaceOffsets(
+    const Nv3dAllocChannelStateParams *pParams,
+    Nv3dChannelPtr p3dChannel);
+
+#endif /* __NVIDIA_3D_SURFACE_H__ */
diff --git a/src/common/unix/nvidia-3d/include/nvidia-3d-turing.h b/src/common/unix/nvidia-3d/include/nvidia-3d-turing.h
new file mode 100644
index 0000000..79af88d
--- /dev/null
+++ b/src/common/unix/nvidia-3d/include/nvidia-3d-turing.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_3D_TURING_H__
+#define __NVIDIA_3D_TURING_H__
+
+#include "nvidia-3d-types.h"
+
+void _nv3dInitChannelTuring(Nv3dChannelRec *p3dChannel);
+void _nv3dSetVertexStreamEndTuring(
+    Nv3dChannelPtr p3dChannel,
+    enum Nv3dVertexAttributeStreamType stream,
+    const Nv3dVertexAttributeStreamRec *pStream);
+
+#endif /* __NVIDIA_3D_TURING_H__ */
diff --git a/src/common/unix/nvidia-3d/include/nvidia-3d-types-priv.h b/src/common/unix/nvidia-3d/include/nvidia-3d-types-priv.h
new file mode 100644
index 0000000..a15b035
--- /dev/null
+++ b/src/common/unix/nvidia-3d/include/nvidia-3d-types-priv.h
@@ -0,0 +1,48 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef __NVIDIA_3D_TYPES_PRIV_H__ +#define __NVIDIA_3D_TYPES_PRIV_H__ + +#include "nvidia-3d-types.h" + +struct _Nv3dHal { + void (*setSpaVersion) (Nv3dChannelRec *p3dChannel); + void (*initChannel) (Nv3dChannelRec *p3dChannel); + void (*uploadDataInline) (Nv3dChannelRec *p3dChannel, + NvU64 gpuBaseAddress, + size_t offset, + const void *data, + size_t bytes); + void (*setProgramOffset) (Nv3dChannelRec *p3dChannel, + NvU32 stage, + NvU32 offset); + void (*assignNv3dTexture) (Nv3dRenderTexInfo info, + Nv3dTexture *tex); + void (*setVertexStreamEnd) (Nv3dChannelPtr p3dChannel, + enum Nv3dVertexAttributeStreamType stream, + const Nv3dVertexAttributeStreamRec *pStream); +}; + +#endif /* __NVIDIA_3D_TYPES_PRIV_H__ */ + diff --git a/src/common/unix/nvidia-3d/include/nvidia-3d-vertex-arrays.h b/src/common/unix/nvidia-3d/include/nvidia-3d-vertex-arrays.h new file mode 100644 index 0000000..10fdc68 --- /dev/null +++ b/src/common/unix/nvidia-3d/include/nvidia-3d-vertex-arrays.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_3D_VERTEX_ARRAYS_H__ +#define __NVIDIA_3D_VERTEX_ARRAYS_H__ + +#include "nvidia-3d-types.h" + +void _nv3dInitializeStreams( + Nv3dChannelRec *p3dChannel); + +#endif /* __NVIDIA_3D_VERTEX_ARRAYS_H__ */ diff --git a/src/common/unix/nvidia-3d/include/nvidia-3d-volta.h b/src/common/unix/nvidia-3d/include/nvidia-3d-volta.h new file mode 100644 index 0000000..4055abf --- /dev/null +++ b/src/common/unix/nvidia-3d/include/nvidia-3d-volta.h @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_3D_VOLTA_H__
+#define __NVIDIA_3D_VOLTA_H__
+
+#include "nvidia-3d-types.h"
+
+void _nv3dSetProgramOffsetVolta(
+    Nv3dChannelRec *p3dChannel,
+    NvU32 stage,
+    NvU32 offset);
+
+#endif /* __NVIDIA_3D_VOLTA_H__ */
+
diff --git a/src/common/unix/nvidia-3d/interface/nvidia-3d-color-targets.h b/src/common/unix/nvidia-3d/interface/nvidia-3d-color-targets.h
new file mode 100644
index 0000000..5b0be47
--- /dev/null
+++ b/src/common/unix/nvidia-3d/interface/nvidia-3d-color-targets.h
@@ -0,0 +1,93 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_3D_COLOR_TARGETS_H__
+#define __NVIDIA_3D_COLOR_TARGETS_H__
+
+#include "nvidia-3d.h"
+
+#include <class/cl9097.h>      /* NV9097_SET_CT_SELECT, et al; restored include, eaten by extraction */
+#include <class/cla06fsubch.h> /* NVA06F_SUBCHANNEL_3D; restored include, eaten by extraction */
+
+/*
+ * This header file defines static inline functions to manage 3D class
+ * color targets.
+ */
+
+static inline void nv3dSelectColorTarget(
+    Nv3dChannelPtr p3dChannel,
+    NvU8 colorTargetIndex)
+{
+    NvPushChannelPtr p = p3dChannel->pPushChannel;
+
+    nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_CT_SELECT,
+        NV3D_V(9097, SET_CT_SELECT, TARGET_COUNT, 1) |
+        NV3D_V(9097, SET_CT_SELECT, TARGET0, colorTargetIndex) |
+        NV3D_V(9097, SET_CT_SELECT, TARGET1, 0) |
+        NV3D_V(9097, SET_CT_SELECT, TARGET2, 0) |
+        NV3D_V(9097, SET_CT_SELECT, TARGET3, 0) |
+        NV3D_V(9097, SET_CT_SELECT, TARGET4, 0) |
+        NV3D_V(9097, SET_CT_SELECT, TARGET5, 0) |
+        NV3D_V(9097, SET_CT_SELECT, TARGET6, 0) |
+        NV3D_V(9097, SET_CT_SELECT, TARGET7, 0));
+}
+
+static inline void nv3dSetColorTarget(
+    Nv3dChannelPtr p3dChannel,
+    NvU8 colorTargetIndex,
+    NvU32 surfaceFormat,
+    NvU64 surfaceGpuAddress,
+    NvBool blockLinear,
+    Nv3dBlockLinearLog2GobsPerBlock gobsPerBlock,
+    NvU32 surfaceWidth,
+    NvU32 surfaceHeight)
+{
+    NvPushChannelPtr p = p3dChannel->pPushChannel;
+
+    const NvU32 memoryInfo =
+        blockLinear ?
+        (NV3D_V(9097, SET_COLOR_TARGET_MEMORY, BLOCK_WIDTH, gobsPerBlock.x) |
+         NV3D_V(9097, SET_COLOR_TARGET_MEMORY, BLOCK_HEIGHT, gobsPerBlock.y) |
+         NV3D_V(9097, SET_COLOR_TARGET_MEMORY, BLOCK_DEPTH, gobsPerBlock.z) |
+         NV3D_C(9097, SET_COLOR_TARGET_MEMORY, LAYOUT, BLOCKLINEAR)) :
+        NV3D_C(9097, SET_COLOR_TARGET_MEMORY, LAYOUT, PITCH);
+
+    if (surfaceFormat == NV9097_SET_COLOR_TARGET_FORMAT_V_DISABLED) {
+        // Disable this color target.
+        nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D,
+                       NV9097_SET_COLOR_TARGET_FORMAT(colorTargetIndex),
+                       NV9097_SET_COLOR_TARGET_FORMAT_V_DISABLED);
+        return;
+    }
+
+    nvPushMethod(p, NVA06F_SUBCHANNEL_3D,
+                 NV9097_SET_COLOR_TARGET_A(colorTargetIndex), 6);
+
+    nvPushSetMethodDataU64(p, surfaceGpuAddress);
+    nvPushSetMethodData(p, surfaceWidth);
+    nvPushSetMethodData(p, surfaceHeight);
+    nvPushSetMethodData(p, surfaceFormat);
+    nvPushSetMethodData(p, memoryInfo);
+}
+
+#endif /* __NVIDIA_3D_COLOR_TARGETS_H__ */
diff --git a/src/common/unix/nvidia-3d/interface/nvidia-3d-constant-buffers.h b/src/common/unix/nvidia-3d/interface/nvidia-3d-constant-buffers.h
new file mode 100644
index 0000000..3587f7a
--- /dev/null
+++ b/src/common/unix/nvidia-3d/interface/nvidia-3d-constant-buffers.h
@@ -0,0 +1,196 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_3D_CONSTANT_BUFFERS_H__
+#define __NVIDIA_3D_CONSTANT_BUFFERS_H__
+
+#include "nvidia-3d.h"
+
+#include <class/cl9097.h>      /* NV9097_SET_CONSTANT_BUFFER_SELECTOR_A, et al; restored include, eaten by extraction */
+#include <class/cla06fsubch.h> /* NVA06F_SUBCHANNEL_3D; restored include, eaten by extraction */
+
+/*
+ * This header file defines static inline functions to manage 3D class
+ * constant buffers.
+ */
+
+
+static inline void nv3dSelectCbAddress(
+    Nv3dChannelRec *p3dChannel,
+    NvU64 offset,
+    NvU32 size)
+{
+    NvPushChannelPtr p = p3dChannel->pPushChannel;
+
+    nvAssert(size > 0);
+    nvAssert(NV_IS_ALIGNED(size, NV3D_MIN_CONSTBUF_ALIGNMENT));
+    nvAssert(size <= 65536);
+    nvAssert(NV_IS_ALIGNED(offset, NV3D_MIN_CONSTBUF_ALIGNMENT));
+
+    nvPushMethod(p, NVA06F_SUBCHANNEL_3D,
+                 NV9097_SET_CONSTANT_BUFFER_SELECTOR_A, 3);
+    nvPushSetMethodData(p, size);
+    nvPushSetMethodDataU64(p, offset);
+}
+
+/*!
+ * Select a constant buffer for binding or updating.
+ */
+static inline void nv3dSelectCb(
+    Nv3dChannelRec *p3dChannel,
+    int constantBufferIndex)
+{
+    const NvU64 gpuAddress =
+        nv3dGetConstantBufferGpuAddress(p3dChannel, constantBufferIndex);
+
+    nv3dSelectCbAddress(p3dChannel, gpuAddress, NV3D_CONSTANT_BUFFER_SIZE);
+}
+
+/*!
+ * Bind the selected Cb to a given slot (or invalidate that slot). + */ +static inline void nv3dBindCb( + Nv3dChannelRec *p3dChannel, + int bindGroup, // XXX TODO: this type should be NVShaderBindGroup + int slot, + NvBool valid) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + + ASSERT_DRF_NUM(9097, _BIND_GROUP_CONSTANT_BUFFER, _SHADER_SLOT, slot); + + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, + NV9097_BIND_GROUP_CONSTANT_BUFFER(bindGroup), + NV3D_V(9097, BIND_GROUP_CONSTANT_BUFFER, VALID, !!valid) | + NV3D_V(9097, BIND_GROUP_CONSTANT_BUFFER, SHADER_SLOT, slot)); +} + +/*! + * Push *only the header* to tell the GPU to "load" constants from the + * pushbuffer. + * + * \param[in] p3dChannel The nvidia-3d channel. + * \param[in] offset The offset in bytes of the start of the + * updates. + * \param[in] dwords Count of dwords to be loaded (after the + * header). + * + * \return An NvPushChannelUnion pointing immediately after the + * header, with enough contiguous space to copy 'dwords' of + * data. + */ +static inline NvPushChannelUnion *nv3dLoadConstantsHeader( + Nv3dChannelRec *p3dChannel, + NvU32 offset, + size_t dwords) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + NvPushChannelUnion *buffer; + + nvAssert((dwords + 1) <= nvPushMaxMethodCount(p)); + + nvPushMethodOneIncr(p, NVA06F_SUBCHANNEL_3D, + NV9097_LOAD_CONSTANT_BUFFER_OFFSET, dwords + 1); + nvPushSetMethodData(p, offset); + + buffer = p->main.buffer; + p->main.buffer += dwords; + + return buffer; +} + +/*! + * Load an array of bytes into a constant buffer at a specified location. + * + * The count must be a multiple of 4 bytes. + * + * \param[in] p3dChannel The nvidia-3d channel. + * \param[in] offset The offset in bytes of the start of the + * updates. + * \param[in] bytes Count of bytes to write. Must be a + * multiple of 4. + * \param[in] values Data to be written. + */ +static inline void nv3dLoadConstants( + Nv3dChannelRec *p3dChannel, + NvU32 offset, + size_t bytes, + const void *values) +{ + const size_t dwords = bytes / 4; + NvPushChannelUnion *buffer; + + nvAssert((bytes & 3) == 0); + + buffer = nv3dLoadConstantsHeader(p3dChannel, offset, dwords); + + nvDmaMoveDWORDS(buffer, values, dwords); +} + +/*! + * Set the current constant buffer's current byte offset, for use with + * nv3dPushConstants(). + */ +static inline void nv3dSetConstantBufferOffset( + Nv3dChannelRec *p3dChannel, + NvU32 offset) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, + NV9097_LOAD_CONSTANT_BUFFER_OFFSET, 1); + nvPushSetMethodData(p, offset); +} + +// Load an array of dwords into a constant buffer at the current location. This +// also advances the constant buffer load offset, so that multiple calls to +// nv3dPushConstants will write to sequential memory addresses. 
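+// For example (an illustrative sketch using the functions defined in this
+// header; 'cbIndex', 'dataA', and 'dataB' are hypothetical):
+//
+//     nv3dSelectCb(p3dChannel, cbIndex);
+//     nv3dSetConstantBufferOffset(p3dChannel, 0);
+//     nv3dPushConstants(p3dChannel, 16 * sizeof(NvU32), dataA); // dwords 0..15
+//     nv3dPushConstants(p3dChannel, 16 * sizeof(NvU32), dataB); // dwords 16..31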
+static inline void nv3dPushConstants(
+    Nv3dChannelRec *p3dChannel,
+    size_t bytes,
+    const void *values)
+{
+    NvPushChannelPtr p = p3dChannel->pPushChannel;
+    const size_t dwords = bytes / 4;
+    nvAssert((bytes & 3) == 0);
+    nvAssert(dwords <= nvPushMaxMethodCount(p));
+
+    nvPushMethodNoIncr(p, NVA06F_SUBCHANNEL_3D,
+                       NV9097_LOAD_CONSTANT_BUFFER(0), dwords);
+    nvPushInlineData(p, values, dwords);
+}
+
+static inline void nv3dLoadSingleConstant(
+    Nv3dChannelRec *p3dChannel,
+    NvU32 offset,
+    NvU32 value)
+{
+    NvPushChannelPtr p = p3dChannel->pPushChannel;
+
+    nvPushMethod(p, NVA06F_SUBCHANNEL_3D,
+                 NV9097_LOAD_CONSTANT_BUFFER_OFFSET, 2);
+    nvPushSetMethodData(p, offset);
+    nvPushSetMethodData(p, value);
+}
+
+#endif /* __NVIDIA_3D_CONSTANT_BUFFERS_H__ */
diff --git a/src/common/unix/nvidia-3d/interface/nvidia-3d-imports.h b/src/common/unix/nvidia-3d/interface/nvidia-3d-imports.h
new file mode 100644
index 0000000..839a2da
--- /dev/null
+++ b/src/common/unix/nvidia-3d/interface/nvidia-3d-imports.h
@@ -0,0 +1,41 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * nvidia-3d-imports.h declares functions that nvidia-3d host drivers must
+ * provide.
+ */
+
+#ifndef __NVIDIA_3D_IMPORTS_H__
+#define __NVIDIA_3D_IMPORTS_H__
+
+#include <stddef.h> /* size_t */
+
+void *nv3dImportAlloc(size_t size);
+void nv3dImportFree(void *ptr);
+int nv3dImportMemCmp(const void *a, const void *b, size_t size);
+void nv3dImportMemSet(void *s, int c, size_t size);
+void nv3dImportMemCpy(void *dest, const void *src, size_t size);
+void nv3dImportMemMove(void *dest, const void *src, size_t size);
+
+#endif /* __NVIDIA_3D_IMPORTS_H__ */
diff --git a/src/common/unix/nvidia-3d/interface/nvidia-3d-shader-constants.h b/src/common/unix/nvidia-3d/interface/nvidia-3d-shader-constants.h
new file mode 100644
index 0000000..b6e210c
--- /dev/null
+++ b/src/common/unix/nvidia-3d/interface/nvidia-3d-shader-constants.h
@@ -0,0 +1,53 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVIDIA_3D_SHADER_CONSTANTS_H_ +#define _NVIDIA_3D_SHADER_CONSTANTS_H_ + +#if defined(NV3D_BUILD_AS_GLSL) + + #define NV3D_CB_SLOT_FIRST_USER_BINDABLE 0 + +#else + + /* Shaders always use this slot for compiler-emitted constants. This + * assumption is verified at ucode build time. */ + #define NV3D_CB_SLOT_COMPILER 1 + + /* Offset between GLSL slot 0 and hardware slot */ + #define NV3D_CB_SLOT_FIRST_USER_BINDABLE 3 + +#endif + +/* This slot is used for most uniforms/constants defined in each shader */ +#define NV3D_CB_SLOT_MISC1 (NV3D_CB_SLOT_FIRST_USER_BINDABLE + 0) + +/* When needed (Kepler+), shaders always use this constant slot for bindless + * texture handles. */ +#define NV3D_CB_SLOT_BINDLESS_TEXTURE (NV3D_CB_SLOT_FIRST_USER_BINDABLE + 1) + + +/* Matches __GL_PGM_UNUSED_TEXTURE_UNIT */ +#define NV3D_TEX_BINDING_UNUSED 255 + +#endif /* _NVIDIA_3D_SHADER_CONSTANTS_H_ */ diff --git a/src/common/unix/nvidia-3d/interface/nvidia-3d-shaders.h b/src/common/unix/nvidia-3d/interface/nvidia-3d-shaders.h new file mode 100644 index 0000000..3e8c75a --- /dev/null +++ b/src/common/unix/nvidia-3d/interface/nvidia-3d-shaders.h @@ -0,0 +1,69 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef __NVIDIA_3D_SHADERS_H__
+#define __NVIDIA_3D_SHADERS_H__
+
+#include <nvtypes.h>      /* restored include, eaten by extraction */
+#include <class/cl9097.h> /* NV9097_SET_PIPELINE_SHADER_TYPE_*; restored include, eaten by extraction */
+
+// These are used in the "shader type" field below
+#define NV3D_SHADER_TYPE_VERTEX NV9097_SET_PIPELINE_SHADER_TYPE_VERTEX
+#define NV3D_SHADER_TYPE_PIXEL NV9097_SET_PIPELINE_SHADER_TYPE_PIXEL
+
+typedef enum {
+    NV3D_HW_SHADER_STAGE_VERTEX_A = 0,
+    NV3D_HW_SHADER_STAGE_VERTEX_B,
+    NV3D_HW_SHADER_STAGE_TESS_CONTROL,
+    NV3D_HW_SHADER_STAGE_TESS_EVAL,
+    NV3D_HW_SHADER_STAGE_GEOMETRY,
+    NV3D_HW_SHADER_STAGE_PIXEL,
+    NV3D_HW_SHADER_STAGE_COUNT,
+} __attribute__ ((__packed__)) Nv3dShaderStage;
+
+typedef enum {
+    NV3D_HW_BIND_GROUP_VERTEX = 0,
+    NV3D_HW_BIND_GROUP_TESS_CONTROL,
+    NV3D_HW_BIND_GROUP_TESS_EVAL,
+    NV3D_HW_BIND_GROUP_GEOMETRY,
+    NV3D_HW_BIND_GROUP_FRAGMENT,
+    NV3D_HW_BIND_GROUP_LAST = NV3D_HW_BIND_GROUP_FRAGMENT
+} __attribute__ ((__packed__)) Nv3dShaderBindGroup;
+
+typedef struct _nv_program_info {
+    NvU32 offset;        // Start offset relative to program heap
+    NvU8 registerCount;  // From '#.MAX_REG n'+1
+    NvU8 type;           // Shader type
+    NvS8 constIndex;     // Index into the compiler-generated constant buffer table
+
+    Nv3dShaderStage stage;          // Pipeline stage
+    Nv3dShaderBindGroup bindGroup;  // NV3D_HW_BIND_GROUP
+} Nv3dProgramInfo;
+
+typedef struct _nv_shader_const_buf_info {
+    const NvU32 *data;
+    NvU32 offset;
+    NvU32 size;
+} Nv3dShaderConstBufInfo;
+
+#endif // __NVIDIA_3D_SHADERS_H__
diff --git a/src/common/unix/nvidia-3d/interface/nvidia-3d-types.h b/src/common/unix/nvidia-3d/interface/nvidia-3d-types.h
new file mode 100644
index 0000000..99cccec
--- /dev/null
+++ b/src/common/unix/nvidia-3d/interface/nvidia-3d-types.h
@@ -0,0 +1,477 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef __NVIDIA_3D_TYPES_H__ +#define __NVIDIA_3D_TYPES_H__ + + +#include "nvtypes.h" +#include "nvlimits.h" +#include "nvidia-push-methods.h" + +#include "nvidia-3d-shaders.h" + +enum Nv3dBlendOperation { + NV3D_BLEND_OP_CLEAR, + NV3D_BLEND_OP_SRC, + NV3D_BLEND_OP_DST, + NV3D_BLEND_OP_OVER, + NV3D_BLEND_OP_OVER_REVERSE, + NV3D_BLEND_OP_IN, + NV3D_BLEND_OP_IN_REVERSE, + NV3D_BLEND_OP_OUT, + NV3D_BLEND_OP_OUT_REVERSE, + NV3D_BLEND_OP_ATOP, + NV3D_BLEND_OP_ATOP_REVERSE, + NV3D_BLEND_OP_XOR, + NV3D_BLEND_OP_ADD, + NV3D_BLEND_OP_SATURATE, +}; + +// We use two vertex streams: one for static attributes (values that are the +// same for all vertices) and one for dynamic attributes. +enum Nv3dVertexAttributeStreamType { + NV3D_VERTEX_ATTRIBUTE_STREAM_FIRST = 0, + NV3D_VERTEX_ATTRIBUTE_STREAM_STATIC = 0, + NV3D_VERTEX_ATTRIBUTE_STREAM_DYNAMIC = 1, + NV3D_VERTEX_ATTRIBUTE_STREAM_COUNT, +} __attribute__ ((__packed__)); + +/* The data type of a vertex attribute. */ +/* Names of enum Nv3dVertexAttributeDataType members follow + * "NV3D_VERTEX_ATTRIBUTE_DATA_TYPE_{N_elements}_{element_size}_{NUMERICAL_TYPE}" convention + * where {NUMERICAL_TYPE} gives information about NV9097_SET_VERTEX_ATTRIBUTE_A_NUMERICAL_TYPE + */ +enum Nv3dVertexAttributeDataType { + NV3D_VERTEX_ATTRIBUTE_DATA_TYPE_2_32_FLOAT, /* two floats */ + NV3D_VERTEX_ATTRIBUTE_DATA_TYPE_4_32_FLOAT, /* four floats */ + NV3D_VERTEX_ATTRIBUTE_DATA_TYPE_4_16_UNORM, /* four unsigned shorts mapped to floats: [0,65535] => [0.0f,1.0f] */ + NV3D_VERTEX_ATTRIBUTE_DATA_TYPE_4_8_UNORM, /* four unsigned bytes mapped to floats: [0,255] => [0.0f,1.0f] */ + NV3D_VERTEX_ATTRIBUTE_DATA_TYPE_2_16_SSCALED,/* two shorts mapped to floats: [-32768,32767] => [-32768.0f,32767.0f] */ +} __attribute__ ((__packed__)); + +/* The possible vertex attributes. */ +enum Nv3dVertexAttributeType { + NV3D_VERTEX_ATTRIBUTE_POSITION = 0, + NV3D_VERTEX_ATTRIBUTE_VERTEX_WEIGHT = 1, + NV3D_VERTEX_ATTRIBUTE_NORMAL = 2, + NV3D_VERTEX_ATTRIBUTE_COLOR = 3, + NV3D_VERTEX_ATTRIBUTE_SECONDARY_COLOR = 4, + NV3D_VERTEX_ATTRIBUTE_FOG_COORD = 5, + NV3D_VERTEX_ATTRIBUTE_POINT_SIZE = 6, + NV3D_VERTEX_ATTRIBUTE_MATRIX_INDEX = 7, + NV3D_VERTEX_ATTRIBUTE_TEXCOORD0 = 8, + NV3D_VERTEX_ATTRIBUTE_TEXCOORD1 = 9, + NV3D_VERTEX_ATTRIBUTE_TEXCOORD2 = 10, + NV3D_VERTEX_ATTRIBUTE_TEXCOORD3 = 11, + /* + * The _END enum value is used as a sentinel to terminate arrays of + * Nv3dVertexAttributeInfoRec (see Nv3dVertexAttributeInfoRec, below). + */ + NV3D_VERTEX_ATTRIBUTE_END = 255, +} __attribute__ ((__packed__)); + +/* + * Nv3dVertexAttributeInfoRec stores the triplet attribute, stream type, and + * data type. Arrays of Nv3dVertexAttributeInfoRec are used to describe vertex + * attribute configurations to FermiSetupVertexArrays(). + * + * The NV3D_ATTRIB_ENTRY() and NV3D_ATTRIB_END macros can be used to make + * Nv3dVertexAttributeInfoRec assignment more succinct. 
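+ * (Each entry expands to a compound-literal Nv3dVertexAttributeInfoRec
+ * that selects the attribute, its stream, and its data type.)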
E.g.,
+ *
+ *   Nv3dVertexAttributeInfoRec attribs[] = {
+ *       NV3D_ATTRIB_ENTRY(COLOR, STATIC, 4_8_UNORM),
+ *       NV3D_ATTRIB_END,
+ *   };
+ */
+typedef struct _Nv3dVertexAttributeInfoRec {
+    enum Nv3dVertexAttributeType attributeType;
+    enum Nv3dVertexAttributeStreamType streamType;
+    enum Nv3dVertexAttributeDataType dataType;
+} Nv3dVertexAttributeInfoRec;
+
+#define NV3D_ATTRIB_TYPE_ENTRY(_i, _streamType, _dataType)          \
+    (Nv3dVertexAttributeInfoRec)                                    \
+    { .attributeType = _i,                                          \
+      .streamType = NV3D_VERTEX_ATTRIBUTE_STREAM_##_streamType,     \
+      .dataType = NV3D_VERTEX_ATTRIBUTE_DATA_TYPE_##_dataType }
+
+#define NV3D_ATTRIB_ENTRY(_attribType, _streamType, _dataType) \
+    NV3D_ATTRIB_TYPE_ENTRY(NV3D_VERTEX_ATTRIBUTE_##_attribType, _streamType, _dataType)
+
+#define NV3D_ATTRIB_END                  \
+    (Nv3dVertexAttributeInfoRec)         \
+    { .attributeType = NV3D_VERTEX_ATTRIBUTE_END }
+
+/*
+ * When built into kernel code, define Nv3dFloat to be an NvU32: it is the same
+ * size as a float, but the caller is responsible for storing float bit patterns
+ * to Nv3dFloat.
+ */
+ct_assert(sizeof(float) == sizeof(NvU32));
+#if NV_PUSH_ALLOW_FLOAT
+typedef float Nv3dFloat;
+#else
+typedef NvU32 Nv3dFloat;
+#endif
+
+static inline void nv3dPushFloat(NvPushChannelPtr p, const Nv3dFloat data)
+{
+#if NV_PUSH_ALLOW_FLOAT
+    nvPushSetMethodDataF(p, data);
+#else
+    nvPushSetMethodData(p, data);
+#endif
+}
+
+/*
+ * Vertex attribute data types. Each of these types represents a different way
+ * of specifying vertex attribute data.
+ */
+typedef struct __attribute__((packed)) {
+    Nv3dFloat x, y;
+} Nv3dVertexAttrib2F;
+
+typedef struct __attribute__((packed)) {
+    NvU32 x, y;
+} Nv3dVertexAttrib2U;
+
+typedef struct __attribute__((packed)) {
+    NvS32 x, y;
+} Nv3dVertexAttrib2S;
+
+typedef struct __attribute__((packed)) {
+    Nv3dFloat x, y, z;
+} Nv3dVertexAttrib3F;
+
+typedef struct __attribute__((packed)) {
+    NvU32 x, y, z;
+} Nv3dVertexAttrib3U;
+
+typedef struct __attribute__((packed)) {
+    Nv3dFloat x, y, z, w;
+} Nv3dVertexAttrib4F;
+
+typedef struct __attribute__((packed)) {
+    NvU16 x, y, z, w;
+} Nv3dVertexAttrib4US;
+
+typedef struct __attribute__((packed)) {
+    NvU8 x, y, z, w;
+} Nv3dVertexAttrib4UB;
+
+typedef struct {
+    NvU32 xyzw;
+} Nv3dVertexAttrib4UBPacked;
+
+typedef struct __attribute__((packed)) {
+    NvU32 xy;
+} Nv3dVertexAttrib2SPacked;
+
+// List of component sizes used for the internal representation of a
+// texture header
+enum Nv3dTexHeaderComponentSizes {
+    NV3D_TEXHEAD_A8B8G8R8,
+    NV3D_TEXHEAD_A2B10G10R10,
+    NV3D_TEXHEAD_B5G6R5,
+    NV3D_TEXHEAD_A1B5G5R5,
+    NV3D_TEXHEAD_R8,
+    NV3D_TEXHEAD_R32,
+    NV3D_TEXHEAD_R16,
+    NV3D_TEXHEAD_G8R8,
+    NV3D_TEXHEAD_R16G16B16A16,
+    NV3D_TEXHEAD_R32G32B32A32,
+    NV3D_TEXHEAD_Y8_VIDEO
+};
+
+// List of component sources used for the internal representation of a
+// texture header
+enum Nv3dTexHeaderSource {
+    NV3D_TEXHEAD_IN_A,
+    NV3D_TEXHEAD_IN_R,
+    NV3D_TEXHEAD_IN_G,
+    NV3D_TEXHEAD_IN_B,
+    NV3D_TEXHEAD_IN_ZERO,
+    NV3D_TEXHEAD_IN_ONE_FLOAT
+};
+
+// List of component data types used for the internal representation of
+// a texture header
+enum Nv3dTexHeaderDataType {
+    NV3D_TEXHEAD_NUM_UNORM,
+    NV3D_TEXHEAD_NUM_UINT,
+    NV3D_TEXHEAD_NUM_FLOAT,
+    NV3D_TEXHEAD_NUM_SNORM,
+    NV3D_TEXHEAD_NUM_SINT
+};
+
+enum Nv3dTexHeaderRepeatType {
+    NV3D_TEXHEAD_REPEAT_TYPE_NONE,
+    NV3D_TEXHEAD_REPEAT_TYPE_NORMAL,
+    NV3D_TEXHEAD_REPEAT_TYPE_PAD,
+    NV3D_TEXHEAD_REPEAT_TYPE_REFLECT
+};
+
+enum Nv3dTextureFilterType {
+    NV3D_TEXHEAD_FILTER_TYPE_NEAREST,
+    NV3D_TEXHEAD_FILTER_TYPE_LINEAR,
+
NV3D_TEXHEAD_FILTER_TYPE_ANISO_2X, + NV3D_TEXHEAD_FILTER_TYPE_ANISO_4X, + NV3D_TEXHEAD_FILTER_TYPE_ANISO_8X, + NV3D_TEXHEAD_FILTER_TYPE_ANISO_16X +}; + +enum Nv3dTexType { + NV3D_TEX_TYPE_ONE_D, + NV3D_TEX_TYPE_ONE_D_BUFFER, + NV3D_TEX_TYPE_TWO_D_PITCH, + NV3D_TEX_TYPE_TWO_D_BLOCKLINEAR, +}; + +typedef struct { + NvU32 x; + NvU32 y; + NvU32 z; +} Nv3dBlockLinearLog2GobsPerBlock; + +// Intermediate representation of a texture header +typedef struct { + NvBool error; + + enum Nv3dTexHeaderComponentSizes sizes; + + // Currently, we always use the same data type for all components. + enum Nv3dTexHeaderDataType dataType; + + struct { + enum Nv3dTexHeaderSource x; + enum Nv3dTexHeaderSource y; + enum Nv3dTexHeaderSource z; + enum Nv3dTexHeaderSource w; + } source; + + enum Nv3dTexType texType; + + NvU64 offset; + NvBool normalizedCoords; + enum Nv3dTexHeaderRepeatType repeatType; + enum Nv3dTextureFilterType filtering; + int pitch; + int width; + int height; + + Nv3dBlockLinearLog2GobsPerBlock log2GobsPerBlock; +} Nv3dRenderTexInfo; + +typedef NvU32 Nv3dTexSampler[8]; +typedef NvU32 Nv3dTexHeader[8]; + +// HW representation of a texture header +typedef struct { + Nv3dTexSampler samp; + Nv3dTexHeader head; +} Nv3dTexture; + +#define NV3D_CONSTANT_BUFFER_SIZE (4096 * 4) + +#define NV3D_TEXTURE_INDEX_INVALID (-1) + +#define NV3D_VERTEX_ATTRIBUTE_STREAM_SIZE (64 * 1024) + +/* + * The constant buffer alignment constraints, specifically for the methods: + * + * NV*97_SET_CONSTANT_BUFFER_SELECTOR_A_SIZE + * NV*97_SET_CONSTANT_BUFFER_SELECTOR_C_ADDRESS_LOWER + * + * have evolved over GPU architectures: + * + * kepler maxwell pascal volta turing + * SIZE 256 16 16 16 16 + * ADDRESS 256 256 256 256 64 + * + * But, using an alignment of 256 all the time is simpler. + */ +#define NV3D_MIN_CONSTBUF_ALIGNMENT 256 + +/* + * 3D engine pitch alignment requirements for texture surface. + */ +#define NV3D_TEXTURE_PITCH_ALIGNMENT 256 + +typedef struct _Nv3dStreamSurfaceRec { + NvU64 gpuAddress; + NvU64 size; +} Nv3dStreamSurfaceRec; + +typedef struct _Nv3dVertexAttributeStreamRec { + // Current GPU address within the stream. + NvU64 current; + // Terminating GPU address within the stream. + NvU64 end; + // Number of bytes per vertex. + NvU32 stride; + // Index of the next vertex to be launched. + int nextLaunch; +} Nv3dVertexAttributeStreamRec; + +typedef struct _Nv3dHal Nv3dHal; + +typedef struct _Nv3dDeviceCapsRec { + NvU32 hasSetBindlessTexture :1; /* Supports SetBindlessTexture method */ + NvU32 hasProgramRegion :1; + + NvU32 maxDim; /* + * Maximum width or height of the + * texture surface in pixels. + */ +} Nv3dDeviceCapsRec, *Nv3dDeviceCapsPtr; + +typedef struct _Nv3dDeviceSpaVersionRec { + NvU16 major; + NvU16 minor; +} Nv3dDeviceSpaVersionRec; + +/* + * Enum for each compiled shader version. + */ +enum Nv3dShaderArch { + NV3D_SHADER_ARCH_MAXWELL, + NV3D_SHADER_ARCH_PASCAL, + NV3D_SHADER_ARCH_VOLTA, + NV3D_SHADER_ARCH_TURING, + NV3D_SHADER_ARCH_AMPERE, + NV3D_SHADER_ARCH_COUNT, +}; + +typedef struct _Nv3dDeviceRec { + + NvPushDevicePtr pPushDevice; + Nv3dDeviceCapsRec caps; + NvU32 classNumber; + enum Nv3dShaderArch shaderArch; + + Nv3dDeviceSpaVersionRec spaVersion; + + NvU32 maxThreadsPerWarp; + NvU32 maxWarps; + + const Nv3dHal *hal; + +} Nv3dDeviceRec, *Nv3dDevicePtr; + +typedef struct _Nv3dChannelProgramsRec { + /* + * An array of program descriptors, and the number of elements + * in the array. 
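+     * Each Nv3dProgramInfo supplies the program's start offset within the
+     * code segment below, its hardware shader stage and bind group, and the
+     * index of its compiler-generated constant buffer (see
+     * nvidia-3d-shaders.h).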
+ */ + size_t num; + const Nv3dProgramInfo *info; + + size_t maxLocalBytes; + size_t maxStackBytes; + + /* + * The shader program code segment. + * + * The size is in bytes. + */ + struct { + size_t decompressedSize; + const unsigned char *compressedStart; + const unsigned char *compressedEnd; + } code; + + /* + * The constant buffers generated by the compiler for use with the above + * code segment. + * + * 'size' is the total size of the surface to allocate, in bytes. + * 'sizeAlign' is the minimum alignment required by the hardware for each + * particular constant buffer. (Although we may only have + * N bytes of data to upload for each constant buffer, that + * size should be padded out with zeroes to a multiple of this + * value.) + * 'count' is the number of entries in the 'info' array. + * 'info' is a pointer to an array of Nv3dShaderConstBufInfo entries. + */ + struct { + size_t size; + NvU32 sizeAlign; + NvU32 count; + const Nv3dShaderConstBufInfo *info; + } constants; +} Nv3dChannelProgramsRec; + +typedef struct _Nv3dChannelRec { + + Nv3dDevicePtr p3dDevice; + NvPushChannelPtr pPushChannel; + + NvU32 handle[NV_MAX_SUBDEVICES]; + NvU16 numTextures; + NvU16 numTextureBindings; + + Nv3dVertexAttributeStreamRec + vertexStreams[NV3D_VERTEX_ATTRIBUTE_STREAM_COUNT]; + + /* + * Begin / end state. ~0 if outside begin/end, or NV9097_BEGIN_OP_* if + * inside. + */ + NvU32 currentPrimitiveMode; + + Nv3dChannelProgramsRec programs; + int currentProgramIndex[NV3D_HW_SHADER_STAGE_COUNT]; + NvU64 programLocalMemorySize; + + NvBool hasFrameBoundaries; + + struct { + NvU32 handle[NV_MAX_SUBDEVICES]; + NvU64 gpuAddress; + NvU64 programOffset; + NvU64 programConstantsOffset; + NvU64 programLocalMemoryOffset; + NvU64 textureOffset; + NvU64 bindlessTextureConstantBufferOffset; + NvU64 constantBufferOffset; + NvU64 vertexStreamOffset[NV3D_VERTEX_ATTRIBUTE_STREAM_COUNT]; + NvU64 totalSize; + } surface; + +} Nv3dChannelRec, *Nv3dChannelPtr; + +typedef struct { + Nv3dFloat red; + Nv3dFloat green; + Nv3dFloat blue; + Nv3dFloat alpha; +} Nv3dColor; + +typedef struct { + NvU32 blendFactorSrc; /* NV9097_SET_BLEND_COLOR/ALPHA_SOURCE_COEFF_ */ + NvU32 blendFactorDst; /* NV9097_SET_BLEND_COLOR/ALPHA_DEST_COEFF_ */ + NvU32 blendEquation; /* NV9097_SET_BLEND_COLOR/ALPHA_OP_ */ +} Nv3dBlendState; +#endif /* __NVIDIA_3D_TYPES_H__ */ diff --git a/src/common/unix/nvidia-3d/interface/nvidia-3d-utils.h b/src/common/unix/nvidia-3d/interface/nvidia-3d-utils.h new file mode 100644 index 0000000..6b95994 --- /dev/null +++ b/src/common/unix/nvidia-3d/interface/nvidia-3d-utils.h @@ -0,0 +1,104 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_3D_UTILS_H__
+#define __NVIDIA_3D_UTILS_H__
+
+#include "nvidia-3d.h"
+
+#include <class/cl9097.h>      /* NV9097_* method defines; restored include, eaten by extraction */
+#include <class/cla06fsubch.h> /* NVA06F_SUBCHANNEL_3D; restored include, eaten by extraction */
+
+static inline void nv3dSetSurfaceClip(
+    Nv3dChannelRec *p3dChannel,
+    NvS16 x,
+    NvS16 y,
+    NvU16 w,
+    NvU16 h)
+{
+    NvPushChannelPtr p = p3dChannel->pPushChannel;
+
+    nvPushMethod(p, NVA06F_SUBCHANNEL_3D,
+                 NV9097_SET_SURFACE_CLIP_HORIZONTAL, 2);
+    nvPushSetMethodData(p,
+        NV3D_V(9097, SET_SURFACE_CLIP_HORIZONTAL, X, x) |
+        NV3D_V(9097, SET_SURFACE_CLIP_HORIZONTAL, WIDTH, w));
+    nvPushSetMethodData(p,
+        NV3D_V(9097, SET_SURFACE_CLIP_VERTICAL, Y, y) |
+        NV3D_V(9097, SET_SURFACE_CLIP_VERTICAL, HEIGHT, h));
+}
+
+static inline void nv3dClearSurface(
+    Nv3dChannelRec *p3dChannel,
+    const NvU32 clearColor[4],
+    NvU16 x,
+    NvU16 y,
+    NvU16 w,
+    NvU16 h)
+{
+    NvPushChannelPtr p = p3dChannel->pPushChannel;
+
+    nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_COLOR_CLEAR_VALUE(0), 4);
+    nvPushSetMethodData(p, clearColor[0]);
+    nvPushSetMethodData(p, clearColor[1]);
+    nvPushSetMethodData(p, clearColor[2]);
+    nvPushSetMethodData(p, clearColor[3]);
+
+    nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_CLEAR_RECT_HORIZONTAL, 2);
+    nvPushSetMethodData(p,
+        NV3D_V(9097, SET_CLEAR_RECT_HORIZONTAL, XMIN, x) |
+        NV3D_V(9097, SET_CLEAR_RECT_HORIZONTAL, XMAX, x + w));
+    nvPushSetMethodData(p,
+        NV3D_V(9097, SET_CLEAR_RECT_VERTICAL, YMIN, y) |
+        NV3D_V(9097, SET_CLEAR_RECT_VERTICAL, YMAX, y + h));
+    nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NV9097_CLEAR_SURFACE,
+        NV3D_C(9097, CLEAR_SURFACE, R_ENABLE, TRUE) |
+        NV3D_C(9097, CLEAR_SURFACE, G_ENABLE, TRUE) |
+        NV3D_C(9097, CLEAR_SURFACE, B_ENABLE, TRUE) |
+        NV3D_C(9097, CLEAR_SURFACE, A_ENABLE, TRUE));
+}
+
+static inline void nv3dVasBegin(
+    Nv3dChannelRec *p3dChannel,
+    NvU32 mode)
+{
+    NvPushChannelPtr p = p3dChannel->pPushChannel;
+
+    nvAssert(p3dChannel->currentPrimitiveMode == ~0);
+
+    p3dChannel->currentPrimitiveMode = mode;
+    nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NV9097_BEGIN, mode);
+}
+
+static inline void nv3dVasEnd(
+    Nv3dChannelRec *p3dChannel)
+{
+    NvPushChannelPtr p = p3dChannel->pPushChannel;
+
+    nvAssert(p3dChannel->currentPrimitiveMode != ~0);
+
+    p3dChannel->currentPrimitiveMode = ~0;
+    nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NV9097_END, 0);
+}
+
+#endif /* __NVIDIA_3D_UTILS_H__ */
diff --git a/src/common/unix/nvidia-3d/interface/nvidia-3d.h b/src/common/unix/nvidia-3d/interface/nvidia-3d.h
new file mode 100644
index 0000000..cdac165
--- /dev/null
+++ b/src/common/unix/nvidia-3d/interface/nvidia-3d.h
@@ -0,0 +1,296 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * The nvidia-3d library provides utility code for programming a 3D + * object. + */ + +#ifndef __NVIDIA_3D_H__ +#define __NVIDIA_3D_H__ + +#include "nvtypes.h" +#include "nvmisc.h" /* DRF_DEF, et al */ +#include "nvlimits.h" /* NV_MAX_SUBDEVICES */ + +#include "nvidia-push-types.h" +#include "nvidia-3d-types.h" + +#define NV3D_C(d, r, f, c) DRF_DEF( d, _ ## r, _ ## f, _ ## c) +#define NV3D_V(d, r, f, v) DRF_NUM( d, _ ## r, _ ## f, (NvU32)(v) ) + +/* + * Allocate and free an Nv3dDeviceRec + */ +typedef struct _Nv3dAllocDeviceParams { + NvPushDevicePtr pPushDevice; +} Nv3dAllocDeviceParams; + +NvBool nv3dAllocDevice( + const Nv3dAllocDeviceParams *pParams, + Nv3dDevicePtr p3dDevice); + +void nv3dFreeDevice( + Nv3dDevicePtr p3dDevice); + +/* + * Allocate and free an Nv3dChannelRec data structure. + * + * Note that all pointers provided in this parameter structure are + * cached in the Nv3dChannelRec. They must remain valid from + * nv3dAllocChannelState() until the corresponding + * nv3dFreeChannelState() call. + */ +typedef struct _Nv3dAllocChannelStateParams { + /* + * The Nv3dDeviceRec to use with this channel. + */ + Nv3dDevicePtr p3dDevice; + + /* + * The number of texture headers/samplers nvidia-3d should + * allocate. + */ + NvU16 numTextures; + + /* + * The number of general purpose constant buffers nvidia-3d should + * allocate. + */ + NvU16 numConstantBuffers; + + /* + * The number of texture bindings. + */ + NvU16 numTextureBindings; + + /* + * Whether the host driver renders in terms of frames, or, like the X + * driver, renders directly to the front buffer. On >= Pascal, the pipe + * needs to be explicitly flushed at the end of a frame. + */ + NvBool hasFrameBoundaries; + +} Nv3dAllocChannelStateParams; + +NvBool nv3dAllocChannelState( + const Nv3dAllocChannelStateParams *pParams, + Nv3dChannelPtr p3dChannel); + +void nv3dFreeChannelState( + Nv3dChannelPtr p3dChannel); + + +/* + * Allocate and free the RM object for an Nv3dChannelRec. + */ +typedef struct _Nv3dAllocChannelObjectParams { + NvPushChannelPtr pPushChannel; + NvU32 handle[NV_MAX_SUBDEVICES]; +} Nv3dAllocChannelObjectParams; + +NvBool nv3dAllocChannelObject( + const Nv3dAllocChannelObjectParams *pParams, + Nv3dChannelPtr p3dChannel); + +void nv3dFreeChannelObject( + Nv3dChannelPtr p3dChannel); + + +/* + * Allocate and free the surface needed by the Nv3dChannelRec. 
+ */ +NvBool nv3dAllocChannelSurface(Nv3dChannelPtr p3dChannel); + +void nv3dFreeChannelSurface(Nv3dChannelPtr p3dChannel); + + +/* + * Once the Nv3dChannelRec is allocated, and the objects and surface + * for it are allocated, nv3dInitChannel() is used to initialize the + * graphics engine and make it ready to use. + */ +NvBool nv3dInitChannel(Nv3dChannelPtr p3dChannel); + + +/* + * Return the offset or GPU address of the specified item within the + * Nv3dChannelRec's surface. + */ + +static inline NvU64 nv3dGetTextureOffset( + const Nv3dChannelRec *p3dChannel, + NvU32 textureIndex) +{ + const NvU64 offset = p3dChannel->surface.textureOffset; + + return offset + (sizeof(Nv3dTexture) * textureIndex); +} + +static inline NvU64 nv3dGetTextureGpuAddress( + const Nv3dChannelRec *p3dChannel, + NvU32 textureIndex) +{ + return p3dChannel->surface.gpuAddress + + nv3dGetTextureOffset(p3dChannel, textureIndex); +} + +static inline NvU64 nv3dGetConstantBufferOffset( + const Nv3dChannelRec *p3dChannel, + NvU32 constantBufferIndex) +{ + const NvU64 offset = p3dChannel->surface.constantBufferOffset; + + return offset + (NV3D_CONSTANT_BUFFER_SIZE * constantBufferIndex); +} + +static inline NvU64 nv3dGetConstantBufferGpuAddress( + const Nv3dChannelRec *p3dChannel, + NvU32 constantBufferIndex) +{ + return p3dChannel->surface.gpuAddress + + nv3dGetConstantBufferOffset(p3dChannel, constantBufferIndex); +} + +static inline NvU64 nv3dGetProgramOffset( + const Nv3dChannelRec *p3dChannel) +{ + return p3dChannel->surface.programOffset; +} + +static inline NvU64 nv3dGetProgramGpuAddress( + const Nv3dChannelRec *p3dChannel) +{ + return p3dChannel->surface.gpuAddress + nv3dGetProgramOffset(p3dChannel); +} + +static inline NvU64 nv3dGetProgramConstantsOffset( + const Nv3dChannelRec *p3dChannel) +{ + return p3dChannel->surface.programConstantsOffset; +} + +static inline NvU64 nv3dGetProgramConstantsGpuAddress( + const Nv3dChannelRec *p3dChannel) +{ + return p3dChannel->surface.gpuAddress + + nv3dGetProgramConstantsOffset(p3dChannel); +} + +static inline NvU64 nv3dGetProgramLocalMemoryOffset( + const Nv3dChannelRec *p3dChannel) +{ + return p3dChannel->surface.programLocalMemoryOffset; +} + +static inline NvU64 nv3dGetProgramLocalMemoryGpuAddress( + const Nv3dChannelRec *p3dChannel) +{ + return p3dChannel->surface.gpuAddress + + nv3dGetProgramLocalMemoryOffset(p3dChannel); +} + +static inline NvU64 nv3dGetBindlessTextureConstantBufferOffset( + const Nv3dChannelRec *p3dChannel) +{ + return p3dChannel->surface.bindlessTextureConstantBufferOffset; +} + +static inline NvU64 nv3dGetBindlessTextureConstantBufferGpuAddress( + const Nv3dChannelRec *p3dChannel) +{ + return p3dChannel->surface.gpuAddress + + nv3dGetBindlessTextureConstantBufferOffset(p3dChannel); +} + +static inline NvU64 nv3dGetVertexAttributestreamOffset( + const Nv3dChannelRec *p3dChannel, + enum Nv3dVertexAttributeStreamType stream) +{ + return p3dChannel->surface.vertexStreamOffset[stream]; +} + +static inline NvU64 nv3dGetVertexAttributestreamGpuAddress( + const Nv3dChannelRec *p3dChannel, + enum Nv3dVertexAttributeStreamType stream) +{ + return p3dChannel->surface.gpuAddress + + nv3dGetVertexAttributestreamOffset(p3dChannel, stream); +} + +void nv3dUploadDataInline( + Nv3dChannelRec *p3dChannel, + NvU64 gpuBaseAddress, + size_t offset, + const void *data, + size_t bytes); + +void nv3dClearProgramCache( + Nv3dChannelRec *p3dChannel); + +void nv3dLoadProgram( + Nv3dChannelRec *p3dChannel, + int programIndex); + +void nv3dLoadTextures( + Nv3dChannelRec 
*p3dChannel,
+    int firstTextureIndex,
+    const Nv3dRenderTexInfo *texInfo,
+    int numTextures);
+
+void nv3dBindTextures(
+    Nv3dChannelPtr p3dChannel,
+    int programIndex,
+    const int *textureBindingIndices);
+
+void nv3dSetBlendColorCoefficients(
+    Nv3dChannelPtr p3dChannel,
+    enum Nv3dBlendOperation op,
+    NvBool forceNoDstAlphaBits,
+    NvBool dualSourceBlending);
+
+void nv3dSetBlend(
+    Nv3dChannelPtr p3dChannel,
+    const Nv3dBlendState *blendStateColor,
+    const Nv3dBlendState *blendStateAlpha,
+    const Nv3dColor *blendColor);
+
+int nv3dVasSetup(
+    Nv3dChannelRec *p3dChannel,
+    const Nv3dVertexAttributeInfoRec *attribs,
+    const Nv3dStreamSurfaceRec *pSurf);
+
+void nv3dVasSelectCbForVertexData(
+    Nv3dChannelRec *p3dChannel);
+
+void nv3dVasDrawInlineVerts(
+    Nv3dChannelRec *p3dChannel,
+    const void *data,
+    int numVerts);
+
+NvBool nv3dVasMakeRoom(
+    Nv3dChannelRec *p3dChannel,
+    NvU32 pendingVerts,
+    NvU32 moreVerts);
+
+#endif /* __NVIDIA_3D_H__ */
diff --git a/src/common/unix/nvidia-3d/src/nvidia-3d-core.c b/src/common/unix/nvidia-3d/src/nvidia-3d-core.c
new file mode 100644
index 0000000..0540e32
--- /dev/null
+++ b/src/common/unix/nvidia-3d/src/nvidia-3d-core.c
@@ -0,0 +1,162 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-3d.h"
+#include "nvidia-3d-types-priv.h"
+#include "nvos.h"
+#include "nvidia-3d-fermi.h"
+#include "nvidia-3d-kepler.h"
+#include "nvidia-push-utils.h"
+
+NvBool nv3dAllocChannelObject(
+    const Nv3dAllocChannelObjectParams *pParams,
+    Nv3dChannelPtr p3dChannel)
+{
+    NvPushChannelPtr pPushChannel = pParams->pPushChannel;
+    NvPushDevicePtr pPushDevice = pPushChannel->pDevice;
+    const NvU32 classNumber = p3dChannel->p3dDevice->classNumber;
+    const NvU32 numChannels =
+        pPushDevice->clientSli ? pPushDevice->numSubDevices : 1;
+    int sd;
+
+    /*
+     * nv3dAllocChannelState() should have been called to assign p3dDevice.
+ */ + nvAssert(p3dChannel->p3dDevice != NULL); + nvAssert(p3dChannel->p3dDevice->pPushDevice == + pParams->pPushChannel->pDevice); + + for (sd = 0; sd < numChannels; sd++) { + + if (nvPushIsAModel(pPushDevice)) { + nvAssert(sd == 0); + } else { + const NvPushImports *pImports = pPushDevice->pImports; + nvAssert(pPushChannel->channelHandle[sd] != 0); + nvAssert(pParams->handle[sd] != 0); + NvU32 ret = pImports->rmApiAlloc(pPushDevice, + pPushChannel->channelHandle[sd], + pParams->handle[sd], + classNumber, + NULL); + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + } + + p3dChannel->handle[sd] = pParams->handle[sd]; + } + + p3dChannel->pPushChannel = pPushChannel; + + return TRUE; +} + +void nv3dFreeChannelObject(Nv3dChannelPtr p3dChannel) +{ + int sd; + + p3dChannel->pPushChannel = NULL; + + // No need to actually free the object here. It gets destroyed during + // channel teardown. + for (sd = 0; sd < ARRAY_LEN(p3dChannel->handle); sd++) { + p3dChannel->handle[sd] = 0; + } +} + +void nv3dUploadDataInline( + Nv3dChannelRec *p3dChannel, + NvU64 gpuBaseAddress, + size_t offset, + const void *data, + size_t bytes) +{ + const Nv3dHal *pHal = p3dChannel->p3dDevice->hal; + + pHal->uploadDataInline(p3dChannel, gpuBaseAddress, offset, data, bytes); +} + +void nv3dClearProgramCache(Nv3dChannelRec *p3dChannel) +{ + Nv3dShaderStage stage; + + for (stage = 0; + stage < ARRAY_LEN(p3dChannel->currentProgramIndex); + stage++) { + p3dChannel->currentProgramIndex[stage] = -1; + } +} + +void nv3dLoadTextures( + Nv3dChannelRec *p3dChannel, + int firstTex, + const Nv3dRenderTexInfo *texInfo, + int numTex) +{ + /* Limit number of texture/samplers on the stack to 4 (256 bytes) */ +#define MAX_TEX_CHUNK 4 + Nv3dTexture textures[MAX_TEX_CHUNK]; + const Nv3dHal *pHal = p3dChannel->p3dDevice->hal; + const NvU64 gpuBaseAddress = nv3dGetTextureGpuAddress(p3dChannel, 0); + + nvAssert(numTex >= 1); + + // Invalidate the texture/sampler caches. This will cause a wait for idle + // if there's rendering still in progress. This is necessary in case the + // texture parameters we're about to overwrite are in use. + _nv3dInvalidateTexturesFermi(p3dChannel); + + while (numTex) { + const NvU32 chunkNumTex = NV_MIN(numTex, MAX_TEX_CHUNK); + const size_t startOffset = sizeof(Nv3dTexture) * firstTex; + const size_t bytes = sizeof(Nv3dTexture) * chunkNumTex; + int i; + + NVMISC_MEMSET(textures, 0, sizeof(textures)); + + nvAssert(firstTex + numTex <= p3dChannel->numTextures); + + // Write texture header to HW format + for (i = 0; i < chunkNumTex; i++) { + pHal->assignNv3dTexture(texInfo[i], &textures[i]); + } + + nv3dUploadDataInline(p3dChannel, gpuBaseAddress, startOffset, + textures, bytes); + + numTex -= chunkNumTex; + firstTex += chunkNumTex; + texInfo += chunkNumTex; + } +} + +void nv3dBindTextures( + Nv3dChannelPtr p3dChannel, + int programIndex, + const int *textureBindingIndices) +{ + nvAssert(programIndex < p3dChannel->programs.num); + + _nv3dBindTexturesKepler(p3dChannel, programIndex, textureBindingIndices); +} diff --git a/src/common/unix/nvidia-3d/src/nvidia-3d-fermi.c b/src/common/unix/nvidia-3d/src/nvidia-3d-fermi.c new file mode 100644 index 0000000..6d42f69 --- /dev/null +++ b/src/common/unix/nvidia-3d/src/nvidia-3d-fermi.c @@ -0,0 +1,557 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-3d-types-priv.h" +#include "nvidia-3d-fermi.h" +#include "nvidia-3d.h" +#include "nvidia-3d-imports.h" +#include "nvidia-3d-constant-buffers.h" +#include "nvidia-3d-shader-constants.h" +#include "nvidia-3d-vertex-arrays.h" +#include "nvidia-push-utils.h" /* nvPushSetObject */ + +#include +#include + +#include + +#if NV_PUSH_ALLOW_FLOAT + #define NV3D_FLOAT_ONE (1.00f) +#else + #define NV3D_FLOAT_ONE 0x3F800000 /* 1.00f */ +#endif + +static void *DecompressUsingXz( + const Nv3dChannelRec *p3dChannel, + const void *compressedData, + size_t compressedSize, + size_t decompressedSize) +{ + NvPushDevicePtr pPushDevice = p3dChannel->p3dDevice->pPushDevice; + const NvPushImports *pImports = pPushDevice->pImports; + void *decompressedData = nv3dImportAlloc(decompressedSize); + struct xz_dec *xzState; + enum xz_ret ret; + + struct xz_buf xzBuf = { + .in = compressedData, + .in_pos = 0, + .in_size = compressedSize, + .out = decompressedData, + .out_pos = 0, + .out_size = decompressedSize, + }; + + if (decompressedData == NULL) { + return NULL; + } + + xz_crc32_init(); + + xzState = xz_dec_init(XZ_SINGLE, 0); + + if (xzState == NULL) { + pImports->logError(pPushDevice, + "Failed to initialize xz decompression."); + goto fail; + } + + ret = xz_dec_run(xzState, &xzBuf); + + xz_dec_end(xzState); + + if (ret != XZ_STREAM_END) { + pImports->logError(pPushDevice, "Failed to decompress xz data."); + goto fail; + } + + return decompressedData; + +fail: + nv3dImportFree(decompressedData); + return NULL; +} + +static void *DecompressPrograms(const Nv3dChannelRec *p3dChannel) +{ + const Nv3dChannelProgramsRec *pPrograms = &p3dChannel->programs; + const size_t compressedSize = + pPrograms->code.compressedEnd - pPrograms->code.compressedStart; + + nvAssert(pPrograms->code.compressedEnd > pPrograms->code.compressedStart); + + return DecompressUsingXz(p3dChannel, + pPrograms->code.compressedStart, + compressedSize, + pPrograms->code.decompressedSize); +} + +/* + * This function attempts to upload the precompiled shaders to the GPU through + * a temporary CPU mapping. + * Failure of this function is not fatal -- we can fall back to uploading + * through the pushbuffer. 
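+ *
+ * Either path ends with the same decompressed program image in the
+ * channel's surface: if the CPU mapping cannot be created, the caller
+ * falls back to the HAL's uploadDataInline method, trading some
+ * pushbuffer bandwidth for the missing BAR1 mapping.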
+ */
+static NvBool UploadPrograms(Nv3dChannelPtr p3dChannel, const void *programCode)
+{
+    NvPushDevicePtr pPushDevice = p3dChannel->p3dDevice->pPushDevice;
+    const NvPushImports *pImports = pPushDevice->pImports;
+    const size_t size = p3dChannel->programs.code.decompressedSize;
+    NvU32 sd;
+
+    for (sd = 0; sd < pPushDevice->numSubDevices; sd++) {
+        NvU32 status;
+        void *ptr;
+        const NvU32 hMemory = pPushDevice->clientSli ?
+            p3dChannel->surface.handle[sd] :
+            p3dChannel->surface.handle[0];
+
+        status = pImports->rmApiMapMemory(pPushDevice,
+                                          pPushDevice->subDevice[sd].handle,
+                                          hMemory,
+                                          p3dChannel->surface.programOffset,
+                                          size,
+                                          &ptr,
+                                          0);
+        if (status != NVOS_STATUS_SUCCESS) {
+            return FALSE;
+        }
+
+        nvAssert((size % 4) == 0);
+        nvDmaMoveDWORDS(ptr, programCode, size / 4);
+
+        status = pImports->rmApiUnmapMemory(pPushDevice,
+                                            pPushDevice->subDevice[sd].handle,
+                                            hMemory,
+                                            ptr,
+                                            0);
+        nvAssert(status == NVOS_STATUS_SUCCESS);
+    }
+
+    return TRUE;
+}
+
+NvBool nv3dInitChannel(Nv3dChannelPtr p3dChannel)
+{
+    NvPushChannelPtr p = p3dChannel->pPushChannel;
+    const Nv3dDeviceCapsRec *pCaps = &p3dChannel->p3dDevice->caps;
+    const Nv3dHal *pHal = p3dChannel->p3dDevice->hal;
+    const NvU64 tex0GpuAddress = nv3dGetTextureGpuAddress(p3dChannel, 0);
+    NvU64 gpuAddress;
+    NvU32 i;
+    void *programCode = DecompressPrograms(p3dChannel);
+
+    if (programCode == NULL) {
+        return FALSE;
+    }
+
+    /*
+     * nv3dAllocChannelState() should have been called to assign p3dDevice.
+     */
+    nvAssert(p3dChannel->p3dDevice != NULL);
+
+    /*
+     * nv3dAllocChannelObject() should have been called to assign
+     * pPushChannel.
+     */
+    nvAssert(p3dChannel->pPushChannel != NULL);
+
+    /*
+     * nv3dAllocChannelSurface() should have been called to allocate
+     * the surface.
+     */
+    nvAssert(p3dChannel->surface.handle[0] != 0);
+
+    nv3dClearProgramCache(p3dChannel);
+
+    p3dChannel->currentPrimitiveMode = ~0;
+
+    nvPushSetObject(p, NVA06F_SUBCHANNEL_3D, p3dChannel->handle);
+
+    // Ct[0]'s format defaults to A8R8G8B8, rather than DISABLED, so set it
+    // to DISABLED explicitly.
+    nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D,
+        NV9097_SET_COLOR_TARGET_FORMAT(0),
+        NV3D_C(9097, SET_COLOR_TARGET_FORMAT, V, DISABLED));
+
+    nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D,
+        NV9097_SET_ZT_SELECT,
+        NV3D_V(9097, SET_ZT_SELECT, TARGET_COUNT, 0));
+
+    // Set a substitute stream address. This is used when the Vertex Attribute
+    // Fetch unit tries to fetch outside the bounds of an enabled stream, which
+    // should never happen. However, AModel always fetches this value
+    // regardless of whether it actually needs it, so it causes MMU errors if
+    // it's not set.
+    nvPushMethod(p, NVA06F_SUBCHANNEL_3D,
+        NV9097_SET_VERTEX_STREAM_SUBSTITUTE_A, 2);
+    nvPushSetMethodDataU64(p, p3dChannel->surface.gpuAddress);
+
+    if (p3dChannel->programLocalMemorySize) {
+        nvPushMethod(p, NVA06F_SUBCHANNEL_3D,
+            NV9097_SET_SHADER_LOCAL_MEMORY_A, 4);
+        // ADDRESS_{UPPER,LOWER}
+        nvPushSetMethodDataU64(p,
+            nv3dGetProgramLocalMemoryGpuAddress(p3dChannel));
+        // SIZE_{UPPER,LOWER}
+        nvPushSetMethodDataU64(p, p3dChannel->programLocalMemorySize);
+    }
+
+    // Point rasterization.
+    nvPushImmed(p, NVA06F_SUBCHANNEL_3D,
+        NV9097_SET_POINT_CENTER_MODE, OGL);
+
+    // SPA Control.
+    nvPushImmed(p, NVA06F_SUBCHANNEL_3D,
+        NV9097_SET_SAMPLER_BINDING, VIA_HEADER_BINDING);
+
+    // Viewport parameters.
+    nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_VIEWPORT_SCALE_OFFSET,
+        NV3D_C(9097, SET_VIEWPORT_SCALE_OFFSET, ENABLE, FALSE));
+
+    // Viewport clip.
There are 16 viewports + for (i = 0; i < 16; i++) { + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, + NV9097_SET_VIEWPORT_CLIP_HORIZONTAL(i), 2); + nvPushSetMethodData(p, pCaps->maxDim << 16); + nvPushSetMethodData(p, pCaps->maxDim << 16); + } + + nvPushImmed(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_PROVOKING_VERTEX, LAST); + + // Use one rop state for all targets + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_SINGLE_ROP_CONTROL, + NV3D_C(9097, SET_SINGLE_ROP_CONTROL, ENABLE, TRUE)); + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_SINGLE_CT_WRITE_CONTROL, + NV3D_C(9097, SET_SINGLE_CT_WRITE_CONTROL, ENABLE, TRUE)); + + // Set up blending: enable Ct[0]. It's disabled by default for the rest. + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_BLEND(0), + NV3D_C(9097, SET_BLEND, ENABLE, TRUE)); + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_BLEND_CONST_ALPHA, 1); + nv3dPushFloat(p, NV3D_FLOAT_ONE); + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, + NV9097_SET_BLEND_SEPARATE_FOR_ALPHA, 2); + nvPushSetMethodData(p, + NV3D_C(9097, SET_BLEND_SEPARATE_FOR_ALPHA, ENABLE, FALSE)); + nvPushSetMethodData(p, + NV3D_C(9097, SET_BLEND_COLOR_OP, V, OGL_FUNC_ADD)); + + // Upload the pixel shaders. First, attempt to upload through a CPU + // mapping (which is generally faster); if that fails (e.g., because there + // is no space in BAR1 for the mapping), then fall back to uploading inline + // through the pushbuffer. + if (!UploadPrograms(p3dChannel, programCode)) { + pHal->uploadDataInline(p3dChannel, + nv3dGetProgramGpuAddress(p3dChannel), + 0, + programCode, + p3dChannel->programs.code.decompressedSize); + } + + nv3dImportFree(programCode); + programCode = NULL; + + for (i = 0; i < p3dChannel->programs.constants.count; i++) { + const Nv3dShaderConstBufInfo *pInfo = + &p3dChannel->programs.constants.info[i]; + + pHal->uploadDataInline(p3dChannel, + nv3dGetProgramConstantsGpuAddress(p3dChannel), + pInfo->offset, + pInfo->data, + pInfo->size); + } + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_INVALIDATE_SHADER_CACHES, 1); + nvPushSetMethodData(p, + DRF_DEF(9097, _INVALIDATE_SHADER_CACHES, _INSTRUCTION, _TRUE) | + DRF_DEF(9097, _INVALIDATE_SHADER_CACHES, _CONSTANT, _TRUE)); + + if (pCaps->hasProgramRegion) { + gpuAddress = nv3dGetProgramGpuAddress(p3dChannel); + + nvAssert((gpuAddress & 255) == 0); + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_PROGRAM_REGION_A, 2); + nvPushSetMethodDataU64(p, gpuAddress); + } + + // Initialize the texture header and sampler area. + // + // To update these things, we upload data through the pushbuffer. The + // upload has an alignment twice the size of a texture header/sampler, so we + // interleave the two. Texture samplers come first. Thus, "texture sampler + // 2i+1" is actually texture header 2i. This allows us to use a single + // upload to update a single texture sampler/header pair if we so desire. + gpuAddress = tex0GpuAddress + offsetof(Nv3dTexture, samp); + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_TEX_SAMPLER_POOL_A, 3); + nvPushSetMethodDataU64(p, gpuAddress); + nvPushSetMethodData(p, 0); // Max index. 0 because we use VIA_HEADER mode. 
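+    // With this interleaved layout, texture i's sampler sits at
+    // sampler-pool index 2i and its header at header-pool index 2i,
+    // which is why the header pool below is programmed with a maximum
+    // index of 2 * (numTextures - 1).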
+ + gpuAddress = tex0GpuAddress + offsetof(Nv3dTexture, head); + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_TEX_HEADER_POOL_A, 3); + nvPushSetMethodDataU64(p, gpuAddress); + nvPushSetMethodData(p, 2 * (NV_MAX(p3dChannel->numTextures, 1) - 1)); // Max index + + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_WINDOW_ORIGIN, + NV3D_C(9097, SET_WINDOW_ORIGIN, MODE, UPPER_LEFT) | + NV3D_C(9097, SET_WINDOW_ORIGIN, FLIP_Y, TRUE)); + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_ZCULL_BOUNDS, 1); + nvPushSetMethodData(p, + NV3D_C(9097, SET_ZCULL_BOUNDS, Z_MIN_UNBOUNDED_ENABLE, FALSE) | + NV3D_C(9097, SET_ZCULL_BOUNDS, Z_MAX_UNBOUNDED_ENABLE, FALSE)); + + pHal->setSpaVersion(p3dChannel); + + pHal->initChannel(p3dChannel); + + _nv3dInitializeStreams(p3dChannel); + + return TRUE; +} + +void nv3dLoadProgram( + Nv3dChannelRec *p3dChannel, + int programIndex) +{ + const Nv3dHal *pHal = p3dChannel->p3dDevice->hal; + const Nv3dProgramInfo *pgm = &p3dChannel->programs.info[programIndex]; + NvPushChannelPtr p = p3dChannel->pPushChannel; + + nvAssert(programIndex < p3dChannel->programs.num); + nvAssert(programIndex >= 0); + nvAssert(pgm->stage < ARRAY_LEN(p3dChannel->currentProgramIndex)); + nvAssert(pgm->bindGroup <= NV3D_HW_BIND_GROUP_LAST); + + if (p3dChannel->currentProgramIndex[pgm->stage] == programIndex) { + return; + } + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, + NV9097_SET_PIPELINE_SHADER(pgm->stage), 1); + nvPushSetMethodData(p, + NV3D_C(9097, SET_PIPELINE_SHADER, ENABLE, TRUE) | + NV3D_V(9097, SET_PIPELINE_SHADER, TYPE, pgm->type)); + + pHal->setProgramOffset(p3dChannel, pgm->stage, pgm->offset); + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, + NV9097_SET_PIPELINE_REGISTER_COUNT(pgm->stage), 2); + nvPushSetMethodData(p, pgm->registerCount); + nvPushSetMethodData(p, pgm->bindGroup); + + // Bind or invalidate the compiler-generated constant buffer slot, which the + // compiler always puts in NV3D_CB_SLOT_COMPILER. 
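+    // If the program has no compiler-generated constants
+    // (constIndex == -1), the slot is simply unbound.  Otherwise the
+    // buffer is selected at its GPU address, with its size padded up to
+    // the hardware's sizeAlign, and then bound to NV3D_CB_SLOT_COMPILER.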
+ if (pgm->constIndex == -1) { + nv3dBindCb(p3dChannel, pgm->bindGroup, + NV3D_CB_SLOT_COMPILER, FALSE); + } else if (p3dChannel->programs.constants.size > 0) { + const Nv3dShaderConstBufInfo *pInfo = + &p3dChannel->programs.constants.info[pgm->constIndex]; + const NvU64 gpuAddress = + nv3dGetProgramConstantsGpuAddress(p3dChannel) + pInfo->offset; + const NvU32 paddedSize = + NV_ALIGN_UP(pInfo->size, p3dChannel->programs.constants.sizeAlign); + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, + NV9097_SET_CONSTANT_BUFFER_SELECTOR_A, 3); + nvPushSetMethodData(p, paddedSize); + nvPushSetMethodDataU64(p, gpuAddress); + nv3dBindCb(p3dChannel, pgm->bindGroup, NV3D_CB_SLOT_COMPILER, TRUE); + } + + p3dChannel->currentProgramIndex[pgm->stage] = programIndex; +} + +void _nv3dSetProgramOffsetFermi( + Nv3dChannelRec *p3dChannel, + NvU32 stage, + NvU32 offset) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, + NV9097_SET_PIPELINE_PROGRAM(stage), 1); + nvPushSetMethodData(p, offset); +} + +void _nv3dInvalidateTexturesFermi( + Nv3dChannelRec *p3dChannel) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, + NV9097_INVALIDATE_SAMPLER_CACHE, + NV3D_C(9097, INVALIDATE_SAMPLER_CACHE, LINES, ALL)); + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, + NV9097_INVALIDATE_TEXTURE_HEADER_CACHE, + NV3D_C(9097, INVALIDATE_TEXTURE_HEADER_CACHE, LINES, ALL)); +} + +void nv3dSetBlendColorCoefficients( + Nv3dChannelPtr p3dChannel, + enum Nv3dBlendOperation op, + NvBool forceNoDstAlphaBits, + NvBool dualSourceBlending) +{ +#define SFACTOR(factor) (NV9097_SET_BLEND_COLOR_SOURCE_COEFF_V_OGL_##factor) +#define DFACTOR(factor) (NV9097_SET_BLEND_COLOR_DEST_COEFF_V_OGL_##factor) +#define OP(op) (NV3D_BLEND_OP_##op) + + static const struct { + NvU32 sfactor; + NvU32 dfactor; + } BlendOps[] = { + [OP(CLEAR)] = {SFACTOR(ZERO), DFACTOR(ZERO)}, + [OP(SRC)] = {SFACTOR(ONE), DFACTOR(ZERO)}, + [OP(DST)] = {SFACTOR(ZERO), DFACTOR(ONE)}, + [OP(OVER)] = {SFACTOR(ONE), DFACTOR(ONE_MINUS_SRC_ALPHA)}, + [OP(OVER_REVERSE)] = {SFACTOR(ONE_MINUS_DST_ALPHA), DFACTOR(ONE)}, + [OP(IN)] = {SFACTOR(DST_ALPHA), DFACTOR(ZERO)}, + [OP(IN_REVERSE)] = {SFACTOR(ZERO), DFACTOR(SRC_ALPHA)}, + [OP(OUT)] = {SFACTOR(ONE_MINUS_DST_ALPHA), DFACTOR(ZERO)}, + [OP(OUT_REVERSE)] = {SFACTOR(ZERO), DFACTOR(ONE_MINUS_SRC_ALPHA)}, + [OP(ATOP)] = {SFACTOR(DST_ALPHA), DFACTOR(ONE_MINUS_SRC_ALPHA)}, + [OP(ATOP_REVERSE)] = {SFACTOR(ONE_MINUS_DST_ALPHA), DFACTOR(SRC_ALPHA)}, + [OP(XOR)] = {SFACTOR(ONE_MINUS_DST_ALPHA), DFACTOR(ONE_MINUS_SRC_ALPHA)}, + [OP(ADD)] = {SFACTOR(ONE), DFACTOR(ONE)}, + [OP(SATURATE)] = {SFACTOR(SRC_ALPHA_SATURATE), DFACTOR(ONE)} + }; + + NvU32 sfactor, dfactor; + + nvAssert(op < ARRAY_LEN(BlendOps)); + + sfactor = BlendOps[op].sfactor; + dfactor = BlendOps[op].dfactor; + + // if we're rendering to a picture that has an XRGB format that HW doesn't + // support, feed in the 1.0 constant DstAlpha value + if (forceNoDstAlphaBits) { + switch (sfactor) { + case SFACTOR(DST_ALPHA): + sfactor = SFACTOR(CONSTANT_ALPHA); + break; + case SFACTOR(ONE_MINUS_DST_ALPHA): + sfactor = SFACTOR(ONE_MINUS_CONSTANT_ALPHA); + break; + default: + break; + } + } + + // If dual-source blending is enabled, swap the dfactor for one that uses + // the second source color. 
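+    // (With dual-source blending the fragment shader exports a second
+    // color output, which the blender consumes as the destination factor;
+    // this mirrors GL_SRC1_COLOR / GL_ONE_MINUS_SRC1_COLOR from OpenGL's
+    // ARB_blend_func_extended.)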
+ if (dualSourceBlending) { + switch (dfactor) { + case DFACTOR(SRC_ALPHA): + case DFACTOR(SRC_COLOR): + dfactor = DFACTOR(SRC1COLOR); + break; + case DFACTOR(ONE_MINUS_SRC_ALPHA): + case DFACTOR(ONE_MINUS_SRC_COLOR): + dfactor = DFACTOR(INVSRC1COLOR); + break; + default: + break; + } + } + + Nv3dBlendState nv3dBlendStateColor = { }; + + nv3dBlendStateColor.blendEquation = NV3D_C(9097, SET_BLEND_COLOR_OP, V, OGL_FUNC_ADD); + nv3dBlendStateColor.blendFactorSrc = sfactor; + nv3dBlendStateColor.blendFactorDst = dfactor; + + nv3dSetBlend(p3dChannel, &nv3dBlendStateColor, NULL, NULL); +} + +void nv3dSetBlend( + Nv3dChannelPtr p3dChannel, + const Nv3dBlendState *blendStateColor, + const Nv3dBlendState *blendStateAlpha, + const Nv3dColor *blendColor) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + + const Nv3dColor defaultColor = { + NV3D_FLOAT_ONE, + NV3D_FLOAT_ONE, + NV3D_FLOAT_ONE, + NV3D_FLOAT_ONE + }; + + if (blendColor == NULL) { + blendColor = &defaultColor; + } + + if (blendStateColor == NULL && blendStateAlpha == NULL) { + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_BLEND(0), + NV3D_C(9097, SET_BLEND, ENABLE, FALSE)); + return; + } + + if (blendStateColor != NULL) { + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_BLEND_COLOR_OP, 3); + nvPushSetMethodData(p, blendStateColor->blendEquation); + nvPushSetMethodData(p, blendStateColor->blendFactorSrc); + nvPushSetMethodData(p, blendStateColor->blendFactorDst); + } + + if (blendStateAlpha != NULL) { + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_BLEND_SEPARATE_FOR_ALPHA, + NV3D_C(9097, SET_BLEND_SEPARATE_FOR_ALPHA, ENABLE, TRUE)); + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_BLEND_ALPHA_OP, 2); + nvPushSetMethodData(p, blendStateAlpha->blendEquation); + nvPushSetMethodData(p, blendStateAlpha->blendFactorSrc); + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_BLEND_ALPHA_DEST_COEFF, 1); + nvPushSetMethodData(p, blendStateAlpha->blendFactorDst); + } else { + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_BLEND_SEPARATE_FOR_ALPHA, + NV3D_C(9097, SET_BLEND_SEPARATE_FOR_ALPHA, ENABLE, FALSE)); + } + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_BLEND_CONST_RED, 4); + nv3dPushFloat(p, blendColor->red); + nv3dPushFloat(p, blendColor->green); + nv3dPushFloat(p, blendColor->blue); + nv3dPushFloat(p, blendColor->alpha); + + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_BLEND(0), + NV3D_C(9097, SET_BLEND, ENABLE, TRUE)); + +} + +void _nv3dSetVertexStreamEndFermi( + Nv3dChannelPtr p3dChannel, + enum Nv3dVertexAttributeStreamType stream, + const Nv3dVertexAttributeStreamRec *pStream) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, + NV9097_SET_VERTEX_STREAM_LIMIT_A_A(stream), 2); + nvPushSetMethodDataU64(p, pStream->end - 1); +} diff --git a/src/common/unix/nvidia-3d/src/nvidia-3d-hopper.c b/src/common/unix/nvidia-3d/src/nvidia-3d-hopper.c new file mode 100644 index 0000000..9d233a2 --- /dev/null +++ b/src/common/unix/nvidia-3d/src/nvidia-3d-hopper.c @@ -0,0 +1,384 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-3d-turing.h" +#include "nvidia-3d-hopper.h" +#include "nvidia-3d.h" + +#include +#include +#include + +void _nv3dInitChannelHopper(Nv3dChannelRec *p3dChannel) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + + _nv3dInitChannelTuring(p3dChannel); + + // Select texture header major version 1 for the new Hopper format. + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NVCB97_SET_TEXTURE_HEADER_VERSION, 1); +} + +void _nv3dAssignNv3dTextureHopper( + Nv3dRenderTexInfo info, + Nv3dTexture *tex) +{ + nvAssert(!info.error); + + switch (info.sizes) { + case NV3D_TEXHEAD_A8B8G8R8: + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _COMPONENTS, + _SIZES_A8B8G8R8, tex->head); + break; + case NV3D_TEXHEAD_A2B10G10R10: + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _COMPONENTS, + _SIZES_A2B10G10R10, tex->head); + break; + case NV3D_TEXHEAD_B5G6R5: + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _COMPONENTS, + _SIZES_B5G6R5, tex->head); + break; + case NV3D_TEXHEAD_A1B5G5R5: + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _COMPONENTS, + _SIZES_A1B5G5R5, tex->head); + break; + case NV3D_TEXHEAD_R8: + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _COMPONENTS, + _SIZES_R8, tex->head); + break; + case NV3D_TEXHEAD_R32: + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _COMPONENTS, + _SIZES_R32, tex->head); + break; + case NV3D_TEXHEAD_R16: + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _COMPONENTS, + _SIZES_R16, tex->head); + break; + case NV3D_TEXHEAD_G8R8: + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _COMPONENTS, + _SIZES_G8R8, tex->head); + break; + case NV3D_TEXHEAD_R16G16B16A16: + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _COMPONENTS, + _SIZES_R16_G16_B16_A16, tex->head); + break; + case NV3D_TEXHEAD_R32G32B32A32: + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _COMPONENTS, + _SIZES_R32_G32_B32_A32, tex->head); + break; + case NV3D_TEXHEAD_Y8_VIDEO: + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _COMPONENTS, + _SIZES_Y8_VIDEO, tex->head); + break; + default: + nvAssert(!"Unrecognized component sizes"); + } + + switch (info.dataType) { + case NV3D_TEXHEAD_NUM_UNORM: + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _DATA_TYPE, + _TEX_DATA_TYPE_UNORM, tex->head); + break; + case NV3D_TEXHEAD_NUM_UINT: + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _DATA_TYPE, + _TEX_DATA_TYPE_UINT, tex->head); + break; + case NV3D_TEXHEAD_NUM_FLOAT: + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _DATA_TYPE, + _TEX_DATA_TYPE_FLOAT, tex->head); + break; + case 
NV3D_TEXHEAD_NUM_SNORM:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _DATA_TYPE,
+                           _TEX_DATA_TYPE_SNORM, tex->head);
+        break;
+    case NV3D_TEXHEAD_NUM_SINT:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _DATA_TYPE,
+                           _TEX_DATA_TYPE_SINT, tex->head);
+        break;
+    }
+
+    switch (info.source.x) {
+    case NV3D_TEXHEAD_IN_A:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _X_SOURCE,
+                           _IN_A, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_R:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _X_SOURCE,
+                           _IN_R, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_G:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _X_SOURCE,
+                           _IN_G, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_B:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _X_SOURCE,
+                           _IN_B, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_ZERO:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _X_SOURCE,
+                           _IN_ZERO, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_ONE_FLOAT:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _X_SOURCE,
+                           _IN_ONE_FLOAT, tex->head);
+        break;
+    }
+
+    switch (info.source.y) {
+    case NV3D_TEXHEAD_IN_A:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _Y_SOURCE,
+                           _IN_A, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_R:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _Y_SOURCE,
+                           _IN_R, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_G:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _Y_SOURCE,
+                           _IN_G, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_B:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _Y_SOURCE,
+                           _IN_B, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_ZERO:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _Y_SOURCE,
+                           _IN_ZERO, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_ONE_FLOAT:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _Y_SOURCE,
+                           _IN_ONE_FLOAT, tex->head);
+        break;
+    }
+
+    switch (info.source.z) {
+    case NV3D_TEXHEAD_IN_A:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _Z_SOURCE,
+                           _IN_A, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_R:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _Z_SOURCE,
+                           _IN_R, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_G:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _Z_SOURCE,
+                           _IN_G, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_B:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _Z_SOURCE,
+                           _IN_B, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_ZERO:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _Z_SOURCE,
+                           _IN_ZERO, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_ONE_FLOAT:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _Z_SOURCE,
+                           _IN_ONE_FLOAT, tex->head);
+        break;
+    }
+
+    switch (info.source.w) {
+    case NV3D_TEXHEAD_IN_A:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _W_SOURCE,
+                           _IN_A, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_R:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _W_SOURCE,
+                           _IN_R, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_G:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _W_SOURCE,
+                           _IN_G, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_B:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _W_SOURCE,
+                           _IN_B, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_ZERO:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _W_SOURCE,
+                           _IN_ZERO, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_ONE_FLOAT:
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _W_SOURCE,
+                           _IN_ONE_FLOAT, tex->head);
+        break;
+    }
+
+    // Default to edge clamping. Our GPU seems to support wrapping
+    // even with non-normalized coordinates.
+    tex->samp[0] =
+        NV3D_C(CB97, TEXSAMP0, ADDRESS_U, CLAMP_TO_EDGE) |
+        NV3D_C(CB97, TEXSAMP0, ADDRESS_V, CLAMP_TO_EDGE) |
+        NV3D_C(CB97, TEXSAMP0, ADDRESS_P, CLAMP_TO_EDGE);
+
+    if (info.texType == NV3D_TEX_TYPE_ONE_D_BUFFER) {
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_1DRT, _ADDRESS_BITS31TO0,
+                           NvU64_LO32(info.offset), tex->head);
+
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_1DRT, _HEADER_VERSION,
+                           _SELECT_ONE_D_RAW_TYPED, tex->head);
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_1DRT, _ADDRESS_BITS63TO32,
+                           NvU64_HI32(info.offset), tex->head);
+
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_1DRT, _WIDTH_MINUS_ONE,
+                           info.width - 1, tex->head);
+    } else if (info.texType == NV3D_TEX_TYPE_TWO_D_PITCH) {
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_PITCH, _ADDRESS_BITS31TO5,
+                           info.offset >> 5, tex->head);
+
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_PITCH, _HEADER_VERSION,
+                           _SELECT_PITCH_V2, tex->head);
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_PITCH, _ADDRESS_BITS56TO32,
+                           NvU64_HI32(info.offset), tex->head);
+
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_PITCH, _PITCH_BITS21TO5,
+                           NvU32_LO16(info.pitch >> 5), tex->head);
+
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_PITCH, _TEXTURE_TYPE,
+                           _TWO_D_NO_MIPMAP, tex->head);
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_PITCH, _WIDTH_MINUS_ONE,
+                           info.width - 1, tex->head);
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_PITCH, _BORDER_SOURCE,
+                           _BORDER_COLOR, tex->head);
+
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_PITCH, _HEIGHT_MINUS_ONE,
+                           info.height - 1, tex->head);
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_PITCH, _NORMALIZED_COORDS,
+                           info.normalizedCoords, tex->head);
+    } else {
+        if (info.texType == NV3D_TEX_TYPE_ONE_D) {
+            FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _TEXTURE_TYPE,
+                               _ONE_D, tex->head);
+        } else if (info.texType == NV3D_TEX_TYPE_TWO_D_BLOCKLINEAR) {
+            FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _TEXTURE_TYPE,
+                               _TWO_D_NO_MIPMAP, tex->head);
+        }
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_BL, _ADDRESS_BITS31TO9,
+                           info.offset >> 9, tex->head);
+
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _HEADER_VERSION,
+                           _SELECT_BLOCKLINEAR_V2, tex->head);
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_BL, _ADDRESS_BITS56TO32,
+                           NvU64_HI32(info.offset), tex->head);
+
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_BL, _GOBS_PER_BLOCK_WIDTH,
+                           info.log2GobsPerBlock.x, tex->head);
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_BL, _GOBS_PER_BLOCK_HEIGHT,
+                           info.log2GobsPerBlock.y, tex->head);
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_BL, _GOBS_PER_BLOCK_DEPTH,
+                           info.log2GobsPerBlock.z, tex->head);
+
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_BL, _WIDTH_MINUS_ONE,
+                           info.width - 1, tex->head);
+        FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _BORDER_SOURCE,
+                           _BORDER_COLOR, tex->head);
+
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_BL, _HEIGHT_MINUS_ONE,
+                           info.height - 1, tex->head);
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_BL, _DEPTH_MINUS_ONE,
+                           0, tex->head);
+        FLD_SET_DRF_NUM_MW(CB97, _TEXHEAD_V2_BL, _NORMALIZED_COORDS,
+                           info.normalizedCoords, tex->head);
+    }
+
+    switch (info.repeatType) {
+    case NV3D_TEXHEAD_REPEAT_TYPE_NORMAL:
+        tex->samp[0] = NV3D_C(CB97, TEXSAMP0, ADDRESS_U, WRAP) |
+                       NV3D_C(CB97, TEXSAMP0, ADDRESS_V, WRAP);
+        break;
+    case NV3D_TEXHEAD_REPEAT_TYPE_PAD:
+        tex->samp[0] = NV3D_C(CB97, TEXSAMP0, ADDRESS_U, CLAMP_TO_EDGE) |
+                       NV3D_C(CB97, TEXSAMP0, ADDRESS_V, CLAMP_TO_EDGE);
+        break;
+    case NV3D_TEXHEAD_REPEAT_TYPE_REFLECT:
+        tex->samp[0] = NV3D_C(CB97, TEXSAMP0, ADDRESS_U, MIRROR) |
+                       NV3D_C(CB97, TEXSAMP0, ADDRESS_V, MIRROR);
+        break;
+    case NV3D_TEXHEAD_REPEAT_TYPE_NONE:
+        tex->samp[0] = NV3D_C(CB97, TEXSAMP0, ADDRESS_U, BORDER)
| + NV3D_C(CB97, TEXSAMP0, ADDRESS_V, BORDER); + break; + } + + switch (info.filtering) { + case NV3D_TEXHEAD_FILTER_TYPE_NEAREST: + tex->samp[1] = NV3D_C(CB97, TEXSAMP1, MAG_FILTER, MAG_POINT) | + NV3D_C(CB97, TEXSAMP1, MIN_FILTER, MIN_POINT) | + NV3D_C(CB97, TEXSAMP1, MIP_FILTER, MIP_NONE); + break; + + case NV3D_TEXHEAD_FILTER_TYPE_LINEAR: + tex->samp[1] = NV3D_C(CB97, TEXSAMP1, MAG_FILTER, MAG_LINEAR) | + NV3D_C(CB97, TEXSAMP1, MIN_FILTER, MIN_LINEAR) | + NV3D_C(CB97, TEXSAMP1, MIP_FILTER, MIP_NONE); + break; + + case NV3D_TEXHEAD_FILTER_TYPE_ANISO_2X: + tex->samp[0] |= NV3D_C(CB97, TEXSAMP0, MAX_ANISOTROPY, ANISO_2_TO_1); + tex->samp[1] = NV3D_C(CB97, TEXSAMP1, MAG_FILTER, MAG_LINEAR) | + NV3D_C(CB97, TEXSAMP1, MIN_FILTER, MIN_ANISO) | + NV3D_C(CB97, TEXSAMP1, MIP_FILTER, MIP_NONE); + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _MAX_ANISOTROPY, + _ANISO_2_TO_1, tex->head); + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _ANISO_FINE_SPREAD_MODIFIER, + _SPREAD_MODIFIER_CONST_TWO, tex->head); + + break; + + case NV3D_TEXHEAD_FILTER_TYPE_ANISO_4X: + tex->samp[0] |= NV3D_C(CB97, TEXSAMP0, MAX_ANISOTROPY, ANISO_4_TO_1); + tex->samp[1] = NV3D_C(CB97, TEXSAMP1, MAG_FILTER, MAG_LINEAR) | + NV3D_C(CB97, TEXSAMP1, MIN_FILTER, MIN_ANISO) | + NV3D_C(CB97, TEXSAMP1, MIP_FILTER, MIP_NONE); + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _MAX_ANISOTROPY, + _ANISO_4_TO_1, tex->head); + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _ANISO_FINE_SPREAD_MODIFIER, + _SPREAD_MODIFIER_CONST_TWO, tex->head); + break; + + case NV3D_TEXHEAD_FILTER_TYPE_ANISO_8X: + tex->samp[0] |= NV3D_C(CB97, TEXSAMP0, MAX_ANISOTROPY, ANISO_8_TO_1); + tex->samp[1] = NV3D_C(CB97, TEXSAMP1, MAG_FILTER, MAG_LINEAR) | + NV3D_C(CB97, TEXSAMP1, MIN_FILTER, MIN_ANISO) | + NV3D_C(CB97, TEXSAMP1, MIP_FILTER, MIP_NONE); + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _MAX_ANISOTROPY, + _ANISO_8_TO_1, tex->head); + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _ANISO_FINE_SPREAD_MODIFIER, + _SPREAD_MODIFIER_CONST_TWO, tex->head); + + break; + + case NV3D_TEXHEAD_FILTER_TYPE_ANISO_16X: + tex->samp[0] |= NV3D_C(CB97, TEXSAMP0, MAX_ANISOTROPY, ANISO_16_TO_1); + tex->samp[1] = NV3D_C(CB97, TEXSAMP1, MAG_FILTER, MAG_LINEAR) | + NV3D_C(CB97, TEXSAMP1, MIN_FILTER, MIN_ANISO) | + NV3D_C(CB97, TEXSAMP1, MIP_FILTER, MIP_NONE); + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _MAX_ANISOTROPY, + _ANISO_16_TO_1, tex->head); + FLD_SET_DRF_DEF_MW(CB97, _TEXHEAD_V2_BL, _ANISO_FINE_SPREAD_MODIFIER, + _SPREAD_MODIFIER_CONST_TWO, tex->head); + break; + + } +} diff --git a/src/common/unix/nvidia-3d/src/nvidia-3d-init.c b/src/common/unix/nvidia-3d/src/nvidia-3d-init.c new file mode 100644 index 0000000..df1f150 --- /dev/null +++ b/src/common/unix/nvidia-3d/src/nvidia-3d-init.c @@ -0,0 +1,493 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+
+#include "nvidia-3d.h"
+#include "nvidia-3d-surface.h"
+#include "nvidia-3d-types-priv.h"
+
+#include "nvidia-3d-fermi.h"
+#include "nvidia-3d-kepler.h"
+#include "nvidia-3d-maxwell.h"
+#include "nvidia-3d-pascal.h"
+#include "nvidia-3d-volta.h"
+#include "nvidia-3d-turing.h"
+#include "nvidia-3d-hopper.h"
+
+#include "nvidia-push-init.h" // nvPushGetSupportedClassIndex()
+#include "nvidia-push-utils.h" // nvPushIsAModel()
+
+#include // HOPPER_A
+#include // ADA_A
+#include // AMPERE_B
+#include // AMPERE_A
+#include // TURING_A
+#include // VOLTA_A
+#include // PASCAL_B
+#include // PASCAL_A
+#include // MAXWELL_B
+#include // MAXWELL_A
+
+#include
+#include
+
+#include "g_maxwell_shader_info.h"
+#include "g_pascal_shader_info.h"
+#include "g_volta_shader_info.h"
+#include "g_turing_shader_info.h"
+#include "g_ampere_shader_info.h"
+#include "g_hopper_shader_info.h"
+
+#define _NV3D_CHANNEL_PROGRAMS_ENTRY(_archLower, _archCamel, _archUpper) \
+    [NV3D_SHADER_ARCH_ ## _archUpper ] = {                               \
+        .num = NUM_PROGRAMS,                                             \
+        .info = _archCamel ## ProgramInfo,                               \
+        .maxLocalBytes = _archCamel ## ShaderMaxLocalBytes,              \
+        .maxStackBytes = _archCamel ## ShaderMaxStackBytes,              \
+        .code.decompressedSize = _archCamel ## ProgramHeapSize,          \
+        .code.compressedStart =                                          \
+            ({ extern const unsigned char                                \
+               _binary_ ## _archLower ## _shaders_xz_start[];            \
+               _binary_ ## _archLower ## _shaders_xz_start; }),          \
+        .code.compressedEnd =                                            \
+            ({ extern const unsigned char                                \
+               _binary_ ## _archLower ## _shaders_xz_end[];              \
+               _binary_ ## _archLower ## _shaders_xz_end; }),            \
+        .constants.info = _archCamel ## ConstBufInfo,                    \
+        .constants.count =                                               \
+            (NvU32)ARRAY_LEN(_archCamel ## ConstBufInfo),                \
+        .constants.size = _archCamel ## ConstBufSize,                    \
+        .constants.sizeAlign = _archCamel ## ConstBufSizeAlign,          \
+    }
+
+static Nv3dChannelProgramsRec PickProgramsRec(
+    const Nv3dDeviceRec *p3dDevice)
+{
+    const Nv3dChannelProgramsRec programsTable[NV3D_SHADER_ARCH_COUNT] = {
+
+        _NV3D_CHANNEL_PROGRAMS_ENTRY(maxwell, Maxwell, MAXWELL),
+        _NV3D_CHANNEL_PROGRAMS_ENTRY(pascal, Pascal, PASCAL),
+        _NV3D_CHANNEL_PROGRAMS_ENTRY(volta, Volta, VOLTA),
+        _NV3D_CHANNEL_PROGRAMS_ENTRY(turing, Turing, TURING),
+        _NV3D_CHANNEL_PROGRAMS_ENTRY(ampere, Ampere, AMPERE),
+    };
+
+    return programsTable[p3dDevice->shaderArch];
+}
+
+#undef _NV3D_CHANNEL_PROGRAMS_ENTRY
+
+
+static NvBool QueryThreadsAndWarpsOneSd(
+    Nv3dDevicePtr p3dDevice,
+    NvU32 sd,
+    NvU32 *pMaxWarps,
+    NvU32 *pThreadsPerWarp)
+{
+    NvPushDevicePtr pPushDevice = p3dDevice->pPushDevice;
+    const NvPushImports *pImports = pPushDevice->pImports;
+    NvU32 ret;
+
+    NV2080_CTRL_GR_GET_INFO_PARAMS grInfoParams = { 0 };
+    struct {
+        NV2080_CTRL_GR_INFO numSMs;
+        NV2080_CTRL_GR_INFO maxWarpsPerSM;
+        NV2080_CTRL_GR_INFO threadsPerWarp;
+    } grInfo;
+
+    NVMISC_MEMSET(&grInfo, 0, sizeof(grInfo));
+
+    grInfo.numSMs.index =
+        NV2080_CTRL_GR_INFO_INDEX_SHADER_PIPE_COUNT;
+    grInfo.maxWarpsPerSM.index =
+        NV2080_CTRL_GR_INFO_INDEX_MAX_WARPS_PER_SM;
+    grInfo.threadsPerWarp.index =
NV2080_CTRL_GR_INFO_INDEX_MAX_THREADS_PER_WARP;
+
+    grInfoParams.grInfoListSize =
+        sizeof(grInfo) / sizeof(NV2080_CTRL_GR_INFO);
+
+    grInfoParams.grInfoList = NV_PTR_TO_NvP64(&grInfo);
+
+    ret = pImports->rmApiControl(pPushDevice,
+                                 pPushDevice->subDevice[sd].handle,
+                                 NV2080_CTRL_CMD_GR_GET_INFO,
+                                 &grInfoParams,
+                                 sizeof(grInfoParams));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        return FALSE;
+    }
+
+    *pMaxWarps = grInfo.numSMs.data * grInfo.maxWarpsPerSM.data;
+    *pThreadsPerWarp = grInfo.threadsPerWarp.data;
+
+    return TRUE;
+}
+
+static NvBool GetMaxThreadsAndWarps(Nv3dDevicePtr p3dDevice)
+{
+    NvU32 sd;
+
+    p3dDevice->maxThreadsPerWarp = 0;
+    p3dDevice->maxWarps = 0;
+
+    for (sd = 0; sd < p3dDevice->pPushDevice->numSubDevices; sd++) {
+
+        NvU32 maxWarps, threadsPerWarp;
+
+        if (!QueryThreadsAndWarpsOneSd(p3dDevice, sd,
+                                       &maxWarps, &threadsPerWarp)) {
+            return FALSE;
+        }
+
+        p3dDevice->maxThreadsPerWarp =
+            NV_MAX(p3dDevice->maxThreadsPerWarp, threadsPerWarp);
+
+        p3dDevice->maxWarps = NV_MAX(p3dDevice->maxWarps, maxWarps);
+    }
+
+    return TRUE;
+}
+
+/*!
+ * Get the SM version reported by resman.
+ *
+ * \param pPushDevice The nvidia-push device corresponding to the GPU.
+ *
+ * \return The SM version of this device.
+ */
+static NvU32 GetSmVersion(
+    NvPushDevicePtr pPushDevice)
+{
+    NvU32 sd, smVersion = NV2080_CTRL_GR_INFO_SM_VERSION_NONE;
+
+    if (nvPushIsAModel(pPushDevice)) {
+        /*
+         * On amodel resman cannot tell us the SM version, so we pick
+         * the SM version based on NVAModelConfig.
+         */
+        static const NvU32 table[] = {
+            [NV_AMODEL_MAXWELL] = NV2080_CTRL_GR_INFO_SM_VERSION_5_0,
+            [NV_AMODEL_PASCAL] = NV2080_CTRL_GR_INFO_SM_VERSION_6_0,
+            [NV_AMODEL_VOLTA] = NV2080_CTRL_GR_INFO_SM_VERSION_7_0,
+            [NV_AMODEL_TURING] = NV2080_CTRL_GR_INFO_SM_VERSION_7_5,
+            [NV_AMODEL_AMPERE] = NV2080_CTRL_GR_INFO_SM_VERSION_8_2,
+        };
+
+        if (pPushDevice->amodelConfig >= ARRAY_LEN(table)) {
+            return NV2080_CTRL_GR_INFO_SM_VERSION_NONE;
+        }
+
+        return table[pPushDevice->amodelConfig];
+    }
+
+    /*
+     * Query the SM version from resman. This query is per-subDevice,
+     * but we use SM version per-device, so assert that the SM version
+     * matches across subDevices.
+     */
+    for (sd = 0; sd < pPushDevice->numSubDevices; sd++) {
+
+        const NvPushImports *pImports = pPushDevice->pImports;
+        NV2080_CTRL_GR_GET_INFO_PARAMS params = { };
+        NV2080_CTRL_GR_INFO smVersionParams = { };
+        NvU32 ret;
+
+        smVersionParams.index = NV2080_CTRL_GR_INFO_INDEX_SM_VERSION;
+        params.grInfoListSize = 1;
+        params.grInfoList = NV_PTR_TO_NvP64(&smVersionParams);
+
+        ret = pImports->rmApiControl(pPushDevice,
+                                     pPushDevice->subDevice[sd].handle,
+                                     NV2080_CTRL_CMD_GR_GET_INFO,
+                                     &params,
+                                     sizeof(params));
+
+        if (ret != NVOS_STATUS_SUCCESS) {
+            return NV2080_CTRL_GR_INFO_SM_VERSION_NONE;
+        }
+
+        if (sd == 0) {
+            smVersion = smVersionParams.data;
+        } else {
+            nvAssert(smVersion == smVersionParams.data);
+        }
+    }
+
+    return smVersion;
+}
+
+/*!
+ * Get the SPA version to use with the 3D Class.
+ *
+ * Note that resman only reports the SM version (the "hardware
+ * revision"), not the SPA version (the ISA version). So we use a
+ * table to map from SM version to SPA version.
+ *
+ * \param pPushDevice The nvidia-push device corresponding to the GPU.
+ * \param pSpaVersion The spaVersion to assign.
+ *
+ * \return TRUE if the SPA version could be assigned.
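+ *         FALSE is returned when resman reports an SM version with no
+ *         entry in the table below (for example, on a GPU newer than
+ *         this table), in which case nv3dAllocDevice() fails.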
+ */ +static NvBool GetSpaVersion( + NvPushDevicePtr pPushDevice, + Nv3dDeviceSpaVersionRec *pSpaVersion) +{ + static const struct { + NvU32 smVersion; + Nv3dDeviceSpaVersionRec spaVersion; + } table[] = { + /* Maxwell */ + { NV2080_CTRL_GR_INFO_SM_VERSION_5_0, { 5,0 } }, + { NV2080_CTRL_GR_INFO_SM_VERSION_5_2, { 5,2 } }, + { NV2080_CTRL_GR_INFO_SM_VERSION_5_3, { 5,3 } }, + + /* Pascal */ + { NV2080_CTRL_GR_INFO_SM_VERSION_6_0, { 5,5 } }, + { NV2080_CTRL_GR_INFO_SM_VERSION_6_1, { 5,5 } }, + { NV2080_CTRL_GR_INFO_SM_VERSION_6_2, { 5,6 } }, + + /* Volta */ + { NV2080_CTRL_GR_INFO_SM_VERSION_7_0, { 7,0 } }, + { NV2080_CTRL_GR_INFO_SM_VERSION_7_2, { 7,2 } }, + + /* Turing */ + { NV2080_CTRL_GR_INFO_SM_VERSION_7_3, { 7,3 } }, + { NV2080_CTRL_GR_INFO_SM_VERSION_7_5, { 7,5 } }, + + /* Ampere */ + { NV2080_CTRL_GR_INFO_SM_VERSION_8_2, { 8,2 } }, + { NV2080_CTRL_GR_INFO_SM_VERSION_8_6, { 8,6 } }, + { NV2080_CTRL_GR_INFO_SM_VERSION_8_7, { 8,6 } }, + { NV2080_CTRL_GR_INFO_SM_VERSION_8_8, { 8,6 } }, + + /* Ada */ + { NV2080_CTRL_GR_INFO_SM_VERSION_8_9, { 8,9 } }, + + /* Hopper */ + { NV2080_CTRL_GR_INFO_SM_VERSION_9_0, { 9,0 } }, + + }; + + const NvU32 smVersion = GetSmVersion(pPushDevice); + NvU32 i; + + for (i = 0; i < ARRAY_LEN(table); i++) { + if (table[i].smVersion == smVersion) { + *pSpaVersion = table[i].spaVersion; + return TRUE; + } + } + + return FALSE; +} + +static const Nv3dHal _nv3dHalMaxwell = { + _nv3dSetSpaVersionKepler, /* setSpaVersion */ + _nv3dInitChannelMaxwell, /* initChannel */ + _nv3dUploadDataInlineKepler, /* uploadDataInline */ + _nv3dSetProgramOffsetFermi, /* setProgramOffset */ + _nv3dAssignNv3dTextureMaxwell, /* assignNv3dTexture */ + _nv3dSetVertexStreamEndFermi, /* setVertexStreamEnd */ +}; + +static const Nv3dHal _nv3dHalPascal = { + _nv3dSetSpaVersionKepler, /* setSpaVersion */ + _nv3dInitChannelPascal, /* initChannel */ + _nv3dUploadDataInlineKepler, /* uploadDataInline */ + _nv3dSetProgramOffsetFermi, /* setProgramOffset */ + _nv3dAssignNv3dTexturePascal, /* assignNv3dTexture */ + _nv3dSetVertexStreamEndFermi, /* setVertexStreamEnd */ +}; + +static const Nv3dHal _nv3dHalVolta = { + _nv3dSetSpaVersionKepler, /* setSpaVersion */ + _nv3dInitChannelPascal, /* initChannel */ + _nv3dUploadDataInlineKepler, /* uploadDataInline */ + _nv3dSetProgramOffsetVolta, /* setProgramOffset */ + _nv3dAssignNv3dTexturePascal, /* assignNv3dTexture */ + _nv3dSetVertexStreamEndFermi, /* setVertexStreamEnd */ +}; + +static const Nv3dHal _nv3dHalTuring = { + _nv3dSetSpaVersionKepler, /* setSpaVersion */ + _nv3dInitChannelTuring, /* initChannel */ + _nv3dUploadDataInlineKepler, /* uploadDataInline */ + _nv3dSetProgramOffsetVolta, /* setProgramOffset */ + _nv3dAssignNv3dTexturePascal, /* assignNv3dTexture */ + _nv3dSetVertexStreamEndTuring, /* setVertexStreamEnd */ +}; + +static const Nv3dHal _nv3dHalAmpere = { + _nv3dSetSpaVersionKepler, /* setSpaVersion */ + _nv3dInitChannelTuring, /* initChannel */ + _nv3dUploadDataInlineKepler, /* uploadDataInline */ + _nv3dSetProgramOffsetVolta, /* setProgramOffset */ + _nv3dAssignNv3dTexturePascal, /* assignNv3dTexture */ + _nv3dSetVertexStreamEndTuring, /* setVertexStreamEnd */ +}; + +NvBool nv3dAllocDevice( + const Nv3dAllocDeviceParams *pParams, + Nv3dDevicePtr p3dDevice) +{ + static const struct { + NvPushSupportedClass base; + const Nv3dDeviceCapsRec caps; + const Nv3dHal *hal; + enum Nv3dShaderArch shaderArch; + } table[] = { + +#define ENTRY(_classNumber, \ + _arch, \ + _amodelArch, \ + _hasSetBindlessTexture, \ + _hasProgramRegion, \ + _maxDim, \ 
+ _hal) \ + { \ + .base.classNumber = _classNumber, \ + .base.amodelConfig = NV_AMODEL_ ## _amodelArch, \ + .caps.hasSetBindlessTexture = _hasSetBindlessTexture, \ + .caps.hasProgramRegion = _hasProgramRegion, \ + .caps.maxDim = _maxDim, \ + .hal = &_nv3dHal ## _hal, \ + .shaderArch = NV3D_SHADER_ARCH_ ## _arch,\ + } + + /* + * hal--------------------------------------------------+ + * maxDim----------------------------------------+ | + * hasProgramRegion---------------------------+ | | + * hasSetBindlessTexture-------------------+ | | | + * amodel arch----------------+ | | | | + * shader arch---+ | | | | | + * classNumber | | | | | | + * | | | | | | | + */ + ENTRY(AMPERE_B, AMPERE, AMPERE, 0, 0, 32768, Ampere), + ENTRY(AMPERE_A, AMPERE, AMPERE, 0, 0, 32768, Ampere), + ENTRY(TURING_A, TURING, TURING, 0, 0, 32768, Turing), + ENTRY(VOLTA_A, VOLTA, VOLTA, 0, 0, 32768, Volta), + ENTRY(PASCAL_B, PASCAL, PASCAL, 1, 1, 32768, Pascal), + ENTRY(PASCAL_A, PASCAL, PASCAL, 1, 1, 32768, Pascal), + ENTRY(MAXWELL_B, MAXWELL, MAXWELL, 1, 1, 16384, Maxwell), + ENTRY(MAXWELL_A, MAXWELL, MAXWELL, 1, 1, 16384, Maxwell), + }; + + int i; + + NVMISC_MEMSET(p3dDevice, 0, sizeof(*p3dDevice)); + + /* find the first supported 3D HAL */ + + i = nvPushGetSupportedClassIndex(pParams->pPushDevice, + table, + sizeof(table[0]), + ARRAY_LEN(table)); + if (i == -1) { + goto fail; + } + + if (!GetSpaVersion(pParams->pPushDevice, &p3dDevice->spaVersion)) { + goto fail; + } + + p3dDevice->pPushDevice = pParams->pPushDevice; + p3dDevice->caps = table[i].caps; + p3dDevice->classNumber = table[i].base.classNumber; + p3dDevice->hal = table[i].hal; + p3dDevice->shaderArch = table[i].shaderArch; + + if (!GetMaxThreadsAndWarps(p3dDevice)) { + goto fail; + } + + return TRUE; + +fail: + nv3dFreeDevice(p3dDevice); + return FALSE; +} + +void nv3dFreeDevice(Nv3dDevicePtr p3dDevice) +{ + /* + * So far, there is nothing to free: Nv3dDevicePtr only stores + * queried information. + */ + NVMISC_MEMSET(p3dDevice, 0, sizeof(*p3dDevice)); +} + +static NvU64 ComputeProgramLocalMemorySize( + const Nv3dChannelRec *p3dChannel) +{ + const Nv3dDeviceRec *p3dDevice = p3dChannel->p3dDevice; + + // LocalMemorySizePerSM needs to be a multiple of 512 + // Note that maxLocalBytes and/or maxStackBytes might be zero. 
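+    // Worked example with hypothetical values: maxLocalBytes = 16,
+    // maxThreadsPerWarp = 32 and maxStackBytes = 0 give
+    // 16 * 32 + 0 = 512 bytes per warp (already 512-aligned); with
+    // maxWarps = 1536 that totals 768 KiB, which is a multiple of
+    // 128 KiB, so no further rounding occurs.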
+ const NvU64 defaultSizePerWarp = + NV_ALIGN_UP(p3dChannel->programs.maxLocalBytes * + p3dDevice->maxThreadsPerWarp + + p3dChannel->programs.maxStackBytes, 512); + + // shader local memory lower bits must be a multiple of 128kB + return NV_ALIGN_UP(defaultSizePerWarp * p3dDevice->maxWarps, 128*1024); +} + +NvBool nv3dAllocChannelState( + const Nv3dAllocChannelStateParams *pParams, + Nv3dChannelPtr p3dChannel) +{ + NVMISC_MEMSET(p3dChannel, 0, sizeof(*p3dChannel)); + + p3dChannel->p3dDevice = pParams->p3dDevice; + + p3dChannel->numTextures = pParams->numTextures; + p3dChannel->numTextureBindings = pParams->numTextureBindings; + + p3dChannel->hasFrameBoundaries = pParams->hasFrameBoundaries; + + p3dChannel->programs = PickProgramsRec(pParams->p3dDevice); + + p3dChannel->programLocalMemorySize = + ComputeProgramLocalMemorySize(p3dChannel); + + _nv3dAssignSurfaceOffsets(pParams, p3dChannel); + + return TRUE; +} + +void nv3dFreeChannelState(Nv3dChannelPtr p3dChannel) +{ + int sd; + for (sd = 0; sd < NV_MAX_SUBDEVICES; sd++) { + nvAssert(p3dChannel->surface.handle[sd] == 0); + } + nvAssert(p3dChannel->pPushChannel == NULL); + + NVMISC_MEMSET(p3dChannel, 0, sizeof(*p3dChannel)); +} + diff --git a/src/common/unix/nvidia-3d/src/nvidia-3d-kepler.c b/src/common/unix/nvidia-3d/src/nvidia-3d-kepler.c new file mode 100644 index 0000000..5ac99c3 --- /dev/null +++ b/src/common/unix/nvidia-3d/src/nvidia-3d-kepler.c @@ -0,0 +1,150 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-3d-kepler.h" +#include "nvidia-3d.h" +#include "nvidia-3d-constant-buffers.h" +#include "nvidia-3d-shader-constants.h" + +#include +#include + +void _nv3dSetSpaVersionKepler(Nv3dChannelRec *p3dChannel) +{ + NvPushChannelPtr pPushChannel = p3dChannel->pPushChannel; + const NvU16 major = p3dChannel->p3dDevice->spaVersion.major; + const NvU16 minor = p3dChannel->p3dDevice->spaVersion.minor; + + // Tell AModel or fmodel what shader model version to use. This has no + // effect on real hardware. The SM version (the "hardware revision" of the + // SM block) does not always match the SPA version (the ISA version). 
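+    // For example, the GetSpaVersion() table earlier in this change maps
+    // the Pascal SM versions 6.0 and 6.1 to SPA version 5.5, and the
+    // Ampere SM versions 8.7 and 8.8 to SPA version 8.6.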
+ nvPushMethod(pPushChannel, NVA06F_SUBCHANNEL_3D, + NVA097_SET_SPA_VERSION, 1); + nvPushSetMethodData(pPushChannel, + NV3D_V(A097, SET_SPA_VERSION, MAJOR, major) | + NV3D_V(A097, SET_SPA_VERSION, MINOR, minor)); +} + +void _nv3dInitChannelKepler(Nv3dChannelRec *p3dChannel) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + + // Configure constant buffer slot NV3D_CB_SLOT_BINDLESS_TEXTURE as the + // place the texture binding table is stored. This is obsolete on Volta and + // later, so don't run it there. + if (p3dChannel->p3dDevice->caps.hasSetBindlessTexture) { + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, + NVA097_SET_BINDLESS_TEXTURE, + NV3D_V(A097, SET_BINDLESS_TEXTURE, CONSTANT_BUFFER_SLOT_SELECT, + NV3D_CB_SLOT_BINDLESS_TEXTURE)); + } + + // Disable shader exceptions. This matches OpenGL driver behavior. + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, + NV9097_SET_SHADER_EXCEPTIONS, + NV3D_C(9097, SET_SHADER_EXCEPTIONS, ENABLE, FALSE)); +} + +/*! + * Upload data using the INLINE_TO_MEMORY methods embedded in the KEPLER_A + * class. + * + * The number of dwords pushed inline is limited by nvPushMaxMethodCount(). + * Push the data in multiple chunks, if necessary. + */ +void _nv3dUploadDataInlineKepler( + Nv3dChannelRec *p3dChannel, + NvU64 gpuBaseAddress, + size_t offset, + const void *data, + size_t bytes) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + /* + * Below we use '1 + dwordsThisChunk' as the method count, so subtract one + * when computing chunkSizeDwords. + */ + const NvU32 chunkSizeDwords = nvPushMaxMethodCount(p) - 1; + const NvU32 chunkSize = chunkSizeDwords * 4; /* in bytes */ + size_t bytesSoFar; + + // Only allow uploading complete dwords. + nvAssert((bytes & 3) == 0); + + for (bytesSoFar = 0; bytesSoFar < bytes; bytesSoFar += chunkSize) { + + const NvU32 bytesThisChunk = NV_MIN(bytes - bytesSoFar, chunkSize); + const NvU32 dwordsThisChunk = bytesThisChunk / 4; + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NVA097_LINE_LENGTH_IN, 5); + nvPushSetMethodData(p, bytesThisChunk); + nvPushSetMethodData(p, 1); // NVA097_LINE_COUNT + nvPushSetMethodDataU64(p, gpuBaseAddress + offset + bytesSoFar); + nvPushSetMethodData(p, bytesThisChunk); // NVA097_PITCH_OUT + + nvPushMethodOneIncr(p, NVA06F_SUBCHANNEL_3D, NVA097_LAUNCH_DMA, + 1 + dwordsThisChunk); + nvPushSetMethodData(p, + NV3D_C(A097, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) | + // Disable flush -- As long as only 3D requires the data uploaded, + // we don't need to incur the performance penalty of a sys-membar. + NV3D_C(A097, LAUNCH_DMA, COMPLETION_TYPE, FLUSH_DISABLE) | + NV3D_C(A097, LAUNCH_DMA, INTERRUPT_TYPE, NONE) | + NV3D_C(A097, LAUNCH_DMA, SYSMEMBAR_DISABLE, TRUE)); + nvPushInlineData(p, (const NvU8 *)data + bytesSoFar, dwordsThisChunk); + } +} + +void _nv3dBindTexturesKepler( + Nv3dChannelPtr p3dChannel, + int programIndex, + const int *textureBindingIndices) +{ + const NvU16 numTextureBindings = p3dChannel->numTextureBindings; + NvPushChannelUnion *remappedBinding = NULL; + NvU8 slot; + const NvU64 gpuAddress = + nv3dGetBindlessTextureConstantBufferGpuAddress(p3dChannel); + + nv3dSelectCbAddress(p3dChannel, gpuAddress, NV3D_CONSTANT_BUFFER_SIZE); + nv3dBindCb(p3dChannel, NV3D_HW_BIND_GROUP_FRAGMENT, + NV3D_CB_SLOT_BINDLESS_TEXTURE, TRUE); + /* + * Set up the header in the pushbuffer for the LOAD_CONSTANTS method. The + * below loop will write the data to upload directly into the pushbuffer. 
+ */ + remappedBinding = nv3dLoadConstantsHeader(p3dChannel, 0, + numTextureBindings); + + for (slot = 0; slot < numTextureBindings; slot++) { + int tex = textureBindingIndices[slot]; + + /* + * Bindless texture packed pointers. Technically, these consist of + * a header at bits 19:0 and a sampler in 32:20, but we don't need + * to set a separate header because we enabled + * SET_SAMPLER_BINDING_VIA_HEADER_BINDING. + */ + remappedBinding[slot].u = tex * 2; + } +} diff --git a/src/common/unix/nvidia-3d/src/nvidia-3d-maxwell.c b/src/common/unix/nvidia-3d/src/nvidia-3d-maxwell.c new file mode 100644 index 0000000..295f827 --- /dev/null +++ b/src/common/unix/nvidia-3d/src/nvidia-3d-maxwell.c @@ -0,0 +1,435 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-3d-maxwell.h" +#include "nvidia-3d-kepler.h" +#include "nvidia-3d.h" + +#include "class/clb097.h" +#include "class/clb097tex.h" +#include + +void _nv3dInitChannelMaxwell(Nv3dChannelRec *p3dChannel) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + const Nv3dDeviceRec *p3dDevice = p3dChannel->p3dDevice; + + _nv3dInitChannelKepler(p3dChannel); + + if (p3dDevice->classNumber == MAXWELL_A) { + /* + * Use Maxwell texture header format. + * + * maxwell.mfs says: + * NOTE: this method is required to be sent in GM10x. It is ignored + * and treated as a NOP in GM20x. + * + * And on later chips, it is removed and causes exceptions. So we only + * send this on GM10x (class MAXWELL_A). 
+ */ + nvPushImmed(p, NVA06F_SUBCHANNEL_3D, + NVB097_SET_SELECT_MAXWELL_TEXTURE_HEADERS, TRUE); + } +} + +void _nv3dAssignNv3dTextureMaxwell( + Nv3dRenderTexInfo info, + Nv3dTexture *tex) +{ + NvU32 hi_offset = NvU32_LO16(info.offset >> 32); + + nvAssert(!info.error); + + switch (info.sizes) { + case NV3D_TEXHEAD_A8B8G8R8: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_A8B8G8R8, tex->head); + break; + case NV3D_TEXHEAD_A2B10G10R10: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_A2B10G10R10, tex->head); + break; + case NV3D_TEXHEAD_B5G6R5: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_B5G6R5, tex->head); + break; + case NV3D_TEXHEAD_A1B5G5R5: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_A1B5G5R5, tex->head); + break; + case NV3D_TEXHEAD_R8: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_R8, tex->head); + break; + case NV3D_TEXHEAD_R32: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_R32, tex->head); + break; + case NV3D_TEXHEAD_R16: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_R16, tex->head); + break; + case NV3D_TEXHEAD_G8R8: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_G8R8, tex->head); + break; + case NV3D_TEXHEAD_R16G16B16A16: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_R16_G16_B16_A16, tex->head); + break; + case NV3D_TEXHEAD_R32G32B32A32: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_R32_G32_B32_A32, tex->head); + break; + case NV3D_TEXHEAD_Y8_VIDEO: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_Y8_VIDEO, tex->head); + break; + default: + nvAssert(!"Unrecognized component sizes"); + } + + switch (info.dataType) { + case NV3D_TEXHEAD_NUM_UNORM: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _R_DATA_TYPE, + _NUM_UNORM, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _G_DATA_TYPE, + _NUM_UNORM, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _B_DATA_TYPE, + _NUM_UNORM, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _A_DATA_TYPE, + _NUM_UNORM, tex->head); + break; + case NV3D_TEXHEAD_NUM_UINT: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _R_DATA_TYPE, + _NUM_UINT, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _G_DATA_TYPE, + _NUM_UINT, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _B_DATA_TYPE, + _NUM_UINT, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _A_DATA_TYPE, + _NUM_UINT, tex->head); + break; + case NV3D_TEXHEAD_NUM_FLOAT: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _R_DATA_TYPE, + _NUM_FLOAT, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _G_DATA_TYPE, + _NUM_FLOAT, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _B_DATA_TYPE, + _NUM_FLOAT, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _A_DATA_TYPE, + _NUM_FLOAT, tex->head); + break; + case NV3D_TEXHEAD_NUM_SNORM: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _R_DATA_TYPE, + _NUM_FLOAT, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _G_DATA_TYPE, + _NUM_FLOAT, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _B_DATA_TYPE, + _NUM_FLOAT, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _A_DATA_TYPE, + _NUM_FLOAT, tex->head); + break; + case NV3D_TEXHEAD_NUM_SINT: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _R_DATA_TYPE, + _NUM_SINT, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _G_DATA_TYPE, + _NUM_SINT, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _B_DATA_TYPE, + _NUM_SINT, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _A_DATA_TYPE, + _NUM_SINT, tex->head); + break; + } + + switch (info.source.x) { + case 
NV3D_TEXHEAD_IN_A: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _X_SOURCE, + _IN_A, tex->head); + break; + case NV3D_TEXHEAD_IN_R: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _X_SOURCE, + _IN_R, tex->head); + break; + case NV3D_TEXHEAD_IN_G: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _X_SOURCE, + _IN_G, tex->head); + break; + case NV3D_TEXHEAD_IN_B: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _X_SOURCE, + _IN_B, tex->head); + break; + case NV3D_TEXHEAD_IN_ZERO: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _X_SOURCE, + _IN_ZERO, tex->head); + break; + case NV3D_TEXHEAD_IN_ONE_FLOAT: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _X_SOURCE, + _IN_ONE_FLOAT, tex->head); + break; + } + + switch (info.source.y) { + case NV3D_TEXHEAD_IN_A: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _Y_SOURCE, + _IN_A, tex->head); + break; + case NV3D_TEXHEAD_IN_R: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _Y_SOURCE, + _IN_R, tex->head); + break; + case NV3D_TEXHEAD_IN_G: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _Y_SOURCE, + _IN_G, tex->head); + break; + case NV3D_TEXHEAD_IN_B: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _Y_SOURCE, + _IN_B, tex->head); + break; + case NV3D_TEXHEAD_IN_ZERO: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _Y_SOURCE, + _IN_ZERO, tex->head); + break; + case NV3D_TEXHEAD_IN_ONE_FLOAT: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _Y_SOURCE, + _IN_ONE_FLOAT, tex->head); + break; + } + + switch (info.source.z) { + case NV3D_TEXHEAD_IN_A: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _Z_SOURCE, + _IN_A, tex->head); + break; + case NV3D_TEXHEAD_IN_R: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _Z_SOURCE, + _IN_R, tex->head); + break; + case NV3D_TEXHEAD_IN_G: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _Z_SOURCE, + _IN_G, tex->head); + break; + case NV3D_TEXHEAD_IN_B: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _Z_SOURCE, + _IN_B, tex->head); + break; + case NV3D_TEXHEAD_IN_ZERO: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _Z_SOURCE, + _IN_ZERO, tex->head); + break; + case NV3D_TEXHEAD_IN_ONE_FLOAT: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _Z_SOURCE, + _IN_ONE_FLOAT, tex->head); + break; + } + + switch (info.source.w) { + case NV3D_TEXHEAD_IN_A: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _W_SOURCE, + _IN_A, tex->head); + break; + case NV3D_TEXHEAD_IN_R: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _W_SOURCE, + _IN_R, tex->head); + break; + case NV3D_TEXHEAD_IN_G: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _W_SOURCE, + _IN_G, tex->head); + break; + case NV3D_TEXHEAD_IN_B: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _W_SOURCE, + _IN_B, tex->head); + break; + case NV3D_TEXHEAD_IN_ZERO: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _W_SOURCE, + _IN_ZERO, tex->head); + break; + case NV3D_TEXHEAD_IN_ONE_FLOAT: + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _W_SOURCE, + _IN_ONE_FLOAT, tex->head); + break; + } + + // Default to edge clamping. Our GPU seems to support wrapping + // even with non-normalized coordinates. 
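+    // Note that the info.repeatType switch below reassigns samp[0] for
+    // every recognized repeat type, so this edge-clamp default only
+    // remains in effect for repeat types not handled there.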
+    tex->samp[0] =
+        NV3D_C(B097, TEXSAMP0, ADDRESS_U, CLAMP_TO_EDGE) |
+        NV3D_C(B097, TEXSAMP0, ADDRESS_V, CLAMP_TO_EDGE) |
+        NV3D_C(B097, TEXSAMP0, ADDRESS_P, CLAMP_TO_EDGE);
+
+    if (info.texType == NV3D_TEX_TYPE_ONE_D_BUFFER) {
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_1D, _ADDRESS_BITS31TO0,
+                           NvU64_LO32(info.offset), tex->head);
+
+        FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_1D, _HEADER_VERSION,
+                           _SELECT_ONE_D_BUFFER, tex->head);
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_1D, _ADDRESS_BITS47TO32,
+                           hi_offset, tex->head);
+
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_1D, _WIDTH_MINUS_ONE_BITS31TO16,
+                           NvU32_HI16(info.width - 1), tex->head);
+
+        FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_1D, _TEXTURE_TYPE,
+                           _ONE_D_BUFFER, tex->head);
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_1D, _WIDTH_MINUS_ONE_BITS15TO0,
+                           NvU32_LO16(info.width - 1), tex->head);
+    } else if (info.texType == NV3D_TEX_TYPE_TWO_D_PITCH) {
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_PITCH, _ADDRESS_BITS31TO5,
+                           (NvU32)((info.offset >> 5) & 0x7ffffff), tex->head);
+
+        FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_PITCH, _HEADER_VERSION,
+                           _SELECT_PITCH, tex->head);
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_PITCH, _ADDRESS_BITS47TO32,
+                           hi_offset, tex->head);
+
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_PITCH, _PITCH_BITS20TO5,
+                           NvU32_LO16(info.pitch >> 5), tex->head);
+
+        FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_PITCH, _TEXTURE_TYPE,
+                           _TWO_D_NO_MIPMAP, tex->head);
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_PITCH, _WIDTH_MINUS_ONE,
+                           info.width - 1, tex->head);
+        FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_PITCH, _BORDER_SIZE,
+                           _BORDER_SAMPLER_COLOR, tex->head);
+
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_PITCH, _HEIGHT_MINUS_ONE,
+                           info.height - 1, tex->head);
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_PITCH, _DEPTH_MINUS_ONE,
+                           0, tex->head);
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_PITCH, _NORMALIZED_COORDS,
+                           info.normalizedCoords, tex->head);
+    } else {
+        if (info.texType == NV3D_TEX_TYPE_ONE_D) {
+            FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _TEXTURE_TYPE,
+                               _ONE_D, tex->head);
+        } else if (info.texType == NV3D_TEX_TYPE_TWO_D_BLOCKLINEAR) {
+            FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _TEXTURE_TYPE,
+                               _TWO_D_NO_MIPMAP, tex->head);
+        }
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_BL, _ADDRESS_BITS31TO9,
+                           (info.offset >> 9) & 0x7fffff, tex->head);
+
+        FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _HEADER_VERSION,
+                           _SELECT_BLOCKLINEAR, tex->head);
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_BL, _ADDRESS_BITS47TO32,
+                           hi_offset, tex->head);
+
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_BL, _GOBS_PER_BLOCK_WIDTH,
+                           info.log2GobsPerBlock.x, tex->head);
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_BL, _GOBS_PER_BLOCK_HEIGHT,
+                           info.log2GobsPerBlock.y, tex->head);
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_BL, _GOBS_PER_BLOCK_DEPTH,
+                           info.log2GobsPerBlock.z, tex->head);
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_BL, _WIDTH_MINUS_ONE,
+                           info.width - 1, tex->head);
+        FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _BORDER_SIZE,
+                           _BORDER_SAMPLER_COLOR, tex->head);
+
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_BL, _HEIGHT_MINUS_ONE,
+                           info.height - 1, tex->head);
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_BL, _DEPTH_MINUS_ONE,
+                           0, tex->head);
+        FLD_SET_DRF_NUM_MW(B097, _TEXHEAD_BL, _NORMALIZED_COORDS,
+                           info.normalizedCoords, tex->head);
+    }
+
+    switch (info.repeatType) {
+    case NV3D_TEXHEAD_REPEAT_TYPE_NORMAL:
+        tex->samp[0] = NV3D_C(B097, TEXSAMP0, ADDRESS_U, WRAP) |
+                       NV3D_C(B097, TEXSAMP0, ADDRESS_V, WRAP);
+        break;
+    case NV3D_TEXHEAD_REPEAT_TYPE_PAD:
+        tex->samp[0] = NV3D_C(B097, TEXSAMP0, ADDRESS_U, CLAMP_TO_EDGE) |
+                       NV3D_C(B097, TEXSAMP0, ADDRESS_V, CLAMP_TO_EDGE);
+        break;
+    case NV3D_TEXHEAD_REPEAT_TYPE_REFLECT:
+
tex->samp[0] = NV3D_C(B097, TEXSAMP0, ADDRESS_U, MIRROR) | + NV3D_C(B097, TEXSAMP0, ADDRESS_V, MIRROR); + break; + case NV3D_TEXHEAD_REPEAT_TYPE_NONE: + tex->samp[0] = NV3D_C(B097, TEXSAMP0, ADDRESS_U, BORDER) | + NV3D_C(B097, TEXSAMP0, ADDRESS_V, BORDER); + break; + } + + switch (info.filtering) { + case NV3D_TEXHEAD_FILTER_TYPE_NEAREST: + tex->samp[1] = NV3D_C(B097, TEXSAMP1, MAG_FILTER, MAG_POINT) | + NV3D_C(B097, TEXSAMP1, MIN_FILTER, MIN_POINT) | + NV3D_C(B097, TEXSAMP1, MIP_FILTER, MIP_NONE); + break; + + case NV3D_TEXHEAD_FILTER_TYPE_LINEAR: + tex->samp[1] = NV3D_C(B097, TEXSAMP1, MAG_FILTER, MAG_LINEAR) | + NV3D_C(B097, TEXSAMP1, MIN_FILTER, MIN_LINEAR) | + NV3D_C(B097, TEXSAMP1, MIP_FILTER, MIP_NONE); + break; + + case NV3D_TEXHEAD_FILTER_TYPE_ANISO_2X: + tex->samp[0] |= NV3D_C(B097, TEXSAMP0, MAX_ANISOTROPY, ANISO_2_TO_1); + tex->samp[1] = NV3D_C(B097, TEXSAMP1, MAG_FILTER, MAG_LINEAR) | + NV3D_C(B097, TEXSAMP1, MIN_FILTER, MIN_ANISO) | + NV3D_C(B097, TEXSAMP1, MIP_FILTER, MIP_NONE); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _MAX_ANISOTROPY, + _ANISO_2_TO_1, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _ANISO_FINE_SPREAD_MODIFIER, + _SPREAD_MODIFIER_CONST_TWO, tex->head); + + break; + + case NV3D_TEXHEAD_FILTER_TYPE_ANISO_4X: + tex->samp[0] |= NV3D_C(B097, TEXSAMP0, MAX_ANISOTROPY, ANISO_4_TO_1); + tex->samp[1] = NV3D_C(B097, TEXSAMP1, MAG_FILTER, MAG_LINEAR) | + NV3D_C(B097, TEXSAMP1, MIN_FILTER, MIN_ANISO) | + NV3D_C(B097, TEXSAMP1, MIP_FILTER, MIP_NONE); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _MAX_ANISOTROPY, + _ANISO_4_TO_1, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _ANISO_FINE_SPREAD_MODIFIER, + _SPREAD_MODIFIER_CONST_TWO, tex->head); + break; + + case NV3D_TEXHEAD_FILTER_TYPE_ANISO_8X: + tex->samp[0] |= NV3D_C(B097, TEXSAMP0, MAX_ANISOTROPY, ANISO_8_TO_1); + tex->samp[1] = NV3D_C(B097, TEXSAMP1, MAG_FILTER, MAG_LINEAR) | + NV3D_C(B097, TEXSAMP1, MIN_FILTER, MIN_ANISO) | + NV3D_C(B097, TEXSAMP1, MIP_FILTER, MIP_NONE); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _MAX_ANISOTROPY, + _ANISO_8_TO_1, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _ANISO_FINE_SPREAD_MODIFIER, + _SPREAD_MODIFIER_CONST_TWO, tex->head); + + break; + + case NV3D_TEXHEAD_FILTER_TYPE_ANISO_16X: + tex->samp[0] |= NV3D_C(B097, TEXSAMP0, MAX_ANISOTROPY, ANISO_16_TO_1); + tex->samp[1] = NV3D_C(B097, TEXSAMP1, MAG_FILTER, MAG_LINEAR) | + NV3D_C(B097, TEXSAMP1, MIN_FILTER, MIN_ANISO) | + NV3D_C(B097, TEXSAMP1, MIP_FILTER, MIP_NONE); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _MAX_ANISOTROPY, + _ANISO_16_TO_1, tex->head); + FLD_SET_DRF_DEF_MW(B097, _TEXHEAD_BL, _ANISO_FINE_SPREAD_MODIFIER, + _SPREAD_MODIFIER_CONST_TWO, tex->head); + break; + + } +} diff --git a/src/common/unix/nvidia-3d/src/nvidia-3d-pascal.c b/src/common/unix/nvidia-3d/src/nvidia-3d-pascal.c new file mode 100644 index 0000000..fe7ae44 --- /dev/null +++ b/src/common/unix/nvidia-3d/src/nvidia-3d-pascal.c @@ -0,0 +1,431 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-3d-pascal.h" +#include "nvidia-3d-maxwell.h" +#include "nvidia-3d.h" + +#include "class/clc197.h" /* NVC197_SET_GO_IDLE_TIMEOUT */ +#include "class/clc097tex.h" +#include + +void _nv3dInitChannelPascal(Nv3dChannelRec *p3dChannel) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + + _nv3dInitChannelMaxwell(p3dChannel); + + if (!p3dChannel->hasFrameBoundaries) { + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NVC197_SET_GO_IDLE_TIMEOUT, 1); + nvPushSetMethodData(p, 0x800); + } +} + +void _nv3dAssignNv3dTexturePascal( + Nv3dRenderTexInfo info, + Nv3dTexture *tex) +{ + nvAssert(!info.error); + + switch (info.sizes) { + case NV3D_TEXHEAD_A8B8G8R8: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_A8B8G8R8, tex->head); + break; + case NV3D_TEXHEAD_A2B10G10R10: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_A2B10G10R10, tex->head); + break; + case NV3D_TEXHEAD_B5G6R5: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_B5G6R5, tex->head); + break; + case NV3D_TEXHEAD_A1B5G5R5: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_A1B5G5R5, tex->head); + break; + case NV3D_TEXHEAD_R8: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_R8, tex->head); + break; + case NV3D_TEXHEAD_R32: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_R32, tex->head); + break; + case NV3D_TEXHEAD_R16: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_R16, tex->head); + break; + case NV3D_TEXHEAD_G8R8: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_G8R8, tex->head); + break; + case NV3D_TEXHEAD_R16G16B16A16: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_R16_G16_B16_A16, tex->head); + break; + case NV3D_TEXHEAD_R32G32B32A32: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_R32_G32_B32_A32, tex->head); + break; + case NV3D_TEXHEAD_Y8_VIDEO: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _COMPONENTS, + _SIZES_Y8_VIDEO, tex->head); + break; + default: + nvAssert(!"Unrecognized component sizes"); + } + + switch (info.dataType) { + case NV3D_TEXHEAD_NUM_UNORM: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _R_DATA_TYPE, + _NUM_UNORM, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _G_DATA_TYPE, + _NUM_UNORM, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _B_DATA_TYPE, + _NUM_UNORM, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _A_DATA_TYPE, + _NUM_UNORM, tex->head); + break; + 
case NV3D_TEXHEAD_NUM_UINT: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _R_DATA_TYPE, + _NUM_UINT, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _G_DATA_TYPE, + _NUM_UINT, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _B_DATA_TYPE, + _NUM_UINT, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _A_DATA_TYPE, + _NUM_UINT, tex->head); + break; + case NV3D_TEXHEAD_NUM_FLOAT: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _R_DATA_TYPE, + _NUM_FLOAT, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _G_DATA_TYPE, + _NUM_FLOAT, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _B_DATA_TYPE, + _NUM_FLOAT, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _A_DATA_TYPE, + _NUM_FLOAT, tex->head); + break; + case NV3D_TEXHEAD_NUM_SNORM: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _R_DATA_TYPE, + _NUM_FLOAT, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _G_DATA_TYPE, + _NUM_FLOAT, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _B_DATA_TYPE, + _NUM_FLOAT, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _A_DATA_TYPE, + _NUM_FLOAT, tex->head); + break; + case NV3D_TEXHEAD_NUM_SINT: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _R_DATA_TYPE, + _NUM_SINT, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _G_DATA_TYPE, + _NUM_SINT, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _B_DATA_TYPE, + _NUM_SINT, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _A_DATA_TYPE, + _NUM_SINT, tex->head); + break; + } + + switch (info.source.x) { + case NV3D_TEXHEAD_IN_A: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _X_SOURCE, + _IN_A, tex->head); + break; + case NV3D_TEXHEAD_IN_R: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _X_SOURCE, + _IN_R, tex->head); + break; + case NV3D_TEXHEAD_IN_G: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _X_SOURCE, + _IN_G, tex->head); + break; + case NV3D_TEXHEAD_IN_B: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _X_SOURCE, + _IN_B, tex->head); + break; + case NV3D_TEXHEAD_IN_ZERO: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _X_SOURCE, + _IN_ZERO, tex->head); + break; + case NV3D_TEXHEAD_IN_ONE_FLOAT: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _X_SOURCE, + _IN_ONE_FLOAT, tex->head); + break; + } + + switch (info.source.y) { + case NV3D_TEXHEAD_IN_A: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _Y_SOURCE, + _IN_A, tex->head); + break; + case NV3D_TEXHEAD_IN_R: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _Y_SOURCE, + _IN_R, tex->head); + break; + case NV3D_TEXHEAD_IN_G: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _Y_SOURCE, + _IN_G, tex->head); + break; + case NV3D_TEXHEAD_IN_B: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _Y_SOURCE, + _IN_B, tex->head); + break; + case NV3D_TEXHEAD_IN_ZERO: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _Y_SOURCE, + _IN_ZERO, tex->head); + break; + case NV3D_TEXHEAD_IN_ONE_FLOAT: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _Y_SOURCE, + _IN_ONE_FLOAT, tex->head); + break; + } + + switch (info.source.z) { + case NV3D_TEXHEAD_IN_A: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _Z_SOURCE, + _IN_A, tex->head); + break; + case NV3D_TEXHEAD_IN_R: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _Z_SOURCE, + _IN_R, tex->head); + break; + case NV3D_TEXHEAD_IN_G: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _Z_SOURCE, + _IN_G, tex->head); + break; + case NV3D_TEXHEAD_IN_B: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _Z_SOURCE, + _IN_B, tex->head); + break; + case NV3D_TEXHEAD_IN_ZERO: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _Z_SOURCE, + _IN_ZERO, tex->head); + break; + case NV3D_TEXHEAD_IN_ONE_FLOAT: + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _Z_SOURCE, + _IN_ONE_FLOAT, tex->head); + break; + } + + switch (info.source.w) { + 
case NV3D_TEXHEAD_IN_A:
+        FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _W_SOURCE,
+                           _IN_A, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_R:
+        FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _W_SOURCE,
+                           _IN_R, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_G:
+        FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _W_SOURCE,
+                           _IN_G, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_B:
+        FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _W_SOURCE,
+                           _IN_B, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_ZERO:
+        FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _W_SOURCE,
+                           _IN_ZERO, tex->head);
+        break;
+    case NV3D_TEXHEAD_IN_ONE_FLOAT:
+        FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _W_SOURCE,
+                           _IN_ONE_FLOAT, tex->head);
+        break;
+    }
+
+    // Default to edge clamping. Our GPU seems to support wrapping
+    // even with non-normalized coordinates.
+    tex->samp[0] =
+        NV3D_C(C097, TEXSAMP0, ADDRESS_U, CLAMP_TO_EDGE) |
+        NV3D_C(C097, TEXSAMP0, ADDRESS_V, CLAMP_TO_EDGE) |
+        NV3D_C(C097, TEXSAMP0, ADDRESS_P, CLAMP_TO_EDGE);
+
+    if (info.texType == NV3D_TEX_TYPE_ONE_D_BUFFER) {
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_1D, _ADDRESS_BITS31TO0,
+                           NvU64_LO32(info.offset), tex->head);
+
+        FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_1D, _HEADER_VERSION,
+                           _SELECT_ONE_D_BUFFER, tex->head);
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_1D, _ADDRESS_BITS48TO32,
+                           NvU64_HI32(info.offset), tex->head);
+
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_1D, _WIDTH_MINUS_ONE_BITS31TO16,
+                           NvU32_HI16(info.width - 1), tex->head);
+
+        FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_1D, _TEXTURE_TYPE,
+                           _ONE_D_BUFFER, tex->head);
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_1D, _WIDTH_MINUS_ONE_BITS15TO0,
+                           NvU32_LO16(info.width - 1), tex->head);
+    } else if (info.texType == NV3D_TEX_TYPE_TWO_D_PITCH) {
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_PITCH, _ADDRESS_BITS31TO5,
+                           info.offset >> 5, tex->head);
+
+        FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_PITCH, _HEADER_VERSION,
+                           _SELECT_PITCH, tex->head);
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_PITCH, _ADDRESS_BITS48TO32,
+                           NvU64_HI32(info.offset), tex->head);
+
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_PITCH, _PITCH_BITS20TO5,
+                           NvU32_LO16(info.pitch >> 5), tex->head);
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_PITCH, _PITCH_BIT21,
+                           info.pitch >> 21, tex->head);
+
+        FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_PITCH, _TEXTURE_TYPE,
+                           _TWO_D_NO_MIPMAP, tex->head);
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_PITCH, _WIDTH_MINUS_ONE,
+                           info.width - 1, tex->head);
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_PITCH, _HEIGHT_MINUS_ONE_BIT16,
+                           (info.height - 1) >> 16, tex->head);
+        FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_PITCH, _BORDER_SIZE,
+                           _BORDER_SAMPLER_COLOR, tex->head);
+
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_PITCH, _HEIGHT_MINUS_ONE,
+                           info.height - 1, tex->head);
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_PITCH, _DEPTH_MINUS_ONE,
+                           0, tex->head);
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_PITCH, _NORMALIZED_COORDS,
+                           info.normalizedCoords, tex->head);
+    } else {
+        if (info.texType == NV3D_TEX_TYPE_ONE_D) {
+            FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _TEXTURE_TYPE,
+                               _ONE_D, tex->head);
+        } else if (info.texType == NV3D_TEX_TYPE_TWO_D_BLOCKLINEAR) {
+            FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _TEXTURE_TYPE,
+                               _TWO_D_NO_MIPMAP, tex->head);
+        }
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_BL, _ADDRESS_BITS31TO9,
+                           info.offset >> 9, tex->head);
+
+        FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _HEADER_VERSION,
+                           _SELECT_BLOCKLINEAR, tex->head);
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_BL, _ADDRESS_BITS48TO32,
+                           NvU64_HI32(info.offset), tex->head);
+
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_BL, _GOBS_PER_BLOCK_WIDTH,
+                           info.log2GobsPerBlock.x, tex->head);
+        FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_BL, _GOBS_PER_BLOCK_HEIGHT,
+
info.log2GobsPerBlock.y, tex->head); + FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_BL, _GOBS_PER_BLOCK_DEPTH, + info.log2GobsPerBlock.z, tex->head); + + FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_BL, _WIDTH_MINUS_ONE, + info.width - 1, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _BORDER_SIZE, + _BORDER_SAMPLER_COLOR, tex->head); + + FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_BL, _HEIGHT_MINUS_ONE, + info.height - 1, tex->head); + FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_BL, _HEIGHT_MINUS_ONE_BIT16, + (info.height - 1) >> 16, tex->head); + FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_BL, _DEPTH_MINUS_ONE, + 0, tex->head); + FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_BL, _DEPTH_MINUS_ONE_BIT14, + 0, tex->head); + FLD_SET_DRF_NUM_MW(C097, _TEXHEAD_BL, _NORMALIZED_COORDS, + info.normalizedCoords, tex->head); + } + + switch (info.repeatType) { + case NV3D_TEXHEAD_REPEAT_TYPE_NORMAL: + tex->samp[0] = NV3D_C(C097, TEXSAMP0, ADDRESS_U, WRAP) | + NV3D_C(C097, TEXSAMP0, ADDRESS_V, WRAP); + break; + case NV3D_TEXHEAD_REPEAT_TYPE_PAD: + tex->samp[0] = NV3D_C(C097, TEXSAMP0, ADDRESS_U, CLAMP_TO_EDGE) | + NV3D_C(C097, TEXSAMP0, ADDRESS_V, CLAMP_TO_EDGE); + break; + case NV3D_TEXHEAD_REPEAT_TYPE_REFLECT: + tex->samp[0] = NV3D_C(C097, TEXSAMP0, ADDRESS_U, MIRROR) | + NV3D_C(C097, TEXSAMP0, ADDRESS_V, MIRROR); + break; + case NV3D_TEXHEAD_REPEAT_TYPE_NONE: + tex->samp[0] = NV3D_C(C097, TEXSAMP0, ADDRESS_U, BORDER) | + NV3D_C(C097, TEXSAMP0, ADDRESS_V, BORDER); + break; + } + + switch (info.filtering) { + case NV3D_TEXHEAD_FILTER_TYPE_NEAREST: + tex->samp[1] = NV3D_C(C097, TEXSAMP1, MAG_FILTER, MAG_POINT) | + NV3D_C(C097, TEXSAMP1, MIN_FILTER, MIN_POINT) | + NV3D_C(C097, TEXSAMP1, MIP_FILTER, MIP_NONE); + break; + + case NV3D_TEXHEAD_FILTER_TYPE_LINEAR: + tex->samp[1] = NV3D_C(C097, TEXSAMP1, MAG_FILTER, MAG_LINEAR) | + NV3D_C(C097, TEXSAMP1, MIN_FILTER, MIN_LINEAR) | + NV3D_C(C097, TEXSAMP1, MIP_FILTER, MIP_NONE); + break; + + case NV3D_TEXHEAD_FILTER_TYPE_ANISO_2X: + tex->samp[0] |= NV3D_C(C097, TEXSAMP0, MAX_ANISOTROPY, ANISO_2_TO_1); + tex->samp[1] = NV3D_C(C097, TEXSAMP1, MAG_FILTER, MAG_LINEAR) | + NV3D_C(C097, TEXSAMP1, MIN_FILTER, MIN_ANISO) | + NV3D_C(C097, TEXSAMP1, MIP_FILTER, MIP_NONE); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _MAX_ANISOTROPY, + _ANISO_2_TO_1, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _ANISO_FINE_SPREAD_MODIFIER, + _SPREAD_MODIFIER_CONST_TWO, tex->head); + + break; + + case NV3D_TEXHEAD_FILTER_TYPE_ANISO_4X: + tex->samp[0] |= NV3D_C(C097, TEXSAMP0, MAX_ANISOTROPY, ANISO_4_TO_1); + tex->samp[1] = NV3D_C(C097, TEXSAMP1, MAG_FILTER, MAG_LINEAR) | + NV3D_C(C097, TEXSAMP1, MIN_FILTER, MIN_ANISO) | + NV3D_C(C097, TEXSAMP1, MIP_FILTER, MIP_NONE); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _MAX_ANISOTROPY, + _ANISO_4_TO_1, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _ANISO_FINE_SPREAD_MODIFIER, + _SPREAD_MODIFIER_CONST_TWO, tex->head); + break; + + case NV3D_TEXHEAD_FILTER_TYPE_ANISO_8X: + tex->samp[0] |= NV3D_C(C097, TEXSAMP0, MAX_ANISOTROPY, ANISO_8_TO_1); + tex->samp[1] = NV3D_C(C097, TEXSAMP1, MAG_FILTER, MAG_LINEAR) | + NV3D_C(C097, TEXSAMP1, MIN_FILTER, MIN_ANISO) | + NV3D_C(C097, TEXSAMP1, MIP_FILTER, MIP_NONE); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _MAX_ANISOTROPY, + _ANISO_8_TO_1, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _ANISO_FINE_SPREAD_MODIFIER, + _SPREAD_MODIFIER_CONST_TWO, tex->head); + + break; + + case NV3D_TEXHEAD_FILTER_TYPE_ANISO_16X: + tex->samp[0] |= NV3D_C(C097, TEXSAMP0, MAX_ANISOTROPY, ANISO_16_TO_1); + tex->samp[1] = NV3D_C(C097, TEXSAMP1, MAG_FILTER, MAG_LINEAR) | + NV3D_C(C097, 
TEXSAMP1, MIN_FILTER, MIN_ANISO) | + NV3D_C(C097, TEXSAMP1, MIP_FILTER, MIP_NONE); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _MAX_ANISOTROPY, + _ANISO_16_TO_1, tex->head); + FLD_SET_DRF_DEF_MW(C097, _TEXHEAD_BL, _ANISO_FINE_SPREAD_MODIFIER, + _SPREAD_MODIFIER_CONST_TWO, tex->head); + break; + + } +} diff --git a/src/common/unix/nvidia-3d/src/nvidia-3d-surface.c b/src/common/unix/nvidia-3d/src/nvidia-3d-surface.c new file mode 100644 index 0000000..421b09d --- /dev/null +++ b/src/common/unix/nvidia-3d/src/nvidia-3d-surface.c @@ -0,0 +1,296 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-3d.h" +#include "nvidia-3d-surface.h" +#include "nvidia-push-utils.h" /* nvPushIsAmodel() */ + +#include + +static void FreeSurface( + Nv3dChannelRec *p3dChannel) +{ + NvPushDevicePtr pPushDevice = p3dChannel->p3dDevice->pPushDevice; + int sd; + + for (sd = ARRAY_LEN(pPushDevice->subDevice) - 1; + sd >= 0; + sd--) { + if (p3dChannel->surface.handle[sd]) { + NvU32 ret = pPushDevice->pImports->rmApiFree( + pPushDevice, + pPushDevice->subDevice[sd].deviceHandle, + p3dChannel->surface.handle[sd]); + nvAssert(ret == NVOS_STATUS_SUCCESS); + (void)ret; + p3dChannel->surface.handle[sd] = 0; + } + } +} + +static NvBool AllocSurface( + Nv3dChannelRec *p3dChannel, + NvU64 size) +{ + NvPushDevicePtr pPushDevice = p3dChannel->p3dDevice->pPushDevice; + const NvPushImports *pImports = pPushDevice->pImports; + int sd; + + for (sd = 0; + sd < ARRAY_LEN(pPushDevice->subDevice) && + pPushDevice->subDevice[sd].deviceHandle != 0; + sd++) { + + NVOS32_PARAMETERS params = { + .hRoot = pPushDevice->clientHandle, + .hObjectParent = pPushDevice->subDevice[sd].deviceHandle, + .function = NVOS32_FUNCTION_ALLOC_SIZE, + .data.AllocSize.owner = pPushDevice->clientHandle, + .data.AllocSize.type = NVOS32_TYPE_SHADER_PROGRAM, + .data.AllocSize.size = size, + .data.AllocSize.attr = + (pPushDevice->hasFb ? 
+ DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM) : + DRF_DEF(OS32, _ATTR, _LOCATION, _PCI)) | + DRF_DEF(OS32, _ATTR, _PHYSICALITY, _ALLOW_NONCONTIGUOUS) | + DRF_DEF(OS32, _ATTR, _COHERENCY, _WRITE_COMBINE), + .data.AllocSize.attr2 = + DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _YES), + .data.AllocSize.flags = 0, + .data.AllocSize.alignment = 4096, + }; + + NvU32 ret = pImports->rmApiVidHeapControl(pPushDevice, ¶ms); + + if (ret != NVOS_STATUS_SUCCESS) { + FreeSurface(p3dChannel); + return FALSE; + } + + p3dChannel->surface.handle[sd] = params.data.AllocSize.hMemory; + } + + return TRUE; +} + +static void UnmapSurface( + const Nv3dChannelRec *p3dChannel, + NvU64 gpuAddress) +{ + NvPushDevicePtr pPushDevice = p3dChannel->p3dDevice->pPushDevice; + const NvPushImports *pImports = pPushDevice->pImports; + int sd; + + for (sd = ARRAY_LEN(p3dChannel->surface.handle) - 1; sd >= 0; sd--) { + if (p3dChannel->surface.handle[sd]) { + NvU32 ret = pImports->rmApiUnmapMemoryDma( + pPushDevice, + pPushDevice->subDevice[sd].deviceHandle, + pPushDevice->subDevice[sd].gpuVASpaceCtxDma, + p3dChannel->surface.handle[sd], + 0, + gpuAddress); + nvAssert(ret == NVOS_STATUS_SUCCESS); + (void)ret; + } + } +} + +static NvU64 MapSurface( + const Nv3dChannelRec *p3dChannel, + NvU64 size) +{ + NvPushDevicePtr pPushDevice = p3dChannel->p3dDevice->pPushDevice; + const NvPushImports *pImports = pPushDevice->pImports; + NvU64 gpuAddress = 0; + int sd; + + for (sd = 0; + sd < ARRAY_LEN(p3dChannel->surface.handle) && + p3dChannel->surface.handle[sd] != 0; + sd++) { + NvU32 flags = DRF_DEF(OS46, _FLAGS, _CACHE_SNOOP, _ENABLE); + NvU64 thisGpuAddress; + + if (sd == 0) { + /* For the first device, RM assigns a virtual address. */ + thisGpuAddress = 0; + } else { + /* For subsequent devices, use the same virtual address. */ + flags = FLD_SET_DRF(OS46, _FLAGS, _DMA_OFFSET_FIXED, _TRUE, flags); + nvAssert(gpuAddress != 0); + thisGpuAddress = gpuAddress; + } + + NvU32 ret = pImports->rmApiMapMemoryDma(pPushDevice, + pPushDevice->subDevice[sd].deviceHandle, + pPushDevice->subDevice[sd].gpuVASpaceCtxDma, + p3dChannel->surface.handle[sd], + 0, + size, + flags, + &thisGpuAddress); + if (ret != NVOS_STATUS_SUCCESS) { + if (sd != 0) { + /* Clean up earlier successful mappings */ + UnmapSurface(p3dChannel, gpuAddress); + } + return 0; + } + + if (sd == 0) { + gpuAddress = thisGpuAddress; + } else { + nvAssert(gpuAddress == thisGpuAddress); + } + } + + return gpuAddress; +} + +NvBool nv3dAllocChannelSurface(Nv3dChannelPtr p3dChannel) +{ + const NvU64 size = p3dChannel->surface.totalSize; + NvU64 gpuAddress; + + if (!AllocSurface(p3dChannel, size)) { + return FALSE; + } + + gpuAddress = MapSurface(p3dChannel, size); + + if (gpuAddress == 0) { + FreeSurface(p3dChannel); + return FALSE; + } + + p3dChannel->surface.gpuAddress = gpuAddress; + + return TRUE; +} + +void nv3dFreeChannelSurface(Nv3dChannelPtr p3dChannel) +{ + if (p3dChannel->p3dDevice == NULL) { + return; + } + + if (p3dChannel->surface.gpuAddress != 0) { + /* + * If the surface is mapped into our channel, we need to ensure + * that any methods in the channel that might reference the + * gpuAddress have idled before we unmap the address. 
+ */ + nvPushIdleChannel(p3dChannel->pPushChannel); + + UnmapSurface(p3dChannel, + p3dChannel->surface.gpuAddress); + p3dChannel->surface.gpuAddress = 0; + } + + FreeSurface(p3dChannel); +} + +/* + * The Nv3dChannelRec's surface contains: + * + * programLocalMemory + * programCode + * programConstants + * Nv3dTexture[numTextures] + * bindlessTextureConstantBuffer (optionally) + * Nv3dConstantBuffer[numConstantBuffers] + * vertexStreams + * + * Where all items are aligned to NV3D_TEXTURE_PITCH_ALIGNMENT. + * + * Compute all the offsets into the surface, and the total surface + * size. + * + * XXX TODO: use correct alignment for all items, rather than + * NV3D_TEXTURE_PITCH_ALIGNMENT. + */ +void _nv3dAssignSurfaceOffsets( + const Nv3dAllocChannelStateParams *pParams, + Nv3dChannelPtr p3dChannel) +{ + const NvU32 programPrefetchPadding = 2048; + + NvU64 offset = 0; + enum Nv3dVertexAttributeStreamType stream; + + /* + * Program local memory requires at least 4k alignment. So, place + * it at the start of the surface. + */ + p3dChannel->surface.programLocalMemoryOffset = offset; + + offset += p3dChannel->programLocalMemorySize; + offset = NV_ALIGN_UP(offset, NV3D_TEXTURE_PITCH_ALIGNMENT); + + p3dChannel->surface.programOffset = offset; + + offset += p3dChannel->programs.code.decompressedSize; + offset = NV_ALIGN_UP(offset, NV3D_TEXTURE_PITCH_ALIGNMENT); + + p3dChannel->surface.programConstantsOffset = offset; + + offset += p3dChannel->programs.constants.size; + offset = NV_ALIGN_UP(offset, NV3D_TEXTURE_PITCH_ALIGNMENT); + + p3dChannel->surface.textureOffset = offset; + + offset += (sizeof(Nv3dTexture) * pParams->numTextures); + offset = NV_ALIGN_UP(offset, NV3D_TEXTURE_PITCH_ALIGNMENT); + + p3dChannel->surface.bindlessTextureConstantBufferOffset = offset; + offset += NV3D_CONSTANT_BUFFER_SIZE; + offset = NV_ALIGN_UP(offset, NV3D_TEXTURE_PITCH_ALIGNMENT); + + p3dChannel->surface.constantBufferOffset = offset; + + offset += (NV3D_CONSTANT_BUFFER_SIZE * pParams->numConstantBuffers); + offset = NV_ALIGN_UP(offset, NV3D_TEXTURE_PITCH_ALIGNMENT); + + /* + * TODO: not all nvidia-3d host drivers will require the vertex stream + * memory; maybe host drivers should opt in? + */ + for (stream = NV3D_VERTEX_ATTRIBUTE_STREAM_FIRST; + stream < NV3D_VERTEX_ATTRIBUTE_STREAM_COUNT; + stream++) { + + p3dChannel->surface.vertexStreamOffset[stream] = offset; + + offset += NV3D_VERTEX_ATTRIBUTE_STREAM_SIZE; + offset = NV_ALIGN_UP(offset, NV3D_TEXTURE_PITCH_ALIGNMENT); + } + + /* + * Make sure the total surface size is large enough to cover any + * potential prefetch region. + */ + p3dChannel->surface.totalSize = + NV_MAX(p3dChannel->surface.programOffset + programPrefetchPadding, + offset); +} diff --git a/src/common/unix/nvidia-3d/src/nvidia-3d-turing.c b/src/common/unix/nvidia-3d/src/nvidia-3d-turing.c new file mode 100644 index 0000000..71c3938 --- /dev/null +++ b/src/common/unix/nvidia-3d/src/nvidia-3d-turing.c @@ -0,0 +1,56 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-3d-turing.h" +#include "nvidia-3d-pascal.h" +#include "nvidia-3d.h" + +#include "class/clc597.h" +#include + +void _nv3dInitChannelTuring(Nv3dChannelRec *p3dChannel) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + + _nv3dInitChannelPascal(p3dChannel); + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NVC597_SET_SPH_VERSION, 2); + nvPushSetMethodData(p, + NV3D_V(C597, SET_SPH_VERSION, CURRENT, 4) | + NV3D_V(C597, SET_SPH_VERSION, OLDEST_SUPPORTED, 4)); + nvPushSetMethodData(p, + NV3D_V(C597, CHECK_SPH_VERSION, CURRENT, 4) | + NV3D_V(C597, CHECK_SPH_VERSION, OLDEST_SUPPORTED, 4)); +} + +void _nv3dSetVertexStreamEndTuring( + Nv3dChannelPtr p3dChannel, + enum Nv3dVertexAttributeStreamType stream, + const Nv3dVertexAttributeStreamRec *pStream) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, + NVC597_SET_VERTEX_STREAM_SIZE_A(stream), 2); + nvPushSetMethodDataU64(p, pStream->end - pStream->current); +} diff --git a/src/common/unix/nvidia-3d/src/nvidia-3d-vertex-arrays.c b/src/common/unix/nvidia-3d/src/nvidia-3d-vertex-arrays.c new file mode 100644 index 0000000..4a69911 --- /dev/null +++ b/src/common/unix/nvidia-3d/src/nvidia-3d-vertex-arrays.c @@ -0,0 +1,531 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-3d.h" +#include "nvidia-3d-vertex-arrays.h" +#include "nvidia-3d-types-priv.h" +#include "nvidia-3d-constant-buffers.h" +#include "nvidia-3d-utils.h" + +#include +#include + +static void InitializeStreamFromSurf( + const Nv3dStreamSurfaceRec *pSurf, + Nv3dVertexAttributeStreamRec *pStream) +{ + pStream->current = pSurf->gpuAddress; + pStream->end = pSurf->gpuAddress + pSurf->size; + pStream->stride = 0; + pStream->nextLaunch = 0; +} + +static void InitializeStream( + Nv3dChannelRec *p3dChannel, + enum Nv3dVertexAttributeStreamType stream, + Nv3dVertexAttributeStreamRec *pStream) +{ + const Nv3dStreamSurfaceRec tmpSurf = { + .gpuAddress = + nv3dGetVertexAttributestreamGpuAddress(p3dChannel, stream), + .size = NV3D_VERTEX_ATTRIBUTE_STREAM_SIZE, + }; + InitializeStreamFromSurf(&tmpSurf, pStream); +} + +void _nv3dInitializeStreams( + Nv3dChannelRec *p3dChannel) +{ + enum Nv3dVertexAttributeStreamType stream; + NvPushChannelPtr p = p3dChannel->pPushChannel; + + // Disable vertex attribute vectors 16 through 31 (scalars 64 through 127). + // We don't use them. + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, + NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_B(0), 2); + nvPushSetMethodData(p, ~0); + nvPushSetMethodData(p, ~0); + + for (stream = NV3D_VERTEX_ATTRIBUTE_STREAM_FIRST; + stream < NV3D_VERTEX_ATTRIBUTE_STREAM_COUNT; + stream++) { + + Nv3dVertexAttributeStreamRec *pStream = + &p3dChannel->vertexStreams[stream]; + + InitializeStream(p3dChannel, stream, pStream); + } +} + +static void AdvanceStream( + Nv3dVertexAttributeStreamRec *pStream) +{ + pStream->current += pStream->stride * pStream->nextLaunch; + nvAssert(pStream->current <= pStream->end); + pStream->nextLaunch = 0; +} + +/*! + * Configure a vertex attribute stream to fetch from a surface. + * + * \param[in] p3dChannel The channel + * \param[in] stream The vertex attribute stream + * \param[in] pStream The vertex attribute stream tracking structure + */ +static void +SetVertexStreamSurface( + Nv3dChannelRec *p3dChannel, + enum Nv3dVertexAttributeStreamType stream, + const Nv3dVertexAttributeStreamRec *pStream) +{ + const Nv3dHal *pHal = p3dChannel->p3dDevice->hal; + NvPushChannelPtr p = p3dChannel->pPushChannel; + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, + NV9097_SET_VERTEX_STREAM_A_FORMAT(stream), 3); + nvPushSetMethodData(p, + NV3D_V(9097, SET_VERTEX_STREAM_A_FORMAT, STRIDE, pStream->stride) | + NV3D_C(9097, SET_VERTEX_STREAM_A_FORMAT, ENABLE, TRUE)); + nvPushSetMethodDataU64(p, pStream->current); + + pHal->setVertexStreamEnd(p3dChannel, stream, pStream); +} + +/*! + * Reset a vertex attribute stream to the specified offset, while leaving its + * stride and limit alone. + */ +static void +SetVertexStreamOffset( + Nv3dChannelRec *p3dChannel, + enum Nv3dVertexAttributeStreamType stream, + NvU64 offset) +{ + const Nv3dHal *pHal = p3dChannel->p3dDevice->hal; + NvPushChannelPtr p = p3dChannel->pPushChannel; + Nv3dVertexAttributeStreamRec *pStream = &p3dChannel->vertexStreams[stream]; + + pStream->current = offset; + pStream->nextLaunch = 0; + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, + NV9097_SET_VERTEX_STREAM_A_LOCATION_A(stream), 2); + nvPushSetMethodDataU64(p, offset); + + pHal->setVertexStreamEnd(p3dChannel, stream, pStream); +} + +/*! 
+ * Point the constant buffer selector at the next location for data in the + * given stream. + */ +static void SelectCbForStream( + Nv3dChannelRec *p3dChannel, + enum Nv3dVertexAttributeStreamType stream) +{ + Nv3dVertexAttributeStreamRec *pStream = &p3dChannel->vertexStreams[stream]; + const NvU64 gpuAddress = + nv3dGetVertexAttributestreamGpuAddress(p3dChannel, stream); + int startOffset = pStream->current + pStream->stride * pStream->nextLaunch - + gpuAddress; + + nv3dSelectCbAddress(p3dChannel, gpuAddress, + NV3D_VERTEX_ATTRIBUTE_STREAM_SIZE); + nv3dSetConstantBufferOffset(p3dChannel, startOffset); +} + +/*! + * Configure the DA and VAF to fetch from vertex attribute streams. + * + * This function configures the Data Assembler (DA) and Vertex Attribute Fetch + * (VAF) units to fetch vertex attributes from pSurf using a format configured + * by the 'attribs' array. + * + * It configures two streams: NV3D_VERTEX_ATTRIBUTE_STREAM_STATIC and + * NV3D_VERTEX_ATTRIBUTE_STREAM_DYNAMIC. The static stream contains attributes + * that are the same across all vertices. The dynamic stream contains + * attributes that are different for each vertex. The static stream sources + * from the next available location in the static vertex data surface and uses a + * stride of 0, so that all vertices in an array fetch the same values for those + * attributes. Then, it configures the dynamic stream to fetch starting at + * offset 0 of pSurf, unless pSurf is NULL in which case it starts at the + * appropriate offset in the dynamic vertex data surface. + * + * The 'attribs' array stores Nv3dVertexAttributeInfoRecs, terminated with an + * element where attributeType is NV3D_VERTEX_ATTRIBUTE_END. Each element + * contains: + * + * (a) An enum Nv3dVertexAttributeType indicating which vertex attribute this + * array element describes. + * + * (b) An enum Nv3dVertexAttributeDataType indicating the data type to use for + * the attribute. + * + * (c) An enum Nv3dVertexAttributeStreamType indicating which stream should use + * the attribute. + * + * If any attributes are enabled as static, this function selects the static + * stream surface as the current constant buffer. The caller should push the + * appropriate vertex data. + * + * Note that if you launch rendering using vertex attributes from a surface, you + * must wait for idle before changing those attributes later. Otherwise, the + * VAF unit may fetch the new data instead of the old data, causing corruption. + * + * \param[in] p3dChannel The 3d channel to program + * \param[in] attribs Description of vertex attributes (see above) + * \param[in] pSurf Surface that dynamic attributes will be fetched from + * + * \return The size in bytes of the static attribute data + */ +int nv3dVasSetup( + Nv3dChannelRec *p3dChannel, + const Nv3dVertexAttributeInfoRec *attribs, + const Nv3dStreamSurfaceRec *pSurf) +{ + /* This table is indexed by enum Nv3dVertexAttributeDataType. 
*/ + static const struct { + NvU32 size; + NvU32 setVertexAttributeA; + } attribTypeTable[] = { + + [NV3D_VERTEX_ATTRIBUTE_DATA_TYPE_2_32_FLOAT] = { + sizeof(float) * 2, + NV3D_C(9097, SET_VERTEX_ATTRIBUTE_A, + COMPONENT_BIT_WIDTHS, R32_G32) | + NV3D_C(9097, SET_VERTEX_ATTRIBUTE_A, NUMERICAL_TYPE, NUM_FLOAT), + }, + + [NV3D_VERTEX_ATTRIBUTE_DATA_TYPE_4_32_FLOAT] = { + sizeof(float) * 4, + NV3D_C(9097, SET_VERTEX_ATTRIBUTE_A, + COMPONENT_BIT_WIDTHS, R32_G32_B32_A32) | + NV3D_C(9097, SET_VERTEX_ATTRIBUTE_A, NUMERICAL_TYPE, NUM_FLOAT), + }, + + [NV3D_VERTEX_ATTRIBUTE_DATA_TYPE_4_16_UNORM] = { + sizeof(NvU16) * 4, + NV3D_C(9097, SET_VERTEX_ATTRIBUTE_A, + COMPONENT_BIT_WIDTHS, R16_G16_B16_A16) | + NV3D_C(9097, SET_VERTEX_ATTRIBUTE_A, NUMERICAL_TYPE, NUM_UNORM), + }, + + [NV3D_VERTEX_ATTRIBUTE_DATA_TYPE_4_8_UNORM] = { + sizeof(NvU8) * 4, + NV3D_C(9097, SET_VERTEX_ATTRIBUTE_A, + COMPONENT_BIT_WIDTHS, A8B8G8R8) | + NV3D_C(9097, SET_VERTEX_ATTRIBUTE_A, NUMERICAL_TYPE, NUM_UNORM), + }, + + [NV3D_VERTEX_ATTRIBUTE_DATA_TYPE_2_16_SSCALED] = { + sizeof(NvU32), + NV3D_C(9097, SET_VERTEX_ATTRIBUTE_A, + COMPONENT_BIT_WIDTHS, R16_G16) | + NV3D_C(9097, SET_VERTEX_ATTRIBUTE_A, NUMERICAL_TYPE, NUM_SSCALED), + }, + + }; + + NvPushChannelPtr p = p3dChannel->pPushChannel; + Nv3dVertexAttributeStreamRec *pStatic = + &p3dChannel->vertexStreams[NV3D_VERTEX_ATTRIBUTE_STREAM_STATIC]; + Nv3dVertexAttributeStreamRec *pDynamic = + &p3dChannel->vertexStreams[NV3D_VERTEX_ATTRIBUTE_STREAM_DYNAMIC]; + int staticOffset = 0, dynamicOffset = 0; + Nv3dVertexAttributeStreamRec tmpStreamRec; + NvU32 stride = 0; + NvU64 daEnableMask = 0, daSkipMask; + NvBool hasStaticAttribs = FALSE; + NvBool hasPositionAttrib = FALSE; + int i; + + // POSITION must be specified and must be a dynamic attribute. + for (i = 0; attribs[i].attributeType != NV3D_VERTEX_ATTRIBUTE_END; i++) { + if (attribs[i].attributeType != NV3D_VERTEX_ATTRIBUTE_POSITION) { + continue; + } + hasPositionAttrib = TRUE; + nvAssert(attribs[i].streamType == NV3D_VERTEX_ATTRIBUTE_STREAM_DYNAMIC); + } + if (!hasPositionAttrib) { + nvAssert(!"POSITION vertex attribute not specified."); + } + + // Configure the DA output skip mask so that it only fetches attributes for + // enabled streams. + for (i = 0; attribs[i].attributeType != NV3D_VERTEX_ATTRIBUTE_END; i++) { + const enum Nv3dVertexAttributeType attrib = attribs[i].attributeType; + // Always enable all four components of the value. This causes the + // DA to generate default values if there are not enough components + // in the pulled vertex data. This sets W=1 if W is missing. + // + // Otherwise, the value would come from the default the hardware + // generates as input to the vertex shader when that attribute is + // skipped in the DA, which is specified in the .mfs file as, "a + // default value is inserted". + // + // Note all attribute values are expected to be less than 16 (i.e., fit + // in MASK_A; attributes 16 through 31 would go in MASK_B). + nvAssert(attrib < 16); + daEnableMask |= 0xfULL << (4 * attrib); + } + daSkipMask = ~daEnableMask; + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, + NV9097_SET_DA_OUTPUT_ATTRIBUTE_SKIP_MASK_A(0), 2); + nvPushSetMethodData(p, NvU64_LO32(daSkipMask)); + nvPushSetMethodData(p, NvU64_HI32(daSkipMask)); + + // Configure the attributes to fetch from the streams. 
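+    //
+    // Worked example (hypothetical attribute list, for illustration only):
+    // a dynamic POSITION attribute and a dynamic texture-coordinate
+    // attribute, both NV3D_VERTEX_ATTRIBUTE_DATA_TYPE_2_32_FLOAT (8 bytes
+    // each), plus one static NV3D_VERTEX_ATTRIBUTE_DATA_TYPE_4_8_UNORM
+    // color attribute (4 bytes), yield dynamic offsets 0 and 8 with
+    // stride = 16, a static offset of 0, and a staticOffset return value
+    // of 4.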
+ for (i = 0; attribs[i].attributeType != NV3D_VERTEX_ATTRIBUTE_END; i++) { + + const enum Nv3dVertexAttributeType attrib = attribs[i].attributeType; + const enum Nv3dVertexAttributeDataType dataType = attribs[i].dataType; + const enum Nv3dVertexAttributeStreamType stream = attribs[i].streamType; + const NvU32 size = attribTypeTable[dataType].size; + const NvU32 setVertexAttributeA = + attribTypeTable[dataType].setVertexAttributeA; + + int offset; + + if (stream == NV3D_VERTEX_ATTRIBUTE_STREAM_STATIC) { + offset = staticOffset; + staticOffset += size; + hasStaticAttribs = TRUE; + } else { + nvAssert(stream == NV3D_VERTEX_ATTRIBUTE_STREAM_DYNAMIC); + offset = dynamicOffset; + dynamicOffset += size; + stride += size; + } + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, + NV9097_SET_VERTEX_ATTRIBUTE_A(attrib), 1); + nvPushSetMethodData(p, + NV3D_V(9097, SET_VERTEX_ATTRIBUTE_A, STREAM, stream) | + NV3D_C(9097, SET_VERTEX_ATTRIBUTE_A, SOURCE, ACTIVE) | + NV3D_V(9097, SET_VERTEX_ATTRIBUTE_A, OFFSET, offset) | + setVertexAttributeA); + } + + + // Advance the stream past any attribs used previously. + AdvanceStream(pStatic); + // Although we may have set a non-zero stride on a previous call to this + // function (mostly so the bookkeeping above works out), as far as the GPU + // is concerned we should program a stride of 0. + pStatic->stride = 0; + + // See if we need to wrap the static stream. + if (pStatic->current + staticOffset >= pStatic->end) { + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NV9097_WAIT_FOR_IDLE, 0); + + // Reset both the static and dynamic streams, since we know the GPU is + // done reading from both. + InitializeStream(p3dChannel, + NV3D_VERTEX_ATTRIBUTE_STREAM_STATIC, pStatic); + InitializeStream(p3dChannel, + NV3D_VERTEX_ATTRIBUTE_STREAM_DYNAMIC, pDynamic); + } else if (!pSurf) { + // Advance the dynamic stream past any attribs used previously (unless + // we just reset the stream). + AdvanceStream(pDynamic); + } + + /* override dynamic stream with pSurf */ + if (pSurf) { + pDynamic = &tmpStreamRec; + InitializeStreamFromSurf(pSurf, pDynamic); + } + + // Configure the streams. A stride of 0 makes it read the same attribute + // each time. + nvAssert(pStatic->stride == 0); + SetVertexStreamSurface(p3dChannel, + NV3D_VERTEX_ATTRIBUTE_STREAM_STATIC, + pStatic); + nvAssert(stride != 0); + pDynamic->stride = stride; + SetVertexStreamSurface(p3dChannel, + NV3D_VERTEX_ATTRIBUTE_STREAM_DYNAMIC, + pDynamic); + + // If there are static attributes, set up the constant buffer selector. + if (hasStaticAttribs) { + SelectCbForStream(p3dChannel, NV3D_VERTEX_ATTRIBUTE_STREAM_STATIC); + + // Override the static stream's "stride" so that the next time this + // function is called it will set staticStartOffset to right after the + // static data here. + pStatic->stride = staticOffset; + pStatic->nextLaunch = 1; + } + + return staticOffset; +} + +/*! + * Check if uploading the specified number of vertices will write past the end + * of the given vertex stream. + */ +static NvBool WillVertexDataWrap( + Nv3dVertexAttributeStreamRec *pStream, + int n) +{ + // >= here is intentional: It's illegal to set the constant buffer selector + // past the end of the constant buffer, which could happen if the last + // primitive drawn exactly fills the dynamic data stream and another + // primitive is drawn. Then the next call to nv3dVasSelectCbForVertexData() + // would cause a channel error. 
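+    //
+    // (Concretely, with illustrative numbers: if the stream is 4096 bytes, the
+    // stride is 16, and 'current' is at the stream base, then a draw for which
+    // stride * (nextLaunch + n) == 4096 exactly fills the stream; the >= makes
+    // that case wrap as well.)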
+ // + // Instead of trying to detect that case there, just disallow completely + // filling the stream so it wraps slightly earlier. + return pStream->current + pStream->stride * (pStream->nextLaunch + n) >= + pStream->end; +} + +/*! + * Launch vertices and update tracked vertex array state. + */ +static void DrawVertexArray(Nv3dChannelRec *p3dChannel, int numVerts) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + Nv3dVertexAttributeStreamRec *pDynamic = + &p3dChannel->vertexStreams[NV3D_VERTEX_ATTRIBUTE_STREAM_DYNAMIC]; + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_VERTEX_ARRAY_START, 2); + nvPushSetMethodData(p, pDynamic->nextLaunch); + nvPushSetMethodData(p, numVerts); // NV9097_DRAW_VERTEX_ARRAY + + pDynamic->nextLaunch += numVerts; +} + +/*! + * Reset both the static and dynamic vertex array streams to the base of the + * corresponding surfaces. + */ +static void WrapVertexStreams(Nv3dChannelRec *p3dChannel) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + Nv3dVertexAttributeStreamRec *pStatic = + &p3dChannel->vertexStreams[NV3D_VERTEX_ATTRIBUTE_STREAM_STATIC]; + const NvU64 gpuAddress = + nv3dGetVertexAttributestreamGpuAddress(p3dChannel, + NV3D_VERTEX_ATTRIBUTE_STREAM_DYNAMIC); + const NvU32 primMode = p3dChannel->currentPrimitiveMode; + + // Set the software tracking for the static stream so it starts over at the + // beginning next time nv3dVasSetup() is called, but leave the hardware + // configured to read the data that's already there, in case vertices + // submitted later still need it. + pStatic->current = pStatic->end; + pStatic->nextLaunch = 0; + + // The hardware can't handle changing the vertex stream offset inside a + // BEGIN / END block, so temporarily end now. + nv3dVasEnd(p3dChannel); + + // Wrap the dynamic vertex stream. + nvPushImmedVal(p, NVA06F_SUBCHANNEL_3D, NV9097_WAIT_FOR_IDLE, 0); + SetVertexStreamOffset(p3dChannel, NV3D_VERTEX_ATTRIBUTE_STREAM_DYNAMIC, + gpuAddress); + + nv3dVasBegin(p3dChannel, primMode); +} + +/*! + * Point the constant buffer selector at the next location for vertex data in + * the dynamic data surface. + */ +void nv3dVasSelectCbForVertexData(Nv3dChannelRec *p3dChannel) +{ + SelectCbForStream(p3dChannel, NV3D_VERTEX_ATTRIBUTE_STREAM_DYNAMIC); +} + +/*! + * Upload and draw vertices using the dynamic vertex data surface + * + * This function uploads data to the dynamic vertex attribute stream surface + * using inline constant buffer updates starting at the next free space in that + * surface, and then launches rendering. The number of vertices rendered is + * specified by 'numVerts'. + * + * Static data should have already been written to the static vertex attribute + * stream surface by the caller. + * + * If not enough space is available in the dynamic data surface, this function + * waits for idle before wrapping to the beginning of the surface to avoid + * conflicting with earlier rendering that might be in flight. + * + * It is up to the caller to send BEGIN and END methods around calls to this + * function. + * + * \param[in] p3dChannel The channel + * \param[in] data Data to upload + * \param[in] numVerts Number of vertices rendered + */ +void nv3dVasDrawInlineVerts( + Nv3dChannelRec *p3dChannel, + const void *data, + int numVerts) +{ + if (data != NULL) { + Nv3dVertexAttributeStreamRec *pDynamic = + &p3dChannel->vertexStreams[NV3D_VERTEX_ATTRIBUTE_STREAM_DYNAMIC]; + + // See if we need to wrap the dynamic stream. 
+ if (WillVertexDataWrap(pDynamic, numVerts)) { + WrapVertexStreams(p3dChannel); + } + + nv3dVasSelectCbForVertexData(p3dChannel); + nv3dPushConstants(p3dChannel, pDynamic->stride * numVerts, data); + } + + DrawVertexArray(p3dChannel, numVerts); +} + +NvBool nv3dVasMakeRoom( + Nv3dChannelRec *p3dChannel, + NvU32 pendingVerts, + NvU32 moreVerts) +{ + Nv3dVertexAttributeStreamRec *pDynamic = + &p3dChannel->vertexStreams[NV3D_VERTEX_ATTRIBUTE_STREAM_DYNAMIC]; + + const NvBool wrap = WillVertexDataWrap(pDynamic, pendingVerts + moreVerts); + + // If pendingVerts + moreVerts would exceed the dynamic vertex array buffer, + // flush it now and start over at the beginning. + if (wrap) { + DrawVertexArray(p3dChannel, pendingVerts); + WrapVertexStreams(p3dChannel); + + // Reset the constant buffer update pointer to the beginning of the + // dynamic vertex data buffer. + nv3dSetConstantBufferOffset(p3dChannel, 0); + } + + return wrap; +} + diff --git a/src/common/unix/nvidia-3d/src/nvidia-3d-volta.c b/src/common/unix/nvidia-3d/src/nvidia-3d-volta.c new file mode 100644 index 0000000..4eb1106 --- /dev/null +++ b/src/common/unix/nvidia-3d/src/nvidia-3d-volta.c @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-3d-volta.h" +#include "nvidia-3d.h" + +#include "class/clc397.h" +#include + +void _nv3dSetProgramOffsetVolta( + Nv3dChannelRec *p3dChannel, + NvU32 stage, + NvU32 offset) +{ + NvPushChannelPtr p = p3dChannel->pPushChannel; + const NvU64 gpuAddress = nv3dGetProgramGpuAddress(p3dChannel) + offset; + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, + NVC397_SET_PIPELINE_PROGRAM_ADDRESS_A(stage), 2); + nvPushSetMethodDataU64(p, gpuAddress); +} diff --git a/src/common/unix/nvidia-headsurface/nvidia-headsurface-constants.h b/src/common/unix/nvidia-headsurface/nvidia-headsurface-constants.h new file mode 100644 index 0000000..48fd49a --- /dev/null +++ b/src/common/unix/nvidia-headsurface/nvidia-headsurface-constants.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVIDIA_HEADSURFACE_CONSTANTS_H_ +#define _NVIDIA_HEADSURFACE_CONSTANTS_H_ + +/* Possible values for NvHsFragmentUniforms::resamplingMethod */ +#define NVIDIA_HEADSURFACE_RESAMPLING_METHOD_BICUBIC_TRIANGULAR 1 +#define NVIDIA_HEADSURFACE_RESAMPLING_METHOD_BICUBIC_BELL_SHAPED 2 +#define NVIDIA_HEADSURFACE_RESAMPLING_METHOD_BICUBIC_BSPLINE 3 +#define NVIDIA_HEADSURFACE_RESAMPLING_METHOD_BICUBIC_ADAPTIVE_TRIANGULAR 4 +#define NVIDIA_HEADSURFACE_RESAMPLING_METHOD_BICUBIC_ADAPTIVE_BELL_SHAPED 5 +#define NVIDIA_HEADSURFACE_RESAMPLING_METHOD_BICUBIC_ADAPTIVE_BSPLINE 6 + +/* Uniform sampler binding indices */ +#define NVIDIA_HEADSURFACE_UNIFORM_SAMPLER_BINDING_PRIMARY_TEX 0 +#define NVIDIA_HEADSURFACE_UNIFORM_SAMPLER_BINDING_CURSOR_TEX 1 +#define NVIDIA_HEADSURFACE_UNIFORM_SAMPLER_BINDING_BLEND_TEX 2 +#define NVIDIA_HEADSURFACE_UNIFORM_SAMPLER_BINDING_OFFSET_TEX 3 +#define NVIDIA_HEADSURFACE_UNIFORM_SAMPLER_BINDING_OVERLAY_TEX 4 +#define NVIDIA_HEADSURFACE_UNIFORM_SAMPLER_BINDING_LUT_TEX 5 +#define NVIDIA_HEADSURFACE_UNIFORM_SAMPLER_BINDING_NUM 6 + +#endif /* _NVIDIA_HEADSURFACE_CONSTANTS_H_ */ diff --git a/src/common/unix/nvidia-headsurface/nvidia-headsurface-types.h b/src/common/unix/nvidia-headsurface/nvidia-headsurface-types.h new file mode 100644 index 0000000..1e643d2 --- /dev/null +++ b/src/common/unix/nvidia-headsurface/nvidia-headsurface-types.h @@ -0,0 +1,67 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_HEADSURFACE_TYPES_H__ +#define __NVIDIA_HEADSURFACE_TYPES_H__ + +#include "nvtypes.h" +#include "nvidia-3d-types.h" + +typedef struct _NvHsVertexUniforms { + Nv3dVertexAttrib2U vertexScale; + Nv3dVertexAttrib2U primaryTextureScale; + Nv3dVertexAttrib2U primaryTextureBias; + Nv3dVertexAttrib2S cursorPosition; +} __attribute__((packed)) NvHsVertexUniforms; + +typedef struct _NvHsFragmentUniforms { // Byte offsets + Nv3dVertexAttrib2U vertexScale; // 0 + Nv3dVertexAttrib3U numLutEntries NV_ALIGN_BYTES(16); // 16 + Nv3dVertexAttrib2U primaryTextureBias NV_ALIGN_BYTES(8); // 32 + Nv3dVertexAttrib2S cursorPosition; // 40 + // Although this is really a 3x3 matrix, GLSL std140 uniform block + // layout says that the column stride is equal to a vec4. + Nv3dFloat transform[3][4]; // 48 + Nv3dVertexAttrib2F pixelShiftOffset; // 96 + Nv3dVertexAttrib3F luminanceCoefficient NV_ALIGN_BYTES(16); // 112 + Nv3dVertexAttrib2F chromaCoefficient NV_ALIGN_BYTES(8); // 128 + Nv3dFloat luminanceScale; // 136 + Nv3dFloat luminanceBlackLevel; // 140 + Nv3dFloat chrominanceScale; // 144 + Nv3dFloat chrominanceBlackLevel; // 148 + NvU32 useSatHue; // 152 + Nv3dFloat satCos; // 156 + int resamplingMethod; // 160 +} __attribute__((packed)) NvHsFragmentUniforms; + +/* + * The static warp mesh consists of four vertices, each vertex has six + * components: (XY, UVRQ). + */ +typedef struct { + struct { + Nv3dFloat x, y, u, v, r, q; + } vertex[4]; +} NvHsStaticWarpMesh; + +#endif /* __NVIDIA_HEADSURFACE_TYPES_H__ */ diff --git a/src/common/unix/nvidia-push/include/nvidia-push-priv-imports.h b/src/common/unix/nvidia-push/include/nvidia-push-priv-imports.h new file mode 100644 index 0000000..587c753 --- /dev/null +++ b/src/common/unix/nvidia-push/include/nvidia-push-priv-imports.h @@ -0,0 +1,203 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#if !defined(__NVIDIA_PUSH_PRIV_IMPORTS_H__) +#define __NVIDIA_PUSH_PRIV_IMPORTS_H__ + +#include "nvidia-push-types.h" + +static inline NvU32 nvPushImportRmApiControl( + NvPushDevicePtr pDevice, + NvU32 hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize) +{ + return pDevice->pImports->rmApiControl(pDevice, hObject, cmd, + pParams, paramsSize); +} + +static inline NvU32 nvPushImportRmApiAlloc( + NvPushDevicePtr pDevice, + NvU32 hParent, + NvU32 hObject, + NvU32 hClass, + void *pAllocParams) +{ + + return pDevice->pImports->rmApiAlloc(pDevice, hParent, hObject, hClass, + pAllocParams); +} + +static inline NvU32 nvPushImportRmApiFree( + NvPushDevicePtr pDevice, + NvU32 hParent, + NvU32 hObject) +{ + return pDevice->pImports->rmApiFree(pDevice, hParent, hObject); +} + +static inline NvU32 nvPushImportRmApiMapMemoryDma( + NvPushDevicePtr pDevice, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset) +{ + return pDevice->pImports->rmApiMapMemoryDma(pDevice, + hDevice, + hDma, + hMemory, + offset, + length, + flags, + pDmaOffset); +} + +static inline NvU32 nvPushImportRmApiUnmapMemoryDma( + NvPushDevicePtr pDevice, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU32 flags, + NvU64 dmaOffset) +{ + return pDevice->pImports->rmApiUnmapMemoryDma(pDevice, + hDevice, + hDma, + hMemory, + flags, + dmaOffset); + +} + +static inline NvU32 nvPushImportRmApiAllocMemory64( + NvPushDevicePtr pDevice, + NvU32 hParent, + NvU32 hMemory, + NvU32 hClass, + NvU32 flags, + void **ppAddress, + NvU64 *pLimit) +{ + return pDevice->pImports->rmApiAllocMemory64(pDevice, + hParent, + hMemory, + hClass, + flags, + ppAddress, + pLimit); +} + +static inline NvU32 nvPushImportRmApiVidHeapControl( + NvPushDevicePtr pDevice, + void *pVidHeapControlParms) +{ + return pDevice->pImports->rmApiVidHeapControl(pDevice, + pVidHeapControlParms); +} + +static inline NvU32 nvPushImportRmApiMapMemory( + NvPushDevicePtr pDevice, + NvU32 hDevice, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + void **ppLinearAddress, + NvU32 flags) +{ + return pDevice->pImports->rmApiMapMemory(pDevice, + hDevice, + hMemory, + offset, + length, + ppLinearAddress, + flags); +} + +static inline NvU32 nvPushImportRmApiUnmapMemory( + NvPushDevicePtr pDevice, + NvU32 hDevice, + NvU32 hMemory, + void *pLinearAddress, + NvU32 flags) +{ + return pDevice->pImports->rmApiUnmapMemory(pDevice, + hDevice, + hMemory, + pLinearAddress, + flags); +} + +static inline NvU64 nvPushImportGetMilliSeconds( + NvPushDevicePtr pDevice) +{ + return pDevice->pImports->getMilliSeconds(pDevice); +} + +static inline void nvPushImportYield( + NvPushDevicePtr pDevice) +{ + pDevice->pImports->yield(pDevice); +} + +static inline NvBool nvPushImportWaitForEvent( + NvPushDevicePtr pDevice, + NvPushImportEvent *pEvent, + NvU64 timeout) +{ + return pDevice->pImports->waitForEvent(pDevice, pEvent, timeout); +} + +static inline void nvPushImportEmptyEventFifo( + NvPushDevicePtr pDevice, + NvPushImportEvent *pEvent) +{ + pDevice->pImports->emptyEventFifo(pDevice, pEvent); +} + +static inline void nvPushImportChannelErrorOccurred( + NvPushChannelPtr pChannel, + NvU32 channelErrCode) +{ + pChannel->pDevice->pImports->channelErrorOccurred(pChannel, channelErrCode); +} + +static inline void nvPushImportPushbufferWrapped( + NvPushChannelPtr pChannel) +{ + pChannel->pDevice->pImports->pushbufferWrapped(pChannel); +} + +#define nvPushImportLogError(_pDevice, ...) 
\
+    (_pDevice)->pImports->logError((_pDevice), __VA_ARGS__)
+
+#if defined(DEBUG)
+#define nvPushImportLogNvDiss(_pChannel, ...) \
+    (_pChannel)->pDevice->pImports->logNvDiss((_pChannel), __VA_ARGS__)
+#endif /* DEBUG */
+
+#endif /* __NVIDIA_PUSH_PRIV_IMPORTS_H__ */
diff --git a/src/common/unix/nvidia-push/include/nvidia-push-priv.h b/src/common/unix/nvidia-push/include/nvidia-push-priv.h
new file mode 100644
index 0000000..db76c02
--- /dev/null
+++ b/src/common/unix/nvidia-push/include/nvidia-push-priv.h
@@ -0,0 +1,122 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_PUSH_PRIV_H__
+#define __NVIDIA_PUSH_PRIV_H__
+
+#include "nvmisc.h" // NV_ALIGN_UP
+#include "class/cla16f.h" // NVA16F_GP_ENTRY__SIZE
+
+/*
+ * Push buffer constants
+ * "The pushbuffer" consists of several regions packed into a single memory
+ * allocation. In order, they are:
+ * 1. The "main" pushbuffer. Most of the driver pushes methods here;
+ * 2. GPFIFO entries;
+ * 3. The "progress tracker" pushbuffer. This is used by the DMA kickoff code
+ *    as a reserved area to put semaphore release methods, which we use to
+ *    track HOST's progress fetching the pushbuffer. We also use this to
+ *    work around hardware bug 1667921.
+ */
+
+/* Offset of the GPFIFO entries: region (2) above. */
+static inline NvU32 __nvPushGpFifoOffset(const NvPushChannelRec *pChannel)
+{
+    nvAssert(pChannel->main.sizeInBytes != 0);
+    return NV_ALIGN_UP(pChannel->main.sizeInBytes, NVA16F_GP_ENTRY__SIZE);
+}
+
+/*
+ * We need to align each set of methods in the progress tracker pushbuffer to
+ * 128 bytes so that we avoid HW bug 1667921 (on chips that are affected).
+ * This is used for both the start of the GPFIFO segment _and_ the size (for
+ * each GPFIFO entry).
+ */
+#define NV_ALIGN_LBDAT_EXTRA_BUG 128
+/*
+ * Offset of the progress tracker pushbuffer: region (3) above.
+ *
+ * Note that we always use the appropriate alignment to WAR the LBDAT_EXTRA bug
+ * for the offset. Although this is only necessary on some chips, it's simpler
+ * to always use this alignment.
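+ *
+ * As a worked example with illustrative numbers (assuming the usual 8-byte
+ * NVA16F_GP_ENTRY__SIZE): with a 4096-byte main pushbuffer and 64 GPFIFO
+ * entries, __nvPushGpFifoOffset() returns NV_ALIGN_UP(4096, 8) = 4096, and
+ * __nvPushProgressTrackerOffset() returns NV_ALIGN_UP(4096 + 64 * 8, 128) =
+ * 4608.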
+ */ +static inline NvU32 __nvPushProgressTrackerOffset( + const NvPushChannelRec *pChannel) +{ + const NvU32 gpFifoOffset = __nvPushGpFifoOffset(pChannel); + const NvU32 gpFifoLength = + pChannel->numGpFifoEntries * NVA16F_GP_ENTRY__SIZE; + + nvAssert(gpFifoLength != 0); + + return NV_ALIGN_UP(gpFifoOffset + gpFifoLength, NV_ALIGN_LBDAT_EXTRA_BUG); +} + +/* We always write two GPFIFO entries: one for the main pushbuffer, and one + * for the progress tracker pushbuffer. */ +#define NV_PUSH_NUM_GPFIFO_ENTRIES_PER_KICKOFF 2 + +/* + * Encoding for the progress tracker semaphore payload. + * _GET stores dwords, rather than bytes. + * _GP_GET stores the number of "pairs" of gpFifo entries. + */ +#define NV_PUSH_PROGRESS_TRACKER_SEMAPHORE_GET 17:0 +#define NV_PUSH_PROGRESS_TRACKER_SEMAPHORE_GP_GET 31:18 + +/* + * The number of 0080 RM devices for the given NvPushDevice. + * This is 1 for RM SLI and numSubDevices for client SLI. + */ +static inline int +__nvPushGetNumDevices(const NvPushDeviceRec *pDevice) +{ + if (pDevice->clientSli) { + return pDevice->numSubDevices; + } + return 1; +} + +/* + * The 0080 RM device index for the given subdevice index. + * This is 0 for RM SLI, and the subdevice index for client SLI. + */ +static inline int +__nvPushGetDeviceIndex(const NvPushDeviceRec *pDevice, int sd) +{ + if (pDevice->clientSli) { + return sd; + } + return 0; +} + +NvU32 __nvPushProgressTrackerEntrySize(const NvPushDeviceRec *pDevice); + +NvBool __nvPushTestPushBuffer(NvPushChannelPtr p); + +NvBool __nvPushGetHal( + const NvPushAllocDeviceParams *pParams, + NvU32 channelClass, + NvPushHal *pHal); + +#endif /* __NVIDIA_PUSH_PRIV_H__ */ diff --git a/src/common/unix/nvidia-push/interface/nvidia-push-init.h b/src/common/unix/nvidia-push/interface/nvidia-push-init.h new file mode 100644 index 0000000..d0988f6 --- /dev/null +++ b/src/common/unix/nvidia-push/interface/nvidia-push-init.h @@ -0,0 +1,267 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file contains nvidia-push device and channel setup structures and + * functions. + */ + +#ifndef __NVIDIA_PUSH_INIT_H__ +#define __NVIDIA_PUSH_INIT_H__ + + +#include "nvidia-push-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * Return the index of the first class table element supported on this device. 
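+ *
+ * For example (an illustrative sketch; the wrapper struct and its contents
+ * are hypothetical), a caller might embed NvPushSupportedClass as the first
+ * field of each of its table elements:
+ *
+ *     typedef struct {
+ *         NvPushSupportedClass base;
+ *         const char *name;
+ *     } MyClassTableEntry;
+ *
+ *     static const MyClassTableEntry table[] = { ... };
+ *
+ *     int i = nvPushGetSupportedClassIndex(pDevice, table,
+ *                                          sizeof(table[0]),
+ *                                          ARRAY_LEN(table));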
+ * + * pClassTable is an array where each element corresponds to a class + * the caller supports. The first field in the array element should + * be an NvPushSupportedClass struct. There may be additional fields + * in the array element that are specific to the caller. The + * classTableStride argument indicates the size in bytes of one array + * element, such that nvPushGetSupportedClassIndex() can step from one + * array element to the next by adding classTableStride. + * + * nvPushGetSupportedClassIndex() will query the list of classes + * supported by this device, and return the index of the first + * pClassTable array element that is supported by the device. -1 is + * returned if there is no match. + * + * \param pDevice The nvidia-push device whose class list to consider. + * \param pClassTable The table of classes supported. + * \param classTableStride The size in bytes of one table element. + * \param classTableLength The number of table elements. + * + * \return The index of the first table element that matches, or -1. + */ + +typedef struct _NvPushSupportedClass { + NvU32 classNumber; + NVAModelConfig amodelConfig; +} NvPushSupportedClass; + +int nvPushGetSupportedClassIndex( + NvPushDevicePtr pDevice, + const void *pClassTable, + size_t classTableStride, + size_t classTableLength); + +/* + * Parameter structure populated by the host driver when requesting an + * NvPushDeviceRec. + */ +typedef struct _NvPushAllocDeviceParams { + + /* Pointer to host device, filled by host driver as needed */ + void *hostDevice; + + const NvPushImports *pImports; + + /* The host driver's RMAPI client (NV0000) handle. */ + NvU32 clientHandle; + + /* TRUE iff this device is in client-side SLI mode. */ + NvBool clientSli; + + /* The number of subDevices allocated by the host driver. */ + NvU32 numSubDevices; + + struct { + /* The host driver's RMAPI device (NV0080) handles */ + NvU32 deviceHandle; + /* The host driver's RMAPI subDevice (NV2080) handles. */ + NvU32 handle; + /* FERMI_VASPACE_A object in which channels on this device should be + * mapped. */ + NvU32 gpuVASpaceObject; + /* ctxDma handle to be used with MapMemoryDma. */ + NvU32 gpuVASpace; + } subDevice[NV_MAX_SUBDEVICES]; + + struct { + /* + * The Amodel configuration requested by the host driver. + */ + NVAModelConfig config; + } amodel; + + /* Whether channels on this device will be used to program Tegra. */ + NvBool isTegra; + + /* + * Pool of RMAPI object handles. The host driver should populate + * all of the elements in this array before calling + * nvPushAllocDevice(), and release all of these handles if + * nvPushAllocDevice() fails, or after calling nvPushFreeDevice(). + * + * The number of possible handles is: + * + * hUserMode (per-sd) + */ +#define NV_PUSH_DEVICE_HANDLE_POOL_NUM \ + (NV_MAX_SUBDEVICES) + + NvU32 handlePool[NV_PUSH_DEVICE_HANDLE_POOL_NUM]; + + NvU32 numClasses; + const NvU32 *supportedClasses; + + NvPushConfidentialComputeMode confidentialComputeMode; +} NvPushAllocDeviceParams; + +NvBool nvPushAllocDevice( + const NvPushAllocDeviceParams *pParams, + NvPushDevicePtr pDevice); + +void nvPushFreeDevice( + NvPushDevicePtr pDevice); + + +/* + * Parameter structure populated by the host driver when requesting an + * NvPushChannelRec. + */ +typedef struct _NvPushAllocChannelParams { + + /* NV2080_ENGINE_TYPE_ */ + NvU32 engineType; + + /* + * Whether to log the pushbuffer in nvdiss format, by calling + * nvPushImportLogNvDiss(). 
+     */
+    NvBool logNvDiss;
+
+    /*
+     * Normally, the pushbuffer utility library will time out when
+     * waiting for things (space in the pushbuffer, waiting for
+     * notifiers, etc.). When the channel is created with
+     * noTimeout=TRUE, the channel will wait indefinitely for these
+     * things.
+     */
+    NvBool noTimeout;
+
+    /*
+     * Normally, the pushbuffer utility library checks for channel
+     * errors and reports them to the host driver by calling
+     * nvPushImportChannelErrorOccurred(). Host drivers can set
+     * ignoreChannelErrors=TRUE to disable this check.
+     */
+    NvBool ignoreChannelErrors;
+
+    /*
+     * This flag specifies whether the channel is intended to be used for
+     * encryption/decryption of data between SYSMEM <-> VIDMEM. Only CE
+     * & SEC2 channels are capable of handling encrypted content.
+     */
+    NvBool secureChannel;
+
+    /*
+     * DIFR stands for Display Idle Frame Refresh, in which a CE is used to
+     * prefetch framebuffer pixels into the GPU's L2 cache. The prefetch
+     * operation requires the channel to be specifically configured for DIFR
+     * prefetching. This flag indicates whether this channel is intended to
+     * be used for just that.
+     */
+    NvBool difrPrefetch;
+
+    /*
+     * Host drivers should specify how many notifiers they want. The
+     * pushbuffer utility library will allocate memory to hold this
+     * many notifiers on each subDevice, plus an error notifier.
+     *
+     * The 'notifierIndex' argument to, e.g., nvPushGetNotifierCpuAddress()
+     * should be in the range [0,numNotifiers).
+     */
+    NvU8 numNotifiers;
+
+    /*
+     * The size of the "main" pushbuffer in bytes. Note this does not
+     * include space for gpfifo entries or progress tracking:
+     * nvidia-push will implicitly pad the total pushbuffer for those
+     * items.
+     */
+    NvU32 pushBufferSizeInBytes;
+
+    /*
+     * Pool of RMAPI object handles. The host driver should populate
+     * all of the elements in this array before calling
+     * nvPushAllocChannel(), and release all of these handles if
+     * nvPushAllocChannel() fails, or after calling nvPushFreeChannel().
+     *
+     * The number of possible handles is:
+     *
+     *   progressSemaphore hMemory (per-sd) +
+     *   pushbufferHandle (per-device) +
+     *   pushbufferVAHandle (per-sd) +
+     *   userD.hMemory (per-sd) +
+     *   channelHandle (per-sd) +
+     *   notifier memoryHandle (per-device) +
+     *   error notifier ctxDma (per-device)
+     */
+#define NV_PUSH_CHANNEL_HANDLE_POOL_NUM \
+    (NV_MAX_SUBDEVICES + \
+     1 + \
+     NV_MAX_SUBDEVICES + \
+     NV_MAX_SUBDEVICES + \
+     NV_MAX_SUBDEVICES + \
+     1 + \
+     1)
+
+    NvU32 handlePool[NV_PUSH_CHANNEL_HANDLE_POOL_NUM];
+
+    /*
+     * A pointer to an NvPushDeviceRec, initialized with
+     * nvPushAllocDevice(). One or more NvPushChannelRecs may share
+     * the same NvPushDevicePtr.
+     *
+     * This pDevice should be kept allocated until all
+     * NvPushChannelRecs using it have been freed.
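+     *
+     * An illustrative ordering (variable names hypothetical):
+     *
+     *     nvPushAllocDevice(&devParams, &dev);
+     *     chParams.pDevice = &dev;
+     *     nvPushAllocChannel(&chParams, &channel);
+     *     ... push methods, kick off, wait ...
+     *     nvPushFreeChannel(&channel);
+     *     nvPushFreeDevice(&dev);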
+ */ + NvPushDevicePtr pDevice; + +} NvPushAllocChannelParams; + +NvBool nvPushAllocChannel( + const NvPushAllocChannelParams *pParams, + NvPushChannelPtr buffer); + +void nvPushFreeChannel( + NvPushChannelPtr buffer); + + +void nvPushInitWaitForNotifier( + NvPushChannelPtr pChannel, + NvU32 notifierIndex, + NvU32 subdeviceMask); + +#ifdef __cplusplus +}; +#endif + +#endif /*__NVIDIA_PUSH_INIT_H__ */ diff --git a/src/common/unix/nvidia-push/interface/nvidia-push-methods.h b/src/common/unix/nvidia-push/interface/nvidia-push-methods.h new file mode 100644 index 0000000..6e06a7b --- /dev/null +++ b/src/common/unix/nvidia-push/interface/nvidia-push-methods.h @@ -0,0 +1,259 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file contains macros and inline functions used to actually program + * methods. + */ + +#ifndef __NVIDIA_PUSH_METHODS_H__ +#define __NVIDIA_PUSH_METHODS_H__ + +#include "nvidia-push-types.h" + +#include "class/cla16f.h" + +#ifdef __cplusplus +extern "C" { +#endif + +static inline void __nvPushSetMethodDataSegment(NvPushChannelSegmentPtr s, const NvU32 data) +{ + s->buffer->u = data; + s->buffer++; +} + +static inline void nvPushSetMethodData(NvPushChannelPtr p, const NvU32 data) +{ + __nvPushSetMethodDataSegment(&p->main, data); +} + +#if NV_PUSH_ALLOW_FLOAT +static inline void __nvPushSetMethodDataSegmentF(NvPushChannelSegmentPtr s, const float data) +{ + s->buffer->f = data; + s->buffer++; +} + +static inline void nvPushSetMethodDataF(NvPushChannelPtr p, const float data) +{ + __nvPushSetMethodDataSegmentF(&p->main, data); +} +#endif + +static inline void __nvPushSetMethodDataSegmentU64(NvPushChannelSegmentPtr s, const NvU64 data) +{ + __nvPushSetMethodDataSegment(s, NvU64_HI32(data)); + __nvPushSetMethodDataSegment(s, NvU64_LO32(data)); +} + +static inline void nvPushSetMethodDataU64(NvPushChannelPtr p, const NvU64 data) +{ + __nvPushSetMethodDataSegmentU64(&p->main, data); +} + +/* Little-endian: least significant bits first. 
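+ *
+ * For example, data 0x1122334455667788 is emitted as 0x55667788 followed by
+ * 0x11223344; the non-LE variant above emits the high word first.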
+ */
+static inline void __nvPushSetMethodDataSegmentU64LE(NvPushChannelSegmentPtr s, const NvU64 data)
+{
+    __nvPushSetMethodDataSegment(s, NvU64_LO32(data));
+    __nvPushSetMethodDataSegment(s, NvU64_HI32(data));
+}
+
+static inline void nvPushSetMethodDataU64LE(NvPushChannelPtr p, const NvU64 data)
+{
+    __nvPushSetMethodDataSegmentU64LE(&p->main, data);
+}
+
+void __nvPushMoveDWORDS(NvU32* dst, const NvU32* src, int dwords);
+
+static inline void
+nvDmaMoveDWORDS(NvPushChannelUnion *dst, const NvU32* src, int dwords)
+{
+    // The 'dst' argument is an array of NvPushChannelUnion; it is safe
+    // to treat this as an array of NvU32, as long as NvU32 and
+    // NvPushChannelUnion are the same size.
+    ct_assert(sizeof(NvU32) == sizeof(NvPushChannelUnion));
+    __nvPushMoveDWORDS((NvU32 *)dst, src, dwords);
+}
+
+static inline void nvPushInlineData(NvPushChannelPtr p, const void *data,
+                                    size_t dwords)
+{
+    nvDmaMoveDWORDS(p->main.buffer, (const NvU32 *)data, dwords);
+    p->main.buffer += dwords;
+}
+
+/*!
+ * Return the maximum method count: the maximum number of dwords that can be
+ * specified in the nvPushMethod() family of macros.
+ */
+static inline NvU32 nvPushMaxMethodCount(const NvPushChannelRec *p)
+{
+    /*
+     * The number of methods that can be specified in one NVA16F_DMA_METHOD
+     * header is limited by the bit field size of NVA16F_DMA_METHOD_COUNT: 28:16
+     * (i.e., maximum representable value 8191).
+     */
+    const NvU32 maxFromMethodCountMask = DRF_MASK(NVA16F_DMA_METHOD_COUNT);
+
+    /*
+     * Further, the method count must be smaller than half the total pushbuffer
+     * size minus one, to correctly distinguish empty and full pushbuffers. See
+     * nvPushHeader() for details.
+     */
+    const NvU32 pushBufferSizeInBytes = p->main.sizeInBytes;
+    const NvU32 pushBufferSizeInDWords = pushBufferSizeInBytes / 4;
+    const NvU32 pushBufferHalfSizeInDWords = pushBufferSizeInDWords / 2;
+
+    /*
+     * Subtract two from pushBufferHalfSizeInDWords:
+     *
+     * -1 to distinguish pushbuffer empty from full (see above).
+     *
+     * -1 to be smaller than, rather than equal to, the above constraints.
+     */
+    const NvU32 maxFromPushBufferSize = pushBufferHalfSizeInDWords - 2;
+
+    return NV_MIN(maxFromMethodCountMask, maxFromPushBufferSize);
+}
+
+// These macros verify that the values used in the methods fit
+// into the defined ranges.
+#define ASSERT_DRF_DEF(d, r, f, n) \
+    nvAssert(!(~DRF_MASK(NV ## d ## r ## f) & (NV ## d ## r ## f ## n)))
+#define ASSERT_DRF_NUM(d, r, f, n) \
+    nvAssert(!(~DRF_MASK(NV ## d ## r ## f) & (n)))
+
+#if defined(DEBUG)
+#include "class/clc36f.h" /* VOLTA_CHANNEL_GPFIFO_A */
+
+/*
+ * When pushing GPFIFO methods (NVA16F_SEMAPHORE[ABCD]), all four
+ * methods must be pushed together. If the four methods are not
+ * pushed together, nvidia-push might wrap, injecting its progress
+ * tracking semaphore release methods in the middle, and perturb the
+ * NVA16F_SEMAPHOREA_OFFSET_UPPER and NVA16F_SEMAPHOREB_OFFSET_LOWER
+ * channel state.
+ *
+ * Return whether the methods described by the arguments include some,
+ * but not all, of A, B, C, and D. I.e., if the range starts at B, C,
+ * or D, or if the range ends at A, B, or C.
+ *
+ * Perform a similar check for Volta+ semaphore methods
+ * NVC36F_SEM_ADDR_LO..NVC36F_SEM_EXECUTE. Note that we always check for both
+ * sets of methods, regardless of the GPU we're actually running on. This is
+ * okay since:
+ * a) the NVC36F_SEM_ADDR_LO..NVC36F_SEM_EXECUTE method offsets were not used
+ *    for anything from (a16f..c36f].
+ * b) the SEMAPHORE[ABCD] methods still exist on the newer classes (they + * haven't been reused for anything else) + */ +static inline NvBool __nvPushStartSplitsSemaphore( + NvU32 method, + NvU32 count, + NvU32 secOp) +{ + ct_assert(NVA16F_SEMAPHOREA < NVA16F_SEMAPHORED); + ct_assert(NVC36F_SEM_ADDR_LO < NVC36F_SEM_EXECUTE); + + /* + * compute start and end as inclusive; if not incrementing, we + * assume end==start + */ + const NvU32 start = method; + const NvU32 end = (secOp == NVA16F_DMA_SEC_OP_INC_METHOD) ? + (method + ((count - 1) * 4)) : method; + + return ((start > NVA16F_SEMAPHOREA) && (start <= NVA16F_SEMAPHORED)) || + ((end >= NVA16F_SEMAPHOREA) && (end < NVA16F_SEMAPHORED)) || + ((start > NVC36F_SEM_ADDR_LO) && (start <= NVC36F_SEM_EXECUTE)) || + ((end >= NVC36F_SEM_ADDR_LO) && (end < NVC36F_SEM_EXECUTE)); +} +#endif /* DEBUG */ + +/* + * Note that _count+1 must be less than half the total pushbuffer size. This is + * required by GPFIFO because we can't reliably tell when we can write all the + * way to the end of the pushbuffer if we wrap (see bug 232454). This + * assumption ensures that there will be enough space once GET reaches PUT. + */ +#define nvPushHeader(_push_buffer, _segment, _count, _header) do { \ + NvPushChannelSegmentPtr _pSegment = &(_push_buffer)->_segment; \ + nvAssert(((_count)+1) < ((_pSegment)->sizeInBytes / 8)); \ + if ((_pSegment)->freeDwords < ((_count)+1)) \ + __nvPushMakeRoom((_push_buffer), (_count) + 1); \ + __nvPushSetMethodDataSegment((_pSegment), (_header)); \ + (_pSegment)->freeDwords -= ((_count)+1); \ +} while(0) + +#define __nvPushStart(_push_buffer, _segment, _subch, _offset, _count, _opcode) \ +{ \ + nvAssert(!__nvPushStartSplitsSemaphore( \ + (_offset), \ + (_count), \ + NVA16F_DMA_SEC_OP ## _opcode)); \ + ASSERT_DRF_DEF(A16F, _DMA, _SEC_OP, _opcode); \ + ASSERT_DRF_NUM(A16F, _DMA, _METHOD_COUNT, _count); \ + ASSERT_DRF_NUM(A16F, _DMA, _METHOD_SUBCHANNEL, _subch); \ + ASSERT_DRF_NUM(A16F, _DMA, _METHOD_ADDRESS, (_offset) >> 2); \ + nvPushHeader((_push_buffer), _segment, (_count), \ + DRF_DEF(A16F, _DMA, _SEC_OP, _opcode) | \ + DRF_NUM(A16F, _DMA, _METHOD_COUNT, _count) | \ + DRF_NUM(A16F, _DMA, _METHOD_SUBCHANNEL, _subch) | \ + DRF_NUM(A16F, _DMA, _METHOD_ADDRESS, (_offset) >> 2)); \ +} + +// The GPU can encode a 13-bit constant method/data pair in a single DWORD. +#define nvPushImmedValSegment(_push_buffer, _segment, _subch, _offset, _data) { \ + ASSERT_DRF_NUM(A16F, _DMA, _IMMD_DATA, _data); \ + ASSERT_DRF_NUM(A16F, _DMA, _METHOD_SUBCHANNEL, _subch); \ + ASSERT_DRF_NUM(A16F, _DMA, _METHOD_ADDRESS, (_offset) >> 2); \ + if ((_push_buffer)->_segment.freeDwords < 1) \ + __nvPushMakeRoom((_push_buffer), 1); \ + __nvPushSetMethodDataSegment(&(_push_buffer)->_segment, \ + DRF_DEF(A16F, _DMA, _SEC_OP, _IMMD_DATA_METHOD) | \ + DRF_NUM(A16F, _DMA, _IMMD_DATA, _data) | \ + DRF_NUM(A16F, _DMA, _METHOD_SUBCHANNEL, _subch) | \ + DRF_NUM(A16F, _DMA, _METHOD_ADDRESS, (_offset) >> 2)); \ + (_push_buffer)->_segment.freeDwords--; \ +} + +#define nvPushImmedVal(_push_buffer, _subch, _offset, _data) \ + nvPushImmedValSegment(_push_buffer, main, _subch, _offset, _data) + +#define nvPushImmed(_push_buffer, _subch, _offset, _val) \ + nvPushImmedVal(_push_buffer, _subch, _offset, _offset##_V_##_val) + +// Method headers. 
+#define nvPushMethod(_push_buffer, _subch, _offset, _count) \
+    __nvPushStart(_push_buffer, main, _subch, _offset, _count, _INC_METHOD)
+#define nvPushMethodNoIncr(_push_buffer, _subch, _offset, _count) \
+    __nvPushStart(_push_buffer, main, _subch, _offset, _count, _NON_INC_METHOD)
+#define nvPushMethodOneIncr(_push_buffer, _subch, _offset, _count) \
+    __nvPushStart(_push_buffer, main, _subch, _offset, _count, _ONE_INC)
+
+#ifdef __cplusplus
+};
+#endif
+
+#endif /* __NVIDIA_PUSH_METHODS_H__ */
diff --git a/src/common/unix/nvidia-push/interface/nvidia-push-types.h b/src/common/unix/nvidia-push/interface/nvidia-push-types.h
new file mode 100644
index 0000000..6fdbd8c
--- /dev/null
+++ b/src/common/unix/nvidia-push/interface/nvidia-push-types.h
@@ -0,0 +1,295 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file contains core definitions (structures and enums) for use in the
+ * rest of the nvidia-push code.
+ */
+
+#ifndef __NVIDIA_PUSH_TYPES_H__
+#define __NVIDIA_PUSH_TYPES_H__
+
+#include <stddef.h> /* size_t */
+
+
+
+#include "nvtypes.h"
+#include "nvlimits.h"
+#include "nvmisc.h"
+#include "nvgputypes.h" /* NvNotificationRec */
+#include "nv_common_utils.h" /* TRUE/FALSE */
+#include "nvctassert.h"
+#include "nv_assert.h" /* nvAssert() */
+#include "nv_amodel_enum.h" /* NVAModelConfig */
+#include "nvos.h" /* NV_CHANNELGPFIFO_NOTIFICATION_* */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NV_PUSH_NOTIFIER_SHORT_TIMEOUT 3000 /* in milliseconds (i.e., 3 seconds) */
+#define NV_PUSH_NOTIFIER_LONG_TIMEOUT 10000 /* in milliseconds (i.e., 10 seconds) */
+
+# define NV_PUSH_PRINTF_FORMAT_ARGUMENT
+# define NV_PUSH_PRINTF_ATTRIBUTES(_fmt,_var) \
+    __attribute__((format (printf, _fmt, _var)))
+
+
+#if defined(NV_PUSH_IN_KERNEL)
+# define NV_PUSH_ALLOW_FLOAT 0
+#else
+# define NV_PUSH_ALLOW_FLOAT 1
+#endif
+
+typedef union _NvPushChannelUnion
+{
+    NvU32 u;
+#if NV_PUSH_ALLOW_FLOAT
+    float f;
+#endif
+} NvPushChannelUnion;
+
+typedef enum _NvPushConfidentialComputeMode {
+    /* Confidential computing is not in use. */
+    NV_PUSH_CONFIDENTIAL_COMPUTE_MODE_NONE,
+
+    /*
+     * The confidential compute mode of operation is Hopper Confidential
+     * Compute (HCC).
+ */ + NV_PUSH_CONFIDENTIAL_COMPUTE_MODE_HCC, +} NvPushConfidentialComputeMode; + +typedef struct _NvPushChannelRec NvPushChannelRec; +typedef struct _NvPushChannelRec *NvPushChannelPtr; + +typedef struct _nv_push_hal { + void (*kickoff)(struct _NvPushChannelRec*, NvU32 oldGpPut, NvU32 newGpPut); + void (*releaseTimelineSemaphore)(NvPushChannelPtr, void *cpuAddress, NvU64 gpuAddress, NvU64 val); + void (*acquireTimelineSemaphore)(NvPushChannelPtr, NvU64 gpuAddress, NvU64 val); + struct { + /* Requires USERD memory to be specified at channel allocation */ + NvU32 clientAllocatesUserD :1; + + /* On Tegra, we currently need to allocate double the requested GPFIFO + * entries */ + NvU32 allocateDoubleSizeGpFifo :1; + + /* Use Volta+ semaphore methods */ + NvU32 voltaSemMethods :1; + + NvU32 extendedBase :1; + } caps; +} NvPushHal; + +typedef struct _NvPushDeviceRec { + + void *hostDevice; /* Provided by the host driver */ + + NvBool hostLBoverflowBug1667921 : 1; + NvBool clientSli : 1; /* Provided by the host driver */ + NvBool hasFb : 1; /* Computed from supportedClasses[] */ + + NvU32 clientHandle; /* Provided by the host driver */ + NvU32 numSubDevices; /* Provided by the host driver */ + + NvU32 numClasses; /* Provided by the host driver */ + const NvU32 *supportedClasses;/* Provided by the host driver */ + + struct { + NvU32 handle; /* Provided by the host driver */ + NvU32 deviceHandle; /* Provided by the host driver */ + NvU32 gpuVASpaceObject;/* Provided by the host driver */ + NvU32 gpuVASpaceCtxDma;/* Provided by the host driver */ + NvU32 hUserMode; /* VOLTA_USERMODE_A object */ + void *pUserMode; /* VOLTA_USERMODE_A mapping */ + } subDevice[NV_MAX_SUBDEVICES]; + + NvU32 gpfifoClass; + size_t userDSize; + + NVAModelConfig amodelConfig; + + NvPushHal hal; + const struct _NvPushImports *pImports; + + /* Provided by the host driver */ + NvPushConfidentialComputeMode confidentialComputeMode; +} NvPushDeviceRec, *NvPushDevicePtr; + + +typedef struct _NvPushChannelSegmentRec +{ + NvU32 freeDwords; // free space (in dwords) + NvU32 sizeInBytes; // Push buffer size (in bytes) + NvU32 putOffset; // Offset of last kickoff + NvPushChannelUnion *base; // Push buffer start pointer + NvPushChannelUnion *buffer; // Push buffer current pointer + NvU64 gpuMapOffset; +} NvPushChannelSegmentRec, *NvPushChannelSegmentPtr; + +struct _NvPushChannelRec +{ + NvBool initialized : 1; + NvBool logNvDiss : 1; + NvBool noTimeout : 1; + NvBool ignoreChannelErrors : 1; + NvBool channelErrorOccurred : 1; + + NvU32 channelHandle[NV_MAX_SUBDEVICES]; + NvU32 pushbufferHandle; + NvU32 pushbufferVAHandle[NV_MAX_SUBDEVICES]; + NvPushChannelSegmentRec main; + + void *control[NV_MAX_SUBDEVICES]; + NvU32 numGpFifoEntries; + NvU32 *gpfifo; // GPFIFO entries + NvU32 gpPutOffset; // GPFIFO entries last kicked off offset + NvU32 currentSubDevMask; + + NvPushChannelSegmentRec progressTracker; + struct { + NvU32 handle[NV_MAX_SUBDEVICES]; + void *ptr[NV_MAX_SUBDEVICES]; + NvU64 gpuVA; + } progressSemaphore; + + struct { + NvU32 hMemory; + } userD[NV_MAX_SUBDEVICES]; + + struct { + NvU8 num; + NvU32 memoryHandle; + NvNotification *cpuAddress; + NvU64 gpuAddress; + NvU32 errorCtxDma; + } notifiers; + + NvPushDeviceRec *pDevice; +}; + +/* Opaque type, only used by pointer within the push buffer utility library. */ +typedef struct _NvPushImportEvent NvPushImportEvent; + +/* Table of function pointers to be provided by the nvidia-push host driver. 
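+ *
+ * A host driver typically defines one instance and points
+ * NvPushAllocDeviceParams::pImports at it; for example (an illustrative
+ * sketch; the callback implementations are assumed to exist):
+ *
+ *     static const NvPushImports myImports = {
+ *         .rmApiControl    = MyRmApiControl,
+ *         .rmApiAlloc      = MyRmApiAlloc,
+ *         ...
+ *         .getMilliSeconds = MyGetMilliSeconds,
+ *     };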
*/ +typedef struct _NvPushImports { + + NvU32 (*rmApiControl) (NvPushDevicePtr pDevice, + NvU32 hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize); + + NvU32 (*rmApiAlloc) (NvPushDevicePtr pDevice, + NvU32 hParent, + NvU32 hObject, + NvU32 hClass, + void *pAllocParams); + + NvU32 (*rmApiFree) (NvPushDevicePtr pDevice, + NvU32 hParent, + NvU32 hObject); + + NvU32 (*rmApiMapMemoryDma) (NvPushDevicePtr pDevice, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset); + + NvU32 (*rmApiUnmapMemoryDma) (NvPushDevicePtr pDevice, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU32 flags, + NvU64 dmaOffset); + + NvU32 (*rmApiAllocMemory64) (NvPushDevicePtr pDevice, + NvU32 hParent, + NvU32 hMemory, + NvU32 hClass, + NvU32 flags, + void **ppAddress, + NvU64 *pLimit); + + NvU32 (*rmApiVidHeapControl) (NvPushDevicePtr pDevice, + void *pVidHeapControlParms); + + NvU32 (*rmApiMapMemory) (NvPushDevicePtr pDevice, + NvU32 hDevice, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + void **ppLinearAddress, + NvU32 flags); + + NvU32 (*rmApiUnmapMemory) (NvPushDevicePtr pDevice, + NvU32 hDevice, + NvU32 hMemory, + void *pLinearAddress, + NvU32 flags); + + NvU64 (*getMilliSeconds) (NvPushDevicePtr pDevice); + + void (*yield) (NvPushDevicePtr pDevice); + + NvBool (*waitForEvent) (NvPushDevicePtr pDevice, + NvPushImportEvent *pEvent, + NvU64 timeout); + + void (*emptyEventFifo) (NvPushDevicePtr pDevice, + NvPushImportEvent *pEvent); + + void (*channelErrorOccurred) (NvPushChannelPtr pChannel, NvU32 channelErrCode); + + void (*pushbufferWrapped) (NvPushChannelPtr pChannel); + + void (*logError) (NvPushDevicePtr pDevice, + NV_PUSH_PRINTF_FORMAT_ARGUMENT const char *fmt, ...) + NV_PUSH_PRINTF_ATTRIBUTES(2,3); + + /* + * The logNvDiss() import, in DEBUG builds, logs strings to be + * parsed by nvdiss. Note that multiple nvPushImportLogNvDiss() + * calls may be used to build one line of output (so, respect the + * newlines provided in the strings). + */ +#if defined(DEBUG) + void (*logNvDiss) (NvPushChannelPtr pChannel, + NV_PUSH_PRINTF_FORMAT_ARGUMENT const char *fmt, ...) + NV_PUSH_PRINTF_ATTRIBUTES(2,3); +#endif + +} NvPushImports; + + +void __nvPushMakeRoom(NvPushChannelPtr, NvU32 count); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVIDIA_PUSH_TYPES_H__ */ diff --git a/src/common/unix/nvidia-push/interface/nvidia-push-utils.h b/src/common/unix/nvidia-push/interface/nvidia-push-utils.h new file mode 100644 index 0000000..9eeb617 --- /dev/null +++ b/src/common/unix/nvidia-push/interface/nvidia-push-utils.h @@ -0,0 +1,180 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* This file contains push buffer utility functions and declarations */ + +#ifndef __NVIDIA_PUSH_UTILS_H__ +#define __NVIDIA_PUSH_UTILS_H__ + +#include "nvidia-push-types.h" +#include "nvlimits.h" + +#include "class/cla16f.h" + +#ifdef __cplusplus +extern "C" { +#endif + +static inline NvBool nvPushIsAModel(const NvPushDeviceRec *pDevice) +{ + return FALSE; +} + + +/* declare prototypes: */ +NvBool nvPushCheckChannelError(NvPushChannelPtr pChannel); +void nvPushKickoff(NvPushChannelPtr); +NvBool nvPushIdleChannelTest(NvPushChannelPtr pChannel, NvU32 timeoutMSec); +NvBool nvPushIdleChannel(NvPushChannelPtr); + +void nvPushWaitForNotifier( + NvPushChannelPtr pChannel, + NvU32 notifierIndex, + NvU32 subdeviceMask, + NvBool yield, + NvPushImportEvent *pEvent, + int id); + +void nvPushReleaseTimelineSemaphore( + NvPushChannelPtr p, + void *cpuAddress, + NvU64 gpuAddress, + NvU64 val); + +void nvPushAcquireTimelineSemaphore( + NvPushChannelPtr p, + NvU64 gpuAddress, + NvU64 val); + +NvBool nvPushDecodeMethod(NvU32 header, NvU32 *count); +void nvPushSetObject(NvPushChannelPtr p, NvU32 subch, NvU32 *object); +void nvPushSetSubdeviceMask(NvPushChannelPtr p, NvU32 mask); +void __nvPushMakeRoom(NvPushChannelPtr, NvU32 count); + +#define NV_PUSH_SUBDEVICE_MASK_PRIMARY 0x00000001 +#define NV_PUSH_SUBDEVICE_MASK_ALL DRF_MASK(NVA16F_DMA_SET_SUBDEVICE_MASK_VALUE) + +/* + * Evaluates to TRUE if the two subDevMasks are equivalent for the given SLI + * device + */ +static inline NvBool nvPushSubDeviceMaskEquiv( + const NvPushDeviceRec *pDevice, + NvU32 maskA, + NvU32 maskB) +{ + const NvU32 allSubDevices = (1 << pDevice->numSubDevices) - 1; + + return (maskA & allSubDevices) == (maskB & allSubDevices); +} + +/* Evaluates to TRUE if subDevMask will write to all of the GPUs */ +static inline NvBool nvPushSubDeviceMaskAllActive( + const NvPushDeviceRec *pDevice, + NvU32 subDevMask) +{ + return nvPushSubDeviceMaskEquiv(pDevice, subDevMask, + NV_PUSH_SUBDEVICE_MASK_ALL); +} + +#define NV_PUSH_NOTIFIER_INTERNAL_BIT 0x80 +ct_assert(NV_PUSH_NOTIFIER_INTERNAL_BIT >= + NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1); +#define NV_PUSH_ERROR_NOTIFIER_INDEX \ + (NV_PUSH_NOTIFIER_INTERNAL_BIT | \ + NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR) +#define NV_PUSH_TOKEN_NOTIFIER_INDEX \ + (NV_PUSH_NOTIFIER_INTERNAL_BIT | \ + NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN) + +/* + * Notifiers for use by nvidia-push, not exposed to clients: + * NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1: defined by RM + * NV_MAX_SUBDEVICES: one for each subdevice to track work submission token + */ +#define NV_PUSH_NUM_INTERNAL_NOTIFIERS \ + (NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1 + NV_MAX_SUBDEVICES) + +static inline NvU32 __nvPushGetNotifierRawIndex( + const NvPushDeviceRec *pDevice, + NvU32 notifierIndex, + NvU32 sd) +{ + if (notifierIndex & NV_PUSH_NOTIFIER_INTERNAL_BIT) { + return notifierIndex & ~NV_PUSH_NOTIFIER_INTERNAL_BIT; + } else { + return (notifierIndex * pDevice->numSubDevices) + sd + + NV_PUSH_NUM_INTERNAL_NOTIFIERS; + } +} + +static 
inline NvNotification *nvPushGetNotifierCpuAddress( + const NvPushChannelRec *pChannel, + NvU32 notifierIndex, + NvU32 sd) +{ + const NvU32 rawIndex = + __nvPushGetNotifierRawIndex(pChannel->pDevice, notifierIndex, sd); + + return &pChannel->notifiers.cpuAddress[rawIndex]; +} + +static inline NvU64 nvPushGetNotifierGpuAddress( + const NvPushChannelRec *pChannel, + NvU32 notifierIndex, + NvU32 sd) +{ + const NvU32 rawIndex = + __nvPushGetNotifierRawIndex(pChannel->pDevice, notifierIndex, sd); + const size_t offset = rawIndex * sizeof(NvNotification); + + return pChannel->notifiers.gpuAddress + offset; +} + + +extern NvU32 nvPushReadGetOffset(NvPushChannelPtr push_buffer, NvBool minimum); + + +/*! + * Make room in the pushbuffer, checking for errors. + * + * If a channel error occurred, channelErrorOccurred is set to TRUE. + * nvPushCheckForRoomAndErrors() is designed to be called just before a + * nvPushMethod() with the same size. + */ +static inline void nvPushCheckForRoomAndErrors( + NvPushChannelPtr pChannel, + NvU32 count) +{ + pChannel->channelErrorOccurred = FALSE; + + if (pChannel->main.freeDwords < (count + 1)) { + __nvPushMakeRoom(pChannel, count + 1); + } +} + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVIDIA_PUSH_UTILS_H__ */ diff --git a/src/common/unix/nvidia-push/src/nvidia-push-init.c b/src/common/unix/nvidia-push/src/nvidia-push-init.c new file mode 100644 index 0000000..b062172 --- /dev/null +++ b/src/common/unix/nvidia-push/src/nvidia-push-init.c @@ -0,0 +1,1551 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + + +#include "nvidia-push-init.h" +#include "nvidia-push-utils.h" +#include "nvidia-push-priv.h" +#include "nvidia-push-priv-imports.h" + +#include "nvos.h" + +#include "nv_assert.h" + +#include "alloc/alloc_channel.h" +#include "class/cl0002.h" // NV01_CONTEXT_DMA +#include "class/cl003e.h" // NV01_MEMORY_SYSTEM +#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER + +#include "class/cla16f.h" // KEPLER_CHANNEL_GPFIFO_B +#include "class/cla26f.h" // KEPLER_CHANNEL_GPFIFO_C +#include "class/clb06f.h" // MAXWELL_CHANNEL_GPFIFO_A +#include "class/clc06f.h" // PASCAL_CHANNEL_GPFIFO_A +#include "class/clc36f.h" // VOLTA_CHANNEL_GPFIFO_A +#include "class/clc46f.h" // TURING_CHANNEL_GPFIFO_A +#include "class/cl50a0.h" // NV50_MEMORY_VIRTUAL +#include "class/clc56f.h" // AMPERE_CHANNEL_GPFIFO_A +#include "class/clc86f.h" // HOPPER_CHANNEL_GPFIFO_A +#include "class/clc361.h" // VOLTA_USERMODE_A +#include "class/clc661.h" // HOPPER_USERMODE_A + +#include "ctrl/ctrl0080/ctrl0080fifo.h" // NV0080_CTRL_CMD_FIFO_GET_CAPS_V2 +#include "ctrl/ctrl2080/ctrl2080bus.h" // NV2080_CTRL_CMD_BUS_GET_INFO +#include "ctrl/ctrla06f/ctrla06fgpfifo.h" // KEPLER_CHANNEL_GPFIFO_A +#include "ctrl/ctrlc36f.h" // VOLTA_CHANNEL_GPFIFO_A + +static NvU32 GetHandle( + const NvU32 *pHandlePool, + NvU8 handlePoolSize, + NvU64 *pUsedHandleBitmask) +{ + NvU8 i; + const NvU64 usedHandleBitmask = *pUsedHandleBitmask; + + /* + * We assume there are less than 64 handles in the pool. If the + * pool is larger than that, we'll need a fancier bitmask. + */ + nvAssert(handlePoolSize < (sizeof(NvU64) * 8)); + + for (i = 0; i < handlePoolSize; i++) { + if ((usedHandleBitmask & NVBIT64(i)) == 0) { + *pUsedHandleBitmask |= NVBIT64(i); + return pHandlePool[i]; + } + } + + nvAssert(!"Exhausted handlePool!"); + + return 0; +} + +static NvU32 GetChannelHandle( + const NvPushAllocChannelParams *pParams, + NvU64 *pUsedHandleBitmask) +{ + return GetHandle(pParams->handlePool, + ARRAY_LEN(pParams->handlePool), + pUsedHandleBitmask); +} + +static NvU32 GetDeviceHandle( + const NvPushAllocDeviceParams *pParams, + NvU64 *pUsedHandleBitmask) +{ + return GetHandle(pParams->handlePool, + ARRAY_LEN(pParams->handlePool), + pUsedHandleBitmask); +} + +static void FreeSemaSurface(NvPushChannelPtr p) +{ + NvPushDevicePtr pDevice = p->pDevice; + NvU32 *handle = p->progressSemaphore.handle; + void **ptr = p->progressSemaphore.ptr; + NvU32 status; + int sd; + + if (p->progressSemaphore.gpuVA) { + for (sd = pDevice->numSubDevices - 1; sd >= 0; sd--) { + const int deviceIndex = __nvPushGetDeviceIndex(pDevice, sd); + status = nvPushImportRmApiUnmapMemoryDma( + pDevice, + pDevice->subDevice[sd].handle, + pDevice->subDevice[deviceIndex].gpuVASpaceCtxDma, + handle[sd], + 0, + p->progressSemaphore.gpuVA); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to unmap progressSemaphore"); + } + } + p->progressSemaphore.gpuVA = 0; + } + + for (sd = pDevice->numSubDevices - 1; sd >= 0; sd--) { + const int deviceIndex = __nvPushGetDeviceIndex(pDevice, sd); + if (!handle[sd]) { + continue; + } + status = nvPushImportRmApiFree( + pDevice, + pDevice->subDevice[deviceIndex].deviceHandle, + handle[sd]); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to free progressSemaphore"); + } + handle[sd] = 0; + + /* Freeing this memory automatically unmaps it. 
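+           (That is, the CPU mapping that nvPushImportRmApiAllocMemory64()
+           returned goes away too, so ptr[sd] only needs to be cleared
+           here, not explicitly unmapped.)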
*/ + ptr[sd] = NULL; + } +} + +static NvBool AllocSemaSurface( + NvPushChannelPtr p, + const NvPushAllocChannelParams *pParams, + NvBool coherent, + NvU64 *pUsedHandleBitmask) +{ + NvPushDevicePtr pDevice = p->pDevice; + NvU32 *handle = p->progressSemaphore.handle; + void **ptr = p->progressSemaphore.ptr; + NvU32 status; + const NvU64 size = 4096; + unsigned int sd; + + /* 1. Allocate sysmem surface(s) to back the semaphore, get CPU mapping */ + for (sd = 0; sd < pDevice->numSubDevices; sd++) { + const int deviceIndex = __nvPushGetDeviceIndex(pDevice, sd); + NvU64 limit = size - 1; + const NvU32 flags = DRF_DEF(OS02, _FLAGS, _PHYSICALITY, _NONCONTIGUOUS) | + (coherent ? DRF_DEF(OS02, _FLAGS, _COHERENCY, _CACHED) : + DRF_DEF(OS02, _FLAGS, _COHERENCY, _UNCACHED)); + + handle[sd] = GetChannelHandle(pParams, pUsedHandleBitmask); + + status = nvPushImportRmApiAllocMemory64(pDevice, + pDevice->subDevice[deviceIndex].deviceHandle, + handle[sd], + NV01_MEMORY_SYSTEM, + flags, + &ptr[sd], + &limit); + + if (status != NVOS_STATUS_SUCCESS) { + handle[sd] = 0; + nvAssert(!"Failed to allocate FIFO semaphore surface"); + goto fail; + } + } + + /* 2. Map the surface(s) into the GPU(s) */ + for (sd = 0; sd < pDevice->numSubDevices; sd++) { + NvU32 flags = DRF_DEF(OS46, _FLAGS, _ACCESS, _READ_WRITE) | + DRF_DEF(OS46, _FLAGS, _PAGE_SIZE, _4KB) | + (coherent ? DRF_DEF(OS46, _FLAGS, _CACHE_SNOOP, _ENABLE) : + DRF_DEF(OS46, _FLAGS, _CACHE_SNOOP, _DISABLE)); + const int deviceIndex = __nvPushGetDeviceIndex(pDevice, sd); + + /* + * Note that this mapping is somewhat special because we use a + * different surface for each subdevice, but want to map at the same + * virtual address on all subdevices. + */ + if (sd == 0) { + /* + * Create a new virtual mapping. + * + * The MapMemoryDma call will assign to + * 'p->progressSemaphore.gpuVA'. + * + * In !clientSli, this creates a broadcast mapping that we override + * with the _DMA_UNICAST_REUSE_ALLOC flag below. + * In clientSli, each mapping is already unicast. + * + * In both cases, the DMA_OFFSET_FIXED flag ensures the VA matches + * between all subdevices. + */ + p->progressSemaphore.gpuVA = 0; + flags = FLD_SET_DRF(OS46, _FLAGS, _DMA_OFFSET_FIXED, _FALSE, flags); + } else { + /* + * The MapMemoryDma call will read from + * 'p->progressSemaphore.gpuVA'. + */ + nvAssert(p->progressSemaphore.gpuVA != 0); + if (!pDevice->clientSli) { + flags = FLD_SET_DRF(OS46, _FLAGS, _DMA_UNICAST_REUSE_ALLOC, _TRUE, flags); + } + flags = FLD_SET_DRF(OS46, _FLAGS, _DMA_OFFSET_FIXED, _TRUE, flags); + } + + status = nvPushImportRmApiMapMemoryDma(pDevice, + pDevice->subDevice[sd].handle, + pDevice->subDevice[deviceIndex].gpuVASpaceCtxDma, + handle[sd], + 0, + size, + flags, + &p->progressSemaphore.gpuVA); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to map FIFO semaphore surface"); + goto fail; + } + } + + return TRUE; +fail: + FreeSemaSurface(p); + return FALSE; +} + +/* + * The size of the "progress tracker" portion of the pushbuffer. + * + * We use one set of progress tracker methods for every two GPFIFO entries (one + * GPFIFO entry is for the main pushbuffer, the other is for the progress + * tracker methods). + */ +static inline NvU32 ProgressTrackerBufferSize(NvPushChannelPtr buffer) +{ + return __nvPushProgressTrackerEntrySize(buffer->pDevice) * + (buffer->numGpFifoEntries / 2); +} + +/* + * The size of the pushbuffer allocation, including all segments and GPFIFO + * entries. 
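+ *
+ * The layout implied by the offset helpers used below is, in order:
+ *
+ *   [ main pushbuffer | GPFIFO entries | progress tracker ]
+ *
+ * with the GPFIFO beginning at __nvPushGpFifoOffset() and the progress
+ * tracker at __nvPushProgressTrackerOffset().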
+ */ +static inline NvU32 CalculateGPBufferSize(NvPushChannelPtr buffer) +{ + return __nvPushProgressTrackerOffset(buffer) + + ProgressTrackerBufferSize(buffer); +} + +/*! + * Set up an NvPushChannelSegmentRec's initial state based on the provided data + * + * \param segment Pointer to segment structure to initialize + * \param ptr CPU mapping to the base of the segment. + * \param gpuOffset GPU mapping of the base of the segment. + * \param size Size of the segment, in bytes. + */ +static void InitDmaSegment(NvPushChannelSegmentPtr segment, + void *ptr, + NvU64 gpuOffset, + NvU32 size) +{ + segment->base = (NvPushChannelUnion *)ptr; + segment->buffer = (NvPushChannelUnion *)ptr; + segment->sizeInBytes = size; + segment->freeDwords = size >> 2; + segment->gpuMapOffset = gpuOffset; + segment->putOffset = 0; +} + +/*! + * Set up the work submit token. RM will write this into the "error context + * DMA" at the offset we request. + */ +static NvBool RequestChidToken(NvPushChannelPtr p) +{ + NvPushDevicePtr pDevice = p->pDevice; + int deviceIndex; + + for (deviceIndex = 0; + deviceIndex < __nvPushGetNumDevices(pDevice); + deviceIndex++) { + + NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS notifParams = { 0 }; + NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS tokenParams = { 0 }; + NvU32 status; + + notifParams.index = NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1 + deviceIndex; + + status = nvPushImportRmApiControl(pDevice, + p->channelHandle[deviceIndex], + NVC36F_CTRL_CMD_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX, + ¬ifParams, + sizeof(notifParams)); + if (status != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + /* + * Request the channel's "work submit token". This isn't actually used for + * anything but RM needs it to be called after the channel has been allocated, + * for reasons. 
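+         * (It does, however, prompt RM to publish the token through the
+         * notifier index requested above; DoorbellKickoff() later reads
+         * the token from that notifier when ringing the doorbell.)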
+ */ + status = nvPushImportRmApiControl(pDevice, + p->channelHandle[deviceIndex], + NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN, + &tokenParams, + sizeof(tokenParams)); + if (status != NVOS_STATUS_SUCCESS) { + return FALSE; + } + } + return TRUE; +} + +static NvBool BindAndScheduleChannel(NvPushDevicePtr pDevice, + NvU32 channelHandle, + NvU32 engineType) +{ + NVA06F_CTRL_BIND_PARAMS bindParams = { 0 }; + NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS scheduleParams = { 0 }; + NvBool ret; + + bindParams.engineType = engineType; + ret = nvPushImportRmApiControl(pDevice, + channelHandle, + NVA06F_CTRL_CMD_BIND, + &bindParams, + sizeof(bindParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvPushImportLogError(pDevice, "Failed to bind the channel"); + return FALSE; + } + + scheduleParams.bEnable = NV_TRUE; + ret = nvPushImportRmApiControl(pDevice, + channelHandle, + NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, + &scheduleParams, + sizeof(scheduleParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvPushImportLogError(pDevice, + "Failed to schedule the channel"); + return FALSE; + } + + return TRUE; +} + +static NvBool AllocChannelObject( + NvPushChannelPtr buffer, + const NvPushAllocChannelParams *pParams, + NvU64 *pUsedHandleBitmask, + NvU64 gpuAddress) +{ + NvPushDevicePtr pDevice = buffer->pDevice; + NV_CHANNEL_ALLOC_PARAMS params = { 0 }; + unsigned int sd; + NvU32 userdMapHandle[NV_MAX_SUBDEVICES]; + NvU32 ret; + const NvU64 gpFifoOffset = gpuAddress + __nvPushGpFifoOffset(buffer); + int deviceIndex; + + for (deviceIndex = 0; + deviceIndex < __nvPushGetNumDevices(pDevice); + deviceIndex++) { + buffer->channelHandle[deviceIndex] = GetChannelHandle(pParams, pUsedHandleBitmask); + nvAssert(buffer->notifiers.errorCtxDma != 0); + + /* Open the DMA channel by allocating the CHANNEL_GPFIFO object */ + params.hObjectError = buffer->notifiers.errorCtxDma; + if (pDevice->subDevice[deviceIndex].gpuVASpaceObject != 0) { + params.hVASpace = pDevice->subDevice[deviceIndex].gpuVASpaceObject; + } else { + params.hObjectBuffer = pDevice->subDevice[deviceIndex].gpuVASpaceCtxDma; + } + // Offset is relative to the ctx dma + params.gpFifoOffset = gpFifoOffset; + + if (pDevice->hal.caps.allocateDoubleSizeGpFifo) { + // On Tegra, we have to allocate twice the GPFIFO size. This is because + // the kernel will add its own entries (max 2) for the kickoff for the + // pre-sync and post-sync fences. This means the max kickoff size is not + // actually buffer->numGpFifoEntries - 1, it's + // most likely buffer->numGpFifoEntries - 3. + // + // TODO: Tell the users the actual max kickoff size to avoid this + // WAR. NvRmTegraChannelGetMaxKickoffGpfifoCount() retrieves this piece + // of info on Tegra. Bug 2404063. 
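+            //
+            // Note (an assumption, based on the wrap arithmetic used in
+            // nvWriteGpEntry()): only the RM-side allocation is doubled;
+            // the driver itself still wraps at buffer->numGpFifoEntries.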
+ params.gpFifoEntries = buffer->numGpFifoEntries * 2; + } else { + params.gpFifoEntries = buffer->numGpFifoEntries; + } + + params.flags = 0; + if (pParams->secureChannel) { + params.flags |= DRF_DEF(OS04, _FLAGS, _CC_SECURE, _TRUE); + } + if (pParams->difrPrefetch) { + params.flags |= DRF_DEF(OS04, + _FLAGS, + _SET_EVICT_LAST_CE_PREFETCH_CHANNEL, + _TRUE); + } + + if (pDevice->hal.caps.clientAllocatesUserD) { + if (pDevice->clientSli) { + params.hUserdMemory[0] = buffer->userD[deviceIndex].hMemory; + params.userdOffset[0] = 0; + } else { + for (sd = 0; sd < pDevice->numSubDevices; sd++) { + params.hUserdMemory[sd] = buffer->userD[0].hMemory; + params.userdOffset[sd] = 0; + } + } + userdMapHandle[deviceIndex] = buffer->userD[deviceIndex].hMemory; + } else { + userdMapHandle[deviceIndex] = buffer->channelHandle[deviceIndex]; + } + params.engineType = pParams->engineType; + if (pDevice->clientSli) { + params.subDeviceId = (1 << deviceIndex); + } + + if ((ret = nvPushImportRmApiAlloc(pDevice, + pDevice->subDevice[deviceIndex].deviceHandle, + buffer->channelHandle[deviceIndex], + pDevice->gpfifoClass, + ¶ms)) != NVOS_STATUS_SUCCESS) + { + nvPushImportLogError(pDevice, + "Push buffer object allocation failed: 0x%x (%s)", + ret, nvstatusToString(ret)); + buffer->channelHandle[deviceIndex] = 0; + return FALSE; + } + + if (!BindAndScheduleChannel(pDevice, + buffer->channelHandle[deviceIndex], + pParams->engineType)) { + return FALSE; + } + } + + for (sd = 0; sd < pDevice->numSubDevices; sd++) { + void *pUserD; + + deviceIndex = __nvPushGetDeviceIndex(pDevice, sd); + + // Map the DMA controls for each subdevice. + ret = nvPushImportRmApiMapMemory(pDevice, + pDevice->subDevice[sd].handle, + userdMapHandle[deviceIndex], + 0, + pDevice->userDSize, + &pUserD, + 0); + if (ret != NVOS_STATUS_SUCCESS) { + nvPushImportLogError(pDevice, + "Push buffer mapping failed: 0x%x (%s)", + ret, nvstatusToString(ret)); + return FALSE; + } + + buffer->control[sd] = pUserD; + } + + return TRUE; +} + +/* + * It might be nice to suballocate these rather + * than create a separate RM allocation for each channel. + */ +static NvBool nvDmaAllocUserD( + NvPushChannelPtr p, + const NvPushAllocChannelParams *pParams, + NvU64 *pUsedHandleBitmask) +{ + NvPushDevicePtr pDevice = p->pDevice; + int deviceIndex; + NvBool bHasFB = pDevice->hasFb; + + if (!pDevice->hal.caps.clientAllocatesUserD) { + return TRUE; + } + + for (deviceIndex = 0; + deviceIndex < __nvPushGetNumDevices(pDevice); + deviceIndex++) { + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { 0 }; + NvU32 ret; + + /* For GPUs which do not have framebuffer memory, use allocation from + * system memory instead. + */ + const NvU32 hClass = bHasFB ? NV01_MEMORY_LOCAL_USER : NV01_MEMORY_SYSTEM; + const NvU32 attr = + bHasFB ? + DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM) | + DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _4KB) | + DRF_DEF(OS32, _ATTR, _COHERENCY, _UNCACHED) + : + DRF_DEF(OS32, _ATTR, _LOCATION, _PCI) | + DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _4KB) | + DRF_DEF(OS32, _ATTR, _COHERENCY, _UNCACHED); + const NvU32 flags = + bHasFB ? 
+ NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE | + NVOS32_ALLOC_FLAGS_PERSISTENT_VIDMEM + : + NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE; + + NvU32 hMemory = GetChannelHandle(pParams, pUsedHandleBitmask); + + memAllocParams.owner = pDevice->clientHandle; + memAllocParams.type = NVOS32_TYPE_DMA; + memAllocParams.size = pDevice->userDSize; + memAllocParams.attr = attr; + memAllocParams.flags = flags; + memAllocParams.alignment = pDevice->userDSize; + + ret = nvPushImportRmApiAlloc(pDevice, + pDevice->subDevice[deviceIndex].deviceHandle, + hMemory, + hClass, + &memAllocParams); + if (ret != NV_OK) { + return FALSE; + } + + p->userD[deviceIndex].hMemory = hMemory; + } + + return TRUE; +} + +static NvBool IsClassSupported( + const NvPushDeviceRec *pDevice, + NvU32 classNumber) +{ + unsigned int j; + for (j = 0; j < pDevice->numClasses; j++) { + if (classNumber == pDevice->supportedClasses[j]) { + return TRUE; + } + } + return FALSE; +} + +int nvPushGetSupportedClassIndex( + NvPushDevicePtr pDevice, + const void *pClassTable, + size_t classTableStride, + size_t classTableLength) +{ + unsigned int i; + + for (i = 0; i < classTableLength; i++) { + + const NvU8 *bytes = (const NvU8 *)pClassTable; + const size_t byteOffset = i * classTableStride; + const NvPushSupportedClass *pClass = + (const NvPushSupportedClass *) (bytes + byteOffset); + + if (nvPushIsAModel(pDevice)) { + if (pDevice->amodelConfig == pClass->amodelConfig) { + return i; + } + continue; + } + + if (IsClassSupported(pDevice, pClass->classNumber)) { + return i; + } + } + return -1; +} + +static NvBool GetChannelClassAndUserDSize( + NvPushDevicePtr pDevice, + const NvPushAllocDeviceParams *pParams) +{ + const struct { + NvPushSupportedClass base; + size_t gpFifoSize; + } gpFifoDmaClasses[] = { + { + { AMPERE_CHANNEL_GPFIFO_A, + NV_AMODEL_AMPERE }, + sizeof(AmpereAControlGPFifo) + }, + { + { TURING_CHANNEL_GPFIFO_A, + NV_AMODEL_TURING }, + sizeof(TuringAControlGPFifo) + }, + { + { VOLTA_CHANNEL_GPFIFO_A, + NV_AMODEL_VOLTA }, + sizeof(VoltaAControlGPFifo) + }, + { + { PASCAL_CHANNEL_GPFIFO_A, + NV_AMODEL_PASCAL }, + sizeof(PascalAControlGPFifo) + }, + { + { MAXWELL_CHANNEL_GPFIFO_A, + NV_AMODEL_MAXWELL }, + sizeof(MaxwellAControlGPFifo) + }, + { + { KEPLER_CHANNEL_GPFIFO_C, + NV_AMODEL_KEPLER_SM35 }, + sizeof(KeplerCControlGPFifo) + }, + { + { KEPLER_CHANNEL_GPFIFO_B, + NV_AMODEL_KEPLER }, + sizeof(KeplerBControlGPFifo) + }, + + }; + + int i; + + i = nvPushGetSupportedClassIndex(pDevice, gpFifoDmaClasses, + sizeof(gpFifoDmaClasses[0]), + ARRAY_LEN(gpFifoDmaClasses)); + if (i == -1) { + return FALSE; + } + + pDevice->gpfifoClass = gpFifoDmaClasses[i].base.classNumber; + pDevice->userDSize = gpFifoDmaClasses[i].gpFifoSize; + return TRUE; +} + +/* + * Query GPU<->CPU coherency. In particular, *pCoherent is set to TRUE when + * the GPU is capable of accessing CPU-cached system memory coherently with + * respect to CPU accesses. + * + * For surfaces with CPU read/write or CPU read-mostly such as notifiers: + * If *pCoherent is TRUE: + * - create CPU mappings with COHERENCY_WRITE_BACK + * - create GPU mappings with CACHE_SNOOP_ENABLE + * If *pCoherent is FALSE: + * - create CPU mappings with COHERENCY_UNCACHED + * - create GPU mappings with CACHE_SNOOP_DISABLE + * + * (CPU write-mostly surfaces such as the pushbuffer always use WRITE_COMBINED + * memory.) + * + * Note we only query on the first subdevice and assume the other subdevices + * are the same. 
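+ *
+ * A typical caller pattern, as a sketch mirroring AllocNotifiers() below:
+ *
+ *   NvBool coherent = FALSE;
+ *   if (!GetCoherenceFlags(pChannel, &coherent)) {
+ *       return FALSE;
+ *   }
+ *   allocFlags = coherent ?
+ *       DRF_DEF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK) :
+ *       DRF_DEF(OS02, _FLAGS, _COHERENCY, _UNCACHED);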
+ */ +static NvBool GetCoherenceFlags( + NvPushChannelPtr pChannel, + NvBool *pCoherent) +{ + NvPushDevicePtr pDevice = pChannel->pDevice; + NV2080_CTRL_BUS_GET_INFO_PARAMS busInfo = { 0 }; + struct { + NV2080_CTRL_BUS_INFO coherentFlags; + } busInfoList; + + NvU32 ret; + + NVMISC_MEMSET(&busInfoList, 0, sizeof(busInfoList)); + busInfoList.coherentFlags.index = + NV2080_CTRL_BUS_INFO_INDEX_COHERENT_DMA_FLAGS; + + busInfo.busInfoListSize = sizeof(busInfoList) / + sizeof(NV2080_CTRL_BUS_INFO); + busInfo.busInfoList = NV_PTR_TO_NvP64(&busInfoList); + + ret = nvPushImportRmApiControl(pDevice, + pDevice->subDevice[0].handle, + NV2080_CTRL_CMD_BUS_GET_INFO, + &busInfo, sizeof(busInfo)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + *pCoherent = + FLD_TEST_DRF(2080_CTRL_BUS_INFO, _COHERENT_DMA_FLAGS, _GPUGART, _TRUE, + busInfoList.coherentFlags.data); + return TRUE; +} + +static NvBool TryAllocAndMapPushbuffer( + NvPushChannelPtr pChannel, + const NvU32 allocFlags, + const NvU32 mapFlags, + const NvU32 limit, + void **pCpuAddress, + NvU64 *pGpuAddress) +{ + NvU32 ret; + NvU64 localLimit; + NvU64 size = limit + 1; + void *cpuAddress = NULL; + NvU64 gpuAddress = 0; + NvPushDevicePtr pDevice = pChannel->pDevice; + int deviceIndex; + NvBool vaAlloc[NV_MAX_SUBDEVICES] = { 0 }; + NvBool vaMap[NV_MAX_SUBDEVICES] = { 0 }; + NvBool surfaceAlloc = FALSE; + + for (deviceIndex = 0; + deviceIndex < __nvPushGetNumDevices(pDevice); + deviceIndex++) { + NV_MEMORY_ALLOCATION_PARAMS vaParams = { 0 }; + + vaParams.owner = 0x70757368; + vaParams.type = NVOS32_TYPE_DMA; + vaParams.flags = + NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED | + NVOS32_ALLOC_FLAGS_VIRTUAL; + vaParams.size = size; + vaParams.hVASpace = pDevice->subDevice[deviceIndex].gpuVASpaceObject; + + if (deviceIndex == 0) { + /* For the first device, RM assigns a virtual address. */ + if (pChannel->pDevice->hal.caps.extendedBase) { + /* + * Force the virtual mapping to be naturally aligned. + * This ensures that the allocation cannot cross a 40-bit + * boundary, so we can initialize the higher bits of the VA + * with the PB_EXTENDED_BASE_OPERAND GPFIFO command once at + * init time and not worry about it being able to change + * between any two GPFIFO entries. + */ + vaParams.flags |= NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE; + vaParams.alignment = size; + ROUNDUP_POW2_U64(vaParams.alignment); + } + } else { + /* For subsequent devices, use the same virtual address. */ + vaParams.flags |= NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE; + nvAssert(gpuAddress != 0); + vaParams.offset = gpuAddress; + } + + ret = nvPushImportRmApiAlloc( + pDevice, + pDevice->subDevice[deviceIndex].deviceHandle, + pChannel->pushbufferVAHandle[deviceIndex], + NV50_MEMORY_VIRTUAL, + &vaParams); + + if (ret != NVOS_STATUS_SUCCESS) { + goto fail; + } + vaAlloc[deviceIndex] = TRUE; + + if (deviceIndex == 0) { + gpuAddress = vaParams.offset; + nvAssert(vaParams.size >= size); + /* The VA allocation may have been bloated to a larger size, to + * align with the page size. Adjust to ensure that we allocate a + * surface of at least that size, or else attempts to map it will + * fail. */ + size = vaParams.size; + } else { + nvAssert(gpuAddress == vaParams.offset); + nvAssert(vaParams.size == size); + } + } + + /* Allocate a single surface in system memory for the pushbuffer. 
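+       All devices share this one allocation; the loop below maps it into
+       each device's VA space at the same GPU virtual address.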
*/ + localLimit = size - 1; + ret = nvPushImportRmApiAllocMemory64( + pDevice, + pDevice->subDevice[0].deviceHandle, + pChannel->pushbufferHandle, + NV01_MEMORY_SYSTEM, + allocFlags, + &cpuAddress, + &localLimit); + + if (ret != NVOS_STATUS_SUCCESS) { + goto fail; + } + nvAssert(localLimit + 1 >= size); + surfaceAlloc = TRUE; + + for (deviceIndex = 0; + deviceIndex < __nvPushGetNumDevices(pDevice); + deviceIndex++) { + NvU64 mapOffset = 0; + + ret = nvPushImportRmApiMapMemoryDma( + pDevice, + pDevice->subDevice[deviceIndex].deviceHandle, + pChannel->pushbufferVAHandle[deviceIndex], + pChannel->pushbufferHandle, + 0, + size, + mapFlags, + &mapOffset); + + if (ret != NVOS_STATUS_SUCCESS) { + goto fail; + } + vaMap[deviceIndex] = TRUE; + /* mapMemoryDma takes in a relative offset but assigns an absolute VA */ + nvAssert(mapOffset == gpuAddress); + } + + /* success */ + *pCpuAddress = cpuAddress; + *pGpuAddress = gpuAddress; + return TRUE; + +fail: + for (deviceIndex = __nvPushGetNumDevices(pDevice) - 1; + deviceIndex >= 0; + deviceIndex--) { + if (vaMap[deviceIndex]) { + ret = nvPushImportRmApiUnmapMemoryDma(pDevice, + pDevice->subDevice[deviceIndex].deviceHandle, + pChannel->pushbufferVAHandle[deviceIndex], + pChannel->pushbufferHandle, + 0, + gpuAddress); + nvAssert(ret == NVOS_STATUS_SUCCESS); + vaMap[deviceIndex] = FALSE; + } + if (vaAlloc[deviceIndex]) { + ret = nvPushImportRmApiFree(pDevice, + pDevice->subDevice[deviceIndex].deviceHandle, + pChannel->pushbufferVAHandle[deviceIndex]); + nvAssert(ret == NVOS_STATUS_SUCCESS); + vaAlloc[deviceIndex] = FALSE; + } + }; + + if (surfaceAlloc) { + ret = nvPushImportRmApiFree(pDevice, + pDevice->subDevice[0].deviceHandle, + pChannel->pushbufferHandle); + nvAssert(ret == NVOS_STATUS_SUCCESS); + } + + return FALSE; +} + +static NvBool AllocPushbuffer( + NvPushChannelPtr pChannel, + const NvPushAllocChannelParams *pParams, + NvU64 *pUsedHandleBitmask, + void **pCpuAddress, + NvU64 *pGpuAddress) +{ + const NvU32 size = CalculateGPBufferSize(pChannel); + NvU32 limit = size - 1; + int deviceIndex; + + pChannel->pushbufferHandle = GetChannelHandle(pParams, pUsedHandleBitmask); + for (deviceIndex = 0; + deviceIndex < __nvPushGetNumDevices(pChannel->pDevice); + deviceIndex++) { + pChannel->pushbufferVAHandle[deviceIndex] = + GetChannelHandle(pParams, pUsedHandleBitmask); + } + + if (TryAllocAndMapPushbuffer( + pChannel, + DRF_DEF(OS02, _FLAGS, _PHYSICALITY, _NONCONTIGUOUS) | + DRF_DEF(OS02, _FLAGS, _COHERENCY, _WRITE_COMBINE), + DRF_DEF(OS46, _FLAGS, _CACHE_SNOOP, _DISABLE), + limit, + pCpuAddress, + pGpuAddress)) { + return TRUE; + } + + pChannel->pushbufferHandle = 0; + NVMISC_MEMSET(pChannel->pushbufferVAHandle, 0, sizeof(pChannel->pushbufferVAHandle)); + return FALSE; +} + +/*! + * Free resources allocated in AllocUserMode(). + */ +static void FreeUserMode( + NvPushDevicePtr pDevice) +{ + NvU32 sd; + + for (sd = 0; sd < pDevice->numSubDevices; sd++) { + + if (pDevice->subDevice[sd].pUserMode != NULL) { + nvPushImportRmApiUnmapMemory( + pDevice, + pDevice->subDevice[sd].handle, + pDevice->subDevice[sd].hUserMode, + pDevice->subDevice[sd].pUserMode, + 0 /* flags */); + pDevice->subDevice[sd].pUserMode = NULL; + } + + if (pDevice->subDevice[sd].hUserMode != 0) { + nvPushImportRmApiFree( + pDevice, + pDevice->subDevice[sd].handle, + pDevice->subDevice[sd].hUserMode); + pDevice->subDevice[sd].hUserMode = 0; + } + } +} + +/*! + * Allocate and map the "usermode" object on each subdevice, supported on GV100 + * and up. 
This mapping exposes registers considered safe for userspace to + * access directly. Most importantly, it contains the "doorbell" register + * which we use to notify HOST that we've updated GP_PUT so that it will fetch + * work for the channel. + */ +static NvBool AllocUserMode( + NvPushDevicePtr pDevice, + const NvPushAllocDeviceParams *pParams, + NvU64 *pUsedHandleBitmask) +{ + unsigned int sd; + + static const NvPushSupportedClass userModeClasses[] = { + { VOLTA_USERMODE_A, + NV_AMODEL_VOLTA }, + }; + int i; + + if (!pDevice->hal.caps.clientAllocatesUserD) { + return TRUE; + } + + i = nvPushGetSupportedClassIndex(pDevice, userModeClasses, + sizeof(userModeClasses[0]), + ARRAY_LEN(userModeClasses)); + if (i == -1) { + return FALSE; + } + + for (sd = 0; sd < pDevice->numSubDevices; sd++) { + NvU32 ret; + void *allocParams = NULL; + + NV_HOPPER_USERMODE_A_PARAMS hopperParams = { 0 }; + if (userModeClasses[i].classNumber != VOLTA_USERMODE_A) { + allocParams = &hopperParams; + // The BAR1 mapping is used for (faster and more efficient) writes + // to perform work submission, but can't be used for reads. + // If we ever want to read from the USERMODE region (e.g., to read + // PTIMER) then we need a second mapping. + hopperParams.bBar1Mapping = NV_TRUE; + } + + pDevice->subDevice[sd].hUserMode = + GetDeviceHandle(pParams, pUsedHandleBitmask); + + ret = nvPushImportRmApiAlloc( + pDevice, + pDevice->subDevice[sd].handle, + pDevice->subDevice[sd].hUserMode, + userModeClasses[i].classNumber, + allocParams); + + if (ret != NVOS_STATUS_SUCCESS) { + pDevice->subDevice[sd].hUserMode = 0; + goto fail; + } + + ret = nvPushImportRmApiMapMemory( + pDevice, + pDevice->subDevice[sd].handle, + pDevice->subDevice[sd].hUserMode, + 0, /* offset */ + NVC361_NV_USERMODE__SIZE, + &pDevice->subDevice[sd].pUserMode, + 0 /* flags */); + + if (ret != NVOS_STATUS_SUCCESS) { + goto fail; + } + } + + return TRUE; + +fail: + FreeUserMode(pDevice); + return FALSE; +} + +static void CheckCaps(NvPushDevicePtr pDevice) +{ + int deviceIndex; + + pDevice->hostLBoverflowBug1667921 = FALSE; + + for (deviceIndex = 0; + deviceIndex < __nvPushGetNumDevices(pDevice); + deviceIndex++) { + NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS fifoCapsParams = { 0 }; + NvU32 ret; + + ret = nvPushImportRmApiControl(pDevice, + pDevice->subDevice[deviceIndex].deviceHandle, + NV0080_CTRL_CMD_FIFO_GET_CAPS_V2, + &fifoCapsParams, + sizeof(fifoCapsParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to determine chip fifo capabilities"); + return; + } + + pDevice->hostLBoverflowBug1667921 |= + !!NV0080_CTRL_FIFO_GET_CAP(fifoCapsParams.capsTbl, + NV0080_CTRL_FIFO_CAPS_HAS_HOST_LB_OVERFLOW_BUG_1667921); + } +} + + +static void FreeNotifiers( + NvPushChannelPtr pChannel) +{ + NvPushDevicePtr pDevice = pChannel->pDevice; + + if (pChannel->notifiers.errorCtxDma != 0) { + nvPushImportRmApiFree(pDevice, + pDevice->clientHandle, + pChannel->notifiers.errorCtxDma); + pChannel->notifiers.errorCtxDma = 0; + + } + + if (pChannel->notifiers.gpuAddress != 0) { + int deviceIndex; + for (deviceIndex = __nvPushGetNumDevices(pDevice) - 1; + deviceIndex >= 0; + deviceIndex--) { + nvPushImportRmApiUnmapMemoryDma(pDevice, + pDevice->subDevice[deviceIndex].deviceHandle, + pDevice->subDevice[deviceIndex].gpuVASpaceCtxDma, + pChannel->notifiers.memoryHandle, + 0, + pChannel->notifiers.gpuAddress); + } + pChannel->notifiers.gpuAddress = 0; + } + + if (pChannel->notifiers.memoryHandle != 0) { + nvPushImportRmApiFree(pDevice, + pDevice->subDevice[0].deviceHandle, + 
pChannel->notifiers.memoryHandle); + pChannel->notifiers.memoryHandle = 0; + } +} + +/* + * Allocate enough notifier memory to store: + * - numNotifiers host driver requested NvNotifications, per subDevice + * - NV_PUSH_NUM_INTERNAL_NOTIFIERS NvNotifications, per channel + */ +static NvBool AllocNotifiers( + NvPushChannelPtr pChannel, + const NvPushAllocChannelParams *pParams, + NvBool coherent, + NvU64 *pUsedHandleBitmask) +{ + NvPushDevicePtr pDevice = pChannel->pDevice; + const NvU32 size = + (((pParams->numNotifiers * pDevice->numSubDevices) + + NV_PUSH_NUM_INTERNAL_NOTIFIERS) * + sizeof(NvNotification)); + NV_CONTEXT_DMA_ALLOCATION_PARAMS ctxdmaParams = { 0 }; + + NvU64 limit = size - 1; + int deviceIndex; + NvU32 ret; + NvU32 allocFlags, gpuMapFlags; + + /* + * The host-driver specified number of notifiers must not collide + * with the reserved bit we use to indicate internal notifiers. + */ + if (pParams->numNotifiers & NV_PUSH_NOTIFIER_INTERNAL_BIT) { + return FALSE; + } + + pChannel->notifiers.num = pParams->numNotifiers; + pChannel->notifiers.memoryHandle = + GetChannelHandle(pParams, pUsedHandleBitmask); + + allocFlags = DRF_DEF(OS02,_FLAGS,_PHYSICALITY,_NONCONTIGUOUS), + gpuMapFlags = 0; + if (coherent) { + allocFlags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK, allocFlags); + gpuMapFlags = FLD_SET_DRF(OS46, _FLAGS, _CACHE_SNOOP, _ENABLE, gpuMapFlags); + } else { + allocFlags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, allocFlags); + gpuMapFlags = FLD_SET_DRF(OS46, _FLAGS, _CACHE_SNOOP, _DISABLE, gpuMapFlags); + } + + ret = nvPushImportRmApiAllocMemory64( + pDevice, + pDevice->subDevice[0].deviceHandle, + pChannel->notifiers.memoryHandle, + NV01_MEMORY_SYSTEM, + allocFlags, + (void **)&pChannel->notifiers.cpuAddress, + &limit); + + if (ret != NVOS_STATUS_SUCCESS) { + pChannel->notifiers.memoryHandle = 0; + goto fail; + } + + /* Map the memory into the GPU's VA space. */ + + for (deviceIndex = 0; + deviceIndex < __nvPushGetNumDevices(pDevice); + deviceIndex++) { + NvU32 mapFlags = gpuMapFlags; + NvU64 gpuAddress; + if (deviceIndex == 0) { + /* For the first device, RM assigns a virtual address. */ + gpuAddress = 0; + } else { + /* For subsequent devices, use the same virtual address. */ + mapFlags = FLD_SET_DRF(OS46, _FLAGS, _DMA_OFFSET_FIXED, _TRUE, + mapFlags); + gpuAddress = pChannel->notifiers.gpuAddress; + nvAssert(gpuAddress != 0); + } + ret = nvPushImportRmApiMapMemoryDma( + pDevice, + pDevice->subDevice[deviceIndex].deviceHandle, + pDevice->subDevice[deviceIndex].gpuVASpaceCtxDma, + pChannel->notifiers.memoryHandle, + 0, /* offset */ + size, + mapFlags, + &gpuAddress); + + if (ret != NVOS_STATUS_SUCCESS) { + goto fail; + } + + if (deviceIndex == 0) { + pChannel->notifiers.gpuAddress = gpuAddress; + } else { + nvAssert(pChannel->notifiers.gpuAddress == gpuAddress); + } + } + + /* Create the internal notifier ctxDma. 
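+       RM uses it as the channel's error context DMA (hObjectError in
+       AllocChannelObject()); its limit covers only the internal
+       notifiers at the start of the memory.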
*/ + + pChannel->notifiers.errorCtxDma = + GetChannelHandle(pParams, pUsedHandleBitmask); + + ctxdmaParams.hMemory = pChannel->notifiers.memoryHandle; + ctxdmaParams.flags = DRF_DEF(OS03, _FLAGS, _MAPPING, _KERNEL) | + DRF_DEF(OS03, _FLAGS, _HASH_TABLE, _DISABLE); + /* the internal notifiers are at the start of the memory */ + ctxdmaParams.offset = 0; + ctxdmaParams.limit = (NV_PUSH_NUM_INTERNAL_NOTIFIERS * + sizeof(NvNotification)) - 1; + + ret = nvPushImportRmApiAlloc(pDevice, + pDevice->subDevice[0].deviceHandle, + pChannel->notifiers.errorCtxDma, + NV01_CONTEXT_DMA, + &ctxdmaParams); + + if (ret != NVOS_STATUS_SUCCESS) { + pChannel->notifiers.errorCtxDma = 0; + goto fail; + } + + /* + * Initialize the error notifier; note that there is only one + * error notifier shared by all subdevices, so we specify master as the + * subDeviceMask. + */ + nvPushInitWaitForNotifier(pChannel, + NV_PUSH_ERROR_NOTIFIER_INDEX, + NV_PUSH_SUBDEVICE_MASK_PRIMARY); + + return TRUE; + +fail: + FreeNotifiers(pChannel); + return FALSE; +} + +static NvU32 GetExtendedBase(NvU64 offset) +{ + return NvU64_HI32(offset) >> 8; +} + +static void InitGpFifoExtendedBase( + NvPushChannelPtr pChannel) +{ + const NvU64 pbBase = pChannel->main.gpuMapOffset; + const NvU32 extendedBase = GetExtendedBase(pbBase); + NvU32 *gpPointer = &(pChannel->gpfifo[pChannel->gpPutOffset*2]); + NvU32 i; + + if (!pChannel->pDevice->hal.caps.extendedBase) { + nvAssert(extendedBase == 0); + return; + } + + /* + * Because of the natural VA alignment specified when allocating the + * pushbuffer, all parts of the pushbuffer surface should be in the same + * 40-bit region. + */ + nvAssert(GetExtendedBase(pChannel->main.gpuMapOffset) == + GetExtendedBase(pChannel->progressTracker.gpuMapOffset)); + nvAssert(GetExtendedBase(pChannel->main.gpuMapOffset + + pChannel->main.sizeInBytes - 1) == + GetExtendedBase(pChannel->main.gpuMapOffset)); + nvAssert(GetExtendedBase(pChannel->progressTracker.gpuMapOffset + + pChannel->progressTracker.sizeInBytes - 1) == + GetExtendedBase(pChannel->progressTracker.gpuMapOffset)); + + /* Set the "extended base" for all subsequent methods */ + gpPointer[0] = DRF_NUM(C86F, _GP_ENTRY0, _PB_EXTENDED_BASE_OPERAND, extendedBase); + gpPointer[1] = DRF_DEF(C86F, _GP_ENTRY1, _OPCODE, _SET_PB_SEGMENT_EXTENDED_BASE); + gpPointer += 2; + + /* Pad out with NOP GPFIFO methods so everything remains aligned. */ + for (i = 1; i < NV_PUSH_NUM_GPFIFO_ENTRIES_PER_KICKOFF; i++) { + gpPointer[0] = 0; + gpPointer[1] = DRF_DEF(C86F, _GP_ENTRY1, _OPCODE, _NOP); + gpPointer += 2; + } + + pChannel->gpPutOffset += NV_PUSH_NUM_GPFIFO_ENTRIES_PER_KICKOFF; + +} + +NvBool nvPushAllocChannel( + const NvPushAllocChannelParams *pParams, + NvPushChannelPtr buffer) +{ + NvPushDevicePtr pDevice; + void *cpuAddress = NULL; + NvU64 gpuAddress = 0; + NvU64 usedHandleBitmask = 0; + NvBool coherent = FALSE; + + NVMISC_MEMSET(buffer, 0, sizeof(*buffer)); + + pDevice = pParams->pDevice; + + buffer->pDevice = pDevice; + buffer->logNvDiss = pParams->logNvDiss; + buffer->noTimeout = pParams->noTimeout; + buffer->ignoreChannelErrors = pParams->ignoreChannelErrors; + + buffer->currentSubDevMask = NV_PUSH_SUBDEVICE_MASK_ALL; + + /* + * Assign main.sizeInBytes early, because the rest of + * initialization relies on knowing the main pushbuffer size. + * Note this must fit in NV_PUSH_PROGRESS_TRACKER_SEMAPHORE_GET, + * which stores dwords. 
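+     * (For example, a 64 KiB pushbuffer is recorded as 16384 dwords.)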
+ */ + nvAssert((DRF_MASK(NV_PUSH_PROGRESS_TRACKER_SEMAPHORE_GET) * 4) > + pParams->pushBufferSizeInBytes); + buffer->main.sizeInBytes = pParams->pushBufferSizeInBytes; + + /* + * Compute numGpFifoEntries. There are several constraints: + * + * - We make numGpFifoEntries 1/64th the size of the main + * pushbuffer. The maximum pushbuffer size is 1048572, and we + * consume 2 gpFifo entries per kickoff. This works out to be + * 128 bytes of pushbuffer (32 dwords) per kickoff, before we + * are gpFifo-limited. + * + * - Per dev_pbdma.ref, "The number of GP entries in the circular + * buffer is always a power of 2." So, round up to the next + * power of two. + * + * - Because we consume 2 gpFifo entries per kickoff + * (NV_PUSH_NUM_GPFIFO_ENTRIES_PER_KICKOFF), we also align to a + * multiple of 2. This should be guaranteed by the power of 2 + * check. + * + * - numGpFifoEntries must fit in + * NV_PUSH_PROGRESS_TRACKER_SEMAPHORE_GP_GET so that the + * progress tracker semaphore releases can report the consumed + * gpFifo entry. The distribution of bits in + * NV_PUSH_PROGRESS_TRACKER_SEMAPHORE should ensure this is + * satisfied. + */ + + buffer->numGpFifoEntries = pParams->pushBufferSizeInBytes / 64; + + buffer->numGpFifoEntries = nvNextPow2_U32(buffer->numGpFifoEntries); + + nvAssert((buffer->numGpFifoEntries % + NV_PUSH_NUM_GPFIFO_ENTRIES_PER_KICKOFF) == 0); + + nvAssert((DRF_MASK(NV_PUSH_PROGRESS_TRACKER_SEMAPHORE_GP_GET) * 2) > + buffer->numGpFifoEntries); + + if (!GetCoherenceFlags(buffer, &coherent)) { + goto failed; + } + + if (!AllocNotifiers(buffer, pParams, coherent, &usedHandleBitmask)) { + nvPushImportLogError(pDevice, + "Failed to allocate notification memory."); + goto failed; + } + + /* Only allocate memory for one pushbuffer. All subdevices will share */ + if (!AllocPushbuffer(buffer, + pParams, + &usedHandleBitmask, + &cpuAddress, + &gpuAddress)) { + nvPushImportLogError(pDevice, + "Push buffer DMA allocation failed"); + goto failed; + } + + /* First the "main" pushbuffer */ + InitDmaSegment(&buffer->main, + cpuAddress, + gpuAddress, + pParams->pushBufferSizeInBytes); + /* Next the GPFIFO */ + buffer->gpfifo = + (NvU32 *)((char *)cpuAddress + __nvPushGpFifoOffset(buffer)); + buffer->gpPutOffset = 0; + /* Next the "progressTracker" */ + InitDmaSegment(&buffer->progressTracker, + (char *)cpuAddress + __nvPushProgressTrackerOffset(buffer), + gpuAddress + __nvPushProgressTrackerOffset(buffer), + ProgressTrackerBufferSize(buffer)); + + if (!nvDmaAllocUserD(buffer, pParams, &usedHandleBitmask)) { + goto failed; + } + + if (!AllocChannelObject(buffer, pParams, + &usedHandleBitmask, gpuAddress)) { + goto failed; + } + + if (pDevice->hal.caps.clientAllocatesUserD && + !RequestChidToken(buffer)) { + goto failed; + } + + if (!AllocSemaSurface(buffer, pParams, coherent, &usedHandleBitmask)) { + goto failed; + } + +#if defined(DEBUG) + if (buffer->logNvDiss) { + nvPushImportLogNvDiss(buffer, "nvdiss: encoding 2\n"); + } +#endif /* DEBUG */ + + InitGpFifoExtendedBase(buffer); + + if (!__nvPushTestPushBuffer(buffer)) { + goto failed; + } + + buffer->initialized = TRUE; + + return TRUE; + +failed: + nvPushFreeChannel(buffer); + return FALSE; +} + +/*! + * Free resources allocated by AllocChannel(). 
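+ * (i.e., by nvPushAllocChannel(); this is also that function's failure
+ * path, so it must tolerate a partially initialized channel)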
+ */ +void nvPushFreeChannel(NvPushChannelPtr buffer) +{ + NvPushDevicePtr pDevice = buffer->pDevice; + unsigned int sd; + int deviceIndex; + + if (pDevice == NULL) { + goto done; + } + + /* Unmap pushbuffer DMA controls */ + for (sd = 0; sd < pDevice->numSubDevices; sd++) { + NvU32 userdMapHandle; + + deviceIndex = __nvPushGetDeviceIndex(pDevice, sd); + if (pDevice->hal.caps.clientAllocatesUserD) { + userdMapHandle = buffer->userD[deviceIndex].hMemory; + } else { + userdMapHandle = buffer->channelHandle[deviceIndex]; + } + + if (buffer->control[sd]) { + nvPushImportRmApiUnmapMemory(pDevice, + pDevice->subDevice[sd].handle, + userdMapHandle, + buffer->control[sd], + 0); + buffer->control[sd] = NULL; + } + } + + for (deviceIndex = __nvPushGetNumDevices(pDevice) - 1; + deviceIndex >= 0; + deviceIndex--) { + if (buffer->channelHandle[deviceIndex] != 0) { + nvPushImportRmApiFree(pDevice, + pDevice->subDevice[deviceIndex].deviceHandle, + buffer->channelHandle[deviceIndex]); + buffer->channelHandle[deviceIndex] = 0; + } + + if (buffer->userD[deviceIndex].hMemory != 0) { + nvPushImportRmApiFree(pDevice, + pDevice->subDevice[deviceIndex].deviceHandle, + buffer->userD[deviceIndex].hMemory); + buffer->userD[deviceIndex].hMemory = 0; + } + + if (buffer->pushbufferVAHandle[deviceIndex] != 0) { + nvPushImportRmApiFree(pDevice, + pDevice->subDevice[deviceIndex].deviceHandle, + buffer->pushbufferVAHandle[deviceIndex]); + buffer->pushbufferVAHandle[deviceIndex] = 0; + } + } + + if (buffer->pushbufferHandle != 0) { + nvPushImportRmApiFree(pDevice, + pDevice->subDevice[0].deviceHandle, + buffer->pushbufferHandle); + buffer->pushbufferHandle = 0; + } + + FreeNotifiers(buffer); + + FreeSemaSurface(buffer); + +done: + NVMISC_MEMSET(buffer, 0, sizeof(*buffer)); +} + +NvBool nvPushAllocDevice( + const NvPushAllocDeviceParams *pParams, + NvPushDevicePtr pDevice) +{ + unsigned int sd; + NvU64 usedHandleBitmask = 0; + + NVMISC_MEMSET(pDevice, 0, sizeof(*pDevice)); + + pDevice->hostDevice = pParams->hostDevice; + pDevice->pImports = pParams->pImports; + pDevice->numSubDevices = pParams->numSubDevices; + pDevice->clientSli = pParams->clientSli; + pDevice->clientHandle = pParams->clientHandle; + + pDevice->numClasses = pParams->numClasses; + pDevice->supportedClasses = pParams->supportedClasses; + + pDevice->hasFb = IsClassSupported(pDevice, NV01_MEMORY_LOCAL_USER); + + pDevice->confidentialComputeMode = pParams->confidentialComputeMode; + + for (sd = 0; sd < pParams->numSubDevices; sd++) { + pDevice->subDevice[sd].handle = pParams->subDevice[sd].handle; + pDevice->subDevice[sd].deviceHandle = pParams->subDevice[sd].deviceHandle; + pDevice->subDevice[sd].gpuVASpaceObject = pParams->subDevice[sd].gpuVASpaceObject; + pDevice->subDevice[sd].gpuVASpaceCtxDma = pParams->subDevice[sd].gpuVASpace; + } + + if (pParams->amodel.config != NV_AMODEL_NONE) { + nvAssert(!"Ignoring AModel configuration on non-XAMODEL build"); + } + pDevice->amodelConfig = pParams->amodel.config; + + CheckCaps(pDevice); + + if (!GetChannelClassAndUserDSize(pDevice, pParams)) { + nvPushImportLogError(pDevice, + "No supported command buffer format found"); + goto fail; + } + + if (!__nvPushGetHal(pParams, pDevice->gpfifoClass, &pDevice->hal)) { + nvPushImportLogError(pDevice, "No push buffer implementation found."); + goto fail; + } + + if (!AllocUserMode(pDevice, pParams, &usedHandleBitmask)) { + nvPushImportLogError(pDevice, + "Unable to allocate push buffer controls."); + goto fail; + } + + + return TRUE; + +fail: + nvPushFreeDevice(pDevice); + + 
return FALSE; +} + +void nvPushFreeDevice( + NvPushDevicePtr pDevice) +{ + FreeUserMode(pDevice); + + NVMISC_MEMSET(pDevice, 0, sizeof(*pDevice)); +} diff --git a/src/common/unix/nvidia-push/src/nvidia-push.c b/src/common/unix/nvidia-push/src/nvidia-push.c new file mode 100644 index 0000000..992a3bd --- /dev/null +++ b/src/common/unix/nvidia-push/src/nvidia-push.c @@ -0,0 +1,1161 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + + + +#include "nvidia-push-init.h" +#include "nvidia-push-methods.h" +#include "nvidia-push-utils.h" +#include "nvidia-push-priv.h" +#include "nvidia-push-priv-imports.h" + +#include "nvos.h" +#include "nv_assert.h" +#include "nvSemaphoreCommon.h" + +#include "class/cl2080.h" // NV2080_SUBDEVICE_NOTIFICATION_STATUS_DONE_SUCCESS + +#include "class/cla16f.h" // KEPLER_CHANNEL_GPFIFO_B +#include "class/cla26f.h" // KEPLER_CHANNEL_GPFIFO_C +#include "class/clb06f.h" // MAXWELL_CHANNEL_GPFIFO_A +#include "class/clc06f.h" // PASCAL_CHANNEL_GPFIFO_A +#include "class/clc36f.h" // VOLTA_CHANNEL_GPFIFO_A +#include "class/clc46f.h" // TURING_CHANNEL_GPFIFO_A +#include "class/clc56f.h" // AMPERE_CHANNEL_GPFIFO_A +#include "class/clc86f.h" // HOPPER_CHANNEL_GPFIFO_A +#include "class/clc361.h" // VOLTA_USERMODE_A +#include "ctrl/ctrl906f.h" // NV906F_CTRL_GET_CLASS_ENGINEID + +/* + * This low-level macro pushes SetSubDevice methods. Most of the driver should + * use the higher-level NV_DMA_SET_SUBDEVICE_MASK macro. + */ +#define SetSubDeviceMask(_push_buffer, _segment, _mask) \ + do { \ + ASSERT_DRF_NUM(A16F, _DMA, _SET_SUBDEVICE_MASK_VALUE, _mask); \ + nvPushHeader(_push_buffer, _segment, 0, \ + DRF_DEF(A16F, _DMA, _SEC_OP, _GRP0_USE_TERT) | \ + DRF_DEF(A16F, _DMA, _TERT_OP, _GRP0_SET_SUB_DEV_MASK) | \ + DRF_NUM(A16F, _DMA, _SET_SUBDEVICE_MASK_VALUE, _mask)); \ + } while(0) + +/*! + * Check for a channel error on pChannel. + * + * Check the channel's error notifier to determine if a channel error + * occurred. If an error occurred, call the host driver's + * nvPushImportChannelErrorOccurred() implementation, which may take + * steps to recover from the error. + * + * \param[in] pChannel The GPU channel. + * + * \return Return TRUE if a channel error occurred, FALSE otherwise. 
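+ *
+ * Note: RM signals a channel error by writing 0xFFFF to the error
+ * notifier's status field; that sentinel is what is tested below.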
+ */ +NvBool nvPushCheckChannelError(NvPushChannelPtr pChannel) +{ + unsigned short status; + NvNotification *pNotifier; + + if (pChannel->ignoreChannelErrors) { + return FALSE; + } + + pNotifier = + nvPushGetNotifierCpuAddress(pChannel, NV_PUSH_ERROR_NOTIFIER_INDEX, 0); + + status = pNotifier->status; + + if (status == 0xFFFF) { + nvPushImportChannelErrorOccurred(pChannel, pNotifier->info32); + pChannel->channelErrorOccurred = TRUE; + return TRUE; + } + + return FALSE; +} + + +/* + * This function dumps the pushbuffer. The output format is compatible with + * nvdiss. + */ +static void DumpPB(NvPushChannelPtr pChannel) +{ +#if defined(DEBUG) + const NvPushChannelSegmentRec *segment = &pChannel->main; + const int columns = 8; + const NvPushChannelUnion *base = segment->base; + const NvPushChannelUnion *b = + (NvPushChannelUnion *)((char *)segment->base + segment->putOffset); + const NvPushChannelUnion *end = segment->buffer; + NvU32 i; + + if (!pChannel->logNvDiss) { + return; + } + + nvPushImportLogNvDiss(pChannel, + "***************** Push Buffer Contents: ********************\n"); + + for (i = 0; b && b != end; b++, i++) { + if ((i % columns) == 0) { + nvPushImportLogNvDiss(pChannel, "%06" NvUPtr_fmtx ":", + (NvUPtr)((b - base) * 4)); + } + + nvPushImportLogNvDiss(pChannel, " %08X", b->u); + + if ((i % columns) == (columns - 1) || (b + 1) == end) { + nvPushImportLogNvDiss(pChannel, "\n"); + } + } + + nvPushImportLogNvDiss(pChannel, + "***************** End Push Buffer Contents: ****************\n"); + nvPushImportLogNvDiss(pChannel, + "************************************************************\n"); +#endif /* DEBUG */ +} + +static NvU32 ReadProgressTrackerSemaphore(NvPushChannelPtr p, int sd) +{ + volatile NvU32 *ptr = (volatile NvU32 *)p->progressSemaphore.ptr[sd]; + return *ptr; +} + +static NvU32 GpFifoReadGet(NvPushChannelPtr p, int sd) +{ + const NvU32 current = ReadProgressTrackerSemaphore(p, sd); + const NvU32 getDwords = + DRF_VAL(_PUSH_PROGRESS_TRACKER, _SEMAPHORE, _GET, current); + return getDwords * 4; /* return get in bytes */ +} + +static NvU32 GpFifoReadGpGet(NvPushChannelPtr p, int sd) +{ + const NvU32 current = ReadProgressTrackerSemaphore(p, sd); + const NvU32 gpGetPairs = + DRF_VAL(_PUSH_PROGRESS_TRACKER, _SEMAPHORE, _GP_GET, current); + /* return gpGet in gpFifo indices, not pairs of indices */ + return gpGetPairs * 2; +} + +/* Read GPGET for all devices and return the minimum */ +static NvU32 ReadGpGetOffset(NvPushChannelPtr push_buffer) +{ + unsigned int i; + NvU32 bestGet = 0; + NvS32 distanceToPut, maxDistanceToPut = 0; + const NvPushDeviceRec *pDevice = push_buffer->pDevice; + + if (pDevice->numSubDevices <= 1) { + bestGet = GpFifoReadGpGet(push_buffer, 0); + } else { + for (i = 0; i < pDevice->numSubDevices; i++) { + NvU32 get = GpFifoReadGpGet(push_buffer, i); + + /* Compute distance to put, accounting for wraps */ + distanceToPut = push_buffer->gpPutOffset - get; + if (distanceToPut < 0) { + distanceToPut += push_buffer->numGpFifoEntries; + } + + nvAssert(distanceToPut >= 0); + + /* Track the maximum distance to put and the corresponding get. */ + if (distanceToPut >= maxDistanceToPut) { + maxDistanceToPut = distanceToPut; + bestGet = get; + } + } + } + + /* We should never see an odd index, since we always kick off two entries + * at a time. 
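+     * (NV_PUSH_NUM_GPFIFO_ENTRIES_PER_KICKOFF == 2: one entry for the
+     * main pushbuffer, one for the progress tracker methods).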
*/ + nvAssert((bestGet & 1) == 0); + + return bestGet; +} + +static void FillGpEntry(NvPushChannelSegmentPtr segment, NvU32 putOffset, + NvU32 *gpEntry0, NvU32 *gpEntry1) +{ + const NvU32 length = putOffset - segment->putOffset; + const NvU64 base = segment->gpuMapOffset + segment->putOffset; + *gpEntry0 = + DRF_NUM(A16F, _GP_ENTRY0, _GET, NvU64_LO32(base) >> 2); + *gpEntry1 = + DRF_NUM(A16F, _GP_ENTRY1, _GET_HI, NvU64_HI32(base)) | + DRF_NUM(A16F, _GP_ENTRY1, _LENGTH, length >> 2); + + ASSERT_DRF_NUM(A16F, _GP_ENTRY1, _LENGTH, length >> 2); + nvAssert(segment->putOffset + length <= segment->sizeInBytes); + nvAssert(!(segment->putOffset & 0x3)); + nvAssert(!(length & 0x3)); + nvAssert(length != 0); +} + +/*! + * Calculate the amount of space that we need for each kickoff in the progress + * tracker pushbuffer. + */ +NvU32 __nvPushProgressTrackerEntrySize(const NvPushDeviceRec *pDevice) +{ + NvU32 dwords, size; + + /* + * The minimum number of methods we need is 4 for pre-Volta (SemaphoreA-D), + * and 5 for Volta+ (SemAddrHi/Lo, SemPayloadHi/Lo, SemExecute). + * The pushbuffer encoding consists of one 32-bit header plus one 32-bit + * data entry for each method. + */ + dwords = 1; + + if (pDevice->hal.caps.voltaSemMethods) { + dwords += 5; + } else { + dwords += 4; + } + + /* + * If SLI is enabled, we may need to ensure that the semaphore is written + * on all subdevices. This requires two extra dwords: SET_SUBDEVICE_MASK + * to a broadcast state, and a second SET_SUBDEVICE_MASK to restore the old + * state. + */ + if (pDevice->numSubDevices > 1) { + dwords += 2; + } + + size = dwords * sizeof(NvU32); + + /* + * If the GPU is affected by hardware bug 1667921, then we pad this out to + * NV_ALIGN_LBDAT_EXTRA_BUG. (The workaround requires that each entry be a + * multiple of this size.) + */ + nvAssert(size <= NV_ALIGN_LBDAT_EXTRA_BUG); + return pDevice->hostLBoverflowBug1667921 ? NV_ALIGN_LBDAT_EXTRA_BUG : size; +} + +/*! + * This function emits methods into the progress tracker pushbuffer region that + * will cause host to release a semaphore with payload 'putOffset', then writes + * a GPFIFO entry to 'gpPointer' that will make host fetch those methods. + * + * Normally, for progress tracking purposes, we only need to know if + * host has read the method, in which case _RELEASE_WFI=DIS is sufficient + * below. But, when we need to know if the channel is really idle, host + * must send WAIT_FOR_IDLE to the downstream engine (_RELEASE_WFI=EN). + * Use 'progressTrackerWFI' to control this behavior. + */ +static void InsertProgressTracker(NvPushChannelPtr p, NvU32 putOffset, + NvU32 gpPutOffset, NvU32 *gpPointer, + NvBool progressTrackerWFI) +{ + const NvPushDeviceRec *pDevice = p->pDevice; + NvPushChannelSegmentPtr segment = &p->progressTracker; + + /* + * The progress tracker pushbuffer segment is sized such that we have + * just enough space to write the methods we need to for every (two) + * entries in the GPFIFO. + * + * We directly calculate the location to begin writing methods from the + * size of a single entry and gpPutOffset (divided by two because we only + * write one progress tracker entry for every two GPFIFO entries). 
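+     *
+     * Worked example (illustrative values only): with a 24-byte entry
+     * size (six dwords: Volta+ semaphore methods, single subdevice, no
+     * bug-1667921 padding) and gpPutOffset == 6, this is entry 3, so the
+     * methods land at byte offset 72 within the segment.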
+ */ + const NvU32 entrySize = __nvPushProgressTrackerEntrySize(pDevice); + const NvU32 entry = gpPutOffset >> 1; + const NvU32 entryOffset = entrySize * entry; + const NvU32 putOffsetDwords = putOffset / 4; + const NvU32 payload = + DRF_NUM(_PUSH_PROGRESS_TRACKER, _SEMAPHORE, _GET, putOffsetDwords) | + DRF_NUM(_PUSH_PROGRESS_TRACKER, _SEMAPHORE, _GP_GET, gpPutOffset / 2); + + NvU32 restoreSDM = NV_PUSH_SUBDEVICE_MASK_ALL; + + /* + * PROGRESS_TRACKER_SEMAPHORE_GET above stores dwords, so + * putOffset had better be a multiple of four. + */ + nvAssert((putOffset % 4) == 0); + + /* + * PROGRESS_TRACKER_SEMAPHORE_GP_GET above stores pairs of gpFifo + * entries, so gpPutOffset had better be even. + */ + nvAssert((gpPutOffset % 2) == 0); + + segment->freeDwords = entrySize / sizeof(NvU32); + segment->putOffset = entryOffset; + segment->buffer = (NvPushChannelUnion *)((char *)segment->base + entryOffset); + + /* + * __nvPushProgressTrackerEntrySize() contains a calculation of how many + * methods we need. This must be kept up-to-date with the actual methods + * pushed below. + */ + if (!nvPushSubDeviceMaskEquiv(pDevice, p->currentSubDevMask, + NV_PUSH_SUBDEVICE_MASK_ALL)) { + restoreSDM = p->currentSubDevMask; + SetSubDeviceMask(p, progressTracker, NV_PUSH_SUBDEVICE_MASK_ALL); + } + if (pDevice->hal.caps.voltaSemMethods) { + const NvU32 semaphoreOperation = + DRF_DEF(C36F, _SEM_EXECUTE, _OPERATION, _RELEASE) | + DRF_DEF(C36F, _SEM_EXECUTE, _PAYLOAD_SIZE, _32BIT) | + DRF_DEF(C36F, _SEM_EXECUTE, _RELEASE_TIMESTAMP, _DIS) | + (progressTrackerWFI ? + DRF_DEF(C36F, _SEM_EXECUTE, _RELEASE_WFI, _EN) : + DRF_DEF(C36F, _SEM_EXECUTE, _RELEASE_WFI, _DIS)); + + __nvPushStart(p, progressTracker, 0, NVC36F_SEM_ADDR_LO, 5, _INC_METHOD); + __nvPushSetMethodDataSegmentU64LE(segment, p->progressSemaphore.gpuVA); + __nvPushSetMethodDataSegment(segment, payload); + __nvPushSetMethodDataSegment(segment, 0); + __nvPushSetMethodDataSegment(segment, semaphoreOperation); + } else { + const NvU32 semaphoreOperation = + DRF_DEF(A16F, _SEMAPHORED, _OPERATION, _RELEASE) | + DRF_DEF(A16F, _SEMAPHORED, _RELEASE_SIZE, _4BYTE) | + (progressTrackerWFI ? + DRF_DEF(A16F, _SEMAPHORED, _RELEASE_WFI, _EN) : + DRF_DEF(A16F, _SEMAPHORED, _RELEASE_WFI, _DIS)); + + __nvPushStart(p, progressTracker, 0, NVA16F_SEMAPHOREA, 4, _INC_METHOD); + __nvPushSetMethodDataSegmentU64(segment, p->progressSemaphore.gpuVA); + __nvPushSetMethodDataSegment(segment, payload); + __nvPushSetMethodDataSegment(segment, semaphoreOperation); + } + + if (restoreSDM != NV_PUSH_SUBDEVICE_MASK_ALL) { + SetSubDeviceMask(p, progressTracker, restoreSDM); + } + + if (pDevice->hostLBoverflowBug1667921) { + /* The workaround for bug 1667921 dictates that we must kick off a + * GPFIFO segment of an exact size. Pad out with NOPs. 
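+           (__nvPushProgressTrackerEntrySize() already padded each entry
+           to NV_ALIGN_LBDAT_EXTRA_BUG bytes for this case; the NOPs
+           below just consume the leftover dwords.)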
*/ + while (segment->freeDwords) { + nvPushImmedValSegment(p, progressTracker, 0, NVA16F_NOP, 0); + } + } + + FillGpEntry(segment, + (NvU32)((char *)segment->buffer - (char *)segment->base), + &gpPointer[0], &gpPointer[1]); +} + +#if defined(NVCPU_X86) || defined(NVCPU_X86_64) +#define NV_CPU_MEMFENCE() __asm__ __volatile__ ("sfence" : : : "memory") +#elif NVCPU_IS_FAMILY_ARM +#define NV_CPU_MEMFENCE() __asm__ __volatile__ ("dsb sy\n\t" : : : "memory") +#elif NVCPU_IS_PPC64LE +#define NV_CPU_MEMFENCE() __asm__ __volatile__ ("lwsync\n\t" : : : "memory") +#else +#define NV_CPU_MEMFENCE() /* nothing */ +#endif + +static NvBool nvWriteGpEntry( + NvPushChannelPtr push_buffer, + NvU32 putOffset, + NvBool progressTrackerWFI) +{ + NvU32 gpEntry0, gpEntry1; + + NvU32 nextGpPut; + NvU32 *gpPointer; + const NvU32 entriesNeeded = NV_PUSH_NUM_GPFIFO_ENTRIES_PER_KICKOFF; + NvPushDevicePtr pDevice = push_buffer->pDevice; + + FillGpEntry(&push_buffer->main, putOffset, &gpEntry0, &gpEntry1); + + nextGpPut = (push_buffer->gpPutOffset + entriesNeeded) & + (push_buffer->numGpFifoEntries - 1); + gpPointer = &(push_buffer->gpfifo[push_buffer->gpPutOffset*2]); + + nvAssert((nextGpPut % 2) == 0); + + // Wait for a free entry in the buffer + while (nextGpPut == ReadGpGetOffset(push_buffer)) { + if (nvPushCheckChannelError(push_buffer)) { + nvAssert(!"A channel error occurred in nvWriteGpEntry()"); + return FALSE; + } + } + gpPointer[0] = gpEntry0; + gpPointer[1] = gpEntry1; + gpPointer += 2; + + InsertProgressTracker(push_buffer, putOffset, + push_buffer->gpPutOffset, gpPointer, + progressTrackerWFI); + + /* Make sure all CPU writes to write-combined memory get flushed */ + NV_CPU_MEMFENCE(); + + pDevice->hal.kickoff(push_buffer, push_buffer->gpPutOffset, nextGpPut); + + push_buffer->gpPutOffset = nextGpPut; + + return TRUE; +} + +static void Kickoff(NvPushChannelPtr p, NvBool progressTrackerWFI) +{ + NvU32 putOffset; + + if (!p) { + return; + } + + putOffset = (NvU32)((char *)p->main.buffer - (char *)p->main.base); + + if (p->main.putOffset == putOffset) { + return; + } + + DumpPB(p); + + if (nvWriteGpEntry(p, putOffset, progressTrackerWFI)) { + // Change putOffset only if nvWriteGpEntry succeeds. + // If it fails, it means we went through channel recovery and the + // recovery process changed putOffset. + p->main.putOffset = putOffset; + } +} + +void nvPushKickoff(NvPushChannelPtr p) +{ + Kickoff(p, FALSE /* progressTrackerWFI */); +} + +/*! + * Write GP_PUT to USERD. On GPUs where HOST snoops USERD, this is all we need + * to do to kick off the channel. + */ +static void UserDKickoff(NvPushChannelPtr push_buffer, + NvU32 oldGpPut, NvU32 newGpPut) +{ + /* Kick off all push buffers */ + unsigned int sd; + + for (sd = 0; sd < push_buffer->pDevice->numSubDevices; sd++) { + KeplerBControlGPFifo *pUserd = + (KeplerBControlGPFifo *)push_buffer->control[sd]; + pUserd->GPPut = newGpPut; + } +} + +/*! + * Kick off a channel on GPUs where HOST does not snoop USERD. + * + * This is implemented in two steps: + * 1. Write GP_PUT to USERD; + * 2. Write the channel's token to HOST's doorbell register. + */ +static void DoorbellKickoff(NvPushChannelPtr pChannel, + NvU32 oldGpPut, NvU32 newGpPut) +{ + const NvPushDeviceRec *pDevice = pChannel->pDevice; + NvU32 sd; + + /* First update GPPUT in USERD.
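+ * The CPU memory barrier below orders the GPPut write ahead of the + * doorbell write, so HOST never observes the doorbell before the new + * GPPut value.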
*/ + UserDKickoff(pChannel, oldGpPut, newGpPut); + +#if NVCPU_IS_PPC64LE + __asm__ __volatile__ ("sync\n\t" : : : "memory"); +#elif NVCPU_IS_FAMILY_ARM + __asm__ __volatile__ ("dsb sy\n\t" : : : "memory"); +#endif + + /* Then ring the doorbells so HOST knows to check for the updated GPPUT. */ + for (sd = 0; sd < pDevice->numSubDevices; sd++) { + volatile NvU32 *doorbell; + NvU8 *pUserMode = (NvU8 *)pDevice->subDevice[sd].pUserMode; + NvU32 notifIndex; + NvNotification *pTokenNotifier = NULL; + + if (pDevice->clientSli) { + notifIndex = NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1 + sd; + } else { + /* RM doesn't maintain a separate token for each subdevice. */ + notifIndex = NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1; + } + notifIndex |= NV_PUSH_NOTIFIER_INTERNAL_BIT; + + /* The final parameter 'sd' in nvPushGetNotifierCpuAddress is unused + * for internal notifiers. */ + pTokenNotifier = + nvPushGetNotifierCpuAddress(pChannel, notifIndex, 0); + + nvAssert(pUserMode != NULL); + + doorbell = (volatile NvU32 *)(pUserMode + + NVC361_NOTIFY_CHANNEL_PENDING); + *doorbell = pTokenNotifier->info32; + } +} + +/* Read GET for all devices and return the minimum or maximum. */ +NvU32 nvPushReadGetOffset(NvPushChannelPtr push_buffer, NvBool minimum) +{ + unsigned int i; + NvU32 get, bestGet = 0; + const NvPushDeviceRec *pDevice = push_buffer->pDevice; + NvS32 distanceToPut, minmaxDistanceToPut = + minimum ? 0 : push_buffer->main.sizeInBytes; + + if (pDevice->numSubDevices <= 1) { + return GpFifoReadGet(push_buffer, 0); + } + + for (i = 0; i < pDevice->numSubDevices; i++) { + get = GpFifoReadGet(push_buffer, i); + + /* Compute distance to put, accounting for wraps */ + distanceToPut = push_buffer->main.putOffset - get; + if (distanceToPut < 0) { + distanceToPut += push_buffer->main.sizeInBytes; + } + + /* Track the min/max distance to put and the corresponding get. */ + if ((minimum && (distanceToPut >= minmaxDistanceToPut)) || + (!minimum && (distanceToPut <= minmaxDistanceToPut))) { + minmaxDistanceToPut = distanceToPut; + bestGet = get; + } + } + return bestGet; +} + +static void WriteGetOffset(NvPushChannelPtr p, NvU32 value) +{ + NvPushDeviceRec *pDevice = p->pDevice; + unsigned int sd; + + for (sd = 0; sd < pDevice->numSubDevices; sd++) { + volatile NvU32 *ptr = (volatile NvU32 *)(p->progressSemaphore.ptr[sd]); + *ptr = value; + } + + NV_CPU_MEMFENCE(); +} + +static NvBool IdleChannel(NvPushChannelPtr p, NvBool progressTrackerWFI, + NvU32 timeoutMSec) +{ + NvU64 baseTime, currentTime; + + /* + * Write a channel NOP, kick off the pushbuffer, and wait for the + * pushbuffer to drain. It is important for the NOP to be written + * here: this ensures the kickoff won't be optimized away, as it + * otherwise would if the host driver called: + * + * nvPushKickoff(pChannel); + * nvPushIdleChannel(pChannel); + * + * The path nvPushIdleChannel() => IdleChannel() => Kickoff() will + * issue a WFI, which is important to ensure the channel is really + * idle. + */ + nvPushHeader(p, main, 0, NVA16F_DMA_NOP); + + Kickoff(p, progressTrackerWFI); + + for (baseTime = currentTime = nvPushImportGetMilliSeconds(p->pDevice); + TRUE; currentTime = nvPushImportGetMilliSeconds(p->pDevice)) { + + nvAssert(p->main.putOffset != 0); + + if (nvPushReadGetOffset(p, TRUE) == p->main.putOffset) { + return TRUE; + } + + if (currentTime > (baseTime + timeoutMSec) && + !p->noTimeout) { + return FALSE; + } + } +} + +/* + * Idle the channel with the requested timeout, but don't print an error when + * it times out.
This should be used for verifying expected timeouts on + * idle channels in testing. + */ +NvBool nvPushIdleChannelTest(NvPushChannelPtr pChannel, NvU32 timeoutMSec) +{ + return IdleChannel(pChannel, TRUE /* progressTrackerWFI */, timeoutMSec); +} + +NvBool nvPushIdleChannel(NvPushChannelPtr pChannel) +{ + NvBool ret; + + ret = IdleChannel(pChannel, TRUE /* progressTrackerWFI */, + NV_PUSH_NOTIFIER_SHORT_TIMEOUT); + + if (!ret) { + nvPushImportLogError(pChannel->pDevice, "Failed to idle DMA."); + } + + return ret; +} + +NvBool __nvPushTestPushBuffer(NvPushChannelPtr p) +{ + NvBool ret; + + /* + * Immediately after allocating the pushbuffer, push a channel NOP and + * babysit the channel until it's consumed as a quick sanity check. + * + * Note we use a full long timeout (10 seconds) when performing this + * sanity test. In normal operation, idling will happen very quickly. + * However, when the GPU is under heavy load in stress tests, it can + * take much longer to idle the channel. + */ + WriteGetOffset(p, 0); + + ret = IdleChannel(p, FALSE /* progressTrackerWFI */, + NV_PUSH_NOTIFIER_LONG_TIMEOUT); + + if (!ret) { + nvPushImportLogError(p->pDevice, "Failed to initialize DMA."); + } + + return ret; +} + +void __nvPushMakeRoom(NvPushChannelPtr push_buffer, NvU32 count) +{ + NvU32 getOffset; + NvU32 putOffset; + NvBool fenceToEnd = FALSE; + + putOffset = (NvU32) ((char *)push_buffer->main.buffer - + (char *)push_buffer->main.base); + + nvAssert(putOffset <= push_buffer->main.sizeInBytes); + nvAssert((count << 2) <= push_buffer->main.sizeInBytes); + + if (putOffset != push_buffer->main.putOffset) { + nvPushKickoff(push_buffer); + } + nvAssert(putOffset == push_buffer->main.putOffset); + + while (count >= push_buffer->main.freeDwords) { + if (nvPushCheckChannelError(push_buffer)) { + nvAssert(!"A channel error occurred in __nvPushMakeRoom()"); + // Unlike with non-gpfifo channels, RC recovery can't reset GET to + // 0 so we need to continue as if we just started waiting for space. + return __nvPushMakeRoom(push_buffer, count); + } + + getOffset = nvPushReadGetOffset(push_buffer, TRUE); + nvAssert(getOffset <= push_buffer->main.sizeInBytes); + + if (getOffset > putOffset) { + // We previously wrapped. The space between PUT and GET is + // available. + push_buffer->main.freeDwords = ((getOffset - putOffset) >> 2) - 1; + + // TODO: If still not enough room, call DelayRegisterReadsCaller + // here. + } else if (!fenceToEnd) { + // GET wrapped, so we can write all the way to the end of the + // pushbuffer. + fenceToEnd = TRUE; + push_buffer->main.freeDwords = + (push_buffer->main.sizeInBytes - putOffset) >> 2; + } else { + // getOffset is behind putOffset and there wasn't enough space + // between putOffset and the end of the pushbuffer. Wrap to the + // beginning and wait for GET to advance far enough. + + nvAssert((putOffset >> 2) > count); + + // Record where the last method was written so that RC recovery can + // know where to wrap. + nvPushImportPushbufferWrapped(push_buffer); + + // We can't write putOffset to 0 while getOffset is 0; otherwise we + // could fool ourselves into thinking a full pushbuffer is empty. + if (getOffset) { + // XXX NOTE: While it would be nice to be able to decide that we + // can write to the whole pushbuffer when getOffset == putOffset, we + // can fall into the trap of writing the same amount of data to the + // pushbuffer twice and not being able to tell whether GET has + // wrapped all the way around, or hasn't moved at all.
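+ // For example, if getOffset == 0x100, the code below sets freeDwords + // to (0x100 >> 2) - 1 == 63: we always stop one dword short of GET.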
+ + push_buffer->main.putOffset = 0; + push_buffer->main.buffer = push_buffer->main.base; + push_buffer->main.freeDwords = (getOffset >> 2) - 1; + } + } + + if (nvPushCheckChannelError(push_buffer)) { + nvAssert(!"A channel error was recovered when waiting for room in push buffer"); + return __nvPushMakeRoom(push_buffer, count); + } + } + +#if defined(DEBUG) + { + const NvU32 freeBytes = push_buffer->main.freeDwords * 4; + const NvU8 *curPtr = + ((NvU8 *)push_buffer->main.buffer) + freeBytes; + const NvU8 *endPtr = + ((NvU8 *)push_buffer->main.base) + push_buffer->main.sizeInBytes; + + nvAssert(freeBytes <= push_buffer->main.sizeInBytes); + nvAssert(curPtr <= endPtr); + } +#endif /* DEBUG */ +} + +/* + * This function initializes a notifier to IN_PROGRESS on all subdevices. + */ +void nvPushInitWaitForNotifier( + NvPushChannelPtr pChannel, + NvU32 notifierIndex, + NvU32 subdeviceMask) +{ + unsigned int sd; + + for (sd = 0; sd < pChannel->pDevice->numSubDevices; sd++) { + + NvNotification *pNotifier; + + if (!(subdeviceMask & (1 << sd))) { + continue; + } + + pNotifier = nvPushGetNotifierCpuAddress(pChannel, notifierIndex, sd); + pNotifier->status = NV2080_SUBDEVICE_NOTIFICATION_STATUS_IN_PROGRESS; + } +} + +/* + * This function waits for a notifier. It does the following: + * + * - has a short and long timeout + * + * - if the short timeout was exceeded then check if PUT == GET. If PUT + * == GET then it is almost certain that the notifier really should + * have been delivered and something is wrong. We should just stop + * waiting for the notifier. + * + * - if the short timeout was exceeded and PUT != GET then try + * rewriting PUT in hopes of getting the hardware back on track. + * (note: this doesn't actually do anything on GPFIFO channels) + * + * - if PUT != GET and the long timeout is exceeded, the chip must + * be hung or our channel encountered an error. + * + * - For either timeout, in a debug build print a diagnostic message + * to indicate what went wrong. + * + * This function always prints out a terse message so that we can + * diagnose problems from the field. In debug builds we print out + * additional information. + */ + +void nvPushWaitForNotifier( + NvPushChannelPtr p, + NvU32 notifyIndex, + NvU32 subdeviceMask, + NvBool yield, + NvPushImportEvent *pEvent, + int id) +{ + NvU64 newtime, short_timeout_value = 0, long_timeout_value = 0; + NvBool short_timeout, long_timeout, shortTimeOutDone = FALSE; + NvU32 getOffset; + unsigned int sd; + NvNotification *pNotify; + int timeout = 0; + NvBool must_wait_for_event = (pEvent != NULL); + NvPushDevicePtr pDevice = p->pDevice; + + for (sd = 0; sd < pDevice->numSubDevices; sd++) { + if (!(subdeviceMask & (1 << sd))) + continue; + + pNotify = nvPushGetNotifierCpuAddress(p, notifyIndex, sd); + + // Give a chance not to enter the while loop if the notifier is + // already ready.
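+ // (When the caller passed an OS event, must_wait_for_event forces at + // least one pass through the loop so we actually wait on the event.)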
+ while ((must_wait_for_event) || + (pNotify->status != NV2080_SUBDEVICE_NOTIFICATION_STATUS_DONE_SUCCESS)) { + + newtime = nvPushImportGetMilliSeconds(pDevice); + + if (!short_timeout_value) { + short_timeout_value = newtime + NV_PUSH_NOTIFIER_SHORT_TIMEOUT; + long_timeout_value = newtime + NV_PUSH_NOTIFIER_LONG_TIMEOUT; + } + + if (must_wait_for_event && pEvent) { + if (timeout == 0) { + timeout = NV_PUSH_NOTIFIER_SHORT_TIMEOUT; + } else { + timeout = NV_PUSH_NOTIFIER_LONG_TIMEOUT-NV_PUSH_NOTIFIER_SHORT_TIMEOUT; + must_wait_for_event = FALSE; + } + + if (nvPushImportWaitForEvent(pDevice, pEvent, timeout)) { + // At this point we won't wait for OS events + // anymore, but still for the notifier. + must_wait_for_event = FALSE; + continue; + } + // We timed out or an error occurred + } + + short_timeout = (newtime > short_timeout_value); + long_timeout = (newtime > long_timeout_value); + + if (p->noTimeout) { + short_timeout = FALSE; + long_timeout = FALSE; + } + + if (nvPushCheckChannelError(p)) { + nvAssert(!"A channel error was recovered when waiting for a notifier; returning"); + return; + } + + if (short_timeout || long_timeout) { + + getOffset = GpFifoReadGet(p, sd); + if (p->main.putOffset == getOffset) { + + /* + * If PUT == GET then it is almost certain that the + * notifier really should have been delivered and + * something is wrong. We should just stop waiting for + * the notifier. + */ + + nvPushImportLogError(pDevice, + "WAIT (0, %d, 0x%04x, 0x%08x, 0x%08x)", + id, pNotify->status, getOffset, p->main.putOffset); + nvAssert(!"PUT == GET, but notifier has not completed; returning!"); + + /* Set status to done in case it is read */ + pNotify->status = NV2080_SUBDEVICE_NOTIFICATION_STATUS_DONE_SUCCESS; + + continue; + } + + if (long_timeout) { + + nvPushImportLogError(pDevice, + "WAIT (1, %d, 0x%04x, 0x%08x, 0x%08x)", + id, pNotify->status, getOffset, p->main.putOffset); + nvAssert(!"Long timeout exceeded; PUT != GET; returning!"); + + /* Set status to done in case it is read */ + pNotify->status = NV2080_SUBDEVICE_NOTIFICATION_STATUS_DONE_SUCCESS; + + return; + } + + if (!shortTimeOutDone) { + nvPushImportLogError(pDevice, + "WAIT (2, %d, 0x%04x, 0x%08x, 0x%08x)", + id, pNotify->status, getOffset, p->main.putOffset); + nvAssert(!"Short timeout exceeded; PUT != GET"); + + /* Once is enough */ + shortTimeOutDone = TRUE; + } + } + + /* optionally yield */ + if (!pEvent && yield) { + nvPushImportYield(pDevice); + } + } + } + + if (pEvent) { + /* Empty the event FIFO */ + nvPushImportEmptyEventFifo(pDevice, pEvent); + } +} + +// Decode a method header, returning the method count. Return FALSE if the +// value is not recognizable as a method header. +NvBool nvPushDecodeMethod(NvU32 header, NvU32 *count) +{ + switch (DRF_VAL(A16F, _DMA, _SEC_OP, header)) { + case NVA16F_DMA_SEC_OP_IMMD_DATA_METHOD: + *count = 0; + return TRUE; + case NVA16F_DMA_SEC_OP_INC_METHOD: + case NVA16F_DMA_SEC_OP_NON_INC_METHOD: + case NVA16F_DMA_SEC_OP_ONE_INC: + *count = DRF_VAL(A16F, _DMA, _METHOD_COUNT, header); + return TRUE; + default: + // Not a recognized method header! + return FALSE; + } +} + +static NvU32 GetSetObjectHandle(NvPushChannelPtr pChannel, NvU32 handle, + int deviceIndex) +{ + NvPushDevicePtr pDevice = pChannel->pDevice; + NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS params = { 0 }; + NvU32 ret; + + // AModel has a bug where it requires an object handle instead of a class + // and engine ID. 
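+ // On AModel, therefore, the raw handle is pushed as the SET_OBJECT + // data below; everywhere else we use the classEngineID queried from RM.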
+ if (nvPushIsAModel(pDevice)) { + return handle; + } + + // Query RM for the class and engine ID of this handle. + params.hObject = handle; + ret = nvPushImportRmApiControl(pDevice, + pChannel->channelHandle[deviceIndex], + NV906F_CTRL_GET_CLASS_ENGINEID, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvPushImportLogError(pDevice, "Failed to query object info."); + return 0; + } + +#if defined(DEBUG) + // Print debug spew mapping the SetObject ID to a class number for + // nvdiss + if (params.classID && pChannel->logNvDiss) { + nvPushImportLogNvDiss(pChannel, + "SetObjectId: class 0x%x, objectID 0x%x\n", + params.classID, params.classEngineID); + } +#endif /* DEBUG */ + + return params.classEngineID; +} + +// Issue a SET_OBJECT method on the specified subchannel. +void nvPushSetObject(NvPushChannelPtr p, NvU32 subch, NvU32 *object) +{ + const NvPushDeviceRec *pDevice = p->pDevice; + const NvU32 oldSubDevMask = p->currentSubDevMask; + int deviceIndex; + + for (deviceIndex = 0; + deviceIndex < __nvPushGetNumDevices(pDevice); + deviceIndex++) { + if (pDevice->clientSli) { + const NvU32 thisSDM = 1 << deviceIndex; + if ((thisSDM & oldSubDevMask) == 0) { + continue; + } + nvPushSetSubdeviceMask(p, thisSDM); + } + nvPushMethod(p, subch, NVA16F_SET_OBJECT, 1); + nvPushSetMethodData(p, + GetSetObjectHandle(p, object[deviceIndex], deviceIndex)); + } + nvPushSetSubdeviceMask(p, oldSubDevMask); +} + +void nvPushSetSubdeviceMask(NvPushChannelPtr p, NvU32 mask) +{ + if (nvPushSubDeviceMaskEquiv(p->pDevice, p->currentSubDevMask, mask)) { + return; + } + p->currentSubDevMask = mask; + + SetSubDeviceMask(p, main, mask); +} + +static void KeplerReleaseTimelineSemaphore( + NvPushChannelPtr p, + void *cpuAddress, + NvU64 gpuAddress, + NvU64 val) +{ + NvReportSemaphore32 *report = (NvReportSemaphore32 *)cpuAddress; + + // Must be done before submitting the semaphore release to ensure the maximum + // known-submitted value is never less than the semaphore's current value.
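+ // (The SEMAPHOREC payload below carries only the low 32 bits of 'val'; + // recording the maximum submitted value lets readers reconstruct the + // full 64-bit timeline value.)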
+ NvTimeSemFermiSetMaxSubmittedVal(&report->timer, val); + + nvPushMethod(p, 0, NVA16F_SEMAPHOREA, 4); + nvPushSetMethodDataU64(p, gpuAddress); // NVA16F_SEMAPHOREB + nvPushSetMethodData(p, val); // NVA16F_SEMAPHOREC + nvPushSetMethodData(p, // NVA16F_SEMAPHORED + DRF_DEF(A16F, _SEMAPHORED, _OPERATION, _RELEASE) | + DRF_DEF(A16F, _SEMAPHORED, _RELEASE_SIZE, _4BYTE)); + + nvPushMethod(p, 0, NVA16F_NON_STALL_INTERRUPT, 1); + nvPushSetMethodData(p, 0); +} + +static void KeplerAcquireTimelineSemaphore( + NvPushChannelPtr p, + NvU64 gpuAddress, + NvU64 val) +{ + nvPushMethod(p, 0, NVA16F_SEMAPHOREA, 4); + nvPushSetMethodDataU64(p, gpuAddress); // NVA16F_SEMAPHOREB + nvPushSetMethodData(p, val); // NVA16F_SEMAPHOREC + nvPushSetMethodData(p, // NVA16F_SEMAPHORED + DRF_DEF(A16F, _SEMAPHORED, _ACQUIRE_SWITCH, _ENABLED) | + DRF_DEF(A16F, _SEMAPHORED, _OPERATION, _ACQ_GEQ)); +} + +static void VoltaReleaseTimelineSemaphore( + NvPushChannelPtr p, + void *cpuAddress, + NvU64 gpuAddress, + NvU64 val) +{ + nvPushMethod(p, 0, NVC36F_SEM_ADDR_LO, 5); + nvPushSetMethodDataU64LE(p, gpuAddress); // NVC36F_SEM_ADDR_LO/HI + nvPushSetMethodDataU64LE(p, val); // NVC36F_SEM_PAYLOAD_LO/HI + nvPushSetMethodData(p, // NVC36F_SEM_EXECUTE + DRF_DEF(C36F, _SEM_EXECUTE, _OPERATION, _RELEASE) | + DRF_DEF(C36F, _SEM_EXECUTE, _RELEASE_WFI, _EN) | + DRF_DEF(C36F, _SEM_EXECUTE, _PAYLOAD_SIZE, _64BIT) | + DRF_DEF(C36F, _SEM_EXECUTE, _RELEASE_TIMESTAMP, _EN)); + + nvPushMethod(p, 0, NVC36F_NON_STALL_INTERRUPT, 1); + nvPushSetMethodData(p, 0); +} + +static void VoltaAcquireTimelineSemaphore( + NvPushChannelPtr p, + NvU64 gpuAddress, + NvU64 val) +{ + nvPushMethod(p, 0, NVC36F_SEM_ADDR_LO, 5); + nvPushSetMethodDataU64LE(p, gpuAddress); // NVC36F_SEM_ADDR_LO/HI + nvPushSetMethodDataU64LE(p, val); // NVC36F_SEM_PAYLOAD_LO/HI + nvPushSetMethodData(p, // NVC36F_SEM_EXECUTE + DRF_DEF(C36F, _SEM_EXECUTE, _OPERATION, _ACQ_STRICT_GEQ) | + DRF_DEF(C36F, _SEM_EXECUTE, _ACQUIRE_SWITCH_TSG, _EN) | + DRF_DEF(C36F, _SEM_EXECUTE, _PAYLOAD_SIZE, _64BIT)); +} + +void nvPushReleaseTimelineSemaphore( + NvPushChannelPtr p, + void *cpuAddress, + NvU64 gpuAddress, + NvU64 val) +{ + NvPushDevicePtr pDevice = p->pDevice; + pDevice->hal.releaseTimelineSemaphore(p, cpuAddress, gpuAddress, val); +} + +void nvPushAcquireTimelineSemaphore( + NvPushChannelPtr p, + NvU64 gpuAddress, + NvU64 val) +{ + NvPushDevicePtr pDevice = p->pDevice; + pDevice->hal.acquireTimelineSemaphore(p, gpuAddress, val); +} + +NvBool __nvPushGetHal( + const NvPushAllocDeviceParams *pParams, + NvU32 channelClass, + NvPushHal *pHal) +{ + switch (channelClass) { + case HOPPER_CHANNEL_GPFIFO_A: + pHal->caps.extendedBase = TRUE; + // otherwise backwards compatible with the Volta DMA HAL + // fall through + case AMPERE_CHANNEL_GPFIFO_A: + // backwards compatible with the Volta DMA HAL + // fall through + case TURING_CHANNEL_GPFIFO_A: + // backwards compatible with the Volta DMA HAL + // fall through + case VOLTA_CHANNEL_GPFIFO_A: + pHal->kickoff = DoorbellKickoff; + pHal->caps.clientAllocatesUserD = TRUE; + pHal->caps.allocateDoubleSizeGpFifo = FALSE; + pHal->caps.voltaSemMethods = TRUE; + pHal->releaseTimelineSemaphore = VoltaReleaseTimelineSemaphore; + pHal->acquireTimelineSemaphore = VoltaAcquireTimelineSemaphore; + break; + case PASCAL_CHANNEL_GPFIFO_A: + // backwards compatible with the Kepler DMA HAL + // fall through + case MAXWELL_CHANNEL_GPFIFO_A: + // backwards compatible with the Kepler DMA HAL + // fall through + case KEPLER_CHANNEL_GPFIFO_B: + pHal->kickoff = UserDKickoff; + 
pHal->caps.clientAllocatesUserD = FALSE; + pHal->caps.allocateDoubleSizeGpFifo = FALSE; + pHal->releaseTimelineSemaphore = KeplerReleaseTimelineSemaphore; + pHal->acquireTimelineSemaphore = KeplerAcquireTimelineSemaphore; + break; + default: + nvAssert(!"There's no DMA HAL for this channel class"); + } + + if (pParams->amodel.config != NV_AMODEL_NONE) { + pHal->kickoff = NULL; + } else if (pParams->isTegra) { + pHal->kickoff = NULL; + } + + return !!(pHal->kickoff); +} + +void __nvPushMoveDWORDS(NvU32* dst, const NvU32* src, int dwords) +{ + while (dwords & ~0x03) { + *dst = *src; + *(dst + 1) = *(src + 1); + *(dst + 2) = *(src + 2); + *(dst + 3) = *(src + 3); + src += 4; + dst += 4; + dwords -= 4; + } + if (!dwords) return; + *dst = *src; + if (dwords == 1) return; + *(dst + 1) = *(src + 1); + if (dwords == 2) return; + *(dst + 2) = *(src + 2); +} diff --git a/src/common/unix/xzminidec/interface/xz.h b/src/common/unix/xzminidec/interface/xz.h new file mode 100644 index 0000000..088065a --- /dev/null +++ b/src/common/unix/xzminidec/interface/xz.h @@ -0,0 +1,285 @@ +/* + * XZ decompressor + * + * Authors: Lasse Collin + * Igor Pavlov + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#ifndef XZ_H +#define XZ_H + +/* Get the definition of size_t. */ +#if defined(__KERNEL__) +# include <linux/types.h> +#else +# include <stddef.h> +#endif + +/* Get the definition of uint32_t and friends. */ +#if defined(NV_XZ_USE_NVTYPES) +# include "nvtypes.h" + typedef NvU8 uint8_t; + typedef NvU16 uint16_t; + typedef NvU32 uint32_t; + typedef NvU64 uint64_t; +#elif defined(__KERNEL__) +# include <linux/types.h> +#else +# include <stdint.h> +#endif + +#ifdef __cplusplus extern "C" { #endif + +/* In Linux, this is used to make extern functions static when needed. */ +#ifndef XZ_EXTERN +# define XZ_EXTERN extern +#endif + +/** + * enum xz_mode - Operation mode + * + * @XZ_SINGLE: Single-call mode. This uses less RAM than + * multi-call modes, because the LZMA2 + * dictionary doesn't need to be allocated as + * part of the decoder state. All required data + * structures are allocated at initialization, + * so xz_dec_run() cannot return XZ_MEM_ERROR. + * @XZ_PREALLOC: Multi-call mode with preallocated LZMA2 + * dictionary buffer. All data structures are + * allocated at initialization, so xz_dec_run() + * cannot return XZ_MEM_ERROR. + * @XZ_DYNALLOC: Multi-call mode. The LZMA2 dictionary is + * allocated once the required size has been + * parsed from the stream headers. If the + * allocation fails, xz_dec_run() will return + * XZ_MEM_ERROR. + * + * It is possible to enable support only for a subset of the above + * modes at compile time by defining XZ_DEC_SINGLE, XZ_DEC_PREALLOC, + * or XZ_DEC_DYNALLOC. The xz_dec kernel module is always compiled + * with support for all operation modes, but the preboot code may + * be built with fewer features to minimize code size. + */ +enum xz_mode { + XZ_SINGLE, + XZ_PREALLOC, + XZ_DYNALLOC +}; + +/** + * enum xz_ret - Return codes + * @XZ_OK: Everything is OK so far. More input or more + * output space is required to continue. This + * return code is possible only in multi-call mode + * (XZ_PREALLOC or XZ_DYNALLOC). + * @XZ_STREAM_END: Operation finished successfully. + * @XZ_UNSUPPORTED_CHECK: Integrity check type is not supported. Decoding + * is still possible in multi-call mode by simply + * calling xz_dec_run() again. + * Note that this return value is used only if + * XZ_DEC_ANY_CHECK was defined at build time, + * which is not used in the kernel.
Unsupported + * check types return XZ_OPTIONS_ERROR if + * XZ_DEC_ANY_CHECK was not defined at build time. + * @XZ_MEM_ERROR: Allocating memory failed. This return code is + * possible only if the decoder was initialized + * with XZ_DYNALLOC. The amount of memory that the + * decoder tried to allocate was no more than the + * dict_max argument given to xz_dec_init(). + * @XZ_MEMLIMIT_ERROR: A bigger LZMA2 dictionary would be needed than + * allowed by the dict_max argument given to + * xz_dec_init(). This return value is possible + * only in multi-call mode (XZ_PREALLOC or + * XZ_DYNALLOC); the single-call mode (XZ_SINGLE) + * ignores the dict_max argument. + * @XZ_FORMAT_ERROR: File format was not recognized (wrong magic + * bytes). + * @XZ_OPTIONS_ERROR: This implementation doesn't support the requested + * compression options. In the decoder this means + * that the header CRC32 matches, but the header + * itself specifies something that we don't support. + * @XZ_DATA_ERROR: Compressed data is corrupt. + * @XZ_BUF_ERROR: Cannot make any progress. Details are slightly + * different between multi-call and single-call + * mode; more information below. + * + * In multi-call mode, XZ_BUF_ERROR is returned when two consecutive calls + * to XZ code cannot consume any input and cannot produce any new output. + * This happens when there is no new input available, or the output buffer + * is full while at least one output byte is still pending. Assuming your + * code is not buggy, you can get this error only when decoding a compressed + * stream that is truncated or otherwise corrupt. + * + * In single-call mode, XZ_BUF_ERROR is returned only when the output buffer + * is too small or the compressed input is corrupt in a way that makes the + * decoder produce more output than the caller expected. When it is + * (relatively) clear that the compressed input is truncated, XZ_DATA_ERROR + * is used instead of XZ_BUF_ERROR. + */ +enum xz_ret { + XZ_OK, + XZ_STREAM_END, + XZ_UNSUPPORTED_CHECK, + XZ_MEM_ERROR, + XZ_MEMLIMIT_ERROR, + XZ_FORMAT_ERROR, + XZ_OPTIONS_ERROR, + XZ_DATA_ERROR, + XZ_BUF_ERROR +}; + +/** + * struct xz_buf - Passing input and output buffers to XZ code + * @in: Beginning of the input buffer. This may be NULL if and only + * if in_pos is equal to in_size. + * @in_pos: Current position in the input buffer. This must not exceed + * in_size. + * @in_size: Size of the input buffer + * @out: Beginning of the output buffer. This may be NULL if and only + * if out_pos is equal to out_size. + * @out_pos: Current position in the output buffer. This must not exceed + * out_size. + * @out_size: Size of the output buffer + * + * Only the contents of the output buffer from out[out_pos] onward, and + * the variables in_pos and out_pos are modified by the XZ code. + */ +struct xz_buf { + const uint8_t *in; + size_t in_pos; + size_t in_size; + + uint8_t *out; + size_t out_pos; + size_t out_size; +}; + +/** + * struct xz_dec - Opaque type to hold the XZ decoder state + */ +struct xz_dec; + +/** + * xz_dec_init() - Allocate and initialize an XZ decoder state + * @mode: Operation mode + * @dict_max: Maximum size of the LZMA2 dictionary (history buffer) for + * multi-call decoding. This is ignored in single-call mode + * (mode == XZ_SINGLE). The LZMA2 dictionary is always 2^n bytes + * or 2^n + 2^(n-1) bytes (the latter sizes are less common + * in practice), so other values for dict_max don't make sense.
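+ * For example, 65536 (2^16) and 98304 (2^16 + 2^15) are sensible + * dict_max values, while 80000 is not.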
+ * In the kernel, dictionary sizes of 64 KiB, 128 KiB, 256 KiB, + * 512 KiB, and 1 MiB are probably the only reasonable values, + * except for kernel and initramfs images where a bigger + * dictionary can be fine and useful. + * + * Single-call mode (XZ_SINGLE): xz_dec_run() decodes the whole stream at + * once. The caller must provide enough output space or the decoding will + * fail. The output space is used as the dictionary buffer, which is why + * there is no need to allocate the dictionary as part of the decoder's + * internal state. + * + * Because the output buffer is used as the workspace, streams encoded using + * a big dictionary are not a problem in single-call mode. The output buffer + * only needs to be large enough to hold the actual uncompressed data; it + * can be smaller than the dictionary size stored in the stream headers. + * + * Multi-call mode with preallocated dictionary (XZ_PREALLOC): dict_max bytes + * of memory is preallocated for the LZMA2 dictionary. This way there is no + * risk that xz_dec_run() could run out of memory, since xz_dec_run() will + * never allocate any memory. Instead, if the preallocated dictionary is too + * small for decoding the given input stream, xz_dec_run() will return + * XZ_MEMLIMIT_ERROR. Thus, it is important to know what kind of data will be + * decoded to avoid allocating an excessive amount of memory for the + * dictionary. + * + * Multi-call mode with dynamically allocated dictionary (XZ_DYNALLOC): + * dict_max specifies the maximum allowed dictionary size that xz_dec_run() + * may allocate once it has parsed the dictionary size from the stream + * headers. This way excessive allocations can be avoided while still + * limiting the maximum memory usage to a sane value to prevent running the + * system out of memory when decompressing streams from untrusted sources. + * + * On success, xz_dec_init() returns a pointer to struct xz_dec, which is + * ready to be used with xz_dec_run(). If memory allocation fails, + * xz_dec_init() returns NULL. + */ +XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max); + +/** + * xz_dec_run() - Run the XZ decoder + * @s: Decoder state allocated using xz_dec_init() + * @b: Input and output buffers + * + * The possible return values depend on build options and operation mode. + * See enum xz_ret for details. + * + * Note that if an error occurs in single-call mode (return value is not + * XZ_STREAM_END), b->in_pos and b->out_pos are not modified and the + * contents of the output buffer from b->out[b->out_pos] onward are + * undefined. This is true even after XZ_BUF_ERROR, because with some filter + * chains, there may be a second pass over the output buffer, and this pass + * cannot be properly done if the output buffer is truncated. Thus, you + * cannot give the single-call decoder too small a buffer and then expect to + * get that much valid data from the beginning of the stream. You must use + * the multi-call decoder if you don't want to uncompress the whole stream. + */ +XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b); + +/** + * xz_dec_reset() - Reset an already allocated decoder state + * @s: Decoder state allocated using xz_dec_init() + * + * This function can be used to reset the multi-call decoder state without + * freeing and reallocating memory with xz_dec_end() and xz_dec_init(). + * + * In single-call mode, xz_dec_reset() is always called at the beginning of + * xz_dec_run().
Thus, an explicit call to xz_dec_reset() is useful only in + * multi-call mode. + */ +XZ_EXTERN void xz_dec_reset(struct xz_dec *s); + +/** + * xz_dec_end() - Free the memory allocated for the decoder state + * @s: Decoder state allocated using xz_dec_init(). If s is NULL, + * this function does nothing. + */ +XZ_EXTERN void xz_dec_end(struct xz_dec *s); + +/* + * Standalone build (userspace build or in-kernel build for boot time use) + * needs a CRC32 implementation. For normal in-kernel use, the kernel's own + * CRC32 module is used instead, and users of this module don't need to + * care about the functions below. + */ +#ifndef XZ_INTERNAL_CRC32 +# ifdef __KERNEL__ +# define XZ_INTERNAL_CRC32 0 +# else +# define XZ_INTERNAL_CRC32 1 +# endif +#endif + +#if XZ_INTERNAL_CRC32 +/* + * This must be called before any other xz_* function to initialize + * the CRC32 lookup table. + */ +XZ_EXTERN void xz_crc32_init(void); + +/* + * Update CRC32 value using the polynomial from IEEE-802.3. To start a new + * calculation, the third argument must be zero. To continue the calculation, + * the previously returned value is passed as the third argument. + */ +XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/common/unix/xzminidec/src/xz_config.h b/src/common/unix/xzminidec/src/xz_config.h new file mode 100644 index 0000000..a552b4a --- /dev/null +++ b/src/common/unix/xzminidec/src/xz_config.h @@ -0,0 +1,113 @@ +/* + * Private includes and definitions for userspace use of XZ Embedded + * + * Author: Lasse Collin + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#ifndef XZ_CONFIG_H +#define XZ_CONFIG_H + +/* Uncomment as needed to enable BCJ filter decoders. */ +/* #define XZ_DEC_X86 */ +/* #define XZ_DEC_POWERPC */ +/* #define XZ_DEC_IA64 */ +/* #define XZ_DEC_ARM */ +/* #define XZ_DEC_ARMTHUMB */ +/* #define XZ_DEC_SPARC */ + +#include <stdbool.h> + +#include "xz.h" + +#if defined(NV_XZ_CUSTOM_MEM_HOOKS) +# include "nv_xz_mem_hooks.h" +#else +# include <stdlib.h> +# include <string.h> +# define kmalloc(size, flags) malloc(size) +# define kfree(ptr) free(ptr) +# define vmalloc(size) malloc(size) +# define vfree(ptr) free(ptr) + +# define memeq(a, b, size) (memcmp(a, b, size) == 0) +# define memzero(buf, size) memset(buf, 0, size) +#endif /* defined(NV_XZ_CUSTOM_MEM_HOOKS) */ + +#ifndef min +# define min(x, y) ((x) < (y) ? (x) : (y)) +#endif +#define min_t(type, x, y) min(x, y) + +/* + * Some functions have been marked with __always_inline to keep the + * performance reasonable even when the compiler is optimizing for + * small code size. You may be able to save a few bytes by #defining + * __always_inline to plain inline, but don't complain if the code + * becomes slow. + * + * NOTE: System headers on GNU/Linux may #define this macro already, + * so if you want to change it, you need to #undef it first.
+ */ +#ifndef __always_inline +# ifdef __GNUC__ +# define __always_inline \ + inline __attribute__((__always_inline__)) +# else +# define __always_inline inline +# endif +#endif + +/* Inline functions to access unaligned unsigned 32-bit integers */ +#ifndef get_unaligned_le32 +static inline uint32_t get_unaligned_le32(const uint8_t *buf) +{ + return (uint32_t)buf[0] + | ((uint32_t)buf[1] << 8) + | ((uint32_t)buf[2] << 16) + | ((uint32_t)buf[3] << 24); +} +#endif + +#ifndef get_unaligned_be32 +static inline uint32_t get_unaligned_be32(const uint8_t *buf) +{ + return ((uint32_t)buf[0] << 24) + | ((uint32_t)buf[1] << 16) + | ((uint32_t)buf[2] << 8) + | (uint32_t)buf[3]; +} +#endif + +#ifndef put_unaligned_le32 +static inline void put_unaligned_le32(uint32_t val, uint8_t *buf) +{ + buf[0] = (uint8_t)val; + buf[1] = (uint8_t)(val >> 8); + buf[2] = (uint8_t)(val >> 16); + buf[3] = (uint8_t)(val >> 24); +} +#endif + +#ifndef put_unaligned_be32 +static inline void put_unaligned_be32(uint32_t val, uint8_t *buf) +{ + buf[0] = (uint8_t)(val >> 24); + buf[1] = (uint8_t)(val >> 16); + buf[2] = (uint8_t)(val >> 8); + buf[3] = (uint8_t)val; +} +#endif + +/* + * Use get_unaligned_le32() also for aligned access for simplicity. On + * little endian systems, #define get_le32(ptr) (*(const uint32_t *)(ptr)) + * could save a few bytes in code size. + */ +#ifndef get_le32 +# define get_le32 get_unaligned_le32 +#endif + +#endif diff --git a/src/common/unix/xzminidec/src/xz_crc32.c b/src/common/unix/xzminidec/src/xz_crc32.c new file mode 100644 index 0000000..5627b00 --- /dev/null +++ b/src/common/unix/xzminidec/src/xz_crc32.c @@ -0,0 +1,59 @@ +/* + * CRC32 using the polynomial from IEEE-802.3 + * + * Authors: Lasse Collin + * Igor Pavlov + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +/* + * This is not the fastest implementation, but it is pretty compact. + * The fastest versions of xz_crc32() on modern CPUs without hardware + * accelerated CRC instruction are 3-5 times as fast as this version, + * but they are bigger and use more memory for the lookup table. + */ + +#include "xz_private.h" + +/* + * STATIC_RW_DATA is used in the pre-boot environment on some architectures. + * See <linux/decompress/unxz.c> for details. + */ +#ifndef STATIC_RW_DATA +# define STATIC_RW_DATA static +#endif + +STATIC_RW_DATA uint32_t xz_crc32_table[256]; + +XZ_EXTERN void xz_crc32_init(void) +{ + const uint32_t poly = 0xEDB88320; + + uint32_t i; + uint32_t j; + uint32_t r; + + for (i = 0; i < 256; ++i) { + r = i; + for (j = 0; j < 8; ++j) + r = (r >> 1) ^ (poly & ~((r & 1) - 1)); + + xz_crc32_table[i] = r; + } + + return; +} + +XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc) +{ + crc = ~crc; + + while (size != 0) { + crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8); + --size; + } + + return ~crc; +} diff --git a/src/common/unix/xzminidec/src/xz_dec_bcj.c b/src/common/unix/xzminidec/src/xz_dec_bcj.c new file mode 100644 index 0000000..72ddac6 --- /dev/null +++ b/src/common/unix/xzminidec/src/xz_dec_bcj.c @@ -0,0 +1,574 @@ +/* + * Branch/Call/Jump (BCJ) filter decoders + * + * Authors: Lasse Collin + * Igor Pavlov + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#include "xz_private.h" + +/* + * The rest of the file is inside this ifdef. It makes things a little more + * convenient when building without support for any BCJ filters.
+ */ +#ifdef XZ_DEC_BCJ + +struct xz_dec_bcj { + /* Type of the BCJ filter being used */ + enum { + BCJ_X86 = 4, /* x86 or x86-64 */ + BCJ_POWERPC = 5, /* Big endian only */ + BCJ_IA64 = 6, /* Big or little endian */ + BCJ_ARM = 7, /* Little endian only */ + BCJ_ARMTHUMB = 8, /* Little endian only */ + BCJ_SPARC = 9 /* Big or little endian */ + } type; + + /* + * Return value of the next filter in the chain. We need to preserve + * this information across calls, because we must not call the next + * filter anymore once it has returned XZ_STREAM_END. + */ + enum xz_ret ret; + + /* True if we are operating in single-call mode. */ + bool single_call; + + /* + * Absolute position relative to the beginning of the uncompressed + * data (in a single .xz Block). We care only about the lowest 32 + * bits so this doesn't need to be uint64_t even with big files. + */ + uint32_t pos; + + /* x86 filter state */ + uint32_t x86_prev_mask; + + /* Temporary space to hold the variables from struct xz_buf */ + uint8_t *out; + size_t out_pos; + size_t out_size; + + struct { + /* Amount of already filtered data in the beginning of buf */ + size_t filtered; + + /* Total amount of data currently stored in buf */ + size_t size; + + /* + * Buffer to hold a mix of filtered and unfiltered data. This + * needs to be big enough to hold Alignment + 2 * Look-ahead: + * + * Type Alignment Look-ahead + * x86 1 4 + * PowerPC 4 0 + * IA-64 16 0 + * ARM 4 0 + * ARM-Thumb 2 2 + * SPARC 4 0 + */ + uint8_t buf[16]; + } temp; +}; + +#ifdef XZ_DEC_X86 +/* + * This is used to test the most significant byte of a memory address + * in an x86 instruction. + */ +static inline int bcj_x86_test_msbyte(uint8_t b) +{ + return b == 0x00 || b == 0xFF; +} + +static size_t bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + static const bool mask_to_allowed_status[8] + = { true, true, true, false, true, false, false, false }; + + static const uint8_t mask_to_bit_num[8] = { 0, 1, 2, 2, 3, 3, 3, 3 }; + + size_t i; + size_t prev_pos = (size_t)-1; + uint32_t prev_mask = s->x86_prev_mask; + uint32_t src; + uint32_t dest; + uint32_t j; + uint8_t b; + + if (size <= 4) + return 0; + + size -= 4; + for (i = 0; i < size; ++i) { + if ((buf[i] & 0xFE) != 0xE8) + continue; + + prev_pos = i - prev_pos; + if (prev_pos > 3) { + prev_mask = 0; + } else { + prev_mask = (prev_mask << (prev_pos - 1)) & 7; + if (prev_mask != 0) { + b = buf[i + 4 - mask_to_bit_num[prev_mask]]; + if (!mask_to_allowed_status[prev_mask] + || bcj_x86_test_msbyte(b)) { + prev_pos = i; + prev_mask = (prev_mask << 1) | 1; + continue; + } + } + } + + prev_pos = i; + + if (bcj_x86_test_msbyte(buf[i + 4])) { + src = get_unaligned_le32(buf + i + 1); + while (true) { + dest = src - (s->pos + (uint32_t)i + 5); + if (prev_mask == 0) + break; + + j = mask_to_bit_num[prev_mask] * 8; + b = (uint8_t)(dest >> (24 - j)); + if (!bcj_x86_test_msbyte(b)) + break; + + src = dest ^ (((uint32_t)1 << (32 - j)) - 1); + } + + dest &= 0x01FFFFFF; + dest |= (uint32_t)0 - (dest & 0x01000000); + put_unaligned_le32(dest, buf + i + 1); + i += 4; + } else { + prev_mask = (prev_mask << 1) | 1; + } + } + + prev_pos = i - prev_pos; + s->x86_prev_mask = prev_pos > 3 ? 
0 : prev_mask << (prev_pos - 1); + return i; +} +#endif + +#ifdef XZ_DEC_POWERPC +static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + size_t i; + uint32_t instr; + + for (i = 0; i + 4 <= size; i += 4) { + instr = get_unaligned_be32(buf + i); + if ((instr & 0xFC000003) == 0x48000001) { + instr &= 0x03FFFFFC; + instr -= s->pos + (uint32_t)i; + instr &= 0x03FFFFFC; + instr |= 0x48000001; + put_unaligned_be32(instr, buf + i); + } + } + + return i; +} +#endif + +#ifdef XZ_DEC_IA64 +static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + static const uint8_t branch_table[32] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 4, 4, 6, 6, 0, 0, 7, 7, + 4, 4, 0, 0, 4, 4, 0, 0 + }; + + /* + * The local variables take a little bit stack space, but it's less + * than what LZMA2 decoder takes, so it doesn't make sense to reduce + * stack usage here without doing that for the LZMA2 decoder too. + */ + + /* Loop counters */ + size_t i; + size_t j; + + /* Instruction slot (0, 1, or 2) in the 128-bit instruction word */ + uint32_t slot; + + /* Bitwise offset of the instruction indicated by slot */ + uint32_t bit_pos; + + /* bit_pos split into byte and bit parts */ + uint32_t byte_pos; + uint32_t bit_res; + + /* Address part of an instruction */ + uint32_t addr; + + /* Mask used to detect which instructions to convert */ + uint32_t mask; + + /* 41-bit instruction stored somewhere in the lowest 48 bits */ + uint64_t instr; + + /* Instruction normalized with bit_res for easier manipulation */ + uint64_t norm; + + for (i = 0; i + 16 <= size; i += 16) { + mask = branch_table[buf[i] & 0x1F]; + for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) { + if (((mask >> slot) & 1) == 0) + continue; + + byte_pos = bit_pos >> 3; + bit_res = bit_pos & 7; + instr = 0; + for (j = 0; j < 6; ++j) + instr |= (uint64_t)(buf[i + j + byte_pos]) + << (8 * j); + + norm = instr >> bit_res; + + if (((norm >> 37) & 0x0F) == 0x05 + && ((norm >> 9) & 0x07) == 0) { + addr = (norm >> 13) & 0x0FFFFF; + addr |= ((uint32_t)(norm >> 36) & 1) << 20; + addr <<= 4; + addr -= s->pos + (uint32_t)i; + addr >>= 4; + + norm &= ~((uint64_t)0x8FFFFF << 13); + norm |= (uint64_t)(addr & 0x0FFFFF) << 13; + norm |= (uint64_t)(addr & 0x100000) + << (36 - 20); + + instr &= (1 << bit_res) - 1; + instr |= norm << bit_res; + + for (j = 0; j < 6; j++) + buf[i + j + byte_pos] + = (uint8_t)(instr >> (8 * j)); + } + } + } + + return i; +} +#endif + +#ifdef XZ_DEC_ARM +static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + size_t i; + uint32_t addr; + + for (i = 0; i + 4 <= size; i += 4) { + if (buf[i + 3] == 0xEB) { + addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8) + | ((uint32_t)buf[i + 2] << 16); + addr <<= 2; + addr -= s->pos + (uint32_t)i + 8; + addr >>= 2; + buf[i] = (uint8_t)addr; + buf[i + 1] = (uint8_t)(addr >> 8); + buf[i + 2] = (uint8_t)(addr >> 16); + } + } + + return i; +} +#endif + +#ifdef XZ_DEC_ARMTHUMB +static size_t bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + size_t i; + uint32_t addr; + + for (i = 0; i + 4 <= size; i += 2) { + if ((buf[i + 1] & 0xF8) == 0xF0 + && (buf[i + 3] & 0xF8) == 0xF8) { + addr = (((uint32_t)buf[i + 1] & 0x07) << 19) + | ((uint32_t)buf[i] << 11) + | (((uint32_t)buf[i + 3] & 0x07) << 8) + | (uint32_t)buf[i + 2]; + addr <<= 1; + addr -= s->pos + (uint32_t)i + 4; + addr >>= 1; + buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07)); + buf[i] = (uint8_t)(addr >> 11); + buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 
0x07)); + buf[i + 2] = (uint8_t)addr; + i += 2; + } + } + + return i; +} +#endif + +#ifdef XZ_DEC_SPARC +static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + size_t i; + uint32_t instr; + + for (i = 0; i + 4 <= size; i += 4) { + instr = get_unaligned_be32(buf + i); + if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) { + instr <<= 2; + instr -= s->pos + (uint32_t)i; + instr >>= 2; + instr = ((uint32_t)0x40000000 - (instr & 0x400000)) + | 0x40000000 | (instr & 0x3FFFFF); + put_unaligned_be32(instr, buf + i); + } + } + + return i; +} +#endif + +/* + * Apply the selected BCJ filter. Update *pos and s->pos to match the amount + * of data that got filtered. + * + * NOTE: This is implemented as a switch statement to avoid using function + * pointers, which could be problematic in the kernel boot code, which must + * avoid pointers to static data (at least on x86). + */ +static void bcj_apply(struct xz_dec_bcj *s, + uint8_t *buf, size_t *pos, size_t size) +{ + size_t filtered; + + buf += *pos; + size -= *pos; + + switch (s->type) { +#ifdef XZ_DEC_X86 + case BCJ_X86: + filtered = bcj_x86(s, buf, size); + break; +#endif +#ifdef XZ_DEC_POWERPC + case BCJ_POWERPC: + filtered = bcj_powerpc(s, buf, size); + break; +#endif +#ifdef XZ_DEC_IA64 + case BCJ_IA64: + filtered = bcj_ia64(s, buf, size); + break; +#endif +#ifdef XZ_DEC_ARM + case BCJ_ARM: + filtered = bcj_arm(s, buf, size); + break; +#endif +#ifdef XZ_DEC_ARMTHUMB + case BCJ_ARMTHUMB: + filtered = bcj_armthumb(s, buf, size); + break; +#endif +#ifdef XZ_DEC_SPARC + case BCJ_SPARC: + filtered = bcj_sparc(s, buf, size); + break; +#endif + default: + /* Never reached but silence compiler warnings. */ + filtered = 0; + break; + } + + *pos += filtered; + s->pos += filtered; +} + +/* + * Flush pending filtered data from temp to the output buffer. + * Move the remaining mixture of possibly filtered and unfiltered + * data to the beginning of temp. + */ +static void bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b) +{ + size_t copy_size; + + copy_size = min_t(size_t, s->temp.filtered, b->out_size - b->out_pos); + memcpy(b->out + b->out_pos, s->temp.buf, copy_size); + b->out_pos += copy_size; + + s->temp.filtered -= copy_size; + s->temp.size -= copy_size; + memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size); +} + +/* + * The BCJ filter functions are primitive in the sense that they process the + * data in chunks of 1-16 bytes. To hide this issue, this function does + * some buffering. + */ +XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, + struct xz_dec_lzma2 *lzma2, + struct xz_buf *b) +{ + size_t out_start; + + /* + * Flush pending already filtered data to the output buffer. Return + * immediately if we couldn't flush everything, or if the next + * filter in the chain had already returned XZ_STREAM_END. + */ + if (s->temp.filtered > 0) { + bcj_flush(s, b); + if (s->temp.filtered > 0) + return XZ_OK; + + if (s->ret == XZ_STREAM_END) + return XZ_STREAM_END; + } + + /* + * If we have more output space than what is currently pending in + * temp, copy the unfiltered data from temp to the output buffer + * and try to fill the output buffer by decoding more data from the + * next filter in the chain. Apply the BCJ filter on the new data + * in the output buffer. If everything cannot be filtered, copy it + * to temp and rewind the output buffer position accordingly.
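+ * (The rewound bytes are stashed in temp and re-emitted by bcj_flush() + * once enough data arrives to finish filtering them.)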
+ * + * This always needs to be run when temp.size == 0 to handle a special + * case where the output buffer is full and the next filter has no + * more output coming but hasn't returned XZ_STREAM_END yet. + */ + if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) { + out_start = b->out_pos; + memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); + b->out_pos += s->temp.size; + + s->ret = xz_dec_lzma2_run(lzma2, b); + if (s->ret != XZ_STREAM_END + && (s->ret != XZ_OK || s->single_call)) + return s->ret; + + bcj_apply(s, b->out, &out_start, b->out_pos); + + /* + * As an exception, if the next filter returned XZ_STREAM_END, + * we can do that too, since the last few bytes that remain + * unfiltered are meant to remain unfiltered. + */ + if (s->ret == XZ_STREAM_END) + return XZ_STREAM_END; + + s->temp.size = b->out_pos - out_start; + b->out_pos -= s->temp.size; + memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); + + /* + * If there wasn't enough input to the next filter to fill + * the output buffer with unfiltered data, there's no point + * in trying to decode more data into temp. + */ + if (b->out_pos + s->temp.size < b->out_size) + return XZ_OK; + } + + /* + * We have unfiltered data in temp. If the output buffer isn't full + * yet, try to fill the temp buffer by decoding more data from the + * next filter. Apply the BCJ filter on temp. Then we hopefully can + * fill the actual output buffer by copying filtered data from temp. + * A mix of filtered and unfiltered data may be left in temp; it will + * be taken care of on the next call to this function. + */ + if (b->out_pos < b->out_size) { + /* Make b->out{,_pos,_size} temporarily point to s->temp. */ + s->out = b->out; + s->out_pos = b->out_pos; + s->out_size = b->out_size; + b->out = s->temp.buf; + b->out_pos = s->temp.size; + b->out_size = sizeof(s->temp.buf); + + s->ret = xz_dec_lzma2_run(lzma2, b); + + s->temp.size = b->out_pos; + b->out = s->out; + b->out_pos = s->out_pos; + b->out_size = s->out_size; + + if (s->ret != XZ_OK && s->ret != XZ_STREAM_END) + return s->ret; + + bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size); + + /* + * If the next filter returned XZ_STREAM_END, we mark that + * everything is filtered, since the last unfiltered bytes + * of the stream are meant to be left as is.
+ */ + if (s->ret == XZ_STREAM_END) + s->temp.filtered = s->temp.size; + + bcj_flush(s, b); + if (s->temp.filtered > 0) + return XZ_OK; + } + + return s->ret; +} + +XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call) +{ + struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s != NULL) + s->single_call = single_call; + + return s; +} + +XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id) +{ + switch (id) { +#ifdef XZ_DEC_X86 + case BCJ_X86: +#endif +#ifdef XZ_DEC_POWERPC + case BCJ_POWERPC: +#endif +#ifdef XZ_DEC_IA64 + case BCJ_IA64: +#endif +#ifdef XZ_DEC_ARM + case BCJ_ARM: +#endif +#ifdef XZ_DEC_ARMTHUMB + case BCJ_ARMTHUMB: +#endif +#ifdef XZ_DEC_SPARC + case BCJ_SPARC: +#endif + break; + + default: + /* Unsupported Filter ID */ + return XZ_OPTIONS_ERROR; + } + + s->type = id; + s->ret = XZ_OK; + s->pos = 0; + s->x86_prev_mask = 0; + s->temp.filtered = 0; + s->temp.size = 0; + + return XZ_OK; +} + +#endif diff --git a/src/common/unix/xzminidec/src/xz_dec_lzma2.c b/src/common/unix/xzminidec/src/xz_dec_lzma2.c new file mode 100644 index 0000000..169fb97 --- /dev/null +++ b/src/common/unix/xzminidec/src/xz_dec_lzma2.c @@ -0,0 +1,1173 @@ +/* + * LZMA2 decoder + * + * Authors: Lasse Collin + * Igor Pavlov + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#include "xz_private.h" +#include "xz_lzma2.h" + +/* + * Range decoder initialization eats the first five bytes of each LZMA chunk. + */ +#define RC_INIT_BYTES 5 + +/* + * Minimum number of usable input bytes needed to safely decode one LZMA + * symbol. The worst case is that we decode 22 bits using probabilities and 26 + * direct bits. This may consume at most 20 bytes of input. However, + * lzma_main() does an extra normalization before returning, thus we + * need to put 21 here. + */ +#define LZMA_IN_REQUIRED 21 + +/* + * Dictionary (history buffer) + * + * These are always true: + * start <= pos <= full <= end + * pos <= limit <= end + * + * In multi-call mode, also these are true: + * end == size + * size <= size_max + * allocated <= size + * + * Most of these variables are size_t to support single-call mode, + * in which the dictionary variables address the actual output + * buffer directly. + */ +struct dictionary { + /* Beginning of the history buffer */ + uint8_t *buf; + + /* Old position in buf (before decoding more data) */ + size_t start; + + /* Position in buf */ + size_t pos; + + /* + * How full the dictionary is. This is used to detect corrupt input + * that would read beyond the beginning of the uncompressed stream. + */ + size_t full; + + /* Write limit; we don't write to buf[limit] or later bytes. */ + size_t limit; + + /* + * End of the dictionary buffer. In multi-call mode, this is + * the same as the dictionary size. In single-call mode, this + * indicates the size of the output buffer. + */ + size_t end; + + /* + * Size of the dictionary as specified in Block Header. This is used + * together with "full" to detect corrupt input that would make us + * read beyond the beginning of the uncompressed stream. + */ + uint32_t size; + + /* + * Maximum allowed dictionary size in multi-call mode. + * This is ignored in single-call mode. + */ + uint32_t size_max; + + /* + * Amount of memory currently allocated for the dictionary. + * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC, + * size_max is always the same as the allocated size.)
+ */ + uint32_t allocated; + + /* Operation mode */ + enum xz_mode mode; +}; + +/* Range decoder */ +struct rc_dec { + uint32_t range; + uint32_t code; + + /* + * Number of initializing bytes remaining to be read + * by rc_read_init(). + */ + uint32_t init_bytes_left; + + /* + * Buffer from which we read our input. It can be either + * temp.buf or the caller-provided input buffer. + */ + const uint8_t *in; + size_t in_pos; + size_t in_limit; +}; + +/* Probabilities for a length decoder. */ +struct lzma_len_dec { + /* Probability of match length being at least 10 */ + uint16_t choice; + + /* Probability of match length being at least 18 */ + uint16_t choice2; + + /* Probabilities for match lengths 2-9 */ + uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS]; + + /* Probabilities for match lengths 10-17 */ + uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS]; + + /* Probabilities for match lengths 18-273 */ + uint16_t high[LEN_HIGH_SYMBOLS]; +}; + +struct lzma_dec { + /* Distances of latest four matches */ + uint32_t rep0; + uint32_t rep1; + uint32_t rep2; + uint32_t rep3; + + /* Types of the most recently seen LZMA symbols */ + enum lzma_state state; + + /* + * Length of a match. This is updated so that dict_repeat can + * be called again to finish repeating the whole match. + */ + uint32_t len; + + /* + * LZMA properties or related bit masks (number of literal + * context bits, a mask derived from the number of literal + * position bits, and a mask derived from the number of + * position bits) + */ + uint32_t lc; + uint32_t literal_pos_mask; /* (1 << lp) - 1 */ + uint32_t pos_mask; /* (1 << pb) - 1 */ + + /* If 1, it's a match. Otherwise it's a single 8-bit literal. */ + uint16_t is_match[STATES][POS_STATES_MAX]; + + /* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */ + uint16_t is_rep[STATES]; + + /* + * If 0, distance of a repeated match is rep0. + * Otherwise check is_rep1. + */ + uint16_t is_rep0[STATES]; + + /* + * If 0, distance of a repeated match is rep1. + * Otherwise check is_rep2. + */ + uint16_t is_rep1[STATES]; + + /* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */ + uint16_t is_rep2[STATES]; + + /* + * If 1, the repeated match has a length of one byte. Otherwise + * the length is decoded from rep_len_decoder. + */ + uint16_t is_rep0_long[STATES][POS_STATES_MAX]; + + /* + * Probability tree for the highest two bits of the match + * distance. There is a separate probability tree for match + * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273]. + */ + uint16_t dist_slot[DIST_STATES][DIST_SLOTS]; + + /* + * Probability trees for additional bits for match distance + * when the distance is in the range [4, 127]. + */ + uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END]; + + /* + * Probability tree for the lowest four bits of a match + * distance that is equal to or greater than 128. + */ + uint16_t dist_align[ALIGN_SIZE]; + + /* Length of a normal match */ + struct lzma_len_dec match_len_dec; + + /* Length of a repeated match */ + struct lzma_len_dec rep_len_dec; + + /* Probabilities of literals */ + uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE]; +}; + +struct lzma2_dec { + /* Position in xz_dec_lzma2_run(). */ + enum lzma2_seq { + SEQ_CONTROL, + SEQ_UNCOMPRESSED_1, + SEQ_UNCOMPRESSED_2, + SEQ_COMPRESSED_0, + SEQ_COMPRESSED_1, + SEQ_PROPERTIES, + SEQ_LZMA_PREPARE, + SEQ_LZMA_RUN, + SEQ_COPY + } sequence; + + /* Next position after decoding the compressed size of the chunk.
+     */
+    enum lzma2_seq next_sequence;
+
+    /* Uncompressed size of LZMA chunk (2 MiB at maximum) */
+    uint32_t uncompressed;
+
+    /*
+     * Compressed size of LZMA chunk or compressed/uncompressed
+     * size of uncompressed chunk (64 KiB at maximum)
+     */
+    uint32_t compressed;
+
+    /*
+     * True if dictionary reset is needed. This is false before
+     * the first chunk (LZMA or uncompressed).
+     */
+    bool need_dict_reset;
+
+    /*
+     * True if new LZMA properties are needed. This is false
+     * before the first LZMA chunk.
+     */
+    bool need_props;
+};
+
+struct xz_dec_lzma2 {
+    /*
+     * The order below is important on x86 to reduce code size and
+     * it shouldn't hurt on other platforms. Everything up to and
+     * including lzma.pos_mask is in the first 128 bytes on x86-32,
+     * which allows using smaller instructions to access those
+     * variables. On x86-64, fewer variables fit into the first 128
+     * bytes, but this is still the best order without sacrificing
+     * the readability by splitting the structures.
+     */
+    struct rc_dec rc;
+    struct dictionary dict;
+    struct lzma2_dec lzma2;
+    struct lzma_dec lzma;
+
+    /*
+     * Temporary buffer which holds a small number of input bytes between
+     * decoder calls. See lzma2_lzma() for details.
+     */
+    struct {
+        uint32_t size;
+        uint8_t buf[3 * LZMA_IN_REQUIRED];
+    } temp;
+};
+
+/**************
+ * Dictionary *
+ **************/
+
+/*
+ * Reset the dictionary state. When in single-call mode, set up the beginning
+ * of the dictionary to point to the actual output buffer.
+ */
+static void dict_reset(struct dictionary *dict, struct xz_buf *b)
+{
+    if (DEC_IS_SINGLE(dict->mode)) {
+        dict->buf = b->out + b->out_pos;
+        dict->end = b->out_size - b->out_pos;
+    }
+
+    dict->start = 0;
+    dict->pos = 0;
+    dict->limit = 0;
+    dict->full = 0;
+}
+
+/* Set dictionary write limit */
+static void dict_limit(struct dictionary *dict, size_t out_max)
+{
+    if (dict->end - dict->pos <= out_max)
+        dict->limit = dict->end;
+    else
+        dict->limit = dict->pos + out_max;
+}
+
+/* Return true if at least one byte can be written into the dictionary. */
+static inline bool dict_has_space(const struct dictionary *dict)
+{
+    return dict->pos < dict->limit;
+}
+
+/*
+ * Get a byte from the dictionary at the given distance. The distance is
+ * assumed to be valid, or as a special case, zero when the dictionary is
+ * still empty. This special case is needed for single-call decoding to
+ * avoid writing a '\0' to the end of the destination buffer.
+ */
+static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist)
+{
+    size_t offset = dict->pos - dist - 1;
+
+    if (dist >= dict->pos)
+        offset += dict->end;
+
+    return dict->full > 0 ? dict->buf[offset] : 0;
+}
+
+/*
+ * Put one byte into the dictionary. It is assumed that there is space for it.
+ */
+static inline void dict_put(struct dictionary *dict, uint8_t byte)
+{
+    dict->buf[dict->pos++] = byte;
+
+    if (dict->full < dict->pos)
+        dict->full = dict->pos;
+}
+
+/*
+ * Repeat given number of bytes from the given distance. If the distance is
+ * invalid, false is returned. On success, true is returned and *len is
+ * updated to indicate how many bytes were left to be repeated.
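+ *
+ * For example (illustrative): with dict->pos == 10 and dist == 2, copying
+ * starts at offset 7 and proceeds one byte at a time, so overlapping
+ * repeats (dist smaller than *len) expand as LZ77 requires.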
+ */
+static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist)
+{
+    size_t back;
+    uint32_t left;
+
+    if (dist >= dict->full || dist >= dict->size)
+        return false;
+
+    left = min_t(size_t, dict->limit - dict->pos, *len);
+    *len -= left;
+
+    back = dict->pos - dist - 1;
+    if (dist >= dict->pos)
+        back += dict->end;
+
+    do {
+        dict->buf[dict->pos++] = dict->buf[back++];
+        if (back == dict->end)
+            back = 0;
+    } while (--left > 0);
+
+    if (dict->full < dict->pos)
+        dict->full = dict->pos;
+
+    return true;
+}
+
+/* Copy uncompressed data as is from input to dictionary and output buffers. */
+static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
+                              uint32_t *left)
+{
+    size_t copy_size;
+
+    while (*left > 0 && b->in_pos < b->in_size
+            && b->out_pos < b->out_size) {
+        copy_size = min(b->in_size - b->in_pos,
+                        b->out_size - b->out_pos);
+        if (copy_size > dict->end - dict->pos)
+            copy_size = dict->end - dict->pos;
+        if (copy_size > *left)
+            copy_size = *left;
+
+        *left -= copy_size;
+
+        memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
+        dict->pos += copy_size;
+
+        if (dict->full < dict->pos)
+            dict->full = dict->pos;
+
+        if (DEC_IS_MULTI(dict->mode)) {
+            if (dict->pos == dict->end)
+                dict->pos = 0;
+
+            memcpy(b->out + b->out_pos, b->in + b->in_pos,
+                   copy_size);
+        }
+
+        dict->start = dict->pos;
+
+        b->out_pos += copy_size;
+        b->in_pos += copy_size;
+    }
+}
+
+/*
+ * Flush pending data from dictionary to b->out. It is assumed that there is
+ * enough space in b->out. This is guaranteed because the caller uses
+ * dict_limit() before decoding data into the dictionary.
+ */
+static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
+{
+    size_t copy_size = dict->pos - dict->start;
+
+    if (DEC_IS_MULTI(dict->mode)) {
+        if (dict->pos == dict->end)
+            dict->pos = 0;
+
+        memcpy(b->out + b->out_pos, dict->buf + dict->start,
+               copy_size);
+    }
+
+    dict->start = dict->pos;
+    b->out_pos += copy_size;
+    return copy_size;
+}
+
+/*****************
+ * Range decoder *
+ *****************/
+
+/* Reset the range decoder. */
+static void rc_reset(struct rc_dec *rc)
+{
+    rc->range = (uint32_t)-1;
+    rc->code = 0;
+    rc->init_bytes_left = RC_INIT_BYTES;
+}
+
+/*
+ * Read the first five initial bytes into rc->code if they haven't been
+ * read already. (Yes, the first byte gets completely ignored.)
+ */
+static bool rc_read_init(struct rc_dec *rc, struct xz_buf *b)
+{
+    while (rc->init_bytes_left > 0) {
+        if (b->in_pos == b->in_size)
+            return false;
+
+        rc->code = (rc->code << 8) + b->in[b->in_pos++];
+        --rc->init_bytes_left;
+    }
+
+    return true;
+}
+
+/* Return true if there may not be enough input for the next decoding loop. */
+static inline bool rc_limit_exceeded(const struct rc_dec *rc)
+{
+    return rc->in_pos > rc->in_limit;
+}
+
+/*
+ * Return true if it is possible (from the point of view of the range decoder)
+ * that we have reached the end of the LZMA chunk.
+ */
+static inline bool rc_is_finished(const struct rc_dec *rc)
+{
+    return rc->code == 0;
+}
+
+/* Read the next input byte if needed. */
+static inline __always_inline void rc_normalize(struct rc_dec *rc)
+{
+    if (rc->range < RC_TOP_VALUE) {
+        rc->range <<= RC_SHIFT_BITS;
+        rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++];
+    }
+}
+
+/*
+ * Decode one bit. In some versions, this function has been split into three
+ * functions so that the compiler is supposed to be able to more easily avoid
+ * an extra branch. In this particular version of the LZMA decoder, this
+ * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
+ * on x86). Using a non-split version results in nicer looking code too.
+ *
+ * NOTE: This must return an int. Do not make it return a bool or the speed
+ * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
+ * and it generates 10-20 % faster code than GCC 3.x from this file anyway.)
+ */
+static inline __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
+{
+    uint32_t bound;
+    int bit;
+
+    rc_normalize(rc);
+    bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;
+    if (rc->code < bound) {
+        rc->range = bound;
+        *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS;
+        bit = 0;
+    } else {
+        rc->range -= bound;
+        rc->code -= bound;
+        *prob -= *prob >> RC_MOVE_BITS;
+        bit = 1;
+    }
+
+    return bit;
+}
+
+/* Decode a bittree starting from the most significant bit. */
+static inline __always_inline uint32_t rc_bittree(struct rc_dec *rc,
+                                                  uint16_t *probs,
+                                                  uint32_t limit)
+{
+    uint32_t symbol = 1;
+
+    do {
+        if (rc_bit(rc, &probs[symbol]))
+            symbol = (symbol << 1) + 1;
+        else
+            symbol <<= 1;
+    } while (symbol < limit);
+
+    return symbol;
+}
+
+/* Decode a bittree starting from the least significant bit. */
+static inline __always_inline void rc_bittree_reverse(struct rc_dec *rc,
+                                                      uint16_t *probs,
+                                                      uint32_t *dest,
+                                                      uint32_t limit)
+{
+    uint32_t symbol = 1;
+    uint32_t i = 0;
+
+    do {
+        if (rc_bit(rc, &probs[symbol])) {
+            symbol = (symbol << 1) + 1;
+            *dest += 1 << i;
+        } else {
+            symbol <<= 1;
+        }
+    } while (++i < limit);
+}
+
+/* Decode direct bits (fixed fifty-fifty probability) */
+static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit)
+{
+    uint32_t mask;
+
+    do {
+        rc_normalize(rc);
+        rc->range >>= 1;
+        rc->code -= rc->range;
+        mask = (uint32_t)0 - (rc->code >> 31);
+        rc->code += rc->range & mask;
+        *dest = (*dest << 1) + (mask + 1);
+    } while (--limit > 0);
+}
+
+/********
+ * LZMA *
+ ********/
+
+/* Get pointer to literal coder probability array. */
+static uint16_t *lzma_literal_probs(struct xz_dec_lzma2 *s)
+{
+    uint32_t prev_byte = dict_get(&s->dict, 0);
+    uint32_t low = prev_byte >> (8 - s->lzma.lc);
+    uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc;
+    return s->lzma.literal[low + high];
+}
+
+/* Decode a literal (one 8-bit byte) */
+static void lzma_literal(struct xz_dec_lzma2 *s)
+{
+    uint16_t *probs;
+    uint32_t symbol;
+    uint32_t match_byte;
+    uint32_t match_bit;
+    uint32_t offset;
+    uint32_t i;
+
+    probs = lzma_literal_probs(s);
+
+    if (lzma_state_is_literal(s->lzma.state)) {
+        symbol = rc_bittree(&s->rc, probs, 0x100);
+    } else {
+        symbol = 1;
+        match_byte = dict_get(&s->dict, s->lzma.rep0) << 1;
+        offset = 0x100;
+
+        do {
+            match_bit = match_byte & offset;
+            match_byte <<= 1;
+            i = offset + match_bit + symbol;
+
+            if (rc_bit(&s->rc, &probs[i])) {
+                symbol = (symbol << 1) + 1;
+                offset &= match_bit;
+            } else {
+                symbol <<= 1;
+                offset &= ~match_bit;
+            }
+        } while (symbol < 0x100);
+    }
+
+    dict_put(&s->dict, (uint8_t)symbol);
+    lzma_state_literal(&s->lzma.state);
+}
+
+/* Decode the length of the match into s->lzma.len.
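+ *
+ * (Illustrative summary of the encoding, matching the table in xz_lzma2.h:
+ * lengths 2-9 use Choice = 0 plus 3 bits, lengths 10-17 use Choice = 1 and
+ * Choice2 = 0 plus 3 bits, and lengths 18-273 use Choice = 1 and
+ * Choice2 = 1 plus 8 bits.)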
*/ +static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l, + uint32_t pos_state) +{ + uint16_t *probs; + uint32_t limit; + + if (!rc_bit(&s->rc, &l->choice)) { + probs = l->low[pos_state]; + limit = LEN_LOW_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN; + } else { + if (!rc_bit(&s->rc, &l->choice2)) { + probs = l->mid[pos_state]; + limit = LEN_MID_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS; + } else { + probs = l->high; + limit = LEN_HIGH_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS + + LEN_MID_SYMBOLS; + } + } + + s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit; +} + +/* Decode a match. The distance will be stored in s->lzma.rep0. */ +static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state) +{ + uint16_t *probs; + uint32_t dist_slot; + uint32_t limit; + + lzma_state_match(&s->lzma.state); + + s->lzma.rep3 = s->lzma.rep2; + s->lzma.rep2 = s->lzma.rep1; + s->lzma.rep1 = s->lzma.rep0; + + lzma_len(s, &s->lzma.match_len_dec, pos_state); + + probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)]; + dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS; + + if (dist_slot < DIST_MODEL_START) { + s->lzma.rep0 = dist_slot; + } else { + limit = (dist_slot >> 1) - 1; + s->lzma.rep0 = 2 + (dist_slot & 1); + + if (dist_slot < DIST_MODEL_END) { + s->lzma.rep0 <<= limit; + probs = s->lzma.dist_special + s->lzma.rep0 + - dist_slot - 1; + rc_bittree_reverse(&s->rc, probs, + &s->lzma.rep0, limit); + } else { + rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS); + s->lzma.rep0 <<= ALIGN_BITS; + rc_bittree_reverse(&s->rc, s->lzma.dist_align, + &s->lzma.rep0, ALIGN_BITS); + } + } +} + +/* + * Decode a repeated match. The distance is one of the four most recently + * seen matches. The distance will be stored in s->lzma.rep0. + */ +static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state) +{ + uint32_t tmp; + + if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) { + if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[ + s->lzma.state][pos_state])) { + lzma_state_short_rep(&s->lzma.state); + s->lzma.len = 1; + return; + } + } else { + if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) { + tmp = s->lzma.rep1; + } else { + if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) { + tmp = s->lzma.rep2; + } else { + tmp = s->lzma.rep3; + s->lzma.rep3 = s->lzma.rep2; + } + + s->lzma.rep2 = s->lzma.rep1; + } + + s->lzma.rep1 = s->lzma.rep0; + s->lzma.rep0 = tmp; + } + + lzma_state_long_rep(&s->lzma.state); + lzma_len(s, &s->lzma.rep_len_dec, pos_state); +} + +/* LZMA decoder core */ +static bool lzma_main(struct xz_dec_lzma2 *s) +{ + uint32_t pos_state; + + /* + * If the dictionary was reached during the previous call, try to + * finish the possibly pending repeat in the dictionary. + */ + if (dict_has_space(&s->dict) && s->lzma.len > 0) + dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0); + + /* + * Decode more LZMA symbols. One iteration may consume up to + * LZMA_IN_REQUIRED - 1 bytes. 
+     */
+    while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) {
+        pos_state = s->dict.pos & s->lzma.pos_mask;
+
+        if (!rc_bit(&s->rc, &s->lzma.is_match[
+                s->lzma.state][pos_state])) {
+            lzma_literal(s);
+        } else {
+            if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state]))
+                lzma_rep_match(s, pos_state);
+            else
+                lzma_match(s, pos_state);
+
+            if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0))
+                return false;
+        }
+    }
+
+    /*
+     * Having the range decoder always normalized when we are outside
+     * this function makes it easier to correctly handle end of the chunk.
+     */
+    rc_normalize(&s->rc);
+
+    return true;
+}
+
+/*
+ * Reset the LZMA decoder and range decoder state. The dictionary is not
+ * reset here, because LZMA state may be reset without resetting the
+ * dictionary.
+ */
+static void lzma_reset(struct xz_dec_lzma2 *s)
+{
+    uint16_t *probs;
+    size_t i;
+
+    s->lzma.state = STATE_LIT_LIT;
+    s->lzma.rep0 = 0;
+    s->lzma.rep1 = 0;
+    s->lzma.rep2 = 0;
+    s->lzma.rep3 = 0;
+
+    /*
+     * All probabilities are initialized to the same value. This hack
+     * makes the code smaller by avoiding a separate loop for each
+     * probability array.
+     *
+     * This could be optimized so that only the part of the literal
+     * probabilities that is actually required gets initialized. In the
+     * common case we would write 12 KiB less.
+     */
+    probs = s->lzma.is_match[0];
+    for (i = 0; i < PROBS_TOTAL; ++i)
+        probs[i] = RC_BIT_MODEL_TOTAL / 2;
+
+    rc_reset(&s->rc);
+}
+
+/*
+ * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks
+ * from the decoded lp and pb values. On success, the LZMA decoder state is
+ * reset and true is returned.
+ */
+static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props)
+{
+    if (props > (4 * 5 + 4) * 9 + 8)
+        return false;
+
+    s->lzma.pos_mask = 0;
+    while (props >= 9 * 5) {
+        props -= 9 * 5;
+        ++s->lzma.pos_mask;
+    }
+
+    s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1;
+
+    s->lzma.literal_pos_mask = 0;
+    while (props >= 9) {
+        props -= 9;
+        ++s->lzma.literal_pos_mask;
+    }
+
+    s->lzma.lc = props;
+
+    if (s->lzma.lc + s->lzma.literal_pos_mask > 4)
+        return false;
+
+    s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1;
+
+    lzma_reset(s);
+
+    return true;
+}
+
+/*********
+ * LZMA2 *
+ *********/
+
+/*
+ * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't
+ * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This
+ * wrapper function takes care of making the LZMA decoder's assumption safe.
+ *
+ * As long as there is plenty of input left to be decoded in the current LZMA
+ * chunk, we decode directly from the caller-supplied input buffer until
+ * there are LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied
+ * into s->temp.buf, which (hopefully) gets filled on the next call to this
+ * function. We decode a few bytes from the temporary buffer so that we can
+ * continue decoding from the caller-supplied input buffer again.
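+ *
+ * (temp.buf holds 3 * LZMA_IN_REQUIRED bytes, which is enough for the
+ * leftover bytes plus the bytes copied in to refill it.)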
+ */
+static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
+{
+    size_t in_avail;
+    uint32_t tmp;
+
+    in_avail = b->in_size - b->in_pos;
+    if (s->temp.size > 0 || s->lzma2.compressed == 0) {
+        tmp = 2 * LZMA_IN_REQUIRED - s->temp.size;
+        if (tmp > s->lzma2.compressed - s->temp.size)
+            tmp = s->lzma2.compressed - s->temp.size;
+        if (tmp > in_avail)
+            tmp = in_avail;
+
+        memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp);
+
+        if (s->temp.size + tmp == s->lzma2.compressed) {
+            memzero(s->temp.buf + s->temp.size + tmp,
+                    sizeof(s->temp.buf)
+                        - s->temp.size - tmp);
+            s->rc.in_limit = s->temp.size + tmp;
+        } else if (s->temp.size + tmp < LZMA_IN_REQUIRED) {
+            s->temp.size += tmp;
+            b->in_pos += tmp;
+            return true;
+        } else {
+            s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED;
+        }
+
+        s->rc.in = s->temp.buf;
+        s->rc.in_pos = 0;
+
+        if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp)
+            return false;
+
+        s->lzma2.compressed -= s->rc.in_pos;
+
+        if (s->rc.in_pos < s->temp.size) {
+            s->temp.size -= s->rc.in_pos;
+            memmove(s->temp.buf, s->temp.buf + s->rc.in_pos,
+                    s->temp.size);
+            return true;
+        }
+
+        b->in_pos += s->rc.in_pos - s->temp.size;
+        s->temp.size = 0;
+    }
+
+    in_avail = b->in_size - b->in_pos;
+    if (in_avail >= LZMA_IN_REQUIRED) {
+        s->rc.in = b->in;
+        s->rc.in_pos = b->in_pos;
+
+        if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED)
+            s->rc.in_limit = b->in_pos + s->lzma2.compressed;
+        else
+            s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED;
+
+        if (!lzma_main(s))
+            return false;
+
+        in_avail = s->rc.in_pos - b->in_pos;
+        if (in_avail > s->lzma2.compressed)
+            return false;
+
+        s->lzma2.compressed -= in_avail;
+        b->in_pos = s->rc.in_pos;
+    }
+
+    in_avail = b->in_size - b->in_pos;
+    if (in_avail < LZMA_IN_REQUIRED) {
+        if (in_avail > s->lzma2.compressed)
+            in_avail = s->lzma2.compressed;
+
+        memcpy(s->temp.buf, b->in + b->in_pos, in_avail);
+        s->temp.size = in_avail;
+        b->in_pos += in_avail;
+    }
+
+    return true;
+}
+
+/*
+ * Take care of the LZMA2 control layer, and forward the job of actual LZMA
+ * decoding or copying of uncompressed chunks to other functions.
+ */
+XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
+                                       struct xz_buf *b)
+{
+    uint32_t tmp;
+
+    while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) {
+        switch (s->lzma2.sequence) {
+        case SEQ_CONTROL:
+            /*
+             * LZMA2 control byte
+             *
+             * Exact values:
+             *   0x00  End marker
+             *   0x01  Dictionary reset followed by
+             *         an uncompressed chunk
+             *   0x02  Uncompressed chunk (no dictionary reset)
+             *
+             * Highest three bits (s->control & 0xE0):
+             *   0xE0  Dictionary reset, new properties and state
+             *         reset, followed by LZMA compressed chunk
+             *   0xC0  New properties and state reset, followed
+             *         by LZMA compressed chunk (no dictionary
+             *         reset)
+             *   0xA0  State reset using old properties,
+             *         followed by LZMA compressed chunk (no
+             *         dictionary reset)
+             *   0x80  LZMA chunk (no dictionary or state reset)
+             *
+             * For LZMA compressed chunks, the lowest five bits
+             * (s->control & 0x1F) are the highest bits of the
+             * uncompressed size (bits 16-20).
+             *
+             * A new LZMA2 stream must begin with a dictionary
+             * reset. The first LZMA chunk must set new
+             * properties and reset the LZMA state.
+             *
+             * Values that don't match anything described above
+             * are invalid and we return XZ_DATA_ERROR.
+ */ + tmp = b->in[b->in_pos++]; + + if (tmp == 0x00) + return XZ_STREAM_END; + + if (tmp >= 0xE0 || tmp == 0x01) { + s->lzma2.need_props = true; + s->lzma2.need_dict_reset = false; + dict_reset(&s->dict, b); + } else if (s->lzma2.need_dict_reset) { + return XZ_DATA_ERROR; + } + + if (tmp >= 0x80) { + s->lzma2.uncompressed = (tmp & 0x1F) << 16; + s->lzma2.sequence = SEQ_UNCOMPRESSED_1; + + if (tmp >= 0xC0) { + /* + * When there are new properties, + * state reset is done at + * SEQ_PROPERTIES. + */ + s->lzma2.need_props = false; + s->lzma2.next_sequence + = SEQ_PROPERTIES; + + } else if (s->lzma2.need_props) { + return XZ_DATA_ERROR; + + } else { + s->lzma2.next_sequence + = SEQ_LZMA_PREPARE; + if (tmp >= 0xA0) + lzma_reset(s); + } + } else { + if (tmp > 0x02) + return XZ_DATA_ERROR; + + s->lzma2.sequence = SEQ_COMPRESSED_0; + s->lzma2.next_sequence = SEQ_COPY; + } + + break; + + case SEQ_UNCOMPRESSED_1: + s->lzma2.uncompressed + += (uint32_t)b->in[b->in_pos++] << 8; + s->lzma2.sequence = SEQ_UNCOMPRESSED_2; + break; + + case SEQ_UNCOMPRESSED_2: + s->lzma2.uncompressed + += (uint32_t)b->in[b->in_pos++] + 1; + s->lzma2.sequence = SEQ_COMPRESSED_0; + break; + + case SEQ_COMPRESSED_0: + s->lzma2.compressed + = (uint32_t)b->in[b->in_pos++] << 8; + s->lzma2.sequence = SEQ_COMPRESSED_1; + break; + + case SEQ_COMPRESSED_1: + s->lzma2.compressed + += (uint32_t)b->in[b->in_pos++] + 1; + s->lzma2.sequence = s->lzma2.next_sequence; + break; + + case SEQ_PROPERTIES: + if (!lzma_props(s, b->in[b->in_pos++])) + return XZ_DATA_ERROR; + + s->lzma2.sequence = SEQ_LZMA_PREPARE; + + /* fallthrough */ + case SEQ_LZMA_PREPARE: + if (s->lzma2.compressed < RC_INIT_BYTES) + return XZ_DATA_ERROR; + + if (!rc_read_init(&s->rc, b)) + return XZ_OK; + + s->lzma2.compressed -= RC_INIT_BYTES; + s->lzma2.sequence = SEQ_LZMA_RUN; + + /* fallthrough */ + case SEQ_LZMA_RUN: + /* + * Set dictionary limit to indicate how much we want + * to be encoded at maximum. Decode new data into the + * dictionary. Flush the new data from dictionary to + * b->out. Check if we finished decoding this chunk. + * In case the dictionary got full but we didn't fill + * the output buffer yet, we may run this loop + * multiple times without changing s->lzma2.sequence. 
+ */ + dict_limit(&s->dict, min_t(size_t, + b->out_size - b->out_pos, + s->lzma2.uncompressed)); + if (!lzma2_lzma(s, b)) + return XZ_DATA_ERROR; + + s->lzma2.uncompressed -= dict_flush(&s->dict, b); + + if (s->lzma2.uncompressed == 0) { + if (s->lzma2.compressed > 0 || s->lzma.len > 0 + || !rc_is_finished(&s->rc)) + return XZ_DATA_ERROR; + + rc_reset(&s->rc); + s->lzma2.sequence = SEQ_CONTROL; + + } else if (b->out_pos == b->out_size + || (b->in_pos == b->in_size + && s->temp.size + < s->lzma2.compressed)) { + return XZ_OK; + } + + break; + + case SEQ_COPY: + dict_uncompressed(&s->dict, b, &s->lzma2.compressed); + if (s->lzma2.compressed > 0) + return XZ_OK; + + s->lzma2.sequence = SEQ_CONTROL; + break; + } + } + + return XZ_OK; +} + +XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, + uint32_t dict_max) +{ + struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s == NULL) + return NULL; + + s->dict.mode = mode; + s->dict.size_max = dict_max; + + if (DEC_IS_PREALLOC(mode)) { + s->dict.buf = vmalloc(dict_max); + if (s->dict.buf == NULL) { + kfree(s); + return NULL; + } + } else if (DEC_IS_DYNALLOC(mode)) { + s->dict.buf = NULL; + s->dict.allocated = 0; + } + + return s; +} + +XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props) +{ + /* This limits dictionary size to 3 GiB to keep parsing simpler. */ + if (props > 39) + return XZ_OPTIONS_ERROR; + + s->dict.size = 2 + (props & 1); + s->dict.size <<= (props >> 1) + 11; + + if (DEC_IS_MULTI(s->dict.mode)) { + if (s->dict.size > s->dict.size_max) + return XZ_MEMLIMIT_ERROR; + + s->dict.end = s->dict.size; + + if (DEC_IS_DYNALLOC(s->dict.mode)) { + if (s->dict.allocated < s->dict.size) { + vfree(s->dict.buf); + s->dict.buf = vmalloc(s->dict.size); + if (s->dict.buf == NULL) { + s->dict.allocated = 0; + return XZ_MEM_ERROR; + } + } + } + } + + s->lzma.len = 0; + + s->lzma2.sequence = SEQ_CONTROL; + s->lzma2.need_dict_reset = true; + + s->temp.size = 0; + + return XZ_OK; +} + +XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s) +{ + if (DEC_IS_MULTI(s->dict.mode)) + vfree(s->dict.buf); + + kfree(s); +} diff --git a/src/common/unix/xzminidec/src/xz_dec_stream.c b/src/common/unix/xzminidec/src/xz_dec_stream.c new file mode 100644 index 0000000..6ba94eb --- /dev/null +++ b/src/common/unix/xzminidec/src/xz_dec_stream.c @@ -0,0 +1,829 @@ +/* + * .xz Stream decoder + * + * Author: Lasse Collin + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#include "xz_private.h" +#include "xz_stream.h" + +/* Hash used to validate the Index field */ +struct xz_dec_hash { + vli_type unpadded; + vli_type uncompressed; + uint32_t crc32; +}; + +struct xz_dec { + /* Position in dec_main() */ + enum { + SEQ_STREAM_HEADER, + SEQ_BLOCK_START, + SEQ_BLOCK_HEADER, + SEQ_BLOCK_UNCOMPRESS, + SEQ_BLOCK_PADDING, + SEQ_BLOCK_CHECK, + SEQ_INDEX, + SEQ_INDEX_PADDING, + SEQ_INDEX_CRC32, + SEQ_STREAM_FOOTER + } sequence; + + /* Position in variable-length integers and Check fields */ + uint32_t pos; + + /* Variable-length integer decoded by dec_vli() */ + vli_type vli; + + /* Saved in_pos and out_pos */ + size_t in_start; + size_t out_start; + + /* CRC32 value in Block or Index */ + uint32_t crc32; + + /* Type of the integrity check calculated from uncompressed data */ + enum xz_check check_type; + + /* Operation mode */ + enum xz_mode mode; + + /* + * True if the next call to xz_dec_run() is allowed to return + * XZ_BUF_ERROR. 
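+     * (xz_dec_run() arms this after a call that makes no progress and
+     * returns XZ_BUF_ERROR only on the second consecutive such call.)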
+ */ + bool allow_buf_error; + + /* Information stored in Block Header */ + struct { + /* + * Value stored in the Compressed Size field, or + * VLI_UNKNOWN if Compressed Size is not present. + */ + vli_type compressed; + + /* + * Value stored in the Uncompressed Size field, or + * VLI_UNKNOWN if Uncompressed Size is not present. + */ + vli_type uncompressed; + + /* Size of the Block Header field */ + uint32_t size; + } block_header; + + /* Information collected when decoding Blocks */ + struct { + /* Observed compressed size of the current Block */ + vli_type compressed; + + /* Observed uncompressed size of the current Block */ + vli_type uncompressed; + + /* Number of Blocks decoded so far */ + vli_type count; + + /* + * Hash calculated from the Block sizes. This is used to + * validate the Index field. + */ + struct xz_dec_hash hash; + } block; + + /* Variables needed when verifying the Index field */ + struct { + /* Position in dec_index() */ + enum { + SEQ_INDEX_COUNT, + SEQ_INDEX_UNPADDED, + SEQ_INDEX_UNCOMPRESSED + } sequence; + + /* Size of the Index in bytes */ + vli_type size; + + /* Number of Records (matches block.count in valid files) */ + vli_type count; + + /* + * Hash calculated from the Records (matches block.hash in + * valid files). + */ + struct xz_dec_hash hash; + } index; + + /* + * Temporary buffer needed to hold Stream Header, Block Header, + * and Stream Footer. The Block Header is the biggest (1 KiB) + * so we reserve space according to that. buf[] has to be aligned + * to a multiple of four bytes; the size_t variables before it + * should guarantee this. + */ + struct { + size_t pos; + size_t size; + uint8_t buf[1024]; + } temp; + + struct xz_dec_lzma2 *lzma2; + +#ifdef XZ_DEC_BCJ + struct xz_dec_bcj *bcj; + bool bcj_active; +#endif +}; + +#ifdef XZ_DEC_ANY_CHECK +/* Sizes of the Check field with different Check IDs */ +static const uint8_t check_sizes[16] = { + 0, + 4, 4, 4, + 8, 8, 8, + 16, 16, 16, + 32, 32, 32, + 64, 64, 64 +}; +#endif + +/* + * Fill s->temp by copying data starting from b->in[b->in_pos]. Caller + * must have set s->temp.pos to indicate how much data we are supposed + * to copy into s->temp.buf. Return true once s->temp.pos has reached + * s->temp.size. + */ +static bool fill_temp(struct xz_dec *s, struct xz_buf *b) +{ + size_t copy_size = min_t(size_t, + b->in_size - b->in_pos, s->temp.size - s->temp.pos); + + memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size); + b->in_pos += copy_size; + s->temp.pos += copy_size; + + if (s->temp.pos == s->temp.size) { + s->temp.pos = 0; + return true; + } + + return false; +} + +/* Decode a variable-length integer (little-endian base-128 encoding) */ +static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in, + size_t *in_pos, size_t in_size) +{ + uint8_t byte; + + if (s->pos == 0) + s->vli = 0; + + while (*in_pos < in_size) { + byte = in[*in_pos]; + ++*in_pos; + + s->vli |= (vli_type)(byte & 0x7F) << s->pos; + + if ((byte & 0x80) == 0) { + /* Don't allow non-minimal encodings. */ + if (byte == 0 && s->pos != 0) + return XZ_DATA_ERROR; + + s->pos = 0; + return XZ_STREAM_END; + } + + s->pos += 7; + if (s->pos == 7 * VLI_BYTES_MAX) + return XZ_DATA_ERROR; + } + + return XZ_OK; +} + +/* + * Decode the Compressed Data field from a Block. Update and validate + * the observed compressed and uncompressed sizes of the Block so that + * they don't exceed the values possibly stored in the Block Header + * (validation assumes that no integer overflow occurs, since vli_type + * is normally uint64_t). 
Update the CRC32 if presence of the CRC32 + * field was indicated in Stream Header. + * + * Once the decoding is finished, validate that the observed sizes match + * the sizes possibly stored in the Block Header. Update the hash and + * Block count, which are later used to validate the Index field. + */ +static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b) +{ + enum xz_ret ret; + + s->in_start = b->in_pos; + s->out_start = b->out_pos; + +#ifdef XZ_DEC_BCJ + if (s->bcj_active) + ret = xz_dec_bcj_run(s->bcj, s->lzma2, b); + else +#endif + ret = xz_dec_lzma2_run(s->lzma2, b); + + s->block.compressed += b->in_pos - s->in_start; + s->block.uncompressed += b->out_pos - s->out_start; + + /* + * There is no need to separately check for VLI_UNKNOWN, since + * the observed sizes are always smaller than VLI_UNKNOWN. + */ + if (s->block.compressed > s->block_header.compressed + || s->block.uncompressed + > s->block_header.uncompressed) + return XZ_DATA_ERROR; + + if (s->check_type == XZ_CHECK_CRC32) + s->crc32 = xz_crc32(b->out + s->out_start, + b->out_pos - s->out_start, s->crc32); + + if (ret == XZ_STREAM_END) { + if (s->block_header.compressed != VLI_UNKNOWN + && s->block_header.compressed + != s->block.compressed) + return XZ_DATA_ERROR; + + if (s->block_header.uncompressed != VLI_UNKNOWN + && s->block_header.uncompressed + != s->block.uncompressed) + return XZ_DATA_ERROR; + + s->block.hash.unpadded += s->block_header.size + + s->block.compressed; + +#ifdef XZ_DEC_ANY_CHECK + s->block.hash.unpadded += check_sizes[s->check_type]; +#else + if (s->check_type == XZ_CHECK_CRC32) + s->block.hash.unpadded += 4; +#endif + + s->block.hash.uncompressed += s->block.uncompressed; + s->block.hash.crc32 = xz_crc32( + (const uint8_t *)&s->block.hash, + sizeof(s->block.hash), s->block.hash.crc32); + + ++s->block.count; + } + + return ret; +} + +/* Update the Index size and the CRC32 value. */ +static void index_update(struct xz_dec *s, const struct xz_buf *b) +{ + size_t in_used = b->in_pos - s->in_start; + s->index.size += in_used; + s->crc32 = xz_crc32(b->in + s->in_start, in_used, s->crc32); +} + +/* + * Decode the Number of Records, Unpadded Size, and Uncompressed Size + * fields from the Index field. That is, Index Padding and CRC32 are not + * decoded by this function. + * + * This can return XZ_OK (more input needed), XZ_STREAM_END (everything + * successfully decoded), or XZ_DATA_ERROR (input is corrupt). + */ +static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b) +{ + enum xz_ret ret; + + do { + ret = dec_vli(s, b->in, &b->in_pos, b->in_size); + if (ret != XZ_STREAM_END) { + index_update(s, b); + return ret; + } + + switch (s->index.sequence) { + case SEQ_INDEX_COUNT: + s->index.count = s->vli; + + /* + * Validate that the Number of Records field + * indicates the same number of Records as + * there were Blocks in the Stream. + */ + if (s->index.count != s->block.count) + return XZ_DATA_ERROR; + + s->index.sequence = SEQ_INDEX_UNPADDED; + break; + + case SEQ_INDEX_UNPADDED: + s->index.hash.unpadded += s->vli; + s->index.sequence = SEQ_INDEX_UNCOMPRESSED; + break; + + case SEQ_INDEX_UNCOMPRESSED: + s->index.hash.uncompressed += s->vli; + s->index.hash.crc32 = xz_crc32( + (const uint8_t *)&s->index.hash, + sizeof(s->index.hash), + s->index.hash.crc32); + --s->index.count; + s->index.sequence = SEQ_INDEX_UNPADDED; + break; + } + } while (s->index.count > 0); + + return XZ_STREAM_END; +} + +/* + * Validate that the next four input bytes match the value of s->crc32. 
+ * s->pos must be zero when starting to validate the first byte. + */ +static enum xz_ret crc32_validate(struct xz_dec *s, struct xz_buf *b) +{ + do { + if (b->in_pos == b->in_size) + return XZ_OK; + + if (((s->crc32 >> s->pos) & 0xFF) != b->in[b->in_pos++]) + return XZ_DATA_ERROR; + + s->pos += 8; + + } while (s->pos < 32); + + s->crc32 = 0; + s->pos = 0; + + return XZ_STREAM_END; +} + +#ifdef XZ_DEC_ANY_CHECK +/* + * Skip over the Check field when the Check ID is not supported. + * Returns true once the whole Check field has been skipped over. + */ +static bool check_skip(struct xz_dec *s, struct xz_buf *b) +{ + while (s->pos < check_sizes[s->check_type]) { + if (b->in_pos == b->in_size) + return false; + + ++b->in_pos; + ++s->pos; + } + + s->pos = 0; + + return true; +} +#endif + +/* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */ +static enum xz_ret dec_stream_header(struct xz_dec *s) +{ + if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE)) + return XZ_FORMAT_ERROR; + + if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0) + != get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2)) + return XZ_DATA_ERROR; + + if (s->temp.buf[HEADER_MAGIC_SIZE] != 0) + return XZ_OPTIONS_ERROR; + + /* + * Of integrity checks, we support only none (Check ID = 0) and + * CRC32 (Check ID = 1). However, if XZ_DEC_ANY_CHECK is defined, + * we will accept other check types too, but then the check won't + * be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given. + */ + s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1]; + +#ifdef XZ_DEC_ANY_CHECK + if (s->check_type > XZ_CHECK_MAX) + return XZ_OPTIONS_ERROR; + + if (s->check_type > XZ_CHECK_CRC32) + return XZ_UNSUPPORTED_CHECK; +#else + if (s->check_type > XZ_CHECK_CRC32) + return XZ_OPTIONS_ERROR; +#endif + + return XZ_OK; +} + +/* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */ +static enum xz_ret dec_stream_footer(struct xz_dec *s) +{ + if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE)) + return XZ_DATA_ERROR; + + if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf)) + return XZ_DATA_ERROR; + + /* + * Validate Backward Size. Note that we never added the size of the + * Index CRC32 field to s->index.size, thus we use s->index.size / 4 + * instead of s->index.size / 4 - 1. + */ + if ((s->index.size >> 2) != get_le32(s->temp.buf + 4)) + return XZ_DATA_ERROR; + + if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type) + return XZ_DATA_ERROR; + + /* + * Use XZ_STREAM_END instead of XZ_OK to be more convenient + * for the caller. + */ + return XZ_STREAM_END; +} + +/* Decode the Block Header and initialize the filter chain. */ +static enum xz_ret dec_block_header(struct xz_dec *s) +{ + enum xz_ret ret; + + /* + * Validate the CRC32. We know that the temp buffer is at least + * eight bytes so this is safe. + */ + s->temp.size -= 4; + if (xz_crc32(s->temp.buf, s->temp.size, 0) + != get_le32(s->temp.buf + s->temp.size)) + return XZ_DATA_ERROR; + + s->temp.pos = 2; + + /* + * Catch unsupported Block Flags. We support only one or two filters + * in the chain, so we catch that with the same test. 
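+     * (Bit 0x01 of the flags byte selects one extra filter; when BCJ
+     * support is compiled in we allow it, hence the 0x3E mask instead
+     * of 0x3F.)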
+ */ +#ifdef XZ_DEC_BCJ + if (s->temp.buf[1] & 0x3E) +#else + if (s->temp.buf[1] & 0x3F) +#endif + return XZ_OPTIONS_ERROR; + + /* Compressed Size */ + if (s->temp.buf[1] & 0x40) { + if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) + != XZ_STREAM_END) + return XZ_DATA_ERROR; + + s->block_header.compressed = s->vli; + } else { + s->block_header.compressed = VLI_UNKNOWN; + } + + /* Uncompressed Size */ + if (s->temp.buf[1] & 0x80) { + if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) + != XZ_STREAM_END) + return XZ_DATA_ERROR; + + s->block_header.uncompressed = s->vli; + } else { + s->block_header.uncompressed = VLI_UNKNOWN; + } + +#ifdef XZ_DEC_BCJ + /* If there are two filters, the first one must be a BCJ filter. */ + s->bcj_active = s->temp.buf[1] & 0x01; + if (s->bcj_active) { + if (s->temp.size - s->temp.pos < 2) + return XZ_OPTIONS_ERROR; + + ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]); + if (ret != XZ_OK) + return ret; + + /* + * We don't support custom start offset, + * so Size of Properties must be zero. + */ + if (s->temp.buf[s->temp.pos++] != 0x00) + return XZ_OPTIONS_ERROR; + } +#endif + + /* Valid Filter Flags always take at least two bytes. */ + if (s->temp.size - s->temp.pos < 2) + return XZ_DATA_ERROR; + + /* Filter ID = LZMA2 */ + if (s->temp.buf[s->temp.pos++] != 0x21) + return XZ_OPTIONS_ERROR; + + /* Size of Properties = 1-byte Filter Properties */ + if (s->temp.buf[s->temp.pos++] != 0x01) + return XZ_OPTIONS_ERROR; + + /* Filter Properties contains LZMA2 dictionary size. */ + if (s->temp.size - s->temp.pos < 1) + return XZ_DATA_ERROR; + + ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]); + if (ret != XZ_OK) + return ret; + + /* The rest must be Header Padding. */ + while (s->temp.pos < s->temp.size) + if (s->temp.buf[s->temp.pos++] != 0x00) + return XZ_OPTIONS_ERROR; + + s->temp.pos = 0; + s->block.compressed = 0; + s->block.uncompressed = 0; + + return XZ_OK; +} + +static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) +{ + enum xz_ret ret; + + /* + * Store the start position for the case when we are in the middle + * of the Index field. + */ + s->in_start = b->in_pos; + + while (true) { + switch (s->sequence) { + case SEQ_STREAM_HEADER: + /* + * Stream Header is copied to s->temp, and then + * decoded from there. This way if the caller + * gives us only little input at a time, we can + * still keep the Stream Header decoding code + * simple. Similar approach is used in many places + * in this file. + */ + if (!fill_temp(s, b)) + return XZ_OK; + + /* + * If dec_stream_header() returns + * XZ_UNSUPPORTED_CHECK, it is still possible + * to continue decoding if working in multi-call + * mode. Thus, update s->sequence before calling + * dec_stream_header(). + */ + s->sequence = SEQ_BLOCK_START; + + ret = dec_stream_header(s); + if (ret != XZ_OK) + return ret; + + /* fallthrough */ + case SEQ_BLOCK_START: + /* We need one byte of input to continue. */ + if (b->in_pos == b->in_size) + return XZ_OK; + + /* See if this is the beginning of the Index field. */ + if (b->in[b->in_pos] == 0) { + s->in_start = b->in_pos++; + s->sequence = SEQ_INDEX; + break; + } + + /* + * Calculate the size of the Block Header and + * prepare to decode it. 
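+             * (The stored byte encodes real_size / 4 - 1, giving a
+             * Block Header of 8 to 1024 bytes.)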
+ */ + s->block_header.size + = ((uint32_t)b->in[b->in_pos] + 1) * 4; + + s->temp.size = s->block_header.size; + s->temp.pos = 0; + s->sequence = SEQ_BLOCK_HEADER; + + /* fallthrough */ + case SEQ_BLOCK_HEADER: + if (!fill_temp(s, b)) + return XZ_OK; + + ret = dec_block_header(s); + if (ret != XZ_OK) + return ret; + + s->sequence = SEQ_BLOCK_UNCOMPRESS; + + /* fallthrough */ + case SEQ_BLOCK_UNCOMPRESS: + ret = dec_block(s, b); + if (ret != XZ_STREAM_END) + return ret; + + s->sequence = SEQ_BLOCK_PADDING; + + /* fallthrough */ + case SEQ_BLOCK_PADDING: + /* + * Size of Compressed Data + Block Padding + * must be a multiple of four. We don't need + * s->block.compressed for anything else + * anymore, so we use it here to test the size + * of the Block Padding field. + */ + while (s->block.compressed & 3) { + if (b->in_pos == b->in_size) + return XZ_OK; + + if (b->in[b->in_pos++] != 0) + return XZ_DATA_ERROR; + + ++s->block.compressed; + } + + s->sequence = SEQ_BLOCK_CHECK; + + /* fallthrough */ + case SEQ_BLOCK_CHECK: + if (s->check_type == XZ_CHECK_CRC32) { + ret = crc32_validate(s, b); + if (ret != XZ_STREAM_END) + return ret; + } +#ifdef XZ_DEC_ANY_CHECK + else if (!check_skip(s, b)) { + return XZ_OK; + } +#endif + + s->sequence = SEQ_BLOCK_START; + break; + + case SEQ_INDEX: + ret = dec_index(s, b); + if (ret != XZ_STREAM_END) + return ret; + + s->sequence = SEQ_INDEX_PADDING; + + /* fallthrough */ + case SEQ_INDEX_PADDING: + while ((s->index.size + (b->in_pos - s->in_start)) + & 3) { + if (b->in_pos == b->in_size) { + index_update(s, b); + return XZ_OK; + } + + if (b->in[b->in_pos++] != 0) + return XZ_DATA_ERROR; + } + + /* Finish the CRC32 value and Index size. */ + index_update(s, b); + + /* Compare the hashes to validate the Index field. */ + if (!memeq(&s->block.hash, &s->index.hash, + sizeof(s->block.hash))) + return XZ_DATA_ERROR; + + s->sequence = SEQ_INDEX_CRC32; + + /* fallthrough */ + case SEQ_INDEX_CRC32: + ret = crc32_validate(s, b); + if (ret != XZ_STREAM_END) + return ret; + + s->temp.size = STREAM_HEADER_SIZE; + s->sequence = SEQ_STREAM_FOOTER; + + /* fallthrough */ + case SEQ_STREAM_FOOTER: + if (!fill_temp(s, b)) + return XZ_OK; + + return dec_stream_footer(s); + } + } + + /* Never reached */ +} + +/* + * xz_dec_run() is a wrapper for dec_main() to handle some special cases in + * multi-call and single-call decoding. + * + * In multi-call mode, we must return XZ_BUF_ERROR when it seems clear that we + * are not going to make any progress anymore. This is to prevent the caller + * from calling us infinitely when the input file is truncated or otherwise + * corrupt. Since zlib-style API allows that the caller fills the input buffer + * only when the decoder doesn't produce any new output, we have to be careful + * to avoid returning XZ_BUF_ERROR too easily: XZ_BUF_ERROR is returned only + * after the second consecutive call to xz_dec_run() that makes no progress. + * + * In single-call mode, if we couldn't decode everything and no error + * occurred, either the input is truncated or the output buffer is too small. + * Since we know that the last input byte never produces any output, we know + * that if all the input was consumed and decoding wasn't finished, the file + * must be corrupt. Otherwise the output buffer has to be too small or the + * file is corrupt in a way that decoding it produces too big output. + * + * If single-call decoding fails, we reset b->in_pos and b->out_pos back to + * their original values. 
+ * This is because with some filter chains there won't
+ * be any valid uncompressed data in the output buffer unless the decoding
+ * actually succeeds (that's the price to pay of using the output buffer as
+ * the workspace).
+ */
+XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
+{
+    size_t in_start;
+    size_t out_start;
+    enum xz_ret ret;
+
+    if (DEC_IS_SINGLE(s->mode))
+        xz_dec_reset(s);
+
+    in_start = b->in_pos;
+    out_start = b->out_pos;
+    ret = dec_main(s, b);
+
+    if (DEC_IS_SINGLE(s->mode)) {
+        if (ret == XZ_OK)
+            ret = b->in_pos == b->in_size
+                    ? XZ_DATA_ERROR : XZ_BUF_ERROR;
+
+        if (ret != XZ_STREAM_END) {
+            b->in_pos = in_start;
+            b->out_pos = out_start;
+        }
+
+    } else if (ret == XZ_OK && in_start == b->in_pos
+            && out_start == b->out_pos) {
+        if (s->allow_buf_error)
+            ret = XZ_BUF_ERROR;
+
+        s->allow_buf_error = true;
+    } else {
+        s->allow_buf_error = false;
+    }
+
+    return ret;
+}
+
+XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
+{
+    struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL);
+    if (s == NULL)
+        return NULL;
+
+    s->mode = mode;
+
+#ifdef XZ_DEC_BCJ
+    s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode));
+    if (s->bcj == NULL)
+        goto error_bcj;
+#endif
+
+    s->lzma2 = xz_dec_lzma2_create(mode, dict_max);
+    if (s->lzma2 == NULL)
+        goto error_lzma2;
+
+    xz_dec_reset(s);
+    return s;
+
+error_lzma2:
+#ifdef XZ_DEC_BCJ
+    xz_dec_bcj_end(s->bcj);
+error_bcj:
+#endif
+    kfree(s);
+    return NULL;
+}
+
+XZ_EXTERN void xz_dec_reset(struct xz_dec *s)
+{
+    s->sequence = SEQ_STREAM_HEADER;
+    s->allow_buf_error = false;
+    s->pos = 0;
+    s->crc32 = 0;
+    memzero(&s->block, sizeof(s->block));
+    memzero(&s->index, sizeof(s->index));
+    s->temp.pos = 0;
+    s->temp.size = STREAM_HEADER_SIZE;
+}
+
+XZ_EXTERN void xz_dec_end(struct xz_dec *s)
+{
+    if (s != NULL) {
+        xz_dec_lzma2_end(s->lzma2);
+#ifdef XZ_DEC_BCJ
+        xz_dec_bcj_end(s->bcj);
+#endif
+        kfree(s);
+    }
+}
diff --git a/src/common/unix/xzminidec/src/xz_lzma2.h b/src/common/unix/xzminidec/src/xz_lzma2.h
new file mode 100644
index 0000000..92d852d
--- /dev/null
+++ b/src/common/unix/xzminidec/src/xz_lzma2.h
@@ -0,0 +1,204 @@
+/*
+ * LZMA2 definitions
+ *
+ * Authors: Lasse Collin
+ *          Igor Pavlov
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_LZMA2_H
+#define XZ_LZMA2_H
+
+/* Range coder constants */
+#define RC_SHIFT_BITS 8
+#define RC_TOP_BITS 24
+#define RC_TOP_VALUE (1 << RC_TOP_BITS)
+#define RC_BIT_MODEL_TOTAL_BITS 11
+#define RC_BIT_MODEL_TOTAL (1 << RC_BIT_MODEL_TOTAL_BITS)
+#define RC_MOVE_BITS 5
+
+/*
+ * Maximum number of position states. A position state is the lowest pb
+ * number of bits of the current uncompressed offset. In some places there
+ * are different sets of probabilities for different position states.
+ */
+#define POS_STATES_MAX (1 << 4)
+
+/*
+ * This enum is used to track which LZMA symbols have occurred most recently
+ * and in which order. This information is used to predict the next symbol.
+ *
+ * Symbols:
+ *  - Literal: One 8-bit byte
+ *  - Match: Repeat a chunk of data at some distance
+ *  - Long repeat: Multi-byte match at a recently seen distance
+ *  - Short repeat: One-byte repeat at a recently seen distance
+ *
+ * The symbol names are in the form STATE_oldest_older_previous. REP means
+ * either short or long repeated match, and NONLIT means any non-literal.
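+ *
+ * For example, STATE_REP_LIT means the previous symbol was a literal and
+ * the one before it was a repeated match.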
+ */
+enum lzma_state {
+    STATE_LIT_LIT,
+    STATE_MATCH_LIT_LIT,
+    STATE_REP_LIT_LIT,
+    STATE_SHORTREP_LIT_LIT,
+    STATE_MATCH_LIT,
+    STATE_REP_LIT,
+    STATE_SHORTREP_LIT,
+    STATE_LIT_MATCH,
+    STATE_LIT_LONGREP,
+    STATE_LIT_SHORTREP,
+    STATE_NONLIT_MATCH,
+    STATE_NONLIT_REP
+};
+
+/* Total number of states */
+#define STATES 12
+
+/* The lowest 7 states indicate that the previous state was a literal. */
+#define LIT_STATES 7
+
+/* Indicate that the latest symbol was a literal. */
+static inline void lzma_state_literal(enum lzma_state *state)
+{
+    if (*state <= STATE_SHORTREP_LIT_LIT)
+        *state = STATE_LIT_LIT;
+    else if (*state <= STATE_LIT_SHORTREP)
+        *state -= 3;
+    else
+        *state -= 6;
+}
+
+/* Indicate that the latest symbol was a match. */
+static inline void lzma_state_match(enum lzma_state *state)
+{
+    *state = *state < LIT_STATES ? STATE_LIT_MATCH : STATE_NONLIT_MATCH;
+}
+
+/* Indicate that the latest symbol was a long repeated match. */
+static inline void lzma_state_long_rep(enum lzma_state *state)
+{
+    *state = *state < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP;
+}
+
+/* Indicate that the latest symbol was a short repeated match. */
+static inline void lzma_state_short_rep(enum lzma_state *state)
+{
+    *state = *state < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP;
+}
+
+/* Test if the previous symbol was a literal. */
+static inline bool lzma_state_is_literal(enum lzma_state state)
+{
+    return state < LIT_STATES;
+}
+
+/*
+ * Each literal coder is divided into three sections:
+ *   - 0x001-0x0FF: Without match byte
+ *   - 0x101-0x1FF: With match byte; match bit is 0
+ *   - 0x201-0x2FF: With match byte; match bit is 1
+ *
+ * Match byte is used when the previous LZMA symbol was something else than
+ * a literal (that is, it was some kind of match).
+ */
+#define LITERAL_CODER_SIZE 0x300
+
+/* Maximum number of literal coders */
+#define LITERAL_CODERS_MAX (1 << 4)
+
+/* Minimum length of a match is two bytes. */
+#define MATCH_LEN_MIN 2
+
+/*
+ * Match length is encoded with 4, 5, or 10 bits.
+ *
+ * Length   Bits
+ *   2-9     4 = Choice=0 + 3 bits
+ *  10-17    5 = Choice=1 + Choice2=0 + 3 bits
+ *  18-273  10 = Choice=1 + Choice2=1 + 8 bits
+ */
+#define LEN_LOW_BITS 3
+#define LEN_LOW_SYMBOLS (1 << LEN_LOW_BITS)
+#define LEN_MID_BITS 3
+#define LEN_MID_SYMBOLS (1 << LEN_MID_BITS)
+#define LEN_HIGH_BITS 8
+#define LEN_HIGH_SYMBOLS (1 << LEN_HIGH_BITS)
+#define LEN_SYMBOLS (LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS + LEN_HIGH_SYMBOLS)
+
+/*
+ * Maximum length of a match is 273 which is a result of the encoding
+ * described above.
+ */
+#define MATCH_LEN_MAX (MATCH_LEN_MIN + LEN_SYMBOLS - 1)
+
+/*
+ * Different sets of probabilities are used for match distances that have
+ * very short match length: Lengths of 2, 3, and 4 bytes have a separate
+ * set of probabilities for each length. The matches with longer length
+ * use a shared set of probabilities.
+ */
+#define DIST_STATES 4
+
+/*
+ * Get the index of the appropriate probability array for decoding
+ * the distance slot.
+ */
+static inline uint32_t lzma_get_dist_state(uint32_t len)
+{
+    return len < DIST_STATES + MATCH_LEN_MIN
+            ? len - MATCH_LEN_MIN : DIST_STATES - 1;
+}
+
+/*
+ * The highest two bits of a 32-bit match distance are encoded using six bits.
+ * This six-bit value is called a distance slot. This way encoding a 32-bit
+ * value takes 6-36 bits, larger values taking more bits.
+ */
+#define DIST_SLOT_BITS 6
+#define DIST_SLOTS (1 << DIST_SLOT_BITS)
+
+/*
+ * Match distances up to 127 are fully encoded using probabilities.
+ * Since the highest two bits (distance slot) are always encoded using six
+ * bits, the distances 0-3 don't need any additional bits to encode, since
+ * the distance slot itself is the same as the actual distance.
+ * DIST_MODEL_START indicates the first distance slot where at least one
+ * additional bit is needed.
+ */
+#define DIST_MODEL_START 4
+
+/*
+ * Match distances greater than 127 are encoded in three pieces:
+ *   - distance slot: the highest two bits
+ *   - direct bits: 2-26 bits below the highest two bits
+ *   - alignment bits: four lowest bits
+ *
+ * Direct bits don't use any probabilities.
+ *
+ * The distance slot value of 14 is for distances 128-191.
+ */
+#define DIST_MODEL_END 14
+
+/* Distance slots that indicate a distance <= 127. */
+#define FULL_DISTANCES_BITS (DIST_MODEL_END / 2)
+#define FULL_DISTANCES (1 << FULL_DISTANCES_BITS)
+
+/*
+ * For match distances greater than 127, only the highest two bits and the
+ * lowest four bits (alignment) are encoded using probabilities.
+ */
+#define ALIGN_BITS 4
+#define ALIGN_SIZE (1 << ALIGN_BITS)
+#define ALIGN_MASK (ALIGN_SIZE - 1)
+
+/* Total number of all probability variables */
+#define PROBS_TOTAL (1846 + LITERAL_CODERS_MAX * LITERAL_CODER_SIZE)
+
+/*
+ * LZMA remembers the four most recent match distances. Reusing these
+ * distances tends to take less space than re-encoding the actual
+ * distance value.
+ */
+#define REPS 4
+
+#endif
diff --git a/src/common/unix/xzminidec/src/xz_private.h b/src/common/unix/xzminidec/src/xz_private.h
new file mode 100644
index 0000000..482b90f
--- /dev/null
+++ b/src/common/unix/xzminidec/src/xz_private.h
@@ -0,0 +1,156 @@
+/*
+ * Private includes and definitions
+ *
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_PRIVATE_H
+#define XZ_PRIVATE_H
+
+#ifdef __KERNEL__
+# include <linux/xz.h>
+# include <linux/kernel.h>
+# include <asm/unaligned.h>
+ /* XZ_PREBOOT may be defined only via decompress_unxz.c. */
+# ifndef XZ_PREBOOT
+# include <linux/slab.h>
+# include <linux/vmalloc.h>
+# include <linux/string.h>
+# ifdef CONFIG_XZ_DEC_X86
+# define XZ_DEC_X86
+# endif
+# ifdef CONFIG_XZ_DEC_POWERPC
+# define XZ_DEC_POWERPC
+# endif
+# ifdef CONFIG_XZ_DEC_IA64
+# define XZ_DEC_IA64
+# endif
+# ifdef CONFIG_XZ_DEC_ARM
+# define XZ_DEC_ARM
+# endif
+# ifdef CONFIG_XZ_DEC_ARMTHUMB
+# define XZ_DEC_ARMTHUMB
+# endif
+# ifdef CONFIG_XZ_DEC_SPARC
+# define XZ_DEC_SPARC
+# endif
+# define memeq(a, b, size) (memcmp(a, b, size) == 0)
+# define memzero(buf, size) memset(buf, 0, size)
+# endif
+# define get_le32(p) le32_to_cpup((const uint32_t *)(p))
+#else
+ /*
+  * For userspace builds, use a separate header to define the required
+  * macros and functions. This makes it easier to adapt the code into
+  * different environments and avoids clutter in the Linux kernel tree.
+  */
+# include "xz_config.h"
+#endif
+
+/* If no specific decoding mode is requested, enable support for all modes. */
+#if !defined(XZ_DEC_SINGLE) && !defined(XZ_DEC_PREALLOC) \
+        && !defined(XZ_DEC_DYNALLOC)
+# define XZ_DEC_SINGLE
+# define XZ_DEC_PREALLOC
+# define XZ_DEC_DYNALLOC
+#endif
+
+/*
+ * The DEC_IS_foo(mode) macros are used in "if" statements. If only some
+ * of the supported modes are enabled, these macros will evaluate to true or
+ * false at compile time and thus allow the compiler to omit unneeded code.
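+ *
+ * For example, when only XZ_DEC_SINGLE is defined, DEC_IS_MULTI(mode)
+ * expands to (false) and the compiler can drop the multi-call code paths
+ * entirely.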
+ */
+#ifdef XZ_DEC_SINGLE
+# define DEC_IS_SINGLE(mode) ((mode) == XZ_SINGLE)
+#else
+# define DEC_IS_SINGLE(mode) (false)
+#endif
+
+#ifdef XZ_DEC_PREALLOC
+# define DEC_IS_PREALLOC(mode) ((mode) == XZ_PREALLOC)
+#else
+# define DEC_IS_PREALLOC(mode) (false)
+#endif
+
+#ifdef XZ_DEC_DYNALLOC
+# define DEC_IS_DYNALLOC(mode) ((mode) == XZ_DYNALLOC)
+#else
+# define DEC_IS_DYNALLOC(mode) (false)
+#endif
+
+#if !defined(XZ_DEC_SINGLE)
+# define DEC_IS_MULTI(mode) (true)
+#elif defined(XZ_DEC_PREALLOC) || defined(XZ_DEC_DYNALLOC)
+# define DEC_IS_MULTI(mode) ((mode) != XZ_SINGLE)
+#else
+# define DEC_IS_MULTI(mode) (false)
+#endif
+
+/*
+ * If any of the BCJ filter decoders are wanted, define XZ_DEC_BCJ.
+ * XZ_DEC_BCJ is used to enable generic support for BCJ decoders.
+ */
+#ifndef XZ_DEC_BCJ
+# if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \
+        || defined(XZ_DEC_IA64) || defined(XZ_DEC_ARM) \
+        || defined(XZ_DEC_ARMTHUMB) || defined(XZ_DEC_SPARC)
+# define XZ_DEC_BCJ
+# endif
+#endif
+
+/*
+ * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
+ * before calling xz_dec_lzma2_run().
+ */
+XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
+                                                   uint32_t dict_max);
+
+/*
+ * Decode the LZMA2 properties (one byte) and reset the decoder. Return
+ * XZ_OK on success, XZ_MEMLIMIT_ERROR if the preallocated dictionary is not
+ * big enough, and XZ_OPTIONS_ERROR if props indicates something that this
+ * decoder doesn't support.
+ */
+XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s,
+                                         uint8_t props);
+
+/* Decode raw LZMA2 stream from b->in to b->out. */
+XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
+                                       struct xz_buf *b);
+
+/* Free the memory allocated for the LZMA2 decoder. */
+XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s);
+
+#ifdef XZ_DEC_BCJ
+/*
+ * Allocate memory for BCJ decoders. xz_dec_bcj_reset() must be used before
+ * calling xz_dec_bcj_run().
+ */
+XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call);
+
+/*
+ * Decode the Filter ID of a BCJ filter. This implementation doesn't
+ * support custom start offsets, so no decoding of Filter Properties
+ * is needed. Returns XZ_OK if the given Filter ID is supported.
+ * Otherwise XZ_OPTIONS_ERROR is returned.
+ */
+XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id);
+
+/*
+ * Decode raw BCJ + LZMA2 stream. This must be used only if there actually is
+ * a BCJ filter in the chain. If the chain has only LZMA2, xz_dec_lzma2_run()
+ * must be called directly.
+ */
+XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
+                                     struct xz_dec_lzma2 *lzma2,
+                                     struct xz_buf *b);
+
+/* Free the memory allocated for the BCJ filters. */
+#define xz_dec_bcj_end(s) kfree(s)
+#endif
+
+#endif
diff --git a/src/common/unix/xzminidec/src/xz_stream.h b/src/common/unix/xzminidec/src/xz_stream.h
new file mode 100644
index 0000000..430bb3a
--- /dev/null
+++ b/src/common/unix/xzminidec/src/xz_stream.h
@@ -0,0 +1,62 @@
+/*
+ * Definitions for handling the .xz file format
+ *
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_STREAM_H
+#define XZ_STREAM_H
+
+#if defined(__KERNEL__) && !XZ_INTERNAL_CRC32
+# include <linux/crc32.h>
+# undef crc32
+# define xz_crc32(buf, size, crc) \
+        (~crc32_le(~(uint32_t)(crc), buf, size))
+#endif
+
+/*
+ * See the .xz file format specification at
+ * https://tukaani.org/xz/xz-file-format.txt
+ * to understand the container format.
+ */
+
+#define STREAM_HEADER_SIZE 12
+
+#define HEADER_MAGIC "\3757zXZ"
+#define HEADER_MAGIC_SIZE 6
+
+#define FOOTER_MAGIC "YZ"
+#define FOOTER_MAGIC_SIZE 2
+
+/*
+ * Variable-length integer can hold a 63-bit unsigned integer or a special
+ * value indicating that the value is unknown.
+ *
+ * Experimental: vli_type can be defined to uint32_t to save a few bytes
+ * in code size (no effect on speed). Doing so limits the uncompressed and
+ * compressed size of the file to less than 256 MiB and may also weaken
+ * error detection slightly.
+ */
+typedef uint64_t vli_type;
+
+#define VLI_MAX ((vli_type)-1 / 2)
+#define VLI_UNKNOWN ((vli_type)-1)
+
+/* Maximum encoded size of a VLI */
+#define VLI_BYTES_MAX (sizeof(vli_type) * 8 / 7)
+
+/* Integrity Check types */
+enum xz_check {
+    XZ_CHECK_NONE = 0,
+    XZ_CHECK_CRC32 = 1,
+    XZ_CHECK_CRC64 = 4,
+    XZ_CHECK_SHA256 = 10
+};
+
+/* Maximum possible Check ID */
+#define XZ_CHECK_MAX 15
+
+#endif
diff --git a/src/nvidia-modeset/Makefile b/src/nvidia-modeset/Makefile
new file mode 100644
index 0000000..92f9472
--- /dev/null
+++ b/src/nvidia-modeset/Makefile
@@ -0,0 +1,225 @@
+###########################################################################
+# Makefile for nv-modeset-kernel.o
+###########################################################################
+
+NV_MODULE_LOGGING_NAME ?= nvidia-modeset
+
+VERSION_MK_DIR = ../../
+include ../../utils.mk
+
+include srcs.mk
+
+##############################################################################
+# Helper functions to determine the compiler type
+##############################################################################
+GET_COMPILER_TYPE = \
+  $(shell $(VERSION_MK_DIR)/nv-compiler.sh type $(1))
+##############################################################################
+
+# The source files for nv-modeset-kernel.o are all SRCS and SRCS_CXX defined in
+# srcs.mk, and the NVIDIA ID string
+ALL_SRCS = $(SRCS) $(SRCS_CXX)
+ALL_SRCS += $(NVIDSTRING)
+
+SRC_COMMON = ../common
+
+CONDITIONAL_CFLAGS :=
+
+CFLAGS += -include $(SRC_COMMON)/sdk/nvidia/inc/cpuopsys.h
+
+CFLAGS += -I $(SRC_COMMON)/sdk/nvidia/inc
+CFLAGS += -I $(SRC_COMMON)/sdk/nvidia/inc/hw
+CFLAGS += -I $(SRC_COMMON)/shared/inc
+CFLAGS += -I $(SRC_COMMON)/inc
+CFLAGS += -I $(SRC_COMMON)/softfloat/nvidia
+CFLAGS += -I $(SRC_COMMON)/softfloat/source/include
+CFLAGS += -I $(SRC_COMMON)/softfloat/source/8086-SSE
+CFLAGS += -I $(SRC_COMMON)/unix/common/utils/interface
+CFLAGS += -I $(SRC_COMMON)/unix/common/inc
+CFLAGS += -I $(SRC_COMMON)/modeset
+CFLAGS += -I os-interface/include
+CFLAGS += -I kapi/interface
+CFLAGS += -I ../nvidia/arch/nvalloc/unix/include
+CFLAGS += -I interface
+CFLAGS += -I include
+CFLAGS += -I kapi/include
+CFLAGS += -I generated
+CFLAGS += -I $(SRC_COMMON)/displayport/inc
+CFLAGS += -I $(SRC_COMMON)/displayport/inc/dptestutil
+CFLAGS += -I $(SRC_COMMON)/inc/displayport
+
+CFLAGS += -DNDEBUG
+CFLAGS += -D_LANGUAGE_C
+CFLAGS += -D__NO_CTYPE
+
+CFLAGS += -DNV_CPU_INTRINSICS_KERNEL
+CFLAGS += -DNVHDMIPKT_RM_CALLS_INTERNAL=0
+
+# XXX it would be nice to only define these for appropriate files...
+CFLAGS += -DSOFTFLOAT_ROUND_ODD
+CFLAGS += -DSOFTFLOAT_FAST_DIV32TO16
+CFLAGS += -DSOFTFLOAT_FAST_DIV64TO32
+
+# Tell nvtiming to use nvkms import functions
+CFLAGS += -DNVT_USE_NVKMS
+
+# Tell SMG we're being compiled into kernel
+CFLAGS += -DNV_SMG_IN_NVKMS
+
+CFLAGS += -Wformat
+CFLAGS += -Wreturn-type
+CFLAGS += -Wswitch
+CFLAGS += -Wunused-local-typedefs
+CFLAGS += -Wchar-subscripts
+CFLAGS += -Wparentheses
+CFLAGS += -Wpointer-arith
+CFLAGS += -Wcast-qual
+CFLAGS += -Wall
+CFLAGS += -Wextra
+CFLAGS += -Wno-sign-compare
+CFLAGS += -Wno-unused-parameter
+CFLAGS += -Wno-missing-field-initializers
+CFLAGS += -Wno-format-zero-length
+CFLAGS += -Wmissing-declarations
+CFLAGS += -Wno-cast-qual
+
+CFLAGS += -O2
+
+ifeq ($(TARGET_ARCH),x86_64)
+ CFLAGS += -msoft-float
+ CFLAGS += -mno-red-zone
+ CFLAGS += -mcmodel=kernel
+ CFLAGS += -mno-mmx
+ CFLAGS += -mno-sse
+ CFLAGS += -mno-sse2
+ CFLAGS += -mno-3dnow
+endif
+
+ifeq ($(TARGET_ARCH),aarch64)
+ CFLAGS += -mgeneral-regs-only
+ CFLAGS += -march=armv8-a
+ CFLAGS += -ffixed-x18
+ CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mno-outline-atomics)
+endif
+
+ifeq ($(TARGET_ARCH),riscv64)
+ CFLAGS += -march=rv64imac_zicsr_zifencei
+ CFLAGS += -mabi=lp64
+ CFLAGS += -mcmodel=medany
+ CFLAGS += -mno-relax
+endif
+
+CFLAGS += -fno-pic
+CFLAGS += -fno-common
+CFLAGS += -fomit-frame-pointer
+CFLAGS += -fno-strict-aliasing
+CFLAGS += -ffunction-sections
+CFLAGS += -fdata-sections
+CFLAGS += -ffreestanding
+CFLAGS += -fno-stack-protector
+
+CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -Wformat-overflow=2)
+CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -Wformat-truncation=1)
+
+ifeq ($(TARGET_ARCH),x86_64)
+ COMPILER_TYPE := $(call GET_COMPILER_TYPE, $(CC))
+ ENDBR_SUPPORTED := $(call AS_HAS_INSTR, endbr64)
+
+ FCF_SUPPORTED =
+
+ #
+ # GCC flags -fcf-protection=branch and -mindirect-branch=thunk-extern can
+ # be used together after GCC version 9.4.0. See
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93654 for details.
+ # Check if GCC version is appropriate.
+ #
+ ifeq ($(COMPILER_TYPE),gcc)
+ FCF_SUPPORTED := \
+ $(shell $(VERSION_MK_DIR)/nv-compiler.sh version_is_at_least $(CC) 90400)
+ endif
+
+ #
+ # Clang version 14.0.0 is required for -fcf-protection=branch to work
+ # correctly.
See commit + # https://github.com/llvm/llvm-project/commit/dfcf69770bc522b9e411c66454934a37c1f35332 + # + ifeq ($(COMPILER_TYPE),clang) + FCF_SUPPORTED := \ + $(shell $(VERSION_MK_DIR)/nv-compiler.sh version_is_at_least $(CC) 140000) + endif + + ifeq ($(FCF_SUPPORTED)-$(ENDBR_SUPPORTED),1-1) + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -fcf-protection=branch) + endif + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -fno-jump-tables) + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch=thunk-extern) + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch-register) +endif + +CFLAGS += $(CONDITIONAL_CFLAGS) + +CC_ONLY_CFLAGS += -Wimplicit +CC_ONLY_CFLAGS += -Wstrict-prototypes +CC_ONLY_CFLAGS += -Wmissing-prototypes +CC_ONLY_CFLAGS += -std=gnu11 + +CXX_ONLY_CFLAGS += -std=gnu++11 +CXX_ONLY_CFLAGS += -fno-operator-names +CXX_ONLY_CFLAGS += -fno-rtti +CXX_ONLY_CFLAGS += -fno-exceptions +CXX_ONLY_CFLAGS += -fcheck-new + +SHADER_OBJS = + +CFLAGS += -I $(SRC_COMMON)/unix/nvidia-3d/interface +CFLAGS += -I $(SRC_COMMON)/unix/nvidia-push/interface +CFLAGS += -I $(SRC_COMMON)/unix/nvidia-3d/include +CFLAGS += -I $(SRC_COMMON)/unix/nvidia-push/include +CFLAGS += -I $(SRC_COMMON)/unix/xzminidec/interface +CFLAGS += -I $(SRC_COMMON)/unix/nvidia-headsurface +CFLAGS += -I src/shaders + +CFLAGS += -DNV_PUSH_IN_KERNEL +CFLAGS += -DNV_XZ_CUSTOM_MEM_HOOKS +CFLAGS += -DNV_XZ_USE_NVTYPES +CFLAGS += -DXZ_DEC_SINGLE + +# Compress the shaders and embed in ELF object files. +define COMPRESS_SHADERS +$$(OUTPUTDIR)/$(1)_shaders.xz: src/shaders/g_$(1)_shaders + @$(MKDIR) $$(OUTPUTDIR) + $$(call quiet_cmd,XZ) -ce -C none < $$^ > $$@ + +$$(eval $$(call READ_ONLY_OBJECT_FROM_FILE_RULE,$$(OUTPUTDIR)/$(1)_shaders.xz)) + +SHADER_OBJS += $$(OUTPUTDIR)/$(1)_shaders.xz.o +endef + +$(eval $(call COMPRESS_SHADERS,maxwell)) +$(eval $(call COMPRESS_SHADERS,pascal)) +$(eval $(call COMPRESS_SHADERS,volta)) +$(eval $(call COMPRESS_SHADERS,turing)) +$(eval $(call COMPRESS_SHADERS,ampere)) + +OBJS = $(call BUILD_OBJECT_LIST,$(ALL_SRCS)) +OBJS += $(SHADER_OBJS) + +# Define how to generate the NVIDIA ID string +$(eval $(call GENERATE_NVIDSTRING, \ + NV_KMS_ID, \ + UNIX Open Kernel Mode Setting Driver, $(OBJS))) + +# Define how to build each object file from the corresponding source file. +$(foreach src, $(ALL_SRCS), $(eval $(call DEFINE_OBJECT_RULE,TARGET,$(src)))) + +NV_MODESET_KERNEL_O = $(OUTPUTDIR)/nv-modeset-kernel.o + +.PHONY: all +all: $(NV_MODESET_KERNEL_O) + +$(NV_MODESET_KERNEL_O): $(OBJS) + $(call quiet_cmd,LD) -r -o $(NV_MODESET_KERNEL_O) $(OBJS) + +.PHONY: clean +clean: + $(RM) -rf $(OUTPUTDIR) diff --git a/src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h b/src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h new file mode 100644 index 0000000..b374822 --- /dev/null +++ b/src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_DP_NVDP_CONNECTOR_EVENT_SINK_H__ +#define __NVKMS_DP_NVDP_CONNECTOR_EVENT_SINK_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void nvDPLibSetAdaptiveSync(const NVDispEvoRec *pDispEvo, NvU32 head, + NvBool enable); +void nvDPLibUpdateDpyLinkConfiguration(NVDpyEvoPtr pDpyEvo); +NvBool nvDPLibDpyIsConnected(NVDpyEvoPtr pDpyEvo); +NvBool nvDPLibDpyIsYuv420ModeSupported(const NVDpyEvoRec *pDpyEvo); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DP_NVDP_CONNECTOR_EVENT_SINK_H__ */ + diff --git a/src/nvidia-modeset/include/dp/nvdp-connector.h b/src/nvidia-modeset/include/dp/nvdp-connector.h new file mode 100644 index 0000000..9a17863 --- /dev/null +++ b/src/nvidia-modeset/include/dp/nvdp-connector.h @@ -0,0 +1,98 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_DP_NVDP_CONNECTOR_H__ +#define __NVKMS_DP_NVDP_CONNECTOR_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvkms-types.h" + +NVDPLibConnectorPtr nvDPCreateConnector(NVConnectorEvoPtr pConnectorEvo); + +void nvDPNotifyLongPulse(NVConnectorEvoPtr pConnectorEvo, + NvBool connected); + +void nvDPNotifyShortPulse(NVDPLibConnectorPtr pNVDpLibConnector); + +void nvDPDestroyConnector(NVDPLibConnectorPtr pNVDpLibConnector); + +NvBool nvDPIsLinkAwaitingTransition(NVConnectorEvoPtr pConnectorEvo); + +NVDPLibModesetStatePtr nvDPLibCreateModesetState( + const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvU32 displayId, + const NVDpyIdList dpyIdList, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + const enum NvKmsDpyAttributeColorBpcValue colorBpc, + const NVHwModeTimingsEvo *pTimings, + const NVDscInfoEvoRec *pDscInfo); + +void nvDPLibFreeModesetState(NVDPLibModesetStatePtr pDpLibModesetState); + +NvBool nvDPLibIsModePossible(const NVDPLibConnectorRec *pDpLibConnector, + const NVDpLibIsModePossibleParamsRec *pParams, + NvU32 *pFailedHeadMask); + +NvBool nvDPValidateModeForDpyEvo( + const NVDpyEvoRec *pDpyEvo, + const NVDpyAttributeColor *pDpyColor, + const struct NvKmsModeValidationParams *pModeValidationParams, + const NVHwModeTimingsEvo *pTimings, + const NvBool b2Heads1Or, + NVDscInfoEvoRec *pDscInfo); + +void nvDPPreSetMode(NVDPLibConnectorPtr pDpLibConnector, + const NVEvoModesetUpdateState *pModesetUpdateState); + +void nvDPPostSetMode(NVDPLibConnectorPtr pDpLibConnector, + const NVEvoModesetUpdateState *pModesetUpdateState); + +void nvDPPause(NVDPLibConnectorPtr pNVDpLibConnector); + +NvBool nvDPResume(NVDPLibConnectorPtr pNVDpLibConnector, NvBool plugged); + +void nvDPSetAllowMultiStreamingOneConnector( + NVDPLibConnectorPtr pDpLibConnector, + NvBool allowMST); + +void nvDPSetAllowMultiStreaming(NVDevEvoPtr pDevEvo, NvBool allowMST); + +enum NVDpLinkMode { + NV_DP_LINK_MODE_OFF, + NV_DP_LINK_MODE_SST, + NV_DP_LINK_MODE_MST, +}; + +enum NVDpLinkMode nvDPGetActiveLinkMode(NVDPLibConnectorPtr pDpLibConnector); + +void nvDPSetLinkHandoff(NVDPLibConnectorPtr pDpLibConnector, NvBool enable); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DP_NVDP_CONNECTOR_H__ */ diff --git a/src/nvidia-modeset/include/dp/nvdp-device.h b/src/nvidia-modeset/include/dp/nvdp-device.h new file mode 100644 index 0000000..ef120ec --- /dev/null +++ b/src/nvidia-modeset/include/dp/nvdp-device.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_DP_NVDP_DEVICE_H__ +#define __NVKMS_DP_NVDP_DEVICE_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void nvDPDeviceSetPowerState(NVDpyEvoPtr pDpyEvo, NvBool on); +unsigned int nvDPGetEDIDSize(const NVDpyEvoRec *pDpyEvo); +NvBool nvDPGetEDID(const NVDpyEvoRec *pDpyEvo, void *buffer, unsigned int size); +void nvDPGetDpyGUID(NVDpyEvoPtr pDpyEvo); +void nvDPDpyFree(NVDpyEvoPtr pDpyEvo); +NvBool nvDPDpyIsDscPossible(const NVDpyEvoRec *pDpyEvo); +NvBool nvDPDpyGetDpcdRevision(const NVDpyEvoRec *pDpyEvo, + unsigned int *major, + unsigned int *minor); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DP_NVDP_DEVICE_H__ */ diff --git a/src/nvidia-modeset/include/dp/nvdp-timer.h b/src/nvidia-modeset/include/dp/nvdp-timer.h new file mode 100644 index 0000000..f1b2ecb --- /dev/null +++ b/src/nvidia-modeset/include/dp/nvdp-timer.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_DP_NVDP_TIMER_H__ +#define __NVKMS_DP_NVDP_TIMER_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NVDPLibTimerPtr nvDPAllocTimer(NVDevEvoPtr pDevEvo); +void nvDPFreeTimer(NVDPLibTimerPtr pTimer); +void nvDPFireExpiredTimers(NVDevEvoPtr pDevEvo); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DP_NVDP_TIMER_H__ */ diff --git a/src/nvidia-modeset/include/g_nvkms-evo-states.h b/src/nvidia-modeset/include/g_nvkms-evo-states.h new file mode 100644 index 0000000..828539a --- /dev/null +++ b/src/nvidia-modeset/include/g_nvkms-evo-states.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __EVO_STATE_H__ +#define __EVO_STATE_H__ + + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void nvEvoStateStartNoLock(NVEvoSubDevPtr); +#if defined(DEBUG) +void nvEvoStateAssertNoLock(const NVEvoSubDevRec *); +#else +static inline void nvEvoStateAssertNoLock(const NVEvoSubDevRec *unused) { }; +#endif + +#ifdef __cplusplus +}; +#endif + +#endif /* __EVO_STATE_H__ */ + diff --git a/src/nvidia-modeset/include/nvkms-3dvision.h b/src/nvidia-modeset/include/nvkms-3dvision.h new file mode 100644 index 0000000..6387632 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-3dvision.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_3DVISION_H__ +#define __NVKMS_3DVISION_H__ + +#include "nvkms-types.h" + +void nv3DVisionAuthenticationEvo(NVDispEvoRec *pDispEvo, const NvU32 apiHead); + +void nvDpyCheck3DVisionCapsEvo(NVDpyEvoPtr pDpyEvo); +NvBool +nvPatch3DVisionModeTimingsEvo(NVT_TIMING *pTiming, NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString); +void nvDisable3DVisionAegis(const NVDpyEvoRec *pDpyEvo); +void nvSendHwModeTimingsToAegisEvo(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead); + +#endif /* __NVKMS_3DVISION_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-attributes.h b/src/nvidia-modeset/include/nvkms-attributes.h new file mode 100644 index 0000000..d2cea1d --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-attributes.h @@ -0,0 +1,51 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_ATTRIBUTES_H__ +#define __NVKMS_ATTRIBUTES_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NvS64 nvRMLaneCountToNvKms(NvU32 rmLaneCount); + +NvBool nvSetDpyAttributeEvo(NVDpyEvoPtr pDpyEvo, + struct NvKmsSetDpyAttributeParams *pParams); + +NvBool nvGetDpyAttributeEvo(const NVDpyEvoRec *pDpyEvo, + struct NvKmsGetDpyAttributeParams *pParams); + +NvBool nvGetDpyAttributeValidValuesEvo( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsGetDpyAttributeValidValuesParams *pParams); + +NvBool nvDpyValidateColorSpace(const NVDpyEvoRec *pDpyEvo, NvS64 value); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_ATTRIBUTES_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-console-restore.h b/src/nvidia-modeset/include/nvkms-console-restore.h new file mode 100644 index 0000000..5b5abeb --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-console-restore.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_CONSOLE_RESTORE_H__ +#define __NVKMS_CONSOLE_RESTORE_H__ + +#include "nvkms-types.h" + +NvBool nvEvoRestoreConsole(NVDevEvoPtr pDevEvo, const NvBool allowMST); + +#endif // __NVKMS_CONSOLE_RESTORE_H__ diff --git a/src/nvidia-modeset/include/nvkms-ctxdma.h b/src/nvidia-modeset/include/nvkms-ctxdma.h new file mode 100644 index 0000000..1ebb188 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-ctxdma.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_CTXDMA_H__ +#define __NVKMS_CTXDMA_H__ + +#include "nvkms-types.h" +#include "nvkms-flip-workarea.h" + +NvBool nvCtxDmaRegisterPreSyncpt(NVDevEvoRec *pDevEvo, + struct NvKmsFlipWorkArea *pWorkArea); + +void nvCtxDmaFreeSyncptHandle(NVDevEvoRec *pDevEvo, NVEvoSyncpt *pSyncpt); + +NvU32 nvCtxDmaBind(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel, NvU32 hCtxDma); + +void nvCtxDmaFree(NVDevEvoPtr pDevEvo, NvU32 deviceHandle, NvU32 *hDispCtxDma); + +NvU32 nvCtxDmaAlloc(NVDevEvoPtr pDevEvo, NvU32 *pCtxDmaHandle, + NvU32 memoryHandle, NvU32 localCtxDmaFlags, NvU64 limit); + +#endif /* __NVKMS_CTXDMA_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-cursor.h b/src/nvidia-modeset/include/nvkms-cursor.h new file mode 100644 index 0000000..a20562e --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-cursor.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_CURSOR_H__ +#define __NVKMS_CURSOR_H__ + +#include "nvkms-types.h" + +NvBool nvGetCursorImageSurfaces( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const struct NvKmsSetCursorImageCommonParams *pParams, + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES]); + +NvBool nvSetCursorImage( + NVDispEvoPtr pDispEvo, + const struct NvKmsPerOpenDev *pOpenDevice, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvU32 apiHead, + const struct NvKmsSetCursorImageCommonParams *pParams); + +void nvEvoMoveCursorInternal(NVDispEvoPtr pDispEvo, + NvU32 head, NvS16 x, NvS16 y); + +void nvMoveCursor(NVDispEvoPtr pDispEvo, const NvU32 apiHead, + const struct NvKmsMoveCursorCommonParams *pParams); + +NvBool nvAllocCursorEvo(NVDevEvoPtr pDevEvo); +void nvFreeCursorEvo(NVDevEvoPtr pDevEvo); + +enum NvKmsAllocDeviceStatus nvInitDispHalCursorEvo(NVDevEvoPtr pDevEvo); + +#endif /* __NVKMS_CURSOR_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-difr.h b/src/nvidia-modeset/include/nvkms-difr.h new file mode 100644 index 0000000..f00e896 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-difr.h @@ -0,0 +1,45 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __NVKMS_DIFR_H__ +#define __NVKMS_DIFR_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvkms-types.h" + +NVDIFRStateEvoPtr nvDIFRAllocate(NVDevEvoPtr pDevEvo); +void nvDIFRFree(NVDIFRStateEvoPtr pDifr); + +void nvDIFRNotifyFlip(NVDIFRStateEvoPtr pDifr); +NvU32 nvDIFRPrefetchSurfaces(NVDIFRStateEvoPtr pDifr, + size_t l2CacheSize); +NvBool nvDIFRSendPrefetchResponse(NVDIFRStateEvoPtr pDifr, + NvU32 responseStatus); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DIFR_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-dma.h b/src/nvidia-modeset/include/nvkms-dma.h new file mode 100644 index 0000000..26f5e06 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-dma.h @@ -0,0 +1,286 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/* This file contains DMA pushbuffer inline routines. */
+
+#ifndef __NVKMS_DMA_H__
+#define __NVKMS_DMA_H__
+
+#include <nvmisc.h> /* DRF_* and NVBIT bitfield helpers */
+
+#include "nvkms-types.h"
+#include "nvkms-utils.h"
+
+#include "class/cl917d.h"
+
+/* declare prototypes: */
+void nvDmaKickoffEvo(NVEvoChannelPtr);
+
+void nvEvoMakeRoom(NVEvoChannelPtr pChannel, NvU32 count);
+void nvWriteEvoCoreNotifier(const NVDispEvoRec *, NvU32 offset, NvU32 value);
+
+NvBool nvEvoIsCoreNotifierComplete(NVDispEvoPtr pDispEvo,
+ NvU32 offset, NvU32 done_base_bit,
+ NvU32 done_extent_bit,
+ NvU32 done_false_value);
+void nvEvoWaitForCoreNotifier(const NVDispEvoRec *pDispEvo, NvU32 offset,
+ NvU32 done_base_bit,
+ NvU32 done_extent_bit, NvU32 done_false_value);
+void nvEvoSetSubdeviceMask(NVEvoChannelPtr pChannel, NvU32 mask);
+
+NvU32 nvEvoReadCRC32Notifier(volatile NvU32 *pCRC32Notifier,
+ NvU32 entry_stride,
+ NvU32 entry_count,
+ NvU32 status_offset,
+ NvU32 field_count,
+ NvU32 flag_count,
+ const CRC32NotifierEntryRec *field_info,
+ const CRC32NotifierEntryFlags *flag_info);
+void nvEvoResetCRC32Notifier(volatile NvU32 *pCRC32Notifier,
+ NvU32 offset,
+ NvU32 reset_base_bit,
+ NvU32 reset_value);
+NvBool nvEvoWaitForCRC32Notifier(const NVDevEvoPtr pDevEvo,
+ volatile NvU32 *pCRC32Notifier,
+ NvU32 offset,
+ NvU32 done_base_bit,
+ NvU32 done_extent_bit,
+ NvU32 done_value);
+
+#define SUBDEVICE_MASK_ALL DRF_MASK(NV917D_DMA_SET_SUBDEVICE_MASK_VALUE)
+
+static inline void nvDmaStorePioMethod(
+ void *pBase, NvU32 offset, NvU32 value)
+{
+ NvU32 *ptr = ((NvU32 *)pBase) + (offset/sizeof(NvU32));
+
+ /*
+ * Use gcc built-in atomic store to ensure the write happens exactly once
+ * and to ensure ordering. We can use the weaker "relaxed" model because we
+ * separately use appropriate fencing on anything that needs to precede this
+ * write.
+ */
+ __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
+}
+
+static inline NvU32 nvDmaLoadPioMethod(
+ const void *pBase, NvU32 offset)
+{
+ const NvU32 *ptr = ((const NvU32 *)pBase) + (offset/sizeof(NvU32));
+
+ /*
+ * Use gcc built-in atomic load to ensure the read happens exactly once and
+ * to ensure ordering. We use the "acquire" model to ensure anything after
+ * this read doesn't get reordered earlier than this read. (E.g., we don't
+ * want any writes to the pushbuffer that are waiting on GET to advance to
+ * get reordered before this read, potentially clobbering the pushbuffer
+ * before it's been read.)
+ */
+ return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+}
+
+/* True if subDevMask selects the same subdevices the channel is currently
+ * programmed to broadcast to. */
+static inline NvBool nvDmaSubDevMaskMatchesCurrent(
+ const NVEvoChannel *pChannel,
+ const NvU32 subDevMask)
+{
+ const NvU32 allSubDevices = (1 << pChannel->pb.num_channels) - 1;
+
+ return (subDevMask & allSubDevices) ==
+ (pChannel->pb.currentSubDevMask & allSubDevices);
+}
+
+/* Append one data dword to the pushbuffer. */
+static inline void nvDmaSetEvoMethodData(
+ NVEvoChannelPtr pChannel,
+ const NvU32 data)
+{
+ *(pChannel->pb.buffer) = data;
+ pChannel->pb.buffer++;
+}
+
+static inline void nvDmaSetEvoMethodDataU64(
+ NVEvoChannelPtr pChannel,
+ const NvU64 data)
+{
+ nvDmaSetEvoMethodData(pChannel, NvU64_HI32(data));
+ nvDmaSetEvoMethodData(pChannel, NvU64_LO32(data));
+}
+
+
+/* Get the SDM for a given pDisp */
+static inline NvU32 nvDispSubDevMaskEvo(const NVDispEvoRec *pDispEvo)
+{
+ return NVBIT(pDispEvo->displayOwner);
+}
+
+/* Initialize the EVO SDM stack */
+static inline void nvInitEvoSubDevMask(NVDevEvoPtr pDevEvo) {
+ pDevEvo->subDevMaskStackDepth = 0;
+ pDevEvo->subDevMaskStack[0] = SUBDEVICE_MASK_ALL;
+}
+
+/* Return the SDM at the top of the stack (i.e.
the currently active one) */ +static inline NvU32 nvPeekEvoSubDevMask(NVDevEvoPtr pDevEvo) { + return pDevEvo->subDevMaskStack[pDevEvo->subDevMaskStackDepth]; +} + +/* Push the given mask onto the stack and set it. */ +static inline void nvPushEvoSubDevMask(NVDevEvoPtr pDevEvo, NvU32 mask) { + pDevEvo->subDevMaskStackDepth++; + + nvAssert(pDevEvo->subDevMaskStackDepth < NV_EVO_SUBDEV_STACK_SIZE); + + pDevEvo->subDevMaskStack[pDevEvo->subDevMaskStackDepth] = mask; +} + +/* Automagically push the SDM for broadcast to disp. */ +static inline void nvPushEvoSubDevMaskDisp(const NVDispEvoRec *pDispEvo) { + NvU32 mask = nvDispSubDevMaskEvo(pDispEvo); + + nvPushEvoSubDevMask(pDispEvo->pDevEvo, mask); +} + +/* Pop the last entry on the stack */ +static inline void nvPopEvoSubDevMask(NVDevEvoPtr pDevEvo) { + pDevEvo->subDevMaskStackDepth--; +} + +/* + * Update the state tracked in updateState to indicate that pChannel has + * pending methods and requires an update/kickoff. + */ +static inline void nvUpdateUpdateState(NVDevEvoPtr pDevEvo, + NVEvoUpdateState *updateState, + const NVEvoChannel *pChannel) +{ + const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo); + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (subDevMask & (1 << sd)) { + updateState->subdev[sd].channelMask |= pChannel->channelMask; + } + } +} + +/* + * Update the state tracked in updateState to indicate that pChannel has + * pending WindowImmediate methods. + */ +static inline void nvWinImmChannelUpdateState(NVDevEvoPtr pDevEvo, + NVEvoUpdateState *updateState, + const NVEvoChannel *pChannel) +{ + const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo); + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (subDevMask & (1 << sd)) { + updateState->subdev[sd].winImmChannelMask |= pChannel->channelMask; + } + } +} + +/* + * Update the state tracked in updateState to prevent pChannel from + * interlocking with the core channel on the next UPDATE. + */ +static inline +void nvDisableCoreInterlockUpdateState(NVDevEvoPtr pDevEvo, + NVEvoUpdateState *updateState, + const NVEvoChannel *pChannel) +{ + const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo); + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (subDevMask & (1 << sd)) { + updateState->subdev[sd].noCoreInterlockMask |= + pChannel->channelMask; + } + } +} + +// These macros verify that the values used in the methods fit +// into the defined ranges. +#define ASSERT_DRF_NUM(d, r, f, n) \ + nvAssert(!(~DRF_MASK(NV ## d ## r ## f) & (n))) + +// From resman nv50/dev_disp.h +#define NV_UDISP_DMA_OPCODE 31:29 /* RWXUF */ +#define NV_UDISP_DMA_OPCODE_METHOD 0x00000000 /* RW--V */ +#define NV_UDISP_DMA_METHOD_COUNT 27:18 /* RWXUF */ +#define NV_UDISP_DMA_METHOD_OFFSET 15:2 /* RWXUF */ + +// Start an EVO method. +static inline void nvDmaSetStartEvoMethod( + NVEvoChannelPtr pChannel, + NvU32 method, + NvU32 count) +{ + NVDmaBufferEvoPtr p = &pChannel->pb; + const NvU32 sdMask = nvPeekEvoSubDevMask(p->pDevEvo); + + // We add 1 to the count for the method header. 
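+    // For example, a write of two data dwords to method offset 0x400
+    // occupies three pushbuffer dwords: one header dword encoding
+    // OPCODE=METHOD, COUNT=2, and OFFSET=(0x400 >> 2), followed by the
+    // two data dwords emitted with nvDmaSetEvoMethodData().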
+ const NvU32 countPlusHeader = count + 1; + + const NvU32 methodDwords = method >> 2; + + nvAssert((method & 0x3) == 0); + + ASSERT_DRF_NUM(_UDISP, _DMA, _METHOD_COUNT, count); + ASSERT_DRF_NUM(_UDISP, _DMA, _METHOD_OFFSET, methodDwords); + + if (!nvDmaSubDevMaskMatchesCurrent(pChannel, sdMask)) { + if (p->num_channels > 1) { + nvEvoSetSubdeviceMask(pChannel, sdMask); + } + } + + if (p->fifo_free_count <= countPlusHeader) { + nvEvoMakeRoom(pChannel, countPlusHeader); + } + + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(_UDISP, _DMA, _OPCODE, _METHOD) | + DRF_NUM(_UDISP, _DMA, _METHOD_COUNT, count) | + DRF_NUM(_UDISP, _DMA, _METHOD_OFFSET, methodDwords)); + + p->fifo_free_count -= countPlusHeader; +} + +static inline NvBool nvIsUpdateStateEmpty(const NVDevEvoRec *pDevEvo, + const NVEvoUpdateState *updateState) +{ + NvU32 sd; + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (updateState->subdev[sd].channelMask != 0x0) { + return FALSE; + } + } + return TRUE; +} + +NvBool nvEvoPollForEmptyChannel(NVEvoChannelPtr pChannel, NvU32 sd, + NvU64 *pStartTime, const NvU32 timeout); + +#endif /* __NVKMS_DMA_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-dpy-override.h b/src/nvidia-modeset/include/nvkms-dpy-override.h new file mode 100644 index 0000000..2228839 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-dpy-override.h @@ -0,0 +1,63 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_DPY_OVERRIDE_H__ +#define __NVKMS_DPY_OVERRIDE_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct _DpyOverride { + NVListRec entry; + + NvU32 gpuId; + char name[NVKMS_DPY_NAME_SIZE]; + + NvBool connected; + NVEdidRec edid; +} NVDpyOverrideRec, *NVDpyOverridePtr; + +NVDpyOverrideRec *nvCreateDpyOverride(NvU32 gpuId, + const char *name, + NvBool connected, + const char *edid, + size_t edidSize); + +void nvDeleteDpyOverride(NvU32 gpuId, const char *name); + +void nvLogDpyOverrides(NvU32 gpuId, NVEvoInfoStringPtr pInfoStr); + +NVDpyOverridePtr nvDpyEvoGetOverride(const NVDpyEvoRec *pDpyEvo); +size_t nvReadDpyOverrideEdid(const NVDpyOverrideRec *pDpyOverride, NvU8 *buff, + size_t len); + +void nvClearDpyOverrides(void); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DPY_OVERRIDE_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-dpy.h b/src/nvidia-modeset/include/nvkms-dpy.h new file mode 100644 index 0000000..ca4b86a --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-dpy.h @@ -0,0 +1,98 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_DPY_H__ +#define __NVKMS_DPY_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void nvDpyProbeMaxPixelClock(NVDpyEvoPtr pDpyEvo); +void nvDpySetValidSyncsEvo(const NVDpyEvoRec *pDpyEvo, + struct NvKmsModeValidationValidSyncs *pValidSyncs); +NVDpyEvoPtr nvAllocDpyEvo(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo, + NVDpyId dpyId, const char *dpAddress); +void nvFreeDpyEvo(NVDispEvoPtr pDispEvo, NVDpyEvoPtr pDpyEvo); +NVConnectorEvoPtr nvGetConnectorFromDisp(NVDispEvoPtr pDispEvo, NVDpyId dpyId); + +void nvDpyAssignSDRInfoFramePayload(NVT_HDR_INFOFRAME_PAYLOAD *pPayload); +void nvCancelSDRTransitionTimer(NVDpyEvoRec *pDpyEvo); +void nvUpdateInfoFrames(NVDpyEvoRec *pDpyEvo); + +NvBool nvDpyRequiresDualLinkEvo(const NVDpyEvoRec *pDpyEvo, + const NVHwModeTimingsEvo *pTimings); + +NVDpyEvoPtr nvGetDpyEvoFromDispEvo(const NVDispEvoRec *pDispEvo, NVDpyId dpyId); + +NVDpyEvoPtr nvGetDPMSTDpyEvo(NVConnectorEvoPtr pConnectorEvo, + const char *address, NvBool *pDynamicDpyCreated); + +typedef enum { + NVKMS_EDID_READ_MODE_DEFAULT, + NVKMS_EDID_READ_MODE_ACPI, +} NvKmsEdidReadMode; + +NvBool nvDpyReadAndParseEdidEvo( + const NVDpyEvoRec *pDpyEvo, + const struct NvKmsQueryDpyDynamicDataRequest *pRequest, + NvKmsEdidReadMode readMode, + NVEdidRec *pEdid, + NVParsedEdidEvoPtr *ppParsedEdid, + NVEvoInfoStringPtr pInfoString); + +char *nvGetDpyIdListStringEvo(NVDispEvoPtr pDispEvo, + const NVDpyIdList dpyIdList); + +NvBool nvDpyGetDynamicData( + NVDpyEvoPtr pDpyEvo, + struct NvKmsQueryDpyDynamicDataParams *pParams); + +void nvDpyUpdateCurrentAttributes(NVDpyEvoRec *pDpyEvo); + +NvBool nvDpyIsAdaptiveSync(const NVDpyEvoRec *pDpyEvo); + +NvBool nvDpyIsAdaptiveSyncDefaultlisted(const NVDpyEvoRec *pDpyEvo); + +enum NvKmsDpyAttributeDigitalSignalValue +nvGetDefaultDpyAttributeDigitalSignalValue(const NVConnectorEvoRec *pConnectorEvo); + +NvKmsDpyOutputColorFormatInfo nvDpyGetOutputColorFormatInfo( + const NVDpyEvoRec *pDpyEvo); + +NvU32 nvDpyGetPossibleApiHeadsMask(const NVDpyEvoRec *pDpyEvo); + +NvBool nvDpyIsHDRCapable(const NVDpyEvoRec *pDpyEvo); + +void nvConstructDpVscSdp(const NVDispHeadInfoFrameStateEvoRec *pInfoFrame, + const NVDpyAttributeColor *pDpyColor, + DPSDP_DP_VSC_SDP_DESCRIPTOR *sdp); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DPY_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-event.h b/src/nvidia-modeset/include/nvkms-event.h new file mode 100644 index 0000000..087476e --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-event.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_EVENT_H__ +#define __NVKMS_EVENT_H__ + +#include "nvkms.h" + +void nvHandleHotplugEventDeferredWork(void *dataPtr, NvU32 dataU32); +void nvHandleDPIRQEventDeferredWork(void *dataPtr, NvU32 dataU32); + +#endif /* __NVKMS_EVENT_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-evo-states.h b/src/nvidia-modeset/include/nvkms-evo-states.h new file mode 100644 index 0000000..ab34f2a --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-evo-states.h @@ -0,0 +1,107 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_STATES_H__ +#define __NVKMS_STATES_H__ + +#include "nvkms-types.h" + +#include "g_nvkms-evo-states.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum NVEvoLockSignal { + NV_EVO_LOCK_SIGNAL_FLIP_LOCK, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK, + NV_EVO_LOCK_SIGNAL_RASTER_LOCK, + NV_EVO_LOCK_SIGNAL_STEREO, +} NVEvoLockSignal; + +typedef enum NVEvoLockAction { + NV_EVO_PROHIBIT_LOCK, + NV_EVO_PROHIBIT_LOCK_DISABLE, + NV_EVO_LOCK_HEADS, + NV_EVO_UNLOCK_HEADS, + NV_EVO_ADD_FRAME_LOCK_SERVER, + NV_EVO_REM_FRAME_LOCK_SERVER, + NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC, + NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC, + NV_EVO_ADD_FRAME_LOCK_CLIENT, + NV_EVO_REM_FRAME_LOCK_CLIENT, + NV_EVO_ADD_FRAME_LOCK_REF, + NV_EVO_REM_FRAME_LOCK_REF, + NV_EVO_ADD_SLI_SECONDARY, + NV_EVO_ADD_SLI_LAST_SECONDARY, + NV_EVO_ADD_SLI_PRIMARY, + NV_EVO_REM_SLI, +} NVEvoLockAction; + +/* nv_evo.c */ + +NVEvoLockPin nvEvoGetPinForSignal(const NVDispEvoRec *, + NVEvoSubDevPtr, + NVEvoLockSignal); +NvBool nvEvoRefFrameLockSli(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads); +NvBool nvEvoUnRefFrameLockSli(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads); + +/* nvkms-hw-states.c */ + +NvBool nvEvoLockHWStateNoLock(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockServerManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateLockHeadsFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimary(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliSecondary(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliLastSecondary(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliSecondaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliLastSecondaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliSecondaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliLastSecondaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool 
nvEvoLockHWStateSliPrimaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_STATES_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-evo.h b/src/nvidia-modeset/include/nvkms-evo.h new file mode 100644 index 0000000..a9e4bd8 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-evo.h @@ -0,0 +1,423 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_H__ +#define __NVKMS_H__ + +#include "nvkms-types.h" +#include "nvkms-api.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern NVEvoInfoStringRec dummyInfoString; + +NVDevEvoPtr nvFindDevEvoByDeviceId(struct NvKmsDeviceId deviceId); +NvU8 nvGetGpuLogIndex(void); +void nvEvoDetachConnector(NVConnectorEvoRec *pConnectorEvo, const NvU32 head, + NVEvoModesetUpdateState *pModesetUpdateState); +void nvEvoAttachConnector(NVConnectorEvoRec *pConnectorEvo, + const NvU32 head, + const NvU32 isPrimaryHead, + NVDPLibModesetStatePtr pDpLibModesetState, + NVEvoModesetUpdateState *pModesetUpdateState); +void nvEvoUpdateAndKickOff(const NVDispEvoRec *pDispEvo, NvBool sync, + NVEvoUpdateState *updateState, NvBool releaseElv); +void nvDoIMPUpdateEvo(NVDispEvoPtr pDispEvo, + NVEvoUpdateState *updateState); +void nvEvoFlipUpdate(NVDispEvoPtr pDispEvo, + NVEvoUpdateState *updateState); +void nvEvoArmLightweightSupervisor(NVDispEvoPtr pDispEvo, + const NvU32 head, + NvBool isVrr, + NvBool enable); + +void nvSetViewPortsEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, NVEvoUpdateState *updateState); +void nvSetViewPortPointInEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, + const NvU16 x, + NvU16 y, + NVEvoUpdateState *updateState); +void +nvConstructNvModeTimingsFromHwModeTimings(const NVHwModeTimingsEvo *pTimings, + NvModeTimingsPtr pModeTimings); +void nvEvoSetTimings(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState); + +void nvInitScalingUsageBounds(const NVDevEvoRec *pDevEvo, + struct NvKmsScalingUsageBounds *pScaling); +NvBool nvComputeScalingUsageBounds(const NVEvoScalerCaps *pScalerCaps, + const NvU32 inWidth, const NvU32 inHeight, + const NvU32 outWidth, const NvU32 outHeight, + NVEvoScalerTaps hTaps, NVEvoScalerTaps vTaps, + struct NvKmsScalingUsageBounds *out); +NvBool nvAssignScalerTaps(const NVDevEvoRec *pDevEvo, + const NVEvoScalerCaps *pScalerCaps, + const NvU32 inWidth, const NvU32 inHeight, + const NvU32 outWidth, const NvU32 outHeight, + NvBool doubleScan, + NVEvoScalerTaps *hTapsOut, NVEvoScalerTaps *vTapsOut); +NvBool nvValidateHwModeTimingsViewPort(const NVDevEvoRec *pDevEvo, + const NVEvoScalerCaps *pScalerCaps, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString); +void nvAssignDefaultUsageBounds(const NVDispEvoRec *pDispEvo, + NVHwModeViewPortEvo *pViewPort); +void nvUnionUsageBounds(const struct NvKmsUsageBounds *a, + const struct NvKmsUsageBounds *b, + struct NvKmsUsageBounds *ret); +void nvIntersectUsageBounds(const struct NvKmsUsageBounds *a, + const struct NvKmsUsageBounds *b, + struct NvKmsUsageBounds *ret); +NvBool UsageBoundsEqual(const struct NvKmsUsageBounds *a, + const struct NvKmsUsageBounds *b); +NvU64 nvEvoGetFormatsWithEqualOrLowerUsageBound( + const enum NvKmsSurfaceMemoryFormat format, + const NvU64 supportedFormatsCapMask); +void nvCancelLowerDispBandwidthTimer(NVDevEvoPtr pDevEvo); +void nvScheduleLowerDispBandwidthTimer(NVDevEvoPtr pDevEvo); +void nvAssertAllDpysAreInactive(NVDevEvoPtr pDevEvo); +void nvEvoLockStatePreModeset(NVDevEvoPtr pDevEvo); +void nvEvoLockStatePostModeset(NVDevEvoPtr pDevEvo, const NvBool doRasterLock); +NvBool nvSetFlipLockGroup(NVDevEvoRec *pDevEvo[NV_MAX_SUBDEVICES], + const struct NvKmsSetFlipLockGroupRequest *pRequest); +void nvEvoRemoveOverlappingFlipLockRequestGroupsForModeset( + NVDevEvoPtr pDevEvo, + const struct NvKmsSetModeRequest *pRequest); +NvBool nvSetUsageBoundsEvo( + NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + const struct NvKmsUsageBounds *pUsage, + 
NVEvoUpdateState *updateState); +void nvEnableMidFrameAndDWCFWatermark(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + NvBool enable, + NVEvoUpdateState *pUpdateState); + +void nvEvoHeadSetControlOR(NVDispEvoPtr pDispEvo, + const NvU32 head, + const NVDpyAttributeColor *pDpyColor, + NVEvoUpdateState *pUpdateState); + +void nvChooseDitheringEvo( + const NVConnectorEvoRec *pConnectorEvo, + enum NvKmsDpyAttributeColorBpcValue bpc, + enum NvKmsOutputColorimetry colorimetry, + const NVDpyAttributeRequestedDitheringConfig *pReqDithering, + NVDpyAttributeCurrentDitheringConfig *pCurrDithering); + +void nvSetDitheringEvo( + NVDispEvoPtr pDispEvo, + const NvU32 head, + const NVDpyAttributeCurrentDitheringConfig *pCurrDithering, + NVEvoUpdateState *pUpdateState); + +NvBool nvEnableFrameLockEvo(NVDispEvoPtr pDispEvo); +NvBool nvDisableFrameLockEvo(NVDispEvoPtr pDispEvo); +NvBool nvQueryRasterLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val); +void nvInvalidateRasterLockGroupsEvo(void); +NvBool nvSetFlipLockEvo(NVDpyEvoPtr pDpyEvo, NvS64 value); +NvBool nvGetFlipLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue); +NvBool nvAllowFlipLockEvo(NVDispEvoPtr pDispEvo, NvS64 value); +NvBool nvSetStereoEvo(const NVDispEvoRec *pDispEvo, + const NvU32 head, NvBool enable); +NvBool nvGetStereoEvo(const NVDispEvoRec *pDispEvo, const NvU32 head); +struct NvKmsCompositionParams nvDefaultCursorCompositionParams(const NVDevEvoRec *pDevEvo); +NvBool nvAllocCoreChannelEvo(NVDevEvoPtr pDevEvo); +void nvFreeCoreChannelEvo(NVDevEvoPtr pDevEvo); + +void nvEvoUpdateSliVideoBridge(NVDevEvoPtr pDevEvo); + +void nvSetDVCEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, + NvS32 dvc, + NVEvoUpdateState *updateState); +void nvSetImageSharpeningEvo(NVDispEvoRec *pDispEvo, const NvU32 head, + const NvU32 value, NVEvoUpdateState *updateState); + +NvBool nvLayerSetPositionEvo( + NVDevEvoPtr pDevEvo, + const struct NvKmsSetLayerPositionRequest *pRequest); + +NvBool nvConstructHwModeTimingsEvo(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsMode *pKmsMode, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + const NvBool dscPassThrough, + NVDpyAttributeColor *pDpyColor, + NVHwModeTimingsEvoPtr pTimings, + const struct NvKmsModeValidationParams + *pParams, + NVEvoInfoStringPtr pInfoString); + +NvBool nvConstructHwModeTimingsImpCheckEvo( + const NVConnectorEvoRec *pConnectorEvo, + const NVHwModeTimingsEvo *pTimings, + const NVDscInfoEvoRec *pDscInfo, + const NvBool b2Heads1Or, + const NVDpyAttributeColor *pDpyColor, + const struct NvKmsModeValidationParams *pParams, + NVHwModeTimingsEvo timings[NVKMS_MAX_HEADS_PER_DISP], + NvU32 *pNumHeads, + NVEvoInfoStringPtr pInfoString); + +NvBool nvDowngradeColorBpc( + const NvKmsDpyOutputColorFormatInfo *pSupportedColorFormats, + NVDpyAttributeColor *pDpyColor); + +NvBool nvDowngradeColorSpaceAndBpc( + const NVDpyEvoRec *pDpyEvo, + const NvKmsDpyOutputColorFormatInfo *pSupportedColorFormats, + NVDpyAttributeColor *pDpyColor); + +NvBool nvDPValidateModeEvo(NVDpyEvoPtr pDpyEvo, + NVHwModeTimingsEvoPtr pTimings, + NVDpyAttributeColor *pDpyColor, + const NvBool b2Heads1Or, + NVDscInfoEvoRec *pDscInfo, + const struct NvKmsModeValidationParams *pParams); + +NvBool nvEvoUpdateHwModeTimingsViewPort( + const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pModeValidationParams, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVHwModeTimingsEvo *pTimings); + +typedef struct _NVValidateImpOneDispHeadParamsRec +{ + const 
NVConnectorEvoRec *pConnectorEvo; + const struct NvKmsUsageBounds *pUsage; + NvU32 activeRmId; + enum nvKmsPixelDepth pixelDepth; + NVHwModeTimingsEvoPtr pTimings; + NvBool enableDsc; + NvU32 dscSliceCount; + NvU32 possibleDscSliceCountMask; + NvBool b2Heads1Or; +} NVValidateImpOneDispHeadParamsRec; + +NvBool nvValidateImpOneDisp( + NVDispEvoPtr pDispEvo, + NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvBool requireBootClocks, + NVEvoReallocateBandwidthMode reallocBandwidth, + NvU32 *pMinIsoBandwidthKBPS, + NvU32 *pMinDramFloorKBPS, + const NvU32 modesetRequestedHeadsMask); + +NvBool nvAllocateDisplayBandwidth( + NVDispEvoPtr pDispEvo, + NvU32 newIsoBandwidthKBPS, + NvU32 newDramFloorKBPS); + +NvBool nvValidateImpOneDispDowngrade( + NVDispEvoPtr pDispEvo, + NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvBool requireBootClocks, + NVEvoReallocateBandwidthMode reallocBandwidth, + NvU32 modesetRequestedHeadsMask); + +NvBool nvFrameLockServerPossibleEvo(const NVDpyEvoRec *pDpyEvo); +NvBool nvFrameLockClientPossibleEvo(const NVDpyEvoRec *pDpyEvo); + +NvBool nvEvoLUTNotifiersNeedCommit(NVDispEvoPtr pDispEvo); +int nvEvoCommitLUTNotifiers(NVDispEvoPtr pDispEvo); +void nvEvoClearStagedLUTNotifiers(NVDispEvoPtr pDispEvo); +void nvEvoStageLUTNotifier(NVDispEvoPtr pDispEvo, NvU32 apiHead); +NvBool nvEvoIsLUTNotifierComplete(NVDispEvoPtr pDispEvo, NvU32 apiHead); +void nvEvoWaitForLUTNotifier(const NVDispEvoPtr pDispEvo, NvU32 apiHead); + +void nvEvoSetLut(NVDispEvoPtr pDispEvo, NvU32 apiHead, NvBool kickoff, + const struct NvKmsSetLutCommonParams *pParams); +NvBool nvValidateSetLutCommonParams( + const NVDevEvoRec *pDevEvo, + const struct NvKmsSetLutCommonParams *pParams); + +NvBool nvChooseColorRangeEvo( + const enum NvKmsDpyAttributeColorRangeValue requestedColorRange, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + const enum NvKmsDpyAttributeColorBpcValue colorBpc, + enum NvKmsDpyAttributeColorRangeValue *pColorRange); + +NvBool nvChooseCurrentColorSpaceAndRangeEvo( + const NVDpyEvoRec *pDpyEvo, + const enum NvYuv420Mode yuv420Mode, + enum NvKmsOutputColorimetry colorimetry, + const enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace, + const enum NvKmsDpyAttributeColorBpcValue requestedColorBpc, + const enum NvKmsDpyAttributeColorRangeValue requestedColorRange, + enum NvKmsDpyAttributeCurrentColorSpaceValue *pCurrentColorSpace, + enum NvKmsDpyAttributeColorBpcValue *pCurrentColorBpc, + enum NvKmsDpyAttributeColorRangeValue *pCurrentColorRange); + +void nvUpdateCurrentHardwareColorSpaceAndRangeEvo( + NVDispEvoPtr pDispEvo, + const NvU32 head, + const NVDpyAttributeColor *pDpyColor, + NVEvoUpdateState *pUpdateState); + +NvBool nvAssignSOREvo(const NVConnectorEvoRec *pConnectorEvo, + const NvU32 targetDisplayId, + const NvBool b2Heads1Or, + const NvU32 sorExcludeMask); + +void nvSetSwapBarrierNotifyEvo(NVDispEvoPtr pDispEvo, + NvBool enable, NvBool isPre); + +void nvUnbloatHwModeTimingsEvo(NVHwModeTimingsEvoPtr pTimings, NvU32 factor); + +NvBool nvReadCRC32Evo(NVDispEvoPtr pDispEvo, NvU32 head, + CRC32NotifierCrcOut *crcOut /* out */); + +NvBool nvFreeDevEvo(NVDevEvoPtr pDevEvo); +NVDevEvoPtr nvAllocDevEvo(const struct NvKmsAllocDeviceRequest *pRequest, + enum NvKmsAllocDeviceStatus *pStatus); +NvU32 nvGetActiveSorMask(const NVDispEvoRec *pDispEvo); +NvBool nvUpdateFlipLockEvoOneHead(NVDispEvoPtr pDispEvo, const NvU32 head, + NvU32 *val, NvBool set, + NVEvoUpdateState *updateState); + +void 
nvEvoSetLUTContextDma(NVDispEvoPtr pDispEvo, + const NvU32 head, NVEvoUpdateState *pUpdateState); + +NvBool nvEvoPollForNoMethodPending(NVDevEvoPtr pDevEvo, + const NvU32 sd, + NVEvoChannelPtr pChannel, + NvU64 *pStartTime, + const NvU32 timeout); + +static inline void nvAssertSameSemaphoreSurface( + const NVFlipChannelEvoHwState *pHwState) +{ + + /*! + * pHwState->syncObject contains separate fields to track the semaphore + * surface used for acquire, and the semaphore surface used for release. + * Prior to NvDisplay 4.0, display HW only supports using a single semaphore + * surface for both acquire and release. As such, assert that the semaphore + * surfaces in pHwState->syncObject are the same, and that we're also not + * using syncpoints. This is enforced during flip validation. + */ + + nvAssert(pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo == + pHwState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo); + + nvAssert(!pHwState->syncObject.usingSyncpt); +} + +void nvDPSerializerHandleDPIRQ(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo); + +void nvDPSerializerPreSetMode(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo); + +void nvDPSerializerPostSetMode(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo); + +NvBool nvFramelockSetControlUnsyncEvo(NVDispEvoPtr pDispEvo, const NvU32 headMask, + NvBool server); + +NvU32 nvGetHDRSrcMaxLum(const NVFlipChannelEvoHwState *pHwState); + +NvBool nvNeedsTmoLut(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NvU32 srcMaxLum, + NvU32 targetMaxCLL); + +NvBool nvIsCscMatrixIdentity(const struct NvKmsCscMatrix *matrix); + +enum nvKmsPixelDepth nvEvoDpyColorToPixelDepth( + const NVDpyAttributeColor *pDpyColor); + +void nvSuspendDevEvo(NVDevEvoRec *pDevEvo); +NvBool nvResumeDevEvo(NVDevEvoRec *pDevEvo); + +NvBool nvGetDefaultDpyColor( + const NvKmsDpyOutputColorFormatInfo *pColorFormatsInfo, + NVDpyAttributeColor *pDpyColor); + +static inline void nvEvoSetFlipOccurredEvent(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvU32 layer, + struct nvkms_ref_ptr *ref_ptr, + NVEvoModesetUpdateState + *pModesetUpdate) +{ + nvAssert((head < pDispEvo->pDevEvo->numHeads) && + (layer < pDispEvo->pDevEvo->head[head].numLayers)); + pModesetUpdate->flipOccurredEvent[head].layer[layer].ref_ptr = ref_ptr; + pModesetUpdate->flipOccurredEvent[head].layer[layer].changed = TRUE; +} + +void nvEvoPreModesetRegisterFlipOccurredEvent(NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVEvoModesetUpdateState + *pModesetUpdate); + +void nvEvoPostModesetUnregisterFlipOccurredEvent(NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVEvoModesetUpdateState + *pModesetUpdate); + +void nvEvoLockStateSetMergeMode(NVDispEvoPtr pDispEvo); + +void nvEvoEnableMergeModePreModeset(NVDispEvoRec *pDispEvo, + const NvU32 headsMask, + NVEvoUpdateState *pUpdateState); +void nvEvoEnableMergeModePostModeset(NVDispEvoRec *pDispEvo, + const NvU32 headsMask, + NVEvoUpdateState *pUpdateState); +void nvEvoDisableMergeMode(NVDispEvoRec *pDispEvo, + const NvU32 headsMask, + NVEvoUpdateState *pUpdateState); + +void nvEvoDisableHwYUV420Packer(const NVDispEvoRec *pDispEvo, + const NvU32 head, + NVEvoUpdateState *pUpdateState); + +NvBool nvEvoGetSingleMergeHeadSectionHwModeTimings( + const NVHwModeTimingsEvo *pSrc, + const NvU32 numSections, + NVHwModeTimingsEvo *pDst); + +NvBool nvEvoUse2Heads1OR(const NVDpyEvoRec *pDpyEvo, + const NVHwModeTimingsEvo *pTimings, + const struct NvKmsModeValidationParams *pParams); 
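+
+/*
+ * As its name suggests, nvGetRefreshRate10kHz() below reports rates in
+ * 1/10000 Hz units; a 60 Hz mode, for example, corresponds to 600000.
+ */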
+ +NvU32 nvGetRefreshRate10kHz(const NVHwModeTimingsEvo *pTimings); + +NvBool nvIsLockGroupFlipLocked(const NVLockGroup *pLockGroup); + +NvBool nvEvoIsConsoleActive(const NVDevEvoRec *pDevEvo); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-evo1.h b/src/nvidia-modeset/include/nvkms-evo1.h new file mode 100644 index 0000000..22fae9d --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-evo1.h @@ -0,0 +1,78 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_EVO_1_H__ +#define __NVKMS_EVO_1_H__ + +#include "nvkms-types.h" + +NvBool nvEvo1IsChannelIdle(NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd, + NvBool *result); +NvBool nvEvo1IsChannelMethodPending(NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd, + NvBool *result); + +void nvEvo1IsModePossible(NVDispEvoPtr pDispEvo, + const NVEvoIsModePossibleDispInput *pInput, + NVEvoIsModePossibleDispOutput *pOutput); +void nvEvo1PrePostIMP(NVDispEvoPtr pDispEvo, NvBool isPre); + +void nvEvo1SetDscParams(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVDscInfoEvoRec *pDscInfo, + const enum nvKmsPixelDepth pixelDepth); + +NVEvoChannel* nvEvo1AllocateCoreChannel(NVDevEvoRec *pDevEvo); +void nvEvo1FreeCoreChannel(NVDevEvoRec *pDevEvo, NVEvoChannel *pChannel); + +NvBool nvEvo1NvtToHdmiInfoFramePacketType(const NvU32 srcType, NvU8 *pDstType); + +void nvEvo1SendHdmiInfoFrame(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvEvoInfoFrameTransmitControl transmitCtrl, + const NVT_INFOFRAME_HEADER *pInfoFrameHeader, + const NvU32 infoframeSize, + NvBool needChecksum); + +void nvEvo1DisableHdmiInfoFrame(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvU8 nvtInfoFrameType); + +void nvEvo1SendDpInfoFrameSdp(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvEvoInfoFrameTransmitControl transmitCtrl, + const DPSDP_DESCRIPTOR *sdp); + +static inline NvU16 nvEvo1GetColorSpaceFlag(NVDevEvoPtr pDevEvo, + const NvBool colorSpaceOverride) +{ + NvU16 colorSpaceFlag = 0; + + if (colorSpaceOverride) { + nvAssert(pDevEvo->caps.supportsDP13); + colorSpaceFlag = 1 << 11; + } + + return colorSpaceFlag; +} + +#endif /* __NVKMS_EVO_1_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-evo3.h b/src/nvidia-modeset/include/nvkms-evo3.h new file mode 100644 index 0000000..35d5824 --- /dev/null +++ 
 b/src/nvidia-modeset/include/nvkms-evo3.h
@@ -0,0 +1,333 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKMS_EVO_3_H__
+#define __NVKMS_EVO_3_H__
+
+#include "nvkms-types.h"
+#include "nv-float.h"
+#include "nvkms-softfloat.h"
+#include <class/clc57d.h> // NVC57D_CORE_CHANNEL_DMA
+
+#include <ctrl/ctrlc372/ctrlc372chnc.h> // NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS
+
+#define NV_EVO3_X_EMULATED_SURFACE_MEMORY_FORMATS_C6 \
+    (NVBIT64(NvKmsSurfaceMemoryFormatRF16GF16BF16XF16) | \
+     NVBIT64(NvKmsSurfaceMemoryFormatX2B10G10R10))
+
+#define NV_EVO3_SUPPORTED_DITHERING_MODES \
+    ((1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO) | \
+     (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2) | \
+     (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2) | \
+     (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL))
+
+#define NV_EVO3_SUPPORTED_CURSOR_COMP_BLEND_MODES \
+    ((1 << NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE) | \
+     (1 << NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA) | \
+     (1 << NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA) | \
+     (1 << NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA) | \
+     (1 << NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA))
+
+#define NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C5 \
+    (DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _ILUT_ALLOWED, _TRUE) | \
+     DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _TMO_LUT_ALLOWED, _TRUE))
+
+// HW supports ratio = 1, 2 (downscaling), 4 (downscaling)
+#define NUM_SCALER_RATIOS 3
+
+// There are 16 phases stored in the matrix, but HW can derive the values of
+// phases +16 and -16 from phase 0. Therefore, SW loads the phase +16/-16
+// coeff values in phase 0.
+#define NUM_TAPS5_COEFF_PHASES 16
+
+// There are 5 coefficient values per phase (or matrix row), but SW doesn't need
+// to upload c2. So, the value here is set to 4.
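+//
+// The table below is therefore NUM_SCALER_RATIOS x NUM_TAPS5_COEFF_PHASES x
+// NUM_TAPS5_COEFF_VALUES = 3 x 16 x 4 = 192 NvU32 entries.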
+#define NUM_TAPS5_COEFF_VALUES 4
+
+extern const NvU32 scalerTaps5Coeff[NUM_SCALER_RATIOS][NUM_TAPS5_COEFF_PHASES][NUM_TAPS5_COEFF_VALUES];
+
+struct EvoClampRangeC5 {
+    NvU32 green, red_blue;
+};
+
+typedef void (NVEvoParseCapabilityNotifierFunc3)(NVDevEvoPtr pDevEvo,
+                                                 NVEvoSubDevPtr pEvoSubDev,
+                                                 volatile const NvU32 *pCaps);
+
+typedef NvU32 (NVEvoHwFormatFromKmsFormatFunc3)(
+    const enum NvKmsSurfaceMemoryFormat format);
+
+/*
+ * Converts FP32 to fixed point S5.14 coefficient format.
+ *
+ * The result keeps its low two bits zero, so, e.g., 1.0f maps to 0x10000;
+ * the +2.0 bias rounds to the nearest representable coefficient before the
+ * 0x001ffffc mask truncates the low bits.
+ */
+static inline NvU32 nvCscCoefConvertS514(float32_t x)
+{
+    /* more concisely, (NvS32)floor(x * 65536.0 + 2.0) */
+    const NvS32 y = f32_to_i32(f32_mulAdd(x,
+                                          NvU32viewAsF32(NV_FLOAT_65536),
+                                          NvU32viewAsF32(NV_FLOAT_TWO)),
+                               softfloat_round_min, FALSE);
+    return (NvU32)(0x001ffffc & clamp_S32(y, -0x100000, 0xfffff));
+}
+
+NvBool nvComputeMinFrameIdle(
+    const NVHwModeTimingsEvo *pTimings,
+    NvU16 *pLeadingRasterLines,
+    NvU16 *pTrailingRasterLines);
+
+void nvEvoSetControlC3(NVDevEvoPtr pDevEvo, int sd);
+
+void nvEvoORSetControlC3(NVDevEvoPtr pDevEvo,
+                         const NVConnectorEvoRec *pConnectorEvo,
+                         const enum nvKmsTimingsProtocol protocol,
+                         const NvU32 orIndex,
+                         const NvU32 headMask,
+                         NVEvoUpdateState *updateState);
+
+NvU32 nvEvoGetPixelDepthC3(const enum nvKmsPixelDepth pixelDepth);
+
+NvBool nvEvoSetUsageBoundsC5(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head,
+                             const struct NvKmsUsageBounds *pUsage,
+                             NVEvoUpdateState *updateState);
+
+void nvEvoUpdateC3(NVDevEvoPtr pDevEvo,
+                   const NVEvoUpdateState *updateState,
+                   NvBool releaseElv);
+
+NvBool
+nvEvoSetCtrlIsModePossibleParams3(NVDispEvoPtr pDispEvo,
+                                  const NVEvoIsModePossibleDispInput *pInput,
+                                  NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *pImp);
+void
+nvEvoSetIsModePossibleDispOutput3(const NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *pImp,
+                                  const NvBool result,
+                                  NVEvoIsModePossibleDispOutput *pOutput);
+
+void
+nvEvoIsModePossibleC3(NVDispEvoPtr pDispEvo,
+                      const NVEvoIsModePossibleDispInput *pInput,
+                      NVEvoIsModePossibleDispOutput *pOutput);
+
+void nvEvoPrePostIMPC3(NVDispEvoPtr pDispEvo, NvBool isPre);
+
+void nvEvoSetNotifierC3(NVDevEvoRec *pDevEvo,
+                        const NvBool notify,
+                        const NvBool awaken,
+                        const NvU32 notifier,
+                        NVEvoUpdateState *updateState);
+
+static inline NvU32 nvEvoReadCapReg3(volatile const NvU32 *pCaps, NvU32 offset)
+{
+    /* Offsets are in bytes, but the array has dword-sized elements.
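For example, the capability dword at byte offset 8 is read as pCaps[2].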
*/ + return pCaps[offset / sizeof(NvU32)]; +} + +NvBool nvEvoGetCapabilities3(NVDevEvoPtr pDevEvo, + NVEvoParseCapabilityNotifierFunc3 *pParse, + NVEvoHwFormatFromKmsFormatFunc3 *pGetHwFmt, + NvU32 hwclass, size_t length); + +void nvEvoParseCapabilityNotifier6(NVDevEvoPtr pDevEvo, + NVEvoSubDevPtr pEvoSubDev, + volatile const NvU32 *pCaps); + +NvU32 nvHwFormatFromKmsFormatC6(const enum NvKmsSurfaceMemoryFormat format); + +NvBool nvEvoGetCapabilitiesC6(NVDevEvoPtr pDevEvo); + +void +nvEvoFlipC6(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition); + +void nvEvoFlipTransitionWARC6(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const NVEvoSubDevHeadStateRec *pSdHeadState, + const NVFlipEvoHwState *pFlipState, + NVEvoUpdateState *updateState); + +void +nvEvoFillLUTSurfaceC5(NVEvoLutEntryRec *pLUTBuffer, + const NvU16 *red, + const NvU16 *green, + const NvU16 *blue, + int nColorMapEntries, int depth); + +void nvSetupOutputLUT5(NVDevEvoPtr pDevEvo, + const NVDispHeadStateEvoRec *pHeadState, + NvBool enableOutputLut, + NvBool bypassComposition, + NVSurfaceDescriptor **pSurfaceDesc, + NvU32 *lutSize, + NvU64 *offset, + NvBool *disableOcsc0, + NvU32 *fpNormScale, + NvBool *isLutModeVss); + +NvBool nvEvoGetHeadSetControlCursorValueC3(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo, + NvU32 *pValue); + +NvBool nvEvoValidateCursorSurfaceC3(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo); + +NvBool nvEvoValidateWindowFormatC6( + const enum NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut); + +void nvEvoInitCompNotifierC3(const NVDispEvoRec *pDispEvo, int idx); + +NvBool nvEvoIsCompNotifierCompleteC3(NVDispEvoPtr pDispEvo, int idx); + +void nvEvoWaitForCompNotifierC3(const NVDispEvoRec *pDispEvo, int idx); + +void nvEvoInitChannel3(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel); + +void nvInitScalerCoefficientsPrecomp5(NVEvoChannelPtr pChannel, + NvU32 coeff, NvU32 index); + +void nvEvoInitDefaultLutC5(NVDevEvoPtr pDevEvo); + +void nvEvoInitWindowMappingC5(const NVDispEvoRec *pDispEvo, + NVEvoModesetUpdateState *pModesetUpdateState); + +NvBool nvEvoIsChannelIdleC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvBool *result); + +NvBool nvEvoIsChannelMethodPendingC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvBool *result); + +NvBool nvEvoForceIdleSatelliteChannelC3( + NVDevEvoPtr pDevEvo, + const NVEvoIdleChannelState *idleChannelState); + +NvBool nvEvoForceIdleSatelliteChannelIgnoreLockC3( + NVDevEvoPtr pDevEvo, + const NVEvoIdleChannelState *idleChannelState); + +void nvEvoAccelerateChannelC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NvU32 sd, + const NvBool trashPendingMethods, + const NvBool unblockMethodsInExecutation, + NvU32 *pOldAccelerators); + +void nvEvoResetChannelAcceleratorsC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NvU32 sd, + const NvBool trashPendingMethods, + const NvBool unblockMethodsInExecutation, + NvU32 oldAccelerators); + +NvBool nvEvoAllocRmCtrlObjectC3(NVDevEvoPtr pDevEvo); + +void nvEvoFreeRmCtrlObjectC3(NVDevEvoPtr pDevEvo); + +void nvEvoSetImmPointOutC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd, + NVEvoUpdateState *updateState, + NvU16 x, NvU16 y); + +NvBool nvEvoQueryHeadCRC32_C3(NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NvU32 sd, + NvU32 entry_count, + CRC32NotifierCrcOut *crc32, + NvU32 *numCRC32); + +void 
nvEvoGetScanLineC3(const NVDispEvoRec *pDispEvo,
+                   const NvU32 head,
+                   NvU16 *pScanLine,
+                   NvBool *pInBlankingPeriod);
+
+NvU32 nvEvoGetActiveViewportOffsetC3(NVDispEvoRec *pDispEvo, NvU32 head);
+
+NvBool nvEvoComputeWindowScalingTapsC5(const NVDevEvoRec *pDevEvo,
+                                       const NVEvoChannel *pChannel,
+                                       NVFlipChannelEvoHwState *pHwState);
+
+const struct NvKmsCscMatrix* nvEvoGetOCsc1MatrixC5(const NVDispHeadStateEvoRec *pHeadState);
+
+struct EvoClampRangeC5 nvEvoGetOCsc1ClampRange(const NVDispHeadStateEvoRec *pHeadState);
+
+void nvEvo3PickOCsc0(const NVDispEvoRec* pDispEvo, const NvU32 head,
+                     struct NvKms3x4MatrixF32 *ocsc0Matrix, NvBool *pOutputRoundingFix);
+
+static inline const NVEvoScalerCaps*
+nvEvoGetWindowScalingCapsC3(const NVDevEvoRec *pDevEvo)
+{
+    /*
+     * Use window 0 by default. This should be fine for now since precomp
+     * scaling will only be enabled on Orin, and all windows have the same
+     * capabilities on Orin.
+     *
+     * The mapping in this function can be updated if/when precomp scaling
+     * support is extended to other display architectures.
+     */
+    return &pDevEvo->gpus[0].capabilities.window[0].scalerCaps;
+}
+
+static inline NvU32 nvGetMaxPixelsFetchedPerLine(NvU16 inWidth,
+                                                 NvU16 maxHDownscaleFactor)
+{
+    /*
+     * Volta should be:
+     * (((SetViewportSizeIn.Width + 6) * SetMaxInputScaleFactor.Horizontal + 1023 ) >> 10 ) + 6
+     *
+     * Turing should be:
+     * (((SetViewportSizeIn.Width + 6) * SetMaxInputScaleFactor.Horizontal + 1023 ) >> 10 ) + 8
+     *
+     * Ampere, which adds "overfetch" to allow tiled-display / 2-head-1-OR use cases without
+     * visual artefacts at head boundaries:
+     * (((SetViewportSizeIn.Width + 14) * SetMaxInputScaleFactor.Horizontal + 1023) >> 10) + 8
+     *
+     * We don't have to be super-precise when programming maxPixelsFetchedPerLine,
+     * so return a realistic worst-case value.
+     */
+    return (((inWidth + 14) * maxHDownscaleFactor + 1023) >> 10) + 8;
+}
+
+void nvEvoSendHdmiInfoFrameC8(const NVDispEvoRec *pDispEvo,
+                              const NvU32 head,
+                              const NvEvoInfoFrameTransmitControl transmitCtrl,
+                              const NVT_INFOFRAME_HEADER *pInfoFrameHeader,
+                              const NvU32 infoFrameSize,
+                              NvBool needChecksum);
+
+void nvEvoDisableHdmiInfoFrameC8(const NVDispEvoRec *pDispEvo,
+                                 const NvU32 head,
+                                 const NvU8 nvtInfoFrameType);
+
+void nvEvoSendDpInfoFrameSdpC8(const NVDispEvoRec *pDispEvo,
+                               const NvU32 head,
+                               const NvEvoInfoFrameTransmitControl transmitCtrl,
+                               const DPSDP_DESCRIPTOR *sdp);
+
+#endif /* __NVKMS_EVO_3_H__ */
diff --git a/src/nvidia-modeset/include/nvkms-flip-workarea.h b/src/nvidia-modeset/include/nvkms-flip-workarea.h
new file mode 100644
index 0000000..1833ba1
--- /dev/null
+++ b/src/nvidia-modeset/include/nvkms-flip-workarea.h
@@ -0,0 +1,73 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_FLIP_WORKAREA_H__ +#define __NVKMS_FLIP_WORKAREA_H__ + +#include "nvkms-types.h" + +typedef struct { + struct { + enum NvKmsOutputTf tf; + NVDpyAttributeColor dpyColor; + NvBool infoFrameOverride; + NvU32 staticMetadataLayerMask; + } hdr; + + struct NvKmsPoint viewPortPointIn; + struct NvKmsSetLutCommonParams lut; + + struct { + NvU32 viewPortPointIn : 1; + NvU32 hdr : 1; + } dirty; +} NVProposedFlipStateOneApiHead; + +struct NvKmsFlipWorkArea { + struct { + NvBool changed; + struct { + /* + * Pre flip usage bounds are the union of current and new + * usable usage bounds: the unioned usage bounds have to + * allow both the current state and the state being flipped to. + * This field is set and used by PreFlipIMP() and its + * helper functions. + */ + struct NvKmsUsageBounds preFlipUsage; + + NVFlipEvoHwState newState; + NVFlipEvoHwState oldState; + } head[NVKMS_MAX_HEADS_PER_DISP]; + } sd[NVKMS_MAX_SUBDEVICES]; + + struct { + struct { + NVProposedFlipStateOneApiHead proposedFlipState; + } apiHead[NVKMS_MAX_HEADS_PER_DISP]; + } disp[NVKMS_MAX_SUBDEVICES]; + + NVEvoUpdateState updateState; +}; + +#endif /* __NVKMS_FLIP_WORKAREA_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-flip.h b/src/nvidia-modeset/include/nvkms-flip.h new file mode 100644 index 0000000..af41e5b --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-flip.h @@ -0,0 +1,110 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_FLIP_H__ +#define __NVKMS_FLIP_H__ + + +#include "nvkms-types.h" + + +NvBool nvCheckLayerPermissions( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 apiHead, + const NvU8 changedLayersMask); + +NvBool nvCheckFlipPermissions( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 apiHead, + const struct NvKmsFlipCommonParams *pParams); + +NvBool nvFlipEvo(NVDevEvoPtr pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsFlipRequestOneHead *pFlipHead, + NvU32 numFlipHeads, + NvBool commit, + struct NvKmsFlipReply *reply, + NvBool skipUpdate, + NvBool allowFlipLock); + +void nvApiHeadGetLayerSurfaceArray(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + const NvU32 layer, + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES]); + +void nvApiHeadGetCursorInfo(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + NVSurfaceEvoPtr *ppSurfaceEvo, + NvS16 *x, NvS16 *y); + +void nvApiHeadSetViewportPointIn(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + const NvU16 x, + const NvU16 y); + +NvU32 nvApiHeadGetActiveViewportOffset(NVDispEvoRec *pDispEvo, + NvU32 apiHead); + +void nvApiHeadIdleMainLayerChannels(NVDevEvoRec *pDevEvo, + const NvU32 apiHeadMaskPerSd[NVKMS_MAX_SUBDEVICES]); + +void nvApiHeadUpdateFlipLock(NVDevEvoRec *pDevEvo, + const NvU32 apiHeadMaskPerSd[NVKMS_MAX_SUBDEVICES], + const NvBool enable); + +NvBool nvIdleMainLayerChannelCheckIdleOneApiHead(NVDispEvoPtr pDispEvo, + NvU32 apiHead); + +#define NV_SURFACE_USAGE_MASK_CURSOR 0:0 +#define NV_SURFACE_USAGE_MASK_CURSOR_DISABLE 0 +#define NV_SURFACE_USAGE_MASK_CURSOR_ENABLE 1 +#define NV_SURFACE_USAGE_MASK_LAYER(_n) (3+(3*(_n))):(1+(3*(_n))) + +#define NV_SURFACE_USAGE_MASK_LAYER_SEMAPHORE 1:1 +#define NV_SURFACE_USAGE_MASK_LAYER_SEMAPHORE_DISABLE 0 +#define NV_SURFACE_USAGE_MASK_LAYER_SEMAPHORE_ENABLE 1 +#define NV_SURFACE_USAGE_MASK_LAYER_NOTIFIER 2:2 +#define NV_SURFACE_USAGE_MASK_LAYER_NOTIFIER_DISABLE 0 +#define NV_SURFACE_USAGE_MASK_LAYER_NOTIFIER_ENABLE 1 +#define NV_SURFACE_USAGE_MASK_LAYER_SCANOUT 0:0 +#define NV_SURFACE_USAGE_MASK_LAYER_SCANOUT_DISABLE 0 +#define NV_SURFACE_USAGE_MASK_LAYER_SCANOUT_ENABLE 1 + +NvU32 nvCollectSurfaceUsageMaskOneApiHead(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + NVSurfaceEvoPtr pSurfaceEvo); + +void nvIdleLayerChannels(NVDevEvoRec *pDevEvo, + NvU32 layerMaskPerSdApiHead[NVKMS_MAX_SUBDEVICES][NVKMS_MAX_HEADS_PER_DISP]); + +void nvEvoClearSurfaceUsage(NVDevEvoRec *pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo); + +NvBool nvIdleBaseChannelOneApiHead(NVDispEvoRec *pDispEvo, NvU32 apiHead, + NvBool *pStoppedBase); + +#endif /* __NVKMS_FLIP_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-framelock.h b/src/nvidia-modeset/include/nvkms-framelock.h new file mode 100644 index 0000000..9579fb0 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-framelock.h @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_FRAMELOCK_H__ +#define __NVKMS_FRAMELOCK_H__ + +#include "nvkms-types.h" + +void nvAllocFrameLocksEvo(NVDevEvoPtr pDevEvo); +void nvFreeFrameLocksEvo(NVDevEvoPtr pDevEvo); + +NvBool nvFrameLockSetUseHouseSyncEvo(NVFrameLockEvoPtr, NvU32); +NvBool nvFrameLockGetStatusEvo(const NVFrameLockEvoRec *, + enum NvKmsFrameLockAttribute attribute, + NvS64*); + +NvBool nvSetFrameLockDisplayConfigEvo(NVDpyEvoRec *pDpyEvo, NvS64 val); +NvBool nvGetFrameLockDisplayConfigEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val); +NvBool nvGetFrameLockDisplayConfigValidValuesEvo( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues); + +NvBool nvSetDispAttributeEvo(NVDispEvoPtr pDispEvo, + struct NvKmsSetDispAttributeParams *pParams); + +NvBool nvGetDispAttributeEvo(NVDispEvoPtr pDispEvo, + struct NvKmsGetDispAttributeParams *pParams); + +NvBool nvGetDispAttributeValidValuesEvo( + const NVDispEvoRec *pDispEvo, + struct NvKmsGetDispAttributeValidValuesParams *pParams); + +NvBool nvSetFrameLockAttributeEvo( + NVFrameLockEvoRec *pFrameLockEvo, + const struct NvKmsSetFrameLockAttributeParams *pParams); + +NvBool nvGetFrameLockAttributeEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsGetFrameLockAttributeParams *pParams); + +NvBool nvGetFrameLockAttributeValidValuesEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsGetFrameLockAttributeValidValuesParams *pParams); + +NvU32 nvGetFramelockServerHead(const NVDispEvoRec *pDispEvo); +NvU32 nvGetFramelockClientHeadsMask(const NVDispEvoRec *pDispEvo); + +static inline NvBool +nvIsFramelockableHead(const NVDispEvoRec *pDispEvo, const NvU32 head) +{ + return (head != NV_INVALID_HEAD) && + ((head == nvGetFramelockServerHead(pDispEvo)) || + ((NVBIT(head) & nvGetFramelockClientHeadsMask(pDispEvo)) != 0x0)); +} + +void nvUpdateGLSFramelock(const NVDispEvoRec *pDispEvo, const NvU32 head, + const NvBool enable, const NvBool server); + +#endif /* __NVKMS_FRAMELOCK_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-hal.h b/src/nvidia-modeset/include/nvkms-hal.h new file mode 100644 index 0000000..6675a0b --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-hal.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_HAL_H__ +#define __NVKMS_HAL_H__ + +#include "nvkms-types.h" + +enum NvKmsAllocDeviceStatus nvAssignEvoCaps(NVDevEvoPtr pDevEvo); + +#endif /* __NVKMS_HAL_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-hdmi.h b/src/nvidia-modeset/include/nvkms-hdmi.h new file mode 100644 index 0000000..ec427b0 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-hdmi.h @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_HDMI_H__ +#define __NVKMS_HDMI_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvkms-types.h" + +void nvUpdateHdmiInfoFrames(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVDpyAttributeColor *pDpyColor, + const NVDispHeadInfoFrameStateEvoRec *pInfoFrameState, + NVDpyEvoRec *pDpyEvo); + +void nvDpyUpdateHdmiPreModesetEvo(NVDpyEvoPtr pDpyEvo); +void nvDpyUpdateHdmiVRRCaps(NVDpyEvoPtr pDpyEvo); +void nvSendHdmiCapsToRm(NVDpyEvoPtr pDpyEvo); + +void nvLogEdidCea861InfoEvo(NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString); +NvBool nvDpyIsHdmiEvo(const NVDpyEvoRec *pDpyEvo); +NvBool nvDpyIsHdmiDepth30Evo(const NVDpyEvoRec *pDpyEvo); + +NvBool nvHdmi204k60HzRGB444Allowed(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const NVT_TIMING *pTiming); + +void nvHdmiDpEnableDisableAudio(const NVDispEvoRec *pDispEvo, + const NvU32 head, const NvBool enable); + +void nvRemoveUnusedHdmiDpAudioDevice(const NVDispEvoRec *pDispEvo); + +void nvHdmiSetVRR(NVDispEvoPtr pDispEvo, NvU32 head, NvBool enable); + +NvBool nvInitHdmiLibrary(NVDevEvoRec *pDevEvo); +void nvTeardownHdmiLibrary(NVDevEvoRec *pDevEvo); + +NvBool nvHdmiFrlAssessLink(NVDpyEvoPtr pDpyEvo); +NvBool nvHdmiDpySupportsFrl(const NVDpyEvoRec *pDpyEvo); +NvBool nvHdmiFrlQueryConfig(const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pModeTimings, + const NVHwModeTimingsEvo *pHwTimings, + NVDpyAttributeColor *pDpyColor, + const NvBool b2Heads1Or, + const struct NvKmsModeValidationParams *pValidationParams, + HDMI_FRL_CONFIG *pConfig, + NVDscInfoEvoRec *pDscInfo); +void nvHdmiFrlClearConfig(NVDispEvoRec *pDispEvo, NvU32 activeRmId); +void nvHdmiFrlSetConfig(NVDispEvoRec *pDispEvo, NvU32 head); + +void nvHdmiDpConstructHeadAudioState(const NvU32 displayId, + const NVDpyEvoRec *pDpyEvo, + NVDispHeadAudioStateEvoRec *pAudioState); + +NvU32 nvHdmiGetEffectivePixelClockKHz(const NVDpyEvoRec *pDpyEvo, + const NVHwModeTimingsEvo *pHwTimings, + const NVDpyAttributeColor *pDpyColor); + +static inline NvBool nvHdmiDpySupportsDsc(const NVDpyEvoRec *pDpyEvo) +{ + return nvDpyIsHdmiEvo(pDpyEvo) && + pDpyEvo->parsedEdid.valid && + pDpyEvo->parsedEdid.info.hdmiForumInfo.dsc_1p2; +} + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_HDMI_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-headsurface-3d.h b/src/nvidia-modeset/include/nvkms-headsurface-3d.h new file mode 100644 index 0000000..fe00775 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-headsurface-3d.h @@ -0,0 +1,70 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_HEADSURFACE_3D_H__ +#define __NVKMS_HEADSURFACE_3D_H__ + +#include "nvkms-types.h" +#include "nvkms-headsurface.h" +#include "nvkms-headsurface-priv.h" +#include "nvkms-headsurface-config.h" + +NvBool nvHs3dAllocDevice(NVHsDeviceEvoPtr pHsDevice); + +void nvHs3dFreeDevice(NVHsDeviceEvoPtr pHsDevice); + +NvBool nvHs3dAllocChannel(NVHsChannelEvoPtr pHsChannel); + +void nvHs3dFreeChannel(NVHsChannelEvoPtr pHsChannel); + +void nvHs3dClearSurface( + NVHsChannelEvoPtr pHsChannel, + const NVHsSurfaceRec *pHsSurface, + const struct NvKmsRect surfaceRect, + NvBool yuv420); + +void nvHs3dSetConfig(NVHsChannelEvoPtr pHsChannel); + +NvBool nvHs3dRenderFrame( + NVHsChannelEvoPtr pHsChannel, + const NvHsNextFrameRequestType requestType, + const NvBool honorSwapGroupClipList, + const NvU8 dstEye, + const NvU8 dstBufferIndex, + const enum NvKmsPixelShiftMode pixelShift, + const struct NvKmsRect destRect, + const NVSurfaceEvoRec *pSurfaceEvo[NVKMS_MAX_LAYERS_PER_HEAD]); + +void nvHs3dReleaseSemaphore( + NVHsChannelEvoPtr pHsChannel, + const NVSurfaceEvoRec *pSurfaceEvo, + const enum NvKmsNIsoFormat nIsoFormat, + const NvU16 offsetInWords, + const NvU32 payload, + const NvBool allPreceedingReads); + +NvU32 nvHs3dLastRenderedOffset(NVHsChannelEvoPtr pHsChannel); + +void nvHs3dPushPendingViewportFlip(NVHsChannelEvoPtr pHsChannel); + +#endif /* __NVKMS_HEADSURFACE_3D_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-headsurface-config.h b/src/nvidia-modeset/include/nvkms-headsurface-config.h new file mode 100644 index 0000000..54740cf --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-headsurface-config.h @@ -0,0 +1,238 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef __NVKMS_HEADSURFACE_CONFIG_H__
+#define __NVKMS_HEADSURFACE_CONFIG_H__
+
+#include "nvkms-types.h"
+#include "nvkms-softfloat.h" /* NvKmsMatrixF32 */
+#include "nvidia-headsurface-types.h" /* NvHsStaticWarpMesh */
+#include "g_shader_names.h" /* ProgramName */
+
+typedef enum {
+    /*
+     * The head config does not need headSurface; there would be
+     * nothing headSurface could do to make the configuration
+     * achievable.
+     */
+    NVKMS_HEAD_SURFACE_CONFIG_STATE_NO_HEAD_SURFACE,
+
+    /*
+     * The head config might be achievable in hardware, but
+     * headSurface might help if we fail to program it in hardware.
+     */
+    NVKMS_HEAD_SURFACE_CONFIG_STATE_MAYBE_HEAD_SURFACE,
+
+    /*
+     * HeadSurface is needed, but display hardware should still be used for
+     * ViewPortOut positioning within Raster. I.e., from a display hardware
+     * standpoint, ViewPortIn == ViewPortOut, but ViewPortOut != Raster.
+     */
+    NVKMS_HEAD_SURFACE_CONFIG_STATE_PARTIAL_HEAD_SURFACE,
+
+    /*
+     * HeadSurface is needed, and should also be used for ViewPortOut
+     * scaling. I.e., from a display hardware standpoint, ViewPortIn
+     * == ViewPortOut, and ViewPortOut == Raster.
+     */
+    NVKMS_HEAD_SURFACE_CONFIG_STATE_FULL_HEAD_SURFACE,
+} NVHsConfigState;
+
+/*
+ * Configuration state to be used by NVHsChannelEvoRec.
+ */
+typedef struct {
+
+    NVHsConfigState state;
+
+    /*
+     * Which eyes are required by this headSurface configuration. Valid
+     * combinations are:
+     *
+     * NVBIT(NVKMS_LEFT), or
+     * NVBIT(NVKMS_LEFT) | NVBIT(NVKMS_RIGHT)
+     */
+    NvU8 eyeMask;
+
+    /*
+     * The dimensions of headSurface frames. This is only assigned if ::state
+     * is PARTIAL_HEAD_SURFACE or FULL_HEAD_SURFACE.
+     *
+     * If state is PARTIAL_HEAD_SURFACE, then frameSize will be the size of the
+     * display engine's viewPortOut. Any difference between the
+     * client-requested viewPortOut and the active raster region will be
+     * resolved by the display engine. Also, NVHsChannelConfig::viewPortOut
+     * will equal frameSize.
+     *
+     * If state is FULL_HEAD_SURFACE, then frameSize will be the same size as
+     * the display engine's active raster region. Any difference between the
+     * client-requested viewPortOut and the active raster region will be
+     * resolved by headSurface. NVHsChannelConfig::viewPortOut might be smaller
+     * than frameSize.
+     */
+    struct NvKmsSize frameSize;
+
+    /*
+     * The size of the surface for headSurface.
+     *
+     * Normally, ::surfaceSize will be equal to ::frameSize. However, when
+     * SwapGroup is enabled, surfaceSize is bloated to twice ::frameSize.
+     *
+     * Note that the actual vidmem surface might be allocated larger than
+     * surfaceSize: the video memory allocation is the maximum of the
+     * surfaceSize for this head across all subdevices.
+     */
+    struct NvKmsSize surfaceSize;
+
+    /*
+     * When SwapGroup is enabled, we need a staging surface to perform
+     * screen-aligned 2d blits to assemble frames of SwapGroup and non-SwapGroup
+     * content, prior to applying headSurface transformations.
+     *
+     * When SwapGroup is not enabled, ::stagingSurfaceSize will be zero.
+     */
+    struct NvKmsSize stagingSurfaceSize;
+
+    /* The region within the headSurface surfaces where the image should be
+       rendered. */
+    struct NvKmsRect viewPortOut;
+
+    /* The region headSurface will read from.
*/ + struct NvKmsRect viewPortIn; + + NvBool hs10bpcHint; + NvBool yuv420; + NvBool blendAfterWarp; + + enum NvKmsPixelShiftMode pixelShift; + enum NvKmsResamplingMethod resamplingMethod; + struct NvKmsMatrixF32 transform; + NvHsStaticWarpMesh staticWarpMesh; + + /* + * Note that any NVSurfaceEvoPtr's stored here require special attention + * for reference counting. See, e.g., HsConfigUpdateSurfaceRefCount(). + */ + + NVSurfaceEvoPtr pBlendTexSurface; + NVSurfaceEvoPtr pOffsetTexSurface; + + struct { + NVSurfaceEvoPtr pSurface; + NvU32 vertexCount; + enum NvKmsWarpMeshDataType dataType; + } warpMesh; + + NVFlipCursorEvoHwState cursor; + + /* XXX NVKMS HEADSURFACE TODO: plumb through dvc */ + NvS32 dvc; + + NvBool neededForModeset; + NvBool neededForSwapGroup; + +} NVHsChannelConfig; + +typedef struct { + + NVHsChannelEvoPtr pHsChannel; + NvBool channelReused; + + NVHsChannelConfig channelConfig; + + /* The initial surfaces to read from when initializing headSurface. */ + struct { + NVSurfaceEvoPtr pSurfaceEvo[NVKMS_MAX_EYES]; + } layer[NVKMS_MAX_LAYERS_PER_HEAD]; +} NVHsConfigOneHead; + +typedef struct { + /* State that is per-disp, per-api-head. */ + NVHsConfigOneHead apiHead[NVKMS_MAX_SUBDEVICES][NVKMS_MAX_HEADS_PER_DISP]; + + /* State that is per-head, but spans subdevices. */ + NVHsStateOneHeadAllDisps apiHeadAllDisps[NVKMS_MAX_HEADS_PER_DISP]; + + /* + * Whether apiHeadAllDisps[]::surface[] were reused from the current + * configuration. + * + * surfacesReused[] is indexed by the api heads. + */ + NvBool surfacesReused[NVKMS_MAX_HEADS_PER_DISP]; + + /* + * Whether the modeset asked to apply the requested configuration, or only + * test if the requested configuration is valid. I.e., + * NvKmsSetModeParams::request::commit. + */ + NvBool commit; + +} NVHsConfig; + +NvBool nvHsConfigInitModeset( + NVDevEvoRec *pDevEvo, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply, + const struct NvKmsPerOpenDev *pOpenDev, + NVHsConfig *pHsConfig); + +void nvHsConfigInitSwapGroup( + const NVDevEvoRec *pDevEvo, + const NVSwapGroupRec *pSwapGroup, + const NvBool neededForSwapGroup, + NVHsConfig *pHsConfig); + +NvBool nvHsConfigDowngrade( + NVDevEvoRec *pDevEvo, + const struct NvKmsSetModeRequest *pRequest, + NVHsConfig *pHsConfig); + +NvBool nvHsConfigAllocResources( + NVDevEvoRec *pDevEvo, + NVHsConfig *pHsConfig); + +void nvHsConfigFreeResources( + NVDevEvoRec *pDevEvo, + NVHsConfig *pHsConfig); + +void nvHsConfigStop( + NVDevEvoPtr pDevEvo, + const NVHsConfig *pHsConfig); + +void nvHsConfigStart( + NVDevEvoPtr pDevEvo, + NVHsConfig *pHsConfig); + +NvBool nvHsConfigPatchSetModeRequest(const NVDevEvoRec *pDevEvo, + const NVHsConfig *pHsConfig, + struct NvKmsPerOpenDev *pOpenDev, + struct NvKmsSetModeRequest *pRequest, + NvU32 patchedHeadsMask[NVKMS_MAX_SUBDEVICES]); +void +nvHsConfigClearPatchedSetModeRequest(const NVDevEvoRec *pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + struct NvKmsSetModeRequest *pRequest, + const NvU32 patchedHeadsMask[NVKMS_MAX_SUBDEVICES]); + +#endif /* __NVKMS_HEADSURFACE_CONFIG_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-headsurface-ioctl.h b/src/nvidia-modeset/include/nvkms-headsurface-ioctl.h new file mode 100644 index 0000000..3f2ed6d --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-headsurface-ioctl.h @@ -0,0 +1,50 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_HEADSURFACE_IOCTL_H__ +#define __NVKMS_HEADSURFACE_IOCTL_H__ + +#include "nvkms-types.h" +#include "nvkms-api.h" + +NvBool nvHsIoctlMoveCursor( + NVDispEvoPtr pDispEvo, + NvU32 head, + const struct NvKmsMoveCursorCommonParams *pParams); + +NvBool nvHsIoctlSetCursorImage( + NVDispEvoPtr pDispEvo, + const struct NvKmsPerOpenDev *pOpenDevice, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvU32 head, + const struct NvKmsSetCursorImageCommonParams *pParams); + +NvBool nvHsIoctlFlip( + NVDevEvoPtr pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsFlipRequestOneHead *pFlipHead, + NvU32 numFlipHeads, + NvBool commit, + struct NvKmsFlipReply *pReply); + +#endif /* __NVKMS_HEADSURFACE_IOCTL_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-headsurface-matrix.h b/src/nvidia-modeset/include/nvkms-headsurface-matrix.h new file mode 100644 index 0000000..7f8dddc --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-headsurface-matrix.h @@ -0,0 +1,33 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_HEADSURFACE_MATRIX_H__ +#define __NVKMS_HEADSURFACE_MATRIX_H__ + +#include "nvkms-headsurface-config.h" + +NvBool nvHsAssignTransformMatrix( + NVHsChannelConfig *pChannelConfig, + const struct NvKmsSetModeHeadSurfaceParams *p); + +#endif /* __NVKMS_HEADSURFACE_MATRIX_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-headsurface-priv.h b/src/nvidia-modeset/include/nvkms-headsurface-priv.h new file mode 100644 index 0000000..22eda17 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-headsurface-priv.h @@ -0,0 +1,531 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_HEADSURFACE_PRIV_H__ +#define __NVKMS_HEADSURFACE_PRIV_H__ + +#include "nvkms-types.h" +#include "nvkms-headsurface.h" +#include "nvkms-headsurface-config.h" +#include "nvkms-surface.h" +#include "nvkms-utils.h" + +#include "nvidia-push-init.h" +#include "nvidia-3d.h" + +#include "nv_list.h" + +/* + * This header file defines structures shared by the nvkms-headsurface*.c source + * files. To the rest of nvkms, these structures should be opaque. + */ + +#define NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD 4 +#define NVKMS_HEAD_SURFACE_MAX_NOTIFIER_SIZE 16 +#define NVKMS_HEAD_SURFACE_NOTIFIER_BYTES_PER_HEAD \ + (NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD * \ + NVKMS_HEAD_SURFACE_MAX_NOTIFIER_SIZE) + +#define NVKMS_HEAD_SURFACE_MAX_FRAME_SEMAPHORES 2 + +#define NVKMS_HEAD_SURFACE_SEMAPHORE_BYTES_PER_HEAD \ + (sizeof(NvGpuSemaphore) * NVKMS_HEAD_SURFACE_MAX_FRAME_SEMAPHORES) + +#define NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_DISPLAYABLE 0xFFFFFFFF +#define NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_RENDERABLE 0x11111111 + +/* + * XXX NVKMS HEADSURFACE TODO: HeadSurface uses both notifiers and semaphores + * for synchronization: + * + * - Notifiers to ensure the CPU waits until after the previous frame's flip + * completes before starting the next frame. + * + * - Semaphores to ensure the flip to the next frame is not started until the + * rendering for the next frame completes. + * + * We should simplify things by using semaphores for both. 
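+ *
+ * (The FRAME_SEMAPHORE_DISPLAYABLE/_RENDERABLE payloads above appear to
+ * implement the semaphore half of this scheme: rendering marks a frame
+ * displayable before the flip acquires it, and a frame is marked renderable
+ * again once scanout has moved on.)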
+ */ +typedef struct _NVHsNotifiersOneSdRec { + NvU8 notifier + [NVKMS_MAX_HEADS_PER_DISP][NVKMS_HEAD_SURFACE_NOTIFIER_BYTES_PER_HEAD]; + NvU8 semaphore + [NVKMS_MAX_HEADS_PER_DISP][NVKMS_HEAD_SURFACE_SEMAPHORE_BYTES_PER_HEAD]; +} NVHsNotifiersOneSdRec; + +#define NVKMS_HEAD_SURFACE_NOTIFIERS_SIZE_IN_BYTES 4096 + +ct_assert(NVKMS_HEAD_SURFACE_NOTIFIERS_SIZE_IN_BYTES >= + sizeof(NVHsNotifiersOneSdRec)); + +typedef struct _NVHsNotifiersRec { + + NvU32 rmHandle; + NvKmsSurfaceHandle nvKmsHandle; + const NVSurfaceEvoRec *pSurfaceEvo; + + struct { + NVHsNotifiersOneSdRec *ptr; + struct { + NvU8 nextSlot; + } apiHead[NVKMS_MAX_HEADS_PER_DISP]; + } sd[NVKMS_MAX_SUBDEVICES]; + + enum NvKmsNIsoFormat nIsoFormat; + +} NVHsNotifiersRec; + +typedef struct _NVHsSurfaceRec { + + NvKmsSurfaceHandle nvKmsHandle; + NvU32 rmHandle; + + Nv3dBlockLinearLog2GobsPerBlock gobsPerBlock; + + const NVSurfaceEvoRec *pSurfaceEvo; + +} NVHsSurfaceRec; + +typedef struct _NVHsDeviceEvoRec { + + NVDevEvoRec *pDevEvo; + + NvU32 gpuVASpace; + + struct { + Nv3dDeviceRec device; + } nv3d; + + NVHsNotifiersRec notifiers; + +} NVHsDeviceEvoRec; + +enum NVHsChannelTexInfoEnum { + NVKMS_HEADSURFACE_TEXINFO_SRC = 0, + /* XXX NVKMS HEADSURFACE TODO: enable all the below */ + NVKMS_HEADSURFACE_TEXINFO_CURSOR = 1, + NVKMS_HEADSURFACE_TEXINFO_BLEND = 2, + NVKMS_HEADSURFACE_TEXINFO_OFFSET = 3, + NVKMS_HEADSURFACE_TEXINFO_OVERLAY = 4, + /* NVKMS_HEADSURFACE_TEXINFO_LUT = 5, */ + NVKMS_HEADSURFACE_TEXINFO_NUM, +}; + +typedef struct _NVHsChannelStatisticsOneEyeRec { + /* Running total of the number of frames rendered by headSurface. */ + NvU64 nFrames; + + /* Running total of the GPU time spent rendering, in nanoseconds. */ + NvU64 gpuTimeSpent; + + /* We compute the FPS for 5 second periods. */ + struct { + /* + * Running total of the number of frames rendered by headSurface; reset + * every 5 seconds. + */ + NvU64 nFrames; + /* + * The time, in nanoseconds, when this FPS period started, so we know + * when the 5 second period is done. + */ + NvU64 startTime; + /* + * Most recently computed FPS for the last 5 second period. + */ + NvU64 framesPerMs; + } fps; +} NVHsChannelStatisticsOneEyeRec; + +typedef struct _NVHsChannelFlipQueueEntry { + NVListRec flipQueueEntry; + NVHsLayerRequestedFlipState hwState; +} NVHsChannelFlipQueueEntry; + +typedef struct _NVHsChannelEvoRec { + + NVDispEvoRec *pDispEvo; + + NvU32 apiHead; + + struct { + NvPushChannelRec channel; + NvU32 handlePool[NV_PUSH_CHANNEL_HANDLE_POOL_NUM]; + } nvPush; + + struct { + NvU32 handle; + Nv3dChannelRec channel; + Nv3dRenderTexInfo texInfo[NVKMS_HEADSURFACE_TEXINFO_NUM]; + } nv3d; + + struct { + NvU32 handle[1]; + } nv2d; + + /* + * Flip request parameters are too large to declare on the stack. We + * preallocate them here so that we don't have to allocate and free them on + * every headSurface flip. + */ + struct NvKmsFlipRequestOneHead scratchParams; + + /* + * The index into NVDevEvoRec::apiHeadSurfaceAllDisps[apiHead]::surface[] to use + * for the next frame of headSurface. + */ + NvU8 nextIndex; + + /* + * When neededForSwapGroup is true, frames of headSurface are rendered to + * alternating offsets within double-sized headSurface surfaces. nextOffset + * is either 0 or 1, to select the offset of the next headSurface frame. + */ + NvU8 nextOffset; + + /* + * HeadSurface flips are semaphore interlocked with headSurface rendering. + * We need to use a different semaphore offset for subsequent flips. 
+     * frameSemaphoreIndex is used to alternate between
+     * NVKMS_HEAD_SURFACE_MAX_FRAME_SEMAPHORES offsets.
+     */
+    NvU8 frameSemaphoreIndex;
+
+    NVHsChannelConfig config;
+
+    NVVBlankCallbackPtr vBlankCallback;
+
+    /*
+     * NVHsChannelEvoRec keeps a list of flip queue entries, and the "current"
+     * entry. NVHsChannelFlipQueueEntry is a single entry in the flip queue.
+     *
+     * Each entry describes a validated flip request. When NVKMS is called to
+     * build the next frame of headSurface, it checks whether the next entry
+     * in the queue is ready to flip (e.g., any semaphore acquires have been
+     * satisfied). If the next flip queue entry is ready, we use it to replace
+     * the current entry. Otherwise, we continue to use the existing current
+     * entry.
+     *
+     * Surfaces within an NVHsChannelFlipQueueEntry have their reference
+     * counts:
+     *
+     * - incremented when the NVHsChannelFlipQueueEntry is added to the flip
+     *   queue.
+     *
+     * - decremented when the NVHsChannelFlipQueueEntry is removed from
+     *   current (i.e., when we do the equivalent of "flip away").
+     *
+     * To simulate EVO/NVDisplay semaphore behavior, if an
+     * NVHsChannelFlipQueueEntry specifies a semaphore:
+     *
+     * - We wait for the semaphore's acquire value to be reached before
+     *   promoting the entry from the flip queue to current.
+     *
+     * - We write the semaphore's release value when the
+     *   NVHsChannelFlipQueueEntry is removed from current (i.e., when we do
+     *   the equivalent of "flip away").
+     */
+
+    struct {
+        NVHsLayerRequestedFlipState current;
+        NVListRec queue;
+    } flipQueue[NVKMS_MAX_LAYERS_PER_HEAD];
+
+    /*
+     * This cached main layer surface is needed when the main layer is
+     * transitioning out of headSurface due to exiting a swapgroup, i.e., in
+     * this path: nvHsConfigStop() => HsConfigRestoreMainLayerSurface()
+     */
+    struct {
+        NVSurfaceEvoPtr pSurfaceEvo[NVKMS_MAX_EYES];
+    } flipQueueMainLayerState;
+
+    NvU64 lastCallbackUSec;
+
+    /*
+     * For NVKMS headsurface swap groups, at some point after the flip has
+     * been issued, NVKMS needs to check the notifier associated with that
+     * flip to see if the flip has been completed, and release the deferred
+     * request fifo entry associated with that flip. This bool reflects
+     * whether that check is done during the headsurface vblank interrupt
+     * callback or later during the RG line 1 interrupt callback.
+     */
+    NvBool usingRgIntrForSwapGroups;
+
+    /*
+     * Pointer to the RG line interrupt callback object. This is needed to
+     * enable and disable the RG interrupt callback.
+     */
+    NVRgLine1CallbackPtr pRgIntrCallback;
+
+#if NVKMS_PROCFS_ENABLE
+
+    /*
+     * We track statistics differently for SwapGroup and non-SwapGroup
+     * headSurface; we abstract the grouping into "slots". For non-SwapGroup
+     * there is only one rendered frame (one "slot"). For SwapGroup, there are
+     * three different rendered frames (so three "slots").
+     */
+#define NVKMS_HEADSURFACE_STATS_MAX_SLOTS 3
+
+#define NVKMS_HEADSURFACE_STATS_SEMAPHORE_BEFORE 0
+#define NVKMS_HEADSURFACE_STATS_SEMAPHORE_AFTER 1
+
+    /*
+     * One semaphore before the frame, and one semaphore after the frame.
+     */
+#define NVKMS_HEAD_SURFACE_STATS_SEMAPHORE_STAGE_COUNT 2
+
+    /*
+     * We need semaphores for each stereo eye for each "slot".
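+     *
+     * Worked example (illustrative arithmetic, assuming NVKMS_MAX_EYES is
+     * 2): 2 semaphore stages * 2 eyes * 3 slots = 12 statistics semaphores,
+     * which is what NVKMS_HEADSURFACE_STATS_MAX_SEMAPHORES below expands to.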
+     */
+#define NVKMS_HEADSURFACE_STATS_MAX_SEMAPHORES \
+    (NVKMS_HEAD_SURFACE_STATS_SEMAPHORE_STAGE_COUNT * \
+     NVKMS_MAX_EYES * \
+     NVKMS_HEADSURFACE_STATS_MAX_SLOTS)
+
+    struct {
+
+        NVHsChannelStatisticsOneEyeRec
+            perEye[NVKMS_MAX_EYES][NVKMS_HEADSURFACE_STATS_MAX_SLOTS];
+
+        /* How often we were called back before the previous frame was done. */
+        NvU64 nPreviousFrameNotDone;
+
+        /* How often we did not update the HS backbuffer with non-sg content. */
+        NvU64 nOmittedNonSgHsUpdates;
+
+        /* How often we had a fullscreen swapgroup, and how often we did not. */
+        NvU64 nFullscreenSgFrames;
+        NvU64 nNonFullscreenSgFrames;
+
+        /*
+         * Statistics on which Display Memory Interface (DMI) scanline we are
+         * on when headSurface is called.
+         *
+         * pHistogram is a dynamically allocated array of counts. The array
+         * has vVisible + 1 elements (the +1 is because the hardware-reported
+         * scanline values are in the inclusive range [0,vVisible]). Each
+         * element contains how many times we've been called back while on
+         * that scanline.
+         *
+         * When in the blanking region, there isn't a DMI scanline. We
+         * increment n{,Not}InBlankingPeriod to keep track of how often we are
+         * called back inside and outside of the blanking region.
+         */
+        struct {
+            NvU64 *pHistogram; /* array with vVisible + 1 elements */
+            NvU16 vVisible;
+            NvU64 nInBlankingPeriod;
+            NvU64 nNotInBlankingPeriod;
+        } scanLine;
+
+    } statistics;
+#else
+#define NVKMS_HEADSURFACE_STATS_MAX_SEMAPHORES 0
+#endif /* NVKMS_PROCFS_ENABLE */
+
+    /*
+     * We need one semaphore for the non-stall interrupt following rendering
+     * to the next viewport offset with swapgroups enabled.
+     */
+#define NVKMS_HEADSURFACE_VIEWPORT_OFFSET_SEMAPHORE_INDEX \
+    NVKMS_HEADSURFACE_STATS_MAX_SEMAPHORES
+
+#define NVKMS_HEADSURFACE_MAX_SEMAPHORES \
+    (NVKMS_HEADSURFACE_VIEWPORT_OFFSET_SEMAPHORE_INDEX + 1)
+
+    /*
+     * Whether this channel has kicked off rendering to a new viewport offset
+     * for non-swapgroup content updates, but hasn't yet kicked off the
+     * viewport flip to the new offset. Used to prevent rendering a new
+     * frame if rendering the previous frame took longer than a full frame of
+     * scanout.
+     */
+    NvBool viewportFlipPending;
+
+    /*
+     * Recorded timestamp of the last headsurface flip. Used for deciding if
+     * certain blits to the headsurface can be omitted.
+     */
+    NvU64 lastHsClientFlipTimeUs;
+
+    /*
+     * If this channel has kicked off a real flip while swapgroups were
+     * active, mark this channel as using real flips instead of blits for
+     * swapgroups, don't fast forward through headsurface flips (since every
+     * flip needs to be kicked off with every swapgroup ready event), and
+     * skip the part of the RG interrupt that would update non-swapgroup
+     * content.
+     */
+    NvBool swapGroupFlipping;
+
+} NVHsChannelEvoRec;
+
+static inline NvU8 Hs3dStatisticsGetSlot(
+    const NVHsChannelEvoRec *pHsChannel,
+    const NvHsNextFrameRequestType requestType,
+    const NvU8 dstBufferIndex,
+    const NvBool honorSwapGroupClipList)
+{
+    if (pHsChannel->config.neededForSwapGroup) {
+        switch (requestType) {
+        case NV_HS_NEXT_FRAME_REQUEST_TYPE_FIRST_FRAME:
+            /*
+             * SwapGroup FIRST_FRAME will render to pHsChannel->nextIndex with
+             * honorSwapGroupClipList==false.
+             */
+            nvAssert(dstBufferIndex < 2);
+            return dstBufferIndex;
+        case NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK:
+            /*
+             * SwapGroup VBLANK fully populates the nextIndex buffer
+             * (honorSwapGroupClipList==false), and only populates the
+             * non-swapgroup regions of the current index.
+             */
+            return honorSwapGroupClipList ? 0 : 1;
+        case NV_HS_NEXT_FRAME_REQUEST_TYPE_SWAP_GROUP_READY:
+            return 2;
+        }
+    }
+
+    return 0; /* non-SwapGroup always uses slot 0 */
+}
+
+/*!
+ * Get the offset, in words, of the frame semaphore within
+ * NVHsNotifiersOneSdRec that corresponds to (apiHead, frameSemaphoreIndex).
+ */
+static inline NvU16 HsGetFrameSemaphoreOffsetInWords(
+    const NVHsChannelEvoRec *pHsChannel)
+{
+    const NvU16 semBase =
+        offsetof(NVHsNotifiersOneSdRec, semaphore[pHsChannel->apiHead]);
+    const NvU16 semOffset = sizeof(NvGpuSemaphore) *
+        pHsChannel->frameSemaphoreIndex;
+
+    const NvU16 offsetInBytes = semBase + semOffset;
+
+    /*
+     * NVHsNotifiersOneSdRec::semaphore should be word-aligned, and
+     * sizeof(NvGpuSemaphore) is a multiple of words, so the offset to any
+     * NvGpuSemaphore within the array should be word-aligned.
+     */
+    nvAssert((offsetInBytes % 4) == 0);
+
+    return offsetInBytes / 4;
+}
+
+static inline void HsIncrementFrameSemaphoreIndex(
+    NVHsChannelEvoRec *pHsChannel)
+{
+    pHsChannel->frameSemaphoreIndex++;
+    pHsChannel->frameSemaphoreIndex %= NVKMS_HEAD_SURFACE_MAX_FRAME_SEMAPHORES;
+}
+
+static inline NvU8 HsGetPreviousOffset(
+    const NVHsChannelEvoRec *pHsChannel)
+{
+    nvAssert(pHsChannel->config.neededForSwapGroup);
+
+    nvAssert(pHsChannel->config.surfaceSize.height ==
+             (pHsChannel->config.frameSize.height * 2));
+
+    return A_minus_b_with_wrap_U8(pHsChannel->nextOffset, 1,
+                                  NVKMS_HEAD_SURFACE_MAX_BUFFERS);
+}
+
+static inline void HsIncrementNextOffset(
+    const NVHsDeviceEvoRec *pHsDevice,
+    NVHsChannelEvoRec *pHsChannel)
+{
+    nvAssert(pHsChannel->config.neededForSwapGroup);
+
+    nvAssert(pHsChannel->config.surfaceSize.height ==
+             (pHsChannel->config.frameSize.height * 2));
+
+    pHsChannel->nextOffset++;
+    pHsChannel->nextOffset %= 2;
+}
+
+static inline void HsIncrementNextIndex(
+    const NVHsDeviceEvoRec *pHsDevice,
+    NVHsChannelEvoRec *pHsChannel)
+{
+    const NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo;
+    const NvU32 surfaceCount =
+        pDevEvo->apiHeadSurfaceAllDisps[pHsChannel->apiHead].surfaceCount;
+
+    nvAssert(surfaceCount > 0);
+
+    pHsChannel->nextIndex++;
+    pHsChannel->nextIndex %= surfaceCount;
+}
+
+static inline void HsChangeSurfaceFlipRefCount(
+    NVDevEvoPtr pDevEvo,
+    NVSurfaceEvoPtr pSurfaceEvo,
+    NvBool increase)
+{
+    if (pSurfaceEvo != NULL) {
+        if (increase) {
+            nvEvoIncrementSurfaceRefCnts(pSurfaceEvo);
+        } else {
+            nvEvoDecrementSurfaceRefCnts(pDevEvo, pSurfaceEvo);
+        }
+    }
+}
+
+/*!
+ * Get the last NVHsLayerRequestedFlipState entry in the pHsChannel's flip
+ * queue for the specified layer.
+ *
+ * If the flip queue is empty, return the 'current' entry. Otherwise, return
+ * the most recently queued entry.
+ *
+ * This function cannot fail.
+ */
+static inline const NVHsLayerRequestedFlipState *HsGetLastFlipQueueEntry(
+    const NVHsChannelEvoRec *pHsChannel,
+    const NvU8 layer)
+{
+    const NVListRec *pFlipQueue = &pHsChannel->flipQueue[layer].queue;
+    const NVHsChannelFlipQueueEntry *pEntry;
+
+    /*
+     * XXX NVKMS HEADSURFACE TODO: use nvListIsEmpty() once bugfix_main is
+     * updated to make nvListIsEmpty()'s argument const; see changelist
+     * 23614050.
+ * + * if (nvListIsEmpty(pFlipQueue)) { + */ + if (pFlipQueue->next == pFlipQueue) { + return &pHsChannel->flipQueue[layer].current; + } + + pEntry = nvListLastEntry(pFlipQueue, + NVHsChannelFlipQueueEntry, + flipQueueEntry); + + return &pEntry->hwState; +} + +#endif /* __NVKMS_HEADSURFACE_PRIV_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-headsurface-swapgroup.h b/src/nvidia-modeset/include/nvkms-headsurface-swapgroup.h new file mode 100644 index 0000000..d86c7e3 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-headsurface-swapgroup.h @@ -0,0 +1,89 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_HEADSURFACE_SWAPGROUP_H__ +#define __NVKMS_HEADSURFACE_SWAPGROUP_H__ + +#include "nvkms-types.h" + +NvBool nvHsSwapGroupIsHeadSurfaceNeeded( + const NVDispEvoRec *pDispEvo, + const NvU32 apiHead); + +void nvHsSwapGroupRelease( + NVDevEvoPtr pDevEvo, + NVSwapGroupRec *pSwapGroup); + +NVSwapGroupRec* nvHsAllocSwapGroup( + NVDevEvoPtr pDevEvo, + const struct NvKmsAllocSwapGroupRequest *pRequest); + +NVSwapGroupRec *nvHsGetSwapGroupStruct( + const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsSwapGroupHandle handle); + +NVSwapGroupRec *nvHsGetSwapGroup( + const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsSwapGroupHandle handle); + +void nvHsDecrementSwapGroupRefCnt(NVSwapGroupPtr pSwapGroup); + +NvBool nvHsIncrementSwapGroupRefCnt(NVSwapGroupPtr pSwapGroup); + +void nvHsFreeSwapGroup( + NVDevEvoPtr pDevEvo, + NVSwapGroupRec *pSwapGroup); + +typedef struct _NVHsJoinSwapGroupWorkArea { + NVDevEvoPtr pDevEvo; + NVSwapGroupRec *pSwapGroup; + NVDeferredRequestFifoRec *pDeferredRequestFifo; + struct NvKmsPerOpen *pEventOpenFd; + NvBool enabledHeadSurface; +} NVHsJoinSwapGroupWorkArea; + +NvBool nvHsJoinSwapGroup( + NVHsJoinSwapGroupWorkArea *joinSwapGroupWorkArea, + NvU32 numHandles, + NvBool pendingJoin); + +void nvHsLeaveSwapGroup( + NVDevEvoPtr pDevEvo, + NVDeferredRequestFifoRec *pDeferredRequestFifo, + NvBool teardown); + +NvBool nvHsSetSwapGroupClipList( + NVDevEvoPtr pDevEvo, + NVSwapGroupRec *pSwapGroup, + const NvU16 nClips, + struct NvKmsRect *pClipList); + +void nvHsSwapGroupReady( + NVDevEvoPtr pDevEvo, + NVDeferredRequestFifoRec *pDeferredRequestFifo, + const NvU32 request); + +NvBool nvHsSwapGroupGetPerEyeStereo( + const NVSwapGroupRec *pSwapGroup); + +#endif /* __NVKMS_HEADSURFACE_SWAPGROUP_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-headsurface.h b/src/nvidia-modeset/include/nvkms-headsurface.h new file mode 100644 index 0000000..a32b14b --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-headsurface.h @@ -0,0 +1,134 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_HEADSURFACE_H__ +#define __NVKMS_HEADSURFACE_H__ + +#include "nvkms-types.h" + +typedef struct _NVHsLayerRequestedFlipStateRec { + NVSurfaceEvoPtr pSurfaceEvo[NVKMS_MAX_EYES]; + NVFlipCompletionNotifierEvoHwState completionNotifier; + NVFlipSyncObjectEvoHwState syncObject; + NvBool perEyeStereoFlip; + NvU8 minPresentInterval; +} NVHsLayerRequestedFlipState; + +NvU64 nvHsMapSurfaceToDevice( + const NVDevEvoRec *pDevEvo, + const NvU32 rmHandle, + const NvU64 sizeInBytes, + enum NvHsMapPermissions hsMapPermissions); + +void nvHsUnmapSurfaceFromDevice( + const NVDevEvoRec *pDevEvo, + const NvU32 rmHandle, + const NvU64 gpuAddress); + +NVHsSurfacePtr nvHsAllocSurface( + NVDevEvoRec *pDevEvo, + const NvBool requireDisplayHardwareAccess, + const enum NvKmsSurfaceMemoryFormat format, + const NvU32 widthInPixels, + const NvU32 heightInPixels); + +void nvHsFreeSurface( + NVDevEvoRec *pDevEvo, + NVHsSurfacePtr pHsSurface); + +NvBool nvHsAllocDevice( + NVDevEvoRec *pDevEvo, + const struct NvKmsAllocDeviceRequest *pRequest); + +void nvHsFreeDevice(NVDevEvoRec *pDevEvo); + +NVHsChannelEvoPtr nvHsAllocChannel(NVDispEvoRec *pDispEvo, NvU32 apiHead); + +void nvHsFreeChannel(NVHsChannelEvoPtr pHsChannel); + +void nvHsPushFlipQueueEntry( + NVHsChannelEvoPtr pHsChannel, + const NvU8 layer, + const NVHsLayerRequestedFlipState *pHwState); + +void nvHsDrainFlipQueue( + NVHsChannelEvoPtr pHsChannel); + +NvBool nvHsIdleFlipQueue( + NVHsChannelEvoPtr pHsChannel, + NvBool force); + +void nvHsInitNotifiers( + NVHsDeviceEvoPtr pHsDevice, + NVHsChannelEvoPtr pHsChannel); + +void nvHsFlip( + NVHsDeviceEvoPtr pHsDevice, + NVHsChannelEvoPtr pHsChannel, + const NvU8 eyeMask, + const NvBool perEyeStereoFlip, + const NvU8 currentIndex, + const NVHsStateOneHeadAllDisps *pHsOneHeadAllDisps, + const NvBool isFirstFlip, + const NvBool allowFlipLock); + +typedef enum { + NV_HS_NEXT_FRAME_REQUEST_TYPE_FIRST_FRAME, + NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK, + NV_HS_NEXT_FRAME_REQUEST_TYPE_SWAP_GROUP_READY, +} NvHsNextFrameRequestType; + +void nvHsNextFrame( + NVHsDeviceEvoPtr pHsDevice, + NVHsChannelEvoPtr pHsChannel, + const NvHsNextFrameRequestType requestType); + +void nvHsAddVBlankCallback(NVHsChannelEvoPtr pHsChannel); + +void nvHsRemoveVBlankCallback(NVHsChannelEvoPtr pHsChannel); + +void nvHsAddRgLine1Callback(NVHsChannelEvoPtr pHsChannel); + +void nvHsRemoveRgLine1Callback(NVHsChannelEvoPtr pHsChannel); + +void nvHsAllocStatistics( + NVHsChannelEvoPtr pHsChannel); + +void nvHsFreeStatistics( + NVHsChannelEvoPtr pHsChannel); + +void nvHsProcessPendingViewportFlips(NVDevEvoPtr pDevEvo); + +NVSurfaceEvoRec *nvHsGetNvKmsSurface(const NVDevEvoRec *pDevEvo, + NvKmsSurfaceHandle surfaceHandle, + const NvBool requireDisplayHardwareAccess); + +#if NVKMS_PROCFS_ENABLE +void nvHsProcFs( + NVEvoInfoStringRec *pInfoString, + NVDevEvoRec *pDevEvo, + NvU32 dispIndex, + NvU32 apiHead); +#endif + +#endif /* __NVKMS_HEADSURFACE_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-hw-flip.h b/src/nvidia-modeset/include/nvkms-hw-flip.h new file mode 100644 index 0000000..19f2485 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-hw-flip.h @@ -0,0 +1,128 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_HW_FLIP_H__ +#define __NVKMS_HW_FLIP_H__ + + +#include "nvkms-types.h" +#include "nvkms-flip-workarea.h" + +NvBool nvIsLayerDirty(const struct NvKmsFlipCommonParams *pParams, + const NvU32 layer); + +void nvClearFlipEvoHwState( + NVFlipEvoHwState *pFlipState); + +void nvInitFlipEvoHwState( + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + NVFlipEvoHwState *pFlipState); + +NvBool nvUpdateFlipEvoHwState( + const struct NvKmsPerOpenDev *pOpenDev, + NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const struct NvKmsFlipCommonParams *pParams, + const NVHwModeTimingsEvo *pTimings, + const NvU8 tilePosition, + NVFlipEvoHwState *pFlipState, + NvBool allowVrr); + +void +nvOverrideScalingUsageBounds(const NVDevEvoRec *pDevEvo, + NvU32 head, + NVFlipEvoHwState *pFlipState, + const struct NvKmsUsageBounds *pPossibleUsage); + +NvBool nvValidateFlipEvoHwState( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings, + const NVFlipEvoHwState *pFlipState); + +void +nvUpdateSurfacesFlipRefCount( + NVDevEvoPtr pDevEvo, + const NvU32 head, + NVFlipEvoHwState *pFlipState, + NvBool increase); + +void nvFlipEvoOneHead( + NVDevEvoPtr pDevEvo, + const NvU32 sd, + const NvU32 head, + const NVT_HDR_STATIC_METADATA *pHdrInfo, + const NVFlipEvoHwState *pFlipState, + NvBool allowFlipLock, + NVEvoUpdateState *updateState); + +void nvEvoCancelPostFlipIMPTimer( + NVDevEvoPtr pDevEvo); + +void nvFillPostSyncptReplyOneChannel( + NVEvoChannel *pChannel, + enum NvKmsSyncptType postType, + struct NvKmsSyncpt *postSyncpt, + const NVFlipSyncObjectEvoHwState *pHwSyncObject); + +NvBool nvAllocatePreFlipBandwidth(NVDevEvoPtr pDevEvo, + struct NvKmsFlipWorkArea *pWorkArea); + +void nvPreFlip(NVDevEvoRec *pDevEvo, + struct NvKmsFlipWorkArea *pWorkArea, + const NvU32 applyAllowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES], + const NvU32 allowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES], + const NvBool skipUpdate); + +void nvPostFlip(NVDevEvoRec *pDevEvo, + struct NvKmsFlipWorkArea *pWorkArea, + const NvBool skipUpdate, + const NvU32 applyAllowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES], + NvS32 *pVrrSemaphoreIndex); + +NvBool nvPrepareToDoPreFlip(NVDevEvoRec *pDevEvo, + struct NvKmsFlipWorkArea *pWorkArea); + +NvBool nvAssignNVFlipEvoHwState(NVDevEvoRec *pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const NvU32 sd, + const NvU32 head, + const struct 
NvKmsFlipCommonParams *pParams, + const NvBool allowVrr, + NVFlipEvoHwState *pFlipHwState); + +void nvIdleMainLayerChannels( + NVDevEvoPtr pDevEvo, + const NVEvoChannelMask *idleChannelMaskPerSd, + NvBool allowStopBase); + +NvBool nvNeedToToggleFlipLock(const NVDispEvoRec *pDispEvo, + const NvU32 head, const NvBool enable); + +void nvToggleFlipLockPerDisp(NVDispEvoRec *pDispEvo, const NvU32 headMask, + const NvBool enable); + +#endif /* __NVKMS_HW_FLIP_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-lut.h b/src/nvidia-modeset/include/nvkms-lut.h new file mode 100644 index 0000000..623ecfe --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-lut.h @@ -0,0 +1,63 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_LUT_H__ +#define __NVKMS_LUT_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvkms-types.h" + +NvBool nvSetTmoLutSurfaceEvo(NVDevEvoPtr pDevEvo, + NVFlipChannelEvoHwState *pHwState); + +void nvFreeUnrefedTmoLutSurfacesEvo(NVDevEvoPtr pDevEvo, + NVFlipEvoHwState *pFlipState, + NvU32 head); + +void nvInvalidateDefaultLut(NVDevEvoPtr pDevEvo); + +NvBool nvAllocLutSurfacesEvo(NVDevEvoPtr pDevEvo); + +void nvFreeLutSurfacesEvo(NVDevEvoPtr pDevEvo); + +void nvUploadDataToLutSurfaceEvo(NVSurfaceEvoPtr pSurfEvo, + const NVEvoLutDataRec *pLUTBuffer, + NVDispEvoPtr pDispEvo); + +static inline void nvCancelLutUpdateEvo( + const NVDispEvoRec *pDispEvo, + const NvU32 apiHead) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + nvkms_free_timer(pDevEvo->lut.apiHead[apiHead].disp[pDispEvo->displayOwner].updateTimer); + pDevEvo->lut.apiHead[apiHead].disp[pDispEvo->displayOwner].updateTimer = NULL; +} + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_LUT_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-modepool.h b/src/nvidia-modeset/include/nvkms-modepool.h new file mode 100644 index 0000000..4e2c677 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-modepool.h @@ -0,0 +1,65 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_MODEPOOL_H__ +#define __NVKMS_MODEPOOL_H__ + +#include "nvkms-types.h" +#include "nvkms-utils.h" /* NVEvoLogType */ + +#ifdef __cplusplus +extern "C" { +#endif + +void +nvValidateModeIndex(NVDpyEvoPtr pDpyEvo, + const struct NvKmsValidateModeIndexRequest *pRequest, + struct NvKmsValidateModeIndexReply *pReply); +void +nvValidateModeEvo(NVDpyEvoPtr pDpyEvo, + const struct NvKmsValidateModeRequest *pRequest, + struct NvKmsValidateModeReply *pReply); + +void nvEvoLogModeValidationModeTimings(NVEvoInfoStringPtr + pInfoString, + const NvModeTimings *pModeTimings); + +NvBool nvValidateModeForModeset(NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const struct NvKmsMode *pKmsMode, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVDpyAttributeColor *pDpyColor, + NVHwModeTimingsEvo *pTimingsEvo, + NVDispHeadInfoFrameStateEvoRec *pInfoFrameState); + +const NVT_TIMING *nvFindEdidNVT_TIMING( + const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pModeTimings, + const struct NvKmsModeValidationParams *pParams); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_MODEPOOL_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-modeset-types.h b/src/nvidia-modeset/include/nvkms-modeset-types.h new file mode 100644 index 0000000..aeff65e --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-modeset-types.h @@ -0,0 +1,88 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_MODESET_TYPES_H__ +#define __NVKMS_MODESET_TYPES_H__ + +/* This header file defines types used internally by the modeset path. */ + +#include "nvkms-types.h" + +typedef struct { + NvU32 hwHeadsMask; + struct NvKmsModeValidationParams modeValidationParams; + NVHwModeTimingsEvo timings; + struct NvKmsPoint viewPortPointIn; + NvU32 activeRmId; + NVDpyIdList dpyIdList; + NVAttributesSetEvoRec attributes; + struct NvKmsSetLutCommonParams lut; + NVDispStereoParamsEvoRec stereo; + NVDscInfoEvoRec dscInfo; + NVDispHeadInfoFrameStateEvoRec infoFrame; + enum NvKmsOutputTf tf; + NvBool hdrInfoFrameOverride; + NvU32 hdrStaticMetadataLayerMask; + NvBool colorSpaceSpecified : 1; + NvBool colorBpcSpecified : 1; + NvBool colorRangeSpecified : 1; + NvBool hs10bpcHint : 1; + NvBool changed : 1; +} NVProposedModeSetStateOneApiHead; + +typedef struct { + NvU8 mergeHeadSection; + NVHwModeTimingsEvo timings; + NVConnectorEvoRec *pConnectorEvo; + HDMI_FRL_CONFIG hdmiFrlConfig; + NVDPLibModesetStatePtr pDpLibModesetState; + NVDispHeadAudioStateEvoRec audio; +} NVProposedModeSetHwStateOneHead; + +typedef struct { + NVProposedModeSetStateOneApiHead apiHead[NVKMS_MAX_HEADS_PER_DISP]; + NVProposedModeSetHwStateOneHead head[NVKMS_MAX_HEADS_PER_DISP]; +} NVProposedModeSetHwStateOneDisp; + +typedef struct { + struct { + NVFlipEvoHwState flip; + } head[NVKMS_MAX_HEADS_PER_DISP]; +} NVProposedModeSetHwStateOneSubDev; + +typedef struct { + NVProposedModeSetHwStateOneDisp disp[NVKMS_MAX_SUBDEVICES]; + NVProposedModeSetHwStateOneSubDev sd[NVKMS_MAX_SUBDEVICES]; + NvBool allowHeadSurfaceInNvKms : 1; +} NVProposedModeSetHwState; + +static inline void nvAssignHwHeadsMaskProposedApiHead( + NVProposedModeSetStateOneApiHead *pProposedApiHead, + const NvU32 hwHeadsMask) +{ + pProposedApiHead->hwHeadsMask = hwHeadsMask; + pProposedApiHead->attributes.numberOfHardwareHeadsUsed = + nvPopCount32(hwHeadsMask); +} + +#endif /* __NVKMS_MODESET_TYPES_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-modeset-workarea.h b/src/nvidia-modeset/include/nvkms-modeset-workarea.h new file mode 100644 index 0000000..a4db8b7 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-modeset-workarea.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_MODESET_WORKAREA_H__ +#define __NVKMS_MODESET_WORKAREA_H__ + +#include "nvkms-headsurface-config.h" + +typedef struct { + struct { + struct { + NVFlipEvoHwState newState; + NVFlipEvoHwState oldState; + } head[NVKMS_MAX_HEADS_PER_DISP]; + + struct { + NvU32 oldActiveRmId; + } apiHead[NVKMS_MAX_HEADS_PER_DISP]; + + NVDpyIdList changedDpyIdList; + + NvU32 assignedSorMask; + } sd[NVKMS_MAX_SUBDEVICES]; + NVHsConfig hsConfig; + NVEvoModesetUpdateState modesetUpdateState; + + /* + * The display bandwidth values that NVKMS needs to allocate after the + * modeset is complete. + */ + NvU32 postModesetIsoBandwidthKBPS; + NvU32 postModesetDramFloorKBPS; +} NVModeSetWorkArea; + +struct NvKmsVrrTimings { + struct { + struct { + NVHwModeTimingsEvo timings; + NvBool adjusted; + } head[NVKMS_MAX_HEADS_PER_DISP]; + } disp[NVKMS_MAX_SUBDEVICES]; +}; + +#endif /* __NVKMS_MODESET_WORKAREA_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-modeset.h b/src/nvidia-modeset/include/nvkms-modeset.h new file mode 100644 index 0000000..65f22cf --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-modeset.h @@ -0,0 +1,89 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_MODESET_H__ +#define __NVKMS_MODESET_H__ + +#include "nvkms-types.h" +#include "class/cl0092_callback.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NvBool +nvGetHwModeTimings(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + const struct NvKmsSetModeOneHeadRequest *pRequestHead, + NVHwModeTimingsEvo *pTimings, + NVDpyAttributeColor *pDpyColor, + NVDispHeadInfoFrameStateEvoRec *pInfoFrameState); + +NvBool nvGetAllowHeadSurfaceInNvKms(const NVDevEvoRec *pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsSetModeRequest *pRequest); + +NvBool nvSetDispModeEvo(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply, + NvBool bypassComposition, + NvBool doRasterLock); + +typedef NvBool (*NVShutDownApiHeadsTestFunc)( + const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + void *pData); + +void nvShutDownApiHeads(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NVShutDownApiHeadsTestFunc pTestFunc, + void *pData, + NvBool doRasterLock); + +NVVBlankCallbackPtr +nvApiHeadRegisterVBlankCallback(NVDispEvoPtr pDispEvo, + const NvU32 apiHead, + NVVBlankCallbackProc pCallback, + void *pUserData, + NvU8 listIndex); + +void nvApiHeadUnregisterVBlankCallback(NVDispEvoPtr pDispEvo, + NVVBlankCallbackPtr pCallback); + +NVRgLine1CallbackPtr +nvApiHeadAddRgLine1Callback(NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + NVRgLine1CallbackProc pCallbackProc, + void *pUserData); + +void nvApiHeadGetScanLine(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + NvU16 *pScanLine, + NvBool *pInBlankingPeriod); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_MODESET_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-prealloc-types.h b/src/nvidia-modeset/include/nvkms-prealloc-types.h new file mode 100644 index 0000000..044c4d2 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-prealloc-types.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_PREALLOC_TYPES_H__ +#define __NVKMS_PREALLOC_TYPES_H__ + +#include "nvtypes.h" + +enum NVPreallocType { + PREALLOC_TYPE_IMP_PARAMS, + PREALLOC_TYPE_SHUT_DOWN_HEADS_SET_MODE, + PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE, + PREALLOC_TYPE_MODE_SET_WORK_AREA, + PREALLOC_TYPE_FLIP_WORK_AREA, + PREALLOC_TYPE_PROPOSED_MODESET_HW_STATE, + PREALLOC_TYPE_HS_PATCHED_MODESET_REQUEST, + PREALLOC_TYPE_HS_INIT_CONFIG_HW_TIMINGS, + PREALLOC_TYPE_VALIDATE_PROPOSED_MODESET_HW_STATE, + PREALLOC_TYPE_VALIDATE_MODE_HW_MODE_TIMINGS, + PREALLOC_TYPE_VALIDATE_MODE_HDMI_FRL_CONFIG, + PREALLOC_TYPE_VALIDATE_MODE_DSC_INFO, + PREALLOC_TYPE_MODE_SET_REPLY_TMP_USAGE_BOUNDS, + PREALLOC_TYPE_VALIDATE_MODE_IMP_OUT_HW_MODE_TIMINGS, + PREALLOC_TYPE_VALIDATE_MODE_TMP_USAGE_BOUNDS, + PREALLOC_TYPE_DPLIB_IS_MODE_POSSIBLE_PARAMS, + PREALLOC_TYPE_SET_LUT_WORK_AREA, + PREALLOC_TYPE_MAX +}; + +struct NVDevPreallocRec { + void *ptr[PREALLOC_TYPE_MAX]; + NvU8 used[(PREALLOC_TYPE_MAX + 7) / 8]; +}; + +#endif /* __NVKMS_PREALLOC_TYPES_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-prealloc.h b/src/nvidia-modeset/include/nvkms-prealloc.h new file mode 100644 index 0000000..25266f3 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-prealloc.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_PREALLOC_H__ +#define __NVKMS_PREALLOC_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvkms-types.h" +#include "nvkms-prealloc-types.h" + +void *nvPreallocGet(NVDevEvoPtr pDevEvo, enum NVPreallocType type, size_t sizeCheck); +void nvPreallocRelease(NVDevEvoPtr pDevEvo, enum NVPreallocType type); + +NvBool nvPreallocAlloc(NVDevEvoPtr pDevEvo); +void nvPreallocFree(NVDevEvoPtr pDevEvo); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_PREALLOC_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-private.h b/src/nvidia-modeset/include/nvkms-private.h new file mode 100644 index 0000000..31c7608 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-private.h @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_KMS_PRIVATE_H__ +#define __NV_KMS_PRIVATE_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct NvKmsPerOpenDev *nvAllocPerOpenDev(struct NvKmsPerOpen *pOpen, + NVDevEvoPtr pDevEvo, NvBool isPrivileged); + +void nvRevokeDevice(NVDevEvoPtr pDevEvo); + +void nvFreePerOpenDev(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev); + +void nvSendDpyEventEvo(const NVDpyEvoRec *pDpyEvo, const NvU32 eventType); + +void nvSendDpyAttributeChangedEventEvo(const NVDpyEvoRec *pDpyEvo, + const enum NvKmsDpyAttribute attribute, + const NvS64 value); + +void nvSendFrameLockAttributeChangedEventEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + const enum NvKmsFrameLockAttribute attribute, + const NvS64 value); + +void nvSendFlipOccurredEventEvo(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, const NvU32 layer); + +void nvSendUnicastEvent(struct NvKmsPerOpen *pOpen); + +void nvRemoveUnicastEvent(struct NvKmsPerOpen *pOpen); + +#if defined(DEBUG) +NvBool nvSurfaceEvoInAnyOpens(const NVSurfaceEvoRec *pSurfaceEvo); +#endif + +const struct NvKmsFlipPermissions *nvGetFlipPermissionsFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev); + +const struct NvKmsModesetPermissions *nvGetModesetPermissionsFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev); + +NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDev( + struct NvKmsPerOpenDev *pOpenDev); +const NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDevConst( + const struct NvKmsPerOpenDev *pOpenDev); + +void nvKmsServiceNonStallInterrupt(void *dataPtr, NvU32 dataU32); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NV_KMS_PRIVATE_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-push.h b/src/nvidia-modeset/include/nvkms-push.h new file mode 100644 index 0000000..8ad6d4a --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-push.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __NVKMS_PUSH_H__ +#define __NVKMS_PUSH_H__ + +#include "nvkms-types.h" + +NvBool nvAllocNvPushDevice(NVDevEvoPtr pDevEvo); +void nvFreeNvPushDevice(NVDevEvoPtr pDevEvo); + +#endif diff --git a/src/nvidia-modeset/include/nvkms-rm.h b/src/nvidia-modeset/include/nvkms-rm.h new file mode 100644 index 0000000..dc503f5 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-rm.h @@ -0,0 +1,164 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_RM_H__ +#define __NVKMS_RM_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "nvkms-types.h" +#include /* NV0092_REGISTER_RG_LINE_CALLBACK_FN */ +#include /* OSVBLANKCALLBACKPROC */ + +#define NVKMS_RM_HANDLE_SPACE_DEVICE(_i) ((_i) + 1) +#define NVKMS_RM_HANDLE_SPACE_FRAMELOCK(_i) (NV_MAX_DEVICES + (_i) + 1) + +NvBool nvWriteDPCDReg(NVConnectorEvoPtr pConnectorEvo, + NvU32 dpcdAddr, + NvU8 dpcdData); + +NvBool nvRmRegisterCallback(const NVDevEvoRec *pDevEvo, + NVOS10_EVENT_KERNEL_CALLBACK_EX *cb, + struct nvkms_ref_ptr *ref_ptr, + NvU32 parentHandle, + NvU32 eventHandle, + Callback5ArgVoidReturn func, + NvU32 event); + +enum NvKmsAllocDeviceStatus nvRmAllocDisplays(NVDevEvoPtr pDevEvo); +void nvRmDestroyDisplays(NVDevEvoPtr pDevEvo); +enum NvKmsBeginEndModeset { + BEGIN_MODESET, + END_MODESET +}; +void nvRmBeginEndModeset(NVDispEvoPtr pDispEvo, enum NvKmsBeginEndModeset, NvU32 mask); +NvU32 nvRmAllocDisplayId(const NVDispEvoRec *pDispEvo, const NVDpyIdList dpyList); +void nvRmFreeDisplayId(const NVDispEvoRec *pDispEvo, NvU32 dpyId); +void nvRmGetConnectorORInfo(NVConnectorEvoPtr pConnectorEvo, NvBool assertOnly); +NVDpyIdList nvRmGetConnectedDpys(const NVDispEvoRec *pDispEvo, + NVDpyIdList dpyIdList); +NvBool nvRmResumeDP(NVDevEvoPtr pDevEvo); +void nvRmPauseDP(NVDevEvoPtr pDevEvo); +NvBool nvRmSetDpmsEvo(NVDpyEvoPtr pDpyEvo, NvS64 value); +NvBool nvRmAllocSysmem(NVDevEvoPtr pDevEvo, NvU32 memoryHandle, + NvU32 *ctxDmaFlags, void **ppBase, NvU64 size, + NvKmsMemoryIsoType isoType); +NvBool nvRMAllocateBaseChannels(NVDevEvoPtr pDevEvo); +NvBool nvRMAllocateOverlayChannels(NVDevEvoPtr pDevEvo); +NvBool nvRMAllocateWindowChannels(NVDevEvoPtr pDevEvo); +NvBool nvRMSetupEvoCoreChannel(NVDevEvoPtr pDevEvo); +void nvRMFreeBaseChannels(NVDevEvoPtr pDevEvo); +void nvRMFreeOverlayChannels(NVDevEvoPtr pDevEvo); +void nvRMFreeWindowChannels(NVDevEvoPtr pDevEvo); +void nvRMFreeEvoCoreChannel(NVDevEvoPtr pDevEvo); +NvBool nvRMSyncEvoChannel( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 errorToken); +NvBool nvRMIdleBaseChannel(NVDevEvoPtr pDevEvo, NvU32 head, NvU32 sd, + NvBool *stoppedBase); +NvBool nvRmEvoClassListCheck(const NVDevEvoRec *pDevEvo, NvU32 classID); +NvBool nvRmEvoAllocAndBindSyncpt( + NVDevEvoRec *pDevEvo, + NVEvoChannel *pChannel, + NvU32 id, + NVSurfaceDescriptor *pSurfaceDesc, + NVEvoSyncpt *pEvoSyncpt); +void nvRmEvoFreePreSyncpt(NVDevEvoRec *pDevEvo, + NVEvoChannel *pChannel); +void nvRmFreeSyncptHandle(NVDevEvoRec *pDevEvo, + NVEvoSyncpt *pSyncpt); +void nvRmEvoFreeSyncpt(NVDevEvoRec *pDevEvo, + NVEvoSyncpt *pEvoSyncpt); +void nvRmEvoFreeDispContextDMA(NVDevEvoPtr pDevEvo, + NvU32 *hDispCtxDma); +void nvRmEvoUnMapVideoMemory(NVDevEvoPtr pDevEvo, + NvU32 memoryHandle, + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES]); +NvBool nvRmEvoMapVideoMemory(NVDevEvoPtr pDevEvo, + NvU32 memoryHandle, NvU64 size, + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES], + NvU32 subDeviceMask); +NvBool nvRmAllocDeviceEvo(NVDevEvoPtr pDevEvo, + const struct NvKmsAllocDeviceRequest *pRequest); +void nvRmFreeDeviceEvo(NVDevEvoPtr pDevEvo); +NvBool nvRmRegisterDIFREventHandler(NVDevEvoPtr pDevEvo); +void nvRmUnregisterDIFREventHandler(NVDevEvoPtr pDevEvo); +NvBool nvRmIsPossibleToActivateDpyIdList(NVDispEvoPtr pDispEvo, + const NVDpyIdList dpyIdList); +NvBool nvRmVTSwitch(NVDevEvoPtr pDevEvo, NvU32 cmd); +NvBool nvRmGetVTFBInfo(NVDevEvoPtr pDevEvo); +void nvRmImportFbConsoleMemory(NVDevEvoPtr pDevEvo); +void nvRmUnmapFbConsoleMemory(NVDevEvoPtr pDevEvo); 
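+/*
+ * A minimal illustration (not part of the original header): the handle
+ * spaces defined at the top of this file are disjoint by construction, since
+ * NVKMS_RM_HANDLE_SPACE_DEVICE(_i) covers [1, NV_MAX_DEVICES] for device
+ * indices in [0, NV_MAX_DEVICES), while NVKMS_RM_HANDLE_SPACE_FRAMELOCK(_i)
+ * starts at NV_MAX_DEVICES + 1. Assuming ct_assert() and NV_MAX_DEVICES are
+ * visible here, the invariant could be spot-checked at build time with:
+ *
+ *   ct_assert(NVKMS_RM_HANDLE_SPACE_FRAMELOCK(0) >
+ *             NVKMS_RM_HANDLE_SPACE_DEVICE(NV_MAX_DEVICES - 1));
+ */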
+NvBool nvRmAllocEvoDma(NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NvU64 limit, + NvU32 ctxDmaFlags, + NvU32 subDeviceMask); +void nvRmFreeEvoDma(NVDevEvoPtr pDevEvo, NVEvoDmaPtr pDma); +NvBool nvRmQueryDpAuxLog(NVDispEvoRec *pDispEvo, NvS64 *pValue); +NvU64 nvRmGetGpuTime(NVDevEvoPtr pDevEvo); +NvBool nvRmSetGc6Allowed(NVDevEvoPtr pDevEvo, NvBool allowed); +NVRgLine1CallbackPtr +nvRmAddRgLine1Callback(NVDispEvoRec *pDispEvo, + NvU32 head, + NVRgLine1CallbackProc pCallbackProc, + void *pUserData); +void nvRmRemoveRgLine1Callback(const NVDispEvoRec *pDispEvo, + NVRgLine1CallbackPtr pCallback); + +NvU32 nvRmAddVBlankCallback( + const NVDispEvoRec *pDispEvo, + NvU32 head, + OSVBLANKCALLBACKPROC pCallback, + void *pParam2); +void nvRmRemoveVBlankCallback(const NVDispEvoRec *pDispEvo, + NvU32 callbackObjectHandle); +void nvRmMuxInit(NVDevEvoPtr pDevEvo); +NvBool nvRmMuxPre(const NVDpyEvoRec *pDpyEvo, NvMuxState state); +NvBool nvRmMuxSwitch(const NVDpyEvoRec *pDpyEvo, NvMuxState state); +NvBool nvRmMuxPost(const NVDpyEvoRec *pDpyEvo, NvMuxState state); +NvMuxState nvRmMuxState(const NVDpyEvoRec *pDpyEvo); + +void nvRmRegisterBacklight(NVDispEvoRec *pDispEvo); +void nvRmUnregisterBacklight(NVDispEvoRec *pDispEvo); + +void nvRmAllocCoreRGSyncpts(NVDevEvoPtr pDevEvo); +void nvRmFreeCoreRGSyncpts(NVDevEvoPtr pDevEvo); + +NvU32 nvRmAllocAndBindSurfaceDescriptor( + NVDevEvoPtr pDevEvo, + NvU32 hMemory, + const enum NvKmsSurfaceMemoryLayout layout, + NvU64 limit, + NVSurfaceDescriptor *pSurfaceDesc, + NvBool mapToDisplayRm); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_RM_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-rmapi.h b/src/nvidia-modeset/include/nvkms-rmapi.h new file mode 100644 index 0000000..faadd8a --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-rmapi.h @@ -0,0 +1,119 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_RMAPI_H__ + +#define __NVKMS_RMAPI_H__ + +#include "nvtypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NvU32 nvRmApiAlloc( + NvU32 hClient, + NvU32 hParent, + NvU32 hObject, + NvU32 hClass, + void *pAllocParams); + +NvU32 nvRmApiAllocMemory64( + NvU32 hClient, + NvU32 hParent, + NvU32 hMemory, + NvU32 hClass, + NvU32 flags, + void **ppAddress, + NvU64 *pLimit); + +NvU32 nvRmApiControl( + NvU32 hClient, + NvU32 hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize); + +NvU32 nvRmApiDupObject2( + NvU32 hClient, + NvU32 hParent, + NvU32 *hObjectDest, + NvU32 hClientSrc, + NvU32 hObjectSrc, + NvU32 flags); + +NvU32 nvRmApiDupObject( + NvU32 hClient, + NvU32 hParent, + NvU32 hObjectDest, + NvU32 hClientSrc, + NvU32 hObjectSrc, + NvU32 flags); + +NvU32 nvRmApiFree( + NvU32 hClient, + NvU32 hParent, + NvU32 hObject); + +NvU32 nvRmApiVidHeapControl( + void *pVidHeapControlParams); + +NvU32 nvRmApiMapMemory( + NvU32 hClient, + NvU32 hDevice, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + void **ppLinearAddress, + NvU32 flags); + +NvU32 nvRmApiUnmapMemory( + NvU32 hClient, + NvU32 hDevice, + NvU32 hMemory, + const void *pLinearAddress, + NvU32 flags); + +NvU32 nvRmApiMapMemoryDma( + NvU32 hClient, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset); + +NvU32 nvRmApiUnmapMemoryDma( + NvU32 hClient, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU32 flags, + NvU64 dmaOffset); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_RMAPI_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-setlut-workarea.h b/src/nvidia-modeset/include/nvkms-setlut-workarea.h new file mode 100644 index 0000000..1c5d98d --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-setlut-workarea.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_SETLUT_WORKAREA_H__ +#define __NVKMS_SETLUT_WORKAREA_H__ + +#include "nvkms-types.h" + +struct NvKmsSetLutWorkArea { + struct { + NVFlipEvoHwState newState; + NVFlipEvoHwState oldState; + } head[NVKMS_MAX_HEADS_PER_DISP]; +}; + +#endif diff --git a/src/nvidia-modeset/include/nvkms-softfloat.h b/src/nvidia-modeset/include/nvkms-softfloat.h new file mode 100644 index 0000000..43f9fa5 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-softfloat.h @@ -0,0 +1,90 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_SOFTFLOAT_H__ +#define __NVKMS_SOFTFLOAT_H__ + +/* + * This header file provides utility code built on top of the softfloat floating + * point emulation library. + */ + +#include "nv-softfloat.h" +#include "nvkms-api-types.h" + +/* + * A 3x3 row-major matrix of float32_t's. + */ +struct NvKmsMatrixF32 { + float32_t m[3][3]; +}; + +/* + * A 3x4 row-major matrix of float32_t's. + */ +struct NvKms3x4MatrixF32 { + float32_t m[3][4]; +}; + +/* + * Convert from an NvKmsMatrix (stores floating point values in NvU32s) to an + * NvKmsMatrixF32 (stores floating point values in float32_t). + */ +static inline struct NvKmsMatrixF32 NvKmsMatrixToNvKmsMatrixF32( + const struct NvKmsMatrix in) +{ + struct NvKmsMatrixF32 out = { }; + int i, j; + + for (j = 0; j < 3; j++) { + for (i = 0; i < 3; i++) { + out.m[i][j] = NvU32viewAsF32(in.m[i][j]); + } + } + + return out; +} + +/* + * Compute the matrix product A * B, where A is a 3x3 matrix and B is a 3x4 matrix, + * and return the resulting 3x4 matrix. + */ +static inline struct NvKms3x4MatrixF32 nvMultiply3x4Matrix(const struct NvKmsMatrixF32 *A, + const struct NvKms3x4MatrixF32 *B) +{ + struct NvKms3x4MatrixF32 C = { }; + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 4; ++j) { + for (int k = 0; k < 3; ++k) { + C.m[i][j] = f32_mulAdd(A->m[i][k], B->m[k][j], C.m[i][j]); + } + } + } + + return C; +} + +/* return x**y */ +float64_t nvKmsPow(float64_t x, float64_t y); + +#endif /* __NVKMS_SOFTFLOAT_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-stereo.h b/src/nvidia-modeset/include/nvkms-stereo.h new file mode 100644 index 0000000..245c2b8 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-stereo.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_STEREO_H__ +#define __NVKMS_STEREO_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NvBool nvSetStereo(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + NvBool enable); + +NvBool nvGetStereo(const NVDispEvoRec *pDispEvo, const NvU32 apiHead); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_STEREO_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-surface.h b/src/nvidia-modeset/include/nvkms-surface.h new file mode 100644 index 0000000..57be852 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-surface.h @@ -0,0 +1,111 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_SURFACE_H__ +#define __NVKMS_SURFACE_H__ + +#include "nvkms-types.h" + +void nvEvoRegisterSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + struct NvKmsRegisterSurfaceParams *pParams, + enum NvHsMapPermissions hsMapPermissions); + +void nvEvoUnregisterSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NvKmsSurfaceHandle surfaceHandle, + NvBool skipUpdate, + NvBool skipSync); +void nvEvoReleaseSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NvKmsSurfaceHandle surfaceHandle); + +void nvEvoFreeClientSurfaces(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NVEvoApiHandlesRec *pOpenDevSurfaceHandles); + +void nvEvoIncrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo); +void nvEvoDecrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo); + +void nvEvoIncrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo); +void nvEvoDecrementSurfaceRefCnts(NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo); +void nvEvoDecrementSurfaceRefCntsWithSync(NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo, + NvBool skipSync); + +NvBool nvEvoSurfaceRefCntsTooLarge(const NVSurfaceEvoRec *pSurfaceEvo); + +NVSurfaceEvoPtr nvEvoGetSurfaceFromHandle( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvKmsSurfaceHandle surfaceHandle, + const NvBool isUsedByCursorChannel, + const NvBool isUsedByLayerChannel); + +NVSurfaceEvoPtr nvEvoGetSurfaceFromHandleNoDispHWAccessOk( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvKmsSurfaceHandle surfaceHandle); + +NVSurfaceEvoPtr nvEvoGetSurfaceFromHandleNoHWAccess( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvKmsSurfaceHandle surfaceHandle); + +NVDeferredRequestFifoRec *nvEvoRegisterDeferredRequestFifo( + NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo); + +void nvEvoUnregisterDeferredRequestFifo( + NVDevEvoPtr pDevEvo, + NVDeferredRequestFifoRec *pDeferredRequestFifo); + +NvBool nvEvoCpuMapSurface( + NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo); + +static inline NvBool nvEvoIsSurfaceOwner(const NVSurfaceEvoRec *pSurfaceEvo, + const struct NvKmsPerOpenDev *pOpenDev, + NvKmsSurfaceHandle surfaceHandle) +{ + return ((pSurfaceEvo->owner.pOpenDev == pOpenDev) && + (pSurfaceEvo->owner.surfaceHandle == surfaceHandle)); +} + +#define ASSERT_EYES_MATCH(_arr, _field) \ + nvAssert((_arr)[NVKMS_RIGHT] == NULL || \ + (_arr)[NVKMS_LEFT]->_field == (_arr)[NVKMS_RIGHT]->_field); + +ct_assert((NVKMS_RIGHT - NVKMS_LEFT) == 1); + +#define FOR_ALL_EYES(_eye) \ + for ((_eye) = NVKMS_LEFT; (_eye) <= NVKMS_RIGHT; (_eye)++) + +#define FOR_ALL_VALID_PLANES(_planeIndex, _pSurface) \ + for ((_planeIndex) = 0; \ + (_planeIndex) < \ + (nvKmsGetSurfaceMemoryFormatInfo((_pSurface)->format))->numPlanes; \ + (_planeIndex)++) + +#endif /* __NVKMS_SURFACE_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-types.h b/src/nvidia-modeset/include/nvkms-types.h new file mode 100644 index 0000000..115e82b --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-types.h @@ -0,0 +1,3283 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKMS_TYPES_H__
+#define __NVKMS_TYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvidia-modeset-os-interface.h"
+
+#include "nvctassert.h"
+#include "nv_list.h"
+
+#include <ctrl/ctrl0073/ctrl0073specific.h> /* NV0073_CTRL_SPECIFIC_OR_PROTOCOL_* */
+#include <ctrl/ctrl0073/ctrl0073system.h> /* NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE */
+#include <ctrl/ctrl0000/ctrl0000gpu.h> /* NV0000_CTRL_GPU_MAX_ATTACHED_GPUS */
+#include <ctrl/ctrl0080/ctrl0080unix.h> /* NV0080_CTRL_OS_UNIX_VT_SWITCH_FB_INFO */
+#include <ctrl/ctrl2080/ctrl2080gpu.h> /* NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_* */
+#include <ctrl/ctrl30f1.h> /* NV30F1_CTRL_MAX_GPUS_PER_GSYNC */
+#include <ctrl/ctrl5070/ctrl5070system.h> /* NV5070_CTRL_SYSTEM_CAPS_TBL_SIZE */
+#include
+
+#include "nvkms-api.h"
+#include "nvkms-prealloc-types.h"
+
+#include "nvos.h"
+
+#include "nv_common_utils.h"
+#include "nv_assert.h"
+#include "unix_rm_handle.h"
+
+#include "nvmisc.h"
+
+#include "nvidia-push-init.h"
+
+#include "timing/nvtiming.h"
+#include "timing/dpsdp.h"
+#include "timing/nvt_dsc_pps.h"
+#include "hdmipacket/nvhdmi_frlInterface.h" // HDMI_{SRC,SINK}_CAPS
+
+#include
+
+#include "nv_smg.h"
+
+#if defined(DEBUG) || defined(DEVELOP)
+#define NVKMS_PROCFS_ENABLE 1
+#else
+#define NVKMS_PROCFS_ENABLE 0
+#endif
+
+#define NV_DMA_EVO_PUSH_BUFFER_SIZE (4 * 1024)
+#define NV_DMA_EVO_PUSH_BUFFER_PAD_SIZE (4 * 12)
+#define NV_DMA_EVO_NOTIFIER_SIZE 4096
+
+#define NV_NUM_EVO_LUT_ENTRIES 1025
+/*
+ * Size of the nvdisplay 3 LUT variable segment size header, in LUT entries
+ * (which are 8 bytes each).
+ */
+#define NV_LUT_VSS_HEADER_SIZE 4
+
+#define NV_EVO_SUBDEV_STACK_SIZE 10
+
+#define NV_DP_READ_EDID_RETRIES 18
+#define NV_DP_REREAD_EDID_DELAY_USEC 500 /* in microseconds */
+
+#define NV_EVO_SURFACE_ALIGNMENT 0x1000
+
+/*
+ * Prior to nvdisplay 4.0, the final address for all scanout surfaces must be
+ * 256B-aligned.
+ *
+ * For nvdisplay 4.0, the final address for all scanout surfaces must be
+ * 512B-aligned for GPU, and 1KB-aligned for Tegra.
+ *
+ * NVKMS already uses NV_EVO_SURFACE_ALIGNMENT to force 4KB-alignment for the
+ * base address of each scanout surface. As such, we're forcing 1KB-alignment
+ * for the corresponding ctxdma offsets in order to be compatible with all
+ * display architectures.
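+ *
+ * For example (illustrative arithmetic, not from the original comment):
+ * with a shift of 10, an offset is suitably aligned when
+ * (offset & ((1ULL << 10) - 1)) == 0, i.e. when it is a multiple of
+ * 1024 bytes.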
+ */ +#define NV_SURFACE_OFFSET_ALIGNMENT_SHIFT 10 + +#define NVKMS_BLOCK_LINEAR_LOG_GOB_WIDTH 6U /* 64 bytes (2^6) */ +#define NVKMS_BLOCK_LINEAR_GOB_WIDTH ((NvU32)1 << NVKMS_BLOCK_LINEAR_LOG_GOB_WIDTH) + +#define NVKMS_BLOCK_LINEAR_LOG_GOB_HEIGHT 3U /* 8 rows (2^3) */ +#define NVKMS_BLOCK_LINEAR_GOB_HEIGHT ((NvU32)1 << NVKMS_BLOCK_LINEAR_LOG_GOB_HEIGHT) + +#define NV_INVALID_OR 0xFFFFFFFF + +#define NVKMS_RM_HEAP_ID 0xDCBA + +#define NVKMS_MAX_WINDOWS_PER_DISP 32 + +#define NV_SYNCPT_GLOBAL_TABLE_LENGTH 1024 + +#define HEAD_MASK_QUERY(_mask, _head) (!!((_mask) & (1 << (_head)))) +#define HEAD_MASK_SET(_mask, _head) ((_mask) | (1 << (_head))) +#define HEAD_MASK_UNSET(_mask, _head) ((_mask) & ~(1 << (_head))) + +#define NVKMS_COMPOSITION_FOR_MATCH_BITS(__colorKeySelect, __match) \ + for ((__match) = (((__colorKeySelect) == \ + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) ? 1 : 0); \ + (__match) <= 1; (__match)++) + +typedef struct _NVEvoApiHandlesRec *NVEvoApiHandlesPtr; +typedef struct _NVEvoSubDeviceRec *NVSubDeviceEvoPtr; +typedef struct _NVEvoDevRec *NVDevEvoPtr; +typedef struct _NVDIFRStateEvoRec *NVDIFRStateEvoPtr; +typedef struct _NVDmaBufferEvoRec *NVDmaBufferEvoPtr; +typedef struct _NVEvoChannel *NVEvoChannelPtr; +typedef struct _NVEvoHeadControl *NVEvoHeadControlPtr; +typedef struct _NVEvoCapabilities *NVEvoCapabilitiesPtr; +typedef struct _NVEvoSubDevHeadStateRec *NVEvoSubDevHeadStatePtr; +typedef struct _NVEvoSubDevRec *NVEvoSubDevPtr; +typedef struct _NVEvoColorRec *NVEvoColorPtr; +typedef struct _NVHwModeViewPortEvo *NVHwModeViewPortEvoPtr; +typedef struct _NVHwModeTimingsEvo *NVHwModeTimingsEvoPtr; +typedef struct _NVConnectorEvoRec *NVConnectorEvoPtr; +typedef struct _NVVblankSyncObjectRec *NVVblankSyncObjectPtr; +typedef struct _NVDispHeadStateEvoRec *NVDispHeadStateEvoPtr; +typedef struct _NVDispEvoRec *NVDispEvoPtr; +typedef struct _NVParsedEdidEvoRec *NVParsedEdidEvoPtr; +typedef struct _NVVBlankCallbackRec *NVVBlankCallbackPtr; +typedef struct _NVRgLine1CallbackRec *NVRgLine1CallbackPtr; +typedef struct _NVDpyEvoRec *NVDpyEvoPtr; +typedef struct _NVFrameLockEvo *NVFrameLockEvoPtr; +typedef struct _NVEvoInfoString *NVEvoInfoStringPtr; +typedef struct _NVSurfaceEvoRec NVSurfaceEvoRec, *NVSurfaceEvoPtr; +typedef struct _NVDeferredRequestFifoRec *NVDeferredRequestFifoPtr; +typedef struct _NVSwapGroupRec *NVSwapGroupPtr; +typedef struct _NVEvoModesetUpdateState NVEvoModesetUpdateState; +typedef struct _NVLockGroup NVLockGroup; +typedef struct _NVVblankSemControl *NVVblankSemControlPtr; + +/* + * _NVHs*EvoRec are defined in nvkms-headsurface-priv.h; they are intentionally + * opaque outside of the nvkms-headsurface code. + */ +typedef struct _NVHsDeviceEvoRec *NVHsDeviceEvoPtr; +typedef struct _NVHsChannelEvoRec *NVHsChannelEvoPtr; +typedef struct _NVHsSurfaceRec *NVHsSurfacePtr; + +/* _nv_dplibXXX are defined in dp/nvdp-connector-event-sink.h */ +typedef struct _nv_dplibconnector NVDPLibConnectorRec, *NVDPLibConnectorPtr; +typedef struct _nv_dplibdevice NVDPLibDeviceRec, *NVDPLibDevicePtr; +typedef struct __nv_dplibmodesetstate NVDPLibModesetStateRec, *NVDPLibModesetStatePtr; + +/* _nv_dplibtimer is defined in nvdp-timer.hpp */ +typedef struct _nv_dplibtimer NVDPLibTimerRec, *NVDPLibTimerPtr; + +typedef struct _NVEvoApiHandlesRec { + void **pointers; /* Dynamically allocated array of pointers. */ + NvU32 numPointers; /* Number of elements in pointers array. 
*/ + NvU32 defaultSize; +} NVEvoApiHandlesRec; + +typedef struct _NVSurfaceDescriptor +{ + NvU32 memoryHandle; + NvU32 ctxDmaHandle; + NvU32 memAperture; + NvU64 memOffset; + NvBool bValid; + NvBool isMemoryMappedForDisplayAccess; +} NVSurfaceDescriptor; + +typedef struct _NVEvoDma +{ + NvU32 memoryHandle; + NVSurfaceDescriptor surfaceDesc; + + NvU64 limit; + + /* Whether this is sysmem, or vidmem accessed through a BAR1 mapping. */ + NvBool isBar1Mapping; + + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES]; +} NVEvoDma, *NVEvoDmaPtr; + +typedef struct _NVDmaBufferEvoRec +{ + NVEvoDma dma; + + NvU32 channel_handle; // handles + NvU32 num_channels; + void *control[NVKMS_MAX_SUBDEVICES]; + NvU32 *base; // Push buffer start pointer + NvU32 *buffer;// Push buffer current pointer + NvU32 *end; // Push buffer end pointer + NvU32 offset_max; // Push buffer max offset (in bytes) + NvU32 put_offset; // Push buffer last kicked off offset + NvU32 fifo_free_count; // fifo free space (in words) + NvU32 currentSubDevMask; + NVDevEvoPtr pDevEvo; +} NVDmaBufferEvoRec; + +/* EVO capabilities */ +typedef struct { + NvBool flipLock; + NvBool stereo; + NvBool scanLock; +} NVEvoLockPinCaps; +#define NV_EVO_NUM_LOCK_PIN_CAPS 16 + +typedef struct { + NvBool supportsInterlaced; + NvBool supportsSemiPlanar; + NvBool supportsPlanar; + NvBool supportsHVFlip; + NvBool supportsDSI; +} NVEvoMiscCaps; + +static inline NvU8 NVEvoScalerTapsToNum(NVEvoScalerTaps taps) +{ + NvU8 numTaps = 1; + + switch (taps) { + case NV_EVO_SCALER_8TAPS: + numTaps = 8; + break; + case NV_EVO_SCALER_5TAPS: + numTaps = 5; + break; + case NV_EVO_SCALER_3TAPS: + numTaps = 3; + break; + case NV_EVO_SCALER_2TAPS: + numTaps = 2; + break; + case NV_EVO_SCALER_1TAP: + numTaps = 1; + break; + } + + return numTaps; +} + +#define NV_EVO_SCALE_FACTOR_1X (1 << 10) +#define NV_EVO_SCALE_FACTOR_2X (2 << 10) +#define NV_EVO_SCALE_FACTOR_3X (3 << 10) +#define NV_EVO_SCALE_FACTOR_4X (4 << 10) + +typedef struct { + NvU32 maxPixelsVTaps; + NvU16 maxVDownscaleFactor; /* Scaled by 1024 */ + NvU16 maxHDownscaleFactor; /* Scaled by 1024 */ +} NVEvoScalerTapsCaps; + +typedef struct { + NvBool present; + NVEvoScalerTapsCaps taps[NV_EVO_SCALER_TAPS_MAX + 1]; +} NVEvoScalerCaps; + +typedef struct { + NvBool usable; + NvU32 maxPClkKHz; + NvBool supportsHDMIYUV420HW; + NVEvoScalerCaps scalerCaps; +} NVEvoHeadCaps; +#define NV_EVO_NUM_HEAD_CAPS 8 + +typedef struct { + NvBool dualTMDS; + NvU32 maxTMDSClkKHz; +} NVEvoSorCaps; +#define NV_EVO_NUM_SOR_CAPS 8 + +typedef struct { + NvBool usable; + NvBool csc0MatricesPresent; + NvBool cscLUTsPresent; + NvBool csc10MatrixPresent; + NvBool csc11MatrixPresent; + NvBool tmoPresent; + NVEvoScalerCaps scalerCaps; +} NVEvoWindowCaps; +#define NV_EVO_NUM_WINDOW_CAPS 32 + + +typedef NvU64 NVEvoChannelMask; + +#define NV_EVO_CHANNEL_MASK_CORE 0:0 +#define NV_EVO_CHANNEL_MASK_CORE_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_CORE_DISABLE 0 +#define NV_EVO_CHANNEL_MASK_WINDOW_FIELD 32:1 +#define NV_EVO_CHANNEL_MASK_WINDOW(_n) (1+(_n)):(1+(_n)) +#define NV_EVO_CHANNEL_MASK_WINDOW__SIZE 32 +#define NV_EVO_CHANNEL_MASK_WINDOW_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_WINDOW_DISABLE 0 +#define NV_EVO_CHANNEL_MASK_CURSOR_FIELD 40:33 +#define NV_EVO_CHANNEL_MASK_CURSOR(_n) (33+(_n)):(33+(_n)) +#define NV_EVO_CHANNEL_MASK_CURSOR__SIZE 8 +#define NV_EVO_CHANNEL_MASK_CURSOR_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_CURSOR_DISABLE 0 +#define NV_EVO_CHANNEL_MASK_BASE_FIELD 44:41 +#define NV_EVO_CHANNEL_MASK_BASE(_n) (41+(_n)):(41+(_n)) +#define 
NV_EVO_CHANNEL_MASK_BASE__SIZE 4 +#define NV_EVO_CHANNEL_MASK_BASE_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_BASE_DISABLE 0 +#define NV_EVO_CHANNEL_MASK_OVERLAY_FIELD 48:45 +#define NV_EVO_CHANNEL_MASK_OVERLAY(_n) (45+(_n)):(45+(_n)) +#define NV_EVO_CHANNEL_MASK_OVERLAY__SIZE 4 +#define NV_EVO_CHANNEL_MASK_OVERLAY_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_OVERLAY_DISABLE 0 +/* Window Immediate channels get only one bit. */ +#define NV_EVO_CHANNEL_MASK_WINDOW_IMM 49:49 +#define NV_EVO_CHANNEL_MASK_WINDOW_IMM_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_WINDOW_IMM_DISABLE 0 + +#define NV_EVO_CHANNEL_MASK_WINDOW_ALL \ + DRF_SHIFTMASK64(NV_EVO_CHANNEL_MASK_WINDOW_FIELD) +#define NV_EVO_CHANNEL_MASK_CURSOR_ALL \ + DRF_SHIFTMASK64(NV_EVO_CHANNEL_MASK_CURSOR_FIELD) +#define NV_EVO_CHANNEL_MASK_BASE_ALL \ + DRF_SHIFTMASK64(NV_EVO_CHANNEL_MASK_BASE_FIELD) +#define NV_EVO_CHANNEL_MASK_OVERLAY_ALL \ + DRF_SHIFTMASK64(NV_EVO_CHANNEL_MASK_OVERLAY_FIELD) + +static inline NvU32 NV_EVO_CHANNEL_MASK_POPCOUNT(NvU64 mask) +{ + // It's tempting to use __builtin_popcountll here, but that depends on + // intrinsics not available to nvkms in the kernel. + return nvPopCount64(mask); +} + +static inline NvU32 NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(NvU64 mask) +{ + nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(mask) == 1); + return BIT_IDX_64(DRF_VAL64(_EVO, _CHANNEL_MASK, _BASE_FIELD, mask)); +} +static inline NvU32 NV_EVO_CHANNEL_MASK_OVERLAY_HEAD_NUMBER(NvU64 mask) +{ + nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(mask) == 1); + return BIT_IDX_64(DRF_VAL64(_EVO, _CHANNEL_MASK, _OVERLAY_FIELD, mask)); +} +static inline NvU32 NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(NvU64 mask) +{ + nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(mask) == 1); + return BIT_IDX_64(DRF_VAL64(_EVO, _CHANNEL_MASK, _WINDOW_FIELD, mask)); +} + +/* EVO structures */ + +typedef struct { + struct { + NVEvoChannelMask channelMask; + NVEvoChannelMask noCoreInterlockMask; + /* Each channel in this mask was programmed with a "flip lock + * qualifying" flip. */ + NVEvoChannelMask flipLockQualifyingMask; + /* Channels set here are transitioning from NULL ctxdma to non-NULL + * ctxdma or vice-versa on this update. Only necessary/valid on Turing + * (class C5*). */ + NVEvoChannelMask flipTransitionWAR; + + struct { + NvBool vrrTearing; + } base[NVKMS_MAX_HEADS_PER_DISP]; + + /* + * Window immediate channels with pending methods are represented + * here by NV_EVO_CHANNEL_MASK_WINDOW(n) for window immediate + * channel n. + */ + NVEvoChannelMask winImmChannelMask; + + /* + * Each window channel NV_EVO_CHANNEL_MASK_WINDOW(n) needs to + * be interlocked with its corresponding window immediate channel n. 
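+         *
+         * For example (illustrative): if window channel 3 has pending
+         * methods that must be interlocked with its window immediate
+         * channel, the bit described by NV_EVO_CHANNEL_MASK_WINDOW(3)
+         * would be set in both winImmChannelMask and winImmInterlockMask.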
+ */ + NVEvoChannelMask winImmInterlockMask; + + } subdev[NVKMS_MAX_SUBDEVICES]; + +} NVEvoUpdateState; + +struct _NVEvoModesetUpdateState { + NVEvoUpdateState updateState; + NVDpyIdList connectorIds; + const NVDPLibModesetStateRec + *pDpLibModesetState[NVKMS_MAX_HEADS_PER_DISP]; + NvBool windowMappingChanged; + struct { + struct _NVEvoModesetUpdateStateOneLayer { + struct nvkms_ref_ptr *ref_ptr; + NvBool changed; + } layer[NVKMS_MAX_LAYERS_PER_HEAD]; + } flipOccurredEvent[NVKMS_MAX_HEADS_PER_DISP]; +}; + +typedef struct { + struct { + NVEvoChannelMask channelMask; + } subdev[NVKMS_MAX_SUBDEVICES]; +} NVEvoIdleChannelState; + +typedef struct { + NvU8 validTimeStampBits; + NvBool tearingFlips :1; + NvBool vrrTearingFlips :1; + NvBool perEyeStereoFlips :1; +} NVEvoChannelCaps; + +enum NVEvoImmChannel { + NV_EVO_IMM_CHANNEL_NONE, + NV_EVO_IMM_CHANNEL_PIO, + NV_EVO_IMM_CHANNEL_DMA, +}; + +typedef struct { + NvU32 handle; + void *control[NVKMS_MAX_SUBDEVICES]; +} NVEvoPioChannel; + +/*! basic syncpt structure used for pre and post syncpt usage */ +typedef struct _NVEvoSyncpt { + /* syncpt is allocated */ + NvBool allocated; + /*! syncpt id (only useful for post-syncpt) */ + NvU32 id; + /*! bitmask of channels using this syncpt */ + NVEvoChannelMask channelMask; + /*! Surface descriptor for the syncpt */ + NVSurfaceDescriptor surfaceDesc; + /*! handle of syncpt object */ + NvU32 hSyncpt; + /*! stores syncpt max value */ + NvU32 syncptMaxVal; +} NVEvoSyncpt; + +/* Tracks internal state of a vblank sync object. */ +typedef struct _NVVblankSyncObjectRec { + /* Whether the vblank sync object is currently in use by some client. */ + NvBool inUse; + + /* Whether the vblank sync object is enabled or disabled. */ + NvBool enabled; + + /* + * The index of this Rec inside of the HeadState's vblankSyncObjects array. + * Also corresponds with the index of the sync object in hardware. + */ + NvU32 index; + + /* + * This syncpoint object should be created as part of + * nvRmSetupEvoCoreChannel(). + */ + NVEvoSyncpt evoSyncpt; +} NVVblankSyncObjectRec; + +/* EVO channel, encompassing multiple subdevices and a single pushbuf */ +typedef struct _NVEvoChannel { + /* Pointer to array of per subdev notifier dma structs */ + NVEvoDmaPtr notifiersDma; + + NvU32 hwclass; + NvU32 instance; + NVEvoChannelMask channelMask; /* only one bit should be set */ + + NVDmaBufferEvoRec pb; + + NVOS10_EVENT_KERNEL_CALLBACK_EX completionNotifierEventCallback; + const struct nvkms_ref_ptr *completionNotifierEventRefPtr; + NvU32 completionNotifierEventHandle; + + /* + * GV100 timestamped flips need a duplicate update which only changes + * TIMESTAMP_MODE and MIN_PRESENT_INTERVAL fields in SET_PRESENT_CONTROL; + * to allow updating these fields without changing anything else in + * SET_PRESENT_CONTROL, normal updates to SET_PRESENT_CONTROL are cached + * here. (bug 1990958) + */ + NvU32 oldPresentControl; + + // On Turing, RM wants to be notified when the tearing mode changes. 
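+    // (Presumably caches the most recently programmed tearing mode so that
+    // a change can be detected; inferred from the field name, not stated in
+    // the original comment.)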
+ NvBool oldTearingMode; + + struct { + enum NVEvoImmChannel type; + union { + NVEvoPioChannel *pio; + struct _NVEvoChannel *dma; + } u; + } imm; + + NVEvoChannelCaps caps; + + NVEvoSyncpt postSyncpt; + + struct { + NvBool enabled; + NvBool clientSpecified; + NvU32 srcMaxLum; + NvU32 targetMaxLums[NVKMS_MAX_SUBDEVICES]; + } tmoParams; +} NVEvoChannel; + +typedef enum { + NV_EVO_NO_LOCK, + NV_EVO_FRAME_LOCK, + NV_EVO_RASTER_LOCK, +} NVEvoLockMode; + +typedef enum { + NV_EVO_LOCK_PIN_ERROR = -1, + NV_EVO_LOCK_PIN_INTERNAL_0 = 0, + NV_EVO_LOCK_PIN_0 = 0x20, +} NVEvoLockPin; + +typedef struct _NVEvoHeadControl { + NvBool interlaced; + NVEvoLockMode clientLock; + NVEvoLockPin clientLockPin; + int clientLockoutWindow; + NVEvoLockMode serverLock; + NVEvoLockPin serverLockPin; + NvBool flipLock; + NVEvoLockPin flipLockPin; + NVEvoLockPin stereoPin; + + NvBool mergeMode; + NvBool setLockOffsetX; + + NvU32 stallLockPin; + NvBool useStallLockPin; + NvBool crashLockUnstallMode; + + /* + * Whether or not this GPU is stereo locked. True if all heads are either + * frame or raster locked, and all heads are driving non-interlaced modes. + */ + NvBool stereoLocked; + + /* + * Whether or not this head is driving a HDMI 3D frame packed mode. Used + * in headcontrol only on >=GV100. + */ + NvBool hdmi3D; + + /* + * Whether or not this head is driving a mode requiring the HW YUV420 + * packer. Used in headcontrol only on >=nvdisplay 4.0. + */ + NvBool hwYuv420; + + /* This isn't actually part of HeadControl, but it's convenient */ + NvU32 lockChainPosition; +} NVEvoHeadControl; + +typedef struct _NVEvoCapabilities { + NVEvoLockPinCaps pin[NV_EVO_NUM_LOCK_PIN_CAPS]; + NVEvoMiscCaps misc; + NVEvoHeadCaps head[NV_EVO_NUM_HEAD_CAPS]; + NVEvoSorCaps sor[NV_EVO_NUM_SOR_CAPS]; + NVEvoWindowCaps window[NV_EVO_NUM_WINDOW_CAPS]; +} NVEvoCapabilities; + +typedef struct { + NVSurfaceEvoPtr pSurfaceEvo; + enum NvKmsNIsoFormat format; + NvU16 offsetInWords; +} NVFlipNIsoSurfaceEvoHwState; + +typedef struct { + NVFlipNIsoSurfaceEvoHwState surface; + NvBool awaken; +} NVFlipCompletionNotifierEvoHwState; + +typedef struct { + NvBool usingSyncpt; + union { + struct { + NVFlipNIsoSurfaceEvoHwState acquireSurface; + NvU32 acquireValue; + NVFlipNIsoSurfaceEvoHwState releaseSurface; + NvU32 releaseValue; + } semaphores; + struct { + NvBool isPreSyncptSpecified; + NvU32 preSyncpt; + NvU32 preValue; + + NvBool isPostSyncptSpecified; + NVSurfaceDescriptor surfaceDesc; + NvU32 postValue; + } syncpts; + } u; +} NVFlipSyncObjectEvoHwState; + +typedef struct { + NVSurfaceEvoPtr pLutSurfaceEvo; + NvU64 offset; + NvU32 vssSegments; + NvU32 lutEntries; + NvBool fromOverride; +} NVFlipLutHwState; + +typedef struct { + NVSurfaceEvoPtr pSurfaceEvo; + NvS16 x, y; + + struct NvKmsCompositionParams cursorCompParams; +} NVFlipCursorEvoHwState; + +typedef struct { + NVSurfaceEvoPtr pSurfaceEvo[NVKMS_MAX_EYES]; + NVFlipCompletionNotifierEvoHwState completionNotifier; + NVFlipSyncObjectEvoHwState syncObject; + + // Non-zero timeStamp value is only allowed if the channel's + // 'timeStampFlipBits' capability is > 0. + NvU64 timeStamp; + NvU8 minPresentInterval; + // True means immediate or tearing flip. False means flip-at-vblank. + NvBool tearing; + // The tearing mode passed to RM's VRR code via + // NV_VRR_TRAP_ARGUMENT_MAX_FPS_TEARING. 
+ NvBool vrrTearing; + NvBool perEyeStereoFlip; + + struct NvKmsSize sizeIn; + struct NvKmsSize sizeOut; + struct NvKmsSignedPoint outputPosition; + + NVEvoScalerTaps hTaps; + NVEvoScalerTaps vTaps; + + struct NvKmsCscMatrix cscMatrix; + + NVFlipLutHwState inputLut; + + struct NvKmsRRParams rrParams; + + struct NvKmsCompositionParams composition; + + NVFlipLutHwState tmoLut; + struct { + struct NvKmsHDRStaticMetadata val; + NvBool enabled; + } hdrStaticMetadata; + + enum NvKmsInputColorSpace colorSpace; + enum NvKmsInputTf tf; + enum NvKmsInputColorRange colorRange; + + struct { + NvBool specified; + + /* + * Maximum vertical downscale factor (scaled by 1024) + * + * For example, if the downscale factor is 1.5, then maxVDownscaleFactor + * would be 1.5 x 1024 = 1536. + */ + NvU16 vertical; + + /* + * Maximum horizontal downscale factor (scaled by 1024) + * + * See the example above for vertical. + */ + NvU16 horizontal; + } maxDownscaleFactors; + + struct { + struct NvKmsCscMatrix matrix; + NvBool enabled; + } csc00Override; + + struct { + struct NvKmsCscMatrix matrix; + NvBool enabled; + } csc01Override; + + struct { + struct NvKmsCscMatrix matrix; + NvBool enabled; + } csc10Override; + + struct { + struct NvKmsCscMatrix matrix; + NvBool enabled; + } csc11Override; +} NVFlipChannelEvoHwState; + +typedef struct { + struct NvKmsPoint viewPortPointIn; + NVFlipCursorEvoHwState cursor; + NVFlipChannelEvoHwState layer[NVKMS_MAX_LAYERS_PER_HEAD]; + struct NvKmsUsageBounds usage; + NvBool disableMidFrameAndDWCFWatermark; + enum NvKmsOutputTf tf; + + struct { + NvBool enabled; + enum NvKmsInfoFrameEOTF eotf; + struct NvKmsHDRStaticMetadata staticMetadata; + } hdrInfoFrame; + + NVFlipLutHwState outputLut; + NvU32 olutFpNormScale; + + NvBool skipLayerPendingFlips[NVKMS_MAX_LAYERS_PER_HEAD]; + + struct { + NvBool viewPortPointIn : 1; + NvBool cursorSurface : 1; + NvBool cursorPosition : 1; + NvBool tf : 1; + NvBool hdrStaticMetadata : 1; + NvBool olut : 1; + + NvBool layerPosition[NVKMS_MAX_LAYERS_PER_HEAD]; + NvBool layerSyncObjects[NVKMS_MAX_LAYERS_PER_HEAD]; + NvBool layer[NVKMS_MAX_LAYERS_PER_HEAD]; + } dirty; +} NVFlipEvoHwState; + +/* + * XXX Default to NVKMS_HDR_INFOFRAME_STATE_TRANSITIONING to send SDR + * infoframe for 2 seconds to WAR issue where some displays may remain in HDR + * mode after head has been previously shut down. + */ +enum NvKmsHDRInfoFrameState { + NVKMS_HDR_INFOFRAME_STATE_TRANSITIONING = 0, + NVKMS_HDR_INFOFRAME_STATE_DISABLED = 1, + NVKMS_HDR_INFOFRAME_STATE_ENABLED = 2, +}; + +typedef struct _NVEvoSubDevHeadStateRec { + struct NvKmsPoint viewPortPointIn; + NVFlipCursorEvoHwState cursor; + NVFlipChannelEvoHwState layer[NVKMS_MAX_LAYERS_PER_HEAD]; + NVFlipLutHwState outputLut; + NvU32 olutFpNormScale; + // Current usage bounds programmed into the hardware. + struct NvKmsUsageBounds usage; + // Usage bounds required after the last scheduled flip completes. + struct NvKmsUsageBounds targetUsage; + // Preallocated usage bounds that will be required for upcoming flips. + struct NvKmsUsageBounds preallocatedUsage; + + // Current state of MidFrameAndDWCFWatermark programmed into the hardware. + NvBool disableMidFrameAndDWCFWatermark; + // + // State of MidFrameAndDWCFWatermark required after the last scheduled + // flip completes. + // + NvBool targetDisableMidFrameAndDWCFWatermark; +} NVEvoSubDevHeadStateRec; + +#define NVKMS_HEAD_SURFACE_MAX_BUFFERS 2 + +/* + * HeadSurface state that applies to a single head, but spans across + * all subdevices. 
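+ *
+ * (For context: the surfaces[][] array below is indexed by eye and by
+ * buffer, e.g. with NVKMS_HEAD_SURFACE_MAX_BUFFERS == 2, double-buffered
+ * headSurface uses buffer indices 0 and 1; illustrative reading of the
+ * declarations that follow.)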
+ */
+typedef struct {
+    /*
+     * The size of the headSurfaces for this head, across all subdevices.
+     * headSurface might only use a subset of the surfaces on one or more
+     * subdevices in SLI Mosaic.
+     */
+    struct NvKmsSize size;
+    struct NvKmsSize stagingSize;
+
+    /*
+     * The surfaces allocated for use by headSurface on this head.
+     * Surface allocations are broadcast across subdevices, though
+     * headSurface may unicast its rendering to the headSurface
+     * surface allocations on specific subdevices.
+     */
+    struct {
+        NVHsSurfacePtr pSurface;
+        NVHsSurfacePtr pStagingSurface;
+    } surfaces[NVKMS_MAX_EYES][NVKMS_HEAD_SURFACE_MAX_BUFFERS];
+
+    /*
+     * The number of surfaces in the NVKMS_HEAD_SURFACE_MAX_BUFFERS dimension
+     * of the surfaces[][] array. Elements [0,surfaceCount-1] in the
+     * surfaces[][] array will be populated.
+     */
+    NvU32 surfaceCount;
+} NVHsStateOneHeadAllDisps;
+
+/* Subdevice-specific, channel-independent state */
+typedef struct _NVEvoSubDevRec {
+    NvU32 subDeviceInstance;
+
+    NVEvoCapabilities capabilities;
+
+    NVDispEvoPtr pDispEvo;
+
+    NvU32 setSwSpareA[NVKMS_MAX_HEADS_PER_DISP];
+
+    NVEvoSubDevHeadStateRec headState[NVKMS_MAX_HEADS_PER_DISP];
+    NVEvoHeadControl headControl[NVKMS_MAX_HEADS_PER_DISP];
+    NVEvoHeadControl headControlAssy[NVKMS_MAX_HEADS_PER_DISP];
+    void *cursorPio[NVKMS_MAX_HEADS_PER_DISP];
+    NvBool (*scanLockState)(NVDispEvoPtr pDispEvo,
+                            NVEvoSubDevPtr pEvoSubDev,
+                            NvU32 action,
+                            /* NV_INVALID_HEAD-terminated
+                             * array of head indices */
+                            const NvU32 *pHeads);
+
+    /*
+     * EVO state machine refcounter for the number of SLI or proxy framelock
+     * clients that are connected to this server.
+     */
+    NvU32 frameLockSliProxyClients;
+
+    /*
+     * Since we add all active heads as framelock clients whenever we enable
+     * the second head as a framelock client, there's no need for EVO state
+     * transitions for heads 3 and more. Instead of those state transitions,
+     * we use the frameLockExtraClients ref counter to keep track of heads
+     * 3 and greater being added as framelock clients.
+     *
+     * XXX The state machine currently will naively framelock heads 3 and
+     * greater during this state transition, even if they're not capable
+     * of being framelocked (for example, when they have very different
+     * refresh rates). Bug 976532
+     */
+    NvU32 frameLockExtraClients;
+
+    /*
+     * All of the following except the "armed" versions are set by the EVO
+     * state machine to the desired HW configuration given the current locking
+     * state.
+     * The "armed" versions represent the current hardware configuration, used
+     * to avoid excess hardware updates.
+     */
+    NvU32 frameLockServerMaskArmed;
+    NvU32 frameLockServerMaskAssy;
+    NvU32 frameLockClientMaskArmed;
+    NvU32 frameLockClientMaskAssy;
+    NvU32 frameLockExtRefClkMaskArmed;
+    NvU32 frameLockExtRefClkMaskAssy;
+    NvBool frameLockHouseSync;
+
+    NvU8 flipLockPinSetForFrameLockHeadMask;
+    NvU8 flipLockEnabledForFrameLockHeadMask;
+    NvU8 flipLockPinSetForSliHeadMask;
+    NvU8 flipLockEnabledForSliHeadMask;
+
+    NvU32 flipLockProhibitedHeadMask;
+
+    NvU32 sliRasterLockServerMask;
+    NvU32 sliRasterLockClientMask;
+
+    NVEvoLockPin sliServerLockPin;
+    NVEvoLockPin sliClientLockPin;
+} NVEvoSubDevRec;
+
+typedef struct _NVEvoColorRec {
+    NvU16 red;
+    NvU16 green;
+    NvU16 blue;
+} NVEvoColorRec;
+
+typedef struct {
+    NvU16 Red;
+    NvU16 Green;
+    NvU16 Blue;
+    NvU16 Unused;
+} NVEvoLutEntryRec;
+
+typedef struct {
+    NVEvoLutEntryRec base[NV_LUT_VSS_HEADER_SIZE + NV_NUM_EVO_LUT_ENTRIES];
+    // The output LUT requires 256-byte (2^8) alignment.
+ NVEvoLutEntryRec output[NV_LUT_VSS_HEADER_SIZE + NV_NUM_EVO_LUT_ENTRIES] + __attribute__((aligned(0x100))); +} NVEvoLutDataRec; + +typedef struct { + NvBool supportsDP13 :1; + NvBool supportsHDMI20 :1; + NvBool supportsYUV2020 :1; + NvBool inputLutAppliesToBase :1; + NvU8 validNIsoFormatMask; + NvU32 maxPitchValue; + int maxWidthInBytes; + int maxWidthInPixels; + int maxHeight; + NvU32 maxRasterWidth; + NvU32 maxRasterHeight; + struct NvKmsCompositionCapabilities cursorCompositionCaps; + NvU16 validLayerRRTransforms; + struct NvKmsLayerCapabilities layerCaps[NVKMS_MAX_LAYERS_PER_HEAD]; + struct NvKmsLUTCaps olut; + NvU8 legacyNotifierFormatSizeBytes[NVKMS_MAX_LAYERS_PER_HEAD]; + NvU8 dpYCbCr422MaxBpc; + NvU8 hdmiYCbCr422MaxBpc; +} NVEvoCapsRec; + +typedef struct { + NvU32 coreChannelClass; + size_t dmaArmedSize; + NvU32 dmaArmedOffset; +} NVEvoCoreChannelDmaRec; + + +typedef struct _NVEvoSubDeviceRec { + NvU32 handle; + NvU32 gpuId; +#define NV_INVALID_GPU_LOG_INDEX 0xFF + NvU8 gpuLogIndex; + char gpuString[NVKMS_GPU_STRING_SIZE]; + + NvU32 numEngines; + NvU32 *supportedEngines; + + /* Core channel memory mapping for ARM values */ + void *pCoreDma; + + /* ISO ctxdma programmed by EVO2 hal, into the overlay channel */ + NvU32 overlayContextDmaIso[NVKMS_MAX_HEADS_PER_DISP]; + enum NvKmsSurfaceMemoryFormat overlaySurfFormat[NVKMS_MAX_HEADS_PER_DISP]; + + /* Per head surface programmed into the core channel */ + const NVSurfaceEvoRec *pCoreChannelSurface[NVKMS_MAX_HEADS_PER_DISP]; + + /* EVO2 only, TRUE if a valid base surface passed to ->Flip() */ + NvBool isBaseSurfSpecified[NVKMS_MAX_HEADS_PER_DISP]; + enum NvKmsSurfaceMemoryFormat baseSurfFormat[NVKMS_MAX_HEADS_PER_DISP]; + + /* EVO2 only, base and output LUT state - prevents unnecessary flip interlocking */ + const NVSurfaceEvoRec *pBaseLutSurface[NVKMS_MAX_HEADS_PER_DISP]; + NvU64 baseLutOffset[NVKMS_MAX_HEADS_PER_DISP]; + const NVSurfaceEvoRec *pOutputLutSurface[NVKMS_MAX_HEADS_PER_DISP]; + NvU64 outputLutOffset[NVKMS_MAX_HEADS_PER_DISP]; + + /* Composition parameters considered for hardware programming by EVO2 hal */ + struct { + NvBool initialized; + enum NvKmsCompositionColorKeySelect colorKeySelect; + NVColorKey colorKey; + } baseComp[NVKMS_MAX_HEADS_PER_DISP], overlayComp[NVKMS_MAX_HEADS_PER_DISP]; + +} NVEvoSubDeviceRec; + +enum NvKmsLUTState { + NvKmsLUTStateUninitialized = 0, + NvKmsLUTStateIdentity = 1, + NvKmsLUTStatePQ = 2, +}; + +/* Device-specific EVO state (subdevice- and channel-independent) */ +typedef struct _NVEvoDevRec { + + NvU8 gpuLogIndex; + NvU32 allocRefCnt; /* number of ALLOC_DEVICE calls */ + NVListRec devListEntry; + + /* array of gpuIds opened with nvkms_open_gpu() */ + NvU32 openedGpuIds[NV0000_CTRL_GPU_MAX_ATTACHED_GPUS]; + + NVUnixRmHandleAllocatorRec handleAllocator; + + struct NvKmsDeviceId deviceId; + + /* MIG subscription state for SMG support */ + struct { + NvU32 gpuInstSubHandle; + NvU32 computeInstSubHandle; + } smg; + + NvU32 deviceHandle; + struct NvKmsPerOpenDev *pNvKmsOpenDev; + + struct { + NvPushDeviceRec device; + NvU32 handlePool[NV_PUSH_DEVICE_HANDLE_POOL_NUM]; + } nvPush; + + /* SLI Info */ + struct { + NvBool mosaic; + struct { + NvBool present :1; + + /* Current hardware state */ + NvBool powered :1; + + /* Software state tracking needs from hardware */ + NvBool powerNeededForRasterLock :1; + } bridge; + } sli; + + NvU32 numHeads; + NvU32 numWindows; /* NVDisplay only. */ + + NvU32 displayHandle; + + + /*! 
+ * modesetOwner points to the pOpenDev of the client that called + * NVKMS_IOCTL_GRAB_OWNERSHIP. + */ + const struct NvKmsPerOpenDev *modesetOwner; + + /*! + * Indicates whether modeset ownership or sub-ownership has changed since + * last modeset. + */ + NvBool modesetOwnerOrSubOwnerChanged; + + /*! + * modesetSubOwner points to the pOpenDev of the client that called + * NVKMS_IOCTL_ACQUIRE_PERMISSIONS with a file descriptor that grants + * NV_KMS_PERMISSIONS_TYPE_SUB_OWNER. + */ + const struct NvKmsPerOpenDev *modesetSubOwner; + + /*! + * NVEvoDevRec::numSubDevices is the number of GPUs in the SLI + * device. This is the number of NVEvoSubDevPtrs in + * NVEvoDevRec::gpus[] and the number of NVSubDeviceEvoPtr in + * NVEvoDevRec::pSubDevices. + * + * The data structure organization is summarized by the following table: + * + * NVDevEvoRec::numSubDevices (# of pSubDevs) + * | NVDevEvoRec::nDispEvo (# of pDispEvos) + * | | NVDispEvoRec::numSubDevices (# of sd per disp) + * | | | + * no SLI 1 1 1 + * SLI Mosaic N N 1 + */ + NvU32 numSubDevices; + NVSubDeviceEvoPtr pSubDevices[NVKMS_MAX_SUBDEVICES]; + + NvU32 dispClass; + NvU32 displayCommonHandle; + NvU32 rmCtrlHandle; + + unsigned int nDispEvo; + NVDispEvoPtr pDispEvo[NVKMS_MAX_SUBDEVICES]; + + NVEvoChannelPtr base[NVKMS_MAX_HEADS_PER_DISP]; + NVEvoChannelPtr core; + NVEvoChannelPtr overlay[NVKMS_MAX_HEADS_PER_DISP]; + NVEvoChannelPtr window[NVKMS_MAX_WINDOWS_PER_DISP]; + + /* NVDisplay head<->window mapping */ + NvU32 headForWindow[NVKMS_MAX_WINDOWS_PER_DISP]; + + struct { + NVEvoChannelPtr layer[NVKMS_MAX_LAYERS_PER_HEAD]; + NvU32 numLayers; + } head[NVKMS_MAX_HEADS_PER_DISP]; + + /* Pointer to array of subdev structs */ + NVEvoSubDevPtr gpus; + + NvU32 subDevMaskStack[NV_EVO_SUBDEV_STACK_SIZE]; + NvU32 subDevMaskStackDepth; + + NvU32 cursorHandle[NVKMS_MAX_HEADS_PER_DISP]; + + NVDPLibTimerPtr dpTimer; + + NvU8 capsBits[NV5070_CTRL_SYSTEM_CAPS_TBL_SIZE]; + NvU8 commonCapsBits[NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE]; + + NVEvoCapsRec caps; + + NVEvoCoreChannelDmaRec coreChannelDma; + NvU32 nvkmsGpuVASpace; + + NvBool mobile : 1; + + /* + * IO coherency modes that display supports for ISO and NISO memory + * allocations, respectively. + */ + NvKmsDispIOCoherencyModes isoIOCoherencyModes; + NvKmsDispIOCoherencyModes nisoIOCoherencyModes; + + /* + * Indicates whether the init_no_update methods that were pushed by the + * hardware during core channel allocation are still pending. + */ + NvBool coreInitMethodsPending : 1; + /* + * Indicates that NVKMS restored the console and freeing the core channel + * should leave the display configuration alone. + * + * This should be set to FALSE whenever an update is sent that flips away + * from the framebuffer console. + * + * TODO: Remove this in favor of passing a parameter explicitly to the + * functions that use it. + */ + NvBool skipConsoleRestore : 1; + /* + * Indicates that hotplug events that occur while NVKMS is the modeset owner + * should trigger console restore modesets. + */ + NvBool handleConsoleHotplugs : 1; + /* + * Cached from NvKmsSetModeRequest::allowHeadSurfaceInNvKms when the + * modeset owner does a modeset. This is needed so that when non-modeset + * owners do a partial modeset they don't override this value. + */ + NvBool allowHeadSurfaceInNvKms : 1; + + NvBool gc6Allowed : 1; + + /* + * Indicates whether NVKMS is driving an SOC display device, or an external + * dGPU device. + */ + NvBool isSOCDisplay : 1; + + /* + * Indicates whether NVKMS is supporting syncpts. 
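+     *
+     * (Syncpoints are the Tegra/SOC synchronization primitive wrapped by
+     * NVEvoSyncpt above; contextual note.)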
+ */ + NvBool supportsSyncpts : 1; + + /* + * Indicates whether the display device that NVKMS is driving requires all + * memory allocations that display will access to come from sysmem. + * + * For SOC display devices, this should be set to TRUE since the only + * memory aperture that they support is sysmem. + */ + NvBool requiresAllAllocationsInSysmem : 1; + /* + * Indicates whether the device that NVKMS is driving supports headSurface + * composition. + * + * For SOC display devices (e.g., Orin), this should be set to FALSE since + * there's currently zero nvgpu support, and no Tegra clients should be + * using headSurface right now. + */ + NvBool isHeadSurfaceSupported : 1; + + /* + * vblank Sem Control requires support in resman; that support is not + * currently available on Tegra. + */ + NvBool supportsVblankSemControl : 1; + + nvkms_timer_handle_t *postFlipIMPTimer; + nvkms_timer_handle_t *consoleRestoreTimer; + + nvkms_timer_handle_t *lowerDispBandwidthTimer; + + NvU32 simulationType; + + NvU32 numClasses; + NvU32 *supportedClasses; + + struct { + /* name[0] == '\0' for unused registryKeys[] array elements. */ + char name[NVKMS_MAX_DEVICE_REGISTRY_KEYNAME_LEN]; + NvU32 value; + } registryKeys[NVKMS_MAX_DEVICE_REGISTRY_KEYS]; + + /* Returns true if the Quadro Sync card connected to this GPU has + * a firmware version incompatible with this GPU. + */ + NvBool badFramelockFirmware; + + const struct _nv_evo_hal *hal; + const struct _nv_evo_cursor_hal *cursorHal; + + /*! + * ref_ptr to the structure. + * + * nvkms_timer_handle_t objects refer to the pDevEvo via references to this, + * so that timers that fire after the pDevEvo has been freed can detect that + * case and do nothing. + */ + struct nvkms_ref_ptr *ref_ptr; + + struct { + void *handle; + } hdmiLib; + + struct { + NvU32 semaphoreHandle; + void *pSemaphores; + NvBool enabled; + NvU32 flipCounter; + } vrr; + + /* + * Information about the framebuffer console returned by + * NV0080_CTRL_CMD_OS_UNIX_VT_GET_FB_INFO. + */ + NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS vtFbInfo; + + /* + * Handle referencing the memory reserved by RM that is used by the kernel + * as the framebuffer console surface. + */ + NvKmsSurfaceHandle fbConsoleSurfaceHandle; + + NVHsDeviceEvoPtr pHsDevice; + + /* The current headSurface configuration. */ + NVHsStateOneHeadAllDisps apiHeadSurfaceAllDisps[NVKMS_MAX_HEADS_PER_DISP]; + + struct NVDevPreallocRec prealloc; + + struct { + NvU32 handle; + NVOS10_EVENT_KERNEL_CALLBACK_EX callback; + } nonStallInterrupt; + + /* + * Track the LUT with per-head, per-pDisp scope. The LUT itself + * is triple buffered. + * + * RM surface allocations are broadcast in SLI, so LUT is allocated with + * per-device scope. However, writes into the LUT are unicast with + * per-pDisp scope. + * + * The LUT surface in the core channel contains both the base and output + * LUTs. + */ + struct { + struct { + NVSurfaceEvoPtr LUT[3]; + struct { + NvBool waitForPreviousUpdate; + NvBool curBaseLutEnabled; + NvBool curOutputLutEnabled; + NvU8 curLUTIndex; + nvkms_timer_handle_t *updateTimer; + } disp[NVKMS_MAX_SUBDEVICES]; + } apiHead[NVKMS_MAX_HEADS_PER_DISP]; + NVSurfaceEvoPtr defaultLut; + enum NvKmsLUTState defaultBaseLUTState[NVKMS_MAX_SUBDEVICES]; + enum NvKmsLUTState defaultOutputLUTState[NVKMS_MAX_SUBDEVICES]; + + /* + * Track outstanding LUT notifiers. Each notifier can have any number + * of apiHeads waiting on it. + * + * waitingApiHeadMask tracks all apiHeads waiting on a LUT notifier. 
+ * stagedApiHeadMask tracks a set of apiHeads that will be tracked by + * the next LUT notifier kickoff. + * + * notifiers is an array of trackers for specific core + * notifiers. If notifiers[i].waiting is true, then the apiHeads listed + * in notifiers[i].apiHeadMask are waiting on notifiers[i].notifier. + */ + struct { + struct { + NvU32 waitingApiHeadMask; + NvU32 stagedApiHeadMask; + struct { + int notifier; + NvU32 apiHeadMask; + NvBool waiting; + } notifiers[NVKMS_MAX_HEADS_PER_DISP]; + } sd[NVKMS_MAX_SUBDEVICES]; + } notifierState; + } lut; + + /*! stores pre-syncpts */ + NVEvoSyncpt *preSyncptTable; + NvBool *pAllSyncptUsedInCurrentFlip; + + /* DIFR prefetch event handling. */ + NVOS10_EVENT_KERNEL_CALLBACK_EX difrPrefetchCallback; + NvU32 difrPrefetchEventHandler; + + /* DIFR runtime state. */ + NVDIFRStateEvoPtr pDifrState; + + NvU32 numApiHeads; + + struct { + NvU32 numLayers; + } apiHead[NVKMS_MAX_HEADS_PER_DISP]; +} NVDevEvoRec; + +/* + * The NVHwModeTimingsEvo structure stores all the values necessary to + * perform a modeset with EVO + */ + +typedef struct _NVHwModeViewPortEvo { + struct { + /* + * note that EVO centers ViewPortOut within the active raster, + * so xAdjust,yAdjust are signed; to position ViewPortOut at + * 0,0 within active raster: + * + * viewPortOut.xAdjust = (activeRaster.w - viewPortOut.w)/2 * -1; + * viewPortOut.yAdjust = (activeRaster.h - viewPortOut.h)/2 * -1; + */ + NvS16 xAdjust; + NvS16 yAdjust; + NvU16 width; + NvU16 height; + } out; + + struct { + NvU16 width; + NvU16 height; + } in; + + NVEvoScalerTaps hTaps; + NVEvoScalerTaps vTaps; + + // These are the window features that may be possible if the required ISO + // bw is available at the time that the feature needs to be enabled. By + // default possibleUsage is set considering that everything is supported + // by the HW and for dGPU, IMP will scale it as needed. + struct NvKmsUsageBounds possibleUsage; + + // Guaranteed usage bounds allowed by IMP. These are never assigned to + // NVDpyEvoRec::usage or the hardware directly, but rather are used to + // validate usage bound change requests. + struct NvKmsUsageBounds guaranteedUsage; +} NVHwModeViewPortEvo; + +static inline NvBool nvIsImageSharpeningAvailable( + const NVHwModeViewPortEvo *pViewPort) +{ + return (pViewPort->out.width != pViewPort->in.width) || + (pViewPort->out.height != pViewPort->in.height); +} + +enum nvKmsPixelDepth { + NVKMS_PIXEL_DEPTH_18_444, + NVKMS_PIXEL_DEPTH_24_444, + NVKMS_PIXEL_DEPTH_30_444, + NVKMS_PIXEL_DEPTH_20_422, + NVKMS_PIXEL_DEPTH_16_422, +}; + +enum nvKmsTimingsProtocol { + NVKMS_PROTOCOL_DAC_RGB, + + NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A, + NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B, + NVKMS_PROTOCOL_SOR_DUAL_TMDS, + NVKMS_PROTOCOL_SOR_DP_A, + NVKMS_PROTOCOL_SOR_DP_B, + NVKMS_PROTOCOL_SOR_LVDS_CUSTOM, + NVKMS_PROTOCOL_SOR_HDMI_FRL, + + NVKMS_PROTOCOL_DSI, + + NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC, +}; + +enum NVDscInfoEvoType { + NV_DSC_INFO_EVO_TYPE_DISABLED = 0, + NV_DSC_INFO_EVO_TYPE_HDMI = 1, + NV_DSC_INFO_EVO_TYPE_DP = 3, +}; + +enum NVDscEvoMode { + NV_DSC_EVO_MODE_SINGLE = 0, + NV_DSC_EVO_MODE_DUAL = 1, +}; + +typedef struct _NVDscInfoEvoRec { + union { + /* DisplayPort Display Stream Compression */ + struct { + /* + * The DSC target bits per pixel (bpp) rate value multiplied by 16 that + * is being used by the DSC encoder. + * + * It maps respectively to {pps4[1:0], pps5[7:0]}. + */ + NvU32 bitsPerPixelX16; + + /* + * The DSC picture parameter set (PPS), which the DSC encoder must + * communicate to the decoder. 
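+             *
+             * (The DSC picture parameter set defined by the VESA DSC
+             * specification is 128 bytes, so DSC_MAX_PPS_SIZE_DWORD can be
+             * expected to be 32 dwords; contextual note.)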
+ */ + NvU32 pps[DSC_MAX_PPS_SIZE_DWORD]; + enum NVDscEvoMode dscMode; + } dp; + + struct { + NvU32 bitsPerPixelX16; + NvU32 pps[HDMI_DSC_MAX_PPS_SIZE_DWORD]; + NvU32 dscHActiveBytes; + NvU32 dscHActiveTriBytes; + NvU32 dscHBlankTriBytes; + NvU32 dscTBlankToTTotalRatioX1k; + NvU32 hblankMin; + enum NVDscEvoMode dscMode; + } hdmi; + }; + + NvU32 sliceCount; + NvU32 possibleSliceCountMask; + + enum NVDscInfoEvoType type; +} NVDscInfoEvoRec; + +/* + * This structure defines all of the values necessary to program mode timings + * on EVO hardware. + * NOTE: if you add anything to this, consider adding it to + * RasterLockPossible() in nvkms-evo.c + */ +typedef struct _NVHwModeTimingsEvo { + struct NvKmsPoint rasterSize; + struct NvKmsPoint rasterSyncEnd; + struct NvKmsPoint rasterBlankEnd; + struct NvKmsPoint rasterBlankStart; + NvU32 rasterVertBlank2Start; + NvU32 rasterVertBlank2End; + + NvU32 pixelClock; /* in kHz */ + enum nvKmsTimingsProtocol protocol; + /* + * yuv420Mode reflects whether this mode requires YUV 4:2:0 decimation into + * a half-width output through headsurface (SW YUV420) or >=nvdisplay 4.0 HW + * CSC (HW YUV420). + * + * If a mode requires SW YUV 4:2:0 emulation, the pixelClock and width + * values in NvModeTimings will still be the full width values specified by + * the mode parsed from the EDID (e.g. 3840x2160@60), but the pixelClock + * and width values in NVHwModeTimingsEvo will be the "real" half width + * values programmed in HW and rendered to through a headSurface transform + * (e.g. 1920x2160@60). If a mode requires HW YUV 4:2:0 CSC, the + * pixelClock and width values in both NvModeTimings and NVHwModeTimingsEvo + * will be full width, and the decimation to the half width scanout surface + * is performed in HW. In both cases, only the full width values should + * ever be reported to the client. + */ + enum NvYuv420Mode yuv420Mode; + /* *SyncPol is TRUE if negative */ + NvBool hSyncPol : 1; + NvBool vSyncPol : 1; + NvBool interlaced : 1; + NvBool doubleScan : 1; + /* + * hdmi3D reflects whether this mode is a HDMI 3D frame packed mode. True + * only if the user selected HDMI 3D stereo mode and the GPU supports it. + * If true, then pixelClock is doubled. + */ + NvBool hdmi3D : 1; + NvBool dscPassThrough : 1; + struct { + /* The vrr type for which this mode is adjusted. */ + enum NvKmsDpyVRRType type; + NvU32 timeoutMicroseconds; + } vrr; + + NVHwModeViewPortEvo viewPort; +} NVHwModeTimingsEvo; + +static inline NvBool nvIsAdaptiveSyncDpyVrrType(enum NvKmsDpyVRRType type) +{ + return ((type == NVKMS_DPY_VRR_TYPE_ADAPTIVE_SYNC_DEFAULTLISTED) || + (type == NVKMS_DPY_VRR_TYPE_ADAPTIVE_SYNC_NON_DEFAULTLISTED)); +} + +static inline NvBool nvIsGsyncDpyVrrType(enum NvKmsDpyVRRType type) +{ + return (type == NVKMS_DPY_VRR_TYPE_GSYNC); +} + +static inline NvU64 nvEvoFrametimeUsFromTimings(const NVHwModeTimingsEvo *pTimings) +{ + NvU64 pixelsPerFrame = pTimings->rasterSize.x * pTimings->rasterSize.y; + NvU64 pixelsPerSecond = KHzToHz(pTimings->pixelClock); + NvU64 framesPerSecond = pixelsPerSecond / pixelsPerFrame; + + return 1000000ULL / framesPerSecond; +} + +static inline NvU16 nvEvoVisibleWidth(const NVHwModeTimingsEvo *pTimings) +{ + return pTimings->rasterBlankStart.x - pTimings->rasterBlankEnd.x; +} + +static inline NvU16 nvEvoVisibleHeight(const NVHwModeTimingsEvo *pTimings) +{ + /* rasterVertBlank2{Start,End} should only be != 0 for interlaced modes. 
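+       (For progressive modes both fields are zero, so the expression below
+       reduces to rasterBlankStart.y - rasterBlankEnd.y; illustrative note.)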
+    */
+    nvAssert(pTimings->interlaced ||
+             ((pTimings->rasterVertBlank2Start == 0) &&
+              (pTimings->rasterVertBlank2End == 0)));
+
+    return pTimings->rasterBlankStart.y - pTimings->rasterBlankEnd.y +
+           pTimings->rasterVertBlank2Start - pTimings->rasterVertBlank2End;
+}
+
+/*
+ * Determine whether the timings qualify as HD or SD quality, based on the
+ * visible height (used when calculating BackendSizeHeight).
+ */
+static inline NvBool nvEvoIsHDQualityVideoTimings(
+    const NVHwModeTimingsEvo *pTimings)
+{
+    NvU32 height = nvEvoVisibleHeight(pTimings);
+
+    // As per the Windows code, nvva uses < 720.
+    if (height <= 576) {
+        // SD quality: 240, 288, 480, 576
+        return FALSE;
+    }
+
+    // HD quality: 720, 1080
+    return TRUE;
+}
+
+static inline struct NvKmsRect nvEvoViewPortOutHwView(
+    const NVHwModeTimingsEvo *pTimings)
+{
+    const NVHwModeViewPortEvo *pViewPort = &pTimings->viewPort;
+    const NvU16 hVisible = nvEvoVisibleWidth(pTimings);
+    const NvU16 vVisible = nvEvoVisibleHeight(pTimings);
+    struct NvKmsRect viewPortOut = { 0 };
+
+    viewPortOut.width = pViewPort->out.width;
+    viewPortOut.height = pViewPort->out.height;
+    viewPortOut.x = pViewPort->out.xAdjust +
+                    (hVisible - pViewPort->out.width) / 2;
+    viewPortOut.y = (pViewPort->out.yAdjust +
+                    (vVisible - pViewPort->out.height) / 2);
+
+    return viewPortOut;
+}
+
+static inline struct NvKmsRect nvEvoViewPortOutClientView(
+    const NVHwModeTimingsEvo *pTimings)
+{
+    struct NvKmsRect viewPortOut = nvEvoViewPortOutHwView(pTimings);
+
+    if (pTimings->doubleScan) {
+
+        nvAssert((viewPortOut.y % 2) == 0);
+        viewPortOut.y /= 2;
+
+        nvAssert((viewPortOut.height % 2) == 0);
+        viewPortOut.height /= 2;
+    }
+
+    return viewPortOut;
+}
+
+/*
+ * The ELD contains a subset of the digital display device's EDID
+ * information related to audio capabilities. The GPU driver sends the
+ * ELD to hardware and the audio driver reads it by issuing the ELD
+ * command verb.
+ */
+
+#define NV_MAX_AUDIO_DEVICE_ENTRIES \
+    (NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_3 + 1)
+
+typedef enum {
+    NV_ELD_PRE_MODESET = 0,
+    NV_ELD_POST_MODESET,
+    NV_ELD_POWER_ON_RESET,
+} NvEldCase;
+
+/* OR indices are per OR-type. The maximum OR index for each type
+ * on each GPU is:
+ *
+ * Pre-GV10X : 8 SORs, 4 PIORs and 4 DACs;
+ * GV10X     : 8 SORs, 4 PIORs;
+ * TU10X+    : 8 SORs;
+ */
+#define NV_EVO_MAX_ORS 8
+
+/*
+ * The scoping of heads, ORs, and dpys relative to connectors can be
+ * complicated.
+ * Here is how objects are scoped for various configurations:
+ *
+ *               #heads  #ORs  #dpys  #NVConnectorEvoRecs
+ * DP 1.1             1     1      1                    1
+ * DP-MST             n     1      n                    1
+ * DP cloning:        1     1      n                    1
+ * 2-Heads-1-OR:      2     2      1                    1
+ */
+typedef struct _NVConnectorEvoRec {
+    char name[NVKMS_DPY_NAME_SIZE];
+
+    NVDispEvoPtr pDispEvo;
+
+    NVListRec connectorListEntry;
+
+    NvBool detectComplete; /* For sync'ing dpy detection w/ DP lib */
+    NVDPLibConnectorPtr pDpLibConnector; // DP Lib
+    NvBool dpSerializerEnabled;
+
+    struct {
+        NvU8 maxLinkBW;
+        NvU8 maxLaneCount;
+        NvBool supportsMST;
+    } dpSerializerCaps;
+
+    NVDpyId displayId; // RM Display ID
+    NvKmsConnectorSignalFormat signalFormat;
+    NvKmsConnectorType type;
+    NvU32 typeIndex;
+    NvU32 legacyType; /* NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_ */
+    NvU32 legacyTypeIndex;
+    NvU32 physicalIndex;
+    NvU32 physicalLocation;
+
+    NvU32 dfpInfo; /* DFP info query through NV0073_CTRL_CMD_DFP_GET_INFO */
+
+    NVDpyIdList ddcPartnerDpyIdsList;
+
+    struct {
+        NvU32 type;
+        NvU32 protocol; // NV0073_CTRL_SPECIFIC_OR_PROTOCOL_*
+        NvU32 location; // NV0073_CTRL_SPECIFIC_OR_LOCATION_*
+        NvU32 ditherType;
+        NvU32 ditherAlgo;
+        /* Hardware heads attached to assigned OR */
+        NvU32 ownerHeadMask[NV_EVO_MAX_ORS];
+        /* The mask of secondary ORs assigned to this connector */
+        NvU32 secondaryMask;
+        /* The primary OR assigned to this connector */
+        NvU32 primary;
+    } or;
+
+    NvEldCase audioDevEldCase[NV_MAX_AUDIO_DEVICE_ENTRIES];
+
+    NvBool isHdmiEnabled;
+} NVConnectorEvoRec;
+
+static inline NvU32 nvConnectorGetORMaskEvo(const NVConnectorEvoRec *pConnectorEvo)
+{
+    if (pConnectorEvo->or.primary != NV_INVALID_OR) {
+        return NVBIT(pConnectorEvo->or.primary) | pConnectorEvo->or.secondaryMask;
+    }
+    return 0x0;
+}
+
+static inline NvU32 nvConnectorGetAttachedHeadMaskEvo(
+    const NVConnectorEvoRec *pConnectorEvo)
+{
+    NvU32 headMask = 0x0;
+    NvU32 orIndex;
+
+    FOR_EACH_INDEX_IN_MASK(32, orIndex, nvConnectorGetORMaskEvo(pConnectorEvo)) {
+        headMask |= pConnectorEvo->or.ownerHeadMask[orIndex];
+    } FOR_EACH_INDEX_IN_MASK_END;
+
+    return headMask;
+}
+
+static inline
+NvBool nvIsConnectorActiveEvo(const NVConnectorEvoRec *pConnectorEvo)
+{
+    return (pConnectorEvo->or.primary != NV_INVALID_OR) &&
+           (pConnectorEvo->or.ownerHeadMask[pConnectorEvo->or.primary] != 0x0);
+}
+
+enum NVDpLibIsModePossibleQueryMode {
+    NV_DP_LIB_IS_MODE_POSSIBLE_QUERY_MODE_NONE,
+    NV_DP_LIB_IS_MODE_POSSIBLE_QUERY_MODE_PRE_IMP =
+        NV_DP_LIB_IS_MODE_POSSIBLE_QUERY_MODE_NONE,
+    NV_DP_LIB_IS_MODE_POSSIBLE_QUERY_MODE_POST_IMP,
+};
+
+typedef struct _NVDpLibIsModePossibleParamsRec {
+    enum NVDpLibIsModePossibleQueryMode queryMode;
+
+    struct {
+        NvU32 displayId;
+        NVDpyIdList dpyIdList;
+        enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace;
+        enum NvKmsDpyAttributeColorBpcValue colorBpc;
+        const struct NvKmsModeValidationParams *pModeValidationParams;
+        const NVHwModeTimingsEvo *pTimings;
+        NvBool b2Heads1Or;
+        NVDscInfoEvoRec *pDscInfo;
+    } head[NV_MAX_HEADS];
+} NVDpLibIsModePossibleParamsRec;
+
+typedef struct _NVDpyAttributeCurrentDitheringConfigRec {
+    NvBool enabled;
+    enum NvKmsDpyAttributeCurrentDitheringDepthValue depth;
+    enum NvKmsDpyAttributeCurrentDitheringModeValue mode;
+} NVDpyAttributeCurrentDitheringConfig;
+
+typedef struct __NVDpyAttributeColorRec {
+    /*
+     * For both colorSpace and colorRange, the value for
+     * NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_{SPACE,RANGE} sent by the client is
+     * stored in NVDpyEvoRec::requestedColor{Space,Range}. The structure stores
+     * the actual color space and color range in use.
+ * + * nvChooseCurrentColorSpaceAndRangeEvo() chooses the actual color + * space, color bpc, and color range, for a dpy. It sets colorBpc + * to the max bpc supported by the given dpy. + * + * Since YUV444 mode only allows limited color range, changes to the + * current color space may trigger changes to the current color + * range (see nvChooseCurrentColorSpaceAndRangeEvo()). + * + * For SW YUV420 mode, these values are ignored in + * HEAD_SET_PROCAMP and applied in the headSurface composite shader. + * + * XXX Rename NvKmsDpyAttributeCurrentColorSpaceValue to + * NvKmsDpyAttributeCurrentFormatValue. + */ + enum NvKmsDpyAttributeCurrentColorSpaceValue format; + enum NvKmsDpyAttributeColorBpcValue bpc; + enum NvKmsDpyAttributeColorRangeValue range; + + enum NvKmsOutputColorimetry colorimetry; +} NVDpyAttributeColor; + +typedef struct __NVAttributesSetEvoRec { + +#define NV_EVO_DVC_MIN (-1024) +#define NV_EVO_DVC_MAX 1023 +#define NV_EVO_DVC_DEFAULT 0 + + NvS32 dvc; + + NVDpyAttributeColor color; + + NVDpyAttributeCurrentDitheringConfig dithering; + +#define NV_EVO_IMAGE_SHARPENING_MIN 0 +#define NV_EVO_IMAGE_SHARPENING_MAX 255 +#define NV_EVO_IMAGE_SHARPENING_DEFAULT 127 + + struct { + NvBool available; + NvU32 value; + } imageSharpening; + + enum NvKmsDpyAttributeDigitalSignalValue digitalSignal; + + NvU8 numberOfHardwareHeadsUsed; +} NVAttributesSetEvoRec; + +#define NV_EVO_DEFAULT_ATTRIBUTES_SET \ + (NVAttributesSetEvoRec) { \ + .dvc = NV_EVO_DVC_DEFAULT, \ + .color = { \ + .format = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB, \ + .range = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL, \ + }, \ + .dithering = { \ + .enabled = FALSE, \ + .mode = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE, \ + .depth = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE, \ + }, \ + .imageSharpening = { \ + .value = NV_EVO_IMAGE_SHARPENING_DEFAULT, \ + }, \ + .numberOfHardwareHeadsUsed = 0, \ + } + +typedef struct _NVEldEvoRec { + NvU32 size; + NvU8 buffer[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER]; +} NVEldEvoRec; + +/* + * This structure stores information about the active per-head audio state. + */ +typedef struct _NVDispHeadAudioStateEvoRec { + NvU32 maxFreqSupported; + NVEldEvoRec eld; + + NvBool isAudioOverHdmi : 1; + NvBool supported : 1; + NvBool enabled : 1; +} NVDispHeadAudioStateEvoRec; + +typedef struct _NVDispHeadInfoFrameStateEvoRec { + NVT_VIDEO_INFOFRAME_CTRL videoCtrl; + NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL vendorCtrl; + NvBool hdTimings; +} NVDispHeadInfoFrameStateEvoRec; + +typedef enum _NVEvoMergeMode { + NV_EVO_MERGE_MODE_DISABLED, + NV_EVO_MERGE_MODE_SETUP, + NV_EVO_MERGE_MODE_PRIMARY, + NV_EVO_MERGE_MODE_SECONDARY, +} NVEvoMergeMode; + +/* + * This structure stores information about the active per-head display state. + */ +typedef struct _NVDispHeadStateEvoRec { + + NvU32 displayRate; + + /*! Cached, to preserve across modesets. */ + struct NvKmsModeValidationParams modeValidationParams; + + /* + * For Turing and newer, enable display composition bypass mode. + * + * This is intended to be used by console restore to avoid bug 2168873. + */ + NvBool bypassComposition : 1; + + struct { + NVT_COLOR_FORMAT colorFormat; + NVT_COLORIMETRY colorimetry; + NVT_COLOR_RANGE colorRange; + NvU32 satCos; + } procAmp; + + /* + * The activeRmId is the identifier that we use to talk to RM + * about the display device(s) on this head. It is zero except + * when a mode is being driven by this head. 
For DP MST, it is the
+     * identifier of the DisplayPort library group to which the driven
+     * DP device belongs. Otherwise, it is the identifier of the connector
+     * driven by the head.
+     */
+    NvU32 activeRmId;
+
+    NVHwModeTimingsEvo timings;
+    NVConnectorEvoRec *pConnectorEvo; /* NULL if the head is not active */
+
+    HDMI_FRL_CONFIG hdmiFrlConfig;
+
+    NVDscInfoEvoRec dscInfo;
+
+    enum nvKmsPixelDepth pixelDepth;
+
+    NVDispHeadAudioStateEvoRec audio;
+
+    enum NvKmsOutputTf tf;
+
+    struct {
+        NvBool enabled;
+        enum NvKmsInfoFrameEOTF eotf;
+        struct NvKmsHDRStaticMetadata staticMetadata;
+    } hdrInfoFrameOverride;
+
+    struct {
+        enum NvKmsHDRInfoFrameState state;
+        enum NvKmsInfoFrameEOTF eotf;
+        struct NvKmsHDRStaticMetadata staticMetadata;
+    } hdrInfoFrame;
+
+    struct {
+        NVSurfaceEvoPtr pCurrSurface;
+        NvBool outputLutEnabled : 1;
+        NvBool baseLutEnabled : 1;
+    } lut;
+
+    /*
+     * An api head can be mapped onto N hardware heads; a frame presented
+     * by the api head gets split horizontally into N sections, and
+     * 'mergeHeadSection' describes the section presented by this hardware
+     * head.
+     */
+    NvU8 mergeHeadSection;
+
+    NVEvoMergeMode mergeMode;
+} NVDispHeadStateEvoRec;
+
+typedef struct _NVDispStereoParamsEvoRec {
+    enum NvKmsStereoMode mode;
+    NvBool isAegis;
+} NVDispStereoParamsEvoRec;
+
+typedef struct _NVDispFlipOccurredEventDataEvoRec {
+    NVDispEvoPtr pDispEvo;
+    NvU32 apiHead;
+    NvU32 layer;
+} NVDispFlipOccurredEventDataEvoRec;
+
+typedef struct _NVDispApiHeadStateEvoRec {
+    /*
+     * The mask of hardware heads mapped onto this api head;
+     * set to zero if the api head is not active.
+     */
+    NvU32 hwHeadsMask;
+
+    NVDpyIdList activeDpys; /* Empty if the head is not active */
+    NVAttributesSetEvoRec attributes;
+
+    enum NvKmsOutputTf tf;
+
+    NvBool hdrInfoFrameOverride;
+    NvU32 hdrStaticMetadataLayerMask;
+
+    /*
+     * Hardware timings which are split across hardware heads.
+     *
+     * XXX[2Heads1OR] The api-head state does not need to track the full
+     * hardware timings. Replace 'timings' with the minimal per-api-head
+     * timing information used in the code.
+     */
+    NVHwModeTimingsEvo timings;
+
+    struct {
+        NvBool active;
+        NvBool pendingCursorMotion;
+    } vrr;
+
+    NVDispStereoParamsEvoRec stereo;
+
+    struct NvKmsPoint viewPortPointIn;
+
+    NVDispHeadInfoFrameStateEvoRec infoFrame;
+
+    /*
+     * Each head can have up to NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD
+     * programmable Core semaphores.
+     *
+     * The numVblankSyncObjectsCreated will ideally always be equal to
+     * NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD, but could be lower if errors
+     * occurred during syncpt allocation in nvRMSetupEvoCoreChannel().
+     */
+    NvU8 numVblankSyncObjectsCreated;
+    NVVblankSyncObjectRec vblankSyncObjects[NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD];
+
+    struct {
+        struct nvkms_ref_ptr *ref_ptr;
+        NVDispFlipOccurredEventDataEvoRec data;
+    } flipOccurredEvent[NVKMS_MAX_LAYERS_PER_HEAD];
+
+    NvU32 rmVBlankCallbackHandle;
+
+    NvBool hs10bpcHint : 1;
+} NVDispApiHeadStateEvoRec;
+
+typedef struct _NVDispVblankApiHeadState {
+
+    NvU64 vblankCount;
+
+    /*
+     * All entries in vblankCallbackList[0] get called before any entries in
+     * vblankCallbackList[1].
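+     *
+     * (This ordering lets producer-style callbacks -- e.g., one that updates
+     * state other callbacks read -- be registered in list 0, with consumers
+     * of that work in list 1.)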
+ */ + NVListRec vblankCallbackList[2]; + + struct { + NVListRec list; + NVVBlankCallbackPtr pCallbackPtr; + } vblankSemControl; + +} NVDispVblankApiHeadState; + +typedef struct _NVDispEvoRec { + NvU8 gpuLogIndex; + NVDevEvoPtr pDevEvo; + NvU32 hotplugEventHandle; + NvU32 DPIRQEventHandle; + NVOS10_EVENT_KERNEL_CALLBACK_EX rmHotplugCallback; + NVOS10_EVENT_KERNEL_CALLBACK_EX rmDPIRQCallback; + + NVDispHeadStateEvoRec headState[NVKMS_MAX_HEADS_PER_DISP]; + NVDispApiHeadStateEvoRec apiHeadState[NVKMS_MAX_HEADS_PER_DISP]; + NVDispVblankApiHeadState vblankApiHeadState[NVKMS_MAX_HEADS_PER_DISP]; + + NVDpyIdList vbiosDpyConfig[NVKMS_MAX_HEADS_PER_DISP]; + + NvU32 isoBandwidthKBPS; + NvU32 dramFloorKBPS; + + /* + * The list of physical connector display IDs. This is the union + * of pConnectorEvo->displayId values, which is also the union of + * pDpyEvo->id values for non-MST pDpys. + */ + NVDpyIdList connectorIds; + + NVListRec connectorList; + + NvU32 displayOwner; + + NVListRec dpyList; + + NVDpyIdList bootDisplays; + NVDpyIdList validDisplays; + NVDpyIdList connectedDisplays; + + /* + * displayPortMSTIds is a superset of dynamicDpyIds because not all DP MST + * dpys are dynamic dpys. For example, the DP MST dpys that are driven by + * a DP serializer connector are part of a fixed topology, and are static in + * nature. + */ + NVDpyIdList displayPortMSTIds; /* DP MST dpys */ + NVDpyIdList dynamicDpyIds; + + NVDpyIdList muxDisplays; + + struct { + // Indicates whether a VRR cookie was detected + NvBool hasPlatformCookie; + + nvkms_timer_handle_t *unstallTimer; + } vrr; + + NVFrameLockEvoPtr pFrameLockEvo; + struct { + NVDpyId server; + NVDpyIdList clients; + NvBool syncEnabled; /* GPU is syncing to framelock */ + NvU32 connectorIndex;/* NV30F1_GSYNC_CONNECTOR_* */ + NvU32 currentServerHead; /* used for disabling */ + NvU32 currentClientHeadsMask; /* used for disabling */ + NvBool currentHouseSync; /* if state machine thinks house sync + is enabled -- used for disabling */ + + /* Framelock event-related data */ +#define NV_FRAMELOCK_SYNC_LOSS 0 +#define NV_FRAMELOCK_SYNC_GAIN 1 +#define NV_FRAMELOCK_NUM_EVENTS 2 + + struct { + NvU32 handle; + NVOS10_EVENT_KERNEL_CALLBACK_EX callback; + } gsyncEvent[NV_FRAMELOCK_NUM_EVENTS]; + + } framelock; + + /* NVDevEvoRec::pHsChannel[] is indexed by the api heads */ + NVHsChannelEvoPtr pHsChannel[NVKMS_MAX_HEADS_PER_DISP]; + + /* NVDevEvoRec::pSwapGroup[] is indexed by the api heads */ + NVSwapGroupPtr pSwapGroup[NVKMS_MAX_HEADS_PER_DISP]; + + /* If cross-GPU rasterlock is possible with the currently-active + * configuration */ + NvBool rasterLockPossible; + + /* + * This points to an *active* lock group (i.e., a set of 1 or more pDisps + * across which rasterlock -- and possibly fliplock -- is currently + * enabled), or NULL if no lock group is active on this pDisp. + */ + NVLockGroup *pLockGroup; + + /*! + * ref_ptr to the structure. + * + * nvkms_timer_handle_t objects refer to the pDispEvo via references to + * this, so that timers that fire after the pDispEvo has been freed can + * detect that case and do nothing. + */ + struct nvkms_ref_ptr *ref_ptr; + + /* + * Indicates that NV_KMS_DISP_ATTRIBUTE_QUERY_DP_AUX_LOG has been queried at + * least once on this device. If set, nvRmDestroyDisplays() will flush any + * remaining AUX log messages to the system log. 
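+ *
+ * (Flushing at teardown ensures that AUX transactions logged after the
+ * last query are not silently dropped.)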
+ */ + NvBool dpAuxLoggingEnabled; + + struct nvkms_backlight_device *backlightDevice; +} NVDispEvoRec; + +static inline NvU32 GetNextHwHead(NvU32 hwHeadsMask, const NvU32 prevHwHead) +{ + NvU32 head; + + if ((hwHeadsMask == 0x0) || + ((prevHwHead != NV_INVALID_HEAD) && + ((hwHeadsMask &= ~((1 << (prevHwHead + 1)) -1 )) == 0x0))) { + return NV_INVALID_HEAD; + } + + head = BIT_IDX_32(LOWESTBIT(hwHeadsMask)); + + if (head >= NV_MAX_HEADS) { + return NV_INVALID_HEAD; + } + + return head; +} + +#define FOR_EACH_EVO_HW_HEAD_IN_MASK(__hwHeadsMask, __hwHead) \ + for ((__hwHead) = GetNextHwHead((__hwHeadsMask), NV_INVALID_HEAD); \ + (__hwHead) != NV_INVALID_HEAD; \ + (__hwHead) = GetNextHwHead((__hwHeadsMask), (__hwHead))) + +#define FOR_EACH_EVO_HW_HEAD(__pDispEvo, __apiHead, __hwHead) \ + FOR_EACH_EVO_HW_HEAD_IN_MASK((__pDispEvo)->apiHeadState[(__apiHead)].hwHeadsMask, \ + (__hwHead)) + +static inline NvU32 nvGetPrimaryHwHeadFromMask(const NvU32 hwHeadsMask) +{ + return GetNextHwHead(hwHeadsMask, NV_INVALID_HEAD); +} + +static inline NvU32 nvGetPrimaryHwHead(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead) +{ + return (apiHead != NV_INVALID_HEAD) ? + nvGetPrimaryHwHeadFromMask(pDispEvo->apiHeadState[apiHead].hwHeadsMask) : + NV_INVALID_HEAD; +} + +typedef struct NVEdidRec { + NvU8 *buffer; + size_t length; +} NVEdidRec, *NVEdidPtr; + +typedef struct _NVParsedEdidEvoRec { + NvBool valid; + NVT_EDID_INFO info; + NVT_EDID_RANGE_LIMIT limits; + char monitorName[NVT_EDID_MONITOR_NAME_STRING_LENGTH]; + char serialNumberString[NVT_EDID_LDD_PAYLOAD_SIZE+1]; +} NVParsedEdidEvoRec; + +typedef void (*NVVBlankCallbackProc)(NVDispEvoRec *pDispEvo, + NVVBlankCallbackPtr pCallbackData); + +typedef struct _NVVBlankCallbackRec { + NVListRec vblankCallbackListEntry; + NVVBlankCallbackProc pCallback; + void *pUserData; + NvU32 apiHead; +} NVVBlankCallbackRec; + +typedef void (*NVRgLine1CallbackProc)(NVDispEvoRec *pDispEvo, + const NvU32 head, + NVRgLine1CallbackPtr pCallbackData); + +typedef struct _NVRgLine1CallbackRec { + NVRgLine1CallbackProc pCallbackProc; + void *pUserData; +} NVRgLine1CallbackRec; + +typedef struct _NVDpyAttributeRequestedDitheringConfigRec { + enum NvKmsDpyAttributeRequestedDitheringValue state; + enum NvKmsDpyAttributeRequestedDitheringDepthValue depth; + enum NvKmsDpyAttributeRequestedDitheringModeValue mode; +} NVDpyAttributeRequestedDitheringConfig; + +typedef struct _NVDpyEvoRec { + NVListRec dpyListEntry; + NVDpyId id; + + char name[NVKMS_DPY_NAME_SIZE]; + + NvU32 apiHead; + + struct _NVDispEvoRec *pDispEvo; + NVConnectorEvoPtr pConnectorEvo; + + NvBool hasBacklightBrightness : 1; + NvBool internal : 1; + NvBool allowDVISpecPClkOverride : 1; + + /* whether the connected dpy is HDMI capable */ + NvBool hdmiCapable : 1; + NvBool isVrHmd : 1; + + /* + * Maximum single link and total allowed pixel clock. This is first + * reported by RM through DpyProbeMaxPixelClock, and then potentially + * overridden by the EVO SOR capabilities for HDMI and DVI through + * UpdateMaxPixelClock. + */ + NvU32 maxPixelClockKHz; + NvU32 maxSingleLinkPixelClockKHz; + + NVEdidRec edid; + NVParsedEdidEvoRec parsedEdid; + + NVDpyAttributeRequestedDitheringConfig requestedDithering; + + enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace; + enum NvKmsDpyAttributeColorRangeValue requestedColorRange; + + NVAttributesSetEvoRec currentAttributes; + + nvkms_timer_handle_t *hdrToSdrTransitionTimer; + + struct { + char *addressString; + NVDPLibDevicePtr pDpLibDevice; // DP Lib's notion of the device. 
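+        // pDpLibDevice is NULL for dpys not managed by the DP library;
+        // for example, DP serializer dpys use serializerStreamIndex below
+        // instead.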
+        NvBool inbandStereoSignaling;
+
+        NvU8 laneCount; // NV0073_CTRL_DP_DATA_SET_LANE_COUNT
+        NvU8 linkRate; // NV0073_CTRL_DP_DATA_SET_LINK_BW
+        NvU32 linkRate10MHz;
+        enum NvKmsDpyAttributeDisplayportConnectorTypeValue connectorType;
+        NvBool sinkIsAudioCapable;
+
+        struct {
+            NvBool valid;
+            NvU8 buffer[NVKMS_GUID_SIZE];
+            char str[NVKMS_GUID_STRING_SIZE];
+        } guid;
+
+        /*
+         * When the DP serializer is in MST mode, this field is used to
+         * uniquely identify each MST DPY that's connected to the DP
+         * serializer.
+         *
+         * This field is only valid for DP serializer DPYs, and pDpLibDevice
+         * must be NULL in this case.
+         */
+        NvU8 serializerStreamIndex;
+    } dp;
+
+    struct {
+        HDMI_SRC_CAPS srcCaps;
+        HDMI_SINK_CAPS sinkCaps;
+    } hdmi;
+
+    struct {
+        NvBool ycbcr422Capable;
+        NvBool ycbcr444Capable;
+    } colorSpaceCaps;
+
+    struct {
+        NvBool supported : 1;
+        NvBool requiresModetimingPatching : 1;
+        NvBool isDLP : 1;
+        NvBool isAegis : 1;
+        NvBool requiresVbiAdjustment : 1;
+        NvU32 subType;
+        int indexInOverrideTimings;
+    } stereo3DVision;
+
+    struct {
+        enum NvKmsDpyVRRType type;
+    } vrr;
+} NVDpyEvoRec;
+
+static inline NvBool nvDpyEvoIsDPMST(const NVDpyEvoRec *pDpyEvo)
+{
+    return nvDpyIdIsInDpyIdList(pDpyEvo->id,
+                                pDpyEvo->pDispEvo->displayPortMSTIds);
+}
+
+// Return a pDpy's connector's display ID
+static inline NvU32 nvDpyEvoGetConnectorId(const NVDpyEvoRec *pDpyEvo)
+{
+    NvU32 rmDpyId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
+
+    // This function shouldn't be used for DP MST dynamic devices.
+    nvAssert(!nvDpyEvoIsDPMST(pDpyEvo));
+    nvAssert(ONEBITSET(rmDpyId));
+
+    return rmDpyId;
+}
+
+static inline
+NvBool nvConnectorIsInternal(const NVConnectorEvoRec *pConnectorEvo)
+{
+    /* For mobile GPUs, check for the LVDS or embedded DisplayPort signal
+     * flag. If found, the DFP is internal. */
+    return (pConnectorEvo->legacyType ==
+            NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) &&
+           (((pConnectorEvo->pDispEvo->pDevEvo->mobile) &&
+             (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _SIGNAL, _LVDS,
+                           pConnectorEvo->dfpInfo))) ||
+            (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _SIGNAL, _DSI,
+                          pConnectorEvo->dfpInfo)) ||
+            (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _EMBEDDED_DISPLAYPORT, _TRUE,
+                          pConnectorEvo->dfpInfo)));
+}
+
+static inline NvU32 NV_EVO_LOCK_PIN(NvU32 n)
+{
+    return NV_EVO_LOCK_PIN_0 + n;
+}
+
+static inline NvU32 NV_EVO_LOCK_PIN_INTERNAL(NvU32 n)
+{
+    return NV_EVO_LOCK_PIN_INTERNAL_0 + n;
+}
+
+static inline NvBool NV_EVO_LOCK_PIN_IS_INTERNAL(NvU32 n)
+{
+    ct_assert(NV_IS_UNSIGNED(n) && NV_EVO_LOCK_PIN_INTERNAL_0 == 0);
+    return n < NV_EVO_LOCK_PIN_0;
+}
+
+
+/*
+ * Utility macro for looping over all the pConnectorEvos on a pDispEvo.
+ */
+#define FOR_ALL_EVO_CONNECTORS(_pConnectorEvo, _pDispEvo)             \
+    nvListForEachEntry((_pConnectorEvo),                              \
+                       &(_pDispEvo)->connectorList, connectorListEntry)
+
+/*
+ * Utility macro for declaring a for loop to walk over all the
+ * pDispEvos on a particular pDevEvo.
+ */
+#define FOR_ALL_EVO_DISPLAYS(_pDispEvo, _i, _pDevEvo)                 \
+    for ((_i) = 0,                                                    \
+         (_pDispEvo) = (_pDevEvo)->pDispEvo[0];                       \
+         (_pDispEvo);                                                 \
+         (_i)++, (_pDispEvo) = ((_i) < (_pDevEvo)->nDispEvo) ?
\ + (_pDevEvo)->pDispEvo[(_i)] : NULL) + +#define FOR_ALL_EVO_DPYS(_pDpyEvo, _dpyIdList, _pDispEvo) \ + nvListForEachEntry((_pDpyEvo), &(_pDispEvo)->dpyList, dpyListEntry) \ + if (nvDpyIdIsInDpyIdList((_pDpyEvo)->id, (_dpyIdList))) + +#define FOR_ALL_EVO_FRAMELOCKS(_pFrameLockEvo) \ + nvListForEachEntry(_pFrameLockEvo, &nvEvoGlobal.frameLockList, \ + frameLockListEntry) + +#define FOR_ALL_EVO_DEVS(_pDevEvo) \ + nvListForEachEntry(_pDevEvo, &nvEvoGlobal.devList, devListEntry) + +#define FOR_ALL_EVO_DEVS_SAFE(_pDevEvo, _pDevEvo_tmp) \ + nvListForEachEntry_safe(_pDevEvo, _pDevEvo_tmp, &nvEvoGlobal.devList, devListEntry) + +#define FOR_ALL_DEFERRED_REQUEST_FIFOS_IN_SWAP_GROUP( \ + _pSwapGroup, _pDeferredRequestFifo) \ + nvListForEachEntry((_pDeferredRequestFifo), \ + &(_pSwapGroup)->deferredRequestFifoList, \ + swapGroup.deferredRequestFifoListEntry) + +#define FOR_EACH_SUBDEV_IN_MASK(_sd, _mask) \ + FOR_EACH_INDEX_IN_MASK(32, _sd, _mask) + +#define FOR_EACH_SUBDEV_IN_MASK_END \ + FOR_EACH_INDEX_IN_MASK_END + +static inline NVDpyEvoPtr nvGetOneArbitraryDpyEvo(NVDpyIdList dpyIdList, + const NVDispEvoRec *pDispEvo) +{ + NVDpyEvoPtr pDpyEvo; + + nvAssert(nvDpyIdListIsASubSetofDpyIdList(dpyIdList, + pDispEvo->validDisplays)); + + FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) { + return pDpyEvo; + } + + return NULL; +} + + +/* + * Return whether or not the display devices on the connector should + * be handled by the DP library. + */ +static inline NvBool nvConnectorUsesDPLib(const NVConnectorEvoRec + *pConnectorEvo) +{ + return (pConnectorEvo->pDpLibConnector != NULL); +} + +static inline +NvBool nvConnectorIsDPSerializer(const NVConnectorEvoRec *pConnectorEvo) +{ + return (pConnectorEvo->type == NVKMS_CONNECTOR_TYPE_DP_SERIALIZER); +} + +/* + * Return whether or not the display device given is handled by the DP + * library. + */ +static inline NvBool nvDpyUsesDPLib(const NVDpyEvoRec *pDpyEvo) +{ + return nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo); +} + +/* + * Return whether this dpy is active. The dpy is active if it is + * driven by a head. + */ +static inline NvBool nvDpyEvoIsActive(const NVDpyEvoRec *pDpyEvo) +{ + return (pDpyEvo->apiHead != NV_INVALID_HEAD); +} + +/* + * Return true if this dpy reports an EDID supporting HDMI 3D and + * isn't connected via active DisplayPort. + */ +static inline NvBool nvDpyEvoSupportsHdmi3D(const NVDpyEvoRec *pDpyEvo) +{ + return (pDpyEvo->parsedEdid.valid && + pDpyEvo->parsedEdid.info.HDMI3DSupported && + !((pDpyEvo->pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A) || + (pDpyEvo->pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B))); + +} + +static inline NvBool nvHeadIsActive(const NVDispEvoRec *pDispEvo, + const NvU32 head) +{ + return (head < ARRAY_LEN(pDispEvo->headState)) && + (pDispEvo->headState[head].pConnectorEvo != NULL); +} + +static inline NvBool nvApiHeadIsActive(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead) +{ + return (apiHead < ARRAY_LEN(pDispEvo->apiHeadState)) && + (!nvDpyIdListIsEmpty(pDispEvo->apiHeadState[apiHead].activeDpys)); +} + +/*! + * Return the mask of active heads on this pDispEvo. 
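+ *
+ * For example, if only heads 0 and 2 are active, this returns
+ * (1 << 0) | (1 << 2) == 0x5.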
+ */ +static inline NvU32 nvGetActiveHeadMask(const NVDispEvoRec *pDispEvo) +{ + NvU32 head; + NvU32 headMask = 0; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (nvHeadIsActive(pDispEvo, head)) { + headMask |= 1 << head; + } + } + + return headMask; +} + +static inline NvBool nvAllHeadsInactive(const NVDevEvoRec *pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + NvU32 head; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (head = 0; head < pDevEvo->numHeads; head++) { + if (nvHeadIsActive(pDispEvo, head)) { + return FALSE; + } + } + } + + return TRUE; +} + +/* + * Return the list of dpys that are currently active on the given disp. + */ +static inline NVDpyIdList nvActiveDpysOnDispEvo(const NVDispEvoRec *pDispEvo) +{ + NVDpyIdList dpyIdList = nvEmptyDpyIdList(); + NvU32 apiHead; + + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + + dpyIdList = nvAddDpyIdListToDpyIdList(dpyIdList, + pApiHeadState->activeDpys); + } + + return dpyIdList; +} + +static inline NvU32 nvGpuIdOfDispEvo(const NVDispEvoRec *pDispEvo) +{ + nvAssert(pDispEvo->displayOwner < pDispEvo->pDevEvo->numSubDevices); + return pDispEvo->pDevEvo->pSubDevices[pDispEvo->displayOwner]->gpuId; +} + +static inline NvBool nvIsEmulationEvo(const NVDevEvoRec *pDevEvo) +{ + return pDevEvo->simulationType != + NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_NONE; +} + +static inline NvBool nvIsDfpgaEvo(const NVDevEvoRec *pDevEvo) +{ + return pDevEvo->simulationType == + NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_DFPGA; +} + +static inline NvBool nvIs3DVisionStereoEvo(const enum NvKmsStereoMode stereo) +{ + return (stereo == NVKMS_STEREO_NVIDIA_3D_VISION || + stereo == NVKMS_STEREO_NVIDIA_3D_VISION_PRO); +} + +/* + * Utility macro for iterating over all head bits set in a head bit mask + */ +#define FOR_ALL_HEADS(_head, _headMask) \ + for((_head) = 0; \ + (_headMask) >> (_head); \ + (_head)++) \ + if ((_headMask) & (1 << (_head))) + +typedef struct _NVFrameLockEvo { + NVListRec frameLockListEntry; + + /* array of subdev GPU IDs */ + NvU32 nGpuIds; + NvU32 gpuIds[NV30F1_CTRL_MAX_GPUS_PER_GSYNC]; + + NvU32 gsyncId; + NvU32 device; /* RM device handle for this object */ + + int fpgaIdAndRevision; /* FPGA revId (including firmware version + * and board ID) */ + + int firmwareMajorVersion; /* FPGA firmware major version */ + int firmwareMinorVersion; /* FPGA firmware minor version */ + NvU32 boardId; /* NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_* */ + NvU32 caps; /* Various capabilities flags */ + + NvU32 maxSyncSkew; /* Max sync skew increment */ + NvU32 syncSkewResolution; /* In nanoseconds */ + NvU32 maxSyncInterval; /* Max sync interval */ + + NvU32 houseSyncUseable; + + /* House sync mode requested by user */ + enum NvKmsFrameLockAttributeHouseSyncModeValue houseSyncMode; + NvU32 houseSyncModeValidValues; + + NvBool houseSyncAssy; /* Current desired state */ + NvBool houseSyncArmed; /* Current hardware state */ + + NvU8 connectedGpuMask; /* bitmask of GPUs that are connected */ + NvU8 syncReadyGpuMask; /* bitmask of GPUs that are syncReady */ + + NvBool syncReadyLast; /* Previous NV_CTRL_FRAMELOCK_SYNC_READY + * value changed either from nvctrl or + * the RM, used to avoid resending events + * since RM doesn't trigger a SYNC_READY + * event on framelock disable */ + + NvBool videoModeReadOnly; /* If video mode is read-only */ + + NvU32 maxMulDivValue; /* Max sync multiply/divide value */ + + NvBool 
mulDivSupported; /* Whether this board supports setting a sync + * multiplier/divider; maxMulDivValue is only + * valid if this is true */ + + /* Current device state */ + enum NvKmsFrameLockAttributePolarityValue polarity; + NvU32 syncDelay; + NvU32 syncInterval; + enum NvKmsFrameLockAttributeVideoModeValue videoMode; + NvU8 mulDivValue; + enum NvKmsFrameLockAttributeMulDivModeValue mulDivMode; + NvBool testMode; + + NVUnixRmHandleAllocatorRec handleAllocator; + +} NVFrameLockEvoRec; + +/*! + * The buffer that accumulates a string with information returned to + * the client. + */ +typedef struct _NVEvoInfoString { + NvU16 length; /*! strlen(s); excludes the nul terminator */ + NvU16 totalLength; /*! number of bytes in the buffer pointed to by 's' */ + char *s; /*! pointer to the buffer to be written to */ +} NVEvoInfoStringRec; + +enum NvHsMapPermissions { + NvHsMapPermissionsNone, + NvHsMapPermissionsReadOnly, + NvHsMapPermissionsReadWrite, +}; + +#define NV_HS_BAD_GPU_ADDRESS ((NvU64) -1) + +#define NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP ( \ + NVBIT64(NvKmsSurfaceMemoryFormatI8)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP ( \ + NVBIT64(NvKmsSurfaceMemoryFormatA1R5G5B5) | \ + NVBIT64(NvKmsSurfaceMemoryFormatX1R5G5B5) | \ + NVBIT64(NvKmsSurfaceMemoryFormatR5G6B5)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP ( \ + NVBIT64(NvKmsSurfaceMemoryFormatA8R8G8B8) | \ + NVBIT64(NvKmsSurfaceMemoryFormatX8R8G8B8) | \ + NVBIT64(NvKmsSurfaceMemoryFormatA2B10G10R10) | \ + NVBIT64(NvKmsSurfaceMemoryFormatX2B10G10R10) | \ + NVBIT64(NvKmsSurfaceMemoryFormatA8B8G8R8) | \ + NVBIT64(NvKmsSurfaceMemoryFormatX8B8G8R8)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP ( \ + NVBIT64(NvKmsSurfaceMemoryFormatRF16GF16BF16AF16) | \ + NVBIT64(NvKmsSurfaceMemoryFormatRF16GF16BF16XF16) | \ + NVBIT64(NvKmsSurfaceMemoryFormatR16G16B16A16)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422) | \ + NVBIT64(NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___U8V8_N420) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___V8U8_N420)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___U8V8_N422) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___V8U8_N422)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___U8V8_N444) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___V8U8_N444)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___U10V10_N420) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___V10U10_N420) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___U12V12_N420) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___V12U12_N420)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___U10V10_N422) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___V10U10_N422) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___U12V12_N422) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___V12U12_N422)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___U10V10_N444) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___V10U10_N444) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___U12V12_N444) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___V12U12_N444)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___U8___V8_N444)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420 ( \ + 
NVBIT64(NvKmsSurfaceMemoryFormatY8___U8___V8_N420)) + +struct _NVSurfaceEvoRec { + /* + * By default, all NVSurfaceEvoRecs will have requireDisplayHardwareAccess + * as TRUE and on chips where ctxDma is supported, a ctxDma is allocated + * and placed in the display engine hash table for each plane. + * + * But, if the client specified the noDisplayHardwareAccess flag, + * requireDisplayHardwareAccess will be FALSE, and ctxDma will be 0 for + * all planes. + * + * requireDisplayHardwareAccess is used to remember what the client + * requested, so that we correctly honor noDisplayHardwareAccess across + * FreeSurfaceCtxDmasForAllOpens() / + * AllocSurfaceCtxDmasForAllOpens() cycles. + */ + NvBool requireDisplayHardwareAccess; + + struct { + NvU32 rmHandle; + NVSurfaceDescriptor surfaceDesc; + NvU32 pitch; + NvU64 offset; + NvU64 rmObjectSizeInBytes; + } planes[NVKMS_MAX_PLANES_PER_SURFACE]; + + struct { + const struct NvKmsPerOpenDev *pOpenDev; + NvKmsSurfaceHandle surfaceHandle; + } owner; + + NvU32 widthInPixels; + NvU32 heightInPixels; + + NvU32 log2GobsPerBlockY; + + /* + * GPU virtual address of the surface, in NVKMS's VA space for use by + * headSurface. + */ + NvU64 gpuAddress; + + /* + * HeadSurface needs a CPU mapping of surfaces containing semaphores. + */ + void *cpuAddress[NVKMS_MAX_SUBDEVICES]; + + enum NvKmsSurfaceMemoryLayout layout; + enum NvKmsSurfaceMemoryFormat format; + + NvKmsMemoryIsoType isoType; + + /* + * A surface has two reference counts: + * + * - rmRefCnt indicates how many uses of the surface reference + * NVSurfaceEvoRec::planes[]::rmHandle (the surface owner who registered + * the surface, EVO currently displaying the surface, an open + * surface grant file descriptor). + * + * - structRefCnt indicates how many uses of the surface reference + * the NVSurfaceEvoRec. In addition to the rmRefCnt uses, this + * will also count NVKMS clients who acquired the surface + * through GRANT_SURFACE/ACQUIRE_SURFACE. + * + * When a client registers a surface, both reference counts will + * be initialized to 1. The RM surface for each plane will be unduped when + * rmRefCnt reaches zero. The NVSurfaceEvoRec structure will be + * freed when structRefCnt reaches zero. + * + * In most cases, one of the following will be true: + * (rmRefCnt == 0) && (structRefCnt == 0) + * (rmRefCnt != 0) && (structRefCnt != 0) + * The only exception is when the owner of the surface unregisters it while + * other clients still have references to it; in that case, the rmRefCnt + * can drop to zero while structRefCnt is still non-zero. + * + * If rmRefCnt reaches zero before structRefCnt, the surface is + * "orphaned": it still exists in ACQUIRE_SURFACE clients' handle + * namespaces and/or granted FDs, but is not usable in subsequent API + * requests (e.g., to flip, specify cursor image, etc). 
+ * + * Described in a table: + * + * ACTION rmRefCnt structRefCnt + * a) NVKMS_IOCTL_REGISTER_SURFACE =1 =1 + * b) flip to surface +1 +1 + * c) NVKMS_IOCTL_GRANT_SURFACE(grantFd) n/a +1 + * d) NVKMS_IOCTL_ACQUIRE_SURFACE n/a +1 + * e) NVKMS_IOCTL_UNREGISTER_SURFACE -1 -1 + * f) flip away from surface -1 -1 + * g) close(grantFd) n/a -1 + * h) NVKMS_IOCTL_RELEASE_SURFACE n/a -1 + * i) ..._REGISTER_DEFERRED_REQUEST_FIFO +1 +1 + * j) ..._UNREGISTER_DEFERRED_REQUEST_FIFO -1 -1 + * + * (e) complements (a) + * (f) complements (b) + * (g) complements (c) + * (h) complements (d) + * (j) complements (i) + */ + NvU64 rmRefCnt; + NvU64 structRefCnt; + +#if NVKMS_PROCFS_ENABLE + NvBool procFsFlag; +#endif + + /* + * Disallow DIFR if display caching is forbidden. This will be set for + * CPU accessible surfaces. + */ + NvBool noDisplayCaching; + + /* Keep track of prefetched surfaces. */ + NvU32 difrLastPrefetchPass; + + /* Map memory allocation into display GPU's IOMMU space */ + NvBool mapToDisplayRm; +}; + +typedef struct _NVDeferredRequestFifoRec { + NVSurfaceEvoPtr pSurfaceEvo; + struct NvKmsDeferredRequestFifo *fifo; + + /* A deferred request fifo may be joined to a swapGroup. */ + struct { + NVSwapGroupPtr pSwapGroup; + NVListRec deferredRequestFifoListEntry; + NvBool ready; + NvBool perEyeStereo; + NvBool pendingJoined; + NvBool pendingReady; + NvU32 semaphoreIndex; + struct NvKmsPerOpen *pOpenUnicastEvent; + } swapGroup; +} NVDeferredRequestFifoRec; + +typedef struct _NVSwapGroupRec { + NVListRec deferredRequestFifoList; + NvBool zombie; + NvBool pendingFlip; + NvU32 nMembers; + NvU32 nMembersReady; + NvU32 nMembersPendingJoined; + + NvU16 nClips; + struct NvKmsRect *pClipList; + NvBool swapGroupIsFullscreen; + + NvU64 refCnt; +} NVSwapGroupRec; + +typedef struct { + NvU32 clientHandle; + nvRMContext rmSmgContext; + + NVListRec devList; + NVListRec frameLockList; + +#if defined(DEBUG) + NVListRec debugMemoryAllocationList; +#endif + + struct NvKmsPerOpen *nvKmsPerOpen; +} NVEvoGlobal; + +extern NVEvoGlobal nvEvoGlobal; + +/* + * These enums are used during IMP validation: + * - NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE means that no changes will be made to + * the current display bandwidth values. + * - NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE means that NVKMS will increase the + * current display bandwidth values if required by IMP. This is typically + * specified pre-modeset/flip. + * - NV_EVO_REALLOCATE_BANDWIDTH_MODE_POST means that NVKMS may potentially + * decrease the current display bandwidth values to match the current display + * configuration. This is typically specified post-modeset/flip. 
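+ *
+ * A modeset can therefore run IMP twice: once with _PRE so that bandwidth
+ * only grows (keeping the outgoing configuration valid during the
+ * transition), and once with _POST to release whatever the new
+ * configuration no longer needs.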
+ */ +typedef enum { + NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE = 0, + NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE = 1, + NV_EVO_REALLOCATE_BANDWIDTH_MODE_POST = 2, +} NVEvoReallocateBandwidthMode; + +typedef struct { + struct { + /* pTimings == NULL => this head is disabled */ + const NVHwModeTimingsEvo *pTimings; + NvBool enableDsc; + NvBool b2Heads1Or; + enum nvKmsPixelDepth pixelDepth; + const struct NvKmsUsageBounds *pUsage; + NvU32 displayId; + NvU32 orIndex; + NvU8 orType; /* NV0073_CTRL_SPECIFIC_OR_TYPE_* */ + NvU32 dscSliceCount; + NvU32 possibleDscSliceCountMask; + NvBool modesetRequested : 1; + } head[NVKMS_MAX_HEADS_PER_DISP]; + + NvBool requireBootClocks; + NVEvoReallocateBandwidthMode reallocBandwidth; +} NVEvoIsModePossibleDispInput; + +typedef struct { + NvBool possible; + NvU32 minRequiredBandwidthKBPS; + NvU32 floorBandwidthKBPS; + struct { + NvU32 dscSliceCount; + } head[NVKMS_MAX_HEADS_PER_DISP]; +} NVEvoIsModePossibleDispOutput; + +/* CRC-query specific defines */ +/*! + * Structure that defines information about where a single variable is stored in + * the CRC32NotifierEntry structure + */ +typedef struct _CRC32NotifierEntryRec { + NvU32 field_offset; /* Var's offset from start of CRC32Notifier Struct */ + NvU32 field_base_bit; /* LSB bit index for variable in entry */ + NvU32 field_extent_bit; /* MSB bit index for variable in entry */ + struct NvKmsDpyCRC32 *field_frame_values; /* Array to store read field values across frames */ +} CRC32NotifierEntryRec; + +/*! + * Internally identifies flag read from CRC32Notifier's Status for error-checking + */ +enum CRC32NotifierFlagType { + NVEvoCrc32NotifierFlagCount, + NVEvoCrc32NotifierFlagCrcOverflow, +}; + +/*! + * Structure that defines information about where a single flag is stored in + * the Status of the CRC32NotifierEntry structure + */ +typedef struct _CRC32NotifierEntryFlags { + NvU32 flag_base_bit; /* LSB bit index for flag in entry */ + NvU32 flag_extent_bit; /* MSB bit index for flag in entry */ + enum CRC32NotifierFlagType flag_type; /* Type of error-checking to perform on flag */ +} CRC32NotifierEntryFlags; + +/*! + * Internal Crc32NotifierRead structure used to collect multiple frames of CRC + * data from a QueryCRC32 call. Arrays should be allocated to match + * entry_count frames. + */ +typedef struct _CRC32NotifierCrcOut { + /*! + * Array of CRCs generated from the Compositor hardware + */ + struct NvKmsDpyCRC32 *compositorCrc32; + + /*! + * CRCs generated from the RG hardware, if head is driving RG/SF. + */ + struct NvKmsDpyCRC32 *rasterGeneratorCrc32; + + /*! 
+ * Crc values generated from the target SF/OR depending on connector's OR type + */ + struct NvKmsDpyCRC32 *outputCrc32; + +} CRC32NotifierCrcOut; + +typedef enum { + NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME, + NV_EVO_INFOFRAME_TRANSMIT_CONTROL_INIT = + NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME, + NV_EVO_INFOFRAME_TRANSMIT_CONTROL_SINGLE_FRAME, +} NvEvoInfoFrameTransmitControl; + +typedef const struct _nv_evo_hal { + void (*SetRasterParams) (NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NvU8 tilePosition, + const NVDscInfoEvoRec *pDscInfo, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState); + void (*SetProcAmp) (NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState); + void (*SetHeadControl) (NVDevEvoPtr, int sd, int head, + NVEvoUpdateState *updateState); + void (*SetHeadRefClk) (NVDevEvoPtr, int head, NvBool external, + NVEvoUpdateState *updateState); + void (*HeadSetControlOR) (NVDevEvoPtr pDevEvo, + const int head, + const NVHwModeTimingsEvo *pTimings, + const enum nvKmsPixelDepth pixelDepth, + const NvBool colorSpaceOverride, + NVEvoUpdateState *updateState); + void (*ORSetControl) (NVDevEvoPtr pDevEvo, + const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask, + NVEvoUpdateState *updateState); + void (*HeadSetDisplayId) (NVDevEvoPtr pDevEvo, + const NvU32 head, const NvU32 displayId, + NVEvoUpdateState *updateState); + NvBool (*SetUsageBounds) (NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState); + void (*Update) (NVDevEvoPtr, + const NVEvoUpdateState *updateState, + NvBool releaseElv); + void (*IsModePossible) (NVDispEvoPtr, + const NVEvoIsModePossibleDispInput *, + NVEvoIsModePossibleDispOutput *); + void (*PrePostIMP) (NVDispEvoPtr, NvBool isPre); + void (*SetNotifier) (NVDevEvoRec *pDevEvo, + const NvBool notify, + const NvBool awaken, + const NvU32 notifier, + NVEvoUpdateState *updateState); + NvBool (*GetCapabilities) (NVDevEvoPtr); + void (*Flip) (NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition); + void (*FlipTransitionWAR) (NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const NVEvoSubDevHeadStateRec *pSdHeadState, + const NVFlipEvoHwState *pFlipState, + NVEvoUpdateState *updateState); + void (*FillLUTSurface) (NVEvoLutEntryRec *pLUTBuffer, + const NvU16 *red, + const NvU16 *green, + const NvU16 *blue, + int nColorMapEntries, int depth); + void (*SetOutputLut) (NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const NVFlipLutHwState *pOutputLut, + NvU32 fpNormScale, + NVEvoUpdateState *updateState, + NvBool bypassComposition); + void (*SetOutputScaler) (const NVDispEvoRec *pDispEvo, const NvU32 head, + const NvU32 imageSharpeningValue, + NVEvoUpdateState *updateState); + void (*SetViewportPointIn) (NVDevEvoPtr pDevEvo, const int head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState); + void (*SetViewportInOut) (NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortOutMin, + const NVHwModeViewPortEvo *pViewPortOut, + const NVHwModeViewPortEvo *pViewPortOutMax, + NVEvoUpdateState *updateState); + void (*SetCursorImage) (NVDevEvoPtr pDevEvo, const int head, + const NVSurfaceEvoRec *, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams); + NvBool (*ValidateCursorSurface)(const NVDevEvoRec *pDevEvo, + const 
NVSurfaceEvoRec *pSurfaceEvo); + NvBool (*ValidateWindowFormat)(const enum NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut); + void (*InitCompNotifier) (const NVDispEvoRec *pDispEvo, int idx); + NvBool (*IsCompNotifierComplete) (NVDispEvoPtr pDispEvo, int idx); + void (*WaitForCompNotifier) (const NVDispEvoRec *pDispEvo, int idx); + void (*SetDither) (NVDispEvoPtr pDispEvo, const int head, + const NvBool enabled, const NvU32 type, + const NvU32 algo, + NVEvoUpdateState *updateState); + void (*SetStallLock) (NVDispEvoPtr pDispEvo, const int head, + NvBool enable, NVEvoUpdateState *updateState); + void (*SetDisplayRate) (NVDispEvoPtr pDispEvo, const int head, + NvBool enable, + NVEvoUpdateState *updateState, + NvU32 timeoutMicroseconds); + void (*InitChannel) (NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel); + void (*InitDefaultLut) (NVDevEvoPtr pDevEvo); + void (*InitWindowMapping) (const NVDispEvoRec *pDispEvo, + NVEvoModesetUpdateState *pModesetUpdateState); + NvBool (*IsChannelIdle) (NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd, + NvBool *result); + NvBool (*IsChannelMethodPending)(NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd, + NvBool *result); + NvBool (*ForceIdleSatelliteChannel)(NVDevEvoPtr, + const NVEvoIdleChannelState *idleChannelState); + NvBool (*ForceIdleSatelliteChannelIgnoreLock)(NVDevEvoPtr, + const NVEvoIdleChannelState *idleChannelState); + + void (*AccelerateChannel)(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NvU32 sd, + const NvBool trashPendingMethods, + const NvBool unblockMethodsInExecutation, + NvU32 *pOldAccelerators); + + void (*ResetChannelAccelerators)(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NvU32 sd, + const NvBool trashPendingMethods, + const NvBool unblockMethodsInExecutation, + NvU32 oldAccelerators); + + NvBool (*AllocRmCtrlObject) (NVDevEvoPtr); + void (*FreeRmCtrlObject) (NVDevEvoPtr); + void (*SetImmPointOut) (NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd, + NVEvoUpdateState *updateState, + NvU16 x, NvU16 y); + void (*StartCRC32Capture) (NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NVConnectorEvoPtr pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + NvU32 head, + NvU32 sd, + NVEvoUpdateState *updateState /* out */); + void (*StopCRC32Capture) (NVDevEvoPtr pDevEvo, + NvU32 head, + NVEvoUpdateState *updateState /* out */); + NvBool (*QueryCRC32) (NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NvU32 sd, + NvU32 entry_count, + CRC32NotifierCrcOut *crc32 /* out */, + NvU32 *numCRC32 /* out */); + void (*GetScanLine) (const NVDispEvoRec *pDispEvo, + const NvU32 head, + NvU16 *pScanLine, + NvBool *pInBlankingPeriod); + void (*ConfigureVblankSyncObject) (NVDevEvoPtr pDevEvo, + NvU16 rasterLine, + NvU32 head, + NvU32 semaphoreIndex, + const NVSurfaceDescriptor *pSurfaceDesc, + NVEvoUpdateState* pUpdateState); + + void (*SetDscParams) (const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVDscInfoEvoRec *pDscInfo, + const enum nvKmsPixelDepth pixelDepth); + + void (*EnableMidFrameAndDWCFWatermark)(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + NvBool enable, + NVEvoUpdateState *pUpdateState); + + NvU32 (*GetActiveViewportOffset)(NVDispEvoRec *pDispEvo, NvU32 head); + + void (*ClearSurfaceUsage) (NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo); + + NvBool (*ComputeWindowScalingTaps)(const NVDevEvoRec *pDevEvo, + const NVEvoChannel *pChannel, + NVFlipChannelEvoHwState *pHwState); + + const NVEvoScalerCaps* (*GetWindowScalingCaps)(const NVDevEvoRec *pDevEvo); + + void 
(*SetMergeMode)(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVEvoMergeMode mode, + NVEvoUpdateState* pUpdateState); + void (*SendHdmiInfoFrame)(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvEvoInfoFrameTransmitControl transmitCtrl, + const NVT_INFOFRAME_HEADER *pInfoFrameHeader, + const NvU32 infoFrameSize, + NvBool needChecksum); + void (*DisableHdmiInfoFrame)(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvU8 nvtInfoFrameType); + void (*SendDpInfoFrameSdp)(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvEvoInfoFrameTransmitControl transmitCtrl, + const DPSDP_DESCRIPTOR *sdp); + + NvU32 (*AllocSurfaceDescriptor) (NVDevEvoPtr pDevEvo, + NVSurfaceDescriptor *pSurfaceDesc, + NvU32 memoryHandle, + NvU32 localCtxDmaFlags, + NvU64 limit, + NvBool mapToDisplayRm); + + void (*FreeSurfaceDescriptor) (NVDevEvoPtr pDevEvo, + NvU32 deviceHandle, + NVSurfaceDescriptor *pSurfaceDesc); + + NvU32 (*BindSurfaceDescriptor) (NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NVSurfaceDescriptor *pSurfaceDesc); + + void (*SetTmoLutSurfaceAddress) (const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 offset); + + void (*SetILUTSurfaceAddress) (const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 offset); + + void (*SetISOSurfaceAddress) (const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 offset, + NvU32 ctxDmaIdx, + NvBool isBlocklinear); + + void (*SetCoreNotifierSurfaceAddressAndControl) (const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 notifierOffset, + NvU32 ctrlVal); + + void (*SetWinNotifierSurfaceAddressAndControl) (const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 notifierOffset, + NvU32 ctrlVal); + + void (*SetSemaphoreSurfaceAddressAndControl) (const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 semaphoreOffset, + NvU32 ctrlVal); + + void (*SetAcqSemaphoreSurfaceAddressAndControl) (const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 semaphoreOffset, + NvU32 ctrlVal); + + struct { + NvU32 supportsNonInterlockedUsageBoundsUpdate :1; + NvU32 supportsDisplayRate :1; + NvU32 supportsFlipLockRGStatus :1; + NvU32 needDefaultLutSurface :1; + NvU32 hasUnorm16OLUT :1; + NvU32 supportsImageSharpening :1; + NvU32 supportsHDMIVRR :1; + NvU32 supportsCoreChannelSurface :1; + NvU32 supportsHDMIFRL :1; + NvU32 supportsSetStorageMemoryLayout :1; + NvU32 supportsIndependentAcqRelSemaphore :1; + NvU32 supportsCoreLut :1; + NvU32 supportsSynchronizedOverlayPositionUpdate :1; + NvU32 supportsVblankSyncObjects :1; + NvU32 requiresScalingTapsInBothDimensions :1; + NvU32 supportsMergeMode :1; + NvU32 supportsHDMI10BPC :1; + NvU32 supportsDPAudio192KHz :1; + NvU32 supportsInputColorSpace :1; + NvU32 supportsInputColorRange :1; + NvU32 supportsYCbCr422OverHDMIFRL :1; + + NvU32 supportedDitheringModes; + size_t impStructSize; + NVEvoScalerTaps minScalerTaps; + NvU64 xEmulatedSurfaceMemoryFormats; + } caps; +} NVEvoHAL, *NVEvoHALPtr; + +typedef const struct _nv_evo_cursor_hal { + NvU32 klass; + + void (*MoveCursor) (NVDevEvoPtr, NvU32 sd, NvU32 head, + NvS16 x, NvS16 y); + void (*ReleaseElv) (NVDevEvoPtr, NvU32 sd, NvU32 head); + + struct { + NvU16 maxSize; + } caps; +} NVEvoCursorHAL, 
*NVEvoCursorHALPtr; + +NvU32 nvEvoGetHeadSetStoragePitchValue(const NVDevEvoRec *pDevEvo, + enum NvKmsSurfaceMemoryLayout layout, + NvU32 pitch); + +NvBool nvEvoGetHeadSetControlCursorValue90(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo, + NvU32 *pValue); + +static inline NvBool nvEvoScalingUsageBoundsEqual( + const struct NvKmsScalingUsageBounds *a, + const struct NvKmsScalingUsageBounds *b) +{ + return (a->maxVDownscaleFactor == b->maxVDownscaleFactor) && + (a->maxHDownscaleFactor == b->maxHDownscaleFactor) && + (a->vTaps == b->vTaps) && + (a->vUpscalingAllowed == b->vUpscalingAllowed); +} + +static inline NvBool +nvEvoLayerUsageBoundsEqual(const struct NvKmsUsageBounds *a, + const struct NvKmsUsageBounds *b, + const NvU32 layer) +{ + return (a->layer[layer].usable == b->layer[layer].usable) && + (a->layer[layer].supportedSurfaceMemoryFormats == + b->layer[layer].supportedSurfaceMemoryFormats) && + nvEvoScalingUsageBoundsEqual(&a->layer[layer].scaling, + &b->layer[layer].scaling); +} + +static inline void nvAssignHwHeadsMaskApiHeadState( + NVDispApiHeadStateEvoRec *pApiHeadState, + const NvU32 hwHeadsMask) +{ + pApiHeadState->hwHeadsMask = hwHeadsMask; + pApiHeadState->attributes.numberOfHardwareHeadsUsed = + nvPopCount32(hwHeadsMask); +} + +typedef struct _NVVblankSemControlHeadEntryRec { + NVListRec listEntry; + NvU32 previousRequestCounter; + NvU64 previousVblankCount; + struct NvKmsVblankSemControlDataOneHead *pDataOneHead; +} NVVblankSemControlHeadEntry; + +typedef struct _NVVblankSemControl { + NvU32 dispIndex; + NvU32 apiHeadMask; + NVSurfaceEvoRec *pSurfaceEvo; + NVVblankSemControlHeadEntry headEntry[NV_MAX_HEADS]; +} NVVblankSemControl; + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_TYPES_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-utils-flip.h b/src/nvidia-modeset/include/nvkms-utils-flip.h new file mode 100644 index 0000000..ac736d8 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-utils-flip.h @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_FLIP_UTILS_H__ +#define __NVKMS_FLIP_UTILS_H__ + + +#include "nvkms-types.h" + +NvBool nvAssignSurfaceArray( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvKmsSurfaceHandle surfaceHandles[NVKMS_MAX_EYES], + const NvBool isUsedByCursorChannel, + const NvBool isUsedByLayerChannel, + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES]); + +NvBool nvAssignNIsoEvoHwState( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const struct NvKmsNIsoSurface *pParamsNIso, + const NvBool notifier, /* TRUE=notifier; FALSE=semaphore */ + const NvU32 layer, + NVFlipNIsoSurfaceEvoHwState *pNIsoState); + +NvBool nvAssignCompletionNotifierEvoHwState( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const struct NvKmsCompletionNotifierDescription *pParamsNotif, + const NvU32 layer, + NVFlipCompletionNotifierEvoHwState *pNotif); + +NvBool nvAssignSemaphoreEvoHwState( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvU32 layer, + const NvU32 sd, + const struct NvKmsChannelSyncObjects *pChannelSyncObjects, + NVFlipSyncObjectEvoHwState *pFlipSyncObject); + +NvBool nvValidatePerLayerCompParams( + const struct NvKmsCompositionParams *pCompParams, + const struct NvKmsCompositionCapabilities *pCaps, + NVSurfaceEvoPtr pSurfaceEvo); + +NvBool +nvAssignCursorSurface(const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const struct NvKmsSetCursorImageCommonParams *pImgParams, + NVSurfaceEvoPtr *pSurfaceEvo); + + +#endif /* __NVKMS_FLIP_UTILS_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-utils.h b/src/nvidia-modeset/include/nvkms-utils.h new file mode 100644 index 0000000..d9505f5 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-utils.h @@ -0,0 +1,288 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_UTILS_H__ +#define __NVKMS_UTILS_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvidia-modeset-os-interface.h" + +/*! + * Subtract B from A, and handle wrap around. + * + * This is useful for cases where A is a number that is incremented and wrapped; + * e.g., + * + * a = (a + 1) % max; + * + * and we want to subtract some amount from A to get one of its previous values. 
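+ *
+ * For example, with max == 10, a counter currently at a == 2 held the
+ * value A_minus_b_with_wrap_U8(2, 5, 10) == (2 + 10 - 5) % 10 == 7
+ * five increments ago.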
+ */ +static inline NvU8 A_minus_b_with_wrap_U8(NvU8 a, NvU8 b, NvU8 max) +{ + return (a + max - b) % max; +} + +/*! + * Return whether (A + B) > C, avoiding integer overflow in the addition. + */ +static inline NvBool A_plus_B_greater_than_C_U16(NvU16 a, NvU16 b, NvU16 c) +{ + return (NV_U16_MAX - a < b) || ((a + b) > c); +} + +static inline NvBool A_plus_B_greater_than_C_U64(NvU64 a, NvU64 b, NvU64 c) +{ + return (NV_U64_MAX - a < b) || ((a + b) > c); +} + +static inline NvS32 clamp_S32(NvS32 val, NvS32 lo, NvS32 hi) +{ + if (val < lo) { + return lo; + } else if (val > hi) { + return hi; + } else { + return val; + } +} + +/*! + * Return whether the bitmask contains bits greater than or equal to + * the maximum. + */ +static inline NvBool nvHasBitAboveMax(NvU32 bitmask, NvU8 max) +{ + nvAssert(max <= 32); + if (max == 32) { + return FALSE; + } + return (bitmask & ~((1 << max) - 1)) != 0; +} + +static inline NvU32 nvPackNvU32(NvU8 a, NvU8 b, NvU8 c, NvU8 d) +{ + return (a << 24) | (b << 16) | (c << 8) | d; +} + +/*! + * Check if a timeout is exceeded. + * + * This is intended to be used when busy waiting in a loop, like this: + * + * NvU64 startTime = 0; + * + * do { + * if (SOME-CONDITION) { + * break; + * } + * + * if (nvExceedsTimeoutUSec(pDevEvo, &startTime, TIMEOUT-IN-USEC)) { + * break; + * } + * + * nvkms_yield(); + * + * } while (TRUE); + * + * The caller should zero-initialize startTime, and nvExceedsTimeoutUSec() will + * set startTime to the starting time on the first call. This is structured + * this way to avoid the nvkms_get_usec() call in the common case where + * SOME-CONDITION is true on the first iteration (nvkms_get_usec() is not + * expected to be a large penalty, but it still seems nice to avoid it when not + * needed). + */ +static inline NvBool nvExceedsTimeoutUSec( + const NVDevEvoRec *pDevEvo, + NvU64 *pStartTime, + NvU64 timeoutPeriod) +{ + const NvU64 currentTime = nvkms_get_usec(); + + if (nvIsEmulationEvo(pDevEvo) && !nvIsDfpgaEvo(pDevEvo)) { + timeoutPeriod *= 100; + } + + if (*pStartTime == 0) { + *pStartTime = currentTime; + return FALSE; + } + + if (currentTime < *pStartTime) { /* wraparound?! */ + return TRUE; + } + + return (currentTime - *pStartTime) > timeoutPeriod; +} + +/*! + * Return a non-NULL string. + * + * The first argument, stringMightBeNull, could be NULL. In which + * case, return the second argument, safeString, which the caller + * should ensure is not NULL (e.g., by providing a literal). + * + * This is intended as a convenience for situations like this: + * + * char *s = FunctionThatMightReturnNull(); + * printf("%s\n", nvSafeString(s, "stringLiteral")); + */ +static inline const char *nvSafeString(char *stringMightBeNull, + const char *safeString) +{ + return (stringMightBeNull != NULL) ? 
stringMightBeNull : safeString; +} + +static inline NvU64 nvCtxDmaOffsetFromBytes(NvU64 ctxDmaOffset) +{ + nvAssert((ctxDmaOffset & ((1 << NV_SURFACE_OFFSET_ALIGNMENT_SHIFT) - 1)) + == 0); + + return (ctxDmaOffset >> 8); +} + +NvU8 nvPixelDepthToBitsPerComponent(enum nvKmsPixelDepth pixelDepth); + +typedef enum { + EVO_LOG_WARN, + EVO_LOG_ERROR, + EVO_LOG_INFO, +} NVEvoLogType; + +void *nvInternalAlloc(size_t size, NvBool zero); +void *nvInternalRealloc(void *ptr, size_t size); +void nvInternalFree(void *ptr); +char *nvInternalStrDup(const char *str); +NvBool nvGetRegkeyValue(const NVDevEvoRec *pDevEvo, + const char *key, NvU32 *val); + +#if defined(DEBUG) + +void nvReportUnfreedAllocations(void); + +void *nvDebugAlloc(size_t size, int line, const char *file); +void *nvDebugCalloc(size_t nmemb, size_t size, int line, const char *file); +void *nvDebugRealloc(void *ptr, size_t size, int line, const char *file); +void nvDebugFree(void *ptr); +char *nvDebugStrDup(const char *str, int line, const char *file); + +#define nvAlloc(s) nvDebugAlloc((s), __LINE__, __FILE__) +#define nvCalloc(n,s) nvDebugCalloc((n), (s), __LINE__, __FILE__) +#define nvFree(p) nvDebugFree(p) +#define nvRealloc(p,s) nvDebugRealloc((p), (s), __LINE__, __FILE__) +#define nvStrDup(s) nvDebugStrDup((s), __LINE__, __FILE__) + +#else + +#define nvAlloc(s) nvInternalAlloc((s), FALSE) +#define nvCalloc(n,s) nvInternalAlloc((n)*(s), TRUE) +#define nvRealloc(p,s) nvInternalRealloc((p),(s)) +#define nvFree(s) nvInternalFree(s) +#define nvStrDup(s) nvInternalStrDup(s) + +#endif + +void nvVEvoLog(NVEvoLogType logType, NvU8 gpuLogIndex, + const char *fmt, va_list ap); + +void nvEvoLogDev(const NVDevEvoRec *pDevEvo, NVEvoLogType logType, + const char *fmt, ...) + __attribute__((format (printf, 3, 4))); + +void nvEvoLogDisp(const NVDispEvoRec *pDispEvo, NVEvoLogType logType, + const char *fmt, ...) + __attribute__((format (printf, 3, 4))); + +void nvEvoLog(NVEvoLogType logType, const char *fmt, ...) + __attribute__((format (printf, 2, 3))); + + + +#if defined(DEBUG) + +void nvEvoLogDebug(NVEvoLogType logType, const char *fmt, ...) + __attribute__((format (printf, 2, 3))); + +void nvEvoLogDevDebug(const NVDevEvoRec *pDevEvo, NVEvoLogType logType, + const char *fmt, ...) + __attribute__((format (printf, 3, 4))); + +void nvEvoLogDispDebug(const NVDispEvoRec *pDispEvo, NVEvoLogType logType, + const char *fmt, ...) + __attribute__((format (printf, 3, 4))); + +#else + +# define nvEvoLogDebug(...) +# define nvEvoLogDevDebug(pDevEvo, ...) +# define nvEvoLogDispDebug(pDispEvo, ...) + +#endif /* DEBUG */ + +void nvInitInfoString(NVEvoInfoStringPtr pInfoString, + char *s, NvU16 totalLength); + +void nvEvoLogInfoStringRaw(NVEvoInfoStringPtr pInfoString, + const char *format, ...) + __attribute__((format (printf, 2, 3))); +void nvEvoLogInfoString(NVEvoInfoStringPtr pInfoString, + const char *format, ...) 
+ __attribute__((format (printf, 2, 3))); + + +typedef NvU32 NvKmsGenericHandle; + +NvBool nvEvoApiHandlePointerIsPresent(NVEvoApiHandlesPtr pEvoApiHandles, + void *pointer); +NvKmsGenericHandle nvEvoCreateApiHandle(NVEvoApiHandlesPtr pEvoApiHandles, + void *pointer); +void *nvEvoGetPointerFromApiHandle(const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsGenericHandle handle); +void *nvEvoGetPointerFromApiHandleNext(const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsGenericHandle *pHandle); +void nvEvoDestroyApiHandle(NVEvoApiHandlesPtr pEvoApiHandles, + NvKmsGenericHandle handle); +NvBool nvEvoInitApiHandles(NVEvoApiHandlesPtr pEvoApiHandles, + NvU32 defaultSize); +void nvEvoDestroyApiHandles(NVEvoApiHandlesPtr pEvoApiHandles); + +#define FOR_ALL_POINTERS_IN_EVO_API_HANDLES(_pEvoApiHandles, \ + _pointer, _handle) \ + for ((_handle) = 0, \ + (_pointer) = nvEvoGetPointerFromApiHandleNext(_pEvoApiHandles, \ + &(_handle)); \ + (_pointer) != NULL; \ + (_pointer) = nvEvoGetPointerFromApiHandleNext(_pEvoApiHandles, \ + &(_handle))) + + + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_UTILS_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-vblank-sem-control.h b/src/nvidia-modeset/include/nvkms-vblank-sem-control.h new file mode 100644 index 0000000..f334c70 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-vblank-sem-control.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_VBLANK_SEM_CONTROL_H__ +#define __NVKMS_VBLANK_SEM_CONTROL_H__ + +#include "nvkms-types.h" + +NVVblankSemControl *nvEvoEnableVblankSemControl( + NVDevEvoRec *pDevEvo, + NVDispEvoRec *pDispEvo, + NVSurfaceEvoRec *pSurfaceEvo, + NvU64 surfaceOffset); + +NvBool nvEvoDisableVblankSemControl( + NVDevEvoRec *pDevEvo, + NVVblankSemControl *pVblankSemControl); + +NvBool nvEvoAccelVblankSemControls( + NVDevEvoPtr pDevEvo, + NVDispEvoRec *pDispEvo, + NvU32 apiHeadMask); + +#endif /* __NVKMS_VBLANK_SEM_CONTROL_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-vrr.h b/src/nvidia-modeset/include/nvkms-vrr.h new file mode 100644 index 0000000..47f7667 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-vrr.h @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_VRR_H__ +#define __NVKMS_VRR_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +enum NvKmsDpyVRRType +nvGetAllowedDpyVrrType(const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pTimings, + enum NvKmsStereoMode stereoMode, + const NvBool allowGsync, + const enum NvKmsAllowAdaptiveSync allowAdaptiveSync); +void nvAdjustHwModeTimingsForVrrEvo( + const NVDpyEvoRec *pDpyEvo, + const enum NvKmsDpyVRRType vrrType, + const NvU32 vrrOverrideMinRefreshRate, + NVHwModeTimingsEvoPtr pTimings); + +void nvAllocVrrEvo(NVDevEvoPtr pDevEvo); +void nvFreeVrrEvo(NVDevEvoPtr pDevEvo); +void nvDisableVrr(NVDevEvoPtr pDevEvo); +void nvEnableVrr(NVDevEvoPtr pDevEvo); +void nvCancelVrrFrameReleaseTimers(NVDevEvoPtr pDevEvo, + const NvU32 applyAllowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES]); +void nvSetVrrActive(NVDevEvoPtr pDevEvo, + const NvU32 applyAllowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES], + const NvU32 vrrActiveApiHeadMasks[NVKMS_MAX_SUBDEVICES]); +void nvApplyVrrBaseFlipOverrides(const NVDispEvoRec *pDispEvo, NvU32 head, + const NVFlipChannelEvoHwState *pOld, + NVFlipChannelEvoHwState *pNew); +enum NvKmsVrrFlipType nvGetActiveVrrType(const NVDevEvoRec *pDevEvo); +NvS32 nvIncVrrSemaphoreIndex(NVDevEvoPtr pDevEvo, + const NvU32 applyAllowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES]); +void nvTriggerVrrUnstallMoveCursor(NVDispEvoPtr pDispEvo); +void nvTriggerVrrUnstallSetCursorImage(NVDispEvoPtr pDispEvo, + NvBool elvReleased); +void nvGetDpyMinRefreshRateValidValues( + const NVHwModeTimingsEvo *pTimings, + const enum NvKmsDpyVRRType vrrType, + const NvU32 edidTimeoutMicroseconds, + NvU32 *minMinRefreshRate, + NvU32 *maxMinRefreshRate); + +NvBool nvDispSupportsVrr(const NVDispEvoRec *pDispEvo); + +NvBool nvExportVrrSemaphoreSurface(const NVDevEvoRec *pDevEvo, int fd); + +void nvVrrSignalSemaphore(NVDevEvoPtr pDevEvo, NvS32 vrrSemaphoreIndex); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_VRR_H__ */ diff --git a/src/nvidia-modeset/interface/nvkms-api-types.h b/src/nvidia-modeset/interface/nvkms-api-types.h new file mode 100644 index 0000000..ff98237 --- /dev/null +++ b/src/nvidia-modeset/interface/nvkms-api-types.h @@ -0,0 +1,788 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(NVKMS_API_TYPES_H)
+#define NVKMS_API_TYPES_H
+
+#include <nvtypes.h>
+#include <nvmisc.h>
+#include <nvlimits.h>
+
+#define NVKMS_MAX_SUBDEVICES            NV_MAX_SUBDEVICES
+#define NVKMS_MAX_HEADS_PER_DISP        NV_MAX_HEADS
+
+#define NVKMS_LEFT                      0
+#define NVKMS_RIGHT                     1
+#define NVKMS_MAX_EYES                  2
+
+#define NVKMS_MAIN_LAYER                0
+#define NVKMS_OVERLAY_LAYER             1
+#define NVKMS_MAX_LAYERS_PER_HEAD       8
+
+#define NVKMS_MAX_PLANES_PER_SURFACE    3
+
+#define NVKMS_DP_ADDRESS_STRING_LENGTH  64
+
+#define NVKMS_DEVICE_ID_TEGRA           0x0000ffff
+
+#define NVKMS_MAX_SUPERFRAME_VIEWS      4
+
+#define NVKMS_LOG2_LUT_ARRAY_SIZE       10
+#define NVKMS_LUT_ARRAY_SIZE            (1 << NVKMS_LOG2_LUT_ARRAY_SIZE)
+
+#define NVKMS_OLUT_FP_NORM_SCALE_DEFAULT 0xffffffff
+
+typedef NvU32 NvKmsDeviceHandle;
+typedef NvU32 NvKmsDispHandle;
+typedef NvU32 NvKmsConnectorHandle;
+typedef NvU32 NvKmsSurfaceHandle;
+typedef NvU32 NvKmsFrameLockHandle;
+typedef NvU32 NvKmsDeferredRequestFifoHandle;
+typedef NvU32 NvKmsSwapGroupHandle;
+typedef NvU32 NvKmsVblankSyncObjectHandle;
+typedef NvU32 NvKmsVblankSemControlHandle;
+
+struct NvKmsSize {
+    NvU16 width;
+    NvU16 height;
+};
+
+struct NvKmsPoint {
+    NvU16 x;
+    NvU16 y;
+};
+
+struct NvKmsSignedPoint {
+    NvS16 x;
+    NvS16 y;
+};
+
+struct NvKmsRect {
+    NvU16 x;
+    NvU16 y;
+    NvU16 width;
+    NvU16 height;
+};
+
+/*
+ * A 3x3 row-major matrix.
+ *
+ * The elements are 32-bit single-precision IEEE floating point values. The
+ * floating point bit pattern should be stored in NvU32s to be passed into the
+ * kernel.
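+ *
+ * For example (an illustrative sketch, not part of this interface), a
+ * client might pack a float into a matrix element like this:
+ *
+ *   float f = 1.0f;
+ *   NvU32 bits;
+ *   memcpy(&bits, &f, sizeof(bits)); // reinterpret the float's bit pattern
+ *   matrix.m[0][0] = bits;           // 1.0f == 0x3F800000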
+ */ +struct NvKmsMatrix { + NvU32 m[3][3]; +}; + +typedef enum { + NVKMS_CONNECTOR_TYPE_DP = 0, + NVKMS_CONNECTOR_TYPE_VGA = 1, + NVKMS_CONNECTOR_TYPE_DVI_I = 2, + NVKMS_CONNECTOR_TYPE_DVI_D = 3, + NVKMS_CONNECTOR_TYPE_ADC = 4, + NVKMS_CONNECTOR_TYPE_LVDS = 5, + NVKMS_CONNECTOR_TYPE_HDMI = 6, + NVKMS_CONNECTOR_TYPE_USBC = 7, + NVKMS_CONNECTOR_TYPE_DSI = 8, + NVKMS_CONNECTOR_TYPE_DP_SERIALIZER = 9, + NVKMS_CONNECTOR_TYPE_UNKNOWN = 10, + NVKMS_CONNECTOR_TYPE_MAX = NVKMS_CONNECTOR_TYPE_UNKNOWN, +} NvKmsConnectorType; + +static inline +const char *NvKmsConnectorTypeString(const NvKmsConnectorType connectorType) +{ + switch (connectorType) { + case NVKMS_CONNECTOR_TYPE_DP: return "DP"; + case NVKMS_CONNECTOR_TYPE_VGA: return "VGA"; + case NVKMS_CONNECTOR_TYPE_DVI_I: return "DVI-I"; + case NVKMS_CONNECTOR_TYPE_DVI_D: return "DVI-D"; + case NVKMS_CONNECTOR_TYPE_ADC: return "ADC"; + case NVKMS_CONNECTOR_TYPE_LVDS: return "LVDS"; + case NVKMS_CONNECTOR_TYPE_HDMI: return "HDMI"; + case NVKMS_CONNECTOR_TYPE_USBC: return "USB-C"; + case NVKMS_CONNECTOR_TYPE_DSI: return "DSI"; + case NVKMS_CONNECTOR_TYPE_DP_SERIALIZER: return "DP-SERIALIZER"; + default: break; + } + return "Unknown"; +} + +typedef enum { + NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA = 0, + NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS = 1, + NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS = 2, + NVKMS_CONNECTOR_SIGNAL_FORMAT_DP = 3, + NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI = 4, + NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN = 5, + NVKMS_CONNECTOR_SIGNAL_FORMAT_MAX = + NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN, +} NvKmsConnectorSignalFormat; + +/*! + * Description of Notifiers and Semaphores (Non-isochronous (NISO) surfaces). + * + * When flipping, the client can optionally specify a notifier and/or + * a semaphore to use with the flip. The surfaces used for these + * should be registered with NVKMS to get an NvKmsSurfaceHandle. + * + * NvKmsNIsoSurface::offsetInWords indicates the starting location, in + * 32-bit words, within the surface where EVO should write the + * notifier or semaphore. Note that only the first 4096 bytes of a + * surface can be used by semaphores or notifiers; offsetInWords must + * allow for the semaphore or notifier to be written within the first + * 4096 bytes of the surface. I.e., this must be satisfied: + * + * ((offsetInWords * 4) + elementSizeInBytes) <= 4096 + * + * Where elementSizeInBytes is: + * + * if NISO_FORMAT_FOUR_WORD*, elementSizeInBytes = 16 + * if NISO_FORMAT_LEGACY, + * if overlay && notifier, elementSizeInBytes = 16 + * else, elementSizeInBytes = 4 + * + * Note that different GPUs support different semaphore and notifier formats. + * Check NvKmsAllocDeviceReply::validNIsoFormatMask to determine which are + * valid for the given device. + * + * Note also that FOUR_WORD and FOUR_WORD_NVDISPLAY are the same size, but + * FOUR_WORD uses a format compatible with display class 907[ce], and + * FOUR_WORD_NVDISPLAY uses a format compatible with c37e (actually defined by + * the NV_DISP_NOTIFIER definition in clc37d.h). 
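+ *
+ * As a worked example (illustrative only): with one of the FOUR_WORD
+ * formats, elementSizeInBytes is 16, so the largest legal value of
+ * offsetInWords is (4096 - 16) / 4 = 1020.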
+ */ +enum NvKmsNIsoFormat { + NVKMS_NISO_FORMAT_LEGACY, + NVKMS_NISO_FORMAT_FOUR_WORD, + NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY, +}; + +enum NvKmsEventType { + NVKMS_EVENT_TYPE_DPY_CHANGED, + NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED, + NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED, + NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED, + NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED, + NVKMS_EVENT_TYPE_FLIP_OCCURRED, +}; + +enum NvKmsFlipResult { + NV_KMS_FLIP_RESULT_SUCCESS = 0, /* Success */ + NV_KMS_FLIP_RESULT_INVALID_PARAMS, /* Parameter validation failed */ + NV_KMS_FLIP_RESULT_IN_PROGRESS, /* Flip would fail because an outstanding + flip containing changes that cannot be + queued is in progress */ +}; + +typedef enum { + NV_EVO_SCALER_1TAP = 0, + NV_EVO_SCALER_2TAPS = 1, + NV_EVO_SCALER_3TAPS = 2, + NV_EVO_SCALER_5TAPS = 3, + NV_EVO_SCALER_8TAPS = 4, + NV_EVO_SCALER_TAPS_MIN = NV_EVO_SCALER_1TAP, + NV_EVO_SCALER_TAPS_MAX = NV_EVO_SCALER_8TAPS, +} NVEvoScalerTaps; + +/* This structure describes the scaling bounds for a given layer. */ +struct NvKmsScalingUsageBounds { + /* + * Maximum vertical downscale factor (scaled by 1024) + * + * For example, if the downscale factor is 1.5, then maxVDownscaleFactor + * would be 1.5 x 1024 = 1536. + */ + NvU16 maxVDownscaleFactor; + + /* + * Maximum horizontal downscale factor (scaled by 1024) + * + * See the example above for maxVDownscaleFactor. + */ + NvU16 maxHDownscaleFactor; + + /* Maximum vertical taps allowed */ + NVEvoScalerTaps vTaps; + + /* Whether vertical upscaling is allowed */ + NvBool vUpscalingAllowed; +}; + +struct NvKmsUsageBounds { + struct { + NvBool usable; + struct NvKmsScalingUsageBounds scaling; + NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8); + } layer[NVKMS_MAX_LAYERS_PER_HEAD]; +}; + +/*! + * Per-component arrays of NvU16s describing the LUT; used for both the input + * LUT and output LUT. + */ +struct NvKmsLutRamps { + NvU16 red[NVKMS_LUT_ARRAY_SIZE]; /*! in */ + NvU16 green[NVKMS_LUT_ARRAY_SIZE]; /*! in */ + NvU16 blue[NVKMS_LUT_ARRAY_SIZE]; /*! in */ +}; + +/* Datatypes for LUT capabilities */ +enum NvKmsLUTFormat { + /* + * Normalized fixed-point format mapping [0, 1] to [0x0, 0xFFFF]. + */ + NVKMS_LUT_FORMAT_UNORM16, + + /* + * Half-precision floating point. + */ + NVKMS_LUT_FORMAT_FP16, + + /* + * 14-bit fixed-point format required to work around hardware bug 813188. + * + * To convert from UNORM16 to UNORM14_WAR_813188: + * unorm14_war_813188 = ((unorm16 >> 2) & ~7) + 0x6000 + */ + NVKMS_LUT_FORMAT_UNORM14_WAR_813188 +}; + +enum NvKmsLUTVssSupport { + NVKMS_LUT_VSS_NOT_SUPPORTED, + NVKMS_LUT_VSS_SUPPORTED, + NVKMS_LUT_VSS_REQUIRED, +}; + +enum NvKmsLUTVssType { + NVKMS_LUT_VSS_TYPE_NONE, + NVKMS_LUT_VSS_TYPE_LINEAR, + NVKMS_LUT_VSS_TYPE_LOGARITHMIC, +}; + +struct NvKmsLUTCaps { + /*! Whether this layer or head on this device supports this LUT stage. */ + NvBool supported; + + /*! Whether this LUT supports VSS. */ + enum NvKmsLUTVssSupport vssSupport; + + /*! + * The type of VSS segmenting this LUT uses. + */ + enum NvKmsLUTVssType vssType; + + /*! + * Expected number of VSS segments. + */ + NvU32 vssSegments; + + /*! + * Expected number of LUT entries. + */ + NvU32 lutEntries; + + /*! + * Format for each of the LUT entries. 
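+     * For example (illustrative only): with NVKMS_LUT_FORMAT_UNORM16, a
+     * full-scale component is 0xFFFF; applying the UNORM14_WAR_813188
+     * conversion above to 0xFFFF gives ((0xFFFF >> 2) & ~7) + 0x6000 =
+     * 0x3FF8 + 0x6000 = 0x9FF8.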
+ */
+    enum NvKmsLUTFormat entryFormat;
+};
+
+/* each LUT entry uses this many bytes */
+#define NVKMS_LUT_CAPS_LUT_ENTRY_SIZE (4 * sizeof(NvU16))
+
+/* if the LUT surface uses VSS, size of the VSS header */
+#define NVKMS_LUT_VSS_HEADER_SIZE (4 * NVKMS_LUT_CAPS_LUT_ENTRY_SIZE)
+
+struct NvKmsLUTSurfaceParams {
+    NvKmsSurfaceHandle surfaceHandle;
+    NvU64 offset NV_ALIGN_BYTES(8);
+    NvU32 vssSegments;
+    NvU32 lutEntries;
+};
+
+/*
+ * A 3x4 row-major colorspace conversion matrix.
+ *
+ * The output color C' is the CSC matrix M times the column vector
+ * [ R, G, B, 1 ].
+ *
+ * Each entry in the matrix is a signed 2's-complement fixed-point number with
+ * 3 integer bits and 16 fractional bits.
+ */
+struct NvKmsCscMatrix {
+    NvS32 m[3][4];
+};
+
+#define NVKMS_IDENTITY_CSC_MATRIX           \
+    (struct NvKmsCscMatrix){{               \
+        { 0x10000, 0, 0, 0 },               \
+        { 0, 0x10000, 0, 0 },               \
+        { 0, 0, 0x10000, 0 }                \
+    }}
+
+/*!
+ * A color key match bit is used in the blend equations; one can select either
+ * the src or the dst color key when blending. An asserted key bit means
+ * match; a deasserted key bit means nomatch.
+ *
+ * The src color key uses the key bit from the current layer; the dst color
+ * key uses the key bit from the previous layer composition stage. The
+ * selected src or dst key bit is inherited by the blended pixel, where it
+ * serves as the dst color key for the next blending stage.
+ *
+ * src: Forward the color key match bit from the current layer pixel to the
+ * next layer composition stage.
+ *
+ * dst: Forward the color key match bit from the previous composition stage
+ * pixel to the next layer composition stage.
+ *
+ * disable: Forward "1" to the next layer composition stage as the color key.
+ */
+enum NvKmsCompositionColorKeySelect {
+    NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE = 0,
+    NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC,
+    NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST,
+};
+
+#define NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS 3
+
+/*!
+ * Composition modes used for surfaces in general.
+ * The various types of composition are:
+ *
+ * Opaque: source pixels are opaque regardless of alpha,
+ * and will occlude the destination pixel.
+ *
+ * Alpha blending: aka opacity, which could be specified
+ * for a surface in its entirety, or on a per-pixel basis.
+ *
+ * Non-premultiplied: alpha value applies to source pixel,
+ * and also counter-weighs the destination pixel.
+ * Premultiplied: alpha already applied to source pixel,
+ * so it only counter-weighs the destination pixel.
+ *
+ * Color keying: use a color key structure to decide
+ * the criteria for matching and compositing.
+ * (See NVColorKey below.)
+ */
+enum NvKmsCompositionBlendingMode {
+    /*!
+     * Modes that use no other parameters.
+     */
+    NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE,
+
+    /*!
+     * Mode that ignores both the per-pixel alpha provided by the client
+     * and the surfaceAlpha, making the source pixel totally transparent.
+     */
+    NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT,
+
+    /*!
+     * Modes that use the per-pixel alpha provided by the client;
+     * the surfaceAlpha must be set to 0.
+     */
+    NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA,
+    NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA,
+
+    /*!
+     * These use both the surface-wide and per-pixel alpha values.
+     * surfaceAlpha is treated as a numerator ranging from 0 to 255
+     * of a fraction whose denominator is 255.
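+     * For example (illustrative only): surfaceAlpha = 128 corresponds to a
+     * surface-wide opacity of 128/255 (roughly 0.5), which is combined with
+     * each pixel's own alpha value.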
+ */
+    NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA,
+    NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA,
+};
+
+static inline NvBool
+NvKmsIsCompositionModeUseAlpha(enum NvKmsCompositionBlendingMode mode)
+{
+    return mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA ||
+           mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA ||
+           mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA ||
+           mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA;
+}
+
+/*!
+ * Abstract description of a color key.
+ *
+ * a, r, g, and b are component values in the same width as the framebuffer
+ * values being scanned out.
+ *
+ * match[ARGB] defines whether that component is considered when matching the
+ * color key -- TRUE means that the value of the corresponding component must
+ * match the given value for the given pixel to be considered a 'key match';
+ * FALSE means that the value of that component is not a key match criterion.
+ */
+typedef struct {
+    NvU16 a, r, g, b;
+    NvBool matchA, matchR, matchG, matchB;
+} NVColorKey;
+
+/*!
+ * Describes the composition parameters for a single layer.
+ */
+struct NvKmsCompositionParams {
+    enum NvKmsCompositionColorKeySelect colorKeySelect;
+    NVColorKey colorKey;
+    /*
+     * It is possible to assign different blending modes for match pixels and
+     * nomatch pixels. blendingMode[0] is used to blend a pixel with the color
+     * key match bit "0", and blendingMode[1] is used to blend a pixel with
+     * the color key match bit "1".
+     *
+     * Because of hardware restrictions, however, match and nomatch pixels
+     * cannot use the blending modes PREMULT_ALPHA, NON_PREMULT_ALPHA,
+     * PREMULT_SURFACE_ALPHA, and NON_PREMULT_SURFACE_ALPHA at once.
+     */
+    enum NvKmsCompositionBlendingMode blendingMode[2];
+    NvU8 surfaceAlpha;  /* Applies to all pixels of the entire surface */
+    /*
+     * Defines the composition order. A smaller value moves the layer closer
+     * to the top (away from the background). The values need not be
+     * consecutive; the requirements are that the value be different for each
+     * of the layers owned by the head, and that the value for the main layer
+     * be the greatest one.
+     *
+     * The cursor always remains on top of all other layers; this parameter
+     * has no effect on the cursor. NVKMS assigns a default depth to each of
+     * the supported layers; by default, the depth of a layer is calculated as
+     * (NVKMS_MAX_LAYERS_PER_HEAD - index of the layer). If depth is set to
+     * '0' then the default depth value is used.
+     */
+    NvU8 depth;
+};
+
+/*!
+ * Describes the composition capabilities supported by the hardware for
+ * the cursor or a layer. It describes the supported color key selects and,
+ * for each supported color key select, the supported blending modes for
+ * match and nomatch pixels.
+ */
+struct NvKmsCompositionCapabilities {
+
+    struct {
+        /*
+         * A bitmask of the supported blending modes for match and nomatch
+         * pixels. It should be the bitwise 'or' of one or more
+         * NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_*) values.
+         */
+        NvU32 supportedBlendModes[2];
+    } colorKeySelect[NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS];
+
+    /*
+     * A bitmask of the supported color key selects.
+     *
+     * It should be the bitwise 'or' of one or more
+     * NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_*)
+     * values.
+     */
+    NvU32 supportedColorKeySelects;
+};
+
+struct NvKmsLayerCapabilities {
+    /*!
+     * Whether the layer supports window mode.
If window mode is supported, + * then clients can set the layer's dimensions so that they're smaller than + * the viewport, and can also change the output position of the layer to a + * non-(0, 0) position. + * + * NOTE: Dimension changes are currently unsupported for the main layer, + * and output position changes for the main layer are currently only + * supported via IOCTL_SET_LAYER_POSITION but not via flips. Support for + * these is coming soon, via changes to flip code. + */ + NvBool supportsWindowMode :1; + + /*! + * Whether layer supports ICtCp pipe. + */ + NvBool supportsICtCp :1; + + + /*! + * Describes the supported Color Key selects and blending modes for + * match and nomatch layer pixels. + */ + struct NvKmsCompositionCapabilities composition; + + /*! + * Which NvKmsSurfaceMemoryFormat enum values are supported by the NVKMS + * device on the given scanout surface layer. + * + * Iff a particular enum NvKmsSurfaceMemoryFormat 'value' is supported, + * then (1 << value) will be set in the appropriate bitmask. + * + * Note that these bitmasks just report the static SW/HW capabilities, + * and are a superset of the formats that IMP may allow. Clients are + * still expected to honor the NvKmsUsageBounds for each head. + */ + NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8); + + /* Capabilities for each LUT stage in the EVO3 precomp pipeline. */ + struct NvKmsLUTCaps ilut; + struct NvKmsLUTCaps tmo; +}; + +/*! + * Surface layouts. + * + * BlockLinear is the NVIDIA GPU native tiling format, arranging pixels into + * blocks or tiles for better locality during common GPU operations. + * + * Pitch is the naive "linear" surface layout with pixels laid out sequentially + * in memory line-by-line, optionally with some padding at the end of each line + * for alignment purposes. + */ +enum NvKmsSurfaceMemoryLayout { + NvKmsSurfaceMemoryLayoutBlockLinear = 0, + NvKmsSurfaceMemoryLayoutPitch = 1, +}; + +static inline const char *NvKmsSurfaceMemoryLayoutToString( + enum NvKmsSurfaceMemoryLayout layout) +{ + switch (layout) { + default: + return "Unknown"; + case NvKmsSurfaceMemoryLayoutBlockLinear: + return "BlockLinear"; + case NvKmsSurfaceMemoryLayoutPitch: + return "Pitch"; + } +} + +typedef enum { + MUX_STATE_GET = 0, + MUX_STATE_INTEGRATED = 1, + MUX_STATE_DISCRETE = 2, + MUX_STATE_UNKNOWN = 3, +} NvMuxState; + +enum NvKmsRotation { + NVKMS_ROTATION_0 = 0, + NVKMS_ROTATION_90 = 1, + NVKMS_ROTATION_180 = 2, + NVKMS_ROTATION_270 = 3, + NVKMS_ROTATION_MIN = NVKMS_ROTATION_0, + NVKMS_ROTATION_MAX = NVKMS_ROTATION_270, +}; + +struct NvKmsRRParams { + enum NvKmsRotation rotation; + NvBool reflectionX; + NvBool reflectionY; +}; + +/*! + * Convert each possible NvKmsRRParams to a unique integer [0..15], + * so that we can describe possible NvKmsRRParams with an NvU16 bitmask. + * + * E.g. + * rotation = 0, reflectionX = F, reflectionY = F == 0|0|0 == 0 + * ... + * rotation = 270, reflectionX = T, reflectionY = T == 3|4|8 == 15 + */ +static inline NvU8 NvKmsRRParamsToCapBit(const struct NvKmsRRParams *rrParams) +{ + NvU8 bitPosition = (NvU8)rrParams->rotation; + if (rrParams->reflectionX) { + bitPosition |= NVBIT(2); + } + if (rrParams->reflectionY) { + bitPosition |= NVBIT(3); + } + return bitPosition; +} + +/* + * NVKMS_MEMORY_ISO is used to tag surface memory that will be accessed via + * display's isochronous interface. Examples of this type of memory are pixel + * data and LUT entries. 
+ * + * NVKMS_MEMORY_NISO is used to tag surface memory that will be accessed via + * display's non-isochronous interface. Examples of this type of memory are + * semaphores and notifiers. + */ +typedef enum { + NVKMS_MEMORY_ISO = 0, + NVKMS_MEMORY_NISO = 1, +} NvKmsMemoryIsoType; + +typedef struct { + NvBool coherent; + NvBool noncoherent; +} NvKmsDispIOCoherencyModes; + +enum NvKmsInputColorRange { + /* + * If DEFAULT is provided, driver will assume full range for RGB formats + * and limited range for YUV formats. + */ + NVKMS_INPUT_COLOR_RANGE_DEFAULT = 0, + + NVKMS_INPUT_COLOR_RANGE_LIMITED = 1, + + NVKMS_INPUT_COLOR_RANGE_FULL = 2, +}; + +enum NvKmsInputColorSpace { + /* Unknown colorspace */ + NVKMS_INPUT_COLOR_SPACE_NONE = 0, + + NVKMS_INPUT_COLOR_SPACE_BT601 = 1, + NVKMS_INPUT_COLOR_SPACE_BT709 = 2, + NVKMS_INPUT_COLOR_SPACE_BT2020 = 3, + NVKMS_INPUT_COLOR_SPACE_BT2100 = NVKMS_INPUT_COLOR_SPACE_BT2020, + + NVKMS_INPUT_COLOR_SPACE_SCRGB = 4 +}; + +enum NvKmsInputTf { + NVKMS_INPUT_TF_LINEAR = 0, + NVKMS_INPUT_TF_PQ = 1 +}; + +enum NvKmsOutputColorimetry { + NVKMS_OUTPUT_COLORIMETRY_DEFAULT = 0, + + NVKMS_OUTPUT_COLORIMETRY_BT2100 = 1, +}; + +enum NvKmsOutputTf { + /* + * NVKMS itself won't apply any OETF (clients are still + * free to provide a custom OLUT) + */ + NVKMS_OUTPUT_TF_NONE = 0, + NVKMS_OUTPUT_TF_TRADITIONAL_GAMMA_SDR = 1, + NVKMS_OUTPUT_TF_PQ = 2, +}; + +/*! + * EOTF Data Byte 1 as per CTA-861-G spec. + * This is expected to match exactly with the spec. + */ +enum NvKmsInfoFrameEOTF { + NVKMS_INFOFRAME_EOTF_SDR_GAMMA = 0, + NVKMS_INFOFRAME_EOTF_HDR_GAMMA = 1, + NVKMS_INFOFRAME_EOTF_ST2084 = 2, + NVKMS_INFOFRAME_EOTF_HLG = 3, +}; + +/*! + * HDR Static Metadata Type1 Descriptor as per CEA-861.3 spec. + * This is expected to match exactly with the spec. + */ +struct NvKmsHDRStaticMetadata { + /*! + * Color primaries of the data. + * These are coded as unsigned 16-bit values in units of 0.00002, + * where 0x0000 represents zero and 0xC350 represents 1.0000. + */ + struct { + NvU16 x, y; + } displayPrimaries[3]; + + /*! + * White point of colorspace data. + * These are coded as unsigned 16-bit values in units of 0.00002, + * where 0x0000 represents zero and 0xC350 represents 1.0000. + */ + struct { + NvU16 x, y; + } whitePoint; + + /** + * Maximum mastering display luminance. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + NvU16 maxDisplayMasteringLuminance; + + /*! + * Minimum mastering display luminance. + * This value is coded as an unsigned 16-bit value in units of + * 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF + * represents 6.5535 cd/m2. + */ + NvU16 minDisplayMasteringLuminance; + + /*! + * Maximum content light level. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + NvU16 maxCLL; + + /*! + * Maximum frame-average light level. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + NvU16 maxFALL; +}; + +/*! + * A superframe is made of two or more video streams that are combined in + * a specific way. A DP serializer (an external device connected to a Tegra + * ARM SOC over DP or HDMI) can receive a video stream comprising multiple + * videos combined into a single frame and then split it into multiple + * video streams. 
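+ * For example (illustrative values only): a 3840x1080 superframe could
+ * carry two 1920x1080 views side by side, the first at x offset 0 and
+ * the second at x offset 1920.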
The following structure describes the number of views + * and dimensions of each view inside a superframe. + */ +struct NvKmsSuperframeInfo { + NvU8 numViews; + struct { + /* x offset inside superframe at which this view starts */ + NvU16 x; + + /* y offset inside superframe at which this view starts */ + NvU16 y; + + /* Horizontal active width in pixels for this view */ + NvU16 width; + + /* Vertical active height in lines for this view */ + NvU16 height; + } view[NVKMS_MAX_SUPERFRAME_VIEWS]; +}; + +/* Fields within NvKmsVblankSemControlDataOneHead::flags */ +#define NVKMS_VBLANK_SEM_CONTROL_SWAP_INTERVAL 15:0 + +struct NvKmsVblankSemControlDataOneHead { + NvU32 requestCounterAccel; + NvU32 requestCounter; + NvU32 flags; + + NvU32 semaphore; + NvU64 vblankCount NV_ALIGN_BYTES(8); +}; + +struct NvKmsVblankSemControlData { + struct NvKmsVblankSemControlDataOneHead head[NV_MAX_HEADS]; +}; + +#endif /* NVKMS_API_TYPES_H */ diff --git a/src/nvidia-modeset/interface/nvkms-api.h b/src/nvidia-modeset/interface/nvkms-api.h new file mode 100644 index 0000000..c1e74fa --- /dev/null +++ b/src/nvidia-modeset/interface/nvkms-api.h @@ -0,0 +1,4435 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_API_H) +#define NVKMS_API_H + +/* + * NVKMS API + * + * + * All file operations described in this header file go through a + * single device file that has system-wide scope. The individual + * ioctl request data structures specify the objects to which the + * request is targeted. + * + * + * OBJECTS + * + * The NVKMS API is organized into several objects: + * + * - A device, which corresponds to an RM device. This can either be + * a single GPU, or multiple GPUs linked into SLI. Each GPU is + * called a "subdevice". The subdevices used by an NVKMS device are + * reported in NvKmsAllocDeviceReply::subDeviceMask. + * + * A device is specified by a deviceHandle, returned by + * NVKMS_IOCTL_ALLOC_DEVICE. + * + * - A disp, which represents an individually programmable display + * engine of a GPU. In SLI Mosaic, there is one disp per physical + * GPU. In all other configurations there is one disp for the + * entire device. A disp is specified by a (deviceHandle, + * dispHandle) duple. A dispHandle is only unique within a single + * device: multiple devices may have disps with the same dispHandle + * value. 
+ * + * A disp represents one subdevice; disp index N corresponds to subdevice + * index N. + * + * - A connector, which represents an electrical connection to the + * GPU. E.g., a physical DVI-I connector has two NVKMS connector + * objects (a VGA NVKMS connector and a TMDS NVKMS connector). + * However, a physical DisplayPort connector has one NVKMS connector + * object, even if there is a tree of DisplayPort1.2 Multistream + * monitors connected to it. + * + * Connectors are associated with a specific disp. A connector is + * specified by a (deviceHandle, dispHandle, connectorHandle) + * triplet. A connectorHandle is only unique within a single disp: + * multiple disps may have connectors with the same connectorHandle + * value. + * + * - A dpy, which represents a connection of a display device to the + * system. Multiple dpys can map to the same connector in the case + * of DisplayPort1.2 MultiStream. A dpy is specified by a + * (deviceHandle, dispHandle, dpyId) triplet. A dpyId is only + * unique within a single disp: multiple disps may have dpys with + * the same dpyId value. + * + * - A surface, which represents memory to be scanned out. Surfaces + * should be allocated by resman, and then registered and + * unregistered with NVKMS. The NvKmsSurfaceHandle value of 0 is + * reserved to mean no surface. + * + * NVKMS clients should treat the device, disp, connector, and surface + * handles as opaque values. They are specific to the file descriptor + * through which a client allocated and queried them. Dpys should + * also be treated as opaque, though they can be passed between + * clients. + * + * NVKMS clients initialize NVKMS by allocating an NVKMS device. The + * device can either be a single GPU, or an SLI group. It is expected + * that the client has already attached/linked the GPUs through + * resman and created a resman device. + * + * NVKMS device allocation returns a device handle, the disp handles, + * and capabilities of the device. + * + * + * MODE VALIDATION + * + * When a client requests to set a mode via NVKMS_IOCTL_SET_MODE, + * NVKMS will validate the mode at that point in time, honoring the + * NvKmsModeValidationParams specified as part of the request. + * + * Clients can use NVKMS_IOCTL_VALIDATE_MODE to test if a mode is valid. + * + * Clients can use NVKMS_IOCTL_VALIDATE_MODE_INDEX to get the list of + * modes that NVKMS currently considers valid for the dpy (modes from + * the EDID, etc). + * + * IMPLEMENTATION NOTE: the same mode validation common code will be + * used in each of NVKMS_IOCTL_SET_MODE, NVKMS_IOCTL_VALIDATE_MODE, + * and NVKMS_IOCTL_VALIDATE_MODE_INDEX, but NVKMS won't generally maintain + * a "mode pool" with an exhaustive list of the allowable modes for a + * dpy. + * + * + * DYNAMIC DPY HANDLING + * + * Dynamic dpys (namely, DisplayPort multistream dpys) share the NVDpyId + * namespace with non-dynamic dpys on the same disp. However, dynamic dpys will + * not be listed in NvKmsQueryDispReply::validDpys. Instead, dynamic dpys are + * added and removed from the system dynamically. + * + * When a dynamic dpy is first connected, NVKMS will allocate a new NVDpyId for + * it and generate an NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED event. When the + * dynamic dpy is disconnected, NVKMS will generate an + * NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED event. Whether the corresponding + * NVDpyId is immediately freed and made available for subsequent dynamic dpys + * depends on client behavior. 
+ * + * Clients may require that a dynamic NVDpyId persist even after the dynamic dpy + * is disconnected. Clients who require this can use + * NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST. NVKMS will retain the NVDpyId + * until the dynamic dpy is disconnected and there are no clients who have + * declared "interest" on the particular dynamic dpy. While the NVDpyId + * persists, it will be used for any monitor that is connected at the same + * dynamic dpy address (i.e., port address, in the case of DP MST). + * + * + * FILE DESCRIPTOR HANDLING + * + * With the exception of NVDpyIds, all handles should be assumed to be + * specific to the current file descriptor on which the ioctls are + * performed. + * + * Multiple devices can be allocated on the same file descriptor. + * E.g., to drive the display of multiple GPUs. + * + * If a file descriptor is closed prematurely, either explicitly by + * the client or implicitly by the operating system because the client + * process was terminated, NVKMS will perform an + * NVKMS_IOCTL_FREE_DEVICE for any devices currently allocated by the + * client on the closed file descriptor. + * + * NVKMS file descriptors are normally used as the first argument of + * ioctl(2). However, NVKMS file descriptors are also used for + * granting surfaces (see NVKMS_IOCTL_GRANT_SURFACE) or permissions + * (see NVKMS_IOCTL_GRANT_PERMISSIONS). Any given NVKMS file + * descriptor can only be used for one of these uses. + * + * QUESTIONS: + * + * - Is there any reason for errors to be returned through a status field + * in the Param structures, rather than the ioctl(2) return value? + * + * - Is it too asymmetric that NVKMS_IOCTL_SET_MODE can set a + * mode across heads/disps, but other requests (e.g., + * NVKMS_IOCTL_SET_CURSOR_IMAGE) operate on a single head? + * + * + * IOCTL PARAMETER ORGANIZATION + * + * For table-driven processing of ioctls, it is useful for all ioctl + * parameters to follow the same convention: + * + * struct NvKmsFooRequest { + * (...) + * }; + * + * struct NvKmsFooReply { + * (...) + * }; + * + * struct NvKmsFooParams { + * struct NvKmsFooRequest request; //! in + * struct NvKmsFooReply reply; //! out + * }; + * + * I.e., all ioctl parameter structures NvKmsFooParams should have + * "request" and "reply" fields, with types "struct NvKmsFooRequest" + * and "struct NvKmsFooReply". C doesn't technically support empty + * structures, so the convention is to place a "padding" NvU32 in + * request or reply structures that would otherwise be empty. + */ +#include "nvtypes.h" +#include "nvlimits.h" +#include "nv_mig_types.h" +#include "nv_dpy_id.h" +#include "nv_mode_timings.h" +#include "nvkms-api-types.h" +#include "nvgputypes.h" /* NvGpuSemaphore */ +#include "nvkms-format.h" + +/* + * The NVKMS ioctl commands. See the ioctl parameter declarations + * later in this header file for an explanation of each ioctl command. 
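+ *
+ * As an illustrative sketch only (following the request/reply convention
+ * described above; the exact request encoding used by the driver is defined
+ * elsewhere, and NVKMS_IOCTL_FOO with its 'bar' field is hypothetical):
+ *
+ *   struct NvKmsFooParams params = { };
+ *   params.request.bar = 1;
+ *   if (ioctl(fd, NVKMS_IOCTL_FOO, &params) == 0) {
+ *       consume(params.reply);
+ *   }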
+ */ +enum NvKmsIoctlCommand { + NVKMS_IOCTL_ALLOC_DEVICE, + NVKMS_IOCTL_FREE_DEVICE, + NVKMS_IOCTL_QUERY_DISP, + NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA, + NVKMS_IOCTL_QUERY_CONNECTOR_DYNAMIC_DATA, + NVKMS_IOCTL_QUERY_DPY_STATIC_DATA, + NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA, + NVKMS_IOCTL_VALIDATE_MODE_INDEX, + NVKMS_IOCTL_VALIDATE_MODE, + NVKMS_IOCTL_SET_MODE, + NVKMS_IOCTL_SET_CURSOR_IMAGE, + NVKMS_IOCTL_MOVE_CURSOR, + NVKMS_IOCTL_SET_LUT, + NVKMS_IOCTL_CHECK_LUT_NOTIFIER, + NVKMS_IOCTL_IDLE_BASE_CHANNEL, + NVKMS_IOCTL_FLIP, + NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST, + NVKMS_IOCTL_REGISTER_SURFACE, + NVKMS_IOCTL_UNREGISTER_SURFACE, + NVKMS_IOCTL_GRANT_SURFACE, + NVKMS_IOCTL_ACQUIRE_SURFACE, + NVKMS_IOCTL_RELEASE_SURFACE, + NVKMS_IOCTL_SET_DPY_ATTRIBUTE, + NVKMS_IOCTL_GET_DPY_ATTRIBUTE, + NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES, + NVKMS_IOCTL_SET_DISP_ATTRIBUTE, + NVKMS_IOCTL_GET_DISP_ATTRIBUTE, + NVKMS_IOCTL_GET_DISP_ATTRIBUTE_VALID_VALUES, + NVKMS_IOCTL_QUERY_FRAMELOCK, + NVKMS_IOCTL_SET_FRAMELOCK_ATTRIBUTE, + NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE, + NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE_VALID_VALUES, + NVKMS_IOCTL_GET_NEXT_EVENT, + NVKMS_IOCTL_DECLARE_EVENT_INTEREST, + NVKMS_IOCTL_CLEAR_UNICAST_EVENT, + NVKMS_IOCTL_GET_3DVISION_DONGLE_PARAM_BYTES, + NVKMS_IOCTL_SET_3DVISION_AEGIS_PARAMS, + NVKMS_IOCTL_SET_LAYER_POSITION, + NVKMS_IOCTL_GRAB_OWNERSHIP, + NVKMS_IOCTL_RELEASE_OWNERSHIP, + NVKMS_IOCTL_GRANT_PERMISSIONS, + NVKMS_IOCTL_ACQUIRE_PERMISSIONS, + NVKMS_IOCTL_REVOKE_PERMISSIONS, + NVKMS_IOCTL_QUERY_DPY_CRC32, + NVKMS_IOCTL_REGISTER_DEFERRED_REQUEST_FIFO, + NVKMS_IOCTL_UNREGISTER_DEFERRED_REQUEST_FIFO, + NVKMS_IOCTL_ALLOC_SWAP_GROUP, + NVKMS_IOCTL_FREE_SWAP_GROUP, + NVKMS_IOCTL_JOIN_SWAP_GROUP, + NVKMS_IOCTL_LEAVE_SWAP_GROUP, + NVKMS_IOCTL_SET_SWAP_GROUP_CLIP_LIST, + NVKMS_IOCTL_GRANT_SWAP_GROUP, + NVKMS_IOCTL_ACQUIRE_SWAP_GROUP, + NVKMS_IOCTL_RELEASE_SWAP_GROUP, + NVKMS_IOCTL_SWITCH_MUX, + NVKMS_IOCTL_GET_MUX_STATE, + NVKMS_IOCTL_EXPORT_VRR_SEMAPHORE_SURFACE, + NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT, + NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT, + NVKMS_IOCTL_NOTIFY_VBLANK, + NVKMS_IOCTL_SET_FLIPLOCK_GROUP, + NVKMS_IOCTL_ENABLE_VBLANK_SEM_CONTROL, + NVKMS_IOCTL_DISABLE_VBLANK_SEM_CONTROL, + NVKMS_IOCTL_ACCEL_VBLANK_SEM_CONTROLS, + NVKMS_IOCTL_VRR_SIGNAL_SEMAPHORE, + NVKMS_IOCTL_FRAMEBUFFER_CONSOLE_DISABLED, +}; + + +#define NVKMS_NVIDIA_DRIVER_VERSION_STRING_LENGTH 32 +#define NVKMS_MAX_CONNECTORS_PER_DISP 16 +#define NVKMS_MAX_GPUS_PER_FRAMELOCK 4 +#define NVKMS_MAX_DEVICE_REGISTRY_KEYS 16 +#define NVKMS_MAX_DEVICE_REGISTRY_KEYNAME_LEN 32 +#define NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD 6 + + +/* + * There can be at most one SwapGroup per-head, per-disp (and, + * in the extreme, there is one disp per-GPU). + */ +#define NVKMS_MAX_SWAPGROUPS (NVKMS_MAX_HEADS_PER_DISP * NV_MAX_DEVICES) + +#define NVKMS_MAX_VALID_SYNC_RANGES 8 + +#define NVKMS_DPY_NAME_SIZE 128 +#define NVKMS_GUID_SIZE 16 +#define NVKMS_3DVISION_DONGLE_PARAM_BYTES 20 +#define NVKMS_GPU_STRING_SIZE 80 + +#define NVKMS_VRR_SEMAPHORE_SURFACE_COUNT 256 +#define NVKMS_VRR_SEMAPHORE_SURFACE_SIZE (sizeof(NvU32) * NVKMS_VRR_SEMAPHORE_SURFACE_COUNT) + +/* + * The GUID string has the form: + * XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + * Two Xs per byte, plus four dashes and a NUL byte. + */ +#define NVKMS_GUID_STRING_SIZE ((NVKMS_GUID_SIZE * 2) + 5) + +#define NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH 2048 +#define NVKMS_EDID_INFO_STRING_LENGTH (32 * 1024) + +/*! + * A base EDID is 128 bytes, with 128 bytes per extension block. 
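+ * For example, a base block plus 15 extension blocks fills exactly
+ * 16 * 128 = 2048 bytes, so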
2048 + * should be large enough for any EDID we see. + */ +#define NVKMS_EDID_BUFFER_SIZE 2048 + +/*! + * Description of modetimings. + * + * YUV420 modes require special care since some GPUs do not support YUV420 + * scanout in hardware. When timings::yuv420Mode is NV_YUV420_SW, NVKMS will + * set a mode with horizontal values that are half of what are described in + * NvKmsMode, and not enable any color space conversion. When clients allocate + * a surface and populate it with content, the region of interest within the + * surface should be half the width of the NvKmsMode, and the surface content + * should be RGB->YUV color space converted, and decimated from 4:4:4 to 4:2:0. + * + * The NvKmsMode and viewPortOut, specified by the NVKMS client, + * should be in "full" horizontal space, but the surface and + * viewPortIn should be in "half" horizontal space. + */ +struct NvKmsMode { + NvModeTimings timings; + char name[32]; +}; + +/*! + * Mode validation override bit flags, for use in + * NvKmsModeValidationParams::overrides. + */ +enum NvKmsModeValidationOverrides { + NVKMS_MODE_VALIDATION_NO_MAX_PCLK_CHECK = (1 << 0), + NVKMS_MODE_VALIDATION_NO_EDID_MAX_PCLK_CHECK = (1 << 1), + NVKMS_MODE_VALIDATION_NO_HORIZ_SYNC_CHECK = (1 << 2), + NVKMS_MODE_VALIDATION_NO_VERT_REFRESH_CHECK = (1 << 3), + NVKMS_MODE_VALIDATION_NO_EDID_DFP_MAX_SIZE_CHECK = (1 << 4), + NVKMS_MODE_VALIDATION_NO_EXTENDED_GPU_CAPABILITIES_CHECK = (1 << 5), + NVKMS_MODE_VALIDATION_OBEY_EDID_CONTRADICTIONS = (1 << 6), + NVKMS_MODE_VALIDATION_NO_TOTAL_SIZE_CHECK = (1 << 7), + NVKMS_MODE_VALIDATION_NO_DUAL_LINK_DVI_CHECK = (1 << 8), + NVKMS_MODE_VALIDATION_NO_DISPLAYPORT_BANDWIDTH_CHECK = (1 << 9), + NVKMS_MODE_VALIDATION_ALLOW_NON_3DVISION_MODES = (1 << 10), + NVKMS_MODE_VALIDATION_ALLOW_NON_EDID_MODES = (1 << 11), + NVKMS_MODE_VALIDATION_ALLOW_NON_HDMI3D_MODES = (1 << 12), + NVKMS_MODE_VALIDATION_NO_MAX_SIZE_CHECK = (1 << 13), + NVKMS_MODE_VALIDATION_NO_HDMI2_CHECK = (1 << 14), + NVKMS_MODE_VALIDATION_NO_RRX1K_CHECK = (1 << 15), + NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS = (1 << 16), + NVKMS_MODE_VALIDATION_ALLOW_DP_INTERLACED = (1 << 17), + NVKMS_MODE_VALIDATION_NO_INTERLACED_MODES = (1 << 18), + NVKMS_MODE_VALIDATION_MAX_ONE_HARDWARE_HEAD = (1 << 19), +}; + +/*! + * Frequency information used during mode validation (for HorizSync + * and VertRefresh) can come from several possible sources. NVKMS + * selects the frequency information by prioritizing the input sources + * and then reports the selected source. + * + * Without client input, NVKMS will use frequency ranges from the + * EDID, if available. If there is no EDID, NVKMS will fall back to + * builtin conservative defaults. + * + * The client can specify frequency ranges that are used instead of + * anything in the EDID (_CLIENT_BEFORE_EDID), or frequency ranges + * that are used only if no EDID-reported ranges are available + * (_CLIENT_AFTER_EDID). + */ +enum NvKmsModeValidationFrequencyRangesSource { + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_NONE = 0, + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_BEFORE_EDID = 1, + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_EDID = 2, + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_AFTER_EDID = 3, + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CONSERVATIVE_DEFAULTS = 4, +}; + + +/*! + * Mode validation parameters. 
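+ *
+ * As an illustrative example (values assumed, not taken from this header):
+ * a monitor with a 30-83 kHz HorizSync range could be described in
+ * NvKmsModeValidationValidSyncs::horizSyncHz as numRanges = 1,
+ * range[0].low = 30000 and range[0].high = 83000 (both in Hz).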
+ */
+struct NvKmsModeValidationFrequencyRanges {
+    enum NvKmsModeValidationFrequencyRangesSource source;
+    NvU32 numRanges;
+    struct {
+        NvU32 high;
+        NvU32 low;
+    } range[NVKMS_MAX_VALID_SYNC_RANGES];
+};
+
+struct NvKmsModeValidationValidSyncs {
+
+    /*! If TRUE, ignore frequency information from the EDID. */
+    NvBool ignoreEdidSource;
+
+    /*! values are in Hz */
+    struct NvKmsModeValidationFrequencyRanges horizSyncHz;
+
+    /*! values are in 1/1000 Hz */
+    struct NvKmsModeValidationFrequencyRanges vertRefreshHz1k;
+};
+
+enum NvKmsStereoMode {
+    NVKMS_STEREO_DISABLED = 0,
+    NVKMS_STEREO_NVIDIA_3D_VISION,
+    NVKMS_STEREO_NVIDIA_3D_VISION_PRO,
+    NVKMS_STEREO_HDMI_3D,
+    NVKMS_STEREO_OTHER,
+};
+
+enum NvKmsDscMode {
+    NVKMS_DSC_MODE_DEFAULT = 0,
+    NVKMS_DSC_MODE_FORCE_ENABLE,
+    NVKMS_DSC_MODE_FORCE_DISABLE,
+};
+
+struct NvKmsModeValidationParams {
+    NvBool verboseModeValidation;
+    NvBool moreVerboseModeValidation;
+
+    /*!
+     * Normally, if a mode supports both YUV 4:2:0 and RGB 4:4:4,
+     * NVKMS will prefer RGB 4:4:4 if both the monitor and the GPU
+     * support it. Use preferYUV420 to override that preference.
+     */
+    NvBool preferYUV420;
+
+    enum NvKmsStereoMode stereoMode;
+    NvU32 overrides;
+
+    struct NvKmsModeValidationValidSyncs validSyncs;
+
+    /*!
+     * Normally, NVKMS will determine on its own whether to enable/disable
+     * Display Stream Compression (DSC). Use dscMode to force NVKMS to
+     * enable/disable DSC, when both the GPU and the display support it.
+     */
+    enum NvKmsDscMode dscMode;
+
+    /*!
+     * When enabled, Display Stream Compression (DSC) has an
+     * associated bits/pixel rate, which NVKMS normally computes.
+     * Use dscOverrideBitsPerPixelX16 to override the DSC bits/pixel rate.
+     * This is in units of 1/16 of a bit per pixel.
+     *
+     * This target bits/pixel rate should be >= 8.0 and <= 32.0, i.e. the
+     * valid bits/pixel values are members of the sequence 8.0, 8.0625,
+     * 8.125, ..., 31.9375, 32.0. You can convert a bits/pixel value to
+     * dscOverrideBitsPerPixelX16 as follows:
+     *
+     *  +------------------+--------------------------------------------+
+     *  |  bits_per_pixel  |  dscBitsPerPixelX16 = bits_per_pixel * 16  |
+     *  +------------------+--------------------------------------------+
+     *  |  8.0             |  128                                       |
+     *  |  8.0625          |  129                                       |
+     *  |  .               |  .                                         |
+     *  |  .               |  .                                         |
+     *  |  .               |  .                                         |
+     *  |  31.9375         |  511                                       |
+     *  |  32.0            |  512                                       |
+     *  +------------------+--------------------------------------------+
+     *
+     * If the specified dscOverrideBitsPerPixelX16 is out of range,
+     * then mode validation may fail.
+     *
+     * When dscOverrideBitsPerPixelX16 is 0, NVKMS computes the rate itself.
+     */
+    NvU32 dscOverrideBitsPerPixelX16;
+};
+
+/*!
+ * The list of pixelShift modes.
+ */
+enum NvKmsPixelShiftMode {
+    NVKMS_PIXEL_SHIFT_NONE = 0,
+    NVKMS_PIXEL_SHIFT_4K_TOP_LEFT,
+    NVKMS_PIXEL_SHIFT_4K_BOTTOM_RIGHT,
+    NVKMS_PIXEL_SHIFT_8K,
+};
+
+/*!
+ * The available resampling methods used when viewport scaling is requested.
+ */
+enum NvKmsResamplingMethod {
+    NVKMS_RESAMPLING_METHOD_BILINEAR = 0,
+    NVKMS_RESAMPLING_METHOD_BICUBIC_TRIANGULAR,
+    NVKMS_RESAMPLING_METHOD_BICUBIC_BELL_SHAPED,
+    NVKMS_RESAMPLING_METHOD_BICUBIC_BSPLINE,
+    NVKMS_RESAMPLING_METHOD_BICUBIC_ADAPTIVE_TRIANGULAR,
+    NVKMS_RESAMPLING_METHOD_BICUBIC_ADAPTIVE_BELL_SHAPED,
+    NVKMS_RESAMPLING_METHOD_BICUBIC_ADAPTIVE_BSPLINE,
+    NVKMS_RESAMPLING_METHOD_NEAREST,
+    NVKMS_RESAMPLING_METHOD_DEFAULT = NVKMS_RESAMPLING_METHOD_BILINEAR,
+};
+
+enum NvKmsWarpMeshDataType {
+    NVKMS_WARP_MESH_DATA_TYPE_TRIANGLES_XYUVRQ,
+    NVKMS_WARP_MESH_DATA_TYPE_TRIANGLE_STRIP_XYUVRQ,
+};
+
+/*!
+ * Description of a cursor image on a single head; this is used by any
+ * NVKMS request that needs to specify the cursor image.
+ */
+struct NvKmsSetCursorImageCommonParams {
+    /*! The surface to display in the cursor. */
+    NvKmsSurfaceHandle surfaceHandle[NVKMS_MAX_EYES];
+    /*!
+     * The cursor composition parameters are read and applied only if the
+     * specified cursor surface is not null.
+     */
+    struct NvKmsCompositionParams cursorCompParams;
+};
+
+
+/*!
+ * Description of the cursor position on a single head; this is used
+ * by any NVKMS request that needs to specify the cursor position.
+ *
+ * x,y are relative to the current viewPortIn configured on the head.
+ */
+struct NvKmsMoveCursorCommonParams {
+    NvS16 x; /*! in */
+    NvS16 y; /*! in */
+};
+
+/*!
+ * Description of the main layer LUT on a single head; this is used by any
+ * NVKMS request that needs to specify the LUT.
+ */
+struct NvKmsSetInputLutParams {
+    NvBool specified;
+    NvU32 depth;  /*! used bits per pixel (8, 15, 16, 24, 30) */
+
+    /*!
+     * The first and last elements (inclusive) in the color arrays to
+     * use. Valid values are in the range [0,N], where N is a
+     * function of depth:
+     *
+     * Depth   N
+     *  8      256
+     * 15      32
+     * 16      64
+     * 24      256
+     * 30      1024
+     *
+     * 'start' is the first element in the color arrays to use.
+     */
+    NvU32 start;
+
+    /*!
+     * 'end' is the last element (inclusive) in the color arrays to
+     * use. If end == 0, this command will disable the HW LUT for
+     * this head.
+     *
+     * The other fields in this structure, besides 'specified', are ignored if
+     * end == 0.
+     */
+    NvU32 end;
+
+    /*!
+     * Pointer to struct NvKmsLutRamps describing the LUT.
+     * Elements [start,end] will be used.
+     *
+     * Each entry in the input LUT has valid values in the range [0, 65535].
+     * However, on pre-Turing GPUs only 11 bits are significant; NVKMS will
+     * convert values in this range into the appropriate internal format.
+     *
+     * Use nvKmsPointerToNvU64() to assign pRamps.
+     */
+    NvU64 pRamps NV_ALIGN_BYTES(8);
+};
+
+
+/*!
+ * Description of the output LUT on a single head; this is used by any NVKMS
+ * request that needs to specify the LUT.
+ *
+ * Unlike the input LUT:
+ * - specifying the output LUT updates all values at once.
+ *
+ * Each entry in the output LUT has valid values in the range [0, 65535].
+ * However, only 11 bits are significant; NVKMS will convert values in this
+ * range into the appropriate internal format.
+ */
+struct NvKmsSetOutputLutParams {
+    NvBool specified;
+    NvBool enabled;
+
+    /*!
+     * Pointer to struct NvKmsLutRamps containing the actual LUT data, if
+     * required.
+     * Use nvKmsPointerToNvU64() to assign pRamps.
+     */
+    NvU64 pRamps NV_ALIGN_BYTES(8);
+};
+
+/*!
+ * Description of the LUT on a single head; this is used by any NVKMS
+ * request that needs to specify the LUT.
+ */
+struct NvKmsSetLutCommonParams {
+    struct NvKmsSetInputLutParams input;
+    struct NvKmsSetOutputLutParams output;
+
+    NvBool synchronous; /*!
block until the LUT update is complete */ +}; + +struct NvKmsNIsoSurface { + NvKmsSurfaceHandle surfaceHandle; + enum NvKmsNIsoFormat format; + NvU16 offsetInWords; +}; + +struct NvKmsCompletionNotifierDescription { + struct NvKmsNIsoSurface surface; + NvBool awaken; +}; + +struct NvKmsSemaphore { + struct NvKmsNIsoSurface surface; + NvU32 value; +}; + +enum NvKmsSyncptType { + NVKMS_SYNCPT_TYPE_NONE, + NVKMS_SYNCPT_TYPE_RAW, + NVKMS_SYNCPT_TYPE_FD, +}; + +struct NvKmsSyncpt { + enum NvKmsSyncptType type; + union { + int fd; + struct { + NvU32 id; + NvU32 value; + } raw; + } u; +}; + +struct NvKmsChannelSyncObjects { + /* + * If useSyncpt is set to FALSE, clients can provide an acquisition and/or + * release semaphore via the 'syncObjects.semaphores' struct. + * + * If NvKmsAllocDeviceReply::supportsIndependentAcqRelSemaphore is + * FALSE, then 'syncObjects.semaphores.acquire.surface' must be the same + * as 'syncObjects.semaphores.release.surface'. In other words, the same + * exact semaphore surface must be used for both acquire and release. + * + * If NvKmsAllocDeviceReply::supportsIndependentAcqRelSemaphore is + * TRUE, then the client is allowed to provide different semaphore + * surfaces for acquire and release. + * + * If useSyncpt is set to TRUE, clients can provide a pre-syncpt that they + * want the display engine to wait on before scanning out from the given + * buffer, and can specify that they want NVKMS to return a post-syncpt + * that they can wait on, via the 'syncObjects.syncpts' struct. + * + * The post-syncpt that NVKMS returns will be signaled once the + * buffer that was activated by this flip is displaced. As a typical + * example: + * - Client flips buffer A, and requests a post-syncpt PS. + * - Buffer A becomes active at the next frame boundary, and display + * starts scanning out buffer A. + * - Client flips buffer B. + * - Once the UPDATE for the buffer B flip is processed and display + * has finished sending the last pixel of buffer A to precomp for + * the current frame, post-syncpt PS will get signaled. + * + * Clients can use this option iff + * NvKmsAllocDeviceReply::supportsSyncpts is TRUE. + */ + NvBool useSyncpt; + + union { + struct { + struct NvKmsSemaphore acquire; + struct NvKmsSemaphore release; + } semaphores; + + struct { + struct NvKmsSyncpt pre; + enum NvKmsSyncptType requestedPostType; + } syncpts; + } u; +}; + +/*! + * Description of how to flip on a single head. + * + * viewPortIn::point describes the position of the viewPortIn that + * should be scaled to the viewPortOut of the head. The + * viewPortSizeIn is specified by NvKmsSetModeOneHeadRequest. Note + * that viewPortIn::point is in desktop coordinate space, and + * therefore applies across all layers. + * + * For YUV420 modes, the surfaces and position should be in "half" + * horizontal space. See the explanation in NvKmsMode. + * + * If 'specified' is FALSE for any of the layers, then the current + * hardware value is used. + */ +struct NvKmsFlipCommonParams { + + NvBool allowVrr; + + struct { + NvBool specified; + struct NvKmsPoint point; + } viewPortIn; + + struct { + struct NvKmsSetCursorImageCommonParams image; + NvBool imageSpecified; + + struct NvKmsMoveCursorCommonParams position; + NvBool positionSpecified; + } cursor; + + /* + * Set the output transfer function. + * + * If output transfer function is HDR and no staticMetadata is specified + * for the head or layers, flip request will be rejected. 
+ * + * If output transfer is set, output lut values specified during modeset + * will be ignored and output lut will be set with the specified HDR + * transfer function. + * + * If output transfer function is SDR and staticMetadata is enabled, + * HDR content for that layer will be tonemapped to the SDR output + * range. + */ + struct { + enum NvKmsOutputTf val; + NvBool specified; + } tf; + + /*! + * Describe the LUT to be used with the modeset or flip. + */ + struct NvKmsSetLutCommonParams lut; + + struct { + NvBool specified; + NvBool enabled; + struct NvKmsLUTSurfaceParams lut; + } olut; + + struct { + NvBool specified; + NvU32 val; + } olutFpNormScale; + + struct { + NvBool specified; + /*! + * If TRUE, override HDR static metadata for the head, instead of + * calculating it from HDR layer(s). If FALSE, do not override. + * + * Note that “specified” serves to mark the field as being changed in + * this flip request, rather than as specified for this frame. So to + * disable HDR static metadata, set hdrStaticMetadata.specified = TRUE + * and hdrStaticMetadata.enabled = FALSE. + */ + NvBool enabled; + enum NvKmsInfoFrameEOTF eotf; + struct NvKmsHDRStaticMetadata staticMetadata; + } hdrInfoFrame; + + struct { + NvBool specified; + enum NvKmsOutputColorimetry val; + } colorimetry; + + struct { + struct { + NvKmsSurfaceHandle handle[NVKMS_MAX_EYES]; + struct NvKmsRRParams rrParams; + NvBool specified; + } surface; + + /* + * sizeIn/sizeOut can be used when + * NvKmsAllocDeviceReply::layerCaps[layer].supportsWindowMode is TRUE. + */ + struct { + struct NvKmsSize val; + NvBool specified; + } sizeIn; + + struct { + struct NvKmsSize val; + NvBool specified; + } sizeOut; + + /* + * Set the position of the layer, relative to the upper left + * corner of the surface. This controls the same state as + * NVKMS_IOCTL_SET_LAYER_POSITION. + * + * This field can be used when + * NvKmsAllocDeviceReply::layerCaps[layer].supportsWindowMode is TRUE. + */ + struct { + struct NvKmsSignedPoint val; + NvBool specified; + } outputPosition; + + struct { + struct NvKmsCompletionNotifierDescription val; + NvBool specified; + } completionNotifier; + + struct { + struct NvKmsChannelSyncObjects val; + + /* If 'specified' is FALSE, then the current hardware value is used. */ + NvBool specified; + } syncObjects; + + /* + * If 'maxDownscaleFactors::specified' is true, nvkms will set the + * max H/V downscale usage bounds to the values specified in + * 'maxDownscaleFactors::horizontal' and 'maxDownscaleFactors::vertical'. + * + * If the 'maxDownscaleFactors::specified' values are within the bounds + * of 'NvKmsSetModeOneHeadReply::guaranteedUsage', then clients can expect + * the flip to succeed. If the 'maxDownscaleFactors::specified' values are + * beyond the bounds of 'NvKmsSetModeOneHeadReply::guaranteedUsage' but + * within 'NvKmsSetModeOneHeadReply::possibleUsage', then the request may + * legitimately fail due to insufficient display bandwidth and clients + * need to be prepared to handle that flip request failure. + * + * If 'maxDownscaleFactors::specified' is false, nvkms will calculate max + * H/V downscale factor by quantizing the range. E.g., max H/V downscale + * factor supported by HW is 4x for 5-tap and 2x for 2-tap mode. If + * 5-tap mode is required, the target usage bound that nvkms will + * attempt to program will either allow up to 2x downscaling, or up to + * 4x downscaling. If 2-tap mode is required, the target usage bound + * that NVKMS will attempt to program will allow up to 2x downscaling. 
+ * Example: to downscale from 4096x2160 -> 2731x864 in 5-tap mode, + * NVKMS would specify up to 2x for the H downscale bound (required is + * 1.5x), and up to 4x for the V downscale bound (required is 2.5x). + */ + struct { + /* + * Maximum vertical downscale factor (scaled by 1024) + * + * For example, if the downscale factor is 1.5, then maxVDownscaleFactor + * would be 1.5 x 1024 = 1536. + */ + NvU16 vertical; + + /* + * Maximum horizontal downscale factor (scaled by 1024) + * + * See the example above for vertical. + */ + NvU16 horizontal; + + NvBool specified; + } maxDownscaleFactors; + + NvBool tearing; + + /* + * When true, we will flip to this buffer whenever the current eye is + * finished scanning out. Otherwise, this flip will only execute after + * both eyes have finished scanout. + * + * Note that if this is FALSE and a vsynced stereo flip is requested, + * the buffers in this flip will be displayed for minPresentInterval*2 + * vblanks, one for each eye. + * + * This flag cannot be used for the overlay layer. + */ + NvBool perEyeStereoFlip; + + /* When non-zero, block the flip until PTIMER >= timeStamp. */ + NvU64 timeStamp NV_ALIGN_BYTES(8); + NvU8 minPresentInterval; + + /* This field cannot be used for the main layer right now. */ + struct { + struct NvKmsCompositionParams val; + NvBool specified; + } compositionParams; + + struct { + NvBool specified; + NvBool enabled; + struct NvKmsLUTSurfaceParams lut; + } ilut; + + struct { + NvBool specified; + NvBool enabled; + struct NvKmsLUTSurfaceParams lut; + } tmo; + + /* + * Color-space conversion matrix applied to the layer before + * compositing. + * + * If csc::specified is TRUE and csc::useMain is TRUE, then the CSC + * matrix specified in the main layer is used instead of the one here. + * If csc::specified is FALSE, then the CSC matrix used from the previous + * flip is used. csc::useMain must be set to FALSE for the main layer. + */ + struct { + NvBool specified; + NvBool useMain; + struct NvKmsCscMatrix matrix; + } csc; + + /* + * When true, all pending flips and synchronization operations get + * ignored, and channel flips to given buffer. Notifier and semaphore + * should not be specified if this flag is true. This flag does + * nothing if set true for NVKMS_IOCTL_SET_MODE ioctl. + * + * This flag allows client to remove stalled flips and unblock + * the channel. + * + * This flag cannot be used for the overlay layer. + */ + NvBool skipPendingFlips; + + /* + * This field can be used when + * NvKmsAllocDeviceReply::layerCaps[layer].supportsICtCp = TRUE. + * + * If staticMetadata is enabled for multiple layers, flip request + * will be rejected. + */ + struct { + NvBool specified; + /*! + * If TRUE, enable HDR static metadata. If FALSE, disable it. + * + * Note that “specified” serves to mark the field as being changed + * in this flip request, rather than as specified for this frame. + * So to disable HDR static metadata, set hdr.specified = TRUE and + * hdr.staticMetadata.enabled = FALSE. + */ + NvBool enabled; + struct NvKmsHDRStaticMetadata staticMetadata; + } hdr; + + /* Specifies whether the input color range is FULL or LIMITED. */ + struct { + enum NvKmsInputColorRange val; + NvBool specified; + } colorRange; + + /* This field has no effect right now. 
*/
+ struct {
+ enum NvKmsInputColorSpace val;
+ NvBool specified;
+ } colorSpace;
+
+ /* Specifies the input transfer function to be used */
+ struct {
+ enum NvKmsInputTf val;
+ NvBool specified;
+ } tf;
+
+ /* When enabled, explicitly set CSC00 with the provided matrix */
+ struct {
+ struct NvKmsCscMatrix matrix;
+ NvBool enabled;
+ NvBool specified;
+ } csc00Override;
+
+ /* When enabled, explicitly set CSC01 with the provided matrix */
+ struct {
+ struct NvKmsCscMatrix matrix;
+ NvBool enabled;
+ NvBool specified;
+ } csc01Override;
+
+ /* When enabled, explicitly set CSC10 with the provided matrix */
+ struct {
+ struct NvKmsCscMatrix matrix;
+ NvBool enabled;
+ NvBool specified;
+ } csc10Override;
+
+ /* When enabled, explicitly set CSC11 with the provided matrix */
+ struct {
+ struct NvKmsCscMatrix matrix;
+ NvBool enabled;
+ NvBool specified;
+ } csc11Override;
+ } layer[NVKMS_MAX_LAYERS_PER_HEAD];
+};
+
+struct NvKmsFlipCommonReplyOneHead {
+ struct {
+ struct NvKmsSyncpt postSyncpt;
+ } layer[NVKMS_MAX_LAYERS_PER_HEAD];
+};
+
+/*!
+ * NVKMS_IOCTL_ALLOC_DEVICE: Allocate an NVKMS device object.
+ *
+ * This has the scope of a resman SLI device.
+ *
+ * Multiple clients can allocate devices (DRM-KMS, multiple X
+ * servers). Clients should configure SLI before initializing NVKMS.
+ * NVKMS will query resman for the current SLI topology.
+ *
+ * The SLI configuration (both the linked SLI device, and the sliMosaic
+ * boolean below) will be latched when the specified GPU transitions
+ * from zero NVKMS devices allocated to one NVKMS device allocated.
+ *
+ * The returned information will remain static until the NVKMS device
+ * object is freed.
+ */
+
+struct NvKmsDeviceId {
+ /*!
+ * The (primary) GPU for this device; this is used as the value
+ * for NV0080_ALLOC_PARAMETERS::deviceId.
+ */
+ NvU32 rmDeviceId;
+
+ /*!
+ * The SMG (MIG) partition ID that this client must subscribe to in
+ * N-way SMG mode; or, if not in MIG mode, the value NO_MIG_DEVICE,
+ * which is equivalent to leaving this field initialized to zero (0).
+ */
+ MIGDeviceId migDevice;
+};
+
+struct NvKmsAllocDeviceRequest {
+ /*!
+ * Clients should populate versionString with the value of
+ * NV_VERSION_STRING from nvUnixVersion.h. This is used for a
+ * version handshake.
+ */
+ char versionString[NVKMS_NVIDIA_DRIVER_VERSION_STRING_LENGTH];
+
+ /*!
+ * The underlying GPU for this device: this may point to a physical GPU
+ * or a graphics-capable MIG partition (i.e., an SMG device).
+ */
+ struct NvKmsDeviceId deviceId;
+
+ /*!
+ * Whether SLI Mosaic is requested: i.e., multiple disps, one
+ * per physical GPU, for the SLI device.
+ */
+ NvBool sliMosaic;
+
+ /*!
+ * When tryInferSliMosaicFromExistingDevice=TRUE, the above
+ * 'sliMosaic' field is ignored and the ALLOC_DEVICE request will
+ * inherit the current sliMosaic state of the existing device
+ * identified by deviceId. If there is not an existing device for
+ * deviceId, then the ALLOC_DEVICE request will proceed normally, honoring
+ * the requested sliMosaic state.
+ */
+ NvBool tryInferSliMosaicFromExistingDevice;
+
+ /*!
+ * NVKMS will use the 3D engine for headSurface. If clients want to avoid
+ * the use of the 3D engine, set no3d = TRUE. Note this will cause modesets
+ * that require headSurface to fail.
+ *
+ * This flag is only honored when there is not already an existing device
+ * for the deviceId.
+ */
+ NvBool no3d;
+
+ /*!
+ * When enableConsoleHotplugHandling is TRUE, NVKMS will start handling
+ * hotplug events at the console when no modeset owner is present.
+ *
+ * If FALSE, console hotplug handling behavior is not changed.
+ *
+ * This should be set to TRUE for clients that intend to allocate the device
+ * but don't intend to become the modeset owner right away. It should be set
+ * to FALSE for clients that may take modeset ownership immediately, in
+ * order to suppress hotplug handling between the NVKMS_IOCTL_ALLOC_DEVICE
+ * and NVKMS_IOCTL_GRAB_OWNERSHIP calls when the calling client is the first
+ * to allocate the device.
+ *
+ * Note that NVKMS_IOCTL_RELEASE_OWNERSHIP also enables console hotplug
+ * handling. Once enabled, console hotplug handling remains enabled until
+ * the last client frees the device.
+ */
+ NvBool enableConsoleHotplugHandling;
+
+ struct {
+ /* name[0] == '\0' for unused registryKeys[] array elements. */
+ char name[NVKMS_MAX_DEVICE_REGISTRY_KEYNAME_LEN];
+ NvU32 value;
+ } registryKeys[NVKMS_MAX_DEVICE_REGISTRY_KEYS];
+};
+
+enum NvKmsAllocDeviceStatus {
+ NVKMS_ALLOC_DEVICE_STATUS_SUCCESS,
+ NVKMS_ALLOC_DEVICE_STATUS_VERSION_MISMATCH,
+ NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST,
+ NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR,
+ NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE,
+ NVKMS_ALLOC_DEVICE_STATUS_CORE_CHANNEL_ALLOC_FAILED,
+};
+
+
+struct NvKmsAllocDeviceReply {
+
+ enum NvKmsAllocDeviceStatus status;
+
+ /*!
+ * The handle to use when identifying this NVKMS device in
+ * subsequent calls.
+ */
+ NvKmsDeviceHandle deviceHandle;
+
+ /*!
+ * A bitmask, indicating the GPUs, one per bit, contained by this
+ * device.
+ */
+ NvU32 subDeviceMask;
+
+ /*! The number of heads on each disp. */
+ NvU32 numHeads;
+
+ /*! The number of disps. */
+ NvU32 numDisps;
+
+ /*! The handle to identify each disp, in dispHandles[0..numDisps). */
+ NvKmsDispHandle dispHandles[NVKMS_MAX_SUBDEVICES];
+
+ /*!
+ * Device-wide capabilities of the display engine.
+ *
+ * IMPLEMENTATION NOTE: this is the portion of DispHalRec::caps
+ * that can vary between EVO classes.
+ */
+ NvBool requiresVrrSemaphores;
+ NvBool inputLutAppliesToBase;
+
+ /*!
+ * Whether the client can allocate and manipulate SwapGroup objects via
+ * NVKMS_IOCTL_ALLOC_SWAP_GROUP and friends.
+ */
+ NvBool supportsSwapGroups;
+
+ /*!
+ * Whether NVKMS supports Warp and Blend on this device.
+ */
+ NvBool supportsWarpAndBlend;
+
+ /*!
+ * When nIsoSurfacesInVidmemOnly=TRUE, then only video memory
+ * surfaces can be used for the surface in
+ * NvKmsCompletionNotifierDescription or NvKmsSemaphore.
+ */
+ NvBool nIsoSurfacesInVidmemOnly;
+
+ /*
+ * When requiresAllAllocationsInSysmem=TRUE, then all memory allocations
+ * that will be accessed by display must come from sysmem.
+ */
+ NvBool requiresAllAllocationsInSysmem;
+
+ /*
+ * Whether the device that NVKMS is driving supports headSurface GPU
+ * composition.
+ */
+ NvBool supportsHeadSurface;
+
+ /*!
+ * The display engine supports a "legacy" format for notifiers and
+ * semaphores (one word for semaphores and base channel notifiers;
+ * two words for overlay notifiers). On newer GPUs, the display
+ * engine also supports a similar four word semaphore and notifier
+ * format used by graphics.
+ *
+ * This describes which values are valid for NvKmsNIsoFormat.
+ *
+ * Iff a particular enum NvKmsNIsoFormat 'value' is supported,
+ * then (1 << value) will be set in validNIsoFormatMask.
+ */
+ NvU8 validNIsoFormatMask;
+
+ NvU32 surfaceAlignment;
+ NvU32 maxWidthInBytes;
+ NvU32 maxWidthInPixels;
+ NvU32 maxHeightInPixels;
+ NvU32 maxCursorSize;
+
+ /*!
+ * Describes the supported Color Key selects and blending modes for match
+ * and nomatch cursor pixels.
+ */
+ struct NvKmsCompositionCapabilities cursorCompositionCaps;
+
+ /*! The number of layers attached to each head. */
+ NvU32 numLayers[NVKMS_MAX_HEADS_PER_DISP];
+
+ /*!
+ * Describes supported functionalities for each layer.
+ */
+ struct NvKmsLayerCapabilities layerCaps[NVKMS_MAX_LAYERS_PER_HEAD];
+
+ /*!
+ * Describes supported functionalities for the output LUT on each head.
+ */
+ struct NvKmsLUTCaps olutCaps;
+
+ /*!
+ * This bitmask specifies all of the (rotation, reflectionX, reflectionY)
+ * combinations that are supported for the main and overlay layers.
+ * Each bit in this bitmask is mapped to one combination per the scheme
+ * in NvKmsRRParamsToCapBit().
+ */
+ NvU16 validLayerRRTransforms;
+
+ /*!
+ * IO coherency modes that display supports for ISO and NISO memory
+ * allocations, respectively.
+ */
+ NvKmsDispIOCoherencyModes isoIOCoherencyModes;
+ NvKmsDispIOCoherencyModes nisoIOCoherencyModes;
+
+ /*!
+ * 'displayIsGpuL2Coherent' indicates whether display is coherent with
+ * the GPU's L2 cache.
+ */
+ NvBool displayIsGpuL2Coherent;
+
+ /*!
+ * 'supportsSyncpts' indicates whether NVKMS supports the use of syncpts
+ * for synchronization.
+ */
+ NvBool supportsSyncpts;
+
+ /*!
+ * 'supportsIndependentAcqRelSemaphore' indicates whether HW supports
+ * configuring different semaphores for acquire and release for a buffer
+ * flip on a given layer.
+ */
+ NvBool supportsIndependentAcqRelSemaphore;
+
+ /*!
+ * 'supportsVblankSyncObjects' indicates whether HW supports raster
+ * generator sync objects that signal at vblank.
+ */
+ NvBool supportsVblankSyncObjects;
+
+ /*!
+ * 'supportsVblankSemControl' indicates whether the VBlank Semaphore Control
+ * interface:
+ *
+ * NVKMS_IOCTL_ENABLE_VBLANK_SEM_CONTROL,
+ * NVKMS_IOCTL_DISABLE_VBLANK_SEM_CONTROL,
+ * NVKMS_IOCTL_ACCEL_VBLANK_SEM_CONTROLS,
+ *
+ * is supported.
+ */
+ NvBool supportsVblankSemControl;
+
+ /*!
+ * 'supportsInputColorSpace' indicates whether the HW supports setting the
+ * input color space.
+ */
+ NvBool supportsInputColorSpace;
+
+ /*!
+ * 'supportsInputColorRange' indicates whether the HW supports setting the
+ * input color range.
+ */
+ NvBool supportsInputColorRange;
+
+ /*! Framebuffer console base address and size. */
+ NvU64 vtFbBaseAddress;
+ NvU64 vtFbSize;
+};
+
+struct NvKmsAllocDeviceParams {
+ struct NvKmsAllocDeviceRequest request; /*! in */
+ struct NvKmsAllocDeviceReply reply; /*! out */
+};
+
+
+/*!
+ * NVKMS_IOCTL_FREE_DEVICE: Free the NVKMS device object specified by
+ * deviceHandle.
+ *
+ * The underlying device is not actually freed until all callers of
+ * NVKMS_IOCTL_ALLOC_DEVICE have freed their reference to the device.
+ *
+ * When a client calls FREE_DEVICE, any configuration specified by
+ * that client will be removed:
+ * - Any EDID overrides.
+ * - Any interest declared on dynamic dpys.
+ * - Any cursor image on any head.
+ * - Any custom LUT contents.
+ * - Any interest declared on any events.
+ *
+ * XXX define how FREE_DEVICE interacts with:
+ * - concurrent X servers on different VTs
+ * - console restore
+ */
+
+struct NvKmsFreeDeviceRequest {
+ NvKmsDeviceHandle deviceHandle;
+};
+
+struct NvKmsFreeDeviceReply {
+ NvU32 padding;
+};
+
+struct NvKmsFreeDeviceParams {
+ struct NvKmsFreeDeviceRequest request; /*! in */
+ struct NvKmsFreeDeviceReply reply; /*! out */
+};
+
+
+/*!
+ * NVKMS_IOCTL_QUERY_DISP: Query information about the NVKMS disp
+ * object specified by the tuple (deviceHandle, dispHandle).
+ *
+ * The returned information will remain static until the NVKMS device
+ * object is freed.
+ */
+
+struct NvKmsQueryDispRequest {
+ NvKmsDeviceHandle deviceHandle;
+ NvKmsDispHandle dispHandle;
+};
+
+struct NvKmsQueryDispReply {
+ /*! The possible dpys for this disp, excluding any dynamic dpys. */
+ NVDpyIdList validDpys;
+
+ /*! The dpys that were driven at boot-time, if any. */
+ NVDpyIdList bootDpys;
+
+ /*! The dpys that are capable of dynamic mux switching, if any. */
+ NVDpyIdList muxDpys;
+
+ /*! The framelock device, if any, connected to this disp. */
+ NvKmsFrameLockHandle frameLockHandle;
+
+ /*! The number of connectors on this disp. */
+ NvU32 numConnectors;
+
+ /*!
+ * The handle to identify each connector, in
+ * connectorHandles[0..numConnectors)
+ */
+ NvKmsConnectorHandle connectorHandles[NVKMS_MAX_CONNECTORS_PER_DISP];
+
+ /*!
+ * A string describing one of the GPUs used by this disp. The
+ * NVKMS log will also print this string to the kernel log. Users
+ * should be able to correlate GPUs between NVKMS and NVKMS
+ * clients using this string.
+ */
+ char gpuString[NVKMS_GPU_STRING_SIZE];
+};
+
+struct NvKmsQueryDispParams {
+ struct NvKmsQueryDispRequest request; /*! in */
+ struct NvKmsQueryDispReply reply; /*! out */
+};
+
+
+/*!
+ * NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA: Query information about the NVKMS
+ * connector object specified by the triplet (deviceHandle, dispHandle,
+ * connectorHandle).
+ *
+ * The returned information will remain static until the NVKMS device
+ * object is freed.
+ */
+
+struct NvKmsQueryConnectorStaticDataRequest {
+ NvKmsDeviceHandle deviceHandle;
+ NvKmsDispHandle dispHandle;
+ NvKmsConnectorHandle connectorHandle;
+};
+
+struct NvKmsQueryConnectorStaticDataReply {
+ NVDpyId dpyId;
+ NvBool isDP;
+ NvBool isLvds;
+ NvBool locationOnChip;
+ NvU32 legacyTypeIndex;
+ NvKmsConnectorType type;
+ NvU32 typeIndex;
+ NvKmsConnectorSignalFormat signalFormat;
+ NvU32 physicalIndex;
+ NvU32 physicalLocation;
+};
+
+struct NvKmsQueryConnectorStaticDataParams {
+ struct NvKmsQueryConnectorStaticDataRequest request; /*! in */
+ struct NvKmsQueryConnectorStaticDataReply reply; /*! out */
+};
+
+
+/*!
+ * NVKMS_IOCTL_QUERY_CONNECTOR_DYNAMIC_DATA: Query dynamic information about the
+ * NVKMS connector object specified by the triplet (deviceHandle, dispHandle,
+ * connectorHandle).
+ */
+
+struct NvKmsQueryConnectorDynamicDataRequest {
+ NvKmsDeviceHandle deviceHandle;
+ NvKmsDispHandle dispHandle;
+ NvKmsConnectorHandle connectorHandle;
+};
+
+struct NvKmsQueryConnectorDynamicDataReply {
+#define NVKMS_DP_DETECT_COMPLETE_POLL_INTERVAL_USEC 100000 /* in microseconds */
+#define NVKMS_DP_DETECT_COMPLETE_TIMEOUT_USEC 10000000 /* in microseconds */
+
+ /*
+ * For DisplayPort devices, indicates whether the DisplayPort library is
+ * finished detecting devices on this connector. This is set to TRUE for
+ * other devices because NVKMS knows as soon as ALLOC_DEVICE is complete
+ * whether the device is connected or not.
+ */
+ NvBool detectComplete;
+ /*
+ * Contains the list of display IDs for dynamic dpys detected on this
+ * connector.
+ */
+ NVDpyIdList dynamicDpyIdList;
+};
+
+struct NvKmsQueryConnectorDynamicDataParams {
+ struct NvKmsQueryConnectorDynamicDataRequest request; /*! in */
+ struct NvKmsQueryConnectorDynamicDataReply reply; /*! out */
+};
+
+
+/*!
+ * NVKMS_IOCTL_QUERY_DPY_STATIC_DATA: Query static information about + * the NVKMS dpy object specified by the triplet (deviceHandle, + * dispHandle, dpyId). This information should remain static for the + * lifetime of the dpy. + */ + +struct NvKmsQueryDpyStaticDataRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; +}; + +struct NvKmsQueryDpyStaticDataReply { + NvKmsConnectorHandle connectorHandle; /*! The connector driving this dpy. */ + NvU32 type; /*! NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_ */ + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]; + NvBool mobileInternal; + NvBool isDpMST; + /* Bitmask of valid heads to drive this dpy. */ + NvU32 headMask; +}; + +struct NvKmsQueryDpyStaticDataParams { + struct NvKmsQueryDpyStaticDataRequest request; /*! in */ + struct NvKmsQueryDpyStaticDataReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA: Query dynamic information about + * the NVKMS dpy object specified by the triplet (deviceHandle, + * dispHandle, dpyId). + * + * This information should be re-queried after an + * NVKMS_EVENT_TYPE_DPY_CHANGED event. + */ + +struct NvKmsQueryDpyDynamicDataRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + + NvBool forceConnected; + NvBool forceDisconnected; + NvBool overrideEdid; + NvBool ignoreEdid; + NvBool ignoreEdidChecksum; + NvBool allowDVISpecPClkOverride; + NvBool dpInbandStereoSignaling; + NvBool disableACPIBrightnessHotkeys; + + /* + * If overrideEdid is TRUE, then edid::buffer[] contains an EDID + * to override anything detected. + */ + struct { + NvU16 bufferSize; + NvU8 buffer[NVKMS_EDID_BUFFER_SIZE]; + } edid; +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC attributes. */ +enum NvKmsDpyAttributeColorBpcValue { + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN = 0, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6 = 6, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8 = 8, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10 = 10, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_MAX = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10, +}; + +typedef struct _NvKmsDpyOutputColorFormatInfo { + struct { + enum NvKmsDpyAttributeColorBpcValue maxBpc; + enum NvKmsDpyAttributeColorBpcValue minBpc; + } rgb444, yuv444, yuv422; +} NvKmsDpyOutputColorFormatInfo; + +enum NvKmsDpyVRRType { + NVKMS_DPY_VRR_TYPE_NONE, + NVKMS_DPY_VRR_TYPE_GSYNC, + NVKMS_DPY_VRR_TYPE_ADAPTIVE_SYNC_DEFAULTLISTED, + NVKMS_DPY_VRR_TYPE_ADAPTIVE_SYNC_NON_DEFAULTLISTED, +}; + +struct NvKmsQueryDpyDynamicDataReply { + char name[NVKMS_DPY_NAME_SIZE]; + + NvU32 maxPixelClockKHz; + NvBool connected; + NvBool isVirtualRealityHeadMountedDisplay; + + struct { + NvU8 heightInCM; /* vertical screen size */ + NvU8 widthInCM; /* horizontal screen size */ + } physicalDimensions; + + /*! + * Which VRR type has been selected for this display, either true + * G-SYNC, Adaptive-Sync defaultlisted, or Adaptive-Sync non-defaultlisted. + */ + enum NvKmsDpyVRRType vrrType; + + NvBool supportsHDR; + + struct { + NvBool supported; + NvBool isDLP; + NvBool isAegis; + NvU32 subType; /*! STEREO_PLUG_AND_PLAY_ from nvStereoDisplayDef.h */ + } stereo3DVision; + + struct { + struct { + NvBool valid; + NvU8 buffer[NVKMS_GUID_SIZE]; + char str[NVKMS_GUID_STRING_SIZE]; + } guid; + } dp; + + struct { + /*! + * The size of the EDID in buffer[], or 0 if there is no EDID + * available in buffer[]. + */ + NvU16 bufferSize; + + /*! + * Whether NVKMS determined that the EDID is valid. 
If the
+ * EDID is not valid, there may still be information available
+ * in infoString: the infoString will describe why the EDID
+ * was deemed invalid.
+ */
+ NvBool valid;
+
+ /*!
+ * The raw EDID bytes.
+ */
+ NvU8 buffer[NVKMS_EDID_BUFFER_SIZE];
+
+ /*!
+ * Parsed information from the EDID. For the raw EDID bytes,
+ * see NvKmsQueryDpyDynamicDataParams::edid::buffer[].
+ */
+ char infoString[NVKMS_EDID_INFO_STRING_LENGTH];
+ } edid;
+
+ NvKmsDpyOutputColorFormatInfo supportedOutputColorFormats;
+
+ struct NvKmsSuperframeInfo superframeInfo;
+};
+
+struct NvKmsQueryDpyDynamicDataParams {
+ struct NvKmsQueryDpyDynamicDataRequest request; /*! in */
+ struct NvKmsQueryDpyDynamicDataReply reply; /*! out */
+};
+
+
+/*!
+ * NVKMS_IOCTL_VALIDATE_MODE_INDEX: Validate a particular mode from a
+ * dpy's candidate modes.
+ *
+ * NVKMS can consider modes from a dpy's EDID, as well as a
+ * variety of builtin modes.
+ *
+ * This ioctl identifies one of those candidate modes by index. NVKMS
+ * will attempt to validate that candidate mode for the dpy, using the
+ * specified mode validation parameters.
+ *
+ * If the mode index is beyond the end of the list of candidate modes,
+ * reply::end will be TRUE. Otherwise, reply::end will be FALSE, and
+ * reply::mode will contain the candidate mode.
+ *
+ * If the mode is valid, then reply::valid will be TRUE. Otherwise,
+ * reply::valid will be FALSE. In either case, request::pInfoString[]
+ * will contain a description of what happened during mode validation.
+ *
+ * To query the full modepool, clients should repeatedly call
+ * NVKMS_IOCTL_VALIDATE_MODE_INDEX with increasing mode index values,
+ * until NVKMS reports end==TRUE.
+ *
+ * Note that the candidate mode list can change when the dpy changes
+ * (reported by the NVKMS_EVENT_TYPE_DPY_CHANGED event). The client
+ * should restart its modepool querying if it receives a DPY_CHANGED
+ * event. The candidate mode list can also change based on the
+ * parameters in request::modeValidation. Clients should not change
+ * request::modeValidation while looping over candidate mode indices.
+ *
+ * Pseudocode example usage pattern:
+ *
+ * struct NvKmsModeValidationParams modeValidation = Initialize();
+ * struct NvKmsValidateModeIndexParams params;
+ *
+ * retry:
+ * NvU32 modeIndex = 0;
+ *
+ * while (1) {
+ * char infoString[INFO_STRING_LENGTH];
+ * memset(&params, 0, sizeof(params));
+ * params.request.dpyId = dpyId;
+ * params.request.modeIndex = modeIndex++;
+ * params.request.modeValidation = modeValidation;
+ * params.request.pInfoString = nvKmsPointerToNvU64(infoString);
+ * params.request.infoStringSize = sizeof(infoString);
+ *
+ * ioctl(&params);
+ *
+ * if (params.reply.end) break;
+ *
+ * print(infoString);
+ *
+ * if (params.reply.valid) {
+ * AddToModePool(params.reply.mode);
+ * }
+ * }
+ *
+ * if (dpyChanged) goto retry;
+ *
+ */
+
+struct NvKmsValidateModeIndexRequest {
+ NvKmsDeviceHandle deviceHandle;
+ NvKmsDispHandle dispHandle;
+ NVDpyId dpyId;
+ struct NvKmsModeValidationParams modeValidation;
+ NvU32 modeIndex;
+
+ /*
+ * Pointer to a string of size 'infoStringSize'.
+ * Use nvKmsPointerToNvU64() to assign pInfoString.
+ * The maximum size allowed is
+ * NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH.
+ */
+ NvU32 infoStringSize;
+ NvU64 pInfoString NV_ALIGN_BYTES(8);
+};
+
+struct NvKmsValidateModeIndexReply {
+ NvBool end;
+ NvBool valid;
+
+ struct NvKmsMode mode;
+
+ /*! The validSyncs used by NVKMS when validating the mode. */
+ struct NvKmsModeValidationValidSyncs validSyncs;
+
+ /*! Whether this mode is marked as "preferred" by the EDID.
*/
+ NvBool preferredMode;
+
+ /*! A text description of the mode. */
+ char description[64];
+
+ /*! Where the mode came from. */
+ enum NvKmsModeSource {
+ NvKmsModeSourceUnknown = 0,
+ NvKmsModeSourceEdid = 1,
+ NvKmsModeSourceVesa = 2,
+ } source;
+
+ /* The number of bytes written to 'pInfoString' (from the request) */
+ NvU32 infoStringLenWritten;
+
+ /*!
+ * These are the usage bounds that may be possible with this mode,
+ * assuming that only one head is active. For actual usage bounds,
+ * see guaranteedUsage and possibleUsage returned in
+ * NvKmsSetModeOneHeadReply.
+ */
+ struct NvKmsUsageBounds modeUsage;
+
+ /*
+ * Whether this mode supports the hdmi3D stereo mode, even though it
+ * wasn't requested.
+ */
+ NvBool hdmi3DAvailable;
+};
+
+struct NvKmsValidateModeIndexParams {
+ struct NvKmsValidateModeIndexRequest request; /*! in */
+ struct NvKmsValidateModeIndexReply reply; /*! out */
+};
+
+
+/*!
+ * NVKMS_IOCTL_VALIDATE_MODE: Validate an individual mode for the
+ * specified dpy.
+ *
+ * Given the validation parameters, NVKMS will test whether the given
+ * mode is currently valid for the specified dpy.
+ *
+ * If the mode is valid, then reply::valid will be TRUE. Otherwise,
+ * reply::valid will be FALSE. In either case, reply::infoString[]
+ * will contain a description of what happened during mode validation.
+ */
+
+struct NvKmsValidateModeRequest {
+ NvKmsDeviceHandle deviceHandle;
+ NvKmsDispHandle dispHandle;
+ NVDpyId dpyId;
+ struct NvKmsModeValidationParams modeValidation;
+ struct NvKmsMode mode;
+
+ /*
+ * Pointer to a string of size 'infoStringSize'.
+ * Use nvKmsPointerToNvU64() to assign pInfoString.
+ * The maximum size allowed is
+ * NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH.
+ */
+ NvU32 infoStringSize;
+ NvU64 pInfoString NV_ALIGN_BYTES(8);
+};
+
+struct NvKmsValidateModeReply {
+ NvBool valid;
+
+ /*! The validSyncs used by NVKMS when validating the mode. */
+ struct NvKmsModeValidationValidSyncs validSyncs;
+
+ /* The number of bytes written to 'pInfoString' (from the request) */
+ NvU32 infoStringLenWritten;
+
+ /*!
+ * These are the usage bounds that may be possible with this mode,
+ * assuming that only one head is active. For actual usage bounds,
+ * see guaranteedUsage and possibleUsage returned in
+ * NvKmsSetModeOneHeadReply.
+ */
+ struct NvKmsUsageBounds modeUsage;
+};
+
+struct NvKmsValidateModeParams {
+ struct NvKmsValidateModeRequest request; /*! in */
+ struct NvKmsValidateModeReply reply; /*! out */
+};
+
+
+/*!
+ * NVKMS_IOCTL_SET_MODE: Perform a modeset.
+ *
+ * NvKmsSetModeRequest can describe the modetiming configuration
+ * across all heads of all disps within the SLI device.
+ *
+ * The elements in NvKmsSetModeRequest::disp[] correspond to the disps
+ * returned in NvKmsAllocDeviceReply::dispHandles[].
+ *
+ * To only touch certain heads and disps, use the
+ * requestedHeadsBitMask and requestedDispsBitMask fields to limit
+ * which array elements are honored.
+ *
+ * If the request is invalid, one or more of the
+ * NvKmsSetMode{,OneDisp,OneHead}Reply::status fields will have a
+ * non-SUCCESS value. If the mode set completed successfully, then
+ * all NvKmsSetMode{,OneDisp,OneHead}Reply::status fields should be
+ * SUCCESS.
+ */
+
+struct NvKmsSetModeHeadSurfaceParams {
+ NvBool forceCompositionPipeline;
+ NvBool forceFullCompositionPipeline;
+ NvBool fakeOverlay;
+ NvBool blendAfterWarp;
+ NvBool transformSpecified;
+
+ /* Reflect the image along the X axis. */
+ NvBool reflectionX;
+
+ /* Reflect the image along the Y axis.
*/
+ NvBool reflectionY;
+
+ /*
+ * Rotate the image counter-clockwise in 90 degree increments.
+ *
+ * Reflection (specified above by ::reflection[XY]) is applied
+ * before rotation. This matches the semantics of RandR. From:
+ *
+ * https://cgit.freedesktop.org/xorg/proto/randrproto/tree/randrproto.txt
+ *
+ * "Rotation and reflection and how they interact can be confusing. In
+ * Randr, the coordinate system is rotated in a counter-clockwise direction
+ * relative to the normal orientation. Reflection is along the window system
+ * coordinate system, not the physical screen X and Y axis, so that rotation
+ * and reflection do not interact. The other way to consider reflection is
+ * to is specified in the 'normal' orientation, before rotation, if you find
+ * the other way confusing."
+ */
+ enum NvKmsRotation rotation;
+ enum NvKmsPixelShiftMode pixelShift;
+ enum NvKmsResamplingMethod resamplingMethod;
+ struct NvKmsMatrix transform; /* Only honored if transformSpecified. */
+
+ NvKmsSurfaceHandle blendTexSurfaceHandle;
+ NvKmsSurfaceHandle offsetTexSurfaceHandle;
+
+ /*
+ * When warpMesh::surfaceHandle is non-zero, it indicates a surface
+ * containing warp mesh vertex data. The surface should:
+ *
+ * - Have a width multiple of 1024 pixels.
+ * - Have a depth of 32.
+ * - Contain a binary representation of a list of six-component
+ * vertices. Each of these components is a 32-bit floating point value.
+ *
+ * The X, Y components should contain normalized vertex coordinates, to be
+ * rendered as a triangle list or strip. The X and Y components' [0,1]
+ * range map to the head's ViewportOut X and Y, respectively.
+ *
+ * The U, V, R, and Q components should contain normalized, projective
+ * texture coordinates:
+ *
+ * U, V: 2D texture coordinate. U and V components' [0,1] range maps to the
+ * display's MetaMode ViewportIn X and Y, respectively.
+ *
+ * R: unused
+ *
+ * Q: Used for interpolation purposes. This is typically the third
+ * component of the result of a multiplication by a 3x3 projective transform
+ * matrix.
+ *
+ * warpMesh::vertexCount should contain the number of vertices stored in
+ * the surface.
+ *
+ * warpMesh::dataType indicates whether the vertices describe a triangle
+ * list or a triangle strip. A triangle list must have a vertexCount that
+ * is a multiple of 3.
+ */
+ struct {
+ NvKmsSurfaceHandle surfaceHandle;
+ NvU32 vertexCount;
+ enum NvKmsWarpMeshDataType dataType;
+ } warpMesh;
+};
+
+#define NVKMS_VRR_MIN_REFRESH_RATE_MAX_VARIANCE 10 // 10 Hz
+
+enum NvKmsAllowAdaptiveSync {
+ NVKMS_ALLOW_ADAPTIVE_SYNC_DISABLED = 0,
+ NVKMS_ALLOW_ADAPTIVE_SYNC_DEFAULTLISTED_ONLY,
+ NVKMS_ALLOW_ADAPTIVE_SYNC_ALL,
+};
+
+/*! Values for the NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE attribute. */
+enum NvKmsDpyAttributeRequestedColorSpaceValue {
+ NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB = 0,
+ NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr422 = 1,
+ NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr444 = 2,
+};
+
+/*!
+ * Values for the NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_RANGE and
+ * NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_RANGE attributes.
+ */
+enum NvKmsDpyAttributeColorRangeValue {
+ NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL = 0,
+ NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED = 1,
+};
+
+struct NvKmsSetModeOneHeadRequest {
+ /*!
+ * The list of dpys to drive with this head; or, empty to disable
+ * the head.
+ */
+ NVDpyIdList dpyIdList;
+
+ /*! The modetimings to set on the head. */
+ struct NvKmsMode mode;
+
+ /*!
The above mode will be validated, using these validation parameters. */
+ struct NvKmsModeValidationParams modeValidationParams;
+
+ /*!
+ * The region within the raster timings that should contain an image.
+ * This is only used when viewPortOutSpecified is TRUE. Otherwise, the
+ * viewPortOut is inferred from the raster timings.
+ *
+ * For YUV420 modes, the viewPortOut should be in "full"
+ * horizontal space. See the explanation in NvKmsMode.
+ */
+ struct NvKmsRect viewPortOut;
+
+ /*!
+ * The size, in pixels, that the head will fetch from any surface
+ * it scans from. The viewPortPointIn is specified in
+ * NvKmsFlipCommonParams.
+ *
+ * For YUV420 modes, the viewPortSizeIn should be in "half"
+ * horizontal space. See the explanation in NvKmsMode.
+ */
+ struct NvKmsSize viewPortSizeIn;
+
+ /*!
+ * Describe the surfaces to present on this head.
+ */
+ struct NvKmsFlipCommonParams flip;
+
+ /*!
+ * The headSurface configuration requested, if any.
+ */
+ struct NvKmsSetModeHeadSurfaceParams headSurface;
+
+ NvBool viewPortOutSpecified; /*! Whether to use viewPortOut. */
+
+ /*!
+ * Allow G-SYNC to be enabled on this head if it is supported by the GPU
+ * and monitor.
+ */
+ NvBool allowGsync;
+
+ /*!
+ * Whether to allow Adaptive-Sync to be enabled on this head if it is
+ * supported by the GPU:
+ *
+ * NVKMS_ALLOW_ADAPTIVE_SYNC_ALL:
+ * VRR is enabled as long as this monitor supports Adaptive-Sync.
+ *
+ * NVKMS_ALLOW_ADAPTIVE_SYNC_DEFAULTLISTED_ONLY:
+ * VRR is only enabled on this head if the monitor is on the
+ * Adaptive-Sync defaultlist.
+ *
+ * NVKMS_ALLOW_ADAPTIVE_SYNC_DISABLED:
+ * VRR is forced to be disabled if this is an Adaptive-Sync monitor.
+ */
+ enum NvKmsAllowAdaptiveSync allowAdaptiveSync;
+
+ /*!
+ * Override the minimum refresh rate for VRR monitors specified by the
+ * EDID (0 to not override the EDID-provided value). Clamped at modeset
+ * time to within NVKMS_VRR_MIN_REFRESH_RATE_MAX_VARIANCE of the
+ * EDID-specified minimum refresh rate, as long as the minimum is no
+ * lower than 1 Hz and the maximum does not exceed the maximum refresh
+ * rate defined by the mode timings. The current minimum refresh rate
+ * and this valid range are exposed through
+ * NV_KMS_DPY_ATTRIBUTE_VRR_MIN_REFRESH_RATE.
+ *
+ * Does not affect G-SYNC monitors, which do not have a minimum refresh
+ * rate.
+ */
+ NvU32 vrrOverrideMinRefreshRate;
+
+ /*!
+ * Output colorspace. Valid only when colorSpaceSpecified is true.
+ */
+ enum NvKmsDpyAttributeRequestedColorSpaceValue colorSpace;
+ NvBool colorSpaceSpecified;
+
+ /*!
+ * Output color bpc. Valid only when colorBpcSpecified is true.
+ */
+ enum NvKmsDpyAttributeColorBpcValue colorBpc;
+ NvBool colorBpcSpecified;
+
+ /*!
+ * Output color range. Valid only when colorRangeSpecified is true.
+ */
+ enum NvKmsDpyAttributeColorRangeValue colorRange;
+ NvBool colorRangeSpecified;
+};
+
+struct NvKmsSetModeOneDispRequest {
+ /*!
+ * The bit mask of which head[] elements to look at on this disp;
+ * any other head will use its existing configuration.
+ */
+ NvU32 requestedHeadsBitMask;
+ struct NvKmsSetModeOneHeadRequest head[NVKMS_MAX_HEADS_PER_DISP];
+};
+
+struct NvKmsSetModeRequest {
+ NvKmsDeviceHandle deviceHandle;
+
+ /*!
+ * When a modeset request is made, NVKMS will first perform
+ * validation to confirm whether the request can be satisfied. If
+ * the requested configuration cannot be fulfilled, the request
+ * returns FALSE.
+ *
+ * Only the modeset owner can issue a modeset with commit set to TRUE.
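+ *
+ * Pseudocode example of a two-phase validate-then-commit request (a
+ * sketch only: the 'commit' semantics are detailed below, error
+ * handling is omitted, ioctl() stands in for the real ioctl
+ * invocation, and FillInDispsAndHeads() is a hypothetical helper):
+ *
+ * struct NvKmsSetModeParams params;
+ * memset(&params, 0, sizeof(params));
+ * params.request.deviceHandle = deviceHandle;
+ * FillInDispsAndHeads(&params.request);
+ *
+ * params.request.commit = FALSE;
+ * ioctl(&params);
+ *
+ * if (params.reply.status == NVKMS_SET_MODE_STATUS_SUCCESS) {
+ * params.request.commit = TRUE;
+ * ioctl(&params);
+ * }
+ *
+ * A complete check would also inspect the per-disp and per-head
+ * status fields in the reply.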
+ * + * If 'commit' is FALSE, then the status of validation will be returned. + * + * If 'commit' is TRUE, and validation passes, then NVKMS will + * apply the requested configuration. + */ + NvBool commit; + + /*! + * The bitmask of which indices within disp[] describe requested + * configuration changes. Any other disps will use their existing + * configuration. + */ + NvU32 requestedDispsBitMask; + + /* + * disp[n] corresponds to the disp named by + * NvKmsAllocDeviceReply::dispHandles[n]. + */ + struct NvKmsSetModeOneDispRequest disp[NVKMS_MAX_SUBDEVICES]; + + /*! + * Whether to use NVKMS's builtin headSurface support when necessary. + * + * XXX NVKMS HEADSURFACE TODO: Make this the default and remove this field. + */ + NvBool allowHeadSurfaceInNvKms; +}; + +enum NvKmsSetModeOneHeadStatus { + NVKMS_SET_MODE_ONE_HEAD_STATUS_SUCCESS = 0, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE = 1, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_DPY = 2, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_CURSOR_IMAGE = 3, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_CURSOR_POSITION = 4, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_LUT = 5, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_FLIP = 6, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_PERMISSIONS = 7, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_HEAD_SURFACE = 8, + NVKMS_SET_MODE_ONE_HEAD_STATUS_UNSUPPORTED_HEAD_SURFACE_COMBO = 9, +}; + +struct NvKmsSetModeOneHeadReply { + /*! + * When the NVKMS_IOCTL_SET_MODE succeeds, then this will be SUCCESS. + * Otherwise, 'status' will be a non-SUCCESS value for one or more + * heads and/or one or more disps. + * + * Note that a failure could occur for a preexisting head + * configuration, so this status could be != SUCCESS for a head + * not listed in NvKmsSetModeOneDispRequest::requestedHeadsBitMask. + */ + enum NvKmsSetModeOneHeadStatus status; + + NvU32 hwHead; + + /*! + * The usage bounds that may be possible on this head based on the ISO + * BW at that point. + * + * If a flip request is within the bounds of NvKmsSetModeOneHeadReply:: + * guaranteedUsage, then clients can expect the flip to succeed. + * If a flip request is beyond the bounds of NvKmsSetModeOneHeadReply:: + * guaranteedUsage but within NvKmsSetModeOneHeadReply::possibleUsage, + * then the request may legitimately fail due to insufficient display + * bandwidth and clients need to be prepared to handle that flip + * request failure. + */ + struct NvKmsUsageBounds possibleUsage; + + /*! + * The guaranteed usage bounds usable on this head. + */ + struct NvKmsUsageBounds guaranteedUsage; + + /*! + * Whether NVKMS chose to use headSurface on this head. + */ + NvBool usingHeadSurface; + + /*! + * Whether NVKMS enabled VRR on this head. + */ + NvBool vrrEnabled; + + /*! + * Contains the 'postSyncObject' that the client requested via + * NvKmsSetModeOneHeadRequest::flip. + */ + struct NvKmsFlipCommonReplyOneHead flipReply; +}; + +enum NvKmsSetModeOneDispStatus { + NVKMS_SET_MODE_ONE_DISP_STATUS_SUCCESS = 0, + NVKMS_SET_MODE_ONE_DISP_STATUS_INVALID_REQUESTED_HEADS_BITMASK = 1, + NVKMS_SET_MODE_ONE_DISP_STATUS_FAILED_EXTENDED_GPU_CAPABILITIES_CHECK = 2, + NVKMS_SET_MODE_ONE_DISP_STATUS_FAILED_DISPLAY_PORT_BANDWIDTH_CHECK = 3, + NVKMS_SET_MODE_ONE_DISP_STATUS_INCOMPATIBLE_DPYS = 4, + NVKMS_SET_MODE_ONE_DISP_STATUS_DUPLICATE_DPYS = 5, + NVKMS_SET_MODE_ONE_DISP_STATUS_FAILED_TO_ASSIGN_HARDWARE_HEADS = 6, +}; + +struct NvKmsSetModeOneDispReply { + /*! + * When the NVKMS_IOCTL_SET_MODE succeeds, then this will be SUCCESS. 
+ * Otherwise, 'status' will be a non-SUCCESS value for one or more + * heads and/or one or more disps. + * + * Note that a failure could occur for a preexisting disp + * configuration, so this status could be != SUCCESS for a disp + * not listed in NvKmsSetModeRequest::requestedDispsBitMask. + */ + enum NvKmsSetModeOneDispStatus status; + struct NvKmsSetModeOneHeadReply head[NVKMS_MAX_HEADS_PER_DISP]; +}; + +enum NvKmsSetModeStatus { + NVKMS_SET_MODE_STATUS_SUCCESS = 0, + NVKMS_SET_MODE_STATUS_INVALID_REQUESTED_DISPS_BITMASK = 1, + NVKMS_SET_MODE_STATUS_NOT_MODESET_OWNER = 2, +}; + +struct NvKmsSetModeReply { + enum NvKmsSetModeStatus status; + struct NvKmsSetModeOneDispReply disp[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsSetModeParams { + struct NvKmsSetModeRequest request; /*! in */ + struct NvKmsSetModeReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_CURSOR_IMAGE: Set the cursor image for the + * specified head. + */ + +struct NvKmsSetCursorImageRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; + + struct NvKmsSetCursorImageCommonParams common; +}; + +struct NvKmsSetCursorImageReply { + NvU32 padding; +}; + +struct NvKmsSetCursorImageParams { + struct NvKmsSetCursorImageRequest request; /*! in */ + struct NvKmsSetCursorImageReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_MOVE_CURSOR: Set the cursor position for the specified + * head. + * + * x,y are relative to the current viewPortIn configured on the head. + */ + +struct NvKmsMoveCursorRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; + + struct NvKmsMoveCursorCommonParams common; +}; + +struct NvKmsMoveCursorReply { + NvU32 padding; +}; + +struct NvKmsMoveCursorParams { + struct NvKmsMoveCursorRequest request; /*! in */ + struct NvKmsMoveCursorReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_LUT: Set the LUT contents for the specified head. + */ + +struct NvKmsSetLutRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; + + struct NvKmsSetLutCommonParams common; +}; + +struct NvKmsSetLutReply { + NvU32 padding; +}; + +struct NvKmsSetLutParams { + struct NvKmsSetLutRequest request; /*! in */ + struct NvKmsSetLutReply reply; /*! out */ +}; + +/*! + * NVKMS_IOCTL_CHECK_LUT_NOTIFIER: Check or wait on the LUT notifier for the + * specified apiHead. + */ + +struct NvKmsCheckLutNotifierRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; + + NvBool waitForCompletion; +}; + +struct NvKmsCheckLutNotifierReply { + NvBool complete; +}; + +struct NvKmsCheckLutNotifierParams { + struct NvKmsCheckLutNotifierRequest request; /*! in */ + struct NvKmsCheckLutNotifierReply reply; /*! out */ +}; + +/*! + * NVKMS_IOCTL_IDLE_BASE_CHANNEL: Wait for the base channel to be idle on + * the requested heads on the requested subdevices of a device. + * + * Each (head,sd) pair to be idled is described by: + * + * subDevicesPerHead[head] |= NVBIT(sd) + */ + +struct NvKmsIdleBaseChannelRequest { + NvKmsDeviceHandle deviceHandle; + NvU32 subDevicesPerHead[NVKMS_MAX_HEADS_PER_DISP]; +}; + +struct NvKmsIdleBaseChannelReply { + /*! + * If stopping the base channel is necessary due to a timeout, (head,sd) + * pairs will be described with: + * + * stopSubDevicesPerHead[head] |= NVBIT(sd) + * + * indicating that semaphore releases from the stalled channels may not have + * occurred. 
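+ *
+ * For example (a sketch), a client could test whether head 0 on
+ * subdevice 1 had to be stopped with:
+ *
+ * if (reply.stopSubDevicesPerHead[0] & NVBIT(1)) {
+ * HandleStoppedChannel();
+ * }
+ *
+ * where HandleStoppedChannel() is a hypothetical client-side helper.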
+ */ + NvU32 stopSubDevicesPerHead[NVKMS_MAX_HEADS_PER_DISP]; +}; + +struct NvKmsIdleBaseChannelParams { + struct NvKmsIdleBaseChannelRequest request; /*! in */ + struct NvKmsIdleBaseChannelReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_FLIP: Flip one or more heads on the subdevices of a device. + * + * At least one head must be specified in a flip request, and at most + * NV_MAX_FLIP_REQUEST_HEADS may be specified. + */ + +struct NvKmsFlipRequestOneHead { + NvU32 sd; + NvU32 head; + struct NvKmsFlipCommonParams flip; +}; + +#define NV_MAX_FLIP_REQUEST_HEADS (NV_MAX_SUBDEVICES * NV_MAX_HEADS) + +struct NvKmsFlipRequest { + NvKmsDeviceHandle deviceHandle; + + /* Pointer to an array of length 'numFlipHeads'; each entry in the array is + * of type 'struct NvKmsFlipRequestOneHead'. */ + NvU64 pFlipHead NV_ALIGN_BYTES(8); + NvU32 numFlipHeads; + + /*! + * When a flip request is made, NVKMS will first perform + * validation to confirm whether the request can be satisfied. If + * the requested configuration cannot be fulfilled, the request + * returns FALSE. + * + * If 'commit' is FALSE, then the status of validation will be returned. + * + * If 'commit' is TRUE, and validation passes, then NVKMS will + * apply the requested configuration. + */ + NvBool commit; + +}; + +enum NvKmsVrrFlipType { + NV_KMS_VRR_FLIP_NON_VRR = 0, + NV_KMS_VRR_FLIP_GSYNC, + NV_KMS_VRR_FLIP_ADAPTIVE_SYNC, +}; + +struct NvKmsFlipReply { + /*! + * If vrrFlipType != NV_KMS_VRR_FLIP_NON_VRR, then VRR was used for the + * requested flip. In this case, vrrSemaphoreIndex indicates the index + * into the VRR semaphore surface that the client should release to + * trigger the flip. + * + * A value of -1 indicates that no VRR semaphore release is needed. + */ + NvS32 vrrSemaphoreIndex; + + /*! + * Indicates whether the flip was non-VRR, was a VRR flip on one or more + * G-SYNC displays, or was a VRR flip exclusively on Adaptive-Sync + * displays. + */ + enum NvKmsVrrFlipType vrrFlipType; + + /*! + * Indicates either success or the reason the flip request failed. + */ + enum NvKmsFlipResult flipResult; + + /*! + * Entries correspond to the heads specified in + * NvKmsFlipRequest::pFlipHead, in the same order. + */ + struct NvKmsFlipCommonReplyOneHead flipHead[NV_MAX_FLIP_REQUEST_HEADS]; +}; + +struct NvKmsFlipParams { + struct NvKmsFlipRequest request; /*! in */ + struct NvKmsFlipReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST: "Dynamic dpy" reference + * counting. + * + * Most dpys have a lifetime equal to the NVKMS device. However, some + * dpys are dynamic and are created and destroyed in response to + * getting connected or disconnected. DisplayPort MST dpys are dynamic dpys. + * + * When a dynamic dpy is disconnected, its NVDpyId will be freed and + * made available for use by dynamic dpys connected later, unless any + * client has declared "interest" in the NVDpyId. The dynamic NVDpyId + * will persist as long as a client has declared interest on it, and + * will be reused for newly connected monitors at the same dynamic dpy + * address (port address, in the case of DP MST dynamic dpys). + * + * The 'interest' field selects interest in the dynamic dpy. + * + * If the dynamic dpy has already been disconnected (and therefore + * removed) before the client has declared interest in it, this ioctl + * will fail. 
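+ *
+ * For example (a sketch; error handling is omitted and ioctl() stands
+ * in for the real ioctl invocation):
+ *
+ * struct NvKmsDeclareDynamicDpyInterestParams params;
+ * memset(&params, 0, sizeof(params));
+ * params.request.deviceHandle = deviceHandle;
+ * params.request.dispHandle = dispHandle;
+ * params.request.dpyId = dpyId;
+ * params.request.interest = TRUE;
+ * ioctl(&params);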
+ *
+ * The recommended usage pattern is:
+ *
+ * - Declare interest in the event types:
+ * NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED
+ * NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED
+ *
+ * - When a DYNAMIC_DPY_CONNECTED event is received, call
+ * NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST
+ * to declare interest on the dpy. Be sure to check the return
+ * value, in case the dynamic dpy was already removed. Update any
+ * client bookkeeping, to start tracking the dpy.
+ *
+ * - When a DYNAMIC_DPY_DISCONNECTED event is received, update any
+ * client bookkeeping, to stop tracking this dynamic dpy. Call
+ * NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST
+ * to remove interest on the dpy.
+ */
+
+struct NvKmsDeclareDynamicDpyInterestRequest {
+ NvKmsDeviceHandle deviceHandle;
+ NvKmsDispHandle dispHandle;
+ NVDpyId dpyId;
+ NvBool interest;
+};
+
+struct NvKmsDeclareDynamicDpyInterestReply {
+ NvU32 padding;
+};
+
+struct NvKmsDeclareDynamicDpyInterestParams {
+ struct NvKmsDeclareDynamicDpyInterestRequest request; /*! in */
+ struct NvKmsDeclareDynamicDpyInterestReply reply; /*! out */
+};
+
+
+/*!
+ * NVKMS_IOCTL_{,UN}REGISTER_SURFACE: Register and unregister an
+ * RM-allocated surface with NVKMS.
+ *
+ * A surface must be registered with NVKMS before NVKMS can display
+ * it. Note that NVKMS will create its own RM object for the registered
+ * surface. The surface will not be freed by resman until the surface
+ * is unregistered by the client.
+ */
+
+struct NvKmsRegisterSurfaceRequest {
+ NvKmsDeviceHandle deviceHandle;
+
+ /*!
+ * Surfaces can be specified either by file descriptor or by
+ * (rmClient, rmObject) tuple. useFd indicates which is specified
+ * in this request. Userspace clients are required to specify surfaces by
+ * file descriptor.
+ */
+ NvBool useFd;
+
+ /*!
+ * The RM client handle that was used to allocate the surface.
+ * NVKMS will use this as the hClientSrc argument to
+ * NvRmDupObject(). Only used when useFd is FALSE.
+ */
+ NvU32 rmClient;
+
+ /*
+ * For multi-plane formats, clients are free to use one memory allocation
+ * for all planes, or a separate memory allocation per plane:
+ * - For the first use case, 'rmObject'/'fd' and 'rmObjectSizeInBytes'
+ * should be the same for all planes, and each plane should have a
+ * different 'offset'.
+ * - For the second use case, 'rmObject'/'fd' should be different for each
+ * plane.
+ *
+ * The 'planes' array is indexed as follows:
+ * - For RGB and YUV packed formats, 'planes[0]' refers to the single plane
+ * that's used for these formats.
+ * - For YUV semi-planar formats, 'planes[0]' refers to the Y-plane and
+ * 'planes[1]' refers to the UV-plane.
+ * - For YUV planar formats, 'planes[0]' refers to the Y-plane, 'planes[1]'
+ * refers to the U plane, and 'planes[2]' refers to the V plane.
+ */
+ struct {
+
+ union {
+ NvU32 rmObject; /* RM memory handle */
+ NvS32 fd; /* file descriptor describing memory */
+ } u;
+
+ /*
+ * This byte offset will be added to the base address of the RM memory
+ * allocation, and determines the starting address of this plane within
+ * that allocation. This offset must be 1KB-aligned.
+ */
+ NvU64 offset NV_ALIGN_BYTES(8);
+
+ /*
+ * If the surface layout is NvKmsSurfaceMemoryLayoutPitch, then
+ * 'pitch' should be the pitch of this plane in bytes, and must
+ * have an alignment of 256 bytes. If the surface layout is
+ * NvKmsSurfaceMemoryLayoutBlockLinear, then 'pitch' should be the
+ * pitch of this plane in _blocks_. Blocks are always 64 bytes
+ * wide.
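+ *
+ * For example (a sketch): a pitch-layout plane 1920 pixels wide in a
+ * 4-byte-per-pixel format needs at least 1920 x 4 = 7680 bytes per
+ * line; 7680 is already a multiple of 256, so pitch = 7680 would be
+ * valid. The same allocation described in block-linear layout would
+ * instead use pitch = 7680 / 64 = 120 blocks.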
+ */ + NvU32 pitch; + + /* + * This is the size of the entire RM memory allocation pointed to by + * rmObject or fd prior to taking the offset into account. This is + * _not_ always the size of this plane since a single RM memory + * allocation can contain multiple planes, and we're also not taking + * the offset into account. + */ + NvU64 rmObjectSizeInBytes NV_ALIGN_BYTES(8); + } planes[NVKMS_MAX_PLANES_PER_SURFACE]; + + NvU32 widthInPixels; + NvU32 heightInPixels; + + enum NvKmsSurfaceMemoryLayout layout; + enum NvKmsSurfaceMemoryFormat format; + + NvBool noDisplayHardwareAccess; + + /* + * This flag should be set if the surface can potentially be updated + * directly on the screen after the flip. For example, this is the case + * if the surface is CPU mapped, accessible by more than one GPU, or in + * a similar situation. If this flag is set NVKMS knows not to consider + * the surface content cacheable between flips. + */ + NvBool noDisplayCaching; + + /* + * If isoType == NVKMS_MEMORY_NISO, NVKMS will create CPU and GPU mappings + * for the surface memory. + */ + NvKmsMemoryIsoType isoType; + + NvU32 log2GobsPerBlockY; +}; + +struct NvKmsRegisterSurfaceReply { + NvKmsSurfaceHandle surfaceHandle; +}; + +struct NvKmsRegisterSurfaceParams { + struct NvKmsRegisterSurfaceRequest request; /*! in */ + struct NvKmsRegisterSurfaceReply reply; /*! out */ +}; + +struct NvKmsUnregisterSurfaceRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSurfaceHandle surfaceHandle; + /* + * Normally, when a surface is unregistered, nvkms will sync any + * outstanding flips to ensure the surface is no longer referenced by + * display hardware before being torn down. + * + * To improve performance with GSP firmware, when checking if this sync is + * necessary a trusted kernel-mode client who knows it is safe to do so + * may indicate to nvkms that the sync is unneeded. + */ + NvBool skipSync; +}; + +struct NvKmsUnregisterSurfaceReply { + NvU32 padding; +}; + +struct NvKmsUnregisterSurfaceParams { + struct NvKmsUnregisterSurfaceRequest request; /*! in */ + struct NvKmsUnregisterSurfaceReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_GRANT_SURFACE: + * NVKMS_IOCTL_ACQUIRE_SURFACE: + * NVKMS_IOCTL_RELEASE_SURFACE: + * + * An NVKMS client can "grant" a registered surface to another NVKMS + * client through the following steps: + * + * - The granting NVKMS client should open /dev/nvidia-modeset, and + * call NVKMS_IOCTL_GRANT_SURFACE to associate an NvKmsSurfaceHandle + * with the file descriptor. + * + * - The granting NVKMS client should pass the file descriptor over a + * UNIX domain socket to one or more clients who should acquire the + * surface. + * + * - The granting NVKMS client can optionally close the file + * descriptor now or later. + * + * - Each acquiring client should call NVKMS_IOCTL_ACQUIRE_SURFACE, + * and pass in the file descriptor it received. This returns an + * NvKmsSurfaceHandle that the acquiring client can use to refer to + * the surface in any other NVKMS API call that takes an + * NvKmsSurfaceHandle. + * + * - The acquiring clients can optionally close the file descriptor + * now or later. + * + * - Each acquiring client should call NVKMS_IOCTL_RELEASE_SURFACE to + * release it when they are done with the surface. + * + * - When the granting client unregisters the surface, it is + * "orphaned": NVKMS will flip away from the surface if necessary, + * the RM surface allocation is unduped, and the surface is + * unregistered from EVO. 
But, the acquiring clients will continue + * to hold a reference to this orphaned surface until they release + * it. + * + * Notes: + * + * - It is an error to call NVKMS_IOCTL_GRANT_SURFACE more than once + * on a /dev/nvidia-modeset file descriptor, or to use a file + * descriptor other than one created by opening /dev/nvidia-modeset, + * or to use a file descriptor that was previously used as the first + * argument to ioctl(2). + * + * - The special handling of surfaces when the granting client + * unregisters the surface might be a little asymmetric. However, + * this strikes a balance between: + * + * (a) Making sure modesetting NVKMS clients can free memory when + * they intend to. + * + * (b) Making sure acquiring clients don't get a stale view of their + * surface handle namespace: if the surface were completely + * unregistered out from under them, the surface handle could be + * recycled without them knowing. If they later attempted to + * release the original surface, they could inadvertently release a + * different surface that happened to have the recycled handle. + * + * - Do we need an NVKMS_IOCTL_REVOKE_SURFACE? Or is the + * automatic-unregistration-in-acquiring-clients behavior + * sufficient? + */ + +struct NvKmsGrantSurfaceRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSurfaceHandle surfaceHandle; + int fd; +}; + +struct NvKmsGrantSurfaceReply { + NvU32 padding; +}; + +struct NvKmsGrantSurfaceParams { + struct NvKmsGrantSurfaceRequest request; /*! in */ + struct NvKmsGrantSurfaceReply reply; /*! out */ +}; + +struct NvKmsAcquireSurfaceRequest { + int fd; +}; + +struct NvKmsAcquireSurfaceReply { + NvKmsDeviceHandle deviceHandle; + NvKmsSurfaceHandle surfaceHandle; +}; + +struct NvKmsAcquireSurfaceParams { + struct NvKmsAcquireSurfaceRequest request; /*! in */ + struct NvKmsAcquireSurfaceReply reply; /*! out */ +}; + +struct NvKmsReleaseSurfaceRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSurfaceHandle surfaceHandle; +}; + +struct NvKmsReleaseSurfaceReply { + NvU32 padding; +}; + +struct NvKmsReleaseSurfaceParams { + struct NvKmsReleaseSurfaceRequest request; /*! in */ + struct NvKmsReleaseSurfaceReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_DPY_ATTRIBUTE: + * NVKMS_IOCTL_GET_DPY_ATTRIBUTE: + * NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES: + * + * Dpys have several attributes that can be queried and set. + * + * An attribute has a type (defined by NvKmsAttributeType), read/write + * permissions, and potentially other descriptions of its valid + * values. Use NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES to get the + * valid values of an attribute. 
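+ *
+ * Pseudocode example usage pattern (a sketch; error handling is
+ * omitted and ioctl() stands in for the real ioctl invocation):
+ *
+ * struct NvKmsGetDpyAttributeValidValuesParams valid;
+ * memset(&valid, 0, sizeof(valid));
+ * valid.request.deviceHandle = deviceHandle;
+ * valid.request.dispHandle = dispHandle;
+ * valid.request.dpyId = dpyId;
+ * valid.request.attribute = NV_KMS_DPY_ATTRIBUTE_DIGITAL_VIBRANCE;
+ * ioctl(&valid);
+ *
+ * if (valid.reply.common.writable &&
+ * (valid.reply.common.type == NV_KMS_ATTRIBUTE_TYPE_RANGE)) {
+ * struct NvKmsSetDpyAttributeParams set;
+ * memset(&set, 0, sizeof(set));
+ * set.request.deviceHandle = deviceHandle;
+ * set.request.dispHandle = dispHandle;
+ * set.request.dpyId = dpyId;
+ * set.request.attribute = NV_KMS_DPY_ATTRIBUTE_DIGITAL_VIBRANCE;
+ * set.request.value = valid.reply.common.u.range.min;
+ * ioctl(&set);
+ * }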
+ */ + +enum NvKmsAttributeType { + NV_KMS_ATTRIBUTE_TYPE_INTEGER = 0, + NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + NV_KMS_ATTRIBUTE_TYPE_INTBITS, + NV_KMS_ATTRIBUTE_TYPE_RANGE, + NV_KMS_ATTRIBUTE_TYPE_BITMASK, + NV_KMS_ATTRIBUTE_TYPE_DPY_ID, + NV_KMS_ATTRIBUTE_TYPE_DPY_ID_LIST, +}; + +enum NvKmsDpyAttribute { + NV_KMS_DPY_ATTRIBUTE_BACKLIGHT_BRIGHTNESS = 0, + NV_KMS_DPY_ATTRIBUTE_SCANLINE, + NV_KMS_DPY_ATTRIBUTE_HW_HEAD, + NV_KMS_DPY_ATTRIBUTE_HEAD, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_VIBRANCE, + NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING, + NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING_AVAILABLE, + NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING_DEFAULT, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_RANGE, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_RANGE, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_LINK_RATE, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_LINK_RATE_10MHZ, + NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG, + /* + * XXX NVKMS TODO: Delete UPDATE_GLS_FRAMELOCK; this event-only + * attribute is a kludge to tell GLS about a change in framelock + * configuration made by NVKMS. Eventually, NVKMS should manage + * framelock itself and GLS shouldn't need to be notified. + * + * Note that the event data reports two boolean values: enable + * (bit 0) and server (bit 1). + */ + NV_KMS_DPY_ATTRIBUTE_UPDATE_GLS_FRAMELOCK, + NV_KMS_DPY_ATTRIBUTE_RASTER_LOCK, + NV_KMS_DPY_ATTRIBUTE_UPDATE_FLIPLOCK, + NV_KMS_DPY_ATTRIBUTE_UPDATE_STEREO, + NV_KMS_DPY_ATTRIBUTE_DPMS, + NV_KMS_DPY_ATTRIBUTE_VRR_MIN_REFRESH_RATE, + + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_IS_MULTISTREAM, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_SINK_IS_AUDIO_CAPABLE, + + NV_KMS_DPY_ATTRIBUTE_NUMBER_OF_HARDWARE_HEADS_USED, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING attribute. */ +enum NvKmsDpyAttributeRequestedDitheringValue { + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO = 0, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_ENABLED = 1, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED = 2, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE attribute. */ +enum NvKmsDpyAttributeRequestedDitheringModeValue { + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO = 0, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2 = 1, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2 = 2, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL = 3, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE attribute. */ +enum NvKmsDpyAttributeCurrentDitheringModeValue { + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE = 0, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_DYNAMIC_2X2 = 1, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_STATIC_2X2 = 2, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_TEMPORAL = 3, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH attribute. 
*/ +enum NvKmsDpyAttributeRequestedDitheringDepthValue { + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO = 0, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_6_BITS = 1, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_8_BITS = 2, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH attribute. */ +enum NvKmsDpyAttributeCurrentDitheringDepthValue { + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE = 0, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS = 1, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS = 2, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE attribute. */ +enum NvKmsDpyAttributeCurrentColorSpaceValue { + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB = 0, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422 = 1, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444 = 2, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420 = 3, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL attribute. */ +enum NvKmsDpyAttributeDigitalSignalValue { + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_LVDS = 0, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_TMDS = 1, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_DISPLAYPORT = 2, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_HDMI_FRL = 3, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_DSI = 4, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE attribute. */ +enum NvKmsDpyAttributeDigitalLinkTypeValue { + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_SINGLE = 0, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_DUAL = 1, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_QUAD = 3, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG attribute. */ +enum NvKmsDpyAttributeFrameLockDisplayConfigValue { + NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_DISABLED = 0, + NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_CLIENT = 1, + NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_SERVER = 2, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_DPMS attribute. */ +enum NvKmsDpyAttributeDpmsValue { + NV_KMS_DPY_ATTRIBUTE_DPMS_ON, + NV_KMS_DPY_ATTRIBUTE_DPMS_STANDBY, + NV_KMS_DPY_ATTRIBUTE_DPMS_SUSPEND, + NV_KMS_DPY_ATTRIBUTE_DPMS_OFF, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE attribute */ +enum NvKmsDpyAttributeDisplayportConnectorTypeValue { + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_UNKNOWN = 0, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_DISPLAYPORT, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_HDMI, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_DVI, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_VGA, +}; + +struct NvKmsSetDpyAttributeRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + enum NvKmsDpyAttribute attribute; + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsSetDpyAttributeReply { + NvU32 padding; +}; + +struct NvKmsSetDpyAttributeParams { + struct NvKmsSetDpyAttributeRequest request; /*! in */ + struct NvKmsSetDpyAttributeReply reply; /*! out */ +}; + + +struct NvKmsGetDpyAttributeRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + enum NvKmsDpyAttribute attribute; +}; + +struct NvKmsGetDpyAttributeReply { + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsGetDpyAttributeParams { + struct NvKmsGetDpyAttributeRequest request; /*! in */ + struct NvKmsGetDpyAttributeReply reply; /*! 
out */ +}; + + +struct NvKmsAttributeValidValuesCommonReply { + NvBool readable; + NvBool writable; + enum NvKmsAttributeType type; + union { + struct { + NvS64 min NV_ALIGN_BYTES(8); + NvS64 max NV_ALIGN_BYTES(8); + } range; /*! Used when type == NV_KMS_ATTRIBUTE_TYPE_RANGE. */ + struct { + NvU32 ints; + } bits; /*! Used when type == NV_KMS_ATTRIBUTE_TYPE_INTBITS. */ + } u; +}; + +struct NvKmsGetDpyAttributeValidValuesRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + enum NvKmsDpyAttribute attribute; +}; + +struct NvKmsGetDpyAttributeValidValuesReply { + struct NvKmsAttributeValidValuesCommonReply common; +}; + + +struct NvKmsGetDpyAttributeValidValuesParams { + struct NvKmsGetDpyAttributeValidValuesRequest request; /*! in */ + struct NvKmsGetDpyAttributeValidValuesReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_DISP_ATTRIBUTE: + * NVKMS_IOCTL_GET_DISP_ATTRIBUTE: + * NVKMS_IOCTL_GET_DISP_ATTRIBUTE_VALID_VALUES: + */ + + +enum NvKmsDispAttribute { + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK = 0, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_SYNC, + NV_KMS_DISP_ATTRIBUTE_GPU_FRAMELOCK_FPGA_REVISION_UNSUPPORTED, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_STEREO_SYNC, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TIMING, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TEST_SIGNAL, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_RESET, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_SET_SWAP_BARRIER, + NV_KMS_DISP_ATTRIBUTE_QUERY_DP_AUX_LOG, +}; + + +struct NvKmsSetDispAttributeRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + enum NvKmsDispAttribute attribute; + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsSetDispAttributeReply { + NvU32 padding; +}; + +struct NvKmsSetDispAttributeParams { + struct NvKmsSetDispAttributeRequest request; /*! in */ + struct NvKmsSetDispAttributeReply reply; /*! out */ +}; + + +struct NvKmsGetDispAttributeRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + enum NvKmsDispAttribute attribute; +}; + +struct NvKmsGetDispAttributeReply { + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsGetDispAttributeParams { + struct NvKmsGetDispAttributeRequest request; /*! in */ + struct NvKmsGetDispAttributeReply reply; /*! out */ +}; + + +struct NvKmsGetDispAttributeValidValuesRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + enum NvKmsDispAttribute attribute; +}; + +struct NvKmsGetDispAttributeValidValuesReply { + struct NvKmsAttributeValidValuesCommonReply common; +}; + +struct NvKmsGetDispAttributeValidValuesParams { + struct NvKmsGetDispAttributeValidValuesRequest request; /*! in */ + struct NvKmsGetDispAttributeValidValuesReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_QUERY_FRAMELOCK: Query information about a framelock + * device. + */ + +struct NvKmsQueryFrameLockRequest { + NvKmsFrameLockHandle frameLockHandle; +}; + +struct NvKmsQueryFrameLockReply { + NvU32 gpuIds[NVKMS_MAX_GPUS_PER_FRAMELOCK]; +}; + +struct NvKmsQueryFrameLockParams { + struct NvKmsQueryFrameLockRequest request; /*! in */ + struct NvKmsQueryFrameLockReply reply; /*! out */ +}; + + +/*! 
+ * NVKMS_IOCTL_SET_FRAMELOCK_ATTRIBUTE: + * NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE: + * NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE_VALID_VALUES: + */ + +enum NvKmsFrameLockAttribute { + NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_DELAY, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_INTERVAL, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY, + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE, + NV_KMS_FRAMELOCK_ATTRIBUTE_FPGA_REVISION, + NV_KMS_FRAMELOCK_ATTRIBUTE_FIRMWARE_MAJOR_VERSION, + NV_KMS_FRAMELOCK_ATTRIBUTE_FIRMWARE_MINOR_VERSION, + NV_KMS_FRAMELOCK_ATTRIBUTE_BOARD_ID, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_DELAY_RESOLUTION, + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT0_STATUS, + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT1_STATUS, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS, + NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE_4, + NV_KMS_FRAMELOCK_ATTRIBUTE_INCOMING_HOUSE_SYNC_RATE, + NV_KMS_FRAMELOCK_ATTRIBUTE_MULTIPLY_DIVIDE_VALUE, + NV_KMS_FRAMELOCK_ATTRIBUTE_MULTIPLY_DIVIDE_MODE, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY attribute. */ +enum NvKmsFrameLockAttributePolarityValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_RISING_EDGE = 0x1, + NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_FALLING_EDGE = 0x2, + NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_BOTH_EDGES = 0x3, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE attribute. */ +enum NvKmsFrameLockAttributeHouseSyncModeValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_DISABLED = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_INPUT = 0x1, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_OUTPUT = 0x2, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED attribute. */ +enum NvKmsFrameLockAttributeEthernetDetectedValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED_NONE = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED_PORT0 = 0x1, + NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED_PORT1 = 0x2, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE attribute. */ +enum NvKmsFrameLockAttributeVideoModeValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_AUTO = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_TTL = 1, + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_BI_LEVEL = 2, + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_TRI_LEVEL = 3, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_PORT[01]_STATUS attributes. */ +enum NvKmsFrameLockAttributePortStatusValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_INPUT = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_OUTPUT = 1, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_MULTIPLY_DIVIDE_MODE attribute. */ +enum NvKmsFrameLockAttributeMulDivModeValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_MULTIPLY_DIVIDE_MODE_MULTIPLY = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_MULTIPLY_DIVIDE_MODE_DIVIDE = 1, +}; + +struct NvKmsSetFrameLockAttributeRequest { + NvKmsFrameLockHandle frameLockHandle; + enum NvKmsFrameLockAttribute attribute; + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsSetFrameLockAttributeReply { + NvU32 padding; +}; + +struct NvKmsSetFrameLockAttributeParams { + struct NvKmsSetFrameLockAttributeRequest request; /*! in */ + struct NvKmsSetFrameLockAttributeReply reply; /*! 
out */ +}; + + +struct NvKmsGetFrameLockAttributeRequest { + NvKmsFrameLockHandle frameLockHandle; + enum NvKmsFrameLockAttribute attribute; +}; + +struct NvKmsGetFrameLockAttributeReply { + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsGetFrameLockAttributeParams { + struct NvKmsGetFrameLockAttributeRequest request; /*! in */ + struct NvKmsGetFrameLockAttributeReply reply; /*! out */ +}; + + +struct NvKmsGetFrameLockAttributeValidValuesRequest { + NvKmsFrameLockHandle frameLockHandle; + enum NvKmsFrameLockAttribute attribute; +}; + +struct NvKmsGetFrameLockAttributeValidValuesReply { + struct NvKmsAttributeValidValuesCommonReply common; +}; + +struct NvKmsGetFrameLockAttributeValidValuesParams { + struct NvKmsGetFrameLockAttributeValidValuesRequest request; /*! in */ + struct NvKmsGetFrameLockAttributeValidValuesReply reply; /*! out */ +}; + + + +/*! + * NVKMS_IOCTL_GET_NEXT_EVENT, NVKMS_IOCTL_DECLARE_EVENT_INTEREST: + * Event handling. + * + * Clients should call NVKMS_IOCTL_DECLARE_EVENT_INTEREST to indicate + * the events in which they are interested. Then, block on poll(2) or + * select(2) until there are events available to read on the file + * descriptor. + * + * When events are available, the client should call + * NVKMS_IOCTL_GET_NEXT_EVENT to get an NvKmsEvent structure, and + * interpret the union based on eventType. + * + * Clients can remove interest for events by calling + * NVKMS_IOCTL_DECLARE_EVENT_INTEREST again, specifying a new + * interestMask. + * + * Note that there may still be events queued for the client when the + * client calls NVKMS_IOCTL_DECLARE_EVENT_INTEREST to change its + * interestMask. So, clients should be prepared to ignore unexpected + * events after calling NVKMS_IOCTL_DECLARE_EVENT_INTEREST. + */ + + + +/*! + * NVKMS_EVENT_TYPE_DPY_CHANGED + * + * When a dpy changes, this event will be generated. The client + * should call NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA to get an updated + * NvKmsQueryDpyDynamicDataReply. + */ + +struct NvKmsEventDpyChanged { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; +}; + + +/*! + * NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED + * + * When a dynamic dpy is connected, this event will be generated. + */ + +struct NvKmsEventDynamicDpyConnected { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; +}; + + +/*! + * NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED + * + * When a dynamic dpy is disconnected, this event will be generated. + */ + +struct NvKmsEventDynamicDpyDisconnected { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; +}; + + +/*! + * NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED + * + * When a dpy attribute changes, this event will be generated. + */ + +struct NvKmsEventDpyAttributeChanged { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + enum NvKmsDpyAttribute attribute; + NvS64 value NV_ALIGN_BYTES(8); +}; + + +/*! + * NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED + * + * When a framelock attribute changes, this event will be generated. + */ + +struct NvKmsEventFrameLockAttributeChanged { + NvKmsFrameLockHandle frameLockHandle; + enum NvKmsFrameLockAttribute attribute; + NvS64 value NV_ALIGN_BYTES(8); +}; + + +/*! + * NVKMS_EVENT_TYPE_FLIP_OCCURRED + * + * When a client requests a flip and specifies a completion notifier + * with NvKmsCompletionNotifierDescription::awaken == TRUE, this event + * will be generated. This event is only delivered to clients with + * flipping permission. 
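+ *
+ * As an illustrative sketch only (error handling and includes omitted;
+ * nvKmsIoctl() is a hypothetical helper that issues NVKMS_IOCTL_IOWR on the
+ * NVKMS fd, and the params structures are defined below), a client might
+ * wait for this event as follows:
+ *
+ *   struct NvKmsDeclareEventInterestParams interest = { };
+ *   interest.request.interestMask = 1 << NVKMS_EVENT_TYPE_FLIP_OCCURRED;
+ *   nvKmsIoctl(fd, NVKMS_IOCTL_DECLARE_EVENT_INTEREST, &interest);
+ *
+ *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *   poll(&pfd, 1, -1);
+ *
+ *   struct NvKmsGetNextEventParams event = { };
+ *   nvKmsIoctl(fd, NVKMS_IOCTL_GET_NEXT_EVENT, &event);
+ *   if (event.reply.valid &&
+ *       (event.reply.event.eventType == NVKMS_EVENT_TYPE_FLIP_OCCURRED)) {
+ *       handle(event.reply.event.u.flipOccurred);
+ *   }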
+ */ + +struct NvKmsEventFlipOccurred { + NvKmsDeviceHandle deviceHandle; + /* XXX NVKMS TODO: the dispHandle is currently hard-coded to 0. */ + NvKmsDispHandle dispHandle; + NvU32 head; + NvU32 layer; +}; + + +struct NvKmsEvent { + enum NvKmsEventType eventType; + union { + struct NvKmsEventDpyChanged dpyChanged; + struct NvKmsEventDynamicDpyConnected dynamicDpyConnected; + struct NvKmsEventDynamicDpyDisconnected dynamicDpyDisconnected; + struct NvKmsEventDpyAttributeChanged dpyAttributeChanged; + struct NvKmsEventFrameLockAttributeChanged frameLockAttributeChanged; + struct NvKmsEventFlipOccurred flipOccurred; + } u; +}; + + +struct NvKmsGetNextEventRequest { + NvU32 padding; +}; + +struct NvKmsGetNextEventReply { + /*! + * If an event is available, valid = TRUE and the NvKmsEvent + * contains the event. If no event is available, valid = FALSE. + */ + NvBool valid; + struct NvKmsEvent event; +}; + +struct NvKmsGetNextEventParams { + struct NvKmsGetNextEventRequest request; /*! in */ + struct NvKmsGetNextEventReply reply; /*! out */ +}; + + +struct NvKmsDeclareEventInterestRequest { + /*! + * Mask of event types, where each event type is indicated by (1 + * << NVKMS_EVENT_TYPE_). + */ + NvU32 interestMask; +}; + +struct NvKmsDeclareEventInterestReply { + NvU32 padding; +}; + +struct NvKmsDeclareEventInterestParams { + struct NvKmsDeclareEventInterestRequest request; /*! in */ + struct NvKmsDeclareEventInterestReply reply; /*! out */ +}; + +/*! + * NVKMS_IOCTL_CLEAR_UNICAST_EVENT + * + * The events generated through NVKMS_IOCTL_DECLARE_EVENT_INTEREST and + * NVKMS_IOCTL_GET_NEXT_EVENT are most useful for system-wide events which + * multiple clients may be interested in. Clients declare their interest in a + * collection of event types, and when they are notified that some number of + * events arrived, they have to query the events from the event queue. + * + * In contrast, "Unicast Events" are for use in cases where a client is only + * interested in a particular type of event on a particular object. + * + * To use a Unicast Event: + * + * - Create an fd through nvKmsOpen(). + * + * - Do _not_ use the fd for anything else (the first argument to ioctl(2), the + * fd in any of the granting APIs such as NvKmsGrantSurfaceParams::request:fd, + * etc). + * + * - Pass the fd into an API that allows a unicast event. E.g., + * NvKmsJoinSwapGroupParams::request::member::unicastEvent::fd + * + * - Clear the unicast event with NVKMS_IOCTL_CLEAR_UNICAST_EVENT. + * + * - Check if the event arrived; if it hasn't, then wait for the event through + * poll(2) or select(2). + */ + +struct NvKmsClearUnicastEventRequest { + int unicastEventFd; +}; + +struct NvKmsClearUnicastEventReply { + NvU32 padding; +}; + +struct NvKmsClearUnicastEventParams { + struct NvKmsClearUnicastEventRequest request; /*! in */ + struct NvKmsClearUnicastEventReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_LAYER_POSITION: Set the position of the layer + * for the specified heads on the specified disps. The layer + * position is in "desktop coordinate space", i.e., relative to the + * upper left corner of the input viewport. + * + * Note that this is only valid if + * NvKmsAllocDeviceReply::layerCaps[layer].supportsWindowMode is TRUE. + */ +struct NvKmsSetLayerPositionRequest { + NvKmsDeviceHandle deviceHandle; + + /*! + * The bitmask of which indices within disp[] describe requested + * configuration changes. Any other disps will use their existing + * configuration. + */ + NvU32 requestedDispsBitMask; + + struct { + /*! 
+ * The bitmask of which head[] elements to look at on this + * disp; any other head will use its existing configuration. + */ + NvU32 requestedHeadsBitMask; + + struct { + struct NvKmsSignedPoint layerPosition[NVKMS_MAX_LAYERS_PER_HEAD]; + /*! + * The bitmask of which layerPosition[] elements to look at on this + * head; any other layer will use its existing configuration. + */ + NvU32 requestedLayerBitMask; + } head[NVKMS_MAX_HEADS_PER_DISP]; + + } disp[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsSetLayerPositionReply { + NvU32 padding; +}; + +struct NvKmsSetLayerPositionParams { + struct NvKmsSetLayerPositionRequest request; /*! in */ + struct NvKmsSetLayerPositionReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_GRAB_OWNERSHIP: + * NVKMS_IOCTL_RELEASE_OWNERSHIP: + * + * NVKMS_IOCTL_GRAB_OWNERSHIP notifies NVKMS that the calling client wants to + * control modesets on the device, and NVKMS_IOCTL_RELEASE_OWNERSHIP indicates + * that the modeset ownership should be released and the VT console mode + * restored. + * + * It is not necessary to call NVKMS_IOCTL_RELEASE_OWNERSHIP during shutdown; + * NVKMS will implicitly clear modeset ownership in nvKmsClose(). + * + * Releasing modeset ownership enables console hotplug handling. See the + * explanation in the comment for enableConsoleHotplugHandling above. + * + * If modeset ownership is held by nvidia-drm, then NVKMS_IOCTL_GRAB_OWNERSHIP + * will fail. Clients should open the corresponding DRM device node, acquire + * 'master' on it, and then use DRM_NVIDIA_GRANT_PERMISSIONS with permission + * type NV_DRM_PERMISSIONS_TYPE_SUB_OWNER to acquire sub-owner permission. + */ + +struct NvKmsGrabOwnershipRequest { + NvKmsDeviceHandle deviceHandle; +}; + +struct NvKmsGrabOwnershipReply { + NvU32 padding; +}; + +struct NvKmsGrabOwnershipParams { + struct NvKmsGrabOwnershipRequest request; /*! in */ + struct NvKmsGrabOwnershipReply reply; /*! out */ +}; + +struct NvKmsReleaseOwnershipRequest { + NvKmsDeviceHandle deviceHandle; +}; + +struct NvKmsReleaseOwnershipReply { + NvU32 padding; +}; + +struct NvKmsReleaseOwnershipParams { + struct NvKmsReleaseOwnershipRequest request; /*! in */ + struct NvKmsReleaseOwnershipReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_GRANT_PERMISSIONS: + * NVKMS_IOCTL_ACQUIRE_PERMISSIONS: + * NVKMS_IOCTL_REVOKE_PERMISSIONS: + * + * By default, only the modeset owning NVKMS client (the one who + * successfully called NVKMS_IOCTL_GRAB_OWNERSHIP) is allowed to flip + * or set modes. + * + * However, the modeset owner or another NVKMS client with + * NV_KMS_PERMISSIONS_TYPE_SUB_OWNER permission can grant various + * permissions to other clients through the following steps: + * + * - The modeset owner should open /dev/nvidia-modeset, and call + * NVKMS_IOCTL_GRANT_PERMISSIONS to define a set of permissions + * associated with the file descriptor. + * + * - The modeset owner should pass the file descriptor over a UNIX + * domain socket to one or more clients who should acquire these + * permissions. + * + * - The modeset owner can optionally close the file descriptor now or + * later. + * + * - The acquiring clients should call NVKMS_IOCTL_ACQUIRE_PERMISSIONS + * and pass in the file descriptor they received, to update their + * client connection to include the permissions specified by the modeset + * owner in the first bullet. + * + * - The acquiring clients can optionally close the file descriptor + * now or later. 
+ * + * - From this point forward, both the modeset owner and the clients + * are allowed to perform the actions allowed by the granted + * permissions. + * + * - The modeset owner can optionally revoke any previously granted + * permissions with NVKMS_IOCTL_REVOKE_PERMISSIONS. This can be + * device-scope for all of a type, or just a set of permissions. + * Note that _REVOKE_PERMISSIONS to revoke a set of modeset permissions + * will cause the revoked heads to be shut down. + * + * Notes: + * + * - NvKmsPermissions::disp[n] corresponds to the disp named by + * NvKmsAllocDeviceReply::dispHandles[n]. + * + * - It is an error to call NVKMS_IOCTL_GRANT_PERMISSIONS more than + * once on a /dev/nvidia-modeset file descriptor, or to use a file + * descriptor other than one created by opening /dev/nvidia-modeset, + * or to use a file descriptor that was previously used as the first + * argument to ioctl(2). + * + * - Calling NVKMS_IOCTL_ACQUIRE_PERMISSIONS more than once on the + * same NVKMS client will cause the new permissions for that client + * to be the union of the previous permissions and the latest + * permissions being acquired. + */ + +enum NvKmsPermissionsType { + NV_KMS_PERMISSIONS_TYPE_FLIPPING = 1, + NV_KMS_PERMISSIONS_TYPE_MODESET = 2, + NV_KMS_PERMISSIONS_TYPE_SUB_OWNER = 3, +}; + +struct NvKmsFlipPermissions { + struct { + struct { + /* + * Bitmask of flippable layers, where each layer is + * indicated by '1 << layer'. It is an error for bits + * above NVKMS_MAX_LAYERS_PER_HEAD to be set. + * + * Only applicable when type==FLIPPING. + */ + NvU8 layerMask; + } head[NVKMS_MAX_HEADS_PER_DISP]; + } disp[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsModesetPermissions { + struct { + struct { + /* + * A list of dpys which a particular NVKMS client is + * allowed to use when performing a modeset on this head. + * + * If the NVKMS client is not allowed to set a mode on + * this head, this list will be empty. + * + * If an NVKMS client can drive the head without + * restrictions, this will be nvAllDpyIdList(). + * + * Only applicable when type==MODESET. + */ + NVDpyIdList dpyIdList; + } head[NVKMS_MAX_HEADS_PER_DISP]; + } disp[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsPermissions { + enum NvKmsPermissionsType type; + union { + struct NvKmsFlipPermissions flip; + struct NvKmsModesetPermissions modeset; + }; +}; + +struct NvKmsGrantPermissionsRequest { + int fd; + NvKmsDeviceHandle deviceHandle; + struct NvKmsPermissions permissions; +}; + +struct NvKmsGrantPermissionsReply { + NvU32 padding; +}; + +struct NvKmsGrantPermissionsParams { + struct NvKmsGrantPermissionsRequest request; /*! in */ + struct NvKmsGrantPermissionsReply reply; /*! out */ +}; + +struct NvKmsAcquirePermissionsRequest { + int fd; +}; + +struct NvKmsAcquirePermissionsReply { + /*! This client's handle for the device which acquired new permissions */ + NvKmsDeviceHandle deviceHandle; + + /*! + * The acquired permissions. + * + * If permissions::type == FLIPPING, the new combined flipping + * permissions of the calling client on this device, including + * prior permissions and permissions added by this operation. + */ + struct NvKmsPermissions permissions; +}; + +struct NvKmsAcquirePermissionsParams { + struct NvKmsAcquirePermissionsRequest request; /*! in */ + struct NvKmsAcquirePermissionsReply reply; /*! out */ +}; + +struct NvKmsRevokePermissionsRequest { + NvKmsDeviceHandle deviceHandle; + + /* + * A bitmask of permission types to be revoked for this device. 
+ * It should be the bitwise 'or' of any number of
+ * NVBIT(NV_KMS_PERMISSIONS_TYPE_*) values.
+ */
+    NvU32 permissionsTypeBitmask;
+
+    /*
+     * If permissionsTypeBitmask is 0, instead revoke only these permissions.
+     */
+    struct NvKmsPermissions permissions;
+};
+
+struct NvKmsRevokePermissionsReply {
+    NvU32 padding;
+};
+
+struct NvKmsRevokePermissionsParams {
+    struct NvKmsRevokePermissionsRequest request; /*! in */
+    struct NvKmsRevokePermissionsReply reply; /*! out */
+};
+
+
+/*!
+ * NVKMS_IOCTL_QUERY_DPY_CRC32
+ *
+ * Query the last CRC32 value from the NVKMS disp head specified by the
+ * triplet (deviceHandle, dispHandle, head).
+ */
+
+struct NvKmsQueryDpyCRC32Request {
+    NvKmsDeviceHandle deviceHandle;
+    NvKmsDispHandle dispHandle;
+    NvU32 head;
+};
+
+/*!
+ * Generic CRC structure type representing the CRC value obtained, and whether
+ * the hardware architecture supports collection of that CRC type. If the CRC
+ * is not supported by the hardware, its value is undefined.
+ */
+struct NvKmsDpyCRC32 {
+    /*!
+     * Value of the CRC. If it is not supported, the value is undefined.
+     */
+    NvU32 value;
+
+    /*!
+     * Boolean indicating whether the hardware supports collection of this
+     * CRC. If this boolean is FALSE, CRC hardware collection is not supported.
+     */
+    NvBool supported;
+};
+
+/*!
+ * Reply structure that contains the CRC32 values returned from the hardware.
+ * Each supported CRC is indicated by the 'supported' boolean in its
+ * NvKmsDpyCRC32 struct. Note: CRCs that are not supported will not be updated
+ * and will remain at 0.
+ */
+struct NvKmsQueryDpyCRC32Reply {
+    /*!
+     * CRC generated from the Compositor hardware.
+     */
+    struct NvKmsDpyCRC32 compositorCrc32;
+
+    /*!
+     * CRC generated from the RG hardware, if the head is driving RG/SF.
+     * Note that if Dithering is enabled, this CRC will vary across reads
+     * from the same frame.
+     */
+    struct NvKmsDpyCRC32 rasterGeneratorCrc32;
+
+    /*!
+     * CRC value generated from the target SF/OR, depending on the connector's
+     * OR type. Note that if Dithering is enabled, this CRC will vary across
+     * reads from the same frame.
+     */
+    struct NvKmsDpyCRC32 outputCrc32;
+
+};
+
+struct NvKmsQueryDpyCRC32Params {
+    struct NvKmsQueryDpyCRC32Request request; /*! in */
+    struct NvKmsQueryDpyCRC32Reply reply; /*! out */
+};
+
+/*!
+ * User-space pointers are always passed to NVKMS in an NvU64.
+ * This user-space address is eventually passed into the platform's
+ * copyin/copyout functions, in a void* argument.
+ *
+ * This utility function converts from a pointer to an NvU64.
+ */
+
+static inline NvU64 nvKmsPointerToNvU64(const void *ptr)
+{
+    return (NvU64)(NvUPtr)ptr;
+}
+
+
+/*!
+ * NVKMS_IOCTL_REGISTER_DEFERRED_REQUEST_FIFO:
+ * NVKMS_IOCTL_UNREGISTER_DEFERRED_REQUEST_FIFO:
+ *
+ * To make a request that is deferred until after a specific point in a
+ * client's graphics channel, a client should register a surface with NVKMS as
+ * a "deferred request fifo". The surface is interpreted as having the layout
+ * of struct NvKmsDeferredRequestFifo.
+ *
+ * To make deferred requests, the client should:
+ *
+ * - Write the NVKMS_DEFERRED_REQUEST_OPCODE for the desired operation to
+ *   NvKmsDeferredRequestFifo::request[i], where 'i' is the next available
+ *   element in the request[] array. Repeat as necessary.
+ *
+ * - Push NV906F_SEMAPHORE[ABCD] methods in its graphics channel to write
+ *   '(i + 1) % NVKMS_MAX_DEFERRED_REQUESTS' to
+ *   NvKmsDeferredRequestFifo::put.
+ *
+ * - Push an NV906F_NON_STALL_INTERRUPT method in its graphics channel.
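+ *
+ * For illustration only, using the request field layout #defined below
+ * (opcode in bits 7:0, semaphore index in bits 15:8), a CPU write of a
+ * SWAP_GROUP_READY request with a hypothetical semaphore index 'semIdx'
+ * into a CPU mapping 'fifo' of the registered surface might look like:
+ *
+ *   fifo->request[i] =
+ *       (NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY & 0xFF) |
+ *       ((semIdx & 0xFF) << 8);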
+ * + * NVKMS will be notified of the non-stall interrupt, and scan all clients' + * deferred request fifos for put != get. NVKMS will then perform the requests + * specified in request[get] through request[put-1]. Finally, NVKMS will update + * get to indicate how much of the fifo it consumed. + * + * Wrapping behaves as expected. In pseudo code: + * + * while (get != put) { + * do(request[get]); + * get = (get + 1) % NVKMS_MAX_DEFERRED_REQUESTS; + * } + * + * The only time it is safe for clients to write to get is when get == put and + * there are no outstanding semaphore releases to gpuPut. + * + * The surface used for the deferred request fifo must be: + * + * - In system memory (NVKMS will create one device-scoped mapping, not one per + * subdevice, as would be needed if the surface were in video memory). + * + * - At least as large as sizeof(NvKmsDeferredRequestFifo). + * + * Some NVKMS_DEFERRED_REQUESTs may need to write to a semaphore after some + * operation is performed (e.g., to indicate that a SwapGroup is ready, or that + * we've reached vblank). The NVKMS_DEFERRED_REQUEST_SEMAPHORE_INDEX field + * within the request specifies a semaphore within the + * NvKmsDeferredRequestFifo::semaphore[] array. The semantics of that semaphore + * index are opcode-specific. + * + * The opcode and semaphore index are in the low 16-bits of the request. The + * upper 16-bits are opcode-specific. + */ + +#define NVKMS_MAX_DEFERRED_REQUESTS 128 + +#define NVKMS_DEFERRED_REQUEST_OPCODE 7:0 + +#define NVKMS_DEFERRED_REQUEST_SEMAPHORE_INDEX 15:8 + +#define NVKMS_DEFERRED_REQUEST_OPCODE_NOP 0 + +/* + * The SWAP_GROUP_READY request means that this NvKmsDeferredRequestFifo is + * ready for the next swap of the SwapGroup (see NVKMS_IOCTL_JOIN_SWAP_GROUP, + * below). NVKMS_DEFERRED_REQUEST_SEMAPHORE_INDEX should specify an element in + * the semaphore[] array which will be released to + * + * NVKMS_DEFERRED_REQUEST_SEMAPHORE_VALUE_SWAP_GROUP_READY + * + * when the SwapGroup actually swaps. + */ +#define NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY 1 +#define NVKMS_DEFERRED_REQUEST_SEMAPHORE_VALUE_SWAP_GROUP_NOT_READY 0x00000000 +#define NVKMS_DEFERRED_REQUEST_SEMAPHORE_VALUE_SWAP_GROUP_READY 0xFFFFFFFF + + +/* + * The SWAP_GROUP_READY_PER_EYE_STEREO field indicates whether this deferred + * request fifo wants the SwapGroup to present new content at every eye boundary + * (PER_EYE), or present new content only when transitioning from the right eye + * to the left eye (PER_PAIR). + */ +#define NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO 16:16 +#define NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_PAIR 0 +#define NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_EYE 1 + + +struct NvKmsDeferredRequestFifo { + NvU32 put; + NvU32 get; + NvU32 request[NVKMS_MAX_DEFERRED_REQUESTS]; + NvGpuSemaphore semaphore[NVKMS_MAX_DEFERRED_REQUESTS]; +}; + +struct NvKmsRegisterDeferredRequestFifoRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSurfaceHandle surfaceHandle; +}; + +struct NvKmsRegisterDeferredRequestFifoReply { + NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle; +}; + +struct NvKmsRegisterDeferredRequestFifoParams { + struct NvKmsRegisterDeferredRequestFifoRequest request; /*! in */ + struct NvKmsRegisterDeferredRequestFifoReply reply; /*! 
out */ +}; + +struct NvKmsUnregisterDeferredRequestFifoRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle; +}; + +struct NvKmsUnregisterDeferredRequestFifoReply { + NvU32 padding; +}; + +struct NvKmsUnregisterDeferredRequestFifoParams { + struct NvKmsUnregisterDeferredRequestFifoRequest request; /*! in */ + struct NvKmsUnregisterDeferredRequestFifoReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_ALLOC_SWAP_GROUP + * NVKMS_IOCTL_FREE_SWAP_GROUP + * + * An NVKMS client creates a SwapGroup by calling NVKMS_IOCTL_ALLOC_SWAP_GROUP + * and specifying the heads in the SwapGroup with + * NvKmsAllocSwapGroupRequest::disp[]::headMask. + * + * The SwapGroup can be shared with clients through + * NVKMS_IOCTL_GRANT_SWAP_GROUP, and it is destroyed once all clients that have + * acquired the swap group through NVKMS_IOCTL_ACQUIRE_SWAP_GROUP have released + * it through NVKMS_IOCTL_RELEASE_SWAP_GROUP and when the client that created + * the swap group has called NVKMS_IOCTL_FREE_SWAP_GROUP or freed the device. + * + * The SwapGroup allocation is expected to have a long lifetime (e.g., the X + * driver might call ALLOC_SWAP_GROUP from ScreenInit and FREE_SWAP_GROUP from + * CloseScreen). The point of these requests is to define the head topology of + * the SwapGroup (for X driver purposes, presumably all the heads that are + * assigned to the X screen). + * + * As such: + * + * - Not all heads described in the ALLOC_SWAP_GROUP request need to be active + * (they can come and go with different modesets). + * + * - The SwapGroup persists across modesets. + * + * - SwapGroup allocation is expected to be lightweight: the heavyweight + * operations like allocating and freeing headSurface resources are done when + * the number of SwapGroup members (see {JOIN,LEAVE}_SWAP_GROUP below) + * transitions between 0 and 1. + * + * Only an NVKMS modeset owner can alloc or free a SwapGroup. + */ + +struct NvKmsSwapGroupConfig { + struct { + NvU32 headMask; + } disp[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsAllocSwapGroupRequest { + NvKmsDeviceHandle deviceHandle; + struct NvKmsSwapGroupConfig config; +}; + +struct NvKmsAllocSwapGroupReply { + NvKmsSwapGroupHandle swapGroupHandle; +}; + +struct NvKmsAllocSwapGroupParams { + struct NvKmsAllocSwapGroupRequest request; /*! in */ + struct NvKmsAllocSwapGroupReply reply; /*! out */ +}; + +struct NvKmsFreeSwapGroupRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSwapGroupHandle swapGroupHandle; +}; + +struct NvKmsFreeSwapGroupReply { + NvU32 padding; +}; + +struct NvKmsFreeSwapGroupParams { + struct NvKmsFreeSwapGroupRequest request; /*! in */ + struct NvKmsFreeSwapGroupReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_JOIN_SWAP_GROUP + * NVKMS_IOCTL_LEAVE_SWAP_GROUP + * + * Clients can join NvKmsDeferredRequestFifos to SwapGroups using + * NVKMS_IOCTL_JOIN_SWAP_GROUP, and remove NvKmsDeferredRequestFifos from + * SwapGroups using NVKMS_IOCTL_LEAVE_SWAP_GROUP (or freeing the + * NvKmsDeferredRequestFifo, or freeing the device). + * + * Once an NvKmsDeferredRequestFifo is joined to a SwapGroup, the SwapGroup will + * not become ready again until the SwapGroup member sends the + * NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY request through their + * NvKmsDeferredRequestFifo. 
The NVKMS_DEFERRED_REQUEST_SEMAPHORE_INDEX
+ * specified as part of the request indicates an index into
+ * NvKmsDeferredRequestFifo::semaphore[] where NVKMS will write
+ *
+ *   NVKMS_DEFERRED_REQUEST_SEMAPHORE_VALUE_SWAP_GROUP_READY
+ *
+ * when the SwapGroup becomes ready.
+ *
+ * If unicastEvent::specified is TRUE, then unicastEvent::fd will be interpreted
+ * as a unicast event file descriptor. See NVKMS_IOCTL_CLEAR_UNICAST_EVENT for
+ * details. Whenever SWAP_GROUP_READY is written to a semaphore within
+ * NvKmsDeferredRequestFifo, the unicastEvent fd will be notified.
+ *
+ * An NvKmsDeferredRequestFifo can be joined to at most one SwapGroup at a time.
+ *
+ * If one client uses multiple NvKmsDeferredRequestFifos joined to multiple
+ * SwapGroups and wants to synchronize swaps between these fifos, it should
+ * bundle all of the (deviceHandle, swapGroupHandle, deferredRequestFifoHandle)
+ * tuples into a single join/leave request.
+ *
+ * If any client joins multiple NvKmsDeferredRequestFifos to multiple
+ * SwapGroups, all NVKMS_IOCTL_JOIN_SWAP_GROUP requests must specify the same
+ * set of SwapGroups.
+ */
+
+struct NvKmsJoinSwapGroupRequestOneMember {
+    NvKmsDeviceHandle deviceHandle;
+    NvKmsSwapGroupHandle swapGroupHandle;
+    NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle;
+
+    struct {
+        int fd;
+        NvBool specified;
+    } unicastEvent;
+};
+
+struct NvKmsJoinSwapGroupRequest {
+    NvU32 numMembers;
+    struct NvKmsJoinSwapGroupRequestOneMember member[NVKMS_MAX_SWAPGROUPS];
+};
+
+struct NvKmsJoinSwapGroupReply {
+    NvU32 padding;
+};
+
+struct NvKmsJoinSwapGroupParams {
+    struct NvKmsJoinSwapGroupRequest request; /*! in */
+    struct NvKmsJoinSwapGroupReply reply; /*! out */
+};
+
+struct NvKmsLeaveSwapGroupRequestOneMember {
+    NvKmsDeviceHandle deviceHandle;
+    NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle;
+};
+
+struct NvKmsLeaveSwapGroupRequest {
+    NvU32 numMembers;
+    struct NvKmsLeaveSwapGroupRequestOneMember member[NVKMS_MAX_SWAPGROUPS];
+};
+
+struct NvKmsLeaveSwapGroupReply {
+    NvU32 padding;
+};
+
+struct NvKmsLeaveSwapGroupParams {
+    struct NvKmsLeaveSwapGroupRequest request; /*! in */
+    struct NvKmsLeaveSwapGroupReply reply; /*! out */
+};
+
+
+/*!
+ * NVKMS_IOCTL_SET_SWAP_GROUP_CLIP_LIST
+ *
+ * The X driver needs to define which pixels on-screen are owned by the
+ * SwapGroup. NVKMS will use this to prevent those pixels from updating until
+ * all SwapGroup members indicate that they are ready.
+ *
+ * The clip list is interpreted by NVKMS as relative to the surface specified
+ * during a flip or modeset. The clip list is intersected with the ViewPortIn
+ * of the head, described by
+ *
+ *   NvKmsFlipCommonParams::viewPortIn::point
+ *
+ * and
+ *
+ *   NvKmsSetModeOneHeadRequest::viewPortSizeIn
+ *
+ * The clip list is exclusive. I.e., each NvKmsRect is a region outside of the
+ * SwapGroup. One surface-sized NvKmsRect would mean that there are no
+ * SwapGroup-owned pixels.
+ *
+ * When no clip list is specified, NVKMS behaves as if there were no
+ * SwapGroup-owned pixels.
+ *
+ * Only an NVKMS modeset owner can set the clip list of a SwapGroup.
+ */
+
+struct NvKmsSetSwapGroupClipListRequest {
+    NvKmsDeviceHandle deviceHandle;
+    NvKmsSwapGroupHandle swapGroupHandle;
+
+    /*! The number of struct NvKmsRects pointed to by pClipList. */
+    NvU16 nClips;
+
+    /*!
+     * Pointer to an array of struct NvKmsRects describing the exclusive clip
+     * list for the SwapGroup. The NvKmsRects are in desktop coordinate space.
+ * + * Use nvKmsPointerToNvU64() to assign pClipList. + */ + NvU64 pClipList NV_ALIGN_BYTES(8); +}; + +struct NvKmsSetSwapGroupClipListReply { + NvU32 padding; +}; + +struct NvKmsSetSwapGroupClipListParams { + struct NvKmsSetSwapGroupClipListRequest request; /*! in */ + struct NvKmsSetSwapGroupClipListReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_GRANT_SWAP_GROUP: + * NVKMS_IOCTL_ACQUIRE_SWAP_GROUP: + * NVKMS_IOCTL_RELEASE_SWAP_GROUP: + * + * An NVKMS client can "grant" a swap group that it has allocated through + * NVKMS_IOCTL_ALLOC_SWAP_GROUP to another NVKMS client through the following + * steps: + * + * - The granting NVKMS client should open /dev/nvidia-modeset, and call + * NVKMS_IOCTL_GRANT_SWAP_GROUP to associate an NvKmsSwapGroupHandle + * with the file descriptor. + * + * - The granting NVKMS client should pass the file descriptor over a + * UNIX domain socket to one or more clients who should acquire the + * swap group. + * + * - The granting NVKMS client can optionally close the file + * descriptor now or later. + * + * - Each acquiring client should call NVKMS_IOCTL_ACQUIRE_SWAP_GROUP, + * and pass in the file descriptor it received. This returns an + * NvKmsSwapGroupHandle that the acquiring client can use to refer to + * the swap group in any other NVKMS API call that takes an + * NvKmsSwapGroupHandle. + * + * - The acquiring clients can optionally close the file descriptor + * now or later. + * + * - Each acquiring client should call NVKMS_IOCTL_RELEASE_SWAP_GROUP to + * release it when they are done with the swap group. + */ + +struct NvKmsGrantSwapGroupRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSwapGroupHandle swapGroupHandle; + int fd; +}; + +struct NvKmsGrantSwapGroupReply { + NvU32 padding; +}; + +struct NvKmsGrantSwapGroupParams { + struct NvKmsGrantSwapGroupRequest request; /*! in */ + struct NvKmsGrantSwapGroupReply reply; /*! out */ +}; + +struct NvKmsAcquireSwapGroupRequest { + int fd; +}; + +struct NvKmsAcquireSwapGroupReply { + NvKmsDeviceHandle deviceHandle; + NvKmsSwapGroupHandle swapGroupHandle; +}; + +struct NvKmsAcquireSwapGroupParams { + struct NvKmsAcquireSwapGroupRequest request; /*! in */ + struct NvKmsAcquireSwapGroupReply reply; /*! out */ +}; + +struct NvKmsReleaseSwapGroupRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSwapGroupHandle swapGroupHandle; +}; + +struct NvKmsReleaseSwapGroupReply { + NvU32 padding; +}; + +struct NvKmsReleaseSwapGroupParams { + struct NvKmsReleaseSwapGroupRequest request; /*! in */ + struct NvKmsReleaseSwapGroupReply reply; /*! out */ +}; + +/*! + * NVKMS_IOCTL_SWITCH_MUX: + * + * Switch the mux for the given Dpy in the given direction. The mux switch is + * performed in three stages. + */ + +enum NvKmsMuxOperation { + NVKMS_SWITCH_MUX_PRE, + NVKMS_SWITCH_MUX, + NVKMS_SWITCH_MUX_POST, +}; + +struct NvKmsSwitchMuxRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + enum NvKmsMuxOperation operation; + NvMuxState state; +}; + +struct NvKmsSwitchMuxReply { + NvU32 padding; +}; + +struct NvKmsSwitchMuxParams { + struct NvKmsSwitchMuxRequest request; + struct NvKmsSwitchMuxReply reply; +}; + +struct NvKmsGetMuxStateRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; +}; + +struct NvKmsGetMuxStateReply { + NvMuxState state; +}; + +struct NvKmsGetMuxStateParams { + struct NvKmsGetMuxStateRequest request; + struct NvKmsGetMuxStateReply reply; +}; + +/*! 
+ * NVKMS_IOCTL_EXPORT_VRR_SEMAPHORE_SURFACE: + * + * Export the VRR semaphore surface onto the provided RM 'memFd'. + * The RM memory FD should be "empty". An empty FD can be allocated by calling + * NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD with 'EMPTY_FD' set. + */ + +struct NvKmsExportVrrSemaphoreSurfaceRequest { + NvKmsDeviceHandle deviceHandle; + int memFd; +}; + +struct NvKmsExportVrrSemaphoreSurfaceReply { + NvU32 padding; +}; + +struct NvKmsExportVrrSemaphoreSurfaceParams { + struct NvKmsExportVrrSemaphoreSurfaceRequest request; + struct NvKmsExportVrrSemaphoreSurfaceReply reply; +}; + +/*! + * NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT: + * NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT: + * + * The NVKMS client can use NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT to request a + * vblank syncpt that continuously triggers each time the raster generator + * reaches the start of vblank. NVKMS will return the syncpt id in + * 'NvKmsEnableVblankSyncObjectReply::syncptId'. + * + * The NVKMS client can use NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT to disable + * the vblank syncpt. + * + * If a vblank syncpt is currently enabled on a head, and a modeset request is + * issued to reconfigure that head with a new set of mode timings, NVKMS will + * automatically reenable the vblank syncpt so it continues to trigger with the + * new mode timings. + * + * Clients can use these IOCTLs only if both NvKmsAllocDeviceReply:: + * supportsVblankSyncObjects and NvKmsAllocDeviceReply::supportsSyncpts are + * TRUE. + */ + +struct NvKmsEnableVblankSyncObjectRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; +}; + +struct NvKmsEnableVblankSyncObjectReply { + /* + * Clients should explicitly disable the vblank sync object to consume the + * handle. + */ + NvKmsVblankSyncObjectHandle vblankHandle; + + NvU32 syncptId; +}; + +struct NvKmsEnableVblankSyncObjectParams { + struct NvKmsEnableVblankSyncObjectRequest request; /*! in */ + struct NvKmsEnableVblankSyncObjectReply reply; /*! out */ +}; + +struct NvKmsDisableVblankSyncObjectRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; + + /* This handle is received in NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT. */ + NvKmsVblankSyncObjectHandle vblankHandle; +}; + +struct NvKmsDisableVblankSyncObjectReply { + NvU32 padding; +}; + +struct NvKmsDisableVblankSyncObjectParams { + struct NvKmsDisableVblankSyncObjectRequest request; /*! in */ + struct NvKmsDisableVblankSyncObjectReply reply; /*! out */ +}; + +/*! + * NVKMS_IOCTL_NOTIFY_VBLANK: + * + * Register a unicast event fd to be notified when the next vblank event occurs + * on the specified head. This is a one-shot notification, and in order to be + * notified of subsequent vblank events the caller must clear and re-register + * the unicast event fd. + */ + +struct NvKmsNotifyVblankRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; + + struct { + int fd; + } unicastEvent; +}; + +struct NvKmsNotifyVblankReply { + NvU32 padding; +}; + +struct NvKmsNotifyVblankParams { + struct NvKmsNotifyVblankRequest request; /*! in */ + struct NvKmsNotifyVblankReply reply; /*! out */ +}; + +/*! + * NVKMS_IOCTL_SET_FLIPLOCK_GROUP: + * + * This ioctl specifies a set of active heads on which fliplock is allowed. + * The heads can span devices. 
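+ *
+ * As a sketch (using the request structure defined below; 'dev0', 'dev1',
+ * 'fd', and the nvKmsIoctl() helper are assumed, not defined by this header),
+ * enabling fliplock across head 0 of disp 0 on two devices might look like:
+ *
+ *   struct NvKmsSetFlipLockGroupParams p = { };
+ *   p.request.enable = NV_TRUE;
+ *   p.request.dev[0].deviceHandle = dev0;
+ *   p.request.dev[0].requestedDispsBitMask = 0x1;
+ *   p.request.dev[0].disp[0].requestedHeadsBitMask = 0x1;
+ *   p.request.dev[1].deviceHandle = dev1;
+ *   p.request.dev[1].requestedDispsBitMask = 0x1;
+ *   p.request.dev[1].disp[0].requestedHeadsBitMask = 0x1;
+ *   nvKmsIoctl(fd, NVKMS_IOCTL_SET_FLIPLOCK_GROUP, &p);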
+ * The requested state for this group will be maintained until:
+ * a) A subsequent SetFlipLockGroup ioctl that specifies any of the heads
+ * b) A subsequent ModeSet ioctl that specifies any of the heads
+ * If either of those occurs, the requested state will be destroyed for *all*
+ * of the heads.
+ *
+ * Note that a successful call with 'enable = TRUE' only indicates that the
+ * request to enable fliplock is registered, not that fliplock was actually
+ * enabled. Fliplock may not be enabled due to incompatible modetimings, for
+ * example.
+ */
+
+struct NvKmsSetFlipLockGroupOneDev {
+    NvKmsDeviceHandle deviceHandle;
+
+    NvU32 requestedDispsBitMask;
+    struct {
+        NvU32 requestedHeadsBitMask;
+    } disp[NVKMS_MAX_SUBDEVICES];
+};
+
+struct NvKmsSetFlipLockGroupRequest {
+    NvBool enable;
+    struct NvKmsSetFlipLockGroupOneDev dev[NV_MAX_SUBDEVICES];
+};
+
+struct NvKmsSetFlipLockGroupReply {
+    NvU32 padding;
+};
+
+struct NvKmsSetFlipLockGroupParams {
+    struct NvKmsSetFlipLockGroupRequest request; /*! in */
+    struct NvKmsSetFlipLockGroupReply reply; /*! out */
+};
+
+/*
+ * NVKMS_IOCTL_ENABLE_VBLANK_SEM_CONTROL
+ * NVKMS_IOCTL_DISABLE_VBLANK_SEM_CONTROL
+ * NVKMS_IOCTL_ACCEL_VBLANK_SEM_CONTROLS
+ *
+ * The VBlank Semaphore Control API ("VBlank Sem Control") allows clients to
+ * register for a semaphore release to be performed on the specified system
+ * memory.
+ *
+ * One or more clients may register a memory allocation + offset by specifying
+ * an NvKmsSurfaceHandle and offset within that surface. Until the
+ * vblank_sem_control is disabled, during each vblank on all enabled heads,
+ * nvkms will interpret the specified memory location as an
+ * NvKmsVblankSemControlData data structure. Each enabled head will inspect the
+ * corresponding NvKmsVblankSemControlDataOneHead at
+ * NvKmsVblankSemControlData::head[head].
+ *
+ * NvKmsEnableVblankSemControlRequest::surfaceOffset must be a multiple of 8, so
+ * that GPU semaphore releases can write to 8-byte fields within
+ * NvKmsVblankSemControlDataOneHead with natural alignment.
+ *
+ * During vblank, the NvKmsVblankSemControlDataOneHead::requestCounter field
+ * will be read, and the following pseudocode will be performed:
+ *
+ *   swapInterval = DRF_VAL(data->flags)
+ *
+ *   if (data->requestCounter == prevRequestCounter)
+ *       return
+ *
+ *   if (currentVblankCount < (prevVblankCount + swapInterval))
+ *       return
+ *
+ *   data->vblankCount = currentVblankCount
+ *   data->semaphore = data->requestCounter
+ *
+ *   prevRequestCounter = data->requestCounter
+ *   prevVblankCount = currentVblankCount
+ *
+ * I.e., if the client-described conditions are met, nvkms will write
+ * NvKmsVblankSemControlDataOneHead::semaphore to the client-requested
+ * 'requestCounter' along with the vblankCount.
+ *
+ * The intent is for clients to use semaphore releases to write:
+ *
+ *   NvKmsVblankSemControlDataOneHead::swapInterval
+ *   NvKmsVblankSemControlDataOneHead::requestCounter
+ *
+ * and then perform a semaphore acquire on
+ * NvKmsVblankSemControlDataOneHead::semaphore >= requestCounter (using the
+ * ACQ_GEQ semaphore operation). This will block any following methods in the
+ * client's channel (e.g., a blit) until the requested conditions are met.
+ * Note that the ::requestCounter should be written last, because the change
+ * in value of ::requestCounter is what causes nvkms, during a vblank
+ * callback, to inspect the other fields.
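+ *
+ * As an illustrative per-frame client sequence (SEM_RELEASE and SEM_ACQ_GEQ
+ * are placeholders for the client's channel semaphore methods, not NVKMS
+ * names):
+ *
+ *   SEM_RELEASE(&data->head[h].swapInterval, interval)
+ *   SEM_RELEASE(&data->head[h].requestCounter, counter)   (written last)
+ *   SEM_ACQ_GEQ(&data->head[h].semaphore, counter)        (gates later work)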
+ *
+ * Additionally, clients should use the CPU (not semaphore releases in their
+ * channel) to write the field
+ * NvKmsVblankSemControlDataOneHead::requestCounterAccel at the same time that
+ * they enqueue the semaphore release to write to
+ * NvKmsVblankSemControlDataOneHead::requestCounter. ::requestCounterAccel will
+ * be used by nvkms to "accelerate" the vblank sem control by copying the value
+ * from ::requestCounterAccel to ::semaphore. This will be done when the vblank
+ * sem control is disabled, and when a client calls
+ * NVKMS_IOCTL_ACCEL_VBLANK_SEM_CONTROLS. It is important for nvkms to have
+ * access to the value in ::requestCounterAccel, and not just ::requestCounter.
+ * The latter is only the last value released so far by the client's channel
+ * (further releases to ::requestCounter may still be inflight, perhaps blocked
+ * on pending semaphore acquires). The former should be the most recent value
+ * enqueued in the channel. This is also why it is important for clients to
+ * acquire with ACQ_GEQ (greater-than-or-equal-to), rather than just ACQUIRE.
+ *
+ * The same NvKmsSurfaceHandle (with different surfaceOffsets) may be used by
+ * multiple VBlank Sem Controls.
+ *
+ * It is the responsibility of the nvkms client(s) to coordinate at modeset
+ * time: the mapping of nvkms apiHeads to underlying hwHeads may change during a
+ * modeset, such that a registered vblank sem control will no longer receive
+ * vblank callbacks if the head is shut down. Before a modeset shuts down a
+ * head, nvkms clients should ensure that all in-flight semaphore acquires are
+ * satisfied.
+ *
+ * NVKMS_IOCTL_ACCEL_VBLANK_SEM_CONTROLS can be used, specifying a particular
+ * set of heads, to set all vblank sem controls on those heads to have their
+ * semaphore set to the value in their respective
+ * NvKmsVblankSemControlDataOneHead::requestCounterAccel fields.
+ *
+ * These ioctls are only available when
+ * NvKmsAllocDeviceReply::supportsVblankSemControl is true.
+ */
+
+struct NvKmsEnableVblankSemControlRequest {
+    NvKmsDeviceHandle deviceHandle;
+    NvKmsDispHandle dispHandle;
+    NvKmsSurfaceHandle surfaceHandle;
+    NvU64 surfaceOffset NV_ALIGN_BYTES(8);
+};
+
+struct NvKmsEnableVblankSemControlReply {
+    NvKmsVblankSemControlHandle vblankSemControlHandle;
+};
+
+struct NvKmsEnableVblankSemControlParams {
+    struct NvKmsEnableVblankSemControlRequest request;
+    struct NvKmsEnableVblankSemControlReply reply;
+};
+
+struct NvKmsDisableVblankSemControlRequest {
+    NvKmsDeviceHandle deviceHandle;
+    NvKmsDispHandle dispHandle;
+    NvKmsVblankSemControlHandle vblankSemControlHandle;
+};
+
+struct NvKmsDisableVblankSemControlReply {
+    NvU32 padding;
+};
+
+struct NvKmsDisableVblankSemControlParams {
+    struct NvKmsDisableVblankSemControlRequest request;
+    struct NvKmsDisableVblankSemControlReply reply;
+};
+
+struct NvKmsAccelVblankSemControlsRequest {
+    NvKmsDeviceHandle deviceHandle;
+    NvKmsDispHandle dispHandle;
+    NvU32 headMask;
+};
+
+struct NvKmsAccelVblankSemControlsReply {
+    NvU32 padding;
+};
+
+struct NvKmsAccelVblankSemControlsParams {
+    struct NvKmsAccelVblankSemControlsRequest request;
+    struct NvKmsAccelVblankSemControlsReply reply;
+};
+
+/*!
+ * NVKMS_IOCTL_VRR_SIGNAL_SEMAPHORE
+ *
+ * This IOCTL is used to signal a semaphore from the VRR semaphore surface.
+ * It should be invoked after a flip, if needed. If the device does not
+ * support VRR semaphores, this is a no-op for compatibility.
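+ *
+ * A minimal usage sketch (the handles, the semaphore index 'idx', and the
+ * nvKmsIoctl() helper are assumed for illustration; the structures are
+ * defined below):
+ *
+ *   struct NvKmsVrrSignalSemaphoreParams p = { };
+ *   p.request.deviceHandle = dev;
+ *   p.request.vrrSemaphoreIndex = idx;
+ *   nvKmsIoctl(fd, NVKMS_IOCTL_VRR_SIGNAL_SEMAPHORE, &p);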
+ */ +struct NvKmsVrrSignalSemaphoreRequest { + NvKmsDeviceHandle deviceHandle; + NvS32 vrrSemaphoreIndex; +}; + +struct NvKmsVrrSignalSemaphoreReply { + NvU32 padding; +}; + +struct NvKmsVrrSignalSemaphoreParams { + struct NvKmsVrrSignalSemaphoreRequest request; /*! in */ + struct NvKmsVrrSignalSemaphoreReply reply; /*! out */ +}; + +/* + * NVKMS_IOCTL_FRAMEBUFFER_CONSOLE_DISABLED + * + * Notify NVKMS that the calling client has disabled the framebuffer console. + * NVKMS will free the framebuffer console reserved memory and disable + * NVKMS-based console restore. + * + * This IOCTL can only be used by kernel-mode clients. + */ + +struct NvKmsFramebufferConsoleDisabledRequest { + NvKmsDeviceHandle deviceHandle; +}; + +struct NvKmsFramebufferConsoleDisabledReply { + NvU32 padding; +}; + +struct NvKmsFramebufferConsoleDisabledParams { + struct NvKmsFramebufferConsoleDisabledRequest request; + struct NvKmsFramebufferConsoleDisabledReply reply; +}; + +#endif /* NVKMS_API_H */ diff --git a/src/nvidia-modeset/interface/nvkms-format.h b/src/nvidia-modeset/interface/nvkms-format.h new file mode 100644 index 0000000..88b26b3 --- /dev/null +++ b/src/nvidia-modeset/interface/nvkms-format.h @@ -0,0 +1,126 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_FORMAT_H) +#define NVKMS_FORMAT_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* + * In order to interpret these pixel format namings, please take note of these + * conventions: + * - The Y8_U8__Y8_V8_N422 and U8_Y8__V8_Y8_N422 formats are both packed formats + * that have an interleaved chroma component across every two pixels. The + * double-underscore is a separator between these two pixel groups. + * - The triple-underscore is a separator between planes. + * - The 'N' suffix is a delimiter for the chroma decimation factor. + * + * As examples of the above rules: + * - The Y8_U8__Y8_V8_N422 format has one 8-bit luma component (Y8) and one + * 8-bit chroma component (U8) in pixel N, and one 8-bit luma component (Y8) + * and one 8-bit chroma component (V8) in pixel (N + 1). This format is + * 422-decimated since the U and V chroma samples are shared between each + * pair of adjacent pixels per line. 
+ * - The Y10___U10V10_N444 format has one plane of 10-bit luma (Y10) components, + * and another plane of 10-bit chroma components (U10V10). This format has no + * chroma decimation since the luma and chroma components are sampled at the + * same rate. + */ +enum NvKmsSurfaceMemoryFormat { + NvKmsSurfaceMemoryFormatI8 = 0, + NvKmsSurfaceMemoryFormatA1R5G5B5 = 1, + NvKmsSurfaceMemoryFormatX1R5G5B5 = 2, + NvKmsSurfaceMemoryFormatR5G6B5 = 3, + NvKmsSurfaceMemoryFormatA8R8G8B8 = 4, + NvKmsSurfaceMemoryFormatX8R8G8B8 = 5, + NvKmsSurfaceMemoryFormatA2B10G10R10 = 6, + NvKmsSurfaceMemoryFormatX2B10G10R10 = 7, + NvKmsSurfaceMemoryFormatA8B8G8R8 = 8, + NvKmsSurfaceMemoryFormatX8B8G8R8 = 9, + NvKmsSurfaceMemoryFormatRF16GF16BF16AF16 = 10, + NvKmsSurfaceMemoryFormatR16G16B16A16 = 11, + NvKmsSurfaceMemoryFormatRF32GF32BF32AF32 = 12, + NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422 = 13, + NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422 = 14, + NvKmsSurfaceMemoryFormatY8___U8V8_N444 = 15, + NvKmsSurfaceMemoryFormatY8___V8U8_N444 = 16, + NvKmsSurfaceMemoryFormatY8___U8V8_N422 = 17, + NvKmsSurfaceMemoryFormatY8___V8U8_N422 = 18, + NvKmsSurfaceMemoryFormatY8___U8V8_N420 = 19, + NvKmsSurfaceMemoryFormatY8___V8U8_N420 = 20, + NvKmsSurfaceMemoryFormatY10___U10V10_N444 = 21, + NvKmsSurfaceMemoryFormatY10___V10U10_N444 = 22, + NvKmsSurfaceMemoryFormatY10___U10V10_N422 = 23, + NvKmsSurfaceMemoryFormatY10___V10U10_N422 = 24, + NvKmsSurfaceMemoryFormatY10___U10V10_N420 = 25, + NvKmsSurfaceMemoryFormatY10___V10U10_N420 = 26, + NvKmsSurfaceMemoryFormatY12___U12V12_N444 = 27, + NvKmsSurfaceMemoryFormatY12___V12U12_N444 = 28, + NvKmsSurfaceMemoryFormatY12___U12V12_N422 = 29, + NvKmsSurfaceMemoryFormatY12___V12U12_N422 = 30, + NvKmsSurfaceMemoryFormatY12___U12V12_N420 = 31, + NvKmsSurfaceMemoryFormatY12___V12U12_N420 = 32, + NvKmsSurfaceMemoryFormatY8___U8___V8_N444 = 33, + NvKmsSurfaceMemoryFormatY8___U8___V8_N420 = 34, + NvKmsSurfaceMemoryFormatRF16GF16BF16XF16 = 35, + NvKmsSurfaceMemoryFormatMin = NvKmsSurfaceMemoryFormatI8, + NvKmsSurfaceMemoryFormatMax = NvKmsSurfaceMemoryFormatRF16GF16BF16XF16, +}; + +typedef struct NvKmsSurfaceMemoryFormatInfo { + enum NvKmsSurfaceMemoryFormat format; + const char *name; + NvU8 depth; + NvBool isYUV; + NvU8 numPlanes; + + union { + struct { + NvU8 bytesPerPixel; + NvU8 bitsPerPixel; + } rgb; + + struct { + NvU8 depthPerComponent; + NvU8 storageBitsPerComponent; + NvU8 horizChromaDecimationFactor; + NvU8 vertChromaDecimationFactor; + } yuv; + }; +} NvKmsSurfaceMemoryFormatInfo; + +const NvKmsSurfaceMemoryFormatInfo *nvKmsGetSurfaceMemoryFormatInfo( + const enum NvKmsSurfaceMemoryFormat format); + +const char *nvKmsSurfaceMemoryFormatToString( + const enum NvKmsSurfaceMemoryFormat format); + +#ifdef __cplusplus +}; +#endif + +#endif /* NVKMS_FORMAT_H */ diff --git a/src/nvidia-modeset/interface/nvkms-ioctl.h b/src/nvidia-modeset/interface/nvkms-ioctl.h new file mode 100644 index 0000000..cb27573 --- /dev/null +++ b/src/nvidia-modeset/interface/nvkms-ioctl.h @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_IOCTL_H) +#define NVKMS_IOCTL_H + +#include "nvtypes.h" + +/*! + * Some of the NVKMS ioctl parameter data structures are quite large + * and would exceed the parameter size constraints on at least SunOS. + * + * Redirect ioctls through a level of indirection: user-space assigns + * NvKmsIoctlParams with the real command, size, and pointer, and + * passes the NvKmsIoctlParams through the ioctl. + */ + +struct NvKmsIoctlParams { + NvU32 cmd; + NvU32 size; + NvU64 address NV_ALIGN_BYTES(8); +}; + +#define NVKMS_IOCTL_MAGIC 'm' +#define NVKMS_IOCTL_CMD 0 + +#define NVKMS_IOCTL_IOWR \ + _IOWR(NVKMS_IOCTL_MAGIC, NVKMS_IOCTL_CMD, struct NvKmsIoctlParams) + +/*! + * User-space pointers are always passed to NVKMS in an NvU64. + * This user-space address is eventually passed into the platform's + * copyin/copyout functions, in a void* argument. + * + * This utility function converts from an NvU64 to a pointer. + */ + +static inline void *nvKmsNvU64ToPointer(NvU64 value) +{ + return (void *)(NvUPtr)value; +} + +/*! + * Before casting the NvU64 to a void*, check that casting to a pointer + * size within the kernel does not lose any precision in the current + * environment. + */ +static inline NvBool nvKmsNvU64AddressIsSafe(NvU64 address) +{ + return address == (NvU64)(NvUPtr)address; +} + +#endif /* NVKMS_IOCTL_H */ diff --git a/src/nvidia-modeset/interface/nvkms-modetimings.h b/src/nvidia-modeset/interface/nvkms-modetimings.h new file mode 100644 index 0000000..e4db871 --- /dev/null +++ b/src/nvidia-modeset/interface/nvkms-modetimings.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_MODETIMINGS_H) +#define NVKMS_MODETIMINGS_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nv_mode_timings.h" + +/* + * For Kepler HW HDMI 1.4 frame packed stereo, HW combines two flips + * into a single top-down double-height frame, and it needs a + * doubled refresh rate to accommodate this. + */ +static inline void nvKmsUpdateNvModeTimingsForHdmi3D(NvModeTimings *pModeTimings, + NvBool hdmi3D) +{ + if (pModeTimings->hdmi3D == hdmi3D) { + return; + } + + if (hdmi3D) { + pModeTimings->pixelClockHz *= 2; + pModeTimings->RRx1k *= 2; + } else { + pModeTimings->pixelClockHz /= 2; + pModeTimings->RRx1k /= 2; + } + + pModeTimings->hdmi3D = hdmi3D; +} + +#ifdef __cplusplus +}; +#endif + +#endif /* NVKMS_MODETIMINGS_H */ diff --git a/src/nvidia-modeset/interface/nvkms-sync.h b/src/nvidia-modeset/interface/nvkms-sync.h new file mode 100644 index 0000000..e399ad4 --- /dev/null +++ b/src/nvidia-modeset/interface/nvkms-sync.h @@ -0,0 +1,100 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_SYNC_H) +#define NVKMS_SYNC_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvkms-api-types.h" + +/* These functions are implemented in nvkms-lib. */ + +enum nvKmsNotifierStatus { + NVKMS_NOTIFIER_STATUS_NOT_BEGUN, + NVKMS_NOTIFIER_STATUS_BEGUN, + NVKMS_NOTIFIER_STATUS_FINISHED, +}; + +struct nvKmsParsedNotifier { + NvU64 timeStamp; + NvBool timeStampValid; + enum nvKmsNotifierStatus status; + NvU8 presentCount; +}; + +static inline NvU32 nvKmsSizeOfNotifier(enum NvKmsNIsoFormat format, + NvBool overlay) { + switch (format) { + default: + case NVKMS_NISO_FORMAT_LEGACY: + return overlay ? 
16 : 4; + case NVKMS_NISO_FORMAT_FOUR_WORD: + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + return 16; + } +} + +void nvKmsSetNotifier(enum NvKmsNIsoFormat format, NvBool overlay, + NvU32 index, void *base, NvU64 timeStamp); + +void nvKmsResetNotifier(enum NvKmsNIsoFormat format, NvBool overlay, + NvU32 index, void *base); + +void nvKmsParseNotifier(enum NvKmsNIsoFormat format, NvBool overlay, + NvU32 index, const void *base, + struct nvKmsParsedNotifier *out); + +struct nvKmsParsedSemaphore { + NvU32 payload; +}; + +static inline NvU32 nvKmsSizeOfSemaphore(enum NvKmsNIsoFormat format) { + switch (format) { + default: + case NVKMS_NISO_FORMAT_LEGACY: + return 4; + case NVKMS_NISO_FORMAT_FOUR_WORD: + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + return 16; + } +} + +NvU32 nvKmsSemaphorePayloadOffset(enum NvKmsNIsoFormat format); + +void nvKmsResetSemaphore(enum NvKmsNIsoFormat format, + NvU32 index, void *base, + NvU32 payload); + +void nvKmsParseSemaphore(enum NvKmsNIsoFormat format, + NvU32 index, const void *base, + struct nvKmsParsedSemaphore *out); + +#ifdef __cplusplus +}; +#endif + +#endif /* NVKMS_SYNC_H */ diff --git a/src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h b/src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h new file mode 100644 index 0000000..2bda79d --- /dev/null +++ b/src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h @@ -0,0 +1,268 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_KAPI_INTERNAL_H__ + +#define __NVKMS_KAPI_INTERNAL_H__ + + + +#include "unix_rm_handle.h" + +#include "nvkms-utils.h" +#include "nvkms-kapi-private.h" + +#include "nv_smg.h" + +//XXX Decouple functions like nvEvoLog used for logging from NVKMS + +#define nvKmsKapiLogDebug(__format...) \ + nvEvoLogDebug(EVO_LOG_INFO, "[kapi] "__format) + +#define nvKmsKapiLogDeviceDebug(__device, __format, ...) \ + nvEvoLogDebug(EVO_LOG_INFO, "[kapi][GPU Id 0x%08x] "__format, \ + device->gpuId, ##__VA_ARGS__) + +/* + * Semaphore values used when using semaphore-based synchronization between + * userspace rendering and flips. + */ +enum NvKmsKapiSemaphoreValues { + /* + * Initial state on driver init, and the value written by the hardware when + * it has completed processing of a frame using this semaphore. 
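 *
 * (Editorial sketch, not part of the original comment: with "sema" a
 * hypothetical CPU mapping of one semaphore, the handshake between the
 * three values defined here is roughly
 *
 *     *sema = NVKMS_KAPI_SEMAPHORE_VALUE_NOT_READY; // flip queued
 *     // ... userspace rendering finishes ...
 *     *sema = NVKMS_KAPI_SEMAPHORE_VALUE_READY;     // flip may proceed
 *     // hardware writes NVKMS_KAPI_SEMAPHORE_VALUE_DONE once it has
 *     // completed processing of the frame.)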
+ */ + NVKMS_KAPI_SEMAPHORE_VALUE_DONE = 0xd00dd00d, + + /* + * Value of the semaphore when a flip is pending in the display pushbuffer, + * but userspace rendering is not yet complete. + */ + NVKMS_KAPI_SEMAPHORE_VALUE_NOT_READY = 0x13371337, + + /* + * Value of the semaphore when userspace rendering is complete and the + * pending flip may proceed. + */ + NVKMS_KAPI_SEMAPHORE_VALUE_READY = 0xf473f473, +}; + +struct NvKmsKapiNisoSurface { + NvU32 hRmHandle; + NvKmsSurfaceHandle hKmsHandle; + + NvBool mapped; + void *pLinearAddress; + + enum NvKmsNIsoFormat format; + +}; + +struct NvKmsKapiDevice { + + NvU32 gpuId; + + nvkms_sema_handle_t *pSema; + + /* RM handles */ + + NvU32 hRmClient; + NvU32 hRmDevice, hRmSubDevice; + NvU32 deviceInstance; + + NVUnixRmHandleAllocatorRec handleAllocator; + + /* NVKMS handles */ + + struct nvkms_per_open *pKmsOpen; + + NvKmsDeviceHandle hKmsDevice; + NvKmsDispHandle hKmsDisp; + NvU32 dispIdx; + + NvU32 subDeviceMask; + + NvBool isSOC; + NvKmsDispIOCoherencyModes isoIOCoherencyModes; + NvKmsDispIOCoherencyModes nisoIOCoherencyModes; + NvBool supportsSyncpts; + + /* SMG state */ + + MIGDeviceId migDevice; + NvU32 smgGpuInstSubscriptionHdl; + NvU32 smgComputeInstSubscriptionHdl; + + nvRMContext rmSmgContext; + + /* Device capabilities */ + + struct { + struct NvKmsCompositionCapabilities cursorCompositionCaps; + struct NvKmsCompositionCapabilities overlayCompositionCaps; + + NvU16 validLayerRRTransforms; + + NvU32 maxWidthInPixels; + NvU32 maxHeightInPixels; + NvU32 maxCursorSizeInPixels; + + NvU8 genericPageKind; + NvBool requiresVrrSemaphores; + + NvBool supportsInputColorSpace; + NvBool supportsInputColorRange; + } caps; + + NvU64 supportedSurfaceMemoryFormats[NVKMS_KAPI_LAYER_MAX]; + NvBool supportsICtCp[NVKMS_KAPI_LAYER_MAX]; + + struct NvKmsKapiLutCaps lutCaps; + + NvU32 numHeads; + NvU32 numLayers[NVKMS_KAPI_MAX_HEADS]; + + struct NvKmsKapiNisoSurface notifier; + struct NvKmsKapiNisoSurface semaphore; + + NvU32 numDisplaySemaphores; + + struct { + NvU32 currFlipNotifierIndex; + } layerState[NVKMS_KAPI_MAX_HEADS][NVKMS_MAX_LAYERS_PER_HEAD]; + + void *privateData; + + void (*eventCallback)(const struct NvKmsKapiEvent *event); + + NvU64 vtFbBaseAddress; + NvU64 vtFbSize; +}; + +struct NvKmsKapiMemory { + NvU32 hRmHandle; + NvU64 size; + + struct NvKmsKapiPrivSurfaceParams surfaceParams; + + NvBool isVidmem; + /* Whether memory can be updated directly on the screen */ + NvBool noDisplayCaching; +}; + +struct NvKmsKapiSurface { + NvKmsSurfaceHandle hKmsHandle; +}; + +static inline void *nvKmsKapiCalloc(size_t nmem, size_t size) +{ + return nvInternalAlloc(nmem * size, NV_TRUE); +} + +static inline void nvKmsKapiFree(void *ptr) +{ + return nvInternalFree(ptr); +} + +static inline NvU32 nvKmsKapiGenerateRmHandle(struct NvKmsKapiDevice *device) +{ + NvU32 handle; + + nvkms_sema_down(device->pSema); + handle = nvGenerateUnixRmHandle(&device->handleAllocator); + nvkms_sema_up(device->pSema); + + return handle; +} + +static inline void nvKmsKapiFreeRmHandle(struct NvKmsKapiDevice *device, + NvU32 handle) +{ + nvkms_sema_down(device->pSema); + nvFreeUnixRmHandle(&device->handleAllocator, handle); + nvkms_sema_up(device->pSema); +} + +NvBool nvKmsKapiAllocateVideoMemory(struct NvKmsKapiDevice *device, + NvU32 hRmHandle, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + enum NvKmsKapiAllocationType type, + NvU8 *compressible); + +NvBool nvKmsKapiAllocateSystemMemory(struct NvKmsKapiDevice *device, + NvU32 hRmHandle, + enum NvKmsSurfaceMemoryLayout layout, + 
NvU64 size, + enum NvKmsKapiAllocationType type, + NvU8 *compressible); + +struct NvKmsKapiChannelEvent* +nvKmsKapiAllocateChannelEvent(struct NvKmsKapiDevice *device, + NvKmsChannelEventProc *proc, + void *data, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize); + +void +nvKmsKapiFreeChannelEvent(struct NvKmsKapiDevice *device, + struct NvKmsKapiChannelEvent *cb); + +struct NvKmsKapiSemaphoreSurface* +nvKmsKapiImportSemaphoreSurface(struct NvKmsKapiDevice *device, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize, + void **pSemaphoreMap, + void **pMaxSubmittedMap); + +void +nvKmsKapiFreeSemaphoreSurface(struct NvKmsKapiDevice *device, + struct NvKmsKapiSemaphoreSurface *ss); + +NvKmsKapiRegisterWaiterResult +nvKmsKapiRegisterSemaphoreSurfaceCallback( + struct NvKmsKapiDevice *device, + struct NvKmsKapiSemaphoreSurface *semaphoreSurface, + NvKmsSemaphoreSurfaceCallbackProc *pCallback, + void *pData, + NvU64 index, + NvU64 wait_value, + NvU64 new_value, + struct NvKmsKapiSemaphoreSurfaceCallback **pCallbackHandle); + +NvBool +nvKmsKapiUnregisterSemaphoreSurfaceCallback( + struct NvKmsKapiDevice *device, + struct NvKmsKapiSemaphoreSurface *semaphoreSurface, + NvU64 index, + NvU64 wait_value, + struct NvKmsKapiSemaphoreSurfaceCallback *callbackHandle); + +NvBool +nvKmsKapiSetSemaphoreSurfaceValue( + struct NvKmsKapiDevice *device, + struct NvKmsKapiSemaphoreSurface *semaphoreSurface, + NvU64 index, + NvU64 new_value); + +#endif /* __NVKMS_KAPI_INTERNAL_H__ */ diff --git a/src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h b/src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h new file mode 100644 index 0000000..029087c --- /dev/null +++ b/src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h @@ -0,0 +1,99 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_KAPI_NOTIFIERS_H__ + +#define __NVKMS_KAPI_NOTIFIERS_H__ + +#include "nvkms-kapi-internal.h" + +#define NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER 0x2 +#define NVKMS_KAPI_NOTIFIER_SIZE 0x10 + +static inline NvU32 NVKMS_KAPI_INC_NOTIFIER_INDEX(const NvU32 index) +{ + return (index + 1) % NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER; +} + +static inline NvU32 NVKMS_KAPI_DEC_NOTIFIER_INDEX(const NvU32 index) +{ + if (index == 0) { + /* + * Wrap "backwards" to the largest allowed notifier index. 
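 *
 * (Editorial worked example: with NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER == 2,
 * NVKMS_KAPI_DEC_NOTIFIER_INDEX(0) == 1 and
 * NVKMS_KAPI_DEC_NOTIFIER_INDEX(1) == 0, undoing
 * NVKMS_KAPI_INC_NOTIFIER_INDEX() above, so each layer's notifiers form
 * a two-entry ring.)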
+ */ + return NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER - 1; + } + + return index - 1; +} + +static inline NvU32 NVKMS_KAPI_NOTIFIER_INDEX(NvU32 head, NvU32 layer, + NvU32 index) +{ + NvU64 notifierIndex = 0; + + notifierIndex = head * + NVKMS_MAX_LAYERS_PER_HEAD * + NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER; + + notifierIndex += layer * + NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER; + + notifierIndex += index; + + return notifierIndex; +} + +static inline NvU32 NVKMS_KAPI_NOTIFIER_OFFSET(NvU32 head, + NvU32 layer, NvU32 index) +{ + return NVKMS_KAPI_NOTIFIER_INDEX(head, layer, index) * + NVKMS_KAPI_NOTIFIER_SIZE; +} + +NvBool nvKmsKapiAllocateNotifiers(struct NvKmsKapiDevice *device, NvBool inVideoMemory); +NvBool nvKmsKapiAllocateSemaphores(struct NvKmsKapiDevice *device, NvBool inVideoMemory); + +void nvKmsKapiFreeNisoSurface(struct NvKmsKapiDevice *device, + struct NvKmsKapiNisoSurface *surf); + +NvBool nvKmsKapiIsNotifierFinish(const struct NvKmsKapiDevice *device, + const NvU32 head, const NvU32 layer, + const NvU32 index); + +void nvKmsKapiNotifierSetNotBegun(struct NvKmsKapiDevice *device, + NvU32 head, NvU32 layer, NvU32 index); + +NvBool nvKmsKapiTryInitDisplaySemaphore(struct NvKmsKapiDevice *device, + NvU32 index); + +void nvKmsKapiSignalDisplaySemaphore(struct NvKmsKapiDevice *device, + NvU32 index); + +void nvKmsKapiCancelDisplaySemaphore(struct NvKmsKapiDevice *device, + NvU32 index); + +NvU32 nvKmsKapiGetDisplaySemaphoreOffset(struct NvKmsKapiDevice *device, + NvU32 index); + +#endif /* __NVKMS_KAPI_NOTIFIERS_H__ */ diff --git a/src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h b/src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h new file mode 100644 index 0000000..cc0c4e3 --- /dev/null +++ b/src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h @@ -0,0 +1,67 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#if !defined(__NVKMS_KAPI_PRIVATE_H__) +#define __NVKMS_KAPI_PRIVATE_H__ + +#include "nvtypes.h" +#include "nvkms-api.h" + +#define NVKMS_KAPI_MAX_EVENT_CHANNELS 3 + +struct NvKmsKapiPrivAllocateChannelEventParams { + NvU32 hClient; + NvU32 hChannels[NVKMS_KAPI_MAX_EVENT_CHANNELS]; +}; + +struct NvKmsKapiPrivSurfaceParams { + enum NvKmsSurfaceMemoryLayout layout; + + struct { + struct { + NvU32 x; + NvU32 y; + NvU32 z; + } log2GobsPerBlock; + + NvU32 pitchInBlocks; + NvBool genericMemory; + } blockLinear; +}; + +struct NvKmsKapiPrivImportMemoryParams { + int memFd; + struct NvKmsKapiPrivSurfaceParams surfaceParams; +}; + +struct NvKmsKapiPrivExportMemoryParams { + int memFd; +}; + +struct NvKmsKapiPrivImportSemaphoreSurfaceParams { + NvHandle hClient; + NvHandle hSemaphoreSurface; + NvU64 semaphoreSurfaceSize; +}; + +#endif /* !defined(__NVKMS_KAPI_PRIVATE_H__) */ diff --git a/src/nvidia-modeset/kapi/interface/nvkms-kapi.h b/src/nvidia-modeset/kapi/interface/nvkms-kapi.h new file mode 100644 index 0000000..5f229ff --- /dev/null +++ b/src/nvidia-modeset/kapi/interface/nvkms-kapi.h @@ -0,0 +1,1624 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(__NVKMS_KAPI_H__) + +#include "nvtypes.h" +#include "nv_mig_types.h" + +#include "nv-gpu-info.h" +#include "nv_dpy_id.h" +#include "nvkms-api-types.h" +#include "nvkms-format.h" + +#define __NVKMS_KAPI_H__ + +#define NVKMS_KAPI_MAX_HEADS 4 + +#define NVKMS_KAPI_MAX_CONNECTORS 16 +#define NVKMS_KAPI_MAX_CLONE_DISPLAYS 16 + +#define NVKMS_KAPI_EDID_BUFFER_SIZE 2048 + +#define NVKMS_KAPI_MODE_NAME_LEN 32 + +/** + * \defgroup Objects + * @{ + */ + +struct NvKmsKapiDevice; +struct NvKmsKapiMemory; +struct NvKmsKapiSurface; +struct NvKmsKapiChannelEvent; +struct NvKmsKapiSemaphoreSurface; +struct NvKmsKapiSemaphoreSurfaceCallback; + +typedef NvU32 NvKmsKapiConnector; +typedef NvU32 NvKmsKapiDisplay; + +/** @} */ + +/** + * \defgroup FuncPtrs + * @{ + */ + +/* + * Note: The channel event proc should not call back into NVKMS-KAPI driver. + * The callback into NVKMS-KAPI from the channel event proc, may cause + * deadlock. + */ +typedef void NvKmsChannelEventProc(void *dataPtr, NvU32 dataU32); + +/* + * Note: Same as above, this function must not call back into NVKMS-KAPI, nor + * directly into RM. 
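 *
 * (Editorial sketch of a conforming channel event proc, assuming a Linux
 * workqueue consumer; "struct my_ctx" and its fields are hypothetical:
 *
 *     static void myChannelEventProc(void *dataPtr, NvU32 dataU32)
 *     {
 *         struct my_ctx *ctx = dataPtr;
 *         ctx->lastData = dataU32;    // record only; no NVKMS-KAPI/RM calls
 *         schedule_work(&ctx->work);  // defer real processing
 *     }
 *
 * The same pattern applies to the semaphore callback described here.)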
Doing so could cause deadlocks given the notification + * function will most likely be called from within RM's interrupt handler + * callchain. + */ +typedef void NvKmsSemaphoreSurfaceCallbackProc(void *pData); + +/** @} */ + +/** + * \defgroup Structs + * @{ + */ + +struct NvKmsKapiDisplayModeTimings { + + NvU32 refreshRate; + NvU32 pixelClockHz; + NvU32 hVisible; + NvU32 hSyncStart; + NvU32 hSyncEnd; + NvU32 hTotal; + NvU32 hSkew; + NvU32 vVisible; + NvU32 vSyncStart; + NvU32 vSyncEnd; + NvU32 vTotal; + + struct { + + NvU32 interlaced : 1; + NvU32 doubleScan : 1; + NvU32 hSyncPos : 1; + NvU32 hSyncNeg : 1; + NvU32 vSyncPos : 1; + NvU32 vSyncNeg : 1; + + } flags; + + NvU32 widthMM; + NvU32 heightMM; + +}; + +struct NvKmsKapiDisplayMode { + struct NvKmsKapiDisplayModeTimings timings; + char name[NVKMS_KAPI_MODE_NAME_LEN]; +}; + +#define NVKMS_KAPI_LAYER_MAX 8 + +#define NVKMS_KAPI_LAYER_INVALID_IDX 0xff +#define NVKMS_KAPI_LAYER_PRIMARY_IDX 0 + +struct NvKmsKapiLutCaps { + struct { + struct NvKmsLUTCaps ilut; + struct NvKmsLUTCaps tmo; + } layer[NVKMS_KAPI_LAYER_MAX]; + struct NvKmsLUTCaps olut; +}; + +struct NvKmsKapiDeviceResourcesInfo { + + NvU32 numHeads; + NvU32 numLayers[NVKMS_KAPI_MAX_HEADS]; + + NvU32 numConnectors; + NvKmsKapiConnector connectorHandles[NVKMS_KAPI_MAX_CONNECTORS]; + + struct { + NvU32 validCursorCompositionModes; + NvU64 supportedCursorSurfaceMemoryFormats; + + struct { + NvU64 maxSubmittedOffset; + NvU64 stride; + } semsurf; + + struct { + NvU16 validRRTransforms; + NvU32 validCompositionModes; + } layer[NVKMS_KAPI_LAYER_MAX]; + + NvU32 minWidthInPixels; + NvU32 maxWidthInPixels; + + NvU32 minHeightInPixels; + NvU32 maxHeightInPixels; + + NvU32 maxCursorSizeInPixels; + + NvU32 pitchAlignment; + + NvU32 hasVideoMemory; + + NvU32 numDisplaySemaphores; + + NvU8 genericPageKind; + + NvBool supportsSyncpts; + + NvBool requiresVrrSemaphores; + + NvBool supportsInputColorRange; + NvBool supportsInputColorSpace; + } caps; + + NvU64 supportedSurfaceMemoryFormats[NVKMS_KAPI_LAYER_MAX]; + NvBool supportsICtCp[NVKMS_KAPI_LAYER_MAX]; + + struct NvKmsKapiLutCaps lutCaps; + + NvU64 vtFbBaseAddress; + NvU64 vtFbSize; +}; + +#define NVKMS_KAPI_LAYER_MASK(layerType) (1 << (layerType)) + +typedef enum NvKmsKapiMappingTypeRec { + NVKMS_KAPI_MAPPING_TYPE_USER = 1, + NVKMS_KAPI_MAPPING_TYPE_KERNEL = 2, +} NvKmsKapiMappingType; + +struct NvKmsKapiConnectorInfo { + + NvKmsKapiConnector handle; + + NvU32 physicalIndex; + + NvKmsConnectorSignalFormat signalFormat; + NvKmsConnectorType type; + + /* + * List of connectors, not possible to serve together with this connector + * because they are competing for same resources. + */ + NvU32 numIncompatibleConnectors; + NvKmsKapiConnector incompatibleConnectorHandles[NVKMS_KAPI_MAX_CONNECTORS]; + + NVDpyIdList dynamicDpyIdList; +}; + +struct NvKmsKapiStaticDisplayInfo { + + NvKmsKapiDisplay handle; + + NvKmsKapiConnector connectorHandle; + + /* Set for DisplayPort MST displays (dynamic displays) */ + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]; + + NvBool internal; + + /* List of potential sibling display for cloning */ + NvU32 numPossibleClones; + NvKmsKapiDisplay possibleCloneHandles[NVKMS_KAPI_MAX_CLONE_DISPLAYS]; + + NvU32 headMask; + + NvBool isDpMST; +}; + +struct NvKmsKapiSyncParams { + union { + struct { + /*! + * Possible syncpt use case in kapi. + * For pre-syncpt, use only id and value + * and for post-syncpt, use only fd. 
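 *
 * (Editorial sketch of the pre-syncpt case, using the fields declared
 * just below; "id" and "value" are hypothetical inputs:
 *
 *     struct NvKmsKapiSyncParams sync = { 0 };
 *
 *     sync.u.syncpt.preSyncptId    = id;     // syncpoint to wait on
 *     sync.u.syncpt.preSyncptValue = value;  // threshold to reach
 *     sync.preSyncptSpecified      = NV_TRUE;
 *     sync.postSyncptRequested     = NV_FALSE;
 *     sync.semaphoreSpecified      = NV_FALSE;)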
+ */ + NvU32 preSyncptId; + NvU32 preSyncptValue; + } syncpt; + + struct { + NvU32 index; + } semaphore; + } u; + + NvBool preSyncptSpecified; + NvBool postSyncptRequested; + NvBool semaphoreSpecified; +}; + +struct NvKmsKapiLayerConfig { + struct NvKmsKapiSurface *surface; + struct { + enum NvKmsCompositionBlendingMode compMode; + NvU8 surfaceAlpha; + } compParams; + struct NvKmsRRParams rrParams; + struct NvKmsKapiSyncParams syncParams; + + struct { + struct NvKmsHDRStaticMetadata val; + NvBool enabled; + } hdrMetadata; + + enum NvKmsInputTf inputTf; + enum NvKmsOutputTf outputTf; + + NvU8 minPresentInterval; + NvBool tearing; + + NvU16 srcX, srcY; + NvU16 srcWidth, srcHeight; + + NvS16 dstX, dstY; + NvU16 dstWidth, dstHeight; + + enum NvKmsInputColorSpace inputColorSpace; + enum NvKmsInputColorRange inputColorRange; + + struct { + NvBool enabled; + struct NvKmsKapiSurface *lutSurface; + NvU64 offset; + NvU32 vssSegments; + NvU32 lutEntries; + } ilut; + + struct { + NvBool enabled; + struct NvKmsKapiSurface *lutSurface; + NvU64 offset; + NvU32 vssSegments; + NvU32 lutEntries; + } tmo; + + struct NvKmsCscMatrix csc; + NvBool cscUseMain; + + struct { + struct NvKmsCscMatrix lmsCtm; + struct NvKmsCscMatrix lmsToItpCtm; + struct NvKmsCscMatrix itpToLmsCtm; + struct NvKmsCscMatrix blendCtm; + struct { + NvBool lmsCtm : 1; + NvBool lmsToItpCtm : 1; + NvBool itpToLmsCtm : 1; + NvBool blendCtm : 1; + } enabled; + } matrixOverrides; +}; + +struct NvKmsKapiLayerRequestedConfig { + struct NvKmsKapiLayerConfig config; + struct { + NvBool surfaceChanged : 1; + NvBool srcXYChanged : 1; + NvBool srcWHChanged : 1; + NvBool dstXYChanged : 1; + NvBool dstWHChanged : 1; + NvBool cscChanged : 1; + NvBool inputTfChanged : 1; + NvBool outputTfChanged : 1; + NvBool inputColorSpaceChanged : 1; + NvBool inputColorRangeChanged : 1; + NvBool hdrMetadataChanged : 1; + NvBool matrixOverridesChanged : 1; + NvBool ilutChanged : 1; + NvBool tmoChanged : 1; + } flags; +}; + +struct NvKmsKapiCursorRequestedConfig { + struct NvKmsKapiSurface *surface; + struct { + enum NvKmsCompositionBlendingMode compMode; + NvU8 surfaceAlpha; + } compParams; + + NvS16 dstX, dstY; + + struct { + NvBool surfaceChanged : 1; + NvBool dstXYChanged : 1; + } flags; +}; + +struct NvKmsKapiHeadModeSetConfig { + /* + * DRM distinguishes between the head state "enabled" (the specified + * configuration for the head is valid, its resources are allocated, + * etc, but the head may not necessarily be currently driving pixels + * to its output resource) and the head state "active" (the head is + * "enabled" _and_ the head is actively driving pixels to its output + * resource). + * + * This distinction is for DPMS: + * + * DPMS On : enabled=true, active=true + * DPMS Off : enabled=true, active=false + * + * "Enabled" state is indicated by numDisplays != 0. + * "Active" state is indicated by bActive == true. 
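 *
 * (Editorial sketch of the mapping above; "dpy" is a hypothetical
 * NvKmsKapiDisplay handle:
 *
 *     struct NvKmsKapiHeadModeSetConfig cfg = { 0 };
 *
 *     cfg.numDisplays = 1;        // "enabled": resources are allocated
 *     cfg.displays[0] = dpy;
 *     cfg.bActive     = NV_TRUE;  // DPMS on; NV_FALSE here means DPMS off)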
+ */ + NvBool bActive; + + NvU32 numDisplays; + NvKmsKapiDisplay displays[NVKMS_KAPI_MAX_CLONE_DISPLAYS]; + + struct NvKmsKapiDisplayMode mode; + + NvBool vrrEnabled; + + struct { + NvBool enabled; + enum NvKmsInfoFrameEOTF eotf; + struct NvKmsHDRStaticMetadata staticMetadata; + } hdrInfoFrame; + + enum NvKmsOutputColorimetry colorimetry; + + struct { + struct { + NvU32 depth; + NvU32 start; + NvU32 end; + struct NvKmsLutRamps *pRamps; + } input; + + struct { + NvBool enabled; + struct NvKmsLutRamps *pRamps; + } output; + } lut; + + struct { + NvBool enabled; + struct NvKmsKapiSurface *lutSurface; + NvU64 offset; + NvU32 vssSegments; + NvU32 lutEntries; + } olut; + + NvU32 olutFpNormScale; +}; + +struct NvKmsKapiHeadRequestedConfig { + struct NvKmsKapiHeadModeSetConfig modeSetConfig; + struct { + NvBool activeChanged : 1; + NvBool displaysChanged : 1; + NvBool modeChanged : 1; + NvBool hdrInfoFrameChanged : 1; + NvBool colorimetryChanged : 1; + NvBool legacyIlutChanged : 1; + NvBool legacyOlutChanged : 1; + NvBool olutChanged : 1; + NvBool olutFpNormScaleChanged : 1; + } flags; + + struct NvKmsKapiCursorRequestedConfig cursorRequestedConfig; + + struct NvKmsKapiLayerRequestedConfig + layerRequestedConfig[NVKMS_KAPI_LAYER_MAX]; +}; + +struct NvKmsKapiRequestedModeSetConfig { + NvU32 headsMask; + struct NvKmsKapiHeadRequestedConfig + headRequestedConfig[NVKMS_KAPI_MAX_HEADS]; +}; + +struct NvKmsKapiLayerReplyConfig { + int postSyncptFd; +}; + +struct NvKmsKapiHeadReplyConfig { + struct NvKmsKapiLayerReplyConfig + layerReplyConfig[NVKMS_KAPI_LAYER_MAX]; +}; + +struct NvKmsKapiModeSetReplyConfig { + enum NvKmsFlipResult flipResult; + NvBool vrrFlip; + NvS32 vrrSemaphoreIndex; + struct NvKmsKapiHeadReplyConfig + headReplyConfig[NVKMS_KAPI_MAX_HEADS]; +}; + +struct NvKmsKapiEventDisplayChanged { + NvKmsKapiDisplay display; +}; + +struct NvKmsKapiEventDynamicDisplayConnected { + NvKmsKapiDisplay display; +}; + +struct NvKmsKapiEventFlipOccurred { + NvU32 head; + NvU32 layer; +}; + +struct NvKmsKapiDpyCRC32 { + NvU32 value; + NvBool supported; +}; + +struct NvKmsKapiCrcs { + struct NvKmsKapiDpyCRC32 compositorCrc32; + struct NvKmsKapiDpyCRC32 rasterGeneratorCrc32; + struct NvKmsKapiDpyCRC32 outputCrc32; +}; + +struct NvKmsKapiEvent { + enum NvKmsEventType type; + + struct NvKmsKapiDevice *device; + + void *privateData; + + union { + struct NvKmsKapiEventDisplayChanged displayChanged; + struct NvKmsKapiEventDynamicDisplayConnected dynamicDisplayConnected; + struct NvKmsKapiEventFlipOccurred flipOccurred; + } u; +}; + +struct NvKmsKapiAllocateDeviceParams { + /* [IN] GPU ID obtained from enumerateGpus() */ + NvU32 gpuId; + /* [IN] MIG device if requested */ + MIGDeviceId migDevice; + + /* [IN] Private data of device allocator */ + void *privateData; + /* [IN] Event callback */ + void (*eventCallback)(const struct NvKmsKapiEvent *event); +}; + +struct NvKmsKapiDynamicDisplayParams { + /* [IN] Display Handle returned by getDisplays() */ + NvKmsKapiDisplay handle; + + /* [OUT] Connection status */ + NvU32 connected; + + /* [OUT] VRR status */ + NvBool vrrSupported; + + /* [IN/OUT] EDID of connected monitor/ Input to override EDID */ + struct { + NvU16 bufferSize; + NvU8 buffer[NVKMS_KAPI_EDID_BUFFER_SIZE]; + } edid; + + /* [IN] Set true to override EDID */ + NvBool overrideEdid; + + /* [IN] Set true to force connected status */ + NvBool forceConnected; + + /* [IN] Set true to force disconnect status */ + NvBool forceDisconnected; +}; + +struct NvKmsKapiCreateSurfaceParams { + + /* [IN] Parameter of 
each plane */ + struct { + /* [IN] Memory allocated for plane, using allocateMemory() */ + struct NvKmsKapiMemory *memory; + /* [IN] Offsets within the memory object */ + NvU32 offset; + /* [IN] Byte pitch of plane */ + NvU32 pitch; + } planes[NVKMS_MAX_PLANES_PER_SURFACE]; + + /* [IN] Width of the surface, in pixels */ + NvU32 width; + /* [IN] Height of the surface, in pixels */ + NvU32 height; + + /* [IN] The format describing number of planes and their content */ + enum NvKmsSurfaceMemoryFormat format; + + /* [IN] Whether to override the surface objects memory layout parameters + * with those provided here. */ + NvBool explicit_layout; + /* [IN] Whether the surface layout is block-linear or pitch. Used only + * if explicit_layout is NV_TRUE */ + enum NvKmsSurfaceMemoryLayout layout; + /* [IN] block-linear block height of surface. Used only when + * explicit_layout is NV_TRUE and layout is + * NvKmsSurfaceMemoryLayoutBlockLinear */ + NvU8 log2GobsPerBlockY; +}; + +enum NvKmsKapiAllocationType { + NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT = 0, + NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER = 1, + NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN = 2, +}; + +struct NvKmsKapiAllocateMemoryParams { + /* [IN] BlockLinear or Pitch */ + enum NvKmsSurfaceMemoryLayout layout; + + /* [IN] Allocation type */ + enum NvKmsKapiAllocationType type; + + /* [IN] Size, in bytes, of the memory to allocate */ + NvU64 size; + + /* [IN] Whether memory can be updated directly on the screen */ + NvBool noDisplayCaching; + + /* [IN] Whether to allocate memory from video memory or system memory */ + NvBool useVideoMemory; + + /* [IN/OUT] For input, non-zero if compression backing store should be + * allocated for the memory, for output, non-zero if compression backing + * store was allocated for the memory */ + NvU8 *compressible; +}; + +typedef enum NvKmsKapiRegisterWaiterResultRec { + NVKMS_KAPI_REG_WAITER_FAILED, + NVKMS_KAPI_REG_WAITER_SUCCESS, + NVKMS_KAPI_REG_WAITER_ALREADY_SIGNALLED, +} NvKmsKapiRegisterWaiterResult; + +typedef void NvKmsKapiSuspendResumeCallbackFunc(NvBool suspend); + +struct NvKmsKapiGpuInfo { + nv_gpu_info_t gpuInfo; + MIGDeviceId migDevice; +}; + +struct NvKmsKapiFunctionsTable { + + /*! + * NVIDIA Driver version string. + */ + const char *versionString; + + /*! + * System Information. + */ + struct { + /* Availability of write combining support for video memory */ + NvBool bAllowWriteCombining; + } systemInfo; + + /*! + * Enumerate the available GPUs that can be used with NVKMS. + * + * The gpuCallback will be called with a NvKmsKapiGpuInfo for each + * physical and MIG GPU currently available in the system. + * + * \param [in] gpuCallback Client function to handle each GPU. + * + * \return Count of enumerated gpus. + */ + NvU32 (*enumerateGpus) + ( + void (*gpuCallback)(const struct NvKmsKapiGpuInfo *info) + ); + + /*! + * Allocate an NVK device using which you can query/allocate resources on + * GPU and do modeset. + * + * \param [in] params Parameters required for device allocation. + * + * \return An valid device handle on success, NULL on failure. + */ + struct NvKmsKapiDevice* (*allocateDevice) + ( + const struct NvKmsKapiAllocateDeviceParams *params + ); + + /*! + * Frees a device allocated by allocateDevice() and all its resources. + * + * \param [in] device A device returned by allocateDevice(). + * This function is a no-op if device is not valid. + */ + void (*freeDevice)(struct NvKmsKapiDevice *device); + + /*! + * Grab ownership of device, ownership is required to do modeset. 
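 *
 * (Editorial sketch of the usual ownership bracket, with hypothetical
 * error handling; "kapi" is the table filled in by
 * nvKmsKapiGetFunctionsTable():
 *
 *     if (!kapi->grabOwnership(device))
 *         return NV_FALSE;             // display is owned elsewhere
 *     // ... applyModeSetConfig() and friends ...
 *     kapi->releaseOwnership(device);)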
+ * + * \param [in] device A device returned by allocateDevice(). + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*grabOwnership)(struct NvKmsKapiDevice *device); + + /*! + * Release ownership of device. + * + * \param [in] device A device returned by allocateDevice(). + */ + void (*releaseOwnership)(struct NvKmsKapiDevice *device); + + /*! + * Grant modeset permissions for a display to fd. Only one (dispIndex, head, + * display) is currently supported. + * + * \param [in] fd fd from opening /dev/nvidia-modeset. + * + * \param [in] device A device returned by allocateDevice(). + * + * \param [in] head head of display. + * + * \param [in] display The display to grant. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*grantPermissions) + ( + NvS32 fd, + struct NvKmsKapiDevice *device, + NvU32 head, + NvKmsKapiDisplay display + ); + + /*! + * Revoke modeset permissions previously granted. Only one (dispIndex, + * head, display) is currently supported. + * + * \param [in] device A device returned by allocateDevice(). + * + * \param [in] head head of display. + * + * \param [in] display The display to revoke. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*revokePermissions) + ( + struct NvKmsKapiDevice *device, + NvU32 head, + NvKmsKapiDisplay display + ); + + /*! + * Grant modeset sub-owner permissions to fd. This is used by clients to + * convert drm 'master' permissions into nvkms sub-owner permission. + * + * \param [in] fd fd from opening /dev/nvidia-modeset. + * + * \param [in] device A device returned by allocateDevice(). + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*grantSubOwnership) + ( + NvS32 fd, + struct NvKmsKapiDevice *device + ); + + /*! + * Revoke sub-owner permissions previously granted. + * + * \param [in] device A device returned by allocateDevice(). + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*revokeSubOwnership) + ( + struct NvKmsKapiDevice *device + ); + + /*! + * Registers for notification, via + * NvKmsKapiAllocateDeviceParams::eventCallback, of the events specified + * in interestMask. + * + * This call does nothing if eventCallback is NULL when NvKmsKapiDevice + * is allocated. + * + * Supported events are DPY_CHANGED and DYNAMIC_DPY_CONNECTED. + * + * \param [in] device A device returned by allocateDevice(). + * + * \param [in] interestMask A mask of events requested to listen. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*declareEventInterest) + ( + const struct NvKmsKapiDevice *device, + const NvU32 interestMask + ); + + /*! + * Retrieve various static resources like connector, head etc. present on + * device and capacities. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in/out] info A pointer to an NvKmsKapiDeviceResourcesInfo + * struct that the call will fill out with number + * of resources and their handles. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getDeviceResourcesInfo) + ( + struct NvKmsKapiDevice *device, + struct NvKmsKapiDeviceResourcesInfo *info + ); + + /*! + * Retrieve the number of displays on a device and an array of handles to + * those displays. + * + * \param [in] device A device allocated using + * allocateDevice(). + * + * \param [in/out] displayCount The caller should set this to the size + * of the displayHandles array it passed + * in. 
The function will set it to the
+     *                                 number of displays returned, or the
+     *                                 total number of displays on the
+     *                                 device if displayHandles is NULL or
+     *                                 the array is smaller than the number
+     *                                 of displays.
+     *
+     * \param [out] displayHandles  An array of display handles with
+     *                              displayCount entries.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getDisplays)
+    (
+        struct NvKmsKapiDevice *device,
+        NvU32 *numDisplays, NvKmsKapiDisplay *displayHandles
+    );
+
+    /*!
+     * Retrieve information about a specified connector.
+     *
+     * \param [in]  device     A device allocated using allocateDevice().
+     *
+     * \param [in]  connector  Which connector to query; a handle returned
+     *                         by getDeviceResourcesInfo().
+     *
+     * \param [out] info       A pointer to an NvKmsKapiConnectorInfo
+     *                         struct that the call will fill out with
+     *                         information about the connector.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getConnectorInfo)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiConnector connector, struct NvKmsKapiConnectorInfo *info
+    );
+
+    /*!
+     * Retrieve information about a specified display.
+     *
+     * \param [in]  device   A device allocated using allocateDevice().
+     *
+     * \param [in]  display  Which display to query; a handle returned by
+     *                       getDisplays().
+     *
+     * \param [out] info     A pointer to an NvKmsKapiStaticDisplayInfo
+     *                       struct that the call will fill out with
+     *                       information about the display.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getStaticDisplayInfo)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiDisplay display, struct NvKmsKapiStaticDisplayInfo *info
+    );
+
+    /*!
+     * Detect or force the connection status/EDID of a display.
+     *
+     * \param [in/out] params  Parameters containing the display handle,
+     *                         EDID, and flags to force connection status.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getDynamicDisplayInfo)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiDynamicDisplayParams *params
+    );
+
+    /*!
+     * Allocate some unformatted video or system memory of the specified
+     * size.
+     *
+     * This function allocates video or system memory on the specified
+     * GPU. It should be suitable for mapping on the CPU as a pitch linear
+     * or block-linear surface.
+     *
+     * \param [in]     device  A device allocated using allocateDevice().
+     *
+     * \param [in/out] params  Parameters required for memory allocation.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory* (*allocateMemory)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiAllocateMemoryParams *params
+    );
+
+    /*!
+     * Import some unformatted memory of the specified size.
+     *
+     * This function accepts a driver-specific parameter structure
+     * representing memory allocated elsewhere and imports it into an
+     * NVKMS KAPI memory object of the specified size.
+     *
+     * \param [in] device  A device allocated using allocateDevice(). The
+     *                     memory being imported must have been allocated
+     *                     against the same physical device this device
+     *                     object represents.
+     *
+     * \param [in] size    Size, in bytes, of the memory being imported.
+     *
+     * \param [in] nvKmsParamsUser  Userspace pointer to driver-specific
+     *                              parameters describing the memory
+     *                              object being imported.
+     *
+     * \param [in] nvKmsParamsSize  Size of the driver-specific parameter
+     *                              struct.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+ */ + struct NvKmsKapiMemory* (*importMemory) + ( + struct NvKmsKapiDevice *device, NvU64 size, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize + ); + + /*! + * Duplicate an existing NVKMS KAPI memory object, taking a reference on the + * underlying memory. + * + * \param [in] device A device allocated using allocateDevice(). The + * memory being imported need not have been allocated + * against the same physical device this device object + * represents. + * + * \param [in] srcDevice The device associated with srcMemory. + * + * \param [in] srcMemory The memory object to duplicate. + * + * \return A valid memory handle on success, NULL on failure. + */ + struct NvKmsKapiMemory* (*dupMemory) + ( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiDevice *srcDevice, + const struct NvKmsKapiMemory *srcMemory + ); + + /*! + * Export the specified memory object to a userspace object handle. + * + * This function accepts a driver-specific parameter structure representing + * a new handle to be assigned to an existing NVKMS KAPI memory object. + * + * \param [in] device A device allocated using allocateDevice(). The + * memory being exported must have been created against + * or imported to the same device object, and the + * destination object handle must be valid for this + * device as well. + * + * \param [in] memory The memory object to export. + * + * \param [in] nvKmsParamsUser Userspace pointer to driver-specific + * parameters specifying a handle to add to the + * memory object being exported. + * + * \param [in] nvKmsParamsSize Size of the driver-specific parameter struct. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*exportMemory) + ( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize + ); + + /*! + * Free memory allocated using allocateMemory() + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] memory Memory allocated using allocateMemory(). + * + * \return NV_TRUE on success, NV_FALSE if memory is in use. + */ + void (*freeMemory) + ( + struct NvKmsKapiDevice *device, struct NvKmsKapiMemory *memory + ); + + /*! + * Create MMIO mappings for a memory object allocated using + * allocateMemory(). + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] memory Memory allocated using allocateMemory() + * + * \param [in] type Userspace or kernelspace mapping + * + * \param [out] ppLinearAddress The MMIO address where memory object is + * mapped. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*mapMemory) + ( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type, + void **ppLinearAddress + ); + + /*! + * Destroy MMIO mappings created for a memory object allocated using + * allocateMemory(). + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] memory Memory allocated using allocateMemory() + * + * \param [in] type Userspace or kernelspace mapping + * + * \param [in] pLinearAddress The MMIO address return by mapMemory() + */ + void (*unmapMemory) + ( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type, + const void *pLinearAddress + ); + + /*! + * Check if memory object allocated is video memory. + * + * \param [in] memory Memory allocated using allocateMemory() + * + * \return NV_TRUE if memory is vidmem, NV_FALSE otherwise. 
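 *
 * (Editorial sketch of the mapMemory()/unmapMemory() pairing documented
 * above; "kapi", "device" and "memory" are hypothetical handles:
 *
 *     void *ptr;
 *
 *     if (kapi->mapMemory(device, memory,
 *                         NVKMS_KAPI_MAPPING_TYPE_KERNEL, &ptr)) {
 *         // ... access the allocation through ptr ...
 *         kapi->unmapMemory(device, memory,
 *                           NVKMS_KAPI_MAPPING_TYPE_KERNEL, ptr);
 *     })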
+ */
+    NvBool (*isVidmem)(
+        const struct NvKmsKapiMemory *memory
+    );
+
+    /*!
+     * Create a formatted surface from an NvKmsKapiMemory object.
+     *
+     * \param [in] device  A device allocated using allocateDevice().
+     *
+     * \param [in] params  Parameters to the surface creation.
+     *
+     * \return A valid surface handle on success, NULL on failure.
+     */
+    struct NvKmsKapiSurface* (*createSurface)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiCreateSurfaceParams *params
+    );
+
+    /*!
+     * Destroy a surface created by createSurface().
+     *
+     * \param [in] device   A device allocated using allocateDevice().
+     *
+     * \param [in] surface  A surface created using createSurface().
+     */
+    void (*destroySurface)
+    (
+        struct NvKmsKapiDevice *device, struct NvKmsKapiSurface *surface
+    );
+
+    /*!
+     * Enumerate the mode timings available on a given display.
+     *
+     * \param [in]  device     A device allocated using allocateDevice().
+     *
+     * \param [in]  display    A display handle returned by getDisplays().
+     *
+     * \param [in]  modeIndex  A mode index (any integer >= 0).
+     *
+     * \param [out] mode       A pointer to an NvKmsKapiDisplayMode struct
+     *                         that the call will fill out with the
+     *                         mode-timings of the mode at index modeIndex.
+     *
+     * \param [out] valid      Returns TRUE in this param if the
+     *                         mode-timings of the mode at index modeIndex
+     *                         are valid on the display.
+     *
+     * \param [out] preferredMode  Returns TRUE if this mode is marked as
+     *                             "preferred" by the EDID.
+     *
+     * \return A value >= 1 if more modes are available, 0 if no more
+     *         modes are available, and a value < 0 on failure.
+     */
+    int (*getDisplayMode)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiDisplay display, NvU32 modeIndex,
+        struct NvKmsKapiDisplayMode *mode, NvBool *valid,
+        NvBool *preferredMode
+    );
+
+    /*!
+     * Validate the given mode timings on a given display.
+     *
+     * \param [in] device   A device allocated using allocateDevice().
+     *
+     * \param [in] display  A display handle returned by getDisplays().
+     *
+     * \param [in] mode     A pointer to an NvKmsKapiDisplayMode struct
+     *                      filled with the mode-timings to validate.
+     *
+     * \return NV_TRUE if the mode-timings are valid, NV_FALSE on failure.
+     */
+    NvBool (*validateDisplayMode)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiDisplay display, const struct NvKmsKapiDisplayMode *mode
+    );
+
+    /*!
+     * Apply a mode configuration to the device.
+     *
+     * Clients may flag only the damaged parts of the configuration, but
+     * the entire configuration must still be described.
+     *
+     * \param [in] device           A device allocated using
+     *                              allocateDevice().
+     *
+     * \param [in] requestedConfig  Parameters describing a device-wide
+     *                              display configuration.
+     *
+     * \param [in] commit           If set to 0, the call will only
+     *                              validate the mode configuration; it
+     *                              will not be applied.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*applyModeSetConfig)
+    (
+        struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiRequestedModeSetConfig *requestedConfig,
+        struct NvKmsKapiModeSetReplyConfig *replyConfig,
+        const NvBool commit
+    );
+
+    /*!
+     * Return the status of a flip.
+     *
+     * \param [in]  device   A device allocated using allocateDevice().
+     *
+     * \param [in]  head     A head returned by getDeviceResourcesInfo().
+     *
+     * \param [in]  layer    A layer index.
+     *
+     * \param [out] pending  Returns TRUE if the head has a pending flip
+     *                       for the given layer.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getFlipPendingStatus)
+    (
+        const struct NvKmsKapiDevice *device,
+        const NvU32 head,
+        const NvU32 layer,
+        NvBool *pending
+    );
+
+    /*!
+     * Allocate an event callback.
+     *
+     * \param [in] device  A device allocated using allocateDevice().
+     *
+     * \param [in] proc    Function pointer to call when triggered.
+     *
+     * \param [in] data    Argument to pass into the function.
+     *
+     * \param [in] nvKmsParamsUser  Userspace pointer to driver-specific
+     *                              parameters describing the event
+     *                              callback being created.
+     *
+     * \param [in] nvKmsParamsSize  Size of the driver-specific parameter
+     *                              struct.
+     *
+     * \return struct NvKmsKapiChannelEvent* on success, NULL on failure.
+     */
+    struct NvKmsKapiChannelEvent* (*allocateChannelEvent)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsChannelEventProc *proc,
+        void *data,
+        NvU64 nvKmsParamsUser,
+        NvU64 nvKmsParamsSize
+    );
+
+    /*!
+     * Free an event callback.
+     *
+     * \param [in] device  A device allocated using allocateDevice().
+     *
+     * \param [in] cb      struct NvKmsKapiChannelEvent* returned from
+     *                     allocateChannelEvent().
+     */
+    void (*freeChannelEvent)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiChannelEvent *cb
+    );
+
+    /*!
+     * Get the 32-bit CRC value for the last contents presented on the
+     * specified head.
+     *
+     * \param [in]  device  A device allocated using allocateDevice().
+     *
+     * \param [in]  head    A head returned by getDeviceResourcesInfo().
+     *
+     * \param [out] crc32   The CRC32 generated from the content currently
+     *                      presented onto the given head.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getCRC32)
+    (
+        struct NvKmsKapiDevice *device,
+        NvU32 head,
+        struct NvKmsKapiCrcs *crc32
+    );
+
+    /*!
+     * Get the list of allocation pages corresponding to the specified
+     * memory object.
+     *
+     * \param [in]  device     A device allocated using allocateDevice().
+     *
+     * \param [in]  memory     The memory object for which we need to find
+     *                         the list of allocation pages and number of
+     *                         pages.
+     *
+     * \param [out] pPages     A pointer to the list of NvU64 pointers.
+     *                         The caller should free pPages on success
+     *                         using freeMemoryPages().
+     *
+     * \param [out] pNumPages  Returns the total number of NvU64 pointers
+     *                         returned in pPages.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getMemoryPages)
+    (
+        const struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiMemory *memory,
+        NvU64 **pPages,
+        NvU32 *pNumPages
+    );
+
+    /*!
+     * Free the list of allocation pages returned by getMemoryPages().
+     *
+     * \param [in] pPages  A list of NvU64 pointers allocated by
+     *                     getMemoryPages().
+     */
+    void (*freeMemoryPages)
+    (
+        NvU64 *pPages
+    );
+
+    /*
+     * Import an SGT as a memory handle.
+     *
+     * \param [in] device  A device allocated using allocateDevice().
+     *
+     * \param [in] sgt     SGT pointer.
+     * \param [in] gem     GEM pointer that pinned the SGT, to be
+     *                     refcounted.
+     *
+     * \param [in] limit   Size, in bytes, of the memory backed by the
+     *                     SGT.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory*
+    (*getSystemMemoryHandleFromSgt)(struct NvKmsKapiDevice *device,
+                                    NvP64 sgt,
+                                    NvP64 gem,
+                                    NvU32 limit);
+
+    /*
+     * Import a dma-buf into a memory handle.
+     *
+     * \param [in] device  A device allocated using allocateDevice().
+     *
+     * \param [in] dmaBuf  DMA-BUF pointer.
+     *
+     * \param [in] limit   Size, in bytes, of the dma-buf.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory*
+    (*getSystemMemoryHandleFromDmaBuf)(struct NvKmsKapiDevice *device,
+                                       NvP64 dmaBuf,
+                                       NvU32 limit);
+
+    /*!
+     * Import a semaphore surface allocated elsewhere to NVKMS and return
+     * a handle to the new object.
+     *
+     * \param [in] device  A device allocated using allocateDevice().
+     *
+     * \param [in] nvKmsParamsUser  Userspace pointer to driver-specific
+     *                              parameters describing the semaphore
+     *                              surface being imported.
+     *
+     * \param [in] nvKmsParamsSize  Size of the driver-specific parameter
+     *                              struct.
+     *
+     * \param [out] pSemaphoreMap  Returns a CPU mapping of the semaphore
+     *                             surface's semaphore memory to the
+     *                             client.
+     *
+     * \param [out] pMaxSubmittedMap  Returns a CPU mapping of the
+     *                                semaphore surface's max submitted
+     *                                value memory to the client.
+     *
+     * \return struct NvKmsKapiSemaphoreSurface* on success, NULL on
+     *         failure.
+     */
+    struct NvKmsKapiSemaphoreSurface* (*importSemaphoreSurface)
+    (
+        struct NvKmsKapiDevice *device,
+        NvU64 nvKmsParamsUser,
+        NvU64 nvKmsParamsSize,
+        void **pSemaphoreMap,
+        void **pMaxSubmittedMap
+    );
+
+    /*!
+     * Free an imported semaphore surface.
+     *
+     * \param [in] device            The device passed to
+     *                               importSemaphoreSurface() when
+     *                               creating semaphoreSurface.
+     *
+     * \param [in] semaphoreSurface  A semaphore surface returned by
+     *                               importSemaphoreSurface().
+     */
+    void (*freeSemaphoreSurface)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiSemaphoreSurface *semaphoreSurface
+    );
+
+    /*!
+     * Register a callback to be called when a semaphore reaches a value.
+     *
+     * The callback will be called when the semaphore at index in
+     * semaphoreSurface reaches the value wait_value. The callback will
+     * be called at most once and is automatically unregistered when
+     * called. It may also be unregistered (i.e., cancelled) explicitly
+     * using the unregisterSemaphoreSurfaceCallback() function. To avoid
+     * leaking the memory used to track the registered callback, callers
+     * must ensure one of these methods of unregistration is used for
+     * every successful callback registration that returns a non-NULL
+     * pCallbackHandle.
+     *
+     * \param [in] device            The device passed to
+     *                               importSemaphoreSurface() when
+     *                               creating semaphoreSurface.
+     *
+     * \param [in] semaphoreSurface  A semaphore surface returned by
+     *                               importSemaphoreSurface().
+     *
+     * \param [in] pCallback         A pointer to the function to call
+     *                               when the specified value is reached.
+     *                               NULL means no callback.
+     *
+     * \param [in] pData             Arbitrary data to be passed back to
+     *                               the callback as its sole parameter.
+     *
+     * \param [in] index             The index of the semaphore within
+     *                               semaphoreSurface.
+     *
+     * \param [in] wait_value        The value the semaphore must reach or
+     *                               exceed before the callback is called.
+     *
+     * \param [in] new_value         The value the semaphore will be set
+     *                               to when it reaches or exceeds
+     *                               wait_value. 0 means do not update the
+     *                               value.
+     *
+     * \param [out] pCallbackHandle  On success, the value pointed to will
+     *                               contain an opaque handle to the
+     *                               registered callback that may be used
+     *                               to cancel it if needed. Unused if
+     *                               pCallback is NULL.
+     *
+     * \return NVKMS_KAPI_REG_WAITER_SUCCESS if the waiter was registered,
+     *         or if no callback was requested and the semaphore at index
+     *         has already reached or exceeded wait_value.
+     *
+     *         NVKMS_KAPI_REG_WAITER_ALREADY_SIGNALLED if a callback was
+     *         requested and the semaphore at index has already reached or
+     *         exceeded wait_value.
+     *
+     *         NVKMS_KAPI_REG_WAITER_FAILED if waiter registration failed.
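 *
 * (Editorial usage sketch; names are hypothetical and error handling is
 * omitted:
 *
 *     struct NvKmsKapiSemaphoreSurfaceCallback *handle = NULL;
 *     NvKmsKapiRegisterWaiterResult res =
 *         kapi->registerSemaphoreSurfaceCallback(
 *             device, ss, onReached, ctx,
 *             index, waitValue,
 *             0,  // new_value == 0: leave the value unchanged
 *             &handle);
 *
 *     if (res == NVKMS_KAPI_REG_WAITER_ALREADY_SIGNALLED)
 *         onReached(ctx);  // already past waitValue; nothing was queued
 * )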
+ */ + NvKmsKapiRegisterWaiterResult + (*registerSemaphoreSurfaceCallback) + ( + struct NvKmsKapiDevice *device, + struct NvKmsKapiSemaphoreSurface *semaphoreSurface, + NvKmsSemaphoreSurfaceCallbackProc *pCallback, + void *pData, + NvU64 index, + NvU64 wait_value, + NvU64 new_value, + struct NvKmsKapiSemaphoreSurfaceCallback **pCallbackHandle + ); + + /*! + * Unregister a callback registered via registerSemaphoreSurfaceCallback() + * + * If the callback has not yet been called, this function will cancel the + * callback and free its associated resources. + * + * Note this function treats the callback handle as a pointer. While this + * function does not dereference that pointer itself, the underlying call + * to RM does within a properly guarded critical section that first ensures + * it is not in the process of being used within a callback. This means + * the callstack must take into consideration that pointers are not in + * general unique handles if they may have been freed, since a subsequent + * malloc could return the same pointer value at that point. This callchain + * avoids that by leveraging the behavior of the underlying RM APIs: + * + * 1) A callback handle is referenced relative to its corresponding + * (semaphore surface, index, wait_value) tuple here and within RM. It + * is not a valid handle outside of that scope. + * + * 2) A callback can not be registered against an already-reached value + * for a given semaphore surface index. + * + * 3) A given callback handle can not be registered twice against the same + * (semaphore surface, index, wait_value) tuple, so unregistration will + * never race with registration at the RM level, and would only race at + * a higher level if used incorrectly. Since this is kernel code, we + * can safely assume there won't be malicious clients purposely misuing + * the API, but the burden is placed on the caller to ensure its usage + * does not lead to races at higher levels. + * + * These factors considered together ensure any valid registered handle is + * either still in the relevant waiter list and refers to the same event/ + * callback as when it was registered, or has been removed from the list + * as part of a critical section that also destroys the list itself and + * makes future lookups in that list impossible, and hence eliminates the + * chance of comparing a stale handle with a new handle of the same value + * as part of a lookup. + * + * \param [in] device The device passed to + * importSemaphoreSurface() when creating + * semaphoreSurface. + * + * \param [in] semaphoreSurface The semaphore surface passed to + * registerSemaphoreSurfaceCallback() when + * registering the callback. + * + * \param [in] index The index passed to + * registerSemaphoreSurfaceCallback() when + * registering the callback. + * + * \param [in] wait_value The wait_value passed to + * registerSemaphoreSurfaceCallback() when + * registering the callback. + * + * \param [in] callbackHandle The callback handle returned by + * registerSemaphoreSurfaceCallback(). + */ + NvBool + (*unregisterSemaphoreSurfaceCallback) + ( + struct NvKmsKapiDevice *device, + struct NvKmsKapiSemaphoreSurface *semaphoreSurface, + NvU64 index, + NvU64 wait_value, + struct NvKmsKapiSemaphoreSurfaceCallback *callbackHandle + ); + + /*! + * Update the value of a semaphore surface from the CPU. 
+ *
+ * Update the semaphore value at the specified index from the CPU, then
+ * wake up any pending CPU waiters associated with that index whose wait
+ * values are <= the new value.
+ */
+ NvBool
+ (*setSemaphoreSurfaceValue)
+ (
+     struct NvKmsKapiDevice *device,
+     struct NvKmsKapiSemaphoreSurface *semaphoreSurface,
+     NvU64 index,
+     NvU64 new_value
+ );
+
+ /*!
+ * Set the callback function for suspending and resuming the display system.
+ */
+ void
+ (*setSuspendResumeCallback)
+ (
+     NvKmsKapiSuspendResumeCallbackFunc *function
+ );
+
+ /*!
+ * Immediately initialize the specified display semaphore to the pending state.
+ *
+ * Must be called prior to applying a mode set that utilizes the specified
+ * display semaphore for synchronization.
+ *
+ * \param [in] device          The device which will utilize the semaphore.
+ *
+ * \param [in] semaphoreIndex  Index of the desired semaphore within the
+ *                             NVKMS semaphore pool. Must be less than
+ *                             NvKmsKapiDeviceResourcesInfo::caps::numDisplaySemaphores
+ *                             for the specified device.
+ *
+ * \return NV_TRUE if the semaphore was initialized, or NV_FALSE if it is
+ *         still in use by a previously submitted configuration.
+ */
+ NvBool
+ (*tryInitDisplaySemaphore)
+ (
+     struct NvKmsKapiDevice *device,
+     NvU32 semaphoreIndex
+ );
+
+ /*!
+ * Immediately set the specified display semaphore to the displayable state.
+ *
+ * Must be called after \ref tryInitDisplaySemaphore to indicate that a
+ * mode configuration change utilizing the specified display semaphore for
+ * synchronization may proceed.
+ *
+ * \param [in] device          The device which will utilize the semaphore.
+ *
+ * \param [in] semaphoreIndex  Index of the desired semaphore within the
+ *                             NVKMS semaphore pool. Must be less than
+ *                             NvKmsKapiDeviceResourcesInfo::caps::numDisplaySemaphores
+ *                             for the specified device.
+ */
+ void
+ (*signalDisplaySemaphore)
+ (
+     struct NvKmsKapiDevice *device,
+     NvU32 semaphoreIndex
+ );
+
+ /*!
+ * Immediately cancel use of a display semaphore by resetting its value to
+ * its initial state.
+ *
+ * This can be used by clients to restore a semaphore to a consistent state
+ * when they have prepared it for use by previously calling
+ * \ref tryInitDisplaySemaphore() on it, but are then prevented from
+ * submitting the associated hardware operations to consume it due to the
+ * subsequent failure of some software or hardware operation.
+ *
+ * \param [in] device          The device which will utilize the semaphore.
+ *
+ * \param [in] semaphoreIndex  Index of the desired semaphore within the
+ *                             NVKMS semaphore pool. Must be less than
+ *                             NvKmsKapiDeviceResourcesInfo::caps::numDisplaySemaphores
+ *                             for the specified device.
+ */
+ void
+ (*cancelDisplaySemaphore)
+ (
+     struct NvKmsKapiDevice *device,
+     NvU32 semaphoreIndex
+ );
+
+ /*!
+ * Signal the VRR semaphore at the specified index from the CPU.
+ * If the device does not support VRR semaphores, this is a no-op.
+ * Returns true if the signal succeeded or was a no-op; returns false
+ * otherwise.
+ *
+ * \param [in] device  A device allocated using allocateDevice().
+ *
+ * \param [in] index   The VRR semaphore index to be signalled.
+ */
+ NvBool
+ (*signalVrrSemaphore)
+ (
+     struct NvKmsKapiDevice *device,
+     NvS32 index
+ );
+
+ /*!
+ * Check or wait on a head's LUT notifier.
+ *
+ * \param [in] device             A device allocated using allocateDevice().
+ *
+ * \param [in] head               The head to check for LUT completion.
+ *
+ * \param [in] waitForCompletion  If true, wait for the notifier in NvKms
+ *                                before returning.
+ *
+ * \return Returns whether the notifier has completed.
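+ *
+ * A polling sketch (hypothetical caller; "funcs" is an illustrative
+ * pointer to this function table, and the sketch assumes the NvBool
+ * return value reports completion):
+ *
+ *     if (funcs->checkLutNotifier(device, head, NV_FALSE)) {
+ *         // The LUT update for this head has completed in NvKms.
+ *     }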
+ */ + NvBool + (*checkLutNotifier) + ( + struct NvKmsKapiDevice *device, + NvU32 head, + NvBool waitForCompletion + ); + + /* + * Notify NVKMS that the system's framebuffer console has been disabled and + * the reserved allocation for the old framebuffer console can be unmapped. + */ + void + (*framebufferConsoleDisabled) + ( + struct NvKmsKapiDevice *device + ); +}; + +/** @} */ + +/** + * \defgroup Functions + * @{ + */ + +NvBool nvKmsKapiGetFunctionsTable +( + struct NvKmsKapiFunctionsTable *funcsTable +); + +NvU32 nvKmsKapiF16ToF32(NvU16 a); + +NvU16 nvKmsKapiF32ToF16(NvU32 a); + +NvU32 nvKmsKapiF32Mul(NvU32 a, NvU32 b); + +NvU32 nvKmsKapiF32Div(NvU32 a, NvU32 b); + +NvU32 nvKmsKapiF32Add(NvU32 a, NvU32 b); + +NvU32 nvKmsKapiF32ToUI32RMinMag(NvU32 a, NvBool exact); + +NvU32 nvKmsKapiUI32ToF32(NvU32 a); + +/** @} */ + +#endif /* defined(__NVKMS_KAPI_H__) */ diff --git a/src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c b/src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c new file mode 100644 index 0000000..12490a5 --- /dev/null +++ b/src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c @@ -0,0 +1,349 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvidia-modeset-os-interface.h" + +#include "nvkms-api.h" +#include "nvkms-sync.h" +#include "nvkms-rmapi.h" +#include "nvkms-kapi-notifiers.h" + +#define NVKMS_KAPI_MAX_NOTIFIERS \ + (NVKMS_KAPI_MAX_HEADS * \ + NVKMS_MAX_LAYERS_PER_HEAD * \ + NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER) + +void nvKmsKapiFreeNisoSurface(struct NvKmsKapiDevice *device, + struct NvKmsKapiNisoSurface *surface) +{ + if (surface->hKmsHandle != 0) { + struct NvKmsUnregisterSurfaceParams paramsUnreg = { }; + NvBool status; + + paramsUnreg.request.deviceHandle = device->hKmsDevice; + paramsUnreg.request.surfaceHandle = surface->hKmsHandle; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_UNREGISTER_SURFACE, + ¶msUnreg, sizeof(paramsUnreg)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "NVKMS_IOCTL_UNREGISTER_SURFACE failed"); + } + + device->notifier.hKmsHandle = 0; + } + + if (surface->mapped) { + NV_STATUS status; + + status = nvRmApiUnmapMemory(device->hRmClient, + device->hRmSubDevice, + surface->hRmHandle, + surface->pLinearAddress, + 0); + + if (status != NV_OK) { + nvKmsKapiLogDeviceDebug( + device, + "UnmapMemory failed with error code 0x%08x", + status); + } + + device->notifier.mapped = NV_FALSE; + } + + if (surface->hRmHandle != 0) { + NvU32 status; + + status = nvRmApiFree(device->hRmClient, + device->hRmDevice, + surface->hRmHandle); + + if (status != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "RmFree failed with error code 0x%08x", + status); + } + + nvFreeUnixRmHandle(&device->handleAllocator, surface->hRmHandle); + surface->hRmHandle = 0; + } +} + +static void InitNotifier(struct NvKmsKapiDevice *device, + NvU32 head, NvU32 layer, NvU32 index) +{ + nvKmsResetNotifier(device->notifier.format, + (layer == NVKMS_OVERLAY_LAYER), + NVKMS_KAPI_NOTIFIER_INDEX(head, layer, index), + device->notifier.pLinearAddress); +} + +static NvBool AllocateNisoSurface(struct NvKmsKapiDevice *device, + struct NvKmsKapiNisoSurface *surface, + NvU64 size, + NvBool inVideoMemory) +{ + struct NvKmsRegisterSurfaceParams surfParams = {}; + NV_STATUS status = 0; + NvU8 compressible = 0; + NvBool ret; + + surface->hRmHandle = + nvGenerateUnixRmHandle(&device->handleAllocator); + + if (surface->hRmHandle == 0x0) { + nvKmsKapiLogDeviceDebug( + device, + "nvGenerateUnixRmHandle() failed"); + return NV_FALSE; + } + + if (inVideoMemory) { + ret = nvKmsKapiAllocateVideoMemory(device, + surface->hRmHandle, + NvKmsSurfaceMemoryLayoutPitch, + size, + NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER, + &compressible); + } else { + ret = nvKmsKapiAllocateSystemMemory(device, + surface->hRmHandle, + NvKmsSurfaceMemoryLayoutPitch, + size, + NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER, + &compressible); + } + + if (!ret) { + nvFreeUnixRmHandle(&device->handleAllocator, surface->hRmHandle); + surface->hRmHandle = 0x0; + goto failed; + } + + status = nvRmApiMapMemory(device->hRmClient, + device->hRmSubDevice, + surface->hRmHandle, + 0, + size, + &surface->pLinearAddress, + 0); + + if (status != NV_OK) { + nvKmsKapiLogDeviceDebug( + device, + "MapMemory failed with error code 0x%08x", + status); + goto failed; + } + + surface->mapped = NV_TRUE; + + surfParams.request.deviceHandle = device->hKmsDevice; + surfParams.request.useFd = FALSE; + surfParams.request.rmClient = device->hRmClient; + + surfParams.request.widthInPixels = size; + surfParams.request.heightInPixels = 1; + surfParams.request.layout = NvKmsSurfaceMemoryLayoutPitch; + surfParams.request.format = NvKmsSurfaceMemoryFormatI8; + 
surfParams.request.log2GobsPerBlockY = 0; + surfParams.request.isoType = NVKMS_MEMORY_NISO; + + surfParams.request.planes[0].u.rmObject = surface->hRmHandle; + surfParams.request.planes[0].pitch = size; + surfParams.request.planes[0].rmObjectSizeInBytes = size; + + if (!nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_REGISTER_SURFACE, + &surfParams, sizeof(surfParams))) { + nvKmsKapiLogDeviceDebug( + device, + "NVKMS_IOCTL_REGISTER_SURFACE failed"); + goto failed; + } + + surface->hKmsHandle = surfParams.reply.surfaceHandle; + + return NV_TRUE; + +failed: + + nvKmsKapiFreeNisoSurface(device, surface); + + return NV_FALSE; +} + +#define NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE 0x1000 + +NvBool nvKmsKapiAllocateNotifiers(struct NvKmsKapiDevice *device, + NvBool inVideoMemory) +{ + ct_assert((NVKMS_KAPI_MAX_NOTIFIERS * NVKMS_KAPI_NOTIFIER_SIZE) <= + NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE); + + ct_assert(NVKMS_KAPI_NOTIFIER_SIZE >= sizeof(NvNotification)); + nvAssert(NVKMS_KAPI_NOTIFIER_SIZE >= + nvKmsSizeOfNotifier(device->notifier.format, TRUE /* overlay */)); + nvAssert(NVKMS_KAPI_NOTIFIER_SIZE >= + nvKmsSizeOfNotifier(device->notifier.format, FALSE /* overlay */)); + + if (!AllocateNisoSurface(device, + &device->notifier, + NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE, + inVideoMemory)) { + return NV_FALSE; + } + + /* Init Notifiers */ + + { + NvU32 head; + + for (head = 0; head < device->numHeads; head++) { + NvU32 layer; + + for (layer = 0; layer < NVKMS_MAX_LAYERS_PER_HEAD; layer++) { + NvU32 index; + + for (index = 0; + index < NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER; index++) { + InitNotifier(device, head, layer, index); + } + } + } + } + + return NV_TRUE; +} + +static void ResetSemaphore(struct NvKmsKapiDevice *device, + NvU32 index, + NvU32 payload) +{ + nvKmsResetSemaphore(device->semaphore.format, + index, + device->semaphore.pLinearAddress, + payload); +} + +#define NVKMS_KAPI_SEMAPHORE_SURFACE_SIZE 0x1000 + +NvBool nvKmsKapiAllocateSemaphores(struct NvKmsKapiDevice *device, + NvBool inVideoMemory) +{ + NvU32 index; + + if (!AllocateNisoSurface(device, + &device->semaphore, + NVKMS_KAPI_SEMAPHORE_SURFACE_SIZE, + inVideoMemory)) { + return NV_FALSE; + } + + /* Init Semaphores */ + + device->numDisplaySemaphores = NVKMS_KAPI_SEMAPHORE_SURFACE_SIZE / + nvKmsSizeOfSemaphore(device->semaphore.format); + + /* + * See the comment in nvKmsKapiSignalDisplaySemaphore() for the full + * justification of this requirement. The current implementation requires + * only 16 semaphores (2 per window) given a maximum of one outstanding + * flip, but this value allows up to 32 outstanding flips, as recommended + * by the architecture team in an old hardware bug. + */ + nvAssert(device->numDisplaySemaphores >= 256); + + for (index = 0; index < device->numDisplaySemaphores; index++) { + ResetSemaphore(device, index, NVKMS_KAPI_SEMAPHORE_VALUE_DONE); + } + + return NV_TRUE; +} + +NvBool nvKmsKapiTryInitDisplaySemaphore(struct NvKmsKapiDevice *device, + NvU32 index) +{ + struct nvKmsParsedSemaphore semParsed; + + nvKmsParseSemaphore(device->semaphore.format, + index, + device->semaphore.pLinearAddress, + &semParsed); + + if (semParsed.payload != NVKMS_KAPI_SEMAPHORE_VALUE_DONE) { + return NV_FALSE; + } + + ResetSemaphore(device, index, NVKMS_KAPI_SEMAPHORE_VALUE_NOT_READY); + + return NV_TRUE; +} + +void nvKmsKapiSignalDisplaySemaphore(struct NvKmsKapiDevice *device, + NvU32 index) +{ + /* + * Note most users of semaphores use a "ready" value that varies from + * frame to frame, citing bug 194936. 
However, this "bug" simply
+ * notes that the hardware may read ahead and grab semaphore values for
+ * pending semaphore acquires such that two pending frames using the
+ * same semaphore might be signaled "ready" by the same semaphore write.
+ * Given that this implementation cycles through at least 256 semaphores,
+ * all 8 hardware windows could be programmed in every flip and there
+ * would still be at least 32 frames in flight before a semaphore slot is
+ * reused. In addition, the initialization code above ensures the prior
+ * frame using a given semaphore has completed before that semaphore is
+ * assigned to a new frame. The varying-value approach is therefore not
+ * necessary here: the hardware has no opportunity to pre-fetch a prior
+ * frame's semaphore acquire value from the semaphore, and hence a
+ * constant value is sufficient.
+ */
+    ResetSemaphore(device, index, NVKMS_KAPI_SEMAPHORE_VALUE_READY);
+}
+
+void nvKmsKapiCancelDisplaySemaphore(struct NvKmsKapiDevice *device,
+                                     NvU32 index)
+{
+    struct nvKmsParsedSemaphore semParsed;
+
+    nvKmsParseSemaphore(device->semaphore.format,
+                        index,
+                        device->semaphore.pLinearAddress,
+                        &semParsed);
+
+    if (semParsed.payload != NVKMS_KAPI_SEMAPHORE_VALUE_DONE) {
+        nvAssert(semParsed.payload == NVKMS_KAPI_SEMAPHORE_VALUE_NOT_READY);
+        ResetSemaphore(device, index, NVKMS_KAPI_SEMAPHORE_VALUE_DONE);
+    }
+}
+
+NvU32 nvKmsKapiGetDisplaySemaphoreOffset(struct NvKmsKapiDevice *device,
+                                         NvU32 index)
+{
+    return nvKmsSizeOfSemaphore(device->semaphore.format) * index;
+}
diff --git a/src/nvidia-modeset/kapi/src/nvkms-kapi-sync.c b/src/nvidia-modeset/kapi/src/nvkms-kapi-sync.c
new file mode 100644
index 0000000..1b243d6
--- /dev/null
+++ b/src/nvidia-modeset/kapi/src/nvkms-kapi-sync.c
@@ -0,0 +1,571 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#include "nvidia-modeset-os-interface.h" + +#include "nvkms-rmapi.h" + +#include "nvkms-kapi.h" +#include "nvkms-kapi-private.h" +#include "nvkms-kapi-internal.h" + +#include "class/cl0000.h" +#include "class/cl0005.h" +#include "ctrl/ctrl00da.h" + +struct NvKmsKapiChannelEvent { + struct NvKmsKapiDevice *device; + + NvKmsChannelEventProc *proc; + void *data; + + struct NvKmsKapiPrivAllocateChannelEventParams nvKmsParams; + + NvHandle hCallbacks[NVKMS_KAPI_MAX_EVENT_CHANNELS]; + NVOS10_EVENT_KERNEL_CALLBACK_EX rmCallback; +}; + +static void ChannelEventHandler(void *arg1, void *arg2, NvHandle hEvent, + NvU32 data, NvU32 status) +{ + struct NvKmsKapiChannelEvent *cb = arg1; + cb->proc(cb->data, 0); +} + +void nvKmsKapiFreeChannelEvent +( + struct NvKmsKapiDevice *device, + struct NvKmsKapiChannelEvent *cb +) +{ + int i; + + if (device == NULL || cb == NULL) { + return; + } + + for (i = 0; i < NVKMS_KAPI_MAX_EVENT_CHANNELS; ++i) { + if (!cb->hCallbacks[i]) { + continue; + } + + nvRmApiFree(device->hRmClient, + device->hRmClient, + cb->hCallbacks[i]); + + nvFreeUnixRmHandle(&device->handleAllocator, + cb->hCallbacks[i]); + } + + nvKmsKapiFree(cb); +} + +struct NvKmsKapiChannelEvent* nvKmsKapiAllocateChannelEvent +( + struct NvKmsKapiDevice *device, + NvKmsChannelEventProc *proc, + void *data, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize +) +{ + int status, i; + struct NvKmsKapiChannelEvent *cb = NULL; + + if (device == NULL || proc == NULL) { + goto fail; + } + + cb = nvKmsKapiCalloc(1, sizeof(*cb)); + if (cb == NULL) { + goto fail; + } + + /* Verify the driver-private params size and copy it in from userspace */ + + if (nvKmsParamsSize != sizeof(cb->nvKmsParams)) { + nvKmsKapiLogDebug( + "NVKMS private memory import parameter size mismatch - " + "expected: 0x%llx, caller specified: 0x%llx", + (NvU64)sizeof(cb->nvKmsParams), nvKmsParamsSize); + goto fail; + } + + status = nvkms_copyin(&cb->nvKmsParams, + nvKmsParamsUser, sizeof(cb->nvKmsParams)); + if (status != 0) { + nvKmsKapiLogDebug( + "NVKMS private memory import parameters could not be read from " + "userspace"); + goto fail; + } + + cb->device = device; + + cb->proc = proc; + cb->data = data; + + cb->rmCallback.func = ChannelEventHandler; + cb->rmCallback.arg = cb; + + for (i = 0; i < NVKMS_KAPI_MAX_EVENT_CHANNELS; ++i) { + NV0005_ALLOC_PARAMETERS eventParams = { }; + NvU32 ret; + + if (!cb->nvKmsParams.hChannels[i]) { + continue; + } + + cb->hCallbacks[i] = nvGenerateUnixRmHandle(&device->handleAllocator); + if (cb->hCallbacks[i] == 0x0) { + nvKmsKapiLogDeviceDebug(device, + "Failed to allocate event callback handle for channel 0x%x", + cb->nvKmsParams.hChannels[i]); + goto fail; + } + + eventParams.hParentClient = cb->nvKmsParams.hClient; + eventParams.hClass = NV01_EVENT_KERNEL_CALLBACK_EX; + eventParams.notifyIndex = 0; + eventParams.data = NV_PTR_TO_NvP64(&cb->rmCallback); + + ret = nvRmApiAlloc(device->hRmClient, + cb->nvKmsParams.hChannels[i], + cb->hCallbacks[i], + NV01_EVENT_KERNEL_CALLBACK_EX, + &eventParams); + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug(device, + "Failed to allocate event callback for channel 0x%x", + cb->nvKmsParams.hChannels[i]); + nvFreeUnixRmHandle(&device->handleAllocator, cb->hCallbacks[i]); + cb->hCallbacks[i] = 0; + goto fail; + } + } + + return cb; + +fail: + nvKmsKapiFreeChannelEvent(device, cb); + return NULL; +} + +struct NvKmsKapiSemaphoreSurface { + NvHandle hSemaphoreSurface; + + NvHandle hSemaphoreMem; + NvHandle hMaxSubmittedMem; +}; + +struct 
NvKmsKapiSemaphoreSurface* +nvKmsKapiImportSemaphoreSurface +( + struct NvKmsKapiDevice *device, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize, + void **pSemaphoreMap, + void **pMaxSubmittedMap +) +{ + struct NvKmsKapiSemaphoreSurface *ss = NULL; + struct NvKmsKapiPrivImportSemaphoreSurfaceParams p, *pHeap; + NV_SEMAPHORE_SURFACE_CTRL_REF_MEMORY_PARAMS refParams = {}; + NvU32 ret; + int status; + + /* Verify the driver-private params size and copy it in from userspace */ + + if (nvKmsParamsSize != sizeof(p)) { + nvKmsKapiLogDebug( + "NVKMS semaphore surface import parameter size mismatch - expected: 0x%llx, caller specified: 0x%llx", + (NvU64)sizeof(p), nvKmsParamsSize); + goto fail; + } + + /* + * Use a heap allocation as the destination pointer passed to + * nvkms_copyin; stack allocations created within core NVKMS may not + * be recognizable to the Linux kernel's CONFIG_HARDENED_USERCOPY + * checker, triggering false errors. But then save the result to a + * variable on the stack, so that we can free the heap memory + * immediately and not worry about its lifetime. + */ + + pHeap = nvKmsKapiCalloc(1, sizeof(*pHeap)); + if (pHeap == NULL) { + nvKmsKapiLogDebug( + "NVKMS failed to allocate semaphore surface parameter struct of size: %ld", + (long)sizeof(*pHeap)); + goto fail; + } + + status = nvkms_copyin(pHeap, nvKmsParamsUser, sizeof(*pHeap)); + + p = *pHeap; + nvKmsKapiFree(pHeap); + + if (status != 0) { + nvKmsKapiLogDebug( + "NVKMS semaphore surface import parameters could not be read from userspace"); + goto fail; + } + + ss = nvKmsKapiCalloc(1, sizeof(*ss)); + if (ss == NULL) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to allocate memory for NVKMS semaphore surface while importing (0x%08x, 0x%08x)", + p.hClient, p.hSemaphoreSurface); + goto fail; + } + + ret = nvRmApiDupObject2(device->hRmClient, + device->hRmSubDevice, + &ss->hSemaphoreSurface, + p.hClient, + p.hSemaphoreSurface, + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to dup RM semaphore surface object (0x%08x, 0x%08x)", + p.hClient, p.hSemaphoreSurface); + goto fail; + } + + ret = nvRmApiControl(device->hRmClient, + ss->hSemaphoreSurface, + NV_SEMAPHORE_SURFACE_CTRL_CMD_REF_MEMORY, + &refParams, + sizeof(refParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to ref RM semaphore surface memory objects (0x%08x, 0x%08x)", + p.hClient, p.hSemaphoreSurface); + goto fail; + } + + ss->hSemaphoreMem = refParams.hSemaphoreMem; + ss->hMaxSubmittedMem = refParams.hMaxSubmittedMem; + + ret = nvRmApiMapMemory(device->hRmClient, + device->hRmDevice, + ss->hSemaphoreMem, + 0, + p.semaphoreSurfaceSize, + pSemaphoreMap, + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to map RM semaphore surface semaphore memory (0x%08x, 0x%08x)", + p.hClient, p.hSemaphoreSurface); + goto fail; + } + + if (ss->hMaxSubmittedMem != NV01_NULL_OBJECT) { + if (ss->hMaxSubmittedMem != ss->hSemaphoreMem) { + ret = nvRmApiMapMemory(device->hRmClient, + device->hRmDevice, + ss->hMaxSubmittedMem, + 0, + p.semaphoreSurfaceSize, + pMaxSubmittedMap, + 0); + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to map RM semaphore surface max submitted memory (0x%08x, 0x%08x)", + p.hClient, p.hSemaphoreSurface); + goto fail; + } + } else { + *pMaxSubmittedMap = *pSemaphoreMap; + } + } else { + *pMaxSubmittedMap = NULL; + } + + return ss; + +fail: + if (ss && ss->hSemaphoreSurface) { + if 
((ss->hMaxSubmittedMem != NV01_NULL_OBJECT) && + (ss->hMaxSubmittedMem != ss->hSemaphoreMem)) { + nvRmApiFree(device->hRmClient, + device->hRmDevice, + ss->hMaxSubmittedMem); + } + + if (ss->hSemaphoreMem != NV01_NULL_OBJECT) { + nvRmApiFree(device->hRmClient, + device->hRmDevice, + ss->hSemaphoreMem); + } + + nvRmApiFree(device->hRmClient, + device->hRmDevice, + ss->hSemaphoreSurface); + } + nvKmsKapiFree(ss); + return NULL; +} + +void nvKmsKapiFreeSemaphoreSurface +( + struct NvKmsKapiDevice *device, + struct NvKmsKapiSemaphoreSurface *ss +) +{ + if (device == NULL || ss == NULL) { + return; + } + + if ((ss->hMaxSubmittedMem != NV01_NULL_OBJECT) && + (ss->hMaxSubmittedMem != ss->hSemaphoreMem)) { + nvRmApiFree(device->hRmClient, + device->hRmDevice, + ss->hMaxSubmittedMem); + } + + nvRmApiFree(device->hRmClient, + device->hRmDevice, + ss->hSemaphoreMem); + + nvRmApiFree(device->hRmClient, + device->hRmSubDevice, + ss->hSemaphoreSurface); + + nvKmsKapiFree(ss); +} + +struct NvKmsKapiSemaphoreSurfaceCallback { + NvKmsSemaphoreSurfaceCallbackProc *pCallback; + void *pData; + + NVOS10_EVENT_KERNEL_CALLBACK_EX rmCallback; +}; + +static void SemaphoreSurfaceKapiCallback(void *arg1, void *arg2, NvHandle hEvent, + NvU32 data, NvU32 status) +{ + struct NvKmsKapiSemaphoreSurfaceCallback *cb = arg1; + cb->pCallback(cb->pData); + nvKmsKapiFree(cb); +} + +NvKmsKapiRegisterWaiterResult +nvKmsKapiRegisterSemaphoreSurfaceCallback( + struct NvKmsKapiDevice *device, + struct NvKmsKapiSemaphoreSurface *semaphoreSurface, + NvKmsSemaphoreSurfaceCallbackProc *pCallback, + void *pData, + NvU64 index, + NvU64 wait_value, + NvU64 new_value, + struct NvKmsKapiSemaphoreSurfaceCallback **pCallbackHandle) +{ + NvU32 ret; + + struct NvKmsKapiSemaphoreSurfaceCallback *cb = NULL; + NV_SEMAPHORE_SURFACE_CTRL_REGISTER_WAITER_PARAMS waiterParams = { }; + + if (device == NULL) { + nvKmsKapiLogDebug( + "Invalid device while registering semaphore surface callback"); + goto fail; + } + + if ((semaphoreSurface == NULL) || + ((pCallback == NULL) && (new_value == 0))) { + nvKmsKapiLogDeviceDebug( + device, + "Invalid parameter while registering semaphore surface callback"); + goto fail; + } + + waiterParams.index = index; + waiterParams.waitValue = wait_value; + waiterParams.newValue = new_value; + + if (pCallback) { + cb = nvKmsKapiCalloc(1, sizeof(*cb)); + if (cb == NULL) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to allocate memory for semaphore surface (0x%08x, 0x%08x) callback on index %" NvU64_fmtu " for value %" NvU64_fmtx, + device->hRmClient, semaphoreSurface->hSemaphoreSurface, + index, wait_value); + goto fail; + } + + cb->pCallback = pCallback; + cb->pData = pData; + cb->rmCallback.func = SemaphoreSurfaceKapiCallback; + cb->rmCallback.arg = cb; + + waiterParams.notificationHandle = (NvUPtr)&cb->rmCallback; + } + + ret = nvRmApiControl(device->hRmClient, + semaphoreSurface->hSemaphoreSurface, + NV_SEMAPHORE_SURFACE_CTRL_CMD_REGISTER_WAITER, + &waiterParams, + sizeof(waiterParams)); + + switch (ret) { + case NVOS_STATUS_SUCCESS: + if (pCallback) { + *pCallbackHandle = cb; + } + return NVKMS_KAPI_REG_WAITER_SUCCESS; + case NVOS_STATUS_ERROR_ALREADY_SIGNALLED: + return NVKMS_KAPI_REG_WAITER_ALREADY_SIGNALLED; + default: + break; + } + +fail: + nvKmsKapiFree(cb); + return NVKMS_KAPI_REG_WAITER_FAILED; +} + +NvBool +nvKmsKapiUnregisterSemaphoreSurfaceCallback( + struct NvKmsKapiDevice *device, + struct NvKmsKapiSemaphoreSurface *semaphoreSurface, + NvU64 index, + NvU64 wait_value, + struct 
NvKmsKapiSemaphoreSurfaceCallback *callbackHandle)
+{
+    NV_SEMAPHORE_SURFACE_CTRL_UNREGISTER_WAITER_PARAMS waiterParams = { };
+    NvU32 ret;
+
+    if (device == NULL) {
+        nvKmsKapiLogDebug(
+            "Invalid device while unregistering semaphore surface callback");
+        return NV_FALSE;
+    }
+
+    if ((semaphoreSurface == NULL) || (callbackHandle == NULL) ||
+        (wait_value == 0)) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Invalid parameter while unregistering semaphore surface callback");
+        return NV_FALSE;
+    }
+
+    /*
+     * Note this function does not actually dereference callbackHandle before
+     * making the RM control call. This is important, as there may exist a race
+     * such that the client is calling this function while another thread is
+     * running the callback and freeing its handle.
+     *
+     * The existence of this race seems to imply there is an additional hazard
+     * where a new callback may be registered against the same wait value during
+     * the first race, which this call would then mistakenly delete. That is
+     * impossible because the RM semaphore surface code would detect that such a
+     * waiter is already signaled and return without adding it to the waiter
+     * list.
+     */
+    waiterParams.index = index;
+    waiterParams.waitValue = wait_value;
+
+    /*
+     * Manually perform the equivalent of &callbackHandle->rmCallback, but with
+     * semantics that make it clearer there is no access of the memory pointed
+     * to by callbackHandle.
+     */
+    waiterParams.notificationHandle = (NvUPtr)callbackHandle +
+        offsetof(struct NvKmsKapiSemaphoreSurfaceCallback, rmCallback);
+
+    ret = nvRmApiControl(device->hRmClient,
+                         semaphoreSurface->hSemaphoreSurface,
+                         NV_SEMAPHORE_SURFACE_CTRL_CMD_UNREGISTER_WAITER,
+                         &waiterParams,
+                         sizeof(waiterParams));
+
+    switch (ret) {
+    case NVOS_STATUS_SUCCESS:
+        /*
+         * The callback was successfully unregistered, and will never run. Free
+         * its associated data.
+         */
+        nvKmsKapiFree(callbackHandle);
+        return NV_TRUE;
+
+    default:
+        /*
+         * This code must assume failure to unregister for any reason indicates
+         * the callback is being run right now, or is on a list of pending
+         * callbacks which will be run in finite time. Do not free its data.
+         */
+        return NV_FALSE;
+    }
+}
+
+NvBool
+nvKmsKapiSetSemaphoreSurfaceValue(
+    struct NvKmsKapiDevice *device,
+    struct NvKmsKapiSemaphoreSurface *semaphoreSurface,
+    NvU64 index,
+    NvU64 new_value)
+{
+    NvU32 ret;
+
+    NV_SEMAPHORE_SURFACE_CTRL_SET_VALUE_PARAMS setParams = { };
+
+    if (device == NULL) {
+        nvKmsKapiLogDebug(
+            "Invalid device used to set semaphore surface value");
+        return NV_FALSE;
+    }
+
+    if (semaphoreSurface == NULL) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Attempt to set value on an invalid semaphore surface");
+        return NV_FALSE;
+    }
+
+    setParams.index = index;
+    setParams.newValue = new_value;
+
+    ret = nvRmApiControl(device->hRmClient,
+                         semaphoreSurface->hSemaphoreSurface,
+                         NV_SEMAPHORE_SURFACE_CTRL_CMD_SET_VALUE,
+                         &setParams,
+                         sizeof(setParams));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        return NV_FALSE;
+    }
+
+    return NV_TRUE;
+}
diff --git a/src/nvidia-modeset/kapi/src/nvkms-kapi.c b/src/nvidia-modeset/kapi/src/nvkms-kapi.c
new file mode 100644
index 0000000..7a82a1a
--- /dev/null
+++ b/src/nvidia-modeset/kapi/src/nvkms-kapi.c
@@ -0,0 +1,4113 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvUnixVersion.h"
+
+#include "nvidia-modeset-os-interface.h"
+
+#include "nvkms-api.h"
+#include "nvkms-sync.h"
+#include "nvkms-rmapi.h"
+#include "nvkms-vrr.h"
+
+#include "nvkms-softfloat.h"
+#include "nv-float.h"
+
+#include "nvkms-kapi.h"
+#include "nvkms-kapi-private.h"
+#include "nvkms-kapi-internal.h"
+#include "nvkms-kapi-notifiers.h"
+
+#include "nv_smg.h"
+
+#include <class/cl0000.h> /* NV01_ROOT/NV01_NULL_OBJECT */
+#include <class/cl003e.h> /* NV01_MEMORY_SYSTEM */
+#include <class/cl0080.h> /* NV01_DEVICE */
+#include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */
+#include <class/cl0071.h> /* NV01_MEMORY_SYSTEM_OS_DESCRIPTOR */
+#include <class/cl2080.h> /* NV20_SUBDEVICE_0 */
+
+#include <ctrl/ctrl0000/ctrl0000gpu.h> /* NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 */
+#include <ctrl/ctrl0000/ctrl0000unix.h> /* NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD */
+#include <ctrl/ctrl0000/ctrl0000client.h> /* NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM */
+#include <ctrl/ctrl0080/ctrl0080gpu.h> /* NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES */
+#include <ctrl/ctrl0080/ctrl0080fb.h> /* NV0080_CTRL_CMD_FB_GET_CAPS_V2 */
+#include <ctrl/ctrl2080/ctrl2080fb.h> /* NV2080_CTRL_CMD_FB_GET_SEMAPHORE_SURFACE_LAYOUT */
+#include <ctrl/ctrl2080/ctrl2080unix.h> /* NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT */
+
+#include "ctrl/ctrl003e.h" /* NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES */
+#include "ctrl/ctrl0041.h" /* NV0041_CTRL_SURFACE_INFO */
+
+#include "nv_smg.h"
+
+ct_assert(NVKMS_KAPI_LAYER_PRIMARY_IDX == NVKMS_MAIN_LAYER);
+ct_assert(NVKMS_KAPI_LAYER_MAX == NVKMS_MAX_LAYERS_PER_HEAD);
+
+/* XXX Move to NVKMS */
+#define NV_EVO_PITCH_ALIGNMENT 0x100
+
+#define NVKMS_KAPI_SUPPORTED_EVENTS_MASK \
+    ((1 << NVKMS_EVENT_TYPE_DPY_CHANGED) | \
+     (1 << NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED) | \
+     (1 << NVKMS_EVENT_TYPE_FLIP_OCCURRED))
+
+static NvU32 EnumerateGpus(void (*gpuCallback)(const struct NvKmsKapiGpuInfo *info))
+{
+    nv_gpu_info_t *gpu_info = NULL;
+    struct NvKmsKapiGpuInfo kapiGpuInfo;
+    nvMIGDeviceDescription *activeDevices = NULL;
+    NvU32 activeDeviceCount = 0;
+    NvU32 gpuCount;
+    NvU32 kapiGpuCount;
+
+    if (NV_OK != nvSMGGetDeviceList(&nvEvoGlobal.rmSmgContext,
+                                    &activeDevices,
+                                    &activeDeviceCount)) {
+        nvKmsKapiLogDebug("Failed to query SMG device list");
+        return 0;
+    }
+
+    gpu_info = nvkms_alloc(NV_MAX_GPUS * sizeof(*gpu_info), NV_TRUE);
+    if (!gpu_info) {
+        nvKmsKapiLogDebug("Out of memory");
+        return 0;
+    }
+
+    /*
+     * Enumerate physical GPUs and generate an expanded list where entries
+     * for GPUs in MIG mode are replaced by a list of MIG GPUs on that GPU.
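+     *
+     * For example, a system with one non-MIG GPU and one GPU hosting two
+     * graphics-capable MIG instances yields three KAPI GPU entries: the
+     * non-MIG GPU with migDevice == NO_MIG_DEVICE, plus one entry per MIG
+     * instance.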
+ */ + gpuCount = nvkms_enumerate_gpus(gpu_info); + kapiGpuCount = 0; + + for (NvU32 i = 0; i < gpuCount; i++) { + NvBool foundMig = NV_FALSE; + + for (NvU32 j = 0; j < activeDeviceCount; j++) { + /* + * Pass back an NvKmsKapiGpuInfo for each active device found + * for the current gpu_id. For regular GPUs this will only be + * one but in MIG mode the same gpu_id can host multiple MIG + * devices. In MIG mode, only offer graphics capable MIG device + * (SMG). + */ + if (activeDevices[j].gpuId == gpu_info[i].gpu_id) { + foundMig = NV_TRUE; + + if (activeDevices[j].smgAccessOk) { + kapiGpuInfo.gpuInfo = gpu_info[i]; + kapiGpuInfo.migDevice = activeDevices[j].migDeviceId; + gpuCallback(&kapiGpuInfo); + + kapiGpuCount++; + } + } + } + + if (!foundMig) { + kapiGpuInfo.gpuInfo = gpu_info[i]; + kapiGpuInfo.migDevice = NO_MIG_DEVICE; + gpuCallback(&kapiGpuInfo); + + kapiGpuCount++; + } + } + + nvkms_free(gpu_info, NV_MAX_GPUS * sizeof(*gpu_info)); + + return kapiGpuCount; +} + +/* + * Helper function to free RM objects allocated for NvKmsKapiDevice. + */ +static void RmFreeDevice(struct NvKmsKapiDevice *device) +{ + if (device->smgGpuInstSubscriptionHdl != 0x0) { + nvKmsKapiFreeRmHandle(device, device->smgGpuInstSubscriptionHdl); + device->smgGpuInstSubscriptionHdl = 0x0; + } + if (device->smgComputeInstSubscriptionHdl != 0x0) { + nvKmsKapiFreeRmHandle(device, device->smgComputeInstSubscriptionHdl); + device->smgComputeInstSubscriptionHdl = 0x0; + } + + if (device->hRmSubDevice != 0x0) { + nvRmApiFree(device->hRmClient, + device->hRmDevice, + device->hRmSubDevice); + nvKmsKapiFreeRmHandle(device, device->hRmSubDevice); + device->hRmSubDevice = 0x0; + } + + /* Free RM device object */ + + if (device->hRmDevice != 0x0) { + nvRmApiFree(device->hRmClient, + device->hRmClient, + device->hRmDevice); + nvKmsKapiFreeRmHandle(device, device->hRmDevice); + + device->hRmDevice = 0x0; + } + + nvTearDownUnixRmHandleAllocator(&device->handleAllocator); + + device->deviceInstance = 0; + + /* Free RM client */ + + if (device->hRmClient != 0x0) { + nvRmApiFree(device->hRmClient, + device->hRmClient, + device->hRmClient); + + device->hRmClient = 0x0; + } +} + +/* + * Wrappers to help SMG access NvKmsKAPI's RM context. + */ +static NvU32 NvKmsKapiRMControl(nvRMContextPtr rmctx, NvU32 client, NvU32 object, NvU32 cmd, void *params, NvU32 paramsSize) +{ + return nvRmApiControl(client, object, cmd, params, paramsSize); +} + +static NvU32 NvKmsKapiRMAlloc(nvRMContextPtr rmctx, NvU32 client, NvHandle parent, NvHandle object, NvU32 cls, void *allocParams) +{ + return nvRmApiAlloc(client, parent, object, cls, allocParams); +} + +static NvU32 NvKmsKapiRMFree(nvRMContextPtr rmctx, NvU32 client, NvHandle parent, NvHandle object) +{ + return nvRmApiFree(client, parent, object); +} + +/* + * Helper function to allocate RM objects for NvKmsKapiDevice. 
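+ *
+ * The objects form a parent-child hierarchy: an RM client (NV01_ROOT), a
+ * device (NV01_DEVICE_0) for the probed device instance, a subdevice
+ * (NV20_SUBDEVICE_0), and, for MIG devices, GPU-instance and
+ * compute-instance subscriptions. RmFreeDevice() releases them in the
+ * reverse order.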
+ */
+static NvBool RmAllocateDevice(struct NvKmsKapiDevice *device)
+{
+    NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS getNumSubDevicesParams = { 0 };
+    NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS idInfoParams = { };
+    NV2080_ALLOC_PARAMETERS subdevAllocParams = { 0 };
+    NV0080_ALLOC_PARAMETERS allocParams = { };
+    NV0080_CTRL_FB_GET_CAPS_V2_PARAMS fbCapsParams = { 0 };
+
+    NvU32 hRmDevice, hRmSubDevice;
+    NvBool supportsGenericPageKind;
+    NvU32 ret;
+
+    /* Allocate RM client */
+
+    ret = nvRmApiAlloc(NV01_NULL_OBJECT,
+                       NV01_NULL_OBJECT,
+                       NV01_NULL_OBJECT,
+                       NV01_ROOT,
+                       &device->hRmClient);
+
+    if (ret != NVOS_STATUS_SUCCESS || device->hRmClient == 0x0) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM client");
+        goto failed;
+    }
+
+    /* Initialize RM context */
+
+    device->rmSmgContext.clientHandle = device->hRmClient;
+    device->rmSmgContext.control = NvKmsKapiRMControl;
+    device->rmSmgContext.alloc = NvKmsKapiRMAlloc;
+    device->rmSmgContext.free = NvKmsKapiRMFree;
+
+    /* Query device instance */
+
+    idInfoParams.gpuId = device->gpuId;
+
+    ret = nvRmApiControl(device->hRmClient,
+                         device->hRmClient,
+                         NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2,
+                         &idInfoParams,
+                         sizeof(idInfoParams));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to query device instance");
+        goto failed;
+    }
+
+    device->deviceInstance = idInfoParams.deviceInstance;
+    device->isSOC =
+        FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _SOC, _TRUE,
+                     idInfoParams.gpuFlags);
+
+    /* Initialize RM handle allocator */
+
+    if (!nvInitUnixRmHandleAllocator(&device->handleAllocator,
+                                     device->hRmClient,
+                                     device->deviceInstance + 1)) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to initialize RM handle allocator");
+        goto failed;
+    }
+
+    /* Allocate RM device object */
+
+    hRmDevice = nvKmsKapiGenerateRmHandle(device);
+
+    if (hRmDevice == 0x0) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM handle");
+        goto failed;
+    }
+
+    allocParams.deviceId = device->deviceInstance;
+
+    allocParams.hClientShare = device->hRmClient;
+
+    ret = nvRmApiAlloc(device->hRmClient,
+                       device->hRmClient,
+                       hRmDevice,
+                       NV01_DEVICE_0,
+                       &allocParams);
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM device object");
+        nvKmsKapiFreeRmHandle(device, hRmDevice);
+        goto failed;
+    }
+
+    device->hRmDevice = hRmDevice;
+
+    ret = nvRmApiControl(device->hRmClient,
+                         device->hRmDevice,
+                         NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES,
+                         &getNumSubDevicesParams,
+                         sizeof(getNumSubDevicesParams));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to determine number of GPUs");
+        goto failed;
+    }
+
+    if (getNumSubDevicesParams.numSubDevices != 1) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Unsupported number of GPUs: %d",
+            getNumSubDevicesParams.numSubDevices);
+        goto failed;
+    }
+
+    hRmSubDevice = nvKmsKapiGenerateRmHandle(device);
+
+    if (hRmSubDevice == 0x0) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM handle");
+        goto failed;
+    }
+
+    subdevAllocParams.subDeviceId = 0;
+
+    ret = nvRmApiAlloc(device->hRmClient,
+                       device->hRmDevice,
+                       hRmSubDevice,
+                       NV20_SUBDEVICE_0,
+                       &subdevAllocParams);
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to initialize subDevice");
+        nvKmsKapiFreeRmHandle(device, hRmSubDevice);
+        goto failed;
+    }
+
+    device->hRmSubDevice = hRmSubDevice;
+
+    if (device->migDevice != NO_MIG_DEVICE) {
+        device->smgGpuInstSubscriptionHdl = nvKmsKapiGenerateRmHandle(device);
+        device->smgComputeInstSubscriptionHdl =
nvKmsKapiGenerateRmHandle(device);
+
+        if (!device->smgGpuInstSubscriptionHdl || !device->smgComputeInstSubscriptionHdl) {
+            goto failed;
+        }
+
+        if (!nvSMGSubscribeSubDevToPartition(&device->rmSmgContext,
+                                             device->hRmSubDevice,
+                                             device->migDevice,
+                                             device->smgGpuInstSubscriptionHdl,
+                                             device->smgComputeInstSubscriptionHdl)) {
+            nvKmsKapiLogDeviceDebug(device, "Unable to configure MIG (Multi-Instance GPU) partition");
+            goto failed;
+        }
+    }
+
+    if (device->isSOC) {
+        /* NVKMS is only used on T23X and later chips,
+         * which all support generic memory. */
+        supportsGenericPageKind = NV_TRUE;
+    } else {
+        ret = nvRmApiControl(device->hRmClient,
+                             device->hRmDevice,
+                             NV0080_CTRL_CMD_FB_GET_CAPS_V2,
+                             &fbCapsParams,
+                             sizeof (fbCapsParams));
+        if (ret != NVOS_STATUS_SUCCESS) {
+            nvKmsKapiLogDeviceDebug(device, "Failed to query framebuffer capabilities");
+            goto failed;
+        }
+        supportsGenericPageKind =
+            NV0080_CTRL_FB_GET_CAP(fbCapsParams.capsTbl,
+                                   NV0080_CTRL_FB_CAPS_GENERIC_PAGE_KIND);
+    }
+
+    device->caps.genericPageKind =
+        supportsGenericPageKind ?
+        0x06 /* NV_MMU_PTE_KIND_GENERIC_MEMORY */ :
+        0xfe /* NV_MMU_PTE_KIND_GENERIC_16BX2 */;
+
+    return NV_TRUE;
+
+failed:
+
+    RmFreeDevice(device);
+
+    return NV_FALSE;
+}
+
+/*
+ * Helper function to free NVKMS objects allocated for NvKmsKapiDevice.
+ */
+static void KmsFreeDevice(struct NvKmsKapiDevice *device)
+{
+    /* Free notifier and semaphore memory */
+
+    nvKmsKapiFreeNisoSurface(device, &device->semaphore);
+    nvKmsKapiFreeNisoSurface(device, &device->notifier);
+
+    /* Free NVKMS device */
+
+    if (device->hKmsDevice != 0x0) {
+        struct NvKmsFreeDeviceParams paramsFree = { };
+
+        paramsFree.request.deviceHandle = device->hKmsDevice;
+
+        nvkms_ioctl_from_kapi(device->pKmsOpen,
+                              NVKMS_IOCTL_FREE_DEVICE,
+                              &paramsFree, sizeof(paramsFree));
+
+        device->hKmsDevice = device->hKmsDisp = 0x0;
+    }
+
+    /* Close NVKMS */
+
+    if (device->pKmsOpen != NULL) {
+        nvkms_close_from_kapi(device->pKmsOpen);
+        device->pKmsOpen = NULL;
+    }
+}
+
+/*
+ * Helper function to allocate NVKMS objects for NvKmsKapiDevice.
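+ *
+ * Opens a connection to NVKMS, allocates an NVKMS device and caches its
+ * capabilities from the allocation reply, then allocates the notifier and
+ * semaphore surfaces. If no display hardware is available, the device is
+ * left in displayless mode (hKmsDevice remains 0) and allocation still
+ * succeeds.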
+ */
+static NvBool KmsAllocateDevice(struct NvKmsKapiDevice *device)
+{
+    struct NvKmsAllocDeviceParams *paramsAlloc;
+    NvBool status;
+    NvBool inVideoMemory = FALSE;
+    NvU32 head;
+    NvBool ret = FALSE;
+    NvU32 layer;
+
+    paramsAlloc = nvKmsKapiCalloc(1, sizeof(*paramsAlloc));
+    if (paramsAlloc == NULL) {
+        return FALSE;
+    }
+
+    /* Open NVKMS */
+
+    device->pKmsOpen = nvkms_open_from_kapi(device);
+
+    if (device->pKmsOpen == NULL) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to open NVKMS");
+        goto done;
+    }
+
+    /* Allocate NVKMS device */
+
+    nvkms_strncpy(
+        paramsAlloc->request.versionString,
+        NV_VERSION_STRING,
+        sizeof(paramsAlloc->request.versionString));
+
+    paramsAlloc->request.deviceId.rmDeviceId = device->deviceInstance;
+    paramsAlloc->request.deviceId.migDevice = device->migDevice;
+    paramsAlloc->request.sliMosaic = NV_FALSE;
+    paramsAlloc->request.enableConsoleHotplugHandling = NV_TRUE;
+
+    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_ALLOC_DEVICE,
+                                   paramsAlloc, sizeof(*paramsAlloc));
+
+    if (!status ||
+        paramsAlloc->reply.status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) {
+
+        if (paramsAlloc->reply.status ==
+            NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE) {
+            nvKmsKapiLogDeviceDebug(
+                device,
+                "Display hardware is not available; falling back to "
+                "displayless mode");
+
+            ret = TRUE;
+            goto done;
+        }
+
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to allocate NVKMS device %u(%u): %d %d\n",
+            device->gpuId,
+            paramsAlloc->request.deviceId.rmDeviceId,
+            status,
+            paramsAlloc->reply.status);
+
+        goto done;
+    }
+
+    device->hKmsDevice = paramsAlloc->reply.deviceHandle;
+
+    device->caps.cursorCompositionCaps =
+        paramsAlloc->reply.cursorCompositionCaps;
+
+    device->caps.overlayCompositionCaps =
+        paramsAlloc->reply.layerCaps[NVKMS_OVERLAY_LAYER].composition;
+
+    device->caps.validLayerRRTransforms =
+        paramsAlloc->reply.validLayerRRTransforms;
+
+    device->caps.maxWidthInPixels = paramsAlloc->reply.maxWidthInPixels;
+    device->caps.maxHeightInPixels = paramsAlloc->reply.maxHeightInPixels;
+    device->caps.maxCursorSizeInPixels = paramsAlloc->reply.maxCursorSize;
+    device->caps.requiresVrrSemaphores = paramsAlloc->reply.requiresVrrSemaphores;
+
+    device->caps.supportsInputColorSpace =
+        paramsAlloc->reply.supportsInputColorSpace;
+    device->caps.supportsInputColorRange =
+        paramsAlloc->reply.supportsInputColorRange;
+
+    /* XXX Add LUT support */
+
+    device->numHeads = paramsAlloc->reply.numHeads;
+
+    device->vtFbBaseAddress = paramsAlloc->reply.vtFbBaseAddress;
+    device->vtFbSize = paramsAlloc->reply.vtFbSize;
+
+    for (head = 0; head < device->numHeads; head++) {
+        if (paramsAlloc->reply.numLayers[head] < 1) {
+            goto done;
+        }
+        device->numLayers[head] = paramsAlloc->reply.numLayers[head];
+    }
+
+    for (layer = 0; layer < NVKMS_KAPI_LAYER_MAX; layer++) {
+        device->supportedSurfaceMemoryFormats[layer] =
+            paramsAlloc->reply.layerCaps[layer].supportedSurfaceMemoryFormats;
+        device->supportsICtCp[layer] = paramsAlloc->reply.layerCaps[layer].supportsICtCp;
+
+        device->lutCaps.layer[layer].ilut =
+            paramsAlloc->reply.layerCaps[layer].ilut;
+        device->lutCaps.layer[layer].tmo =
+            paramsAlloc->reply.layerCaps[layer].tmo;
+    }
+    device->lutCaps.olut = paramsAlloc->reply.olutCaps;
+
+    if (paramsAlloc->reply.validNIsoFormatMask &
+        (1 << NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY)) {
+        device->notifier.format = NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY;
+    } else if (paramsAlloc->reply.validNIsoFormatMask &
+               (1 << NVKMS_NISO_FORMAT_FOUR_WORD)) {
+        device->notifier.format =
NVKMS_NISO_FORMAT_FOUR_WORD; + } else { + nvAssert(paramsAlloc->reply.validNIsoFormatMask & + (1 << NVKMS_NISO_FORMAT_LEGACY)); + device->notifier.format = NVKMS_NISO_FORMAT_LEGACY; + } + + if (paramsAlloc->reply.validNIsoFormatMask & + (1 << NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY)) { + device->semaphore.format = NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY; + } else { + nvAssert(paramsAlloc->reply.validNIsoFormatMask & + (1 << NVKMS_NISO_FORMAT_LEGACY)); + device->semaphore.format = NVKMS_NISO_FORMAT_LEGACY; + } + + /* XXX Add support for SLI/multiple display engines per device */ + if (paramsAlloc->reply.numDisps != 1) + { + nvKmsKapiLogDeviceDebug(device, "Found unsupported SLI configuration"); + goto done; + } + + device->hKmsDisp = paramsAlloc->reply.dispHandles[0]; + device->dispIdx = 0; + + device->subDeviceMask = paramsAlloc->reply.subDeviceMask; + + device->isoIOCoherencyModes = paramsAlloc->reply.isoIOCoherencyModes; + device->nisoIOCoherencyModes = paramsAlloc->reply.nisoIOCoherencyModes; + + device->supportsSyncpts = paramsAlloc->reply.supportsSyncpts; + + if (paramsAlloc->reply.nIsoSurfacesInVidmemOnly) { + inVideoMemory = TRUE; + } + + /* Allocate notifier memory */ + if (!nvKmsKapiAllocateNotifiers(device, inVideoMemory)) { + nvKmsKapiLogDebug( + "Failed to allocate Notifier objects for GPU ID 0x%08x", + device->gpuId); + goto done; + } + + /* Allocate semaphore memory in video memory whenever available */ + if (!nvKmsKapiAllocateSemaphores(device, !device->isSOC)) { + nvKmsKapiLogDebug( + "Failed to allocate Semaphore objects for GPU ID 0x%08x", + device->gpuId); + goto done; + } + + ret = NV_TRUE; + +done: + if (!ret) { + KmsFreeDevice(device); + } + + nvKmsKapiFree(paramsAlloc); + + return ret; +} + +static void FreeDevice(struct NvKmsKapiDevice *device) +{ + /* Free NVKMS objects allocated for NvKmsKapiDevice */ + + KmsFreeDevice(device); + + /* Free RM objects allocated for NvKmsKapiDevice */ + + RmFreeDevice(device); + + /* Lower the reference count of gpu. */ + + nvkms_close_gpu(device->gpuId); + + if (device->pSema != NULL) { + nvkms_sema_free(device->pSema); + } + + nvKmsKapiFree(device); +} + +NvBool nvKmsKapiAllocateSystemMemory(struct NvKmsKapiDevice *device, + NvU32 hRmHandle, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + enum NvKmsKapiAllocationType type, + NvU8 *compressible) +{ + NvU32 ret; + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + const NvKmsDispIOCoherencyModes *pIOCoherencyModes = NULL; + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + memAllocParams.size = size; + + switch (layout) { + case NvKmsSurfaceMemoryLayoutBlockLinear: + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR, + memAllocParams.attr); + if (*compressible) { + /* + * RM will choose a compressed page kind and hence allocate + * comptags for color surfaces >= 32bpp. The actual kind + * chosen isn't important, as it can be overridden by creating + * a virtual alloc with a different kind when mapping the + * memory into the GPU. 
+ */ + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _DEPTH, _32, + memAllocParams.attr); + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COMPR, _ANY, + memAllocParams.attr); + } else { + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _DEPTH, _UNKNOWN, + memAllocParams.attr); + } + break; + + case NvKmsSurfaceMemoryLayoutPitch: + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _FORMAT, _PITCH, + memAllocParams.attr); + break; + + default: + nvKmsKapiLogDeviceDebug(device, "Unknown Memory Layout"); + return NV_FALSE; + } + + switch (type) { + case NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT: + /* XXX Note compression and scanout do not work together on + * any current GPUs. However, some use cases do involve scanning + * out a compression-capable surface: + * + * 1) Mapping the compressible surface as non-compressed when + * generating its content. + * + * 2) Using decompress-in-place to decompress the surface content + * before scanning it out. + * + * Hence creating compressed allocations of TYPE_SCANOUT is allowed. + */ + + pIOCoherencyModes = &device->isoIOCoherencyModes; + + memAllocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _ISO, + _YES, memAllocParams.attr2); + break; + case NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER: + if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + nvKmsKapiLogDeviceDebug(device, + "Attempting creation of BlockLinear notifier memory"); + return NV_FALSE; + } + + memAllocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _NISO_DISPLAY, + _YES, memAllocParams.attr2); + + pIOCoherencyModes = &device->nisoIOCoherencyModes; + + break; + case NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN: + memAllocParams.flags |= NVOS32_ALLOC_FLAGS_NO_SCANOUT; + break; + default: + nvKmsKapiLogDeviceDebug(device, "Unknown Allocation Type"); + return NV_FALSE; + } + + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, + memAllocParams.attr); + memAllocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, + memAllocParams.attr2); + + if (pIOCoherencyModes == NULL || !pIOCoherencyModes->coherent) { + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, + _WRITE_COMBINE, memAllocParams.attr); + } else { + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, + _WRITE_BACK, memAllocParams.attr); + } + + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, + memAllocParams.attr); + + ret = nvRmApiAlloc(device->hRmClient, + device->hRmDevice, + hRmHandle, + NV01_MEMORY_SYSTEM, + &memAllocParams); + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "nvRmApiAlloc failed with error code 0x%08x", + ret); + + return NV_FALSE; + } + + if (FLD_TEST_DRF(OS32, _ATTR, _COMPR, _NONE, + memAllocParams.attr)) { + *compressible = 0; + } else { + *compressible = 1; + } + + return TRUE; +} + +NvBool nvKmsKapiAllocateVideoMemory(struct NvKmsKapiDevice *device, + NvU32 hRmHandle, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + enum NvKmsKapiAllocationType type, + NvU8 *compressible) +{ + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + NvU32 ret; + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + memAllocParams.size = size; + + switch (layout) { + case NvKmsSurfaceMemoryLayoutBlockLinear: + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR, + memAllocParams.attr); + + if (*compressible) { + /* + * RM will choose a compressed page kind and hence allocate + * comptags for color surfaces >= 32bpp. 
The actual kind + * chosen isn't important, as it can be overridden by creating + * a virtual alloc with a different kind when mapping the + * memory into the GPU. + */ + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _DEPTH, _32, + memAllocParams.attr); + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _COMPR, _ANY, + memAllocParams.attr); + } else { + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _DEPTH, _UNKNOWN, + memAllocParams.attr); + } + break; + + case NvKmsSurfaceMemoryLayoutPitch: + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _FORMAT, _PITCH, + memAllocParams.attr); + break; + + default: + nvKmsKapiLogDeviceDebug(device, "Unknown Memory Layout"); + return NV_FALSE; + } + + + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, + memAllocParams.attr); + memAllocParams.attr2 = + FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, + memAllocParams.attr2); + + switch (type) { + case NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT: + /* XXX [JRJ] Not quite right. This can also be used to allocate + * cursor images. The stuff RM does with this field is kind of + * black magic, and I can't tell if it actually matters. + */ + memAllocParams.type = NVOS32_TYPE_PRIMARY; + + memAllocParams.alignment = NV_EVO_SURFACE_ALIGNMENT; + memAllocParams.flags |= + NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE | /* Pick up above EVO alignment */ + NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP; /* X sets this for cursors */ + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, + memAllocParams.attr); + + memAllocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _ISO, + _YES, memAllocParams.attr2); + + /* XXX [JRJ] Note compression and scanout do not work together on + * any current GPUs. However, some use cases do involve scanning + * out a compression-capable surface: + * + * 1) Mapping the compressible surface as non-compressed when + * generating its content. + * + * 2) Using decompress-in-place to decompress the surface content + * before scanning it out. + * + * Hence creating compressed allocations of TYPE_SCANOUT is allowed. 
+ */ + + break; + case NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER: + if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + nvKmsKapiLogDeviceDebug(device, + "Attempting creation of BlockLinear notifier memory"); + return NV_FALSE; + } + + memAllocParams.type = NVOS32_TYPE_DMA; + + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _4KB, + memAllocParams.attr); + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED, + memAllocParams.attr); + + break; + case NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN: + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.flags |= + NVOS32_ALLOC_FLAGS_NO_SCANOUT | + NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP; + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, + memAllocParams.attr); + break; + default: + nvKmsKapiLogDeviceDebug(device, "Unknown Allocation Type"); + return NV_FALSE; + } + + ret = nvRmApiAlloc(device->hRmClient, + device->hRmDevice, + hRmHandle, + NV01_MEMORY_LOCAL_USER, + &memAllocParams); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "VidHeapControl failed with error code 0x%08x", + ret); + + return NV_FALSE; + } + + if (FLD_TEST_DRF(OS32, _ATTR, _COMPR, _NONE, + memAllocParams.attr)) { + *compressible = 0; + } else { + *compressible = 1; + } + + return NV_TRUE; +} + +static struct NvKmsKapiDevice* AllocateDevice +( + const struct NvKmsKapiAllocateDeviceParams *params +) +{ + struct NvKmsKapiDevice *device = NULL; + + device = nvKmsKapiCalloc(1, sizeof(*device)); + + if (device == NULL) { + nvKmsKapiLogDebug( + "Failed to allocate memory for NvKmsKapiDevice of GPU ID 0x%08x", + params->gpuId); + goto failed; + } + + device->pSema = nvkms_sema_alloc(); + + if (device->pSema == NULL) { + nvKmsKapiLogDebug( + "Failed to allocate semaphore for NvKmsKapiDevice of GPU ID 0x%08x", + params->gpuId); + goto failed; + } + + /* Raise the reference count of gpu. 
*/
+
+    if (!nvkms_open_gpu(params->gpuId)) {
+        nvKmsKapiLogDebug("Failed to open GPU ID 0x%08x", params->gpuId);
+        goto failed;
+    }
+
+    device->gpuId = params->gpuId;
+    device->migDevice = params->migDevice;
+
+    nvKmsKapiLogDebug(
+        "Allocating NvKmsKapiDevice 0x%p for GPU ID 0x%08x",
+        device,
+        device->gpuId);
+
+    /* Allocate RM object for NvKmsKapiDevice */
+
+    if (!RmAllocateDevice(device)) {
+        nvKmsKapiLogDebug(
+            "Failed to allocate RM objects for GPU ID 0x%08x",
+            device->gpuId);
+        goto failed;
+    }
+
+    /* Allocate NVKMS objects for NvKmsKapiDevice */
+
+    if (!KmsAllocateDevice(device)) {
+        nvKmsKapiLogDebug(
+            "Failed to allocate NVKMS objects for GPU ID 0x%08x",
+            device->gpuId);
+        goto failed;
+    }
+
+    device->privateData = params->privateData;
+    device->eventCallback = params->eventCallback;
+
+    return device;
+
+failed:
+
+    if (device != NULL) {
+        FreeDevice(device);
+    }
+
+    return NULL;
+}
+
+static NvBool GrabOwnership(struct NvKmsKapiDevice *device)
+{
+    struct NvKmsGrabOwnershipParams paramsGrab = { };
+
+    if (device->hKmsDevice == 0x0) {
+        return NV_TRUE;
+    }
+
+    paramsGrab.request.deviceHandle = device->hKmsDevice;
+
+    return nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                 NVKMS_IOCTL_GRAB_OWNERSHIP,
+                                 &paramsGrab, sizeof(paramsGrab));
+}
+
+static void ReleaseOwnership(struct NvKmsKapiDevice *device)
+{
+    struct NvKmsReleaseOwnershipParams paramsRelease = { };
+
+    if (device->hKmsDevice == 0x0) {
+        return;
+    }
+
+    paramsRelease.request.deviceHandle = device->hKmsDevice;
+
+    nvkms_ioctl_from_kapi(device->pKmsOpen,
+                          NVKMS_IOCTL_RELEASE_OWNERSHIP,
+                          &paramsRelease, sizeof(paramsRelease));
+}
+
+static NvBool GrantPermissions
+(
+    NvS32 fd,
+    struct NvKmsKapiDevice *device,
+    NvU32 head,
+    NvKmsKapiDisplay display
+)
+{
+    struct NvKmsGrantPermissionsParams paramsGrant = { };
+    struct NvKmsPermissions *perm = &paramsGrant.request.permissions;
+    NvU32 dispIdx;
+
+    if (device == NULL) {
+        return NV_FALSE;
+    }
+
+    dispIdx = device->dispIdx;
+
+    if (dispIdx >= ARRAY_LEN(perm->modeset.disp) ||
+        head >= ARRAY_LEN(perm->modeset.disp[0].head)) {
+        return NV_FALSE;
+    }
+
+    if (device->hKmsDevice == 0x0) {
+        return NV_TRUE;
+    }
+
+    perm->type = NV_KMS_PERMISSIONS_TYPE_MODESET;
+    perm->modeset.disp[dispIdx].head[head].dpyIdList =
+        nvAddDpyIdToEmptyDpyIdList(nvNvU32ToDpyId(display));
+
+    paramsGrant.request.fd = fd;
+    paramsGrant.request.deviceHandle = device->hKmsDevice;
+
+    return nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                 NVKMS_IOCTL_GRANT_PERMISSIONS, &paramsGrant,
+                                 sizeof(paramsGrant));
+}
+
+static NvBool RevokePermissions
+(
+    struct NvKmsKapiDevice *device,
+    NvU32 head,
+    NvKmsKapiDisplay display
+)
+{
+    struct NvKmsRevokePermissionsParams paramsRevoke = { };
+    struct NvKmsPermissions *perm = &paramsRevoke.request.permissions;
+    NvU32 dispIdx;
+
+    if (device == NULL) {
+        return NV_FALSE;
+    }
+
+    dispIdx = device->dispIdx;
+
+    if (dispIdx >= ARRAY_LEN(perm->modeset.disp) ||
+        head >= ARRAY_LEN(perm->modeset.disp[0].head)) {
+        return NV_FALSE;
+    }
+
+    if (device->hKmsDevice == 0x0) {
+        return NV_TRUE;
+    }
+
+    perm->type = NV_KMS_PERMISSIONS_TYPE_MODESET;
+    perm->modeset.disp[dispIdx].head[head].dpyIdList =
+        nvAddDpyIdToEmptyDpyIdList(nvNvU32ToDpyId(display));
+
+    paramsRevoke.request.deviceHandle = device->hKmsDevice;
+
+    return nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                 NVKMS_IOCTL_REVOKE_PERMISSIONS, &paramsRevoke,
+                                 sizeof(paramsRevoke));
+}
+
+static NvBool GrantSubOwnership
+(
+    NvS32 fd,
+    struct NvKmsKapiDevice *device
+)
+{
+    struct NvKmsGrantPermissionsParams paramsGrant = { };
+    struct NvKmsPermissions *perm = &paramsGrant.request.permissions;
+
+    if (device->hKmsDevice == 0x0) {
+        return NV_TRUE;
+    }
+
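+    /*
+     * Unlike the per-head modeset permissions granted above, sub-owner
+     * permission applies to the device as a whole; RevokeSubOwnership()
+     * below correspondingly revokes flipping and modeset permissions along
+     * with sub-ownership.
+     */
+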
+static NvBool GrantSubOwnership
+(
+    NvS32 fd,
+    struct NvKmsKapiDevice *device
+)
+{
+    struct NvKmsGrantPermissionsParams paramsGrant = { };
+    struct NvKmsPermissions *perm = &paramsGrant.request.permissions;
+
+    if (device->hKmsDevice == 0x0) {
+        return NV_TRUE;
+    }
+
+    perm->type = NV_KMS_PERMISSIONS_TYPE_SUB_OWNER;
+
+    paramsGrant.request.fd = fd;
+    paramsGrant.request.deviceHandle = device->hKmsDevice;
+
+    return nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                 NVKMS_IOCTL_GRANT_PERMISSIONS, &paramsGrant,
+                                 sizeof(paramsGrant));
+}
+
+static NvBool RevokeSubOwnership
+(
+    struct NvKmsKapiDevice *device
+)
+{
+    struct NvKmsRevokePermissionsParams paramsRevoke = { };
+
+    if (device->hKmsDevice == 0x0) {
+        return NV_TRUE;
+    }
+
+    paramsRevoke.request.permissionsTypeBitmask =
+        NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) |
+        NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET) |
+        NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER);
+    paramsRevoke.request.deviceHandle = device->hKmsDevice;
+
+    return nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                 NVKMS_IOCTL_REVOKE_PERMISSIONS, &paramsRevoke,
+                                 sizeof(paramsRevoke));
+}
+
+static NvBool DeclareEventInterest
+(
+    const struct NvKmsKapiDevice *device,
+    const NvU32 interestMask
+)
+{
+    struct NvKmsDeclareEventInterestParams kmsEventParams = { };
+
+    if (device->hKmsDevice == 0x0 || device->eventCallback == NULL) {
+        return NV_TRUE;
+    }
+
+    kmsEventParams.request.interestMask =
+        interestMask & NVKMS_KAPI_SUPPORTED_EVENTS_MASK;
+
+    return nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                 NVKMS_IOCTL_DECLARE_EVENT_INTEREST,
+                                 &kmsEventParams, sizeof(kmsEventParams));
+}
+
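+/*
+ * Fill 'info' with the resource and capability information for 'device':
+ * semaphore surface layout, head/layer counts, connector handles, and the
+ * composition, size, and surface-format capabilities reported by NVKMS.
+ */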
+static NvBool GetDeviceResourcesInfo
+(
+    struct NvKmsKapiDevice *device,
+    struct NvKmsKapiDeviceResourcesInfo *info
+)
+{
+    struct NvKmsQueryDispParams paramsDisp = { };
+    NV2080_CTRL_FB_GET_SEMAPHORE_SURFACE_LAYOUT_PARAMS semsurfLayoutParams = { };
+    NvBool status = NV_FALSE;
+    NvU32 ret;
+
+    NvU32 i;
+
+    nvkms_memset(info, 0, sizeof(*info));
+
+    ret = nvRmApiControl(device->hRmClient,
+                         device->hRmSubDevice,
+                         NV2080_CTRL_CMD_FB_GET_SEMAPHORE_SURFACE_LAYOUT,
+                         &semsurfLayoutParams,
+                         sizeof(semsurfLayoutParams));
+
+    if (ret == NVOS_STATUS_SUCCESS) {
+        info->caps.semsurf.stride = semsurfLayoutParams.size;
+        info->caps.semsurf.maxSubmittedOffset =
+            semsurfLayoutParams.maxSubmittedSemaphoreValueOffset;
+    } else {
+        /* Non-fatal. No semaphore surface support. */
+        info->caps.semsurf.stride = 0;
+        info->caps.semsurf.maxSubmittedOffset = 0;
+    }
+
+    info->caps.hasVideoMemory = !device->isSOC;
+    info->caps.genericPageKind = device->caps.genericPageKind;
+    info->caps.requiresVrrSemaphores = device->caps.requiresVrrSemaphores;
+
+    info->vtFbBaseAddress = device->vtFbBaseAddress;
+    info->vtFbSize = device->vtFbSize;
+
+    if (device->hKmsDevice == 0x0) {
+        info->caps.pitchAlignment = 0x1;
+        return NV_TRUE;
+    }
+
+    paramsDisp.request.deviceHandle = device->hKmsDevice;
+    paramsDisp.request.dispHandle = device->hKmsDisp;
+
+    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_QUERY_DISP,
+                                   &paramsDisp, sizeof(paramsDisp));
+
+    if (!status) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to query display engine information");
+
+        goto done;
+    }
+
+    info->numHeads = device->numHeads;
+
+    ct_assert(sizeof(info->numLayers) == sizeof(device->numLayers));
+    nvkms_memcpy(info->numLayers, device->numLayers, sizeof(device->numLayers));
+
+    ct_assert(ARRAY_LEN(info->connectorHandles) >=
+              ARRAY_LEN(paramsDisp.reply.connectorHandles));
+
+    info->numConnectors = paramsDisp.reply.numConnectors;
+
+    for (i = 0; i < paramsDisp.reply.numConnectors; i++) {
+        info->connectorHandles[i] = paramsDisp.reply.connectorHandles[i];
+    }
+
+    {
+        const struct NvKmsCompositionCapabilities *pCaps =
+            &device->caps.cursorCompositionCaps;
+
+        info->caps.validCursorCompositionModes =
+            pCaps->colorKeySelect[NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE].
+                supportedBlendModes[1];
+    }
+
+    for (i = 0; i < NVKMS_KAPI_LAYER_MAX; i++) {
+        if (i == NVKMS_KAPI_LAYER_PRIMARY_IDX) {
+            info->caps.layer[i].validCompositionModes =
+                NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE);
+        } else {
+            const struct NvKmsCompositionCapabilities *pCaps =
+                &device->caps.overlayCompositionCaps;
+
+            info->caps.layer[i].validCompositionModes =
+                pCaps->colorKeySelect[NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE].
+                    supportedBlendModes[1];
+        }
+    }
+
+    for (i = 0; i < NVKMS_KAPI_LAYER_MAX; i++) {
+        info->caps.layer[i].validRRTransforms =
+            device->caps.validLayerRRTransforms;
+    }
+
+    info->caps.maxWidthInPixels = device->caps.maxWidthInPixels;
+    info->caps.maxHeightInPixels = device->caps.maxHeightInPixels;
+    info->caps.maxCursorSizeInPixels = device->caps.maxCursorSizeInPixels;
+
+    info->caps.pitchAlignment = NV_EVO_PITCH_ALIGNMENT;
+
+    info->caps.supportsSyncpts = device->supportsSyncpts;
+    info->caps.supportsInputColorRange = device->caps.supportsInputColorRange;
+    info->caps.supportsInputColorSpace = device->caps.supportsInputColorSpace;
+
+    info->caps.supportedCursorSurfaceMemoryFormats =
+        NVBIT(NvKmsSurfaceMemoryFormatA8R8G8B8);
+
+    info->caps.numDisplaySemaphores = device->numDisplaySemaphores;
+
+    ct_assert(sizeof(info->supportedSurfaceMemoryFormats) ==
+              sizeof(device->supportedSurfaceMemoryFormats));
+
+    nvkms_memcpy(info->supportedSurfaceMemoryFormats,
+                 device->supportedSurfaceMemoryFormats,
+                 sizeof(device->supportedSurfaceMemoryFormats));
+
+    ct_assert(sizeof(info->supportsICtCp) ==
+              sizeof(device->supportsICtCp));
+
+    nvkms_memcpy(info->supportsICtCp,
+                 device->supportsICtCp,
+                 sizeof(device->supportsICtCp));
+
+    info->lutCaps = device->lutCaps;
+
+done:
+
+    return status;
+}
+
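+/* Return the NVKMS display handles of all valid dpys on 'device'. */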
+/*
+ * XXX Make it per-connector, query valid dpyId list as dynamic data of
+ * connector.
+ */
+static NvBool GetDisplays
+(
+    struct NvKmsKapiDevice *device,
+    NvU32 *numDisplays, NvKmsKapiDisplay *displayHandles
+)
+{
+    struct NvKmsQueryDispParams paramsDisp = { };
+    NvBool status = NV_FALSE;
+
+    NVDpyId dpyId;
+
+    if (device->hKmsDevice == 0x0) {
+        *numDisplays = 0;
+        return NV_TRUE;
+    }
+
+    paramsDisp.request.deviceHandle = device->hKmsDevice;
+    paramsDisp.request.dispHandle = device->hKmsDisp;
+
+    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_QUERY_DISP,
+                                   &paramsDisp, sizeof(paramsDisp));
+
+    if (!status) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to query display engine information");
+
+        return NV_FALSE;
+    }
+
+    if (*numDisplays == 0) {
+        goto done;
+    }
+
+    if (*numDisplays < nvCountDpyIdsInDpyIdList(paramsDisp.reply.validDpys)) {
+        nvKmsKapiLogDebug(
+            "Size of display handle array is less than number of displays");
+        goto done;
+    }
+
+    FOR_ALL_DPY_IDS(dpyId, paramsDisp.reply.validDpys) {
+        *(displayHandles++) = nvDpyIdToNvU32(dpyId);
+    }
+
+done:
+
+    *numDisplays = nvCountDpyIdsInDpyIdList(paramsDisp.reply.validDpys);
+
+    return NV_TRUE;
+}
+
+static NvBool GetConnectorInfo
+(
+    struct NvKmsKapiDevice *device,
+    NvKmsKapiConnector connector, struct NvKmsKapiConnectorInfo *info
+)
+{
+    struct NvKmsQueryConnectorStaticDataParams paramsConnector = { };
+    struct NvKmsQueryConnectorDynamicDataParams paramsDynamicConnector = { };
+    NvBool status = NV_FALSE;
+    NvU64 startTime = 0;
+    NvBool timeout;
+
+    if (device == NULL || info == NULL) {
+        goto done;
+    }
+
+    paramsConnector.request.deviceHandle = device->hKmsDevice;
+    paramsConnector.request.dispHandle = device->hKmsDisp;
+    paramsConnector.request.connectorHandle = connector;
+
+    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA,
+                                   &paramsConnector, sizeof(paramsConnector));
+
+    if (!status) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to query static data of connector 0x%08x",
+            connector);
+
+        goto done;
+    }
+
+    info->handle = connector;
+
+    info->physicalIndex = paramsConnector.reply.physicalIndex;
+
+    info->signalFormat = paramsConnector.reply.signalFormat;
+
+    info->type = paramsConnector.reply.type;
+
+    startTime = nvkms_get_usec();
+    do {
+        nvkms_memset(&paramsDynamicConnector, 0, sizeof(paramsDynamicConnector));
+        paramsDynamicConnector.request.deviceHandle = device->hKmsDevice;
+        paramsDynamicConnector.request.dispHandle = device->hKmsDisp;
+        paramsDynamicConnector.request.connectorHandle = connector;
+
+        if (!nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_QUERY_CONNECTOR_DYNAMIC_DATA,
+                                   &paramsDynamicConnector,
+                                   sizeof(paramsDynamicConnector))) {
+
+            nvKmsKapiLogDeviceDebug(
+                device,
+                "Failed to query dynamic data of connector 0x%08x",
+                connector);
+            status = NV_FALSE;
+
+            goto done;
+        }
+
+        timeout = nvkms_get_usec() - startTime >
+                  NVKMS_DP_DETECT_COMPLETE_TIMEOUT_USEC;
+
+        if (!paramsDynamicConnector.reply.detectComplete && !timeout) {
+            nvkms_usleep(NVKMS_DP_DETECT_COMPLETE_POLL_INTERVAL_USEC);
+        }
+    } while (!paramsDynamicConnector.reply.detectComplete && !timeout);
+
+    if (!paramsDynamicConnector.reply.detectComplete) {
+        nvKmsKapiLogDeviceDebug(device, "Timed out waiting for DisplayPort"
+                                " device detection to complete.");
+        status = NV_FALSE;
+    }
+
+    info->dynamicDpyIdList = paramsDynamicConnector.reply.dynamicDpyIdList;
+
+done:
+
+    return status;
+}
+
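+/*
+ * Fill 'info' with the static data (connector handle, DP address, head
+ * mask, DP MST state) of display 'display'.
+ */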
+static NvBool GetStaticDisplayInfo
+(
+    struct NvKmsKapiDevice *device,
+    NvKmsKapiDisplay display, struct NvKmsKapiStaticDisplayInfo *info
+)
+{
+    struct NvKmsQueryDpyStaticDataParams paramsDpyStatic = { };
+    NvBool status = NV_FALSE;
+
+    if (device == NULL || info == NULL) {
+        goto done;
+    }
+
+    /* Query static data of display */
+
+    paramsDpyStatic.request.deviceHandle = device->hKmsDevice;
+    paramsDpyStatic.request.dispHandle = device->hKmsDisp;
+
+    paramsDpyStatic.request.dpyId = nvNvU32ToDpyId(display);
+
+    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_QUERY_DPY_STATIC_DATA,
+                                   &paramsDpyStatic, sizeof(paramsDpyStatic));
+
+    if (!status) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to query static data of dpy 0x%08x",
+            display);
+
+        goto done;
+    }
+
+    info->handle = display;
+
+    info->connectorHandle = paramsDpyStatic.reply.connectorHandle;
+
+    ct_assert(sizeof(info->dpAddress) ==
+              sizeof(paramsDpyStatic.reply.dpAddress));
+
+    nvkms_memcpy(info->dpAddress,
+                 paramsDpyStatic.reply.dpAddress,
+                 sizeof(paramsDpyStatic.reply.dpAddress));
+    info->dpAddress[sizeof(paramsDpyStatic.reply.dpAddress) - 1] = '\0';
+
+    info->internal = paramsDpyStatic.reply.mobileInternal;
+    info->headMask = paramsDpyStatic.reply.headMask;
+    info->isDpMST = paramsDpyStatic.reply.isDpMST;
+
+done:
+
+    return status;
+}
+
+static NvBool GetDynamicDisplayInfo(
+    struct NvKmsKapiDevice *device,
+    struct NvKmsKapiDynamicDisplayParams *params)
+{
+    struct NvKmsQueryDpyDynamicDataParams *pParamsDpyDynamic = NULL;
+    NvBool status = NV_FALSE;
+
+    if (device == NULL || params == NULL) {
+        goto done;
+    }
+
+    pParamsDpyDynamic = nvKmsKapiCalloc(1, sizeof(*pParamsDpyDynamic));
+
+    if (pParamsDpyDynamic == NULL) {
+        goto done;
+    }
+
+    pParamsDpyDynamic->request.deviceHandle = device->hKmsDevice;
+    pParamsDpyDynamic->request.dispHandle = device->hKmsDisp;
+
+    pParamsDpyDynamic->request.dpyId = nvNvU32ToDpyId(params->handle);
+
+    if (params->overrideEdid) {
+        ct_assert(sizeof(params->edid.buffer) ==
+                  sizeof(pParamsDpyDynamic->reply.edid.buffer));
+        nvkms_memcpy(
+            pParamsDpyDynamic->request.edid.buffer,
+            params->edid.buffer,
+            sizeof(pParamsDpyDynamic->request.edid.buffer));
+
+        pParamsDpyDynamic->request.edid.bufferSize = params->edid.bufferSize;
+
+        pParamsDpyDynamic->request.overrideEdid = NV_TRUE;
+    }
+
+    pParamsDpyDynamic->request.forceConnected = params->forceConnected;
+
+    pParamsDpyDynamic->request.forceDisconnected = params->forceDisconnected;
+
+    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA,
+                                   pParamsDpyDynamic, sizeof(*pParamsDpyDynamic));
+
+    if (!status) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to query dynamic data of dpy 0x%08x",
+            params->handle);
+
+        goto done;
+    }
+
+    params->connected = pParamsDpyDynamic->reply.connected;
+
+    if (pParamsDpyDynamic->reply.connected && !params->overrideEdid) {
+
+        nvkms_memcpy(
+            params->edid.buffer,
+            pParamsDpyDynamic->reply.edid.buffer,
+            sizeof(params->edid.buffer));
+
+        params->edid.bufferSize = pParamsDpyDynamic->reply.edid.bufferSize;
+    }
+
+    if (pParamsDpyDynamic->reply.connected) {
+        NvBool vrrSupported =
+            (pParamsDpyDynamic->reply.vrrType != NVKMS_DPY_VRR_TYPE_NONE) ?
+                NV_TRUE : NV_FALSE;
+        params->vrrSupported = vrrSupported;
+    }
+
+done:
+
+    if (pParamsDpyDynamic != NULL) {
+        nvKmsKapiFree(pParamsDpyDynamic);
+    }
+
+    return status;
+}
+
+static void FreeMemory
+(
+    struct NvKmsKapiDevice *device, struct NvKmsKapiMemory *memory
+)
+{
+    if (device == NULL || memory == NULL) {
+        return;
+    }
+
+    if (memory->hRmHandle != 0x0) {
+        NvU32 ret;
+
+        ret = nvRmApiFree(device->hRmClient,
+                          device->hRmDevice,
+                          memory->hRmHandle);
+
+        if (ret != NVOS_STATUS_SUCCESS) {
+            nvKmsKapiLogDeviceDebug(
+                device,
+                "Failed to free RM memory object 0x%08x allocated for "
+                "NvKmsKapiMemory 0x%p",
+                memory->hRmHandle, memory);
+        }
+
+        nvKmsKapiFreeRmHandle(device, memory->hRmHandle);
+    }
+
+    nvKmsKapiFree(memory);
+}
+
+static struct NvKmsKapiMemory *AllocMemoryObjectAndHandle(
+    struct NvKmsKapiDevice *device,
+    NvU32 *handleOut
+)
+{
+    struct NvKmsKapiMemory *memory;
+
+    /* Allocate the container object */
+
+    memory = nvKmsKapiCalloc(1, sizeof(*memory));
+
+    if (memory == NULL) {
+        nvKmsKapiLogDebug(
+            "Failed to allocate memory for NVKMS memory object on "
+            "NvKmsKapiDevice 0x%p",
+            device);
+        return NULL;
+    }
+
+    /* Generate RM handle for memory object */
+
+    *handleOut = nvKmsKapiGenerateRmHandle(device);
+
+    if (*handleOut == 0x0) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to allocate RM handle for memory");
+        nvKmsKapiFree(memory);
+        return NULL;
+    }
+
+    return memory;
+}
+
+static struct NvKmsKapiMemory* AllocateMemory
+(
+    struct NvKmsKapiDevice *device,
+    struct NvKmsKapiAllocateMemoryParams *params
+)
+{
+    struct NvKmsKapiMemory *memory = NULL;
+    NvU32 hRmHandle;
+    NvBool allocSucceeded;
+
+    memory = AllocMemoryObjectAndHandle(device, &hRmHandle);
+
+    if (!memory) {
+        return NULL;
+    }
+
+    allocSucceeded =
+        params->useVideoMemory
+            ? nvKmsKapiAllocateVideoMemory(device, hRmHandle, params->layout,
+                                           params->size, params->type,
+                                           params->compressible)
+            : nvKmsKapiAllocateSystemMemory(device, hRmHandle, params->layout,
+                                            params->size, params->type,
+                                            params->compressible);
+    if (!allocSucceeded) {
+        nvKmsKapiFreeRmHandle(device, hRmHandle);
+        FreeMemory(device, memory);
+        return NULL;
+    }
+
+    memory->hRmHandle = hRmHandle;
+    memory->size = params->size;
+    memory->surfaceParams.layout = params->layout;
+    memory->noDisplayCaching = params->noDisplayCaching;
+    memory->isVidmem = params->useVideoMemory;
+
+    if (params->layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
+        memory->surfaceParams.blockLinear.genericMemory = NV_TRUE;
+    }
+
+    return memory;
+}
+
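+/*
+ * Import an RM memory object from the fd passed in the driver-private
+ * parameters, and wrap it in a new NvKmsKapiMemory object.
+ */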
+static struct NvKmsKapiMemory* ImportMemory
+(
+    struct NvKmsKapiDevice *device,
+    NvU64 memorySize,
+    NvU64 nvKmsParamsUser,
+    NvU64 nvKmsParamsSize
+)
+{
+    struct NvKmsKapiPrivImportMemoryParams nvKmsParams, *pNvKmsParams = NULL;
+    NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS importParams = { };
+    struct NvKmsKapiMemory *memory = NULL;
+    NvU32 hMemory;
+    NvU32 ret;
+    int status;
+
+    /* Verify the driver-private params size and copy it in from userspace */
+
+    if (nvKmsParamsSize != sizeof(nvKmsParams)) {
+        nvKmsKapiLogDebug(
+            "NVKMS private memory import parameter size mismatch - "
+            "expected: 0x%llx, caller specified: 0x%llx",
+            (NvU64)sizeof(nvKmsParams), nvKmsParamsSize);
+        return NULL;
+    }
+
+    /*
+     * Use a heap allocation as the destination pointer passed to
+     * nvkms_copyin; stack allocations created within core NVKMS may not
+     * be recognizable to the Linux kernel's CONFIG_HARDENED_USERCOPY
+     * checker, triggering false errors.  But then save the result to a
+     * variable on the stack, so that we can free the heap memory
+     * immediately and not worry about its lifetime.
+     */
+
+    pNvKmsParams = nvKmsKapiCalloc(1, sizeof(*pNvKmsParams));
+
+    if (pNvKmsParams == NULL) {
+        nvKmsKapiLogDebug("Failed to allocate memory for ImportMemory");
+        return NULL;
+    }
+
+    status = nvkms_copyin(pNvKmsParams, nvKmsParamsUser, sizeof(*pNvKmsParams));
+
+    nvKmsParams = *pNvKmsParams;
+
+    nvKmsKapiFree(pNvKmsParams);
+
+    if (status != 0) {
+        nvKmsKapiLogDebug(
+            "NVKMS private memory import parameters could not be read from "
+            "userspace");
+        return NULL;
+    }
+
+    memory = AllocMemoryObjectAndHandle(device, &hMemory);
+
+    if (!memory) {
+        return NULL;
+    }
+
+    importParams.fd = nvKmsParams.memFd;
+    importParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM;
+    importParams.object.data.rmObject.hDevice = device->hRmDevice;
+    importParams.object.data.rmObject.hParent = device->hRmDevice;
+    importParams.object.data.rmObject.hObject = hMemory;
+
+    ret = nvRmApiControl(device->hRmClient,
+                         device->hRmClient,
+                         NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD,
+                         &importParams,
+                         sizeof(importParams));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to import RM memory object (%d) of size %llu bytes",
+            nvKmsParams.memFd, memorySize);
+
+        nvKmsKapiFreeRmHandle(device, hMemory);
+        goto failed;
+    }
+
+    memory->hRmHandle = hMemory;
+    memory->size = memorySize;
+    memory->surfaceParams = nvKmsParams.surfaceParams;
+
+    /*
+     * Determine address space of imported memory. For Tegra, there is only a
+     * single unified address space.
+     */
+    if (!device->isSOC) {
+        NV0041_CTRL_GET_SURFACE_INFO_PARAMS surfaceInfoParams = {};
+        NV0041_CTRL_SURFACE_INFO surfaceInfo = {};
+
+        surfaceInfo.index = NV0041_CTRL_SURFACE_INFO_INDEX_ADDR_SPACE_TYPE;
+
+        surfaceInfoParams.surfaceInfoListSize = 1;
+        surfaceInfoParams.surfaceInfoList = (NvP64)&surfaceInfo;
+
+        ret = nvRmApiControl(device->hRmClient,
+                             memory->hRmHandle,
+                             NV0041_CTRL_CMD_GET_SURFACE_INFO,
+                             &surfaceInfoParams,
+                             sizeof(surfaceInfoParams));
+
+        if (ret != NVOS_STATUS_SUCCESS) {
+            nvKmsKapiLogDeviceDebug(
+                device,
+                "Failed to get memory location of RM memory object 0x%x",
+                memory->hRmHandle);
+
+            /*
+             * memory->hRmHandle is already set here, so FreeMemory() below
+             * frees both the RM object and its handle; do not free the
+             * handle a second time.
+             */
+            goto failed;
+        }
+
+        memory->isVidmem =
+            surfaceInfo.data == NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM;
+    }
+
+    return memory;
+
+failed:
+
+    FreeMemory(device, memory);
+
+    return NULL;
+}
+
+static struct NvKmsKapiMemory* DupMemory
+(
+    struct NvKmsKapiDevice *device,
+    const struct NvKmsKapiDevice *srcDevice,
+    const struct NvKmsKapiMemory *srcMemory
+)
+{
+    struct NvKmsKapiMemory *memory;
+    NvU32 hMemory;
+    NvU32 ret;
+
+    if (srcMemory->isVidmem &&
+        (device->gpuId != srcDevice->gpuId)) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "It is not possible to dup an NVKMS memory object located on the "
+            "vidmem of a different device");
+        return NULL;
+    }
+
+    memory = AllocMemoryObjectAndHandle(device, &hMemory);
+
+    if (!memory) {
+        return NULL;
+    }
+
+    ret = nvRmApiDupObject(device->hRmClient,
+                           device->hRmDevice,
+                           hMemory,
+                           srcDevice->hRmClient,
+                           srcMemory->hRmHandle,
+                           0);
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to dup NVKMS memory object 0x%p (0x%08x, 0x%08x) "
+            "of size %llu bytes",
+            srcMemory, srcDevice->hRmClient, srcMemory->hRmHandle,
+            srcMemory->size);
+
+        nvKmsKapiFreeRmHandle(device, hMemory);
+        goto failed;
+    }
+
+    memory->hRmHandle = hMemory;
+    memory->size = srcMemory->size;
+    memory->surfaceParams = srcMemory->surfaceParams;
+    memory->isVidmem = srcMemory->isVidmem;
+
+    return memory;
+
+failed:
+    FreeMemory(device, memory);
+
+    return NULL;
+}
+
+static NvBool ExportMemory
+(
+    const struct NvKmsKapiDevice *device,
+    const struct NvKmsKapiMemory *memory,
+    NvU64 nvKmsParamsUser,
+    NvU64 nvKmsParamsSize
+)
+{
+    struct NvKmsKapiPrivExportMemoryParams nvKmsParams, *pNvKmsParams = NULL;
+    NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS exportParams = { };
+    int status;
+    NvU32 ret;
+
+    if (device == NULL || memory == NULL) {
+        nvKmsKapiLogDebug(
+            "Invalid device or memory parameter while exporting memory");
+        return NV_FALSE;
+    }
+
+    /* Verify the driver-private params size and copy it in from userspace */
+
+    if (nvKmsParamsSize != sizeof(nvKmsParams)) {
+        nvKmsKapiLogDebug(
+            "NVKMS private memory export parameter size mismatch - "
+            "expected: 0x%llx, caller specified: 0x%llx",
+            (NvU64)sizeof(nvKmsParams), nvKmsParamsSize);
+        return NV_FALSE;
+    }
+
+    /*
+     * Use a heap allocation as the destination pointer passed to
+     * nvkms_copyin; stack allocations created within core NVKMS may not
+     * be recognizable to the Linux kernel's CONFIG_HARDENED_USERCOPY
+     * checker, triggering false errors.  But then save the result to a
+     * variable on the stack, so that we can free the heap memory
+     * immediately and not worry about its lifetime.
+     */
+
+    pNvKmsParams = nvKmsKapiCalloc(1, sizeof(*pNvKmsParams));
+
+    if (pNvKmsParams == NULL) {
+        nvKmsKapiLogDebug("Failed to allocate scratch memory for ExportMemory");
+        return NV_FALSE;
+    }
+
+    status = nvkms_copyin(pNvKmsParams, nvKmsParamsUser, sizeof(*pNvKmsParams));
+
+    nvKmsParams = *pNvKmsParams;
+    nvKmsKapiFree(pNvKmsParams);
+
+    if (status != 0) {
+        nvKmsKapiLogDebug(
+            "NVKMS private memory export parameters could not be read from "
+            "userspace");
+        return NV_FALSE;
+    }
+
+    exportParams.fd = nvKmsParams.memFd;
+    exportParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM;
+    exportParams.object.data.rmObject.hDevice = device->hRmDevice;
+    exportParams.object.data.rmObject.hParent = device->hRmDevice;
+    exportParams.object.data.rmObject.hObject = memory->hRmHandle;
+
+    ret = nvRmApiControl(device->hRmClient,
+                         device->hRmClient,
+                         NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD,
+                         &exportParams,
+                         sizeof(exportParams));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to export RM memory object of size %llu bytes "
+            "to (%d)", memory->size, nvKmsParams.memFd);
+        return NV_FALSE;
+    }
+
+    return NV_TRUE;
+}
+
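+/*
+ * Wrap an OS memory descriptor (an SGT/GEM pair or a dma-buf pointer) in an
+ * RM system-memory object and a new NvKmsKapiMemory object.
+ */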
+static struct NvKmsKapiMemory*
+GetSystemMemoryHandleFromDmaBufSgtHelper(struct NvKmsKapiDevice *device,
+                                         NvU32 descriptorType,
+                                         NvP64 descriptor,
+                                         NvU32 limit)
+{
+    NvU32 ret;
+    NV_OS_DESC_MEMORY_ALLOCATION_PARAMS memAllocParams = {0};
+    struct NvKmsKapiMemory *memory = NULL;
+    NvU32 hRmHandle;
+
+    memory = AllocMemoryObjectAndHandle(device, &hRmHandle);
+
+    if (!memory) {
+        return NULL;
+    }
+
+    memAllocParams.type = NVOS32_TYPE_PRIMARY;
+    memAllocParams.descriptorType = descriptorType;
+    memAllocParams.descriptor = descriptor;
+    memAllocParams.limit = limit;
+
+    memAllocParams.attr =
+        FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, memAllocParams.attr);
+
+    memAllocParams.attr2 =
+        FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, memAllocParams.attr2);
+
+    /* dmabuf import is currently only used for ISO memory. */
+    if (!device->isoIOCoherencyModes.coherent) {
+        memAllocParams.attr =
+            FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_COMBINE,
+                        memAllocParams.attr);
+    } else {
+        memAllocParams.attr =
+            FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK,
+                        memAllocParams.attr);
+    }
+
+    ret = nvRmApiAlloc(device->hRmClient,
+                       device->hRmDevice,
+                       hRmHandle,
+                       NV01_MEMORY_SYSTEM_OS_DESCRIPTOR,
+                       &memAllocParams);
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "nvRmApiAlloc failed with error code 0x%08x",
+            ret);
+        nvKmsKapiFreeRmHandle(device, hRmHandle);
+        FreeMemory(device, memory);
+        return NULL;
+    }
+
+    memory->hRmHandle = hRmHandle;
+    memory->size = limit + 1;
+    memory->surfaceParams.layout = NvKmsSurfaceMemoryLayoutPitch;
+    memory->isVidmem = NV_FALSE;
+
+    return memory;
+}
+
+static struct NvKmsKapiMemory*
+GetSystemMemoryHandleFromSgt(struct NvKmsKapiDevice *device,
+                             NvP64 sgt,
+                             NvP64 gem,
+                             NvU32 limit)
+{
+    NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR_PARAMETERS params = {
+        .sgt = sgt,
+        .gem = gem
+    };
+
+    return GetSystemMemoryHandleFromDmaBufSgtHelper(
+        device, NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR, &params, limit);
+}
+
+static struct NvKmsKapiMemory*
+GetSystemMemoryHandleFromDmaBuf(struct NvKmsKapiDevice *device,
+                                NvP64 dmaBuf,
+                                NvU32 limit)
+{
+    return GetSystemMemoryHandleFromDmaBufSgtHelper(
+        device, NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR, dmaBuf, limit);
+}
+
+static NvBool RmGc6BlockerRefCntAction(const struct NvKmsKapiDevice *device,
+                                       NvU32 action)
+{
+    NV_STATUS status;
+    NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS params = { 0 };
+
+    nvAssert((action == NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC) ||
+             (action == NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC));
+
+    params.action = action;
+
+    status = nvRmApiControl(device->hRmClient,
+                            device->hRmSubDevice,
+                            NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT,
+                            &params,
+                            sizeof(params));
+    if (status != NV_OK) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to modify GC6 blocker refcount for 0x%x, status: 0x%x",
+            device->hRmSubDevice, status);
+        return NV_FALSE;
+    }
+
+    return NV_TRUE;
+}
+
+static NvBool RmGc6BlockerRefCntInc(const struct NvKmsKapiDevice *device)
+{
+    return RmGc6BlockerRefCntAction(
+        device,
+        NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC);
+}
+
+static NvBool RmGc6BlockerRefCntDec(const struct NvKmsKapiDevice *device)
+{
+    return RmGc6BlockerRefCntAction(
+        device,
+        NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC);
+}
+
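+/*
+ * Query RM for the list of physical page addresses backing 'memory'.  On
+ * success, '*pPages' points to an array to be released with
+ * FreeMemoryPages(), and '*pNumPages' holds its length.
+ */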
+static NvBool GetMemoryPages
+(
+    const struct NvKmsKapiDevice *device,
+    const struct NvKmsKapiMemory *memory,
+    NvU64 **pPages,
+    NvU32 *pNumPages
+)
+{
+    NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS paramsGetNumPages = {};
+    NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS paramsGetPages = {};
+    NvU64 *pages;
+    NV_STATUS status;
+
+    if (device == NULL || memory == NULL) {
+        return NV_FALSE;
+    }
+
+    status = nvRmApiControl(device->hRmClient,
+                            memory->hRmHandle,
+                            NV003E_CTRL_CMD_GET_SURFACE_NUM_PHYS_PAGES,
+                            &paramsGetNumPages,
+                            sizeof(paramsGetNumPages));
+    if (status != NV_OK) {
+        nvKmsKapiLogDeviceDebug(device,
+            "Failed to get number of physical allocation pages for RM "
+            "memory object 0x%x", memory->hRmHandle);
+        return NV_FALSE;
+    }
+
+    if (!paramsGetNumPages.numPages) {
+        return NV_FALSE;
+    }
+
+    pages = nvKmsKapiCalloc(paramsGetNumPages.numPages, sizeof(*pages));
+    if (!pages) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to allocate memory");
+        return NV_FALSE;
+    }
+
+    paramsGetPages.pPages = NV_PTR_TO_NvP64(pages);
+    paramsGetPages.numPages = paramsGetNumPages.numPages;
+
+    status = nvRmApiControl(device->hRmClient,
+                            memory->hRmHandle,
+                            NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES,
+                            &paramsGetPages,
+                            sizeof(paramsGetPages));
+    if (status != NV_OK) {
+        nvKmsKapiFree(pages);
+        nvKmsKapiLogDeviceDebug(device,
+            "Failed to get physical allocation pages for RM "
+            "memory object 0x%x", memory->hRmHandle);
+        return NV_FALSE;
+    }
+
+    nvAssert(paramsGetPages.numPages == paramsGetNumPages.numPages);
+
+    *pPages = pages;
+    *pNumPages = paramsGetPages.numPages;
+
+    return NV_TRUE;
+}
+
+static void FreeMemoryPages
+(
+    NvU64 *pPages
+)
+{
+    nvKmsKapiFree(pPages);
+}
+
+static NvBool MapMemory
+(
+    const struct NvKmsKapiDevice *device,
+    const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type,
+    void **ppLinearAddress
+)
+{
+    NV_STATUS status;
+    NvU32 flags = 0;
+
+    if (device == NULL || memory == NULL) {
+        return NV_FALSE;
+    }
+
+    switch (type) {
+    case NVKMS_KAPI_MAPPING_TYPE_USER:
+        /*
+         * Usermode clients can't be trusted not to access mappings while
+         * the GPU is in GC6.
+         *
+         * TODO: Revoke/restore mappings rather than blocking GC6
+         */
+        if (!RmGc6BlockerRefCntInc(device)) {
+            return NV_FALSE;
+        }
+        flags |= DRF_DEF(OS33, _FLAGS, _MEM_SPACE, _USER);
+        break;
+    case NVKMS_KAPI_MAPPING_TYPE_KERNEL:
+        /*
+         * Kernel clients should ensure on their own that the GPU isn't in
+         * GC6 before making accesses to mapped vidmem surfaces.
+         */
+        break;
+    }
+
+    status = nvRmApiMapMemory(
+        device->hRmClient,
+        device->hRmSubDevice,
+        memory->hRmHandle,
+        0,
+        memory->size,
+        ppLinearAddress,
+        flags);
+
+    if (status != NV_OK) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to map RM memory object 0x%x allocated for "
+            "NvKmsKapiMemory 0x%p",
+            memory->hRmHandle, memory);
+        if (type == NVKMS_KAPI_MAPPING_TYPE_USER) {
+            RmGc6BlockerRefCntDec(device); // XXX Can't handle failure.
+        }
+        return NV_FALSE;
+    }
+
+    return NV_TRUE;
+}
+
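+/*
+ * Unmap a CPU mapping created by MapMemory(), and drop the GC6 blocker
+ * refcount taken for user mappings.
+ */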
+static void UnmapMemory
+(
+    const struct NvKmsKapiDevice *device,
+    const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type,
+    const void *pLinearAddress
+)
+{
+    NV_STATUS status;
+    NvU32 flags = 0;
+
+    if (device == NULL || memory == NULL) {
+        return;
+    }
+
+    switch (type) {
+    case NVKMS_KAPI_MAPPING_TYPE_USER:
+        flags |= DRF_DEF(OS33, _FLAGS, _MEM_SPACE, _USER);
+        break;
+    case NVKMS_KAPI_MAPPING_TYPE_KERNEL:
+        break;
+    }
+
+    status =
+        nvRmApiUnmapMemory(device->hRmClient,
+                           device->hRmSubDevice,
+                           memory->hRmHandle,
+                           pLinearAddress,
+                           flags);
+
+    if (status != NV_OK) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to unmap RM memory object 0x%x allocated for "
+            "NvKmsKapiMemory 0x%p",
+            memory->hRmHandle, memory);
+    }
+
+    if (type == NVKMS_KAPI_MAPPING_TYPE_USER) {
+        RmGc6BlockerRefCntDec(device); // XXX Can't handle failure.
+    }
+}
+
+static NvBool IsVidmem(
+    const struct NvKmsKapiMemory *memory)
+{
+    return memory->isVidmem;
+}
+
+static NvBool GetSurfaceParams(
+    struct NvKmsKapiCreateSurfaceParams *params,
+    NvU32 *pNumPlanes,
+    enum NvKmsSurfaceMemoryLayout *pLayout,
+    NvU32 *pLog2GobsPerBlockY,
+    NvU32 pitch[])
+{
+    const NvKmsSurfaceMemoryFormatInfo *pFormatInfo =
+        nvKmsGetSurfaceMemoryFormatInfo(params->format);
+    enum NvKmsSurfaceMemoryLayout layout = NvKmsSurfaceMemoryLayoutPitch;
+    NvU32 log2GobsPerBlockY = 0;
+    NvU32 i;
+
+    if (pFormatInfo->numPlanes == 0) {
+        nvKmsKapiLogDebug("Unknown surface format");
+        return NV_FALSE;
+    }
+
+    for (i = 0; i < pFormatInfo->numPlanes; i++) {
+        struct NvKmsKapiMemory *memory =
+            params->planes[i].memory;
+
+        if (memory == NULL) {
+            return NV_FALSE;
+        }
+
+        if (i == 0) {
+            if (params->explicit_layout) {
+                layout = params->layout;
+            } else {
+                layout = memory->surfaceParams.layout;
+            }
+
+            switch (layout) {
+            case NvKmsSurfaceMemoryLayoutBlockLinear:
+                if (params->explicit_layout) {
+                    log2GobsPerBlockY = params->log2GobsPerBlockY;
+                } else {
+                    log2GobsPerBlockY =
+                        memory->surfaceParams.blockLinear.log2GobsPerBlock.y;
+                }
+                break;
+
+            case NvKmsSurfaceMemoryLayoutPitch:
+                log2GobsPerBlockY = 0;
+                break;
+
+            default:
+                nvKmsKapiLogDebug("Invalid surface layout: %u", layout);
+                return NV_FALSE;
+            }
+        } else {
+            if (!params->explicit_layout) {
+                if (layout != memory->surfaceParams.layout) {
+                    nvKmsKapiLogDebug("All planes do not have the same layout");
+                    return NV_FALSE;
+                }
+
+                if (layout == NvKmsSurfaceMemoryLayoutBlockLinear &&
+                    log2GobsPerBlockY !=
+                    memory->surfaceParams.blockLinear.log2GobsPerBlock.y) {
+
+                    nvKmsKapiLogDebug(
+                        "All planes do not have the same blocklinear parameters");
+                    return NV_FALSE;
+                }
+            }
+        }
+
+        if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
+            if (params->explicit_layout) {
+                pitch[i] = params->planes[i].pitch;
+                if (pitch[i] & 63) {
+                    nvKmsKapiLogDebug(
+                        "Invalid block-linear pitch alignment: %u", pitch[i]);
+                    return NV_FALSE;
+                }
+
+                pitch[i] = pitch[i] >> 6;
+            } else {
+                /*
+                 * The caller (nvidia-drm) is not blocklinear-aware, so the
+                 * passed-in pitch cannot accurately reflect block information.
+                 * Override the pitch with what was specified when the surface
+                 * was imported.
+                 */
+                pitch[i] = memory->surfaceParams.blockLinear.pitchInBlocks;
+            }
+        } else {
+            pitch[i] = params->planes[i].pitch;
+        }
+    }
+
+    *pNumPlanes = pFormatInfo->numPlanes;
+    *pLayout = layout;
+    *pLog2GobsPerBlockY = log2GobsPerBlockY;
+
+    return NV_TRUE;
+}
+
+static struct NvKmsKapiSurface* CreateSurface
+(
+    struct NvKmsKapiDevice *device,
+    struct NvKmsKapiCreateSurfaceParams *params
+)
+{
+    struct NvKmsRegisterSurfaceParams paramsReg = { };
+    NvBool status;
+
+    struct NvKmsKapiSurface *surface = NULL;
+
+    enum NvKmsSurfaceMemoryLayout layout = NvKmsSurfaceMemoryLayoutPitch;
+    NvU32 log2GobsPerBlockY = 0;
+    NvU32 numPlanes = 0;
+    NvU32 pitch[NVKMS_MAX_PLANES_PER_SURFACE] = { 0 };
+    NvU32 i;
+
+    if (!GetSurfaceParams(params,
+                          &numPlanes,
+                          &layout,
+                          &log2GobsPerBlockY,
+                          pitch)) {
+        goto failed;
+    }
+
+    surface = nvKmsKapiCalloc(1, sizeof(*surface));
+
+    if (surface == NULL) {
+        nvKmsKapiLogDebug(
+            "Failed to allocate memory for NVKMS surface object on "
+            "NvKmsKapiDevice 0x%p",
+            device);
+        goto failed;
+    }
+
+    if (device->hKmsDevice == 0x0) {
+        goto done;
+    }
+
+    /* Create NVKMS surface */
+
+    paramsReg.request.deviceHandle = device->hKmsDevice;
+
+    paramsReg.request.useFd = NV_FALSE;
+    paramsReg.request.rmClient = device->hRmClient;
+
+    paramsReg.request.widthInPixels = params->width;
+    paramsReg.request.heightInPixels = params->height;
+
+    paramsReg.request.format = params->format;
+
+    paramsReg.request.layout = layout;
+    paramsReg.request.log2GobsPerBlockY = log2GobsPerBlockY;
+
+    for (i = 0; i < numPlanes; i++) {
+        struct NvKmsKapiMemory *memory =
+            params->planes[i].memory;
+
+        paramsReg.request.planes[i].u.rmObject = memory->hRmHandle;
+        paramsReg.request.planes[i].rmObjectSizeInBytes = memory->size;
+        paramsReg.request.planes[i].offset = params->planes[i].offset;
+        paramsReg.request.planes[i].pitch = pitch[i];
+
+        paramsReg.request.noDisplayCaching |= memory->noDisplayCaching;
+    }
+
+    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_REGISTER_SURFACE,
+                                   &paramsReg, sizeof(paramsReg));
+    if (!status) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to register NVKMS surface of dimensions %ux%u "
+            "and %s format",
+            params->width,
+            params->height,
+            nvKmsSurfaceMemoryFormatToString(params->format));
+
+        goto failed;
+    }
+
+    surface->hKmsHandle = paramsReg.reply.surfaceHandle;
+
+done:
+    return surface;
+
+failed:
+    nvKmsKapiFree(surface);
+
+    return NULL;
+}
+
+static void DestroySurface
+(
+    struct NvKmsKapiDevice *device, struct NvKmsKapiSurface *surface
+)
+{
+    struct NvKmsUnregisterSurfaceParams paramsUnreg = { };
+    NvBool status;
+
+    if (device->hKmsDevice == 0x0) {
+        goto done;
+    }
+
+    paramsUnreg.request.deviceHandle = device->hKmsDevice;
+    paramsUnreg.request.surfaceHandle = surface->hKmsHandle;
+    /*
+     * Since we are unregistering this surface from KAPI, we know that this
+     * primarily happens from nv_drm_framebuffer_destroy and that access to
+     * this framebuffer has been externally synchronized; we are done with
+     * it.  Because of that, we do not need to synchronize this unregister.
+     */
+    paramsUnreg.request.skipSync = NV_TRUE;
+
+    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_UNREGISTER_SURFACE,
+                                   &paramsUnreg, sizeof(paramsUnreg));
+
+    if (!status) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to unregister NVKMS surface registered for "
+            "NvKmsKapiSurface 0x%p",
+            surface);
+    }
+
+done:
+    nvKmsKapiFree(surface);
+}
+
+/*
+ * Helper function to convert NvKmsMode to NvKmsKapiDisplayMode.
+ */
+static void NvKmsModeToKapi
+(
+    const struct NvKmsMode *kmsMode,
+    struct NvKmsKapiDisplayMode *mode
+)
+{
+    const NvModeTimings *timings = &kmsMode->timings;
+
+    nvkms_memset(mode, 0, sizeof(*mode));
+
+    mode->timings.refreshRate = timings->RRx1k;
+    mode->timings.pixelClockHz = timings->pixelClockHz;
+    mode->timings.hVisible = timings->hVisible;
+    mode->timings.hSyncStart = timings->hSyncStart;
+    mode->timings.hSyncEnd = timings->hSyncEnd;
+    mode->timings.hTotal = timings->hTotal;
+    mode->timings.hSkew = timings->hSkew;
+    mode->timings.vVisible = timings->vVisible;
+    mode->timings.vSyncStart = timings->vSyncStart;
+    mode->timings.vSyncEnd = timings->vSyncEnd;
+    mode->timings.vTotal = timings->vTotal;
+
+    mode->timings.flags.interlaced = timings->interlaced;
+    mode->timings.flags.doubleScan = timings->doubleScan;
+    mode->timings.flags.hSyncPos = timings->hSyncPos;
+    mode->timings.flags.hSyncNeg = timings->hSyncNeg;
+    mode->timings.flags.vSyncPos = timings->vSyncPos;
+    mode->timings.flags.vSyncNeg = timings->vSyncNeg;
+
+    mode->timings.widthMM = timings->sizeMM.w;
+    mode->timings.heightMM = timings->sizeMM.h;
+
+    ct_assert(sizeof(mode->name) == sizeof(kmsMode->name));
+
+    nvkms_memcpy(mode->name, kmsMode->name, sizeof(mode->name));
+}
+
+static void InitNvKmsModeValidationParams(
+    const struct NvKmsKapiDevice *device,
+    struct NvKmsModeValidationParams *params)
+{
+    /*
+     * Mode-timings structures of KAPI clients may not have a field like
+     * RRx1k, so there is no guarantee that the RRx1k value computed during
+     * the conversion from
+     *     KAPI client's mode-timings structure
+     *     -> NvKmsKapiDisplayMode -> NvModeTimings
+     * is the same as what we get from the EDID; this mismatch may cause a
+     * modeset to fail.
+     *
+     * The RRx1k field does not impact hardware mode-timing values,
+     * therefore override the RRx1k check.
+     *
+     * XXX NVKMS TODO: Bug 200156338 is filed to delete NvModeTimings::RRx1k
+     * if possible.
+     */
+    params->overrides = NVKMS_MODE_VALIDATION_NO_RRX1K_CHECK;
+}
+
+static int GetDisplayMode
+(
+    struct NvKmsKapiDevice *device,
+    NvKmsKapiDisplay display, NvU32 modeIndex,
+    struct NvKmsKapiDisplayMode *mode, NvBool *valid,
+    NvBool *preferredMode
+)
+{
+    struct NvKmsValidateModeIndexParams paramsValidate = { };
+    NvBool status;
+
+    if (device == NULL) {
+        return -1;
+    }
+
+    paramsValidate.request.deviceHandle = device->hKmsDevice;
+    paramsValidate.request.dispHandle = device->hKmsDisp;
+
+    paramsValidate.request.dpyId = nvNvU32ToDpyId(display);
+
+    InitNvKmsModeValidationParams(device,
+                                  &paramsValidate.request.modeValidation);
+
+    paramsValidate.request.modeIndex = modeIndex;
+
+    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_VALIDATE_MODE_INDEX,
+                                   &paramsValidate, sizeof(paramsValidate));
+
+    if (!status) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to get validated mode index 0x%x for NvKmsKapiDisplay 0x%08x",
+            modeIndex, display);
+        return -1;
+    }
+
+    if (mode != NULL) {
+        NvKmsModeToKapi(&paramsValidate.reply.mode, mode);
+    }
+
+    if (valid != NULL) {
+        *valid = paramsValidate.reply.valid;
+    }
+
+    if (preferredMode != NULL) {
+        *preferredMode = paramsValidate.reply.preferredMode;
+    }
+
+    return paramsValidate.reply.end ? 0 : 1;
+}
+
+/*
+ * Helper function to convert NvKmsKapiDisplayMode to NvKmsMode.
+ */
+static void NvKmsKapiDisplayModeToKapi
+(
+    const struct NvKmsKapiDisplayMode *mode,
+    struct NvKmsMode *kmsMode
+)
+{
+    NvModeTimings *timings = &kmsMode->timings;
+
+    nvkms_memset(kmsMode, 0, sizeof(*kmsMode));
+
+    nvkms_memcpy(kmsMode->name, mode->name, sizeof(mode->name));
+
+    timings->RRx1k = mode->timings.refreshRate;
+    timings->pixelClockHz = mode->timings.pixelClockHz;
+    timings->hVisible = mode->timings.hVisible;
+    timings->hSyncStart = mode->timings.hSyncStart;
+    timings->hSyncEnd = mode->timings.hSyncEnd;
+    timings->hTotal = mode->timings.hTotal;
+    timings->hSkew = mode->timings.hSkew;
+    timings->vVisible = mode->timings.vVisible;
+    timings->vSyncStart = mode->timings.vSyncStart;
+    timings->vSyncEnd = mode->timings.vSyncEnd;
+    timings->vTotal = mode->timings.vTotal;
+
+    timings->interlaced = mode->timings.flags.interlaced;
+    timings->doubleScan = mode->timings.flags.doubleScan;
+    timings->hSyncPos = mode->timings.flags.hSyncPos;
+    timings->hSyncNeg = mode->timings.flags.hSyncNeg;
+    timings->vSyncPos = mode->timings.flags.vSyncPos;
+    timings->vSyncNeg = mode->timings.flags.vSyncNeg;
+
+    timings->sizeMM.w = mode->timings.widthMM;
+    timings->sizeMM.h = mode->timings.heightMM;
+}
+
+static NvBool ValidateDisplayMode
+(
+    struct NvKmsKapiDevice *device,
+    NvKmsKapiDisplay display, const struct NvKmsKapiDisplayMode *mode
+)
+{
+    struct NvKmsValidateModeParams paramsValidate;
+    NvBool status;
+
+    if (device == NULL) {
+        return NV_FALSE;
+    }
+
+    nvkms_memset(&paramsValidate, 0, sizeof(paramsValidate));
+
+    paramsValidate.request.deviceHandle = device->hKmsDevice;
+    paramsValidate.request.dispHandle = device->hKmsDisp;
+
+    paramsValidate.request.dpyId = nvNvU32ToDpyId(display);
+
+    InitNvKmsModeValidationParams(device,
+                                  &paramsValidate.request.modeValidation);
+
+    NvKmsKapiDisplayModeToKapi(mode, &paramsValidate.request.mode);
+
+    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_VALIDATE_MODE,
+                                   &paramsValidate, sizeof(paramsValidate));
+
+    if (!status) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Failed to get validated mode %ux%u@%uHz for NvKmsKapiDisplay 0x%08x of "
+            "NvKmsKapiDevice 0x%p",
+            mode->timings.hVisible, mode->timings.vVisible,
+            mode->timings.refreshRate/1000, display,
+            device);
+        return NV_FALSE;
+    }
+
+    return paramsValidate.reply.valid;
+}
+
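+/*
+ * Translate the per-layer KAPI sync parameters (pre/post syncpts or display
+ * semaphores, which are mutually exclusive) into NvKmsChannelSyncObjects.
+ */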
+static NvBool AssignSyncObjectConfig(
+    struct NvKmsKapiDevice *device,
+    const struct NvKmsKapiLayerConfig *pLayerConfig,
+    struct NvKmsChannelSyncObjects *pSyncObject)
+{
+    if (!device->supportsSyncpts) {
+        if (pLayerConfig->syncParams.preSyncptSpecified ||
+            pLayerConfig->syncParams.postSyncptRequested) {
+            return NV_FALSE;
+        }
+    }
+
+    /* Syncpt and Semaphore usage are mutually exclusive. */
+    if (pLayerConfig->syncParams.semaphoreSpecified &&
+        (pLayerConfig->syncParams.preSyncptSpecified ||
+         pLayerConfig->syncParams.postSyncptRequested)) {
+        return NV_FALSE;
+    }
+
+    pSyncObject->useSyncpt = FALSE;
+
+    if (pLayerConfig->syncParams.preSyncptSpecified) {
+        pSyncObject->useSyncpt = TRUE;
+
+        pSyncObject->u.syncpts.pre.type = NVKMS_SYNCPT_TYPE_RAW;
+        pSyncObject->u.syncpts.pre.u.raw.id =
+            pLayerConfig->syncParams.u.syncpt.preSyncptId;
+        pSyncObject->u.syncpts.pre.u.raw.value =
+            pLayerConfig->syncParams.u.syncpt.preSyncptValue;
+    } else if (pLayerConfig->syncParams.semaphoreSpecified) {
+        pSyncObject->u.semaphores.release.surface.surfaceHandle =
+        pSyncObject->u.semaphores.acquire.surface.surfaceHandle =
+            device->semaphore.hKmsHandle;
+        pSyncObject->u.semaphores.release.surface.format =
+        pSyncObject->u.semaphores.acquire.surface.format =
+            device->semaphore.format;
+        pSyncObject->u.semaphores.release.surface.offsetInWords =
+        pSyncObject->u.semaphores.acquire.surface.offsetInWords =
+            nvKmsKapiGetDisplaySemaphoreOffset(
+                device,
+                pLayerConfig->syncParams.u.semaphore.index) >> 2;
+        pSyncObject->u.semaphores.acquire.value =
+            NVKMS_KAPI_SEMAPHORE_VALUE_READY;
+        pSyncObject->u.semaphores.release.value =
+            NVKMS_KAPI_SEMAPHORE_VALUE_DONE;
+    }
+
+    if (pLayerConfig->syncParams.postSyncptRequested) {
+        pSyncObject->useSyncpt = TRUE;
+        pSyncObject->u.syncpts.requestedPostType = NVKMS_SYNCPT_TYPE_FD;
+    }
+    return NV_TRUE;
+}
+
+static NvBool AssignHDRMetadataConfig(
+    const struct NvKmsKapiLayerConfig *layerConfig,
+    const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig,
+    const NvU32 layer,
+    struct NvKmsFlipCommonParams *params,
+    NvBool bFromKmsSetMode)
+{
+    params->layer[layer].hdr.specified =
+        bFromKmsSetMode || layerRequestedConfig->flags.hdrMetadataChanged;
+    params->layer[layer].hdr.enabled =
+        layerConfig->hdrMetadata.enabled;
+    if (layerConfig->hdrMetadata.enabled) {
+        params->layer[layer].hdr.staticMetadata =
+            layerConfig->hdrMetadata.val;
+    }
+
+    return params->layer[layer].hdr.specified;
+}
+
+static NvBool AssignLayerLutConfig(
+    const struct NvKmsKapiDevice *device,
+    const struct NvKmsKapiLayerConfig *layerConfig,
+    const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig,
+    const NvU32 layer,
+    struct NvKmsFlipCommonParams *params,
+    NvBool bFromKmsSetMode)
+{
+    NvBool changed = FALSE;
+
+    if ((device->lutCaps.layer[layer].ilut.supported) &&
+        (layerRequestedConfig->flags.ilutChanged || bFromKmsSetMode)) {
+
+        params->layer[layer].ilut.specified = TRUE;
+        params->layer[layer].ilut.enabled = layerConfig->ilut.enabled;
+
+        if (layerConfig->ilut.lutSurface != NULL) {
+            params->layer[layer].ilut.lut.surfaceHandle =
+                layerConfig->ilut.lutSurface->hKmsHandle;
+        } else {
+            params->layer[layer].ilut.lut.surfaceHandle = 0;
+        }
+        params->layer[layer].ilut.lut.offset = layerConfig->ilut.offset;
+        params->layer[layer].ilut.lut.vssSegments =
+            layerConfig->ilut.vssSegments;
+        params->layer[layer].ilut.lut.lutEntries =
+            layerConfig->ilut.lutEntries;
+
+        changed = TRUE;
+    }
+
+    if ((device->lutCaps.layer[layer].tmo.supported) &&
+        (layerRequestedConfig->flags.tmoChanged || bFromKmsSetMode)) {
+
+        params->layer[layer].tmo.specified = TRUE;
+        params->layer[layer].tmo.enabled = layerConfig->tmo.enabled;
+
+        if (layerConfig->tmo.lutSurface != NULL) {
+            params->layer[layer].tmo.lut.surfaceHandle =
+                layerConfig->tmo.lutSurface->hKmsHandle;
+        } else {
+            params->layer[layer].tmo.lut.surfaceHandle = 0;
+        }
+        params->layer[layer].tmo.lut.offset = layerConfig->tmo.offset;
+        params->layer[layer].tmo.lut.vssSegments =
+            layerConfig->tmo.vssSegments;
+        params->layer[layer].tmo.lut.lutEntries =
+            layerConfig->tmo.lutEntries;
+
+        changed = TRUE;
+    }
+
+    return changed;
+}
+
+static void NvKmsKapiCursorConfigToKms(
+    const struct NvKmsKapiCursorRequestedConfig *requestedConfig,
+    struct NvKmsFlipCommonParams *params,
+    NvBool bFromKmsSetMode)
+{
+    if (requestedConfig->flags.surfaceChanged || bFromKmsSetMode) {
+        params->cursor.imageSpecified = NV_TRUE;
+
+        if (requestedConfig->surface != NULL) {
+            params->cursor.image.surfaceHandle[NVKMS_LEFT] =
+                requestedConfig->surface->hKmsHandle;
+        }
+
+        params->cursor.image.cursorCompParams.colorKeySelect =
+            NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE;
+        params->cursor.image.cursorCompParams.blendingMode[1] =
+            requestedConfig->compParams.compMode;
+        params->cursor.image.cursorCompParams.surfaceAlpha =
+            requestedConfig->compParams.surfaceAlpha;
+    }
+
+    if (requestedConfig->flags.dstXYChanged || bFromKmsSetMode) {
+        params->cursor.position.x = requestedConfig->dstX;
+        params->cursor.position.y = requestedConfig->dstY;
+
+        params->cursor.positionSpecified = NV_TRUE;
+    }
+}
+
+static NvBool NvKmsKapiOverlayLayerConfigToKms(
+    struct NvKmsKapiDevice *device,
+    const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig,
+    const NvU32 layer,
+    const NvU32 head,
+    struct NvKmsFlipCommonParams *params,
+    NvBool commit,
+    NvBool bFromKmsSetMode)
+{
+    NvBool ret = NV_FALSE;
+    const struct NvKmsKapiLayerConfig *layerConfig =
+        &layerRequestedConfig->config;
+
+    if (layerRequestedConfig->flags.surfaceChanged || bFromKmsSetMode) {
+        params->layer[layer].syncObjects.specified = NV_TRUE;
+        params->layer[layer].completionNotifier.specified = NV_TRUE;
+        params->layer[layer].surface.specified = NV_TRUE;
+
+        if (layerConfig->surface != NULL) {
+            params->layer[layer].surface.handle[NVKMS_LEFT] =
+                layerConfig->surface->hKmsHandle;
+        }
+
+        params->layer[layer].surface.rrParams =
+            layerConfig->rrParams;
+
+        params->layer[layer].compositionParams.val.colorKeySelect =
+            NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE;
+        params->layer[layer].compositionParams.val.blendingMode[1] =
+            layerConfig->compParams.compMode;
+        params->layer[layer].compositionParams.val.surfaceAlpha =
+            layerConfig->compParams.surfaceAlpha;
+        params->layer[layer].compositionParams.specified = TRUE;
+        params->layer[layer].minPresentInterval =
+            layerConfig->minPresentInterval;
+    }
+
+    if (layerRequestedConfig->flags.cscChanged ||
+        layerRequestedConfig->flags.matrixOverridesChanged ||
+        bFromKmsSetMode) {
+        params->layer[layer].csc.specified = NV_TRUE;
+        params->layer[layer].csc.useMain = layerConfig->cscUseMain;
+        if (!layerConfig->cscUseMain) {
+            params->layer[layer].csc.matrix = layerConfig->csc;
+        }
+
+        // 'blendCtm' overrides 'csc', but provides a 3x4 matrix.
+        if (layerConfig->matrixOverrides.enabled.blendCtm) {
+            params->layer[layer].csc.useMain = FALSE;
+            params->layer[layer].csc.matrix =
+                layerConfig->matrixOverrides.blendCtm;
+        }
+    }
+
+    if (layerRequestedConfig->flags.srcWHChanged || bFromKmsSetMode) {
+        params->layer[layer].sizeIn.val.width = layerConfig->srcWidth;
+        params->layer[layer].sizeIn.val.height = layerConfig->srcHeight;
+        params->layer[layer].sizeIn.specified = TRUE;
+    }
+
+    if (layerRequestedConfig->flags.dstWHChanged || bFromKmsSetMode) {
+        params->layer[layer].sizeOut.val.width = layerConfig->dstWidth;
+        params->layer[layer].sizeOut.val.height = layerConfig->dstHeight;
+        params->layer[layer].sizeOut.specified = TRUE;
+    }
+
+    if (layerRequestedConfig->flags.dstXYChanged || bFromKmsSetMode) {
+        params->layer[layer].outputPosition.val.x = layerConfig->dstX;
+        params->layer[layer].outputPosition.val.y = layerConfig->dstY;
+
+        params->layer[layer].outputPosition.specified = NV_TRUE;
+    }
+
+    if (layerRequestedConfig->flags.inputColorSpaceChanged || bFromKmsSetMode) {
+        params->layer[layer].colorSpace.val = layerConfig->inputColorSpace;
+        params->layer[layer].colorSpace.specified = TRUE;
+    }
+
+    if (layerRequestedConfig->flags.inputTfChanged || bFromKmsSetMode) {
+        params->layer[layer].tf.val = layerConfig->inputTf;
+        params->layer[layer].tf.specified = TRUE;
+    }
+
+    if (layerRequestedConfig->flags.inputColorRangeChanged || bFromKmsSetMode) {
+        params->layer[layer].colorRange.val = layerConfig->inputColorRange;
+        /* Mark colorRange (not colorSpace) as specified here. */
+        params->layer[layer].colorRange.specified = TRUE;
+    }
+
+    AssignHDRMetadataConfig(layerConfig, layerRequestedConfig, layer,
+                            params, bFromKmsSetMode);
+
+    if (layerRequestedConfig->flags.matrixOverridesChanged || bFromKmsSetMode) {
+        // 'lmsCtm' explicitly provides a matrix to program CSC00.
+        if (layerConfig->matrixOverrides.enabled.lmsCtm) {
+            params->layer[layer].csc00Override.matrix =
+                layerConfig->matrixOverrides.lmsCtm;
+            params->layer[layer].csc00Override.enabled = TRUE;
+        } else {
+            params->layer[layer].csc00Override.enabled = FALSE;
+        }
+        params->layer[layer].csc00Override.specified = TRUE;
+
+        // 'lmsToItpCtm' explicitly provides a matrix to program CSC01.
+        if (layerConfig->matrixOverrides.enabled.lmsToItpCtm) {
+            params->layer[layer].csc01Override.matrix =
+                layerConfig->matrixOverrides.lmsToItpCtm;
+            params->layer[layer].csc01Override.enabled = TRUE;
+        } else {
+            params->layer[layer].csc01Override.enabled = FALSE;
+        }
+        params->layer[layer].csc01Override.specified = TRUE;
+
+        // 'itpToLmsCtm' explicitly provides a matrix to program CSC10.
+        if (layerConfig->matrixOverrides.enabled.itpToLmsCtm) {
+            params->layer[layer].csc10Override.matrix =
+                layerConfig->matrixOverrides.itpToLmsCtm;
+            params->layer[layer].csc10Override.enabled = TRUE;
+        } else {
+            params->layer[layer].csc10Override.enabled = FALSE;
+        }
+        params->layer[layer].csc10Override.specified = TRUE;
+
+        // 'blendCtm' explicitly provides a matrix to program CSC11.
+        if (layerConfig->matrixOverrides.enabled.blendCtm) {
+            params->layer[layer].csc11Override.matrix =
+                layerConfig->matrixOverrides.blendCtm;
+            params->layer[layer].csc11Override.enabled = TRUE;
+        } else {
+            params->layer[layer].csc11Override.enabled = FALSE;
+        }
+        params->layer[layer].csc11Override.specified = TRUE;
+    }
+
+    AssignLayerLutConfig(device, layerConfig, layerRequestedConfig, layer,
+                         params, bFromKmsSetMode);
+
+    if (commit) {
+        NvU32 nextIndex = NVKMS_KAPI_INC_NOTIFIER_INDEX(
+            device->layerState[head][layer].currFlipNotifierIndex);
+
+        if (layerConfig->surface != NULL) {
+            NvU32 nextIndexOffsetInBytes =
+                NVKMS_KAPI_NOTIFIER_OFFSET(head,
+                                           layer, nextIndex);
+
+            params->layer[layer].completionNotifier.val.
+                surface.surfaceHandle = device->notifier.hKmsHandle;
+
+            params->layer[layer].completionNotifier.val.
+                surface.format = device->notifier.format;
+
+            params->layer[layer].completionNotifier.val.
+                surface.offsetInWords = nextIndexOffsetInBytes >> 2;
+
+            params->layer[layer].completionNotifier.val.awaken = NV_TRUE;
+        }
+
+        ret = AssignSyncObjectConfig(device,
+                                     layerConfig,
+                                     &params->layer[layer].syncObjects.val);
+        if (ret == NV_FALSE) {
+            return ret;
+        }
+
+        /*
+         * XXX Should this be done after the commit?  What if the commit
+         * fails?
+         *
+         * Commits are not expected to fail in the KAPI layer; only
+         * validated configurations are expected to be committed.
+         */
+        device->layerState[head][layer].currFlipNotifierIndex = nextIndex;
+    }
+
+    return NV_TRUE;
+}
+
+static NvBool NvKmsKapiPrimaryLayerConfigToKms(
+    struct NvKmsKapiDevice *device,
+    const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig,
+    const NvU32 head,
+    struct NvKmsFlipCommonParams *params,
+    NvBool commit,
+    NvBool bFromKmsSetMode)
+{
+    NvBool ret = NV_FALSE;
+    const struct NvKmsKapiLayerConfig *layerConfig =
+        &layerRequestedConfig->config;
+
+    NvBool changed = FALSE;
+
+    if (layerRequestedConfig->flags.surfaceChanged || bFromKmsSetMode) {
+        params->layer[NVKMS_MAIN_LAYER].surface.specified = NV_TRUE;
+        params->layer[NVKMS_MAIN_LAYER].completionNotifier.specified = NV_TRUE;
+        params->layer[NVKMS_MAIN_LAYER].syncObjects.specified = NV_TRUE;
+
+        params->layer[NVKMS_MAIN_LAYER].minPresentInterval =
+            layerConfig->minPresentInterval;
+        params->layer[NVKMS_MAIN_LAYER].tearing = layerConfig->tearing;
+        params->layer[NVKMS_MAIN_LAYER].surface.rrParams = layerConfig->rrParams;
+
+        if (layerConfig->surface != NULL) {
+            params->layer[NVKMS_MAIN_LAYER].surface.handle[0] =
+                layerConfig->surface->hKmsHandle;
+
+            if (params->layer[NVKMS_MAIN_LAYER].surface.handle[0] != 0) {
+                params->layer[NVKMS_MAIN_LAYER].sizeIn.val.width = layerConfig->srcWidth;
+                params->layer[NVKMS_MAIN_LAYER].sizeIn.val.height = layerConfig->srcHeight;
+                params->layer[NVKMS_MAIN_LAYER].sizeIn.specified = TRUE;
+
+                params->layer[NVKMS_MAIN_LAYER].sizeOut.val.width = layerConfig->dstWidth;
+                params->layer[NVKMS_MAIN_LAYER].sizeOut.val.height = layerConfig->dstHeight;
+                params->layer[NVKMS_MAIN_LAYER].sizeOut.specified = TRUE;
+            }
+        }
+
+        changed = TRUE;
+    }
+
+    if (layerRequestedConfig->flags.srcXYChanged || bFromKmsSetMode) {
+        params->viewPortIn.point.x = layerConfig->srcX;
+        params->viewPortIn.point.y = layerConfig->srcY;
+        params->viewPortIn.specified = NV_TRUE;
+
+        changed = TRUE;
+    }
+
+    if (layerRequestedConfig->flags.cscChanged ||
+        layerRequestedConfig->flags.matrixOverridesChanged ||
+        bFromKmsSetMode) {
+        nvAssert(!layerConfig->cscUseMain);
+
+        params->layer[NVKMS_MAIN_LAYER].csc.specified = NV_TRUE;
+        params->layer[NVKMS_MAIN_LAYER].csc.useMain = FALSE;
+        params->layer[NVKMS_MAIN_LAYER].csc.matrix = layerConfig->csc;
+
+        // 'blendCtm' overrides 'csc', but provides a 3x4 matrix.
+        if (layerConfig->matrixOverrides.enabled.blendCtm) {
+            params->layer[NVKMS_MAIN_LAYER].csc.matrix =
+                layerConfig->matrixOverrides.blendCtm;
+        }
+
+        changed = TRUE;
+    }
+
+    if (layerRequestedConfig->flags.inputColorSpaceChanged || bFromKmsSetMode) {
+        params->layer[NVKMS_MAIN_LAYER].colorSpace.val = layerConfig->inputColorSpace;
+        params->layer[NVKMS_MAIN_LAYER].colorSpace.specified = TRUE;
+
+        changed = TRUE;
+    }
+
+    if (layerRequestedConfig->flags.inputTfChanged || bFromKmsSetMode) {
+        params->layer[NVKMS_MAIN_LAYER].tf.val = layerConfig->inputTf;
+        params->layer[NVKMS_MAIN_LAYER].tf.specified = TRUE;
+
+        changed = TRUE;
+    }
+
+    if (layerRequestedConfig->flags.inputColorRangeChanged || bFromKmsSetMode) {
+        params->layer[NVKMS_MAIN_LAYER].colorRange.val = layerConfig->inputColorRange;
+        params->layer[NVKMS_MAIN_LAYER].colorRange.specified = TRUE;
+
+        changed = TRUE;
+    }
+
+    if (AssignHDRMetadataConfig(layerConfig, layerRequestedConfig,
+                                NVKMS_MAIN_LAYER, params, bFromKmsSetMode)) {
+        changed = TRUE;
+    }
+
+    if (layerRequestedConfig->flags.matrixOverridesChanged || bFromKmsSetMode) {
+        // 'lmsCtm' explicitly provides a matrix to program CSC00.
+        if (layerConfig->matrixOverrides.enabled.lmsCtm) {
+            params->layer[NVKMS_MAIN_LAYER].csc00Override.matrix =
+                layerConfig->matrixOverrides.lmsCtm;
+            params->layer[NVKMS_MAIN_LAYER].csc00Override.enabled = TRUE;
+        } else {
+            params->layer[NVKMS_MAIN_LAYER].csc00Override.enabled = FALSE;
+        }
+        params->layer[NVKMS_MAIN_LAYER].csc00Override.specified = TRUE;
+
+        // 'lmsToItpCtm' explicitly provides a matrix to program CSC01.
+        if (layerConfig->matrixOverrides.enabled.lmsToItpCtm) {
+            params->layer[NVKMS_MAIN_LAYER].csc01Override.matrix =
+                layerConfig->matrixOverrides.lmsToItpCtm;
+            params->layer[NVKMS_MAIN_LAYER].csc01Override.enabled = TRUE;
+        } else {
+            params->layer[NVKMS_MAIN_LAYER].csc01Override.enabled = FALSE;
+        }
+        params->layer[NVKMS_MAIN_LAYER].csc01Override.specified = TRUE;
+
+        // 'itpToLmsCtm' explicitly provides a matrix to program CSC10.
+        if (layerConfig->matrixOverrides.enabled.itpToLmsCtm) {
+            params->layer[NVKMS_MAIN_LAYER].csc10Override.matrix =
+                layerConfig->matrixOverrides.itpToLmsCtm;
+            params->layer[NVKMS_MAIN_LAYER].csc10Override.enabled = TRUE;
+        } else {
+            params->layer[NVKMS_MAIN_LAYER].csc10Override.enabled = FALSE;
+        }
+        params->layer[NVKMS_MAIN_LAYER].csc10Override.specified = TRUE;
+
+        // 'blendCtm' explicitly provides a matrix to program CSC11.
+        if (layerConfig->matrixOverrides.enabled.blendCtm) {
+            params->layer[NVKMS_MAIN_LAYER].csc11Override.matrix =
+                layerConfig->matrixOverrides.blendCtm;
+            params->layer[NVKMS_MAIN_LAYER].csc11Override.enabled = TRUE;
+        } else {
+            params->layer[NVKMS_MAIN_LAYER].csc11Override.enabled = FALSE;
+        }
+        params->layer[NVKMS_MAIN_LAYER].csc11Override.specified = TRUE;
+
+        changed = TRUE;
+    }
+
+    if (AssignLayerLutConfig(device, layerConfig, layerRequestedConfig,
+                             NVKMS_MAIN_LAYER, params, bFromKmsSetMode)) {
+        changed = TRUE;
+    }
+
+    if (commit && changed) {
+        NvU32 nextIndex = NVKMS_KAPI_INC_NOTIFIER_INDEX(
+            device->layerState[head][NVKMS_MAIN_LAYER].
+                currFlipNotifierIndex);
+
+        if (layerConfig->surface != NULL) {
+            NvU32 nextIndexOffsetInBytes =
+                NVKMS_KAPI_NOTIFIER_OFFSET(head,
+                                           NVKMS_MAIN_LAYER, nextIndex);
+
+            params->layer[NVKMS_MAIN_LAYER].completionNotifier.
+                val.surface.surfaceHandle = device->notifier.hKmsHandle;
+
+                val.surface.format = device->notifier.format;
+
+            params->layer[NVKMS_MAIN_LAYER].completionNotifier.
+                val.surface.offsetInWords = nextIndexOffsetInBytes >> 2;
+
+            params->layer[NVKMS_MAIN_LAYER].completionNotifier.val.awaken = NV_TRUE;
+        }
+
+        ret = AssignSyncObjectConfig(device,
+                                     layerConfig,
+                                     &params->layer[NVKMS_MAIN_LAYER].syncObjects.val);
+        if (ret == NV_FALSE) {
+            return ret;
+        }
+
+        /*
+         * XXX Should this be done after commit?
+         * What if the commit fails?
+         *
+         * Commits are not expected to fail in the KAPI layer;
+         * only validated configurations are expected
+         * to be committed.
+         */
+        device->layerState[head][NVKMS_MAIN_LAYER].
+            currFlipNotifierIndex = nextIndex;
+    }
+
+    return NV_TRUE;
+}
+
+static NvBool NvKmsKapiLayerConfigToKms(
+    struct NvKmsKapiDevice *device,
+    const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig,
+    const NvU32 layer,
+    const NvU32 head,
+    struct NvKmsFlipCommonParams *params,
+    NvBool commit,
+    NvBool bFromKmsSetMode)
+{
+    if (layer == NVKMS_KAPI_LAYER_PRIMARY_IDX) {
+        return NvKmsKapiPrimaryLayerConfigToKms(device,
+                                                layerRequestedConfig,
+                                                head,
+                                                params,
+                                                commit,
+                                                bFromKmsSetMode);
+    }
+
+    return NvKmsKapiOverlayLayerConfigToKms(device,
+                                            layerRequestedConfig,
+                                            layer,
+                                            head,
+                                            params,
+                                            commit,
+                                            bFromKmsSetMode);
+}
+
+static void NvKmsKapiHeadLutConfigToKms(
+    const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig,
+    struct NvKmsSetLutCommonParams *lutParams,
+    NvBool bFromKmsSetMode)
+{
+    const struct NvKmsKapiHeadModeSetConfig *modeSetConfig =
+        &headRequestedConfig->modeSetConfig;
+    struct NvKmsSetInputLutParams *input = &lutParams->input;
+    struct NvKmsSetOutputLutParams *output = &lutParams->output;
+
+    /* input LUT */
+    if (headRequestedConfig->flags.legacyIlutChanged || bFromKmsSetMode) {
+        input->specified = NV_TRUE;
+        input->depth = modeSetConfig->lut.input.depth;
+        input->start = modeSetConfig->lut.input.start;
+        input->end = modeSetConfig->lut.input.end;
+
+        input->pRamps = nvKmsPointerToNvU64(modeSetConfig->lut.input.pRamps);
+    }
+
+    /* output LUT */
+    if (headRequestedConfig->flags.legacyOlutChanged || bFromKmsSetMode) {
+        output->specified = NV_TRUE;
+        output->enabled = modeSetConfig->lut.output.enabled;
+
+        output->pRamps = nvKmsPointerToNvU64(modeSetConfig->lut.output.pRamps);
+    }
+}
+
+static NvBool AnyLayerOutputTransferFunctionChanged(
+    const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig)
+{
+    NvU32 layer;
+
+    for (layer = 0;
+         layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig);
+         layer++) {
+        const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig =
+            &headRequestedConfig->layerRequestedConfig[layer];
+
+        if (layerRequestedConfig->flags.outputTfChanged) {
+            return NV_TRUE;
+        }
+    }
+
+    return NV_FALSE;
+}
+
+static NvBool GetOutputTransferFunction(
+    const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig,
+    enum NvKmsOutputTf *tf)
+{
+    NvBool found = NV_FALSE;
+    NvU32 layer;
+
+    *tf = NVKMS_OUTPUT_TF_NONE;
+
+    for (layer = 0;
+         layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig);
+         layer++) {
+        const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig =
+            &headRequestedConfig->layerRequestedConfig[layer];
+        const struct NvKmsKapiLayerConfig *layerConfig =
+            &layerRequestedConfig->config;
+
+        if (layerConfig->hdrMetadata.enabled) {
+            if (!found) {
+                *tf = layerConfig->outputTf;
+                found = NV_TRUE;
+            } else if (*tf != layerConfig->outputTf) {
+                nvKmsKapiLogDebug(
+                    "Output transfer function should be the same for all layers on a head");
+                return NV_FALSE;
+            }
+        }
+    }
+
+    return NV_TRUE;
+}
+
+/*
+ * Helper function to convert NvKmsKapiRequestedModeSetConfig
+ * to NvKmsSetModeParams.
+ */
+static NvBool NvKmsKapiRequestedModeSetConfigToKms(
+    struct NvKmsKapiDevice *device,
+    const struct NvKmsKapiRequestedModeSetConfig *requestedConfig,
+    struct NvKmsSetModeParams *params,
+    NvBool commit)
+{
+    NvU32 dispIdx = device->dispIdx;
+    NvU32 head;
+
+    nvkms_memset(params, 0, sizeof(*params));
+
+    params->request.commit = commit;
+    params->request.deviceHandle = device->hKmsDevice;
+    params->request.requestedDispsBitMask = 1 << dispIdx;
+
+    for (head = 0;
+         head < ARRAY_LEN(requestedConfig->headRequestedConfig); head++) {
+
+        const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig =
+            &requestedConfig->headRequestedConfig[head];
+        const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig =
+            &headRequestedConfig->modeSetConfig;
+        struct NvKmsSetModeOneHeadRequest *paramsHead;
+        enum NvKmsOutputTf tf;
+        NvU32 layer;
+        NvU32 i;
+
+        if ((requestedConfig->headsMask & (1 << head)) == 0x0) {
+            continue;
+        }
+
+        params->request.disp[dispIdx].requestedHeadsBitMask |= 1 << head;
+
+        if (headModeSetConfig->numDisplays == 0) {
+            continue;
+        }
+
+        if (params->request.commit && !headModeSetConfig->bActive) {
+            continue;
+        }
+
+        paramsHead = &params->request.disp[dispIdx].head[head];
+
+        InitNvKmsModeValidationParams(device,
+                                      &paramsHead->modeValidationParams);
+
+        for (i = 0; i < headModeSetConfig->numDisplays; i++) {
+            paramsHead->dpyIdList = nvAddDpyIdToDpyIdList(
+                nvNvU32ToDpyId(headModeSetConfig->displays[i]),
+                paramsHead->dpyIdList);
+        }
+
+        NvKmsKapiDisplayModeToKapi(&headModeSetConfig->mode, &paramsHead->mode);
+
+        NvKmsKapiHeadLutConfigToKms(headRequestedConfig,
+                                    &paramsHead->flip.lut,
+                                    NV_TRUE /* bFromKmsSetMode */);
+
+        if (device->lutCaps.olut.supported) {
+            paramsHead->flip.olut.specified = TRUE;
+            paramsHead->flip.olut.enabled = headModeSetConfig->olut.enabled;
+
+            if (headModeSetConfig->olut.lutSurface != NULL) {
+                paramsHead->flip.olut.lut.surfaceHandle =
+                    headModeSetConfig->olut.lutSurface->hKmsHandle;
+            } else {
+                paramsHead->flip.olut.lut.surfaceHandle = 0;
+            }
+            paramsHead->flip.olut.lut.offset = headModeSetConfig->olut.offset;
+            paramsHead->flip.olut.lut.vssSegments =
+                headModeSetConfig->olut.vssSegments;
+            paramsHead->flip.olut.lut.lutEntries =
+                headModeSetConfig->olut.lutEntries;
+
+            paramsHead->flip.olutFpNormScale.specified = TRUE;
+            paramsHead->flip.olutFpNormScale.val =
+                headModeSetConfig->olutFpNormScale;
+        }
+
+        NvKmsKapiCursorConfigToKms(&headRequestedConfig->cursorRequestedConfig,
+                                   &paramsHead->flip,
+                                   NV_TRUE /* bFromKmsSetMode */);
+        for (layer = 0;
+             layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig);
+             layer++) {
+
+            const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig =
+                &headRequestedConfig->layerRequestedConfig[layer];
+
+            if (!NvKmsKapiLayerConfigToKms(device,
+                                           layerRequestedConfig,
+                                           layer,
+                                           head,
+                                           &paramsHead->flip,
+                                           commit,
+                                           NV_TRUE /* bFromKmsSetMode */)) {
+                return NV_FALSE;
+            }
+        }
+
+        if (!GetOutputTransferFunction(headRequestedConfig, &tf)) {
+            return NV_FALSE;
+        }
+
+        paramsHead->flip.tf.val = tf;
+        paramsHead->flip.tf.specified = NV_TRUE;
+
+        paramsHead->flip.hdrInfoFrame.specified = NV_TRUE;
+        paramsHead->flip.hdrInfoFrame.enabled =
+            headModeSetConfig->hdrInfoFrame.enabled;
+        if (headModeSetConfig->hdrInfoFrame.enabled) {
+            paramsHead->flip.hdrInfoFrame.eotf =
+                headModeSetConfig->hdrInfoFrame.eotf;
+            paramsHead->flip.hdrInfoFrame.staticMetadata =
headModeSetConfig->hdrInfoFrame.staticMetadata; + } + + paramsHead->flip.colorimetry.specified = NV_TRUE; + paramsHead->flip.colorimetry.val = headModeSetConfig->colorimetry; + + paramsHead->viewPortSizeIn.width = + headModeSetConfig->mode.timings.hVisible; + paramsHead->viewPortSizeIn.height = + headModeSetConfig->mode.timings.vVisible; + + paramsHead->allowGsync = NV_TRUE; + paramsHead->allowAdaptiveSync = NVKMS_ALLOW_ADAPTIVE_SYNC_ALL; + } + + return NV_TRUE; +} + + +static NvBool KmsSetMode( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, + struct NvKmsKapiModeSetReplyConfig *replyConfig, + const NvBool commit) +{ + struct NvKmsSetModeParams *params = NULL; + NvBool status = NV_FALSE; + + params = nvKmsKapiCalloc(1, sizeof(*params)); + + if (params == NULL) { + goto done; + } + + if (!NvKmsKapiRequestedModeSetConfigToKms(device, + requestedConfig, + params, + commit)) { + goto done; + } + + status = nvkms_ioctl_from_kapi_try_pmlock(device->pKmsOpen, + NVKMS_IOCTL_SET_MODE, + params, sizeof(*params)); + + replyConfig->flipResult = + (params->reply.status == NVKMS_SET_MODE_STATUS_SUCCESS) ? + NV_KMS_FLIP_RESULT_SUCCESS : + NV_KMS_FLIP_RESULT_INVALID_PARAMS; + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "NVKMS_IOCTL_SET_MODE ioctl failed"); + goto done; + } + + if (params->reply.status != NVKMS_SET_MODE_STATUS_SUCCESS) + { + int i; + + nvKmsKapiLogDeviceDebug( + device, + "NVKMS_IOCTL_SET_MODE failed! Status:\n"); + + nvKmsKapiLogDeviceDebug( + device, + " top-level status: %d\n", params->reply.status); + + nvKmsKapiLogDeviceDebug( + device, + " disp0 status: %d\n", params->reply.disp[0].status); + + for (i = 0; i < ARRAY_LEN(params->reply.disp[0].head); i++) + { + nvKmsKapiLogDeviceDebug( + device, + " head%d status: %d\n", + i, params->reply.disp[0].head[i].status); + } + + status = NV_FALSE; + } + +done: + + if (params != NULL) { + nvKmsKapiFree(params); + } + + return status; +} + +static NvBool IsHeadConfigValid( + const struct NvKmsFlipParams *params, + const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, + const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig, + NvU32 head) +{ + if ((requestedConfig->headsMask & (1 << head)) == 0x0) { + return NV_FALSE; + } + + if (headModeSetConfig->numDisplays == 0) { + return NV_FALSE; + } + + if (params->request.commit && !headModeSetConfig->bActive) { + return NV_FALSE; + } + return NV_TRUE; +} + +static NvBool KmsFlip( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, + struct NvKmsKapiModeSetReplyConfig *replyConfig, + const NvBool commit) +{ + struct NvKmsFlipParams *params = NULL; + struct NvKmsFlipRequestOneHead *pFlipHead = NULL; + NvBool status = NV_TRUE; + NvU32 i, head; + + /* Allocate space for the params structure, plus space for each possible + * head. */ + params = nvKmsKapiCalloc(1, + sizeof(*params) + sizeof(pFlipHead[0]) * NVKMS_KAPI_MAX_HEADS); + + if (params == NULL) { + return NV_FALSE; + } + + /* The flipHead array was allocated in the same block above. 
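+     *
+     * Layout of the single allocation (sizes taken from the
+     * nvKmsKapiCalloc() call above):
+     *
+     *     [ struct NvKmsFlipParams ][ NvKmsFlipRequestOneHead x NVKMS_KAPI_MAX_HEADS ]
+     *     ^ params                  ^ pFlipHead == (void *)(params + 1)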
*/ + pFlipHead = (struct NvKmsFlipRequestOneHead *)(params + 1); + + params->request.deviceHandle = device->hKmsDevice; + params->request.commit = commit; + params->request.pFlipHead = nvKmsPointerToNvU64(pFlipHead); + params->request.numFlipHeads = 0; + for (head = 0; + head < ARRAY_LEN(requestedConfig->headRequestedConfig); head++) { + + const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig = + &requestedConfig->headRequestedConfig[head]; + const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig = + &headRequestedConfig->modeSetConfig; + + struct NvKmsFlipCommonParams *flipParams = NULL; + + NvU32 layer; + + if (!IsHeadConfigValid(params, requestedConfig, headModeSetConfig, head)) { + continue; + } + + pFlipHead[params->request.numFlipHeads].sd = 0; + pFlipHead[params->request.numFlipHeads].head = head; + flipParams = &pFlipHead[params->request.numFlipHeads].flip; + params->request.numFlipHeads++; + + NvKmsKapiCursorConfigToKms(&headRequestedConfig->cursorRequestedConfig, + flipParams, + NV_FALSE /* bFromKmsSetMode */); + + for (layer = 0; + layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig); + layer++) { + + const struct NvKmsKapiLayerRequestedConfig + *layerRequestedConfig = + &headRequestedConfig->layerRequestedConfig[layer]; + + status = NvKmsKapiLayerConfigToKms(device, + layerRequestedConfig, + layer, + head, + flipParams, + commit, + NV_FALSE /* bFromKmsSetMode */); + + if (status != NV_TRUE) { + goto done; + } + } + + flipParams->tf.specified = + AnyLayerOutputTransferFunctionChanged(headRequestedConfig); + if (flipParams->tf.specified) { + enum NvKmsOutputTf tf; + status = GetOutputTransferFunction(headRequestedConfig, &tf); + if (status != NV_TRUE) { + goto done; + } + flipParams->tf.val = tf; + } + + flipParams->hdrInfoFrame.specified = + headRequestedConfig->flags.hdrInfoFrameChanged; + if (flipParams->hdrInfoFrame.specified) { + flipParams->hdrInfoFrame.enabled = + headModeSetConfig->hdrInfoFrame.enabled; + if (headModeSetConfig->hdrInfoFrame.enabled) { + flipParams->hdrInfoFrame.eotf = + headModeSetConfig->hdrInfoFrame.eotf; + flipParams->hdrInfoFrame.staticMetadata = + headModeSetConfig->hdrInfoFrame.staticMetadata; + } + } + + flipParams->colorimetry.specified = + headRequestedConfig->flags.colorimetryChanged; + if (flipParams->colorimetry.specified) { + flipParams->colorimetry.val = headModeSetConfig->colorimetry; + } + + if (headModeSetConfig->vrrEnabled) { + flipParams->allowVrr = NV_TRUE; + } + + NvKmsKapiHeadLutConfigToKms(headRequestedConfig, + &flipParams->lut, + NV_FALSE /* bFromKmsSetMode */); + + if (device->lutCaps.olut.supported && headRequestedConfig->flags.olutChanged) { + flipParams->olut.specified = TRUE; + flipParams->olut.enabled = headModeSetConfig->olut.enabled; + + if (headModeSetConfig->olut.lutSurface != NULL) { + flipParams->olut.lut.surfaceHandle = + headModeSetConfig->olut.lutSurface->hKmsHandle; + } else { + flipParams->olut.lut.surfaceHandle = 0; + } + flipParams->olut.lut.offset = headModeSetConfig->olut.offset; + flipParams->olut.lut.vssSegments = + headModeSetConfig->olut.vssSegments; + flipParams->olut.lut.lutEntries = + headModeSetConfig->olut.lutEntries; + } + + if (device->lutCaps.olut.supported && + headRequestedConfig->flags.olutFpNormScaleChanged) { + + flipParams->olutFpNormScale.specified = TRUE; + flipParams->olutFpNormScale.val = headModeSetConfig->olutFpNormScale; + } + } + + if (params->request.numFlipHeads == 0) { + goto done; + } + + status = nvkms_ioctl_from_kapi_try_pmlock(device->pKmsOpen, + 
NVKMS_IOCTL_FLIP,
+                                              params, sizeof(*params));
+
+    replyConfig->flipResult = params->reply.flipResult;
+
+    if (!status) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "NVKMS_IOCTL_FLIP ioctl failed");
+        goto done;
+    }
+
+    if (!commit) {
+        goto done;
+    }
+
+    /*! fill in the flip reply */
+    replyConfig->vrrFlip = params->reply.vrrFlipType;
+    replyConfig->vrrSemaphoreIndex = params->reply.vrrSemaphoreIndex;
+
+    for (i = 0; i < params->request.numFlipHeads; i++) {
+        const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig =
+            &requestedConfig->headRequestedConfig[pFlipHead[i].head];
+
+        struct NvKmsKapiHeadReplyConfig *headReplyConfig =
+            &replyConfig->headReplyConfig[pFlipHead[i].head];
+
+        const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig =
+            &headRequestedConfig->modeSetConfig;
+
+        struct NvKmsFlipCommonReplyOneHead *flipParams = &params->reply.flipHead[i];
+
+        NvU32 layer;
+
+        if (!IsHeadConfigValid(params, requestedConfig, headModeSetConfig, pFlipHead[i].head)) {
+            continue;
+        }
+
+        for (layer = 0;
+             layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig);
+             layer++) {
+
+            const struct NvKmsKapiLayerConfig *layerRequestedConfig =
+                &headRequestedConfig->layerRequestedConfig[layer].config;
+
+            struct NvKmsKapiLayerReplyConfig *layerReplyConfig =
+                &headReplyConfig->layerReplyConfig[layer];
+
+            /*! initialize explicitly to -1 as 0 is a valid file descriptor */
+            layerReplyConfig->postSyncptFd = -1;
+            if (layerRequestedConfig->syncParams.postSyncptRequested) {
+                layerReplyConfig->postSyncptFd =
+                    flipParams->layer[layer].postSyncpt.u.fd;
+            }
+        }
+    }
+
+done:
+
+    nvKmsKapiFree(params);
+
+    return status;
+}
+
+static NvBool ApplyModeSetConfig(
+    struct NvKmsKapiDevice *device,
+    const struct NvKmsKapiRequestedModeSetConfig *requestedConfig,
+    struct NvKmsKapiModeSetReplyConfig *replyConfig,
+    const NvBool commit)
+{
+    NvBool bRequiredModeset = NV_FALSE;
+    NvU32 head;
+
+    if (device == NULL || requestedConfig == NULL) {
+        return NV_FALSE;
+    }
+
+    for (head = 0;
+         head < ARRAY_LEN(requestedConfig->headRequestedConfig); head++) {
+
+        const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig =
+            &requestedConfig->headRequestedConfig[head];
+        const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig =
+            &headRequestedConfig->modeSetConfig;
+
+        if ((requestedConfig->headsMask & (1 << head)) == 0x0) {
+            continue;
+        }
+
+        bRequiredModeset =
+            headRequestedConfig->flags.activeChanged ||
+            headRequestedConfig->flags.displaysChanged ||
+            headRequestedConfig->flags.modeChanged ||
+            headRequestedConfig->flags.hdrInfoFrameChanged ||
+            headRequestedConfig->flags.colorimetryChanged;
+
+        /*
+         * The NVKMS flip ioctl cannot validate the flip configuration for an
+         * inactive head; therefore, use the modeset ioctl if the
+         * configuration contains any such head.
+         */
+        if (!commit &&
+            headModeSetConfig->numDisplays != 0 && !headModeSetConfig->bActive) {
+            bRequiredModeset = TRUE;
+        }
+
+        if (bRequiredModeset) {
+            break;
+        }
+    }
+
+    if (bRequiredModeset) {
+        return KmsSetMode(device, requestedConfig, replyConfig, commit);
+    }
+
+    return KmsFlip(device, requestedConfig, replyConfig, commit);
+}
+
+/*
+ * This executes without the nvkms_lock held. The lock will be grabbed
+ * during the kapi dispatching contained in this function.
+ */
+void nvKmsKapiHandleEventQueueChange
+(
+    struct NvKmsKapiDevice *device
+)
+{
+    if (device == NULL) {
+        return;
+    }
+
+    /*
+     * If the callback is NULL, the event interest declaration should have
+     * been rejected, and no events will be reported.
+     */
+    nvAssert(device->eventCallback != NULL);
+
+    do
+    {
+        struct NvKmsGetNextEventParams kmsEventParams = { };
+        struct NvKmsKapiEvent kapiEvent = { };
+        NvBool err = NV_FALSE;
+
+        if (!nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_GET_NEXT_EVENT,
+                                   &kmsEventParams, sizeof(kmsEventParams))) {
+            break;
+        }
+
+        if (!kmsEventParams.reply.valid) {
+            break;
+        }
+
+        kapiEvent.type = kmsEventParams.reply.event.eventType;
+
+        kapiEvent.device = device;
+        kapiEvent.privateData = device->privateData;
+
+        switch (kmsEventParams.reply.event.eventType) {
+            case NVKMS_EVENT_TYPE_DPY_CHANGED:
+                kapiEvent.u.displayChanged.display =
+                    nvDpyIdToNvU32(kmsEventParams.
+                                   reply.event.u.dpyChanged.dpyId);
+                break;
+            case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED:
+                kapiEvent.u.dynamicDisplayConnected.display =
+                    nvDpyIdToNvU32(kmsEventParams.
+                                   reply.event.u.dynamicDpyConnected.dpyId);
+                break;
+            case NVKMS_EVENT_TYPE_FLIP_OCCURRED:
+                kapiEvent.u.flipOccurred.head =
+                    kmsEventParams.reply.event.u.flipOccurred.head;
+                kapiEvent.u.flipOccurred.layer =
+                    kmsEventParams.reply.event.u.flipOccurred.layer;
+                break;
+            default:
+                continue;
+        }
+
+        if (err) {
+            nvKmsKapiLogDeviceDebug(
+                device,
+                "Error in conversion from "
+                "NvKmsGetNextEventParams to NvKmsKapiEvent");
+            continue;
+        }
+
+        device->eventCallback(&kapiEvent);
+
+    } while(1);
+}
+
+/*
+ * Helper function to convert NvKmsQueryDpyCRC32Reply to NvKmsKapiDpyCRC32.
+ */
+static void NvKmsCrcsToKapi
+(
+    const struct NvKmsQueryDpyCRC32Reply *crcs,
+    struct NvKmsKapiCrcs *kmsCrcs
+)
+{
+    kmsCrcs->outputCrc32.value = crcs->outputCrc32.value;
+    kmsCrcs->outputCrc32.supported = crcs->outputCrc32.supported;
+    kmsCrcs->rasterGeneratorCrc32.value = crcs->rasterGeneratorCrc32.value;
+    kmsCrcs->rasterGeneratorCrc32.supported = crcs->rasterGeneratorCrc32.supported;
+    kmsCrcs->compositorCrc32.value = crcs->compositorCrc32.value;
+    kmsCrcs->compositorCrc32.supported = crcs->compositorCrc32.supported;
+}
+
+static NvBool GetCRC32
+(
+    struct NvKmsKapiDevice *device,
+    NvU32 head,
+    struct NvKmsKapiCrcs *crc32
+)
+{
+    struct NvKmsQueryDpyCRC32Params params = { };
+    NvBool status;
+
+    if (device->hKmsDevice == 0x0) {
+        return NV_TRUE;
+    }
+
+    params.request.deviceHandle = device->hKmsDevice;
+    params.request.dispHandle = device->hKmsDisp;
+    params.request.head = head;
+
+    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_QUERY_DPY_CRC32,
+                                   &params, sizeof(params));
+
+    if (!status) {
+        nvKmsKapiLogDeviceDebug(device, "NVKMS QueryDpyCRC32Data failed.");
+        return NV_FALSE;
+    }
+    NvKmsCrcsToKapi(&params.reply, crc32);
+    return NV_TRUE;
+}
+
+static NvKmsKapiSuspendResumeCallbackFunc *pSuspendResumeFunc;
+
+void nvKmsKapiSuspendResume
+(
+    NvBool suspend
+)
+{
+    if (pSuspendResumeFunc) {
+        pSuspendResumeFunc(suspend);
+    }
+}
+
+static void nvKmsKapiSetSuspendResumeCallback
+(
+    NvKmsKapiSuspendResumeCallbackFunc *function
+)
+{
+    if (pSuspendResumeFunc && function) {
+        nvKmsKapiLogDebug("Kapi suspend/resume callback function already registered");
+    }
+
+    pSuspendResumeFunc = function;
+}
+
+static NvBool SignalVrrSemaphore
+(
+    struct NvKmsKapiDevice *device,
+    NvS32 index
+)
+{
+    NvBool status = NV_TRUE;
+    struct NvKmsVrrSignalSemaphoreParams params = { };
+    params.request.deviceHandle = device->hKmsDevice;
+    params.request.vrrSemaphoreIndex = index;
+    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_VRR_SIGNAL_SEMAPHORE,
+                                   &params, sizeof(params));
+    if (!status) {
+        nvKmsKapiLogDeviceDebug(device, "NVKMS VrrSignalSemaphore failed");
+    }
+    return status;
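+    /*
+     * Usage sketch (caller-side, hedged): KmsFlip() above copies
+     * params->reply.vrrSemaphoreIndex into replyConfig->vrrSemaphoreIndex,
+     * and a KAPI client that receives a usable index would typically call
+     * back into this function once its rendering work is ready, e.g.:
+     *
+     *     if (replyConfig.vrrSemaphoreIndex >= 0) {
+     *         SignalVrrSemaphore(device, replyConfig.vrrSemaphoreIndex);
+     *     }
+     *
+     * The exact ownership and lifetime of the index are defined by the
+     * NVKMS VRR implementation, not by this file.
+     */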
+}
+
+static NvBool CheckLutNotifier
+(
+    struct NvKmsKapiDevice *device,
+    NvU32 head,
+    NvBool waitForCompletion
+)
+{
+    NvBool status = NV_TRUE;
+    struct NvKmsCheckLutNotifierParams params = { };
+    params.request.deviceHandle = device->hKmsDevice;
+    params.request.dispHandle = device->hKmsDisp;
+    params.request.head = head;
+    params.request.waitForCompletion = waitForCompletion;
+    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_CHECK_LUT_NOTIFIER,
+                                   &params, sizeof(params));
+
+    /*
+     * In cases where we're first enabling a head, we would expect status to be
+     * false, but in that case, there's no LUT notifier to wait for, so treat
+     * that case as complete.
+     */
+    return !status || params.reply.complete;
+}
+
+static void FramebufferConsoleDisabled
+(
+    struct NvKmsKapiDevice *device
+)
+{
+    struct NvKmsFramebufferConsoleDisabledParams params = { };
+    NvBool status;
+
+    if (device->hKmsDevice == 0x0) {
+        return;
+    }
+
+    params.request.deviceHandle = device->hKmsDevice;
+
+    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
+                                   NVKMS_IOCTL_FRAMEBUFFER_CONSOLE_DISABLED,
+                                   &params, sizeof(params));
+    if (!status) {
+        nvKmsKapiLogDeviceDebug(device, "NVKMS FramebufferConsoleDisabled failed");
+    }
+}
+
+NvBool nvKmsKapiGetFunctionsTableInternal
+(
+    struct NvKmsKapiFunctionsTable *funcsTable
+)
+{
+    if (funcsTable == NULL) {
+        return NV_FALSE;
+    }
+
+    if (nvkms_strcmp(funcsTable->versionString, NV_VERSION_STRING) != 0) {
+        funcsTable->versionString = NV_VERSION_STRING;
+        return NV_FALSE;
+    }
+
+    funcsTable->systemInfo.bAllowWriteCombining =
+        nvkms_allow_write_combining();
+
+    funcsTable->enumerateGpus = EnumerateGpus;
+
+    funcsTable->allocateDevice = AllocateDevice;
+    funcsTable->freeDevice = FreeDevice;
+
+    funcsTable->grabOwnership = GrabOwnership;
+    funcsTable->releaseOwnership = ReleaseOwnership;
+
+    funcsTable->grantPermissions = GrantPermissions;
+    funcsTable->revokePermissions = RevokePermissions;
+    funcsTable->grantSubOwnership = GrantSubOwnership;
+    funcsTable->revokeSubOwnership = RevokeSubOwnership;
+
+    funcsTable->declareEventInterest = DeclareEventInterest;
+
+    funcsTable->getDeviceResourcesInfo = GetDeviceResourcesInfo;
+    funcsTable->getDisplays = GetDisplays;
+    funcsTable->getConnectorInfo = GetConnectorInfo;
+
+    funcsTable->getStaticDisplayInfo = GetStaticDisplayInfo;
+    funcsTable->getDynamicDisplayInfo = GetDynamicDisplayInfo;
+
+    funcsTable->allocateMemory = AllocateMemory;
+    funcsTable->importMemory = ImportMemory;
+    funcsTable->dupMemory = DupMemory;
+    funcsTable->exportMemory = ExportMemory;
+    funcsTable->freeMemory = FreeMemory;
+    funcsTable->getSystemMemoryHandleFromSgt = GetSystemMemoryHandleFromSgt;
+    funcsTable->getSystemMemoryHandleFromDmaBuf =
+        GetSystemMemoryHandleFromDmaBuf;
+
+    funcsTable->mapMemory = MapMemory;
+    funcsTable->unmapMemory = UnmapMemory;
+    funcsTable->isVidmem = IsVidmem;
+
+    funcsTable->createSurface = CreateSurface;
+    funcsTable->destroySurface = DestroySurface;
+
+    funcsTable->getDisplayMode = GetDisplayMode;
+    funcsTable->validateDisplayMode = ValidateDisplayMode;
+
+    funcsTable->applyModeSetConfig = ApplyModeSetConfig;
+
+    funcsTable->allocateChannelEvent = nvKmsKapiAllocateChannelEvent;
+    funcsTable->freeChannelEvent = nvKmsKapiFreeChannelEvent;
+
+    funcsTable->getCRC32 = GetCRC32;
+
+    funcsTable->getMemoryPages = GetMemoryPages;
+    funcsTable->freeMemoryPages = FreeMemoryPages;
+
+    funcsTable->importSemaphoreSurface = nvKmsKapiImportSemaphoreSurface;
+    funcsTable->freeSemaphoreSurface = nvKmsKapiFreeSemaphoreSurface;
+
funcsTable->registerSemaphoreSurfaceCallback = + nvKmsKapiRegisterSemaphoreSurfaceCallback; + funcsTable->unregisterSemaphoreSurfaceCallback = + nvKmsKapiUnregisterSemaphoreSurfaceCallback; + funcsTable->setSemaphoreSurfaceValue = + nvKmsKapiSetSemaphoreSurfaceValue; + funcsTable->setSuspendResumeCallback = nvKmsKapiSetSuspendResumeCallback; + funcsTable->framebufferConsoleDisabled = FramebufferConsoleDisabled; + + funcsTable->tryInitDisplaySemaphore = nvKmsKapiTryInitDisplaySemaphore; + funcsTable->signalDisplaySemaphore = nvKmsKapiSignalDisplaySemaphore; + funcsTable->cancelDisplaySemaphore = nvKmsKapiCancelDisplaySemaphore; + funcsTable->signalVrrSemaphore = SignalVrrSemaphore; + funcsTable->checkLutNotifier = CheckLutNotifier; + + return NV_TRUE; +} + +NvU32 nvKmsKapiF16ToF32Internal(NvU16 a) +{ + float16_t fa = { .v = a }; + return f16_to_f32(fa).v; +} + +NvU16 nvKmsKapiF32ToF16Internal(NvU32 a) +{ + float32_t fa = { .v = a }; + return f32_to_f16(fa).v; +} + +NvU32 nvKmsKapiF32MulInternal(NvU32 a, NvU32 b) +{ + float32_t fa = { .v = a }; + float32_t fb = { .v = b }; + return f32_mul(fa, fb).v; +} + +NvU32 nvKmsKapiF32DivInternal(NvU32 a, NvU32 b) +{ + float32_t fa = { .v = a }; + float32_t fb = { .v = b }; + return f32_div(fa, fb).v; +} + +NvU32 nvKmsKapiF32AddInternal(NvU32 a, NvU32 b) +{ + float32_t fa = { .v = a }; + float32_t fb = { .v = b }; + return f32_add(fa, fb).v; +} + +NvU32 nvKmsKapiF32ToUI32RMinMagInternal(NvU32 a, NvBool exact) +{ + float32_t fa = { .v = a }; + return f32_to_ui32_r_minMag(fa, exact); +} + +NvU32 nvKmsKapiUI32ToF32Internal(NvU32 a) +{ + return ui32_to_f32(a).v; +} diff --git a/src/nvidia-modeset/lib/nvkms-format.c b/src/nvidia-modeset/lib/nvkms-format.c new file mode 100644 index 0000000..cb8c8fd --- /dev/null +++ b/src/nvidia-modeset/lib/nvkms-format.c @@ -0,0 +1,133 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvkms-format.h" +#include "nv_common_utils.h" +#include "nvctassert.h" + +#include + +#define RGB_ENTRY(_format, _depth, _bytesPerPixel) \ + [NvKmsSurfaceMemoryFormat##_format] = { \ + .format = NvKmsSurfaceMemoryFormat##_format, \ + .name = #_format, \ + .depth = _depth, \ + .isYUV = NV_FALSE, \ + .numPlanes = 1, \ + { \ + .rgb = { \ + .bytesPerPixel = _bytesPerPixel, \ + .bitsPerPixel = _bytesPerPixel * 8, \ + }, \ + }, \ + } + +#define YUV_ENTRY(_format, \ + _depth, \ + _numPlanes, \ + _depthPerComponent, \ + _storageBitsPerComponent, \ + _horizChromaDecimationFactor, \ + _vertChromaDecimationFactor) \ + [NvKmsSurfaceMemoryFormat##_format] = { \ + .format = NvKmsSurfaceMemoryFormat##_format, \ + .name = #_format, \ + .depth = _depth, \ + .isYUV = NV_TRUE, \ + .numPlanes = _numPlanes, \ + { \ + .yuv = { \ + .depthPerComponent = _depthPerComponent, \ + .storageBitsPerComponent = _storageBitsPerComponent, \ + .horizChromaDecimationFactor = _horizChromaDecimationFactor, \ + .vertChromaDecimationFactor = _vertChromaDecimationFactor, \ + }, \ + }, \ + } + +static const NvKmsSurfaceMemoryFormatInfo nvKmsEmptyFormatInfo; + +/* + * For 10/12-bit YUV formats, each component is packed in a 16-bit container in + * memory, and fetched by display HW as such. + */ +static const NvKmsSurfaceMemoryFormatInfo nvKmsSurfaceMemoryFormatInfo[] = { + RGB_ENTRY(I8, 8, 1), + RGB_ENTRY(A1R5G5B5, 16, 2), + RGB_ENTRY(X1R5G5B5, 15, 2), + RGB_ENTRY(R5G6B5, 16, 2), + RGB_ENTRY(A8R8G8B8, 32, 4), + RGB_ENTRY(X8R8G8B8, 24, 4), + RGB_ENTRY(A2B10G10R10, 32, 4), + RGB_ENTRY(X2B10G10R10, 30, 4), + RGB_ENTRY(A8B8G8R8, 32, 4), + RGB_ENTRY(X8B8G8R8, 24, 4), + RGB_ENTRY(RF16GF16BF16AF16, 64, 8), + RGB_ENTRY(RF16GF16BF16XF16, 64, 8), + RGB_ENTRY(R16G16B16A16, 64, 8), + RGB_ENTRY(RF32GF32BF32AF32, 128, 16), + YUV_ENTRY(Y8_U8__Y8_V8_N422, 16, 1, 8, 8, 2, 1), + YUV_ENTRY(U8_Y8__V8_Y8_N422, 16, 1, 8, 8, 2, 1), + YUV_ENTRY(Y8___U8V8_N444, 24, 2, 8, 8, 1, 1), + YUV_ENTRY(Y8___V8U8_N444, 24, 2, 8, 8, 1, 1), + YUV_ENTRY(Y8___U8V8_N422, 16, 2, 8, 8, 2, 1), + YUV_ENTRY(Y8___V8U8_N422, 16, 2, 8, 8, 2, 1), + YUV_ENTRY(Y8___U8V8_N420, 12, 2, 8, 8, 2, 2), + YUV_ENTRY(Y8___V8U8_N420, 12, 2, 8, 8, 2, 2), + YUV_ENTRY(Y10___U10V10_N444, 30, 2, 10, 16, 1, 1), + YUV_ENTRY(Y10___V10U10_N444, 30, 2, 10, 16, 1, 1), + YUV_ENTRY(Y10___U10V10_N422, 20, 2, 10, 16, 2, 1), + YUV_ENTRY(Y10___V10U10_N422, 20, 2, 10, 16, 2, 1), + YUV_ENTRY(Y10___U10V10_N420, 15, 2, 10, 16, 2, 2), + YUV_ENTRY(Y10___V10U10_N420, 15, 2, 10, 16, 2, 2), + YUV_ENTRY(Y12___U12V12_N444, 36, 2, 12, 16, 1, 1), + YUV_ENTRY(Y12___V12U12_N444, 36, 2, 12, 16, 1, 1), + YUV_ENTRY(Y12___U12V12_N422, 24, 2, 12, 16, 2, 1), + YUV_ENTRY(Y12___V12U12_N422, 24, 2, 12, 16, 2, 1), + YUV_ENTRY(Y12___U12V12_N420, 18, 2, 12, 16, 2, 2), + YUV_ENTRY(Y12___V12U12_N420, 18, 2, 12, 16, 2, 2), + YUV_ENTRY(Y8___U8___V8_N444, 24, 3, 8, 8, 1, 1), + YUV_ENTRY(Y8___U8___V8_N420, 12, 3, 8, 8, 2, 2), +}; + +ct_assert(ARRAY_LEN(nvKmsSurfaceMemoryFormatInfo) == + (NvKmsSurfaceMemoryFormatMax + 1)); + +const NvKmsSurfaceMemoryFormatInfo *nvKmsGetSurfaceMemoryFormatInfo( + const enum NvKmsSurfaceMemoryFormat format) +{ + if (format >= ARRAY_LEN(nvKmsSurfaceMemoryFormatInfo)) { + return &nvKmsEmptyFormatInfo; + } + + return &nvKmsSurfaceMemoryFormatInfo[format]; +} + +const char *nvKmsSurfaceMemoryFormatToString( + const enum NvKmsSurfaceMemoryFormat format) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(format); + + return (pFormatInfo != NULL) ? 
pFormatInfo->name : NULL;
+}
diff --git a/src/nvidia-modeset/lib/nvkms-sync.c b/src/nvidia-modeset/lib/nvkms-sync.c
new file mode 100644
index 0000000..996bcf6
--- /dev/null
+++ b/src/nvidia-modeset/lib/nvkms-sync.c
@@ -0,0 +1,402 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include
+
+#include /* NV_DISP_BASE_NOTIFIER_1, NV_DISP_NOTIFICATION_2 */
+#include /* NV_DISP_NOTIFIER */
+
+/*
+ * HW will never write 1 to the lower 32 bits of the timestamp.
+ */
+#define NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID 1
+
+/*
+ * The upper 32 bits of the timestamp will be 0 only during the first ~4
+ * seconds of boot. So for practical purposes, we can consider 0 as invalid.
+ */
+#define NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_HI_INVALID 0
+
+static void GetNotifierTimeStamp(volatile const NvU32 *notif,
+                                 NvU32 timeStampLoIdx,
+                                 NvU32 timeStampHiIdx,
+                                 struct nvKmsParsedNotifier *out)
+{
+    NvU32 lo, hi;
+    NvU32 pollCount = 0;
+
+    /*
+     * The caller of ParseNotifier() is expected to poll for the notifier
+     * status to become BEGUN/FINISHED before relying on the timestamp.
+     */
+    if (out->status == NVKMS_NOTIFIER_STATUS_NOT_BEGUN) {
+        return;
+    }
+
+    /*
+     * HW does 4-byte writes to the notifier, so poll until both the
+     * timestampLo and timestampHi fields become valid.
+     */
+    do {
+        lo = notif[timeStampLoIdx];
+        hi = notif[timeStampHiIdx];
+
+        if ((lo != NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID) &&
+            (hi != NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_HI_INVALID)) {
+            out->timeStamp = (NvU64)lo | ((NvU64)hi << 32);
+            out->timeStampValid = NV_TRUE;
+            break;
+        }
+
+        if (++pollCount >= 100) {
+            break;
+        }
+    } while (1);
+}
+
+static void SetNotifierLegacy(NvBool overlay, volatile void *in, NvBool begun,
+                              NvU64 timeStamp)
+{
+    volatile NvU32 *notif = in;
+
+    if (overlay) {
+        notif[NV_DISP_NOTIFICATION_2_INFO16_3] = begun ?
+            DRF_DEF(_DISP, _NOTIFICATION_2__3, _STATUS, _BEGUN) :
+            DRF_DEF(_DISP, _NOTIFICATION_2__3, _STATUS, _NOT_BEGUN);
+
+        notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_0] = begun ? NvU64_LO32(timeStamp) :
+            NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID;
+        notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_1] = begun ? NvU64_HI32(timeStamp) :
+            NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_HI_INVALID;
+    } else {
+        notif[NV_DISP_BASE_NOTIFIER_1__0] = begun ?
+ DRF_DEF(_DISP, _BASE_NOTIFIER_1__0, _STATUS, _BEGUN) : + DRF_DEF(_DISP, _BASE_NOTIFIER_1__0, _STATUS, _NOT_BEGUN); + } +} + +static void SetNotifierFourWord(volatile void *in, NvBool begun, + NvU64 timeStamp) +{ + volatile NvU32 *notif = in; + + notif[NV_DISP_NOTIFICATION_2_INFO16_3] = begun ? + DRF_DEF(_DISP, _NOTIFICATION_2__3, _STATUS, _BEGUN) : + DRF_DEF(_DISP, _NOTIFICATION_2__3, _STATUS, _NOT_BEGUN); + + notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_0] = begun ? NvU64_LO32(timeStamp) : + NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID; + notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_1] = begun ? NvU64_HI32(timeStamp) : + NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_HI_INVALID; +} + +static void SetNotifierFourWordNVDisplay(volatile void *in, NvBool begun, + NvU64 timeStamp) +{ + volatile NvU32 *notif = in; + + notif[NV_DISP_NOTIFIER__0] = begun ? + DRF_DEF(_DISP, _NOTIFIER__0, _STATUS, _BEGUN) : + DRF_DEF(_DISP, _NOTIFIER__0, _STATUS, _NOT_BEGUN); + + notif[NV_DISP_NOTIFIER__2] = begun ? NvU64_LO32(timeStamp) : + NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID; + notif[NV_DISP_NOTIFIER__3] = begun ? NvU64_HI32(timeStamp) : + NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_HI_INVALID; +} + +static void SetNotifier(enum NvKmsNIsoFormat format, NvBool overlay, + NvU32 index, void *base, NvBool begun, NvU64 timeStamp) +{ + const NvU32 sizeInBytes = nvKmsSizeOfNotifier(format, overlay); + void *notif = + (void *)((char *)base + (sizeInBytes * index)); + + switch (format) { + case NVKMS_NISO_FORMAT_LEGACY: + SetNotifierLegacy(overlay, notif, begun, timeStamp); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD: + SetNotifierFourWord(notif, begun, timeStamp); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + SetNotifierFourWordNVDisplay(notif, begun, timeStamp); + break; + } +} + +void nvKmsSetNotifier(enum NvKmsNIsoFormat format, NvBool overlay, + NvU32 index, void *base, NvU64 timeStamp) +{ + SetNotifier(format, overlay, index, base, NV_TRUE, timeStamp); +} + +void nvKmsResetNotifier(enum NvKmsNIsoFormat format, NvBool overlay, + NvU32 index, void *base) +{ + SetNotifier(format, overlay, index, base, NV_FALSE, 0); +} + +static void ParseNotifierLegacy(NvBool overlay, volatile const void *in, + struct nvKmsParsedNotifier *out) +{ + volatile const NvU32 *notif = in; + + if (overlay) { + NvU32 notif3; + + /* Read this once since it may be in video memory and we need multiple + * fields */ + notif3 = notif[NV_DISP_NOTIFICATION_2_INFO16_3]; + + switch(DRF_VAL(_DISP, _NOTIFICATION_2__3, _STATUS, notif3)) { + case NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_NOT_BEGUN; + break; + case NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_BEGUN; + break; + case NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED: + out->status = NVKMS_NOTIFIER_STATUS_FINISHED; + break; + } + + out->presentCount = + DRF_VAL(_DISP, _NOTIFICATION_2_INFO16_3, _PRESENT_COUNT, notif3); + + GetNotifierTimeStamp(notif, + NV_DISP_NOTIFICATION_2_TIME_STAMP_0, + NV_DISP_NOTIFICATION_2_TIME_STAMP_1, + out); + } else { + NvU32 notif0; + + /* There's a timestamp available in this notifier, but it's a weird + * 14-bit "audit timestamp" that's not useful for us. 
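+         * Because of that, timeStampValid is forced to NV_FALSE below, and
+         * callers must not consume out->timeStamp for base notifiers; only
+         * status and presentCount are meaningful here.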
*/ + out->timeStampValid = NV_FALSE; + + /* Read this once since it may be in video memory and we need multiple + * fields */ + notif0 = notif[NV_DISP_BASE_NOTIFIER_1__0]; + + switch(DRF_VAL(_DISP, _BASE_NOTIFIER_1__0, _STATUS, notif0)) { + case NV_DISP_BASE_NOTIFIER_1__0_STATUS_NOT_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_NOT_BEGUN; + break; + case NV_DISP_BASE_NOTIFIER_1__0_STATUS_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_BEGUN; + break; + case NV_DISP_BASE_NOTIFIER_1__0_STATUS_FINISHED: + out->status = NVKMS_NOTIFIER_STATUS_FINISHED; + break; + } + + out->presentCount = + DRF_VAL(_DISP, _BASE_NOTIFIER_1__0, _PRESENTATION_COUNT, notif0); + } +} + +static void ParseNotifierFourWord(const void *in, + struct nvKmsParsedNotifier *out) +{ + volatile const NvU32 *notif = in; + NvU32 notif3; + + /* Read this once since it may be in video memory and we need multiple + * fields */ + notif3 = notif[NV_DISP_NOTIFICATION_2_INFO16_3]; + + switch(DRF_VAL(_DISP, _NOTIFICATION_2__3, _STATUS, notif3)) { + case NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_NOT_BEGUN; + break; + case NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_BEGUN; + break; + case NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED: + out->status = NVKMS_NOTIFIER_STATUS_FINISHED; + break; + } + + out->presentCount = + DRF_VAL(_DISP, _NOTIFICATION_2_INFO16_3, _PRESENT_COUNT, notif3); + + GetNotifierTimeStamp(notif, + NV_DISP_NOTIFICATION_2_TIME_STAMP_0, + NV_DISP_NOTIFICATION_2_TIME_STAMP_1, + out); +} + +static void ParseNotifierFourWordNVDisplay(const void *in, + struct nvKmsParsedNotifier *out) +{ + volatile const NvU32 *notif = in; + NvU32 notif0; + + /* Read this once since it may be in video memory and we need multiple + * fields */ + notif0 = notif[NV_DISP_NOTIFIER__0]; + + switch(DRF_VAL(_DISP, _NOTIFIER__0, _STATUS, notif0)) { + case NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_NOT_BEGUN; + break; + case NV_DISP_NOTIFIER__0_STATUS_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_BEGUN; + break; + case NV_DISP_NOTIFIER__0_STATUS_FINISHED: + out->status = NVKMS_NOTIFIER_STATUS_FINISHED; + break; + } + + out->presentCount = + DRF_VAL(_DISP, _NOTIFIER__0, _PRESENT_COUNT, notif0); + + GetNotifierTimeStamp(notif, + NV_DISP_NOTIFIER__2, + NV_DISP_NOTIFIER__3, + out); +} + +void nvKmsParseNotifier(enum NvKmsNIsoFormat format, NvBool overlay, + NvU32 index, const void *base, + struct nvKmsParsedNotifier *out) +{ + const NvU32 sizeInBytes = nvKmsSizeOfNotifier(format, overlay); + const void *notif = + (const void *)((const char *)base + (sizeInBytes * index)); + + switch (format) { + case NVKMS_NISO_FORMAT_LEGACY: + ParseNotifierLegacy(overlay, notif, out); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD: + ParseNotifierFourWord(notif, out); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + ParseNotifierFourWordNVDisplay(notif, out); + break; + } +} + +NvU32 nvKmsSemaphorePayloadOffset(enum NvKmsNIsoFormat format) +{ + switch (format) { + case NVKMS_NISO_FORMAT_LEGACY: + return 0; + case NVKMS_NISO_FORMAT_FOUR_WORD: + return NV_DISP_NOTIFICATION_2_INFO32_2; + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + return NV_DISP_NOTIFIER__0; + } + + return 0; +} + +static void ResetSemaphoreLegacy(volatile void *in, NvU32 payload) +{ + volatile NvU32 *sema = in; + + *sema = payload; +} + +static void ResetSemaphoreFourWord(volatile void *in, NvU32 payload) +{ + volatile NvU32 *sema = in; + + sema[NV_DISP_NOTIFICATION_2_INFO32_2] = payload; +} + +static 
void ResetSemaphoreFourWordNVDisplay(volatile void *in, NvU32 payload) +{ + volatile NvU32 *sema = in; + + sema[NV_DISP_NOTIFIER__0] = payload; +} + +void nvKmsResetSemaphore(enum NvKmsNIsoFormat format, + NvU32 index, void *base, + NvU32 payload) +{ + const NvU32 sizeInBytes = nvKmsSizeOfSemaphore(format); + void *sema = + (void *)((char *)base + (sizeInBytes * index)); + + switch (format) { + case NVKMS_NISO_FORMAT_LEGACY: + ResetSemaphoreLegacy(sema, payload); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD: + ResetSemaphoreFourWord(sema, payload); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + ResetSemaphoreFourWordNVDisplay(sema, payload); + break; + } +} + +static NvU32 ParseSemaphoreLegacy(const volatile void *in) +{ + const volatile NvU32 *sema = in; + + return *sema; +} + +static NvU32 ParseSemaphoreFourWord(const volatile void *in) +{ + const volatile NvU32 *sema = in; + + return sema[NV_DISP_NOTIFICATION_2_INFO32_2]; +} + +static NvU32 ParseSemaphoreFourWordNVDisplay(const volatile void *in) +{ + const volatile NvU32 *sema = in; + + return sema[NV_DISP_NOTIFIER__0]; +} + +void nvKmsParseSemaphore(enum NvKmsNIsoFormat format, + NvU32 index, const void *base, + struct nvKmsParsedSemaphore *out) +{ + const NvU32 sizeInBytes = nvKmsSizeOfSemaphore(format); + const void *sema = + (const void *)((const char *)base + (sizeInBytes * index)); + NvU32 payload = 0; + + switch (format) { + case NVKMS_NISO_FORMAT_LEGACY: + payload = ParseSemaphoreLegacy(sema); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD: + payload = ParseSemaphoreFourWord(sema); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + payload = ParseSemaphoreFourWordNVDisplay(sema); + break; + } + + out->payload = payload; +} diff --git a/src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h b/src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h new file mode 100644 index 0000000..25bc5d0 --- /dev/null +++ b/src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h @@ -0,0 +1,387 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Define the entry points which the NVKMS kernel interface layer + * provides to core NVKMS. 
+ */
+
+#if !defined(_NVIDIA_MODESET_OS_INTERFACE_H_)
+#define _NVIDIA_MODESET_OS_INTERFACE_H_
+
+#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
+#include <linux/types.h> /* size_t */
+#else
+#include <stddef.h> /* size_t */
+#endif
+#include "nvtypes.h" /* NvU8 */
+
+#include "nvkms.h"
+#include "nv_stdarg.h"
+
+enum NvKmsSyncPtOp {
+    /*
+     * Call into Tegra's kernel nvhost driver, and allocate a syncpoint that can
+     * be exclusively used by the caller. Internally, this operation will call
+     * get() to set the initial refcount of the syncpoint to 1.
+     */
+    NVKMS_SYNCPT_OP_ALLOC,
+    /*
+     * Decrease the refcount of an already allocated syncpoint. Once the
+     * refcount drops to 0, the syncpoint will be returned to the free pool that
+     * nvhost manages, so PUT can also be used to balance out an ALLOC.
+     */
+    NVKMS_SYNCPT_OP_PUT,
+    /*
+     * Extract the syncpt id and thresh from the sync-file file descriptor.
+     */
+    NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH,
+    /*
+     * Create a dma-fence from the syncpt id and thresh value, and create a
+     * sync_file file descriptor for the dma-fence handle created.
+     */
+    NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD,
+    /*
+     * Read the minimum value of the given syncpt.
+     */
+    NVKMS_SYNCPT_OP_READ_MINVAL,
+};
+
+enum NvKmsDebugForceColorSpace {
+    NVKMS_DEBUG_FORCE_COLOR_SPACE_NONE,
+    NVKMS_DEBUG_FORCE_COLOR_SPACE_RGB,
+    NVKMS_DEBUG_FORCE_COLOR_SPACE_YUV444,
+    NVKMS_DEBUG_FORCE_COLOR_SPACE_YUV422,
+    NVKMS_DEBUG_FORCE_COLOR_SPACE_MAX,
+};
+
+typedef struct {
+
+    struct {
+        const char *syncpt_name; /* in */
+        NvU32 id;                /* out */
+    } alloc;
+
+    struct {
+        NvU32 id; /* in */
+    } put;
+
+    struct {
+        NvS32 fd;     /* in */
+        NvU32 id;     /* out */
+        NvU32 thresh; /* out */
+    } fd_to_id_and_thresh;
+
+    struct {
+        NvU32 id;     /* in */
+        NvU32 thresh; /* in */
+        NvS32 fd;     /* out */
+    } id_and_thresh_to_fd;
+
+    struct {
+        NvU32 id;     /* in */
+        NvU32 minval; /* out */
+    } read_minval;
+} NvKmsSyncPtOpParams;
+
+enum FailAllocCoreChannelMethod {
+    FAIL_ALLOC_CORE_CHANNEL_RM_SETUP_CORE_CHANNEL = 0,
+    FAIL_ALLOC_CORE_CHANNEL_RESTORE_CONSOLE = 1,
+};
+
+NvBool nvkms_test_fail_alloc_core_channel(enum FailAllocCoreChannelMethod method);
+NvBool nvkms_conceal_vrr_caps(void);
+NvBool nvkms_output_rounding_fix(void);
+NvBool nvkms_disable_hdmi_frl(void);
+NvBool nvkms_disable_vrr_memclk_switch(void);
+NvBool nvkms_hdmi_deepcolor(void);
+NvBool nvkms_vblank_sem_control(void);
+NvBool nvkms_opportunistic_display_sync(void);
+enum NvKmsDebugForceColorSpace nvkms_debug_force_color_space(void);
+NvBool nvkms_enable_overlay_layers(void);
+
+void   nvkms_call_rm    (void *ops);
+void*  nvkms_alloc      (size_t size,
+                         NvBool zero);
+void   nvkms_free       (void *ptr,
+                         size_t size);
+void*  nvkms_memset     (void *ptr,
+                         NvU8 c,
+                         size_t size);
+void*  nvkms_memcpy     (void *dest,
+                         const void *src,
+                         size_t n);
+void*  nvkms_memmove    (void *dest,
+                         const void *src,
+                         size_t n);
+int    nvkms_memcmp     (const void *s1,
+                         const void *s2,
+                         size_t n);
+size_t nvkms_strlen     (const char *s);
+int    nvkms_strcmp     (const char *s1,
+                         const char *s2);
+char*  nvkms_strncpy    (char *dest,
+                         const char *src,
+                         size_t n);
+void   nvkms_usleep     (NvU64 usec);
+NvU64  nvkms_get_usec   (void);
+int    nvkms_copyin     (void *kptr,
+                         NvU64 uaddr,
+                         size_t n);
+int    nvkms_copyout    (NvU64 uaddr,
+                         const void *kptr,
+                         size_t n);
+void   nvkms_yield      (void);
+void   nvkms_dump_stack (void);
+NvBool nvkms_syncpt_op  (enum NvKmsSyncPtOp op,
+                         NvKmsSyncPtOpParams *params);
+int    nvkms_snprintf   (char *str,
+                         size_t size,
+                         const char *format, ...)
+ __attribute__((format (printf, 3, 4))); + +int nvkms_vsnprintf (char *str, + size_t size, + const char *format, + va_list ap); + +#define NVKMS_LOG_LEVEL_INFO 0 +#define NVKMS_LOG_LEVEL_WARN 1 +#define NVKMS_LOG_LEVEL_ERROR 2 + +void nvkms_log (const int level, + const char *gpuPrefix, + const char *msg); + +/*! + * Refcounted pointer to an object that may be freed while references still + * exist. + * + * This structure is intended to be used for nvkms timers to refer to objects + * that may be freed while timers with references to the object are still + * pending. + * + * When the owner of an nvkms_ref_ptr is freed, the teardown code should call + * nvkms_free_ref_ptr(). That marks the pointer as invalid so that later calls + * to nvkms_dec_ref() (i.e. from a workqueue callback) return NULL rather than + * the pointer originally passed to nvkms_alloc_ref_ptr(). + */ +struct nvkms_ref_ptr; + +/*! + * Allocate and initialize a ref_ptr. + * + * The pointer stored in the ref_ptr is initialized to ptr, and its refcount is + * initialized to 1. + */ +struct nvkms_ref_ptr* nvkms_alloc_ref_ptr(void *ptr); + +/*! + * Clear a ref_ptr. + * + * This function sets the pointer stored in the ref_ptr to NULL and drops the + * reference created by nvkms_alloc_ref_ptr(). This function should be called + * when the object pointed to by the ref_ptr is freed. + * + * A caller should make sure that no code that can call nvkms_inc_ref() can + * execute after nvkms_free_ref_ptr() is called. + */ +void nvkms_free_ref_ptr(struct nvkms_ref_ptr *ref_ptr); + +/*! + * Increment the refcount of a ref_ptr. + * + * This function should be used when a pointer to the ref_ptr is stored + * somewhere. For example, when the ref_ptr is used as the argument to + * nvkms_alloc_timer. + * + * This may be called outside of the nvkms_lock, for example by an RM callback. + */ +void nvkms_inc_ref(struct nvkms_ref_ptr *ref_ptr); + +/*! + * Decrement the refcount of a ref_ptr and extract the embedded pointer. + * + * This should be used by code that needs to atomically determine whether the + * object pointed to by the ref_ptr still exists. To prevent the object from + * being destroyed while the current thread is executing, this should be called + * from inside the nvkms_lock. + */ +void* nvkms_dec_ref(struct nvkms_ref_ptr *ref_ptr); + +typedef void nvkms_timer_proc_t(void *dataPtr, NvU32 dataU32); +typedef struct nvkms_timer_t nvkms_timer_handle_t; + +/*! + * Schedule a callback function to be called in the future. + * + * The callback function 'proc' will be called with the arguments + * 'dataPtr' and 'dataU32' at 'usec' (or later) microseconds from now. + * If usec==0, the callback will be scheduled to be called as soon as + * possible. + * + * The callback function is guaranteed to be called back with the + * nvkms_lock held, and in process context. + * + * Returns an opaque handle, nvkms_timer_handle_t*, or NULL on + * failure. If non-NULL, the caller is responsible for caching the + * handle and eventually calling nvkms_free_timer() to free the + * memory. + * + * The nvkms_lock may be held when nvkms_alloc_timer() is called, but + * the nvkms_lock is not required. + */ +nvkms_timer_handle_t* nvkms_alloc_timer (nvkms_timer_proc_t *proc, + void *dataPtr, NvU32 dataU32, + NvU64 usec); + +/*! + * Schedule a callback function to be called in the future. 
+ * + * This function is like nvkms_alloc_timer() except that instead of returning a + * pointer to a structure that the caller should free later, the timer will free + * itself after executing the callback function. This is only intended for + * cases where the caller cannot cache the nvkms_alloc_timer() return value. + */ +NvBool +nvkms_alloc_timer_with_ref_ptr(nvkms_timer_proc_t *proc, + struct nvkms_ref_ptr *ref_ptr, + NvU32 dataU32, NvU64 usec); + +/*! + * Free the nvkms_timer_t object. If the callback function has not + * yet been called, freeing the nvkms_timer_handle_t will guarantee + * that it is not called. + * + * The nvkms_lock must be held when calling nvkms_free_timer(). + */ +void nvkms_free_timer (nvkms_timer_handle_t *handle); + + + +/*! + * Notify the NVKMS kernel interface that the event queue has changed. + * + * \param[in] pOpenKernel This indicates the file descriptor + * ("per-open") of the client whose event queue + * has been updated. This is the pointer + * passed by the kernel interface to nvKmsOpen(). + * \param[in] eventsAvailable If TRUE, a new event has been added to the + * event queue. If FALSE, the last event has + * been removed from the event queue. + */ +void +nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel, + NvBool eventsAvailable); + + +/*! + * Get the "per-open" data (the pointer returned by nvKmsOpen()) + * associated with this fd. + */ +void* nvkms_get_per_open_data(int fd); + + +/*! + * Raise and lower the reference count of the specified GPU. + */ +NvBool nvkms_open_gpu(NvU32 gpuId); +void nvkms_close_gpu(NvU32 gpuId); + + +/*! + * Enumerate nvidia gpus. + */ + +NvU32 nvkms_enumerate_gpus(nv_gpu_info_t *gpu_info); + +/*! + * Availability of write combining support for video memory. + */ + +NvBool nvkms_allow_write_combining(void); + +/*! + * Check if OS supports syncpoints. + */ +NvBool nvkms_kernel_supports_syncpts(void); + +/*! + * Checks whether the fd is associated with an nvidia character device. + */ +NvBool nvkms_fd_is_nvidia_chardev(int fd); + +/*! + * NVKMS interface for kernel space NVKMS clients like KAPI + */ + +struct nvkms_per_open; + +struct nvkms_per_open* nvkms_open_from_kapi +( + struct NvKmsKapiDevice *device +); + +void nvkms_close_from_kapi(struct nvkms_per_open *popen); + +NvBool nvkms_ioctl_from_kapi +( + struct nvkms_per_open *popen, + NvU32 cmd, void *params_address, const size_t params_size +); + +/*! + * Like nvkms_ioctl_from_kapi, but return NV_FALSE instead of waiting if the + * power management read lock cannot be acquired. + */ +NvBool nvkms_ioctl_from_kapi_try_pmlock +( + struct nvkms_per_open *popen, + NvU32 cmd, void *params_address, const size_t params_size +); + +/*! + * APIs for locking. + */ + +typedef struct nvkms_sema_t nvkms_sema_handle_t; + +nvkms_sema_handle_t* + nvkms_sema_alloc (void); +void nvkms_sema_free (nvkms_sema_handle_t *sema); +void nvkms_sema_down (nvkms_sema_handle_t *sema); +void nvkms_sema_up (nvkms_sema_handle_t *sema); + +/*! + * APIs to register/unregister backlight device. 
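+ *
+ * A minimal usage sketch (arguments illustrative): the kernel interface
+ * layer registers a backlight device for a panel and keeps the returned
+ * handle so it can unregister it at teardown:
+ *
+ *     struct nvkms_backlight_device *bd =
+ *         nvkms_register_backlight(gpu_id, display_id, drv_priv,
+ *                                  brightness);
+ *     ...
+ *     if (bd != NULL) {
+ *         nvkms_unregister_backlight(bd);
+ *     }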
+ */
+struct nvkms_backlight_device;
+
+struct nvkms_backlight_device*
+nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv,
+                         NvU32 current_brightness);
+
+void nvkms_unregister_backlight(struct nvkms_backlight_device *nvkms_bd);
+
+#endif /* _NVIDIA_MODESET_OS_INTERFACE_H_ */
+
diff --git a/src/nvidia-modeset/os-interface/include/nvkms.h b/src/nvidia-modeset/os-interface/include/nvkms.h
new file mode 100644
index 0000000..0ac3f79
--- /dev/null
+++ b/src/nvidia-modeset/os-interface/include/nvkms.h
@@ -0,0 +1,127 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NV_KMS_H__
+#define __NV_KMS_H__
+
+#include "nvtypes.h"
+#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
+#include <linux/types.h> /* size_t */
+#else
+#include <stddef.h> /* size_t */
+#endif
+
+#include "nvkms-kapi.h"
+
+typedef struct nvkms_per_open nvkms_per_open_handle_t;
+
+typedef void nvkms_procfs_out_string_func_t(void *data,
+                                            const char *str);
+
+typedef void nvkms_procfs_proc_t(void *data,
+                                 char *buffer, size_t size,
+                                 nvkms_procfs_out_string_func_t *outString);
+
+/* max number of loops to prevent hanging the kernel if an edge case is hit */
+#define NVKMS_READ_FILE_MAX_LOOPS 1000
+/* max size for any file read by the config system */
+#define NVKMS_READ_FILE_MAX_SIZE 8192
+
+/*
+ * The read file callback should allocate a buffer pointed to by *buff, fill it
+ * with the contents of fname, and return the size of the buffer. The buffer is
+ * not guaranteed to be null-terminated. The caller is responsible for freeing
+ * the buffer with nvkms_free, not nvFree.
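+ *
+ * A conforming callback might look like this sketch (helper names
+ * hypothetical; only the nvkms_alloc()/size contract above is prescribed):
+ *
+ *     static size_t MyReadFile(char *fname, char ** const buff)
+ *     {
+ *         size_t size = MyFileSize(fname);
+ *
+ *         *buff = nvkms_alloc(size, NV_FALSE);
+ *         if (*buff == NULL) {
+ *             return 0;
+ *         }
+ *         MyCopyFileContents(fname, *buff, size);
+ *         return size;
+ *     }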
+ */ +typedef size_t nvkms_config_read_file_func_t(char *fname, + char ** const buff); + +typedef struct { + const char *name; + nvkms_procfs_proc_t *func; +} nvkms_procfs_file_t; + +enum NvKmsClientType { + NVKMS_CLIENT_USER_SPACE, + NVKMS_CLIENT_KERNEL_SPACE, +}; + +struct NvKmsPerOpenDev; + +NvBool nvKmsIoctl( + void *pOpenVoid, + NvU32 cmd, + NvU64 paramsAddress, + const size_t paramSize); + +void nvKmsClose(void *pOpenVoid); + +void* nvKmsOpen( + NvU32 pid, + enum NvKmsClientType clientType, + nvkms_per_open_handle_t *pOpenKernel); + +NvBool nvKmsModuleLoad(void); + +void nvKmsModuleUnload(void); + +void nvKmsSuspend(NvU32 gpuId); +void nvKmsResume(NvU32 gpuId); + +void nvKmsGetProcFiles(const nvkms_procfs_file_t **ppProcFiles); + +NvBool nvKmsReadConf(const char *buff, size_t size, + nvkms_config_read_file_func_t readfile); + +void nvKmsKapiHandleEventQueueChange +( + struct NvKmsKapiDevice *device +); + +NvBool nvKmsKapiGetFunctionsTableInternal +( + struct NvKmsKapiFunctionsTable *funcsTable +); + +void nvKmsKapiSuspendResume(NvBool suspend); + +NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness); +NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness); + +NvBool nvKmsOpenDevHasSubOwnerPermissionOrBetter(const struct NvKmsPerOpenDev *pOpenDev); + +NvU32 nvKmsKapiF16ToF32Internal(NvU16 a); + +NvU16 nvKmsKapiF32ToF16Internal(NvU32 a); + +NvU32 nvKmsKapiF32MulInternal(NvU32 a, NvU32 b); + +NvU32 nvKmsKapiF32DivInternal(NvU32 a, NvU32 b); + +NvU32 nvKmsKapiF32AddInternal(NvU32 a, NvU32 b); + +NvU32 nvKmsKapiF32ToUI32RMinMagInternal(NvU32 a, NvBool exact); + +NvU32 nvKmsKapiUI32ToF32Internal(NvU32 a); + +#endif /* __NV_KMS_H__ */ diff --git a/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp b/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp new file mode 100644 index 0000000..53bd73e --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp @@ -0,0 +1,653 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// This file implements the event sink class, which the DisplayPort library +// uses to notify the driver of display devices being connected or +// disconnected. 
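+//
+// Callback flow, as a rough sketch (see the comments on the individual
+// methods below): after a hotplug long pulse the DP library link trains and
+// reads the EDID, then calls newDevice() for each video sink it detects;
+// notifyDetectComplete() fires once the topology has been fully probed (not
+// necessarily once per long pulse), and a lost sink arrives as lostDevice().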
+ +#include "dp/nvdp-connector-event-sink.h" + +#include "nvdp-connector-event-sink.hpp" + +#include "nvkms-types.h" +#include "nvkms-dpy.h" +#include "nvkms-utils.h" +#include "nvkms-vrr.h" + +#include "nvkms-attributes.h" +#include "nvkms-private.h" + +namespace nvkmsDisplayPort { + +static void EnableVRR(NVDpyEvoPtr pDpyEvo); + +ConnectorEventSink::ConnectorEventSink(NVConnectorEvoPtr pConnectorEvo) + : pConnectorEvo(pConnectorEvo) +{ +} + +static NVDpyEvoPtr FindDpyByDevice(NVConnectorEvoPtr pConnectorEvo, + DisplayPort::Device *device) +{ + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVDpyEvoPtr pDpyEvo; + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) { + if (pDpyEvo->dp.pDpLibDevice && + pDpyEvo->dp.pDpLibDevice->device == device) { + return pDpyEvo; + } + } + } + return NULL; +} + +// Looks for a display that matches the given DP device from +// the list of disconnected dpys. +static NVDpyEvoPtr FindMatchingDisconnectedDpy(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo, + NVDPLibDevicePtr pDpLibDevice) +{ + NVDpyEvoPtr pDpyEvo; + + // A match is simply that the display appears on the same connector. + // DP MST devices are matched by topology address in nvGetDPMSTDpy. + const NVDpyIdList dpyIdList = + nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId); + + FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) { + if (!pDpyEvo->dp.pDpLibDevice || !pDpyEvo->dp.pDpLibDevice->isPlugged) { + return pDpyEvo; + } + } + return NULL; +} + +const char *nvDPGetDeviceGUIDStr(DisplayPort::Device *device) +{ + DisplayPort::GUID guid; + + if (!device) { + return NULL; + } + + guid = device->getGUID(); + if (!guid.isGuidZero()) { + static DisplayPort::GUID::StringBuffer sb; + guid.toString(sb); + return sb; + } + + return NULL; +} + +bool nvDPGetDeviceGUID(DisplayPort::Device *device, + NvU8 guidData[DPCD_GUID_SIZE]) +{ + DisplayPort::GUID guid; + + if (!device) { + return false; + } + + guid = device->getGUID(); + if (guid.isGuidZero()) { + return false; + } + + nvkms_memcpy((void*)guidData, (void*)guid.data, sizeof(guid.data)); + + return true; +} + + +static const char *DPGetDevicePortStr(DisplayPort::Device *device, + bool skipLeadingZero) +{ + DisplayPort::Address addr; + + if (!device) { + return NULL; + } + + addr = device->getTopologyAddress(); + if (addr.size() > 0) { + static DisplayPort::Address::StringBuffer sb; + addr.toString(sb, skipLeadingZero); + return sb; + } + + return NULL; +} + + +static void nvDPPrintDeviceInfo(NVConnectorEvoPtr pConnectorEvo, + DisplayPort::Device *device) +{ +#if defined(DEBUG) + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + const char *connectorType; + unsigned major, minor; + const char *tmp; + + device->getDpcdRevision(&major, &minor); + + switch (device->getConnectorType()) { + case DisplayPort::connectorDisplayPort: + connectorType = "DisplayPort"; + break; + + case DisplayPort::connectorHDMI: + connectorType = "HDMI"; + break; + + case DisplayPort::connectorDVI: + connectorType = "DVI"; + break; + + case DisplayPort::connectorVGA: + connectorType = "VGA"; + break; + + default: + connectorType = "unknown"; + break; + } + + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + "%s-%d: new DisplayPort %d.%d device detected", + NvKmsConnectorTypeString(pConnectorEvo->type), + pConnectorEvo->typeIndex, major, minor); + tmp = DPGetDevicePortStr(device, false /* skipLeadingZero */); + if (tmp) { + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + " Address: %s", tmp); + } + tmp = 
nvDPGetDeviceGUIDStr(device); + if (tmp) { + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + " GUID: {%s}", tmp); + } + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + " Connector: %s", connectorType); + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + " Video: %s", device->isVideoSink() ? "yes" : "no"); + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + " Audio: %s", device->isAudioSink() ? "yes" : "no"); +#endif +} + +static void nvDPAddDeviceToActiveGroup(NVDpyEvoPtr pDpyEvo) +{ + const NVDPLibConnectorRec *pDpLibConnector = + pDpyEvo->pConnectorEvo->pDpLibConnector; + const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + NvU32 head; + + // If the device is being driven by the firmware group, then we're just + // tracking it so that it can be shut down by the modeset path, and we + // don't have any timing information for it. + + if (pDpLibConnector->headInFirmware) { + return; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + if (nvDpyIdIsInDpyIdList(pDpyEvo->id, + pDpLibConnector->dpyIdList[head])) { + pDpLibConnector->pGroup[head]->insert( + pDpyEvo->dp.pDpLibDevice->device); + break; + } + } +} + +static bool DpyHasVRREDID(NVDpyEvoPtr pDpyEvo) +{ + return pDpyEvo->parsedEdid.valid && + pDpyEvo->parsedEdid.info.nvdaVsdbInfo.valid && + // As of this writing, only version 1 is defined. + pDpyEvo->parsedEdid.info.nvdaVsdbInfo.vsdbVersion == 1 && + pDpyEvo->parsedEdid.info.nvdaVsdbInfo.vrrData.v1.supportsVrr; +} + +static void EnableVRR(NVDpyEvoPtr pDpyEvo) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + DisplayPort::Device *device = pDpyEvo->dp.pDpLibDevice->device; + const NvBool conceal = nvkms_conceal_vrr_caps(); + const NvBool dispSupportsVrr = nvDispSupportsVrr(pDispEvo) && !conceal; + + // If the dpy is a laptop internal panel and an SBIOS cookie indicates that + // it supports VRR, override its enable flag and timeout. Note that in the + // internal panel scenario, the EDID may not claim VRR support, so honor + // hasPlatformCookie even if DpyHasVRREDID() reports FALSE. + if (!conceal && (pDpyEvo->internal && pDispEvo->vrr.hasPlatformCookie)) { + pDpyEvo->vrr.type = NVKMS_DPY_VRR_TYPE_GSYNC; + return; + } + + // If the DP library already has the monitor VRR-enabled, then we don't need to + // do it again, but we should still update the minimum refresh rate from the + // EDID if one is available. + const bool alreadyEnabled = device->isVrrMonitorEnabled() && + device->isVrrDriverEnabled(); + + if (DpyHasVRREDID(pDpyEvo) && !alreadyEnabled) { + // Perform VRR enablement whenever the monitor supports VRR, but only + // record it as actually enabled if the rest of the system supports VRR. + // Other state such as the availability of NV_CTRL_GSYNC_ALLOWED is + // keyed off of the presence of a dpy with vrr.type != + // NVKMS_DPY_VRR_TYPE_NONE. 
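+        //
+        // Summary of the result (illustrative): vrr.type becomes GSYNC only
+        // when both startVrrEnablement() succeeds and dispSupportsVrr is
+        // true; every other combination leaves it set to NONE.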
+ if (device->startVrrEnablement() && dispSupportsVrr) { + pDpyEvo->vrr.type = NVKMS_DPY_VRR_TYPE_GSYNC; + } else { + pDpyEvo->vrr.type = NVKMS_DPY_VRR_TYPE_NONE; + } + + if ((pDpyEvo->vrr.type == NVKMS_DPY_VRR_TYPE_NONE) && dispSupportsVrr) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "%s: Failed to initialize G-SYNC", + pDpyEvo->name); + } + } else if (pDispEvo->pDevEvo->caps.supportsDP13 && + device->getIgnoreMSACap()) { + // DP monitors indicate Adaptive-Sync support through the + // MSA_TIMING_PAR_IGNORED bit in the DOWN_STREAM_PORT_COUNT register + // (DP spec 1.4a section 2.2.4.1.1) + if (dispSupportsVrr) { + if (nvDpyIsAdaptiveSyncDefaultlisted(pDpyEvo)) { + pDpyEvo->vrr.type = + NVKMS_DPY_VRR_TYPE_ADAPTIVE_SYNC_DEFAULTLISTED; + } else { + pDpyEvo->vrr.type = + NVKMS_DPY_VRR_TYPE_ADAPTIVE_SYNC_NON_DEFAULTLISTED; + } + } else { + pDpyEvo->vrr.type = NVKMS_DPY_VRR_TYPE_NONE; + } + } else { + // Assign pDpyEvo->vrr.type independent of DpyHasVRREDID(), so that if + // the monitor is successfully reenabled by the DP library before it + // calls notifyZombieStateChange(), it'll pick up the correct state. If + // reenablement succeeds, the monitor supports VRR even if we haven't + // read an EDID that says it does yet. + if (alreadyEnabled && dispSupportsVrr) { + pDpyEvo->vrr.type = NVKMS_DPY_VRR_TYPE_GSYNC; + } else { + pDpyEvo->vrr.type = NVKMS_DPY_VRR_TYPE_NONE; + } + } +} + +// when we get this event, the DP lib has done link training and the +// EDID has been read (by the DP lib) +void ConnectorEventSink::newDevice(DisplayPort::Device *device) +{ + NVDPLibDevicePtr pDpLibDevice = NULL; + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVDpyEvoPtr pDpyEvo = NULL; + NvBool dynamicDpyCreated = FALSE; + + // XXX [VM DP MST] Current POR requires we also check/handle: + // - More than 64 DP dpys on a connector = print error. + // - More than 127 dpys on a system = print error. + + nvDPPrintDeviceInfo(pConnectorEvo, device); + + // Only add video sink devices. + if (!device->isVideoSink()) { + return; + } + + // Protect against redundant newDevices() + pDpyEvo = FindDpyByDevice(pConnectorEvo, device); + if (pDpyEvo) { + nvAssert(!"Got (redundant) DP Lib newDevice() on known display, " + "ignoring."); + return; + } + + pDpLibDevice = (NVDPLibDevicePtr)nvCalloc(1, sizeof(*pDpLibDevice)); + if (!pDpLibDevice) { + goto fail; + } + + nvAssert(!device->getOwningGroup()); + + // XXX For DP MST, we'll want to handle dynamic display IDs. For now, + // use the connector's display ID. + pDpLibDevice->device = device; + + if (device->isMultistream()) { + // Get a dynamic pDpy for this device based on its bus topology path. + // This will create one if it doesn't exist. + pDpyEvo = nvGetDPMSTDpyEvo( + pConnectorEvo, + DPGetDevicePortStr(device, true /* skipLeadingZero */), + &dynamicDpyCreated); + + } else { + // Look for a (previously) disconnected pDpy that matches this device. + pDpyEvo = FindMatchingDisconnectedDpy(pDispEvo, pConnectorEvo, + pDpLibDevice); + } + + if (!pDpyEvo) { + goto fail; + } + + nvAssert(pDpyEvo->pConnectorEvo == pConnectorEvo); + + // At this point, the pDpy should no longer be tracking a DP lib device. + if (pDpyEvo->dp.pDpLibDevice) { + nvAssert(!"DP Lib should have already called lostDevice() for this DP " + "device"); + + // Call lost device ourselves, if the DP lib calls this again later, + // we'll ignore it then. 
+        lostDevice(pDpyEvo->dp.pDpLibDevice->device);
+    }
+
+    nvAssert(device->isPlugged());
+
+    pDpLibDevice->isPlugged = TRUE;
+    pDpyEvo->dp.pDpLibDevice = pDpLibDevice;
+
+    // If there's an active group that this pDpy is supposed to be a member of,
+    // insert it now.
+    nvDPAddDeviceToActiveGroup(pDpyEvo);
+
+    if (dynamicDpyCreated) {
+        nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED);
+    }
+
+    nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED);
+
+    return;
+
+ fail:
+    nvAssert(pDpyEvo == NULL);
+    nvFree(pDpLibDevice);
+}
+
+void ConnectorEventSink::lostDevice(DisplayPort::Device *device)
+{
+    NVDpyEvoPtr pDpyEvo;
+
+    // Ignore non-video sink devices.
+    if (!device->isVideoSink()) {
+        return;
+    }
+
+    pDpyEvo = FindDpyByDevice(pConnectorEvo, device);
+    if (!pDpyEvo) {
+        nvAssert(!"Got DP Lib lostDevice() on unknown display.");
+        return;
+    }
+
+    NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice;
+    nvAssert(pDpLibDevice != NULL);
+
+    if (pDpyEvo->vrr.type != NVKMS_DPY_VRR_TYPE_NONE) {
+        device->resetVrrEnablement();
+        pDpyEvo->vrr.type = NVKMS_DPY_VRR_TYPE_NONE;
+    }
+
+    if (device->getOwningGroup()) {
+        device->getOwningGroup()->remove(device);
+    }
+
+    if (pDpLibDevice->isPlugged) {
+        pDpLibDevice->isPlugged = FALSE;
+        nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED);
+    }
+
+    if (device->isMultistream()) {
+        nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED);
+    }
+
+    pDpyEvo->dp.pDpLibDevice = NULL;
+    nvFree(pDpLibDevice);
+}
+
+void ConnectorEventSink::notifyMustDisconnect(DisplayPort::Group *grp)
+{
+}
+
+// notifyDetectComplete() is called when the DP library has done a full detect
+// of the topology. There is no one-to-one relationship between a long pulse
+// and a detectComplete notification.
+void ConnectorEventSink::notifyDetectComplete()
+{
+    pConnectorEvo->detectComplete = TRUE;
+
+    // XXX[DP MST] potentially use this call to notify NV-CONTROL of topology
+    // change;
+
+    // issue: not as current as new/lostDevice and may pose sync issues, but
+    // less chatty.
+}
+
+void ConnectorEventSink::bandwidthChangeNotification(DisplayPort::Device *dev,
+                                                     bool isComplianceMode)
+{
+    nvDPLibUpdateDpyLinkConfiguration(FindDpyByDevice(pConnectorEvo, dev));
+}
+
+void ConnectorEventSink::notifyZombieStateChange(DisplayPort::Device *dev,
+                                                 bool zombied)
+{
+    NVDpyEvoPtr pDpyEvo = FindDpyByDevice(pConnectorEvo, dev);
+    NvBool sendEvent = FALSE;
+
+    if (pDpyEvo == NULL) {
+        return;
+    }
+
+    NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice;
+    if (zombied) {
+        dev->getOwningGroup()->remove(dev);
+
+        if (pDpLibDevice->isPlugged && !dev->isPlugged()) {
+            pDpLibDevice->isPlugged = FALSE;
+            sendEvent = TRUE;
+        }
+
+        // Don't reset VRR enablement here. Though normally NVKMS initiates VRR
+        // enablement, the DP library needs to initiate VRR re-enablement of a
+        // zombie device itself before performing link training or else the
+        // monitor might remain blank if a VRR stream is active when it's
+        // plugged back in.
+    } else {
+        if (!pDpLibDevice->isPlugged && dev->isPlugged()) {
+            pDpLibDevice->isPlugged = TRUE;
+            sendEvent = TRUE;
+        }
+
+        // Determine whether the DP library reenabled VRR on this display.
+        EnableVRR(pDpyEvo);
+
+        nvDPAddDeviceToActiveGroup(pDpyEvo);
+    }
+
+    if (sendEvent) {
+        nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED);
+    }
+}
+
+void ConnectorEventSink::notifyCableOkStateChange(DisplayPort::Device *dev,
+                                                  bool cableOk)
+{
+}
+
+void ConnectorEventSink::notifyHDCPCapDone(DisplayPort::Device *dev,
+                                           bool hdcpCap)
+{
+}
+
+void ConnectorEventSink::notifyMCCSEvent(DisplayPort::Device *dev)
+{
+}
+
+}; // namespace nvkmsDisplayPort
+
+// The functions below are exported to the rest of nvkms. Declare them outside
+// of the 'nvkmsDisplayPort' namespace. Their prototypes in
+// nvdp-connector-event-sink.h are declared as extern "C".
+
+NvBool nvDPLibDpyIsConnected(NVDpyEvoPtr pDpyEvo)
+{
+    nvAssert(nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo));
+
+    return ((pDpyEvo->dp.pDpLibDevice != NULL) &&
+            pDpyEvo->dp.pDpLibDevice->isPlugged);
+}
+
+NvBool nvDPLibDpyIsYuv420ModeSupported(const NVDpyEvoRec *pDpyEvo)
+{
+    DisplayPort::Device *dev = (pDpyEvo->dp.pDpLibDevice != NULL) ?
+        pDpyEvo->dp.pDpLibDevice->device : NULL;
+    return (dev != NULL) && (dev->getSDPExtnForColorimetrySupported());
+}
+
+// Adaptive-Sync is enabled/disabled by setting the MSA_TIMING_PAR_IGNORE_EN
+// bit in the DOWNSPREAD_CTRL register (DP spec 1.4a appendix K)
+void nvDPLibSetAdaptiveSync(const NVDispEvoRec *pDispEvo, NvU32 head,
+                            NvBool enable)
+{
+    const NVConnectorEvoRec *pConnectorEvo =
+        pDispEvo->headState[head].pConnectorEvo;
+    NVDPLibConnectorPtr pDpLibConnector = pConnectorEvo->pDpLibConnector;
+    DisplayPort::Group *pGroup = pDpLibConnector->pGroup[head];
+    DisplayPort::Device *dev;
+
+    for (dev = pGroup->enumDevices(0); dev != NULL;
+         dev = pGroup->enumDevices(dev)) {
+        dev->setIgnoreMSAEnable(enable);
+    }
+}
+
+// Read the link configuration from the connector and store it in the pDpy so
+// it can be sent to clients via NV-CONTROL. Also generate events if the values
+// change.
+void nvDPLibUpdateDpyLinkConfiguration(NVDpyEvoPtr pDpyEvo)
+{
+    if (!pDpyEvo) {
+        return;
+    }
+
+    NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice;
+    DisplayPort::Device *dev = pDpLibDevice ? pDpLibDevice->device : NULL;
+    DisplayPort::Connector *connector =
+        pDpyEvo->pConnectorEvo->pDpLibConnector->connector;
+    unsigned laneCount;
+    NvU64 linkRate;
+    NvU64 linkRate10MHz;
+    enum NvKmsDpyAttributeDisplayportConnectorTypeValue connectorType;
+    NvBool sinkIsAudioCapable;
+
+    if (!dev || !pDpLibDevice->isPlugged) {
+        linkRate = 0;
+        linkRate10MHz = 0;
+        laneCount = NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1;
+        connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_UNKNOWN;
+        sinkIsAudioCapable = FALSE;
+    } else {
+        // XXX[AGP]: Can the path down to a single device have a different link
+        // configuration from the connector itself?
+        connector->getCurrentLinkConfig(laneCount, linkRate);
+
+        // The DisplayPort library multiplies the link rate enum value by
+        // 27000000. Convert back to NV-CONTROL's defines.
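+        // Worked example (illustrative, assuming an HBR2 link): the library
+        // reports the 540 MHz link clock as 20 * 27000000 = 540000000;
+        // dividing by 27000000 recovers 20 (0x14, the NV-CONTROL value), and
+        // 20 * 27 = 540 in 10 MHz units.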
+ linkRate /= 27000000; + linkRate10MHz = linkRate * 27; + + nvkmsDisplayPort::EnableVRR(pDpyEvo); + + switch (pDpLibDevice->device->getConnectorType()) { + case DisplayPort::connectorDisplayPort: + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_DISPLAYPORT; + break; + case DisplayPort::connectorHDMI: + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_HDMI; + break; + case DisplayPort::connectorDVI: + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_DVI; + break; + case DisplayPort::connectorVGA: + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_VGA; + break; + default: + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_UNKNOWN; + break; + } + + sinkIsAudioCapable = pDpLibDevice->device->isAudioSink(); + } + + // The DisplayPort library reports a disabled link as 0 lanes. NV-CONTROL, + // for historical reasons, uses a setting of "1 lane @ disabled" for a + // disabled link, so translate to that. + if (laneCount == 0) { + linkRate = 0; + laneCount = NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1; + } + + // Update pDpy and send events if anything changed. + if (laneCount != pDpyEvo->dp.laneCount) { + pDpyEvo->dp.laneCount = laneCount; + nvSendDpyAttributeChangedEventEvo(pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE, + nvRMLaneCountToNvKms(laneCount)); + } + + if (linkRate != pDpyEvo->dp.linkRate) { + pDpyEvo->dp.linkRate = linkRate; + nvSendDpyAttributeChangedEventEvo(pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_LINK_RATE, + linkRate); + } + + if (linkRate10MHz != pDpyEvo->dp.linkRate10MHz) { + pDpyEvo->dp.linkRate10MHz = linkRate10MHz; + nvSendDpyAttributeChangedEventEvo(pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_LINK_RATE_10MHZ, + linkRate10MHz); + } + + if (connectorType != pDpyEvo->dp.connectorType) { + pDpyEvo->dp.connectorType = connectorType; + nvSendDpyAttributeChangedEventEvo(pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE, + connectorType); + } + + if (sinkIsAudioCapable != pDpyEvo->dp.sinkIsAudioCapable) { + pDpyEvo->dp.sinkIsAudioCapable = sinkIsAudioCapable; + nvSendDpyAttributeChangedEventEvo(pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_SINK_IS_AUDIO_CAPABLE, + sinkIsAudioCapable); + } +} diff --git a/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp b/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp new file mode 100644 index 0000000..7a0c6b8 --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp @@ -0,0 +1,103 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVDP_CONNECTOR_EVENT_SINK_HPP__
+#define __NVDP_CONNECTOR_EVENT_SINK_HPP__
+
+#include
+#include
+
+#include "nvdp-evo-interface.hpp"
+
+
+namespace nvkmsDisplayPort
+{
+
+class ConnectorEventSink : public DisplayPort::Object,
+                           public DisplayPort::Connector::EventSink
+{
+private:
+    const NVConnectorEvoPtr pConnectorEvo;
+
+public:
+    ConnectorEventSink(NVConnectorEvoPtr pConnectorEvo);
+
+    // From DisplayPort::Connector::EventSink
+    virtual void newDevice(DisplayPort::Device *dev);
+    virtual void lostDevice(DisplayPort::Device *dev);
+    virtual void notifyMustDisconnect(DisplayPort::Group *grp);
+    virtual void notifyDetectComplete();
+    virtual void bandwidthChangeNotification(DisplayPort::Device *dev, bool isComplianceMode);
+    virtual void notifyZombieStateChange(DisplayPort::Device *dev, bool zombied);
+    virtual void notifyCableOkStateChange(DisplayPort::Device *dev, bool cableOk);
+    virtual void notifyHDCPCapDone(DisplayPort::Device *dev, bool hdcpCap);
+    virtual void notifyMCCSEvent(DisplayPort::Device *dev);
+};
+
+const char *nvDPGetDeviceGUIDStr(DisplayPort::Device *device);
+bool nvDPGetDeviceGUID(DisplayPort::Device *device, NvU8 guid[DPCD_GUID_SIZE]);
+
+}; // namespace nvkmsDisplayPort
+
+struct _nv_dplibconnector {
+    DisplayPort::Connector *connector;
+    nvkmsDisplayPort::EvoInterface *evoInterface;
+    nvkmsDisplayPort::ConnectorEventSink *evtSink;
+    DisplayPort::MainLink *mainLink;
+    DisplayPort::AuxBus *auxBus;
+
+    NvBool isActive;
+
+    // The VBIOS head is actively driving this connector.
+    bool headInFirmware;
+    NVConnectorEvoRec *pConnectorEvo;
+    // Per-head DpLib group, allocated at the time of connector creation:
+    // in case of multi-streaming, multiple heads can be attached to a single
+    // DP connector, driving distinct DP streams.
+    DisplayPort::Group *pGroup[NVKMS_MAX_HEADS_PER_DISP];
+    NVDpyIdList dpyIdList[NVKMS_MAX_HEADS_PER_DISP];
+    // Attached heads bitmask
+    NvU32 headMask;
+
+    // Connection status plugged/unplugged; gets initialized by
+    // Connector::resume() and gets updated by
+    // Connector::notifyLongPulse().
+    NvBool plugged;
+
+    // Indicates whether the HDMI/DVI half of the connector is active.
+    // If so, the link is being driven by HDMI/DVI, so link training etc.
+    // should be avoided on the DP link of the connector.
+    NvBool linkHandoffEnabled;
+};
+
+struct _nv_dplibdevice {
+    DisplayPort::Device *device;
+    NvBool isPlugged;
+};
+
+struct __nv_dplibmodesetstate {
+    NVDpyIdList dpyIdList;
+    DisplayPort::DpModesetParams modesetParams;
+};
+
+#endif // __NVDP_CONNECTOR_EVENT_SINK_HPP__
diff --git a/src/nvidia-modeset/src/dp/nvdp-connector.cpp b/src/nvidia-modeset/src/dp/nvdp-connector.cpp
new file mode 100644
index 0000000..69eeb95
--- /dev/null
+++ b/src/nvidia-modeset/src/dp/nvdp-connector.cpp
@@ -0,0 +1,1234 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "dp/nvdp-connector.h" +#include "nvdp-timer.hpp" +#include "nvdp-connector-event-sink.hpp" +#include "dp/nvdp-connector-event-sink.h" +#include "dp/nvdp-timer.h" + +#include "nvkms-evo.h" +#include "nvkms-types.h" +#include "nvkms-modeset.h" +#include "nvkms-utils.h" +#include "nvkms-rmapi.h" +#include "nvkms-prealloc.h" + +#include + +// Loop over all display devices attached to a connector. +// Connector::enumDevices(NULL) returns the first device, and then +// enumDevices(previous) returns each subsequent device. +#define for_each_device(connector, dev) \ + for (DisplayPort::Device *(dev) = NULL; ((dev) = (connector)->enumDevices(dev)); ) + +NVDPLibConnectorPtr nvDPCreateConnector(NVConnectorEvoPtr pConnectorEvo) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + DisplayPort::Timer *pTimer = &pDevEvo->dpTimer->timer; + NVDPLibConnectorPtr pNVDpLibConnector = + (NVDPLibConnectorPtr) nvCalloc(1, sizeof(*pNVDpLibConnector)); + + if (!pNVDpLibConnector) { + return NULL; + } + + pNVDpLibConnector->pConnectorEvo = pConnectorEvo; + + // Create the EVO interface object. + pNVDpLibConnector->evoInterface = + new nvkmsDisplayPort::EvoInterface(pConnectorEvo); + if (!pNVDpLibConnector->evoInterface) { + goto fail; + } + + // Create the event sink object. + pNVDpLibConnector->evtSink = + new nvkmsDisplayPort::ConnectorEventSink(pConnectorEvo); + if (!pNVDpLibConnector->evtSink) { + goto fail; + } + + // Create the MainLink object. + pNVDpLibConnector->mainLink = + DisplayPort::MakeEvoMainLink(pNVDpLibConnector->evoInterface, pTimer); + if (!pNVDpLibConnector->mainLink) { + goto fail; + } + + // Create the AuxBus object. 
+ pNVDpLibConnector->auxBus = + DisplayPort::MakeEvoAuxBus(pNVDpLibConnector->evoInterface, pTimer); + if (!pNVDpLibConnector->auxBus) { + goto fail; + } + + pNVDpLibConnector->connector = + DisplayPort::createConnector(pNVDpLibConnector->mainLink, + pNVDpLibConnector->auxBus, + pTimer, + pNVDpLibConnector->evtSink); + if (!pNVDpLibConnector->connector) { + goto fail; + } + + pNVDpLibConnector->connector->setPolicyAssessLinkSafely(TRUE); + + return pNVDpLibConnector; + + fail: + nvDPDestroyConnector(pNVDpLibConnector); + return NULL; +} + +void nvDPNotifyLongPulse(NVConnectorEvoPtr pConnectorEvo, + NvBool connected) +{ + NVDPLibConnectorPtr pNVDpLibConnector = pConnectorEvo->pDpLibConnector; + DisplayPort::Connector *c = pNVDpLibConnector->connector; + + pNVDpLibConnector->plugged = connected; + + if (!pNVDpLibConnector->linkHandoffEnabled && + connected && !nvAssignSOREvo(pConnectorEvo, + nvDpyIdToNvU32(pConnectorEvo->displayId), + FALSE /* b2Heads1Or */, + 0 /* sorExcludeMask */)) { + // DPLib takes care of skipping LT on unassigned SOR Display. + } + + c->notifyLongPulse(connected); + +} + +void nvDPNotifyShortPulse(NVDPLibConnectorPtr pNVDpLibConnector) +{ + DisplayPort::Connector *c = pNVDpLibConnector->connector; + + c->notifyShortPulse(); +} + +void nvDPDestroyConnector(NVDPLibConnectorPtr pNVDpLibConnector) +{ + if (!pNVDpLibConnector) return; + + if (pNVDpLibConnector->connector) { + pNVDpLibConnector->connector->destroy(); + } + if (pNVDpLibConnector->auxBus) { + delete pNVDpLibConnector->auxBus; + } + if (pNVDpLibConnector->mainLink) { + delete pNVDpLibConnector->mainLink; + } + if (pNVDpLibConnector->evoInterface) { + delete pNVDpLibConnector->evoInterface; + } + if (pNVDpLibConnector->evtSink) { + delete pNVDpLibConnector->evtSink; + } + + nvFree(pNVDpLibConnector); +} + +NvBool nvDPIsLinkAwaitingTransition(NVConnectorEvoPtr pConnectorEvo) +{ + if (nvConnectorUsesDPLib(pConnectorEvo)) { + DisplayPort::Connector *c = pConnectorEvo->pDpLibConnector->connector; + return c->isLinkAwaitingTransition(); + } + + return FALSE; +} + +/*! + * Create a new DisplayPort group and populate it with the devices specified by + * dpyIdList. For MST groups, this allocates a dynamic RM display ID. + * Otherwise, it uses the connector's display ID. + */ +static DisplayPort::Group* CreateGroup( + const NVDPLibConnectorRec *pDpLibConnector, + const NVDpyIdList dpyIdList) +{ + NVDpyEvoPtr pDpyEvo; + DisplayPort::Group *pGroup = NULL; + + pGroup = pDpLibConnector->connector->newGroup(); + if (pGroup == NULL) { + return NULL; + } + + // Populate the group + FOR_ALL_EVO_DPYS(pDpyEvo, + dpyIdList, pDpLibConnector->pConnectorEvo->pDispEvo) { + if (pDpyEvo->dp.pDpLibDevice) { + pGroup->insert(pDpyEvo->dp.pDpLibDevice->device); + } + } + + return pGroup; +} + +static NvU32 GetColorDepth( + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + const enum NvKmsDpyAttributeColorBpcValue colorBpc) +{ + switch (colorSpace) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420: + /* + * In YUV420, HW is programmed with RGB color space and full color + * range. The color space conversion and color range compression + * happen in a headSurface composite shader. + */ + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + /* + * For RGB/YCbCr444, each pixel is always 3 components. For + * YCbCr/YUV420, we currently always scan out from the headSurface + * as RGB. 
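+         *
+         * For example (illustrative): 10 bpc RGB/YCbCr444 yields a depth of
+         * 30, while 10 bpc YCbCr422 below yields 20.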
+         */
+        return colorBpc * 3;
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
+        return colorBpc * 2;
+    }
+
+    return 0;
+}
+
+static void SetDPMSATiming(const NVDispEvoRec *pDispEvo,
+                           const NvU32 displayId,
+                           NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS *msaParams,
+                           const NVHwModeTimingsEvo *pTimings)
+{
+    NV0073_CTRL_DP_MSA_PROPERTIES_MASK *featureMask = &msaParams->featureMask;
+    NV0073_CTRL_DP_MSA_PROPERTIES_VALUES *featureValues =
+        &msaParams->featureValues;
+
+    nvkms_memset(msaParams, 0, sizeof(*msaParams));
+
+    /*
+     * Fill in displayId and subDeviceInstance unconditionally.
+     * From CL#27980662, dplib started passing the client provided displayId
+     * to RM for setting MSA properties.
+     * Default value of displayId is 0, leading to RMControl failure in
+     * the displayport library.
+     */
+    msaParams->subDeviceInstance = pDispEvo->displayOwner;
+    msaParams->displayId = displayId;
+
+    if ((displayId == 0x0) ||
+        ((pTimings->yuv420Mode != NV_YUV420_MODE_SW) &&
+         !nvIsAdaptiveSyncDpyVrrType(pTimings->vrr.type))) {
+        return;
+    }
+
+    msaParams->bEnableMSA = 1;
+    msaParams->bCacheMsaOverrideForNextModeset = 1;
+
+    if (pTimings->yuv420Mode == NV_YUV420_MODE_SW) {
+        featureMask->bRasterTotalHorizontal = true;
+        featureMask->bActiveStartHorizontal = true;
+        featureMask->bSurfaceTotalHorizontal = true;
+        featureMask->bSyncWidthHorizontal = true;
+        featureValues->rasterTotalHorizontal = 2 * pTimings->rasterSize.x;
+        featureValues->activeStartHorizontal = 2 * (pTimings->rasterBlankEnd.x + 1);
+        featureValues->surfaceTotalHorizontal = 2 * nvEvoVisibleWidth(pTimings);
+        featureValues->syncWidthHorizontal = 2 * (pTimings->rasterSyncEnd.x + 1);
+    }
+
+    /*
+     * In case of Adaptive-Sync VRR, override the VTotal field of the MSA
+     * (Main Stream Attributes) to work around bug 4164132.
+     */
+    if (nvIsAdaptiveSyncDpyVrrType(pTimings->vrr.type)) {
+        featureMask->bRasterTotalVertical = true;
+        featureValues->rasterTotalVertical = pTimings->rasterSize.y;
+    }
+}
+
+static void InitDpModesetParams(
+    const NVDispEvoRec *pDispEvo,
+    const NvU32 head,
+    const NvU32 displayId,
+    const NVHwModeTimingsEvo *pTimings,
+    const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
+    const enum NvKmsDpyAttributeColorBpcValue colorBpc,
+    DisplayPort::DpModesetParams *pParams)
+{
+    pParams->modesetInfo.pixelClockHz = pTimings->pixelClock * 1000;
+    pParams->modesetInfo.rasterWidth = pTimings->rasterSize.x;
+    pParams->modesetInfo.rasterHeight = pTimings->rasterSize.y;
+    pParams->modesetInfo.rasterBlankStartX = pTimings->rasterBlankStart.x;
+    pParams->modesetInfo.rasterBlankEndX = pTimings->rasterBlankEnd.x;
+    pParams->modesetInfo.surfaceWidth = nvEvoVisibleWidth(pTimings);
+    pParams->modesetInfo.surfaceHeight = nvEvoVisibleHeight(pTimings);
+
+    pParams->modesetInfo.depth =
+        GetColorDepth(colorSpace, colorBpc);
+    pParams->modesetInfo.bitsPerComponent = colorBpc;
+
+    pParams->colorFormat = dpColorFormat_Unknown;
+    switch (colorSpace) {
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
+        /* HW YUV420 mode is only supported for HDMI, not DP */
+        nvAssert(pTimings->yuv420Mode == NV_YUV420_MODE_SW);
+        pParams->modesetInfo.pixelClockHz *= 2;
+        pParams->colorFormat = dpColorFormat_YCbCr420;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
+        pParams->colorFormat = dpColorFormat_YCbCr444;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
+        pParams->colorFormat = dpColorFormat_YCbCr422;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
+        pParams->colorFormat = dpColorFormat_RGB;
+        break;
+    }
+
+    pParams->headIndex = head;
+
+    SetDPMSATiming(pDispEvo, displayId, &pParams->msaparams, pTimings);
+}
+
+NVDPLibModesetStatePtr nvDPLibCreateModesetState(
+    const NVDispEvoRec *pDispEvo,
+    const NvU32 head,
+    const NvU32 displayId,
+    const NVDpyIdList dpyIdList,
+    const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
+    const enum NvKmsDpyAttributeColorBpcValue colorBpc,
+    const NVHwModeTimingsEvo *pTimings,
+    const NVDscInfoEvoRec *pDscInfo)
+{
+    bool found = false;
+    const NVDPLibConnectorRec *pDpLibConnector = NULL;
+    const NVDpyEvoRec *pDpyEvo;
+    NVDPLibModesetStatePtr pDpLibModesetState = NULL;
+
+    FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) {
+        if (!found) {
+            pDpLibConnector = pDpyEvo->pConnectorEvo->pDpLibConnector;
+            found = true;
+        } else if (pDpLibConnector != pDpyEvo->pConnectorEvo->pDpLibConnector) {
+            /* All dpys must belong to the same DP connector */
+            return NULL;
+        }
+    }
+
+    /* Do nothing if the displays are not DP */
+    if (pDpLibConnector == NULL) {
+        return NULL;
+    }
+
+    pDpLibModesetState =
+        (NVDPLibModesetStatePtr) nvCalloc(1, sizeof(*pDpLibModesetState));
+    if (pDpLibModesetState == NULL) {
+        return NULL;
+    }
+
+    InitDpModesetParams(pDispEvo,
+                        head,
+                        displayId,
+                        pTimings,
+                        colorSpace,
+                        colorBpc,
+                        &pDpLibModesetState->modesetParams);
+    if (pDscInfo->type == NV_DSC_INFO_EVO_TYPE_DP) {
+        pDpLibModesetState->modesetParams.modesetInfo.bEnableDsc = true;
+
+        /*
+         * If DSC is enabled, override the normal pixel depth with the
+         * target bpp rate of the DSC encoder, i.e. the rate at which it is
+         * going to output the compressed stream.
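+         * (bitsPerPixelX16 is the target rate in units of 1/16 bpp: a value
+         * of 128, for example, corresponds to 8 bpp.)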
+ */ + pDpLibModesetState->modesetParams.modesetInfo.depth = + pDscInfo->dp.bitsPerPixelX16; + + switch (pDscInfo->dp.dscMode) { + case NV_DSC_EVO_MODE_SINGLE: + pDpLibModesetState->modesetParams.modesetInfo.mode = + DSC_SINGLE; + break; + case NV_DSC_EVO_MODE_DUAL: + pDpLibModesetState->modesetParams.modesetInfo.mode = + DSC_DUAL; + break; + } + } else { + nvAssert(pDscInfo->type == NV_DSC_INFO_EVO_TYPE_DISABLED); + } + + pDpLibModesetState->dpyIdList = dpyIdList; + + return pDpLibModesetState; +} + +void nvDPLibFreeModesetState(NVDPLibModesetStatePtr pDpLibModesetState) +{ + nvFree(pDpLibModesetState); +} + +static void DestructDpLibIsModesetPossibleParamsOneHead( + const NvU32 head, + DisplayPort::DpLinkIsModePossibleParams *pParams) +{ + nvFree(pParams->head[head].pModesetParams); + + if (pParams->head[head].pDscParams != NULL) { + if (pParams->head[head].pDscParams->forcedParams != NULL) { + nvFree(pParams->head[head].pDscParams->forcedParams); + } + nvFree(pParams->head[head].pDscParams->pDscOutParams); + } + nvFree(pParams->head[head].pDscParams); + + if (pParams->head[head].pTarget != NULL) { + pParams->head[head].pTarget->destroy(); + } + + nvkms_memset(&pParams->head[head], 0, sizeof(pParams->head[head])); +} + +static NvBool ConstructDpLibIsModesetPossibleParamsOneHead( + const NVDPLibConnectorRec *pDpLibConnector, + const NvU32 head, + const NvU32 displayId, + const NVDpyIdList dpyIdList, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + const enum NvKmsDpyAttributeColorBpcValue colorBpc, + const struct NvKmsModeValidationParams *pModeValidationParams, + const NVHwModeTimingsEvo *pTimings, + NVDscInfoEvoRec *pDscInfo, + const NvBool b2Heads1Or, + enum NVDpLibIsModePossibleQueryMode queryMode, + DisplayPort::DP_IMP_ERROR *pErrorCode, + DisplayPort::DpLinkIsModePossibleParams *pParams) +{ + const NVConnectorEvoRec *pConnectorEvo = pDpLibConnector->pConnectorEvo; + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + const NVDpyEvoRec *pDpyEvo; + + FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) { + if (pDpyEvo->pConnectorEvo->pDpLibConnector != pDpLibConnector) { + goto failed; + } + } + + pParams->head[head].pTarget = CreateGroup(pDpLibConnector, dpyIdList); + if (pParams->head[head].pTarget == NULL) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to create a DisplayPort group"); + goto failed; + } + + pParams->head[head].pDscParams = (DisplayPort::DscParams*) + nvCalloc(1, sizeof(*pParams->head[head].pDscParams)); + if (pParams->head[head].pDscParams == NULL) { + goto failed; + } + + pParams->head[head].pDscParams->forcedParams = (DSC_INFO::FORCED_DSC_PARAMS*) + nvCalloc(1, sizeof(*pParams->head[head].pDscParams->forcedParams)); + if (pParams->head[head].pDscParams->forcedParams == NULL) { + goto failed; + } + + pParams->head[head].pDscParams->pDscOutParams = (DisplayPort::DscOutParams*) + nvCalloc(1, sizeof(*pParams->head[head].pDscParams->pDscOutParams)); + if (pParams->head[head].pDscParams->pDscOutParams == NULL) { + goto failed; + } + + pParams->head[head].pModesetParams = (DisplayPort::DpModesetParams*) + nvCalloc(1, sizeof(*pParams->head[head].pModesetParams)); + if (pParams->head[head].pModesetParams == NULL) { + goto failed; + } + + InitDpModesetParams(pDispEvo, + head, + displayId, + pTimings, + colorSpace, + colorBpc, + pParams->head[head].pModesetParams); + + if (b2Heads1Or) { + pParams->head[head].pModesetParams->modesetInfo.mode = DSC_DUAL; + } + + pParams->head[head].pDscParams->bCheckWithDsc = true; + switch (queryMode) { + case 
NV_DP_LIB_IS_MODE_POSSIBLE_QUERY_MODE_PRE_IMP:
+        pParams->head[head].pDscParams->forceDsc =
+            DisplayPort::DSC_DEFAULT;
+        switch (pModeValidationParams->dscMode) {
+        case NVKMS_DSC_MODE_FORCE_ENABLE:
+            pParams->head[head].pDscParams->forceDsc =
+                DisplayPort::DSC_FORCE_ENABLE;
+            break;
+        case NVKMS_DSC_MODE_FORCE_DISABLE:
+            nvAssert(!pTimings->dscPassThrough);
+            pParams->head[head].pDscParams->forceDsc =
+                DisplayPort::DSC_FORCE_DISABLE;
+            break;
+        default:
+            if (pTimings->dscPassThrough) {
+                pParams->head[head].pDscParams->forceDsc =
+                    DisplayPort::DSC_FORCE_ENABLE;
+            } else {
+                pParams->head[head].pDscParams->forceDsc =
+                    DisplayPort::DSC_DEFAULT;
+            }
+            break;
+        }
+
+        /*
+         * 2Heads1Or requires either YUV420 or DSC; if b2Heads1Or is
+         * enabled but YUV420 is not, force DSC.
+         */
+        if (b2Heads1Or && (pTimings->yuv420Mode != NV_YUV420_MODE_HW)) {
+            if (pModeValidationParams->dscMode ==
+                    NVKMS_DSC_MODE_FORCE_DISABLE) {
+                goto failed;
+            }
+            pParams->head[head].pDscParams->forceDsc =
+                DisplayPort::DSC_FORCE_ENABLE;
+        }
+        break;
+    case NV_DP_LIB_IS_MODE_POSSIBLE_QUERY_MODE_POST_IMP:
+        if (pDscInfo->type == NV_DSC_INFO_EVO_TYPE_DP) {
+            pParams->head[head].pDscParams->forceDsc =
+                DisplayPort::DSC_FORCE_ENABLE;
+            pParams->head[head].pDscParams->forcedParams->sliceCount =
+                pDscInfo->sliceCount;
+        } else {
+            nvAssert(pDscInfo->type == NV_DSC_INFO_EVO_TYPE_DISABLED);
+            pParams->head[head].pDscParams->forceDsc =
+                DisplayPort::DSC_FORCE_DISABLE;
+        }
+        break;
+    }
+
+    if (pTimings->dscPassThrough) {
+        const NVDpyEvoRec *pDpyEvo =
+            nvGetOneArbitraryDpyEvo(dpyIdList, pDispEvo);
+        const NVT_DISPLAYID_2_0_INFO *pDisplyIdInfo =
+            &pDpyEvo->parsedEdid.info.ext_displayid20;
+        const NVT_DISPLAYID_VENDOR_SPECIFIC *pDisplayIdVS =
+            &pDisplyIdInfo->vendor_specific;
+        const VESA_VSDB_PARSED_INFO *pVesaVSDB = &pDisplayIdVS->vesaVsdb;
+
+        if (pVesaVSDB->pass_through_integer.pass_through_integer_dsc == 0) {
+            goto failed;
+        }
+
+        const NvU32 dscPassThroughBitsPerPixel16 =
+            (pVesaVSDB->pass_through_integer.pass_through_integer_dsc * 16) +
+            pVesaVSDB->pass_through_fractional.pass_through_fraction_dsc;
+
+        if ((pModeValidationParams->dscOverrideBitsPerPixelX16 != 0) &&
+            (pModeValidationParams->dscOverrideBitsPerPixelX16 !=
+             dscPassThroughBitsPerPixel16)) {
+            goto failed;
+        }
+
+        pParams->head[head].pDscParams->bitsPerPixelX16 =
+            dscPassThroughBitsPerPixel16;
+    } else {
+        pParams->head[head].pDscParams->bitsPerPixelX16 =
+            pModeValidationParams->dscOverrideBitsPerPixelX16;
+    }
+    pParams->head[head].pErrorStatus = pErrorCode;
+
+    return TRUE;
+
+failed:
+    DestructDpLibIsModesetPossibleParamsOneHead(head, pParams);
+    return FALSE;
+}
+
+static NvBool DPLibNeedPostIMPDpIsModePossible(
+    const NVDevEvoRec *pDevEvo,
+    const NVDpLibIsModePossibleParamsRec *pParams)
+{
+    return FALSE;
+}
+
+/*
+ * Validate the DP link for all specified NVHwModeTimingsEvos + dpyIdLists +
+ * heads.
+ *
+ * If validation fails, this function returns FALSE and, in *pFailedHeadMask,
+ * the mask of heads for which validation failed.
+ *
+ * If validation succeeds, the DSC fields within the per-head mode parameters
+ * are updated with what is returned by dpLinkIsModePossible().
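+ *
+ * Illustrative usage (a sketch; nvDPValidateModeForDpyEvo() below is a real
+ * caller):
+ *
+ *     NVDpLibIsModePossibleParamsRec params = { };
+ *     NvU32 failedHeadMask = 0;
+ *
+ *     params.queryMode = NV_DP_LIB_IS_MODE_POSSIBLE_QUERY_MODE_PRE_IMP;
+ *     params.head[head].dpyIdList = dpyIdList;
+ *     params.head[head].pTimings = pTimings;
+ *     // ... fill in the remaining per-head fields ...
+ *
+ *     if (!nvDPLibIsModePossible(pDpLibConnector, &params, &failedHeadMask)) {
+ *         // heads set in failedHeadMask failed validation
+ *     }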
+ */ +NvBool nvDPLibIsModePossible(const NVDPLibConnectorRec *pDpLibConnector, + const NVDpLibIsModePossibleParamsRec *pParams, + NvU32 *pFailedHeadMask) +{ + DisplayPort::DpLinkIsModePossibleParams dpImpParams = { }; + DisplayPort::DP_IMP_ERROR dpErrorCode[NV_MAX_HEADS] = { }; + const NVConnectorEvoRec *pConnectorEvo = pDpLibConnector->pConnectorEvo; + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvBool ret = FALSE; + NvU32 head; + + if ((pParams->queryMode == + NV_DP_LIB_IS_MODE_POSSIBLE_QUERY_MODE_POST_IMP) && + !DPLibNeedPostIMPDpIsModePossible(pDevEvo, pParams)) { + return TRUE; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + if (nvDpyIdListIsEmpty(pParams->head[head].dpyIdList)) { + continue; + } + + if (!ConstructDpLibIsModesetPossibleParamsOneHead( + pDpLibConnector, + head, + pParams->head[head].displayId, + pParams->head[head].dpyIdList, + pParams->head[head].colorSpace, + pParams->head[head].colorBpc, + pParams->head[head].pModeValidationParams, + pParams->head[head].pTimings, + pParams->head[head].pDscInfo, + pParams->head[head].b2Heads1Or, + pParams->queryMode, + &dpErrorCode[head], + &dpImpParams)) { + goto done; + } + } + + ret = pDpLibConnector->connector->dpLinkIsModePossible(dpImpParams); + + for (head = 0; head < pDevEvo->numHeads; head++) { + DisplayPort::DscParams *pDpDscParams = + dpImpParams.head[head].pDscParams; + NVDscInfoEvoRec *pDscInfo = pParams->head[head].pDscInfo; + const NvBool b2Heads1Or = pParams->head[head].b2Heads1Or; +#if defined(DEBUG) + const NVHwModeTimingsEvo *pTimings = pParams->head[head].pTimings; +#endif + + if (nvDpyIdListIsEmpty(pParams->head[head].dpyIdList)) { + continue; + } + + if (ret) { + if (b2Heads1Or) { + /* + * 2Heads1OR requires either YUV420 or DSC; + * dpDscParams.bEnableDsc is assigned by compoundQueryAttach(). + */ + nvAssert(pDpDscParams->bEnableDsc || + (pTimings->yuv420Mode == NV_YUV420_MODE_HW)); + } + + if (pDscInfo != NULL) { + switch (pParams->queryMode) { + case NV_DP_LIB_IS_MODE_POSSIBLE_QUERY_MODE_PRE_IMP: + nvkms_memset(pDscInfo, 0, sizeof(*pDscInfo)); + + if (pDpDscParams->bEnableDsc) { + pDscInfo->type = NV_DSC_INFO_EVO_TYPE_DP; + pDscInfo->possibleSliceCountMask = + pDpDscParams->sliceCountMask; + + pDscInfo->dp.dscMode = b2Heads1Or ? 
+ NV_DSC_EVO_MODE_DUAL : NV_DSC_EVO_MODE_SINGLE; + pDscInfo->dp.bitsPerPixelX16 = + pDpDscParams->bitsPerPixelX16; + ct_assert(sizeof(pDscInfo->dp.pps) == + sizeof(pDpDscParams->pDscOutParams->PPS)); + nvkms_memcpy(pDscInfo->dp.pps, + pDpDscParams->pDscOutParams->PPS, + sizeof(pDscInfo->dp.pps)); + } else { + pDscInfo->type = NV_DSC_INFO_EVO_TYPE_DISABLED; + } + break; + case NV_DP_LIB_IS_MODE_POSSIBLE_QUERY_MODE_POST_IMP: + if (pDpDscParams->bEnableDsc) { + nvAssert(pDscInfo->type == NV_DSC_INFO_EVO_TYPE_DP); + nvAssert(pDscInfo->possibleSliceCountMask != 0x0); + nvAssert(pDscInfo->sliceCount != 0x0); + + pDscInfo->dp.bitsPerPixelX16 = + pDpDscParams->bitsPerPixelX16; + ct_assert(sizeof(pDscInfo->dp.pps) == + sizeof(pDpDscParams->pDscOutParams->PPS)); + nvkms_memcpy(pDscInfo->dp.pps, + pDpDscParams->pDscOutParams->PPS, + sizeof(pDscInfo->dp.pps)); + } else { + nvAssert(pDscInfo->type == + NV_DSC_INFO_EVO_TYPE_DISABLED); + } + break; + } + } + } else if (dpErrorCode[head] != DisplayPort::DP_IMP_ERROR_NONE) { + *pFailedHeadMask |= NVBIT(head); + } + } + + nvAssert(ret || (*pFailedHeadMask != 0x0)); + +done: + for (head = 0; head < pDevEvo->numHeads; head++) { + DestructDpLibIsModesetPossibleParamsOneHead(head, &dpImpParams); + } + + return ret; +} + +NvBool nvDPValidateModeForDpyEvo( + const NVDpyEvoRec *pDpyEvo, + const NVDpyAttributeColor *pDpyColor, + const struct NvKmsModeValidationParams *pModeValidationParams, + const NVHwModeTimingsEvo *pTimings, + const NvBool b2Heads1Or, + NVDscInfoEvoRec *pDscInfo) +{ + const NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo; + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU32 failedHeadMask = 0x0; + const NvU32 head = 0; + NvBool ret; + + NVDpLibIsModePossibleParamsRec *pParams = (NVDpLibIsModePossibleParamsRec*) + nvPreallocGet(pDevEvo, PREALLOC_TYPE_DPLIB_IS_MODE_POSSIBLE_PARAMS, + sizeof(*pParams)); + nvAssert(pParams != NULL); + + nvkms_memset(pParams, 0, sizeof(*pParams)); + + nvAssert(nvConnectorUsesDPLib(pConnectorEvo)); + + pParams->queryMode = NV_DP_LIB_IS_MODE_POSSIBLE_QUERY_MODE_NONE; + + pParams->head[head].displayId = 0; + pParams->head[head].dpyIdList = nvAddDpyIdToEmptyDpyIdList(pDpyEvo->id); + pParams->head[head].colorSpace = pDpyColor->format; + pParams->head[head].colorBpc = pDpyColor->bpc; + pParams->head[head].pModeValidationParams = pModeValidationParams; + pParams->head[head].pTimings = pTimings; + pParams->head[head].b2Heads1Or = b2Heads1Or; + pParams->head[head].pDscInfo = pDscInfo; + + ret = nvDPLibIsModePossible(pConnectorEvo->pDpLibConnector, pParams, + &failedHeadMask); + + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_DPLIB_IS_MODE_POSSIBLE_PARAMS); + + return ret; +} + +static +void DPAttachBeginOneHead(NVDPLibConnectorPtr pDpLibConnector, + const NvU32 head, + const NVDPLibModesetStateRec *pDpLibModesetState, + DisplayPort::DpPreModesetParams *pPreModesetParams) +{ + const NVConnectorEvoRec *pConnectorEvo = pDpLibConnector->pConnectorEvo; + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + const DisplayPort::DpModesetParams *pParams = + &pDpLibModesetState->modesetParams; + const NVDpyEvoRec *pDpyEvo = NULL; + + /* Insert active dpys into group */ + pDpLibConnector->dpyIdList[head] = pDpLibModesetState->dpyIdList; + FOR_ALL_EVO_DPYS(pDpyEvo, pDpLibConnector->dpyIdList[head], pDispEvo) { + if (pDpyEvo->dp.pDpLibDevice) { + pDpLibConnector->pGroup[head]->insert( + pDpyEvo->dp.pDpLibDevice->device); + } + } + + pPreModesetParams->head[head].pTarget = 
pDpLibConnector->pGroup[head];
+    pPreModesetParams->head[head].pModesetParams = pParams;
+
+    pPreModesetParams->headMask |= NVBIT(head);
+}
+
+static void DPAttachEndOneHead(NVDPLibConnectorPtr pDpLibConnector, NvU32 head)
+{
+    pDpLibConnector->headMask |= NVBIT(head);
+}
+
+static void DPDetachBeginOneHead(NVDPLibConnectorPtr pDpLibConnector,
+                                 const NvU32 head,
+                                 DisplayPort::DpPreModesetParams *pPreModesetParams)
+{
+    nvAssert((NVBIT(head) & pDpLibConnector->headMask) != 0x0);
+
+    pPreModesetParams->head[head].pTarget = NULL;
+    pPreModesetParams->headMask |= NVBIT(head);
+}
+
+static void DPDetachEndOneHead(NVDPLibConnectorPtr pDpLibConnector, const NvU32 head)
+{
+    if (!pDpLibConnector->headInFirmware) {
+        const NVConnectorEvoRec *pConnectorEvo =
+            pDpLibConnector->pConnectorEvo;
+        const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo;
+        const NVDpyEvoRec *pDpyEvo;
+
+
+        /* Empty inactive group */
+        FOR_ALL_EVO_DPYS(pDpyEvo, pDpLibConnector->dpyIdList[head], pDispEvo) {
+            if (pDpyEvo->dp.pDpLibDevice) {
+                pDpLibConnector->pGroup[head]->remove(
+                    pDpyEvo->dp.pDpLibDevice->device);
+            }
+        }
+        pDpLibConnector->dpyIdList[head] = nvEmptyDpyIdList();
+    } else {
+        nvAssert(pDpLibConnector->pGroup[head]->enumDevices(0) == NULL);
+        pDpLibConnector->headInFirmware = false;
+    }
+
+    pDpLibConnector->headMask &= ~NVBIT(head);
+}
+
+/*
+ * Handles DP stream programming required to be done before committing the
+ * MODESET update. The function should be called for each of the affected
+ * (change in head-connector attachment) DpLib connectors, before commit.
+ */
+void nvDPPreSetMode(NVDPLibConnectorPtr pDpLibConnector,
+                    const NVEvoModesetUpdateState *pModesetUpdateState)
+{
+    const NVConnectorEvoRec *pConnectorEvo =
+        pDpLibConnector->pConnectorEvo;
+    DisplayPort::Connector *connector = pDpLibConnector->connector;
+    NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo;
+    const NvU32 oldHeadMask = pDpLibConnector->headMask;
+    const NvU32 newHeadMask =
+        nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);
+    DisplayPort::DpPreModesetParams preModesetParams = { };
+
+    for (NvU32 head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
+
+        if ((newHeadMask & NVBIT(head)) != 0x0 &&
+            (oldHeadMask & NVBIT(head)) == 0x0) {
+
+            if (pModesetUpdateState->pDpLibModesetState[head] != NULL) {
+                DPAttachBeginOneHead(pDpLibConnector,
+                                     head,
+                                     pModesetUpdateState->pDpLibModesetState[head],
+                                     &preModesetParams);
+            }
+        } else if ((newHeadMask & NVBIT(head)) == 0x0 &&
+                   (oldHeadMask & NVBIT(head)) != 0x0) {
+
+            DPDetachBeginOneHead(pDpLibConnector, head, &preModesetParams);
+
+        }
+    }
+
+    connector->dpPreModeset(preModesetParams);
+}
+
+/*
+ * Handles DP stream programming required to be done after committing the
+ * MODESET update. The function should be called for each of the affected
+ * (change in head-connector attachment) DpLib connectors, after commit.
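+ *
+ * Illustrative call order for an affected connector (a sketch):
+ *
+ *     nvDPPreSetMode(pDpLibConnector, pModesetUpdateState);
+ *     // ... commit the modeset update ...
+ *     nvDPPostSetMode(pDpLibConnector, pModesetUpdateState);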
+ */ +void nvDPPostSetMode(NVDPLibConnectorPtr pDpLibConnector, + const NVEvoModesetUpdateState *pModesetUpdateState) +{ + const NVConnectorEvoRec *pConnectorEvo = + pDpLibConnector->pConnectorEvo; + DisplayPort::Connector *connector = pDpLibConnector->connector; + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + const NvU32 oldHeadMask = pDpLibConnector->headMask; + const NvU32 newHeadMask = + nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo); + + connector->dpPostModeset(); + + for (NvU32 head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + + if ((newHeadMask & NVBIT(head)) != 0x0 && + (oldHeadMask & NVBIT(head)) == 0x0) { + + if (pModesetUpdateState->pDpLibModesetState[head] != NULL) { + DPAttachEndOneHead(pDpLibConnector, head); + } + } else if ((newHeadMask & NVBIT(head)) == 0x0 && + (oldHeadMask & NVBIT(head)) != 0x0) { + + DPDetachEndOneHead(pDpLibConnector, head); + + } + } + + /* + * Update DisplayPort link information for all displays on DpLib connector + */ + if (newHeadMask != oldHeadMask) { + NVDpyEvoPtr pDpyEvo; + + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) { + if (pDpyEvo->pConnectorEvo->pDpLibConnector == pDpLibConnector) { + nvDPLibUpdateDpyLinkConfiguration(pDpyEvo); + } + } + } +} + +void nvDPPause(NVDPLibConnectorPtr pNVDpLibConnector) +{ + DisplayPort::Connector *connector = pNVDpLibConnector->connector; + const NVConnectorEvoRec *pConnectorEvo = pNVDpLibConnector->pConnectorEvo; + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + if (!pNVDpLibConnector->isActive) { + return; + } + + if (pDevEvo->skipConsoleRestore && pNVDpLibConnector->headMask != 0) { + /* Clear vbios DisplayPort RAD scratch registers, see bug 200471345 */ + + nvAssert(nvPopCount32(pNVDpLibConnector->headMask) == 1); + nvAssert(connector->isDp11ProtocolForced()); + + NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS params = {0}; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + + nvAssert(pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A || + pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B); + + params.dpLink = pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A ? 0 : 1; + params.sorIndex = pConnectorEvo->or.primary; + + NvU32 ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug( + pDispEvo, + EVO_LOG_ERROR, + "NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG " + "failed, error code 0x%x", + ret); + } + } + + /* Before pausing DpLib, destroy group and clear head bitmask */ + for (NvU32 head = 0; head < ARRAY_LEN(pNVDpLibConnector->pGroup); head++) { + pNVDpLibConnector->pGroup[head]->destroy(); + } + pNVDpLibConnector->headMask = 0x0; + + connector->pause(); + + pNVDpLibConnector->isActive = false; +} + +/*! + * Determine which head, if any, is driving this connector. + */ +static NvU32 GetFirmwareHead(NVConnectorEvoPtr pConnectorEvo) +{ + NvU32 orIndex = pConnectorEvo->or.primary; + NvU32 ret; + + if (orIndex == NV_INVALID_OR || + pConnectorEvo->or.ownerHeadMask[orIndex] == 0) { + return NV_INVALID_HEAD; + } + + ret = BIT_IDX_32(pConnectorEvo->or.ownerHeadMask[orIndex]); + nvAssert(ret < NV_MAX_HEADS); + + return ret; +} + +/*! + * Determine whether an active connector shares an OR with this connector. 
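+ * (Concretely: whether any connector in this connector's
+ * ddcPartnerDpyIdsList is currently active.)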
+ */ +static bool IsDDCPartnerActive(NVDPLibConnectorPtr pNVDpLibConnector) +{ + NVConnectorEvoRec *pConnectorEvo = + pNVDpLibConnector->pConnectorEvo; + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVConnectorEvoPtr pOtherConnectorEvo; + + FOR_ALL_EVO_CONNECTORS(pOtherConnectorEvo, pDispEvo) { + if (pOtherConnectorEvo != pConnectorEvo && + nvIsConnectorActiveEvo(pOtherConnectorEvo) && + nvDpyIdIsInDpyIdList(pOtherConnectorEvo->displayId, + pConnectorEvo->ddcPartnerDpyIdsList)) { + return true; + } + } + + return false; +} + +NvBool nvDPResume(NVDPLibConnectorPtr pNVDpLibConnector, NvBool plugged) +{ + NVConnectorEvoRec *pConnectorEvo = + pNVDpLibConnector->pConnectorEvo; + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + DisplayPort::Connector *c = pNVDpLibConnector->connector; + const unsigned int firmwareHead = GetFirmwareHead(pConnectorEvo); + bool dpyIdIsDynamic = false; + /* By default allow MST */ + bool allowMST = true; + + pNVDpLibConnector->linkHandoffEnabled = + IsDDCPartnerActive(pNVDpLibConnector); + + if (firmwareHead != NV_INVALID_HEAD) { + NVDpyId firmwareDpyId = nvInvalidDpyId(); + + pNVDpLibConnector->headInFirmware = true; + pNVDpLibConnector->headMask = NVBIT(firmwareHead); + + // Use the first displayId in the boot display list. + // + // TODO: What should we do if more than one dpy ID is listed for a boot + // display? + nvAssert(nvCountDpyIdsInDpyIdList(pDispEvo->vbiosDpyConfig[firmwareHead]) == 1); + firmwareDpyId = + nvNextDpyIdInDpyIdListUnsorted(nvInvalidDpyId(), + pDispEvo->vbiosDpyConfig[firmwareHead]); + + dpyIdIsDynamic = !nvDpyIdsAreEqual(firmwareDpyId, + pConnectorEvo->displayId); + + /* Do not allow MST if firmware driving DP connector in SST mode */ + if (!dpyIdIsDynamic) { + allowMST = false; + } + } + + pConnectorEvo->detectComplete = FALSE; + + pNVDpLibConnector->plugged = plugged; + if (plugged && !pNVDpLibConnector->headInFirmware) { + NvBool ret = nvAssignSOREvo(pConnectorEvo, + nvDpyIdToNvU32(pConnectorEvo->displayId), + FALSE /* b2Heads1Or */, + 0 /* sorExcludeMask */); + + nvAssert(ret); + if (!ret) { + // DP lib skips LT for unassigned SOR. + } + } + + c->resume(pNVDpLibConnector->linkHandoffEnabled, + pNVDpLibConnector->headInFirmware, + plugged, + false /* isUefiSystem */, + firmwareHead, + dpyIdIsDynamic /* bFirmwareLinkUseMultistream */, + true /* bDisableVbiosScratchRegisterUpdate, bug 200471345 */, + allowMST); + + for (NvU32 head = 0; head < ARRAY_LEN(pNVDpLibConnector->pGroup); head++) { + pNVDpLibConnector->pGroup[head] = + pNVDpLibConnector->connector->newGroup(); + + if (pNVDpLibConnector->pGroup[head] == NULL) { + for (NvU32 i = 0; i < head; i++) { + pNVDpLibConnector->pGroup[i]->destroy(); + } + goto failed; + } + } + + pNVDpLibConnector->isActive = true; + return TRUE; + +failed: + pNVDpLibConnector->connector->pause(); + return FALSE; +} + +void nvDPSetAllowMultiStreamingOneConnector( + NVDPLibConnectorPtr pDpLibConnector, + NvBool allowMST) +{ + NVConnectorEvoRec *pConnectorEvo = + pDpLibConnector->pConnectorEvo; + + if (pDpLibConnector->connector->getAllowMultiStreaming() == allowMST) { + return; + } + + /* + * If there is change in MST capability and DPlib re-runs device detection + * routine for plugged sink. Reset 'pConnectorEvo->detectComplete' only for + * MST capable sinks, in order to track completion of that fresh detection + * routine. 
+ */ + if (pDpLibConnector->plugged && + pDpLibConnector->connector->getSinkMultiStreamCap()) { + pConnectorEvo->detectComplete = FALSE; + } + pDpLibConnector->connector->setAllowMultiStreaming(allowMST); +} + +static NvBool IsDpSinkMstCapableForceSst(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + void *pData) +{ + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + const NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pApiHeadState->activeDpys, pDispEvo); + const NVConnectorEvoRec *pConnectorEvo = (pDpyEvo != NULL) ? + pDpyEvo->pConnectorEvo : NULL; + + if ((pConnectorEvo == NULL) || + (pConnectorEvo->pDpLibConnector == NULL)) { + return FALSE; + } + + DisplayPort::Connector *c = + pConnectorEvo->pDpLibConnector->connector; + + return (c->getSinkMultiStreamCap() && !c->getAllowMultiStreaming()); +} + +static NvBool IsDpLinkTransitionWaitingForHeadShutDown( + const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + void *pData) +{ + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + const NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pApiHeadState->activeDpys, pDispEvo); + + return (pDpyEvo != NULL) && + nvDPIsLinkAwaitingTransition(pDpyEvo->pConnectorEvo); +} + +void nvDPSetAllowMultiStreaming(NVDevEvoPtr pDevEvo, NvBool allowMST) +{ + NvBool needUpdate = FALSE; + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NVConnectorEvoPtr pConnectorEvo; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + NVDPLibConnectorPtr pDpLibConnector = + pConnectorEvo->pDpLibConnector; + if (pDpLibConnector && + pDpLibConnector->connector->getAllowMultiStreaming() + != allowMST) { + needUpdate = TRUE; + } + } + } + + if (!needUpdate) { + return; + } + + nvShutDownApiHeads(pDevEvo, pDevEvo->pNvKmsOpenDev, + IsDpSinkMstCapableForceSst, NULL /* pData */, + TRUE /* doRasterLock */); + + /* + * Heads driving MST capable sinks in forced SST mode are now shut down, + * so MST can safely be allowed on all DisplayPort connectors in + * compliance with the DP 1.2 specification. + * + * Section 5.4 and table 2-75 (of section 2.9.3.1) of the DisplayPort 1.2 + * specification do not allow enabling/disabling the MST mode of a sink + * while an active stream is being transmitted (see description of + * CL#25551338). + */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NVConnectorEvoPtr pConnectorEvo; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (!pConnectorEvo->pDpLibConnector) { + continue; + } + nvDPSetAllowMultiStreamingOneConnector( + pConnectorEvo->pDpLibConnector, + allowMST); + } + } + + /* Shut down all DisplayPort heads that need to transition to/from SST. */ + nvShutDownApiHeads(pDevEvo, pDevEvo->pNvKmsOpenDev, + IsDpLinkTransitionWaitingForHeadShutDown, + NULL /* pData */, + TRUE /* doRasterLock */); + + /* + * Handle any pending timers the DP library scheduled to notify us + * about changes in the connected device list. + */ + nvDPFireExpiredTimers(pDevEvo); +} + +enum NVDpLinkMode nvDPGetActiveLinkMode(NVDPLibConnectorPtr pDpLibConnector) +{ + DisplayPort::LinkConfiguration linkConfig = + pDpLibConnector->connector->getActiveLinkConfig(); + if (linkConfig.lanes == 0) { + return NV_DP_LINK_MODE_OFF; + } + return linkConfig.multistream ?
NV_DP_LINK_MODE_MST : + NV_DP_LINK_MODE_SST; +} + +void nvDPSetLinkHandoff(NVDPLibConnectorPtr pDpLibConnector, NvBool enable) +{ + if (enable) { + pDpLibConnector->connector->enableLinkHandsOff(); + pDpLibConnector->linkHandoffEnabled = TRUE; + } else { + pDpLibConnector->linkHandoffEnabled = FALSE; + pDpLibConnector->connector->releaseLinkHandsOff(); + } +} diff --git a/src/nvidia-modeset/src/dp/nvdp-device.cpp b/src/nvidia-modeset/src/dp/nvdp-device.cpp new file mode 100644 index 0000000..f901504 --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-device.cpp @@ -0,0 +1,169 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "dp/nvdp-device.h" +#include "nvdp-connector-event-sink.hpp" +#include "dp/nvdp-connector-event-sink.h" + +#include "nvkms-types.h" +#include "nvkms-rm.h" +#include "nvkms-dpy.h" + +#include "nvctassert.h" + +void nvDPDeviceSetPowerState(NVDpyEvoPtr pDpyEvo, NvBool on) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + if (!pDpyEvo->dp.pDpLibDevice) { + return; + } + + nvAssert(nvDpyUsesDPLib(pDpyEvo)); + + DisplayPort::Device *device = pDpyEvo->dp.pDpLibDevice->device; + + nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__); + device->setPanelPowerParams(on, on); + + /* + * WAR: Some monitors clear the MSA_TIMING_PAR_IGNORE_EN bit in the + * DOWNSPREAD_CTRL DPCD register after changing power state, which will + * cause the monitor to fail to restore the image after powering back on + * while VRR flipping. To work around this, re-enable Adaptive-Sync + * immediately after powering on. 
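+ * Only the head that is actively driving this dpy with VRR timings needs + * the re-enable, so the loop below stops at the first matching head.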
(Bug 200488547) + */ + if (nvDpyIsAdaptiveSync(pDpyEvo) && on) { + NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo; + NVDPLibConnectorPtr pDpLibConnector = pConnectorEvo->pDpLibConnector; + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + if (nvDpyIdIsInDpyIdList(pDpyEvo->id, + pDpLibConnector->dpyIdList[head]) && + (pDispEvo->headState[head].timings.vrr.type != + NVKMS_DPY_VRR_TYPE_NONE)) { + nvDPLibSetAdaptiveSync(pDispEvo, head, TRUE); + break; + } + } + } +} + +unsigned int nvDPGetEDIDSize(const NVDpyEvoRec *pDpyEvo) +{ + NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice; + + nvAssert(nvDpyUsesDPLib(pDpyEvo)); + + if (!pDpLibDevice) { + return 0; + } + + return pDpLibDevice->device->getEDIDSize(); +} + +NvBool nvDPGetEDID(const NVDpyEvoRec *pDpyEvo, void *buffer, unsigned int size) +{ + NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice; + + nvAssert(nvDpyUsesDPLib(pDpyEvo)); + + if (!pDpLibDevice) { + return FALSE; + } + + return pDpLibDevice->device->getEDID((char *)buffer, size); +} + +void nvDPGetDpyGUID(NVDpyEvoPtr pDpyEvo) +{ + NVDPLibDevicePtr pDpLibDevice; + const char *str; + + nvkms_memset(&pDpyEvo->dp.guid, 0, sizeof(pDpyEvo->dp.guid)); + + ct_assert(sizeof(pDpyEvo->dp.guid.buffer) == DPCD_GUID_SIZE); + + if (!nvDpyUsesDPLib(pDpyEvo)) { + return; + } + + pDpLibDevice = pDpyEvo->dp.pDpLibDevice; + if (!pDpLibDevice) { + return; + } + + pDpyEvo->dp.guid.valid = + nvkmsDisplayPort::nvDPGetDeviceGUID(pDpLibDevice->device, + pDpyEvo->dp.guid.buffer) == true; + if (!pDpyEvo->dp.guid.valid) { + return; + } + + str = nvkmsDisplayPort::nvDPGetDeviceGUIDStr(pDpLibDevice->device); + if (str != NULL) { + nvkms_strncpy(pDpyEvo->dp.guid.str, str, sizeof(pDpyEvo->dp.guid.str)); + } else { + pDpyEvo->dp.guid.valid = FALSE; + } +} + +// Perform a fake lostDevice during device teardown. This function is called by +// DpyFree before it deletes a pDpy. +void nvDPDpyFree(NVDpyEvoPtr pDpyEvo) +{ + if (!nvDpyUsesDPLib(pDpyEvo)) { + return; + } + + if (!pDpyEvo->dp.pDpLibDevice) { + return; + } + + DisplayPort::Device *device = pDpyEvo->dp.pDpLibDevice->device; + + pDpyEvo->pConnectorEvo->pDpLibConnector->evtSink->lostDevice(device); +} + +NvBool nvDPDpyIsDscPossible(const NVDpyEvoRec *pDpyEvo) +{ + if (!nvDpyUsesDPLib(pDpyEvo) || + (pDpyEvo->dp.pDpLibDevice == NULL)) { + return FALSE; + } + return pDpyEvo->dp.pDpLibDevice->device->isDSCPossible(); +} + +NvBool nvDPDpyGetDpcdRevision(const NVDpyEvoRec *pDpyEvo, + unsigned int *major, + unsigned int *minor) +{ + if (!nvDpyUsesDPLib(pDpyEvo) || + (pDpyEvo->dp.pDpLibDevice == NULL)) { + return FALSE; + } + + return pDpyEvo->dp.pDpLibDevice->device->getDpcdRevision(major, minor); +} diff --git a/src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp b/src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp new file mode 100644 index 0000000..fa7e823 --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp @@ -0,0 +1,168 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// This file implements the EVO RM interface used by the DisplayPort library. + +#include "nvkms-utils.h" + +#include "nvdp-evo-interface.hpp" + +#include "nvkms-rmapi.h" + +#include "nvdp-connector-event-sink.hpp" + +namespace nvkmsDisplayPort { + +EvoInterface::EvoInterface(NVConnectorEvoPtr pConnectorEvo) + : pConnectorEvo(pConnectorEvo) +{ +} + +NvU32 EvoInterface::rmControl0073(NvU32 command, void * params, + NvU32 paramSize) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + + return nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + command, + params, + paramSize); +} + +NvU32 EvoInterface::rmControl5070(NvU32 command, void * params, + NvU32 paramSize) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + + return nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + command, + params, + paramSize); +} + +/*! + * Look up the value of a particular key in the DisplayPort-specific registry + * corresponding to this connector. These values are provided at device + * allocation time, copied from the client request during nvAllocDevEvo(). + * + * \param[in] key The name of the key to look up. + * + * \return The unsigned 32-bit value set for the key, or 0 if the key is + * not set. 
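+ * Note that a return value of 0 is therefore ambiguous: callers cannot + * distinguish an unset key from a key explicitly set to 0.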
+ */ +NvU32 EvoInterface::getRegkeyValue(const char *key) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NvU32 val; + NvBool found = nvGetRegkeyValue(pDevEvo, key, &val); + + if (found) { + return val; + } else { + return 0; + } +} + +bool EvoInterface::isInbandStereoSignalingSupported() +{ + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVDpyEvoPtr pDpyEvo; + + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) { + if ((pDpyEvo->pConnectorEvo == pConnectorEvo) && + pDpyEvo->dp.inbandStereoSignaling) { + return TRUE; + } + } + + return FALSE; +} + +NvU32 EvoInterface::getSubdeviceIndex() +{ + return pConnectorEvo->pDispEvo->displayOwner; +} + +NvU32 EvoInterface::getDisplayId() +{ + return nvDpyIdToNvU32(pConnectorEvo->displayId); +} + +NvU32 EvoInterface::getSorIndex() +{ + if (pConnectorEvo->pDpLibConnector) { + if (pConnectorEvo->pDpLibConnector->linkHandoffEnabled) { + return DP_INVALID_SOR_INDEX; + } else { + return pConnectorEvo->or.primary; + } + } else { + return pConnectorEvo->or.primary; + } +} + +NvU32 EvoInterface::getLinkIndex() +{ + switch (pConnectorEvo->or.protocol) { + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A: + return 0; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B: + return 1; + } + + nvAssert(!"Unrecognized DP protocol"); + return -1; +} + +NvU32 EvoInterface::monitorDenylistInfo( + NvU32 manufId, NvU32 productId, + DisplayPort::DpMonitorDenylistData *pDenylistData) +{ + // + // WAR for the Sharp internal (eDP) panels in Toshiba/Dell systems: + // override the optimal link configuration to HBR2. + // + // HBR2 is required to drive 4K resolutions, and is supported by the + // DP1.2 specification onward. The panel advertises itself as DP1.2 + // capable, but does not have the ESI address space; this violates the + // specification, so inside the DP library we downgrade the DPCD revision + // to 1.1. With this downgrade in DPCD version, the link rate also gets + // downgraded to HBR. + // + if (manufId == 0x104d && + (productId == 0x1414 || productId == 0x1430)) { + + NvU32 warFlags = DisplayPort::DP_MONITOR_CAPABILITY_DP_OVERRIDE_OPTIMAL_LINK_CONFIG; + + pDenylistData->dpOverrideOptimalLinkConfig.linkRate = 0x14; // HBR2 + pDenylistData->dpOverrideOptimalLinkConfig.laneCount = laneCount_4; // 4 lanes + + return warFlags; + } + + return 0; +} + +}; // namespace nvkmsDisplayPort diff --git a/src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp b/src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp new file mode 100644 index 0000000..e918254 --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVDP_EVO_INTERFACE_HPP__ +#define __NVDP_EVO_INTERFACE_HPP__ + +#include +#include +#include + +namespace nvkmsDisplayPort +{ + +class EvoInterface : public DisplayPort::Object, + public DisplayPort::EvoInterface +{ +public: + const NVConnectorEvoPtr pConnectorEvo; + + EvoInterface(NVConnectorEvoPtr pConnectorEvo); + + // Functions inherited from DisplayPort::EvoInterface + virtual NvU32 rmControl0073(NvU32 command, void * params, NvU32 paramSize); + virtual NvU32 rmControl5070(NvU32 command, void * params, NvU32 paramSize); + + virtual NvU32 getSubdeviceIndex(); + virtual NvU32 getDisplayId(); + virtual NvU32 getSorIndex(); + virtual NvU32 getLinkIndex(); + virtual NvU32 getRegkeyValue(const char *key); + virtual bool isInbandStereoSignalingSupported(); + + virtual NvU32 monitorDenylistInfo( + NvU32 manufId, + NvU32 productId, + DisplayPort::DpMonitorDenylistData *pDenylistData); +}; + +}; // namespace nvkmsDisplayPort + +#endif // __NVDP_EVO_INTERFACE_HPP__ diff --git a/src/nvidia-modeset/src/dp/nvdp-host.cpp b/src/nvidia-modeset/src/dp/nvdp-host.cpp new file mode 100644 index 0000000..275a5e9 --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-host.cpp @@ -0,0 +1,77 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* DisplayPort management routines */ + +#include + +#include "nvkms-utils.h" + +#include "dp_hostimp.h" +#include "dp_printf.h" + +void *dpMalloc(NvLength sz) +{ + return nvAlloc(sz); +} + +void dpFree(void *p) +{ + nvFree(p); +} + +static NVEvoLogType dpSeverityToNvkmsMap(DP_LOG_LEVEL severity) +{ + NVEvoLogType level = EVO_LOG_INFO; + return level; +} + +void dpPrintf(DP_LOG_LEVEL severity, const char *format, ...) 
+{ + if (severity == DP_SILENT) return; + + va_list ap; + va_start(ap, format); + nvVEvoLog(dpSeverityToNvkmsMap(severity), NV_INVALID_GPU_LOG_INDEX, format, ap); + va_end(ap); +} + +void dpDebugBreakpoint(void) +{ + nvAssert(!"DisplayPort library debug breakpoint"); +} + +#if NV_DP_ASSERT_ENABLED +void dpAssert(const char *expression, const char *file, + const char *function, int line) +{ + nvDebugAssert(expression, file, function, line); +} +#endif + +void dpTraceEvent(NV_DP_TRACING_EVENT event, + NV_DP_TRACING_PRIORITY priority, NvU32 numArgs, ...) +{ + // To support DPlib tracing, implement this function. +} + diff --git a/src/nvidia-modeset/src/dp/nvdp-timer.cpp b/src/nvidia-modeset/src/dp/nvdp-timer.cpp new file mode 100644 index 0000000..8739440 --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-timer.cpp @@ -0,0 +1,141 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// This file implements the timer callback mechanism for the DisplayPort +// library. 
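+// Each DP library callback is wrapped in an nvkms timer and also tracked in +// a list, so that nvDPFireExpiredTimers() can fire any already-expired +// callbacks synchronously rather than waiting for the nvkms timer to run.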
+ +#include "nvkms-types.h" + +#include "dp/nvdp-timer.h" +#include "nvdp-timer.hpp" + +namespace nvkmsDisplayPort { + Timer::Callback::Callback(DisplayPort::List *pList, + NVDevEvoPtr pDevEvo, + DisplayPort::RawTimer::Callback *dpCallback, + int ms) + : dpCallback(dpCallback), + ref_ptr(pDevEvo->ref_ptr), + handle(nvkms_alloc_timer(onTimerFired, this, 0, ms * 1000)), + expireTimeUs(nvkms_get_usec() + ms * 1000) + { + if (!allocFailed()) { + pList->insertFront(this); + nvkms_inc_ref(ref_ptr); + } + } + + Timer::Callback::~Callback() + { + nvkms_free_timer(handle); + } + + bool Timer::Callback::allocFailed() const + { + return handle == NULL; + } + + bool Timer::Callback::isExpired(NvU64 timeNowUs) const + { + return timeNowUs >= expireTimeUs; + } + + void Timer::Callback::onTimerFired(void *data, NvU32 dataU32) + { + Timer::Callback *cb = static_cast<Timer::Callback *>(data); + cb->onTimerFired(); + } + + void Timer::Callback::onTimerFired() + { + if (nvkms_dec_ref(ref_ptr)) { + dpCallback->expired(); + } + delete this; + } + + void Timer::Callback::fireIfExpired(NvU64 timeNowUs) + { + if (isExpired(timeNowUs)) { + onTimerFired(); + } + } + + Timer::Timer(NVDevEvoPtr pDevEvo) + : pDevEvo(pDevEvo) + { + } + + void Timer::queueCallback(DisplayPort::RawTimer::Callback *dpCallback, int ms) + { + Callback *cb = new Callback(&timerList, pDevEvo, dpCallback, ms); + nvAssert(cb && !cb->allocFailed()); + if (!cb || cb->allocFailed()) { + delete cb; + return; + } + } + + NvU64 Timer::getTimeUs() + { + return nvkms_get_usec(); + } + + void Timer::sleep(int ms) + { + nvkms_usleep(ms * 1000); + } + + void Timer::fireExpiredTimers() + { + const NvU64 timeNowUs = getTimeUs(); + DisplayPort::ListElement *pElem = timerList.begin(); + DisplayPort::ListElement *pNext; + + while (pElem != timerList.end()) { + Callback *cb = static_cast<Callback *>(pElem); + pNext = pElem->next; + + cb->fireIfExpired(timeNowUs); + + pElem = pNext; + } + } + +}; // namespace nvkmsDisplayPort + +NVDPLibTimerPtr nvDPAllocTimer(NVDevEvoPtr pDevEvo) +{ + NVDPLibTimerPtr pTimer = new _nv_dplibtimer(pDevEvo); + return pTimer; +} + +void nvDPFreeTimer(NVDPLibTimerPtr pTimer) +{ + delete pTimer; +} + +void nvDPFireExpiredTimers(NVDevEvoPtr pDevEvo) +{ + pDevEvo->dpTimer->rawTimer.fireExpiredTimers(); +} diff --git a/src/nvidia-modeset/src/dp/nvdp-timer.hpp b/src/nvidia-modeset/src/dp/nvdp-timer.hpp new file mode 100644 index 0000000..125739e --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-timer.hpp @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVDP_TIMER_HPP__ +#define __NVDP_TIMER_HPP__ + +#include +#include +#include + +namespace nvkmsDisplayPort +{ + +class Timer : public DisplayPort::RawTimer +{ + NVDevEvoPtr pDevEvo; + DisplayPort::List timerList; + + class Callback : public DisplayPort::ListElement { + DisplayPort::RawTimer::Callback *dpCallback; + // ref_ptr to the pDevEvo + nvkms_ref_ptr *ref_ptr; + nvkms_timer_handle_t *handle; + NvU64 expireTimeUs; + + static void onTimerFired(void *data, NvU32 dataU32); + void onTimerFired(); + + public: + // Construct an NVKMS timer callback. Since exceptions cannot be used + // in NVKMS code, callers must call Callback::allocFailed() to query + // whether the constructor succeeded. + // + // Scheduling a callback bumps the refcount on the corresponding + // pDevEvo, so that a device isn't freed until all pending callbacks + // have fired. + Callback(DisplayPort::List *pList, + NVDevEvoPtr pDevEvo, + DisplayPort::RawTimer::Callback *dpCallback, + int ms); + ~Callback(); + + // Returns TRUE if the constructor failed. + bool allocFailed() const; + // Returns TRUE if the timer is ready to fire. + bool isExpired(NvU64 timeNowUs) const; + // Fire the timer if it's ready. + // NOTE: If the timer fires, this deletes it. + void fireIfExpired(NvU64 timeNowUs); + }; +public: + Timer(NVDevEvoPtr pDevEvo); + + virtual void queueCallback(DisplayPort::RawTimer::Callback *cb, int ms); + virtual NvU64 getTimeUs(); + virtual void sleep(int ms); + + void fireExpiredTimers(); +}; + +}; // namespace nvkmsDisplayPort + +struct _nv_dplibtimer : public DisplayPort::Object { + nvkmsDisplayPort::Timer rawTimer; + DisplayPort::Timer timer; + + _nv_dplibtimer(NVDevEvoPtr pDevEvo) + : rawTimer(pDevEvo), timer(&rawTimer) + { + } +}; + +#endif // __NVDP_TIMER_HPP__ diff --git a/src/nvidia-modeset/src/g_nvkms-evo-states.c b/src/nvidia-modeset/src/g_nvkms-evo-states.c new file mode 100644 index 0000000..373241e --- /dev/null +++ b/src/nvidia-modeset/src/g_nvkms-evo-states.c @@ -0,0 +1,2826 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvkms-evo-states.h" + +static NvBool EvoLockStateFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockClientManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockClientPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerHouseSyncManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerHouseSyncManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerHouseSyncPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockClientManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockClientPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateNoLock(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool 
EvoLockStateProhibitLock(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliLastSecondary(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliLastSecondaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliLastSecondaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliLastSecondaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliLastSecondaryLockHeadsFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimary(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockClientPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockServerHouseSyncPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockServerPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static 
NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliSecondary(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliSecondaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliSecondaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliSecondaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliSecondaryLockHeadsFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); + +static NvBool EvoLockStateFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeads; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockClientManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClient; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockClientManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case 
NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockClientPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClient; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServer( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSync; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = 
EvoLockStateFrameLockServerPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServerHouseSync( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServer; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServerHouseSyncManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSync; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServerHouseSyncManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if 
(!queryOnly) { + nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServerHouseSyncPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSync; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServerManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClient; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServer; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServerManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, 
pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServerPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServer; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClient; + } + return TRUE; + + case NV_EVO_ADD_SLI_PRIMARY: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeads; + } + return TRUE; + + case NV_EVO_ADD_SLI_LAST_SECONDARY: + if (!queryOnly) { + nvEvoLockHWStateSliLastSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryLockHeads; + } + return TRUE; + + case NV_EVO_UNLOCK_HEADS: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServer; + } + return TRUE; + + case NV_EVO_ADD_SLI_SECONDARY: + if (!queryOnly) { + nvEvoLockHWStateSliSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondaryLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientManyHeads; + } + return 
TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeads; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockClientManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClient; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockClientManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockClientPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + 
pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClient; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServer( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSync; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSync( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServer; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + 
pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSync; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSync; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServerManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, 
pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClient; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServer; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServerPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + 
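+ /* + * Removing the framelock server while an SLI proxy reference is still + * held keeps the heads locked: the state falls back to + * LockHeadsPlusRef instead of plain LockHeads. + */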
pEvoSubDev->scanLockState = EvoLockStateLockHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServer; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateNoLock( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_PROHIBIT_LOCK: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateProhibitLock; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClient; + } + return TRUE; + + case NV_EVO_ADD_SLI_PRIMARY: + if (!queryOnly) { + nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimary; + } + return TRUE; + + case NV_EVO_ADD_SLI_LAST_SECONDARY: + if (!queryOnly) { + nvEvoLockHWStateSliLastSecondary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliLastSecondary; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServer; + } + return TRUE; + + case NV_EVO_LOCK_HEADS: + if (!queryOnly) { + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeads; + } + return TRUE; + + case NV_EVO_ADD_SLI_SECONDARY: + if (!queryOnly) { + nvEvoLockHWStateSliSecondary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondary; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateProhibitLock( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_PROHIBIT_LOCK_DISABLE: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + default: + 
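+ /* + * Any action not listed above is invalid for this state; returning + * FALSE lets the caller reject the requested transition outright. + */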
return FALSE; + } +} + +static NvBool EvoLockStateSliLastSecondary( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!nvEvoRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliLastSecondaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryFrameLockClient; + } + return TRUE; + + case NV_EVO_REM_SLI: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliLastSecondaryFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!nvEvoUnRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliLastSecondary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliLastSecondary; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliLastSecondaryLockHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!nvEvoRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryLockHeadsFrameLockClient; + } + return TRUE; + + case NV_EVO_REM_SLI: + if (!queryOnly) { + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliLastSecondaryLockHeadsFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryLockHeadsFrameLockClientManyHeads; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!nvEvoUnRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliLastSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliLastSecondaryLockHeadsFrameLockClientManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = 
EvoLockStateSliLastSecondaryLockHeadsFrameLockClient; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimary( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockClient; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServer; + } + return TRUE; + + case NV_EVO_REM_SLI: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimary; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryFrameLockClientPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockClient; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimary; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryFrameLockRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads); + 
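+ /* + * The last framelock reference is gone: reprogram plain SLI-primary + * locking and return to the base SliPrimary state. + */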
pEvoSubDev->scanLockState = EvoLockStateSliPrimary; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryFrameLockServer( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerHouseSync; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimary; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryFrameLockServerHouseSync( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServer; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryFrameLockServerHouseSyncPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerHouseSync; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServer; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryFrameLockServerPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + 
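+ /* + * In a PlusRef state the hardware is already framelocked, so an extra + * reference is tracked purely as a refcount bump with no HW change. + */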
pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimary; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServer; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClient; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServer; + } + return TRUE; + + case NV_EVO_REM_SLI: + if (!queryOnly) { + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = 
EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClient; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClient; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = 
EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServer( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSync; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSync( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServer; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + 
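+ /* + * queryOnly callers (pHeads == NULL) never reach this point: they only + * probe whether the action is legal for the current state. + */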
nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSync; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSync; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = 
EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClient; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServer; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == 
NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServer; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliSecondary( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!nvEvoRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliSecondaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondaryFrameLockClient; + } + return TRUE; + + case NV_EVO_REM_SLI: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliSecondaryFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!nvEvoUnRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliSecondary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondary; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliSecondaryLockHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!nvEvoRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondaryLockHeadsFrameLockClient; + } + return TRUE; + + case NV_EVO_REM_SLI: + if (!queryOnly) { + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliSecondaryLockHeadsFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case 
NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondaryLockHeadsFrameLockClientManyHeads; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!nvEvoUnRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondaryLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliSecondaryLockHeadsFrameLockClientManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondaryLockHeadsFrameLockClient; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +void nvEvoStateStartNoLock( + NVEvoSubDevPtr pEvoSubDev +) +{ + pEvoSubDev->scanLockState = EvoLockStateNoLock; +} + +#if defined(DEBUG) +void nvEvoStateAssertNoLock( + const NVEvoSubDevRec *pEvoSubDev +) +{ + nvAssert(pEvoSubDev->scanLockState == EvoLockStateNoLock); +} +#endif diff --git a/src/nvidia-modeset/src/nvkms-3dvision.c b/src/nvidia-modeset/src/nvkms-3dvision.c new file mode 100644 index 0000000..499148e --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-3dvision.c @@ -0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvkms-types.h" +#include "nvkms-3dvision.h" + +void nv3DVisionAuthenticationEvo(NVDispEvoRec *pDispEvo, const NvU32 apiHead) +{ + return; +} + +void nvDpyCheck3DVisionCapsEvo(NVDpyEvoPtr pDpyEvo) +{ + return; +} + +NvBool +nvPatch3DVisionModeTimingsEvo(NVT_TIMING *pTiming, NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString) +{ + return FALSE; +} + +void nvDisable3DVisionAegis(const NVDpyEvoRec *pDpyEvo) +{ + return; +} + +void nvSendHwModeTimingsToAegisEvo(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead) +{ + return; +} + diff --git a/src/nvidia-modeset/src/nvkms-attributes.c b/src/nvidia-modeset/src/nvkms-attributes.c new file mode 100644 index 0000000..d8ff618 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-attributes.c @@ -0,0 +1,1552 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-evo.h" +#include "nvkms-types.h" +#include "nvkms-attributes.h" +#include "nvkms-dpy.h" +#include "nvkms-framelock.h" +#include "nvkms-vrr.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "nvos.h" +#include "nvkms-stereo.h" +#include "nvkms-hdmi.h" + +#include <ctrl/ctrl0073/ctrl0073dp.h> // NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_* + +/*! + * Set the current backlight brightness for the given pDpyEvo. + * + * \param[in] pDpyEvo The display device whose backlight brightness + * should be assigned. + * \param[in] brightness The backlight brightness value to program + * + * \return TRUE if backlight brightness is available for this pDpyEvo, + * otherwise FALSE.
+ */ +static NvBool DpySetBacklightBrightness(NVDpyEvoRec *pDpyEvo, NvS64 brightness) +{ + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + if (!pDpyEvo->hasBacklightBrightness) { + return FALSE; + } + + if (brightness > NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MAX_VALUE) { + return FALSE; + } + + if (brightness < NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MIN_VALUE) { + return FALSE; + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyEvoGetConnectorId(pDpyEvo); + params.brightness = brightness; + params.brightnessType = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100; + + ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS, + &params, sizeof(params)); + + return (ret == NVOS_STATUS_SUCCESS); +} + +/*! + * Query the current backlight brightness for the given pDpyEvo. + * + * \param[in] pDpyEvo The display device whose backlight brightness + * should be queried. + * \param[out] pBrightness The backlight brightness value + * + * \return TRUE if backlight brightness is available for this pDpyEvo, + * otherwise FALSE. + */ +static NvBool DpyGetBacklightBrightness(const NVDpyEvoRec *pDpyEvo, + NvS64 *pBrightness) +{ + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyEvoGetConnectorId(pDpyEvo); + params.brightnessType = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100; + + ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + nvAssert(params.brightness <= NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MAX_VALUE); + + *pBrightness = params.brightness; + + return TRUE; +} + +/*! + * Populate NvKmsAttributeValidValuesCommonReply for backlight brightness. + * + * \param[in] pDpyEvo The display device whose backlight brightness + * should be queried. + * \param[out] pValidValues The ValidValues structure to populate. + * + * \return TRUE if backlight brightness is available for this pDpy, + * otherwise FALSE. + */ +static NvBool DpyGetBacklightBrightnessValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!pDpyEvo->hasBacklightBrightness) { + return FALSE; + } + + pValidValues->type = NV_KMS_ATTRIBUTE_TYPE_RANGE; + + pValidValues->u.range.min = NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MIN_VALUE; + pValidValues->u.range.max = NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MAX_VALUE; + + return TRUE; +} + +/*! + * Query RM for the current scanline of the given pDpyEvo. + * + * \param[in] pDpyEvo The display device whose scanline + * should be queried. + * \param[out] pScanLine The scanline value. + * + * \return TRUE if the scanline could be queried for this pDpyEvo, + * otherwise FALSE.
+ */ +static NvBool GetScanLine(const NVDpyEvoRec *pDpyEvo, NvS64 *pScanLine) +{ + NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS params = { 0 }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 head, ret; + + if (pDpyEvo->apiHead == NV_INVALID_HEAD) { + return FALSE; + } + + head = nvGetPrimaryHwHead(pDispEvo, pDpyEvo->apiHead); + nvAssert(head != NV_INVALID_HEAD); + + params.subDeviceInstance = pDispEvo->displayOwner; + params.head = head; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE, + &params, sizeof(params)); + + if (ret == NVOS_STATUS_SUCCESS) { + *pScanLine = params.currentScanline; + return TRUE; + } + + return FALSE; +} + +/*! + * Retrieve the current head of the given pDpyEvo. + * + * \param[in] pDpyEvo The display device whose head + * should be queried. + * \param[out] pHead The head value. + * + * \return TRUE. If there is no valid head pHead will + * return NV_INVALID_HEAD + */ +static NvBool GetHead(const NVDpyEvoRec *pDpyEvo, NvS64 *pHead) +{ + *pHead = (NvS64)pDpyEvo->apiHead; + return TRUE; +} + +static NvBool GetHwHead(const NVDpyEvoRec *pDpyEvo, NvS64 *pHead) +{ + NvU32 primaryHwHead = + nvGetPrimaryHwHead(pDpyEvo->pDispEvo, pDpyEvo->apiHead); + *pHead = (NvS64)primaryHwHead; + return TRUE; +} + +static NvBool DitherConfigurationAllowed(const NVDpyEvoRec *pDpyEvo) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + return pDevEvo->hal->caps.supportedDitheringModes != 0; +} + +static void SetDitheringCommon(NVDpyEvoPtr pDpyEvo) +{ + NVEvoUpdateState updateState = { }; + const NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo; + NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + NVDispApiHeadStateEvoRec *pApiHeadState; + NvU32 head; + + if (pDpyEvo->apiHead == NV_INVALID_HEAD) { + return; + } + pApiHeadState = &pDispEvo->apiHeadState[pDpyEvo->apiHead]; + + nvAssert((pApiHeadState->hwHeadsMask) != 0x0 && + (nvDpyIdIsInDpyIdList(pDpyEvo->id, pApiHeadState->activeDpys))); + + nvChooseDitheringEvo(pConnectorEvo, + pApiHeadState->attributes.color.bpc, + pApiHeadState->attributes.color.colorimetry, + &pDpyEvo->requestedDithering, + &pApiHeadState->attributes.dithering); + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + nvSetDitheringEvo(pDispEvo, + head, + &pApiHeadState->attributes.dithering, + &updateState); + } + + nvEvoUpdateAndKickOff(pDpyEvo->pDispEvo, FALSE, &updateState, + TRUE /* releaseElv */); +} + +/*! + * Assigns dithering on all dpys driven by pDpyEvo's head. + */ +static NvBool SetDithering(NVDpyEvoRec *pDpyEvo, NvS64 dithering) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + switch (dithering) { + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_ENABLED: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED: + break; + default: + return FALSE; + } + + pDpyEvo->requestedDithering.state = dithering; + + SetDitheringCommon(pDpyEvo); + + return TRUE; +} + +static NvBool GetDithering(const NVDpyEvoRec *pDpyEvo, NvS64 *pDithering) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pDithering = pDpyEvo->requestedDithering.state; + + return TRUE; +} + +static NvBool GetDitheringGenericValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + return DitherConfigurationAllowed(pDpyEvo); +} + +/*!
+ * Assigns ditheringMode on all dpys driven by pDpyEvo's head. + */ +static NvBool SetDitheringMode(NVDpyEvoRec *pDpyEvo, NvS64 ditheringMode) +{ + NVDevEvoPtr pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + NvU32 mask = (1 << ditheringMode); + + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + if (!(mask & pDevEvo->hal->caps.supportedDitheringModes)) { + return FALSE; + } + + switch (ditheringMode) { + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL: + break; + default: + return FALSE; + } + + pDpyEvo->requestedDithering.mode = ditheringMode; + + SetDitheringCommon(pDpyEvo); + + return TRUE; +} + +static NvBool GetDitheringMode(const NVDpyEvoRec *pDpyEvo, + NvS64 *pDitheringMode) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pDitheringMode = pDpyEvo->requestedDithering.mode; + + return TRUE; +} + +static NvBool GetDitheringModeValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + pValidValues->u.bits.ints = + pDevEvo->hal->caps.supportedDitheringModes; + + return TRUE; +} + +/*! + * Assigns ditheringDepth on all dpys driven by pDpyEvo's head. + */ +static NvBool SetDitheringDepth(NVDpyEvoRec *pDpyEvo, NvS64 ditheringDepth) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + switch (ditheringDepth) { + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_6_BITS: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_8_BITS: + break; + default: + return FALSE; + } + + pDpyEvo->requestedDithering.depth = ditheringDepth; + + SetDitheringCommon(pDpyEvo); + + return TRUE; +} + +static NvBool GetDitheringDepth(const NVDpyEvoRec *pDpyEvo, + NvS64 *pDitheringDepth) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pDitheringDepth = pDpyEvo->requestedDithering.depth; + + return TRUE; +} + +static NvBool GetCurrentDithering(const NVDpyEvoRec *pDpyEvo, + NvS64 *pCurrentDithering) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pCurrentDithering = pDpyEvo->currentAttributes.dithering.enabled; + + return TRUE; +} + +static NvBool GetCurrentDitheringMode(const NVDpyEvoRec *pDpyEvo, + NvS64 *pCurrentDitheringMode) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pCurrentDitheringMode = + pDpyEvo->currentAttributes.dithering.mode; + + return TRUE; +} + +static NvBool GetCurrentDitheringDepth(const NVDpyEvoRec *pDpyEvo, + NvS64 *pCurrentDitheringDepth) +{ + + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pCurrentDitheringDepth = + pDpyEvo->currentAttributes.dithering.depth; + + return TRUE; +} + +static NvBool DigitalVibranceAvailable(const NVDpyEvoRec *pDpyEvo) +{ + return nvDpyEvoIsActive(pDpyEvo); +} + +/*! + * Assigns dvc on all dpys driven by pDpyEvo's head. 
+ */ +static NvBool SetDigitalVibrance(NVDpyEvoRec *pDpyEvo, NvS64 dvc) +{ + NVEvoUpdateState updateState = { }; + NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + NVDispApiHeadStateEvoRec *pApiHeadState; + NvU32 head; + + if ((pDpyEvo->apiHead == NV_INVALID_HEAD) || + !DigitalVibranceAvailable(pDpyEvo)) { + return FALSE; + } + pApiHeadState = &pDispEvo->apiHeadState[pDpyEvo->apiHead]; + + nvAssert((pApiHeadState->hwHeadsMask) != 0x0 && + (nvDpyIdIsInDpyIdList(pDpyEvo->id, pApiHeadState->activeDpys))); + + dvc = NV_MAX(dvc, NV_EVO_DVC_MIN); + dvc = NV_MIN(dvc, NV_EVO_DVC_MAX); + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + nvSetDVCEvo(pDispEvo, head, dvc, &updateState); + } + + nvEvoUpdateAndKickOff(pDpyEvo->pDispEvo, FALSE, &updateState, + TRUE /* releaseElv */); + + pApiHeadState->attributes.dvc = dvc; + + return TRUE; +} + +static NvBool GetDigitalVibrance(const NVDpyEvoRec *pDpyEvo, NvS64 *pDvc) +{ + if (!DigitalVibranceAvailable(pDpyEvo)) { + return FALSE; + } + + *pDvc = pDpyEvo->currentAttributes.dvc; + + return TRUE; +} + +static NvBool GetDigitalVibranceValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!DigitalVibranceAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = NV_EVO_DVC_MIN; + pValidValues->u.range.max = NV_EVO_DVC_MAX; + + return TRUE; +} + +static NvBool ImageSharpeningAvailable(const NVDpyEvoRec *pDpyEvo) +{ + if (!pDpyEvo->pDispEvo->pDevEvo->hal->caps.supportsImageSharpening) { + return FALSE; + } + + if (!nvDpyEvoIsActive(pDpyEvo)) { + return FALSE; + } + + return pDpyEvo->currentAttributes.imageSharpening.available; +} + +/*! + * Assigns imageSharpening on all dpys driven by pDpyEvo's head. 
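+ * As with DVC, the value is clamped to the [NV_EVO_IMAGE_SHARPENING_MIN,
+ * NV_EVO_IMAGE_SHARPENING_MAX] range rather than rejected when out of range.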
+ */ +static NvBool SetImageSharpening(NVDpyEvoRec *pDpyEvo, NvS64 imageSharpening) +{ + NVEvoUpdateState updateState = { }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDispApiHeadStateEvoRec *pApiHeadState; + NvU32 head; + + if ((pDpyEvo->apiHead == NV_INVALID_HEAD) || + !ImageSharpeningAvailable(pDpyEvo)) { + return FALSE; + } + pApiHeadState = &pDispEvo->apiHeadState[pDpyEvo->apiHead]; + + nvAssert((pApiHeadState->hwHeadsMask) != 0x0 && + (nvDpyIdIsInDpyIdList(pDpyEvo->id, pApiHeadState->activeDpys))); + + imageSharpening = NV_MAX(imageSharpening, NV_EVO_IMAGE_SHARPENING_MIN); + imageSharpening = NV_MIN(imageSharpening, NV_EVO_IMAGE_SHARPENING_MAX); + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + nvSetImageSharpeningEvo(pDispEvo, head, imageSharpening, &updateState); + } + + nvEvoUpdateAndKickOff(pDispEvo, FALSE, &updateState, + TRUE /* releaseElv */); + + pApiHeadState->attributes.imageSharpening.value = imageSharpening; + + return TRUE; +} + +static NvBool GetImageSharpening(const NVDpyEvoRec *pDpyEvo, + NvS64 *pImageSharpening) +{ + if (!ImageSharpeningAvailable(pDpyEvo)) { + return FALSE; + } + + *pImageSharpening = pDpyEvo->currentAttributes.imageSharpening.value; + + return TRUE; +} + +static NvBool GetImageSharpeningValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!ImageSharpeningAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = NV_EVO_IMAGE_SHARPENING_MIN; + pValidValues->u.range.max = NV_EVO_IMAGE_SHARPENING_MAX; + + return TRUE; +} + +static NvBool GetImageSharpeningAvailable(const NVDpyEvoRec *pDpyEvo, + NvS64 *pImageSharpeningAvailable) +{ + *pImageSharpeningAvailable = ImageSharpeningAvailable(pDpyEvo); + + return TRUE; +} + +static NvBool GetImageSharpeningDefault(const NVDpyEvoRec *pDpyEvo, + NvS64 *pImageSharpeningDefault) +{ + if (!nvDpyEvoIsActive(pDpyEvo)) { + return FALSE; + } + + *pImageSharpeningDefault = NV_EVO_IMAGE_SHARPENING_DEFAULT; + + return TRUE; +} + +static NvBool ColorSpaceAndRangeAvailable(const NVDpyEvoRec *pDpyEvo) +{ + return ((pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) && + (pDpyEvo->pConnectorEvo->signalFormat != + NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI)); +} + +/*! + * Send infoFrame with new color{Space,Range}. + */ +static void DpyPostColorSpaceOrRangeSetEvo(NVDpyEvoPtr pDpyEvo) +{ + enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace; + enum NvKmsDpyAttributeColorBpcValue colorBpc; + enum NvKmsDpyAttributeColorRangeValue colorRange; + NVEvoUpdateState updateState = { }; + NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + NVDispApiHeadStateEvoRec *pApiHeadState; + NvU32 head; + NvBool colorSpaceChanged = FALSE; + NvBool colorBpcChanged = FALSE; + + if (pDpyEvo->apiHead == NV_INVALID_HEAD) { + return; + } + pApiHeadState = &pDispEvo->apiHeadState[pDpyEvo->apiHead]; + + nvAssert((pApiHeadState->hwHeadsMask) != 0x0 && + (nvDpyIdIsInDpyIdList(pDpyEvo->id, pApiHeadState->activeDpys))); + + /* + * Choose current colorSpace and colorRange based on the current mode + * timings and the requested color space and range. 
+ */ + if (!nvChooseCurrentColorSpaceAndRangeEvo(pDpyEvo, + pApiHeadState->timings.yuv420Mode, + pApiHeadState->attributes.color.colorimetry, + pDpyEvo->requestedColorSpace, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN, + pDpyEvo->requestedColorRange, + &colorSpace, + &colorBpc, + &colorRange)) { + nvAssert(!"Failed to choose current color space and color range"); + return; + } + + colorSpaceChanged = (pApiHeadState->attributes.color.format != colorSpace); + colorBpcChanged = (pApiHeadState->attributes.color.bpc != colorBpc); + + /* For DP and HDMI FRL, neither color space nor bpc can be changed without a modeset */ + if ((nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo) || + (pApiHeadState->timings.protocol == NVKMS_PROTOCOL_SOR_HDMI_FRL)) && + (colorSpaceChanged || colorBpcChanged)) { + return; + } + + if (nvDpyIsHdmiEvo(pDpyEvo) && + (colorBpc > pApiHeadState->attributes.color.bpc)) { + NVDpyAttributeColor tmpDpyColor = pApiHeadState->attributes.color; + + tmpDpyColor.format = colorSpace; + tmpDpyColor.range = colorRange; + tmpDpyColor.bpc = colorBpc; + + /* + * For HDMI FRL, downgrade the selected color bpc to the current color + * bpc so that the current color bpc remains unchanged. + */ + if (pApiHeadState->timings.protocol == NVKMS_PROTOCOL_SOR_HDMI_FRL) { + tmpDpyColor.bpc = pApiHeadState->attributes.color.bpc; + } else { + const NvKmsDpyOutputColorFormatInfo colorFormatsInfo = + nvDpyGetOutputColorFormatInfo(pDpyEvo); + + while (nvHdmiGetEffectivePixelClockKHz(pDpyEvo, + &pApiHeadState->timings, + &tmpDpyColor) > + pDpyEvo->maxSingleLinkPixelClockKHz) { + + if(!nvDowngradeColorSpaceAndBpc(pDpyEvo, + &colorFormatsInfo, + &tmpDpyColor)) { + return; + } + } + } + + pApiHeadState->attributes.color.format = tmpDpyColor.format; + pApiHeadState->attributes.color.range = tmpDpyColor.range; + pApiHeadState->attributes.color.bpc = tmpDpyColor.bpc; + } else { + pApiHeadState->attributes.color.format = colorSpace; + pApiHeadState->attributes.color.range = colorRange; + pApiHeadState->attributes.color.bpc = colorBpc; + } + + /* Update hardware's current colorSpace and colorRange */ + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + enum nvKmsPixelDepth newPixelDepth = + nvEvoDpyColorToPixelDepth(&pApiHeadState->attributes.color); + + nvUpdateCurrentHardwareColorSpaceAndRangeEvo(pDispEvo, + head, + &pApiHeadState->attributes.color, + &updateState); + + if ((newPixelDepth != pDispEvo->headState[head].pixelDepth) || + colorSpaceChanged) { + pDispEvo->headState[head].pixelDepth = newPixelDepth; + nvEvoHeadSetControlOR(pDispEvo, + head, + &pApiHeadState->attributes.color, + &updateState); + } + } + + /* Update InfoFrames as needed. */ + nvUpdateInfoFrames(pDpyEvo); + + // Kick off + nvEvoUpdateAndKickOff(pDispEvo, FALSE, &updateState, TRUE /* releaseElv */); + + // XXX DisplayPort sets color format. 
+} + +static NvU32 DpyGetValidColorSpaces(const NVDpyEvoRec *pDpyEvo) +{ + const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + NvU32 val = (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB); + + if ((nvDpyIsHdmiEvo(pDpyEvo) && + (pDevEvo->caps.hdmiYCbCr422MaxBpc != 0)) || + (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo) && + (pDevEvo->caps.dpYCbCr422MaxBpc != 0))) { + val |= (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr422); + } + + if (nvDpyIsHdmiEvo(pDpyEvo) || + nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + val |= (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr444); + } + + return val; +} + +NvBool nvDpyValidateColorSpace(const NVDpyEvoRec *pDpyEvo, NvS64 value) +{ + NvU32 validMask = DpyGetValidColorSpaces(pDpyEvo); + + if (!ColorSpaceAndRangeAvailable(pDpyEvo) || !(validMask & (1 << value))) { + return FALSE; + } + + return TRUE; +} + +static NvBool SetRequestedColorSpace(NVDpyEvoRec *pDpyEvo, NvS64 value) +{ + if (!nvDpyValidateColorSpace(pDpyEvo, value)) { + return FALSE; + } + + pDpyEvo->requestedColorSpace = value; + + DpyPostColorSpaceOrRangeSetEvo(pDpyEvo); + + return TRUE; +} + +static NvBool GetCurrentColorSpace(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->currentAttributes.color.format; + + return TRUE; +} + +static NvBool GetRequestedColorSpace(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->requestedColorSpace; + + return TRUE; +} + +static NvBool GetCurrentColorSpaceValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + pValidValues->u.bits.ints = DpyGetValidColorSpaces(pDpyEvo); + + /* + * The current color space may be YUV420 depending on the current mode. + * Rather than determine whether this pDpy is capable of driving any + * YUV420 modes, just assume this is always a valid current color space. 
+ */ + pValidValues->u.bits.ints |= + (1 << NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420); + + return TRUE; +} + +static NvBool GetRequestedColorSpaceValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + pValidValues->u.bits.ints = DpyGetValidColorSpaces(pDpyEvo); + + return TRUE; +} + +static NvBool SetRequestedColorRange(NVDpyEvoRec *pDpyEvo, NvS64 value) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + pDpyEvo->requestedColorRange = value; + + DpyPostColorSpaceOrRangeSetEvo(pDpyEvo); + + return TRUE; +} + +static NvBool GetCurrentColorRange(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->currentAttributes.color.range; + + return TRUE; +} + +static NvBool GetRequestedColorRange(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->requestedColorRange; + + return TRUE; +} + +static NvBool GetColorRangeValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + /* + * The preferred color range may always select between full or limited + * range, but the actual resulting color range depends on the current + * color space. Both color ranges are always valid values for both + * preferred and current color range attributes. + */ + pValidValues->u.bits.ints = (1 << NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL) | + (1 << NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED); + + return TRUE; +} + +static NvBool GetCurrentColorBpc(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + *pValue = pDpyEvo->currentAttributes.color.bpc; + return TRUE; +} + +static NvBool GetColorBpcValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + /* If new enum values are added, update the u.bits.ints assignment. 
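+     * As a sketch of how a client consumes this INTBITS reply (assuming
+     * NVBIT(n) expands to (1 << n)): testing u.bits.ints & NVBIT(value)
+     * indicates whether that NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_*
+     * value can be reported.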
*/ + ct_assert(NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_MAX == + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10); + + pValidValues->u.bits.ints = + NVBIT(NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6) | + NVBIT(NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8) | + NVBIT(NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10); + + return TRUE; +} + +static NvBool DigitalSignalAvailable(const NVDpyEvoRec *pDpyEvo) +{ + return pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP; +} + +static NvBool GetDigitalSignal(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!DigitalSignalAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->currentAttributes.digitalSignal; + + return TRUE; +} + +static NvBool GetDigitalSignalValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!DigitalSignalAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTEGER); + + return TRUE; +} + +static NvBool DigitalLinkTypeAvailable(const NVDpyEvoRec *pDpyEvo) +{ + return (nvDpyEvoIsActive(pDpyEvo) && + (pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP)); +} + +static NvBool GetDigitalLinkType(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!DigitalLinkTypeAvailable(pDpyEvo)) { + return FALSE; + } + + if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + *pValue = nvRMLaneCountToNvKms(pDpyEvo->dp.laneCount); + } else { + enum nvKmsTimingsProtocol protocol; + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + NvU32 head = nvGetPrimaryHwHead(pDispEvo, pDpyEvo->apiHead); + + nvAssert(head != NV_INVALID_HEAD); + protocol = pDispEvo->headState[head].timings.protocol; +#if defined(DEBUG) + { + NvU32 h; + FOR_EACH_EVO_HW_HEAD(pDispEvo, pDpyEvo->apiHead, h) { + nvAssert(protocol == pDispEvo->headState[h].timings.protocol); + } + } +#endif + + *pValue = (protocol == NVKMS_PROTOCOL_SOR_DUAL_TMDS) ? 
+ NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_DUAL : + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_SINGLE; + } + + return TRUE; +} + +static NvBool GetDigitalLinkTypeValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!DigitalLinkTypeAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTEGER); + + return TRUE; +} + +static NvBool DisplayportLinkRateAvailable(const NVDpyEvoRec *pDpyEvo) +{ + return ((pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) && + nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)); +} + +static NvBool GetDisplayportLinkRate(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!DisplayportLinkRateAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->dp.linkRate; + + return TRUE; +} + +static NvBool GetDisplayportLinkRateValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!DisplayportLinkRateAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTEGER); + + return TRUE; +} + +static NvBool GetDisplayportLinkRate10MHz(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!DisplayportLinkRateAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->dp.linkRate10MHz; + + return TRUE; +} + +static NvBool GetDisplayportLinkRate10MHzValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!DisplayportLinkRateAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTEGER); + + return TRUE; +} + +static NvBool GetDisplayportConnectorType(const NVDpyEvoRec *pDpyEvo, + NvS64 *pValue) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->dp.connectorType; + + return TRUE; +} + +static NvBool GetDisplayportConnectorTypeValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTEGER); + + return TRUE; +} + +static NvBool GetDisplayportIsMultistream(const NVDpyEvoRec *pDpyEvo, + NvS64 *pValue) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + *pValue = nvDpyEvoIsDPMST(pDpyEvo); + + return TRUE; +} + +static NvBool GetDisplayportIsMultistreamValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_BOOLEAN); + + return TRUE; +} + +static NvBool GetDisplayportSinkIsAudioCapable(const NVDpyEvoRec *pDpyEvo, + NvS64 *pValue) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->dp.sinkIsAudioCapable; + + return TRUE; +} + +static NvBool GetDisplayportSinkIsAudioCapableValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_BOOLEAN); + + return TRUE; +} + +NvS64 nvRMLaneCountToNvKms(NvU32 rmLaneCount) +{ + switch (rmLaneCount) { + case NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_0: + // fallthrough + default: + nvAssert(!"Unexpected DisplayPort lane configuration!"); + // fallthrough + 
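+    // Note: an unexpected or zero lane count is still mapped to a usable
+    // value (single link) below, rather than propagating an error to the
+    // caller.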
case NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1: + return NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_SINGLE; + case NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_2: + return NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_DUAL; + case NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_4: + return NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_QUAD; + } +} + +static NvBool SetStereoEvo(NVDpyEvoPtr pDpyEvo, NvS64 value) +{ + NvBool enable = !!value; + + if (pDpyEvo->apiHead == NV_INVALID_HEAD) { + return FALSE; + } + + return nvSetStereo(pDpyEvo->pDispEvo, pDpyEvo->apiHead, enable); +} + +static NvBool GetStereoEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (pDpyEvo->apiHead == NV_INVALID_HEAD) { + return FALSE; + } + + *pValue = !!nvGetStereo(pDpyEvo->pDispEvo, pDpyEvo->apiHead); + + return TRUE; +} + +static NvBool GetVrrMinRefreshRate(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + NvU32 timeoutMicroseconds; + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + NvU32 head; + + if (pDpyEvo->apiHead == NV_INVALID_HEAD) { + return FALSE; + } + + head = nvGetPrimaryHwHead(pDispEvo, pDpyEvo->apiHead); + nvAssert(head != NV_INVALID_HEAD); + timeoutMicroseconds = + pDispEvo->headState[head].timings.vrr.timeoutMicroseconds; +#if defined(DEBUG) + { + NvU32 h; + FOR_EACH_EVO_HW_HEAD(pDispEvo, pDpyEvo->apiHead, h) { + nvAssert(timeoutMicroseconds == + pDispEvo->headState[h].timings.vrr.timeoutMicroseconds); + } + } +#endif + + *pValue = timeoutMicroseconds ? (1000000 / timeoutMicroseconds) : 0; + + return TRUE; +} + +static NvBool GetVrrMinRefreshRateValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + NvU32 minMinRefreshRate, maxMinRefreshRate; + const NVHwModeTimingsEvo *pTimings; + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + NvU32 head; + + if (pDpyEvo->apiHead == NV_INVALID_HEAD) { + return FALSE; + } + + head = nvGetPrimaryHwHead(pDispEvo, pDpyEvo->apiHead); + nvAssert(head != NV_INVALID_HEAD); + + pTimings = &pDispEvo->headState[head].timings; + + nvGetDpyMinRefreshRateValidValues(pTimings, + pTimings->vrr.type, + pTimings->vrr.timeoutMicroseconds, + &minMinRefreshRate, + &maxMinRefreshRate); +#if defined(DEBUG) + { + NvU32 h; + FOR_EACH_EVO_HW_HEAD(pDispEvo, pDpyEvo->apiHead, h) { + NvU32 tmpMinMinRefreshRate, tmpMaxMinRefreshRate; + + pTimings = &pDispEvo->headState[h].timings; + + nvGetDpyMinRefreshRateValidValues(pTimings, + pTimings->vrr.type, + pTimings->vrr.timeoutMicroseconds, + &tmpMinMinRefreshRate, + &tmpMaxMinRefreshRate); + + nvAssert(tmpMinMinRefreshRate == minMinRefreshRate); + nvAssert(tmpMaxMinRefreshRate == maxMinRefreshRate); + } + } +#endif + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = minMinRefreshRate; + pValidValues->u.range.max = maxMinRefreshRate; + + return TRUE; +} + +static NvBool GetNumberOfHardwareHeadsUsed( + const NVDpyEvoRec *pDpyEvo, + NvS64 *pNumHwHeadsUsed) +{ + *pNumHwHeadsUsed = pDpyEvo->currentAttributes.numberOfHardwareHeadsUsed; + return TRUE; +} +static const struct { + NvBool (*set)(NVDpyEvoPtr pDpyEvo, NvS64 value); + NvBool (*get)(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue); + NvBool (*getValidValues)( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues); + enum NvKmsAttributeType type; +} DpyAttributesDispatchTable[] = { + [NV_KMS_DPY_ATTRIBUTE_BACKLIGHT_BRIGHTNESS] = { + .set = DpySetBacklightBrightness, + .get = DpyGetBacklightBrightness, + .getValidValues = DpyGetBacklightBrightnessValidValues, + .type = 
NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_DPY_ATTRIBUTE_SCANLINE] = { + .set = NULL, + .get = GetScanLine, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_HEAD] = { + .set = NULL, + .get = GetHead, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_HW_HEAD] = { + .set = NULL, + .get = GetHwHead, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING] = { + .set = SetDithering, + .get = GetDithering, + .getValidValues = GetDitheringGenericValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE] = { + .set = SetDitheringMode, + .get = GetDitheringMode, + .getValidValues = GetDitheringModeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH] = { + .set = SetDitheringDepth, + .get = GetDitheringDepth, + .getValidValues = GetDitheringGenericValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING] = { + .set = NULL, + .get = GetCurrentDithering, + .getValidValues = GetDitheringGenericValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE] = { + .set = NULL, + .get = GetCurrentDitheringMode, + .getValidValues = GetDitheringGenericValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH] = { + .set = NULL, + .get = GetCurrentDitheringDepth, + .getValidValues = GetDitheringGenericValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DIGITAL_VIBRANCE] = { + .set = SetDigitalVibrance, + .get = GetDigitalVibrance, + .getValidValues = GetDigitalVibranceValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING] = { + .set = SetImageSharpening, + .get = GetImageSharpening, + .getValidValues = GetImageSharpeningValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING_AVAILABLE] = { + .set = NULL, + .get = GetImageSharpeningAvailable, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING_DEFAULT] = { + .set = NULL, + .get = GetImageSharpeningDefault, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE] = { + .set = SetRequestedColorSpace, + .get = GetRequestedColorSpace, + .getValidValues = GetRequestedColorSpaceValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE] = { + .set = NULL, + .get = GetCurrentColorSpace, + .getValidValues = GetCurrentColorSpaceValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_RANGE] = { + .set = SetRequestedColorRange, + .get = GetRequestedColorRange, + .getValidValues = GetColorRangeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_RANGE] = { + .set = NULL, + .get = GetCurrentColorRange, + .getValidValues = GetColorRangeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC] = { + .set = NULL, + .get = GetCurrentColorBpc, + .getValidValues = GetColorBpcValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL] = { + .set = NULL, + .get = GetDigitalSignal, + .getValidValues = GetDigitalSignalValidValues, + .type = 
NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE] = { + .set = NULL, + .get = GetDigitalLinkType, + .getValidValues = GetDigitalLinkTypeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_LINK_RATE] = { + .set = NULL, + .get = GetDisplayportLinkRate, + .getValidValues = GetDisplayportLinkRateValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_LINK_RATE_10MHZ] = { + .set = NULL, + .get = GetDisplayportLinkRate10MHz, + .getValidValues = GetDisplayportLinkRate10MHzValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE] = { + .set = NULL, + .get = GetDisplayportConnectorType, + .getValidValues = GetDisplayportConnectorTypeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_IS_MULTISTREAM] = { + .set = NULL, + .get = GetDisplayportIsMultistream, + .getValidValues = GetDisplayportIsMultistreamValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_SINK_IS_AUDIO_CAPABLE] = { + .set = NULL, + .get = GetDisplayportSinkIsAudioCapable, + .getValidValues = GetDisplayportSinkIsAudioCapableValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG] = { + .set = nvSetFrameLockDisplayConfigEvo, + .get = nvGetFrameLockDisplayConfigEvo, + .getValidValues = nvGetFrameLockDisplayConfigValidValuesEvo, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_RASTER_LOCK] = { + .set = NULL, + .get = nvQueryRasterLockEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_UPDATE_FLIPLOCK] = { + .set = nvSetFlipLockEvo, + .get = nvGetFlipLockEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_UPDATE_STEREO] = { + .set = SetStereoEvo, + .get = GetStereoEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_DPMS] = { + .set = nvRmSetDpmsEvo, + .get = NULL, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_VRR_MIN_REFRESH_RATE] = { + .set = NULL, + .get = GetVrrMinRefreshRate, + .getValidValues = GetVrrMinRefreshRateValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_DPY_ATTRIBUTE_NUMBER_OF_HARDWARE_HEADS_USED] = { + .set = NULL, + .get = GetNumberOfHardwareHeadsUsed, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, +}; + +/*! + * Set pParams->attribute to pParams->value on the given dpy. + */ +NvBool nvSetDpyAttributeEvo(NVDpyEvoPtr pDpyEvo, + struct NvKmsSetDpyAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(DpyAttributesDispatchTable)) { + return FALSE; + } + + if (DpyAttributesDispatchTable[index].set == NULL) { + return FALSE; + } + + if (!DpyAttributesDispatchTable[index].set(pDpyEvo, + pParams->request.value)) { + return FALSE; + } + + if (pDpyEvo->apiHead != NV_INVALID_HEAD) { + NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[pDpyEvo->apiHead]; + NVDpyEvoRec *pClonedDpyEvo; + + /* + * The current attributes state should be consistent across all cloned + * dpys. 
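+         * A successful set on one dpy therefore refreshes the cached
+         * attributes of every dpy active on the same api head, not just
+         * the dpy that was targeted.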
+ */ + FOR_ALL_EVO_DPYS(pClonedDpyEvo, pApiHeadState->activeDpys, pDispEvo) { + nvDpyUpdateCurrentAttributes(pClonedDpyEvo); + } + } else { + nvDpyUpdateCurrentAttributes(pDpyEvo); + } + + return TRUE; +} + +/*! + * Get the value of pParams->attribute on the given dpy. + */ +NvBool nvGetDpyAttributeEvo(const NVDpyEvoRec *pDpyEvo, + struct NvKmsGetDpyAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(DpyAttributesDispatchTable)) { + return FALSE; + } + + if (DpyAttributesDispatchTable[index].get == NULL) { + return FALSE; + } + + return DpyAttributesDispatchTable[index].get(pDpyEvo, + &pParams->reply.value); +} + +/*! + * Get the valid values of pParams->attribute on the given dpy. + */ +NvBool nvGetDpyAttributeValidValuesEvo( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsGetDpyAttributeValidValuesParams *pParams) +{ + NvU32 index = pParams->request.attribute; + struct NvKmsAttributeValidValuesCommonReply *pReply = + &pParams->reply.common; + + if (index >= ARRAY_LEN(DpyAttributesDispatchTable)) { + return FALSE; + } + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + pReply->readable = (DpyAttributesDispatchTable[index].get != NULL); + pReply->writable = (DpyAttributesDispatchTable[index].set != NULL); + + pReply->type = DpyAttributesDispatchTable[index].type; + + /* + * The getValidValues function provides three important things: + * - If type==Range, then assigns reply::u::range. + * - If type==IntBits, then assigns reply::u:bits::ints. + * - If the attribute is not currently available, returns FALSE. + * If the getValidValues function is NULL, assume the attribute is + * available. The type must not be something that requires assigning + * to reply::u. + */ + if (DpyAttributesDispatchTable[index].getValidValues == NULL) { + nvAssert(pReply->type != NV_KMS_ATTRIBUTE_TYPE_INTBITS); + nvAssert(pReply->type != NV_KMS_ATTRIBUTE_TYPE_RANGE); + return TRUE; + } + + return DpyAttributesDispatchTable[index].getValidValues(pDpyEvo, pReply); +} diff --git a/src/nvidia-modeset/src/nvkms-conf.c b/src/nvidia-modeset/src/nvkms-conf.c new file mode 100644 index 0000000..c6117e8 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-conf.c @@ -0,0 +1,599 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "nvkms-utils.h"
+#include "nvkms-dpy-override.h"
+
+static NvBool CharIsSpace(char c)
+{
+    return (c == ' '  || /* space */
+            c == '\f' || /* form feed */
+            c == '\n' || /* line feed */
+            c == '\r' || /* carriage return */
+            c == '\t' || /* horizontal tab */
+            c == '\v');  /* vertical tab */
+}
+
+/* Display Override Conf File Initialization */
+static inline NvU8 AsciiHexCharToNibble(char hex)
+{
+    if (hex >= '0' && hex <= '9') {
+        return hex - '0';
+    } else if (hex >= 'A' && hex <= 'F') {
+        return hex - 'A' + 0xA;
+    } else if (hex >= 'a' && hex <= 'f') {
+        return hex - 'a' + 0xA;
+    } else {
+        return 0xFF;
+    }
+}
+
+/* bin should be able to hold at least (size / 2) bytes */
+static size_t AsciiHexToBin(char *bin, const char *hex, size_t size)
+{
+    size_t hi, bi;
+    NvBool highNibble = TRUE;
+    NvU8 upnib = 0;
+
+    if (hex == NULL || bin == NULL) {
+        return 0;
+    }
+    if (bin >= hex && bin <= hex + size) {
+        /*
+         * Although decoding ASCII hex in place should, in theory, work fine
+         * (each pair of characters encodes one byte), it's best to avoid
+         * the complexity.
+         */
+        nvAssert(!"ASCII hex would be decoded in place.");
+        return 0;
+    }
+
+    for (hi = 0, bi = 0; hi < size; hi++) {
+        NvU8 nibble;
+
+        if (CharIsSpace(hex[hi])) {
+            continue;
+        }
+
+        nibble = AsciiHexCharToNibble(hex[hi]);
+        if (nibble > 0xF) {
+            return 0; /* invalid character */
+        }
+
+        if (highNibble) {
+            upnib = nibble << 4;
+        } else {
+            bin[bi++] = upnib | nibble;
+        }
+        highNibble = !highNibble;
+    }
+
+    if (!highNibble) {
+        return 0; /* odd number of hex chars */
+    }
+
+    return bi;
+}
+
+#define SUBPARSER(name) static NvBool Subparser_ ## name( \
+    const char *key,                                      \
+    const char *value,                                    \
+    nvkms_config_read_file_func_t readfile)
+
+SUBPARSER(override)
+{
+    size_t i;
+    size_t idLen = 0;
+    size_t valLen = nvkms_strlen(value);
+
+    NvU32 gpuId = NV0000_CTRL_GPU_INVALID_ID;
+    const char *name = NULL;
+
+    static nv_gpu_info_t devs[NV_MAX_GPUS];
+    NvU32 numdevs = nvkms_enumerate_gpus(devs);
+
+    if (key[0] == '[') {
+        while (key[idLen] != 0 && key[idLen] != ']') {
+            idLen++;
+        }
+    }
+
+    while (key[idLen] != 0 && key[idLen] != '.') {
+        idLen++;
+    }
+
+    /* Get the GPU ID */
+    if (key[0] == '[') {
+        /* GPU PCI Address */
+        NvU32 domain = 0xFFFFFFFF, temp = 0;
+        NvU8 bus = 0xFF, slot = 0xFF, function = 0xFF;
+        size_t j;
+
+        for (i = 1, j = 0; (key[i] != 0) && (key[i] != ']') && (j < 4); i++) {
+            if (key[i] == ':') {
+                if (j == 0) {
+                    domain = temp;
+                } else if (j == 1) {
+                    bus = (NvU8)temp;
+                } else {
+                    break;
+                }
+                j++;
+                temp = 0;
+            } else if (key[i] == '.') {
+                if (j == 2) {
+                    slot = (NvU8)temp;
+                } else {
+                    break;
+                }
+                j++;
+                temp = 0;
+            } else {
+                NvU8 nibble = AsciiHexCharToNibble(key[i]);
+                if (nibble > 0xF) {
+                    break;
+                }
+                temp <<= 4;
+                temp |= nibble;
+            }
+        }
+
+        if (j == 3) {
+            function = (NvU8)temp;
+            j++;
+        }
+
+        if (j != 4) {
+            nvEvoLog(EVO_LOG_WARN, "Syntax error in override entry: "
+                     "Error reading PCI Address: %.*s "
+                     "(%zu fields read)", (int)idLen, key, j);
+            nvClearDpyOverrides();
+            return FALSE;
+        }
+
+        for (size_t i = 0; i < numdevs; i++) {
+            if (devs[i].pci_info.domain == domain &&
+                devs[i].pci_info.bus == bus &&
+                devs[i].pci_info.slot == slot &&
+                devs[i].pci_info.function == function) {
+
+                gpuId = devs[i].gpu_id;
+                break;
+            }
+        }
+        if (gpuId == NV0000_CTRL_GPU_INVALID_ID) {
+            nvEvoLog(EVO_LOG_WARN, "Error in override entry: "
+                     "No GPU with PCI Address %04x:%02hhx:%02hhx.%hhx",
+                     domain, bus, slot, function);
+            nvClearDpyOverrides();
+            return FALSE;
+        }
+    } else if (idLen == 5 &&
nvkms_memcmp(key, "tegra", 5) == 0){ + gpuId = NVKMS_DEVICE_ID_TEGRA; + } else { + nvEvoLog(EVO_LOG_WARN, "Syntax error in override entry: " + "Unknown GPU designator: %.*s", (int)idLen, key); + return FALSE; + } + + /* Get the dpy name */ + if (key[idLen] != '.' || key[idLen + 1] == 0) { + nvEvoLog(EVO_LOG_WARN, "Syntax error in override entry: " + "Expected '.' followed by display name"); + return FALSE; + } + name = key + (idLen + 1); + + /* Get the edid */ + if (value[0] == '"' && value[valLen - 1] == '"') { + valLen -= 2; + value += 1; + } + + if (value[0] == '/') { + size_t bufflen = 0; + char *buff = NULL; + + if (readfile != NULL) { + char *fname = nvCalloc(valLen + 1, 1); + nvkms_memcpy(fname, value, valLen); + bufflen = readfile(fname, &buff); + nvFree(fname); + } + + if (bufflen == 0) { + nvEvoLog(EVO_LOG_WARN, "Error in override entry: " + "Error opening EDID file: %.*s", (int)valLen, value); + nvClearDpyOverrides(); + return FALSE; + } + + nvCreateDpyOverride(gpuId, name, TRUE, buff, bufflen); + nvkms_free(buff, bufflen); + } else if (valLen == 12 && nvkms_memcmp(value, "disconnected", 12) == 0) { + nvCreateDpyOverride(gpuId, name, FALSE, NULL, 0); + } else { + char *edidBuf = nvCalloc(valLen / 2, 1); + size_t decoded = AsciiHexToBin(edidBuf, value, valLen); + if (decoded == 0) { + nvEvoLog(EVO_LOG_WARN, "Error in override entry: " + "Error decoding ASCII hex: %.*s\n", (int)valLen, value); + nvFree(edidBuf); + nvClearDpyOverrides(); + return FALSE; + } + + nvCreateDpyOverride(gpuId, name, TRUE, edidBuf, decoded); + nvFree(edidBuf); + } + + return TRUE; +} + +#undef SUBPARSER + +static NvBool Subparse( + const char *keyhead, + const char *keytail, + const char *value, + nvkms_config_read_file_func_t readfile) +{ +#define SUBPARSE(name) if (nvkms_strcmp(keyhead, #name) == 0) { \ + return Subparser_ ## name(keytail, value, readfile); \ +} + SUBPARSE(override); + nvEvoLog(EVO_LOG_WARN, "Error reading configuration file: " + "Parser not found for key: %s.%s", keyhead, keytail); + return FALSE; +#undef SUBPARSE +} + +#define ST_KEYHEAD 0 /* read head of key */ +#define ST_KEYTAIL 1 /* read tail of key */ +#define ST_VALUE 2 /* read value of key */ +#define ST_EQUALS 3 /* expect '=' */ +#define ST_SEND 4 /* send values */ + +/* + * This function implements a state machine: + * KEYHEAD -> KEYTAIL : when a '.' is read + * KEYHEAD -> EQUALS : when whitespace is read + * KEYHEAD -> VALUE : when '=' is read + * KEYTAIL -> EQUALS : when whitespace is read + * KEYTAIL -> VALUE : when '=' is read + * EQUALS -> VALUE : when '=' is read + * VALUE -> SEND : when whitespace is read + * SEND -> KEYHEAD : consumes no input but sends strings to further parser functions + * + * Here, whitespace means any block of one or more whitespace characters, as + * determined by the CharIsSpace() helper function. Whitespace only causes state + * transitions if at least one non-whitespace character has been read in that + * state + * + * When a '#' is read, all input is consumed until a '\n' is read or the end of + * the buffer is reached. This input is treated as whitespace for the purposes + * of the above state transitions and is considered part of any surrounding + * whitespace + * + * When a '"', '(', '{', '[', or '<' is read while in KEYHEAD, KEYTAIL, or VALUE, + * a substate is entered which consumes all input until a corresponding '"', ')', + * '}', ']', or '>' is read. While in this substate, '.', whitespace, '=', and '#' + * do not cause state transitions. 
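+ *
+ * As an illustrative sketch (the PCI address and dpy name here are
+ * hypothetical), the input line
+ *
+ *   override.[0000:01:00.0].DP-2 = disconnected
+ *
+ * is tokenized as keyhead="override" (the first '.' transitions KEYHEAD to
+ * KEYTAIL), keytail="[0000:01:00.0].DP-2" (the '[' ... ']' paired block
+ * prevents the characters of the PCI address from being treated specially),
+ * and value="disconnected"; SEND then hands the three strings to Subparse().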
+ *
+ * While in KEYHEAD, KEYTAIL, or VALUE, all consumed input which does not
+ * cause a state transition is added to the corresponding string to be
+ * passed to further parser functions.
+ *
+ * NOTE: buff is not guaranteed to be NULL-terminated
+ */
+NvBool nvKmsReadConf(
+    const char *buff,
+    size_t size,
+    nvkms_config_read_file_func_t readfile)
+{
+    size_t i;
+    NvBool ret = FALSE;     /* set false until success noted */
+
+    int state = ST_KEYHEAD; /* state machine's current state */
+    char watchfor = 0;      /* marker for paired char blocks */
+    const char *ptr = NULL; /* pointer into buffer */
+    char *strs[3] = { NULL, NULL, NULL }; /* alloced strings to be copied to */
+    size_t lens[3] = { 0, 0, 0 };         /* lengths of strings in strs */
+
+    /* named pointers into strs */
+    char ** const keyhead = &strs[ST_KEYHEAD];
+    char ** const keytail = &strs[ST_KEYTAIL];
+    char ** const value = &strs[ST_VALUE];
+
+    /* verify that there's always a string allocated in each of the slots */
+    for (i = 0; i < 3; ++i) {
+        strs[i] = nvCalloc(1, 1);
+        if (strs[i] == NULL) {
+            nvEvoLog(EVO_LOG_WARN, "Error reading configuration file: "
+                     "Out of memory");
+            goto teardown;
+        }
+        lens[i] = 1;
+    }
+
+#define COPYPTR() do {                                                        \
+    size_t strlen;                                                            \
+    nvAssert(ptr != NULL);                                                    \
+    nvAssert(state == ST_KEYHEAD || state == ST_KEYTAIL || state == ST_VALUE);\
+    strlen = (buff + i) - ptr;                                                \
+    if (lens[state] < strlen + 1) {                                           \
+        /* allocate strlen + 1 to ensure a null terminator */                 \
+        nvFree(strs[state]);                                                  \
+        strs[state] = nvCalloc(strlen + 1, 1);                                \
+        if (strs[state] == NULL) {                                            \
+            nvEvoLog(EVO_LOG_WARN, "Error reading configuration file: "      \
+                     "Out of memory");                                        \
+            goto teardown;                                                    \
+        }                                                                     \
+        lens[state] = strlen + 1;                                             \
+    }                                                                         \
+    nvkms_memcpy(strs[state], ptr, strlen);                                   \
+    strs[state][strlen] = 0;                                                  \
+    ptr = NULL;                                                               \
+} while (0)
+
+    for (i = 0; i < size; i++) {
+        /*
+         * If watchfor is set, then either we're in a comment or we're in a
+         * paired block. If we're in a comment (watchfor == '\n'), then we
+         * want ptr to be NULL, so that the comment isn't included in content.
+         * If we're not in a comment (watchfor == '"', ')', '}', ']', or '>'),
+         * we want ptr to be non-NULL, so that the paired block is included in
+         * the content.
+         */
+        nvAssert(watchfor == 0 ||
+                 (watchfor != '\n' && ptr != NULL) ||
+                 (watchfor == '\n' && ptr == NULL));
+
+        if (state == ST_KEYHEAD) {
+            if (watchfor == 0) {
+                if (buff[i] == '.') {
+                    if (ptr == NULL) {
+                        nvEvoLog(EVO_LOG_WARN, "Syntax error in configuration file: "
+                                 "'.' 
at start of key"); + goto teardown; + } + + COPYPTR(); + state = ST_KEYTAIL; + } else if (buff[i] == '=') { + if (ptr == NULL) { + nvEvoLog(EVO_LOG_WARN, "Syntax error in configuration file: " + "key expected before '='"); + goto teardown; + } + + COPYPTR(); + state = ST_VALUE; + } else if (buff[i] == '"') { + if (ptr == NULL) { + ptr = buff + i; + } + watchfor = '"'; + } else if (buff[i] == '(') { + if (ptr == NULL) { + ptr = buff + i; + } + watchfor = ')'; + } else if (buff[i] == '{') { + if (ptr == NULL) { + ptr = buff + i; + } + watchfor = '}'; + } else if (buff[i] == '[') { + if (ptr == NULL) { + ptr = buff + i; + } + watchfor = ']'; + } else if (buff[i] == '<') { + if (ptr == NULL) { + ptr = buff + i; + } + watchfor = '>'; + } else if (buff[i] == '#') { + if (ptr != NULL) { + COPYPTR(); + state = ST_EQUALS; + } + watchfor = '\n'; + } else if (CharIsSpace(buff[i])) { + if (ptr != NULL) { + COPYPTR(); + state = ST_EQUALS; + } + } else if (ptr == NULL) { + ptr = buff + i; + } + } else if (buff[i] == watchfor) { + watchfor = 0; + } + } else if (state == ST_KEYTAIL) { + if (watchfor == 0) { + if (buff[i] == '=') { + if (ptr == NULL) { + nvEvoLog(EVO_LOG_WARN, "Syntax error in configuration file: " + "identifier expected after '.' in \"%s\" key", + *keyhead); + goto teardown; + } + + COPYPTR(); + state = ST_VALUE; + } else if (buff[i] == '"') { + if (ptr == NULL) { + ptr = buff + i; + } + watchfor = '"'; + } else if (buff[i] == '(') { + if (ptr == NULL) { + ptr = buff + i; + } + watchfor = ')'; + } else if (buff[i] == '{') { + if (ptr == NULL) { + ptr = buff + i; + } + watchfor = '}'; + } else if (buff[i] == '[') { + if (ptr == NULL) { + ptr = buff + i; + } + watchfor = ']'; + } else if (buff[i] == '<') { + if (ptr == NULL) { + ptr = buff + i; + } + watchfor = '>'; + } else if (buff[i] == '#') { + if (ptr != NULL) { + COPYPTR(); + state = ST_EQUALS; + } + watchfor = '\n'; + } else if (CharIsSpace(buff[i])) { + if (ptr != NULL) { + COPYPTR(); + state = ST_EQUALS; + } + } else if (ptr == NULL) { + ptr = buff + i; + } + } else if (buff[i] == watchfor) { + watchfor = 0; + } + } else if (state == ST_EQUALS) { + nvAssert(ptr == NULL); + /* watchfor should only ever be set if we're in a comment */ + nvAssert(watchfor == 0 || watchfor == '\n'); + + if (watchfor == 0) { + if (buff[i] == '=') { + state = ST_VALUE; + } else if (buff[i] == '#') { + watchfor = '\n'; + } else if (!CharIsSpace(buff[i])) { + nvEvoLog(EVO_LOG_WARN, "Syntax error in configuration file: " + "expected '=' before value"); + goto teardown; + } + } else if (buff[i] == watchfor) { + watchfor = 0; + } + + } else if (state == ST_VALUE) { + if (watchfor == 0) { + if (buff[i] == '=') { + nvEvoLog(EVO_LOG_WARN, "Syntax error in configuration file: " + "unexpected '=' in value"); + goto teardown; + } else if (buff[i] == '"') { + if (ptr == NULL) { + ptr = buff + i; + } + watchfor = '"'; + } else if (buff[i] == '(') { + if (ptr == NULL) { + ptr = buff + i; + } + watchfor = ')'; + } else if (buff[i] == '{') { + if (ptr == NULL) { + ptr = buff + i; + } + watchfor = '}'; + } else if (buff[i] == '[') { + if (ptr == NULL) { + ptr = buff + i; + } + watchfor = ']'; + } else if (buff[i] == '<') { + if (ptr == NULL) { + ptr = buff + i; + } + watchfor = '>'; + } else if (buff[i] == '#') { + if (ptr != NULL) { + COPYPTR(); + state = ST_SEND; + } + watchfor = '\n'; + } else if (CharIsSpace(buff[i])) { + if (ptr != NULL) { + COPYPTR(); + state = ST_SEND; + } + } else if (ptr == NULL) { + ptr = buff + i; + } + } else if (buff[i] == watchfor) { + 
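+                /*
+                 * Reached the matching close character: leave the paired
+                 * block (or comment) and resume normal tokenization.
+                 */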
watchfor = 0; + } + } else if (state == ST_SEND) { + if (!Subparse(*keyhead, *keytail, *value, readfile)) { + goto teardown; + } + state = ST_KEYHEAD; + (*keyhead)[0] = 0; + (*keytail)[0] = 0; + (*value)[0] = 0; + i--; /* don't consume input */ + } else { + nvAssert(!"Invalid state!"); + } + } + + if (state == ST_SEND) { + if (!Subparse(*keyhead, *keytail, *value, readfile)) { + goto teardown; + } + } else if ((state != ST_KEYHEAD) || (ptr != NULL)) { + /* + * if state is KEYHEAD and ptr is NULL, then we've just got trailing + * whitespace or comments, which is valid syntax + */ + nvEvoLog(EVO_LOG_WARN, "Syntax error in configuration file: " + "trailing input after last key-value pair"); + goto teardown; + } + + ret = TRUE; + + /* fallthrough */ +teardown: + for (i = 0; i < 3; i++) { + nvFree(strs[i]); + } + return ret; +#undef COPYPTR +} + +#undef ST_KEYHEAD +#undef ST_KEYTAIL +#undef ST_VALUE +#undef ST_EQUALS +#undef ST_SEND diff --git a/src/nvidia-modeset/src/nvkms-console-restore.c b/src/nvidia-modeset/src/nvkms-console-restore.c new file mode 100644 index 0000000..0c6cc5b --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-console-restore.c @@ -0,0 +1,983 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-console-restore.h" +#include "nvkms-dpy.h" +#include "nvkms-flip.h" +#include "nvkms-modepool.h" +#include "nvkms-modeset.h" +#include "nvkms-prealloc.h" +#include "nvkms-private.h" +#include "nvkms-rm.h" +#include "nvkms-utils.h" + +#include "dp/nvdp-connector.h" + +/*! + * Find the first valid mode of given dimensions (width and height) that passes + * IMP at boot clocks. If input dimensions are not given then return first + * valid mode that passes IMP at boot clocks. 
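+ *
+ * For example, FindMode(pDpyEvo, format, 0, 0, &mode) accepts the first
+ * valid mode of any size, while FindMode(pDpyEvo, format, 1920, 1080, &mode)
+ * accepts only modes with 1920x1080 visible dimensions.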
+ */ +static NvBool FindMode(NVDpyEvoPtr pDpyEvo, + const enum NvKmsSurfaceMemoryFormat format, + const NvU32 width, + const NvU32 height, + struct NvKmsMode *pModeOut) +{ + NvU32 index = 0; + + while (TRUE) { + struct NvKmsValidateModeIndexParams params = { }; + + params.request.dpyId = pDpyEvo->id; + params.request.modeIndex = index++; + params.request.modeValidation.overrides = + NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS | + NVKMS_MODE_VALIDATION_MAX_ONE_HARDWARE_HEAD; + + nvValidateModeIndex(pDpyEvo, ¶ms.request, ¶ms.reply); + + if (params.reply.end) { + break; + } + + if (!params.reply.valid) { + continue; + } + + if (!(NVBIT64(format) & + params.reply.modeUsage.layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats)) { + continue; + } + + if (height != 0 && height != params.reply.mode.timings.vVisible) { + continue; + } + + if (width != 0 && width != params.reply.mode.timings.hVisible) { + continue; + } + + *pModeOut = params.reply.mode; + return TRUE; + } + + return FALSE; +} + +/*! + * Make sure pDispEvo->connectedDpys is up to date. + * + * Do this by querying the dpy dynamic data for all dpys. The results aren't + * actually important, but querying the dynamic data has the side effect of + * updating pDispEvo->connectedDpys. + */ +static NVDpyIdList UpdateConnectedDpys(NVDispEvoPtr pDispEvo) +{ + NVDpyEvoPtr pDpyEvo; + struct NvKmsQueryDpyDynamicDataParams *pParams = + nvCalloc(1, sizeof(*pParams)); + + if (!pParams) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_WARN, + "Failed to allocate NvKmsQueryDpyDynamicDataParams"); + return pDispEvo->connectedDisplays; + } + + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) { + nvkms_memset(pParams, 0, sizeof(*pParams)); + nvDpyGetDynamicData(pDpyEvo, pParams); + } + + nvFree(pParams); + + return pDispEvo->connectedDisplays; +} + +static void FlipBaseToNull(NVDevEvoPtr pDevEvo) +{ + struct NvKmsFlipRequestOneHead *pFlipApiHead = NULL; + NvU32 numFlipApiHeads = 0, i; + NvU32 sd; + NVDispEvoPtr pDispEvo; + NvBool ret = TRUE; + + /* First count the number of active heads. */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 apiHead; + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + continue; + } + numFlipApiHeads++; + } + } + + if (numFlipApiHeads == 0) { + // If no heads require changes, there's nothing to do. 
+ return; + } + + /* Allocate an array of head structures */ + pFlipApiHead = nvCalloc(numFlipApiHeads, sizeof(pFlipApiHead[0])); + + if (!pFlipApiHead) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN, + "Failed to allocate flip parameters for console restore base flip " + "to NULL"); + return; + } + + i = 0; + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 apiHead; + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + struct NvKmsFlipCommonParams *pRequestApiHead = NULL; + NvU32 layer; + + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + continue; + } + + pFlipApiHead[i].sd = sd; + pFlipApiHead[i].head = apiHead; + pRequestApiHead = &pFlipApiHead[i].flip; + i++; + nvAssert(i <= numFlipApiHeads); + + // Disable HDR on head + pRequestApiHead->tf.specified = TRUE; + pRequestApiHead->tf.val = NVKMS_OUTPUT_TF_NONE; + + pRequestApiHead->hdrInfoFrame.specified = TRUE; + pRequestApiHead->hdrInfoFrame.enabled = FALSE; + + pRequestApiHead->colorimetry.specified = TRUE; + pRequestApiHead->colorimetry.val = NVKMS_OUTPUT_COLORIMETRY_DEFAULT; + + for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) { + pRequestApiHead->layer[layer].surface.specified = TRUE; + // No need to specify sizeIn/sizeOut as we are flipping NULL surface. + pRequestApiHead->layer[layer].compositionParams.specified = TRUE; + pRequestApiHead->layer[layer].completionNotifier.specified = TRUE; + pRequestApiHead->layer[layer].syncObjects.specified = TRUE; + + // Disable HDR on layers + pRequestApiHead->layer[layer].hdr.enabled = FALSE; + pRequestApiHead->layer[layer].hdr.specified = TRUE; + + pRequestApiHead->layer[layer].colorSpace.val = + NVKMS_INPUT_COLOR_SPACE_NONE; + pRequestApiHead->layer[layer].colorSpace.specified = TRUE; + + pRequestApiHead->layer[layer].tf.val = + NVKMS_INPUT_TF_LINEAR; + pRequestApiHead->layer[layer].tf.specified = TRUE; + } + } + } + + ret = nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev, + pFlipApiHead, + numFlipApiHeads, + TRUE /* commit */, + NULL /* pReply */, + FALSE /* skipUpdate */, + FALSE /* allowFlipLock */); + nvFree(pFlipApiHead); + + if (!ret) { + nvAssert(!"Console restore failed to flip base to NULL"); + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 apiHead; + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NvBool stoppedBase; + ret = nvIdleBaseChannelOneApiHead(pDispEvo, apiHead, &stoppedBase); + if (!ret) { + nvAssert(!"Console restore failed to idle base"); + } + } + } +} + +/*! + * Return the mask of active api heads on this pDispEvo. 
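+ * (For example, if only api heads 0 and 2 are active, the returned mask
+ * is 0x5.)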
+ */ +static NvU32 GetActiveApiHeadMask(NVDispEvoPtr pDispEvo) +{ + NvU32 apiHead; + NvU32 apiHeadMask = 0; + + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + if (nvApiHeadIsActive(pDispEvo, apiHead)) { + apiHeadMask |= 1 << apiHead; + } + } + + return apiHeadMask; +} + +static NvU32 PickApiHead(const NVDpyEvoRec *pDpyEvo, + const NvU32 availableApiHeadsMask) +{ + const NvU32 possibleApiHeads = availableApiHeadsMask & + nvDpyGetPossibleApiHeadsMask(pDpyEvo); + const NvU32 activeApiHeadsMask = + GetActiveApiHeadMask(pDpyEvo->pDispEvo); + + NvU32 head; + + if (possibleApiHeads == 0) { + return NV_INVALID_HEAD; + } + + if ((pDpyEvo->apiHead != NV_INVALID_HEAD) && + ((NVBIT(pDpyEvo->apiHead) & possibleApiHeads) != 0x0)) { + return pDpyEvo->apiHead; + } + + if ((possibleApiHeads & ~activeApiHeadsMask) != 0x0) { + head = BIT_IDX_32(LOWESTBIT(possibleApiHeads & + ~activeApiHeadsMask)); + } else { + head = BIT_IDX_32(LOWESTBIT(possibleApiHeads)); + } + + if (head >= NV_MAX_HEADS) { + return NV_INVALID_HEAD; + } + + return head; +} + +static NvBool InitModeOneHeadRequest( + NVDpyEvoRec *pDpyEvo, + NVSurfaceEvoPtr pSurfaceEvo, + const struct NvKmsMode *pOverrideMode, + const struct NvKmsSize *pOverrideViewPortSizeIn, + const struct NvKmsPoint *pOverrideViewPortPointIn, + const NvU32 apiHead, + struct NvKmsSetModeOneHeadRequest *pRequestHead) +{ + + struct NvKmsFlipCommonParams *pFlip = &pRequestHead->flip; + NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU32 layer; + + if (pOverrideMode != NULL) { + pRequestHead->mode = *pOverrideMode; + } else { + if (!FindMode(pDpyEvo, + pSurfaceEvo->format, + 0 /* Ignore mode width */, + 0 /* Ignore mode height */, + &pRequestHead->mode)) { + return FALSE; + } + } + + pRequestHead->dpyIdList = nvAddDpyIdToEmptyDpyIdList(pDpyEvo->id); + pRequestHead->modeValidationParams.overrides = + NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS | + NVKMS_MODE_VALIDATION_MAX_ONE_HARDWARE_HEAD; + if (pOverrideViewPortSizeIn != NULL) { + pRequestHead->viewPortSizeIn = *pOverrideViewPortSizeIn; + } else { + pRequestHead->viewPortSizeIn.width = pSurfaceEvo->widthInPixels; + pRequestHead->viewPortSizeIn.height = pSurfaceEvo->heightInPixels; + } + + pFlip->viewPortIn.specified = TRUE; + if (pOverrideViewPortPointIn != NULL) { + pFlip->viewPortIn.point = *pOverrideViewPortPointIn; + } + pFlip->layer[NVKMS_MAIN_LAYER].surface.handle[NVKMS_LEFT] = + pDevEvo->fbConsoleSurfaceHandle; + + pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.specified = TRUE; + pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.val.width = pSurfaceEvo->widthInPixels; + pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.val.height = pSurfaceEvo->heightInPixels; + + pFlip->layer[NVKMS_MAIN_LAYER].sizeOut.specified = TRUE; + pFlip->layer[NVKMS_MAIN_LAYER].sizeOut.val = + pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.val; + + /* Disable other layers except Main */ + for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) { + + if (layer == NVKMS_MAIN_LAYER) { + pFlip->layer[layer].csc.matrix = NVKMS_IDENTITY_CSC_MATRIX; + pFlip->layer[layer].csc.specified = TRUE; + } + pFlip->layer[layer].surface.specified = TRUE; + + pFlip->layer[layer].completionNotifier.specified = TRUE; + pFlip->layer[layer].syncObjects.specified = TRUE; + pFlip->layer[layer].compositionParams.specified = TRUE; + } + + // Disable other features. 
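+    // Marking these fields "specified" while leaving their payloads at
+    // their zero-initialized defaults effectively disables the cursor
+    // image and LUT updates for the console mode.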
+    pFlip->cursor.imageSpecified = TRUE;
+    pFlip->lut.input.specified = TRUE;
+    pFlip->lut.output.specified = TRUE;
+    pFlip->lut.synchronous = TRUE;
+    pRequestHead->allowGsync = FALSE;
+    pRequestHead->allowAdaptiveSync =
+        NVKMS_ALLOW_ADAPTIVE_SYNC_DISABLED;
+
+    return TRUE;
+}
+
+static NvBool
+ConstructModeOneHeadRequestForOneDpy(NVDpyEvoRec *pDpyEvo,
+                                     NVSurfaceEvoPtr pSurfaceEvo,
+                                     struct NvKmsSetModeParams *pParams,
+                                     const NvU32 dispIndex,
+                                     NvU32 *pAvailableApiHeadsMask)
+{
+    NvBool ret = FALSE;
+    const NvU32 apiHead = PickApiHead(pDpyEvo, *pAvailableApiHeadsMask);
+
+    if ((apiHead == NV_INVALID_HEAD) || pDpyEvo->isVrHmd) {
+        goto done;
+    }
+
+    NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;
+    struct NvKmsSetModeRequest *pRequest = &pParams->request;
+    struct NvKmsSetModeOneDispRequest *pRequestDisp =
+        &pRequest->disp[dispIndex];
+    struct NvKmsSetModeOneHeadRequest *pRequestHead =
+        &pRequestDisp->head[apiHead];
+
+    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+
+    if (!InitModeOneHeadRequest(pDpyEvo,
+                                pSurfaceEvo,
+                                NULL /* Use default Mode */,
+                                NULL /* Use default ViewPortSizeIn */,
+                                NULL /* Use default ViewPortPointIn */,
+                                apiHead,
+                                pRequestHead)) {
+        goto done;
+    }
+
+    nvAssert(!pRequestHead->viewPortOutSpecified);
+    nvAssert(!pRequest->commit);
+
+    while (!nvSetDispModeEvo(pDevEvo,
+                             pDevEvo->pNvKmsOpenDev,
+                             pRequest,
+                             &pParams->reply,
+                             TRUE /* bypassComposition */,
+                             FALSE /* doRasterLock */)) {
+        /*
+         * If validation is failing even after disabling scaling then leave
+         * this dpy inactive.
+         */
+        if (pRequestHead->viewPortOutSpecified) {
+            nvkms_memset(pRequestHead, 0, sizeof(*pRequestHead));
+            goto done;
+        }
+
+        /* Disable scaling and try again */
+        pRequestHead->viewPortOut = (struct NvKmsRect) {
+            .height = pRequestHead->viewPortSizeIn.height,
+            .width = pRequestHead->viewPortSizeIn.width,
+            .x = 0,
+            .y = 0,
+        };
+        pRequestHead->viewPortOutSpecified = TRUE;
+    }
+
+    *pAvailableApiHeadsMask &= ~NVBIT(apiHead);
+
+    ret = TRUE;
+
+done:
+
+    return ret;
+}
+
+typedef struct _TiledDisplayInfo {
+    NVDpyIdList detectedDpysList;
+    NvBool isDetectComplete;
+    NvBool isCapToScaleSingleTile;
+} TiledDisplayInfo;
+
+/*
+ * Detect the Tiled-Display with the topology-id described by the given
+ * pDisplayIdInfo.
+ *
+ * Loop over all dpys in the candidateConnectedDpys list, looking for a
+ * matching topology-id, and add dpys with a matching topology-id to the
+ * detectedTiledDisplayDpysList list. Mark the Tiled-Display detection
+ * complete only if exactly the expected number of tiles is found.
+ */
+static NvBool DetectTiledDisplay(const NVDispEvoRec *pDispEvo,
+                                 const NVT_DISPLAYID_INFO *pDisplayIdInfo,
+                                 const NVDpyIdList candidateConnectedDpys,
+                                 TiledDisplayInfo *pTiledDisplayInfo)
+{
+    const NVT_TILEDDISPLAY_TOPOLOGY_ID nullTileDisplayTopoId = { 0 };
+    const NVDpyEvoRec *pDpyEvo;
+    const NvU32 numTiles = pDisplayIdInfo->tile_topology.row *
+                           pDisplayIdInfo->tile_topology.col;
+    const NvU32 numTilesMask = NVBIT(numTiles) - 1;
+    NvU32 detectedTilesCount = 0;
+    NvU32 detectedTilesMask = 0;
+
+    NVDpyIdList detectedTiledDisplayDpysList = nvEmptyDpyIdList();
+
+    /*
+     * If the parsed EDID is valid and tile_topology_id is non-zero then the
+     * dpy is considered a valid tile of a tiled display.
+     *
+     * The 'tile_topology_id' is a triplet of ids consisting of vendor_id,
+     * product_id, and serial_number.
The DisplayId specification does not
+ * clearly define an invalid 'tile_topology_id', but here the
+ * tile_topology_id is considered invalid only if all three ids are zero,
+ * which is consistent with other protocols like RandR1.2 'The tile group
+ * identifier'.
+ */
+    if (!nvkms_memcmp(&pDisplayIdInfo->tile_topology_id,
+                      &nullTileDisplayTopoId, sizeof(nullTileDisplayTopoId))) {
+        return FALSE;
+    }
+
+    /*
+     * Reject a Tiled-Display that consists of multiple physical display
+     * enclosures or that requires bezel configuration.
+     */
+    if (!pDisplayIdInfo->tile_capability.bSingleEnclosure ||
+        pDisplayIdInfo->tile_capability.bHasBezelInfo) {
+        return FALSE;
+    }
+
+    /*
+     * Reject a Tiled-Display that has more than 4 horizontal or vertical
+     * tiles.
+     */
+    if (pDisplayIdInfo->tile_topology.row <= 0 ||
+        pDisplayIdInfo->tile_topology.col <= 0 ||
+        pDisplayIdInfo->tile_topology.row > 4 ||
+        pDisplayIdInfo->tile_topology.col > 4) {
+        return FALSE;
+    }
+
+    FOR_ALL_EVO_DPYS(pDpyEvo, candidateConnectedDpys, pDispEvo) {
+        const NVT_EDID_INFO *pEdidInfo = &pDpyEvo->parsedEdid.info;
+        const NVT_DISPLAYID_INFO *pDpyDisplayIdInfo =
+            &pEdidInfo->ext_displayid;
+
+        if (!pDpyEvo->parsedEdid.valid) {
+            continue;
+        }
+
+        if (nvkms_memcmp(&pDisplayIdInfo->tile_topology_id,
+                         &pDpyDisplayIdInfo->tile_topology_id,
+                         sizeof(pDpyDisplayIdInfo->tile_topology_id))) {
+            continue;
+        }
+
+        /*
+         * Tiled-Display Topology:
+         *
+         *          |-----------col
+         *
+         *  ___     +------------+------------+...
+         *   |      | (x=0,y=0)  | (x=1,y=0)  |
+         *   |      |            |            |
+         *   |      |            |            |
+         *   |      +------------+------------+
+         *  row     | (x=0,y=1)  | (x=1,y=1)  |
+         *          |            |            |
+         *          |            |            |
+         *          +------------+------------+
+         *          .
+         *          .
+         *          .
+         */
+        if (pDpyDisplayIdInfo->tile_topology.row !=
+                pDisplayIdInfo->tile_topology.row) {
+            continue;
+        }
+
+        if (pDpyDisplayIdInfo->tile_topology.col !=
+                pDisplayIdInfo->tile_topology.col) {
+            continue;
+        }
+
+        if (pDpyDisplayIdInfo->tile_location.x >=
+                pDpyDisplayIdInfo->tile_topology.col) {
+            continue;
+        }
+
+        if (pDpyDisplayIdInfo->tile_location.y >=
+                pDpyDisplayIdInfo->tile_topology.row) {
+            continue;
+        }
+
+        nvAssert(pDpyDisplayIdInfo->tile_capability.single_tile_behavior ==
+                 pDisplayIdInfo->tile_capability.single_tile_behavior);
+
+        detectedTiledDisplayDpysList =
+            nvAddDpyIdToDpyIdList(pDpyEvo->id, detectedTiledDisplayDpysList);
+
+        detectedTilesMask |= NVBIT((pDpyDisplayIdInfo->tile_location.y *
+                                    pDpyDisplayIdInfo->tile_topology.col) +
+                                   (pDpyDisplayIdInfo->tile_location.x));
+        detectedTilesCount++;
+    }
+
+    pTiledDisplayInfo->detectedDpysList = detectedTiledDisplayDpysList;
+
+    if (detectedTilesCount != numTiles || detectedTilesMask != numTilesMask) {
+        pTiledDisplayInfo->isDetectComplete = FALSE;
+    } else {
+        pTiledDisplayInfo->isDetectComplete = TRUE;
+    }
+
+    pTiledDisplayInfo->isCapToScaleSingleTile =
+        pDisplayIdInfo->tile_capability.single_tile_behavior ==
+        NVT_SINGLE_TILE_BEHAVIOR_SCALE;
+
+    return TRUE;
+}
+
+/* Construct modeset request for given Tiled-display */
+static NvBool
+ConstructModeRequestForTiledDisplay(const NVDispEvoRec *pDispEvo,
+                                    NVSurfaceEvoPtr pSurfaceEvo,
+                                    struct NvKmsSetModeParams *pParams,
+                                    const NvU32 dispIndex,
+                                    NVDpyIdList tiledDisplayDpysList,
+                                    NvU32 *pAvailableApiHeadsMask)
+{
+    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    /*
+     * Get an arbitrary dpy from tiledDisplayDpysList to extract the
+     * Tiled-Display information, which should be the same across all
+     * tiles.
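+     *
+     * (Illustrative example, hypothetical topology: for a 2x2 Tiled-Display
+     * every tile reports row == 2, col == 2 and the same native resolution
+     * and single_tile_behavior, so any one tile can stand in for the whole
+     * Tiled-Display here.)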
+     */
+    NVDpyEvoRec *pArbitraryDpyEvo =
+        nvGetOneArbitraryDpyEvo(tiledDisplayDpysList, pDispEvo);
+    const NVT_DISPLAYID_INFO *pPrimaryDisplayIdInfo =
+        &pArbitraryDpyEvo->parsedEdid.info.ext_displayid;
+    const NvU32 numRows = pPrimaryDisplayIdInfo->tile_topology.row;
+    const NvU32 numColumns = pPrimaryDisplayIdInfo->tile_topology.col;
+    /*
+     * Split the entire input viewport across all tiles of the Tiled-Display.
+     */
+    const struct NvKmsSize viewPortSizeIn = {
+        .width = (pSurfaceEvo->widthInPixels / numColumns),
+        .height = (pSurfaceEvo->heightInPixels / numRows),
+    };
+    struct NvKmsSetModeRequest *pRequest = &pParams->request;
+    struct NvKmsSetModeOneDispRequest *pRequestDisp =
+        &pRequest->disp[dispIndex];
+    NvU32 firstClaimedApiHead = NV_INVALID_HEAD;
+    NvU32 claimedApiHeadMask = 0x0;
+    NVDpyEvoRec *pDpyEvo;
+    NvU32 apiHead;
+
+    /*
+     * Return failure if there are not enough heads available to construct
+     * the modeset request for the Tiled-Display.
+     */
+    if (nvPopCount32(*pAvailableApiHeadsMask) <
+        nvCountDpyIdsInDpyIdList(tiledDisplayDpysList)) {
+        return FALSE;
+    }
+
+    /*
+     * Return failure if the input viewport cannot be split evenly across
+     * the tiles: the width is divided between columns and the height
+     * between rows.
+     */
+    if ((pSurfaceEvo->widthInPixels % numColumns != 0) ||
+        (pSurfaceEvo->heightInPixels % numRows != 0)) {
+        return FALSE;
+    }
+
+    FOR_ALL_EVO_DPYS(pDpyEvo, tiledDisplayDpysList, pDispEvo) {
+        const NVT_DISPLAYID_INFO *pDpyDisplayIdInfo =
+            &pDpyEvo->parsedEdid.info.ext_displayid;
+        const struct NvKmsPoint viewPortPointIn = {
+            .x = pDpyDisplayIdInfo->tile_location.x * viewPortSizeIn.width,
+            .y = pDpyDisplayIdInfo->tile_location.y * viewPortSizeIn.height
+        };
+        const NvU32 localAvailableApiHeadsMask =
+            *pAvailableApiHeadsMask & ~claimedApiHeadMask;
+        const NvU32 apiHead = PickApiHead(pDpyEvo,
+                                          localAvailableApiHeadsMask);
+
+        if ((apiHead == NV_INVALID_HEAD) || pDpyEvo->isVrHmd) {
+            goto failed;
+        }
+
+        struct NvKmsSetModeOneHeadRequest *pRequestHead =
+            &pRequestDisp->head[apiHead];
+        struct NvKmsMode mode;
+
+        if (firstClaimedApiHead == NV_INVALID_HEAD) {
+            /*
+             * Find a mode with the native dimensions reported in the
+             * Tiled-Display information.
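+             *
+             * (For example, with hypothetical values: a 2x2 Tiled-Display
+             * built from 1920x1080 tiles reports a per-tile native
+             * resolution of 1920x1080; each tile's head is driven with that
+             * mode while a 3840x2160 input viewport is split between them.)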
+ */ + if (!FindMode(pDpyEvo, + pSurfaceEvo->format, + pPrimaryDisplayIdInfo->native_resolution.width, + pPrimaryDisplayIdInfo->native_resolution.height, + &mode)) { + goto failed; + } + + firstClaimedApiHead = apiHead; + } else { + /* All tiles should support same set of modes */ + mode = pRequestDisp->head[firstClaimedApiHead].mode; + } + + claimedApiHeadMask |= NVBIT(apiHead); + + if (!InitModeOneHeadRequest(pDpyEvo, + pSurfaceEvo, + &mode, + &viewPortSizeIn, + &viewPortPointIn, + apiHead, + pRequestHead)) { + goto failed; + } + } + + nvAssert(!pRequest->commit); + + if (!nvSetDispModeEvo(pDevEvo, + pDevEvo->pNvKmsOpenDev, + pRequest, + &pParams->reply, + TRUE /* bypassComposition */, + FALSE /* doRasterLock */)) { + goto failed; + } + *pAvailableApiHeadsMask &= ~claimedApiHeadMask; + + return TRUE; + +failed: + + for (apiHead = 0; apiHead < ARRAY_LEN(pRequestDisp->head); apiHead++) { + if ((NVBIT(apiHead) & claimedApiHeadMask) == 0x0) { + continue; + } + nvkms_memset(&pRequestDisp->head[apiHead], + 0, + sizeof(pRequestDisp->head[apiHead])); + } + + return FALSE; +} + +static NvBool isDpMSTModeActiveOnAnyConnector(NVDevEvoPtr pDevEvo) +{ + NvU32 i; + NVDispEvoPtr pDispEvo; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) { + NvU32 apiHead; + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + const NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pApiHeadState->activeDpys, pDispEvo); + const NVConnectorEvoRec *pConnectorEvo = (pDpyEvo != NULL) ? + pDpyEvo->pConnectorEvo : NULL; + + if ((pConnectorEvo != NULL) && + nvConnectorUsesDPLib(pConnectorEvo)) { + const enum NVDpLinkMode activeLinkMode = + nvDPGetActiveLinkMode(pConnectorEvo->pDpLibConnector); + + nvAssert(activeLinkMode != NV_DP_LINK_MODE_OFF); + + if (activeLinkMode == NV_DP_LINK_MODE_MST) { + return TRUE; + } + } + } + } + + return FALSE; +} + +/*! + * Attempt to restore the console. + * + * If a framebuffer console surface was successfully imported from RM, then use + * the core channel to set a mode that displays it. + * + * This enables as many heads as possible in a clone configuration. + * In the first pass we select connected active dpys, in the second pass + * any other connected boot dpys, and in a third pass any other + * remaining connected dpys: + * + * 1. Populate modeset request to enable the given dpy. + * + * 2. Do modeset request validation, if it fails then disable scaling. If + * modeset request validation fails even after disabling scaling then do not + * enable that dpy. + * + * If console restore succeeds, set pDevEvo->skipConsoleRestore to skip + * deallocating the core channel and triggering RM's console restore code. + */ +NvBool nvEvoRestoreConsole(NVDevEvoPtr pDevEvo, const NvBool allowMST) +{ + NvBool ret = FALSE; + NvU32 dispIndex; + NVDispEvoPtr pDispEvo; + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDevConst(pDevEvo->pNvKmsOpenDev); + NVSurfaceEvoPtr pSurfaceEvo = + nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, + pDevEvo->fbConsoleSurfaceHandle); + struct NvKmsSetModeParams *params; + + /* + * If this function fails to restore a console then NVKMS frees + * and reallocates the core channel, to attempt the console + * restore using Resman. The core channel reallocation also may + * fail and nvEvoRestoreConsole() again may get called from + * nvFreeDevEvo() when client frees the NVKMS device. 
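+     *
+     * (Illustrative failure sequence: console restore fails -> NVKMS frees
+     * and reallocates the core channel -> the reallocation also fails ->
+     * pDevEvo->displayHandle is left at zero, so the check below bails out.)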
+     *
+     * If nvEvoRestoreConsole() gets called after the core channel
+     * allocation/reallocation failure then do nothing and return
+     * early.
+     */
+    if (pDevEvo->displayHandle == 0x0) {
+        goto done;
+    }
+
+    /*
+     * If any DP-MST mode is active on any connector of this device but
+     * DP-MST is disallowed then force console-restore.
+     */
+    if (pDevEvo->skipConsoleRestore &&
+        !allowMST && isDpMSTModeActiveOnAnyConnector(pDevEvo)) {
+        pDevEvo->skipConsoleRestore = FALSE;
+    }
+
+    if (pDevEvo->skipConsoleRestore) {
+        ret = TRUE;
+        goto done;
+    }
+
+    if (!pSurfaceEvo) {
+        // No console surface to restore.
+        goto done;
+    }
+
+    FlipBaseToNull(pDevEvo);
+
+    params = nvPreallocGet(pDevEvo, PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE,
+                           sizeof(*params));
+    nvkms_memset(params, 0, sizeof(*params));
+
+    nvDPSetAllowMultiStreaming(pDevEvo, allowMST);
+
+    // Construct the request.
+    //
+    // To start with, try to enable as many connected dpys as possible,
+    // preferring the connected active displays first.
+    struct NvKmsSetModeRequest *pRequest = &params->request;
+    NvBool foundDpysConfigForConsoleRestore = FALSE;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+        NvU32 availableApiHeadsMask = NVBIT(pDevEvo->numApiHeads) - 1;
+        NVDpyIdList connectedDpys = UpdateConnectedDpys(pDispEvo);
+        const NVDpyIdList activeDpys = nvActiveDpysOnDispEvo(pDispEvo);
+        const NVDpyIdList connectedActiveDpys =
+            nvIntersectDpyIdListAndDpyIdList(connectedDpys,
+                                             activeDpys);
+        const NVDpyIdList connectedBootDpys =
+            nvIntersectDpyIdListAndDpyIdList(connectedDpys,
+                                             pDispEvo->bootDisplays);
+        struct NvKmsSetModeOneDispRequest *pRequestDisp =
+            &pRequest->disp[dispIndex];
+        int pass;
+
+        pRequest->requestedDispsBitMask |= NVBIT(dispIndex);
+        pRequestDisp->requestedHeadsBitMask = availableApiHeadsMask;
+
+        // Only enable heads on the subdevice that actually contains the
+        // console.
+        if (dispIndex != pDevEvo->vtFbInfo.subDeviceInstance) {
+            continue;
+        }
+
+        NVDpyIdList handledDpysList = nvEmptyDpyIdList();
+
+        for (pass = 0; pass < 3; pass++) {
+            NVDpyIdList candidateDpys;
+            NVDpyEvoPtr pDpyEvo;
+
+            if (availableApiHeadsMask == 0) {
+                break;
+            }
+
+            if (pass == 0) {
+                candidateDpys = connectedActiveDpys;
+            } else if (pass == 1) {
+                candidateDpys = nvDpyIdListMinusDpyIdList(connectedBootDpys,
+                                                          connectedActiveDpys);
+            } else if (pass == 2) {
+                candidateDpys = nvDpyIdListMinusDpyIdList(connectedDpys,
+                                                          connectedBootDpys);
+                candidateDpys = nvDpyIdListMinusDpyIdList(candidateDpys,
+                                                          connectedActiveDpys);
+            }
+
+            FOR_ALL_EVO_DPYS(pDpyEvo, candidateDpys, pDispEvo) {
+                NvBool isTiledDisplayFound = FALSE;
+                TiledDisplayInfo tiledDisplayInfo = { };
+                NvBool isTiledDisplayEnable = FALSE;
+                const NVT_DISPLAYID_INFO *pDpyDisplayIdInfo =
+                    pDpyEvo->parsedEdid.valid ?
+                    &pDpyEvo->parsedEdid.info.ext_displayid : NULL;
+                NvBool done = FALSE;
+
+                if (availableApiHeadsMask == 0) {
+                    break;
+                }
+
+                if (nvDpyIdIsInDpyIdList(pDpyEvo->id,
+                                         handledDpysList)) {
+                    continue;
+                }
+
+                isTiledDisplayFound =
+                    pDpyDisplayIdInfo != NULL &&
+                    DetectTiledDisplay(pDispEvo,
+                                       pDpyDisplayIdInfo,
+                                       nvDpyIdListMinusDpyIdList(
+                                           connectedDpys, handledDpysList),
+                                       &tiledDisplayInfo);
+
+                /*
+                 * Construct the modeset request for a Tiled-Display that
+                 * lacks the capability to scale a single tile's input across
+                 * the entire display. If that fails, fall back to
+                 * constructing a modeset request for this single dpy.
+                 */
+
+                if (isTiledDisplayFound &&
+                    tiledDisplayInfo.isDetectComplete &&
+                    !tiledDisplayInfo.isCapToScaleSingleTile) {
+
+                    done = ConstructModeRequestForTiledDisplay(
+                        pDispEvo,
+                        pSurfaceEvo,
+                        params,
+                        dispIndex,
+                        tiledDisplayInfo.detectedDpysList,
+                        &availableApiHeadsMask);
+                    isTiledDisplayEnable = done;
+                }
+
+                /*
+                 * If the Tiled-Display is capable of scaling a single tile's
+                 * input across the entire display, then for console restore
+                 * it is sufficient to light up any single tile and ignore
+                 * the remaining tiles.
+                 */
+
+                if (!done ||
+                    !isTiledDisplayFound ||
+                    !tiledDisplayInfo.isDetectComplete ||
+                    tiledDisplayInfo.isCapToScaleSingleTile) {
+
+                    done = ConstructModeOneHeadRequestForOneDpy(
+                        pDpyEvo,
+                        pSurfaceEvo,
+                        params,
+                        dispIndex,
+                        &availableApiHeadsMask);
+                    isTiledDisplayEnable =
+                        done && tiledDisplayInfo.isCapToScaleSingleTile;
+                }
+
+                handledDpysList =
+                    nvAddDpyIdToDpyIdList(pDpyEvo->id, handledDpysList);
+
+                if (isTiledDisplayEnable) {
+                    handledDpysList = nvAddDpyIdListToDpyIdList(
+                        tiledDisplayInfo.detectedDpysList,
+                        handledDpysList);
+                }
+
+                foundDpysConfigForConsoleRestore =
+                    foundDpysConfigForConsoleRestore || done;
+
+            }
+        }
+    }
+
+    /*
+     * Disable all (flip/raster) locks: dirty locking state left behind in
+     * hardware by the NVKMS console restore causes XID errors and engine
+     * hangs on the next modeset, because NVKMS doesn't read back the
+     * existing display hardware state at initialization time.
+     */
+
+    if (foundDpysConfigForConsoleRestore) {
+        pRequest->commit = TRUE;
+
+        ret = nvSetDispModeEvo(pDevEvo,
+                               pDevEvo->pNvKmsOpenDev,
+                               pRequest,
+                               &params->reply,
+                               TRUE /* bypassComposition */,
+                               FALSE /* doRasterLock */);
+    }
+
+    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE);
+
+done:
+    nvkms_free_timer(pDevEvo->consoleRestoreTimer);
+    pDevEvo->consoleRestoreTimer = NULL;
+
+    /* If console restore failed then simply shut down all heads */
+    if (!ret) {
+        nvShutDownApiHeads(pDevEvo, pDevEvo->pNvKmsOpenDev,
+                           NULL /* pTestFunc, shut down all heads */,
+                           NULL /* pData */,
+                           FALSE /* doRasterLock */);
+    }
+
+    // If restoring the console from here succeeded, then skip triggering RM's
+    // console restore.
+    pDevEvo->skipConsoleRestore = ret;
+    return ret;
+}
diff --git a/src/nvidia-modeset/src/nvkms-ctxdma.c b/src/nvidia-modeset/src/nvkms-ctxdma.c
new file mode 100644
index 0000000..f485c1a
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-ctxdma.c
@@ -0,0 +1,221 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms-private.h"
+#include "nvkms-ctxdma.h"
+#include "nvkms-utils.h"
+#include "nvkms-rm.h"
+#include "nvkms-rmapi.h"
+
+#include "class/cl0002.h" /* NV01_CONTEXT_DMA */
+
+#include /* NV0002_CTRL_CMD_BIND_CONTEXTDMA */
+
+static NvBool GarbageCollectSyncptHelperOneChannel(
+    NVDevEvoRec *pDevEvo,
+    NvU32 sd,
+    NVEvoChannel *pChannel,
+    NVEvoSyncpt *pSyncpt,
+    NVEvoChannelMask *pIdledChannelMask)
+{
+    NvBool isChannelIdle = FALSE;
+
+    if ((pChannel->channelMask & pSyncpt->channelMask) == 0) {
+        return TRUE;
+    }
+
+    if ((*pIdledChannelMask) & pChannel->channelMask) {
+        goto done;
+    }
+
+    /*! Check whether the channel is idle. */
+    pDevEvo->hal->IsChannelIdle(pDevEvo, pChannel, sd, &isChannelIdle);
+
+    if (!isChannelIdle) {
+        return FALSE;
+    }
+
+    /*! Record the idle channel mask to use in the next check. */
+    *pIdledChannelMask |= pChannel->channelMask;
+
+done:
+    pSyncpt->channelMask &= ~pChannel->channelMask;
+    return TRUE;
+}
+
+static NvBool GarbageCollectSyncptHelperOneSyncpt(
+    NVDevEvoRec *pDevEvo,
+    NVEvoSyncpt *pSyncpt,
+    NVEvoChannelMask *pIdledChannelMask)
+{
+    NvBool ret = TRUE;
+    NvU32 head, sd;
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+
+        for (head = 0; head < pDevEvo->numHeads; head++) {
+            NvU32 layer;
+
+            /*!
+             * If a given channel isn't idle, continue to check if this syncpt
+             * is used on other channels which may be idle.
+             */
+            for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+                if (!GarbageCollectSyncptHelperOneChannel(
+                        pDevEvo,
+                        sd,
+                        pDevEvo->head[head].layer[layer],
+                        pSyncpt,
+                        &pIdledChannelMask[sd])) {
+                    ret = FALSE;
+                }
+            }
+        }
+    }
+
+    return ret;
+}
+
+/*!
+ * Try to find unused syncpts and unregister them.
+ * This searches the global table and, when it finds that all channels using
+ * a syncpt are idle, frees that syncpt. It makes sure the syncpt is not
+ * part of the current flip.
+ */
+static NvBool GarbageCollectSyncpts(
+    NVDevEvoRec *pDevEvo)
+{
+    NvU32 i;
+    NvBool freedSyncpt = FALSE;
+    NVEvoChannelMask idledChannelMask[NVKMS_MAX_SUBDEVICES] = { 0 };
+
+    if (!pDevEvo->supportsSyncpts) {
+        return FALSE;
+    }
+
+    for (i = 0; i < NV_SYNCPT_GLOBAL_TABLE_LENGTH; i++) {
+
+        NvBool allLayersIdle = NV_TRUE;
+
+        if (pDevEvo->pAllSyncptUsedInCurrentFlip != NULL) {
+            if (pDevEvo->pAllSyncptUsedInCurrentFlip[i]) {
+                /*! syncpt is part of current flip, so skip it */
+                continue;
+            }
+        }
+
+        if (pDevEvo->preSyncptTable[i].surfaceDesc.ctxDmaHandle == 0) {
+            /*! syncpt isn't registered, so skip it */
+            continue;
+        }
+
+        allLayersIdle = GarbageCollectSyncptHelperOneSyncpt(
+            pDevEvo,
+            &pDevEvo->preSyncptTable[i],
+            idledChannelMask);
+
+        if (allLayersIdle) {
+            /*! Free handles */
+            nvRmFreeSyncptHandle(pDevEvo, &pDevEvo->preSyncptTable[i]);
+            freedSyncpt = TRUE;
+        }
+    }
+
+    return freedSyncpt;
+}
+
+NvU32 nvCtxDmaBind(
+    NVDevEvoPtr pDevEvo,
+    NVEvoChannelPtr pChannel,
+    NvU32 hCtxDma)
+{
+    NV0002_CTRL_BIND_CONTEXTDMA_PARAMS params = { };
+    NvU32 ret;
+    NvBool retryOnlyOnce = TRUE;
+
+    params.hChannel = pChannel->pb.channel_handle;
+
+retryOnce:
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         hCtxDma,
+                         NV0002_CTRL_CMD_BIND_CONTEXTDMA,
+                         &params, sizeof(params));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        /*!
+ * syncpts (lazily freed) occupy space in the disp ctxDma hash + * table, and therefore may cause bind ctxDma to fail. + * Free any unused syncpts and try again. + */ + if (retryOnlyOnce) { + /*! try to free syncpt only once */ + if (GarbageCollectSyncpts(pDevEvo)) { + retryOnlyOnce = FALSE; + goto retryOnce; + } + } + } + return ret; +} + +void nvCtxDmaFree(NVDevEvoPtr pDevEvo, + NvU32 deviceHandle, + NvU32 *hDispCtxDma) +{ + if (*hDispCtxDma) { + nvRmApiFree(nvEvoGlobal.clientHandle, + deviceHandle, *hDispCtxDma); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, *hDispCtxDma); + *hDispCtxDma = 0; + } +} + +NvU32 nvCtxDmaAlloc(NVDevEvoPtr pDevEvo, NvU32 *pCtxDmaHandle, + NvU32 memoryHandle, NvU32 localCtxDmaFlags, NvU64 limit) +{ + NV_CONTEXT_DMA_ALLOCATION_PARAMS ctxdmaParams = { }; + NvU32 ret, ctxDmaHandle; + + ctxDmaHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + if (ctxDmaHandle == 0) { + return NVOS_STATUS_ERROR_GENERIC; + } + + ctxdmaParams.hMemory = memoryHandle; + ctxdmaParams.flags = localCtxDmaFlags; + ctxdmaParams.offset = 0; + ctxdmaParams.limit = limit; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + ctxDmaHandle, + NV01_CONTEXT_DMA, + &ctxdmaParams); + + if (ret != NVOS_STATUS_SUCCESS) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, ctxDmaHandle); + } else { + *pCtxDmaHandle = ctxDmaHandle; + } + + return ret; +} diff --git a/src/nvidia-modeset/src/nvkms-cursor.c b/src/nvidia-modeset/src/nvkms-cursor.c new file mode 100644 index 0000000..b3bf077 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-cursor.c @@ -0,0 +1,457 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* this source file contains routines for setting and moving the cursor. + * NV50 specific */ + +#include "nvkms-cursor.h" +#include "nvkms-types.h" +#include "nvkms-dma.h" +#include "nvkms-utils.h" +#include "nvkms-rm.h" +#include "nvkms-evo.h" +#include "nvkms-vrr.h" +#include "nvkms-surface.h" +#include "nvkms-flip.h" + +#include "nvkms-rmapi.h" + +#include /* sizeof(GK104DispCursorControlPio) */ + +#include /* NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS */ + +/*! + * Get the NVSurfaceEvoPtrs described by NvKmsSetCursorImageCommonParams. 
+ * + * Look up the surfaces described by NvKmsSetCursorImageCommonParams, + * and check that the surfaces are valid for use by cursor on the + * given pDevEvo. + * + * \param[in] pDevEvo The device on which the cursor image will be set. + * \param[in] pParams The parameter structure indicating the surfaces. + * \param[out] pSurfaceEvo The array of surfaces to be assigned. + * + * \return If the parameters are valid, return TRUE and assign + * pSurfaceEvo. Otherwise, return FALSE. + */ +NvBool nvGetCursorImageSurfaces( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const struct NvKmsSetCursorImageCommonParams *pParams, + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES]) +{ + NvU32 eye; + + nvkms_memset(pSurfaceEvos, 0, sizeof(NVSurfaceEvoRec *) * NVKMS_MAX_EYES); + + /* XXX NVKMS TODO: add support for stereo cursor */ + nvAssert(pParams->surfaceHandle[NVKMS_RIGHT] == 0); + + for (eye = 0; eye < ARRAY_LEN(pParams->surfaceHandle); eye++) { + if (pParams->surfaceHandle[eye] != 0) { + NVSurfaceEvoPtr pSurfaceEvo = NULL; + pSurfaceEvo = + nvEvoGetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + pParams->surfaceHandle[eye], + TRUE /* isUsedByCursorChannel */, + FALSE /* isUsedByLayerChannel */); + if ((pSurfaceEvo == NULL) || + (pSurfaceEvo->isoType != NVKMS_MEMORY_ISO)) { + return FALSE; + } + + pSurfaceEvos[eye] = pSurfaceEvo; + } + } + + return TRUE; +} + +static void +SetCursorImageOneHead(NVDispEvoPtr pDispEvo, + const NvU32 head, + NVSurfaceEvoRec *pSurfaceEvoNew, + const struct NvKmsCompositionParams *pCursorCompParams, + NVEvoUpdateState *pUpdateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + NvBool changed = FALSE; + + NVSurfaceEvoPtr pSurfaceEvoOld = + pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo; + + if (pSurfaceEvoNew != NULL && + nvkms_memcmp(pCursorCompParams, + &pDevEvo->gpus[sd].headState[head].cursor.cursorCompParams, + sizeof(*pCursorCompParams)) != 0) { + pDevEvo->gpus[sd].headState[head].cursor.cursorCompParams = + *pCursorCompParams; + changed = TRUE; + } + + if (pSurfaceEvoNew != pSurfaceEvoOld) { + + if (pSurfaceEvoNew != NULL) { + nvEvoIncrementSurfaceRefCnts(pSurfaceEvoNew); + } + + if (pSurfaceEvoOld) { + nvEvoDecrementSurfaceRefCnts(pDevEvo, pSurfaceEvoOld); + } + + pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo = pSurfaceEvoNew; + changed = TRUE; + } + + if (changed) { + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetCursorImage( + pDevEvo, + head, + pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo, + pUpdateState, + &pDevEvo->gpus[sd].headState[head].cursor.cursorCompParams); + nvPopEvoSubDevMask(pDevEvo); + } +} + +static void +SetCursorImage(NVDispEvoPtr pDispEvo, + const NvU32 apiHead, + NVSurfaceEvoRec *pSurfaceEvoNew, + const struct NvKmsCompositionParams *pCursorCompParams) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + NvU32 head; + NVEvoUpdateState updateState = { }; + NvBool changed = FALSE; + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + SetCursorImageOneHead(pDispEvo, + head, + pSurfaceEvoNew, + pCursorCompParams, + &updateState); + } + + if (!nvIsUpdateStateEmpty(pDevEvo, &updateState)) { + nvEvoUpdateAndKickOff(pDispEvo, FALSE, &updateState, + TRUE /* releaseElv */); + changed = TRUE; + } + + /* + * Unconditionally trigger an unstall: even if the cursor image or + * composition didn't change, clients setting the cursor image would 
expect
+     * a VRR unstall. Also, if the cursor changed from an image to no image
+     * (i.e., hiding the cursor), that should trigger a VRR unstall, too.
+     */
+    nvTriggerVrrUnstallSetCursorImage(pDispEvo, changed);
+}
+
+static NvBool
+FlipCursorImage(NVDispEvoPtr pDispEvo,
+                const struct NvKmsPerOpenDev *pOpenDevice,
+                NvU32 apiHead,
+                const struct NvKmsSetCursorImageCommonParams *pImageParams)
+{
+    const NvU32 sd = pDispEvo->displayOwner;
+    NvBool ret;
+    struct NvKmsFlipRequestOneHead *pFlipHead =
+        nvCalloc(1, sizeof(*pFlipHead));
+
+    if (pFlipHead == NULL) {
+        return FALSE;
+    }
+
+    pFlipHead->sd = sd;
+    pFlipHead->head = apiHead;
+    pFlipHead->flip.cursor.image = *pImageParams;
+    pFlipHead->flip.cursor.imageSpecified = TRUE;
+
+    ret = nvFlipEvo(pDispEvo->pDevEvo,
+                    pOpenDevice,
+                    pFlipHead,
+                    1     /* numFlipHeads */,
+                    TRUE  /* commit */,
+                    NULL  /* pReply */,
+                    FALSE /* skipUpdate */,
+                    FALSE /* allowFlipLock */);
+
+    nvFree(pFlipHead);
+
+    return ret;
+}
+
+NvBool nvSetCursorImage(
+    NVDispEvoPtr pDispEvo,
+    const struct NvKmsPerOpenDev *pOpenDevice,
+    const NVEvoApiHandlesRec *pOpenDevSurfaceHandles,
+    NvU32 apiHead,
+    const struct NvKmsSetCursorImageCommonParams *pParams)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES];
+    NVSurfaceEvoPtr pSurfaceEvoNew;
+    NvBool flipCursorImage = FALSE;
+
+    if (!nvGetCursorImageSurfaces(pDevEvo, pOpenDevSurfaceHandles,
+                                  pParams, pSurfaceEvos)) {
+        return FALSE;
+    }
+
+    pSurfaceEvoNew = pSurfaceEvos[NVKMS_LEFT];
+
+    /*
+     * Use a flip to apply or remove the workaround for hardware bug 2052012.
+     */
+    if (NV5070_CTRL_SYSTEM_GET_CAP(
+            pDevEvo->capsBits,
+            NV5070_CTRL_SYSTEM_CAPS_BUG_2052012_GLITCHY_MCLK_SWITCH)) {
+        const NvU32 sd = pDispEvo->displayOwner;
+        const NVDispApiHeadStateEvoRec *pApiHeadState =
+            &pDispEvo->apiHeadState[apiHead];
+        NvU32 head;
+
+        FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
+            NVSurfaceEvoPtr pSurfaceEvoOld =
+                pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo;
+
+            if ((pSurfaceEvoOld != pSurfaceEvoNew) &&
+                (pSurfaceEvoOld == NULL || pSurfaceEvoNew == NULL)) {
+                flipCursorImage = TRUE;
+                break;
+            }
+        }
+    }
+
+    if (flipCursorImage) {
+        return FlipCursorImage(pDispEvo,
+                               pOpenDevice, apiHead, pParams);
+    }
+
+    SetCursorImage(pDispEvo,
+                   apiHead,
+                   pSurfaceEvoNew,
+                   &pParams->cursorCompParams);
+
+    return TRUE;
+}
+
+void nvEvoMoveCursorInternal(NVDispEvoPtr pDispEvo,
+                             NvU32 head, NvS16 x, NvS16 y)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    const NvU32 sd = pDispEvo->displayOwner;
+    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
+
+    pDevEvo->cursorHal->MoveCursor(pDevEvo, sd, head, x, y);
+
+    /* If the cursor is visible, trigger a VRR unstall to display the
+     * cursor at the new position */
+    if (pEvoSubDev->headState[head].cursor.pSurfaceEvo) {
+        nvTriggerVrrUnstallMoveCursor(pDispEvo);
+    }
+}
+
+void nvMoveCursor(NVDispEvoPtr pDispEvo, const NvU32 apiHead,
+                  const struct NvKmsMoveCursorCommonParams *pParams)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    const NVDispApiHeadStateEvoRec *pApiHeadState =
+        &pDispEvo->apiHeadState[apiHead];
+    NvU16 hwViewportInWidth;
+    NvU32 head;
+    NvBool firstHead;
+
+    /* XXX NVKMS TODO: validate x,y against current viewport in?
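+     *
+     * (Illustrative note on the mergeHeadSection translation below, with
+     * hypothetical values: in a two-section merged mode with
+     * hwViewportInWidth == 1920, section 1 translates pParams->x by -1920
+     * so each hardware head receives section-local coordinates.)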
*/ + + nvAssert(apiHead != NV_INVALID_HEAD); + + firstHead = NV_TRUE; + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + const NvU32 sd = pDispEvo->displayOwner; + + if (firstHead) { + hwViewportInWidth = pTimings->viewPort.in.width; + } else { + nvAssert(hwViewportInWidth == pTimings->viewPort.in.width); + } + + pDevEvo->gpus[sd].headState[head].cursor.x = + pParams->x - (hwViewportInWidth * pHeadState->mergeHeadSection); + pDevEvo->gpus[sd].headState[head].cursor.y = pParams->y; + + nvEvoMoveCursorInternal(pDispEvo, + head, + pDevEvo->gpus[sd].headState[head].cursor.x, + pDevEvo->gpus[sd].headState[head].cursor.y); + + firstHead = NV_FALSE; + } +} + +// Allocate and map cursor position PIO channels +NvBool nvAllocCursorEvo(NVDevEvoPtr pDevEvo) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS PioChannelAllocParams = { 0 }; + NVDispEvoPtr pDispEvo; + NvU32 sd; + + PioChannelAllocParams.channelInstance = head; + // No notifiers in cursor channel + PioChannelAllocParams.hObjectNotify = 0; + pDevEvo->cursorHandle[head] = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (nvRmApiAlloc( + nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + pDevEvo->cursorHandle[head], + pDevEvo->cursorHal->klass, + &PioChannelAllocParams) != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate CURSOR PIO for head %d", + head); + nvFreeCursorEvo(pDevEvo); + return FALSE; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + void *pPioDisplayChannel; + NvU32 status; + + status = nvRmApiMapMemory( + nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pDevEvo->cursorHandle[head], + 0, + sizeof(GK104DispCursorControlPio), + &pPioDisplayChannel, + 0); + if (status != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Failed to map CURSOR PIO for head %d", + head); + nvFreeCursorEvo(pDevEvo); + return FALSE; + } + pEvoSubDev->cursorPio[head] = pPioDisplayChannel; + } + } + + return TRUE; +} + +// Free and unmap Cursor PIO Channels +void nvFreeCursorEvo(NVDevEvoPtr pDevEvo) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVDispEvoPtr pDispEvo; + NvU32 sd; + NvU32 status; + + if (pDevEvo->cursorHandle[head] == 0) { + continue; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvU32 status; + + if (pEvoSubDev->cursorPio[head] == NULL) { + continue; + } + + status = nvRmApiUnmapMemory( + nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pDevEvo->cursorHandle[head], + pEvoSubDev->cursorPio[head], + 0); + + if (status != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Failed to unmap cursor channel memory"); + } + pEvoSubDev->cursorPio[head] = NULL; + } + + status = nvRmApiFree( + nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + pDevEvo->cursorHandle[head]); + + if (status != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to tear down Cursor channel"); + } + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->cursorHandle[head]); + + pDevEvo->cursorHandle[head] = 0; + } +} + +extern NVEvoCursorHAL nvEvoCursor91; +extern NVEvoCursorHAL nvEvoCursorC3; +extern NVEvoCursorHAL nvEvoCursorC5; +extern NVEvoCursorHAL nvEvoCursorC6; 
+extern NVEvoCursorHAL nvEvoCursorC9; + + +enum NvKmsAllocDeviceStatus nvInitDispHalCursorEvo(NVDevEvoPtr pDevEvo) +{ + static const NVEvoCursorHALPtr cursorTable[] = { + &nvEvoCursor91, + &nvEvoCursorC3, + &nvEvoCursorC5, + &nvEvoCursorC6, + &nvEvoCursorC9, + }; + + int i; + + for (i = 0; i < ARRAY_LEN(cursorTable); i++) { + if (nvRmEvoClassListCheck(pDevEvo, cursorTable[i]->klass)) { + + pDevEvo->cursorHal = cursorTable[i]; + + return NVKMS_ALLOC_DEVICE_STATUS_SUCCESS; + } + } + + return NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; +} diff --git a/src/nvidia-modeset/src/nvkms-cursor2.c b/src/nvidia-modeset/src/nvkms-cursor2.c new file mode 100644 index 0000000..30ba3a8 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-cursor2.c @@ -0,0 +1,50 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include + +#include + +static void MoveCursor90(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + NvS16 x, NvS16 y) +{ + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + GK104DispCursorControlPio *pEvoCursorControl = + pEvoSubDev->cursorPio[head]; + + pEvoCursorControl->SetCursorHotSpotPointsOut[NVKMS_LEFT] = + DRF_NUM(917A, _SET_CURSOR_HOT_SPOT_POINTS_OUT, _X, x) | + DRF_NUM(917A, _SET_CURSOR_HOT_SPOT_POINTS_OUT, _Y, y); + + pEvoCursorControl->Update = + DRF_DEF(917A, _UPDATE, _INTERLOCK_WITH_CORE, _DISABLE); +} + +NVEvoCursorHAL nvEvoCursor91 = { + NV917A_CURSOR_CHANNEL_PIO, /* klass */ + MoveCursor90, /* MoveCursor */ + NULL, /* ReleaseElv */ + { /* caps */ + 256, /* maxSize */ + }, +}; diff --git a/src/nvidia-modeset/src/nvkms-cursor3.c b/src/nvidia-modeset/src/nvkms-cursor3.c new file mode 100644 index 0000000..687bf52 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-cursor3.c @@ -0,0 +1,125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include + +#include +#include +#include +#include + +static void WaitForFreeSpace(NVDevEvoPtr pDevEvo, + NVC37ADispCursorImmControlPio *pEvoCursorControl) +{ + /* + * Wait for Free to be non-zero, indicating there is space to push a method. + * The only case where Free is expected to be zero is when display + * frontend (FE) hardware is processing a previous method. + * .1s should be more than enough time to wait for that. + */ + NvU64 startTime = 0; + const NvU64 timeout = 100000; /* 0.1 seconds */ + + do { + if (pEvoCursorControl->Free != 0) { + return; + } + + if (nvExceedsTimeoutUSec(pDevEvo, &startTime, timeout)) { + break; + } + + nvkms_yield(); + + } while (TRUE); + + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Timed out waiting for cursor PIO space"); +} + +static void MoveCursorC3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + NvS16 x, NvS16 y) +{ + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NVC37ADispCursorImmControlPio *pEvoCursorControl = + pEvoSubDev->cursorPio[head]; + + WaitForFreeSpace(pDevEvo, pEvoCursorControl); + pEvoCursorControl->SetCursorHotSpotPointOut[0] = + DRF_NUM(C37A, _SET_CURSOR_HOT_SPOT_POINT_OUT, _X, x) | + DRF_NUM(C37A, _SET_CURSOR_HOT_SPOT_POINT_OUT, _Y, y); + + WaitForFreeSpace(pDevEvo, pEvoCursorControl); + pEvoCursorControl->Update = + DRF_DEF(C37A, _UPDATE, _FLIP_LOCK_PIN, _LOCK_PIN_NONE); +} + +static void ReleaseElvC3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head) +{ + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NVC37ADispCursorImmControlPio *pEvoCursorControl = + pEvoSubDev->cursorPio[head]; + + WaitForFreeSpace(pDevEvo, pEvoCursorControl); + pEvoCursorControl->Update = + DRF_DEF(C37A, _UPDATE, _FLIP_LOCK_PIN, _LOCK_PIN_NONE) | + DRF_DEF(C37A, _UPDATE, _RELEASE_ELV, _TRUE); +} + +NVEvoCursorHAL nvEvoCursorC3 = { + NVC37A_CURSOR_IMM_CHANNEL_PIO, /* klass */ + MoveCursorC3, /* MoveCursor */ + ReleaseElvC3, /* ReleaseElv */ + { /* caps */ + 256, /* maxSize */ + }, +}; + +NVEvoCursorHAL nvEvoCursorC5 = { + NVC57A_CURSOR_IMM_CHANNEL_PIO, /* klass */ + MoveCursorC3, /* MoveCursor */ + ReleaseElvC3, /* ReleaseElv */ + { /* caps */ + 256, /* maxSize */ + }, +}; + +NVEvoCursorHAL nvEvoCursorC6 = { + NVC67A_CURSOR_IMM_CHANNEL_PIO, /* klass */ + MoveCursorC3, /* MoveCursor */ + ReleaseElvC3, /* ReleaseElv */ + { /* caps */ + 256, /* maxSize */ + }, +}; + +NVEvoCursorHAL nvEvoCursorC9 = { + NVC97A_CURSOR_IMM_CHANNEL_PIO, /* klass */ 
+    MoveCursorC3,                  /* MoveCursor */
+    ReleaseElvC3,                  /* ReleaseElv */
+    {                              /* caps */
+        256,                       /* maxSize */
+    },
+};
+
diff --git a/src/nvidia-modeset/src/nvkms-difr.c b/src/nvidia-modeset/src/nvkms-difr.c
new file mode 100644
index 0000000..9b83889
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-difr.c
@@ -0,0 +1,867 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * DIFR stands for Display Idle Frame Refresh, which is a low-power feature
+ * for display that allows scanning out frames from L2 cache. The actual GPU
+ * memory can be gated off while the display outputs are served off the
+ * cache.
+ *
+ * DIFR is defined in three operational layers 1, 2, and 3 and operates in
+ * terms of entering and exiting these layers in order.
+ *
+ * Layer 1 has to deem it possible to enter DIFR before layers 2 and 3 can
+ * start considering it. Any layer seeing conditions that prevent entering
+ * DIFR mode can abort the attempt to enter. Finally, if all layers agree,
+ * the hardware will switch to low-power mode, turn off GPU memory, and
+ * start serving pixels off the cache.
+ *
+ * Managing some high-level state to help the hardware transition from one
+ * layer to another is implemented in NVKMS and RM. Simplified, NVKMS
+ * handles assistance for layer 1 and RM for layer 2.
+ *
+ * Much of the layer 1 or NVKMS DIFR specific code is collected into this
+ * file, centered around an object called NVDIFRStateEvo.
+ *
+ * The role of NVKMS is to listen for DIFR prefetch events (which originate
+ * from h/w and get dispatched by RM), prefetch framebuffer pixels into L2
+ * cache, and report back to h/w (via RM). NVKMS will also disable DIFR each
+ * time there's an explicitly known display update (such as a flip) and
+ * re-enable it once enough idle time has passed.
+ *
+ * The rest of NVKMS will call entrypoints in this file to inform the DIFR
+ * implementation here about changes in relevant state.
+ *
+ * For each DevEvo object nvkms-evo.c will call
+ * nvDIFRAllocate()/nvDIFRFree() here to also create a corresponding DIFR
+ * state object. The DIFR state will contain everything needed to implement
+ * prefetching such as channel and copy engine allocation.
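+ *
+ * As a concrete (illustrative) timeline: the display goes idle -> h/w
+ * raises a prefetch event which RM dispatches to NVKMS -> NVKMS copies each
+ * active scanout surface over itself with the copy engine, populating the
+ * L2 cache -> NVKMS reports success via RM -> the layers agree and GPU
+ * memory is gated off. Any flip in between aborts the sequence.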
+ * + * If DIFR state was successfully allocated, nvkms-rm.c will create an event + * listener for DIFR prefetch events which will call back to + * nvDIFRPrefetchSurfaces() here in order to do prefetching. This means + * going through each active head and issuing a special CE copy, for all + * layers of the surface, to populate the L2 cache with framebuffer pixel + * data. + * + * After all prefetches are complete, RM needs to know about the completion + * status. This is implemented in nvDIFRSendPrefetchResponse(), again called + * by nvkms-rm.c. + * + * NVKMS must also temporarily disable DIFR in hardware if it knows about + * upcoming updates to the framebuffer and then re-enable DIFR when the + * screen becomes idle again. For this, nvFlipEvoOneHead() will call us back + * via nvDIFRNotifyFlip() when a new flip is happening. We will call RM to + * disable DIFR, then set up a timer into the future and when it triggers we + * will re-enable DIFR again. But if nvFlipEvoOneHead() notifies us about + * another upcoming frame, we'll just replace the old timer with a new one. + * Thus, the timer will eventually wake us after notifications of new frames + * cease to come in. + * + * The DIFR hardware will automatically detect activity in graphics/copy + * engines and will not try to enter the low-power mode if there is any. So + * this is something NVKMS doesn't have to worry about. + * + * Userspace can also flag surfaces as non-cacheable which makes us abort + * any prefetches if those surfaces are currently displayed on any active + * heads. For now, CPU mapped surfaces are flagged as such because neither + * NVKMS nor the hardware can observe CPU writes into a surface. + */ + + + +#include "nvkms-difr.h" +#include "nvkms-push.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "nvkms-utils.h" +#include "nvkms-evo.h" + +#include "nvidia-push-init.h" +#include "nvidia-push-methods.h" +#include "nvidia-push-types.h" +#include "nvidia-push-types.h" +#include "nvidia-push-utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#define PREFETCH_DONE_VALUE 0x00000fed + +/* How long to wait after last flip until re-enabling DIFR. */ +#define DIFR_IDLE_WAIT_PERIOD_US 500000 + +/* How long to wait for prefetch dma completion. */ +#define DIFR_PREFETCH_WAIT_PERIOD_US 10000 /* 10ms */ + +/* + * DIFR runtime state + */ +typedef struct _NVDIFRStateEvoRec { + NVDevEvoPtr pDevEvo; + NvU32 copyEngineClass; + NvU32 copyEngineType; + + /* + * This is kept in sync with whether DIFR is explicitly disabled in + * hardware. + */ + NvBool hwDisabled; + NvU64 lastFlipTime; + nvkms_timer_handle_t *idleTimer; + + /* Pushbuffer for DIFR prefetches. */ + NvPushChannelRec prefetchPushChannel; + NvU32 pushChannelHandlePool[NV_PUSH_CHANNEL_HANDLE_POOL_NUM]; + + /* Copy engine instance for DIFR prefetches. */ + NvU32 prefetchEngine; + + /* For tracking which surfaces have been prefetched already. */ + NvU32 prefetchPass; +} NVDIFRStateEvoRec; + +/* + * Prefetch parameters for DMA copy. 
+ */
+typedef struct {
+    NvUPtr surfGpuAddress;
+    size_t surfSizeBytes;
+    enum NvKmsSurfaceMemoryFormat surfFormat;
+    NvU32 surfPitchBytes;
+} NVDIFRPrefetchParams;
+
+static NvBool AllocDIFRPushChannel(NVDIFRStateEvoPtr pDifr);
+static void FreeDIFRPushChannel(NVDIFRStateEvoPtr pDifr);
+static NvBool AllocDIFRCopyEngine(NVDIFRStateEvoPtr pDifr);
+static void FreeDIFRCopyEngine(NVDIFRStateEvoPtr pDifr);
+
+static NvU32 PrefetchSingleSurface(NVDIFRStateEvoPtr pDifr,
+                                   NVDIFRPrefetchParams *pParams,
+                                   size_t *remainingCache);
+static NvBool PrefetchHelperSurfaceEvo(NVDIFRStateEvoPtr pDifr,
+                                       size_t *cacheRemaining,
+                                       NVSurfaceEvoPtr pSurfaceEvo,
+                                       NvU32 *status);
+
+static NvBool SetDisabledState(NVDIFRStateEvoPtr pDifr,
+                               NvBool shouldDisable);
+static NvBool IsCECompatibleWithDIFR(NVDevEvoPtr pDevEvo,
+                                     NvU32 instance);
+static void EnsureIdleTimer(NVDIFRStateEvoPtr pDifr);
+static void IdleTimerProc(void *dataPtr, NvU32 dataU32);
+
+/*
+ * Public entry points.
+ */
+
+NVDIFRStateEvoPtr nvDIFRAllocate(NVDevEvoPtr pDevEvo)
+{
+    NV2080_CTRL_CMD_LPWR_DIFR_CTRL_PARAMS params = { 0 };
+    NVDIFRStateEvoPtr pDifr;
+    NvU32 ret;
+
+    /* DIFR not supported/implemented on RM SLI */
+    if (pDevEvo->numSubDevices > 1) {
+        return NULL;
+    }
+
+    params.ctrlParamVal = NV2080_CTRL_LPWR_DIFR_CTRL_SUPPORT_STATUS;
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->pSubDevices[0]->handle,
+                         NV2080_CTRL_CMD_LPWR_DIFR_CTRL,
+                         &params,
+                         sizeof(params));
+
+    if (ret == NV_ERR_NOT_SUPPORTED) {
+        return NULL;
+    }
+
+    if (ret != NV_OK) {
+        nvEvoLogDev(pDevEvo,
+                    EVO_LOG_WARN,
+                    "unable to query whether display caching is supported");
+        return NULL;
+    }
+
+    if (params.ctrlParamVal != NV2080_CTRL_LPWR_DIFR_SUPPORTED) {
+        return NULL;
+    }
+
+    pDifr = nvCalloc(sizeof(*pDifr), 1);
+    if (!pDifr) {
+        return NULL;
+    }
+
+    pDifr->pDevEvo = pDevEvo;
+
+    if (!AllocDIFRPushChannel(pDifr) ||
+        !AllocDIFRCopyEngine(pDifr)) {
+        nvDIFRFree(pDifr);
+
+        return NULL;
+    }
+
+    return pDifr;
+}
+
+void nvDIFRFree(NVDIFRStateEvoPtr pDifr)
+{
+    nvAssert(pDifr);
+
+    /* Cancel pending idle timer. */
+    nvkms_free_timer(pDifr->idleTimer);
+
+    /* Leave DIFR enabled (default state). */
+    SetDisabledState(pDifr, FALSE);
+
+    /* Free resources. */
+    FreeDIFRCopyEngine(pDifr);
+    FreeDIFRPushChannel(pDifr);
+
+    nvFree(pDifr);
+}
+
+/*
+ * Notify of a new or upcoming flip. This will disable DIFR for a brief
+ * period in anticipation of further flips.
+ */
+void nvDIFRNotifyFlip(NVDIFRStateEvoPtr pDifr)
+{
+    pDifr->lastFlipTime = nvkms_get_usec();
+
+    /* A flip is coming: signal RM to disable DIFR if we haven't already. */
+    if (SetDisabledState(pDifr, TRUE)) {
+        /* Check back after a while and re-enable if idle again. */
+        EnsureIdleTimer(pDifr);
+    }
+}
+
+NvU32 nvDIFRPrefetchSurfaces(NVDIFRStateEvoPtr pDifr, size_t l2CacheSize)
+{
+    NVDevEvoPtr pDevEvo = pDifr->pDevEvo;
+    NVEvoSubDevPtr pSubDev;
+    NVEvoSubDevHeadStatePtr pHeadState;
+    size_t cacheRemaining = l2CacheSize;
+    NvU32 layer;
+    NvU32 head;
+    NvU32 apiHead;
+    NvU32 eye;
+    NvU32 i;
+    NvU32 status;
+
+    /*
+     * If the console is active then the scanout surfaces will get updated by
+     * the OS console driver without any knowledge of NVKMS; DIFR should not
+     * be enabled in that case.
+     */
+    if (nvEvoIsConsoleActive(pDevEvo)) {
+        /*
+         * NV2080_CTRL_LPWR_DIFR_PREFETCH_FAIL_INSUFFICIENT_L2_SIZE: Despite
+         * what the name suggests this will actually tell RM (and further PMU)
+         * to disable DIFR until the next modeset.
+         */
+        return NV2080_CTRL_LPWR_DIFR_PREFETCH_FAIL_INSUFFICIENT_L2_SIZE;
+    }
+
+    /*
+     * If DIFR is disabled it's because we know we were or will be flipping.
+     */
+    if (pDifr->hwDisabled) {
+        return NV2080_CTRL_LPWR_DIFR_PREFETCH_FAIL_OS_FLIPS_ENABLED;
+    }
+
+    status = NV2080_CTRL_LPWR_DIFR_PREFETCH_SUCCESS;
+
+    pSubDev = &pDevEvo->gpus[0];
+
+    /* Get new prefetch pass counter for this iteration. */
+    pDifr->prefetchPass++;
+
+    /*
+     * Start by prefetching the cursor surface and image surfaces from
+     * present layers.
+     */
+    for (head = 0; head < pDevEvo->numHeads; head++) {
+        pHeadState = &pSubDev->headState[head];
+
+        if (!PrefetchHelperSurfaceEvo(pDifr,
+                                      &cacheRemaining,
+                                      pHeadState->cursor.pSurfaceEvo,
+                                      &status)) {
+            goto out;
+        }
+
+        for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+            for (eye = 0; eye < NVKMS_MAX_EYES; eye++) {
+
+                if (!PrefetchHelperSurfaceEvo(pDifr,
+                                              &cacheRemaining,
+                                              pHeadState->layer[layer].pSurfaceEvo[eye],
+                                              &status)) {
+                    goto out;
+                }
+            }
+
+            /*
+             * Prefetch per-layer LUTs, if any, but skip null LUTs and
+             * duplicates already prefetched.
+             */
+            if (!PrefetchHelperSurfaceEvo(pDifr,
+                                          &cacheRemaining,
+                                          pHeadState->layer[layer].inputLut.pLutSurfaceEvo,
+                                          &status)) {
+                goto out;
+            }
+
+            if (!PrefetchHelperSurfaceEvo(pDifr,
+                                          &cacheRemaining,
+                                          pHeadState->layer[layer].tmoLut.pLutSurfaceEvo,
+                                          &status)) {
+                goto out;
+            }
+        }
+    }
+
+    /*
+     * Finally prefetch the known main LUTs.
+     */
+    if (!PrefetchHelperSurfaceEvo(pDifr,
+                                  &cacheRemaining,
+                                  pDevEvo->lut.defaultLut,
+                                  &status)) {
+        goto out;
+    }
+
+    for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
+        for (i = 0; i < ARRAY_LEN(pDevEvo->lut.apiHead[apiHead].LUT); i++) {
+            if (!PrefetchHelperSurfaceEvo(pDifr,
+                                          &cacheRemaining,
+                                          pDevEvo->lut.apiHead[apiHead].LUT[i],
+                                          &status)) {
+                goto out;
+            }
+        }
+    }
+
+out:
+    return status;
+}
+
+NvBool nvDIFRSendPrefetchResponse(NVDIFRStateEvoPtr pDifr, NvU32 responseStatus)
+{
+    NVDevEvoPtr pDevEvo = pDifr->pDevEvo;
+    NV2080_CTRL_CMD_LPWR_DIFR_PREFETCH_RESPONSE_PARAMS params = { 0 };
+
+    params.responseVal = responseStatus;
+
+    return (nvRmApiControl(nvEvoGlobal.clientHandle,
+                           pDevEvo->pSubDevices[0]->handle,
+                           NV2080_CTRL_CMD_LPWR_DIFR_PREFETCH_RESPONSE,
+                           &params,
+                           sizeof(params))
+            == NV_OK);
+}
+
+/*
+ * Local helper functions.
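+ *
+ * (Overview: push channel and copy engine setup, per-surface prefetch via
+ * DMA copy, DIFR enable/disable signalling to RM, and the idle timer that
+ * re-enables DIFR after flips stop.)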
+ */
+static NvBool AllocDIFRPushChannel(NVDIFRStateEvoPtr pDifr)
+{
+    NVDevEvoPtr pDevEvo = pDifr->pDevEvo;
+    NvPushAllocChannelParams params = { 0 };
+    NvU32 i;
+
+    pDifr->copyEngineType = NV2080_ENGINE_TYPE_NULL;
+
+    for (i = 0; i < NV2080_ENGINE_TYPE_COPY_SIZE; i++) {
+        if (IsCECompatibleWithDIFR(pDevEvo, i)) {
+            pDifr->copyEngineType = NV2080_ENGINE_TYPE_COPY(i);
+            break;
+        }
+    }
+
+    if (pDifr->copyEngineType == NV2080_ENGINE_TYPE_NULL) {
+        return FALSE;
+    }
+
+    params.engineType = pDifr->copyEngineType;
+    params.pDevice = &pDifr->pDevEvo->nvPush.device;
+    params.difrPrefetch = TRUE;
+    params.logNvDiss = FALSE;
+    params.noTimeout = FALSE;
+    params.ignoreChannelErrors = FALSE;
+    params.numNotifiers = 1;
+    params.pushBufferSizeInBytes = 1024;
+
+    ct_assert(sizeof(params.handlePool) == sizeof(pDifr->pushChannelHandlePool));
+
+    for (i = 0; i < ARRAY_LEN(pDifr->pushChannelHandlePool); i++) {
+        pDifr->pushChannelHandlePool[i] =
+            nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
+
+        params.handlePool[i] = pDifr->pushChannelHandlePool[i];
+    }
+
+    if (!nvPushAllocChannel(&params, &pDifr->prefetchPushChannel)) {
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+static void FreeDIFRPushChannel(NVDIFRStateEvoPtr pDifr)
+{
+    NVDevEvoPtr pDevEvo = pDifr->pDevEvo;
+    NvU32 i;
+
+    nvPushFreeChannel(&pDifr->prefetchPushChannel);
+
+    for (i = 0; i < ARRAY_LEN(pDifr->pushChannelHandlePool); i++) {
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
+                           pDifr->pushChannelHandlePool[i]);
+        pDifr->pushChannelHandlePool[i] = 0;
+    }
+}
+
+static NvBool AllocDIFRCopyEngine(NVDIFRStateEvoPtr pDifr)
+{
+    NVB0B5_ALLOCATION_PARAMETERS allocParams = { 0 };
+    NVDevEvoPtr pDevEvo = pDifr->pDevEvo;
+    NvU32 ret;
+    NvU32 ceClass = 0, i;
+
+    static const NvU32 ceClasses[] = {
+        AMPERE_DMA_COPY_B,
+    };
+
+    for (i = 0; i < ARRAY_LEN(ceClasses); i++) {
+        if (nvRmEvoClassListCheck(pDevEvo, ceClasses[i])) {
+            ceClass = ceClasses[i];
+            break;
+        }
+    }
+
+    if (ceClass == 0) {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                         "Failed to find a supported DIFR CE class.");
+        return NV_FALSE;
+    }
+
+    pDifr->prefetchEngine = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
+    if (pDifr->prefetchEngine == 0) {
+        return NV_FALSE;
+    }
+
+    allocParams.version = NVB0B5_ALLOCATION_PARAMETERS_VERSION_1;
+    allocParams.engineType = pDifr->copyEngineType;
+
+    ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
+                       pDifr->prefetchPushChannel.channelHandle[0],
+                       pDifr->prefetchEngine,
+                       ceClass,
+                       &allocParams);
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvFreeUnixRmHandle(&pDifr->pDevEvo->handleAllocator,
+                           pDifr->prefetchEngine);
+        pDifr->prefetchEngine = 0;
+        return NV_FALSE;
+    }
+
+    // For Ampere vs Blackwell+ differentiation later
+    pDifr->copyEngineClass = ceClass;
+
+    return NV_TRUE;
+}
+
+static void FreeDIFRCopyEngine(NVDIFRStateEvoPtr pDifr)
+{
+    if (pDifr->prefetchEngine != 0) {
+        nvRmApiFree(nvEvoGlobal.clientHandle,
+                    pDifr->pDevEvo->pSubDevices[0]->handle,
+                    pDifr->prefetchEngine);
+    }
+
+    nvFreeUnixRmHandle(&pDifr->pDevEvo->handleAllocator,
+                       pDifr->prefetchEngine);
+    pDifr->prefetchEngine = 0;
+}
+
+static NvU32 PrefetchSingleSurface(NVDIFRStateEvoPtr pDifr,
+                                   NVDIFRPrefetchParams *pParams,
+                                   size_t *cacheRemaining)
+{
+    NvPushChannelPtr p = &pDifr->prefetchPushChannel;
+    NvU64 semaphoreGPUAddress = nvPushGetNotifierGpuAddress(p, 0, 0);
+    NvGpuSemaphore *semaphore = (NvGpuSemaphore *)
+        nvPushGetNotifierCpuAddress(p, 0, 0);
+    const NvKmsSurfaceMemoryFormatInfo *finfo =
+        nvKmsGetSurfaceMemoryFormatInfo(pParams->surfFormat);
+    NvU32 componentSizes;
+    NvU32
dataTransferType; + NvU32 line_length_in; + NvU32 line_count; + NvU64 starttime; + NvU64 endtime; + + /* + * Tell SET_REMAP_COMPONENTS the byte-size of a pixel in terms of color + * component size and count. It doesn't matter which actual combinations we + * choose as long as size*count will be equal to bytesPerPixel. This is + * because we won't be doing any actual remapping per se: we will just + * effectively tell the prefetch operation to fetch the correct amount of + * bytes for each pixel. + */ + switch (finfo->rgb.bytesPerPixel) { +#define COMPONENTS(size, num) \ + (DRF_DEF(A0B5, _SET_REMAP_COMPONENTS, _COMPONENT_SIZE, size) | \ + DRF_DEF(A0B5, _SET_REMAP_COMPONENTS, _NUM_SRC_COMPONENTS, num) | \ + DRF_DEF(A0B5, _SET_REMAP_COMPONENTS, _NUM_DST_COMPONENTS, num)) + + case 1: componentSizes = COMPONENTS(_ONE, _ONE); break; + case 2: componentSizes = COMPONENTS(_ONE, _TWO); break; + case 3: componentSizes = COMPONENTS(_ONE, _THREE); break; + case 4: componentSizes = COMPONENTS(_ONE, _FOUR); break; + case 6: componentSizes = COMPONENTS(_TWO, _THREE); break; + case 8: componentSizes = COMPONENTS(_TWO, _FOUR); break; + case 12: componentSizes = COMPONENTS(_FOUR, _THREE); break; + case 16: componentSizes = COMPONENTS(_FOUR, _FOUR); break; + default: componentSizes = 0; break; +#undef COMPONENTS + } + + /* + * TODO: For now, we don't prefetch multiplane surfaces. In order to do so + * we'd need to loop over all valid planes of the pSurfaceEvo and issue a + * prefetch for each plane. + */ + if (finfo->numPlanes > 1) { + /* + * Regardless of its wording, this is the proper failure code to send + * upstream. This lets the RM disable DIFR until the next modeset. + */ + return NV2080_CTRL_LPWR_DIFR_PREFETCH_FAIL_INSUFFICIENT_L2_SIZE; + } + + /* + * Compute some dimensional values to obtain correct blob size for + * prefetching. Use the given pitch and calculate the number of lines + * needed to cover the whole memory region. + */ + nvAssert(pParams->surfPitchBytes % finfo->rgb.bytesPerPixel == 0); + line_length_in = pParams->surfPitchBytes / finfo->rgb.bytesPerPixel; + + nvAssert(pParams->surfSizeBytes % pParams->surfPitchBytes == 0); + line_count = pParams->surfSizeBytes / pParams->surfPitchBytes; + + /* + * Greedy strategy: assume all surfaces will fit in the supplied L2 size but + * the first one that doesn't will cause the prefetch request to fail. If we + * run out of cache then DIFR will disable itself until the next modeset. + */ + if (*cacheRemaining < pParams->surfSizeBytes) { + return NV2080_CTRL_LPWR_DIFR_PREFETCH_FAIL_INSUFFICIENT_L2_SIZE; + } + + *cacheRemaining -= pParams->surfSizeBytes; + + /* + * Push buffer DMA copy and semaphore programming. 
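+     *
+     * (The sequence below: SET_REMAP_COMPONENTS describes the per-pixel
+     * byte size, OFFSET_IN == OFFSET_OUT makes the surface copy onto itself
+     * purely to populate L2, the PITCH/LINE methods set the geometry
+     * computed above, and LAUNCH_DMA is followed by a semaphore release
+     * that the CPU polls.)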
+ */ + nvPushSetObject(p, NVA06F_SUBCHANNEL_COPY_ENGINE, &pDifr->prefetchEngine); + nvPushMethod(p, NVA06F_SUBCHANNEL_COPY_ENGINE, + NVA0B5_SET_REMAP_COMPONENTS, 1); + nvPushSetMethodData(p, + componentSizes | + DRF_DEF(A0B5, _SET_REMAP_COMPONENTS, _DST_X, _CONST_A) | + DRF_DEF(A0B5, _SET_REMAP_COMPONENTS, _DST_Y, _CONST_A) | + DRF_DEF(A0B5, _SET_REMAP_COMPONENTS, _DST_Z, _CONST_A) | + DRF_DEF(A0B5, _SET_REMAP_COMPONENTS, _DST_W, _CONST_A)); + nvPushImmedVal(p, NVA06F_SUBCHANNEL_COPY_ENGINE, + NVA0B5_SET_REMAP_CONST_A, 0); + nvPushMethod(p, NVA06F_SUBCHANNEL_COPY_ENGINE, NVA0B5_OFFSET_IN_UPPER, 2); + nvPushSetMethodDataU64(p, pParams->surfGpuAddress); + nvPushMethod(p, NVA06F_SUBCHANNEL_COPY_ENGINE, NVA0B5_OFFSET_OUT_UPPER, 2); + nvPushSetMethodDataU64(p, pParams->surfGpuAddress); + + /* + * We don't expect phenomenally large pitches but the .mfs for DMA copy + * defines PitchIn/PitchOut to be of signed 32-bit type for all + * architectures so assert that the value will be what h/w understands. + */ + nvAssert(pParams->surfPitchBytes <= NV_S32_MAX); + + nvPushMethod(p, NVA06F_SUBCHANNEL_COPY_ENGINE, NVA0B5_PITCH_IN, 1); + nvPushSetMethodData(p, pParams->surfPitchBytes); + nvPushMethod(p, NVA06F_SUBCHANNEL_COPY_ENGINE, NVA0B5_PITCH_OUT, 1); + nvPushSetMethodData(p, pParams->surfPitchBytes); + + nvPushMethod(p, NVA06F_SUBCHANNEL_COPY_ENGINE, NVA0B5_LINE_LENGTH_IN, 1); + nvPushSetMethodData(p, line_length_in); + nvPushMethod(p, NVA06F_SUBCHANNEL_COPY_ENGINE, NVA0B5_LINE_COUNT, 1); + nvPushSetMethodData(p, line_count); + nvAssert(pParams->surfPitchBytes * line_count == pParams->surfSizeBytes); + + { + dataTransferType = DRF_DEF(A0B5, _LAUNCH_DMA, _DATA_TRANSFER_TYPE, _PIPELINED); + } + + nvPushMethod(p, NVA06F_SUBCHANNEL_COPY_ENGINE, NVA0B5_LAUNCH_DMA, 1); + nvPushSetMethodData + (p, + dataTransferType | + DRF_DEF(A0B5, _LAUNCH_DMA, _FLUSH_ENABLE, _TRUE) | + DRF_DEF(A0B5, _LAUNCH_DMA, _SEMAPHORE_TYPE, _NONE) | + DRF_DEF(A0B5, _LAUNCH_DMA, _INTERRUPT_TYPE, _NONE) | + DRF_DEF(A0B5, _LAUNCH_DMA, _REMAP_ENABLE, _TRUE) | + DRF_DEF(A0B5, _LAUNCH_DMA, _SRC_MEMORY_LAYOUT, _PITCH) | + DRF_DEF(A0B5, _LAUNCH_DMA, _DST_MEMORY_LAYOUT, _PITCH) | + DRF_DEF(A0B5, _LAUNCH_DMA, _MULTI_LINE_ENABLE, _TRUE) | + DRF_DEF(A0B5, _LAUNCH_DMA, _SRC_TYPE, _VIRTUAL) | + DRF_DEF(A0B5, _LAUNCH_DMA, _DST_TYPE, _VIRTUAL)); + + /* + * Reset semaphore value. A memory barrier will be issued by nvidia-push so + * we don't need one here. + */ + semaphore->data[0] = 0; + + /* Program a semaphore release after prefetch DMA copy. */ + nvPushMethod(p, 0, NVA06F_SEMAPHOREA, 4); + nvPushSetMethodDataU64(p, semaphoreGPUAddress); + nvPushSetMethodData(p, PREFETCH_DONE_VALUE); + nvPushSetMethodData(p, + DRF_DEF(A06F, _SEMAPHORED, _OPERATION, _RELEASE) | + DRF_DEF(A06F, _SEMAPHORED, _RELEASE_WFI, _EN) | + DRF_DEF(A06F, _SEMAPHORED, _RELEASE_SIZE, _4BYTE)); + nvPushKickoff(p); + + /* + * Errors and prefetch faults are handled as follows. If prefetch + * succeeds the semaphore release will trigger and we will exit upon + * seeing PREFETCH_DONE_VALUE in the memory location. Upon failure we + * will end up timing out, signal RM of the CE fault and DIFR will + * remain disabled until next driver load. + * + * Currently the total launch-to-end effective (with scheduling) + * prefetch rate on silicon seems to be around 15k pixels per + * microsecond, empirically. Thus, the time will range from a couple of + * hundred microseconds for a very small panel to slightly less than 2 + * milliseconds for a single 4k display.
We'll wait for 100us at a time + * and expect a realistic completion within a few milliseconds at most. + */ + starttime = nvkms_get_usec(); + do { + endtime = nvkms_get_usec(); + + if (semaphore->data[0] == PREFETCH_DONE_VALUE) { + return NV2080_CTRL_LPWR_DIFR_PREFETCH_SUCCESS; + } + + nvkms_usleep(100); + } while (endtime - starttime < DIFR_PREFETCH_WAIT_PERIOD_US); /* 10ms */ + + return NV2080_CTRL_LPWR_DIFR_PREFETCH_FAIL_CE_HW_ERROR; +} + +static NvBool PrefetchHelperSurfaceEvo(NVDIFRStateEvoPtr pDifr, + size_t *cacheRemaining, + NVSurfaceEvoPtr pSurfaceEvo, + NvU32 *status) +{ + NVDIFRPrefetchParams params; + + nvAssert(*status == NV2080_CTRL_LPWR_DIFR_PREFETCH_SUCCESS); + + if (!pSurfaceEvo) { + return TRUE; + } + + if (pSurfaceEvo->noDisplayCaching) { + *status = NV2080_CTRL_LPWR_DIFR_PREFETCH_FAIL_OS_FLIPS_ENABLED; + return FALSE; + } + + /* + * If we see the same SurfaceEvo twice (UBB, multi-head X screens, etc) + * we only ever want to prefetch it once within a single + * nvDIFRPrefetchSurfaces() call. + */ + if (pSurfaceEvo->difrLastPrefetchPass == pDifr->prefetchPass) { + return TRUE; + } + + /* + * Update pass counter even if we fail later: we want to try each + * surface only once. + */ + pSurfaceEvo->difrLastPrefetchPass = pDifr->prefetchPass; + + /* Collect copy parameters and do the prefetch. */ + params.surfGpuAddress = pSurfaceEvo->gpuAddress; + params.surfSizeBytes = pSurfaceEvo->planes[0].rmObjectSizeInBytes; + params.surfPitchBytes = pSurfaceEvo->planes[0].pitch; + params.surfFormat = pSurfaceEvo->format; + + if (pSurfaceEvo->layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + params.surfPitchBytes *= NVKMS_BLOCK_LINEAR_GOB_WIDTH; + } + + *status = PrefetchSingleSurface(pDifr, &params, cacheRemaining); + + return *status == NV2080_CTRL_LPWR_DIFR_PREFETCH_SUCCESS; +} + +/* + * Set DIFR disabled state in H/W. Return true if state was changed and it + * was successfully signalled downstream. + */ +static NvBool SetDisabledState(NVDIFRStateEvoPtr pDifr, + NvBool shouldDisable) +{ + NVDevEvoPtr pDevEvo = pDifr->pDevEvo; + NV2080_CTRL_CMD_LPWR_DIFR_CTRL_PARAMS params = { 0 }; + NvU32 ret; + + if (shouldDisable == pDifr->hwDisabled) { + return TRUE; + } + + params.ctrlParamVal = shouldDisable + ? NV2080_CTRL_LPWR_DIFR_CTRL_DISABLE + : NV2080_CTRL_LPWR_DIFR_CTRL_ENABLE; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + NV2080_CTRL_CMD_LPWR_DIFR_CTRL, + &params, + sizeof(params)); + + if (ret != NV_OK) { + return FALSE; + } + + pDifr->hwDisabled = shouldDisable; + + return TRUE; +} + +static NvBool IsCECompatibleWithDIFR(NVDevEvoPtr pDevEvo, NvU32 instance) +{ + NV2080_CTRL_CE_GET_CAPS_V2_PARAMS params; + NvU32 ret; + + nvkms_memset(&params, 0, sizeof(params)); + params.ceEngineType = NV2080_ENGINE_TYPE_COPY(instance); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + NV2080_CTRL_CMD_CE_GET_CAPS_V2, + &params, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + ct_assert(sizeof(params.capsTbl) == NV2080_CTRL_CE_CAPS_TBL_SIZE); + + /* Current criteria: DIFR prefetches can't use graphics CEs. */ + if (NV2080_CTRL_CE_GET_CAP(params.capsTbl, NV2080_CTRL_CE_CAPS_CE_GRCE)) { + return FALSE; + } + + return TRUE; +} + +/* + * Make sure we have a pending idle timer to check back on idleness. + */ +static void EnsureIdleTimer(NVDIFRStateEvoPtr pDifr) +{ + if (!pDifr->idleTimer) { + /* Wait 100x longer in emulation.
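+ * Emulated GPUs run far slower than silicon, so an idle period + * calibrated for real hardware would otherwise expire long before the + * emulated display has been idle for a meaningful amount of time.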
*/ + NvU64 idlePeriod = + DIFR_IDLE_WAIT_PERIOD_US * + (nvIsEmulationEvo(pDifr->pDevEvo) ? 100 : 1); + + pDifr->idleTimer = + nvkms_alloc_timer(IdleTimerProc, pDifr, 0, idlePeriod); + } +} + +/* + * An idle timer should always remain pending after a flip until further + * flips cease and DIFR can be re-enabled. + * + * Currently we'll try to re-enable DIFR after a constant period of idleness + * since the last flip but this could resonate badly with a client that's + * rendering at the same pace. + * + * To avoid churn we could track the time DIFR actually did remain enabled. + * If the enabled-period is relatively short against the disabled-period, we + * should bump the timeout to re-enable so that we won't be retrying all the + * time. Conversely, we should reset the bumped timeout after we actually + * managed to sleep long enough with DIFR enabled. + * + * Note: There's the question of whether we should apply slight hysteresis + * within NVKMS regarding enabling/disabling DIFR. The hardware itself does + * some churn-limiting and practical observations show that it seems to work + * sufficiently and I've not observed rapid, repeating prefetch requests. + * Keeping this note here in case this matter needs to be revisited later. + */ +static void IdleTimerProc(void *dataPtr, NvU32 dataU32) +{ + NVDIFRStateEvoPtr pDifr = (NVDIFRStateEvoPtr)dataPtr; + NvU64 now = nvkms_get_usec(); + NvU64 idlePeriod = + DIFR_IDLE_WAIT_PERIOD_US * + (nvIsEmulationEvo(pDifr->pDevEvo) ? 100 : 1); + + /* First free the timer that triggered us. */ + nvkms_free_timer(pDifr->idleTimer); + pDifr->idleTimer = NULL; + + if (now - pDifr->lastFlipTime >= idlePeriod) { + /* + * Enough time has passed with no new flips, enable DIFR if the console + * is not active. If the console is active then the scanout surfaces + * will get updated by the OS console driver without any knowledge of + * NVKMS, DIFR can not be enabled in that case; the idle timer will get + * scheduled by nvDIFRNotifyFlip() on next modeset/flip, till then DIFR + * will remain disabled. + */ + if (!nvEvoIsConsoleActive(pDifr->pDevEvo)) { + SetDisabledState(pDifr, FALSE); + } + } else { + /* New flips have happened since the original, reset idle timer. */ + EnsureIdleTimer(pDifr); + } +} diff --git a/src/nvidia-modeset/src/nvkms-dma.c b/src/nvidia-modeset/src/nvkms-dma.c new file mode 100644 index 0000000..4438fd0 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-dma.c @@ -0,0 +1,506 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include + +#include "nvkms-dma.h" +#include "nvkms-utils.h" +#include "nvkms-rmapi.h" +#include "class/cl917d.h" // NV917DDispControlDma, NV917D_DMA_* +#include <ctrl/ctrl0080/ctrl0080dma.h> // NV0080_CTRL_CMD_DMA_FLUSH +#include "nvos.h" + +#define NV_DMA_PUSHER_CHASE_PAD 5 +#define NV_EVO_NOTIFIER_SHORT_TIMEOUT_USEC 3000000 // 3 seconds + +static void EvoCoreKickoff(NVDmaBufferEvoPtr push_buffer, NvU32 putOffset); + +void nvDmaKickoffEvo(NVEvoChannelPtr pChannel) +{ + NVDmaBufferEvoPtr p = &pChannel->pb; + NvU32 putOffset = (NvU32)((char *)p->buffer - (char *)p->base); + + if (p->put_offset == putOffset) { + return; + } + + EvoCoreKickoff(p, putOffset); +} + +static void EvoCoreKickoff(NVDmaBufferEvoPtr push_buffer, NvU32 putOffset) +{ + NVEvoDmaPtr pDma = &push_buffer->dma; + int i; + + nvAssert(putOffset % 4 == 0); + nvAssert(putOffset <= push_buffer->offset_max); + + /* If needed, copy the chunk to be kicked off into each GPU's FB */ + if (pDma->isBar1Mapping) { + NVDevEvoPtr pDevEvo = push_buffer->pDevEvo; + int sd; + + NV0080_CTRL_DMA_FLUSH_PARAMS flushParams = { 0 }; + NvU32 ret; + + NvU32 *endAddress; + + if (putOffset < push_buffer->put_offset) { + /* If we've wrapped, copy to the end of the pushbuffer */ + nvAssert(putOffset == 0); + endAddress = push_buffer->base + push_buffer->offset_max / + sizeof(NvU32); + } else { + endAddress = push_buffer->buffer; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NvU32 startOffset = push_buffer->put_offset / sizeof(NvU32); + + NvU32 *src = push_buffer->base; + NvU32 *dst = pDma->subDeviceAddress[sd]; + + nvAssert(dst != NULL); + + src += startOffset; + dst += startOffset; + while (src < endAddress) { + *dst++ = *src++; + } + } + + /* + * Finally, tell RM to flush so that the data actually lands in FB + * before telling the GPU to fetch it. + */ + flushParams.targetUnit = DRF_DEF(0080_CTRL_DMA, _FLUSH_TARGET, + _UNIT_FB, _ENABLE); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_DMA_FLUSH, + &flushParams, sizeof(flushParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV0080_CTRL_CMD_DMA_FLUSH failed"); + } + } + +#if NVCPU_IS_X86_64 + __asm__ __volatile__ ("sfence\n\t" : : : "memory"); +#elif NVCPU_IS_FAMILY_ARM + __asm__ __volatile__ ("dsb sy\n\t" : : : "memory"); +#endif + + /* Kick off all push buffers */ + push_buffer->put_offset = putOffset; + for (i = 0; i < push_buffer->num_channels; i++) { + void *pControl = push_buffer->control[i]; + nvDmaStorePioMethod(pControl, NV917D_PUT, putOffset); + } +} + +/* Read GET from an EVO core channel */ +static NvU32 EvoCoreReadGet(NVDmaBufferEvoPtr push_buffer, int sd) +{ + void *pControl = push_buffer->control[sd]; + return nvDmaLoadPioMethod(pControl, NV917D_GET); +} + +/* Read GET for all devices and return the minimum or maximum */ +static NvU32 EvoReadGetOffset(NVDmaBufferEvoPtr push_buffer, NvBool minimum) +{ + int i; + NvU32 get, bestGet = 0; + NvS32 distanceToPut, minmaxDistanceToPut = (minimum ?
+ 0 : + (push_buffer->dma.limit + 1)); + + if (push_buffer->num_channels <= 1) { + return EvoCoreReadGet(push_buffer, 0); + } + + for (i = 0; i < push_buffer->num_channels; i++) { + get = EvoCoreReadGet(push_buffer, i); + + /* Compute distance to put, accounting for wraps */ + distanceToPut = push_buffer->put_offset - get; + if (distanceToPut < 0) + distanceToPut += push_buffer->dma.limit + 1; + + /* Track the min/max distance to put and the corresponding get. */ + if ((minimum && (distanceToPut >= minmaxDistanceToPut)) || + (!minimum && (distanceToPut <= minmaxDistanceToPut))) { + minmaxDistanceToPut = distanceToPut; + bestGet = get; + } + } + return bestGet; +} + +NvBool nvEvoPollForEmptyChannel(NVEvoChannelPtr pChannel, NvU32 sd, + NvU64 *pStartTime, const NvU32 timeout) +{ + NVDmaBufferEvoPtr push_buffer = &pChannel->pb; + + do { + if (EvoCoreReadGet(push_buffer, sd) == push_buffer->put_offset) { + break; + } + + if (nvExceedsTimeoutUSec(push_buffer->pDevEvo, pStartTime, timeout)) { + return FALSE; + } + + nvkms_yield(); + } while (TRUE); + + return TRUE; +} + +void nvEvoMakeRoom(NVEvoChannelPtr pChannel, NvU32 count) +{ + NVDmaBufferEvoPtr push_buffer = &pChannel->pb; + NvU32 getOffset; + NvU32 putOffset; + NvU64 startTime = 0; + const NvU64 timeout = 5000000; /* 5 seconds */ + + putOffset = (NvU32) ((char *)push_buffer->buffer - + (char *)push_buffer->base); + + if (putOffset >= push_buffer->offset_max) { + /* 0x20000000 is a JUMP-to-offset-0 method: wrap to the start. */ + *(push_buffer->buffer) = 0x20000000; + push_buffer->buffer = push_buffer->base; + nvDmaKickoffEvo(pChannel); + putOffset = 0; + } + + while (1) { + getOffset = EvoReadGetOffset(push_buffer, TRUE); + + if (putOffset >= getOffset) { + push_buffer->fifo_free_count = + (push_buffer->offset_max - putOffset) >> 2; + + if (push_buffer->fifo_free_count <= count) { + if (getOffset) { + /* JUMP back to offset 0 and wrap. */ + *(push_buffer->buffer) = 0x20000000; + push_buffer->buffer = push_buffer->base; + nvDmaKickoffEvo(pChannel); + putOffset = 0; + } + else if (putOffset != push_buffer->put_offset) { + nvDmaKickoffEvo(pChannel); + // Put offset will have changed if a tail was inserted. + putOffset = push_buffer->put_offset; + } + } + } + else { + getOffset = (getOffset > push_buffer->offset_max) ? + push_buffer->offset_max : getOffset; + + if ((putOffset + (NV_DMA_PUSHER_CHASE_PAD * 4)) >= getOffset) + push_buffer->fifo_free_count = 0; + else + push_buffer->fifo_free_count = + ((getOffset - putOffset) >> 2) - 1; + } + if (push_buffer->fifo_free_count > count) { + break; + } + + /* + * If we have been waiting too long, print an error message. There + * isn't much we can do as currently structured, so just reset + * startTime. + */ + if (nvExceedsTimeoutUSec(push_buffer->pDevEvo, &startTime, timeout)) { + nvEvoLogDev(push_buffer->pDevEvo, EVO_LOG_ERROR, + "Error while waiting for GPU progress: " + "0x%08x:%d %d:%d:%d:%d", + pChannel->hwclass, pChannel->instance, + count, push_buffer->fifo_free_count, getOffset, putOffset); + startTime = 0; + } + + nvkms_yield(); + } +} + +static inline void EvoWriteNotifier(volatile NvU32 *pNotifier, NvU32 value) +{ + /* + * Note that we don't need to flush to vidmem here; any subsequent GPU + * write will always be triggered by kicking off pushbuffer methods, + * which will perform a general FB flush. This does assume that the + * pushbuffer and its associated notifier surfaces are either both in + * sysmem or both in vidmem, however. + */ + + *pNotifier = value; +} + +/* Write the EVO core notifier at the given offset to the given value.
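+ * Callers typically use this to reset a notifier to a 'pending' value + * before kicking off methods; the display engine later overwrites it, and + * nvEvoIsCoreNotifierComplete() or nvEvoWaitForCoreNotifier() below then + * check for the completed value.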
*/ +void nvWriteEvoCoreNotifier( + const NVDispEvoRec *pDispEvo, + NvU32 offset, + NvU32 value) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + NVEvoDmaPtr pSubChannel = &pDevEvo->core->notifiersDma[sd]; + volatile NvU32 *pNotifiers = pSubChannel->subDeviceAddress[sd]; + + EvoWriteNotifier(pNotifiers + offset, value); +} + +static NvBool EvoCheckNotifier(const NVDispEvoRec *pDispEvo, + NvU32 offset, NvU32 done_base_bit, + NvU32 done_extent_bit, NvU32 done_value, + NvBool wait) +{ + const NvU32 sd = pDispEvo->displayOwner; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoDmaPtr pSubChannel = &pDevEvo->core->notifiersDma[sd]; + NVDmaBufferEvoPtr p = &pDevEvo->core->pb; + volatile NvU32 *pNotifier; + NvU64 startTime = 0; + + pNotifier = pSubChannel->subDeviceAddress[sd]; + + nvAssert(pNotifier != NULL); + pNotifier += offset; + + // While the completion notifier is not set to done_true + do { + const NvU32 val = *pNotifier; + const NvU32 done_mask = DRF_SHIFTMASK(done_extent_bit:done_base_bit); + const NvU32 done_val = done_value << done_base_bit; + + if ((val & done_mask) == done_val) { + return TRUE; + } + + if (!wait) { + return FALSE; + } + + if (nvExceedsTimeoutUSec( + pDevEvo, + &startTime, + NV_EVO_NOTIFIER_SHORT_TIMEOUT_USEC) && + (p->put_offset == EvoCoreReadGet(p, sd))) + { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Lost display notification (%d:0x%08x); " + "continuing.", sd, val); + EvoWriteNotifier(pNotifier, done_value << done_base_bit); + return TRUE; + } + + nvkms_yield(); + } while (TRUE); +} + +/* + * Used by NV_EVO_WAIT_FOR_NOTIFIER() and NV_EVO_WAIT_FOR_CAPS_NOTIFIER() + */ +void nvEvoWaitForCoreNotifier(const NVDispEvoRec *pDispEvo, NvU32 offset, + NvU32 done_base_bit, NvU32 done_extent_bit, + NvU32 done_value) +{ + EvoCheckNotifier(pDispEvo, offset, + done_base_bit, done_extent_bit, done_value, TRUE); +} + +/* + * Used by the EVO HAL IsNotifierComplete functions. Returns TRUE if the + * notifier is complete. + */ +NvBool nvEvoIsCoreNotifierComplete(NVDispEvoPtr pDispEvo, NvU32 offset, + NvU32 done_base_bit, NvU32 done_extent_bit, + NvU32 done_value) +{ + return EvoCheckNotifier(pDispEvo, + offset, done_base_bit, done_extent_bit, + done_value, FALSE); +} + +void nvEvoSetSubdeviceMask(NVEvoChannelPtr pChannel, NvU32 mask) +{ + NVDmaBufferEvoPtr p = &pChannel->pb; + + nvAssert(!nvDmaSubDevMaskMatchesCurrent(pChannel, mask)); + + p->currentSubDevMask = mask; + + ASSERT_DRF_NUM(917D, _DMA, _SET_SUBDEVICE_MASK_VALUE, mask); + + if (p->fifo_free_count <= 1) { + nvEvoMakeRoom(pChannel, 1); + } + + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _DMA, _OPCODE, _SET_SUBDEVICE_MASK) | + DRF_NUM(917D, _DMA, _SET_SUBDEVICE_MASK_VALUE, mask)); + p->fifo_free_count -= 1; +} + +/*! + * Reads CRC values from the notifier. + * + * This function will attempt to read in the first 'entry_count' CRC notifier + * entries that HW generated. The actual number of entries that are read may + * be less. + * + * \param[in] pCRC32Notifier Pointer to the CRC notifier memory. + * \param[in] entry_stride Stride of a single CRC notifier entry + * \param[in] entry_count Expected count of notifier entries to read + * \param[in] status_offset Offset for Status flags header in CRC notifier + * \param[in] field_count Number of fields to read from each CRC notifier + * entry. + * \param[in] flag_count Number of flags to read from the Status Header + * \param[in out] field_info Specifies the offset/base/extent info for each field. 
+ * Each 'field_info' contains an output array for + * storing 'entry_count' field values. + * \param[in] flag_info Specifies the base/extent info for each flag. + * Each 'flag_info' contains a 'flag_type' for + * addressing error cases related to the flags. + * + * \return Returns the MIN(count, entry_count) of successfully + * read entries. + */ +NvU32 nvEvoReadCRC32Notifier(volatile NvU32 *pCRC32Notifier, + NvU32 entry_stride, + NvU32 entry_count, + NvU32 status_offset, + NvU32 field_count, + NvU32 flag_count, + const CRC32NotifierEntryRec *field_info, + const CRC32NotifierEntryFlags *flag_info) +{ + NvU32 count = 0; + NvU32 i, j, k; + + nvAssert(pCRC32Notifier != NULL); + + // Iterate over flags (unique at start of the CRC32Notifier Struct) + for (k = 0; k < flag_count; k++) { + CRC32NotifierEntryFlags info = flag_info[k]; + volatile NvU32 *pFlag = pCRC32Notifier + status_offset; + NvU32 flag_mask = + DRF_SHIFTMASK((info.flag_extent_bit):(info.flag_base_bit)); + NvU32 flag = (*pFlag & flag_mask) >> info.flag_base_bit; + + switch (info.flag_type) + { + case NVEvoCrc32NotifierFlagCount: + count = flag; + // entry_count is max of each field_frame_values[i] array + if (count > entry_count) { + nvEvoLog(EVO_LOG_WARN, "Too many CRC32 generated entries " + "(%d expected; %d found)", entry_count, count); + count = entry_count; + } + break; + + case NVEvoCrc32NotifierFlagCrcOverflow: + if (flag) { + count = 0; + nvEvoLog(EVO_LOG_ERROR, "CRC Overflow occurred, " + "CRC values could not be processed fast enough.\n" + "Failing flag index in status_info array: %d", + k); + + return count; + } + break; + } + } + + // Iterate over each collection of fields, for count pairs of values + for (i = 0; i < count; i++) { + for (j = 0; j < field_count; j++) { + CRC32NotifierEntryRec info = field_info[j]; + volatile NvU32 *pEntry = pCRC32Notifier + info.field_offset; + NvU32 field_mask = + DRF_SHIFTMASK((info.field_extent_bit):(info.field_base_bit)); + + info.field_frame_values[i].value = + (*pEntry & field_mask) >> info.field_base_bit; + info.field_frame_values[i].supported = TRUE; + } + pCRC32Notifier += entry_stride; + } + + return count; +} + +void nvEvoResetCRC32Notifier(volatile NvU32 *pCRC32Notifier, + NvU32 offset, + NvU32 reset_base_bit, + NvU32 reset_value) +{ + const NvU32 reset_val = reset_value << reset_base_bit; + + nvAssert(pCRC32Notifier != NULL); + pCRC32Notifier += offset; + + EvoWriteNotifier(pCRC32Notifier, reset_val); +} + +NvBool nvEvoWaitForCRC32Notifier(const NVDevEvoPtr pDevEvo, + volatile NvU32 *pCRC32Notifier, + NvU32 offset, + NvU32 done_base_bit, + NvU32 done_extent_bit, + NvU32 done_value) +{ + const NvU32 done_mask = DRF_SHIFTMASK(done_extent_bit:done_base_bit); + const NvU32 done_val = done_value << done_base_bit; + NvU64 startTime = 0; + + nvAssert(pCRC32Notifier != NULL); + pCRC32Notifier += offset; + + do { + const NvU32 status = *pCRC32Notifier; + + if ((status & done_mask) == done_val) { + return TRUE; + } + + if (nvExceedsTimeoutUSec( + pDevEvo, + &startTime, + NV_EVO_NOTIFIER_SHORT_TIMEOUT_USEC)) { + return FALSE; + } + + nvkms_yield(); + + } while (TRUE); + + return FALSE; +} diff --git a/src/nvidia-modeset/src/nvkms-dpy-override.c b/src/nvidia-modeset/src/nvkms-dpy-override.c new file mode 100644 index 0000000..a98abf5 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-dpy-override.c @@ -0,0 +1,264 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "nvkms-utils.h" +#include "nvkms-dpy-override.h" + +#include "nv_list.h" + +static NVListRec dpyOverrideList = NV_LIST_INIT(&dpyOverrideList); + +#define FOR_ALL_DPY_OVERRIDES(_pDpyOverride) \ + nvListForEachEntry(_pDpyOverride, &dpyOverrideList, entry) +#define FOR_ALL_DPY_OVERRIDES_SAFE(_pDpyOverride, _next) \ + nvListForEachEntry_safe(_pDpyOverride, _next, &dpyOverrideList, entry) + +#define DPY_OVERRIDE_MATCHES(_pDpyOverride, _gpuId, _name) \ + ((_pDpyOverride->gpuId == _gpuId) && \ + !nvkms_strcmp(_pDpyOverride->name, _name)) + +static NvBool DpyOverrideReadEdid(NVDpyOverrideRec *dpy, + const char *buffer, + size_t size) +{ + if ((dpy->edid.length != size) || (dpy->edid.buffer == NULL)) { + NvU8 *newbuf = nvRealloc(dpy->edid.buffer, size); + if (newbuf == NULL) { + return FALSE; + } + dpy->edid.buffer = newbuf; + dpy->edid.length = size; + } + + nvkms_memcpy(dpy->edid.buffer, buffer, size); + return TRUE; +} + + +/* + * Creates a display override on the given GPU for the given display name. + * If the override already exists it will be overwritten. + * + * \param[in] gpuId The ID of the GPU on which to create the display + * override, as returned in nvkms_enumerate_gpus() + * \param[in] name The name of the display to override in + * PROTOCOL-Index format, e.g. HDMI-0. + * \param[in] edid A buffer containing EDID data for the override. + * \param[in] edidSize The size of the edid buffer. + * + * \return A pointer to the created or edited NVDpyOverrideRec, or NULL if + * creation failed. + */ +NVDpyOverrideRec *nvCreateDpyOverride(NvU32 gpuId, + const char *name, + NvBool connected, + const char *edid, + size_t edidSize) +{ + NVDpyOverridePtr pDpyOverride; + size_t namelen, cpsz; + NvBool found = FALSE; + + /* if such a display override already exists, let it be changed */ + FOR_ALL_DPY_OVERRIDES(pDpyOverride) { + if (DPY_OVERRIDE_MATCHES(pDpyOverride, gpuId, name)) { + + found = TRUE; + break; + } + } + + /* if such a display override doesn't exist, create a new one */ + if (!found) { + pDpyOverride = nvCalloc(1, sizeof(*pDpyOverride)); + if (pDpyOverride == NULL) { + nvEvoLog(EVO_LOG_WARN, "Failed allocating data for display override"); + return NULL; + } + + nvListAdd(&pDpyOverride->entry, &dpyOverrideList); + + namelen = nvkms_strlen(name); + cpsz = namelen > NVKMS_DPY_NAME_SIZE - 1 ? 
NVKMS_DPY_NAME_SIZE - 1 + : namelen; + + nvkms_memcpy(pDpyOverride->name, name, cpsz); + pDpyOverride->gpuId = gpuId; + } + + pDpyOverride->connected = connected; + if (connected && !DpyOverrideReadEdid(pDpyOverride, edid, edidSize)) { + nvEvoLog(EVO_LOG_WARN, "Failed reading EDID"); + nvListDel(&pDpyOverride->entry); + nvFree(pDpyOverride); + return NULL; + } + + return pDpyOverride; +} + +/* + * Deletes a display override on the given GPU for the given display name. + * + * \param[in] gpuId The ID of the GPU on which to delete the display + * override, as returned in nvkms_enumerate_gpus() + * \param[in] name The name of the display whose override to delete in + * PROTOCOL-Index format, e.g. HDMI-0. + */ +void nvDeleteDpyOverride(NvU32 gpuId, const char *name) +{ + NVDpyOverridePtr pDpyOverride; + + /* If such a display override already exists, delete it */ + FOR_ALL_DPY_OVERRIDES(pDpyOverride) { + if (DPY_OVERRIDE_MATCHES(pDpyOverride, gpuId, name)) { + + nvListDel(&pDpyOverride->entry); + nvFree(pDpyOverride); + return; /* This makes using nvListForEachEntry safe. */ + } + } +} + +/* + * Logs a list of currently active override names to pInfoStr for a given + * GPU. + * + * \param[in] gpuId The ID of the GPU whose overrides to print, as + * returned in nvkms_enumerate_gpus() + * \param[in] pInfoStr A pointer to the NVEvoInfoString to log to. + */ +void nvLogDpyOverrides(NvU32 gpuId, NVEvoInfoStringPtr pInfoStr) +{ + NVDpyOverridePtr pDpyOverride; + FOR_ALL_DPY_OVERRIDES(pDpyOverride) { + if (pDpyOverride->gpuId == gpuId) { + nvEvoLogInfoString(pInfoStr, "%s", pDpyOverride->name); + } + } +} + +/* + * Checks if there is a matching, valid, and enabled NVDpyOverrideRec for the + * pDpyEvo in the global display override list and returns it if it is found. + * O(N) in length of display override list + * + * \param[in] pDpyEvo The display to check for an override. + * + * \return The NVDpyOverrideRec override for the pDpyEvo, or NULL if it isn't + * found. + */ +NVDpyOverridePtr nvDpyEvoGetOverride(const NVDpyEvoRec *pDpyEvo) +{ + NVDevEvoPtr pDevEvo; + NVDispEvoPtr pDispEvo; + NVSubDeviceEvoPtr pSubDevice; + NVDpyOverridePtr it; + + if (pDpyEvo == NULL) { + return NULL; + } + + /* + * Don't override DP MST displays, because there could be multiple attached + * to the single connector, which would result in the number of displays + * this override creates being dependent on the number of plugged in + * displays, which seems incorrect for this feature + */ + if (nvDpyEvoIsDPMST(pDpyEvo)) { + return NULL; + } + pDispEvo = pDpyEvo->pDispEvo; + pDevEvo = pDispEvo->pDevEvo; + + pSubDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]; + + FOR_ALL_DPY_OVERRIDES(it) { + /* Ensure valid and enabled override */ + if ((it->edid.length == 0) || (it->edid.buffer == NULL)) { + continue; + } + + /* + * Both NVDpyOverrideRec.gpuId and NVSubDeviceEvoRec.gpuId ultimately + * derive from nvRmApiControl(NV2080_CTRL_CMD_GPU_GET_ID), so we can + * use them to match GPUs. Additionally, NVConnectorEvo.name is of the + * format TYPE-N, e.g. HDMI-0, but pDpyEvo.name may have additional + * qualifiers (e.g., an existing EDID-derived name). + */ + if (DPY_OVERRIDE_MATCHES(it, pSubDevice->gpuId, pDpyEvo->pConnectorEvo->name)) { + + nvEvoLogDebug(EVO_LOG_INFO, "NVDpyOverrideRec found: %s\n", + it->name); + return it; + } + } + + return NULL; +} + +/* + * Reads the EDID data from a given NVDpyOverrideRec into the buffer buff. + * Does not write to the buffer if the operation fails. 
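+ * + * A typical caller first looks the override up with nvDpyEvoGetOverride() and + * supplies a buffer of at least pDpyOverride->edid.length bytes; a shorter + * buffer makes this helper return 0 without copying anything.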
+ * + * \param[in] pDpyOverride The override from which to read the EDID data. + * \param[out] buff A pointer to a buffer into which to read + * the override's EDID data. + * \param[in] len The length of the buffer. + * + * \return The number of bytes written into the buffer, or 0 if the operation + * failed. + */ +size_t nvReadDpyOverrideEdid(const NVDpyOverrideRec *pDpyOverride, + NvU8 *buff, size_t len) +{ + if ((pDpyOverride == NULL) || + (buff == NULL) || + (pDpyOverride->edid.length == 0) || + (pDpyOverride->edid.length > len)) { + return 0; + } + + nvkms_memcpy(buff, pDpyOverride->edid.buffer, + pDpyOverride->edid.length); + return pDpyOverride->edid.length; +} + +/* + * Delete all display overrides. This should only ever be called during shutdown + * of NVKMS or to cleanup when display override initialization fails. + */ +void nvClearDpyOverrides(void) +{ + NVDpyOverridePtr pDpyOverride, tmp; + FOR_ALL_DPY_OVERRIDES_SAFE(pDpyOverride, tmp) { + nvListDel(&pDpyOverride->entry); + if (pDpyOverride->edid.buffer != NULL) { + nvFree(pDpyOverride->edid.buffer); + } + nvFree(pDpyOverride); + } + + nvAssert(nvListIsEmpty(&dpyOverrideList)); +} diff --git a/src/nvidia-modeset/src/nvkms-dpy.c b/src/nvidia-modeset/src/nvkms-dpy.c new file mode 100644 index 0000000..af0e67a --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-dpy.c @@ -0,0 +1,3531 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "dp/nvdp-device.h" +#include "dp/nvdp-connector-event-sink.h" + +#include "nvkms-evo.h" +#include "nvkms-dpy.h" +#include "nvkms-dpy-override.h" +#include "nvkms-hdmi.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "nvkms-types.h" +#include "nvkms-attributes.h" +#include "nvkms-utils.h" +#include "nvkms-3dvision.h" + +#include "nv_mode_timings_utils.h" + +#include "nvkms-api.h" +#include "nvkms-private.h" + +#include "nvos.h" +#include "timing/dpsdp.h" + +#include "displayport/displayport.h" + +#include <ctrl/ctrl0073/ctrl0073dfp.h> // NV0073_CTRL_DFP_FLAGS_* +#include <ctrl/ctrl0073/ctrl0073dp.h> // NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_* + +#define TMDS_SINGLE_LINK_PCLK_MAX 165000 +#define TMDS_DUAL_LINK_PCLK_MAX 330000 + +static void DpyGetDynamicDfpProperties( + NVDpyEvoPtr pDpyEvo, + const NvBool disableACPIBrightnessHotkeys); + +static void +CreateParsedEdidFromNVT_TIMING(NVT_TIMING *pTimings, + NvU8 bpc, + NVParsedEdidEvoPtr pParsedEdid); + +static NvBool ReadEdidFromDP (const NVDpyEvoRec *pDpyEvo, + NVEdidPtr pEdid); +static NvBool ReadEdidFromResman (const NVDpyEvoRec *pDpyEvo, + NVEdidPtr pEdid, + NvKmsEdidReadMode readMode); +static NvBool ValidateEdid (const NVDpyEvoRec *pDpyEvo, + NVEdidPtr pEdid, + NVEvoInfoStringPtr pInfoString, + const NvBool ignoreEdidChecksum); +static void LogEdid (NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString); +static void ClearEdid (NVDpyEvoPtr pDpyEvo, const NvBool bSendHdmiCapsToRm); +static void ClearCustomEdid (const NVDpyEvoRec *pDpyEvo); +static void WriteEdidToResman (const NVDpyEvoRec *pDpyEvo, + const NVEdidRec *pEdid); +static void PatchAndParseEdid (const NVDpyEvoRec *pDpyEvo, + NVEdidPtr pEdid, + NVParsedEdidEvoPtr, + NVEvoInfoStringPtr pInfoString); +static void ReadAndApplyEdidEvo (NVDpyEvoPtr pDpyEvo, + struct NvKmsQueryDpyDynamicDataParams *pParams); +static NvBool GetFixedModeTimings (NVDpyEvoPtr pDpyEvo, struct NvKmsSuperframeInfo *pSuperframeInfo); +static NvBool ReadDSITimingsFromResman (const NVDpyEvoRec *pDpyEvo, + NVT_TIMING *pTimings, + NvU8 *pBpc); +static void AssignDpyEvoName (NVDpyEvoPtr pDpyEvo); + +static NvBool IsConnectorTMDS (NVConnectorEvoPtr); + + +static void DpyDisconnectEvo(NVDpyEvoPtr pDpyEvo, const NvBool bSendHdmiCapsToRm) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + + pDispEvo->connectedDisplays = + nvDpyIdListMinusDpyId(pDispEvo->connectedDisplays, pDpyEvo->id); + + ClearEdid(pDpyEvo, bSendHdmiCapsToRm); +} + +static NvBool DpyConnectEvo( + NVDpyEvoPtr pDpyEvo, + struct NvKmsQueryDpyDynamicDataParams *pParams) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + + pDispEvo->connectedDisplays = + nvAddDpyIdToDpyIdList(pDpyEvo->id, pDispEvo->connectedDisplays); + + DpyGetDynamicDfpProperties(pDpyEvo, pParams->request.disableACPIBrightnessHotkeys); + nvDPGetDpyGUID(pDpyEvo); + + if ((pDpyEvo->pConnectorEvo->signalFormat == NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) || + nvConnectorIsDPSerializer(pDpyEvo->pConnectorEvo)) { + if (!GetFixedModeTimings(pDpyEvo, &pParams->reply.superframeInfo)) { + return FALSE; + } + } else { + ReadAndApplyEdidEvo(pDpyEvo, pParams); + } + + nvUpdateInfoFrames(pDpyEvo); + + return TRUE; +} + +/* + * DpyAssignColorSpaceCaps() - parse both the CEA-861 extension block and + * the EDID 1.4 block to determine YCbCr422/444 capability.
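+ * Where the two sources disagree, the inconsistency is logged and the + * capability is assumed whenever either block advertises it.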
+ */ +static void DpyAssignColorSpaceCaps(NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString) +{ + NvBool ycbr422_cap = FALSE; + NvBool ycbr444_cap = FALSE; + const NVParsedEdidEvoRec *pParsedEdid = &pDpyEvo->parsedEdid; + + /* check for edid YCbCr422/YCbCr444 capability */ + if (pParsedEdid->valid) { + NvBool haveCEA861Block = + (pParsedEdid->info.ext861.revision != NVT_CEA861_REV_NONE); + if (haveCEA861Block) { + ycbr422_cap = !!(pParsedEdid->info.ext861.basic_caps & + NVT_CEA861_CAP_YCbCr_422); + ycbr444_cap = !!(pParsedEdid->info.ext861.basic_caps & + NVT_CEA861_CAP_YCbCr_444); + } + /* check EDID 1.4 base block */ + if (pParsedEdid->info.version == 0x104 && + pParsedEdid->info.input.isDigital) { + NvBool edid14_ycbr422 = + pParsedEdid->info.u.feature_ver_1_4_digital.support_ycrcb_422; + NvBool edid14_ycbr444 = + pParsedEdid->info.u.feature_ver_1_4_digital.support_ycrcb_444; + if (haveCEA861Block && ycbr422_cap != edid14_ycbr422) { + nvEvoLogInfoString(pInfoString, + "%s EDID inconsistency: the EDID 1.4 base block %s " + "YCbCr 4:2:2 support, but the CEA-861 extension block " + "%s. Assuming YCbCr 4:2:2 is supported.", + pDpyEvo->name, + edid14_ycbr422 ? "indicates" : "does not indicate", + ycbr422_cap ? "does" : "does not"); + } + if (edid14_ycbr422) { + ycbr422_cap = TRUE; + } + if (haveCEA861Block && ycbr444_cap != edid14_ycbr444) { + nvEvoLogInfoString(pInfoString, + "%s EDID inconsistency: the EDID 1.4 base block %s " + "YCbCr 4:4:4 support, but the CEA-861 extension block " + "%s. Assuming YCbCr 4:4:4 is supported.", + pDpyEvo->name, + edid14_ycbr444 ? "indicates" : "does not indicate", + ycbr444_cap ? "does" : "does not"); + } + if (edid14_ycbr444) { + ycbr444_cap = TRUE; + } + } + } + pDpyEvo->colorSpaceCaps.ycbcr422Capable = ycbr422_cap; + pDpyEvo->colorSpaceCaps.ycbcr444Capable = ycbr444_cap; +} + + + +static NvBool GetEdidOverride( + const struct NvKmsQueryDpyDynamicDataRequest *pRequest, + NVEdidRec *pEdid) +{ + if ((pRequest == NULL) || + !pRequest->overrideEdid || + (pRequest->edid.bufferSize == 0) || + (pRequest->edid.bufferSize > sizeof(pRequest->edid.buffer))) { + return FALSE; + } + + pEdid->buffer = nvAlloc(pRequest->edid.bufferSize); + + if (pEdid->buffer == NULL) { + return FALSE; + } + + nvkms_memcpy(pEdid->buffer, pRequest->edid.buffer, pRequest->edid.bufferSize); + + pEdid->length = pRequest->edid.bufferSize; + + return TRUE; +} + +/*! + * Query resman for the EDID for the pDpyEvo, then parse the EDID into usable + * data. Do not modify the pDpyEvoRec. + */ + +NvBool nvDpyReadAndParseEdidEvo( + const NVDpyEvoRec *pDpyEvo, + const struct NvKmsQueryDpyDynamicDataRequest *pRequest, + NvKmsEdidReadMode readMode, + NVEdidRec *pEdid, + NVParsedEdidEvoPtr *ppParsedEdid, + NVEvoInfoStringPtr pInfoString) +{ + NvBool ignoreEdid = FALSE; + NvBool ignoreEdidChecksum = FALSE; + + if (pRequest != NULL) { + ignoreEdid = pRequest->ignoreEdid; + ignoreEdidChecksum = pRequest->ignoreEdidChecksum; + } + + nvkms_memset(pEdid, 0, sizeof(*pEdid)); + + /* Just return an empty EDID if requested. */ + if (ignoreEdid) { + return TRUE; + } + + /* Load any custom EDID, (or see if DP lib has EDID) */ + ClearCustomEdid(pDpyEvo); + + if ((pRequest && GetEdidOverride(pRequest, pEdid)) || + ReadEdidFromDP(pDpyEvo, pEdid)) { + /* XXX [VSM] Write, clear and re-read the EDID to/from RM here to make + * sure RM and X agree on the final EDID bits. Once RM no longer + * parses the EDID, we can avoid doing this for DP devices. 
+ * + * If it's a DisplayPort 1.2 multistream device then don't bother trying + * to ping-pong the EDID through RM. + */ + if (nvDpyEvoIsDPMST(pDpyEvo)) { + goto validateEdid; + } + + WriteEdidToResman(pDpyEvo, pEdid); + + nvFree(pEdid->buffer); + pEdid->buffer = NULL; + pEdid->length = 0; + } + + if (!ReadEdidFromResman(pDpyEvo, pEdid, readMode)) { + goto fail; + } + +validateEdid: + /* Validate the EDID */ + if (!ValidateEdid(pDpyEvo, pEdid, pInfoString, ignoreEdidChecksum)) { + goto fail; + } + + *ppParsedEdid = nvCalloc(1, sizeof(**ppParsedEdid)); + if (*ppParsedEdid == NULL) { + goto fail; + } + /* Parse the EDID. Note this may *change* the EDID bytes. */ + PatchAndParseEdid(pDpyEvo, pEdid, *ppParsedEdid, pInfoString); + + return TRUE; + +fail: + + /* We failed to read a valid EDID. Free any EDID buffer allocated above. */ + nvFree(pEdid->buffer); + pEdid->buffer = NULL; + pEdid->length = 0; + + return FALSE; +} + +static void AssignIsVrHmd(NVDpyEvoRec *pDpyEvo) +{ + NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS params = { }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + pDpyEvo->isVrHmd = FALSE; + + if (!pDpyEvo->parsedEdid.valid) { + return; + } + + params.manufacturerID = pDpyEvo->parsedEdid.info.manuf_id; + params.productID = pDpyEvo->parsedEdid.info.product_id; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_IS_DIRECTMODE_DISPLAY, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to query VR headset for %s", pDpyEvo->name); + return; + } + + /* + * bIsDirectmode indicates any monitor that by default shouldn't be part of + * a desktop (VR headset, touch panel, etc). But, close enough for our + * usage of isVrHmd. + */ + pDpyEvo->isVrHmd = params.bIsDirectmode; +} + +static NvBool EdidHasChanged( + const NVDpyEvoRec *pDpyEvo, + const NVEdidRec *pEdid, + const NVParsedEdidEvoRec *pParsedEdid) +{ + /* Compare EDID bytes */ + if (pEdid->length != pDpyEvo->edid.length || + nvkms_memcmp(pEdid->buffer, pDpyEvo->edid.buffer, pEdid->length) != 0) { + return TRUE; + } + + /* Compare parsed data */ + if (pParsedEdid != NULL) { + if (nvkms_memcmp(pParsedEdid, &pDpyEvo->parsedEdid, + sizeof(*pParsedEdid)) != 0) { + return TRUE; + } + } else if (pDpyEvo->parsedEdid.valid) { + return TRUE; + } + + return FALSE; +} + +static void ApplyNewEdid( + NVDpyEvoPtr pDpyEvo, + const NVEdidRec *pEdid, + const NVParsedEdidEvoRec *pParsedEdid, + const NvBool bSendHdmiCapsToRm, + NVEvoInfoStringPtr pInfoString) +{ + if (pDpyEvo->edid.buffer != NULL) { + nvFree(pDpyEvo->edid.buffer); + } + pDpyEvo->edid.buffer = pEdid->buffer; + pDpyEvo->edid.length = pEdid->length; + + if (pParsedEdid != NULL) { + nvkms_memcpy(&pDpyEvo->parsedEdid, pParsedEdid, + sizeof(pDpyEvo->parsedEdid)); + } else { + nvkms_memset(&pDpyEvo->parsedEdid, 0, sizeof(pDpyEvo->parsedEdid)); + } + + /* + * Regenerate the dpy's name, because the parsed EDID monitorName + * may have changed. + */ + AssignDpyEvoName(pDpyEvo); + + /* Write information about the parsed EDID to the infoString.
*/ + LogEdid(pDpyEvo, pInfoString); + + if (pDpyEvo->parsedEdid.valid) { + /* + * check 3D Vision capability + */ + nvDpyCheck3DVisionCapsEvo(pDpyEvo); + + /* + * Check HDMI VRR capability + */ + nvDpyUpdateHdmiVRRCaps(pDpyEvo); + } + + if (pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) { + DpyAssignColorSpaceCaps(pDpyEvo, pInfoString); + } + + if (bSendHdmiCapsToRm) { + nvSendHdmiCapsToRm(pDpyEvo); + } + + nvDpyProbeMaxPixelClock(pDpyEvo); + + AssignIsVrHmd(pDpyEvo); +} + +/* + * ReadDSITimingsFromResman() - Obtains modetimings for a DSI connector, + * passing it into pTimings + */ +static NvBool ReadDSITimingsFromResman( + const NVDpyEvoRec *pDpyEvo, + NVT_TIMING *pTimings, + NvU8 *pBpc) +{ + NvU32 ret; + NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS dsiModeTimingParams = { 0 }; + + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + dsiModeTimingParams.subDeviceInstance = pDispEvo->displayOwner; + + /* + * Currently displayId must be hardcoded to 0 to receive timings from RM. + * Once the corresponding DCB support is added for DSI, this hack will be + * removed and NVKMS will use the actual displayId instead. + */ + dsiModeTimingParams.displayId = 0; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING, + &dsiModeTimingParams, sizeof(dsiModeTimingParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Unable to read DSI mode timings for display device %s", + pDpyEvo->name); + return FALSE; + } + + // Converts refresh (Hz) into appropriate units for rr1k (units of 0.001Hz) + pTimings->etc.rrx1k = dsiModeTimingParams.refresh * 1000; + pTimings->HVisible = dsiModeTimingParams.hActive; + pTimings->HFrontPorch = dsiModeTimingParams.hFrontPorch; + pTimings->HSyncWidth = dsiModeTimingParams.hSyncWidth; + pTimings->HTotal = dsiModeTimingParams.hActive + + dsiModeTimingParams.hFrontPorch + + dsiModeTimingParams.hSyncWidth + + dsiModeTimingParams.hBackPorch; + + pTimings->VVisible = dsiModeTimingParams.vActive; + pTimings->VFrontPorch = dsiModeTimingParams.vFrontPorch; + pTimings->VSyncWidth = dsiModeTimingParams.vSyncWidth; + pTimings->VTotal = dsiModeTimingParams.vActive + + dsiModeTimingParams.vFrontPorch + + dsiModeTimingParams.vSyncWidth + + dsiModeTimingParams.vBackPorch; + + pTimings->pclk = HzToKHz(dsiModeTimingParams.pclkHz) / 10; + + // DSI only supports RGB444 + *pBpc = dsiModeTimingParams.bpp / 3; + + return TRUE; +} + +static NvBool ParseSuperframeInfo( + NVDpyEvoRec *pDpyEvo, + const NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS *pParams, + struct NvKmsSuperframeInfo *pSuperframeInfo) +{ + NvU8 i; + + if (pParams->superframeInfo.numViews == 0) { + return TRUE; + } + + // Currently, we support only dual view superframe. + if (pParams->superframeInfo.numViews != 2) { + nvEvoLog(EVO_LOG_ERROR, "Invalid number of superframe views"); + return FALSE; + } + + // Currently, we support only packed symmetrical side-by-side superframe. 
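+ // For example, a valid 3840x1080 dual-view superframe consists of two + // 1920x1080 views: view[0] at x=0 and view[1] at x=1920, both at y=0.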
+ if ((pParams->superframeInfo.view[0].width * pParams->superframeInfo.numViews) != + pParams->hActive) { + nvEvoLog(EVO_LOG_ERROR, "The width of Superframe view[0] is invalid"); + return FALSE; + } + + if (pParams->superframeInfo.view[0].height != pParams->vActive) { + nvEvoLog(EVO_LOG_ERROR, "The height of Superframe view[0] is invalid"); + return FALSE; + } + + pSuperframeInfo->numViews = 0; + + for (i = 0; i < pParams->superframeInfo.numViews; i++) { + // All superframe views must not have horizontal spacing in between them. + if ((pParams->superframeInfo.view[0].width * i) != + pParams->superframeInfo.view[i].x) { + nvEvoLog(EVO_LOG_ERROR, "The x offset of Superframe view[%u] is invalid", i); + goto fail; + } + + // All superframe views must have y offset as 0. + if (pParams->superframeInfo.view[i].y != 0) { + nvEvoLog(EVO_LOG_ERROR, "The y offset of Superframe view[%u] is invalid", i); + goto fail; + } + + // All superframe views must have the same width. + if (pParams->superframeInfo.view[0].width != + pParams->superframeInfo.view[i].width) { + nvEvoLog(EVO_LOG_ERROR, "The width of Superframe view[%u] is invalid", i); + goto fail; + } + + // All superframe views must have the same height. + if (pParams->superframeInfo.view[0].height != + pParams->superframeInfo.view[i].height) { + nvEvoLog(EVO_LOG_ERROR, "The height of Superframe view[%u] is invalid", i); + goto fail; + } + + pSuperframeInfo->view[i].x = pParams->superframeInfo.view[i].x; + pSuperframeInfo->view[i].width = pParams->superframeInfo.view[i].width; + pSuperframeInfo->view[i].y = pParams->superframeInfo.view[i].y; + pSuperframeInfo->view[i].height = pParams->superframeInfo.view[i].height; + pSuperframeInfo->numViews++; + } + + return TRUE; + +fail: + nvkms_memset(pSuperframeInfo, 0, sizeof(*pSuperframeInfo)); + return FALSE; +} + +static NvBool ReadDPSerializerTimings( + NVDpyEvoRec *pDpyEvo, + NVT_TIMING *pTimings, + NvU8 *pBpc, + struct NvKmsSuperframeInfo *pSuperframeInfo) +{ + NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS timingParams = { }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + timingParams.subDeviceInstance = pDispEvo->displayOwner; + timingParams.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId); + timingParams.stream = pDpyEvo->dp.serializerStreamIndex; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_GET_FIXED_MODE_TIMING, + &timingParams, sizeof(timingParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Unable to read fixed mode timings for display device %s", + pDpyEvo->name); + return FALSE; + } + + if (!timingParams.valid) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Fixed mode timings are invalid for display device %s", + pDpyEvo->name); + return FALSE; + } + + if (!ParseSuperframeInfo(pDpyEvo, &timingParams, pSuperframeInfo)) { + return FALSE; + } + + nvkms_memset(pTimings, 0, sizeof(NVT_TIMING)); + + pTimings->HVisible = timingParams.hActive; + pTimings->HFrontPorch = timingParams.hFrontPorch; + pTimings->HSyncWidth = timingParams.hSyncWidth; + pTimings->HTotal = timingParams.hActive + timingParams.hFrontPorch + + timingParams.hSyncWidth + timingParams.hBackPorch; + + pTimings->VVisible = timingParams.vActive; + pTimings->VFrontPorch = timingParams.vFrontPorch; + pTimings->VSyncWidth = timingParams.vSyncWidth; + pTimings->VTotal = timingParams.vActive + timingParams.vFrontPorch + + timingParams.vSyncWidth + timingParams.vBackPorch; + + 
pTimings->pclk = timingParams.pclkKHz / 10; + pTimings->etc.rrx1k = timingParams.rrx1k; + + *pBpc = 0; + + return TRUE; +} + +static NvBool GetFixedModeTimings( + NVDpyEvoPtr pDpyEvo, + struct NvKmsSuperframeInfo *pSuperframeInfo) +{ + NVT_TIMING timings = { }; + NvBool ret = FALSE; + NvU8 bpc; + + if (pDpyEvo->pConnectorEvo->signalFormat == NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) { + ret = ReadDSITimingsFromResman(pDpyEvo, &timings, &bpc); + } else if (nvConnectorIsDPSerializer(pDpyEvo->pConnectorEvo)) { + ret = ReadDPSerializerTimings(pDpyEvo, &timings, &bpc, + pSuperframeInfo); + } + + if (!ret) { + return ret; + } + + CreateParsedEdidFromNVT_TIMING(&timings, bpc, &pDpyEvo->parsedEdid); + + AssignDpyEvoName(pDpyEvo); + nvDpyProbeMaxPixelClock(pDpyEvo); + + return TRUE; +} + +static void ReadAndApplyEdidEvo( + NVDpyEvoPtr pDpyEvo, + struct NvKmsQueryDpyDynamicDataParams *pParams) +{ + const struct NvKmsQueryDpyDynamicDataRequest *pRequest = NULL; + NVEdidRec edid = {NULL, 0}; + NVParsedEdidEvoPtr pParsedEdid = NULL; + NVEvoInfoStringRec infoString; + NvBool readSuccess; + + if (pParams != NULL) { + nvInitInfoString(&infoString, pParams->reply.edid.infoString, + sizeof(pParams->reply.edid.infoString)); + pRequest = &pParams->request; + } else { + nvInitInfoString(&infoString, NULL, 0); + } + + readSuccess = nvDpyReadAndParseEdidEvo(pDpyEvo, pRequest, + NVKMS_EDID_READ_MODE_DEFAULT, + &edid, &pParsedEdid, &infoString); + + if (pParams != NULL) { + pParams->reply.edid.valid = readSuccess; + } + + if (EdidHasChanged(pDpyEvo, &edid, pParsedEdid)) { + /* + * Do not plumb pRequest into ApplyNewEdid(). This helps ensure that + * its operation is purely a function of the EDID and parsed EDID data, + * which means that if we get into this function again with the same + * EDID and parsed EDID data, we can safely skip ApplyNewEdid() without + * worrying that this request has different parameters (like CustomEdid + * or mode validation overrides). + */ + ApplyNewEdid(pDpyEvo, &edid, pParsedEdid, TRUE /* bSendHdmiCapsToRm */, + &infoString); + } else { + nvFree(edid.buffer); + } + nvFree(pParsedEdid); +} + +typedef enum { + NV_EVO_PASSIVE_DP_DONGLE_UNUSED, + NV_EVO_PASSIVE_DP_DONGLE_DP2DVI, + NV_EVO_PASSIVE_DP_DONGLE_DP2HDMI_TYPE_1, + NV_EVO_PASSIVE_DP_DONGLE_DP2HDMI_TYPE_2, +} NVEvoPassiveDpDongleType; + +/*! + * Query RM for the passive Displayport dongle type; this can influence + * the maximum pixel clock allowed on that display. + */ +static NVEvoPassiveDpDongleType +DpyGetPassiveDpDongleType(const NVDpyEvoRec *pDpyEvo, + NvU32 *passiveDpDongleMaxPclkKHz) +{ + NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS params = { 0 }; + NvU32 ret; + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + NVEvoPassiveDpDongleType passiveDpDongleType = + NV_EVO_PASSIVE_DP_DONGLE_UNUSED; + + // The rmcontrol below fails if we try querying the dongle info on + // non-TMDS connectors. 
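+ // (When a passive dongle is attached, DP2DVI is capped below at the + // single-link TMDS limit, while DP2HDMI type 1/2 dongles report their own + // maximum TMDS clock rate.)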
+ if (!IsConnectorTMDS(pConnectorEvo)) { + return passiveDpDongleType; + } + + params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + params.subDeviceInstance = pDispEvo->displayOwner; + params.flags = 0; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_GET_DISPLAYPORT_DONGLE_INFO, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failure reading DP dongle info " + "for display device %s.", pDpyEvo->name); + return passiveDpDongleType; + } + + if (FLD_TEST_DRF(0073_CTRL_DFP, + _GET_DISPLAYPORT_DONGLE_INFO_FLAGS, + _ATTACHED, _TRUE, params.flags)) + { + if (FLD_TEST_DRF(0073_CTRL_DFP, + _GET_DISPLAYPORT_DONGLE_INFO_FLAGS, _TYPE, _DP2DVI, + params.flags)) { + + passiveDpDongleType = NV_EVO_PASSIVE_DP_DONGLE_DP2DVI; + + if (passiveDpDongleMaxPclkKHz) { + *passiveDpDongleMaxPclkKHz = TMDS_SINGLE_LINK_PCLK_MAX; + } + } else if (FLD_TEST_DRF(0073_CTRL_DFP, + _GET_DISPLAYPORT_DONGLE_INFO_FLAGS, _TYPE, _DP2HDMI, + params.flags)) { + if (FLD_TEST_DRF(0073_CTRL_DFP, + _GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE, _TYPE, _1, + params.flags)) { + + passiveDpDongleType = NV_EVO_PASSIVE_DP_DONGLE_DP2HDMI_TYPE_1; + + if (passiveDpDongleMaxPclkKHz) { + *passiveDpDongleMaxPclkKHz = params.maxTmdsClkRateHz / 1000; + } + } else if (FLD_TEST_DRF(0073_CTRL_DFP, + _GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE, _TYPE, _2, + params.flags)) { + + passiveDpDongleType = NV_EVO_PASSIVE_DP_DONGLE_DP2HDMI_TYPE_2; + + if (passiveDpDongleMaxPclkKHz) { + *passiveDpDongleMaxPclkKHz = params.maxTmdsClkRateHz / 1000; + } + } + // For other dongle types: LFH_DVI (DMS59-DVI) and LFH_VGA (DMS59-VGA) breakout dongles, + // we consider them native connections, hence we don't track passiveDpDongleType here + } + } + + return passiveDpDongleType; +} + +/*! + * Get the maximum allowed pixel clock for pDpyEvo. + * + * This depends on the following conditions: + * + * - The RM's returned value is sufficient for non-TMDS connectors + * - For HDMI, the SOR capabilities exceed the RM's returned value to allow + * for HDMI 1.4 modes that exceed 165MHz on a single link, or + * for HDMI 2.1 modes if the source and sink are capable of FRL + * - For DVI, the user is allowed to set an option to exceed the 165MHz + * per-TMDS limit if the SOR capabilities allow it + * - Contrary to the above, passive DP->DVI and DP->HDMI dongles have their + * own limits + */ +void nvDpyProbeMaxPixelClock(NVDpyEvoPtr pDpyEvo) +{ + NvU32 ret; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + NvU32 displayOwner = pDispEvo->displayOwner; + NVEvoPassiveDpDongleType passiveDpDongleType; + NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS params = { 0 }; + NvU32 passiveDpDongleMaxPclkKHz; + + /* First, get the RM-reported value.
*/ + + params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId); + params.subDeviceInstance = pDispEvo->displayOwner; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_PCLK_LIMIT, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failure reading maximum pixel clock value " + "for display device %s.", pDpyEvo->name); + pDpyEvo->maxPixelClockKHz = 100000; + pDpyEvo->maxSingleLinkPixelClockKHz = pDpyEvo->maxPixelClockKHz; + return; + } + + pDpyEvo->maxPixelClockKHz = params.orPclkLimit; + pDpyEvo->maxSingleLinkPixelClockKHz = pDpyEvo->maxPixelClockKHz; + + /* + * The RM's returned max pclk value is sufficient for non-TMDS + * connectors + */ + if (!IsConnectorTMDS(pConnectorEvo)) { + return; + } + + /* + * The RM returns a 165MHz max pclk for single link TMDS and 330MHz + * max pclk for dual link TMDS. We can exceed that in the + * following cases: + * + * - HDMI 1.4a 4Kx2K and 1080p60Hz frame packed stereo modes + * require a 297MHz single TMDS link pixel clock, and HDMI 2.0 + * allows an even higher pixel clock. + * - While the DVI spec mandates a limit of 165MHz per TMDS link, + * since certain GPUs and certain displays support DVI + * connections at higher pixel clocks, we allow users to + * override this limit to allow validation of higher maximum + * pixel clocks over DVI. + */ + if (pDevEvo->gpus != NULL) { + + NVEvoSorCaps *sorCaps = pDevEvo->gpus[displayOwner].capabilities.sor; + NvU32 orIndex = pConnectorEvo->or.primary; + + if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) { + /* + * With the SOR crossbar, pConnectorEvo->or.mask is unknown, + * and may change at modeset time. Use the caps of SOR 0 + * for validation. + */ + orIndex = 0; + } + + if (nvDpyIsHdmiEvo(pDpyEvo)) { + pDpyEvo->maxPixelClockKHz = + pDpyEvo->maxSingleLinkPixelClockKHz = + sorCaps[orIndex].maxTMDSClkKHz; + + nvkms_memset(&pDpyEvo->hdmi.srcCaps, 0, sizeof(pDpyEvo->hdmi.srcCaps)); + nvkms_memset(&pDpyEvo->hdmi.sinkCaps, 0, sizeof(pDpyEvo->hdmi.sinkCaps)); + + if (nvHdmiDpySupportsFrl(pDpyEvo)) { + /* + * An SOR needs to be assigned temporarily to do FRL training. + * + * Since the only other SORs in use at the moment (if any) are + * those driving heads, we don't need to exclude RM from + * selecting any SOR, so an sorExcludeMask of 0 is appropriate. + */ + if (nvAssignSOREvo(pConnectorEvo, + nvDpyIdToNvU32(pConnectorEvo->displayId), + FALSE /* b2Heads1Or */, + 0 /* sorExcludeMask */) && + nvHdmiFrlAssessLink(pDpyEvo)) { + /* + * Note that although we "assessed" the link above, the + * maximum pixel clock set here doesn't take that into + * account -- it's the maximum the GPU hardware is capable + * of on the most capable link, mostly for reporting + * purposes. + * + * The calculation for whether a given mode can fit in the + * assessed FRL configuration is complex and depends on + * things like the amount of blanking, rather than a simple + * pclk cutoff. So, we query the hdmi library when + * validating each individual mode, when we know actual + * timings. + */ + + /* + * This comes from the Windows display driver: (4 lanes * + * 12Gb per lane * FRL encoding i.e. 16/18) / 1K + */ + pDpyEvo->maxPixelClockKHz = + ((4 * 12 * 1000 * 1000 * 16) / 18); + } + } + } else { + /* + * Connector and SOR both must be capable of driving dual-TMDS + * resolutions.
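+ * (Dual-link TMDS carries alternate pixels on a second link, which is + * why TMDS_DUAL_LINK_PCLK_MAX above is exactly twice the 165MHz + * single-link limit.)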
+             */
+            NvBool bDualTMDS = sorCaps[orIndex].dualTMDS &&
+                               FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _LINK, _DUAL,
+                                            pDpyEvo->pConnectorEvo->dfpInfo);
+
+            pDpyEvo->maxPixelClockKHz = (bDualTMDS ?
+                                         TMDS_DUAL_LINK_PCLK_MAX :
+                                         TMDS_SINGLE_LINK_PCLK_MAX);
+
+            pDpyEvo->maxSingleLinkPixelClockKHz = TMDS_SINGLE_LINK_PCLK_MAX;
+
+            if (pDpyEvo->allowDVISpecPClkOverride) {
+                pDpyEvo->maxPixelClockKHz = sorCaps[orIndex].maxTMDSClkKHz *
+                                            (bDualTMDS ? 2 : 1);
+                pDpyEvo->maxSingleLinkPixelClockKHz =
+                    sorCaps[orIndex].maxTMDSClkKHz;
+            }
+        }
+    }
+
+    /*
+     * Passive DP->DVI and DP->HDMI dongles may have a limit more
+     * restrictive than the one described above. Check whether one of
+     * these dongles is in use, and override the limit accordingly.
+     */
+    passiveDpDongleType =
+        DpyGetPassiveDpDongleType(pDpyEvo, &passiveDpDongleMaxPclkKHz);
+
+    if (passiveDpDongleType != NV_EVO_PASSIVE_DP_DONGLE_UNUSED) {
+        pDpyEvo->maxPixelClockKHz = NV_MIN(passiveDpDongleMaxPclkKHz,
+                                           pDpyEvo->maxPixelClockKHz);
+        pDpyEvo->maxSingleLinkPixelClockKHz = pDpyEvo->maxPixelClockKHz;
+    }
+}
+
+static void DpyGetDynamicDfpProperties(
+    NVDpyEvoPtr pDpyEvo,
+    const NvBool disableACPIBrightnessHotkeys)
+{
+    struct NvKmsGetDpyAttributeParams params;
+
+    if (disableACPIBrightnessHotkeys) {
+        return;
+    }
+
+    nvkms_memset(&params, 0, sizeof(params));
+    params.request.attribute = NV_KMS_DPY_ATTRIBUTE_BACKLIGHT_BRIGHTNESS;
+
+    pDpyEvo->hasBacklightBrightness =
+        nvGetDpyAttributeEvo(pDpyEvo, &params);
+}
+
+/*
+ * DpyGetStaticDfpProperties() - get static DFP properties from the
+ * general DFP flags: HDMI capability and whether the connector is
+ * internal.
+ */
+static void DpyGetStaticDfpProperties(NVDpyEvoPtr pDpyEvo)
+{
+    NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
+
+    if (pConnectorEvo->legacyType != NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) {
+        return;
+    }
+
+    if (nvDpyEvoIsDPMST(pDpyEvo)) {
+        // None of this stuff can be queried directly for dynamic DP MST
+        // displays.
+        // XXX DP MST: Should we fill in these fields somehow anyway?
+        return;
+    }
+
+    pDpyEvo->internal = FALSE;
+    pDpyEvo->hdmiCapable = FALSE;
+
+    if (pConnectorEvo->dfpInfo == 0x0) {
+        return;
+    }
+
+    /* Check if the connected DFP is HDMI capable */
+    if (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _HDMI_CAPABLE, _TRUE,
+                     pConnectorEvo->dfpInfo)) {
+        pDpyEvo->hdmiCapable = TRUE;
+    }
+
+    pDpyEvo->internal = nvConnectorIsInternal(pDpyEvo->pConnectorEvo);
+}
+
+/*!
+ * Return true if the connector is single or dual link TMDS (not CRT, not DP).
+ */
+static NvBool IsConnectorTMDS(NVConnectorEvoPtr pConnectorEvo)
+{
+    NvU32 protocol = pConnectorEvo->or.protocol;
+    return ((pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) &&
+            ((protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A) ||
+             (protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B) ||
+             (protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS)));
+}
+
+/*!
+ * Validate an NVKMS client-specified NvKmsModeValidationFrequencyRanges.
+ */ +static NvBool ValidateFrequencyRanges( + const struct NvKmsModeValidationFrequencyRanges *pRanges) +{ + NvU32 i; + + if (pRanges->numRanges >= ARRAY_LEN(pRanges->range)) { + return FALSE; + } + + for (i = 0; i < pRanges->numRanges; i++) { + if (pRanges->range[i].high < pRanges->range[i].low) { + return FALSE; + } + if (pRanges->range[i].high == 0) { + return FALSE; + } + } + + return TRUE; +} + + +static void DpySetValidSyncsHelper( + struct NvKmsModeValidationFrequencyRanges *pRanges, + const NVParsedEdidEvoRec *pParsedEdid, + NvBool isHorizSync, NvBool ignoreEdidSource) +{ + NvBool found = FALSE; + NvU32 edidMin = 0, edidMax = 0; + + if (pParsedEdid->valid) { + if (isHorizSync) { + edidMin = pParsedEdid->limits.min_h_rate_hz; + edidMax = pParsedEdid->limits.max_h_rate_hz; + } else { + edidMin = pParsedEdid->limits.min_v_rate_hzx1k; + edidMax = pParsedEdid->limits.max_v_rate_hzx1k; + } + } + + /* If the client-specified ranges are invalid, clear them. */ + + if ((pRanges->source == + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_BEFORE_EDID) || + (pRanges->source == + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_AFTER_EDID)) { + + if (!ValidateFrequencyRanges(pRanges)) { + nvkms_memset(pRanges, 0, sizeof(*pRanges)); + pRanges->source = NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_NONE; + } + } + + /* Use CLIENT_BEFORE_EDID, if provided. */ + + if (pRanges->source == + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_BEFORE_EDID) { + found = TRUE; + } + + /* + * Otherwise, if EDID-reported sync ranges are available, use + * those. + */ + if (!found && + !ignoreEdidSource && + (edidMin != 0) && (edidMax != 0)) { + + pRanges->numRanges = 1; + pRanges->range[0].low = edidMin; + pRanges->range[0].high = edidMax; + pRanges->source = NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_EDID; + found = TRUE; + } + + /* + * Otherwise, use CLIENT_AFTER_EDID, if available. + */ + if (!found && + (pRanges->source == + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_AFTER_EDID)) { + found = TRUE; + } + + /* + * Finally, fall back to conservative defaults if we could not + * find anything else; this will validate 1024x768 @ 60Hz. + */ + if (!found) { + + pRanges->numRanges = 1; + + if (isHorizSync) { + pRanges->range[0].low = 28000; + pRanges->range[0].high = 55000; + } else { + pRanges->range[0].low = 43000; + pRanges->range[0].high = 72000; + } + + pRanges->source = + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CONSERVATIVE_DEFAULTS; + } +} + + +/*! + * Assign NvKmsModeValidationValidSyncs + * + * Assign the HorizSync and VertRefresh ranges in + * NvKmsModeValidationValidSyncs. The priority order is: + * + * (1) Any HorizSync and VertRefresh provided by the client that + * overrides the EDID (CLIENT_BEFORE_EDID). + * (2) Valid range information from the EDID. + * (3) Any HorizSync and VertRefresh specified by the client as a + * fallback for the EDID (CLIENT_AFTER_EDID). + * (4) Conservative builtin defaults. + * + * HorizSync and VertRefresh can come from different sources. (1) and + * (3) are provided through pValidSyncs. (2) and (4) get written to + * pValidSyncs. + * + * \param[in] pDpy The dpy whose EDID will be used. + * \param[in,out] pValidSyncs This is initialized by the client, and + * will be updated based on the frequency + * range priority described above. 
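+ *
+ * For example (illustrative): if the client supplies CLIENT_AFTER_EDID
+ * ranges and the parsed EDID reports valid limits, the EDID ranges take
+ * effect and the source becomes
+ * NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_EDID; the client's ranges
+ * would be used only if the EDID limits were absent.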
+ */ +void nvDpySetValidSyncsEvo(const NVDpyEvoRec *pDpyEvo, + struct NvKmsModeValidationValidSyncs *pValidSyncs) +{ + const NVParsedEdidEvoRec *pParsedEdid = &pDpyEvo->parsedEdid; + + DpySetValidSyncsHelper(&pValidSyncs->horizSyncHz, + pParsedEdid, + TRUE, /* isHorizSync */ + pValidSyncs->ignoreEdidSource); + + DpySetValidSyncsHelper(&pValidSyncs->vertRefreshHz1k, + pParsedEdid, + FALSE, /* isHorizSync */ + pValidSyncs->ignoreEdidSource); +} + + +/* + * ReadEdidFromDP() - query the EDID for the specified display device from the + * DP lib. + */ + +static NvBool ReadEdidFromDP(const NVDpyEvoRec *pDpyEvo, NVEdidPtr pEdid) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NvU8 *pNewEdid = NULL; + int newEdidLength; + + if (!nvDpyUsesDPLib(pDpyEvo)) { + return FALSE; + } + + /* get size and allocate space for the EDID data */ + newEdidLength = nvDPGetEDIDSize(pDpyEvo); + if (newEdidLength == 0) { + goto fail; + } + + pNewEdid = nvCalloc(newEdidLength, 1); + + if (pNewEdid == NULL) { + goto fail; + } + + if (!nvDPGetEDID(pDpyEvo, pNewEdid, newEdidLength)) { + goto fail; + } + + pEdid->buffer = pNewEdid; + pEdid->length = newEdidLength; + + return TRUE; + + fail: + + nvFree(pNewEdid); + + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Unable to read EDID for display device %s", + pDpyEvo->name); + return FALSE; + +} // ReadEdidFromDP() + + + +/* + * ReadEdidFromResman() - query the EDID for the specified display device + */ + +static NvBool ReadEdidFromResman(const NVDpyEvoRec *pDpyEvo, NVEdidPtr pEdid, + NvKmsEdidReadMode readMode) +{ + NvU32 ret; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *getEdidParams; + int retryEdidReadCount = 0; + NvBool success = FALSE; + + if (nvDpyEvoIsDPMST(pDpyEvo)) { + // RM doesn't track this device, so leave the EDID alone. + return TRUE; + } + + getEdidParams = nvCalloc(sizeof(*getEdidParams), 1); + if (getEdidParams == NULL) { + goto done; + } + + query_edid: + + getEdidParams->subDeviceInstance = pDispEvo->displayOwner; + getEdidParams->displayId = nvDpyEvoGetConnectorId(pDpyEvo); + getEdidParams->flags = NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_NO; + + if (readMode == NVKMS_EDID_READ_MODE_ACPI) { + getEdidParams->flags |= DRF_DEF(0073_CTRL_SPECIFIC, _GET_EDID_FLAGS, + _DISPMUX_READ_MODE, _ACPI); + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, + getEdidParams, sizeof(*getEdidParams)); + + if ((ret != NVOS_STATUS_SUCCESS) || (getEdidParams->bufferSize <= 0)) { + /* WAR for Bug 777646: retry reading the EDID on error for DP + * devices to avoid possible TDR assertion in the RM. + * + * XXX This should be moved to the DP library. + */ + if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo) && + (retryEdidReadCount < NV_DP_READ_EDID_RETRIES)) { + retryEdidReadCount++; + + nvkms_usleep(NV_DP_REREAD_EDID_DELAY_USEC); + + goto query_edid; + } + goto done; + } + + pEdid->buffer = nvCalloc(getEdidParams->bufferSize, 1); + + if (pEdid->buffer == NULL) { + goto done; + } + + nvkms_memcpy(pEdid->buffer, &getEdidParams->edidBuffer, + getEdidParams->bufferSize); + pEdid->length = getEdidParams->bufferSize; + + success = TRUE; + + done: + + nvFree(getEdidParams); + + if (!success) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Unable to read EDID for display device %s", + pDpyEvo->name); + } + + return success; +} // ReadEdidFromResman() + + +/* + * Check if the EDID meets basic validation criteria. 
+ */ +static NvBool ValidateEdid(const NVDpyEvoRec *pDpyEvo, NVEdidPtr pEdid, + NVEvoInfoStringPtr pInfoString, + const NvBool ignoreEdidChecksum) +{ + NvU32 status, tmpStatus; + + status = NvTiming_EDIDValidationMask(pEdid->buffer, pEdid->length, TRUE); + tmpStatus = status; + + if (status == 0) { + return TRUE; + } + + nvEvoLogInfoString(pInfoString, + "The EDID read for display device %s is invalid:", + pDpyEvo->name); + + /* + * Warn about every error we know about, masking it out of tmpStatus, then + * warn about an unknown error if there are still any bits remaining in + * tmpStatus. + */ + if (status & + NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_VERSION)) { + nvEvoLogInfoString(pInfoString, + "- The EDID has an unrecognized version."); + tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_VERSION); + } + + if (status & + NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_SIZE)) { + nvEvoLogInfoString(pInfoString, + "- The EDID is too short."); + tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_SIZE); + } + + if (status & + NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM)) { + /* + * XXX NVKMS TODO: massage wording to not reference X + * configuration option. + */ + nvEvoLogInfoString(pInfoString, + "- The EDID has a bad checksum. %s", + ignoreEdidChecksum ? "This error will be ignored. Note " + "that an EDID with a bad checksum could indicate a " + "corrupt EDID. A corrupt EDID may have mode timings " + "beyond the capabilities of your display, and could " + "damage your hardware. Please use with care." : + "The \"IgnoreEDIDChecksum\" X configuration option may " + "be used to attempt using mode timings in this EDID in " + "spite of this error. A corrupt EDID may have mode " + "timings beyond the capabilities of your display, and " + "could damage your hardware. Please use with care."); + tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM); + } + + if (status & + NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_RANGE_LIMIT)) { + nvEvoLogInfoString(pInfoString, + "- The EDID has a bad range limit."); + tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_RANGE_LIMIT); + } + + if (status & + NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_DTD)) { + nvEvoLogInfoString(pInfoString, + "- The EDID has a bad detailed timing descriptor."); + tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_DTD); + } + + if (status & + NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD)) { + nvEvoLogInfoString(pInfoString, + "- The EDID has an extension block with a bad detailed " + "timing descriptor."); + tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD); + } + + if (status & + NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT)) { + nvEvoLogInfoString(pInfoString, + "- The EDID extension block is invalid."); + tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT); + } + + if (tmpStatus) { + nvEvoLogInfoString(pInfoString, + "- The EDID has an unrecognized error."); + } + + /* + * Unset the bits for errors we don't care about (invalid DTDs in the + * extension block, or checksum errors if ignoreEdidChecksum is in use) + * then return true if there are any remaining errors we do care about. 
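+     *
+     * For example (illustrative): if status has only the _CHECKSUM and
+     * _EXT_DTD bits set and ignoreEdidChecksum is in use, both bits are
+     * cleared below and the EDID is accepted, after logging that the bad
+     * detailed timing descriptor will be ignored.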
+     */
+    if (ignoreEdidChecksum) {
+        status &= ~(NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM));
+    }
+
+    if (status ==
+        NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD)) {
+        /*
+         * If the only problem with the EDID is invalid DTDs in the extension
+         * block, don't reject the EDID; those timings can be safely skipped in
+         * NvTiming_ParseEDIDInfo()/parse861ExtDetailedTiming().
+         */
+        nvEvoLogInfoString(pInfoString,
+            "This bad detailed timing descriptor will be ignored.");
+    }
+
+    status &= ~(NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD));
+
+    return (status == 0);
+}
+
+static const char *GetColorDepthBpc(NVT_COLORDEPTH colorDepth)
+{
+    static char buffer[32];
+    NVEvoInfoStringRec infoString;
+    NvBool first = TRUE;
+    int i;
+
+    struct {
+        NvBool val;
+        int bpc;
+    } table[] = {
+        { colorDepth.bpc.bpc6, 6 },
+        { colorDepth.bpc.bpc8, 8 },
+        { colorDepth.bpc.bpc10, 10 },
+        { colorDepth.bpc.bpc12, 12 },
+        { colorDepth.bpc.bpc14, 14 },
+        { colorDepth.bpc.bpc16, 16 },
+    };
+
+    nvInitInfoString(&infoString, buffer, sizeof(buffer));
+
+    buffer[0] = '\0';
+
+    for (i = 0; i < ARRAY_LEN(table); i++) {
+        if (table[i].val) {
+            nvEvoLogInfoStringRaw(&infoString, "%s%d",
+                                  first ? "" : ", ",
+                                  table[i].bpc);
+            first = FALSE;
+        }
+    }
+
+    return buffer;
+}
+
+
+/*
+ * Log information about the EDID.
+ */
+
+static void LogEdid(NVDpyEvoPtr pDpyEvo, NVEvoInfoStringPtr pInfoString)
+{
+    int k;
+    NVParsedEdidEvoPtr pParsedEdid;
+
+    static const struct {
+        NVT_TIMING_TYPE type;
+        const char *name;
+    } mode_type_table[] = {
+        { NVT_TYPE_DMT, "Display Monitor Timings" },
+        { NVT_TYPE_GTF, "Generalized Timing Formula Timings" },
+        { NVT_TYPE_ASPR, "ASPR Timings" },
+        { NVT_TYPE_NTSC_TV, "NTSC Timings" },
+        { NVT_TYPE_PAL_TV, "PAL Timings" },
+        { NVT_TYPE_CVT, "Coordinated Video Timings" },
+        { NVT_TYPE_CVT_RB, "Reduced Blanking Coordinated Video Timings" },
+        { NVT_TYPE_CUST, "Customized Timings" },
+        { NVT_TYPE_EDID_STD, "Standard Timings" },
+        { NVT_TYPE_EDID_DTD, "Detailed Timings" },
+        { NVT_TYPE_EDID_CVT, "Coordinated Video Timings" },
+        { NVT_TYPE_EDID_EST, "Established Timings" },
+        { NVT_TYPE_EDID_861ST, "CEA-861B Timings" },
+        { NVT_TYPE_NV_PREDEFINED, "Predefined Timings" },
+        { NVT_TYPE_DMT_RB, "Reduced Blanking Display Monitor Timings" },
+        { NVT_TYPE_EDID_EXT_DTD, "Extension Block Detailed Timings" },
+        { NVT_TYPE_SDTV, "SDTV Timings" },
+        { NVT_TYPE_HDTV, "HDTV Timings" },
+        { NVT_TYPE_SMPTE, "SMPTE Timings" },
+        { NVT_TYPE_EDID_VTB_EXT, "VTB Extension Timings" },
+        { NVT_TYPE_EDID_VTB_EXT_STD, "VTB Extension Standard Timings" },
+        { NVT_TYPE_EDID_VTB_EXT_DTD, "VTB Extension Detailed Timings" },
+        { NVT_TYPE_EDID_VTB_EXT_CVT, "VTB Extension CVT Timings" },
+        { NVT_TYPE_HDMI_STEREO, "HDMI Stereo Timings" },
+        { NVT_TYPE_DISPLAYID_1, "DisplayID Type 1 Timings" },
+        { NVT_TYPE_DISPLAYID_2, "DisplayID Type 2 Timings" },
+        { NVT_TYPE_HDMI_EXT, "HDMI Extended Resolution Timings" },
+        { NVT_TYPE_CUST_AUTO, "Customized Auto Timings" },
+        { NVT_TYPE_CUST_MANUAL, "Customized Manual Timings" },
+        { NVT_TYPE_CVT_RB_2, "Reduced Blanking Coordinated Video Timings, v2" },
+        { NVT_TYPE_DMT_RB_2, "Display Monitor Timings, V2" },
+        { NVT_TYPE_DISPLAYID_7, "DisplayID Type 7 Timings" },
+        { NVT_TYPE_DISPLAYID_8, "DisplayID Type 8 Timings" },
+        { NVT_TYPE_DISPLAYID_9, "DisplayID Type 9 Timings" },
+        { NVT_TYPE_DISPLAYID_10, "DisplayID Type 10 Timings" },
+        { NVT_TYPE_CVT_RB_3, "Reduced Blanking Coordinated Video Timings, v3" },
+    };
+
+    /*
+     * Trigger a warning if new NVT_TIMING_TYPE values are added
+     * without updating this function.
+     *
+     * If a warning is produced about an unhandled enum in the below
+     * switch statement, please update both the switch statement and
+     * mode_type_table[], or contact the sw-nvkms email alias.
+     */
+    if (pDpyEvo->parsedEdid.valid) {
+        for (k = 0; k < pDpyEvo->parsedEdid.info.total_timings; k++) {
+            NvU32 status = pDpyEvo->parsedEdid.info.timing[k].etc.status;
+            NVT_TIMING_TYPE type = NVT_GET_TIMING_STATUS_TYPE(status);
+
+            switch (type) {
+            case NVT_TYPE_DMT:
+            case NVT_TYPE_GTF:
+            case NVT_TYPE_ASPR:
+            case NVT_TYPE_NTSC_TV:
+            case NVT_TYPE_PAL_TV:
+            case NVT_TYPE_CVT:
+            case NVT_TYPE_CVT_RB:
+            case NVT_TYPE_CUST:
+            case NVT_TYPE_EDID_DTD:
+            case NVT_TYPE_EDID_STD:
+            case NVT_TYPE_EDID_EST:
+            case NVT_TYPE_EDID_CVT:
+            case NVT_TYPE_EDID_861ST:
+            case NVT_TYPE_NV_PREDEFINED:
+            case NVT_TYPE_DMT_RB:
+            case NVT_TYPE_EDID_EXT_DTD:
+            case NVT_TYPE_SDTV:
+            case NVT_TYPE_HDTV:
+            case NVT_TYPE_SMPTE:
+            case NVT_TYPE_EDID_VTB_EXT:
+            case NVT_TYPE_EDID_VTB_EXT_STD:
+            case NVT_TYPE_EDID_VTB_EXT_DTD:
+            case NVT_TYPE_EDID_VTB_EXT_CVT:
+            case NVT_TYPE_HDMI_STEREO:
+            case NVT_TYPE_DISPLAYID_1:
+            case NVT_TYPE_DISPLAYID_2:
+            case NVT_TYPE_HDMI_EXT:
+            case NVT_TYPE_CUST_AUTO:
+            case NVT_TYPE_CUST_MANUAL:
+            case NVT_TYPE_CVT_RB_2:
+            case NVT_TYPE_DMT_RB_2:
+            case NVT_TYPE_DISPLAYID_7:
+            case NVT_TYPE_DISPLAYID_8:
+            case NVT_TYPE_DISPLAYID_9:
+            case NVT_TYPE_DISPLAYID_10:
+            case NVT_TYPE_CVT_RB_3:
+                /*
+                 * XXX temporarily disable the warning so that additional
+                 * NVT_TYPEs can be added to nvtiming.h. Bug 3849339.
+                 */
+            default:
+                break;
+            }
+            break;
+        }
+    }
+
+    nvEvoLogInfoString(pInfoString, "");
+    nvEvoLogInfoString(pInfoString,
+                       "--- EDID for %s ---", pDpyEvo->name);
+
+    if (!pDpyEvo->parsedEdid.valid) {
+        nvEvoLogInfoString(pInfoString, "");
+        nvEvoLogInfoString(pInfoString, "No EDID Available.");
+        nvEvoLogInfoString(pInfoString, "");
+        goto done;
+    }
+
+    pParsedEdid = &pDpyEvo->parsedEdid;
+
+    nvEvoLogInfoString(pInfoString,
+                       "EDID Version : %d.%d",
+                       pParsedEdid->info.version >> 8,
+                       pParsedEdid->info.version & 0xff);
+
+    nvEvoLogInfoString(pInfoString,
+                       "Manufacturer : %s",
+                       pParsedEdid->info.manuf_name);
+
+    nvEvoLogInfoString(pInfoString,
+                       "Monitor Name : %s",
+                       pParsedEdid->monitorName);
+
+    nvEvoLogInfoString(pInfoString,
+                       "Product ID : 0x%04x",
+                       pParsedEdid->info.product_id);
+
+    nvEvoLogInfoString(pInfoString,
+                       "32-bit Serial Number : 0x%08x",
+                       pParsedEdid->info.serial_number);
+
+    nvEvoLogInfoString(pInfoString,
+                       "Serial Number String : %s",
+                       pParsedEdid->serialNumberString);
+
+    nvEvoLogInfoString(pInfoString,
+                       "Manufacture Date : %d, week %d",
+                       pParsedEdid->info.year,
+                       pParsedEdid->info.week);
+
+    /*
+     * Despite the name feature_ver_1_3, the features below are
+     * reported for all EDID versions.
+     */
+    nvEvoLogInfoString(pInfoString,
+                       "DPMS Capabilities :%s%s%s",
+                       pParsedEdid->info.u.feature_ver_1_3.support_standby ?
+                       " Standby" : "",
+                       pParsedEdid->info.u.feature_ver_1_3.support_suspend ?
+                       " Suspend" : "",
+                       pParsedEdid->info.u.feature_ver_1_3.support_active_off ?
+                       " Active Off" : "");
+
+    nvEvoLogInfoString(pInfoString,
+                       "Input Type : %s",
+                       pParsedEdid->info.input.isDigital ?
+                       "Digital" : "Analog");
+
+    nvEvoLogInfoString(pInfoString,
+                       "Prefer first detailed timing : %s",
+                       pParsedEdid->info.u.feature_ver_1_3.preferred_timing_is_native ?
+ "Yes" : "No"); + + if (pParsedEdid->info.version == NVT_EDID_VER_1_3) { + nvEvoLogInfoString(pInfoString, + "Supports GTF : %s", + pParsedEdid->info.u.feature_ver_1_3.support_gtf ? + "Yes" : "No"); + } + + if (pParsedEdid->info.version >= NVT_EDID_VER_1_4) { + NvBool continuousFrequency = FALSE; + if (pParsedEdid->info.input.isDigital) { + continuousFrequency = + pParsedEdid->info.u.feature_ver_1_4_digital.continuous_frequency; + } else { + continuousFrequency = + pParsedEdid->info.u.feature_ver_1_4_analog.continuous_frequency; + } + + nvEvoLogInfoString(pInfoString, + "Supports Continuous Frequency: %s", + continuousFrequency ? "Yes" : "No"); + + nvEvoLogInfoString(pInfoString, + "EDID 1.4 YCbCr 422 support : %s", + pParsedEdid->info.u.feature_ver_1_4_digital.support_ycrcb_422 + ? "Yes" : "No"); + + nvEvoLogInfoString(pInfoString, + "EDID 1.4 YCbCr 444 support : %s", + pParsedEdid->info.u.feature_ver_1_4_digital.support_ycrcb_444 + ? "Yes" : "No"); + } + + nvEvoLogInfoString(pInfoString, + "Maximum Image Size : %d mm x %d mm", + pParsedEdid->info.screen_size_x * 10, /* screen_size_* is in cm */ + pParsedEdid->info.screen_size_y * 10); + + nvEvoLogInfoString(pInfoString, + "Valid HSync Range : " + NV_FMT_DIV_1000_POINT_1 + " kHz - " NV_FMT_DIV_1000_POINT_1 " kHz", + NV_VA_DIV_1000_POINT_1(pParsedEdid->limits.min_h_rate_hz), + NV_VA_DIV_1000_POINT_1(pParsedEdid->limits.max_h_rate_hz)); + + nvEvoLogInfoString(pInfoString, + "Valid VRefresh Range : " + NV_FMT_DIV_1000_POINT_1 " Hz - " + NV_FMT_DIV_1000_POINT_1 " Hz", + NV_VA_DIV_1000_POINT_1(pParsedEdid->limits.min_v_rate_hzx1k), + NV_VA_DIV_1000_POINT_1(pParsedEdid->limits.max_v_rate_hzx1k)); + + nvEvoLogInfoString(pInfoString, + "EDID maximum pixel clock : " + NV_FMT_DIV_1000_POINT_1 " MHz", + NV_VA_DIV_1000_POINT_1(pParsedEdid->limits.max_pclk_10khz * 10)); + + if (pParsedEdid->info.nvdaVsdbInfo.valid) { + nvEvoLogInfoString(pInfoString, + "G-Sync capable : %s", + pParsedEdid->info.nvdaVsdbInfo.vrrData.v1.supportsVrr + ? 
"Yes" : "No"); + nvEvoLogInfoString(pInfoString, + "G-Sync minimum refresh rate : %d Hz", + pParsedEdid->info.nvdaVsdbInfo.vrrData.v1.minRefreshRate); + } + + if (pParsedEdid->info.ext_displayid.version) { + nvEvoLogInfoString(pInfoString, + "DisplayID vfreq_min : %d Hz", + pParsedEdid->info.ext_displayid.range_limits[0].vfreq_min); + } + + if (pParsedEdid->info.ext_displayid20.version && + pParsedEdid->info.ext_displayid20.range_limits.seamless_dynamic_video_timing_change) { + nvEvoLogInfoString(pInfoString, + "DisplayID 2.0 vfreq_min : %d Hz", + pParsedEdid->info.ext_displayid20.range_limits.vfreq_min); + } + + if (pParsedEdid->info.ext_displayid20.version && + pParsedEdid->info.ext_displayid20.total_adaptive_sync_descriptor != 0) { + for (k = 0; + k < pParsedEdid->info.ext_displayid20.total_adaptive_sync_descriptor && + k < ARRAY_LEN(pParsedEdid->info.ext_displayid20.adaptive_sync_descriptor); + k++) { + nvEvoLogInfoString(pInfoString, + "DisplayID 2.0 adaptive sync : %d Hz (max), %d Hz (min)", + pParsedEdid->info.ext_displayid20.adaptive_sync_descriptor[k].max_rr, + pParsedEdid->info.ext_displayid20.adaptive_sync_descriptor[k].min_rr); + } + } + + for (k = 0; k < ARRAY_LEN(pParsedEdid->info.ldd); k++) { + if (pParsedEdid->info.ldd[k].tag == NVT_EDID_DISPLAY_DESCRIPTOR_DRL) { + nvEvoLogInfoString(pInfoString, + "min_v_rate : %d Hz", + pParsedEdid->info.ldd[k].u.range_limit.min_v_rate); + } + } + + if (pParsedEdid->info.hdmiForumInfo.vrr_min != 0) { + nvEvoLogInfoString(pInfoString, + "HDMI Forum vrr_min : %d Hz", + pParsedEdid->info.hdmiForumInfo.vrr_min); + } + + nvLogEdidCea861InfoEvo(pDpyEvo, pInfoString); + + if (pParsedEdid->info.input.isDigital && + pParsedEdid->info.version >= NVT_EDID_VER_1_4) { + nvEvoLogInfoString(pInfoString, + "EDID bits per component : %d", + pParsedEdid->info.input.u.digital.bpc); + } + + /* print the tiled display extension block, if present */ + if (pParsedEdid->info.ext_displayid.tile_topology_id.vendor_id) { + const NVT_DISPLAYID_INFO *tile = &pParsedEdid->info.ext_displayid; + const char *tmp; + + nvEvoLogInfoString(pInfoString, + "Tiled display information :"); + nvEvoLogInfoString(pInfoString, + " Revision : %d", + tile->tiled_display_revision); + nvEvoLogInfoString(pInfoString, + " Single Enclosure : %s", + tile->tile_capability.bSingleEnclosure ? + "Yes" : "No"); + + tmp = "Unknown"; + switch (tile->tile_capability.multi_tile_behavior) { + case NVT_MULTI_TILE_BEHAVIOR_OTHER: + tmp = "Other"; + break; + case NVT_MULTI_TILE_BEHAVIOR_SOURCE_DRIVEN: + tmp = "Source-driven"; + break; + } + nvEvoLogInfoString(pInfoString, + " Multi-tile Behavior : %s", tmp); + + tmp = "Unknown"; + switch (tile->tile_capability.single_tile_behavior) { + case NVT_SINGLE_TILE_BEHAVIOR_OTHER: + tmp = "Other"; + break; + case NVT_SINGLE_TILE_BEHAVIOR_SOURCE_DRIVEN: + tmp = "Source-driven"; + break; + case NVT_SINGLE_TILE_BEHAVIOR_SCALE: + tmp = "Scale"; + break; + case NVT_SINGLE_TILE_BEHAVIOR_CLONE: + tmp = "Clone"; + break; + } + nvEvoLogInfoString(pInfoString, + " Single-tile Behavior : %s", tmp); + nvEvoLogInfoString(pInfoString, + " Topology : %d row%s, %d column%s", + tile->tile_topology.row, + (tile->tile_topology.row == 1) ? "" : "s", + tile->tile_topology.col, + (tile->tile_topology.col == 1) ? 
"" : "s"); + nvEvoLogInfoString(pInfoString, + " Location : (%d,%d)", + tile->tile_location.x, tile->tile_location.y); + nvEvoLogInfoString(pInfoString, + " Native Resolution : %dx%d", + tile->native_resolution.width, + tile->native_resolution.height); + if (tile->tile_capability.bHasBezelInfo) { + nvEvoLogInfoString(pInfoString, + " Bezel Information :"); + nvEvoLogInfoString(pInfoString, + " Pixel Density : %d", + tile->bezel_info.pixel_density); + nvEvoLogInfoString(pInfoString, + " Top : %d", + tile->bezel_info.top); + nvEvoLogInfoString(pInfoString, + " Bottom : %d", + tile->bezel_info.bottom); + nvEvoLogInfoString(pInfoString, + " Left : %d", + tile->bezel_info.right); + nvEvoLogInfoString(pInfoString, + " Right : %d", + tile->bezel_info.left); + } + nvEvoLogInfoString(pInfoString, + " Vendor ID : 0x%x", + tile->tile_topology_id.vendor_id); + nvEvoLogInfoString(pInfoString, + " Product ID : 0x%x", + tile->tile_topology_id.product_id); + nvEvoLogInfoString(pInfoString, + " Serial Number : 0x%x", + tile->tile_topology_id.serial_number); + } + + for (k = 0; k < ARRAY_LEN(mode_type_table); k++) { + + int i; + + /* scan through the ModeList to find a mode of the current type */ + + for (i = 0; i < pParsedEdid->info.total_timings; i++) { + NVT_TIMING *pTiming = &pParsedEdid->info.timing[i]; + if (mode_type_table[k].type == + NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status)) { + break; + } + } + + /* if there are none of this type, skip to the next mode type */ + + if (i == pParsedEdid->info.total_timings) { + continue; + } + + nvEvoLogInfoString(pInfoString, ""); + nvEvoLogInfoString(pInfoString, "%s:", mode_type_table[k].name); + + for (i = 0; i < pParsedEdid->info.total_timings; i++) { + + NVT_TIMING *pTiming = &pParsedEdid->info.timing[i]; + NVT_TIMING_TYPE type = + NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status); + int vScale = 1; + + if (mode_type_table[k].type != type) { + continue; + } + + if ((type == NVT_TYPE_EDID_EST) || + (type == NVT_TYPE_EDID_STD)) { + + nvEvoLogInfoString(pInfoString, + " %-4d x %-4d @ %d Hz", + NV_NVT_TIMING_HVISIBLE(pTiming), + NV_NVT_TIMING_VVISIBLE(pTiming), + pTiming->etc.rr); + continue; + } + + if (pTiming->interlaced) { + vScale = 2; + } + + nvEvoLogInfoString(pInfoString, + " %-4d x %-4d @ %d Hz", + NV_NVT_TIMING_HVISIBLE(pTiming), + NV_NVT_TIMING_VVISIBLE(pTiming), + pTiming->etc.rr); + + nvEvoLogInfoString(pInfoString, + " Pixel Clock : " + NV_FMT_DIV_1000_POINT_2 " MHz", + NV_VA_DIV_1000_POINT_2(pTiming->pclk + * 10)); + + nvEvoLogInfoString(pInfoString, + " HRes, HSyncStart : %d, %d", + pTiming->HVisible, + pTiming->HVisible + + pTiming->HFrontPorch); + + nvEvoLogInfoString(pInfoString, + " HSyncEnd, HTotal : %d, %d", + pTiming->HVisible + + pTiming->HFrontPorch + + pTiming->HSyncWidth, + pTiming->HTotal); + + nvEvoLogInfoString(pInfoString, + " VRes, VSyncStart : %d, %d", + pTiming->VVisible * vScale, + (pTiming->VVisible + + pTiming->VFrontPorch) * vScale); + + nvEvoLogInfoString(pInfoString, + " VSyncEnd, VTotal : %d, %d", + (pTiming->VVisible + + pTiming->VFrontPorch + + pTiming->VSyncWidth) * vScale, + pTiming->VTotal * vScale); + + nvEvoLogInfoString(pInfoString, + " H/V Polarity : %s/%s", + (pTiming->HSyncPol == NVT_H_SYNC_NEGATIVE) ? + "-" : "+", + (pTiming->VSyncPol == NVT_V_SYNC_NEGATIVE) ? 
+ "-" : "+"); + + if (pTiming->interlaced) { + nvEvoLogInfoString(pInfoString, + " Interlaced : yes"); + } + if (pTiming->etc.flag & NVT_FLAG_NV_DOUBLE_SCAN_TIMING) { + nvEvoLogInfoString(pInfoString, + " Double Scanned : yes"); + } + + if (type == NVT_TYPE_EDID_861ST) { + nvEvoLogInfoString(pInfoString, + " CEA Format : %d", + NVT_GET_CEA_FORMAT(pTiming->etc.status)); + } + + if (NV_NVT_TIMING_HAS_ASPECT_RATIO(pTiming)) { + nvEvoLogInfoString(pInfoString, + " Aspect Ratio : %d:%d", + NV_NVT_TIMING_IMAGE_SIZE_WIDTH(pTiming), + NV_NVT_TIMING_IMAGE_SIZE_HEIGHT(pTiming)); + } + + if (NV_NVT_TIMING_HAS_IMAGE_SIZE(pTiming)) { + nvEvoLogInfoString(pInfoString, + " Image Size : %d mm x %d mm", + NV_NVT_TIMING_IMAGE_SIZE_WIDTH(pTiming), + NV_NVT_TIMING_IMAGE_SIZE_HEIGHT(pTiming)); + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.rgb444.bpcs)) { + nvEvoLogInfoString(pInfoString, + " RGB 444 bpcs : %s", + GetColorDepthBpc(pTiming->etc.rgb444)); + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.yuv444.bpcs)) { + nvEvoLogInfoString(pInfoString, + " YUV 444 bpcs : %s", + GetColorDepthBpc(pTiming->etc.yuv444)); + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.yuv422.bpcs)) { + nvEvoLogInfoString(pInfoString, + " YUV 422 bpcs : %s", + GetColorDepthBpc(pTiming->etc.yuv422)); + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.yuv420.bpcs)) { + nvEvoLogInfoString(pInfoString, + " YUV 420 bpcs : %s", + GetColorDepthBpc(pTiming->etc.yuv420)); + } + } // i + } // k + + nvEvoLogInfoString(pInfoString, ""); + + done: + nvEvoLogInfoString(pInfoString, + "--- End of EDID for %s ---", pDpyEvo->name); + nvEvoLogInfoString(pInfoString, ""); +} + + + +/* + * Clear the EDID and related fields in the display device data + * structure. + */ + +static void ClearEdid(NVDpyEvoPtr pDpyEvo, const NvBool bSendHdmiCapsToRm) +{ + NVEdidRec edid = { }; + NVEvoInfoStringRec infoString; + nvInitInfoString(&infoString, NULL, 0); + + if (EdidHasChanged(pDpyEvo, &edid, NULL)) { + ApplyNewEdid(pDpyEvo, &edid, NULL, + bSendHdmiCapsToRm, &infoString); + } +} + + + +/* + * ClearCustomEdid() - send an empty custom EDID to RM; this is to + * clear out any stale state in RM about custom EDIDs that we may have + * told RM about previous runs of X. + */ + +static void ClearCustomEdid(const NVDpyEvoRec *pDpyEvo) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS *setEdidParams; + + if (nvDpyEvoIsDPMST(pDpyEvo)) { + // RM doesn't track this device, so leave the EDID alone. + return; + } + + setEdidParams = nvCalloc(sizeof(*setEdidParams), 1); + if (setEdidParams == NULL) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Unable to clear custom EDID for display device %s", + pDpyEvo->name); + return; + } + + setEdidParams->subDeviceInstance = pDispEvo->displayOwner; + setEdidParams->displayId = nvDpyEvoGetConnectorId(pDpyEvo); + setEdidParams->bufferSize = 0; + + /* ignore the NvRmControl() return value */ + + (void) nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2, + setEdidParams, sizeof(*setEdidParams)); + + nvFree(setEdidParams); +} // ClearCustomEdid() + + + +/* + * WriteEdidToResman() - send a custom EDID to RM. 
+ */ + +static void WriteEdidToResman(const NVDpyEvoRec *pDpyEvo, + const NVEdidRec *pEdid) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS *setEdidParams = NULL; + NvU32 status = NVOS_STATUS_ERROR_OPERATING_SYSTEM; + + if (pEdid->length > sizeof(setEdidParams->edidBuffer)) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "EDID for display device %s is too long for NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2", + pDpyEvo->name); + goto done; + } + + setEdidParams = nvCalloc(sizeof(*setEdidParams), 1); + if (setEdidParams == NULL) { + goto done; + } + + setEdidParams->subDeviceInstance = pDispEvo->displayOwner; + setEdidParams->displayId = nvDpyEvoGetConnectorId(pDpyEvo); + nvkms_memcpy(&setEdidParams->edidBuffer, pEdid->buffer, pEdid->length); + setEdidParams->bufferSize = pEdid->length; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2, + setEdidParams, sizeof(*setEdidParams)); + +done: + if (status != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Failure processing EDID for display device " + "%s.", pDpyEvo->name); + } + + nvFree(setEdidParams); +} // WriteEdidToResman() + + +/* + * NvTiming_ParseEDIDInfo() will ignore some modes that are blatantly + * wrong, so we need to apply any patching to the EDID bytes before + * parsing the EDID. + */ +static void PrePatchEdid(const NVDpyEvoRec *pDpyEvo, NVEdidPtr pEdid, + NVEvoInfoStringPtr pInfoString) +{ + NvU8 *pEdidData = pEdid->buffer; + + if (pEdid->buffer == NULL || pEdid->length < 128) { + return; + } + + /* + * Work around bug 628240: some AUO flat panels have invalid + * native modes where HSyncEnd is larger than HTotal, putting the + * end of the sync pulse several columns into the active region of + * the next frame. 
AUO confirmed these corrected timings: + * + * "1366x768" 69.30 1366 1398 1422 1432 768 771 775 806 -hsync -vsync + */ + if (pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP && + pEdidData[0x36] == 0x26 && + pEdidData[0x37] == 0x1b && + pEdidData[0x38] == 0x56 && + pEdidData[0x39] == 0x47 && + pEdidData[0x3a] == 0x50 && + pEdidData[0x3b] == 0x00 && + pEdidData[0x3c] == 0x26 && + pEdidData[0x3d] == 0x30 && + pEdidData[0x3e] == 0x30 && + pEdidData[0x3f] == 0x20 && + pEdidData[0x40] == 0x34 && + pEdidData[0x41] == 0x00 && + pEdidData[0x42] == 0x58 && + pEdidData[0x43] == 0xc1 && + pEdidData[0x44] == 0x10 && + pEdidData[0x45] == 0x00 && + pEdidData[0x46] == 0x00 && + pEdidData[0x47] == 0x18 && + pEdidData[0x7f] == 0x2e) { + + pEdidData[0x36] = 0x12; + pEdidData[0x37] = 0x1b; + pEdidData[0x38] = 0x56; + pEdidData[0x39] = 0x42; + pEdidData[0x3a] = 0x50; + pEdidData[0x3b] = 0x00; + pEdidData[0x3c] = 0x26; + pEdidData[0x3d] = 0x30; + pEdidData[0x3e] = 0x20; + pEdidData[0x3f] = 0x18; + pEdidData[0x40] = 0x34; + pEdidData[0x41] = 0x00; + pEdidData[0x42] = 0x58; + pEdidData[0x43] = 0xc1; + pEdidData[0x44] = 0x10; + pEdidData[0x45] = 0x00; + pEdidData[0x46] = 0x00; + pEdidData[0x47] = 0x18; + pEdidData[0x7f] = 0x5f; + + nvEvoLogInfoString(pInfoString, "Fixed invalid mode for 1366x768"); + } +} + +/* + * CreateParsedEdidFromNVT_TIMING() - Puts modetiming data from RM into an EDID format + */ +static void CreateParsedEdidFromNVT_TIMING( + NVT_TIMING *pTimings, + NvU8 bpc, + NVParsedEdidEvoPtr pParsedEdid) +{ + nvkms_memset(pParsedEdid, 0, sizeof(*pParsedEdid)); + pParsedEdid->info.total_timings = 1; + nvkms_memcpy(&pParsedEdid->info.timing[0], pTimings, sizeof(*pTimings)); + pParsedEdid->info.timing[0].etc.status = NVT_STATUS_CUST; + pParsedEdid->info.u.feature_ver_1_4_digital.continuous_frequency = FALSE; + pParsedEdid->info.version = NVT_EDID_VER_1_4; + pParsedEdid->info.input.isDigital = TRUE; + pParsedEdid->info.input.u.digital.bpc = bpc; + pParsedEdid->limits.min_h_rate_hz = 1; + pParsedEdid->limits.min_v_rate_hzx1k = 1; + pParsedEdid->limits.max_h_rate_hz = NV_U32_MAX; + pParsedEdid->limits.max_v_rate_hzx1k = NV_U32_MAX; + pParsedEdid->valid = TRUE; +} + +/* + * PatchAndParseEdid() - use the nvtiming library to parse the EDID data. The + * EDID data provided in the 'pEdid' argument may be patched or modified. 
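+ *
+ * Roughly, the steps below are: apply byte-level workarounds
+ * (PrePatchEdid()), parse with NvTiming_ParseEDIDInfo(), derive the
+ * frequency range limits, extract the monitor name and serial number
+ * string, apply per-timing fixups, and finally shrink the EDID buffer
+ * to its actual size.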
+ */ + +static void PatchAndParseEdid( + const NVDpyEvoRec *pDpyEvo, + NVEdidPtr pEdid, + NVParsedEdidEvoPtr pParsedEdid, + NVEvoInfoStringPtr pInfoString) +{ + int i; + NVT_STATUS status; + NvU32 edidSize; + + if (pEdid->buffer == NULL || pEdid->length == 0) { + return; + } + + nvkms_memset(pParsedEdid, 0, sizeof(*pParsedEdid)); + + PrePatchEdid(pDpyEvo, pEdid, pInfoString); + + /* parse the majority of information from the EDID */ + + status = NvTiming_ParseEDIDInfo(pEdid->buffer, pEdid->length, + &pParsedEdid->info); + + if (status != NVT_STATUS_SUCCESS) { + return; + } + + /* interpret the frequency range limits from the EDID */ + + NvTiming_CalculateEDIDLimits(&pParsedEdid->info, &pParsedEdid->limits); + + /* get the user-friendly monitor name */ + + NvTiming_GetMonitorName(&pParsedEdid->info, + (NvU8 *) &pParsedEdid->monitorName); + nvAssert(pParsedEdid->monitorName[0] != '\0'); + + /* find the serial number string */ + + pParsedEdid->serialNumberString[0] = '\0'; + + for (i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++) { + if (pParsedEdid->info.ldd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_DPSN) { + nvkms_strncpy( + pParsedEdid->serialNumberString, + (const char *)pParsedEdid->info.ldd[i].u.serial_number.str, + sizeof(pParsedEdid->serialNumberString)); + pParsedEdid->serialNumberString[ + sizeof(pParsedEdid->serialNumberString) - 1] = '\0'; + break; + } + } + + + for (i = 0; i < pParsedEdid->info.total_timings; i++) { + NVT_TIMING *pTiming = &pParsedEdid->info.timing[i]; + + /* patch up RRx1k for 640x480@60Hz */ + + if (IsEdid640x480_60_NVT_TIMING(pTiming)) { + pTiming->etc.rrx1k = 59940; + } + + /* + * Invalidate modes that require pixel repetition (i.e., modes + * that don't support Pixel Repetition 0). See bug 1459376. + */ + + nvAssert(pTiming->etc.rep != 0); + + if ((pTiming->etc.rep & NVBIT(0)) == 0) { + pTiming->etc.status = 0; + } + } + + pParsedEdid->valid = TRUE; + + /* resize the EDID buffer, if necessary */ + + edidSize = NVT_EDID_ACTUAL_SIZE(&pParsedEdid->info); + + if (edidSize < pEdid->length) { + NvU8 *pEdidData = nvAlloc(edidSize); + + if (pEdidData != NULL) { + nvkms_memcpy(pEdidData, pEdid->buffer, edidSize); + + nvFree(pEdid->buffer); + + pEdid->buffer = pEdidData; + pEdid->length = edidSize; + } + } +} + + +/*! + * Assign NVDpyEvoRec::name. 
+ *
+ * The name has the form:
+ *
+ *  "edidName (typeName-N.dpAddress)"
+ *
+ * If edidName is unavailable, then it, and the parentheses, are omitted:
+ *
+ *  "typeName-N.dpAddress"
+ *  "typeName-N"
+ *
+ * If dpAddress is unavailable, then the ".dpAddress" is omitted:
+ *
+ *  "edidName (typeName-N)"
+ *  "typeName-N"
+ */
+static void AssignDpyEvoName(NVDpyEvoPtr pDpyEvo)
+{
+    const NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo;
+    const char *edidName = "";
+    const char *openParen = "";
+    const char *closeParen = "";
+    const char *dpAddress = "";
+    const char *dpAddressSeparator = "";
+
+    if (pDpyEvo->parsedEdid.valid &&
+        pDpyEvo->parsedEdid.monitorName[0] != '\0') {
+        edidName = pDpyEvo->parsedEdid.monitorName;
+        openParen = " (";
+        closeParen = ")";
+    }
+
+    if (pDpyEvo->dp.addressString != NULL) {
+        dpAddress = pDpyEvo->dp.addressString;
+        dpAddressSeparator = ".";
+    }
+
+    nvkms_snprintf(pDpyEvo->name, sizeof(pDpyEvo->name),
+                   "%s%s%s%s%s%s",
+                   edidName,
+                   openParen,
+                   pConnectorEvo->name,
+                   dpAddressSeparator,
+                   dpAddress,
+                   closeParen);
+
+    pDpyEvo->name[sizeof(pDpyEvo->name) - 1] = '\0';
+}
+
+enum NvKmsDpyAttributeDigitalSignalValue
+nvGetDefaultDpyAttributeDigitalSignalValue(const NVConnectorEvoRec *pConnectorEvo)
+{
+    enum NvKmsDpyAttributeDigitalSignalValue signal =
+        NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_LVDS;
+
+    if (pConnectorEvo->legacyType == NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) {
+        if (nvConnectorUsesDPLib(pConnectorEvo)) {
+            signal = NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_DISPLAYPORT;
+        } else {
+            nvAssert((pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) ||
+                     (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_DSI));
+
+            if (pConnectorEvo->or.protocol ==
+                NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM) {
+                signal = NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_LVDS;
+            } else if (pConnectorEvo->or.protocol ==
+                       NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI) {
+                signal = NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_DSI;
+            } else {
+                // May be later changed to HDMI_FRL at modeset time.
+                signal = NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_TMDS;
+            }
+        }
+    }
+
+    return signal;
+}
+
+NVDpyEvoPtr nvAllocDpyEvo(NVDispEvoPtr pDispEvo,
+                          NVConnectorEvoPtr pConnectorEvo,
+                          NVDpyId dpyId, const char *dpAddress)
+{
+    NVDpyEvoPtr pDpyEvo;
+
+    pDpyEvo = nvCalloc(1, sizeof(*pDpyEvo));
+
+    if (pDpyEvo == NULL) {
+        return NULL;
+    }
+
+    pDpyEvo->pDispEvo = pDispEvo;
+    pDpyEvo->pConnectorEvo = pConnectorEvo;
+    pDpyEvo->apiHead = NV_INVALID_HEAD;
+    pDpyEvo->id = dpyId;
+
+    nvListAdd(&pDpyEvo->dpyListEntry, &pDispEvo->dpyList);
+
+    if (dpAddress) {
+        pDpyEvo->dp.addressString = nvStrDup(dpAddress);
+        pDispEvo->displayPortMSTIds =
+            nvAddDpyIdToDpyIdList(dpyId, pDispEvo->displayPortMSTIds);
+
+        if (!nvConnectorIsDPSerializer(pConnectorEvo)) {
+            pDispEvo->dynamicDpyIds =
+                nvAddDpyIdToDpyIdList(dpyId, pDispEvo->dynamicDpyIds);
+        }
+    }
+
+    AssignDpyEvoName(pDpyEvo);
+
+    nvDpyProbeMaxPixelClock(pDpyEvo);
+
+    pDpyEvo->requestedDithering.state =
+        NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO;
+    pDpyEvo->requestedDithering.mode =
+        NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO;
+    pDpyEvo->requestedDithering.depth =
+        NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO;
+
+    // Initialize the DP link rate and lane count to sane values.
+    // This is normally done in nvDPLibUpdateDpyLinkConfiguration,
+    // but do it here as well in case we query flat panel properties for
+    // screenless DP devices.
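+    // (Illustrative note: a link rate of 0 and a single lane are
+    // conservative placeholders here; nvDPLibUpdateDpyLinkConfiguration()
+    // is expected to replace them once a real link configuration is known.)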
+ if (nvConnectorUsesDPLib(pConnectorEvo)) { + pDpyEvo->dp.linkRate = 0; + pDpyEvo->dp.laneCount = NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1; + pDpyEvo->dp.connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_UNKNOWN; + pDpyEvo->dp.sinkIsAudioCapable = FALSE; + } + + pDpyEvo->requestedColorSpace = + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB; + pDpyEvo->requestedColorRange = + NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL; + + pDpyEvo->currentAttributes = NV_EVO_DEFAULT_ATTRIBUTES_SET; + pDpyEvo->currentAttributes.digitalSignal = + nvGetDefaultDpyAttributeDigitalSignalValue(pConnectorEvo); + + DpyGetStaticDfpProperties(pDpyEvo); + + return pDpyEvo; +} + + +void nvFreeDpyEvo(NVDispEvoPtr pDispEvo, NVDpyEvoPtr pDpyEvo) +{ + nvCancelSDRTransitionTimer(pDpyEvo); + + DpyDisconnectEvo(pDpyEvo, FALSE /* bSendHdmiCapsToRm */); + + // Let the DP library host implementation handle deleting a pDpy as if the + // library had notified it of a lost device. + nvDPDpyFree(pDpyEvo); + nvAssert(!pDpyEvo->dp.pDpLibDevice); + + pDispEvo->validDisplays = + nvDpyIdListMinusDpyId(pDispEvo->validDisplays, pDpyEvo->id); + + pDispEvo->displayPortMSTIds = + nvDpyIdListMinusDpyId(pDispEvo->displayPortMSTIds, pDpyEvo->id); + pDispEvo->dynamicDpyIds = + nvDpyIdListMinusDpyId(pDispEvo->dynamicDpyIds, pDpyEvo->id); + + nvListDel(&pDpyEvo->dpyListEntry); + + nvFree(pDpyEvo->dp.addressString); + nvFree(pDpyEvo); +} + + +/*! + * Return the pConnectorEvo associated with the given (static) display ID. + * + * XXX[DP] not valid for DP monitors, the connector will be known before + * initialization so this will not be needed. + * + * \param[in] pDisp The pDisp on which to search for the pConnector. + * \param[in] dpyId The ID of the connector to search for. + * + * \return The pConnectorEvo from pDisp that matches the ID, or NULL if + * no connector is found. 
+ */ +NVConnectorEvoPtr nvGetConnectorFromDisp(NVDispEvoPtr pDispEvo, NVDpyId dpyId) +{ + NVConnectorEvoPtr pConnectorEvo; + + nvAssert(nvDpyIdIsInDpyIdList(dpyId, pDispEvo->connectorIds)); + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (nvDpyIdsAreEqual(dpyId, pConnectorEvo->displayId)) { + return pConnectorEvo; + } + } + + nvAssert(!"Failed to find pDpy's connector!"); + return NULL; +} + +void nvDpyAssignSDRInfoFramePayload(NVT_HDR_INFOFRAME_PAYLOAD *pPayload) +{ + nvkms_memset(pPayload, 0, sizeof(*pPayload)); + pPayload->eotf = NVT_CEA861_HDR_INFOFRAME_EOTF_SDR_GAMMA; + pPayload->static_metadata_desc_id = NVT_CEA861_STATIC_METADATA_SM0; +} + +static void ConstructHdrInfoFrameSdp(const NVDispEvoRec *pDispEvo, + const NvU32 head, + DPSDP_DESCRIPTOR *sdp) +{ + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + + sdp->hb.hb0 = 0; + sdp->hb.hb1 = dp_pktType_DynamicRangeMasteringInfoFrame; + sdp->hb.hb2 = DP_INFOFRAME_SDP_V1_3_NON_AUDIO_SIZE - 1; + sdp->hb.hb3 = DP_INFOFRAME_SDP_V1_3_VERSION << + DP_INFOFRAME_SDP_V1_3_HB3_VERSION_SHIFT; + + sdp->db.db0 = NVT_VIDEO_INFOFRAME_VERSION_1; + sdp->db.db1 = sizeof(NVT_HDR_INFOFRAME_PAYLOAD); + + nvAssert(sizeof(NVT_HDR_INFOFRAME_PAYLOAD) <= (sizeof(sdp->db) - 2)); + + if (pHeadState->hdrInfoFrame.state == NVKMS_HDR_INFOFRAME_STATE_ENABLED) { + NVT_HDR_INFOFRAME_PAYLOAD *payload = + (NVT_HDR_INFOFRAME_PAYLOAD *) &sdp->db.db2; + + payload->eotf = pHeadState->hdrInfoFrame.eotf; + + payload->static_metadata_desc_id = NVT_CEA861_STATIC_METADATA_SM0; + + // payload->type1 = static metadata + nvAssert(sizeof(NVT_HDR_INFOFRAME_MASTERING_DATA) == + (sizeof(struct NvKmsHDRStaticMetadata))); + nvkms_memcpy(&payload->type1, + &pHeadState->hdrInfoFrame.staticMetadata, + sizeof(NVT_HDR_INFOFRAME_MASTERING_DATA)); + } else if (pHeadState->hdrInfoFrame.state == + NVKMS_HDR_INFOFRAME_STATE_TRANSITIONING) { + nvDpyAssignSDRInfoFramePayload((NVT_HDR_INFOFRAME_PAYLOAD *) &sdp->db.db2); + } else { + nvAssert(pHeadState->hdrInfoFrame.state == NVKMS_HDR_INFOFRAME_STATE_DISABLED); + + nvDpyAssignSDRInfoFramePayload((NVT_HDR_INFOFRAME_PAYLOAD *) &sdp->db.db2); + } + + sdp->dataSize = sizeof(NVT_HDR_INFOFRAME_PAYLOAD) + 2; +} + +static void UpdateDpHDRInfoFrame(const NVDispEvoRec *pDispEvo, const NvU32 head) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + DPSDP_DESCRIPTOR sdp = { }; + NvEvoInfoFrameTransmitControl transmitCtrl = + NV_EVO_INFOFRAME_TRANSMIT_CONTROL_INIT; + + ConstructHdrInfoFrameSdp(pDispEvo, head, &sdp); + + switch (pHeadState->hdrInfoFrame.state) { + case NVKMS_HDR_INFOFRAME_STATE_DISABLED: + transmitCtrl = NV_EVO_INFOFRAME_TRANSMIT_CONTROL_SINGLE_FRAME; + break; + case NVKMS_HDR_INFOFRAME_STATE_ENABLED: + case NVKMS_HDR_INFOFRAME_STATE_TRANSITIONING: + transmitCtrl = NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME; + break; + } + + pDevEvo->hal->SendDpInfoFrameSdp(pDispEvo, head, transmitCtrl, &sdp); +} + +void nvConstructDpVscSdp(const NVDispHeadInfoFrameStateEvoRec *pInfoFrame, + const NVDpyAttributeColor *pDpyColor, + DPSDP_DP_VSC_SDP_DESCRIPTOR *sdp) +{ + nvkms_memset(sdp, 0, sizeof(*sdp)); + + // Header + // Per DP1.3 spec + sdp->hb.hb0 = 0; + sdp->hb.hb1 = SDP_PACKET_TYPE_VSC; + sdp->hb.revisionNumber = SDP_VSC_REVNUM_STEREO_PSR2_COLOR; + sdp->hb.numValidDataBytes = SDP_VSC_VALID_DATA_BYTES_PSR2_COLOR; + + sdp->db.stereoInterface = 0; + sdp->db.psrState = 0; + sdp->db.contentType = SDP_VSC_CONTENT_TYPE_GRAPHICS; + switch 
(pDpyColor->format) {
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
+        sdp->db.pixEncoding = SDP_VSC_PIX_ENC_RGB;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
+        sdp->db.pixEncoding = SDP_VSC_PIX_ENC_YCBCR444;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
+        sdp->db.pixEncoding = SDP_VSC_PIX_ENC_YCBCR422;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
+        sdp->db.pixEncoding = SDP_VSC_PIX_ENC_YCBCR420;
+        break;
+    default:
+        nvAssert(!"unrecognized color format");
+        break;
+    }
+
+    switch (pDpyColor->format) {
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
+        switch (pDpyColor->colorimetry) {
+        case NVKMS_OUTPUT_COLORIMETRY_BT2100:
+            sdp->db.colorimetryFormat =
+                SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_ITU_R_BT2020_RGB;
+            break;
+        case NVKMS_OUTPUT_COLORIMETRY_DEFAULT:
+            sdp->db.colorimetryFormat =
+                SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_SRGB;
+            break;
+        }
+
+        switch (pDpyColor->bpc) {
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10:
+            sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_RGB_10BPC;
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8:
+            sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_RGB_8BPC;
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6:
+            sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_RGB_6BPC;
+            break;
+        default:
+            nvAssert(!"Invalid bpc value for RGB format");
+            break;
+        }
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
+        switch (pDpyColor->colorimetry) {
+        case NVKMS_OUTPUT_COLORIMETRY_BT2100:
+            sdp->db.colorimetryFormat =
+                SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT2020_YCBCR;
+            break;
+        case NVKMS_OUTPUT_COLORIMETRY_DEFAULT:
+            sdp->db.colorimetryFormat =
+                (pInfoFrame->hdTimings ?
+                 SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT709 :
+                 SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT601);
+            break;
+        }
+
+        switch (pDpyColor->bpc) {
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10:
+            sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_YCBCR_10BPC;
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8:
+            sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_YCBCR_8BPC;
+            break;
+        default:
+            nvAssert(!"Invalid bpc value for YUV color format");
+            break;
+        }
+        break;
+    default:
+        nvAssert(!"unrecognized color format");
+        break;
+    }
+
+    switch (pDpyColor->range) {
+    case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL:
+        sdp->db.dynamicRange = SDP_VSC_DYNAMIC_RANGE_VESA;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED:
+        sdp->db.dynamicRange = SDP_VSC_DYNAMIC_RANGE_CEA;
+        break;
+    default:
+        nvAssert(!"Invalid colorRange value");
+        break;
+    }
+}
+
+/*
+ * Construct the DP 1.3 VSC SDP infoframe, and toggle it on or off based on
+ * whether or not YUV420 mode or BT2100 colorimetry is in use.
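+ *
+ * For example (illustrative): with a YCbCr420 format or BT2100
+ * colorimetry, transmitControl below enables the packet every frame
+ * (_ENABLE _YES with _OTHER_FRAME, _SINGLE_FRAME, and _ON_HBLANK all
+ * _DISABLE); in every other case the packet is disabled (_ENABLE _NO).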
+ */
+static void UpdateDpVscSdpInfoFrame(
+    const NVDispEvoRec *pDispEvo,
+    const NvU32 head,
+    const NVDpyAttributeColor *pDpyColor,
+    const NVDispHeadInfoFrameStateEvoRec *pInfoFrame)
+{
+    const NVDispHeadStateEvoRec *pHeadState =
+        &pDispEvo->headState[head];
+    NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS params = { 0 };
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 ret;
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = pHeadState->activeRmId;
+
+    if ((pDpyColor->format == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420) ||
+        (pDpyColor->colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100)) {
+
+        // DPSDP_DP_VSC_SDP_DESCRIPTOR has a (dataSize, hb, db) layout, while
+        // NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS.aPacket needs to contain
+        // (hb, db) without dataSize, so this makes sdp->hb align with aPacket.
+        DPSDP_DP_VSC_SDP_DESCRIPTOR *sdp =
+            (DPSDP_DP_VSC_SDP_DESCRIPTOR *)(params.aPacket -
+                offsetof(DPSDP_DP_VSC_SDP_DESCRIPTOR, hb));
+
+        nvAssert((void *)&sdp->hb == (void *)params.aPacket);
+
+        nvConstructDpVscSdp(pInfoFrame, pDpyColor, sdp);
+
+        params.packetSize = sizeof(sdp->hb) + sdp->hb.numValidDataBytes;
+
+        params.transmitControl =
+            DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ENABLE, _YES) |
+            DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _OTHER_FRAME, _DISABLE) |
+            DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _SINGLE_FRAME, _DISABLE) |
+            DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ON_HBLANK, _DISABLE);
+    } else {
+        params.transmitControl =
+            DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ENABLE, _NO);
+    }
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET,
+                         &params,
+                         sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET failed");
+    }
+}
+
+static void UpdateDpInfoFrames(const NVDispEvoRec *pDispEvo,
+                               const NvU32 head,
+                               const NVDpyAttributeColor *pDpyColor,
+                               const NVDispHeadInfoFrameStateEvoRec *pInfoFrame)
+{
+    UpdateDpHDRInfoFrame(pDispEvo, head);
+
+    UpdateDpVscSdpInfoFrame(pDispEvo, head, pDpyColor, pInfoFrame);
+}
+
+void nvCancelSDRTransitionTimer(NVDpyEvoRec *pDpyEvo)
+{
+    nvkms_free_timer(pDpyEvo->hdrToSdrTransitionTimer);
+    pDpyEvo->hdrToSdrTransitionTimer = NULL;
+}
+
+static void SDRTransition(void *dataPtr, NvU32 dataU32)
+{
+    NvU32 head;
+    NVDpyEvoRec *pDpyEvo = dataPtr;
+    NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;
+    NVDispApiHeadStateEvoRec *pApiHeadState =
+        &pDispEvo->apiHeadState[pDpyEvo->apiHead];
+
+    nvCancelSDRTransitionTimer(pDpyEvo);
+
+    nvAssert(pApiHeadState->hwHeadsMask != 0);
+
+    FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
+        NVDispHeadStateEvoRec *pHeadState =
+            &pDispEvo->headState[head];
+        nvAssert(pHeadState->hdrInfoFrame.state ==
+                 NVKMS_HDR_INFOFRAME_STATE_TRANSITIONING);
+        pHeadState->hdrInfoFrame.state = NVKMS_HDR_INFOFRAME_STATE_DISABLED;
+    }
+
+    nvUpdateInfoFrames(pDpyEvo);
+}
+
+static
+void ScheduleSDRTransitionTimer(NVDpyEvoRec *pDpyEvo)
+{
+    if (pDpyEvo->hdrToSdrTransitionTimer) {
+        return;
+    }
+
+    pDpyEvo->hdrToSdrTransitionTimer =
+        nvkms_alloc_timer(SDRTransition,
+                          pDpyEvo,
+                          0,
+                          2000000 /* 2 seconds */);
+}
+
+void nvUpdateInfoFrames(NVDpyEvoRec *pDpyEvo)
+{
+    NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;
+    const NVDispApiHeadStateEvoRec *pApiHeadState;
+    const NVDispHeadStateEvoRec *pHeadState;
+    NvU32 head;
+
+    if (pDpyEvo->apiHead == NV_INVALID_HEAD) {
+        return;
+    }
+
pApiHeadState = &pDispEvo->apiHeadState[pDpyEvo->apiHead]; + + nvAssert((pApiHeadState->hwHeadsMask) != 0x0 && + (nvDpyIdIsInDpyIdList(pDpyEvo->id, pApiHeadState->activeDpys))); + + head = nvGetPrimaryHwHead(pDispEvo, pDpyEvo->apiHead); + + nvAssert(head != NV_INVALID_HEAD); + + pHeadState = &pDispEvo->headState[head]; + + if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + UpdateDpInfoFrames(pDispEvo, + head, + &pApiHeadState->attributes.color, + &pApiHeadState->infoFrame); + } else { + nvUpdateHdmiInfoFrames(pDispEvo, + head, + &pApiHeadState->attributes.color, + &pApiHeadState->infoFrame, + pDpyEvo); + } + + if (pHeadState->hdrInfoFrame.state == NVKMS_HDR_INFOFRAME_STATE_ENABLED) { + nvCancelSDRTransitionTimer(pDpyEvo); + } else if (pHeadState->hdrInfoFrame.state == + NVKMS_HDR_INFOFRAME_STATE_TRANSITIONING) { + ScheduleSDRTransitionTimer(pDpyEvo); + } +} + +/*! + * nvDpyRequiresDualLinkEvo() - Returns whether or not the given mode exceeds + * the maximum single TMDS link pixel clock. + * + * \param[in] pDpyEvo display to check the maximum single link pixel clock + * + * \param[in] pTimings mode timings to check pixel clock + * + * \return TRUE if pixel clock exceeds display's maximum single link pixel + * clock + */ +NvBool nvDpyRequiresDualLinkEvo(const NVDpyEvoRec *pDpyEvo, + const NVHwModeTimingsEvo *pTimings) +{ + const NvU32 pixelClock = (pTimings->yuv420Mode == NV_YUV420_MODE_HW) ? + (pTimings->pixelClock / 2) : pTimings->pixelClock; + + // Dual link HDMI is not possible. + nvAssert(!(nvDpyIsHdmiEvo(pDpyEvo) && + (pixelClock > pDpyEvo->maxSingleLinkPixelClockKHz))); + return (pixelClock > pDpyEvo->maxSingleLinkPixelClockKHz); +} + +/*! + * Return the NVDpyEvoPtr that corresponds to the given dpyId, on the + * given NVDispEvoPtr, or NULL if no matching NVDpyEvoPtr can be + * found. + */ +NVDpyEvoPtr nvGetDpyEvoFromDispEvo(const NVDispEvoRec *pDispEvo, NVDpyId dpyId) +{ + NVDpyEvoPtr pDpyEvo; + + FOR_ALL_EVO_DPYS(pDpyEvo, nvAddDpyIdToEmptyDpyIdList(dpyId), pDispEvo) { + return pDpyEvo; + } + + return NULL; +} + +/* + * Find or create a pDpy with a given root connector and topology path. + */ +NVDpyEvoPtr nvGetDPMSTDpyEvo(NVConnectorEvoPtr pConnectorEvo, + const char *address, NvBool *pDynamicDpyCreated) +{ + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVDpyEvoPtr pDpyEvo = NULL, pTmpDpyEvo; + NVDpyId dpyId; + + // Look for a pDpyEvo on pConnectorEvo whose dp address matches. + FOR_ALL_EVO_DPYS(pTmpDpyEvo, pDispEvo->validDisplays, pDispEvo) { + if (pTmpDpyEvo->pConnectorEvo != pConnectorEvo) { + continue; + } + if (pTmpDpyEvo->dp.addressString == NULL) { + continue; + } + if (nvkms_strcmp(pTmpDpyEvo->dp.addressString, address) == 0) { + pDpyEvo = pTmpDpyEvo; + goto done; + } + } + + // Find a display ID that is not used on this GPU. + dpyId = nvNewDpyId(pDispEvo->validDisplays); + if (nvDpyIdIsInvalid(dpyId)) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to allocate a display ID for device %s.%s", + pConnectorEvo->name, + address); + goto done; + } + + // Create a new pDpy for this address. + pDpyEvo = nvAllocDpyEvo(pDispEvo, pConnectorEvo, dpyId, address); + if (pDpyEvo == NULL) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to create a display device object for %s-%u.%s", + NvKmsConnectorTypeString(pConnectorEvo->type), + pConnectorEvo->typeIndex, + address); + goto done; + } + + pDispEvo->validDisplays = + nvAddDpyIdToDpyIdList(dpyId, pDispEvo->validDisplays); + + *pDynamicDpyCreated = TRUE; + +done: + return pDpyEvo; +} + +/*! 
+ * Return a string with a comma-separated list of dpy names, for all
+ * dpys in dpyIdList.
+ *
+ * If there are no dpys in the dpyIdList, return "none".
+ *
+ * The string is dynamically allocated and should be freed by the caller.
+ *
+ * Return NULL if an allocation failure occurs.
+ */
+char *nvGetDpyIdListStringEvo(NVDispEvoPtr pDispEvo,
+                              const NVDpyIdList dpyIdList)
+{
+    NVDpyEvoPtr pDpyEvo;
+    char *listString = NULL;
+    NvU32 lengths[NV_DPY_ID_MAX_DPYS_IN_LIST];
+    NvU32 totalLength = 0;
+    NvU32 currentOffset;
+    NvU32 index;
+
+    index = 0;
+    FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) {
+
+        nvAssert(index < ARRAY_LEN(lengths));
+
+        lengths[index] = nvkms_strlen(pDpyEvo->name);
+
+        totalLength += lengths[index];
+
+        if (index != 0) {
+            totalLength += 2; /* nvkms_strlen(", ") */
+        }
+
+        index++;
+    }
+
+    totalLength += 1; /* for nul terminator */
+
+    if (index == 0) {
+        return nvStrDup("none");
+    }
+
+    listString = nvAlloc(totalLength);
+
+    if (listString == NULL) {
+        return NULL;
+    }
+
+    index = 0;
+    currentOffset = 0;
+
+    FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) {
+
+        if (index != 0) {
+            listString[currentOffset] = ',';
+            listString[currentOffset+1] = ' ';
+            currentOffset += 2;
+        }
+
+        nvkms_memcpy(listString + currentOffset, pDpyEvo->name, lengths[index]);
+
+        currentOffset += lengths[index];
+
+        index++;
+    }
+
+    listString[currentOffset] = '\0';
+    currentOffset += 1;
+
+    nvAssert(currentOffset == totalLength);
+
+    return listString;
+}
+
+NvBool nvDpyGetDynamicData(
+    NVDpyEvoPtr pDpyEvo,
+    struct NvKmsQueryDpyDynamicDataParams *pParams)
+{
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    struct NvKmsQueryDpyDynamicDataRequest *pRequest = &pParams->request;
+    struct NvKmsQueryDpyDynamicDataReply *pReply = &pParams->reply;
+    NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
+    NVDpyIdList connectedList;
+    NVDpyIdList oneDpyIdList = nvAddDpyIdToEmptyDpyIdList(pDpyEvo->id);
+    NVDpyOverridePtr pDpyOverride = nvDpyEvoGetOverride(pDpyEvo);
+
+    nvkms_memset(pReply, 0, sizeof(*pReply));
+
+    if (pDpyOverride != NULL) {
+        if (pDpyOverride->connected && !pRequest->forceDisconnected) {
+            /*
+             * If display is overridden as connected, treat the request as if it
+             * had both forceConnected and overrideEdid set, unless the request
+             * had forceDisconnected set.
+             *
+             * If the request already had an EDID override, honor that EDID instead
+             * of the display override EDID.
+             */
+            NvBool old = pRequest->forceConnected;
+            pRequest->forceConnected = TRUE;
+
+            if (!pRequest->overrideEdid) {
+                size_t len = nvReadDpyOverrideEdid(pDpyOverride,
+                                                   pRequest->edid.buffer,
+                                                   ARRAY_LEN(pRequest->edid.buffer));
+
+                if (len != 0) {
+                    pRequest->overrideEdid = TRUE;
+                    pRequest->edid.bufferSize = len;
+                } else {
+                    pRequest->forceConnected = old;
+                }
+            }
+        } else if (!pDpyOverride->connected && !pRequest->forceConnected) {
+            /*
+             * If display is overridden as disconnected, treat the request as if it
+             * had forceDisconnected set, unless the request had forceConnected set.
+             */
+            pRequest->forceDisconnected = TRUE;
+        }
+    }
+
+    /*
+     * Check for the connection state of the dpy.
+     *
+     * For DP MST, we need to honor the current DPlib state; if a DP
+     * MST monitor is physically connected but forceDisconnected, its
+     * hotplug events won't get serviced and DPlib will complain
+     * loudly. This doesn't apply to DP serializer (which is not managed
+     * by DPLib) since we don't need to do any topology/branch detection,
+     * and we can honor force{Connected,Disconnected} in MST & SST mode.
+ * + * Otherwise, allow the client to override detection. + * + * Otherwise, honor the current DPlib state. + * + * If we're using a DP serializer connector in MST mode, don't expose any + * SST displays as connected. In all other cases, assume that everything + * is connected since the serializer connector has a fixed topology. + * + * Lastly, call RM to check if the dpy is connected. + */ + + if (nvDpyEvoIsDPMST(pDpyEvo) && + nvConnectorUsesDPLib(pConnectorEvo)) { + /* honor DP MST connectedness */ + connectedList = nvDPLibDpyIsConnected(pDpyEvo) ? + oneDpyIdList : nvEmptyDpyIdList(); + } else if (pRequest->forceConnected) { + connectedList = oneDpyIdList; + } else if (pRequest->forceDisconnected) { + connectedList = nvEmptyDpyIdList(); + } else if (nvConnectorUsesDPLib(pConnectorEvo)) { + connectedList = nvDPLibDpyIsConnected(pDpyEvo) ? + oneDpyIdList : nvEmptyDpyIdList(); + } else if (nvConnectorIsDPSerializer(pConnectorEvo)) { + if (pConnectorEvo->dpSerializerCaps.supportsMST && + !nvDpyEvoIsDPMST(pDpyEvo)) { + connectedList = nvEmptyDpyIdList(); + } else { + connectedList = oneDpyIdList; + } + } else { + connectedList = nvRmGetConnectedDpys(pDispEvo, oneDpyIdList); + } + + pDpyEvo->dp.inbandStereoSignaling = pRequest->dpInbandStereoSignaling; + + /* + * XXX NVKMS TODO: once NVKMS is in the kernel and + * nvAllocCoreChannelEvo() is guaranteed to happen before + * nvDpyGetDynamicData(), pass allowDVISpecPClkOverride through to + * nvDpyProbeMaxPixelClock() rather than cache it. + */ + pDpyEvo->allowDVISpecPClkOverride = pRequest->allowDVISpecPClkOverride; + + if (nvDpyIdIsInDpyIdList(pDpyEvo->id, connectedList)) { + if (!DpyConnectEvo(pDpyEvo, pParams)) { + return FALSE; + } + } else { + DpyDisconnectEvo(pDpyEvo, TRUE /* bSendHdmiCapsToRm */); + } + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + nvDPLibUpdateDpyLinkConfiguration(pDpyEvo); + } + + ct_assert(sizeof(pDpyEvo->name) == sizeof(pReply->name)); + + nvkms_memcpy(pReply->name, pDpyEvo->name, sizeof(pDpyEvo->name)); + + if (pDpyEvo->parsedEdid.valid) { + pReply->physicalDimensions.heightInCM = + pDpyEvo->parsedEdid.info.screen_size_y; + pReply->physicalDimensions.widthInCM = + pDpyEvo->parsedEdid.info.screen_size_x; + } + + /* + * XXX NVKMS until NVKMS is in the kernel and + * nvAllocCoreChannelEvo() is guaranteed to happen before + * nvDpyGetDynamicData(), pDpyEvo->maxPixelClockKHz could change + * later after the assignment here. 
+ */ + pReply->maxPixelClockKHz = pDpyEvo->maxPixelClockKHz; + + pReply->connected = + nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->connectedDisplays); + + pReply->isVirtualRealityHeadMountedDisplay = pDpyEvo->isVrHmd; + + pReply->vrrType = pDpyEvo->vrr.type; + pReply->supportsHDR = nvDpyIsHDRCapable(pDpyEvo); + + pReply->stereo3DVision.supported = pDpyEvo->stereo3DVision.supported; + pReply->stereo3DVision.isDLP = pDpyEvo->stereo3DVision.isDLP; + pReply->stereo3DVision.isAegis = pDpyEvo->stereo3DVision.isAegis; + pReply->stereo3DVision.subType = pDpyEvo->stereo3DVision.subType; + + pReply->dp.guid.valid = pDpyEvo->dp.guid.valid; + + ct_assert(sizeof(pReply->dp.guid.buffer) == + sizeof(pDpyEvo->dp.guid.buffer)); + nvkms_memcpy(pReply->dp.guid.buffer, pDpyEvo->dp.guid.buffer, + sizeof(pDpyEvo->dp.guid.buffer)); + + ct_assert(sizeof(pReply->dp.guid.str) == sizeof(pDpyEvo->dp.guid.str)); + nvkms_memcpy(pReply->dp.guid.str, pDpyEvo->dp.guid.str, + sizeof(pDpyEvo->dp.guid.str)); + + if (pDpyEvo->edid.length > sizeof(pReply->edid.buffer)) { + nvAssert(!"EDID larger than can be returned in NVKMS API"); + return FALSE; + } + + if (pDpyEvo->edid.length > 0) { + pReply->edid.bufferSize = pDpyEvo->edid.length; + nvkms_memcpy(pReply->edid.buffer, pDpyEvo->edid.buffer, pDpyEvo->edid.length); + } + + pReply->supportedOutputColorFormats = + nvDpyGetOutputColorFormatInfo(pDpyEvo); + + return TRUE; +} + +void nvDpyUpdateCurrentAttributes(NVDpyEvoRec *pDpyEvo) +{ + NVAttributesSetEvoRec newAttributes = pDpyEvo->currentAttributes; + + if (pDpyEvo->apiHead != NV_INVALID_HEAD) { + newAttributes = + pDpyEvo->pDispEvo->apiHeadState[pDpyEvo->apiHead].attributes; + } else { + newAttributes.dithering.enabled = FALSE; + newAttributes.dithering.depth = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE; + newAttributes.dithering.mode = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE; + newAttributes.digitalSignal = + nvGetDefaultDpyAttributeDigitalSignalValue(pDpyEvo->pConnectorEvo); + newAttributes.numberOfHardwareHeadsUsed = 0; + } + + if (newAttributes.color.format != + pDpyEvo->currentAttributes.color.format) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE, + newAttributes.color.format); + } + + if (newAttributes.color.range != + pDpyEvo->currentAttributes.color.range) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_RANGE, + newAttributes.color.range); + } + + if (newAttributes.dithering.enabled != + pDpyEvo->currentAttributes.dithering.enabled) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING, + newAttributes.dithering.enabled); + } + + if (newAttributes.dithering.depth != + pDpyEvo->currentAttributes.dithering.depth) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH, + newAttributes.dithering.depth); + } + + if (newAttributes.dithering.mode != + pDpyEvo->currentAttributes.dithering.mode) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE, + newAttributes.dithering.mode); + } + + if (newAttributes.imageSharpening.available != + pDpyEvo->currentAttributes.imageSharpening.available) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING_AVAILABLE, + newAttributes.imageSharpening.available); + } + + if (newAttributes.digitalSignal != + pDpyEvo->currentAttributes.digitalSignal) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL, + 
            newAttributes.digitalSignal);
+    }
+
+    if (newAttributes.numberOfHardwareHeadsUsed !=
+        pDpyEvo->currentAttributes.numberOfHardwareHeadsUsed) {
+        nvSendDpyAttributeChangedEventEvo(
+            pDpyEvo,
+            NV_KMS_DPY_ATTRIBUTE_NUMBER_OF_HARDWARE_HEADS_USED,
+            newAttributes.numberOfHardwareHeadsUsed);
+    }
+
+    pDpyEvo->currentAttributes = newAttributes;
+}
+
+// Returns TRUE if this display is capable of Adaptive-Sync
+NvBool nvDpyIsAdaptiveSync(const NVDpyEvoRec *pDpyEvo)
+{
+    return nvIsAdaptiveSyncDpyVrrType(pDpyEvo->vrr.type);
+}
+
+// Returns TRUE if this display is in the Adaptive-Sync defaultlist
+NvBool nvDpyIsAdaptiveSyncDefaultlisted(const NVDpyEvoRec *pDpyEvo)
+{
+    NV0073_CTRL_SPECIFIC_DEFAULT_ADAPTIVESYNC_DISPLAY_PARAMS params = { };
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 ret;
+
+    if (!pDpyEvo->parsedEdid.valid) {
+        return FALSE;
+    }
+
+    params.manufacturerID = pDpyEvo->parsedEdid.info.manuf_id;
+    params.productID = pDpyEvo->parsedEdid.info.product_id;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_SPECIFIC_DEFAULT_ADAPTIVESYNC_DISPLAY,
+                         &params, sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+                     "Failed to query default adaptivesync listing for %s",
+                     pDpyEvo->name);
+        return FALSE;
+    }
+
+    return params.bDefaultAdaptivesync;
+}
+
+static enum NvKmsDpyAttributeColorBpcValue GetYuv422MaxBpc(
+    const NVDpyEvoRec *pDpyEvo)
+{
+    const NVT_EDID_CEA861_INFO *p861Info =
+        &pDpyEvo->parsedEdid.info.ext861;
+
+    nvAssert(nvDpyIsHdmiEvo(pDpyEvo) ||
+             nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo));
+
+    if (!pDpyEvo->parsedEdid.valid ||
+        !pDpyEvo->parsedEdid.info.input.isDigital) {
+        return NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN;
+    }
+
+    if (pDpyEvo->parsedEdid.info.version >= NVT_EDID_VER_1_4) {
+        if (pDpyEvo->parsedEdid.info.u.feature_ver_1_4_digital.support_ycrcb_422) {
+            if (pDpyEvo->parsedEdid.info.input.u.digital.bpc >= 10) {
+                return NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10;
+            } else if (pDpyEvo->parsedEdid.info.input.u.digital.bpc >= 8) {
+                return NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
+            }
+        }
+    } else {
+        nvAssert(!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo));
+
+        if (p861Info->revision >= NVT_CEA861_REV_A &&
+            !!(p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_422)) {
+            return NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10;
+        }
+    }
+
+    return NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN;
+}
+
+NvKmsDpyOutputColorFormatInfo nvDpyGetOutputColorFormatInfo(
+    const NVDpyEvoRec *pDpyEvo)
+{
+    const NVConnectorEvoRec *pConnectorEvo =
+        pDpyEvo->pConnectorEvo;
+    NvKmsDpyOutputColorFormatInfo colorFormatsInfo = { };
+
+    if (pConnectorEvo->legacyType ==
+        NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) {
+
+        colorFormatsInfo.rgb444.maxBpc =
+            NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10;
+        colorFormatsInfo.rgb444.minBpc =
+            NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10;
+
+    } else if (pConnectorEvo->legacyType ==
+               NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) {
+
+        if (pConnectorEvo->signalFormat ==
+            NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) {
+
+            if (pDpyEvo->parsedEdid.valid) {
+                switch (pDpyEvo->parsedEdid.info.input.u.digital.bpc) {
+                    case 10:
+                        colorFormatsInfo.rgb444.maxBpc =
+                            NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10;
+                        break;
+                    case 6:
+                        colorFormatsInfo.rgb444.maxBpc =
+                            NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6;
+                        break;
+                    default:
+                        nvAssert(!"Unsupported bpc for DSI");
+                        // fall through
+                    case 8:
+                        colorFormatsInfo.rgb444.maxBpc =
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8; + break; + } + + colorFormatsInfo.rgb444.minBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6; + } else { + colorFormatsInfo.rgb444.maxBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8; + colorFormatsInfo.rgb444.minBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8; + } + } else if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + + if (pDpyEvo->parsedEdid.valid && + pDpyEvo->parsedEdid.info.input.isDigital && + pDpyEvo->parsedEdid.info.version >= NVT_EDID_VER_1_4) { + if (pDpyEvo->parsedEdid.info.input.u.digital.bpc >= 10) { + colorFormatsInfo.rgb444.maxBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10; + colorFormatsInfo.yuv444.maxBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10; + } else if (pDpyEvo->parsedEdid.info.input.u.digital.bpc < 8) { + colorFormatsInfo.rgb444.maxBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6; + colorFormatsInfo.yuv444.maxBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN; + } else { + colorFormatsInfo.rgb444.maxBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8; + colorFormatsInfo.yuv444.maxBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8; + } + + colorFormatsInfo.rgb444.minBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6; + if (colorFormatsInfo.yuv444.maxBpc != + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) { + colorFormatsInfo.yuv444.minBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8; + } else { + colorFormatsInfo.yuv444.minBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN; + } + + colorFormatsInfo.yuv422.maxBpc = GetYuv422MaxBpc(pDpyEvo); + if (colorFormatsInfo.yuv422.maxBpc != + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) { + colorFormatsInfo.yuv422.minBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8; + } else { + colorFormatsInfo.yuv422.minBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN; + } + } else { + colorFormatsInfo.rgb444.maxBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8; + colorFormatsInfo.rgb444.minBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8; + } + } else { + colorFormatsInfo.rgb444.maxBpc = + nvDpyIsHdmiDepth30Evo(pDpyEvo) ? + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10 : + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8; + colorFormatsInfo.rgb444.minBpc = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8; + + if (nvDpyIsHdmiEvo(pDpyEvo)) { + colorFormatsInfo.yuv444.maxBpc = + nvDpyIsHdmiDepth30Evo(pDpyEvo) ? 
+                        NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10 :
+                        NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
+                colorFormatsInfo.yuv444.minBpc =
+                    NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
+
+                colorFormatsInfo.yuv422.maxBpc = GetYuv422MaxBpc(pDpyEvo);
+                if (colorFormatsInfo.yuv422.maxBpc !=
+                    NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) {
+                    colorFormatsInfo.yuv422.minBpc =
+                        NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
+                } else {
+                    colorFormatsInfo.yuv422.minBpc =
+                        NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN;
+                }
+            }
+        }
+    }
+
+    switch (nvkms_debug_force_color_space()) {
+        case NVKMS_DEBUG_FORCE_COLOR_SPACE_RGB:
+            colorFormatsInfo.yuv444.maxBpc =
+                colorFormatsInfo.yuv444.minBpc =
+                colorFormatsInfo.yuv422.maxBpc =
+                colorFormatsInfo.yuv422.minBpc =
+                NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN;
+            break;
+        case NVKMS_DEBUG_FORCE_COLOR_SPACE_YUV444:
+            colorFormatsInfo.rgb444.maxBpc =
+                colorFormatsInfo.rgb444.minBpc =
+                colorFormatsInfo.yuv422.maxBpc =
+                colorFormatsInfo.yuv422.minBpc =
+                NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN;
+            break;
+        case NVKMS_DEBUG_FORCE_COLOR_SPACE_YUV422:
+            colorFormatsInfo.rgb444.maxBpc =
+                colorFormatsInfo.rgb444.minBpc =
+                colorFormatsInfo.yuv444.maxBpc =
+                colorFormatsInfo.yuv444.minBpc =
+                NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN;
+            break;
+        default:
+            nvAssert(!"Unrecognized debug_force_color_space value");
+            // fallthrough
+        case NVKMS_DEBUG_FORCE_COLOR_SPACE_NONE:
+            break;
+    }
+
+    return colorFormatsInfo;
+}
+
+NvU32 nvDpyGetPossibleApiHeadsMask(const NVDpyEvoRec *pDpyEvo)
+{
+    NvU32 possibleApiHeadMask = 0x0;
+    NvU32 possibleNumLayers = NVKMS_MAX_LAYERS_PER_HEAD;
+    const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo;
+
+    /*
+     * DSI supports only the hardware head-0 assignment, and the
+     * dp-serializer dpys are bound to specific hardware heads;
+     * the modeset client may be allowed to choose only those
+     * api-heads to drive these dpys whose number of layers is
+     * less than or equal to the number of layers supported by the
+     * bound hardware heads.
+     */
+    if (pDpyEvo->pConnectorEvo->signalFormat ==
+        NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) {
+        possibleNumLayers = pDevEvo->head[0].numLayers;
+    } else if (nvConnectorIsDPSerializer(pDpyEvo->pConnectorEvo)) {
+        const NvU32 boundHead = pDpyEvo->dp.serializerStreamIndex;
+        possibleNumLayers = pDevEvo->head[boundHead].numLayers;
+    }
+
+    for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
+        if (pDevEvo->apiHead[apiHead].numLayers <= possibleNumLayers) {
+            possibleApiHeadMask |= NVBIT(apiHead);
+        }
+    }
+
+    return possibleApiHeadMask;
+}
+
+NvBool nvDpyIsHDRCapable(const NVDpyEvoRec *pDpyEvo)
+{
+    const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+
+    const NVT_EDID_INFO *pInfo = &pDpyEvo->parsedEdid.info;
+    const NVT_HDR_STATIC_METADATA *pHdrInfo =
+        &pInfo->hdr_static_metadata_info;
+
+    // Only supported on DP 1.3+ or HDMI
+    if (nvDpyUsesDPLib(pDpyEvo)) {
+        unsigned int major;
+        unsigned int minor;
+
+        if (!pDevEvo->caps.supportsDP13) {
+            return FALSE;
+        }
+
+        if (!nvDPDpyGetDpcdRevision(pDpyEvo, &major, &minor)) {
+            return FALSE;
+        }
+
+        nvAssert(major >= 1);
+        if ((major == 1) && (minor < 3)) {
+            return FALSE;
+        }
+    } else if (!nvDpyIsHdmiEvo(pDpyEvo)) {
+        return FALSE;
+    }
+
+    if (!pDpyEvo->parsedEdid.valid) {
+        return FALSE;
+    }
+
+    /*
+     * XXX HDR is not supported with HDMI 3D due to both using VSI
+     * infoframes.
+     */
+    if (pInfo->HDMI3DSupported) {
+        return FALSE;
+    }
+
+    // Sink should support ST2084 EOTF.
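+    // (SMPTE ST 2084 is the PQ EOTF used by HDR10; a sink that cannot decode
+    // PQ cannot usefully display the PQ-encoded output that goes with the HDR
+    // static metadata infoframe.)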
+    if (!pHdrInfo->supported_eotf.smpte_st_2084_eotf) {
+        return FALSE;
+    }
+
+    /*
+     * Sink should support static metadata type1. Nvtiming sets
+     * static_metadata_type to 1 if the sink supports static metadata type1.
+     */
+    if (pHdrInfo->static_metadata_type != 1) {
+        return FALSE;
+    }
+
+    return TRUE;
+}
diff --git a/src/nvidia-modeset/src/nvkms-event.c b/src/nvidia-modeset/src/nvkms-event.c
new file mode 100644
index 0000000..a2285ba
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-event.c
@@ -0,0 +1,207 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2008-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvos.h"
+#include "dp/nvdp-connector.h"
+#include "nvkms-event.h"
+#include "nvkms-rm.h"
+#include "nvkms-types.h"
+#include "nvkms-dpy.h"
+#include "nvkms-rmapi.h"
+#include "nvkms-utils.h"
+#include "nvkms-private.h"
+#include "nvkms-evo.h"
+
+/*
+ * Handle a display device hotplug event.
+ *
+ * What "hotplug" means is unclear, but it could mean any of the following:
+ * - A display device is plugged in.
+ * - A display device is unplugged.
+ * - A display device was unplugged and then plugged back in.
+ * - A display device was plugged in and then unplugged.
+ * - An already connected display device is turned on.
+ * - An already connected display device is turned off.
+ * - A DisplayPort device needs its link status and RX Capabilities fields
+ *   read and may need to be retrained ("long" hotplug event, > 2ms).
+ *
+ * DisplayPort "short" hotplug events, which are between 0.25ms and 2ms, are
+ * handled separately by nvHandleDPIRQEventDeferredWork below.
+ */
+
+void
+nvHandleHotplugEventDeferredWork(void *dataPtr, NvU32 dataU32)
+{
+    NVDispEvoPtr pDispEvo = dataPtr;
+    NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS hotplugParams = { 0 };
+    NvU32 ret;
+    NVDpyIdList hotplugged, unplugged, tmpUnplugged, changed;
+    NVDpyIdList connectedDisplays;
+    NVDpyEvoPtr pDpyEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+
+    // Get the hotplug state.
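+    // NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_UNPLUG_STATE reports, per connector,
+    // which dpys RM saw plugged in and which it saw unplugged since the last
+    // query; the two masks are expected to be disjoint (see the asserts below).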
+ hotplugParams.subDeviceInstance = pDispEvo->displayOwner; + + if ((ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_UNPLUG_STATE, + &hotplugParams, + sizeof(hotplugParams))) + != NVOS_STATUS_SUCCESS) { + + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, "Failed to determine which " + "devices were hotplugged: 0x%x\n", ret); + return; + } + + /* + * Work around an RM bug in hotplug notification when the GPU is in + * GC6. In this case, the RM will notify us of a hotplug event, but + * NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_UNPLUG_STATE returns both + * hotPlugMask and hotUnplugMask as 0. + * Bug 200528641 tracks finding a root cause. Until that bug is + * fixed, call NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE to get the + * full list of connected dpys and construct hotplugged and + * unplugged lists from that if we encounter this case. + */ + if ((hotplugParams.hotPlugMask == 0) && + (hotplugParams.hotUnplugMask == 0)) { + const NVDpyIdList updatedDisplayList = nvRmGetConnectedDpys(pDispEvo, + pDispEvo->connectorIds); + hotplugged = nvDpyIdListMinusDpyIdList(updatedDisplayList, + pDispEvo->connectedDisplays); + unplugged = nvDpyIdListMinusDpyIdList(pDispEvo->connectedDisplays, + updatedDisplayList); + } else { + hotplugged = nvNvU32ToDpyIdList(hotplugParams.hotPlugMask); + unplugged = nvNvU32ToDpyIdList(hotplugParams.hotUnplugMask); + } + + // The RM only reports the latest plug/unplug status of each dpy. + nvAssert(nvDpyIdListIsEmpty(nvIntersectDpyIdListAndDpyIdList(hotplugged, + unplugged))); + nvAssert(nvDpyIdListIsASubSetofDpyIdList(hotplugged, + pDispEvo->connectorIds)); + nvAssert(nvDpyIdListIsASubSetofDpyIdList(unplugged, + pDispEvo->connectorIds)); + + connectedDisplays = pDispEvo->connectedDisplays; + + // Ignore non-DP devices that were reported as unplugged while already + // disconnected. + tmpUnplugged = nvEmptyDpyIdList(); + FOR_ALL_EVO_DPYS(pDpyEvo, unplugged, pDispEvo) { + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + + if (nvConnectorUsesDPLib(pConnectorEvo) || + nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedDisplays)) { + + tmpUnplugged = + nvAddDpyIdToDpyIdList(pConnectorEvo->displayId, tmpUnplugged); + } + } + unplugged = tmpUnplugged; + + // Non-DP devices that were disconnected and connected again should generate an + // unplug / plug pair. + FOR_ALL_EVO_DPYS(pDpyEvo, hotplugged, pDispEvo) { + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + + if (!nvConnectorUsesDPLib(pConnectorEvo) && + nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedDisplays)) { + + unplugged = nvAddDpyIdToDpyIdList(pConnectorEvo->displayId, unplugged); + } + } + +#if defined(DEBUG) + if (!nvDpyIdListIsEmpty(hotplugged)) { + char *str = nvGetDpyIdListStringEvo(pDispEvo, hotplugged); + nvEvoLogDispDebug(pDispEvo, EVO_LOG_INFO, + "Received display hotplug event: %s", + nvSafeString(str, "unknown")); + nvFree(str); + } + if (!nvDpyIdListIsEmpty(unplugged)) { + char *str = nvGetDpyIdListStringEvo(pDispEvo, unplugged); + nvEvoLogDispDebug(pDispEvo, EVO_LOG_INFO, + "Received display unplug event: %s", + nvSafeString(str, "unknown")); + nvFree(str); + } +#endif /* DEBUG */ + + // First, the OR configuration of the connector should not change, but + // re-query it to make sure. + changed = nvAddDpyIdListToDpyIdList(hotplugged, unplugged); + FOR_ALL_EVO_DPYS(pDpyEvo, changed, pDispEvo) { + nvRmGetConnectorORInfo(pDpyEvo->pConnectorEvo, TRUE); + } + + // Next, disconnect devices that are in the unplug mask. 
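+    // DisplayPort connectors route the notification through the DP library as
+    // a "long pulse" of FALSE; other connectors simply notify NVKMS clients
+    // that the dpy changed.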
+ FOR_ALL_EVO_DPYS(pDpyEvo, unplugged, pDispEvo) { + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + nvDPNotifyLongPulse(pConnectorEvo, FALSE); + } else { + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED); + } + } + + // Finally, connect devices that are in the plug mask. + FOR_ALL_EVO_DPYS(pDpyEvo, hotplugged, pDispEvo) { + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + nvDPNotifyLongPulse(pConnectorEvo, TRUE); + } else { + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED); + } + } +} + +void +nvHandleDPIRQEventDeferredWork(void *dataPtr, NvU32 dataU32) +{ + NVDispEvoPtr pDispEvo = dataPtr; + + // XXX[AGP]: ReceiveDPIRQEvent throws away the DisplayID of the device that + // caused the event, so for now we have to poll all of the connected DP + // devices to see which ones need attention. When RM is fixed, this can be + // improved. + + NVConnectorEvoPtr pConnectorEvo; + + // Notify all connectors which are using DP lib. For DP Serializer connector, + // HPD_IRQ indicates loss of clock/sync, so re-train the link. + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (nvConnectorUsesDPLib(pConnectorEvo)) { + nvDPNotifyShortPulse(pConnectorEvo->pDpLibConnector); + } else if (nvConnectorIsDPSerializer(pConnectorEvo)) { + nvDPSerializerHandleDPIRQ(pDispEvo, pConnectorEvo); + } + } +} diff --git a/src/nvidia-modeset/src/nvkms-evo.c b/src/nvidia-modeset/src/nvkms-evo.c new file mode 100644 index 0000000..f1817a9 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-evo.c @@ -0,0 +1,10006 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "nvkms-api-types.h"
+#include "nvkms-types.h"
+
+#include "nvkms-evo-states.h"
+#include "dp/nvdp-connector.h"
+#include "dp/nvdp-device.h"
+#include "nvkms-console-restore.h"
+#include "nvkms-rm.h"
+#include "nvkms-dpy.h"
+#include "nvkms-cursor.h"
+#include "nvkms-hal.h"
+#include "nvkms-hdmi.h"
+#include "nvkms-modepool.h"
+#include "nvkms-evo.h"
+#include "nvkms-flip.h"
+#include "nvkms-hw-flip.h"
+#include "nvkms-dma.h"
+#include "nvkms-framelock.h"
+#include "nvkms-utils.h"
+#include "nvkms-lut.h"
+#include "nvkms-modeset.h"
+#include "nvkms-prealloc.h"
+#include "nvkms-rmapi.h"
+#include "nvkms-surface.h"
+#include "nvkms-headsurface.h"
+#include "nvkms-difr.h"
+#include "nvkms-vrr.h"
+#include "nvkms-ioctl.h"
+#include "nvkms-setlut-workarea.h"
+
+#include "nvctassert.h"
+
+#include <ctrl/ctrl0073/ctrl0073dfp.h> // NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS
+#include <ctrl/ctrl0073/ctrl0073system.h> // NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH
+#include <ctrl/ctrl0080/ctrl0080gpu.h> // NV0080_CTRL_CMD_GPU_*
+#include <ctrl/ctrl0080/ctrl0080unix.h> // NV0080_CTRL_OS_UNIX_VT_SWITCH_*
+#include <ctrl/ctrl30f1.h> // NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_*
+#include <ctrl/ctrl5070/ctrl5070chnc.h> // NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS
+#include <ctrl/ctrl5070/ctrl5070system.h> // NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2
+#include <ctrl/ctrl5070/ctrl5070or.h> // NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE
+#include <ctrl/ctrl0073/ctrl0073dp.h> // NV0073_CTRL_DP_CTRL
+
+#include "nvkms.h"
+#include "nvkms-private.h"
+#include "nvos.h"
+
+#include "displayport/dpcd.h"
+
+#define EVO_RASTER_LOCK 1
+#define EVO_FLIP_LOCK 2
+
+#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_HEAD 7:0
+#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_BASE_LUT 8:8
+#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_BASE_LUT_DISABLE 0
+#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_BASE_LUT_ENABLE 1
+#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_OUTPUT_LUT 9:9
+#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_OUTPUT_LUT_DISABLE 0
+#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_OUTPUT_LUT_ENABLE 1
+
+/*
+ * This struct is used to describe a single set of GPUs to lock together by
+ * GetRasterLockGroups().
+ */
+typedef struct _NVEvoRasterLockGroup {
+    NvU32 numDisps;
+    NVDispEvoPtr pDispEvoOrder[NVKMS_MAX_SUBDEVICES];
+} RasterLockGroup;
+
+/*
+ * These are used to hold additional state for each DispEvo during building of
+ * RasterLockGroups.
+ */
+typedef struct
+{
+    NVDispEvoPtr pDispEvo;
+    NvU32 gpuId;
+    RasterLockGroup *pRasterLockGroup;
+} DispEntry;
+
+typedef struct
+{
+    /* Array of DispEvos and their assigned RasterLockGroups.
*/ + NvU32 numDisps; + DispEntry disps[NVKMS_MAX_SUBDEVICES]; +} DispEvoList; + +struct _NVLockGroup { + RasterLockGroup rasterLockGroup; + NvBool flipLockEnabled; +}; + +static void EvoSetViewportPointIn(NVDispEvoPtr pDispEvo, NvU32 head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState); +static void GetRasterLockPin(NVDispEvoPtr pDispEvo0, NvU32 head0, + NVDispEvoPtr pDispEvo1, NvU32 head1, + NVEvoLockPin *serverPin, NVEvoLockPin *clientPin); +static NvBool EvoWaitForLock(const NVDevEvoRec *pDevEvo, const NvU32 sd, + const NvU32 head, const NvU32 type, + NvU64 *pStartTime); +static void EvoUpdateHeadParams(const NVDispEvoRec *pDispEvo, NvU32 head, + NVEvoUpdateState *updateState); + +static void SetRefClk(NVDevEvoPtr pDevEvo, + NvU32 sd, NvU32 head, NvBool external, + NVEvoUpdateState *updateState); +static NvBool ApplyLockActionIfPossible(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action); +static void FinishModesetOneDev(NVDevEvoRec *pDevEvo); +static void FinishModesetOneGroup(RasterLockGroup *pRasterLockGroup); +static void EnableFlipLockIfRequested(NVLockGroup *pLockGroup); + +static void SyncEvoLockState(void); +static void UpdateEvoLockState(void); + +static void ScheduleLutUpdate(NVDispEvoRec *pDispEvo, + const NvU32 apiHead, const NvU32 data, + const NvU64 usec); + +NVEvoGlobal nvEvoGlobal = { + .clientHandle = 0, + .frameLockList = NV_LIST_INIT(&nvEvoGlobal.frameLockList), + .devList = NV_LIST_INIT(&nvEvoGlobal.devList), +#if defined(DEBUG) + .debugMemoryAllocationList = + NV_LIST_INIT(&nvEvoGlobal.debugMemoryAllocationList), +#endif /* DEBUG */ +}; + +static RasterLockGroup *globalRasterLockGroups = NULL; +static NvU32 numGlobalRasterLockGroups = 0; + +/* + * Keep track of groups of HW heads which the modeset owner has requested to be + * fliplocked together. All of the heads specified here are guaranteed to be + * active. A given head can only be in one group at a time. Fliplock is not + * guaranteed to be enabled in the hardware for these groups. + */ +typedef struct _FlipLockRequestedGroup { + struct { + NVDispEvoPtr pDispEvo; + NvU32 flipLockHeads; + } disp[NV_MAX_SUBDEVICES]; + + NVListRec listEntry; +} FlipLockRequestedGroup; + +static NVListRec requestedFlipLockGroups = + NV_LIST_INIT(&requestedFlipLockGroups); + +/* + * The dummy infoString should be used in paths that take an + * NVEvoInfoStringPtr where we don't need to log to a + * string. By setting the 's' field to NULL, nothing will be printed + * to the infoString buffer. + */ +NVEvoInfoStringRec dummyInfoString = { + .length = 0, + .totalLength = 0, + .s = NULL, +}; + +/*! + * Return the NVDevEvoPtr, if any, that matches deviceId. + * + * If the deviceId is NVKMS_DEVICE_ID_TEGRA, then find the device with + * pDevEvo->isSOCDisplay set and use that instead. + */ +NVDevEvoPtr nvFindDevEvoByDeviceId(struct NvKmsDeviceId deviceId) +{ + NVDevEvoPtr pDevEvo; + + FOR_ALL_EVO_DEVS(pDevEvo) { + if (pDevEvo->isSOCDisplay && + (deviceId.rmDeviceId == NVKMS_DEVICE_ID_TEGRA)) { + return pDevEvo; + } else if (pDevEvo->deviceId.rmDeviceId == deviceId.rmDeviceId && + pDevEvo->deviceId.migDevice == deviceId.migDevice) { + return pDevEvo; + } + } + + return NULL; +} + +/*! + * Find the first unused gpuLogIndex. 
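+ *
+ * (The scan restarts from the first device whenever a collision is found, so
+ * the value returned is the smallest index not used by any subdevice.)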
+ */
+NvU8 nvGetGpuLogIndex(void)
+{
+    NVDevEvoPtr pDevEvo;
+    NvU8 gpuLogIndex = 0;
+
+ tryAgain:
+    FOR_ALL_EVO_DEVS(pDevEvo) {
+        NvU32 sd;
+        for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+            if (pDevEvo->pSubDevices[sd] == NULL) {
+                continue;
+            }
+            if (gpuLogIndex == pDevEvo->pSubDevices[sd]->gpuLogIndex) {
+                gpuLogIndex++;
+                if (gpuLogIndex == 0xFF) {
+                    nvAssert(!"Too many GPUs");
+                    return NV_INVALID_GPU_LOG_INDEX;
+                }
+                goto tryAgain;
+            }
+        }
+    }
+
+    return gpuLogIndex;
+}
+
+/*!
+ * Return whether there are active heads on this pDispEvo.
+ */
+static NvBool HasActiveHeads(NVDispEvoPtr pDispEvo)
+{
+    return nvGetActiveHeadMask(pDispEvo) != 0;
+}
+
+static void EvoSetLUTContextDmaHelper(const NVDispEvoRec *pDispEvo,
+                                      const NvU32 head,
+                                      NVSurfaceEvoPtr pLutSurfEvo,
+                                      NvBool enableBaseLut,
+                                      NvBool enableOutputLut,
+                                      NVEvoUpdateState *pUpdateState,
+                                      NvBool bypassComposition);
+
+static void BlankHeadEvo(NVDispEvoPtr pDispEvo, const NvU32 head,
+                         NVEvoUpdateState *updateState)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    struct NvKmsCompositionParams emptyCursorCompParams =
+        nvDefaultCursorCompositionParams(pDevEvo);
+
+    /*
+     * If the core channel surface is supported, ->SetSurface() disables the
+     * LUT along with the core channel surface. Otherwise, the LUT needs to
+     * be disabled explicitly.
+     */
+    if (!pDevEvo->hal->caps.supportsCoreChannelSurface) {
+        EvoSetLUTContextDmaHelper(pDispEvo,
+                                  head,
+                                  NULL /* pLutSurfEvo */,
+                                  FALSE /* baseLutEnabled */,
+                                  FALSE /* outputLutEnabled */,
+                                  updateState,
+                                  pHeadState->bypassComposition);
+    }
+
+    nvPushEvoSubDevMaskDisp(pDispEvo);
+
+    pDevEvo->hal->SetCursorImage(pDevEvo,
+                                 head,
+                                 NULL /* pSurfaceEvo */,
+                                 updateState,
+                                 &emptyCursorCompParams);
+
+    {
+        NVFlipChannelEvoHwState hwState = { { 0 } };
+        NvU32 layer;
+
+        for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+            pDevEvo->hal->Flip(pDevEvo,
+                               pDevEvo->head[head].layer[layer],
+                               &hwState,
+                               updateState,
+                               FALSE /* bypassComposition */);
+        }
+    }
+
+    nvPopEvoSubDevMask(pDevEvo);
+}
+
+void nvEvoDetachConnector(NVConnectorEvoRec *pConnectorEvo, const NvU32 head,
+                          NVEvoModesetUpdateState *pModesetUpdateState)
+{
+    NVEvoUpdateState *updateState = &pModesetUpdateState->updateState;
+    NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
+    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 orIndex;
+
+    for (orIndex = 0;
+         orIndex < ARRAY_LEN(pConnectorEvo->or.ownerHeadMask); orIndex++) {
+        if ((pConnectorEvo->or.ownerHeadMask[orIndex] & NVBIT(head)) != 0x0) {
+            break;
+        }
+    }
+
+    if (orIndex >= ARRAY_LEN(pConnectorEvo->or.ownerHeadMask)) {
+        nvAssert(!"Not found attached OR");
+        return;
+    }
+
+    pConnectorEvo->or.ownerHeadMask[orIndex] &= ~NVBIT(head);
+
+    /* Disable the palette, cursor, and ISO ctxDma on this head. */
+    BlankHeadEvo(pDispEvo, head, updateState);
+
+    // Only tear down the actual output for SLI primary.
+    nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner);
+
+    pDevEvo->hal->ORSetControl(pDevEvo,
+                               pConnectorEvo,
+                               pTimings->protocol,
+                               orIndex,
+                               pConnectorEvo->or.ownerHeadMask[orIndex],
+                               updateState);
+
+    /*
+     * Tell RM that no DisplayID is associated with this head anymore.
+     */
+    pDevEvo->hal->HeadSetDisplayId(pDevEvo, head, 0x0, updateState);
+
+    nvPopEvoSubDevMask(pDevEvo);
+
+    pModesetUpdateState->connectorIds =
+        nvAddDpyIdToDpyIdList(pHeadState->pConnectorEvo->displayId,
+                              pModesetUpdateState->connectorIds);
+}
+
+static
+NvU32 GetSorIndexToAttachConnector(const NVConnectorEvoRec *pConnectorEvo,
+                                   const NvBool isPrimaryHead)
+{
+    NvU32 orIndex = NV_INVALID_OR;
+
+    nvAssert(isPrimaryHead ||
+             (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR));
+
+    if (isPrimaryHead ||
+        (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR)) {
+        orIndex = pConnectorEvo->or.primary;
+    } else {
+        NvU32 i;
+
+        FOR_EACH_INDEX_IN_MASK(32, i, pConnectorEvo->or.secondaryMask) {
+            if (pConnectorEvo->or.ownerHeadMask[i] == 0x0) {
+                orIndex = i;
+                break;
+            }
+        } FOR_EACH_INDEX_IN_MASK_END;
+    }
+
+    return orIndex;
+}
+
+void nvEvoAttachConnector(NVConnectorEvoRec *pConnectorEvo,
+                          const NvU32 head,
+                          const NvU32 isPrimaryHead,
+                          NVDPLibModesetStatePtr pDpLibModesetState,
+                          NVEvoModesetUpdateState *pModesetUpdateState)
+{
+    NVEvoUpdateState *updateState = &pModesetUpdateState->updateState;
+    NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
+    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 orIndex =
+        GetSorIndexToAttachConnector(pConnectorEvo, isPrimaryHead);
+    NvU32 i;
+
+    nvAssert(orIndex != NV_INVALID_OR);
+    nvAssert(!(pConnectorEvo->or.ownerHeadMask[orIndex] & NVBIT(head)));
+    nvAssert(pHeadState->activeRmId != 0);
+
+    FOR_EACH_INDEX_IN_MASK(32, i, pConnectorEvo->or.ownerHeadMask[orIndex]) {
+        nvAssert(pTimings->protocol ==
+                 pDispEvo->headState[i].timings.protocol);
+    } FOR_EACH_INDEX_IN_MASK_END;
+
+    pConnectorEvo->or.ownerHeadMask[orIndex] |= NVBIT(head);
+
+    // Only set up the actual output for SLI primary.
+    nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner);
+
+    pDevEvo->hal->ORSetControl(pDevEvo,
+                               pConnectorEvo,
+                               pTimings->protocol,
+                               orIndex,
+                               pConnectorEvo->or.ownerHeadMask[orIndex],
+                               updateState);
+
+
+    /* Tell RM which DisplayID is associated with the head. */
+    pDevEvo->hal->HeadSetDisplayId(pDevEvo,
+                                   head, pHeadState->activeRmId,
+                                   updateState);
+
+    nvPopEvoSubDevMask(pDevEvo);
+
+    pModesetUpdateState->connectorIds =
+        nvAddDpyIdToDpyIdList(pConnectorEvo->displayId,
+                              pModesetUpdateState->connectorIds);
+    pModesetUpdateState->pDpLibModesetState[head] = pDpLibModesetState;
+}
+
+void nvSetViewPortPointInEvo(NVDispEvoPtr pDispEvo,
+                             const NvU32 head,
+                             const NvU16 x,
+                             NvU16 y,
+                             NVEvoUpdateState *updateState)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+
+    NVEvoSubDevHeadStateRec *pSdHeadState =
+        &pDevEvo->gpus[pDispEvo->displayOwner].headState[head];
+
+    pSdHeadState->viewPortPointIn.x = x;
+    pSdHeadState->viewPortPointIn.y = y;
+
+    EvoSetViewportPointIn(pDispEvo, head, x, y, updateState);
+}
+
+//
+// Sets the Update method, which makes all the other methods in the PB take
+// effect.
+//
+static void EvoUpdateAndKickOffWithNotifier(
+    const NVDispEvoRec *pDispEvo,
+    NvBool notify,
+    NvBool sync, int notifier,
+    NVEvoUpdateState *updateState,
+    NvBool releaseElv)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+
+    // Calling code should reject operations that send updates while the console
+    // is active.
+    nvAssert(!pDevEvo->coreInitMethodsPending);
+
+    // It doesn't make sense to request sync without requesting a notifier.
+    nvAssert(!sync || notify);
+
+    if (notify) {
+        // Clear the completion notifier.
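+        // (InitCompNotifier() re-arms the notifier memory; the display engine
+        // writes it once the update completes, and WaitForCompNotifier() below
+        // waits for that write when sync is requested.)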
+ pDevEvo->hal->InitCompNotifier(pDispEvo, notifier); + } + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetNotifier(pDevEvo, notify, sync, notifier, + updateState); + pDevEvo->hal->Update(pDevEvo, updateState, releaseElv); + nvPopEvoSubDevMask(pDevEvo); + + // Wait for completion. + if (sync) { + pDevEvo->hal->WaitForCompNotifier(pDispEvo, notifier); + } + + if (notify) { + const NVDispEvoRec *pDispEvoTmp; + NVEvoUpdateState coreUpdateState = { }; + NvU32 sd; + + // To work around HW bug 1945716 and to prevent subsequent core updates + // from triggering unwanted notifier writes, set the core channel + // completion notifier control and context DMA disables when + // notification is not requested. + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetNotifier(pDevEvo, + FALSE /* notify */, + FALSE /* awaken */, + 0 /* notifier */, + &coreUpdateState); + nvPopEvoSubDevMask(pDevEvo); + + // SetCoreNotifier is only expected to push core channel methods. + FOR_ALL_EVO_DISPLAYS(pDispEvoTmp, sd, pDevEvo) { + if (pDispEvoTmp == pDispEvo) { + nvAssert(coreUpdateState.subdev[sd].channelMask == + DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE)); + } else { + nvAssert(coreUpdateState.subdev[sd].channelMask == 0x0); + } + } + + // We don't really need to kick off here, but might as well to keep the + // state cache up to date. Note that we intentionally don't use + // pDevEvo->hal->Update since we don't want another Update. + nvDmaKickoffEvo(pDevEvo->core); + } + + return; +} + +void nvEvoUpdateAndKickOff(const NVDispEvoRec *pDispEvo, NvBool sync, + NVEvoUpdateState *updateState, NvBool releaseElv) +{ + EvoUpdateAndKickOffWithNotifier(pDispEvo, sync, sync, 0, updateState, + releaseElv); +} + +void nvDoIMPUpdateEvo(NVDispEvoPtr pDispEvo, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + // IMP pre-modeset + pDevEvo->hal->PrePostIMP(pDispEvo, TRUE /* isPre */); + + // Do the update + nvEvoUpdateAndKickOff(pDispEvo, TRUE, updateState, TRUE /* releaseElv */); + + // IMP post-modeset + pDevEvo->hal->PrePostIMP(pDispEvo, FALSE /* isPre */); +} + +void nvEvoFlipUpdate(NVDispEvoPtr pDispEvo, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + int notifier = -1; + + if (nvEvoLUTNotifiersNeedCommit(pDispEvo)) { + notifier = nvEvoCommitLUTNotifiers(pDispEvo); + } + + if (notifier >= 0) { + EvoUpdateAndKickOffWithNotifier(pDispEvo, + TRUE /* notify */, + FALSE /* sync */, + notifier, + updateState, + TRUE /* releaseElv */); + } else { + pDevEvo->hal->Update(pDevEvo, updateState, TRUE /* releaseElv */); + } +} + +/*! + * Tell RM not to expect anything other than a stall lock change during the next + * update. 
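+ *
+ * (Arming this presumably lets RM service the subsequent update through its
+ * lightweight supervisor path instead of a full supervisor sequence; see the
+ * bArmLWSV and bVrrState fields below.)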
+ */
+void nvEvoArmLightweightSupervisor(NVDispEvoPtr pDispEvo,
+                                   const NvU32 head,
+                                   NvBool isVrr,
+                                   NvBool enable)
+{
+    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS params = { };
+    const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
+
+    if (!nvHeadIsActive(pDispEvo, head)) {
+        return;
+    }
+
+    nvAssert(!pTimings->interlaced && !pTimings->doubleScan);
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = pHeadState->activeRmId;
+    params.bArmLWSV = enable;
+    params.bVrrState = isVrr;
+    params.vActive = nvEvoVisibleHeight(pTimings);
+    params.vfp = pTimings->rasterSize.y -
+                 pTimings->rasterBlankStart.y;
+
+    if (nvRmApiControl(nvEvoGlobal.clientHandle,
+                       pDispEvo->pDevEvo->displayCommonHandle,
+                       NV0073_CTRL_CMD_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR,
+                       &params, sizeof(params))
+            != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"ARM_LIGHTWEIGHT_SUPERVISOR failed");
+    }
+}
+
+/*
+ * Convert from NVHwModeTimingsEvoPtr to NvModeTimingsPtr.
+ *
+ * Note that converting from NvModeTimingsPtr to
+ * NVHwModeTimingsEvoPtr (via
+ * ConstructHwModeTimingsFromNvModeTimings()) and converting back from
+ * NVHwModeTimingsEvoPtr to NvModeTimingsPtr (via
+ * nvConstructNvModeTimingsFromHwModeTimings()) can lose precision in
+ * the case of interlaced modes due to the division by 2. This
+ * function should only be used for reporting purposes.
+ */
+
+void
+nvConstructNvModeTimingsFromHwModeTimings(const NVHwModeTimingsEvo *pTimings,
+                                          NvModeTimingsPtr pModeTimings)
+{
+    NvU32 rasterBlankEndY, rasterSyncEndY;
+
+    if (!pTimings || !pModeTimings) {
+        nvAssert(!"Null params");
+        return;
+    }
+
+    pModeTimings->pixelClockHz = KHzToHz(pTimings->pixelClock);
+    pModeTimings->hVisible = nvEvoVisibleWidth(pTimings);
+    pModeTimings->hSyncStart = pTimings->rasterSize.x -
+                               pTimings->rasterBlankEnd.x - 1;
+    pModeTimings->hSyncEnd = pTimings->rasterSize.x -
+                             pTimings->rasterBlankEnd.x +
+                             pTimings->rasterSyncEnd.x;
+    pModeTimings->hTotal = pTimings->rasterSize.x;
+    pModeTimings->vVisible = nvEvoVisibleHeight(pTimings);
+    rasterBlankEndY = pTimings->rasterBlankEnd.y + 1;
+    rasterSyncEndY = pTimings->rasterSyncEnd.y + 1;
+
+    if (pTimings->interlaced) {
+        rasterBlankEndY *= 2;
+        rasterSyncEndY *= 2;
+    }
+
+    /*
+     * The real pixel clock and width values for modes using YUV 420 emulation
+     * are half of the incoming values parsed from the EDID. This conversion is
+     * performed here, so NvModeTimings will have the user-visible (full width)
+     * values, and NVHwModeTimingsEvo will have the real (half width) values.
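+     *
+     * For example, a 3840x2160 mode driven in SW YUV 4:2:0 emulation is
+     * programmed to hardware as 1920x2160 at half the pixel clock; the
+     * doubling below reports the full-width timings back to clients.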
+ */ + if (pTimings->yuv420Mode == NV_YUV420_MODE_SW) { + pModeTimings->pixelClockHz *= 2; + pModeTimings->hVisible *= 2; + pModeTimings->hSyncStart *= 2; + pModeTimings->hSyncEnd *= 2; + pModeTimings->hTotal *= 2; + } + + pModeTimings->vSyncStart = pTimings->rasterSize.y - rasterBlankEndY; + pModeTimings->vSyncEnd = pTimings->rasterSize.y - rasterBlankEndY + + rasterSyncEndY; + pModeTimings->vTotal = pTimings->rasterSize.y; + pModeTimings->interlaced = pTimings->interlaced; + pModeTimings->doubleScan = pTimings->doubleScan; + pModeTimings->hSyncNeg = pTimings->hSyncPol; + pModeTimings->hSyncPos = !pTimings->hSyncPol; + pModeTimings->vSyncNeg = pTimings->vSyncPol; + pModeTimings->vSyncPos = !pTimings->vSyncPol; + pModeTimings->RRx1k = (pModeTimings->pixelClockHz / + (pModeTimings->hTotal * + pModeTimings->vTotal)); + + if (pModeTimings->doubleScan) { + pModeTimings->vVisible /= 2; + pModeTimings->vSyncStart /= 2; + pModeTimings->vSyncEnd /= 2; + pModeTimings->vTotal /= 2; + } + + pModeTimings->hdmi3D = pTimings->hdmi3D; + pModeTimings->yuv420Mode = pTimings->yuv420Mode; +} + + + +/* + * Tweak pTimings to be compatible with gsync. + */ + +static void TweakTimingsForGsync(const NVDpyEvoRec *pDpyEvo, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString, + const enum NvKmsStereoMode stereo) +{ + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS gsyncOptTimingParams = { 0 }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NvModeTimings modeTimings; + NvU32 ret; + + /* + * if 3D Vision Stereo is enabled, do not actually + * tweak the modetimings; WAR for bug 692266 + */ + + if (nvIs3DVisionStereoEvo(stereo)) { + + nvEvoLogInfoString(pInfoString, + "Not adjusting mode timings of %s for Quadro Sync " + "compatibility since 3D Vision Stereo is enabled.", + pDpyEvo->name); + return; + } + + gsyncOptTimingParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + + if (pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) { + + gsyncOptTimingParams.output = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_SOR; + gsyncOptTimingParams.adjust = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_DFP; + + } else if (pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) { + + gsyncOptTimingParams.output = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_DAC; + gsyncOptTimingParams.adjust = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_CRT; + } + + gsyncOptTimingParams.pixelClockHz = KHzToHz(pTimings->pixelClock); + + if (pTimings->interlaced) { + gsyncOptTimingParams.structure = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_INTERLACED; + } else { + gsyncOptTimingParams.structure = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_PROGRESSIVE; + } + + gsyncOptTimingParams.hDeltaStep = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_STEP_USE_DEFAULTS; + gsyncOptTimingParams.vDeltaStep = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_STEP_USE_DEFAULTS; + gsyncOptTimingParams.hDeltaMax = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_MAX_USE_DEFAULTS; + gsyncOptTimingParams.vDeltaMax = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_MAX_USE_DEFAULTS; + + gsyncOptTimingParams.hSyncEnd = pTimings->rasterSyncEnd.x + 1; + gsyncOptTimingParams.hBlankEnd = pTimings->rasterBlankEnd.x + 1; + gsyncOptTimingParams.hBlankStart = pTimings->rasterBlankStart.x + 1; + gsyncOptTimingParams.hTotal = pTimings->rasterSize.x; + + gsyncOptTimingParams.vSyncEnd = pTimings->rasterSyncEnd.y + 1; + gsyncOptTimingParams.vBlankEnd = 
pTimings->rasterBlankEnd.y + 1; + gsyncOptTimingParams.vBlankStart = pTimings->rasterBlankStart.y + 1; + gsyncOptTimingParams.vTotal = pTimings->rasterSize.y; + + gsyncOptTimingParams.vInterlacedBlankEnd = pTimings->rasterVertBlank2End; + gsyncOptTimingParams.vInterlacedBlankStart = + pTimings->rasterVertBlank2Start; + + switch (pTimings->protocol) { + case NVKMS_PROTOCOL_DAC_RGB: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_DAC_RGB_CRT; + break; + case NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC: + nvAssert(!"GSYNC_GET_OPTIMIZED_TIMING doesn't handle external TMDS."); + // fallthrough + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_A; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_B; + break; + case NVKMS_PROTOCOL_SOR_DUAL_TMDS: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DUAL_TMDS; + break; + case NVKMS_PROTOCOL_SOR_DP_A: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_A; + break; + case NVKMS_PROTOCOL_SOR_DP_B: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_B; + break; + case NVKMS_PROTOCOL_SOR_LVDS_CUSTOM: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_LVDS_CUSTOM; + break; + case NVKMS_PROTOCOL_SOR_HDMI_FRL: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_HDMI_FRL; + break; + case NVKMS_PROTOCOL_DSI: + nvAssert(!"GSYNC_GET_OPTIMIZED_TIMING doesn't handle DSI."); + return; + } + + nvEvoLogInfoString(pInfoString, + "Adjusting Mode Timings for Quadro Sync Compatibility"); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDispEvo->pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_OPTIMIZED_TIMING, + &gsyncOptTimingParams, + sizeof(gsyncOptTimingParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to convert to Quadro Sync safe timing"); + /* do not apply the timings returned by RM if the call failed */ + return; + } else if (!gsyncOptTimingParams.bOptimized) { + nvEvoLogInfoString(pInfoString, " Timings Unchanged."); + return; + } + + nvConstructNvModeTimingsFromHwModeTimings(pTimings, &modeTimings); + + nvEvoLogInfoString(pInfoString, " Old Timings:"); + nvEvoLogModeValidationModeTimings(pInfoString, &modeTimings); + + pTimings->rasterSyncEnd.x = gsyncOptTimingParams.hSyncEnd - 1; + pTimings->rasterSyncEnd.y = gsyncOptTimingParams.vSyncEnd - 1; + pTimings->rasterBlankEnd.x = gsyncOptTimingParams.hBlankEnd - 1; + pTimings->rasterBlankEnd.y = gsyncOptTimingParams.vBlankEnd - 1; + pTimings->rasterBlankStart.x = gsyncOptTimingParams.hBlankStart - 1; + pTimings->rasterBlankStart.y = gsyncOptTimingParams.vBlankStart - 1; + pTimings->rasterSize.x = gsyncOptTimingParams.hTotal; + pTimings->rasterSize.y = gsyncOptTimingParams.vTotal; + + if (gsyncOptTimingParams.structure == + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_INTERLACED) { + pTimings->rasterVertBlank2Start = + gsyncOptTimingParams.vInterlacedBlankStart; + pTimings->rasterVertBlank2End = + gsyncOptTimingParams.vInterlacedBlankEnd; + } + + pTimings->pixelClock = HzToKHz(gsyncOptTimingParams.pixelClockHz); // Hz to KHz + + nvConstructNvModeTimingsFromHwModeTimings(pTimings, &modeTimings); + + nvEvoLogInfoString(pInfoString, " New Timings:"); + 
    nvEvoLogModeValidationModeTimings(pInfoString, &modeTimings);
+}
+
+static NvBool HeadStateIsHdmiTmdsDeepColor(const NVDispHeadStateEvoRec *pHeadState)
+{
+    nvAssert(pHeadState->pConnectorEvo != NULL);
+
+    // Check for HDMI TMDS.
+    if (pHeadState->pConnectorEvo->isHdmiEnabled &&
+        (pHeadState->timings.protocol != NVKMS_PROTOCOL_SOR_HDMI_FRL)) {
+        // Check for pixelDepth >= 30.
+        switch (pHeadState->pixelDepth) {
+            case NVKMS_PIXEL_DEPTH_18_444:
+            case NVKMS_PIXEL_DEPTH_24_444:
+            case NVKMS_PIXEL_DEPTH_20_422:
+            case NVKMS_PIXEL_DEPTH_16_422:
+                return FALSE;
+            case NVKMS_PIXEL_DEPTH_30_444:
+                return TRUE;
+        }
+    }
+
+    return FALSE;
+}
+
+/*!
+ * Check whether rasterlock is possible between the two head states.
+ * Note that we don't compare viewports, but I don't believe the viewport size
+ * affects whether it is possible to rasterlock.
+ */
+
+static NvBool RasterLockPossible(const NVDispHeadStateEvoRec *pHeadState1,
+                                 const NVDispHeadStateEvoRec *pHeadState2)
+{
+    const NVHwModeTimingsEvo *pTimings1 = &pHeadState1->timings;
+    const NVHwModeTimingsEvo *pTimings2 = &pHeadState2->timings;
+
+    /*
+     * XXX Bug 4235728: With HDMI TMDS signaling >= 10 BPC, display requires a
+     * higher VPLL clock multiplier varying by pixel depth, which can cause
+     * rasterlock to fail between heads with differing multipliers. So, if a
+     * head is using HDMI TMDS >= 10 BPC, it can only rasterlock with heads
+     * that are using HDMI TMDS with the same pixel depth.
+     */
+
+    // If either head is HDMI TMDS DeepColor (10+ BPC)...
+    if (HeadStateIsHdmiTmdsDeepColor(pHeadState1) ||
+        HeadStateIsHdmiTmdsDeepColor(pHeadState2)) {
+        // The other head must also be HDMI TMDS DeepColor.
+        if (!HeadStateIsHdmiTmdsDeepColor(pHeadState1) ||
+            !HeadStateIsHdmiTmdsDeepColor(pHeadState2)) {
+            return FALSE;
+        }
+
+        // Both heads must have identical pixel depth.
+        if (pHeadState1->pixelDepth != pHeadState2->pixelDepth) {
+            return FALSE;
+        }
+    }
+
+    return ((pTimings1->rasterSize.x == pTimings2->rasterSize.x) &&
+            (pTimings1->rasterSize.y == pTimings2->rasterSize.y) &&
+            (pTimings1->rasterSyncEnd.x == pTimings2->rasterSyncEnd.x) &&
+            (pTimings1->rasterSyncEnd.y == pTimings2->rasterSyncEnd.y) &&
+            (pTimings1->rasterBlankEnd.x == pTimings2->rasterBlankEnd.x) &&
+            (pTimings1->rasterBlankEnd.y == pTimings2->rasterBlankEnd.y) &&
+            (pTimings1->rasterBlankStart.x == pTimings2->rasterBlankStart.x) &&
+            (pTimings1->rasterBlankStart.y == pTimings2->rasterBlankStart.y) &&
+            (pTimings1->rasterVertBlank2Start ==
+             pTimings2->rasterVertBlank2Start) &&
+            (pTimings1->rasterVertBlank2End ==
+             pTimings2->rasterVertBlank2End) &&
+            (pTimings1->pixelClock == pTimings2->pixelClock) &&
+            (pTimings1->hSyncPol == pTimings2->hSyncPol) &&
+            (pTimings1->vSyncPol == pTimings2->vSyncPol) &&
+            (pTimings1->interlaced == pTimings2->interlaced) &&
+            (pTimings1->doubleScan == pTimings2->doubleScan));
+
+}
+
+/*!
+ * Fill the overscan color struct to be passed to SetRasterParams based on
+ * whether or not SW yuv420 is enabled.
+ *
+ * \param[out] pOverscanColor  The overscan color struct to be filled
+ * \param[in]  yuv420          Whether or not SW yuv420 is enabled
+ */
+static void SetOverscanColor(NVEvoColorPtr pOverscanColor, NvBool yuv420)
+{
+    // Black in RGB format.
+    // If we're using an emulated YUV 4:2:0 mode, set the equivalent in
+    // YUV ITU-R BT.709 (64/64/512).
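+    // (In 10-bit limited-range YCbCr, black is luma 64 with neutral chroma
+    // 512; the red/green/blue fields here are presumably reinterpreted by the
+    // hardware as YCbCr components in this mode.)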
+ if (yuv420) { + pOverscanColor->red = 64; + pOverscanColor->green = 64; + pOverscanColor->blue = 512; + } else { + pOverscanColor->red = 0; + pOverscanColor->green = 0; + pOverscanColor->blue = 0; + } + +#if defined(DEBUG) + // Override the overscan color to red in debug builds. + // XXX This will look different for YUV 4:2:0 + pOverscanColor->red = 1023; + pOverscanColor->green = 0; + pOverscanColor->blue = 0; +#endif +} + +void nvEvoDisableHwYUV420Packer(const NVDispEvoRec *pDispEvo, + const NvU32 head, + NVEvoUpdateState *pUpdateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].hwYuv420 = FALSE; + EvoUpdateHeadParams(pDispEvo, head, pUpdateState); +} + +/* + * Send the raster timings for the pDpyEvo to EVO. + */ +void nvEvoSetTimings(NVDispEvoPtr pDispEvo, + const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + const NVDscInfoEvoRec *pDscInfo = &pHeadState->dscInfo; + const enum nvKmsPixelDepth pixelDepth = pHeadState->pixelDepth; + NVEvoColorRec overscanColor; + + nvPushEvoSubDevMaskDisp(pDispEvo); + SetOverscanColor(&overscanColor, (pTimings->yuv420Mode == + NV_YUV420_MODE_SW)); + + pDevEvo->hal->SetRasterParams(pDevEvo, head, + pTimings, pHeadState->mergeHeadSection, + pDscInfo, &overscanColor, updateState); + + // Set the head parameters + pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].interlaced = + pTimings->interlaced; + pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].hdmi3D = + pTimings->hdmi3D; + + /* + * Current HW does not support the combination of HW YUV420 and DSC. + * HW YUV420 is currently only supported with HDMI, so we should never see + * the combination of DP DSC and HW YUV420. + * The combination of HDMI FRL DSC and HW YUV420 should be disallowed by + * the HDMI library. + */ + nvAssert(!((pTimings->yuv420Mode == NV_YUV420_MODE_HW) && + (pDscInfo->type != NV_DSC_INFO_EVO_TYPE_DISABLED))); + + pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].hwYuv420 = + (pTimings->yuv420Mode == NV_YUV420_MODE_HW); + + EvoUpdateHeadParams(pDispEvo, head, updateState); + + pDevEvo->hal->SetDscParams(pDispEvo, head, pDscInfo, pixelDepth); + + nvPopEvoSubDevMask(pDevEvo); +} + + +/* + * Increase the size of the provided raster lock group by 1. + * + * This involves incrementing *pNumRasterLockGroups, reallocating the + * pRasterLockGroups array, and initializing the new entry. 
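+ *
+ * Note the ownership pattern: on allocation failure the old array is freed
+ * and NULL is returned, so the caller's previous pointer must be treated as
+ * invalid. A minimal (hypothetical) usage sketch:
+ *
+ *   unsigned int numGroups = 0;
+ *   RasterLockGroup *pGroups = NULL;
+ *
+ *   pGroups = GrowRasterLockGroup(pGroups, &numGroups);
+ *   if (pGroups != NULL) {
+ *       // numGroups is now 1; the new entry is pGroups[numGroups - 1].
+ *       AddDispEvoIntoRasterLockGroup(&pGroups[numGroups - 1], pDispEvo);
+ *   }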
+ */ +static RasterLockGroup *GrowRasterLockGroup(RasterLockGroup *pRasterLockGroups, + unsigned int *pNumRasterLockGroups) +{ + RasterLockGroup *pNewRasterLockGroups, *pRasterLockGroup; + unsigned int numRasterLockGroups; + + numRasterLockGroups = *pNumRasterLockGroups; + + numRasterLockGroups++; + pNewRasterLockGroups = + nvRealloc(pRasterLockGroups, + numRasterLockGroups * sizeof(RasterLockGroup)); + if (!pNewRasterLockGroups) { + nvFree(pRasterLockGroups); + *pNumRasterLockGroups = 0; + return NULL; + } + + pRasterLockGroup = &pNewRasterLockGroups[numRasterLockGroups - 1]; + pRasterLockGroup->numDisps = 0; + + *pNumRasterLockGroups = numRasterLockGroups; + + return pNewRasterLockGroups; +} + +static RasterLockGroup *CopyAndAppendRasterLockGroup( + RasterLockGroup *pRasterLockGroups, + unsigned int *pNumRasterLockGroups, + const RasterLockGroup *source) +{ + RasterLockGroup *dest; + + pRasterLockGroups = GrowRasterLockGroup(pRasterLockGroups, + pNumRasterLockGroups); + if (pRasterLockGroups) { + dest = &pRasterLockGroups[*pNumRasterLockGroups - 1]; + nvkms_memcpy(dest, source, sizeof(RasterLockGroup)); + } + + return pRasterLockGroups; +} + +static void AddDispEvoIntoRasterLockGroup(RasterLockGroup *pRasterLockGroup, + NVDispEvoPtr pDispEvo) +{ + NvU32 i; + + /* + * The extent of a RasterLockGroup is the largest number of GPUs that can + * be linked together. + */ + nvAssert(pRasterLockGroup->numDisps < NVKMS_MAX_SUBDEVICES); + + /* Caller should keep track of not adding duplicate entries. */ + for (i = 0; i < pRasterLockGroup->numDisps; i++) { + nvAssert(pRasterLockGroup->pDispEvoOrder[i] != pDispEvo); + } + + /* Add to the end of the array. */ + pRasterLockGroup->pDispEvoOrder[pRasterLockGroup->numDisps] = pDispEvo; + pRasterLockGroup->numDisps++; +} + +static const RasterLockGroup *FindRasterLockGroupForDispEvo( + const RasterLockGroup *pRasterLockGroups, + unsigned int numRasterLockGroups, + const NVDispEvoPtr pDispEvo) +{ + const RasterLockGroup *pRasterLockGroup; + NvU32 i; + + for (pRasterLockGroup = pRasterLockGroups; + pRasterLockGroup < pRasterLockGroups + numRasterLockGroups; + pRasterLockGroup++) { + for (i = 0; i < pRasterLockGroup->numDisps; i++) { + if (pRasterLockGroup->pDispEvoOrder[i] == pDispEvo) { + return pRasterLockGroup; + } + } + } + + return NULL; +} + +static DispEntry *DispEvoListFindDispByGpuId (DispEvoList *list, NvU32 gpuId) +{ + NvU32 i; + + for (i = 0; i < list->numDisps; i++) { + if (list->disps[i].gpuId == gpuId) { + return &list->disps[i]; + } + } + + return NULL; +} + +static void DispEvoListInit(DispEvoList *list) +{ + list->numDisps = 0; +} + +static void DispEvoListAppend(DispEvoList *list, NVDispEvoPtr pDispEvo) +{ + nvAssert(DispEvoListFindDispByGpuId( + list, nvGpuIdOfDispEvo(pDispEvo)) == NULL); + + nvAssert(list->numDisps < ARRAY_LEN(list->disps)); + list->disps[list->numDisps].pDispEvo = pDispEvo; + list->disps[list->numDisps].gpuId = nvGpuIdOfDispEvo(pDispEvo); + list->disps[list->numDisps].pRasterLockGroup = NULL; + list->numDisps++; +} + +/* + * Helper function to look up, for a gpuId, the list of connected GPUs in + * NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS. 
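+ *
+ * The links array is scanned up to the first entry whose gpuId is
+ * NV0000_CTRL_GPU_INVALID_ID, which terminates the list.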
+ */
+static NV0000_CTRL_GPU_VIDEO_LINKS *FindLinksForGpuId(
+    NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS *vidLinksParams,
+    NvU32 gpuId)
+{
+    NvU32 i;
+
+    for (i = 0; i < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS; i++) {
+        if (vidLinksParams->links[i].gpuId == NV0000_CTRL_GPU_INVALID_ID) {
+            break;
+        }
+
+        if (vidLinksParams->links[i].gpuId == gpuId) {
+            return &vidLinksParams->links[i];
+        }
+    }
+
+    return NULL;
+}
+
+static void BuildRasterLockGroupFromVideoLinks(
+    DispEvoList *list,
+    RasterLockGroup *pRasterLockGroup,
+    NvU32 gpuId,
+    NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS *vidLinksParams)
+{
+    DispEntry *dispEntry;
+    NV0000_CTRL_GPU_VIDEO_LINKS *links;
+    NvU32 i;
+
+    /* Find the correct DispEntry for the gpuId. If we can't find one, the
+     * gpuId must belong to a device that was not listed in our DispEvoList:
+     * ignore these links at this point. */
+    dispEntry = DispEvoListFindDispByGpuId(list, gpuId);
+    if (!dispEntry) {
+        return;
+    }
+
+    /*
+     * Unless we've seen this gpuId already, add it into the current
+     * RasterLockGroup and try to discover bridged GPUs.
+     */
+    if (!dispEntry->pRasterLockGroup) {
+        /* Assign in the current RasterLockGroup. */
+        AddDispEvoIntoRasterLockGroup(pRasterLockGroup, dispEntry->pDispEvo);
+        dispEntry->pRasterLockGroup = pRasterLockGroup;
+
+        /* First, get the links for this gpuId. */
+        links = FindLinksForGpuId(vidLinksParams, gpuId);
+
+        /* Recurse into connected GPUs. */
+        if (links) {
+            for (i = 0; i < NV0000_CTRL_GPU_MAX_VIDEO_LINKS; i++) {
+                if (links->connectedGpuIds[i] == NV0000_CTRL_GPU_INVALID_ID) {
+                    break;
+                }
+
+                BuildRasterLockGroupFromVideoLinks(list,
+                                                   pRasterLockGroup,
+                                                   links->connectedGpuIds[i],
+                                                   vidLinksParams);
+            }
+        }
+    }
+}
+
+/*
+ * Stateless (RM SLI/client SLI agnostic) discovery of bridged GPUs: build
+ * RasterLockGroups for all non-RM SLI devices based on the found GPU links.
+ *
+ * This function and BuildRasterLockGroupFromVideoLinks() implement a simple
+ * algorithm that puts clusters of bridged GPUs into distinct RasterLockGroups.
+ * Here's an outline of how we generate the final RasterLockGroups:
+ *
+ * 1. Create a DispEvoList array to hold RasterLockGroup state for all the
+ *    DispEvo objects in the system.
+ *
+ * 2. Query RM for an array of video links for each GPU.
+ *
+ * 3. As long as the DispEvoList contains DispEvos of the given pDevEvo
+ *    without a group, find the first such DispEvo, create a new group,
+ *    and populate it by recursively adding the DispEvo and all its
+ *    connected DispEvos into the new group.
+ *
+ * 4. Once all known DispEvos are assigned, the result will be a list of
+ *    global RasterLockGroups, each of which hosts DispEvos that are
+ *    connected together.
+ *
+ * The result of this function should be cached once and later used to
+ * cheaply look up the appropriate, immutable RasterLockGroup for a DispEvo.
+ */
+static RasterLockGroup *GetRasterLockGroupsStateless(
+    unsigned int *pNumRasterLockGroups)
+{
+    RasterLockGroup *pRasterLockGroups = NULL;
+    RasterLockGroup *pRasterLockGroup;
+    DispEvoList evoList;
+    NVDevEvoPtr pCurDev;
+    NVDispEvoPtr pCurDisp;
+    NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS *vidLinksParams;
+    NvU32 sd;
+    NvU32 i;
+
+    DispEvoListInit(&evoList);
+
+    /*
+     * First create an array of DispEntries to hold some state for all the
+     * DispEvos in the system.
+     */
+    FOR_ALL_EVO_DEVS(pCurDev) {
+        /*
+         * Only include non-RM SLI devices so as not to clash with multi-GPU
+         * RM SLI devices.
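+         * A device with numSubDevices == 1 is by definition not an RM SLI
+         * device, so it is safe to include here.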
+ */ + if (pCurDev->numSubDevices == 1) { + FOR_ALL_EVO_DISPLAYS(pCurDisp, sd, pCurDev) { + DispEvoListAppend(&evoList, pCurDisp); + } + } + } + + /* + * Ask RM about the currently known video links. + */ + vidLinksParams = nvCalloc(1, sizeof(*vidLinksParams)); + if (!vidLinksParams) { + return NULL; + } + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_GPU_GET_VIDEO_LINKS, + vidLinksParams, + sizeof(*vidLinksParams)) == NVOS_STATUS_SUCCESS) { + + for (i = 0; i < evoList.numDisps; i++) { + /* + * Create a new group starting from the first DispEvo not yet + * assigned into a RasterLockGroup, and all GPUs possibly reachable + * from it through bridges. + * + * TODO: Consider if we should only ever start a new + * RasterLockGroup with a GPU that has only one connection and not + * two. Then the group's pDispEvoOrder would always start from a + * "leaf" GPU of a linkage graph. But will the GPU links always be + * linear and non-branching? NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS + * makes it possible to represent GPUs with any number of links. + * Either FinishModesetOneGroup() must be able to handle that + * (in which case this is not a concern) or we must be able to + * trust that only 0-2 links will be reported per GPU. + */ + if (evoList.disps[i].pRasterLockGroup) { + continue; + } + + pRasterLockGroups = GrowRasterLockGroup(pRasterLockGroups, + pNumRasterLockGroups); + if (!pRasterLockGroups) { + nvFree(vidLinksParams); + return NULL; + } + + pRasterLockGroup = &pRasterLockGroups[*pNumRasterLockGroups - 1]; + + BuildRasterLockGroupFromVideoLinks(&evoList, + pRasterLockGroup, + evoList.disps[i].gpuId, + vidLinksParams); + } + + nvFree(vidLinksParams); + nvAssert(*pNumRasterLockGroups > 0); + return pRasterLockGroups; + } + + nvFree(vidLinksParams); + nvFree(pRasterLockGroups); + return NULL; +} + +/* + * GetRasterLockGroups() - Determine which GPUs to consider for locking (or + * unlocking) displays. This is one of the following: + * 1. SLI video bridge order, if SLI is enabled; + * 2. GPUs linked through rasterlock pins, no SLI (like in clientSLI); + * 3. A single GPU, + * in that order. + * + * Note that we still go through the same codepaths for the last degenerate + * case, in order to potentially lock heads on the same GPU together. + */ +static RasterLockGroup *GetRasterLockGroups( + NVDevEvoPtr pDevEvo, + unsigned int *pNumRasterLockGroups) +{ + unsigned int i; + RasterLockGroup *pRasterLockGroups = NULL; + + *pNumRasterLockGroups = 0; + + if (pDevEvo->numSubDevices > 1 && pDevEvo->sli.bridge.present) { + NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS params = { 0 }; + NvU32 ret; + + /* In SLI, with a video bridge. Get the video bridge order from RM. 
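+         * The subdevices in params.Order[] are reported in video bridge
+         * order, which is also the order we want for the group's
+         * pDispEvoOrder.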
+         */
+
+        if ((ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                                  pDevEvo->deviceHandle,
+                                  NV0080_CTRL_CMD_GPU_GET_VIDLINK_ORDER,
+                                  &params, sizeof(params)))
+                != NVOS_STATUS_SUCCESS) {
+            nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                             "NvRmControl(GET_VIDLINK_ORDER) failed; "
+                             "ret: %d\n", ret);
+            return NULL;
+        }
+
+        if (params.ConnectionCount > 0) {
+            RasterLockGroup *pRasterLockGroup;
+            pRasterLockGroups = GrowRasterLockGroup(pRasterLockGroups,
+                                                    pNumRasterLockGroups);
+
+            if (!pRasterLockGroups) {
+                return NULL;
+            }
+
+            pRasterLockGroup = &pRasterLockGroups[*pNumRasterLockGroups - 1];
+
+            /*
+             * For some reason this interface returns a mask instead of an
+             * index, so we have to convert.
+             */
+            for (i = 0; i < pDevEvo->numSubDevices; i++) {
+                NvU32 subDeviceMask = params.Order[i];
+                NvU32 sd = 0;
+
+                nvAssert(nvPopCount32(subDeviceMask) == 1);
+
+                if (!subDeviceMask) continue;
+
+                while (!(subDeviceMask & (1 << sd))) sd++;
+
+                nvAssert(sd < NVKMS_MAX_SUBDEVICES);
+                nvAssert(pDevEvo->pDispEvo[sd] != NULL);
+
+                /* SLI Mosaic. */
+                AddDispEvoIntoRasterLockGroup(pRasterLockGroup,
+                                              pDevEvo->pDispEvo[sd]);
+            }
+        }
+
+        if (*pNumRasterLockGroups > 0) {
+            return pRasterLockGroups;
+        }
+    }
+
+    /*
+     * Client SLI: Create a RasterLockGroup from pDevEvo's only DispEvo
+     * and other DispEvos potentially bridged to that.
+     */
+
+    if (pDevEvo->numSubDevices == 1) {
+        /* Get-or-create cached RasterLockGroups for this device. */
+        if (!globalRasterLockGroups) {
+            globalRasterLockGroups =
+                GetRasterLockGroupsStateless(&numGlobalRasterLockGroups);
+        }
+
+        /* Look for a cached group containing this device's DispEvo. */
+        if (globalRasterLockGroups && numGlobalRasterLockGroups > 0) {
+            const RasterLockGroup *pRasterLockGroup =
+                FindRasterLockGroupForDispEvo(globalRasterLockGroups,
+                                              numGlobalRasterLockGroups,
+                                              pDevEvo->pDispEvo[0]);
+
+            /* Make a copy of it and add to 'pRasterLockGroups'. */
+            if (pRasterLockGroup) {
+                pRasterLockGroups =
+                    CopyAndAppendRasterLockGroup(pRasterLockGroups,
+                                                 pNumRasterLockGroups,
+                                                 pRasterLockGroup);
+            }
+        }
+
+        if (*pNumRasterLockGroups > 0) {
+            return pRasterLockGroups;
+        }
+    }
+
+    /*
+     * Single GPU or bridgeless SLI. We create a group for each
+     * individual DispEvo.
+     */
+
+    NVDispEvoPtr pDispEvo;
+    unsigned int sd;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+        RasterLockGroup *pRasterLockGroup;
+        pRasterLockGroups = GrowRasterLockGroup(pRasterLockGroups,
+                                                pNumRasterLockGroups);
+
+        if (!pRasterLockGroups) {
+            return NULL;
+        }
+
+        pRasterLockGroup = &pRasterLockGroups[*pNumRasterLockGroups - 1];
+
+        AddDispEvoIntoRasterLockGroup(pRasterLockGroup, pDispEvo);
+    }
+
+    return pRasterLockGroups;
+}
+
+/*
+ * ApplyLockActionIfPossible() - Check if the given action is a valid
+ * transition for this pEvoSubDev's state, and apply it if so.
+ * Return TRUE if any hardware state needs to be updated, FALSE otherwise.
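+ *
+ * The scanLockState state machine is queried first with a NULL head list to
+ * test whether the transition is valid; only then is the action applied for
+ * real, with an NV_INVALID_HEAD-terminated list of the active heads.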
+ */ +static NvBool ApplyLockActionIfPossible(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action) +{ + if (!pEvoSubDev) { + return FALSE; + } + + if (pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, + action, NULL)) { + unsigned int i = 0; + NvU32 pHeads[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, }; + NvU32 head; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (nvHeadIsActive(pDispEvo, head)) { + pHeads[i++] = head; + } + } + nvAssert(i <= NVKMS_MAX_HEADS_PER_DISP); + pHeads[i] = NV_INVALID_HEAD; + + pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, pHeads); + + return TRUE; + } + + return FALSE; + +} // ApplyLockActionIfPossible() + +/* + * Disable any intra-GPU lock state set up in FinishModesetOneDisp(). + * This assumes that any cross-GPU locking which may have been set up on this + * GPU was already torn down. + */ +static void UnlockRasterLockOneDisp(NVDispEvoPtr pDispEvo) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 sd = pDispEvo->displayOwner; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvBool changed = FALSE; + + /* Initialize the assembly state */ + SyncEvoLockState(); + + /* We want to evaluate all of these, so don't use || */ + changed |= ApplyLockActionIfPossible(pDispEvo, pEvoSubDev, + NV_EVO_PROHIBIT_LOCK_DISABLE); + changed |= ApplyLockActionIfPossible(pDispEvo, pEvoSubDev, + NV_EVO_UNLOCK_HEADS); + + /* Update the hardware if anything has changed */ + if (changed) { + UpdateEvoLockState(); + } + + pDispEvo->rasterLockPossible = FALSE; +} + +/* + * Call UnlockRasterLockOneDisp() for each disp on this device to tear down + * intra-GPU locking on each. + */ +static void UnlockRasterLockOneDev(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 sd; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + UnlockRasterLockOneDisp(pDispEvo); + } +} + +static void DisableLockGroupFlipLock(NVLockGroup *pLockGroup) +{ + + const RasterLockGroup *pRasterLockGroup = &pLockGroup->rasterLockGroup; + NvU32 i; + + if (!pLockGroup->flipLockEnabled) { + return; + } + + for (i = 0; i < pRasterLockGroup->numDisps; i++) { + NVEvoUpdateState updateState = { }; + NVDispEvoPtr pDispEvo = pRasterLockGroup->pDispEvoOrder[i]; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 sd = pDispEvo->displayOwner; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvU32 head; + NvBool changed = FALSE; + + for (head = 0; head < pDevEvo->numHeads; head++) { + NvBool headChanged = FALSE; + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + /* + * scanLockState transitions (such as nvEvoLockHWStateLockHeads) + * will update headControlAssy values for all heads, so we should + * update flipLock and flipLockPin for all heads as well. 
+             */
+            NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head];
+            /*
+             * Reset the fliplock pin, if it's not in use for framelock,
+             * and unregister our use of the fliplock pin.
+             */
+            if (!HEAD_MASK_QUERY(pEvoSubDev->flipLockPinSetForFrameLockHeadMask,
+                                 head)) {
+                if (pHC->flipLockPin != NV_EVO_LOCK_PIN_INTERNAL(0)) {
+                    pHC->flipLockPin = NV_EVO_LOCK_PIN_INTERNAL(0);
+                    headChanged = TRUE;
+                }
+            }
+            pEvoSubDev->flipLockPinSetForSliHeadMask =
+                HEAD_MASK_UNSET(pEvoSubDev->flipLockPinSetForSliHeadMask,
+                                head);
+
+            /*
+             * Disable fliplock, if it's not in use for framelock, and
+             * unregister our need for fliplock to be enabled.
+             */
+            if (!HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForFrameLockHeadMask,
+                                 head)) {
+                if (pHC->flipLock) {
+                    pHC->flipLock = FALSE;
+                    headChanged = TRUE;
+                }
+            }
+            pEvoSubDev->flipLockEnabledForSliHeadMask =
+                HEAD_MASK_UNSET(pEvoSubDev->flipLockEnabledForSliHeadMask,
+                                head);
+            if (headChanged) {
+                EvoUpdateHeadParams(pDispEvo, head, &updateState);
+                /* Record that this disp needs a kickoff below. */
+                changed = TRUE;
+            }
+        }
+        if (changed) {
+            nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
+                                  TRUE /* releaseElv */);
+        }
+    }
+
+    pLockGroup->flipLockEnabled = FALSE;
+}
+
+/*
+ * Unlock cross-GPU locking in the given lock group.
+ */
+static void UnlockLockGroup(NVLockGroup *pLockGroup)
+{
+    RasterLockGroup *pRasterLockGroup;
+    int i;
+
+    if (pLockGroup == NULL) {
+        return;
+    }
+
+    pRasterLockGroup = &pLockGroup->rasterLockGroup;
+
+    DisableLockGroupFlipLock(pLockGroup);
+
+    for (i = (int)pRasterLockGroup->numDisps - 1; i >= 0; i--) {
+        NVDispEvoPtr pDispEvo = pRasterLockGroup->pDispEvoOrder[i];
+        NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+        NvU32 sd = pDispEvo->displayOwner;
+        NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
+
+        /* Initialize the assembly state */
+        SyncEvoLockState();
+
+        if (ApplyLockActionIfPossible(pDispEvo, pEvoSubDev,
+                                      NV_EVO_REM_SLI)) {
+            /* Update the hardware if anything has changed */
+            UpdateEvoLockState();
+        }
+
+        pEvoSubDev->flipLockProhibitedHeadMask = 0x0;
+
+        nvAssert(pDispEvo->pLockGroup == pLockGroup);
+        pDispEvo->pLockGroup = NULL;
+    }
+
+    /*
+     * Disable any SLI video bridge features we may have enabled for locking.
+     * This is a separate loop from the above in order to handle both cases:
+     *
+     * a) Multiple pDispEvos on the same pDevEvo (linked RM-SLI): all disps in
+     *    the lock group will share the same pDevEvo. In that case we should
+     *    not call RM to disable the video bridge power across the entire
+     *    device until we've disabled locking on all GPUs. This loop will
+     *    call nvEvoUpdateSliVideoBridge() redundantly for the same pDevEvo,
+     *    but those calls will be filtered out. (If we did this in the loop
+     *    above, RM would broadcast the video bridge disable call to all pDisps
+     *    on the first call, even before we've disabled locking on them.)
+     *
+     * b) Each pDispEvo on a separate pDevEvo (client-side SLI or no SLI, when
+     *    a video bridge is present): in that case each pDispEvo has a separate
+     *    pDevEvo, and we need to call nvEvoUpdateSliVideoBridge() on each.
+     *    (It would be okay in this case to call nvEvoUpdateSliVideoBridge() in
+     *    the loop above since it will only disable the video bridge power for
+     *    one GPU at a time.)
+     */
+    for (i = (int)pRasterLockGroup->numDisps - 1; i >= 0; i--) {
+        NVDispEvoPtr pDispEvo = pRasterLockGroup->pDispEvoOrder[i];
+        NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+
+        pDevEvo->sli.bridge.powerNeededForRasterLock = FALSE;
+        nvEvoUpdateSliVideoBridge(pDevEvo);
+    }
+
+    nvFree(pLockGroup);
+}
+
+/*
+ * Unlock any cross-GPU locking in the rasterlock group(s) associated with
+ * the given device.
+ */
+static void UnlockLockGroupsForDevice(NVDevEvoPtr pDevEvo)
+{
+    NVDispEvoPtr pDispEvo;
+    NvU32 sd;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+        UnlockLockGroup(pDispEvo->pLockGroup);
+        nvAssert(pDispEvo->pLockGroup == NULL);
+    }
+}
+
+void nvAssertAllDpysAreInactive(NVDevEvoPtr pDevEvo)
+{
+    NVDispEvoPtr pDispEvo;
+    int i;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
+        NvU32 head;
+        for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
+            nvAssert(!nvHeadIsActive(pDispEvo, head));
+        }
+    }
+}
+
+/*!
+ * Disable locking-related state.
+ */
+static void DisableLockState(NVDevEvoPtr pDevEvo)
+{
+    NvU32 dispIndex;
+    NVDispEvoPtr pDispEvo;
+
+    /* Disable flip lock as requested by swap groups/framelock. */
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+        nvToggleFlipLockPerDisp(pDispEvo,
+                                nvGetActiveHeadMask(pDispEvo),
+                                FALSE /* enable */);
+    }
+
+    /* Disable any locking across GPUs. */
+
+    UnlockLockGroupsForDevice(pDevEvo);
+
+    /* Disable intra-GPU rasterlock on this pDevEvo. */
+    UnlockRasterLockOneDev(pDevEvo);
+
+    /* Reset the EVO locking state machine. */
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+        nvEvoStateAssertNoLock(&pDevEvo->gpus[pDispEvo->displayOwner]);
+        nvEvoStateStartNoLock(&pDevEvo->gpus[pDispEvo->displayOwner]);
+    }
+}
+
+void nvEvoLockStatePreModeset(NVDevEvoPtr pDevEvo)
+{
+    DisableLockState(pDevEvo);
+}
+
+/*!
+ * Set up raster lock between GPUs, if applicable.
+ */
+void nvEvoLockStatePostModeset(NVDevEvoPtr pDevEvo, const NvBool doRasterLock)
+{
+    RasterLockGroup *pRasterLockGroups, *pRasterLockGroup;
+    unsigned int numRasterLockGroups;
+
+    if (!doRasterLock) {
+        return;
+    }
+
+    FinishModesetOneDev(pDevEvo);
+
+    pRasterLockGroups = GetRasterLockGroups(pDevEvo, &numRasterLockGroups);
+    if (!pRasterLockGroups) {
+        return;
+    }
+
+    for (pRasterLockGroup = pRasterLockGroups;
+         pRasterLockGroup < pRasterLockGroups + numRasterLockGroups;
+         pRasterLockGroup++) {
+        FinishModesetOneGroup(pRasterLockGroup);
+    }
+
+    nvFree(pRasterLockGroups);
+}
+
+/*!
+ * Updates the hardware based on software needs tracked in pDevEvo->sli.bridge.
+ * Call this function after changing any of those needs.
+ */
+void nvEvoUpdateSliVideoBridge(NVDevEvoPtr pDevEvo)
+{
+    NV0080_CTRL_GPU_SET_VIDLINK_PARAMS params = { 0 };
+    const NvBool enable = pDevEvo->sli.bridge.powerNeededForRasterLock;
+    NvU32 status;
+
+    if (pDevEvo->sli.bridge.powered == enable) {
+        return;
+    }
+
+    if (enable) {
+        /* SLI should be prohibited earlier if no bridge is present. */
+        nvAssert(pDevEvo->sli.bridge.present);
+    }
+
+    params.enable = enable ?
+        NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_TRUE :
+        NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_FALSE;
+
+    status = nvRmApiControl(nvEvoGlobal.clientHandle,
+                            pDevEvo->deviceHandle,
+                            NV0080_CTRL_CMD_GPU_SET_VIDLINK,
+                            &params, sizeof(params));
+    if (status != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"NV0080_CTRL_CMD_GPU_SET_VIDLINK failed");
+    }
+
+    pDevEvo->sli.bridge.powered = enable;
+}
+
+/*
+ * Check if VRR or MergeMode is enabled; if so, go into the special "prohibit
+ * lock" mode which prevents other scanlock states from being reached.
+ *
+ * Return TRUE iff VRR or MergeMode is in use on this GPU.
+ */
+static NvBool ProhibitLockIfNecessary(NVDispEvoRec *pDispEvo)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
+    NvU32 activeHeads[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, };
+    NvBool prohibitLock = FALSE;
+    NvU32 numActiveHeads = 0;
+    NvU32 head;
+
+    for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
+        if (nvHeadIsActive(pDispEvo, head)) {
+            activeHeads[numActiveHeads++] = head;
+            if ((pDispEvo->headState[head].timings.vrr.type !=
+                 NVKMS_DPY_VRR_TYPE_NONE)) {
+                prohibitLock = TRUE;
+            }
+
+            if (pDispEvo->headState[head].mergeMode !=
+                NV_EVO_MERGE_MODE_DISABLED) {
+                prohibitLock = TRUE;
+            }
+        }
+    }
+
+    if (prohibitLock) {
+        activeHeads[numActiveHeads] = NV_INVALID_HEAD;
+
+        SyncEvoLockState();
+
+        if (!pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev,
+                                       NV_EVO_PROHIBIT_LOCK,
+                                       activeHeads)) {
+            nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
+                              "Failed to prohibit lock");
+            return FALSE;
+        }
+
+        UpdateEvoLockState();
+
+        return TRUE;
+    }
+
+    return FALSE;
+}
+
+/*
+ * Prohibit locking if necessary for the active configuration.
+ *
+ * Set up rasterlock between heads on a single GPU, if certain conditions are
+ * met:
+ * - Locking is not prohibited due to the active configuration
+ * - Opportunistic display sync is not disabled via kernel module parameter
+ * - All active heads have identical mode timings
+ *
+ * Set pDispEvo->rasterLockPossible to indicate whether rasterlock is possible
+ * on this GPU, which will be used to determine if rasterlock is possible
+ * between this GPU and other GPUs.
+ * Note that this isn't the same as whether heads were locked: if fewer than
+ * two heads were active, heads will not be locked but rasterlock with other
+ * GPUs may still be possible.
+ */
+static void FinishModesetOneDisp(
+    NVDispEvoRec *pDispEvo)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVEvoSubDevPtr pEvoSubDev;
+    const NVDispHeadStateEvoRec *pPrevHeadState = NULL;
+    NvU32 head, usedHeads = 0;
+    NvU32 headsToLock[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, };
+
+    if (pDevEvo->gpus == NULL) {
+        return;
+    }
+
+    pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
+
+    pDispEvo->rasterLockPossible = FALSE;
+
+    if (ProhibitLockIfNecessary(pDispEvo)) {
+        /* If all locking is prohibited, do not attempt rasterlock. */
+        return;
+    }
+
+    if (!nvkms_opportunistic_display_sync()) {
+        /* If opportunistic display sync is disabled, do not attempt
+         * rasterlock. */
+        return;
+    }
+
+    /*
+     * Determine if rasterlock is possible: check each active display for
+     * rasterlock compatibility with the previous one we looked at. If any of
+     * them aren't compatible, rasterlock is not possible.
+     */
+    pDispEvo->rasterLockPossible = TRUE;
+    for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
+        const NVDispHeadStateEvoRec *pHeadState =
+            &pDispEvo->headState[head];
+
+        if (!nvHeadIsActive(pDispEvo, head)) {
+            continue;
+        }
+
+        if (pPrevHeadState &&
+            !RasterLockPossible(pHeadState, pPrevHeadState)) {
+            pDispEvo->rasterLockPossible = FALSE;
+            break;
+        }
+
+        pPrevHeadState = pHeadState;
+
+        headsToLock[usedHeads] = head;
+        usedHeads++;
+    }
+
+    if (!pDispEvo->rasterLockPossible) {
+        return;
+    }
+
+    if (usedHeads > 1) {
+        /* Terminate array */
+        headsToLock[usedHeads] = NV_INVALID_HEAD;
+
+        /* Initialize the assembly state */
+        SyncEvoLockState();
+
+        /* Set up rasterlock between heads on this disp.
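+         * NV_EVO_LOCK_HEADS consumes the NV_INVALID_HEAD-terminated list of
+         * active heads built above.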
+         */
+        nvAssert(headsToLock[0] != NV_INVALID_HEAD);
+        if (!pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev,
+                                       NV_EVO_LOCK_HEADS,
+                                       headsToLock)) {
+            nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
+                              "Unable to lock heads");
+            pDispEvo->rasterLockPossible = FALSE;
+        }
+
+        /* Update the hardware with the new state */
+        UpdateEvoLockState();
+    }
+}
+
+/* Call FinishModesetOneDisp() for each disp on this device to set up intra-GPU
+ * locking on each. */
+static void FinishModesetOneDev(
+    NVDevEvoRec *pDevEvo)
+{
+    NVDispEvoPtr pDispEvo;
+    NvU32 sd;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+        FinishModesetOneDisp(pDispEvo);
+    }
+}
+
+/*
+ * Enable fliplock for the specified pLockGroup.
+ * This assumes that rasterlock was already enabled.
+ */
+static void EnableLockGroupFlipLock(NVLockGroup *pLockGroup)
+{
+    const RasterLockGroup *pRasterLockGroup = &pLockGroup->rasterLockGroup;
+    NvU32 i;
+
+    if (pRasterLockGroup->numDisps < 2) {
+        /* TODO: enable fliplock for single GPUs */
+        return;
+    }
+
+    pLockGroup->flipLockEnabled = TRUE;
+
+    for (i = 0; i < pRasterLockGroup->numDisps; i++) {
+        NVEvoUpdateState updateState = { };
+        NVDispEvoPtr pDispEvo = pRasterLockGroup->pDispEvoOrder[i];
+        NvU32 sd = pDispEvo->displayOwner;
+        NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+        NvU32 head;
+
+        for (head = 0; head < pDevEvo->numHeads; head++) {
+            NvU64 startTime = 0;
+
+            if (!nvHeadIsActive(pDispEvo, head)) {
+                continue;
+            }
+
+            NVEvoLockPin pin =
+                nvEvoGetPinForSignal(pDispEvo, &pDevEvo->gpus[sd],
+                                     NV_EVO_LOCK_SIGNAL_FLIP_LOCK);
+
+            /* Wait for the raster lock to sync in. */
+            if (pin == NV_EVO_LOCK_PIN_ERROR ||
+                !EvoWaitForLock(pDevEvo, sd, head, EVO_RASTER_LOCK,
+                                &startTime)) {
+                nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+                             "Timed out waiting for rasterlock; not enabling fliplock.");
+                goto fail;
+            }
+
+            /*
+             * Enable fliplock, and register that we've enabled
+             * fliplock for SLI to ensure it doesn't get disabled
+             * later.
+             */
+            pDevEvo->gpus[sd].headControl[head].flipLockPin = pin;
+            pDevEvo->gpus[sd].flipLockPinSetForSliHeadMask =
+                HEAD_MASK_SET(pDevEvo->gpus[sd].flipLockPinSetForSliHeadMask, head);
+
+            pDevEvo->gpus[sd].headControl[head].flipLock = TRUE;
+            pDevEvo->gpus[sd].flipLockEnabledForSliHeadMask =
+                HEAD_MASK_SET(pDevEvo->gpus[sd].flipLockEnabledForSliHeadMask, head);
+
+            EvoUpdateHeadParams(pDispEvo, head, &updateState);
+        }
+
+        /*
+         * This must be synchronous as EVO reports lock success if
+         * locking isn't enabled, so we could race through the
+         * WaitForLock check below otherwise.
+         */
+        nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
+                              TRUE /* releaseElv */);
+
+        /*
+         * Wait for flip lock sync. This may not be strictly necessary, but
+         * the docs say to do this before attempting any flips in the base
+         * channel.
+         */
+        for (head = 0; head < pDevEvo->numHeads; head++) {
+            NvU64 startTime = 0;
+
+            if (!nvHeadIsActive(pDispEvo, head)) {
+                continue;
+            }
+
+            if (!EvoWaitForLock(pDevEvo, sd, head, EVO_FLIP_LOCK,
+                                &startTime)) {
+                nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+                             "Timed out waiting for fliplock.");
+                goto fail;
+            }
+        }
+    }
+
+    return;
+fail:
+    DisableLockGroupFlipLock(pLockGroup);
+}
+
+/*
+ * FinishModesetOneGroup() - Set up raster lock between GPUs, if applicable,
+ * for one RasterLockGroup. Called in a loop from nvEvoLockStatePostModeset().
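+ *
+ * Locking is attempted only if every disp in the group reported
+ * rasterLockPossible, all active heads across the group have compatible
+ * timings, and the participating GPUs are consecutive in the video bridge
+ * order.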
+ */
+
+static void FinishModesetOneGroup(RasterLockGroup *pRasterLockGroup)
+{
+    NVDispEvoPtr *pDispEvoOrder = pRasterLockGroup->pDispEvoOrder;
+    NvU32 numUsedGpus = 0;
+    const NVDispHeadStateEvoRec *pPrevHeadState = NULL;
+    NvBool headInUse[NVKMS_MAX_SUBDEVICES][NVKMS_MAX_HEADS_PER_DISP];
+    NvBool rasterLockPossible = TRUE, foundUnused = FALSE;
+    unsigned int i, j;
+    NVLockGroup *pLockGroup = NULL;
+
+    /* Don't attempt locking across GPUs if, on any individual GPU, rasterlock
+     * isn't possible. */
+    for (i = 0; i < pRasterLockGroup->numDisps; i++) {
+        NVDispEvoPtr pDispEvo = pDispEvoOrder[i];
+
+        if (!pDispEvo->rasterLockPossible) {
+            return;
+        }
+    }
+
+    nvkms_memset(headInUse, 0, sizeof(headInUse));
+
+    /*
+     * Next, figure out if we can perform cross-GPU locking and which
+     * GPUs/heads we can use. Only attempt locking if all heads across GPUs
+     * have compatible timings and are consecutive in the video bridge order.
+     */
+    for (i = 0; i < pRasterLockGroup->numDisps; i++) {
+        NVDispEvoPtr pDispEvo = pDispEvoOrder[i];
+        NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+        NvU32 head;
+
+        /*
+         * We can't lock if there is an unused GPU between two used GPUs on the
+         * video bridge chain.
+         * We must check if pDevEvo->gpus is NULL in case we haven't been
+         * through AllocDeviceObject for this pDev (yet?).
+         */
+        if (!HasActiveHeads(pDispEvo) ||
+            !pDevEvo->gpus) {
+            foundUnused = TRUE;
+            continue;
+        } else {
+            if (foundUnused) {
+                rasterLockPossible = FALSE;
+                break;
+            }
+
+            numUsedGpus++;
+        }
+
+        /*
+         * Compare modetimings for each active display with the previous one we
+         * looked at. If any of them don't match, punt on locking.
+         */
+        for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
+            const NVDispHeadStateEvoRec *pHeadState =
+                &pDispEvo->headState[head];
+
+            if (!nvHeadIsActive(pDispEvo, head)) {
+                continue;
+            }
+
+            if (pPrevHeadState &&
+                !RasterLockPossible(pHeadState, pPrevHeadState)) {
+                rasterLockPossible = FALSE;
+                goto exitHeadLoop;
+            }
+
+            headInUse[i][head] = TRUE;
+
+            pPrevHeadState = pHeadState;
+        }
+
+exitHeadLoop:
+        if (!rasterLockPossible) {
+            break;
+        }
+    }
+
+    if (!rasterLockPossible || numUsedGpus == 0) {
+        return;
+    }
+
+    /* Create a new lock group to store the current configuration */
+    pLockGroup = nvCalloc(1, sizeof(*pLockGroup));
+
+    if (pLockGroup == NULL) {
+        return;
+    }
+
+    pLockGroup->rasterLockGroup = *pRasterLockGroup;
+
+    /*
+     * Finally, actually set up locking: go through the video bridge order
+     * setting it up.
+     */
+    for (i = 0; i < pRasterLockGroup->numDisps; i++) {
+        NVDispEvoPtr pDispEvo = pDispEvoOrder[i];
+        NvU32 sd = pDispEvo->displayOwner;
+        NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+        NvU32 head[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, };
+        unsigned int usedHeads = 0;
+        NvBool gpusLocked = FALSE;
+
+        /* Remember that we've enabled this lock group on this GPU. */
+        nvAssert(pDispEvo->pLockGroup == NULL);
+        pDispEvo->pLockGroup = pLockGroup;
+
+        /* If we're past the end of the chain, stop applying locking below, but
+         * continue this loop to assign pDispEvo->pLockGroup above.
*/ + if (i >= numUsedGpus) { + continue; + } + + /* Initialize the assembly state */ + SyncEvoLockState(); + + for (j = 0; j < NVKMS_MAX_HEADS_PER_DISP; j++) { + if (headInUse[i][j]) { + + head[usedHeads] = j; + + usedHeads++; + } + } + head[usedHeads] = NV_INVALID_HEAD; + + /* Then set up cross-GPU locking, if we have enough active GPUs */ + if (numUsedGpus > 1) { + NVEvoLockAction action; + NVEvoLockPin *pServerPin = &pDevEvo->gpus[sd].sliServerLockPin; + NVEvoLockPin *pClientPin = &pDevEvo->gpus[sd].sliClientLockPin; + + *pServerPin = NV_EVO_LOCK_PIN_ERROR; + *pClientPin = NV_EVO_LOCK_PIN_ERROR; + + if (i == 0) { + action = NV_EVO_ADD_SLI_PRIMARY; + } else { + if (i == (numUsedGpus - 1)) { + action = NV_EVO_ADD_SLI_LAST_SECONDARY; + } else { + action = NV_EVO_ADD_SLI_SECONDARY; + } + } + + if (action == NV_EVO_ADD_SLI_PRIMARY || + action == NV_EVO_ADD_SLI_SECONDARY) { + /* Find pin for server to next */ + NVDispEvoPtr pDispEvoNext = pDispEvoOrder[i + 1]; + NvU32 headNext = 0; + + for (j = 0; j < NVKMS_MAX_HEADS_PER_DISP; j++) { + if (headInUse[i + 1][j]) { + headNext = j; + break; + } + } + + GetRasterLockPin(pDispEvo, head[0], + pDispEvoNext, headNext, + pServerPin, NULL); + } + + if (action == NV_EVO_ADD_SLI_SECONDARY || + action == NV_EVO_ADD_SLI_LAST_SECONDARY) { + + /* Find pin for client to prev */ + NVDispEvoPtr pDispEvoPrev = pDispEvoOrder[i - 1]; + NvU32 headPrev = 0; + + for (j = 0; j < NVKMS_MAX_HEADS_PER_DISP; j++) { + if (headInUse[i - 1][j]) { + headPrev = j; + break; + } + } + + GetRasterLockPin(pDispEvo, head[0], + pDispEvoPrev, headPrev, + NULL, pClientPin); + } + + if (!pDevEvo->gpus[sd].scanLockState(pDispEvo, &pDevEvo->gpus[sd], + action, head)) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Unable to set up SLI locking"); + } else { + gpusLocked = TRUE; + } + } + + /* + * On certain GPUs, we need to enable the video bridge (MIO pads) when + * enabling rasterlock. Note that we don't disable in this function, + * so if gpusLocked is true for any iteration of these loops, this bit + * will be on. + */ + if (gpusLocked && NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_RASTER_LOCK_NEEDS_MIO_POWER)) { + pDevEvo->sli.bridge.powerNeededForRasterLock = TRUE; + nvEvoUpdateSliVideoBridge(pDevEvo); + } + + /* If anything changed, update the hardware */ + if (gpusLocked) { + UpdateEvoLockState(); + } + } + + /* Enable fliplock, if we can */ + EnableFlipLockIfRequested(pLockGroup); +} + +/* + * Check if the given LockGroup matches the given FlipLockRequestedGroup. + * This is true if the flip lock heads match the currently-active + * heads on all pDispEvos. + */ +static NvBool CheckLockGroupMatchFlipLockRequestedGroup( + const NVLockGroup *pLockGroup, + const FlipLockRequestedGroup *pFLRG) +{ + const RasterLockGroup *pRasterLockGroup = &pLockGroup->rasterLockGroup; + NvU32 disp, requestedDisp; + + /* Verify the number of disps is the same. 
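+     * pFLRG->disp[] is terminated by its first NULL pDispEvo entry, so count
+     * entries up to that point.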
*/ + NvU32 numRequestedDisps = 0; + for (requestedDisp = 0; + requestedDisp < ARRAY_LEN(pFLRG->disp); + requestedDisp++) { + const NVDispEvoRec *pRequestedDispEvo = + pFLRG->disp[requestedDisp].pDispEvo; + if (pRequestedDispEvo == NULL) { + break; + } + numRequestedDisps++; + } + if (numRequestedDisps != pRasterLockGroup->numDisps) { + return FALSE; + } + + /* + * For each disp in the rasterlock group: + * - If there is no matching disp in the pFLRG, no match + * - If the disp's active head mask doesn't match the pFLRG's requested + * head mask for that disp, no match + * If none of the conditions above failed, then we have a match. + */ + for (disp = 0; disp < pRasterLockGroup->numDisps; disp++) { + const NVDispEvoRec *pDispEvo = pRasterLockGroup->pDispEvoOrder[disp]; + NvBool found = FALSE; + for (requestedDisp = 0; + requestedDisp < ARRAY_LEN(pFLRG->disp); + requestedDisp++) { + const NVDispEvoRec *pRequestedDispEvo = + pFLRG->disp[requestedDisp].pDispEvo; + if (pRequestedDispEvo == NULL) { + break; + } + if (pRequestedDispEvo == pDispEvo) { + if (pFLRG->disp[requestedDisp].flipLockHeads != + nvGetActiveHeadMask(pDispEvo)) { + return FALSE; + } + found = TRUE; + break; + } + } + if (!found) { + return FALSE; + } + } + + return TRUE; +} + +/* + * Check if any requested fliplock groups match this lockgroup; if so, enable + * fliplock on the lockgroup. + */ +static void EnableFlipLockIfRequested(NVLockGroup *pLockGroup) +{ + FlipLockRequestedGroup *pFLRG; + nvListForEachEntry(pFLRG, &requestedFlipLockGroups, listEntry) { + if (CheckLockGroupMatchFlipLockRequestedGroup(pLockGroup, pFLRG)) { + EnableLockGroupFlipLock(pLockGroup); + break; + } + } +} + +/* + * Check if there is an active NVLockGroup that matches the given + * FlipLockRequestedGroup. + * "Matches" means that the NVLockGroup extends to the exact same GPUs as the + * FlipLockRequestedGroup, and that the *active* heads on those GPUs exactly + * match the heads requested in the FlipLockRequestedGroup. + */ +static NVLockGroup *FindMatchingLockGroup(const FlipLockRequestedGroup *pFLRG) +{ + /* If there is an active lock group that matches this pFLRG, it must also + * be active on the first disp, so we don't need to bother looping over + * all disps. */ + NVLockGroup *pLockGroup = pFLRG->disp[0].pDispEvo->pLockGroup; + + if (pLockGroup != NULL && + CheckLockGroupMatchFlipLockRequestedGroup(pLockGroup, pFLRG)) { + return pLockGroup; + } + return NULL; +} + +/* Disable any currently-active lock groups that match the given pFLRG */ +static void +DisableRequestedFlipLockGroup(const FlipLockRequestedGroup *pFLRG) +{ + NVLockGroup *pLockGroup = FindMatchingLockGroup(pFLRG); + if (pLockGroup != NULL) { + DisableLockGroupFlipLock(pLockGroup); + + nvAssert(!pLockGroup->flipLockEnabled); + } +} + +/* + * Check if there is a currently-active rasterlock group that matches the + * disps/heads of this FlipLockRequestedGroup. If so, enable flip lock between + * those heads. + */ +static void +EnableRequestedFlipLockGroup(const FlipLockRequestedGroup *pFLRG) +{ + NVLockGroup *pLockGroup = FindMatchingLockGroup(pFLRG); + if (pLockGroup != NULL) { + EnableLockGroupFlipLock(pLockGroup); + } +} + +/* + * Convert the given API head mask to a HW head mask, using the + * currently-active API head->HW head mapping. 
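+ *
+ * Inactive API heads contribute no HW heads to the returned mask.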
+ */ +static NvU32 ApiHeadMaskToHwHeadMask( + const NVDispEvoRec *pDispEvo, + const NvU32 apiHeadMask) +{ + const NvU32 numApiHeads = pDispEvo->pDevEvo->numApiHeads; + NvU32 apiHead; + NvU32 hwHeadMask = 0; + + for (apiHead = 0; apiHead < numApiHeads; apiHead++) { + if ((apiHeadMask & (1 << apiHead)) != 0) { + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + if (nvApiHeadIsActive(pDispEvo, apiHead)) { + hwHeadMask |= pApiHeadState->hwHeadsMask; + } + } + } + + return hwHeadMask; +} + +/* + * Return true if all main channels are idle on the heads specified in the + * FlipLockRequestedGroup. + */ +static NvBool CheckFlipLockGroupIdle( + const FlipLockRequestedGroup *pFLRG) +{ + NvU32 i; + + for (i = 0; i < ARRAY_LEN(pFLRG->disp); i++) { + NVDispEvoPtr pDispEvo = pFLRG->disp[i].pDispEvo; + if (pDispEvo != NULL) { + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + const NvU32 numHeads = pDevEvo->numHeads; + NvU32 head; + + for (head = 0; head < numHeads; head++) { + NvBool isMethodPending; + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + if (!pDevEvo->hal->IsChannelMethodPending( + pDevEvo, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + sd, + &isMethodPending) || isMethodPending) { + return FALSE; + } + } + } + } + + return TRUE; +} + +/* + * Return true if all main channels are idle on each head in overlapping flip + * lock groups. + */ +static NvBool CheckOverlappingFlipLockRequestGroupsIdle( + NVDevEvoRec *pDevEvo[NV_MAX_SUBDEVICES], + const struct NvKmsSetFlipLockGroupRequest *pRequest) +{ + NvU32 dev; + + /* Loop over the GPUs specified in this FlipLockGroupRequest */ + for (dev = 0; dev < NV_MAX_SUBDEVICES && pDevEvo[dev] != NULL; dev++) { + NVDispEvoPtr pDispEvo; + NvU32 sd; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo[dev]) { + FlipLockRequestedGroup *pFLRG; + + if ((pRequest->dev[dev].requestedDispsBitMask & (1 << sd)) == 0) { + continue; + } + + /* + * For each specified GPU, search through existing requested + * fliplock groups and find any that overlap with heads in this + * request. + * + * Return FALSE if any overlapping fliplock groups are not idle. + */ + nvListForEachEntry(pFLRG, &requestedFlipLockGroups, listEntry) { + NvU32 i; + for (i = 0; i < ARRAY_LEN(pFLRG->disp); i++) { + if (pFLRG->disp[i].pDispEvo == NULL) { + break; + } + if (pFLRG->disp[i].pDispEvo == pDispEvo) { + /* API heads requested for this disp by the client */ + const NvU32 requestedApiHeadMask = + pRequest->dev[dev].disp[sd].requestedHeadsBitMask; + const NvU32 requestedHwHeadMask = + ApiHeadMaskToHwHeadMask(pDispEvo, requestedApiHeadMask); + + if ((requestedHwHeadMask & + pFLRG->disp[i].flipLockHeads) != 0) { + /* Match */ + if (!CheckFlipLockGroupIdle(pFLRG)) { + return FALSE; + } + } + break; + } + } + } + } + } + + return TRUE; +} + +/* + * Disable and remove any FlipLockRequestGroups that contain any of the heads + * in 'hwHeadsMask' on the given pDispEvo. + */ +static void +RemoveOverlappingFlipLockRequestGroupsOneDisp( + NVDispEvoRec *pDispEvo, + NvU32 hwHeadMask) +{ + FlipLockRequestedGroup *pFLRG, *tmp; + + /* + * For each specified GPU, search through existing requested + * fliplock groups and find any that overlap with heads in this + * request. + * + * For any that are found, disable fliplock and remove the + * requested flip lock group. 
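+     *
+     * The _safe list iterator is required here because matching entries are
+     * unlinked and freed while walking the list.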
+     */
+    nvListForEachEntry_safe(pFLRG, tmp, &requestedFlipLockGroups, listEntry) {
+        NvU32 i;
+
+        for (i = 0; i < ARRAY_LEN(pFLRG->disp); i++) {
+            if (pFLRG->disp[i].pDispEvo == NULL) {
+                break;
+            }
+            if (pFLRG->disp[i].pDispEvo == pDispEvo) {
+
+                if ((hwHeadMask &
+                     pFLRG->disp[i].flipLockHeads) != 0) {
+                    /* Match */
+                    DisableRequestedFlipLockGroup(pFLRG);
+
+                    /* Remove from global list */
+                    nvListDel(&pFLRG->listEntry);
+                    nvFree(pFLRG);
+                }
+                break;
+            }
+        }
+    }
+}
+
+/*
+ * Disable and remove any FlipLockRequestedGroups that contain any of the heads
+ * specified in 'pRequest'.
+ */
+static void
+RemoveOverlappingFlipLockRequestGroups(
+    NVDevEvoRec *pDevEvo[NV_MAX_SUBDEVICES],
+    const struct NvKmsSetFlipLockGroupRequest *pRequest)
+{
+    NvU32 dev;
+
+    /* Loop over the GPUs specified in this FlipLockGroupRequest */
+    for (dev = 0; dev < NV_MAX_SUBDEVICES && pDevEvo[dev] != NULL; dev++) {
+        NVDispEvoPtr pDispEvo;
+        NvU32 sd;
+
+        FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo[dev]) {
+            NvU32 requestedApiHeadMask, requestedHwHeadMask;
+
+            if ((pRequest->dev[dev].requestedDispsBitMask & (1 << sd)) == 0) {
+                continue;
+            }
+
+            /* API heads requested for this disp by the client */
+            requestedApiHeadMask =
+                pRequest->dev[dev].disp[sd].requestedHeadsBitMask;
+            requestedHwHeadMask =
+                ApiHeadMaskToHwHeadMask(pDispEvo, requestedApiHeadMask);
+
+            RemoveOverlappingFlipLockRequestGroupsOneDisp(pDispEvo,
+                                                          requestedHwHeadMask);
+        }
+    }
+}
+
+/*
+ * Disable and remove any FlipLockRequestedGroups that contain any of the heads
+ * specified in 'pRequest'.
+ */
+void nvEvoRemoveOverlappingFlipLockRequestGroupsForModeset(
+    NVDevEvoPtr pDevEvo,
+    const struct NvKmsSetModeRequest *pRequest)
+{
+    NVDispEvoPtr pDispEvo;
+    NvU32 sd;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+        NvU32 requestedApiHeadMask, requestedHwHeadMask;
+
+        if ((pRequest->requestedDispsBitMask & (1 << sd)) == 0) {
+            continue;
+        }
+
+        /* API heads requested for this disp by the client */
+        requestedApiHeadMask =
+            pRequest->disp[sd].requestedHeadsBitMask;
+        requestedHwHeadMask =
+            ApiHeadMaskToHwHeadMask(pDispEvo, requestedApiHeadMask);
+
+        RemoveOverlappingFlipLockRequestGroupsOneDisp(pDispEvo,
+                                                      requestedHwHeadMask);
+    }
+}
+
+/*!
+ * Handle a NVKMS_IOCTL_SET_FLIPLOCK_GROUP request. This assumes that the
+ * request was already validated by nvkms.c:SetFlipLockGroup().
+ *
+ * \param[in] pDevEvo   Array of NVDevEvoPtr pointers, in the same order as
+ *                      the device handles were specified in the request.
+ * \param[in] pRequest  The ioctl request.
+ */
+NvBool
+nvSetFlipLockGroup(NVDevEvoRec *pDevEvo[NV_MAX_SUBDEVICES],
+                   const struct NvKmsSetFlipLockGroupRequest *pRequest)
+{
+    FlipLockRequestedGroup *pFLRG = NULL;
+
+    /* Construct the new FlipLockRequestedGroup first, so if it fails we can
+     * return before removing overlapping groups.
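+     * (When pRequest->enable is FALSE, pFLRG stays NULL and this request
+     * only tears down any overlapping groups below.)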
*/ + if (pRequest->enable) { + NvU32 dev, disp; + + pFLRG = nvCalloc(1, sizeof(*pFLRG)); + if (pFLRG == NULL) { + goto fail; + } + + disp = 0; + for (dev = 0; dev < NV_MAX_SUBDEVICES && pDevEvo[dev] != NULL; dev++) { + NVDispEvoPtr pDispEvo; + NvU32 sd; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo[dev]) { + const NvU32 requestedApiHeads = + pRequest->dev[dev].disp[sd].requestedHeadsBitMask; + + if ((pRequest->dev[dev].requestedDispsBitMask & (1 << sd)) == 0) { + continue; + } + + if (disp >= ARRAY_LEN(pFLRG->disp)) { + nvAssert(!"FlipLockRequestedGroup::disp too short?"); + goto fail; + } + + pFLRG->disp[disp].pDispEvo = pDispEvo; + pFLRG->disp[disp].flipLockHeads = + ApiHeadMaskToHwHeadMask(pDispEvo, requestedApiHeads); + disp++; + } + } + + if (!CheckFlipLockGroupIdle(pFLRG)) { + nvEvoLogDebug(EVO_LOG_ERROR, + "Failed to request flip lock: group not idle"); + goto fail; + } + } + + if (!CheckOverlappingFlipLockRequestGroupsIdle(pDevEvo, pRequest)) { + nvEvoLogDebug(EVO_LOG_ERROR, + "Failed to request flip lock: overlapping group(s) not idle"); + goto fail; + } + + RemoveOverlappingFlipLockRequestGroups(pDevEvo, pRequest); + + if (pFLRG) { + nvListAdd(&pFLRG->listEntry, &requestedFlipLockGroups); + + EnableRequestedFlipLockGroup(pFLRG); + } + + return TRUE; + +fail: + nvFree(pFLRG); + return FALSE; +} + +NvBool nvSetUsageBoundsEvo( + NVDevEvoPtr pDevEvo, + const NvU32 sd, + const NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState) +{ + NvBool needCoreUpdate; + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + + needCoreUpdate = pDevEvo->hal->SetUsageBounds(pDevEvo, sd, head, pUsage, + updateState); + + nvPopEvoSubDevMask(pDevEvo); + + pDevEvo->gpus[sd].headState[head].usage = *pUsage; + + return needCoreUpdate; +} + +void nvEnableMidFrameAndDWCFWatermark(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + NvBool enable, + NVEvoUpdateState *pUpdateState) +{ + pDevEvo->gpus[sd].headState[head]. + disableMidFrameAndDWCFWatermark = !enable; + + if (pDevEvo->hal->EnableMidFrameAndDWCFWatermark == NULL) { + nvEvoLogDev(pDevEvo, + EVO_LOG_ERROR, + "EnableMidFrameAndDWCFWatermark() is not defined"); + return; + } + + pDevEvo->hal->EnableMidFrameAndDWCFWatermark(pDevEvo, + sd, + head, + enable, + pUpdateState); +} + +static enum NvKmsDpyAttributeColorBpcValue GetMinRequiredBpc( + enum NvKmsOutputColorimetry colorimetry) +{ + // >= 8 BPC required for HDR + // XXX HDR TODO: Handle other colorimetries + return (colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) ? 
+        NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8 :
+        NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6;
+}
+
+static NvBool GetDefaultColorSpace(
+    const NvKmsDpyOutputColorFormatInfo *pColorFormatsInfo,
+    enum NvKmsDpyAttributeCurrentColorSpaceValue *pColorSpace,
+    enum NvKmsDpyAttributeColorBpcValue *pColorBpc,
+    const enum NvKmsDpyAttributeColorBpcValue minRequiredBpc)
+{
+    nvAssert(minRequiredBpc !=
+             NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN);
+
+    if (pColorFormatsInfo->rgb444.maxBpc >= minRequiredBpc) {
+        *pColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB;
+        *pColorBpc = pColorFormatsInfo->rgb444.maxBpc;
+        return TRUE;
+    }
+
+    if (pColorFormatsInfo->yuv444.maxBpc >= minRequiredBpc) {
+        *pColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444;
+        *pColorBpc = pColorFormatsInfo->yuv444.maxBpc;
+        return TRUE;
+    }
+
+    if (pColorFormatsInfo->yuv422.maxBpc >= minRequiredBpc) {
+        *pColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422;
+        *pColorBpc = pColorFormatsInfo->yuv422.maxBpc;
+        return TRUE;
+    }
+
+    return FALSE;
+}
+
+NvBool nvGetDefaultDpyColor(
+    const NvKmsDpyOutputColorFormatInfo *pColorFormatsInfo,
+    NVDpyAttributeColor *pDpyColor)
+{
+    nvkms_memset(pDpyColor, 0, sizeof(*pDpyColor));
+
+    if (!GetDefaultColorSpace(pColorFormatsInfo,
+                              &pDpyColor->format,
+                              &pDpyColor->bpc,
+                              GetMinRequiredBpc(pDpyColor->colorimetry))) {
+        return FALSE;
+    }
+
+    if (pDpyColor->format != NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) {
+        pDpyColor->range = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED;
+    } else {
+        pDpyColor->range = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL;
+    }
+
+    pDpyColor->colorimetry = NVKMS_OUTPUT_COLORIMETRY_DEFAULT;
+
+    return TRUE;
+}
+
+NvBool nvChooseColorRangeEvo(
+    const enum NvKmsDpyAttributeColorRangeValue requestedColorRange,
+    const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
+    const enum NvKmsDpyAttributeColorBpcValue colorBpc,
+    enum NvKmsDpyAttributeColorRangeValue *pColorRange)
+{
+    /* Hardware supports BPC_6 only for RGB */
+    nvAssert((colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) ||
+             (colorBpc != NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6));
+
+    if ((colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) &&
+        (colorBpc == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6)) {
+        /* At depth 18 only RGB and full range are allowed */
+        *pColorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL;
+    } else if ((colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444) ||
+               (colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422) ||
+               (colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420)) {
+        /* YUV requires limited color range. */
+        *pColorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED;
+    } else {
+        *pColorRange = requestedColorRange;
+    }
+
+    return TRUE;
+}
+
+static enum NvKmsDpyAttributeColorBpcValue ChooseColorBpc(
+    const enum NvKmsDpyAttributeColorBpcValue requested,
+    const enum NvKmsDpyAttributeColorBpcValue max,
+    const enum NvKmsDpyAttributeColorBpcValue min)
+{
+    if ((requested == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) ||
+        (requested > max)) {
+        return max;
+    }
+
+    if (requested < min) {
+        return min;
+    }
+
+    return requested;
+}
+
+/*!
+ * Choose the current colorSpace and colorRange for the given dpy based on
+ * the dpy's color format capabilities, the given modeset parameters (YUV420
+ * mode and output colorimetry) and the requested color space and range.
+ *
+ * This needs to be called during a modeset as well as when the requested color
+ * space or range has changed.
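+ *
+ * The requested bpc is clamped to the dpy's reported min/max for the chosen
+ * format (see ChooseColorBpc()).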
+ *
+ * If SW YUV420 mode is enabled, EVO HW is programmed with default (RGB color
+ * space, FULL color range) values, and the real values are used in a
+ * headSurface composite shader.
+ */
+NvBool nvChooseCurrentColorSpaceAndRangeEvo(
+    const NVDpyEvoRec *pDpyEvo,
+    const enum NvYuv420Mode yuv420Mode,
+    enum NvKmsOutputColorimetry colorimetry,
+    const enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace,
+    const enum NvKmsDpyAttributeColorBpcValue requestedColorBpc,
+    const enum NvKmsDpyAttributeColorRangeValue requestedColorRange,
+    enum NvKmsDpyAttributeCurrentColorSpaceValue *pCurrentColorSpace,
+    enum NvKmsDpyAttributeColorBpcValue *pCurrentColorBpc,
+    enum NvKmsDpyAttributeColorRangeValue *pCurrentColorRange)
+{
+    enum NvKmsDpyAttributeCurrentColorSpaceValue newColorSpace =
+        NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB;
+    enum NvKmsDpyAttributeColorBpcValue newColorBpc =
+        NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10;
+    enum NvKmsDpyAttributeColorRangeValue newColorRange =
+        NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL;
+    const NvKmsDpyOutputColorFormatInfo colorFormatsInfo =
+        nvDpyGetOutputColorFormatInfo(pDpyEvo);
+    const enum NvKmsDpyAttributeColorBpcValue minRequiredBpc =
+        GetMinRequiredBpc(colorimetry);
+
+    if (yuv420Mode != NV_YUV420_MODE_NONE) {
+        // XXX HDR TODO: Support YUV420 + HDR
+        // XXX HDR TODO: Handle other colorimetries
+        if (colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
+            return FALSE;
+        }
+
+        /*
+         * If the current mode timing requires YUV420 compression, we override
+         * the requested color space with YUV420.
+         */
+        newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420;
+        newColorBpc = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
+
+        nvAssert(colorFormatsInfo.rgb444.maxBpc >=
+                 NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8);
+    } else if ((colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) &&
+               !pDpyEvo->pDispEvo->pDevEvo->caps.supportsYUV2020) {
+        newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB;
+        newColorBpc = ChooseColorBpc(requestedColorBpc,
+                                     colorFormatsInfo.rgb444.maxBpc,
+                                     colorFormatsInfo.rgb444.minBpc);
+    } else {
+        /*
+         * Note this is effectively an assignment between different enum
+         * types: we switch on the requested colorSpace and explicitly assign
+         * the corresponding current colorSpace value, to avoid warnings about
+         * cross-enum assignment.
+         */
+        switch (requestedColorSpace) {
+        case NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB:
+            newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB;
+            newColorBpc = ChooseColorBpc(requestedColorBpc,
+                                         colorFormatsInfo.rgb444.maxBpc,
+                                         colorFormatsInfo.rgb444.minBpc);
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr422:
+            newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422;
+            newColorBpc = ChooseColorBpc(requestedColorBpc,
+                                         colorFormatsInfo.yuv422.maxBpc,
+                                         colorFormatsInfo.yuv422.minBpc);
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr444:
+            newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444;
+            newColorBpc = ChooseColorBpc(requestedColorBpc,
+                                         colorFormatsInfo.yuv444.maxBpc,
+                                         colorFormatsInfo.yuv444.minBpc);
+            break;
+        default:
+            nvAssert(!"Invalid Requested ColorSpace");
+        }
+
+        if ((newColorBpc < minRequiredBpc) &&
+            !GetDefaultColorSpace(&colorFormatsInfo, &newColorSpace,
+                                  &newColorBpc, minRequiredBpc)) {
+            return FALSE;
+        }
+    }
+
+    if (newColorBpc < minRequiredBpc) {
+        return FALSE;
+    }
+
+    if (!nvChooseColorRangeEvo(requestedColorRange, newColorSpace,
+                               newColorBpc, &newColorRange)) {
+        /* Propagate failure instead of silently ignoring it. */
+        return FALSE;
+    }
+
+    *pCurrentColorSpace = newColorSpace;
+    *pCurrentColorRange = newColorRange;
+    *pCurrentColorBpc = newColorBpc;
+
+    return TRUE;
+}
+
+void nvUpdateCurrentHardwareColorSpaceAndRangeEvo(
+    NVDispEvoPtr pDispEvo,
+    const NvU32 head,
+    const NVDpyAttributeColor *pDpyColor,
+    NVEvoUpdateState *pUpdateState)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    const NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo;
+
+    nvAssert(pConnectorEvo != NULL);
+
+    if ((pHeadState->timings.yuv420Mode == NV_YUV420_MODE_SW) &&
+        (pDpyColor->format ==
+         NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420)) {
+        // XXX HDR TODO: Support SW YUV420 + HDR Output
+        nvAssert(pDpyColor->colorimetry != NVKMS_OUTPUT_COLORIMETRY_BT2100);
+
+        /*
+         * In SW YUV420 mode, HW is programmed with RGB color space and full
+         * color range. The color space conversion and color range compression
+         * happen in a headSurface composite shader.
+         */
+        pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB;
+        pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_FULL;
+        pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_RGB;
+    } else {
+
+        // Set default colorimetry to RGB and default color range to full
+        pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB;
+        pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_FULL;
+
+        // Set color format
+        switch (pDpyColor->format) {
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
+            pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_RGB;
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
+            pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_YCbCr444;
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
+            pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_YCbCr422;
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
+            pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_YCbCr420;
+            break;
+        default:
+            nvAssert(!"unrecognized colorSpace");
+        }
+
+        switch (pConnectorEvo->legacyType) {
+        case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP:
+            // program HW with RGB/YCbCr
+            switch (pDpyColor->format) {
+            case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
+                if (pDpyColor->colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
+                    pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_BT2020RGB;
+                } else {
+                    pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB;
+                }
+                break;
+            case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
+            case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
+            case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
+                if (pDpyColor->colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
+                    pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_BT2020YCC;
+                } else if (nvEvoIsHDQualityVideoTimings(&pHeadState->timings)) {
+                    pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_YUV_709;
+                } else {
+                    pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_YUV_601;
+                }
+                break;
+            default:
+                nvAssert(!"unrecognized colorSpace");
+            }
+            break;
+        case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT:
+            // colorSpace isn't used for DEVICE_TYPE_CRT and
+            // hence should be set to the "unchanged" value
+            // (i.e. the default - RGB)
+            nvAssert(pDpyColor->format ==
+                     NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB);
+            nvAssert(pDpyColor->colorimetry != NVKMS_OUTPUT_COLORIMETRY_BT2100);
+
+            // program HW with RGB only
+            pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB;
+            break;
+        default:
+            nvAssert(!"ERROR: invalid pDpyEvo->type");
+        }
+
+        /* YCbCr444 should be advertised only for DisplayPort and HDMI */
+        nvAssert((pDpyColor->format !=
+                  NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444) ||
+                 nvConnectorUsesDPLib(pConnectorEvo) ||
+                 pConnectorEvo->isHdmiEnabled);
+
+        /* YCbCr422 should be advertised only for HDMI and DP on supported
+         * GPUs */
+        nvAssert((pDpyColor->format !=
+                  NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422) ||
+                 ((pDevEvo->caps.hdmiYCbCr422MaxBpc != 0) &&
+                  pConnectorEvo->isHdmiEnabled) ||
+                 ((pDevEvo->caps.dpYCbCr422MaxBpc != 0) &&
+                  nvConnectorUsesDPLib(pConnectorEvo)));
+
+        switch (pDpyColor->range) {
+        case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL:
+            pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_FULL;
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED:
+            pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_LIMITED;
+            break;
+        default:
+            nvAssert(!"Invalid colorRange");
+            break;
+        }
+    }
+
+    // Full color range is only allowed with RGB color format.
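+    // (YCbCr formats must use limited range; see nvChooseColorRangeEvo().)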
+ nvAssert((pHeadState->procAmp.colorFormat == NVT_COLOR_FORMAT_RGB) || + (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED)); + + // Limited color range is not allowed with 18bpp mode + nvAssert(!((pHeadState->pixelDepth == NVKMS_PIXEL_DEPTH_18_444) && + (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED))); + + nvPushEvoSubDevMaskDisp(pDispEvo); + + // Set the procamp head method + pDevEvo->hal->SetProcAmp(pDispEvo, head, pUpdateState); + + // Clean up + nvPopEvoSubDevMask(pDevEvo); +} + +void nvEvoHeadSetControlOR(NVDispEvoPtr pDispEvo, + const NvU32 head, + const NVDpyAttributeColor *pDpyColor, + NVEvoUpdateState *pUpdateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoPtr pHeadState = &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + const enum nvKmsPixelDepth pixelDepth = pHeadState->pixelDepth; + NvBool colorSpaceOverride = FALSE; + + nvAssert(pHeadState->pixelDepth == nvEvoDpyColorToPixelDepth(pDpyColor)); + + /* + * Determine whether or not this dpy will need its color space + * overridden. + * + * This is currently only used for DP 1.3 YUV420 mode or BT2100 colorimetry, + * where the HW's normal support for carrying color space information + * together with the frame is insufficient. + */ + if (((pTimings->yuv420Mode == NV_YUV420_MODE_SW) || + (pDpyColor->colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100)) && + nvConnectorUsesDPLib(pHeadState->pConnectorEvo)) { + + nvAssert(pDispEvo->pDevEvo->caps.supportsDP13); + colorSpaceOverride = TRUE; + } + + // Only set up the actual output for SLI primary. + nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner); + + pDevEvo->hal->HeadSetControlOR(pDevEvo, head, pTimings, pixelDepth, + colorSpaceOverride, + pUpdateState); + + nvPopEvoSubDevMask(pDevEvo); +} + +static const struct { + NvU32 algo; + enum NvKmsDpyAttributeCurrentDitheringModeValue nvKmsDitherMode; +} ditherModeTable[] = { + { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_DYNAMIC_2X2 }, + { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_STATIC_2X2 }, + { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_TEMPORAL }, + { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE } +}; + +static const struct { + NvU32 type; + enum NvKmsDpyAttributeCurrentDitheringDepthValue nvKmsDitherDepth; +} ditherDepthTable[] = { + { NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS }, + { NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS }, + { NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE } +}; + +/*! + * Choose dithering based on the requested dithering config + * NVConnectorEvo::or::dither. 
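+ *
+ * Roughly: start from the connector's current hardware dithering state,
+ * apply any explicit client requests, then adjust for DisplayPort
+ * bandwidth limits and BT.2100 (HDR) output, as done in the body below.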
+ */
+void nvChooseDitheringEvo(
+    const NVConnectorEvoRec *pConnectorEvo,
+    enum NvKmsDpyAttributeColorBpcValue bpc,
+    enum NvKmsOutputColorimetry colorimetry,
+    const NVDpyAttributeRequestedDitheringConfig *pReqDithering,
+    NVDpyAttributeCurrentDitheringConfig *pCurrDithering)
+{
+    NvU32 i;
+    NVDpyAttributeCurrentDitheringConfig currDithering = {
+        .enabled = FALSE,
+        .mode = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE,
+        .depth = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE,
+    };
+
+    currDithering.enabled = (pConnectorEvo->or.ditherType !=
+                             NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF);
+
+    for (i = 0; i < ARRAY_LEN(ditherDepthTable); i++) {
+        if (ditherDepthTable[i].type == pConnectorEvo->or.ditherType) {
+            currDithering.depth = ditherDepthTable[i].nvKmsDitherDepth;
+            break;
+        }
+    }
+
+    for (i = 0; i < ARRAY_LEN(ditherModeTable); i++) {
+        if (ditherModeTable[i].algo == pConnectorEvo->or.ditherAlgo) {
+            currDithering.mode = ditherModeTable[i].nvKmsDitherMode;
+            break;
+        }
+    }
+
+    switch (pReqDithering->state) {
+    case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_ENABLED:
+        currDithering.enabled = TRUE;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED:
+        currDithering.enabled = FALSE;
+        break;
+    default:
+        nvAssert(!"Unknown Dithering configuration");
+        // Fall through
+    case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO:
+        /*
+         * Leave it initialized based on the value of
+         * NVDpyEvoRec::or::dither::init::enabled.
+         */
+        break;
+    }
+
+    switch (pReqDithering->depth) {
+    case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_6_BITS:
+        currDithering.depth =
+            NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_8_BITS:
+        currDithering.depth =
+            NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS;
+        break;
+    default:
+        nvAssert(!"Unknown Dithering Depth");
+        // Fall through
+    case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO:
+        /*
+         * Leave it initialized based on the value of
+         * NVDpyEvoRec::or::dither::init::type.
+         */
+        break;
+    }
+
+    if (nvConnectorUsesDPLib(pConnectorEvo) &&
+        (pReqDithering->state !=
+         NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED)) {
+        NvU32 lutBits = 11;
+
+        /* If we are using a DisplayPort panel with bandwidth constraints
+         * that lower the color depth, take that into account when applying
+         * dithering.
+         */
+        if (bpc == 0) {
+            nvAssert(!"Unknown bpc");
+            bpc = 8;
+        }
+
+        /*
+         * If fewer than 8 DP bits are available, dither. Ideally we'd
+         * dither from lutBits > 10 to 10 bpc, but EVO doesn't have an
+         * option for that.
+         *
+         * XXX TODO: nvdisplay can dither to 10 bpc.
+         */
+        if ((bpc <= 8) && (lutBits > bpc)) {
+            if (pReqDithering->state ==
+                NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO) {
+                currDithering.enabled = TRUE;
+            }
+        }
+
+        if (pReqDithering->depth ==
+            NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO) {
+            if (bpc <= 6) {
+                currDithering.depth =
+                    NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS;
+            } else if (bpc <= 8) {
+                currDithering.depth =
+                    NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS;
+            }
+        }
+    }
+
+    // XXX HDR TODO: Handle other colorimetries
+    if ((colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) &&
+        (pReqDithering->state !=
+         NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED)) {
+
+        // GetMinRequiredBpc() enforces >= 8 BPC for HDR
+        nvAssert(bpc >= 8);
+
+        /*
+         * If output has BT.2100 (HDR10) colorimetry but fewer than 10 bits of
+         * precision, dither to 8 BPC, or as requested.
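+         *
+         * For example (a hypothetical case): BT.2100 output forced down to
+         * 8 bpc by link bandwidth gets dithering enabled here, at 8-bit
+         * depth unless the client explicitly requested a depth.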
+ */
+        if (bpc < 10) {
+            currDithering.enabled = TRUE;
+
+            if (pReqDithering->depth ==
+                NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO) {
+                currDithering.depth =
+                    NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS;
+            }
+        }
+    }
+
+    if (currDithering.enabled) {
+        switch (pReqDithering->mode) {
+        case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL:
+            currDithering.mode =
+                NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_TEMPORAL;
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2:
+            currDithering.mode =
+                NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_DYNAMIC_2X2;
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2:
+            currDithering.mode =
+                NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_STATIC_2X2;
+            break;
+        default:
+            nvAssert(!"Unknown Dithering Mode");
+            // Fall through
+        case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO:
+            /*
+             * Leave it initialized based on the value of
+             * NVDpyEvoRec::or::dither::init::algo.
+             */
+            break;
+        }
+    } else {
+        currDithering.depth = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE;
+        currDithering.mode = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE;
+    }
+
+    *pCurrDithering = currDithering;
+}
+
+void nvSetDitheringEvo(
+    NVDispEvoPtr pDispEvo,
+    const NvU32 head,
+    const NVDpyAttributeCurrentDitheringConfig *pCurrDithering,
+    NVEvoUpdateState *pUpdateState)
+{
+    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    NvU32 i;
+    NvU32 algo = NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN;
+    NvU32 type = NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF;
+    NvU32 enabled = pCurrDithering->enabled;
+
+    for (i = 0; i < ARRAY_LEN(ditherModeTable); i++) {
+        if (ditherModeTable[i].nvKmsDitherMode == pCurrDithering->mode) {
+            algo = ditherModeTable[i].algo;
+            break;
+        }
+    }
+    nvAssert(i < ARRAY_LEN(ditherModeTable));
+
+    for (i = 0; i < ARRAY_LEN(ditherDepthTable); i++) {
+        if (ditherDepthTable[i].nvKmsDitherDepth == pCurrDithering->depth) {
+            type = ditherDepthTable[i].type;
+            break;
+        }
+    }
+    nvAssert(i < ARRAY_LEN(ditherDepthTable));
+
+    /*
+     * Make sure algo is a recognizable value that we will be able to program
+     * in hardware.
+     */
+    if (algo == NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN) {
+        algo = NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2;
+    }
+
+    nvPushEvoSubDevMaskDisp(pDispEvo);
+    pDevEvo->hal->SetDither(pDispEvo, head, enabled, type, algo,
+                            pUpdateState);
+    nvPopEvoSubDevMask(pDevEvo);
+}
+
+/*
+ * HeadCanStereoLock() - Return whether or not this head can use stereo lock
+ * mode. This can only be called from UpdateEvoLockState, when the pending
+ * interlaced/locked values are still in the head control assembly structure.
+ */
+static NvBool HeadCanStereoLock(NVDevEvoPtr pDevEvo, int sd, int head)
+{
+    NVEvoHeadControlPtr pHC = &pDevEvo->gpus[sd].headControlAssy[head];
+
+    return (!pHC->interlaced && !pHC->mergeMode &&
+            ((pHC->serverLock != NV_EVO_NO_LOCK) ||
+             (pHC->clientLock != NV_EVO_NO_LOCK)));
+}
+
+/*
+ * SetStereoLockMode() - For stereo lock mode, we need to notify
+ * the gsync board that this GPU requires stereo lock mode.
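+ *
+ * Only the P2060/P2061 framelock boards support this control; for any
+ * other configuration the function below is a no-op that reports success.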
+ */
+static NvBool SetStereoLockMode(NVDispEvoPtr pDispEvo, NvBool stereoLocked)
+{
+    NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS
+        statusParams = { 0 };
+    NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
+
+    if (!pFrameLockEvo ||
+        ((pFrameLockEvo->boardId != NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2060) &&
+         (pFrameLockEvo->boardId != NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2061))) {
+        return TRUE;
+    }
+
+    statusParams.gpuId = nvGpuIdOfDispEvo(pDispEvo);
+    statusParams.enable = stereoLocked ? 1 : 0;
+
+    if (nvRmApiControl(nvEvoGlobal.clientHandle,
+                       pFrameLockEvo->device,
+                       NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE,
+                       &statusParams,
+                       sizeof(statusParams)) != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"Failed to set stereo lock mode");
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+/*
+ * SyncEvoLockState()
+ *
+ * Set the Assembly state based on the current Armed state. This should be
+ * called before transitioning between states in the EVO state machine.
+ */
+static void SyncEvoLockState(void)
+{
+    NVDispEvoPtr pDispEvo;
+    unsigned int sd;
+    NVDevEvoPtr pDevEvo;
+
+    FOR_ALL_EVO_DEVS(pDevEvo) {
+
+        if (!pDevEvo->gpus) {
+            continue;
+        }
+
+        if (pDevEvo->displayHandle == 0) {
+            continue;
+        }
+
+        FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+            NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
+            NvU32 updateHeadMask = nvGetActiveHeadMask(pDispEvo);
+            unsigned int head;
+
+            /* Update the cached HEAD_SET_CONTROL EVO method state */
+            FOR_ALL_HEADS(head, updateHeadMask) {
+                pEvoSubDev->headControlAssy[head] =
+                    pEvoSubDev->headControl[head];
+            }
+
+            /*
+             * The following are probably not necessary, since no other
+             * code touches them (as opposed to headControl above which
+             * is updated beyond the scope of the state machine). But
+             * update them here anyway to be consistent.
+             */
+            pEvoSubDev->frameLockClientMaskAssy =
+                pEvoSubDev->frameLockClientMaskArmed;
+            pEvoSubDev->frameLockServerMaskAssy =
+                pEvoSubDev->frameLockServerMaskArmed;
+            pEvoSubDev->frameLockExtRefClkMaskAssy =
+                pEvoSubDev->frameLockExtRefClkMaskArmed;
+        }
+    }
+}
+
+/*
+ * Determine a unique index for the given (pDevEvo, sd) tuple.
+ * This is used to index into an array of size NV_MAX_DEVICES.
+ *
+ * It would be more straightforward to use a two-dimensional array of
+ * NV_MAX_DEVICES x NV_MAX_SUBDEVICES and index by (devIndex, sd), but
+ * that makes the array too large to fit on the stack. This is safe because
+ * we should only ever have at most NV_MAX_DEVICES GPUs in the system
+ * total, although at any given time they may be split into many single-GPU
+ * devices or a small number of many-GPU SLI devices.
+ */
+static NvU32 GpuIndex(const NVDevEvoRec *pDevEvo, NvU32 sd)
+{
+    const NVDevEvoRec *pDevEvoIter;
+    NvU32 index = 0;
+
+    nvAssert(sd < pDevEvo->numSubDevices);
+
+    FOR_ALL_EVO_DEVS(pDevEvoIter) {
+        if (pDevEvoIter == pDevEvo) {
+            index += sd;
+            nvAssert(index < NV_MAX_DEVICES);
+            return index;
+        }
+        index += pDevEvoIter->numSubDevices;
+    }
+
+    nvAssert(!"Failed to look up GPU index");
+    return 0;
+}
+
+NvU32 nvGetRefreshRate10kHz(const NVHwModeTimingsEvo *pTimings)
+{
+    const NvU32 totalPixels = pTimings->rasterSize.x * pTimings->rasterSize.y;
+
+    /*
+     * pTimings->pixelClock is in 1000/s
+     * we want 0.0001/s
+     * factor = 1000/0.0001 = 10000000.
+     */
+    NvU32 factor = 10000000;
+
+    if (pTimings->doubleScan) factor /= 2;
+    if (pTimings->interlaced) factor *= 2;
+
+    if (totalPixels == 0) {
+        return 0;
+    }
+
+    return axb_div_c(pTimings->pixelClock, factor, totalPixels);
+}
+
+/*!
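+ * A worked example for nvGetRefreshRate10kHz() above, using hypothetical
+ * (but standard) CTA-861 1080p60 timings: rasterSize = 2200x1125 and
+ * pixelClock = 148500 (kHz), so
+ *
+ *   148500 * 10000000 / (2200 * 1125) = 600000
+ *
+ * i.e. 60.0000 Hz in 0.0001 Hz units.
+ */
+
+/*!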
+ * Get the current refresh rate for the heads in headMask, in 0.0001 Hz units.
+ * All heads in headMask are expected to have the same refresh rate.
+ */
+static NvU32 GetRefreshRateHeadMask10kHz(const NVDispEvoRec *pDispEvo,
+                                         NvU32 headMask)
+{
+    const NVHwModeTimingsEvo *pTimings = NULL;
+    NvU32 head;
+
+    FOR_ALL_HEADS(head, headMask) {
+        const NVDispHeadStateEvoRec *pHeadState =
+            &pDispEvo->headState[head];
+
+        if (head >= pDispEvo->pDevEvo->numHeads ||
+            pHeadState->activeRmId == 0x0) {
+            continue;
+        }
+
+        if (pTimings == NULL) {
+            pTimings = &pHeadState->timings;
+        } else {
+            nvAssert(pTimings->rasterSize.x ==
+                     pHeadState->timings.rasterSize.x);
+            nvAssert(pTimings->rasterSize.y ==
+                     pHeadState->timings.rasterSize.y);
+            nvAssert(pTimings->doubleScan == pHeadState->timings.doubleScan);
+            nvAssert(pTimings->interlaced == pHeadState->timings.interlaced);
+            nvAssert(pTimings->pixelClock == pHeadState->timings.pixelClock);
+        }
+    }
+
+    if (pTimings == NULL) {
+        return 0;
+    }
+
+    return nvGetRefreshRate10kHz(pTimings);
+}
+
+/*!
+ * Return the mask of RM display IDs for the heads in the given head mask.
+ */
+static NvU32 HeadMaskToActiveRmIdMask(const NVDispEvoRec *pDispEvo,
+                                      const NvU32 headMask)
+{
+    NvU32 head;
+    NvU32 rmDisplayMask = 0;
+
+    for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
+        if ((NVBIT(head) & headMask) != 0x0) {
+            rmDisplayMask |=
+                pDispEvo->headState[head].activeRmId;
+        }
+    }
+
+    return rmDisplayMask;
+}
+
+static NvBool FramelockSetControlSync(NVDispEvoPtr pDispEvo, const NvU32 headMask,
+                                      NvBool server)
+{
+    NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
+    NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS gsyncSetControlSyncParams = { 0 };
+    NvU32 ret;
+
+    /* There can only be one server. */
+
+    nvAssert(!server || (nvPopCount32(headMask) == 1));
+
+    gsyncSetControlSyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo);
+    gsyncSetControlSyncParams.master = server;
+    gsyncSetControlSyncParams.displays =
+        HeadMaskToActiveRmIdMask(pDispEvo, headMask);
+
+    if (gsyncSetControlSyncParams.displays == 0x0) {
+        return FALSE;
+    }
+
+    gsyncSetControlSyncParams.refresh =
+        GetRefreshRateHeadMask10kHz(pDispEvo, headMask);
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pFrameLockEvo->device,
+                         NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC,
+                         &gsyncSetControlSyncParams,
+                         sizeof(gsyncSetControlSyncParams));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+NvBool nvFramelockSetControlUnsyncEvo(NVDispEvoPtr pDispEvo, const NvU32 headMask,
+                                      NvBool server)
+{
+    NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
+    NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS
+        gsyncSetControlUnsyncParams = { 0 };
+    NvU32 ret;
+
+    gsyncSetControlUnsyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo);
+    gsyncSetControlUnsyncParams.master = server;
+    gsyncSetControlUnsyncParams.displays =
+        HeadMaskToActiveRmIdMask(pDispEvo, headMask);
+
+    if (gsyncSetControlUnsyncParams.displays == 0x0) {
+        return FALSE;
+    }
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pFrameLockEvo->device,
+                         NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_UNSYNC,
+                         &gsyncSetControlUnsyncParams,
+                         sizeof(gsyncSetControlUnsyncParams));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+/*
+ * UpdateEvoLockState()
+ *
+ * Update the hardware based on the Assembly state, if it is different from the
+ * current Armed state. This should be called after transitioning through
+ * states in the EVO state machine to propagate all of the necessary values to
+ * HW.
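+ *
+ * Throughout, "assy" (assembly) is the desired state and "armed" is the
+ * state currently reflected in hardware; this function's job is to promote
+ * assy to armed.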
+ */
+static void UpdateEvoLockState(void)
+{
+    NVDispEvoPtr pDispEvo;
+    NVFrameLockEvoPtr pFrameLockEvo;
+    unsigned int sd;
+    NVDevEvoPtr pDevEvo;
+    NvBool ret;
+    enum {
+        FIRST_ITERATION,
+        DISABLE_UNNEEDED_CLIENTS = FIRST_ITERATION,
+        DISABLE_UNNEEDED_SERVER,
+        COMPUTE_HOUSE_SYNC,
+        UPDATE_HOUSE_SYNC,
+        ENABLE_SERVER,
+        ENABLE_CLIENTS,
+        LAST_ITERATION = ENABLE_CLIENTS,
+    } iteration;
+    struct {
+        unsigned char disableServer:1;
+        unsigned char disableClient:1;
+        unsigned char enableServer:1;
+        unsigned char enableClient:1;
+    } cache[NV_MAX_DEVICES][NVKMS_MAX_HEADS_PER_DISP];
+
+    nvkms_memset(cache, 0, sizeof(cache));
+
+    /* XXX NVKMS TODO: idle base channel, first? */
+
+    /*
+     * Stereo lock mode is enabled if all heads are either raster locked or
+     * frame locked, and if all heads are not using interlaced mode.
+     */
+    FOR_ALL_EVO_DEVS(pDevEvo) {
+        if (!pDevEvo->gpus) {
+            continue;
+        }
+        FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+            NvBool gpuCanStereoLock = TRUE;
+            NvBool testedOneHead = FALSE;
+
+            /*
+             * If at least one head is not locked or driving an interlaced
+             * mode, then no heads on this GPU will use stereo lock mode.
+             */
+            NvU32 head;
+            for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
+                NVEvoHeadControlPtr pHC = &pDevEvo->gpus[sd].headControlAssy[head];
+
+                if (!nvHeadIsActive(pDispEvo, head) ||
+                    ((pHC->serverLock == NV_EVO_NO_LOCK) &&
+                     (pHC->clientLock == NV_EVO_NO_LOCK))) {
+                    /*
+                     * If the heads aren't scan locked then we should skip
+                     * them as if they aren't connected. NOTE this
+                     * conservative approach means that we won't disable
+                     * StereoLockMode when frameLock is turned off. This
+                     * should be harmless.
+                     */
+                    continue;
+                }
+                testedOneHead = TRUE;
+                if (!HeadCanStereoLock(pDevEvo, sd, head)) {
+                    gpuCanStereoLock = FALSE;
+                }
+            }
+            /*
+             * Don't set StereoLockMode for screenless GPUs. As above we'll also
+             * count heads that can't stereoLock as unconnected.
+             */
+            if (!testedOneHead) {
+                continue;
+            }
+
+            /*
+             * Notify the framelock board whether or not we will use stereo
+             * lock mode. If that fails, don't enable stereo lock mode on
+             * the GPU.
+             */
+            if (!SetStereoLockMode(pDispEvo, gpuCanStereoLock)) {
+                gpuCanStereoLock = FALSE;
+            }
+
+            /*
+             * Cache whether or not we can use stereo lock mode, so we know
+             * whether or not to enable stereo lock mode on the GPU during
+             * SetHeadControl
+             */
+            for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
+                if (nvHeadIsActive(pDispEvo, head)) {
+                    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
+                    pEvoSubDev->headControlAssy[head].stereoLocked =
+                        gpuCanStereoLock;
+                }
+            }
+        }
+    }
+
+    /*
+     * Go through every GPU on the system, making its framelock state match the
+     * assembly state that we've saved.
+     *
+     * We do this in six steps, in order to keep the overall system state sane
+     * throughout:
+     * 1. Disable any clients we no longer need
+     * 2. Disable any server we no longer need
+     * 3. Compute which framelock devices need house sync
+     * 4. Update framelock devices with new house sync info
+     * 5. Enable new server
+     * 6.
Enable new clients + */ + for (iteration = FIRST_ITERATION; + iteration <= LAST_ITERATION; + iteration++) { + + if (iteration == COMPUTE_HOUSE_SYNC) { + /* First, clear assy state */ + FOR_ALL_EVO_FRAMELOCKS(pFrameLockEvo) { + pFrameLockEvo->houseSyncAssy = FALSE; + } + } + + if (iteration == UPDATE_HOUSE_SYNC) { + FOR_ALL_EVO_FRAMELOCKS(pFrameLockEvo) { + /* + * Since nvFrameLockSetUseHouseSyncEvo sets house sync + * output mode in addition to house sync input mode and + * input polarity, this needs to be done unconditionally, + * even if a house sync state transition hasn't occurred. + */ + if (!nvFrameLockSetUseHouseSyncEvo( + pFrameLockEvo, pFrameLockEvo->houseSyncAssy)) { + nvAssert(!"Setting house sync failed"); + } else { + pFrameLockEvo->houseSyncArmed = + pFrameLockEvo->houseSyncAssy; + } + } + + continue; + } + + FOR_ALL_EVO_DEVS(pDevEvo) { + + if (!pDevEvo->gpus) { + continue; + } + + if (pDevEvo->displayHandle == 0) { + /* + * This may happen during init, when setting initial modes on + * one device while other devices have not yet been allocated. + * Skip these devices for now; we'll come back later when + * they've been brought up. + */ + continue; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvBool server = FALSE; + NvU32 needsEnableMask = 0, needsDisableMask = 0; + unsigned int head; + + switch (iteration) { + case COMPUTE_HOUSE_SYNC: + /* Accumulate house sync across pDisps */ + if (pEvoSubDev->frameLockHouseSync) { + pDispEvo->pFrameLockEvo->houseSyncAssy = TRUE; + } + break; + case DISABLE_UNNEEDED_CLIENTS: + needsDisableMask = pEvoSubDev->frameLockClientMaskArmed & + ~pEvoSubDev->frameLockClientMaskAssy; + server = FALSE; + break; + case DISABLE_UNNEEDED_SERVER: + needsDisableMask = pEvoSubDev->frameLockServerMaskArmed & + ~pEvoSubDev->frameLockServerMaskAssy; + server = TRUE; + break; + case ENABLE_SERVER: + needsEnableMask = pEvoSubDev->frameLockServerMaskAssy & + ~pEvoSubDev->frameLockServerMaskArmed; + server = TRUE; + break; + case ENABLE_CLIENTS: + needsEnableMask = pEvoSubDev->frameLockClientMaskAssy & + ~pEvoSubDev->frameLockClientMaskArmed; + server = FALSE; + break; + case UPDATE_HOUSE_SYNC: + nvAssert(!"Shouldn't reach here"); + break; + } + + if (needsDisableMask) { + ret = nvFramelockSetControlUnsyncEvo(pDispEvo, + needsDisableMask, + server); + nvAssert(ret); + + if (ret) { + if (server) { + pEvoSubDev->frameLockServerMaskArmed &= + ~needsDisableMask; + + FOR_ALL_HEADS(head, needsDisableMask) { + cache[GpuIndex(pDevEvo, sd)][head].disableServer = TRUE; + } + } else { + pEvoSubDev->frameLockClientMaskArmed &= + ~needsDisableMask; + + FOR_ALL_HEADS(head, needsDisableMask) { + cache[GpuIndex(pDevEvo, sd)][head].disableClient = TRUE; + } + } + } + } + if (needsEnableMask) { + ret = FramelockSetControlSync(pDispEvo, + needsEnableMask, + server); + + nvAssert(ret); + + if (ret) { + if (server) { + pEvoSubDev->frameLockServerMaskArmed |= + needsEnableMask; + + FOR_ALL_HEADS(head, needsEnableMask) { + cache[GpuIndex(pDevEvo, sd)][head].enableServer = TRUE; + } + } else { + pEvoSubDev->frameLockClientMaskArmed |= + needsEnableMask; + + FOR_ALL_HEADS(head, needsEnableMask) { + cache[GpuIndex(pDevEvo, sd)][head].enableClient = TRUE; + } + } + } + } + + /* After the above process, we should have "promoted" assy + * to armed */ + if (iteration == LAST_ITERATION) { + nvAssert(pEvoSubDev->frameLockServerMaskArmed == + pEvoSubDev->frameLockServerMaskAssy); + nvAssert(pEvoSubDev->frameLockClientMaskArmed == + 
pEvoSubDev->frameLockClientMaskAssy); + } + } + } + } + + /* + * Update the EVO HW state. Make this a separate set of loops to not + * confuse the one above + */ + FOR_ALL_EVO_DEVS(pDevEvo) { + + if (!pDevEvo->gpus) { + continue; + } + + if (pDevEvo->displayHandle == 0) { + continue; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvBool needUpdate = FALSE; + NVEvoUpdateState updateState = { }; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvU32 extRefClkMaskAssy, extRefClkUpdateMask; + NvU32 possibleHeadMask; + NvBool refClkChanged[NVKMS_MAX_HEADS_PER_DISP] = { FALSE }; + unsigned int head; + + extRefClkMaskAssy = pEvoSubDev->frameLockExtRefClkMaskAssy; + + /* Set the external reference clock, if different */ + extRefClkUpdateMask = extRefClkMaskAssy ^ + pEvoSubDev->frameLockExtRefClkMaskArmed; + + FOR_ALL_HEADS(head, extRefClkUpdateMask) { + NvBool extRefClkNeeded = + !!(extRefClkMaskAssy & (1 << head)); + + SetRefClk(pDevEvo, sd, head, extRefClkNeeded, &updateState); + refClkChanged[head] = TRUE; + + /* Update armed state for this head */ + pEvoSubDev->frameLockExtRefClkMaskArmed = + (pEvoSubDev->frameLockExtRefClkMaskArmed & + (~(1 << head))) | + (extRefClkMaskAssy & (1 << head)); + } + /* After the above process, the armed state should match + * assembly state */ + nvAssert(extRefClkMaskAssy == + pEvoSubDev->frameLockExtRefClkMaskArmed); + + /* Update the HEAD_SET_CONTROL EVO method state */ + + possibleHeadMask = nvGetActiveHeadMask(pDispEvo); + + FOR_ALL_HEADS(head, possibleHeadMask) { + if (nvkms_memcmp(&pEvoSubDev->headControl[head], + &pEvoSubDev->headControlAssy[head], + sizeof(NVEvoHeadControl))) { + + nvPushEvoSubDevMask(pDevEvo, 1 << sd); + + pEvoSubDev->headControl[head] = + pEvoSubDev->headControlAssy[head]; + pDevEvo->hal->SetHeadControl(pDevEvo, sd, head, + &updateState); + needUpdate = TRUE; + + nvPopEvoSubDevMask(pDevEvo); + } else if (refClkChanged[head]) { + needUpdate = TRUE; + } + } + + if (needUpdate) { + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } + } + } + + /* + * Inform GLS of framelock changes. It uses this information to do things + * like enable fake stereo to get stereo sync when stereo apps start + * without flickering the displays. 
+ */ + for (iteration = FIRST_ITERATION; + iteration <= LAST_ITERATION; + iteration++) { + + FOR_ALL_EVO_DEVS(pDevEvo) { + + if (!pDevEvo->gpus) { + continue; + } + + if (pDevEvo->displayHandle == 0) { + continue; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 head; + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NvBool sendEvent = FALSE; + NvBool enable = FALSE, server = FALSE; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + switch (iteration) { + case DISABLE_UNNEEDED_CLIENTS: + if (cache[GpuIndex(pDevEvo, sd)][head].disableClient) { + enable = FALSE; + server = FALSE; + sendEvent = TRUE; + } + break; + case DISABLE_UNNEEDED_SERVER: + if (cache[GpuIndex(pDevEvo, sd)][head].disableServer) { + enable = FALSE; + server = TRUE; + sendEvent = TRUE; + } + break; + case ENABLE_SERVER: + if (cache[GpuIndex(pDevEvo, sd)][head].enableServer) { + enable = TRUE; + server = TRUE; + sendEvent = TRUE; + } + break; + case ENABLE_CLIENTS: + if (cache[GpuIndex(pDevEvo, sd)][head].enableClient) { + enable = TRUE; + server = FALSE; + sendEvent = TRUE; + } + break; + case UPDATE_HOUSE_SYNC: + case COMPUTE_HOUSE_SYNC: + sendEvent = FALSE; + break; + } + + if (sendEvent) { + nvUpdateGLSFramelock(pDispEvo, head, enable, server); + } + } + } + } + } +} + +/* + * For every head in the headMask on pDispEvo, construct a prioritized + * list of heads and call into the EVO locking state machine to + * perform the given transition. + * + * Return the list of heads that actually succeeded. + */ +static NvU32 applyActionForHeads(NVDispEvoPtr pDispEvo, + const NvU32 headMask, + NVEvoLockAction action) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + NvU32 appliedHeadMask = 0; + NvU32 head; + + FOR_ALL_HEADS(head, headMask) { + NvU32 pHeads[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, }; + unsigned int i = 0; + NvU32 tmpHead, usedHeadMask = 0; + + /* Fill in the array starting with this head, then with the others in + * the list, and finally any other active heads */ + pHeads[i++] = head; + usedHeadMask |= (1 << head); + + FOR_ALL_HEADS(tmpHead, headMask) { + if (usedHeadMask & (1 << tmpHead)) { + continue; + } + pHeads[i++] = tmpHead; + usedHeadMask |= (1 << tmpHead); + } + + for (tmpHead = 0; tmpHead < NVKMS_MAX_HEADS_PER_DISP; tmpHead++) { + if (!nvHeadIsActive(pDispEvo, tmpHead)) { + continue; + } + if (usedHeadMask & (1 << tmpHead)) { + continue; + } + pHeads[i++] = tmpHead; + usedHeadMask |= (1 << tmpHead); + } + + nvAssert(i <= NVKMS_MAX_HEADS_PER_DISP); + pHeads[i] = NV_INVALID_HEAD; + + if (pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, pHeads)) { + appliedHeadMask |= (1 << head); + } + } + + return appliedHeadMask; +} + +// +// Set up raster lock and frame lock for external frame lock +// + +NvBool nvEnableFrameLockEvo(NVDispEvoPtr pDispEvo) +{ + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvU32 serverHead = nvGetFramelockServerHead(pDispEvo); + NvU32 clientHeadsMask = nvGetFramelockClientHeadsMask(pDispEvo); + NvU32 appliedHeadMask; + NvU32 activeClientHeadsMask; + NvBool useHouseSync = FALSE; + NvU32 head; + + nvAssert(pDispEvo->framelock.currentServerHead == NV_INVALID_HEAD); + nvAssert(pDispEvo->framelock.currentClientHeadsMask == 0x0); + + if (serverHead != NV_INVALID_HEAD && + (pFrameLockEvo->houseSyncMode == + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_INPUT)) { + + NvS64 houseSync; + + /* + * Only use house sync if present. 
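+         * ("Present" here means the framelock board currently reports an
+         * incoming house sync signal, per the status query just below.)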
+ * XXX what happens when house sync is unplugged? why not enable it + * now and let the FPGA decide? + */ + if (!nvFrameLockGetStatusEvo(pFrameLockEvo, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS, + &houseSync)) { + return FALSE; + } + + useHouseSync = (houseSync != 0); + } + + /* Initialize the assembly state */ + SyncEvoLockState(); + + /* Enable the server */ + if ((serverHead != NV_INVALID_HEAD) && + nvHeadIsActive(pDispEvo, serverHead)) { + NvU32 serverHeadMask; + + serverHeadMask = (1 << serverHead); + appliedHeadMask = applyActionForHeads(pDispEvo, serverHeadMask, + NV_EVO_ADD_FRAME_LOCK_SERVER); + + nvAssert(appliedHeadMask == serverHeadMask); + pDispEvo->framelock.currentServerHead = serverHead; + + /* Enable house sync, if requested */ + if (useHouseSync) { + appliedHeadMask = + applyActionForHeads(pDispEvo, serverHeadMask, + NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC); + + if (appliedHeadMask == serverHeadMask) { + pDispEvo->framelock.currentHouseSync = TRUE; + } + } + } + + /* Enable the clients */ + activeClientHeadsMask = 0; + FOR_ALL_HEADS(head, clientHeadsMask) { + if (nvHeadIsActive(pDispEvo, head)) { + activeClientHeadsMask |= (1 << head); + } + } + appliedHeadMask = applyActionForHeads(pDispEvo, activeClientHeadsMask, + NV_EVO_ADD_FRAME_LOCK_CLIENT); + + nvAssert(appliedHeadMask == activeClientHeadsMask); + pDispEvo->framelock.currentClientHeadsMask = activeClientHeadsMask; + + /* Finally, update the hardware */ + UpdateEvoLockState(); + + return TRUE; +} + +// +// Disable raster lock and frame lock +// + +NvBool nvDisableFrameLockEvo(NVDispEvoPtr pDispEvo) +{ + NvU32 serverHead = nvGetFramelockServerHead(pDispEvo); + NvU32 clientHeadsMask = nvGetFramelockClientHeadsMask(pDispEvo); + NvU32 activeClientHeadsMask; + NvU32 appliedHeadMask; + NvU32 head; + + /* Initialize the assembly state */ + SyncEvoLockState(); + + /* Disable the clients */ + activeClientHeadsMask = 0; + FOR_ALL_HEADS(head, clientHeadsMask) { + if (nvHeadIsActive(pDispEvo, head)) { + activeClientHeadsMask |= (1 << head); + } + } + appliedHeadMask = applyActionForHeads(pDispEvo, + activeClientHeadsMask, + NV_EVO_REM_FRAME_LOCK_CLIENT); + + nvAssert(appliedHeadMask == activeClientHeadsMask); + pDispEvo->framelock.currentClientHeadsMask &= ~activeClientHeadsMask; + + /* Disable house sync */ + if (serverHead != NV_INVALID_HEAD && + nvHeadIsActive(pDispEvo, serverHead)) { + NvU32 serverHeadMask = (1 << serverHead); + + if (pDispEvo->framelock.currentHouseSync) { + appliedHeadMask = + applyActionForHeads(pDispEvo, serverHeadMask, + NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC); + + nvAssert(appliedHeadMask == serverHeadMask); + pDispEvo->framelock.currentHouseSync = FALSE; + } + + /* Disable the server */ + appliedHeadMask = applyActionForHeads(pDispEvo, serverHeadMask, + NV_EVO_REM_FRAME_LOCK_SERVER); + nvAssert(appliedHeadMask == serverHeadMask); + if (appliedHeadMask == serverHeadMask) { + pDispEvo->framelock.currentServerHead = NV_INVALID_HEAD; + } + } + + /* Finally, update the hardware */ + UpdateEvoLockState(); + + return TRUE; +} + +// +// Enable/Disable External Reference Clock Sync +// +// This function is used by frame lock to make the GPU sync to +// the external device's reference clock. 
+// +static void SetRefClk(NVDevEvoPtr pDevEvo, + NvU32 sd, NvU32 head, NvBool external, + NVEvoUpdateState *updateState) +{ + nvPushEvoSubDevMask(pDevEvo, 1 << sd); + + pDevEvo->hal->SetHeadRefClk(pDevEvo, head, external, updateState); + + nvPopEvoSubDevMask(pDevEvo); +} + + +// +// Query raster lock state +// + +NvBool nvQueryRasterLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev; + const NvU32 apiHead = pDpyEvo->apiHead; + const NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead); + NVEvoHeadControlPtr pHC; + + /* + * XXX[2Heads1OR] The EVO lock state machine is not currently supported with + * 2Heads1OR, the api head is expected to be mapped onto a single + * hardware head (which is the primary hardware head) if 2Heads1OR is not + * active and the EVO lock state machine is in use. + */ + if ((apiHead == NV_INVALID_HEAD) || + (nvPopCount32(pDispEvo->apiHeadState[apiHead].hwHeadsMask) != 1)) { + return FALSE; + } + + if ((head == NV_INVALID_HEAD) || (pDevEvo->gpus == NULL)) { + return FALSE; + } + + pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + pHC = &pEvoSubDev->headControl[head]; + + *val = pHC->serverLock == NV_EVO_RASTER_LOCK || + pHC->clientLock == NV_EVO_RASTER_LOCK; + + return TRUE; +} + +void nvInvalidateRasterLockGroupsEvo(void) +{ + if (globalRasterLockGroups) { + nvFree(globalRasterLockGroups); + + globalRasterLockGroups = NULL; + numGlobalRasterLockGroups = 0; + } +} + +/* + * Return the surface format usage bounds that NVKMS will program for the + * requested format. + * + * For an RGB XBPP format, this function will return a bitmask of all RGB YBPP + * formats, where Y <= X. + * + * For a YUV format, this function will return a bitmask of all YUV formats + * that: + * - Have the same number of planes as the requested format + * - Have the same chroma decimation factors as the requested format + * - Have the same or lower effective fetch bpp as the requested format + * + * For example, if the requested format is YUV420 12-bit SP, this function will + * include all YUV420 8/10/12-bit SP formats. 
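+ *
+ * Likewise, a packed RGB 32-bpp request would also yield the 16-bpp RGB
+ * formats (Y <= X above), but never any YUV format.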
+ */ +NvU64 nvEvoGetFormatsWithEqualOrLowerUsageBound( + const enum NvKmsSurfaceMemoryFormat format, + NvU64 supportedFormatsCapMask) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(format); + NvU64 supportedFormatsUsageBound = 0; + NvU8 formatIdx; + + FOR_EACH_INDEX_IN_MASK(64, formatIdx, supportedFormatsCapMask) { + + const NvKmsSurfaceMemoryFormatInfo *pOtherFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(formatIdx); + + if ((pFormatInfo->isYUV != pOtherFormatInfo->isYUV) || + (pFormatInfo->numPlanes != pOtherFormatInfo->numPlanes)) { + continue; + } + + if (pFormatInfo->isYUV) { + if ((pFormatInfo->yuv.horizChromaDecimationFactor != + pOtherFormatInfo->yuv.horizChromaDecimationFactor) || + (pFormatInfo->yuv.vertChromaDecimationFactor != + pOtherFormatInfo->yuv.vertChromaDecimationFactor) || + (pFormatInfo->yuv.depthPerComponent < + pOtherFormatInfo->yuv.depthPerComponent)) { + continue; + } + } else { + if (pFormatInfo->rgb.bitsPerPixel < + pOtherFormatInfo->rgb.bitsPerPixel) { + continue; + } + } + + supportedFormatsUsageBound |= NVBIT64(formatIdx); + + } FOR_EACH_INDEX_IN_MASK_END; + + return supportedFormatsUsageBound; +} + +// +// Enable or disable flip lock (or query state) +// + +NvBool nvUpdateFlipLockEvoOneHead(NVDispEvoPtr pDispEvo, const NvU32 head, + NvU32 *val, NvBool set, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + + if (set) { + // make sure we're dealing with a bool + NvBool setVal = !!*val; + + if (setVal ^ pHC->flipLock) { + NvBool isMethodPending; + NvBool changed = FALSE; + + if (!pDevEvo->hal-> + IsChannelMethodPending(pDevEvo, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + pDispEvo->displayOwner, + &isMethodPending) || + isMethodPending) { + nvAssert(!"Base channel not idle"); + return FALSE; + } + + if (setVal) { + /* make sure flip lock is not prohibited and raster lock is enabled + * + * XXX: [2Heads1OR] If head is locked in the merge mode then + * its flip-lock state can not be changed. + */ + if ((pHC->serverLock == NV_EVO_NO_LOCK && + pHC->clientLock == NV_EVO_NO_LOCK) || + HEAD_MASK_QUERY(pEvoSubDev->flipLockProhibitedHeadMask, + head) || + pHC->mergeMode) { + return FALSE; + } + pHC->flipLock = TRUE; + changed = TRUE; + } else { + /* Only actually disable fliplock if it's not needed for SLI. + * + * XXX: [2Heads1OR] If head is locked in the merge mode then + * its flip-lock state can not be changed. + */ + if (!pHC->mergeMode && + !HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForSliHeadMask, + head)) { + pHC->flipLock = FALSE; + changed = TRUE; + } + } + + if (changed) { + EvoUpdateHeadParams(pDispEvo, head, updateState); + } + } + + /* Remember if we currently need fliplock enabled for framelock */ + pEvoSubDev->flipLockEnabledForFrameLockHeadMask = + setVal ? + HEAD_MASK_SET(pEvoSubDev->flipLockEnabledForFrameLockHeadMask, head) : + HEAD_MASK_UNSET(pEvoSubDev->flipLockEnabledForFrameLockHeadMask, head); + } + + /* + * XXX should the query return the cached "enabled for framelock" state + * instead? 
+ */ + *val = pHC->flipLock; + + + return TRUE; +} + + +static NvBool UpdateFlipLock50(const NVDpyEvoRec *pDpyEvo, + NvU32 *val, NvBool set) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + const NvU32 apiHead = pDpyEvo->apiHead; + const NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead); + NVEvoUpdateState updateState = { }; + NvBool ret; + + if (head == NV_INVALID_HEAD) { + return FALSE; + } + + ret = nvUpdateFlipLockEvoOneHead(pDispEvo, head, val, set, + &updateState); + + if (set && ret) { + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } + + return ret; +} + +NvBool nvSetFlipLockEvo(NVDpyEvoPtr pDpyEvo, NvS64 value) +{ + NvU32 val32 = !!value; + return UpdateFlipLock50(pDpyEvo, &val32, TRUE /* set */); +} + +NvBool nvGetFlipLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + NvBool ret; + NvU32 val32 = 0; + ret = UpdateFlipLock50(pDpyEvo, &val32, FALSE /* set */); + + if (ret) { + *pValue = !!val32; + } + + return ret; +} + +static void ProhibitFlipLock50(NVDispEvoPtr pDispEvo) +{ + NvU32 head; + NvBool needUpdate = FALSE; + NVEvoUpdateState updateState = { }; + + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + /* + * XXX: [2Heads1OR] If head is locked in the merge mode then its flip-lock + * state can not be changed. + */ + if (!nvHeadIsActive(pDispEvo, head) || pHC->mergeMode) { + continue; + } + + if (HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForFrameLockHeadMask, + head)) { + nvAssert(!"Can not prohibit flip lock " + "because it is already enabled for frame lock"); + continue; + } + + if (pHC->flipLock) { + needUpdate = TRUE; + + pHC->flipLock = FALSE; + EvoUpdateHeadParams(pDispEvo, head, &updateState); + } + + pEvoSubDev->flipLockProhibitedHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockProhibitedHeadMask, head); + } + + if (needUpdate) { + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } +} + +static void AllowFlipLock50(NVDispEvoPtr pDispEvo) +{ + NvU32 head; + NvBool needUpdate = FALSE; + NVEvoUpdateState updateState = { }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + + /* + * XXX: [2Heads1OR] If head is locked in the merge mode then its flip-lock + * state can not be changed. + */ + if (!nvHeadIsActive(pDispEvo, head) || pHC->mergeMode) { + continue; + } + + if (!pHC->flipLock && + HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForSliHeadMask, + head)) { + needUpdate = TRUE; + + nvAssert(pHC->serverLock != NV_EVO_NO_LOCK || + pHC->clientLock != NV_EVO_NO_LOCK); + + pHC->flipLock = TRUE; + EvoUpdateHeadParams(pDispEvo, head, &updateState); + } + + pEvoSubDev->flipLockProhibitedHeadMask = + HEAD_MASK_UNSET(pEvoSubDev->flipLockProhibitedHeadMask, head); + } + + if (needUpdate) { + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } +} + +NvBool nvAllowFlipLockEvo(NVDispEvoPtr pDispEvo, NvS64 value) +{ + if (value == 0) { + ProhibitFlipLock50(pDispEvo); + } else { + AllowFlipLock50(pDispEvo); + } + return TRUE; +} + +/*! + * Enable or disable stereo. + * + * XXX SLI+Stereo For now, just set stereo on the display owner. 
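+ *
+ * "Enabled" here means the head's stereo pin is routed to an external
+ * stereo signal pin rather than left on its internal (inactive) pin;
+ * nvGetStereoEvo() below uses the same encoding.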
+ */ +NvBool nvSetStereoEvo( + const NVDispEvoRec *pDispEvo, + const NvU32 head, + NvBool enable) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + NVEvoHeadControlPtr pHC; + NVEvoLockPin pin; + + nvAssert(head != NV_INVALID_HEAD); + + pHC = &pEvoSubDev->headControl[head]; + pin = NV_EVO_LOCK_PIN_INTERNAL(head); + + // make sure we're dealing with a bool + NvBool stereo = !NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin); + + if (enable ^ stereo) { + NVEvoUpdateState updateState = { }; + + if (enable) { + NvU32 otherHead; + NvU32 signalPin; + + // If any other head is already driving stereo, fail + for (otherHead = 0; otherHead < NVKMS_MAX_HEADS_PER_DISP; + otherHead++) { + if (!nvHeadIsActive(pDispEvo, otherHead)) { + continue; + } + if (head == otherHead) { + continue; + } + + const NVEvoHeadControl *pOtherHC = + &pEvoSubDev->headControl[otherHead]; + + if (!NV_EVO_LOCK_PIN_IS_INTERNAL(pOtherHC->stereoPin)) { + return FALSE; + } + } + + signalPin = nvEvoGetPinForSignal(pDispEvo, + pEvoSubDev, + NV_EVO_LOCK_SIGNAL_STEREO); + if (signalPin != NV_EVO_LOCK_PIN_ERROR) { + pin = signalPin; + } + } + + pHC->stereoPin = pin; + + EvoUpdateHeadParams(pDispEvo, head, &updateState); + + // Make method take effect. + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } + + return TRUE; +} + +/*! + * Query stereo state. + * + * XXX SLI+Stereo For now, just get stereo on the display owner. + */ +NvBool nvGetStereoEvo(const NVDispEvoRec *pDispEvo, const NvU32 head) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + NVEvoHeadControlPtr pHC; + + nvAssert(head != NV_INVALID_HEAD); + + pHC = &pEvoSubDev->headControl[head]; + + return !NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin); +} + +void nvSetViewPortsEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeViewPortEvo *pViewPort = &pHeadState->timings.viewPort; + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetViewportInOut(pDevEvo, head, + pViewPort, pViewPort, pViewPort, + updateState); + nvPopEvoSubDevMask(pDevEvo); + + /* + * Specify safe default values of 0 for viewPortPointIn x and y; these + * may be changed when panning out of band of a modeset. 
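+     *
+     * (Out-of-band panning updates only this ViewPortPointIn, via
+     * EvoSetViewportPointIn() below, without a full modeset.)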
+ */ + EvoSetViewportPointIn(pDispEvo, head, 0 /* x */, 0 /* y */, updateState); +} + + + +static void EvoSetViewportPointIn(NVDispEvoPtr pDispEvo, const NvU32 head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetViewportPointIn(pDevEvo, head, x, y, updateState); + nvPopEvoSubDevMask(pDevEvo); +} + +static void EvoSetLUTContextDmaHelper(const NVDispEvoRec *pDispEvo, + const NvU32 head, + NVSurfaceEvoPtr pLutSurfEvo, + NvBool enableBaseLut, + NvBool enableOutputLut, + NVEvoUpdateState *pUpdateState, + NvBool bypassComposition) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 layer, sd = pDispEvo->displayOwner; + NVEvoSubDevHeadStateRec *pSdHeadState = &pDevEvo->gpus[sd].headState[head]; + NVFlipLutHwState inputLut, outputLut; + + if (enableBaseLut) { + inputLut.pLutSurfaceEvo = pLutSurfEvo; + inputLut.offset = offsetof(NVEvoLutDataRec, base); + inputLut.lutEntries = NV_NUM_EVO_LUT_ENTRIES; + } else { + inputLut.pLutSurfaceEvo = NULL; + inputLut.offset = 0; + inputLut.lutEntries = 0; + } + inputLut.vssSegments = 0; + inputLut.fromOverride = FALSE; + + if (enableOutputLut) { + outputLut.pLutSurfaceEvo = pLutSurfEvo; + outputLut.offset = offsetof(NVEvoLutDataRec, output); + outputLut.lutEntries = NV_NUM_EVO_LUT_ENTRIES; + } else { + outputLut.pLutSurfaceEvo = NULL; + outputLut.offset = 0; + outputLut.lutEntries = 0; + } + outputLut.vssSegments = 0; + outputLut.fromOverride = FALSE; + + nvPushEvoSubDevMask(pDevEvo, NVBIT(pDispEvo->displayOwner)); + if ((pSdHeadState->outputLut.pLutSurfaceEvo != outputLut.pLutSurfaceEvo) || + (pSdHeadState->outputLut.offset != outputLut.offset) || + (pSdHeadState->olutFpNormScale != NVKMS_OLUT_FP_NORM_SCALE_DEFAULT)) { + + pSdHeadState->outputLut = outputLut; + pSdHeadState->olutFpNormScale = NVKMS_OLUT_FP_NORM_SCALE_DEFAULT; + pSdHeadState->layer[NVKMS_MAIN_LAYER].tearing = FALSE; + + pDevEvo->hal->SetOutputLut(pDevEvo, sd, head, + &outputLut, + NVKMS_OLUT_FP_NORM_SCALE_DEFAULT, + pUpdateState, + bypassComposition); + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + NVEvoChannelPtr pChannel = pDevEvo->head[head].layer[layer]; + NVFlipChannelEvoHwState *pFlipState = &pSdHeadState->layer[layer]; + + if ((pFlipState->inputLut.pLutSurfaceEvo == inputLut.pLutSurfaceEvo) && + (pFlipState->inputLut.offset == inputLut.offset)) { + continue; + } + + pFlipState->tearing = FALSE; + pFlipState->inputLut = inputLut; + + pDevEvo->hal->Flip(pDevEvo, + pChannel, + pFlipState, + pUpdateState, + bypassComposition); + } + + nvPopEvoSubDevMask(pDevEvo); +} + +void nvEvoSetLUTContextDma(NVDispEvoPtr pDispEvo, + const NvU32 head, NVEvoUpdateState *pUpdateState) +{ + const NVDispHeadStateEvoRec *pDispHeadState = &pDispEvo->headState[head]; + + EvoSetLUTContextDmaHelper(pDispEvo, + head, + pDispHeadState->lut.pCurrSurface, + pDispHeadState->lut.baseLutEnabled, + pDispHeadState->lut.outputLutEnabled, + pUpdateState, + pDispHeadState->bypassComposition); +} + +static void EvoUpdateCurrentPalette(NVDispEvoPtr pDispEvo, const NvU32 apiHead) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + const int dispIndex = pDispEvo->displayOwner; + NvU32 head; + NVEvoUpdateState updateState = { }; + + struct NvKmsSetLutWorkArea *workarea = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_SET_LUT_WORK_AREA, sizeof(*workarea)); + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + 
nvInitFlipEvoHwState(pDevEvo, dispIndex, head, &workarea->head[head].oldState); + + nvEvoSetLUTContextDma(pDispEvo, head, &updateState); + + nvInitFlipEvoHwState(pDevEvo, dispIndex, head, &workarea->head[head].newState); + nvUpdateSurfacesFlipRefCount(pDevEvo, head, &workarea->head[head].newState, TRUE); + } + + /* + * EVO2 does not set LUT context DMA if the core channel + * doesn't have a scanout surface set, in that case there is no update + * state to kickoff. + */ + if (!nvIsUpdateStateEmpty(pDevEvo, &updateState)) { + int notifier; + NvBool notify; + + nvEvoStageLUTNotifier(pDispEvo, apiHead); + notifier = nvEvoCommitLUTNotifiers(pDispEvo); + + nvAssert(notifier >= 0); + + /* + * XXX: The notifier index returned by nvEvoCommitLUTNotifiers here + * shouldn't be < 0 because this function shouldn't have been called + * while a previous LUT update is outstanding. If + * nvEvoCommitLUTNotifiers ever returns -1 for one reason or another, + * using notify and setting notifier to 0 in this manner to avoid + * setting an invalid notifier in the following Update call prevents + * potential kernel panics and Xids. + */ + notify = notifier >= 0; + if (!notify) { + notifier = 0; + } + + // Clear the completion notifier and kick off an update. Wait for it + // here if NV_CTRL_SYNCHRONOUS_PALETTE_UPDATES is enabled. Otherwise, + // don't wait for the notifier -- it'll be checked the next time a LUT + // change request comes in. + EvoUpdateAndKickOffWithNotifier(pDispEvo, + notify, /* notify */ + FALSE, /* sync */ + notifier, + &updateState, + TRUE /* releaseElv */); + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate |= notify; + } + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + nvUpdateSurfacesFlipRefCount(pDevEvo, head, &workarea->head[head].oldState, FALSE); + } + + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_SET_LUT_WORK_AREA); +} + +static void UpdateMaxPixelClock(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NVDpyEvoPtr pDpyEvo; + int i; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) { + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) { + nvDpyProbeMaxPixelClock(pDpyEvo); + } + } +} + +static NvBool AllocEvoSubDevs(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 sd; + + pDevEvo->gpus = nvCalloc(pDevEvo->numSubDevices, sizeof(NVEvoSubDevRec)); + + if (pDevEvo->gpus == NULL) { + return FALSE; + } + + /* Assign the pDispEvo for each evoSubDevice */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + pDevEvo->gpus[sd].pDispEvo = pDispEvo; + } + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + nvAssert(pDevEvo->gpus[sd].pDispEvo != NULL); + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvU32 head; + + pDevEvo->gpus[sd].subDeviceInstance = sd; + // Initialize the lock state. + nvEvoStateStartNoLock(pEvoSubDev); + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[sd].headState[head]; + NvU32 i; + + for (i = 0; i < ARRAY_LEN(pSdHeadState->layer); i++) { + pSdHeadState->layer[i].cscMatrix = NVKMS_IDENTITY_CSC_MATRIX; + } + + pSdHeadState->cursor.cursorCompParams = + nvDefaultCursorCompositionParams(pDevEvo); + pSdHeadState->olutFpNormScale = NVKMS_OLUT_FP_NORM_SCALE_DEFAULT; + } + } + + return TRUE; +} + + +// Replace default cursor composition params when zeroed-out values are unsupported. 
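+// Only blend mode slot 1 is adjusted: prefer OPAQUE when the hardware
+// supports it, otherwise fall back to premultiplied alpha, per the cursor
+// composition capabilities.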
+struct NvKmsCompositionParams nvDefaultCursorCompositionParams(const NVDevEvoRec *pDevEvo) +{ + const struct NvKmsCompositionCapabilities *pCaps = + &pDevEvo->caps.cursorCompositionCaps; + const NvU32 supportedBlendMode = + pCaps->colorKeySelect[NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE].supportedBlendModes[1]; + + struct NvKmsCompositionParams params = { }; + + if ((supportedBlendMode & NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE)) != 0x0) { + params.blendingMode[1] = NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE; + } else { + params.blendingMode[1] = NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA; + } + + return params; +} + +static NvBool ValidateConnectorTypes(const NVDevEvoRec *pDevEvo) +{ + const NVDispEvoRec *pDispEvo; + const NVConnectorEvoRec *pConnectorEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + const NVEvoSubDevRec *pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + const NVEvoCapabilities *pEvoCaps = &pEvoSubDev->capabilities; + const NVEvoMiscCaps *pMiscCaps = &pEvoCaps->misc; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (!pMiscCaps->supportsDSI && + pConnectorEvo->signalFormat == NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "DSI connectors are unsupported!"); + return FALSE; + } + } + } + return TRUE; +} + +static void UnregisterFlipOccurredEventOneHead(NVDispEvoRec *pDispEvo, + const NvU32 head) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU32 layer; + + /* XXX NVKMS TODO: need disp-scope in event */ + if (pDispEvo->displayOwner != 0) { + return; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + NVEvoChannelPtr pChannel = pDevEvo->head[head].layer[layer]; + + nvAssert((pChannel->completionNotifierEventHandle == 0) || + (pChannel->completionNotifierEventRefPtr != NULL)); + + if (pChannel->completionNotifierEventHandle != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, + pChannel->pb.channel_handle, + pChannel->completionNotifierEventHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pChannel->completionNotifierEventHandle); + pChannel->completionNotifierEventHandle = 0; + pChannel->completionNotifierEventRefPtr = NULL; + } + } +} + +static void ClearApiHeadStateOneDisp(NVDispEvoRec *pDispEvo) +{ + NvU32 apiHead; + + /* + * Unregister all the flip-occurred event callbacks which are + * registered with the (api-head, layer) pair event data, + * before destroying the api-head states. 
+ */
+    for (NvU32 head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
+        UnregisterFlipOccurredEventOneHead(pDispEvo, head);
+    }
+
+    for (apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->apiHeadState); apiHead++) {
+        NvU32 layer;
+        NVDispApiHeadStateEvoRec *pApiHeadState =
+            &pDispEvo->apiHeadState[apiHead];
+
+        nvAssert(pApiHeadState->rmVBlankCallbackHandle == 0);
+
+        for (layer = 0; layer < ARRAY_LEN(pApiHeadState->flipOccurredEvent); layer++) {
+            if (pApiHeadState->flipOccurredEvent[layer].ref_ptr != NULL) {
+                nvkms_free_ref_ptr(pApiHeadState->flipOccurredEvent[layer].ref_ptr);
+                pApiHeadState->flipOccurredEvent[layer].ref_ptr = NULL;
+            }
+        }
+    }
+
+    nvkms_memset(pDispEvo->apiHeadState, 0, sizeof(pDispEvo->apiHeadState));
+}
+
+static void ClearApiHeadState(NVDevEvoRec *pDevEvo)
+{
+    NvU32 dispIndex;
+    NVDispEvoRec *pDispEvo;
+
+    nvRmFreeCoreRGSyncpts(pDevEvo);
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+        ClearApiHeadStateOneDisp(pDispEvo);
+    }
+
+    nvkms_memset(pDevEvo->apiHead, 0, sizeof(pDevEvo->apiHead));
+}
+
+static NvBool InitApiHeadStateOneDisp(NVDispEvoRec *pDispEvo)
+{
+    NvU32 usedApiHeadsMask = 0x0;
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+
+    for (NvU32 apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->apiHeadState); apiHead++) {
+        NvU32 layer;
+        NVDispApiHeadStateEvoRec *pApiHeadState =
+            &pDispEvo->apiHeadState[apiHead];
+
+        pApiHeadState->activeDpys = nvEmptyDpyIdList();
+        pApiHeadState->attributes = NV_EVO_DEFAULT_ATTRIBUTES_SET;
+
+        for (layer = 0; layer < ARRAY_LEN(pApiHeadState->flipOccurredEvent); layer++) {
+            pApiHeadState->flipOccurredEvent[layer].ref_ptr =
+                nvkms_alloc_ref_ptr(&pApiHeadState->flipOccurredEvent[layer].data);
+            if (pApiHeadState->flipOccurredEvent[layer].ref_ptr == NULL) {
+                goto failed;
+            }
+
+            pApiHeadState->flipOccurredEvent[layer].data =
+                (NVDispFlipOccurredEventDataEvoRec) {
+                .pDispEvo = pDispEvo,
+                .apiHead = apiHead,
+                .layer = layer,
+            };
+        }
+    }
+
+    for (NvU32 head = 0; head < pDevEvo->numHeads; head++) {
+        if (pDispEvo->headState[head].pConnectorEvo != NULL) {
+            NvU32 apiHead;
+            const NVConnectorEvoRec *pConnectorEvo =
+                pDispEvo->headState[head].pConnectorEvo;
+
+            /* Find an unused api-head that supports the same number of layers */
+            for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
+                if ((NVBIT(apiHead) & usedApiHeadsMask) != 0x0) {
+                    continue;
+                }
+
+                if (pDevEvo->apiHead[apiHead].numLayers ==
+                    pDevEvo->head[head].numLayers) {
+                    usedApiHeadsMask |= NVBIT(apiHead);
+                    break;
+                }
+            }
+            nvAssert(apiHead < pDevEvo->numApiHeads);
+
+            /*
+             * Use the pDpyEvo for the connector, since we may not have one
+             * for display id if it's a dynamic one.
+ */ + NVDpyEvoRec *pDpyEvo = nvGetDpyEvoFromDispEvo(pDispEvo, + pConnectorEvo->displayId); + + nvAssert(pDpyEvo->apiHead == NV_INVALID_HEAD); + + pDpyEvo->apiHead = apiHead; + nvAssignHwHeadsMaskApiHeadState( + &pDispEvo->apiHeadState[apiHead], + NVBIT(head)); + pDispEvo->apiHeadState[apiHead].activeDpys = + nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId); + } + } + + return TRUE; + +failed: + ClearApiHeadStateOneDisp(pDispEvo); + + return FALSE; +} + +static void +CompletionNotifierEventDeferredWork(void *dataPtr, NvU32 dataU32) +{ + NVDispFlipOccurredEventDataEvoRec *pEventData = dataPtr; + + nvSendFlipOccurredEventEvo(pEventData->pDispEvo, pEventData->apiHead, + pEventData->layer); +} + +static void CompletionNotifierEvent(void *arg, void *pEventDataVoid, + NvU32 hEvent, NvU32 Data, NV_STATUS Status) +{ + (void) nvkms_alloc_timer_with_ref_ptr( + CompletionNotifierEventDeferredWork, /* callback */ + arg, /* argument (this is a ref_ptr to NVDispFlipOccurredEventDataEvoRec) */ + 0, /* dataU32 */ + 0); /* timeout: schedule the work immediately */ +} + +void nvEvoPreModesetRegisterFlipOccurredEvent(NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVEvoModesetUpdateState + *pModesetUpdate) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU32 layer; + + /* XXX NVKMS TODO: need disp-scope in event */ + if (pDispEvo->displayOwner != 0) { + return; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + NVEvoChannelPtr pChannel = pDevEvo->head[head].layer[layer]; + const struct _NVEvoModesetUpdateStateOneLayer *pLayer = + &pModesetUpdate->flipOccurredEvent[head].layer[layer]; + + if (!pLayer->changed || + (pLayer->ref_ptr == NULL) || + (pLayer->ref_ptr == pChannel->completionNotifierEventRefPtr)) { + continue; + } + + nvAssert((pChannel->completionNotifierEventHandle == 0) && + (pChannel->completionNotifierEventRefPtr == NULL)); + + pChannel->completionNotifierEventHandle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (!nvRmRegisterCallback(pDevEvo, + &pChannel->completionNotifierEventCallback, + pLayer->ref_ptr, + pChannel->pb.channel_handle, + pChannel->completionNotifierEventHandle, + CompletionNotifierEvent, + 0)) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pChannel->completionNotifierEventHandle); + pChannel->completionNotifierEventHandle = 0; + } else { + pChannel->completionNotifierEventRefPtr = pLayer->ref_ptr; + } + } +} + +void nvEvoPostModesetUnregisterFlipOccurredEvent(NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVEvoModesetUpdateState + *pModesetUpdate) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU32 layer; + + /* XXX NVKMS TODO: need disp-scope in event */ + if (pDispEvo->displayOwner != 0) { + return; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + NVEvoChannelPtr pChannel = pDevEvo->head[head].layer[layer]; + const struct _NVEvoModesetUpdateStateOneLayer *pLayer = + &pModesetUpdate->flipOccurredEvent[head].layer[layer]; + + if (!pLayer->changed || + (pLayer->ref_ptr != NULL) || + (pChannel->completionNotifierEventHandle == 0)) { + + /* + * If the flip occurred event of this layer is updated to get + * enabled (pLayer->ref_ptr != NULL) then that update should have + * been already processed by + * nvEvoPreModesetRegisterFlipOccurredEvent() and + * pChannel->completionNotifierEventRefPtr == pLayer->ref_ptr. 
+ */ + nvAssert(!pLayer->changed || + (pChannel->completionNotifierEventHandle == 0) || + (pChannel->completionNotifierEventRefPtr == + pLayer->ref_ptr)); + continue; + } + + nvRmApiFree(nvEvoGlobal.clientHandle, + pChannel->pb.channel_handle, + pChannel->completionNotifierEventHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pChannel->completionNotifierEventHandle); + pChannel->completionNotifierEventHandle = 0; + pChannel->completionNotifierEventRefPtr = NULL; + } +} + +static NvBool InitApiHeadState(NVDevEvoRec *pDevEvo) +{ + NVDispEvoRec *pDispEvo; + NvU32 dispIndex; + + /* + * For every hardware head, there should be at least one api-head + * which supports the equal number of layer. + */ + nvAssert(pDevEvo->numApiHeads == pDevEvo->numHeads); + for (NvU32 head = 0; head < pDevEvo->numHeads; head++) { + pDevEvo->apiHead[head].numLayers = pDevEvo->head[head].numLayers; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + if (!InitApiHeadStateOneDisp(pDispEvo)) { + goto failed; + } + } + + nvRmAllocCoreRGSyncpts(pDevEvo); + + return TRUE; + +failed: + ClearApiHeadState(pDevEvo); + + return FALSE; +} + +/*! + * Allocate the EVO core channel. + * + * This function trivially succeeds if the core channel is already allocated. + */ +NvBool nvAllocCoreChannelEvo(NVDevEvoPtr pDevEvo) +{ + NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS capsParams = { }; + NvU32 ret; + NvBool bRet; + NVDispEvoRec *pDispEvo; + NvU32 dispIndex; + NvU32 head; + + const NvBool bFailCoreChannelSetup = + nvkms_test_fail_alloc_core_channel(FAIL_ALLOC_CORE_CHANNEL_RM_SETUP_CORE_CHANNEL); + + /* Do nothing if the display was already allocated */ + if (pDevEvo->displayHandle != 0) { + return TRUE; + } + + if (!AllocEvoSubDevs(pDevEvo)) { + goto failed; + } + + // Disallow GC6 in anticipation of touching GPU/displays. + if (!nvRmSetGc6Allowed(pDevEvo, FALSE)) { + goto failed; + } + + /* Query console FB info, and save the result into pDevEvo->vtFbInfo. + * This is done at device allocation time. + * nvRmImportFbConsoleMemory will import the surface for console restore by + * nvEvoRestoreConsole if the surface format is compatible. + * Else, console restore will cause core channel realloc, telling RM to + * restore the console via nvRmVTSwitch. + */ + if (!nvRmGetVTFBInfo(pDevEvo)) { + goto failed; + } + + if (!nvRmVTSwitch(pDevEvo, + NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE)) { + goto failed; + } + + /* Evo object (parent of all other NV50 display stuff) */ + nvAssert(nvRmEvoClassListCheck(pDevEvo, pDevEvo->dispClass)); + pDevEvo->displayHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->displayHandle, + pDevEvo->dispClass, + NULL); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to initialize display engine: 0x%x (%s)", + ret, nvstatusToString(ret)); + goto failed; + } + + /* Get the display caps bits */ + + ct_assert(sizeof(pDevEvo->capsBits) == sizeof(capsParams.capsTbl)); + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2, + &capsParams, sizeof(capsParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to determine display capabilities"); + goto failed; + } + nvkms_memcpy(pDevEvo->capsBits, capsParams.capsTbl, + sizeof(pDevEvo->capsBits)); + + // Evo core channel. 
Allocated once, shared per GPU + if (bFailCoreChannelSetup || !nvRMSetupEvoCoreChannel(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate display engine core DMA push buffer"); + goto failed; + } + + pDevEvo->coreInitMethodsPending = TRUE; + + bRet = pDevEvo->hal->GetCapabilities(pDevEvo); + + if (!bRet) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to query display engine capability bits."); + goto failed; + } + + /* + * XXX NVKMS TODO: if the EVO core channel is allocated (and + * capability notifier queried) before any nvDpyConnectEvo(), then + * we won't need to update the pixelClock here. + */ + UpdateMaxPixelClock(pDevEvo); + + if (pDevEvo->numWindows > 0) { + int win; + + if (!nvRMAllocateWindowChannels(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate display engine window channels"); + goto failed; + } + + for (win = 0; win < pDevEvo->numWindows; win++) { + const NvU32 head = pDevEvo->headForWindow[win]; + + if (head == NV_INVALID_HEAD) { + continue; + } + + pDevEvo->head[head].layer[pDevEvo->head[head].numLayers] = + pDevEvo->window[win]; + pDevEvo->head[head].numLayers++; + } + } else { + // Allocate the base channels + if (!nvRMAllocateBaseChannels(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate display engine base channels"); + goto failed; + } + + // Allocate the overlay channels + if (!nvRMAllocateOverlayChannels(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate display engine overlay channels"); + goto failed; + } + + /* Map base and overlay channels onto main and overlay layers. */ + for (head = 0; head < pDevEvo->numHeads; head++) { + nvAssert(pDevEvo->base[head] != NULL && pDevEvo->overlay[head] != NULL); + + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER] = pDevEvo->base[head]; + pDevEvo->head[head].numLayers++; + + if (!nvkms_enable_overlay_layers()) { + continue; + } + + pDevEvo->head[head].layer[NVKMS_OVERLAY_LAYER] = pDevEvo->overlay[head]; + pDevEvo->head[head].numLayers++; + } + } + + // Allocate and map the cursor controls for all heads + bRet = nvAllocCursorEvo(pDevEvo); + if (!bRet) { + goto failed; + } + + // Resume the DisplayPort library's control of the device. + if (!nvRmResumeDP(pDevEvo)) { + nvEvoLogDev( + pDevEvo, + EVO_LOG_ERROR, + "Failed to initialize DisplayPort sub-system."); + goto failed; + } + + if (!InitApiHeadState(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to initialize the api heads."); + goto failed; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + nvRmRegisterBacklight(pDispEvo); + } + + // Allow GC6 if no heads are active. + if (nvAllHeadsInactive(pDevEvo)) { + if (!nvRmSetGc6Allowed(pDevEvo, TRUE)) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "No head is active, but failed to allow GC6"); + } + } + + return TRUE; + +failed: + nvFreeCoreChannelEvo(pDevEvo); + + return FALSE; +} + +/*! + * Clear the pConnectorEvo->or.primary and pConnectorEvo->or.secondaryMask + * tracking. + */ +static void ClearSORAssignmentsOneDisp(const NVDispEvoRec *pDispEvo) +{ + NVConnectorEvoPtr pConnectorEvo; + + nvAssert(NV0073_CTRL_SYSTEM_GET_CAP(pDispEvo->pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)); + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + continue; + } + + pConnectorEvo->or.primary = NV_INVALID_OR; + pConnectorEvo->or.secondaryMask = 0x0; + } +} + +/*! 
+ * Update pConnectorEvo->or.primary and pConnectorEvo->or.secondaryMask from + * the list given to us by RM. + */ +static void RefreshSORAssignments(const NVDispEvoRec *pDispEvo, + const NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *pParams) +{ + NVConnectorEvoPtr pConnectorEvo; + + ClearSORAssignmentsOneDisp(pDispEvo); + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + const NvU32 displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + NvU32 sorIndex; + + if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + continue; + } + + for (sorIndex = 0; + sorIndex < ARRAY_LEN(pParams->sorAssignList) && + sorIndex < ARRAY_LEN(pConnectorEvo->or.ownerHeadMask); + sorIndex++) { + if ((pParams->sorAssignListWithTag[sorIndex].displayMask & + displayId) == displayId) { + if ((pParams->sorAssignListWithTag[sorIndex].sorType == + NV0073_CTRL_DFP_SOR_TYPE_SINGLE) || + (pParams->sorAssignListWithTag[sorIndex].sorType == + NV0073_CTRL_DFP_SOR_TYPE_2H1OR_PRIMARY)) { + pConnectorEvo->or.primary = sorIndex; + } else { + nvAssert(pParams->sorAssignListWithTag[sorIndex].sorType == + NV0073_CTRL_DFP_SOR_TYPE_2H1OR_SECONDARY); + pConnectorEvo->or.secondaryMask |= NVBIT(sorIndex); + } + } + } + + nvAssert((pConnectorEvo->or.secondaryMask == 0) || + (pConnectorEvo->or.primary != NV_INVALID_OR)); + } +} + +/* + * Ask RM to assign an SOR to given displayId. + * + * In 2Heads1OR MST case, this function gets called with the dynamic displayId. + * + * Note that this assignment may be temporary. This function will always call + * RM, and unless the connector is currently in use (i.e., being driven by a + * head), a previously-assigned SOR may be reused. + * + * The RM will either: + * a) return an SOR that's already assigned/attached + * to root port of this displayId, or + * b) pick a new "unused" SOR, assign and attach it to this connector, and + * return that -- where "unused" means both not being actively driven by a + * head and not in the "exclude mask" argument. + * The "exclude mask" is useful if we need to assign multiple SORs up front + * before activating heads to drive them. + * + * For example, if head 0 is currently actively scanning out to SOR 0 and we + * are doing a modeset to activate currently-inactive heads 1 and 2: + * 1. nvkms calls RM for nvAssignSOREvo(pConnectorForHead1, 0); + * RM returns any SOR other than 0 (say 3) + * 2. nvkms calls RM for nvAssignSOREvo(pConnectorForHead2, (1 << 3)); + * RM returns any SOR other than 0 and 3 (say 1) + * 3. At this point nvkms can push methods and UPDATE to enable heads 1 and 2 + * to drive SORs 3 and 1. + * In the example above, the sorExcludeMask == (1 << 3) at step 2 is important + * to ensure that RM doesn't reuse the SOR 3 from step 1. It won't reuse SOR 0 + * because it's in use by head 0. + * + * If an SOR is only needed temporarily (e.g., to do link training to "assess" + * a DisplayPort or HDMI FRL link), then sorExcludeMask should be 0 -- any SOR + * that's not actively used by a head can be used, and as soon as nvkms + * finishes the "assessment", the SOR is again eligible for reuse. + * + * Because of the potential for SOR reuse, nvAssignSOREvo() will always call + * RefreshSORAssignments() to update pConnectorEvo->or.primary and + * pConnectorEvo->or.secondaryMask on *every* connector after calling + * NV0073_CTRL_CMD_DFP_ASSIGN_SOR for *any* connector. 
+ */
+NvBool nvAssignSOREvo(const NVConnectorEvoRec *pConnectorEvo,
+                      const NvU32 targetDisplayId,
+                      const NvBool b2Heads1Or,
+                      const NvU32 sorExcludeMask)
+{
+    const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo;
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS params = { 0 };
+    NvU32 ret;
+
+    if (!NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
+                                    NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) {
+        return TRUE;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = targetDisplayId;
+    params.bIs2Head1Or = b2Heads1Or;
+    params.sorExcludeMask = sorExcludeMask;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_DFP_ASSIGN_SOR,
+                         &params,
+                         sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        return FALSE;
+    }
+
+    RefreshSORAssignments(pDispEvo, &params);
+
+    return TRUE;
+}
+
+static void CacheSorAssignList(const NVDispEvoRec *pDispEvo,
+    const NVConnectorEvoRec *sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS])
+{
+    const NVConnectorEvoRec *pConnectorEvo;
+
+    FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
+        if ((pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) ||
+            (pConnectorEvo->or.primary == NV_INVALID_OR)) {
+            continue;
+        }
+
+        /*
+         * RM populates the same SOR index into more than one connector if
+         * they are DCC partners; this check makes sure the SOR
+         * assignment happens only for a single connector. The SOR
+         * assignment call before modeset/dp-link-training makes sure the
+         * assignment happens for the correct connector.
+         */
+        if (sorAssignList[pConnectorEvo->or.primary] != NULL) {
+            continue;
+        }
+        sorAssignList[pConnectorEvo->or.primary] =
+            pConnectorEvo;
+    }
+}
+
+static void RestoreSorAssignList(NVDispEvoRec *pDispEvo,
+    const NVConnectorEvoRec *sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS])
+{
+    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    NvU32 sorIndex;
+
+    for (sorIndex = 0;
+         sorIndex < NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS; sorIndex++) {
+
+        if (sorAssignList[sorIndex] == NULL) {
+            continue;
+        }
+
+        NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS params = {
+            .subDeviceInstance = pDispEvo->displayOwner,
+            .displayId = nvDpyIdToNvU32(sorAssignList[sorIndex]->displayId),
+            .sorExcludeMask = ~NVBIT(sorIndex),
+        };
+        NvU32 ret;
+
+        ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                             pDevEvo->displayCommonHandle,
+                             NV0073_CTRL_CMD_DFP_ASSIGN_SOR,
+                             &params,
+                             sizeof(params));
+
+        if (ret != NVOS_STATUS_SUCCESS) {
+            nvEvoLogDispDebug(pDispEvo,
+                              EVO_LOG_ERROR,
+                              "Failed to restore SOR-%u -> %s assignment.",
+                              sorIndex, sorAssignList[sorIndex]->name);
+        } else {
+            RefreshSORAssignments(pDispEvo, &params);
+        }
+    }
+}
+
+NvBool nvResumeDevEvo(NVDevEvoRec *pDevEvo)
+{
+    struct {
+        const NVConnectorEvoRec *
+            sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+    } disp[NVKMS_MAX_SUBDEVICES] = { };
+    NVDispEvoRec *pDispEvo;
+    NvU32 dispIndex;
+
+    if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
+                                   NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) {
+        FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+            CacheSorAssignList(pDispEvo, disp[dispIndex].sorAssignList);
+        }
+    }
+
+    nvInvalidateDefaultLut(pDevEvo);
+
+    if (!nvAllocCoreChannelEvo(pDevEvo)) {
+        // free the device if core channel allocation fails
+        nvRevokeDevice(pDevEvo);
+        return FALSE;
+    }
+
+    /*
+     * During the hibernate-resume cycle, the vbios or GOP driver programs
+     * the display engine to light up the boot display.
+     * In the hibernate-resume path, doing the NV0073_CTRL_CMD_DFP_ASSIGN_SOR
+     * rm-control call before the core channel allocation causes a display
+     * channel hang, because at that stage RM is not aware of the boot
+     * display activated by the vbios and it ends up unrouting active SOR
+     * assignments. Therefore, restore the SOR assignments only after the
+     * core channel allocation.
+     */
+
+    if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
+                                   NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) {
+
+        /*
+         * Shut down all heads before restoring the SOR assignments because in
+         * case of hibernate-resume the SOR, for which NVKMS is trying to
+         * restore the assignment, might be in use by the boot display set up
+         * by the vbios/GOP driver.
+         */
+        nvShutDownApiHeads(pDevEvo, pDevEvo->pNvKmsOpenDev,
+                           NULL /* pTestFunc, shut down all heads */,
+                           NULL /* pData */,
+                           TRUE /* doRasterLock */);
+
+        FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+            RestoreSorAssignList(pDispEvo, disp[dispIndex].sorAssignList);
+        }
+    }
+
+    return TRUE;
+}
+
+void nvSuspendDevEvo(NVDevEvoRec *pDevEvo)
+{
+    nvFreeCoreChannelEvo(pDevEvo);
+}
+
+/*!
+ * Free the EVO core channel.
+ *
+ * This function does nothing if the core channel was already free.
+ */
+void nvFreeCoreChannelEvo(NVDevEvoPtr pDevEvo)
+{
+    NVDispEvoPtr pDispEvo;
+    NvU32 dispIndex;
+    NvU32 head;
+
+    ClearApiHeadState(pDevEvo);
+
+    nvEvoCancelPostFlipIMPTimer(pDevEvo);
+
+    NvU32 fullApiHeadMasks[NVKMS_MAX_SUBDEVICES];
+    nvkms_memset(fullApiHeadMasks, 0xFF, sizeof(fullApiHeadMasks));
+    nvCancelVrrFrameReleaseTimers(pDevEvo, fullApiHeadMasks);
+
+    nvCancelLowerDispBandwidthTimer(pDevEvo);
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+        nvRmUnregisterBacklight(pDispEvo);
+
+        nvAssert(pDevEvo->skipConsoleRestore ||
+                 nvDpyIdListIsEmpty(nvActiveDpysOnDispEvo(pDispEvo)));
+    }
+
+    // Pause the DisplayPort library's control of the device.
+    nvRmPauseDP(pDevEvo);
+
+    // Unmap and free the cursor controls for all heads
+    nvFreeCursorEvo(pDevEvo);
+
+    // TODO: Unregister all surfaces registered with this device.
+
+    for (head = 0; head < pDevEvo->numHeads; head++) {
+        NvU32 layer;
+
+        for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+            nvRmEvoFreePreSyncpt(pDevEvo, pDevEvo->head[head].layer[layer]);
+            pDevEvo->head[head].layer[layer] = NULL;
+        }
+        pDevEvo->head[head].numLayers = 0;
+    }
+
+    nvRMFreeWindowChannels(pDevEvo);
+    nvRMFreeOverlayChannels(pDevEvo);
+    nvRMFreeBaseChannels(pDevEvo);
+
+    nvRMFreeEvoCoreChannel(pDevEvo);
+
+    if (pDevEvo->displayHandle != 0) {
+        if (nvRmApiFree(nvEvoGlobal.clientHandle,
+                        pDevEvo->deviceHandle,
+                        pDevEvo->displayHandle) != NVOS_STATUS_SUCCESS) {
+            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to tear down Disp");
+        }
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDevEvo->displayHandle);
+        pDevEvo->displayHandle = 0;
+
+        if (!pDevEvo->skipConsoleRestore) {
+            nvRmVTSwitch(pDevEvo,
+                         NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_RESTORE_VT_STATE);
+        } else {
+            nvRmVTSwitch(pDevEvo,
+                         NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_CONSOLE_RESTORED);
+        }
+    }
+
+    // No longer possible that NVKMS is driving any displays, allow GC6.
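+    // (This balances the "disallow GC6" nvRmSetGc6Allowed() call made at
+    // the start of nvAllocCoreChannelEvo().)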
+    nvRmSetGc6Allowed(pDevEvo, TRUE);
+
+    nvFree(pDevEvo->gpus);
+    pDevEvo->gpus = NULL;
+}
+
+
+#define ASSIGN_PIN(_pPin, _pin)                          \
+    do {                                                 \
+        ct_assert(NV_IS_UNSIGNED((_pin)));               \
+        if ((_pPin)) {                                   \
+            if ((_pin) >= NV_EVO_NUM_LOCK_PIN_CAPS) {    \
+                return FALSE;                            \
+            }                                            \
+            *(_pPin) = (_pin);                           \
+        }                                                \
+    } while (0)
+
+static NvBool QueryFrameLockHeaderPins(const NVDispEvoRec *pDispEvo,
+                                       NVEvoSubDevPtr pEvoSubDev,
+                                       NvU32 *pFrameLockPin,
+                                       NvU32 *pRasterLockPin,
+                                       NvU32 *pFlipLockPin)
+{
+    NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS params = { };
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+
+    params.base.subdeviceIndex = pEvoSubDev->subDeviceInstance;
+
+    if (nvRmApiControl(nvEvoGlobal.clientHandle,
+                       pDevEvo->displayHandle,
+                       NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS,
+                       &params, sizeof(params)) != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
+                          "Failed to query framelock header pins");
+        return FALSE;
+    }
+
+    ASSIGN_PIN(pFrameLockPin, params.frameLockPin);
+    ASSIGN_PIN(pRasterLockPin, params.rasterLockPin);
+    ASSIGN_PIN(pFlipLockPin, params.flipLockPin);
+
+    return TRUE;
+}
+
+// Get the lock pin dedicated to a given signal and return the corresponding method
+NVEvoLockPin nvEvoGetPinForSignal(const NVDispEvoRec *pDispEvo,
+                                  NVEvoSubDevPtr pEvoSubDev,
+                                  NVEvoLockSignal signal)
+{
+    NVEvoLockPinCaps *caps = pEvoSubDev->capabilities.pin;
+    NvU32 pin;
+
+    switch (signal) {
+
+    case NV_EVO_LOCK_SIGNAL_RASTER_LOCK:
+        if (!QueryFrameLockHeaderPins(pDispEvo, pEvoSubDev,
+                                      NULL, &pin, NULL)) {
+            break;
+        }
+
+        if (!caps[pin].scanLock) break;
+
+        return NV_EVO_LOCK_PIN_0 + pin;
+
+    case NV_EVO_LOCK_SIGNAL_FRAME_LOCK:
+        if (!QueryFrameLockHeaderPins(pDispEvo, pEvoSubDev,
+                                      &pin, NULL, NULL)) {
+            break;
+        }
+
+        if (!caps[pin].scanLock) break;
+
+        return NV_EVO_LOCK_PIN_0 + pin;
+
+    case NV_EVO_LOCK_SIGNAL_FLIP_LOCK:
+        if (!QueryFrameLockHeaderPins(pDispEvo, pEvoSubDev,
+                                      NULL, NULL, &pin) ||
+            !caps[pin].flipLock) {
+            // If the query from RM fails (or returns a bogus pin), fall
+            // back to an alternate mechanism. This may happen on boards
+            // with no framelock header. Look in the capabilities for the
+            // pin that has the requested capability.
+            for (pin = 0; pin < NV_EVO_NUM_LOCK_PIN_CAPS; pin++) {
+                if (caps[pin].flipLock)
+                    break;
+            }
+
+            if (pin == NV_EVO_NUM_LOCK_PIN_CAPS) {
+                // Not found
+                break;
+            }
+        }
+
+        if (!caps[pin].flipLock) {
+            break;
+        }
+
+        return NV_EVO_LOCK_PIN_0 + pin;
+
+    case NV_EVO_LOCK_SIGNAL_STEREO:
+        // Look in the capabilities for the pin that has the requested capability
+        for (pin = 0; pin < NV_EVO_NUM_LOCK_PIN_CAPS; pin++) {
+            if (caps[pin].stereo)
+                break;
+        }
+
+        if (pin == NV_EVO_NUM_LOCK_PIN_CAPS) break;
+
+        return NV_EVO_LOCK_PIN_0 + pin;
+
+    default:
+        nvAssert(!"Unknown signal type");
+        break;
+    }
+
+    // Pin not found
+    return NV_EVO_LOCK_PIN_ERROR;
+}
+
+void nvSetDVCEvo(NVDispEvoPtr pDispEvo,
+                 const NvU32 head,
+                 NvS32 dvc,
+                 NVEvoUpdateState *updateState)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+
+    nvAssert(dvc >= NV_EVO_DVC_MIN);
+    nvAssert(dvc <= NV_EVO_DVC_MAX);
+
+    // The HW range is from -2048 to +2047.
+    // Negative values are not used; they distort the colors.
+    // Values from 1023 down to 0 grey the colors out.
+    // We use 0 to 2047, with 1024 as the default.
+    dvc += 1024;
+    nvAssert(dvc >= 0);
+    pHeadState->procAmp.satCos = dvc;
+
+    // In SW YUV420 mode, HW is programmed with the default DVC. The DVC is
+    // handled in a headSurface composite shader.
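+    // (So in that case leave satCos at its default of 1024 below and let
+    // the shader apply the requested DVC.)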
+    if (pHeadState->timings.yuv420Mode == NV_YUV420_MODE_SW) {
+        pHeadState->procAmp.satCos = 1024;
+    }
+
+    nvPushEvoSubDevMaskDisp(pDispEvo);
+    pDevEvo->hal->SetProcAmp(pDispEvo, head, updateState);
+    nvPopEvoSubDevMask(pDevEvo);
+}
+
+void nvSetImageSharpeningEvo(NVDispEvoRec *pDispEvo, const NvU32 head,
+                             NvU32 value, NVEvoUpdateState *updateState)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+
+    /*
+     * Evo values are from -128 to 127, with a default of 0.
+     * Negative values sharpen.
+     * Control panel values range from 0 (less sharp) to 255 (more sharp).
+     */
+    value = 127 - value;
+
+    nvPushEvoSubDevMaskDisp(pDispEvo);
+    pDevEvo->hal->SetOutputScaler(pDispEvo, head, value, updateState);
+    nvPopEvoSubDevMask(pDevEvo);
+}
+
+static void LayerSetPositionOneApiHead(NVDispEvoRec *pDispEvo,
+                                       const NvU32 apiHead,
+                                       const NvU32 layer,
+                                       const NvS16 x,
+                                       const NvS16 y,
+                                       NVEvoUpdateState *pUpdateState)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    const NVDispApiHeadStateEvoRec *pApiHeadState =
+        &pDispEvo->apiHeadState[apiHead];
+    const NvU32 sd = pDispEvo->displayOwner;
+    NvU32 head;
+
+    nvPushEvoSubDevMaskDisp(pDispEvo);
+    FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
+        NVEvoSubDevHeadStateRec *pSdHeadState =
+            &pDevEvo->gpus[sd].headState[head];
+
+        if ((pSdHeadState->layer[layer].outputPosition.x != x) ||
+            (pSdHeadState->layer[layer].outputPosition.y != y)) {
+            NVEvoChannelPtr pChannel =
+                pDevEvo->head[head].layer[layer];
+
+            pSdHeadState->layer[layer].outputPosition.x = x;
+            pSdHeadState->layer[layer].outputPosition.y = y;
+
+            pDevEvo->hal->SetImmPointOut(pDevEvo, pChannel, sd, pUpdateState,
+                                         x, y);
+        }
+    }
+    nvPopEvoSubDevMask(pDevEvo);
+}
+
+NvBool nvLayerSetPositionEvo(
+    NVDevEvoPtr pDevEvo,
+    const struct NvKmsSetLayerPositionRequest *pRequest)
+{
+    NVDispEvoPtr pDispEvo;
+    NvU32 sd;
+
+    /*
+     * We need this call to not modify any state if it will fail, so we
+     * first verify that all relevant layers support output positioning,
+     * then go back through the layers to actually modify the relevant
+     * state.
+     */
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+        NvU32 apiHead;
+
+        if ((pRequest->requestedDispsBitMask & NVBIT(sd)) == 0) {
+            continue;
+        }
+
+        for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) {
+            NvU32 layer;
+
+            if ((pRequest->disp[sd].requestedHeadsBitMask &
+                 NVBIT(apiHead)) == 0) {
+                continue;
+            }
+
+            if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
+                continue;
+            }
+
+            for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) {
+                const NvS16 x = pRequest->disp[sd].head[apiHead].layerPosition[layer].x;
+                const NvS16 y = pRequest->disp[sd].head[apiHead].layerPosition[layer].y;
+
+                if ((pRequest->disp[sd].head[apiHead].requestedLayerBitMask &
+                     NVBIT(layer)) == 0x0) {
+                    continue;
+                }
+
+                /*
+                 * Error out if a requested layer does not support position
+                 * updates and the requested position is not (0, 0).
+                 */
+                if (!pDevEvo->caps.layerCaps[layer].supportsWindowMode &&
+                    (x != 0 || y != 0)) {
+                    nvEvoLogDebug(EVO_LOG_ERROR, "Layer %d does not support "
+                                  "position updates.", layer);
+                    return FALSE;
+                }
+            }
+        }
+    }
+
+    /* The checks in the block above passed, so make the requested changes.
*/ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 apiHead; + + if ((pRequest->requestedDispsBitMask & NVBIT(sd)) == 0) { + continue; + } + + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + NVEvoUpdateState updateState = { }; + NvU32 layer; + + if ((pRequest->disp[sd].requestedHeadsBitMask & + NVBIT(apiHead)) == 0) { + continue; + } + + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + continue; + } + + for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) { + const NvS16 x = pRequest->disp[sd].head[apiHead].layerPosition[layer].x; + const NvS16 y = pRequest->disp[sd].head[apiHead].layerPosition[layer].y; + + if ((pRequest->disp[sd].head[apiHead].requestedLayerBitMask & + NVBIT(layer)) == 0x0) { + continue; + } + + LayerSetPositionOneApiHead(pDispEvo, apiHead, layer, x, y, + &updateState); + } + + pDevEvo->hal->Update(pDevEvo, &updateState, TRUE /* releaseElv */); + } + } + + return TRUE; +} + +/* + * nvConstructHwModeTimingsImpCheckEvo() - perform an IMP check on the + * given raster timings and viewport during the + * nvConstructHwModeTimingsEvo path. If IMP fails, we try multiple + * times, each time scaling back the usage bounds until we find a + * configuration IMP will accept, or until we can't scale back any + * further. If this fails, mark the viewport as invalid. + */ + +NvBool nvConstructHwModeTimingsImpCheckEvo( + const NVConnectorEvoRec *pConnectorEvo, + const NVHwModeTimingsEvo *pTimings, + const NVDscInfoEvoRec *pDscInfo, + const NvBool b2Heads1Or, + const NVDpyAttributeColor *pColor, + const struct NvKmsModeValidationParams *pParams, + NVHwModeTimingsEvo timings[NVKMS_MAX_HEADS_PER_DISP], + NvU32 *pNumHeads, + NVEvoInfoStringPtr pInfoString) +{ + NvU32 head; + NvU32 activeRmId; + const NvU32 numHeads = b2Heads1Or ? 
2 : 1; + NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP]; + NvBool requireBootClocks = !!(pParams->overrides & + NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS); + NvU32 ret; + + activeRmId = nvRmAllocDisplayId(pConnectorEvo->pDispEvo, + nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId)); + if (activeRmId == 0x0) { + return FALSE; + } + + nvkms_memset(&timingsParams, 0, sizeof(timingsParams)); + + for (head = 0; head < numHeads; head++) { + timingsParams[head].pConnectorEvo = pConnectorEvo; + timingsParams[head].activeRmId = activeRmId; + timingsParams[head].pixelDepth = nvEvoDpyColorToPixelDepth(pColor); + if (!nvEvoGetSingleMergeHeadSectionHwModeTimings(pTimings, numHeads, + &timings[head])) { + ret = FALSE; + goto done; + } + timingsParams[head].pTimings = &timings[head]; + timingsParams[head].enableDsc = + (pDscInfo->type != NV_DSC_INFO_EVO_TYPE_DISABLED); + timingsParams[head].dscSliceCount = pDscInfo->sliceCount; + timingsParams[head].possibleDscSliceCountMask = + pDscInfo->possibleSliceCountMask; + timingsParams[head].b2Heads1Or = b2Heads1Or; + timingsParams[head].pUsage = &timings[head].viewPort.guaranteedUsage; + } + + /* bypass this checking if the user disabled IMP */ + if ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_EXTENDED_GPU_CAPABILITIES_CHECK) != 0) { + ret = TRUE; + } else { + ret = nvValidateImpOneDispDowngrade(pConnectorEvo->pDispEvo, timingsParams, + requireBootClocks, + NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE, + /* modesetRequestedHeadsMask */ + (NVBIT(NVKMS_MAX_HEADS_PER_DISP) - 1UL)); + } + + if (ret) { + for (NvU32 head = 1; head < numHeads; head++) { + nvAssert(timingsParams[head].dscSliceCount == + timingsParams[head - 1].dscSliceCount); + } + *pNumHeads = numHeads; + } else { + nvEvoLogInfoString(pInfoString, + "ViewPort %dx%d exceeds hardware capabilities.", + pTimings->viewPort.out.width, + pTimings->viewPort.out.height); + } + +done: + nvRmFreeDisplayId(pConnectorEvo->pDispEvo, activeRmId); + + return ret; +} + +/* + * Convert from NvModeTimings values to NVHwModeTimingsEvo. + */ + +static void +ConstructHwModeTimingsFromNvModeTimings(const NvModeTimings *pModeTimings, + NVHwModeTimingsEvoPtr pTimings) +{ + NvU32 hBlankStart; + NvU32 vBlankStart; + NvU32 hBlankEnd; + NvU32 vBlankEnd; + NvU32 hSyncWidth; + NvU32 vSyncWidth; + NvU32 vTotalAdjustment = 0; + + NvModeTimings modeTimings; + + modeTimings = *pModeTimings; + + if (modeTimings.doubleScan) { + modeTimings.vVisible *= 2; + modeTimings.vSyncStart *= 2; + modeTimings.vSyncEnd *= 2; + modeTimings.vTotal *= 2; + } + + /* + * The real pixel clock and width values for modes using YUV 420 emulation + * are half of the incoming values parsed from the EDID. This conversion is + * performed here, so NvModeTimings will have the user-visible (full width) + * values, and NVHwModeTimingsEvo will have the real (half width) values. + * + * HW YUV 420 requires setting the full width mode timings, which are then + * converted in HW. RM will recognize YUV420 mode is in use and halve + * these values for IMP. + * + * In either case, only modes with even width are allowed in YUV 420 mode. 
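+     *
+     * For example (illustrative numbers): a 3840x2160 mode driven with SW
+     * YUV 420 emulation is programmed here as 1920x2160 with half the
+     * pixel clock, while the user-visible NvModeTimings keep the full
+     * 3840 width.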
+     */
+    if (modeTimings.yuv420Mode != NV_YUV420_MODE_NONE) {
+        nvAssert(((modeTimings.pixelClockHz & 1) == 0) &&
+                 ((modeTimings.hVisible & 1) == 0) &&
+                 ((modeTimings.hSyncStart & 1) == 0) &&
+                 ((modeTimings.hSyncEnd & 1) == 0) &&
+                 ((modeTimings.hTotal & 1) == 0) &&
+                 ((modeTimings.vVisible & 1) == 0));
+        if (modeTimings.yuv420Mode == NV_YUV420_MODE_SW) {
+            modeTimings.pixelClockHz /= 2;
+            modeTimings.hVisible /= 2;
+            modeTimings.hSyncStart /= 2;
+            modeTimings.hSyncEnd /= 2;
+            modeTimings.hTotal /= 2;
+        }
+    }
+
+    pTimings->hSyncPol = modeTimings.hSyncNeg;
+    pTimings->vSyncPol = modeTimings.vSyncNeg;
+    pTimings->interlaced = modeTimings.interlaced;
+    pTimings->doubleScan = modeTimings.doubleScan;
+
+    /* pTimings->pixelClock is in KHz but modeTimings.pixelClockHz is in Hz */
+
+    pTimings->pixelClock = HzToKHz(modeTimings.pixelClockHz);
+
+    /*
+     * Assign total width, height; note that when the raster timings
+     * are interlaced, we need to make sure SetRasterSize.Height is
+     * odd, per EVO's mfs file.
+     */
+
+    if (pTimings->interlaced) vTotalAdjustment = 1;
+
+    pTimings->rasterSize.x = modeTimings.hTotal;
+    pTimings->rasterSize.y = modeTimings.vTotal | vTotalAdjustment;
+
+    /*
+     * A bit of EVO quirkiness: The hw increases the blank/sync values
+     * by one. So we need to offset by subtracting one.
+     *
+     * In other words, the h/w inserts one extra sync line/pixel, thus
+     * incrementing the raster params by one. The number of blank
+     * lines/pixels we get is true to what we ask for. Note the hw
+     * does not increase the TotalImageSize by one, so we don't need to
+     * adjust SetRasterSize.
+     *
+     * This is slightly unintuitive. Per Evo's specs, the blankEnd
+     * comes before blankStart, as defined below:
+     *   BlankStart: The last pixel/line at the end of the h/v active area.
+     *   BlankEnd: The last pixel/line at the end of the h/v blanking.
+     *
+     * Also: note that in the below computations, we divide by two for
+     * interlaced modes *before* subtracting; see bug 263622.
+     */
+
+    hBlankStart = modeTimings.hVisible +
+                  (modeTimings.hTotal - modeTimings.hSyncStart);
+
+    vBlankStart = modeTimings.vVisible +
+                  (modeTimings.vTotal - modeTimings.vSyncStart);
+
+    hBlankEnd = (modeTimings.hTotal - modeTimings.hSyncStart);
+    vBlankEnd = (modeTimings.vTotal - modeTimings.vSyncStart);
+
+    hSyncWidth = (modeTimings.hSyncEnd - modeTimings.hSyncStart);
+    vSyncWidth = (modeTimings.vSyncEnd - modeTimings.vSyncStart);
+
+    if (pTimings->interlaced) {
+        vBlankStart /= 2;
+        vBlankEnd /= 2;
+        vSyncWidth /= 2;
+    }
+
+    pTimings->rasterSyncEnd.x = hSyncWidth - 1;
+    pTimings->rasterSyncEnd.y = vSyncWidth - 1;
+    pTimings->rasterBlankStart.x = hBlankStart - 1;
+    pTimings->rasterBlankStart.y = vBlankStart - 1;
+    pTimings->rasterBlankEnd.x = hBlankEnd - 1;
+    pTimings->rasterBlankEnd.y = vBlankEnd - 1;
+
+    /* assign rasterVertBlank2 */
+
+    if (pTimings->interlaced) {
+        const NvU32 firstFieldHeight = modeTimings.vTotal / 2;
+
+        pTimings->rasterVertBlank2Start = firstFieldHeight + vBlankStart - 1;
+        pTimings->rasterVertBlank2End = firstFieldHeight + vBlankEnd - 1;
+    } else {
+        pTimings->rasterVertBlank2Start = 0;
+        pTimings->rasterVertBlank2End = 0;
+    }
+
+    pTimings->hdmi3D = modeTimings.hdmi3D;
+    pTimings->yuv420Mode = modeTimings.yuv420Mode;
+}
+
+
+
+/*
+ * Adjust the HwModeTimings as necessary to meet dual link DVI
+ * requirements; returns TRUE if the timings were successfully
+ * modified; returns FALSE if the timings cannot be made valid for
+ * dual link DVI.
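+ *
+ * (Dual link TMDS splits even and odd pixels across the two links, which
+ * is why hTotal must be even and the active area must start on an even
+ * pixel in the checks below.)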
+ */ +static NvBool ApplyDualLinkRequirements(const NVDpyEvoRec *pDpyEvo, + const struct + NvKmsModeValidationParams *pParams, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString) +{ + int adjust; + + nvAssert(pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP); + + if (pTimings->protocol != NVKMS_PROTOCOL_SOR_DUAL_TMDS) { + return TRUE; + } + + if ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_DUAL_LINK_DVI_CHECK) != 0) { + return TRUE; + } + + /* extract the fields we will need below */ + + /* + * hTotal must be even for dual link dvi; we won't try to patch + * the htotal size; just give up if it isn't even + */ + + if ((pTimings->rasterSize.x % 2) != 0) { + nvEvoLogInfoString(pInfoString, + "Horizontal Total (%d) must be even for dual link DVI mode timings.", + pTimings->rasterSize.x); + return FALSE; + } + + /* + * RASTER_BLANK_END_X must be odd, so that the active region + * starts on the following (even) pixel; if it is odd, we are + * already done + */ + + if ((pTimings->rasterBlankEnd.x % 2) == 1) return TRUE; + + /* + * RASTER_BLANK_END_X is even, so we need to adjust both + * RASTER_BLANK_END_X and RASTER_BLANK_START_X by one; we'll first + * try to subtract one pixel from both + */ + + adjust = -1; + + /* + * if RASTER_BLANK_END_X cannot be made smaller (would collide + * with hSyncEnd), see if it would be safe to instead add one to + * RASTER_BLANK_END_X and RASTER_BLANK_START_X + */ + + if (pTimings->rasterBlankEnd.x <= pTimings->rasterSyncEnd.x + 1) { + if (pTimings->rasterBlankStart.x + 1 >= pTimings->rasterSize.x) { + nvEvoLogInfoString(pInfoString, + "Cannot adjust mode timings for dual link DVI requirements."); + return FALSE; + } + adjust = 1; + } + + pTimings->rasterBlankEnd.x += adjust; + pTimings->rasterBlankStart.x += adjust; + + nvEvoLogInfoString(pInfoString, + "Adjusted mode timings for dual link DVI requirements."); + + return TRUE; +} + +void nvInitScalingUsageBounds(const NVDevEvoRec *pDevEvo, + struct NvKmsScalingUsageBounds *pScaling) +{ + pScaling->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_1X; + pScaling->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_1X; + pScaling->vTaps = pDevEvo->hal->caps.minScalerTaps; + pScaling->vUpscalingAllowed = FALSE; +} + +/* + * Check if the provided number of vertical taps is possible based on the + * capabilities: the lineStore (the smaller of inWidth and outWidth) must + * not exceed the maximum pixels for the desired taps; see bug 241014 + */ +static NvBool IsVTapsPossible(const NVEvoScalerCaps *pScalerCaps, + NvU32 inWidth, NvU32 outWidth, + NVEvoScalerTaps nTaps) +{ + const NvU32 lineStore = NV_MIN(inWidth, outWidth); + NvU32 maxPixels = pScalerCaps->taps[nTaps].maxPixelsVTaps; + + return lineStore <= maxPixels; +} + +/*! + * Compute the scale factor and check against the maximum. + * + * param[in] max Max scale factor to check against (* 1024) + * param[in] in Input width or height + * param[in] out Output width or height + * param[out] pFactor Output scale factor (* 1024) + */ +static NvBool ComputeScalingFactor(NvU32 max, + NvU16 in, NvU16 out, + NvU16 *pFactor) +{ + /* Use a 32-bit temporary to prevent overflow */ + NvU32 tmp; + + /* Add (out - 1) to round up */ + tmp = ((in * 1024) + (out - 1)) / out; + + /* Check against scaling limits. */ + if (tmp > max) { + return FALSE; + } + + *pFactor = tmp; + return TRUE; +} + +/*! + * Compute scaling factors based on in/out dimensions. + * Used by IMP and when programming viewport and window parameters in HW. 
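+ *
+ * For example, downscaling a width of 1920 to 1280 yields a factor of
+ * 1920 * 1024 / 1280 = 1536 (i.e., 1.5x), which must not exceed the
+ * maxHDownscaleFactor of the selected taps.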
+ * + * The 'maxScaleFactor' values are defined by nvdClass_01.mfs as: + * SizeIn/SizeOut * 1024 + */ +NvBool nvComputeScalingUsageBounds(const NVEvoScalerCaps *pScalerCaps, + const NvU32 inWidth, const NvU32 inHeight, + const NvU32 outWidth, const NvU32 outHeight, + NVEvoScalerTaps hTaps, NVEvoScalerTaps vTaps, + struct NvKmsScalingUsageBounds *out) +{ + const NVEvoScalerTapsCaps *pTapsCaps = NULL; + + out->vTaps = vTaps; + + /* Start with default values (1.0) */ + out->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_1X; + out->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_1X; + + if (outHeight > inHeight) { + out->vUpscalingAllowed = TRUE; + } else if (outHeight < inHeight) { + out->vUpscalingAllowed = FALSE; + + pTapsCaps = &pScalerCaps->taps[vTaps]; + if (!ComputeScalingFactor(pTapsCaps->maxVDownscaleFactor, + inHeight, outHeight, + &out->maxVDownscaleFactor)) { + return FALSE; + } + } + + if (outWidth < inWidth) { + pTapsCaps = &pScalerCaps->taps[hTaps]; + if (!ComputeScalingFactor(pTapsCaps->maxHDownscaleFactor, + inWidth, outWidth, + &out->maxHDownscaleFactor)) { + return FALSE; + } + } + + return TRUE; +} + +NvBool nvAssignScalerTaps(const NVDevEvoRec *pDevEvo, + const NVEvoScalerCaps *pScalerCaps, + const NvU32 inWidth, const NvU32 inHeight, + const NvU32 outWidth, const NvU32 outHeight, + NvBool doubleScan, + NVEvoScalerTaps *hTapsOut, NVEvoScalerTaps *vTapsOut) +{ + NVEvoScalerTaps hTaps, vTaps; + NvBool setHTaps = (outWidth != inWidth); + NvBool setVTaps = (outHeight != inHeight); + + /* + * Select the taps filtering; we select the highest taps allowed with our + * scaling configuration. + * + * Note if requiresScalingTapsInBothDimensions is true and if we are + * scaling in *either* dimension, then we need to program > 1 taps + * in *both* dimensions. + */ + if ((setHTaps || setVTaps) && + pDevEvo->hal->caps.requiresScalingTapsInBothDimensions) { + setHTaps = TRUE; + setVTaps = TRUE; + } + + /* + * Horizontal taps: if not scaling, then no filtering; otherwise, set the + * maximum filtering, because htaps shouldn't have any constraints (unlike + * vtaps... see below). + */ + if (setHTaps) { + /* + * XXX dispClass_01.mfs says: "For text and desktop scaling, the 2 tap + * bilinear frequently looks better than the 8 tap filter which is more + * optimized for video type scaling." Once we determine how best to + * expose configuration of taps, we should choose how to indicate that 8 + * or 5 taps is the maximum. + * + * For now, we'll start with 2 taps as the default, but may end up + * picking a higher taps value if the required H downscaling factor + * isn't possible with 2 taps. + */ + NvBool hTapsFound = FALSE; + + for (hTaps = NV_EVO_SCALER_2TAPS; + hTaps <= NV_EVO_SCALER_TAPS_MAX; + hTaps++) { + NvU16 hFactor; + + if (!ComputeScalingFactor( + pScalerCaps->taps[hTaps].maxHDownscaleFactor, + inWidth, outWidth, + &hFactor)) { + continue; + } + + hTapsFound = TRUE; + break; + } + + if (!hTapsFound) { + return FALSE; + } + } else { + hTaps = pDevEvo->hal->caps.minScalerTaps; + } + + /* + * Vertical taps: if scaling, set the maximum valid filtering, otherwise, no + * filtering. + */ + if (setVTaps) { + /* + * Select the maximum vertical taps based on the capabilities. + * + * For doublescan modes, limit to 2 taps to reduce blurriness. We really + * want plain old line doubling, but EVO doesn't support that. 
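+         * (Hence the !doubleScan guards on the 5- and 3-tap cases below.)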
+ */ + if (IsVTapsPossible(pScalerCaps, inWidth, outWidth, NV_EVO_SCALER_5TAPS) && + !doubleScan) { + vTaps = NV_EVO_SCALER_5TAPS; + } else if (IsVTapsPossible(pScalerCaps, inWidth, outWidth, NV_EVO_SCALER_3TAPS) && + !doubleScan) { + vTaps = NV_EVO_SCALER_3TAPS; + } else if (IsVTapsPossible(pScalerCaps, inWidth, outWidth, NV_EVO_SCALER_2TAPS)) { + vTaps = NV_EVO_SCALER_2TAPS; + } else { + return FALSE; + } + } else { + vTaps = pDevEvo->hal->caps.minScalerTaps; + } + + *hTapsOut = hTaps; + *vTapsOut = vTaps; + + return TRUE; +} + +/* + * Check that ViewPortIn does not exceed hardware limits and compute vTaps and + * hTaps based on configured ViewPortIn/Out scaling if possible given scaler + * capabilities. + */ +NvBool nvValidateHwModeTimingsViewPort(const NVDevEvoRec *pDevEvo, + const NVEvoScalerCaps *pScalerCaps, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString) +{ + NVHwModeViewPortEvoPtr pViewPort = &pTimings->viewPort; + const NvU32 inWidth = pViewPort->in.width; + const NvU32 inHeight = pViewPort->in.height; + const NvU32 outWidth = pViewPort->out.width; + const NvU32 outHeight = pViewPort->out.height; + const NvBool scaling = (outWidth != inWidth) || (outHeight != inHeight); + NVEvoScalerTaps hTaps, vTaps; + + /* + * As per the MFS, there is a restriction for the width and height + * of ViewPortIn and ViewPortOut + */ + if (inWidth > 8192 || inHeight > 8192 || + outWidth > 8192 || outHeight > 8192) { + nvEvoLogInfoString(pInfoString, + "Viewport dimensions exceed hardware capabilities"); + return FALSE; + } + + if (!nvAssignScalerTaps(pDevEvo, pScalerCaps, inWidth, inHeight, outWidth, outHeight, + pTimings->doubleScan, &hTaps, &vTaps)) { + nvEvoLogInfoString(pInfoString, + "Unable to configure scaling from %dx%d to %dx%d (exceeds filtering capabilities)", + inWidth, inHeight, + outWidth, outHeight); + return FALSE; + } + + /* + * If this is an interlaced mode but we don't have scaling + * configured, check that the width will fit in the 2-tap vertical + * LineStoreSize; this is an EVO requirement for interlaced + * rasters + */ + if (pTimings->interlaced && !scaling) { + /* !scaling means widths should be same */ + nvAssert(outWidth == inWidth); + + if (outWidth > pScalerCaps->taps[NV_EVO_SCALER_2TAPS].maxPixelsVTaps) { + nvEvoLogInfoString(pInfoString, + "Interlaced mode requires filtering, but line width (%d) exceeds filtering capabilities", + outWidth); + return FALSE; + } + + /* hTaps and vTaps should have been set to minScalerTaps above */ + nvAssert(hTaps == pDevEvo->hal->caps.minScalerTaps); + nvAssert(vTaps == pDevEvo->hal->caps.minScalerTaps); + } + + pViewPort->hTaps = hTaps; + pViewPort->vTaps = vTaps; + return TRUE; +} + +static void AssignGuaranteedSOCBounds(const NVDevEvoRec *pDevEvo, + struct NvKmsUsageBounds *pGuaranteed) +{ + NvU32 layer; + + pGuaranteed->layer[NVKMS_MAIN_LAYER].usable = TRUE; + pGuaranteed->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats = + nvEvoGetFormatsWithEqualOrLowerUsageBound( + NvKmsSurfaceMemoryFormatA8R8G8B8, + pDevEvo->caps.layerCaps[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats); + nvInitScalingUsageBounds(pDevEvo, &pGuaranteed->layer[NVKMS_MAIN_LAYER].scaling); + + for (layer = 1; layer < ARRAY_LEN(pGuaranteed->layer); layer++) { + pGuaranteed->layer[layer].usable = FALSE; + nvInitScalingUsageBounds(pDevEvo, &pGuaranteed->layer[layer].scaling); + } +} + +/* + * Initialize the given NvKmsUsageBounds. Ask for everything supported by the HW + * by default. 
Later, based on what IMP says, we will scale back as needed. + */ +void nvAssignDefaultUsageBounds(const NVDispEvoRec *pDispEvo, + NVHwModeViewPortEvo *pViewPort) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + struct NvKmsUsageBounds *pPossible = &pViewPort->possibleUsage; + NvU32 i; + + for (i = 0; i < ARRAY_LEN(pPossible->layer); i++) { + struct NvKmsScalingUsageBounds *pScaling = &pPossible->layer[i].scaling; + + pPossible->layer[i].supportedSurfaceMemoryFormats = + pDevEvo->caps.layerCaps[i].supportedSurfaceMemoryFormats; + pPossible->layer[i].usable = + (pPossible->layer[i].supportedSurfaceMemoryFormats != 0); + if (!pPossible->layer[i].usable) { + continue; + } + + nvInitScalingUsageBounds(pDevEvo, pScaling); + + if (pDevEvo->hal->GetWindowScalingCaps) { + const NVEvoScalerCaps *pScalerCaps = + pDevEvo->hal->GetWindowScalingCaps(pDevEvo); + int j; + + for (j = NV_EVO_SCALER_TAPS_MAX; j >= NV_EVO_SCALER_TAPS_MIN; j--) { + const NVEvoScalerTapsCaps *pTapsCaps = &pScalerCaps->taps[j]; + + if ((pTapsCaps->maxVDownscaleFactor == 0) && + (pTapsCaps->maxHDownscaleFactor == 0)) { + continue; + } + + pScaling->maxVDownscaleFactor = pTapsCaps->maxVDownscaleFactor; + pScaling->maxHDownscaleFactor = pTapsCaps->maxHDownscaleFactor; + pScaling->vTaps = j; + pScaling->vUpscalingAllowed = (pTapsCaps->maxPixelsVTaps > 0); + break; + } + } + } + + if (pDevEvo->isSOCDisplay) { + AssignGuaranteedSOCBounds(pDevEvo, &pViewPort->guaranteedUsage); + } else { + pViewPort->guaranteedUsage = *pPossible; + } +} + +/* + * ConstructHwModeTimingsViewPort() - determine the ViewPortOut size + * + * ViewPortIn (specified by inWidth, inHeight) selects the pixels to + * extract from the scanout surface; ViewPortOut positions those + * pixels within the raster timings. + * + * If the configuration is not possible, pViewPort->valid will be set + * to false; otherwise, pViewPort->valid will be set to true. + */ + +static NvBool +ConstructHwModeTimingsViewPort(const NVDispEvoRec *pDispEvo, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut) +{ + NVHwModeViewPortEvoPtr pViewPort = &pTimings->viewPort; + NvU32 outWidth, outHeight; + const NvU32 hVisible = nvEvoVisibleWidth(pTimings); + const NvU32 vVisible = nvEvoVisibleHeight(pTimings); + + /* the ViewPortOut should default to the raster size */ + + outWidth = hVisible; + outHeight = vVisible; + + pViewPort->out.xAdjust = 0; + pViewPort->out.yAdjust = 0; + pViewPort->out.width = outWidth; + pViewPort->out.height = outHeight; + + /* + * If custom viewPortOut or viewPortIn were specified, do basic + * validation and then assign them to pViewPort. We'll do more + * extensive checking of these values as part of IMP. Note that + * pViewPort->out.[xy]Adjust are relative to viewPortOut centered + * within the raster timings, but pViewPortOut->[xy]1 are relative + * to 0,0. + */ + if (pViewPortOut) { + NvS16 offset; + struct NvKmsRect viewPortOut = *pViewPortOut; + + /* + * When converting from user viewport out to hardware raster timings, + * double in the vertical dimension + */ + if (pTimings->doubleScan) { + viewPortOut.y *= 2; + viewPortOut.height *= 2; + } + + /* + * The client-specified viewPortOut is in "full" horizontal space for + * SW YUV420 modes. Convert to "half" horizontal space (matching + * NVHwModeTimingsEvo and viewPortIn). 
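+     *
+     * For example (hypothetical numbers): with hVisible == 1920 and a
+     * client viewPortOut of width 1280 at x == 320 (i.e., centered), the
+     * offset below is -320 and xAdjust comes out as 0.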
+ */ + if (pTimings->yuv420Mode == NV_YUV420_MODE_SW) { + viewPortOut.x /= 2; + viewPortOut.width /= 2; + } + + if (A_plus_B_greater_than_C_U16(viewPortOut.x, + viewPortOut.width, + hVisible)) { + return FALSE; + } + + if (A_plus_B_greater_than_C_U16(viewPortOut.y, + viewPortOut.height, + vVisible)) { + return FALSE; + } + + offset = (hVisible - viewPortOut.width) / 2 * -1; + pViewPort->out.xAdjust = offset + viewPortOut.x; + + offset = (vVisible - viewPortOut.height) / 2 * -1; + pViewPort->out.yAdjust = offset + viewPortOut.y; + + pViewPort->out.width = viewPortOut.width; + pViewPort->out.height = viewPortOut.height; + } + + if (pViewPortSizeIn) { + if (pViewPortSizeIn->width <= 0) { + return FALSE; + } + if (pViewPortSizeIn->height <= 0) { + return FALSE; + } + + pViewPort->in.width = pViewPortSizeIn->width; + pViewPort->in.height = pViewPortSizeIn->height; + } else { + pViewPort->in.width = pViewPort->out.width; + pViewPort->in.height = pViewPort->out.height; + + /* When deriving viewportIn from viewportOut, halve the height for + * doubleScan */ + if (pTimings->doubleScan) { + pViewPort->in.height /= 2; + } + } + + nvAssignDefaultUsageBounds(pDispEvo, &pTimings->viewPort); + + return TRUE; +} + + +static NvBool FrlOverrideForYCbCr422( + const NVDevEvoRec *pDevEvo, + const NvKmsDpyOutputColorFormatInfo *pColorFormatsInfo, + NVDpyAttributeColor *pDpyColor) +{ + /* + * If the hardware natively supports YCbCr422 + FRL, + * there is nothing to do. + */ + if (pDevEvo->hal->caps.supportsYCbCr422OverHDMIFRL) { + return TRUE; + } + + nvkms_memset(pDpyColor, 0, sizeof(*pDpyColor)); + pDpyColor->colorimetry = NVKMS_OUTPUT_COLORIMETRY_DEFAULT; + + if (pColorFormatsInfo->rgb444.maxBpc >= + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8) { + pDpyColor->format = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB; + pDpyColor->bpc = pColorFormatsInfo->rgb444.maxBpc; + pDpyColor->range = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL; + return TRUE; + } + + if (pColorFormatsInfo->yuv444.maxBpc >= + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8) { + pDpyColor->format = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444; + pDpyColor->bpc = pColorFormatsInfo->yuv444.maxBpc; + pDpyColor->range = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED; + return TRUE; + } + + return FALSE; +} + +static NvBool GetDfpHdmiProtocol(const NVDpyEvoRec *pDpyEvo, + const NvU32 overrides, + NVDpyAttributeColor *pDpyColor, + NVHwModeTimingsEvoPtr pTimings, + enum nvKmsTimingsProtocol *pTimingsProtocol) +{ + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NvU32 rmProtocol = pConnectorEvo->or.protocol; + const NvKmsDpyOutputColorFormatInfo colorFormatsInfo = + nvDpyGetOutputColorFormatInfo(pDpyEvo); + const NvBool forceHdmiFrlIsSupported = FALSE; + + nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS || + rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A || + rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B); + + /* Override protocol if this mode requires HDMI FRL. */ + /* If we don't require boot clocks... */ + if (((overrides & NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS) == 0) && + ((nvHdmiGetEffectivePixelClockKHz(pDpyEvo, pTimings, pDpyColor) > + pDpyEvo->maxSingleLinkPixelClockKHz) || + forceHdmiFrlIsSupported) && + /* If FRL is supported... 
+         */
+        nvHdmiDpySupportsFrl(pDpyEvo)) {
+
+        /*
+         * Not all hardware configurations support YCbCr422 with FRL;
+         * override if necessary, or fail FRL.
+         */
+        if (pDpyColor->format ==
+                NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422) {
+            if (!FrlOverrideForYCbCr422(pDevEvo, &colorFormatsInfo, pDpyColor)) {
+                return FALSE;
+            }
+        }
+
+        *pTimingsProtocol = NVKMS_PROTOCOL_SOR_HDMI_FRL;
+        return TRUE;
+    }
+
+    do {
+        if (nvHdmiGetEffectivePixelClockKHz(pDpyEvo, pTimings, pDpyColor) <=
+            pDpyEvo->maxSingleLinkPixelClockKHz) {
+
+            switch (rmProtocol) {
+            case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
+                /*
+                 * Force single link TMDS protocol. HDMI does not
+                 * physically support dual link TMDS.
+                 *
+                 * TMDS_A: "use A side of the link"
+                 */
+                *pTimingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A;
+                break;
+            case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
+                *pTimingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A;
+                break;
+            case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
+                *pTimingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B;
+                break;
+            default:
+                return FALSE;
+            }
+            return TRUE;
+        }
+    } while (nvDowngradeColorSpaceAndBpc(pDpyEvo,
+                                         &colorFormatsInfo,
+                                         pDpyColor));
+    return FALSE;
+}
+
+/*
+ * GetDfpProtocol() - determine the protocol to use on the given pDpy
+ * with the given pTimings; assigns pTimings->protocol.
+ */
+
+static NvBool GetDfpProtocol(const NVDpyEvoRec *pDpyEvo,
+                             const struct NvKmsModeValidationParams *pParams,
+                             NVDpyAttributeColor *pDpyColor,
+                             NVHwModeTimingsEvoPtr pTimings)
+{
+    NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
+    const NvU32 rmProtocol = pConnectorEvo->or.protocol;
+    const NvU32 overrides = pParams->overrides;
+    enum nvKmsTimingsProtocol timingsProtocol;
+
+    nvAssert(pConnectorEvo->legacyType ==
+             NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP);
+
+    if (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
+        if (nvDpyIsHdmiEvo(pDpyEvo)) {
+            if (!GetDfpHdmiProtocol(pDpyEvo, overrides, pDpyColor, pTimings,
+                                    &timingsProtocol)) {
+                return FALSE;
+            }
+
+        } else {
+            switch (rmProtocol) {
+            default:
+                nvAssert(!"unrecognized SOR RM protocol");
+                return FALSE;
+            case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
+                if (nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings) &&
+                    ((overrides & NVKMS_MODE_VALIDATION_NO_MAX_PCLK_CHECK) == 0)) {
+                    return FALSE;
+                }
+                timingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A;
+                break;
+            case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
+                if (nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings) &&
+                    ((overrides & NVKMS_MODE_VALIDATION_NO_MAX_PCLK_CHECK) == 0)) {
+                    return FALSE;
+                }
+                timingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B;
+                break;
+            case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
+                /*
+                 * Override dual/single link TMDS protocol if necessary.
+                 * XXX might be nice to give a way for users to override the
+                 * SingleLink/DualLink decision.
+ * + * TMDS_A: "use A side of the link" + * TMDS_B: "use B side of the link" + */ + if (nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings)) { + timingsProtocol = NVKMS_PROTOCOL_SOR_DUAL_TMDS; + } else { + timingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A; + } + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A: + timingsProtocol = NVKMS_PROTOCOL_SOR_DP_A; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B: + timingsProtocol = NVKMS_PROTOCOL_SOR_DP_B; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM: + timingsProtocol = NVKMS_PROTOCOL_SOR_LVDS_CUSTOM; + break; + } + } + } else if (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR) { + nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC); + timingsProtocol = NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC; + } else if (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_DSI) { + nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI); + timingsProtocol = NVKMS_PROTOCOL_DSI; + } else { + nvAssert(!"Unknown OR type"); + return FALSE; + } + + pTimings->protocol = timingsProtocol; + + return TRUE; + +} + + + +/* + * ConstructHwModeTimingsEvoCrt() - construct EVO hardware timings to + * drive a CRT, given the mode timings in pMt + */ + +static NvBool +ConstructHwModeTimingsEvoCrt(const NVConnectorEvoRec *pConnectorEvo, + const NvModeTimings *pModeTimings, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString) +{ + ConstructHwModeTimingsFromNvModeTimings(pModeTimings, pTimings); + + /* assign the protocol; we expect DACs to have RGB protocol */ + + nvAssert(pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT); + + pTimings->protocol = NVKMS_PROTOCOL_DAC_RGB; + + /* assign scaling fields */ + + return ConstructHwModeTimingsViewPort(pConnectorEvo->pDispEvo, pTimings, + pInfoString, pViewPortSizeIn, + pViewPortOut); +} + + +/*! + * Construct EVO hardware timings to drive a digital protocol (TMDS, + * DP, etc). + * + * \param[in] pDpy The display device for which to build timings. + * \param[in] pModeTimings The hw-neutral description of the timings. + * \param[out] pTimings The EVO-specific modetimings. + * + * \return TRUE if the EVO modetimings could be built; FALSE if failure. 
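+ *
+ * Note this may also downgrade *pDpyColor (e.g., for DSC pass-through or
+ * HDMI FRL), as handled in the body below.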
+ */ +static NvBool ConstructHwModeTimingsEvoDfp(const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pModeTimings, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + const NvBool dscPassThrough, + NVDpyAttributeColor *pDpyColor, + NVHwModeTimingsEvoPtr pTimings, + const struct + NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString) +{ + NvBool ret; + + ConstructHwModeTimingsFromNvModeTimings(pModeTimings, pTimings); + + pTimings->dscPassThrough = dscPassThrough; + if (pTimings->dscPassThrough && + (pDpyColor->format != + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB)) { + const NvKmsDpyOutputColorFormatInfo colorFormatsInfo = + nvDpyGetOutputColorFormatInfo(pDpyEvo); + + if (colorFormatsInfo.rgb444.maxBpc == + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) { + return FALSE; + } + + nvkms_memset(pDpyColor, 0, sizeof(*pDpyColor)); + + pDpyColor->colorimetry = NVKMS_OUTPUT_COLORIMETRY_DEFAULT; + pDpyColor->format = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB; + pDpyColor->bpc = colorFormatsInfo.rgb444.maxBpc; + pDpyColor->range = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL; + } + + ret = GetDfpProtocol(pDpyEvo, pParams, pDpyColor, pTimings); + + if (!ret) { + return ret; + } + + ret = ApplyDualLinkRequirements(pDpyEvo, pParams, pTimings, pInfoString); + + if (!ret) { + return ret; + } + + return ConstructHwModeTimingsViewPort(pDpyEvo->pDispEvo, pTimings, + pInfoString, pViewPortSizeIn, + pViewPortOut); +} + +static NvBool IsColorBpcSupported( + const NvKmsDpyOutputColorFormatInfo *pSupportedColorFormats, + const enum NvKmsDpyAttributeCurrentColorSpaceValue format, + const enum NvKmsDpyAttributeColorBpcValue bpc) +{ + switch (format) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + return (bpc <= pSupportedColorFormats->rgb444.maxBpc) && + (bpc >= pSupportedColorFormats->rgb444.minBpc); + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: + return (bpc <= pSupportedColorFormats->yuv422.maxBpc) && + (bpc >= pSupportedColorFormats->yuv422.minBpc); + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + return (bpc <= pSupportedColorFormats->yuv444.maxBpc) && + (bpc >= pSupportedColorFormats->yuv444.minBpc); + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420: + return (bpc == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8); + } + + return FALSE; +} + +NvBool nvDowngradeColorBpc( + const NvKmsDpyOutputColorFormatInfo *pSupportedColorFormats, + NVDpyAttributeColor *pDpyColor) +{ + switch (pDpyColor->bpc) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10: + if (pDpyColor->colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) { + return FALSE; + } + + if (!IsColorBpcSupported(pSupportedColorFormats, + pDpyColor->format, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8)) { + return FALSE; + } + + pDpyColor->bpc = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8: + /* At depth 18 only RGB and full range are allowed */ + if (pDpyColor->format == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) { + if (!IsColorBpcSupported(pSupportedColorFormats, + pDpyColor->format, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6)) { + return FALSE; + } + + pDpyColor->bpc = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6; + pDpyColor->range = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL; + } else { + return FALSE; + } + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN: + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6: + return FALSE; + } + + return TRUE; +} + +NvBool nvDowngradeColorSpaceAndBpc( + const NVDpyEvoRec *pDpyEvo, + const 
NvKmsDpyOutputColorFormatInfo *pSupportedColorFormats, + NVDpyAttributeColor *pDpyColor) +{ + if (nvDowngradeColorBpc(pSupportedColorFormats, pDpyColor)) { + return TRUE; + } + + switch (pDpyColor->format) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + if ((pDpyColor->colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) && + !pDpyEvo->pDispEvo->pDevEvo->caps.supportsYUV2020) { + break; + } + /* fallthrough */ + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + if (pSupportedColorFormats->yuv422.maxBpc != + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) { + pDpyColor->format = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422; + pDpyColor->bpc = pSupportedColorFormats->yuv422.maxBpc; + pDpyColor->range = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED; + return TRUE; + } + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: /* fallthrough */ + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420: + break; + } + + return FALSE; +} + +/* + * nvDPValidateModeEvo() - For DP devices handled by the DP lib, check DP + * bandwidth and pick the best possible/supported pixel depth to use for + * the given mode timings. + */ + +NvBool nvDPValidateModeEvo(NVDpyEvoPtr pDpyEvo, + NVHwModeTimingsEvoPtr pTimings, + NVDpyAttributeColor *pDpyColor, + const NvBool b2Heads1Or, + NVDscInfoEvoRec *pDscInfo, + const struct NvKmsModeValidationParams *pParams) +{ + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + NVDpyAttributeColor dpyColor = *pDpyColor; + const NvKmsDpyOutputColorFormatInfo supportedColorFormats = + nvDpyGetOutputColorFormatInfo(pDpyEvo); + + /* Only do this for DP devices. */ + if (!nvConnectorUsesDPLib(pConnectorEvo)) { + return TRUE; + } + + if ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_DISPLAYPORT_BANDWIDTH_CHECK) != 0) { + return TRUE; + } + + nvAssert(nvDpyUsesDPLib(pDpyEvo)); + nvAssert(pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR); + + tryAgain: + + if (!nvDPValidateModeForDpyEvo(pDpyEvo, &dpyColor, pParams, pTimings, + b2Heads1Or, pDscInfo)) { + if (nvDowngradeColorSpaceAndBpc(pDpyEvo, &supportedColorFormats, &dpyColor)) { + goto tryAgain; + } + /* + * Cannot downgrade pixelDepth further -- + * this mode is not possible on this DP link, so fail. 
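+ * + * (Illustrative walk-through, not from the source: for a hypothetical + * sink whose EDID allows RGB up to 10 bpc and YCbCr422 up to 10 bpc, the + * downgrade loop above would try RGB 10 bpc -> RGB 8 bpc -> RGB 6 bpc -> + * YCbCr422 10 bpc -> YCbCr422 8 bpc before giving up.)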
+ */ + + return FALSE; + } + + *pDpyColor = dpyColor; + return TRUE; +} + +/* + * Construct the hardware values to program EVO for the specified + * NVModeTimings + */ + +NvBool nvConstructHwModeTimingsEvo(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsMode *pKmsMode, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + const NvBool dscPassThrough, + NVDpyAttributeColor *pDpyColor, + NVHwModeTimingsEvoPtr pTimings, + const struct NvKmsModeValidationParams + *pParams, + NVEvoInfoStringPtr pInfoString) +{ + const NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo; + NvBool ret; + + /* assign the pTimings values */ + + if (pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) { + ret = ConstructHwModeTimingsEvoDfp(pDpyEvo, + &pKmsMode->timings, + pViewPortSizeIn, pViewPortOut, + dscPassThrough, + pDpyColor, pTimings, pParams, + pInfoString); + } else if (pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) { + nvAssert(dscPassThrough == FALSE); + ret = ConstructHwModeTimingsEvoCrt(pConnectorEvo, + &pKmsMode->timings, + pViewPortSizeIn, pViewPortOut, + pTimings, pInfoString); + } else { + nvAssert(!"Invalid pDpyEvo->type"); + return FALSE; + } + + if (!ret) return FALSE; + + /* tweak the raster timings for gsync */ + + if (pDpyEvo->pDispEvo->pFrameLockEvo) { + // if this fails, the timing remains untweaked, which just means + // that the mode may not work well with frame lock + TweakTimingsForGsync(pDpyEvo, pTimings, pInfoString, pParams->stereoMode); + } + + return TRUE; +} + +static NvBool DowngradeViewPortTaps(const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NVEvoScalerTaps srcTaps, + NVEvoScalerTaps dstTaps, + NvBool isVert, + NVEvoScalerTaps *pTaps) +{ + const NVEvoScalerCaps *pScalerCaps = &pHeadCaps->scalerCaps; + NvBool dstPossible; + + if (isVert) { + dstPossible = IsVTapsPossible(pScalerCaps, pViewPort->in.width, + pViewPort->out.width, dstTaps); + } else { + dstPossible = pScalerCaps->taps[dstTaps].maxHDownscaleFactor > 0; + } + + if (*pTaps >= srcTaps && dstPossible) { + *pTaps = dstTaps; + return TRUE; + } + + return FALSE; +} + +/* Downgrade the htaps from 8 to 5 */ +static NvBool DowngradeViewPortHTaps8(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + return DowngradeViewPortTaps(pHeadCaps, + pViewPort, + NV_EVO_SCALER_8TAPS, + NV_EVO_SCALER_5TAPS, + FALSE /* isVert */, + &pViewPort->hTaps); +} + +/* Downgrade the htaps from 5 to 2 */ +static NvBool DowngradeViewPortHTaps5(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + return DowngradeViewPortTaps(pHeadCaps, + pViewPort, + NV_EVO_SCALER_5TAPS, + NV_EVO_SCALER_2TAPS, + FALSE /* isVert */, + &pViewPort->hTaps); +} + +/* Downgrade the vtaps from 5 to 3 */ +static NvBool DowngradeViewPortVTaps5(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + return DowngradeViewPortTaps(pHeadCaps, + pViewPort, + NV_EVO_SCALER_5TAPS, + NV_EVO_SCALER_3TAPS, + TRUE /* isVert */, + &pViewPort->vTaps); +} + +/* Downgrade the vtaps from 3 to 2 */ +static NvBool DowngradeViewPortVTaps3(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + return DowngradeViewPortTaps(pHeadCaps, + pViewPort, + NV_EVO_SCALER_3TAPS, 
+ NV_EVO_SCALER_2TAPS, + TRUE /* isVert */, + &pViewPort->vTaps); +} + +static NvBool +DowngradeLayerDownscaleFactor(NVHwModeViewPortEvoPtr pViewPort, + const NvU32 layer, + NvU16 srcFactor, + NvU16 dstFactor, + NvU16 *pFactor) +{ + struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage; + + if (!pUsage->layer[layer].usable) { + return FALSE; + } + + if (*pFactor == srcFactor) { + *pFactor = dstFactor; + return TRUE; + } + + return FALSE; +} + +static NvBool +DowngradeLayerVDownscaleFactor4X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_4X, + NV_EVO_SCALE_FACTOR_3X, + &pScaling->maxVDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool +DowngradeLayerVDownscaleFactor3X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_3X, + NV_EVO_SCALE_FACTOR_2X, + &pScaling->maxVDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool +DowngradeLayerVDownscaleFactor2X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_2X, + NV_EVO_SCALE_FACTOR_1X, + &pScaling->maxVDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool +DowngradeLayerHDownscaleFactor4X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_4X, + NV_EVO_SCALE_FACTOR_3X, + &pScaling->maxHDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool DowngradeLayerHDownscaleFactor3X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_3X, + NV_EVO_SCALE_FACTOR_2X, + &pScaling->maxHDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool DowngradeLayerHDownscaleFactor2X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct 
NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_2X, + NV_EVO_SCALE_FACTOR_1X, + &pScaling->maxHDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +/* Downgrade the vtaps from 5 to 2 */ +static NvBool DowngradeLayerVTaps5(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage; + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pUsage->layer[layer].scaling; + + if (!pUsage->layer[layer].usable) { + continue; + } + + if (pScaling->vTaps == NV_EVO_SCALER_5TAPS) { + pScaling->vTaps = NV_EVO_SCALER_2TAPS; + return TRUE; + } + } + + return FALSE; +} + +static NvBool DowngradeLayerVUpscaling(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage; + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pUsage->layer[layer].scaling; + + if (!pUsage->layer[layer].usable) { + continue; + } + + if (pScaling->vUpscalingAllowed) { + pScaling->vUpscalingAllowed = FALSE; + return TRUE; + } + } + + return FALSE; +} + +static NvBool DowngradeViewPortOverlayFormats( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 removeSurfaceMemoryFormats) +{ + struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage; + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (layer == NVKMS_MAIN_LAYER || !pUsage->layer[layer].usable) { + continue; + } + + if (pUsage->layer[layer].supportedSurfaceMemoryFormats & + removeSurfaceMemoryFormats) { + pUsage->layer[layer].supportedSurfaceMemoryFormats &= + ~removeSurfaceMemoryFormats; + if (pUsage->layer[layer].supportedSurfaceMemoryFormats == 0) { + pUsage->layer[layer].usable = FALSE; + } + + return TRUE; + } + } + + return FALSE; +} + +static NvBool DowngradeViewPortBaseFormats( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 removeSurfaceMemoryFormats) +{ + struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage; + + if (!pUsage->layer[NVKMS_MAIN_LAYER].usable) { + return FALSE; + } + + if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + removeSurfaceMemoryFormats) { + pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &= + ~removeSurfaceMemoryFormats; + if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats == 0) { + pUsage->layer[NVKMS_MAIN_LAYER].usable = FALSE; + } + + return TRUE; + } + + return FALSE; +} + +typedef NvBool (*DowngradeViewPortFuncPtr)(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 removeSurfaceMemoryFormats); + +/* + * Try to downgrade the usage bounds of the viewports, keeping the + * viewports roughly equal in capability; we do this from + * ValidateMetaMode50() when IMP rejects the mode. Return TRUE if we + * were able to downgrade something; return FALSE if there was nothing + * left to downgrade. 
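+ * + * Expected usage is a retry loop alternating IMP validation with downgrade + * attempts; a sketch (nvValidateImpOneDispDowngrade() below is the real + * implementation): + * + * do { + * if (nvValidateImpOneDisp(...)) break; + * } while (DownGradeMetaModeUsageBounds(...));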
+ */ + +static NvBool DownGradeMetaModeUsageBounds( + const NVDevEvoRec *pDevEvo, + const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvU32 modesetRequestedHeadsMask) +{ + static const struct { + DowngradeViewPortFuncPtr downgradeFunc; + NvU64 removeSurfaceMemoryFormats; + } downgradeFuncs[] = { + { DowngradeLayerVDownscaleFactor4X, + 0 }, + { DowngradeLayerHDownscaleFactor4X, + 0 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP }, + { DowngradeLayerVDownscaleFactor3X, + 0 }, + { DowngradeLayerHDownscaleFactor3X, + 0 }, + { DowngradeViewPortVTaps5, + 0 }, + { DowngradeViewPortVTaps3, + 0 }, + { DowngradeViewPortHTaps8, + 0 }, + { DowngradeViewPortHTaps5, + 0 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP }, + { DowngradeLayerVTaps5, + 0 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP }, + { DowngradeLayerVDownscaleFactor2X, + 0 }, + { DowngradeLayerHDownscaleFactor2X, + 0 }, + { DowngradeLayerVUpscaling, + 0 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP }, + }; + int i; + + // XXX assume the heads have equal capabilities + // XXX assume the gpus have equal capabilities + + const NVEvoHeadCaps *pHeadCaps = + &pDevEvo->gpus[0].capabilities.head[0]; + + + for (i = 0; i < ARRAY_LEN(downgradeFuncs); i++) { + int head; + FOR_ALL_HEADS(head, modesetRequestedHeadsMask) { + if (timingsParams[head].pTimings == NULL) { + continue; + } + + if (downgradeFuncs[i].downgradeFunc( + pDevEvo, + head, + pHeadCaps, + &timingsParams[head].pTimings->viewPort, + downgradeFuncs[i].removeSurfaceMemoryFormats)) { + return TRUE; + } + } + } + + /* Nothing 
else to downgrade */ + return FALSE; +} + +NvBool nvAllocateDisplayBandwidth( + NVDispEvoPtr pDispEvo, + NvU32 newIsoBandwidthKBPS, + NvU32 newDramFloorKBPS) +{ + NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS params = { }; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + if (!pDevEvo->isSOCDisplay) { + return TRUE; + } + + params.subDeviceInstance = 0; + params.averageBandwidthKBPS = newIsoBandwidthKBPS; + params.floorBandwidthKBPS = newDramFloorKBPS; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH, + &params, sizeof(params)); + if (ret != NV_OK) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate %u KBPS Iso and %u KBPS Dram", + newIsoBandwidthKBPS, newDramFloorKBPS); + return FALSE; + } + + pDispEvo->isoBandwidthKBPS = newIsoBandwidthKBPS; + pDispEvo->dramFloorKBPS = newDramFloorKBPS; + + return TRUE; +} + +static void AssignNVEvoIsModePossibleDispInput( + NVDispEvoPtr pDispEvo, + const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvBool requireBootClocks, + NVEvoReallocateBandwidthMode reallocBandwidth, + NVEvoIsModePossibleDispInput *pImpInput, + const NvU32 modesetRequestedHeadsMask) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 head; + NvU32 nextSorIndex = 0; + + nvkms_memset(pImpInput, 0, sizeof(*pImpInput)); + + pImpInput->requireBootClocks = requireBootClocks; + pImpInput->reallocBandwidth = reallocBandwidth; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + const NVConnectorEvoRec *pConnectorEvo = + timingsParams[head].pConnectorEvo; + NvU32 otherHead = 0; + + nvAssert((timingsParams[head].pTimings == NULL) == + (timingsParams[head].pConnectorEvo == NULL)); + + pImpInput->head[head].orIndex = NV_INVALID_OR; + + if (timingsParams[head].pTimings == NULL) { + continue; + } + + if ((modesetRequestedHeadsMask & NVBIT(head)) != 0x0) { + pImpInput->head[head].modesetRequested = TRUE; + } + + pImpInput->head[head].pTimings = timingsParams[head].pTimings; + pImpInput->head[head].enableDsc = timingsParams[head].enableDsc; + pImpInput->head[head].dscSliceCount = timingsParams[head].dscSliceCount; + pImpInput->head[head].possibleDscSliceCountMask = + timingsParams[head].possibleDscSliceCountMask; + pImpInput->head[head].b2Heads1Or = timingsParams[head].b2Heads1Or; + pImpInput->head[head].pixelDepth = timingsParams[head].pixelDepth; + pImpInput->head[head].displayId = timingsParams[head].activeRmId; + pImpInput->head[head].orType = pConnectorEvo->or.type; + pImpInput->head[head].pUsage = timingsParams[head].pUsage; + + if (!NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED) || + pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + + nvAssert(pConnectorEvo->or.primary != NV_INVALID_OR); + + pImpInput->head[head].orIndex = pConnectorEvo->or.primary; + continue; + } + + /* + * If more than one head is attached to the same connector, then make + * sure that all of them use the same SOR index. + */ + for (otherHead = 0; otherHead < head; otherHead++) { + if (timingsParams[otherHead].pConnectorEvo == pConnectorEvo) { + pImpInput->head[head].orIndex = pImpInput->head[otherHead].orIndex; + break; + } + } + + /* + * On GPUs with a full crossbar, the SORs are equally capable, so just + * use the next unused SOR. + * + * We assume there are as many SORs as there are heads.
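+ * + * For example (hypothetical configuration): heads 0 and 1 driving two + * separate DP connectors would be assigned SOR 0 and SOR 1 respectively, + * while two heads attached to the same connector (e.g. a DP-MST dock) + * share a single SOR via the otherHead loop above.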
+ */ + if (pImpInput->head[head].orIndex == NV_INVALID_OR) { + pImpInput->head[head].orIndex = nextSorIndex; + nextSorIndex++; + } + } +} + +/*! + * Validate the described disp configuration through IMP. + * + * \param[in] pDispEvo The disp to validate. + * + * \param[in,out] timingsParams[] The proposed configuration to use on each head; + * includes: + * + * pConnectorEvo - + * The proposed connector to drive on each head. + * + * activeRmId - + * The display ID that we use to talk to RM + * about the dpy(s) on each head. + * + * pTimings - + * The proposed timings to use on each head; + * note the usage bounds within pTimings + * may be altered by this function. + * + * pixelDepth - + * The pixel depth to be displayed on + * each head. + * \param[in] requireBootClocks + * Only validate modes that will work at P8 + * clocks. + * + * \param[in] reallocBandwidth + * Try to allocate the required display + * bandwidth if IMP passes. + * + * \param[out] pMinIsoBandwidthKBPS + * The ISO bandwidth that's required for the + * proposed disp configuration only. This value + * doesn't take the current display state into + * account. + * + * \param[out] pMinDramFloorKBPS + * The DRAM floor that's required for the + * proposed disp configuration only. This value + * doesn't take the current display state into + * account. + * + * \return Return TRUE if the proposed disp configuration is + * considered valid for IMP purposes. + */ +NvBool nvValidateImpOneDisp( + NVDispEvoPtr pDispEvo, + NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvBool requireBootClocks, + NVEvoReallocateBandwidthMode reallocBandwidth, + NvU32 *pMinIsoBandwidthKBPS, + NvU32 *pMinDramFloorKBPS, + const NvU32 modesetRequestedHeadsMask) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoIsModePossibleDispInput impInput = { }; + NVEvoIsModePossibleDispOutput impOutput = { }; + NvU32 newIsoBandwidthKBPS, newDramFloorKBPS; + NvBool needToRealloc = FALSE; + + AssignNVEvoIsModePossibleDispInput(pDispEvo, + timingsParams, requireBootClocks, + reallocBandwidth, + &impInput, + modesetRequestedHeadsMask); + + pDevEvo->hal->IsModePossible(pDispEvo, &impInput, &impOutput); + if (!impOutput.possible) { + return FALSE; + } + + switch (reallocBandwidth) { + case NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE: + needToRealloc = (impOutput.minRequiredBandwidthKBPS > pDispEvo->isoBandwidthKBPS) || + (impOutput.floorBandwidthKBPS > pDispEvo->dramFloorKBPS); + newIsoBandwidthKBPS = + NV_MAX(pDispEvo->isoBandwidthKBPS, impOutput.minRequiredBandwidthKBPS); + newDramFloorKBPS = + NV_MAX(pDispEvo->dramFloorKBPS, impOutput.floorBandwidthKBPS); + + break; + case NV_EVO_REALLOCATE_BANDWIDTH_MODE_POST: + needToRealloc = (impOutput.minRequiredBandwidthKBPS != pDispEvo->isoBandwidthKBPS) || + (impOutput.floorBandwidthKBPS != pDispEvo->dramFloorKBPS); + newIsoBandwidthKBPS = impOutput.minRequiredBandwidthKBPS; + newDramFloorKBPS = impOutput.floorBandwidthKBPS; + + break; + case NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE: + default: + break; + } + + if (needToRealloc) { + if (!nvAllocateDisplayBandwidth(pDispEvo, + newIsoBandwidthKBPS, + newDramFloorKBPS)) { + return FALSE; + } + } + + if (pMinIsoBandwidthKBPS != NULL) { + *pMinIsoBandwidthKBPS = impOutput.minRequiredBandwidthKBPS; + } + + if (pMinDramFloorKBPS != NULL) { + *pMinDramFloorKBPS = impOutput.floorBandwidthKBPS; + } + + for (NvU32 head = 0; head < pDevEvo->numHeads; head++) { + if (timingsParams[head].pTimings == NULL) { + continue; + } + + if ((modesetRequestedHeadsMask &
NVBIT(head)) == 0x0) { + nvAssert(timingsParams[head].dscSliceCount == + impOutput.head[head].dscSliceCount); + + } else { + timingsParams[head].dscSliceCount = + impOutput.head[head].dscSliceCount; + + } + } + + return TRUE; +} + +NvBool nvValidateImpOneDispDowngrade( + NVDispEvoPtr pDispEvo, + NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvBool requireBootClocks, + NVEvoReallocateBandwidthMode reallocBandwidth, + NvU32 modesetRequestedHeadsMask) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvBool impPassed = FALSE; + + do { + impPassed = nvValidateImpOneDisp(pDispEvo, + timingsParams, + requireBootClocks, + reallocBandwidth, + NULL /* pMinIsoBandwidthKBPS */, + NULL /* pMinDramFloorKBPS */, + modesetRequestedHeadsMask); + if (impPassed) { + break; + } + } while (DownGradeMetaModeUsageBounds(pDevEvo, timingsParams, + modesetRequestedHeadsMask)); + + if (impPassed && !pDevEvo->isSOCDisplay) { + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + if (timingsParams[head].pTimings != NULL) { + timingsParams[head].pTimings->viewPort.possibleUsage = + timingsParams[head].pTimings->viewPort.guaranteedUsage; + } + } + } + + return impPassed; +} + +/* + * Return TRUE iff this display can be configured as a framelock + * server given the current modetimings/framelock configuration, FALSE + * otherwise. + */ + +NvBool nvFrameLockServerPossibleEvo(const NVDpyEvoRec *pDpyEvo) +{ + + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + + return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, + NV_EVO_ADD_FRAME_LOCK_SERVER, + NULL); +} + +/* + * Return TRUE iff this display can be configured as a framelock client + * given the current modetimings/framelock configuration, FALSE otherwise. + */ + +NvBool nvFrameLockClientPossibleEvo(const NVDpyEvoRec *pDpyEvo) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + + return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, + NV_EVO_ADD_FRAME_LOCK_CLIENT, + NULL); +} + + +/* + * FrameLockSli() - Helper function for nvEvoRefFrameLockSli() and + * nvEvoUnRefFrameLockSli(), which are hooked into the EVO locking state + * machine via custom rules. This function will find the GPU acting as the + * given GPU's SLI primary and perform the NV_EVO_{ADD,REM}_FRAME_LOCK_REF action + * to increment or decrement the refcount on that GPU. + * If queryOnly, it passes NULL to the EVO state machine to perform a query + * without affecting state; otherwise, it also figures out which active heads + * to pass into the EVO state machine.
+ */ + +static NvBool FrameLockSli(NVDevEvoPtr pDevEvo, + NvU32 action, + NvBool queryOnly) +{ + RasterLockGroup *pRasterLockGroups; + NVEvoSubDevPtr pEvoSubDev; + NVDispEvoPtr pDispEvo; + unsigned int numRasterLockGroups; + + pRasterLockGroups = GetRasterLockGroups(pDevEvo, &numRasterLockGroups); + if (!pRasterLockGroups) { + return FALSE; + } + + nvAssert(numRasterLockGroups == 1); + if (numRasterLockGroups != 1) { + nvFree(pRasterLockGroups); + return FALSE; + } + + /* Want to be framelock server */ + pDispEvo = pRasterLockGroups[0].pDispEvoOrder[0]; + + nvFree(pRasterLockGroups); + + if (!pDispEvo) { + return FALSE; + } + + nvAssert(pDevEvo == pDispEvo->pDevEvo); + + pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + + if (queryOnly) { + return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, NULL); + } else { + NvU32 pHeads[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, }; + NvU32 i = 0; + NvU32 head; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (nvHeadIsActive(pDispEvo, head)) { + pHeads[i++] = head; + } + } + nvAssert(i > 0 && i <= NVKMS_MAX_HEADS_PER_DISP); + pHeads[i] = NV_INVALID_HEAD; + + return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, + pHeads); + } +} + + +/* + * nvEvoRefFrameLockSli() - Attempt to set up framelock on the GPU's SLI + * primary. Hooked into EVO state machine via custom rules. + * If pHeads is NULL, only perform a query. + */ + +NvBool nvEvoRefFrameLockSli(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + return FrameLockSli(pDispEvo->pDevEvo, NV_EVO_ADD_FRAME_LOCK_REF, + pHeads == NULL); + +} /* nvEvoRefFrameLockSli */ + + +/* + * nvEvoUnRefFrameLockSli() - Attempt to clean up framelock on the GPU's SLI + * primary. Hooked into EVO state machine via custom rules. + * If pHeads is NULL, only perform a query. + */ + +NvBool nvEvoUnRefFrameLockSli(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + return FrameLockSli(pDispEvo->pDevEvo, NV_EVO_REM_FRAME_LOCK_REF, + pHeads == NULL); + +} /* nvEvoUnRefFrameLockSli */ + + +/* + * GetRasterLockPin() - Ask RM which lockpin to use in order to configure GPU0 + * to be a server or client of GPU1, where GPUn is represented by the duple + * (pDispn, headn) (or NV_EVO_LOCK_PIN_ERROR if the two cannot be locked).
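+ * + * Usage sketch (hypothetical variables, not from the source): + * + * NVEvoLockPin server, client; + * GetRasterLockPin(pDisp0, head0, pDisp1, head1, &server, &client); + * // 'server' is the pin to program on (pDisp0, head0) for it to serve + * // (pDisp1, head1); 'client' is the pin for it to act as a client. + * // Either may be NV_EVO_LOCK_PIN_ERROR if the two cannot be locked.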
+ */ +static void GetRasterLockPin(NVDispEvoPtr pDispEvo0, NvU32 head0, + NVDispEvoPtr pDispEvo1, NvU32 head1, + NVEvoLockPin *serverPin, NVEvoLockPin *clientPin) +{ + NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS params = { }; + NvU32 displayHandle0 = pDispEvo0->pDevEvo->displayHandle; + NvU32 displayHandle1 = pDispEvo1->pDevEvo->displayHandle; + NvU32 ret; + + params.base.subdeviceIndex = pDispEvo0->displayOwner; + params.head = head0; + + params.peer.hDisplay = displayHandle1; + params.peer.subdeviceIndex = pDispEvo1->displayOwner; + params.peer.head = head1; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + displayHandle0, + NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo0, EVO_LOG_ERROR, + "stateless lockpin query failed; ret: 0x%x", ret); + if (serverPin) *serverPin = NV_EVO_LOCK_PIN_ERROR; + if (clientPin) *clientPin = NV_EVO_LOCK_PIN_ERROR; + return; + } + + if (serverPin) { + if (FLD_TEST_DRF(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS, + _MASTER_SCAN_LOCK_CONNECTED, _NO, + params.masterScanLock)) { + *serverPin = NV_EVO_LOCK_PIN_ERROR; + } else { + int pin = DRF_VAL(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS, + _MASTER_SCAN_LOCK_PIN, + params.masterScanLock); + *serverPin = NV_EVO_LOCK_PIN_0 + pin; + } + } + + if (clientPin) { + if (FLD_TEST_DRF(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS, + _SLAVE_SCAN_LOCK_CONNECTED, _NO, + params.slaveScanLock)) { + *clientPin = NV_EVO_LOCK_PIN_ERROR; + } else { + int pin = DRF_VAL(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS, + _SLAVE_SCAN_LOCK_PIN, + params.slaveScanLock); + *clientPin = NV_EVO_LOCK_PIN_0 + pin; + } + } +} /* GetRasterLockPin */ + +static void UpdateLUTNotifierTracking( + NVDispEvoPtr pDispEvo) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const int dispIndex = pDispEvo->displayOwner; + NvU32 i; + + for (i = 0; i < ARRAY_LEN(pDevEvo->lut.notifierState.sd[dispIndex].notifiers); i++) { + int notifier = pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].notifier; + + if (!pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].waiting) { + continue; + } + + if (!pDevEvo->hal->IsCompNotifierComplete(pDevEvo->pDispEvo[dispIndex], + notifier)) { + continue; + } + + pDevEvo->lut.notifierState.sd[dispIndex].waitingApiHeadMask &= + ~pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].apiHeadMask; + pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].waiting = FALSE; + } +} + +/* + * Check whether there are any staged API head LUT notifiers that need to be + * committed. + */ +NvBool nvEvoLUTNotifiersNeedCommit( + NVDispEvoPtr pDispEvo) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const int dispIndex = pDispEvo->displayOwner; + NvU32 apiHeadMask = pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask; + + return apiHeadMask != 0; +} + +/* + * Set up tracking for a LUT Notifier for the apiHeads in stagedApiHeadMask. + * + * The notifier returned by this function must be passed to a subsequent call to + * EvoUpdateAndKickOffWithNotifier. + * + * Returns -1 if an error occurs or no apiHeads need a new LUT notifier. Passing + * -1 to EvoUpdateAndKickOffWithNotifier with its notify parameter set may + * result in kernel panics.
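+ * + * Intended calling sequence (sketch; see nvEvoStageLUTNotifier() and + * nvEvoWaitForLUTNotifier() below): + * + * nvEvoStageLUTNotifier(pDispEvo, apiHead); + * int notifier = nvEvoCommitLUTNotifiers(pDispEvo); + * // ... kick off, requesting notification only if notifier != -1 ... + * nvEvoWaitForLUTNotifier(pDispEvo, apiHead);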
+ */ +int nvEvoCommitLUTNotifiers( + NVDispEvoPtr pDispEvo) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const int dispIndex = pDispEvo->displayOwner; + NvU32 apiHeadMask = pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask; + int i; + + pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask = 0; + + UpdateLUTNotifierTracking(pDispEvo); + + if (apiHeadMask == 0) { + return -1; + } + + if (pDevEvo->lut.notifierState.sd[dispIndex].waitingApiHeadMask & + apiHeadMask) { + /* + * an apiHead in the requested list is already waiting on a + * notifier + */ + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, "A requested API head is already waiting on a notifier"); + return -1; + } + + for (i = 0; i < ARRAY_LEN(pDevEvo->lut.notifierState.sd[dispIndex].notifiers); i++) { + int notifier = (dispIndex * NVKMS_MAX_HEADS_PER_DISP) + i + 1; + + if (pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].waiting) { + continue; + } + + /* use this notifier */ + pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].notifier = notifier; + pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].waiting = TRUE; + pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].apiHeadMask = apiHeadMask; + + pDevEvo->lut.notifierState.sd[dispIndex].waitingApiHeadMask |= + apiHeadMask; + + return notifier; + } + + /* slot not found */ + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, "No remaining LUT notifier slots"); + return -1; +} + +/* + * Unstage any staged API Heads' notifiers. + */ +void nvEvoClearStagedLUTNotifiers( + NVDispEvoPtr pDispEvo) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const int dispIndex = pDispEvo->displayOwner; + + pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask = 0; +} + +/* + * Stage the API Head's notifier for tracking. In order to kickoff the staged + * notifier, nvEvoCommitLUTNotifiers must be called and its return value + * passed to EvoUpdateAndKickoffWithNotifier. + * + * This function and its siblings nvEvoIsLUTNotifierComplete and + * nvEvoWaitForLUTNotifier can be used by callers of nvEvoSetLut to ensure the + * triple-buffer for the color LUT is not overflowed even when nvEvoSetLut is + * called with kickoff = FALSE. + */ +void nvEvoStageLUTNotifier( + NVDispEvoPtr pDispEvo, + NvU32 apiHead) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const int dispIndex = pDispEvo->displayOwner; + + nvAssert((pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask & + NVBIT(apiHead)) == 0); + + pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask |= + NVBIT(apiHead); +} + +/* + * Check if the api head's LUT Notifier is complete. + */ + +NvBool nvEvoIsLUTNotifierComplete( + NVDispEvoPtr pDispEvo, + NvU32 apiHead) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const int dispIndex = pDispEvo->displayOwner; + + UpdateLUTNotifierTracking(pDispEvo); + + return (pDevEvo->lut.notifierState.sd[dispIndex].waitingApiHeadMask & + NVBIT(apiHead)) == 0; +} + +/* + * Wait for the api head's LUT Notifier to complete. + * + * This function blocks while waiting for the notifier. 
+ */ + +void nvEvoWaitForLUTNotifier( + const NVDispEvoPtr pDispEvo, + NvU32 apiHead) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const int dispIndex = pDispEvo->displayOwner; + int i; + + if (nvEvoIsLUTNotifierComplete(pDispEvo, apiHead)) { + return; + } + + for (i = 0; i < ARRAY_LEN(pDevEvo->lut.notifierState.sd[dispIndex].notifiers); i++) { + int notifier = pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].notifier; + + if (!pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].waiting) { + continue; + } + + if ((pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].apiHeadMask & + NVBIT(apiHead)) == 0) { + + continue; + } + + pDevEvo->hal->WaitForCompNotifier(pDispEvo, notifier); + return; + } +} + +static void EvoIncrementCurrentLutIndex(NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + const NvBool baseLutEnabled, + const NvBool outputLutEnabled) +{ + NvU32 head; + const int dispIndex = pDispEvo->displayOwner; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const int numLUTs = ARRAY_LEN(pDevEvo->lut.apiHead[apiHead].LUT); + NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curLUTIndex++; + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curLUTIndex %= numLUTs; + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curBaseLutEnabled = baseLutEnabled; + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curOutputLutEnabled = outputLutEnabled; + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + const NvU32 curLutIndex = + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curLUTIndex; + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + + pHeadState->lut.outputLutEnabled = + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curOutputLutEnabled; + pHeadState->lut.baseLutEnabled = + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curBaseLutEnabled; + pHeadState->lut.pCurrSurface = + pDevEvo->lut.apiHead[apiHead].LUT[curLutIndex]; + + } +} + +static NvU32 UpdateLUTTimer(NVDispEvoPtr pDispEvo, + const NvU32 apiHead, + const NvBool baseLutEnabled, + const NvBool outputLutEnabled) +{ + if (!nvEvoIsLUTNotifierComplete(pDispEvo, apiHead)) { + // If the notifier is still pending, then the previous update is still + // pending and further LUT changes should continue to go into the third + // buffer. Reschedule the timer for another 10 ms. + return 10; + } + + // Update the current LUT index and kick off an update. + EvoIncrementCurrentLutIndex(pDispEvo, apiHead, baseLutEnabled, + outputLutEnabled); + + EvoUpdateCurrentPalette(pDispEvo, apiHead); + + // Return 0 to cancel the timer. 
+ return 0; +} + +static void UpdateLUTTimerNVKMS(void *dataPtr, NvU32 dataU32) +{ + NVDispEvoPtr pDispEvo = dataPtr; + const NvU32 apiHead = DRF_VAL(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _HEAD, + dataU32); + const NvBool baseLutEnabled = FLD_TEST_DRF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, + _BASE_LUT, _ENABLE, dataU32); + const NvBool outputLutEnabled = FLD_TEST_DRF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, + _OUTPUT_LUT, _ENABLE, dataU32); + NvU32 ret = UpdateLUTTimer(pDispEvo, apiHead, baseLutEnabled, + outputLutEnabled); + + if (ret != 0) { + ScheduleLutUpdate(pDispEvo, apiHead, dataU32, ret * 1000); + } +} + +static void ScheduleLutUpdate(NVDispEvoRec *pDispEvo, + const NvU32 apiHead, const NvU32 data, + const NvU64 usec) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + /* Cancel previous update */ + nvCancelLutUpdateEvo(pDispEvo, apiHead); + + /* schedule a new timer */ + pDevEvo->lut.apiHead[apiHead].disp[pDispEvo->displayOwner].updateTimer = + nvkms_alloc_timer(UpdateLUTTimerNVKMS, + pDispEvo, data, + usec); +} + +/* + * The gamma ramp, if specified, has a 16-bit range. Convert it to EVO's 14-bit + * shifted range and zero out the low 3 bits for bug 813188. + */ +static inline NvU16 GammaToEvo(NvU16 gamma) +{ + return ((gamma >> 2) & ~7) + 24576; +} + +static NVEvoLutDataRec *GetNewLutBuffer( + const NVDispEvoRec *pDispEvo, + const struct NvKmsSetLutCommonParams *pParams) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVEvoLutDataRec *pLUTBuffer = NULL; + + // XXX NVKMS TODO: If only input or output are specified and the other one + // is enabled in the hardware, this will zero out the one not specified. In + // practice it isn't a problem today because the X driver always specifies + // both, but we should fix this once we start always using the base channel, + // where we have a separate base LUT ctxdma. + // + // This is also a problem if a partial update of the input LUT is attempted + // (i.e. start != 0 or end != numberOfLutEntries-1). + // + // Filed bug: 2042919 to track removing this TODO. + + pLUTBuffer = nvCalloc(1, sizeof(*pLUTBuffer)); + + if (pLUTBuffer == NULL) { + goto done; + } + + if (pParams->input.specified && pParams->input.end != 0) { + const struct NvKmsLutRamps *pRamps = + nvKmsNvU64ToPointer(pParams->input.pRamps); + const NvU16 *red = pRamps->red; + const NvU16 *green = pRamps->green; + const NvU16 *blue = pRamps->blue; + + nvAssert(pRamps != NULL); + + // Update our shadow copy of the LUT. + pDevEvo->hal->FillLUTSurface(pLUTBuffer->base, + red, green, blue, + pParams->input.end + 1, + pParams->input.depth); + } + + if (pParams->output.specified && pParams->output.enabled) { + const struct NvKmsLutRamps *pRamps = + nvKmsNvU64ToPointer(pParams->output.pRamps); + int i; + + nvAssert(pRamps != NULL); + + if (pDevEvo->hal->caps.hasUnorm16OLUT) { + for (i = 0; i < 1024; i++) { + // Copy the client's 16-bit ramp directly to the LUT buffer. + pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + i].Red = pRamps->red[i]; + pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + i].Green = pRamps->green[i]; + pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + i].Blue = pRamps->blue[i]; + } + + pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + 1024] = + pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + 1023]; + } else { + for (i = 0; i < 1024; i++) { + // Convert from the client's 16-bit range to the EVO 14-bit shifted + // range. 
+ pLUTBuffer->output[i].Red = GammaToEvo(pRamps->red[i]); + pLUTBuffer->output[i].Green = GammaToEvo(pRamps->green[i]); + pLUTBuffer->output[i].Blue = GammaToEvo(pRamps->blue[i]); + } + + pLUTBuffer->output[1024] = pLUTBuffer->output[1023]; + } + } + + /* fall through */ + +done: + return pLUTBuffer; +} + + +/* + * Update the api head's LUT with the given colors. + * + * The color LUT is triple-buffered. + * + * curLUTIndex indicates the buffer currently being updated. What the other + * two buffers are used for depends on whether the previous update has + * completed. If not (case 1): + * curLUTIndex + 1 (mod 3): currently being displayed + * curLUTIndex + 2 (mod 3): will be displayed at next vblank + * If so (case 2): + * curLUTIndex + 1 (mod 3): unused + * curLUTIndex + 2 (mod 3): currently being displayed + * + * In case 1, just update the current buffer and kick off a timer to submit the + * update from i+2 to i. If more LUT changes come in before the first update + * happens, kill the timer and start a new one. + * + * In case 2, kill the timer if it still hasn't gone off, update buffer i, and + * kick off an update. No new timer needs to be scheduled. + */ + +void nvEvoSetLut(NVDispEvoPtr pDispEvo, NvU32 apiHead, NvBool kickoff, + const struct NvKmsSetLutCommonParams *pParams) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const int dispIndex = pDispEvo->displayOwner; + const int curLUT = pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curLUTIndex; + const NvBool waitForPreviousUpdate = + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate; + const int numLUTs = ARRAY_LEN(pDevEvo->lut.apiHead[apiHead].LUT); + const int lutToFill = (curLUT + 1) % numLUTs; + NVSurfaceEvoPtr pSurfEvo = pDevEvo->lut.apiHead[apiHead].LUT[lutToFill]; + NvBool baseLutEnabled = + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curBaseLutEnabled ; + NvBool outputLutEnabled = + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curOutputLutEnabled; + + if (!pParams->input.specified && !pParams->output.specified) { + return; + } + + if (pParams->input.specified) { + baseLutEnabled = (pParams->input.end != 0); + } + + if (pParams->output.specified) { + outputLutEnabled = pParams->output.enabled; + } + + nvAssert(pSurfEvo != NULL); + + if ((pParams->input.specified && pParams->input.end != 0) || + (pParams->output.specified && pParams->output.enabled)) { + NVEvoLutDataRec *pLUTBuffer = GetNewLutBuffer(pDispEvo, pParams); + + if (pLUTBuffer == NULL) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "LUT Allocation failure; skipping LUT update"); + return; + } + + // Fill in the new LUT buffer. + nvUploadDataToLutSurfaceEvo(pSurfEvo, pLUTBuffer, pDispEvo); + + nvFree(pLUTBuffer); + } + + /* Kill a pending timer */ + nvCancelLutUpdateEvo(pDispEvo, apiHead); + + if (!kickoff) { + EvoIncrementCurrentLutIndex(pDispEvo, apiHead, baseLutEnabled, + outputLutEnabled); + return; + } + + // See if we can just fill the next LUT buffer and kick off an update now. + // We can do that if this is the very first update, or if the previous + // update is complete, or if we need to guarantee that this update + // is synchronous. + NvBool previousUpdateComplete = + nvEvoIsLUTNotifierComplete(pDispEvo, apiHead); + if (!waitForPreviousUpdate || previousUpdateComplete || + pParams->synchronous) { + + if (!previousUpdateComplete) { + nvEvoWaitForLUTNotifier(pDispEvo, apiHead); + } + + // Kick off an update now. 
+ EvoIncrementCurrentLutIndex(pDispEvo, apiHead, baseLutEnabled, + outputLutEnabled); + EvoUpdateCurrentPalette(pDispEvo, apiHead); + + // If this LUT update is synchronous, then sync before returning. + if (pParams->synchronous && + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate) { + + nvEvoWaitForLUTNotifier(pDispEvo, apiHead); + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate = + FALSE; + } + } else { + // Schedule a timer to kick off an update later. + // XXX 5 ms is a guess. We could probably look at this pDpy's refresh + // rate to come up with a more reasonable estimate. + NvU32 dataU32 = DRF_NUM(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _HEAD, apiHead); + + nvAssert((apiHead & ~0xff) == 0); + + if (baseLutEnabled) { + dataU32 |= DRF_DEF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _BASE_LUT, + _ENABLE); + } + + if (outputLutEnabled) { + dataU32 |= DRF_DEF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _OUTPUT_LUT, + _ENABLE); + } + + ScheduleLutUpdate(pDispEvo, apiHead, dataU32, 5 * 1000); + } +} + +NvBool nvValidateSetLutCommonParams( + const NVDevEvoRec *pDevEvo, + const struct NvKmsSetLutCommonParams *pParams) +{ + NvU32 maxSize = 0; + + if (pParams->output.specified && pParams->output.enabled) { + if (pParams->output.pRamps == 0) { + return FALSE; + } + } + + if (!pParams->input.specified || pParams->input.end == 0) { + return TRUE; + } + + if (pParams->input.pRamps == 0) { + return FALSE; + } + + switch (pParams->input.depth) { + case 8: maxSize = 256; break; + case 15: maxSize = 32; break; + case 16: maxSize = 64; break; + case 24: maxSize = 256; break; + case 30: maxSize = 1024; break; + default: return FALSE; + } + + nvAssert(maxSize <= NVKMS_LUT_ARRAY_SIZE); + + /* Currently, the implementation assumes start==0. */ + if (pParams->input.start != 0) { + return FALSE; + } + + if (pParams->input.end >= maxSize) { + return FALSE; + } + + return TRUE; +} + +static NvU32 GetSwapLockoutWindowUs(NVDispEvoPtr pDispEvo) +{ + NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS params = { 0 }; + NvU32 ret; + + nvAssert(pDispEvo->pFrameLockEvo != NULL); + + ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDispEvo->pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW failed"); + } + + return params.tSwapRdyHi; +} + +static NvU32 CalculateSwapLockoutStartP2060(NVDispEvoPtr pDispEvo, + const NvU32 head, + const NvU32 tSwapRdyHiUs) +{ + const NVHwModeTimingsEvo *pTimings; + + nvAssert(head != NV_INVALID_HEAD); + nvAssert(nvHeadIsActive(pDispEvo, head)); + + pTimings = &pDispEvo->headState[head].timings; + + /* + * SWAP_LOCKOUT_START = Vtotal * TswapRdyHi * Refresh_Rate + * + * = Vtotal * TswapRdyHi * (pclk / (Vtotal * Htotal)) + * = TswapRdyHi * (pclk / Htotal) + * = TswapRdyHiUs * 1e-6 * pclk / Htotal + * = TswapRdyHiUs * pclk / (Htotal * 1000000) + * = TswapRdyHiUs * (pclkKhz * 1000) / (Htotal * 1000000) + * = TswapRdyHiUs * pclkKhz / (Htotal * 1000) + * + * Since SWAP_LOCKOUT_START must be higher than LSR_MIN_TIME, round this + * result up to the nearest integer.
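+ * + * Worked example (assumed numbers): a 1920x1080 mode with pclkKhz = 148500 + * and Htotal = 2200, with tSwapRdyHiUs = 250, gives + * 250 * 148500 / (2200 * 1000) = 16.875, which rounds up to 17.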
+ */ + + return NV_ROUNDUP_DIV(tSwapRdyHiUs * pTimings->pixelClock, + pTimings->rasterSize.x * 1000); +} + +/** + * Override the swap lockout start value on heads on this pDisp, or restore the + * default value. + * + * This is called before (with isPre == TRUE) and after (with isPre == FALSE) + * swap barriers are enabled on the G-Sync board. In order to satisfy certain + * timing criteria, we need to set a special value for SWAP_LOCKOUT_START for + * the duration of swap barriers being enabled. + */ +void nvSetSwapBarrierNotifyEvo(NVDispEvoPtr pDispEvo, + NvBool enable, NvBool isPre) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 tSwapRdyHiUs = 0; + NvU32 head; + + if ((isPre && !enable) || (!isPre && enable)) { + return; + } + + if (enable) { + tSwapRdyHiUs = GetSwapLockoutWindowUs(pDispEvo); + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS params = { }; + NvU32 ret; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + params.maxSwapLockoutSkew = + NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_MAX_SWAP_LOCKOUT_SKEW_INIT; + + if (enable) { + params.swapLockoutStart = + CalculateSwapLockoutStartP2060(pDispEvo, head, tSwapRdyHiUs); + } else { + params.swapLockoutStart = + NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_SWAP_LOCKOUT_START_INIT; + } + + params.head = head; + + params.base.subdeviceIndex = pDispEvo->displayOwner; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP, + &params, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP failed"); + } + } +} + +/*! + * Release a reference to a pDevEvo. + * + * If the refcount of the device drops to 0, this frees the device. + * + * \return TRUE if the device was freed, FALSE otherwise. + */ +NvBool nvFreeDevEvo(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo == NULL) { + return FALSE; + } + + pDevEvo->allocRefCnt--; + + if (pDevEvo->allocRefCnt > 0) { + return FALSE; + } + + if (pDevEvo->pDifrState) { + nvRmUnregisterDIFREventHandler(pDevEvo); + nvDIFRFree(pDevEvo->pDifrState); + pDevEvo->pDifrState = NULL; + } + + if (pDevEvo->pNvKmsOpenDev != NULL) { + /* + * DP-MST allows more than one head/stream to be attached to a single + * DP connector, and there is no way to convey that DP-MST + * configuration to the next driver load; therefore, disallow DP-MST.
+ */ + nvEvoRestoreConsole(pDevEvo, FALSE /* allowMST */); + + nvEvoUnregisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev, + pDevEvo->fbConsoleSurfaceHandle, + TRUE /* skipUpdate */, + FALSE /* skipSync */); + pDevEvo->fbConsoleSurfaceHandle = 0; + } + + nvFreeLutSurfacesEvo(pDevEvo); + + nvFreeCoreChannelEvo(pDevEvo); + + nvTeardownHdmiLibrary(pDevEvo); + + nvHsFreeDevice(pDevEvo); + + nvFreePerOpenDev(nvEvoGlobal.nvKmsPerOpen, pDevEvo->pNvKmsOpenDev); + + nvFreeFrameLocksEvo(pDevEvo); + + if (pDevEvo->hal) { + pDevEvo->hal->FreeRmCtrlObject(pDevEvo); + } + + nvRmDestroyDisplays(pDevEvo); + + nvkms_free_timer(pDevEvo->consoleRestoreTimer); + pDevEvo->consoleRestoreTimer = NULL; + + nvPreallocFree(pDevEvo); + + nvRmFreeDeviceEvo(pDevEvo); + + nvListDel(&pDevEvo->devListEntry); + + nvkms_free_ref_ptr(pDevEvo->ref_ptr); + + nvFree(pDevEvo); + return TRUE; +} + +static void AssignNumberOfApiHeads(NVDevEvoRec *pDevEvo) +{ + pDevEvo->numApiHeads = pDevEvo->numHeads; +} + +NVDevEvoPtr nvAllocDevEvo(const struct NvKmsAllocDeviceRequest *pRequest, + enum NvKmsAllocDeviceStatus *pStatus) +{ + NVDevEvoPtr pDevEvo = NULL; + enum NvKmsAllocDeviceStatus status = + NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + NvU32 i; + + nvAssert(nvFindDevEvoByDeviceId(pRequest->deviceId) == NULL); + + pDevEvo = nvCalloc(1, sizeof(*pDevEvo)); + + if (pDevEvo == NULL) { + goto done; + } + + pDevEvo->allocRefCnt = 1; + + pDevEvo->gpuLogIndex = NV_INVALID_GPU_LOG_INDEX; + + pDevEvo->gc6Allowed = TRUE; + + nvListAppend(&pDevEvo->devListEntry, &nvEvoGlobal.devList); + + pDevEvo->ref_ptr = nvkms_alloc_ref_ptr(pDevEvo); + if (!pDevEvo->ref_ptr) { + goto done; + } + + for (i = 0; i < ARRAY_LEN(pDevEvo->openedGpuIds); i++) { + pDevEvo->openedGpuIds[i] = NV0000_CTRL_GPU_INVALID_ID; + } + + for (i = 0; i < ARRAY_LEN(pDevEvo->headForWindow); i++) { + pDevEvo->headForWindow[i] = NV_INVALID_HEAD; + } + + if (!nvRmAllocDeviceEvo(pDevEvo, pRequest)) { + goto done; + } + + status = nvAssignEvoCaps(pDevEvo); + + if (status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) { + goto done; + } + + if (!nvPreallocAlloc(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + /* + * Copy the registry keys from the alloc device request to the device. + * + * This needs to be set before nvRmAllocDisplays, because nvRmAllocDisplays + * will initialize DP lib which may read registry keys that we want to + * allow clients to override. 
+ */ + ct_assert(ARRAY_LEN(pRequest->registryKeys) == + ARRAY_LEN(pDevEvo->registryKeys)); + ct_assert(ARRAY_LEN(pRequest->registryKeys[0].name) == + ARRAY_LEN(pDevEvo->registryKeys[0].name)); + + for (i = 0; i < ARRAY_LEN(pRequest->registryKeys); i++) { + const size_t nameLen = sizeof(pDevEvo->registryKeys[i].name); + nvkms_memcpy(pDevEvo->registryKeys[i].name, + pRequest->registryKeys[i].name, + nameLen); + pDevEvo->registryKeys[i].name[nameLen - 1] = '\0'; + pDevEvo->registryKeys[i].value = pRequest->registryKeys[i].value; + } + + status = nvRmAllocDisplays(pDevEvo); + + if (status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) { + goto done; + } + + nvAllocFrameLocksEvo(pDevEvo); + + if (!pDevEvo->hal->AllocRmCtrlObject(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + AssignNumberOfApiHeads(pDevEvo); + + if (!nvAllocCoreChannelEvo(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_CORE_CHANNEL_ALLOC_FAILED; + goto done; + } + + pDevEvo->pNvKmsOpenDev = nvAllocPerOpenDev(nvEvoGlobal.nvKmsPerOpen, + pDevEvo, TRUE /* isPrivileged */); + if (!pDevEvo->pNvKmsOpenDev) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */); + + /* + * Import the framebuffer console, if there is one, + * as a surface we can flip to. + */ + nvRmImportFbConsoleMemory(pDevEvo); + + /* + * This check must be placed after nvAllocCoreChannelEvo() since it depends + * on the HW capabilities that are read in that function. + */ + if (!ValidateConnectorTypes(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + if (!nvHsAllocDevice(pDevEvo, pRequest)) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + if (!nvAllocLutSurfacesEvo(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate memory for the display color lookup table."); + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + if (!nvInitHdmiLibrary(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + nvRmMuxInit(pDevEvo); + + status = NVKMS_ALLOC_DEVICE_STATUS_SUCCESS; + + /* + * We can't allocate DIFR state if h/w doesn't support it. Only register + * event handlers with DIFR state. + */ + pDevEvo->pDifrState = nvDIFRAllocate(pDevEvo); + if (pDevEvo->pDifrState) { + if (!nvRmRegisterDIFREventHandler(pDevEvo)) { + nvDIFRFree(pDevEvo->pDifrState); + pDevEvo->pDifrState = NULL; + } + } + + /* fall through */ + +done: + if (status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) { + nvFreeDevEvo(pDevEvo); + pDevEvo = NULL; + } + + *pStatus = status; + + return pDevEvo; +} + + +// How long before we time out waiting for lock? +// In microseconds. +#define LOCK_TIMEOUT 5000000 + +// +// EvoWaitForLock() +// Wait for raster or flip lock to complete +// Note that we use pDev and subdevice here instead of pDisp since this is used +// per-subdev in SLI (including the pDispEvo->numSubDevices > 1 case). 
+//
+static NvBool EvoWaitForLock(const NVDevEvoRec *pDevEvo, const NvU32 sd,
+                             const NvU32 head, const NvU32 type,
+                             NvU64 *pStartTime)
+{
+    NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS status = { };
+    NvU32 ret;
+
+    nvAssert(type == EVO_RASTER_LOCK || type == EVO_FLIP_LOCK);
+
+    if ((type == EVO_FLIP_LOCK) &&
+        !pDevEvo->hal->caps.supportsFlipLockRGStatus) {
+        return TRUE;
+    }
+
+    status.head = head;
+    status.base.subdeviceIndex = sd;
+    status.scanLocked = NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_NO;
+    status.flipLocked = NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_NO;
+
+    // Keep polling until we see the requested lock status.
+    do {
+        ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                             pDevEvo->displayHandle,
+                             NV5070_CTRL_CMD_GET_RG_STATUS,
+                             &status,
+                             sizeof(status));
+        if (ret != NVOS_STATUS_SUCCESS) {
+            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                        "Unable to read SLI lock status");
+            return FALSE;
+        }
+
+        if ((type == EVO_RASTER_LOCK) &&
+            (status.scanLocked ==
+             NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_YES)) {
+            break;
+        }
+        if ((type == EVO_FLIP_LOCK) &&
+            (status.flipLocked ==
+             NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_YES)) {
+            break;
+        }
+
+        if (nvExceedsTimeoutUSec(pDevEvo, pStartTime, LOCK_TIMEOUT)) {
+            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                        "SLI lock timeout exceeded (type %d)", type);
+            return FALSE;
+        }
+
+        nvkms_yield();
+
+    } while (TRUE);
+
+    // Once we've exited from the loop above, we are locked as requested.
+    return TRUE;
+}
+
+//
+// EvoUpdateHeadParams()
+// Send HeadParams updates to the GPUs; accounts for SLI.
+//
+static void EvoUpdateHeadParams(const NVDispEvoRec *pDispEvo, NvU32 head,
+                                NVEvoUpdateState *updateState)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+
+    nvPushEvoSubDevMaskDisp(pDispEvo);
+
+    pDevEvo->hal->SetHeadControl(pDevEvo, pDispEvo->displayOwner, head, updateState);
+
+    nvPopEvoSubDevMask(pDevEvo);
+}
+
+//
+// nvReadCRC32Evo()
+// Returns the last CRC32 value generated for the given head.
+NvBool nvReadCRC32Evo(NVDispEvoPtr pDispEvo, NvU32 head,
+                      CRC32NotifierCrcOut *crcOut)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
+    NVEvoDmaPtr dma = NULL;
+    NVConnectorEvoPtr pConnectorEvo = NULL;
+    NVEvoUpdateState updateState = { };
+    NvU32 numCRC32 = 0;
+    NvBool res = TRUE;
+    NvBool found = FALSE;
+    NvU32 ret;
+
+    // Look up the head's connector.
+    nvListForEachEntry(pConnectorEvo,
+                       &pDispEvo->connectorList,
+                       connectorListEntry) {
+        NvU32 activeHeadMask =
+            nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);
+        if (activeHeadMask & NVBIT(head)) {
+            found = TRUE;
+            break;
+        }
+    }
+
+    if (!found) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                    "Unable to find active connector for head %d", head);
+        return FALSE;
+    }
+
+    // Allocate a temporary DMA notifier.
+    dma = nvCalloc(1, sizeof(NVEvoDma));
+    if ((dma == NULL) ||
+        !nvRmAllocEvoDma(pDevEvo,
+                         dma,
+                         NV_DMA_EVO_NOTIFIER_SIZE - 1,
+                         DRF_DEF(OS03, _FLAGS, _TYPE, _NOTIFIER),
+                         1 << pDispEvo->displayOwner)) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                    "CRC32 notifier DMA allocation failed");
+        nvFree(dma);
+        return FALSE;
+    }
+
+    // Bind the CRC32 notifier surface descriptor.
+    ret = pDevEvo->hal->BindSurfaceDescriptor(pDevEvo, pDevEvo->core, &dma->surfaceDesc);
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                         "Failed to bind display engine CRC32 notify surface descriptor "
+                         ": 0x%x (%s)", ret, nvstatusToString(ret));
+        res = FALSE;
+        goto done;
+    }
+
+    // Only set up the actual output for the SLI primary.
+    nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner);
+
+    /* CRC notifiers are similar to completion notifiers, but work slightly
+     * differently:
+     *
+     * 1. In order to start CRC generation for a head, we need to:
+     *
+     *    - Point an EVO head at a block of memory with
+     *      HEAD_SET_CONTEXT_DMA_CRC(head)
+     *
+     *    - Program the CRC control with HEAD_SET_CRC_CONTROL(head) to select
+     *      what output we want to capture CRC values from, and kick off a
+     *      core channel update (this already generates a CRC value for the
+     *      last scanout buffer)
+     *
+     *      ----> hal->StartCRC32Capture()
+     *
+     * 2. From 1) on, a new CRC value is generated per vblank and written to
+     *    an incrementing entry in the CRC notifier. With pre-nvdisplay chips,
+     *    a CRC notifier can hold up to 256 entries. Once filled up, new CRC
+     *    values are discarded. In either case, we are only interested in the
+     *    last CRC32 value.
+     *
+     * 3. In order to stop CRC generation, we need to perform the inverse
+     *    operation of 1):
+     *
+     *    - Program the CRC control with HEAD_SET_CRC_CONTROL(head) to
+     *      unselect all outputs we were capturing CRC values from.
+     *
+     *    - Unset the CRC context DMA with HEAD_SET_CONTEXT_DMA_CRC(head)
+     *
+     *      ----> hal->StopCRC32Capture()
+     *
+     * 4. From 3) on, it is safe to wait for the CRC notifier and query all
+     *    entries.
+     *
+     *      ----> hal->QueryCRC32()
+     */
+    pDevEvo->hal->StartCRC32Capture(pDevEvo,
+                                    dma,
+                                    pConnectorEvo,
+                                    pTimings->protocol,
+                                    pConnectorEvo->or.primary,
+                                    head,
+                                    pDispEvo->displayOwner,
+                                    &updateState);
+
+    // This update should generate one CRC value.
+    nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE /* releaseElv */);
+
+    pDevEvo->hal->StopCRC32Capture(pDevEvo,
+                                   head,
+                                   &updateState);
+
+    nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE /* releaseElv */);
+
+    if (!pDevEvo->hal->QueryCRC32(pDevEvo,
+                                  dma,
+                                  pDispEvo->displayOwner,
+                                  1,
+                                  crcOut,
+                                  &numCRC32) ||
+        (numCRC32 == 0)) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to query all CRC32 values");
+    }
+
+    nvPopEvoSubDevMask(pDevEvo);
+
+done:
+    // Clean up.
+    nvRmFreeEvoDma(pDevEvo, dma);
+    nvFree(dma);
+
+    return res;
+}
+
+NvU32 nvGetActiveSorMask(const NVDispEvoRec *pDispEvo)
+{
+    NvU32 activeSorMask = 0;
+    NvU32 head;
+
+    for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
+        NVConnectorEvoPtr pConnectorEvo =
+            pDispEvo->headState[head].pConnectorEvo;
+
+        if (pConnectorEvo != NULL &&
+            pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
+            NvU32 orIndex;
+            nvAssert(pConnectorEvo->or.primary != NV_INVALID_OR);
+            FOR_EACH_INDEX_IN_MASK(32, orIndex, nvConnectorGetORMaskEvo(pConnectorEvo)) {
+                if (pConnectorEvo->or.ownerHeadMask[orIndex] == 0x0) {
+                    continue;
+                }
+                activeSorMask |= NVBIT(orIndex);
+            } FOR_EACH_INDEX_IN_MASK_END;
+        }
+    }
+
+    return activeSorMask;
+}
+
+NvBool nvEvoPollForNoMethodPending(NVDevEvoPtr pDevEvo,
+                                   const NvU32 sd,
+                                   NVEvoChannelPtr pChannel,
+                                   NvU64 *pStartTime,
+                                   const NvU32 timeout)
+{
+    do
+    {
+        NvBool isMethodPending = TRUE;
+
+        if (pDevEvo->hal->IsChannelMethodPending(
+                pDevEvo,
+                pChannel,
+                sd,
+                &isMethodPending) && !isMethodPending) {
+            break;
+        }
+
+        if (nvExceedsTimeoutUSec(pDevEvo, pStartTime, timeout)) {
+            return FALSE;
+        }
+
+        nvkms_yield();
+    } while (TRUE);
+
+    return TRUE;
+}
+
+static NvU32 SetSORFlushMode(NVDevEvoPtr pDevEvo,
+                             NvU32 sorNumber,
+                             NvU32 headMask,
+                             NvBool enable)
+{
+    NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS params = { };
+
+    params.base.subdeviceIndex = 0;
+    params.sorNumber = sorNumber;
+    params.headMask = headMask;
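+    // Entering flush mode stops the attached heads' pixel stream through
+    // this SOR so the link can be retrained without disturbing an
+    // actively-scanning head; see DPSerializerLinkTrain() below.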
+    params.bEnable = enable;
+
+    return nvRmApiControl(nvEvoGlobal.clientHandle,
+                          pDevEvo->displayHandle,
+                          NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE,
+                          &params, sizeof(params));
+}
+
+static void DPSerializerLinkTrain(NVDispEvoPtr pDispEvo,
+                                  NVConnectorEvoPtr pConnectorEvo,
+                                  NvBool enableLink,
+                                  NvBool reTrain)
+{
+    const NvU32 displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
+    const NvU32 sorNumber = pConnectorEvo->or.primary;
+    const NvU32 headMask = nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);
+    NvBool force = NV_FALSE;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+
+    /*
+     * The NV0073_CTRL_DP_DATA_SET_{LANE_COUNT, LINK_BW} defines are the same
+     * as the actual DPCD values. As such, we can assign the
+     * dpSerializerCaps values directly here.
+     */
+    NvBool isMST = pConnectorEvo->dpSerializerCaps.supportsMST;
+    NvU32 linkBW = pConnectorEvo->dpSerializerCaps.maxLinkBW;
+    NvU32 laneCount = pConnectorEvo->dpSerializerCaps.maxLaneCount;
+
+    nvAssert(nvConnectorIsDPSerializer(pConnectorEvo));
+
+    if (sorNumber == NV_INVALID_OR) {
+        return;
+    }
+
+    if (reTrain) {
+        if (!pConnectorEvo->dpSerializerEnabled) {
+            nvEvoLogDev(pDevEvo, EVO_LOG_INFO,
+                        "Received expected HPD_IRQ during serializer shutdown");
+            return;
+        }
+    } else if (enableLink) {
+        pConnectorEvo->dpSerializerEnabled = NV_TRUE;
+    } else {
+        linkBW = 0;
+        laneCount = NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0;
+        pConnectorEvo->dpSerializerEnabled = NV_FALSE;
+    }
+
+    if (isMST) {
+        NvU32 dpcdData = 0;
+
+        dpcdData = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, dpcdData);
+        dpcdData =
+            FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UPSTREAM_IS_SRC, _YES, dpcdData);
+        if (!nvWriteDPCDReg(pConnectorEvo, NV_DPCD_MSTM_CTRL, dpcdData)) {
+            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to enable MST DPCD");
+            return;
+        }
+    }
+
+    /*
+     * We cannot perform link training while the OR has an attached head,
+     * since we would be changing the OR clocks and link frequency while
+     * it's actively encoding pixels, and this could lead to FIFO overflow/
+     * underflow issues. Instead, the recommended, safe sequence is to enter
+     * flush mode first, re-train the link, and exit flush mode after.
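+     *
+     * Roughly, the retrain path below is:
+     *
+     *   SetSORFlushMode(pDevEvo, sorNumber, headMask, NV_TRUE);   // enter
+     *   NV0073_CTRL_CMD_DP_CTRL(...);   // retrain; on failure, retry with
+     *                                   // fake/forced link training
+     *   SetSORFlushMode(pDevEvo, sorNumber, headMask, NV_FALSE);  // exit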
+ */ + if (reTrain) { + if (SetSORFlushMode(pDevEvo, sorNumber, headMask, NV_TRUE) != + NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to enter flush mode"); + return; + } + } + + do { + NvU32 dpCtrlData = 0; + NvU32 dpCtrlCmd = 0; + NV0073_CTRL_DP_CTRL_PARAMS dpCtrlParams = { }; + + dpCtrlCmd = DRF_DEF(0073_CTRL, _DP_CMD, _SET_LANE_COUNT, _TRUE) | + DRF_DEF(0073_CTRL, _DP_CMD, _SET_LINK_BW, _TRUE) | + DRF_DEF(0073_CTRL, _DP_CMD, _SET_ENHANCED_FRAMING, _TRUE); + + if (isMST) { + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _SET_FORMAT_MODE, _MULTI_STREAM); + } + + if (force) { + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _FAKE_LINK_TRAINING, _DONOT_TOGGLE_TRANSMISSION); + } + + dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LINK_BW, + linkBW, dpCtrlData); + dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LANE_COUNT, + laneCount, dpCtrlData); + dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _TARGET, + NV0073_CTRL_DP_DATA_TARGET_SINK, + dpCtrlData); + + dpCtrlParams.subDeviceInstance = pDispEvo->displayOwner; + dpCtrlParams.displayId = displayId; + dpCtrlParams.cmd = dpCtrlCmd; + dpCtrlParams.data = dpCtrlData; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_CTRL, + &dpCtrlParams, sizeof(dpCtrlParams)) == NVOS_STATUS_SUCCESS) { + break; + } + + if (force) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Fake link training failed"); + break; + } + + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Link training failed"); + + /* + * XXX Force the link config on the GPU side to avoid hanging the display + * pipe during modeset. Eventually, we need to figure out how to deal + * with/report these kinds of LT failures. + */ + force = NV_TRUE; + + } while (NV_TRUE); + + if (reTrain) { + if (SetSORFlushMode(pDevEvo, sorNumber, headMask, NV_FALSE) != + NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to exit flush mode"); + } + } +} + +void nvDPSerializerHandleDPIRQ(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo) +{ + DPSerializerLinkTrain(pDispEvo, pConnectorEvo, + NV_TRUE /* enableLink */, + NV_TRUE /* reTrain */); +} + +void nvDPSerializerPreSetMode(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo) +{ + const NvU32 headMask = nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo); + + if (!pConnectorEvo->dpSerializerEnabled && (headMask != 0)) { + DPSerializerLinkTrain(pDispEvo, pConnectorEvo, + NV_TRUE /* enableLink */, + NV_FALSE /* reTrain */); + } +} + +void nvDPSerializerPostSetMode(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo) +{ + const NvU32 headMask = nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo); + + if (pConnectorEvo->dpSerializerEnabled && (headMask == 0)) { + DPSerializerLinkTrain(pDispEvo, pConnectorEvo, + NV_FALSE /* enableLink */, + NV_FALSE /* reTrain */); + } +} + +NvU32 nvGetHDRSrcMaxLum(const NVFlipChannelEvoHwState *pHwState) +{ + if (!pHwState->hdrStaticMetadata.enabled) { + return 0; + } + + if (pHwState->hdrStaticMetadata.val.maxCLL > 0) { + return pHwState->hdrStaticMetadata.val.maxCLL; + } + + return pHwState->hdrStaticMetadata.val.maxDisplayMasteringLuminance; +} + +NvBool nvNeedsTmoLut(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NvU32 srcMaxLum, + NvU32 targetMaxCLL) +{ + const NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask); + const NvU32 head = pDevEvo->headForWindow[win]; + const NvU32 sdMask = nvPeekEvoSubDevMask(pDevEvo); + const NvU32 sd = (sdMask == 0) ? 
0 : nv_ffs(sdMask) - 1;
+    const NVDispHeadStateEvoRec *pHeadState =
+        &pDevEvo->pDispEvo[sd]->headState[head];
+    const NVEvoWindowCaps *pWinCaps =
+        &pDevEvo->gpus[sd].capabilities.window[pChannel->instance];
+
+    // Don't tone map if flipped to NULL.
+    if (!pHwState->pSurfaceEvo[NVKMS_LEFT]) {
+        return FALSE;
+    }
+
+    // If the TMO is set directly by the client, honor the client's request.
+    if (pHwState->tmoLut.fromOverride) {
+        return (pHwState->tmoLut.pLutSurfaceEvo != NULL);
+    }
+
+    // Don't tone map if the layer doesn't have static metadata.
+    // XXX HDR TODO: Support tone mapping SDR surfaces to HDR
+    if (!pHwState->hdrStaticMetadata.enabled) {
+        return FALSE;
+    }
+
+    // Don't tone map if the HDR infoframe isn't enabled.
+    // XXX HDR TODO: Support tone mapping HDR surfaces to SDR
+    if (pHeadState->hdrInfoFrame.state != NVKMS_HDR_INFOFRAME_STATE_ENABLED) {
+        return FALSE;
+    }
+
+    // Don't tone map if no TMO is present.
+    if (!pWinCaps->tmoPresent) {
+        return FALSE;
+    }
+
+    // Don't tone map if source or target max luminance is unspecified.
+    if ((srcMaxLum == 0) || (targetMaxCLL == 0)) {
+        return FALSE;
+    }
+
+    // Don't tone map unless source max luminance exceeds target by 10%.
+    if (srcMaxLum <= ((targetMaxCLL * 110) / 100)) {
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+NvBool nvIsCscMatrixIdentity(const struct NvKmsCscMatrix *matrix)
+{
+    const struct NvKmsCscMatrix identity = NVKMS_IDENTITY_CSC_MATRIX;
+
+    int y;
+    for (y = 0; y < 3; y++) {
+        int x;
+
+        for (x = 0; x < 4; x++) {
+            if (matrix->m[y][x] != identity.m[y][x]) {
+                return FALSE;
+            }
+        }
+    }
+
+    return TRUE;
+}
+
+enum nvKmsPixelDepth nvEvoDpyColorToPixelDepth(
+    const NVDpyAttributeColor *pDpyColor)
+{
+    switch (pDpyColor->format) {
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
+        switch (pDpyColor->bpc) {
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10:
+            return NVKMS_PIXEL_DEPTH_30_444;
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8:
+            return NVKMS_PIXEL_DEPTH_24_444;
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN: /* fallthrough */
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6:
+            return NVKMS_PIXEL_DEPTH_18_444;
+        }
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
+        nvAssert(pDpyColor->bpc != NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6);
+        switch (pDpyColor->bpc) {
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10:
+            return NVKMS_PIXEL_DEPTH_20_422;
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6: /* fallthrough */
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN: /* fallthrough */
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8:
+            return NVKMS_PIXEL_DEPTH_16_422;
+        }
+        break;
+    }
+
+    return NVKMS_PIXEL_DEPTH_18_444;
+}
+
+void nvEvoEnableMergeModePreModeset(NVDispEvoRec *pDispEvo,
+                                    const NvU32 headsMask,
+                                    NVEvoUpdateState *pUpdateState)
+{
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    const NvU32 sd = pDispEvo->displayOwner;
+    const NvU32 primaryHead = nvGetPrimaryHwHeadFromMask(headsMask);
+    NvU32 head;
+
+    nvAssert(pDevEvo->hal->caps.supportsMergeMode);
+    nvAssert((nvPopCount32(headsMask) > 1) &&
+             (primaryHead != NV_INVALID_HEAD));
+
+    FOR_EACH_EVO_HW_HEAD_IN_MASK(headsMask, head) {
+        NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+        const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
+        NVEvoHeadControl *pHC =
+            &pDevEvo->gpus[sd].headControl[head];
+
+        nvAssert(pHeadState->mergeMode ==
+                 NV_EVO_MERGE_MODE_DISABLED);
+
+        /*
+         * Heads are required to be raster locked before they transition to
+         * PRIMARY/SECONDARY merge mode.
+         *
+         * SETUP should be the intermediate state before a head transitions to
+         * PRIMARY/SECONDARY merge mode. During the SETUP state, there is no
+         * pixel transmission from the secondary to the primary head; the RG
+         * fetches and drops pixels, and the viewport is filled with special
+         * gray/black pixels.
+         */
+        pHeadState->mergeMode = NV_EVO_MERGE_MODE_SETUP;
+        pDevEvo->hal->SetMergeMode(pDispEvo, head, pHeadState->mergeMode,
+                                   pUpdateState);
+
+        nvAssert((pHC->serverLock == NV_EVO_NO_LOCK) &&
+                 (pHC->clientLock == NV_EVO_NO_LOCK));
+
+        pHC->mergeMode = TRUE;
+        if (head == primaryHead) {
+            pHC->serverLock = NV_EVO_RASTER_LOCK;
+            pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(primaryHead);
+            pHC->setLockOffsetX = TRUE;
+            pHC->crashLockUnstallMode = FALSE;
+        } else {
+            pHC->clientLock = NV_EVO_RASTER_LOCK;
+            pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(primaryHead);
+            if (pTimings->vrr.type != NVKMS_DPY_VRR_TYPE_NONE) {
+                pHC->clientLockoutWindow = 4;
+                pHC->useStallLockPin = TRUE;
+                pHC->stallLockPin = NV_EVO_LOCK_PIN_INTERNAL(primaryHead);
+            } else {
+                pHC->clientLockoutWindow = 2;
+            }
+            pHC->crashLockUnstallMode =
+                (pTimings->vrr.type != NVKMS_DPY_VRR_TYPE_NONE);
+        }
+
+        pHC->stereoLocked = FALSE;
+
+        EvoUpdateHeadParams(pDispEvo, head, pUpdateState);
+    }
+}
+
+void nvEvoEnableMergeModePostModeset(NVDispEvoRec *pDispEvo,
+                                     const NvU32 headsMask,
+                                     NVEvoUpdateState *pUpdateState)
+{
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    const NvU32 sd = pDispEvo->displayOwner;
+    const NvU32 primaryHead = nvGetPrimaryHwHeadFromMask(headsMask);
+    NvU64 startTime = 0;
+    NvU32 head;
+
+    nvAssert(pDevEvo->hal->caps.supportsMergeMode);
+    nvAssert((nvPopCount32(headsMask) > 1) &&
+             (primaryHead != NV_INVALID_HEAD));
+
+    FOR_EACH_EVO_HW_HEAD_IN_MASK(headsMask, head) {
+        nvAssert(pDispEvo->headState[head].mergeMode ==
+                 NV_EVO_MERGE_MODE_SETUP);
+
+        if (!EvoWaitForLock(pDevEvo, sd, head, EVO_RASTER_LOCK,
+                            &startTime)) {
+            nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, "Raster lock timeout");
+            return;
+        }
+    }
+
+    FOR_EACH_EVO_HW_HEAD_IN_MASK(headsMask, head) {
+        NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+        NVEvoHeadControl *pHC = &pDevEvo->gpus[sd].headControl[head];
+
+        pHC->flipLockPin = NV_EVO_LOCK_PIN_INTERNAL(primaryHead);
+        pHC->flipLock = TRUE;
+
+        EvoUpdateHeadParams(pDispEvo, head, pUpdateState);
+
+        pHeadState->mergeMode = (head == primaryHead) ?
+            NV_EVO_MERGE_MODE_PRIMARY : NV_EVO_MERGE_MODE_SECONDARY;
+        pDevEvo->hal->SetMergeMode(pDispEvo, head, pHeadState->mergeMode,
+                                   pUpdateState);
+    }
+}
+
+void nvEvoDisableMergeMode(NVDispEvoRec *pDispEvo,
+                           const NvU32 headsMask,
+                           NVEvoUpdateState *pUpdateState)
+{
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    const NvU32 sd = pDispEvo->displayOwner;
+    NvU32 head;
+
+    nvAssert(pDevEvo->hal->caps.supportsMergeMode);
+    nvAssert(nvPopCount32(headsMask) > 1);
+
+    FOR_EACH_EVO_HW_HEAD_IN_MASK(headsMask, head) {
+        NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+        NVEvoHeadControl *pHC =
+            &pDevEvo->gpus[sd].headControl[head];
+
+        pHeadState->mergeMode = NV_EVO_MERGE_MODE_DISABLED;
+        pDevEvo->hal->SetMergeMode(pDispEvo, head, pHeadState->mergeMode,
+                                   pUpdateState);
+
+        pHC->mergeMode = FALSE;
+        pHC->serverLock = NV_EVO_NO_LOCK;
+        pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(0);
+        pHC->clientLock = NV_EVO_NO_LOCK;
+        pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(0);
+        pHC->clientLockoutWindow = 0;
+        pHC->setLockOffsetX = FALSE;
+        pHC->flipLockPin = NV_EVO_LOCK_PIN_INTERNAL(0);
+        pHC->flipLock = FALSE;
+        pHC->useStallLockPin = FALSE;
+        pHC->stallLockPin = NV_EVO_LOCK_PIN_INTERNAL(0);
+        pHC->crashLockUnstallMode = FALSE;
+
+        EvoUpdateHeadParams(pDispEvo, head, pUpdateState);
+    }
+}
+
+NvBool nvEvoGetSingleMergeHeadSectionHwModeTimings(
+    const NVHwModeTimingsEvo *pSrc,
+    const NvU32 numMergeHeadSections,
+    NVHwModeTimingsEvo *pDst)
+{
+    if (numMergeHeadSections == 1) {
+        *pDst = *pSrc;
+        return TRUE;
+    }
+
+    if ((numMergeHeadSections == 0) ||
+        (pSrc->viewPort.out.xAdjust != 0) ||
+        (pSrc->viewPort.out.width != nvEvoVisibleWidth(pSrc))) {
+        return FALSE;
+    }
+
+    if (((pSrc->rasterSize.x % numMergeHeadSections) != 0) ||
+        (((pSrc->rasterSyncEnd.x + 1) % numMergeHeadSections) != 0) ||
+        (((pSrc->rasterBlankEnd.x + 1) % numMergeHeadSections) != 0) ||
+        (((pSrc->rasterBlankStart.x + 1) % numMergeHeadSections) != 0) ||
+        ((pSrc->pixelClock % numMergeHeadSections) != 0) ||
+        ((pSrc->viewPort.in.width % numMergeHeadSections) != 0)) {
+        return FALSE;
+    }
+
+    *pDst = *pSrc;
+
+    pDst->rasterSize.x /= numMergeHeadSections;
+    pDst->rasterSyncEnd.x /= numMergeHeadSections;
+    pDst->rasterBlankEnd.x /= numMergeHeadSections;
+    pDst->rasterBlankStart.x /= numMergeHeadSections;
+
+    pDst->pixelClock /= numMergeHeadSections;
+
+    pDst->viewPort.out.width /= numMergeHeadSections;
+    pDst->viewPort.in.width /= numMergeHeadSections;
+
+    return TRUE;
+}
+
+NvBool nvEvoUse2Heads1OR(const NVDpyEvoRec *pDpyEvo,
+                         const NVHwModeTimingsEvo *pTimings,
+                         const struct NvKmsModeValidationParams *pParams)
+{
+    const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;
+    const NvU32 sd = pDispEvo->displayOwner;
+    const NVEvoHeadCaps *pHeadCaps =
+        &pDispEvo->pDevEvo->gpus[sd].capabilities.head[0];
+
+    /* The 2Heads1OR mode cannot be used if the GPU does not
+     * support merge mode, or */
+    if (!pDispEvo->pDevEvo->hal->caps.supportsMergeMode ||
+        /* the 2Heads1OR mode is force-disabled by the client, or */
+        ((pParams->overrides &
+          NVKMS_MODE_VALIDATION_MAX_ONE_HARDWARE_HEAD) != 0) ||
+        /* the given dpy does not support display stream compression
+         * and the given mode timings are not using the hardware YUV420
+         * packer, or */
+        (!nvDPDpyIsDscPossible(pDpyEvo) && !nvHdmiDpySupportsDsc(pDpyEvo) &&
+         (pTimings->yuv420Mode != NV_YUV420_MODE_HW)) ||
+        /* the non-centered viewport out does not work with 2Heads1OR mode,
+         * and for simplicity all customized viewport outs are disabled, or */
+        (pTimings->viewPort.out.width != nvEvoVisibleWidth(pTimings)) ||
+        (pTimings->viewPort.out.xAdjust != 0) ||
+        /* either HVisible, HSyncWidth, HBackPorch, HFrontPorch,
+         * pixelClock, or viewPortIn width is odd and cannot be split
+         * equally across two heads, or */
+        ((pTimings->rasterSize.x & 1) != 0) ||
+        ((pTimings->rasterSyncEnd.x & 1) != 1) ||
+        ((pTimings->rasterBlankEnd.x & 1) != 1) ||
+        ((pTimings->rasterBlankStart.x & 1) != 1) ||
+        ((pTimings->pixelClock & 1) != 0) ||
+        ((pTimings->viewPort.in.width & 1) != 0)) {
+        return FALSE;
+    }
+
+    /* Use 2Heads1OR mode only if the required pixel clock is greater than the
+     * maximum pixel clock supported by a head. */
+    return (pTimings->pixelClock > pHeadCaps->maxPClkKHz);
+}
+
+NvBool nvIsLockGroupFlipLocked(const NVLockGroup *pLockGroup)
+{
+    return pLockGroup->flipLockEnabled;
+}
+
+NvBool nvEvoIsConsoleActive(const NVDevEvoRec *pDevEvo)
+{
+    /*
+     * The actual console state can be known only after the core channel is
+     * allocated; if the core channel is not allocated yet, then assume that
+     * the console is active.
+     */
+    if (pDevEvo->core == NULL) {
+        return TRUE;
+    }
+
+    /*
+     * If (pDevEvo->modesetOwner == NULL), that means either the vbios
+     * console or the NVKMS console might be active.
+     *
+     * If (pDevEvo->modesetOwner != NULL) but
+     * pDevEvo->modesetOwnerOrSubOwnerChanged is TRUE, that means the modeset
+     * ownership has been grabbed by an external client but it hasn't
+     * performed any modeset yet, so the console might still be active.
+     */
+    if ((pDevEvo->modesetOwner == NULL) || pDevEvo->modesetOwnerOrSubOwnerChanged) {
+        NvU32 sd;
+        const NVDispEvoRec *pDispEvo;
+        FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+            if (nvGetActiveHeadMask(pDispEvo) != 0x0) {
+                return TRUE;
+            }
+        }
+    }
+
+    return FALSE;
+}
diff --git a/src/nvidia-modeset/src/nvkms-evo1.c b/src/nvidia-modeset/src/nvkms-evo1.c
new file mode 100644
index 0000000..3054769
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-evo1.c
@@ -0,0 +1,809 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file contains implementations of the EVO HAL methods for display class
+ * 1.x, found in the Tesla and Fermi 1 (GF10x) chips.
+ */
+
+#include "nvkms-types.h"
+#include "nvkms-rm.h"
+#include "nvkms-rmapi.h"
+#include "nvkms-evo1.h"
+#include "nvkms-prealloc.h"
+#include "nvkms-utils.h"
+
+#include "hdmi_spec.h"
+
+#include // NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS
+
+/*!
+ * Initialize head-independent IMP param fields.
+ *
+ * Initializes an NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS structure.
+ * IMP users should call this once, followed by per-head calls to
+ * AssignPerHeadImpParams().
+ *
+ * \param[in]  pImp  A pointer to a param structure.
+ */
+static void InitImpParams(NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp)
+{
+    int i;
+
+    nvkms_memset(pImp, 0, sizeof(*pImp));
+
+    /* Initialize to not possible. */
+    pImp->IsPossible = NV5070_CTRL_CMD_IS_MODE_POSSIBLE_IS_POSSIBLE_NO;
+
+    /* Set all heads to inactive. */
+    for (i = 0; i < NV5070_CTRL_CMD_MAX_HEADS; i++) {
+        pImp->Head[i].HeadActive =
+            NV5070_CTRL_CMD_IS_MODE_POSSIBLE_HEAD_ACTIVE_NO;
+    }
+
+    /* Set all ORs to no owner. */
+    for (i = 0; i < NV5070_CTRL_CMD_MAX_DACS; i++) {
+        pImp->Dac[i].owner = NV5070_CTRL_CMD_OR_OWNER_NONE;
+    }
+
+    pImp->bUseSorOwnerMask = TRUE;
+    for (i = 0; i < NV5070_CTRL_CMD_MAX_SORS; i++) {
+        pImp->Sor[i].ownerMask = NV5070_CTRL_CMD_SOR_OWNER_MASK_NONE;
+    }
+
+    for (i = 0; i < NV5070_CTRL_CMD_MAX_PIORS; i++) {
+        pImp->Pior[i].owner = NV5070_CTRL_CMD_OR_OWNER_NONE;
+    }
+}
+
+/*!
+ * Initialize head-specific IMP param fields.
+ *
+ * Initialize the portion of the NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS
+ * structure that applies to a specific head, and the OR driven by
+ * that head.
+ *
+ * The param structure should be initialized by InitImpParams()
+ * before calling this per-head function.
+ *
+ * \param[in]  pDevEvo     The device whose head is being described.
+ * \param[out] pImp        The param structure to initialize.
+ * \param[in]  pTimings    The raster timings and viewport configuration.
+ * \param[in]  pixelDepth  The output pixel depth for the head.
+ * \param[in]  pUsage      The usage bounds that will be used for this head.
+ * \param[in]  head        The number of the head that will be driven.
+ * \param[in]  orNumber    The number of the OR driven by the head.
+ * \param[in]  orType      The type of the OR driven by the head.
+ */
+static void AssignPerHeadImpParams(const NVDevEvoRec *pDevEvo,
+                                   NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp,
+                                   const NVHwModeTimingsEvo *pTimings,
+                                   const enum nvKmsPixelDepth pixelDepth,
+                                   const struct NvKmsUsageBounds *pUsage,
+                                   const int head,
+                                   const int orNumber,
+                                   const int orType)
+{
+    const NVHwModeViewPortEvo *pViewPort = &pTimings->viewPort;
+    NvU64 overlayFormats = 0;
+    NvU32 protocol;
+
+    nvkms_memset(&pImp->Head[head], 0, sizeof(pImp->Head[head]));
+
+    nvAssert(head < NV5070_CTRL_CMD_MAX_HEADS);
+    pImp->Head[head].HeadActive = TRUE;
+
+    nvAssert(orType == NV0073_CTRL_SPECIFIC_OR_TYPE_NONE ||
+             orNumber != NV_INVALID_OR);
+
+    /* raster timings */
+
+    pImp->Head[head].PixelClock.Frequency = pTimings->pixelClock;
+
+    pImp->Head[head].PixelClock.Adj1000Div1001 =
+        NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PIXEL_CLOCK_ADJ1000DIV1001_NO;
+
+    pImp->Head[head].RasterSize.Width = pTimings->rasterSize.x;
+    pImp->Head[head].RasterSize.Height = pTimings->rasterSize.y;
+    pImp->Head[head].RasterBlankStart.X = pTimings->rasterBlankStart.x;
+    pImp->Head[head].RasterBlankStart.Y = pTimings->rasterBlankStart.y;
+    pImp->Head[head].RasterBlankEnd.X = pTimings->rasterBlankEnd.x;
+    pImp->Head[head].RasterBlankEnd.Y = pTimings->rasterBlankEnd.y;
+    pImp->Head[head].RasterVertBlank2.YStart = pTimings->rasterVertBlank2Start;
+    pImp->Head[head].RasterVertBlank2.YEnd = pTimings->rasterVertBlank2End;
+    pImp->Head[head].Control.Structure =
+        pTimings->interlaced ?
+ NV5070_CTRL_CMD_IS_MODE_POSSIBLE_CONTROL_STRUCTURE_INTERLACED : + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_CONTROL_STRUCTURE_PROGRESSIVE; + + if (orType == NV0073_CTRL_SPECIFIC_OR_TYPE_DAC) { + nvAssert(orNumber < ARRAY_LEN(pImp->Dac)); + nvAssert(pImp->Dac[orNumber].owner == NV5070_CTRL_CMD_OR_OWNER_NONE); + pImp->Dac[orNumber].owner = NV5070_CTRL_CMD_OR_OWNER_HEAD(head); + nvAssert(pTimings->protocol == NVKMS_PROTOCOL_DAC_RGB); + pImp->Dac[orNumber].protocol = NV5070_CTRL_CMD_DAC_PROTOCOL_RGB_CRT; + } else if (orType == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + nvAssert(orNumber < ARRAY_LEN(pImp->Sor)); + pImp->Sor[orNumber].ownerMask |= NV5070_CTRL_CMD_SOR_OWNER_MASK_HEAD(head); + switch (pTimings->protocol) { + default: + nvAssert(!"Unknown protocol"); + /* fall through */ + case NVKMS_PROTOCOL_SOR_LVDS_CUSTOM: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_LVDS_CUSTOM; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_A; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_B; + break; + case NVKMS_PROTOCOL_SOR_DUAL_TMDS: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_DUAL_TMDS; + break; + case NVKMS_PROTOCOL_SOR_DP_A: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_DP_A; + break; + case NVKMS_PROTOCOL_SOR_DP_B: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_DP_B; + break; + } + pImp->Sor[orNumber].protocol = protocol; + pImp->Sor[orNumber].pixelReplicateMode = + NV5070_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_OFF; + } else if (orType == NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR) { + nvAssert(orNumber < ARRAY_LEN(pImp->Pior)); + nvAssert(pImp->Pior[orNumber].owner == NV5070_CTRL_CMD_OR_OWNER_NONE); + pImp->Pior[orNumber].owner = NV5070_CTRL_CMD_OR_OWNER_HEAD(head); + switch (pTimings->protocol) { + default: + nvAssert(!"Unknown protocol"); + /* fall through */ + case NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC: + protocol = NV5070_CTRL_CMD_PIOR_PROTOCOL_EXT_TMDS_ENC; + break; + } + pImp->Pior[orNumber].protocol = protocol; + } else { + nvAssert(orType == NV0073_CTRL_SPECIFIC_OR_TYPE_NONE); + } + + /* viewport out */ + + pImp->Head[head].OutputScaler.VerticalTaps = + NVEvoScalerTapsToNum(pViewPort->vTaps); + + pImp->Head[head].OutputScaler.HorizontalTaps = + NVEvoScalerTapsToNum(pViewPort->hTaps); + + pImp->Head[head].ViewportSizeOut.Width = pViewPort->out.width; + pImp->Head[head].ViewportSizeOut.Height = pViewPort->out.height; + + pImp->Head[head].ViewportSizeOutMin.Width = + pImp->Head[head].ViewportSizeOut.Width; + + pImp->Head[head].ViewportSizeOutMin.Height = + pImp->Head[head].ViewportSizeOut.Height; + + pImp->Head[head].ViewportSizeOutMax.Width = + pImp->Head[head].ViewportSizeOut.Width; + + pImp->Head[head].ViewportSizeOutMax.Height = + pImp->Head[head].ViewportSizeOut.Height; + + /* viewport in */ + + pImp->Head[head].ViewportSizeIn.Width = pViewPort->in.width; + pImp->Head[head].ViewportSizeIn.Height = pViewPort->in.height; + + /* + * The actual format doesn't really matter, since RM just + * converts it back to bits per pixel for its IMP calculation anyway. The + * hardware doesn't have a "usage bound" for core -- changing the format + * of the core surface will always incur a supervisor interrupt and rerun + * IMP (XXX if we change the core surface as part of a flip to one of a + * different depth, should we force the pre/post IMP update path?). + * + * EVO2 hal uses surfaces of the same format in the core and base channels, + * see needToReprogramCoreSurface() in nvkms-evo2.c. 
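+     *
+     * The mapping below therefore only needs to pick a representative
+     * format of the right bits-per-pixel class, roughly:
+     *
+     *   8Bpp packed RGB -> RF16_GF16_BF16_AF16 (64-bit)
+     *   4Bpp packed RGB -> A8R8G8B8            (32-bit)
+     *   2Bpp packed RGB -> R5G6B5              (16-bit)
+     *   1Bpp packed     -> I8                  (8-bit)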
+ */ + if (pUsage->layer[NVKMS_MAIN_LAYER].usable) { + if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) { + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_RF16_GF16_BF16_AF16; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8R8G8B8; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_R5G6B5; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) { + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_I8; + } else { /* default to RGB 4BPP */ + nvAssert(!"Unknown core format"); + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8R8G8B8; + } + } else { + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8R8G8B8; + } + + pImp->Head[head].Params.SuperSample = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_SUPER_SAMPLE_X1AA; + + /* base usage bounds */ + + if (pUsage->layer[NVKMS_MAIN_LAYER].usable) { + pImp->Head[head].BaseUsageBounds.Usable = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_YES; + + if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) { + pImp->Head[head].BaseUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_64; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + pImp->Head[head].BaseUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_32; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + pImp->Head[head].BaseUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_16; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) { + pImp->Head[head].BaseUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_8; + } else { /* default to RGB 8BPP */ + nvAssert(!"Unknown base channel usage bound format"); + pImp->Head[head].BaseUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_64; + } + + pImp->Head[head].BaseUsageBounds.SuperSample = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_SUPER_SAMPLE_X1AA; + } else { + pImp->Head[head].BaseUsageBounds.Usable = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_NO; + } + + /* overlay usage bounds */ + + pImp->Head[head].OverlayUsageBounds.Usable = + pUsage->layer[NVKMS_OVERLAY_LAYER].usable + ? NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_YES + : NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_NO; + + overlayFormats = pUsage->layer[NVKMS_OVERLAY_LAYER].usable ? 
+ pUsage->layer[NVKMS_OVERLAY_LAYER].supportedSurfaceMemoryFormats : + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP; + + if (overlayFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + pImp->Head[head].OverlayUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_32; + } else if (overlayFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + pImp->Head[head].OverlayUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_16; + } else { + nvAssert(!"Unknown overlay channel usage bound format"); + pImp->Head[head].OverlayUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_32; + } + + /* pixel depth */ + + switch (pixelDepth) { + case NVKMS_PIXEL_DEPTH_18_444: + pImp->Head[head].outputResourcePixelDepthBPP = + NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444; + break; + case NVKMS_PIXEL_DEPTH_24_444: + pImp->Head[head].outputResourcePixelDepthBPP = + NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; + break; + case NVKMS_PIXEL_DEPTH_30_444: + pImp->Head[head].outputResourcePixelDepthBPP = + NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444; + break; + case NVKMS_PIXEL_DEPTH_16_422: + pImp->Head[head].outputResourcePixelDepthBPP = + NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422; + break; + case NVKMS_PIXEL_DEPTH_20_422: + pImp->Head[head].outputResourcePixelDepthBPP = + NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422; + break; + } +} + +void nvEvo1IsModePossible(NVDispEvoPtr pDispEvo, + const NVEvoIsModePossibleDispInput *pInput, + NVEvoIsModePossibleDispOutput *pOutput) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_IMP_PARAMS, sizeof(*pImp)); + NvBool result = FALSE; + NvU32 head; + NvU32 ret; + + InitImpParams(pImp); + + pImp->RequestedOperation = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_QUERY; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (pInput->head[head].pTimings == NULL) { + continue; + } + + AssignPerHeadImpParams(pDevEvo, pImp, + pInput->head[head].pTimings, + pInput->head[head].pixelDepth, + pInput->head[head].pUsage, + head, + pInput->head[head].orIndex, + pInput->head[head].orType); + } + + pImp->base.subdeviceIndex = pDispEvo->displayOwner; + + if (pInput->requireBootClocks) { + // XXX TODO: IMP requires lock pin information if pstate information is + // requested. For now, just assume no locking. + pImp->MinPState = NV5070_CTRL_IS_MODE_POSSIBLE_NEED_MIN_PSTATE; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + pImp->Head[head].displayId[0] = pInput->head[head].displayId; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_IS_MODE_POSSIBLE, + pImp, sizeof(*pImp)); + + if (ret != NV_OK || !pImp->IsPossible || + (pInput->requireBootClocks && + // P8 = "boot clocks" + (pImp->MinPState < NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P8 && + // XXX TODO: With PStates 3.0, only a "v-pstate" is returned in + // impParams.minPerfLevel. We need to correlate that with "boot + // clocks" somehow. 
+         pImp->MinPState != NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_UNDEFINED))) {
+        goto done;
+    }
+
+    result = TRUE;
+
+done:
+    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_IMP_PARAMS);
+    pOutput->possible = result;
+}
+
+void nvEvo1PrePostIMP(NVDispEvoPtr pDispEvo, NvBool isPre)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp =
+        nvPreallocGet(pDevEvo, PREALLOC_TYPE_IMP_PARAMS, sizeof(*pImp));
+    NvU32 ret;
+
+    if (isPre) {
+        /*
+         * Sync the core channel for pre-modeset IMP to ensure that the state
+         * cache reflects all of the methods we've pushed.
+         */
+        ret = nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__);
+        if (!ret) {
+            nvAssert(!"nvRMSyncEvoChannel failed during PreModesetIMP");
+        }
+    }
+
+    nvkms_memset(pImp, 0, sizeof(*pImp));
+
+    pImp->RequestedOperation = isPre ?
+        NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_PRE_MODESET_USE_SC :
+        NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_POST_MODESET_USE_SC;
+
+    pImp->base.subdeviceIndex = pDispEvo->displayOwner;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayHandle,
+                         NV5070_CTRL_CMD_IS_MODE_POSSIBLE,
+                         pImp, sizeof(*pImp));
+    if ((ret != NVOS_STATUS_SUCCESS) || !pImp->IsPossible) {
+        nvAssert(!"NV5070_CTRL_CMD_IS_MODE_POSSIBLE failed");
+    }
+
+    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_IMP_PARAMS);
+}
+
+/*!
+ * Return the value to use for HEAD_SET_STORAGE_PITCH.
+ *
+ * Per dispClass_02.mfs, the HEAD_SET_STORAGE_PITCH "units are blocks
+ * if the layout is BLOCKLINEAR, the units are multiples of 256 bytes
+ * if the layout is PITCH."
+ *
+ * \return  Returns 0 if the pitch is invalid.  Otherwise returns the
+ *          HEAD_SET_STORAGE_PITCH value.
+ */
+NvU32 nvEvoGetHeadSetStoragePitchValue(const NVDevEvoRec *pDevEvo,
+                                       enum NvKmsSurfaceMemoryLayout layout,
+                                       NvU32 pitch)
+{
+    if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
+        /* pitch is already in units of blocks; nothing else needed. */
+    } else {
+        /* pitch is in units of bytes, and must be aligned to 0x100. */
+        if ((pitch & 0xFF) != 0) {
+            return 0;
+        }
+
+        pitch >>= 8;
+    }
+
+    if (pitch > pDevEvo->caps.maxPitchValue) {
+        return 0;
+    }
+
+    return pitch;
+}
+
+static NvBool GetChannelState(NVDevEvoPtr pDevEvo,
+                              NVEvoChannelPtr pChan,
+                              NvU32 sd,
+                              NvU32 *result)
+{
+    NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS info = { };
+    NvU32 ret;
+
+    info.base.subdeviceIndex = sd;
+    info.channelClass = pChan->hwclass;
+    info.channelInstance = pChan->instance;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayHandle,
+                         NV5070_CTRL_CMD_GET_CHANNEL_INFO,
+                         &info, sizeof(info));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                    "Failed to query display engine channel state: 0x%08x:%d:%d:0x%08x",
+                    pChan->hwclass, pChan->instance, sd, ret);
+        return FALSE;
+    }
+
+    *result = info.channelState;
+
+    return TRUE;
+}
+
+NvBool nvEvo1IsChannelIdle(NVDevEvoPtr pDevEvo,
+                           NVEvoChannelPtr pChan,
+                           NvU32 sd,
+                           NvBool *result)
+{
+    NvU32 channelState;
+
+    if (!GetChannelState(pDevEvo, pChan, sd, &channelState)) {
+        return FALSE;
+    }
+
+    *result = (channelState == NV5070_CTRL_GET_CHANNEL_INFO_STATE_IDLE);
+
+    return TRUE;
+}
+
+/*
+ * Result is FALSE if the EVO channel is in one of the NO_METHOD_PENDING
+ * or UNCONNECTED states, TRUE otherwise.
+ *
+ * NO_METHOD_PENDING is a mask for EMPTY | WRTIDLE | IDLE.
+ *
+ * If NVKMS hasn't grabbed the channel, it can be seen as UNCONNECTED.
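+ *
+ * For example, a channel reporting STATE_IDLE falls within the
+ * NO_METHOD_PENDING mask, so *result is set to FALSE (nothing pending);
+ * a state outside both masks leaves *result TRUE.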
+ */
+NvBool nvEvo1IsChannelMethodPending(NVDevEvoPtr pDevEvo,
+                                    NVEvoChannelPtr pChan,
+                                    NvU32 sd,
+                                    NvBool *result)
+{
+    NvU32 channelState;
+
+    if (!GetChannelState(pDevEvo, pChan, sd, &channelState)) {
+        return FALSE;
+    }
+
+    *result = !(channelState &
+                (NV5070_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING |
+                 NV5070_CTRL_GET_CHANNEL_INFO_STATE_UNCONNECTED));
+
+    return TRUE;
+}
+
+void nvEvo1SetDscParams(const NVDispEvoRec *pDispEvo,
+                        const NvU32 head,
+                        const NVDscInfoEvoRec *pDscInfo,
+                        const enum nvKmsPixelDepth pixelDepth)
+{
+    nvAssert(pDscInfo->type == NV_DSC_INFO_EVO_TYPE_DISABLED);
+}
+
+/*
+ * The 'type' the timing library writes into the NVT_INFOFRAME_HEADER
+ * structure is not the same as the protocol values that hardware
+ * expects to see in the real packet header; those are defined in the
+ * HDMI_PACKET_TYPE enums (hdmi_pktType_*) from hdmi_spec.h; use those
+ * to fill in the first byte of the packet.
+ */
+NvBool nvEvo1NvtToHdmiInfoFramePacketType(const NvU32 srcType, NvU8 *pDstType)
+{
+    NvU8 hdmiPacketType;
+
+    switch (srcType) {
+    default:
+        nvAssert(!"unsupported packet type");
+        return FALSE;
+    case NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET:
+        hdmiPacketType = hdmi_pktType_ExtendedMetadata;
+        break;
+    case NVT_INFOFRAME_TYPE_VIDEO:
+        hdmiPacketType = hdmi_pktType_AviInfoFrame;
+        break;
+    case NVT_INFOFRAME_TYPE_VENDOR_SPECIFIC:
+        hdmiPacketType = hdmi_pktType_VendorSpecInfoFrame;
+        break;
+    case NVT_INFOFRAME_TYPE_DYNAMIC_RANGE_MASTERING:
+        hdmiPacketType = hdmi_pktType_DynamicRangeMasteringInfoFrame;
+        break;
+    }
+
+    *pDstType = hdmiPacketType;
+
+    return TRUE;
+}
+
+static NVHDMIPKT_TC EvoInfoFrameToHdmiLibTransmitCtrl(
+    NvEvoInfoFrameTransmitControl src,
+    NvBool needChecksum)
+{
+    NVHDMIPKT_TC hdmiLibTransmitCtrl =
+        NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME;
+
+    switch (src) {
+    case NV_EVO_INFOFRAME_TRANSMIT_CONTROL_SINGLE_FRAME:
+        hdmiLibTransmitCtrl = NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_SINGLE_FRAME;
+        break;
+    case NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME:
+        hdmiLibTransmitCtrl = NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME;
+        break;
+    }
+
+    if (!needChecksum) {
+        hdmiLibTransmitCtrl &=
+            ~DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN);
+    }
+
+    return hdmiLibTransmitCtrl;
+}
+
+void nvEvo1SendHdmiInfoFrame(const NVDispEvoRec *pDispEvo,
+                             const NvU32 head,
+                             const NvEvoInfoFrameTransmitControl transmitCtrl,
+                             const NVT_INFOFRAME_HEADER *pInfoFrameHeader,
+                             const NvU32 infoframeSize,
+                             NvBool needChecksum)
+{
+    NVHDMIPKT_TC hdmiLibTransmitCtrl =
+        EvoInfoFrameToHdmiLibTransmitCtrl(transmitCtrl, needChecksum);
+    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVHDMIPKT_TYPE hdmiLibType;
+    NVHDMIPKT_RESULT ret;
+    NvU8 *infoframe = NULL;
+    NvU8 hdmiPacketType, checksum;
+    NvU32 i;
+    const NvU8 *pPayload;
+    size_t headerSize;
+
+    /*
+     * The 'type' from the timing library is not the type that the HDMI
+     * library expects to see in its NvHdmiPkt_PacketWrite call; those
+     * are NVHDMIPKT_TYPE_*. Determine both below.
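+     *
+     * For example, NVT_INFOFRAME_TYPE_VIDEO selects
+     * NVHDMIPKT_TYPE_AVI_INFOFRAME for the library call, while the raw
+     * packet-type byte placed in infoframe[0] below is
+     * hdmi_pktType_AviInfoFrame.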
+     */
+    switch (pInfoFrameHeader->type) {
+    default:
+        nvAssert(0);
+        return;
+    case NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET:
+        hdmiLibType = NVHDMIPKT_TYPE_GENERIC;
+        break;
+    case NVT_INFOFRAME_TYPE_VIDEO:
+        hdmiLibType = NVHDMIPKT_TYPE_AVI_INFOFRAME;
+        break;
+    case NVT_INFOFRAME_TYPE_VENDOR_SPECIFIC:
+        hdmiLibType = NVHDMIPKT_TYPE_VENDOR_SPECIFIC_INFOFRAME;
+        break;
+    case NVT_INFOFRAME_TYPE_DYNAMIC_RANGE_MASTERING:
+        hdmiLibType = NVHDMIPKT_TYPE_VENDOR_SPECIFIC_INFOFRAME;
+        break;
+    }
+
+    if (!nvEvo1NvtToHdmiInfoFramePacketType(pInfoFrameHeader->type,
+                                            &hdmiPacketType)) {
+        return;
+    }
+
+    /*
+     * These structures are weird. The NVT_VIDEO_INFOFRAME,
+     * NVT_VENDOR_SPECIFIC_INFOFRAME, NVT_EXTENDED_METADATA_PACKET_INFOFRAME,
+     * etc structures are *kind of* what we want to send to the hdmipkt library,
+     * except the type in the header is different, and a single checksum byte
+     * may need to be inserted *between* the header and the payload (requiring
+     * us to allocate a buffer one byte larger).
+     */
+    infoframe = nvAlloc(infoframeSize + (needChecksum ? sizeof(checksum) : 0));
+    if (infoframe == NULL) {
+        return;
+    }
+
+    /*
+     * The fields and size of NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER
+     * match with those of NVT_INFOFRAME_HEADER at the time of writing, but
+     * nvtiming.h declares them separately. To be safe, special case
+     * NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET.
+     */
+    if (pInfoFrameHeader->type == NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET) {
+        const NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER *pExtMetadataHeader =
+            (const NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER *) pInfoFrameHeader;
+
+        pPayload = (const NvU8 *)(pExtMetadataHeader + 1);
+        headerSize = sizeof(NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER);
+    } else {
+        pPayload = (const NvU8 *)(pInfoFrameHeader + 1);
+        headerSize = sizeof(NVT_INFOFRAME_HEADER);
+    }
+
+    infoframe[0] = hdmiPacketType;
+    nvkms_memcpy(&infoframe[1], &((const NvU8*) pInfoFrameHeader)[1],
+                 headerSize - 1);
+
+    /* copy the payload, starting after the 3-byte header and checksum */
+    nvkms_memcpy(&infoframe[headerSize + (needChecksum ? sizeof(checksum) : 0)],
+                 pPayload, infoframeSize - headerSize /* payload size */);
+
+    /*
+     * XXX Redundant since needChecksum implies
+     * _HDMI_PKT_TRANSMIT_CTRL_CHKSUM_HW_EN via
+     * EvoInfoFrameToHdmiLibTransmitCtrl()?
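+     *
+     * For reference, the HDMI InfoFrame checksum makes the byte sum of
+     * the entire packet (type byte, header, checksum, payload) equal
+     * 0 mod 256:
+     *
+     *   PB0 = 0x100 - ((sum of all other bytes) & 0xFF)
+     *
+     * which is what the loop below computes: infoframe[headerSize] is
+     * cleared first, all bytes are summed, and the two's complement
+     * (~checksum + 1) is stored as the checksum byte.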
+     */
+    if (needChecksum) {
+        /* PB0: checksum */
+        checksum = 0;
+        infoframe[headerSize] = 0;
+        for (i = 0; i < infoframeSize + sizeof(checksum); i++) {
+            checksum += infoframe[i];
+        }
+        infoframe[headerSize] = ~checksum + 1;
+    }
+
+    ret = NvHdmiPkt_PacketWrite(pDevEvo->hdmiLib.handle,
+                                pDispEvo->displayOwner,
+                                pHeadState->activeRmId,
+                                head,
+                                hdmiLibType,
+                                hdmiLibTransmitCtrl,
+                                infoframeSize,
+                                infoframe);
+
+    if (ret != NVHDMIPKT_SUCCESS) {
+        nvAssert(ret == NVHDMIPKT_SUCCESS);
+    }
+
+    nvFree(infoframe);
+}
+
+void nvEvo1DisableHdmiInfoFrame(const NVDispEvoRec *pDispEvo,
+                                const NvU32 head,
+                                const NvU8 nvtInfoFrameType)
+{
+    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS params = { 0 };
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU8 hdmiPacketType;
+    NvU32 ret;
+
+    if (!nvEvo1NvtToHdmiInfoFramePacketType(nvtInfoFrameType,
+                                            &hdmiPacketType)) {
+        return;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = pHeadState->activeRmId;
+    params.type = hdmiPacketType;
+    params.transmitControl = DRF_DEF(0073_CTRL_SPECIFIC,
+        _SET_OD_PACKET_CTRL_TRANSMIT_CONTROL, _ENABLE, _NO);
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET_CTRL,
+                         &params,
+                         sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET_CTRL failed");
+    }
+}
+
+void nvEvo1SendDpInfoFrameSdp(const NVDispEvoRec *pDispEvo,
+                              const NvU32 head,
+                              const NvEvoInfoFrameTransmitControl transmitCtrl,
+                              const DPSDP_DESCRIPTOR *sdp)
+{
+    NvU32 ret;
+    const NVDispHeadStateEvoRec *pHeadState =
+        &pDispEvo->headState[head];
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS params = {
+        .subDeviceInstance = pDispEvo->displayOwner,
+        .displayId = pHeadState->activeRmId,
+    };
+
+    switch (transmitCtrl) {
+    case NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME:
+        params.transmitControl =
+            DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL,
+                    _SINGLE_FRAME, _DISABLE);
+        break;
+    case NV_EVO_INFOFRAME_TRANSMIT_CONTROL_SINGLE_FRAME:
+        params.transmitControl =
+            DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL,
+                    _SINGLE_FRAME, _ENABLE);
+        break;
+    }
+
+    params.transmitControl |=
+        DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _OTHER_FRAME, _DISABLE) |
+        DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ENABLE, _YES) |
+        DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _GEN_INFOFRAME_MODE, _INFOFRAME1) |
+        DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ON_HBLANK, _DISABLE);
+
+    nvAssert((sizeof(sdp->hb) + sdp->dataSize) <= sizeof(params.aPacket));
+
+    params.packetSize = NV_MIN((sizeof(sdp->hb) + sdp->dataSize),
+                               sizeof(params.aPacket));
+    nvkms_memcpy(params.aPacket, &sdp->hb, params.packetSize);
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET,
+                         &params,
+                         sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET failed");
+    }
+}
diff --git a/src/nvidia-modeset/src/nvkms-evo2.c b/src/nvidia-modeset/src/nvkms-evo2.c
new file mode 100644
index 0000000..5ca3f5d
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-evo2.c
@@ -0,0 +1,4200 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file contains implementations of the EVO HAL methods for display class + * 2.x. + */ + +#include "nvkms-dma.h" +#include "nvkms-types.h" +#include "nvkms-rmapi.h" +#include "nvkms-surface.h" + +#include "nvkms-evo.h" +#include "nvkms-evo1.h" +#include "nvkms-ctxdma.h" + +#include + +#include // NV5070_NOTIFICATION_STATUS + +#include // NV917C_BASE_CHANNEL_DMA +#include // GK104DispOverlayImmControlPio +#include // NV917E_OVERLAY_CHANNEL_DMA +#include // NV917C_SET_SPARE_{PRE,POST}_UPDATE_TRAP + +#include // NV917D_CORE_CHANNEL_DMA +#include // NV917D_NOTIFIER_CRC +#include // NV927D_CORE_CHANNEL_DMA +#include // NV977D_CORE_CHANNEL_DMA +#include // NV947D_CORE_CHANNEL_DMA +#include + +#include // NV5070_CTRL_CMD_STOP_BASE_PARAMS + +ct_assert(NV_EVO_LOCK_PIN_0 > + NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1); +ct_assert(NV_EVO_LOCK_PIN_0 > + NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1); + +/** Number of CRCs supported by hardware on NV917D hardware (Comp and SF/SOR) */ +#define NV_EVO2_NUM_CRC_FIELDS 2 + +/** Flags read from CRCNotifier on NV917D hardware (Comp, SF/SOR Ovf and count) */ +#define NV_EVO2_NUM_CRC_FLAGS 3 + +#define NV_EVO2_SUPPORTED_DITHERING_MODES \ + ((1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO) | \ + (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2) | \ + (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2) | \ + (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL)) + +#define NV_EVO2_SUPPORTED_CURSOR_COMP_BLEND_MODES \ + ((1 << NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA)) + +static void +EvoSetCursorImage(NVDevEvoPtr pDevEvo, + const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams); + +static void +EvoPushUpdateComposition(NVDevEvoPtr pDevEvo, + const int head, + const NVFlipChannelEvoHwState *pBaseHwState, + const NVFlipChannelEvoHwState *pOverlayHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition); + +static void InitChannelCaps90(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel) +{ + if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0) { + static const NVEvoChannelCaps OverlayCaps = { + /* + * Overlay supports timestamp flips on class 9x7e, but error checks + * that it 
doesn't exceed 61 bits. + */ + .validTimeStampBits = 61, + /* Overlay does not support tearing/immediate flips. */ + .tearingFlips = FALSE, + .vrrTearingFlips = FALSE, + /* Overlay does not support per-eye stereo flips. */ + .perEyeStereoFlips = FALSE, + }; + + pChannel->caps = OverlayCaps; + } + if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0) { + static const NVEvoChannelCaps BaseCaps = { + /* + * Base supports timestamp flips on class 9x7c, but error checks + * that it doesn't exceed 61 bits. + */ + .validTimeStampBits = 61, + /* Base supports tearing/immediate flips. */ + .tearingFlips = TRUE, + /* Base supports VRR tearing flips. */ + .vrrTearingFlips = TRUE, + /* Base supports per-eye stereo flips. */ + .perEyeStereoFlips = TRUE, + }; + + pChannel->caps = BaseCaps; + } +} + +static void EvoInitChannel90(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + const NvBool isCore = + FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + pChannel->channelMask); + + InitChannelCaps90(pDevEvo, pChannel); + + /* Set up core channel state. */ + if (isCore) { + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_DEFAULT_BASE_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_DEFAULT_BASE_COLOR, _RED, 0) | + DRF_NUM(917D, _HEAD_SET_DEFAULT_BASE_COLOR, _GREEN, 0) | + DRF_NUM(917D, _HEAD_SET_DEFAULT_BASE_COLOR, _BLUE, 0)); + } + } + + /* Set up base channel state. */ + if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0) { + NvU32 head = NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(pChannel->channelMask); + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + pDevEvo->pSubDevices[sd]->baseComp[head].initialized = FALSE; + } + + // For now we only support USE_CORE_LUT mode, but sending this method every + // flip causes an error check to fire for tearing flips even if the LUT mode + // isn't changing. So instead, program it here. ApplyBaseFlipOverrides() + // will force the first flip to be non-tearing. + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_BASE_LUT_LO, 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(917C, _SET_BASE_LUT_LO, _ENABLE, + _USE_CORE_LUT)); + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_OUTPUT_LUT_LO, 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(917C, _SET_OUTPUT_LUT_LO, _ENABLE, + _USE_CORE_LUT)); + } + + if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0) { + NvU32 head = NV_EVO_CHANNEL_MASK_OVERLAY_HEAD_NUMBER(pChannel->channelMask); + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + pDevEvo->pSubDevices[sd]->overlayComp[head].initialized = FALSE; + } + } +} + +static void EvoInitWindowMapping90(const NVDispEvoRec *pDispEvo, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + /* Fixed window mapping on EVO 2 -- nothing to do. */ +} + +/* + * These values are the same between all overlay + * (7E_SURFACE_SET_PARAMS_FORMAT_) EVO classes. + * + * Return 0 in the case of an unrecognized NvKmsSurfaceMemoryFormat. 
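+ *
+ * Note that 0 is also returned for recognized formats that this overlay
+ * class cannot display (e.g. I8 and R5G6B5 in the switch below), so
+ * callers must treat 0 as "not displayable in the overlay" rather than
+ * as a valid hardware format value.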
+ */ +static NvU32 EvoOverlayFormatFromKmsFormat91(enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatI8: + return 0; + case NvKmsSurfaceMemoryFormatR5G6B5: + return 0; + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + return NV917E_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5; + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + return NV917E_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8; + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + return NV917E_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10; + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + case NvKmsSurfaceMemoryFormatRF16GF16BF16XF16: + return NV917E_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16; + case NvKmsSurfaceMemoryFormatR16G16B16A16: + return NV917E_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16; + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + return 0; + } + + return 0; +} + +static void EvoSetRasterParams90(NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 overscanColor = + DRF_NUM(917D, _HEAD_SET_OVERSCAN_COLOR, _RED, pOverscanColor->red) | + DRF_NUM(917D, _HEAD_SET_OVERSCAN_COLOR, _GRN, pOverscanColor->green) | + DRF_NUM(917D, _HEAD_SET_OVERSCAN_COLOR, _BLU, pOverscanColor->blue); + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // XXX[AGP]: These methods are sequential and could use an incrementing + // method, but it's not clear if there's a bug in EVO that causes corruption + // sometimes. Play it safe and send methods with count=1. 
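+    //
+    // Each method below is therefore pushed as a separate (method, data)
+    // pair with a count of 1 via nvDmaSetStartEvoMethod().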
+ + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_OVERSCAN_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, overscanColor); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_SIZE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_RASTER_SIZE, _WIDTH, pTimings->rasterSize.x) | + DRF_NUM(917D, _HEAD_SET_RASTER_SIZE, _HEIGHT, pTimings->rasterSize.y)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_SYNC_END(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_RASTER_SYNC_END, _X, pTimings->rasterSyncEnd.x) | + DRF_NUM(917D, _HEAD_SET_RASTER_SYNC_END, _Y, pTimings->rasterSyncEnd.y)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_BLANK_END(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_RASTER_BLANK_END, _X, pTimings->rasterBlankEnd.x) | + DRF_NUM(917D, _HEAD_SET_RASTER_BLANK_END, _Y, pTimings->rasterBlankEnd.y)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_BLANK_START(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_RASTER_BLANK_START, _X, pTimings->rasterBlankStart.x) | + DRF_NUM(917D, _HEAD_SET_RASTER_BLANK_START, _Y, pTimings->rasterBlankStart.y)); + + if (pTimings->interlaced) { + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_VERT_BLANK2(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_RASTER_VERT_BLANK2, _YSTART, + pTimings->rasterVertBlank2Start) | + DRF_NUM(917D, _HEAD_SET_RASTER_VERT_BLANK2, _YEND, + pTimings->rasterVertBlank2End)); + } + + nvAssert((KHzToHz(pTimings->pixelClock) & + ~DRF_MASK(NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ)) == 0x0); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY, _HERTZ, + KHzToHz(pTimings->pixelClock)) | + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY, _ADJ1000DIV1001,_FALSE)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _MODE, _CLK_CUSTOM) | + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _NOT_DRIVER, _FALSE) | + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _ENABLE_HOPPING, _FALSE) | + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _HOPPING_MODE, _VBLANK)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, _HERTZ, + KHzToHz(pTimings->pixelClock)) | + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, _ADJ1000DIV1001,_FALSE)); +} + +/* + * Wrapper for EvoSetRasterParams90 which additionally sends the HDMI 3D + * control methods. 
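+ *
+ * For HDMI 3D (frame packing) this amounts to HEAD_SET_HDMI_CTRL with
+ * _VIDEO_FORMAT set to _STEREO3D, plus HEAD_SET_VACTIVE_SPACE_COLOR to
+ * fill the active space between the two eyes' images (non-black in DEBUG
+ * builds, presumably so it is visible during bringup).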
+ */ +static void EvoSetRasterParams91(NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NvU8 tilePosition, + const NVDscInfoEvoRec *pDscInfo, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 hdmiStereoCtrl = + DRF_DEF(917D, _HEAD_SET_HDMI_CTRL, _STEREO3D_STRUCTURE, _FRAME_PACKED) | + DRF_NUM(917D, _HEAD_SET_HDMI_CTRL, _HDMI_VIC, 0); + + nvAssert(tilePosition == 0); + nvAssert(pDscInfo->type == NV_DSC_INFO_EVO_TYPE_DISABLED); + + EvoSetRasterParams90(pDevEvo, head, + pTimings, + pOverscanColor, updateState); + + if (pTimings->hdmi3D) { + hdmiStereoCtrl |= + DRF_DEF(917D, _HEAD_SET_HDMI_CTRL, _VIDEO_FORMAT, _STEREO3D); + } else { + hdmiStereoCtrl |= + DRF_DEF(917D, _HEAD_SET_HDMI_CTRL, _VIDEO_FORMAT, _NORMAL); + } + + nvDmaSetStartEvoMethod(pChannel, + NV917D_HEAD_SET_VACTIVE_SPACE_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_VACTIVE_SPACE_COLOR, _RED_CR, 0) | +#if defined(DEBUG) + DRF_NUM(917D, _HEAD_SET_VACTIVE_SPACE_COLOR, _GRN_Y, 512) | +#else + DRF_NUM(917D, _HEAD_SET_VACTIVE_SPACE_COLOR, _GRN_Y, 0) | +#endif + DRF_NUM(917D, _HEAD_SET_VACTIVE_SPACE_COLOR, _BLU_CB, 0)); + + nvDmaSetStartEvoMethod(pChannel, + NV917D_HEAD_SET_HDMI_CTRL(head), 1); + nvDmaSetEvoMethodData(pChannel, hdmiStereoCtrl); +} + +static void EvoSetProcAmp97(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvU8 colorSpace; + NvU32 dynRange; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // These NVT defines match the HEAD_SET_PROCAMP ones. 
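+    // The ct_assert()s verify that numerically at compile time; that is
+    // what makes the bare assignment in the final else branch
+    // (colorSpace = pHeadState->procAmp.colorimetry) safe without a
+    // translation table. Only the BT2020 colorimetry values need the
+    // explicit remapping below, presumably because their NVT values have
+    // no identically-valued PROCAMP counterpart.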
+ ct_assert(NVT_COLORIMETRY_RGB == NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB); + ct_assert(NVT_COLORIMETRY_YUV_601 == NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601); + ct_assert(NVT_COLORIMETRY_YUV_709 == NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709); + ct_assert(NVT_COLOR_RANGE_FULL == NV977D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE); + ct_assert(NVT_COLOR_RANGE_LIMITED == NV977D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE); + + if (pHeadState->procAmp.colorimetry == NVT_COLORIMETRY_BT2020RGB) { + colorSpace = NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB; + } else if (pHeadState->procAmp.colorimetry == NVT_COLORIMETRY_BT2020YCC) { + colorSpace = NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020; + } else { + colorSpace = pHeadState->procAmp.colorimetry; + } + + if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) { + dynRange = DRF_DEF(977D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _VESA); + } else { + nvAssert(pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED); + dynRange = DRF_DEF(977D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _CEA); + } + + nvDmaSetStartEvoMethod(pChannel, NV977D_HEAD_SET_PROCAMP(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(977D, _HEAD_SET_PROCAMP, _COLOR_SPACE, colorSpace) | + DRF_DEF(977D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _AUTO) | + DRF_NUM(977D, _HEAD_SET_PROCAMP, _SAT_COS, + pHeadState->procAmp.satCos) | + DRF_NUM(977D, _HEAD_SET_PROCAMP, _SAT_SINE, 0) | + dynRange | + DRF_NUM(977D, _HEAD_SET_PROCAMP, _RANGE_COMPRESSION, + pHeadState->procAmp.colorRange)); +} + +static void EvoSetProcAmp90(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvU8 colorSpace; + NvU32 dynRange; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // These NVT defines match the HEAD_SET_PROCAMP ones. 
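+    // Same compile-time trick as the 977D variant above, with one
+    // difference: there is no YUV_2020 color space here, so BT2020YCC is
+    // asserted unreachable rather than remapped (presumably because class
+    // 917D predates BT2020 YCC support).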
+ ct_assert(NVT_COLORIMETRY_RGB == NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB); + ct_assert(NVT_COLORIMETRY_YUV_601 == NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601); + ct_assert(NVT_COLORIMETRY_YUV_709 == NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709); + ct_assert(NVT_COLOR_RANGE_FULL == NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE); + ct_assert(NVT_COLOR_RANGE_LIMITED == NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE); + + if (pHeadState->procAmp.colorimetry == NVT_COLORIMETRY_BT2020RGB) { + colorSpace = NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB; + } else { + nvAssert(pHeadState->procAmp.colorimetry != NVT_COLORIMETRY_BT2020YCC); + colorSpace = pHeadState->procAmp.colorimetry; + } + + if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) { + dynRange = DRF_DEF(917D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _VESA); + } else { + nvAssert(pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED); + dynRange = DRF_DEF(917D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _CEA); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PROCAMP(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_PROCAMP, _COLOR_SPACE, colorSpace) | + DRF_DEF(917D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _AUTO) | + DRF_NUM(917D, _HEAD_SET_PROCAMP, _SAT_COS, + pHeadState->procAmp.satCos) | + DRF_NUM(917D, _HEAD_SET_PROCAMP, _SAT_SINE, 0) | + dynRange | + DRF_NUM(917D, _HEAD_SET_PROCAMP, _RANGE_COMPRESSION, + pHeadState->procAmp.colorRange)); +} + +static void EvoSetHeadControl90(NVDevEvoPtr pDevEvo, int sd, int head, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + /* + * NOTE: This function should only push state to the hardware based on data + * in the pHC. If not, then we may miss updates due to the memcmp of the + * HeadControl structure in UpdateEvoLockState(). + */ + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + NvU32 data = 0, pin; + NvU32 serverLockMode, clientLockMode; + + /* These methods should only apply to a single subdevice */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + switch (pHC->serverLock) { + case NV_EVO_NO_LOCK: + serverLockMode = NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK; + break; + case NV_EVO_FRAME_LOCK: + serverLockMode = NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK; + break; + case NV_EVO_RASTER_LOCK: + serverLockMode = NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK; + break; + default: + nvAssert(!"Invalid server lock mode"); + return; + } + + switch (pHC->clientLock) { + case NV_EVO_NO_LOCK: + clientLockMode = NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK; + break; + case NV_EVO_FRAME_LOCK: + clientLockMode = NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK; + break; + case NV_EVO_RASTER_LOCK: + clientLockMode = NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK; + break; + default: + nvAssert(!"Invalid client lock mode"); + return; + } + + // Convert head control state to EVO method values. 
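+    //
+    // Each block below ORs one field into the single HEAD_SET_CONTROL data
+    // word: DRF_DEF() for named enumerants, DRF_NUM() for computed values.
+    // As a worked example, a head serving framelock on its internal pin
+    // contributes _MASTER_LOCK_MODE = FRAME_LOCK and
+    // _MASTER_LOCK_PIN = INTERNAL_SCAN_LOCK(head) to 'data'.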
+ if (pHC->interlaced) { + data |= DRF_DEF(917D, _HEAD_SET_CONTROL, _STRUCTURE, _INTERLACED); + } else { + data |= DRF_DEF(917D, _HEAD_SET_CONTROL, _STRUCTURE, _PROGRESSIVE); + } + + nvAssert(pHC->serverLockPin != NV_EVO_LOCK_PIN_ERROR); + nvAssert(pHC->clientLockPin != NV_EVO_LOCK_PIN_ERROR); + nvAssert(pHC->flipLockPin != NV_EVO_LOCK_PIN_ERROR); + + if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->serverLockPin)) { + pin = pHC->serverLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + /* + * dispClass_02.mfs says: + * "master lock pin, if internal, must be set to the corresponding + * internal pin for that head" (error check #12) + * (Note that this is only enforced when scanlock master is enabled) + */ + nvAssert(pHC->serverLock == NV_EVO_NO_LOCK || pin == head); + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(pin)); + } else { + pin = pHC->serverLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(pin)); + } + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _MASTER_LOCK_MODE, serverLockMode); + + if (clientLockMode == NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK) { + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED); + } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->clientLockPin)) { + pin = pHC->clientLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(pin)); + } else { + pin = pHC->clientLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(pin)); + } + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _SLAVE_LOCK_MODE, clientLockMode); + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _SLAVE_LOCKOUT_WINDOW, + pHC->clientLockoutWindow); + + /* + * Interlaced with stereo lock mode is not supported. + * + * We always enable stereo lock when it's available and either framelock + * or rasterlock is in use. + */ + if (pHC->stereoLocked) { + nvAssert(!pHC->interlaced); + + if (pHC->serverLock != NV_EVO_NO_LOCK) { + data |= DRF_NUM(927D, _HEAD_SET_CONTROL, _MASTER_STEREO_LOCK_MODE, + NV927D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE); + } + if (pHC->clientLock != NV_EVO_NO_LOCK) { + data |= DRF_NUM(927D, _HEAD_SET_CONTROL, _SLAVE_STEREO_LOCK_MODE, + NV927D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE); + } + } + + /* + * Changing the flip lock pin induces a head shutdown. We want to avoid + * this in two cases: + * + * 1) When transitioning from the UEFI console, the flip lock pin is + * currently set to UNSPECIFIED, so we want to preserve that setting + * if possible to avoid an unnecessary flicker. + * + * 2) While framelock is enabled, we need to avoid head shutdown when + * transitioning to and from fliplock to guarantee no loss of stereo + * sync. + * + * To guarantee stereo sync while also avoiding unnecessary flicker when + * transitioning from UEFI, we'll set the flip lock pin to UNSPECIFIED + * unless fliplock, frame lock, or raster lock are enabled. Enabling + * framelock may induce one head shutdown when transitioning away from + * UNSPECIFIED, but then enabling/disabling fliplock after that will + * have no effect on the fliplock pin. 
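+     *
+     * In short (illustrative):
+     *
+     *     flipLockPin = (!flipLock && !serverLock && !clientLock) ?
+     *                   UNSPECIFIED : pHC->flipLockPin;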
+ */ + if (!pHC->flipLock && + (pHC->serverLock == NV_EVO_NO_LOCK) && + (pHC->clientLock == NV_EVO_NO_LOCK)) { + + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _FLIP_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED); + } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->flipLockPin)) { + pin = pHC->flipLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _FLIP_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(pin)); + } else { + pin = pHC->flipLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _FLIP_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(pin)); + } + if (pHC->flipLock) { + data |= DRF_DEF(917D, _HEAD_SET_CONTROL, _FLIP_LOCK, _ENABLE); + } + + nvAssert(pHC->stereoPin != NV_EVO_LOCK_PIN_ERROR); + if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin)) { + /* + * dispClass_02.mfs says: + * "stereo pin, if internal, must be set to the corresponding internal + * pin for that head" (error check #14) + * So just ignore which pin we selected; no sense in wasting cycles + * keeping track of it + */ + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _STEREO_PIN, + NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(head)); + } else { + pin = pHC->stereoPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _STEREO_PIN, + NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(pin)); + } + + // Send the method. + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, data); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_LOCK_CHAIN(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(917D, _HEAD_SET_LOCK_CHAIN, _POSITION, + pHC->lockChainPosition)); +} + +static void EvoSetHeadRefClk90(NVDevEvoPtr pDevEvo, int head, NvBool external, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single subdevice */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_SW_SPARE_A(head), 1); + nvDmaSetEvoMethodData(pChannel, external ? 
+ DRF_DEF(907D, _HEAD_SET_SW_SPARE_A_CODE, _VPLL_REF, _GSYNC) : + DRF_DEF(907D, _HEAD_SET_SW_SPARE_A_CODE, _VPLL_REF, _NO_PREF)); +} + +static void EvoDACSetControl90(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + if (headMask != 0) { + nvAssert(protocol == NVKMS_PROTOCOL_DAC_RGB); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_DAC_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _DAC_SET_CONTROL, _OWNER_MASK, headMask) | + DRF_DEF(917D, _DAC_SET_CONTROL, _PROTOCOL, _RGB_CRT)); +} + +static void EvoSORSetControl90(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 hwProtocol = 0; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + nvAssert(orIndex != NV_INVALID_OR); + + if (headMask != 0) { + switch (protocol) { + default: + nvAssert(!"unexpected protocol"); + /* fallthrough */ + case NVKMS_PROTOCOL_SOR_LVDS_CUSTOM: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B; + break; + case NVKMS_PROTOCOL_SOR_DUAL_TMDS: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS; + break; + case NVKMS_PROTOCOL_SOR_DP_A: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A; + break; + case NVKMS_PROTOCOL_SOR_DP_B: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B; + break; + } + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_SOR_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _SOR_SET_CONTROL, _OWNER_MASK, headMask) | + DRF_NUM(917D, _SOR_SET_CONTROL, _PROTOCOL, hwProtocol) | + DRF_DEF(917D, _SOR_SET_CONTROL, _DE_SYNC_POLARITY, _POSITIVE_TRUE) | + DRF_DEF(917D, _SOR_SET_CONTROL, _PIXEL_REPLICATE_MODE, _OFF)); +} + +static void EvoPIORSetControl90(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + if (headMask != 0) { + nvAssert(protocol == NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_PIOR_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _PIOR_SET_CONTROL, _OWNER_MASK, headMask) | + DRF_DEF(917D, _PIOR_SET_CONTROL, _PROTOCOL, _EXT_TMDS_ENC) | + DRF_DEF(917D, _PIOR_SET_CONTROL, _DE_SYNC_POLARITY, _POSITIVE_TRUE)); +} + +static NvU32 EvoGetPixelDepth90(const enum nvKmsPixelDepth pixelDepth) +{ + switch (pixelDepth) { + case NVKMS_PIXEL_DEPTH_18_444: + return NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444; + case NVKMS_PIXEL_DEPTH_24_444: + return NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; + case NVKMS_PIXEL_DEPTH_30_444: + return NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444; + 
case NVKMS_PIXEL_DEPTH_16_422: + return NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422; + case NVKMS_PIXEL_DEPTH_20_422: + return NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422; + } + nvAssert(!"Unexpected pixel depth"); + return NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; +} + +static void EvoHeadSetControlOR90(NVDevEvoPtr pDevEvo, + const int head, + const NVHwModeTimingsEvo *pTimings, + const enum nvKmsPixelDepth pixelDepth, + const NvBool colorSpaceOverride, + NVEvoUpdateState *updateState) +{ + const NvU32 hwPixelDepth = EvoGetPixelDepth90(pixelDepth); + const NvU16 colorSpaceFlag = nvEvo1GetColorSpaceFlag(pDevEvo, + colorSpaceOverride); + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _CRC_MODE, _ACTIVE_RASTER) | + (pTimings->hSyncPol ? + DRF_DEF(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _POSITIVE_TRUE)) | + (pTimings->vSyncPol ? + DRF_DEF(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _POSITIVE_TRUE)) | + (colorSpaceOverride ? + (DRF_DEF(977D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _ENABLE) | + DRF_NUM(977D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_FLAG, colorSpaceFlag)) : + DRF_DEF(977D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _DISABLE)) | + DRF_NUM(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _PIXEL_DEPTH, hwPixelDepth)); +} + +static void EvoORSetControl90(NVDevEvoPtr pDevEvo, + const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask, + NVEvoUpdateState *updateState) +{ + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->core); + + switch (pConnectorEvo->or.type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC: + EvoDACSetControl90(pConnectorEvo, protocol, orIndex, headMask); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR: + EvoSORSetControl90(pConnectorEvo, protocol, orIndex, headMask); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR: + EvoPIORSetControl90(pConnectorEvo, protocol, orIndex, headMask); + break; + default: + nvAssert(!"Invalid pConnectorEvo->or.type"); + break; + } +} + +static void EvoHeadSetDisplayId90(NVDevEvoPtr pDevEvo, + const NvU32 head, const NvU32 displayId, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_DISPLAY_ID(head, 0), 1); + nvDmaSetEvoMethodData(pChannel, displayId); +} + +static NvBool EvoSetUsageBounds90(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVEvoSubDevHeadStateRec *pCurrentFlipState = + &pDevEvo->gpus[sd].headState[head]; + const struct NvKmsUsageBounds *pCurrentUsage = + 
        &pCurrentFlipState->usage;
+    NvU64 overlayFormats = 0;
+    NvU32 baseUsage = 0, overlayUsage = 0;
+    const NVSurfaceEvoRec *pCurrentBaseSurf =
+        pCurrentFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT];
+    const NVSurfaceEvoRec *pCurrentOverlaySurf =
+        pCurrentFlipState->layer[NVKMS_OVERLAY_LAYER].pSurfaceEvo[NVKMS_LEFT];
+
+    /* These methods should only apply to a single pDpy */
+    nvAssert(pDevEvo->subDevMaskStackDepth > 0);
+
+    if (UsageBoundsEqual(pCurrentUsage, pUsage)) {
+        return FALSE;
+    }
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+
+    /*
+     * Make sure to interlock the usage bounds update with the base and
+     * overlay channel updates; otherwise it ends up violating the hardware
+     * error check for base/overlay channel blocking:
+     *
+     * // check for blocking violations
+     * for (vlt_index = 0; vlt_index < NV_CHIP_DISP_TOTAL_HEADS_PRESENT_por; vlt_index++) {
+     *     if ((wir_BlockBase[vlt_index] == TRUE)
+     *         && (wir_BaseQuiescent[vlt_index] == FALSE)
+     *         && ((ecv_GlobalHeadConnected[vlt_index] == TRUE) || (pri_ErrcheckWhenDisconnected == TRUE)))
+     *         throw (vlt_index << NV_DISP_CORE_STATE_ERROR_HEAD_INDEX_SHIFT) | NV_DISP_CORE_STATE_ERROR_001;
+     * }
+     *
+     * for (vlt_index = 0; vlt_index < NV_CHIP_DISP_TOTAL_HEADS_PRESENT_por; vlt_index++) {
+     *     if ((wir_BlockOverlay[vlt_index] == TRUE)
+     *         && (wir_OverlayQuiescent[vlt_index] == FALSE)
+     *         && ((ecv_GlobalHeadConnected[vlt_index] == TRUE) || (pri_ErrcheckWhenDisconnected == TRUE)))
+     *         throw (vlt_index << NV_DISP_CORE_STATE_ERROR_HEAD_INDEX_SHIFT) | NV_DISP_CORE_STATE_ERROR_002;
+     * }
+     */
+
+    if (pCurrentBaseSurf != NULL &&
+        !nvEvoLayerUsageBoundsEqual(pUsage, pCurrentUsage, NVKMS_MAIN_LAYER)) {
+        nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->base[head]);
+    }
+
+    if (pCurrentOverlaySurf != NULL &&
+        !nvEvoLayerUsageBoundsEqual(pUsage, pCurrentUsage, NVKMS_OVERLAY_LAYER)) {
+        nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->overlay[head]);
+    }
+
+    if (pUsage->layer[NVKMS_MAIN_LAYER].usable) {
+        baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, _USABLE,
+                             _TRUE);
+
+        if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &
+            NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) {
+            baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS,
+                                 _PIXEL_DEPTH, _BPP_64);
+        } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &
+                   NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) {
+            baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS,
+                                 _PIXEL_DEPTH, _BPP_32);
+        } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &
+                   NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) {
+            baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS,
+                                 _PIXEL_DEPTH, _BPP_16);
+        } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &
+                   NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) {
+            baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS,
+                                 _PIXEL_DEPTH, _BPP_8);
+        } else {
+            nvAssert(!"Unexpected base pixel depth");
+            return FALSE;
+        }
+
+        baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS,
+                             _SUPER_SAMPLE, _X1_AA);
+        baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS,
+                             _OUTPUT_LUT, _USAGE_1025);
+    }
+
+    overlayUsage |= pUsage->layer[NVKMS_OVERLAY_LAYER].usable ?
+        DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, _USABLE, _TRUE) :
+        DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, _USABLE, _FALSE);
+
+    overlayFormats = pUsage->layer[NVKMS_OVERLAY_LAYER].usable ?
+ pUsage->layer[NVKMS_OVERLAY_LAYER].supportedSurfaceMemoryFormats : + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP; + + if (overlayFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + overlayUsage |= DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_32); + } else if (overlayFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + overlayUsage |= DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_16); + } else { + nvAssert(!"Unsupported overlay depth"); + overlayUsage |= DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_16); + } + + overlayUsage |= DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, + _OVERLAY_LUT, _USAGE_1025); + + nvDmaSetStartEvoMethod(pChannel, + NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(head), 2); + nvDmaSetEvoMethodData(pChannel, baseUsage); + nvDmaSetEvoMethodData(pChannel, overlayUsage); + + return TRUE; +} + +static void EvoSetNotifierMethods90(NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + NvBool notify, + NvBool awaken, + NvU32 notifier) +{ + ASSERT_DRF_NUM(917D, _SET_NOTIFIER_CONTROL, _OFFSET, notifier); + + if (notify) { + NvU32 sd; + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)) { + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + nvDmaSetStartEvoMethod(pChannel, + NV917D_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, + _SET_CONTEXT_DMA_NOTIFIER, + _HANDLE, + pDevEvo->core->notifiersDma[sd].surfaceDesc.ctxDmaHandle)); + nvPopEvoSubDevMask(pDevEvo); + } + } + } else { + nvDmaSetStartEvoMethod(pChannel, + NV917D_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _SET_CONTEXT_DMA_NOTIFIER, _HANDLE, 0)); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _SET_NOTIFIER_CONTROL, _OFFSET, notifier) | + (awaken ? + DRF_DEF(917D, _SET_NOTIFIER_CONTROL, _MODE, _WRITE_AWAKEN) : + DRF_DEF(917D, _SET_NOTIFIER_CONTROL, _MODE, _WRITE)) | + (notify ? 
+ DRF_DEF(917D, _SET_NOTIFIER_CONTROL, _NOTIFY, _ENABLE) : + DRF_DEF(917D, _SET_NOTIFIER_CONTROL, _NOTIFY, _DISABLE))); +} + +static void UpdateCore9x(NVEvoChannelPtr pChannel, + NVEvoChannelMask interlockChannelMask) +{ + NvU32 head, value = 0; + + ct_assert(NV_EVO_CHANNEL_MASK_BASE__SIZE == + NV_EVO_CHANNEL_MASK_OVERLAY__SIZE); + for (head = 0; head < NV_EVO_CHANNEL_MASK_BASE__SIZE; head++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE, + interlockChannelMask)) { + value |= DRF_IDX_DEF(917D, _UPDATE, + _INTERLOCK_WITH_BASE, head, _ENABLE); + } + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _OVERLAY, head, _ENABLE, + interlockChannelMask)) { + value |= DRF_IDX_DEF(917D, _UPDATE, + _INTERLOCK_WITH_OVERLAY, head, _ENABLE); + } + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, value); + + nvDmaKickoffEvo(pChannel); +} + +static void UpdateBase91(NVEvoChannelPtr pChannel, + NvBool interlockWithCore, + NvBool vrrTearing) +{ + NvU32 updateValue = 0; + NvU32 trapParam = 0; + + if (interlockWithCore) { + updateValue |= DRF_DEF(917C, _UPDATE, _INTERLOCK_WITH_CORE, _ENABLE); + } + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SPARE_PRE_UPDATE_TRAP, 1); + nvDmaSetEvoMethodData(pChannel, trapParam); + + nvDmaSetStartEvoMethod(pChannel, NV917C_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, updateValue); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SPARE_POST_UPDATE_TRAP, 1); + nvDmaSetEvoMethodData(pChannel, trapParam); + + nvDmaKickoffEvo(pChannel); +} + +static void UpdateOverlay9x(NVEvoChannelPtr pChannel, + NvBool interlockWithCore) +{ + NvU32 value = 0; + + if (interlockWithCore) { + value |= DRF_DEF(917E, _UPDATE, _INTERLOCK_WITH_CORE, _ENABLE); + } + + nvDmaSetStartEvoMethod(pChannel, NV917E_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, value); + + nvDmaKickoffEvo(pChannel); +} + +static void EvoUpdate91(NVDevEvoPtr pDevEvo, + const NVEvoUpdateState *updateState, + NvBool releaseElv) +{ + NvU32 sd; + NVEvoChannelMask fliplockedBaseChannels[NVKMS_MAX_SUBDEVICES] = { }; + NvBool updateAllFliplockedBaseChannels = FALSE; + + /* + * Multiple 'base + core channel interlocked' updates can create deadlock + * if heads are flip locked. + * + * For example - if head-0 and head-1 are flip locked and you initiate two + * 'base + core channel interlocked' updates separately for each of + * the head then that creates deadlock: + * + * + * +--------+ +--------+ +--------+ + * | BASE-0 | | CORE | | BASE-1 | + * +--------+ +--------+ +--------+ + * | | | | | | + * | | | | | | + * +--------+------+--------+ | | + * | INTERLOCKED | | | + * | UPDATE-0 | | | + * +--------+------+--------+ | | + * | Base | | Core | | | + * <...| update |<.... | Update | | | + * : | for | | for | | | + * : | head-0 | | head-0 | | | + * : +--------+------+--------+ | | + * : | | | ^ | | | + * : | | | : | | | + * : +--------+ | : | | | + * : | : | | | + * : +---(----+------+--------+ + * : | : INTERLOCKED | + * : | : UPDATE-1 | + * : +--------+------+--------+ + * V | Core | | Base | + * : | update |<.... | Update | + * : | for | | for |<... + * : | head-1 | | head-1 | : + * : +--------+------+--------+ : + * : | | | | ^ + * : +--------+ +--------+ : + * : : + * V...................>............................> + * + * ^ + * | + * | + * [ BASE-0 and BASE-1 are fliplocked ] + * + * Here you can follow the dotted arrow line and see how deadlock + * has been formed. The dotted arrow line indicates the execution + * dependency of the one update onto another, e.g. 
the core update
+     * for head-1 can't be executed until the core update for head-0 has
+     * been executed.
+     *
+     * To prevent this deadlock, initiate base channel updates for all
+     * flip-locked heads whenever the update state contains a 'base + core
+     * channel interlocked' update for any flip-locked head.
+     */
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        NVEvoChannelMask updateChannelMask =
+            updateState->subdev[sd].channelMask;
+        NVEvoChannelMask interlockChannelMask =
+            updateChannelMask & ~updateState->subdev[sd].noCoreInterlockMask;
+        NvU32 head;
+
+        for (head = 0; head < NV_EVO_CHANNEL_MASK_BASE__SIZE; head++) {
+            NVEvoChannelMask thisMask =
+                DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE);
+            NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
+            NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head];
+
+            if (pHC->flipLock) {
+                fliplockedBaseChannels[sd] |= thisMask;
+            }
+
+            /*
+             * If this update is updating only one base channel without any
+             * core interlock, we don't need to also update all flip-locked
+             * base channels.
+             */
+            if (NV_EVO_CHANNEL_MASK_POPCOUNT(interlockChannelMask) <= 1 &&
+                !FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE,
+                                updateChannelMask)) {
+                continue;
+            }
+
+            if ((updateChannelMask & thisMask) != 0x0 && pHC->flipLock) {
+                updateAllFliplockedBaseChannels = TRUE;
+            }
+        }
+    }
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        NVEvoChannelMask updateChannelMask =
+            updateState->subdev[sd].channelMask |
+            (updateAllFliplockedBaseChannels ? fliplockedBaseChannels[sd] : 0x0);
+        NVEvoChannelMask interlockChannelMask =
+            updateChannelMask & ~updateState->subdev[sd].noCoreInterlockMask;
+        NvBool interlockWithCore = FALSE;
+        const NvU32 subDeviceMask = (1 << sd);
+        NvU32 head;
+
+        nvPushEvoSubDevMask(pDevEvo, subDeviceMask);
+
+        if (NV_EVO_CHANNEL_MASK_POPCOUNT(interlockChannelMask) > 1) {
+            /* We can only interlock updates if core is included.
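+             * Force the core channel into both the update mask and the
+             * interlock mask here; the nvAssert() below checks that the
+             * caller didn't ask to exclude core from the interlock on this
+             * subdevice.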
*/ + nvAssert(!FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + updateState->subdev[sd].noCoreInterlockMask)); + updateChannelMask |= DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE); + interlockChannelMask |= + DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE); + interlockWithCore = TRUE; + } + + if (FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + updateChannelMask)) { + UpdateCore9x(pDevEvo->core, updateChannelMask); + } + + for (head = 0; head < NV_EVO_CHANNEL_MASK_OVERLAY__SIZE; head++) { + NVEvoChannelMask thisMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _OVERLAY, head, _ENABLE); + if (updateChannelMask & thisMask) { + NvBool thisInterlockWithCore = interlockWithCore && + (interlockChannelMask & thisMask); + UpdateOverlay9x(pDevEvo->overlay[head], + thisInterlockWithCore); + } + } + + for (head = 0; head < NV_EVO_CHANNEL_MASK_BASE__SIZE; head++) { + NVEvoChannelMask thisMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE); + if (updateChannelMask & thisMask) { + NvBool thisInterlockWithCore = interlockWithCore && + (interlockChannelMask & thisMask); + NvBool vrrTearing = + updateState->subdev[sd].base[head].vrrTearing; + + UpdateBase91(pDevEvo->base[head], + thisInterlockWithCore, vrrTearing); + } + } + + nvPopEvoSubDevMask(pDevEvo); + } +} + +static void EvoSetNotifier90(NVDevEvoRec *pDevEvo, + const NvBool notify, + const NvBool awaken, + const NvU32 notifier, + NVEvoUpdateState *updateState) +{ + /* These methods should only apply to a single subdevice */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->core); + + EvoSetNotifierMethods90(pDevEvo, pDevEvo->core, notify, awaken, notifier); +} + +/* + * Returns the data for the SET_STORAGE method. The method data + * format is the same between classes 90[CDE]. + */ +static NvU32 EvoComputeSetStorage90(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo) +{ + NvU32 setStorage; + + NvU32 pitch = nvEvoGetHeadSetStoragePitchValue( + pDevEvo, + pSurfaceEvo->layout, + pSurfaceEvo->planes[0].pitch); + nvAssert(pitch != 0); + + if (pSurfaceEvo->layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + // 1 block = 1 * X Gobs; 1 Gob = 64B * 4Lines; X = 1 << + // blockHeightLog2Gobs + NvU32 blockHeight = pSurfaceEvo->log2GobsPerBlockY; + + setStorage = DRF_NUM(917D, _HEAD_SET_STORAGE, _BLOCK_HEIGHT, blockHeight) | + DRF_DEF(917D, _HEAD_SET_STORAGE, _MEMORY_LAYOUT, _BLOCKLINEAR); + } else { + setStorage = DRF_DEF(917D, _HEAD_SET_STORAGE, _MEMORY_LAYOUT, _PITCH); + } + + ASSERT_DRF_NUM(917D, _HEAD_SET_STORAGE, _PITCH, pitch); + setStorage |= DRF_NUM(917D, _HEAD_SET_STORAGE, _PITCH, pitch); + + return setStorage; +} + +static void SetCscMatrix(NVEvoChannelPtr pChannel, NvU32 method, + const struct NvKmsCscMatrix *matrix, + NvU32 extraFirstWordBits) +{ + int y; + + // The _COEFF fields are the same across all of the methods on all + // channels. + ct_assert(DRF_SHIFTMASK(NV917C_SET_CSC_RED2RED_COEFF) == + DRF_SHIFTMASK(NV917D_HEAD_SET_CSC_RED2RED_COEFF)); + ct_assert(DRF_SHIFTMASK(NV917C_SET_CSC_RED2RED_COEFF) == + DRF_SHIFTMASK(NV917E_SET_CSC_RED2RED_COEFF)); + + for (y = 0; y < 3; y++) { + int x; + + for (x = 0; x < 4; x++) { + // Use DRF_NUM to truncate client-supplied values that are out of + // range. 
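+            // (DRF_NUM masks the value to the field's width before
+            // shifting it into place, so an out-of-range coefficient
+            // merely loses its high bits rather than corrupting the
+            // neighboring fields of the method word.)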
+ NvU32 val = DRF_NUM(917C, _SET_CSC_RED2RED, _COEFF, + matrix->m[y][x]); + + if (x == 0 && y == 0) { + val |= extraFirstWordBits; + } + + nvDmaSetStartEvoMethod(pChannel, method, 1); + nvDmaSetEvoMethodData(pChannel, val); + + method += 4; + } + } +} + +/* + * These values are the same between all base + * (_SURFACE_SET_PARAMS_FORMAT_) and core (_HEAD_SET_PARAMS_FORMAT_) + * EVO classes. + * + * Return 0 in the case of an unrecognized NvKmsSurfaceMemoryFormat. + */ +static NvU32 nvHwFormatFromKmsFormat90( + const enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatI8: + return NV917D_HEAD_SET_PARAMS_FORMAT_I8; + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + return NV917D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5; + case NvKmsSurfaceMemoryFormatR5G6B5: + return NV917D_HEAD_SET_PARAMS_FORMAT_R5G6B5; + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + return NV917D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8; + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + return NV917D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8; + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + return NV917D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10; + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + case NvKmsSurfaceMemoryFormatRF16GF16BF16XF16: + return NV917D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16; + case NvKmsSurfaceMemoryFormatR16G16B16A16: + return NV917D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16; + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + return 0; + } + + return 0; +} + +static void EvoSetSurface(NVDevEvoPtr pDevEvo, + const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + const struct NvKmsCscMatrix *pCscMatrix, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 sd; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + FOR_EACH_SUBDEV_IN_MASK(sd, nvPeekEvoSubDevMask(pDevEvo)) { + /* + * The EVO2 ->SetCursorImage() function programs cursor image surface + * only if NVEvoSubDeviceRec::pCoreChannelSurface is non-null. + */ + pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head] = pSurfaceEvo; + } FOR_EACH_SUBDEV_IN_MASK_END + + if (!pSurfaceEvo) { + // Disable surface scanout on this head. It will scan out the default + // base color instead. 
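+        // (Writing a context DMA handle of 0 detaches the ISO surface;
+        // the default base color itself was programmed to black via
+        // HEAD_SET_DEFAULT_BASE_COLOR in EvoInitChannel90.)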
+ nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMAS_ISO(head), 1); + nvDmaSetEvoMethodData(pChannel, 0); + return; + } + + nvAssert(pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle); + + // XXX[AGP]: These methods are sequential, but sending them with a single + // count=7 method header sometimes causes EVO to throw an IsoViolation + // exception. + + // Set the surface parameters. + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_OFFSET(head), 1); + nvDmaSetEvoMethodData(pChannel, + nvCtxDmaOffsetFromBytes(pSurfaceEvo->planes[0].offset)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_SIZE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_SIZE, _WIDTH, pSurfaceEvo->widthInPixels) | + DRF_NUM(917D, _HEAD_SET_SIZE, _HEIGHT, pSurfaceEvo->heightInPixels)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_STORAGE(head), 1); + nvDmaSetEvoMethodData(pChannel, EvoComputeSetStorage90(pDevEvo, pSurfaceEvo)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PARAMS(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_PARAMS, _FORMAT, + nvHwFormatFromKmsFormat90(pSurfaceEvo->format)) | + DRF_DEF(917D, _HEAD_SET_PARAMS, _SUPER_SAMPLE, _X1_AA) | + DRF_DEF(917D, _HEAD_SET_PARAMS, _GAMMA, _LINEAR)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMAS_ISO(head), 1); + nvDmaSetEvoMethodData(pChannel, pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle); + + /* NULL => don't change the CSC. */ + if (pCscMatrix) { + SetCscMatrix(pChannel, NV917D_HEAD_SET_CSC_RED2RED(head), pCscMatrix, 0); + } +} + +static void +SetPresentControlBase(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState) +{ + NvU32 presentControl = + DRF_NUM(917C, _SET_PRESENT_CONTROL, _MIN_PRESENT_INTERVAL, + pHwState->minPresentInterval); + + if (pHwState->tearing) { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, _BEGIN_MODE, + _IMMEDIATE, presentControl); + /* + * This avoids an invalid state exception: + * + * if ((SetPresentControl.BeginMode != NON_TEARING) && + * (SetPresentControl.BeginMode != AT_FRAME) + * && (wir_InterlockWithCore == ENABLE)) + * throw NV_DISP_BASE_STATE_ERROR_001; + */ + nvDisableCoreInterlockUpdateState(pDevEvo, updateState, pChannel); + } else { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, _BEGIN_MODE, + _NON_TEARING, presentControl); + } + + if (pHwState->pSurfaceEvo[NVKMS_RIGHT]) { + if (pHwState->perEyeStereoFlip) { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _STEREO_FLIP_MODE, _AT_ANY_FRAME, + presentControl); + } else { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _STEREO_FLIP_MODE, _PAIR_FLIP, + presentControl); + } + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _MODE, _STEREO, presentControl); + } else { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _MODE, _MONO, presentControl); + } + + // If we have a non-zero timestamp we need to enable timestamp mode + if (pHwState->timeStamp == 0) { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _TIMESTAMP_MODE, _DISABLE, presentControl); + } else { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _TIMESTAMP_MODE, _ENABLE, presentControl); + } + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_TIMESTAMP_ORIGIN_LO, 2); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_UPDATE_TIMESTAMP_LO, 2); + nvDmaSetEvoMethodData(pChannel, 
NvU64_LO32(pHwState->timeStamp));
+    nvDmaSetEvoMethodData(pChannel, NvU64_HI32(pHwState->timeStamp));
+
+    nvDmaSetStartEvoMethod(pChannel, NV917C_SET_PRESENT_CONTROL, 1);
+    nvDmaSetEvoMethodData(pChannel, presentControl);
+}
+
+static void EvoSetBaseInputLut(NVDevEvoPtr pDevEvo,
+                               NvU32 sd, NvU32 head,
+                               const NVFlipLutHwState *pInputLut,
+                               NvBool enable,
+                               NVEvoUpdateState *updateState)
+{
+    /*
+     * Program input LUT on the core channel, but output LUT on the base
+     * channel, so LUT surfaces can be split. The input LUT must be on the core
+     * channel so that I8 surfaces don't fail the error check.
+     */
+    NVEvoChannelPtr pChannel = pDevEvo->core;
+    NvBool enableLut = enable && pInputLut->pLutSurfaceEvo != NULL;
+    NvU64 offset = enableLut ? pInputLut->offset : 0;
+    NvU32 ctxdma = enableLut ?
+        pInputLut->pLutSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle : 0;
+
+    const NVSurfaceEvoRec *pOldSurface =
+        pDevEvo->pSubDevices[sd]->pBaseLutSurface[head];
+    NvBool oldEnableLut = (pOldSurface != NULL);
+    NvU64 oldOffset = oldEnableLut ?
+        pDevEvo->pSubDevices[sd]->baseLutOffset[head] : 0;
+    NvU32 oldCtxdma = oldEnableLut ?
+        pOldSurface->planes[0].surfaceDesc.ctxDmaHandle : 0;
+
+    nvAssert((offset & 0xff) == 0);
+    nvAssert((oldOffset & 0xff) == 0);
+
+    if ((enableLut == oldEnableLut) &&
+        (ctxdma == oldCtxdma) &&
+        (offset == oldOffset)) {
+        return;
+    }
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+
+    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_BASE_LUT_LO(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        (enableLut ? DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _ENABLE, _ENABLE) :
+                     DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _ENABLE, _DISABLE)) |
+        DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _MODE, _INTERPOLATE_1025_UNITY_RANGE) |
+        DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _NEVER_YIELD_TO_BASE, _DISABLE));
+    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_BASE_LUT_HI(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(917D, _HEAD_SET_BASE_LUT_HI, _ORIGIN, offset >> 8));
+
+    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMA_LUT(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(917D, _HEAD_SET_CONTEXT_DMA_LUT, _HANDLE, ctxdma));
+
+    /*
+     * Use this backdoor to disable "wide pipe" underreplication during
+     * expansion of color components into the display pipe.
+     * Underreplication of a non-zero 8-bit color to more than 8 bits
+     * causes lookups to fall between LUT entries in interpolating LUTs.
+     * See bug 734919 for details. However, we use
+     * INDEX_1025_UNITY_RANGE mode for the ILUT, so no interpolation
+     * occurs.
+     * The "wide pipe" may also cause scanout of 8-bit data to an 8-bit
+     * OR to not be a straight passthrough (bug 895401).
+     */
+    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CRC_CONTROL(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _PRIMARY_OUTPUT, _NONE) |
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _SECONDARY_OUTPUT, _NONE) |
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _WIDE_PIPE_CRC, _DISABLE));
+
+    pDevEvo->pSubDevices[sd]->pBaseLutSurface[head] = enableLut ?
+        pInputLut->pLutSurfaceEvo : NULL;
+    pDevEvo->pSubDevices[sd]->baseLutOffset[head] = offset;
+}
+
+static void EvoSetOverlayInputLut(NVDevEvoPtr pDevEvo,
+                                  NVEvoChannelPtr pChannel,
+                                  const NVFlipLutHwState *pInputLut,
+                                  NvBool enable)
+{
+    NvBool enableLut = enable && pInputLut->pLutSurfaceEvo != NULL;
+    NvU64 offset = enableLut ? pInputLut->offset : 0;
+    NvU32 ctxdma = enableLut ?
+ pInputLut->pLutSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle : 0; + + nvAssert((offset & 0xff) == 0); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_OVERLAY_LUT_LO, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917E, _SET_OVERLAY_LUT_LO, _MODE, _INDEX_1025_UNITY_RANGE) | + (enableLut ? DRF_DEF(917E, _SET_OVERLAY_LUT_LO, _ENABLE, _ENABLE) : + DRF_DEF(917E, _SET_OVERLAY_LUT_LO, _ENABLE, _DISABLE))); + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_OVERLAY_LUT_HI, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SET_OVERLAY_LUT_HI, _ORIGIN, offset >> 8)); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMA_LUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SET_CONTEXT_DMA_LUT, _HANDLE, ctxdma)); +} + +static void EvoSetOutputLut(NVDevEvoPtr pDevEvo, + NvU32 sd, NvU32 head, + const NVFlipLutHwState *pOutputLut, + NvBool enable, + NVEvoUpdateState *updateState) +{ + /* + * Program input LUT on the core channel, but output LUT on the base + * channel, so LUT surfaces can be split. The input LUT must be on the core + * channel so that I8 surfaces don't fail the error check. + */ + NVEvoChannelPtr pChannel = pDevEvo->base[head]; + NvBool enableLut = enable && pOutputLut->pLutSurfaceEvo != NULL; + NvU64 offset = enableLut ? pOutputLut->offset : 0; + NvU32 ctxdma = enableLut ? + pOutputLut->pLutSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle : 0; + + const NVSurfaceEvoRec *pOldSurface = pDevEvo->pSubDevices[sd]->pOutputLutSurface[head]; + NvBool oldEnableLut = (pOldSurface != NULL); + NvU64 oldOffset = oldEnableLut ? + pDevEvo->pSubDevices[sd]->outputLutOffset[head] : 0; + NvU32 oldCtxdma = oldEnableLut ? + pOldSurface->planes[0].surfaceDesc.ctxDmaHandle : 0; + + NVFlipChannelEvoHwState *pBaseHwState = + &pDevEvo->gpus[sd].headState[head].layer[NVKMS_MAIN_LAYER]; + + nvAssert((offset & 0xff) == 0); + nvAssert((oldOffset & 0xff) == 0); + + if ((enableLut == oldEnableLut) && + (ctxdma == oldCtxdma) && + (offset == oldOffset)) { + return; + } + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* + * If we're changing the OLUT, this update will need to interlock the base + * and core channels. The caller must set the main layer on the head to + * non-tearing. This function may end up being called without a flip on the + * base layer if it's not dirty, so ensure that we set the present mode + * here. + */ + nvAssert(!pBaseHwState->tearing); + SetPresentControlBase(pDevEvo, pChannel, pBaseHwState, updateState); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_OUTPUT_LUT_LO, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917C, _SET_OUTPUT_LUT_LO, _MODE, _INDEX_1025_UNITY_RANGE) | + (enableLut ? DRF_DEF(917C, _SET_OUTPUT_LUT_LO, _ENABLE, _ENABLE) : + DRF_DEF(917C, _SET_OUTPUT_LUT_LO, _ENABLE, _DISABLE))); + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_OUTPUT_LUT_HI, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917C, _SET_OUTPUT_LUT_HI, _ORIGIN, offset >> 8)); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMA_LUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917C, _SET_CONTEXT_DMA_LUT, _HANDLE, ctxdma)); + + pDevEvo->pSubDevices[sd]->pOutputLutSurface[head] = enableLut ? 
+ pOutputLut->pLutSurfaceEvo : NULL; + pDevEvo->pSubDevices[sd]->outputLutOffset[head] = offset; +} + +static void EvoSetOutputLut90(NVDevEvoPtr pDevEvo, + NvU32 sd, NvU32 head, + const NVFlipLutHwState *pOutputLut, + NvU32 fpNormScale, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + NVFlipChannelEvoHwState *pBaseHwState = + &pDevEvo->gpus[sd].headState[head].layer[NVKMS_MAIN_LAYER]; + + EvoSetOutputLut(pDevEvo, sd, head, pOutputLut, + pBaseHwState->pSurfaceEvo[NVKMS_LEFT] != NULL, + updateState); +} + +static void +EvoPushSetCoreSurfaceMethodsForOneSd(NVDevEvoRec *pDevEvo, + const NvU32 sd, + const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + const struct NvKmsCscMatrix *pCscMatrix, + NVEvoUpdateState *updateState) +{ + const NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[sd].headState[head]; + const NVFlipCursorEvoHwState *pSdCursorState = &pSdHeadState->cursor; + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + + EvoSetSurface(pDevEvo, head, pSurfaceEvo, pCscMatrix, updateState); + + EvoSetCursorImage(pDevEvo, + head, + pSurfaceEvo != NULL ? + pSdCursorState->pSurfaceEvo : NULL, + updateState, + &pSdCursorState->cursorCompParams); + + /* If we're disabling the core surface, we need to disable the LUTs. */ + if (pSurfaceEvo == NULL) { + EvoSetBaseInputLut(pDevEvo, sd, head, + NULL, FALSE, + updateState); + EvoSetOutputLut(pDevEvo, sd, head, + NULL, FALSE, + updateState); + + } + + nvPopEvoSubDevMask(pDevEvo); +} + +static void +FlipBase90(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState) +{ + int eye; + + /* program notifier */ + + if (pHwState->completionNotifier.surface.pSurfaceEvo == NULL) { + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->completionNotifier.surface; + NvU32 value = 0; + + if (pNIso->format == NVKMS_NISO_FORMAT_LEGACY) { + value = FLD_SET_DRF(917C, _SET_NOTIFIER_CONTROL, _FORMAT, + _LEGACY, value); + } else { + value = FLD_SET_DRF(917C, _SET_NOTIFIER_CONTROL, _FORMAT, + _FOUR_WORD, value); + } + + value = FLD_SET_DRF_NUM(917C, _SET_NOTIFIER_CONTROL, _OFFSET, + pNIso->offsetInWords, value); + + value = FLD_SET_DRF_NUM(917C, _SET_NOTIFIER_CONTROL, _MODE, + pHwState->completionNotifier.awaken ? 
+ NV917C_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN : + NV917C_SET_NOTIFIER_CONTROL_MODE_WRITE, value); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, value); + } + + /* program semaphore */ + nvAssertSameSemaphoreSurface(pHwState); + + if (pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo == NULL) { + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_ACQUIRE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->syncObject.u.semaphores.acquireSurface; + NvU32 value = 0; + + if (pNIso->format == NVKMS_NISO_FORMAT_LEGACY) { + value = FLD_SET_DRF(917C, _SET_SEMAPHORE_CONTROL, _FORMAT, + _LEGACY, value); + } else { + value = FLD_SET_DRF(917C, _SET_SEMAPHORE_CONTROL, _FORMAT, + _FOUR_WORD, value); + } + + value = FLD_SET_DRF_NUM(917C, _SET_SEMAPHORE_CONTROL, _OFFSET, + pNIso->offsetInWords, value); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_ACQUIRE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.acquireValue); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.releaseValue); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, value); + } + + if (!pHwState->pSurfaceEvo[NVKMS_LEFT]) { + nvAssert(!pHwState->pSurfaceEvo[NVKMS_RIGHT]); + + // Disable base on this head. + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMAS_ISO(0), 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMAS_ISO(1), 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_PRESENT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CSC_RED2RED, 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(917C, _SET_CSC_RED2RED, _OWNER, _CORE)); + + return; + } + + SetPresentControlBase(pDevEvo, pChannel, pHwState, updateState); + + SetCscMatrix(pChannel, NV917C_SET_CSC_RED2RED, &pHwState->cscMatrix, + DRF_DEF(917C, _SET_CSC_RED2RED, _OWNER, _BASE)); + + // Set the surface parameters. 
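+    //
+    // Only SURFACE_SET_OFFSET and SET_CONTEXT_DMAS_ISO are programmed per
+    // eye; SIZE, STORAGE, and PARAMS are sent once using the LEFT eye's
+    // values. The ASSERT_EYES_MATCH()/EvoComputeSetStorage90() assertions
+    // below are what make that reuse safe for stereo.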
+ FOR_ALL_EYES(eye) { + NvU32 ctxdma = 0; + NvU64 offset = 0; + + if (pHwState->pSurfaceEvo[eye]) { + ctxdma = pHwState->pSurfaceEvo[eye]->planes[0].surfaceDesc.ctxDmaHandle; + offset = pHwState->pSurfaceEvo[eye]->planes[0].offset; + } + + nvDmaSetStartEvoMethod(pChannel, NV917C_SURFACE_SET_OFFSET(0, eye), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917C, _SURFACE_SET_OFFSET, _ORIGIN, + nvCtxDmaOffsetFromBytes(offset))); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMAS_ISO(eye), 1); + nvDmaSetEvoMethodData(pChannel, ctxdma); + } + + ASSERT_EYES_MATCH(pHwState->pSurfaceEvo, widthInPixels); + ASSERT_EYES_MATCH(pHwState->pSurfaceEvo, heightInPixels); + nvDmaSetStartEvoMethod(pChannel, NV917C_SURFACE_SET_SIZE(0), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917C, _SURFACE_SET_SIZE, _WIDTH, + pHwState->pSurfaceEvo[NVKMS_LEFT]->widthInPixels) | + DRF_NUM(917C, _SURFACE_SET_SIZE, _HEIGHT, + pHwState->pSurfaceEvo[NVKMS_LEFT]->heightInPixels)); + + nvAssert(pHwState->sizeIn.width == pHwState->pSurfaceEvo[NVKMS_LEFT]->widthInPixels); + nvAssert(pHwState->sizeIn.height == pHwState->pSurfaceEvo[NVKMS_LEFT]->heightInPixels); + nvAssert(pHwState->sizeIn.width == pHwState->sizeOut.width); + nvAssert(pHwState->sizeIn.height == pHwState->sizeOut.height); + + nvAssert(!pHwState->pSurfaceEvo[NVKMS_RIGHT] || + (EvoComputeSetStorage90(pDevEvo, pHwState->pSurfaceEvo[NVKMS_LEFT]) == + EvoComputeSetStorage90(pDevEvo, pHwState->pSurfaceEvo[NVKMS_RIGHT]))); + nvDmaSetStartEvoMethod(pChannel, NV917C_SURFACE_SET_STORAGE(0), 1); + nvDmaSetEvoMethodData(pChannel, EvoComputeSetStorage90(pDevEvo, pHwState->pSurfaceEvo[NVKMS_LEFT])); + + ASSERT_EYES_MATCH(pHwState->pSurfaceEvo, format); + nvDmaSetStartEvoMethod(pChannel, NV917C_SURFACE_SET_PARAMS(0), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917C, _SURFACE_SET_PARAMS, _FORMAT, + nvHwFormatFromKmsFormat90(pHwState->pSurfaceEvo[NVKMS_LEFT]->format)) | + DRF_DEF(917C, _SURFACE_SET_PARAMS, _SUPER_SAMPLE, _X1_AA) | + DRF_DEF(917C, _SURFACE_SET_PARAMS, _GAMMA, _LINEAR)); +} + +static void +FlipOverlay90(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NvBool *pInterlockwithCore) +{ + const NvU32 head = + NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(pChannel->channelMask); + const NVSurfaceEvoRec *pSurfaceEvo = pHwState->pSurfaceEvo[NVKMS_LEFT]; + NvU32 value; + NvU32 sd; + + /* Overlay class 917E can't do stereo */ + nvAssert(!pHwState->pSurfaceEvo[NVKMS_RIGHT]); + + /* + * The NVKMS driver enforces these conditions on its clients: 1) enable a + * core-surface before enabling an overlay-surface, 2) disable an + * overlay-surface before disabling a core-surface. + * + * Updates to enable/disable a core and an overlay surface execute + * separately and are not interlocked. To avoid a race condition between a + * core and an overlay channel, detect an overlay channel update which is + * enabling/disabling an overlay-surface and interlock that update with a + * core channel update. + * + * This makes sure that an update to disable an overlay-surface interlocked + * with a core channel and a follow-on update to disable the core-surface + * will wait for the previous overlay flip to complete. It also makes sure + * that an update to enable an overlay-surface will wait for the previous + * core channel flip to complete. 
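+     *
+     * Concretely (illustrative): interlock with core exactly when the
+     * overlay ISO context DMA transitions between NULL and non-NULL, i.e.
+     *
+     *     interlock |= (prevCtxDmaIso != 0) != (pSurfaceEvo != NULL);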
+ */ + + FOR_EACH_SUBDEV_IN_MASK(sd, nvPeekEvoSubDevMask(pDevEvo)) { + NvBool prevCtxDmaIso = + pDevEvo->pSubDevices[sd]->overlayContextDmaIso[head]; + + if ((prevCtxDmaIso != 0x0 && pSurfaceEvo == NULL) || + (prevCtxDmaIso == 0x0 && pSurfaceEvo != NULL)) { + *pInterlockwithCore = TRUE; + } + + if (pSurfaceEvo != NULL) { + pDevEvo->pSubDevices[sd]->overlayContextDmaIso[head] = + pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle; + pDevEvo->pSubDevices[sd]->overlaySurfFormat[head] = pSurfaceEvo->format; + + } else { + pDevEvo->pSubDevices[sd]->overlayContextDmaIso[head] = 0x0; + } + } FOR_EACH_SUBDEV_IN_MASK_END + + /* program notifier */ + + if (pHwState->completionNotifier.surface.pSurfaceEvo == NULL) { + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->completionNotifier.surface; + value = 0; + + if (pNIso->format == NVKMS_NISO_FORMAT_LEGACY) { + value = FLD_SET_DRF(917E, _SET_NOTIFIER_CONTROL, _FORMAT, + _LEGACY, value); + } else { + value = FLD_SET_DRF(917E, _SET_NOTIFIER_CONTROL, _FORMAT, + _FOUR_WORD, value); + } + + value = FLD_SET_DRF_NUM(917E, _SET_NOTIFIER_CONTROL, _OFFSET, + pNIso->offsetInWords, value); + + value = FLD_SET_DRF_NUM(917E, _SET_NOTIFIER_CONTROL, _MODE, + pHwState->completionNotifier.awaken ? + NV917E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN : + NV917E_SET_NOTIFIER_CONTROL_MODE_WRITE, value); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, value); + } + + /* program semaphore */ + nvAssertSameSemaphoreSurface(pHwState); + + if (pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo == NULL) { + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->syncObject.u.semaphores.acquireSurface; + value = 0; + + if (pNIso->format == NVKMS_NISO_FORMAT_LEGACY) { + value = FLD_SET_DRF(917E, _SET_SEMAPHORE_CONTROL, _FORMAT, + _LEGACY, value); + } else { + value = FLD_SET_DRF(917E, _SET_SEMAPHORE_CONTROL, _FORMAT, + _FOUR_WORD, value); + } + + value = FLD_SET_DRF_NUM(917E, _SET_SEMAPHORE_CONTROL, _OFFSET, + pNIso->offsetInWords, value); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SEMAPHORE_ACQUIRE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.acquireValue); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.releaseValue); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, value); + } + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_TIMESTAMP_ORIGIN_LO, 2); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_UPDATE_TIMESTAMP_LO, 2); + nvDmaSetEvoMethodData(pChannel, 
NvU64_LO32(pHwState->timeStamp)); + nvDmaSetEvoMethodData(pChannel, NvU64_HI32(pHwState->timeStamp)); + + if (pHwState->timeStamp == 0) { + value = NV917E_SET_PRESENT_CONTROL_BEGIN_MODE_ASAP; + } else { + value = NV917E_SET_PRESENT_CONTROL_BEGIN_MODE_TIMESTAMP; + } + nvAssert(!pHwState->tearing); + nvAssert(!pHwState->vrrTearing); + nvAssert(!pHwState->perEyeStereoFlip); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_PRESENT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SET_PRESENT_CONTROL, _BEGIN_MODE, value) | + DRF_NUM(917E, _SET_PRESENT_CONTROL, _MIN_PRESENT_INTERVAL, + pHwState->minPresentInterval)); + + if (!pSurfaceEvo) { + // Disable overlay on this head. + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMAS_ISO(NVKMS_LEFT), 1); + nvDmaSetEvoMethodData(pChannel, 0); + + EvoSetOverlayInputLut(pDevEvo, pChannel, NULL, FALSE); + return; + } + + nvAssert(pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SIZE_IN, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SET_SIZE_IN, _WIDTH, pHwState->sizeIn.width) | + DRF_NUM(917E, _SET_SIZE_IN, _HEIGHT, pHwState->sizeIn.height)); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SIZE_OUT, 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(917E, _SET_SIZE_OUT, _WIDTH, + pHwState->sizeOut.width)); + + // Set the surface parameters. + nvDmaSetStartEvoMethod(pChannel, NV917E_SURFACE_SET_OFFSET(NVKMS_LEFT), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SURFACE_SET_OFFSET, _ORIGIN, + nvCtxDmaOffsetFromBytes(pSurfaceEvo->planes[0].offset))); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SURFACE_SET_SIZE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SURFACE_SET_SIZE, _WIDTH, pSurfaceEvo->widthInPixels) | + DRF_NUM(917E, _SURFACE_SET_SIZE, _HEIGHT, pSurfaceEvo->heightInPixels)); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SURFACE_SET_STORAGE, 1); + nvDmaSetEvoMethodData(pChannel, EvoComputeSetStorage90(pDevEvo, pSurfaceEvo)); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SURFACE_SET_PARAMS, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SURFACE_SET_PARAMS, _FORMAT, + EvoOverlayFormatFromKmsFormat91(pSurfaceEvo->format)) | + DRF_DEF(917E, _SURFACE_SET_PARAMS, _COLOR_SPACE, _RGB)); + + SetCscMatrix(pChannel, NV917E_SET_CSC_RED2RED, &pHwState->cscMatrix, 0); + + EvoSetOverlayInputLut(pDevEvo, pChannel, &pHwState->inputLut, TRUE); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMAS_ISO(NVKMS_LEFT), 1); + nvDmaSetEvoMethodData(pChannel, pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle); +} + +static NvBool +needToReprogramCoreSurface(NVDevEvoPtr pDevEvo, + const NvU32 sd, + const NvU32 head, + const NVSurfaceEvoRec *pNewSurfaceEvo) +{ + const NVDispEvoRec *pDispEvo = pDevEvo->gpus[sd].pDispEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvBool enableBaseLut = pHeadState->lut.baseLutEnabled; + + const NVSurfaceEvoRec *pCurrCoreSurfaceEvo = + pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head]; + const NvBool currIsBaseSurfSpecified = + pDevEvo->pSubDevices[sd]->isBaseSurfSpecified[head]; + const NvU32 currHeightInPixels = pCurrCoreSurfaceEvo != NULL ? + pCurrCoreSurfaceEvo->heightInPixels : 0; + const NvU32 currWidthInPixels = pCurrCoreSurfaceEvo != NULL ? + pCurrCoreSurfaceEvo->widthInPixels : 0; + const enum NvKmsSurfaceMemoryFormat currFormat = + pCurrCoreSurfaceEvo != NULL ? 
+        pCurrCoreSurfaceEvo->format : NvKmsSurfaceMemoryFormatI8;
+
+    const NvBool newIsBaseSurfSpecified = pNewSurfaceEvo != NULL;
+    const NvU32 newHeightInPixels = pNewSurfaceEvo != NULL ?
+        pNewSurfaceEvo->heightInPixels : 0;
+    const NvU32 newWidthInPixels = pNewSurfaceEvo != NULL ?
+        pNewSurfaceEvo->widthInPixels : 0;
+    const enum NvKmsSurfaceMemoryFormat newFormat = pNewSurfaceEvo != NULL ?
+        pNewSurfaceEvo->format : NvKmsSurfaceMemoryFormatI8;
+
+    /* If base channel flips from NULL to non-NULL surface or vice-versa */
+    if (currIsBaseSurfSpecified != newIsBaseSurfSpecified) {
+        return TRUE;
+    }
+
+    /*
+     * Reprogram the core surface if the current and new base surfaces have
+     * different size or format. The format check is needed to enable/disable
+     * the input lut if the input lut is not explicitly enabled/disabled by
+     * client and the base surface is flipping to or flipping away from the I8
+     * format.
+     */
+    if (newIsBaseSurfSpecified) {
+
+        if (newWidthInPixels != currWidthInPixels ||
+            newHeightInPixels != currHeightInPixels) {
+            return TRUE;
+        }
+
+        if (!enableBaseLut &&
+            newFormat != currFormat &&
+            (currFormat == NvKmsSurfaceMemoryFormatI8 ||
+             newFormat == NvKmsSurfaceMemoryFormatI8)) {
+            return TRUE;
+        }
+    }
+
+    return !currIsBaseSurfSpecified;
+}
+
+static void
+EvoPushUpdateCompositionIfNeeded(NVDevEvoPtr pDevEvo,
+                                 const NvU32 sd,
+                                 NVEvoChannelPtr pChannel,
+                                 const NVFlipChannelEvoHwState *pHwState,
+                                 NVEvoUpdateState *updateState,
+                                 NvBool bypassComposition)
+{
+    const NVSurfaceEvoRec *pNewSurfaceEvo = pHwState->pSurfaceEvo[NVKMS_LEFT];
+    NvBool updateComposition = FALSE;
+    const NVFlipChannelEvoHwState *pBaseHwState = NULL;
+    const NVFlipChannelEvoHwState *pOverlayHwState = NULL;
+    NvU32 head = NV_INVALID_HEAD;
+
+    if (pNewSurfaceEvo == NULL) {
+        return;
+    }
+
+    /*
+     * Re-program the composition parameters if this is the first layer
+     * update, if the color key selection method has changed, or if the
+     * layer is using source color keying and the color key has changed.
+     */
+
+    if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0) {
+        head = NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(pChannel->channelMask);
+        pOverlayHwState =
+            &pDevEvo->gpus[sd].headState[head].layer[NVKMS_OVERLAY_LAYER];
+        pBaseHwState = pHwState;
+
+        if ((!pDevEvo->pSubDevices[sd]->baseComp[head].initialized) ||
+
+            (pHwState->composition.colorKeySelect !=
+             pDevEvo->pSubDevices[sd]->baseComp[head].colorKeySelect) ||
+
+            ((pHwState->composition.colorKeySelect ==
+              NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) &&
+             (pNewSurfaceEvo->format !=
+              pDevEvo->pSubDevices[sd]->baseSurfFormat[head] ||
+              nvkms_memcmp(&pHwState->composition.colorKey,
+                           &pDevEvo->pSubDevices[sd]->baseComp[head].colorKey,
+                           sizeof(pHwState->composition.colorKey)) != 0))) {
+
+            pDevEvo->pSubDevices[sd]->baseComp[head].initialized = TRUE;
+            pDevEvo->pSubDevices[sd]->baseComp[head].colorKeySelect =
+                pHwState->composition.colorKeySelect;
+            pDevEvo->pSubDevices[sd]->baseComp[head].colorKey =
+                pHwState->composition.colorKey;
+            updateComposition = TRUE;
+        }
+    }
+
+    if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0) {
+        head = NV_EVO_CHANNEL_MASK_OVERLAY_HEAD_NUMBER(pChannel->channelMask);
+        pBaseHwState =
+            &pDevEvo->gpus[sd].headState[head].layer[NVKMS_MAIN_LAYER];
+        pOverlayHwState = pHwState;
+
+        if ((!pDevEvo->pSubDevices[sd]->overlayComp[head].initialized) ||
+
+            (pHwState->composition.colorKeySelect !=
+             pDevEvo->pSubDevices[sd]->overlayComp[head].colorKeySelect) ||
+
+            ((pHwState->composition.colorKeySelect ==
+              NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) &&
+             (pNewSurfaceEvo->format !=
+              pDevEvo->pSubDevices[sd]->overlaySurfFormat[head] ||
+              nvkms_memcmp(&pHwState->composition.colorKey,
+                           &pDevEvo->pSubDevices[sd]->overlayComp[head].colorKey,
+                           sizeof(pHwState->composition.colorKey)) != 0))) {
+
+            pDevEvo->pSubDevices[sd]->overlayComp[head].initialized = TRUE;
+            pDevEvo->pSubDevices[sd]->overlayComp[head].colorKeySelect =
+                pHwState->composition.colorKeySelect;
+            pDevEvo->pSubDevices[sd]->overlayComp[head].colorKey =
+                pHwState->composition.colorKey;
+            updateComposition = TRUE;
+        }
+    }
+
+    if (updateComposition) {
+        nvPushEvoSubDevMask(pDevEvo, NVBIT(sd));
+        EvoPushUpdateComposition(pDevEvo, head, pBaseHwState, pOverlayHwState,
+                                 updateState, bypassComposition);
+        nvPopEvoSubDevMask(pDevEvo);
+    }
+}
+
+static void EvoFlip90(NVDevEvoPtr pDevEvo,
+                      NVEvoChannelPtr pChannel,
+                      const NVFlipChannelEvoHwState *pHwState,
+                      NVEvoUpdateState *updateState,
+                      NvBool bypassComposition)
+{
+    NvU32 sd;
+
+    FOR_EACH_SUBDEV_IN_MASK(sd, nvPeekEvoSubDevMask(pDevEvo)) {
+        EvoPushUpdateCompositionIfNeeded(pDevEvo, sd, pChannel, pHwState,
+                                         updateState, bypassComposition);
+    } FOR_EACH_SUBDEV_IN_MASK_END
+
+    if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0) {
+        const NvU32 head =
+            NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(pChannel->channelMask);
+
+        FOR_EACH_SUBDEV_IN_MASK(sd, nvPeekEvoSubDevMask(pDevEvo)) {
+            if (needToReprogramCoreSurface(
+                    pDevEvo,
+                    sd,
+                    head,
+                    pHwState->pSurfaceEvo[NVKMS_LEFT])) {
+                const struct NvKmsCscMatrix zeroCscMatrix = { };
+
+                nvPushEvoSubDevMask(pDevEvo, NVBIT(sd));
+                EvoPushSetCoreSurfaceMethodsForOneSd(pDevEvo, sd, head,
+                    pHwState->pSurfaceEvo[NVKMS_LEFT],
+                    &zeroCscMatrix, updateState);
+                nvPopEvoSubDevMask(pDevEvo);
+            }
+
+            if (pHwState->pSurfaceEvo[NVKMS_LEFT] != NULL) {
+                pDevEvo->pSubDevices[sd]->isBaseSurfSpecified[head] = TRUE;
+                pDevEvo->pSubDevices[sd]->baseSurfFormat[head] =
+                    pHwState->pSurfaceEvo[NVKMS_LEFT]->format;
+            } else {
+                pDevEvo->pSubDevices[sd]->isBaseSurfSpecified[head] = FALSE;
+            }
+
+            /*
+             * On EVO2, error 52 will be thrown if any of the
+             * SET_{BASE,OUTPUT}_LUT_{LO,HI} methods are programmed on a
+             * tearing flip, regardless of whether they actually update the
+             * hardware state.
+             */
+            if (!pHwState->tearing) {
+                EvoSetBaseInputLut(pDevEvo, sd, head,
+                                   &pHwState->inputLut,
+                                   pHwState->pSurfaceEvo[NVKMS_LEFT] != NULL,
+                                   updateState);
+                EvoSetOutputLut(pDevEvo, sd, head,
+                                &pDevEvo->gpus[sd].headState[head].outputLut,
+                                pHwState->pSurfaceEvo[NVKMS_LEFT] != NULL,
+                                updateState);
+            }
+        } FOR_EACH_SUBDEV_IN_MASK_END
+
+        FlipBase90(pDevEvo, pChannel, pHwState, updateState);
+
+        if (pHwState->vrrTearing) {
+            int head = NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(pChannel->channelMask);
+            NvU32 sd, subDeviceMask = nvPeekEvoSubDevMask(pDevEvo);
+
+            for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+                if (subDeviceMask & (1 << sd)) {
+                    updateState->subdev[sd].base[head].vrrTearing = TRUE;
+                }
+            }
+        }
+    } else if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0) {
+        NvBool interlockWithCore = FALSE;
+
+        FlipOverlay90(pDevEvo, pChannel, pHwState, &interlockWithCore);
+
+        if (interlockWithCore) {
+            nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->core);
+        }
+    } else {
+        nvAssert(!"Unknown channel mask in EvoFlip90");
+    }
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+}
+
+static void EvoFlipTransitionWAR90(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head,
+                                   const NVEvoSubDevHeadStateRec *pSdHeadState,
+                                   const NVFlipEvoHwState *pFlipState,
+                                   NVEvoUpdateState *updateState)
+{
+    /* Nothing to do pre-Turing */
+}
+
+/*!
+ * Pack the given abstract color key into a key and mask as required
+ * by the display engine.
+ *
+ * \param[in]  format  NVKMS format for the input surface
+ * \param[in]  key     NVKMS representation of a color key
+ * \param[out] pValue  NV857E_SET_KEY_COLOR_COLOR value
+ * \param[out] pMask   NV857E_SET_KEY_COLOR_MASK value
+ */
+static void EvoPackColorKey91(enum NvKmsSurfaceMemoryFormat format,
+                              const NVColorKey key,
+                              NvU32 *pValue, NvU32 *pMask)
+{
+    NvU32 value = 0, mask = 0;
+    switch (format) {
+    case NvKmsSurfaceMemoryFormatR5G6B5:
+        if (key.matchR) {
+            mask |= 0x1f << 11;
+            value |= (key.r & 0x1f) << 11;
+        }
+        if (key.matchG) {
+            mask |= 0x3f << 5;
+            value |= (key.g & 0x3f) << 5;
+        }
+        if (key.matchB) {
+            mask |= 0x1f << 0;
+            value |= (key.b & 0x1f) << 0;
+        }
+        break;
+    case NvKmsSurfaceMemoryFormatA1R5G5B5:
+    case NvKmsSurfaceMemoryFormatX1R5G5B5:
+        if (key.matchA) {
+            mask |= 0x1 << 15;
+            value |= (key.a & 0x1) << 15;
+        }
+        if (key.matchR) {
+            mask |= 0x1f << 10;
+            value |= (key.r & 0x1f) << 10;
+        }
+        if (key.matchG) {
+            mask |= 0x1f << 5;
+            value |= (key.g & 0x1f) << 5;
+        }
+        if (key.matchB) {
+            mask |= 0x1f << 0;
+            value |= (key.b & 0x1f) << 0;
+        }
+        break;
+    case NvKmsSurfaceMemoryFormatA8R8G8B8:
+    case NvKmsSurfaceMemoryFormatX8R8G8B8:
+        if (key.matchA) {
+            /* Only one bit of alpha is handled by the hw. */
+            mask |= 0x1 << 31;
+            value |= (key.a ? 1:0) << 31;
+        }
+        if (key.matchR) {
+            mask |= 0xff << 16;
+            value |= (key.r & 0xff) << 16;
+        }
+        if (key.matchG) {
+            mask |= 0xff << 8;
+            value |= (key.g & 0xff) << 8;
+        }
+        if (key.matchB) {
+            mask |= 0xff << 0;
+            value |= (key.b & 0xff) << 0;
+        }
+        break;
+    case NvKmsSurfaceMemoryFormatA8B8G8R8:
+    case NvKmsSurfaceMemoryFormatX8B8G8R8:
+        if (key.matchA) {
+            /* Only one bit of alpha is handled by the hw. */
+            mask |= 0x1 << 31;
+            value |= (key.a ?
1:0) << 31; + } + if (key.matchB) { + mask |= 0xff << 16; + value |= (key.b & 0xff) << 16; + } + if (key.matchG) { + mask |= 0xff << 8; + value |= (key.g & 0xff) << 8; + } + if (key.matchR) { + mask |= 0xff << 0; + value |= (key.r & 0xff) << 0; + } + break; + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + if (key.matchA) { + /* Only one bit of alpha is handled by the hw. */ + mask |= 0x1 << 31; + value |= (key.a ? 1:0) << 31; + } + if (key.matchB) { + mask |= 0x3ff << 20; + value |= (key.b & 0x3ff) << 20; + } + if (key.matchG) { + mask |= 0x3ff << 10; + value |= (key.g & 0x3ff) << 10; + } + if (key.matchR) { + mask |= 0x3ff << 0; + value |= (key.r & 0x3ff) << 0; + } + break; + case NvKmsSurfaceMemoryFormatI8: + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + case NvKmsSurfaceMemoryFormatRF16GF16BF16XF16: + case NvKmsSurfaceMemoryFormatR16G16B16A16: + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + nvAssert(!"Unhandled format in nvEvo1PackColorKey"); + break; + } + + *pMask = mask; + *pValue = value; +} + +static NvBool EvoOverlayCompositionControlFromNvKmsCompositionParams( + const NVFlipChannelEvoHwState *pBaseHwState, + const NVFlipChannelEvoHwState *pOverlayHwState, + NvU32 *pMode, + NvU32 *pColorKeyValue, + NvU32 *pColorKeyMask) +{ + const struct NvKmsCompositionParams *pBaseCompParams = + &pBaseHwState->composition; + const struct NvKmsCompositionParams *pOverlayCompParams = + &pOverlayHwState->composition; + + switch (pOverlayCompParams->colorKeySelect) { + case NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE: + if (pOverlayCompParams->blendingMode[1] == NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE) { + *pMode = NV917E_SET_COMPOSITION_CONTROL_MODE_OPAQUE; + *pColorKeyValue = *pColorKeyMask = 0; + } else { + return FALSE; + } + break; + case NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC: + if ((pOverlayCompParams->blendingMode[0] == + NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE) && + (pOverlayCompParams->blendingMode[1] == + NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT)) { + + *pMode = NV917E_SET_COMPOSITION_CONTROL_MODE_SOURCE_COLOR_VALUE_KEYING; + + if (pOverlayHwState->pSurfaceEvo[NVKMS_LEFT] != NULL) { + EvoPackColorKey91(pOverlayHwState->pSurfaceEvo[NVKMS_LEFT]->format, + pOverlayCompParams->colorKey, + pColorKeyValue, + pColorKeyMask); + } else { + *pColorKeyValue = *pColorKeyMask = 0; + } + + } else { + return FALSE; + } + break; + case NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST: + if 
((pBaseCompParams->colorKeySelect == + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) && + (pOverlayCompParams->blendingMode[1] == + NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE)) { + *pMode = NV917E_SET_COMPOSITION_CONTROL_MODE_OPAQUE; + *pColorKeyValue = *pColorKeyMask = 0; + } else if ((pBaseCompParams->colorKeySelect == + NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) && + (pOverlayCompParams->blendingMode[1] == + NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE) && + (pOverlayCompParams->blendingMode[0] == + NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT)) { + + *pMode = NV917E_SET_COMPOSITION_CONTROL_MODE_DESTINATION_COLOR_VALUE_KEYING; + + if (pBaseHwState->pSurfaceEvo[NVKMS_LEFT] != NULL) { + EvoPackColorKey91(pBaseHwState->pSurfaceEvo[NVKMS_LEFT]->format, + pBaseCompParams->colorKey, + pColorKeyValue, + pColorKeyMask); + } else { + *pColorKeyValue = *pColorKeyMask = 0; + } + + } else { + return FALSE; + } + break; + default: + return FALSE; + } + + return TRUE; +} + +static void +EvoPushUpdateComposition(NVDevEvoPtr pDevEvo, + const int head, + const NVFlipChannelEvoHwState *pBaseHwState, + const NVFlipChannelEvoHwState *pOverlayHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + + /* Composition is always programmed through the overlay channel. */ + NVEvoChannelPtr pChannel = pDevEvo->overlay[head]; + NvU32 colorKeyValue = 0, colorKeyMask = 0; + NvU32 compositionModeValue = 0; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (!EvoOverlayCompositionControlFromNvKmsCompositionParams( + pBaseHwState, pOverlayHwState, + &compositionModeValue, + &colorKeyValue, + &colorKeyMask)) { + /* + * composition mode is validated during + * nvUpdateFlipEvoHwState(), so it should always be valid when + * we get here. + */ + nvAssert(!"Invalid composition params"); + return; + } + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_COMPOSITION_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, compositionModeValue); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_KEY_COLOR_LO, 2); + nvDmaSetEvoMethodData(pChannel, colorKeyValue); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_KEY_MASK_LO, 2); + nvDmaSetEvoMethodData(pChannel, colorKeyMask); + nvDmaSetEvoMethodData(pChannel, 0); +} + +/* + * The LUT entries in INDEX_1025_UNITY_RANGE have 16 bits, with the + * black value at 24576, and the white at 49151. Since the effective + * range is 16384, we treat this as a 14-bit LUT. However, we need to + * clear the low 3 bits to WAR hardware bug 813188. This gives us + * 14-bit LUT values, but only 11 bits of precision. + */ +static inline NvU16 ColorToLUTEntry(NvU16 val) +{ + const NvU16 val14bit = val >> 2; + return (val14bit & ~7) + 24576; +} + +/* In INDEX_1025_UNITY_RANGE, the LUT indices for color depths with less + * than 10 bpc are the indices you'd have in 257-entry mode multiplied + * by four. So, you under-replicate all but the two least significant bits. + * Since when is EVO supposed to make sense? 
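+ * For example, at depth 8 or 24, palette entry i lands at LUT entry
+ * (i << 2) below, so 257-entry index 1 maps to 1025-entry index 4.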
+ */ +static void +EvoFillLUTSurface90(NVEvoLutEntryRec *pLUTBuffer, + const NvU16 *red, + const NvU16 *green, + const NvU16 *blue, + int nColorMapEntries, int depth) +{ + int i, lutIndex; + + switch (depth) { + case 15: + for (i = 0; i < nColorMapEntries; i++) { + lutIndex = PALETTE_DEPTH_SHIFT(i, 5) << 2; + pLUTBuffer[lutIndex].Red = ColorToLUTEntry(red[i]); + pLUTBuffer[lutIndex].Green = ColorToLUTEntry(green[i]); + pLUTBuffer[lutIndex].Blue = ColorToLUTEntry(blue[i]); + } + break; + case 16: + for (i = 0; i < nColorMapEntries; i++) { + pLUTBuffer[PALETTE_DEPTH_SHIFT(i, 6) << 2].Green = ColorToLUTEntry(green[i]); + if (i < 32) { + lutIndex = PALETTE_DEPTH_SHIFT(i, 5) << 2; + pLUTBuffer[lutIndex].Red = ColorToLUTEntry(red[i]); + pLUTBuffer[lutIndex].Blue = ColorToLUTEntry(blue[i]); + } + } + break; + case 8: + case 24: + for (i = 0; i < nColorMapEntries; i++) { + lutIndex = i << 2; + pLUTBuffer[lutIndex].Red = ColorToLUTEntry(red[i]); + pLUTBuffer[lutIndex].Green = ColorToLUTEntry(green[i]); + pLUTBuffer[lutIndex].Blue = ColorToLUTEntry(blue[i]); + } + break; + case 30: + for (i = 0; i < nColorMapEntries; i++) { + pLUTBuffer[i].Red = ColorToLUTEntry(red[i]); + pLUTBuffer[i].Green = ColorToLUTEntry(green[i]); + pLUTBuffer[i].Blue = ColorToLUTEntry(blue[i]); + } + break; + default: + nvAssert(!"invalid depth"); + return; + } +} + +#define NV_EVO2_CAP_GET_PIN(cl, n, pEvoCaps, word, name, idx, pCaps) \ + (pEvoCaps)->pin[(idx)].flipLock = \ + FLD_TEST_DRF(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_##word, \ + _LOCK_PIN##name##USAGE, _FLIP_LOCK, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_##word]); \ + (pEvoCaps)->pin[(idx)].stereo = \ + FLD_TEST_DRF(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_##word, \ + _LOCK_PIN##name##USAGE, _STEREO, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_##word]); \ + (pEvoCaps)->pin[(idx)].scanLock = \ + FLD_TEST_DRF(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_##word, \ + _LOCK_PIN##name##USAGE, _SCAN_LOCK, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_##word]); + +/* Take the max of MAX_PIXELS_t_TAP422 and MAX_PIXELS_t_TAP444 */ +#define NV_EVO2_CAP_GET_HEAD_MAX_PIXELS(cl, n, pEvoCaps, i, x, t, pCaps) \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_##t##TAPS].maxPixelsVTaps = \ + NV_MAX(REF_VAL(NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_HEAD##i##_##x##_MAX_PIXELS##t##TAP422, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_HEAD##i##_##x]), \ + REF_VAL(NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_HEAD##i##_##x##_MAX_PIXELS##t##TAP444, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_HEAD##i##_##x])) + +#define NV_EVO2_CAP_GET_HEAD(cl, n, pEvoCaps, i, x, y, z, a, pCaps) \ + (pEvoCaps)->head[(i)].usable = TRUE; \ + (pEvoCaps)->head[(i)].maxPClkKHz = \ + DRF_VAL(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_CAP_HEAD##i##_##a, _PCLK_MAX, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_HEAD##i##_##a]) * 10000; \ + (pEvoCaps)->head[(i)].scalerCaps.present = TRUE; \ + NV_EVO2_CAP_GET_HEAD_MAX_PIXELS(cl, n, pEvoCaps, i, x, 5, pCaps); \ + NV_EVO2_CAP_GET_HEAD_MAX_PIXELS(cl, n, pEvoCaps, i, y, 3, pCaps); \ + NV_EVO2_CAP_GET_HEAD_MAX_PIXELS(cl, n, pEvoCaps, i, z, 2, pCaps); \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_8TAPS].maxHDownscaleFactor = NV_U16_MAX; \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_5TAPS].maxVDownscaleFactor = NV_U16_MAX; \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_3TAPS].maxVDownscaleFactor = NV_U16_MAX; \ + 
(pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_2TAPS].maxVDownscaleFactor = NV_U16_MAX; \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_2TAPS].maxHDownscaleFactor = NV_U16_MAX; + +#define NV_EVO2_CAP_GET_SOR(cl, n, pEvoCaps, i, x, y, pCaps) \ + (pEvoCaps)->sor[(i)].dualTMDS = \ + FLD_TEST_DRF(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_CAP_SOR##i##_##x, \ + _DUAL_TMDS, _TRUE, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_SOR##i##_##x]); \ + (pEvoCaps)->sor[(i)].maxTMDSClkKHz = \ + DRF_VAL(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_CAP_SOR##i##_##y, _TMDS_LVDS_CLK_MAX, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_SOR##i##_##y]) * 10000; + +static void EvoParseCapabilityNotifier3(NVEvoCapabilitiesPtr pEvoCaps, + volatile const NvU32 *pCaps) +{ + // Lock pins + // These magic numbers (5, 6, _A, etc.) are token-pasted into the + // NV917D_CORE_NOTIFIER_3_* macros and can't be autogenerated by the + // preprocessor. Architecture appears to have no plans to ever fix this. + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 0, 0x0, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 1, 0x1, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 2, 0x2, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 3, 0x3, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 4, 0x4, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 5, 0x5, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 6, 0x6, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 7, 0x7, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, 8, 0x8, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, 9, 0x9, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _A, 0xa, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _B, 0xb, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _C, 0xc, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _D, 0xd, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _E, 0xe, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _F, 0xf, pCaps); + + // Miscellaneous capabilities + pEvoCaps->misc.supportsInterlaced = TRUE; + pEvoCaps->misc.supportsSemiPlanar = FALSE; + pEvoCaps->misc.supportsPlanar = FALSE; + pEvoCaps->misc.supportsDSI = FALSE; + + // Heads + NV_EVO2_CAP_GET_HEAD(917D, 3, pEvoCaps, 0, 53, 54, 55, 56, pCaps); + NV_EVO2_CAP_GET_HEAD(917D, 3, pEvoCaps, 1, 61, 62, 63, 64, pCaps); + NV_EVO2_CAP_GET_HEAD(917D, 3, pEvoCaps, 2, 69, 70, 71, 72, pCaps); + NV_EVO2_CAP_GET_HEAD(917D, 3, pEvoCaps, 3, 77, 78, 79, 80, pCaps); + + // SORs + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 0, 20, 21, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 1, 22, 23, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 2, 24, 25, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 3, 26, 27, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 4, 28, 29, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 5, 30, 31, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 6, 32, 33, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 7, 34, 35, pCaps); + + // Don't need any PIOR caps currently. +} + +/* + * VSS is unsupported on EVO2 and all LUT entries are in UNORM14_WAR_813188 + */ +static void EvoFillLUTCaps(struct NvKmsLUTCaps *pCaps, NvBool supported) +{ + pCaps->supported = supported; + pCaps->vssSupport = NVKMS_LUT_VSS_NOT_SUPPORTED; + pCaps->vssType = NVKMS_LUT_VSS_TYPE_NONE; + pCaps->vssSegments = 0; + pCaps->lutEntries = supported ? 
1025 : 0; + pCaps->entryFormat = NVKMS_LUT_FORMAT_UNORM14_WAR_813188; +} + +static NvBool EvoGetCapabilities90(NVDevEvoPtr pDevEvo) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NVDispEvoPtr pDispEvo; + unsigned int i, sd; + struct NvKmsRRParams rrParams = { NVKMS_ROTATION_0, FALSE, FALSE }; + NvU8 layer; + + nvAssert(nvPeekEvoSubDevMask(pDevEvo) == SUBDEVICE_MASK_ALL); + + /* Main layer position and size updates are not supported on EVO. */ + for (layer = 0; + layer < ARRAY_LEN(pDevEvo->caps.layerCaps); + layer++) { + if (layer != NVKMS_MAIN_LAYER) { + pDevEvo->caps.layerCaps[layer].supportsWindowMode = TRUE; + pDevEvo->caps.legacyNotifierFormatSizeBytes[layer] = + NV_DISP_NOTIFICATION_2_SIZEOF; + } else { + pDevEvo->caps.layerCaps[layer].supportsWindowMode = FALSE; + pDevEvo->caps.legacyNotifierFormatSizeBytes[layer] = + NV_DISP_BASE_NOTIFIER_1_SIZEOF; + } + + /* Only the ILUT and OLUT are supported on EVO2. The TMO is EVO3+. */ + EvoFillLUTCaps(&pDevEvo->caps.layerCaps[layer].ilut, TRUE /* supported */); + EvoFillLUTCaps(&pDevEvo->caps.layerCaps[layer].tmo, FALSE /* supported */); + } + + EvoFillLUTCaps(&pDevEvo->caps.olut, TRUE /* supported */); + + pDevEvo->caps.cursorCompositionCaps = + (struct NvKmsCompositionCapabilities) { + .supportedColorKeySelects = + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE), + + .colorKeySelect = { + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE] = { + .supportedBlendModes = { + [1] = NV_EVO2_SUPPORTED_CURSOR_COMP_BLEND_MODES, + }, + }, + } + }; + + /* Base doesn't support any composition with underlying layers. */ + pDevEvo->caps.layerCaps[NVKMS_MAIN_LAYER].composition = + (struct NvKmsCompositionCapabilities) { + .supportedColorKeySelects = + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) | + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC), + + .colorKeySelect = { + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE] = { + .supportedBlendModes = { + [1] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + }, + }, + + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC] = { + .supportedBlendModes = { + [0] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + [1] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + }, + }, + }, + }; + + pDevEvo->caps.layerCaps[NVKMS_OVERLAY_LAYER].composition = + (struct NvKmsCompositionCapabilities) { + .supportedColorKeySelects = + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) | + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) | + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST), + + .colorKeySelect = { + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE] = { + .supportedBlendModes = { + [1] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + }, + }, + + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC] = { + .supportedBlendModes = { + [0] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + [1] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT), + }, + }, + + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST] = { + .supportedBlendModes = { + [0] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT), + [1] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + }, + }, + }, + }; + + pDevEvo->caps.validLayerRRTransforms |= + NVBIT(NvKmsRRParamsToCapBit(&rrParams)); + + for (i = NvKmsSurfaceMemoryFormatMin; + i <= NvKmsSurfaceMemoryFormatMax; + i++) { + if (nvHwFormatFromKmsFormat90(i) != 0) { + pDevEvo->caps.layerCaps[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats |= + NVBIT64(i); + } + + if (EvoOverlayFormatFromKmsFormat91(i) != 0) { + pDevEvo->caps.layerCaps[NVKMS_OVERLAY_LAYER].supportedSurfaceMemoryFormats |= + NVBIT64(i); + } + } + + 
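+    /*
+     * Read back the display capabilities: arm an awaken notifier on the
+     * core channel, send GET_CAPABILITIES, wait for each disp's notifier
+     * to report DONE, and then parse the per-subdevice results.
+     */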
EvoSetNotifierMethods90(pDevEvo, + pChannel, + TRUE /* notify */, + TRUE /* awaken */, + 0 /* notifier */); + + /* Initialize the capability notifiers. */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) { + nvWriteEvoCoreNotifier(pDispEvo, NV917D_CORE_NOTIFIER_3_CAPABILITIES_4, + DRF_DEF(917D_CORE_NOTIFIER_3, _CAPABILITIES_4, _DONE, _FALSE)); + } + + /* Tell the hardware to fill in the notifier. */ + nvDmaSetStartEvoMethod(pChannel, NV917D_GET_CAPABILITIES, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaKickoffEvo(pChannel); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + NVEvoSubDevPtr pEvoSubDev; + volatile NvU32 *pCaps; + + nvEvoWaitForCoreNotifier(pDispEvo, NV917D_CORE_NOTIFIER_3_CAPABILITIES_4, + DRF_BASE(NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE), + DRF_EXTENT(NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE), + NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE); + + pEvoSubDev = &pDevEvo->gpus[sd]; + pCaps = pDevEvo->core->notifiersDma[sd].subDeviceAddress[sd]; + + nvkms_memset(&pEvoSubDev->capabilities, 0, + sizeof(pEvoSubDev->capabilities)); + EvoParseCapabilityNotifier3(&pEvoSubDev->capabilities, pCaps); + } + + /* Reset notifier state so it isn't on for future updates */ + EvoSetNotifierMethods90(pDevEvo, + pChannel, + FALSE /* notify */, + FALSE /* awaken */, + 0 /* notifier */); + nvDmaKickoffEvo(pChannel); + + return TRUE; +} + +static void EvoSetViewportPointIn90(NVDevEvoPtr pDevEvo, const int head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // Set the input viewport point + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_POINT_IN(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(917D, _HEAD_SET_VIEWPORT_POINT_IN, _X, x) | + DRF_NUM(917D, _HEAD_SET_VIEWPORT_POINT_IN, _Y, y)); +} + +static void EvoSetOutputScaler90(const NVDispEvoRec *pDispEvo, const NvU32 head, + const NvU32 imageSharpeningValue, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeViewPortEvo *pViewPort = &pHeadState->timings.viewPort; + NvU32 setControlOutputScaler = 0; + NvU32 vTapsHw = 0, hTapsHw = 0; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + switch (pViewPort->vTaps) { + case NV_EVO_SCALER_5TAPS: + vTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5; + break; + case NV_EVO_SCALER_3TAPS: + // XXX TAPS_3_ADAPTIVE instead? 
--> I think only allowed with interlaced + vTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3; + break; + case NV_EVO_SCALER_2TAPS: + vTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2; + break; + case NV_EVO_SCALER_8TAPS: + nvAssert(!"Unknown pHeadState->vTaps"); + // fall through + case NV_EVO_SCALER_1TAP: + vTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1; + break; + } + switch (pViewPort->hTaps) { + case NV_EVO_SCALER_8TAPS: + hTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8; + break; + case NV_EVO_SCALER_2TAPS: + hTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2; + break; + case NV_EVO_SCALER_5TAPS: + case NV_EVO_SCALER_3TAPS: + nvAssert(!"Unknown pHeadState->hTaps"); + // fall through + case NV_EVO_SCALER_1TAP: + hTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1; + break; + } + setControlOutputScaler = + DRF_NUM(917D, _HEAD_SET_CONTROL_OUTPUT_SCALER, _HORIZONTAL_TAPS, + hTapsHw) | + DRF_NUM(917D, _HEAD_SET_CONTROL_OUTPUT_SCALER, _VERTICAL_TAPS, + vTapsHw); + + if (nvIsImageSharpeningAvailable(&pHeadState->timings.viewPort)) { + setControlOutputScaler = + FLD_SET_DRF_NUM(917D, _HEAD_SET_CONTROL_OUTPUT_SCALER, + _HRESPONSE_BIAS, imageSharpeningValue, + setControlOutputScaler); + + setControlOutputScaler = + FLD_SET_DRF_NUM(917D, _HEAD_SET_CONTROL_OUTPUT_SCALER, + _VRESPONSE_BIAS, imageSharpeningValue, + setControlOutputScaler); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER(head), 1); + nvDmaSetEvoMethodData(pChannel, setControlOutputScaler); +} + +static void EvoSetViewportInOut90(NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortMin, + const NVHwModeViewPortEvo *pViewPort, + const NVHwModeViewPortEvo *pViewPortMax, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* The input viewport shouldn't vary. 
*/
+    nvAssert(pViewPortMin->in.width == pViewPort->in.width);
+    nvAssert(pViewPortMax->in.width == pViewPort->in.width);
+    nvAssert(pViewPortMin->in.height == pViewPort->in.height);
+    nvAssert(pViewPortMax->in.height == pViewPort->in.height);
+    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_SIZE_IN(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_IN, _WIDTH, pViewPort->in.width) |
+        DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_IN, _HEIGHT, pViewPort->in.height));
+
+    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(917D, _HEAD_SET_VIEWPORT_POINT_OUT, _ADJUST_X, pViewPort->out.xAdjust) |
+        DRF_NUM(917D, _HEAD_SET_VIEWPORT_POINT_OUT, _ADJUST_Y, pViewPort->out.yAdjust));
+
+    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_SIZE_OUT(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT, _WIDTH, pViewPort->out.width) |
+        DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT, _HEIGHT, pViewPort->out.height));
+
+    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT_MIN, _WIDTH, pViewPortMin->out.width) |
+        DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT_MIN, _HEIGHT, pViewPortMin->out.height));
+
+    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT_MAX, _WIDTH, pViewPortMax->out.width) |
+        DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT_MAX, _HEIGHT, pViewPortMax->out.height));
+
+}
+
+
+/*!
+ * Compute the 917D_HEAD_SET_CONTROL_CURSOR method value.
+ *
+ * This function also validates that the given NVSurfaceEvoRec can be
+ * used as a cursor image.
+ *
+ * Pre-nvdisplay core channel classes have the same layout of the
+ * *7D_HEAD_SET_CONTROL_CURSOR method value.
+ *
+ * \param[in]  pDevEvo     The device on which the cursor will be programmed.
+ * \param[in]  pSurfaceEvo The surface to be used as the cursor image.
+ * \param[out] pValue      The 917D_HEAD_SET_CONTROL_CURSOR method value.
+ *
+ * \return  If TRUE, the surface can be used as a cursor image, and
+ *          pValue contains the method value. If FALSE, the surface
+ *          cannot be used as a cursor image.
+ */
+NvBool nvEvoGetHeadSetControlCursorValue90(const NVDevEvoRec *pDevEvo,
+                                           const NVSurfaceEvoRec *pSurfaceEvo,
+                                           NvU32 *pValue)
+{
+    NvU32 plane, numPlanes;
+    NvU64 minRequiredSize = 0;
+    NvU32 value = 0;
+
+    if (pSurfaceEvo == NULL) {
+        value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _ENABLE, _DISABLE);
+        value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _FORMAT, _A8R8G8B8);
+        goto done;
+    } else {
+        value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _ENABLE, _ENABLE);
+    }
+
+    /* The cursor must always be pitch. */
+
+    if (pSurfaceEvo->layout != NvKmsSurfaceMemoryLayoutPitch) {
+        return FALSE;
+    }
+
+    /*
+     * The only supported cursor image memory format is A8R8G8B8.
+     */
+    if (pSurfaceEvo->format == NvKmsSurfaceMemoryFormatA8R8G8B8) {
+        value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _FORMAT, _A8R8G8B8);
+    } else {
+        return FALSE;
+    }
+
+    numPlanes = nvKmsGetSurfaceMemoryFormatInfo(pSurfaceEvo->format)->numPlanes;
+
+    /*
+     * The cursor only supports a few image sizes.
+     *
+     * Compute minRequiredSize as widthInPixels x heightInPixels x 4 bytes per
+     * pixel, except for 32x32: we require a minimum pitch of 256, so we use
+     * that instead of widthInPixels x 4.
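+     * (For 32x32 that minimum is 256 * 32 = 8192 bytes rather than
+     * 32 * 32 * 4 = 4096.)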
+ */ + if ((pSurfaceEvo->widthInPixels == 32) && + (pSurfaceEvo->heightInPixels == 32)) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W32_H32); + minRequiredSize = 256 * 32; + } else if ((pSurfaceEvo->widthInPixels == 64) && + (pSurfaceEvo->heightInPixels == 64)) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W64_H64); + minRequiredSize = 64 * 64 * 4; + } else if ((pDevEvo->cursorHal->caps.maxSize >= 128) && + (pSurfaceEvo->widthInPixels == 128) && + (pSurfaceEvo->heightInPixels == 128)) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W128_H128); + minRequiredSize = 128 * 128 * 4; + } else if ((pDevEvo->cursorHal->caps.maxSize >= 256) && + (pSurfaceEvo->widthInPixels == 256) && + (pSurfaceEvo->heightInPixels == 256)) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W256_H256); + minRequiredSize = 256 * 256 * 4; + } else { + return FALSE; + } + + /* The surface size cannot be smaller than the required minimum. */ + + for (plane = 0; plane < numPlanes; plane++) { + if (pSurfaceEvo->planes[plane].rmObjectSizeInBytes < minRequiredSize) { + return FALSE; + } + } + + /* + * Hard code the cursor hotspot. + */ + value |= DRF_NUM(927D, _HEAD_SET_CONTROL_CURSOR, _HOT_SPOT_Y, 0); + value |= DRF_NUM(927D, _HEAD_SET_CONTROL_CURSOR, _HOT_SPOT_X, 0); + +done: + + if (pValue != NULL) { + *pValue = value; + } + + return TRUE; +} + +static void EvoSetCursorImage(NVDevEvoPtr pDevEvo, const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NvU32 ctxdma = pSurfaceEvo ? pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle : 0; + const NvU64 offset = pSurfaceEvo ? pSurfaceEvo->planes[0].offset : 0; + NvU32 headSetControlCursorValue = 0; + NvBool ret; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + nvAssert(pCursorCompParams->colorKeySelect == + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE); + nvAssert(NVBIT(pCursorCompParams->blendingMode[1]) & + NV_EVO2_SUPPORTED_CURSOR_COMP_BLEND_MODES); + nvAssert(!pSurfaceEvo || ctxdma); + + ret = nvEvoGetHeadSetControlCursorValue90(pDevEvo, pSurfaceEvo, + &headSetControlCursorValue); + /* + * The caller should have already validated the surface, so there + * shouldn't be a failure. + */ + if (!ret) { + nvAssert(!"Could not construct HEAD_SET_CONTROL_CURSOR value"); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _HEAD_SET_PRESENT_CONTROL_CURSOR, _MODE, _MONO)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_OFFSETS_CURSOR(head, 0), 4); + // The cursor has its own context DMA. + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_OFFSETS_CURSOR, _ORIGIN, + nvCtxDmaOffsetFromBytes(offset))); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_OFFSETS_CURSOR, _ORIGIN, + nvCtxDmaOffsetFromBytes(offset))); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_CONTEXT_DMAS_CURSOR, _HANDLE, ctxdma)); + // Always set the right cursor context DMA. + // HW will just ignore this if it is not in stereo cursor mode. 
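+    // This is the fourth and final data word of the 4-method
+    // SET_OFFSETS_CURSOR burst above: left/right origins, then
+    // left/right cursor context DMAs.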
+ nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_CONTEXT_DMAS_CURSOR, _HANDLE, ctxdma)); + + switch (pCursorCompParams->blendingMode[1]) { + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA: + headSetControlCursorValue |= + DRF_DEF(917D, _HEAD_SET_CONTROL_CURSOR, _COMPOSITION, _ALPHA_BLEND); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA: + headSetControlCursorValue |= + DRF_DEF(917D, _HEAD_SET_CONTROL_CURSOR, _COMPOSITION, _PREMULT_ALPHA_BLEND); + break; + default: + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "%s: composition mode %d not supported for cursor", + __func__, pCursorCompParams->blendingMode[1]); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTROL_CURSOR(head), 1); + nvDmaSetEvoMethodData(pChannel, headSetControlCursorValue); +} + +static void EvoSetCursorImage91(NVDevEvoPtr pDevEvo, const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams) +{ + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (!((nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)))) { + continue; + } + + /* + * Set up the cursor surface: a cursor surface is allowed only if + * there's a non-NULL ISO ctxdma. + */ + if (pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head] == NULL && + pSurfaceEvo != NULL) { + continue; + } + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + EvoSetCursorImage(pDevEvo, + head, + pSurfaceEvo, + updateState, + pCursorCompParams); + nvPopEvoSubDevMask(pDevEvo); + } +} + +static NvBool EvoValidateCursorSurface90(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo) +{ + return nvEvoGetHeadSetControlCursorValue90(pDevEvo, pSurfaceEvo, NULL); +} + +/* + * The 'sourceFetchRect' parameter is ignored by this function because there are + * no format-dependent restrictions for the source fetch rectangle on EVO. 
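+ * The parameter is accepted here, presumably, only to keep the signature
+ * uniform across the per-generation HAL implementations.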
+ */ +static NvBool EvoValidateWindowFormat90( + const enum NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut) +{ + const NvU32 hwFormat = nvHwFormatFromKmsFormat90(format); + + if (hwFormat == 0) { + return FALSE; + } + + if (hwFormatOut != NULL) { + *hwFormatOut = hwFormat; + } + + return TRUE; +} + +static void EvoInitCompNotifier3(const NVDispEvoRec *pDispEvo, int idx) +{ + nvWriteEvoCoreNotifier(pDispEvo, NV917D_CORE_NOTIFIER_3_COMPLETION_0 + idx, + DRF_DEF(917D_CORE_NOTIFIER_3, _COMPLETION_0, _DONE, _FALSE)); +} + +static NvBool EvoIsCompNotifierComplete3(NVDispEvoPtr pDispEvo, int idx) { + return nvEvoIsCoreNotifierComplete(pDispEvo, + NV917D_CORE_NOTIFIER_3_COMPLETION_0 + idx, + DRF_BASE(NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE), + DRF_EXTENT(NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE), + NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE); +} + +static void EvoWaitForCompNotifier3(const NVDispEvoRec *pDispEvo, int idx) +{ + nvEvoWaitForCoreNotifier(pDispEvo, NV917D_CORE_NOTIFIER_3_COMPLETION_0 + idx, + DRF_BASE(NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE), + DRF_EXTENT(NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE), + NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE); +} + +static void EvoSetDither91(NVDispEvoPtr pDispEvo, const int head, + const NvBool enabled, const NvU32 type, + const NvU32 algo, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 ditherControl; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (enabled) { + ditherControl = DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _ENABLE, _ENABLE); + + switch (type) { + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _BITS, _DITHER_TO_6_BITS); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _BITS, _DITHER_TO_8_BITS); + break; + default: + nvAssert(!"Unknown ditherType"); + // Fall through + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF: + ditherControl = NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE; + break; + } + + } else { + ditherControl = DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _ENABLE, _DISABLE); + } + + switch (algo) { + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_ERR_ACC: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _MODE, _STATIC_ERR_ACC); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _MODE, _DYNAMIC_2X2); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _MODE, _STATIC_2X2); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _MODE, _TEMPORAL); + break; + default: + nvAssert(!"Unknown DitherAlgo"); + // Fall through + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN: + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_ERR_ACC: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _MODE, _DYNAMIC_ERR_ACC); + break; + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_DITHER_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, ditherControl); +} + +static void EvoSetStallLock94(NVDispEvoPtr pDispEvo, const int head, + NvBool enable, NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if 
(enable) {
+        nvDmaSetStartEvoMethod(pChannel, NV947D_HEAD_SET_STALL_LOCK(head), 1);
+        nvDmaSetEvoMethodData(pChannel,
+            DRF_DEF(947D, _HEAD_SET_STALL_LOCK, _ENABLE, _TRUE) |
+            DRF_DEF(947D, _HEAD_SET_STALL_LOCK, _MODE, _ONE_SHOT) |
+            DRF_DEF(947D, _HEAD_SET_STALL_LOCK, _LOCK_PIN, _UNSPECIFIED) |
+            DRF_DEF(947D, _HEAD_SET_STALL_LOCK, _UNSTALL_MODE, _LINE_LOCK));
+    } else {
+        nvDmaSetStartEvoMethod(pChannel, NV947D_HEAD_SET_STALL_LOCK(head), 1);
+        nvDmaSetEvoMethodData(pChannel,
+            DRF_DEF(947D, _HEAD_SET_STALL_LOCK, _ENABLE, _FALSE));
+    }
+}
+
+static NvBool ForceIdleBaseChannel(
+    NVDevEvoPtr pDevEvo,
+    NVEvoChannelPtr pChannel,
+    NvU32 sd)
+{
+    NV5070_CTRL_CMD_STOP_BASE_PARAMS stopParams = { };
+    NvNotification *pNotifyData = pChannel->notifiersDma[sd].subDeviceAddress[sd];
+    NvU64 startTime = 0;
+    const NvU32 timeout = 2000000; // 2 seconds
+    NvU32 ret;
+
+    nvAssert((pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0);
+
+    pNotifyData->status = NV5070_NOTIFICATION_STATUS_IN_PROGRESS;
+
+    stopParams.base.subdeviceIndex = sd;
+    stopParams.channelInstance = pChannel->instance;
+    stopParams.notifyMode = NV5070_CTRL_CMD_STOP_BASE_NOTIFY_MODE_WRITE;
+    stopParams.hNotifierCtxDma = pChannel->notifiersDma[sd].surfaceDesc.ctxDmaHandle;
+    stopParams.offset = 0;
+    stopParams.hEvent = 0;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayHandle,
+                         NV5070_CTRL_CMD_STOP_BASE,
+                         &stopParams, sizeof(stopParams));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"STOP_BASE failed");
+        return FALSE;
+    }
+
+    do {
+        if (pNotifyData->status == NV5070_NOTIFICATION_STATUS_DONE_SUCCESS) {
+            break;
+        }
+
+        if (nvExceedsTimeoutUSec(pDevEvo, &startTime, timeout)) {
+            nvAssert(!"STOP_BASE timed out");
+            return FALSE;
+        }
+
+        nvkms_yield();
+
+    } while (TRUE);
+
+    return TRUE;
+}
+
+static NvBool ForceIdleOverlayChannel(
+    NVDevEvoPtr pDevEvo,
+    NVEvoChannelPtr pChannel,
+    NvU32 sd)
+{
+    NV5070_CTRL_CMD_STOP_OVERLAY_PARAMS stopParams = { };
+    NvNotification *pNotifyData = pChannel->notifiersDma[sd].subDeviceAddress[sd];
+    NvU64 startTime = 0;
+    const NvU32 timeout = 2000000; // 2 seconds
+    NvU32 ret;
+
+    nvAssert((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0);
+
+    pNotifyData->status = NV5070_NOTIFICATION_STATUS_IN_PROGRESS;
+
+    stopParams.base.subdeviceIndex = sd;
+    stopParams.channelInstance = pChannel->instance;
+    stopParams.notifyMode = NV5070_CTRL_CMD_STOP_OVERLAY_NOTIFY_MODE_WRITE;
+    stopParams.hNotifierCtxDma = pChannel->notifiersDma[sd].surfaceDesc.ctxDmaHandle;
+    stopParams.offset = 0;
+    stopParams.hEvent = 0;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayHandle,
+                         NV5070_CTRL_CMD_STOP_OVERLAY,
+                         &stopParams, sizeof(stopParams));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"STOP_OVERLAY failed");
+        return FALSE;
+    }
+
+    do {
+        if (pNotifyData->status == NV5070_NOTIFICATION_STATUS_DONE_SUCCESS) {
+            break;
+        }
+
+        if (nvExceedsTimeoutUSec(pDevEvo, &startTime, timeout)) {
+            nvAssert(!"STOP_OVERLAY timed out");
+            return FALSE;
+        }
+
+        nvkms_yield();
+
+    } while (TRUE);
+
+    return TRUE;
+}
+
+static NvBool EvoForceIdleSatelliteChannel90(
+    NVDevEvoPtr pDevEvo,
+    const NVEvoIdleChannelState *idleChannelState)
+{
+    NvU32 head, sd;
+    NvBool ret = TRUE;
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        /*
+         * Forcing a channel to be idle is currently only implemented for
+         * base and overlay.
+ */ + if ((idleChannelState->subdev[sd].channelMask & + ~(NV_EVO_CHANNEL_MASK_BASE_ALL | + NV_EVO_CHANNEL_MASK_OVERLAY_ALL)) != 0) { + + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Forcing channel idle only implemented for base and overlay"); + return FALSE; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + const NVEvoChannelMask thisBaseMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE); + const NVEvoChannelMask thisOverlayMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _OVERLAY, head, _ENABLE); + + if (idleChannelState->subdev[sd].channelMask & + thisBaseMask) { + + NVEvoChannelPtr pBaseChannel = pDevEvo->base[head]; + + if (!ForceIdleBaseChannel(pDevEvo, pBaseChannel, sd)) { + ret = FALSE; + } + } + + if (idleChannelState->subdev[sd].channelMask & + thisOverlayMask) { + + NVEvoChannelPtr pOverlayChannel = pDevEvo->overlay[head]; + + if (!ForceIdleOverlayChannel(pDevEvo, pOverlayChannel, sd)) { + ret = FALSE; + } + } + } + } + + return ret; +} + +static NvBool EvoAllocRmCtrlObject90(NVDevEvoPtr pDevEvo) +{ + /* Nothing to do for pre-nvdisplay */ + return TRUE; +} + +static void EvoFreeRmCtrlObject90(NVDevEvoPtr pDevEvo) +{ + /* Nothing to do for pre-nvdisplay */ +} + +static void EvoSetImmPointOut91(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd, + NVEvoUpdateState *updateState, + NvU16 x, NvU16 y) +{ + GK104DispOverlayImmControlPio *pOverlayImm = + pChannel->imm.u.pio->control[sd]; + + /* The only immediate channel we have is overlay. */ + nvAssert((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0); + nvAssert(pChannel->imm.type == NV_EVO_IMM_CHANNEL_PIO); + nvAssert(pOverlayImm != NULL); + + /* Left eye */ + pOverlayImm->SetPointsOut[0] = + DRF_NUM(917B, _SET_POINTS_OUT, _X, x) | + DRF_NUM(917B, _SET_POINTS_OUT, _Y, y); + + pOverlayImm->Update = + DRF_DEF(917B, _UPDATE, _INTERLOCK_WITH_CORE, _DISABLE); +} + +static void EvoStartHeadCRC32Capture90(NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NVConnectorEvoPtr pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + NvU32 head, + NvU32 sd, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 dmaCtx = pDma->surfaceDesc.ctxDmaHandle; + NvU32 orOutput = 0; + + /* These method should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + switch (pConnectorEvo->or.type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC: + orOutput = + NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(orIndex); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR: + if (protocol == NVKMS_PROTOCOL_SOR_DP_A || + protocol == NVKMS_PROTOCOL_SOR_DP_B) { + orOutput = + NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(head); + } else { + orOutput = + NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(orIndex); + } + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR: + orOutput = + NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(orIndex); + break; + default: + nvAssert(!"Invalid pConnectorEvo->or.type"); + break; + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMA_CRC(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_CONTEXT_DMA_CRC, _HANDLE, dmaCtx)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CRC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_CRC_CONTROL, _PRIMARY_OUTPUT, orOutput) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _SECONDARY_OUTPUT, _NONE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _CONTROLLING_CHANNEL, _CORE) | + 
DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _EXPECT_BUFFER_COLLAPSE, _FALSE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _TIMESTAMP_MODE, _FALSE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _FLIPLOCK_MODE, _FALSE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE)); + + /* Reset the CRC notifier */ + nvEvoResetCRC32Notifier(pDma->subDeviceAddress[sd], + NV917D_NOTIFIER_CRC_1_STATUS_0, + DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_DONE), + NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_FALSE); +} + +static void EvoStopHeadCRC32Capture90(NVDevEvoPtr pDevEvo, + NvU32 head, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMA_CRC(head), 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CRC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _PRIMARY_OUTPUT, _NONE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _SECONDARY_OUTPUT, _NONE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _CONTROLLING_CHANNEL, _CORE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _EXPECT_BUFFER_COLLAPSE, _FALSE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _TIMESTAMP_MODE, _FALSE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _FLIPLOCK_MODE, _FALSE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _WIDE_PIPE_CRC, _DISABLE)); +} + +/*! + * Queries the current head's CRC Notifier and returns its values if successful. + * + * First waits for the hardware to finish writing to the CRC32Notifier, then + * reads the Compositor and SF/OR CRCs from up to entry_count frames. + * The Crc fields in the input array crc32 should be calloc'd to 0s.
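+ * For example, with entry_count == 1, one frame's compositor and output + * CRCs are read into crc32->compositorCrc32[0] and crc32->outputCrc32[0].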
+ * + * \param[in] pDevEvo NVKMS device pointer + * \param[in] pDma Pointer to DMA-mapped memory + * \param[in] sd Subdevice index + * \param[in] entry_count Number of independent frames to read CRCs from + * \param[out] crc32 Contains pointers to CRC output arrays + * \param[out] numCRC32 Number of CRC frames successfully read from DMA + * + * \return Returns TRUE if CRCs were successfully read from DMA, + * otherwise FALSE + */ +static NvBool EvoQueryHeadCRC32_90(NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NvU32 sd, + NvU32 entry_count, + CRC32NotifierCrcOut *crc32, + NvU32 *numCRC32) +{ + volatile NvU32 *pCRC32Notifier = pDma->subDeviceAddress[sd]; + const NvU32 entry_stride = + NV917D_NOTIFIER_CRC_1_CRC_ENTRY1_8 - NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4; + // Define how many/which variables to read from each CRCNotifierEntry struct + const CRC32NotifierEntryRec field_info[NV_EVO2_NUM_CRC_FIELDS] = { + { + .field_offset = NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_3, + .field_base_bit = + DRF_BASE(NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_3_COMPOSITOR_CRC), + .field_extent_bit = + DRF_EXTENT(NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_3_COMPOSITOR_CRC), + .field_frame_values = crc32->compositorCrc32 + }, + { + .field_offset = NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4, + .field_base_bit = + DRF_BASE(NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4_PRIMARY_OUTPUT_CRC), + .field_extent_bit = + DRF_EXTENT(NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4_PRIMARY_OUTPUT_CRC), + .field_frame_values = crc32->outputCrc32 + } + }; + const CRC32NotifierEntryFlags flag_info[NV_EVO2_NUM_CRC_FLAGS] = { + { + .flag_base_bit = + DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_COUNT), + .flag_extent_bit = + DRF_EXTENT(NV917D_NOTIFIER_CRC_1_STATUS_0_COUNT), + .flag_type = NVEvoCrc32NotifierFlagCount + }, + { + .flag_base_bit = + DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_COMPOSITOR_OVERFLOW), + .flag_extent_bit = + DRF_EXTENT(NV917D_NOTIFIER_CRC_1_STATUS_0_COMPOSITOR_OVERFLOW), + .flag_type = NVEvoCrc32NotifierFlagCrcOverflow + }, + { + .flag_base_bit = + DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_PRIMARY_OUTPUT_OVERFLOW), + .flag_extent_bit = + DRF_EXTENT(NV917D_NOTIFIER_CRC_1_STATUS_0_PRIMARY_OUTPUT_OVERFLOW), + .flag_type = NVEvoCrc32NotifierFlagCrcOverflow + } + }; + + if (!nvEvoWaitForCRC32Notifier(pDevEvo, + pCRC32Notifier, + NV917D_NOTIFIER_CRC_1_STATUS_0, + DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_DONE), + DRF_EXTENT(NV917D_NOTIFIER_CRC_1_STATUS_0_DONE), + NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_TRUE)) { + return FALSE; + } + + *numCRC32 = nvEvoReadCRC32Notifier(pCRC32Notifier, + entry_stride, + entry_count, + NV917D_NOTIFIER_CRC_1_STATUS_0, /* Status offset */ + NV_EVO2_NUM_CRC_FIELDS, + NV_EVO2_NUM_CRC_FLAGS, + field_info, + flag_info); + + + nvEvoResetCRC32Notifier(pCRC32Notifier, + NV917D_NOTIFIER_CRC_1_STATUS_0, + DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_DONE), + NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_FALSE); + + return TRUE; +} + +static void EvoGetScanLine90(const NVDispEvoRec *pDispEvo, + const NvU32 head, + NvU16 *pScanLine, + NvBool *pInBlankingPeriod) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + const void *pDma = pDevEvo->base[head]->pb.control[sd]; + NvU32 scanLine = nvDmaLoadPioMethod(pDma, NV917C_GET_SCANLINE); + + /* + * This method immediately returns the value of the scanline currently being + * read by the DMI. This method is a channel method so it operates + * completely asynchronously from the processing of methods in the + * pushbuffer. A negative value indicates that the DMI is in vertical + * blanking.
Note that this is a PIO method that executes immediately. The + * coding of this value is as follows: + * If Line[15] == 0 (positive value) + * then Line[14:0] is the post-aa resolved line currently being read by + * the DMI. + * If Line[15] == 1 (negative value) + * then Line[14:0] is the number of microseconds remaining in the vertical + * blanking interval. + * Examples: + * Line = 0x0192 - DMI is reading line 402 of the current buffer. + * Line = 0x8023 - DMI is 35 uS from the end of vertical blanking. + */ + + if ((scanLine & NVBIT(15)) == 0) { + *pInBlankingPeriod = FALSE; + *pScanLine = scanLine & DRF_MASK(14:0); + } else { + *pInBlankingPeriod = TRUE; + } +} + +static NvU32 EvoGetActiveViewportOffset94(NVDispEvoRec *pDispEvo, NvU32 head) +{ + NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS params = { }; + NvU32 ret; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + params.base.subdeviceIndex = pDispEvo->displayOwner; + params.head = head; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to query active viewport offset"); + } + + return params.activeViewportBase; +} + +static void +EvoClearSurfaceUsage91(NVDevEvoPtr pDevEvo, NVSurfaceEvoPtr pSurfaceEvo) +{ + NvU32 sd; + NvBool kickOff = FALSE; + NVEvoUpdateState updateState = { }; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + const struct NvKmsCscMatrix zeroCscMatrix = { }; + const NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[sd].headState[head]; + + /* + * In the background, if the given surface is used for the core surface + * programming to satisfy the EVO hardware constraints, then clear + * that usage. Reuse the client-specified base surface for the core + * channel programming. + */ + if (pSurfaceEvo != + pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head]) { + continue; + } + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + EvoPushSetCoreSurfaceMethodsForOneSd(pDevEvo, sd, head, + pSdHeadState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT], + &zeroCscMatrix, &updateState); + nvPopEvoSubDevMask(pDevEvo); + kickOff = TRUE; + } + } + + if (kickOff) { + EvoUpdate91(pDevEvo, &updateState, TRUE /* releaseElv */); + } +} + +static NvBool EvoComputeWindowScalingTaps91(const NVDevEvoRec *pDevEvo, + const NVEvoChannel *pChannel, + NVFlipChannelEvoHwState *pHwState) +{ + /* Window scaling isn't supported on EVO.
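+ * Reject any flip that requests sizeIn != sizeOut, and force 1-tap + * filtering in both dimensions.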
*/ + if ((pHwState->sizeIn.width != pHwState->sizeOut.width) || + (pHwState->sizeIn.height != pHwState->sizeOut.height)) + { + return FALSE; + } + + pHwState->hTaps = NV_EVO_SCALER_1TAP; + pHwState->vTaps = NV_EVO_SCALER_1TAP; + + return TRUE; +} + +static NvU32 GetAccelerators( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd) +{ + NV5070_CTRL_GET_ACCL_PARAMS params = { }; + NvU32 ret; + + params.base.subdeviceIndex = sd; + params.channelClass = pChannel->hwclass; + nvAssert(pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL); + params.channelInstance = pChannel->instance; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_GET_ACCL, + &params, sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to retrieve accelerators"); + return 0; + } + + return params.accelerators; +} + +static NvBool SetAccelerators( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd, + NvU32 accelerators, + NvU32 accelMask) +{ + NV5070_CTRL_SET_ACCL_PARAMS params = { }; + NvU32 ret; + + params.base.subdeviceIndex = sd; + params.channelClass = pChannel->hwclass; + nvAssert(pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL); + params.channelInstance = pChannel->instance; + params.accelerators = accelerators; + params.accelMask = accelMask; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_SET_ACCL, + &params, sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to set accelerators"); + return FALSE; + } + + return TRUE; +} + +static void EvoAccelerateChannel91(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NvU32 sd, + const NvBool trashPendingMethods, + const NvBool unblockMethodsInExecutation, + NvU32 *pOldAccelerators) +{ + NvU32 accelMask = 0x0; + + if (trashPendingMethods) { + accelMask |= NV5070_CTRL_ACCL_TRASH_ONLY; + } + + /* Start with a conservative set of accelerators; may need to add more + * later. */ + if (unblockMethodsInExecutation) { + accelMask |= NV5070_CTRL_ACCL_IGNORE_PI | + NV5070_CTRL_ACCL_SKIP_SEMA | + NV5070_CTRL_ACCL_IGNORE_FLIPLOCK; + } + + if (accelMask == 0x0) { + return; + } + + *pOldAccelerators = GetAccelerators(pDevEvo, pChannel, sd); + + /* Accelerate the base channel. */ + if (!SetAccelerators(pDevEvo, pChannel, sd, accelMask, accelMask)) { + nvAssert(!"Failed to set accelerators"); + } +} + +static void EvoResetChannelAccelerators91(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NvU32 sd, + const NvBool trashPendingMethods, + const NvBool unblockMethodsInExecutation, + NvU32 oldAccelerators) +{ + NvU32 accelMask = 0x0; + + if (trashPendingMethods) { + accelMask |= NV5070_CTRL_ACCL_TRASH_ONLY; + } + + /* Start with a conservative set of accelerators; may need to add more + * later. */ + if (unblockMethodsInExecutation) { + accelMask |= NV5070_CTRL_ACCL_IGNORE_PI | + NV5070_CTRL_ACCL_SKIP_SEMA | + NV5070_CTRL_ACCL_IGNORE_FLIPLOCK; + } + + if (accelMask == 0x0) { + return; + } + + /* Restore the base channel's previously saved accelerators.
*/ + if (!SetAccelerators(pDevEvo, pChannel, sd, oldAccelerators, accelMask)) { + nvAssert(!"Failed to set accelerators"); + } +} + +static NvU32 EvoAllocSurfaceDescriptor90( + NVDevEvoPtr pDevEvo, NVSurfaceDescriptor *pSurfaceDesc, + NvU32 memoryHandle, NvU32 localCtxDmaFlags, + NvU64 limit, + NvBool mapToDisplayRm) +{ + return nvCtxDmaAlloc(pDevEvo, &pSurfaceDesc->ctxDmaHandle, + memoryHandle, + localCtxDmaFlags, limit); +} + +static void EvoFreeSurfaceDescriptor90( + NVDevEvoPtr pDevEvo, + NvU32 deviceHandle, + NVSurfaceDescriptor *pSurfaceDesc) +{ + nvCtxDmaFree(pDevEvo, deviceHandle, &pSurfaceDesc->ctxDmaHandle); +} + +static NvU32 EvoBindSurfaceDescriptor90( + NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel, NVSurfaceDescriptor *pSurfaceDesc) +{ + return nvCtxDmaBind(pDevEvo, pChannel, pSurfaceDesc->ctxDmaHandle); +} + +NVEvoHAL nvEvo97 = { + EvoSetRasterParams91, /* SetRasterParams */ + EvoSetProcAmp97, /* SetProcAmp */ + EvoSetHeadControl90, /* SetHeadControl */ + EvoSetHeadRefClk90, /* SetHeadRefClk */ + EvoHeadSetControlOR90, /* HeadSetControlOR */ + EvoORSetControl90, /* ORSetControl */ + EvoHeadSetDisplayId90, /* HeadSetDisplayId */ + EvoSetUsageBounds90, /* SetUsageBounds */ + EvoUpdate91, /* Update */ + nvEvo1IsModePossible, /* IsModePossible */ + nvEvo1PrePostIMP, /* PrePostIMP */ + EvoSetNotifier90, /* SetNotifier */ + EvoGetCapabilities90, /* GetCapabilities */ + EvoFlip90, /* Flip */ + EvoFlipTransitionWAR90, /* FlipTransitionWAR */ + EvoFillLUTSurface90, /* FillLUTSurface */ + EvoSetOutputLut90, /* SetOutputLut */ + EvoSetOutputScaler90, /* SetOutputScaler */ + EvoSetViewportPointIn90, /* SetViewportPointIn */ + EvoSetViewportInOut90, /* SetViewportInOut */ + EvoSetCursorImage91, /* SetCursorImage */ + EvoValidateCursorSurface90, /* ValidateCursorSurface */ + EvoValidateWindowFormat90, /* ValidateWindowFormat */ + EvoInitCompNotifier3, /* InitCompNotifier */ + EvoIsCompNotifierComplete3, /* IsCompNotifierComplete */ + EvoWaitForCompNotifier3, /* WaitForCompNotifier */ + EvoSetDither91, /* SetDither */ + EvoSetStallLock94, /* SetStallLock */ + NULL, /* SetDisplayRate */ + EvoInitChannel90, /* InitChannel */ + NULL, /* InitDefaultLut */ + EvoInitWindowMapping90, /* InitWindowMapping */ + nvEvo1IsChannelIdle, /* IsChannelIdle */ + nvEvo1IsChannelMethodPending, /* IsChannelMethodPending */ + EvoForceIdleSatelliteChannel90, /* ForceIdleSatelliteChannel */ + EvoForceIdleSatelliteChannel90, /* ForceIdleSatelliteChannelIgnoreLock */ + EvoAccelerateChannel91, /* AccelerateChannel */ + EvoResetChannelAccelerators91, /* ResetChannelAccelerators */ + EvoAllocRmCtrlObject90, /* AllocRmCtrlObject */ + EvoFreeRmCtrlObject90, /* FreeRmCtrlObject */ + EvoSetImmPointOut91, /* SetImmPointOut */ + EvoStartHeadCRC32Capture90, /* StartCRC32Capture */ + EvoStopHeadCRC32Capture90, /* StopCRC32Capture */ + EvoQueryHeadCRC32_90, /* QueryCRC32 */ + EvoGetScanLine90, /* GetScanLine */ + NULL, /* ConfigureVblankSyncObject */ + nvEvo1SetDscParams, /* SetDscParams */ + NULL, /* EnableMidFrameAndDWCFWatermark */ + EvoGetActiveViewportOffset94, /* GetActiveViewportOffset */ + EvoClearSurfaceUsage91, /* ClearSurfaceUsage */ + EvoComputeWindowScalingTaps91, /* ComputeWindowScalingTaps */ + NULL, /* GetWindowScalingCaps */ + NULL, /* SetMergeMode */ + nvEvo1SendHdmiInfoFrame, /* SendHdmiInfoFrame */ + nvEvo1DisableHdmiInfoFrame, /* DisableHdmiInfoFrame */ + nvEvo1SendDpInfoFrameSdp, /* SendDpInfoFrameSdp */ + EvoAllocSurfaceDescriptor90, /* AllocSurfaceDescriptor */ + EvoFreeSurfaceDescriptor90, /* 
FreeSurfaceDescriptor */ + EvoBindSurfaceDescriptor90, /* BindSurfaceDescriptor */ + NULL, /* SetTmoLutSurfaceAddress */ + NULL, /* SetILUTSurfaceAddress */ + NULL, /* SetISOSurfaceAddress */ + NULL, /* SetCoreNotifierSurfaceAddressAndControl */ + NULL, /* SetWinNotifierSurfaceAddressAndControl */ + NULL, /* SetSemaphoreSurfaceAddressAndControl */ + NULL, /* SetAcqSemaphoreSurfaceAddressAndControl */ + { /* caps */ + FALSE, /* supportsNonInterlockedUsageBoundsUpdate */ + FALSE, /* supportsDisplayRate */ + TRUE, /* supportsFlipLockRGStatus */ + FALSE, /* needDefaultLutSurface */ + FALSE, /* hasUnorm10OLUT */ + TRUE, /* supportsImageSharpening */ + FALSE, /* supportsHDMIVRR */ + TRUE, /* supportsCoreChannelSurface */ + FALSE, /* supportsHDMIFRL */ + TRUE, /* supportsSetStorageMemoryLayout */ + FALSE, /* supportsIndependentAcqRelSemaphore */ + TRUE, /* supportsCoreLut */ + FALSE, /* supportsSynchronizedOverlayPositionUpdate */ + FALSE, /* supportsVblankSyncObjects */ + TRUE, /* requiresScalingTapsInBothDimensions */ + FALSE, /* supportsMergeMode */ + FALSE, /* supportsHDMI10BPC */ + FALSE, /* supportsDPAudio192KHz */ + FALSE, /* supportsInputColorSpace */ + FALSE, /* supportsInputColorRange */ + FALSE, /* supportsYCbCr422OverHDMIFRL */ + NV_EVO2_SUPPORTED_DITHERING_MODES, /* supportedDitheringModes */ + sizeof(NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS), /* impStructSize */ + NV_EVO_SCALER_1TAP, /* minScalerTaps */ + 0, /* xEmulatedSurfaceMemoryFormats */ + }, +}; + +NVEvoHAL nvEvo94 = { + EvoSetRasterParams91, /* SetRasterParams */ + EvoSetProcAmp90, /* SetProcAmp */ + EvoSetHeadControl90, /* SetHeadControl */ + EvoSetHeadRefClk90, /* SetHeadRefClk */ + EvoHeadSetControlOR90, /* HeadSetControlOR */ + EvoORSetControl90, /* ORSetControl */ + EvoHeadSetDisplayId90, /* HeadSetDisplayId */ + EvoSetUsageBounds90, /* SetUsageBounds */ + EvoUpdate91, /* Update */ + nvEvo1IsModePossible, /* IsModePossible */ + nvEvo1PrePostIMP, /* PrePostIMP */ + EvoSetNotifier90, /* SetNotifier */ + EvoGetCapabilities90, /* GetCapabilities */ + EvoFlip90, /* Flip */ + EvoFlipTransitionWAR90, /* FlipTransitionWAR */ + EvoFillLUTSurface90, /* FillLUTSurface */ + EvoSetOutputLut90, /* SetOutputLut */ + EvoSetOutputScaler90, /* SetOutputScaler */ + EvoSetViewportPointIn90, /* SetViewportPointIn */ + EvoSetViewportInOut90, /* SetViewportInOut */ + EvoSetCursorImage91, /* SetCursorImage */ + EvoValidateCursorSurface90, /* ValidateCursorSurface */ + EvoValidateWindowFormat90, /* ValidateWindowFormat */ + EvoInitCompNotifier3, /* InitCompNotifier */ + EvoIsCompNotifierComplete3, /* IsCompNotifierComplete */ + EvoWaitForCompNotifier3, /* WaitForCompNotifier */ + EvoSetDither91, /* SetDither */ + EvoSetStallLock94, /* SetStallLock */ + NULL, /* SetDisplayRate */ + EvoInitChannel90, /* InitChannel */ + NULL, /* InitDefaultLut */ + EvoInitWindowMapping90, /* InitWindowMapping */ + nvEvo1IsChannelIdle, /* IsChannelIdle */ + nvEvo1IsChannelMethodPending, /* IsChannelMethodPending */ + EvoForceIdleSatelliteChannel90, /* ForceIdleSatelliteChannel */ + EvoForceIdleSatelliteChannel90, /* ForceIdleSatelliteChannelIgnoreLock */ + EvoAccelerateChannel91, /* AccelerateChannel */ + EvoResetChannelAccelerators91, /* ResetChannelAccelerators */ + EvoAllocRmCtrlObject90, /* AllocRmCtrlObject */ + EvoFreeRmCtrlObject90, /* FreeRmCtrlObject */ + EvoSetImmPointOut91, /* SetImmPointOut */ + EvoStartHeadCRC32Capture90, /* StartCRC32Capture */ + EvoStopHeadCRC32Capture90, /* StopCRC32Capture */ + EvoQueryHeadCRC32_90, /* QueryCRC32 */ + 
EvoGetScanLine90, /* GetScanLine */ + NULL, /* ConfigureVblankSyncObject */ + nvEvo1SetDscParams, /* SetDscParams */ + NULL, /* EnableMidFrameAndDWCFWatermark */ + EvoGetActiveViewportOffset94, /* GetActiveViewportOffset */ + EvoClearSurfaceUsage91, /* ClearSurfaceUsage */ + EvoComputeWindowScalingTaps91, /* ComputeWindowScalingTaps */ + NULL, /* GetWindowScalingCaps */ + NULL, /* SetMergeMode */ + nvEvo1SendHdmiInfoFrame, /* SendHdmiInfoFrame */ + nvEvo1DisableHdmiInfoFrame, /* DisableHdmiInfoFrame */ + nvEvo1SendDpInfoFrameSdp, /* SendDpInfoFrameSdp */ + EvoAllocSurfaceDescriptor90, /* AllocSurfaceDescriptor */ + EvoFreeSurfaceDescriptor90, /* FreeSurfaceDescriptor */ + EvoBindSurfaceDescriptor90, /* BindSurfaceDescriptor */ + NULL, /* SetTmoLutSurfaceAddress */ + NULL, /* SetILUTSurfaceAddress */ + NULL, /* SetISOSurfaceAddress */ + NULL, /* SetCoreNotifierSurfaceAddressAndControl */ + NULL, /* SetWinNotifierSurfaceAddressAndControl */ + NULL, /* SetSemaphoreSurfaceAddressAndControl */ + NULL, /* SetAcqSemaphoreSurfaceAddressAndControl */ + { /* caps */ + FALSE, /* supportsNonInterlockedUsageBoundsUpdate */ + FALSE, /* supportsDisplayRate */ + TRUE, /* supportsFlipLockRGStatus */ + FALSE, /* needDefaultLutSurface */ + FALSE, /* hasUnorm10OLUT */ + TRUE, /* supportsImageSharpening */ + FALSE, /* supportsHDMIVRR */ + TRUE, /* supportsCoreChannelSurface */ + FALSE, /* supportsHDMIFRL */ + TRUE, /* supportsSetStorageMemoryLayout */ + FALSE, /* supportsIndependentAcqRelSemaphore */ + TRUE, /* supportsCoreLut */ + FALSE, /* supportsSynchronizedOverlayPositionUpdate */ + FALSE, /* supportsVblankSyncObjects */ + TRUE, /* requiresScalingTapsInBothDimensions */ + FALSE, /* supportsMergeMode */ + FALSE, /* supportsHDMI10BPC */ + FALSE, /* supportsDPAudio192KHz */ + FALSE, /* supportsInputColorSpace */ + FALSE, /* supportsInputColorRange */ + FALSE, /* supportsYCbCr422OverHDMIFRL */ + NV_EVO2_SUPPORTED_DITHERING_MODES, /* supportedDitheringModes */ + sizeof(NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS), /* impStructSize */ + NV_EVO_SCALER_1TAP, /* minScalerTaps */ + 0, /* xEmulatedSurfaceMemoryFormats */ + }, +}; diff --git a/src/nvidia-modeset/src/nvkms-evo3.c b/src/nvidia-modeset/src/nvkms-evo3.c new file mode 100644 index 0000000..1c4af0d --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-evo3.c @@ -0,0 +1,8596 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file contains implementations of the EVO HAL methods for display class + * 3.x (also known as "nvdisplay"). + */ + +#include "nvkms-dma.h" +#include "nvkms-types.h" +#include "nvkms-rmapi.h" +#include "nvkms-surface.h" +#include "nvkms-softfloat.h" +#include "nvkms-evo.h" +#include "nvkms-evo1.h" +#include "nvkms-evo3.h" +#include "nvkms-prealloc.h" +#include "nv-float.h" +#include "nvkms-dpy.h" +#include "nvkms-vrr.h" +#include "nvkms-ctxdma.h" +#include "displayport/displayport.h" + +#include + +#include // NVC372_DISPLAY_SW +#include // NVC373_DISP_CAPABILITIES +#include // NVC37B_WINDOW_IMM_CHANNEL_DMA +#include // NVC37D_CORE_CHANNEL_DMA +#include // NVC37D_NOTIFIER_CRC +#include // NVC37D_HEAD_SET_SW_SPARE_* +#include // NVC37E_WINDOW_CHANNEL_DMA +#include // NVC573_DISP_CAPABILITIES +#include // NVC57D_CORE_CHANNEL_DMA +#include // NVC57E_WINDOW_CHANNEL_DMA +#include +#include // NVC673_DISP_CAPABILITIES +#include // NVC67D_CORE_CHANNEL_DMA +#include // NVC67E_WINDOW_CHANNEL_DMA + +#include +#include +#include + +#define NV_EVO3_X_EMULATED_SURFACE_MEMORY_FORMATS_C3 \ + (NVBIT64(NvKmsSurfaceMemoryFormatRF16GF16BF16XF16)) + +#define NV_EVO3_X_EMULATED_SURFACE_MEMORY_FORMATS_C5 \ + (NVBIT64(NvKmsSurfaceMemoryFormatRF16GF16BF16XF16)) + +/** Number of CRC fields supported by NVC37D hardware (SF/SOR, Comp, RG) */ +#define NV_EVO3_NUM_CRC_FIELDS 3 + +/** Number of CRC flags supported by NVC37D hardware (SF/SOR, Comp, RG overflow, and Count) */ +#define NV_EVO3_NUM_CRC_FLAGS 4 + +enum FMTCoeffType +{ + FMT_COEFF_TYPE_IDENTITY = 0, + + FMT_COEFF_TYPE_REC601_YUV_8BPC_LTD_TO_RGB_16BPC_FULL, + FMT_COEFF_TYPE_REC601_YUV_8BPC_FULL_TO_RGB_16BPC_FULL, + FMT_COEFF_TYPE_REC601_YUV_10BPC_LTD_TO_RGB_16BPC_FULL, + FMT_COEFF_TYPE_REC601_YUV_10BPC_FULL_TO_RGB_16BPC_FULL, + FMT_COEFF_TYPE_REC601_YUV_12BPC_LTD_TO_RGB_16BPC_FULL, + FMT_COEFF_TYPE_REC601_YUV_12BPC_FULL_TO_RGB_16BPC_FULL, + + FMT_COEFF_TYPE_REC709_YUV_8BPC_LTD_TO_RGB_16BPC_FULL, + FMT_COEFF_TYPE_REC709_YUV_8BPC_FULL_TO_RGB_16BPC_FULL, + FMT_COEFF_TYPE_REC709_YUV_10BPC_LTD_TO_RGB_16BPC_FULL, + FMT_COEFF_TYPE_REC709_YUV_10BPC_FULL_TO_RGB_16BPC_FULL, + FMT_COEFF_TYPE_REC709_YUV_12BPC_LTD_TO_RGB_16BPC_FULL, + FMT_COEFF_TYPE_REC709_YUV_12BPC_FULL_TO_RGB_16BPC_FULL, + + FMT_COEFF_TYPE_REC2020_YUV_8BPC_LTD_TO_RGB_16BPC_FULL, + FMT_COEFF_TYPE_REC2020_YUV_8BPC_FULL_TO_RGB_16BPC_FULL, + FMT_COEFF_TYPE_REC2020_YUV_10BPC_LTD_TO_RGB_16BPC_FULL, + FMT_COEFF_TYPE_REC2020_YUV_10BPC_FULL_TO_RGB_16BPC_FULL, + FMT_COEFF_TYPE_REC2020_YUV_12BPC_LTD_TO_RGB_16BPC_FULL, + FMT_COEFF_TYPE_REC2020_YUV_12BPC_FULL_TO_RGB_16BPC_FULL, + + // FMT is always identity for RGB to avoid possible calculation error.
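+ + // Note: each FMTMatrix row below encodes a 3x4 matrix (three coefficients + // plus an offset per output channel); the values appear to be + // two's-complement fixed point with 16 fractional bits (0x10000 == 1.0).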
+ + // must be the last entry + FMT_COEFF_TYPE_MAX +}; + +static const NvU32 FMTMatrix[FMT_COEFF_TYPE_MAX][12] = +{ + // FMT_COEFF_TYPE_IDENTITY + { 0x10000, 0, 0, 0, 0, 0x10000, 0, 0, 0, 0, 0x10000, 0 }, + + // FMT_COEFF_TYPE_REC601_YUV_8BPC_LTD_TO_RGB_16BPC_FULL + { 0x19A29, 0x12B3C, 0, 0x1F2038, 0x1F2F14, 0x12B3C, 0x1F9B52, 0x8819, 0, 0x12B3C, 0x20668, 0x1EEA18 }, + // FMT_COEFF_TYPE_REC601_YUV_8BPC_FULL_TO_RGB_16BPC_FULL + { 0x1684C, 0x100FD, 0, 0x1F4D42, 0x1F487A, 0x100FD, 0x1FA790, 0x86EB, 0, 0x100FD, 0x1C762, 0x1F1E16 }, + // FMT_COEFF_TYPE_REC601_YUV_10BPC_LTD_TO_RGB_16BPC_FULL + { 0x19A29, 0x12B3C, 0, 0x1F2038, 0x1F2F14, 0x12B3C, 0x1F9B52, 0x8819, 0, 0x12B3C, 0x20668, 0x1EEA18 }, + // FMT_COEFF_TYPE_REC601_YUV_10BPC_FULL_TO_RGB_16BPC_FULL + { 0x1673E, 0x1003C, 0, 0x1F4CBB, 0x1F4903, 0x1003C, 0x1FA7D2, 0x8751, 0, 0x1003C, 0x1C60C, 0x1F1D6B }, + // FMT_COEFF_TYPE_REC601_YUV_12BPC_LTD_TO_RGB_16BPC_FULL + { 0x19A29, 0x12B3C, 0, 0x1F2038, 0x1F2F14, 0x12B3C, 0x1F9B52, 0x8819, 0, 0x12B3C, 0x20668, 0x1EEA18 }, + // FMT_COEFF_TYPE_REC601_YUV_12BPC_FULL_TO_RGB_16BPC_FULL + { 0x166FA, 0x1000C, 0, 0x1F4C99, 0x1F4926, 0x1000C, 0x1FA7E3, 0x876B, 0, 0x1000C, 0x1C5B7, 0x1F1D41 }, + + // FMT_COEFF_TYPE_REC709_YUV_8BPC_LTD_TO_RGB_16BPC_FULL + { 0x1CCB7, 0x12B3C, 0, 0x1F06F1, 0x1F770C, 0x12B3C, 0x1FC933, 0x4D2D, 0, 0x12B3C, 0x21EDD, 0x1EDDDE }, + // FMT_COEFF_TYPE_REC709_YUV_8BPC_FULL_TO_RGB_16BPC_FULL + { 0x194B4, 0x100FD, 0, 0x1F373A, 0x1F87B3, 0x100FD, 0x1FCFDC, 0x5390, 0, 0x100FD, 0x1DCDE, 0x1F136E }, + // FMT_COEFF_TYPE_REC709_YUV_10BPC_LTD_TO_RGB_16BPC_FULL + { 0x1CCB7, 0x12B3C, 0, 0x1F06F1, 0x1F770C, 0x12B3C, 0x1FC933, 0x4D2D, 0, 0x12B3C, 0x21EDD, 0x1EDDDE }, + // FMT_COEFF_TYPE_REC709_YUV_10BPC_FULL_TO_RGB_16BPC_FULL + { 0x19385, 0x1003C, 0, 0x1F36A3, 0x1F880D, 0x1003C, 0x1FD000, 0x53CF, 0, 0x1003C, 0x1DB78, 0x1F12BB }, + // FMT_COEFF_TYPE_REC709_YUV_12BPC_LTD_TO_RGB_16BPC_FULL + { 0x1CCB7, 0x12B3C, 0, 0x1F06F1, 0x1F770C, 0x12B3C, 0x1FC933, 0x4D2D, 0, 0x12B3C, 0x21EDD, 0x1EDDDE }, + // FMT_COEFF_TYPE_REC709_YUV_12BPC_FULL_TO_RGB_16BPC_FULL + { 0x19339, 0x1000C, 0, 0x1F367D, 0x1F8823, 0x1000C, 0x1FD009, 0x53DF, 0, 0x1000C, 0x1DB1F, 0x1F128E }, + + // FMT_COEFF_TYPE_REC2020_YUV_8BPC_LTD_TO_RGB_16BPC_FULL + { 0x1AF66, 0x12B3C, 0, 0x1F1599, 0x1F58D9, 0x12B3C, 0x1FCFDC, 0x58F2, 0, 0x12B3C, 0x22669, 0x1EDA18 }, + // FMT_COEFF_TYPE_REC2020_YUV_8BPC_FULL_TO_RGB_16BPC_FULL + { 0x17AF4, 0x100FD, 0, 0x1F4401, 0x1F6D2B, 0x100FD, 0x1FD5B6, 0x5DD2, 0, 0x100FD, 0x1E37F, 0x1F1024 }, + // FMT_COEFF_TYPE_REC2020_YUV_10BPC_LTD_TO_RGB_16BPC_FULL + { 0x1AF66, 0x12B3C, 0, 0x1F1599, 0x1F58D9, 0x12B3C, 0x1FCFDC, 0x58F2, 0, 0x12B3C, 0x22669, 0x1EDA18 }, + // FMT_COEFF_TYPE_REC2020_YUV_10BPC_FULL_TO_RGB_16BPC_FULL + { 0x179D8, 0x1003C, 0, 0x1F4372, 0x1F6D99, 0x1003C, 0x1FD5D6, 0x5E19, 0, 0x1003C, 0x1E214, 0x1F0F6E }, + // FMT_COEFF_TYPE_REC2020_YUV_12BPC_LTD_TO_RGB_16BPC_FULL + { 0x1AF66, 0x12B3C, 0, 0x1F1599, 0x1F58D9, 0x12B3C, 0x1FCFDC, 0x58F2, 0, 0x12B3C, 0x22669, 0x1EDA18 }, + // FMT_COEFF_TYPE_REC2020_YUV_12BPC_FULL_TO_RGB_16BPC_FULL + { 0x17991, 0x1000C, 0, 0x1F434F, 0x1F6DB5, 0x1000C, 0x1FD5DE, 0x5E2B, 0, 0x1000C, 0x1E1BA, 0x1F0F41 }, +}; + +static void SetCsc00MatrixC5(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix); +static void SetCsc01MatrixC5(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix); +static void SetCsc10MatrixC5(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix); +static void SetCsc11MatrixC5(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix 
*matrix); +static void +UpdateCompositionC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const struct NvKmsCompositionParams *pCompParams, + NVEvoUpdateState *updateState, + enum NvKmsSurfaceMemoryFormat format); +static void +UpdateCompositionC5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const struct NvKmsCompositionParams *pCompParams, + NVEvoUpdateState *updateState, + NvBool bypassComposition, + enum NvKmsSurfaceMemoryFormat format); + +static void +EvoSetupIdentityOutputLutC5(NVEvoLutDataRec *pData, + enum NvKmsLUTState *lutState, + NvU32 *lutSize, + NvBool *isLutModeVss); + +static void +EvoSetupIdentityBaseLutC5(NVEvoLutDataRec *pData, + enum NvKmsLUTState *lutState, + NvU32 *lutSize, + NvBool *isLutModeVss); + +ct_assert(NV_EVO_LOCK_PIN_0 > + NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1); + +/* nvdisplay has a maximum of 2 eyes and 3 planes per surface */ +ct_assert((NVKMS_MAX_EYES * NVKMS_MAX_PLANES_PER_SURFACE) == 6); + +/* Windows support all composition modes. */ +#define NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES \ + ((1 << NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA)) + +#define NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C3 \ + (DRF_DEF(C37D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_LUT, _USAGE_1025) | \ + DRF_DEF(C37D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_2) | \ + DRF_DEF(C37D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE)) + +static inline NvU8 EyeAndPlaneToCtxDmaIdx(const NvU8 eye, const NvU8 plane) +{ + /* + * See the definition of the SetContextDmaIso and SetOffset methods in the + * relevant nvdClass_01.mfs file to see how these method array indices are + * mapped. + */ + nvAssert((eye < NVKMS_MAX_EYES) && (plane < NVKMS_MAX_PLANES_PER_SURFACE)); + + return eye + (plane << 1); +} + +static void InitChannelCapsC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel) +{ + if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0) { + static const NVEvoChannelCaps WindowCaps = { + /* + * Window classes always support timestamp flips, and allow full + * use of the 64-bit timestamp value. + */ + .validTimeStampBits = 64, + /* Window classes always support tearing flips. */ + .tearingFlips = TRUE, + .vrrTearingFlips = TRUE, + /* Window classes support per-eye stereo flips. */ + .perEyeStereoFlips = TRUE, + }; + + pChannel->caps = WindowCaps; + } +} + +// The coefficient values are obtained from bug 1953108 comment 10 +// Per MFS: However since all 5 coefficients have to add up to 1.0, only 4 need to be specified, and +// HW can derive the missing one. The center coefficient is the one that is left out, so +// if the 5 taps need weights (c0, c1, c2, c3, c4) then only (c0, c1, c3, c4) are stored, +// and c2 is calculated by HW. +// Phase 0 is the center phase and the corresponding filter kernel is symmetrical: +// c0=c4, c1=c3 --> only c0 and c1 need to be stored. +// Phase 16 (and -16) is the edge phase and the corresponding filter kernels are: +// (0, c0, c1, c1, c0) for phase +16 +// (c0, c1, c1, c0, 0) for phase -16 +// The difference between +16 and -16 is automatically handled by HW. The table only needs +// to store c0 and c1 for either case. 
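+// (For instance, in the ratio=2 table below, row 0 stores {3, 60, 20, 108}: +// c0/c1 of the symmetric phase-0 kernel followed by c0/c1 of phase +/-16.)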
+// Therefore, based on MFS above, the matrix below contains the values loaded to HW. +// Real Phase 0 is commented for easy reference. +// Also, phase 16 values (last row) are commented, but its C0,C1 values are loaded in row 0/phase 0. +const NvU32 scalerTaps5Coeff[NUM_SCALER_RATIOS][NUM_TAPS5_COEFF_PHASES][NUM_TAPS5_COEFF_VALUES] = +{ + // ratio = 1 + {{ 0 , 0 , -16 , 144}, // real phase 0:{ 0, 0, /*256,*/ 0, 0 }, + { 0 , -5 , /*255,*/ 5 , 0}, + { 0 , -9 , /*254,*/ 11 , 0}, + { -1 , -12 , /*251,*/ 18 , -1}, + { -1 , -15 , /*248,*/ 25 , -1}, + { -1 , -18 , /*243,*/ 33 , -2}, + { -2 , -20 , /*238,*/ 42 , -3}, + { -2 , -21 , /*232,*/ 51 , -3}, + { -3 , -22 , /*225,*/ 60 , -5}, + { -3 , -22 , /*217,*/ 70 , -6}, + { -4 , -22 , /*208,*/ 81 , -7}, + { -4 , -22 , /*199,*/ 91 , -9}, + { -5 , -21 , /*190,*/ 102 , -10}, + { -5 , -20 , /*180,*/ 113 , -12}, + { -5 , -19 , /*169,*/ 125 , -13}, + { -6 , -18 , /*158,*/ 136 , -15} + // real phase 16: { 0 , -16 , 144, 144 , -16 } + }, + // ratio = 2 + {{ 3, 60 , 20 , 108 }, // real phase 0: {3 , 60 , 130 , 60 , 3 }, + { 3 , 57 , /*130,*/ 63 , 4 }, + { 2 , 54 , /*130,*/ 66 , 4 }, + { 2 , 51 , /*129,*/ 69 , 5 }, + { 2 , 48 , /*128,*/ 72 , 6 }, + { 1 , 45 , /*128,*/ 75 , 7 }, + { 1 , 43 , /*127,*/ 78 , 7 }, + { 1 , 40 , /*125,*/ 81 , 8 }, + { 1 , 37 , /*124,*/ 84 , 9 }, + { 0 , 35 , /*122,*/ 88 , 10 }, + { 0 , 33 , /*121,*/ 91 , 12 }, + { 0 , 30 , /*119,*/ 94 , 13 }, + { 0 , 28 , /*117,*/ 97 , 14 }, + { 0 , 26 , /*115,*/ 99 , 16 }, + { 0 , 24 , /*112,*/ 102 , 17 }, + { 0 , 22 , /*110,*/ 105 , 19 }, + // real phase 16:{0 , 20 , 108 , 108 , 20 }, + }, + // ratio = 4 + {{ 4 , 62 , 23 , 105 }, // real phase 0: {4 , 62 , 124 , 62 , 4 , + { 4 , 59 , /*124,*/ 64 , 5 }, + { 3 , 56 , /*124,*/ 67 , 6 }, + { 3 , 53 , /*123,*/ 70 , 7 }, + { 2 , 51 , /*123,*/ 73 , 8 }, + { 2 , 48 , /*122,*/ 76 , 8 }, + { 2 , 45 , /*121,*/ 79 , 9 }, + { 1 , 43 , /*120,*/ 81 , 10 }, + { 1 , 40 , /*119,*/ 84 , 12 }, + { 1 , 38 , /*117,*/ 87 , 13 }, + { 1 , 36 , /*116,*/ 90 , 14 }, + { 0 , 34 , /*114,*/ 92 , 15 }, + { 0 , 31 , /*113,*/ 95 , 17 }, + { 0 , 29 , /*111,*/ 97 , 18 }, + { 0 , 27 , /*109,*/ 100 , 20 }, + { 0 , 25 , /*107,*/ 102 , 22 }, + // real phase 16: {0 , 23 , 105 , 105 , 23 }, + } +}; + +void nvInitScalerCoefficientsPrecomp5(NVEvoChannelPtr pChannel, + NvU32 coeff, NvU32 index) +{ + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_INPUT_SCALER_COEFF_VALUE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_INPUT_SCALER_COEFF_VALUE, _DATA, coeff) | + DRF_NUM(C57E, _SET_INPUT_SCALER_COEFF_VALUE, _INDEX, index)); +} + +static void InitScalerCoefficientsPostcomp5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 coeff, NvU32 index) +{ + NvU32 h; + + for (h = 0; h < pDevEvo->numHeads; h++) { + nvDmaSetStartEvoMethod(pChannel, + NVC57D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE(h), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_OUTPUT_SCALER_COEFF_VALUE, _DATA, coeff) | + DRF_NUM(C57D, _HEAD_SET_OUTPUT_SCALER_COEFF_VALUE, _INDEX, index)); + } +} + +static void InitTaps5ScalerCoefficientsC5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvBool isPrecomp) +{ + NvU8 ratio; + + if (isPrecomp) { + const NVEvoWindowCaps *pWinCaps = + &pDevEvo->gpus[0].capabilities.window[pChannel->instance]; + const NVEvoScalerCaps *pScalerCaps = &pWinCaps->scalerCaps; + + if (!pScalerCaps->present) { + return; + } + } + + for (ratio = 0; ratio < NUM_SCALER_RATIOS; ratio++) { + NvU8 phase; + for (phase = 0; phase < NUM_TAPS5_COEFF_PHASES; phase++) { + NvU8 coeffIdx; + for 
(coeffIdx = 0; coeffIdx < NUM_TAPS5_COEFF_VALUES; coeffIdx++) { + NvU32 coeff = scalerTaps5Coeff[ratio][phase][coeffIdx]; + NvU32 index = ratio << 6 | phase << 2 | coeffIdx; + + if (isPrecomp) { + nvInitScalerCoefficientsPrecomp5(pChannel, coeff, index); + } else { + InitScalerCoefficientsPostcomp5(pDevEvo, + pChannel, coeff, index); + } + } + } + } +} + +/* + * This is a 3x4 matrix with S5.14 coefficients (truncated from S5.16 + * SW-specified values). + */ +static const struct NvKmsCscMatrix Rec2020RGBToLMS = {{ + { 0x697c, 0x8620, 0x1064, 0 }, + { 0x2aa8, 0xb86c, 0x1ce8, 0 }, + { 0x62c, 0x1354, 0xe684, 0 }, +}}; + +/* + * This is a 3x4 matrix with S5.14 coefficients (truncated from S5.16 + * SW-specified values). + */ +static const struct NvKmsCscMatrix Rec709RGBToLMS = {{ + { 0x4bb8, 0x9f84, 0x14c8, 0 }, + { 0x27fc, 0xba2c, 0x1dd4, 0 }, + { 0x8fc, 0x2818, 0xcef0, 0 }, +}}; + +/* + * This is a 3x4 matrix with S5.14 coefficients (truncated from S5.16 + * SW-specified values). + */ +static const struct NvKmsCscMatrix LMSToRec709RGB = {{ + { 0x62c48, 0x1aadf4, 0x25a8, 0 }, + { 0x1ead18, 0x28f64, 0x1fc390, 0 }, + { 0x1ffd00, 0x1fbc34, 0x146c4, 0 }, +}}; + +/* + * This is a 3x4 matrix with S5.14 coefficients (truncated from S5.16 + * SW-specified values). + */ +static const struct NvKmsCscMatrix LMSToRec2020RGB = {{ + { 0x36fc0, 0x1d7e54, 0x11e0, 0 }, + { 0x1f3584, 0x1fbc8, 0x1fcebc, 0 }, + { 0x1ff964, 0x1fe6a4, 0x11ff4, 0 }, +}}; + +/* + * This is a 3x4 matrix with S5.14 coefficients (truncated from S5.16 + * SW-specified values). + */ +static const struct NvKmsCscMatrix LMSToICtCp = {{ + { 0x460d0, 0x1bc120, 0x1fde10, 0x8000 }, + { 0x8000, 0x8000, 0, 0 }, + { 0x19d20, 0x1cad30, 0x1b5b0, 0x8000 }, +}}; + +/* + * This is a 3x4 matrix with S5.14 coefficients (truncated from S5.16 + * SW-specified values). + */ +static const struct NvKmsCscMatrix ICtCpToLMS = {{ + { 0x1c6c, 0x10000, 0x234, 0x1ff0b0 }, + { 0x1fe394, 0x10000, 0x1ffdcc, 0xf50 }, + { 0x1fadec, 0x10000, 0x8f5e, 0x1fe15b }, +}}; + +/* + * The two arrays below specify the PQ OETF transfer function that's used to + * convert from linear LMS FP16 to PQ encoded L'M'S' fixed-point. 
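+ * They are loaded into the hardware by InitCsc0LUT() via the + * SET_CSC0LUT_SEGMENT_SIZE and SET_CSC0LUT_ENTRY methods.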
+ */ +static const NvU32 OetfPQ512SegSizesLog2[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, + 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, + 5, +}; + +static const NvU16 OetfPQ512Entries[] = { + 0x0000, 0x000C, 0x0014, 0x001C, 0x0028, 0x003C, 0x005C, 0x008C, 0x00D0, 0x0134, 0x0184, 0x01C8, 0x0238, 0x029C, 0x033C, 0x03C4, + 0x043C, 0x04A4, 0x0504, 0x0560, 0x0600, 0x0690, 0x0714, 0x078C, 0x07FC, 0x0864, 0x08C8, 0x0924, 0x0980, 0x09D4, 0x0A24, 0x0A70, + 0x0B04, 0x0B90, 0x0C10, 0x0C88, 0x0CFC, 0x0D68, 0x0DD4, 0x0E38, 0x0EF4, 0x0FA4, 0x1048, 0x10E4, 0x1174, 0x1200, 0x1284, 0x1304, + 0x13F4, 0x14D0, 0x159C, 0x165C, 0x1714, 0x17C0, 0x1864, 0x1900, 0x1A28, 0x1B34, 0x1C30, 0x1D1C, 0x1DFC, 0x1ECC, 0x1F94, 0x2050, + 0x2104, 0x21B0, 0x2258, 0x22F8, 0x2390, 0x2424, 0x24B4, 0x2540, 0x25C4, 0x2648, 0x26C4, 0x2740, 0x27B8, 0x282C, 0x289C, 0x290C, + 0x29E0, 0x2AAC, 0x2B70, 0x2C2C, 0x2CE0, 0x2D90, 0x2E38, 0x2ED8, 0x2F74, 0x300C, 0x30A0, 0x3130, 0x31BC, 0x3244, 0x32C8, 0x3348, + 0x3440, 0x352C, 0x360C, 0x36E4, 0x37B4, 0x387C, 0x393C, 0x39F8, 0x3AA8, 0x3B58, 0x3C00, 0x3CA4, 0x3D44, 0x3DDC, 0x3E74, 0x3F04, + 0x401C, 0x4128, 0x4228, 0x431C, 0x4408, 0x44E8, 0x45C4, 0x4694, 0x475C, 0x4820, 0x48DC, 0x4994, 0x4A48, 0x4AF4, 0x4B9C, 0x4C3C, + 0x4D78, 0x4EA0, 0x4FBC, 0x50CC, 0x51D0, 0x52CC, 0x53BC, 0x54A0, 0x5580, 0x5658, 0x5728, 0x57F0, 0x58B4, 0x5974, 0x5A2C, 0x5ADC, + 0x5C34, 0x5D7C, 0x5EB4, 0x5FDC, 0x60F4, 0x6204, 0x630C, 0x6404, 0x64F8, 0x65E0, 0x66C4, 0x679C, 0x6870, 0x693C, 0x6A04, 0x6AC4, + 0x6C38, 0x6D94, 0x6EE4, 0x7020, 0x7150, 0x7274, 0x738C, 0x7498, 0x7598, 0x7694, 0x7784, 0x786C, 0x794C, 0x7A24, 0x7AF8, 0x7BC4, + 0x7D50, 0x7EC4, 0x8024, 0x8174, 0x82B4, 0x83E8, 0x850C, 0x8628, 0x8738, 0x883C, 0x8938, 0x8A2C, 0x8B18, 0x8BFC, 0x8CD8, 0x8DB0, + 0x8F4C, 0x90D0, 0x9240, 0x939C, 0x94EC, 0x962C, 0x975C, 0x9880, 0x999C, 0x9AAC, 0x9BB0, 0x9CAC, 0x9DA0, 0x9E8C, 0x9F70, 0xA04C, + 0xA1F4, 0xA384, 0xA500, 0xA664, 0xA7BC, 0xA904, 0xAA3C, 0xAB6C, 0xAC8C, 0xADA0, 0xAEAC, 0xAFAC, 0xB0A4, 0xB194, 0xB27C, 0xB360, + 0xB510, 0xB6A4, 0xB824, 0xB994, 0xBAF0, 0xBC3C, 0xBD78, 0xBEA8, 0xBFCC, 0xC0E4, 0xC1F0, 0xC2F4, 0xC3F0, 0xC4E4, 0xC5CC, 0xC6B0, + 0xC78C, 0xC860, 0xC930, 0xC9F8, 0xCABC, 0xCB7C, 0xCC38, 0xCCEC, 0xCD9C, 0xCE48, 0xCEF0, 0xCF94, 0xD034, 0xD0D4, 0xD16C, 0xD200, + 0xD294, 0xD324, 0xD3B4, 0xD43C, 0xD4C4, 0xD54C, 0xD5CC, 0xD650, 0xD6CC, 0xD748, 0xD7C4, 0xD83C, 0xD8B0, 0xD924, 0xD994, 0xDA08, + 0xDAE0, 0xDBB4, 0xDC84, 0xDD4C, 0xDE10, 0xDECC, 0xDF84, 0xE038, 0xE0E8, 0xE194, 0xE238, 0xE2DC, 0xE37C, 0xE418, 0xE4B0, 0xE544, + 0xE5D4, 0xE664, 0xE6F0, 0xE778, 0xE800, 0xE884, 0xE904, 0xE984, 0xEA00, 0xEA7C, 0xEAF4, 0xEB68, 0xEBDC, 0xEC50, 0xECC0, 0xED30, + 0xEE08, 0xEED8, 0xEFA4, 0xF068, 0xF128, 0xF1E4, 0xF298, 0xF348, 0xF3F4, 0xF49C, 0xF540, 0xF5E0, 0xF67C, 0xF714, 0xF7A8, 0xF83C, + 0xF8CC, 0xF958, 0xF9E0, 0xFA68, 0xFAEC, 0xFB6C, 0xFBE8, 0xFC64, 0xFCE0, 0xFD58, 0xFDCC, 0xFE40, 0xFEB4, 0xFF24, 0xFF90, 0xFFFC, +}; + +/* + * The two arrays below specify the PQ EOTF transfer function that's used to + * convert from PQ encoded L'M'S' fixed-point to linear LMS FP16. This transfer + * function is the inverse of the OETF curve. 
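+ * They are loaded by InitCsc1LUT() via the corresponding SET_CSC1LUT_* + * methods.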
+ */ +static const NvU32 EotfPQ512SegSizesLog2[] = { + 6, 6, 4, 4, 4, 3, 4, 3, 3, 3, 2, 2, 2, 3, 3, 2, + 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 6, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 1, 2, + 2, 1, 1, 2, 2, 2, 2, 1, 2, 1, 1, 2, 1, 4, 2, 2, +}; + +static const NvU16 EotfPQ512Entries[] = { + 0x0000, 0x0001, 0x0003, 0x0005, 0x0008, 0x000C, 0x0011, 0x0016, 0x001B, 0x0022, 0x0028, 0x002F, 0x0037, 0x003F, 0x0048, 0x0051, + 0x005A, 0x0064, 0x006F, 0x007A, 0x0085, 0x0091, 0x009E, 0x00AB, 0x00B8, 0x00C6, 0x00D4, 0x00E3, 0x00F3, 0x0102, 0x0113, 0x0123, + 0x0135, 0x0146, 0x0158, 0x016B, 0x017E, 0x0192, 0x01A6, 0x01BB, 0x01D0, 0x01E5, 0x01FC, 0x0212, 0x0229, 0x0241, 0x0259, 0x0272, + 0x028B, 0x02A4, 0x02BE, 0x02D9, 0x02F4, 0x0310, 0x032C, 0x0349, 0x0366, 0x0384, 0x03A2, 0x03C1, 0x03E0, 0x0400, 0x0421, 0x0442, + 0x0463, 0x0485, 0x04A8, 0x04CB, 0x04EF, 0x0513, 0x0538, 0x055D, 0x0583, 0x05AA, 0x05D1, 0x05F9, 0x0621, 0x064A, 0x0673, 0x069D, + 0x06C7, 0x06F3, 0x071E, 0x074B, 0x0777, 0x07A5, 0x07D3, 0x0801, 0x0819, 0x0830, 0x0849, 0x0861, 0x087A, 0x0893, 0x08AD, 0x08C7, + 0x08E1, 0x08FB, 0x0916, 0x0931, 0x094C, 0x0968, 0x0984, 0x09A0, 0x09BD, 0x09DA, 0x09F7, 0x0A15, 0x0A33, 0x0A51, 0x0A70, 0x0A8F, + 0x0AAE, 0x0ACE, 0x0AEE, 0x0B0E, 0x0B2F, 0x0B50, 0x0B71, 0x0B93, 0x0BB5, 0x0BD7, 0x0BFA, 0x0C0F, 0x0C20, 0x0C32, 0x0C44, 0x0C56, + 0x0C69, 0x0CB5, 0x0D03, 0x0D55, 0x0DA9, 0x0E01, 0x0E5B, 0x0EB9, 0x0F1B, 0x0F7F, 0x0FE7, 0x1029, 0x1061, 0x109A, 0x10D5, 0x1111, + 0x1150, 0x1190, 0x11D3, 0x1217, 0x125E, 0x12A6, 0x12F0, 0x133D, 0x138B, 0x13DC, 0x1417, 0x1442, 0x146D, 0x149A, 0x14C8, 0x14F7, + 0x1527, 0x1558, 0x158B, 0x15BF, 0x15F4, 0x162A, 0x1662, 0x169B, 0x16D5, 0x1711, 0x174E, 0x178C, 0x17CC, 0x1806, 0x1828, 0x184A, + 0x186D, 0x18B4, 0x18FF, 0x194D, 0x199E, 0x19F3, 0x1A4B, 0x1AA7, 0x1B06, 0x1B37, 0x1B69, 0x1B9B, 0x1BCF, 0x1C02, 0x1C1D, 0x1C38, + 0x1C54, 0x1C70, 0x1C8D, 0x1CAB, 0x1CC9, 0x1CE7, 0x1D06, 0x1D26, 0x1D46, 0x1D88, 0x1DCC, 0x1E13, 0x1E5C, 0x1EA8, 0x1EF6, 0x1F47, + 0x1F9A, 0x1FF1, 0x2025, 0x2053, 0x2082, 0x20B3, 0x20E6, 0x211A, 0x214F, 0x2187, 0x21C0, 0x21FA, 0x2237, 0x2275, 0x22B5, 0x22F7, + 0x233B, 0x23C9, 0x2430, 0x247F, 0x24D3, 0x252B, 0x2589, 0x25EB, 0x2653, 0x26C1, 0x2734, 0x27AD, 0x2817, 0x2838, 0x285A, 0x287C, + 0x28A0, 0x28C5, 0x28EA, 0x2911, 0x2938, 0x2960, 0x298A, 0x29B4, 0x29DF, 0x2A0C, 0x2A39, 0x2A68, 0x2A98, 0x2AFA, 0x2B62, 0x2BCE, + 0x2C20, 0x2C5B, 0x2C99, 0x2CDA, 0x2D1E, 0x2D65, 0x2DB0, 0x2DFD, 0x2E4E, 0x2EA3, 0x2EFC, 0x2F58, 0x2FB8, 0x300E, 0x3043, 0x307A, + 0x30B3, 0x30D0, 0x30EE, 0x310D, 0x312C, 0x314C, 0x316D, 0x318E, 0x31B0, 0x31D3, 0x31F6, 0x321A, 0x323F, 0x3265, 0x328B, 0x32B2, + 0x32DA, 0x332D, 0x3383, 0x33DC, 0x341D, 0x344D, 0x347F, 0x34B4, 0x34EA, 0x3523, 0x355E, 0x359B, 0x35DB, 0x361D, 0x3662, 0x36A9, + 0x36F3, 0x3740, 0x3791, 0x37E4, 0x381D, 0x384A, 0x3879, 0x38A9, 0x38DB, 0x3910, 0x3946, 0x397E, 0x39B8, 0x39F5, 0x3A34, 0x3A75, + 0x3AB9, 0x3AFF, 0x3B48, 0x3B94, 0x3BE2, 0x3C1A, 0x3C44, 0x3C70, 0x3C9D, 0x3CA0, 0x3CA3, 0x3CA6, 0x3CA9, 0x3CAC, 0x3CAF, 0x3CB1, + 0x3CB4, 0x3CB7, 0x3CBA, 0x3CBD, 0x3CC0, 0x3CC3, 0x3CC6, 0x3CC9, 0x3CCC, 0x3CCF, 0x3CD2, 0x3CD5, 0x3CD8, 0x3CDB, 0x3CDE, 0x3CE1, + 0x3CE4, 0x3CE7, 0x3CEA, 0x3CEE, 0x3CF1, 0x3CF4, 0x3CF7, 0x3CFA, 0x3CFD, 0x3D00, 0x3D03, 0x3D06, 0x3D09, 0x3D0D, 0x3D10, 0x3D13, + 0x3D16, 0x3D19, 0x3D1C, 0x3D20, 0x3D23, 0x3D26, 0x3D29, 0x3D2C, 0x3D30, 0x3D33, 0x3D36, 0x3D39, 0x3D3D, 0x3D40, 0x3D43, 0x3D46, + 0x3D4A, 0x3D4D, 0x3D50, 0x3D54, 0x3D57, 0x3D5A, 0x3D5D, 0x3D61, 0x3D64, 0x3D9B, 0x3DD3, 0x3E0D, 0x3E4A, 0x3E89, 0x3ECA, 0x3F0E, + 
0x3F54, 0x3F9C, 0x3FE8, 0x401B, 0x4043, 0x406D, 0x4099, 0x40C6, 0x40F4, 0x4124, 0x4156, 0x418A, 0x41C0, 0x41F8, 0x4232, 0x426D, + 0x42AB, 0x42EB, 0x432E, 0x4373, 0x43BA, 0x4428, 0x4479, 0x44D0, 0x452D, 0x4591, 0x45FC, 0x466F, 0x46EB, 0x472C, 0x476F, 0x47B5, + 0x47FE, 0x4824, 0x484B, 0x4874, 0x489D, 0x48F5, 0x4954, 0x4986, 0x49B9, 0x49EF, 0x4A26, 0x4A5F, 0x4A9B, 0x4AD9, 0x4B19, 0x4B9F, + 0x4C18, 0x4C66, 0x4CBA, 0x4CE6, 0x4D13, 0x4D43, 0x4D74, 0x4DA7, 0x4DDC, 0x4E12, 0x4E4B, 0x4E86, 0x4EC3, 0x4F02, 0x4F44, 0x4F88, + 0x4FCE, 0x500C, 0x5032, 0x5082, 0x50D8, 0x5106, 0x5135, 0x5166, 0x5199, 0x5205, 0x5278, 0x52F5, 0x537C, 0x53C3, 0x5406, 0x542D, + 0x5454, 0x54A9, 0x5503, 0x550F, 0x551B, 0x5527, 0x5533, 0x5540, 0x554C, 0x5559, 0x5565, 0x5572, 0x557F, 0x558C, 0x5599, 0x55A7, + 0x55B4, 0x55C1, 0x55CF, 0x5607, 0x5641, 0x567E, 0x56BC, 0x56FE, 0x5741, 0x5788, 0x57D1, +}; + +#define TMO_LUT_NUM_SEGMENTS 64 +#define TMO_LUT_SEG_SIZE_LOG2 4 +#define TMO_LUT_NUM_ENTRIES 1024 + +struct TmoLutSettings +{ + NvU32 satMode; + NvU32 lowIntensityZoneEnd; + NvU32 lowIntensityValueLinWeight; + NvU32 lowIntensityValueNonLinWeight; + NvU32 lowIntensityValueThreshold; + + NvU32 medIntensityZoneStart; + NvU32 medIntensityZoneEnd; + NvU32 medIntensityValueLinWeight; + NvU32 medIntensityValueNonLinWeight; + NvU32 medIntensityValueThreshold; + + NvU32 highIntensityZoneStart; + NvU32 highIntensityValueLinWeight; + NvU32 highIntensityValueNonLinWeight; + NvU32 highIntensityValueThreshold; +}; + +// No color correction. +static const struct TmoLutSettings TMO_LUT_SETTINGS_NO_CORRECTION = { 2, 1280, 256, 256, 255, 4960, 4961, 256, 256, 255, 10640, 256, 256, 255 }; + +static void InitCsc0LUT(NVEvoChannelPtr pChannel, + const NvU32 *pSegmentSizes, NvU32 numSegmentSizes, + const NvU16 *pLUTEntries, NvU32 numEntries) +{ + NvU32 i; + + for (i = 0; i < numSegmentSizes; i++) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC0LUT_SEGMENT_SIZE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CSC0LUT_SEGMENT_SIZE, _IDX, i) | + DRF_NUM(C57E, _SET_CSC0LUT_SEGMENT_SIZE, _VALUE, pSegmentSizes[i])); + } + + for (i = 0; i < numEntries; i++) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC0LUT_ENTRY, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CSC0LUT_ENTRY, _IDX, i) | + DRF_NUM(C57E, _SET_CSC0LUT_ENTRY, _VALUE, pLUTEntries[i])); + } +} + +static void InitCsc1LUT(NVEvoChannelPtr pChannel, + const NvU32 *pSegmentSizes, NvU32 numSegmentSizes, + const NvU16 *pLUTEntries, NvU32 numEntries) +{ + NvU32 i; + + for (i = 0; i < numSegmentSizes; i++) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC1LUT_SEGMENT_SIZE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CSC1LUT_SEGMENT_SIZE, _IDX, i) | + DRF_NUM(C57E, _SET_CSC1LUT_SEGMENT_SIZE, _VALUE, pSegmentSizes[i])); + } + + for (i = 0; i < numEntries; i++) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC1LUT_ENTRY, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CSC1LUT_ENTRY, _IDX, i) | + DRF_NUM(C57E, _SET_CSC1LUT_ENTRY, _VALUE, pLUTEntries[i])); + } +} + +static void ConfigureCsc0C5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NvBool enable) +{ + NVEvoWindowCaps *pWinCaps = + &pDevEvo->gpus[0].capabilities.window[pChannel->instance]; + struct NvKmsCscMatrix csc00Matrix = { }; + struct NvKmsCscMatrix csc01Matrix = { }; + NvU32 lutData = 0; + + if (!pWinCaps->csc0MatricesPresent) { + return; + } + + nvAssert(pWinCaps->cscLUTsPresent); + + if (enable) { + /* Linear RGB FP16 -> Linear LMS FP16 */ + 
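/* Together with the TMO stage, CSC0 implements the RGB -> ICtCp encode: + * CSC00 is RGB -> LMS, the CSC0LUT applies the PQ OETF, and CSC01 is + * L'M'S' -> ICtCp; ConfigureCsc1C5() programs the inverse on the far side + * of the TMO. */ +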
if (pHwState->colorSpace == NVKMS_INPUT_COLOR_SPACE_BT2100) { + csc00Matrix = Rec2020RGBToLMS; + } else { + csc00Matrix = Rec709RGBToLMS; + } + + /* Linear LMS FP16 -> PQ encoded L'M'S' fixed-point */ + lutData = DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _INTERPOLATE, _ENABLE) | + DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _ENABLE, _ENABLE); + + /* PQ encoded L'M'S' fixed-point -> ICtCp */ + csc01Matrix = LMSToICtCp; + } else { + csc00Matrix = NVKMS_IDENTITY_CSC_MATRIX; + + lutData = DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _INTERPOLATE, _DISABLE) | + DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _ENABLE, _DISABLE); + + csc01Matrix = NVKMS_IDENTITY_CSC_MATRIX; + } + + if (pHwState->csc00Override.enabled) { + csc00Matrix = pHwState->csc00Override.matrix; + } + + if (pHwState->csc01Override.enabled) { + csc01Matrix = pHwState->csc01Override.matrix; + } + + /* CSC0LUT must be enabled if CSC01 or CSC10 is in use. */ + if ((pHwState->csc01Override.enabled && + !nvIsCscMatrixIdentity(&pHwState->csc01Override.matrix)) || + (pHwState->csc10Override.enabled && + !nvIsCscMatrixIdentity(&pHwState->csc10Override.matrix))) { + + lutData = DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _INTERPOLATE, _ENABLE) | + DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _ENABLE, _ENABLE); + } + + SetCsc00MatrixC5(pChannel, &csc00Matrix); + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC0LUT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, lutData); + + SetCsc01MatrixC5(pChannel, &csc01Matrix); +} + +static inline float64_t maxF(float64_t a, float64_t b) +{ + return f64_lt(a, b) ? b : a; +} + +static inline float64_t clampF(float64_t value, float64_t min, float64_t max) +{ + value = maxF(value, min); + value = f64_lt(max, value) ? max : value; + return value; +} + +static float64_t PQEotf(float64_t colorValue, NvBool inverse) +{ + const float64_t zero = {0x0000000000000000}; // 0.0 + const float64_t one = {0x3FF0000000000000}; // 1.0 + const float64_t m1 = {0x3FC463FFFFFFB9A2}; // 0.1593017578125 + const float64_t m2 = {0x4053B60000000000}; // 78.84375 + const float64_t c1 = {0x3FEAC00000000000}; // 0.8359375 + const float64_t c2 = {0x4032DA0000000000}; // 18.8515625 + const float64_t c3 = {0x4032B00000000000}; // 18.6875 + + const float64_t invm1 = {0x40191C0D56E72ABA}; // 1/m1 = 6.27739463602 + const float64_t invm2 = {0x3F89F9B585D7C997}; // 1/m2 = 0.01268331351 + + if (inverse) { + // Convert from linear to PQ-encoded values. + float64_t L = clampF(colorValue, zero, one); + float64_t powLm1 = nvKmsPow(L, m1); + float64_t N = nvKmsPow(f64_div(f64_add(c1, f64_mul(c2, powLm1)), + f64_add(one, f64_mul(c3, powLm1))), m2); + + return clampF(N, zero, one); + } else { + // Convert from PQ-encoded values to linear values. 
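+ // For example, a PQ-coded input of 0.5 decodes to roughly 0.00923, + // i.e. about 92 nits on the 0..10000 nit scale.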
+ float64_t N = clampF(colorValue, zero, one); + float64_t powNinvM2 = nvKmsPow(N, invm2); + float64_t L = nvKmsPow(f64_div(maxF(f64_sub(powNinvM2, c1), zero), + f64_sub(c2, f64_mul(c3, powNinvM2))), + invm1); + + return clampF(L, zero, one); + } +} + +// Hermite spline +static float64_t P(float64_t B, float64_t KS, float64_t maxLum) +{ + const float64_t one = {0x3FF0000000000000}; // 1.0 + const float64_t two = {0x4000000000000000}; // 2.0 + const float64_t negtwo = {0xC000000000000000}; // -2.0 + const float64_t three = {0x4008000000000000}; // 3.0 + + float64_t t = f64_div(f64_sub(B, KS), f64_sub(one, KS)); + float64_t t2 = f64_mul(t, t); + float64_t t3 = f64_mul(t2, t); + + return + f64_add(f64_add( + f64_mul(f64_add(f64_sub(f64_mul(two, t3), f64_mul(three, t2)), one), KS), + f64_mul(f64_add(f64_sub(t3, f64_mul(two, t2)), t), f64_sub(one, KS))), + f64_mul(f64_add(f64_mul(negtwo, t3), f64_mul(three, t2)), maxLum)); +} + +/* + * PQ tone mapping operator with no remapping of blacks or "toe" section of + * curve. Messing with nonlinearity and remapping in the SDR portion of the + * curve results in bad looking PC desktop and game content. + * + * Lmax = InvPQEotf(targetMaxLum/10000.0) + * Lw = InvPQEotf(srcMaxLum/10000.0) + * maxLumRatio = Lmax/Lw + * KS = 1.5*maxLumRatio - 0.5 + * KSEqualsOne = (KS == 1.0) + * + * XXX HDR TODO: Remap blacks and implement toe section for video content? + */ +static NvU16 TmoLutEntry(NvU32 i, + const float64_t Lmax, + const float64_t Lw, + const float64_t maxLumRatio, + const float64_t KS, + const NvBool KSEqualsOne) +{ + const float64_t zero = {0x0000000000000000}; // 0.0 + const float64_t maxIntensity = {0x40CFFF8000000000}; // 16383.0 + + float64_t outputF; + float64_t inputF = + f64_div(ui32_to_f64(i), ui32_to_f64(TMO_LUT_NUM_ENTRIES - 1)); + + float64_t E1; + float64_t E2; + + E1 = f64_div(inputF, Lw); + + if (KSEqualsOne || f64_lt(E1, KS)) { + E2 = E1; + } else { + E2 = P(E1, KS, maxLumRatio); + } + + outputF = clampF(f64_mul(E2, Lw), zero, Lmax); + + return (NvU16) f64_to_ui32(clampF(f64_mul(outputF, maxIntensity), + zero, maxIntensity), + softfloat_round_near_even, FALSE) << 2; +} + +static void InitializeTmoLut(const NVEvoChannelPtr pChannel, + NVSurfaceEvoPtr pLutSurfaceEvo, + NvU32 sd) +{ + NVEvoLutDataRec *pData = pLutSurfaceEvo->cpuAddress[sd]; + NvU64 vssHead = 0; + NvU32 lutEntryCounter = 0, i; + + // Precalculate constants for TmoLutEntry(). 
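+ // For example, srcMaxLum = 1000 and targetMaxLum = 600 give Lw ~= 0.75 + // and Lmax ~= 0.70, so KS = 1.5*(0.70/0.75) - 0.5 ~= 0.89: the mapping is + // identity for E1 < KS and rolls off smoothly toward Lmax above that.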
+ const float64_t tenThousand = {0x40C3880000000000}; // 10000.0 + const float64_t one = {0x3FF0000000000000}; // 1.0 + const float64_t half = {0x3FE0000000000000}; // 0.5 + const float64_t oneHalf = {0x3FF8000000000000}; // 1.5 + // Lmax = InvPQEotf(targetMaxLum/10,000) + const float64_t Lmax = + PQEotf(f64_div(ui32_to_f64(pChannel->tmoParams.targetMaxLums[sd]), + tenThousand), TRUE); + // Lw = InvPQEotf(srcMaxLum/10,000) + const float64_t Lw = + PQEotf(f64_div(ui32_to_f64(pChannel->tmoParams.srcMaxLum), + tenThousand), TRUE); + // maxLumRatio = Lmax/Lw + const float64_t maxLumRatio = f64_div(Lmax, Lw); + // KS = 1.5*maxLumRatio - 0.5 + const float64_t KS = f64_sub(f64_mul(oneHalf, maxLumRatio), half); + // KSEqualsOne = (KS == 1.0) + const NvBool KSEqualsOne = f64_eq(KS, one); + + nvAssert(pChannel->tmoParams.srcMaxLum >= + pChannel->tmoParams.targetMaxLums[sd]); + + // VSS Header + for (lutEntryCounter = 0; lutEntryCounter < NV_LUT_VSS_HEADER_SIZE; lutEntryCounter++) { + vssHead = 0; + for (i = 0; ((i < 16) && (((lutEntryCounter * 16) + i) < TMO_LUT_NUM_SEGMENTS)); i++) { + NvU64 temp = TMO_LUT_SEG_SIZE_LOG2; + vssHead |= temp << (i * 3); + } + nvkms_memcpy(&(pData->base[lutEntryCounter]), &vssHead, sizeof(NVEvoLutEntryRec)); + } + + for (i = 0; i < TMO_LUT_NUM_ENTRIES; i++) { + pData->base[i + NV_LUT_VSS_HEADER_SIZE].Red = + pData->base[i + NV_LUT_VSS_HEADER_SIZE].Green = + pData->base[i + NV_LUT_VSS_HEADER_SIZE].Blue = + TmoLutEntry(i, Lmax, Lw, maxLumRatio, KS, KSEqualsOne); + } + + // Copy the last entry for interpolation + pData->base[TMO_LUT_NUM_ENTRIES + NV_LUT_VSS_HEADER_SIZE].Red = + pData->base[TMO_LUT_NUM_ENTRIES + NV_LUT_VSS_HEADER_SIZE - 1].Red; + pData->base[TMO_LUT_NUM_ENTRIES + NV_LUT_VSS_HEADER_SIZE].Blue = + pData->base[TMO_LUT_NUM_ENTRIES + NV_LUT_VSS_HEADER_SIZE - 1].Blue; + pData->base[TMO_LUT_NUM_ENTRIES + NV_LUT_VSS_HEADER_SIZE].Green = + pData->base[TMO_LUT_NUM_ENTRIES + NV_LUT_VSS_HEADER_SIZE - 1].Green; +} + +static NvBool UpdateTmoParams(NVEvoChannelPtr pChannel, + NvBool enabled, + NvU32 srcMaxLum, + const NvU32 targetMaxLums[NVKMS_MAX_SUBDEVICES]) +{ + NvU16 sd; + NvBool dirty = FALSE; + + if (pChannel->tmoParams.clientSpecified) { + pChannel->tmoParams.clientSpecified = FALSE; + dirty = TRUE; + } + + if (pChannel->tmoParams.enabled != enabled) { + pChannel->tmoParams.enabled = enabled; + dirty = TRUE; + } + + if (pChannel->tmoParams.srcMaxLum != srcMaxLum) { + pChannel->tmoParams.srcMaxLum = srcMaxLum; + dirty = TRUE; + } + + for (sd = 0; sd < NVKMS_MAX_SUBDEVICES; sd++) { + if (pChannel->tmoParams.targetMaxLums[sd] != targetMaxLums[sd]) { + pChannel->tmoParams.targetMaxLums[sd] = targetMaxLums[sd]; + dirty = TRUE; + } + } + + return dirty; +} + +static void EvoSetTmoLutSurfaceAddressC5( + const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 offset) +{ + NvU32 ctxDmaHandle = pSurfaceDesc ? 
pSurfaceDesc->ctxDmaHandle : 0; + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CONTEXT_DMA_TMO_LUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CONTEXT_DMA_TMO_LUT, _HANDLE, ctxDmaHandle)); + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_OFFSET_TMO_LUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_OFFSET_TMO_LUT, _ORIGIN, offset >> 8)); +} + +static void ConfigureTmoLut(NVDevEvoPtr pDevEvo, + const NVFlipChannelEvoHwState *pHwState, + NVEvoChannelPtr pChannel) +{ + NvU16 sd; + const NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask); + const NvU32 head = pDevEvo->headForWindow[win]; + NvU32 offset = offsetof(NVEvoLutDataRec, base); + NvU32 lutSize = NV_LUT_VSS_HEADER_SIZE + TMO_LUT_NUM_ENTRIES + 1; + const struct TmoLutSettings *tmoLutSettings = &TMO_LUT_SETTINGS_NO_CORRECTION; + NvBool enableLut = FALSE; + + if (pHwState->tmoLut.fromOverride) { + enableLut = (pHwState->tmoLut.pLutSurfaceEvo != NULL); + pChannel->tmoParams.clientSpecified = TRUE; + offset = pHwState->tmoLut.offset; + lutSize = NV_LUT_VSS_HEADER_SIZE + pHwState->tmoLut.lutEntries; + } else { + NvBool needsTmoLut = FALSE; + const NvU32 srcMaxLum = nvGetHDRSrcMaxLum(pHwState); + NvU32 targetMaxLums[NVKMS_MAX_SUBDEVICES] = {0}; + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + const NVDispHeadStateEvoRec *pHeadState = + &pDevEvo->pDispEvo[sd]->headState[head]; + + targetMaxLums[sd] = pHeadState->hdrInfoFrame.staticMetadata.maxCLL; + + // If any head needs tone mapping, enable TMO for channel + if (nvNeedsTmoLut(pDevEvo, pChannel, pHwState, + srcMaxLum, targetMaxLums[sd])) { + needsTmoLut = TRUE; + } + } + + if (!UpdateTmoParams(pChannel, needsTmoLut, srcMaxLum, targetMaxLums)) { + // No change in parameters, no need to reconfigure. + return; + } + enableLut = pChannel->tmoParams.enabled; + + if (enableLut) { + // Initialize TMO LUT on all subdevices + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + nvAssert(pHwState->tmoLut.pLutSurfaceEvo != NULL); + InitializeTmoLut(pChannel, pHwState->tmoLut.pLutSurfaceEvo, sd); + } + } + } + + if (!enableLut) { + pDevEvo->hal->SetTmoLutSurfaceAddress(pDevEvo, pChannel, + NULL /* pSurfaceDesc */, 0 /* offset */); + return; + } + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_TMO_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_TMO_CONTROL, _SIZE, lutSize) | + DRF_DEF(C57E, _SET_TMO_CONTROL, _INTERPOLATE, _ENABLE) | + DRF_NUM(C57E, _SET_TMO_CONTROL, _SAT_MODE, tmoLutSettings->satMode)); + + pDevEvo->hal->SetTmoLutSurfaceAddress(pDevEvo, pChannel, + &pHwState->tmoLut.pLutSurfaceEvo->planes[0].surfaceDesc, offset); + + // Low Intensity + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_TMO_LOW_INTENSITY_ZONE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_TMO_LOW_INTENSITY_ZONE, _END, + tmoLutSettings->lowIntensityZoneEnd)); + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_TMO_LOW_INTENSITY_VALUE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_TMO_LOW_INTENSITY_VALUE, _LIN_WEIGHT, + tmoLutSettings->lowIntensityValueLinWeight) | + DRF_NUM(C57E, _SET_TMO_LOW_INTENSITY_VALUE, _NON_LIN_WEIGHT, + tmoLutSettings->lowIntensityValueNonLinWeight) | + DRF_NUM(C57E, _SET_TMO_LOW_INTENSITY_VALUE, _THRESHOLD, + tmoLutSettings->lowIntensityValueThreshold)); + + // Medium Intensity + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_TMO_MEDIUM_INTENSITY_ZONE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_TMO_MEDIUM_INTENSITY_ZONE, _START, + tmoLutSettings->medIntensityZoneStart) | + DRF_NUM(C57E, 
_SET_TMO_MEDIUM_INTENSITY_ZONE, _END, + tmoLutSettings->medIntensityZoneEnd)); + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_TMO_MEDIUM_INTENSITY_VALUE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_TMO_MEDIUM_INTENSITY_VALUE, _LIN_WEIGHT, + tmoLutSettings->medIntensityValueLinWeight) | + DRF_NUM(C57E, _SET_TMO_MEDIUM_INTENSITY_VALUE, _NON_LIN_WEIGHT, + tmoLutSettings->medIntensityValueNonLinWeight) | + DRF_NUM(C57E, _SET_TMO_MEDIUM_INTENSITY_VALUE, _THRESHOLD, + tmoLutSettings->medIntensityValueThreshold)); + + // High Intensity + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_TMO_HIGH_INTENSITY_ZONE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_TMO_HIGH_INTENSITY_ZONE, _START, + tmoLutSettings->highIntensityZoneStart)); + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_TMO_HIGH_INTENSITY_VALUE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_TMO_HIGH_INTENSITY_VALUE, _LIN_WEIGHT, + tmoLutSettings->highIntensityValueLinWeight) | + DRF_NUM(C57E, _SET_TMO_HIGH_INTENSITY_VALUE, _NON_LIN_WEIGHT, + tmoLutSettings->highIntensityValueNonLinWeight) | + DRF_NUM(C57E, _SET_TMO_HIGH_INTENSITY_VALUE, _THRESHOLD, + tmoLutSettings->highIntensityValueThreshold)); +} + +static void ConfigureCsc1C5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NvBool enable) +{ + NVEvoWindowCaps *pWinCaps = + &pDevEvo->gpus[0].capabilities.window[pChannel->instance]; + struct NvKmsCscMatrix csc10Matrix = { }; + struct NvKmsCscMatrix csc11Matrix = { }; + NvU32 lutData = 0; + const NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask); + const NvU32 head = pDevEvo->headForWindow[win]; + + if (head == NV_INVALID_HEAD) { + return; + } + + if (enable) { + const NvU32 sdMask = nvPeekEvoSubDevMask(pDevEvo); + const NvU32 sd = (sdMask == 0) ? 0 : nv_ffs(sdMask) - 1; + const NVDispHeadStateEvoRec *pHeadState; + + /* + * All callers of this path should push a single sd on the stack, + * so that ffs(sdMask) is safe. + */ + nvAssert(nvPopCount32(sdMask) == 1); + + pHeadState = &pDevEvo->pDispEvo[sd]->headState[head]; + + /* ICtCp -> PQ encoded L'M'S' fixed-point */ + csc10Matrix = ICtCpToLMS; + + /* PQ encoded L'M'S' fixed-point -> Linear LMS FP16 */ + lutData = DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _INTERPOLATE, _ENABLE) | + DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _ENABLE, _ENABLE); + + /* Linear LMS FP16 -> Linear RGB FP16 */ + // If postcomp is PQ, composite in Rec2020 + if (pHeadState->tf == NVKMS_OUTPUT_TF_PQ) { + csc11Matrix = LMSToRec2020RGB; + } else { + csc11Matrix = LMSToRec709RGB; + } + } else { + csc10Matrix = NVKMS_IDENTITY_CSC_MATRIX; + + lutData = DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _INTERPOLATE, _DISABLE) | + DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _ENABLE, _DISABLE); + + csc11Matrix = pHwState->cscMatrix; + } + + if (pHwState->csc10Override.enabled) { + csc10Matrix = pHwState->csc10Override.matrix; + } + + if (pHwState->csc11Override.enabled) { + csc11Matrix = pHwState->csc11Override.matrix; + } + + nvAssert(pWinCaps->csc10MatrixPresent || + !pWinCaps->csc0MatricesPresent); + + if (pWinCaps->csc10MatrixPresent) { + nvAssert(pWinCaps->cscLUTsPresent); + + /* CSC1LUT must be enabled if CSC01 or CSC10 is in use. 
*/ + if ((pHwState->csc01Override.enabled && + !nvIsCscMatrixIdentity(&pHwState->csc01Override.matrix)) || + (pHwState->csc10Override.enabled && + !nvIsCscMatrixIdentity(&pHwState->csc10Override.matrix))) { + lutData = DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _INTERPOLATE, _ENABLE) | + DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _ENABLE, _ENABLE); + } + + SetCsc10MatrixC5(pChannel, &csc10Matrix); + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC1LUT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, lutData); + } + + nvAssert(pWinCaps->csc11MatrixPresent); + SetCsc11MatrixC5(pChannel, &csc11Matrix); +} + +static void InitDesktopColorC3(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_DESKTOP_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_DESKTOP_COLOR, _RED, 0) | + DRF_NUM(C37D, _HEAD_SET_DESKTOP_COLOR, _GREEN, 0) | + DRF_NUM(C37D, _HEAD_SET_DESKTOP_COLOR, _BLUE, 0) | + DRF_NUM(C37D, _HEAD_SET_DESKTOP_COLOR, _ALPHA, 255)); + } +} + +static void InitDesktopColorC5(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_DESKTOP_COLOR_ALPHA_RED, _ALPHA, 255) | + DRF_NUM(C57D, _HEAD_SET_DESKTOP_COLOR_ALPHA_RED, _RED, 0)); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_DESKTOP_COLOR_GREEN_BLUE, _GREEN, 0) | + DRF_NUM(C57D, _HEAD_SET_DESKTOP_COLOR_GREEN_BLUE, _BLUE, 0)); + } +} + +void nvEvoInitChannel3(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + InitChannelCapsC3(pDevEvo, pChannel); +} + +static void EvoInitChannelC3(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + const NvBool isCore = + FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + pChannel->channelMask); + + nvEvoInitChannel3(pDevEvo, pChannel); + + if (isCore) { + InitDesktopColorC3(pDevEvo, pChannel); + } +} + +static void EvoInitChannelC5(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + const NvBool isCore = + FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + pChannel->channelMask); + const NvBool isWindow = + ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0); + + nvEvoInitChannel3(pDevEvo, pChannel); + + if (isCore) { + InitTaps5ScalerCoefficientsC5(pDevEvo, pChannel, FALSE); + InitDesktopColorC5(pDevEvo, pChannel); + } else if (isWindow) { + NVEvoWindowCaps *pWinCaps = + &pDevEvo->gpus[0].capabilities.window[pChannel->instance]; + NvU32 csc0SizesLen = ARRAY_LEN(OetfPQ512SegSizesLog2); + NvU32 csc0EntriesLen = ARRAY_LEN(OetfPQ512Entries); + NvU32 csc1SizesLen = ARRAY_LEN(EotfPQ512SegSizesLog2); + NvU32 csc1EntriesLen = ARRAY_LEN(EotfPQ512Entries); + + InitTaps5ScalerCoefficientsC5(pDevEvo, pChannel, TRUE); + + if (pWinCaps->cscLUTsPresent) { + InitCsc0LUT(pChannel, + OetfPQ512SegSizesLog2, csc0SizesLen, + OetfPQ512Entries, csc0EntriesLen); + InitCsc1LUT(pChannel, + EotfPQ512SegSizesLog2, csc1SizesLen, + EotfPQ512Entries, csc1EntriesLen); + } + } +} + +static enum FMTCoeffType EvoGetFMTCoeffType( + NvBool isYUV, + enum NvKmsInputColorSpace colorSpace, + NvU8 depthPerComponent, + enum NvKmsInputColorRange colorRange) +{ +#define FMT(nvkms_space, coeff_space, depth, nvkms_range, coeff_range) \ + if 
((colorSpace == NVKMS_INPUT_COLOR_SPACE_##nvkms_space) && \ + (depthPerComponent == depth) && \ + (colorRange == NVKMS_INPUT_COLOR_RANGE_##nvkms_range)) { \ + return FMT_COEFF_TYPE_##coeff_space##_YUV_##depth##BPC_##coeff_range##_TO_RGB_16BPC_FULL; \ + } + + // RGB colorspaces use identity FMT + if (!isYUV) { + return FMT_COEFF_TYPE_IDENTITY; + } + + if (colorRange == NVKMS_INPUT_COLOR_RANGE_DEFAULT) { + // For YUV, default to limited color range + colorRange = NVKMS_INPUT_COLOR_RANGE_LIMITED; + } + + FMT(BT601, REC601, 8, LIMITED, LTD); + FMT(BT601, REC601, 8, FULL, FULL); + FMT(BT601, REC601, 10, LIMITED, LTD); + FMT(BT601, REC601, 10, FULL, FULL); + FMT(BT601, REC601, 12, LIMITED, LTD); + FMT(BT601, REC601, 12, FULL, FULL); + + FMT(BT709, REC709, 8, LIMITED, LTD); + FMT(BT709, REC709, 8, FULL, FULL); + FMT(BT709, REC709, 10, LIMITED, LTD); + FMT(BT709, REC709, 10, FULL, FULL); + FMT(BT709, REC709, 12, LIMITED, LTD); + FMT(BT709, REC709, 12, FULL, FULL); + + FMT(BT2100, REC2020, 8, LIMITED, LTD); + FMT(BT2100, REC2020, 8, FULL, FULL); + FMT(BT2100, REC2020, 10, LIMITED, LTD); + FMT(BT2100, REC2020, 10, FULL, FULL); + FMT(BT2100, REC2020, 12, LIMITED, LTD); + FMT(BT2100, REC2020, 12, FULL, FULL); + + // Unsupported formats also use identity FMT + return FMT_COEFF_TYPE_IDENTITY; +#undef FMT +} + +static const NvU32* EvoGetFMTMatrixC5( + const enum NvKmsSurfaceMemoryFormat format, + const NVFlipChannelEvoHwState *pHwState) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(format); + + // Choose FMT matrix based on input colorspace, bpc, and colorrange. + return FMTMatrix[EvoGetFMTCoeffType(pFormatInfo->isYUV, + pHwState->colorSpace, + pFormatInfo->isYUV ? pFormatInfo->yuv.depthPerComponent + : pFormatInfo->rgb.bitsPerPixel / 3, + pHwState->colorRange)]; +} + +static void EvoSetFMTMatrixC5( + NVEvoChannelPtr pChannel, const enum NvKmsSurfaceMemoryFormat format, + const NVFlipChannelEvoHwState *pHwState) +{ + const NvU32 *matrix = EvoGetFMTMatrixC5(format, pHwState); + NvU32 method = NVC57E_SET_FMT_COEFFICIENT_C00; + int i; + + for (i = 0; i < 12; i++) { + nvDmaSetStartEvoMethod(pChannel, method, 1); + nvDmaSetEvoMethodData(pChannel, matrix[i]); + + method += 4; + } +} + +void nvEvoInitDefaultLutC5(NVDevEvoPtr pDevEvo) +{ + NVSurfaceEvoPtr pLut = pDevEvo->lut.defaultLut; + NvU16 sd; + + nvAssert(pLut); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NvU32 lutSize; + NvBool isLutModeVss; + NVEvoLutDataRec *pData = pLut->cpuAddress[sd]; + + EvoSetupIdentityBaseLutC5(pData, + &pDevEvo->lut.defaultBaseLUTState[sd], + &lutSize, &isLutModeVss); + + EvoSetupIdentityOutputLutC5(pData, + &pDevEvo->lut.defaultOutputLUTState[sd], + &lutSize, &isLutModeVss); + } +} + +static NvU32 GetWindowOwnerHead(const NVDevEvoRec *pDevEvo, const NvU32 win) +{ + if (pDevEvo->headForWindow[win] == NV_INVALID_HEAD) { + return NVC37D_WINDOW_SET_CONTROL_OWNER_NONE; + } + + nvAssert(pDevEvo->headForWindow[win] < pDevEvo->numHeads); + return pDevEvo->headForWindow[win]; +} + +static void EvoInitWindowMapping3(NVDevEvoPtr pDevEvo, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 win, sd; + + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* Bind each window to a head. On GV100, there is a fixed mapping. 
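On later nvdisplay hardware the window-to-head mapping is programmable, so it is written out explicitly for every window here.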
*/ + for (win = 0; win < pDevEvo->numWindows; win++) { + NvU32 head = GetWindowOwnerHead(pDevEvo, win); + nvDmaSetStartEvoMethod(pChannel, NVC37D_WINDOW_SET_CONTROL(win), 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C37D, _WINDOW_SET_CONTROL, _OWNER, head)); + } + + pModesetUpdateState->windowMappingChanged = FALSE; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + void *pCoreDma = pDevEvo->pSubDevices[sd]->pCoreDma; + /* + * Short timeout (100ms) because we don't expect display to be very + * busy at this point (it should at most be processing methods from + * InitChannel()). + */ + const NvU32 timeout = 100000; + NvU64 startTime = 0; + + if (!((nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)))) { + continue; + } + + /* This core channel must be idle before reading state cache */ + do { + NvBool isIdle = NV_FALSE; + if (!nvEvoIsChannelIdleC3(pDevEvo, pChannel, sd, &isIdle)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, "nvEvoIsChannelIdleC3() failed!"); + } + if (isIdle) { + break; + } + if (nvExceedsTimeoutUSec(pDevEvo, &startTime, timeout)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Timed out waiting for core channel idle."); + break; + } + } while (TRUE); + + for (win = 0; win < pDevEvo->numWindows; win++) { + NvU32 data = nvDmaLoadPioMethod(pCoreDma, NVC37D_WINDOW_SET_CONTROL(win)); + NvU32 head = GetWindowOwnerHead(pDevEvo, win); + + if (DRF_VAL(C37D, _WINDOW_SET_CONTROL, _OWNER, data) != head) { + pModesetUpdateState->windowMappingChanged = TRUE; + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + nvDisableCoreInterlockUpdateState(pDevEvo, + updateState, + pDevEvo->window[win]); + nvPopEvoSubDevMask(pDevEvo); + } + } + } +} + +static void EvoInitWindowMappingC3(const NVDispEvoRec *pDispEvo, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 win; + + nvPushEvoSubDevMaskDisp(pDispEvo); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + EvoInitWindowMapping3(pDevEvo, + pModesetUpdateState); + + // Set window usage bounds + for (win = 0; win < pDevEvo->numWindows; win++) { + nvDmaSetStartEvoMethod(pChannel, NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS(win), 1); + /* XXXnvdisplay: window scaling */ + nvDmaSetEvoMethodData(pChannel, NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C3); + } + nvPopEvoSubDevMask(pDevEvo); +} + +void nvEvoInitWindowMappingC5(const NVDispEvoRec *pDispEvo, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 win; + + nvPushEvoSubDevMaskDisp(pDispEvo); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + EvoInitWindowMapping3(pDevEvo, + pModesetUpdateState); + + // Set window usage bounds + for (win = 0; win < pDevEvo->numWindows; win++) { + NvU32 bounds = NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C5; + + bounds |= + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_2) | + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS(win), 1); + nvDmaSetEvoMethodData(pChannel, bounds); + } + nvPopEvoSubDevMask(pDevEvo); +} + +NvBool nvComputeMinFrameIdle( + const NVHwModeTimingsEvo *pTimings, + NvU16 *pLeadingRasterLines, + NvU16 *pTrailingRasterLines) +{ + const NVHwModeViewPortEvo *pViewPort = &pTimings->viewPort; + + /* + * 
leadingRasterLines defines the number of lines between the start of the + * frame (vsync) and the start of the active region. This includes Vsync, + * Vertical Back Porch, and the top part of the overscan border. The + * minimum value is 2 because vsync and VBP must be at least 1 line each. + * + * trailingRasterLines defines the number of lines between the end of the + * active region and the end of the frame. This includes the bottom part + * of the overscan border and the Vertical Front Porch. + */ + const NvU32 activeHeight = (pTimings->rasterBlankStart.y - + pTimings->rasterBlankEnd.y); + /* This is how it's done in dispClassNVD20CoreUpdateErrorChecks_hls.c */ + const NvU32 overscan = (activeHeight / 2) - (pViewPort->out.height / 2); + + /* + * The +1 is justified by this comment in the error check: + * + * If the value is 1, that means there are 2 lines of vblank (lines 0 and + * 1) before active. That is why the uLeadingBorder equation needs +1; + */ + const NvU32 leadingRasterLines = + pTimings->rasterBlankEnd.y + overscan + pViewPort->out.yAdjust + 1; + const NvU32 trailingRasterLines = + pTimings->rasterSize.y - (leadingRasterLines + pViewPort->out.height); + + /* nvdClass_01.mfs says: "The minimum value is 2 because vsync and VBP must + * be at least 1 line each." */ + if (leadingRasterLines < 2) { + return FALSE; + } + + *pLeadingRasterLines = leadingRasterLines; + *pTrailingRasterLines = trailingRasterLines; + + return TRUE; +} + +static void EvoSetRasterParams3(NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + /* XXXnvdisplay: Convert these for YCbCr, as necessary */ + NvU32 overscanColor = + DRF_NUM(C37D, _HEAD_SET_OVERSCAN_COLOR, _RED_CR, pOverscanColor->red) | + DRF_NUM(C37D, _HEAD_SET_OVERSCAN_COLOR, _GREEN_Y, pOverscanColor->green) | + DRF_NUM(C37D, _HEAD_SET_OVERSCAN_COLOR, _BLUE_CB, pOverscanColor->blue); + NvU32 hdmiStereoCtrl; + NvU16 minFrameIdleLeadingRasterLines, minFrameIdleTrailingRasterLines; + NvBool ret; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // XXX[AGP]: These methods are sequential and could use an incrementing + // method, but it's not clear if there's a bug in EVO that causes corruption + // sometimes. Play it safe and send methods with count=1. 
+ + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_OVERSCAN_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, overscanColor); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_RASTER_SIZE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_RASTER_SIZE, _WIDTH, pTimings->rasterSize.x) | + DRF_NUM(C37D, _HEAD_SET_RASTER_SIZE, _HEIGHT, pTimings->rasterSize.y)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_RASTER_SYNC_END(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_RASTER_SYNC_END, _X, pTimings->rasterSyncEnd.x) | + DRF_NUM(C37D, _HEAD_SET_RASTER_SYNC_END, _Y, pTimings->rasterSyncEnd.y)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_RASTER_BLANK_END(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_RASTER_BLANK_END, _X, pTimings->rasterBlankEnd.x) | + DRF_NUM(C37D, _HEAD_SET_RASTER_BLANK_END, _Y, pTimings->rasterBlankEnd.y)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_RASTER_BLANK_START(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_RASTER_BLANK_START, _X, pTimings->rasterBlankStart.x) | + DRF_NUM(C37D, _HEAD_SET_RASTER_BLANK_START, _Y, pTimings->rasterBlankStart.y)); + + ret = nvComputeMinFrameIdle(pTimings, + &minFrameIdleLeadingRasterLines, + &minFrameIdleTrailingRasterLines); + if (!ret) { + /* This should have been ensured by IMP in AssignPerHeadImpParams. */ + nvAssert(ret); + /* In case a mode validation override was used to skip IMP, program the + * default values. This may still cause a hardware exception. */ + minFrameIdleLeadingRasterLines = 2; + minFrameIdleTrailingRasterLines = 1; + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_MIN_FRAME_IDLE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_MIN_FRAME_IDLE, _LEADING_RASTER_LINES, + minFrameIdleLeadingRasterLines) | + DRF_NUM(C37D, _HEAD_SET_MIN_FRAME_IDLE, _TRAILING_RASTER_LINES, + minFrameIdleTrailingRasterLines)); + + nvAssert((KHzToHz(pTimings->pixelClock) & + ~DRF_MASK(NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ)) == 0x0); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY, _HERTZ, + KHzToHz(pTimings->pixelClock)) | + DRF_DEF(C37D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY, _ADJ1000DIV1001,_FALSE)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _NOT_DRIVER, _FALSE) | + DRF_DEF(C37D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _HOPPING, _DISABLE) | + DRF_DEF(C37D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _HOPPING_MODE, _VBLANK)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, _HERTZ, + KHzToHz(pTimings->pixelClock)) | + DRF_DEF(C37D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, _ADJ1000DIV1001,_FALSE)); + + nvDmaSetStartEvoMethod(pChannel, + NVC37D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _RED_CR, 0) | +#if defined(DEBUG) + DRF_NUM(C37D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _GREEN_Y, 512) | +#else + DRF_NUM(C37D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _GREEN_Y, 0) | +#endif + DRF_NUM(C37D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _BLUE_CB, 0)); + + hdmiStereoCtrl = DRF_NUM(C37D, _HEAD_SET_HDMI_CTRL, _HDMI_VIC, 0); + if (pTimings->hdmi3D) { + 
hdmiStereoCtrl = + FLD_SET_DRF(C37D, _HEAD_SET_HDMI_CTRL, _VIDEO_FORMAT, _STEREO3D, hdmiStereoCtrl); + } else { + hdmiStereoCtrl = + FLD_SET_DRF(C37D, _HEAD_SET_HDMI_CTRL, _VIDEO_FORMAT, _NORMAL, hdmiStereoCtrl); + } + nvDmaSetStartEvoMethod(pChannel, + NVC37D_HEAD_SET_HDMI_CTRL(head), 1); + nvDmaSetEvoMethodData(pChannel, hdmiStereoCtrl); +} + +static void EvoSetRasterParamsC3(NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NvU8 tilePosition, + const NVDscInfoEvoRec *pDscInfo, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState) +{ + nvAssert(tilePosition == 0); + EvoSetRasterParams3(pDevEvo, head, pTimings, pOverscanColor, updateState); +} + +static void EvoSetRasterParams5(NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NvU8 tilePosition, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + EvoSetRasterParams3(pDevEvo, head, pTimings, pOverscanColor, updateState); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_TILE_POSITION(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_TILE_POSITION, _X, tilePosition) | + DRF_NUM(C57D, _HEAD_SET_TILE_POSITION, _Y, 0)); +} + +static void EvoSetRasterParamsC5(NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NvU8 tilePosition, + const NVDscInfoEvoRec *pDscInfo, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState) +{ + nvAssert(pDscInfo->type != NV_DSC_INFO_EVO_TYPE_HDMI); + EvoSetRasterParams5(pDevEvo, head, pTimings, tilePosition, pOverscanColor, + updateState); +} + +static NvU32 GetHdmiDscHBlankPixelTarget(const NVHwModeTimingsEvo *pTimings, + const NVDscInfoEvoRec *pDscInfo) +{ + nvAssert((pDscInfo->dp.dscMode == NV_DSC_EVO_MODE_DUAL) || + (pDscInfo->dp.dscMode == NV_DSC_EVO_MODE_SINGLE)); + + const NvU32 hblankMin = + (pDscInfo->dp.dscMode == NV_DSC_EVO_MODE_DUAL) ? 
+ ((pDscInfo->hdmi.hblankMin + 1) / 2) : + pDscInfo->hdmi.hblankMin; + + NvU32 hBlankPixelTarget = + NV_UNSIGNED_DIV_CEIL((pTimings->rasterSize.x * + pDscInfo->hdmi.dscTBlankToTTotalRatioX1k), + 1000); + + hBlankPixelTarget = NV_MAX(hblankMin, hBlankPixelTarget); + + if (pDscInfo->dp.dscMode == NV_DSC_EVO_MODE_DUAL) { + hBlankPixelTarget += (hBlankPixelTarget % 2); + } + + return hBlankPixelTarget; +} + +static void EvoSetRasterParamsC6(NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NvU8 tilePosition, + const NVDscInfoEvoRec *pDscInfo, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState) +{ + NvU32 rasterHBlankDelay; + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + EvoSetRasterParams5(pDevEvo, head, pTimings, tilePosition, pOverscanColor, + updateState); + + if (pDscInfo->type == NV_DSC_INFO_EVO_TYPE_HDMI) { + const NvU32 hBlank = pTimings->rasterSize.x - + pTimings->rasterBlankStart.x + pTimings->rasterBlankEnd.x; + const NvU32 hBlankPixelTarget = + GetHdmiDscHBlankPixelTarget(pTimings, pDscInfo); + + const NvU32 headSetRasterHBlankDelayStart = + pTimings->rasterSize.x - pTimings->rasterBlankStart.x - 2; + const NvU32 headSetRasterHBlankDelayEnd = + hBlankPixelTarget - hBlank + headSetRasterHBlankDelayStart; + + rasterHBlankDelay = + DRF_NUM(C67D, _HEAD_SET_RASTER_HBLANK_DELAY, _BLANK_START, + headSetRasterHBlankDelayStart); + rasterHBlankDelay |= + DRF_NUM(C67D, _HEAD_SET_RASTER_HBLANK_DELAY, _BLANK_END, + headSetRasterHBlankDelayEnd); + } else { + rasterHBlankDelay = 0; + } + + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_RASTER_HBLANK_DELAY(head), 1); + nvDmaSetEvoMethodData(pChannel, rasterHBlankDelay); +} + +static void EvoSetProcAmpC3(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvU8 colorSpace; + NvU32 dynRange; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // These NVT defines match the HEAD_SET_PROCAMP ones. 
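+ // The ct_assert()s below verify the match at build time, so the non-BT2020 procAmp values can be programmed directly into the method fields.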
+ ct_assert(NVT_COLORIMETRY_RGB == NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB); + ct_assert(NVT_COLORIMETRY_YUV_601 == NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601); + ct_assert(NVT_COLORIMETRY_YUV_709 == NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709); + ct_assert(NVT_COLOR_RANGE_FULL == NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE); + ct_assert(NVT_COLOR_RANGE_LIMITED == NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE); + + if (pHeadState->procAmp.colorimetry == NVT_COLORIMETRY_BT2020RGB) { + colorSpace = NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB; + } else if (pHeadState->procAmp.colorimetry == NVT_COLORIMETRY_BT2020YCC) { + colorSpace = NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020; + } else { + colorSpace = pHeadState->procAmp.colorimetry; + } + + if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) { + dynRange = DRF_DEF(C37D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _VESA); + } else { + nvAssert(pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED); + dynRange = DRF_DEF(C37D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _CEA); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_PROCAMP(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_PROCAMP, _COLOR_SPACE, colorSpace) | + DRF_DEF(C37D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _DISABLE) | + DRF_NUM(C37D, _HEAD_SET_PROCAMP, _SAT_COS, + pHeadState->procAmp.satCos) | + DRF_NUM(C37D, _HEAD_SET_PROCAMP, _SAT_SINE, 0) | + dynRange | + DRF_NUM(C37D, _HEAD_SET_PROCAMP, _RANGE_COMPRESSION, + pHeadState->procAmp.colorRange) | + DRF_DEF(C37D, _HEAD_SET_PROCAMP, _BLACK_LEVEL, _GRAPHICS)); +} + +static const struct NvKmsCscMatrix RGBToLimitedRangeYCbCrRec2020Matrix = {{ + { 0x7000, 0x1f9900, 0x1ff700, 0x8000 }, + { 0x3988, 0x947c, 0xcfc, 0x1000 }, + { 0x1fe0b8, 0x1faf44, 0x7000, 0x8000 }, +}}; +static const struct NvKmsCscMatrix RGBToLimitedRangeYCbCrRec709Matrix = {{ + { 0x7000, 0x1f9a44, 0x1ff5bc, 0x8000 }, + { 0x2e90, 0x9ca4, 0xfd0, 0x1000 }, + { 0x1fe654, 0x1fa9a8, 0x7000, 0x8000 }, +}}; +static const struct NvKmsCscMatrix RGBToLimitedRangeYCbCrRec601Matrix = {{ + { 0x7000, 0x1fa234, 0x1fedc8, 0x8000 }, + { 0x417c, 0x8090, 0x18f8, 0x1000 }, + { 0x1fda34, 0x1fb5cc, 0x7000, 0x8000 }, +}}; +static const struct NvKmsCscMatrix RGBToLimitedRangeRGB = {{ + { 0xdb04, 0, 0, 0x1000 }, + { 0, 0xdb04, 0, 0x1000 }, + { 0, 0, 0xdb04, 0x1000 }, +}}; + + +/*! + * Return the appropriate OCSC1 matrix for the requested color range and + * colorimetry, or NULL if the OCSC1 should be disabled. + */ +const struct NvKmsCscMatrix* nvEvoGetOCsc1MatrixC5(const NVDispHeadStateEvoRec *pHeadState) +{ + if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) { + switch (pHeadState->procAmp.colorimetry) { + case NVT_COLORIMETRY_BT2020RGB: + // fall through + case NVT_COLORIMETRY_RGB: + // No OCSC1 needed. + return NULL; + default: + nvAssert(!"Unexpected colorimetry"); + return NULL; + } + } else { + switch (pHeadState->procAmp.colorimetry) { + case NVT_COLORIMETRY_BT2020RGB: + // fall through + case NVT_COLORIMETRY_RGB: + return &RGBToLimitedRangeRGB; + case NVT_COLORIMETRY_YUV_601: + return &RGBToLimitedRangeYCbCrRec601Matrix; + case NVT_COLORIMETRY_YUV_709: + return &RGBToLimitedRangeYCbCrRec709Matrix; + case NVT_COLORIMETRY_BT2020YCC: + return &RGBToLimitedRangeYCbCrRec2020Matrix; + default: + nvAssert(!"Unexpected colorimetry"); + return NULL; + } + } +} + +/*! + * Return the output clamping ranges for the requested color range and + * colorimetry. 
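+ * In this 12-bit pipeline, full range passes [0x0, 0xFFF] unclamped, while limited range clamps Y/RGB to [0x100, 0xEB0] and YCbCr chroma to [0x100, 0xF00].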
+ */ +struct EvoClampRangeC5 +nvEvoGetOCsc1ClampRange(const NVDispHeadStateEvoRec *pHeadState) +{ + if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) { + return (struct EvoClampRangeC5) { + .green = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _LOW, 0x0) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _HIGH, 0xFFF), + .red_blue = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _LOW, 0x0) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _HIGH, 0xFFF), + }; + } else { + switch (pHeadState->procAmp.colorimetry) { + default: + nvAssert(!"Unexpected colorimetry"); + // fall through + case NVT_COLORIMETRY_BT2020RGB: + // fall through + case NVT_COLORIMETRY_RGB: + return (struct EvoClampRangeC5) { + .green = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _LOW, 0x100) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _HIGH, 0xEB0), + .red_blue = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _LOW, 0x100) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _HIGH, 0xEB0), + }; + case NVT_COLORIMETRY_YUV_601: + case NVT_COLORIMETRY_YUV_709: + case NVT_COLORIMETRY_BT2020YCC: + return (struct EvoClampRangeC5) { + .green = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _LOW, 0x100) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _HIGH, 0xEB0), + .red_blue = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _LOW, 0x100) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _HIGH, 0xF00), + }; + } + } +} + +/* + * 1.402 1.0 0.0 + * -0.714136 1.0 -0.344136 + * 0.0 1.0 1.772 + */ +static const struct NvKmsMatrix CrYCb601toRGBMatrix = { { + { 0x3fb374bc, 0x3f800000, 0x00000000 }, + { 0xbf36d19e, 0x3f800000, 0xbeb03298 }, + { 0x00000000, 0x3f800000, 0x3fe2d0e5 } +} }; + +/* + * 1.5748 1.0 0.0 + * -0.468124 1.0 -0.187324 + * 0.0 1.0 1.8556 + */ +static const struct NvKmsMatrix CrYCb709toRGBMatrix = { { + { 0x3fc9930c, 0x3f800000, 0x00000000 }, + { 0xbeefadf3, 0x3f800000, 0xbe3fd1dd }, + { 0x00000000, 0x3f800000, 0x3fed844d } +} }; + +/* + * 0.5 -0.418688 -0.081312 + * 0.299 0.587 0.114 + * -0.168736 -0.331264 0.5 + */ +static const struct NvKmsMatrix RGBtoCrYCb601Matrix = { { + { 0x3f000000, 0xbed65e46, 0xbda686e8 }, + { 0x3e991687, 0x3f1645a2, 0x3de978d5 }, + { 0xbe2cc921, 0xbea99b6f, 0x3f000000 } +} }; + +/* + * 0.5 -0.45415 -0.04585 + * 0.21260 0.71520 0.07220 + * -0.11457 -0.38543 0.5 + */ +static const struct NvKmsMatrix RGBtoCrYCb709Matrix = { { + { 0x3f000000, 0xbee88659, 0xbd3bcd36 }, + { 0x3e59b3d0, 0x3f371759, 0x3d93dd98 }, + { 0xbdeaa3ad, 0xbec55715, 0x3f000000 } +} }; + +static void EvoSetOCsc1C5(NVDispEvoPtr pDispEvo, const NvU32 head) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const struct NvKmsCscMatrix *matrix = nvEvoGetOCsc1MatrixC5(pHeadState); + struct EvoClampRangeC5 clamp = nvEvoGetOCsc1ClampRange(pHeadState); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_CLAMP_RANGE_GREEN(head), 1); + nvDmaSetEvoMethodData(pChannel, clamp.green); + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_CLAMP_RANGE_RED_BLUE(head), 1); + nvDmaSetEvoMethodData(pChannel, clamp.red_blue); + + if (matrix) { + int x, y; + NvU32 method = NVC57D_HEAD_SET_OCSC1COEFFICIENT_C00(head); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OCSC1CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_OCSC1CONTROL, _ENABLE, _ENABLE)); + + for (y = 0; y < 3; y++) { + for (x = 0; x < 4; x++) { + nvDmaSetStartEvoMethod(pChannel, method, 1); + nvDmaSetEvoMethodData(pChannel, matrix->m[y][x]); + + 
method += 4; + } + } + } else { + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OCSC1CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_OCSC1CONTROL, _ENABLE, _DISABLE)); + } +} + +/* + * Sets up the OCSC0 matrix coefficients, used to perform saturation + * adjustment. + * + * The pipeline operates in FP16 RGB, however this adjustment must be + * performed in CrYCb. Therefore, we multiply the saturation + * adjustment matrix by the appropriate color space conversion + * matrix. The specific color space used depends on the colorimetry of + * the final output. Then we multiply by its inverse to convert back + * to RGB. Finally, we convert the coefficients to S5.14 fixed point + * format. + * + * NOTE: Hue and saturation adjustment would not typically be used with HDR + * (BT2020) output, but is allowed here for the sake of compatibility. + * TODO: Do hue/saturation adjustment in BT2020 with BT2020 output colorimetry. + */ +void nvEvo3PickOCsc0(const NVDispEvoRec *pDispEvo, const NvU32 head, + struct NvKms3x4MatrixF32 *ocsc0MatrixOutput, NvBool *pOutputRoundingFix) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NvU32 dispIdx = pDispEvo->displayOwner; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVEvoSubDevHeadStateRec *pSdHeadState = &pDevEvo->gpus[dispIdx].headState[head]; + + const float32_t zeroF32 = NvU32viewAsF32(NV_FLOAT_ZERO); + const float32_t oneF32 = NvU32viewAsF32(NV_FLOAT_ONE); + const float32_t inv2048F32 = f32_div(NvU32viewAsF32(NV_FLOAT_HALF), + NvU32viewAsF32(NV_FLOAT_1024)); + /* divide satCos by the default setting of 1024 */ + const float32_t satCos = f32_div(i32_to_f32(pHeadState->procAmp.satCos), + NvU32viewAsF32(NV_FLOAT_1024)); + const struct NvKmsMatrixF32 satHueMatrix = { { + { satCos, zeroF32, zeroF32 }, + { zeroF32, oneF32, zeroF32 }, + { zeroF32, zeroF32, satCos } + } }; + struct NvKms3x4MatrixF32 ocsc0Matrix = { { + { oneF32, zeroF32, zeroF32, zeroF32 }, + { zeroF32, oneF32, zeroF32, zeroF32 }, + { zeroF32, zeroF32, oneF32, zeroF32 } + } }; + + struct NvKmsMatrixF32 CrYCbtoRGBMatrix; + struct NvKmsMatrixF32 RGBtoCrYCbMatrix; + + NvBool outputRoundingFix = FALSE; + + + switch (pHeadState->procAmp.colorimetry) { + default: + nvAssert(!"Unexpected colorimetry"); + /* fallthrough */ + case NVT_COLORIMETRY_RGB: + case NVT_COLORIMETRY_BT2020RGB: + /* fallthrough; for RGB output, perform saturation adjustment in YUV709 */ + case NVT_COLORIMETRY_BT2020YCC: + /* fallthrough; for BT2020 YUV output, perform saturation adjustment in YUV709 */ + case NVT_COLORIMETRY_YUV_709: + CrYCbtoRGBMatrix = NvKmsMatrixToNvKmsMatrixF32(CrYCb709toRGBMatrix); + RGBtoCrYCbMatrix = NvKmsMatrixToNvKmsMatrixF32(RGBtoCrYCb709Matrix); + break; + case NVT_COLORIMETRY_YUV_601: + CrYCbtoRGBMatrix = NvKmsMatrixToNvKmsMatrixF32(CrYCb601toRGBMatrix); + RGBtoCrYCbMatrix = NvKmsMatrixToNvKmsMatrixF32(RGBtoCrYCb601Matrix); + break; + } + + ocsc0Matrix = nvMultiply3x4Matrix(&RGBtoCrYCbMatrix, &ocsc0Matrix); + ocsc0Matrix = nvMultiply3x4Matrix(&satHueMatrix, &ocsc0Matrix); + ocsc0Matrix = nvMultiply3x4Matrix(&CrYCbtoRGBMatrix, &ocsc0Matrix); + + if (nvkms_output_rounding_fix()) { + /* + * Only apply WAR for bug 2267663 for linear output TFs. Non-linear + * TFs could amplify the 1/2048 factor to the point of being + * perceptible, so don't apply WAR if the OLUT has been specified. + * + * Custom OLUTs may be non-linear, so unconditionally disable the WAR if + * one is specified. 
+ */ + + if ((pHeadState->tf == NVKMS_OUTPUT_TF_NONE) && + (pSdHeadState->outputLut.pLutSurfaceEvo == NULL)) { + ocsc0Matrix.m[0][3] = f32_add(ocsc0Matrix.m[0][3], inv2048F32); + ocsc0Matrix.m[1][3] = f32_add(ocsc0Matrix.m[1][3], inv2048F32); + ocsc0Matrix.m[2][3] = f32_add(ocsc0Matrix.m[2][3], inv2048F32); + + outputRoundingFix = TRUE; + } + } + + *ocsc0MatrixOutput = ocsc0Matrix; + *pOutputRoundingFix = outputRoundingFix; +} + +/* + * Programs the OCSC0 matrix coefficients, used to perform saturation + * adjustment. + * + * The OCSC0 matrix will be enabled later in EvoSetOutputLutC5 if + * and only if we also enable the OLUT as required by the + * specification. + */ +static void EvoSetOCsc0C5(const NVDispEvoRec *pDispEvo, const NvU32 head, + NvBool *pOutputRoundingFix) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + struct NvKms3x4MatrixF32 ocsc0Matrix; + + nvEvo3PickOCsc0(pDispEvo, head, &ocsc0Matrix, pOutputRoundingFix); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OCSC0COEFFICIENT_C00(head), 12); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C00, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[0][0]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C01, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[0][1]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C02, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[0][2]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C03, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[0][3]))); + + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C10, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[1][0]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C11, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[1][1]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C12, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[1][2]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C13, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[1][3]))); + + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C20, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[2][0]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C21, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[2][1]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C22, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[2][2]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C23, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[2][3]))); +} + +static void EvoSetProcAmpC5(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvU32 dynRange, chromaLpf, chromaDownV; + NvU32 colorimetry; + NvBool outputRoundingFix; + + NVT_COLORIMETRY nvtColorimetry = pHeadState->procAmp.colorimetry; + NVT_COLOR_RANGE nvtColorRange = pHeadState->procAmp.colorRange; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + switch (nvtColorimetry) { + default: + nvAssert(!"Unrecognized colorimetry"); + // fall through + case NVT_COLORIMETRY_BT2020RGB: + // fall through + case NVT_COLORIMETRY_RGB: + colorimetry = DRF_DEF(C57D, 
_HEAD_SET_PROCAMP, _COLOR_SPACE, _RGB); + break; + case NVT_COLORIMETRY_YUV_601: + colorimetry = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _COLOR_SPACE, _YUV_601); + break; + case NVT_COLORIMETRY_YUV_709: + colorimetry = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _COLOR_SPACE, _YUV_709); + break; + case NVT_COLORIMETRY_BT2020YCC: + colorimetry = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _COLOR_SPACE, _YUV_2020); + break; + } + + if (nvtColorRange == NVT_COLOR_RANGE_FULL) { + dynRange = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _VESA); + } else { + nvAssert(nvtColorRange == NVT_COLOR_RANGE_LIMITED); + dynRange = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _CEA); + } + + /* + * NVC67D_HEAD_SET_PROCAMP_CHROMA_DOWN_V is only defined in NVC67D, but + * it is an unused bit in NVC57D_HEAD_SET_PROCAMP, and YUV420 should only + * be set on >=nvdisplay 4.0, so it's okay to set it here. + */ + if (pHeadState->procAmp.colorFormat == NVT_COLOR_FORMAT_YCbCr420) { + chromaLpf = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _ENABLE); + chromaDownV = DRF_DEF(C67D, _HEAD_SET_PROCAMP, _CHROMA_DOWN_V, _ENABLE); + } else { + chromaLpf = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _DISABLE); + chromaDownV = DRF_DEF(C67D, _HEAD_SET_PROCAMP, _CHROMA_DOWN_V, _DISABLE); + } + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_PROCAMP(head), 1); + nvDmaSetEvoMethodData(pChannel, + colorimetry | dynRange | chromaLpf | chromaDownV); + + EvoSetOCsc0C5(pDispEvo, head, &outputRoundingFix); + EvoSetOCsc1C5(pDispEvo, head); +} + +/* + * With nvdisplay, external fliplock pins are controlled via a headless + * SetControl method, unlike previous EVO display implementations which + * specified this information in the per-head HeadSetControl method. This + * function loops over all of the core nvkms HeadControl data structures to + * determine which pins should be enabled in the SetControl method. It should + * be called any time the HeadControl data structures are updated. + */ +void nvEvoSetControlC3(NVDevEvoPtr pDevEvo, int sd) +{ + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 data = 0; + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + if (pHC->flipLock && !NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->flipLockPin)) { + NvU32 pin = pHC->flipLockPin - NV_EVO_LOCK_PIN_0; + data = FLD_IDX_SET_DRF(C37D, _SET_CONTROL, _FLIP_LOCK_PIN, + pin, _ENABLE, data); + } + } + + /* + * GV100 HW bug 2062029 WAR + * + * GV100 always holds the external fliplock line low as if + * NVC37D_SET_CONTROL_FLIP_LOCK_PIN was enabled. To work around this, + * the GV100 VBIOS initializes the fliplock GPIOs to be software + * controlled (forced off). The following rmctrl needs to be called to + * switch HW control of the fliplock GPIOs back on whenever external + * fliplock is enabled. 
+ */ + { + NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS params = { }; + + params.base.subdeviceIndex = pEvoSubDev->subDeviceInstance; + params.bEnable = (data != 0); + + if (nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NVC370_CTRL_CMD_SET_SWAPRDY_GPIO_WAR, + &params, sizeof(params)) != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, "Failed to override fliplock GPIO"); + } + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_SET_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, data); +} + +static void EvoSetHeadControlC3(NVDevEvoPtr pDevEvo, int sd, int head, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + /* + * NOTE: This function should only push state to the hardware based on data + * in the pHC. If not, then we may miss updates due to the memcmp of the + * HeadControl structure in UpdateEvoLockState(). + */ + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + NvU32 data = 0, pin; + NvU32 serverLockMode, clientLockMode; + + /* These methods should only apply to a single subdevice */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + switch (pHC->serverLock) { + case NV_EVO_NO_LOCK: + serverLockMode = NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK; + break; + case NV_EVO_FRAME_LOCK: + serverLockMode = NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK; + break; + case NV_EVO_RASTER_LOCK: + serverLockMode = NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK; + break; + default: + nvAssert(!"Invalid server lock mode"); + return; + } + + switch (pHC->clientLock) { + case NV_EVO_NO_LOCK: + clientLockMode = NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK; + break; + case NV_EVO_FRAME_LOCK: + clientLockMode = NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK; + break; + case NV_EVO_RASTER_LOCK: + clientLockMode = NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK; + break; + default: + nvAssert(!"Invalid client lock mode"); + return; + } + + // Convert head control state to EVO method values. 
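+ // The MASTER (server) fields describe the lock signal this head drives; the SLAVE (client) fields describe the lock signal this head follows.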
+ nvAssert(!pHC->interlaced); + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _STRUCTURE, _PROGRESSIVE); + + nvAssert(pHC->serverLockPin != NV_EVO_LOCK_PIN_ERROR); + nvAssert(pHC->clientLockPin != NV_EVO_LOCK_PIN_ERROR); + + if (serverLockMode == NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK) { + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, _LOCK_PIN_NONE); + } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->serverLockPin)) { + pin = pHC->serverLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + /* + * nvdClass_01.mfs says: + * "master lock pin, if internal, must be set to the corresponding + * internal pin for that head" (error check #12) + */ + nvAssert(pin == head); + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, + NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(pin)); + } else { + pin = pHC->serverLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, + NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(pin)); + } + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _MASTER_LOCK_MODE, serverLockMode); + + if (clientLockMode == NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK) { + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, _LOCK_PIN_NONE); + } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->clientLockPin)) { + pin = pHC->clientLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(pin)); + } else { + pin = pHC->clientLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(pin)); + } + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _SLAVE_LOCK_MODE, clientLockMode); + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _SLAVE_LOCKOUT_WINDOW, + pHC->clientLockoutWindow); + + /* + * We always enable stereo lock when it's available and either framelock + * or rasterlock is in use. + */ + if (pHC->stereoLocked) { + if (pHC->serverLock != NV_EVO_NO_LOCK) { + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _MASTER_STEREO_LOCK_MODE, + NVC37D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE); + } + if (pHC->clientLock != NV_EVO_NO_LOCK) { + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _SLAVE_STEREO_LOCK_MODE, + NVC37D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE); + } + } + + nvAssert(pHC->stereoPin != NV_EVO_LOCK_PIN_ERROR); + if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin)) { + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _STEREO_PIN, _LOCK_PIN_NONE); + } else { + pin = pHC->stereoPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _STEREO_PIN, + NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(pin)); + } + + if (pHC->hdmi3D) { + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _STEREO3D_STRUCTURE, _FRAME_PACKED); + } else { + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _STEREO3D_STRUCTURE, _NORMAL); + } + + /* + * NVC67D_HEAD_SET_CONTROL_YUV420PACKER is only defined in NVC67D, but + * it is an unused bit in NVC37D_HEAD_SET_CONTROL, and YUV420 should only + * be set on >=nvdisplay 4.0, so it's okay to set it here. + */ + if (pHC->hwYuv420) { + data |= DRF_DEF(C67D, _HEAD_SET_CONTROL, _YUV420PACKER, _ENABLE); + } else { + data |= DRF_DEF(C67D, _HEAD_SET_CONTROL, _YUV420PACKER, _DISABLE); + } + + // Send the HeadSetControl method. 
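+ // (nvEvoSetControlC3() is re-run afterwards because the external fliplock pin enables, programmed via the headless SET_CONTROL method, may have changed along with this head's HeadControl state.)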
+ nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, data); + + nvEvoSetControlC3(pDevEvo, sd); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_LOCK_CHAIN(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C37D, _HEAD_SET_LOCK_CHAIN, _POSITION, + pHC->lockChainPosition)); + +/* XXX temporary WAR; see bug 4028718 */ +#if !defined(NVC37D_HEAD_SET_LOCK_OFFSET) +#define NVC37D_HEAD_SET_LOCK_OFFSET(a) (0x00002040 + (a)*0x00000400) +#define NVC37D_HEAD_SET_LOCK_OFFSET_X 14:0 +#define NVC37D_HEAD_SET_LOCK_OFFSET_Y 30:16 +#endif + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_LOCK_OFFSET(head), 1); + nvDmaSetEvoMethodData(pChannel, pHC->setLockOffsetX ? + DRF_NUM(C37D, _HEAD_SET_LOCK_OFFSET, _X, + 27) : 0); +} + +static void EvoSetHeadRefClkC3(NVDevEvoPtr pDevEvo, int head, NvBool external, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 sd; + + /* These methods should only apply to a single subdevice */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)) { + if (external) { + pDevEvo->gpus[sd].setSwSpareA[head] = + FLD_SET_DRF(C37D, + _HEAD_SET_SW_SPARE_A_CODE, + _VPLL_REF, + _QSYNC, + pDevEvo->gpus[sd].setSwSpareA[head]); + } else { + pDevEvo->gpus[sd].setSwSpareA[head] = + FLD_SET_DRF(C37D, + _HEAD_SET_SW_SPARE_A_CODE, + _VPLL_REF, + _NO_PREF, + pDevEvo->gpus[sd].setSwSpareA[head]); + } + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_SW_SPARE_A(head), 1); + nvDmaSetEvoMethodData(pChannel, pDevEvo->gpus[sd].setSwSpareA[head]); + nvPopEvoSubDevMask(pDevEvo); + } + } +} + +static void EvoSORSetControlC3(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 hwProtocol = 0; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + nvAssert(orIndex != NV_INVALID_OR); + + if (headMask != 0) { + switch (protocol) { + default: + nvAssert(!"Unknown SOR protocol"); + /* Fall through */ + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B; + break; + case NVKMS_PROTOCOL_SOR_DUAL_TMDS: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS; + break; + case NVKMS_PROTOCOL_SOR_DP_A: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_A; + break; + case NVKMS_PROTOCOL_SOR_DP_B: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_B; + break; + case NVKMS_PROTOCOL_SOR_LVDS_CUSTOM: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM; + break; + case NVKMS_PROTOCOL_SOR_HDMI_FRL: + hwProtocol = NVC67D_SOR_SET_CONTROL_PROTOCOL_HDMI_FRL; + break; + } + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_SOR_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _SOR_SET_CONTROL, _OWNER_MASK, headMask) | + DRF_NUM(C37D, _SOR_SET_CONTROL, _PROTOCOL, hwProtocol) | + DRF_DEF(C37D, _SOR_SET_CONTROL, _DE_SYNC_POLARITY, _POSITIVE_TRUE) | + DRF_DEF(C37D, _SOR_SET_CONTROL, _PIXEL_REPLICATE_MODE, _OFF)); +} + +NvU32 nvEvoGetPixelDepthC3(const enum nvKmsPixelDepth pixelDepth) +{ + switch (pixelDepth) { + case 
NVKMS_PIXEL_DEPTH_18_444: + return NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444; + case NVKMS_PIXEL_DEPTH_24_444: + return NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; + case NVKMS_PIXEL_DEPTH_30_444: + return NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444; + case NVKMS_PIXEL_DEPTH_16_422: + return NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422; + case NVKMS_PIXEL_DEPTH_20_422: + return NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422; + + } + nvAssert(!"Unexpected pixel depth"); + return NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; +} + +static void EvoPIORSetControlC3(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + if (headMask != 0) { + nvAssert(protocol == NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_PIOR_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _PIOR_SET_CONTROL, _OWNER_MASK, headMask) | + DRF_DEF(C37D, _PIOR_SET_CONTROL, _PROTOCOL, _EXT_TMDS_ENC) | + DRF_DEF(C37D, _PIOR_SET_CONTROL, _DE_SYNC_POLARITY, _POSITIVE_TRUE)); +} + +static void EvoDSISetControlC6(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + /* Only Head 0 can be used to drive DSI output on Orin */ + nvAssert((headMask == 0x0) || (headMask == 0x1)); + /* Only one DSI engine exists on Orin */ + nvAssert(orIndex == 0); + + if (headMask != 0) { + nvAssert(protocol == NVKMS_PROTOCOL_DSI); + } + + nvDmaSetStartEvoMethod(pChannel, NVC67D_DSI_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _DSI_SET_CONTROL, _OWNER_MASK, headMask)); +} + +static void EvoORSetControlC3Helper(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + switch (pConnectorEvo->or.type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR: + EvoSORSetControlC3(pConnectorEvo, protocol, orIndex, headMask); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR: + EvoPIORSetControlC3(pConnectorEvo, protocol, orIndex, headMask); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC: + /* No DAC support on nvdisplay. Fall through. 
*/ + default: + nvAssert(!"Invalid pConnectorEvo->or.type"); + break; + } +} + +void nvEvoORSetControlC3(NVDevEvoPtr pDevEvo, + const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask, + NVEvoUpdateState *updateState) +{ + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->core); + + EvoORSetControlC3Helper(pConnectorEvo, protocol, orIndex, headMask); +} + +static void EvoORSetControlC6(NVDevEvoPtr pDevEvo, + const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask, + NVEvoUpdateState *updateState) +{ + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->core); + + switch (pConnectorEvo->or.type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_DSI: + EvoDSISetControlC6(pConnectorEvo, protocol, orIndex, headMask); + break; + default: + EvoORSetControlC3Helper(pConnectorEvo, protocol, orIndex, headMask); + break; + } +} + +static void EvoHeadSetControlORC3(NVDevEvoPtr pDevEvo, + const int head, + const NVHwModeTimingsEvo *pTimings, + const enum nvKmsPixelDepth pixelDepth, + const NvBool colorSpaceOverride, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NvU32 hwPixelDepth = nvEvoGetPixelDepthC3(pixelDepth); + const NvU16 colorSpaceFlag = nvEvo1GetColorSpaceFlag(pDevEvo, + colorSpaceOverride); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _CRC_MODE, _COMPLETE_RASTER) | + (pTimings->hSyncPol ? + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _POSITIVE_TRUE)) | + (pTimings->vSyncPol ? + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _POSITIVE_TRUE)) | + DRF_NUM(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _PIXEL_DEPTH, hwPixelDepth) | + (colorSpaceOverride ? + (DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _ENABLE) | + DRF_NUM(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_FLAG, colorSpaceFlag)) : + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _DISABLE))); +} + +static void EvoHeadSetControlORC5(NVDevEvoPtr pDevEvo, + const int head, + const NVHwModeTimingsEvo *pTimings, + const enum nvKmsPixelDepth pixelDepth, + const NvBool colorSpaceOverride, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NvU32 hwPixelDepth = nvEvoGetPixelDepthC3(pixelDepth); + const NvU16 colorSpaceFlag = nvEvo1GetColorSpaceFlag(pDevEvo, + colorSpaceOverride); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _CRC_MODE, _COMPLETE_RASTER) | + (pTimings->hSyncPol ? + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _POSITIVE_TRUE)) | + (pTimings->vSyncPol ? 
+ DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _POSITIVE_TRUE)) | + DRF_NUM(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _PIXEL_DEPTH, hwPixelDepth) | + (colorSpaceOverride ? + (DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _ENABLE) | + DRF_NUM(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_FLAG, colorSpaceFlag)) : + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _DISABLE)) | + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _EXT_PACKET_WIN, _NONE)); +} + +static void EvoHeadSetDisplayIdC3(NVDevEvoPtr pDevEvo, + const NvU32 head, const NvU32 displayId, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_DISPLAY_ID(head, 0), 1); + nvDmaSetEvoMethodData(pChannel, displayId); +} + +static void SetFormatUsageBoundsOneWindow3(NVDevEvoPtr pDevEvo, NvU32 window, + const NvU64 supportedFormats, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 value = 0; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _RGB_PACKED1BPP, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _RGB_PACKED2BPP, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _RGB_PACKED4BPP, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _RGB_PACKED8BPP, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_PACKED422, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_SEMI_PLANAR420, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_SEMI_PLANAR422, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_SEMI_PLANAR444, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _EXT_YUV_SEMI_PLANAR420, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _EXT_YUV_SEMI_PLANAR422, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _EXT_YUV_SEMI_PLANAR444, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_PLANAR444, _TRUE, value); + } + if (supportedFormats & 
NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420) {
+        value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS,
+                            _YUV_PLANAR420, _TRUE, value);
+    }
+
+    if (supportedFormats != 0 && value == 0) {
+        nvAssert(!"Unknown depth in SetFormatUsageBoundsOneWindow");
+    }
+
+    nvDmaSetStartEvoMethod(pChannel, NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(window), 1);
+    nvDmaSetEvoMethodData(pChannel, value);
+}
+
+static void SetScalingUsageBoundsOneWindow5(
+    NVDevEvoPtr pDevEvo, NvU32 window,
+    const struct NvKmsScalingUsageBounds *pScaling,
+    NvBool layerUsable,
+    const NVHwModeViewPortEvo *pViewPort,
+    NVEvoUpdateState *updateState)
+{
+    NVEvoChannelPtr pChannel = pDevEvo->core;
+    NvU32 setWindowUsageBounds = NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C5;
+    NvU32 maxPixelsFetchedPerLine;
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+
+    nvDmaSetStartEvoMethod(pChannel,
+        NVC57D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(window), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(C57D, _WINDOW_SET_MAX_INPUT_SCALE_FACTOR, _HORIZONTAL,
+                pScaling->maxHDownscaleFactor) |
+        DRF_NUM(C57D, _WINDOW_SET_MAX_INPUT_SCALE_FACTOR, _VERTICAL,
+                pScaling->maxVDownscaleFactor));
+
+    if (layerUsable) {
+        maxPixelsFetchedPerLine = nvGetMaxPixelsFetchedPerLine(pViewPort->in.width,
+                                                               pScaling->maxHDownscaleFactor);
+    } else {
+        maxPixelsFetchedPerLine = 0;
+    }
+
+    setWindowUsageBounds |=
+        (DRF_NUM(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _MAX_PIXELS_FETCHED_PER_LINE, maxPixelsFetchedPerLine)) |
+        (pScaling->vTaps >= NV_EVO_SCALER_5TAPS ?
+            DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_5) :
+            DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_2)) |
+        (pScaling->vUpscalingAllowed ?
+            DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _TRUE) :
+            DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE));
+    nvDmaSetStartEvoMethod(pChannel,
+                           NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS(window), 1);
+    nvDmaSetEvoMethodData(pChannel, setWindowUsageBounds);
+}
+
+static NvBool EvoSetUsageBounds3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head,
+                                 const struct NvKmsUsageBounds *pUsage,
+                                 NVEvoUpdateState *updateState)
+{
+    const struct NvKmsUsageBounds *pCurrentUsage =
+        &pDevEvo->gpus[sd].headState[head].usage;
+    /* Return FALSE if a core channel UPDATE isn't actually needed.
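+     * Window format usage bounds methods are pushed, and TRUE returned,
+     * only for layers whose set of supported formats actually changed.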
*/ + NvBool needCoreUpdate = FALSE; + NvU32 layer; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + NvU64 currentFormats = 0; + NvU64 targetFormats = 0; + + if (pCurrentUsage->layer[layer].usable) { + currentFormats = + pCurrentUsage->layer[layer].supportedSurfaceMemoryFormats; + } + + if (pUsage->layer[layer].usable) { + targetFormats = pUsage->layer[layer].supportedSurfaceMemoryFormats; + } + + if (targetFormats == currentFormats) { + continue; + } + + SetFormatUsageBoundsOneWindow3(pDevEvo, + NV_EVO_CHANNEL_MASK_WINDOW_NUMBER( + pDevEvo->head[head].layer[layer]->channelMask), + targetFormats, + updateState); + needCoreUpdate = TRUE; + } + + return needCoreUpdate; +} + +static NvBool EvoSetUsageBoundsC3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState) +{ + return EvoSetUsageBounds3(pDevEvo, sd, head, pUsage, updateState); +} + +NvBool nvEvoSetUsageBoundsC5(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState) +{ + const struct NvKmsUsageBounds *pCurrentUsage = + &pDevEvo->gpus[sd].headState[head].usage; + NvBool needCoreUpdate; + NvU32 layer; + + needCoreUpdate = EvoSetUsageBounds3(pDevEvo, sd, head, pUsage, updateState); + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if ((pCurrentUsage->layer[layer].usable != pUsage->layer[layer].usable) || + (!nvEvoScalingUsageBoundsEqual(&pCurrentUsage->layer[layer].scaling, + &pUsage->layer[layer].scaling))) { + const NVHwModeViewPortEvo *pViewPort = + &pDevEvo->gpus[sd].pDispEvo->headState[head].timings.viewPort; + + SetScalingUsageBoundsOneWindow5( + pDevEvo, + NV_EVO_CHANNEL_MASK_WINDOW_NUMBER( + pDevEvo->head[head].layer[layer]->channelMask), + &pUsage->layer[layer].scaling, + pUsage->layer[layer].usable, + pViewPort, + updateState); + needCoreUpdate = TRUE; + } + } + + return needCoreUpdate; +} + +static void EvoSetCoreNotifierSurfaceAddressAndControlC3( + const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 notifierOffset, + NvU32 ctrlVal) +{ + NvU32 ctxDmaHandle = pSurfaceDesc ? pSurfaceDesc->ctxDmaHandle : 0; + + nvDmaSetStartEvoMethod(pChannel, NVC37D_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _SET_CONTEXT_DMA_NOTIFIER, _HANDLE, ctxDmaHandle)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _SET_NOTIFIER_CONTROL, _OFFSET, notifierOffset) | ctrlVal); +} + +void nvEvoSetNotifierC3(NVDevEvoRec *pDevEvo, + const NvBool notify, + const NvBool awaken, + const NvU32 notifier, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 ctrlVal = 0; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* + * XXXnvdisplay: Note that nvdClass_01.mfs says: + * "The units of the offset are 16 bytes.", while dispClass_02.mfs says: + * "The units of the offset are 32 bit words." + * The "legacy" 32-bit notifier format is no longer supported. This will + * have to be exposed to upper layers. + */ + ASSERT_DRF_NUM(C37D, _SET_NOTIFIER_CONTROL, _OFFSET, notifier); + + ctrlVal = (awaken ? + DRF_DEF(C37D, _SET_NOTIFIER_CONTROL, _MODE, _WRITE_AWAKEN) : + DRF_DEF(C37D, _SET_NOTIFIER_CONTROL, _MODE, _WRITE)); + ctrlVal |= (notify ? 
+ DRF_DEF(C37D, _SET_NOTIFIER_CONTROL, _NOTIFY, _ENABLE) : + DRF_DEF(C37D, _SET_NOTIFIER_CONTROL, _NOTIFY, _DISABLE)); + + // To work around HW BUG 1945716, set the core channel completion notifier + // context DMA to 0 when notification is not requested. + if (notify) { + NvU32 sd; + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)) { + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + pDevEvo->hal->SetCoreNotifierSurfaceAddressAndControl(pDevEvo, + pChannel, &pDevEvo->core->notifiersDma[sd].surfaceDesc, + notifier, ctrlVal); + nvPopEvoSubDevMask(pDevEvo); + } + } + } else { + pDevEvo->hal->SetCoreNotifierSurfaceAddressAndControl(pDevEvo, pChannel, + NULL /* pSurfaceDesc */, 0 /* offset */ , 0 /* ctrlVal */); + } +} + +static void UpdateCoreC3(NVEvoChannelPtr pChannel, + NVEvoChannelMask interlockChannelMask, + NvU32 flipLockPin, + NvBool releaseElv) +{ + NvU32 head, interlockFlags = 0; + NvU32 window, windowInterlockFlags = 0; + NvU32 update = DRF_NUM(C37D, _UPDATE, _FLIP_LOCK_PIN, flipLockPin); + + update |= releaseElv ? DRF_DEF(C37D, _UPDATE, _RELEASE_ELV, _TRUE) : 0; + + for (head = 0; head < NV_EVO_CHANNEL_MASK_CURSOR__SIZE; head++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _CURSOR, head, _ENABLE, + interlockChannelMask)) { + interlockFlags |= + DRF_IDX_DEF(C37D, _SET_INTERLOCK_FLAGS, + _INTERLOCK_WITH_CURSOR, head, _ENABLE); + } + } + + for (window = 0; window < NV_EVO_CHANNEL_MASK_WINDOW__SIZE; window++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE, + interlockChannelMask)) { + windowInterlockFlags |= + DRF_IDX_DEF(C37D, _SET_WINDOW_INTERLOCK_FLAGS, + _INTERLOCK_WITH_WINDOW, window, _ENABLE); + } + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_SET_INTERLOCK_FLAGS, 1); + nvDmaSetEvoMethodData(pChannel, interlockFlags); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_SET_WINDOW_INTERLOCK_FLAGS, 1); + nvDmaSetEvoMethodData(pChannel, windowInterlockFlags); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, update); + + nvDmaKickoffEvo(pChannel); +} + +static void UpdateWindowIMM(NVEvoChannelPtr pChannel, + NVEvoChannelMask winImmChannelMask, + NVEvoChannelMask winImmInterlockMask, + NvBool releaseElv) +{ + nvAssert((winImmChannelMask & ~NV_EVO_CHANNEL_MASK_WINDOW_ALL) == 0); + nvAssert((winImmInterlockMask & ~NV_EVO_CHANNEL_MASK_WINDOW_ALL) == 0); + + if ((winImmChannelMask & pChannel->channelMask) != 0) { + NvU32 updateImm = 0; + + if ((winImmInterlockMask & pChannel->channelMask) != 0) { + updateImm |= DRF_DEF(C37B, _UPDATE, _INTERLOCK_WITH_WINDOW, _ENABLE); + } else { + updateImm |= DRF_DEF(C37B, _UPDATE, _INTERLOCK_WITH_WINDOW, _DISABLE); + } + updateImm |= releaseElv ? DRF_DEF(C37B, _UPDATE, _RELEASE_ELV, _TRUE) : 0; + + nvDmaSetStartEvoMethod(pChannel->imm.u.dma, NVC37B_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel->imm.u.dma, updateImm); + nvDmaKickoffEvo(pChannel->imm.u.dma); + } +} + +static void UpdateWindowC3(NVEvoChannelPtr pChannel, + NVEvoChannelMask interlockChannelMask, + NVEvoChannelMask winImmChannelMask, + NVEvoChannelMask winImmInterlockMask, + NvBool transitionWAR, + NvU32 flipLockPin, + NvBool releaseElv) +{ + NvU32 head, interlockFlags = 0; + NvU32 window, windowInterlockFlags = 0; + NvU32 update = DRF_NUM(C37E, _UPDATE, _FLIP_LOCK_PIN, flipLockPin); + + update |= releaseElv ? 
DRF_DEF(C37E, _UPDATE, _RELEASE_ELV, _TRUE) : 0; + + if ((winImmInterlockMask & pChannel->channelMask) != 0) { + /* + * We expect winImmChannelMask to always be a superset of + * winImmInterlockMask. We should never interlock with a window + * immediate channel if we're not also going to kick off that + * window immediate channel. + */ + nvAssert((winImmChannelMask & pChannel->channelMask) != 0); + + update |= DRF_DEF(C37E, _UPDATE, _INTERLOCK_WITH_WIN_IMM, _ENABLE); + } else { + update |= DRF_DEF(C37E, _UPDATE, _INTERLOCK_WITH_WIN_IMM, _DISABLE); + } + + // Nothing currently requires updating a window channel without releasing + // ELV. + nvAssert(releaseElv); + + if (FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + interlockChannelMask)) { + interlockFlags |= + DRF_DEF(C37E, _SET_INTERLOCK_FLAGS, _INTERLOCK_WITH_CORE, _ENABLE); + } + + for (head = 0; head < NV_EVO_CHANNEL_MASK_CURSOR__SIZE; head++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _CURSOR, head, _ENABLE, + interlockChannelMask)) { + interlockFlags |= + DRF_IDX_DEF(C37E, _SET_INTERLOCK_FLAGS, + _INTERLOCK_WITH_CURSOR, head, _ENABLE); + } + } + + for (window = 0; window < NV_EVO_CHANNEL_MASK_WINDOW__SIZE; window++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE, + interlockChannelMask)) { + windowInterlockFlags |= + DRF_IDX_DEF(C37E, _SET_WINDOW_INTERLOCK_FLAGS, + _INTERLOCK_WITH_WINDOW, window, _ENABLE); + } + } + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_INTERLOCK_FLAGS, 1); + nvDmaSetEvoMethodData(pChannel, interlockFlags); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_WINDOW_INTERLOCK_FLAGS, 1); + nvDmaSetEvoMethodData(pChannel, windowInterlockFlags); + + /* + * If we determined that this update will transition from NULL to non-NULL + * ctxdma or vice-versa, bookend this update method with software methods + * to notify RM to apply a workaround for hardware bug 2193096. + */ + if (transitionWAR) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SW_SET_MCLK_SWITCH, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57E, _SW_SET_MCLK_SWITCH, _ENABLE, _FALSE)); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37E_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, update); + + if (transitionWAR) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SW_SET_MCLK_SWITCH, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57E, _SW_SET_MCLK_SWITCH, _ENABLE, _TRUE)); + } + + UpdateWindowIMM(pChannel, winImmChannelMask, + winImmInterlockMask, releaseElv); + + nvDmaKickoffEvo(pChannel); +} + +/*! + * This function finds any fliplocked channels in the current update and pushes + * flips for them setting the appropriate fliplock pin and interlock masks. + * + * All of this complexity is here to support the case where multiple heads on a + * single GPU are fliplocked together, but flip requests come in for only a + * subset of those heads at a time (e.g., separate X screens on a single GPU). + * Unlike previous hardware, we're required to interlock all channels which are + * part of a fliplock update, instead of just using fliplock across heads. + */ +/* + * There are two scenarios: + * a) All fliplocked channels on this GPU are already part of this update. In + * that case we just need to set the appropriate fliplock pin for each, and + * we're done -- they're already interlocked. + * b) Some fliplocked channels are not part of this update. 
We still need to + * set them in the interlock mask, but it's dangerous to interlock with any + * channels *not* in the fliplock group; as an example: + * With two separate X screens on a single GPU, each driving one monitor, + * fliplocked together, if we get a flip request for screen 0/head 0 that + * interlocks core and base, then a second flip request for screen 1/head1 + * that interlocks core and base, we would end up programming one flip on + * the window on head 0, one flip on the window on head 1, and two flips in + * the core channel. The second core channel flip would never complete + * since it would be waiting for an interlock with the other window + * channels. + * + * To handle this case we pull the fliplocked channels out of this update + * and update them interlocked with all fliplocked channels (including those + * that aren't in this update), then proceed with a normal interlocked + * update excluding the fliplocked channels. + * + * \return Channel mask of channels which were handled by this function. + * Channels in this mask should be considered done and have no + * further updates pushed. No other channels should be + * interlocked with them. + */ +static NVEvoChannelMask ProcessFlipLockUpdates( + NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 *pFlipLockPin, + const NVEvoUpdateState *updateState) +{ + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvU32 head, window; + /* Channels that are part of this update which need to be fliplocked. */ + NVEvoChannelMask flipLockUpdateMask = 0; + /* All channels on this subdevice which are fliplocked. */ + NVEvoChannelMask flipLockAllMask = 0; + /* Channels which this function has handled and do not need further + * processing. */ + NVEvoChannelMask handledMask = 0; + NVEvoLockPin pin = NV_EVO_LOCK_PIN_ERROR; + NvU32 hwPin = NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE; + + /* First check if any of the fliplock-qualifying channels are actually + * fliplocked, and determine which pin they're using. */ + for (head = 0; head < pDevEvo->numHeads; head++) { + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + + if (pHC->flipLock) { + /* Convert the head index to a window index (two windows per head, + * one "base" and one "overlay"; we only fliplock "base") */ + NVEvoChannelMask windowMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, head * 2, _ENABLE); + if (updateState->subdev[sd].flipLockQualifyingMask & windowMask) { + if (flipLockUpdateMask == 0) { + pin = pHC->flipLockPin; + } else { + /* For now, we only support kicking off a single fliplock + * group as part of a single update call. */ + nvAssert(pin == pHC->flipLockPin); + } + flipLockUpdateMask |= windowMask; + } + } + } + + /* If we don't have any fliplocked updates, then we're done. */ + if (flipLockUpdateMask == 0) { + goto done; + } + + /* + * Gather all of the channels on this GPU which are part of this fliplock + * group (some of which may not be part of this update). + */ + for (head = 0; head < pDevEvo->numHeads; head++) { + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + + if (pHC->flipLock && (pHC->flipLockPin == pin)) { + NVEvoChannelMask windowMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, head * 2, _ENABLE); + flipLockAllMask |= windowMask; + } + } + + /* Convert the pin to a hardware enum. 
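+     * Internal fliplock pins map onto the INTERNAL_FLIP_LOCK_<n> values;
+     * all other pins map onto the LOCK_PIN(n) encoding.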
*/ + if (NV_EVO_LOCK_PIN_IS_INTERNAL(pin)) { + hwPin = NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 + + (pin - NV_EVO_LOCK_PIN_INTERNAL_0); + } else { + hwPin = NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(pin - NV_EVO_LOCK_PIN_0); + } + + /* If we're updating all of the fliplocked channels in this update, we can + * interlock with other channels as normal. */ + if (flipLockUpdateMask == flipLockAllMask) { + goto done; + } + + /* + * Kick off each of our update channels, using the full fliplock mask and + * hwPin calculated above. + */ + nvAssert((flipLockUpdateMask & ~NV_EVO_CHANNEL_MASK_WINDOW_ALL) == 0); + for (window = 0; window < pDevEvo->numWindows; window++) { + const NVEvoChannelMask windowMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE); + NVEvoChannelMask winImmChannelMask = + updateState->subdev[sd].winImmChannelMask; + NVEvoChannelMask winImmInterlockMask = + updateState->subdev[sd].winImmInterlockMask; + if (flipLockUpdateMask & windowMask) { + const NvBool transitionWAR = + (updateState->subdev[sd].flipTransitionWAR & windowMask) != 0; + UpdateWindowC3(pDevEvo->window[window], + flipLockAllMask, + winImmChannelMask, + winImmInterlockMask, + transitionWAR, + hwPin, TRUE /* releaseElv */); + } else { + UpdateWindowIMM(pDevEvo->window[window], winImmChannelMask, + winImmInterlockMask, TRUE /* releaseElv */); + } + } + handledMask = flipLockUpdateMask; + hwPin = NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE; + +done: + *pFlipLockPin = hwPin; + return handledMask; +} + +void nvEvoUpdateC3(NVDevEvoPtr pDevEvo, + const NVEvoUpdateState *updateState, + NvBool releaseElv) +{ + NvU32 sd, window; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoChannelMask updateChannelMask = + updateState->subdev[sd].channelMask; + const NVEvoChannelMask noCoreInterlockMask = + updateState->subdev[sd].noCoreInterlockMask; + NVEvoChannelMask coreInterlockMask = + updateChannelMask & ~noCoreInterlockMask; + const NvU32 subDeviceMask = (1 << sd); + NvU32 flipLockPin = NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE; + + nvPushEvoSubDevMask(pDevEvo, subDeviceMask); + + if (updateState->subdev[sd].flipLockQualifyingMask) { + NVEvoChannelMask handledChannels = 0; + + nvAssert((updateState->subdev[sd].flipLockQualifyingMask & + ~updateChannelMask) == 0); + nvAssert((updateState->subdev[sd].flipLockQualifyingMask & + updateState->subdev[sd].noCoreInterlockMask) == 0); + + handledChannels = + ProcessFlipLockUpdates(pDevEvo, sd, &flipLockPin, updateState); + + updateChannelMask &= ~handledChannels; + coreInterlockMask &= ~handledChannels; + } + + if (FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + updateChannelMask)) { + const NVEvoChannelMask thisInterlockMask = + FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + coreInterlockMask) ? coreInterlockMask : 0; + UpdateCoreC3(pDevEvo->core, thisInterlockMask, flipLockPin, + releaseElv); + } + + for (window = 0; window < pDevEvo->numWindows; window++) { + const NVEvoChannelMask windowMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE); + NVEvoChannelMask winImmChannelMask = + updateState->subdev[sd].winImmChannelMask; + NVEvoChannelMask winImmInterlockMask = + updateState->subdev[sd].winImmInterlockMask; + if (updateChannelMask & windowMask) { + const NvBool transitionWAR = + (updateState->subdev[sd].flipTransitionWAR & windowMask) != 0; + NVEvoChannelMask thisInterlockMask = + FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE, + coreInterlockMask) ? 
coreInterlockMask : 0; + UpdateWindowC3(pDevEvo->window[window], + thisInterlockMask, + winImmChannelMask, + winImmInterlockMask, + transitionWAR, + flipLockPin, + releaseElv); + } else { + UpdateWindowIMM(pDevEvo->window[window], winImmChannelMask, + winImmInterlockMask, releaseElv); + } + } + + nvPopEvoSubDevMask(pDevEvo); + } +} + +/*! + * Initialize head-specific IMP param fields. + * + * Initialize the NVC372_CTRL_IMP_HEAD for the specific head. + * + * \param[out] pImpHead The param structure to initialize. + * \param[in] pTimings The rastering timings and viewport configuration. + * \param[in] head The number of the head that will be driven. + * + * \return FALSE iff the parameters aren't even legal for HW. + */ +static NvBool AssignPerHeadImpParams(NVC372_CTRL_IMP_HEAD *pImpHead, + const NVHwModeTimingsEvo *pTimings, + const NvBool enableDsc, + const NvBool b2Heads1Or, + const int head, + const NVEvoScalerCaps *pScalerCaps) +{ + const NVHwModeViewPortEvo *pViewPort = &pTimings->viewPort; + struct NvKmsScalingUsageBounds scalingUsageBounds = { }; + + pImpHead->headIndex = head; + + /* raster timings */ + + pImpHead->maxPixelClkKHz = pTimings->pixelClock; + + pImpHead->rasterSize.width = pTimings->rasterSize.x; + pImpHead->rasterSize.height = pTimings->rasterSize.y; + pImpHead->rasterBlankStart.X = pTimings->rasterBlankStart.x; + pImpHead->rasterBlankStart.Y = pTimings->rasterBlankStart.y; + pImpHead->rasterBlankEnd.X = pTimings->rasterBlankEnd.x; + pImpHead->rasterBlankEnd.Y = pTimings->rasterBlankEnd.y; + pImpHead->rasterVertBlank2.yStart = pTimings->rasterVertBlank2Start; + pImpHead->rasterVertBlank2.yEnd = pTimings->rasterVertBlank2End; + + /* XXX TODO: Fill in correct scanlock information (only needed for + * MIN_VPSTATE). */ + pImpHead->control.masterLockMode = NV_DISP_LOCK_MODE_NO_LOCK; + pImpHead->control.masterLockPin = NV_DISP_LOCK_PIN_UNSPECIFIED; + pImpHead->control.slaveLockMode = NV_DISP_LOCK_MODE_NO_LOCK; + pImpHead->control.slaveLockPin = NV_DISP_LOCK_PIN_UNSPECIFIED; + + if (!nvComputeScalingUsageBounds(pScalerCaps, + pViewPort->in.width, pViewPort->in.height, + pViewPort->out.width, pViewPort->out.height, + pViewPort->hTaps, pViewPort->vTaps, + &scalingUsageBounds)) { + return FALSE; + } + pImpHead->bUpscalingAllowedV = scalingUsageBounds.vUpscalingAllowed; + pImpHead->maxDownscaleFactorV = scalingUsageBounds.maxVDownscaleFactor; + pImpHead->maxDownscaleFactorH = scalingUsageBounds.maxHDownscaleFactor; + pImpHead->outputScalerVerticalTaps = + NVEvoScalerTapsToNum(scalingUsageBounds.vTaps); + + if (!nvComputeMinFrameIdle(pTimings, + &pImpHead->minFrameIdle.leadingRasterLines, + &pImpHead->minFrameIdle.trailingRasterLines)) { + return FALSE; + } + + /* Assume we'll need the full 1025-entry output LUT. */ + pImpHead->lut = NVC372_CTRL_IMP_LUT_USAGE_1025; + + /* Cursor width, in units of 32 pixels. Assume we use the maximum size. */ + pImpHead->cursorSize32p = 256 / 32; + + pImpHead->bEnableDsc = enableDsc; + + pImpHead->bIs2Head1Or = b2Heads1Or; + + pImpHead->bYUV420Format = + (pTimings->yuv420Mode == NV_YUV420_MODE_HW); + + return TRUE; +} + +/*! + * Initialize window-specific IMP param fields. + * + * Initialize the NVC372_CTRL_IMP_WINDOW for the specific window. + * + * \param[out] pImpWindow The param structure to initialize. + * \param[in] pViewPort The viewport configuration for the head that + * the window is bound to. + * \param[in] supportedFormats The surface memory formats that can be + * supported on this window. 
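+ * \param[in]  pScaling          The scaling usage bounds of the window.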
+ * \param[in] window The number of the window. + * \param[in] head The number of the head that the window is + * bound to. + */ +static void AssignPerWindowImpParams(NVC372_CTRL_IMP_WINDOW *pImpWindow, + const NVHwModeViewPortEvo *pViewPort, + const NvU64 supportedFormats, + const struct NvKmsScalingUsageBounds *pScaling, + const int window, + const int head) +{ + pImpWindow->windowIndex = window; + pImpWindow->owningHead = head; + + pImpWindow->formatUsageBound = 0; + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_RGB_PACKED_1_BPP; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_RGB_PACKED_2_BPP; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_RGB_PACKED_4_BPP; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_RGB_PACKED_8_BPP; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_YUV_PACKED_422; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_420; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_422; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_444; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420) { + pImpWindow->formatUsageBound |= + NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_420; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422) { + pImpWindow->formatUsageBound |= + NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_422; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444) { + pImpWindow->formatUsageBound |= + NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_444; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444) { + pImpWindow->formatUsageBound |= + NVC372_CTRL_FORMAT_YUV_PLANAR_444; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420) { + pImpWindow->formatUsageBound |= + NVC372_CTRL_FORMAT_YUV_PLANAR_420; + } + + if (pImpWindow->formatUsageBound == 0) { + nvAssert(!"Unknown format in AssignPerWindowImpParams"); + } + + pImpWindow->maxPixelsFetchedPerLine = + nvGetMaxPixelsFetchedPerLine(pViewPort->in.width, + pScaling->maxHDownscaleFactor); + + pImpWindow->maxDownscaleFactorH = pScaling->maxHDownscaleFactor; + pImpWindow->maxDownscaleFactorV = pScaling->maxVDownscaleFactor; + pImpWindow->bUpscalingAllowedV = pScaling->vUpscalingAllowed; + pImpWindow->inputScalerVerticalTaps = + NVEvoScalerTapsToNum(pScaling->vTaps); + + /* Assume we need a full 1025-entry window (input) and tone-mapping + * output (TMO) LUT. 
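+     * (As with the head's output LUT above, NVC372_CTRL_IMP_LUT_USAGE_1025
+     * is the worst-case assumption.)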
*/ + pImpWindow->lut = NVC372_CTRL_IMP_LUT_USAGE_1025; + pImpWindow->tmoLut = NVC372_CTRL_IMP_LUT_USAGE_1025; +} + +NvBool +nvEvoSetCtrlIsModePossibleParams3(NVDispEvoPtr pDispEvo, + const NVEvoIsModePossibleDispInput *pInput, + NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *pImp) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NVEvoCapabilitiesPtr pEvoCaps = &pDevEvo->gpus[0].capabilities; + NvU32 head; + + nvkms_memset(pImp, 0, sizeof(*pImp)); + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + const NVHwModeTimingsEvo *pTimings = pInput->head[head].pTimings; + const NvU32 enableDsc = pInput->head[head].enableDsc; + const NvBool b2Heads1Or = pInput->head[head].b2Heads1Or; + const struct NvKmsUsageBounds *pUsage = pInput->head[head].pUsage; + const NVHwModeViewPortEvo *pViewPort; + NvU8 impHeadIndex; + NvU32 layer; + + if (pTimings == NULL) { + continue; + } + + pViewPort = &pTimings->viewPort; + + impHeadIndex = pImp->numHeads; + pImp->numHeads++; + nvAssert(impHeadIndex < NVC372_CTRL_MAX_POSSIBLE_HEADS); + + if (!AssignPerHeadImpParams(&pImp->head[impHeadIndex], + pTimings, + enableDsc, + b2Heads1Or, + head, + &pEvoCaps->head[head].scalerCaps)) { + return FALSE; + } + + /* XXXnvdisplay: This assumes a fixed window<->head mapping */ + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (!pUsage->layer[layer].usable) { + continue; + } + + nvAssert(pImp->numWindows < NVC372_CTRL_MAX_POSSIBLE_WINDOWS); + + AssignPerWindowImpParams( + &pImp->window[pImp->numWindows], + pViewPort, + pUsage->layer[layer].supportedSurfaceMemoryFormats, + &pUsage->layer[layer].scaling, + NV_EVO_CHANNEL_MASK_WINDOW_NUMBER( + pDevEvo->head[head].layer[layer]->channelMask), + head); + + pImp->numWindows++; + } + } + + pImp->base.subdeviceIndex = pDispEvo->displayOwner; + + /* XXXnvdisplay: Set bUseCachedPerfState? */ + + /* + * Set NEED_MIN_VPSTATE if reallocBandwidth != NONE. RM-IMP will only + * output the min required display bandwidth values if NEED_MIN_VPSTATE + * is set. + */ + if (pInput->requireBootClocks || + (pInput->reallocBandwidth != NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE)) { + // XXX TODO: IMP requires lock pin information if pstate information is + // requested. For now, just assume no locking. + pImp->options = NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE; + } + + return TRUE; +} + +void +nvEvoSetIsModePossibleDispOutput3(const NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *pImp, + const NvBool result, + NVEvoIsModePossibleDispOutput *pOutput) +{ + pOutput->possible = result; + if (pOutput->possible) { + pOutput->minRequiredBandwidthKBPS = pImp->minRequiredBandwidthKBPS; + pOutput->floorBandwidthKBPS = pImp->floorBandwidthKBPS; + } +} + +void +nvEvoIsModePossibleC3(NVDispEvoPtr pDispEvo, + const NVEvoIsModePossibleDispInput *pInput, + NVEvoIsModePossibleDispOutput *pOutput) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *pImp = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_IMP_PARAMS, sizeof(*pImp)); + NvBool result = FALSE; + NvU32 ret; + + if (!nvEvoSetCtrlIsModePossibleParams3(pDispEvo, pInput, pImp)) { + goto done; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->rmCtrlHandle, + NVC372_CTRL_CMD_IS_MODE_POSSIBLE, + pImp, sizeof(*pImp)); + + // XXXnvdisplay TODO: check pImp->minImpVPState if + // pInput->requireBootClocks is true? 
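+    // RM reports the overall verdict in bIsPossible; the bandwidth values
+    // consumed by nvEvoSetIsModePossibleDispOutput3() below are only
+    // meaningful when the mode is reported as possible.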
+ if (ret != NV_OK || !pImp->bIsPossible) { + goto done; + } + + result = TRUE; + +done: + for (NvU32 head = 0; head < pDevEvo->numHeads; head++) { + pOutput->head[head].dscSliceCount = pInput->head[head].dscSliceCount; + } + + nvEvoSetIsModePossibleDispOutput3(pImp, result, pOutput); + + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_IMP_PARAMS); +} + +void nvEvoPrePostIMPC3(NVDispEvoPtr pDispEvo, NvBool isPre) +{ + /* Nothing to do on nvdisplay -- pre/post IMP calls are not required. */ +} + +static void +EvoFlipC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition); + +/* + * Returns TRUE iff the CSC should be enabled (i.e., the matrix is not the + * identity matrix). + */ +static NvBool SetCscMatrixC3(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix) +{ + NvU32 method = NVC37E_SET_CSC_RED2RED; + int y; + + if (nvIsCscMatrixIdentity(matrix)) { + return FALSE; + } + + for (y = 0; y < 3; y++) { + int x; + + for (x = 0; x < 4; x++) { + // Use DRF_NUM to truncate client-supplied values that are out of + // range. + NvU32 val = DRF_NUM(C37E, _SET_CSC_RED2RED, _COEFF, + matrix->m[y][x]); + + nvDmaSetStartEvoMethod(pChannel, method, 1); + nvDmaSetEvoMethodData(pChannel, val); + + method += 4; + } + } + + return TRUE; +} + +static void SetCscMatrixC5Wrapper(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix, + NvU32 coeffMethod, NvU32 controlMethod, + NvU32 enableMethodData, + NvU32 disableMethodData) +{ + int y; + + if (nvIsCscMatrixIdentity(matrix)) { + nvDmaSetStartEvoMethod(pChannel, controlMethod, 1); + nvDmaSetEvoMethodData(pChannel, disableMethodData); + return; + } + + nvDmaSetStartEvoMethod(pChannel, controlMethod, 1); + nvDmaSetEvoMethodData(pChannel, enableMethodData); + + for (y = 0; y < 3; y++) { + int x; + + for (x = 0; x < 4; x++) { + // Use DRF_NUM to truncate client-supplied values that are out of + // range. + // + // Note that it doesn't matter whether we use the CSC00 or CSC11 + // methods to truncate since they're identical. 
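+            // (DRF_NUM masks the value to the field's width before shifting
+            // it into place, roughly ((n) & DRF_MASK(f)) << DRF_SHIFT(f),
+            // which is what performs the truncation.)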
+ NvU32 val = DRF_NUM(C57E, _SET_CSC00COEFFICIENT_C00, _VALUE, + matrix->m[y][x]); + + nvDmaSetStartEvoMethod(pChannel, coeffMethod, 1); + nvDmaSetEvoMethodData(pChannel, val); + + coeffMethod += 4; + } + } +} + +static void SetCsc00MatrixC5(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix) +{ + SetCscMatrixC5Wrapper(pChannel, + matrix, + NVC57E_SET_CSC00COEFFICIENT_C00, NVC57E_SET_CSC00CONTROL, + DRF_DEF(C57E, _SET_CSC00CONTROL, _ENABLE, _ENABLE), + DRF_DEF(C57E, _SET_CSC00CONTROL, _ENABLE, _DISABLE)); +} + +static void SetCsc01MatrixC5(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix) +{ + SetCscMatrixC5Wrapper(pChannel, + matrix, + NVC57E_SET_CSC01COEFFICIENT_C00, NVC57E_SET_CSC01CONTROL, + DRF_DEF(C57E, _SET_CSC01CONTROL, _ENABLE, _ENABLE), + DRF_DEF(C57E, _SET_CSC01CONTROL, _ENABLE, _DISABLE)); +} + +static void SetCsc10MatrixC5(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix) +{ + SetCscMatrixC5Wrapper(pChannel, + matrix, + NVC57E_SET_CSC10COEFFICIENT_C00, NVC57E_SET_CSC10CONTROL, + DRF_DEF(C57E, _SET_CSC10CONTROL, _ENABLE, _ENABLE), + DRF_DEF(C57E, _SET_CSC10CONTROL, _ENABLE, _DISABLE)); +} + +static void SetCsc11MatrixC5(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix) +{ + SetCscMatrixC5Wrapper(pChannel, + matrix, + NVC57E_SET_CSC11COEFFICIENT_C00, NVC57E_SET_CSC11CONTROL, + DRF_DEF(C57E, _SET_CSC11CONTROL, _ENABLE, _ENABLE), + DRF_DEF(C57E, _SET_CSC11CONTROL, _ENABLE, _DISABLE)); +} + +/* + * WAR for GV100 HW bug 1978592: + * + * Timestamped flips allow SW to specify the earliest time that the next UPDATE + * will complete. Due to a HW bug, GV100 waits for the timestamp in the ARMED + * state (i.e. the timestamps that were pushed in the previous UPDATE) instead + * of the timestamp in the ASSEMBLY state (the time we want to postpone this + * flip until). + * + * This WAR inserts an additional UPDATE to push the timestamp from ASSEMBLY to + * ARMED while changing no other state, so the following normal UPDATE can + * wait for the correct timestamp. + * + * This update needs to have the following characteristics: + * + * - MIN_PRESENT_INTERVAL 0 + * - TIMESTAMP_MODE _ENABLE + * - All other SET_PRESENT_CONTROL fields unmodified from previous UPDATE + * - SET_UPDATE_TIMESTAMP (target timestamp) + * - RELEASE_ELV _FALSE + * - Non-interlocked + * - Non-fliplocked + */ +static void +InsertAdditionalTimestampFlip(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState) +{ + NvU32 presentControl = pChannel->oldPresentControl; + + /* This hardware bug is only present on GV100 which uses window + * class C37E. */ + nvAssert(pChannel->hwclass == NVC37E_WINDOW_CHANNEL_DMA); + + nvAssert(pHwState->timeStamp != 0); + + /* + * Update the necessary fields in SET_PRESENT_CONTROL without modifying + * the existing values by using the cached SET_PRESENT_CONTROL values + * from the previous update. + * + * Note that BEGIN_MODE must not be changed here; even though BEGIN_MODE + * may currently be NON_TEARING, a NON_TEARING + MIN_PRESENT_INTERVAL 0 + * flip will be correctly collapsed with the surrounding + * MIN_PRESENT_INTERVAL 1 flips. If we were to change BEGIN_MODE to + * IMMEDIATE, this would cause an additional delay due to the transition + * from NON_TEARING to IMMEDIATE. 
+ */ + presentControl = FLD_SET_DRF_NUM(C37E, _SET_PRESENT_CONTROL, + _MIN_PRESENT_INTERVAL, + 0, presentControl); + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, + _TIMESTAMP_MODE, + _ENABLE, presentControl); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_PRESENT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, presentControl); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_UPDATE_TIMESTAMP_LO, 2); + nvDmaSetEvoMethodData(pChannel, NvU64_LO32(pHwState->timeStamp)); + nvDmaSetEvoMethodData(pChannel, NvU64_HI32(pHwState->timeStamp)); + + // Issue non-interlocked, non-fliplocked, non-ReleaseElv UPDATE + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_INTERLOCK_FLAGS, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, + NVC37E_SET_WINDOW_INTERLOCK_FLAGS, + 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37E, _UPDATE, _RELEASE_ELV, _FALSE) | + DRF_NUM(C37E, _UPDATE, _FLIP_LOCK_PIN, + NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE) | + DRF_DEF(C37E, _UPDATE, _INTERLOCK_WITH_WIN_IMM, + _DISABLE)); +} + +static void +EvoProgramSemaphore3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState) +{ + nvAssertSameSemaphoreSurface(pHwState); + + if (pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo == NULL) { + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_ACQUIRE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->syncObject.u.semaphores.acquireSurface; + + nvAssert(pNIso->format == NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY); + /* XXX nvdisplay: enforce this at a higher level */ + nvAssert((pNIso->offsetInWords % 4) == 0); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_ACQUIRE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.acquireValue); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.releaseValue); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C37E, _SET_SEMAPHORE_CONTROL, _OFFSET, + pNIso->offsetInWords / 4)); + } +} + +static void EvoSetSemaphoreSurfaceAddressAndControlC6( + const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 semaphoreOffset, + NvU32 ctrlVal) +{ + NvU32 ctxDmaHandle = pSurfaceDesc ? pSurfaceDesc->ctxDmaHandle : 0; + + /*! set ctx dma handle */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67E, _SET_CONTEXT_DMA_SEMAPHORE, _HANDLE, ctxDmaHandle)); + + /*! 
set semaphore control and acq-rel mode */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, semaphoreOffset | ctrlVal); +} + +static void EvoSetAcqSemaphoreSurfaceAddressAndControlC6( + const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 semaphoreOffset, + NvU32 ctrlVal) +{ + NvU32 ctxDmaHandle = pSurfaceDesc ? pSurfaceDesc->ctxDmaHandle : 0; + + /*! set ctx dma handle */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_CONTEXT_DMA_ACQ_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67E, _SET_CONTEXT_DMA_ACQ, _SEMAPHORE_HANDLE, ctxDmaHandle)); + + /*! set semaphore control and acq mode */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_ACQ_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, semaphoreOffset | ctrlVal); +} + +/*! + * On Tegra, syncpts are used for synchronization between SW and HW, + * and also across HW engines. Since NvDisplay 4.0 only natively + * understands semaphores, there's a SHIM layer in the memory subsystem + * that will convert semaphore acquires/releases into corresponding + * syncpoint reads/writes. As such, each syncpoint is mapped to an + * underlying 'dummy' semaphore surface, and the methods for these surfaces + * need to be programmed as if they were real memory-backed semaphores. + */ + +static void +EvoProgramSemaphore6(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState) +{ + NvU32 offset, acqMode, relMode, value; + const NVSurfaceDescriptor *pSurfaceDesc = NULL; + const NVFlipNIsoSurfaceEvoHwState *pNIso; + + /*! Program Acq-only semaphore */ + pSurfaceDesc = NULL; + offset = acqMode = relMode = value = 0; + if (pHwState->syncObject.usingSyncpt && + pHwState->syncObject.u.syncpts.isPreSyncptSpecified) { + NvU32 id = pHwState->syncObject.u.syncpts.preSyncpt; + pSurfaceDesc = &pDevEvo->preSyncptTable[id].surfaceDesc; + acqMode = DRF_DEF(C67E, _SET_ACQ_SEMAPHORE_CONTROL, _ACQ_MODE, _CGEQ); + value = pHwState->syncObject.u.syncpts.preValue; + } else { + if (pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo != NULL) { + pNIso = &pHwState->syncObject.u.semaphores.acquireSurface; + pSurfaceDesc = &pNIso->pSurfaceEvo->planes[0].surfaceDesc; + offset = pNIso->offsetInWords / 4; + acqMode = DRF_DEF(C67E, _SET_ACQ_SEMAPHORE_CONTROL, _ACQ_MODE, _EQ); + value = pHwState->syncObject.u.semaphores.acquireValue; + } + } + + pDevEvo->hal->SetAcqSemaphoreSurfaceAddressAndControl(pDevEvo, pChannel, + pSurfaceDesc, offset, acqMode); + + /*! set semaphore value */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_ACQ_SEMAPHORE_VALUE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67E, _SET_ACQ_SEMAPHORE_VALUE, _VALUE, value)); + + /*! Program Rel-only semaphore */ + pSurfaceDesc = NULL; + offset = acqMode = relMode = value = 0; + if (pHwState->syncObject.usingSyncpt && + pHwState->syncObject.u.syncpts.isPostSyncptSpecified) { + pSurfaceDesc = &pHwState->syncObject.u.syncpts.surfaceDesc; + acqMode = DRF_DEF(C67E, _SET_SEMAPHORE_CONTROL, _SKIP_ACQ, _TRUE); + relMode = DRF_DEF(C67E, _SET_SEMAPHORE_CONTROL, _REL_MODE, _WRITE); + value = pHwState->syncObject.u.syncpts.postValue; + /*! 
increase local max val as well */ + pChannel->postSyncpt.syncptMaxVal++; + } else { + if (pHwState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo != NULL) { + pNIso = &pHwState->syncObject.u.semaphores.releaseSurface; + pSurfaceDesc = &pNIso->pSurfaceEvo->planes[0].surfaceDesc; + offset = pNIso->offsetInWords / 4; + acqMode = DRF_DEF(C67E, _SET_SEMAPHORE_CONTROL, _SKIP_ACQ, _TRUE); + relMode = DRF_DEF(C67E, _SET_SEMAPHORE_CONTROL, _REL_MODE, _WRITE); + value = pHwState->syncObject.u.semaphores.releaseValue; + } + } + + pDevEvo->hal->SetSemaphoreSurfaceAddressAndControl(pDevEvo, pChannel, + pSurfaceDesc, offset, (acqMode | relMode)); + + /*! set semaphore value */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67E, _SET_SEMAPHORE_RELEASE, _VALUE, value)); +} + +static void EvoSetWinNotifierSurfaceAddressAndControlC3( + const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 notifierOffset, + NvU32 ctrlVal) +{ + NvU32 ctxDmaHandle = pSurfaceDesc ? pSurfaceDesc->ctxDmaHandle : 0; + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_CONTEXT_DMA_NOTIFIER, _HANDLE, ctxDmaHandle)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_NOTIFIER_CONTROL, _OFFSET, notifierOffset) | ctrlVal); +} + +static void EvoSetISOSurfaceAddressC3( + const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 offset, + NvU32 ctxDmaIdx, + NvBool isBlocklinear) +{ + NvU32 ctxDmaHandle = pSurfaceDesc ? pSurfaceDesc->ctxDmaHandle : 0; + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_ISO(ctxDmaIdx), 1); + nvDmaSetEvoMethodData(pChannel, ctxDmaHandle); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_OFFSET(ctxDmaIdx), 1); + nvDmaSetEvoMethodData(pChannel, nvCtxDmaOffsetFromBytes(offset)); +} + +static NvBool +EvoFlipC3Common(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo; + NvU32 presentControl, eye; + NvU32 storage; + NvU8 planeIndex; + NVSurfaceDescriptor *pSurfaceDesc = NULL; + NvU32 offset, ctrlVal; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* program notifier */ + + if (pHwState->completionNotifier.surface.pSurfaceEvo == NULL) { + offset = ctrlVal = 0; + pDevEvo->hal->SetWinNotifierSurfaceAddressAndControl(pDevEvo, + pChannel, NULL, offset, ctrlVal); + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->completionNotifier.surface; + + nvAssert(pNIso->format == NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY); + /* XXX nvdisplay: enforce this at a higher level */ + nvAssert((pNIso->offsetInWords % 4) == 0); + + pSurfaceDesc = &pNIso->pSurfaceEvo->planes[0].surfaceDesc; + offset = pNIso->offsetInWords / 4; + ctrlVal = 0; + if (pHwState->completionNotifier.awaken) { + ctrlVal = FLD_SET_DRF(C37E, _SET_NOTIFIER_CONTROL, _MODE, + _WRITE_AWAKEN, ctrlVal); + } else { + ctrlVal = FLD_SET_DRF(C37E, _SET_NOTIFIER_CONTROL, _MODE, + _WRITE, ctrlVal); + } + + pDevEvo->hal->SetWinNotifierSurfaceAddressAndControl(pDevEvo, + pChannel, pSurfaceDesc, offset, ctrlVal); + } + + if (!pHwState->pSurfaceEvo[NVKMS_LEFT]) { + // Disable this window, and set all its ctxdma entries to NULL. 
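+        // (Every eye/plane combination owns a ctxdma slot, so all
+        // NVKMS_MAX_EYES * NVKMS_MAX_PLANES_PER_SURFACE slots are cleared.)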
+        for (eye = 0; eye < NVKMS_MAX_EYES; eye++) {
+            for (planeIndex = 0;
+                 planeIndex < NVKMS_MAX_PLANES_PER_SURFACE;
+                 planeIndex++) {
+                const NvU8 ctxDmaIdx = EyeAndPlaneToCtxDmaIdx(eye, planeIndex);
+                pDevEvo->hal->SetISOSurfaceAddress(pDevEvo, pChannel,
+                    NULL /* pSurfaceDesc */, 0 /* offset */, ctxDmaIdx,
+                    NV_FALSE /* isBlocklinear */);
+            }
+        }
+
+        return FALSE;
+    }
+
+    presentControl = DRF_NUM(C37E, _SET_PRESENT_CONTROL, _MIN_PRESENT_INTERVAL,
+                             pHwState->minPresentInterval);
+
+    if (pHwState->timeStamp != 0) {
+        presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _TIMESTAMP_MODE,
+                                     _ENABLE, presentControl);
+    } else {
+        presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _TIMESTAMP_MODE,
+                                     _DISABLE, presentControl);
+    }
+
+    if (pHwState->tearing) {
+        presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _BEGIN_MODE,
+                                     _IMMEDIATE, presentControl);
+    } else {
+        presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _BEGIN_MODE,
+                                     _NON_TEARING, presentControl);
+    }
+
+    if (pHwState->pSurfaceEvo[NVKMS_RIGHT]) {
+        if (pHwState->perEyeStereoFlip) {
+            presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _STEREO_MODE,
+                                         _AT_ANY_FRAME, presentControl);
+        } else {
+            presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _STEREO_MODE,
+                                         _PAIR_FLIP, presentControl);
+        }
+    } else {
+        presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _STEREO_MODE,
+                                     _MONO, presentControl);
+    }
+    nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_PRESENT_CONTROL, 1);
+    nvDmaSetEvoMethodData(pChannel, presentControl);
+
+    /*
+     * GV100 timestamped flips need a duplicate update which only changes
+     * TIMESTAMP_MODE and MIN_PRESENT_INTERVAL fields in SET_PRESENT_CONTROL;
+     * to allow updating these fields without changing anything else in
+     * SET_PRESENT_CONTROL, cache the values we sent in previous flips here.
+     * (bug 1990958)
+     */
+    pChannel->oldPresentControl = presentControl;
+
+    /* Set the surface parameters.
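+     * Each eye contributes up to NVKMS_MAX_PLANES_PER_SURFACE planes; slots
+     * beyond the format's plane count are cleared with a NULL descriptor.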
*/ + FOR_ALL_EYES(eye) { + const NVSurfaceEvoRec *pSurfaceEvoPerEye = pHwState->pSurfaceEvo[eye]; + NvU8 numSurfacePlanes = 0; + NvBool isBlockLinear = NV_FALSE; + + if (pSurfaceEvoPerEye != NULL) { + pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(pSurfaceEvoPerEye->format); + numSurfacePlanes = pFormatInfo->numPlanes; + isBlockLinear = + (pSurfaceEvoPerEye->layout == NvKmsSurfaceMemoryLayoutBlockLinear); + } + + for (planeIndex = 0; + planeIndex < NVKMS_MAX_PLANES_PER_SURFACE; + planeIndex++) { + const NVSurfaceDescriptor *pSurfaceDesc = NULL; + NvU64 offset = 0; + const NvU8 ctxDmaIdx = EyeAndPlaneToCtxDmaIdx(eye, planeIndex); + + if (planeIndex < numSurfacePlanes) { + pSurfaceDesc = &pSurfaceEvoPerEye->planes[planeIndex].surfaceDesc; + offset = pSurfaceEvoPerEye->planes[planeIndex].offset; + } + + pDevEvo->hal->SetISOSurfaceAddress(pDevEvo, pChannel, + pSurfaceDesc, offset, ctxDmaIdx, isBlockLinear); + } + } + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SIZE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_SIZE, _WIDTH, pHwState->pSurfaceEvo[NVKMS_LEFT]->widthInPixels) | + DRF_NUM(C37E, _SET_SIZE, _HEIGHT, pHwState->pSurfaceEvo[NVKMS_LEFT]->heightInPixels)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SIZE_IN, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_SIZE_IN, _WIDTH, pHwState->sizeIn.width) | + DRF_NUM(C37E, _SET_SIZE_IN, _HEIGHT, pHwState->sizeIn.height)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SIZE_OUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_SIZE_OUT, _WIDTH, pHwState->sizeOut.width) | + DRF_NUM(C37E, _SET_SIZE_OUT, _HEIGHT, pHwState->sizeOut.height)); + + /* XXX nvdisplay: enforce pitch/BL layout are consistent between eyes at a + * higher level */ + + storage = 0; + if (pHwState->pSurfaceEvo[NVKMS_LEFT]->layout == + NvKmsSurfaceMemoryLayoutBlockLinear) { + const NvU32 blockHeight = pHwState->pSurfaceEvo[NVKMS_LEFT]->log2GobsPerBlockY; + storage |= DRF_NUM(C37E, _SET_STORAGE, _BLOCK_HEIGHT, blockHeight); + if (pDevEvo->hal->caps.supportsSetStorageMemoryLayout) { + storage |= DRF_DEF(C37E, _SET_STORAGE, _MEMORY_LAYOUT, _BLOCKLINEAR); + } + } else if (pDevEvo->hal->caps.supportsSetStorageMemoryLayout) { + storage |= DRF_DEF(C37E, _SET_STORAGE, _MEMORY_LAYOUT, _PITCH); + } + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_STORAGE, 1); + nvDmaSetEvoMethodData(pChannel, storage); + + pFormatInfo = nvKmsGetSurfaceMemoryFormatInfo( + pHwState->pSurfaceEvo[NVKMS_LEFT]->format); + + for (planeIndex = 0; + planeIndex < NVKMS_MAX_PLANES_PER_SURFACE; + planeIndex++) { + NvU32 pitch; + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_PLANAR_STORAGE(planeIndex), + 1); + + if (planeIndex >= pFormatInfo->numPlanes) { + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_PLANAR_STORAGE, _PITCH, 0)); + continue; + } + + /* + * Per nvdClass_01.mfs, the HEAD_SET_STORAGE_PITCH "units are blocks + * if the layout is BLOCKLINEAR, the units are multiples of 64 bytes + * if the layout is PITCH." + */ + pitch = pHwState->pSurfaceEvo[NVKMS_LEFT]->planes[planeIndex].pitch; + if (pHwState->pSurfaceEvo[NVKMS_LEFT]->layout == + NvKmsSurfaceMemoryLayoutBlockLinear) { + /* pitch is already in units of blocks; no conversion needed. 
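+             * (For PITCH layout, the else branch below converts the byte
+             * pitch into the 64-byte units the HW expects, hence the
+             * alignment assert and the >> 6.)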
*/ + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_PLANAR_STORAGE, _PITCH, pitch)); + } else { + /* XXX nvdisplay: enforce this at a higher level */ + nvAssert((pitch & 63) == 0); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_PLANAR_STORAGE, _PITCH, pitch >> 6)); + } + } + + ASSERT_EYES_MATCH(pHwState->pSurfaceEvo, format); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_UPDATE_TIMESTAMP_LO, 2); + nvDmaSetEvoMethodData(pChannel, NvU64_LO32(pHwState->timeStamp)); + nvDmaSetEvoMethodData(pChannel, NvU64_HI32(pHwState->timeStamp)); + + return TRUE; +} + +/* + * This function returns TRUE if precomp needs to swap the U and V components to + * support the given input surface format. For all such formats, + * SetParams.SwapUV needs to be enabled. + * + * Due to the "feature" described in bug 1640117, there's a mismatch in the + * ihub<->precomp interface: + * - For all Yx___UxVx_N444 and Yx___UxVx_N422 formats, ihub will fetch and send + * the V sample as the first chroma byte, and the U sample as the second byte. + * However, precomp expects the U sample as the first byte, and the V sample + * as the second byte. + * - For all Yx___VxUx_N420 formats, ihub will fetch and send the U sample as + * the first chroma byte, and the V sample as the second byte. + * However, precomp expects the V sample as the first byte, and the U sample + * as the second byte. + * + * In the above explanation, note that ihub simply fetches and sends the chroma + * bytes in the same order that they're packed in memory. + */ +static NvBool IsSurfaceFormatUVSwapped( + const enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + return TRUE; + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + return FALSE; + case NvKmsSurfaceMemoryFormatI8: + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + case NvKmsSurfaceMemoryFormatR5G6B5: + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + case NvKmsSurfaceMemoryFormatRF16GF16BF16XF16: + case NvKmsSurfaceMemoryFormatR16G16B16A16: + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + return FALSE; + } + + return FALSE; +} + +/* + * Map the given NvKmsSurfaceMemoryFormat to its corresponding HW format for the + * C370 (Volta) NVDISPLAY class. 
+ * + * Volta supports YUV422 packed, but this function excludes the corresponding + * mappings because the required programming support hasn't been added to NVKMS + * yet. + * + * Return 0 in the case of an unrecognized NvKmsSurfaceMemoryFormat. + */ +static NvU32 nvHwFormatFromKmsFormatC3( + const enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatI8: + return NVC37E_SET_PARAMS_FORMAT_I8; + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + return NVC37E_SET_PARAMS_FORMAT_A1R5G5B5; + case NvKmsSurfaceMemoryFormatR5G6B5: + return NVC37E_SET_PARAMS_FORMAT_R5G6B5; + case NvKmsSurfaceMemoryFormatA8R8G8B8: + return NVC37E_SET_PARAMS_FORMAT_A8R8G8B8; + case NvKmsSurfaceMemoryFormatX8R8G8B8: + return NVC37E_SET_PARAMS_FORMAT_X8R8G8B8; + case NvKmsSurfaceMemoryFormatA8B8G8R8: + return NVC37E_SET_PARAMS_FORMAT_A8B8G8R8; + case NvKmsSurfaceMemoryFormatX8B8G8R8: + return NVC37E_SET_PARAMS_FORMAT_X8B8G8R8; + case NvKmsSurfaceMemoryFormatA2B10G10R10: + return NVC37E_SET_PARAMS_FORMAT_A2B10G10R10; + case NvKmsSurfaceMemoryFormatX2B10G10R10: + return NVC37E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS; + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + case NvKmsSurfaceMemoryFormatRF16GF16BF16XF16: + return NVC37E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16; + case NvKmsSurfaceMemoryFormatR16G16B16A16: + return NVC37E_SET_PARAMS_FORMAT_R16_G16_B16_A16; + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + return 0; + } + + return 0; +} + +/* + * Map the given NvKmsSurfaceMemoryFormat to its corresponding HW format for the + * C570 (Turing) NVDISPLAY class. + * + * Return 0 in the case of an unrecognized NvKmsSurfaceMemoryFormat. 
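+ *
+ * Formats that Turing shares with Volta fall through to
+ * nvHwFormatFromKmsFormatC3().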
+ */ +static NvU32 nvHwFormatFromKmsFormatC5( + const enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + return NVC57E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422; + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + return NVC57E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422; + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + return NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N444; + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + return NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N422; + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + return NVC57E_SET_PARAMS_FORMAT_Y8___V8U8_N420; + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + return NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N444; + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + return NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N422; + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + return NVC57E_SET_PARAMS_FORMAT_Y10___V10U10_N420; + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + return NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N444; + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + return NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N422; + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + return NVC57E_SET_PARAMS_FORMAT_Y12___V12U12_N420; + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + case NvKmsSurfaceMemoryFormatI8: + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + case NvKmsSurfaceMemoryFormatR5G6B5: + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + case NvKmsSurfaceMemoryFormatRF16GF16BF16XF16: + case NvKmsSurfaceMemoryFormatR16G16B16A16: + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + return nvHwFormatFromKmsFormatC3(format); + } + + return 0; +} + +/* + * Map the given NvKmsSurfaceMemoryFormat to its corresponding HW format for the + * C670 (Orin and Ampere) NVDISPLAY class. + * + * Return 0 in the case of an unrecognized NvKmsSurfaceMemoryFormat. 
+ */ +NvU32 nvHwFormatFromKmsFormatC6(const enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + return NVC67E_SET_PARAMS_FORMAT_Y8___U8___V8_N444; + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + return NVC67E_SET_PARAMS_FORMAT_Y8___U8___V8_N420; + case NvKmsSurfaceMemoryFormatX2B10G10R10: + return NVC67E_SET_PARAMS_FORMAT_A2B10G10R10; + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + case NvKmsSurfaceMemoryFormatI8: + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + case NvKmsSurfaceMemoryFormatR5G6B5: + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + case NvKmsSurfaceMemoryFormatRF16GF16BF16XF16: + case NvKmsSurfaceMemoryFormatR16G16B16A16: + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + return nvHwFormatFromKmsFormatC5(format); + } + + return 0; +} + +static +NVSurfaceEvoPtr EvoGetLutSurface3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NvU32 *lutSize, + NvU64 *offset, + NvBool *isLutModeVss) +{ + NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask); + NvU32 head = pDevEvo->headForWindow[win]; + NvBool found = FALSE; + const NVDispEvoRec *pDispEvo = NULL; + NvU32 sd; + + if ((pHwState->pSurfaceEvo[NVKMS_LEFT] == NULL) || + (head == NV_INVALID_HEAD)) { + return NULL; + } + + /* Input Lut is explicitly enabled by client */ + if (pHwState->inputLut.pLutSurfaceEvo != NULL) { + *lutSize = pHwState->inputLut.lutEntries + NV_LUT_VSS_HEADER_SIZE; + *offset = pHwState->inputLut.offset; + *isLutModeVss = (pHwState->inputLut.vssSegments > 0); + + return pHwState->inputLut.pLutSurfaceEvo; + } + + /* + * For everything but I8 surfaces, we can just use the specified + * LUT, even if it's NULL. + * For I8 surfaces, we can only use the specified surface if it's + * non-NULL (an input LUT is required). + */ + if (pHwState->pSurfaceEvo[NVKMS_LEFT]->format != + NvKmsSurfaceMemoryFormatI8) { + return NULL; + } + + /* + * The rest of the function is to handle the I8 case where no input + * LUT was specified: look up the LUT to use from the device. 
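+     *
+     * The loop below finds the pDispEvo for the subdevices selected in the
+     * current subdevice mask; all selected subdevices are expected to share
+     * the same pDispEvo, which is asserted once more than one has been seen.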
+ */
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        if (nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)) {
+            if (found) {
+                nvAssert(pDispEvo == pDevEvo->gpus[sd].pDispEvo);
+            } else {
+                pDispEvo = pDevEvo->gpus[sd].pDispEvo;
+                found = TRUE;
+            }
+        }
+    }
+
+    nvAssert(found);
+
+    /*
+     * It is not allowed to change the input LUT on immediate flips. The
+     * higher-level code should make sure to disable tearing if there is a
+     * change in the surface format; curLUTIndex does not change until the
+     * next EvoSetLUTContextDma3() call, which also makes sure to disable
+     * tearing.
+     */
+    return pDispEvo->headState[head].lut.pCurrSurface;
+}
+
+static void
+EvoFlipC3(NVDevEvoPtr pDevEvo,
+          NVEvoChannelPtr pChannel,
+          const NVFlipChannelEvoHwState *pHwState,
+          NVEvoUpdateState *updateState,
+          NvBool bypassComposition)
+{
+    NvBool enableCSC, swapUV, flip3Return;
+    enum NvKmsSurfaceMemoryFormat format;
+    /*
+     * lutSize and isLutModeVss are unused, since we only support 1025-entry,
+     * non-VSS LUTs on Volta, but we declare them to pass to
+     * EvoGetLutSurface3.
+     *
+     * TODO: Maybe validate the resulting values?
+     */
+    NvU32 lutSize = NV_NUM_EVO_LUT_ENTRIES;
+    NvU64 offset = offsetof(NVEvoLutDataRec, base);
+    NvBool isLutModeVss = FALSE;
+    NVSurfaceEvoPtr pLutSurfaceEvo = EvoGetLutSurface3(pDevEvo, pChannel, pHwState,
+                                                       &lutSize, &offset, &isLutModeVss);
+
+    if (pHwState->timeStamp != 0) {
+        InsertAdditionalTimestampFlip(pDevEvo, pChannel, pHwState,
+                                      updateState);
+    }
+
+    flip3Return = EvoFlipC3Common(pDevEvo, pChannel, pHwState, updateState);
+
+    /* program semaphore */
+    EvoProgramSemaphore3(pDevEvo, pChannel, pHwState);
+
+    if (!flip3Return) {
+        return;
+    }
+
+    format = pHwState->pSurfaceEvo[NVKMS_LEFT]->format;
+
+    enableCSC = SetCscMatrixC3(pChannel, &pHwState->cscMatrix);
+    swapUV = IsSurfaceFormatUVSwapped(format);
+    nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_PARAMS, 1);
+    nvDmaSetEvoMethodData(pChannel,
+        (enableCSC ? DRF_DEF(C37E, _SET_PARAMS, _CSC, _ENABLE) :
+                     DRF_DEF(C37E, _SET_PARAMS, _CSC, _DISABLE)) |
+        DRF_NUM(C37E, _SET_PARAMS, _FORMAT, nvHwFormatFromKmsFormatC3(format)) |
+        (swapUV ?
DRF_DEF(C37E, _SET_PARAMS, _SWAP_UV, _ENABLE) : + DRF_DEF(C37E, _SET_PARAMS, _SWAP_UV, _DISABLE)) | + DRF_DEF(C37E, _SET_PARAMS, _UNDERREPLICATE, _DISABLE)); + + if (pLutSurfaceEvo) { + const NvU32 ctxDma = pLutSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle; + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTROL_INPUT_LUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37E, _SET_CONTROL_INPUT_LUT, _SIZE, _SIZE_1025) | + DRF_DEF(C37E, _SET_CONTROL_INPUT_LUT, _RANGE, _UNITY) | + DRF_DEF(C37E, _SET_CONTROL_INPUT_LUT, _OUTPUT_MODE, _INDEX)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_OFFSET_INPUT_LUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_OFFSET_INPUT_LUT, _ORIGIN, offset)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_INPUT_LUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_CONTEXT_DMA_INPUT_LUT, _HANDLE, ctxDma)); + } else { + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_INPUT_LUT, 1); + nvDmaSetEvoMethodData(pChannel, 0); + } + + UpdateCompositionC3(pDevEvo, pChannel, + &pHwState->composition, updateState, + format); +} + +static void EvoSetupPQEotfBaseLutC5(NVEvoLutDataRec *pData, + enum NvKmsLUTState *lutState, + NvU32 *lutSize, + NvBool *isLutModeVss) +{ + NvU32 lutDataStartingIndex = NV_LUT_VSS_HEADER_SIZE; + NvU32 numEotfPQ512Entries = ARRAY_LEN(EotfPQ512Entries); + NvU32 eotfTableIdx; + NvU64 vssHead = 0; + NvU32 lutEntryCounter = 0, i; + + // Skip LUT data init if already done + if (*lutState == NvKmsLUTStatePQ) { + goto skipInit; + } + + // VSS Header + for (lutEntryCounter = 0; lutEntryCounter < NV_LUT_VSS_HEADER_SIZE; lutEntryCounter++) { + vssHead = 0; + for (i = 0; ((i < 16) && (((lutEntryCounter * 16) + i) < ARRAY_LEN(EotfPQ512SegSizesLog2))); i++) { + NvU64 temp = EotfPQ512SegSizesLog2[(lutEntryCounter * 16) + i]; + temp = temp << (i * 3); + vssHead |= temp; + } + nvkms_memcpy(&(pData->base[lutEntryCounter]), &vssHead, sizeof(NVEvoLutEntryRec)); + } + + for (eotfTableIdx = 0; eotfTableIdx < numEotfPQ512Entries; eotfTableIdx++) { + /* + * Values are in range [0.0, 125.0], will be scaled back by OLUT. + * XXX HDR TODO: Divide by 125.0 if output mode is not HDR? + */ + pData->base[eotfTableIdx + lutDataStartingIndex].Red = + pData->base[eotfTableIdx + lutDataStartingIndex].Green = + pData->base[eotfTableIdx + lutDataStartingIndex].Blue = + EotfPQ512Entries[eotfTableIdx]; + } + + // Copy the last entry for interpolation + pData->base[numEotfPQ512Entries + lutDataStartingIndex].Red = + pData->base[numEotfPQ512Entries + lutDataStartingIndex - 1].Red; + pData->base[numEotfPQ512Entries + lutDataStartingIndex].Green = + pData->base[numEotfPQ512Entries + lutDataStartingIndex - 1].Green; + pData->base[numEotfPQ512Entries + lutDataStartingIndex].Blue = + pData->base[numEotfPQ512Entries + lutDataStartingIndex - 1].Blue; + +skipInit: + *lutState = NvKmsLUTStatePQ; + *lutSize = NV_LUT_VSS_HEADER_SIZE + numEotfPQ512Entries + 1; + *isLutModeVss = TRUE; +} + +static void +EvoSetupIdentityBaseLutC5(NVEvoLutDataRec *pData, + enum NvKmsLUTState *lutState, + NvU32 *lutSize, + NvBool *isLutModeVss) +{ + int i; + + // Skip LUT data init if already done + if (*lutState == NvKmsLUTStateIdentity) { + goto skipInit; + } + + ct_assert(NV_NUM_EVO_LUT_ENTRIES == 1025); + + // nvdisplay 3 uses FP16 entries in the ILUT. 
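+    //
+    // Each of the 1024 entries below maps its 10-bit index to the equivalent
+    // normalized FP16 value via nvUnorm10ToFp16(); entry 1024 duplicates
+    // entry 1023 so that interpolation of the final segment is well defined.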
+    for (i = 0; i < 1024; i++) {
+        pData->base[NV_LUT_VSS_HEADER_SIZE + i].Red =
+            pData->base[NV_LUT_VSS_HEADER_SIZE + i].Green =
+            pData->base[NV_LUT_VSS_HEADER_SIZE + i].Blue = nvUnorm10ToFp16(i).v;
+    }
+    pData->base[NV_LUT_VSS_HEADER_SIZE + 1024] =
+        pData->base[NV_LUT_VSS_HEADER_SIZE + 1023];
+
+skipInit:
+    *lutState = NvKmsLUTStateIdentity;
+    *lutSize = NV_LUT_VSS_HEADER_SIZE + NV_NUM_EVO_LUT_ENTRIES;
+    *isLutModeVss = FALSE;
+}
+
+static void EvoSetILUTSurfaceAddressC5(
+    const NVDevEvoRec *pDevEvo,
+    NVEvoChannelPtr pChannel,
+    const NVSurfaceDescriptor *pSurfaceDesc,
+    NvU32 offset)
+{
+    NvU32 ctxDmaHandle = pSurfaceDesc ? pSurfaceDesc->ctxDmaHandle : 0;
+
+    nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CONTEXT_DMA_ILUT, 1);
+    nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57E, _SET_CONTEXT_DMA_ILUT, _HANDLE, ctxDmaHandle));
+
+    nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_OFFSET_ILUT, 1);
+    nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57E, _SET_OFFSET_ILUT, _ORIGIN, offset));
+}
+
+static void
+EvoFlipC5Common(NVDevEvoPtr pDevEvo,
+                NVEvoChannelPtr pChannel,
+                const NVFlipChannelEvoHwState *pHwState,
+                NVEvoUpdateState *updateState,
+                NvBool bypassComposition)
+{
+    enum NvKmsSurfaceMemoryFormat format;
+    NvBool swapUV;
+    NvU32 hTaps, vTaps;
+    NvBool scaling = FALSE;
+    NVSurfaceEvoPtr pLutSurfaceEvo = NULL;
+    NvU64 lutOffset = offsetof(NVEvoLutDataRec, base);
+    NvU32 lutSize = NV_NUM_EVO_LUT_ENTRIES;
+    NvBool isLutModeVss = FALSE;
+
+    NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask);
+    NvU32 head = pDevEvo->headForWindow[win];
+
+    const NvU32 sdMask = nvPeekEvoSubDevMask(pDevEvo);
+    const NvU32 sd = (sdMask == 0) ? 0 : nv_ffs(sdMask) - 1;
+    const NVDispHeadStateEvoRec *pHeadState = &pDevEvo->pDispEvo[sd]->headState[head];
+
+    // XXX HDR TODO: Handle other transfer functions
+    // XXX HDR TODO: Enable custom input LUTs with HDR
+    if (pHwState->inputLut.fromOverride ||
+        (pHwState->tf != NVKMS_INPUT_TF_PQ)) {
+        pLutSurfaceEvo = EvoGetLutSurface3(pDevEvo, pChannel, pHwState,
+                                           &lutSize, &lutOffset, &isLutModeVss);
+    }
+
+    if (!EvoFlipC3Common(pDevEvo, pChannel, pHwState, updateState)) {
+        ConfigureTmoLut(pDevEvo, pHwState, pChannel);
+        return;
+    }
+
+    format = pHwState->pSurfaceEvo[NVKMS_LEFT]->format;
+
+    swapUV = IsSurfaceFormatUVSwapped(format);
+    nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_PARAMS, 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(C57E, _SET_PARAMS, _FORMAT, nvHwFormatFromKmsFormatC6(format)) |
+        (swapUV ? DRF_DEF(C57E, _SET_PARAMS, _SWAP_UV, _ENABLE) :
+                  DRF_DEF(C57E, _SET_PARAMS, _SWAP_UV, _DISABLE)));
+
+    /*
+     * In nvdisplay 2, there was a fixed-function block in the precomp FMT
+     * module that was responsible for YUV->RGB conversion.
+     *
+     * In nvdisplay 3, that fixed-function block no longer exists.
+     * In its place, there's a generic 3x4 S5.16 coefficient matrix that SW must
+     * explicitly configure to convert the input surface format to the internal
+     * RGB pipe native format.
+     */
+    EvoSetFMTMatrixC5(pChannel, format, pHwState);
+
+    vTaps = (pHwState->vTaps >= NV_EVO_SCALER_5TAPS) ?
+        NVC57E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_5 :
+        NVC57E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_2;
+    hTaps = (pHwState->hTaps >= NV_EVO_SCALER_5TAPS) ?
+ NVC57E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 : + NVC57E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_2; + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CONTROL_INPUT_SCALER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CONTROL_INPUT_SCALER, _VERTICAL_TAPS, vTaps) | + DRF_NUM(C57E, _SET_CONTROL_INPUT_SCALER, _HORIZONTAL_TAPS, hTaps)); + + scaling = (pHwState->sizeIn.width != pHwState->sizeOut.width) || + (pHwState->sizeIn.height != pHwState->sizeOut.height); + nvAssert(!(scaling && bypassComposition)); + + /* + * If scaling or tonemapping, we must enable the CSC0 and CSC1 pipelines. + * + * If no scaling or tonemapping, just use CSC11 to convert from the input + * gamut to the output (panel) gamut, and disable everything else. + */ + if (scaling || + nvNeedsTmoLut(pDevEvo, pChannel, pHwState, + nvGetHDRSrcMaxLum(pHwState), + pHeadState->hdrInfoFrame.staticMetadata.maxCLL)) { + ConfigureCsc0C5(pDevEvo, pChannel, pHwState, TRUE); + ConfigureCsc1C5(pDevEvo, pChannel, pHwState, TRUE); + } else { + ConfigureCsc0C5(pDevEvo, pChannel, pHwState, FALSE); + ConfigureCsc1C5(pDevEvo, pChannel, pHwState, FALSE); + } + + // In nvdisplay 3, an ILUT is required to convert the input surface to FP16, + // unless the surface being displayed is already FP16 to begin with. + if ((format == NvKmsSurfaceMemoryFormatRF16GF16BF16AF16) || + (format == NvKmsSurfaceMemoryFormatRF16GF16BF16XF16) || bypassComposition) { + nvAssert(pHwState->tf == NVKMS_INPUT_TF_LINEAR); + pLutSurfaceEvo = NULL; + } else if (!pLutSurfaceEvo) { + NVEvoLutDataRec *pData = NULL; + + pLutSurfaceEvo = pDevEvo->lut.defaultLut; + pData = pLutSurfaceEvo->cpuAddress[sd]; + + nvAssert(pData); + + switch (pHwState->tf) { + case NVKMS_INPUT_TF_PQ: + EvoSetupPQEotfBaseLutC5(pData, + &pDevEvo->lut.defaultBaseLUTState[sd], + &lutSize, &isLutModeVss); + break; + case NVKMS_INPUT_TF_LINEAR: + EvoSetupIdentityBaseLutC5(pData, + &pDevEvo->lut.defaultBaseLUTState[sd], + &lutSize, &isLutModeVss); + break; + default: // XXX HDR TODO: Handle other colorspaces + nvAssert(FALSE); + EvoSetupIdentityBaseLutC5(pData, + &pDevEvo->lut.defaultBaseLUTState[sd], + &lutSize, &isLutModeVss); + break; + } + } + + if (pLutSurfaceEvo) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_ILUT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + (isLutModeVss ? DRF_DEF(C57E, _SET_ILUT_CONTROL, _INTERPOLATE, _ENABLE) : + DRF_DEF(C57E, _SET_ILUT_CONTROL, _INTERPOLATE, _DISABLE)) | + DRF_DEF(C57E, _SET_ILUT_CONTROL, _MIRROR, _DISABLE) | + (isLutModeVss ? DRF_DEF(C57E, _SET_ILUT_CONTROL, _MODE, _SEGMENTED) : + DRF_DEF(C57E, _SET_ILUT_CONTROL, _MODE, _DIRECT10)) | + DRF_NUM(C57E, _SET_ILUT_CONTROL, _SIZE, lutSize)); + + pDevEvo->hal->SetILUTSurfaceAddress(pDevEvo, pChannel, + &pLutSurfaceEvo->planes[0].surfaceDesc, lutOffset); + } else { + pDevEvo->hal->SetILUTSurfaceAddress(pDevEvo, pChannel, + NULL /* pSurfaceDesc */, 0 /* offset */); + } + + ConfigureTmoLut(pDevEvo, pHwState, pChannel); + + UpdateCompositionC5(pDevEvo, pChannel, + &pHwState->composition, updateState, + bypassComposition, + format); +} + +static void +EvoFlipC5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + EvoFlipC5Common(pDevEvo, pChannel, pHwState, updateState, bypassComposition); + + /* Work around bug 2117571: whenever the tearing mode is changing, send a + * software method to notify RM. 
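+     * The notification carries the associated head and the new vsync state;
+     * note the polarity below: tearing enabled corresponds to _VSYNC_STATE_OFF.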
*/ + if (pHwState->tearing != pChannel->oldTearingMode) { + NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask); + NvU32 head = pDevEvo->headForWindow[win]; + + if (head != NV_INVALID_HEAD) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_WINDOWS_NOTIFY_RM, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57E, _WINDOWS_NOTIFY_RM, _VSYNC_STATE_CHANGE, _TRUE) | + DRF_NUM(C57E, _WINDOWS_NOTIFY_RM, _ASSOCIATED_HEAD, head) | + (pHwState->tearing ? + DRF_DEF(C57E, _WINDOWS_NOTIFY_RM, _VSYNC_STATE, _OFF) : + DRF_DEF(C57E, _WINDOWS_NOTIFY_RM, _VSYNC_STATE, _ON))); + } + + pChannel->oldTearingMode = pHwState->tearing; + } + + /* program semaphore */ + EvoProgramSemaphore3(pDevEvo, pChannel, pHwState); +} + +void +nvEvoFlipC6(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + NvBool fromTop = TRUE; + NvBool fromLeft = TRUE; + + NvU32 vDirVal = 0; + NvU32 hDirVal = 0; + + switch (pHwState->rrParams.rotation) { + case NVKMS_ROTATION_90: + case NVKMS_ROTATION_270: + nvAssert(!"Invalid rotation requested."); + /* Fall-through */ + case NVKMS_ROTATION_0: + break; + case NVKMS_ROTATION_180: + fromTop = FALSE; + fromLeft = FALSE; + break; + } + + if (pHwState->rrParams.reflectionX) { + fromLeft = !fromLeft; + } + if (pHwState->rrParams.reflectionY) { + fromTop = !fromTop; + } + + vDirVal = (fromTop ? + DRF_DEF(C67E, _SET_SCAN_DIRECTION, _VERTICAL_DIRECTION, _FROM_TOP) : + DRF_DEF(C67E, _SET_SCAN_DIRECTION, _VERTICAL_DIRECTION, _FROM_BOTTOM)); + hDirVal = (fromLeft ? + DRF_DEF(C67E, _SET_SCAN_DIRECTION, _HORIZONTAL_DIRECTION, _FROM_LEFT) : + DRF_DEF(C67E, _SET_SCAN_DIRECTION, _HORIZONTAL_DIRECTION, _FROM_RIGHT)); + + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_SCAN_DIRECTION, 1); + nvDmaSetEvoMethodData(pChannel, vDirVal | hDirVal); + + EvoFlipC5Common(pDevEvo, pChannel, pHwState, updateState, bypassComposition); + + /* program semaphore */ + EvoProgramSemaphore6(pDevEvo, pChannel, pHwState); +} + +static void UpdateComposition(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + /* smaller => closer to front */ + NvU32 depth, + NvU32 colorKeySelect, + NvU32 constantAlpha, + NvU32 compositionFactorSelect, + const NVColorKey key, + NVEvoUpdateState *updateState) +{ + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_COMPOSITION_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_COMPOSITION_CONTROL, _COLOR_KEY_SELECT, colorKeySelect) | + DRF_NUM(C37E, _SET_COMPOSITION_CONTROL, _DEPTH, depth)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_COMPOSITION_CONSTANT_ALPHA, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_COMPOSITION_CONSTANT_ALPHA, _K1, constantAlpha)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_COMPOSITION_FACTOR_SELECT, 1); + nvDmaSetEvoMethodData(pChannel, compositionFactorSelect); + +#define UPDATE_COMPONENT(_COMP, _C, _c) \ + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_KEY_##_COMP, 1); \ + if (key.match##_C) { \ + nvDmaSetEvoMethodData(pChannel, \ + DRF_NUM(C37E, _SET_KEY_##_COMP, _MIN, key._c) | \ + DRF_NUM(C37E, _SET_KEY_##_COMP, _MAX, key._c)); \ + } else { \ + nvDmaSetEvoMethodData(pChannel, \ + DRF_NUM(C37E, _SET_KEY_##_COMP, _MIN, 0) | \ + DRF_SHIFTMASK(NVC37E_SET_KEY_##_COMP##_MAX)); \ + } + + if (colorKeySelect != + NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE) { + UPDATE_COMPONENT(ALPHA, A, a); + UPDATE_COMPONENT(RED_CR, R, r); + UPDATE_COMPONENT(GREEN_Y, G, g); + 
UPDATE_COMPONENT(BLUE_CB, B, b);
+    }
+
+#undef UPDATE_COMPONENT
+}
+
+static void EvoFlipTransitionWARC3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head,
+                                   const NVEvoSubDevHeadStateRec *pSdHeadState,
+                                   const NVFlipEvoHwState *pFlipState,
+                                   NVEvoUpdateState *updateState)
+{
+    /* Nothing to do for Volta */
+}
+
+/*
+ * Hardware bug 2193096 requires that we send special software methods around
+ * a window channel update that transitions from NULL ctxdma to non-NULL or
+ * vice versa. Below we compare the current hardware state in pSdHeadState
+ * against the state to be pushed in this update in pFlipState, and add any
+ * window(s) that qualify to the 'flipTransitionWAR' mask in the updateState.
+ */
+static void EvoFlipTransitionWARC5(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head,
+                                   const NVEvoSubDevHeadStateRec *pSdHeadState,
+                                   const NVFlipEvoHwState *pFlipState,
+                                   NVEvoUpdateState *updateState)
+{
+    NvU32 layer;
+
+    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+        const NvBool enabledPrev =
+            pSdHeadState->layer[layer].pSurfaceEvo[NVKMS_LEFT] != NULL;
+        const NvBool enabledNext =
+            pFlipState->layer[layer].pSurfaceEvo[NVKMS_LEFT] != NULL;
+
+        if (enabledPrev != enabledNext) {
+            /* XXX TODO: dynamic window assignment */
+            const NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(
+                pDevEvo->head[head].layer[layer]->channelMask);
+            updateState->subdev[sd].flipTransitionWAR |=
+                DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, win, _ENABLE);
+
+            nvAssert(pFlipState->dirty.layer[layer]);
+        }
+    }
+}
+
+void nvEvoFlipTransitionWARC6(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head,
+                              const NVEvoSubDevHeadStateRec *pSdHeadState,
+                              const NVFlipEvoHwState *pFlipState,
+                              NVEvoUpdateState *updateState)
+{
+    /* Nothing to do for Orin/Ampere for now */
+}
+
+static void
+UpdateCompositionC3(NVDevEvoPtr pDevEvo,
+                    NVEvoChannelPtr pChannel,
+                    const struct NvKmsCompositionParams *pCompParams,
+                    NVEvoUpdateState *updateState,
+                    enum NvKmsSurfaceMemoryFormat format)
+{
+    NvU32 colorKeySelect;
+    NvU32 compositionFactorSelect = 0;
+    NvU32 constantAlpha = 0;
+    NvU32 match;
+
+    switch (pCompParams->colorKeySelect) {
+    case NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE:
+        colorKeySelect =
+            NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE;
+        break;
+    case NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC:
+        colorKeySelect =
+            NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC;
+
+        break;
+    case NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST:
+        colorKeySelect =
+            NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST;
+
+        break;
+    default:
+        nvAssert(!"Invalid color key select");
+        return;
+    }
+
+    /* Match and no-match pixels must not both use an alpha blending mode at once. */
+    nvAssert((colorKeySelect == NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) ||
+             (!NvKmsIsCompositionModeUseAlpha(pCompParams->blendingMode[0])) ||
+             (!NvKmsIsCompositionModeUseAlpha(pCompParams->blendingMode[1])));
+
+    /*
+     * Match and no-match pixels should not both use one of the blending
+     * modes PREMULT_ALPHA, NON_PREMULT_ALPHA, PREMULT_SURFACE_ALPHA, or
+     * NON_PREMULT_SURFACE_ALPHA at the same time.
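+     * (The assert below checks that at least one of the two blending modes
+     * is OPAQUE or TRANSPARENT, i.e. a mode that does not consume alpha.)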
+ */ + nvAssert(pCompParams->blendingMode[0] == NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE || + pCompParams->blendingMode[0] == NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT || + pCompParams->blendingMode[1] == NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE || + pCompParams->blendingMode[1] == NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT); + + for (match = 0; match <= 1; match++) { + switch (pCompParams->blendingMode[match]) { + case NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE: + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _ONE) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _ZERO); + } else { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _ONE) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _ZERO); + } + break; + case NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT: + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _ZERO) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _ONE); + } else { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _ZERO) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _ONE); + } + break; + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA: + constantAlpha = 255; + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } else { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA: + constantAlpha = 255; + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _K1) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } else { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _K1) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } + break; + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA: + constantAlpha = pCompParams->surfaceAlpha; + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } else { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA: + constantAlpha = pCompParams->surfaceAlpha; + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _K1) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } else { + 
compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _K1) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } + break; + default: + nvAssert(!"Invalid blend mode"); + return; + } + + /* Override the composition factors for X channel emulated surface format. */ + if (NvKmsIsCompositionModeUseAlpha(pCompParams->blendingMode[match]) && + ((pDevEvo->hal->caps.xEmulatedSurfaceMemoryFormats & NVBIT64(format)) != 0U)) { + if (match == 1) { + /* Clear the previously selected composition factors for match pixels. */ + compositionFactorSelect &= ~(DRF_MASK(NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT) << + DRF_SHIFT(NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT)); + compositionFactorSelect &= ~(DRF_MASK(NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT) << + DRF_SHIFT(NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT)); + + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _K1) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _NEG_K1); + } else { + /* Clear the previously selected composition factors for no-match pixels. */ + compositionFactorSelect &= ~(DRF_MASK(NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT) << + DRF_SHIFT(NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT)); + compositionFactorSelect &= ~(DRF_MASK(NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT) << + DRF_SHIFT(NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT)); + + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _K1) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _NEG_K1); + } + } + } + + UpdateComposition(pDevEvo, + pChannel, + pCompParams->depth, + colorKeySelect, + constantAlpha, + compositionFactorSelect, + pCompParams->colorKey, + updateState); +} + +static void EvoBypassCompositionC5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NVEvoUpdateState *updateState) +{ + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_COMPOSITION_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57E, _SET_COMPOSITION_CONTROL, _BYPASS, _ENABLE)); +} + +static void +UpdateCompositionC5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const struct NvKmsCompositionParams *pCompParams, + NVEvoUpdateState *updateState, + NvBool bypassComposition, + enum NvKmsSurfaceMemoryFormat format) +{ + if (bypassComposition) { + EvoBypassCompositionC5(pDevEvo, pChannel, updateState); + } else { + UpdateCompositionC3(pDevEvo, pChannel, pCompParams, + updateState, format); + } +} + +/* + * The LUT entries in INDEX_1025_UNITY_RANGE have 16 bits, with the + * black value at 24576, and the white at 49151. Since the effective + * range is 16384, we treat this as a 14-bit LUT. However, we need to + * clear the low 3 bits to WAR hardware bug 813188. This gives us + * 14-bit LUT values, but only 11 bits of precision. + * XXXnvdisplay: Bug 813188 is supposed to be fixed on NVDisplay; can we expose + * more precision? 
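+ *
+ * Concretely, in the function below: (val >> 2) reduces the 16-bit input to
+ * 14 bits, (& ~7) clears the low 3 bits, and the +24576 offset places the
+ * result at the black level of the UNITY range.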
+ */ +static inline NvU16 ColorToLUTEntry(NvU16 val) +{ + const NvU16 val14bit = val >> 2; + return (val14bit & ~7) + 24576; +} + +/* + * Unlike earlier EVO implementations, the INDEX mode of the input LUT on + * NVDisplay is straightforward: the value of the input component is expanded + * to the LUT size by simply shifting left by the difference between the LUT + * index width and the component width. We do the same, here, to select the + * right LUT entry to fill. + */ +static inline NvU32 GetLUTIndex(int i, int componentSize) +{ + return i << (10 - componentSize); +} + +static void +EvoFillLUTSurfaceC3(NVEvoLutEntryRec *pLUTBuffer, + const NvU16 *red, + const NvU16 *green, + const NvU16 *blue, + int nColorMapEntries, int depth) +{ + int i; + NvU32 rSize, gSize, bSize; + + switch (depth) { + case 15: + rSize = gSize = bSize = 5; + break; + case 16: + rSize = bSize = 5; + gSize = 6; + break; + case 8: + case 24: + rSize = gSize = bSize = 8; + break; + case 30: + rSize = gSize = bSize = 10; + break; + default: + nvAssert(!"invalid depth"); + return; + } + + for (i = 0; i < nColorMapEntries; i++) { + if (i < (1 << rSize)) { + pLUTBuffer[GetLUTIndex(i, rSize)].Red = ColorToLUTEntry(red[i]); + } + if (i < (1 << gSize)) { + pLUTBuffer[GetLUTIndex(i, gSize)].Green = ColorToLUTEntry(green[i]); + } + if (i < (1 << bSize)) { + pLUTBuffer[GetLUTIndex(i, bSize)].Blue = ColorToLUTEntry(blue[i]); + } + } +} + +static inline float16_t ColorToFp16(NvU16 val, float32_t maxf) +{ + return nvUnormToFp16(val, maxf); +} + +void +nvEvoFillLUTSurfaceC5(NVEvoLutEntryRec *pLUTBuffer, + const NvU16 *red, + const NvU16 *green, + const NvU16 *blue, + int nColorMapEntries, int depth) +{ + int i; + NvU32 rSize, gSize, bSize; + const float32_t maxf = ui32_to_f32(0xffff); + + switch (depth) { + case 15: + rSize = gSize = bSize = 5; + break; + case 16: + rSize = bSize = 5; + gSize = 6; + break; + case 8: + case 24: + rSize = gSize = bSize = 8; + break; + case 30: + rSize = gSize = bSize = 10; + break; + default: + nvAssert(!"invalid depth"); + return; + } + + // Skip the VSS header + pLUTBuffer += NV_LUT_VSS_HEADER_SIZE; + + for (i = 0; i < nColorMapEntries; i++) { + if (i < (1 << rSize)) { + pLUTBuffer[GetLUTIndex(i, rSize)].Red = + ColorToFp16(red[i], maxf).v; + } + if (i < (1 << gSize)) { + pLUTBuffer[GetLUTIndex(i, gSize)].Green = + ColorToFp16(green[i], maxf).v; + } + if (i < (1 << bSize)) { + pLUTBuffer[GetLUTIndex(i, bSize)].Blue = + ColorToFp16(blue[i], maxf).v; + } + } +} + +static void EvoSetOutputLutC3(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + const NVFlipLutHwState *pOutputLut, + NvU32 fpNormScale, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvBool enableLut = (pOutputLut->pLutSurfaceEvo != NULL); + NvU64 offset = enableLut ? pOutputLut->offset : offsetof(NVEvoLutDataRec, output); + NvU32 ctxdma = enableLut ? 
+ pOutputLut->pLutSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle : 0; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvAssert((offset & 0xff) == 0); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_LUT, _SIZE, _SIZE_1025) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_LUT, _RANGE, _UNITY) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_LUT, _OUTPUT_MODE, _INTERPOLATE)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_OFFSET_OUTPUT_LUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_OFFSET_OUTPUT_LUT, _ORIGIN, offset >> 8)); + + /* Set the ctxdma for the output LUT */ + + if (!enableLut) { + /* Class C37D has no separate enable flag. */ + ctxdma = 0; + } + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTEXT_DMA_OUTPUT_LUT, _HANDLE, ctxdma)); +} + +static void EvoSetupPQOetfOutputLutC5(NVEvoLutDataRec *pData, + enum NvKmsLUTState *lutState, + NvU32 *lutSize, + NvBool *isLutModeVss) +{ + NvU32 lutDataStartingIndex = NV_LUT_VSS_HEADER_SIZE; + NvU32 numOetfPQ512Entries = ARRAY_LEN(OetfPQ512Entries); + NvU32 oetfTableIdx; + NvU64 vssHead = 0; + NvU32 lutEntryCounter = 0, i; + + // Skip LUT data init if already done + if (*lutState == NvKmsLUTStatePQ) { + goto skipInit; + } + + // VSS Header + for (lutEntryCounter = 0; lutEntryCounter < NV_LUT_VSS_HEADER_SIZE; lutEntryCounter++) { + vssHead = 0; + for (i = 0; ((i < 16) && (((lutEntryCounter * 16) + i) < ARRAY_LEN(OetfPQ512SegSizesLog2))); i++) { + NvU64 temp = OetfPQ512SegSizesLog2[(lutEntryCounter * 16) + i]; + temp = temp << (i * 3); + vssHead |= temp; + } + nvkms_memcpy(&(pData->output[lutEntryCounter]), &vssHead, sizeof(NVEvoLutEntryRec)); + } + + for (oetfTableIdx = 0; oetfTableIdx < numOetfPQ512Entries; oetfTableIdx++) { + pData->output[oetfTableIdx + lutDataStartingIndex].Red = + pData->output[oetfTableIdx + lutDataStartingIndex].Green = + pData->output[oetfTableIdx + lutDataStartingIndex].Blue = + OetfPQ512Entries[oetfTableIdx]; + } + + // Copy the last entry for interpolation + pData->output[numOetfPQ512Entries + lutDataStartingIndex].Red = + pData->output[numOetfPQ512Entries + lutDataStartingIndex - 1].Red; + pData->output[numOetfPQ512Entries + lutDataStartingIndex].Green = + pData->output[numOetfPQ512Entries + lutDataStartingIndex - 1].Green; + pData->output[numOetfPQ512Entries + lutDataStartingIndex].Blue = + pData->output[numOetfPQ512Entries + lutDataStartingIndex - 1].Blue; + +skipInit: + *lutState = NvKmsLUTStatePQ; + *lutSize = numOetfPQ512Entries + 1; + *isLutModeVss = TRUE; +} + +static void EvoSetupIdentityOutputLutC5(NVEvoLutDataRec *pData, + enum NvKmsLUTState *lutState, + NvU32 *lutSize, + NvBool *isLutModeVss) +{ + NvU32 i; + + // Skip LUT data init if already done + if (*lutState == NvKmsLUTStateIdentity) { + goto skipInit; + } + + ct_assert(NV_NUM_EVO_LUT_ENTRIES == 1025); + + // nvdisplay 3 uses 16-bit fixed-point entries in the OLUT. 
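+    //
+    // The identity curve below expands each 10-bit index into the 16-bit
+    // range with a left shift of 6; as in the ILUT case, the final entry is
+    // duplicated so interpolation of the last segment is well defined.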
+ for (i = 0; i < 1024; i++) { + pData->output[NV_LUT_VSS_HEADER_SIZE + i].Red = + pData->output[NV_LUT_VSS_HEADER_SIZE + i].Green = + pData->output[NV_LUT_VSS_HEADER_SIZE + i].Blue = (i << (16 - 10)); + } + pData->output[NV_LUT_VSS_HEADER_SIZE + 1024] = + pData->output[NV_LUT_VSS_HEADER_SIZE + 1023]; + +skipInit: + *lutState = NvKmsLUTStateIdentity; + *lutSize = 1025; + *isLutModeVss = FALSE; +} + +static void SetupHDROutputLUT(NVDevEvoPtr pDevEvo, + const NVDispHeadStateEvoRec *pHeadState, + NvU32 sd, + enum NvKmsLUTState *lutState, + NvU32 *lutSize, + NvBool *isLutModeVss) +{ + NVSurfaceEvoPtr pLut = pDevEvo->lut.defaultLut; + NVEvoLutDataRec *pData = pLut->cpuAddress[sd]; + + // XXX HDR TODO: Support other transfer functions + nvAssert(pHeadState->tf == NVKMS_OUTPUT_TF_PQ); + + EvoSetupPQOetfOutputLutC5(pData, lutState, lutSize, isLutModeVss); +} + +void nvSetupOutputLUT5(NVDevEvoPtr pDevEvo, + const NVDispHeadStateEvoRec *pHeadState, + NvBool enableOutputLut, + NvBool bypassComposition, + NVSurfaceDescriptor **pSurfaceDesc, + NvU32 *lutSize, + NvU64 *offset, + NvBool *disableOcsc0, + NvU32 *fpNormScale, + NvBool *isLutModeVss) +{ + NvU32 sd; + + /* Set the ctxdma for the output LUT */ + + if (bypassComposition) { + *pSurfaceDesc = NULL; + + /* if we're not enabling the OLUT, OCSC0 also needs to be disabled */ + *disableOcsc0 = TRUE; + } else if (!enableOutputLut) { + /* Use the default OLUT if the client didn't provide one */ + *pSurfaceDesc = &pDevEvo->lut.defaultLut->planes[0].surfaceDesc; + *offset = offsetof(NVEvoLutDataRec, output); + + // Setup default OLUT + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + // XXX HDR TODO: Support other transfer functions + if (pHeadState->tf == NVKMS_OUTPUT_TF_PQ) { + SetupHDROutputLUT(pDevEvo, pHeadState, sd, + &pDevEvo->lut.defaultOutputLUTState[sd], + lutSize, isLutModeVss); + + *disableOcsc0 = TRUE; + + /* + * Scale from [0.0, 125.0] to [0.0, 1.0] + * XXX HDR TODO: Assumes input is in this range, SDR is not. + */ + *fpNormScale = NVKMS_OLUT_FP_NORM_SCALE_DEFAULT / 125; + } else { + NVSurfaceEvoPtr pLut = pDevEvo->lut.defaultLut; + NVEvoLutDataRec *pData = pLut->cpuAddress[sd]; + + EvoSetupIdentityOutputLutC5( + pData, + &pDevEvo->lut.defaultOutputLUTState[sd], + lutSize, isLutModeVss); + } + } + } +} + +static void SetOLUTSurfaceAddress( + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 offset, + NvU32 head) +{ + NvU32 ctxDmaHandle = pSurfaceDesc ? pSurfaceDesc->ctxDmaHandle : 0; + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_CONTEXT_DMA_OLUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_CONTEXT_DMA_OLUT, _HANDLE, ctxDmaHandle)); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OFFSET_OLUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_OFFSET_OLUT, _ORIGIN, offset >> 8)); +} + +static void EvoSetOutputLutC5(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + const NVFlipLutHwState *pOutputLut, + NvU32 fpNormScale, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + const NVDispEvoRec *pDispEvo = pDevEvo->pDispEvo[sd]; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvBool enableOutputLut = (pOutputLut->pLutSurfaceEvo != NULL); + NVSurfaceEvoPtr pLutSurfEvo = pOutputLut->pLutSurfaceEvo; + NVSurfaceDescriptor *pSurfaceDesc = + enableOutputLut ? &pLutSurfEvo->planes[0].surfaceDesc : NULL; + NvU64 offset = enableOutputLut ? pOutputLut->offset : offsetof(NVEvoLutDataRec, output); + NvBool isLutModeVss = enableOutputLut ? 
(pOutputLut->vssSegments != 0) : FALSE; + NvU32 lutSize = enableOutputLut ? pOutputLut->lutEntries : NV_NUM_EVO_LUT_ENTRIES; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvBool disableOcsc0 = FALSE; + NvBool outputRoundingFix = nvkms_output_rounding_fix(); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // XXX HDR TODO: Enable custom output LUTs with HDR + // XXX HDR TODO: Support other transfer functions + if (!pOutputLut->fromOverride && + (pHeadState->tf == NVKMS_OUTPUT_TF_PQ)) { + enableOutputLut = FALSE; + } + + nvSetupOutputLUT5(pDevEvo, + pHeadState, + enableOutputLut, + bypassComposition, + &pSurfaceDesc, + &lutSize, + &offset, + &disableOcsc0, + &fpNormScale, + &isLutModeVss); + + if (disableOcsc0) { + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OCSC0CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(C57D, _HEAD_SET_OCSC0CONTROL, _ENABLE, _DISABLE)); + + outputRoundingFix = FALSE; + } else { + /* Update status of output rounding fix. */ + EvoSetOCsc0C5(pDispEvo, head, &outputRoundingFix); + } + + /* Program the output LUT */ + nvAssert((offset & 0xff) == 0); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OLUT_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + ((isLutModeVss || !outputRoundingFix) ? + DRF_DEF(C57D, _HEAD_SET_OLUT_CONTROL, _INTERPOLATE, _ENABLE) : + DRF_DEF(C57D, _HEAD_SET_OLUT_CONTROL, _INTERPOLATE, _DISABLE)) | + DRF_DEF(C57D, _HEAD_SET_OLUT_CONTROL, _MIRROR, _DISABLE) | + (isLutModeVss ? DRF_DEF(C57D, _HEAD_SET_OLUT_CONTROL, _MODE, _SEGMENTED) : + DRF_DEF(C57D, _HEAD_SET_OLUT_CONTROL, _MODE, _DIRECT10)) | + DRF_NUM(C57D, _HEAD_SET_OLUT_CONTROL, _SIZE, NV_LUT_VSS_HEADER_SIZE + lutSize)); + + SetOLUTSurfaceAddress(pChannel, pSurfaceDesc, offset, head); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OLUT_FP_NORM_SCALE(head), 1); + nvDmaSetEvoMethodData(pChannel, fpNormScale); + + if (!disableOcsc0) { + /* only enable OCSC0 after enabling the OLUT */ + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OCSC0CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(C57D, _HEAD_SET_OCSC0CONTROL, _ENABLE, _ENABLE)); + } +} + +static NvBool QueryStereoPinC3(NVDevEvoPtr pDevEvo, + NVEvoSubDevPtr pEvoSubDev, + NvU32 *pStereoPin) +{ + NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS params = { }; + + params.base.subdeviceIndex = pEvoSubDev->subDeviceInstance; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NVC370_CTRL_CMD_GET_LOCKPINS_CAPS, + ¶ms, sizeof(params)) != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to query stereo pin"); + return FALSE; + } + + if ((params.stereoPin >= NV_EVO_NUM_LOCK_PIN_CAPS) || + (params.stereoPin == NVC370_CTRL_GET_LOCKPINS_CAPS_STEREO_PIN_NONE)) { + return FALSE; + } else { + *pStereoPin = params.stereoPin; + return TRUE; + } +} + +static void EvoParseCapabilityNotifier3(NVDevEvoPtr pDevEvo, + NVEvoSubDevPtr pEvoSubDev, + volatile const NvU32 *pCaps) +{ + NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities; + const NvU32 sysCap = nvEvoReadCapReg3(pCaps, NVC373_SYS_CAP); + const NvU32 sysCapB = nvEvoReadCapReg3(pCaps, NVC373_SYS_CAPB); + NvU32 i, stereoPin; + NvU32 layer; + + pDevEvo->caps.cursorCompositionCaps = + (struct NvKmsCompositionCapabilities) { + .supportedColorKeySelects = + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE), + + .colorKeySelect = { + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE] = { + .supportedBlendModes = { + [1] = NV_EVO3_SUPPORTED_CURSOR_COMP_BLEND_MODES, + }, + }, + } + }; + + for 
(layer = 0; + layer < ARRAY_LEN(pDevEvo->caps.layerCaps); layer++) { + pDevEvo->caps.layerCaps[layer].composition = + (struct NvKmsCompositionCapabilities) { + .supportedColorKeySelects = + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) | + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) | + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST), + + .colorKeySelect = { + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE] = { + .supportedBlendModes = { + [1] = NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES, + }, + }, + + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC] = { + .supportedBlendModes = { + [0] = NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES, + [1] = NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES, + }, + }, + + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST] = { + .supportedBlendModes = { + [0] = NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES, + [1] = NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES, + }, + }, + }, + }; + } + + /* + * Previous EVO display implementations exposed capabilities for lock pins, + * detailing which pin(s) could be used for which functions. The idea was + * that it didn't make sense to try to drive a stereo pin with a fliplock + * signal (for example), so the pin associated with the stereo function was + * marked as stereo-capable but not any other function; attempting to use a + * non-stereo-capable pin for stereo or vice-versa would result in an error. + * + * With nvdisplay, the meaning of lock pins was changed such that they no + * longer have a shared namespace. So stereo lockpin 0 is not the same as + * fliplock lockpin 0 and neither is the same as scanlock lockpin 0. With + * this scheme, there is no way to specify a pin that is incapable of a + * given function, so the entire capabilities mechanism was removed. + * + * However, the pins chosen for HEAD_SET_CONTROL still need to match the + * pins selected for each function in the VBIOS DCB. Fliplock and scanlock + * query this information through + * NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS. Stereo is handled + * here, using NVC370_CTRL_CMD_GET_LOCKPINS_CAPS. + */ + + for (i = 0; i < NV_EVO_NUM_LOCK_PIN_CAPS; i++) { + pEvoCaps->pin[i].flipLock = TRUE; + pEvoCaps->pin[i].scanLock = TRUE; + } + + if (QueryStereoPinC3(pDevEvo, pEvoSubDev, &stereoPin)) { + pEvoCaps->pin[stereoPin].stereo = TRUE; + } + + // Miscellaneous capabilities + // NVDisplay does not support interlaced modes. + pEvoCaps->misc.supportsInterlaced = FALSE; + +/* XXX temporary WAR; see bug 4028718 */ +#if !defined(NVC373_HEAD_CLK_CAP) +#define NVC373_HEAD_CLK_CAP(i) (0x5e8+(i)*4) /* RW-4A */ +#define NVC373_HEAD_CLK_CAP__SIZE_1 8 /* */ +#define NVC373_HEAD_CLK_CAP_PCLK_MAX 7:0 /* RWIUF */ +#define NVC373_HEAD_CLK_CAP_PCLK_MAX_INIT 0x00000085 /* RWI-V */ +#endif + + // Heads + ct_assert(ARRAY_LEN(pEvoCaps->head) >= NVC373_HEAD_CAPA__SIZE_1); + for (i = 0; i < NVC373_HEAD_CAPA__SIZE_1; i++) { + NVEvoHeadCaps *pHeadCaps = &pEvoCaps->head[i]; + + pHeadCaps->usable = + FLD_IDX_TEST_DRF(C373, _SYS_CAP, _HEAD_EXISTS, i, _YES, sysCap); + if (pHeadCaps->usable) { + pHeadCaps->maxPClkKHz = + DRF_VAL(C373, _HEAD_CLK_CAP, _PCLK_MAX, + nvEvoReadCapReg3(pCaps, NVC373_HEAD_CLK_CAP(i))) * 10000; + } + + } + + // SORs + ct_assert(ARRAY_LEN(pEvoCaps->sor) >= NVC373_SOR_CAP__SIZE_1); + for (i = 0; i < NVC373_SOR_CAP__SIZE_1; i++) { + NVEvoSorCaps *pSorCaps = &pEvoCaps->sor[i]; + + NvBool sorUsable = + FLD_IDX_TEST_DRF(C373, _SYS_CAP, _SOR_EXISTS, i, _YES, sysCap); + + /* XXXnvdisplay: add SOR caps: max DP clk, ... 
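+         * Note that _SOR_CLK_CAP_TMDS_MAX below is reported in units of
+         * 10 MHz, hence the multiplication by 10000 to express it in KHz.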
 */
+        if (sorUsable) {
+            const NvU32 sorCap = nvEvoReadCapReg3(pCaps, NVC373_SOR_CAP(i));
+            pSorCaps->dualTMDS =
+                FLD_TEST_DRF(C373, _SOR_CAP, _DUAL_TMDS, _TRUE, sorCap);
+
+            /*
+             * Assume that all SORs are equally capable, and that all SORs
+             * support HDMI FRL if the display class supports it. (If this
+             * assert fires, we may need to rework SOR assignment for such HDMI
+             * sinks.)
+             *
+             * Although HDMI_FRL is only defined for class C6, classes C3 and
+             * C5 don't use that bit in the SOR_CAP register so it should
+             * always be 0 on those chips.
+             */
+            nvAssert(!!FLD_TEST_DRF(C673, _SOR_CAP, _HDMI_FRL, _TRUE, sorCap) ==
+                     !!pDevEvo->hal->caps.supportsHDMIFRL);
+
+            pSorCaps->maxTMDSClkKHz =
+                DRF_VAL(C373, _SOR_CLK_CAP, _TMDS_MAX,
+                        nvEvoReadCapReg3(pCaps, NVC373_SOR_CLK_CAP(i))) * 10000;
+        }
+    }
+
+    // Don't need any PIOR caps currently.
+
+    // Windows
+    ct_assert(ARRAY_LEN(pEvoCaps->window) >= NVC373_SYS_CAPB_WINDOW_EXISTS__SIZE_1);
+    for (i = 0; i < NVC373_SYS_CAPB_WINDOW_EXISTS__SIZE_1; i++) {
+        NVEvoWindowCaps *pWinCaps = &pEvoCaps->window[i];
+
+        pWinCaps->usable =
+            FLD_IDX_TEST_DRF(C373, _SYS_CAPB, _WINDOW_EXISTS, i, _YES, sysCapB);
+    }
+}
+
+static void EvoParseCapabilityNotifierC3(NVDevEvoPtr pDevEvo,
+                                         NVEvoSubDevPtr pEvoSubDev,
+                                         volatile const NvU32 *pCaps)
+{
+    NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities;
+    NvU32 i;
+
+    // Miscellaneous capabilities
+    pEvoCaps->misc.supportsSemiPlanar = FALSE;
+    pEvoCaps->misc.supportsPlanar = FALSE;
+    pEvoCaps->misc.supportsDSI = FALSE;
+
+    // Heads
+    ct_assert(ARRAY_LEN(pEvoCaps->head) >= NVC373_HEAD_CAPA__SIZE_1);
+    for (i = 0; i < NVC373_HEAD_CAPA__SIZE_1; i++) {
+        NVEvoHeadCaps *pHeadCaps = &pEvoCaps->head[i];
+
+        /* XXXnvdisplay: add caps for hsat, ocsc, lut */
+        if (pHeadCaps->usable) {
+            NVEvoScalerCaps *pScalerCaps = &pHeadCaps->scalerCaps;
+
+            pScalerCaps->present =
+                FLD_TEST_DRF(C373, _HEAD_CAPA, _SCALER, _TRUE,
+                             nvEvoReadCapReg3(pCaps, NVC373_HEAD_CAPA(i)));
+            if (pScalerCaps->present) {
+                NVEvoScalerTapsCaps *pTapsCaps;
+                NvU32 tmp;
+
+                /*
+                 * Note that some of these may be zero (e.g., only 2-tap 444
+                 * mode is supported on GV100, so the rest are all zero).
+                 *
+                 * Downscaling by more than 2x in either direction is not
+                 * allowed by the state error check for both horizontal and
+                 * vertical 2-tap scaling.
+                 *
+                 * Downscaling by more than 4x in either direction is not
+                 * allowed by the argument error check (and the state error
+                 * check) for 5-tap scaling.
+                 *
+                 * 5-tap scaling is not implemented on GV100, though, so we
+                 * should never see numTaps == 5 on GV100, and we can just use a
+                 * max of 2 here all the time.
+                 */
+
+                /* 2-tap capabilities */
+                tmp = nvEvoReadCapReg3(pCaps, NVC373_HEAD_CAPD(i));
+                pTapsCaps = &pScalerCaps->taps[NV_EVO_SCALER_2TAPS];
+                pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_2X;
+                pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_2X;
+                pTapsCaps->maxPixelsVTaps =
+                    NV_MAX(DRF_VAL(C373, _HEAD_CAPD, _MAX_PIXELS_2TAP422, tmp),
+                           DRF_VAL(C373, _HEAD_CAPD, _MAX_PIXELS_2TAP444, tmp));
+
+                /*
+                 * Note that there is a capability register for 1TAP, but there
+                 * doesn't appear to be a way to select 1-tap scaling in the
+                 * channel methods, so don't bother reading it for now.
+ */ + } + } + } +} + +static void EvoParsePrecompScalerCaps5(NVEvoCapabilitiesPtr pEvoCaps, + volatile const NvU32 *pCaps) +{ + int i; + + for (i = 0; i < NVC573_SYS_CAPB_WINDOW_EXISTS__SIZE_1; i++) { + NVEvoWindowCaps *pWinCaps = &pEvoCaps->window[i]; + NVEvoScalerCaps *pScalerCaps = &pWinCaps->scalerCaps; + NVEvoScalerTapsCaps *pTapsCaps; + NvU32 capA = nvEvoReadCapReg3(pCaps, NVC573_PRECOMP_WIN_PIPE_HDR_CAPA(i)); + NvU32 capD, capF; + + pScalerCaps->present = + FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, _SCLR_PRESENT, + _TRUE, capA); + if (pScalerCaps->present) { + capD = nvEvoReadCapReg3(pCaps, NVC573_PRECOMP_WIN_PIPE_HDR_CAPD(i)); + capF = nvEvoReadCapReg3(pCaps, NVC573_PRECOMP_WIN_PIPE_HDR_CAPF(i)); + + /* 5-tap capabilities */ + pTapsCaps = &pScalerCaps->taps[NV_EVO_SCALER_5TAPS]; + if (FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPD, + _SCLR_VS_MAX_SCALE_FACTOR, _4X, capD)) { + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_4X; + } else { + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + } + + if (FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPD, + _SCLR_HS_MAX_SCALE_FACTOR, _4X, capD)) { + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_4X; + } else { + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + } + + pTapsCaps->maxPixelsVTaps = + DRF_VAL(C573, _PRECOMP_WIN_PIPE_HDR_CAPF, + _VSCLR_MAX_PIXELS_5TAP, capF); + + /* 2-tap capabilities */ + pTapsCaps = &pScalerCaps->taps[NV_EVO_SCALER_2TAPS]; + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + pTapsCaps->maxPixelsVTaps = + DRF_VAL(C573, _PRECOMP_WIN_PIPE_HDR_CAPF, _VSCLR_MAX_PIXELS_2TAP, + capF); + } + } +} + +static void EvoParseCapabilityNotifierC5C6Common(NVEvoCapabilitiesPtr pEvoCaps, + volatile const NvU32 *pCaps) +{ + NvU32 i; + NvBool postcompScalingSupported = FALSE; + + // Heads + ct_assert(ARRAY_LEN(pEvoCaps->head) >= NVC573_SYS_CAP_HEAD_EXISTS__SIZE_1); + for (i = 0; i < NVC573_SYS_CAP_HEAD_EXISTS__SIZE_1; i++) { + NVEvoHeadCaps *pHeadCaps = &pEvoCaps->head[i]; + + if (pHeadCaps->usable) { + NVEvoScalerCaps *pScalerCaps = &pHeadCaps->scalerCaps; + NVEvoScalerTapsCaps *pTapsCaps; + NvU32 capA = nvEvoReadCapReg3(pCaps, NVC573_POSTCOMP_HEAD_HDR_CAPA(i)); + NvU32 capC, capD; + + pScalerCaps->present = + FLD_TEST_DRF(C573, _POSTCOMP_HEAD_HDR_CAPA, _SCLR_PRESENT, + _TRUE, capA); + if (pScalerCaps->present) { + postcompScalingSupported = TRUE; + + capC = nvEvoReadCapReg3(pCaps, NVC573_POSTCOMP_HEAD_HDR_CAPC(i)); + capD = nvEvoReadCapReg3(pCaps, NVC573_POSTCOMP_HEAD_HDR_CAPD(i)); + + /* + * Note that some of these may be zero. + * + * XXXnvdisplay: what about POSTCOMP_HEAD_HDR_CAPC_SCLR_*? 
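+                 *
+                 * The CAPC checks below only distinguish a 4X from a 2X
+                 * maximum downscale factor for 5-tap scaling; 2-tap scaling
+                 * is always reported with a 2X limit.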
+ */ + + /* 5-tap capabilities */ + pTapsCaps = &pScalerCaps->taps[NV_EVO_SCALER_5TAPS]; + if (FLD_TEST_DRF(C573, _POSTCOMP_HEAD_HDR_CAPC, + _SCLR_VS_MAX_SCALE_FACTOR, _4X, capC)) { + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_4X; + } else { + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + } + + if (FLD_TEST_DRF(C573, _POSTCOMP_HEAD_HDR_CAPC, + _SCLR_HS_MAX_SCALE_FACTOR, _4X, capC)) { + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_4X; + } else { + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + } + + pTapsCaps->maxPixelsVTaps = + DRF_VAL(C573, _POSTCOMP_HEAD_HDR_CAPD, + _VSCLR_MAX_PIXELS_5TAP, capD); + + /* 2-tap capabilities */ + pTapsCaps = &pScalerCaps->taps[NV_EVO_SCALER_2TAPS]; + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + pTapsCaps->maxPixelsVTaps = + DRF_VAL(C573, _POSTCOMP_HEAD_HDR_CAPD, + _VSCLR_MAX_PIXELS_2TAP, capD); + } + +#if defined(NV_DEBUG) + NvU32 capA = nvEvoReadCapReg3(pCaps, NVC573_POSTCOMP_HEAD_HDR_CAPA(i)); + NvU32 unitWidth = DRF_VAL(C573, _POSTCOMP_HEAD_HDR_CAPA, _UNIT_WIDTH, capA); + + // EvoInitChannelC5 assumes 16-bit fixed-point. + nvAssert(unitWidth == 16); +#endif + } + } + + for (i = 0; i < NVC573_SYS_CAPB_WINDOW_EXISTS__SIZE_1; i++) { + NVEvoWindowCaps *pWinCaps = &pEvoCaps->window[i]; + NvU32 capA = nvEvoReadCapReg3(pCaps, NVC573_PRECOMP_WIN_PIPE_HDR_CAPA(i)); + + pWinCaps->tmoPresent = FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _TMO_PRESENT, _TRUE, capA); + + pWinCaps->csc0MatricesPresent = + FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC00_PRESENT, _TRUE, capA) && + FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC01_PRESENT, _TRUE, capA); + + pWinCaps->csc10MatrixPresent = + FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC10_PRESENT, _TRUE, capA); + pWinCaps->csc11MatrixPresent = + FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC11_PRESENT, _TRUE, capA); + + pWinCaps->cscLUTsPresent = + FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC0LUT_PRESENT, _TRUE, capA) && + FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC1LUT_PRESENT, _TRUE, capA); + + nvAssert(!pWinCaps->tmoPresent || + (pWinCaps->csc0MatricesPresent && + pWinCaps->csc10MatrixPresent && + pWinCaps->csc11MatrixPresent && + pWinCaps->cscLUTsPresent)); + } + + /* + * To keep the design simple, NVKMS will expose support for precomp scaling + * iff postcomp scaling isn't supported. This means that on chips which have + * both precomp and postcomp scalers (e.g., Turing), NVKMS will only report + * that postcomp scaling is supported. + */ + if (!postcompScalingSupported) { + EvoParsePrecompScalerCaps5(pEvoCaps, pCaps); + } + + // XXXnvdisplay3: add SOR caps for DP over USB +} + +static void EvoParseCapabilityNotifierC5(NVDevEvoPtr pDevEvo, + NVEvoSubDevPtr pEvoSubDev, + volatile const NvU32 *pCaps) +{ + NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities; + + // Miscellaneous capabilities + + /* + * On Turing, the NVC573_IHUB_COMMON_CAPA_SUPPORT_PLANAR bit actually + * reports whether IHUB supports YUV _semi-planar_ formats. 
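+     * (Fully planar support is only reported by the C673 class; see
+     * nvEvoParseCapabilityNotifier6() below.)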
+ */ + pEvoCaps->misc.supportsSemiPlanar = + FLD_TEST_DRF(C573, _IHUB_COMMON_CAPA, _SUPPORT_PLANAR, _TRUE, + nvEvoReadCapReg3(pCaps, NVC573_IHUB_COMMON_CAPA)); + pEvoCaps->misc.supportsDSI = FALSE; + + EvoParseCapabilityNotifierC5C6Common(pEvoCaps, pCaps); +} + +void nvEvoParseCapabilityNotifier6(NVDevEvoPtr pDevEvo, + NVEvoSubDevPtr pEvoSubDev, + volatile const NvU32 *pCaps) +{ + NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities; + NvU32 capC = nvEvoReadCapReg3(pCaps, NVC673_IHUB_COMMON_CAPC); + NvU32 i; + + // Miscellaneous capabilities + + pEvoCaps->misc.supportsPlanar = + FLD_TEST_DRF(C673, _IHUB_COMMON_CAPA, _SUPPORT_PLANAR, _TRUE, + nvEvoReadCapReg3(pCaps, NVC673_IHUB_COMMON_CAPA)); + + pEvoCaps->misc.supportsSemiPlanar = + FLD_TEST_DRF(C673, _IHUB_COMMON_CAPC, _SUPPORT_SEMI_PLANAR, _TRUE, capC); + + pEvoCaps->misc.supportsHVFlip = + FLD_TEST_DRF(C673, _IHUB_COMMON_CAPC, _SUPPORT_HOR_VER_FLIP, _TRUE, capC); + + ct_assert(ARRAY_LEN(pEvoCaps->head) >= NVC673_SYS_CAP_HEAD_EXISTS__SIZE_1); + + // DSI is currently supported on just Orin, which has only 1 DSI engine (DSI0). + pEvoCaps->misc.supportsDSI = + FLD_TEST_DRF(C673, _SYS_CAP, _DSI0_EXISTS, _YES, + nvEvoReadCapReg3(pCaps, NVC673_SYS_CAP)); + + for (i = 0; i < NVC673_SYS_CAP_HEAD_EXISTS__SIZE_1; i++) { + NVEvoHeadCaps *pHeadCaps = &pEvoCaps->head[i]; + + if (pHeadCaps->usable) { + NvU32 capA = nvEvoReadCapReg3(pCaps, NVC673_POSTCOMP_HEAD_HDR_CAPA(i)); + NvBool hclpfPresent = + FLD_TEST_DRF(C673, _POSTCOMP_HEAD_HDR_CAPA, _HCLPF_PRESENT, + _TRUE, capA); + NvBool vfilterPresent = + FLD_TEST_DRF(C673, _POSTCOMP_HEAD_HDR_CAPA, _VFILTER_PRESENT, + _TRUE, capA); + + pHeadCaps->supportsHDMIYUV420HW = hclpfPresent && vfilterPresent; + } + } + + EvoParseCapabilityNotifierC5C6Common(pEvoCaps, pCaps); +} + +static NvU32 UsableWindowCount(const NVEvoCapabilities *pEvoCaps) +{ + NvU32 i, count = 0; + + for (i = 0; i < ARRAY_LEN(pEvoCaps->window); i++) { + if (pEvoCaps->window[i].usable) { + count++; + /* + * We expect usable windows to be contiguous and start at 0. + * Check that the number of usable windows matches the + * number of loop iterations. + */ + nvAssert(count == (i + 1)); + } + } + + return count; +} + +static void FillLUTCaps(struct NvKmsLUTCaps *pCaps, + NvBool supported, + enum NvKmsLUTVssSupport vssSupport, + enum NvKmsLUTVssType vssType, + NvU32 vssSegments, + NvU32 lutEntries, + enum NvKmsLUTFormat entryFormat) +{ + pCaps->supported = supported; + pCaps->vssSupport = supported ? vssSupport : NVKMS_LUT_VSS_NOT_SUPPORTED; + pCaps->vssType = supported ? vssType : NVKMS_LUT_VSS_TYPE_NONE; + pCaps->vssSegments = supported ? vssSegments : 0; + pCaps->lutEntries = supported ? lutEntries : 0; + pCaps->entryFormat = supported ? entryFormat : NVKMS_LUT_FORMAT_UNORM14_WAR_813188; +} + +static void SetHDRLayerCaps(NVDevEvoPtr pDevEvo) +{ + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[0]; + NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities; + NvU32 win; +#if defined(DEBUG) + NvBool hdrLayerCapSet[NVKMS_MAX_LAYERS_PER_HEAD] = {FALSE}; +#endif + NvU32 numLayers[NVKMS_MAX_HEADS_PER_DISP] = {0}; + + /* + * XXX HDR: This assumes the window => layer mapping from + * nvAllocCoreChannelEvo(). + * + * TODO: Rework API to explicitly factor in window => layer mapping. 
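+     *
+     * The loop below mirrors that mapping: windows are visited in order and
+     * handed consecutive per-head layer indices via numLayers[head].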
+ */ + for (win = 0; win < pDevEvo->numWindows; win++) { + NVEvoWindowCaps *pWinCaps = &pEvoCaps->window[win]; + const NvU32 head = pDevEvo->headForWindow[win]; + + if (head == NV_INVALID_HEAD) { + continue; + } + + /* + * XXX HDR: layerCaps assumes that every head has layers with the + * same capabilities. + * + * TODO: Rework API for per-head layerCaps if this assert trips. + */ + nvAssert(!hdrLayerCapSet[numLayers[head]] || + (pDevEvo->caps.layerCaps[numLayers[head]].supportsICtCp == + pWinCaps->tmoPresent)); + + pDevEvo->caps.layerCaps[numLayers[head]].supportsICtCp = pWinCaps->tmoPresent; + + if (pDevEvo->hal->caps.needDefaultLutSurface) { + /* Turing+ uses an FP16, linear 64-segment VSS supported ILUT */ + FillLUTCaps(&pDevEvo->caps.layerCaps[numLayers[head]].ilut, TRUE, + NVKMS_LUT_VSS_SUPPORTED, NVKMS_LUT_VSS_TYPE_LINEAR, + 64, 1025, NVKMS_LUT_FORMAT_FP16); + } else { + /* Volta uses a UNORM14_WAR_813188, non-VSS ILUT */ + FillLUTCaps(&pDevEvo->caps.layerCaps[numLayers[head]].ilut, TRUE, + NVKMS_LUT_VSS_NOT_SUPPORTED, NVKMS_LUT_VSS_TYPE_NONE, + 0, 1025, NVKMS_LUT_FORMAT_UNORM14_WAR_813188); + } + + if (pWinCaps->tmoPresent) { + FillLUTCaps(&pDevEvo->caps.layerCaps[numLayers[head]].tmo, TRUE, + NVKMS_LUT_VSS_REQUIRED, NVKMS_LUT_VSS_TYPE_LINEAR, + 64, 1025, NVKMS_LUT_FORMAT_UNORM16); + + } else { + FillLUTCaps(&pDevEvo->caps.layerCaps[numLayers[head]].tmo, FALSE, + NVKMS_LUT_VSS_NOT_SUPPORTED, NVKMS_LUT_VSS_TYPE_NONE, + 0, 0, NVKMS_LUT_FORMAT_UNORM14_WAR_813188); + } + +#if defined(DEBUG) + hdrLayerCapSet[numLayers[head]] = TRUE; +#endif + + numLayers[head]++; + } + + if (pDevEvo->hal->caps.hasUnorm16OLUT) { + /* Turing+ uses a UNORM16, logarithmic 33-segment VSS supported OLUT */ + FillLUTCaps(&pDevEvo->caps.olut, TRUE, + NVKMS_LUT_VSS_SUPPORTED, NVKMS_LUT_VSS_TYPE_LOGARITHMIC, + 33, 1025, NVKMS_LUT_FORMAT_UNORM16); + } else { + /* Volta uses a UNORM14_WAR_813188, non-VSS OLUT */ + FillLUTCaps(&pDevEvo->caps.olut, TRUE, + NVKMS_LUT_VSS_NOT_SUPPORTED, NVKMS_LUT_VSS_TYPE_NONE, + 0, 1025, NVKMS_LUT_FORMAT_UNORM14_WAR_813188); + } +} + +NvBool nvEvoGetCapabilities3(NVDevEvoPtr pDevEvo, + NVEvoParseCapabilityNotifierFunc3 *pParse, + NVEvoHwFormatFromKmsFormatFunc3 *pGetHwFmt, + NvU32 hwclass, + size_t length) +{ + NvU32 capsHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + NVDispEvoPtr pDispEvo; + NvU32 sd; + NvU32 status; + NvBool ret = FALSE; + NvBool first = TRUE; + NvBool supportsSemiPlanar = TRUE; + NvBool supportsPlanar = TRUE; + NvBool supportsHVFlip = TRUE; + unsigned int i; + enum NvKmsRotation curRotation; + NvBool reflectionX; + NvBool reflectionY; + NvU8 layer; + + /* With nvdisplay, capabilities are exposed in a separate object. 
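+     * (Overview, for illustration: the sequence below allocates the
+     * class-specific DISP_CAPABILITIES object selected by 'hwclass'
+     * (NVC373/NVC573/NVC673), maps it on each subdevice, parses the
+     * capability registers, then unmaps and frees the object.)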
*/ + status = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + capsHandle, + hwclass, NULL); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to allocate caps object"); + goto free_handle; + } + + for (layer = 0; + layer < ARRAY_LEN(pDevEvo->caps.layerCaps); + layer++) { + pDevEvo->caps.layerCaps[layer].supportsWindowMode = TRUE; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + void *ptr; + + status = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + capsHandle, + 0, + length, + &ptr, + 0); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to map caps memory"); + goto free_object; + } + + nvkms_memset(&pEvoSubDev->capabilities, 0, + sizeof(pEvoSubDev->capabilities)); + + EvoParseCapabilityNotifier3(pDevEvo, pEvoSubDev, ptr); + pParse(pDevEvo, pEvoSubDev, ptr); + + status = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + capsHandle, ptr, 0); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to unmap caps memory"); + } + + if (first) { + pDevEvo->numWindows = + UsableWindowCount(&pEvoSubDev->capabilities); + first = FALSE; + } else { + /* Assert that each subdevice has the same number of windows. */ + nvAssert(pDevEvo->numWindows == + UsableWindowCount(&pEvoSubDev->capabilities)); + } + + /* + * Expose YUV semi-planar iff all of the disps belonging to pDevEvo + * support it. + */ + supportsSemiPlanar &= + pEvoSubDev->capabilities.misc.supportsSemiPlanar; + + /* + * Expose YUV planar iff all of the disps belonging to pDevEvo + * support it. + */ + supportsPlanar &= + pEvoSubDev->capabilities.misc.supportsPlanar; + + supportsHVFlip &= + pEvoSubDev->capabilities.misc.supportsHVFlip; + } + + SetHDRLayerCaps(pDevEvo); + + for (i = NvKmsSurfaceMemoryFormatMin; + i <= NvKmsSurfaceMemoryFormatMax; + i++) { + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(i); + + if ((pFormatInfo->numPlanes == 2 && !supportsSemiPlanar) || + (pFormatInfo->numPlanes == 3 && !supportsPlanar)) { + continue; + } + + if (pGetHwFmt(i) != 0) { + NvU8 layer; + + for (layer = 0; + layer < ARRAY_LEN(pDevEvo->caps.layerCaps); + layer++) { + pDevEvo->caps.layerCaps[layer].supportedSurfaceMemoryFormats |= + NVBIT64(i); + } + } + } + + for (reflectionX = FALSE; + reflectionX <= TRUE; + reflectionX++) { + + for (reflectionY = FALSE; + reflectionY <= TRUE; + reflectionY++) { + + for (curRotation = NVKMS_ROTATION_MIN; + curRotation <= NVKMS_ROTATION_MAX; + curRotation++) { + struct NvKmsRRParams rrParams = { curRotation, + reflectionX, + reflectionY }; + NvU8 bitPosition; + + if ((reflectionX || reflectionY) && !supportsHVFlip) { + continue; + } + + if (curRotation == NVKMS_ROTATION_180 && !supportsHVFlip) { + continue; + } + + /* + * Skipping over rotations by 90 and 270 degrees + * because these rotations require support for + * SCAN_COLUMN rotation, which hasn't been added + * to NVKMS yet. 
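+                     *
+                     * (For illustration: each supported combination is
+                     * recorded as one bit; NvKmsRRParamsToCapBit() maps a
+                     * {rotation, reflectionX, reflectionY} triple to a bit
+                     * position in validLayerRRTransforms, so a client can
+                     * test a transform with a single mask check.)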
+ */ + if (curRotation == NVKMS_ROTATION_90 || + curRotation == NVKMS_ROTATION_270) { + continue; + } + + bitPosition = NvKmsRRParamsToCapBit(&rrParams); + pDevEvo->caps.validLayerRRTransforms |= NVBIT(bitPosition); + } + } + } + + ret = TRUE; + +free_object: + status = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + capsHandle); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to free caps object"); + } + +free_handle: + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, capsHandle); + + return ret; +} + +static NvBool EvoGetCapabilitiesC3(NVDevEvoPtr pDevEvo) +{ + return nvEvoGetCapabilities3(pDevEvo, EvoParseCapabilityNotifierC3, + nvHwFormatFromKmsFormatC3, + NVC373_DISP_CAPABILITIES, + sizeof(_NvC373DispCapabilities)); +} + +static NvBool EvoGetCapabilitiesC5(NVDevEvoPtr pDevEvo) +{ + return nvEvoGetCapabilities3(pDevEvo, EvoParseCapabilityNotifierC5, + nvHwFormatFromKmsFormatC5, + NVC573_DISP_CAPABILITIES, + sizeof(_NvC573DispCapabilities)); +} + +NvBool nvEvoGetCapabilitiesC6(NVDevEvoPtr pDevEvo) +{ + return nvEvoGetCapabilities3(pDevEvo, + nvEvoParseCapabilityNotifier6, + nvHwFormatFromKmsFormatC6, + NVC673_DISP_CAPABILITIES, + sizeof(_NvC673DispCapabilities)); +} + +static void EvoSetViewportPointInC3(NVDevEvoPtr pDevEvo, const int head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* Set the input viewport point */ + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_VIEWPORT_POINT_IN(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C37D, _HEAD_SET_VIEWPORT_POINT_IN, _X, x) | + DRF_NUM(C37D, _HEAD_SET_VIEWPORT_POINT_IN, _Y, y)); + /* XXXnvdisplay set ViewportValidPointIn to configure overfetch */ +} + +static void EvoSetOutputScalerC3(const NVDispEvoRec *pDispEvo, const NvU32 head, + const NvU32 imageSharpeningValue, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeViewPortEvo *pViewPort = &pHeadState->timings.viewPort; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + NvU32 vTaps = pViewPort->vTaps > NV_EVO_SCALER_2TAPS ? + NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 : + NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2; + NvU32 hTaps = pViewPort->hTaps > NV_EVO_SCALER_2TAPS ? 
+ NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 : + NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2; + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_OUTPUT_SCALER, _VERTICAL_TAPS, vTaps) | + DRF_NUM(C37D, _HEAD_SET_CONTROL_OUTPUT_SCALER, _HORIZONTAL_TAPS, hTaps)); +} + +static NvBool EvoSetViewportInOut3(NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortMin, + const NVHwModeViewPortEvo *pViewPort, + const NVHwModeViewPortEvo *pViewPortMax, + NVEvoUpdateState *updateState, + NvU32 setWindowUsageBounds) +{ + const NVEvoCapabilitiesPtr pEvoCaps = &pDevEvo->gpus[0].capabilities; + NVEvoChannelPtr pChannel = pDevEvo->core; + struct NvKmsScalingUsageBounds scalingUsageBounds = { }; + NvU32 win; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* The input viewport shouldn't vary. */ + nvAssert(pViewPortMin->in.width == pViewPort->in.width); + nvAssert(pViewPortMax->in.width == pViewPort->in.width); + nvAssert(pViewPortMin->in.height == pViewPort->in.height); + nvAssert(pViewPortMax->in.height == pViewPort->in.height); + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_VIEWPORT_SIZE_IN(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_VIEWPORT_SIZE_IN, _WIDTH, pViewPort->in.width) | + DRF_NUM(C37D, _HEAD_SET_VIEWPORT_SIZE_IN, _HEIGHT, pViewPort->in.height)); + /* XXXnvdisplay set ViewportValidSizeIn to configure overfetch */ + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_VIEWPORT_POINT_OUT, _ADJUST_X, pViewPort->out.xAdjust) | + DRF_NUM(C37D, _HEAD_SET_VIEWPORT_POINT_OUT, _ADJUST_Y, pViewPort->out.yAdjust)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_VIEWPORT_SIZE_OUT, _WIDTH, pViewPort->out.width) | + DRF_NUM(C37D, _HEAD_SET_VIEWPORT_SIZE_OUT, _HEIGHT, pViewPort->out.height)); + + /* XXXnvdisplay deal with pViewPortMin, pViewPortMax */ + + if (!nvComputeScalingUsageBounds(&pEvoCaps->head[head].scalerCaps, + pViewPort->in.width, pViewPort->in.height, + pViewPort->out.width, pViewPort->out.height, + pViewPort->hTaps, pViewPort->vTaps, + &scalingUsageBounds)) { + /* Should have been rejected by validation */ + nvAssert(!"Attempt to program invalid viewport"); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_MAX_OUTPUT_SCALE_FACTOR, _HORIZONTAL, + scalingUsageBounds.maxHDownscaleFactor) | + DRF_NUM(C37D, _HEAD_SET_MAX_OUTPUT_SCALE_FACTOR, _VERTICAL, + scalingUsageBounds.maxVDownscaleFactor)); + + /* + * Program MAX_PIXELS_FETCHED_PER_LINE window usage bounds + * for each window that is attached to the head. + * + * Precomp will clip the post-scaled window to the input viewport, reverse-scale + * this cropped size back to the input surface domain, and isohub will fetch + * this cropped size. This function assumes that there's no window scaling yet, + * so the MAX_PIXELS_FETCHED_PER_LINE will be bounded by the input viewport + * width. SetScalingUsageBoundsOneWindow5() will take care of updating + * MAX_PIXELS_FETCHED_PER_LINE, if window scaling is enabled later. 
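+     *
+     * (Worked example, assuming no window scaling: with a 1920-pixel-wide
+     * input viewport, the bound ORed into setWindowUsageBounds below is
+     * nvGetMaxPixelsFetchedPerLine(1920, NV_EVO_SCALE_FACTOR_1X), i.e. the
+     * viewport width plus whatever overfetch padding that helper applies.)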
+ * + * Program MAX_PIXELS_FETCHED_PER_LINE for each window that is attached to + * head. For Turing+, SetScalingUsageBoundsOneWindow5() will take care of + * programming window usage bounds only for the layers/windows in use. + */ + setWindowUsageBounds |= + DRF_NUM(C37D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _MAX_PIXELS_FETCHED_PER_LINE, + nvGetMaxPixelsFetchedPerLine(pViewPort->in.width, + NV_EVO_SCALE_FACTOR_1X)); + + for (win = 0; win < pDevEvo->numWindows; win++) { + if (head != pDevEvo->headForWindow[win]) { + continue; + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS(win), 1); + nvDmaSetEvoMethodData(pChannel, setWindowUsageBounds); + } + + return scalingUsageBounds.vUpscalingAllowed; +} + +static void EvoSetViewportInOutC3(NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortMin, + const NVHwModeViewPortEvo *pViewPort, + const NVHwModeViewPortEvo *pViewPortMax, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvBool verticalUpscalingAllowed = + EvoSetViewportInOut3(pDevEvo, head, pViewPortMin, pViewPort, + pViewPortMax, updateState, + NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C3); + + nvDmaSetStartEvoMethod(pChannel, + NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_HEAD_USAGE_BOUNDS, _CURSOR, _USAGE_W256_H256) | + DRF_DEF(C37D, _HEAD_SET_HEAD_USAGE_BOUNDS, _OUTPUT_LUT, _USAGE_1025) | + (verticalUpscalingAllowed ? + DRF_DEF(C37D, _HEAD_SET_HEAD_USAGE_BOUNDS, _UPSCALING_ALLOWED, _TRUE) : + DRF_DEF(C37D, _HEAD_SET_HEAD_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE))); +} + +static void EvoSetViewportInOutC5(NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortMin, + const NVHwModeViewPortEvo *pViewPort, + const NVHwModeViewPortEvo *pViewPortMax, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 setWindowUsageBounds = + (NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C5 | + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_2) | + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE)); + NvU32 verticalUpscalingAllowed = + EvoSetViewportInOut3(pDevEvo, head, pViewPortMin, pViewPort, + pViewPortMax, updateState, setWindowUsageBounds); + + nvDmaSetStartEvoMethod(pChannel, + NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _CURSOR, _USAGE_W256_H256) | + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _OLUT_ALLOWED, _TRUE) | + /* Despite the generic name of this field, it's specific to vertical taps. */ + (pViewPort->vTaps > NV_EVO_SCALER_2TAPS ? + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _OUTPUT_SCALER_TAPS, _TAPS_5) : + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _OUTPUT_SCALER_TAPS, _TAPS_2)) | + (verticalUpscalingAllowed ? + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _UPSCALING_ALLOWED, _TRUE) : + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE))); +} + +/*! + * Compute the C37D_HEAD_SET_CONTROL_CURSOR method value. + * + * This function also validates that the given NVSurfaceEvoRec can be + * used as a cursor image. + + * + * \param[in] pDevEvo The device on which the cursor will be programmed. + * \param[in] pSurfaceEvo The surface to be used as the cursor image. + * \param[out] pValue The C37D_HEAD_SET_CONTROL_CURSOR method value. + + * \return If TRUE, the surface can be used as a cursor image, and + * pValue contains the method value. 
If FALSE, the surface + * cannot be used as a cursor image. + */ +NvBool nvEvoGetHeadSetControlCursorValueC3(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo, + NvU32 *pValue) +{ + NvU32 plane, numPlanes; + NvU64 minRequiredSize = 0; + NvU32 value = 0; + + if (pSurfaceEvo == NULL) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _ENABLE, _DISABLE); + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _FORMAT, _A8R8G8B8); + goto done; + } else { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _ENABLE, _ENABLE); + } + + /* The cursor must always be pitch. */ + + if (pSurfaceEvo->layout != NvKmsSurfaceMemoryLayoutPitch) { + return FALSE; + } + + /* + * The only supported cursor image memory format is A8R8G8B8. + */ + if (pSurfaceEvo->format == NvKmsSurfaceMemoryFormatA8R8G8B8) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _FORMAT, _A8R8G8B8); + } else { + return FALSE; + } + + numPlanes = nvKmsGetSurfaceMemoryFormatInfo(pSurfaceEvo->format)->numPlanes; + + /* + * The cursor only supports a few image sizes. + * + * Compute minRequiredSize as widthInPixels x heightInPixels x 4 bytes per + * pixel, except for 32x32: we require a minimum pitch of 256, so we use + * that instead of widthInPixels x 4. + */ + if ((pSurfaceEvo->widthInPixels == 32) && + (pSurfaceEvo->heightInPixels == 32)) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W32_H32); + minRequiredSize = 256 * 32; + } else if ((pSurfaceEvo->widthInPixels == 64) && + (pSurfaceEvo->heightInPixels == 64)) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W64_H64); + minRequiredSize = 64 * 64 * 4; + } else if ((pDevEvo->cursorHal->caps.maxSize >= 128) && + (pSurfaceEvo->widthInPixels == 128) && + (pSurfaceEvo->heightInPixels == 128)) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W128_H128); + minRequiredSize = 128 * 128 * 4; + } else if ((pDevEvo->cursorHal->caps.maxSize >= 256) && + (pSurfaceEvo->widthInPixels == 256) && + (pSurfaceEvo->heightInPixels == 256)) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W256_H256); + minRequiredSize = 256 * 256 * 4; + } else { + return FALSE; + } + + /* The surface size cannot be smaller than the required minimum. */ + + for (plane = 0; plane < numPlanes; plane++) { + if (pSurfaceEvo->planes[plane].rmObjectSizeInBytes < minRequiredSize) { + return FALSE; + } + } + + /* + * Hard code the cursor hotspot. + */ + value |= DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR, _HOT_SPOT_Y, 0); + value |= DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR, _HOT_SPOT_X, 0); + + // XXXnvdisplay: Add support for cursor de-gamma. + +done: + + if (pValue != NULL) { + *pValue = value; + } + + return TRUE; +} + +static void SetCursorSurfaceAddress( + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 offset, + NvU32 head) +{ + NvU32 ctxDmaHandle = pSurfaceDesc ? pSurfaceDesc->ctxDmaHandle : 0; + + nvAssert(!pSurfaceDesc || ctxDmaHandle); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTEXT_DMA_CURSOR(head, 0), 4); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTEXT_DMA_CURSOR, _HANDLE, ctxDmaHandle)); + + // Always set the right cursor context DMA. + // HW will just ignore this if it is not in stereo cursor mode. 
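+    // (Method layout note: this single 4-method burst covers, in order, the
+    // left- and right-eye CONTEXT_DMA_CURSOR handles and then the left- and
+    // right-eye OFFSET_CURSOR origins; both eyes are given identical values
+    // here, and hardware ignores the right-eye settings outside of stereo
+    // cursor mode, per the comment above.)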
+ nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTEXT_DMA_CURSOR, _HANDLE, ctxDmaHandle)); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_OFFSET_CURSOR, _ORIGIN, + nvCtxDmaOffsetFromBytes(offset))); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_OFFSET_CURSOR, _ORIGIN, + nvCtxDmaOffsetFromBytes(offset))); +} + +static void EvoSetCursorImageC3(NVDevEvoPtr pDevEvo, const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVSurfaceDescriptor *pSurfaceDesc = + pSurfaceEvo ? &pSurfaceEvo->planes[0].surfaceDesc : NULL; + const NvU64 offset = pSurfaceEvo ? pSurfaceEvo->planes[0].offset : 0; + NvU32 headSetControlCursorValue = 0; + NvBool ret; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + nvAssert(pCursorCompParams->colorKeySelect == + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE); + nvAssert(NVBIT(pCursorCompParams->blendingMode[1]) & + NV_EVO3_SUPPORTED_CURSOR_COMP_BLEND_MODES); + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + ret = nvEvoGetHeadSetControlCursorValueC3(pDevEvo, pSurfaceEvo, + &headSetControlCursorValue); + /* + * The caller should have already validated the surface, so there + * shouldn't be a failure. + */ + if (!ret) { + nvAssert(!"Could not construct HEAD_SET_CONTROL_CURSOR value"); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_PRESENT_CONTROL_CURSOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_PRESENT_CONTROL_CURSOR, _MODE, _MONO)); + + SetCursorSurfaceAddress(pChannel, pSurfaceDesc, offset, head); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTROL_CURSOR(head), 1); + nvDmaSetEvoMethodData(pChannel, headSetControlCursorValue); + + nvDmaSetStartEvoMethod(pChannel, + NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(head), 1); + switch (pCursorCompParams->blendingMode[1]) { + case NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, 255) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _ZERO) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, 255) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, 255) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, + pCursorCompParams->surfaceAlpha) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, 
+ _CURSOR_COLOR_FACTOR_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, + pCursorCompParams->surfaceAlpha) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + default: + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "%s: composition mode %d not supported for cursor", + __func__, pCursorCompParams->blendingMode[1]); + break; + } +} + +NvBool nvEvoValidateCursorSurfaceC3(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo) +{ + return nvEvoGetHeadSetControlCursorValueC3(pDevEvo, pSurfaceEvo, NULL); +} + +static NvBool ValidateWindowFormatSourceRectC3( + const struct NvKmsRect *sourceFetchRect, + const enum NvKmsSurfaceMemoryFormat format) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(format); + + /* + * sourceFetchRect represents the dimensions of the source fetch rectangle. + * If YUV crop and scaler overfetch are supported, it is up to the caller to + * provide the correct dimensions (e.g., ValidSizeIn/ValidPointIn vs. + * SizeIn/PointIn). + * + * For all YUV formats, the position and size of the fetch rectangle must be + * even in the horizontal direction. + * + * For YUV420 formats, there is an additional restriction that the position + * and size of the fetch rectangle must be even in the vertical direction as + * well. + */ + if (pFormatInfo->isYUV) { + if (((sourceFetchRect->x & 1) != 0) || + (sourceFetchRect->width & 1) != 0) { + return FALSE; + } + + if (pFormatInfo->yuv.vertChromaDecimationFactor > 1) { + if (((sourceFetchRect->y & 1) != 0) || + (sourceFetchRect->height & 1) != 0) { + return FALSE; + } + } + } + + return TRUE; +} + +typedef typeof(ValidateWindowFormatSourceRectC3) val_src_rect_t; + +static NvBool EvoValidateWindowFormatWrapper( + const enum NvKmsSurfaceMemoryFormat format, + NVEvoHwFormatFromKmsFormatFunc3 *pGetHwFmt, + const struct NvKmsRect *sourceFetchRect, + val_src_rect_t *pValSrcRect, + NvU32 *hwFormatOut) +{ + const NvU32 hwFormat = pGetHwFmt(format); + + if (hwFormat == 0) { + return FALSE; + } + + if (hwFormatOut != NULL) { + *hwFormatOut = hwFormat; + } + + /* + * If sourceFetchRect is NULL, this function is only responsible for + * verifying whether the given NvKmsSurfaceMemoryFormat has a corresponding + * HW format. 
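+     *
+     * (Usage sketch, for illustration: capability probing can pass
+     * sourceFetchRect == NULL simply to ask whether the format has a HW
+     * encoding at all, while flip-time validation passes the real fetch
+     * rectangle so that the YUV alignment rules checked by
+     * ValidateWindowFormatSourceRectC3() are enforced as well.)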
+ */ + if (sourceFetchRect == NULL) { + return TRUE; + } + + return pValSrcRect(sourceFetchRect, format); +} + +static NvBool EvoValidateWindowFormatC3( + const enum NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut) +{ + return EvoValidateWindowFormatWrapper( + format, + nvHwFormatFromKmsFormatC3, + sourceFetchRect, + ValidateWindowFormatSourceRectC3, + hwFormatOut); +} + +static NvBool EvoValidateWindowFormatC5( + const enum NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut) +{ + return EvoValidateWindowFormatWrapper( + format, + nvHwFormatFromKmsFormatC5, + sourceFetchRect, + ValidateWindowFormatSourceRectC3, + hwFormatOut); +} + +NvBool nvEvoValidateWindowFormatC6( + const enum NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut) +{ + return EvoValidateWindowFormatWrapper( + format, + nvHwFormatFromKmsFormatC6, + sourceFetchRect, + ValidateWindowFormatSourceRectC3, + hwFormatOut); +} + +static NvU32 OffsetForNotifier(int idx) +{ + /* NVDisplay notifiers are always the 16-byte variety. We only care about + * the NV_DISP_NOTIFIER__0 dword which contains the status. */ + NvU32 base = idx * (NV_DISP_NOTIFIER_SIZEOF / sizeof(NvU32)); + return base + NV_DISP_NOTIFIER__0; +} + +void nvEvoInitCompNotifierC3(const NVDispEvoRec *pDispEvo, int idx) +{ + nvWriteEvoCoreNotifier(pDispEvo, OffsetForNotifier(idx), + DRF_DEF(_DISP, _NOTIFIER__0, _STATUS, _NOT_BEGUN)); +} + +NvBool nvEvoIsCompNotifierCompleteC3(NVDispEvoPtr pDispEvo, int idx) { + return nvEvoIsCoreNotifierComplete(pDispEvo, OffsetForNotifier(idx), + DRF_BASE(NV_DISP_NOTIFIER__0_STATUS), + DRF_EXTENT(NV_DISP_NOTIFIER__0_STATUS), + NV_DISP_NOTIFIER__0_STATUS_FINISHED); +} + +void nvEvoWaitForCompNotifierC3(const NVDispEvoRec *pDispEvo, int idx) +{ + nvEvoWaitForCoreNotifier(pDispEvo, OffsetForNotifier(idx), + DRF_BASE(NV_DISP_NOTIFIER__0_STATUS), + DRF_EXTENT(NV_DISP_NOTIFIER__0_STATUS), + NV_DISP_NOTIFIER__0_STATUS_FINISHED); +} + +static void EvoSetDitherC3(NVDispEvoPtr pDispEvo, const int head, + const NvBool enabled, const NvU32 type, + const NvU32 algo, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 ditherControl; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (enabled) { + ditherControl = DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _ENABLE, _ENABLE); + + switch (type) { + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _BITS, _TO_6_BITS); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _BITS, _TO_8_BITS); + break; + /* XXXnvdisplay: Support DITHER_TO_{10,12}_BITS (see also bug 1729668). 
*/ + default: + nvAssert(!"Unknown ditherType"); + // Fall through + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF: + ditherControl = NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE; + break; + } + + } else { + ditherControl = DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _ENABLE, _DISABLE); + } + + switch (algo) { + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_ERR_ACC: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _MODE, _STATIC_ERR_ACC); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _MODE, _DYNAMIC_2X2); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _MODE, _STATIC_2X2); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _MODE, _TEMPORAL); + break; + default: + nvAssert(!"Unknown DitherAlgo"); + // Fall through + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN: + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_ERR_ACC: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _MODE, _DYNAMIC_ERR_ACC); + break; + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_DITHER_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, ditherControl); +} + +static void EvoSetDisplayRateC3(NVDispEvoPtr pDispEvo, const int head, + NvBool enable, + NVEvoUpdateState *updateState, + NvU32 timeoutMicroseconds) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (enable) { + timeoutMicroseconds = + NV_MIN(timeoutMicroseconds, + DRF_MASK(NVC37D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_DISPLAY_RATE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_DISPLAY_RATE, _RUN_MODE, _ONE_SHOT) | + DRF_NUM(C37D, _HEAD_SET_DISPLAY_RATE, _MIN_REFRESH_INTERVAL, + timeoutMicroseconds) | + (timeoutMicroseconds == 0 ? 
+ DRF_DEF(C37D, _HEAD_SET_DISPLAY_RATE, _MIN_REFRESH, _DISABLE) : + DRF_DEF(C37D, _HEAD_SET_DISPLAY_RATE, _MIN_REFRESH, _ENABLE))); + } else { + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_DISPLAY_RATE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_DISPLAY_RATE, _RUN_MODE, _CONTINUOUS)); + } +} + +static void EvoSetStallLockC3(NVDispEvoPtr pDispEvo, const int head, + NvBool enable, NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + NvU32 data = 0x0; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (pHC->crashLockUnstallMode) { + data |= DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _UNSTALL_MODE, _CRASH_LOCK); + } else { + data |= DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _UNSTALL_MODE, _LINE_LOCK); + } + + if (enable) { + data |= DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _ENABLE, _TRUE) | + DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _MODE, _ONE_SHOT); + + if (!pHC->useStallLockPin) { + data |= DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _LOCK_PIN, _LOCK_PIN_NONE); + } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stallLockPin)) { + NvU32 pin = pHC->stallLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + data |= DRF_NUM(C37D, _HEAD_SET_STALL_LOCK, _LOCK_PIN, + NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(pin)); + } else { + NvU32 pin = pHC->stallLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(C37D, _HEAD_SET_STALL_LOCK, _LOCK_PIN, + NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(pin)); + } + } else { + data |= DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _ENABLE, _FALSE); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_STALL_LOCK(head), 1); + nvDmaSetEvoMethodData(pChannel, data); +} + +static NvBool GetChannelState(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvU32 *result) +{ + NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS info = { }; + NvU32 ret; + + info.base.subdeviceIndex = sd; + info.channelClass = pChan->hwclass; + info.channelInstance = pChan->instance; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NVC370_CTRL_CMD_GET_CHANNEL_INFO, + &info, sizeof(info)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to query display engine channel state: 0x%08x:%d:%d:0x%08x", + pChan->hwclass, pChan->instance, sd, ret); + return FALSE; + } + + *result = info.channelState; + + return TRUE; +} + +NvBool nvEvoIsChannelIdleC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvBool *result) +{ + NvU32 channelState; + + if (!GetChannelState(pDevEvo, pChan, sd, &channelState)) { + return FALSE; + } + + *result = (channelState == NVC370_CTRL_GET_CHANNEL_INFO_STATE_IDLE); + + return TRUE; +} + +NvBool nvEvoIsChannelMethodPendingC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvBool *result) +{ + NvBool tmpResult; + + /* With C370, Idle and NoMethodPending are equivalent. */ + ct_assert(NVC370_CTRL_GET_CHANNEL_INFO_STATE_IDLE == + NVC370_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING); + + if (!nvEvoIsChannelIdleC3(pDevEvo, pChan, sd, &tmpResult)) { + return FALSE; + } + + *result = !tmpResult; + + return TRUE; +} + +NvBool nvEvoAllocRmCtrlObjectC3(NVDevEvoPtr pDevEvo) +{ + const NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + /* Note that this object is not at all related to the GF100_DISP_SW (9072) + * or NV50_DISPLAY_SW (5072) objects, despite their similarity in name. 
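+     * (The object allocated here backs NVC372_CTRL_* requests such as the
+     * NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN query issued by
+     * nvEvoGetActiveViewportOffsetC3() further below.)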
+     */
+    NvU32 status = nvRmApiAlloc(nvEvoGlobal.clientHandle,
+                                pDevEvo->deviceHandle,
+                                handle,
+                                NVC372_DISPLAY_SW, NULL);
+    if (status != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"Failed to allocate nvdisplay rmctrl object");
+        goto fail;
+    }
+
+    pDevEvo->rmCtrlHandle = handle;
+
+    return TRUE;
+
+fail:
+    nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle);
+    return FALSE;
+}
+
+static NvU32 GetAccelerators(
+    NVDevEvoPtr pDevEvo,
+    NVEvoChannelPtr pChannel,
+    NvU32 sd)
+{
+    NVC370_CTRL_GET_ACCL_PARAMS params = { };
+    NvU32 ret;
+
+    params.base.subdeviceIndex = sd;
+    params.channelClass = pChannel->hwclass;
+    nvAssert(pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL);
+    params.channelInstance =
+        NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask);
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayHandle,
+                         NVC370_CTRL_CMD_GET_ACCL,
+                         &params, sizeof(params));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                         "Failed to retrieve accelerators");
+        return 0;
+    }
+
+    return params.accelerators;
+}
+
+static NvBool SetAccelerators(
+    NVDevEvoPtr pDevEvo,
+    NVEvoChannelPtr pChannel,
+    NvU32 sd,
+    NvU32 accelerators,
+    NvU32 accelMask)
+{
+    NVC370_CTRL_SET_ACCL_PARAMS params = { };
+    NvU32 ret;
+
+    params.base.subdeviceIndex = sd;
+    params.channelClass = pChannel->hwclass;
+    nvAssert(pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL);
+    params.channelInstance =
+        NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask);
+    params.accelerators = accelerators;
+    params.accelMask = accelMask;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayHandle,
+                         NVC370_CTRL_CMD_SET_ACCL,
+                         &params, sizeof(params));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                         "Failed to set accelerators");
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+void nvEvoAccelerateChannelC3(NVDevEvoPtr pDevEvo,
+                              NVEvoChannelPtr pChannel,
+                              const NvU32 sd,
+                              const NvBool trashPendingMethods,
+                              const NvBool unblockMethodsInExecutation,
+                              NvU32 *pOldAccelerators)
+{
+    NvU32 accelMask = 0x0;
+
+    if (trashPendingMethods) {
+        accelMask |= NVC370_CTRL_ACCL_TRASH_ONLY;
+    }
+
+    /* Start with a conservative set of accelerators; may need to add more
+     * later. */
+    if (unblockMethodsInExecutation) {
+        accelMask |= NVC370_CTRL_ACCL_IGNORE_PI |
+                     NVC370_CTRL_ACCL_SKIP_SEMA |
+                     NVC370_CTRL_ACCL_IGNORE_FLIPLOCK;
+    }
+
+    if (accelMask == 0x0) {
+        return;
+    }
+
+    *pOldAccelerators = GetAccelerators(pDevEvo, pChannel, sd);
+
+    /* Accelerate window channel. */
+    if (!SetAccelerators(pDevEvo, pChannel, sd, accelMask, accelMask)) {
+        nvAssert(!"Failed to set accelerators");
+    }
+}
+
+void nvEvoResetChannelAcceleratorsC3(NVDevEvoPtr pDevEvo,
+                                     NVEvoChannelPtr pChannel,
+                                     const NvU32 sd,
+                                     const NvBool trashPendingMethods,
+                                     const NvBool unblockMethodsInExecutation,
+                                     NvU32 oldAccelerators)
+{
+    NvU32 accelMask = 0x0;
+
+    if (trashPendingMethods) {
+        accelMask |= NVC370_CTRL_ACCL_TRASH_ONLY;
+    }
+
+    /* Start with a conservative set of accelerators; may need to add more
+     * later. */
+    if (unblockMethodsInExecutation) {
+        accelMask |= NVC370_CTRL_ACCL_IGNORE_PI |
+                     NVC370_CTRL_ACCL_SKIP_SEMA |
+                     NVC370_CTRL_ACCL_IGNORE_FLIPLOCK;
+    }
+
+    if (accelMask == 0x0) {
+        return;
+    }
+
+    /* Accelerate window channel.
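+     * (When restoring, oldAccelerators supplies the saved bit values and
+     * accelMask limits which bits are written, so only the fields that
+     * nvEvoAccelerateChannelC3() overrode are touched.)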
*/ + if (!SetAccelerators(pDevEvo, pChannel, sd, oldAccelerators, accelMask)) { + nvAssert(!"Failed to set accelerators"); + } +} + +static void ForceFlipToNull( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd, + NVEvoUpdateState *updateState, + const NVFlipChannelEvoHwState *pNullHwState) +{ + const NvU32 subDeviceMask = (1 << sd); + + nvPushEvoSubDevMask(pDevEvo, subDeviceMask); + + pDevEvo->hal->Flip(pDevEvo, pChannel, pNullHwState, updateState, + FALSE /* bypassComposition */); + + nvPopEvoSubDevMask(pDevEvo); +} + +static NvBool PollForChannelIdle( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd) +{ + const NvU32 timeout = 2000000; // 2 seconds + NvU64 startTime = 0; + NvBool isMethodPending = TRUE; + + do { + if (!nvEvoIsChannelMethodPendingC3(pDevEvo, pChannel, sd, + &isMethodPending)) { + break; + } + + if (!isMethodPending) { + break; + } + + if (nvExceedsTimeoutUSec(pDevEvo, &startTime, timeout)) { + return FALSE; + } + + nvkms_yield(); + + } while (TRUE); + + return TRUE; +} + +/*! + * This function emulates the behavior of the STOP_BASE/STOP_OVERLAY RM control + * calls for pre-EVO hardware. + * + * STOP_BASE/STOP_OVERLAY will apply hardware channel accelerators, push + * methods via the debug interface to NULL context DMAs, and wait for the + * channel to go idle (which means the surface programmed into the core channel + * will become visible). + * + * If we asked RM to do the same thing for the window channel that is emulating + * the base channel on nvdisplay, the display would just go black: there's no + * surface in the core channel, so NULLing the context DMA in the window + * channel will disable both "core" and "base". + * + * So instead, similar functionality is implemented here: we apply + * accelerators, push methods to flip to core, and wait for the channel to + * idle. + */ +typedef struct { + struct { + NvU32 accelerators; + NvBool overridden; + } window[NVKMS_MAX_WINDOWS_PER_DISP]; + NVFlipChannelEvoHwState nullEvoHwState; +} EvoIdleChannelAcceleratorState; + +static NvBool EvoForceIdleSatelliteChannelsWithAccel( + NVDevEvoPtr pDevEvo, + const NVEvoIdleChannelState *idleChannelState, + const NvU32 accelMask) +{ + NvU32 sd, window; + NVEvoUpdateState updateState = { }; + NvBool ret = FALSE; + + EvoIdleChannelAcceleratorState *pAcceleratorState = nvCalloc( + pDevEvo->numSubDevices, sizeof(EvoIdleChannelAcceleratorState)); + + if (!pAcceleratorState) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to alloc accelerator state"); + return FALSE; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + /* + * Forcing a channel to be idle is currently only implemented for window + * channels. + */ + if ((idleChannelState->subdev[sd].channelMask & + ~NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0) { + + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Forcing non-window channel idle not implemented"); + goto done; + } + + for (window = 0; window < pDevEvo->numWindows; window++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, + _WINDOW, window, _ENABLE, + idleChannelState->subdev[sd].channelMask)) { + NVEvoChannelPtr pChannel = pDevEvo->window[window]; + + /* Save old window channel accelerators. */ + NvU32 oldAccel = GetAccelerators(pDevEvo, pChannel, sd); + + pAcceleratorState[sd].window[window].accelerators = + oldAccel; + + /* Accelerate window channel. 
*/ + if (!SetAccelerators(pDevEvo, pChannel, sd, accelMask, + accelMask)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to set accelerators"); + goto done; + } + pAcceleratorState[sd].window[window].overridden = TRUE; + + /* + * Push a flip to null in this channel. + * + * XXX nullEvoHwState isn't a valid state for NULL flip, for + * example 'csc' will be all 0s instead of the identity. This + * will also lead to the HW state being out of sync with the SW + * state. + */ + ForceFlipToNull(pDevEvo, pChannel, sd, &updateState, + &pAcceleratorState->nullEvoHwState); + } + } + } + + /* Push one update for all of the flips programmed above. */ + nvEvoUpdateC3(pDevEvo, &updateState, TRUE /* releaseElv */); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + for (window = 0; window < pDevEvo->numWindows; window++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE, + idleChannelState->subdev[sd].channelMask)) { + NVEvoChannelPtr pChannel = pDevEvo->window[window]; + + /* Wait for the flips to complete. */ + if (!PollForChannelIdle(pDevEvo, pChannel, sd)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Timed out while idling base channel"); + goto done; + } + } + } + } + + ret = TRUE; + +done: + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + for (window = 0; window < pDevEvo->numWindows; window++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE, + idleChannelState->subdev[sd].channelMask)) { + NVEvoChannelPtr pChannel = pDevEvo->window[window]; + + const NvU32 oldAccel = + pAcceleratorState[sd].window[window].accelerators; + + if (!pAcceleratorState[sd].window[window].overridden) { + continue; + } + + /* Restore window channel accelerators. */ + if (!SetAccelerators(pDevEvo, pChannel, sd, oldAccel, + accelMask)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to restore accelerators"); + } + } + } + } + + nvFree(pAcceleratorState); + return ret; +} + +NvBool nvEvoForceIdleSatelliteChannelC3( + NVDevEvoPtr pDevEvo, + const NVEvoIdleChannelState *idleChannelState) +{ + /* Start with a conservative set of accelerators; may need to add more + * later. 
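+     * (For comparison, nvEvoForceIdleSatelliteChannelIgnoreLockC3() below
+     * extends this mask with IGNORE_FLIPLOCK and IGNORE_INTERLOCK so that
+     * cross-channel locks cannot keep a channel from idling.)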
+     */
+    const NvU32 accelMask =
+        NVC370_CTRL_ACCL_IGNORE_PI |
+        NVC370_CTRL_ACCL_SKIP_SEMA;
+
+    return EvoForceIdleSatelliteChannelsWithAccel(pDevEvo,
+                                                  idleChannelState,
+                                                  accelMask);
+}
+
+NvBool nvEvoForceIdleSatelliteChannelIgnoreLockC3(
+    NVDevEvoPtr pDevEvo,
+    const NVEvoIdleChannelState *idleChannelState)
+{
+    const NvU32 accelMask =
+        NVC370_CTRL_ACCL_IGNORE_PI |
+        NVC370_CTRL_ACCL_SKIP_SEMA |
+        NVC370_CTRL_ACCL_IGNORE_FLIPLOCK |
+        NVC370_CTRL_ACCL_IGNORE_INTERLOCK;
+
+    return EvoForceIdleSatelliteChannelsWithAccel(pDevEvo,
+                                                  idleChannelState,
+                                                  accelMask);
+}
+
+void nvEvoFreeRmCtrlObjectC3(NVDevEvoPtr pDevEvo)
+{
+    if (pDevEvo->rmCtrlHandle) {
+        NvU32 status;
+
+        status = nvRmApiFree(nvEvoGlobal.clientHandle,
+                             pDevEvo->deviceHandle,
+                             pDevEvo->rmCtrlHandle);
+
+        if (status != NVOS_STATUS_SUCCESS) {
+            nvAssert(!"Failed to free nvdisplay rmctrl object");
+        }
+
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDevEvo->rmCtrlHandle);
+        pDevEvo->rmCtrlHandle = 0;
+    }
+}
+
+void nvEvoSetImmPointOutC3(NVDevEvoPtr pDevEvo,
+                           NVEvoChannelPtr pChannel,
+                           NvU32 sd,
+                           NVEvoUpdateState *updateState,
+                           NvU16 x, NvU16 y)
+{
+    NVEvoChannelPtr pImmChannel = pChannel->imm.u.dma;
+
+    nvAssert((pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0);
+    nvAssert(pChannel->imm.type == NV_EVO_IMM_CHANNEL_DMA);
+
+    /* This should only be called for one GPU at a time, since the
+     * pre-nvdisplay version uses PIO and cannot broadcast. */
+    nvAssert(ONEBITSET(nvPeekEvoSubDevMask(pDevEvo)));
+
+    nvDmaSetStartEvoMethod(pImmChannel,
+                           NVC37B_SET_POINT_OUT(0 /* Left eye */), 1);
+
+    nvDmaSetEvoMethodData(pImmChannel,
+                          DRF_NUM(C37B, _SET_POINT_OUT, _X, x) |
+                          DRF_NUM(C37B, _SET_POINT_OUT, _Y, y));
+
+    nvWinImmChannelUpdateState(pDevEvo, updateState, pChannel);
+}
+
+static void SetCrcSurfaceAddress(
+    NVEvoChannelPtr pChannel,
+    const NVSurfaceDescriptor *pSurfaceDesc,
+    NvU32 head)
+{
+    NvU32 ctxDmaHandle = pSurfaceDesc ? pSurfaceDesc->ctxDmaHandle : 0;
+
+    nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTEXT_DMA_CRC(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(C37D, _HEAD_SET_CONTEXT_DMA_CRC, _HANDLE, ctxDmaHandle));
+}
+
+static void EvoStartHeadCRC32CaptureC3(NVDevEvoPtr pDevEvo,
+                                       NVEvoDmaPtr pDma,
+                                       NVConnectorEvoPtr pConnectorEvo,
+                                       const enum nvKmsTimingsProtocol protocol,
+                                       const NvU32 orIndex,
+                                       NvU32 head,
+                                       NvU32 sd,
+                                       NVEvoUpdateState *updateState)
+{
+    const NvU32 winChannel = head << 1;
+    NVEvoChannelPtr pChannel = pDevEvo->core;
+    NvU32 orOutput = 0;
+
+    /* These methods should only apply to a single pDpy */
+    nvAssert(pDevEvo->subDevMaskStackDepth > 0);
+
+    /* The window channel should fit in
+     * NVC37D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL */
+    nvAssert(winChannel < DRF_MASK(NVC37D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL));
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+
+    switch (pConnectorEvo->or.type) {
+        case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR:
+            if (protocol == NVKMS_PROTOCOL_SOR_DP_A ||
+                protocol == NVKMS_PROTOCOL_SOR_DP_B) {
+                orOutput = NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF;
+            } else {
+                orOutput =
+                    NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(orIndex);
+            }
+            break;
+        case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR:
+            orOutput =
+                NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR(orIndex);
+            break;
+        case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC:
+            /* No DAC support on nvdisplay. Fall through. */
+        default:
+            nvAssert(!"Invalid pConnectorEvo->or.type");
+            break;
+    }
+
+    SetCrcSurfaceAddress(pChannel, &pDma->surfaceDesc, head);
+
+    nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CRC_CONTROL(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(C37D, _HEAD_SET_CRC_CONTROL, _PRIMARY_CRC, orOutput) |
+        DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _SECONDARY_CRC, _NONE) |
+        DRF_NUM(C37D, _HEAD_SET_CRC_CONTROL, _CONTROLLING_CHANNEL, winChannel) |
+        DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _EXPECT_BUFFER_COLLAPSE, _FALSE) |
+        DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE));
+
+    /* Reset the CRC notifier */
+    nvEvoResetCRC32Notifier(pDma->subDeviceAddress[sd],
+                            NVC37D_NOTIFIER_CRC_STATUS_0,
+                            DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_DONE),
+                            NVC37D_NOTIFIER_CRC_STATUS_0_DONE_FALSE);
+}
+
+static void EvoStopHeadCRC32CaptureC3(NVDevEvoPtr pDevEvo,
+                                      NvU32 head,
+                                      NVEvoUpdateState *updateState)
+{
+    NVEvoChannelPtr pChannel = pDevEvo->core;
+
+    /* These methods should only apply to a single pDpy */
+    nvAssert(pDevEvo->subDevMaskStackDepth > 0);
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+
+    SetCrcSurfaceAddress(pChannel, NULL /* pSurfaceDesc */, head);
+
+    nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CRC_CONTROL(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _PRIMARY_CRC, _NONE) |
+        DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _SECONDARY_CRC, _NONE) |
+        DRF_NUM(C37D, _HEAD_SET_CRC_CONTROL, _CONTROLLING_CHANNEL, 0) |
+        DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _EXPECT_BUFFER_COLLAPSE, _FALSE) |
+        DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE));
+}
+
+/*!
+ * Queries the current head's CRC Notifier and returns values if successful
+ *
+ * First waits for hardware to finish writing to the CRC32Notifier,
+ * and performs a read of the Compositor, SF/OR CRCs,
+ * and the RG CRC in numCRC32 frames.
+ * Crc fields in input array crc32 should be calloc'd to 0s.
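+ *
+ * (Layout note, for illustration: each notifier entry packs one frame's
+ * compositor, RG, and primary output CRCs; entry_stride in the function
+ * body is derived from the distance between the ENTRY0 and ENTRY1
+ * register offsets, and the STATUS_0 dword carries the DONE flag, the
+ * entry COUNT, and the per-CRC overflow bits decoded via flag_info.)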
+ * + * \param[in] pDevEvo NVKMS device pointer + * \param[in] pDma Pointer to DMA-mapped memory + * \param[in] sd Subdevice index + * \param[in] entry_count Number of independent frames to read CRCs from + * \param[out] crc32 Contains pointers to CRC output arrays + * \param[out] numCRC32 Number of CRC frames successfully read from DMA + * + * \return Returns TRUE if was able to successfully read CRCs from DMA, + * otherwise FALSE + */ +NvBool nvEvoQueryHeadCRC32_C3(NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NvU32 sd, + NvU32 entry_count, + CRC32NotifierCrcOut *crc32, + NvU32 *numCRC32) +{ + volatile NvU32 *pCRC32Notifier = pDma->subDeviceAddress[sd]; + const NvU32 entry_stride = + NVC37D_NOTIFIER_CRC_CRC_ENTRY1_21 - NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13; + // Define how many/which variables to read from each CRCNotifierEntry struct + const CRC32NotifierEntryRec field_info[NV_EVO3_NUM_CRC_FIELDS] = { + { + .field_offset = NVC37D_NOTIFIER_CRC_CRC_ENTRY0_11, + .field_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_11_COMPOSITOR_CRC), + .field_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_11_COMPOSITOR_CRC), + .field_frame_values = crc32->compositorCrc32, + }, + { + .field_offset = NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12, + .field_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12_RG_CRC), + .field_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12_RG_CRC), + .field_frame_values = crc32->rasterGeneratorCrc32, + }, + { + .field_offset = NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13, + .field_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13_PRIMARY_OUTPUT_CRC), + .field_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13_PRIMARY_OUTPUT_CRC), + .field_frame_values = crc32->outputCrc32 + } + }; + + const CRC32NotifierEntryFlags flag_info[NV_EVO3_NUM_CRC_FLAGS] = { + { + .flag_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_COUNT), + .flag_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_STATUS_0_COUNT), + .flag_type = NVEvoCrc32NotifierFlagCount + }, + { + .flag_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_COMPOSITOR_OVERFLOW), + .flag_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_STATUS_0_COMPOSITOR_OVERFLOW), + .flag_type = NVEvoCrc32NotifierFlagCrcOverflow + }, + { + .flag_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW), + .flag_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW), + .flag_type = NVEvoCrc32NotifierFlagCrcOverflow + }, + { + .flag_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_PRIMARY_OUTPUT_OVERFLOW), + .flag_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_STATUS_0_PRIMARY_OUTPUT_OVERFLOW), + .flag_type = NVEvoCrc32NotifierFlagCrcOverflow + } + }; + + if (!nvEvoWaitForCRC32Notifier(pDevEvo, + pCRC32Notifier, + NVC37D_NOTIFIER_CRC_STATUS_0, + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_DONE), + DRF_EXTENT(NVC37D_NOTIFIER_CRC_STATUS_0_DONE), + NVC37D_NOTIFIER_CRC_STATUS_0_DONE_TRUE)) { + return FALSE; + } + + *numCRC32 = nvEvoReadCRC32Notifier(pCRC32Notifier, + entry_stride, + entry_count, + NVC37D_NOTIFIER_CRC_STATUS_0, /* Status offset */ + NV_EVO3_NUM_CRC_FIELDS, + NV_EVO3_NUM_CRC_FLAGS, + field_info, + flag_info); + + nvEvoResetCRC32Notifier(pCRC32Notifier, + NVC37D_NOTIFIER_CRC_STATUS_0, + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_DONE), + NVC37D_NOTIFIER_CRC_STATUS_0_DONE_FALSE); + + return TRUE; +} + +void nvEvoGetScanLineC3(const NVDispEvoRec *pDispEvo, + const NvU32 head, + NvU16 *pScanLine, + NvBool *pInBlankingPeriod) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + 
const NvU32 window = head << 1; + void *pDma = pDevEvo->window[window]->pb.control[sd]; + const NvU32 scanLine = nvDmaLoadPioMethod(pDma, NVC37E_GET_LINE); + + if ((scanLine & NVBIT(15)) == 0) { + *pInBlankingPeriod = FALSE; + *pScanLine = scanLine & DRF_MASK(14:0); + } else { + *pInBlankingPeriod = TRUE; + } +} + +/* + * This method configures and programs the RG Core Semaphores. Default behavior + * is to continuously trigger on the specified rasterline when enabled. + */ +static void +EvoConfigureVblankSyncObjectC6(const NVDevEvoPtr pDevEvo, + const NvU16 rasterLine, + const NvU32 head, + const NvU32 semaphoreIndex, + const NVSurfaceDescriptor *pSurfaceDesc, + NVEvoUpdateState* pUpdateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* + * Populate the NVEvoUpdateState for the caller. The Update State contains + * a mask of which display channels need to be updated. + */ + nvUpdateUpdateState(pDevEvo, pUpdateState, pChannel); + + /* + * Tell HW what ctxdma entry to use to look up actual RG semaphore surface. + * If ctxdma handle is 0, HW will disable the semaphore. + */ + nvDmaSetStartEvoMethod(pChannel, + NVC67D_HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE(head, semaphoreIndex), + 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE, _HANDLE, + (pSurfaceDesc == NULL) ? 0 : pSurfaceDesc->ctxDmaHandle)); + + if ((pSurfaceDesc == NULL) || (pSurfaceDesc->ctxDmaHandle == 0)) { + /* Disabling semaphore so no configuration necessary. */ + return; + } + + /* + * Configure the semaphore with the following: + * Set OFFSET to 0 (default). + * Set PAYLOAD_SIZE to 32bits (default). + * Set REL_MODE to WRITE (default). + * Set RUN_MODE to CONTINUOUS. + * Set RASTER_LINE to start of Vblank: Vsync + Vbp + Vactive. + * + * Note that all these options together fit in 32bits, and that all 32 bits + * must be written each time any given option changes. + * + * The actual payload value doesn't currently matter since this RG + * semaphore will be mapped to a syncpt for now. Each HW-issued payload + * write is converted to a single syncpt increment irrespective of what the + * actual semaphore payload value is. + */ + nvDmaSetStartEvoMethod(pChannel, + NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL(head, semaphoreIndex), + 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _OFFSET, 0) | + DRF_DEF(C67D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _PAYLOAD_SIZE, + _PAYLOAD_32BIT) | + DRF_DEF(C67D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _REL_MODE, + _WRITE) | + DRF_DEF(C67D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _RUN_MODE, + _CONTINUOUS) | + DRF_NUM(C67D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _RASTER_LINE, + rasterLine)); +} + +static void EvoSetHdmiDscParams(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVDscInfoEvoRec *pDscInfo, + const enum nvKmsPixelDepth pixelDepth) +{ + NVEvoChannelPtr pChannel = pDispEvo->pDevEvo->core; + NvU32 bpc, flatnessDetThresh; + NvU32 i; + + nvAssert(pDispEvo->pDevEvo->hal->caps.supportsHDMIFRL && + pDscInfo->type == NV_DSC_INFO_EVO_TYPE_HDMI); + + bpc = nvPixelDepthToBitsPerComponent(pixelDepth); + if (bpc < 8) { + nvAssert(bpc >= 8); + bpc = 8; + } + flatnessDetThresh = (2 << (bpc - 8)); + + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_DSC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C67D, _HEAD_SET_DSC_CONTROL, _ENABLE, _TRUE) | + ((pDscInfo->hdmi.dscMode == NV_DSC_EVO_MODE_DUAL) ? 
+ DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _MODE, _DUAL) : + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _MODE, _SINGLE)) | + DRF_NUM(C67D, _HEAD_SET_DSC_CONTROL, _FLATNESS_DET_THRESH, flatnessDetThresh) | + DRF_DEF(C67D, _HEAD_SET_DSC_CONTROL, _FULL_ICH_ERR_PRECISION, _ENABLE) | + DRF_DEF(C67D, _HEAD_SET_DSC_CONTROL, _AUTO_RESET, _ENABLE) | + DRF_DEF(C67D, _HEAD_SET_DSC_CONTROL, _FORCE_ICH_RESET, _FALSE)); + + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_DSC_PPS_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C67D, _HEAD_SET_DSC_PPS_CONTROL, _ENABLE, _TRUE) | + DRF_DEF(C67D, _HEAD_SET_DSC_PPS_CONTROL, _LOCATION, _VBLANK) | + DRF_DEF(C67D, _HEAD_SET_DSC_PPS_CONTROL, _FREQUENCY, _EVERY_FRAME) | + /* MFS says "For FRL DSC CVTEM, it should be 0x21 (136bytes)." */ + DRF_NUM(C67D, _HEAD_SET_DSC_PPS_CONTROL, _SIZE, 0x21)); + + /* The loop below assumes the methods are tightly packed. */ + ct_assert(ARRAY_LEN(pDscInfo->hdmi.pps) == 32); + ct_assert((NVC67D_HEAD_SET_DSC_PPS_DATA1(0) - NVC67D_HEAD_SET_DSC_PPS_DATA0(0)) == 4); + ct_assert((NVC67D_HEAD_SET_DSC_PPS_DATA31(0) - NVC67D_HEAD_SET_DSC_PPS_DATA0(0)) == (31 * 4)); + for (i = 0; i < ARRAY_LEN(pDscInfo->hdmi.pps); i++) { + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_DSC_PPS_DATA0(head) + (i * 4), 1); + nvDmaSetEvoMethodData(pChannel, pDscInfo->hdmi.pps[i]); + } + + /* Byte 0 must be 0x7f, the rest are don't care (will be filled in by HW) */ + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_DSC_PPS_HEAD(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _HEAD_SET_DSC_PPS_HEAD, _BYTE0, 0x7f)); + + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_HDMI_DSC_HCACTIVE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _HEAD_SET_HDMI_DSC_HCACTIVE, _BYTES, pDscInfo->hdmi.dscHActiveBytes) | + DRF_NUM(C67D, _HEAD_SET_HDMI_DSC_HCACTIVE, _TRI_BYTES, pDscInfo->hdmi.dscHActiveTriBytes)); + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_HDMI_DSC_HCBLANK(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _HEAD_SET_HDMI_DSC_HCBLANK, _WIDTH, pDscInfo->hdmi.dscHBlankTriBytes)); +} + +static void EvoSetDpDscParams(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVDscInfoEvoRec *pDscInfo) +{ + NVEvoChannelPtr pChannel = pDispEvo->pDevEvo->core; + NvU32 flatnessDetThresh; + NvU32 i; + + nvAssert(pDscInfo->type == NV_DSC_INFO_EVO_TYPE_DP); + + // XXX: I'm pretty sure that this is wrong. + // BitsPerPixelx16 is something like (24 * 16) = 384, and 2 << (384 - 8) is + // an insanely large number. + flatnessDetThresh = (2 << (pDscInfo->dp.bitsPerPixelX16 - 8)); /* ??? */ + + nvAssert((pDscInfo->dp.dscMode == NV_DSC_EVO_MODE_DUAL) || + (pDscInfo->dp.dscMode == NV_DSC_EVO_MODE_SINGLE)); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DSC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _ENABLE, _TRUE) | + ((pDscInfo->dp.dscMode == NV_DSC_EVO_MODE_DUAL) ? 
+ DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _MODE, _DUAL) : + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _MODE, _SINGLE)) | + DRF_NUM(C57D, _HEAD_SET_DSC_CONTROL, _FLATNESS_DET_THRESH, flatnessDetThresh) | + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _FULL_ICH_ERR_PRECISION, _ENABLE) | + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _AUTO_RESET, _DISABLE) | + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _FORCE_ICH_RESET, _TRUE)); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DSC_PPS_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_DSC_PPS_CONTROL, _ENABLE, _TRUE) | + DRF_DEF(C57D, _HEAD_SET_DSC_PPS_CONTROL, _LOCATION, _VSYNC) | + DRF_DEF(C57D, _HEAD_SET_DSC_PPS_CONTROL, _FREQUENCY, _EVERY_FRAME) | + DRF_NUM(C57D, _HEAD_SET_DSC_PPS_CONTROL, _SIZE, 0x1F /* 32 PPS Dwords - 1 = 31 */)); + + +#define NV_EVO5_NUM_HEAD_SET_DSC_PPS_DATA_DWORDS \ + (((NVC57D_HEAD_SET_DSC_PPS_DATA31(0) - NVC57D_HEAD_SET_DSC_PPS_DATA0(0)) / 4) + 1) + + ct_assert(NV_EVO5_NUM_HEAD_SET_DSC_PPS_DATA_DWORDS <= ARRAY_LEN(pDscInfo->dp.pps)); + + for (i = 0; i < NV_EVO5_NUM_HEAD_SET_DSC_PPS_DATA_DWORDS; i++) { + nvDmaSetStartEvoMethod(pChannel,(NVC57D_HEAD_SET_DSC_PPS_DATA0(head) + (i * 4)), 1); + nvDmaSetEvoMethodData(pChannel, pDscInfo->dp.pps[i]); + } + + /* + * In case of DP, PPS is sent using the SDP over the Main-Link + * during the vertical blanking interval. The PPS SDP header is defined + * in DP 1.4 specification under section 2.2.5.9.1. + */ + + nvDmaSetStartEvoMethod(pChannel, + NVC57D_HEAD_SET_DSC_PPS_HEAD(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_DSC_PPS_HEAD, _BYTE0, 0x00) | /* SDP ID = 0x0 */ + DRF_NUM(C57D, _HEAD_SET_DSC_PPS_HEAD, _BYTE1, 0x10) | /* SDP Type = 0x10 */ + DRF_NUM(C57D, _HEAD_SET_DSC_PPS_HEAD, _BYTE2, 0x7f) | /* Number of payload data bytes - 1 = 0x7F */ + DRF_NUM(C57D, _HEAD_SET_DSC_PPS_HEAD, _BYTE3, 0x00)); /* Reserved */ +} + +static void EvoSetDscParamsC5(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVDscInfoEvoRec *pDscInfo, + const enum nvKmsPixelDepth pixelDepth) +{ + if (pDscInfo->type == NV_DSC_INFO_EVO_TYPE_HDMI) { + EvoSetHdmiDscParams(pDispEvo, head, pDscInfo, pixelDepth); + } else if (pDscInfo->type == NV_DSC_INFO_EVO_TYPE_DP) { + EvoSetDpDscParams(pDispEvo, head, pDscInfo); + } else { + NVEvoChannelPtr pChannel = pDispEvo->pDevEvo->core; + + nvAssert(pDscInfo->type == NV_DSC_INFO_EVO_TYPE_DISABLED); + + /* Disable DSC function */ + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DSC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _ENABLE, _FALSE)); + + /* Disable PPS SDP (Secondary-Data Packet), DP won't send out PPS SDP */ + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DSC_PPS_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_DSC_PPS_CONTROL, _ENABLE, _FALSE)); + } +} + +static void +EvoEnableMidFrameAndDWCFWatermarkC5(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + NvBool enable, + NVEvoUpdateState *pUpdateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + if (enable) { + pDevEvo->gpus[sd].setSwSpareA[head] = + FLD_SET_DRF(C37D, + _HEAD_SET_SW_SPARE_A, + _DISABLE_MID_FRAME_AND_DWCF_WATERMARK, + _FALSE, + pDevEvo->gpus[sd].setSwSpareA[head]); + } else { + pDevEvo->gpus[sd].setSwSpareA[head] = + FLD_SET_DRF(C37D, + _HEAD_SET_SW_SPARE_A, + _DISABLE_MID_FRAME_AND_DWCF_WATERMARK, + _TRUE, + pDevEvo->gpus[sd].setSwSpareA[head]); + } + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + + nvUpdateUpdateState(pDevEvo, pUpdateState, pChannel); + + 
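+    /*
+     * Note: pDevEvo->gpus[sd].setSwSpareA[head] is a software shadow of
+     * the SW_SPARE_A method word, so a single field can be toggled without
+     * losing the others; the full 32 bits are then re-sent below.  A rough
+     * sketch of the FLD_SET_DRF() read-modify-write above (FIELD_MASK and
+     * FIELD_VALUE are placeholder names, not the literal macro expansion):
+     *
+     *   NvU32 w = pDevEvo->gpus[sd].setSwSpareA[head];
+     *   w = (w & ~FIELD_MASK) | FIELD_VALUE;  // clear field, OR in value
+     *   pDevEvo->gpus[sd].setSwSpareA[head] = w;
+     */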
nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_SW_SPARE_A(head), 1); + nvDmaSetEvoMethodData(pChannel, pDevEvo->gpus[sd].setSwSpareA[head]); + + nvPopEvoSubDevMask(pDevEvo); +} + +NvU32 nvEvoGetActiveViewportOffsetC3(NVDispEvoRec *pDispEvo, NvU32 head) +{ + NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS params = { }; + NvU32 ret; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + params.base.subdeviceIndex = pDispEvo->displayOwner; + params.windowIndex = head << 1; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->rmCtrlHandle, + NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN, + ¶ms, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to query active viewport offset"); + } + + return params.activeViewportPointIn.y; +} + +static NvBool EvoComputeWindowScalingTapsC3(const NVDevEvoRec *pDevEvo, + const NVEvoChannel *pChannel, + NVFlipChannelEvoHwState *pHwState) +{ + NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask); + const NVEvoScalerCaps *pScalerCaps = + &pDevEvo->gpus[0].capabilities.window[win].scalerCaps; + + if (!nvAssignScalerTaps(pDevEvo, + pScalerCaps, + pHwState->sizeIn.width, pHwState->sizeIn.height, + pHwState->sizeOut.width, pHwState->sizeOut.height, + FALSE /* doubleScan */, + &pHwState->hTaps, &pHwState->vTaps)) { + return FALSE; + } + + return TRUE; +} + +NvBool nvEvoComputeWindowScalingTapsC5(const NVDevEvoRec *pDevEvo, + const NVEvoChannel *pChannel, + NVFlipChannelEvoHwState *pHwState) +{ + if (!EvoComputeWindowScalingTapsC3(pDevEvo, pChannel, pHwState)) { + return FALSE; + } + + /* + * If scaling is enabled, CSC11 will be used by NVKMS to convert from + * linear FP16 LMS to linear FP16 RGB. As such, the user-supplied precomp + * CSC can't be programmed into CSC11 in this case. + */ + if ((pHwState->sizeIn.width != pHwState->sizeOut.width) || + (pHwState->sizeIn.height != pHwState->sizeOut.height)) { + if (!nvIsCscMatrixIdentity(&pHwState->cscMatrix)) { + return FALSE; + } + } + + return TRUE; +} + +static void EvoSetMergeModeC5(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVEvoMergeMode mode, + NVEvoUpdateState* pUpdateState) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 data = 0x0; + + nvPushEvoSubDevMask(pDevEvo, NVBIT(pDispEvo->displayOwner)); + + nvUpdateUpdateState(pDevEvo, pUpdateState, pChannel); + + switch (mode) { + case NV_EVO_MERGE_MODE_DISABLED: + data = DRF_DEF(C57D, _HEAD_SET_RG_MERGE, _MODE, _DISABLE); + break; + case NV_EVO_MERGE_MODE_SETUP: + data = DRF_DEF(C57D, _HEAD_SET_RG_MERGE, _MODE, _SETUP); + break; + case NV_EVO_MERGE_MODE_PRIMARY: + data = DRF_DEF(C57D, _HEAD_SET_RG_MERGE, _MODE, _MASTER); + break; + case NV_EVO_MERGE_MODE_SECONDARY: + data = DRF_DEF(C57D, _HEAD_SET_RG_MERGE, _MODE, _SLAVE); + break; + } + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_RG_MERGE(head), 1); + nvDmaSetEvoMethodData(pChannel, data); + + nvPopEvoSubDevMask(pDevEvo); +} + +/* + * The 'type' the timing library writes into the NVT_INFOFRAME_HEADER + * structure is not the type that the HDMI library expects to see in its + * NvHdmiPkt_SetupAdvancedInfoframe call; those are NVHDMIPKT_TYPE_*. + * Map the timing library infoframe type to the + * NVHDMIPKT_TYPE_SHARED_GENERIC*. 
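+ *
+ * Concretely, the mapping implemented below is:
+ *
+ *   NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET -> NVHDMIPKT_TYPE_SHARED_GENERIC1
+ *   NVT_INFOFRAME_TYPE_VENDOR_SPECIFIC          -> NVHDMIPKT_TYPE_SHARED_GENERIC2
+ *   NVT_INFOFRAME_TYPE_DYNAMIC_RANGE_MASTERING  -> NVHDMIPKT_TYPE_SHARED_GENERIC3
+ *
+ * Any other type is rejected: the send path then falls back to the legacy
+ * nvEvo1SendHdmiInfoFrame() path, and the disable path simply returns.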
+ */ +static NvBool NvtToHdmiLibGenericInfoFramePktType(const NvU32 srcType, + NVHDMIPKT_TYPE *pDstType) +{ + NVHDMIPKT_TYPE hdmiLibType; + + switch (srcType) { + default: + return FALSE; + case NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET: + hdmiLibType = NVHDMIPKT_TYPE_SHARED_GENERIC1; + break; + case NVT_INFOFRAME_TYPE_VENDOR_SPECIFIC: + hdmiLibType = NVHDMIPKT_TYPE_SHARED_GENERIC2; + break; + case NVT_INFOFRAME_TYPE_DYNAMIC_RANGE_MASTERING: + hdmiLibType = NVHDMIPKT_TYPE_SHARED_GENERIC3; + break; + } + + *pDstType = hdmiLibType; + + return TRUE; +} + +static NvBool ConstructAdvancedInfoFramePacket( + const NVT_INFOFRAME_HEADER *pInfoFrameHeader, + const NvU32 infoframeSize, + const NvBool needChecksum, + const NvBool swChecksum, + NvU8 *pPacket, + const NvU32 packetLen) +{ + NvU8 hdmiPacketType; + const NvU8 *pPayload; + NvU32 payloadLen; + + if (!nvEvo1NvtToHdmiInfoFramePacketType(pInfoFrameHeader->type, + &hdmiPacketType)) { + return FALSE; + } + + /* + * XXX If required, add support for the large infoframe with + * multiple infoframes grouped together. + */ + nvAssert((infoframeSize + 1 /* + HB3 */ + (needChecksum ? 1 : 0)) <= + packetLen); + + pPacket[0] = hdmiPacketType; /* HB0 */ + + /* + * The fields and size of NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER + * match with those of NVT_INFOFRAME_HEADER at the time of writing, but + * nvtiming.h declares them separately. To be safe, special case + * NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET. + */ + if (pInfoFrameHeader->type == NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET) { + const NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER *pExtMetadataHeader = + (const NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER *) + pInfoFrameHeader; + + pPacket[1] = pExtMetadataHeader->firstLast; /* HB1 */ + pPacket[2] = pExtMetadataHeader->sequenceIndex; /* HB2 */ + + pPayload = (const NvU8 *)(pExtMetadataHeader + 1); + payloadLen = infoframeSize - + sizeof(NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER); + } else { + pPacket[1] = pInfoFrameHeader->version; /* HB1 */ + pPacket[2] = pInfoFrameHeader->length; /* HB2 */ + + pPayload = (const NvU8 *)(pInfoFrameHeader + 1); + payloadLen = infoframeSize - sizeof(NVT_INFOFRAME_HEADER); + } + pPacket[3] = 0; /* HB3, reserved */ + + if (needChecksum) { + pPacket[4] = 0; /* PB0: checksum */ + + nvkms_memcpy(&pPacket[5], pPayload, payloadLen); /* PB1~ */ + + if (swChecksum) { + NvU8 checksum = 0; + + for (NvU32 i = 0; i < packetLen; i++) { + checksum += pPacket[i]; + } + pPacket[4] = ~checksum + 1; + } + } else { + nvAssert(!swChecksum); + nvkms_memcpy(&pPacket[4], pPayload, payloadLen); /* PB0~ */ + } + + return TRUE; +} + +void nvEvoSendHdmiInfoFrameC8(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvEvoInfoFrameTransmitControl transmitCtrl, + const NVT_INFOFRAME_HEADER *pInfoFrameHeader, + const NvU32 infoFrameSize, + NvBool needChecksum) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVHDMIPKT_TYPE hdmiLibType; + NVHDMIPKT_RESULT ret; + ADVANCED_INFOFRAME advancedInfoFrame = { }; + NvBool swChecksum; + + /* + * These structures are weird. The NVT_VIDEO_INFOFRAME, + * NVT_VENDOR_SPECIFIC_INFOFRAME, + * NVT_EXTENDED_METADATA_PACKET_INFOFRAME, etc structures are *kind + * of* what we want to send to the hdmipkt library, except the type + * in the header is different, and a single checksum byte may need + * to be inserted *between* the header and the payload (requiring us + * to allocate a buffer one byte larger). 
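+     *
+     * For reference, the buffer assembled by ConstructAdvancedInfoFramePacket()
+     * above is laid out as:
+     *
+     *   packet[0]    HB0: hdmipkt packet type
+     *   packet[1]    HB1: version (firstLast for extended metadata packets)
+     *   packet[2]    HB2: length  (sequenceIndex for extended metadata packets)
+     *   packet[3]    HB3: 0 (reserved)
+     *   packet[4]    PB0: checksum byte if needChecksum, else first payload byte
+     *   packet[5..]  remaining payload copied from after the NVT header
+     *
+     * The software checksum, when used, is the two's complement of the byte
+     * sum (~checksum + 1), chosen so that all packet bytes sum to zero.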
+     */
+    NvU8 packet[36] = { };
+
+    if (!NvtToHdmiLibGenericInfoFramePktType(pInfoFrameHeader->type,
+                                             &hdmiLibType)) {
+        nvEvo1SendHdmiInfoFrame(pDispEvo, head, transmitCtrl, pInfoFrameHeader,
+                                infoFrameSize, needChecksum);
+        return;
+    }
+
+    switch (transmitCtrl) {
+        case NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME:
+            advancedInfoFrame.runMode = INFOFRAME_CTRL_RUN_MODE_ALWAYS;
+            break;
+        case NV_EVO_INFOFRAME_TRANSMIT_CONTROL_SINGLE_FRAME:
+            advancedInfoFrame.runMode = INFOFRAME_CTRL_RUN_MODE_ONCE;
+            break;
+    }
+    advancedInfoFrame.location = INFOFRAME_CTRL_LOC_VBLANK;
+    advancedInfoFrame.hwChecksum = needChecksum;
+
+    // Large infoframes are incompatible with hwChecksum
+    nvAssert(!(advancedInfoFrame.isLargeInfoframe &&
+               advancedInfoFrame.hwChecksum));
+
+    // XXX WAR bug 5124145 by always computing checksum in software if needed.
+    swChecksum = needChecksum;
+
+    // If we need a checksum: hwChecksum, swChecksum, or both must be enabled.
+    nvAssert(!needChecksum ||
+             (advancedInfoFrame.hwChecksum || swChecksum));
+
+    if (!ConstructAdvancedInfoFramePacket(pInfoFrameHeader,
+                                          infoFrameSize,
+                                          needChecksum,
+                                          swChecksum,
+                                          packet,
+                                          sizeof(packet))) {
+        return;
+    }
+
+    advancedInfoFrame.packetLen = sizeof(packet);
+    advancedInfoFrame.pPacket = packet;
+
+    ret = NvHdmiPkt_SetupAdvancedInfoframe(pDevEvo->hdmiLib.handle,
+                                           pDispEvo->displayOwner,
+                                           head,
+                                           hdmiLibType,
+                                           &advancedInfoFrame);
+    if (ret != NVHDMIPKT_SUCCESS) {
+        nvAssert(ret == NVHDMIPKT_SUCCESS);
+    }
+}
+
+void nvEvoDisableHdmiInfoFrameC8(const NVDispEvoRec *pDispEvo,
+                                 const NvU32 head,
+                                 const NvU8 nvtInfoFrameType)
+{
+    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVHDMIPKT_TYPE hdmiLibType;
+    NVHDMIPKT_RESULT ret;
+
+    if (!NvtToHdmiLibGenericInfoFramePktType(nvtInfoFrameType,
+                                             &hdmiLibType)) {
+        return;
+    }
+
+    ret = NvHdmiPkt_PacketCtrl(pDevEvo->hdmiLib.handle,
+                               pDispEvo->displayOwner,
+                               pHeadState->activeRmId,
+                               head,
+                               hdmiLibType,
+                               NVHDMIPKT_TRANSMIT_CONTROL_DISABLE);
+    if (ret != NVHDMIPKT_SUCCESS) {
+        nvAssert(!"Failed to disable vendor specific infoframe");
+    }
+}
+
+void nvEvoSendDpInfoFrameSdpC8(const NVDispEvoRec *pDispEvo,
+                               const NvU32 head,
+                               const NvEvoInfoFrameTransmitControl transmitCtrl,
+                               const DPSDP_DESCRIPTOR *sdp)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVHDMIPKT_RESULT ret;
+    ADVANCED_INFOFRAME advanceInfoFrame = { };
+    NvU8 packet[36] = { };
+
+    /*
+     * XXX Using NVHDMIPKT_TYPE_SHARED_GENERIC1 for DP HDR SDP, add
+     * support for other infoframes as needed.
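+     *
+     * The SDP header plus sdp->dataSize payload bytes are copied into the
+     * fixed 36-byte packet buffer below; NV_MIN() clamps the copy to the
+     * buffer size, and an nvAssert() checks that the descriptor actually
+     * fits before the copy.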
+ */ + if (sdp->hb.hb1 != dp_pktType_DynamicRangeMasteringInfoFrame) { + nvAssert(!"Unsupported infoframe"); + return; + } + + nvAssert((sizeof(sdp->hb) + sdp->dataSize) <= sizeof(packet)); + + nvkms_memcpy(packet, &sdp->hb, + NV_MIN((sizeof(sdp->hb) + sdp->dataSize), sizeof(packet))); + + switch (transmitCtrl) { + case NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME: + advanceInfoFrame.runMode = INFOFRAME_CTRL_RUN_MODE_ALWAYS; + break; + case NV_EVO_INFOFRAME_TRANSMIT_CONTROL_SINGLE_FRAME: + advanceInfoFrame.runMode = INFOFRAME_CTRL_RUN_MODE_ONCE; + break; + } + advanceInfoFrame.location = INFOFRAME_CTRL_LOC_VBLANK; + advanceInfoFrame.packetLen = sizeof(packet); + advanceInfoFrame.pPacket = packet; + + ret = NvHdmiPkt_SetupAdvancedInfoframe(pDevEvo->hdmiLib.handle, + pDispEvo->displayOwner, + head, + NVHDMIPKT_TYPE_SHARED_GENERIC1, + &advanceInfoFrame); + if (ret != NVHDMIPKT_SUCCESS) { + nvAssert(ret == NVHDMIPKT_SUCCESS); + } +} + +static NvU32 EvoAllocSurfaceDescriptorC3( + NVDevEvoPtr pDevEvo, NVSurfaceDescriptor *pSurfaceDesc, + NvU32 memoryHandle, NvU32 localCtxDmaFlags, + NvU64 limit, + NvBool mapToDisplayRm) +{ + return nvCtxDmaAlloc(pDevEvo, &pSurfaceDesc->ctxDmaHandle, + memoryHandle, + localCtxDmaFlags, limit); +} + +static void EvoFreeSurfaceDescriptorC3( + NVDevEvoPtr pDevEvo, + NvU32 deviceHandle, + NVSurfaceDescriptor *pSurfaceDesc) +{ + nvCtxDmaFree(pDevEvo, deviceHandle, &pSurfaceDesc->ctxDmaHandle); +} + +static NvU32 EvoBindSurfaceDescriptorC3( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NVSurfaceDescriptor *pSurfaceDesc) +{ + return nvCtxDmaBind(pDevEvo, pChannel, pSurfaceDesc->ctxDmaHandle); +} + +NVEvoHAL nvEvoC3 = { + EvoSetRasterParamsC3, /* SetRasterParams */ + EvoSetProcAmpC3, /* SetProcAmp */ + EvoSetHeadControlC3, /* SetHeadControl */ + EvoSetHeadRefClkC3, /* SetHeadRefClk */ + EvoHeadSetControlORC3, /* HeadSetControlOR */ + nvEvoORSetControlC3, /* ORSetControl */ + EvoHeadSetDisplayIdC3, /* HeadSetDisplayId */ + EvoSetUsageBoundsC3, /* SetUsageBounds */ + nvEvoUpdateC3, /* Update */ + nvEvoIsModePossibleC3, /* IsModePossible */ + nvEvoPrePostIMPC3, /* PrePostIMP */ + nvEvoSetNotifierC3, /* SetNotifier */ + EvoGetCapabilitiesC3, /* GetCapabilities */ + EvoFlipC3, /* Flip */ + EvoFlipTransitionWARC3, /* FlipTransitionWAR */ + EvoFillLUTSurfaceC3, /* FillLUTSurface */ + EvoSetOutputLutC3, /* SetOutputLut */ + EvoSetOutputScalerC3, /* SetOutputScaler */ + EvoSetViewportPointInC3, /* SetViewportPointIn */ + EvoSetViewportInOutC3, /* SetViewportInOut */ + EvoSetCursorImageC3, /* SetCursorImage */ + nvEvoValidateCursorSurfaceC3, /* ValidateCursorSurface */ + EvoValidateWindowFormatC3, /* ValidateWindowFormat */ + nvEvoInitCompNotifierC3, /* InitCompNotifier */ + nvEvoIsCompNotifierCompleteC3, /* IsCompNotifierComplete */ + nvEvoWaitForCompNotifierC3, /* WaitForCompNotifier */ + EvoSetDitherC3, /* SetDither */ + EvoSetStallLockC3, /* SetStallLock */ + EvoSetDisplayRateC3, /* SetDisplayRate */ + EvoInitChannelC3, /* InitChannel */ + NULL, /* InitDefaultLut */ + EvoInitWindowMappingC3, /* InitWindowMapping */ + nvEvoIsChannelIdleC3, /* IsChannelIdle */ + nvEvoIsChannelMethodPendingC3, /* IsChannelMethodPending */ + nvEvoForceIdleSatelliteChannelC3, /* ForceIdleSatelliteChannel */ + nvEvoForceIdleSatelliteChannelIgnoreLockC3, /* ForceIdleSatelliteChannelIgnoreLock */ + nvEvoAccelerateChannelC3, /* AccelerateChannel */ + nvEvoResetChannelAcceleratorsC3, /* ResetChannelAccelerators */ + nvEvoAllocRmCtrlObjectC3, /* AllocRmCtrlObject */ + nvEvoFreeRmCtrlObjectC3, /* 
FreeRmCtrlObject */ + nvEvoSetImmPointOutC3, /* SetImmPointOut */ + EvoStartHeadCRC32CaptureC3, /* StartCRC32Capture */ + EvoStopHeadCRC32CaptureC3, /* StopCRC32Capture */ + nvEvoQueryHeadCRC32_C3, /* QueryCRC32 */ + nvEvoGetScanLineC3, /* GetScanLine */ + NULL, /* ConfigureVblankSyncObject */ + nvEvo1SetDscParams, /* SetDscParams */ + NULL, /* EnableMidFrameAndDWCFWatermark */ + nvEvoGetActiveViewportOffsetC3, /* GetActiveViewportOffset */ + NULL, /* ClearSurfaceUsage */ + EvoComputeWindowScalingTapsC3, /* ComputeWindowScalingTaps */ + nvEvoGetWindowScalingCapsC3, /* GetWindowScalingCaps */ + NULL, /* SetMergeMode */ + nvEvo1SendHdmiInfoFrame, /* SendHdmiInfoFrame */ + nvEvo1DisableHdmiInfoFrame, /* DisableHdmiInfoFrame */ + nvEvo1SendDpInfoFrameSdp, /* SendDpInfoFrameSdp */ + EvoAllocSurfaceDescriptorC3, /* AllocSurfaceDescriptor */ + EvoFreeSurfaceDescriptorC3, /* FreeSurfaceDescriptor */ + EvoBindSurfaceDescriptorC3, /* BindSurfaceDescriptor */ + NULL, /* SetTmoLutSurfaceAddress */ + NULL, /* SetILUTSurfaceAddress */ + EvoSetISOSurfaceAddressC3, /* SetISOSurfaceAddress */ + EvoSetCoreNotifierSurfaceAddressAndControlC3, /* SetCoreNotifierSurfaceAddressAndControl */ + EvoSetWinNotifierSurfaceAddressAndControlC3, /* SetWinNotifierSurfaceAddressAndControl */ + NULL, /* SetSemaphoreSurfaceAddressAndControl */ + NULL, /* SetAcqSemaphoreSurfaceAddressAndControl */ + { /* caps */ + TRUE, /* supportsNonInterlockedUsageBoundsUpdate */ + TRUE, /* supportsDisplayRate */ + FALSE, /* supportsFlipLockRGStatus */ + FALSE, /* needDefaultLutSurface */ + FALSE, /* hasUnorm10OLUT */ + FALSE, /* supportsImageSharpening */ + FALSE, /* supportsHDMIVRR */ + FALSE, /* supportsCoreChannelSurface */ + FALSE, /* supportsHDMIFRL */ + TRUE, /* supportsSetStorageMemoryLayout */ + FALSE, /* supportsIndependentAcqRelSemaphore */ + FALSE, /* supportsCoreLut */ + TRUE, /* supportsSynchronizedOverlayPositionUpdate */ + FALSE, /* supportsVblankSyncObjects */ + FALSE, /* requiresScalingTapsInBothDimensions */ + FALSE, /* supportsMergeMode */ + FALSE, /* supportsHDMI10BPC */ + FALSE, /* supportsDPAudio192KHz */ + FALSE, /* supportsInputColorSpace */ + FALSE, /* supportsInputColorRange */ + FALSE, /* supportsYCbCr422OverHDMIFRL */ + NV_EVO3_SUPPORTED_DITHERING_MODES, /* supportedDitheringModes */ + sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS), /* impStructSize */ + NV_EVO_SCALER_2TAPS, /* minScalerTaps */ + NV_EVO3_X_EMULATED_SURFACE_MEMORY_FORMATS_C3, /* xEmulatedSurfaceMemoryFormats */ + }, +}; + +NVEvoHAL nvEvoC5 = { + EvoSetRasterParamsC5, /* SetRasterParams */ + EvoSetProcAmpC5, /* SetProcAmp */ + EvoSetHeadControlC3, /* SetHeadControl */ + EvoSetHeadRefClkC3, /* SetHeadRefClk */ + EvoHeadSetControlORC5, /* HeadSetControlOR */ + nvEvoORSetControlC3, /* ORSetControl */ + EvoHeadSetDisplayIdC3, /* HeadSetDisplayId */ + nvEvoSetUsageBoundsC5, /* SetUsageBounds */ + nvEvoUpdateC3, /* Update */ + nvEvoIsModePossibleC3, /* IsModePossible */ + nvEvoPrePostIMPC3, /* PrePostIMP */ + nvEvoSetNotifierC3, /* SetNotifier */ + EvoGetCapabilitiesC5, /* GetCapabilities */ + EvoFlipC5, /* Flip */ + EvoFlipTransitionWARC5, /* FlipTransitionWAR */ + nvEvoFillLUTSurfaceC5, /* FillLUTSurface */ + EvoSetOutputLutC5, /* SetOutputLut */ + EvoSetOutputScalerC3, /* SetOutputScaler */ + EvoSetViewportPointInC3, /* SetViewportPointIn */ + EvoSetViewportInOutC5, /* SetViewportInOut */ + EvoSetCursorImageC3, /* SetCursorImage */ + nvEvoValidateCursorSurfaceC3, /* ValidateCursorSurface */ + EvoValidateWindowFormatC5, /* ValidateWindowFormat */ + 
nvEvoInitCompNotifierC3, /* InitCompNotifier */ + nvEvoIsCompNotifierCompleteC3, /* IsCompNotifierComplete */ + nvEvoWaitForCompNotifierC3, /* WaitForCompNotifier */ + EvoSetDitherC3, /* SetDither */ + EvoSetStallLockC3, /* SetStallLock */ + EvoSetDisplayRateC3, /* SetDisplayRate */ + EvoInitChannelC5, /* InitChannel */ + nvEvoInitDefaultLutC5, /* InitDefaultLut */ + nvEvoInitWindowMappingC5, /* InitWindowMapping */ + nvEvoIsChannelIdleC3, /* IsChannelIdle */ + nvEvoIsChannelMethodPendingC3, /* IsChannelMethodPending */ + nvEvoForceIdleSatelliteChannelC3, /* ForceIdleSatelliteChannel */ + nvEvoForceIdleSatelliteChannelIgnoreLockC3, /* ForceIdleSatelliteChannelIgnoreLock */ + nvEvoAccelerateChannelC3, /* AccelerateChannel */ + nvEvoResetChannelAcceleratorsC3, /* ResetChannelAccelerators */ + nvEvoAllocRmCtrlObjectC3, /* AllocRmCtrlObject */ + nvEvoFreeRmCtrlObjectC3, /* FreeRmCtrlObject */ + nvEvoSetImmPointOutC3, /* SetImmPointOut */ + EvoStartHeadCRC32CaptureC3, /* StartCRC32Capture */ + EvoStopHeadCRC32CaptureC3, /* StopCRC32Capture */ + nvEvoQueryHeadCRC32_C3, /* QueryCRC32 */ + nvEvoGetScanLineC3, /* GetScanLine */ + NULL, /* ConfigureVblankSyncObject */ + EvoSetDscParamsC5, /* SetDscParams */ + EvoEnableMidFrameAndDWCFWatermarkC5, /* EnableMidFrameAndDWCFWatermark */ + nvEvoGetActiveViewportOffsetC3, /* GetActiveViewportOffset */ + NULL, /* ClearSurfaceUsage */ + nvEvoComputeWindowScalingTapsC5, /* ComputeWindowScalingTaps */ + nvEvoGetWindowScalingCapsC3, /* GetWindowScalingCaps */ + EvoSetMergeModeC5, /* SetMergeMode */ + nvEvo1SendHdmiInfoFrame, /* SendHdmiInfoFrame */ + nvEvo1DisableHdmiInfoFrame, /* DisableHdmiInfoFrame */ + nvEvo1SendDpInfoFrameSdp, /* SendDpInfoFrameSdp */ + EvoAllocSurfaceDescriptorC3, /* AllocSurfaceDescriptor */ + EvoFreeSurfaceDescriptorC3, /* FreeSurfaceDescriptor */ + EvoBindSurfaceDescriptorC3, /* BindSurfaceDescriptor */ + EvoSetTmoLutSurfaceAddressC5, /* SetTmoLutSurfaceAddress */ + EvoSetILUTSurfaceAddressC5, /* SetILUTSurfaceAddress */ + EvoSetISOSurfaceAddressC3, /* SetISOSurfaceAddress */ + EvoSetCoreNotifierSurfaceAddressAndControlC3, /* SetCoreNotifierSurfaceAddressAndControl */ + EvoSetWinNotifierSurfaceAddressAndControlC3, /* SetWinNotifierSurfaceAddressAndControl */ + NULL, /* SetSemaphoreSurfaceAddressAndControl */ + NULL, /* SetAcqSemaphoreSurfaceAddressAndControl */ + { /* caps */ + TRUE, /* supportsNonInterlockedUsageBoundsUpdate */ + TRUE, /* supportsDisplayRate */ + FALSE, /* supportsFlipLockRGStatus */ + TRUE, /* needDefaultLutSurface */ + TRUE, /* hasUnorm10OLUT */ + FALSE, /* supportsImageSharpening */ + TRUE, /* supportsHDMIVRR */ + FALSE, /* supportsCoreChannelSurface */ + FALSE, /* supportsHDMIFRL */ + TRUE, /* supportsSetStorageMemoryLayout */ + FALSE, /* supportsIndependentAcqRelSemaphore */ + FALSE, /* supportsCoreLut */ + TRUE, /* supportsSynchronizedOverlayPositionUpdate */ + FALSE, /* supportsVblankSyncObjects */ + FALSE, /* requiresScalingTapsInBothDimensions */ + TRUE, /* supportsMergeMode */ + FALSE, /* supportsHDMI10BPC */ + FALSE, /* supportsDPAudio192KHz */ + TRUE, /* supportsInputColorSpace */ + TRUE, /* supportsInputColorRange */ + FALSE, /* supportsYCbCr422OverHDMIFRL */ + NV_EVO3_SUPPORTED_DITHERING_MODES, /* supportedDitheringModes */ + sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS), /* impStructSize */ + NV_EVO_SCALER_2TAPS, /* minScalerTaps */ + NV_EVO3_X_EMULATED_SURFACE_MEMORY_FORMATS_C5, /* xEmulatedSurfaceMemoryFormats */ + }, +}; + +NVEvoHAL nvEvoC6 = { + EvoSetRasterParamsC6, /* SetRasterParams */ + 
EvoSetProcAmpC5, /* SetProcAmp */ + EvoSetHeadControlC3, /* SetHeadControl */ + EvoSetHeadRefClkC3, /* SetHeadRefClk */ + EvoHeadSetControlORC5, /* HeadSetControlOR */ + EvoORSetControlC6, /* ORSetControl */ + EvoHeadSetDisplayIdC3, /* HeadSetDisplayId */ + nvEvoSetUsageBoundsC5, /* SetUsageBounds */ + nvEvoUpdateC3, /* Update */ + nvEvoIsModePossibleC3, /* IsModePossible */ + nvEvoPrePostIMPC3, /* PrePostIMP */ + nvEvoSetNotifierC3, /* SetNotifier */ + nvEvoGetCapabilitiesC6, /* GetCapabilities */ + nvEvoFlipC6, /* Flip */ + nvEvoFlipTransitionWARC6, /* FlipTransitionWAR */ + nvEvoFillLUTSurfaceC5, /* FillLUTSurface */ + EvoSetOutputLutC5, /* SetOutputLut */ + EvoSetOutputScalerC3, /* SetOutputScaler */ + EvoSetViewportPointInC3, /* SetViewportPointIn */ + EvoSetViewportInOutC5, /* SetViewportInOut */ + EvoSetCursorImageC3, /* SetCursorImage */ + nvEvoValidateCursorSurfaceC3, /* ValidateCursorSurface */ + nvEvoValidateWindowFormatC6, /* ValidateWindowFormat */ + nvEvoInitCompNotifierC3, /* InitCompNotifier */ + nvEvoIsCompNotifierCompleteC3, /* IsCompNotifierComplete */ + nvEvoWaitForCompNotifierC3, /* WaitForCompNotifier */ + EvoSetDitherC3, /* SetDither */ + EvoSetStallLockC3, /* SetStallLock */ + EvoSetDisplayRateC3, /* SetDisplayRate */ + EvoInitChannelC5, /* InitChannel */ + nvEvoInitDefaultLutC5, /* InitDefaultLut */ + nvEvoInitWindowMappingC5, /* InitWindowMapping */ + nvEvoIsChannelIdleC3, /* IsChannelIdle */ + nvEvoIsChannelMethodPendingC3, /* IsChannelMethodPending */ + nvEvoForceIdleSatelliteChannelC3, /* ForceIdleSatelliteChannel */ + nvEvoForceIdleSatelliteChannelIgnoreLockC3, /* ForceIdleSatelliteChannelIgnoreLock */ + nvEvoAccelerateChannelC3, /* AccelerateChannel */ + nvEvoResetChannelAcceleratorsC3, /* ResetChannelAccelerators */ + nvEvoAllocRmCtrlObjectC3, /* AllocRmCtrlObject */ + nvEvoFreeRmCtrlObjectC3, /* FreeRmCtrlObject */ + nvEvoSetImmPointOutC3, /* SetImmPointOut */ + EvoStartHeadCRC32CaptureC3, /* StartCRC32Capture */ + EvoStopHeadCRC32CaptureC3, /* StopCRC32Capture */ + nvEvoQueryHeadCRC32_C3, /* QueryCRC32 */ + nvEvoGetScanLineC3, /* GetScanLine */ + EvoConfigureVblankSyncObjectC6, /* ConfigureVblankSyncObject */ + EvoSetDscParamsC5, /* SetDscParams */ + NULL, /* EnableMidFrameAndDWCFWatermark */ + nvEvoGetActiveViewportOffsetC3, /* GetActiveViewportOffset */ + NULL, /* ClearSurfaceUsage */ + nvEvoComputeWindowScalingTapsC5, /* ComputeWindowScalingTaps */ + nvEvoGetWindowScalingCapsC3, /* GetWindowScalingCaps */ + EvoSetMergeModeC5, /* SetMergeMode */ + nvEvo1SendHdmiInfoFrame, /* SendHdmiInfoFrame */ + nvEvo1DisableHdmiInfoFrame, /* DisableHdmiInfoFrame */ + nvEvo1SendDpInfoFrameSdp, /* SendDpInfoFrameSdp */ + EvoAllocSurfaceDescriptorC3, /* AllocSurfaceDescriptor */ + EvoFreeSurfaceDescriptorC3, /* FreeSurfaceDescriptor */ + EvoBindSurfaceDescriptorC3, /* BindSurfaceDescriptor */ + EvoSetTmoLutSurfaceAddressC5, /* SetTmoLutSurfaceAddress */ + EvoSetILUTSurfaceAddressC5, /* SetILUTSurfaceAddress */ + EvoSetISOSurfaceAddressC3, /* SetISOSurfaceAddress */ + EvoSetCoreNotifierSurfaceAddressAndControlC3, /* SetCoreNotifierSurfaceAddressAndControl */ + EvoSetWinNotifierSurfaceAddressAndControlC3, /* SetWinNotifierSurfaceAddressAndControl */ + EvoSetSemaphoreSurfaceAddressAndControlC6, /* SetSemaphoreSurfaceAddressAndControl */ + EvoSetAcqSemaphoreSurfaceAddressAndControlC6, /* SetAcqSemaphoreSurfaceAddressAndControl */ + { /* caps */ + TRUE, /* supportsNonInterlockedUsageBoundsUpdate */ + TRUE, /* supportsDisplayRate */ + FALSE, /* supportsFlipLockRGStatus */ + 
TRUE, /* needDefaultLutSurface */ + TRUE, /* hasUnorm10OLUT */ + FALSE, /* supportsImageSharpening */ + TRUE, /* supportsHDMIVRR */ + FALSE, /* supportsCoreChannelSurface */ + FALSE, /* supportsHDMIFRL */ + FALSE, /* supportsSetStorageMemoryLayout */ + TRUE, /* supportsIndependentAcqRelSemaphore */ + FALSE, /* supportsCoreLut */ + TRUE, /* supportsSynchronizedOverlayPositionUpdate */ + TRUE, /* supportsVblankSyncObjects */ + FALSE, /* requiresScalingTapsInBothDimensions */ + TRUE, /* supportsMergeMode */ + FALSE, /* supportsHDMI10BPC */ + FALSE, /* supportsDPAudio192KHz */ + TRUE, /* supportsInputColorSpace */ + TRUE, /* supportsInputColorRange */ + FALSE, /* supportsYCbCr422OverHDMIFRL */ + NV_EVO3_SUPPORTED_DITHERING_MODES, /* supportedDitheringModes */ + sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS), /* impStructSize */ + NV_EVO_SCALER_2TAPS, /* minScalerTaps */ + NV_EVO3_X_EMULATED_SURFACE_MEMORY_FORMATS_C6, /* xEmulatedSurfaceMemoryFormats */ + }, +}; + diff --git a/src/nvidia-modeset/src/nvkms-evo4.c b/src/nvidia-modeset/src/nvkms-evo4.c new file mode 100644 index 0000000..7b6d497 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-evo4.c @@ -0,0 +1,1913 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file contains implementations of the EVO HAL methods for display class + * 9.x (also known as "nvdisplay"). + */ + +#include "nvkms-dma.h" +#include "nvkms-types.h" +#include "nvkms-rmapi.h" +#include "nvkms-surface.h" +#include "nvkms-softfloat.h" +#include "nvkms-evo.h" +#include "nvkms-evo1.h" +#include "nvkms-evo3.h" +#include "nvkms-prealloc.h" +#include "nv-float.h" +#include "nvkms-dpy.h" +#include "nvkms-vrr.h" +#include "nvkms-sync.h" +#include // NVC37D_NOTIFIER_CRC +#include +#include // NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS +#include +#include // NVC973_DISP_CAPABILITIES +#include "class/clc97d.h" // NVC97D_CORE_CHANNEL_DMA +#include // NVC97D_HEAD_SET_SW_SPARE_* +#include "class/clc97e.h" // NVC97E_WINDOW_CHANNEL_DMA + +/* + * XXX temporary WAR: See Bug 4146656 + * Currently RM expects ctxdma handle in hObjectBuffer field + * of NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS, which is used to + * allocate PB channel. 
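+ *
+ * Until that is fixed, the NV_EVO4_PB_ALLOC_WAR define below keeps a
+ * ctxdma allocation path in this file (hence the nvkms-ctxdma.h include),
+ * even though display class 9.x otherwise programs physical surface
+ * addresses directly.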
+ */
+#define NV_EVO4_PB_ALLOC_WAR
+
+#if defined(NV_EVO4_PB_ALLOC_WAR)
+#include "nvkms-ctxdma.h"
+#endif
+
+static NvU8 GetSurfaceAddressTarget(const NVDevEvoRec *pDevEvo,
+                                    const NVSurfaceDescriptor *pSurfaceDesc)
+{
+    switch (pSurfaceDesc->memAperture) {
+        case NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_VIDMEM:
+            nvAssert(!pDevEvo->isSOCDisplay);
+            return NVC97E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_NVM;
+        case NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_SYSMEM:
+            if (pDevEvo->isSOCDisplay) {
+                return NVC97E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_IOVA;
+            } else {
+                return NVC97E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_PCI_COHERENT;
+            }
+        default:
+            nvAssert(!"Unknown memory aperture");
+            break;
+    }
+    return 0x0;
+}
+
+static void GetSurfaceAddress(const NVDevEvoRec *pDevEvo,
+                              const NVSurfaceDescriptor *pSurfaceDesc,
+                              const NvU32 offset,
+                              NvU32 *pAddressHi,
+                              NvU32 *pAddressLo,
+                              NvBool *pEnable,
+                              NvU8 *pTarget)
+{
+    if (pSurfaceDesc == NULL) {
+        *pAddressHi = *pAddressLo = 0;
+        *pEnable = FALSE;
+        *pTarget = 0x0;
+        return;
+    }
+
+    NvU64 address = pSurfaceDesc->memOffset + offset;
+    *pTarget = GetSurfaceAddressTarget(pDevEvo, pSurfaceDesc);
+    *pAddressHi = (address >> 32) & 0xFFFFFFFF;
+    *pAddressLo = ((address) & 0xFFFFFFFF) >> 4;
+    *pEnable = TRUE;
+}
+
+static void InitScalerCoefficientsPostcomp9(NVDevEvoPtr pDevEvo,
+                                            NVEvoChannelPtr pChannel,
+                                            NvU32 coeff, NvU32 index)
+{
+    NvU32 h;
+
+    for (h = 0; h < pDevEvo->numHeads; h++) {
+        nvDmaSetStartEvoMethod(pChannel,
+            NVC97D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE(h), 1);
+        nvDmaSetEvoMethodData(pChannel,
+            DRF_NUM(C97D, _HEAD_SET_OUTPUT_SCALER_COEFF_VALUE, _DATA, coeff) |
+            DRF_NUM(C97D, _HEAD_SET_OUTPUT_SCALER_COEFF_VALUE, _INDEX, index));
+    }
+}
+
+static void InitTaps5ScalerCoefficientsC9(NVDevEvoPtr pDevEvo,
+                                          NVEvoChannelPtr pChannel,
+                                          NvBool isPrecomp)
+{
+    NvU8 ratio;
+
+    if (isPrecomp) {
+        const NVEvoWindowCaps *pWinCaps =
+            &pDevEvo->gpus[0].capabilities.window[pChannel->instance];
+        const NVEvoScalerCaps *pScalerCaps = &pWinCaps->scalerCaps;
+
+        if (!pScalerCaps->present) {
+            return;
+        }
+    }
+
+    for (ratio = 0; ratio < NUM_SCALER_RATIOS; ratio++) {
+        NvU8 phase;
+        for (phase = 0; phase < NUM_TAPS5_COEFF_PHASES; phase++) {
+            NvU8 coeffIdx;
+            for (coeffIdx = 0; coeffIdx < NUM_TAPS5_COEFF_VALUES; coeffIdx++) {
+                NvU32 coeff = scalerTaps5Coeff[ratio][phase][coeffIdx];
+                NvU32 index = ratio << 6 | phase << 2 | coeffIdx;
+
+                if (isPrecomp) {
+                    nvInitScalerCoefficientsPrecomp5(pChannel, coeff, index);
+                } else {
+                    InitScalerCoefficientsPostcomp9(pDevEvo,
+                                                    pChannel, coeff, index);
+                }
+            }
+        }
+    }
+}
+
+static void InitDesktopColorC9(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel)
+{
+    NvU32 head;
+
+    for (head = 0; head < pDevEvo->numHeads; head++) {
+        nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED(head), 1);
+        nvDmaSetEvoMethodData(pChannel,
+            DRF_NUM(C97D, _HEAD_SET_DESKTOP_COLOR_ALPHA_RED, _ALPHA, 255) |
+            DRF_NUM(C97D, _HEAD_SET_DESKTOP_COLOR_ALPHA_RED, _RED, 0));
+
+        nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE(head), 1);
+        nvDmaSetEvoMethodData(pChannel,
+            DRF_NUM(C97D, _HEAD_SET_DESKTOP_COLOR_GREEN_BLUE, _GREEN, 0) |
+            DRF_NUM(C97D, _HEAD_SET_DESKTOP_COLOR_GREEN_BLUE, _BLUE, 0));
+    }
+}
+
+static void EvoInitChannelC9(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel)
+{
+    const NvBool isCore =
+        FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE,
+                       pChannel->channelMask);
+    const NvBool isWindow =
+        ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0);
+
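+    /*
+     * nvEvoInitChannel3() performs the class-independent channel init;
+     * the branches below then upload the 5-tap scaler coefficients.  The
+     * core channel owns the postcomp (output) scaler and the desktop
+     * color, while each window channel owns its precomp (input) scaler.
+     *
+     * Worked example of the coefficient index packing used by
+     * InitTaps5ScalerCoefficientsC9() above, index = ratio << 6 |
+     * phase << 2 | coeffIdx: ratio 2, phase 5, tap 1 packs to
+     * (2 << 6) | (5 << 2) | 1 = 0x95.
+     */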
+ nvEvoInitChannel3(pDevEvo, pChannel); + + if (isCore) { + InitTaps5ScalerCoefficientsC9(pDevEvo, pChannel, FALSE); + InitDesktopColorC9(pDevEvo, pChannel); + } else if (isWindow) { + InitTaps5ScalerCoefficientsC9(pDevEvo, pChannel, TRUE); + } +} + +static void EvoSetRasterParams9(NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + /* XXXnvdisplay: Convert these for YCbCr, as necessary */ + NvU32 overscanColor = + DRF_NUM(C97D, _HEAD_SET_OVERSCAN_COLOR, _RED_CR, pOverscanColor->red) | + DRF_NUM(C97D, _HEAD_SET_OVERSCAN_COLOR, _GREEN_Y, pOverscanColor->green) | + DRF_NUM(C97D, _HEAD_SET_OVERSCAN_COLOR, _BLUE_CB, pOverscanColor->blue); + NvU32 hdmiStereoCtrl; + NvU16 minFrameIdleLeadingRasterLines, minFrameIdleTrailingRasterLines; + NvBool ret; + const NvU64 pixelClockHz = KHzToHz(pTimings->pixelClock); + const NvU32 pixelClockLo = (pixelClockHz & DRF_MASK(NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ)); + const NvU32 pixelClockHi = (pixelClockHz >> DRF_SIZE(NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ)) & + DRF_MASK(NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HI_HERTZ); + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // XXX[AGP]: These methods are sequential and could use an incrementing + // method, but it's not clear if there's a bug in EVO that causes corruption + // sometimes. Play it safe and send methods with count=1. + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_OVERSCAN_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, overscanColor); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_RASTER_SIZE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_RASTER_SIZE, _WIDTH, pTimings->rasterSize.x) | + DRF_NUM(C97D, _HEAD_SET_RASTER_SIZE, _HEIGHT, pTimings->rasterSize.y)); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_RASTER_SYNC_END(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_RASTER_SYNC_END, _X, pTimings->rasterSyncEnd.x) | + DRF_NUM(C97D, _HEAD_SET_RASTER_SYNC_END, _Y, pTimings->rasterSyncEnd.y)); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_RASTER_BLANK_END(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_RASTER_BLANK_END, _X, pTimings->rasterBlankEnd.x) | + DRF_NUM(C97D, _HEAD_SET_RASTER_BLANK_END, _Y, pTimings->rasterBlankEnd.y)); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_RASTER_BLANK_START(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_RASTER_BLANK_START, _X, pTimings->rasterBlankStart.x) | + DRF_NUM(C97D, _HEAD_SET_RASTER_BLANK_START, _Y, pTimings->rasterBlankStart.y)); + + ret = nvComputeMinFrameIdle(pTimings, + &minFrameIdleLeadingRasterLines, + &minFrameIdleTrailingRasterLines); + if (!ret) { + /* This should have been ensured by IMP in AssignPerHeadImpParams. */ + nvAssert(ret); + /* In case a mode validation override was used to skip IMP, program the + * default values. This may still cause a hardware exception. 
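+     * The fallback programmed below (2 leading / 1 trailing raster lines)
+     * is a bare minimum that has not been validated by IMP.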
*/ + minFrameIdleLeadingRasterLines = 2; + minFrameIdleTrailingRasterLines = 1; + } + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_MIN_FRAME_IDLE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_MIN_FRAME_IDLE, _LEADING_RASTER_LINES, + minFrameIdleLeadingRasterLines) | + DRF_NUM(C97D, _HEAD_SET_MIN_FRAME_IDLE, _TRAILING_RASTER_LINES, + minFrameIdleTrailingRasterLines)); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY, _HERTZ, pixelClockLo) | + DRF_DEF(C97D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY, _ADJ1000DIV1001,_FALSE)); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HI(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_HI, _HERTZ, pixelClockHi)); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C97D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _NOT_DRIVER, _FALSE) | + DRF_DEF(C97D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _HOPPING, _DISABLE) | + DRF_DEF(C97D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _HOPPING_MODE, _VBLANK)); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, _HERTZ, pixelClockLo) | + DRF_DEF(C97D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, _ADJ1000DIV1001,_FALSE)); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HI_MAX(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_HI_MAX, _HERTZ, pixelClockHi)); + + nvDmaSetStartEvoMethod(pChannel, + NVC97D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _RED_CR, 0) | +#if defined(DEBUG) + DRF_NUM(C97D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _GREEN_Y, 512) | +#else + DRF_NUM(C97D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _GREEN_Y, 0) | +#endif + DRF_NUM(C97D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _BLUE_CB, 0)); + + hdmiStereoCtrl = DRF_NUM(C97D, _HEAD_SET_HDMI_CTRL, _HDMI_VIC, 0); + if (pTimings->hdmi3D) { + hdmiStereoCtrl = + FLD_SET_DRF(C97D, _HEAD_SET_HDMI_CTRL, _VIDEO_FORMAT, _STEREO3D, hdmiStereoCtrl); + } else { + hdmiStereoCtrl = + FLD_SET_DRF(C97D, _HEAD_SET_HDMI_CTRL, _VIDEO_FORMAT, _NORMAL, hdmiStereoCtrl); + } + nvDmaSetStartEvoMethod(pChannel, + NVC97D_HEAD_SET_HDMI_CTRL(head), 1); + nvDmaSetEvoMethodData(pChannel, hdmiStereoCtrl); +} + +static void EvoSetRasterParamsC9(NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NvU8 tilePosition, + const NVDscInfoEvoRec *pDscInfo, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState) +{ + nvAssert(tilePosition == 0); + EvoSetRasterParams9(pDevEvo, head, pTimings, pOverscanColor, updateState); +} + +static void EvoSetOCsc1C9(NVDispEvoPtr pDispEvo, const NvU32 head) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const struct NvKmsCscMatrix *matrix = nvEvoGetOCsc1MatrixC5(pHeadState); + struct EvoClampRangeC5 clamp = nvEvoGetOCsc1ClampRange(pHeadState); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_CLAMP_RANGE_GREEN(head), 1); + nvDmaSetEvoMethodData(pChannel, clamp.green); + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_CLAMP_RANGE_RED_BLUE(head), 1); + 
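+    /*
+     * The clamp data word is sent immediately below.  The OCSC1
+     * coefficients that follow form a 3x4 matrix programmed one method at
+     * a time: assuming the C00..C23 methods are consecutive dwords (as the
+     * loop does), method(y, x) = NVC97D_HEAD_SET_OCSC1COEFFICIENT_C00(head)
+     * + ((y * 4) + x) * 4.  Contrast EvoSetOCsc0C9() below, which programs
+     * its 3x4 matrix as a single 12-count method burst.
+     */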
nvDmaSetEvoMethodData(pChannel, clamp.red_blue); + + if (matrix) { + int x, y; + NvU32 method = NVC97D_HEAD_SET_OCSC1COEFFICIENT_C00(head); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_OCSC1CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C97D, _HEAD_SET_OCSC1CONTROL, _ENABLE, _ENABLE)); + + for (y = 0; y < 3; y++) { + for (x = 0; x < 4; x++) { + nvDmaSetStartEvoMethod(pChannel, method, 1); + nvDmaSetEvoMethodData(pChannel, matrix->m[y][x]); + + method += 4; + } + } + } else { + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_OCSC1CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C97D, _HEAD_SET_OCSC1CONTROL, _ENABLE, _DISABLE)); + } +} + +static void EvoSetOCsc0C9(const NVDispEvoRec *pDispEvo, const NvU32 head, + NvBool *pOutputRoundingFix) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + struct NvKms3x4MatrixF32 ocsc0Matrix; + + nvEvo3PickOCsc0(pDispEvo, head, &ocsc0Matrix, pOutputRoundingFix); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_OCSC0COEFFICIENT_C00(head), 12); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C97D, _HEAD_SET_OCSC0COEFFICIENT_C00, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[0][0]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C97D, _HEAD_SET_OCSC0COEFFICIENT_C01, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[0][1]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C97D, _HEAD_SET_OCSC0COEFFICIENT_C02, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[0][2]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C97D, _HEAD_SET_OCSC0COEFFICIENT_C03, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[0][3]))); + + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C97D, _HEAD_SET_OCSC0COEFFICIENT_C10, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[1][0]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C97D, _HEAD_SET_OCSC0COEFFICIENT_C11, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[1][1]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C97D, _HEAD_SET_OCSC0COEFFICIENT_C12, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[1][2]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C97D, _HEAD_SET_OCSC0COEFFICIENT_C13, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[1][3]))); + + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C97D, _HEAD_SET_OCSC0COEFFICIENT_C20, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[2][0]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C97D, _HEAD_SET_OCSC0COEFFICIENT_C21, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[2][1]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C97D, _HEAD_SET_OCSC0COEFFICIENT_C22, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[2][2]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C97D, _HEAD_SET_OCSC0COEFFICIENT_C23, _VALUE, nvCscCoefConvertS514(ocsc0Matrix.m[2][3]))); +} + +static void EvoSetProcAmpC9(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvU32 dynRange, chromaLpf, chromaDownV; + NvU32 colorimetry; + NvBool outputRoundingFix; + + NVT_COLORIMETRY nvtColorimetry = pHeadState->procAmp.colorimetry; + NVT_COLOR_RANGE nvtColorRange = pHeadState->procAmp.colorRange; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + switch (nvtColorimetry) { + default: + nvAssert(!"Unrecognized colorimetry"); + // fall through + case NVT_COLORIMETRY_BT2020RGB: + // fall through + case NVT_COLORIMETRY_RGB: + colorimetry = DRF_DEF(C97D, 
_HEAD_SET_PROCAMP, _COLOR_SPACE, _RGB); + break; + case NVT_COLORIMETRY_YUV_601: + colorimetry = DRF_DEF(C97D, _HEAD_SET_PROCAMP, _COLOR_SPACE, _YUV_601); + break; + case NVT_COLORIMETRY_YUV_709: + colorimetry = DRF_DEF(C97D, _HEAD_SET_PROCAMP, _COLOR_SPACE, _YUV_709); + break; + case NVT_COLORIMETRY_BT2020YCC: + colorimetry = DRF_DEF(C97D, _HEAD_SET_PROCAMP, _COLOR_SPACE, _YUV_2020); + break; + } + + if (nvtColorRange == NVT_COLOR_RANGE_FULL) { + dynRange = DRF_DEF(C97D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _VESA); + } else { + nvAssert(nvtColorRange == NVT_COLOR_RANGE_LIMITED); + dynRange = DRF_DEF(C97D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _CEA); + } + + if (pHeadState->procAmp.colorFormat == NVT_COLOR_FORMAT_YCbCr420) { + chromaLpf = DRF_DEF(C97D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _ENABLE); + chromaDownV = DRF_DEF(C97D, _HEAD_SET_PROCAMP, _CHROMA_DOWN_V, _ENABLE); + } else { + chromaLpf = DRF_DEF(C97D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _DISABLE); + chromaDownV = DRF_DEF(C97D, _HEAD_SET_PROCAMP, _CHROMA_DOWN_V, _DISABLE); + } + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_PROCAMP(head), 1); + nvDmaSetEvoMethodData(pChannel, + colorimetry | dynRange | chromaLpf | chromaDownV); + + EvoSetOCsc0C9(pDispEvo, head, &outputRoundingFix); + EvoSetOCsc1C9(pDispEvo, head); +} + +static void EvoSetHeadControlC9(NVDevEvoPtr pDevEvo, int sd, int head, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + /* + * NOTE: This function should only push state to the hardware based on data + * in the pHC. If not, then we may miss updates due to the memcmp of the + * HeadControl structure in UpdateEvoLockState(). + */ + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + NvU32 data = 0, pin; + NvU32 serverLockMode, clientLockMode; + + /* These methods should only apply to a single subdevice */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + switch (pHC->serverLock) { + case NV_EVO_NO_LOCK: + serverLockMode = NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK; + break; + /* + * NOTE: Rasterlock is being dropped in NVD5.0. MASTER/SLAVE_LOCK_MODE_RASTER_LOCK + * fields have been retained just for compatibility purposes, and will anyways + * result in framelock being configured. + */ + case NV_EVO_RASTER_LOCK: + case NV_EVO_FRAME_LOCK: + serverLockMode = NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK; + break; + default: + nvAssert(!"Invalid server lock mode"); + return; + } + + switch (pHC->clientLock) { + case NV_EVO_NO_LOCK: + clientLockMode = NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK; + break; + case NV_EVO_RASTER_LOCK: + case NV_EVO_FRAME_LOCK: + clientLockMode = NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK; + break; + default: + nvAssert(!"Invalid client lock mode"); + return; + } + + // Convert head control state to EVO method values. 
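+    /*
+     * Legend for the register-field macros used throughout this file
+     * (paraphrased; see nvmisc.h for the exact definitions):
+     *
+     *   DRF_DEF(d, r, f, c) - field f of method r set to named value c
+     *   DRF_NUM(d, r, f, n) - field f of method r set to numeric value n
+     *   FLD_SET_DRF(...)    - read-modify-write of one field within an
+     *                         existing 32-bit method word
+     *
+     * Each field below is OR-ed into 'data', and the assembled word is
+     * sent as a single HEAD_SET_CONTROL method.
+     */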
+ nvAssert(!pHC->interlaced); + data |= DRF_DEF(C97D, _HEAD_SET_CONTROL, _STRUCTURE, _PROGRESSIVE); + + nvAssert(pHC->serverLockPin != NV_EVO_LOCK_PIN_ERROR); + nvAssert(pHC->clientLockPin != NV_EVO_LOCK_PIN_ERROR); + + if (serverLockMode == NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK) { + data |= DRF_DEF(C97D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, _LOCK_PIN_NONE); + } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->serverLockPin)) { + pin = pHC->serverLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + /* + * nvdClass_01.mfs says: + * "master lock pin, if internal, must be set to the corresponding + * internal pin for that head" (error check #12) + */ + nvAssert(pin == head); + data |= DRF_NUM(C97D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, + NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(pin)); + } else { + pin = pHC->serverLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(C97D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, + NVC97D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(pin)); + } + data |= DRF_NUM(C97D, _HEAD_SET_CONTROL, _MASTER_LOCK_MODE, serverLockMode); + + if (clientLockMode == NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK) { + data |= DRF_DEF(C97D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, _LOCK_PIN_NONE); + } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->clientLockPin)) { + pin = pHC->clientLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + data |= DRF_NUM(C97D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(pin)); + } else { + pin = pHC->clientLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(C97D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NVC97D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(pin)); + } + data |= DRF_NUM(C97D, _HEAD_SET_CONTROL, _SLAVE_LOCK_MODE, clientLockMode); + data |= DRF_NUM(C97D, _HEAD_SET_CONTROL, _SLAVE_LOCKOUT_WINDOW, + pHC->clientLockoutWindow); + + /* + * We always enable stereo lock when it's available and either framelock + * or rasterlock is in use. + */ + if (pHC->stereoLocked) { + if (pHC->serverLock != NV_EVO_NO_LOCK) { + data |= DRF_NUM(C97D, _HEAD_SET_CONTROL, _MASTER_STEREO_LOCK_MODE, + NVC97D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE); + } + if (pHC->clientLock != NV_EVO_NO_LOCK) { + data |= DRF_NUM(C97D, _HEAD_SET_CONTROL, _SLAVE_STEREO_LOCK_MODE, + NVC97D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE); + } + } + + nvAssert(pHC->stereoPin != NV_EVO_LOCK_PIN_ERROR); + if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin)) { + data |= DRF_DEF(C97D, _HEAD_SET_CONTROL, _STEREO_PIN, _LOCK_PIN_NONE); + } else { + pin = pHC->stereoPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(C97D, _HEAD_SET_CONTROL, _STEREO_PIN, + NVC97D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(pin)); + } + + if (pHC->hdmi3D) { + data |= DRF_DEF(C97D, _HEAD_SET_CONTROL, _STEREO3D_STRUCTURE, _FRAME_PACKED); + } else { + data |= DRF_DEF(C97D, _HEAD_SET_CONTROL, _STEREO3D_STRUCTURE, _NORMAL); + } + + if (pHC->hwYuv420) { + data |= DRF_DEF(C97D, _HEAD_SET_CONTROL, _YUV420PACKER, _ENABLE); + } else { + data |= DRF_DEF(C97D, _HEAD_SET_CONTROL, _YUV420PACKER, _DISABLE); + } + + // Send the HeadSetControl method. 
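+    // All of the lock, stereo, and YUV420-packer fields accumulated in
+    // 'data' above go out in this single 32-bit write.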
+ nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, data); + + nvEvoSetControlC3(pDevEvo, sd); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_LOCK_CHAIN(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C97D, _HEAD_SET_LOCK_CHAIN, _POSITION, + pHC->lockChainPosition)); +} + +static void EvoHeadSetControlORC9(NVDevEvoPtr pDevEvo, + const int head, + const NVHwModeTimingsEvo *pTimings, + const enum nvKmsPixelDepth pixelDepth, + const NvBool colorSpaceOverride, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NvU32 hwPixelDepth = nvEvoGetPixelDepthC3(pixelDepth); + const NvU16 colorSpaceFlag = nvEvo1GetColorSpaceFlag(pDevEvo, + colorSpaceOverride); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C97D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _CRC_MODE, _COMPLETE_RASTER) | + (pTimings->hSyncPol ? + DRF_DEF(C97D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(C97D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _POSITIVE_TRUE)) | + (pTimings->vSyncPol ? + DRF_DEF(C97D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(C97D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _POSITIVE_TRUE)) | + DRF_NUM(C97D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _PIXEL_DEPTH, hwPixelDepth) | + (colorSpaceOverride ? + (DRF_DEF(C97D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _ENABLE) | + DRF_NUM(C97D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_FLAG, colorSpaceFlag)) : + DRF_DEF(C97D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _DISABLE)) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _EXT_PACKET_WIN, _NONE)); +} + +static void EvoHeadSetDisplayIdC9(NVDevEvoPtr pDevEvo, + const NvU32 head, const NvU32 displayId, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_DISPLAY_ID(head, 0), 1); + nvDmaSetEvoMethodData(pChannel, displayId); +} + +static void SetOLUTSurfaceAddress( + const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 offset, + NvU32 head) +{ + NvU32 addrHi, addrLo; + NvBool enable; + NvU8 target; + NvU32 value = 0; + + GetSurfaceAddress(pDevEvo, pSurfaceDesc, offset, &addrHi, &addrLo, + &enable, &target); + + nvDmaSetStartEvoMethod(pChannel, + NVC97D_HEAD_SET_SURFACE_ADDRESS_HI_OLUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_HI_OLUT, _ADDRESS_HI, addrHi)); + + value = FLD_SET_DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_LO_OLUT, + _ADDRESS_LO, addrLo, value); + value = FLD_SET_DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_LO_OLUT, + _TARGET, target, value); + value = FLD_SET_DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_LO_OLUT, + _ENABLE, enable, value); + nvDmaSetStartEvoMethod(pChannel, + NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT(head), 1); + nvDmaSetEvoMethodData(pChannel, value); +} + +static void EvoSetOutputLutC9(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const NVFlipLutHwState *pOutputLut, + NvU32 fpNormScale, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + const NVDispEvoRec *pDispEvo = pDevEvo->pDispEvo[sd]; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvBool enableOutputLut = 
(pOutputLut->pLutSurfaceEvo != NULL); + NVSurfaceEvoPtr pLutSurfEvo = pOutputLut->pLutSurfaceEvo; + NVSurfaceDescriptor *pSurfaceDesc = + enableOutputLut ? &pLutSurfEvo->planes[0].surfaceDesc : NULL; + NvU64 offset = enableOutputLut ? pOutputLut->offset : offsetof(NVEvoLutDataRec, output); + NvBool isLutModeVss = enableOutputLut ? (pOutputLut->vssSegments != 0) : FALSE; + NvU32 lutSize = enableOutputLut ? pOutputLut->lutEntries : NV_NUM_EVO_LUT_ENTRIES; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvBool disableOcsc0 = FALSE; + NvBool outputRoundingFix = nvkms_output_rounding_fix(); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // XXX HDR TODO: Enable custom output LUTs with HDR + // XXX HDR TODO: Support other transfer functions + if (!pOutputLut->fromOverride && + (pHeadState->tf == NVKMS_OUTPUT_TF_PQ)) { + enableOutputLut = FALSE; + } + + nvSetupOutputLUT5(pDevEvo, + pHeadState, + enableOutputLut, + bypassComposition, + &pSurfaceDesc, + &lutSize, + &offset, + &disableOcsc0, + &fpNormScale, + &isLutModeVss); + + if (disableOcsc0) { + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_OCSC0CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(C97D, _HEAD_SET_OCSC0CONTROL, _ENABLE, _DISABLE)); + + outputRoundingFix = FALSE; + } else { + /* Update status of output rounding fix. */ + EvoSetOCsc0C9(pDispEvo, head, &outputRoundingFix); + } + + /* Program the output LUT */ + nvAssert((offset & 0xff) == 0); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_OLUT_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + ((isLutModeVss || !outputRoundingFix) ? + DRF_DEF(C97D, _HEAD_SET_OLUT_CONTROL, _INTERPOLATE, _ENABLE) : + DRF_DEF(C97D, _HEAD_SET_OLUT_CONTROL, _INTERPOLATE, _DISABLE)) | + DRF_DEF(C97D, _HEAD_SET_OLUT_CONTROL, _MIRROR, _DISABLE) | + (isLutModeVss ? 
DRF_DEF(C97D, _HEAD_SET_OLUT_CONTROL, _MODE, _SEGMENTED) : + DRF_DEF(C97D, _HEAD_SET_OLUT_CONTROL, _MODE, _DIRECT10)) | + DRF_NUM(C97D, _HEAD_SET_OLUT_CONTROL, _SIZE, NV_LUT_VSS_HEADER_SIZE + lutSize)); + + SetOLUTSurfaceAddress(pDevEvo, pChannel, pSurfaceDesc, offset, head); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_OLUT_FP_NORM_SCALE(head), 1); + nvDmaSetEvoMethodData(pChannel, fpNormScale); + + if (!disableOcsc0) { + /* only enable OCSC0 after enabling the OLUT */ + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_OCSC0CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(C97D, _HEAD_SET_OCSC0CONTROL, _ENABLE, _ENABLE)); + } +} + +static void EvoSetViewportPointInC9(NVDevEvoPtr pDevEvo, const int head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* Set the input viewport point */ + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_VIEWPORT_POINT_IN(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C97D, _HEAD_SET_VIEWPORT_POINT_IN, _X, x) | + DRF_NUM(C97D, _HEAD_SET_VIEWPORT_POINT_IN, _Y, y)); + /* XXXnvdisplay set ViewportValidPointIn to configure overfetch */ +} + +static void EvoSetOutputScalerC9(const NVDispEvoRec *pDispEvo, const NvU32 head, + const NvU32 imageSharpeningValue, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeViewPortEvo *pViewPort = &pHeadState->timings.viewPort; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + NvU32 vTaps = pViewPort->vTaps > NV_EVO_SCALER_2TAPS ? + NVC97D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 : + NVC97D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2; + NvU32 hTaps = pViewPort->hTaps > NV_EVO_SCALER_2TAPS ? + NVC97D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 : + NVC97D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2; + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_CONTROL_OUTPUT_SCALER(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_CONTROL_OUTPUT_SCALER, _VERTICAL_TAPS, vTaps) | + DRF_NUM(C97D, _HEAD_SET_CONTROL_OUTPUT_SCALER, _HORIZONTAL_TAPS, hTaps)); +} + +static NvBool EvoSetViewportInOut9(NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortMin, + const NVHwModeViewPortEvo *pViewPort, + const NVHwModeViewPortEvo *pViewPortMax, + NVEvoUpdateState *updateState, + NvU32 setWindowUsageBounds) +{ + const NVEvoCapabilitiesPtr pEvoCaps = &pDevEvo->gpus[0].capabilities; + NVEvoChannelPtr pChannel = pDevEvo->core; + struct NvKmsScalingUsageBounds scalingUsageBounds = { }; + NvU32 win; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* The input viewport shouldn't vary. 
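+     * It is expected to be identical across the min/nominal/max IMP
+     * variants; the assertions below check exactly that before SIZE_IN is
+     * programmed.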
*/ + nvAssert(pViewPortMin->in.width == pViewPort->in.width); + nvAssert(pViewPortMax->in.width == pViewPort->in.width); + nvAssert(pViewPortMin->in.height == pViewPort->in.height); + nvAssert(pViewPortMax->in.height == pViewPort->in.height); + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_VIEWPORT_SIZE_IN(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_VIEWPORT_SIZE_IN, _WIDTH, pViewPort->in.width) | + DRF_NUM(C97D, _HEAD_SET_VIEWPORT_SIZE_IN, _HEIGHT, pViewPort->in.height)); + /* XXXnvdisplay set ViewportValidSizeIn to configure overfetch */ + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_VIEWPORT_POINT_OUT, _ADJUST_X, pViewPort->out.xAdjust) | + DRF_NUM(C97D, _HEAD_SET_VIEWPORT_POINT_OUT, _ADJUST_Y, pViewPort->out.yAdjust)); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_VIEWPORT_SIZE_OUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_VIEWPORT_SIZE_OUT, _WIDTH, pViewPort->out.width) | + DRF_NUM(C97D, _HEAD_SET_VIEWPORT_SIZE_OUT, _HEIGHT, pViewPort->out.height)); + + /* XXXnvdisplay deal with pViewPortMin, pViewPortMax */ + + if (!nvComputeScalingUsageBounds(&pEvoCaps->head[head].scalerCaps, + pViewPort->in.width, pViewPort->in.height, + pViewPort->out.width, pViewPort->out.height, + pViewPort->hTaps, pViewPort->vTaps, + &scalingUsageBounds)) { + /* Should have been rejected by validation */ + nvAssert(!"Attempt to program invalid viewport"); + } + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_MAX_OUTPUT_SCALE_FACTOR, _HORIZONTAL, + scalingUsageBounds.maxHDownscaleFactor) | + DRF_NUM(C97D, _HEAD_SET_MAX_OUTPUT_SCALE_FACTOR, _VERTICAL, + scalingUsageBounds.maxVDownscaleFactor)); + + /* + * Program MAX_PIXELS_FETCHED_PER_LINE window usage bounds + * for each window that is attached to the head. + * + * Precomp will clip the post-scaled window to the input viewport, reverse-scale + * this cropped size back to the input surface domain, and isohub will fetch + * this cropped size. This function assumes that there's no window scaling yet, + * so the MAX_PIXELS_FETCHED_PER_LINE will be bounded by the input viewport + * width. SetScalingUsageBoundsOneWindow5() will take care of updating + * MAX_PIXELS_FETCHED_PER_LINE, if window scaling is enabled later. + * + * Program MAX_PIXELS_FETCHED_PER_LINE for each window that is attached to + * head. For Turing+, SetScalingUsageBoundsOneWindow5() will take care of + * programming window usage bounds only for the layers/windows in use. 
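+     *
+     * As an illustrative example: with a 1920-pixel-wide input viewport and
+     * 1x scaling, nvGetMaxPixelsFetchedPerLine(1920, NV_EVO_SCALE_FACTOR_1X)
+     * bounds each window's per-line fetch to roughly the viewport width,
+     * plus whatever overfetch padding that helper chooses to apply.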
+ */ + setWindowUsageBounds |= + DRF_NUM(C97D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _MAX_PIXELS_FETCHED_PER_LINE, + nvGetMaxPixelsFetchedPerLine(pViewPort->in.width, + NV_EVO_SCALE_FACTOR_1X)); + + for (win = 0; win < pDevEvo->numWindows; win++) { + if (head != pDevEvo->headForWindow[win]) { + continue; + } + + nvDmaSetStartEvoMethod(pChannel, NVC97D_WINDOW_SET_WINDOW_USAGE_BOUNDS(win), 1); + nvDmaSetEvoMethodData(pChannel, setWindowUsageBounds); + } + + return scalingUsageBounds.vUpscalingAllowed; +} + +static void EvoSetViewportInOutC9(NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortMin, + const NVHwModeViewPortEvo *pViewPort, + const NVHwModeViewPortEvo *pViewPortMax, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 setWindowUsageBounds = + (NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C5 | + DRF_DEF(C97D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_2) | + DRF_DEF(C97D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE)); + NvU32 verticalUpscalingAllowed = + EvoSetViewportInOut9(pDevEvo, head, pViewPortMin, pViewPort, + pViewPortMax, updateState, setWindowUsageBounds); + + nvDmaSetStartEvoMethod(pChannel, + NVC97D_HEAD_SET_HEAD_USAGE_BOUNDS(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C97D, _HEAD_SET_HEAD_USAGE_BOUNDS, _CURSOR, _USAGE_W256_H256) | + DRF_DEF(C97D, _HEAD_SET_HEAD_USAGE_BOUNDS, _OLUT_ALLOWED, _TRUE) | + /* Despite the generic name of this field, it's specific to vertical taps. */ + (pViewPort->vTaps > NV_EVO_SCALER_2TAPS ? + DRF_DEF(C97D, _HEAD_SET_HEAD_USAGE_BOUNDS, _OUTPUT_SCALER_TAPS, _TAPS_5) : + DRF_DEF(C97D, _HEAD_SET_HEAD_USAGE_BOUNDS, _OUTPUT_SCALER_TAPS, _TAPS_2)) | + (verticalUpscalingAllowed ? + DRF_DEF(C97D, _HEAD_SET_HEAD_USAGE_BOUNDS, _UPSCALING_ALLOWED, _TRUE) : + DRF_DEF(C97D, _HEAD_SET_HEAD_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE))); +} + +static void SetCursorSurfaceAddress( + const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 offset, + NvU32 head) +{ + NvU32 addrHi, addrLo; + NvBool enable; + NvU8 target; + NvU32 value = 0; + + GetSurfaceAddress(pDevEvo, pSurfaceDesc, offset, &addrHi, &addrLo, + &enable, &target); + + nvDmaSetStartEvoMethod(pChannel, + NVC97D_HEAD_SET_SURFACE_ADDRESS_HI_CURSOR(head, 0), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_HI_CURSOR, _ADDRESS_HI, addrHi)); + + value = FLD_SET_DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, + _ADDRESS_LO, addrLo, value); + value = FLD_SET_DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, + _TARGET, target, value); + value = FLD_SET_DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, + _ENABLE, enable, value); + nvDmaSetStartEvoMethod(pChannel, + NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(head, 0), 1); + nvDmaSetEvoMethodData(pChannel, value); +} + +static void EvoSetCursorImageC9(NVDevEvoPtr pDevEvo, const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVSurfaceDescriptor *pSurfaceDesc = + pSurfaceEvo ? &pSurfaceEvo->planes[0].surfaceDesc : NULL; + const NvU64 offset = pSurfaceEvo ? 
pSurfaceEvo->planes[0].offset : 0; + NvU32 headSetControlCursorValue = 0; + NvBool ret; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + nvAssert(pCursorCompParams->colorKeySelect == + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE); + nvAssert(NVBIT(pCursorCompParams->blendingMode[1]) & + NV_EVO3_SUPPORTED_CURSOR_COMP_BLEND_MODES); + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + ret = nvEvoGetHeadSetControlCursorValueC3(pDevEvo, pSurfaceEvo, + &headSetControlCursorValue); + /* + * The caller should have already validated the surface, so there + * shouldn't be a failure. + */ + if (!ret) { + nvAssert(!"Could not construct HEAD_SET_CONTROL_CURSOR value"); + } + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_PRESENT_CONTROL_CURSOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C97D, _HEAD_SET_PRESENT_CONTROL_CURSOR, _MODE, _MONO)); + + SetCursorSurfaceAddress(pDevEvo, pChannel, pSurfaceDesc, offset, head); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_CONTROL_CURSOR(head), 1); + nvDmaSetEvoMethodData(pChannel, headSetControlCursorValue); + + nvDmaSetStartEvoMethod(pChannel, + NVC97D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(head), 1); + switch (pCursorCompParams->blendingMode[1]) { + case NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, 255) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _ZERO) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, 255) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, 255) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, + pCursorCompParams->surfaceAlpha) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, + pCursorCompParams->surfaceAlpha) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C97D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + default: + 
nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "%s: composition mode %d not supported for cursor", + __func__, pCursorCompParams->blendingMode[1]); + break; + } +} + +static void EvoSetDitherC9(NVDispEvoPtr pDispEvo, const int head, + const NvBool enabled, const NvU32 type, + const NvU32 algo, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 ditherControl; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (enabled) { + ditherControl = DRF_DEF(C97D, _HEAD_SET_DITHER_CONTROL, _ENABLE, _ENABLE); + + switch (type) { + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS: + ditherControl |= + DRF_DEF(C97D, _HEAD_SET_DITHER_CONTROL, _BITS, _TO_6_BITS); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS: + ditherControl |= + DRF_DEF(C97D, _HEAD_SET_DITHER_CONTROL, _BITS, _TO_8_BITS); + break; + /* XXXnvdisplay: Support DITHER_TO_{10,12}_BITS (see also bug 1729668). */ + default: + nvAssert(!"Unknown ditherType"); + // Fall through + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF: + ditherControl = NVC97D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE; + break; + } + + } else { + ditherControl = DRF_DEF(C97D, _HEAD_SET_DITHER_CONTROL, _ENABLE, _DISABLE); + } + + switch (algo) { + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_ERR_ACC: + ditherControl |= + DRF_DEF(C97D, _HEAD_SET_DITHER_CONTROL, _MODE, _STATIC_ERR_ACC); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2: + ditherControl |= + DRF_DEF(C97D, _HEAD_SET_DITHER_CONTROL, _MODE, _DYNAMIC_2X2); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2: + ditherControl |= + DRF_DEF(C97D, _HEAD_SET_DITHER_CONTROL, _MODE, _STATIC_2X2); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL: + ditherControl |= + DRF_DEF(C97D, _HEAD_SET_DITHER_CONTROL, _MODE, _TEMPORAL); + break; + default: + nvAssert(!"Unknown DitherAlgo"); + // Fall through + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN: + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_ERR_ACC: + ditherControl |= + DRF_DEF(C97D, _HEAD_SET_DITHER_CONTROL, _MODE, _DYNAMIC_ERR_ACC); + break; + } + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_DITHER_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, ditherControl); +} + +static void EvoSetDisplayRateC9(NVDispEvoPtr pDispEvo, const int head, + NvBool enable, + NVEvoUpdateState *updateState, + NvU32 timeoutMicroseconds) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (enable) { + timeoutMicroseconds = + NV_MIN(timeoutMicroseconds, + DRF_MASK(NVC97D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL)); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_DISPLAY_RATE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C97D, _HEAD_SET_DISPLAY_RATE, _RUN_MODE, _ONE_SHOT) | + DRF_NUM(C97D, _HEAD_SET_DISPLAY_RATE, _MIN_REFRESH_INTERVAL, + timeoutMicroseconds) | + (timeoutMicroseconds == 0 ? 
+                DRF_DEF(C97D, _HEAD_SET_DISPLAY_RATE, _MIN_REFRESH, _DISABLE) :
+                DRF_DEF(C97D, _HEAD_SET_DISPLAY_RATE, _MIN_REFRESH, _ENABLE)));
+    } else {
+        nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_DISPLAY_RATE(head), 1);
+        nvDmaSetEvoMethodData(pChannel,
+            DRF_DEF(C97D, _HEAD_SET_DISPLAY_RATE, _RUN_MODE, _CONTINUOUS));
+    }
+}
+
+static void EvoSetStallLockC9(NVDispEvoPtr pDispEvo, const int head,
+                              NvBool enable, NVEvoUpdateState *updateState)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVEvoChannelPtr pChannel = pDevEvo->core;
+    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
+    NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head];
+    NvU32 data = 0x0;
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+
+    /* CRASH_LOCK is not supported in NV5.0, only LINE_LOCK will be supported */
+    nvAssert(!pHC->crashLockUnstallMode);
+    data |= DRF_DEF(C97D, _HEAD_SET_STALL_LOCK, _UNSTALL_MODE, _LINE_LOCK);
+
+    if (enable) {
+        data |= DRF_DEF(C97D, _HEAD_SET_STALL_LOCK, _ENABLE, _TRUE) |
+                DRF_DEF(C97D, _HEAD_SET_STALL_LOCK, _MODE, _ONE_SHOT);
+
+        if (!pHC->useStallLockPin) {
+            data |= DRF_DEF(C97D, _HEAD_SET_STALL_LOCK, _LOCK_PIN, _LOCK_PIN_NONE);
+        } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stallLockPin)) {
+            NvU32 pin = pHC->stallLockPin - NV_EVO_LOCK_PIN_INTERNAL_0;
+            data |= DRF_NUM(C97D, _HEAD_SET_STALL_LOCK, _LOCK_PIN,
+                            NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(pin));
+        } else {
+            NvU32 pin = pHC->stallLockPin - NV_EVO_LOCK_PIN_0;
+            data |= DRF_NUM(C97D, _HEAD_SET_STALL_LOCK, _LOCK_PIN,
+                            NVC97D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(pin));
+        }
+    } else {
+        data |= DRF_DEF(C97D, _HEAD_SET_STALL_LOCK, _ENABLE, _FALSE);
+    }
+
+    nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_STALL_LOCK(head), 1);
+    nvDmaSetEvoMethodData(pChannel, data);
+}
+
+static void SetCrcSurfaceAddress(
+    const NVDevEvoRec *pDevEvo,
+    NVEvoChannelPtr pChannel,
+    const NVSurfaceDescriptor *pSurfaceDesc,
+    NvU32 head)
+{
+    NvU32 addrHi, addrLo;
+    NvBool enable;
+    NvU8 target;
+    NvU32 value = 0;
+
+    GetSurfaceAddress(pDevEvo, pSurfaceDesc, 0, &addrHi, &addrLo,
+                      &enable, &target);
+
+    nvDmaSetStartEvoMethod(pChannel,
+        NVC97D_HEAD_SET_SURFACE_ADDRESS_HI_CRC(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_HI_CRC, _ADDRESS_HI, addrHi));
+
+    value = FLD_SET_DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_LO_CRC,
+                            _ADDRESS_LO, addrLo, value);
+    value = FLD_SET_DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_LO_CRC,
+                            _TARGET, target, value);
+    value = FLD_SET_DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_LO_CRC,
+                            _ENABLE, enable, value);
+    nvDmaSetStartEvoMethod(pChannel,
+        NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_CRC(head), 1);
+    nvDmaSetEvoMethodData(pChannel, value);
+}
+
+static void EvoStartHeadCRC32CaptureC9(NVDevEvoPtr pDevEvo,
+                                       NVEvoDmaPtr pDma,
+                                       NVConnectorEvoPtr pConnectorEvo,
+                                       const enum nvKmsTimingsProtocol protocol,
+                                       const NvU32 orIndex,
+                                       NvU32 head,
+                                       NvU32 sd,
+                                       NVEvoUpdateState *updateState)
+{
+    const NvU32 winChannel = head << 1;
+    NVEvoChannelPtr pChannel = pDevEvo->core;
+    NvU32 orOutput = 0;
+
+    /* These methods should only apply to a single pDpy */
+    nvAssert(pDevEvo->subDevMaskStackDepth > 0);
+
+    /* The window channel should fit in
+     * NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL */
+    nvAssert(winChannel < DRF_MASK(NVC97D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL));
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+
+    switch (pConnectorEvo->or.type) {
+    case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR:
+        if (protocol == NVKMS_PROTOCOL_SOR_DP_A ||
+            protocol == NVKMS_PROTOCOL_SOR_DP_B) {
+            orOutput = NVC97D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF;
+        } else {
+            orOutput =
+                NVC97D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(orIndex);
+        }
+        break;
+    case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR:
+        /* No PIOR support in C9 HAL. Fall through. */
+    case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC:
+        /* No DAC support on nvdisplay. Fall through. */
+    default:
+        nvAssert(!"Invalid pConnectorEvo->or.type");
+        break;
+    }
+
+    SetCrcSurfaceAddress(pDevEvo, pChannel, &pDma->surfaceDesc, head);
+
+    nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_CRC_CONTROL(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(C97D, _HEAD_SET_CRC_CONTROL, _PRIMARY_CRC, orOutput) |
+        DRF_DEF(C97D, _HEAD_SET_CRC_CONTROL, _SECONDARY_CRC, _NONE) |
+        DRF_NUM(C97D, _HEAD_SET_CRC_CONTROL, _CONTROLLING_CHANNEL, winChannel) |
+        DRF_DEF(C97D, _HEAD_SET_CRC_CONTROL, _EXPECT_BUFFER_COLLAPSE, _FALSE) |
+        DRF_DEF(C97D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE));
+
+    /* Reset the CRC notifier */
+    nvEvoResetCRC32Notifier(pDma->subDeviceAddress[sd],
+                            NVC37D_NOTIFIER_CRC_STATUS_0,
+                            DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_DONE),
+                            NVC37D_NOTIFIER_CRC_STATUS_0_DONE_FALSE);
+}
+
+static void EvoStopHeadCRC32CaptureC9(NVDevEvoPtr pDevEvo,
+                                      NvU32 head,
+                                      NVEvoUpdateState *updateState)
+{
+    NVEvoChannelPtr pChannel = pDevEvo->core;
+
+    /* These methods should only apply to a single pDpy */
+    nvAssert(pDevEvo->subDevMaskStackDepth > 0);
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+
+    SetCrcSurfaceAddress(pDevEvo, pChannel, NULL, head);
+
+    nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_CRC_CONTROL(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_DEF(C97D, _HEAD_SET_CRC_CONTROL, _PRIMARY_CRC, _NONE) |
+        DRF_DEF(C97D, _HEAD_SET_CRC_CONTROL, _SECONDARY_CRC, _NONE) |
+        DRF_NUM(C97D, _HEAD_SET_CRC_CONTROL, _CONTROLLING_CHANNEL, 0) |
+        DRF_DEF(C97D, _HEAD_SET_CRC_CONTROL, _EXPECT_BUFFER_COLLAPSE, _FALSE) |
+        DRF_DEF(C97D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE));
+}
+
+/*
+ * This method configures and programs the RG Core Semaphores. Default behavior
+ * is to continuously trigger on the specified rasterline when enabled.
+ */
+static void
+EvoConfigureVblankSyncObjectC9(const NVDevEvoPtr pDevEvo,
+                               const NvU16 rasterLine,
+                               const NvU32 head,
+                               const NvU32 semaphoreIndex,
+                               const NVSurfaceDescriptor *pSurfaceDesc,
+                               NVEvoUpdateState* pUpdateState)
+{
+    NvU32 addrHi, addrLo;
+    NvBool enable;
+    NvU8 target;
+    NvU32 value = 0;
+    NVEvoChannelPtr pChannel = pDevEvo->core;
+
+    /*
+     * Populate the NVEvoUpdateState for the caller. The Update State contains
+     * a mask of which display channels need to be updated.
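+     * The caller is expected to kick off the update for the masked channels
+     * (here, the core channel) once all methods have been queued.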
+ */ + nvUpdateUpdateState(pDevEvo, pUpdateState, pChannel); + + GetSurfaceAddress(pDevEvo, pSurfaceDesc, 0, &addrHi, &addrLo, + &enable, &target); + + nvDmaSetStartEvoMethod(pChannel, + NVC97D_HEAD_SET_SURFACE_ADDRESS_HI_RG_REL_SEMAPHORE(head, semaphoreIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_HI_RG_REL_SEMAPHORE, _ADDRESS_HI, addrHi)); + + value = FLD_SET_DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE, + _ADDRESS_LO, addrLo, value); + value = FLD_SET_DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE, + _TARGET, target, value); + value = FLD_SET_DRF_NUM(C97D, _HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE, + _ENABLE, enable, value); + nvDmaSetStartEvoMethod(pChannel, + NVC97D_HEAD_SET_SURFACE_ADDRESS_LO_RG_REL_SEMAPHORE(head, semaphoreIndex), 1); + nvDmaSetEvoMethodData(pChannel, value); + + if (!enable) { + /* Disabling semaphore so no configuration necessary. */ + return; + } + + /* + * Configure the semaphore with the following: + * Set PAYLOAD_SIZE to 32bits (default). + * Set REL_MODE to WRITE (default). + * Set RUN_MODE to CONTINUOUS. + * Set RASTER_LINE to start of Vblank: Vsync + Vbp + Vactive. + * + * Note that all these options together fit in 32bits, and that all 32 bits + * must be written each time any given option changes. + * + * The actual payload value doesn't currently matter since this RG + * semaphore will be mapped to a syncpt for now. Each HW-issued payload + * write is converted to a single syncpt increment irrespective of what the + * actual semaphore payload value is. + */ + nvDmaSetStartEvoMethod(pChannel, + NVC97D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL(head, semaphoreIndex), + 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C97D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _PAYLOAD_SIZE, + _PAYLOAD_32BIT) | + DRF_DEF(C97D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _REL_MODE, + _WRITE) | + DRF_DEF(C97D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _RUN_MODE, + _CONTINUOUS) | + DRF_NUM(C97D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _RASTER_LINE, + rasterLine)); +} + +static void EvoSetHdmiDscParamsC9(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVDscInfoEvoRec *pDscInfo, + const enum nvKmsPixelDepth pixelDepth) +{ + NVEvoChannelPtr pChannel = pDispEvo->pDevEvo->core; + NvU32 bpc, flatnessDetThresh; + NvU32 i; + + nvAssert(pDispEvo->pDevEvo->hal->caps.supportsHDMIFRL && + pDscInfo->type == NV_DSC_INFO_EVO_TYPE_HDMI); + + bpc = nvPixelDepthToBitsPerComponent(pixelDepth); + if (bpc < 8) { + nvAssert(bpc >= 8); + bpc = 8; + } + flatnessDetThresh = (2 << (bpc - 8)); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_DSC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C97D, _HEAD_SET_DSC_CONTROL, _ENABLE, _TRUE) | + DRF_NUM(C97D, _HEAD_SET_DSC_CONTROL, _FLATNESS_DET_THRESH, flatnessDetThresh) | + DRF_DEF(C97D, _HEAD_SET_DSC_CONTROL, _FULL_ICH_ERR_PRECISION, _ENABLE) | + DRF_DEF(C97D, _HEAD_SET_DSC_CONTROL, _AUTO_RESET, _ENABLE) | + DRF_DEF(C97D, _HEAD_SET_DSC_CONTROL, _FORCE_ICH_RESET, _FALSE)); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_DSC_PPS_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C97D, _HEAD_SET_DSC_PPS_CONTROL, _ENABLE, _TRUE) | + DRF_DEF(C97D, _HEAD_SET_DSC_PPS_CONTROL, _LOCATION, _VBLANK) | + DRF_DEF(C97D, _HEAD_SET_DSC_PPS_CONTROL, _FREQUENCY, _EVERY_FRAME) | + /* MFS says "For FRL DSC CVTEM, it should be 0x21 (136bytes)." */ + DRF_NUM(C97D, _HEAD_SET_DSC_PPS_CONTROL, _SIZE, 0x21)); + + /* The loop below assumes the methods are tightly packed. 
*/ + ct_assert(ARRAY_LEN(pDscInfo->hdmi.pps) == 32); + ct_assert((NVC97D_HEAD_SET_DSC_PPS_DATA1(0) - NVC97D_HEAD_SET_DSC_PPS_DATA0(0)) == 4); + ct_assert((NVC97D_HEAD_SET_DSC_PPS_DATA31(0) - NVC97D_HEAD_SET_DSC_PPS_DATA0(0)) == (31 * 4)); + for (i = 0; i < ARRAY_LEN(pDscInfo->hdmi.pps); i++) { + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_DSC_PPS_DATA0(head) + (i * 4), 1); + nvDmaSetEvoMethodData(pChannel, pDscInfo->hdmi.pps[i]); + } + + /* Byte 0 must be 0x7f, the rest are don't care (will be filled in by HW) */ + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_DSC_PPS_HEAD(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_DSC_PPS_HEAD, _BYTE0, 0x7f)); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_HDMI_DSC_HCACTIVE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_HDMI_DSC_HCACTIVE, _BYTES, pDscInfo->hdmi.dscHActiveBytes) | + DRF_NUM(C97D, _HEAD_SET_HDMI_DSC_HCACTIVE, _TRI_BYTES, pDscInfo->hdmi.dscHActiveTriBytes)); + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_HDMI_DSC_HCBLANK(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_HDMI_DSC_HCBLANK, _WIDTH, pDscInfo->hdmi.dscHBlankTriBytes)); +} + +static void EvoSetDpDscParamsC9(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVDscInfoEvoRec *pDscInfo) +{ + NVEvoChannelPtr pChannel = pDispEvo->pDevEvo->core; + NvU32 flatnessDetThresh; + NvU32 i; + + nvAssert(pDscInfo->type == NV_DSC_INFO_EVO_TYPE_DP); + + // XXX: I'm pretty sure that this is wrong. + // BitsPerPixelx16 is something like (24 * 16) = 384, and 2 << (384 - 8) is + // an insanely large number. + flatnessDetThresh = (2 << (pDscInfo->dp.bitsPerPixelX16 - 8)); /* ??? */ + + nvAssert((pDscInfo->dp.dscMode == NV_DSC_EVO_MODE_DUAL) || + (pDscInfo->dp.dscMode == NV_DSC_EVO_MODE_SINGLE)); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_DSC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C97D, _HEAD_SET_DSC_CONTROL, _ENABLE, _TRUE) | + DRF_NUM(C97D, _HEAD_SET_DSC_CONTROL, _FLATNESS_DET_THRESH, flatnessDetThresh) | + DRF_DEF(C97D, _HEAD_SET_DSC_CONTROL, _FULL_ICH_ERR_PRECISION, _ENABLE) | + DRF_DEF(C97D, _HEAD_SET_DSC_CONTROL, _AUTO_RESET, _DISABLE) | + DRF_DEF(C97D, _HEAD_SET_DSC_CONTROL, _FORCE_ICH_RESET, _TRUE)); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_DSC_PPS_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C97D, _HEAD_SET_DSC_PPS_CONTROL, _ENABLE, _TRUE) | + DRF_DEF(C97D, _HEAD_SET_DSC_PPS_CONTROL, _LOCATION, _VSYNC) | + DRF_DEF(C97D, _HEAD_SET_DSC_PPS_CONTROL, _FREQUENCY, _EVERY_FRAME) | + DRF_NUM(C97D, _HEAD_SET_DSC_PPS_CONTROL, _SIZE, 0x1F /* 32 PPS Dwords - 1 = 31 */)); + + +#define NV_EVO5_NUM_HEAD_SET_DSC_PPS_DATA_DWORDS \ + (((NVC97D_HEAD_SET_DSC_PPS_DATA31(0) - NVC97D_HEAD_SET_DSC_PPS_DATA0(0)) / 4) + 1) + + ct_assert(NV_EVO5_NUM_HEAD_SET_DSC_PPS_DATA_DWORDS <= ARRAY_LEN(pDscInfo->dp.pps)); + + for (i = 0; i < NV_EVO5_NUM_HEAD_SET_DSC_PPS_DATA_DWORDS; i++) { + nvDmaSetStartEvoMethod(pChannel,(NVC97D_HEAD_SET_DSC_PPS_DATA0(head) + (i * 4)), 1); + nvDmaSetEvoMethodData(pChannel, pDscInfo->dp.pps[i]); + } + + /* + * In case of DP, PPS is sent using the SDP over the Main-Link + * during the vertical blanking interval. The PPS SDP header is defined + * in DP 1.4 specification under section 2.2.5.9.1. 
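+     * The four header bytes programmed below encode the SDP ID (0x0), the
+     * SDP type (0x10, PPS), the number of payload data bytes minus one
+     * (0x7f, i.e. 128 bytes of PPS payload), and a reserved byte.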
+ */ + + nvDmaSetStartEvoMethod(pChannel, + NVC97D_HEAD_SET_DSC_PPS_HEAD(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _HEAD_SET_DSC_PPS_HEAD, _BYTE0, 0x00) | /* SDP ID = 0x0 */ + DRF_NUM(C97D, _HEAD_SET_DSC_PPS_HEAD, _BYTE1, 0x10) | /* SDP Type = 0x10 */ + DRF_NUM(C97D, _HEAD_SET_DSC_PPS_HEAD, _BYTE2, 0x7f) | /* Number of payload data bytes - 1 = 0x7F */ + DRF_NUM(C97D, _HEAD_SET_DSC_PPS_HEAD, _BYTE3, 0x00)); /* Reserved */ +} + +static void EvoSetDscParamsC9(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVDscInfoEvoRec *pDscInfo, + const enum nvKmsPixelDepth pixelDepth) +{ + if (pDscInfo->type == NV_DSC_INFO_EVO_TYPE_HDMI) { + EvoSetHdmiDscParamsC9(pDispEvo, head, pDscInfo, pixelDepth); + } else if (pDscInfo->type == NV_DSC_INFO_EVO_TYPE_DP) { + EvoSetDpDscParamsC9(pDispEvo, head, pDscInfo); + } else { + NVEvoChannelPtr pChannel = pDispEvo->pDevEvo->core; + + nvAssert(pDscInfo->type == NV_DSC_INFO_EVO_TYPE_DISABLED); + + /* Disable DSC function */ + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_DSC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C97D, _HEAD_SET_DSC_CONTROL, _ENABLE, _FALSE)); + + /* Disable PPS SDP (Secondary-Data Packet), DP won't send out PPS SDP */ + nvDmaSetStartEvoMethod(pChannel, NVC97D_HEAD_SET_DSC_PPS_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C97D, _HEAD_SET_DSC_PPS_CONTROL, _ENABLE, _FALSE)); + } +} + +static NvU32 EvoAllocSurfaceDescriptorC9( + NVDevEvoPtr pDevEvo, NVSurfaceDescriptor *pSurfaceDesc, + NvU32 memoryHandle, NvU32 localCtxDmaFlags, + NvU64 limit, + NvBool mapToDisplayRm) +{ + NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS params = { }; + NvU32 ret; + + /* + * NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR is supposed to work + * irrespective of whether the allocation is from sysmem or + * vidmem and SMMU is enabled or bypassed. 
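+     * Of the attributes it reports, only the memory aperture (and, when the
+     * surface is not mapped for display RM access, the physical offset) is
+     * consumed below.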
+ */ + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + memoryHandle, + NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR, + ¶ms, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + return ret; + } + + pSurfaceDesc->memAperture = params.memAperture; + +#if defined(NV_EVO4_PB_ALLOC_WAR) + ret = nvCtxDmaAlloc(pDevEvo, &pSurfaceDesc->ctxDmaHandle, + memoryHandle, + localCtxDmaFlags, limit); +#endif + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLog(EVO_LOG_ERROR, "nvCtxDmaAlloc failed\n"); + return ret; + } + + if (mapToDisplayRm) { + NV0041_CTRL_MAP_MEMORY_FOR_GPU_ACCESS_PARAMS mapParams = { }; + + mapParams.hSubdevice = pDevEvo->pSubDevices[0]->handle; + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + memoryHandle, + NV0041_CTRL_CMD_MAP_MEMORY_FOR_GPU_ACCESS, + &mapParams, sizeof(mapParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLog(EVO_LOG_ERROR, "NV0041_CTRL_CMD_MAP_MEMORY_FOR_GPU_ACCESS failed\n"); +#if defined(NV_EVO4_PB_ALLOC_WAR) + nvCtxDmaFree(pDevEvo, pDevEvo->deviceHandle, &pSurfaceDesc->ctxDmaHandle); +#endif + return ret; + } + + pSurfaceDesc->memOffset = mapParams.address; + pSurfaceDesc->memoryHandle = memoryHandle; + pSurfaceDesc->isMemoryMappedForDisplayAccess = TRUE; + } else { + pSurfaceDesc->memOffset = params.memOffset; + pSurfaceDesc->isMemoryMappedForDisplayAccess = FALSE; + } + + pSurfaceDesc->bValid = TRUE; + + return ret; +} + +static void EvoFreeSurfaceDescriptorC9( + NVDevEvoPtr pDevEvo, + NvU32 deviceHandle, + NVSurfaceDescriptor *pSurfaceDesc) +{ + NvU32 ret; + + if (!pSurfaceDesc->bValid) { + return; + } + + if (pSurfaceDesc->isMemoryMappedForDisplayAccess) { + NV0041_CTRL_UNMAP_MEMORY_FOR_GPU_ACCESS_PARAMS params = { }; + + params.hSubdevice = pDevEvo->pSubDevices[0]->handle; + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pSurfaceDesc->memoryHandle, + NV0041_CTRL_CMD_UNMAP_MEMORY_FOR_GPU_ACCESS, + ¶ms, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLog(EVO_LOG_ERROR, "NV0041_CTRL_CMD_UNMAP_MEMORY_FOR_GPU_ACCESS failed\n"); + } + pSurfaceDesc->isMemoryMappedForDisplayAccess = FALSE; + } + +#if defined(NV_EVO4_PB_ALLOC_WAR) + nvCtxDmaFree(pDevEvo, deviceHandle, &pSurfaceDesc->ctxDmaHandle); +#endif + pSurfaceDesc->bValid = FALSE; +} + +static NvU32 EvoBindSurfaceDescriptorC9( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NVSurfaceDescriptor *pSurfaceDesc) +{ + return NVOS_STATUS_SUCCESS; +} + +static void EvoSetTmoLutSurfaceAddressC9( + const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 offset) +{ + NvU32 addrHi, addrLo; + NvBool enable; + NvU8 target; + NvU32 value = 0; + + GetSurfaceAddress(pDevEvo, pSurfaceDesc, offset, &addrHi, &addrLo, + &enable, &target); + + nvDmaSetStartEvoMethod(pChannel, NVC97E_SET_SURFACE_ADDRESS_HI_TMO_LUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97E, _SET_SURFACE_ADDRESS_HI_TMO_LUT, _ADDRESS_HI, addrHi)); + + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_TMO_LUT, + _ADDRESS_LO, addrLo, value); + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_TMO_LUT, + _TARGET, target, value); + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_TMO_LUT, + _ENABLE, enable, value); + nvDmaSetStartEvoMethod(pChannel, NVC97E_SET_SURFACE_ADDRESS_LO_TMO_LUT, 1); + nvDmaSetEvoMethodData(pChannel, value); +} + +static void EvoSetILUTSurfaceAddressC9( + const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 offset) +{ + NvU32 addrHi, addrLo; + NvBool enable; + NvU8 target; + NvU32 value = 0; + + 
GetSurfaceAddress(pDevEvo, pSurfaceDesc, offset, &addrHi, &addrLo, + &enable, &target); + + nvDmaSetStartEvoMethod(pChannel, NVC97E_SET_SURFACE_ADDRESS_HI_ILUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97E, _SET_SURFACE_ADDRESS_HI_ILUT, _ADDRESS_HI, addrHi)); + + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_ILUT, + _ADDRESS_LO, addrLo, value); + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_ILUT, + _TARGET, target, value); + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_ILUT, + _ENABLE, enable, value); + nvDmaSetStartEvoMethod(pChannel, NVC97E_SET_SURFACE_ADDRESS_LO_ILUT, 1); + nvDmaSetEvoMethodData(pChannel, value); +} + +static void EvoSetISOSurfaceAddressC9( + const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 offset, + NvU32 idx, + NvBool isBlocklinear) +{ + NvU32 addrHi, addrLo; + NvBool enable; + NvU8 target; + NvU32 value = 0; + + GetSurfaceAddress(pDevEvo, pSurfaceDesc, offset, &addrHi, &addrLo, + &enable, &target); + + nvDmaSetStartEvoMethod(pChannel, NVC97E_SET_SURFACE_ADDRESS_HI_ISO(idx), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97E, _SET_SURFACE_ADDRESS_HI_ISO, _ADDRESS_HI, addrHi)); + + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_ISO, + _ADDRESS_LO, addrLo, value); + if (isBlocklinear) { + value = FLD_SET_DRF(C97E, _SET_SURFACE_ADDRESS_LO_ISO, + _KIND, _BLOCKLINEAR, value); + } + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_ISO, + _TARGET, target, value); + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_ISO, + _ENABLE, enable, value); + nvDmaSetStartEvoMethod(pChannel, NVC97E_SET_SURFACE_ADDRESS_LO_ISO(idx), 1); + nvDmaSetEvoMethodData(pChannel, value); +} + +static void EvoSetCoreNotifierSurfaceAddressAndControlC9( + const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 notifierOffset, + NvU32 ctrlVal) +{ + NvU32 addrHi, addrLo; + NvBool enable; + NvU8 target; + NvU32 value = 0; + + // The unit of the notifierOffset is size of the notifier. Convert it to bytes. 
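+    // (A FOUR_WORD_NVDISPLAY notifier is presumably 16 bytes, so notifier
+    // slot i would start at byte offset i * 16.)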
+ notifierOffset *= + nvKmsSizeOfNotifier(NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY, FALSE /* overlay */); + GetSurfaceAddress(pDevEvo, pSurfaceDesc, notifierOffset, &addrHi, &addrLo, + &enable, &target); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_SET_SURFACE_ADDRESS_HI_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97D, _SET_SURFACE_ADDRESS_HI_NOTIFIER, _ADDRESS_HI, addrHi)); + + value = FLD_SET_DRF_NUM(C97D, _SET_SURFACE_ADDRESS_LO_NOTIFIER, + _ADDRESS_LO, addrLo, value); + value = FLD_SET_DRF_NUM(C97D, _SET_SURFACE_ADDRESS_LO_NOTIFIER, + _TARGET, target, value); + value = FLD_SET_DRF_NUM(C97D, _SET_SURFACE_ADDRESS_LO_NOTIFIER, + _ENABLE, enable, value); + nvDmaSetStartEvoMethod(pChannel, NVC97D_SET_SURFACE_ADDRESS_LO_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, value); + + nvDmaSetStartEvoMethod(pChannel, NVC97D_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, ctrlVal); +} + +static void EvoSetWinNotifierSurfaceAddressAndControlC9( + const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 notifierOffset, + NvU32 ctrlVal) +{ + NvU32 addrHi, addrLo; + NvBool enable; + NvU8 target; + NvU32 value = 0; + + notifierOffset *= + nvKmsSizeOfNotifier(NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY, FALSE /* overlay */); + GetSurfaceAddress(pDevEvo, pSurfaceDesc, notifierOffset, &addrHi, &addrLo, + &enable, &target); + + nvDmaSetStartEvoMethod(pChannel, NVC97E_SET_SURFACE_ADDRESS_HI_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97E, _SET_SURFACE_ADDRESS_HI_NOTIFIER, _ADDRESS_HI, addrHi)); + + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_NOTIFIER, + _ADDRESS_LO, addrLo, value); + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_NOTIFIER, + _TARGET, target, value); + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_NOTIFIER, + _ENABLE, enable, value); + nvDmaSetStartEvoMethod(pChannel, NVC97E_SET_SURFACE_ADDRESS_LO_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, value); + + nvDmaSetStartEvoMethod(pChannel, NVC97E_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, ctrlVal); +} + +static void EvoSetSemaphoreSurfaceAddressAndControlC9( + const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 semaphoreOffset, + NvU32 ctrlVal) +{ + NvU32 addrHi, addrLo; + NvBool enable; + NvU8 target; + NvU32 value = 0; + + // The unit of the semaphoreOffset is size of the semaphore. Convert it to bytes. 
+ semaphoreOffset *= + nvKmsSizeOfSemaphore(NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY); + GetSurfaceAddress(pDevEvo, pSurfaceDesc, semaphoreOffset, &addrHi, &addrLo, + &enable, &target); + + nvDmaSetStartEvoMethod(pChannel, NVC97E_SET_SURFACE_ADDRESS_HI_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97E, _SET_SURFACE_ADDRESS_HI_SEMAPHORE, _ADDRESS_HI, addrHi)); + + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_SEMAPHORE, + _ADDRESS_LO, addrLo, value); + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_SEMAPHORE, + _TARGET, target, value); + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_SEMAPHORE, + _ENABLE, enable, value); + nvDmaSetStartEvoMethod(pChannel, NVC97E_SET_SURFACE_ADDRESS_LO_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, value); + + nvDmaSetStartEvoMethod(pChannel, NVC97E_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, ctrlVal); +} + +static void EvoSetAcqSemaphoreSurfaceAddressAndControlC9( + const NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + const NVSurfaceDescriptor *pSurfaceDesc, + NvU32 semaphoreOffset, + NvU32 ctrlVal) +{ + NvU32 addrHi, addrLo; + NvBool enable; + NvU8 target; + NvU32 value = 0; + + // The unit of the semaphoreOffset is size of the semaphore. Convert it to bytes. + semaphoreOffset *= + nvKmsSizeOfSemaphore(NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY); + GetSurfaceAddress(pDevEvo, pSurfaceDesc, semaphoreOffset, &addrHi, &addrLo, + &enable, &target); + + nvDmaSetStartEvoMethod(pChannel, NVC97E_SET_SURFACE_ADDRESS_HI_ACQ_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C97E, _SET_SURFACE_ADDRESS_HI_ACQ_SEMAPHORE, _ADDRESS_HI, addrHi)); + + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE, + _ADDRESS_LO, addrLo, value); + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE, + _TARGET, target, value); + value = FLD_SET_DRF_NUM(C97E, _SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE, + _ENABLE, enable, value); + nvDmaSetStartEvoMethod(pChannel, NVC97E_SET_SURFACE_ADDRESS_LO_ACQ_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, value); + + nvDmaSetStartEvoMethod(pChannel, NVC97E_SET_ACQ_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, ctrlVal); +} + +NVEvoHAL nvEvoC9 = { + EvoSetRasterParamsC9, /* SetRasterParams */ + EvoSetProcAmpC9, /* SetProcAmp */ + EvoSetHeadControlC9, /* SetHeadControl */ + NULL, /* SetHeadRefClk */ + EvoHeadSetControlORC9, /* HeadSetControlOR */ + nvEvoORSetControlC3, /* ORSetControl */ + EvoHeadSetDisplayIdC9, /* HeadSetDisplayId */ + nvEvoSetUsageBoundsC5, /* SetUsageBounds */ + nvEvoUpdateC3, /* Update */ + nvEvoIsModePossibleC3, /* IsModePossible */ + nvEvoPrePostIMPC3, /* PrePostIMP */ + nvEvoSetNotifierC3, /* SetNotifier */ + nvEvoGetCapabilitiesC6, /* GetCapabilities */ + nvEvoFlipC6, /* Flip */ + nvEvoFlipTransitionWARC6, /* FlipTransitionWAR */ + nvEvoFillLUTSurfaceC5, /* FillLUTSurface */ + EvoSetOutputLutC9, /* SetOutputLut */ + EvoSetOutputScalerC9, /* SetOutputScaler */ + EvoSetViewportPointInC9, /* SetViewportPointIn */ + EvoSetViewportInOutC9, /* SetViewportInOut */ + EvoSetCursorImageC9, /* SetCursorImage */ + nvEvoValidateCursorSurfaceC3, /* ValidateCursorSurface */ + nvEvoValidateWindowFormatC6, /* ValidateWindowFormat */ + nvEvoInitCompNotifierC3, /* InitCompNotifier */ + nvEvoIsCompNotifierCompleteC3, /* IsCompNotifierComplete */ + nvEvoWaitForCompNotifierC3, /* WaitForCompNotifier */ + EvoSetDitherC9, /* SetDither */ + EvoSetStallLockC9, /* SetStallLock */ + EvoSetDisplayRateC9, /* SetDisplayRate */ + EvoInitChannelC9, /* InitChannel 
*/ + nvEvoInitDefaultLutC5, /* InitDefaultLut */ + nvEvoInitWindowMappingC5, /* InitWindowMapping */ + nvEvoIsChannelIdleC3, /* IsChannelIdle */ + nvEvoIsChannelMethodPendingC3, /* IsChannelMethodPending */ + nvEvoForceIdleSatelliteChannelC3, /* ForceIdleSatelliteChannel */ + nvEvoForceIdleSatelliteChannelIgnoreLockC3, /* ForceIdleSatelliteChannelIgnoreLock */ + nvEvoAccelerateChannelC3, /* AccelerateChannel */ + nvEvoResetChannelAcceleratorsC3, /* ResetChannelAccelerators */ + nvEvoAllocRmCtrlObjectC3, /* AllocRmCtrlObject */ + nvEvoFreeRmCtrlObjectC3, /* FreeRmCtrlObject */ + nvEvoSetImmPointOutC3, /* SetImmPointOut */ + EvoStartHeadCRC32CaptureC9, /* StartCRC32Capture */ + EvoStopHeadCRC32CaptureC9, /* StopCRC32Capture */ + nvEvoQueryHeadCRC32_C3, /* QueryCRC32 */ + nvEvoGetScanLineC3, /* GetScanLine */ + EvoConfigureVblankSyncObjectC9, /* ConfigureVblankSyncObject */ + EvoSetDscParamsC9, /* SetDscParams */ + NULL, /* EnableMidFrameAndDWCFWatermark */ + nvEvoGetActiveViewportOffsetC3, /* GetActiveViewportOffset */ + NULL, /* ClearSurfaceUsage */ + nvEvoComputeWindowScalingTapsC5, /* ComputeWindowScalingTaps */ + nvEvoGetWindowScalingCapsC3, /* GetWindowScalingCaps */ + NULL, /* SetMergeMode */ + nvEvoSendHdmiInfoFrameC8, /* SendHdmiInfoFrame */ + nvEvoDisableHdmiInfoFrameC8, /* DisableHdmiInfoFrame */ + nvEvoSendDpInfoFrameSdpC8, /* SendDpInfoFrameSdp */ + EvoAllocSurfaceDescriptorC9, /* AllocSurfaceDescriptor */ + EvoFreeSurfaceDescriptorC9, /* FreeSurfaceDescriptor */ + EvoBindSurfaceDescriptorC9, /* BindSurfaceDescriptor */ + EvoSetTmoLutSurfaceAddressC9, /* SetTmoLutSurfaceAddress */ + EvoSetILUTSurfaceAddressC9, /* SetILUTSurfaceAddress */ + EvoSetISOSurfaceAddressC9, /* SetISOSurfaceAddress */ + EvoSetCoreNotifierSurfaceAddressAndControlC9, /* SetCoreNotifierSurfaceAddressAndControl */ + EvoSetWinNotifierSurfaceAddressAndControlC9, /* SetWinNotifierSurfaceAddressAndControl */ + EvoSetSemaphoreSurfaceAddressAndControlC9, /* SetSemaphoreSurfaceAddressAndControl */ + EvoSetAcqSemaphoreSurfaceAddressAndControlC9, /* SetAcqSemaphoreSurfaceAddressAndControl */ + { /* caps */ + TRUE, /* supportsNonInterlockedUsageBoundsUpdate */ + TRUE, /* supportsDisplayRate */ + FALSE, /* supportsFlipLockRGStatus */ + TRUE, /* needDefaultLutSurface */ + TRUE, /* hasUnorm10OLUT */ + FALSE, /* supportsImageSharpening */ + TRUE, /* supportsHDMIVRR */ + FALSE, /* supportsCoreChannelSurface */ + FALSE, /* supportsHDMIFRL */ + FALSE, /* supportsSetStorageMemoryLayout */ + TRUE, /* supportsIndependentAcqRelSemaphore */ + FALSE, /* supportsCoreLut */ + TRUE, /* supportsSynchronizedOverlayPositionUpdate */ + TRUE, /* supportsVblankSyncObjects */ + FALSE, /* requiresScalingTapsInBothDimensions */ + FALSE, /* supportsMergeMode */ + FALSE, /* supportsHDMI10BPC */ + TRUE, /* supportsDPAudio192KHz */ + TRUE, /* supportsInputColorSpace */ + TRUE, /* supportsInputColorRange */ + TRUE, /* supportsYCbCr422OverHDMIFRL */ + NV_EVO3_SUPPORTED_DITHERING_MODES, /* supportedDitheringModes */ + sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS), /* impStructSize */ + NV_EVO_SCALER_2TAPS, /* minScalerTaps */ + NV_EVO3_X_EMULATED_SURFACE_MEMORY_FORMATS_C6, /* xEmulatedSurfaceMemoryFormats */ + }, +}; + diff --git a/src/nvidia-modeset/src/nvkms-flip.c b/src/nvidia-modeset/src/nvkms-flip.c new file mode 100644 index 0000000..2a84244 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-flip.c @@ -0,0 +1,1281 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-dma.h" +#include "nvkms-evo.h" +#include "nvkms-flip.h" +#include "nvkms-hw-flip.h" +#include "nvkms-utils-flip.h" +#include "nvkms-lut.h" +#include "nvkms-prealloc.h" +#include "nvkms-private.h" +#include "nvkms-utils.h" +#include "nvkms-vrr.h" +#include "nvkms-dpy.h" +#include "nvkms-rm.h" + +/*! + * Check whether the permissions for pOpenDev allow changing the + * changedLayersMask. + */ +NvBool nvCheckLayerPermissions( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 apiHead, + const NvU8 changedLayersMask) +{ + const int dispIndex = pDevEvo->gpus[sd].pDispEvo->displayOwner; + const struct NvKmsFlipPermissions *pFlipPermissions = + nvGetFlipPermissionsFromOpenDev(pOpenDev); + const struct NvKmsModesetPermissions *pModesetPermissions = + nvGetModesetPermissionsFromOpenDev(pOpenDev); + const NvU8 allLayersMask = NVBIT(pDevEvo->apiHead[apiHead].numLayers) - 1; + NvU8 layerMask = 0; + + layerMask = pFlipPermissions->disp[dispIndex].head[apiHead].layerMask; + + /* + * If the client has modeset permissions for this disp+head, allow + * the client to also perform flips on any layer. + */ + if (!nvDpyIdListIsEmpty(pModesetPermissions->disp[dispIndex]. + head[apiHead].dpyIdList)) { + layerMask = allLayersMask; + } + + /* + * This one-liner picks out any layers which are changed but don't have + * permission (lM == layerMask, cLM == changedLayersMask): + * + * Scenario | lM | cLM | ~lM & cLM + * ----------------------------|----|-----|---------- + * Permission and changed | 1 | 1 | 0 + * Permission and unchanged | 1 | 0 | 0 + * No permission and changed | 0 | 1 | 1 + * No permission and unchanged | 0 | 0 | 0 + * + * If the result is anything other than 0, we have a change that violates + * permissions. + */ + return (~layerMask & changedLayersMask) == 0; +} + +/*! + * Check whether the flipPermissions for pOpenDev allow the flipping + * requested by NvKmsFlipCommonParams. + */ +NvBool nvCheckFlipPermissions( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 apiHead, + const struct NvKmsFlipCommonParams *pParams) +{ + const NvU8 allLayersMask = NVBIT(pDevEvo->apiHead[apiHead].numLayers) - 1; + NvU8 changedLayersMask = 0; + NvU32 layer; + + /* Changing viewPortIn or output LUT requires permission to alter all layers. 
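+     * (These settings affect how every layer on the head is presented, so a
+     * client holding permission for only a subset of layers must not be able
+     * to change them.)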
*/ + if ((pParams->viewPortIn.specified) || + (pParams->olut.specified) || + (pParams->lut.input.specified) || + (pParams->lut.output.specified)) { + return nvCheckLayerPermissions(pOpenDev, pDevEvo, + sd, apiHead, + allLayersMask); + } + + for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) { + if (nvIsLayerDirty(pParams, layer)) { + changedLayersMask |= NVBIT(layer); + } + } + + return nvCheckLayerPermissions(pOpenDev, pDevEvo, + sd, apiHead, + changedLayersMask); +} + +static void FillPostSyncptReplyOneApiHead( + NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 apiHead, + const struct NvKmsFlipCommonParams *pRequestParams, + struct NvKmsFlipCommonReplyOneHead *pReplyParams, + const struct NvKmsFlipWorkArea *pWorkArea) +{ + /* XXX[2Heads1OR] Return per hardware-head post syncpt */ + const NvU32 head = nvGetPrimaryHwHead(pDevEvo->gpus[sd].pDispEvo, apiHead); + NvU32 layer; + + /*! check for valid config */ + if ((head == NV_INVALID_HEAD) || !pDevEvo->supportsSyncpts) { + return; + } + + for (layer = 0; layer < ARRAY_LEN(pRequestParams->layer); layer++) { + const NVFlipEvoHwState *pFlipState = + &pWorkArea->sd[sd].head[head].newState; + + if (!pRequestParams->layer[layer].syncObjects.specified || + !pRequestParams->layer[layer].syncObjects.val.useSyncpt) { + continue; + } + + nvFillPostSyncptReplyOneChannel( + pDevEvo->head[head].layer[layer], + pRequestParams->layer[layer].syncObjects.val.u.syncpts.requestedPostType, + &pReplyParams->layer[layer].postSyncpt, + &pFlipState->layer[layer].syncObject); + } +} + + +static NvBool UpdateProposedFlipStateOneApiHead( + const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + const struct NvKmsFlipCommonParams *pParams, + NVProposedFlipStateOneApiHead *pProposedApiHead) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + const NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pApiHeadState->activeDpys, pDispEvo); + NvU32 layer; + + if (pParams->tf.specified) { + pProposedApiHead->dirty.hdr = TRUE; + pProposedApiHead->hdr.tf = pParams->tf.val; + } + + if (pParams->colorimetry.specified) { + pProposedApiHead->dirty.hdr = TRUE; + pProposedApiHead->hdr.dpyColor.colorimetry = pParams->colorimetry.val; + } + + if (pParams->hdrInfoFrame.specified) { + pProposedApiHead->dirty.hdr = TRUE; + pProposedApiHead->hdr.infoFrameOverride = + pParams->hdrInfoFrame.enabled; + } + + for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) { + if (pParams->layer[layer].hdr.specified) { + pProposedApiHead->dirty.hdr = TRUE; + if (pParams->layer[layer].hdr.enabled) { + pProposedApiHead->hdr.staticMetadataLayerMask |= + 1 << layer; + } else { + pProposedApiHead->hdr.staticMetadataLayerMask &= + ~(1 << layer); + } + } + } + + if (pProposedApiHead->dirty.hdr) { + // If enabling HDR output TF... + if (pProposedApiHead->hdr.tf == NVKMS_OUTPUT_TF_PQ) { + // Cannot be an SLI configuration. + // XXX HDR TODO: Test SLI Mosaic + HDR and remove this check + if (pDevEvo->numSubDevices > 1) { + return FALSE; + } + + /* NVKMS_OUTPUT_TF_PQ requires the RGB color space */ + if (pProposedApiHead->hdr.dpyColor.format != + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) { + return FALSE; + } + } + + // If enabling HDR signaling... 
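+        // (i.e. an HDR infoframe override, HDR static metadata on any
+        // layer, or BT2100 output colorimetry, per the condition below)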
+ // XXX HDR TODO: Handle other colorimetries + if (pProposedApiHead->hdr.infoFrameOverride || + (pProposedApiHead->hdr.staticMetadataLayerMask != 0) || + (pProposedApiHead->hdr.dpyColor.colorimetry == + NVKMS_OUTPUT_COLORIMETRY_BT2100)) { + const NVDpyEvoRec *pDpyEvoIter; + + // All dpys on apiHead must support HDR. + FOR_ALL_EVO_DPYS(pDpyEvoIter, + pApiHeadState->activeDpys, + pDispEvo) { + if (!nvDpyIsHDRCapable(pDpyEvoIter)) { + return FALSE; + } + } + } + + if (!nvChooseColorRangeEvo(pDpyEvo->requestedColorRange, + pProposedApiHead->hdr.dpyColor.format, + pProposedApiHead->hdr.dpyColor.bpc, + &pProposedApiHead->hdr.dpyColor.range)) { + return FALSE; + } + } + + if (pParams->viewPortIn.specified) { + pProposedApiHead->dirty.viewPortPointIn = TRUE; + pProposedApiHead->viewPortPointIn = pParams->viewPortIn.point; + } + + if (!nvValidateSetLutCommonParams(pDispEvo->pDevEvo, &pParams->lut)) { + return FALSE; + } + pProposedApiHead->lut = pParams->lut; + + return TRUE; +} + +static NvBool GetAllowVrr(const NVDevEvoRec *pDevEvo, + const struct NvKmsFlipRequestOneHead *pFlipHead, + NvU32 numFlipHeads) +{ + NvU32 sd, i; + const NVDispEvoRec *pDispEvo; + const NvU32 requestedApiHeadCount = numFlipHeads; + NvU32 activeApiHeadCount, dirtyMainLayerCount; + NvBool allowVrr = TRUE; + + if (!pDevEvo->hal->caps.supportsDisplayRate) + { + /*! + * Count active heads so we can make a decision about VRR + * and register syncpts if specified. + */ + activeApiHeadCount = dirtyMainLayerCount = 0; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 apiHead; + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + if (nvApiHeadIsActive(pDispEvo, apiHead)) { + activeApiHeadCount++; + } + } + } + + for (i = 0; i < numFlipHeads; i++) { + if (nvIsLayerDirty(&pFlipHead[i].flip, NVKMS_MAIN_LAYER)) { + dirtyMainLayerCount++; + } + } + + /* + * Deactivate VRR if only a subset of the heads are requested, + * only a subset of the heads are being flipped, or only a subset + * of the heads are allowed a VRR flip. + */ + if ((activeApiHeadCount != requestedApiHeadCount) || + (activeApiHeadCount != dirtyMainLayerCount)) { + allowVrr = FALSE; + } + } + + if (!pDevEvo->vrr.enabled) { + allowVrr = FALSE; + } + + return allowVrr; +} + +static void FillNvKmsFlipReply(NVDevEvoRec *pDevEvo, + struct NvKmsFlipWorkArea *pWorkArea, + const NvBool applyAllowVrr, + const NvS32 vrrSemaphoreIndex, + const struct NvKmsFlipRequestOneHead *pFlipHead, + NvU32 numFlipHeads, + struct NvKmsFlipReply *reply) +{ + NvU32 i; + + if (reply == NULL) { + return; + } + + for (i = 0; i < numFlipHeads; i++) { + const NvU32 sd = pFlipHead[i].sd; + const NvU32 apiHead = pFlipHead[i].head; + + FillPostSyncptReplyOneApiHead(pDevEvo, + sd, + apiHead, + &pFlipHead[i].flip, + &reply->flipHead[i], + pWorkArea); + } + + if (applyAllowVrr) { + reply->vrrFlipType = nvGetActiveVrrType(pDevEvo); + reply->vrrSemaphoreIndex = vrrSemaphoreIndex; + } else { + reply->vrrFlipType = NV_KMS_VRR_FLIP_NON_VRR; + reply->vrrSemaphoreIndex = -1; + } +} + +static void InitNvKmsFlipWorkArea(const NVDevEvoRec *pDevEvo, + struct NvKmsFlipWorkArea *pWorkArea) +{ + const NVDispEvoRec *pDispEvo; + NvU32 sd, head, apiHead; + + nvkms_memset(pWorkArea, 0, sizeof(*pWorkArea)); + + /* + * Initialize the work area. Note we take two snapshots of the + * current headState: newState and oldState. newState will + * describe the new configuration. After that is applied, we will + * refer to oldState to identify any surfaces that are no longer + * in use. 
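+     * (Surfaces referenced by oldState but no longer referenced by newState
+     * can then have their reference counts released.)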
+ */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + for (head = 0; head < ARRAY_LEN(pWorkArea->sd[sd].head); head++) { + nvInitFlipEvoHwState(pDevEvo, sd, head, + &pWorkArea->sd[sd].head[head].newState); + nvInitFlipEvoHwState(pDevEvo, sd, head, + &pWorkArea->sd[sd].head[head].oldState); + } + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVProposedFlipStateOneApiHead *pProposedApiHead = + &pWorkArea->disp[sd].apiHead[apiHead].proposedFlipState; + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + + pProposedApiHead->hdr.tf = pApiHeadState->tf; + pProposedApiHead->hdr.dpyColor = pApiHeadState->attributes.color; + pProposedApiHead->hdr.infoFrameOverride = + pApiHeadState->hdrInfoFrameOverride; + pProposedApiHead->hdr.staticMetadataLayerMask = + pApiHeadState->hdrStaticMetadataLayerMask; + + pProposedApiHead->viewPortPointIn = + pApiHeadState->viewPortPointIn; + + pProposedApiHead->lut.input.specified = + FALSE; + pProposedApiHead->lut.output.specified = + FALSE; + } + } +} + +static void CleanupNvKmsFlipWorkArea(NVDevEvoPtr pDevEvo, + struct NvKmsFlipWorkArea *pWorkArea) +{ + const NVDispEvoRec *pDispEvo; + NvU32 sd, head; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + for (head = 0; head < ARRAY_LEN(pWorkArea->sd[sd].head); head++) { + /* + * If the flip failed or wasn't committed, any TMO surfaces + * allocated by nvSetTmoLutSurfaceEvo will be left in newState with + * 1 refcnt, so free them now. + */ + nvFreeUnrefedTmoLutSurfacesEvo(pDevEvo, + &pWorkArea->sd[sd].head[head].newState, + head); + } + } +} + +static void FlipEvoOneApiHead(NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + const struct NvKmsFlipWorkArea *pWorkArea, + const NvBool allowFlipLock, + NVEvoUpdateState *pUpdateState) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + NvU32 head; + const NVProposedFlipStateOneApiHead *pProposedApiHead = + &pWorkArea->disp[sd].apiHead[apiHead].proposedFlipState; + NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pApiHeadState->activeDpys, pDispEvo); + const NVT_EDID_INFO *pInfo = &pDpyEvo->parsedEdid.info; + const NVT_HDR_STATIC_METADATA *pHdrInfo = + &pInfo->hdr_static_metadata_info; + + nvAssert(nvApiHeadIsActive(pDispEvo, apiHead)); + + if (pProposedApiHead->lut.input.specified || + pProposedApiHead->lut.output.specified) { + /* Set LUT settings */ + nvEvoSetLut(pDispEvo, apiHead, FALSE /* kickoff */, + &pProposedApiHead->lut); + nvEvoStageLUTNotifier(pDispEvo, apiHead); + } + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + nvFlipEvoOneHead(pDevEvo, sd, head, pHdrInfo, + &pWorkArea->sd[sd].head[head].newState, + allowFlipLock, + pUpdateState); + + if (pProposedApiHead->dirty.hdr) { + /* Update hardware's current colorSpace and colorRange */ + nvUpdateCurrentHardwareColorSpaceAndRangeEvo( + pDispEvo, + head, + &pProposedApiHead->hdr.dpyColor, + pUpdateState); + } + } + + if (pProposedApiHead->dirty.hdr) { + pApiHeadState->attributes.color = pProposedApiHead->hdr.dpyColor; + pApiHeadState->tf = pProposedApiHead->hdr.tf; + + pApiHeadState->hdrInfoFrameOverride = + pProposedApiHead->hdr.infoFrameOverride; + pApiHeadState->hdrStaticMetadataLayerMask = + pProposedApiHead->hdr.staticMetadataLayerMask; + + nvUpdateInfoFrames(pDpyEvo); + } + + if (pProposedApiHead->dirty.viewPortPointIn) { + pApiHeadState->viewPortPointIn = + pProposedApiHead->viewPortPointIn; + } +} + +static NvU32 
FlipEvo2Head1OrOneDisp(NVDispEvoRec *pDispEvo,
+                       struct NvKmsFlipWorkArea *pWorkArea,
+                       const NvBool skipUpdate)
+{
+    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    NvU32 flip2Heads1OrApiHeadsMask = 0x0;
+
+    for (NvU32 apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) {
+        NVDispApiHeadStateEvoRec *pApiHeadState =
+            &pDispEvo->apiHeadState[apiHead];
+        const NvBool b2Heads1Or =
+            (nvPopCount32(pApiHeadState->hwHeadsMask) >= 2);
+
+        if (!nvApiHeadIsActive(pDispEvo, apiHead) || !b2Heads1Or) {
+            continue;
+        }
+
+        nvkms_memset(&pWorkArea->updateState, 0,
+                     sizeof(pWorkArea->updateState));
+
+        FlipEvoOneApiHead(pDispEvo, apiHead, pWorkArea,
+                          TRUE /* allowFlipLock */, &pWorkArea->updateState);
+
+        /*
+         * An api-head using 2Heads1OR mode cannot be flipped with other
+         * api-heads in a single update, because each api-head using
+         * 2Heads1OR mode uses a different fliplock group, and kicking off
+         * multiple fliplock groups as part of a single update call is not
+         * supported yet.
+         */
+        pDevEvo->hal->Update(pDevEvo, &pWorkArea->updateState,
+                             TRUE /* releaseElv */);
+        nvAssert(!skipUpdate);
+
+        flip2Heads1OrApiHeadsMask |= NVBIT(apiHead);
+    }
+
+    return flip2Heads1OrApiHeadsMask;
+}
+
+/*!
+ * Program a flip on all requested layers on all requested heads on
+ * all requested disps in NvKmsFlipRequest.
+ *
+ * \param[in]  skipUpdate     Update software state tracking, but don't kick
+ *                            off or perform an UPDATE.
+ *
+ *                            Note that this should be used only when the
+ *                            satellite channels (including the cursor) are
+ *                            disabled -- only the core channel should be
+ *                            displaying anything, and only the core surface
+ *                            should be specified in a skipUpdate flip.
+ * \param[in]  allowFlipLock  Whether this update should use fliplocked base
+ *                            flips. This is used on nvdisplay to set the
+ *                            interlock mask to include all fliplocked
+ *                            channels if necessary. This should currently
+ *                            only be set when this flip was initiated
+ *                            through NVKMS_IOCTL_FLIP.
+ */
+NvBool nvFlipEvo(NVDevEvoPtr pDevEvo,
+                 const struct NvKmsPerOpenDev *pOpenDev,
+                 const struct NvKmsFlipRequestOneHead *pFlipHead,
+                 NvU32 numFlipHeads,
+                 NvBool commit,
+                 struct NvKmsFlipReply *reply,
+                 NvBool skipUpdate,
+                 NvBool allowFlipLock)
+{
+    NvS32 vrrSemaphoreIndex = -1;
+    NvU32 apiHead, sd;
+    NvBool ret = FALSE;
+    enum NvKmsFlipResult result = NV_KMS_FLIP_RESULT_INVALID_PARAMS;
+    NvBool changed = FALSE;
+    NvBool vrrOnSubsetOfHeads;
+    NVDispEvoPtr pDispEvo;
+    const NvBool allowVrrDev =
+        GetAllowVrr(pDevEvo, pFlipHead, numFlipHeads);
+    struct NvKmsFlipWorkArea *pWorkArea;
+    NvU32 i;
+
+    vrrOnSubsetOfHeads = pDevEvo->hal->caps.supportsDisplayRate;
+
+    /*
+     * Do not execute NVKMS_IOCTL_FLIP if the display channel has not yet
+     * been transitioned from vbios to driver. A modeset is required to
+     * transition the display channel from vbios to driver.
+     *
+     * The NVKMS client should perform a modeset before initiating
+     * NVKMS_IOCTL_FLIP requests.
+     */
+    if (pDevEvo->coreInitMethodsPending) {
+        if (reply) {
+            reply->flipResult = result;
+        }
+        return ret;
+    }
+
+    pWorkArea = nvPreallocGet(pDevEvo, PREALLOC_TYPE_FLIP_WORK_AREA,
+                              sizeof(*pWorkArea));
+    InitNvKmsFlipWorkArea(pDevEvo, pWorkArea);
+
+    NvU32 allowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES];
+    NvU32 applyAllowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES];
+
+    nvkms_memset(allowVrrApiHeadMasks, 0, sizeof(allowVrrApiHeadMasks));
+    nvkms_memset(applyAllowVrrApiHeadMasks, 0, sizeof(applyAllowVrrApiHeadMasks));
+
+    /* Validate the flip parameters and update the work area.
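+     * Per requested head this covers: the head being active, the client
+     * holding flip permission for every dirty layer, and the proposed
+     * api-head and per-hardware-head flip states being constructible.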
*/
+
+    /*
+     * In the pre-displayrate case, any head requesting a VRR flip means
+     * all heads within pDevEvo request a VRR flip. Similarly, any head
+     * with a dirty main layer means allowVrr must be applied to all heads.
+     */
+    NvBool anyAllowVrrHead = FALSE;
+    NvBool anyDirtyMainLayer = FALSE;
+    for (i = 0; i < numFlipHeads; i++) {
+        if (pFlipHead[i].flip.allowVrr) {
+            anyAllowVrrHead = TRUE;
+        }
+        if (nvIsLayerDirty(&pFlipHead[i].flip, NVKMS_MAIN_LAYER)) {
+            anyDirtyMainLayer = TRUE;
+        }
+    }
+
+    for (i = 0; i < numFlipHeads; i++) {
+        const NvU32 apiHead = pFlipHead[i].head;
+        const NvU32 sd = pFlipHead[i].sd;
+        const NvBool allowVrrHead = pFlipHead[i].flip.allowVrr;
+        const NvBool dirtyMainLayer = nvIsLayerDirty(&pFlipHead[i].flip, NVKMS_MAIN_LAYER);
+        const NvBool allowVrr = allowVrrDev &&
+            (vrrOnSubsetOfHeads ? allowVrrHead : anyAllowVrrHead);
+
+        const NvBool applyAllowVrr = (vrrOnSubsetOfHeads ?
+                                      dirtyMainLayer : anyDirtyMainLayer);
+
+        NVDispEvoPtr pDispEvo = pDevEvo->pDispEvo[sd];
+        NvU32 head;
+        const NVDispApiHeadStateEvoRec *pApiHeadState =
+            &pDispEvo->apiHeadState[apiHead];
+
+        if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
+            goto done;
+        }
+
+        if (!nvCheckFlipPermissions(pOpenDev, pDevEvo, sd, apiHead,
+                                    &pFlipHead[i].flip)) {
+            goto done;
+        }
+
+        if (!UpdateProposedFlipStateOneApiHead(
+                pDispEvo,
+                apiHead,
+                &pFlipHead[i].flip,
+                &pWorkArea->disp[sd].apiHead[apiHead].proposedFlipState)) {
+            goto done;
+        }
+
+        FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
+            if (!nvAssignNVFlipEvoHwState(pDevEvo,
+                                          pOpenDev,
+                                          sd,
+                                          head,
+                                          &pFlipHead[i].flip,
+                                          allowVrr,
+                                          &pWorkArea->sd[sd].head[head].newState)) {
+                goto done;
+            }
+        }
+        if (applyAllowVrr) {
+            applyAllowVrrApiHeadMasks[sd] |= (1 << apiHead);
+            if (allowVrr) {
+                allowVrrApiHeadMasks[sd] |= (1 << apiHead);
+            }
+        }
+        pWorkArea->sd[sd].changed = TRUE;
+        changed = TRUE;
+    }
+
+    /* If nothing changed, fail. */
+
+    if (!changed) {
+        goto done;
+    }
+
+    ret = nvAllocatePreFlipBandwidth(pDevEvo, pWorkArea);
+    if (!ret) {
+        goto done;
+    }
+
+    /* XXX: Fail the flip if a LUT update is in progress.
+     *
+     * Really, we should have a more robust system for this, but currently, the
+     * only user of the LUT parameter to the flip IOCTL is nvidia-drm, which
+     * waits for flips to be complete anyway.  We should actually find a way to
+     * properly queue as many LUT-changing flips as we support queued flips in
+     * general.
+     *
+     * This failure returns NV_KMS_FLIP_RESULT_IN_PROGRESS rather than
+     * NV_KMS_FLIP_RESULT_INVALID_PARAMS.
+     *
+     * See bug 4054546 for efforts to update this system.
+     */
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        pDispEvo = pDevEvo->gpus[sd].pDispEvo;
+        for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
+            const NVProposedFlipStateOneApiHead *pProposedApiHead =
+                &pWorkArea->disp[sd].apiHead[apiHead].proposedFlipState;
+
+            if ((pProposedApiHead->lut.input.specified ||
+                 pProposedApiHead->lut.output.specified) &&
+                !nvEvoIsLUTNotifierComplete(pDispEvo, apiHead)) {
+
+                if (commit) {
+                    nvEvoLogDispDebug(
+                        pDispEvo,
+                        EVO_LOG_ERROR,
+                        "Flip request with LUT parameter on API Head %d while LUT update outstanding",
+                        apiHead);
+                }
+
+                result = NV_KMS_FLIP_RESULT_IN_PROGRESS;
+                goto done;
+            }
+        }
+    }
+
+    if (!commit) {
+        ret = NV_TRUE;
+        result = NV_KMS_FLIP_RESULT_SUCCESS;
+        goto done;
+    }
+
+    if (!nvPrepareToDoPreFlip(pDevEvo, pWorkArea)) {
+        goto done;
+    }
+
+    /*
+     * At this point, something changed on at least one head of one
+     * subdevice, and has been validated.
Apply the request to our + * hardware and software state. We must not fail beyond this + * point. + */ + + ret = TRUE; + result = NV_KMS_FLIP_RESULT_SUCCESS; + + nvPreFlip(pDevEvo, pWorkArea, applyAllowVrrApiHeadMasks, + allowVrrApiHeadMasks, skipUpdate); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NvU32 flip2Heads1OrApiHeadsMask = 0x0; + + if (!pWorkArea->sd[sd].changed) { + continue; + } + + pDispEvo = pDevEvo->gpus[sd].pDispEvo; + + flip2Heads1OrApiHeadsMask = + FlipEvo2Head1OrOneDisp(pDispEvo, pWorkArea, skipUpdate); + + nvkms_memset(&pWorkArea->updateState, 0, + sizeof(pWorkArea->updateState)); + + /* + * Ensure that we only commit the LUT notifiers staged in this + * nvFlipEvo call. + */ + nvEvoClearStagedLUTNotifiers(pDispEvo); + + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + if (!nvApiHeadIsActive(pDispEvo, apiHead) || + ((NVBIT(apiHead) & flip2Heads1OrApiHeadsMask) != 0x0)) { + continue; + } + + FlipEvoOneApiHead(pDispEvo, apiHead, pWorkArea, allowFlipLock, + &pWorkArea->updateState); + } + + if (!skipUpdate) { + nvEvoFlipUpdate(pDispEvo, &pWorkArea->updateState); + } + } + + nvPostFlip(pDevEvo, pWorkArea, skipUpdate, applyAllowVrrApiHeadMasks, + &vrrSemaphoreIndex); + + NvBool replyApplyVrr = NV_FALSE; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (applyAllowVrrApiHeadMasks[sd] > 0) { + replyApplyVrr = NV_TRUE; + break; + } + } + + FillNvKmsFlipReply(pDevEvo, pWorkArea, replyApplyVrr, vrrSemaphoreIndex, + pFlipHead, numFlipHeads, reply); + + /* fall through */ + +done: + CleanupNvKmsFlipWorkArea(pDevEvo, pWorkArea); + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_FLIP_WORK_AREA); + if (reply) { + reply->flipResult = result; + } + + return ret; +} + +void nvApiHeadGetLayerSurfaceArray(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + const NvU32 layer, + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES]) +{ + + const NvU32 sd = pDispEvo->displayOwner; + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + NvU32 head; + NvBool firstHead; + + nvAssert(apiHead != NV_INVALID_HEAD); + + firstHead = NV_TRUE; + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + const NVEvoSubDevHeadStateRec *pSdHeadState = + &pDispEvo->pDevEvo->gpus[sd].headState[head]; + NvU8 eye; + + if (firstHead) { + for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) { + pSurfaceEvos[eye] = + pSdHeadState->layer[layer].pSurfaceEvo[eye]; + } + } else { + for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) { + nvAssert(pSurfaceEvos[eye] == + pSdHeadState->layer[layer].pSurfaceEvo[eye]); + } + } + + firstHead = NV_FALSE; + } +} + +void nvApiHeadGetCursorInfo(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + NVSurfaceEvoPtr *ppSurfaceEvo, + NvS16 *x, NvS16 *y) +{ + + const NvU32 sd = pDispEvo->displayOwner; + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + NvU32 head; + NvBool firstHead; + + nvAssert(apiHead != NV_INVALID_HEAD); + + firstHead = NV_TRUE; + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + const NVEvoSubDevHeadStateRec *pSdHeadState = + &pDispEvo->pDevEvo->gpus[sd].headState[head]; + + if (firstHead) { + *ppSurfaceEvo = pSdHeadState->cursor.pSurfaceEvo; + *x = pSdHeadState->cursor.x; + *y = pSdHeadState->cursor.y; + } else { + nvAssert(*ppSurfaceEvo == pSdHeadState->cursor.pSurfaceEvo); + nvAssert(*x == pSdHeadState->cursor.x); + nvAssert(*y == pSdHeadState->cursor.y); + } + + firstHead = NV_FALSE; + } +} + +void nvApiHeadSetViewportPointIn(const 
NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + const NvU16 x, + const NvU16 y) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVEvoUpdateState updateState = { }; + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + NvU16 hwViewportInWidth; + NvU32 head; + NvBool firstHead; + + nvAssert(apiHead != NV_INVALID_HEAD); + + firstHead = NV_TRUE; + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = + &pHeadState->timings; + + if (firstHead) { + hwViewportInWidth = pTimings->viewPort.in.width; + } else { + nvAssert(hwViewportInWidth == pTimings->viewPort.in.width); + } + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetViewportPointIn(pDevEvo, head, + x + (hwViewportInWidth * pHeadState->mergeHeadSection), y, + &updateState); + nvPopEvoSubDevMask(pDevEvo); + + firstHead = NV_FALSE; + } + + if (!firstHead) { + nvEvoUpdateAndKickOff(pDispEvo, FALSE /* sync */, &updateState, + TRUE /* releaseElv */); + } +} + +NvU32 nvApiHeadGetActiveViewportOffset(NVDispEvoRec *pDispEvo, + NvU32 apiHead) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + NvU32 head; + NvU32 offset = 0; + NvBool firstHead; + + nvAssert(apiHead != NV_INVALID_HEAD); + + firstHead = NV_TRUE; + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + if (firstHead) { + offset = pDevEvo->hal->GetActiveViewportOffset(pDispEvo, head); + } else { + nvAssert(offset == pDevEvo->hal->GetActiveViewportOffset(pDispEvo, head)); + } + firstHead = NV_FALSE; + } + + return offset; +} + +void nvApiHeadIdleMainLayerChannels(NVDevEvoRec *pDevEvo, + const NvU32 apiHeadMaskPerSd[NVKMS_MAX_SUBDEVICES]) +{ + NVEvoChannelMask idleChannelMaskPerSd[NVKMS_MAX_SUBDEVICES] = { }; + const NVDispEvoRec *pDispEvo; + NvU32 dispIndex, apiHead; + NvBool found = FALSE; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + NvU32 head; + + if ((apiHeadMaskPerSd[pDispEvo->displayOwner] & + NVBIT(apiHead)) == 0x0) { + continue; + } + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + NVEvoChannelPtr pMainLayerChannel = + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER]; + idleChannelMaskPerSd[pDispEvo->displayOwner] |= + pMainLayerChannel->channelMask; + found = TRUE; + } + } + } + + if (!found) { + return; + } + + nvIdleMainLayerChannels(pDevEvo, idleChannelMaskPerSd, + FALSE /* allowForceIdle */); +} + +void nvApiHeadUpdateFlipLock(NVDevEvoRec *pDevEvo, + const NvU32 apiHeadMaskPerSd[NVKMS_MAX_SUBDEVICES], + const NvBool enable) +{ + NvU32 dispIndex; + NVDispEvoPtr pDispEvo; + NvU32 headMaskPerSd[NVKMS_MAX_SUBDEVICES] = { }; + NVEvoChannelMask channelMaskPerSd[NVKMS_MAX_SUBDEVICES] = { }; + NvBool found = FALSE; + + /* Determine which channels need to enable or disable fliplock. 
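+     * Two masks are collected per subdevice below: headMaskPerSd, consumed
+     * by nvToggleFlipLockPerDisp(), and channelMaskPerSd, used to idle the
+     * corresponding main layer channels first.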
*/ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 apiHead; + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + NvU32 head; + + if ((apiHeadMaskPerSd[pDispEvo->displayOwner] & + NVBIT(apiHead)) == 0x0) { + continue; + } + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + NVEvoChannelPtr pMainLayerChannel = + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER]; + + if (!nvNeedToToggleFlipLock(pDispEvo, head, enable)) { + continue; + } + + if (enable) { + /* + * Override the prohibition of fliplock on pDispEvos with + * headsurface enabled (calculated earlier in + * HsConfigAllowFlipLock) to allow enabling fliplock for + * headSurface swapgroups. + */ + nvAllowFlipLockEvo(pDispEvo, TRUE /* allowFlipLock */); + } + + headMaskPerSd[pDispEvo->displayOwner] |= NVBIT(head); + channelMaskPerSd[pDispEvo->displayOwner] |= + pMainLayerChannel->channelMask; + found = TRUE; + } + } + } + + if (!found) { + return; + } + + /* + * Wait for all base channels that are enabling/disabling fliplock to be + * idle. This shouldn't timeout if we're enabling fliplock while bringing + * up swapgroups on a new head. + */ + nvIdleMainLayerChannels(pDevEvo, channelMaskPerSd, !enable /* forceIdle */); + + /* Now that all channels are idle, update fliplock. */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + nvToggleFlipLockPerDisp(pDispEvo, + headMaskPerSd[pDispEvo->displayOwner], + enable); + } +} + +NvBool nvIdleMainLayerChannelCheckIdleOneApiHead(NVDispEvoPtr pDispEvo, + NvU32 apiHead) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + NvU32 head; + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + NVEvoChannelPtr pMainLayerChannel = + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER]; + NvBool isMethodPending = FALSE; + NvBool ret; + + ret = pDevEvo->hal->IsChannelMethodPending(pDevEvo, pMainLayerChannel, + pDispEvo->displayOwner, &isMethodPending); + + if (ret && isMethodPending) { + return FALSE; + } + } + + return TRUE; +} + +NvU32 nvCollectSurfaceUsageMaskOneApiHead(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + NVSurfaceEvoPtr pSurfaceEvo) +{ + NvU32 usageMaskOneHead = 0x0; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + NvU32 head; + + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + return 0; + } + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + const NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[sd].headState[head]; + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + const NVFlipChannelEvoHwState *pLayerFlipState = + &pSdHeadState->layer[layer]; + const NVFlipSyncObjectEvoHwState *pSyncObject = + &pLayerFlipState->syncObject; + NvU32 usageMaskOneLayer = 0x0; + + if ((pSurfaceEvo == pLayerFlipState->pSurfaceEvo[NVKMS_LEFT]) || + (pSurfaceEvo == pLayerFlipState->pSurfaceEvo[NVKMS_RIGHT])) { + usageMaskOneLayer = FLD_SET_DRF(_SURFACE, _USAGE_MASK_LAYER, + _SCANOUT, _ENABLE, usageMaskOneLayer); + } + + if(pSurfaceEvo == + pLayerFlipState->completionNotifier.surface.pSurfaceEvo) { + usageMaskOneLayer = FLD_SET_DRF(_SURFACE, _USAGE_MASK_LAYER, + _NOTIFIER, _ENABLE, usageMaskOneLayer); + } + + if ((!pLayerFlipState->syncObject.usingSyncpt) && + (pSurfaceEvo == + 
pSyncObject->u.semaphores.acquireSurface.pSurfaceEvo) && + (pSurfaceEvo == + pSyncObject->u.semaphores.releaseSurface.pSurfaceEvo)) { + usageMaskOneLayer = FLD_SET_DRF(_SURFACE, _USAGE_MASK_LAYER, + _SEMAPHORE, _ENABLE, usageMaskOneLayer); + } + + usageMaskOneHead = FLD_IDX_SET_DRF_NUM(_SURFACE, _USAGE_MASK, + _LAYER, layer, usageMaskOneLayer, usageMaskOneHead); + } + + if (pSurfaceEvo == pSdHeadState->cursor.pSurfaceEvo) { + usageMaskOneHead = FLD_SET_DRF(_SURFACE, _USAGE_MASK, + _CURSOR, _ENABLE, usageMaskOneHead); + } + } + + return usageMaskOneHead; +} + +void nvIdleLayerChannels(NVDevEvoRec *pDevEvo, + NvU32 layerMaskPerSdApiHead[NVKMS_MAX_SUBDEVICES][NVKMS_MAX_HEADS_PER_DISP]) +{ + NVEvoChannelMask channelMaskPerSd[NVKMS_MAX_SUBDEVICES] = { }; + const NVDispEvoRec *pDispEvo; + NvU32 sd; + NvU64 startTime = 0; + const NvU32 timeout = 500000; // .5 seconds + NvBool allIdle; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + NvU32 head; + + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + continue; + } + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + for (NvU32 layer = 0; + layer < pDevEvo->head[head].numLayers; layer++) { + if ((NVBIT(layer) & + layerMaskPerSdApiHead[sd][apiHead]) != 0x0) { + channelMaskPerSd[sd] |= + pDevEvo->head[head].layer[layer]->channelMask; + } + } + } + } + } + + do { + allIdle = TRUE; + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (NvU32 head = 0; head < pDevEvo->numHeads; head++) { + NvU32 layer; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + for (layer = 0; + layer < pDevEvo->head[head].numLayers; layer++) { + NVEvoChannelPtr pLayerChannel = + pDevEvo->head[head].layer[layer]; + NvBool isMethodPending; + + if ((pLayerChannel->channelMask & + channelMaskPerSd[sd]) == 0x0) { + continue; + } + + if (pDevEvo->hal->IsChannelMethodPending(pDevEvo, + pLayerChannel, sd, &isMethodPending) && + isMethodPending) { + + allIdle = FALSE; + } else { + /* This has been completed, no need to keep trying */ + channelMaskPerSd[sd] &= ~pLayerChannel->channelMask; + } + } + } + } + + if (!allIdle) { + if (nvExceedsTimeoutUSec(pDevEvo, &startTime, timeout)) { + break; + } + nvkms_yield(); + } + } while (!allIdle); + + /* If we timed out above, force things to be idle. */ + if (!allIdle) { + NVEvoIdleChannelState idleChannelState = { }; + NvBool tryToForceIdle = FALSE; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (NvU32 head = 0; head < pDevEvo->numHeads; head++) { + NvU32 layer; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + for (layer = 0; + layer < pDevEvo->head[head].numLayers; layer++) { + NVEvoChannelPtr pLayerChannel = + pDevEvo->head[head].layer[layer]; + + if ((pLayerChannel->channelMask & + channelMaskPerSd[sd]) != 0x0) { + idleChannelState.subdev[sd].channelMask |= + pLayerChannel->channelMask; + tryToForceIdle = TRUE; + } + } + } + } + + if (tryToForceIdle) { + NvBool ret = pDevEvo->hal->ForceIdleSatelliteChannel(pDevEvo, + &idleChannelState); + if (!ret) { + nvAssert(ret); + } + } + } +} + +/* + * XXX NVKMS TODO + * Make the sync more efficient: we only need to sync if the + * in-flight methods flip away from this surface. + */ +void nvEvoClearSurfaceUsage(NVDevEvoRec *pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo) +{ + NvU32 head; + + /* + * If the core channel is no longer allocated, we don't need to + * sync. 
This assumes the channels are allocated/deallocated
+ * together.
+ */
+    if (pDevEvo->core) {
+
+        if (pDevEvo->hal->ClearSurfaceUsage != NULL) {
+            pDevEvo->hal->ClearSurfaceUsage(pDevEvo, pSurfaceEvo);
+        }
+
+        nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__);
+
+        for (head = 0; head < pDevEvo->numHeads; head++) {
+            NvU32 layer;
+
+            for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+                NVEvoChannelPtr pChannel =
+                    pDevEvo->head[head].layer[layer];
+
+                nvRMSyncEvoChannel(pDevEvo, pChannel, __LINE__);
+            }
+        }
+    }
+}
+
+NvBool nvIdleBaseChannelOneApiHead(NVDispEvoRec *pDispEvo, NvU32 apiHead,
+                                   NvBool *pStoppedBase)
+{
+    NvBool ret = TRUE;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    const NVDispApiHeadStateEvoRec *pApiHeadState =
+        &pDispEvo->apiHeadState[apiHead];
+    NvU32 head;
+
+    *pStoppedBase = FALSE;
+    FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
+        NvBool stoppedBase = FALSE;
+        if (!nvRMIdleBaseChannel(pDevEvo, head,
+                                 pDispEvo->displayOwner, &stoppedBase)) {
+            ret = FALSE;
+        } else if (stoppedBase) {
+            *pStoppedBase = TRUE;
+        }
+    }
+
+    return ret;
+}
diff --git a/src/nvidia-modeset/src/nvkms-framelock.c b/src/nvidia-modeset/src/nvkms-framelock.c
new file mode 100644
index 0000000..5ed8fbe
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-framelock.c
@@ -0,0 +1,2396 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms-framelock.h"
+#include "nvkms-dpy.h"
+#include "nvkms-utils.h"
+#include "nvkms-evo.h"
+#include "nvkms-rm.h"
+#include "nvkms-rmapi.h"
+
+#include "nvkms-private.h" /* nvSendDpyAttributeChangedEventEvo() */
+
+#include <class/cl30f1.h>
+#include <ctrl/ctrl0000/ctrl0000gsync.h> /* NV0000_CTRL_CMD_GSYNC_GET_ATTACHED_IDS */
+#include <ctrl/ctrl30f1.h>
+#include "nvos.h"
+
+static NvBool FrameLockUseHouseSyncGetSupport(NVFrameLockEvoPtr pFrameLockEvo,
+                                              NvU32 *val);
+static NvBool FrameLockSetPolarity(
+    NVFrameLockEvoPtr pFrameLockEvo,
+    enum NvKmsFrameLockAttributePolarityValue val);
+static NvBool HouseSyncOutputModeUsable(const NVFrameLockEvoRec *pFrameLockEvo);
+
+/*!
+ * Handle framelock sync gain/loss events triggered from resman.
+ *
+ * When RM sends an event notification that's handled by FrameLockEvent,
+ * that function schedules a timer to service that event notification.
+ * These timers are serviced out of order, though; we may receive a + * SYNC_LOSS event followed by a SYNC_GAIN event, but our scheduled + * callbacks may be called in the reverse order. + * + * Since we can't trust that events were serviced in order, this function + * responds to every sync gain or loss event by querying the actual + * sync status across all GPUs from RM and updating our cached sync status + * and notifying clients if necessary. + */ +static void +FrameLockHandleSyncEvent(void *dataPtr, NvU32 dataU32) +{ + NVDispEvoPtr pDispEvo = dataPtr; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvU32 connectorIndex = pDispEvo->framelock.connectorIndex; + NvBool syncReadyCurrent = FALSE; + NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS statusParams = { 0 }; + + statusParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC, + &statusParams, + sizeof(statusParams)) != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to query gsync status after event"); + } else { + if (statusParams.bTiming && statusParams.bSyncReady) { + syncReadyCurrent = TRUE; + } + } + + // Update syncReadyGpuMask for consistency with non-NVKMS path, although + // it is currently unused. + if (syncReadyCurrent) { + pFrameLockEvo->syncReadyGpuMask |= (1 << connectorIndex); + } else { + pFrameLockEvo->syncReadyGpuMask &= ~(1 << connectorIndex); + } + + if (syncReadyCurrent != pFrameLockEvo->syncReadyLast) { + pFrameLockEvo->syncReadyLast = syncReadyCurrent; + nvSendFrameLockAttributeChangedEventEvo( + pFrameLockEvo, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY, + pFrameLockEvo->syncReadyLast); + } +} + +/*! + * Receive framelock events from resman. + * + * This function is registered as a kernel callback function from + * resman. + * + * However, it is called with resman's context (resman locks held, etc). + * Schedule deferred work, so that we can process the event without resman's + * encumbrances. + */ +static void FrameLockEvent(void *arg, void *pEventDataVoid, + NvU32 hEvent, + NvU32 Data, NV_STATUS Status) +{ + static nvkms_timer_proc_t *callbackTable[] = { + [NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(0)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(1)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(2)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(3)] = FrameLockHandleSyncEvent, + + [NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(0)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(1)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(2)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(3)] = FrameLockHandleSyncEvent, + }; + + const NvNotification *pNotifyData = pEventDataVoid; + NvU32 notifyIndex; + + /* callbackTable[] assumes at most four connectors per gsync */ + ct_assert(NV30F1_GSYNC_CONNECTOR_COUNT == 4); + + if (pNotifyData == NULL) { + nvAssert(!"Invalid pNotifyData from resman"); + return; + } + + notifyIndex = pNotifyData->info32; + + if ((notifyIndex >= ARRAY_LEN(callbackTable)) || + (callbackTable[notifyIndex] == NULL)) { + nvAssert(!"Invalid notifyIndex from resman"); + return; + } + + (void) nvkms_alloc_timer_with_ref_ptr( + callbackTable[notifyIndex], /* callback */ + arg, /* argument (this is a ref_ptr to a pDispEvo) */ + 0, /* unused */ + 0); /* timeout (i.e., service as soon as possible) */ +} + +/*! + * Free all events and handles allocated in FrameLockCreateEvents(). 
+ */ +static void FrameLockDestroyEvents(NVDispEvoPtr pDispEvo) +{ + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + unsigned int i; + + if (pFrameLockEvo == NULL) { + return; + } + + for (i = 0; i < NV_FRAMELOCK_NUM_EVENTS; i++) { + if (pDispEvo->framelock.gsyncEvent[i].handle) { + nvRmApiFree(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + pDispEvo->framelock.gsyncEvent[i].handle); + nvFreeUnixRmHandle(&pDispEvo->pDevEvo->handleAllocator, + pDispEvo->framelock.gsyncEvent[i].handle); + pDispEvo->framelock.gsyncEvent[i].handle = 0; + } + } +} + +/*! + * Allocate and configure all events and handles associated with them. + */ +static NvBool FrameLockCreateEvents(NVDispEvoPtr pDispEvo) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + const NvU32 connectorIndex = pDispEvo->framelock.connectorIndex; + unsigned int i; + + if (pDispEvo->pFrameLockEvo == NULL) { + return TRUE; + } + + nvAssert(connectorIndex < NV30F1_GSYNC_CONNECTOR_COUNT); + + /* We should only get here on hardware that has per-connector events */ + nvAssert(!(pFrameLockEvo->caps & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_ONLY_PRIMARY_CONNECTOR_EVENT)); + + for (i = 0; i < NV_FRAMELOCK_NUM_EVENTS; i++) { + NvU32 notifier; + NvBool ret; + + switch (i) { + case NV_FRAMELOCK_SYNC_LOSS: + notifier = NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(connectorIndex); + break; + case NV_FRAMELOCK_SYNC_GAIN: + notifier = NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(connectorIndex); + break; + default: + nvAssert(!"Unknown gsync event index"); + continue; + } + + pDispEvo->framelock.gsyncEvent[i].handle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + ret = TRUE; + + if (!nvRmRegisterCallback(pDevEvo, + &pDispEvo->framelock.gsyncEvent[i].callback, + pDispEvo->ref_ptr, + pFrameLockEvo->device, + pDispEvo->framelock.gsyncEvent[i].handle, + FrameLockEvent, + notifier)) { + ret = FALSE; + } + + if (!ret) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Failed to register for framelock event %d", i); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDispEvo->framelock.gsyncEvent[i].handle); + pDispEvo->framelock.gsyncEvent[i].handle = 0; + goto noEvents; + } + } + + return TRUE; + +noEvents: + + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Failed to register for framelock events"); + + FrameLockDestroyEvents(pDispEvo); + + return FALSE; +} + +/*! + * Bind a pSubDev to a pFrameLock. + */ +static void BindGpuToFrameLock(NVDevEvoPtr pDevEvo, + const NvU32 gpuId, + NVFrameLockEvoPtr pFrameLockEvo, + NvU32 connectorIndex) +{ + NVDispEvoPtr pDispEvo; + unsigned int dispIndex; + + if (pFrameLockEvo->nGpuIds >= ARRAY_LEN(pFrameLockEvo->gpuIds)) { + return; + } + + pFrameLockEvo->gpuIds[pFrameLockEvo->nGpuIds] = gpuId; + pFrameLockEvo->nGpuIds++; + + /* + * If a disp exists for this subdevice, wire it up. + * Note that this should not happen for SLI non-display-owners. + */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + if (nvGpuIdOfDispEvo(pDispEvo) != gpuId) { + continue; + } + + pDispEvo->pFrameLockEvo = pFrameLockEvo; + + pDispEvo->framelock.connectorIndex = connectorIndex; + + pFrameLockEvo->connectedGpuMask |= (1 << connectorIndex); + pFrameLockEvo->syncReadyGpuMask &= ~(1 << connectorIndex); + + /* Set up stereo synchronization events */ + FrameLockCreateEvents(pDispEvo); + } +} + +/*! + * Break the binding of pSubDev and pDisp to pFrameLock that we + * created in BindGpuToFrameLock(). 
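+ * The gpuId is also removed from pFrameLockEvo->gpuIds[], with the rest of
+ * the array compacted down over it.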
+ */ +static void UnbindGpuFromFrameLock(NVDevEvoPtr pDevEvo, + const NvU32 gpuId, + NVFrameLockEvoPtr pFrameLockEvo) +{ + NVDispEvoPtr pDispEvo; + unsigned int dispIndex; + unsigned int gpu, j; + + for (gpu = 0; gpu < pFrameLockEvo->nGpuIds; gpu++) { + if (pFrameLockEvo->gpuIds[gpu] == gpuId) { + break; + } + } + + if (gpu == pFrameLockEvo->nGpuIds) { + return; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + const NvU32 connectorIndex = pDispEvo->framelock.connectorIndex; + + if (nvGpuIdOfDispEvo(pDispEvo) != gpuId) { + continue; + } + + FrameLockDestroyEvents(pDispEvo); + + pFrameLockEvo->connectedGpuMask &= ~(1 << connectorIndex); + pFrameLockEvo->syncReadyGpuMask &= ~(1 << connectorIndex); + + pDispEvo->framelock.connectorIndex = 0; + + pDispEvo->pFrameLockEvo = NULL; + } + + for (j = gpu; j < (pFrameLockEvo->nGpuIds - 1); j++) { + pFrameLockEvo->gpuIds[j] = pFrameLockEvo->gpuIds[j+1]; + } + + pFrameLockEvo->nGpuIds--; +} + +/*! + * Find the NVFrameLockEvoPtr with the specified gsyncId. + */ +static NVFrameLockEvoPtr FindFrameLock(NvU32 gsyncId) +{ + NVFrameLockEvoPtr pFrameLockEvo; + + FOR_ALL_EVO_FRAMELOCKS(pFrameLockEvo) { + if (pFrameLockEvo->gsyncId == gsyncId) { + return pFrameLockEvo; + } + } + + return NULL; +} + +/*! + * Return whether the NVDevEvoPtr contains a GPU with the specified gpuId. + */ +static NvBool GpuIdInDevEvo(NVDevEvoPtr pDevEvo, NvU32 gpuId) +{ + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (pDevEvo->pSubDevices[sd]->gpuId == gpuId) { + return TRUE; + } + } + + return FALSE; +} + +/*! + * Free the pFrameLock object. + */ +static void FreeFrameLockEvo(NVFrameLockEvoPtr pFrameLockEvo) +{ + if (pFrameLockEvo == NULL) { + return; + } + + if (pFrameLockEvo->device != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pFrameLockEvo->device); + + nvFreeUnixRmHandle(&pFrameLockEvo->handleAllocator, + pFrameLockEvo->device); + pFrameLockEvo->device = 0; + } + + nvAssert(pFrameLockEvo->nGpuIds == 0); + + nvTearDownUnixRmHandleAllocator(&pFrameLockEvo->handleAllocator); + + nvListDel(&pFrameLockEvo->frameLockListEntry); + + nvFree(pFrameLockEvo); +} + +/*! + * Allocate and initialize a new pFrameLock object. 
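+ * Returns NULL on failure; *pBadFirmware is additionally set to TRUE when
+ * the board reports a firmware revision mismatch with the attached GPUs.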
+ */ +static NVFrameLockEvoPtr AllocFrameLockEvo(int instance, NvU32 gsyncId, + NvBool *pBadFirmware) +{ + NV30F1_ALLOC_PARAMETERS gsyncAllocParams = { 0 }; + NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS gsyncGetCapsParams = { 0 }; + NVFrameLockEvoPtr pFrameLockEvo; + + nvAssert(FindFrameLock(gsyncId) == NULL); + + *pBadFirmware = FALSE; + + pFrameLockEvo = nvCalloc(1, sizeof(NVFrameLockEvoRec)); + + if (pFrameLockEvo == NULL) { + return NULL; + } + + nvListInit(&pFrameLockEvo->frameLockListEntry); + + if (!nvInitUnixRmHandleAllocator( + &pFrameLockEvo->handleAllocator, + nvEvoGlobal.clientHandle, + NVKMS_RM_HANDLE_SPACE_FRAMELOCK(instance))) { + nvEvoLog(EVO_LOG_ERROR, "Failed to initialize framelock handles"); + goto fail; + } + + pFrameLockEvo->device = + nvGenerateUnixRmHandle(&pFrameLockEvo->handleAllocator); + + gsyncAllocParams.gsyncInstance = instance; + + /* allocate a framelock object for the framelock device */ + if (nvRmApiAlloc(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30_GSYNC, + &gsyncAllocParams) != NVOS_STATUS_SUCCESS) { + pFrameLockEvo->device = 0; + goto fail; + } + + /* Store unique frame lock device ID */ + pFrameLockEvo->gsyncId = gsyncId; + pFrameLockEvo->houseSyncUseable = 0; + pFrameLockEvo->nGpuIds = 0; + + /* Initialize the state for the framelock board */ + pFrameLockEvo->polarity = NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_FALLING_EDGE; + pFrameLockEvo->syncDelay = 0; + pFrameLockEvo->syncInterval = 0; + pFrameLockEvo->videoMode = + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_AUTO; + pFrameLockEvo->testMode = FALSE; + pFrameLockEvo->houseSyncMode = + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_DISABLED; + pFrameLockEvo->mulDivValue = 1; + pFrameLockEvo->mulDivMode = NV_KMS_FRAMELOCK_ATTRIBUTE_MULTIPLY_DIVIDE_MODE_MULTIPLY; + + /* Query the framelock revision information */ + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_CAPS, + &gsyncGetCapsParams, + sizeof(gsyncGetCapsParams)) + != NVOS_STATUS_SUCCESS) { + goto fail; + } + + /* Check if the Quadro Sync card has a firmware + * version compatible with the GPUs connected to it. + */ + if (gsyncGetCapsParams.isFirmwareRevMismatch) { + *pBadFirmware = TRUE; + goto fail; + } + + /* gsyncGetCapsParams.revId has the framelock board id in the high 4 bits + * and the FPGA revision in the low 4 bits. This is preserved here for + * legacy clients, but we expose the full board ID (e.g. 0x358, 0x2060, + * 0x2061) and firmware version individually, so clients can more easily + * distinguish P2061 ("Quadro Sync II") from P2060 and P358 + * ("Quadro Sync"). 
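+ * For example, a revId of 0x21 presents board id 0x2 and FPGA revision 0x1
+ * to such legacy clients.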
+ */ + + pFrameLockEvo->fpgaIdAndRevision = gsyncGetCapsParams.revId; + pFrameLockEvo->boardId = gsyncGetCapsParams.boardId; + pFrameLockEvo->firmwareMajorVersion = gsyncGetCapsParams.revision; + pFrameLockEvo->firmwareMinorVersion = gsyncGetCapsParams.extendedRevision; + pFrameLockEvo->caps = gsyncGetCapsParams.capFlags; + pFrameLockEvo->maxSyncSkew = gsyncGetCapsParams.maxSyncSkew; + pFrameLockEvo->syncSkewResolution = gsyncGetCapsParams.syncSkewResolution; + pFrameLockEvo->maxSyncInterval = gsyncGetCapsParams.maxSyncInterval; + pFrameLockEvo->videoModeReadOnly = !!(gsyncGetCapsParams.capFlags & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_ONLY_GET_VIDEO_MODE); + pFrameLockEvo->mulDivSupported = !!(gsyncGetCapsParams.capFlags & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_MULTIPLY_DIVIDE_SYNC); + pFrameLockEvo->maxMulDivValue = gsyncGetCapsParams.maxMulDivValue; + + /* Determine if house sync is selectable on this frame lock device */ + if (!FrameLockUseHouseSyncGetSupport(pFrameLockEvo, + &pFrameLockEvo->houseSyncUseable)) { + pFrameLockEvo->houseSyncUseable = FALSE; + } + + pFrameLockEvo->houseSyncModeValidValues = + (1 << NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_DISABLED); + + if (pFrameLockEvo->houseSyncUseable) { + pFrameLockEvo->houseSyncModeValidValues |= + (1 << NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_INPUT); + } + + if (HouseSyncOutputModeUsable(pFrameLockEvo)) { + pFrameLockEvo->houseSyncModeValidValues |= + (1 << NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_OUTPUT); + } + + /* Add frame lock device to global list. */ + nvListAppend(&pFrameLockEvo->frameLockListEntry, &nvEvoGlobal.frameLockList); + + return pFrameLockEvo; + +fail: + + FreeFrameLockEvo(pFrameLockEvo); + return NULL; +} + + +static void BindFrameLockToDevEvo(NVFrameLockEvoPtr pFrameLockEvo, + NVDevEvoPtr pDevEvo) +{ + NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PARAMS gsyncTopologyParams = { }; + int i; + + /* find out which gpus are attached to which connectors */ + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GET_GSYNC_GPU_TOPOLOGY, + &gsyncTopologyParams, + sizeof(gsyncTopologyParams)) + != NVOS_STATUS_SUCCESS) { + return; + } + + /* Bind corresponding GPUs to the Frame Lock device */ + for (i = 0; i < ARRAY_LEN(gsyncTopologyParams.gpus); i++) { + + NvU32 connectorIndex; + const NvU32 gpuId = gsyncTopologyParams.gpus[i].gpuId; + + if (gpuId == NV30F1_CTRL_GPU_INVALID_ID) { + continue; + } + + if (!GpuIdInDevEvo(pDevEvo, gpuId)) { + continue; + } + + /* + * Connector type of _NONE means we sync through a proxy GPU, + * which we do not support. + */ + if (gsyncTopologyParams.gpus[i].connector == + NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_NONE) { + continue; + } + /* + * gsyncTopologyParams.gpus[i].connector is an enumerated + * type; convert it to a 0-based index + */ + nvAssert(gsyncTopologyParams.gpus[i].connector < + (NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_ONE + + NV30F1_GSYNC_CONNECTOR_COUNT)); + connectorIndex = gsyncTopologyParams.gpus[i].connector - + NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_ONE; + + BindGpuToFrameLock(pDevEvo, gpuId, pFrameLockEvo, connectorIndex); + } +} + +static void UnBindFrameLockFromDevEvo(NVFrameLockEvoPtr pFrameLockEvo, + NVDevEvoPtr pDevEvo) +{ + int i; + + /* + * Loop through GPUs from highest to lowest, because + * UnbindGpuFromFrameLock() may remove gpuIds[i]. 
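+ * Removal compacts the array, so iterating downward keeps every index that
+ * has not yet been visited valid.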
+ */
+    for (i = pFrameLockEvo->nGpuIds - 1; i >= 0; i--) {
+        const NvU32 gpuId = pFrameLockEvo->gpuIds[i];
+
+        if (!GpuIdInDevEvo(pDevEvo, gpuId)) {
+            continue;
+        }
+
+        UnbindGpuFromFrameLock(pDevEvo, gpuId, pFrameLockEvo);
+    }
+}
+
+
+/*!
+ * Find all of the available framelock devices.
+ *
+ * Framelock devices can only be recognized by resman after an RM
+ * client has attached a GPU that the framelock device is connected
+ * to.  So, subsequent calls to this function may find additional
+ * framelock devices.
+ *
+ * Allocate framelock objects for all the newly found framelock devices.
+ */
+void nvAllocFrameLocksEvo(NVDevEvoPtr pDevEvo)
+{
+    NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS attachedGsyncParams = { };
+    int i;
+
+    if (nvRmApiControl(nvEvoGlobal.clientHandle,
+                       nvEvoGlobal.clientHandle,
+                       NV0000_CTRL_CMD_GSYNC_GET_ATTACHED_IDS,
+                       &attachedGsyncParams, sizeof(attachedGsyncParams))
+        != NVOS_STATUS_SUCCESS) {
+        return;
+    }
+
+    for (i = 0; i < ARRAY_LEN(attachedGsyncParams.gsyncIds); i++) {
+        NVFrameLockEvoPtr pFrameLockEvo;
+        NvBool badFirmware = FALSE;
+
+        if (attachedGsyncParams.gsyncIds[i] == NV0000_CTRL_GSYNC_INVALID_ID) {
+            continue;
+        }
+
+        pFrameLockEvo = FindFrameLock(attachedGsyncParams.gsyncIds[i]);
+
+        if (pFrameLockEvo == NULL) {
+            pFrameLockEvo = AllocFrameLockEvo(i,
+                                              attachedGsyncParams.gsyncIds[i],
+                                              &badFirmware);
+        }
+
+        if (pFrameLockEvo == NULL) {
+            if (badFirmware) {
+                nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                    "The firmware on this Quadro Sync card is not compatible "
+                    "with the GPUs connected to it. Please visit "
+                    " "
+                    "for instructions on installing the correct firmware.");
+                pDevEvo->badFramelockFirmware = TRUE;
+            }
+            continue;
+        }
+
+        BindFrameLockToDevEvo(pFrameLockEvo, pDevEvo);
+    }
+}
+
+/*!
+ * Free any framelock devices connected to any GPU on this pDevEvo.
+ */
+
+void nvFreeFrameLocksEvo(NVDevEvoPtr pDevEvo)
+{
+    NVFrameLockEvoPtr pFrameLockEvo, pFrameLockEvoTmp;
+
+    /* Destroy the pFrameLockEvos */
+    nvListForEachEntry_safe(pFrameLockEvo, pFrameLockEvoTmp,
+                            &nvEvoGlobal.frameLockList, frameLockListEntry) {
+
+        UnBindFrameLockFromDevEvo(pFrameLockEvo, pDevEvo);
+
+        if (pFrameLockEvo->nGpuIds == 0) {
+            FreeFrameLockEvo(pFrameLockEvo);
+        }
+    }
+}
+
+/*!
+ * Determine if this framelock device supports user selection of house
+ * sync.  Assign val appropriately.  Returns TRUE if the attribute was
+ * successfully queried.
+ */
+static NvBool FrameLockUseHouseSyncGetSupport(NVFrameLockEvoPtr pFrameLockEvo,
+                                              NvU32 *val)
+{
+    NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS
+        gsyncGetControlParamsParams = { 0 };
+    NvU32 ret;
+
+    if (!val) return FALSE;
+
+    gsyncGetControlParamsParams.which =
+        NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_USE_HOUSE;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pFrameLockEvo->device,
+                         NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_PARAMS,
+                         &gsyncGetControlParamsParams,
+                         sizeof(gsyncGetControlParamsParams));
+
+    /* If we can query Use House Sync, then it is available */
+    *val = (ret == NVOS_STATUS_SUCCESS) ? TRUE : FALSE;
+
+    return *val;
+}
+
+
+/*!
+ * Return whether or not this framelock device supports house sync
+ * output mode.
+ *
+ * House sync output mode is currently only available on P2061
+ * (Quadro Sync II).
+ */
+static NvBool HouseSyncOutputModeUsable(const NVFrameLockEvoRec *pFrameLockEvo)
+{
+    return (pFrameLockEvo->houseSyncUseable &&
+            (pFrameLockEvo->boardId ==
+             NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2061));
+}
+
+
+/*!
+ * Enable or disable house sync output mode in the framelock board.
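+ * Note that disabling output mode returns the board to house sync input
+ * mode.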
+ */ +static NvBool FrameLockSetHouseSyncOutputMode(NVFrameLockEvoPtr pFrameLockEvo, + NvBool enable) +{ + NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_PARAMS + gsyncSetHouseSyncModeParams = { 0 }; + NvU32 ret; + NvU8 houseSyncMode = enable ? NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_OUTPUT : + NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_INPUT; + + nvAssert(HouseSyncOutputModeUsable(pFrameLockEvo)); + + gsyncSetHouseSyncModeParams.houseSyncMode = houseSyncMode; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_HOUSE_SYNC_MODE, + &gsyncSetHouseSyncModeParams, + sizeof(gsyncSetHouseSyncModeParams)); + + return (ret == NVOS_STATUS_SUCCESS); +} + + +/*! + * Set the framelock to use the house sync if val is TRUE, otherwise + * set the framelock to use external sync. Returns FALSE if the + * assignment failed. + */ +NvBool nvFrameLockSetUseHouseSyncEvo(NVFrameLockEvoPtr pFrameLockEvo, NvU32 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + NvBool houseSyncOutputMode = FALSE; + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_USE_HOUSE; + + gsyncSetControlParamsParams.useHouseSync = val; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + if (HouseSyncOutputModeUsable(pFrameLockEvo)) { + + NvS64 houseSyncInputPresent; + NvBool allowHouseSyncOutput = FALSE; + + if (nvFrameLockGetStatusEvo(pFrameLockEvo, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS, + &houseSyncInputPresent)) { + if (houseSyncInputPresent == 0) { + allowHouseSyncOutput = TRUE; + } + } + + if (!val && allowHouseSyncOutput && + (pFrameLockEvo->houseSyncMode == + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_OUTPUT)) { + + houseSyncOutputMode = TRUE; + } + + if (!FrameLockSetHouseSyncOutputMode(pFrameLockEvo, houseSyncOutputMode)) { + return FALSE; + } + } + + /* + * House sync polarity is required to be rising edge if house sync is not + * in use. + * + * In addition, house sync polarity has no effect when house sync output + * mode is in use. + */ + if (val && !houseSyncOutputMode) { + return FrameLockSetPolarity(pFrameLockEvo, pFrameLockEvo->polarity); + } else { + return FrameLockSetPolarity(pFrameLockEvo, + NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_RISING_EDGE); + } +} + +/*! + * Set the polarity according to val; val is interpreted as an + * NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY value. Returns FALSE if the + * assignment failed. 
+ */ +static NvBool FrameLockSetPolarity( + NVFrameLockEvoPtr pFrameLockEvo, + enum NvKmsFrameLockAttributePolarityValue val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + NvU32 polarity; + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY; + + switch (val) { + case NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_RISING_EDGE: + polarity = NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_RISING_EDGE; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_FALLING_EDGE: + polarity = NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_FALLING_EDGE; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_BOTH_EDGES: + polarity = NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_BOTH_EDGES; + break; + + default: + return FALSE; + } + + gsyncSetControlParamsParams.syncPolarity = polarity; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + return TRUE; +} + +/*! + * Set the sync delay to the value given in val. Returns FALSE if the + * assignment failed. Assigns pFrameLockEvo->syncDelay upon success. + */ +static NvBool FrameLockSetSyncDelay(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + + if (val > pFrameLockEvo->maxSyncSkew) return FALSE; + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_SKEW; + + gsyncSetControlParamsParams.syncSkew = val; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + pFrameLockEvo->syncDelay = val; + + return TRUE; +} + +/*! + * Set the sync multiply/divide value given in val. Returns FALSE if the + * assignment failed. Assigns pFrameLockEvo->mulDivValue upon success. + */ +static NvBool SetFrameLockMulDivVal(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + + if (!pFrameLockEvo->mulDivSupported || + (val > pFrameLockEvo->maxMulDivValue)) { + return FALSE; + } + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_MULTIPLY_DIVIDE; + + gsyncSetControlParamsParams.syncMulDiv.multiplyDivideValue = val; + gsyncSetControlParamsParams.syncMulDiv.multiplyDivideMode = pFrameLockEvo->mulDivMode; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + pFrameLockEvo->mulDivValue = val; + + return TRUE; +} + +/*! + * Set the sync multiply/divide mode given in val. Returns FALSE if the + * assignment failed. Assigns pFrameLockEvo->mulDivMode upon success. 
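+ * val must be one of the NV_KMS_FRAMELOCK_ATTRIBUTE_MULTIPLY_DIVIDE_MODE_*
+ * values.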
+ */ +static NvBool SetFrameLockMulDivMode(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + + if (!pFrameLockEvo->mulDivSupported || + ((val != NV_KMS_FRAMELOCK_ATTRIBUTE_MULTIPLY_DIVIDE_MODE_MULTIPLY) && + (val != NV_KMS_FRAMELOCK_ATTRIBUTE_MULTIPLY_DIVIDE_MODE_DIVIDE))) { + return FALSE; + } + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_MULTIPLY_DIVIDE; + + gsyncSetControlParamsParams.syncMulDiv.multiplyDivideValue = pFrameLockEvo->mulDivValue; + gsyncSetControlParamsParams.syncMulDiv.multiplyDivideMode = val; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + pFrameLockEvo->mulDivMode = val; + + return TRUE; +} +/*! + * Set the sync interval to the value given in val. Returns FALSE if + * the assignment failed. Assigns pFrameLockEvo->syncInterval upon + * success. + */ +static NvBool FrameLockSetSyncInterval(NVFrameLockEvoPtr pFrameLockEvo, + NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_NSYNC; + + gsyncSetControlParamsParams.nSync = val; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + pFrameLockEvo->syncInterval = val; + + return TRUE; +} + +/*! + * Query the status of the values that are acquired through the + * GET_STATUS_SYNC command, and assign the value to val. Returns + * FALSE if the query failed or if attr is not one of the currently + * handled attributes. + */ +static NvBool FrameLockGetStatusSync(const NVDispEvoRec *pDispEvo, NvS64 *val, + enum NvKmsDispAttribute nvKmsAttribute) +{ + NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS gsyncGetStatusSyncParams = { 0 }; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + + gsyncGetStatusSyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC, + &gsyncGetStatusSyncParams, + sizeof(gsyncGetStatusSyncParams)) + != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + switch (nvKmsAttribute) + { + + case NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_STEREO_SYNC: + *val = (gsyncGetStatusSyncParams.bTiming && + gsyncGetStatusSyncParams.bStereoSync && + gsyncGetStatusSyncParams.bSyncReady); + break; + + case NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TIMING: + *val = gsyncGetStatusSyncParams.bTiming ? TRUE : FALSE; + break; + + default: + return FALSE; + } + + return TRUE; +} + +/*! + * Return the sync rate. + */ +static NvS64 FrameLockInterpretSyncRate(const NVFrameLockEvoRec *pFrameLockEvo, + NvS64 val) +{ + /* Only show decimal places if they are accurate. 
The queried + value provides 4 decimal places */ + if (pFrameLockEvo->caps & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_2DPS) { + // only two are valid + val -= (val % 100); + } else if (pFrameLockEvo->caps & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_3DPS) { + // only three are valid + val -= (val % 10); + } else if (pFrameLockEvo->caps & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_4DPS) { + // all four are valid, nothing to do + } + return val; +} + +/*! + * Query the status of one of the values that are acquired through the + * GET_STATUS command, and assign the value to val. Returns FALSE if + * the query failed or if attr is not one of the currently handled + * attributes. + */ +NvBool nvFrameLockGetStatusEvo(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS gsyncGetStatusParams = { 0 }; + + switch (attribute) { + + case NV_KMS_FRAMELOCK_ATTRIBUTE_PORT0_STATUS: + gsyncGetStatusParams.which = NV30F1_CTRL_GSYNC_GET_STATUS_PORT0_INPUT; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_PORT1_STATUS: + gsyncGetStatusParams.which = NV30F1_CTRL_GSYNC_GET_STATUS_PORT1_INPUT; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS: + gsyncGetStatusParams.which = NV30F1_CTRL_GSYNC_GET_STATUS_HOUSE_SYNC; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_INCOMING_HOUSE_SYNC_RATE: + gsyncGetStatusParams.which = + NV30F1_CTRL_GSYNC_GET_STATUS_HOUSE_SYNC_INCOMING; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY: + gsyncGetStatusParams.which = NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_READY; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED: + gsyncGetStatusParams.which = + NV30F1_CTRL_GSYNC_GET_STATUS_PORT0_ETHERNET | + NV30F1_CTRL_GSYNC_GET_STATUS_PORT1_ETHERNET; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE: + case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE_4: + gsyncGetStatusParams.which = NV30F1_CTRL_GSYNC_GET_STATUS_REFRESH; + break; + + default: + return FALSE; + } + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_STATUS, + &gsyncGetStatusParams, + sizeof(gsyncGetStatusParams)) + != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + switch (attribute) { + + case NV_KMS_FRAMELOCK_ATTRIBUTE_PORT0_STATUS: + *val = gsyncGetStatusParams.bPort0Input ? + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_INPUT : + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_OUTPUT; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_PORT1_STATUS: + *val = gsyncGetStatusParams.bPort1Input ? 
+            NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_INPUT :
+            NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_OUTPUT;
+        break;
+
+    case NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS:
+        *val = gsyncGetStatusParams.bHouseSync;
+        break;
+
+    case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY:
+        *val = gsyncGetStatusParams.bSyncReady;
+        break;
+
+    case NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED:
+        *val = 0x0;
+        if (gsyncGetStatusParams.bPort0Ethernet)
+            *val |= NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED_PORT0;
+        if (gsyncGetStatusParams.bPort1Ethernet)
+            *val |= NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED_PORT1;
+        break;
+
+    case NV_KMS_FRAMELOCK_ATTRIBUTE_INCOMING_HOUSE_SYNC_RATE:
+        *val =
+            FrameLockInterpretSyncRate(pFrameLockEvo,
+                                       gsyncGetStatusParams.houseSyncIncoming);
+        break;
+
+    case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE:
+    case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE_4:
+        *val = FrameLockInterpretSyncRate(pFrameLockEvo,
+                                          gsyncGetStatusParams.refresh);
+        if (attribute == NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE) {
+            /* _STATUS_REFRESH is in units of 1/10000 Hz; _SYNC_RATE is in
+             * units of 1/1000 Hz */
+            *val /= 10;
+        }
+        break;
+
+    default:
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+/*!
+ * [en|dis]able syncing of the GPU to the FrameLock board for the
+ * display mask associated with that GPU.  val controls whether we are
+ * enabling or disabling.
+ */
+static NvBool FrameLockSetEnable(NVDispEvoPtr pDispEvo, NvS64 val)
+{
+    if (val) {
+
+        /* XXX NVKMS TODO: address the following:
+
+           In Xinerama a single app has a channel on each GPU.  Before
+           framelock is enabled the first time per X server, vblanks
+           are not synchronized, so if a swap grouped app is started
+           before framelock is enabled the channels get unstalled at
+           different times, and it's likely that one display will be
+           armed while the other is not.  When framelock is enabled in
+           this state, we'll deadlock because suddenly the armed display
+           is waiting on the unarmed display to unstall, and the unarmed
+           display cannot arm.  Prevent this by idling all channels */
+
+        return nvEnableFrameLockEvo(pDispEvo);
+    } else {
+        return nvDisableFrameLockEvo(pDispEvo);
+    }
+}
+
+static NvBool FrameLockSetWatchdog(NVFrameLockEvoPtr pFrameLockEvo, NvU32 val)
+{
+    NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS
+        gsyncSetControlWatchdogParams = { 0 };
+    NvU32 ret;
+
+    gsyncSetControlWatchdogParams.enable = val;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pFrameLockEvo->device,
+                         NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_WATCHDOG,
+                         &gsyncSetControlWatchdogParams,
+                         sizeof(gsyncSetControlWatchdogParams));
+
+    if (ret != NVOS_STATUS_SUCCESS) return FALSE;
+
+    return TRUE;
+}
+
+
+/*!
+ * For the given display, determine if it can be set as a frame lock
+ * server.
+ */
+static NvBool FrameLockDpyCanBeServer(const NVDpyEvoRec *pDpyEvo)
+{
+    NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS gsyncGetControlSyncParams = { 0 };
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
+    const NvU32 head = nvGetPrimaryHwHead(pDispEvo, pDpyEvo->apiHead);
+    const NVDispHeadStateEvoRec *pHeadState;
+    NvU32 ret;
+
+    nvAssert(head != NV_INVALID_HEAD);
+    nvAssert(pDispEvo);
+    nvAssert(pDispEvo->pFrameLockEvo);
+
+    pHeadState = &pDispEvo->headState[head];
+    nvAssert(pHeadState->activeRmId);
+
+    /* If already a server, assume it can be a server.
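+     * Otherwise, query NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SYNC below and
+     * require both that RM reports the display as master and that
+     * nvFrameLockServerPossibleEvo() agrees.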
*/ + if (nvDpyIdsAreEqual(pDispEvo->framelock.server, pDpyEvo->id)) { + return TRUE; + } + + gsyncGetControlSyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + gsyncGetControlSyncParams.displays = pHeadState->activeRmId; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SYNC, + &gsyncGetControlSyncParams, + sizeof(gsyncGetControlSyncParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + if (gsyncGetControlSyncParams.master && + nvFrameLockServerPossibleEvo(pDpyEvo)) { + return TRUE; + } + + return FALSE; +} + + +/*! + * For the given display, determine if it can be set as a frame lock + * client. + */ +static NvBool FrameLockDpyCanBeClient(const NVDpyEvoRec *pDpyEvo) +{ + NVDispEvoPtr pDispEvo; + + nvAssert(pDpyEvo->pDispEvo); + nvAssert(pDpyEvo->pDispEvo->pFrameLockEvo); + nvAssert(nvDpyEvoIsActive(pDpyEvo)); + + pDispEvo = pDpyEvo->pDispEvo; + + /* If already a client, assume it can be a client. */ + if (nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->framelock.clients)) { + return TRUE; + } + + /* Otherwise, see if we can make it a client. */ + return nvFrameLockClientPossibleEvo(pDpyEvo); +} + + +/*! + * [en|dis]able test mode (based on the value of val). Returns FALSE + * if changing the test mode failed. Assigns pFrameLockEvo->testMode + * upon success. + */ +static NvBool FrameLockSetTestMode(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS + gsyncSetControlTestingParams = { 0 }; + NvU32 ret; + + gsyncSetControlTestingParams.bEmitTestSignal = (val == TRUE); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_TESTING, + &gsyncSetControlTestingParams, + sizeof(gsyncSetControlTestingParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + pFrameLockEvo->testMode = val; + + return TRUE; +} + + +/*! + * Set the video mode according to val; returns FALSE if the + * assignment failed. Assigns pFrameLockEvo->videoMode upon success. + */ +static NvBool FrameLockSetVideoMode(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE; + + switch (val) { + + case NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_AUTO: + gsyncSetControlParamsParams.syncVideoMode = + NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NONE; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_TTL: + gsyncSetControlParamsParams.syncVideoMode = + NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_TTL; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_BI_LEVEL: + gsyncSetControlParamsParams.syncVideoMode = + NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NTSCPALSECAM; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_TRI_LEVEL: + gsyncSetControlParamsParams.syncVideoMode = + NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_HDTV; + break; + + default: + return FALSE; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + pFrameLockEvo->videoMode = val; + + return TRUE; +} + + +/*! + * Enable or disable the swap ready connection through the gsync + * connector. This should be called when we bind the swap barrier. 
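+ * nvSetSwapBarrierNotifyEvo() is called immediately before and after the
+ * RM control call (with isPre TRUE and FALSE, respectively).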
+ */ +static NvBool SetSwapBarrier(NVDispEvoPtr pDispEvo, NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS + gsyncSetSwapBarrierParams = { 0 }; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvU32 ret; + NvBool enable = !!val; + + if (!pFrameLockEvo) return FALSE; + + nvSetSwapBarrierNotifyEvo(pDispEvo, enable, TRUE /* isPre */); + + gsyncSetSwapBarrierParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + gsyncSetSwapBarrierParams.enable = enable; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SWAP_BARRIER, + &gsyncSetSwapBarrierParams, + sizeof(gsyncSetSwapBarrierParams)); + + nvSetSwapBarrierNotifyEvo(pDispEvo, enable, FALSE /* isPre */); + + return (ret == NVOS_STATUS_SUCCESS); +} + + +/*! + * Flush all of our known framelock SW state out to the HW, to make + * sure both are in sync. This should be called any time we get the + * HW back from outside control (e.g., starting X or coming back from + * a VT switch). + */ +static NvBool ResetHardwareOneDisp(NVDispEvoPtr pDispEvo, NvS64 value) +{ + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvU32 activeHeadsMask; + NvBool ret = TRUE; + + if (!pDispEvo->pFrameLockEvo || !value) { + /* Nothing to do */ + return TRUE; + } + + /* We should never get here when framelock is enabled */ + if (pDispEvo->framelock.syncEnabled) { + nvAssert(!"Attempted to reset framelock HW while framelock is enabled"); + return FALSE; + } + + /* (Re-)set the HW state to match the SW state */ + if (!nvFrameLockSetUseHouseSyncEvo(pFrameLockEvo, + pFrameLockEvo->houseSyncArmed)) { + ret = FALSE; + } + if (!FrameLockSetSyncDelay(pFrameLockEvo, pFrameLockEvo->syncDelay)) { + ret = FALSE; + } + if (!FrameLockSetSyncInterval(pFrameLockEvo, pFrameLockEvo->syncInterval)) { + ret = FALSE; + } + if (!FrameLockSetVideoMode(pFrameLockEvo, pFrameLockEvo->videoMode)) { + ret = FALSE; + } + if (!FrameLockSetTestMode(pFrameLockEvo, pFrameLockEvo->testMode)) { + ret = FALSE; + } + if (!SetFrameLockMulDivVal(pFrameLockEvo, pFrameLockEvo->mulDivValue)) { + ret = FALSE; + } + if (!SetFrameLockMulDivMode(pFrameLockEvo, pFrameLockEvo->mulDivMode)) { + ret = FALSE; + } + + /* Since (we think) sync is disabled, these should always be disabled */ + if (!FrameLockSetWatchdog(pFrameLockEvo, FALSE)) { + ret = FALSE; + } + if (!SetSwapBarrier(pDispEvo, FALSE)) { + ret = FALSE; + } + + /* Disable both server and client lock for all heads */ + activeHeadsMask = nvGetActiveHeadMask(pDispEvo); + + if (!nvFramelockSetControlUnsyncEvo(pDispEvo, activeHeadsMask, TRUE)) { + ret = FALSE; + } + if (!nvFramelockSetControlUnsyncEvo(pDispEvo, activeHeadsMask, FALSE)) { + ret = FALSE; + } + + return ret; +} + + +/*! + * Returns the allowable configurations for the given display device. + * The device must be enabled to advertise server/client + * configuration. 
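+ * + * ResetHardwareOneDisp() above deliberately does not early-return on a + * failed step: every cached setting is pushed to the hardware and only + * the aggregate result is reported, so one bad write cannot leave the + * remaining state unapplied. A sketch of the pattern (hypothetical step + * names, illustrative only): + * + *     NvBool ok = TRUE; + *     if (!ApplySyncDelay())    ok = FALSE;   // record the failure... + *     if (!ApplySyncInterval()) ok = FALSE;   // ...but keep flushing + *     if (!ApplyVideoMode())    ok = FALSE; + *     return ok; + *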
+ */ +static unsigned int FrameLockGetValidDpyConfig(const NVDpyEvoRec *pDpyEvo) +{ + NVDispEvoPtr pDispEvo; + unsigned int valid = + (1 << (NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_DISABLED)); + + if (!pDpyEvo || !nvDpyEvoIsActive(pDpyEvo)) { + goto done; + } + + pDispEvo = pDpyEvo->pDispEvo; + + if (!pDispEvo || !pDispEvo->pFrameLockEvo) { + goto done; + } + + /* Check if display can be a server */ + + if (FrameLockDpyCanBeServer(pDpyEvo)) { + valid |= (1 << (NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_SERVER)); + } + + /* Check if display can be a client */ + + if (FrameLockDpyCanBeClient(pDpyEvo)) { + valid |= (1 << (NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_CLIENT)); + } + + done: + + return valid; +} + +static NvBool GetFrameLock(NVDispEvoPtr pDispEvo, NvS64 *val) +{ + *val = (pDispEvo->pFrameLockEvo) ? 1 : 0; + return TRUE; +} + +static NvBool SetFrameLockPolarity(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + if ((val != NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_RISING_EDGE) && + (val != NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_FALLING_EDGE) && + (val != NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_BOTH_EDGES)) { + return FALSE; + } + + pFrameLockEvo->polarity = val; + + return TRUE; +} + +static NvBool GetFrameLockPolarity(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->polarity; + + return TRUE; +} + +static NvBool GetFrameLockSyncDelay(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->syncDelay; + + return TRUE; +} + +static NvBool GetFrameLockSyncDelayValidValues( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = 0; + pValidValues->u.range.max = pFrameLockEvo->maxSyncSkew; + + return TRUE; +} + +static NvBool GetFrameLockMulDivVal(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + if (!pFrameLockEvo->mulDivSupported) { + return FALSE; + } + + *val = pFrameLockEvo->mulDivValue; + + return TRUE; +} + +static NvBool GetFrameLockMulDivValValidValues( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + if (!pFrameLockEvo->mulDivSupported) { + return FALSE; + } + + pValidValues->u.range.min = 1; + pValidValues->u.range.max = pFrameLockEvo->maxMulDivValue; + + return TRUE; +} + +static NvBool GetFrameLockMulDivModeValidValues( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!pFrameLockEvo->mulDivSupported) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTEGER); + + return TRUE; +} + +static NvBool GetFrameLockMulDivMode(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + if (!pFrameLockEvo->mulDivSupported) { + return FALSE; + } + + *val = pFrameLockEvo->mulDivMode; + + return TRUE; +} + +static NvBool SetHouseSyncMode(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + if ((val < 0) || (val > 31)) { + return FALSE; + } + + if ((pFrameLockEvo->houseSyncModeValidValues & NVBIT(val)) == 0) { + return FALSE; + } + + pFrameLockEvo->houseSyncMode = val; + + return TRUE; +} + +static NvBool GetHouseSyncMode(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 
*val) +{ + if (!pFrameLockEvo->houseSyncUseable) return FALSE; + + *val = pFrameLockEvo->houseSyncMode; + + return TRUE; +} + +static NvBool GetHouseSyncModeValidValues( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!pFrameLockEvo->houseSyncUseable) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + pValidValues->u.bits.ints = pFrameLockEvo->houseSyncModeValidValues; + + return TRUE; +} + +static NvBool GetFrameLockSyncInterval(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->syncInterval; + + return TRUE; +} + +static NvBool GetFrameLockSyncIntervalValidValues( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = 0; + pValidValues->u.range.max = pFrameLockEvo->maxSyncInterval; + + return TRUE; +} + +static NvBool SetFrameLockSync(NVDispEvoRec *pDispEvo, NvS64 val) +{ + NvBool a, b; + + if (!pDispEvo->pFrameLockEvo) return FALSE; + + /* If we are already enabled or already disabled, we're done. */ + if (val == pDispEvo->framelock.syncEnabled) return TRUE; + + /* Something must be set to enable/disable */ + if (nvDpyIdIsInvalid(pDispEvo->framelock.server) && + nvDpyIdListIsEmpty(pDispEvo->framelock.clients)) return FALSE; + + /* If we're disabling and test mode is currently enabled, disable it */ + if (!val && + !nvDpyIdIsInvalid(pDispEvo->framelock.server) && + pDispEvo->pFrameLockEvo->testMode) { + + FrameLockSetTestMode(pDispEvo->pFrameLockEvo, FALSE); + } + + /* + * It is important to set syncEnabled before calling FrameLockSetEnable. + * FrameLockSetEnable may call into GLS which may call back into the + * driver to query if framelock is enabled, which checks this field. + */ + pDispEvo->framelock.syncEnabled = val; + + a = FrameLockSetEnable(pDispEvo, val); + b = FrameLockSetWatchdog(pDispEvo->pFrameLockEvo, val); + + /* + * Since RM doesn't send a SYNC_READY event on sync disable through nvctrl, + * send it here. 
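+ * + * (Aside: SetHouseSyncMode() above validates against an INTBITS mask: + * mode v is permitted iff bit v is set in houseSyncModeValidValues, + * hence the (mask & NVBIT(val)) == 0 rejection. An equivalent + * standalone check, with illustrative values: + * + *     unsigned int mask = (1u << 0) | (1u << 2);   // modes {0, 2} allowed + *     NvBool allowed = (mask & (1u << val)) != 0;  // val == 1 -> rejected + * ) + *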
+ */ + if (!val && a && b) { + nvSendFrameLockAttributeChangedEventEvo( + pDispEvo->pFrameLockEvo, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY, + FALSE); + pDispEvo->pFrameLockEvo->syncReadyLast = val; + } + + return (a && b); +} + +static NvBool GetFrameLockSync(NVDispEvoPtr pDispEvo, NvS64 *val) +{ + if (!pDispEvo->pFrameLockEvo) return FALSE; + + /* return the cached state */ + + *val = ((pDispEvo->framelock.currentServerHead != NV_INVALID_HEAD) || + (pDispEvo->framelock.currentClientHeadsMask != 0x0)); + + return TRUE; +} + +static NvBool GetFrameLockSyncReady(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + /* return the cached state */ + + *val = pFrameLockEvo->syncReadyLast; + + return TRUE; +} + +static NvBool GetFrameLockStereoSync(NVDispEvoPtr pDispEvo, NvS64 *val) +{ + if (!pDispEvo->pFrameLockEvo) return FALSE; + + return FrameLockGetStatusSync(pDispEvo, val, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_STEREO_SYNC); +} + +static NvBool GetFrameLockTiming(NVDispEvoPtr pDispEvo, NvS64 *val) +{ + if (!pDispEvo->pFrameLockEvo) return FALSE; + + return FrameLockGetStatusSync(pDispEvo, val, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TIMING); +} + +static NvBool SetFrameLockTestSignal(NVDispEvoRec *pDispEvo, NvS64 val) +{ + if (!pDispEvo->pFrameLockEvo) return FALSE; + + /* The test signal can only be emitted if the GPU is the server + * and framelock is enabled. + */ + + if (!nvDpyIdIsInvalid(pDispEvo->framelock.server) && + pDispEvo->framelock.syncEnabled) { + return FrameLockSetTestMode(pDispEvo->pFrameLockEvo, val); + } + + return FALSE; +} + +static NvBool GetFrameLockTestSignal(NVDispEvoPtr pDispEvo, NvS64 *val) +{ + if (!pDispEvo->pFrameLockEvo || + nvDpyIdIsInvalid(pDispEvo->framelock.server)) { + return FALSE; + } + + *val = pDispEvo->pFrameLockEvo->testMode; + + return TRUE; +} + +static NvBool SetFrameLockVideoMode(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + if (pFrameLockEvo->videoModeReadOnly) { + return FALSE; + } + + return FrameLockSetVideoMode(pFrameLockEvo, val); +} + +static NvBool GetFrameLockVideoMode(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->videoMode; + + return TRUE; +} + +static NvBool GetFrameLockVideoModeValidValues( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_AUTO; + pValidValues->u.range.max = + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_TRI_LEVEL; + + if (pFrameLockEvo->videoModeReadOnly) { + pValidValues->writable = FALSE; + } + + return TRUE; +} + +static NvBool GetFrameLockFpgaRevision(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->fpgaIdAndRevision; + + return TRUE; +} + +static NvBool GetFrameLockFirmwareMajorVersion( + const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->firmwareMajorVersion; + + return TRUE; +} + +static NvBool GetFrameLockFirmwareMinorVersion( + const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->firmwareMinorVersion; + + return TRUE; +} + +static NvBool GetFrameLockBoardId(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = 
pFrameLockEvo->boardId; + + return TRUE; +} + +static NvBool GetFrameLockFpgaRevisionUnsupported( + NVDispEvoPtr pDispEvo, + NvS64 *val) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + *val = pDevEvo->badFramelockFirmware; + + return TRUE; +} + +static NvBool GetFrameLockSyncDelayResolution( + const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->syncSkewResolution; + + return TRUE; +} + +NvBool nvSetFrameLockDisplayConfigEvo(NVDpyEvoRec *pDpyEvo, NvS64 val) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + unsigned int valid; + NvBool removeFromClients = FALSE; + NvBool removeFromServer = FALSE; + + if (!pDispEvo || !pDispEvo->pFrameLockEvo) { + return FALSE; + } + + /* Only set the config when framelock is disabled */ + + if (pDispEvo->framelock.syncEnabled) { + return FALSE; + } + + valid = FrameLockGetValidDpyConfig(pDpyEvo); + + /* Display device cannot be set as such */ + if (!((1 << val) & valid)) { + return FALSE; + } + + switch (val) { + + case NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_SERVER: + if (!nvDpyIdIsInvalid(pDispEvo->framelock.server) && + !nvDpyIdsAreEqual(pDispEvo->framelock.server, pDpyEvo->id)) { + NVDpyEvoPtr pOtherDpyEvo; + + pOtherDpyEvo = + nvGetDpyEvoFromDispEvo(pDispEvo, pDispEvo->framelock.server); + if (pOtherDpyEvo) { + nvSendDpyAttributeChangedEventEvo( + pOtherDpyEvo, + NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG, + NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_DISABLED); + } + } + pDispEvo->framelock.server = pDpyEvo->id; + removeFromClients = TRUE; + break; + + case NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_CLIENT: + pDispEvo->framelock.clients = + nvAddDpyIdToDpyIdList(pDpyEvo->id, pDispEvo->framelock.clients); + removeFromServer = TRUE; + break; + + case NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_DISABLED: + removeFromClients = TRUE; + removeFromServer = TRUE; + break; + + default: + return FALSE; + } + + if (removeFromClients) { + if (nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->framelock.clients)) { + pDispEvo->framelock.clients = + nvDpyIdListMinusDpyId(pDispEvo->framelock.clients, pDpyEvo->id); + } + } + + if (removeFromServer) { + if (nvDpyIdsAreEqual(pDispEvo->framelock.server, pDpyEvo->id)) { + pDispEvo->framelock.server = nvInvalidDpyId(); + } + } + + return TRUE; +} + +NvBool nvGetFrameLockDisplayConfigEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + + if (!pDispEvo || !pDispEvo->pFrameLockEvo) { + return FALSE; + } + + if (nvDpyIdsAreEqual(pDispEvo->framelock.server, pDpyEvo->id)) { + *val = NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_SERVER; + } else if (nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->framelock.clients)) { + *val = NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_CLIENT; + } else { + *val = NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_DISABLED; + } + + return TRUE; +} + +NvBool nvGetFrameLockDisplayConfigValidValuesEvo( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (pDpyEvo->pDispEvo->pFrameLockEvo == NULL) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + pValidValues->u.bits.ints = FrameLockGetValidDpyConfig(pDpyEvo); + + return TRUE; +} + +static const struct { + NvBool (*set)(NVDispEvoPtr pDispEvo, NvS64 value); + NvBool (*get)(NVDispEvoPtr pDispEvo, NvS64 *pValue); + enum NvKmsAttributeType type; +} DispAttributesDispatchTable[] = { + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK] = { + .set = NULL, + .get = GetFrameLock, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_SYNC] = { + .set = SetFrameLockSync, + .get = GetFrameLockSync, + .type
= NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_GPU_FRAMELOCK_FPGA_REVISION_UNSUPPORTED] = { + .set = NULL, + .get = GetFrameLockFpgaRevisionUnsupported, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_STEREO_SYNC] = { + .set = NULL, + .get = GetFrameLockStereoSync, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TIMING] = { + .set = NULL, + .get = GetFrameLockTiming, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TEST_SIGNAL] = { + .set = SetFrameLockTestSignal, + .get = GetFrameLockTestSignal, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_RESET] = { + .set = ResetHardwareOneDisp, + .get = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_SET_SWAP_BARRIER] = { + .set = SetSwapBarrier, + .get = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_QUERY_DP_AUX_LOG] = { + .set = NULL, + .get = nvRmQueryDpAuxLog, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, +}; + + +/*! + * Set pParams->attribute to pParams->value on the given disp. + */ +NvBool nvSetDispAttributeEvo(NVDispEvoPtr pDispEvo, + struct NvKmsSetDispAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(DispAttributesDispatchTable)) { + return FALSE; + } + + if (DispAttributesDispatchTable[index].set == NULL) { + return FALSE; + } + + return DispAttributesDispatchTable[index].set(pDispEvo, + pParams->request.value); +} + + +/*! + * Get the value of pParams->attribute on the given disp. + */ +NvBool nvGetDispAttributeEvo(NVDispEvoPtr pDispEvo, + struct NvKmsGetDispAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(DispAttributesDispatchTable)) { + return FALSE; + } + + if (DispAttributesDispatchTable[index].get == NULL) { + return FALSE; + } + + return DispAttributesDispatchTable[index].get(pDispEvo, + &pParams->reply.value); +} + + +/*! + * Get the valid values of pParams->attribute on the given disp. + */ +NvBool nvGetDispAttributeValidValuesEvo( + const NVDispEvoRec *pDispEvo, + struct NvKmsGetDispAttributeValidValuesParams *pParams) +{ + struct NvKmsAttributeValidValuesCommonReply *pReply = + &pParams->reply.common; + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(DispAttributesDispatchTable)) { + return FALSE; + } + + /* + * FRAMELOCK and GPU_FRAMELOCK_FPGA_REVISION_UNSUPPORTED + * can be queried without a pFrameLockEvo; all other + * attributes require a pFrameLockEvo. 
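+ * + * (Aside: because the dispatch tables here use C99 designated + * initializers, any attribute value that is never listed still gets a + * zero-filled entry, so the NULL checks on .set/.get reject both + * read-only/write-only attributes and attributes missing from the + * table. A tiny standalone illustration, hypothetical names: + * + *     static int Two(void) { return 2; } + *     static struct { int (*fn)(void); } t[3] = { [2] = { .fn = Two } }; + *     // t[0].fn and t[1].fn are implicitly NULL + * ) + *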
+ */ + if (((pParams->request.attribute != NV_KMS_DISP_ATTRIBUTE_FRAMELOCK) && + (pParams->request.attribute != + NV_KMS_DISP_ATTRIBUTE_GPU_FRAMELOCK_FPGA_REVISION_UNSUPPORTED)) && + (pDispEvo->pFrameLockEvo == NULL)) { + return FALSE; + } + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + pReply->readable = (DispAttributesDispatchTable[index].get != NULL); + pReply->writable = (DispAttributesDispatchTable[index].set != NULL); + + pReply->type = DispAttributesDispatchTable[index].type; + + return TRUE; +} + + +static const struct { + NvBool (*set)(NVFrameLockEvoPtr pFrameLockEvo, NvS64 value); + NvBool (*get)(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, NvS64 *pValue); + NvBool (*getValidValues)( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues); + enum NvKmsAttributeType type; +} FrameLockAttributesDispatchTable[] = { + [NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY] = { + .set = SetFrameLockPolarity, + .get = GetFrameLockPolarity, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BITMASK, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_DELAY] = { + .set = FrameLockSetSyncDelay, + .get = GetFrameLockSyncDelay, + .getValidValues = GetFrameLockSyncDelayValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE] = { + .set = SetHouseSyncMode, + .get = GetHouseSyncMode, + .getValidValues = GetHouseSyncModeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_INTERVAL] = { + .set = FrameLockSetSyncInterval, + .get = GetFrameLockSyncInterval, + .getValidValues = GetFrameLockSyncIntervalValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY] = { + .set = NULL, + .get = GetFrameLockSyncReady, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE] = { + .set = SetFrameLockVideoMode, + .get = GetFrameLockVideoMode, + .getValidValues = GetFrameLockVideoModeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_FPGA_REVISION] = { + .set = NULL, + .get = GetFrameLockFpgaRevision, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_FIRMWARE_MAJOR_VERSION] = { + .set = NULL, + .get = GetFrameLockFirmwareMajorVersion, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_FIRMWARE_MINOR_VERSION] = { + .set = NULL, + .get = GetFrameLockFirmwareMinorVersion, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_BOARD_ID] = { + .set = NULL, + .get = GetFrameLockBoardId, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_DELAY_RESOLUTION] = { + .set = NULL, + .get = GetFrameLockSyncDelayResolution, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_PORT0_STATUS] = { + .set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_PORT1_STATUS] = { + .set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS] = { + .set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED] = { + 
.set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BITMASK, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE] = { + .set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE_4] = { + .set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_INCOMING_HOUSE_SYNC_RATE] = { + .set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_MULTIPLY_DIVIDE_VALUE] = { + .set = SetFrameLockMulDivVal, + .get = GetFrameLockMulDivVal, + .getValidValues = GetFrameLockMulDivValValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_MULTIPLY_DIVIDE_MODE] = { + .set = SetFrameLockMulDivMode, + .get = GetFrameLockMulDivMode, + .getValidValues = GetFrameLockMulDivModeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, +}; + +NvBool nvSetFrameLockAttributeEvo( + NVFrameLockEvoRec *pFrameLockEvo, + const struct NvKmsSetFrameLockAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(FrameLockAttributesDispatchTable)) { + return FALSE; + } + + if (FrameLockAttributesDispatchTable[index].set == NULL) { + return FALSE; + } + + if ((FrameLockAttributesDispatchTable[index].type == + NV_KMS_ATTRIBUTE_TYPE_BOOLEAN) && + (pParams->request.value != TRUE) && + (pParams->request.value != FALSE)) { + return FALSE; + } + + return FrameLockAttributesDispatchTable[index].set(pFrameLockEvo, + pParams->request.value); +} + +NvBool nvGetFrameLockAttributeEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsGetFrameLockAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(FrameLockAttributesDispatchTable)) { + return FALSE; + } + + if (FrameLockAttributesDispatchTable[index].get == NULL) { + return FALSE; + } + + return FrameLockAttributesDispatchTable[index].get(pFrameLockEvo, + pParams->request.attribute, + &pParams->reply.value); +} + +NvBool nvGetFrameLockAttributeValidValuesEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsGetFrameLockAttributeValidValuesParams *pParams) +{ + struct NvKmsAttributeValidValuesCommonReply *pReply = + &pParams->reply.common; + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(FrameLockAttributesDispatchTable)) { + return FALSE; + } + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + pReply->readable = (FrameLockAttributesDispatchTable[index].get != NULL); + pReply->writable = (FrameLockAttributesDispatchTable[index].set != NULL); + + pReply->type = FrameLockAttributesDispatchTable[index].type; + + /* + * The getValidValues function provides two important things: + * - If type==Range, then assigns reply::u::range. + * - If the attribute is not currently available, returns FALSE. + * If the getValidValues function is NULL, assume the attribute is + * available. The type must not be something that requires assigning + * to reply::u.
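+ * + * As an illustration of this contract, a hypothetical RANGE attribute's + * getValidValues function (sketch only; types and names are invented): + * + *     static NvBool GetFooValidValues(const SomeState *pState, + *                                     SomeReply *pReply) + *     { + *         if (!pState->fooSupported) { + *             return FALSE;                  // attribute not available + *         } + *         pReply->u.range.min = 0;           // type == RANGE: must fill u + *         pReply->u.range.max = pState->maxFoo; + *         return TRUE; + *     } + *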
*/ + if (FrameLockAttributesDispatchTable[index].getValidValues == NULL) { + nvAssert(pReply->type != NV_KMS_ATTRIBUTE_TYPE_RANGE); + return TRUE; + } + + return FrameLockAttributesDispatchTable[index].getValidValues( + pFrameLockEvo, pReply); +} + +NvU32 nvGetFramelockServerHead(const NVDispEvoRec *pDispEvo) +{ + const NVDpyEvoRec *pDpyEvo = + nvGetDpyEvoFromDispEvo(pDispEvo, pDispEvo->framelock.server); + return (pDpyEvo != NULL) ? nvGetPrimaryHwHead(pDispEvo, pDpyEvo->apiHead) : + NV_INVALID_HEAD; +} + +NvU32 nvGetFramelockClientHeadsMask(const NVDispEvoRec *pDispEvo) +{ + NvU32 headsMask = 0x0; + const NVDpyEvoRec *pServerDpyEvo, *pClientDpyEvo; + + pServerDpyEvo = nvGetDpyEvoFromDispEvo(pDispEvo, + pDispEvo->framelock.server); + if ((pServerDpyEvo != NULL) && + (pServerDpyEvo->apiHead != NV_INVALID_HEAD)) { + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[pServerDpyEvo->apiHead]; + NvU32 primaryHead = nvGetPrimaryHwHead(pDispEvo, + pServerDpyEvo->apiHead); + + nvAssert(primaryHead != NV_INVALID_HEAD); + + /* + * The secondary hardware heads of the server dpy are clients of + * the primary head. + */ + headsMask |= pApiHeadState->hwHeadsMask; + headsMask &= ~NVBIT(primaryHead); + } + + FOR_ALL_EVO_DPYS(pClientDpyEvo, pDispEvo->framelock.clients, pDispEvo) { + if (pClientDpyEvo->apiHead == NV_INVALID_HEAD) { + continue; + } + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[pClientDpyEvo->apiHead]; + headsMask |= pApiHeadState->hwHeadsMask; + } + + return headsMask; +} + +void nvUpdateGLSFramelock(const NVDispEvoRec *pDispEvo, const NvU32 head, + const NvBool enable, const NvBool server) +{ + NVDpyEvoRec *pDpyEvo; + NvS64 value = enable | (server << 1); + + /* + * XXX[2Heads1OR] Optimize this loop in a follow-on code change when + * the apiHead -> pDpyEvo mapping gets implemented. + */ + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) { + /* + * XXX[2Heads1OR] Framelock is currently not supported with + * 2Heads1OR; the api head is expected to be mapped onto a single + * hardware head, which is the primary hardware head. + */ + if ((pDpyEvo->apiHead == NV_INVALID_HEAD) || + (nvGetPrimaryHwHead(pDispEvo, pDpyEvo->apiHead) != head)) { + continue; + } + + nvSendDpyAttributeChangedEventEvo(pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_UPDATE_GLS_FRAMELOCK, + value); + } +} diff --git a/src/nvidia-modeset/src/nvkms-hal.c b/src/nvidia-modeset/src/nvkms-hal.c new file mode 100644 index 0000000..d49f629 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-hal.c @@ -0,0 +1,212 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-types.h" +#include "nvkms-cursor.h" +#include "nvkms-hal.h" +#include "nvkms-rm.h" + + + +#include "class/cl9470.h" // NV9470_DISPLAY +#include "class/cl9570.h" // NV9570_DISPLAY +#include "class/cl9770.h" // NV9770_DISPLAY +#include "class/cl9870.h" // NV9870_DISPLAY +#include "class/clc370.h" // NVC370_DISPLAY +#include "class/clc570.h" // NVC570_DISPLAY +#include "class/clc670.h" // NVC670_DISPLAY +#include "class/clc770.h" // NVC770_DISPLAY +#include "class/clc970.h" // NVC970_DISPLAY + +#include "class/cl947d.h" // NV947D_CORE_CHANNEL_DMA +#include "class/cl957d.h" // NV957D_CORE_CHANNEL_DMA +#include "class/cl977d.h" // NV977D_CORE_CHANNEL_DMA +#include "class/cl987d.h" // NV987D_CORE_CHANNEL_DMA +#include "class/clc37d.h" // NVC37D_CORE_CHANNEL_DMA +#include "class/clc37e.h" // NVC37E_WINDOW_CHANNEL_DMA +#include "class/clc57d.h" // NVC57D_CORE_CHANNEL_DMA +#include "class/clc57e.h" // NVC57E_WINDOW_CHANNEL_DMA +#include "class/clc67d.h" // NVC67D_CORE_CHANNEL_DMA +#include "class/clc67e.h" // NVC67E_WINDOW_CHANNEL_DMA +#include "class/clc77d.h" // NVC77D_CORE_CHANNEL_DMA +#include "class/clc97d.h" // NVC97D_CORE_CHANNEL_DMA +#include "class/clc97e.h" // NVC97E_WINDOW_CHANNEL_DMA + +extern NVEvoHAL nvEvo94; +extern NVEvoHAL nvEvo97; +extern NVEvoHAL nvEvoC3; +extern NVEvoHAL nvEvoC5; +extern NVEvoHAL nvEvoC6; +extern NVEvoHAL nvEvoC9; + +enum NvKmsAllocDeviceStatus nvAssignEvoCaps(NVDevEvoPtr pDevEvo) +{ +#define ENTRY(_classPrefix, \ + _pEvoHal, \ + _supportsDP13, \ + _supportsHDMI20, \ + _supportsYUV2020, \ + _inputLutAppliesToBase, \ + _dpYCbCr422MaxBpc, \ + _hdmiYCbCr422MaxBpc, \ + _validNIsoFormatMask, \ + _maxPitch, \ + _maxWidthInBytes, \ + _maxWidthInPixels, \ + _maxHeight, \ + _coreChannelDmaArmedOffset, \ + _dmaArmedSize) \ + { \ + .class = NV ## _classPrefix ## 70_DISPLAY, \ + .pEvoHal = _pEvoHal, \ + .coreChannelDma = { \ + .coreChannelClass = \ + NV ## _classPrefix ## 7D_CORE_CHANNEL_DMA, \ + .dmaArmedSize = _dmaArmedSize, \ + .dmaArmedOffset = \ + _coreChannelDmaArmedOffset, \ + }, \ + .evoCaps = { \ + .supportsDP13 = _supportsDP13, \ + .supportsHDMI20 = _supportsHDMI20, \ + .supportsYUV2020 = _supportsYUV2020, \ + .validNIsoFormatMask = _validNIsoFormatMask, \ + .inputLutAppliesToBase = _inputLutAppliesToBase, \ + .maxPitchValue = _maxPitch, \ + .maxWidthInBytes = _maxWidthInBytes, \ + .maxWidthInPixels = _maxWidthInPixels, \ + .maxHeight = _maxHeight, \ + .maxRasterWidth = DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_RASTER_SIZE_WIDTH), \ + .maxRasterHeight = DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_RASTER_SIZE_HEIGHT),\ + .dpYCbCr422MaxBpc = _dpYCbCr422MaxBpc, \ + .hdmiYCbCr422MaxBpc = _hdmiYCbCr422MaxBpc, \ + } \ + } + +#define EVO_CORE_CHANNEL_DMA_ARMED_OFFSET 0x0 + +#define EVO_CORE_CHANNEL_DMA_ARMED_SIZE 0x1000 + + +/* Pre-NVDisplay EVO entries */ +#define ENTRY_EVO(_classPrefix, ...)
\ + ENTRY(_classPrefix, __VA_ARGS__, \ + ((1 << NVKMS_NISO_FORMAT_LEGACY) | \ + (1 << NVKMS_NISO_FORMAT_FOUR_WORD)), \ + DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_STORAGE_PITCH), \ + DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_STORAGE_PITCH) * \ + NVKMS_BLOCK_LINEAR_GOB_WIDTH, \ + DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_SIZE_WIDTH), \ + DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_SIZE_HEIGHT), \ + EVO_CORE_CHANNEL_DMA_ARMED_OFFSET, \ + EVO_CORE_CHANNEL_DMA_ARMED_SIZE) + + +/* + * The file + * https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/volta/gv100/dev_display_withoffset.ref.txt + * defines: + * + * #define NV_UDISP_FE_CHN_ASSY_BASEADR_CORE 0x00680000 + * #define NV_UDISP_FE_CHN_ARMED_BASEADR_CORE (0x00680000+32768) + * + * The NVD_CORE_CHANNEL_DMA_ARMED_OFFSET is calculated as + * (NV_UDISP_FE_CHN_ARMED_BASEADR_CORE - NV_UDISP_FE_CHN_ASSY_BASEADR_CORE). + */ +#define NVD_CORE_CHANNEL_DMA_ARMED_OFFSET 0x8000 + +/* + * From the above in dev_display_withoffset.ref.txt, ARMED is the upper + * 32k of the core channel's 64k space. + */ +#define NVD_CORE_CHANNEL_DMA_ARMED_SIZE 0x8000 + +/* NVDisplay and later entries */ +#define ENTRY_NVD(_coreClassPrefix, _windowClassPrefix, ...) \ + ENTRY(_coreClassPrefix, __VA_ARGS__, \ + (1 << NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY), \ + DRF_MASK(NV ## _windowClassPrefix ## 7E_SET_PLANAR_STORAGE_PITCH), \ + DRF_MASK(NV ## _windowClassPrefix ## 7E_SET_PLANAR_STORAGE_PITCH) * \ + NVKMS_BLOCK_LINEAR_GOB_WIDTH, \ + DRF_MASK(NV ## _windowClassPrefix ## 7E_SET_SIZE_IN_WIDTH), \ + DRF_MASK(NV ## _windowClassPrefix ## 7E_SET_SIZE_IN_HEIGHT), \ + NVD_CORE_CHANNEL_DMA_ARMED_OFFSET, \ + NVD_CORE_CHANNEL_DMA_ARMED_SIZE) + + static const struct { + NvU32 class; + const NVEvoHAL *pEvoHal; + const NVEvoCoreChannelDmaRec coreChannelDma; + const NVEvoCapsRec evoCaps; + } dispTable[] = { + /* + * hdmiYCbCr422MaxBpc-----------------------+ + * dpYCbCr422MaxBpc---------------------+ | + * inputLutAppliesToBase ------------+ | | + * supportsYUV2020 ---------------+ | | | + * supportsHDMI20 -------------+ | | | | + * supportsDP13 ------------+ | | | | | + * pEvoHal --------------+ | | | | | | + * windowClassPrefix | | | | | | | + * classPrefix | | | | | | | | + * | | | | | | | | | + */ + /* Blackwell */ + ENTRY_NVD(C9, C9, &nvEvoC9, 1, 1, 1, 0, 12, 12), + /* Ada */ + ENTRY_NVD(C7, C6, &nvEvoC6, 1, 1, 1, 0, 12, 12), + /* Ampere */ + ENTRY_NVD(C6, C6, &nvEvoC6, 1, 1, 1, 0, 12, 12), + /* Turing */ + ENTRY_NVD(C5, C5, &nvEvoC5, 1, 1, 1, 0, 12, 12), + /* Volta */ + ENTRY_NVD(C3, C3, &nvEvoC3, 1, 1, 1, 0, 12, 12), + /* gp10x */ + ENTRY_EVO(98, &nvEvo97, 1, 1, 1, 1, 12, 12), + /* gp100 */ + ENTRY_EVO(97, &nvEvo97, 1, 1, 1, 1, 12, 12), + /* gm20x */ + ENTRY_EVO(95, &nvEvo94, 0, 1, 0, 1, 8, 0), + /* gm10x */ + ENTRY_EVO(94, &nvEvo94, 0, 0, 0, 1, 8, 0), + }; + + int i; + + for (i = 0; i < ARRAY_LEN(dispTable); i++) { + if (nvRmEvoClassListCheck(pDevEvo, dispTable[i].class)) { + pDevEvo->hal = dispTable[i].pEvoHal; + pDevEvo->dispClass = dispTable[i].class; + pDevEvo->caps = dispTable[i].evoCaps; + + pDevEvo->coreChannelDma = dispTable[i].coreChannelDma; + nvAssert(nvRmEvoClassListCheck( + pDevEvo, + pDevEvo->coreChannelDma.coreChannelClass)); + + return nvInitDispHalCursorEvo(pDevEvo); + } + } + + return NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; +} diff --git a/src/nvidia-modeset/src/nvkms-hdmi.c b/src/nvidia-modeset/src/nvkms-hdmi.c new file mode 100644 index 0000000..e8c9b6c --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-hdmi.c @@ -0,0 +1,2453 @@ +/* + *
SPDX-FileCopyrightText: Copyright (c) 2007 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This source file contains code for enabling HDMI audio. + */ + + +#include "nvkms-dpy.h" +#include "nvkms-hdmi.h" +#include "nvkms-evo.h" +#include "nvkms-modepool.h" +#include "nvkms-rmapi.h" +#include "nvkms-utils.h" +#include "nvkms-vrr.h" +#include "dp/nvdp-connector.h" + +#include "hdmi_spec.h" +#include "nvos.h" + +#include <ctrl/ctrl0073/ctrl0073dfp.h> // NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS +#include <ctrl/ctrl0073/ctrl0073dp.h> // NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM +#include <ctrl/ctrl0073/ctrl0073specific.h> // NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS +#include <ctrl/ctrl2080/ctrl2080unix.h> // NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER + +#include + +#define CAP_HDMI_SUPPORT_GPU 0x00000001 +#define CAP_HDMI_SUPPORT_MONITOR 0x00000002 +#define CAP_HDMI_SUPPORT_MONITOR_48_BPP 0x00000004 +#define CAP_HDMI_SUPPORT_MONITOR_36_BPP 0x00000008 +#define CAP_HDMI_SUPPORT_MONITOR_30_BPP 0x00000010 + +static inline const NVT_EDID_CEA861_INFO *GetExt861(const NVParsedEdidEvoRec *pParsedEdid, + int extIndex) +{ + if (!pParsedEdid->valid || extIndex > 1) { + return NULL; + } + + return (extIndex == 0) ? &pParsedEdid->info.ext861 : + &pParsedEdid->info.ext861_2; +} + +/* + * CalculateVideoInfoFrameColorFormat() - calculate colorspace, + * colorimetry and colorrange for video infoframe. + */ +static void CalculateVideoInfoFrameColorFormat( + const NVDpyAttributeColor *pDpyColor, + const NvU32 hdTimings, + const NVT_EDID_INFO *pEdidInfo, + NVT_VIDEO_INFOFRAME_CTRL *pCtrl) +{ + NvBool sinkSupportsRGBQuantizationOverride = FALSE; + + // sets video infoframe colorspace (RGB/YUV). + switch (pDpyColor->format) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + pCtrl->color_space = NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_RGB; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: + pCtrl->color_space = NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr422; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + pCtrl->color_space = NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr444; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420: + pCtrl->color_space = NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr420; + break; + default: + nvAssert(!"Invalid colorSpace value"); + break; + } + + // sets video infoframe colorimetry.
+ switch (pDpyColor->format) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + switch (pDpyColor->colorimetry) { + case NVKMS_OUTPUT_COLORIMETRY_BT2100: + pCtrl->colorimetry = NVT_VIDEO_INFOFRAME_BYTE2_C1C0_EXT_COLORIMETRY; + pCtrl->extended_colorimetry = + NVT_VIDEO_INFOFRAME_BYTE3_EC_BT2020RGBYCC; + break; + case NVKMS_OUTPUT_COLORIMETRY_DEFAULT: + pCtrl->colorimetry = NVT_VIDEO_INFOFRAME_BYTE2_C1C0_NO_DATA; + break; + } + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + switch (pDpyColor->colorimetry) { + case NVKMS_OUTPUT_COLORIMETRY_BT2100: + pCtrl->colorimetry = NVT_VIDEO_INFOFRAME_BYTE2_C1C0_EXT_COLORIMETRY; + pCtrl->extended_colorimetry = + NVT_VIDEO_INFOFRAME_BYTE3_EC_BT2020RGBYCC; + break; + case NVKMS_OUTPUT_COLORIMETRY_DEFAULT: + pCtrl->colorimetry = + (hdTimings ? NVT_VIDEO_INFOFRAME_BYTE2_C1C0_ITU709 : + NVT_VIDEO_INFOFRAME_BYTE2_C1C0_SMPTE170M_ITU601); + break; + } + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420: + // XXX HDR TODO: Support YUV420 + HDR + nvAssert(pDpyColor->colorimetry != NVKMS_OUTPUT_COLORIMETRY_BT2100); + pCtrl->colorimetry = NVT_VIDEO_INFOFRAME_BYTE2_C1C0_ITU709; + break; + default: + nvAssert(!"Invalid colorSpace value"); + break; + } + + // sets video infoframe colorrange. + switch (pDpyColor->range) { + case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL: + pCtrl->rgb_quantization_range = + NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_FULL_RANGE; + break; + case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED: + pCtrl->rgb_quantization_range = + NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_LIMITED_RANGE; + break; + default: + nvAssert(!"Invalid colorRange value"); + break; + } + + if (pEdidInfo != NULL) { + sinkSupportsRGBQuantizationOverride = (pEdidInfo->ext861.valid.VCDB && + ((pEdidInfo->ext861.video_capability & NVT_CEA861_VCDB_QS_MASK) >> + NVT_CEA861_VCDB_QS_SHIFT) != 0); + } + + if ((pDpyColor->format == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) && + !sinkSupportsRGBQuantizationOverride) { + pCtrl->rgb_quantization_range = NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_DEFAULT; + } + + /* + * Only limited color range is allowed with YUV444 and YUV422 color spaces. + */ + nvAssert(!(((pCtrl->color_space == NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr422) || + (pCtrl->color_space == NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr444)) && + (pCtrl->rgb_quantization_range != + NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_LIMITED_RANGE))); +} + +/* + * GetHDMISupportCap() - find the HDMI capabilities of + * the gpu and the display device. 
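+ * + * (Aside on the VCDB test in CalculateVideoInfoFrameColorFormat() + * above: the sink may honor an explicit RGB quantization range only if + * the Video Capability Data Block's QS bit is set; otherwise the + * infoframe must advertise the default range. Standalone form of the + * mask-and-shift, assuming QS is bit 6 of the capability byte per + * CEA-861: + * + *     unsigned char vcdb = 0x40;                          // example byte + *     int qsSelectable = (vcdb & NVT_CEA861_VCDB_QS_MASK) + *                            >> NVT_CEA861_VCDB_QS_SHIFT; // 1 here + * ) + *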
+ */ + +static NvU32 GetHDMISupportCap(const NVDpyEvoRec *pDpyEvo) +{ + NvU32 hdmiCap = 0; + int extIndex; + + if (pDpyEvo->hdmiCapable) { + hdmiCap |= CAP_HDMI_SUPPORT_GPU; + } + + for (extIndex = 0; TRUE; extIndex++) { + + int vsdbIndex; + const NVT_EDID_CEA861_INFO *pExt861 = + GetExt861(&pDpyEvo->parsedEdid, extIndex); + + if (pExt861 == NULL) { + break; + } + + if (pExt861->revision <= NVT_CEA861_REV_ORIGINAL) { + continue; + } + + for (vsdbIndex = 0; vsdbIndex < pExt861->total_vsdb; vsdbIndex++) { + if (pExt861->vsdb[vsdbIndex].ieee_id == NVT_CEA861_HDMI_IEEE_ID) { + const NVT_HDMI_LLC_VSDB_PAYLOAD *payload = + (const NVT_HDMI_LLC_VSDB_PAYLOAD *) + &pExt861->vsdb[vsdbIndex].vendor_data; + + hdmiCap |= CAP_HDMI_SUPPORT_MONITOR; + + if (payload->DC_48bit) { + hdmiCap |= CAP_HDMI_SUPPORT_MONITOR_48_BPP; + } + + if (payload->DC_36bit) { + hdmiCap |= CAP_HDMI_SUPPORT_MONITOR_36_BPP; + } + + if (payload->DC_30bit) { + hdmiCap |= CAP_HDMI_SUPPORT_MONITOR_30_BPP; + } + + return hdmiCap; + } + } + } + + return hdmiCap; +} + +/*! + * Return whether the GPU supports HDMI and the display is connected + * via HDMI. + */ +NvBool nvDpyIsHdmiEvo(const NVDpyEvoRec *pDpyEvo) +{ + NvU32 hdmiCap; + + hdmiCap = GetHDMISupportCap(pDpyEvo); + + return ((hdmiCap & CAP_HDMI_SUPPORT_GPU) && + (hdmiCap & CAP_HDMI_SUPPORT_MONITOR)); +} + +/*! + * Returns whether the GPU and the display both support HDMI depth 30. + */ +NvBool nvDpyIsHdmiDepth30Evo(const NVDpyEvoRec *pDpyEvo) +{ + const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + NvU32 hdmiCap = GetHDMISupportCap(pDpyEvo); + + return nvkms_hdmi_deepcolor() && + nvDpyIsHdmiEvo(pDpyEvo) && + pDevEvo->hal->caps.supportsHDMI10BPC && + (hdmiCap & CAP_HDMI_SUPPORT_MONITOR_30_BPP); +} + +/*! + * Sends the display's HDMI 2.0 capabilities to the RM. + */ +void nvSendHdmiCapsToRm(NVDpyEvoPtr pDpyEvo) +{ + NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS params = { 0 }; + NVParsedEdidEvoPtr pParsedEdid = &pDpyEvo->parsedEdid; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + if (!pDevEvo->caps.supportsHDMI20 || + nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return; + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyEvoGetConnectorId(pDpyEvo); + params.caps = 0; + + /* + * nvSendHdmiCapsToRm() gets called on a dpy's connect/disconnect events + * to set/clear capabilities; clear the capabilities if the parsed EDID + * is not valid.
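+ * + * (Aside: nvDpyIsHdmiEvo() above requires agreement between both sides + * of the link; a dpy counts as HDMI only when the GPU bit and the + * monitor bit are set at once: + * + *     NvU32 cap = GetHDMISupportCap(pDpyEvo); + *     NvBool isHdmi = (cap & CAP_HDMI_SUPPORT_GPU) && + *                     (cap & CAP_HDMI_SUPPORT_MONITOR); + * ) + *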
+ */ + if (pParsedEdid->valid) { + const NVT_HDMI_FORUM_INFO *pHdmiInfo = &pParsedEdid->info.hdmiForumInfo; + if (pHdmiInfo->scdc_present) { + params.caps |= DRF_DEF(0073, _CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, + _SCDC_SUPPORTED, _TRUE); + } + + if (pHdmiInfo->max_TMDS_char_rate > 0) { + params.caps |= DRF_DEF(0073, _CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, + _GT_340MHZ_CLOCK_SUPPORTED, _TRUE); + } + + if (pHdmiInfo->lte_340Mcsc_scramble) { + if (!pHdmiInfo->scdc_present) { + nvEvoLogDisp(pDispEvo, + EVO_LOG_WARN, + "EDID inconsistency: SCDC is not present in EDID, but EDID requests 340Mcsc scrambling."); + } + + params.caps |= DRF_DEF(0073, _CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, + _LTE_340MHZ_SCRAMBLING_SUPPORTED, _TRUE); + } + + /* HDMI Fixed-rate link information */ + if (pDevEvo->hal->caps.supportsHDMIFRL) { + nvAssert((pHdmiInfo->max_FRL_Rate & + ~DRF_MASK(NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED)) == 0); + params.caps |= DRF_NUM(0073_CTRL_CMD_SPECIFIC, _SET_HDMI_SINK_CAPS, _MAX_FRL_RATE_SUPPORTED, + pHdmiInfo->max_FRL_Rate); + + if (pHdmiInfo->dsc_1p2) { + nvAssert((pHdmiInfo->dsc_1p2 & + ~DRF_MASK(NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED)) == 0); + params.caps |= DRF_NUM(0073_CTRL_CMD_SPECIFIC, _SET_HDMI_SINK_CAPS, _DSC_MAX_FRL_RATE_SUPPORTED, + pHdmiInfo->dsc_1p2); + params.caps |= DRF_DEF(0073_CTRL_CMD_SPECIFIC, _SET_HDMI_SINK_CAPS, _DSC_12_SUPPORTED, _TRUE); + } + } + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS failed"); + } +} + +/* + * HdmiSendEnable() - Used to signal RM to enable various hdmi components + * such as audio engine. + */ + +static void HdmiSendEnable(NVDpyEvoPtr pDpyEvo, NvBool hdmiEnable) +{ + NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS params = { 0 }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDpyEvo->pDispEvo->displayOwner; + params.displayId = nvDpyEvoGetConnectorId(pDpyEvo); + params.enable = hdmiEnable; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE failed"); + } +} + +/*! + * Sends General Control Packet to the HDMI sink. + */ +static void SendHdmiGcp(const NVDispEvoRec *pDispEvo, + const NvU32 head, NvBool avmute) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVHDMIPKT_RESULT ret; + + NvU8 sb0 = avmute ? HDMI_GENCTRL_PACKET_MUTE_ENABLE : + HDMI_GENCTRL_PACKET_MUTE_DISABLE; + + NvU8 sb1 = 0; + + NvU8 sb2 = NVT_HDMI_RESET_DEFAULT_PIXELPACKING_PHASE; + + NvU8 gcp[] = { + pktType_GeneralControl, 0, 0, sb0, sb1, sb2, 0, 0, 0, 0 + }; + + ret = NvHdmiPkt_PacketWrite(pDevEvo->hdmiLib.handle, + pDispEvo->displayOwner, + pHeadState->activeRmId, + head, + NVHDMIPKT_TYPE_GENERAL_CONTROL, + NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME, + sizeof(gcp), + gcp); + + if (ret != NVHDMIPKT_SUCCESS) { + nvAssert(ret == NVHDMIPKT_SUCCESS); + } +} + +/* + * SendVideoInfoFrame() - Construct video infoframe using provided EDID and call + * ->SendHdmiInfoFrame() to send it to RM. 
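+ * + * (Aside on SendHdmiGcp() above: the General Control Packet is written + * as a raw 10-byte array in which only three subpacket bytes matter + * here: sb0 selects AVMUTE on/off and sb2 resets the pixel-packing + * phase. The mute variant, for reference: + * + *     NvU8 gcp[10] = { pktType_GeneralControl, 0, 0, + *                      HDMI_GENCTRL_PACKET_MUTE_ENABLE,             // sb0 + *                      0,                                           // sb1 + *                      NVT_HDMI_RESET_DEFAULT_PIXELPACKING_PHASE,   // sb2 + *                      0, 0, 0, 0 }; + * ) + *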
+ */ +static void SendVideoInfoFrame(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVDpyAttributeColor *pDpyColor, + const NVDispHeadInfoFrameStateEvoRec *pInfoFrameState, + NVT_EDID_INFO *pEdidInfo) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvBool hdTimings = pInfoFrameState->hdTimings; + NVT_VIDEO_INFOFRAME_CTRL videoCtrl = pInfoFrameState->videoCtrl; + NVT_VIDEO_INFOFRAME VideoInfoFrame; + NVT_STATUS status; + + CalculateVideoInfoFrameColorFormat(pDpyColor, hdTimings, pEdidInfo, &videoCtrl); + + status = NvTiming_ConstructVideoInfoframe(pEdidInfo, + &videoCtrl, + NULL, &VideoInfoFrame); + if (status != NVT_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Error in constructing Video InfoFrame"); + return; + } + + pDevEvo->hal->SendHdmiInfoFrame( + pDispEvo, + head, + NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME, + (NVT_INFOFRAME_HEADER *) &VideoInfoFrame, + (/* header length */ sizeof(NVT_INFOFRAME_HEADER) + + /* payload length */ VideoInfoFrame.length), + TRUE /* needChecksum */); +} + +/* + * SendHDMI3DVendorSpecificInfoFrame() - Construct vendor specific infoframe + * using provided EDID and call ->SendHdmiInfoFrame() to send it to RM. Currently + * hardcoded to send the infoframe necessary for HDMI 3D. + */ + +static void +SendHDMI3DVendorSpecificInfoFrame(const NVDispEvoRec *pDispEvo, + const NvU32 head, NVT_EDID_INFO *pEdidInfo) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL vendorCtrl = { + .Enable = 1, + .HDMIFormat = NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_3D, + .HDMI_VIC = NVT_HDMI_VS_BYTE5_HDMI_VIC_NA, + .ThreeDStruc = NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK, + .ThreeDDetail = NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_NA, + .MetadataPresent = 0, + .MetadataType = NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_NA, + }; + NVT_VENDOR_SPECIFIC_INFOFRAME vendorInfoFrame; + NVT_STATUS status; + + if (!pEdidInfo->HDMI3DSupported) { + // Only send the HDMI 3D infoframe if the display supports HDMI 3D + return; + } + + // Send the infoframe with HDMI 3D configured if we're setting an HDMI 3D + // mode. + if (!pHeadState->timings.hdmi3D) { + pDevEvo->hal->DisableHdmiInfoFrame(pDispEvo, head, + NVT_INFOFRAME_TYPE_VENDOR_SPECIFIC); + return; + } + + status = NvTiming_ConstructVendorSpecificInfoframe(pEdidInfo, + &vendorCtrl, + &vendorInfoFrame); + if (status != NVT_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Error in constructing Vendor Specific InfoFrame"); + return; + } + + pDevEvo->hal->SendHdmiInfoFrame( + pDispEvo, + head, + NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME, + &vendorInfoFrame.Header, + (/* header length */ sizeof(vendorInfoFrame.Header) + + /* payload length */ vendorInfoFrame.Header.length), + TRUE /* needChecksum */); +} + +/* + * SendHDMIVendorSpecificInfoFrame() - Construct vendor specific infoframe + * using provided EDID and call SendInfoFrame() to send it to RM. 
+ */ + +static void +SendHDMIVendorSpecificInfoFrame(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVDispHeadInfoFrameStateEvoRec *pInfoFrameState, + NVT_EDID_INFO *pEdidInfo) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL vendorCtrl = pInfoFrameState->vendorCtrl; + NVT_VENDOR_SPECIFIC_INFOFRAME vendorInfoFrame; + NVT_STATUS status; + + if (!vendorCtrl.Enable) { + return; + } + + if (pHeadState->timings.hdmi3D) { + return; + } + + status = NvTiming_ConstructVendorSpecificInfoframe(pEdidInfo, + &vendorCtrl, + &vendorInfoFrame); + if (status != NVT_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Error in constructing Vendor Specific InfoFrame"); + return; + } + + pDevEvo->hal->SendHdmiInfoFrame( + pDispEvo, + head, + NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME, + &vendorInfoFrame.Header, + (/* header length */ sizeof(vendorInfoFrame.Header) + + /* payload length */ vendorInfoFrame.Header.length), + TRUE /* needChecksum */); +} + +static void +SendHDRInfoFrame(const NVDispEvoRec *pDispEvo, const NvU32 head, + NVT_EDID_INFO *pEdidInfo) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + NVT_HDR_INFOFRAME hdrInfoFrame = { 0 }; + const NVT_HDR_STATIC_METADATA *pHdrInfo = + &pEdidInfo->hdr_static_metadata_info; + NvEvoInfoFrameTransmitControl transmitCtrl = + NV_EVO_INFOFRAME_TRANSMIT_CONTROL_INIT; + + // Only send the HDMI HDR infoframe if the display supports HDR + if (!pHdrInfo->supported_eotf.smpte_st_2084_eotf || + (pHdrInfo->static_metadata_type != 1)) { + return; + } + + // XXX HDR is not supported with HDMI 3D due to both using VSI infoframes. + if (pEdidInfo->HDMI3DSupported) { + return; + } + + hdrInfoFrame.header.type = NVT_INFOFRAME_TYPE_DYNAMIC_RANGE_MASTERING; + hdrInfoFrame.header.version = NVT_VIDEO_INFOFRAME_VERSION_1; + hdrInfoFrame.header.length = sizeof(NVT_HDR_INFOFRAME) - + sizeof(NVT_INFOFRAME_HEADER); + + if (pHeadState->hdrInfoFrame.state == NVKMS_HDR_INFOFRAME_STATE_ENABLED) { + hdrInfoFrame.payload.eotf = pHeadState->hdrInfoFrame.eotf; + hdrInfoFrame.payload.static_metadata_desc_id = + NVT_CEA861_STATIC_METADATA_SM0; + + // payload->type1 = static metadata + ct_assert(sizeof(NVT_HDR_INFOFRAME_MASTERING_DATA) == + sizeof(struct NvKmsHDRStaticMetadata)); + nvkms_memcpy(&hdrInfoFrame.payload.type1, + (const NvU16 *) &pHeadState->hdrInfoFrame.staticMetadata, + sizeof(NVT_HDR_INFOFRAME_MASTERING_DATA)); + + transmitCtrl = NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME; + } else if (pHeadState->hdrInfoFrame.state == + NVKMS_HDR_INFOFRAME_STATE_TRANSITIONING) { + nvDpyAssignSDRInfoFramePayload(&hdrInfoFrame.payload); + + transmitCtrl = NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME; + } else { + nvAssert(pHeadState->hdrInfoFrame.state == NVKMS_HDR_INFOFRAME_STATE_DISABLED); + + nvDpyAssignSDRInfoFramePayload(&hdrInfoFrame.payload); + + transmitCtrl = NV_EVO_INFOFRAME_TRANSMIT_CONTROL_SINGLE_FRAME; + } + + pDevEvo->hal->SendHdmiInfoFrame( + pDispEvo, + head, + transmitCtrl, + (NVT_INFOFRAME_HEADER *) &hdrInfoFrame.header, + (/* header length */ sizeof(hdrInfoFrame.header) + + /* payload length */ hdrInfoFrame.header.length), + TRUE /* needChecksum */); +} + + +/* + * Send video and 3D InfoFrames for HDMI. 
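+ * + * (Aside: SendHDRInfoFrame() above sets the infoframe header's length + * field to the payload-only size, while the byte count handed to + * SendHdmiInfoFrame() is header plus payload; in general: + * + *     hdr.length = sizeof(full_frame) - sizeof(header);  // payload only + *     total      = sizeof(header) + hdr.length;          // bytes to send + * ) + *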
+ */ +void nvUpdateHdmiInfoFrames(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVDpyAttributeColor *pDpyColor, + const NVDispHeadInfoFrameStateEvoRec *pInfoFrameState, + NVDpyEvoRec *pDpyEvo) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + if (!nvDpyIsHdmiEvo(pDpyEvo)) { + return; + } + + if (!pDpyEvo->parsedEdid.valid) { + nvEvoLogDispDebug( + pDispEvo, EVO_LOG_WARN, + "No EDID: cannot construct video/vendor-specific info frame"); + return; + } + + SendVideoInfoFrame(pDispEvo, + head, + pDpyColor, + pInfoFrameState, + &pDpyEvo->parsedEdid.info); + + if (!pDevEvo->isSOCDisplay) { + SendHDMI3DVendorSpecificInfoFrame(pDispEvo, + head, + &pDpyEvo->parsedEdid.info); + } else { + SendHDMIVendorSpecificInfoFrame(pDispEvo, + head, + pInfoFrameState, + &pDpyEvo->parsedEdid.info); + } + + SendHDRInfoFrame(pDispEvo, + head, + &pDpyEvo->parsedEdid.info); +} + +static void SetDpAudioMute(const NVDispEvoRec *pDispEvo, + const NvU32 displayId, const NvBool mute) +{ + NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS params = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = displayId; + params.mute = mute; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM, + &params, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, "NvRmControl" + "(NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM) failed, " + "return status = %d...", ret); + } +} + +static void SetDpAudioEnable(const NVDispEvoRec *pDispEvo, + const NvU32 head, const NvBool enable) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo; + NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS params = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + /* Mute audio stream before disabling it */ + if (!enable) { + SetDpAudioMute(pDispEvo, pHeadState->activeRmId, TRUE); + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = pHeadState->activeRmId; + params.enable = enable; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE, + &params, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "%s: Failed to %s DisplayPort audio stream-%u", + pConnectorEvo->name, + enable ? "enable" : "disable", + head); + } + + /* Unmute audio stream after enabling it */ + if (enable) { + SetDpAudioMute(pDispEvo, pHeadState->activeRmId, FALSE); + } +} + +/* + * Uses RM control to mute the HDMI audio stream at the source side. + */ +static void SetHdmiAudioMute(const NVDispEvoRec *pDispEvo, + const NvU32 head, const NvBool mute) +{ + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS params = { }; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = pHeadState->activeRmId; + params.mute = (mute ?
NV0073_CTRL_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_TRUE : + NV0073_CTRL_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_FALSE); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM failed"); + } +} + +static void EnableHdmiAudio(const NVDispEvoRec *pDispEvo, + const NvU32 head, const NvBool enable) +{ + /* + * XXX Is it correct to use pktType_GeneralControl to mute/unmute + * the audio? pktType_GeneralControl controls both the audio and video data. + */ + static const NvU8 InfoframeMutePacket[] = { + pktType_GeneralControl, 0, 0, HDMI_GENCTRL_PACKET_MUTE_ENABLE, 0, 0, 0, 0, + 0, 0 + }; + static const NvU8 InfoframeUnMutePacket[] = { + pktType_GeneralControl, 0, 0, HDMI_GENCTRL_PACKET_MUTE_DISABLE, 0, 0, 0, 0, + 0, 0 + }; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS params = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = pHeadState->activeRmId; + params.transmitControl = + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ENABLE, _YES) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _OTHER_FRAME, _DISABLE) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _SINGLE_FRAME, _DISABLE) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ON_HBLANK, _DISABLE) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _VIDEO_FMT, _SW_CONTROLLED) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _RESERVED_LEGACY_MODE, _NO); + + params.packetSize = sizeof(InfoframeMutePacket); + + nvAssert(sizeof(InfoframeMutePacket) == sizeof(InfoframeUnMutePacket)); + + nvkms_memcpy(params.aPacket, + enable ? 
+                 InfoframeUnMutePacket : InfoframeMutePacket,
+                 params.packetSize);
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET,
+                         &params,
+                         sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET failed");
+    }
+}
+
+static const NVT_EDID_CEA861_INFO *GetMaxSampleRateExtBlock(
+    const NVDpyEvoRec *pDpyEvo,
+    const NVParsedEdidEvoRec *pParsedEdid,
+    NvU32 *pMaxFreqSupported)
+{
+    const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo;
+    const NVT_EDID_CEA861_INFO *pExt861 = NULL;
+    int extIndex;
+    int i;
+
+    *pMaxFreqSupported = 0;
+
+    for (extIndex = 0; TRUE; extIndex++) {
+
+        NvU8 sampleRateMask = 0;
+        const NVT_EDID_CEA861_INFO *pTmpExt861 =
+            GetExt861(pParsedEdid, extIndex);
+        NvU32 maxFreqSupported = 0;
+
+        if (pTmpExt861 == NULL) {
+            break;
+        }
+
+        if (pTmpExt861->revision == NVT_CEA861_REV_NONE) {
+            continue;
+        }
+
+        /* Loop through all SADs to find the maximum supported sample rate. */
+        for (i = 0; i < NVT_CEA861_AUDIO_MAX_DESCRIPTOR; i++) {
+
+            const NvU8 byte1 = pTmpExt861->audio[i].byte1;
+            const NvU8 byte2 = pTmpExt861->audio[i].byte2;
+
+            if (byte1 == 0) {
+                break;
+            }
+
+            if ((byte2 & NVT_CEA861_AUDIO_SAMPLE_RATE_MASK) > sampleRateMask) {
+                sampleRateMask = byte2 & NVT_CEA861_AUDIO_SAMPLE_RATE_MASK;
+            }
+        }
+
+        if (sampleRateMask != 0) {
+            /* Get the index of the highest set bit. */
+            for (i = 7; i >= 1; i--) {
+                if ((1 << (i - 1)) & sampleRateMask) {
+                    maxFreqSupported = i;
+                    break;
+                }
+            }
+        } else if (pTmpExt861->basic_caps & NVT_CEA861_CAP_BASIC_AUDIO) {
+            /*
+             * No short audio descriptor found; fall back to the basic caps:
+             * uncompressed, two channel, digital audio.  Exact parameters
+             * are determined by the interface specification used with
+             * CEA-861-D (e.g., 2 channel IEC 60958 LPCM; 32, 44.1, and
+             * 48 kHz sampling rates; 16 bits/sample).
+             */
+            maxFreqSupported =
+                NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0480KHZ;
+        }
+
+        /* Cap DP audio to 48 kHz unless the device supports 192 kHz. */
+        if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo) &&
+            !pDevEvo->hal->caps.supportsDPAudio192KHz &&
+            (maxFreqSupported >
+             NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0480KHZ)) {
+            maxFreqSupported =
+                NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0480KHZ;
+        }
+
+        if (maxFreqSupported > *pMaxFreqSupported) {
+            *pMaxFreqSupported = maxFreqSupported;
+            pExt861 = pTmpExt861;
+        }
+    }
+
+    return pExt861;
+}
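+
+/*
+ * Note on the mapping above (a sketch; assumes the conventional layout of
+ * the NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_* values): bit
+ * (i - 1) of the CEA-861 SAD sample-rate byte corresponds to enum value i,
+ * i.e. 32 kHz -> 1, 44.1 kHz -> 2, 48 kHz -> 3, ..., 192 kHz -> 7.  For
+ * example, a SAD advertising rates up to 96 kHz (bit 4 set) yields
+ * maxFreqSupported == 5; the basic-audio fallback of
+ * NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0480KHZ is consistent
+ * with 48 kHz being bit 2.
+ */
+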
+/*
+ * Search a CEA-861 block for a Vendor Specific Data Block with an
+ * IEEE "HDMI Licensing, LLC" OUI.
+ *
+ * If found, return a pointer to the Vendor Specific Data Block;
+ * otherwise, return NULL.
+ */
+static const VSDB_DATA *GetVsdb(const NVT_EDID_CEA861_INFO *pExt861)
+{
+    const VSDB_DATA *pVsdb = NULL;
+
+    for (int i = 0; i < pExt861->total_vsdb; i++) {
+        if (pExt861->vsdb[i].ieee_id == NVT_CEA861_HDMI_IEEE_ID) {
+            pVsdb = &pExt861->vsdb[i];
+            break;
+        }
+    }
+    return pVsdb;
+}
+
+static NvBool FillELDBuffer(const NVDpyEvoRec *pDpyEvo,
+                            const NvU32 displayId,
+                            const NVParsedEdidEvoRec *pParsedEdid,
+                            NVEldEvoRec *pEld,
+                            NvU32 *pMaxFreqSupported)
+{
+    const NVT_EDID_CEA861_INFO *pExt861;
+    NvU32 SADCount, monitorNameLen;
+    NvU8 name[NVT_EDID_LDD_PAYLOAD_SIZE + 1];
+    NVT_STATUS status;
+    NvU32 i;
+    NvU8 EldSAI = 0;
+    NvU8 EldAudSynchDelay = 0;
+    const VSDB_DATA *pVsdb;
+    NvBool isDisplayPort = nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo);
+
+    pExt861 = GetMaxSampleRateExtBlock(pDpyEvo, pParsedEdid, pMaxFreqSupported);
+
+    if (pExt861 == NULL) {
+        return FALSE;
+    }
+
+    /* ELD header block: offset 0: ELD_Ver */
+    pEld->buffer[0] = NVT_ELD_VER_2 << 3;
+
+    /* Baseline block: offset 4: CEA_EDID_Ver */
+    pEld->buffer[4] = pExt861->revision << 5;
+
+    /* offset 5: SAD_Count */
+    SADCount = 0;
+    while (SADCount < NVT_CEA861_AUDIO_MAX_DESCRIPTOR &&
+           pExt861->audio[SADCount].byte1 != 0) {
+        SADCount++;
+    }
+    pEld->buffer[5] = SADCount << 4;
+
+    /* offset 5: Conn_Type */
+    if (isDisplayPort) {
+        pEld->buffer[5] |= NVT_ELD_CONN_TYPE_DP << 2;
+    } else {
+        pEld->buffer[5] |= NVT_ELD_CONN_TYPE_HDMI << 2;
+    }
+
+    /* offset 5 b0: HDCP; always 0 for now */
+
+    pVsdb = GetVsdb(pExt861);
+    /* offset 5 b1=1 if Supports_AI; always 0 for DP */
+    if ((!isDisplayPort) &&
+        (pVsdb != NULL) &&
+        (pVsdb->vendor_data_size > 2)) {
+        EldSAI = pVsdb->vendor_data[2];
+        EldSAI >>= 7;
+    }
+    pEld->buffer[5] |= EldSAI << 1;
+
+    /* offset 6: Aud_Synch_delay in units of 2 msec */
+    if ((pVsdb != NULL) &&
+        (pVsdb->vendor_data_size > 6)) {
+        EldAudSynchDelay = pVsdb->vendor_data[6];
+    }
+    pEld->buffer[6] = EldAudSynchDelay;
+
+    /* offset 7: speaker allocation; multiple allocations are not supported
+     * in the ELD, so use only the first block */
+    pEld->buffer[7] = pExt861->speaker[0].byte1;
+
+    /*
+     * offset 8 ~ 15: port ID; nobody knows what the port ID is, and so far
+     * DD/RM/Audio all agree to fill it with the display Id.
+     */
+    pEld->buffer[8] = displayId & 0xff;
+    pEld->buffer[9] = (displayId >> 8) & 0xff;
+    pEld->buffer[10] = (displayId >> 16) & 0xff;
+    pEld->buffer[11] = (displayId >> 24) & 0xff;
+
+    /* offset 16 ~ 17: manufacturer name */
+    pEld->buffer[16] = pParsedEdid->info.manuf_id & 0xff;
+    pEld->buffer[17] = pParsedEdid->info.manuf_id >> 8;
+    /* offset 18 ~ 19: product code */
+    pEld->buffer[18] = pParsedEdid->info.product_id & 0xff;
+    pEld->buffer[19] = (pParsedEdid->info.product_id >> 8) & 0xff;
+
+    /*
+     * offset 20 ~ 20 + MNL - 1: monitor name string (MNL - Monitor Name
+     * Length)
+     */
+
+    monitorNameLen = 0;
+
+    status = NvTiming_GetProductName(&pParsedEdid->info, name, sizeof(name));
+
+    if (status == NVT_STATUS_SUCCESS) {
+        /*
+         * NvTiming_GetProductName returns a nul-terminated string.  Figure
+         * out how long it is and copy the bytes up to, but not including,
+         * the nul terminator.
+         */
+        monitorNameLen = nvkms_strlen((char *)name);
+        pEld->buffer[4] |= monitorNameLen;
+        nvkms_memcpy(&pEld->buffer[20], name, monitorNameLen);
+    }
+
+    /* offset 20 + MNL ~ 20 + MNL + (3 * SAD_Count) - 1 : CEA_SADs */
+    if (SADCount) {
+        const size_t sadSize = SADCount * sizeof(NVT_3BYTES);
+        const size_t bufferSize = sizeof(pEld->buffer) - monitorNameLen - 20;
+        const size_t copySize = NV_MIN(bufferSize, sadSize);
+        nvAssert(copySize == sadSize);
+
+        nvkms_memcpy(&pEld->buffer[20 + monitorNameLen],
+                     &pExt861->audio[0], copySize);
+    }
+
+    /*
+     * The reserved section is not used yet.
+     * offset 20 + MNL + (3 * SAD_Count) ~ 4 + Baseline_ELD_Len * 4 - 1;
+     */
+
+    /* Baseline block size in DWORDs */
+    i = (16 + monitorNameLen + SADCount * sizeof(NVT_3BYTES) +
+         sizeof(NvU32) - 1) / sizeof(NvU32);
+    pEld->buffer[2] = (NvU8)i;
+
+    /* Update the entire ELD space */
+    pEld->size = NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER;
+
+    return TRUE;
+}
+
+void nvHdmiDpConstructHeadAudioState(const NvU32 displayId,
+                                     const NVDpyEvoRec *pDpyEvo,
+                                     NVDispHeadAudioStateEvoRec *pAudioState)
+{
+    nvkms_memset(pAudioState, 0, sizeof(*pAudioState));
+
+    /*
+     * CRTs and DSI digital flat panels do not support audio.  If
+     * (supported == FALSE) then nvHdmiDpEnableDisableAudio() does nothing.
+     */
+    if (pDpyEvo->pConnectorEvo->legacyType !=
+        NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP ||
+        pDpyEvo->pConnectorEvo->signalFormat ==
+        NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) {
+        return;
+    }
+
+    /*
+     * DP/TMDS digital flat panels support audio, but do not enable audio on
+     * eDP and DVI displays: some eDP panels go blank when audio is enabled,
+     * and DVI monitors do not support audio.
+     *
+     * If (supported == TRUE) and (enabled == FALSE) then
+     * nvHdmiDpEnableDisableAudio() makes sure to keep audio disabled for
+     * a given head.
+     */
+    pAudioState->supported = TRUE;
+
+    if ((nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo) &&
+         pDpyEvo->internal) ||
+        (!nvDpyIsHdmiEvo(pDpyEvo) &&
+         !nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo))) {
+        return;
+    }
+
+    pAudioState->isAudioOverHdmi = nvDpyIsHdmiEvo(pDpyEvo);
+
+    if (FillELDBuffer(pDpyEvo,
+                      displayId,
+                      &pDpyEvo->parsedEdid,
+                      &pAudioState->eld,
+                      &pAudioState->maxFreqSupported)) {
+        pAudioState->enabled = TRUE;
+    }
+}
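+
+/*
+ * Summary of the state constructed above -- the three cases that
+ * nvHdmiDpEnableDisableAudio() distinguishes:
+ *
+ *   supported == FALSE                  -> CRT/DSI: audio is a no-op
+ *   supported == TRUE, enabled == FALSE -> eDP/DVI: keep audio disabled
+ *   supported == TRUE, enabled == TRUE  -> HDMI/DP: program the ELD and
+ *                                          enable audio post-modeset
+ */
+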
+/*
+ * Return the audio device entry of the connector attached to the given
+ * head, or NONE if the head is inactive.
+ *
+ * Each connector (SOR) supports four audio device entries, from 0 to 3,
+ * which can drive four independent audio streams.  Any head can be attached
+ * to any audio device entry.
+ *
+ * Before audio-over-dp-mst support, by default the 0th device entry was
+ * used when a given head was driving a DP-SST/HDMI/DVI display.  This
+ * function preserves that behavior.  In the case of DP-MST, multiple heads
+ * are attached to a single connector; in that case this function returns
+ * a device entry equal to the given head index.
+ */
+static NvU32 GetAudioDeviceEntry(const NVDispEvoRec *pDispEvo, const NvU32 head)
+{
+    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    const NVConnectorEvoRec *pConnectorEvo =
+        pHeadState->pConnectorEvo;
+
+    if (pConnectorEvo == NULL) {
+        return NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_NONE;
+    }
+
+    ct_assert(NV_MAX_AUDIO_DEVICE_ENTRIES == NVKMS_MAX_HEADS_PER_DISP);
+
+    if (nvConnectorUsesDPLib(pConnectorEvo) &&
+        (nvDPGetActiveLinkMode(pConnectorEvo->pDpLibConnector) ==
+         NV_DP_LINK_MODE_MST)) {
+        return NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_0 + head;
+    }
+
+    return NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_0;
+}
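+
+/*
+ * For example: with DP-MST driving heads 1 and 2 on one connector,
+ * GetAudioDeviceEntry() returns device entries 1 and 2 respectively, so
+ * each MST stream gets an independent audio device; a DP-SST/HDMI/DVI head
+ * always gets entry 0.
+ */
+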
+/*!
+ * Send EDID-Like-Data (ELD) to RM.
+ *
+ * The ELD should be updated in the following situations:
+ *
+ *  1. Power on reset
+ *  2. Pre modeset
+ *  3. HotPlug / Post modeset
+ *
+ * Apart from the ELD, also update the following control flags:
+ *
+ *  isPD   - Present Detect, indicates whether the monitor is attached
+ *  isELDV - indicates whether the ELD is valid
+ *
+ * The values of isPD and isELDV should be:
+ *
+ *  NV_ELD_POWER_ON_RESET : isPD = 0, isELDV = 0
+ *  NV_ELD_PRE_MODESET    : isPD = 1, isELDV = 0
+ *  NV_ELD_POST_MODESET   : isPD = 1, isELDV = 1
+ *
+ * The initial ELD case of each audio device entry in hardware is unknown.
+ * Fortunately, NVConnectorEvoRec::audioDevEldCase[] is zero-initialized,
+ * which means each audioDevEldCase[] array element will have the initial
+ * value NV_ELD_PRE_MODESET=0.
+ *
+ * That ensures that nvRemoveUnusedHdmiDpAudioDevice(), during
+ * the first modeset, will reset all unused audio device entries to
+ * NV_ELD_POWER_ON_RESET.
+ *
+ * \param[in]  pDispEvo     The disp of the displayId.
+ * \param[in]  displayId    The display device whose ELD should be updated.
+ *                          This should be NVDispHeadStateEvoRec::activeRmId
+ *                          in the case of NV_ELD_PRE_MODESET and
+ *                          NV_ELD_POST_MODESET; otherwise it should be
+ *                          NVConnectorEvoRec::displayId.
+ * \param[in]  deviceEntry  The device entry of the connector.
+ * \param[in]  isDP         Whether the display device is DisplayPort.
+ * \param[in]  pParsedEdid  The parsed EDID from which the ELD should be
+ *                          extracted.
+ * \param[in]  eldCase      The condition that requires updating the ELD.
+ */
+
+static void RmSetELDAudioCaps(
+    const NVDispEvoRec *pDispEvo,
+    NVConnectorEvoRec *pConnectorEvo,
+    const NvU32 displayId,
+    const NvU32 deviceEntry,
+    const NvU32 maxFreqSupported, const NVEldEvoRec *pEld,
+    const NvEldCase eldCase)
+{
+    NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS params = { 0 };
+    NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS audio_power_params = { 0 };
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    NvBool isPD, isELDV;
+    NvU32 ret;
+
+    pConnectorEvo->audioDevEldCase[deviceEntry] = eldCase;
+
+    /* Set up the ctrl flags. */
+    switch (eldCase) {
+        case NV_ELD_POWER_ON_RESET :
+            isPD = isELDV = FALSE;
+            break;
+        case NV_ELD_PRE_MODESET :
+            isPD = TRUE;
+            isELDV = FALSE;
+            break;
+        case NV_ELD_POST_MODESET :
+            isPD = isELDV = TRUE;
+            break;
+        default :
+            return;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.deviceEntry = deviceEntry;
+    params.displayId = displayId;
+
+    if (isELDV) {
+        if (pEld->size == 0) {
+            isPD = isELDV = FALSE;
+        } else {
+            ct_assert(sizeof(params.bufferELD) == sizeof(pEld->buffer));
+
+            nvkms_memcpy(params.bufferELD, pEld->buffer, sizeof(pEld->buffer));
+            params.numELDSize = pEld->size;
+
+            params.maxFreqSupported = maxFreqSupported;
+        }
+    } else {
+        params.numELDSize = 0;
+    }
+
+    params.ctrl =
+        DRF_NUM(0073_CTRL, _DFP_ELD_AUDIO_CAPS, _CTRL_PD, isPD) |
+        DRF_NUM(0073_CTRL, _DFP_ELD_AUDIO_CAPS, _CTRL_ELDV, isELDV);
+
+    /*
+     * ELD information won't be propagated to the GPU HDA controller driver
+     * if the HDA controller is in a suspended state.  Issue the
+     * NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER RM control call to bring
+     * the HDA controller into an active state before writing the ELD; once
+     * the ELD data is written, the HDA controller can go back into a
+     * suspended state.
+     */
+    audio_power_params.bEnter = FALSE;
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle,
+                         NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER,
+                         &audio_power_params, sizeof(audio_power_params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER failed");
+    }
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS,
+                         &params,
+                         sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, "NvRmControl"
+                          "(NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS) failed; "
+                          "return status = %d", ret);
+    }
+
+    audio_power_params.bEnter = TRUE;
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle,
+                         NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER,
+                         &audio_power_params, sizeof(audio_power_params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER failed");
+    }
+}
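+
+/*
+ * Sequencing sketch for nvHdmiDpEnableDisableAudio() below: on the
+ * pre-modeset path (enable == FALSE) the ELD is invalidated
+ * (NV_ELD_PRE_MODESET) before DP audio is disabled; on the post-modeset
+ * path (enable == TRUE) DP audio is enabled before the ELD is written
+ * (NV_ELD_POST_MODESET), or the audio device is reset
+ * (NV_ELD_POWER_ON_RESET) if audio is not enabled on the head.
+ */
+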
+void nvHdmiDpEnableDisableAudio(const NVDispEvoRec *pDispEvo,
+                                const NvU32 head, const NvBool enable)
+{
+    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo;
+    const NvU32 deviceEntry = GetAudioDeviceEntry(pDispEvo, head);
+
+    /*
+     * We should only reach this function for active heads, and therefore
+     * pConnectorEvo and deviceEntry are valid.
+     */
+    nvAssert((pHeadState->pConnectorEvo != NULL) &&
+             (deviceEntry != NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_NONE));
+
+    if (!pHeadState->audio.supported) {
+        return;
+    }
+
+    if (!enable) {
+        /*
+         * This is the pre-modeset code path.  If the audio device is
+         * enabled (pHeadState->audio.enabled == TRUE), then invalidate the
+         * ELD buffer before disabling audio.
+         */
+        if (pHeadState->audio.enabled) {
+            RmSetELDAudioCaps(pDispEvo,
+                              pConnectorEvo,
+                              pHeadState->activeRmId,
+                              deviceEntry,
+                              0 /* maxFreqSupported */,
+                              NULL /* pEld */,
+                              NV_ELD_PRE_MODESET);
+
+            if (nvConnectorUsesDPLib(pConnectorEvo)) {
+                SetDpAudioEnable(pDispEvo, head, FALSE /* enable */);
+            }
+        }
+    }
+
+    if (pHeadState->audio.isAudioOverHdmi) {
+        EnableHdmiAudio(pDispEvo, head, enable);
+        SetHdmiAudioMute(pDispEvo, head, !enable /* mute */);
+        SendHdmiGcp(pDispEvo, head, !enable /* avmute */);
+    }
+
+    if (enable) {
+        /*
+         * This is the post-modeset code path.  If the audio device is
+         * enabled (pHeadState->audio.enabled == TRUE), then populate the
+         * ELD buffer after enabling audio; otherwise, make sure to remove
+         * the corresponding audio device.
+         */
+        if (pHeadState->audio.enabled) {
+            if (nvConnectorUsesDPLib(pConnectorEvo)) {
+                SetDpAudioEnable(pDispEvo, head, TRUE /* enable */);
+            }
+
+            RmSetELDAudioCaps(pDispEvo,
+                              pConnectorEvo,
+                              pHeadState->activeRmId,
+                              deviceEntry,
+                              pHeadState->audio.maxFreqSupported,
+                              &pHeadState->audio.eld,
+                              NV_ELD_POST_MODESET);
+        } else {
+            RmSetELDAudioCaps(pDispEvo,
+                              pConnectorEvo,
+                              nvDpyIdToNvU32(pConnectorEvo->displayId),
+                              deviceEntry,
+                              0 /* maxFreqSupported */,
+                              NULL /* pEld */,
+                              NV_ELD_POWER_ON_RESET);
+        }
+    }
+}
+
+/*
+ * Report HDMI capabilities to RM before the modeset.
+ */
+void nvDpyUpdateHdmiPreModesetEvo(NVDpyEvoPtr pDpyEvo)
+{
+    if (!nvDpyIsHdmiEvo(pDpyEvo)) {
+        pDpyEvo->pConnectorEvo->isHdmiEnabled = FALSE;
+        return;
+    }
+
+    HdmiSendEnable(pDpyEvo, TRUE);
+    pDpyEvo->pConnectorEvo->isHdmiEnabled = TRUE;
+}
+
+/*
+ * Parse HDMI 2.1 VRR capabilities from the EDID and GPU.
+ */
+void nvDpyUpdateHdmiVRRCaps(NVDpyEvoPtr pDpyEvo)
+{
+    const NVParsedEdidEvoRec *pParsedEdid = &pDpyEvo->parsedEdid;
+    const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+
+    const NvBool gpuSupportsHDMIVRR = pDevEvo->hal->caps.supportsHDMIVRR;
+
+    const NvBool dispSupportsVrr = nvDispSupportsVrr(pDispEvo) &&
+        !nvkms_conceal_vrr_caps();
+
+    const NvU32 edidVrrMin = pParsedEdid->info.hdmiForumInfo.vrr_min;
+
+    nvAssert(pParsedEdid->valid);
+
+    if (dispSupportsVrr && gpuSupportsHDMIVRR && (edidVrrMin > 0)) {
+        if (nvDpyIsAdaptiveSyncDefaultlisted(pDpyEvo)) {
+            pDpyEvo->vrr.type =
+                NVKMS_DPY_VRR_TYPE_ADAPTIVE_SYNC_DEFAULTLISTED;
+        } else {
+            pDpyEvo->vrr.type =
+                NVKMS_DPY_VRR_TYPE_ADAPTIVE_SYNC_NON_DEFAULTLISTED;
+        }
+    }
+}
+
+void nvRemoveUnusedHdmiDpAudioDevice(const NVDispEvoRec *pDispEvo)
+{
+    NVConnectorEvoRec *pConnectorEvo;
+    const NvU32 activeSorMask = nvGetActiveSorMask(pDispEvo);
+
+    FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
+        NvU32 deviceEntry;
+
+        // Only connectors with assigned SORs can have audio.
+        if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR ||
+            pConnectorEvo->or.primary == NV_INVALID_OR) {
+            continue;
+        }
+
+        // Check whether an active pConnectorEvo shares an SOR with this one.
+        //
+        // This is a workaround for the fact that
+        // NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS takes a displayId rather
+        // than an SOR index.  See bug 1953489.
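+        //
+        // That is: when this connector is active and its primary SOR is in
+        // the active SOR mask, skip the reset; because the control call
+        // addresses the audio device by displayId, issuing it here could
+        // otherwise disturb the audio state of the display actively using
+        // that SOR.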
+ if (nvIsConnectorActiveEvo(pConnectorEvo) && + (NVBIT(pConnectorEvo->or.primary) & activeSorMask) != 0x0) { + continue; + } + + for (deviceEntry = 0; + deviceEntry < NV_MAX_AUDIO_DEVICE_ENTRIES; + deviceEntry++) { + + /* + * Skip if the audio device is enabled (ELD case is set to + * NV_ELD_POST_MODESET by nvHdmiDpEnableDisableAudio()), or if the + * audio device is already disabled (ELD case is set to + * NV_ELD_POWER_ON_RESET). + */ + if ((pConnectorEvo->audioDevEldCase[deviceEntry] == + NV_ELD_POST_MODESET) || + (pConnectorEvo->audioDevEldCase[deviceEntry] == + NV_ELD_POWER_ON_RESET)) { + continue; + } + + RmSetELDAudioCaps(pDispEvo, + pConnectorEvo, + nvDpyIdToNvU32(pConnectorEvo->displayId), + deviceEntry, + 0 /* maxFreqSupported */, + NULL /* pEld */, + NV_ELD_POWER_ON_RESET); + } + } +} + +/* + * Find the name of the given audio format, as described in the + * CEA-861 specification's description of byte 1 in the Audio + * Descriptor Block. hasSampleSize and hasMaxBitRate (i.e., how to + * interpret byte 3 of the Audio Descriptor Block) are a function of + * audio format, so set them as a side effect of interpreting the + * audio format. + * + * Note the return value is a const char * and should not be freed. + */ +static const char *GetCea861AudioFormatInfo(NvU8 format, + NvBool *hasSampleSize, + NvBool *hasMaxBitRate) +{ + static const struct { + NvU8 format; + NvBool hasSampleSize : 1; + NvBool hasMaxBitRate : 1; + const char *name; + } audioFormatTable[] = { + { NVT_CEA861_AUDIO_FORMAT_LINEAR_PCM, TRUE, FALSE, "PCM" }, + { NVT_CEA861_AUDIO_FORMAT_AC3, FALSE, TRUE, "AC-3" }, + { NVT_CEA861_AUDIO_FORMAT_MPEG1, FALSE, TRUE, "MPEG-1" }, + { NVT_CEA861_AUDIO_FORMAT_MP3, FALSE, TRUE, "MP3" }, + { NVT_CEA861_AUDIO_FORMAT_MPEG2, FALSE, TRUE, "MPEG-2" }, + { NVT_CEA861_AUDIO_FORMAT_AAC, FALSE, TRUE, "AAC" }, + { NVT_CEA861_AUDIO_FORMAT_DTS, FALSE, TRUE, "DTS" }, + { NVT_CEA861_AUDIO_FORMAT_ATRAC, FALSE, TRUE, "ATRAC" }, + { NVT_CEA861_AUDIO_FORMAT_ONE_BIT, FALSE, FALSE, "DSD" }, + { NVT_CEA861_AUDIO_FORMAT_DDP, FALSE, FALSE, "E-AC-3" }, + { NVT_CEA861_AUDIO_FORMAT_DTS_HD, FALSE, FALSE, "DTS-HD" }, + { NVT_CEA861_AUDIO_FORMAT_MAT, FALSE, FALSE, "MLP" }, + { NVT_CEA861_AUDIO_FORMAT_DST, FALSE, FALSE, "DSP" }, + { NVT_CEA861_AUDIO_FORMAT_WMA_PRO, FALSE, FALSE, "WMA Pro" }, + }; + + int i; + + *hasSampleSize = FALSE; + *hasMaxBitRate = FALSE; + + for (i = 0; i < ARRAY_LEN(audioFormatTable); i++) { + if (format != audioFormatTable[i].format) { + continue; + } + + *hasSampleSize = audioFormatTable[i].hasSampleSize; + *hasMaxBitRate = audioFormatTable[i].hasMaxBitRate; + + return audioFormatTable[i].name; + } + + return ""; +} + + +/* + * Build a string description of the list of sample Rates, as + * described in the CEA-861 specification's description of byte 2 in + * the Audio Descriptor Block. + * + * Note the return value is a static char * and will be overwritten in + * subsequent calls to this function. 
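+ *
+ * (A static buffer is safe here because nvkms runs single-threaded under
+ * the nvkms lock -- see the HdmiLibAcquireMutex() comment below -- so there
+ * are no concurrent callers; the string is only valid until the next call.)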
+ */ +static const char *GetCea861AudioSampleRateString(NvU8 sampleRates) +{ + static const struct { + NvU8 rate; + const char *name; + } sampleRateTable[] = { + { NVT_CEA861_AUDIO_SAMPLE_RATE_32KHZ, "32KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_44KHZ, "44KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_48KHZ, "48KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_88KHZ, "88KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_96KHZ, "96KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_176KHZ,"176KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_192KHZ,"192KHz" }, + }; + + static char sampleRateString[64]; + + NvBool first = TRUE; + int i; + char *s; + int ret, bytesLeft = sizeof(sampleRateString); + + sampleRateString[0] = '\0'; + s = sampleRateString; + + for (i = 0; i < ARRAY_LEN(sampleRateTable); i++) { + if (sampleRates & sampleRateTable[i].rate) { + if (first) { + first = FALSE; + } else { + ret = nvkms_snprintf(s, bytesLeft, ", "); + s += ret; + bytesLeft -= ret; + } + ret = nvkms_snprintf(s, bytesLeft, "%s", sampleRateTable[i].name); + s += ret; + bytesLeft -= ret; + } + } + + nvAssert(bytesLeft >= 0); + + return sampleRateString; +} + + +/* + * Build a string description of the list of sample sizes, as + * described in the CEA-861 specification's description of byte 3 in + * the Audio Descriptor Block. + * + * Note the return value is a static char * and will be overwritten in + * subsequent calls to this function. + */ +static const char *GetCea861AudioSampleSizeString(NvU8 sampleSizes) +{ + static const struct { + NvU8 bit; + const char *name; + } sampleSizeTable[] = { + { NVT_CEA861_AUDIO_SAMPLE_SIZE_16BIT, "16-bits" }, + { NVT_CEA861_AUDIO_SAMPLE_SIZE_20BIT, "20-bits" }, + { NVT_CEA861_AUDIO_SAMPLE_SIZE_24BIT, "24-bits" }, + }; + + static char sampleSizeString[64]; + + NvBool first = TRUE; + int i; + char *s; + int ret, bytesLeft = sizeof(sampleSizeString); + + sampleSizeString[0] = '\0'; + s = sampleSizeString; + + for (i = 0; i < ARRAY_LEN(sampleSizeTable); i++) { + if (sampleSizes & sampleSizeTable[i].bit) { + if (first) { + first = FALSE; + } else { + ret = nvkms_snprintf(s, bytesLeft, ", "); + s += ret; + bytesLeft -= ret; + } + ret = nvkms_snprintf(s, bytesLeft, "%s", sampleSizeTable[i].name); + s += ret; + bytesLeft -= ret; + } + } + + nvAssert(bytesLeft >= 0); + + return sampleSizeString; +} + + +/* + * Log the speaker allocation data block, as described in the CEA-861 + * specification. 
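+ *
+ * Only the first speaker allocation block (speaker[0]) is logged; this
+ * matches FillELDBuffer() above, which copies only speaker[0] into the
+ * ELD, since the ELD cannot represent multiple allocations.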
+ */ +static void LogEdidCea861SpeakerAllocationData(NVEvoInfoStringPtr pInfoString, + NvU8 speaker) +{ + if ((speaker & NVT_CEA861_SPEAKER_ALLOC_MASK) == 0) { + return; + } + + nvEvoLogInfoString(pInfoString, + " Speaker Allocation Data :"); + + if (speaker & NVT_CEA861_SPEAKER_ALLOC_FL_FR) { + nvEvoLogInfoString(pInfoString, + " Front Left + Front Right"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_LFE) { + nvEvoLogInfoString(pInfoString, + " Low Frequency Effect"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_FC) { + nvEvoLogInfoString(pInfoString, + " Front Center"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_RL_RR) { + nvEvoLogInfoString(pInfoString, + " Rear Left + Rear Right"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_RC) { + nvEvoLogInfoString(pInfoString, + " Rear Center"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_FLC_FRC) { + nvEvoLogInfoString(pInfoString, + " Front Left Center + Front Right Center"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_RLC_RRC) { + nvEvoLogInfoString(pInfoString, + " Rear Left Center + Rear Right Center"); + } +} + + +static void LogEdidCea861Info(NVEvoInfoStringPtr pInfoString, + const NVT_EDID_CEA861_INFO *pExt861) +{ + int vsdbIndex; + int audioIndex; + + nvEvoLogInfoString(pInfoString, + " CEA-861 revision : %d\n", + pExt861->revision); + + /* + * IEEE vendor registration IDs are tracked here: + * https://standards.ieee.org/develop/regauth/oui/oui.txt + */ + for (vsdbIndex = 0; vsdbIndex < pExt861->total_vsdb; vsdbIndex++) { + const NvU32 ieeeId = pExt861->vsdb[vsdbIndex].ieee_id; + nvEvoLogInfoString(pInfoString, + " IEEE Vendor Registration ID: %02x-%02x-%02x", + (ieeeId >> 16) & 0xFF, + (ieeeId >> 8) & 0xFF, + ieeeId & 0xFF); + } + + nvEvoLogInfoString(pInfoString, + " Supports YCbCr 4:4:4 : %s", + (pExt861->basic_caps & NVT_CEA861_CAP_YCbCr_444) ? + "Yes" : "No"); + + nvEvoLogInfoString(pInfoString, + " Supports YCbCr 4:2:2 : %s", + (pExt861->basic_caps & NVT_CEA861_CAP_YCbCr_422) ? + "Yes" : "No"); + + nvEvoLogInfoString(pInfoString, + " Supports Basic Audio : %s", + (pExt861->basic_caps & NVT_CEA861_CAP_BASIC_AUDIO) ? 
+ "Yes" : "No"); + + for (audioIndex = 0; audioIndex < ARRAY_LEN(pExt861->audio); audioIndex++) { + + NvU32 byte1, byte2, byte3; + NvU8 format; + NvU8 maxChannels; + NvU8 sampleRates; + const char *formatString; + NvBool hasSampleSize; + NvBool hasMaxBitRate; + + byte1 = pExt861->audio[audioIndex].byte1; + byte2 = pExt861->audio[audioIndex].byte2; + byte3 = pExt861->audio[audioIndex].byte3; + + if (byte1 == 0) { + break; + } + + nvEvoLogInfoString(pInfoString, + " Audio Descriptor : %d", audioIndex); + + /* + * byte 1 contains the Audio Format and the maximum number + * of channels + */ + + format = ((byte1 & NVT_CEA861_AUDIO_FORMAT_MASK) >> + NVT_CEA861_AUDIO_FORMAT_SHIFT); + + formatString = GetCea861AudioFormatInfo(format, + &hasSampleSize, + &hasMaxBitRate); + + maxChannels = (byte1 & NVT_CEA861_AUDIO_MAX_CHANNEL_MASK) + 1; + + /* byte 2 contains the sample rates */ + + sampleRates = (byte2 & NVT_CEA861_AUDIO_SAMPLE_RATE_MASK); + + /* + * byte 3 varies, depending on Audio Format; interpret + * using hasSampleSize and hasMaxBitRate + */ + + nvEvoLogInfoString(pInfoString, + " Audio Format : %s", formatString); + nvEvoLogInfoString(pInfoString, + " Maximum Channels : %d", maxChannels); + nvEvoLogInfoString(pInfoString, + " Sample Rates : %s", + GetCea861AudioSampleRateString(sampleRates)); + if (hasSampleSize) { + nvEvoLogInfoString(pInfoString, + " Sample Sizes : %s", + GetCea861AudioSampleSizeString(byte3)); + } + if (hasMaxBitRate) { + nvEvoLogInfoString(pInfoString, + " Maximum Bit Rate : %d kHz", + byte3 * 8); + } + } + + LogEdidCea861SpeakerAllocationData(pInfoString, pExt861->speaker[0].byte1); +} + +void nvLogEdidCea861InfoEvo(NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString) +{ + int extIndex; + + for (extIndex = 0; TRUE; extIndex++) { + const NVT_EDID_CEA861_INFO *pExt861 = + GetExt861(&pDpyEvo->parsedEdid, extIndex); + + if (pExt861 == NULL) { + break; + } + + if (pExt861->revision == NVT_CEA861_REV_NONE) { + continue; + } + + nvEvoLogInfoString(pInfoString, + "CEA-861 extension block # : %d\n", extIndex); + + LogEdidCea861Info(pInfoString, pExt861); + } +} + +/* + * HDMI 2.0 4K@60hz uncompressed RGB 4:4:4 (6G mode) is allowed if: + * + * - The GPU supports it. + * - The EDID and NVT_TIMING indicate the monitor supports it, or + * this check is overridden. + */ +NvBool nvHdmi204k60HzRGB444Allowed(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const NVT_TIMING *pTiming) +{ + const NVParsedEdidEvoRec *pParsedEdid = &pDpyEvo->parsedEdid; + const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + + const NvBool gpuSupports444 = pDevEvo->caps.supportsHDMI20; + + const NvBool overrideMonitorCheck = ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_HDMI2_CHECK) != 0); + + const NvBool monitorSupports444 = + (IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.rgb444.bpcs) && + (pParsedEdid->info.hdmiForumInfo.max_TMDS_char_rate > 0)); + + nvAssert(pParsedEdid->valid); + + return (gpuSupports444 && + (overrideMonitorCheck || monitorSupports444)); +} + +/* + * Enable or disable HDMI 2.1 VRR infoframes. The HDMI 2.1 VRR infoframe must + * be enabled before the first extended vblank after enabling VRR, or the + * display will blank. + */ +void nvHdmiSetVRR(NVDispEvoPtr pDispEvo, NvU32 head, NvBool enable) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVT_EXTENDED_METADATA_PACKET_INFOFRAME empInfoFrame; + NVT_EXTENDED_METADATA_PACKET_INFOFRAME_CTRL empCtrl = { 0 }; + NvEvoInfoFrameTransmitControl transmitCtrl = enable ? 
+ NV_EVO_INFOFRAME_TRANSMIT_CONTROL_EVERY_FRAME : + NV_EVO_INFOFRAME_TRANSMIT_CONTROL_SINGLE_FRAME; + NVT_STATUS status; + + empCtrl.EnableVRR = enable; + + status = NvTiming_ConstructExtendedMetadataPacketInfoframe(&empCtrl, + &empInfoFrame); + + if (status != NVT_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Error in constructing Extended Metadata Packet InfoFrame"); + return; + } + + // XXX Extended metadata infoframes do not contain a length header field. + pDevEvo->hal->SendHdmiInfoFrame( + pDispEvo, + head, + transmitCtrl, + (NVT_INFOFRAME_HEADER *) &empInfoFrame, + sizeof(empInfoFrame), + FALSE /* needChecksum */); +} + +/* + * The HDMI library calls this function during initialization to ask the + * implementation to allocate and map a NV*71_DISP_SF_USER object. The + * appropriate class, mapping size, and subdevice ID are provided. A handle is + * generated here and passed back to the library; the same handle is provided + * in the symmetric HdmiLibRmFreeMemoryMap() function so we don't have to save + * a copy of it in nvkms's data structures. + */ +static NvBool HdmiLibRmGetMemoryMap( + NvHdmiPkt_CBHandle handle, + NvU32 dispSfUserClassId, + NvU32 dispSfUserSize, + NvU32 sd, + NvU32 *pDispSfHandle, + void **pPtr) +{ + NVDevEvoRec *pDevEvo = handle; + void *ptr = NULL; + NvU32 ret; + NvU32 dispSfHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (dispSfHandle == 0) { + return FALSE; + } + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + dispSfHandle, + dispSfUserClassId, + NULL); + + if (ret != NVOS_STATUS_SUCCESS) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + dispSfHandle); + return FALSE; + } + + ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + dispSfHandle, + 0, + dispSfUserSize, + &ptr, + 0); + if (ret != NVOS_STATUS_SUCCESS) { + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + dispSfHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + dispSfHandle); + return FALSE; + } + + *pDispSfHandle = dispSfHandle; + *pPtr = ptr; + + return TRUE; +} + +static void HdmiLibRmFreeMemoryMap( + NvHdmiPkt_CBHandle handle, + NvU32 sd, + NvU32 dispSfHandle, + void *ptr) +{ + NVDevEvoRec *pDevEvo = handle; + NvU32 ret; + + if (ptr != NULL) { + nvAssert(dispSfHandle != 0); + ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + dispSfHandle, + ptr, + 0); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(ret == NVOS_STATUS_SUCCESS); + } + } + + if (dispSfHandle) { + ret = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + dispSfHandle); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(ret == NVOS_STATUS_SUCCESS); + } + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + dispSfHandle); + } +} + +/* Wrapper around RmControl for 0073 (NV04_DISPLAY_COMMON) object. */ +static NvBool HdmiLibRmDispControl( + NvHdmiPkt_CBHandle handle, + NvU32 subDevice, + NvU32 cmd, + void *pParams, + NvU32 paramSize) +{ + NVDevEvoRec *pDevEvo = handle; + NvU32 ret; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + cmd, + pParams, + paramSize); + + return ret == NVOS_STATUS_SUCCESS; +} + +static void HdmiLibAcquireMutex( + NvHdmiPkt_CBHandle handle) +{ + /* The HDMI library only executes when nvkms calls it, and nvkms will only + * call it while holding the nvkms lock. So there is no concurrency to + * protect against with this mutex. 
+     */
+}
+
+static void HdmiLibReleaseMutex(
+    NvHdmiPkt_CBHandle handle)
+{
+}
+
+static void *HdmiLibMalloc(NvHdmiPkt_CBHandle handle, NvLength len)
+{
+    return nvAlloc(len);
+}
+
+static void HdmiLibFree(NvHdmiPkt_CBHandle handle, void *p)
+{
+    nvFree(p);
+}
+
+static void HdmiLibPrint(
+    NvHdmiPkt_CBHandle handle,
+    const char *format, ...)
+{
+    NVDevEvoRec *pDevEvo = handle;
+
+    va_list ap;
+    va_start(ap, format);
+    /* The HDMI library doesn't have log levels, but currently only logs in
+     * debug builds.  It's pretty chatty (e.g., it prints "Initialize
+     * Success" when it initializes), so hardcode it to INFO level for
+     * now. */
+    nvVEvoLog(EVO_LOG_INFO, pDevEvo->gpuLogIndex, format, ap);
+    va_end(ap);
+}
+
+static void HdmiLibAssert(
+    const char *expr,
+    const char *filename,
+    const char *function,
+    unsigned int line)
+{
+#ifdef DEBUG
+    nvDebugAssert(expr, filename, function, line);
+#endif
+}
+
+static NvU64 hdmiLibTimerStartTime = 0;
+static NvU64 hdmiLibTimerTimeout = 0;
+
+static NvBool HdmiLibSetTimeout(NvHdmiPkt_CBHandle handle,
+                                NvU32 timeoutUs)
+{
+    hdmiLibTimerTimeout = timeoutUs;
+    hdmiLibTimerStartTime = nvkms_get_usec();
+    return TRUE;
+}
+
+static NvBool HdmiLibCheckTimeout(NvHdmiPkt_CBHandle handle)
+{
+    const NvU64 currentTime = nvkms_get_usec();
+    if (currentTime < hdmiLibTimerStartTime) {
+        return TRUE;
+    }
+    return (currentTime - hdmiLibTimerStartTime) > hdmiLibTimerTimeout;
+}
+
+static const NVHDMIPKT_CALLBACK HdmiLibCallbacks =
+{
+    .rmGetMemoryMap  = HdmiLibRmGetMemoryMap,
+    .rmFreeMemoryMap = HdmiLibRmFreeMemoryMap,
+    .rmDispControl2  = HdmiLibRmDispControl,
+    .acquireMutex    = HdmiLibAcquireMutex,
+    .releaseMutex    = HdmiLibReleaseMutex,
+    .setTimeout      = HdmiLibSetTimeout,
+    .checkTimeout    = HdmiLibCheckTimeout,
+    .malloc          = HdmiLibMalloc,
+    .free            = HdmiLibFree,
+    .print           = HdmiLibPrint,
+    .assert          = HdmiLibAssert,
+};
+
+void nvTeardownHdmiLibrary(NVDevEvoRec *pDevEvo)
+{
+    NvHdmiPkt_DestroyLibrary(pDevEvo->hdmiLib.handle);
+}
+
+NvBool nvInitHdmiLibrary(NVDevEvoRec *pDevEvo)
+{
+    pDevEvo->hdmiLib.handle =
+        NvHdmiPkt_InitializeLibrary(pDevEvo->dispClass,
+                                    pDevEvo->numSubDevices,
+                                    pDevEvo, // callback handle
+                                    &HdmiLibCallbacks,
+                                    0,      // not used because we set
+                                    NULL);  // NVHDMIPKT_RM_CALLS_INTERNAL=0
+
+    if (pDevEvo->hdmiLib.handle == NVHDMIPKT_INVALID_HANDLE) {
+        pDevEvo->hdmiLib.handle = NULL;
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+/*
+ * Call the HDMI library to "assess" the link.  This basically does link
+ * training to see what the maximum lane configuration is.  We do this when
+ * the monitor is connected, after reading the EDID, so we can validate
+ * modes against the link capabilities.
+ *
+ * Returns true if the link was assessed to be capable of FRL, and false
+ * otherwise.
+ */
+NvBool nvHdmiFrlAssessLink(NVDpyEvoPtr pDpyEvo)
+{
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVHDMIPKT_RESULT ret;
+    const NvU32 displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
+
+    nvAssert(nvDpyIsHdmiEvo(pDpyEvo));
+
+    /* HDMI dpys are not dynamic dpys, so the connector should have a dpyId.
*/ + nvAssert(displayId != 0); + nvAssert(pDpyEvo->parsedEdid.valid); + + ret = NvHdmi_AssessLinkCapabilities(pDevEvo->hdmiLib.handle, + pDispEvo->displayOwner, + displayId, + &pDpyEvo->parsedEdid.info, + &pDpyEvo->hdmi.srcCaps, + &pDpyEvo->hdmi.sinkCaps); + if (ret != NVHDMIPKT_SUCCESS) { + nvAssert(ret == NVHDMIPKT_SUCCESS); + return FALSE; + } + + return pDpyEvo->hdmi.sinkCaps.linkMaxFRLRate != HDMI_FRL_DATA_RATE_NONE; +} + +/* + * Determine if the given HDMI dpy supports FRL. + * + * Returns TRUE if the dpy supports FRL, or FALSE otherwise. + */ +NvBool nvHdmiDpySupportsFrl(const NVDpyEvoRec *pDpyEvo) +{ + const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + + nvAssert(nvDpyIsHdmiEvo(pDpyEvo)); + + /* Can't use FRL if disabled by kernel module param. */ + if (nvkms_disable_hdmi_frl()) { + return FALSE; + } + + /* Can't use FRL if the display hardware doesn't support it. */ + if (!pDevEvo->hal->caps.supportsHDMIFRL) { + return FALSE; + } + + /* + * Can't use FRL if the connector is not natively HDMI (e.g., if + * using a passive DP-to-HDMI dongle, or if overrideEdid/forceConnected + * attempted to force HDMI FRL on a DP connector). + */ + if (pDpyEvo->pConnectorEvo->type != NVKMS_CONNECTOR_TYPE_HDMI) { + return FALSE; + } + + /* Can't use FRL if the HDMI sink doesn't support it. */ + if (!pDpyEvo->parsedEdid.valid || + !pDpyEvo->parsedEdid.info.hdmiForumInfo.max_FRL_Rate) { + return FALSE; + } + + return TRUE; +} + +NvU32 nvHdmiGetEffectivePixelClockKHz(const NVDpyEvoRec *pDpyEvo, + const NVHwModeTimingsEvo *pHwTimings, + const NVDpyAttributeColor *pDpyColor) +{ + const NvU32 pixelClock = (pHwTimings->yuv420Mode == NV_YUV420_MODE_HW) ? + (pHwTimings->pixelClock / 2) : pHwTimings->pixelClock; + + nvAssert((pHwTimings->yuv420Mode == NV_YUV420_MODE_NONE) || + (pDpyColor->format == + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420)); + nvAssert(nvDpyIsHdmiEvo(pDpyEvo)); + nvAssert(pDpyColor->bpc >= NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8); + + /* YCbCr422 does not change the effective pixel clock. */ + if (pDpyColor->format == + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422) { + return pixelClock; + } + + /* + * For > 8 BPC, the effective pixel clock is adjusted upwards according to + * the ratio of the given BPC and 8 BPC. 
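+ *
+ * For example, a 600000 KHz (600 MHz) mode at 10 bpc yields an effective
+ * 600000 * 10 / 8 = 750000 KHz; for an NV_YUV420_MODE_HW mode, the pixel
+ * clock is halved before this scaling is applied.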
+ */ + return ((pixelClock * pDpyColor->bpc) / 8ULL); +} + +static NvU64 GetHdmiFrlLinkRate(HDMI_FRL_DATA_RATE frlRate) +{ + const NvU64 giga = 1000000000ULL; + NvU64 hdmiLinkRate = 0; + switch(frlRate ) + { + case HDMI_FRL_DATA_RATE_NONE: + hdmiLinkRate = 0; + break; + case HDMI_FRL_DATA_RATE_3LANES_3GBPS: + hdmiLinkRate = 3 * giga; + break; + case HDMI_FRL_DATA_RATE_3LANES_6GBPS: + case HDMI_FRL_DATA_RATE_4LANES_6GBPS: + hdmiLinkRate = 6 * giga; + break; + case HDMI_FRL_DATA_RATE_4LANES_8GBPS: + hdmiLinkRate = 8 * giga; + break; + case HDMI_FRL_DATA_RATE_4LANES_10GBPS: + hdmiLinkRate = 10 * giga; + break; + case HDMI_FRL_DATA_RATE_4LANES_12GBPS: + hdmiLinkRate = 12 * giga; + break; + case HDMI_FRL_DATA_RATE_UNSPECIFIED: + nvAssert(!"Unspecified FRL data rate"); + break; + }; + + return hdmiLinkRate; +} + +static NvBool nvHdmiFrlQueryConfigOneBpc( + const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pModeTimings, + const NVHwModeTimingsEvo *pHwTimings, + const NVDpyAttributeColor *pDpyColor, + const NvBool b2Heads1Or, + const struct NvKmsModeValidationParams *pValidationParams, + HDMI_FRL_CONFIG *pConfig, + NVDscInfoEvoRec *pDscInfo) +{ + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + HDMI_VIDEO_TRANSPORT_INFO videoTransportInfo = { }; + HDMI_QUERY_FRL_CLIENT_CONTROL clientControl = { }; + const NVT_TIMING *pNvtTiming; + NVT_TIMING nvtTiming = { }; + NVHDMIPKT_RESULT ret; + + if (pHwTimings->protocol != NVKMS_PROTOCOL_SOR_HDMI_FRL) { + nvkms_memset(pDscInfo, 0, sizeof(*pDscInfo)); + nvkms_memset(pConfig, 0, sizeof(*pConfig)); + return TRUE; + } + + nvAssert(nvDpyIsHdmiEvo(pDpyEvo)); + nvAssert(nvHdmiDpySupportsFrl(pDpyEvo)); + nvAssert(nvHdmiGetEffectivePixelClockKHz(pDpyEvo, pHwTimings, pDpyColor) > + pDpyEvo->maxSingleLinkPixelClockKHz); + + /* See if we can find an NVT_TIMING for this mode from the EDID. */ + pNvtTiming = nvFindEdidNVT_TIMING(pDpyEvo, pModeTimings, pValidationParams); + + if (pNvtTiming == NULL) { + /* + * No luck finding this mode in the EDID. + * + * Construct enough of an NVT_TIMING for the hdmi library, based on the + * pHwTimings mode. + * + * The HDMI library's hdmiQueryFRLConfigC671 uses: + * - pVidTransInfo->pTiming->pclk + * - pVidTransInfo->pTiming->HTotal + * - pVidTransInfo->pTiming->HVisible + * - pVidTransInfo->pTiming->VVisible + * + * This is also used, although we don't have a CEA format so we just + * set it to 0: + * - NVT_GET_CEA_FORMAT(pVidTransInfo->pTiming->etc.status) + */ + + /* Convert from KHz to 10KHz; round up for the purposes of determining a + * minimum FRL rate. */ + nvtTiming.pclk = (pHwTimings->pixelClock + 9) / 10; + nvtTiming.HVisible = pHwTimings->rasterBlankStart.x - + pHwTimings->rasterBlankEnd.x; + nvtTiming.HTotal = pHwTimings->rasterSize.x; + nvtTiming.VVisible = pHwTimings->rasterBlankStart.y - + pHwTimings->rasterBlankEnd.y; + nvtTiming.etc.status = 0; + + pNvtTiming = &nvtTiming; + } + + videoTransportInfo.pTiming = pNvtTiming; + /* + * pTimings->pixelDepth isn't assigned yet at this point in mode + * validation, so we can't use that. + * This matches the non-DP default assigned later in + * nvConstructHwModeTimingsEvo(). 
+     */
+    switch (pDpyColor->bpc) {
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10:
+            videoTransportInfo.bpc = HDMI_BPC10;
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8:
+            videoTransportInfo.bpc = HDMI_BPC8;
+            break;
+        default:
+            return FALSE;
+    }
+
+    switch (pDpyColor->format) {
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
+            videoTransportInfo.packing = HDMI_PIXEL_PACKING_RGB;
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
+            nvAssert(pDevEvo->hal->caps.supportsYCbCr422OverHDMIFRL);
+            videoTransportInfo.packing = HDMI_PIXEL_PACKING_YCbCr422;
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
+            videoTransportInfo.packing = HDMI_PIXEL_PACKING_YCbCr444;
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
+            switch (pModeTimings->yuv420Mode) {
+                case NV_YUV420_MODE_NONE:
+                case NV_YUV420_MODE_SW:
+                    /*
+                     * Don't bother implementing this with FRL.
+                     * HDMI FRL and HW YUV420 support were both added in
+                     * nvdisplay 4.0 hardware, so if the hardware supports
+                     * FRL it should support YUV420_MODE_HW.
+                     */
+                    return FALSE;
+                case NV_YUV420_MODE_HW:
+                    videoTransportInfo.packing = HDMI_PIXEL_PACKING_YCbCr420;
+                    break;
+            }
+            break;
+    }
+
+    videoTransportInfo.bDualHeadMode = b2Heads1Or;
+
+    clientControl.option = HDMI_QUERY_FRL_HIGHEST_PIXEL_QUALITY;
+
+    if (pValidationParams->dscMode == NVKMS_DSC_MODE_FORCE_ENABLE) {
+        clientControl.enableDSC = TRUE;
+    }
+
+    /*
+     * 2Heads1Or requires either YUV420 or DSC; if b2Heads1Or is enabled
+     * but YUV420 is not, force DSC.
+     */
+    if (b2Heads1Or && (pHwTimings->yuv420Mode != NV_YUV420_MODE_HW)) {
+        if (pValidationParams->dscMode == NVKMS_DSC_MODE_FORCE_DISABLE) {
+            return FALSE;
+        }
+        clientControl.enableDSC = TRUE;
+    }
+
+    if (pValidationParams->dscOverrideBitsPerPixelX16 != 0) {
+        clientControl.forceBppx16 = TRUE;
+        clientControl.bitsPerPixelX16 =
+            pValidationParams->dscOverrideBitsPerPixelX16;
+    }
+
+    ret = NvHdmi_QueryFRLConfig(pDevEvo->hdmiLib.handle,
+                                &videoTransportInfo,
+                                &clientControl,
+                                &pDpyEvo->hdmi.srcCaps,
+                                &pDpyEvo->hdmi.sinkCaps,
+                                pConfig);
+
+    if ((ret == NVHDMIPKT_SUCCESS) && b2Heads1Or) {
+        /*
+         * 2Heads1Or requires either YUV420 or DSC; pConfig->dscInfo.bEnableDSC
+         * is assigned by NvHdmi_QueryFRLConfig().
+         */
+        nvAssert(pConfig->dscInfo.bEnableDSC ||
+                 (pHwTimings->yuv420Mode == NV_YUV420_MODE_HW));
+    }
+
+    if (ret == NVHDMIPKT_SUCCESS) {
+        if (pDscInfo != NULL) {
+            const NvU64 hdmiLinkRate = GetHdmiFrlLinkRate(pConfig->frlRate);
+
+            nvAssert((hdmiLinkRate != 0) ||
+                     (pConfig->frlRate == HDMI_FRL_DATA_RATE_NONE));
+
+            nvkms_memset(pDscInfo, 0, sizeof(*pDscInfo));
+
+            if ((pConfig->frlRate != HDMI_FRL_DATA_RATE_NONE) &&
+                pConfig->dscInfo.bEnableDSC &&
+                (hdmiLinkRate != 0)) {
+
+                if (pValidationParams->dscMode ==
+                    NVKMS_DSC_MODE_FORCE_DISABLE) {
+                    ret = NVHDMIPKT_FAIL;
+                    goto done;
+                }
+
+                pDscInfo->type = NV_DSC_INFO_EVO_TYPE_HDMI;
+                pDscInfo->sliceCount = pConfig->dscInfo.sliceCount;
+                /*
+                 * XXX NvHdmi_QueryFRLConfig() might get updated in the
+                 * future, but today it does not return the set of possible
+                 * DSC slice counts; it returns only the slice count that it
+                 * used to calculate the DSC PPS.  Populate
+                 * 'possibleSliceCountMask' using that single slice count.
+                 */
+                pDscInfo->possibleSliceCountMask =
+                    NVBIT(pDscInfo->sliceCount - 1);
+                pDscInfo->hdmi.dscMode = b2Heads1Or ?
+ NV_DSC_EVO_MODE_DUAL : NV_DSC_EVO_MODE_SINGLE; + pDscInfo->hdmi.bitsPerPixelX16 = + pConfig->dscInfo.bitsPerPixelX16; + ct_assert(sizeof(pDscInfo->hdmi.pps) == + sizeof(pConfig->dscInfo.pps)); + nvkms_memcpy(pDscInfo->hdmi.pps, + pConfig->dscInfo.pps, + sizeof(pDscInfo->hdmi.pps)); + pDscInfo->hdmi.dscHActiveBytes = + pConfig->dscInfo.dscHActiveBytes; + pDscInfo->hdmi.dscHActiveTriBytes = + pConfig->dscInfo.dscHActiveTriBytes; + pDscInfo->hdmi.dscHBlankTriBytes = + pConfig->dscInfo.dscHBlankTriBytes; + pDscInfo->hdmi.dscTBlankToTTotalRatioX1k = + pConfig->dscInfo.dscTBlankToTTotalRatioX1k; + pDscInfo->hdmi.hblankMin = + NV_UNSIGNED_DIV_CEIL(((pHwTimings->pixelClock * 1000) * + pConfig->dscInfo.dscHBlankTriBytes), + (hdmiLinkRate / 6)); + } else { + pDscInfo->type = NV_DSC_INFO_EVO_TYPE_DISABLED; + } + } + } + +done: + return ret == NVHDMIPKT_SUCCESS; +} + +void nvHdmiFrlClearConfig(NVDispEvoRec *pDispEvo, NvU32 activeRmId) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + NVHDMIPKT_RESULT ret; + ret = NvHdmi_ClearFRLConfig(pDevEvo->hdmiLib.handle, + pDispEvo->displayOwner, activeRmId); + if (ret != NVHDMIPKT_SUCCESS) { + nvAssert(ret == NVHDMIPKT_SUCCESS); + } +} + +NvBool nvHdmiFrlQueryConfig( + const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pModeTimings, + const NVHwModeTimingsEvo *pHwTimings, + NVDpyAttributeColor *pDpyColor, + const NvBool b2Heads1Or, + const struct NvKmsModeValidationParams *pValidationParams, + HDMI_FRL_CONFIG *pConfig, + NVDscInfoEvoRec *pDscInfo) +{ + const NvKmsDpyOutputColorFormatInfo supportedColorFormats = + nvDpyGetOutputColorFormatInfo(pDpyEvo); + NVDpyAttributeColor dpyColor = *pDpyColor; + do { + if (nvHdmiFrlQueryConfigOneBpc(pDpyEvo, + pModeTimings, + pHwTimings, + &dpyColor, + b2Heads1Or, + pValidationParams, + pConfig, + pDscInfo)) { + *pDpyColor = dpyColor; + return TRUE; + } + } while(nvDowngradeColorBpc(&supportedColorFormats, &dpyColor) && + (dpyColor.bpc >= NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8)); + return FALSE; +} + +void nvHdmiFrlSetConfig(NVDispEvoRec *pDispEvo, NvU32 head) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + HDMI_FRL_CONFIG *pFrlConfig = &pHeadState->hdmiFrlConfig; + NVHDMIPKT_RESULT ret; + NvU32 retries = 0; + const NvU32 MAX_RETRIES = 5; + + if (pFrlConfig->frlRate == HDMI_FRL_DATA_RATE_NONE) { + return; + } + + nvAssert(pHeadState->activeRmId != 0); + + do { + ret = NvHdmi_SetFRLConfig(pDevEvo->hdmiLib.handle, + pDispEvo->displayOwner, + pHeadState->activeRmId, + NV_FALSE /* bFakeLt */, + pFrlConfig); + } while (ret != NVHDMIPKT_SUCCESS && retries++ < MAX_RETRIES); + + if (ret != NVHDMIPKT_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "HDMI FRL link training failed."); + + /* + * Link training failed even after retrying. Since link training + * happens after we've already committed to a modeset and failing is + * not an option, try one last time with the 'bFakeLt' parameter + * set, which should enable enough of the display hardware to + * prevent hangs when we attempt to drive the OR with + * PROTOCOL_HDMI_FRL. 
+         */
+        ret = NvHdmi_SetFRLConfig(pDevEvo->hdmiLib.handle,
+                                  pDispEvo->displayOwner,
+                                  pHeadState->activeRmId,
+                                  NV_TRUE /* bFakeLt */,
+                                  pFrlConfig);
+
+        if (ret != NVHDMIPKT_SUCCESS) {
+            nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+                         "HDMI FRL fallback link training failed.");
+        }
+    }
+
+    if (retries != 0) {
+        nvEvoLogDispDebug(pDispEvo, EVO_LOG_WARN,
+                          "HDMI FRL link training retried %d times.",
+                          retries);
+    }
+}
diff --git a/src/nvidia-modeset/src/nvkms-headsurface-3d.c b/src/nvidia-modeset/src/nvkms-headsurface-3d.c
new file mode 100644
index 0000000..856df97
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-headsurface-3d.c
@@ -0,0 +1,2062 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms-types.h"
+#include "nvkms-headsurface-3d.h"
+#include "nvkms-headsurface-priv.h"
+#include "nvkms-rmapi.h"
+#include "nvkms-utils.h" /* nvVEvoLog() */
+#include "nvkms-sync.h" /* nvKmsSemaphorePayloadOffset() */
+#include "nvkms-rm.h" /* nvRmEvoClassListCheck() */
+
+#include "nvidia-push-methods.h" /* nvPushMethod(), nvPushSet*() */
+#include "nvidia-push-utils.h" /* nvPushKickoff(), nvPushGetNotifierCpuAddress() */
+#include "nvkms-push.h"
+
+#include "nv_list.h" /* nv_container_of */
+
+#include "nvidia-3d-color-targets.h" /* nv3dSelectColorTarget() */
+#include "nvidia-3d-utils.h" /* nv3dSetSurfaceClip() */
+#include "nvidia-3d-constant-buffers.h" /* nv3dSelectCb() */
+#include "nvidia-3d-shader-constants.h" /* NV3D_CB_SLOT_MISC1 */
+
+#include "nvidia-3d-imports.h"
+
+#include "g_nvidia-headsurface-shader-info.h" /* nvHeadSurfaceShaderInfo[] */
+#include "nvidia-headsurface-types.h"
+
+#include "nvkms-softfloat.h"
+
+#include "nvidia-headsurface-constants.h"
+
+#include
+
+#include <class/cl2080.h> /* NV20_SUBDEVICE_0 */
+#include <class/cl902d.h> /* FERMI_TWOD_A */
+#include <class/cla06f.h> /* KEPLER_CHANNEL_GPFIFO_A */
+
+/*
+ * Define constant buffer indices used by headSurface.
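+ *
+ * (These appear to be the constant-buffer slots that headSurface binds
+ * through the nvidia-3d helpers, e.g. nv3dSelectCb(): one each for the
+ * fragment program, the vertex program, and the static warp mesh.)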
+ */ +typedef enum { + NVKMS_HEADSURFACE_CONSTANT_BUFFER_FRAGMENT_PROGRAM, + NVKMS_HEADSURFACE_CONSTANT_BUFFER_VERTEX_PROGRAM, + NVKMS_HEADSURFACE_CONSTANT_BUFFER_STATIC_WARP_MESH, + NVKMS_HEADSURFACE_CONSTANT_BUFFER_COUNT, +} NvHs3dConstantBufferIndex; + + +void *nv3dImportAlloc(size_t size) +{ + return nvAlloc(size); +} + +void nv3dImportFree(void *ptr) +{ + nvFree(ptr); +} + +int nv3dImportMemCmp(const void *a, const void *b, size_t size) +{ + return nvkms_memcmp(a, b, size); +} + +void nv3dImportMemSet(void *s, int c, size_t size) +{ + nvkms_memset(s, c, size); +} + +void nv3dImportMemCpy(void *dest, const void *src, size_t size) +{ + nvkms_memcpy(dest, src, size); +} + +void nv3dImportMemMove(void *dest, const void *src, size_t size) +{ + nvkms_memmove(dest, src, size); +} + +NvBool nvHs3dAllocDevice(NVHsDeviceEvoRec *pHsDevice) +{ + Nv3dAllocDeviceParams params = { }; + + params.pPushDevice = &pHsDevice->pDevEvo->nvPush.device; + + if (!nv3dAllocDevice(¶ms, &pHsDevice->nv3d.device)) { + goto fail; + } + + return TRUE; + +fail: + nvHs3dFreeDevice(pHsDevice); + return FALSE; +} + +void nvHs3dFreeDevice(NVHsDeviceEvoRec *pHsDevice) +{ + if (pHsDevice == NULL) { + return; + } + + nv3dFreeDevice(&pHsDevice->nv3d.device); +} + +static void FreeNvPushChannel(NVHsChannelEvoRec *pHsChannel) +{ + const NVDispEvoRec *pDispEvo; + NVDevEvoPtr pDevEvo; + NvU32 h; + + if ((pHsChannel == NULL) || (pHsChannel->pDispEvo == NULL)) { + return; + } + + pDispEvo = pHsChannel->pDispEvo; + pDevEvo = pDispEvo->pDevEvo; + + nvPushFreeChannel(&pHsChannel->nvPush.channel); + + for (h = 0; h < ARRAY_LEN(pHsChannel->nvPush.handlePool); h++) { + if (pHsChannel->nvPush.handlePool[h] != 0) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pHsChannel->nvPush.handlePool[h]); + pHsChannel->nvPush.handlePool[h] = 0; + } + } +} + +static NvBool AllocNvPushChannel(NVHsChannelEvoRec *pHsChannel) +{ + NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo; + NvPushAllocChannelParams params = { }; + NvU32 h; + + params.engineType = NV2080_ENGINE_TYPE_GRAPHICS; + params.logNvDiss = FALSE; + params.noTimeout = FALSE; + params.ignoreChannelErrors = FALSE; + params.numNotifiers = NVKMS_HEADSURFACE_MAX_SEMAPHORES; + + /* + * XXX NVKMS HEADSURFACE TODO: Pushbuffer memory can be used faster than + * it's drained in complex headsurface swapgroup configurations, since + * there's no throttling on viewportin flips being scheduled in response to + * a vblank callback before previous rendering has completed. + * + * This size was raised from 8k to 128k to WAR the issue, and a proper fix + * will be added in bug 2397492, after which this limit can be lowered + * again. + * + * Throttling is now implemented using the RG line 1 interrupt headsurface + * rendering mechanism, so this limit can be lowered once the old + * vblank-triggered viewport flipping mechanism is removed. 
+ */ + params.pushBufferSizeInBytes = 128 * 1024; /* arbitrary */ + + ct_assert(sizeof(params.handlePool) == + sizeof(pHsChannel->nvPush.handlePool)); + + for (h = 0; h < ARRAY_LEN(pHsChannel->nvPush.handlePool); h++) { + pHsChannel->nvPush.handlePool[h] = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + params.handlePool[h] = pHsChannel->nvPush.handlePool[h]; + } + + params.pDevice = &pDevEvo->nvPush.device; + + if (!nvPushAllocChannel(¶ms, &pHsChannel->nvPush.channel)) { + FreeNvPushChannel(pHsChannel); + return FALSE; + } + + return TRUE; +} + +static void FreeNv3dChannel(NVHsChannelEvoRec *pHsChannel) +{ + const NVDispEvoRec *pDispEvo; + NVDevEvoPtr pDevEvo; + + if ((pHsChannel == NULL) || (pHsChannel->pDispEvo == NULL)) { + return; + } + + pDispEvo = pHsChannel->pDispEvo; + pDevEvo = pDispEvo->pDevEvo; + + nv3dFreeChannelSurface(&pHsChannel->nv3d.channel); + nv3dFreeChannelObject(&pHsChannel->nv3d.channel); + nv3dFreeChannelState(&pHsChannel->nv3d.channel); + + if (pHsChannel->nv3d.handle != 0) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pHsChannel->nv3d.handle); + pHsChannel->nv3d.handle = 0; + } +} + + +static NvBool AllocNv3dChannel(NVHsChannelEvoRec *pHsChannel) +{ + NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo; + Nv3dAllocChannelStateParams stateParams = { }; + Nv3dAllocChannelObjectParams objectParams = { }; + + stateParams.p3dDevice = &pDevEvo->pHsDevice->nv3d.device; + stateParams.numTextures = NVKMS_HEADSURFACE_TEXINFO_NUM; + stateParams.numConstantBuffers = NVKMS_HEADSURFACE_CONSTANT_BUFFER_COUNT; + stateParams.numTextureBindings = + NVIDIA_HEADSURFACE_UNIFORM_SAMPLER_BINDING_NUM; + + /* + * XXX NVKMS HEADSURFACE TODO: set hasFrameBoundaries to TRUE, but how to + * trigger a frame boundary? + */ + stateParams.hasFrameBoundaries = FALSE; + + if (!nv3dAllocChannelState(&stateParams, &pHsChannel->nv3d.channel)) { + goto fail; + } + + pHsChannel->nv3d.handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + objectParams.pPushChannel = &pHsChannel->nvPush.channel; + objectParams.handle[0] = pHsChannel->nv3d.handle; + + if (!nv3dAllocChannelObject(&objectParams, &pHsChannel->nv3d.channel)) { + goto fail; + } + + if (!nv3dAllocChannelSurface(&pHsChannel->nv3d.channel)) { + goto fail; + } + + return TRUE; + +fail: + FreeNv3dChannel(pHsChannel); + return FALSE; +} + +static void FreeNv2dChannel(NVHsChannelEvoRec *pHsChannel) +{ + const NVDispEvoRec *pDispEvo; + NVDevEvoPtr pDevEvo; + + if ((pHsChannel == NULL) || (pHsChannel->pDispEvo == NULL)) { + return; + } + + pDispEvo = pHsChannel->pDispEvo; + pDevEvo = pDispEvo->pDevEvo; + + if (pHsChannel->nv2d.handle[0] != 0) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pHsChannel->nv2d.handle[0]); + pHsChannel->nv2d.handle[0] = 0; + } +} + +static NvBool AllocNv2dChannel(NVHsChannelEvoRec *pHsChannel) +{ + NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo; + NvU32 ret; + + if (!nvRmEvoClassListCheck(pDevEvo, FERMI_TWOD_A)) { + goto fail; + } + + pHsChannel->nv2d.handle[0] = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pHsChannel->nvPush.channel.channelHandle[0], + pHsChannel->nv2d.handle[0], + FERMI_TWOD_A, + NULL); + + if (ret != NVOS_STATUS_SUCCESS) { + goto fail; + } + + return TRUE; + +fail: + FreeNv2dChannel(pHsChannel); + return FALSE; +} + +NvBool nvHs3dAllocChannel(NVHsChannelEvoPtr pHsChannel) +{ + const NvU32 dispSdMask = NVBIT(pHsChannel->pDispEvo->displayOwner); + NvPushChannelPtr p = &pHsChannel->nvPush.channel; + + if 
(!AllocNvPushChannel(pHsChannel)) { + goto fail; + } + + if (!AllocNv3dChannel(pHsChannel)) { + goto fail; + } + + if (!AllocNv2dChannel(pHsChannel)) { + goto fail; + } + + p = &pHsChannel->nvPush.channel; + + /* + * pHsChannel will only be used on this pDispEvo; set the channel's + * subdevice mask to this pDispEvo's subdevice mask. + */ + nvPushSetSubdeviceMask(p, dispSdMask); + + if (!nv3dInitChannel(&pHsChannel->nv3d.channel)) { + goto fail; + } + + nvPushKickoff(p); + + return TRUE; + +fail: + nvHs3dFreeChannel(pHsChannel); + return FALSE; +} + +void nvHs3dFreeChannel(NVHsChannelEvoPtr pHsChannel) +{ + if (pHsChannel == NULL) { + return; + } + + FreeNv2dChannel(pHsChannel); + + FreeNv3dChannel(pHsChannel); + + FreeNvPushChannel(pHsChannel); +} + +/*! + * HsSurfaceFormatTable[] defines a mapping between NvKmsSurfaceMemoryFormat + * enum values, NV902D_SET_SRC_FORMAT values, NV9097_SET_COLOR_TARGET_FORMAT + * values and various nvidia-3d values needed to assign an Nv3dRenderTexInfo + * structure. + */ +static const struct { + NvU32 cl902d; + NvU32 cl9097; + enum Nv3dTexHeaderComponentSizes sizes; + enum Nv3dTexHeaderDataType dataType; + struct { + enum Nv3dTexHeaderSource x; + enum Nv3dTexHeaderSource y; + enum Nv3dTexHeaderSource z; + enum Nv3dTexHeaderSource w; + } source; +} HsSurfaceFormatTable[] = { + +#define ENTRY(_nvKmsFmt, _cl902d, _cl9097, _nv3dSizes, _nv3dDataType, _x, _y, _z, _w) \ + \ + [NvKmsSurfaceMemoryFormat ## _nvKmsFmt] = { \ + .cl902d = NV902D_SET_SRC_FORMAT_V_ ## _cl902d, \ + .cl9097 = NV9097_SET_COLOR_TARGET_FORMAT_V_ ## _cl9097, \ + .sizes = NV3D_TEXHEAD_ ## _nv3dSizes, \ + .dataType = NV3D_TEXHEAD_NUM_ ## _nv3dDataType, \ + .source.x = NV3D_TEXHEAD_IN_ ## _x, \ + .source.y = NV3D_TEXHEAD_IN_ ## _y, \ + .source.z = NV3D_TEXHEAD_IN_ ## _z, \ + .source.w = NV3D_TEXHEAD_IN_ ## _w, \ + } + + ENTRY(I8, Y8, R8, R8, UINT, R, R, R, R), + ENTRY(A1R5G5B5, A1R5G5B5, A1R5G5B5, A1B5G5R5, UNORM, B, G, R, A), + ENTRY(X1R5G5B5, X1R5G5B5, X1R5G5B5, A1B5G5R5, UNORM, B, G, R, ONE_FLOAT), + ENTRY(R5G6B5, R5G6B5, R5G6B5, B5G6R5, UNORM, B, G, R, ONE_FLOAT), + ENTRY(A8R8G8B8, A8R8G8B8, A8R8G8B8, A8B8G8R8, UNORM, B, G, R, A), + ENTRY(X8R8G8B8, X8R8G8B8, X8R8G8B8, A8B8G8R8, UNORM, B, G, R, ONE_FLOAT), + ENTRY(A2B10G10R10, A2B10G10R10, A2B10G10R10, A2B10G10R10, UNORM, R, G, B, A), + ENTRY(X2B10G10R10, A2B10G10R10, A2B10G10R10, A2B10G10R10, UNORM, R, G, B, ONE_FLOAT), + ENTRY(A8B8G8R8, A8B8G8R8, A8B8G8R8, A8B8G8R8, UNORM, R, G, B, A), + ENTRY(X8B8G8R8, X8B8G8R8, X8B8G8R8, A8B8G8R8, UNORM, R, G, B, ONE_FLOAT), + ENTRY(R16G16B16A16, R16_G16_B16_A16, R16_G16_B16_A16, R16G16B16A16, UNORM, R, G, B, A), + ENTRY(RF32GF32BF32AF32, RF32_GF32_BF32_AF32, RF32_GF32_BF32_AF32, R32G32B32A32, FLOAT, R, G, B, A), + +#undef ENTRY + +}; + +/*! + * Return the NV9097_SET_COLOR_TARGET_FORMAT that corresponds to the provided + * NvKmsSurfaceMemoryFormat. + */ +static NvU32 HsGetColorTargetFormat(enum NvKmsSurfaceMemoryFormat format) +{ + nvAssert(format < ARRAY_LEN(HsSurfaceFormatTable)); + + return HsSurfaceFormatTable[format].cl9097; +} + +/*! 
+ * Given NvKmsSurfaceMemoryFormat, assign Nv3dRenderTexInfo fields: + * + * Nv3dRenderTexInfo::sizes + * Nv3dRenderTexInfo::dataType + * Nv3dRenderTexInfo::source + */ +static void AssignRenderTexInfoSizesDataTypeSource( + enum NvKmsSurfaceMemoryFormat format, + Nv3dRenderTexInfo *pTexInfo) +{ + nvAssert(format < ARRAY_LEN(HsSurfaceFormatTable)); + + pTexInfo->sizes = HsSurfaceFormatTable[format].sizes; + pTexInfo->dataType = HsSurfaceFormatTable[format].dataType; + pTexInfo->source.x = HsSurfaceFormatTable[format].source.x; + pTexInfo->source.y = HsSurfaceFormatTable[format].source.y; + pTexInfo->source.z = HsSurfaceFormatTable[format].source.z; + pTexInfo->source.w = HsSurfaceFormatTable[format].source.w; +} + +/*! + * Return the NV902D_SET_SRC_FORMAT that corresponds to the provided + * NvKmsSurfaceMemoryFormat. + */ +static NvU32 HsGet2dFormat(enum NvKmsSurfaceMemoryFormat format) +{ + nvAssert(format < ARRAY_LEN(HsSurfaceFormatTable)); + + return HsSurfaceFormatTable[format].cl902d; +} + +/*! + * Assign the Nv3dRenderTexInfo, given an NVSurfaceEvoRec. + */ +static void AssignRenderTexInfo( + const NVSurfaceEvoRec *pSurface, + const NvBool normalizedCoords, + const NvBool filtering, + Nv3dRenderTexInfo *pTexInfo) +{ + nvkms_memset(pTexInfo, 0, sizeof(*pTexInfo)); + + if (pSurface == NULL) { + return; + } + + AssignRenderTexInfoSizesDataTypeSource(pSurface->format, pTexInfo); + + if (pSurface->layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + pTexInfo->texType = NV3D_TEX_TYPE_TWO_D_BLOCKLINEAR; + } else { + pTexInfo->texType = NV3D_TEX_TYPE_TWO_D_PITCH; + } + + pTexInfo->offset = pSurface->gpuAddress; + + pTexInfo->normalizedCoords = normalizedCoords; + pTexInfo->repeatType = NV3D_TEXHEAD_REPEAT_TYPE_NONE; + + pTexInfo->filtering = filtering; + pTexInfo->pitch = pSurface->planes[0].pitch; + pTexInfo->width = pSurface->widthInPixels; + pTexInfo->height = pSurface->heightInPixels; + + + /* + * When NVKMS clients register surfaces, they only specify log2GobsPerBlockY + * (not X or Z). This should be okay for now: + * + * X is never non-zero: on >= FERMI the only valid GOBS_PER_BLOCK_WIDTH is + * ONE_GOB: + * + * cl9097tex.h: + * #define NV9097_TEXHEAD2_GOBS_PER_BLOCK_WIDTH 21:19 + * #define NV9097_TEXHEAD2_GOBS_PER_BLOCK_WIDTH_ONE_GOB 0x00000000 + * + * clc097tex.h: + * #define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_WIDTH MW(98:96) + * #define NVC097_TEXHEAD_BL_GOBS_PER_BLOCK_WIDTH_ONE_GOB 0x00000000 + * + * Z would only be non-zero for a surface with three dimensions. 
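+ *
+ * As an illustrative example (values hypothetical): a blockLinear surface
+ * registered with log2GobsPerBlockY = 4 describes blocks that are
+ * 2^4 = 16 GOBs tall and, per the above, always one GOB wide, so the single
+ * client-provided Y value below fully determines the block geometry of the
+ * texture header.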
+ */ + + pTexInfo->log2GobsPerBlock.x = 0; + pTexInfo->log2GobsPerBlock.y = pSurface->log2GobsPerBlockY; + pTexInfo->log2GobsPerBlock.z = 0; +} + +static void HsGetYuv420Black( + const enum NvKmsSurfaceMemoryFormat format, + NvU32 color[4]) +{ + switch(format) { + case NvKmsSurfaceMemoryFormatI8: + nvAssert(!"headSurface cannot render to an I8 surface"); + break; + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + color[0] = 0x3d042108; /* 1/31 in 5bpc ==> 0.032258f */ + color[1] = 0x3d042108; /* 1/31 in 5bpc ==> 0.032258f */ + color[2] = 0x3ef7bdef; /* 15/31 in 5bpc ==> 0.483871f */ + break; + case NvKmsSurfaceMemoryFormatR5G6B5: + color[0] = 0x3d042108; /* 1/31 in 5bpc ==> 0.032258f */ + color[1] = 0x3d430c31; /* 3/63 in 6bpc ==> 0.047619f */ + color[2] = 0x3ef7bdef; /* 15/31 in 5bpc ==> 0.483871f */ + break; + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + color[0] = 0x3d70f0f1; /* 15/255 in 8bpc ==> 0.058824f */ + color[1] = 0x3d70f0f1; /* 15/255 in 8bpc ==> 0.058824f */ + color[2] = 0x3efefeff; /* 127/255 in 8bpc ==> 0.498039f */ + break; + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + color[0] = 0x3d7c3f10; /* 63/1023 in 10bpc ==> 0.061584f */ + color[1] = 0x3d7c3f10; /* 63/1023 in 10bpc ==> 0.061584f */ + color[2] = 0x3effbff0; /* 511/1023 in 10bpc ==> 0.499511f */ + break; + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + case NvKmsSurfaceMemoryFormatRF16GF16BF16XF16: + case NvKmsSurfaceMemoryFormatR16G16B16A16: + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + nvAssert(!"HeadSurface doesn't render to these formats"); + break; + } + + color[3] = NV_FLOAT_ZERO; +} + +void nvHs3dClearSurface( + NVHsChannelEvoRec *pHsChannel, + const NVHsSurfaceRec *pHsSurface, + const struct NvKmsRect surfaceRect, + NvBool yuv420) +{ + NvPushChannelPtr p = &pHsChannel->nvPush.channel; + Nv3dChannelRec *p3d = &pHsChannel->nv3d.channel; + const NVSurfaceEvoRec *pSurfaceEvo = pHsSurface->pSurfaceEvo; + + const int ct = 0; + const NvBool blockLinear = TRUE; + const NvU32 surfaceFormat = HsGetColorTargetFormat(pSurfaceEvo->format); + const NvU16 x = surfaceRect.x; + const NvU16 y = surfaceRect.y; + const NvU16 w = surfaceRect.width; + const NvU16 h = surfaceRect.height; + + NvU32 clearColor[4] = { 0, 0, 0, 0 }; + + if (yuv420) { + 
HsGetYuv420Black(pSurfaceEvo->format, clearColor);
+    }
+
+    nv3dSelectColorTarget(p3d, ct);
+
+    nv3dSetColorTarget(p3d,
+                       ct,
+                       surfaceFormat,
+                       pSurfaceEvo->gpuAddress,
+                       blockLinear,
+                       pHsSurface->gobsPerBlock,
+                       pSurfaceEvo->widthInPixels,
+                       pSurfaceEvo->heightInPixels);
+
+    nv3dSetSurfaceClip(p3d, x, y, w, h);
+    nv3dSetBlendColorCoefficients(p3d, NV3D_BLEND_OP_SRC, FALSE, FALSE);
+    nv3dClearSurface(p3d, clearColor, x, y, w, h);
+
+    nvPushKickoff(p);
+}
+
+/*!
+ * The 3D engine can perform bilinear and nearest resampling as part of normal
+ * texture usage. But if a more complex resampling method is requested, then we
+ * will need to use an appropriate headSurface fragment program.
+ */
+static NvBool HsIsCustomSampling(enum NvKmsResamplingMethod resamplingMethod)
+{
+    return (resamplingMethod != NVKMS_RESAMPLING_METHOD_BILINEAR) &&
+           (resamplingMethod != NVKMS_RESAMPLING_METHOD_NEAREST);
+}
+
+/*!
+ * Return the headSurface fragment program that matches the configuration
+ * described by NVHsChannelConfig.
+ *
+ * \param[in]  pChannelConfig  The channel configuration.
+ */
+static ProgramName Hs3dGetFragmentProgram(
+    const NVHsChannelConfig *pChannelConfig,
+    const enum NvKmsPixelShiftMode pixelShift,
+    const NvBool overlay)
+{
+    const NvBool blend = pChannelConfig->pBlendTexSurface != NULL;
+    const NvBool offset = pChannelConfig->pOffsetTexSurface != NULL;
+    const NvBool blendAfterWarp = pChannelConfig->blendAfterWarp;
+    const NvBool yuv420 = pChannelConfig->yuv420;
+    const NvBool pixelShiftEnabled = pixelShift != NVKMS_PIXEL_SHIFT_NONE;
+    const NvBool customSampling =
+        HsIsCustomSampling(pChannelConfig->resamplingMethod);
+    const NvBool blendOffsetOrderMatters = (blend || offset);
+
+    int i;
+
+    for (i = 0; i < ARRAY_LEN(nvHeadSurfaceShaderInfo); i++) {
+        if ((nvHeadSurfaceShaderInfo[i].blend == blend) &&
+            (nvHeadSurfaceShaderInfo[i].offset == offset) &&
+            (nvHeadSurfaceShaderInfo[i].overlay == overlay) &&
+            (nvHeadSurfaceShaderInfo[i].yuv420 == yuv420) &&
+            (nvHeadSurfaceShaderInfo[i].pixelShift == pixelShiftEnabled) &&
+            (nvHeadSurfaceShaderInfo[i].customSampling == customSampling) &&
+            (!blendOffsetOrderMatters ||
+             (nvHeadSurfaceShaderInfo[i].blendAfterWarp == blendAfterWarp))) {
+
+            return PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT + i;
+        }
+    }
+
+    nvAssert(!"Missing headSurface fragment program.");
+
+    return PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT;
+}
+
+/*!
+ * Given the rest of NVHsChannelConfig, return the srcFiltering configuration.
+ */
+static NvBool Hs3dGetSrcFiltering(
+    const NVHsChannelConfig *pChannelConfig,
+    const enum NvKmsPixelShiftMode pixelShift,
+    const NvBool overlay)
+{
+    if (overlay) {
+        return FALSE;
+    }
+
+    if (pChannelConfig->yuv420) {
+        return FALSE;
+    }
+
+    if (pixelShift != NVKMS_PIXEL_SHIFT_NONE) {
+        return FALSE;
+    }
+
+    if (pChannelConfig->resamplingMethod == NVKMS_RESAMPLING_METHOD_NEAREST) {
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+/*!
+ * Load the warp mesh for this head.
+ */
+static void LoadStaticWarpMesh(
+    NVHsChannelEvoPtr pHsChannel)
+{
+    const NvHsStaticWarpMesh *swm = &pHsChannel->config.staticWarpMesh;
+    Nv3dChannelRec *p3d = &pHsChannel->nv3d.channel;
+
+    /* We use a constant buffer slot to store the static warp mesh. */
+
+    ct_assert(sizeof(*swm) <= NV3D_CONSTANT_BUFFER_SIZE);
+
+    nv3dSelectCb(p3d, NVKMS_HEADSURFACE_CONSTANT_BUFFER_STATIC_WARP_MESH);
+
+    nv3dLoadConstants(p3d, 0, sizeof(*swm), swm);
+}
+
+/*!
+ * Initialize the TWOD object in the channel.
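+ *
+ * This binds the FERMI_TWOD_A object allocated by AllocNv2dChannel() to
+ * subchannel NVA06F_SUBCHANNEL_2D, so that the NV902D_* methods pushed by
+ * the Hs3dSetup2dBlit*() helpers below reach the TWOD engine.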
+ */ +static void Hs3dSetup2d(NVHsChannelEvoPtr pHsChannel) +{ + NvPushChannelPtr p = &pHsChannel->nvPush.channel; + + nvAssert(!p->pDevice->clientSli || p->pDevice->numSubDevices == 1); + nvPushSetObject(p, NVA06F_SUBCHANNEL_2D, pHsChannel->nv2d.handle); +} + +/*! + * Set up TWOD to read from the source surface. + */ +static void Hs3dSetup2dBlitSrc( + NVHsChannelEvoPtr pHsChannel, + const NVSurfaceEvoRec *pSrc) +{ + NvPushChannelPtr p = &pHsChannel->nvPush.channel; + const NvU32 colorFormat = HsGet2dFormat(pSrc->format); + + if (pSrc->layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + nvPushMethod(p, NVA06F_SUBCHANNEL_2D, NV902D_SET_SRC_MEMORY_LAYOUT, 2); + nvPushSetMethodData(p, NV902D_SET_SRC_MEMORY_LAYOUT_V_BLOCKLINEAR); + /* NV902D_SET_SRC_BLOCK_SIZE */ + nvPushSetMethodData(p, + NV3D_V(902D, SET_SRC_BLOCK_SIZE, DEPTH, 0) | + NV3D_V(902D, SET_SRC_BLOCK_SIZE, HEIGHT, pSrc->log2GobsPerBlockY)); + } else { + nvPushImmed(p, NVA06F_SUBCHANNEL_2D, + NV902D_SET_SRC_MEMORY_LAYOUT, PITCH); + nvPushMethod(p, NVA06F_SUBCHANNEL_2D, NV902D_SET_SRC_PITCH, 1); + nvPushSetMethodData(p, pSrc->planes[0].pitch); + } + nvPushMethod(p, NVA06F_SUBCHANNEL_2D, NV902D_SET_SRC_WIDTH, 4); + nvPushSetMethodData(p, pSrc->widthInPixels); + /* NV902D_SET_SRC_HEIGHT */ + nvPushSetMethodData(p, pSrc->heightInPixels); + /* NV902D_SET_SRC_OFFSET */ + nvPushSetMethodDataU64(p, pSrc->gpuAddress); + + nvPushImmedVal(p, NVA06F_SUBCHANNEL_2D, NV902D_SET_SRC_FORMAT, colorFormat); +} + +/*! + * Set up TWOD to write to the destination surface. + */ +static void Hs3dSetup2dBlitDst( + NVHsChannelEvoPtr pHsChannel, + const NVHsSurfaceRec *pDst) +{ + NvPushChannelPtr p = &pHsChannel->nvPush.channel; + const NVSurfaceEvoRec *pSurfaceEvo = pDst->pSurfaceEvo; + const NvU32 colorFormat = HsGet2dFormat(pSurfaceEvo->format); + + /* NVHsSurfaceRec surfaces are always blockLinear. */ + nvAssert(pSurfaceEvo->layout == NvKmsSurfaceMemoryLayoutBlockLinear); + + nvPushMethod(p, NVA06F_SUBCHANNEL_2D, NV902D_SET_DST_MEMORY_LAYOUT, 2); + nvPushSetMethodData(p, NV902D_SET_DST_MEMORY_LAYOUT_V_BLOCKLINEAR); + + /* NV902D_SET_DST_BLOCK_SIZE */ + nvAssert(pDst->gobsPerBlock.x == 0); + nvPushSetMethodData(p, + NV3D_V(902D, SET_DST_BLOCK_SIZE, DEPTH, pDst->gobsPerBlock.z) | + NV3D_V(902D, SET_DST_BLOCK_SIZE, HEIGHT, pDst->gobsPerBlock.y)); + + nvPushMethod(p, NVA06F_SUBCHANNEL_2D, NV902D_SET_DST_WIDTH, 4); + nvPushSetMethodData(p, pSurfaceEvo->widthInPixels); + /* NV902D_SET_DST_HEIGHT */ + nvPushSetMethodData(p, pSurfaceEvo->heightInPixels); + /* NV902D_SET_DST_OFFSET */ + nvPushSetMethodDataU64(p, pSurfaceEvo->gpuAddress); + + nvPushImmedVal(p, NVA06F_SUBCHANNEL_2D, NV902D_SET_DST_FORMAT, colorFormat); +} + +/*! + * Prepare TWOD to perform blits between surfaces. + */ +static void Hs3dSetup2dBlit( + NVHsChannelEvoPtr pHsChannel, + const NVSurfaceEvoRec *pSrc, + const NVHsSurfaceRec *pDst) +{ + NvPushChannelPtr p = &pHsChannel->nvPush.channel; + + nvPushImmed(p, NVA06F_SUBCHANNEL_2D, NV902D_SET_OPERATION, SRCCOPY); + + Hs3dSetup2dBlitSrc(pHsChannel, pSrc); + Hs3dSetup2dBlitDst(pHsChannel, pDst); +} + +/*! + * Perform a blit, using TWOD, between the surfaces defined in the previous call + * to Hs3dSetup2dBlit(). This can be called multiple times per + * Hs3dSetup2dBlit(). 
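+ *
+ * A rough usage sketch (srcPoint[], dstPoint[], and size[] are placeholder
+ * per-clip-rect parameters, not real variables), matching the pattern used
+ * by Hs3dUpdateStagingSurface() below:
+ *
+ *    Hs3dSetup2dBlit(pHsChannel, pSrc, pDst);
+ *    for (i = 0; i < nClips; i++) {
+ *        Hs3d2dBlit(pHsChannel, srcPoint[i], dstPoint[i], size[i]);
+ *    }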
+ */
+static void Hs3d2dBlit(
+    NVHsChannelEvoPtr pHsChannel,
+    struct NvKmsPoint srcPoint,
+    struct NvKmsPoint dstPoint,
+    struct NvKmsSize size)
+{
+    NvPushChannelPtr p = &pHsChannel->nvPush.channel;
+
+    nvPushMethod(p, NVA06F_SUBCHANNEL_2D,
+                 NV902D_SET_PIXELS_FROM_MEMORY_DST_X0, 12);
+    nvPushSetMethodData(p, dstPoint.x);  /* Destination X */
+    /* NV902D_SET_PIXELS_FROM_MEMORY_DST_Y0 */
+    nvPushSetMethodData(p, dstPoint.y);  /* Destination Y */
+    /* NV902D_SET_PIXELS_FROM_MEMORY_DST_WIDTH */
+    nvPushSetMethodData(p, size.width);  /* Blit Width */
+    /* NV902D_SET_PIXELS_FROM_MEMORY_DST_HEIGHT */
+    nvPushSetMethodData(p, size.height); /* Blit Height */
+    /* NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_FRAC */
+    nvPushSetMethodData(p, 0x0);         /* du/dx fraction */
+    /* NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_INT */
+    nvPushSetMethodData(p, 0x1);         /* du/dx int */
+    /* NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_FRAC */
+    nvPushSetMethodData(p, 0x0);         /* dv/dy fraction */
+    /* NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_INT */
+    nvPushSetMethodData(p, 0x1);         /* dv/dy int */
+    /* NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC */
+    nvPushSetMethodData(p, 0x0);         /* Source X fraction */
+    /* NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_INT */
+    nvPushSetMethodData(p, srcPoint.x);  /* Source X int */
+    /* NV902D_SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC */
+    nvPushSetMethodData(p, 0x0);         /* Source Y fraction */
+    /* NV902D_SET_PIXELS_FROM_MEMORY_SRC_Y0_INT */
+    nvPushSetMethodData(p, srcPoint.y);  /* Source Y int */
+}
+
+/*!
+ * Initialize the pHsChannel for rendering.
+ *
+ * This should be called once, when the headSurface configuration is applied to
+ * the device. Here we do any work that is static for the given headSurface
+ * configuration.
+ *
+ * This function cannot fail.
+ *
+ * \param[in,out]  pHsChannel  The channel to update.
+ */
+void nvHs3dSetConfig(NVHsChannelEvoPtr pHsChannel)
+{
+    LoadStaticWarpMesh(pHsChannel);
+
+    nvkms_memset(pHsChannel->nv3d.texInfo, 0,
+                 sizeof(pHsChannel->nv3d.texInfo));
+
+    /* Set up sampler from blend surface. */
+
+    AssignRenderTexInfo(
+        pHsChannel->config.pBlendTexSurface,
+        TRUE /* normalizedCoords */,
+        TRUE /* filtering */,
+        &pHsChannel->nv3d.texInfo[NVKMS_HEADSURFACE_TEXINFO_BLEND]);
+
+    /* Set up sampler from offset surface. */
+
+    AssignRenderTexInfo(
+        pHsChannel->config.pOffsetTexSurface,
+        TRUE /* normalizedCoords */,
+        TRUE /* filtering */,
+        &pHsChannel->nv3d.texInfo[NVKMS_HEADSURFACE_TEXINFO_OFFSET]);
+
+    Hs3dSetup2d(pHsChannel);
+}
+
+static NvU32 HsGetSatCos(NvS32 dvc)
+{
+    // Digital vibrance is between -1024 (NV_EVO_DVC_MIN) and 1023
+    // (NV_EVO_DVC_MAX), normalized to 0.0f-2.0f for this shader,
+    // defaulting to 1.0f. This mimics nvSetDVCEvo().
+    // (dvc + 1024) / 1024.0f
+    const NvU32 a = NV_MAX(dvc + 1024, 0);
+    const float32_t b = ui32_to_f32(a);
+    const float32_t c = ui32_to_f32(1024);
+    const float32_t d = f32_div(b, c);
+
+    return F32viewAsNvU32(d);
+}
+
+/*!
+ * Load the fragment program uniforms needed for the headSurface configuration.
+ */ +static void LoadFragmentProgramUniforms( + NVHsChannelEvoPtr pHsChannel, + const enum NvKmsPixelShiftMode pixelShift, + const NvBool overlay, + const struct NvKmsPoint viewPortPointIn) +{ + const NVHsChannelConfig *pChannelConfig = &pHsChannel->config; + const NvBool pixelShiftEnabled = pixelShift != NVKMS_PIXEL_SHIFT_NONE; + const NvBool customSampling = + HsIsCustomSampling(pChannelConfig->resamplingMethod); + + NvHsFragmentUniforms fragmentUniforms = { }; + + /* XXX NVKMS HEADSURFACE TODO: plumb colorRange */ + const enum NvKmsDpyAttributeColorRangeValue colorRange = + NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL; + + const NvBool needsFragmentUniforms = + pChannelConfig->blendAfterWarp || + pChannelConfig->yuv420 || + overlay || + pixelShiftEnabled || + customSampling; + + if (!needsFragmentUniforms) { + return; + } + + if (customSampling) { + +#if defined(DEBUG) + /* + * The customSampling calculation above should ensure that + * pChannelConfig->resamplingMethod is one of the following values. And + * the enums and constants should be defined such that they have the + * same values. + */ + NvBool found = FALSE; + +#define CHECK(_x) \ + do { \ + ct_assert(NVKMS_RESAMPLING_METHOD_ ## _x == \ + NVIDIA_HEADSURFACE_RESAMPLING_METHOD_ ## _x); \ + if (pChannelConfig->resamplingMethod == \ + NVKMS_RESAMPLING_METHOD_ ## _x) { \ + found = TRUE; \ + } \ + } while (0) + + CHECK(BICUBIC_TRIANGULAR); + CHECK(BICUBIC_BELL_SHAPED); + CHECK(BICUBIC_BSPLINE); + CHECK(BICUBIC_ADAPTIVE_TRIANGULAR); + CHECK(BICUBIC_ADAPTIVE_BELL_SHAPED); + CHECK(BICUBIC_ADAPTIVE_BSPLINE); + +#undef CHECK + + nvAssert(found); +#endif /* DEBUG */ + + fragmentUniforms.resamplingMethod = pChannelConfig->resamplingMethod; + } + + if (pChannelConfig->blendAfterWarp) { + fragmentUniforms.vertexScale.x = pChannelConfig->viewPortOut.width; + fragmentUniforms.vertexScale.y = pChannelConfig->viewPortOut.height; + } + + // The following uniforms are all necessary for the LUT and colorRange + // to be applied in the headSurface shader for the overlay, YUV420, or + // pixelshift mode. + if (pChannelConfig->yuv420 || overlay || pixelShiftEnabled) { + + const int nPaletteEntries = 0; /* XXX NVKMS HEADSURFACE TODO */ + const int depth = 24; /* XXX NVKMS HEADSURFACE TODO */ + + fragmentUniforms.numLutEntries.x = nPaletteEntries; + fragmentUniforms.numLutEntries.y = nPaletteEntries; + fragmentUniforms.numLutEntries.z = nPaletteEntries; + + // In depth 16 (R5G6B5) the LUT has half as many entries for red + // and blue as it does for green. + if (depth == 16) { + nvAssert(fragmentUniforms.numLutEntries.x % 2 == 0); + fragmentUniforms.numLutEntries.x /= 2; + nvAssert(fragmentUniforms.numLutEntries.z % 2 == 0); + fragmentUniforms.numLutEntries.z /= 2; + } + } + + if (pChannelConfig->yuv420 || pixelShiftEnabled) { + int i, j; + + // primaryTextureBias is used in the fragment shader as well in + // YUV420/pixelshift modes in order to map YUV420/pixelshift + // transformed fragment coordinates to the viewport, since the + // vertex shader's transformation is skipped in YUV420/pixelshift + // modes. 
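+            // E.g., assuming a viewPortIn positioned at (704, 300), the bias
+            // below is (704, 300), so the shader maps its local fragment
+            // coordinate (0, 0) to source texel (704, 300).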
+ + fragmentUniforms.primaryTextureBias.x = viewPortPointIn.x; + fragmentUniforms.primaryTextureBias.y = viewPortPointIn.y; + + fragmentUniforms.cursorPosition.x = pChannelConfig->cursor.x + + fragmentUniforms.primaryTextureBias.x; + fragmentUniforms.cursorPosition.y = pChannelConfig->cursor.y + + fragmentUniforms.primaryTextureBias.y; + + for (i = 0; i < 3; i++) { + for (j = 0; j < 3; j++) { + fragmentUniforms.transform[i][j] = + F32viewAsNvU32(pChannelConfig->transform.m[i][j]); + } + } + } + + // In pixelshift mode, a 2x width/height source surface is + // downsampled to the destination surface. This tells the shader + // whether to copy the upper left or bottom right pixels of each + // pixel quad for the destination surface. + if (pixelShift == NVKMS_PIXEL_SHIFT_4K_TOP_LEFT) { + fragmentUniforms.pixelShiftOffset.x = NV_FLOAT_NEG_QUARTER; + fragmentUniforms.pixelShiftOffset.y = NV_FLOAT_NEG_QUARTER; + } else if (pixelShift == NVKMS_PIXEL_SHIFT_4K_BOTTOM_RIGHT) { + fragmentUniforms.pixelShiftOffset.x = NV_FLOAT_QUARTER; + fragmentUniforms.pixelShiftOffset.y = NV_FLOAT_QUARTER; + } else { + // When we get here, we should only see pixelShift of 4k or none (not + // 8k): 8k should be translated into different 4k configs for each eye, + // higher in the call chain. + nvAssert(pixelShift == NVKMS_PIXEL_SHIFT_NONE); + } + + if (pChannelConfig->yuv420) { + // Since YUV 4:2:0 is only currently supported on 4K modes, hardcode + // the ITU-R BT.709 colorspace conversion matrix. + // The following 5 coefficients are copied from items 4.2 and 4.3 of + // Rec. ITU-R BT.709-5. + fragmentUniforms.luminanceCoefficient.x = 0x3e59b3d0; /* R : 0.2126f */ + fragmentUniforms.luminanceCoefficient.y = 0x3f371759; /* G : 0.7152f */ + fragmentUniforms.luminanceCoefficient.z = 0x3d93dd98; /* B : 0.0722f */ + fragmentUniforms.chromaCoefficient.x = 0x3f228f5c; /* Cr: 0.6350f */ + fragmentUniforms.chromaCoefficient.y = 0x3f09f55a; /* Cb: 0.5389f */ + + // Range compression is disabled in HW and applied manually in the + // shader for YUV420 mode. + if (colorRange == NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL) { + fragmentUniforms.luminanceScale = NV_FLOAT_ONE; + fragmentUniforms.luminanceBlackLevel = NV_FLOAT_ZERO; + fragmentUniforms.chrominanceScale = NV_FLOAT_ONE; + } else if (colorRange == NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED) { + // ITU-R BT.709 mandates a limited color range with the + // following 8bpc quantization values (section 6.10): + // + // Luminance black level 16 + // Luminance nominal range 16-235 (i.e., range of 219) + // Chrominance nominal range 16-240 (i.e., range of 224) + // + // Divide these values by 255.0f to normalize to the range 0.0f-1.0f + // for the yuv420 shader's color range quantization. + + // 219.0f / 255.0f == 0.858824f ==> 0x3f5bdbdc + fragmentUniforms.luminanceScale = 0x3f5bdbdc; + + // 16.0f / 255.0f == 0.062745f ==> 0x3d808081 + fragmentUniforms.luminanceBlackLevel = 0x3d808081; + + // 224.0f / 255.0f == 0.878431f ==> 0x3f60e0e1 + fragmentUniforms.chrominanceScale = 0x3f60e0e1; + } else { + nvAssert(!"Invalid color range"); + } + // Chrominance black level is 128 for both full and limited range. + // Divide by 255.0f to normalize to the range 0.0f-1.0f. + // 128.0f / 255.0f == 0.501961 ==> 0x3f008081 + fragmentUniforms.chrominanceBlackLevel = 0x3f008081; + + fragmentUniforms.satCos = HsGetSatCos(pChannelConfig->dvc); + + // Default digital vibrance is 1024, from an input dvc of 0. If + // dvc is not default, useSatHue tells the shader to use the + // modified satCos. 
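+        // Worked examples for HsGetSatCos() above: dvc == -1024 yields
+        // 0/1024.0f == 0.0f (fully desaturated); dvc == 0 yields
+        // 1024/1024.0f == 1.0f (the identity default); dvc == 1023 yields
+        // 2047/1024.0f ~= 2.0f (maximum vibrance).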
+ if (pChannelConfig->dvc != 0) { + fragmentUniforms.useSatHue = 1; + } + } + + nv3dSelectCb(&pHsChannel->nv3d.channel, + NVKMS_HEADSURFACE_CONSTANT_BUFFER_FRAGMENT_PROGRAM); + nv3dBindCb(&pHsChannel->nv3d.channel, NV3D_HW_BIND_GROUP_FRAGMENT, + NV3D_CB_SLOT_MISC1, TRUE); + nv3dLoadConstants(&pHsChannel->nv3d.channel, 0, + sizeof(fragmentUniforms), &fragmentUniforms); +} + +/*! + * Load the vertex program uniforms needed for the headSurface configuration. + */ +static void LoadVertexProgramUniforms( + NVHsChannelEvoPtr pHsChannel, + const struct NvKmsPoint viewPortPointIn) +{ + const NVHsChannelConfig *pChannelConfig = &pHsChannel->config; + Nv3dChannelRec *p3d = &pHsChannel->nv3d.channel; + NvHsVertexUniforms uniforms = { }; + + /* Scale incoming vertices by the output resolution. */ + uniforms.vertexScale.x = pChannelConfig->viewPortOut.width; + uniforms.vertexScale.y = pChannelConfig->viewPortOut.height; + + /* Map primary texture coordinates to the display viewport. */ + + uniforms.primaryTextureScale.x = pChannelConfig->viewPortIn.width; + uniforms.primaryTextureScale.y = pChannelConfig->viewPortIn.height; + + uniforms.primaryTextureBias.x = viewPortPointIn.x; + uniforms.primaryTextureBias.y = viewPortPointIn.y; + + uniforms.cursorPosition.x = pChannelConfig->cursor.x + + uniforms.primaryTextureBias.x; + uniforms.cursorPosition.y = pChannelConfig->cursor.y + + uniforms.primaryTextureBias.y; + + /* Bind the constant buffer. */ + nv3dSelectCb(p3d, NVKMS_HEADSURFACE_CONSTANT_BUFFER_VERTEX_PROGRAM); + nv3dBindCb(p3d, NV3D_HW_BIND_GROUP_VERTEX, NV3D_CB_SLOT_MISC1, TRUE); + + /* Upload the uniforms in it. */ + nv3dLoadConstants(p3d, 0, sizeof(uniforms), &uniforms); +} + +/*! + * Assign the 'textures' texture binding indices array. + * + * The array should have NVIDIA_HEADSURFACE_UNIFORM_SAMPLER_BINDING_NUM + * elements. + */ +static void AssignTextureBindingIndices( + const NVHsChannelEvoRec *pHsChannel, + const NVSurfaceEvoRec *pSurfaceEvo[NVKMS_MAX_LAYERS_PER_HEAD], + int *textures) +{ + const struct { + enum NVHsChannelTexInfoEnum texture; + const NVSurfaceEvoRec *pSurface; + } textureTable[NVIDIA_HEADSURFACE_UNIFORM_SAMPLER_BINDING_NUM] = { + +#define ENTRY(_binding, _texinfo, _surface) \ + [NVIDIA_HEADSURFACE_UNIFORM_SAMPLER_BINDING_ ## _binding ## _TEX] = { \ + .texture = NVKMS_HEADSURFACE_TEXINFO_ ## _texinfo, \ + .pSurface = _surface, \ + } + + ENTRY(PRIMARY, SRC, pSurfaceEvo[NVKMS_MAIN_LAYER]), + ENTRY(CURSOR, CURSOR, pHsChannel->config.cursor.pSurfaceEvo), + ENTRY(BLEND, BLEND, pHsChannel->config.pBlendTexSurface), + ENTRY(OFFSET, OFFSET, pHsChannel->config.pOffsetTexSurface), + ENTRY(OVERLAY, OVERLAY, pSurfaceEvo[NVKMS_OVERLAY_LAYER]), + +#undef ENTRY + }; + + NvU32 i; + + for (i = 0; i < ARRAY_LEN(textureTable); i++) { + if (textureTable[i].pSurface != NULL) { + textures[i] = textureTable[i].texture; + } else { + textures[i] = NV3D_TEXTURE_INDEX_INVALID; + } + } +} + +/*! + * Get the warp mesh data to use to draw headSurface. + * + * If the client specified a warpMesh surface, we use that. Otherwise, use the + * static warp mesh computed earlier. 
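+ *
+ * In either case, the data is consumed as XYUVRQ vertices: six 32-bit
+ * floats per vertex (a 2-component position plus a 4-component texture
+ * coordinate), matching the POSITION and TEXCOORD0 vertex attributes
+ * declared in nvHs3dRenderFrame() below.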
+ */ +static void GetWarpMeshData( + const NVHsChannelEvoPtr pHsChannel, + Nv3dStreamSurfaceRec *pStreamSurf, + NvU32 *pOp, + NvU32 *pVertexCount) +{ + const NvU32 opTable[] = { + [NVKMS_WARP_MESH_DATA_TYPE_TRIANGLES_XYUVRQ] = + NV3D_C(9097, BEGIN, OP, TRIANGLES), + [NVKMS_WARP_MESH_DATA_TYPE_TRIANGLE_STRIP_XYUVRQ] = + NV3D_C(9097, BEGIN, OP, TRIANGLE_STRIP), + }; + + const NVSurfaceEvoRec *pSurface = pHsChannel->config.warpMesh.pSurface; + Nv3dChannelRec *p3d = &pHsChannel->nv3d.channel; + + enum NvKmsWarpMeshDataType dataType; + + nvkms_memset(pStreamSurf, 0, sizeof(*pStreamSurf)); + + if (pSurface != NULL) { + + pStreamSurf->gpuAddress = pSurface->gpuAddress; + pStreamSurf->size = pSurface->planes[0].rmObjectSizeInBytes; + dataType = pHsChannel->config.warpMesh.dataType; + *pVertexCount = pHsChannel->config.warpMesh.vertexCount; + + } else { + + pStreamSurf->gpuAddress = + nv3dGetConstantBufferGpuAddress(p3d, + NVKMS_HEADSURFACE_CONSTANT_BUFFER_STATIC_WARP_MESH); + pStreamSurf->size = NV3D_CONSTANT_BUFFER_SIZE; + dataType = NVKMS_WARP_MESH_DATA_TYPE_TRIANGLE_STRIP_XYUVRQ; + *pVertexCount = 4; + } + + nvAssert((dataType == NVKMS_WARP_MESH_DATA_TYPE_TRIANGLES_XYUVRQ) || + (dataType == NVKMS_WARP_MESH_DATA_TYPE_TRIANGLE_STRIP_XYUVRQ)); + + *pOp = opTable[dataType]; +} + +/*! + * Program the WindowOffset method. + */ +static void HsSetWindowOffset( + NVHsChannelEvoRec *pHsChannel, + NvS16 x, NvS16 y) +{ + NvPushChannelPtr p = &pHsChannel->nvPush.channel; + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_WINDOW_OFFSET_X, 2); + nvPushSetMethodData(p, x); + nvPushSetMethodData(p, y); +} + +/*! + * Return a pointer to the semaphore structure for this headSurface channel and + * semaphore index. + * + * Note we abuse the nvidia-push library's notifier infrastructure as a + * convenient mechanism to allocate and map memory for us. NvNotification and + * NvGpuSemaphore structures are the same size, so this works out. + */ +static inline NvGpuSemaphore *Hs3dGetSemaphore( + const NVHsChannelEvoRec *pHsChannel, + NvU8 index) +{ + const NvPushChannelRec *pChannel = &pHsChannel->nvPush.channel; + const NvU32 sd = pHsChannel->pDispEvo->displayOwner; + + NvNotification *pNotifier = + nvPushGetNotifierCpuAddress(pChannel, index, sd); + + ct_assert(sizeof(NvNotification) == sizeof(NvGpuSemaphore)); + + return (NvGpuSemaphore *) pNotifier; +} + +#if NVKMS_PROCFS_ENABLE +/*! + * Get the semaphore index (e.g., to be passed into + * nvPushGetNotifierGpuAddress()) for an (eye, slot) pair. + */ +static inline NvU8 Hs3dGetStatisticsSemaphoreIndex(const NvU8 eye, + const NvU8 slot) +{ + nvAssert((eye == NVKMS_LEFT) || (eye == NVKMS_RIGHT)); + nvAssert(slot < NVKMS_HEADSURFACE_STATS_MAX_SLOTS); + + const NvU8 maxSemsPerEye = NVKMS_HEAD_SURFACE_STATS_SEMAPHORE_STAGE_COUNT; + const NvU8 maxSemsPerSlot = maxSemsPerEye * NVKMS_MAX_EYES; + + const NvU8 index = (eye * maxSemsPerEye) + (slot * maxSemsPerSlot); + + nvAssert(index < NVKMS_HEADSURFACE_STATS_MAX_SEMAPHORES); + + return index; +} + +/*! + * Return the 64-bit nanosecond timeStamp from the given NvGpuSemaphore. 
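+ *
+ * The GPU writes the timestamp as two 32-bit words, low word first; e.g.,
+ * nanoseconds[0] == 0x89abcdef and nanoseconds[1] == 0x01234567 are
+ * reassembled below into 0x0123456789abcdef.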
+ */
+static inline NvU64 Hs3dGetSemaphoreTime(const NvGpuSemaphore *pSemaphore)
+{
+    const NvU64 low = pSemaphore->timeStamp.nanoseconds[0];
+    const NvU64 high = pSemaphore->timeStamp.nanoseconds[1];
+
+    return (high << 32) | low;
+}
+
+static void Hs3dStatisticsReleaseSemaphore(
+    NVHsChannelEvoPtr pHsChannel,
+    const NvU8 semIndex,
+    const NvU64 nFrames)
+{
+    NvPushChannelPtr p = &pHsChannel->nvPush.channel;
+    const NvU32 sd = pHsChannel->pDispEvo->displayOwner;
+
+    const NvU64 gpuAddress = nvPushGetNotifierGpuAddress(p, semIndex, sd);
+
+    /*
+     * Use the current frame number as the payload, so we can assert that we're
+     * inspecting the right, and correctly released, semaphores when computing
+     * deltaTime below.
+     */
+    const NvU32 payload = NvU64_LO32(nFrames);
+
+    const NvU32 operation =
+        NV3D_C(9097, SET_REPORT_SEMAPHORE_D, OPERATION, RELEASE) |
+        NV3D_C(9097, SET_REPORT_SEMAPHORE_D, RELEASE,
+               AFTER_ALL_PRECEEDING_WRITES_COMPLETE) |
+        NV3D_C(9097, SET_REPORT_SEMAPHORE_D, STRUCTURE_SIZE, FOUR_WORDS) |
+        NV3D_C(9097, SET_REPORT_SEMAPHORE_D, FLUSH_DISABLE, TRUE) |
+        NV3D_C(9097, SET_REPORT_SEMAPHORE_D, PIPELINE_LOCATION, ALL);
+
+    nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_REPORT_SEMAPHORE_A, 4);
+    nvPushSetMethodDataU64(p, gpuAddress);
+    nvPushSetMethodData(p, payload);
+    nvPushSetMethodData(p, operation);
+}
+
+static void Hs3dStatisticsComputeFps(
+    NVHsChannelEvoPtr pHsChannel,
+    const NvU8 eye,
+    const NvU8 slot,
+    const NvU64 currentTime)
+{
+    NVHsChannelStatisticsOneEyeRec *pPerEye =
+        &pHsChannel->statistics.perEye[eye][slot];
+
+    NvU64 elapsedTime;
+    NvU32 elapsedTimeMs;
+
+    if (pPerEye->fps.startTime == 0) {
+        pPerEye->fps.startTime = currentTime;
+    }
+
+    pPerEye->fps.nFrames++;
+
+    elapsedTime = currentTime - pPerEye->fps.startTime;
+
+    /*
+     * To maintain precision without floating point math, convert
+     * the time from nanoseconds to milliseconds (divide by 1000000)
+     * and multiply nFrames by 1000000. This yields frames per
+     * millisecond, scaled by 1000000.
+     */
+    elapsedTimeMs = (elapsedTime + 500000) / 1000000;
+
+    if (elapsedTimeMs > 5000) { /* 5 seconds */
+
+        pPerEye->fps.framesPerMs =
+            (pPerEye->fps.nFrames * 1000000) / elapsedTimeMs;
+
+        pPerEye->fps.nFrames = 0;
+        pPerEye->fps.startTime = currentTime;
+    }
+}
+
+#endif /* NVKMS_PROCFS_ENABLE */
+
+/*!
+ * Collect statistics on headSurface rendering.
+ *
+ * Hs3dStatisticsBefore() should be called before pushing the frame's rendering
+ * methods, and Hs3dStatisticsAfter() should be called after pushing the frame's
+ * rendering methods. Both times, we push methods to do a 3D engine semaphore
+ * release.
+ *
+ * Also, in the "Before" case, we look at the semaphores that were released
+ * during the previous frame (we assume that, by the time we are here for Frame
+ * N, the rendering and semaphore releases completed for Frame N-1), and compute
+ * the time between Frame N-1's "before" and "after" semaphore releases.
+ */
+
+
+static void Hs3dStatisticsBefore(
+    NVHsChannelEvoPtr pHsChannel,
+    const NvU8 eye,
+    const NvU8 slot)
+{
+#if NVKMS_PROCFS_ENABLE
+    NVHsChannelStatisticsOneEyeRec *pPerEye =
+        &pHsChannel->statistics.perEye[eye][slot];
+
+    const NvU8 semIndex = Hs3dGetStatisticsSemaphoreIndex(eye, slot);
+
+    if (pPerEye->nFrames == 0) {
+        goto done;
+    }
+
+    /* Compute the statistics for the previous frame.
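+     *
+     * Frame N-1's "before" and "after" semaphore releases both carried
+     * payload N-1 plus a GPU timestamp, so afterTime - beforeTime below is
+     * the GPU time spent rendering frame N-1. Note that
+     * Hs3dStatisticsComputeFps() above stores frames per millisecond scaled
+     * by 1000000: e.g., 300 frames over 5000 ms yields
+     * framesPerMs == (300 * 1000000) / 5000 == 60000, i.e., 60 fps.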
*/ + + const NvU32 prevPayload = NvU64_LO32(pPerEye->nFrames - 1); + + const NvU8 beforeIndex = + semIndex + NVKMS_HEADSURFACE_STATS_SEMAPHORE_BEFORE; + const NvU8 afterIndex = + semIndex + NVKMS_HEADSURFACE_STATS_SEMAPHORE_AFTER; + + NvGpuSemaphore *pBefore = Hs3dGetSemaphore(pHsChannel, beforeIndex); + NvGpuSemaphore *pAfter = Hs3dGetSemaphore(pHsChannel, afterIndex); + + const NvU64 beforeTime = Hs3dGetSemaphoreTime(pBefore); + const NvU64 afterTime = Hs3dGetSemaphoreTime(pAfter); + const NvU64 deltaTime = afterTime - beforeTime; + + /* + * The payload for the before and after semaphores should be the + * previous frame number unless (a) we're looking at the wrong + * semaphores or (b) the semaphores weren't released yet. + */ + nvAssert(pBefore->data[0] == prevPayload); + nvAssert(pAfter->data[0] == prevPayload); + (void)prevPayload; + + if (afterTime < beforeTime) { + nvEvoLogDispDebug(pHsChannel->pDispEvo, EVO_LOG_ERROR, + "Hs3dStatisticsBefore(): " + "afterTime (%" NvU64_fmtu " nsec) < " + "beforeTime (%" NvU64_fmtu " nsec)", + afterTime, beforeTime); + } + + pPerEye->gpuTimeSpent += deltaTime; + + /* + * Compute the frames per second of headSurface for this eye + slot, so that + * we can take advantage of the nanosecond time extracted above from the + * released semaphores. + */ + Hs3dStatisticsComputeFps(pHsChannel, eye, slot, beforeTime); + +done: + Hs3dStatisticsReleaseSemaphore( + pHsChannel, + semIndex + NVKMS_HEADSURFACE_STATS_SEMAPHORE_BEFORE, + pPerEye->nFrames); + +#endif /* NVKMS_PROCFS_ENABLE */ +} + +static void Hs3dStatisticsAfter( + NVHsChannelEvoPtr pHsChannel, + const NvU8 eye, + const NvU8 slot) +{ +#if NVKMS_PROCFS_ENABLE + NVHsChannelStatisticsOneEyeRec *pPerEye = + &pHsChannel->statistics.perEye[eye][slot]; + const NvU8 semIndex = Hs3dGetStatisticsSemaphoreIndex(eye, slot); + + Hs3dStatisticsReleaseSemaphore( + pHsChannel, + semIndex + NVKMS_HEADSURFACE_STATS_SEMAPHORE_AFTER, + pPerEye->nFrames); + + pPerEye->nFrames++; + +#endif /* NVKMS_PROCFS_ENABLE */ +} + +/*! + * Return the semaphore value showing the viewport offset for the most recently + * completed frame of non-swapgroup headsurface rendering. + */ +NvU32 nvHs3dLastRenderedOffset(NVHsChannelEvoPtr pHsChannel) +{ + const NvU8 semIndex = NVKMS_HEADSURFACE_VIEWPORT_OFFSET_SEMAPHORE_INDEX; + const NvGpuSemaphore *sema = Hs3dGetSemaphore(pHsChannel, semIndex); + + return sema->data[0]; +} + +/*! + * Push a semaphore write of the viewport offset used for the previous frame + * of non-swapgroup headsurface rendering to + * NVKMS_HEADSURFACE_VIEWPORT_OFFSET_SEMAPHORE_INDEX followed by a + * non-stall interrupt. 
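+ *
+ * nvHs3dLastRenderedOffset() above reads back the same semaphore slot, so
+ * once the GPU processes this release, the CPU can observe which viewport
+ * offset was used for the most recently completed frame of rendering.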
+ */ +void nvHs3dPushPendingViewportFlip(NVHsChannelEvoPtr pHsChannel) +{ + NvPushChannelPtr p = &pHsChannel->nvPush.channel; + const NvU32 sd = pHsChannel->pDispEvo->displayOwner; + const NvU8 semIndex = NVKMS_HEADSURFACE_VIEWPORT_OFFSET_SEMAPHORE_INDEX; + + const NvU64 gpuAddress = nvPushGetNotifierGpuAddress(p, semIndex, sd); + const NvU32 payload = pHsChannel->nextOffset; + const NvU32 semaphoreOperation = + DRF_DEF(A06F, _SEMAPHORED, _OPERATION, _RELEASE) | + DRF_DEF(A06F, _SEMAPHORED, _RELEASE_WFI, _DIS) | + DRF_DEF(A06F, _SEMAPHORED, _RELEASE_SIZE, _4BYTE); + + nvAssert(!pHsChannel->viewportFlipPending); + + pHsChannel->viewportFlipPending = TRUE; + + nvPushMethod(p, 0, NVA06F_SEMAPHOREA, 4); + nvPushSetMethodDataU64(p, gpuAddress); + nvPushSetMethodData(p, payload); + nvPushSetMethodData(p, semaphoreOperation); + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NVA06F_NON_STALL_INTERRUPT, 1); + nvPushSetMethodData(p, 0); + nvPushKickoff(p); + +} + +struct NvHs3dRenderFrameWorkArea { + + struct { + NvBool enabled; + NvBool honorSwapGroupClipList; + + struct { + const NVSurfaceEvoRec *pSurfaceEvo[NVKMS_MAX_LAYERS_PER_HEAD]; + } src; + + struct { + const NVHsSurfaceRec *pHsSurface; + } dst; + + } staging; + + struct { + + struct { + const NVSurfaceEvoRec *pSurfaceEvo[NVKMS_MAX_LAYERS_PER_HEAD]; + struct NvKmsPoint viewPortPointIn; + } src; + + struct { + const NVHsSurfaceRec *pHsSurface; + } dst; + + } threeD; +}; + +/*! + * Assign the NvHs3dRenderFrameWorkArea structure. + * + * Whether SwapGroup is enabled impacts the nvHs3dRenderFrame() pipeline. + */ +static NvBool Hs3dAssignRenderFrameWorkArea( + const NVHsChannelEvoRec *pHsChannel, + const NvBool honorSwapGroupClipList, + const NvU8 dstEye, + const NvU8 dstBufferIndex, + const NVSurfaceEvoRec *pSurfaceEvo[NVKMS_MAX_LAYERS_PER_HEAD], + struct NvHs3dRenderFrameWorkArea *pWorkArea) +{ + const NVDevEvoRec *pDevEvo = pHsChannel->pDispEvo->pDevEvo; + const NVHsStateOneHeadAllDisps *pHsOneHeadAllDisps = + &pDevEvo->apiHeadSurfaceAllDisps[pHsChannel->apiHead]; + + const NVHsSurfaceRec *pSurface; + const struct NvKmsPoint origin = { .x = 0, .y = 0}; + + NvU8 layer; + + nvkms_memset(pWorkArea, 0, sizeof(*pWorkArea)); + + nvAssert(dstEye < ARRAY_LEN(pHsOneHeadAllDisps->surfaces)); + nvAssert(dstBufferIndex < ARRAY_LEN(pHsOneHeadAllDisps->surfaces[dstEye])); + + pSurface = pHsOneHeadAllDisps->surfaces[dstEye][dstBufferIndex].pSurface; + + if (pSurface == NULL) { + return FALSE; + } + + /* Assign the src and dst of the operations needed by headSurface. */ + + /* + * We only need the staging buffer if there are both swapgroup and + * non-swapgroup content on the screen which we can see from the + * cliplist. + * + * NOTE: We probably should assert here that viewPortIn minus swapgroup + * clip list equals viewPortIn. The client already does the same, + * communicates that to NVKMS and that is cached in the + * swapGroupIsFullscreen flag. + */ + + const NVSwapGroupRec *pSwapGroup = + pHsChannel->pDispEvo->pSwapGroup[pHsChannel->apiHead]; + + if (pHsChannel->config.neededForSwapGroup && + pSwapGroup && + !pSwapGroup->swapGroupIsFullscreen) { + + const NVHsSurfaceRec *pStagingSurface = + pHsOneHeadAllDisps->surfaces[dstEye][dstBufferIndex].pStagingSurface; + nvAssert(pStagingSurface != NULL); + + /* + * The 'staging' operation uses the client-provided surfaces as src, and + * uses the staging surface as dst. 
+ */ + + pWorkArea->staging.enabled = TRUE; + pWorkArea->staging.honorSwapGroupClipList = honorSwapGroupClipList; + + ct_assert(ARRAY_LEN(pWorkArea->staging.src.pSurfaceEvo) == + NVKMS_MAX_LAYERS_PER_HEAD); + + for (layer = 0; layer < NVKMS_MAX_LAYERS_PER_HEAD; layer++) { + pWorkArea->staging.src.pSurfaceEvo[layer] = pSurfaceEvo[layer]; + } + pWorkArea->staging.dst.pHsSurface = pStagingSurface; + + /* + * The 'threeD' operation uses the staging surface as src, and the + * headSurface surface as dst. + */ + + pWorkArea->threeD.src.pSurfaceEvo[NVKMS_MAIN_LAYER] = + pStagingSurface->pSurfaceEvo; + pWorkArea->threeD.src.viewPortPointIn = origin; + pWorkArea->threeD.dst.pHsSurface = pSurface; + + } else { + + /* Disable the 'staging' operation. */ + + pWorkArea->staging.enabled = FALSE; + + /* + * The 'threeD' operation uses client-provided surfaces as src, and the + * headSurface surface as dst. + */ + + ct_assert(ARRAY_LEN(pWorkArea->threeD.src.pSurfaceEvo) == + NVKMS_MAX_LAYERS_PER_HEAD); + + for (layer = 0; layer < NVKMS_MAX_LAYERS_PER_HEAD; layer++) { + pWorkArea->threeD.src.pSurfaceEvo[layer] = pSurfaceEvo[layer]; + } + pWorkArea->threeD.src.viewPortPointIn.x = pHsChannel->config.viewPortIn.x; + pWorkArea->threeD.src.viewPortPointIn.y = pHsChannel->config.viewPortIn.y; + + pWorkArea->threeD.dst.pHsSurface = pSurface; + } + + return TRUE; +} + +/*! + * Get the clip list for the SwapGroup associated with this pHsChannel. If + * there is no client-specified clip list, use the entire viewPortIn as the clip + * list. + * + * \param[in] pHsChannel The channel in use. + * \param[in] honorSwapGroupClipList + * Whether the returned clip list should honor + * the SwapGroup's current clip list. + * \param[out] pNClips The number of rects in the returned clip list. + * \param[out] ppClipList The returned clip list. + */ +static void Hs3dUpdateStagingSurfaceGetClipList( + const NVHsChannelEvoRec *pHsChannel, + const NvBool honorSwapGroupClipList, + NvU16 *pNClips, + const struct NvKmsRect **ppClipList) +{ + const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo; + const NVSwapGroupRec *pSwapGroup; + + nvAssert(pDispEvo != NULL); + + pSwapGroup = pDispEvo->pSwapGroup[pHsChannel->apiHead]; + + nvAssert(pSwapGroup != NULL); + + if (pSwapGroup->pClipList == NULL || + !honorSwapGroupClipList) { + + *pNClips = 1; + *ppClipList = &pHsChannel->config.viewPortIn; + } else { + *pNClips = pSwapGroup->nClips; + *ppClipList = pSwapGroup->pClipList; + } +} + +/*! + * Intersect two rects and return the resulting rect. + * + * To do this, convert from NvKmsRect {x,y,width,height} to {x0,x1,y0,y1}, and + * take the max of the x0 values and the min of the x1 values. E.g., + * + * Ax0 Ax1 + * +-----+ + * | Bx0 | Bx1 + * | +--+--+ + * | | | | + * +--+--+ | + * | | + * +-----+ + * . . + * . . + * Cx0 . = MAX(Ax0, Bx0) + * Cx1 = MIN(Ax1, Bx1) + * + * Note that the NvKmsRects are expected to describe regions within the NvU16 + * coordinate space. I.e., x1 = x + width should not overflow 16 bits. The + * headSurface ViewPortIn has this property, and nvHsSetSwapGroupClipList() + * guarantees this for the client-specified clip list. + * + * If intersection is empty, e.g., + * + * Ax0 Ax1 Bx0 Bx1 + * +----+ +----+ + * | | | | + * +----+ +----+ + * . . + * . . + * . Cx0 = MAX(Ax0, Bx0) + * Cx1 = MIN(Ax1, Bx1) + * + * return a rect with width=0, height=0. 
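+ *
+ * A worked example: A = { x=0, y=0, width=100, height=100 } and
+ * B = { x=60, y=40, width=100, height=100 } give Ax1 = 100, Bx1 = 160,
+ * Ay1 = 100, By1 = 140, so C = { x=60, y=40, width=40, height=60 }.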
+ */ +static struct NvKmsRect Hs3dIntersectRects( + const struct NvKmsRect rectA, + const struct NvKmsRect rectB) +{ + struct NvKmsRect rect = { }; + + const NvU16 Ax1 = rectA.x + rectA.width; + const NvU16 Ay1 = rectA.y + rectA.height; + + const NvU16 Bx1 = rectB.x + rectB.width; + const NvU16 By1 = rectB.y + rectB.height; + + const NvU16 Cx0 = NV_MAX(rectA.x, rectB.x); + const NvU16 Cy0 = NV_MAX(rectA.y, rectB.y); + + const NvU16 Cx1 = NV_MIN(Ax1, Bx1); + const NvU16 Cy1 = NV_MIN(Ay1, By1); + + nvAssert(!A_plus_B_greater_than_C_U16(rectA.x, rectA.width, NV_U16_MAX)); + nvAssert(!A_plus_B_greater_than_C_U16(rectA.y, rectA.height, NV_U16_MAX)); + + nvAssert(!A_plus_B_greater_than_C_U16(rectB.x, rectB.width, NV_U16_MAX)); + nvAssert(!A_plus_B_greater_than_C_U16(rectB.y, rectB.height, NV_U16_MAX)); + + if ((Cx0 >= Cx1) || (Cy0 >= Cy1)) { + /* Disjoint; return empty rect. */ + return rect; + } + + rect.x = Cx0; + rect.y = Cy0; + rect.width = Cx1 - Cx0; + rect.height = Cy1 - Cy0; + + return rect; +} + +/*! + * Compute the parameters to perform a (non-scaling) blit from client-specified + * input surface to the headSurface staging surface. + * + * \param[in] clipRect In source surface coordinate space. + * \param[in] viewPortIn In source surface coordinate space. + * \param[out] pSrcPoint In source surface coordinate space. + * \param[out] pDstPoint In dest surface coordinate space. + * \param[out] pSize Size of rect to blit. + * + * \return Return FALSE if clipRect and viewPortIn have an empty intersection. + * Otherwise, return TRUE and assign the [out] params. + */ +static NvBool Hs3dUpdateStagingSurfaceGetBlitParams( + const struct NvKmsRect clipRect, + const struct NvKmsRect viewPortIn, + struct NvKmsPoint *pSrcPoint, + struct NvKmsPoint *pDstPoint, + struct NvKmsSize *pSize) +{ + const struct NvKmsRect rect = Hs3dIntersectRects(clipRect, viewPortIn); + + if (rect.width == 0 || rect.height == 0) { + return FALSE; + } + + pSize->width = rect.width; + pSize->height = rect.height; + + pSrcPoint->x = rect.x; + pSrcPoint->y = rect.y; + + /* + * pDstPoint is in the staging surface, which is viewPortIn-sized; position + * pDstPoint relative to viewPortIn. + */ + + nvAssert(rect.x >= viewPortIn.x); + pDstPoint->x = rect.x - viewPortIn.x; + + nvAssert(rect.y >= viewPortIn.y); + pDstPoint->y = rect.y - viewPortIn.y; + + return TRUE; +} + +/*! + * If the staging surface is enabled, copy the content from the client-provided + * surfaces into the staging surface. + * + * \param[in,out] pHsChannel The channel to use for rendering. + * \param[in] pWorkArea The description of what surfaces to use for src + * and dst of the TWOD blits. 
+ */ +static void Hs3dUpdateStagingSurface( + NVHsChannelEvoPtr pHsChannel, + const struct NvHs3dRenderFrameWorkArea *pWorkArea) +{ + NvU16 i, nClips = 0; + const struct NvKmsRect *pClipList = NULL; + + if (!pWorkArea->staging.enabled) { + return; + } + + nvAssert(pWorkArea->staging.dst.pHsSurface != NULL); + + if (pWorkArea->staging.src.pSurfaceEvo[NVKMS_MAIN_LAYER] == NULL) { + return; + } + + Hs3dUpdateStagingSurfaceGetClipList( + pHsChannel, + pWorkArea->staging.honorSwapGroupClipList, + &nClips, + &pClipList); + + Hs3dSetup2dBlit(pHsChannel, + pWorkArea->staging.src.pSurfaceEvo[NVKMS_MAIN_LAYER], + pWorkArea->staging.dst.pHsSurface); + + for (i = 0; i < nClips; i++) { + + struct NvKmsPoint srcPoint = { }; + struct NvKmsPoint dstPoint = { }; + struct NvKmsSize size = { }; + + if (!Hs3dUpdateStagingSurfaceGetBlitParams( + pClipList[i], pHsChannel->config.viewPortIn, + &srcPoint, &dstPoint, &size)) { + continue; + } + + Hs3d2dBlit(pHsChannel, srcPoint, dstPoint, size); + } +} + +/*! + * Render a headSurface frame. + * + * \param[in,out] pHsChannel The channel to use for rendering. + * \param[in] requestType The type of headSurface frame to render. + * \param[in] honorSwapGroupClipList + * Whether to clip the rendering against + * the SwapGroup's current clip list. + * \param[in] dstEye The NVKMS_{LEFT,RIGHT} to which we are + * rendering. + * \param[in] dstBufferIndex The index of the buffer to render into. + * \param[in] pixelShift The pixelShift configuration to use. + * \param[in] destRect The region of pDest to render into. + * \param[in] pSurfaceEvo The surfaces to read from. + * + * \return Return TRUE if there was a headSurface buffer to render into. + * Return FALSE if no headSurface buffer was present. + */ +NvBool nvHs3dRenderFrame( + NVHsChannelEvoPtr pHsChannel, + const NvHsNextFrameRequestType requestType, + const NvBool honorSwapGroupClipList, + const NvU8 dstEye, + const NvU8 dstBufferIndex, + const enum NvKmsPixelShiftMode pixelShift, + const struct NvKmsRect destRect, + const NVSurfaceEvoRec *pSurfaceEvo[NVKMS_MAX_LAYERS_PER_HEAD]) +{ + Nv3dChannelRec *p3d = &pHsChannel->nv3d.channel; + NvBool useOverlay; + const Nv3dVertexAttributeInfoRec attribs[] = { + NV3D_ATTRIB_ENTRY(POSITION, DYNAMIC, 2_32_FLOAT), + NV3D_ATTRIB_ENTRY(TEXCOORD0, DYNAMIC, 4_32_FLOAT), + NV3D_ATTRIB_END, + }; + + NvBool srcFiltering; + ProgramName fragmentProgram; + + Nv3dStreamSurfaceRec streamSurf; + NvPushChannelPtr p = &pHsChannel->nvPush.channel; + NvU32 op, vertexCount; + + int textures[NVIDIA_HEADSURFACE_UNIFORM_SAMPLER_BINDING_NUM] = { }; + + const NvU8 statisticsSlot = + Hs3dStatisticsGetSlot(pHsChannel, requestType, + dstBufferIndex, honorSwapGroupClipList); + + struct NvHs3dRenderFrameWorkArea workArea; + + if (!Hs3dAssignRenderFrameWorkArea(pHsChannel, + honorSwapGroupClipList, + dstEye, + dstBufferIndex, + pSurfaceEvo, &workArea)) { + return FALSE; + } + + useOverlay = (workArea.threeD.src.pSurfaceEvo[NVKMS_OVERLAY_LAYER] != NULL); + srcFiltering = + Hs3dGetSrcFiltering(&pHsChannel->config, pixelShift, useOverlay); + fragmentProgram = + Hs3dGetFragmentProgram(&pHsChannel->config, pixelShift, useOverlay); + + Hs3dStatisticsBefore(pHsChannel, dstEye, statisticsSlot); + + Hs3dUpdateStagingSurface(pHsChannel, &workArea); + + AssignTextureBindingIndices(pHsChannel, workArea.threeD.src.pSurfaceEvo, + textures); + + /* Set up sampler from source surfaces. 
*/ + + AssignRenderTexInfo( + workArea.threeD.src.pSurfaceEvo[NVKMS_MAIN_LAYER], + FALSE /* normalizedCoords */, + srcFiltering, + &pHsChannel->nv3d.texInfo[NVKMS_HEADSURFACE_TEXINFO_SRC]); + + AssignRenderTexInfo( + pHsChannel->config.cursor.pSurfaceEvo, + FALSE /* normalizedCoords */, + srcFiltering, + &pHsChannel->nv3d.texInfo[NVKMS_HEADSURFACE_TEXINFO_CURSOR]); + + AssignRenderTexInfo( + workArea.threeD.src.pSurfaceEvo[NVKMS_OVERLAY_LAYER], + FALSE /* normalizedCoords */, + srcFiltering, + &pHsChannel->nv3d.texInfo[NVKMS_HEADSURFACE_TEXINFO_OVERLAY]); + + /* XXX NVKMS HEADSURFACE TODO: sampler from LUT */ + + /* Set up the source textures. */ + + nv3dLoadTextures(p3d, 0 /* first texture */, + pHsChannel->nv3d.texInfo, + ARRAY_LEN(pHsChannel->nv3d.texInfo)); + + nv3dBindTextures(p3d, fragmentProgram, textures); + + /* + * Set up the destination. + * + * Note: we rely on nvHs3dClearSurface() for setting up the color target and + * surface clip. + */ + nvHs3dClearSurface(pHsChannel, workArea.threeD.dst.pHsSurface, + destRect, pHsChannel->config.yuv420); + + HsSetWindowOffset(pHsChannel, destRect.x, destRect.y); + + /* Load the vertex shader. */ + nv3dLoadProgram(p3d, PROGRAM_NVIDIA_HEADSURFACE_VERTEX); + + /* Load the fragment shader. */ + nv3dLoadProgram(p3d, fragmentProgram); + + /* + * Load vertex and fragment program uniforms. + * + * XXX NVKMS HEADSURFACE TODO: the inputs that influence the program + * uniforms /could/ change from one frame to the next, but in the steady + * state they won't. Should we add tracking to only reload the program + * uniforms when their inputs change? + */ + + LoadFragmentProgramUniforms(pHsChannel, pixelShift, useOverlay, + workArea.threeD.src.viewPortPointIn); + + LoadVertexProgramUniforms(pHsChannel, workArea.threeD.src.viewPortPointIn); + + /* Get the mesh data to use for this frame. */ + + GetWarpMeshData(pHsChannel, &streamSurf, &op, &vertexCount); + + /* Draw the frame of headSurface using a vertex array. */ + + nv3dVasSetup(p3d, attribs, &streamSurf); + nv3dVasBegin(p3d, op); + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_VERTEX_ARRAY_START, 2); + nvPushSetMethodData(p, 0); + nvPushSetMethodData(p, vertexCount); + + nv3dVasEnd(p3d); + + Hs3dStatisticsAfter(pHsChannel, dstEye, statisticsSlot); + + nvPushKickoff(p); + + return TRUE; +} + +/*! + * Use the graphics channel to release the described semaphore. + * + * \param[in,out] pHsChannel The channel to use for the release. + * \param[in] pSurfaceEvo The semaphore surface. + * \param[in] nIsoFormat The NISO format of the surface. + * \param[in] offsetInWords The offset to the semaphore within the surface. + * \param[in] payload The payload to write to the semaphore. + * \param[in] allPreceedingReads Whether to wait for preceding + * reads or writes. + */ +void nvHs3dReleaseSemaphore( + NVHsChannelEvoPtr pHsChannel, + const NVSurfaceEvoRec *pSurfaceEvo, + const enum NvKmsNIsoFormat nIsoFormat, + const NvU16 offsetInWords, + const NvU32 payload, + const NvBool allPreceedingReads) +{ + NvPushChannelPtr p = &pHsChannel->nvPush.channel; + + const NvU32 payloadByteOffsetInSemaphore = + nvKmsSemaphorePayloadOffset(nIsoFormat) * 4; + + const NvU64 gpuAddress = + pSurfaceEvo->gpuAddress + + (offsetInWords * 4) + + payloadByteOffsetInSemaphore; + + const NvU32 afterAllPreceedingReadsOrWrites = + allPreceedingReads ? 
+ NV3D_C(9097, SET_REPORT_SEMAPHORE_D, RELEASE, + AFTER_ALL_PRECEEDING_READS_COMPLETE) : + NV3D_C(9097, SET_REPORT_SEMAPHORE_D, RELEASE, + AFTER_ALL_PRECEEDING_WRITES_COMPLETE); + + const NvU32 operation = + NV3D_C(9097, SET_REPORT_SEMAPHORE_D, OPERATION, RELEASE) | + NV3D_C(9097, SET_REPORT_SEMAPHORE_D, STRUCTURE_SIZE, ONE_WORD) | + NV3D_C(9097, SET_REPORT_SEMAPHORE_D, FLUSH_DISABLE, TRUE) | + NV3D_C(9097, SET_REPORT_SEMAPHORE_D, PIPELINE_LOCATION, ALL) | + afterAllPreceedingReadsOrWrites; + + nvPushMethod(p, NVA06F_SUBCHANNEL_3D, NV9097_SET_REPORT_SEMAPHORE_A, 4); + nvPushSetMethodDataU64(p, gpuAddress); + nvPushSetMethodData(p, payload); + nvPushSetMethodData(p, operation); + + nvPushKickoff(p); +} diff --git a/src/nvidia-modeset/src/nvkms-headsurface-config.c b/src/nvidia-modeset/src/nvkms-headsurface-config.c new file mode 100644 index 0000000..56dde60 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-headsurface-config.c @@ -0,0 +1,2693 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-headsurface.h" +#include "nvkms-headsurface-priv.h" +#include "nvkms-headsurface-config.h" +#include "nvkms-headsurface-3d.h" +#include "nvkms-headsurface-matrix.h" +#include "nvkms-headsurface-swapgroup.h" +#include "nvkms-utils-flip.h" +#include "nvkms-flip.h" +#include "nvkms-utils.h" +#include "nvkms-surface.h" +#include "nvkms-private.h" +#include "nvkms-evo.h" +#include "nvkms-modeset.h" +#include "nvkms-stereo.h" +#include "nvkms-prealloc.h" +#include "nvidia-push-utils.h" /* nvPushIdleChannel() */ + +/*! + * Use warp and blend if any of the warp and blend surfaces were specified. + */ +static NvBool UsesWarpAndBlend( + const struct NvKmsSetModeOneHeadRequest *pRequestHead) +{ + return pRequestHead->headSurface.warpMesh.surfaceHandle != 0 || + pRequestHead->headSurface.blendTexSurfaceHandle != 0 || + pRequestHead->headSurface.offsetTexSurfaceHandle != 0; +} + +/* + * If 3D space, the identity matrix would be + * + * 1 0 0 + * 0 1 0 + * 0 0 1 + * + * but for 2D homogeneous coordinate space, any matrix with: + * + * n 0 0 + * 0 n 0 + * 0 0 n + * + * is an identity matrix. 
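+ *
+ * This is because a homogeneous 2D point (x, y, w) represents the Cartesian
+ * point (x/w, y/w): multiplying by n times the identity yields
+ * (n*x, n*y, n*w), and the divide by w cancels the common factor n (for any
+ * nonzero n), leaving the point unchanged.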
+ */
+static NvBool Is2dHomogeneousIdentity(const struct NvKmsMatrix *m)
+{
+    return m->m[0][1] == 0 &&
+           m->m[0][2] == 0 &&
+           m->m[1][0] == 0 &&
+           m->m[1][2] == 0 &&
+           m->m[2][0] == 0 &&
+           m->m[2][1] == 0 &&
+           m->m[0][0] == m->m[1][1] &&
+           m->m[1][1] == m->m[2][2];
+}
+
+/*
+ * A scaling transform is any where:
+ *
+ * a 0 0
+ * 0 b 0
+ * 0 0 c
+ */
+static NvBool IsScalingTransform(const struct NvKmsMatrix *m)
+{
+    return m->m[0][1] == 0 &&
+           m->m[0][2] == 0 &&
+           m->m[1][0] == 0 &&
+           m->m[1][2] == 0 &&
+           m->m[2][0] == 0 &&
+           m->m[2][1] == 0;
+}
+
+static inline NvBool StateNeedsHeadSurface(const NVHsConfigState state)
+{
+    return state == NVKMS_HEAD_SURFACE_CONFIG_STATE_PARTIAL_HEAD_SURFACE ||
+           state == NVKMS_HEAD_SURFACE_CONFIG_STATE_FULL_HEAD_SURFACE;
+}
+
+static inline void CopyHsStateOneHeadAllDisps(
+    NVHsStateOneHeadAllDisps *pDst,
+    const NVHsStateOneHeadAllDisps *pSrc)
+{
+    nvkms_memcpy(pDst, pSrc, sizeof(NVHsStateOneHeadAllDisps));
+}
+
+static inline void MoveHsStateOneHeadAllDisps(
+    NVHsStateOneHeadAllDisps *pDst,
+    NVHsStateOneHeadAllDisps *pSrc)
+{
+    CopyHsStateOneHeadAllDisps(pDst, pSrc);
+    nvkms_memset(pSrc, 0, sizeof(NVHsStateOneHeadAllDisps));
+}
+
+/*!
+ * Free the surfaces tracked in pHsOneHeadAllDisps.
+ *
+ * surfacesReused indicates that the NVHsSurfaceRecs were reused from the
+ * current configuration, and therefore should not actually be freed. If
+ * surfacesReused is TRUE, update the pHsOneHeadAllDisps structure, but do not
+ * free the NVHsSurfaceRecs.
+ *
+ * \param[in]  pDevEvo             The device.
+ * \param[in]  pHsOneHeadAllDisps  The structure tracking the surfaces.
+ * \param[in]  surfacesReused      Whether the surface was reused.
+ */
+static void HsConfigFreeHeadSurfaceSurfaces(
+    NVDevEvoRec *pDevEvo,
+    NVHsStateOneHeadAllDisps *pHsOneHeadAllDisps,
+    NvBool surfacesReused)
+{
+    int eye, buf;
+
+    for (buf = 0; buf < pHsOneHeadAllDisps->surfaceCount; buf++) {
+
+        /*
+         * If we get here, we expect that the headSurface surfaces are still
+         * allocated. But depending on the configuration, we may only have left
+         * surfaces, not right surfaces, so only assert for the left eye.
+         */
+        nvAssert(pHsOneHeadAllDisps->surfaces[NVKMS_LEFT][buf].pSurface !=
+                 NULL);
+
+        for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) {
+            if (!surfacesReused) {
+                nvHsFreeSurface(
+                    pDevEvo,
+                    pHsOneHeadAllDisps->surfaces[eye][buf].pSurface);
+                nvHsFreeSurface(
+                    pDevEvo,
+                    pHsOneHeadAllDisps->surfaces[eye][buf].pStagingSurface);
+            }
+            pHsOneHeadAllDisps->surfaces[eye][buf].pSurface = NULL;
+            pHsOneHeadAllDisps->surfaces[eye][buf].pStagingSurface = NULL;
+        }
+    }
+    pHsOneHeadAllDisps->surfaceCount = 0;
+}
+
+
+/*!
+ * Update the NVHsChannelConfig's surfaceSize.
+ */
+static void HsConfigUpdateOneHeadSurfaceSize(
+    NVHsChannelConfig *pChannelConfig)
+{
+    pChannelConfig->surfaceSize = pChannelConfig->frameSize;
+
+    pChannelConfig->stagingSurfaceSize.width = 0;
+    pChannelConfig->stagingSurfaceSize.height = 0;
+
+    /*
+     * When SwapGroup is enabled, we double the size of the surface and allocate
+     * staging surfaces.
+     *
+     * Note we double the height, not the width, for better cache locality:
+     * frames of headSurface will be rendered to either the top or bottom half
+     * of the surface.
+     */
+    if (pChannelConfig->neededForSwapGroup) {
+        pChannelConfig->surfaceSize.height *= 2;
+
+        pChannelConfig->stagingSurfaceSize.width =
+            pChannelConfig->viewPortIn.width;
+        pChannelConfig->stagingSurfaceSize.height =
+            pChannelConfig->viewPortIn.height;
+    }
+}
+
+
+/*!
+ * Update NVHsConfigOneHead + * + * Given the modetimings and NvKmsSetModeHeadSurfaceParams, update the + * NVHsConfigOneHead. If state is PARTIAL or FULL, compute the needed size of + * the headSurface surfaces. + * + * \param[in] state To what extent, if any, headSurface should + * be used. + * \param[in] pTimings The modetimings in use on the head. + * \param[in] p The requested configuration from the client. + * \param[in,out] pChannelConfig The headSurface channel config for the head. + */ +static NvBool HsConfigUpdateOneHead( + const NVHsConfigState state, + const NVHwModeTimingsEvo *pTimings, + const struct NvKmsSetModeHeadSurfaceParams *p, + NVHsChannelConfig *pChannelConfig) +{ + struct NvKmsSize size = { 0 }; + struct NvKmsRect viewPortOut = { 0 }; + + if (state == NVKMS_HEAD_SURFACE_CONFIG_STATE_PARTIAL_HEAD_SURFACE) { + /* + * If PARTIAL, the viewPortOut and surface will have the same size, and + * the viewPortOut will be positioned at the origin of the surface. + * Note that for double scan modes, the headSurface viewPortOut height + * will _not_ be doubled (the line doubling will be done using display + * hardware). + */ + viewPortOut = nvEvoViewPortOutClientView(pTimings); + viewPortOut.x = 0; + viewPortOut.y = 0; + size.width = viewPortOut.width; + size.height = viewPortOut.height; + + /* SW yuv420 modes should always be forced to FULL_HEAD_SURFACE. */ + nvAssert(pTimings->yuv420Mode != NV_YUV420_MODE_SW); + + } else if (state == NVKMS_HEAD_SURFACE_CONFIG_STATE_FULL_HEAD_SURFACE) { + /* + * Note that for both double scan and SW yuv420 modes, the headSurface + * viewPortOut will be adjusted relative to viewPortIn. In both cases + * viewPortOut will match the modetimings and headSurface, not the + * display hardware, will perform the needed scaling/conversion from + * viewPortIn to viewPortOut. + */ + viewPortOut = nvEvoViewPortOutHwView(pTimings); + size.width = nvEvoVisibleWidth(pTimings); + size.height = nvEvoVisibleHeight(pTimings); + } + + /* viewPortOut must fit within frameSize */ + nvAssert((viewPortOut.x + viewPortOut.width) <= size.width); + nvAssert((viewPortOut.y + viewPortOut.height) <= size.height); + + pChannelConfig->state = state; + + pChannelConfig->frameSize = size; + pChannelConfig->viewPortOut = viewPortOut; + + HsConfigUpdateOneHeadSurfaceSize(pChannelConfig); + + if (StateNeedsHeadSurface(state)) { + return nvHsAssignTransformMatrix(pChannelConfig, p); + } + + return TRUE; +} + +static NvBool HsConfigInitModesetOneHeadWarpAndBlendSurface( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvKmsSurfaceHandle handle, + NVSurfaceEvoPtr *ppSurface) +{ + if (handle == 0) { + *ppSurface = NULL; + return TRUE; + } + + *ppSurface = + nvEvoGetSurfaceFromHandleNoDispHWAccessOk(pDevEvo, + pOpenDevSurfaceHandles, + handle); + + return *ppSurface != NULL; +} + +/*! + * Initialize NVHsChannelConfig::warpMesh. + * + * If the client's request is invalid, return FALSE. + * Otherwise, assign NVHsChannelConfig::warpMesh appropriately and return TRUE. + * + * \param[in] pOpenDevSurfaceHandles The client's api handles structure. + * \param[in] p The client request structure. + * \param[out] pChannelConfig The channel configuration to be assigned. 
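+ *
+ * Each XYUVRQ vertex consists of six Nv3dFloat values (x, y, u, v, r, q), so
+ * the mesh surface must hold at least vertexCount * 6 * sizeof(Nv3dFloat)
+ * bytes, as validated below.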
+ */
+static NvBool HsConfigInitModesetWarpMesh(
+    const NVDevEvoRec *pDevEvo,
+    const NVEvoApiHandlesRec *pOpenDevSurfaceHandles,
+    const struct NvKmsSetModeHeadSurfaceParams *p,
+    NVHsChannelConfig *pChannelConfig)
+{
+    const NvKmsSurfaceMemoryFormatInfo *pFormatInfo;
+    NVSurfaceEvoPtr pSurface;
+    NvU32 neededSize;
+
+    if (p->warpMesh.surfaceHandle == 0) {
+        return TRUE;
+    }
+
+    pSurface =
+        nvEvoGetSurfaceFromHandleNoDispHWAccessOk(pDevEvo,
+                                                  pOpenDevSurfaceHandles,
+                                                  p->warpMesh.surfaceHandle);
+    if (pSurface == NULL) {
+        return FALSE;
+    }
+
+    switch (p->warpMesh.dataType) {
+    case NVKMS_WARP_MESH_DATA_TYPE_TRIANGLES_XYUVRQ:
+        if ((p->warpMesh.vertexCount % 3) != 0) {
+            return FALSE;
+        }
+        break;
+    case NVKMS_WARP_MESH_DATA_TYPE_TRIANGLE_STRIP_XYUVRQ:
+        break;
+    default:
+        return FALSE;
+    }
+
+    if (p->warpMesh.vertexCount < 3) {
+        return FALSE;
+    }
+
+    if (!NV_IS_ALIGNED(pSurface->widthInPixels, 1024)) {
+        return FALSE;
+    }
+
+    pFormatInfo = nvKmsGetSurfaceMemoryFormatInfo(pSurface->format);
+    if (pFormatInfo->rgb.bytesPerPixel != 4) {
+        return FALSE;
+    }
+
+    if (pSurface->layout != NvKmsSurfaceMemoryLayoutPitch) {
+        return FALSE;
+    }
+
+    neededSize = p->warpMesh.vertexCount * sizeof(Nv3dFloat) * 6;
+
+    if (neededSize > pSurface->planes[0].rmObjectSizeInBytes) {
+        return FALSE;
+    }
+
+    pChannelConfig->warpMesh.pSurface = pSurface;
+    pChannelConfig->warpMesh.vertexCount = p->warpMesh.vertexCount;
+    pChannelConfig->warpMesh.dataType = p->warpMesh.dataType;
+
+    return TRUE;
+}
+
+/*!
+ * Initialize pHsConfigOneHead.
+ *
+ * This is called once by nvHsConfigInitModeset(), and therefore should only
+ * assign configuration parameters that do not change when NVHsConfigState
+ * changes.
+ *
+ * Configuration that changes based on NVHsConfigState should be handled by
+ * HsConfigUpdateOneHead(), so that it can be adjusted by nvHsConfigDowngrade()
+ * if necessary.
+ */
+static NvBool HsConfigInitModesetOneHead(
+    const NVDevEvoRec *pDevEvo,
+    NVHsConfigState state,
+    const NvU8 eyeMask,
+    const NvU32 apiHead,
+    const NvU32 dispIndex,
+    const NVHwModeTimingsEvo *pTimings,
+    const struct NvKmsSize *pViewPortSizeIn,
+    const struct NvKmsFlipCommonParams *pFlipParams,
+    const struct NvKmsSetModeHeadSurfaceParams *pHSParams,
+    const struct NvKmsPerOpenDev *pOpenDev,
+    NVHsConfigOneHead *pHsConfigOneHead)
+{
+    const NVEvoApiHandlesRec *pOpenDevSurfaceHandles =
+        nvGetSurfaceHandlesFromOpenDevConst(pOpenDev);
+    NVHsChannelConfig *pChannelConfig = &pHsConfigOneHead->channelConfig;
+    const NVDispEvoRec *pDispEvo = pDevEvo->pDispEvo[dispIndex];
+    NvU32 layer;
+
+    /* The passed-in state indicates whether modeset needs headSurface. */
+
+    pChannelConfig->neededForModeset = StateNeedsHeadSurface(state);
+
+    /*
+     * If the current SwapGroup configuration needs headSurface, preserve that
+     * in pChannelConfig and override the state.  Note we determine this by
+     * inspecting the SwapGroups on the device: it isn't sufficient to look at
+     * pDispEvo->pHsChannel[apiHead]->config.neededForSwapGroup because
+     * pDispEvo->pHsChannel[apiHead] will be NULL if the head was previously
+     * disabled and the new modeset is enabling the head.
+     */
+    pChannelConfig->neededForSwapGroup =
+        nvHsSwapGroupIsHeadSurfaceNeeded(pDispEvo, apiHead);
+
+    if (pDispEvo->pHsChannel[apiHead] != NULL) {
+        nvAssert(pDispEvo->pHsChannel[apiHead]->config.neededForSwapGroup ==
+                 pChannelConfig->neededForSwapGroup);
+    }
+
+    /*
+     * If headSurface is needed for SwapGroup, make sure the state is at least
+     * PARTIAL.
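+     * (For example, a head that would otherwise be NO_HEAD_SURFACE, because
+     * its transform is an identity, is promoted to PARTIAL_HEAD_SURFACE when
+     * it joins a SwapGroup.)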
+ */ + if (pChannelConfig->neededForSwapGroup && !StateNeedsHeadSurface(state)) { + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_PARTIAL_HEAD_SURFACE; + } + + if (state == NVKMS_HEAD_SURFACE_CONFIG_STATE_NO_HEAD_SURFACE) { + nvkms_memset(pHsConfigOneHead, 0, sizeof(*pHsConfigOneHead)); + return TRUE; + } + + pChannelConfig->eyeMask = eyeMask; + + /* + * XXX NVKMS HEADSURFACE TODO: Update viewPortIn.[xy] for panning updates. + */ + if (pFlipParams->viewPortIn.specified) { + pChannelConfig->viewPortIn.x = pFlipParams->viewPortIn.point.x; + pChannelConfig->viewPortIn.y = pFlipParams->viewPortIn.point.y; + } else { + pChannelConfig->viewPortIn.x = 0; + pChannelConfig->viewPortIn.y = 0; + } + + pChannelConfig->viewPortIn.width = pViewPortSizeIn->width; + pChannelConfig->viewPortIn.height = pViewPortSizeIn->height; + + pChannelConfig->yuv420 = (pTimings->yuv420Mode == NV_YUV420_MODE_SW); + + pChannelConfig->blendAfterWarp = pHSParams->blendAfterWarp; + pChannelConfig->pixelShift = pHSParams->pixelShift; + pChannelConfig->resamplingMethod = pHSParams->resamplingMethod; + + if (!HsConfigInitModesetOneHeadWarpAndBlendSurface( + pDevEvo, + pOpenDevSurfaceHandles, + pHSParams->blendTexSurfaceHandle, + &pChannelConfig->pBlendTexSurface)) { + return FALSE; + } + + if (!HsConfigInitModesetOneHeadWarpAndBlendSurface( + pDevEvo, + pOpenDevSurfaceHandles, + pHSParams->offsetTexSurfaceHandle, + &pChannelConfig->pOffsetTexSurface)) { + return FALSE; + } + + if (!HsConfigInitModesetWarpMesh(pDevEvo, + pOpenDevSurfaceHandles, + pHSParams, + pChannelConfig)) { + return FALSE; + } + + /* + * Modeset does not inherit the old flip state, therefore + * make sure to clear the surfaces if the client hasn't specified + * new surfaces. + */ + for (layer = 0; layer < ARRAY_LEN(pFlipParams->layer); layer++) { + if (pFlipParams->layer[layer].surface.specified) { + NvBool ret = nvAssignSurfaceArray(pDevEvo, + pOpenDevSurfaceHandles, + pFlipParams->layer[layer].surface.handle, + FALSE /* isUsedByCursorChannel */, + TRUE /* isUsedByLayerChannel */, + pHsConfigOneHead->layer[layer].pSurfaceEvo); + if (!ret) { + return FALSE; + } + } else { + nvkms_memset(pHsConfigOneHead->layer[layer].pSurfaceEvo, + 0, + sizeof(pHsConfigOneHead->layer[layer].pSurfaceEvo)); + } + } + + /* XXX make cursor stereo-aware */ + if (pFlipParams->cursor.imageSpecified) { + if (!nvAssignCursorSurface(pOpenDev, pDevEvo, &pFlipParams->cursor.image, + &pChannelConfig->cursor.pSurfaceEvo)) { + return FALSE; + } + } else { + pChannelConfig->cursor.pSurfaceEvo = NULL; + } + + if (pFlipParams->cursor.positionSpecified) { + pChannelConfig->cursor.x = pFlipParams->cursor.position.x; + pChannelConfig->cursor.y = pFlipParams->cursor.position.y; + } else { + pChannelConfig->cursor.x = 0; + pChannelConfig->cursor.y = 0; + } + + { + const NVSurfaceEvoRec *pMainSurfaceEvo = + pHsConfigOneHead->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT]; + + if ((pMainSurfaceEvo != NULL) && + ((pMainSurfaceEvo->format == + NvKmsSurfaceMemoryFormatA2B10G10R10) || + (pMainSurfaceEvo->format == + NvKmsSurfaceMemoryFormatX2B10G10R10))) { + pChannelConfig->hs10bpcHint = TRUE; + } else { + pChannelConfig->hs10bpcHint = FALSE; + } + } + + if (!HsConfigUpdateOneHead(state, pTimings, pHSParams, pChannelConfig)) { + return FALSE; + } + + return TRUE; +} + +/*! + * Validate the requested headSurface configuration. + * + * If the configuration is valid return TRUE. + * Otherwise, assign pReplyHead->status and return FALSE. 
+ *
+ * The general rules for when to use which status:
+ *
+ * INVALID_HEAD_SURFACE is reported for bad API usage; e.g., unrecognized enum
+ * values.
+ *
+ * UNSUPPORTED_HEAD_SURFACE_COMBO is reported when headSurface does not support
+ * the requested combination of features.
+ *
+ * UNSUPPORTED_HEAD_SURFACE_FEATURE is reported if the requested configuration
+ * is not supported on the current GPU (Quadro checks, and similar).
+ */
+static NvBool HsConfigValidate(
+    const NVDevEvoRec *pDevEvo,
+    const struct NvKmsSetModeOneHeadRequest *pRequestHead,
+    struct NvKmsSetModeOneHeadReply *pReplyHead)
+{
+    const NvModeTimings *pModeTimings = &pRequestHead->mode.timings;
+    const struct NvKmsSetModeHeadSurfaceParams *p = &pRequestHead->headSurface;
+
+    /*
+     * Validate that the requested rotation is a recognized value.
+     */
+    switch (p->rotation) {
+    case NVKMS_ROTATION_0:
+    case NVKMS_ROTATION_90:
+    case NVKMS_ROTATION_180:
+    case NVKMS_ROTATION_270:
+        break;
+    default:
+        goto failInvalid;
+    }
+
+    /*
+     * Check warp&blend feature compatibility.
+     */
+    if (UsesWarpAndBlend(pRequestHead)) {
+        if (pModeTimings->yuv420Mode == NV_YUV420_MODE_SW) {
+            goto failUnsupportedCombo;
+        }
+    }
+
+    /*
+     * Validate that the requested pixelShift is a recognized value.
+     */
+    switch (p->pixelShift) {
+    case NVKMS_PIXEL_SHIFT_NONE:
+    case NVKMS_PIXEL_SHIFT_4K_TOP_LEFT:
+    case NVKMS_PIXEL_SHIFT_4K_BOTTOM_RIGHT:
+    case NVKMS_PIXEL_SHIFT_8K:
+        break;
+    default:
+        goto failInvalid;
+    }
+
+    /*
+     * Check pixelShift feature compatibility.
+     */
+    if (p->pixelShift != NVKMS_PIXEL_SHIFT_NONE) {
+        if (UsesWarpAndBlend(pRequestHead)) {
+            goto failUnsupportedCombo;
+        }
+
+        if (pModeTimings->yuv420Mode == NV_YUV420_MODE_SW) {
+            goto failUnsupportedCombo;
+        }
+    }
+
+
+    /*
+     * Validate that the requested resamplingMethod is a recognized value.
+     */
+
+    switch (p->resamplingMethod) {
+    case NVKMS_RESAMPLING_METHOD_BILINEAR:
+    case NVKMS_RESAMPLING_METHOD_BICUBIC_TRIANGULAR:
+    case NVKMS_RESAMPLING_METHOD_BICUBIC_BELL_SHAPED:
+    case NVKMS_RESAMPLING_METHOD_BICUBIC_BSPLINE:
+    case NVKMS_RESAMPLING_METHOD_BICUBIC_ADAPTIVE_TRIANGULAR:
+    case NVKMS_RESAMPLING_METHOD_BICUBIC_ADAPTIVE_BELL_SHAPED:
+    case NVKMS_RESAMPLING_METHOD_BICUBIC_ADAPTIVE_BSPLINE:
+    case NVKMS_RESAMPLING_METHOD_NEAREST:
+        break;
+    default:
+        goto failInvalid;
+    }
+
+    /*
+     * Check resamplingMethod feature compatibility.
+     *
+     * The overlay, yuv420, and pixelShift headsurface shaders all texture
+     * from pixel centers, relying on non-filtered input, and perform
+     * bilinear filtering manually in the shader.  Reject combinations
+     * of these modes with non-bilinear filtering modes.
+     */
+    if (p->resamplingMethod != NVKMS_RESAMPLING_METHOD_BILINEAR) {
+
+        if (p->pixelShift != NVKMS_PIXEL_SHIFT_NONE) {
+            goto failUnsupportedCombo;
+        }
+
+        if (pModeTimings->yuv420Mode == NV_YUV420_MODE_SW) {
+            goto failUnsupportedCombo;
+        }
+
+        if (p->fakeOverlay) {
+            goto failUnsupportedCombo;
+        }
+    }
+
+    /*
+     * PixelShift8k hijacks stereo; prohibit PixelShift8k with real stereo.
+     */
+    if ((pRequestHead->headSurface.pixelShift == NVKMS_PIXEL_SHIFT_8K) &&
+        (pRequestHead->modeValidationParams.stereoMode !=
+         NVKMS_STEREO_DISABLED)) {
+        goto failUnsupportedCombo;
+    }
+
+    return TRUE;
+
+failInvalid:
+
+    pReplyHead->status = NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_HEAD_SURFACE;
+    return FALSE;
+
+failUnsupportedCombo:
+
+    pReplyHead->status =
+        NVKMS_SET_MODE_ONE_HEAD_STATUS_UNSUPPORTED_HEAD_SURFACE_COMBO;
+    return FALSE;
+}
+
+/*!
+ * Copy the configuration described in an NVHsChannelEvoRec to an
+ * NVHsConfigOneHead.
+ *
+ * When changing the headSurface configuration, we build an NVHsConfig, which
+ * describes the headSurface configuration across all heads on the device.
+ *
+ * But the requested configuration may only specify a subset of heads, and the
+ * rest of the heads are expected to preserve their current configuration.
+ *
+ * Use this function to copy the configuration of an existing NVHsChannelEvoRec
+ * to an NVHsConfigOneHead.
+ */
+static void HsConfigCopyHsChannelToHsConfig(
+    NVHsConfigOneHead *pHsConfigOneHead,
+    const NVHsChannelEvoRec *pHsChannel)
+{
+    NvU8 eye, layer;
+
+    pHsConfigOneHead->channelConfig = pHsChannel->config;
+
+    /*
+     * Initialize the surfaces to be used by pHsConfigOneHead.  We use the
+     * surfaces most recently pushed on the flip queue.  NVKMS should drain the
+     * current flip queue as part of applying a new headSurface configuration.
+     * If the flip queue is already drained, use the surfaces in the flip
+     * queue's 'current'.
+     *
+     * Note that we do not need to update any reference counts here: that is
+     * done when the hsConfig is applied to the device by nvHsConfigApply().
+     * And, in that path refcnts are always incremented for the new surfaces
+     * before refcnts are decremented for the old surfaces.
+     */
+
+    /* both structures have the same number of layers */
+    ct_assert(ARRAY_LEN(pHsConfigOneHead->layer) ==
+              ARRAY_LEN(pHsChannel->flipQueue));
+
+    for (layer = 0; layer < ARRAY_LEN(pHsConfigOneHead->layer); layer++) {
+
+        const NVHsLayerRequestedFlipState *pFlipState =
+            HsGetLastFlipQueueEntry(pHsChannel, layer);
+
+        /* both structures have the same number of eyes */
+        ct_assert(ARRAY_LEN(pHsConfigOneHead->layer[layer].pSurfaceEvo) ==
+                  ARRAY_LEN(pFlipState->pSurfaceEvo));
+
+        for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) {
+            pHsConfigOneHead->layer[layer].pSurfaceEvo[eye] =
+                pFlipState->pSurfaceEvo[eye];
+        }
+    }
+}
+
+/*!
+ * Return whether the NvKmsSetModeRequest requests a change on the disp+head.
+ */
+static NvBool HsConfigHeadRequested(
+    const struct NvKmsSetModeRequest *pRequest,
+    const NvU32 dispIndex,
+    const NvU32 apiHead)
+{
+    if ((pRequest->requestedDispsBitMask & NVBIT(dispIndex)) == 0) {
+        return FALSE;
+    }
+
+    if ((pRequest->disp[dispIndex].requestedHeadsBitMask & NVBIT(apiHead)) == 0) {
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+/*!
+ * Get the initial NVHsConfig for the configuration.
+ *
+ * For each head that will be active, there are four possible states,
+ * described by NVHsConfigState.
+ *
+ * Construct an optimistic proposed headSurface configuration: for each head,
+ * prefer the state that relies on display hardware as much as possible, and
+ * on headSurface as little as possible.
+ *
+ * If the modeset fails with that configuration, the caller will "downgrade"
+ * the configuration via nvHsConfigDowngrade() and try again.
+ *
+ * \param[in]  pDevEvo    The device.
+ * \param[in]  pRequest   The requested configuration from the client.
+ * \param[out] pReply     The modeset reply structure.
+ * \param[in]  pOpenDev   The per-open device data for the client.
+ * \param[out] pHsConfig  The NVHsConfig to populate.
+ *
+ * Return FALSE if the requested configuration is not possible, and assign
+ * status fields in pReply as appropriate.
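+ *
+ * For example, a head whose only complication is that ViewPortIn and
+ * ViewPortOut differ in size starts as MAYBE_HEAD_SURFACE; if the modeset
+ * cannot be satisfied, nvHsConfigDowngrade() demotes it to
+ * PARTIAL_HEAD_SURFACE, and then to FULL_HEAD_SURFACE.  An illustrative
+ * caller retry loop (AttemptModeset() is a hypothetical stand-in for the
+ * real modeset attempt):
+ *
+ *   if (!nvHsConfigInitModeset(pDevEvo, pRequest, pReply, pOpenDev,
+ *                              pHsConfig)) {
+ *       return FALSE;
+ *   }
+ *   while (!AttemptModeset(pDevEvo, pRequest, pHsConfig)) {
+ *       if (!nvHsConfigDowngrade(pDevEvo, pRequest, pHsConfig)) {
+ *           return FALSE;
+ *       }
+ *   }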
+ */ +NvBool nvHsConfigInitModeset( + NVDevEvoRec *pDevEvo, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply, + const struct NvKmsPerOpenDev *pOpenDev, + NVHsConfig *pHsConfig) +{ + NvU32 dispIndex, apiHead; + NVDispEvoPtr pDispEvo; + NvBool ret; + + nvkms_memset(pHsConfig, 0, sizeof(*pHsConfig)); + + /* + * Cache the 'commit' flag, to decide later whether to actually allocate + * resources for this pHsConfig. + */ + pHsConfig->commit = pRequest->commit; + + if (!nvGetAllowHeadSurfaceInNvKms(pDevEvo, pOpenDev, pRequest)) { + return TRUE; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + const struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequest->disp[dispIndex].head[apiHead]; + struct NvKmsSetModeOneHeadReply *pReplyHead = + &pReply->disp[dispIndex].head[apiHead]; + NVHwModeTimingsEvo *pTimings; + + NvU8 eyeMask = NVBIT(NVKMS_LEFT); + NVHsConfigState state = + NVKMS_HEAD_SURFACE_CONFIG_STATE_NO_HEAD_SURFACE; + + const NvBool is2dHomogeneousIdentity = + !pRequestHead->headSurface.transformSpecified || + Is2dHomogeneousIdentity(&pRequestHead->headSurface.transform); + + /* Skip this head if it is not specified in the request. */ + + if (!HsConfigHeadRequested(pRequest, dispIndex, apiHead)) { + + /* + * If the head is not specified in the new request, but + * currently has a headSurface configuration, propagate that + * configuration to the new pHsConfig. + */ + const NVHsChannelEvoRec *pHsChannel = + pDispEvo->pHsChannel[apiHead]; + + if (pHsChannel != NULL) { + HsConfigCopyHsChannelToHsConfig( + &pHsConfig->apiHead[dispIndex][apiHead], pHsChannel); + } + + continue; + } + + /* Skip this head if it is not driving any dpys. */ + + if (nvDpyIdListIsEmpty(pRequestHead->dpyIdList)) { + continue; + } + + if (!HsConfigValidate(pDevEvo, pRequestHead, pReplyHead)) { + return FALSE; + } + + pTimings = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_HS_INIT_CONFIG_HW_TIMINGS, + sizeof(*pTimings)); + nvkms_memset(pTimings, 0, sizeof(*pTimings)); + + if (!nvGetHwModeTimings(pDispEvo, apiHead, pRequestHead, + pTimings, NULL /* pDpyColor */, + NULL /* pInfoFrameCtrl */)) { + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_HS_INIT_CONFIG_HW_TIMINGS); + return FALSE; + } + + if (pRequestHead->headSurface.pixelShift != + NVKMS_PIXEL_SHIFT_NONE) { + + if (pRequestHead->headSurface.pixelShift == + NVKMS_PIXEL_SHIFT_8K) { + eyeMask |= NVBIT(NVKMS_RIGHT); + } + + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_FULL_HEAD_SURFACE; + goto done; + } + + /* + * If any of the stereo modes were requested, we'll need NVKMS_RIGHT + * surfaces if we enable headSurface. + */ + if (pRequestHead->modeValidationParams.stereoMode != + NVKMS_STEREO_DISABLED) { + eyeMask |= NVBIT(NVKMS_RIGHT); + } + + if (pTimings->yuv420Mode == NV_YUV420_MODE_SW) { + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_FULL_HEAD_SURFACE; + goto done; + } + + /* + * XXX NVKMS HEADSURFACE TODO: should resamplingMethod only apply if + * there is viewport scaling? 
+ */ + if (pRequestHead->headSurface.resamplingMethod != + NVKMS_RESAMPLING_METHOD_DEFAULT) { + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_FULL_HEAD_SURFACE; + goto done; + } + + if (pRequestHead->headSurface.forceFullCompositionPipeline) { + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_FULL_HEAD_SURFACE; + goto done; + } + + if (pRequestHead->headSurface.forceCompositionPipeline) { + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_PARTIAL_HEAD_SURFACE; + goto done; + } + + /* + * If Warp & Blend is attempted, transforms are bypassed. We want + * headSurface buffers no matter what. + */ + if (UsesWarpAndBlend(pRequestHead)) { + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_PARTIAL_HEAD_SURFACE; + goto done; + } + + if (pRequestHead->headSurface.fakeOverlay) { + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_PARTIAL_HEAD_SURFACE; + goto done; + } + + if (pRequestHead->headSurface.rotation != NVKMS_ROTATION_0) { + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_PARTIAL_HEAD_SURFACE; + goto done; + } + + if (pRequestHead->headSurface.reflectionX) { + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_PARTIAL_HEAD_SURFACE; + goto done; + } + + if (pRequestHead->headSurface.reflectionY) { + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_PARTIAL_HEAD_SURFACE; + goto done; + } + + if (is2dHomogeneousIdentity) { + const NvU16 hVisible = nvEvoVisibleWidth(pTimings); + const NvU16 vVisible = nvEvoVisibleHeight(pTimings); + + if (pTimings->viewPort.out.width != pTimings->viewPort.in.width || + pTimings->viewPort.out.height != pTimings->viewPort.in.height) { + /* + * If ViewPortIn is not the size of ViewPortOut, we might need + * headSurface to scale. + */ + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_MAYBE_HEAD_SURFACE; + } else if ((pTimings->viewPort.out.xAdjust != 0) || + (pTimings->viewPort.out.yAdjust != 0) || + (pTimings->viewPort.out.width != hVisible) || + (pTimings->viewPort.out.height != vVisible)) { + /* + * If ViewPortOut is not the size of raster, we might need + * headSurface to position ViewPortOut. + */ + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_MAYBE_HEAD_SURFACE; + + } else { + /* + * If this is an identity transform, ViewPortIn is the + * same as ViewPortOut, and ViewPortOut is the same as + * raster, we don't need headSurface. + */ + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_NO_HEAD_SURFACE; + + } + goto done; + } + + /* + * A scaling transformation might be possible with display hardware. + */ + if (pRequestHead->headSurface.transformSpecified && + IsScalingTransform(&pRequestHead->headSurface.transform)) { + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_MAYBE_HEAD_SURFACE; + goto done; + } + + /* + * Otherwise, the transformation is more complicated: fall back to + * headSurface. + */ + + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_PARTIAL_HEAD_SURFACE; +done: + ret = HsConfigInitModesetOneHead(pDevEvo, + state, + eyeMask, + apiHead, + dispIndex, + pTimings, + &pRequestHead->viewPortSizeIn, + &pRequestHead->flip, + &pRequestHead->headSurface, + pOpenDev, + &pHsConfig->apiHead[dispIndex][apiHead]); + + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_HS_INIT_CONFIG_HW_TIMINGS); + + if (!ret) { + pReplyHead->status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_HEAD_SURFACE; + return FALSE; + } + } + } + + return TRUE; +} + + +/*! + * Initialize pHsConfigOneHead for SwapGroup. + * + * In the case that headSurface is not needed for modeset, but is needed for + * SwapGroup, initialize the given pHsConfigOneHead. 
+ *
+ * This should parallel HsConfigInitModesetOneHead().
+ */
+static void HsConfigInitSwapGroupOneHead(
+    const NVDispEvoRec *pDispEvo,
+    const NvU32 apiHead,
+    NVHsConfigOneHead *pHsConfigOneHead)
+{
+    static const struct NvKmsSetModeHeadSurfaceParams
+        nullHeadSurfaceParams = { };
+
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    const NVDispApiHeadStateEvoRec *pApiHeadState =
+        &pDispEvo->apiHeadState[apiHead];
+    const NVHwModeTimingsEvo *pTimings = &pApiHeadState->timings;
+    const NVHsConfigState state =
+        NVKMS_HEAD_SURFACE_CONFIG_STATE_PARTIAL_HEAD_SURFACE;
+    NVHsChannelConfig *pChannelConfig = &pHsConfigOneHead->channelConfig;
+    NvU32 layer;
+
+    pChannelConfig->eyeMask = NVBIT(NVKMS_LEFT);
+
+    if (pApiHeadState->stereo.mode != NVKMS_STEREO_DISABLED) {
+        pChannelConfig->eyeMask |= NVBIT(NVKMS_RIGHT);
+    }
+
+    pChannelConfig->viewPortIn.x = pApiHeadState->viewPortPointIn.x;
+    pChannelConfig->viewPortIn.y = pApiHeadState->viewPortPointIn.y;
+
+    pChannelConfig->viewPortIn.width = pTimings->viewPort.in.width;
+    pChannelConfig->viewPortIn.height = pTimings->viewPort.in.height;
+
+    pChannelConfig->hs10bpcHint = pApiHeadState->hs10bpcHint;
+
+    for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) {
+        nvApiHeadGetLayerSurfaceArray(pDispEvo, apiHead, layer,
+                                      pHsConfigOneHead->layer[layer].pSurfaceEvo);
+    }
+
+    nvApiHeadGetCursorInfo(pDispEvo, apiHead,
+                           &pChannelConfig->cursor.pSurfaceEvo,
+                           &pChannelConfig->cursor.x,
+                           &pChannelConfig->cursor.y);
+
+    HsConfigUpdateOneHead(state, pTimings,
+                          &nullHeadSurfaceParams, pChannelConfig);
+}
+
+
+/*!
+ * Initialize the NVHsConfig, applying neededForSwapGroup for the given
+ * pSwapGroup to the current headSurface configuration.
+ */
+void nvHsConfigInitSwapGroup(
+    const NVDevEvoRec *pDevEvo,
+    const NVSwapGroupRec *pSwapGroup,
+    const NvBool neededForSwapGroup,
+    NVHsConfig *pHsConfig)
+{
+    NvU32 dispIndex, apiHead;
+    NVDispEvoPtr pDispEvo;
+
+    nvkms_memset(pHsConfig, 0, sizeof(*pHsConfig));
+
+    pHsConfig->commit = TRUE;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+
+        for (apiHead = 0; apiHead < pDevEvo->numHeads; apiHead++) {
+            NvBool neededForModeset = FALSE;
+
+            const NVHsChannelEvoRec *pHsChannel =
+                pDispEvo->pHsChannel[apiHead];
+
+            NVHsConfigOneHead *pHsConfigOneHead =
+                &pHsConfig->apiHead[dispIndex][apiHead];
+            NVHsChannelConfig *pChannelConfig = &pHsConfigOneHead->channelConfig;
+
+            /*
+             * The console surface may not be set up to be the source of
+             * headSurface operations, and NVKMS may be unloaded, so we can't
+             * have the display rely on headSurface.
+             */
+            if (nvEvoIsConsoleActive(pDevEvo)) {
+                continue;
+            }
+
+            if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
+                continue;
+            }
+
+            /*
+             * If the head currently has a headSurface configuration, propagate
+             * that to the new pHsConfig.
+             */
+            if (pHsChannel != NULL) {
+                HsConfigCopyHsChannelToHsConfig(pHsConfigOneHead, pHsChannel);
+            }
+
+            /* If this head is not part of the SwapGroup, don't change it. */
+
+            if (pDispEvo->pSwapGroup[apiHead] != pSwapGroup) {
+                continue;
+            }
+
+            neededForModeset = pHsConfigOneHead->channelConfig.neededForModeset;
+
+            pChannelConfig->neededForSwapGroup = neededForSwapGroup;
+
+            /*
+             * neededForModeset describes whether the current headSurface
+             * configuration is enabled due to the modeset.
+             *
+             * neededForSwapGroup describes whether the new headSurface
+             * configuration needs to be enabled due to enabling a SwapGroup.
+             *
+             * Update pHsConfigOneHead for the given combination of
+             * neededForModeset + neededForSwapGroup.
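+             *
+             * In summary (the cases handled below):
+             *
+             *   neededForModeset  neededForSwapGroup  action
+             *   ----------------  ------------------  ----------------------
+             *   TRUE              either              recompute surfaceSize
+             *   FALSE             TRUE                initialize via
+             *                                         HsConfigInitSwapGroupOneHead()
+             *   FALSE             FALSE               clear pHsConfigOneHead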
+ */ + + if (neededForModeset) { + + /* + * HeadSurface is already enabled (it is needed for modeset), + * and we are toggling the neededForSwapGroup field. + * + * We should already have a pHsChannel. + */ + nvAssert(pHsChannel != NULL); + + /* + * neededForSwapGroup impacts the computation of + * pChannelConfig->surfaceSize, so recompute that now. + */ + HsConfigUpdateOneHeadSurfaceSize(pChannelConfig); + + } else { + + if (neededForSwapGroup) { + + /* + * HeadSurface is not needed in the current configuration + * for modeset, but now it is needed for SwapGroup. + * + * We don't yet have a pHsChannel. + * + * We need to initialize pHsConfigOneHead, similar to what + * HsConfigInitModesetOneHead() does. + */ + nvAssert(pHsChannel == NULL); + + HsConfigInitSwapGroupOneHead(pDispEvo, apiHead, + pHsConfigOneHead); + + } else { + + /* + * We have headSurface currently enabled. However, it is + * not needed for modeset, and now it isn't needed for + * SwapGroup. + * + * Clear the pHsConfigOneHead. + */ + + nvAssert(pHsChannel != NULL); + + nvkms_memset(pHsConfigOneHead, 0, sizeof(*pHsConfigOneHead)); + } + } + } + } +} + + +/*! + * "Downgrade" the NVHsConfig. + * + * The caller unsuccessfully attempted a modeset with the given NVHsConfig, + * where the heads with state==MAYBE_HEAD_SURFACE used the display hardware + * instead of headSurface, and the heads with state==PARTIAL_HEAD_SURFACE used + * display hardware for ViewPortOut ==> Raster scaling. + * + * Demote one of the heads along the path MAYBE_HEAD_SURFACE -> + * PARTIAL_HEAD_SURFACE -> FULL_HEAD_SURFACE, so that the caller can try the + * modeset again. + * + * \param[in] pDevEvo The device. + * \param[in] pRequest The requested configuration from the client. + * \param[in,out] pHsConfig The NVHsConfig to downgrade. + * + * \return TRUE if a head could be downgraded. FALSE if there are no + * more heads to downgrade. + */ +NvBool nvHsConfigDowngrade( + NVDevEvoRec *pDevEvo, + const struct NvKmsSetModeRequest *pRequest, + NVHsConfig *pHsConfig) +{ + NvU32 dispIndex, apiHead, try; + NVDispEvoPtr pDispEvo; + + for (try = 0; try < 2; try++) { + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + const struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequest->disp[dispIndex].head[apiHead]; + NVHsChannelConfig *pChannelConfig = + &pHsConfig->apiHead[dispIndex][apiHead].channelConfig; + + NVHsConfigState state = + NVKMS_HEAD_SURFACE_CONFIG_STATE_NO_HEAD_SURFACE; + + if (!HsConfigHeadRequested(pRequest, dispIndex, apiHead)) { + continue; + } + + /* Skip this head if it is not driving any dpys. */ + + if (nvDpyIdListIsEmpty(pRequestHead->dpyIdList)) { + continue; + } + + /* + * On the first try, downgrade from + * MAYBE_HEAD_SURFACE to PARTIAL_HEAD_SURFACE. + */ + if ((try == 0) && + (pChannelConfig->state == + NVKMS_HEAD_SURFACE_CONFIG_STATE_MAYBE_HEAD_SURFACE)) { + + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_PARTIAL_HEAD_SURFACE; + } + + /* + * On the second try, downgrade from + * PARTIAL_HEAD_SURFACE to FULL_HEAD_SURFACE. 
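+                 * (FULL moves even the ViewPortOut positioning and scaling,
+                 * which PARTIAL left to the display hardware, into
+                 * headSurface.)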
+ */ + if ((try == 1) && + (pChannelConfig->state == + NVKMS_HEAD_SURFACE_CONFIG_STATE_PARTIAL_HEAD_SURFACE)) { + + state = NVKMS_HEAD_SURFACE_CONFIG_STATE_FULL_HEAD_SURFACE; + } + + if (state != NVKMS_HEAD_SURFACE_CONFIG_STATE_NO_HEAD_SURFACE) { + NvU32 ret; + const struct NvKmsSetModeHeadSurfaceParams *p = + &pRequest->disp[dispIndex].head[apiHead].headSurface; + NVHwModeTimingsEvo *pTimings = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_HS_INIT_CONFIG_HW_TIMINGS, + sizeof(*pTimings)); + nvkms_memset(pTimings, 0, sizeof(*pTimings)); + + if (!nvGetHwModeTimings(pDispEvo, apiHead, pRequestHead, + pTimings, NULL /* pDpyColor */, + NULL /* pInfoFrameCtrl */)) { + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_HS_INIT_CONFIG_HW_TIMINGS); + return FALSE; + } + + ret = HsConfigUpdateOneHead(state, + pTimings, + p, + pChannelConfig); + + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_HS_INIT_CONFIG_HW_TIMINGS); + + return ret; + } + } + } + } + + return FALSE; +} + +/*! + * Return whether the given pDevEvoHsConfig satisfies the specified eyeMask. + */ +static NvBool HsConfigEyeMasksMatch( + const NVHsStateOneHeadAllDisps *pDevEvoHsConfig, + const NvU8 eyeMask) +{ + NvU8 eye, buf; + + for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) { + + if ((NVBIT(eye) & eyeMask) == 0) { + continue; + } + + for (buf = 0; buf < pDevEvoHsConfig->surfaceCount; buf++) { + if (pDevEvoHsConfig->surfaces[eye][buf].pSurface == NULL) { + return FALSE; + } + } + } + + return TRUE; +} + + +/*! + * Return an NvKmsSize that is the maximum of sizeA and sizeB in each dimension. + */ +static inline struct NvKmsSize HsConfigGetMaxNvKmsSize( + struct NvKmsSize sizeA, + struct NvKmsSize sizeB) +{ + struct NvKmsSize maxSize; + + maxSize.width = NV_MAX(sizeA.width, sizeB.width); + maxSize.height = NV_MAX(sizeA.height, sizeB.height); + + return maxSize; +} + +/*! + * Reconcile hs10bpcHint across multiple disps. + * + * If any disp is X2B10G10R10, use X2B10G10R10. Otherwise, use A8R8G8B8. + */ +static enum NvKmsSurfaceMemoryFormat HsConfigGetMaxFormat( + enum NvKmsSurfaceMemoryFormat prevFormat, + NvBool hs10bpcHint) +{ + /* + * prevFormat is initialized to 0; it should not collide with X2B10G10R10 or + * A8R8G8B8 + */ + ct_assert(NvKmsSurfaceMemoryFormatX2B10G10R10 != 0); + ct_assert(NvKmsSurfaceMemoryFormatA8R8G8B8 != 0); + + if (prevFormat == NvKmsSurfaceMemoryFormatX2B10G10R10) { + return prevFormat; + } + + return hs10bpcHint ? + NvKmsSurfaceMemoryFormatX2B10G10R10 : NvKmsSurfaceMemoryFormatA8R8G8B8; +} + +/*! + * Return whether sizeA is greater than or equal to sizeB in both dimensions. + */ +static inline NvBool HsConfigNvKmsSizeIsGreaterOrEqual( + struct NvKmsSize sizeA, + struct NvKmsSize sizeB) +{ + return (sizeA.width >= sizeB.width) && (sizeA.height >= sizeB.height); +} + + +typedef struct _NVHsConfigAllocResourcesWorkArea { + NvBool needsHeadSurface; + NvBool neededForSwapGroup; + NvU8 eyeMask; + struct NvKmsSize headSurfaceSize; + struct NvKmsSize headSurfaceStagingSize; + enum NvKmsSurfaceMemoryFormat format; +} NVHsConfigAllocResourcesWorkArea; + + +/*! + * Allocate an NVHsSurfaceRec and clear its memory on all subdevices. + * + * \return NULL if there was an allocation failure. Otherwise, return a + * pointer to the allocated NVHsSurfaceRec. 
+ */
+static NVHsSurfacePtr HsConfigAllocSurfacesOneSurface(
+    NVDevEvoRec *pDevEvo,
+    const NVHsConfig *pHsConfig,
+    const NvU32 apiHead,
+    const NvBool displayHardwareAccess,
+    struct NvKmsSize surfaceSize,
+    const enum NvKmsSurfaceMemoryFormat format)
+{
+    NvU32 dispIndex;
+    NVDispEvoPtr pDispEvo;
+    NVHsSurfacePtr pSurface;
+
+    pSurface = nvHsAllocSurface(pDevEvo,
+                                displayHardwareAccess,
+                                format,
+                                surfaceSize.width,
+                                surfaceSize.height);
+    if (pSurface == NULL) {
+        return NULL;
+    }
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+        const NVHsConfigOneHead *pHsConfigOneHead =
+            &pHsConfig->apiHead[dispIndex][apiHead];
+        NVHsChannelEvoRec *pHsChannel = pHsConfigOneHead->pHsChannel;
+
+        if (pHsChannel != NULL) {
+            const struct NvKmsRect surfaceRect = {
+                .x = 0,
+                .y = 0,
+                .width = surfaceSize.width,
+                .height = surfaceSize.height,
+            };
+
+            nvHs3dClearSurface(pHsChannel, pSurface, surfaceRect,
+                               pHsConfigOneHead->channelConfig.yuv420);
+        }
+    }
+
+    return pSurface;
+}
+
+
+/*!
+ * Allocate all the surfaces needed for one 'buf'.
+ *
+ * Only update pHsConfig if all allocations succeed.
+ *
+ * \return  FALSE if there was an allocation failure.  Otherwise, return TRUE
+ *          and update pHsConfig to point at the new allocations.
+ */
+static NvBool HsConfigAllocSurfacesOneBuf(
+    NVDevEvoRec *pDevEvo,
+    NVHsConfig *pHsConfig,
+    const NvU32 apiHead,
+    const NvU8 buf,
+    const NVHsConfigAllocResourcesWorkArea *pWorkArea)
+{
+    NVHsSurfacePtr pSurface[NVKMS_MAX_EYES] = { };
+    NVHsSurfacePtr pStagingSurface[NVKMS_MAX_EYES] = { };
+    NvU8 eye;
+
+    const NvBool needsStaging =
+        (pWorkArea->headSurfaceStagingSize.width != 0) &&
+        (pWorkArea->headSurfaceStagingSize.height != 0);
+
+    for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) {
+
+        if ((NVBIT(eye) & pWorkArea->eyeMask) == 0) {
+            continue;
+        }
+
+        pSurface[eye] =
+            HsConfigAllocSurfacesOneSurface(pDevEvo,
+                                            pHsConfig,
+                                            apiHead,
+                                            TRUE, /* displayHardwareAccess */
+                                            pWorkArea->headSurfaceSize,
+                                            pWorkArea->format);
+        if (pSurface[eye] == NULL) {
+            goto fail;
+        }
+
+        if (needsStaging) {
+            pStagingSurface[eye] =
+                HsConfigAllocSurfacesOneSurface(
+                    pDevEvo,
+                    pHsConfig,
+                    apiHead,
+                    FALSE, /* displayHardwareAccess */
+                    pWorkArea->headSurfaceStagingSize,
+                    pWorkArea->format);
+            if (pStagingSurface[eye] == NULL) {
+                goto fail;
+            }
+        }
+    }
+
+    /* All allocations succeeded, we can safely update pHsConfig. */
+
+    for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) {
+        pHsConfig->apiHeadAllDisps[apiHead].surfaces[eye][buf].pSurface =
+            pSurface[eye];
+        pHsConfig->apiHeadAllDisps[apiHead].surfaces[eye][buf].pStagingSurface =
+            pStagingSurface[eye];
+    }
+
+    return TRUE;
+
+fail:
+
+    /* Something failed; free everything.  nvHsFreeSurface(NULL) is a noop. */
+
+    for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) {
+        nvHsFreeSurface(pDevEvo, pSurface[eye]);
+        nvHsFreeSurface(pDevEvo, pStagingSurface[eye]);
+    }
+
+    return FALSE;
+}
+
+
+/*!
+ * Allocate all the surfaces needed for one head.
+ *
+ * \return  FALSE if there was an irrecoverable allocation failure.  Otherwise,
+ *          return TRUE and update pHsConfig.
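+ *
+ * For example, with NVKMS_HEAD_SURFACE_MAX_BUFFERS == 2 and no SwapGroup, a
+ * failure to allocate the second buffer degrades to single-buffered
+ * (potentially tearing) operation rather than failing the modeset.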
+ */ +static NvBool HsConfigAllocSurfacesOneHead( + NVDevEvoRec *pDevEvo, + NVHsConfig *pHsConfig, + const NvU32 apiHead, + const NVHsConfigAllocResourcesWorkArea *pWorkArea) +{ + NVHsStateOneHeadAllDisps *pHsOneHeadAllDisps = + &pHsConfig->apiHeadAllDisps[apiHead]; + NvU8 buf; + + pHsOneHeadAllDisps->surfaceCount = 0; + + for (buf = 0; buf < NVKMS_HEAD_SURFACE_MAX_BUFFERS; buf++) { + + /* + * HeadSurface normally double buffers its rendering and flipping + * (NVKMS_HEAD_SURFACE_MAX_BUFFERS == 2), but in most configurations it + * can function single-buffered if necessary (so only buf == 0 is + * "mustHave"). + * + * However, for SwapGroups, the two buffers are used differently, and we + * cannot really accommodate single-buffered use. So, both buffers are + * "mustHave" when neededForSwapGroup. + */ + const NvBool mustHave = (buf == 0) || pWorkArea->neededForSwapGroup; + + if (!HsConfigAllocSurfacesOneBuf(pDevEvo, + pHsConfig, + apiHead, + buf, + pWorkArea)) { + if (mustHave) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate memory for composition pipeline"); + HsConfigFreeHeadSurfaceSurfaces(pDevEvo, + pHsOneHeadAllDisps, + FALSE /* surfacesReused */); + return FALSE; + } else { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Failed to allocate memory for composition pipeline; continuing with potential tearing."); + break; + } + } + + pHsOneHeadAllDisps->surfaceCount++; + } + + nvAssert(pHsOneHeadAllDisps->surfaceCount > 0); + + pHsOneHeadAllDisps->size = pWorkArea->headSurfaceSize; + pHsOneHeadAllDisps->stagingSize = pWorkArea->headSurfaceStagingSize; + + return TRUE; +} + + +/*! + * Allocate resources needed for the NVHsConfig. + * + * The headSurface configuration may need additional resources to be allocated. + * Determine what resources are needed, allocate them, and track them in the + * NVHsConfig. We do not know if this configuration will be usable by modeset, + * so we do not alter pDevEvo state in this function. + * + * This function could be called multiple times for the same NVHsConfig. If a + * modeset fails, the caller will call nvHsConfigFreeResources(), then + * nvHsConfigDowngrade(), and then call this again. + * + * \param[in] pDevEvo The device. + * \param[in,out] pHsConfig The NVHsConfig to allocate resources for. + * + * \return TRUE if needed resources were allocated. FALSE if resources + * could not be allocated. + */ +NvBool nvHsConfigAllocResources( + NVDevEvoRec *pDevEvo, + NVHsConfig *pHsConfig) +{ + NvU32 dispIndex, apiHead; + NVDispEvoPtr pDispEvo; + + NVHsConfigAllocResourcesWorkArea workArea[NVKMS_MAX_HEADS_PER_DISP] = { }; + + /* + * Handle SLI Mosaic: surface allocations are broadcast across + * subdevices, so compute the maximum surface sizes needed for + * each head on all subdevices. + */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + const NVHsChannelConfig *pChannelConfig = + &pHsConfig->apiHead[dispIndex][apiHead].channelConfig; + + /* Do we need headSurface on this head? */ + if (!StateNeedsHeadSurface(pChannelConfig->state)) { + continue; + } + + /* + * XXX NVKMS HEADSURFACE TODO: perform validation of headSurface + * here. + */ + + /* + * If the client allocated the device with no3d, but we need + * headSurface, fail. 
+ */ + if (pDevEvo->pHsDevice == NULL) { + return FALSE; + } + + workArea[apiHead].needsHeadSurface = TRUE; + workArea[apiHead].eyeMask |= pChannelConfig->eyeMask; + + workArea[apiHead].neededForSwapGroup = + workArea[apiHead].neededForSwapGroup || + pChannelConfig->neededForSwapGroup; + + workArea[apiHead].headSurfaceSize = + HsConfigGetMaxNvKmsSize(workArea[apiHead].headSurfaceSize, + pChannelConfig->surfaceSize); + + workArea[apiHead].headSurfaceStagingSize = + HsConfigGetMaxNvKmsSize(workArea[apiHead].headSurfaceStagingSize, + pChannelConfig->stagingSurfaceSize); + workArea[apiHead].format = + HsConfigGetMaxFormat(workArea[apiHead].format, + pChannelConfig->hs10bpcHint); + } + } + + /* + * Return early without any resource allocation if this configuration is not + * going to be committed. + */ + if (!pHsConfig->commit) { + return TRUE; + } + + /* Allocate the 3d channel where necessary. */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + for (apiHead = 0; apiHead < pDevEvo->numHeads; apiHead++) { + NVHsConfigOneHead *pHsConfigOneHead = + &pHsConfig->apiHead[dispIndex][apiHead]; + NVHsChannelConfig *pChannelConfig = + &pHsConfigOneHead->channelConfig; + + if (!StateNeedsHeadSurface(pChannelConfig->state)) { + continue; + } + + if (pDispEvo->pHsChannel[apiHead] != NULL) { + /* Reuse the existing headSurface channel, if it exists. */ + pHsConfigOneHead->pHsChannel = pDispEvo->pHsChannel[apiHead]; + pHsConfigOneHead->channelReused = TRUE; + } else { + /* Otherwise, allocate a new channel. */ + pHsConfigOneHead->channelReused = FALSE; + pHsConfigOneHead->pHsChannel = + nvHsAllocChannel(pDispEvo, apiHead); + + if (pHsConfigOneHead->pHsChannel == NULL) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate channel for composition pipeline"); + nvHsConfigFreeResources(pDevEvo, pHsConfig); + return FALSE; + } + } + } + } + + /* + * Assign NVHsConfig::apiHeadAllDisps[], and either reuse the existing surfaces + * (if they are large enough), or allocate new surfaces. + */ + for (apiHead = 0; apiHead < pDevEvo->numHeads; apiHead++) { + const NVHsStateOneHeadAllDisps *pDevEvoHsConfig = + &pDevEvo->apiHeadSurfaceAllDisps[apiHead]; + + /* There should not (yet?) be any surfaces allocated for this head */ + nvAssert(pHsConfig->apiHeadAllDisps[apiHead].surfaceCount == 0); + + if (!workArea[apiHead].needsHeadSurface) { + continue; + } + + /* + * If NVKMS already has sufficiently large surfaces for this head, reuse + * them instead of allocating new ones. + * + * XXX NVKMS HEADSURFACE TODO: when transitioning from a large mode to a + * small mode, this will keep the large mode's headSurface surfaces. + * Perhaps we should not reuse the existing surfaces if they are + * significantly larger than necessary. Or, perhaps we should do some + * sort of headSurface compaction after applying surfaces to the device? + * What if the current config is tearing (surfaceCount == 1), and we + * could upgrade to (surfaceCount == 2)? + * + * The same problem applies when transitioning from stereo to mono + * (we'll leave a right eye surface allocated but unused). + * + * The same problem applies when transitioning from a configuration that + * needs stagingSurfaces to a configuration that doesn't. 
+ */ + if ((pDevEvoHsConfig->surfaceCount > 0) && + HsConfigNvKmsSizeIsGreaterOrEqual( + pDevEvoHsConfig->size, + workArea[apiHead].headSurfaceSize) && + HsConfigNvKmsSizeIsGreaterOrEqual( + pDevEvoHsConfig->stagingSize, + workArea[apiHead].headSurfaceStagingSize) && + HsConfigEyeMasksMatch(pDevEvoHsConfig, workArea[apiHead].eyeMask)) { + CopyHsStateOneHeadAllDisps(&pHsConfig->apiHeadAllDisps[apiHead], + pDevEvoHsConfig); + pHsConfig->surfacesReused[apiHead] = TRUE; + continue; + } + + /* Otherwise, allocate new surfaces. */ + + if (!HsConfigAllocSurfacesOneHead(pDevEvo, + pHsConfig, + apiHead, + &workArea[apiHead])) { + nvHsConfigFreeResources(pDevEvo, pHsConfig); + return FALSE; + } + + /* + * XXX NVKMS HEADSURFACE TODO: Populate the surface with the correct + * screen contents. + * + * It is unclear if that is desirable behavior: if we're reusing an + * existing headSurface surface, we shouldn't clobber existing content + * before we flip. + */ + } + + return TRUE; +} + +/*! + * Free resources allocated for NVHsConfig but not used by the current + * configuration. + * + * nvHsConfigAllocResources() allocates resources, after first attempting to + * reuse the current configuration's existing resources. Those will be + * propagated to the pDevEvo by nvHsConfigApply() if the modeset succeeds. + * + * However, if the modeset fails, this function needs to free everything + * allocated by nvHsConfigAllocResources(). + * + * \param[in] pDevEvo The device + * \param[in,out] pHsConfig The NVHsConfigRec whose resources should be freed. + */ +void nvHsConfigFreeResources( + NVDevEvoRec *pDevEvo, + NVHsConfig *pHsConfig) +{ + NvU32 dispIndex, apiHead; + NVDispEvoPtr pDispEvo; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (apiHead = 0; apiHead < pDevEvo->numHeads; apiHead++) { + NVHsConfigOneHead *pHsConfigOneHead = + &pHsConfig->apiHead[dispIndex][apiHead]; + + if (pHsConfigOneHead->pHsChannel == NULL) { + continue; + } + + if (!pHsConfigOneHead->channelReused) { + nvHsFreeChannel(pHsConfigOneHead->pHsChannel); + } + + pHsConfigOneHead->pHsChannel = NULL; + pHsConfigOneHead->channelReused = FALSE; + } + } + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + HsConfigFreeHeadSurfaceSurfaces(pDevEvo, + &pHsConfig->apiHeadAllDisps[apiHead], + pHsConfig->surfacesReused[apiHead]); + pHsConfig->surfacesReused[apiHead] = FALSE; + } +} + +/*! + * Initialize each layer's flip queue. + */ +static void HsConfigInitFlipQueue( + NVHsChannelEvoPtr pHsChannel, + const NVHsConfigOneHead *pHsConfigOneHead) +{ + NvU8 layer, eye; + + /* + * Initialize flipQueueMainLayerState with the surfaces specified in the modeset + * request. + */ + for (eye = 0; eye < NVKMS_MAX_EYES; eye++) { + pHsChannel->flipQueueMainLayerState.pSurfaceEvo[eye] = + pHsConfigOneHead->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[eye]; + } + + /* + * Push a single flip queue entry into each layer's flip queue, using the + * surfaces specified in the modeset request. Later, the nvHsNextFrame() => + * HsUpdateFlipQueueCurrent() call chain will pop the entry from the queue + * and put it in "current". + * + * This might seem a little indirect, but we do things this way so that we + * use common paths for reference count bookkeeping. 
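+     *
+     * Schematically (names per the description above):
+     *
+     *   modeset surfaces --nvHsPushFlipQueueEntry()--> flipQueue[layer] queue
+     *   nvHsNextFrame() => HsUpdateFlipQueueCurrent() pops into "current"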
+ */ + for (layer = 0; layer < ARRAY_LEN(pHsConfigOneHead->layer); layer++) { + + NVHsLayerRequestedFlipState hwState = { }; + + nvkms_memset(&pHsChannel->flipQueue[layer], 0, + sizeof(pHsChannel->flipQueue[layer])); + + nvListInit(&pHsChannel->flipQueue[layer].queue); + + for (eye = 0; eye < ARRAY_LEN(hwState.pSurfaceEvo); eye++) { + hwState.pSurfaceEvo[eye] = + pHsConfigOneHead->layer[layer].pSurfaceEvo[eye]; + } + + nvHsPushFlipQueueEntry(pHsChannel, layer, &hwState); + } +} + +static void HsConfigUpdateSurfaceRefCount( + NVDevEvoPtr pDevEvo, + const NVHsChannelConfig *pChannelConfig, + NvBool increase) +{ + HsChangeSurfaceFlipRefCount( + pDevEvo, pChannelConfig->warpMesh.pSurface, increase); + + HsChangeSurfaceFlipRefCount( + pDevEvo, pChannelConfig->pBlendTexSurface, increase); + + HsChangeSurfaceFlipRefCount( + pDevEvo, pChannelConfig->pOffsetTexSurface, increase); + + HsChangeSurfaceFlipRefCount( + pDevEvo, pChannelConfig->cursor.pSurfaceEvo, increase); +} + +/*! + * Check if flipLock should be allowed on this device. + * + * If any head has headSurface enabled, flipLock might interfere with per-head + * presentation, so prohibit flipLock. + */ +static NvBool HsConfigAllowFlipLock(const NVDevEvoRec *pDevEvo) +{ + NvU32 dispIndex, apiHead; + NVDispEvoPtr pDispEvo; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + if (pDispEvo->pHsChannel[apiHead] != NULL) { + return FALSE; + } + } + } + + return TRUE; +} + +static void HsMainLayerFlip( + NVHsChannelEvoPtr pHsChannel, + NvKmsSurfaceHandle surfaceHandle[NVKMS_MAX_EYES], + const struct NvKmsPoint viewPortPointIn, + const struct NvKmsSetCursorImageCommonParams cursorImage, + const struct NvKmsMoveCursorCommonParams cursorPosition) +{ + const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NvU32 apiHead = pHsChannel->apiHead; + const NvU32 sd = pDispEvo->displayOwner; + NvBool ret; + NvU8 eye; + struct NvKmsFlipCommonParams *pParamsOneHead; + + /* + * Use preallocated memory, so that we don't have to allocate + * memory here (and deal with allocation failure). 
+ */ + struct NvKmsFlipRequestOneHead *pFlipHead = &pHsChannel->scratchParams; + + nvkms_memset(pFlipHead, 0, sizeof(*pFlipHead)); + + pFlipHead->sd = sd; + pFlipHead->head = apiHead; + pParamsOneHead = &pFlipHead->flip; + + pParamsOneHead->layer[NVKMS_MAIN_LAYER].surface.specified = TRUE; + + for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) { + pParamsOneHead->layer[NVKMS_MAIN_LAYER].surface.handle[eye] = surfaceHandle[eye]; + } + + if (surfaceHandle[NVKMS_LEFT] != 0) { + NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDev(pDevEvo->pNvKmsOpenDev); + NVSurfaceEvoPtr pSurfaceEvo = + nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, surfaceHandle[NVKMS_LEFT]); + + pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.specified = TRUE; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.val.width = + pSurfaceEvo->widthInPixels; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.val.height = + pSurfaceEvo->heightInPixels; + + pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeOut.specified = TRUE; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeOut.val = + pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.val; + } + + /* clear completion notifier and sync objects */ + pParamsOneHead->layer[NVKMS_MAIN_LAYER].completionNotifier.specified = TRUE; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.specified = TRUE; + + pParamsOneHead->viewPortIn.specified = TRUE; + pParamsOneHead->viewPortIn.point = viewPortPointIn; + + pParamsOneHead->cursor.image = cursorImage; + pParamsOneHead->cursor.imageSpecified = TRUE; + + pParamsOneHead->cursor.position = cursorPosition; + pParamsOneHead->cursor.positionSpecified = TRUE; + + ret = nvFlipEvo(pDevEvo, + pDevEvo->pNvKmsOpenDev, + pFlipHead, + 1 /* numFlipHeads */, + TRUE /* commit */, + NULL /* pReply */, + FALSE /* skipUpdate */, + FALSE /* allowFlipLock */); + + if (!ret) { + nvAssert(!"headSurface main layer flip failed?"); + } +} + +/*! + * When disabling headSurface, restore the non-headSurface surface, if + * necessary. + * + * HeadSurface is disabled in two paths: as part of modeset, and as part of + * leaving a SwapGroup. In the modeset case, we do not need to do anything + * here: the modeset already specified the correct surface. + * + * But, in the case of disabling headSurface due to leaving a SwapGroup, we need + * to restore the client's non-headSurface surface(s) (i.e., the surfaces in + * flipQueueMainLayerState). So, we check for the case of transitioning from a + * configuration with: + * + * neededForModeset == FALSE + * neededForSwapGroup == TRUE + * + * to a configuration with: + * + * neededForModeset == FALSE + * neededForSwapGroup == FALSE + * + * To flip in that case, use HsMainLayerFlip() => nvFlipEvo(), which populates an + * NvKmsFlipRequest structure. This takes surface handles, so temporarily + * generate NvKmsSurfaceHandles in pNvKmsOpenDev's namespace. 
+ */ +static void HsConfigRestoreMainLayerSurface( + NVDispEvoPtr pDispEvo, + NVHsChannelEvoPtr pHsChannelOld, + const NVHsChannelConfig *pHsChannelConfigNew) +{ + if (!pHsChannelOld->config.neededForModeset && + pHsChannelOld->config.neededForSwapGroup && + !pHsChannelConfigNew->neededForModeset && + !pHsChannelConfigNew->neededForSwapGroup) { + + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoApiHandlesRec *pNvKmsOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDev(pDevEvo->pNvKmsOpenDev); + NVSurfaceEvoRec *pSurfaceEvo[NVKMS_MAX_EYES] = { }; + NvKmsSurfaceHandle surfaceHandle[NVKMS_MAX_EYES] = { }; + NvU8 eye; + + struct NvKmsSetCursorImageCommonParams cursorImage = { }; + + const struct NvKmsMoveCursorCommonParams cursorPosition = { + .x = pHsChannelOld->config.cursor.x, + .y = pHsChannelOld->config.cursor.y, + }; + + const struct NvKmsPoint viewPortPointIn = { + .x = pHsChannelOld->config.viewPortIn.x, + .y = pHsChannelOld->config.viewPortIn.y, + }; + + for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) { + + pSurfaceEvo[eye] = + pHsChannelOld->flipQueueMainLayerState.pSurfaceEvo[eye]; + + if (pSurfaceEvo[eye] == NULL) { + continue; + } + + surfaceHandle[eye] = + nvEvoCreateApiHandle(pNvKmsOpenDevSurfaceHandles, + pSurfaceEvo[eye]); + } + + if (pHsChannelOld->config.cursor.pSurfaceEvo != NULL) { + + cursorImage.surfaceHandle[NVKMS_LEFT] = + nvEvoCreateApiHandle(pNvKmsOpenDevSurfaceHandles, + pHsChannelOld->config.cursor.pSurfaceEvo); + cursorImage.cursorCompParams.colorKeySelect = + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE; + cursorImage.cursorCompParams.blendingMode[1] = + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA; + } + + HsMainLayerFlip( + pHsChannelOld, + surfaceHandle, + viewPortPointIn, + cursorImage, + cursorPosition); + + for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) { + + if (pSurfaceEvo[eye] == NULL) { + continue; + } + + nvEvoDestroyApiHandle(pNvKmsOpenDevSurfaceHandles, + surfaceHandle[eye]); + } + + if (cursorImage.surfaceHandle[NVKMS_LEFT] != 0) { + nvEvoDestroyApiHandle(pNvKmsOpenDevSurfaceHandles, + cursorImage.surfaceHandle[NVKMS_LEFT]); + } + } +} + +/*! + * Enable or disable fliplock on all channels using headsurface for swapgroups, + * waiting for idle if necessary. + */ +static void HsConfigUpdateFlipLockForSwapGroups(NVDevEvoPtr pDevEvo, + NvBool enable) +{ + NvU32 dispIndex, apiHead; + NVDispEvoPtr pDispEvo; + NvU32 flipLockToggleApiHeadMaskPerSd[NVKMS_MAX_SUBDEVICES] = { }; + NvBool found = FALSE; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead]; + + if (pHsChannel == NULL) { + continue; + } + + /* + * This function is called in two cases, when disabling fliplock for + * the pHsChannels in the previous config, and when enabling + * fliplock for the pHsChannels in the new config. In either case, + * if the old config wasn't using fliplock for swapgroups, or the + * new config won't be using fliplock for swapgroups, don't change + * the fliplock state here. + */ + if (!pHsChannel->config.neededForSwapGroup) { + continue; + } + + flipLockToggleApiHeadMaskPerSd[pDispEvo->displayOwner] |= + NVBIT(apiHead); + found = TRUE; + } + } + + if (!found) { + return; + } + + nvApiHeadUpdateFlipLock(pDevEvo, flipLockToggleApiHeadMaskPerSd, enable); +} + +/*! 
+ * Tear down all existing headSurface configs on the device, disabling + * fliplock, flipping base to NULL, restoring core surfaces, and releasing + * swapgroups if necessary. + * + * This function is not allowed to fail: it is called after we have committed to + * performing the modeset. + * + * \param[in,out] pDevEvo The device. + * \param[in] pHsConfig The NVHsConfig that will be applied later in this + * transition; this is used to decide whether to + * restore the non-headSurface surface when leaving + * a SwapGroup. + */ +void nvHsConfigStop( + NVDevEvoPtr pDevEvo, + const NVHsConfig *pHsConfig) +{ + NvU32 dispIndex, apiHead; + NVDispEvoPtr pDispEvo; + NVHsDeviceEvoPtr pHsDevice = pDevEvo->pHsDevice; + NvU32 hsDisableApiHeadMaskPerSd[NVKMS_MAX_SUBDEVICES] = { }; + + /* + * We should only get here if this configuration is going to be committed. + */ + nvAssert(pHsConfig->commit); + + /* + * If fliplock was in use for headsurface swapgroups on any channel, wait + * for those channels to be idle, applying accelerators to ignore + * fliplock/interlock if necessary, and disable fliplock on those channels. + */ + HsConfigUpdateFlipLockForSwapGroups(pDevEvo, FALSE /* enable */); + + /* Flip all headSurface heads to NULL. */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 apiHead; + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead]; + + if (pHsChannel != NULL) { + hsDisableApiHeadMaskPerSd[pDispEvo->displayOwner] |= NVBIT(apiHead); + + if (pHsChannel->config.pixelShift == NVKMS_PIXEL_SHIFT_8K) { + nvSetStereo(pDispEvo, apiHead, FALSE); + } + + if (pHsChannel->config.neededForSwapGroup) { + pHsChannel->viewportFlipPending = FALSE; + nvHsRemoveRgLine1Callback(pHsChannel); + } + + nvHsRemoveVBlankCallback(pHsChannel); + nvHsFlip(pHsDevice, + pHsChannel, + 0 /* eyeMask: ignored when disabling */, + FALSE /* perEyeStereoFlip: ignored when disabling */, + 0 /* index: ignored when disabling */, + NULL /* NULL == disable */, + FALSE /* isFirstFlip */, + FALSE /* allowFlipLock */); + } + } + } + + /* + * Wait for main layer channels to be idle on all channels that previously + * had headSurface enabled in order to allow semaphore releases from + * previous headSurface flips to complete. This wait should not timeout, + * so if it does, just assert instead of forcing the channels idle. + */ + nvApiHeadIdleMainLayerChannels(pDevEvo, + hsDisableApiHeadMaskPerSd); + + /* Update bookkeeping and restore the original surface in main layer. */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead]; + + if (pHsChannel != NULL) { + nvHsFreeStatistics(pHsChannel); + nvHsDrainFlipQueue(pHsChannel); + + HsConfigRestoreMainLayerSurface( + pDispEvo, + pHsChannel, + &pHsConfig->apiHead[dispIndex][apiHead].channelConfig); + } + } + } + + /* + * At this point any active swapgroups with pending flips have completed + * those flips (by force if necessary) and flipped back to core, so release + * any deferred request fifos that are waiting for that pending flip to + * complete. 
+ */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVSwapGroupRec *pSwapGroup = pDispEvo->pSwapGroup[apiHead]; + if ((pSwapGroup != NULL) && + pSwapGroup->pendingFlip) { + nvHsSwapGroupRelease(pDevEvo, pSwapGroup); + } + } + } + + /* finally, make sure any remaining rendering commands have landed */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead]; + if (pHsChannel != NULL) { + nvPushIdleChannel(&pHsChannel->nvPush.channel); + } + } + } +} + +/*! + * Apply the new NVHsConfig to the device. + * + * As resources are propagated from pHsConfig to pDevEvo, remove them from + * pHsConfig, so that nvHsConfigFreeResources() can safely be called on the + * pHsConfig. + * + * This function is not allowed to fail: it is called after we have committed to + * performing the modeset. + * + * \param[in,out] pDevEvo The device. + * \param[in,out] pHsConfig The NVHsConfig to apply. + */ +void nvHsConfigStart( + NVDevEvoPtr pDevEvo, + NVHsConfig *pHsConfig) +{ + NvU32 dispIndex, apiHead; + NVDispEvoPtr pDispEvo; + NVHsDeviceEvoPtr pHsDevice = pDevEvo->pHsDevice; + NvBool allowFlipLock; + + /* + * We should only get here if this configuration is going to be committed. + */ + nvAssert(pHsConfig->commit); + + /* Update channels. */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVHsConfigOneHead *pHsConfigOneHead = + &pHsConfig->apiHead[dispIndex][apiHead]; + + /* + * If we have a new configuration, increment its surface reference + * counts. + */ + if (pHsConfigOneHead->pHsChannel != NULL) { + HsConfigUpdateSurfaceRefCount( + pDevEvo, + &pHsConfigOneHead->channelConfig, + TRUE /* increase */); + } + + /* + * If we have an old configuration, decrement its surface reference + * counts. + */ + if (pDispEvo->pHsChannel[apiHead] != NULL) { + HsConfigUpdateSurfaceRefCount( + pDevEvo, + &pDispEvo->pHsChannel[apiHead]->config, + FALSE /* increase */); + } + + /* If there is no channel before or after, continue. */ + + if ((pDispEvo->pHsChannel[apiHead] == NULL) && + (pHsConfigOneHead->pHsChannel == NULL)) { + continue; + } + + /* If the channel is used before and after, continue. */ + + if ((pDispEvo->pHsChannel[apiHead] != NULL) && + (pHsConfigOneHead->pHsChannel != NULL)) { + nvAssert(pHsConfigOneHead->channelReused); + nvAssert(pDispEvo->pHsChannel[apiHead] == + pHsConfigOneHead->pHsChannel); + continue; + } + + /* Free any channels no longer needed. */ + + if ((pDispEvo->pHsChannel[apiHead] != NULL) && + (pHsConfigOneHead->pHsChannel == NULL)) { + nvHsFreeChannel(pDispEvo->pHsChannel[apiHead]); + pDispEvo->pHsChannel[apiHead] = NULL; + continue; + } + + /* + * Otherwise, propagate the channel configuration from pHsConfig to + * pDispEvo. + */ + nvAssert(pDispEvo->pHsChannel[apiHead] == NULL); + pDispEvo->pHsChannel[apiHead] = pHsConfigOneHead->pHsChannel; + pHsConfigOneHead->pHsChannel = NULL; + } + } + + /* Update surfaces. */ + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + + NVHsStateOneHeadAllDisps *pDevEvoHsConfig = + &pDevEvo->apiHeadSurfaceAllDisps[apiHead]; + NVHsStateOneHeadAllDisps *pHsOneHeadAllDisps = + &pHsConfig->apiHeadAllDisps[apiHead]; + + /* + * If the device is currently using headSurface on this head, but the + * new configuration is not, free the surfaces. 
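+ * This is the first of three cases handled below; the other two are: + * surfaces only in the new configuration (propagate them to the device), + * and surfaces in both (reuse the device's surfaces if possible, otherwise + * replace them).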
+ */ + if ((pDevEvoHsConfig->surfaceCount > 0) && + (pHsOneHeadAllDisps->surfaceCount == 0)) { + + HsConfigFreeHeadSurfaceSurfaces(pDevEvo, pDevEvoHsConfig, FALSE); + continue; + } + + /* + * If the device is currently not using headSurface on this head, but + * the new configuration is, propagate resources. + */ + if ((pDevEvoHsConfig->surfaceCount == 0) && + (pHsOneHeadAllDisps->surfaceCount > 0)) { + + MoveHsStateOneHeadAllDisps(pDevEvoHsConfig, pHsOneHeadAllDisps); + continue; + } + + /* + * If the device is currently using headSurface on this head, and the + * new configuration also is, reconcile the two. + */ + if ((pDevEvoHsConfig->surfaceCount > 0) && + (pHsOneHeadAllDisps->surfaceCount > 0)) { + + /* + * If the new configuration is reusing the device's surfaces, then + * this head is done. + */ + if (pHsConfig->surfacesReused[apiHead]) { + nvAssert(nvkms_memcmp(pDevEvoHsConfig, pHsOneHeadAllDisps, + sizeof(*pDevEvoHsConfig)) == 0); + continue; + } + + /* + * Otherwise, the new configuration had to allocate new surfaces. + * Free the old surfaces and replace them. + */ + HsConfigFreeHeadSurfaceSurfaces(pDevEvo, pDevEvoHsConfig, FALSE); + + MoveHsStateOneHeadAllDisps(pDevEvoHsConfig, pHsOneHeadAllDisps); + } + } + + /* + * Update the flip lock prohibit/allow state, based on whether heads will + * have headSurface enabled in the new configuration. This will allow + * headSurface flips to proceed independently of non-headSurface flips + * on configurations that would otherwise implicitly enable fliplock + * in FinishModesetOneTopology, which should allow the first flip to + * complete below. After that, fliplock may be enabled again for + * headSurface swapgroups if necessary. + */ + + allowFlipLock = HsConfigAllowFlipLock(pDevEvo); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + nvAllowFlipLockEvo(pDispEvo, allowFlipLock); + } + + /* Enable headSurface for the new configuration. */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead]; + const NVHsConfigOneHead *pHsConfigOneHead = + &pHsConfig->apiHead[dispIndex][apiHead]; + + if (pHsChannel != NULL) { + + nvHsAddVBlankCallback(pHsChannel); + + nvHsAllocStatistics(pHsChannel); + + /* Apply the new configuration to pHsChannel. */ + + pHsChannel->config = pHsConfigOneHead->channelConfig; + + if (pHsChannel->config.neededForSwapGroup) { + nvHsAddRgLine1Callback(pHsChannel); + } + + /* + * nvHsConfigPatchSetModeRequest() used + * surfaces[eye][nextIndex].pSurface as the surface during + * modeset. Now that we know that the modeset succeeded, + * increment nextIndex. + */ + HsIncrementNextIndex(pHsDevice, pHsChannel); + + /* + * Reset nextOffset: non-SwapGroup configurations rely on + * nextOffset being 0 to avoid rendering and flipping to an + * invalid configuration within a headSurface surface, and + * SwapGroup configurations rely on nextOffset being 0 to avoid + * the combination of HsIncrementNextOffset below and the + * first flip to offset 0 in HsFlipHelper resulting in the + * active offset and next offset both being 0, which would cause + * a hang in HsServiceRGLineInterrupt. + */ + pHsChannel->nextOffset = 0; + + if (pHsChannel->config.neededForSwapGroup) { + HsIncrementNextOffset(pHsDevice, pHsChannel); + } + + /* Do the one-time set up of the channel. */ + + nvHs3dSetConfig(pHsChannel); + + /* Render the first frame. 
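+ * This first frame is rendered and flipped while fliplock is still + * disallowed (see the nvAllowFlipLockEvo() calls above), so the initial + * flip can complete before fliplock is re-enabled below.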
*/ + + nvHsInitNotifiers(pHsDevice, pHsChannel); + HsConfigInitFlipQueue(pHsChannel, pHsConfigOneHead); + nvHsNextFrame(pHsDevice, pHsChannel, + NV_HS_NEXT_FRAME_REQUEST_TYPE_FIRST_FRAME); + + if (pHsChannel->config.pixelShift == NVKMS_PIXEL_SHIFT_8K) { + nvSetStereo(pDispEvo, apiHead, TRUE); + } + } + } + } + + /* + * If fliplock is necessary for headsurface swapgroups on any channel, + * wait for idle (which shouldn't time out since fliplock was disabled + * above) and enable fliplock on those channels. + */ + HsConfigUpdateFlipLockForSwapGroups(pDevEvo, TRUE /* enable */); +} + +static void HsConfigPatchRequestedViewPortOneHead( + const NVDispEvoRec *pDispEvo, + struct NvKmsSetModeOneHeadRequest *pRequestHead, + const NVHsConfigOneHead *pHsConfigOneHead) +{ + const NVHsChannelConfig *pChannelConfig = &pHsConfigOneHead->channelConfig; + + nvAssert(StateNeedsHeadSurface(pChannelConfig->state)); + + /* + * Patch ViewPortIn: whenever using headSurface, the display + * hardware's ViewPortIn is the size of the headSurface frame. + */ + pRequestHead->viewPortSizeIn.width = pChannelConfig->frameSize.width; + pRequestHead->viewPortSizeIn.height = pChannelConfig->frameSize.height; + + /* + * Patch ViewPortOut: if PARTIAL, then headSurface uses the display hardware + * to do ViewPortIn => ViewPortOut scaling. In that case, we keep the + * client-requested ViewPortOut (i.e., change nothing here). But, if FULL, + * then we program the display hardware with a ViewPortOut the size of the + * visible region of the mode: the surface, ViewPortIn, ViewPortOut, and + * visible region are all the same size. + */ + if (pChannelConfig->state == + NVKMS_HEAD_SURFACE_CONFIG_STATE_FULL_HEAD_SURFACE) { + /* + * If ViewPortOut is not specified in the request, then the ViewPortOut + * will be programmed with the same size as the visible region of + * the mode. + */ + nvkms_memset(&pRequestHead->viewPortOut, + 0, sizeof(pRequestHead->viewPortOut)); + pRequestHead->viewPortOutSpecified = FALSE; + } +} + + +/*! + * Modify the NvKmsSetModeRequest with the desired NVHsConfig. + * + * After calling this function, a modeset will be attempted. If that modeset + * fails, then the previous NvKmsSetModeRequest will be restored, the + * NVHsConfig will be downgraded, and the process will be tried again. + * + * This function is a noop if pHsConfig does not enable headSurface for any + * heads. + * + * \param[in] pDevEvo The device. + * \param[in] pHsConfig The NVHsConfig to apply to pRequest. + * \param[in,out] pOpenDev The per-open device data for the modeset client. + * \param[in,out] pRequest The modeset request to be modified. + * \param[out] patchedApiHeadsMask[] The per-subdevice mask of the API + * heads which are patched by this + * function. + * + * \return TRUE if pRequest could be modified as necessary. + * FALSE if an error occurred and the modeset should be aborted. + * The patchedApiHeadsMask[] output parameter is used by + * nvHsConfigClearPatchedSetModeRequest() to free resources like + * surface handles before clearing the input modeset + * request.
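+ * + * Note that the surface handles created here live in the modeset client's + * handle namespace (nvEvoCreateApiHandle() against pOpenDev), and each one + * holds a reference on its surface; nvHsConfigClearPatchedSetModeRequest() + * destroys those handles and drops the references.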
+ */ +NvBool nvHsConfigPatchSetModeRequest(const NVDevEvoRec *pDevEvo, + const NVHsConfig *pHsConfig, + struct NvKmsPerOpenDev *pOpenDev, + struct NvKmsSetModeRequest *pRequest, + NvU32 patchedApiHeadsMask[NVKMS_MAX_SUBDEVICES]) +{ + NvBool ret = TRUE; + NvU32 apiHead, sd, eye; + NVDispEvoPtr pDispEvo; + NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDev(pOpenDev); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[sd]; + + patchedApiHeadsMask[sd] = 0x0; + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + const NVHsConfigOneHead *pHsConfigOneHead = + &pHsConfig->apiHead[sd][apiHead]; + const NVHsChannelConfig *pChannelConfig = + &pHsConfigOneHead->channelConfig; + + const NVHsChannelEvoRec *pHsChannel = pHsConfigOneHead->pHsChannel; + + struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[apiHead]; + + NvU32 layer; + + if (!HsConfigHeadRequested(pRequest, sd, apiHead)) { + continue; + } + + if (!StateNeedsHeadSurface(pChannelConfig->state)) { + continue; + } + + /* If this is a commit-ful modeset, we should have a channel. */ + if (pHsConfig->commit) { + nvAssert(pHsChannel != NULL); + } + + patchedApiHeadsMask[sd] |= NVBIT(apiHead); + + /* + * XXX NVKMS HEADSURFACE TODO: update the cursor configuration as + * necessary. + */ + + /* + * Construct a new NvKmsFlipCommonParams request reflecting + * pHsConfig. This is per-disp. + */ + nvkms_memset(&pRequestHead->flip, 0, sizeof(pRequestHead->flip)); + + pRequestHead->flip.viewPortIn.specified = TRUE; + pRequestHead->flip.viewPortIn.point.x = 0; + pRequestHead->flip.viewPortIn.point.y = 0; + + pRequestHead->flip.cursor.imageSpecified = TRUE; + pRequestHead->flip.cursor.positionSpecified = TRUE; + + for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) { + pRequestHead->flip.layer[layer].surface.specified = TRUE; + pRequestHead->flip.layer[layer].completionNotifier.specified = TRUE; + pRequestHead->flip.layer[layer].syncObjects.specified = TRUE; + pRequestHead->flip.layer[layer].compositionParams.specified = TRUE; + + if (layer != NVKMS_MAIN_LAYER) { + continue; + } + + pRequestHead->flip.layer[layer].csc.specified = TRUE; + pRequestHead->flip.layer[layer].csc.matrix = NVKMS_IDENTITY_CSC_MATRIX; + + for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) { + if ((NVBIT(eye) & pChannelConfig->eyeMask) == 0) { + continue; + } + + /* If this is a commit-ful modeset, we should have a surface. */ + if (pHsConfig->commit) { + NVHsSurfaceRec *pHsSurface = + pHsConfig->apiHeadAllDisps[apiHead]. + surfaces[eye][pHsChannel->nextIndex].pSurface; + nvAssert(pHsSurface != NULL); + + NVSurfaceEvoRec *pSurfaceEvo = + nvHsGetNvKmsSurface(pDevEvo, + pHsSurface->nvKmsHandle, + TRUE /* requireDisplayHardwareAccess */); + nvAssert(pSurfaceEvo != NULL); + + pRequestHead->flip.layer[layer].surface.handle[eye] = + nvEvoCreateApiHandle(pOpenDevSurfaceHandles, + pSurfaceEvo); + if (pRequestHead->flip.layer[layer].surface.handle[eye] != 0x0) { + nvEvoIncrementSurfaceStructRefCnt(pSurfaceEvo); + } else { + ret = FALSE; + } + } else { + pRequestHead->flip.layer[layer].surface.handle[eye] = 0; + } + } + + if (pRequestHead->flip.layer[layer].surface.handle[NVKMS_LEFT] != 0) { + const NVHsSurfaceRec *pHsSurface = + pHsConfig->apiHeadAllDisps[apiHead].
+ surfaces[NVKMS_LEFT][pHsChannel->nextIndex].pSurface; + + pRequestHead->flip.layer[layer].sizeIn.specified = TRUE; + pRequestHead->flip.layer[layer].sizeIn.val.width = + pHsSurface->pSurfaceEvo->widthInPixels; + pRequestHead->flip.layer[layer].sizeIn.val.height = + pHsSurface->pSurfaceEvo->heightInPixels; + + pRequestHead->flip.layer[layer].sizeOut.specified = TRUE; + pRequestHead->flip.layer[layer].sizeOut.val = + pRequestHead->flip.layer[layer].sizeIn.val; + } + } + + HsConfigPatchRequestedViewPortOneHead(pDispEvo, pRequestHead, + pHsConfigOneHead); + } + } + + return ret; +} + +void +nvHsConfigClearPatchedSetModeRequest(const NVDevEvoRec *pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + struct NvKmsSetModeRequest *pRequest, + const NvU32 patchedApiHeadsMask[NVKMS_MAX_SUBDEVICES]) +{ + NvU32 sd; + NVDispEvoPtr pDispEvo; + NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDev(pOpenDev); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 apiHead; + struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[sd]; + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[apiHead]; + NvU32 eye; + + if ((NVBIT(apiHead) & patchedApiHeadsMask[sd]) == 0x0) { + continue; + } + + + for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) { + const NvKmsSurfaceHandle surfaceHandle = + pRequestHead->flip.layer[NVKMS_MAIN_LAYER].surface.handle[eye]; + + if (surfaceHandle != 0x0) { + NVSurfaceEvoPtr pSurfaceEvo = + nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, + surfaceHandle); + nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle); + nvEvoDecrementSurfaceStructRefCnt(pSurfaceEvo); + } + } + } + } + + nvkms_memset(pRequest, 0, sizeof(*pRequest)); +} diff --git a/src/nvidia-modeset/src/nvkms-headsurface-ioctl.c b/src/nvidia-modeset/src/nvkms-headsurface-ioctl.c new file mode 100644 index 0000000..0d31b35 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-headsurface-ioctl.c @@ -0,0 +1,725 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvkms-types.h" +#include "nvkms-private.h" +#include "nvkms-headsurface.h" +#include "nvkms-headsurface-ioctl.h" +#include "nvkms-headsurface-priv.h" +#include "nvkms-ioctl.h" +#include "nvkms-cursor.h" +#include "nvkms-utils.h" +#include "nvkms-utils-flip.h" +#include "nvkms-flip.h" + +/* + * This source file contains functions that intercept portions of the NVKMS API. + * + * If the API request changes headSurface state, we update headSurface + * accordingly. If the API request does not touch headSurface-ful heads, we + * "call down" to the normal NVKMS implementation. + * + * Note that some NVKMS requests can touch multiple heads at once, where a + * subset of the heads are driven by headSurface. In those cases, we: + * + * - Identify if all requested heads are headSurface-less; if so, call down and + * return. + * + * - Validate the request for the headSurface-ful heads. + * + * - If there are both headSurface-less *and* headSurface-ful heads in the + * request, build a temporary request structure with the headSurface-ful heads + * removed. Call down with that temporary structure. + * + * - Apply the request to the headSurface-ful heads. + */ + +typedef struct _NVHsRequestedFlipState { + struct NvKmsPoint viewPortPointIn; + NVFlipCursorEvoHwState cursor; + NVHsLayerRequestedFlipState layer[NVKMS_MAX_LAYERS_PER_HEAD]; + + struct { + NvBool viewPortPointIn : 1; + NvBool cursorSurface : 1; + NvBool cursorPosition : 1; + + NvBool layer[NVKMS_MAX_LAYERS_PER_HEAD]; + } dirty; + +} NVHsRequestedFlipState; + +/*! + * Validate the NvKmsMoveCursorCommonParams for headSurface. + */ +static NvBool HsIoctlValidateMoveCursor( + const NVHsChannelEvoRec *pHsChannel, + const struct NvKmsMoveCursorCommonParams *pParams) +{ + /* XXX NVKMS HEADSURFACE TODO: validate x,y against headSurface config */ + + return TRUE; +} + +/*! + * Apply x,y to headSurface. + * + * This configuration should first be validated by HsIoctlValidateMoveCursor(). + * We cannot fail from here. + */ +static void HsIoctlMoveCursor( + NVHsChannelEvoRec *pHsChannel, + const NvS16 x, const NvS16 y) +{ + pHsChannel->config.cursor.x = x; + pHsChannel->config.cursor.y = y; + + /* + * XXX NVKMS HEADSURFACE TODO: record that this head is dirty, so + * headSurface knows it needs to rerender the frame. + */ +} + +/*! + * Update headSurface for NVKMS_IOCTL_MOVE_CURSOR. + */ +NvBool nvHsIoctlMoveCursor( + NVDispEvoPtr pDispEvo, + NvU32 apiHead, + const struct NvKmsMoveCursorCommonParams *pParams) +{ + NVHsChannelEvoRec *pHsChannel; + + if (apiHead >= ARRAY_LEN(pDispEvo->pHsChannel)) { + return FALSE; + } + + pHsChannel = pDispEvo->pHsChannel[apiHead]; + + /* If headSurface is not used on this head, call down. */ + + if (pHsChannel == NULL) { + nvMoveCursor(pDispEvo, apiHead, pParams); + return TRUE; + } + + if (!HsIoctlValidateMoveCursor(pHsChannel, pParams)) { + return FALSE; + } + + HsIoctlMoveCursor(pHsChannel, pParams->x, pParams->y); + + return TRUE; +} + +/*! + * Validate the NvKmsSetCursorImageCommonParams for headSurface. + * + * If successfully validated, also assign ppSurfaceEvo. 
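+ * + * Note that only the left-eye surface is assigned to *ppSurfaceEvo: the + * headSurface cursor is not yet stereo-aware (see the XXX below), so any + * right-eye surface is ignored.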
+ */ +static NvBool HsIoctlValidateSetCursorImage( + const NVHsChannelEvoRec *pHsChannel, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const struct NvKmsSetCursorImageCommonParams *pParams, + NVSurfaceEvoRec **ppSurfaceEvo) +{ + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES]; + + if (!nvGetCursorImageSurfaces(pHsChannel->pDispEvo->pDevEvo, + pOpenDevSurfaceHandles, + pParams, + pSurfaceEvos)) { + return FALSE; + } + + /* XXX NVKMS HEADSURFACE TODO: make cursor stereo-aware. */ + + *ppSurfaceEvo = pSurfaceEvos[NVKMS_LEFT]; + + return TRUE; +} + +/*! + * Apply the cursor pSurfaceEvo to headSurface. + * + * This configuration should first be validated by + * HsIoctlValidateSetCursorImage(). We cannot fail from here. + */ +static void HsIoctlSetCursorImage( + NVHsChannelEvoRec *pHsChannel, + NVSurfaceEvoRec *pSurfaceEvo) +{ + NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo; + + /* + * Increment the refcnt of the new surface, and + * decrement the refcnt of the old surface. + * + * XXX NVKMS HEADSURFACE TODO: wait until graphics channel is done using old + * surface before decrementing its refcnt? + */ + + HsChangeSurfaceFlipRefCount( + pDevEvo, pSurfaceEvo, TRUE /* increase */); + + HsChangeSurfaceFlipRefCount( + pDevEvo, pHsChannel->config.cursor.pSurfaceEvo, FALSE /* increase */); + + pHsChannel->config.cursor.pSurfaceEvo = pSurfaceEvo; + + /* + * XXX NVKMS HEADSURFACE TODO: record that this head is dirty, so + * headSurface knows it needs to rerender the frame. + */ +} + +/*! + * Update headSurface for NVKMS_IOCTL_SET_CURSOR_IMAGE. + */ +NvBool nvHsIoctlSetCursorImage( + NVDispEvoPtr pDispEvo, + const struct NvKmsPerOpenDev *pOpenDevice, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvU32 apiHead, + const struct NvKmsSetCursorImageCommonParams *pParams) +{ + NVHsChannelEvoRec *pHsChannel; + NVSurfaceEvoRec *pSurfaceEvo = NULL; + + if (apiHead >= ARRAY_LEN(pDispEvo->pHsChannel)) { + return FALSE; + } + + pHsChannel = pDispEvo->pHsChannel[apiHead]; + + /* If headSurface is not used on this head, call down. */ + + if (pHsChannel == NULL) { + return nvSetCursorImage(pDispEvo, + pOpenDevice, + pOpenDevSurfaceHandles, + apiHead, + pParams); + } + + if (!HsIoctlValidateSetCursorImage(pHsChannel, + pOpenDevSurfaceHandles, + pParams, + &pSurfaceEvo)) { + return FALSE; + } + + HsIoctlSetCursorImage(pHsChannel, pSurfaceEvo); + + return TRUE; +} + +/*! + * Apply NvKmsPoint to headSurface. + */ +static void HsIoctlPan( + NVHsChannelEvoRec *pHsChannel, + const struct NvKmsPoint *pViewPortPointIn) +{ + pHsChannel->config.viewPortIn.x = pViewPortPointIn->x; + pHsChannel->config.viewPortIn.y = pViewPortPointIn->y; + + /* + * XXX NVKMS HEADSURFACE TODO: record that this head is dirty, so + * headSurface knows it needs to rerender the frame. + */ +} + +/*! + * Create a copy of pFlipHead[] array with the headSurface-ful heads removed. 
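+ * + * For example (hypothetical input), if pFlipHeadOriginal[] has four entries + * and hsMask marks entries 0 and 2 as headSurface-ful, the returned array + * holds entries 1 and 3 in their original relative order, and numFlipHeads + * must be 2.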
+ */ +static struct NvKmsFlipRequestOneHead *HsIoctlRemoveHsHeadsFromNvKmsFlipHead( + NVDevEvoPtr pDevEvo, + const struct NvKmsFlipRequestOneHead *pFlipHeadOriginal, + const NvU32 numFlipHeadsOriginal, + const NvU32 numFlipHeads, + const NvU8 hsMask[NV_MAX_FLIP_REQUEST_HEADS / 8]) +{ + struct NvKmsFlipRequestOneHead *pFlipHead = NULL; + NvU32 i, j; + + pFlipHead = nvAlloc(sizeof(*pFlipHead) * numFlipHeads); + if (pFlipHead == NULL) { + return NULL; + } + + j = 0; + for (i = 0; i < numFlipHeadsOriginal; i++) { + if ((hsMask[i / 8] & NVBIT(i % 8)) == 0) { + pFlipHead[j++] = pFlipHeadOriginal[i]; + } + } + nvAssert(j == numFlipHeads); + + return pFlipHead; +} + +static void HsIoctlAssignSurfacesMaxEyes( + NVSurfaceEvoPtr pSurfaceEvoDst[], + NVSurfaceEvoRec *const pSurfaceEvoSrc[]) +{ + NvU8 eye; + + for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) { + pSurfaceEvoDst[eye] = pSurfaceEvoSrc[eye]; + } +} + +static NvBool HsIoctlFlipValidateOneHwState( + const NVHsLayerRequestedFlipState *pHwState, + const NvU32 sd) +{ + /* The semaphore surface must have a CPU mapping. */ + + if (!pHwState->syncObject.usingSyncpt) { + if (pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo != NULL) { + if (pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo->cpuAddress[sd] == NULL) { + return FALSE; + } + } + + if (pHwState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo != NULL) { + if (pHwState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo->cpuAddress[sd] == NULL) { + return FALSE; + } + } + } + + return TRUE; +} + +/*! + * Assign NVHsRequestedFlipState. + * + * Return TRUE if the NVHsRequestedFlipState could be assigned and is valid for use by + * headSurface. + */ +static NvBool HsIoctlFlipAssignHwStateOneHead( + NVHsChannelEvoRec *pHsChannel, + NVDevEvoPtr pDevEvo, + const NvU32 sd, + const NvU32 apiHead, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsFlipCommonParams *pParams, + NVHsRequestedFlipState *pFlipState) +{ + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDevConst(pOpenDev); + NvU32 layer; + + nvAssert(pHsChannel != NULL); + + /* Init pFlipState using current pHsChannel state. */ + + nvkms_memset(pFlipState, 0, sizeof(*pFlipState)); + + pFlipState->cursor = pHsChannel->config.cursor; + + pFlipState->viewPortPointIn.x = pHsChannel->config.viewPortIn.x; + pFlipState->viewPortPointIn.y = pHsChannel->config.viewPortIn.y; + + for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) { + pFlipState->layer[layer] = *HsGetLastFlipQueueEntry(pHsChannel, layer); + } + + /* Apply pParams to pFlipState. 
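+ * Only state explicitly specified in pParams overrides the current channel + * state captured above; each overridden field also sets the corresponding + * pFlipState->dirty bit.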
*/ + if (!nvCheckFlipPermissions(pOpenDev, pDevEvo, sd, apiHead, pParams)) { + return FALSE; + } + + if (pParams->viewPortIn.specified) { + pFlipState->dirty.viewPortPointIn = TRUE; + pFlipState->viewPortPointIn = pParams->viewPortIn.point; + } + + if (pParams->cursor.imageSpecified) { + if (!nvAssignCursorSurface(pOpenDev, pDevEvo, &pParams->cursor.image, + &pFlipState->cursor.pSurfaceEvo)) { + return FALSE; + } + + if (pFlipState->cursor.pSurfaceEvo != NULL) { + pFlipState->cursor.cursorCompParams = + pParams->cursor.image.cursorCompParams; + } + + pFlipState->dirty.cursorSurface = TRUE; + } + + if (pParams->cursor.positionSpecified) { + pFlipState->cursor.x = pParams->cursor.position.x; + pFlipState->cursor.y = pParams->cursor.position.y; + + pFlipState->dirty.cursorPosition = TRUE; + } + + for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) { + if (pParams->layer[layer].surface.specified) { + NvBool ret = + nvAssignSurfaceArray(pDevEvo, + pOpenDevSurfaceHandles, + pParams->layer[layer].surface.handle, + FALSE /* isUsedByCursorChannel */, + TRUE /* isUsedByLayerChannel */, + pFlipState->layer[layer].pSurfaceEvo); + if (!ret) { + return FALSE; + } + + pFlipState->dirty.layer[layer] = TRUE; + } + + if (pParams->layer[layer].syncObjects.specified) { + NvBool ret; + + if (pParams->layer[layer].syncObjects.val.useSyncpt) { + return FALSE; + } + + nvkms_memset(&pFlipState->layer[layer].syncObject, + 0, + sizeof(pFlipState->layer[layer].syncObject)); + + ret = nvAssignSemaphoreEvoHwState(pDevEvo, + pOpenDevSurfaceHandles, + layer, + sd, + &pParams->layer[layer].syncObjects.val, + &pFlipState->layer[layer].syncObject); + if (!ret) { + return FALSE; + } + + pFlipState->dirty.layer[layer] = TRUE; + } + + if (pParams->layer[layer].completionNotifier.specified && + (pParams->layer[layer].completionNotifier.val.surface.surfaceHandle != 0)) { + + /* + * HeadSurface only supports client notifiers when running in + * swapgroup mode where each flip IOCTL will result in a real + * flip in HW. + */ + if (!pHsChannel->config.neededForSwapGroup) { + return FALSE; + } + + NvBool ret = nvAssignCompletionNotifierEvoHwState( + pDevEvo, + pOpenDevSurfaceHandles, + &pParams->layer[layer].completionNotifier.val, + layer, + &pFlipState->layer[layer].completionNotifier); + if (!ret) { + return FALSE; + } + + pFlipState->dirty.layer[layer] = TRUE; + } + + /* HeadSurface does not support timeStamp flips, yet. */ + if (pParams->layer[layer].timeStamp != 0) { + return FALSE; + } + } + + /* XXX Reject all unhandled flip parameters */ + + /* Validate that the requested changes can be performed by headSurface. */ + for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) { + if (!pFlipState->dirty.layer[layer]) { + continue; + } + + if (!HsIoctlFlipValidateOneHwState(&pFlipState->layer[layer], sd)) { + return FALSE; + } + } + + return TRUE; +} + +/*! + * Update headSurface for NVKMS_IOCTL_FLIP. + * + * XXX NVKMS HEADSURFACE TODO: handle/validate the rest of the flip request + * structure. 
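+ * + * This follows the interception pattern described at the top of this file: + * take inventory of the heads touched by the request, call down to + * nvFlipEvo() for the headSurface-less heads, and apply the request to the + * headSurface-ful heads here.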
+ */ +NvBool nvHsIoctlFlip( + NVDevEvoPtr pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsFlipRequestOneHead *pFlipHead, + NvU32 numFlipHeads, + NvBool commit, + struct NvKmsFlipReply *pReply) +{ + NvU32 i; + ct_assert((NV_MAX_FLIP_REQUEST_HEADS % 8) == 0); + NvU8 hsMask[NV_MAX_FLIP_REQUEST_HEADS / 8] = { }; + NvU32 nHsApiHeads = 0; + NvU32 nNonHsApiHeads = 0; + NvBool ret = FALSE; + + struct { + + NVHsRequestedFlipState flipState + [NVKMS_MAX_SUBDEVICES][NVKMS_MAX_HEADS_PER_DISP]; + + } *pWorkArea = NULL; + + /* Take inventory of which heads are touched by the request. */ + + for (i = 0; i < numFlipHeads; i++) { + const NvU32 apiHead = pFlipHead[i].head; + const NvU32 sd = pFlipHead[i].sd; + NVDispEvoPtr pDispEvo = pDevEvo->pDispEvo[sd]; + + if (pDispEvo->pHsChannel[apiHead] == NULL) { + nNonHsApiHeads++; + } else { + hsMask[i / 8] |= NVBIT(i % 8); + nHsApiHeads++; + } + } + nvAssert(numFlipHeads == nNonHsApiHeads + nHsApiHeads); + + /* + * Handle the common case: if there are no headSurface-ful heads touched by + * the request, call down and return. + */ + if (nHsApiHeads == 0) { + ret = nvFlipEvo(pDevEvo, + pOpenDev, + pFlipHead, numFlipHeads, commit, + pReply, + FALSE /* skipUpdate */, + TRUE /* allowFlipLock */); + goto done; + } + + pWorkArea = nvCalloc(1, sizeof(*pWorkArea)); + if (pWorkArea == NULL) { + goto done; + } + + /* + * Assign and validate flipState for any headSurface heads in the + * request. + */ + for (i = 0; i < numFlipHeads; i++) { + const NvU32 apiHead = pFlipHead[i].head; + const NvU32 sd = pFlipHead[i].sd; + NVDispEvoPtr pDispEvo = pDevEvo->pDispEvo[sd]; + + if ((hsMask[i / 8] & NVBIT(i % 8)) == 0) { + continue; + } + + if (!HsIoctlFlipAssignHwStateOneHead( + pDispEvo->pHsChannel[apiHead], + pDevEvo, + sd, + apiHead, + pOpenDev, + &pFlipHead[i].flip, + &pWorkArea->flipState[sd][apiHead])) { + goto done; + } + } + + /* + * If we got this far, we know there are headSurface-ful heads. If there + * are also headSurface-less heads, build a new request structure with the + * headSurface-ful heads removed and call down. + */ + + if (nNonHsApiHeads != 0) { + + NvBool tmp; + struct NvKmsFlipRequestOneHead *pFlipHeadLocal = + HsIoctlRemoveHsHeadsFromNvKmsFlipHead( + pDevEvo, pFlipHead, numFlipHeads, nNonHsApiHeads, hsMask); + + if (pFlipHeadLocal == NULL) { + goto done; + } + + tmp = nvFlipEvo(pDevEvo, + pOpenDev, + pFlipHeadLocal, nNonHsApiHeads, commit, + pReply, + FALSE /* skipUpdate */, + TRUE /* allowFlipLock */); + + // nvFlipEvo filled in pReply for the heads in pFlipHeadLocal. + // Move those replies to the right location for pFlipHead. + // + // Due to how HsIoctlRemoveHsHeadsFromNvKmsFlipHead() created + // pFlipHeadLocal, the entries will be in the same order as the + // original pFlipHead request, but some of the entries have been + // removed so the original array is longer. + // + // Iterate backwards through the local array (headLocal), which points + // to where the reply data was filled in by nvFlipEvo(). + // Keep an index into the original array (headOriginal) which points to + // the entry where the reply *should* be. This should always be >= + // headLocal. + // If the expected location for the reply is not the same as the local + // index, copy the reply to the right location and clear the local data + // (which was in the wrong place). 
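+ // + // For example (hypothetical heads), if pFlipHead[] is + // [hs0, plain0, hs1, plain1], then pFlipHeadLocal[] is [plain0, plain1] + // and nvFlipEvo() wrote replies into flipHead[0] and flipHead[1]. The + // loop below moves flipHead[1] to flipHead[3] and flipHead[0] to + // flipHead[1], clearing the vacated entries, so each reply lines up with + // its entry in the original pFlipHead[] array.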
+ { + NvS32 headOriginal = numFlipHeads - 1; + NvS32 headLocal; + for (headLocal = nNonHsApiHeads - 1; headLocal >= 0; headLocal--) { + while (pFlipHead[headOriginal].sd != pFlipHeadLocal[headLocal].sd || + pFlipHead[headOriginal].head != pFlipHeadLocal[headLocal].head) { + headOriginal--; + nvAssert(headOriginal >= 0); + } + if (headOriginal != headLocal) { + nvAssert(headOriginal > headLocal); + pReply->flipHead[headOriginal] = pReply->flipHead[headLocal]; + nvkms_memset(&pReply->flipHead[headLocal], 0, + sizeof(pReply->flipHead[headLocal])); + } + } + } + + nvFree(pFlipHeadLocal); + + if (!tmp) { + goto done; + } + } + + /* We cannot fail beyond this point. */ + + ret = TRUE; + + + /* If this is a validation-only request, we are done. */ + + if (!commit) { + goto done; + } + + for (i = 0; i < numFlipHeads; i++) { + const NvU32 apiHead = pFlipHead[i].head; + const NvU32 sd = pFlipHead[i].sd; + NVDispEvoPtr pDispEvo = pDevEvo->pDispEvo[sd]; + + if ((hsMask[i / 8] & NVBIT(i % 8)) == 0) { + continue; + } + + NVHsChannelEvoRec *pHsChannel = pDispEvo->pHsChannel[apiHead]; + const struct NvKmsFlipCommonParams *pParams = + &pFlipHead[i].flip; + NVHsRequestedFlipState *pFlipState = + &pWorkArea->flipState[sd][apiHead]; + + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + continue; + } + + if (pParams->layer[NVKMS_MAIN_LAYER].skipPendingFlips && + pFlipState->dirty.layer[NVKMS_MAIN_LAYER]) { + nvHsIdleFlipQueue(pHsChannel, TRUE /* force */); + } + } + + /* Finally, update the headSurface-ful heads in the request. */ + + for (i = 0; i < numFlipHeads; i++) { + const NvU32 apiHead = pFlipHead[i].head; + const NvU32 sd = pFlipHead[i].sd; + NVDispEvoPtr pDispEvo = pDevEvo->pDispEvo[sd]; + + if ((hsMask[i / 8] & NVBIT(i % 8)) == 0) { + continue; + } + + const NVHsRequestedFlipState *pFlipState = + &pWorkArea->flipState[sd][apiHead]; + + NVHsChannelEvoRec *pHsChannel = pDispEvo->pHsChannel[apiHead]; + NvU32 layer; + + nvAssert(pHsChannel != NULL); + + if (pFlipState->dirty.cursorPosition) { + HsIoctlMoveCursor( + pHsChannel, + pFlipState->cursor.x, + pFlipState->cursor.y); + } + + if (pFlipState->dirty.cursorSurface) { + HsIoctlSetCursorImage( + pHsChannel, + pFlipState->cursor.pSurfaceEvo); + } + + if (pFlipState->dirty.viewPortPointIn) { + HsIoctlPan(pHsChannel, &pFlipState->viewPortPointIn); + } + + /* + * XXX NVKMS HEADSURFACE TODO: Layers that are specified as part + * of the same NVKMS_IOCTL_FLIP request should be flipped + * atomically. But, layers that are specified separately should + * be allowed to flip separately. Update the headSurface flip + * queue handling to coordinate multi-layer atomic flips. + */ + for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) { + if (!pFlipState->dirty.layer[layer]) { + continue; + } + + if (layer == NVKMS_MAIN_LAYER) { + HsIoctlAssignSurfacesMaxEyes( + pHsChannel->flipQueueMainLayerState.pSurfaceEvo, + pFlipState->layer[layer].pSurfaceEvo); + } + + nvHsPushFlipQueueEntry(pHsChannel, layer, &pFlipState->layer[layer]); + + if (pHsChannel->config.neededForSwapGroup) { + pHsChannel->swapGroupFlipping = NV_TRUE; + } + } + } + +done: + + nvFree(pWorkArea); + + return ret; +} + diff --git a/src/nvidia-modeset/src/nvkms-headsurface-matrix.c b/src/nvidia-modeset/src/nvkms-headsurface-matrix.c new file mode 100644 index 0000000..6ba69cd --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-headsurface-matrix.c @@ -0,0 +1,661 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "nvkms-headsurface-matrix.h" +#include "nvkms-softfloat.h" +#include "nv-float.h" + +/*! + * Multiply the matrices 'l' and 'r', and return the result. + */ +static struct NvKmsMatrixF32 MultiplyMatrix( + const struct NvKmsMatrixF32 *l, + const struct NvKmsMatrixF32 *r) +{ + struct NvKmsMatrixF32 d = { }; + int dx, dy; + + for (dy = 0; dy < 3; dy++) { + for (dx = 0; dx < 3; dx++) { + float32_t v = NvU32viewAsF32(NV_FLOAT_ZERO); + int o; + for (o = 0; o < 3; o++) { + const float32_t a = f32_mul(l->m[dy][o], r->m[o][dx]); + v = f32_add(v, a); + } + d.m[dy][dx] = v; + } + } + + return d; +} + +/*! + * Generate a matrix that performs the translation described by tx in x and ty + * in y. + */ +static struct NvKmsMatrixF32 GenTranslationMatrix( + float32_t tx, + float32_t ty) +{ + struct NvKmsMatrixF32 ret = { }; + + ret.m[0][0] = NvU32viewAsF32(NV_FLOAT_ONE); + ret.m[0][1] = NvU32viewAsF32(NV_FLOAT_ZERO); + ret.m[0][2] = tx; + ret.m[1][0] = NvU32viewAsF32(NV_FLOAT_ZERO); + ret.m[1][1] = NvU32viewAsF32(NV_FLOAT_ONE); + ret.m[1][2] = ty; + ret.m[2][0] = NvU32viewAsF32(NV_FLOAT_ZERO); + ret.m[2][1] = NvU32viewAsF32(NV_FLOAT_ZERO); + ret.m[2][2] = NvU32viewAsF32(NV_FLOAT_ONE); + + return ret; +} + +/*! + * Generate a matrix that performs the scaling operation described by sx in x + * and sy in y. + */ +static struct NvKmsMatrixF32 GenScaleMatrix( + float32_t sx, + float32_t sy) +{ + struct NvKmsMatrixF32 ret = { }; + + ret.m[0][0] = sx; + ret.m[0][1] = NvU32viewAsF32(NV_FLOAT_ZERO); + ret.m[0][2] = NvU32viewAsF32(NV_FLOAT_ZERO); + ret.m[1][0] = NvU32viewAsF32(NV_FLOAT_ZERO); + ret.m[1][1] = sy; + ret.m[1][2] = NvU32viewAsF32(NV_FLOAT_ZERO); + ret.m[2][0] = NvU32viewAsF32(NV_FLOAT_ZERO); + ret.m[2][1] = NvU32viewAsF32(NV_FLOAT_ZERO); + ret.m[2][2] = NvU32viewAsF32(NV_FLOAT_ONE); + + return ret; +} + +/*! + * Generate a matrix that performs the rotation operation described by cosine + * 'c' and sine 's'. 
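+ * + * In plain-float terms, the resulting row-major matrix is: + * + * [ c -s 0 ] + * [ s c 0 ] + * [ 0 0 1 ]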
+ */ +static struct NvKmsMatrixF32 GenRotationMatrix( + float32_t c, + float32_t s) +{ + struct NvKmsMatrixF32 ret = { }; + const float32_t negOneF32 = NvU32viewAsF32(NV_FLOAT_NEG_ONE); + + ret.m[0][0] = c; + ret.m[0][1] = f32_mul(s, negOneF32); + ret.m[0][2] = NvU32viewAsF32(NV_FLOAT_ZERO); + ret.m[1][0] = s; + ret.m[1][1] = c; + ret.m[1][2] = NvU32viewAsF32(NV_FLOAT_ZERO); + ret.m[2][0] = NvU32viewAsF32(NV_FLOAT_ZERO); + ret.m[2][1] = NvU32viewAsF32(NV_FLOAT_ZERO); + ret.m[2][2] = NvU32viewAsF32(NV_FLOAT_ONE); + + return ret; +} + +/*! + * Generate the identity matrix. + */ +static struct NvKmsMatrixF32 GenIdentityMatrix(void) +{ + return GenScaleMatrix(NvU32viewAsF32(NV_FLOAT_ONE), + NvU32viewAsF32(NV_FLOAT_ONE)); +} + +/*! + * Transform x,y,q with a 3x3 affine transformation matrix (row-major). + */ +static inline void TransformVertex( + const struct NvKmsMatrixF32 *mat, + float32_t *pX, + float32_t *pY, + float32_t *pQ) +{ + const float32_t x_in = *pX; + const float32_t y_in = *pY; + const float32_t oneF32 = NvU32viewAsF32(NV_FLOAT_ONE); + float32_t w, oneOverW, x, y; + + x = F32_AxB_plus_CxD_plus_E(x_in, mat->m[0][0], + y_in, mat->m[0][1], + mat->m[0][2]); + y = F32_AxB_plus_CxD_plus_E(x_in, mat->m[1][0], + y_in, mat->m[1][1], + mat->m[1][2]); + w = F32_AxB_plus_CxD_plus_E(x_in, mat->m[2][0], + y_in, mat->m[2][1], + mat->m[2][2]); + oneOverW = f32_div(oneF32, w); + + x = f32_mul(x, oneOverW); + y = f32_mul(y, oneOverW); + + *pX = x; + *pY = y; + *pQ = oneOverW; +} + +/*! + * Transform pRectIn by the matrix, returning the result in pRectOut. + * + * XXX If we knew the matrix would produce a screen-aligned rectangle, we + * wouldn't need to transform as many points. + * + * XXX We should probably compute the screen-aligned rect inscribed by the + * transformed points, rather than compute the bounding box. + */ +static struct NvKmsRect TransformRect( + const struct NvKmsRect *pRectIn, + const struct NvKmsMatrixF32 *transform) +{ + /* + * Get the four corners of pRectIn: + * + * 0 3 + * +----------+ + * | | + * +----------+ + * 1 2 + */ + struct NvKmsPoint p[4] = { + [0] = { + .x = pRectIn->x, + .y = pRectIn->y, + }, + [1] = { + .x = pRectIn->x, + .y = pRectIn->y + pRectIn->height, + }, + [2] = { + .x = pRectIn->x + pRectIn->width, + .y = pRectIn->y + pRectIn->height, + }, + [3] = { + .x = pRectIn->x + pRectIn->width, + .y = pRectIn->y, + }, + }; + + NvU8 i; + NvU16 minx, maxx, miny, maxy; + struct NvKmsRect rectOut = { }; + + /* Apply the matrix transform to each point. */ + + for (i = 0; i < 4; i++) { + + float32_t x = ui32_to_f32(p[i].x); + float32_t y = ui32_to_f32(p[i].y); + float32_t unused; + + TransformVertex(transform, &x, &y, &unused); + + p[i].x = F32toNvU16(x); + p[i].y = F32toNvU16(y); + } + + /* Compute the screen-aligned bounding box of the transformed points. */ + + minx = p[0].x; + maxx = p[0].x; + miny = p[0].y; + maxy = p[0].y; + + for (i = 1; i < 4; i++) { + minx = NV_MIN(minx, p[i].x); + maxx = NV_MAX(maxx, p[i].x); + miny = NV_MIN(miny, p[i].y); + maxy = NV_MAX(maxy, p[i].y); + } + + rectOut.x = minx; + rectOut.y = miny; + rectOut.width = maxx - minx; + rectOut.height = maxy - miny; + + return rectOut; +} + +/*! + * Apply the rotation described by 'rotation' and 'viewPortOut' to the + * transformation matrix. + * + * \param[in] viewPortOut The viewPortOut region. + * \param[in] transform The current transformation matrix. + * \param[in] rotation The requested screen-aligned rotation. + * + * \return The resulting transformation matrix. 
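+ * + * For example, NVKMS_ROTATION_90 composes a rotation with c = 0, s = 1 with + * a translation by the transformed viewPortOut height in x, moving the + * rotated region back into the positive quadrant.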
+ */ +static struct NvKmsMatrixF32 ApplyRotationToMatrix( + const struct NvKmsRect *viewPortOut, + const struct NvKmsMatrixF32 *transform, + enum NvKmsRotation rotation) +{ + const float32_t zeroF32 = NvU32viewAsF32(NV_FLOAT_ZERO); + const float32_t oneF32 = NvU32viewAsF32(NV_FLOAT_ONE); + const float32_t negOneF32 = NvU32viewAsF32(NV_FLOAT_NEG_ONE); + + float32_t f_rot_cos, f_rot_sin, f_rot_dx, f_rot_dy; + float32_t width, height; + + struct NvKmsMatrixF32 m = *transform; + struct NvKmsMatrixF32 tmpMatrix; + + struct NvKmsRect transformedViewPortOut; + + if (rotation == NVKMS_ROTATION_0) { + return m; + } + + transformedViewPortOut = TransformRect(viewPortOut, transform); + + width = ui32_to_f32(transformedViewPortOut.width); + height = ui32_to_f32(transformedViewPortOut.height); + + switch (rotation) { + default: + case NVKMS_ROTATION_90: + f_rot_cos = zeroF32; f_rot_sin = oneF32; + f_rot_dx = height; f_rot_dy = zeroF32; + break; + case NVKMS_ROTATION_180: + f_rot_cos = negOneF32; f_rot_sin = zeroF32; + f_rot_dx = width; f_rot_dy = height; + break; + case NVKMS_ROTATION_270: + f_rot_cos = zeroF32; f_rot_sin = negOneF32; + f_rot_dx = zeroF32; f_rot_dy = width; + break; + } + + tmpMatrix = GenRotationMatrix(f_rot_cos, f_rot_sin); + m = MultiplyMatrix(&tmpMatrix, &m); + + tmpMatrix = GenTranslationMatrix(f_rot_dx, f_rot_dy); + m = MultiplyMatrix(&tmpMatrix, &m); + + return m; +} + +/*! + * Apply the reflection described by 'reflection[XY]', 'rotation' and + * 'viewPortOut' to the transformation matrix. + * + * \param[in] viewPortOut The viewPortOut region. + * \param[in] transform The current transformation matrix. + * \param[in] reflectionX Whether to reflect along the X axis. + * \param[in] reflectionY Whether to reflect along the Y axis. + * + * \return The resulting transformation matrix. + */ +static struct NvKmsMatrixF32 ApplyReflectionToMatrix( + const struct NvKmsRect *viewPortOut, + const struct NvKmsMatrixF32 *transform, + NvBool reflectionX, + NvBool reflectionY) +{ + const float32_t zeroF32 = NvU32viewAsF32(NV_FLOAT_ZERO); + const float32_t oneF32 = NvU32viewAsF32(NV_FLOAT_ONE); + const float32_t negOneF32 = NvU32viewAsF32(NV_FLOAT_NEG_ONE); + + float32_t f_scale_x, f_scale_y, f_scale_dx, f_scale_dy; + float32_t width, height; + + struct NvKmsMatrixF32 m = *transform; + struct NvKmsMatrixF32 tmpMatrix; + + struct NvKmsRect transformedViewPortOut; + + if (!reflectionX && !reflectionY) { + return m; + } + + transformedViewPortOut = TransformRect(viewPortOut, transform); + + width = ui32_to_f32(transformedViewPortOut.width); + height = ui32_to_f32(transformedViewPortOut.height); + + f_scale_x = oneF32; + f_scale_dx = zeroF32; + f_scale_y = oneF32; + f_scale_dy = zeroF32; + + if (reflectionX) { + f_scale_x = negOneF32; + f_scale_dx = width; + } + + if (reflectionY) { + f_scale_y = negOneF32; + f_scale_dy = height; + } + + tmpMatrix = GenScaleMatrix(f_scale_x, f_scale_y); + m = MultiplyMatrix(&tmpMatrix, &m); + + tmpMatrix = GenTranslationMatrix(f_scale_dx, f_scale_dy); + m = MultiplyMatrix(&tmpMatrix, &m); + + return m; +} + +/*! + * Apply the scaling described by 'viewPortIn' and 'viewPortOut' to the + * transformation matrix. + * + * \param[in] viewPortIn The viewPortIn region. + * \param[in] viewPortOut The viewPortOut region. + * \param[in] transform The current transformation matrix. + * + * \return The resulting transformation matrix. 
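+ * + * For example (hypothetical sizes), a 1920x1080 viewPortIn with a 960x540 + * transformed viewPortOut yields sx = sy = 2, mapping transformed + * viewPortOut coordinates onto the larger viewPortIn region.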
+ */ +static struct NvKmsMatrixF32 ScaleMatrixForViewPorts( + const struct NvKmsRect *viewPortIn, + const struct NvKmsRect *viewPortOut, + const struct NvKmsMatrixF32 *transform) +{ + const struct NvKmsRect transformedViewPortOut = + TransformRect(viewPortOut, transform); + + const float32_t inWidth = ui32_to_f32(viewPortIn->width); + const float32_t inHeight = ui32_to_f32(viewPortIn->height); + const float32_t outWidth = ui32_to_f32(transformedViewPortOut.width); + const float32_t outHeight = ui32_to_f32(transformedViewPortOut.height); + + struct NvKmsMatrixF32 tmpMatrix; + + const float32_t sx = f32_div(inWidth, outWidth); + const float32_t sy = f32_div(inHeight, outHeight); + + tmpMatrix = GenScaleMatrix(sx, sy); + + return MultiplyMatrix(&tmpMatrix, transform); +} + +/*! + * Translate the matrix for the ViewPortOut position. + * + * When headSurface state == FULL, the headSurface surface is the size of the + * visible region of the mode and headSurface rendering simulates the + * client-requested viewPortOut. Translate the headSurface transformation + * matrix for the ViewPortOut position. + * + * \param[in] state The headSurface state; we only translate for FULL. + * \param[in] viewPortOut The viewPortOut region. + * \param[in] transform The current transformation matrix. + */ +static struct NvKmsMatrixF32 TranslateMatrixForViewPortOut( + const NVHsConfigState state, + const struct NvKmsRect *viewPortOut, + const struct NvKmsMatrixF32 *transform) +{ + if (state != NVKMS_HEAD_SURFACE_CONFIG_STATE_FULL_HEAD_SURFACE) { + return *transform; + } + + const struct NvKmsRect transformedViewPortOut = + TransformRect(viewPortOut, transform); + + const float32_t x = ui32_to_f32(transformedViewPortOut.x); + const float32_t y = ui32_to_f32(transformedViewPortOut.y); + const float32_t negX = f32_mul(x, NvU32viewAsF32(NV_FLOAT_NEG_ONE)); + const float32_t negY = f32_mul(y, NvU32viewAsF32(NV_FLOAT_NEG_ONE)); + + const struct NvKmsMatrixF32 translationMatrix = + GenTranslationMatrix(negX, negY); + + return MultiplyMatrix(&translationMatrix, transform); +} + +/*! + * For pixelShift modes, bloat by x2 (pixelShift takes a 2x2 quad of input + * pixels for each output pixel). + * + * \param[in] transform The current transformation matrix. + * \param[in] pixelShift The pixelShift mode requested. + */ +static struct NvKmsMatrixF32 TransformMatrixForPixelShift( + const struct NvKmsMatrixF32 *transform, + enum NvKmsPixelShiftMode pixelShift) +{ + const float32_t twoF32 = NvU32viewAsF32(NV_FLOAT_TWO); + const struct NvKmsMatrixF32 pixelShiftBloatTransform = + GenScaleMatrix(twoF32, twoF32); + struct NvKmsMatrixF32 m = *transform; + + if (pixelShift == NVKMS_PIXEL_SHIFT_NONE) { + return m; + } + + return MultiplyMatrix(&pixelShiftBloatTransform, &m); +} + +/*! + * Multiply the client-specified transform with the current transform. + * + * \param[in] transform The current transformation matrix. + * \param[in] p The client request parameter structure. 
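+ * + * \return The resulting transformation matrix.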
+ */ +static struct NvKmsMatrixF32 ApplyClientTransformToMatrix( + const struct NvKmsMatrixF32 *transform, + const struct NvKmsSetModeHeadSurfaceParams *p) +{ + const struct NvKmsMatrixF32 clientTransform = + NvKmsMatrixToNvKmsMatrixF32(p->transform); + + nvAssert(p->transformSpecified); + + return MultiplyMatrix(&clientTransform, transform); +} + +static NvBool InvertMatrix( + struct NvKmsMatrixF32 *dst, + const struct NvKmsMatrixF32 *src) +{ + float64_t det; + int i, j; + static int a[3] = { 2, 2, 1 }; + static int b[3] = { 1, 0, 0 }; + const float64_t zeroF64 = i32_to_f64(0); + const float64_t oneF64 = i32_to_f64(1); + + det = zeroF64; + for (i = 0; i < 3; i++) { + float64_t p; + const int ai = a[i]; + const int bi = b[i]; + + float32_t tmp = F32_AxB_minus_CxD(src->m[ai][2], src->m[bi][1], + src->m[ai][1], src->m[bi][2]); + tmp = f32_mul(src->m[i][0], tmp); + + p = f32_to_f64(tmp); + + if (i == 1) { + p = F64_negate(p); + } + + det = f64_add(det, p); + } + + if (f64_eq(det, zeroF64)) { + return FALSE; + } + + det = f64_div(oneF64, det); + + for (j = 0; j < 3; j++) { + for (i = 0; i < 3; i++) { + float64_t p; + const int ai = a[i]; + const int aj = a[j]; + const int bi = b[i]; + const int bj = b[j]; + + const float32_t tmp = + F32_AxB_minus_CxD(src->m[ai][aj], src->m[bi][bj], + src->m[ai][bj], src->m[bi][aj]); + p = f32_to_f64(tmp); + + if (((i + j) & 1) != 0) { + p = F64_negate(p); + } + + p = f64_mul(det, p); + + dst->m[j][i] = f64_to_f32(p); + } + } + + return TRUE; +} + +/*! + * Calculate a warp mesh for this head. + * + * This constructs a simple 4-vertex "mesh" that renders a single quad using a + * triangle strip. The vertices are transformed from the viewPortIn region + * to normalized headSurface space using the inverse of the HS transform. + */ +static NvBool AssignStaticWarpMesh( + NVHsChannelConfig *p) +{ + int i; + + NvHsStaticWarpMesh *swm = &p->staticWarpMesh; + + const float32_t viewPortInWidthF = ui32_to_f32(p->viewPortIn.width); + const float32_t viewPortInHeightF = ui32_to_f32(p->viewPortIn.height); + const float32_t viewPortOutWidthF = ui32_to_f32(p->viewPortOut.width); + const float32_t viewPortOutHeightF = ui32_to_f32(p->viewPortOut.height); + + struct NvKmsMatrixF32 invertedTransform; + + if (!InvertMatrix(&invertedTransform, &p->transform)) { + return FALSE; + } + + swm->vertex[0].x = swm->vertex[0].u = NV_FLOAT_ZERO; + swm->vertex[0].y = swm->vertex[0].v = NV_FLOAT_ZERO; + + swm->vertex[1].x = swm->vertex[1].u = NV_FLOAT_ZERO; + swm->vertex[1].y = swm->vertex[1].v = NV_FLOAT_ONE; + + swm->vertex[2].x = swm->vertex[2].u = NV_FLOAT_ONE; + swm->vertex[2].y = swm->vertex[2].v = NV_FLOAT_ZERO; + + swm->vertex[3].x = swm->vertex[3].u = NV_FLOAT_ONE; + swm->vertex[3].y = swm->vertex[3].v = NV_FLOAT_ONE; + + for (i = 0; i < 4; i++) { + + float32_t x = NvU32viewAsF32(swm->vertex[i].x); + float32_t y = NvU32viewAsF32(swm->vertex[i].y); + float32_t q; + + // Scale to the viewPortIn region + x = f32_mul(x, viewPortInWidthF); + y = f32_mul(y, viewPortInHeightF); + + // Transform through the inverted HS matrix + TransformVertex(&invertedTransform, &x, &y, &q); + + // Normalize to the viewPortOut region + x = f32_div(x, viewPortOutWidthF); + y = f32_div(y, viewPortOutHeightF); + + swm->vertex[i].x = F32viewAsNvU32(x); + swm->vertex[i].y = F32viewAsNvU32(y); + swm->vertex[i].q = F32viewAsNvU32(q); + } + + return TRUE; +} + +/*!
+ * Assign NVHsChannelConfig::transform and NVHsChannelConfig::staticWarpMesh, + * based on the current viewports described in NVHsChannelConfig, and various + * client-requested state in NvKmsSetModeHeadSurfaceParams. + * + * \param[in,out] pChannelConfig The headSurface channel config. + * \param[in] p The NVKMS client headSurface parameters. + * + * \return TRUE if the NVHsChannelConfig fields could be successfully assigned. + * Otherwise, FALSE. + */ +NvBool nvHsAssignTransformMatrix( + NVHsChannelConfig *pChannelConfig, + const struct NvKmsSetModeHeadSurfaceParams *p) +{ + struct NvKmsMatrixF32 transform = GenIdentityMatrix(); + + transform = TranslateMatrixForViewPortOut( + pChannelConfig->state, + &pChannelConfig->viewPortOut, + &transform); + + transform = TransformMatrixForPixelShift( + &transform, + p->pixelShift); + + transform = ApplyRotationToMatrix( + &pChannelConfig->viewPortOut, + &transform, + p->rotation); + + transform = ApplyReflectionToMatrix( + &pChannelConfig->viewPortOut, + &transform, + p->reflectionX, + p->reflectionY); + + /* + * We treat client-specified transformation matrices and viewport scaling as + * mutually exclusive: when a client-specified transformation matrix is + * provided, the viewPortIn is already transformed by that matrix (the + * client needed to do that, in order to know the size of the surface). + * Calling ScaleMatrixForViewPorts() in the transformSpecified case would + * effectively scale the viewPortIn a second time, which would be incorrect. + */ + if (p->transformSpecified) { + transform = ApplyClientTransformToMatrix( + &transform, p); + } else { + transform = ScaleMatrixForViewPorts( + &pChannelConfig->viewPortIn, + &pChannelConfig->viewPortOut, + &transform); + } + + pChannelConfig->transform = transform; + + return AssignStaticWarpMesh(pChannelConfig); +} + diff --git a/src/nvidia-modeset/src/nvkms-headsurface-swapgroup.c b/src/nvidia-modeset/src/nvkms-headsurface-swapgroup.c new file mode 100644 index 0000000..6f32290 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-headsurface-swapgroup.c @@ -0,0 +1,949 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ + +#include "nvkms-headsurface-swapgroup.h" +#include "nvkms-headsurface-config.h" +#include "nvkms-headsurface.h" +#include "nvkms-utils.h" +#include "nvkms-private.h" + +/* + * When headSurface is used to implement a SwapGroup, there are several surfaces + * involved: + * + * +------+ + * | | Client Surface + * +------+ + * | | + * +---+ +---+ screen-aligned blits + * v v + * +------+ +------+ + * |(0) | |(1) | Staging Surfaces + * +------+ +------+ + * | | headSurface transformation + * v v + * +------+ +------+ + * |(0) | |(1) | + * |......| |......| Double-height headSurface surfaces. + * | | | | + * +------+ +------+ + * + * For each frame of headSurface: + * + * - All non-SwapGroup content from the Client Surface is copied to Staging + * Surface (0) (merging new non-SwapGroup content with old SwapGroup content). + * + * - All content is copied from the Client Surface to Staging Surface (1). + * + * - HeadSurface transformation (applying rotation, yuv420, etc) textures from + * Staging Surface (0) and renders to one half of double-height headSurface + * surface (0). + * + * - HeadSurface transformation (applying rotation, yuv420, etc) textures from + * Staging Surface (1) and renders to one half of double-height headSurface + * surface (1). + * + * - We use ViewPortIn to flip between halves of the double-height headSurface + * surfaces. + * + * - We use flip-locked flips to flip between the headSurface surfaces (0) and + * (1). + * + * For performance reasons the following optimizations are in place to + * reduce blitting overhead: + * + * - If Swapgroup content is full-screen we skip the Staging Surface + * entirely. There are no regions of Swapgroup and non-Swapgroup content + * that need to be glued together in the Staging Surface: we will only + * ever source updated content from the swapgroup client. We can also + * render transformed frames without the Staging Surface in this case. + * + * - With full-screen Swapgroup content we can also skip REQUEST_TYPE_VBLANK + * frames as there can't possibly be any non-Swapgroup content to be + * updated. We do have to keep rendering continuously though to update the + * mouse cursor, overlays, and other individual artifacts. We thus skip + * REQUEST_TYPE_VBLANK frames if the client keeps actively generating + * REQUEST_TYPE_SWAP_GROUP_READY frames. However, if the client pauses + * itself we fall back to rendering REQUEST_TYPE_VBLANK frames for cursor + * and other updates at vsync rate. + */ + +static void FlipSwapGroup( + NVDevEvoPtr pDevEvo, + NVSwapGroupRec *pSwapGroup); + +/*! + * Update whether this pSwapGroup needs headSurface. + * + * \param[in] pDevEvo The device the SwapGroup is on. + * \param[in] pSwapGroup The SwapGroup to update. + * \param[in] needed Whether headSurface is needed for this SwapGroup. + * + * The usage here is similar to what is done at modeset time: create an + * NVHsConfig and initialize it to describe the headSurface configuration + * required across the device. Allocate the resources for the NVHsConfig and + * apply them to the pDevEvo. + * + * \return Return whether the headSurface needed state could be successfully + * updated. 
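+ * + * As at modeset time, nvHsConfigStop() and nvHsConfigStart() are not + * allowed to fail, so all fallible work (allocation and, eventually, + * validation) happens before they are called.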
+ */ +static NvBool HsSwapGroupUpdateHeadSurfaceNeeded( + NVDevEvoPtr pDevEvo, + const NVSwapGroupRec *pSwapGroup, + const NvBool needed) +{ + NvBool ret = FALSE; + NVHsConfig *pHsConfig = nvCalloc(1, sizeof(*pHsConfig)); + + if (pHsConfig == NULL) { + goto done; + } + + nvHsConfigInitSwapGroup(pDevEvo, pSwapGroup, needed, pHsConfig); + + if (!nvHsConfigAllocResources(pDevEvo, pHsConfig)) { + goto done; + } + + /* + * XXX NVKMS HEADSURFACE TODO: validate the pHsConfig. If we fail + * validation, downgrade and try again. Repeat until we either pass + * validation or downgrading fails and we need to give up. + */ + + /* we cannot fail beyond this point */ + + nvHsConfigStop(pDevEvo, pHsConfig); + + nvHsConfigStart(pDevEvo, pHsConfig); + + nvHsConfigFreeResources(pDevEvo, pHsConfig); + + ret = TRUE; + +done: + nvFree(pHsConfig); + + return ret; +} + + +/*! + * Return whether there is a non-zero number of SwapGroup members for the + * specified device, disp, head. + */ +NvBool nvHsSwapGroupIsHeadSurfaceNeeded( + const NVDispEvoRec *pDispEvo, + const NvU32 apiHead) +{ + const NVSwapGroupRec *pSwapGroup = pDispEvo->pSwapGroup[apiHead]; + + if (pSwapGroup == NULL) { + return FALSE; + } + + return pSwapGroup->nMembers > 0; +} + + +/* + * We should only write to the NvKmsDeferredRequestFifo::semaphore[] element + * that the client requested. When the NVDeferredRequestFifoRec is not ready, + * store an invalid semaphore index value, so that bad indexing is obvious. + */ +#define INVALID_SEMAPHORE_INDEX 0xFFFFFFFF + + +/*! + * Return whether all current members of the SwapGroup are ready to swap. + */ +static NvBool SwapGroupIsReady(const NVSwapGroupRec *pSwapGroup) +{ + /* + * As an optimization, we maintain nMembers and nMembersReady so that we can + * quickly check if all members are ready. + * + * If a member joins while a swapgroup has a pending flip, it is put in a + * pendingJoined state, added to nMembersPendingJoined, and not counted + * here. + */ + NvBool ret = (pSwapGroup->nMembers - pSwapGroup->nMembersPendingJoined) == + pSwapGroup->nMembersReady; + + /* This should only be called if a swap group hasn't been zombified. */ + nvAssert(!pSwapGroup->zombie); + +#if defined(DEBUG) + /* + * Assert that bookkeeping matches between + * pDeferredRequestFifo->swapGroup.ready, + * pDeferredRequestFifo->swapGroup.pendingJoined, and nMembersReady. + */ + NvBool assertRet = TRUE; + + const NVDeferredRequestFifoRec *pDeferredRequestFifo; + + FOR_ALL_DEFERRED_REQUEST_FIFOS_IN_SWAP_GROUP(pSwapGroup, + pDeferredRequestFifo) { + + nvAssert(pDeferredRequestFifo->swapGroup.pSwapGroup == pSwapGroup); + if (!pDeferredRequestFifo->swapGroup.ready && + !pDeferredRequestFifo->swapGroup.pendingJoined) { + assertRet = FALSE; + break; + } + } + + nvAssert(assertRet == ret); +#endif + + return ret; +} + + +/*! + * Release the pDeferredRequestFifo member of the SwapGroup. + */ +static void ReleaseDeferredRequestFifo( + NVDeferredRequestFifoRec *pDeferredRequestFifo) +{ + const NvGpuSemaphore semReadyValue = { + .data[0] = NVKMS_DEFERRED_REQUEST_SEMAPHORE_VALUE_SWAP_GROUP_READY, + }; + + const NvU32 semIndex = pDeferredRequestFifo->swapGroup.semaphoreIndex; + struct NvKmsDeferredRequestFifo *pFifo = pDeferredRequestFifo->fifo; + NVSwapGroupRec *pSwapGroup = pDeferredRequestFifo->swapGroup.pSwapGroup; + + /* This should only be called if the member is ready. 
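+     * (nvHsSwapGroupRelease() accordingly skips members that are still in
+     * the pendingJoined state.)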
*/ + nvAssert(pDeferredRequestFifo->swapGroup.ready); + + /* + * This shouldn't be called if a new member has joined and submitted + * swapready while the swap group had an outstanding flip (i.e. + * pendingJoined is true). In that case: + * + * - nvHsSwapGroupReady() will mark this member as pendingReady + * - When the flip completes, nvHsSwapGroupRelease will release all members + * that were present when the flip was kicked off, and promote + * pendingJoined/pendingReady members to joined/ready + * - When the original members submit another ready (nvHsSwapGroupReady), + * the last non-ready original member leaves (nvHsLeaveSwapGroup), or + * the last non-pendingReady member leaves between kicking off a flip + * and the first vblank after the flip completes (nvHsSwapGroupRelease), + * a new flip is kicked off. + */ + nvAssert(!pDeferredRequestFifo->swapGroup.pendingJoined && + !pDeferredRequestFifo->swapGroup.pendingReady); + + if (semIndex < ARRAY_LEN(pFifo->semaphore)) { + pFifo->semaphore[semIndex] = semReadyValue; + + nvSendUnicastEvent(pDeferredRequestFifo->swapGroup.pOpenUnicastEvent); + + } else { + + /* + * The semaphoreIndex is validated in nvHsSwapGroupReady() before + * assignment, so it should always be valid by the time we get here. + */ + nvAssert(!"Invalid semIndex"); + } + + pDeferredRequestFifo->swapGroup.ready = FALSE; + pDeferredRequestFifo->swapGroup.semaphoreIndex = INVALID_SEMAPHORE_INDEX; + + /* + * This may be called if a deferred request fifo entry is being processed + * after its associated swap group has been freed, in which case, + * pDeferredRequestFifo->swapGroup.pSwapGroup will be NULL. + */ + if (pSwapGroup) { + nvAssert(!pSwapGroup->zombie); + nvAssert(pSwapGroup->nMembersReady > 0); + pSwapGroup->nMembersReady--; + } +} + + +/*! + * Release all members of the SwapGroup. + */ +void nvHsSwapGroupRelease( + NVDevEvoPtr pDevEvo, + NVSwapGroupRec *pSwapGroup) +{ + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvBool readiedFifos = FALSE; + + /* This should only be called if a swap group hasn't been zombified. */ + nvAssert(!pSwapGroup->zombie); + + FOR_ALL_DEFERRED_REQUEST_FIFOS_IN_SWAP_GROUP(pSwapGroup, + pDeferredRequestFifo) { + if (!pDeferredRequestFifo->swapGroup.pendingJoined) { + ReleaseDeferredRequestFifo(pDeferredRequestFifo); + } + } + + nvAssert(pSwapGroup->nMembersReady == 0); + + pSwapGroup->pendingFlip = FALSE; + + /* + * If a new member joins or submits swap ready between FlipSwapGroup() + * and nvHsSwapGroupRelease, it enters the pendingJoined and pendingReady + * state to avoid changing swap group state while a flip is in flight. + * + * Now that the flip has completed, promote these members to fully joined/ + * fully ready. 
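+     * If any member promoted here completes the set of ready members, the
+     * readiedFifos check at the bottom of this function kicks off the next
+     * flip.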
+ */ + if (pSwapGroup->nMembersPendingJoined != 0) { + FOR_ALL_DEFERRED_REQUEST_FIFOS_IN_SWAP_GROUP(pSwapGroup, + pDeferredRequestFifo) { + if (pDeferredRequestFifo->swapGroup.pendingJoined) { + pDeferredRequestFifo->swapGroup.pendingJoined = FALSE; + nvAssert(pSwapGroup->nMembersPendingJoined > 0); + pSwapGroup->nMembersPendingJoined--; + } + + if (pDeferredRequestFifo->swapGroup.pendingReady) { + pDeferredRequestFifo->swapGroup.pendingReady = FALSE; + pDeferredRequestFifo->swapGroup.ready = TRUE; + pSwapGroup->nMembersReady++; + readiedFifos = TRUE; + } + } + + nvAssert(pSwapGroup->nMembersPendingJoined == 0); + } + + /* + * If any pending joined fifos submitted SWAP_READY while a flip was in + * flight, they were promoted from pending ready to ready above; if the + * fifos that were previously joined to the swap group left while their + * last flip was in flight, then we need to kick off a new flip for the + * previously pending ready fifos now. + */ + if (readiedFifos && SwapGroupIsReady(pSwapGroup)) { + FlipSwapGroup(pDevEvo, pSwapGroup); + } +} + + +/*! + * Enqueue a headSurface flip on all heads of the pSwapGroup. + * + * Now that the specified pSwapGroup is ready, call + * nvHsNextFrame(SWAP_GROUP_READY) for all active heads in the pSwapGroup. This + * will render a new frame of headSurface, including the now complete SwapGroup + * content, and kick off a flip to the new frame. The flip will only complete + * once the entire SwapBarrier is ready. + * + * Once headSurface sees that these flips completed, it will call + * nvHsSwapGroupRelease() to release all SwapGroup members. + */ +static void FlipSwapGroup( + NVDevEvoPtr pDevEvo, + NVSwapGroupRec *pSwapGroup) +{ + NVHsDeviceEvoPtr pHsDevice = pDevEvo->pHsDevice; + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + NvBool waitForFlip = FALSE; + + /* + * We should never kick off a new flip before the previous flip has + * completed and the swap group has been released. + */ + nvAssert(!pSwapGroup->pendingFlip); + + pSwapGroup->pendingFlip = TRUE; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 apiHead; + for (apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->pSwapGroup); apiHead++) { + if (pDispEvo->pSwapGroup[apiHead] == pSwapGroup) { + NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead]; + + if (pHsChannel == NULL) { + continue; + } + + nvHsNextFrame(pHsDevice, pHsChannel, + NV_HS_NEXT_FRAME_REQUEST_TYPE_SWAP_GROUP_READY); + waitForFlip = TRUE; + } + } + } + + /* + * If there are no active heads in the pSwapGroup, then there are no flips + * to wait for: release the SwapGroup now. + */ + if (!waitForFlip) { + nvHsSwapGroupRelease(pDevEvo, pSwapGroup); + } +} + + +/*! + * Check that the NvKmsSwapGroupConfig is valid for the given pDevEvo. + */ +static NvBool HsSwapGroupValidateConfig( + const NVDevEvoRec *pDevEvo, + const struct NvKmsSwapGroupConfig *pConfig) +{ + const NvU32 validHeadMask = NVBIT(pDevEvo->numApiHeads) - 1; + NvU32 dispIndex; + + for (dispIndex = 0; dispIndex < ARRAY_LEN(pConfig->disp); dispIndex++) { + + if (pConfig->disp[dispIndex].headMask == 0) { + continue; + } + + /* Fail if the config describes disps not present on the pDevEvo. */ + + if (dispIndex >= pDevEvo->nDispEvo) { + return FALSE; + } + + /* Fail if the config describes heads not present on the disp. */ + + if ((pConfig->disp[dispIndex].headMask & ~validHeadMask) != 0) { + return FALSE; + } + } + + return TRUE; +} + +/*! + * Allocate a SwapGroup. 
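+ *
+ * The new SwapGroup is returned holding a single reference; it is ultimately
+ * torn down by nvHsFreeSwapGroup(), which drops that reference (or leaves a
+ * "zombie" behind if other references remain).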
+ * + * This validates that the requested configuration is valid, and adds the + * SwapGroup to pDevEvo's list of SwapGroups. + */ +NVSwapGroupRec* nvHsAllocSwapGroup( + NVDevEvoPtr pDevEvo, + const struct NvKmsAllocSwapGroupRequest *pRequest) +{ + NVSwapGroupRec *pSwapGroup; + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + + if (!HsSwapGroupValidateConfig(pDevEvo, &pRequest->config)) { + return NULL; + } + + /* Are there heads requested that already belong to another SwapGroup? */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 apiHead; + FOR_ALL_HEADS(apiHead, pRequest->config.disp[dispIndex].headMask) { + if (pDispEvo->pSwapGroup[apiHead] != NULL) { + return NULL; + } + } + } + + pSwapGroup = nvCalloc(1, sizeof(*pSwapGroup)); + + if (pSwapGroup == NULL) { + return NULL; + } + + if (!nvHsIncrementSwapGroupRefCnt(pSwapGroup)) { + nvFree(pSwapGroup); + return NULL; + } + + nvListInit(&pSwapGroup->deferredRequestFifoList); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 apiHead; + FOR_ALL_HEADS(apiHead, pRequest->config.disp[dispIndex].headMask) { + pDispEvo->pSwapGroup[apiHead] = pSwapGroup; + } + } + + return pSwapGroup; +} + + +/*! + * Returns a swap group with the given handle from the given set of swap group + * handles. + * + * Does not return NULL if pSwapGroup->zombie is TRUE; the only operation we + * want to perform on a swap group that has already been freed but hasn't lost + * all of its references yet is NVKMS_IOCTL_RELEASE_SWAP_GROUP; all other + * callers should use nvHsGetSwapGroup. + */ +NVSwapGroupRec *nvHsGetSwapGroupStruct( + const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsSwapGroupHandle handle) +{ + return nvEvoGetPointerFromApiHandle(pEvoApiHandles, handle); +} + + +/*! + * Returns a swap group with the given handle from the given set of swap group + * handles, or NULL if the swap group is in the "zombie" state. + * + * A swap group is in the "zombie" state if nvHsFreeSwapGroup() has removed it + * from pDevEvo->swapGroupList, removed its deferred request fifos from + * pSwapGroup->deferredRequestFifoList, and freed its clip list, but its + * reference count is nonzero; in that case, most operations on that swap group + * should call this function and behave as if that swap group no longer exists. + */ +NVSwapGroupRec *nvHsGetSwapGroup( + const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsSwapGroupHandle handle) +{ + NVSwapGroupRec *pSwapGroup = nvHsGetSwapGroupStruct(pEvoApiHandles, handle); + + if (pSwapGroup == NULL) { + return NULL; + } + + if (pSwapGroup->zombie) { + return NULL; + } + + return pSwapGroup; +} + + +/*! + * Increment the swap group's reference count, failing if refCnt is already + * NV_U64_MAX. + */ +NvBool nvHsIncrementSwapGroupRefCnt(NVSwapGroupPtr pSwapGroup) +{ + if (pSwapGroup->refCnt == NV_U64_MAX) { + return FALSE; + } + + pSwapGroup->refCnt++; + + return TRUE; +} + + +/*! + * Decrement the swap group's reference count, and free it if there are no more + * references to it. + */ +void nvHsDecrementSwapGroupRefCnt(NVSwapGroupPtr pSwapGroup) +{ + nvAssert(pSwapGroup->refCnt >= 1); + pSwapGroup->refCnt--; + + if (pSwapGroup->refCnt == 0) { + nvFree(pSwapGroup); + } +} + + +/*! + * Free the SwapGroup. + * + * - Make any NVDeferredRequestFifoRec members implicitly leave the SwapGroup. + * + * - Remove the SwapGroup from pDevEvo's list of SwapGroups. + * + * - Free the SwapGroup's pClipList + * + * - Mark the SwapGroup as a "zombie" and decrement its reference count. 
If + * this removes the last reference to the swap group, free it immediately. + * Otherwise, any remaining references to the swap group need to handle + * the "zombie" swap group correctly. + */ +void nvHsFreeSwapGroup( + NVDevEvoPtr pDevEvo, + NVSwapGroupRec *pSwapGroup) +{ + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NVDeferredRequestFifoRec *pDeferredRequestFifoTmp; + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + + /* + * Be careful to use the "_safe" loop macro, because nvHsLeaveSwapGroup() + * will remove pDeferredRequestFifo from + * pSwapGroup->deferredRequestFifoList. + */ + nvListForEachEntry_safe(pDeferredRequestFifo, + pDeferredRequestFifoTmp, + &pSwapGroup->deferredRequestFifoList, + swapGroup.deferredRequestFifoListEntry) { + + nvHsLeaveSwapGroup(pDevEvo, pDeferredRequestFifo, TRUE /* teardown */); + } + + nvAssert(pSwapGroup->nMembers == 0); + nvAssert(pSwapGroup->nMembersReady == 0); + + /* + * XXX NVKMS HEADSURFACE TODO: Shutdown headSurface, if this SwapGroup was + * forcing headSurface on. + */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 apiHead; + for (apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->pSwapGroup); apiHead++) { + if (pDispEvo->pSwapGroup[apiHead] == pSwapGroup) { + pDispEvo->pSwapGroup[apiHead] = NULL; + } + } + } + + nvFree(pSwapGroup->pClipList); + pSwapGroup->pClipList = NULL; + + pSwapGroup->zombie = TRUE; + nvHsDecrementSwapGroupRefCnt(pSwapGroup); +} + + +/*! + * Given an array of {deferred request fifo, swapgroup} tuples, join each + * deferred request fifo to its corresponding swapgroup. If any join causes + * a headsurface transition which fails, clean up by undoing any headsurface + * transitions which succeeded. + */ +NvBool nvHsJoinSwapGroup( + NVHsJoinSwapGroupWorkArea *joinSwapGroupWorkArea, + NvU32 numHandles, + NvBool pendingJoin) +{ + NvU32 i; + + for (i = 0; i < numHandles; i++) { + NVDevEvoPtr pDevEvo = joinSwapGroupWorkArea[i].pDevEvo; + NVSwapGroupRec *pSwapGroup = joinSwapGroupWorkArea[i].pSwapGroup; + NVDeferredRequestFifoRec *pDeferredRequestFifo = + joinSwapGroupWorkArea[i].pDeferredRequestFifo; + + /* + * If we are transitioning from 0 to 1 nMembers, change the "needed" state + * of headSurface. + */ + if (pSwapGroup->nMembers == 0) { + /* + * pendingJoin should only be true if a client joins a + * pDeferredRequestFifo to a SwapGroup while a SwapGroup flip is + * pending on any SwapGroup that is being joined as part of this + * collective join; in that case, there must already be at least + * one fifo joined to each SwapGroup this client is joining, or we + * may do a headsurface transition with a SwapGroup flip pending. + */ + nvAssert(!pendingJoin); + + if (!HsSwapGroupUpdateHeadSurfaceNeeded(pDevEvo, pSwapGroup, TRUE)) { + goto fail; + } + + /* + * Keep track of all the swapgroups which have successfully caused + * a headsurface transition, so we can disable headsurface on them + * if we fail to enable headsurface on a subsequent entry. + */ + joinSwapGroupWorkArea[i].enabledHeadSurface = TRUE; + } + + /* This should only be called if a swap group hasn't been zombified. 
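+         * (Zombies are expected to have been filtered out by
+         * nvHsGetSwapGroup() before the work area was built.)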
*/ + nvAssert(!pSwapGroup->zombie); + + pSwapGroup->nMembers++; + + nvkms_memset(&pDeferredRequestFifo->swapGroup, 0, + sizeof(pDeferredRequestFifo->swapGroup)); + + if (pendingJoin) { + pDeferredRequestFifo->swapGroup.pendingJoined = TRUE; + pSwapGroup->nMembersPendingJoined++; + } + + pDeferredRequestFifo->swapGroup.pSwapGroup = pSwapGroup; + pDeferredRequestFifo->swapGroup.semaphoreIndex = INVALID_SEMAPHORE_INDEX; + + nvListAppend(&pDeferredRequestFifo->swapGroup.deferredRequestFifoListEntry, + &pSwapGroup->deferredRequestFifoList); + } + + return TRUE; + +fail: + /* + * Enabling headsurface for one of the swapgroups in this request failed; + * undo any successful headsurface enablements that happened earlier. + */ + for (i = 0; i < numHandles; i++) { + if (joinSwapGroupWorkArea[i].enabledHeadSurface) { + NVDevEvoPtr pDevEvo = joinSwapGroupWorkArea[i].pDevEvo; + NVSwapGroupRec *pSwapGroup = joinSwapGroupWorkArea[i].pSwapGroup; + if (!HsSwapGroupUpdateHeadSurfaceNeeded(pDevEvo, pSwapGroup, FALSE)) { + nvAssert(!"Failed nvHsJoinSwapGroup cleanup."); + } + } + } + + return FALSE; +} + + +/*! + * Remove the pDeferredRequestFifo from the SwapGroup. + */ +void nvHsLeaveSwapGroup( + NVDevEvoPtr pDevEvo, + NVDeferredRequestFifoRec *pDeferredRequestFifo, + NvBool teardown) +{ + NVSwapGroupRec *pSwapGroup = pDeferredRequestFifo->swapGroup.pSwapGroup; + NvBool removingReadyFifo = FALSE; + + if (pSwapGroup == NULL) { + return; + } + + /* This should only be called if a swap group hasn't been zombified. */ + nvAssert(!pSwapGroup->zombie); + + /* + * If the last member of the SwapGroup is leaving, change the "needed" state + * of headSurface. + */ + if (pSwapGroup->nMembers == 1) { + if (!HsSwapGroupUpdateHeadSurfaceNeeded(pDevEvo, pSwapGroup, FALSE)) { + nvAssert(!"Failed to transition out of headSurface"); + /* XXX NVKMS HEADSURFACE TODO: we need to do something here... */ + } + } + + nvListDel(&pDeferredRequestFifo->swapGroup.deferredRequestFifoListEntry); + pDeferredRequestFifo->swapGroup.pSwapGroup = NULL; + + nvRemoveUnicastEvent(pDeferredRequestFifo->swapGroup.pOpenUnicastEvent); + + if (pDeferredRequestFifo->swapGroup.ready) { + nvAssert(pSwapGroup->nMembersReady > 0); + removingReadyFifo = TRUE; + pSwapGroup->nMembersReady--; + } + + nvAssert(pSwapGroup->nMembers > 0); + pSwapGroup->nMembers--; + + /* + * Release the SwapGroup if this member was the only unready member of the + * SwapGroup. + * + * We only want to do this if we're not in the process of removing + * every member from the swap group (i.e. while the swap group is being + * freed), since this may trigger a flip depending on the order in which + * the deferred request fifos leave the swap group if some members are + * ready and some aren't. + * + * In addition, we only want to do this if the member we're removing was + * the last member preventing the SwapGroup from flipping; otherwise, we + * may kick off a redundant flip here if a member leaves between the swap + * group kicking off a flip and a subsequent vblank or headsurface + * transition releasing the swapgroup. + */ + if (!teardown && !removingReadyFifo && (pSwapGroup->nMembers != 0)) { + if (SwapGroupIsReady(pSwapGroup)) { + FlipSwapGroup(pDevEvo, pSwapGroup); + } + } +} + + +/*! + * Update the clip list of the SwapGroup. + * + * \param[in] pDevEvo The device the SwapGroup is on. + * \param[in] pSwapGroup The SwapGroup to modify. + * \param[in] nClips The number of NvKmsRects in pClipList. + * \param[in] pClipList The array of NvKmsRects. 
This is dynamically
+ * allocated by the caller.
+ *
+ * \return Return whether the clip list was updated. If this returns TRUE,
+ * then pSwapGroup takes responsibility for freeing pClipList (either
+ * when the next clip list update occurs, or when freeing the
+ * pSwapGroup).
+ */
+NvBool nvHsSetSwapGroupClipList(
+    NVDevEvoPtr pDevEvo,
+    NVSwapGroupRec *pSwapGroup,
+    const NvU16 nClips,
+    struct NvKmsRect *pClipList)
+{
+    NvU16 i;
+
+    /*
+     * TODO:
+     *
+     * - If clip list is transitioning from empty to non-empty, allocate
+     *   headSurface SwapGroup resources.
+     *
+     * - If clip list is transitioning from non-empty to empty, free headSurface
+     *   SwapGroup resources.
+     */
+
+    /* Reject the clip list if any of the rects overflows NvU16. */
+
+    for (i = 0; i < nClips; i++) {
+
+        if (A_plus_B_greater_than_C_U16(pClipList[i].x,
+                                        pClipList[i].width,
+                                        NV_U16_MAX)) {
+            return FALSE;
+        }
+
+        if (A_plus_B_greater_than_C_U16(pClipList[i].y,
+                                        pClipList[i].height,
+                                        NV_U16_MAX)) {
+            return FALSE;
+        }
+    }
+
+    nvFree(pSwapGroup->pClipList);
+
+    pSwapGroup->pClipList = pClipList;
+    pSwapGroup->nClips = nClips;
+
+    /* The cliplists we receive here originate straight from
+     * UpdateSwapGroupClipList() in nvx_clip.c. The clips are exclusive in
+     * nature, i.e. they describe areas without swapgroup content. The
+     * cliplists come constructed depending on the screen content as
+     * follows:
+     *
+     * 1) No swapgroup content at all: NULL cliplist
+     * 2) Swapgroup and non-swapgroup content: cliplist contains regions
+     *    covering all the non-swapgroup areas
+     * 3) Only swapgroup content: a cliplist of length 1 containing an empty
+     *    clip region. This is what we detect and cache here.
+     */
+    pSwapGroup->swapGroupIsFullscreen = (pSwapGroup->nClips == 1 &&
+                                         pSwapGroup->pClipList[0].x == 0 &&
+                                         pSwapGroup->pClipList[0].y == 0 &&
+                                         pSwapGroup->pClipList[0].width == 0 &&
+                                         pSwapGroup->pClipList[0].height == 0);
+
+    return TRUE;
+}
+
+
+/*!
+ * Mark the pDeferredRequestFifo SwapGroup member as ready.
+ */
+void nvHsSwapGroupReady(
+    NVDevEvoPtr pDevEvo,
+    NVDeferredRequestFifoRec *pDeferredRequestFifo,
+    const NvU32 request)
+{
+    const NvU32 semaphoreIndex =
+        DRF_VAL(KMS, _DEFERRED_REQUEST, _SEMAPHORE_INDEX, request);
+    const NvU32 perEyeStereo =
+        DRF_VAL(KMS, _DEFERRED_REQUEST,
+                _SWAP_GROUP_READY_PER_EYE_STEREO, request);
+
+    NVSwapGroupRec *pSwapGroup = pDeferredRequestFifo->swapGroup.pSwapGroup;
+
+    if (semaphoreIndex >= NVKMS_MAX_DEFERRED_REQUESTS) {
+        return;
+    }
+
+    /*
+     * Duplicate execution of a deferred request fifo entry likely indicates
+     * a poorly behaved client, so assert that this semaphore index hasn't
+     * transitioned yet.
+     */
+    nvAssert(pDeferredRequestFifo->fifo->semaphore[semaphoreIndex].data[0] !=
+             NVKMS_DEFERRED_REQUEST_SEMAPHORE_VALUE_SWAP_GROUP_READY);
+
+    pDeferredRequestFifo->swapGroup.semaphoreIndex = semaphoreIndex;
+
+    if (pDeferredRequestFifo->swapGroup.pendingJoined) {
+        /*
+         * This deferred request fifo joined and was marked ready between its
+         * swap group kicking off a flip and that flip completing. Mark it
+         * as pendingReady now, and it will be promoted to ready once the
+         * swap group is released.
+ */ + pDeferredRequestFifo->swapGroup.pendingReady = TRUE; + return; + } + + pDeferredRequestFifo->swapGroup.ready = TRUE; + pDeferredRequestFifo->swapGroup.perEyeStereo = + perEyeStereo == NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_EYE; + + /* + * This may be called if a deferred request fifo entry is being processed + * after its associated swap group has been freed, in which case, + * pDeferredRequestFifo->swapGroup.pSwapGroup will be NULL and we should + * release the deferred request fifo. + */ + if (pSwapGroup == NULL) { + ReleaseDeferredRequestFifo(pDeferredRequestFifo); + return; + } + + nvAssert(pSwapGroup->nMembersReady < NV_U32_MAX); + pSwapGroup->nMembersReady++; + nvAssert(pSwapGroup->nMembersReady <= pSwapGroup->nMembers); + + /* Kick off a SwapGroup flip when all members are ready. */ + if (SwapGroupIsReady(pSwapGroup)) { + FlipSwapGroup(pDevEvo, pSwapGroup); + } +} + +/*! + * Return the reconciled perEyeStereo setting across all deferred request fifos + * joined to this SwapGroup. + * + * If any deferred request fifo wants per-eye presentation (perEyeStereo == + * TRUE), return TRUE for the entire SwapGroup. Otherwise, return FALSE (i.e., + * per-pair presentation). + */ +NvBool nvHsSwapGroupGetPerEyeStereo( + const NVSwapGroupRec *pSwapGroup) +{ + const NVDeferredRequestFifoRec *pDeferredRequestFifo; + + FOR_ALL_DEFERRED_REQUEST_FIFOS_IN_SWAP_GROUP(pSwapGroup, + pDeferredRequestFifo) { + if (pDeferredRequestFifo->swapGroup.perEyeStereo) { + return TRUE; + } + } + + return FALSE; +} diff --git a/src/nvidia-modeset/src/nvkms-headsurface.c b/src/nvidia-modeset/src/nvkms-headsurface.c new file mode 100644 index 0000000..659a56f --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-headsurface.c @@ -0,0 +1,2975 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "nvkms-types.h"
+#include "nvkms-headsurface.h"
+#include "nvkms-headsurface-3d.h"
+#include "nvkms-headsurface-priv.h"
+#include "nvkms-headsurface-swapgroup.h"
+#include "nvkms-utils.h"
+#include "nvkms-rmapi.h"
+#include "nvkms-surface.h"
+#include "nvkms-sync.h"
+#include "nvkms-flip.h"
+#include "nvkms-private.h"
+#include "nvkms-evo.h"
+#include "nvkms-dma.h"
+#include "nvkms-modeset.h"
+#include "nvkms-rm.h"
+
+#include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */
+#include <class/cl003e.h> /* NV01_MEMORY_SYSTEM */
+#include
+#include <class/clb06f.h> /* MAXWELL_CHANNEL_GPFIFO_A */
+
+static NvBool AllocNotifiers(NVHsDeviceEvoRec *pHsDevice);
+static void FreeNotifiers(NVHsDeviceEvoRec *pHsDevice);
+static void HsProcFsRecordFullscreenSgFrames(NVHsChannelEvoPtr pHsChannel,
+                                             NvBool isFullscreen);
+
+static NvU32 GetLog2GobsPerBlockY(NvU32 height)
+{
+    NvU32 log2GobsPerBlockY = 4; // 16 gobs/block
+
+    const NvU64 heightAndOneHalf = (NvU64)height + ((NvU64)height/2ULL);
+    const NvU64 nvFermiBlockLinearGobHeight = NVKMS_BLOCK_LINEAR_GOB_HEIGHT;
+
+    // If we're wasting too much memory, cap the block height
+    while ((log2GobsPerBlockY > 0U) &&
+           (((nvFermiBlockLinearGobHeight * ((NvU64)1ULL << log2GobsPerBlockY))) >
+            heightAndOneHalf)) {
+        log2GobsPerBlockY--;
+    }
+
+    // If there is more than one gob per block,
+    if (log2GobsPerBlockY > 0U) {
+
+        // Proposed shrunk block size.
+        // compute a new proposedBlockSize, based on a gob size that is half
+        // of the current value (log2 - 1). the "if(log2 > 0)" above keeps this
+        // value always ">= 0".
+        NvU32 proposedBlockSize =
+            NVKMS_BLOCK_LINEAR_GOB_HEIGHT << (log2GobsPerBlockY - 1U);
+
+        // While the proposedBlockSize is greater than the image size,
+        while (proposedBlockSize >= height) {
+            // It's safe to cut the gobs per block in half.
+            --log2GobsPerBlockY;
+
+            // If we've hit 1 gob per block, stop.
+            if (log2GobsPerBlockY == 0U) {
+                break;
+            }
+            // Otherwise, divide the proposed block dimension/size by two.
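+            // (Note: the capping loop above already leaves proposedBlockSize
+            // at no more than 3/4 of height, so this halving loop is
+            // defensive and is not expected to iterate.)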
+ proposedBlockSize /= 2U; + } + } + + return log2GobsPerBlockY; +} + +static void GetLog2GobsPerBlock( + NvU32 bytesPerPixel, + NvU32 widthInPixels, + NvU32 heightInPixels, + NvU32 *pLog2GobsPerBlockY, + NvU32 *pitchInBlocks, + NvU64 *sizeInBytes) +{ + NvU32 xAlign, yAlign, pitchInBytes, lines; + + NvU32 log2GobsPerBlockY = GetLog2GobsPerBlockY(heightInPixels); + + xAlign = NVKMS_BLOCK_LINEAR_GOB_WIDTH - 1; + yAlign = (NVKMS_BLOCK_LINEAR_GOB_HEIGHT << log2GobsPerBlockY) - 1; + + pitchInBytes = NV_ALIGN_UP(widthInPixels * bytesPerPixel, xAlign); + lines = NV_ALIGN_UP(heightInPixels, yAlign); + + *pLog2GobsPerBlockY = log2GobsPerBlockY; + *sizeInBytes = (NvU64)pitchInBytes * lines; + *pitchInBlocks = pitchInBytes / NVKMS_BLOCK_LINEAR_GOB_WIDTH; +} + +static NvBool AllocSurfaceVidmem( + const NVDevEvoRec *pDevEvo, + NvU32 handle, + NvU64 sizeInBytes, + const NvKmsMemoryIsoType isoType) +{ + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + memAllocParams.size = sizeInBytes; + memAllocParams.type = NVOS32_TYPE_IMAGE; + + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM) | + DRF_DEF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS) | + DRF_DEF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR); + + memAllocParams.attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _DEFAULT); + + if (isoType == NVKMS_MEMORY_ISO) { + memAllocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _ISO, _YES, memAllocParams.attr2); + } + + memAllocParams.flags = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN | + NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE; + + memAllocParams.alignment = NV_EVO_SURFACE_ALIGNMENT; + + return nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + handle, + NV01_MEMORY_LOCAL_USER, + &memAllocParams) == NVOS_STATUS_SUCCESS; +} + +NvU64 nvHsMapSurfaceToDevice( + const NVDevEvoRec *pDevEvo, + const NvU32 rmHandle, + const NvU64 sizeInBytes, + const enum NvHsMapPermissions hsMapPermissions) +{ + NvU32 ret; + NvU32 flags = DRF_DEF(OS46, _FLAGS, _CACHE_SNOOP, _DISABLE); + NvU64 gpuAddress = 0; + + /* pHsDevice could be NULL if we are in no3d mode. */ + + if (pDevEvo->pHsDevice == NULL) { + return gpuAddress; + } + + switch (hsMapPermissions) { + case NvHsMapPermissionsNone: + return gpuAddress; + case NvHsMapPermissionsReadWrite: + flags |= DRF_DEF(OS46, _FLAGS, _ACCESS, _READ_WRITE); + break; + case NvHsMapPermissionsReadOnly: + flags |= DRF_DEF(OS46, _FLAGS, _ACCESS, _READ_ONLY); + break; + } + + ret = nvRmApiMapMemoryDma(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->nvkmsGpuVASpace, + rmHandle, + 0, /* offset */ + sizeInBytes, + flags, + &gpuAddress); + + if (ret == NVOS_STATUS_SUCCESS) { + return gpuAddress; + } else { + return NV_HS_BAD_GPU_ADDRESS; + } +} + +void nvHsUnmapSurfaceFromDevice( + const NVDevEvoRec *pDevEvo, + const NvU32 rmHandle, + const NvU64 gpuAddress) +{ + if ((gpuAddress == 0) || (gpuAddress == NV_HS_BAD_GPU_ADDRESS)) { + return; + } + + if (pDevEvo->pHsDevice == NULL) { + return; + } + + nvRmApiUnmapMemoryDma(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->nvkmsGpuVASpace, + rmHandle, + 0, /* flags */ + gpuAddress); +} + +/*! + * Free an NVHsSurfaceRec, allocated by nvHsAllocSurface(). + * + * \param[in] pDevEvo The device. + * \param[in] pHsSurface The NVHsSurfaceRec to free. 
+ */
+void nvHsFreeSurface(
+    NVDevEvoRec *pDevEvo,
+    NVHsSurfaceRec *pHsSurface)
+{
+    if (pHsSurface == NULL) {
+        return;
+    }
+
+    if (pHsSurface->rmHandle != 0) {
+        nvRmApiFree(nvEvoGlobal.clientHandle,
+                    pDevEvo->deviceHandle,
+                    pHsSurface->rmHandle);
+
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pHsSurface->rmHandle);
+        pHsSurface->rmHandle = 0;
+    }
+
+    if (pHsSurface->nvKmsHandle != 0) {
+        nvEvoUnregisterSurface(pDevEvo,
+                               pDevEvo->pNvKmsOpenDev,
+                               pHsSurface->nvKmsHandle,
+                               FALSE /* skipUpdate */,
+                               FALSE /* skipSync */);
+    }
+
+    nvFree(pHsSurface);
+}
+
+NVSurfaceEvoRec *nvHsGetNvKmsSurface(const NVDevEvoRec *pDevEvo,
+                                     NvKmsSurfaceHandle surfaceHandle,
+                                     const NvBool requireDisplayHardwareAccess)
+{
+    const NVEvoApiHandlesRec *pNvKmsOpenDevSurfaceHandles;
+    NVSurfaceEvoRec *pKmsSurface;
+
+    pNvKmsOpenDevSurfaceHandles =
+        nvGetSurfaceHandlesFromOpenDevConst(pDevEvo->pNvKmsOpenDev);
+
+    nvAssert(pNvKmsOpenDevSurfaceHandles != NULL);
+
+    pKmsSurface =
+        nvEvoGetSurfaceFromHandleNoDispHWAccessOk(pDevEvo,
+                                                  pNvKmsOpenDevSurfaceHandles,
+                                                  surfaceHandle);
+    nvAssert(pKmsSurface != NULL);
+    nvAssert(pKmsSurface->requireDisplayHardwareAccess == requireDisplayHardwareAccess);
+
+    return pKmsSurface;
+}
+
+/*!
+ * Allocate an NVHsSurfaceRec, for use with headSurface.
+ *
+ * Video memory is allocated, mapped into the device's GPU virtual address
+ * space, and registered with NVKMS's pNvKmsOpenDev.
+ *
+ * Note the video memory is not cleared here, because the corresponding
+ * graphics channel may not be allocated yet.
+ *
+ * \param[in] pDevEvo The device.
+ * \param[in] requireDisplayHardwareAccess Whether display hardware requires access.
+ * \param[in] format The format of the surface.
+ * \param[in] widthInPixels The width of the surface, in pixels.
+ * \param[in] heightInPixels The height of the surface, in pixels.
+ *
+ * \return On success, an allocated NVHsSurfaceRec structure is returned.
+ * On failure, NULL is returned.
+ */
+NVHsSurfaceRec *nvHsAllocSurface(
+    NVDevEvoRec *pDevEvo,
+    const NvBool requireDisplayHardwareAccess,
+    const enum NvKmsSurfaceMemoryFormat format,
+    const NvU32 widthInPixels,
+    const NvU32 heightInPixels)
+{
+    struct NvKmsRegisterSurfaceParams nvKmsParams = { };
+    const NvKmsSurfaceMemoryFormatInfo *pFormatInfo =
+        nvKmsGetSurfaceMemoryFormatInfo(format);
+    NvU32 pitchInBlocks = 0;
+    NvU64 sizeInBytes = 0;
+    NvU32 log2GobsPerBlockY = 0;
+    NvBool ret;
+    NVHsSurfaceRec *pHsSurface = nvCalloc(1, sizeof(*pHsSurface));
+    const NvKmsMemoryIsoType isoType =
+        requireDisplayHardwareAccess ?
NVKMS_MEMORY_ISO : NVKMS_MEMORY_NISO; + + if (pHsSurface == NULL) { + return NULL; + } + + GetLog2GobsPerBlock(pFormatInfo->rgb.bytesPerPixel, + widthInPixels, + heightInPixels, + &log2GobsPerBlockY, + &pitchInBlocks, + &sizeInBytes); + + sizeInBytes = NV_ALIGN_UP(sizeInBytes, NV_EVO_SURFACE_ALIGNMENT); + + pHsSurface->rmHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (pHsSurface->rmHandle == 0) { + goto fail; + } + + if (pDevEvo->requiresAllAllocationsInSysmem) { + ret = nvRmAllocSysmem(pDevEvo, pHsSurface->rmHandle, NULL, NULL, + sizeInBytes, isoType); + } else { + ret = AllocSurfaceVidmem(pDevEvo, pHsSurface->rmHandle, sizeInBytes, + isoType); + } + + if (!ret) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pHsSurface->rmHandle); + pHsSurface->rmHandle = 0; + + goto fail; + } + + pHsSurface->gobsPerBlock.y = log2GobsPerBlockY; + + /* + * For blocklinear surfaces, the NVKMS pitch is in units of blocks, which + * matches what GetLog2GobsPerBlock() returned to us. + */ + nvKmsParams.request.useFd = FALSE; + nvKmsParams.request.rmClient = nvEvoGlobal.clientHandle; + nvKmsParams.request.widthInPixels = widthInPixels; + nvKmsParams.request.heightInPixels = heightInPixels; + nvKmsParams.request.layout = NvKmsSurfaceMemoryLayoutBlockLinear; + nvKmsParams.request.format = format; + nvKmsParams.request.noDisplayHardwareAccess = !requireDisplayHardwareAccess; + nvKmsParams.request.log2GobsPerBlockY = log2GobsPerBlockY; + + nvKmsParams.request.planes[0].u.rmObject = pHsSurface->rmHandle; + nvKmsParams.request.planes[0].pitch = pitchInBlocks; + nvKmsParams.request.planes[0].rmObjectSizeInBytes = sizeInBytes; + + nvEvoRegisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev, &nvKmsParams, + NvHsMapPermissionsReadWrite); + + if (nvKmsParams.reply.surfaceHandle == 0) { + goto fail; + } + + pHsSurface->nvKmsHandle = nvKmsParams.reply.surfaceHandle; + + pHsSurface->pSurfaceEvo = + nvHsGetNvKmsSurface(pDevEvo, pHsSurface->nvKmsHandle, requireDisplayHardwareAccess); + + if (pHsSurface->pSurfaceEvo == NULL) { + goto fail; + } + + return pHsSurface; + +fail: + nvHsFreeSurface(pDevEvo, pHsSurface); + + return NULL; +} + +NvBool nvHsAllocDevice( + NVDevEvoRec *pDevEvo, + const struct NvKmsAllocDeviceRequest *pRequest) +{ + NVHsDeviceEvoRec *pHsDevice; + + nvAssert(pDevEvo->pHsDevice == NULL); + + if (!pDevEvo->isHeadSurfaceSupported) { + return TRUE; + } + + if (pRequest->no3d) { + return TRUE; + } + + pHsDevice = nvCalloc(1, sizeof(*pHsDevice)); + + if (pHsDevice == NULL) { + goto fail; + } + + pDevEvo->pHsDevice = pHsDevice; + pHsDevice->pDevEvo = pDevEvo; + + nvAssert(pDevEvo->nvkmsGpuVASpace); + + if (!nvHs3dAllocDevice(pHsDevice)) { + goto fail; + } + + if (!AllocNotifiers(pHsDevice)) { + goto fail; + } + + return TRUE; + +fail: + nvHsFreeDevice(pDevEvo); + + return FALSE; +} + +void nvHsFreeDevice(NVDevEvoRec *pDevEvo) +{ + NVHsDeviceEvoRec *pHsDevice = pDevEvo->pHsDevice; + + if (pHsDevice == NULL) { + return; + } + + FreeNotifiers(pHsDevice); + + nvHs3dFreeDevice(pHsDevice); + + nvFree(pHsDevice); + + pDevEvo->pHsDevice = NULL; +} + +NVHsChannelEvoPtr nvHsAllocChannel(NVDispEvoRec *pDispEvo, NvU32 apiHead) +{ + NVHsChannelEvoRec *pHsChannel = nvCalloc(1, sizeof(*pHsChannel)); + + if (pHsChannel == NULL) { + goto fail; + } + + pHsChannel->pDispEvo = pDispEvo; + pHsChannel->apiHead = apiHead; + + if (!nvHs3dAllocChannel(pHsChannel)) { + goto fail; + } + + return pHsChannel; + +fail: + nvHsFreeChannel(pHsChannel); + + return NULL; +} + +void nvHsFreeChannel(NVHsChannelEvoPtr pHsChannel) +{ + 
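+    /*
+     * Tolerate pHsChannel == NULL, so that this can be used on the failure
+     * path of nvHsAllocChannel().
+     */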
if (pHsChannel == NULL) { + return; + } + + nvHs3dFreeChannel(pHsChannel); + + nvFree(pHsChannel); +} + +static NvU32 HsGetSemaphoreIndex( + const NVFlipNIsoSurfaceEvoHwState *pSemaSurface) +{ + const NvU32 offsetInBytes = pSemaSurface->offsetInWords * 4; + const enum NvKmsNIsoFormat format = pSemaSurface->format; + const NvU32 sizeOfSemaphore = nvKmsSizeOfSemaphore(format); + + /* + * The semaphore size must be greater than zero. Flip validation should + * prevent us from getting here with an invalid NvKmsNIsoFormat. + */ + nvAssert(sizeOfSemaphore > 0); + + /* The semaphore offset should be a multiple of the semaphore size. */ + nvAssert((offsetInBytes % sizeOfSemaphore) == 0); + + return offsetInBytes / sizeOfSemaphore; +} + +/*! + * Read the payload of the semaphore described in the pSemaSurface. + */ +static NvU32 HsFlipQueueReadSemaphore( + const NVHsChannelEvoRec *pHsChannel, + const NVFlipNIsoSurfaceEvoHwState *pSemaSurface) +{ + const enum NvKmsNIsoFormat format = pSemaSurface->format; + const NvU32 semaphoreIndex = HsGetSemaphoreIndex(pSemaSurface); + const NvU32 sd = pHsChannel->pDispEvo->displayOwner; + const void *ptr; + struct nvKmsParsedSemaphore parsedSemaphore = { }; + + /* We should only get here if we have a valid semaphore surface. */ + nvAssert(pSemaSurface->pSurfaceEvo != NULL); + + ptr = pSemaSurface->pSurfaceEvo->cpuAddress[sd]; + + if (ptr == NULL) { + nvAssert(!"Semaphore surface without CPU mapping!"); + return 0; + } + + nvKmsParseSemaphore(format, semaphoreIndex, ptr, &parsedSemaphore); + + return parsedSemaphore.payload; +} + +/*! + * Return whether the specified pFlipState is ready to flip. + */ +static NvBool HsFlipQueueEntryIsReady( + const NVHsChannelEvoRec *pHsChannel, + const NVHsLayerRequestedFlipState *pFlipState) +{ + const NVFlipNIsoSurfaceEvoHwState *pSemaSurface = + &pFlipState->syncObject.u.semaphores.acquireSurface; + + if (pFlipState->syncObject.usingSyncpt) { + return TRUE; + } + + /* + * If a semaphore surface was specified, check if the semaphore has reached + * the specified acquire value. + */ + if (pSemaSurface->pSurfaceEvo != NULL) { + const NvU32 semaphoreValue = + HsFlipQueueReadSemaphore(pHsChannel, pSemaSurface); + + if (pHsChannel->swapGroupFlipping) { + // With swap group flipping, the client semaphore should be + // written before the non-stall interrupt kicking off the flip. + nvAssert(semaphoreValue == pFlipState->syncObject.u.semaphores.acquireValue); + } else { + if (semaphoreValue != pFlipState->syncObject.u.semaphores.acquireValue) { + return FALSE; + } + } + } + + /* + * If a time stamp was specified for the flip, check if the time stamp has + * been satisfied. + * + * XXX NVKMS HEADSURFACE TODO: Implement time stamp flip check. + */ + + return TRUE; +} + +/*! + * Update the reference count of all the surfaces described in the pFlipState. + */ +static void HsUpdateFlipQueueEntrySurfaceRefCount( + NVDevEvoPtr pDevEvo, + const NVHsLayerRequestedFlipState *pFlipState, + NvBool increase) +{ + HsChangeSurfaceFlipRefCount( + pDevEvo, pFlipState->pSurfaceEvo[NVKMS_LEFT], increase); + + HsChangeSurfaceFlipRefCount( + pDevEvo, pFlipState->pSurfaceEvo[NVKMS_RIGHT], increase); + + if (!pFlipState->syncObject.usingSyncpt) { + HsChangeSurfaceFlipRefCount( + pDevEvo, pFlipState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo, increase); + + HsChangeSurfaceFlipRefCount( + pDevEvo, pFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo, increase); + } +} + +/*! + * Update bookkeeping for "flipping away" from a pFlipState. 
+ */ +static void HsReleaseFlipQueueEntry( + NVDevEvoPtr pDevEvo, + NVHsChannelEvoPtr pHsChannel, + const NVHsLayerRequestedFlipState *pFlipState) +{ + /* + * If a semaphore surface was specified, we can now write its release value. + */ + if (!pFlipState->syncObject.usingSyncpt && + pFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo != NULL) { + + /* + * XXX NVKMS HEADSURFACE TODO: write the timestamp in the EVO/NVDisplay + * semaphore structure, based on NvKmsNIsoFormat. The graphics channel + * doesn't support all the NvKmsNIsoFormats, so we would need to use a + * graphics channel semaphore release of STRUCTURE_SIZE = ONE_WORD with + * the timestamp as payload. It would be unfortunate to read ptimer + * registers in order to compute the payload value. + */ + + nvHs3dReleaseSemaphore(pHsChannel, + pFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo, + pFlipState->syncObject.u.semaphores.releaseSurface.format, + pFlipState->syncObject.u.semaphores.releaseSurface.offsetInWords, + pFlipState->syncObject.u.semaphores.releaseValue, + TRUE /* allPreceedingReads */); + } + + /* + * HeadSurface no longer needs to read from the surfaces in pFlipState; + * decrement their reference counts. + */ + HsUpdateFlipQueueEntrySurfaceRefCount(pDevEvo, pFlipState, FALSE); +} + +/*! + * "Fast forward" through flip queue entries that are ready. + * + * \param[in,out] pHsChannel The headSurface channel. + * \param[in] layer The layer of the flip queue. + * \param[in] honorIsReadyCriteria Honor the isReady check for + * flip queue entries. + * \param[in] honorMinPresentInterval Honor the minPresentInterval in + * flip queue entries. + */ +static void HsFastForwardFlipQueue( + NVHsChannelEvoPtr pHsChannel, + const NvU8 layer, + const NvBool honorIsReadyCriteria, + const NvBool honorMinPresentInterval) +{ + NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo; + NVListRec *pFlipQueue = &pHsChannel->flipQueue[layer].queue; + + /* + * For swapgroup flips, every flip kicked off by the client needs to result + * in a real flip in hardware, so we can't fast forward through flips here. + */ + if (pHsChannel->config.neededForSwapGroup) { + return; + } + + while (!nvListIsEmpty(pFlipQueue)) { + + NVHsChannelFlipQueueEntry *pEntry = + nvListFirstEntry(pFlipQueue, + NVHsChannelFlipQueueEntry, + flipQueueEntry); + /* + * Stop "fast forwarding" once we find a flip queue entry that is not + * ready: we must not release semaphores out of order, otherwise we + * could confuse client semaphore interlocking. + */ + if (honorIsReadyCriteria && + !HsFlipQueueEntryIsReady(pHsChannel, &pEntry->hwState)) { + break; + } + + /* + * Normally, we want to make sure that each MinPresentInterval > 0 flip + * is displayed for one frame, so we shouldn't fast forward past them. + */ + if (honorMinPresentInterval && + (pEntry->hwState.minPresentInterval != 0)) { + break; + } + + /* + * We are "flipping away" from the flip queue entry in current. Release + * it, and replace it with the entry in pEntry. + */ + + HsReleaseFlipQueueEntry(pDevEvo, pHsChannel, + &pHsChannel->flipQueue[layer].current); + + pHsChannel->flipQueue[layer].current = pEntry->hwState; + + nvListDel(&pEntry->flipQueueEntry); + nvFree(pEntry); + } +} + +/*! + * Push a new entry to the end of the headSurface channel's flip queue. + * + * \param[in,out] pHsChannel The headSurface channel. + * \param[in] layer The layer of the flip queue. + * \param[in] pFlipState The hwState to be pushed on the flip queue. 
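+ *
+ * Note that this function cannot report failure: the flip has already been
+ * committed by the time it is called, so entry allocation failure is
+ * currently swallowed (see the XXX in the body).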
+ */ +void nvHsPushFlipQueueEntry( + NVHsChannelEvoPtr pHsChannel, + const NvU8 layer, + const NVHsLayerRequestedFlipState *pFlipState) +{ + NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo; + NVListRec *pFlipQueue = &pHsChannel->flipQueue[layer].queue; + NVHsChannelFlipQueueEntry *pEntry = nvCalloc(1, sizeof(*pEntry)); + + if (pEntry == NULL) { + /* + * XXX NVKMS HEADSURFACE TODO: we cannot fail at this point in the call + * chain (we've already committed to the flip). Move the nvCalloc() call + * earlier in the call chain to a point where we can fail. + */ + return; + } + + pEntry->hwState = *pFlipState; + + /* Increment the ref counts on the surfaces in the flip queue entry. */ + + HsUpdateFlipQueueEntrySurfaceRefCount(pDevEvo, &pEntry->hwState, TRUE); + + /* "Fast forward" through existing flip queue entries that are ready. */ + + HsFastForwardFlipQueue(pHsChannel, layer, + TRUE /* honorIsReadyCriteria */, + TRUE /* honorMinPresentInterval */); + + /* Append the new entry. */ + + nvListAppend(&pEntry->flipQueueEntry, pFlipQueue); +} + +/*! + * Remove the first entry in the flip queue and return it. + * + * If the first entry in the flipQueue is ready to be consumed by headSurface, + * remove it from the list and return it in the 'pFlipState' argument. + * + * If this function returns TRUE, it is the caller's responsibility to + * eventually call + * + * HsUpdateFlipQueueEntrySurfaceRefCount(pDevEvo, pFlipState, FALSE) + * + * for the returned pFlipState. + * + * \param[in,out] pHsChannel The headSurface channel. + * \param[in] layer The layer of the flip queue. + * \param[out] pFlipState The hwState that was popped off the flip queue. + * + * \return Return TRUE if a flip queue entry was popped off the queue and + * copied into pFlipState. + */ +static NvBool HsPopFlipQueueEntry( + NVHsChannelEvoPtr pHsChannel, + const NvU8 layer, + NVHsLayerRequestedFlipState *pFlipState) +{ + NVListRec *pFlipQueue = &pHsChannel->flipQueue[layer].queue; + NVHsChannelFlipQueueEntry *pEntry; + + if (nvListIsEmpty(pFlipQueue)) { + return FALSE; + } + + pEntry = nvListFirstEntry(pFlipQueue, + NVHsChannelFlipQueueEntry, + flipQueueEntry); + + if (!HsFlipQueueEntryIsReady(pHsChannel, &pEntry->hwState)) { + return FALSE; + } + + *pFlipState = pEntry->hwState; + + nvListDel(&pEntry->flipQueueEntry); + nvFree(pEntry); + + return TRUE; +} + +/*! + * Update the current flip queue entry for a new headSurface frame. + * + * To build a new frame of headSurface, we look at the flip queue of each layer. + * If there is an entry available, we pop it off the queue and replace .current + * with the entry. + */ +static void HsUpdateFlipQueueCurrent( + NVHsChannelEvoPtr pHsChannel) +{ + NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo; + NvU8 layer; + + for (layer = 0; layer < ARRAY_LEN(pHsChannel->flipQueue); layer++) { + + NVHsLayerRequestedFlipState newCurrent = { }; + + /* + * XXX NVKMS HEADSURFACE TODO: fast forward to the last ready flip queue + * entry. Share code with similar functionality in + * nvHsPushFlipQueueEntry(). + */ + + if (!HsPopFlipQueueEntry(pHsChannel, layer, &newCurrent)) { + continue; + } + + /* + * We have a new flip queue entry to place in current. Release the old + * current flip queue entry, and replace it with the popped entry. + */ + HsReleaseFlipQueueEntry(pDevEvo, pHsChannel, + &pHsChannel->flipQueue[layer].current); + + pHsChannel->flipQueue[layer].current = newCurrent; + } +} + +/*! + * Drain the flip queue on each layer of pHsChannel. 
+ * + * In preparation to disable headSurface, release the flip queue entry in + * .current, as well as all entries in the queue. + */ +void nvHsDrainFlipQueue( + NVHsChannelEvoPtr pHsChannel) +{ + NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo; + NvU8 layer; + + for (layer = 0; layer < ARRAY_LEN(pHsChannel->flipQueue); layer++) { + NVListRec *pFlipQueue = &pHsChannel->flipQueue[layer].queue; + + HsReleaseFlipQueueEntry(pDevEvo, pHsChannel, + &pHsChannel->flipQueue[layer].current); + + nvkms_memset(&pHsChannel->flipQueue[layer].current, 0, + sizeof(pHsChannel->flipQueue[layer].current)); + + while (!nvListIsEmpty(pFlipQueue)) { + + NVHsChannelFlipQueueEntry *pEntry = + nvListFirstEntry(pFlipQueue, + NVHsChannelFlipQueueEntry, + flipQueueEntry); + + HsReleaseFlipQueueEntry(pDevEvo, pHsChannel, &pEntry->hwState); + + nvListDel(&pEntry->flipQueueEntry); + nvFree(pEntry); + } + } +} + +/*! + * Return whether all flip queues on this pHsChannel are idle. + * + * As a side effect, attempt to "fast forward" through flip queue entries, in an + * effort to make the flip queues idle. When fast forwarding, always ignore the + * client-requested minPresentInterval. Optionally (when force == TRUE), also + * ignore the "IsReady" check. + * + * This is intended to be used in two scenarios: + * + * - First, call nvHsIdleFlipQueue(force=FALSE) in a loop with all other heads + * we are trying to idle. This should allow semaphore interlocking to + * progress naturally. + * + * - If that loop times out, call nvHsIdleFlipQueue(force=TRUE), which will + * ignore the IsReady conditions and forcibly make the flip queues idle. + */ +NvBool nvHsIdleFlipQueue( + NVHsChannelEvoPtr pHsChannel, + NvBool force) +{ + const NvBool honorIsReadyCriteria = !force; + NvBool ret = TRUE; + NvU8 layer; + + for (layer = 0; layer < ARRAY_LEN(pHsChannel->flipQueue); layer++) { + + HsFastForwardFlipQueue(pHsChannel, layer, + honorIsReadyCriteria, + FALSE /* honorMinPresentInterval */); + + if (!nvListIsEmpty(&pHsChannel->flipQueue[layer].queue)) { + /* force should always result in an empty flip queue */ + nvAssert(!force); + ret = FALSE; + } + } + + return ret; +} + +/* + * We use notifiers to know when headSurface frames are presented, so that we + * don't render to the visible buffer. 
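+ *
+ * The notifier and frame semaphore state is re-initialized whenever a channel
+ * transitions into a new headSurface configuration; see HsInitNotifiers()
+ * below.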
+ */ + +static NvU32 AllocNotifierMemory( + const NVDevEvoRec *pDevEvo, + NvU32 handle) +{ + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + NvU32 hClass; + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + memAllocParams.size = NVKMS_HEAD_SURFACE_NOTIFIERS_SIZE_IN_BYTES; + memAllocParams.type = NVOS32_TYPE_DMA; + + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS) | + DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _4KB) | + DRF_DEF(OS32, _ATTR, _COHERENCY, _UNCACHED); + + if (pDevEvo->requiresAllAllocationsInSysmem) { + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, + memAllocParams.attr); + hClass = NV01_MEMORY_SYSTEM; + } else { + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, + memAllocParams.attr); + hClass = NV01_MEMORY_LOCAL_USER; + } + + memAllocParams.flags = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN | + NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT | + NVOS32_ALLOC_FLAGS_FORCE_ALIGN_HOST_PAGE; + + memAllocParams.attr2 = DRF_DEF(OS32, _ATTR2, _ISO, _NO); + + return nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + handle, + hClass, + &memAllocParams); +} + +static NvBool MapNotifiers(NVHsDeviceEvoRec *pHsDevice) +{ + NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo; + NVHsNotifiersRec *pNotifiers = &pHsDevice->notifiers; + const NvU64 size = NVKMS_HEAD_SURFACE_NOTIFIERS_SIZE_IN_BYTES; + NvU32 sd, ret; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pNotifiers->rmHandle, + 0, + size, + (void **)&pNotifiers->sd[sd].ptr, + 0); + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + /* + * Intentionally use NVMISC_MEMSET() rather than nvkms_memset(): some + * CPU architectures, notably ARM, may fault if streaming stores like in + * an optimized memset() implementation are used on a BAR1 mapping. + * NVMISC_MEMSET() is conveniently not optimized. 
+ */ + NVMISC_MEMSET((void *)pNotifiers->sd[sd].ptr, 0, size); + } + + return TRUE; +} + +static void UnmapNotifiers(NVHsDeviceEvoRec *pHsDevice) +{ + NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo; + NVHsNotifiersRec *pNotifiers = &pHsDevice->notifiers; + NvU32 sd; + + if (pNotifiers->rmHandle == 0) { + return; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + if (pNotifiers->sd[sd].ptr == NULL) { + continue; + } + + nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pNotifiers->rmHandle, + pNotifiers->sd[sd].ptr, + 0); + + pNotifiers->sd[sd].ptr = NULL; + } +} + +static NvBool RegisterNotifiersWithNvKms(NVHsDeviceEvoRec *pHsDevice) +{ + struct NvKmsRegisterSurfaceParams params = { }; + NVHsNotifiersRec *pNotifiers = &pHsDevice->notifiers; + NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo; + const NvBool requireDisplayHardwareAccess = TRUE; + + params.request.useFd = FALSE; + params.request.rmClient = nvEvoGlobal.clientHandle; + + params.request.layout = NvKmsSurfaceMemoryLayoutPitch; + params.request.format = NvKmsSurfaceMemoryFormatI8; + + params.request.isoType = NVKMS_MEMORY_NISO; + + params.request.planes[0].u.rmObject = pNotifiers->rmHandle; + params.request.planes[0].pitch = NVKMS_HEAD_SURFACE_NOTIFIERS_SIZE_IN_BYTES; + params.request.planes[0].rmObjectSizeInBytes = + NVKMS_HEAD_SURFACE_NOTIFIERS_SIZE_IN_BYTES; + + nvEvoRegisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev, ¶ms, + NvHsMapPermissionsReadWrite); + + pHsDevice->notifiers.nvKmsHandle = params.reply.surfaceHandle; + + if (pHsDevice->notifiers.nvKmsHandle == 0) { + return FALSE; + } + + pHsDevice->notifiers.pSurfaceEvo = + nvHsGetNvKmsSurface(pDevEvo, + pHsDevice->notifiers.nvKmsHandle, + requireDisplayHardwareAccess); + + return (pHsDevice->notifiers.pSurfaceEvo != NULL); +} + +static void AssignNIsoFormat(NVHsDeviceEvoRec *pHsDevice) +{ + const NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo; + + if (pDevEvo->caps.validNIsoFormatMask & + NVBIT(NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY)) { + /* If available, use the "nvdisplay" format. */ + pHsDevice->notifiers.nIsoFormat = NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY; + } else { + /* Otherwise, use the "legacy" format. 
*/
+        nvAssert((pDevEvo->caps.validNIsoFormatMask &
+                  NVBIT(NVKMS_NISO_FORMAT_LEGACY)) != 0);
+        pHsDevice->notifiers.nIsoFormat = NVKMS_NISO_FORMAT_LEGACY;
+    }
+}
+
+static NvBool AllocNotifiers(NVHsDeviceEvoRec *pHsDevice)
+{
+    NvU32 ret;
+    NVDevEvoRec *pDevEvo;
+
+    pDevEvo = pHsDevice->pDevEvo;
+
+    pHsDevice->notifiers.rmHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
+
+    if (pHsDevice->notifiers.rmHandle == 0) {
+        goto fail;
+    }
+
+    ret = AllocNotifierMemory(pHsDevice->pDevEvo, pHsDevice->notifiers.rmHandle);
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
+                           pHsDevice->notifiers.rmHandle);
+        pHsDevice->notifiers.rmHandle = 0;
+
+        goto fail;
+    }
+
+    if (!MapNotifiers(pHsDevice)) {
+        goto fail;
+    }
+
+    if (!RegisterNotifiersWithNvKms(pHsDevice)) {
+        goto fail;
+    }
+
+    AssignNIsoFormat(pHsDevice);
+
+    return TRUE;
+
+fail:
+    FreeNotifiers(pHsDevice);
+
+    return FALSE;
+}
+
+static void FreeNotifiers(NVHsDeviceEvoRec *pHsDevice)
+{
+    NVDevEvoRec *pDevEvo;
+    NVHsNotifiersRec *pNotifiers;
+
+    if (pHsDevice == NULL) {
+        return;
+    }
+
+    pDevEvo = pHsDevice->pDevEvo;
+    pNotifiers = &pHsDevice->notifiers;
+
+    if (pNotifiers->nvKmsHandle != 0) {
+        nvEvoUnregisterSurface(pDevEvo,
+                               pDevEvo->pNvKmsOpenDev,
+                               pNotifiers->nvKmsHandle,
+                               FALSE /* skipUpdate */,
+                               FALSE /* skipSync */);
+        pNotifiers->pSurfaceEvo = NULL;
+    }
+
+    UnmapNotifiers(pHsDevice);
+
+    if (pHsDevice->notifiers.rmHandle != 0) {
+        nvRmApiFree(nvEvoGlobal.clientHandle,
+                    pDevEvo->deviceHandle,
+                    pHsDevice->notifiers.rmHandle);
+
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
+                           pHsDevice->notifiers.rmHandle);
+        pHsDevice->notifiers.rmHandle = 0;
+    }
+}
+
+/*!
+ * Reset headSurface notifiers for this channel to NOT_BEGUN.
+ *
+ * By the time the modeset completes to transition into a new headSurface
+ * configuration, all headSurface flips from the previous configuration should
+ * be completed. But, that would leave at least one notifier set to FINISHED.
+ *
+ * Initialize all notifiers for this channel to NOT_BEGUN, so that
+ * HsVBlankCallbackDeferredWork() does not interpret notifier state from the
+ * previous headSurface configuration as applying to the new headSurface
+ * configuration.
+ */
+static void HsInitNotifiers(
+    NVHsDeviceEvoRec *pHsDevice,
+    NVHsChannelEvoRec *pHsChannel)
+{
+    const NvU32 apiHead = pHsChannel->apiHead;
+    const NvU32 sd = pHsChannel->pDispEvo->displayOwner;
+    NVHsNotifiersRec *pHsNotifiers = &pHsDevice->notifiers;
+    NVHsNotifiersOneSdRec *pHsNotifiersOneSd = pHsNotifiers->sd[sd].ptr;
+    NvU8 slot, buffer;
+
+    for (slot = 0; slot < NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD; slot++) {
+        nvKmsResetNotifier(pHsNotifiers->nIsoFormat,
+                           FALSE /* overlay */,
+                           slot,
+                           pHsNotifiersOneSd->notifier[apiHead]);
+    }
+
+    for (buffer = 0; buffer < NVKMS_HEAD_SURFACE_MAX_BUFFERS; buffer++) {
+        nvKmsResetSemaphore(pHsNotifiers->nIsoFormat,
+                            buffer, pHsNotifiersOneSd->semaphore[apiHead],
+                            NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_RENDERABLE);
+    }
+}
+
+void nvHsInitNotifiers(
+    NVHsDeviceEvoRec *pHsDevice,
+    NVHsChannelEvoRec *pHsChannel)
+{
+    if (pHsChannel->config.neededForSwapGroup) {
+        /*
+         * XXX NVKMS HEADSURFACE TODO: initialize tracking for ViewPortIn
+         * flips.
+         */
+    } else {
+        HsInitNotifiers(pHsDevice, pHsChannel);
+    }
+}
+
+/*!
+ * For the given head and sd, prepare the next notifier:
+ *
+ * - Look up the next notifier to use.
+ * - Clear that notifier to STATUS_NOT_BEGUN.
+ * - Update the slot bookkeeping for the (head,sd) pair.
+ * - Return the dword offset of the notifier. + */ +static NvU16 PrepareNextNotifier( + NVHsNotifiersRec *pHsNotifiers, + NvU32 sd, + NvU32 apiHead) +{ + const NvU32 notifierSize = + nvKmsSizeOfNotifier(pHsNotifiers->nIsoFormat, FALSE /* overlay */); + + const NvU8 nextSlot = pHsNotifiers->sd[sd].apiHead[apiHead].nextSlot; + + NVHsNotifiersOneSdRec *pHsNotifiersOneSd = pHsNotifiers->sd[sd].ptr; + + const NvU8 *headBase = pHsNotifiersOneSd->notifier[apiHead]; + + const NvU8 offsetInBytes = + (headBase - ((const NvU8 *) pHsNotifiersOneSd)) + + (notifierSize * nextSlot); + + nvAssert(notifierSize <= NVKMS_HEAD_SURFACE_MAX_NOTIFIER_SIZE); + + nvKmsResetNotifier(pHsNotifiers->nIsoFormat, FALSE /* overlay */, + nextSlot, pHsNotifiersOneSd->notifier[apiHead]); + + pHsNotifiers->sd[sd].apiHead[apiHead].nextSlot = + (nextSlot + 1) % NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD; + + return offsetInBytes / 4; +} + +/*! + * Helper function for nvHsFlip(); populate NvKmsFlipRequest and call + * nvFlipEvo(). + * + * \param[in,out] pHsDevice The headSurface device. + * \param[in,out] pHsChannel The headSurface channel. + * \param[in] perEyeStereoFlip Whether to flip per-eye. + * \param[in] surfaceHandles The surfaces to flip to. + * \param[in] isFirstFlip Whether this is the first flip after + * enabling headsurface. + * \param[in] allowFlipLock Whether to allow fliplock for this flip. + */ +static void HsFlipHelper( + NVHsDeviceEvoRec *pHsDevice, + NVHsChannelEvoRec *pHsChannel, + const NvBool perEyeStereoFlip, + const NvKmsSurfaceHandle surfaceHandles[NVKMS_MAX_EYES], + const NvBool isFirstFlip, + const NvBool allowFlipLock) +{ + NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo; + struct NvKmsFlipCommonParams *pParamsOneHead; + NVHsNotifiersRec *pHsNotifiers = &pHsDevice->notifiers; + const NvU32 sd = pHsChannel->pDispEvo->displayOwner; + const NvU32 apiHead = pHsChannel->apiHead; + NvBool ret; + + /* + * Use preallocated memory, so that we don't have to allocate + * memory here (and deal with allocation failure). + */ + struct NvKmsFlipRequestOneHead *pFlipHead = &pHsChannel->scratchParams; + + nvkms_memset(pFlipHead, 0, sizeof(*pFlipHead)); + + pFlipHead->sd = sd; + pFlipHead->head = apiHead; + pParamsOneHead = &pFlipHead->flip; + + if (isFirstFlip) { + /* + * For the first flip after enabling headsurface + * (NV_HS_NEXT_FRAME_REQUEST_TYPE_FIRST_FRAME), the old viewport + * (saved in HsConfigInitSwapGroupOneHead or HsConfigInitModesetOneHead + * and restored in HsConfigRestoreMainLayerSurface) which may specify an + * offset within a multi-head surface needs to be overridden to the + * origin for the per-head headsurface surfaces. 
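+         *
+         * For example, a viewport previously panned to (0, frameSize.height)
+         * within a double-height SwapGroup surface would otherwise carry that
+         * offset over, even though the per-head headSurface surfaces are
+         * rendered from their origin.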
+ */ + pParamsOneHead->viewPortIn.specified = TRUE; + pParamsOneHead->viewPortIn.point.x = 0; + pParamsOneHead->viewPortIn.point.y = 0; + + pParamsOneHead->cursor.imageSpecified = TRUE; + + pParamsOneHead->cursor.positionSpecified = TRUE; + } + + pParamsOneHead->layer[NVKMS_MAIN_LAYER].surface.handle[NVKMS_LEFT] = + surfaceHandles[NVKMS_LEFT]; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].surface.handle[NVKMS_RIGHT] = + surfaceHandles[NVKMS_RIGHT]; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].surface.specified = TRUE; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.useSyncpt = FALSE; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.specified = TRUE; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].tearing = FALSE; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].perEyeStereoFlip = perEyeStereoFlip; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].minPresentInterval = 1; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].csc.specified = TRUE; + pParamsOneHead->lut.input.specified = FALSE; + pParamsOneHead->lut.output.specified = FALSE; + + /* + * XXX NVKMS HEADSURFACE TODO: Work out in which cases we should use the + * head's current CSC or LUT. + */ + pParamsOneHead->layer[NVKMS_MAIN_LAYER].csc.matrix = NVKMS_IDENTITY_CSC_MATRIX; + + pParamsOneHead->layer[NVKMS_MAIN_LAYER].completionNotifier.specified = TRUE; + + if (surfaceHandles[NVKMS_LEFT] != 0) { + NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDev(pDevEvo->pNvKmsOpenDev); + NVSurfaceEvoPtr pSurfaceEvo = + nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, surfaceHandles[NVKMS_LEFT]); + struct NvKmsSemaphore *pSema; + + pParamsOneHead->layer[NVKMS_MAIN_LAYER].completionNotifier.val.surface.surfaceHandle = + pHsNotifiers->nvKmsHandle; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].completionNotifier.val.surface.format = + pHsNotifiers->nIsoFormat; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].completionNotifier.val.surface.offsetInWords = + PrepareNextNotifier(pHsNotifiers, sd, apiHead); + + pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.useSyncpt = FALSE; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.specified = TRUE; + + pSema = &pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.u.semaphores.acquire; + pSema->surface.surfaceHandle = pHsNotifiers->nvKmsHandle; + pSema->surface.format = pHsNotifiers->nIsoFormat; + pSema->surface.offsetInWords = + HsGetFrameSemaphoreOffsetInWords(pHsChannel); + pSema->value = NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_DISPLAYABLE; + + pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.u.semaphores.release = + pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.u.semaphores.acquire; + + pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.u.semaphores.release.value = + NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_RENDERABLE; + + pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.specified = TRUE; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.val.width = + pSurfaceEvo->widthInPixels; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.val.height = + pSurfaceEvo->heightInPixels; + + pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeOut.specified = TRUE; + pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeOut.val = + pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.val; + } + + ret = nvFlipEvo(pDevEvo, + pDevEvo->pNvKmsOpenDev, + pFlipHead, + 1 /* numFlipHeads */, + TRUE /* commit */, + NULL /* pReply */, + FALSE /* skipUpdate */, + allowFlipLock); + + if (!ret) { + nvAssert(!"headSurface flip failed?"); + } +} + +/*! + * Flip to the headSurface buffer specified by index. 
+ * + * If pHsOneHeadAllDisps == NULL, disable headSurface by flipping to NULL. + * + * \param[in,out] pHsDevice The headSurface device. + * \param[in,out] pHsChannel The headSurface channel. + * \param[in] eyeMask The mask of which eyes to flip. + * \param[in] perEyeStereoFlip Whether to flip per-eye. + * \param[in] index Which buffer to flip to. + * \param[in] pHsOneHeadAllDisps The headSurface config. + * \param[in] isFirstFlip Whether this is the first flip after + * enabling headsurface. + * \param[in] allowFlipLock Whether to allow fliplock for this flip. + */ +void nvHsFlip( + NVHsDeviceEvoRec *pHsDevice, + NVHsChannelEvoRec *pHsChannel, + const NvU8 eyeMask, + const NvBool perEyeStereoFlip, + const NvU8 index, + const NVHsStateOneHeadAllDisps *pHsOneHeadAllDisps, + const NvBool isFirstFlip, + const NvBool allowFlipLock) +{ + NvKmsSurfaceHandle surfaceHandles[NVKMS_MAX_EYES] = { 0, 0 }; + const NvBool enable = (pHsOneHeadAllDisps != NULL); + + if (enable) { + NvU8 eye; + + for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) { + + const NVHsSurfaceRec *pHsSurface = + pHsOneHeadAllDisps->surfaces[eye][index].pSurface; + + if ((eyeMask & NVBIT(eye)) == 0) { + continue; + } + + nvAssert(pHsSurface != NULL); + + surfaceHandles[eye] = pHsSurface->nvKmsHandle; + nvAssert(surfaceHandles[eye] != 0); + } + } + + HsFlipHelper(pHsDevice, + pHsChannel, + perEyeStereoFlip, + surfaceHandles, + isFirstFlip, + allowFlipLock); + + if (!enable) { + /* XXX NVKMS HEADSURFACE TODO: disable stereo toggling, if necessary. */ + } +} + +/*! + * "Flip" using the core channel's ViewPortIn. + */ +static void HsFlipViewPortIn(NVHsChannelEvoPtr pHsChannel, NvU16 x, NvU16 y) +{ + const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo; + + /* + * XXX NVKMS HEADSURFACE TODO: use the panning NVKMS API request, rather + * than call the low-level SetViewportPointIn() HAL proc. But, to do that, + * we would need to make the pan request much lighter weight, so that it is + * usable for our needs here. + */ + nvApiHeadSetViewportPointIn(pDispEvo, pHsChannel->apiHead, x, y); + + /* + * XXX NVKMS HEADSURFACE TODO: Add tracking so that IsPreviousFrameDone() + * can know if this update latched. + */ +} + +static void HsPickSrcEyeAndPixelShift( + const NVHsChannelEvoRec *pHsChannel, + const NvU8 dstEye, + NvU8 *pSrcEye, + enum NvKmsPixelShiftMode *pPixelShift) +{ + if (pHsChannel->config.pixelShift == NVKMS_PIXEL_SHIFT_8K) { + + if (dstEye == NVKMS_LEFT) { + *pSrcEye = NVKMS_LEFT; + *pPixelShift = NVKMS_PIXEL_SHIFT_4K_BOTTOM_RIGHT; + } + + if (dstEye == NVKMS_RIGHT) { + *pSrcEye = NVKMS_LEFT; + *pPixelShift = NVKMS_PIXEL_SHIFT_4K_TOP_LEFT; + } + } else { + *pSrcEye = dstEye; + *pPixelShift = pHsChannel->config.pixelShift; + } +} + +/*! + * Structure to drive the behavior of nvHsNextFrame(). + */ +struct NvHsNextFrameWorkArea { + + /* + * The range of surface indices to render to. Indices here are used as the + * 'index' in NVHsStateOneHeadAllDisps::surfaces[eye][index]::pSurface. + */ + NvU8 dstBufferIndexStart; + NvU8 dstBufferIndexEnd; + + /* Whether to flip to the surface indicated by pHsChannel->nextIndex. */ + NvBool doFlipToNextIndex; + + /* Whether to allow fliplock on the flip to the next surface. */ + NvBool allowFlipLock; + + /* Whether to flip to the destRect region of the surface.*/ + NvBool doFlipToDestRect; + + /* Whether to increment nextIndex and/or nextOffset. */ + NvBool doIncrementNextIndex; + NvBool doIncrementNextOffset; + + /* + * On which dstBuffer indices to honor the SwapGroup's exclusive + * clip list. 
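+     *
+     * (This is a per-buffer bitmask: e.g., with nextIndex == 1,
+     * HsAssignNextFrameWorkArea() sets it to ~NVBIT(1), so that only the
+     * non-nextIndex buffer preserves the old SwapGroup content.)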
+ */ + NvU8 honorSwapGroupClipListBufferMask; + + /* The region within the surface to render into. */ + struct NvKmsRect destRect; + + /* + * If perEyeStereo::override == TRUE, use perEyeStereo::value to control the + * headSurface flip. + */ + struct { + NvBool override; + NvBool value; + } perEyeStereo; +}; + +/*! + * Assign an NvHsNextFrameWorkArea structure, to drive execution of + * nvHsNextFrame(). + */ +static struct NvHsNextFrameWorkArea HsAssignNextFrameWorkArea( + const NVHsChannelEvoRec *pHsChannel, + const NvHsNextFrameRequestType requestType) +{ + struct NvHsNextFrameWorkArea workArea = { }; + NvU8 destOffset; + + if ((requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_FIRST_FRAME) || + (requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK)) { + + /* + * The swapgroup first frame renders and flips both core and base to + * the back index double height headsurface swapgroup surface, just + * like a non-swapgroup headsurface flip. + */ + if (requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_FIRST_FRAME || + !pHsChannel->config.neededForSwapGroup) { + + /* + * In the non-SwapGroup case, headSurface should: + * - only render to the 'nextIndex' surface, + * - flip to the nextIndex surface, + * - increment nextIndex. + */ + workArea.dstBufferIndexStart = pHsChannel->nextIndex; + workArea.dstBufferIndexEnd = pHsChannel->nextIndex; + + workArea.doFlipToNextIndex = TRUE; + workArea.allowFlipLock = FALSE; + workArea.doFlipToDestRect = FALSE; + + workArea.doIncrementNextIndex = TRUE; + workArea.doIncrementNextOffset = FALSE; + + } else { + + /* + * In the SwapGroup case, headSurface should: + * - render to both surfaces, + * - flip to the nextOffset, + * - increment nextOffset. + */ + workArea.dstBufferIndexStart = 0; + workArea.dstBufferIndexEnd = NVKMS_HEAD_SURFACE_MAX_BUFFERS - 1; + + workArea.doFlipToNextIndex = FALSE; + + workArea.allowFlipLock = FALSE; + workArea.doFlipToDestRect = TRUE; + + workArea.doIncrementNextIndex = FALSE; + workArea.doIncrementNextOffset = TRUE; + + /* + * For VBLANK-initiated frames of SwapGroup headSurface, we want the + * surface indicated by pHsChannel->nextIndex to contain the new + * SwapGroup content, and the non-nextIndex surface to contain the + * old SwapGroup content. + * + * Therefore, set the non-nextIndex bit(s) in + * honorSwapGroupClipListBufferMask, so that we leave the old + * SwapGroup content in that case. In all other cases, we will get + * the new SwapGroup content. + */ + workArea.honorSwapGroupClipListBufferMask = + ~NVBIT(pHsChannel->nextIndex); + } + + } else { + /* + * SWAP_GROUP_READY-initiated headSurface frames are special: we render + * a new frame to the nextIndex surface, using the previous destRect + * (i.e., the location that ViewPortIn will use at the next vblank). + * However, the flip may take indefinitely long to arrive: it will wait + * for the rest of the SwapBarrier. That is okay, because + * nvHsNextFrame(VBLANK) calls between now and the flip actually + * occurring will keep updating both surfaces, using ViewPortIn to + * "flip" to the new content. + * + * Therefore, we do _not_ increment nextIndex here. Instead, we update + * nextIndex when we find that the flip completed. Until then, we keep + * nextIndex the same, so that nvHsNextFrame(VBLANK) frames know which + * surface should receive the new SwapGroup content. 
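+         *
+         * (That increment instead happens in HsCheckSwapGroupFlipDone(),
+         * once IsPreviousFlipDone() observes that the flip has landed.)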
+ */ + + const NVSwapGroupRec *pSwapGroup = + pHsChannel->pDispEvo->pSwapGroup[pHsChannel->apiHead]; + + nvAssert(requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_SWAP_GROUP_READY); + nvAssert(pHsChannel->config.neededForSwapGroup); + + workArea.dstBufferIndexStart = pHsChannel->nextIndex; + workArea.dstBufferIndexEnd = pHsChannel->nextIndex; + + workArea.doFlipToNextIndex = TRUE; + workArea.allowFlipLock = TRUE; + workArea.doFlipToDestRect = FALSE; + + workArea.doIncrementNextIndex = FALSE; + workArea.doIncrementNextOffset = FALSE; + + workArea.perEyeStereo.override = TRUE; + workArea.perEyeStereo.value = + nvHsSwapGroupGetPerEyeStereo(pSwapGroup); + } + + /* + * Pick the rect within the destination surface that headSurface should + * render into. + * + * For normal (!neededForSwapGroup) use, this should be simply: + * { 0, 0, frameSize.width, frameSize.height } + * When SwapGroups are enabled, the headSurface is allocated at + * double height and we alternate between + * { 0, 0, frameSize.width, frameSize.height } + * { 0, frameSize.height, frameSize.width, frameSize.height } + * And use ViewPortIn to flip to the updated half. + * + * The 'nextOffset' field tracks which half headSurface should use for the + * next frame. + * + * The exception to the above is SWAP_GROUP_READY: in that case, we will + * flip between surfaces, but not change ViewPortIn, so we want to use the + * _previous_ nextOffset value. + */ + if (requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_SWAP_GROUP_READY) { + destOffset = HsGetPreviousOffset(pHsChannel); + } else { + destOffset = pHsChannel->nextOffset; + } + + workArea.destRect.x = 0; + workArea.destRect.y = pHsChannel->config.frameSize.height * + destOffset; + workArea.destRect.width = pHsChannel->config.frameSize.width; + workArea.destRect.height = pHsChannel->config.frameSize.height; + + return workArea; +} + +/*! + * Issue an L2 flush if display is not coherent with the GPU. + */ +static void HsFlushL2( + const NVDevEvoRec *pDevEvo, + NVHsChannelEvoRec *pHsChannel) +{ + if (!pDevEvo->isSOCDisplay) { + return; + } + + NvPushChannelPtr p = &pHsChannel->nvPush.channel; + /* Host WFI */ + nvPushImmedVal(p, NVA06F_SUBCHANNEL_2D, NVB06F_WFI, 0); + /* Flush L2 to backing store */ + nvPushMethod(p, NVA06F_SUBCHANNEL_2D, NVB06F_MEM_OP_D, 1); + nvPushSetMethodData(p, DRF_DEF(B06F, _MEM_OP_D, _OPERATION, _L2_FLUSH_DIRTY)); +} + +/*! + * Produce the next headSurface frame. + * + * Render the frame, flip to it, and update next{Index,Offset} bookkeeping + * as necessary. + * + * \param[in,out] pHsDevice The device to render on. + * \param[in,out] pHsChannel The channel to use for rendering. + * \param[in] requestType This indicates the type of frame behavior + * desired by the caller: when FIRST_FRAME, we need + * to populate the surface in the core channel on + * pre-NVDisplay. 
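+ *
+ * (As implemented by HsAssignNextFrameWorkArea() above, the three request
+ * types behave roughly as follows:
+ *
+ *   FIRST_FRAME       render and flip the nextIndex buffer; increment
+ *                     nextIndex.
+ *   VBLANK            same as FIRST_FRAME, unless neededForSwapGroup, in
+ *                     which case render both buffers, "flip" by updating
+ *                     ViewPortIn, and increment nextOffset.
+ *   SWAP_GROUP_READY  render and flip the nextIndex buffer, but defer the
+ *                     nextIndex increment until the flip completes.)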
+ */
+void nvHsNextFrame(
+    NVHsDeviceEvoPtr pHsDevice,
+    NVHsChannelEvoPtr pHsChannel,
+    const NvHsNextFrameRequestType requestType)
+{
+    const NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo;
+    const NVHsStateOneHeadAllDisps *pHsOneHeadAllDisps =
+        &pDevEvo->apiHeadSurfaceAllDisps[pHsChannel->apiHead];
+    NvBool perEyeStereoFlip = FALSE;
+    NvU8 dstEye;
+    NvU8 eyeMask = 0;
+
+    struct NvHsNextFrameWorkArea workArea =
+        HsAssignNextFrameWorkArea(pHsChannel, requestType);
+
+    HsUpdateFlipQueueCurrent(pHsChannel);
+
+    for (dstEye = NVKMS_LEFT; dstEye < NVKMS_MAX_EYES; dstEye++) {
+
+        const NVSurfaceEvoRec *pSurfaceEvo[NVKMS_MAX_LAYERS_PER_HEAD];
+        NvBool surfacesPresent = FALSE;
+        NvU8 layer, srcEye = dstEye;
+        NvU8 dstBufferIndex;
+        enum NvKmsPixelShiftMode pixelShift = pHsChannel->config.pixelShift;
+        NvBool ret;
+
+        HsPickSrcEyeAndPixelShift(pHsChannel, dstEye, &srcEye, &pixelShift);
+
+        for (layer = 0; layer < ARRAY_LEN(pHsChannel->flipQueue); layer++) {
+            pSurfaceEvo[layer] =
+                pHsChannel->flipQueue[layer].current.pSurfaceEvo[srcEye];
+
+            surfacesPresent = surfacesPresent || (pSurfaceEvo[layer] != NULL);
+
+            perEyeStereoFlip = perEyeStereoFlip ||
+                pHsChannel->flipQueue[layer].current.perEyeStereoFlip;
+        }
+
+        /*
+         * If there are no surfaces present for this srcEye, and the dstEye is
+         * not LEFT, don't render it.
+         *
+         * This condition is limited to LEFT because:
+         * - We need to perform _a_ flip even if no source surface is provided.
+         * - We don't want to perform more rendering than absolutely
+         *   necessary.
+         */
+        if (!surfacesPresent && (dstEye != NVKMS_LEFT)) {
+            continue;
+        }
+
+        for (dstBufferIndex = workArea.dstBufferIndexStart;
+             dstBufferIndex <= workArea.dstBufferIndexEnd;
+             dstBufferIndex++) {
+
+            NvU8 thisEyeMask = 0;
+            const NvBool honorSwapGroupClipList =
+                !!(workArea.honorSwapGroupClipListBufferMask &
+                   NVBIT(dstBufferIndex));
+
+            ret = nvHs3dRenderFrame(pHsChannel,
+                                    requestType,
+                                    honorSwapGroupClipList,
+                                    dstEye,
+                                    dstBufferIndex,
+                                    pixelShift,
+                                    workArea.destRect,
+                                    pSurfaceEvo);
+            /*
+             * Record which eyes we've rendered, so that we only flip those
+             * eyes.
+             *
+             * In the case that we're looping over multiple buffer indices, we
+             * should get the same result across buffers.
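+             * (The nvAssert below checks exactly that: for each buffer index
+             * beyond the first, the per-eye render result must match.)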
+ */ + if (ret) { + thisEyeMask = NVBIT(dstEye); + } + + if (dstBufferIndex != workArea.dstBufferIndexStart) { + nvAssert((eyeMask & NVBIT(dstEye)) == + (thisEyeMask & NVBIT(dstEye))); + } + + eyeMask |= thisEyeMask; + } + } + + HsFlushL2(pDevEvo, pHsChannel); + + if (workArea.doFlipToNextIndex) { + + if (workArea.perEyeStereo.override) { + perEyeStereoFlip = workArea.perEyeStereo.value; + } + + nvHs3dReleaseSemaphore( + pHsChannel, + pHsDevice->notifiers.pSurfaceEvo, + pHsDevice->notifiers.nIsoFormat, + HsGetFrameSemaphoreOffsetInWords(pHsChannel), + NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_DISPLAYABLE, + FALSE /* allPreceedingReads */); + + nvHsFlip( + pHsDevice, + pHsChannel, + eyeMask, + perEyeStereoFlip, + pHsChannel->nextIndex, + pHsOneHeadAllDisps, + requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_FIRST_FRAME, + workArea.allowFlipLock); + HsIncrementFrameSemaphoreIndex(pHsChannel); + + // Record fullscreen/non-fullscreen swapgroup flip counts + const NVSwapGroupRec *pSwapGroup = + pHsChannel->pDispEvo->pSwapGroup[pHsChannel->apiHead]; + + if (pSwapGroup) { + HsProcFsRecordFullscreenSgFrames(pHsChannel, + pSwapGroup->swapGroupIsFullscreen); + } + + // Record the time of the last flip originating from client update + if (requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_SWAP_GROUP_READY) { + pHsChannel->lastHsClientFlipTimeUs = nvkms_get_usec(); + } + } + + if (workArea.doFlipToDestRect) { + // Viewport fake flips are only used in swapgroup configurations. + nvAssert(pHsChannel->config.neededForSwapGroup); + + if (pHsChannel->usingRgIntrForSwapGroups) { + nvHs3dPushPendingViewportFlip(pHsChannel); + } else { + HsFlipViewPortIn(pHsChannel, + workArea.destRect.x, workArea.destRect.y); + } + } + + if (workArea.doIncrementNextIndex) { + HsIncrementNextIndex(pHsDevice, pHsChannel); + } + + if (workArea.doIncrementNextOffset) { + HsIncrementNextOffset(pHsDevice, pHsChannel); + } +} + +/*! + * In response to a non-stall interrupt, check if a headsurface channel has + * completed a frame of non-swapgroup headsurface rendering and kick off a + * viewport flip to the offset that was used for that rendering. + */ +void nvHsProcessPendingViewportFlips(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 apiHead; + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead]; + NvU32 lastRenderedOffset; + + if (pHsChannel == NULL) { + continue; + } + + lastRenderedOffset = nvHs3dLastRenderedOffset(pHsChannel); + + /* + * If this channel is marked as having kicked off a frame of + * rendering, and the semaphore write of the render offset to + * NVKMS_HEADSURFACE_VIEWPORT_OFFSET_SEMAPHORE_INDEX has completed, + * then this channel is ready to make a viewport flip to that + * offset. + */ + if (pHsChannel->viewportFlipPending && + (lastRenderedOffset == HsGetPreviousOffset(pHsChannel))) { + + HsFlipViewPortIn(pHsChannel, 0 /* x */, + lastRenderedOffset * + pHsChannel->config.frameSize.height); + pHsChannel->viewportFlipPending = FALSE; + } + } + } +} + +/*! + * Record the current scanline, for procfs statistics reporting. 
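+ *
+ * Scanlines that fall within the visible region are accumulated into the
+ * histogram allocated by nvHsAllocStatistics(); scanlines that fall in the
+ * blanking period are only counted.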
+ */
+static void HsProcFsRecordScanline(
+    const NVDispEvoRec *pDispEvo,
+    const NvU32 apiHead)
+{
+#if NVKMS_PROCFS_ENABLE
+    NVHsChannelEvoRec *pHsChannel = pDispEvo->pHsChannel[apiHead];
+    NvU16 scanLine = 0;
+    NvBool inBlankingPeriod = FALSE;
+
+    if (pHsChannel->statistics.scanLine.pHistogram == NULL) {
+        return;
+    }
+
+    nvApiHeadGetScanLine(pDispEvo, apiHead, &scanLine, &inBlankingPeriod);
+
+    if (inBlankingPeriod) {
+        pHsChannel->statistics.scanLine.nInBlankingPeriod++;
+    } else {
+        pHsChannel->statistics.scanLine.nNotInBlankingPeriod++;
+
+        if (scanLine <= pHsChannel->statistics.scanLine.vVisible) {
+            pHsChannel->statistics.scanLine.pHistogram[scanLine]++;
+        } else {
+            nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
+                "HsProcFsRecordScanline(): scanLine (%d) > vVisible (%d)",
+                scanLine, pHsChannel->statistics.scanLine.vVisible);
+        }
+    }
+#endif /* NVKMS_PROCFS_ENABLE */
+}
+
+static void HsProcFsRecordPreviousFrameNotDone(
+    NVHsChannelEvoPtr pHsChannel)
+{
+#if NVKMS_PROCFS_ENABLE
+    pHsChannel->statistics.nPreviousFrameNotDone++;
+#endif
+}
+
+static void HsProcFsRecordFullscreenSgFrames(
+    NVHsChannelEvoPtr pHsChannel,
+    NvBool isFullscreen)
+{
+#if NVKMS_PROCFS_ENABLE
+    if (isFullscreen) {
+        pHsChannel->statistics.nFullscreenSgFrames++;
+    } else {
+        pHsChannel->statistics.nNonFullscreenSgFrames++;
+    }
+#endif /* NVKMS_PROCFS_ENABLE */
+}
+
+static void HsProcFsRecordOmittedNonSgHsUpdate(
+    NVHsChannelEvoPtr pHsChannel)
+{
+#if NVKMS_PROCFS_ENABLE
+    pHsChannel->statistics.nOmittedNonSgHsUpdates++;
+#endif
+}
+
+/*!
+ * Determine if we've flipped to the previous frame.
+ *
+ * When we program the flip method, we reset the notifier to NOT_BEGUN, and when
+ * EVO performs the flip, it changes the notifier to BEGUN.
+ *
+ * Find the notifier slot for the previous frame, parse its notifier, and return
+ * whether it is BEGUN.
+ */
+static NvBool IsPreviousFlipDone(NVHsChannelEvoPtr pHsChannel)
+{
+    const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
+    const NvU32 apiHead = pHsChannel->apiHead;
+    const NvU32 sd = pDispEvo->displayOwner;
+    const NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice;
+    const NVHsNotifiersRec *pHsNotifiers = &pHsDevice->notifiers;
+    const NVHsNotifiersOneSdRec *pHsNotifiersOneSd = pHsNotifiers->sd[sd].ptr;
+    const NvU8 nextSlot = pHsNotifiers->sd[sd].apiHead[apiHead].nextSlot;
+    struct nvKmsParsedNotifier parsed = { };
+
+    const NvU8 prevSlot =
+        A_minus_b_with_wrap_U8(nextSlot, 1,
+                               NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD);
+
+    nvKmsParseNotifier(pHsNotifiers->nIsoFormat, FALSE /* overlay */,
+                       prevSlot, pHsNotifiersOneSd->notifier[apiHead], &parsed);
+
+    return parsed.status == NVKMS_NOTIFIER_STATUS_BEGUN;
+}
+
+/*!
+ * Determine if we've flipped to the previous frame.
+ */
+static NvBool IsPreviousFrameDone(NVHsChannelEvoPtr pHsChannel)
+{
+    if (pHsChannel->config.neededForSwapGroup) {
+        /*
+         * XXX NVKMS HEADSURFACE TODO: Somehow determine if the previous
+         * ViewPortIn update for this head was latched.
+         */
+
+        /*
+         * XXX NVKMS HEADSURFACE TODO: In the absence of a mechanism to
+         * determine if ViewPortIn was latched, we would normally rely on this
+         * callback arriving once per vblank. Unfortunately, bug 2086726 can
+         * cause us to get called twice per vblank. WAR this for now by
+         * ignoring callbacks that arrive in a very small window of the previous
+         * callback.
+ * + * Throttling is now implemented using the RG line 1 interrupt + * headsurface rendering mechanism, so this limit can be lowered once + * the old vblank-triggered viewport flipping mechanism is removed. + */ + + const NvU64 oldUSec = pHsChannel->lastCallbackUSec; + const NvU64 newUSec = nvkms_get_usec(); + + /* + * This threshold is somewhat arbitrary. In bug 2086726, we see the + * callback get called from both the ISR and the bottom half, which are + * usually within ~200 usec of each other on an idle system. There + * shouldn't be a danger of mistaking legitimate periodic callbacks with + * this small threshold: 500 usec per refresh would require a 2000 Hz + * mode. + */ + const NvU64 thresholdUSec = 500; + + nvAssert(!pHsChannel->usingRgIntrForSwapGroups); + + if ((newUSec > oldUSec) && + (newUSec - oldUSec) < thresholdUSec) { + return FALSE; + } + + pHsChannel->lastCallbackUSec = newUSec; + + return TRUE; + } else { + return IsPreviousFlipDone(pHsChannel); + } +} + +/*! + * If the client provided a notifier surface with a real flip + * request while swap groups were enabled, write to that + * notifier with the BEGUN status and the most recent + * headsurface notifier timestamp to emulate what the client + * would observe if their notifier was used in hardware. + */ +static void HsUpdateClientNotifier(NVHsChannelEvoPtr pHsChannel) +{ + const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo; + const NvU32 apiHead = pHsChannel->apiHead; + const NvU32 sd = pDispEvo->displayOwner; + const NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice; + const NVHsNotifiersRec *pHsNotifiers = &pHsDevice->notifiers; + const NVHsNotifiersOneSdRec *pHsNotifiersOneSd = pHsNotifiers->sd[sd].ptr; + const NvU8 nextSlot = pHsNotifiers->sd[sd].apiHead[apiHead].nextSlot; + struct nvKmsParsedNotifier parsed = { }; + NVFlipNIsoSurfaceEvoHwState *pClientNotifier = + &pHsChannel->flipQueue[NVKMS_MAIN_LAYER].current.completionNotifier.surface; + + if (pClientNotifier->pSurfaceEvo == NULL) { + return; + } + + const NvU8 prevSlot = + A_minus_b_with_wrap_U8(nextSlot, 1, + NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD); + + nvKmsParseNotifier(pHsNotifiers->nIsoFormat, FALSE /* overlay */, + prevSlot, pHsNotifiersOneSd->notifier[apiHead], &parsed); + + nvAssert(parsed.status == NVKMS_NOTIFIER_STATUS_BEGUN); + + /* + * XXX NVKMS HEADSURFACE TODO: Get valid timestamp through other means to + * support this on platforms with legacy HW semaphores without valid + * HW notifier timestamps in the main channel. + */ + nvAssert(parsed.timeStampValid); + + nvKmsSetNotifier(pClientNotifier->format, + FALSE /* overlay */, + pClientNotifier->offsetInWords / 4, + pClientNotifier->pSurfaceEvo->cpuAddress[sd], + parsed.timeStamp); +} + +/*! + * Check if all flips completed for this SwapGroup. If so, release the + * SwapGroup. + */ +static void HsCheckSwapGroupFlipDone( + NVDevEvoPtr pDevEvo, + NVSwapGroupRec *pSwapGroup) +{ + const NVHsDeviceEvoRec *pHsDevice = pDevEvo->pHsDevice; + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + + nvAssert(pSwapGroup != NULL); + + if (!pSwapGroup->pendingFlip) { + return; + } + + /* + * Check if all active heads in the SwapGroup have completed their flips. + * If any haven't, return early. 
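+     *
+     * (A head belongs to this SwapGroup when its pSwapGroup[apiHead] pointer
+     * matches; heads without a headSurface channel are skipped.)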
+ */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 apiHead; + for (apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->pSwapGroup); apiHead++) { + + if (pDispEvo->pSwapGroup[apiHead] == pSwapGroup) { + + NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead]; + + if (pHsChannel == NULL) { + continue; + } + + nvAssert(pHsChannel->config.neededForSwapGroup); + + if (!IsPreviousFlipDone(pHsChannel)) { + return; + } + } + } + } + + /* + * The SwapGroup is ready: update client notifiers if necessary and + * increment nextIndex for all active heads, so that subsequent frames of + * headSurface render to the next buffer. + */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 apiHead; + for (apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->pSwapGroup); apiHead++) { + + if (pDispEvo->pSwapGroup[apiHead] == pSwapGroup) { + + NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead]; + + if (pHsChannel == NULL) { + continue; + } + + nvAssert(pHsChannel->config.neededForSwapGroup); + nvAssert(IsPreviousFlipDone(pHsChannel)); + + HsUpdateClientNotifier(pHsChannel); + HsIncrementNextIndex(pHsDevice, pHsChannel); + } + } + } + + /* + * The SwapGroup is ready: release all SwapGroup members so that they can + * proceed. + */ + nvHsSwapGroupRelease(pDevEvo, pSwapGroup); +} + +/* + * Called from RG line interrupt handler to determine whether rendering a + * new frame could be skipped. + */ +static NvBool HsCanOmitNonSgHsUpdate(NVHsChannelEvoPtr pHsChannel) +{ + const NVSwapGroupRec *pHeadSwapGroup = + pHsChannel->pDispEvo->pSwapGroup[pHsChannel->apiHead]; + + /* + * When fullscreen swapgroup flipping, updating + * non-swapgroup content at vblank is unnecessary and + * dangerous, since it results in releasing client + * semaphores before their contents have actually been + * displayed. + */ + if (pHsChannel->swapGroupFlipping) { + return NV_TRUE; + } + + /* + * In the case of a fullscreen swapgroup, we can generally omit updating + * the headsurface entirely upon vblank as long as the client is + * actively rendering. All the swapgroup content has already been + * updated to the headsurface backbuffer at the client's swapbuffers + * time and there's no need to update the backbuffer again on RG line 1 + * or vblank interrupt time. + * + * There is one exception to this. If the client isn't rendering + * actively then updates to the cursor (and possibly overlays, head + * config) still require rendering an updated frame to the backbuffer. + * Thus, we will simply limit this optimization for frames that come + * within one frame time after the last recorded flip. + */ + if (pHeadSwapGroup && + pHeadSwapGroup->swapGroupIsFullscreen) { + + NvU64 nowUs = nvkms_get_usec(); + NvU64 frameTimeUs = nvEvoFrametimeUsFromTimings( + &pHsChannel->pDispEvo->apiHeadState[pHsChannel->apiHead].timings); + + if (nowUs - pHsChannel->lastHsClientFlipTimeUs < frameTimeUs) { + return NV_TRUE; + } + } + + return NV_FALSE; +} + +/*! + * Receive RG line 1 callback, in process context with nvkms_lock held. + */ +static void HsRgLine1CallbackProc(NVDispEvoRec *pDispEvo, + const NvU32 head, + NVRgLine1CallbackPtr pCallback) +{ + const NvU32 apiHead = + (NvU32)(NvUPtr)pCallback->pUserData; + NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead]; + + /* + * The pHsChannel may have been torn down between when the callback was + * generated and when this was called. Ignore spurious callbacks. 
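+     * (HsVBlankCallback() below applies the same guard.)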
+ */ + if (pHsChannel == NULL) { + return; + } + + if (pHsChannel->config.neededForSwapGroup) { + /* + * Update the non-swapgroup content on the back half of both + * headsurface surfaces, and the swapgroup content on the back half of + * the back headsurface surface, and perform a viewportoffset flip to + * the back offset. + * + * Synchronization is achieved by the following mechanism: + * + * - Before rendering a new frame, check that we aren't still scanning + * out from that half of the surface. + * - After rendering a frame, push a semaphore write with the render + * offset and a non-stall interrupt. + * - In response to the non-stall interrupt, perform the viewport + * flip to the render offset. + */ + NvU32 activeViewportOffset = + nvApiHeadGetActiveViewportOffset(pDispEvo, apiHead); + + nvAssert((activeViewportOffset == 0) || + (activeViewportOffset == pHsChannel->config.frameSize.height)); + + activeViewportOffset /= pHsChannel->config.frameSize.height; + + if (activeViewportOffset == HsGetPreviousOffset(pHsChannel)) { + /* + * The active viewport is the same as the last one we pushed, so + * it's safe to start rendering to pHsChannel->nextOffset; check if + * rendering from a previous interrupt hasn't completed yet. + */ + if (pHsChannel->viewportFlipPending) { + /* + * A non-stall interrupt hasn't been triggered since we kicked + * off the previous frame's rendering. + */ + HsProcFsRecordPreviousFrameNotDone(pHsChannel); + } else { + NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice; + + HsProcFsRecordScanline(pDispEvo, apiHead); + + if (HsCanOmitNonSgHsUpdate(pHsChannel)) { + HsProcFsRecordOmittedNonSgHsUpdate(pHsChannel); + } else { + nvHsNextFrame(pHsDevice, pHsChannel, NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK); + } + } + } else { + /* + * The viewport flip we pushed after the previous frame's rendering + * hasn't been applied in hardware yet. + */ + HsProcFsRecordPreviousFrameNotDone(pHsChannel); + } + + HsCheckSwapGroupFlipDone(pDispEvo->pDevEvo, pDispEvo->pSwapGroup[apiHead]); + } +} + +/*! + * Receive vblank callback, in process context with nvkms_lock held. + * + */ +static void HsVBlankCallback(NVDispEvoRec *pDispEvo, + NVVBlankCallbackPtr pCallbackData) +{ + const NvU32 apiHead = pCallbackData->apiHead; + NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead]; + NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice; + + /* + * The pHsChannel may have been torn down between when the vblank was + * generated and when this was called. Ignore spurious callbacks. + */ + if (pHsChannel == NULL) { + return; + } + + if (!pHsChannel->usingRgIntrForSwapGroups && + pHsChannel->config.neededForSwapGroup) { + HsCheckSwapGroupFlipDone(pDispEvo->pDevEvo, pDispEvo->pSwapGroup[apiHead]); + } + + if (pHsChannel->usingRgIntrForSwapGroups && + pHsChannel->config.neededForSwapGroup) { + // The next frame will be rendered during the RG line 1 interrupt. + return; + } + + /* + * If we have not flipped to the previous buffer, yet, we should not render + * to the next buffer. Wait until the next vblank callback. + */ + if (!IsPreviousFrameDone(pHsChannel)) { + HsProcFsRecordPreviousFrameNotDone(pHsChannel); + return; + } + + HsProcFsRecordScanline(pDispEvo, apiHead); + + /* + * XXX NVKMS HEADSURFACE TODO: evaluate whether there has been + * damage to the source buffer since the last headSurface frame. + * Only if so, perform the headSurface transformation and flip to + * the resulting headSurface buffer. 
+ * + * For headSurface bringup purposes, just always flip to the next + * headSurface buffer. + */ + + /* + * When fullscreen swapgroup flipping, updating + * non-swapgroup content at vblank is unnecessary and + * dangerous, since it results in releasing client + * semaphores before their contents have actually been + * displayed. + */ + if (!pHsChannel->swapGroupFlipping) { + nvHsNextFrame(pHsDevice, pHsChannel, + NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK); + } +} + +/*! + * Schedule vblank callbacks from resman on a specific head and subdevice. + */ +void nvHsAddVBlankCallback(NVHsChannelEvoPtr pHsChannel) +{ + NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo; + + pHsChannel->vBlankCallback = + nvApiHeadRegisterVBlankCallback(pDispEvo, + pHsChannel->apiHead, + HsVBlankCallback, + NULL, + 1 /* listIndex */); +} + +/*! + * Add an RG line 1 callback to check the swapgroup flip notifier and release + * its associated deferred request fifo. + * + * This is done in an RG line 1 callback instead of the vblank callback to WAR + * an issue where certain mode timings cause the vblank callback to fire + * slightly before LOADV causes the notifier to transition from NOT_BEGUN + * to BEGUN, causing an extra frame of delay before the next vblank occurs and + * the deferred request fifo can be released. + */ +void nvHsAddRgLine1Callback(NVHsChannelEvoPtr pHsChannel) +{ + NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo; + NvBool found; + NvU32 val; + + /* + * Use the RG line 1 interrupt to check swapgroup completion by default, + * but allow setting NVKMS_DELAY_SWAPGROUP_CHECK=0 by regkey to revert to + * the old method of checking during vblank for debugging purposes. + */ + found = nvGetRegkeyValue(pDispEvo->pDevEvo, "NVKMS_DELAY_SWAPGROUP_CHECK", + &val); + + if (found && (val == 0)) { + return; + } + + pHsChannel->pRgIntrCallback = + nvApiHeadAddRgLine1Callback(pDispEvo, + pHsChannel->apiHead, + HsRgLine1CallbackProc, + (void*)(NvUPtr)pHsChannel->apiHead); + + if (pHsChannel->pRgIntrCallback == NULL) { + nvAssert(!"Failed to register headSurface RG line 1 interrupt"); + } else { + pHsChannel->usingRgIntrForSwapGroups = TRUE; + } +} + +/*! + * Cancel RG line 1 callbacks from resman on the specified head and subdevice. + * + * The same limitations regarding leftover vblank callbacks after vblank + * callbacks are disabled in nvHsRemoveVblankCallback apply to RG callbacks. + */ +void nvHsRemoveRgLine1Callback(NVHsChannelEvoPtr pHsChannel) +{ + const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo; + + if (pHsChannel->usingRgIntrForSwapGroups) { + nvRmRemoveRgLine1Callback(pDispEvo, + pHsChannel->pRgIntrCallback); + pHsChannel->pRgIntrCallback = NULL; + } +} + +/*! + * Cancel vblank callbacks from resman on the specified head and subdevice. + * + * Note that there could currently be callbacks in flight. We should be + * prepared to handle a spurious callback after cancelling the callbacks here. + * + * XXX NVKMS HEADSURFACE TODO: It would be better to: + * + * (a) Remove the vblank callback before the modeset that disables headSurface. + * (b) Drain/cancel any in flight callbacks while holding the nvkms_lock. + * + * A mechanism like that should avoid spurious callbacks. 
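+ *
+ * (Until then, the callbacks themselves tolerate a torn-down channel: both
+ * HsRgLine1CallbackProc() and HsVBlankCallback() bail out when pHsChannel
+ * is NULL.)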
+ */ +void nvHsRemoveVBlankCallback(NVHsChannelEvoPtr pHsChannel) +{ + NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo; + + nvApiHeadUnregisterVBlankCallback(pDispEvo, + pHsChannel->vBlankCallback); + pHsChannel->vBlankCallback = NULL; +} + +void nvHsAllocStatistics( + NVHsChannelEvoRec *pHsChannel) +{ +#if NVKMS_PROCFS_ENABLE + const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo; + const NvU32 apiHead = pHsChannel->apiHead; + const NVHwModeTimingsEvo *pTimings = + &pDispEvo->apiHeadState[apiHead].timings; + NvU32 n; + + nvkms_memset(&pHsChannel->statistics, 0, sizeof(pHsChannel->statistics)); + + pHsChannel->statistics.scanLine.vVisible = nvEvoVisibleHeight(pTimings); + + n = pHsChannel->statistics.scanLine.vVisible + 1; + + pHsChannel->statistics.scanLine.pHistogram = nvCalloc(1, sizeof(NvU64) * n); +#endif /* NVKMS_PROCFS_ENABLE */ +} + +void nvHsFreeStatistics( + NVHsChannelEvoRec *pHsChannel) +{ +#if NVKMS_PROCFS_ENABLE + nvFree(pHsChannel->statistics.scanLine.pHistogram); + nvkms_memset(&pHsChannel->statistics, 0, sizeof(pHsChannel->statistics)); +#endif /* NVKMS_PROCFS_ENABLE */ +} + +#if NVKMS_PROCFS_ENABLE + +static const struct { + const char *before; + const char *after; +} HsProcFsIndentTable[] = { + [0] = { .before = "", .after = " " }, + [1] = { .before = " ", .after = " " }, + [2] = { .before = " ", .after = " " }, + [3] = { .before = " ", .after = " " }, + [5] = { .before = " ", .after = "" }, +}; + +static const char *HsProcFsIndentBefore(NvU8 indent) +{ + nvAssert(indent < ARRAY_LEN(HsProcFsIndentTable)); + + return HsProcFsIndentTable[indent].before; +} + +static const char *HsProcFsIndentAfter(NvU8 indent) +{ + nvAssert(indent < ARRAY_LEN(HsProcFsIndentTable)); + + return HsProcFsIndentTable[indent].after; +} + +static void HsProcFsGpuTime( + NVEvoInfoStringRec *pInfoString, + const NvU64 nFrames, + const NvU64 gpuTimeSpent, + const NvU8 indent) +{ + /* + * Use nFrames - 1 to compute averageGpuTimeNs: the nvHs3dRenderFrame() path + * increments nFrames at the end of rendering a frame, but it only updates + * gpuTimeSpent at the start of rendering the _next_ frame. I.e., + * gpuTimeSpent has time for nFrames - 1 frames. + */ + const NvU64 averageGpuTimeNs = + (nFrames <= 1) ? 0 : (gpuTimeSpent / (nFrames - 1)); + const NvU64 averageGpuTimeUs = (averageGpuTimeNs + 500) / 1000; + const NvU64 nFramesToReport = (nFrames <= 1) ? 
0 : nFrames - 1; + + nvEvoLogInfoString( + pInfoString, " %savg GPU time / frame%s : " + "%" NvU64_fmtu ".%03" NvU64_fmtu " msec " + "(%" NvU64_fmtu " nsec / %" NvU64_fmtu " frames)", + HsProcFsIndentBefore(indent), + HsProcFsIndentAfter(indent), + averageGpuTimeUs / 1000, + averageGpuTimeUs % 1000, + gpuTimeSpent, + nFramesToReport); +} + +static void HsProcFsFrameStatisticsOneEye( + NVEvoInfoStringRec *pInfoString, + const NVHsChannelEvoRec *pHsChannel, + const NvU8 eye, + const NvU8 slot, + const NvU8 indent) +{ + const NVHsChannelStatisticsOneEyeRec *pPerEye = + &pHsChannel->statistics.perEye[eye][slot]; + + const NvU64 framesPerMs = pPerEye->fps.framesPerMs; + + nvEvoLogInfoString( + pInfoString, + " %snFrames%s : %" NvU64_fmtu, + HsProcFsIndentBefore(indent), + HsProcFsIndentAfter(indent), + pPerEye->nFrames); + + nvEvoLogInfoString( + pInfoString, " %sFPS (computed every 5s)%s: " + "%" NvU64_fmtu ".%03" NvU64_fmtu, + HsProcFsIndentBefore(indent), + HsProcFsIndentAfter(indent), + framesPerMs / 1000, + framesPerMs % 1000); + + HsProcFsGpuTime( + pInfoString, + pPerEye->nFrames, + pPerEye->gpuTimeSpent, + indent); +} + +static void HsProcFsFrameStatisticsOneSlot( + NVEvoInfoStringRec *pInfoString, + const NVHsChannelEvoRec *pHsChannel, + const NvU8 slot, + const NvU8 indent) +{ + const char *eyeLabel[] = { + [NVKMS_LEFT] = "Left Eye ", + [NVKMS_RIGHT] = "Right Eye", + }; + + const NvBool needEyeLabel = + pHsChannel->statistics.perEye[NVKMS_RIGHT][slot].nFrames != 0; + NvU8 eye; + + for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) { + + NvU8 localIndent = 0; + + if (pHsChannel->statistics.perEye[eye][slot].nFrames == 0) { + continue; + } + + if (needEyeLabel) { + nvEvoLogInfoString( + pInfoString, " %s%s%s :", + HsProcFsIndentBefore(indent), + eyeLabel[eye], + HsProcFsIndentAfter(indent)); + localIndent++; + } + + HsProcFsFrameStatisticsOneEye( + pInfoString, + pHsChannel, + eye, + slot, + indent + localIndent); + } +} + +static void HsProcFsFrameStatistics( + NVEvoInfoStringRec *pInfoString, + const NVHsChannelEvoRec *pHsChannel) +{ + NvU8 slot; + + if (pHsChannel->config.neededForSwapGroup) { + nvEvoLogInfoString(pInfoString, + " VBLANK frames :"); + + nvEvoLogInfoString(pInfoString, + " Old swapGroup content :"); + + slot = Hs3dStatisticsGetSlot( + pHsChannel, + NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK, 0, + TRUE /* honorSwapGroupClipList */); + + HsProcFsFrameStatisticsOneSlot(pInfoString, pHsChannel, slot, 2); + + nvEvoLogInfoString(pInfoString, + " New swapGroup content :"); + + slot = Hs3dStatisticsGetSlot( + pHsChannel, + NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK, 0, + FALSE /* honorSwapGroupClipList */); + + HsProcFsFrameStatisticsOneSlot(pInfoString, pHsChannel, slot, 2); + + nvEvoLogInfoString(pInfoString, + " SWAP_GROUP_READY frames :"); + + slot = Hs3dStatisticsGetSlot( + pHsChannel, + NV_HS_NEXT_FRAME_REQUEST_TYPE_SWAP_GROUP_READY, 0, + FALSE /* honorSwapGroupClipList */); + + HsProcFsFrameStatisticsOneSlot(pInfoString, pHsChannel, slot, 1); + + } else { + const NvU8 indent = 0; /* start with no indentation */ + + slot = Hs3dStatisticsGetSlot( + pHsChannel, + NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK, 0, + FALSE); + + HsProcFsFrameStatisticsOneSlot(pInfoString, pHsChannel, slot, indent); + } +} + +static void HsProcFsScanLine( + NVEvoInfoStringRec *pInfoString, + const NVHsChannelEvoRec *pHsChannel) +{ + NvU16 i; + + nvEvoLogInfoString(pInfoString, + " scanLine information :"); + + nvEvoLogInfoString(pInfoString, + " nInBlankingPeriod : %" NvU64_fmtu, + 
pHsChannel->statistics.scanLine.nInBlankingPeriod); + nvEvoLogInfoString(pInfoString, + " nNotInBlankingPeriod : %" NvU64_fmtu, + pHsChannel->statistics.scanLine.nNotInBlankingPeriod); + nvEvoLogInfoString(pInfoString, + " vVisible : %d", + pHsChannel->statistics.scanLine.vVisible); + + if (pHsChannel->statistics.scanLine.pHistogram == NULL) { + + nvEvoLogInfoString(pInfoString, + " scanline histogram : failed allocation"); + } else { + + nvEvoLogInfoString(pInfoString, + " scanline histogram :"); + + for (i = 0; i <= pHsChannel->statistics.scanLine.vVisible; i++) { + + if (pHsChannel->statistics.scanLine.pHistogram[i] != 0) { + nvEvoLogInfoString(pInfoString, + " scanLine[%04d] : %" NvU64_fmtu, + i, pHsChannel->statistics.scanLine.pHistogram[i]); + } + } + } +} + +static void HsProcFsFlipQueueOneEntry( + NVEvoInfoStringRec *pInfoString, + const NVHsLayerRequestedFlipState *pFlipState) +{ + /* + * Print the pointers by casting to NvUPtr and formatting with NvUPtr_fmtx, + * so that NULL is printed as "0x0", rather than "(null)". + */ + + nvEvoLogInfoString(pInfoString, + " pSurfaceEvo(L,R) : 0x%" NvUPtr_fmtx ", 0x%" NvUPtr_fmtx, + (NvUPtr)pFlipState->pSurfaceEvo[NVKMS_LEFT], + (NvUPtr)pFlipState->pSurfaceEvo[NVKMS_RIGHT]); + + if (!pFlipState->syncObject.usingSyncpt) { + nvEvoLogInfoString(pInfoString, + " semaphore : " + "acquire pSurfaceEvo: 0x%" NvUPtr_fmtx ", " + "release pSurfaceEvo: 0x%" NvUPtr_fmtx ", " + "acquire value: 0x%08x, " + "release value: 0x%08x", + (NvUPtr)pFlipState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo, + (NvUPtr)pFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo, + pFlipState->syncObject.u.semaphores.acquireValue, + pFlipState->syncObject.u.semaphores.releaseValue); + } +} + +static void HsProcFsFlipQueue( + NVEvoInfoStringRec *pInfoString, + const NVHsChannelEvoRec *pHsChannel) +{ + const NVHsChannelFlipQueueEntry *pEntry; + NvU8 layer; + + for (layer = 0; layer < ARRAY_LEN(pHsChannel->flipQueue); layer++) { + + const char *layerString[NVKMS_MAX_LAYERS_PER_HEAD] = { + [NVKMS_MAIN_LAYER] = "(main) ", + [NVKMS_OVERLAY_LAYER] = "(overlay)", + }; + + nvEvoLogInfoString(pInfoString, + " flipQueue%s :", layerString[layer]); + + nvEvoLogInfoString(pInfoString, + " current :"); + + HsProcFsFlipQueueOneEntry(pInfoString, + &pHsChannel->flipQueue[layer].current); + + nvListForEachEntry(pEntry, + &pHsChannel->flipQueue[layer].queue, + flipQueueEntry) { + + nvEvoLogInfoString(pInfoString, + " pending :"); + + HsProcFsFlipQueueOneEntry(pInfoString, &pEntry->hwState); + } + } +} + +static const char *HsGetEyeMaskString(const NvU8 eyeMask) +{ + if (eyeMask == NVBIT(NVKMS_LEFT)) { + return "L"; + } else { + nvAssert(eyeMask == (NVBIT(NVKMS_LEFT) | NVBIT(NVKMS_RIGHT))); + return "L|R"; + } +} + +static const char *HsGetPixelShiftString( + const enum NvKmsPixelShiftMode pixelShift) +{ + switch (pixelShift) { + case NVKMS_PIXEL_SHIFT_NONE: return "none"; + case NVKMS_PIXEL_SHIFT_4K_TOP_LEFT: return "4kTopLeft"; + case NVKMS_PIXEL_SHIFT_4K_BOTTOM_RIGHT: return "4kBottomRight"; + case NVKMS_PIXEL_SHIFT_8K: return "8k"; + } + + return "unknown"; +} + +static void HsProcFsTransform( + NVEvoInfoStringRec *pInfoString, + const NVHsChannelEvoRec *pHsChannel) +{ + nvEvoLogInfoString(pInfoString, + " transform matrix : " + "{ { 0x%08x, 0x%08x, 0x%08x },", + F32viewAsNvU32(pHsChannel->config.transform.m[0][0]), + F32viewAsNvU32(pHsChannel->config.transform.m[0][1]), + F32viewAsNvU32(pHsChannel->config.transform.m[0][2])); + + nvEvoLogInfoString(pInfoString, + " : " + " { 
0x%08x, 0x%08x, 0x%08x },", + F32viewAsNvU32(pHsChannel->config.transform.m[1][0]), + F32viewAsNvU32(pHsChannel->config.transform.m[1][1]), + F32viewAsNvU32(pHsChannel->config.transform.m[1][2])); + + nvEvoLogInfoString(pInfoString, + " : " + " { 0x%08x, 0x%08x, 0x%08x } }", + F32viewAsNvU32(pHsChannel->config.transform.m[2][0]), + F32viewAsNvU32(pHsChannel->config.transform.m[2][1]), + F32viewAsNvU32(pHsChannel->config.transform.m[2][2])); +} + +static void HsProcFsStaticWarpMesh( + NVEvoInfoStringRec *pInfoString, + const NVHsChannelEvoRec *pHsChannel) +{ + nvEvoLogInfoString(pInfoString, + " staticWarpMesh : " + "{ { 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x },", + pHsChannel->config.staticWarpMesh.vertex[0].x, + pHsChannel->config.staticWarpMesh.vertex[0].y, + pHsChannel->config.staticWarpMesh.vertex[0].u, + pHsChannel->config.staticWarpMesh.vertex[0].v, + pHsChannel->config.staticWarpMesh.vertex[0].r, + pHsChannel->config.staticWarpMesh.vertex[0].q); + + nvEvoLogInfoString(pInfoString, + " : " + " { 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x },", + pHsChannel->config.staticWarpMesh.vertex[1].x, + pHsChannel->config.staticWarpMesh.vertex[1].y, + pHsChannel->config.staticWarpMesh.vertex[1].u, + pHsChannel->config.staticWarpMesh.vertex[1].v, + pHsChannel->config.staticWarpMesh.vertex[1].r, + pHsChannel->config.staticWarpMesh.vertex[1].q); + + nvEvoLogInfoString(pInfoString, + " : " + " { 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x },", + pHsChannel->config.staticWarpMesh.vertex[2].x, + pHsChannel->config.staticWarpMesh.vertex[2].y, + pHsChannel->config.staticWarpMesh.vertex[2].u, + pHsChannel->config.staticWarpMesh.vertex[2].v, + pHsChannel->config.staticWarpMesh.vertex[2].r, + pHsChannel->config.staticWarpMesh.vertex[2].q); + + nvEvoLogInfoString(pInfoString, + " : " + " { 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x } }", + pHsChannel->config.staticWarpMesh.vertex[3].x, + pHsChannel->config.staticWarpMesh.vertex[3].y, + pHsChannel->config.staticWarpMesh.vertex[3].u, + pHsChannel->config.staticWarpMesh.vertex[3].v, + pHsChannel->config.staticWarpMesh.vertex[3].r, + pHsChannel->config.staticWarpMesh.vertex[3].q); +} + +static const char *HsProcFsGetNeededForString( + const NVHsChannelEvoRec *pHsChannel) +{ + if (pHsChannel->config.neededForModeset && + pHsChannel->config.neededForSwapGroup) { + return "modeset, swapgroup"; + } + + if (pHsChannel->config.neededForModeset && + !pHsChannel->config.neededForSwapGroup) { + return "modeset"; + } + + if (!pHsChannel->config.neededForModeset && + pHsChannel->config.neededForSwapGroup) { + return "swapgroup"; + } + + return "unknown"; +} + +static void HsProcFsFrameSemaphores( + NVEvoInfoStringRec *pInfoString, + const NVHsChannelEvoRec *pHsChannel) +{ + const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo; + const NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice; + const NvU32 sd = pDispEvo->displayOwner; + const NVHsNotifiersOneSdRec *p = pHsDevice->notifiers.sd[sd].ptr; + const NvGpuSemaphore *pSema = + (const NvGpuSemaphore *)p->semaphore[pHsChannel->apiHead]; + + NvU8 buffer; + + for (buffer = 0; buffer < NVKMS_HEAD_SURFACE_MAX_BUFFERS; buffer++) { + nvEvoLogInfoString(pInfoString, + " frameSemaphore[%d] : 0x%0x", + buffer, + pSema[buffer].data[0]); + } +} + +void nvHsProcFs( + NVEvoInfoStringRec *pInfoString, + NVDevEvoRec *pDevEvo, + NvU32 dispIndex, + NvU32 apiHead) +{ + NVDispEvoPtr pDispEvo = pDevEvo->pDispEvo[dispIndex]; + const NVHsChannelEvoRec *pHsChannel = pDispEvo->pHsChannel[apiHead]; + const 
NVHsStateOneHeadAllDisps *pHsOneHeadAllDisps = + &pDevEvo->apiHeadSurfaceAllDisps[apiHead]; + + if (pHsChannel == NULL) { + nvEvoLogInfoString(pInfoString, + " headSurface[head:%02d] : disabled", apiHead); + return; + } + + nvEvoLogInfoString(pInfoString, + " headSurface[head:%02d] : " + "enabled (needed for: %s)", + apiHead, HsProcFsGetNeededForString(pHsChannel)); + + HsProcFsFrameStatistics(pInfoString, pHsChannel); + + nvEvoLogInfoString(pInfoString, + " nextIndex : %d", + pHsChannel->nextIndex); + + nvEvoLogInfoString(pInfoString, + " nextOffset : %d", + pHsChannel->nextOffset); + + nvEvoLogInfoString(pInfoString, + " nPreviousFrameNotDone : %" NvU64_fmtu, + pHsChannel->statistics.nPreviousFrameNotDone); + + nvEvoLogInfoString(pInfoString, + " nOmittedNonSgHsUpdates : %" NvU64_fmtu, + pHsChannel->statistics.nOmittedNonSgHsUpdates); + + nvEvoLogInfoString(pInfoString, + " nFullscreenSgFrames : %" NvU64_fmtu, + pHsChannel->statistics.nFullscreenSgFrames); + + nvEvoLogInfoString(pInfoString, + " nNonFullscreenSgFrames : %" NvU64_fmtu, + pHsChannel->statistics.nNonFullscreenSgFrames); + + nvEvoLogInfoString(pInfoString, + " viewPortIn : %d x %d +%d +%d", + pHsChannel->config.viewPortIn.width, + pHsChannel->config.viewPortIn.height, + pHsChannel->config.viewPortIn.x, + pHsChannel->config.viewPortIn.y); + + nvEvoLogInfoString(pInfoString, + " viewPortOut : %d x %d +%d +%d", + pHsChannel->config.viewPortOut.width, + pHsChannel->config.viewPortOut.height, + pHsChannel->config.viewPortOut.x, + pHsChannel->config.viewPortOut.y); + + nvEvoLogInfoString(pInfoString, + " frameSize : %d x %d", + pHsChannel->config.frameSize.width, + pHsChannel->config.frameSize.height); + + nvEvoLogInfoString(pInfoString, + " surfaceSize : %d x %d", + pHsChannel->config.surfaceSize.width, + pHsChannel->config.surfaceSize.height); + + nvEvoLogInfoString(pInfoString, + " stagingSurfaceSize : %d x %d", + pHsChannel->config.stagingSurfaceSize.width, + pHsChannel->config.stagingSurfaceSize.height); + + nvEvoLogInfoString(pInfoString, + " allDispsSurfaceSize : %d x %d", + pHsOneHeadAllDisps->size.width, + pHsOneHeadAllDisps->size.height); + + nvEvoLogInfoString(pInfoString, + " allDispsStagingSize : %d x %d", + pHsOneHeadAllDisps->stagingSize.width, + pHsOneHeadAllDisps->stagingSize.height); + + nvEvoLogInfoString(pInfoString, + " allDispsSurfaceCount : %d", + pHsOneHeadAllDisps->surfaceCount); + + nvEvoLogInfoString(pInfoString, + " eyeMask : %s", + HsGetEyeMaskString(pHsChannel->config.eyeMask)); + + nvEvoLogInfoString(pInfoString, + " pixelShift : %s", + HsGetPixelShiftString(pHsChannel->config.pixelShift)); + + HsProcFsTransform(pInfoString, pHsChannel); + + HsProcFsStaticWarpMesh(pInfoString, pHsChannel); + + HsProcFsFlipQueue(pInfoString, pHsChannel); + + HsProcFsFrameSemaphores(pInfoString, pHsChannel); + + HsProcFsScanLine(pInfoString, pHsChannel); +} +#endif /* NVKMS_PROCFS_ENABLE */ diff --git a/src/nvidia-modeset/src/nvkms-hw-flip.c b/src/nvidia-modeset/src/nvkms-hw-flip.c new file mode 100644 index 0000000..cb62913 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-hw-flip.c @@ -0,0 +1,3359 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms-dma.h"
+#include "nvkms-evo.h"
+#include "nvkms-hw-flip.h"
+#include "nvkms-utils-flip.h"
+#include "nvkms-flip-workarea.h"
+#include "nvkms-surface.h"
+#include "nvkms-prealloc.h"
+#include "nvkms-private.h"
+#include "nvkms-rm.h"
+#include "nvkms-vrr.h"
+#include "nvkms-cursor.h"
+#include "nvkms-types.h"
+#include "nvkms-dpy.h"
+#include "nvkms-lut.h"
+#include "nvkms-softfloat.h"
+#include "nvkms-ctxdma.h"
+
+#include "nvkms-sync.h"
+
+#include "nvkms-difr.h"
+
+static void SchedulePostFlipIMPTimer(NVDevEvoPtr pDevEvo);
+
+// The EVO .mfs file defines the maximum minPresentInterval to be 8.
+#define NV_MAX_SWAP_INTERVAL 8
+
+static NvBool AssignPreSyncptEvoHwState(
+    const NVDevEvoRec *pDevEvo,
+    const struct NvKmsChannelSyncObjects *pChannelSyncObjects,
+    NVFlipSyncObjectEvoHwState *pFlipSyncObject)
+{
+    NvBool ret;
+    NvU32 id = 0;
+    NvU32 value;
+    enum NvKmsSyncptType preType;
+
+    nvAssert(pChannelSyncObjects->useSyncpt);
+
+    preType = pChannelSyncObjects->u.syncpts.pre.type;
+
+    if (preType == NVKMS_SYNCPT_TYPE_NONE) {
+        return TRUE;
+    }
+
+    if (preType == NVKMS_SYNCPT_TYPE_FD) {
+        /*! Get id from fd using nvhost API */
+        NvKmsSyncPtOpParams params = { };
+        params.fd_to_id_and_thresh.fd =
+            pChannelSyncObjects->u.syncpts.pre.u.fd;
+        ret = nvkms_syncpt_op(NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH,
+                              &params);
+        if (!ret) {
+            return FALSE;
+        }
+        id = params.fd_to_id_and_thresh.id;
+        value = params.fd_to_id_and_thresh.thresh;
+    } else {
+        id = pChannelSyncObjects->u.syncpts.pre.u.raw.id;
+        value = pChannelSyncObjects->u.syncpts.pre.u.raw.value;
+    }
+    if (id >= NV_SYNCPT_GLOBAL_TABLE_LENGTH) {
+        return FALSE;
+    }
+    /*! Fill pre-syncpt related information in hardware state */
+    pFlipSyncObject->u.syncpts.preSyncpt = id;
+    pFlipSyncObject->u.syncpts.preValue = value;
+    pFlipSyncObject->u.syncpts.isPreSyncptSpecified = TRUE;
+    pFlipSyncObject->usingSyncpt = TRUE;
+
+    return TRUE;
+}
+
+static NvBool AssignPostSyncptEvoHwState(
+    const NVDevEvoRec *pDevEvo,
+    NVEvoChannel *pChannel,
+    const struct NvKmsChannelSyncObjects *pChannelSyncObjects,
+    NVFlipSyncObjectEvoHwState *pFlipSyncObject)
+{
+    enum NvKmsSyncptType postType;
+    NvU32 threshold;
+
+    nvAssert(pChannelSyncObjects->useSyncpt);
+
+    postType = pChannelSyncObjects->u.syncpts.requestedPostType;
+
+    /*!
+     * It is possible that a syncpt is mentioned but a post-syncpt
+     * is not specified (the case where only a pre-syncpt is used).
+     */
+    if (postType == NVKMS_SYNCPT_TYPE_NONE) {
+        return TRUE;
+    }
+
+    /*! return threshold to caller, but increase it only when programming hw */
+    threshold = pChannel->postSyncpt.syncptMaxVal + 1;
+
+    /*! each channel is associated with one post-syncpt */
+    pFlipSyncObject->u.syncpts.surfaceDesc =
+        pChannel->postSyncpt.surfaceDesc;
+    pFlipSyncObject->u.syncpts.postValue = threshold;
+
+    /*
+     * AllocPostSyncptPerChannel()->AllocSyncpt() sets allocated to TRUE
+     * when postSyncpt is allocated/valid.
+     */
+    pFlipSyncObject->u.syncpts.isPostSyncptSpecified =
+        pChannel->postSyncpt.allocated;
+
+    pFlipSyncObject->usingSyncpt = TRUE;
+
+    return TRUE;
+}
+
+void nvFillPostSyncptReplyOneChannel(
+    NVEvoChannel *pChannel,
+    enum NvKmsSyncptType postType,
+    struct NvKmsSyncpt *postSyncpt,
+    const NVFlipSyncObjectEvoHwState *pHwSyncObject)
+{
+    if (postType == NVKMS_SYNCPT_TYPE_RAW) {
+        postSyncpt->u.raw.id = pChannel->postSyncpt.id;
+        postSyncpt->u.raw.value = pHwSyncObject->u.syncpts.postValue;
+        postSyncpt->type = NVKMS_SYNCPT_TYPE_RAW;
+    } else if (postType == NVKMS_SYNCPT_TYPE_FD) {
+        NvBool ret = TRUE;
+        NvKmsSyncPtOpParams params = { };
+        params.id_and_thresh_to_fd.id = pChannel->postSyncpt.id;
+        params.id_and_thresh_to_fd.thresh =
+            pHwSyncObject->u.syncpts.postValue;
+
+        ret = nvkms_syncpt_op(NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD, &params);
+        if (!ret) {
+            nvAssert(!"Failed syncpt op ID_AND_THRESH_TO_FD");
+            return;
+        }
+        postSyncpt->u.fd = params.id_and_thresh_to_fd.fd;
+        postSyncpt->type = NVKMS_SYNCPT_TYPE_FD;
+    }
+}
+
+void nvClearFlipEvoHwState(
+    NVFlipEvoHwState *pFlipState)
+{
+    NvU32 i;
+
+    nvkms_memset(pFlipState, 0, sizeof(*pFlipState));
+
+    pFlipState->olutFpNormScale = NVKMS_OLUT_FP_NORM_SCALE_DEFAULT;
+
+    for (i = 0; i < ARRAY_LEN(pFlipState->layer); i++) {
+        pFlipState->layer[i].cscMatrix = NVKMS_IDENTITY_CSC_MATRIX;
+    }
+}
+
+/*!
+ * Initialize NVFlipEvoHwState with a current snapshot from headState.
+ */
+void nvInitFlipEvoHwState(
+    const NVDevEvoRec *pDevEvo,
+    const NvU32 sd,
+    const NvU32 head,
+    NVFlipEvoHwState *pFlipState)
+{
+    NVDispEvoRec *pDispEvo = pDevEvo->gpus[sd].pDispEvo;
+    const NVEvoSubDevHeadStateRec *pSdHeadState;
+    const NVDispHeadStateEvoRec *pHeadState;
+
+    NvU32 i;
+
+    nvClearFlipEvoHwState(pFlipState);
+
+    if (!nvHeadIsActive(pDispEvo, head)) {
+        return;
+    }
+
+    pSdHeadState = &pDevEvo->gpus[sd].headState[head];
+    pHeadState = &pDispEvo->headState[head];
+
+    pFlipState->viewPortPointIn = pSdHeadState->viewPortPointIn;
+    pFlipState->cursor = pSdHeadState->cursor;
+
+    ct_assert(ARRAY_LEN(pFlipState->layer) == ARRAY_LEN(pSdHeadState->layer));
+
+    for (i = 0; i < ARRAY_LEN(pFlipState->layer); i++) {
+        pFlipState->layer[i] = pSdHeadState->layer[i];
+    }
+
+    // pFlipState->usage describes the usage bounds that will be necessary after
+    // this flip is complete. Initialize it using pSdHeadState->targetUsage,
+    // which describes the usage bounds that will be required just before this
+    // flip occurs, rather than pSdHeadState->usage, which describes the usage
+    // bounds currently programmed into the hardware.
+    //
+    // pSdHeadState->usage may have higher bounds than pSdHeadState->targetUsage
+    // if TryLoweringUsageBounds has not yet noticed that a satellite channel is
+    // no longer in use, or a flip to NULL in a satellite channel is pending but
+    // has not yet occurred.
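+    //
+    // Illustrative scenario (a sketch, not taken from the surrounding code):
+    // if an overlay was just flipped to NULL, pSdHeadState->usage may still
+    // claim the overlay is usable while pSdHeadState->targetUsage already
+    // reflects that it is not; starting from targetUsage avoids carrying such
+    // stale bounds into the new flip.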
+ pFlipState->usage = pSdHeadState->targetUsage; + + pFlipState->disableMidFrameAndDWCFWatermark = + pSdHeadState->targetDisableMidFrameAndDWCFWatermark; + + pFlipState->tf = pHeadState->tf; + + pFlipState->hdrInfoFrame.enabled = pHeadState->hdrInfoFrameOverride.enabled; + pFlipState->hdrInfoFrame.eotf = pHeadState->hdrInfoFrameOverride.eotf; + pFlipState->hdrInfoFrame.staticMetadata = + pHeadState->hdrInfoFrameOverride.staticMetadata; + + pFlipState->outputLut = pSdHeadState->outputLut; + pFlipState->olutFpNormScale = pSdHeadState->olutFpNormScale; +} + + +NvBool nvIsLayerDirty(const struct NvKmsFlipCommonParams *pParams, + const NvU32 layer) +{ + return pParams->layer[layer].surface.specified || + pParams->layer[layer].sizeIn.specified || + pParams->layer[layer].sizeOut.specified || + pParams->layer[layer].outputPosition.specified || + pParams->layer[layer].completionNotifier.specified || + pParams->layer[layer].syncObjects.specified || + pParams->layer[layer].compositionParams.specified || + pParams->layer[layer].ilut.specified || + pParams->layer[layer].tmo.specified || + pParams->layer[layer].csc.specified || + pParams->layer[layer].hdr.specified || + pParams->layer[layer].colorSpace.specified || + pParams->layer[layer].tf.specified; +} + +/*! + * Determine whether a base channel flip requires a non-tearing present mode. + * + * EVO requires a non-tearing flip when certain parameters are changing. See + * NV_DISP_BASE_STATE_ERROR_052 in dispClass024XBaseUpdateErrorChecks.mfs. + */ +static NvBool FlipRequiresNonTearingMode( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVFlipChannelEvoHwState *pOld, + const NVFlipChannelEvoHwState *pNew) +{ + // TODO: Do we need to care about the right eye here? The error check + // doesn't. + const NVSurfaceEvoRec *pOldSurf = pOld->pSurfaceEvo[NVKMS_LEFT]; + const NVSurfaceEvoRec *pNewSurf = pNew->pSurfaceEvo[NVKMS_LEFT]; + NvU32 oldHwFormat = 0, newHwFormat = 0; + + if (pOldSurf == NULL || pNewSurf == NULL) { + return TRUE; + } + + // If these functions actually return FALSE at this point, then something is + // really wrong... + if (!pDevEvo->hal->ValidateWindowFormat( + pOldSurf->format, NULL, &oldHwFormat)) { + nvAssert(FALSE); + } + + if (!pDevEvo->hal->ValidateWindowFormat( + pNewSurf->format, NULL, &newHwFormat)) { + nvAssert(FALSE); + } + + // Commented entries are things checked in the .mfs that are not yet + // supported in NVKMS. + return // SuperSample + oldHwFormat != newHwFormat || + // Gamma + // Layout (i.e. frame, field1, or field2) + pOldSurf->widthInPixels != pNewSurf->widthInPixels || + pOldSurf->heightInPixels != pNewSurf->heightInPixels || + pOldSurf->layout != pNewSurf->layout || + // UseGainOfs + pOld->inputLut.pLutSurfaceEvo != pNew->inputLut.pLutSurfaceEvo || + pOld->inputLut.offset != pNew->inputLut.offset || + pOld->tmoLut.pLutSurfaceEvo != pNew->tmoLut.pLutSurfaceEvo || + pOld->tmoLut.offset != pNew->tmoLut.offset; + // NewOutputLut +} + + +/*! + * Apply flip overrides if necessary. + * + * 1. Override swap intervals for VRR. + * 2. If the flip is changing certain parameters, override the tearing mode. + */ +static NvBool ApplyBaseFlipOverrides( + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + NVFlipChannelEvoHwState *pNew, + NvBool allowVrr) +{ + const NVDispEvoRec *pDispEvo = pDevEvo->gpus[sd].pDispEvo; + const NVFlipChannelEvoHwState *pOld = + &pDevEvo->gpus[sd].headState[head].layer[NVKMS_MAIN_LAYER]; + + // Apply VRR swap interval overrides. 
+    //
+    // Note that this applies the overrides whenever the client requests VRR and
+    // VRR is enabled, regardless of whether actually activating it later
+    // succeeds.
+    if (allowVrr) {
+        if (!nvHeadIsActive(pDispEvo, head)) {
+            //
+            // XXX If VRR is allowed, then the modeset should have happened
+            // before the base channel flip; currently we don't know how to
+            // do a modeset and program the base channel for VRR at the same
+            // time. This should be revisited as part of bug 1731279.
+            //
+            return FALSE;
+        }
+        nvApplyVrrBaseFlipOverrides(pDevEvo->gpus[sd].pDispEvo, head,
+                                    pOld, pNew);
+    }
+
+    if (!nvHeadIsActive(pDispEvo, head)) {
+        //
+        // This is possible when a modeset and a base flip happen at the same
+        // time; the tearing parameter does not make sense in that case, so it
+        // is disabled.
+        //
+        pNew->tearing = FALSE;
+    } else {
+        // Force non-tearing mode if EVO requires it.
+        if (FlipRequiresNonTearingMode(pDevEvo, head, pOld, pNew)) {
+            pNew->tearing = FALSE;
+        }
+    }
+
+    return TRUE;
+}
+
+static NvBool ValidateScalingUsageBounds(
+    const struct NvKmsScalingUsageBounds *pS,
+    const struct NvKmsScalingUsageBounds *pMaxS)
+{
+    return (pS->maxVDownscaleFactor <= pMaxS->maxVDownscaleFactor) &&
+           (pS->maxHDownscaleFactor <= pMaxS->maxHDownscaleFactor) &&
+           (pS->vTaps <= pMaxS->vTaps) &&
+           (!pS->vUpscalingAllowed || pMaxS->vUpscalingAllowed);
+}
+
+/*!
+ * Validate the requested usage bounds against the specified maximums.
+ */
+static NvBool ValidateUsageBounds(
+    const NVDevEvoRec *pDevEvo,
+    const NvU32 head,
+    const struct NvKmsUsageBounds *pUsage,
+    const struct NvKmsUsageBounds *pGuaranteedUsage)
+{
+    NvU32 i;
+
+    for (i = 0; i < pDevEvo->head[head].numLayers; i++) {
+        const NvU64 supportedSurfaceFormatsUnion =
+            pUsage->layer[i].supportedSurfaceMemoryFormats |
+            pGuaranteedUsage->layer[i].supportedSurfaceMemoryFormats;
+
+        if ((pUsage->layer[i].usable && !pGuaranteedUsage->layer[i].usable) ||
+            (supportedSurfaceFormatsUnion !=
+             pGuaranteedUsage->layer[i].supportedSurfaceMemoryFormats) ||
+            !ValidateScalingUsageBounds(&pUsage->layer[i].scaling,
+                                        &pGuaranteedUsage->layer[i].scaling)) {
+            return FALSE;
+        }
+    }
+
+    return TRUE;
+}
+
+/*!
+ * Assign pFlipState->usage.
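+ *
+ * In rough terms (a summary of the loop below, not additional behavior): for
+ * each layer with a surface, mark the layer usable, derive the surface
+ * formats with equal or lower usage bounds, compute scaling bounds from the
+ * requested sizeIn/sizeOut, and clamp those bounds by any client-specified
+ * maxDownscaleFactors.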
+ */ +static NvBool AssignUsageBounds( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + NVFlipEvoHwState *pFlipState) +{ + struct NvKmsUsageBounds *pUsage = &pFlipState->usage; + int i; + + for (i = 0; i < pDevEvo->head[head].numLayers; i++) { + const NVFlipChannelEvoHwState *pLayerFlipState = &pFlipState->layer[i]; + + nvInitScalingUsageBounds(pDevEvo, &pUsage->layer[i].scaling); + + if (pLayerFlipState->pSurfaceEvo[NVKMS_LEFT]) { + pUsage->layer[i].usable = TRUE; + pUsage->layer[i].supportedSurfaceMemoryFormats = + nvEvoGetFormatsWithEqualOrLowerUsageBound( + pLayerFlipState->pSurfaceEvo[NVKMS_LEFT]->format, + pDevEvo->caps.layerCaps[i].supportedSurfaceMemoryFormats); + + if (pDevEvo->hal->GetWindowScalingCaps) { + const NVEvoScalerCaps *pScalerCaps = + pDevEvo->hal->GetWindowScalingCaps(pDevEvo); + + if (!nvComputeScalingUsageBounds(pScalerCaps, + pLayerFlipState->sizeIn.width, + pLayerFlipState->sizeIn.height, + pLayerFlipState->sizeOut.width, + pLayerFlipState->sizeOut.height, + pLayerFlipState->hTaps, + pLayerFlipState->vTaps, + &pUsage->layer[i].scaling)) { + return FALSE; + } + } + + if (pLayerFlipState->maxDownscaleFactors.specified) { + struct NvKmsScalingUsageBounds *pTargetScaling = + &pFlipState->usage.layer[i].scaling; + + if ((pLayerFlipState->maxDownscaleFactors.vertical < + pTargetScaling->maxVDownscaleFactor) || + (pLayerFlipState->maxDownscaleFactors.horizontal < + pTargetScaling->maxHDownscaleFactor)) { + return FALSE; + } + + pTargetScaling->maxVDownscaleFactor = + pLayerFlipState->maxDownscaleFactors.vertical; + pTargetScaling->maxHDownscaleFactor = + pLayerFlipState->maxDownscaleFactors.horizontal; + } + + } else { + pUsage->layer[i].usable = FALSE; + pUsage->layer[i].supportedSurfaceMemoryFormats = 0; + } + } + + return TRUE; +} + +void +nvOverrideScalingUsageBounds(const NVDevEvoRec *pDevEvo, + NvU32 head, + NVFlipEvoHwState *pFlipState, + const struct NvKmsUsageBounds *pPossibleUsage) +{ + NvU32 i; + + for (i = 0; i < pDevEvo->head[head].numLayers; i++) { + const NVFlipChannelEvoHwState *pLayerFlipState = &pFlipState->layer[i]; + const struct NvKmsScalingUsageBounds *pPossibleScaling = + &pPossibleUsage->layer[i].scaling; + struct NvKmsScalingUsageBounds *pTargetScaling = + &pFlipState->usage.layer[i].scaling; + + if (!pFlipState->usage.layer[i].usable) { + continue; + } + + if (!pLayerFlipState->maxDownscaleFactors.specified) { + const NvU16 possibleV = pPossibleScaling->maxVDownscaleFactor; + const NvU16 possibleH = pPossibleScaling->maxHDownscaleFactor; + NvU16 targetV = pTargetScaling->maxVDownscaleFactor; + NvU16 targetH = pTargetScaling->maxHDownscaleFactor; + + /* + * Calculate max H/V downscale factor by quantizing the range. + * + * E.g., + * max H/V downscale factor supported by HW is 4x for 5-tap and 2x + * for 2-tap mode. If 5-tap mode is required, the target usage bound + * that nvkms will attempt to program will either allow up to 2x + * downscaling, or up to 4x downscaling. If 2-tap mode is required, + * the target usage bound that NVKMS will attempt to program will + * allow up to 2x downscaling. Example: to downscale from 4096x2160 + * -> 2731x864 in 5-tap mode, NVKMS would specify up to 2x for the + * H downscale bound (required is 1.5x), and up to 4x for the V + * downscale bound (required is 2.5x). 
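+             *
+             * As a sketch of the arithmetic below (assuming scale factors
+             * are fixed-point values where NV_EVO_SCALE_FACTOR_1X means
+             * 1.0x):
+             *
+             *   possibleMid = 1x + (possible - 1x) / 2;
+             *   target      = (target <= possibleMid) ? possibleMid
+             *                                         : possible;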
+ */ + if (targetV > NV_EVO_SCALE_FACTOR_1X) { + const NvU16 possibleMid = + NV_EVO_SCALE_FACTOR_1X + ((possibleV - NV_EVO_SCALE_FACTOR_1X) / 2); + + if (targetV <= possibleMid) { + pTargetScaling->maxVDownscaleFactor = possibleMid; + } else { + pTargetScaling->maxVDownscaleFactor = possibleV; + } + } + + if (targetH > NV_EVO_SCALE_FACTOR_1X) { + const NvU16 possibleMid = + NV_EVO_SCALE_FACTOR_1X + ((possibleH - NV_EVO_SCALE_FACTOR_1X) / 2); + + if (targetH <= possibleMid) { + pTargetScaling->maxHDownscaleFactor = possibleMid; + } else { + pTargetScaling->maxHDownscaleFactor = possibleH; + } + } + } + + pTargetScaling->vTaps = pPossibleScaling->vTaps; + pTargetScaling->vUpscalingAllowed = pPossibleScaling->vUpscalingAllowed; + } +} + +static NvBool FlipTimeStampValidForChannel( + const NVEvoChannel *pChannel, + NvU64 timeStamp) +{ + if (pChannel->caps.validTimeStampBits < 64) { + const NvU64 validTimeStampMask = + NVBIT64(pChannel->caps.validTimeStampBits) - 1; + if ((timeStamp & ~validTimeStampMask) != 0) { + return FALSE; + } + } + return TRUE; +} + +static NvBool UpdateLayerFlipEvoHwStateHDRStaticMetadata( + const NVDevEvoRec *pDevEvo, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState, + NVFlipChannelEvoHwState *pHwState, + const NvU32 head, + const NvU32 layer) +{ + if (pParams->layer[layer].hdr.specified) { + if (pParams->layer[layer].hdr.enabled) { + // Don't allow enabling HDR on a layer that doesn't support ICtCp. + if (!pDevEvo->caps.layerCaps[layer].supportsICtCp) { + return FALSE; + } + + pHwState->hdrStaticMetadata.val = + pParams->layer[layer].hdr.staticMetadata; + } + pHwState->hdrStaticMetadata.enabled = pParams->layer[layer].hdr.enabled; + + // Only mark dirty if layer supports ICtCp, otherwise this is a no-op. + if (pDevEvo->caps.layerCaps[layer].supportsICtCp) { + pFlipState->dirty.hdrStaticMetadata = TRUE; + } + } + + return TRUE; +} + +static NvBool UpdateFlipLutHwState( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NVFlipLutHwState *pFlipLutHwState, + const struct NvKmsLUTSurfaceParams *pLUTSurfaceParams, + const struct NvKmsLUTCaps *pLUTCaps, + const NvBool isUsedByLayerChannel) +{ + NvU32 requiredSize = 0; + + /* + * If the LUT is not supported and the user has specified it, even with + * surfaceHandle == 0, the request is invalid. + */ + if (!pLUTCaps->supported) { + return FALSE; + } + + if (pLUTSurfaceParams->surfaceHandle != 0) { + pFlipLutHwState->pLutSurfaceEvo = + nvEvoGetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + pLUTSurfaceParams->surfaceHandle, + FALSE, /* isUsedByCursorChannel */ + isUsedByLayerChannel); + + if (pFlipLutHwState->pLutSurfaceEvo == NULL) { + /* Invalid surface handle */ + return FALSE; + } + pFlipLutHwState->offset = pLUTSurfaceParams->offset; + pFlipLutHwState->vssSegments = pLUTSurfaceParams->vssSegments; + pFlipLutHwState->lutEntries = pLUTSurfaceParams->lutEntries; + + /* Attempt to validate the surface and parameters: */ + if (pFlipLutHwState->pLutSurfaceEvo->layout != NvKmsSurfaceMemoryLayoutPitch) { + /* + * Only pitch surfaces can be used. + * + * XXX: Also need surface format check? + * (NvKmsSurfaceMemoryFormatR16G16B16A16) + */ + return FALSE; + } + + if ((pLUTCaps->vssSupport == NVKMS_LUT_VSS_NOT_SUPPORTED) && + (pLUTSurfaceParams->vssSegments != 0)) { + /* Can't specify VSS entries if VSS is not supported. 
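+         * (vssSupport is effectively tri-state: NOT_SUPPORTED forbids
+         * vssSegments, REQUIRED demands them, and the size computation below
+         * adds NVKMS_LUT_VSS_HEADER_SIZE only when segments are present.)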
*/ + return FALSE; + } + + if ((pLUTCaps->vssSupport == NVKMS_LUT_VSS_REQUIRED) && + (pLUTSurfaceParams->vssSegments == 0)) { + /* Must specify VSS entries if VSS is required. */ + return FALSE; + } + + if ((pLUTSurfaceParams->lutEntries > pLUTCaps->lutEntries) || + (pLUTSurfaceParams->vssSegments > pLUTCaps->vssSegments)) { + /* The number of LUT and VSS entries cannot exceed LUT caps. */ + return FALSE; + } + + requiredSize = pLUTSurfaceParams->lutEntries * NVKMS_LUT_CAPS_LUT_ENTRY_SIZE + + (pLUTSurfaceParams->vssSegments > 0 ? NVKMS_LUT_VSS_HEADER_SIZE : 0); + + if (requiredSize > pFlipLutHwState->pLutSurfaceEvo->planes[0].rmObjectSizeInBytes) { + /* The surface isn't large enough to hold the described LUT. */ + return FALSE; + } + + /* + * TODO: Check that lutEntries, vssSegments, and vssType correlate + * correctly. + */ + } else { + /* Disable the LUT. */ + pFlipLutHwState->pLutSurfaceEvo = NULL; + pFlipLutHwState->offset = 0; + pFlipLutHwState->vssSegments = 0; + pFlipLutHwState->lutEntries = 0; + } + return TRUE; +} + +#define WITH_APIHEAD_FOR_HEAD(_pDevEvo, _sd, _head, _apiHead) \ + for (_apiHead = 0; \ + _apiHead < ARRAY_LEN(_pDevEvo->pDispEvo[_sd]->apiHeadState); \ + _apiHead++) { \ + if ((_pDevEvo->pDispEvo[_sd]->apiHeadState[_apiHead].hwHeadsMask & \ + NVBIT(_head)) != 0) { + +#define WITH_APIHEAD_FOR_HEAD_DONE \ + break; \ + } \ + } + +static NvBool UpdateLayerFlipEvoHwStateCommon( + const struct NvKmsPerOpenDev *pOpenDev, + NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const NvU32 layer, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState) +{ + const NVEvoChannel *pChannel = pDevEvo->head[head].layer[layer]; + NVFlipChannelEvoHwState *pHwState = &pFlipState->layer[layer]; + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDevConst(pOpenDev); + NvBool ret; + + if (pParams->layer[layer].surface.specified) { + ret = nvAssignSurfaceArray(pDevEvo, + pOpenDevSurfaceHandles, + pParams->layer[layer].surface.handle, + FALSE /* isUsedByCursorChannel */, + TRUE /* isUsedByLayerChannel */, + pHwState->pSurfaceEvo); + if (!ret) { + return FALSE; + } + + /* + * Verify the (rotation, reflectionX, reflectionY) is a + * combination currently supported. + */ + if ((NVBIT(NvKmsRRParamsToCapBit(&pParams->layer[layer].surface.rrParams)) & + pDevEvo->caps.validLayerRRTransforms) == 0) { + return FALSE; + } + pHwState->rrParams = pParams->layer[layer].surface.rrParams; + + } + + /* Verify the timestamp is in the valid range for this channel. 
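+     * (Purely as an illustration: a channel reporting 32 valid timestamp
+     * bits yields validTimeStampMask = NVBIT64(32) - 1 = 0xFFFFFFFF, so any
+     * timestamp at or above 2^32 is rejected.)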
*/ + if (!FlipTimeStampValidForChannel(pChannel, + pParams->layer[layer].timeStamp)) { + return FALSE; + } + pHwState->timeStamp = pParams->layer[layer].timeStamp; + + if (pParams->layer[layer].syncObjects.specified) { + if (!pDevEvo->supportsSyncpts && + pParams->layer[layer].syncObjects.val.useSyncpt) { + return FALSE; + } + + nvkms_memset(&pFlipState->layer[layer].syncObject, + 0, + sizeof(pFlipState->layer[layer].syncObject)); + + if (pParams->layer[layer].syncObjects.val.useSyncpt) { + ret = AssignPreSyncptEvoHwState(pDevEvo, + &pParams->layer[layer].syncObjects.val, + &pHwState->syncObject); + if (!ret) { + return FALSE; + } + pFlipState->dirty.layerSyncObjects[layer] = TRUE; + + ret = AssignPostSyncptEvoHwState(pDevEvo, + pDevEvo->head[head].layer[layer], + &pParams->layer[layer].syncObjects.val, + &pHwState->syncObject); + if (!ret) { + return FALSE; + } + } else { + if (pParams->layer[layer].syncObjects.val.u.semaphores.acquire.surface.surfaceHandle != 0 || + pParams->layer[layer].syncObjects.val.u.semaphores.release.surface.surfaceHandle != 0) { + if (pParams->layer[layer].skipPendingFlips) { + return FALSE; + } + } + + ret = nvAssignSemaphoreEvoHwState(pDevEvo, + pOpenDevSurfaceHandles, + layer, + sd, + &pParams->layer[layer].syncObjects.val, + &pHwState->syncObject); + if (!ret) { + return FALSE; + } + } + } + + if (pHwState->pSurfaceEvo[NVKMS_LEFT]) { + pHwState->minPresentInterval = + pParams->layer[layer].minPresentInterval; + } else { + /* The hardware requires that MPI be 0 when disabled. */ + pHwState->minPresentInterval = 0; + } + + if (pParams->layer[layer].sizeIn.specified) { + pHwState->sizeIn = pParams->layer[layer].sizeIn.val; + } + + if (pParams->layer[layer].sizeOut.specified) { + pHwState->sizeOut = pParams->layer[layer].sizeOut.val; + } + + /* + * If supportsWindowMode = TRUE, the sizeIn/sizeOut dimensions can be + * any arbitrary (valid) values. + * + * If supportsWindowMode = FALSE (legacy EVO main layer), the sizeIn + * /sizeOut dimensions must match the size of the surface for that layer. + * + * Note that if sizeIn/Out dimensions are invalid i.e. with a width or + * height of zero, this will be rejected by a call to + * ValidateFlipChannelEvoHwState() later in the code path. + * + * Note that if scaling is unsupported, i.e. that sizeIn cannot differ + * from sizeOut, then any unsupported configurations will be caught by the + * ComputeWindowScalingTaps() call later on in this function. + */ + if (!pDevEvo->caps.layerCaps[layer].supportsWindowMode && + (pHwState->pSurfaceEvo[NVKMS_LEFT] != NULL)) { + const NVSurfaceEvoRec *pSurfaceEvo = + pHwState->pSurfaceEvo[NVKMS_LEFT]; + + if ((pHwState->sizeIn.width != pSurfaceEvo->widthInPixels) || + (pHwState->sizeIn.height != pSurfaceEvo->heightInPixels)) { + return FALSE; + } + + if ((pHwState->sizeOut.width != pSurfaceEvo->widthInPixels) || + (pHwState->sizeOut.height != pSurfaceEvo->heightInPixels)) { + return FALSE; + } + } + + /* + * Allow the client to specify non-origin outputPosition only if the + * layer supports window mode. + * + * If window mode is unsupported but the client specifies non-origin + * outputPosition, return FALSE. 
+ */ + if (pDevEvo->caps.layerCaps[layer].supportsWindowMode) { + if (pParams->layer[layer].outputPosition.specified) { + const NvS16 x = pParams->layer[layer].outputPosition.val.x; + const NvS16 y = pParams->layer[layer].outputPosition.val.y; + if ((pHwState->outputPosition.x != x) || + (pHwState->outputPosition.y != y)) { + pHwState->outputPosition.x = x; + pHwState->outputPosition.y = y; + pFlipState->dirty.layerPosition[layer] = TRUE; + } + } + } else if (pParams->layer[layer].outputPosition.specified && + ((pParams->layer[layer].outputPosition.val.x != 0) || + (pParams->layer[layer].outputPosition.val.y != 0))) { + return FALSE; + } + + if (pParams->layer[layer].compositionParams.specified) { + pHwState->composition = + pParams->layer[layer].compositionParams.val; + } + + if (!UpdateLayerFlipEvoHwStateHDRStaticMetadata( + pDevEvo, pParams, pFlipState, + pHwState, head, layer)) { + return FALSE; + } + + if (pParams->layer[layer].colorRange.specified) { + pHwState->colorRange = pParams->layer[layer].colorRange.val; + } + + if (pParams->layer[layer].colorSpace.specified) { + pHwState->colorSpace = + pParams->layer[layer].colorSpace.val; + } + + if (pParams->layer[layer].tf.specified) { + pHwState->tf = + pParams->layer[layer].tf.val; + } + + if (pParams->layer[layer].csc00Override.specified) { + // CSC00 is only available on layers that support ICtCp. + if (!pDevEvo->caps.layerCaps[layer].supportsICtCp && + pHwState->csc00Override.enabled) { + return FALSE; + } + pHwState->csc00Override.enabled = + pParams->layer[layer].csc00Override.enabled; + pHwState->csc00Override.matrix = + pParams->layer[layer].csc00Override.matrix; + } + + if (pParams->layer[layer].csc01Override.specified) { + // CSC01 is only available on layers that support ICtCp. + if (!pDevEvo->caps.layerCaps[layer].supportsICtCp && + pHwState->csc01Override.enabled) { + return FALSE; + } + pHwState->csc01Override.enabled = + pParams->layer[layer].csc01Override.enabled; + pHwState->csc01Override.matrix = + pParams->layer[layer].csc01Override.matrix; + } + + if (pParams->layer[layer].csc10Override.specified) { + // CSC10 is only available on layers that support ICtCp. 
+ if (!pDevEvo->caps.layerCaps[layer].supportsICtCp && + pHwState->csc10Override.enabled) { + return FALSE; + } + pHwState->csc10Override.enabled = + pParams->layer[layer].csc10Override.enabled; + pHwState->csc10Override.matrix = + pParams->layer[layer].csc10Override.matrix; + } + + if (pParams->layer[layer].csc11Override.specified) { + pHwState->csc11Override.enabled = + pParams->layer[layer].csc11Override.enabled; + pHwState->csc11Override.matrix = + pParams->layer[layer].csc11Override.matrix; + } + + if (pHwState->composition.depth == 0) { + pHwState->composition.depth = + NVKMS_MAX_LAYERS_PER_HEAD - layer; + } + + /* XXX Move ValidatePerLayerCompParams() call to nvValidateFlipEvoHwState() */ + if (!nvValidatePerLayerCompParams( + &pHwState->composition, + &pDevEvo->caps.layerCaps[layer].composition, + pHwState->pSurfaceEvo[NVKMS_LEFT])) { + return FALSE; + } + + if (!pDevEvo->hal->ComputeWindowScalingTaps(pDevEvo, + pChannel, + pHwState)) { + return FALSE; + } + + if (pParams->layer[layer].completionNotifier.specified) { + ret = nvAssignCompletionNotifierEvoHwState( + pDevEvo, + pOpenDevSurfaceHandles, + &pParams->layer[layer].completionNotifier.val, + layer, + &pFlipState->layer[layer].completionNotifier); + if (!ret) { + return FALSE; + } + } + + if (pParams->layer[layer].ilut.specified) { + if (pParams->layer[layer].ilut.enabled) { + ret = UpdateFlipLutHwState( + pDevEvo, + pOpenDevSurfaceHandles, + &pFlipState->layer[layer].inputLut, + &pParams->layer[layer].ilut.lut, + &pDevEvo->caps.layerCaps[layer].ilut, + TRUE /*isUsedByLayerChannel*/); + if (!ret) { + return FALSE; + } + /* Cache that the hw state is from the new params. */ + pFlipState->layer[layer].inputLut.fromOverride = TRUE; + } else { + /* Cache that the hw state is from the legacy params. */ + pFlipState->layer[layer].inputLut.fromOverride = FALSE; + } + } + + /* + * If this is not from the new params - either cached this flip or in a + * previous one - set from the legacy params. + */ + if (!pFlipState->layer[layer].inputLut.fromOverride) { + NvU32 apiHead; + WITH_APIHEAD_FOR_HEAD(pDevEvo, sd, head, apiHead) { + NvBool ilutEnabled = pDevEvo->lut.apiHead[apiHead].disp[sd].curBaseLutEnabled; + NvU32 curLUTIndex = pDevEvo->lut.apiHead[apiHead].disp[sd].curLUTIndex; + NvU32 nextLutIndex = (curLUTIndex + 1) % 3; + + /* If the legacy params are specified, update from those. */ + if (pParams->lut.input.specified) { + if (pParams->lut.input.end != 0) { + pFlipState->layer[layer].inputLut.pLutSurfaceEvo = + pDevEvo->lut.apiHead[apiHead].LUT[nextLutIndex]; + pFlipState->layer[layer].inputLut.offset = offsetof(NVEvoLutDataRec, base); + pFlipState->layer[layer].inputLut.lutEntries = NV_NUM_EVO_LUT_ENTRIES; + pFlipState->layer[layer].inputLut.vssSegments = 0; + } else { + pFlipState->layer[layer].inputLut.pLutSurfaceEvo = NULL; + pFlipState->layer[layer].inputLut.offset = 0; + pFlipState->layer[layer].inputLut.lutEntries = 0; + pFlipState->layer[layer].inputLut.vssSegments = 0; + } + /* Otherwise, use the current state. 
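+             * (The per-apiHead LUTs behave as a small ring of three
+             * surfaces: newly specified legacy LUT data targets
+             * nextLutIndex = (curLUTIndex + 1) % 3, while this fallback
+             * re-reads the slot currently in use.)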
*/ + } else if (ilutEnabled) { + pFlipState->layer[layer].inputLut.pLutSurfaceEvo = + pDevEvo->lut.apiHead[apiHead].LUT[curLUTIndex]; + pFlipState->layer[layer].inputLut.offset = offsetof(NVEvoLutDataRec, base); + pFlipState->layer[layer].inputLut.lutEntries = NV_NUM_EVO_LUT_ENTRIES; + pFlipState->layer[layer].inputLut.vssSegments = 0; + } else { + pFlipState->layer[layer].inputLut.pLutSurfaceEvo = NULL; + pFlipState->layer[layer].inputLut.offset = 0; + pFlipState->layer[layer].inputLut.lutEntries = 0; + pFlipState->layer[layer].inputLut.vssSegments = 0; + } + } WITH_APIHEAD_FOR_HEAD_DONE; + } + + if (pParams->layer[layer].tmo.specified) { + if (pParams->layer[layer].tmo.enabled) { + ret = UpdateFlipLutHwState( + pDevEvo, + pOpenDevSurfaceHandles, + &pFlipState->layer[layer].tmoLut, + &pParams->layer[layer].tmo.lut, + &pDevEvo->caps.layerCaps[layer].tmo, + TRUE /*isUsedByLayerChannel*/); + if (!ret) { + return FALSE; + } + pFlipState->layer[layer].tmoLut.fromOverride = TRUE; + } else { + pFlipState->layer[layer].tmoLut.fromOverride = FALSE; + } + } + + if (!pFlipState->layer[layer].tmoLut.fromOverride) { + nvSetTmoLutSurfaceEvo(pDevEvo, &pFlipState->layer[layer]); + } + + if (pParams->layer[layer].maxDownscaleFactors.specified) { + pFlipState->layer[layer].maxDownscaleFactors.vertical = + pParams->layer[layer].maxDownscaleFactors.vertical; + pFlipState->layer[layer].maxDownscaleFactors.horizontal = + pParams->layer[layer].maxDownscaleFactors.horizontal; + pFlipState->layer[layer].maxDownscaleFactors.specified = TRUE; + } else { + pFlipState->layer[layer].maxDownscaleFactors.vertical = 0; + pFlipState->layer[layer].maxDownscaleFactors.horizontal = 0; + pFlipState->layer[layer].maxDownscaleFactors.specified = FALSE; + } + + pFlipState->dirty.layer[layer] = TRUE; + + return TRUE; +} + +static NvBool UpdateMainLayerFlipEvoHwState( + const struct NvKmsPerOpenDev *pOpenDev, + NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState, + NvBool allowVrr) +{ + const NVEvoChannel *pChannel = + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER]; + NVFlipChannelEvoHwState *pHwState = &pFlipState->layer[NVKMS_MAIN_LAYER]; + + if (!nvIsLayerDirty(pParams, NVKMS_MAIN_LAYER)) { + return TRUE; + } + + if (!UpdateLayerFlipEvoHwStateCommon(pOpenDev, pDevEvo, sd, head, + NVKMS_MAIN_LAYER, + pParams, pFlipState)) { + return FALSE; + } + + if (pHwState->pSurfaceEvo[NVKMS_LEFT] && + pHwState->pSurfaceEvo[NVKMS_LEFT]->format == NvKmsSurfaceMemoryFormatI8 && + pHwState->inputLut.pLutSurfaceEvo == NULL) { + + /* + * Depth 8 requires the input LUT to be enabled, so fall back to the + * last programmed legacy LUT. 
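+         * (I8 scanout is palette-indexed: each 8-bit pixel value selects an
+         * ILUT entry, so presenting I8 with no ILUT bound would leave the
+         * effective palette undefined.)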
+         */
+        pHwState->inputLut.pLutSurfaceEvo =
+            pDevEvo->pDispEvo[sd]->headState[head].lut.pCurrSurface;
+        pHwState->inputLut.offset = offsetof(NVEvoLutDataRec, base);
+        pHwState->inputLut.lutEntries = NV_NUM_EVO_LUT_ENTRIES;
+        pHwState->inputLut.vssSegments = 0;
+    }
+
+    if (pParams->layer[NVKMS_MAIN_LAYER].csc.specified) {
+        if (pParams->layer[NVKMS_MAIN_LAYER].csc.useMain) {
+            return FALSE;
+        } else {
+            pHwState->cscMatrix = pParams->layer[NVKMS_MAIN_LAYER].csc.matrix;
+        }
+    }
+
+    if (pParams->layer[NVKMS_MAIN_LAYER].surface.specified) {
+        if (pParams->layer[NVKMS_MAIN_LAYER].perEyeStereoFlip &&
+            !pChannel->caps.perEyeStereoFlips) {
+            return FALSE;
+        }
+
+        pHwState->perEyeStereoFlip =
+            pParams->layer[NVKMS_MAIN_LAYER].perEyeStereoFlip;
+    }
+
+    if (pParams->layer[NVKMS_MAIN_LAYER].tearing && !pChannel->caps.tearingFlips) {
+        return FALSE;
+    }
+
+    // EVO will throw an invalid argument exception if
+    // minPresentInterval is too large, or if tearing is enabled and
+    // it's not zero.
+    if (pParams->layer[NVKMS_MAIN_LAYER].minPresentInterval > NV_MAX_SWAP_INTERVAL ||
+        (pParams->layer[NVKMS_MAIN_LAYER].tearing &&
+         pParams->layer[NVKMS_MAIN_LAYER].minPresentInterval != 0)) {
+        return FALSE;
+    }
+
+    pHwState->tearing = pParams->layer[NVKMS_MAIN_LAYER].tearing;
+
+    /*
+     * XXX: Kludge. Ideally, this would be handled by FlipRequiresNonTearingMode
+     * recognizing a difference in HW state. However, the new HW LUT state is
+     * not computed until later, when nvEvoSetLut() and nvEvoSetLUTContextDma()
+     * are called. See bug 4054546.
+     */
+    if ((pParams->lut.input.specified ||
+         pParams->lut.output.specified) &&
+        !pDevEvo->hal->caps.supportsCoreLut) {
+
+        pHwState->tearing = FALSE;
+    }
+
+    if (!ApplyBaseFlipOverrides(pDevEvo,
+                                sd, head, &pFlipState->layer[NVKMS_MAIN_LAYER],
+                                allowVrr)) {
+        return FALSE;
+    }
+
+    pFlipState->skipLayerPendingFlips[NVKMS_MAIN_LAYER] =
+        pParams->layer[NVKMS_MAIN_LAYER].skipPendingFlips;
+
+    return TRUE;
+}
+
+static NvBool UpdateCursorLayerFlipEvoHwState(
+    const struct NvKmsPerOpenDev *pOpenDev,
+    const NVDevEvoRec *pDevEvo,
+    const struct NvKmsFlipCommonParams *pParams,
+    const NVHwModeTimingsEvo *pTimings,
+    const NvU8 mergeHeadSection,
+    NVFlipEvoHwState *pFlipState)
+{
+    if (pParams->cursor.imageSpecified) {
+        if (!nvAssignCursorSurface(pOpenDev, pDevEvo, &pParams->cursor.image,
+                                   &pFlipState->cursor.pSurfaceEvo)) {
+            return FALSE;
+        }
+
+        if (pFlipState->cursor.pSurfaceEvo != NULL) {
+            pFlipState->cursor.cursorCompParams =
+                pParams->cursor.image.cursorCompParams;
+        }
+
+        pFlipState->dirty.cursorSurface = TRUE;
+    }
+
+    if (pParams->cursor.positionSpecified) {
+        pFlipState->cursor.x = (pParams->cursor.position.x -
+                                (pTimings->viewPort.in.width * mergeHeadSection));
+        pFlipState->cursor.y = pParams->cursor.position.y;
+
+        pFlipState->dirty.cursorPosition = TRUE;
+    }
+
+    return TRUE;
+}
+
+static NvBool UpdateOverlayLayerFlipEvoHwState(
+    const struct NvKmsPerOpenDev *pOpenDev,
+    NVDevEvoRec *pDevEvo,
+    const NvU32 sd,
+    const NvU32 head,
+    const NvU32 layer,
+    const struct NvKmsFlipCommonParams *pParams,
+    NVFlipEvoHwState *pFlipState)
+{
+    NVFlipChannelEvoHwState *pHwState = &pFlipState->layer[layer];
+
+    nvAssert(layer != NVKMS_MAIN_LAYER);
+
+    if (!nvIsLayerDirty(pParams, layer)) {
+        return TRUE;
+    }
+
+    if (pParams->layer[layer].skipPendingFlips ||
+        pParams->layer[layer].perEyeStereoFlip) {
+        return FALSE;
+    }
+
+    if (!UpdateLayerFlipEvoHwStateCommon(pOpenDev, pDevEvo, sd, head, layer,
+                                         pParams, pFlipState)) {
+        return FALSE;
+    }
+
+    if (pParams->layer[layer].csc.specified) {
+        if (pParams->layer[layer].csc.useMain) {
+            if (pFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT]) {
+                pHwState->cscMatrix =
+                    pFlipState->layer[NVKMS_MAIN_LAYER].cscMatrix;
+            }
+        } else {
+            pHwState->cscMatrix = pParams->layer[layer].csc.matrix;
+        }
+    }
+
+    return TRUE;
+}
+
+/*!
+ * Update the NVFlipEvoHwState, using NvKmsFlipCommonParams.
+ *
+ * Propagate the requested configuration from NvKmsFlipCommonParams to
+ * NVFlipEvoHwState, performing steps such as translating from
+ * NvKmsSurfaceHandle to NVSurfaceEvoRecs. Validate the NvKmsFlipCommonParams
+ * parameters, but defer more general validation of the resulting
+ * NVFlipEvoHwState until nvValidateFlipEvoHwState(), which callers must call
+ * separately.
+ *
+ * The NVFlipEvoHwState should first be initialized by calling
+ * nvInitFlipEvoHwState().
+ *
+ * No NVKMS hardware or software state should be altered here, because
+ * this function is used before we have decided to commit the proposed
+ * NVFlipEvoHwState to hardware.
+ *
+ * \param[in]     pOpenDev  The pOpenDev of the client doing the flip.
+ * \param[in]     pDevEvo   The device on which the surface image will be set.
+ * \param[in]     sd        The subdevice for the flip, as specified by the
+ *                          client.
+ * \param[in]     head      The head for the flip, as specified by the client.
+ * \param[in]     pParams   The requested flip, NvKmsFlipCommonParams.
+ * \param[in]     pTimings  The mode timings for the flip.
+ * \param[in]     mergeHeadSection
+ *                          The horizontal section of a merge-head
+ *                          configuration driven by this head; used to offset
+ *                          the viewport and cursor coordinates.
+ * \param[in,out] pFlipState The resulting NVFlipEvoHwState.
+ * \param[in]     allowVrr  Whether VRR flipping should be allowed.
+ *
+ * \return  If pFlipState could be updated, return TRUE.
+ *          Otherwise, return FALSE.
+ */
+NvBool nvUpdateFlipEvoHwState(
+    const struct NvKmsPerOpenDev *pOpenDev,
+    NVDevEvoRec *pDevEvo,
+    const NvU32 sd,
+    const NvU32 head,
+    const struct NvKmsFlipCommonParams *pParams,
+    const NVHwModeTimingsEvo *pTimings,
+    const NvU8 mergeHeadSection,
+    NVFlipEvoHwState *pFlipState,
+    NvBool allowVrr)
+{
+    NvU32 layer;
+
+    if (pParams->viewPortIn.specified) {
+        pFlipState->dirty.viewPortPointIn = TRUE;
+        pFlipState->viewPortPointIn.x = pParams->viewPortIn.point.x +
+            (pTimings->viewPort.in.width * mergeHeadSection);
+        pFlipState->viewPortPointIn.y = pParams->viewPortIn.point.y;
+    }
+
+    if (!UpdateCursorLayerFlipEvoHwState(pOpenDev, pDevEvo, pParams, pTimings,
+                                         mergeHeadSection, pFlipState)) {
+        return FALSE;
+    }
+
+    if (pParams->tf.specified) {
+        pFlipState->dirty.tf = TRUE;
+        pFlipState->tf = pParams->tf.val;
+    }
+
+    if (pParams->hdrInfoFrame.specified) {
+        pFlipState->dirty.hdrStaticMetadata = TRUE;
+
+        if (pParams->hdrInfoFrame.enabled) {
+            pFlipState->hdrInfoFrame.eotf =
+                pParams->hdrInfoFrame.eotf;
+            pFlipState->hdrInfoFrame.staticMetadata =
+                pParams->hdrInfoFrame.staticMetadata;
+        }
+        pFlipState->hdrInfoFrame.enabled =
+            pParams->hdrInfoFrame.enabled;
+    }
+
+    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+        if (layer == NVKMS_MAIN_LAYER) {
+            if (!UpdateMainLayerFlipEvoHwState(pOpenDev, pDevEvo, sd, head,
+                                               pParams, pFlipState, allowVrr)) {
+                return FALSE;
+            }
+            continue;
+        }
+
+        if (!UpdateOverlayLayerFlipEvoHwState(pOpenDev, pDevEvo, sd, head,
+                                              layer, pParams, pFlipState)) {
+            return FALSE;
+        }
+    }
+
+    /*
+     * See the comment for the ILUT in UpdateLayerFlipEvoHwStateCommon for an
+     * overview of this setup.
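+     *
+     * In short: an explicit pParams->olut override wins and is remembered
+     * via fromOverride; otherwise the legacy pParams->lut.output request,
+     * or failing that the api-head's current OLUT state, supplies the
+     * hardware OLUT below.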
+     */
+    if (pParams->olut.specified) {
+        if (pParams->olut.enabled) {
+            const NVEvoApiHandlesRec *pOpenDevSurfaceHandles =
+                nvGetSurfaceHandlesFromOpenDevConst(pOpenDev);
+
+            if (!UpdateFlipLutHwState(pDevEvo, pOpenDevSurfaceHandles,
+                                      &pFlipState->outputLut, &pParams->olut.lut,
+                                      &pDevEvo->caps.olut,
+                                      FALSE /*isUsedByLayerChannel*/)) {
+                return FALSE;
+            }
+            pFlipState->outputLut.fromOverride = TRUE;
+        } else {
+            pFlipState->outputLut.fromOverride = FALSE;
+        }
+    }
+
+    if (!pFlipState->outputLut.fromOverride) {
+        NvU32 apiHead;
+        WITH_APIHEAD_FOR_HEAD(pDevEvo, sd, head, apiHead) {
+            NvBool olutEnabled = pDevEvo->lut.apiHead[apiHead].disp[sd].curOutputLutEnabled;
+            NvU32 curLUTIndex = pDevEvo->lut.apiHead[apiHead].disp[sd].curLUTIndex;
+            NvU32 nextLutIndex = (curLUTIndex + 1) % 3;
+
+            if (pParams->lut.output.specified) {
+                if (pParams->lut.output.enabled) {
+                    pFlipState->outputLut.pLutSurfaceEvo =
+                        pDevEvo->lut.apiHead[apiHead].LUT[nextLutIndex];
+                    pFlipState->outputLut.offset = offsetof(NVEvoLutDataRec, output);
+                    pFlipState->outputLut.lutEntries = NV_NUM_EVO_LUT_ENTRIES;
+                    pFlipState->outputLut.vssSegments = 0;
+                } else {
+                    pFlipState->outputLut.pLutSurfaceEvo = NULL;
+                    pFlipState->outputLut.offset = 0;
+                    pFlipState->outputLut.lutEntries = 0;
+                    pFlipState->outputLut.vssSegments = 0;
+                }
+            } else if (olutEnabled) {
+                pFlipState->outputLut.pLutSurfaceEvo =
+                    pDevEvo->lut.apiHead[apiHead].LUT[curLUTIndex];
+                pFlipState->outputLut.offset = offsetof(NVEvoLutDataRec, output);
+                pFlipState->outputLut.lutEntries = NV_NUM_EVO_LUT_ENTRIES;
+                pFlipState->outputLut.vssSegments = 0;
+            } else {
+                pFlipState->outputLut.pLutSurfaceEvo = NULL;
+                pFlipState->outputLut.offset = 0;
+                pFlipState->outputLut.lutEntries = 0;
+                pFlipState->outputLut.vssSegments = 0;
+            }
+        } WITH_APIHEAD_FOR_HEAD_DONE;
+    }
+
+    if (pParams->olutFpNormScale.specified) {
+        pFlipState->olutFpNormScale = pParams->olutFpNormScale.val;
+    }
+
+    if ((pFlipState->outputLut.pLutSurfaceEvo !=
+         pDevEvo->gpus[sd].headState[head].outputLut.pLutSurfaceEvo) ||
+        (pFlipState->outputLut.offset !=
+         pDevEvo->gpus[sd].headState[head].outputLut.offset) ||
+        (pFlipState->olutFpNormScale !=
+         pDevEvo->gpus[sd].headState[head].olutFpNormScale)) {
+
+        pFlipState->layer[NVKMS_MAIN_LAYER].tearing = FALSE;
+        pFlipState->dirty.olut = TRUE;
+    }
+
+    if (!AssignUsageBounds(pDevEvo, head, pFlipState)) {
+        return FALSE;
+    }
+
+    /*
+     * If there is an active cursor or cropped window (overlay) without a
+     * fullscreen window (base/core), then NVKMS is supposed to disable the
+     * MidFrame/DWCF watermark.
+     */
+
+    pFlipState->disableMidFrameAndDWCFWatermark = FALSE;
+
+    if (NV5070_CTRL_SYSTEM_GET_CAP(
+            pDevEvo->capsBits,
+            NV5070_CTRL_SYSTEM_CAPS_BUG_2052012_GLITCHY_MCLK_SWITCH) &&
+        !pFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT]) {
+
+        if (pFlipState->cursor.pSurfaceEvo != NULL) {
+            pFlipState->disableMidFrameAndDWCFWatermark = TRUE;
+        } else {
+            NvU32 layer;
+
+            /*
+             * XXX TODO: Check the output size of the overlay in order
+             * to determine if it will be fullscreen or not.
+             */
+            for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+                if (layer != NVKMS_MAIN_LAYER &&
+                    pFlipState->layer[layer].pSurfaceEvo[NVKMS_LEFT] != NULL) {
+                    pFlipState->disableMidFrameAndDWCFWatermark = TRUE;
+                    break;
+                }
+            }
+        }
+    }
+
+    return TRUE;
+}
+
+/*
+ * Check that, if the surface is NULL (i.e., no image will be shown), various
+ * other elements are NULL as well. If the surface is not NULL, verify that
+ * the sizeIn/Out have nonzero values.
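+ *
+ * Put differently (a restatement of the checks below, not additional ones):
+ * a NULL surface implies no completion notifier and, when semaphores rather
+ * than syncpts are in use, no acquire or release semaphore surfaces either.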
+ */
+inline static NvBool ValidateFlipChannelEvoHwState(
+    const NVFlipChannelEvoHwState *pState)
+{
+    if (pState->pSurfaceEvo[NVKMS_LEFT] != NULL) {
+        /* Verify sizes are valid. */
+        if ((pState->sizeIn.width == 0) || (pState->sizeIn.height == 0) ||
+            (pState->sizeOut.width == 0) || (pState->sizeOut.height == 0)) {
+            return FALSE;
+        }
+
+        return TRUE;
+    }
+
+    if (pState->completionNotifier.surface.pSurfaceEvo != NULL) {
+        return FALSE;
+    }
+
+    if (!pState->syncObject.usingSyncpt) {
+        if (pState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo != NULL) {
+            return FALSE;
+        }
+
+        if (pState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo != NULL) {
+            return FALSE;
+        }
+    }
+
+    return TRUE;
+}
+
+static NvBool ValidateSurfaceSize(
+    const NVDevEvoRec *pDevEvo,
+    const NVSurfaceEvoRec *pSurfaceEvo,
+    const struct NvKmsRect *sourceFetchRect)
+{
+    NvU8 planeIndex;
+
+    if ((pSurfaceEvo->widthInPixels > pDevEvo->caps.maxWidthInPixels) ||
+        (pSurfaceEvo->heightInPixels > pDevEvo->caps.maxHeight)) {
+        return FALSE;
+    }
+
+    FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) {
+
+        NvU64 planePitch = pSurfaceEvo->planes[planeIndex].pitch;
+
+        /*
+         * Convert planePitch to units of bytes if it's currently specified in
+         * units of blocks. Each block is 64 bytes wide.
+         */
+        if (pSurfaceEvo->layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
+            planePitch <<= NVKMS_BLOCK_LINEAR_LOG_GOB_WIDTH;
+        }
+
+        if (planePitch > pDevEvo->caps.maxWidthInBytes) {
+            return FALSE;
+        }
+    }
+
+    if (!pDevEvo->hal->ValidateWindowFormat(pSurfaceEvo->format,
+                                            sourceFetchRect,
+                                            NULL)) {
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+static NvBool
+ValidateLayerLutHwState(const NVDevEvoRec *pDevEvo,
+                        const NVFlipEvoHwState *pFlipState,
+                        NvU32 layer)
+{
+    const NVFlipChannelEvoHwState *pHwState = &pFlipState->layer[layer];
+
+    if (!pDevEvo->caps.layerCaps[layer].ilut.supported &&
+        (pHwState->inputLut.pLutSurfaceEvo != NULL)) {
+        return FALSE;
+    }
+
+    if (!pDevEvo->caps.layerCaps[layer].tmo.supported &&
+        (pHwState->tmoLut.pLutSurfaceEvo != NULL)) {
+        return FALSE;
+    }
+
+    /* Surface format validation is handled in UpdateFlipLutHwState */
+    if (pDevEvo->hal->caps.needDefaultLutSurface &&
+        pHwState->pSurfaceEvo[NVKMS_LEFT] != NULL) {
+        /*
+         * needDefaultLutSurface corresponds to the Turing+ case where the ILUT
+         * must convert to FP16. When it is set, the ILUT must be set if the
+         * surface is not in FP16, and the ILUT must not be set if the surface
+         * is in FP16. However, we only validate the second case, because the
+         * first is handled internally by using the default ILUT.
+         */
+        if ((pHwState->pSurfaceEvo[NVKMS_LEFT]->format ==
+             NvKmsSurfaceMemoryFormatRF16GF16BF16XF16) ||
+            (pHwState->pSurfaceEvo[NVKMS_LEFT]->format ==
+             NvKmsSurfaceMemoryFormatRF16GF16BF16AF16)) {
+            /*
+             * If the layer's surface format is FP16, the ILUT must not be
+             * enabled.
+             */
+            if ((pHwState->inputLut.pLutSurfaceEvo != NULL) ||
+                (pHwState->tf != NVKMS_INPUT_TF_LINEAR)) {
+                return FALSE;
+            }
+        }
+    }
+
+    if (pHwState->pSurfaceEvo[NVKMS_LEFT] &&
+        pHwState->pSurfaceEvo[NVKMS_LEFT]->format == NvKmsSurfaceMemoryFormatI8) {
+
+        /* If the layer's surface format is I8, the ILUT must be enabled.
*/ + if (pHwState->inputLut.pLutSurfaceEvo == NULL) { + return FALSE; + } + } + + return TRUE; +} + +static NvBool +ValidateHeadLutHwState(const NVDevEvoRec *pDevEvo, + const NVFlipEvoHwState *pFlipState) +{ + if (!pDevEvo->caps.olut.supported && + (pFlipState->outputLut.pLutSurfaceEvo != NULL)) { + return FALSE; + } + + return TRUE; +} + +static NvBool +ValidateMainFlipChannelEvoHwState(const NVDevEvoRec *pDevEvo, + const NVFlipChannelEvoHwState *pHwState, + const NVHwModeTimingsEvo *pTimings, + struct NvKmsPoint viewPortPointIn) +{ + NvU32 eye; + const NVSurfaceEvoRec *pFirstSurfaceEvo = NULL; + + /* + * This struct represents the source fetch rectangle for a given surface, + * and will be populated later as such. This function doesn't explicitly set + * sourceFetchRect.{x,y} because NVKMS currently doesn't support programming + * source fetch offsets, so the init value of 0 should be fine for both of + * these fields. + */ + struct NvKmsRect sourceFetchRect = {0}; + + if (!ValidateFlipChannelEvoHwState(pHwState)) { + return FALSE; + } + + for (eye = 0; eye < NVKMS_MAX_EYES; eye++) { + const NVSurfaceEvoRec *pSurfaceEvo = pHwState->pSurfaceEvo[eye]; + + if (pSurfaceEvo == NULL) { + continue; + } + + if (pFirstSurfaceEvo == NULL) { + pFirstSurfaceEvo = pSurfaceEvo; + } else if (pSurfaceEvo->widthInPixels != + pFirstSurfaceEvo->widthInPixels || + pSurfaceEvo->heightInPixels != + pFirstSurfaceEvo->heightInPixels) { + return FALSE; + } + + sourceFetchRect.width = pHwState->sizeIn.width; + sourceFetchRect.height = pHwState->sizeIn.height; + + if (!ValidateSurfaceSize(pDevEvo, pSurfaceEvo, &sourceFetchRect)) { + return FALSE; + } + + /* The use of A_plus_B_greater_than_C_U16 is only valid if these + * fit in a U16 */ + nvAssert(pSurfaceEvo->widthInPixels <= NV_U16_MAX); + nvAssert(pSurfaceEvo->heightInPixels <= NV_U16_MAX); + /* And the checks above in ValidateSurfaceSize should have + * guaranteed that. */ + nvAssert(pDevEvo->caps.maxWidthInPixels <= NV_U16_MAX); + nvAssert(pDevEvo->caps.maxHeight <= NV_U16_MAX); + + /* + * Validate that the requested viewport parameters fit within the + * specified surface, unless the main layer is allowed to be smaller + * than the viewport. + */ + if (!pDevEvo->caps.layerCaps[NVKMS_MAIN_LAYER].supportsWindowMode) { + if (A_plus_B_greater_than_C_U16(viewPortPointIn.x, + pTimings->viewPort.in.width, + pSurfaceEvo->widthInPixels)) { + return FALSE; + } + + if (A_plus_B_greater_than_C_U16(viewPortPointIn.y, + pTimings->viewPort.in.height, + pSurfaceEvo->heightInPixels)) { + return FALSE; + } + } + } + + return TRUE; +} + +static NvBool +ValidateOverlayFlipChannelEvoHwState(const NVDevEvoRec *pDevEvo, + const NVFlipChannelEvoHwState *pHwState) +{ + const NVSurfaceEvoRec *pSurfaceEvo = pHwState->pSurfaceEvo[NVKMS_LEFT]; + + /* + * This struct represents the source fetch rectangle for a given surface, + * and will be populated later as such. This function doesn't explicitly set + * sourceFetchRect.{x,y} because NVKMS currently doesn't support programming + * source fetch offsets, so the init value of 0 should be fine for both of + * these fields. + */ + struct NvKmsRect sourceFetchRect = {0}; + + if (!ValidateFlipChannelEvoHwState(pHwState)) { + return FALSE; + } + + if (pSurfaceEvo == NULL) { + return TRUE; + } + + sourceFetchRect.width = pHwState->sizeIn.width; + sourceFetchRect.height = pHwState->sizeIn.height; + + if (!ValidateSurfaceSize(pDevEvo, pSurfaceEvo, &sourceFetchRect)) { + return FALSE; + } + + /* Validate input size against surface size. 
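+     * (Because source fetch offsets are not programmed, sizeIn is anchored
+     * at the surface origin and must fit entirely within the surface.)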
*/ + if (pHwState->sizeIn.width > pSurfaceEvo->widthInPixels || + pHwState->sizeIn.height > pSurfaceEvo->heightInPixels) { + return FALSE; + } + + return TRUE; +} + +static NvBool +ValidateHDR(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVFlipEvoHwState *pFlipState) +{ + NvU32 layerStaticMetadataCount = 0; + NvU32 layerSupportedCount = 0; + + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pDevEvo->caps.layerCaps[layer].supportsICtCp) { + layerSupportedCount++; + } + + if (pFlipState->layer[layer].hdrStaticMetadata.enabled) { + layerStaticMetadataCount++; + + /* + * If HDR static metadata is enabled, we may need TMO. CSC11 will be + * used by NVKMS to convert from linear FP16 LMS to linear FP16 RGB. + * As such, the user-supplied precomp CSC can't be programmed into + * CSC11 in this case. + */ + if (!nvIsCscMatrixIdentity(&pFlipState->layer[layer].cscMatrix)) { + return FALSE; + } + + // Already checked in UpdateLayerFlipEvoHwStateHDRStaticMetadata() + nvAssert(pDevEvo->caps.layerCaps[layer].supportsICtCp); + } + } + + // If enabling HDR TF... + // XXX HDR TODO: Handle other transfer functions + if (pFlipState->tf == NVKMS_OUTPUT_TF_PQ) { + // At least one layer must support HDR. + if (layerSupportedCount == 0) { + return FALSE; + } + + // If HDR metadata is not overridden for the head... + if (!pFlipState->hdrInfoFrame.enabled) { + // At least one layer must have static metadata. + if (layerStaticMetadataCount == 0) { + return FALSE; + } + } + } + + // Only one layer can specify HDR static metadata. + // XXX HDR TODO: Support multiple layers with HDR static metadata + if (layerStaticMetadataCount > 1) { + return FALSE; + } + + return TRUE; +} + +static NvBool +ValidateColorspace(const NVDevEvoRec *pDevEvo, + const NVFlipEvoHwState *pFlipState, + NvU32 layer) +{ + if ((pFlipState->layer[layer].colorSpace != + NVKMS_INPUT_COLOR_SPACE_NONE)) { + + NVSurfaceEvoPtr pSurfaceEvo = + pFlipState->layer[layer].pSurfaceEvo[NVKMS_LEFT]; + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + (pSurfaceEvo != NULL) ? + nvKmsGetSurfaceMemoryFormatInfo(pSurfaceEvo->format) : NULL; + + if (pFormatInfo == NULL) { + return FALSE; + } + + // FP16 is only for use with scRGB. + if ((pFlipState->layer[layer].colorSpace != + NVKMS_INPUT_COLOR_SPACE_SCRGB) && + ((pSurfaceEvo->format == + NvKmsSurfaceMemoryFormatRF16GF16BF16AF16) || + (pSurfaceEvo->format == + NvKmsSurfaceMemoryFormatRF16GF16BF16XF16))) { + return FALSE; + } + + // scRGB is only compatible with FP16. 
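+        // (Rationale, as a note: scRGB uses the sRGB primaries but an
+        // extended range with negative and greater-than-1.0 components,
+        // which only the floating-point FP16 formats can encode.)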
+        if ((pFlipState->layer[layer].colorSpace ==
+             NVKMS_INPUT_COLOR_SPACE_SCRGB) &&
+            !((pSurfaceEvo->format ==
+               NvKmsSurfaceMemoryFormatRF16GF16BF16AF16) ||
+              (pSurfaceEvo->format ==
+               NvKmsSurfaceMemoryFormatRF16GF16BF16XF16))) {
+            return FALSE;
+        }
+    }
+
+    return TRUE;
+}
+
+static NvBool ValidateCompositionDepth(const NVFlipEvoHwState *pFlipState,
+                                       const NvU32 layer)
+{
+    NvU32 tmpLayer;
+
+    if (pFlipState->layer[layer].pSurfaceEvo[NVKMS_LEFT] == NULL) {
+        return TRUE;
+    }
+
+    /* Depth should be different for each of the layers owned by the head */
+    for (tmpLayer = 0; tmpLayer < ARRAY_LEN(pFlipState->layer); tmpLayer++) {
+        if (pFlipState->layer[tmpLayer].pSurfaceEvo[NVKMS_LEFT] == NULL) {
+            continue;
+        }
+
+        if ((tmpLayer != layer) &&
+            (pFlipState->layer[tmpLayer].composition.depth ==
+             pFlipState->layer[layer].composition.depth)) {
+            return FALSE;
+        }
+    }
+
+    /* Depth of the main layer should be the greatest one */
+    if (pFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT] != NULL) {
+        if (pFlipState->layer[NVKMS_MAIN_LAYER].composition.depth <
+            pFlipState->layer[layer].composition.depth) {
+            return FALSE;
+        }
+    }
+
+    return TRUE;
+}
+
+/*!
+ * Perform validation of the given NVFlipEvoHwState.
+ */
+NvBool nvValidateFlipEvoHwState(
+    const NVDevEvoRec *pDevEvo,
+    const NvU32 head,
+    const NVHwModeTimingsEvo *pTimings,
+    const NVFlipEvoHwState *pFlipState)
+{
+    NvU32 layer;
+
+    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+        if (!ValidateCompositionDepth(pFlipState, layer)) {
+            return FALSE;
+        }
+
+        if (layer == NVKMS_MAIN_LAYER) {
+            if (!ValidateMainFlipChannelEvoHwState(pDevEvo,
+                                                   &pFlipState->layer[layer],
+                                                   pTimings,
+                                                   pFlipState->viewPortPointIn)) {
+                return FALSE;
+            }
+        } else {
+            const NVFlipChannelEvoHwState *pMainLayerState =
+                &pFlipState->layer[NVKMS_MAIN_LAYER];
+
+            /*
+             * No overlay layer should be enabled if the main
+             * layer is disabled.
+             */
+            if ((pMainLayerState->pSurfaceEvo[NVKMS_LEFT] == NULL) &&
+                (pFlipState->layer[layer].pSurfaceEvo[NVKMS_LEFT] != NULL)) {
+                return FALSE;
+            }
+
+            if (!pFlipState->dirty.layer[layer]) {
+                continue;
+            }
+
+            if (!ValidateOverlayFlipChannelEvoHwState(pDevEvo,
+                                                      &pFlipState->layer[layer])) {
+                return FALSE;
+            }
+        }
+
+        if (!ValidateLayerLutHwState(pDevEvo, pFlipState, layer)) {
+            return FALSE;
+        }
+
+        if (!ValidateColorspace(pDevEvo, pFlipState, layer)) {
+            return FALSE;
+        }
+    }
+
+    if (!ValidateHDR(pDevEvo, head, pFlipState)) {
+        return FALSE;
+    }
+
+    if (!ValidateHeadLutHwState(pDevEvo, pFlipState)) {
+        return FALSE;
+    }
+
+    /* XXX NVKMS TODO: validate cursor x,y against current viewport in? */
+
+    return ValidateUsageBounds(pDevEvo,
+                               head,
+                               &pFlipState->usage,
+                               &pTimings->viewPort.possibleUsage);
+}
+
+/*
+ * Record in the updateState that the given channel needs interlocked
+ * window immediate updates.
+ */
+static void UpdateWinImmInterlockState(NVDevEvoPtr pDevEvo,
+                                       NVEvoUpdateState *updateState,
+                                       const NVEvoChannel *pChannel)
+{
+    const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo);
+    NvU32 sd;
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        if (subDevMask & (1 << sd)) {
+            updateState->subdev[sd].winImmInterlockMask |=
+                pChannel->channelMask;
+        }
+    }
+}
+
+/*!
+ * Record in the updateState that the given channel's methods are eligible for
+ * flip locking.
+ */ +static void UpdateUpdateFlipLockState(NVDevEvoPtr pDevEvo, + NVEvoUpdateState *updateState, + const NVEvoChannel *pChannel) +{ + const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo); + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (subDevMask & (1 << sd)) { + updateState->subdev[sd].flipLockQualifyingMask |= + pChannel->channelMask; + } + } +} + +// Adjust from EDID-encoded maxCLL/maxFALL to actual values in units of 1 cd/m2 +static inline NvU32 MaxCvToVal(NvU32 cv) +{ + if (cv == 0) { + return 0; + } + + // 50*2^(cv/32) + return f64_to_ui32( + f64_mul(ui32_to_f64(50), + nvKmsPow(ui32_to_f64(2), + f64_div(ui32_to_f64(cv), + ui32_to_f64(32)))), softfloat_round_near_even, FALSE); +} + +// Adjust from EDID-encoded minCLL to actual value in units of 0.0001 cd/m2 +static inline NvU32 MinCvToVal(NvU32 cv, NvU32 maxCLL) +{ + // 10,000 * (minCLL = (maxCLL * ((cv/255)^2 / 100))) + return f64_to_ui32( + f64_mul(ui32_to_f64(10000), + f64_mul(ui32_to_f64(maxCLL), + f64_div(nvKmsPow(f64_div(ui32_to_f64(cv), + ui32_to_f64(255)), + ui32_to_f64(2)), + ui32_to_f64(100)))), + softfloat_round_near_even, FALSE); +} + +static NvBool UpdateHDR(NVDevEvoPtr pDevEvo, + const NVFlipEvoHwState *pFlipState, + const NvU32 sd, + const NvU32 head, + const NVT_HDR_STATIC_METADATA *pHdrInfo, + NVEvoUpdateState *updateState) +{ + NVDispEvoPtr pDispEvo = pDevEvo->gpus[sd].pDispEvo; + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvBool dirty = FALSE; + + if (pFlipState->dirty.tf) { + pHeadState->tf = pFlipState->tf; + + dirty = TRUE; + } + + if (pFlipState->dirty.hdrStaticMetadata) { + NvBool found = FALSE; + + /* + * Track if HDR static metadata is overridden for the head in order to + * initialize subsequent instances of NVFlipEvoHwState. + */ + pHeadState->hdrInfoFrameOverride.enabled = + pFlipState->hdrInfoFrame.enabled; + pHeadState->hdrInfoFrameOverride.eotf = + pFlipState->hdrInfoFrame.eotf; + pHeadState->hdrInfoFrameOverride.staticMetadata = + pFlipState->hdrInfoFrame.staticMetadata; + + // Populate head with updated static metadata. + + if (pFlipState->hdrInfoFrame.enabled) { + // If HDR static metadata is overridden for the head, use that. + pHeadState->hdrInfoFrame.staticMetadata = + pFlipState->hdrInfoFrame.staticMetadata; + pHeadState->hdrInfoFrame.eotf = + pFlipState->hdrInfoFrame.eotf; + + pHeadState->hdrInfoFrame.state = NVKMS_HDR_INFOFRAME_STATE_ENABLED; + found = TRUE; + } else { + NvU32 layer; + + /* + * If HDR static metadata is specified for layer(s), construct the + * head's HDR static metadata using those. + */ + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pFlipState->layer[layer].hdrStaticMetadata.enabled) { + NvU32 targetMaxCLL = MaxCvToVal(pHdrInfo->max_cll); + + /* + * Only one layer can currently specify static metadata, + * verified by ValidateHDR(). + * + * XXX HDR TODO: Combine metadata from multiple layers. + */ + nvAssert(!found); + + // Send this layer's metadata to the display. 
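+                    // (The code below copies the layer's metadata and then
+                    // infers the infoframe EOTF from the head's output
+                    // transfer function, since flip requests carry no
+                    // explicit per-layer EOTF.)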
+                    pHeadState->hdrInfoFrame.staticMetadata =
+                        pFlipState->layer[layer].hdrStaticMetadata.val;
+
+                    // Infer metadata eotf from output tf
+                    // XXX HDR TODO: Handle other transfer functions
+                    switch (pHeadState->tf) {
+                    default:
+                        nvAssert(!"Unrecognized output TF");
+                        // Fall through
+                    case NVKMS_OUTPUT_TF_TRADITIONAL_GAMMA_SDR:
+                    case NVKMS_OUTPUT_TF_NONE:
+                        pHeadState->hdrInfoFrame.eotf =
+                            NVKMS_INFOFRAME_EOTF_SDR_GAMMA;
+                        break;
+                    case NVKMS_OUTPUT_TF_PQ:
+                        pHeadState->hdrInfoFrame.eotf =
+                            NVKMS_INFOFRAME_EOTF_ST2084;
+                        break;
+                    }
+
+                    pHeadState->hdrInfoFrame.state =
+                        NVKMS_HDR_INFOFRAME_STATE_ENABLED;
+                    found = TRUE;
+
+                    /*
+                     * Prepare for tone mapping. If we expect to tone map and
+                     * the EDID has valid lum values, mirror EDID lum values to
+                     * prevent redundant tone mapping by the display. We will
+                     * tone map to the specified maxCLL.
+                     */
+                    if (nvNeedsTmoLut(pDevEvo, pDevEvo->head[head].layer[layer],
+                                      &pFlipState->layer[layer],
+                                      nvGetHDRSrcMaxLum(
+                                          &pFlipState->layer[layer]),
+                                      targetMaxCLL)) {
+                        NvU32 targetMaxFALL = MaxCvToVal(pHdrInfo->max_fall);
+                        if ((targetMaxCLL > 0) &&
+                            (targetMaxCLL <= 10000) &&
+                            (targetMaxCLL >= targetMaxFALL)) {
+
+                            NvU32 targetMinCLL = MinCvToVal(pHdrInfo->min_cll,
+                                                            targetMaxCLL);
+
+                            pHeadState->hdrInfoFrame.staticMetadata.
+                                maxDisplayMasteringLuminance = targetMaxCLL;
+                            pHeadState->hdrInfoFrame.staticMetadata.
+                                minDisplayMasteringLuminance = targetMinCLL;
+                            pHeadState->hdrInfoFrame.staticMetadata.maxCLL =
+                                targetMaxCLL;
+                            pHeadState->hdrInfoFrame.staticMetadata.maxFALL =
+                                targetMaxFALL;
+                        }
+                    }
+                }
+            }
+        }
+
+        if (!found) {
+            nvkms_memset(&pHeadState->hdrInfoFrame.staticMetadata, 0,
+                         sizeof(struct NvKmsHDRStaticMetadata));
+            if (pHeadState->hdrInfoFrame.state ==
+                NVKMS_HDR_INFOFRAME_STATE_ENABLED) {
+                pHeadState->hdrInfoFrame.state =
+                    NVKMS_HDR_INFOFRAME_STATE_TRANSITIONING;
+            }
+        }
+
+        dirty = TRUE;
+    }
+
+    return dirty;
+}
+
+/*!
+ * Program a flip on all requested layers on the specified head.
+ *
+ * This also updates pDevEvo->gpus[sd].headState[head], caching what was
+ * programmed.
+ *
+ * \param[in,out] pDevEvo       The device on which the flip should be
+ *                              performed.
+ * \param[in]     sd            The subdevice on which the flip should be
+ *                              performed.
+ * \param[in]     head          The head on which the flip should be performed.
+ * \param[in]     pHdrInfo      The EDID-derived HDR static metadata for the
+ *                              display.
+ * \param[in]     pFlipState    The description of how to update each layer.
+ * \param[in]     allowFlipLock Whether the main layer flip may qualify for
+ *                              flip locking.
+ * \param[in,out] updateState   Indicates which channels require UPDATEs.
+ */
+void nvFlipEvoOneHead(
+    NVDevEvoPtr pDevEvo,
+    const NvU32 sd,
+    const NvU32 head,
+    const NVT_HDR_STATIC_METADATA *pHdrInfo,
+    const NVFlipEvoHwState *pFlipState,
+    NvBool allowFlipLock,
+    NVEvoUpdateState *updateState)
+{
+    const NvU32 subDeviceMask = NVBIT(sd);
+    const NVDispHeadStateEvoRec *pHeadState =
+        &pDevEvo->gpus[sd].pDispEvo->headState[head];
+    NvBool bypassComposition = pHeadState->bypassComposition;
+    NVEvoSubDevHeadStateRec *pSdHeadState =
+        &pDevEvo->gpus[sd].headState[head];
+    NvU32 layer;
+    NvBool hdrDirty;
+
+    /*
+     * Provide the pre-update hardware state (in pSdHeadState) and the new
+     * target state (pFlipState) to the HAL implementation so that it has the
+     * information it needs to implement the workaround for hardware bug
+     * 2193096, which requires special logic on transitions between NULL and
+     * non-NULL ctxdmas (and vice versa).
+     */
+    pDevEvo->hal->FlipTransitionWAR(pDevEvo, sd, head,
+                                    pSdHeadState, pFlipState,
+                                    updateState);
+
+    /*
+     * Promote the software state first, such that the hardware programming
+     * paths below see the new state atomically.
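+     * Everything dirty (viewport point, cursor, OLUT, per-layer state) is
+     * copied into pSdHeadState before any HAL call below consumes it.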
+ */ + if (pFlipState->dirty.viewPortPointIn) { + pSdHeadState->viewPortPointIn = pFlipState->viewPortPointIn; + } + + if (pFlipState->dirty.cursorSurface || pFlipState->dirty.cursorPosition) { + pSdHeadState->cursor = pFlipState->cursor; + } + + if (pFlipState->dirty.olut) { + pSdHeadState->outputLut = pFlipState->outputLut; + pSdHeadState->olutFpNormScale = pFlipState->olutFpNormScale; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pFlipState->dirty.layer[layer]) { + pSdHeadState->layer[layer] = pFlipState->layer[layer]; + } + } + + if (pFlipState->dirty.viewPortPointIn) { + nvSetViewPortPointInEvo(pDevEvo->gpus[sd].pDispEvo, + head, + pFlipState->viewPortPointIn.x, + pFlipState->viewPortPointIn.y, + updateState); + } + + if (pFlipState->dirty.cursorSurface) { + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + pDevEvo->hal->SetCursorImage(pDevEvo, + head, + pSdHeadState->cursor.pSurfaceEvo, + updateState, + &pSdHeadState->cursor.cursorCompParams); + nvPopEvoSubDevMask(pDevEvo); + } + + if (pFlipState->dirty.cursorPosition) { + nvEvoMoveCursorInternal(pDevEvo->gpus[sd].pDispEvo, + head, + pFlipState->cursor.x, + pFlipState->cursor.y); + } + + hdrDirty = UpdateHDR(pDevEvo, pFlipState, sd, head, pHdrInfo, updateState); + + if (pFlipState->dirty.olut || hdrDirty) { + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + pDevEvo->hal->SetOutputLut(pDevEvo, sd, head, + &pFlipState->outputLut, + pFlipState->olutFpNormScale, + updateState, + bypassComposition); + nvPopEvoSubDevMask(pDevEvo); + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (!pFlipState->dirty.layer[layer]) { + continue; + } + + nvPushEvoSubDevMask(pDevEvo, subDeviceMask); + + if (pFlipState->dirty.layerPosition[layer]) { + /* Ensure position updates are supported on this layer. */ + nvAssert(pDevEvo->caps.layerCaps[layer].supportsWindowMode); + + pDevEvo->hal->SetImmPointOut(pDevEvo, + pDevEvo->head[head].layer[layer], + sd, + updateState, + pFlipState->layer[layer].outputPosition.x, + pFlipState->layer[layer].outputPosition.y); + + if (pDevEvo->hal->caps.supportsSynchronizedOverlayPositionUpdate) { + UpdateWinImmInterlockState(pDevEvo, updateState, + pDevEvo->head[head].layer[layer]); + } + } + + /* Inform DIFR about the upcoming flip. 
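+ * DIFR (Display Idle Frame Refresh) caches frames while the display
+ * is idle; a pending flip ends that idle period.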
*/ + if (pDevEvo->pDifrState) { + nvDIFRNotifyFlip(pDevEvo->pDifrState); + } + + pDevEvo->hal->Flip(pDevEvo, + pDevEvo->head[head].layer[layer], + &pFlipState->layer[layer], + updateState, + bypassComposition); + if (layer == NVKMS_MAIN_LAYER && allowFlipLock) { + UpdateUpdateFlipLockState(pDevEvo, updateState, + pDevEvo->head[head].layer[layer]); + } + nvPopEvoSubDevMask(pDevEvo); + } + + pSdHeadState->targetUsage = pFlipState->usage; + + pSdHeadState->targetDisableMidFrameAndDWCFWatermark = + pFlipState->disableMidFrameAndDWCFWatermark; +} + +static void ChangeSurfaceFlipRefCount( + NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo, + NvBool increase) +{ + if (pSurfaceEvo != NULL) { + if (increase) { + nvEvoIncrementSurfaceRefCnts(pSurfaceEvo); + } else { + nvEvoDecrementSurfaceRefCnts(pDevEvo, pSurfaceEvo); + } + } +} + +void nvUpdateSurfacesFlipRefCount( + NVDevEvoPtr pDevEvo, + const NvU32 head, + NVFlipEvoHwState *pFlipState, + NvBool increase) +{ + NvU32 i; + + ChangeSurfaceFlipRefCount( + pDevEvo, + pFlipState->cursor.pSurfaceEvo, + increase); + ChangeSurfaceFlipRefCount( + pDevEvo, + pFlipState->outputLut.pLutSurfaceEvo, + increase); + + for (i = 0; i < pDevEvo->head[head].numLayers; i++) { + NVFlipChannelEvoHwState *pLayerFlipState = &pFlipState->layer[i]; + + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->pSurfaceEvo[NVKMS_LEFT], + increase); + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->pSurfaceEvo[NVKMS_RIGHT], + increase); + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->completionNotifier.surface.pSurfaceEvo, + increase); + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->inputLut.pLutSurfaceEvo, + increase); + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->tmoLut.pLutSurfaceEvo, + increase); + + if (!pLayerFlipState->syncObject.usingSyncpt) { + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo, + increase); + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo, + increase); + } + } +} + +static void UnionScalingUsageBounds( + const struct NvKmsScalingUsageBounds *a, + const struct NvKmsScalingUsageBounds *b, + struct NvKmsScalingUsageBounds *ret) +{ + ret->maxVDownscaleFactor = NV_MAX(a->maxVDownscaleFactor, + b->maxVDownscaleFactor); + ret->maxHDownscaleFactor = NV_MAX(a->maxHDownscaleFactor, + b->maxHDownscaleFactor); + ret->vTaps = NV_MAX(a->vTaps, b->vTaps); + ret->vUpscalingAllowed = a->vUpscalingAllowed || b->vUpscalingAllowed; +} + +void nvUnionUsageBounds(const struct NvKmsUsageBounds *a, + const struct NvKmsUsageBounds *b, + struct NvKmsUsageBounds *ret) +{ + NvU32 i; + + nvkms_memset(ret, 0, sizeof(*ret)); + + for (i = 0; i < ARRAY_LEN(a->layer); i++) { + nvAssert(a->layer[i].usable == + !!a->layer[i].supportedSurfaceMemoryFormats); + nvAssert(b->layer[i].usable == + !!b->layer[i].supportedSurfaceMemoryFormats); + + ret->layer[i].usable = a->layer[i].usable || b->layer[i].usable; + + ret->layer[i].supportedSurfaceMemoryFormats = + a->layer[i].supportedSurfaceMemoryFormats | + b->layer[i].supportedSurfaceMemoryFormats; + + UnionScalingUsageBounds(&a->layer[i].scaling, + &b->layer[i].scaling, + &ret->layer[i].scaling); + } +} + +static void IntersectScalingUsageBounds( + const struct NvKmsScalingUsageBounds *a, + const struct NvKmsScalingUsageBounds *b, + struct NvKmsScalingUsageBounds *ret) +{ + ret->maxVDownscaleFactor = NV_MIN(a->maxVDownscaleFactor, + b->maxVDownscaleFactor); + 
ret->maxHDownscaleFactor = NV_MIN(a->maxHDownscaleFactor,
+ b->maxHDownscaleFactor);
+ ret->vTaps = NV_MIN(a->vTaps, b->vTaps);
+ ret->vUpscalingAllowed = a->vUpscalingAllowed && b->vUpscalingAllowed;
+}
+
+void nvIntersectUsageBounds(const struct NvKmsUsageBounds *a,
+ const struct NvKmsUsageBounds *b,
+ struct NvKmsUsageBounds *ret)
+{
+ NvU32 i;
+
+ nvkms_memset(ret, 0, sizeof(*ret));
+
+ for (i = 0; i < ARRAY_LEN(a->layer); i++) {
+ nvAssert(a->layer[i].usable ==
+ !!a->layer[i].supportedSurfaceMemoryFormats);
+ nvAssert(b->layer[i].usable ==
+ !!b->layer[i].supportedSurfaceMemoryFormats);
+
+ ret->layer[i].usable = a->layer[i].usable && b->layer[i].usable;
+
+ ret->layer[i].supportedSurfaceMemoryFormats =
+ a->layer[i].supportedSurfaceMemoryFormats &
+ b->layer[i].supportedSurfaceMemoryFormats;
+
+ IntersectScalingUsageBounds(&a->layer[i].scaling,
+ &b->layer[i].scaling,
+ &ret->layer[i].scaling);
+ }
+}
+
+NvBool UsageBoundsEqual(
+ const struct NvKmsUsageBounds *a,
+ const struct NvKmsUsageBounds *b)
+{
+ NvU32 layer;
+
+ for (layer = 0; layer < ARRAY_LEN(a->layer); layer++) {
+ if (!nvEvoLayerUsageBoundsEqual(a, b, layer)) {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+NvBool nvAllocatePreFlipBandwidth(NVDevEvoPtr pDevEvo,
+ struct NvKmsFlipWorkArea *pWorkArea)
+{
+ NVValidateImpOneDispHeadParamsRec *timingsParams = NULL;
+ struct NvKmsUsageBounds *currentAndNew = NULL;
+ struct NvKmsUsageBounds *guaranteedAndCurrent = NULL;
+ NVDispEvoPtr pDispEvo;
+ NvU32 head;
+ NvBool recheckIMP = FALSE;
+ NvBool ret = TRUE;
+
+ if (!pDevEvo->isSOCDisplay) {
+ return TRUE;
+ }
+
+ timingsParams =
+ nvCalloc(NVKMS_MAX_HEADS_PER_DISP, sizeof(*timingsParams));
+ if (timingsParams == NULL) {
+ return FALSE;
+ }
+
+ currentAndNew =
+ nvCalloc(NVKMS_MAX_HEADS_PER_DISP, sizeof(*currentAndNew));
+ if (currentAndNew == NULL) {
+ nvFree(timingsParams);
+ return FALSE;
+ }
+
+ guaranteedAndCurrent =
+ nvCalloc(NVKMS_MAX_HEADS_PER_DISP, sizeof(*guaranteedAndCurrent));
+ if (guaranteedAndCurrent == NULL) {
+ nvFree(timingsParams);
+ nvFree(currentAndNew);
+ return FALSE;
+ }
+
+ pDispEvo = pDevEvo->pDispEvo[0];
+
+ // SOC Display never has more than one disp
+ nvAssert(pDevEvo->nDispEvo == 1);
+
+ for (head = 0; head < pDevEvo->numHeads; head++) {
+ NVDispHeadStateEvoRec *pHeadState =
+ &pDispEvo->headState[head];
+ const struct NvKmsUsageBounds *pCurrent =
+ &pDevEvo->gpus[0].headState[head].preallocatedUsage;
+ const struct NvKmsUsageBounds *pNew =
+ &pWorkArea->sd[0].head[head].newState.usage;
+
+ if (pHeadState->activeRmId == 0) {
+ continue;
+ }
+
+ timingsParams[head].pConnectorEvo = pHeadState->pConnectorEvo;
+ timingsParams[head].activeRmId = pHeadState->activeRmId;
+ timingsParams[head].pixelDepth = pHeadState->pixelDepth;
+ timingsParams[head].pTimings = &pHeadState->timings;
+ timingsParams[head].enableDsc = (pHeadState->dscInfo.type !=
+ NV_DSC_INFO_EVO_TYPE_DISABLED);
+ timingsParams[head].dscSliceCount = pHeadState->dscInfo.sliceCount;
+ timingsParams[head].possibleDscSliceCountMask =
+ pHeadState->dscInfo.possibleSliceCountMask;
+ timingsParams[head].b2Heads1Or =
+ (pHeadState->mergeMode != NV_EVO_MERGE_MODE_DISABLED);
+
+ nvUnionUsageBounds(pCurrent, pNew, &currentAndNew[head]);
+ nvUnionUsageBounds(&pHeadState->timings.viewPort.guaranteedUsage,
+ pCurrent, &guaranteedAndCurrent[head]);
+
+ if (!ValidateUsageBounds(pDevEvo,
+ head,
+ pNew,
+ &guaranteedAndCurrent[head])) {
+ recheckIMP = TRUE;
+ }
+
+ nvUnionUsageBounds(&guaranteedAndCurrent[head], pNew,
+ &guaranteedAndCurrent[head]);
+
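+ // guaranteedAndCurrent[head] now holds the guaranteed|current|new
+ // union that the IMP recheck below validates against.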
timingsParams[head].pUsage = &guaranteedAndCurrent[head];
+ }
+
+ if (recheckIMP) {
+ ret = nvValidateImpOneDisp(pDispEvo, timingsParams,
+ FALSE /* requireBootClocks */,
+ NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE,
+ NULL /* pMinIsoBandwidthKBPS */,
+ NULL /* pMinDramFloorKBPS */,
+ 0x0 /* changedHeadsMask */);
+ if (ret) {
+ for (head = 0; head < pDevEvo->numHeads; head++) {
+ pDevEvo->gpus[0].headState[head].preallocatedUsage =
+ currentAndNew[head];
+ }
+ }
+ }
+
+ nvFree(timingsParams);
+ nvFree(currentAndNew);
+ nvFree(guaranteedAndCurrent);
+
+ if (ret) {
+ nvScheduleLowerDispBandwidthTimer(pDevEvo);
+ }
+
+ return ret;
+}
+
+/*!
+ * If a satellite channel is active, pre-NVDisplay hardware does not allow
+ * its usage bounds to be changed in a non-interlocked update. The
+ * nvSetUsageBoundsEvo() code path for pre-NVDisplay hardware interlocks the
+ * satellite channels with the usage bounds update. This makes it essential
+ * to poll for the NO_METHOD_PENDING state of the satellite channels;
+ * otherwise, the blocking pre-flip IMP update will also get stuck.
+ *
+ * It is not possible to interlock flip-locked satellite channels with the
+ * core channel usage bounds update; in that case, reject the flip. Do not
+ * allow the client to change any surface usage bounds parameters of a
+ * flip-locked channel without deactivating the channel first.
+ */
+static NvBool PrepareToDoPreFlipIMP(NVDevEvoPtr pDevEvo,
+ struct NvKmsFlipWorkArea *pWorkArea)
+{
+ NvU64 startTime = 0;
+ NvU32 timeout = 2000000; /* 2 seconds */
+ NvU32 sd;
+
+ for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+ NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
+ NvU32 head;
+
+ for (head = 0; head < pDevEvo->numHeads; head++) {
+ NVEvoHeadControlPtr pHC =
+ &pEvoSubDev->headControl[head];
+ const NVEvoSubDevHeadStateRec *pCurrentFlipState =
+ &pDevEvo->gpus[sd].headState[head];
+ const NVSurfaceEvoRec *pCurrentBaseSurf =
+ pCurrentFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT];
+ const struct NvKmsUsageBounds *pCurrentUsage =
+ &pCurrentFlipState->usage;
+
+ NVFlipEvoHwState *pNewFlipState =
+ &pWorkArea->sd[sd].head[head].newState;
+ const NVSurfaceEvoRec *pNewBaseSurf =
+ pNewFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT];
+ struct NvKmsUsageBounds *pNewUsage =
+ &pNewFlipState->usage;
+
+ struct NvKmsUsageBounds *pPreFlipUsage =
+ &pWorkArea->sd[sd].head[head].preFlipUsage;
+
+ NvU32 layer;
+
+ nvUnionUsageBounds(pNewUsage, pCurrentUsage, pPreFlipUsage);
+
+ if (pDevEvo->hal->caps.supportsNonInterlockedUsageBoundsUpdate) {
+ /*
+ * NVDisplay does not interlock the satellite channel
+ * with its usage bounds update.
+ */
+ continue;
+ }
+
+ /*
+ * If head is flip-locked then do not change usage
+ * bounds while base channel is active.
+ */
+ if (pHC->flipLock &&
+ /* If the base channel is active before and after flip then
+ * current and new base usage bounds should be same. */
+ ((pNewBaseSurf != NULL &&
+ pCurrentBaseSurf != NULL &&
+ !nvEvoLayerUsageBoundsEqual(pCurrentUsage,
+ pNewUsage, NVKMS_MAIN_LAYER)) ||
+ /* If the base channel is active before flip then current and
+ * preflip base usage bounds should be same. */
+ (pCurrentBaseSurf != NULL &&
+ !nvEvoLayerUsageBoundsEqual(pCurrentUsage,
+ pPreFlipUsage, NVKMS_MAIN_LAYER)))) {
+ return FALSE;
+ }
+
+ /*
+ * Poll for NO_METHOD_PENDING state if usage
+ * bounds of the channel are changed.
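+ * (All changed channels below share a single startTime/timeout
+ * budget of 2 seconds.)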
+ */
+ for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+ if (!nvEvoLayerUsageBoundsEqual(pCurrentUsage,
+ pPreFlipUsage, layer) &&
+ !nvEvoPollForNoMethodPending(pDevEvo,
+ sd,
+ pDevEvo->head[head].layer[layer],
+ &startTime,
+ timeout)) {
+ return FALSE;
+ }
+ }
+ }
+ }
+
+ return TRUE;
+}
+
+/*!
+ * Tasks that need to be performed before triggering a flip should all be
+ * done here.
+ *
+ * If necessary, raise usage bounds and/or disable MidFrameAndDWCFWatermark
+ * (bug 200508242) in the core channel and do an IMP update.
+ *
+ * Note that this function only raises usage bounds and/or disables
+ * MidFrameAndDWCFWatermark, never lowers usage bounds and/or enables
+ * MidFrameAndDWCFWatermark. This allows it to run before queuing a flip even
+ * if there are still pending flips in a base channel.
+ */
+static void PreFlipIMP(NVDevEvoPtr pDevEvo,
+ const struct NvKmsFlipWorkArea *pWorkArea)
+{
+ NvU32 head, sd;
+ NVDispEvoPtr pDispEvo;
+
+ FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+ NVEvoUpdateState updateState = { };
+ NvBool update = FALSE;
+
+ for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
+ const NVFlipEvoHwState *pNewState =
+ &pWorkArea->sd[sd].head[head].newState;
+ const struct NvKmsUsageBounds *pPreFlipUsage =
+ &pWorkArea->sd[sd].head[head].preFlipUsage;
+ struct NvKmsUsageBounds *pCurrentUsage =
+ &pDevEvo->gpus[sd].headState[head].usage;
+
+ if (!UsageBoundsEqual(pCurrentUsage, pPreFlipUsage)) {
+ update |= nvSetUsageBoundsEvo(pDevEvo, sd, head,
+ pPreFlipUsage, &updateState);
+ }
+
+ if (!pDevEvo->gpus[sd].
+ headState[head].disableMidFrameAndDWCFWatermark &&
+ pNewState->disableMidFrameAndDWCFWatermark) {
+
+ nvEnableMidFrameAndDWCFWatermark(pDevEvo,
+ sd,
+ head,
+ FALSE /* enable */,
+ &updateState);
+ update = TRUE;
+ }
+ }
+
+ if (update) {
+ nvDoIMPUpdateEvo(pDispEvo, &updateState);
+ }
+ }
+}
+
+static void LowerDispBandwidth(void *dataPtr, NvU32 dataU32)
+{
+ NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP];
+ struct NvKmsUsageBounds *guaranteedAndCurrent;
+ NVDevEvoPtr pDevEvo = dataPtr;
+ NVDispEvoPtr pDispEvo;
+ NvU32 head;
+ NvBool ret;
+
+ guaranteedAndCurrent =
+ nvCalloc(1, sizeof(*guaranteedAndCurrent) * NVKMS_MAX_HEADS_PER_DISP);
+ if (guaranteedAndCurrent == NULL) {
+ nvAssert(guaranteedAndCurrent != NULL);
+ return;
+ }
+
+ nvkms_memset(&timingsParams, 0, sizeof(timingsParams));
+
+ pDispEvo = pDevEvo->pDispEvo[0];
+
+ // SOC Display never has more than one disp
+ nvAssert(pDevEvo->nDispEvo == 1);
+
+ for (head = 0; head < pDevEvo->numHeads; head++) {
+ NVDispHeadStateEvoRec *pHeadState =
+ &pDispEvo->headState[head];
+ const struct NvKmsUsageBounds *pGuaranteed =
+ &pHeadState->timings.viewPort.guaranteedUsage;
+ const struct NvKmsUsageBounds *pCurrent =
+ &pDevEvo->gpus[0].headState[head].usage;
+
+ if (pHeadState->activeRmId == 0) {
+ continue;
+ }
+
+ timingsParams[head].pConnectorEvo = pHeadState->pConnectorEvo;
+ timingsParams[head].activeRmId = pHeadState->activeRmId;
+ timingsParams[head].pixelDepth = pHeadState->pixelDepth;
+ timingsParams[head].pTimings = &pHeadState->timings;
+ timingsParams[head].enableDsc = (pHeadState->dscInfo.type !=
+ NV_DSC_INFO_EVO_TYPE_DISABLED);
+ timingsParams[head].dscSliceCount = pHeadState->dscInfo.sliceCount;
+ timingsParams[head].possibleDscSliceCountMask =
+ pHeadState->dscInfo.possibleSliceCountMask;
+ timingsParams[head].b2Heads1Or =
+ (pHeadState->mergeMode != NV_EVO_MERGE_MODE_DISABLED);
+
+ nvUnionUsageBounds(pGuaranteed, pCurrent,
&guaranteedAndCurrent[head]); + timingsParams[head].pUsage = &guaranteedAndCurrent[head]; + } + + ret = nvValidateImpOneDisp(pDispEvo, timingsParams, + FALSE /* requireBootClocks */, + NV_EVO_REALLOCATE_BANDWIDTH_MODE_POST, + NULL /* pMinIsoBandwidthKBPS */, + NULL /* pMinDramFloorKBPS */, + 0x0 /* changedHeadsMask */); + if (ret) { + for (head = 0; head < pDevEvo->numHeads; head++) { + pDevEvo->gpus[0].headState[head].preallocatedUsage = + pDevEvo->gpus[0].headState[head].usage; + } + } + + nvAssert(ret); + + nvFree(guaranteedAndCurrent); +} + +void nvCancelLowerDispBandwidthTimer(NVDevEvoPtr pDevEvo) +{ + nvkms_free_timer(pDevEvo->lowerDispBandwidthTimer); + pDevEvo->lowerDispBandwidthTimer = NULL; +} + +void nvScheduleLowerDispBandwidthTimer(NVDevEvoPtr pDevEvo) +{ + nvAssert(pDevEvo->isSOCDisplay); + + nvCancelLowerDispBandwidthTimer(pDevEvo); + + pDevEvo->lowerDispBandwidthTimer = + nvkms_alloc_timer(LowerDispBandwidth, + pDevEvo, + 0, /* dataU32 */ + 30000000 /* 30 seconds */); +} + +/*! + * Check whether the core, base, and overlay channels are idle (i.e. no methods + * pending in the corresponding pushbuffer) and lower the usage bounds if + * possible. + */ +static NvBool TryLoweringUsageBoundsOneHead(NVDevEvoPtr pDevEvo, NvU32 sd, + NvU32 head, + NVEvoUpdateState *updateState) +{ + const NVEvoSubDevHeadStateRec *pHeadState = + &pDevEvo->gpus[sd].headState[head]; + const struct NvKmsUsageBounds *pCurrent = &pHeadState->usage; + const struct NvKmsUsageBounds *pTarget = &pHeadState->targetUsage; + struct NvKmsUsageBounds newUsage = *pCurrent; + NvBool changed = FALSE; + NvBool scheduleLater = FALSE; + int i; + + for (i = 0; i < pDevEvo->head[head].numLayers; i++) { + if (pCurrent->layer[i].usable && !pTarget->layer[i].usable) { + NvBool isMethodPending; + + if (pDevEvo->hal->IsChannelMethodPending( + pDevEvo, + pDevEvo->head[head].layer[i], + sd, + &isMethodPending) && !isMethodPending) { + newUsage.layer[i] = pTarget->layer[i]; + changed = TRUE; + } else { + scheduleLater = TRUE; + } + } else if ((pCurrent->layer[i].usable && pTarget->layer[i].usable) && + ((pCurrent->layer[i].supportedSurfaceMemoryFormats != + pTarget->layer[i].supportedSurfaceMemoryFormats) || + (!nvEvoScalingUsageBoundsEqual(&pCurrent->layer[i].scaling, + &pTarget->layer[i].scaling)))) { + NvBool isMethodPending; + + if (pDevEvo->hal->IsChannelMethodPending( + pDevEvo, + pDevEvo->head[head].layer[i], + sd, + &isMethodPending) && !isMethodPending) { + newUsage.layer[i] = pTarget->layer[i]; + changed = TRUE; + } else { + scheduleLater = TRUE; + } + } + } + + if (scheduleLater) { + SchedulePostFlipIMPTimer(pDevEvo); + } + + if (changed) { + changed = nvSetUsageBoundsEvo(pDevEvo, sd, head, &newUsage, + updateState); + } + + return changed; +} + +static NvBool +TryEnablingMidFrameAndDWCFWatermarkOneHead(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + NVEvoUpdateState *updateState) +{ + const NVEvoSubDevHeadStateRec *pHeadState = + &pDevEvo->gpus[sd].headState[head]; + NvBool changed = FALSE; + + if (pHeadState->disableMidFrameAndDWCFWatermark && + !pHeadState->targetDisableMidFrameAndDWCFWatermark) { + + NvBool isIdle; + + if (pDevEvo->hal->IsChannelIdle(pDevEvo, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + sd, + &isIdle) && isIdle) { + nvEnableMidFrameAndDWCFWatermark(pDevEvo, + sd, + head, + TRUE /* enable */, + updateState); + changed = TRUE; + } else { + // Schedule another timer to try again later. 
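+ // (SchedulePostFlipIMPTimer() below retries after 10 seconds.)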
+ SchedulePostFlipIMPTimer(pDevEvo); + } + } + + return changed; +} + +static void +TryToDoPostFlipIMP(void *dataPtr, NvU32 dataU32) +{ + NVDevEvoPtr pDevEvo = dataPtr; + NvU32 head, sd; + NVDispEvoPtr pDispEvo; + + pDevEvo->postFlipIMPTimer = NULL; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NVEvoUpdateState updateState = { }; + NvBool update = FALSE; + + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + if (TryLoweringUsageBoundsOneHead(pDevEvo, sd, head, + &updateState)) { + update = TRUE; + } + + if (TryEnablingMidFrameAndDWCFWatermarkOneHead( + pDevEvo, + sd, + head, + &updateState)) { + update = TRUE; + } + } + + if (update) { + nvDoIMPUpdateEvo(pDispEvo, &updateState); + } + } +} + +static void SchedulePostFlipIMPTimer(NVDevEvoPtr pDevEvo) +{ + if (!pDevEvo->postFlipIMPTimer) { + pDevEvo->postFlipIMPTimer = + nvkms_alloc_timer( + TryToDoPostFlipIMP, + pDevEvo, + 0, /* dataU32 */ + 10000000 /* 10 seconds */); + } +} + +void nvEvoCancelPostFlipIMPTimer(NVDevEvoPtr pDevEvo) +{ + nvkms_free_timer(pDevEvo->postFlipIMPTimer); + pDevEvo->postFlipIMPTimer = NULL; +} + +/*! + * If necessary, schedule a timer to see if usage bounds can be lowered. + */ +static void SchedulePostFlipIMP(NVDevEvoPtr pDevEvo) +{ + NvU32 head, sd; + NVDispEvoPtr pDispEvo; + + // If a timer is already scheduled, do nothing. + if (pDevEvo->postFlipIMPTimer) { + return; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + const NVEvoSubDevHeadStateRec *pHeadState = + &pDevEvo->gpus[sd].headState[head]; + + if (!UsageBoundsEqual(&pHeadState->usage, + &pHeadState->targetUsage) || + (pHeadState->disableMidFrameAndDWCFWatermark != + pHeadState->targetDisableMidFrameAndDWCFWatermark)) { + + SchedulePostFlipIMPTimer(pDevEvo); + return; + } + } + } +} + +static void SkipLayerPendingFlips(NVDevEvoRec *pDevEvo, + const NvBool trashPendingMethods, + const NvBool unblockMethodsInExecutation, + struct NvKmsFlipWorkArea *pWorkArea) +{ + NvU64 startTime = 0; + const NvU32 timeout = 2000000; /* 2 seconds */ + struct { + struct { + struct { + NvU32 oldAccelMask; + } head[NVKMS_MAX_HEADS_PER_DISP]; + } sd[NVKMS_MAX_SUBDEVICES]; + } accelState = { }; + NvU32 sd, head; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (!pWorkArea->sd[sd].changed) { + continue; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + const NVFlipEvoHwState *pFlipState = + &pWorkArea->sd[sd].head[head].newState; + + if (!pFlipState->skipLayerPendingFlips[NVKMS_MAIN_LAYER]|| + !pFlipState->dirty.layer[NVKMS_MAIN_LAYER]) { + continue; + } + + pDevEvo->hal->AccelerateChannel( + pDevEvo, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + sd, + trashPendingMethods, + unblockMethodsInExecutation, + &accelState.sd[sd].head[head].oldAccelMask); + } + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (!pWorkArea->sd[sd].changed) { + continue; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + const NVFlipEvoHwState *pFlipState = + &pWorkArea->sd[sd].head[head].newState; + + if (!pFlipState->skipLayerPendingFlips[NVKMS_MAIN_LAYER] || + !pFlipState->dirty.layer[NVKMS_MAIN_LAYER]) { + continue; + } + + if (unblockMethodsInExecutation) { + if (!nvEvoPollForNoMethodPending(pDevEvo, + sd, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + &startTime, + timeout)) { + nvAssert(!"Failed to idle the main layer channel"); + } + } else { + if (!nvEvoPollForEmptyChannel(pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + sd, + &startTime, + timeout)) { 
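+ /* The channel still had queued methods after the 2 second timeout. */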
+ nvAssert(!"Failed to empty the main layer channel"); + } + } + + pDevEvo->hal->ResetChannelAccelerators( + pDevEvo, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + sd, + trashPendingMethods, + unblockMethodsInExecutation, + accelState.sd[sd].head[head].oldAccelMask); + } + } +} + +void nvPreFlip(NVDevEvoRec *pDevEvo, + struct NvKmsFlipWorkArea *pWorkArea, + const NvU32 applyAllowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES], + const NvU32 allowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES], + const NvBool skipUpdate) +{ + NvU32 sd, head; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + if (!pWorkArea->sd[sd].changed) { + continue; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + // Increase refCnt of surfaces used AFTER flip + nvUpdateSurfacesFlipRefCount( + pDevEvo, + head, + &pWorkArea->sd[sd].head[head].newState, + NV_TRUE); + } + } + + PreFlipIMP(pDevEvo, pWorkArea); + + if (!skipUpdate) { + /* Trash flips pending in channel which are not yet in execution */ + SkipLayerPendingFlips(pDevEvo, TRUE /* trashPendingMethods */, + FALSE /* unblockMethodsInExecutation */, + pWorkArea); + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (applyAllowVrrApiHeadMasks[sd] > 0){ + // Applying allowVrrApiHeadMask to at least one apiHead + nvSetVrrActive(pDevEvo, applyAllowVrrApiHeadMasks, allowVrrApiHeadMasks); + break; + } + } +} + +void nvPostFlip(NVDevEvoRec *pDevEvo, + struct NvKmsFlipWorkArea *pWorkArea, + const NvBool skipUpdate, + const NvU32 applyAllowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES], + NvS32 *pVrrSemaphoreIndex) +{ + NvU32 sd, head; + + if (!skipUpdate) { + /* Unblock flips which are stuck in execution */ + SkipLayerPendingFlips(pDevEvo, FALSE /* trashPendingMethods */, + TRUE /* unblockMethodsInExecutation */, + pWorkArea); + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (applyAllowVrrApiHeadMasks[sd] > 0) { + *pVrrSemaphoreIndex = nvIncVrrSemaphoreIndex(pDevEvo, applyAllowVrrApiHeadMasks); + break; + } + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (!pWorkArea->sd[sd].changed) { + continue; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + // Decrease refCnt of surfaces used BEFORE the flip + nvUpdateSurfacesFlipRefCount( + pDevEvo, + head, + &pWorkArea->sd[sd].head[head].oldState, + NV_FALSE); + } + } + + if (!skipUpdate) { + // Note that usage bounds are not lowered here, because the flip + // queued by this function may not occur until later. Instead, schedule + // a timer for later to check if the usage bounds can be lowered. + SchedulePostFlipIMP(pDevEvo); + + pDevEvo->skipConsoleRestore = FALSE; + } +} + +static NvBool AllocPreSyncpt(NVDevEvoRec *pDevEvo, + NVEvoChannel *pChannel, const NvU32 id) +{ + NVSurfaceDescriptor surfaceDesc; + + /*! use id value to check the global table */ + if (!pDevEvo->preSyncptTable[id].allocated) { + /*! Register - allocate and bind surface descriptor for syncpt*/ + if (!nvRmEvoAllocAndBindSyncpt(pDevEvo, + pChannel, + id, + &surfaceDesc, + &pDevEvo->preSyncptTable[id])) { + nvAssert(!"Failed to register pre-syncpt"); + return FALSE; + } + + /*! Fill the Entry in Global Table */ + pDevEvo->preSyncptTable[id].channelMask |= pChannel->channelMask; + } else { + /*! + * syncpt found, just bind the surface descriptor of this syncpt + * to the window if it is not already. 
+ */ + if ((pDevEvo->preSyncptTable[id].channelMask & + pChannel->channelMask) == 0) { + + NvU32 ret = + pDevEvo->hal->BindSurfaceDescriptor(pDevEvo, + pChannel, &pDevEvo->preSyncptTable[id].surfaceDesc); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to bind surface descriptor for pre-syncpt"); + } + + pDevEvo->preSyncptTable[id].channelMask |= pChannel->channelMask; + /*! hSyncpt already allocated for id*/ + } + } + + return TRUE; +} + +static NvBool RegisterPreSyncpt(NVDevEvoRec *pDevEvo, + struct NvKmsFlipWorkArea *pWorkArea) +{ + NvU32 sd; + NvU32 ret = TRUE; + const NVDispEvoRec *pDispEvo; + + pDevEvo->pAllSyncptUsedInCurrentFlip = + nvCalloc(1, sizeof(NvBool) * NV_SYNCPT_GLOBAL_TABLE_LENGTH); + if (pDevEvo->pAllSyncptUsedInCurrentFlip == NULL) { + ret = FALSE; + goto done; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 head; + for (head = 0; head < ARRAY_LEN(pWorkArea->sd[sd].head); head++) { + NVFlipEvoHwState *pFlipState = + &pWorkArea->sd[sd].head[head].newState; + NvU32 layer; + + for (layer = 0; layer < ARRAY_LEN(pFlipState->layer); layer++) { + NVFlipSyncObjectEvoHwState *pFlipSyncObject = + &pFlipState->layer[layer].syncObject; + NvU32 preSyncpt = pFlipSyncObject->u.syncpts.preSyncpt; + + if (!pFlipState->dirty.layerSyncObjects[layer] || + !pFlipSyncObject->usingSyncpt || + !pFlipSyncObject->u.syncpts.isPreSyncptSpecified) { + continue; + } + + if (!AllocPreSyncpt(pDevEvo, pDevEvo->head[head].layer[layer], + preSyncpt)) { + ret = FALSE; + goto done; + } + + pDevEvo->pAllSyncptUsedInCurrentFlip[preSyncpt] = NV_TRUE; + } + } + } + +done: + nvFree(pDevEvo->pAllSyncptUsedInCurrentFlip); + pDevEvo->pAllSyncptUsedInCurrentFlip = NULL; + + return ret; +} + +NvBool nvPrepareToDoPreFlip(NVDevEvoRec *pDevEvo, + struct NvKmsFlipWorkArea *pWorkArea) +{ + if (!RegisterPreSyncpt(pDevEvo, pWorkArea)) { + return FALSE; + } + + if (!PrepareToDoPreFlipIMP(pDevEvo, pWorkArea)) { + return FALSE; + } + + return TRUE; +} + +NvBool nvAssignNVFlipEvoHwState(NVDevEvoRec *pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const NvU32 sd, + const NvU32 head, + const struct NvKmsFlipCommonParams *pParams, + const NvBool allowVrr, + NVFlipEvoHwState *pFlipHwState) +{ + const NVDispEvoRec *pDispEvo = pDevEvo->gpus[sd].pDispEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const struct NvKmsUsageBounds *pPossibleUsage = + &pHeadState->timings.viewPort.possibleUsage; + + if (!nvUpdateFlipEvoHwState(pOpenDev, pDevEvo, sd, head, pParams, + &pHeadState->timings, + pHeadState->mergeHeadSection, pFlipHwState, + allowVrr)) { + return FALSE; + } + + nvOverrideScalingUsageBounds(pDevEvo, head, pFlipHwState, pPossibleUsage); + + if (!nvValidateFlipEvoHwState(pDevEvo, head, &pHeadState->timings, + pFlipHwState)) { + return FALSE; + } + + return TRUE; +} + +/*! + * Wait for idle on a set of the main layer channels. + * + * \param[in,out] pDevEvo The device. + * \param[in] idleChannelMaskPerSd The channel masks per subdevice that + * we should wait to be idle. + * \param[in] allowForceIdle Whether we should force idle a channel + * or just assert if the idle times out. 
+ */
+void nvIdleMainLayerChannels(
+ NVDevEvoPtr pDevEvo,
+ const NVEvoChannelMask *idleChannelMaskPerSd,
+ NvBool allowForceIdle)
+{
+ NvU64 startTime = 0;
+ NvBool allChannelsIdle = FALSE;
+ NVDispEvoPtr pDispEvo;
+ NvU32 dispIndex, head;
+ NVEvoChannelMask busyChannelMaskPerSd[NVKMS_MAX_SUBDEVICES] = { };
+
+ /*
+ * Wait up to 2 seconds for all channels to be idle, and gather a list of
+ * all busy channels.
+ */
+ while (!allChannelsIdle) {
+
+ const NvU32 timeout = 2000000; /* 2 seconds */
+ NvBool anyChannelBusy = FALSE;
+
+ FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+ for (head = 0; head < pDevEvo->numHeads; head++) {
+ NVEvoChannelPtr pMainLayerChannel =
+ pDevEvo->head[head].layer[NVKMS_MAIN_LAYER];
+ if (idleChannelMaskPerSd[pDispEvo->displayOwner] &
+ pMainLayerChannel->channelMask) {
+
+ NvBool isMethodPending = FALSE;
+ if (!pDevEvo->hal->IsChannelMethodPending(
+ pDevEvo,
+ pMainLayerChannel,
+ pDispEvo->displayOwner,
+ &isMethodPending)
+ || isMethodPending) {
+
+ /* Mark this channel as busy. */
+ busyChannelMaskPerSd[pDispEvo->displayOwner] |=
+ pMainLayerChannel->channelMask;
+ anyChannelBusy = TRUE;
+ } else {
+ /*
+ * Mark this channel as no longer busy, in case its
+ * flip completed while we were waiting on another
+ * channel.
+ */
+ busyChannelMaskPerSd[pDispEvo->displayOwner] &=
+ ~pMainLayerChannel->channelMask;
+ }
+ }
+ }
+ }
+
+ if (!anyChannelBusy) {
+ allChannelsIdle = TRUE;
+ break;
+ }
+
+ /* Break out of the loop if we exceed the timeout. */
+ if (nvExceedsTimeoutUSec(pDevEvo, &startTime, timeout)) {
+ break;
+ }
+
+ nvkms_yield();
+ }
+
+ if (!allChannelsIdle) {
+ /*
+ * At least one channel was still busy after the 2 second timeout
+ * above.
+ */
+ if (!allowForceIdle) {
+ /*
+ * The caller of this function expected this wait for idle not to
+ * time out.
+ */
+ nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
+ "Timeout while waiting for idle.");
+ } else {
+ /*
+ * Idle all base channels that were still busy when the wait above
+ * timed out.
+ */
+ NVEvoIdleChannelState idleChannelState = { };
+
+ FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+ idleChannelState.subdev[pDispEvo->displayOwner].channelMask =
+ busyChannelMaskPerSd[pDispEvo->displayOwner];
+ }
+
+ pDevEvo->hal->ForceIdleSatelliteChannelIgnoreLock(
+ pDevEvo, &idleChannelState);
+ }
+ }
+}
+
+NvBool nvNeedToToggleFlipLock(const NVDispEvoRec *pDispEvo,
+ const NvU32 head, const NvBool enable)
+{
+ const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+ const NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
+ const NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head];
+ NvBool needToToggle = FALSE;
+
+ if (!enable && pHC->flipLock) {
+ /*
+ * This channel is currently using fliplock in the config that
+ * is being torn down; idle its base channel and disable
+ * fliplock.
+ */
+ needToToggle = TRUE;
+ }
+
+ if (enable && ((pHC->serverLock != NV_EVO_NO_LOCK) ||
+ (pHC->clientLock != NV_EVO_NO_LOCK))) {
+ /*
+ * This channel will be using fliplock for swap groups in the
+ * new config; idle its base channel and enable fliplock.
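+ * (The actual toggle is performed by nvToggleFlipLockPerDisp(),
+ * below.)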
+ */ + nvAssert(!HEAD_MASK_QUERY(pEvoSubDev->flipLockProhibitedHeadMask, + head)); + needToToggle = TRUE; + } + + return needToToggle; +} + +void nvToggleFlipLockPerDisp(NVDispEvoRec *pDispEvo, const NvU32 headMask, + const NvBool enable) +{ + NvU32 head; + NVEvoUpdateState updateState = { }; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + for (head = 0; head < pDevEvo->numHeads; head++) { + if ((headMask & NVBIT(head)) != 0x0) { + NvU32 setEnable = enable; + + if (!nvUpdateFlipLockEvoOneHead(pDispEvo, head, &setEnable, + TRUE /* set */, + &updateState)) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Failed to toggle fliplock for swapgroups."); + } + } + } + + if (!nvIsUpdateStateEmpty(pDevEvo, &updateState)) { + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } +} diff --git a/src/nvidia-modeset/src/nvkms-hw-states.c b/src/nvidia-modeset/src/nvkms-hw-states.c new file mode 100644 index 0000000..aed6fdb --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-hw-states.c @@ -0,0 +1,1139 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * nvkms-hw-states.c - Defines how to set up EVO hardware for the given usage. + * Used by the EVO state machines in nv_evo_states.c. + */ + +#include "nvkms-types.h" +#include "nvkms-framelock.h" +#include "nvkms-evo-states.h" + +/* + * Listed below are the different locking topologies for scan lock + * + * ------ (raster lock) + * ====== (frame lock) + * + * Config NoLock: No locking relationship between the heads + * + * +--------+ +------------+ + * | Head A | | Heads B... | + * +--------+ +------------+ + * + * + * Config LockHeads: Supports raster lock across two or more heads. + * + * +--------+ +--------+ + * | Head A | -- Internal -+--> | Head B | + * +--------+ | +--------+ + * | + * | +--------+ + * +--> | Head C | + * | +--------+ + * . ... + * + * Config FrameLockClient: Supports frame lock clients across GPUs/systems, + * one head per GPU + * + * +--------+ +-------------+ + * | Gsync | ==============> | Head A/B... | + * +--------+ +-------------+ + * ^ | + * +-------- External ----------+ + * + * + * Config FrameLockServer: Same as above, but generates timing for the + * frame lock network + * + * +--------+ +-------------+ + * | Gsync | | Head A/B... 
| + * +--------+ +-------------+ + * ^ | + * +-------------- External ---+ + * + * + * Config FrameLockClientManyHeads: Support frame lock across GPUs/systems, + * two or more heads per GPU + * + * +-------- External ---------------------------+ + * V | + * +--------+ +--------+ +--------+ + * | Gsync | =====> | Head A | == Internal =+==> | Head B | + * +--------+ +--------+ | +--------+ + * | + * | +--------+ + * +==> | Head C | + * | +--------+ + * . ... + * + * Config FrameLockServerManyHeads: Same as above, only this head + * is driving timing for the frame lock network. + * + * +-------- External ---------------------------+ + * V | + * +--------+ +--------+ +--------+ + * | Gsync | | Head A | == Internal =+==> | Head B | + * +--------+ +--------+ | +--------+ + * | + * | +--------+ + * +==> | Head C | + * | +--------+ + * . ... + * + * Config LockHeadsFrameLockClient: Frame lock enabled on one head of a + * GPU where two or more heads are raster-locked. + * Config LockHeadsFrameLockClientManyHeads: Same, but two or more heads are + * enabled. + * + * +-------- External ---------------------------+ + * V | + * +--------+ +--------+ +--------+ + * | Gsync | =====> | Head A | -- Internal -+--> | Head B | + * +--------+ +--------+ | +--------+ + * | + * | +--------+ + * +--> | Head C | + * | +--------+ + * . ... + * + * Config LockHeadsFrameLockServer: Frame lock enabled on one head of a GPU + * where two or more heads are raster-locked, and this head is driving timing + * for the frame lock network. + * Config LockHeadsFrameLockServerManyHeads: Same, but one head is frame + * lock server and the others are frame lock clients. + * + * +-------- External ---------------------------+ + * V | + * +--------+ +--------+ +--------+ + * | Gsync | | Head A | -- Internal -+--> | Head B | + * +--------+ +--------+ | +--------+ + * | + * | +--------+ + * +--> | Head C | + * | +--------+ + * . ... + * + * Configs SliPrimary, SliSecondary, SliLastSecondary: Supports SLI. + * + * +-----------------+ + * +--- | Head A, subdev0 | + * | +-----------------+ + * External + * | +-----------------+ + * +--> | Head A, subdev1 | + * | +-----------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * + * Config LockHeadsSli{Primary,Secondary,LastSecondary}: Supports SLI with two + * or more heads rasterlocked (primary or any secondary, independently). + * + * +-----------------+ +---------------------+ + * +--- | Head A, subdev0 | -- Internal --> | Heads B..., subdev0 | + * | +-----------------+ +---------------------+ + * External + * | +-----------------+ +---------------------+ + * +--> | Head A, subdev1 | -- Internal --> | Heads B..., subdev1 | + * | +-----------------+ +---------------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * The SliSecondary states also come in a FrameLockClient variant; this means that + * they have framelock enabled in the RM (for reporting purposes; they still + * get their sync from the SLI primary). + * + * + * Config SliPrimaryFrameLockClient: Supports frame lock across GPU + * groups/systems with SLI + * + * + * +===============================+ + * I V + * +-------+ +-----------------+ + * | Gsync | <-----------+--- | Head A, subdev0 | + * +-------+ | +-----------------+ + * External + * | +-----------------+ + * +--> | Head A, subdev1 | + * | +-----------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... 
+ * + * + * Config SliPrimaryFrameLockServer: Same as above, only this SLI head drives + * timing for the frame lock network. + * + * +-------+ +-----------------+ + * | Gsync | <-----------+--- | Head A, subdev0 | + * +-------+ | +-----------------+ + * External + * | +-----------------+ + * +--> | Head A, subdev1 | + * | +-----------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * + * Config SliPrimaryLockHeadsFrameLockClient: Supports frame lock across GPU + * groups/systems with SLI, with two or more heads on a GPU rasterlocked + * together. + * + * + * +======================+ + * I V + * +-------+ +-----------------+ +---------------------+ + * | Gsync | <--+--- | Head A, subdev0 | -- Internal --> | Heads B..., subdev0 | + * +-------+ | +-----------------+ +---------------------+ + * External + * | +-----------------+ +---------------------+ + * +--> | Head A, subdev1 | -- Internal --> | Heads B..., subdev1 | + * | +-----------------+ +---------------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * + * Config SliPrimaryLockHeadsFrameLockServer: Same as above, only this SLI head + * drives timing for the frame lock network. + * + * +-------+ +-----------------+ +---------------------+ + * | Gsync | <--+--- | Head A, subdev0 | -- Internal --> | Heads B..., subdev0 | + * +-------+ | +-----------------+ +---------------------+ + * External + * | +-----------------+ +---------------------+ + * +--> | Head A, subdev1 | -- Internal --> | Heads B..., subdev1 | + * | +-----------------+ +---------------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * + * Note that for the SLI and framelock topologies we set the external fliplock + * pin. Changing the pin causes a raster reset for some reason, so we want to + * change the pin here, prior to enabling flip lock. + */ + +NvBool nvEvoLockHWStateNoLock(NVDispEvoPtr pDispEvo, NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + unsigned int i; + + nvAssert(pHeads != NULL && pHeads[0] != NV_INVALID_HEAD); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (pHC->mergeMode) { + /* MergeMode is currently handled outside of the scanlock state + * machine, so don't touch the HeadControl state for heads with + * mergeMode enabled. (The state machine will be transitioned to + * the 'ProhibitLock' state to prevent other states from being + * reached, so this should be the only HWState function that needs + * this special case.) 
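+ * (mergeMode is the two-head-one-OR configuration, in which two
+ * heads jointly drive a single output resource.)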
*/ + continue; + } + + /* Disable scan lock on this head */ + pHC->serverLock = NV_EVO_NO_LOCK; + pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(0); + pHC->clientLock = NV_EVO_NO_LOCK; + pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(0); + pHC->clientLockoutWindow = 0; + pHC->setLockOffsetX = FALSE; + pHC->useStallLockPin = FALSE; + pHC->stallLockPin = NV_EVO_LOCK_PIN_INTERNAL(0); + pHC->crashLockUnstallMode = FALSE; + + /* Reset the flip lock pin to internal, if not needed for SLI */ + if (!HEAD_MASK_QUERY(pEvoSubDev->flipLockPinSetForSliHeadMask, head)) { + pHC->flipLockPin = NV_EVO_LOCK_PIN_INTERNAL(0); + } + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_UNSET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + + /* Disable framelock */ + pEvoSubDev->frameLockServerMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockExtRefClkMaskAssy &= ~(1 << head); + + /* Reset SLI state */ + pEvoSubDev->sliRasterLockServerMask &= ~(1 << head); + pEvoSubDev->sliRasterLockClientMask &= ~(1 << head); + + pHC->lockChainPosition = 0; + } + + pEvoSubDev->frameLockHouseSync = FALSE; + + return TRUE; +} + +NvBool nvEvoLockHWStateLockHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + unsigned int i, serverHead = 0; + + nvAssert(pHeads != NULL && + pHeads[0] != NV_INVALID_HEAD && + pHeads[1] != NV_INVALID_HEAD); + + /* First, disable all scan locking */ + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* Make the first head a raster lock server on the internal pin */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(head); + serverHead = head; + } else { + /* Make all the other heads raster lock clients on the internal pin */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(serverHead); + pHC->clientLockoutWindow = 2; + } + } + + return TRUE; +} + +NvBool nvEvoLockHWStateFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) + +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK); + + if (pin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* Set up for the FRAME_LOCK_SERVER state */ + if (!nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + nvAssert(pHeads != NULL && pHeads[0] != NV_INVALID_HEAD); + + /* Additionally enable the first head as a frame lock client */ + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = pin; + + pEvoSubDev->frameLockServerMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + + return TRUE; +} + +NvBool nvEvoLockHWStateFrameLockServer(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_RASTER_LOCK); + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* disable all scan locking */ + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + nvAssert(pHeads != NULL && pHeads[0] != NV_INVALID_HEAD); + + /* Enable the first head as a 
raster lock server on the external pin */ + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = pin; + + /* Set up the first head to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + + return TRUE; +} + +NvBool nvEvoLockHWStateFrameLockServerHouseSync(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + /* As far as EVO is concerned, House Sync means FL client */ + if (!nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + const int head = pHeads[0]; + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockHouseSync = TRUE; + + return TRUE; +} + +NvBool nvEvoLockHWStateFrameLockClientManyHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK); + + if (pin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* Set up as a frame lock server with two heads */ + if (!nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + /* Additionally enable the first head as a frame lock client */ + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = pin; + + pEvoSubDev->frameLockServerMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + + return TRUE; +} + +NvBool nvEvoLockHWStateFrameLockServerManyHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_RASTER_LOCK); + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i, serverHead = 0; + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* Disable all scan lock */ + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* Make the first head a frame lock server on the internal pin. + * The first head is guaranteed to be framelock server or one of + * the requested framelock clients here + */ + nvAssert(nvIsFramelockableHead(pDispEvo, head)); + + pHC->serverLock = NV_EVO_FRAME_LOCK; + pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(head); + serverHead = head; + + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + } else { + /* If two or more heads are framelocked, but at least one head + * cannot be framelocked with the others, that head will be in + * the list of pDpys, but must not be framelocked, so skip it. 
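+ * nvIsFramelockableHead(), just below, performs that check.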
+ */
+
+ if (!nvIsFramelockableHead(pDispEvo, head)) {
+ continue;
+ }
+ if (i == 1) {
+ /* Make the second head a raster lock server on the external pin */
+ pHC->serverLock = NV_EVO_RASTER_LOCK;
+ pHC->serverLockPin = pin;
+ }
+ /* Make all nonzero heads a frame lock client on the internal pin */
+ pHC->clientLock = NV_EVO_FRAME_LOCK;
+ pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(serverHead);
+
+ pEvoSubDev->frameLockClientMaskAssy |= 1 << head;
+ }
+
+ /* Set up all heads to use the external flip lock pin */
+ pHC->flipLockPin = flPin;
+ pEvoSubDev->flipLockPinSetForFrameLockHeadMask =
+ HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head);
+
+ pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head;
+ }
+
+ return TRUE;
+}
+
+NvBool nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(NVDispEvoPtr pDispEvo,
+ NVEvoSubDevPtr pEvoSubDev,
+ const NvU32 *pHeads)
+{
+ /* As far as EVO is concerned, House Sync means FL client */
+ if (!nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads)) {
+ return FALSE;
+ }
+
+ const int head = pHeads[0];
+ pEvoSubDev->frameLockServerMaskAssy |= 1 << head;
+ pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head);
+ pEvoSubDev->frameLockHouseSync = TRUE;
+
+ return TRUE;
+}
+
+NvBool nvEvoLockHWStateLockHeadsFrameLockServer(NVDispEvoPtr pDispEvo,
+ NVEvoSubDevPtr pEvoSubDev,
+ const NvU32 *pHeads)
+{
+ NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev,
+ NV_EVO_LOCK_SIGNAL_RASTER_LOCK);
+ NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev,
+ NV_EVO_LOCK_SIGNAL_FLIP_LOCK);
+ unsigned int i, serverHead = 0;
+
+ if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) {
+ return FALSE;
+ }
+
+ /* Disable all scan lock */
+ nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads);
+
+ for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) {
+ const int head = pHeads[i];
+ NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head];
+
+ /* Make the first head a raster lock server on the internal pin */
+ if (i == 0) {
+ pHC->serverLock = NV_EVO_RASTER_LOCK;
+ pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(head);
+ serverHead = head;
+
+ pEvoSubDev->frameLockServerMaskAssy |= 1 << head;
+ } else {
+ if (i == 1) {
+ /* Make the second head a raster lock server on the external pin */
+ pHC->serverLock = NV_EVO_RASTER_LOCK;
+ pHC->serverLockPin = pin;
+ }
+
+ /* Make all nonzero heads raster lock clients on the internal pin */
+ pHC->clientLock = NV_EVO_RASTER_LOCK;
+ pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(serverHead);
+ pHC->clientLockoutWindow = 2;
+
+ pEvoSubDev->frameLockClientMaskAssy |= 1 << head;
+ }
+
+ /* Set up all heads to use the external flip lock pin */
+ pHC->flipLockPin = flPin;
+ pEvoSubDev->flipLockPinSetForFrameLockHeadMask =
+ HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head);
+
+ pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head;
+ }
+
+ return TRUE;
+}
+
+NvBool nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(NVDispEvoPtr pDispEvo,
+ NVEvoSubDevPtr pEvoSubDev,
+ const NvU32 *pHeads)
+{
+ /* As far as EVO is concerned, House Sync means FL client */
+ if (!nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads)) {
+ return FALSE;
+ }
+
+ const int head = pHeads[0];
+ pEvoSubDev->frameLockServerMaskAssy |= 1 << head;
+ pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head);
+ pEvoSubDev->frameLockHouseSync = TRUE;
+
+ return TRUE;
+}
+
+NvBool nvEvoLockHWStateLockHeadsFrameLockClient(NVDispEvoPtr pDispEvo,
+ NVEvoSubDevPtr pEvoSubDev,
+ const NvU32 *pHeads)
+{
+
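+ /* Reuses the LockHeadsFrameLockServer setup, then adds client framelock. */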
NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev,
+ NV_EVO_LOCK_SIGNAL_FRAME_LOCK);
+
+ if (pin == NV_EVO_LOCK_PIN_ERROR) {
+ return FALSE;
+ }
+
+ /* Set up for the LOCK_HEADS_FRAME_LOCK_SERVER state */
+ if (!nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads)) {
+ return FALSE;
+ }
+
+ /* Additionally, enable the first head as a frame lock client */
+ const int head = pHeads[0];
+ NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head];
+
+ pHC->clientLock = NV_EVO_FRAME_LOCK;
+ pHC->clientLockPin = pin;
+
+ pEvoSubDev->frameLockServerMaskAssy &= ~(1 << head);
+ pEvoSubDev->frameLockClientMaskAssy |= 1 << head;
+
+ return TRUE;
+}
+
+static void SetLockChainPosition(NVDispEvoPtr pDispEvo,
+ NVEvoSubDevPtr pEvoSubDev,
+ NVEvoHeadControlPtr pHC)
+{
+ if (pDispEvo->displayOwner == pEvoSubDev->subDeviceInstance) {
+ /*
+ * When we own display (even if subDeviceInstance != 0), set
+ * lockChainPosition of 0, since we are actually scanning out pixels
+ * (this is the case for all SLI Mosaic and non-Mosaic display owners).
+ */
+ pHC->lockChainPosition = 0;
+ } else {
+ /*
+ * If we don't own display, just assume the video bridge chain is
+ * linear
+ */
+ pHC->lockChainPosition = pEvoSubDev->subDeviceInstance;
+ }
+}
+
+NvBool nvEvoLockHWStateSliPrimary(NVDispEvoPtr pDispEvo,
+ NVEvoSubDevPtr pEvoSubDev,
+ const NvU32 *pHeads)
+{
+ NVEvoLockPin pin = pEvoSubDev->sliServerLockPin;
+ NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev,
+ NV_EVO_LOCK_SIGNAL_FLIP_LOCK);
+
+ if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) {
+ return FALSE;
+ }
+
+ nvAssert(pHeads[0] != NV_INVALID_HEAD);
+ nvAssert(pHeads[1] == NV_INVALID_HEAD);
+
+ nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads);
+
+ const int head = pHeads[0];
+ NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head];
+
+ pHC->serverLock = NV_EVO_RASTER_LOCK;
+ pHC->serverLockPin = pin;
+
+ pEvoSubDev->sliRasterLockServerMask |= 1 << head;
+
+ SetLockChainPosition(pDispEvo, pEvoSubDev, pHC);
+
+ return TRUE;
+}
+
+NvBool nvEvoLockHWStateSliPrimaryLockHeads(NVDispEvoPtr pDispEvo,
+ NVEvoSubDevPtr pEvoSubDev,
+ const NvU32 *pHeads)
+{
+ NVEvoLockPin pin = pEvoSubDev->sliServerLockPin;
+ unsigned int i;
+
+ if (pin == NV_EVO_LOCK_PIN_ERROR) {
+ return FALSE;
+ }
+
+ /* LockHeads sets up server lock on the first head, client lock on the rest */
+ nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads);
+
+ nvAssert(pHeads != NULL &&
+ pHeads[0] != NV_INVALID_HEAD &&
+ pHeads[1] != NV_INVALID_HEAD);
+
+ for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) {
+ const int head = pHeads[i];
+ NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head];
+
+ if (i == 0) {
+ /*
+ * The first head is configured as rasterlock server on its
+ * internal pin. It serves as the server for everything else on
+ * this GPU, as well as (indirectly through another head) everything
+ * in the SLI group.
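+ * (The i == 1 head below re-exports the lock on the SLI server
+ * pin for GPUs further down the chain.)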
+ */ + pEvoSubDev->sliRasterLockServerMask |= 1 << head; + } else { + if (i == 1) { + /* + * The first rasterlock client on this GPU also serves as server + * for the rest of the SLI device + */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = pin; + } + + /* All of these heads should inherit extrefclk from the server */ + pEvoSubDev->sliRasterLockClientMask |= 1 << head; + } + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliSecondary(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin serverPin = pEvoSubDev->sliServerLockPin; + NVEvoLockPin clientPin = pEvoSubDev->sliClientLockPin; + const NvU32 clientLockoutWindow = 2; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + if (clientPin == NV_EVO_LOCK_PIN_ERROR || + serverPin == NV_EVO_LOCK_PIN_ERROR || + flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + nvAssert(pHeads[0] != NV_INVALID_HEAD); + nvAssert(pHeads[1] == NV_INVALID_HEAD); + + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + /* Server lock to be consumed by GPUs further down the chain */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = serverPin; + + /* Client lock to sync to GPUs further up the chain */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = clientPin; + pHC->clientLockoutWindow = clientLockoutWindow; + + pEvoSubDev->sliRasterLockClientMask |= 1 << head; + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + + return TRUE; +} + +NvBool nvEvoLockHWStateSliSecondaryFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + if (!nvEvoLockHWStateSliSecondary(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + nvAssert(pHeads[0] != NV_INVALID_HEAD); + nvAssert(pHeads[1] == NV_INVALID_HEAD); + + pEvoSubDev->frameLockClientMaskAssy |= 1 << pHeads[0]; + + return TRUE; +} + +NvBool nvEvoLockHWStateSliLastSecondary(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin clientPin = pEvoSubDev->sliClientLockPin; + const NvU32 clientLockoutWindow = 2; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + if (clientPin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + nvAssert(pHeads[0] != NV_INVALID_HEAD); + nvAssert(pHeads[1] == NV_INVALID_HEAD); + + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + /* Only set up client lock; no more GPUs to consume server lock */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = clientPin; + pHC->clientLockoutWindow = clientLockoutWindow; + + pEvoSubDev->sliRasterLockClientMask |= 1 << head; + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + + return TRUE; +} + +NvBool nvEvoLockHWStateSliLastSecondaryFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + if (!nvEvoLockHWStateSliLastSecondary(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + nvAssert(pHeads[0] != NV_INVALID_HEAD); + nvAssert(pHeads[1] == NV_INVALID_HEAD); + + const int head = pHeads[0]; + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + + return TRUE; +} + +NvBool nvEvoLockHWStateSliSecondaryLockHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr 
pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin serverPin = pEvoSubDev->sliServerLockPin; + NVEvoLockPin clientPin = pEvoSubDev->sliClientLockPin; + const NvU32 clientLockoutWindow = 2; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i; + + if (clientPin == NV_EVO_LOCK_PIN_ERROR || serverPin == NV_EVO_LOCK_PIN_ERROR || + flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* + * first head (chosen arbitrarily): server lock to be consumed by + * GPUs further down the chain + */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = serverPin; + } + + /* + * Client lock all heads to the external SLI pin. Note that we cannot + * client lock one head and set up internal locking for the other + * because of bug 405996. + */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = clientPin; + pHC->clientLockoutWindow = clientLockoutWindow; + + pEvoSubDev->sliRasterLockClientMask |= 1 << head; + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + unsigned int i; + + if (!nvEvoLockHWStateSliSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + pEvoSubDev->frameLockClientMaskAssy |= 1 << pHeads[i]; + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliLastSecondaryLockHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin clientPin = pEvoSubDev->sliClientLockPin; + const NvU32 clientLockoutWindow = 2; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i; + + if (clientPin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + /* + * Client lock all heads to the external SLI pin. Note that we cannot + * client lock one head and set up internal locking for the other + * because of bug 405996. 
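+ *
+ * As a consequence, even the i == 0 head that drives the server pin
+ * is also client locked to the external pin, so every head on this
+ * secondary GPU lands in sliRasterLockClientMask below.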
+ */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = clientPin; + pHC->clientLockoutWindow = clientLockoutWindow; + + pEvoSubDev->sliRasterLockClientMask |= 1 << head; + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + unsigned int i; + + if (!nvEvoLockHWStateSliLastSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + pEvoSubDev->frameLockClientMaskAssy |= 1 << pHeads[i]; + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryFrameLockServer(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + unsigned int i; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + if (flPin == NV_EVO_LOCK_PIN_ERROR || + !nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + } else { + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + } + + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + + /* Set up this head to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + /* As far as EVO is concerned, House Sync means FL client */ + if (!nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + const int head = pHeads[0]; + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockHouseSync = TRUE; + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK); + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i; + + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + if (!nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* Enable first head as framelock client */ + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = pin; + } + + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + + /* Set up this head to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i; + + if (flPin == 
NV_EVO_LOCK_PIN_ERROR || + !nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + } else { + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + } + + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + + /* Set up this head to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + /* As far as EVO is concerned, House Sync means FL client */ + if (!nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + const int head = pHeads[0]; + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockHouseSync = TRUE; + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK); + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i; + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + if (!nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* Enable first head as framelock client */ + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = pin; + } + + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + + /* Set up this head to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + } + + return TRUE; +} diff --git a/src/nvidia-modeset/src/nvkms-lut.c b/src/nvidia-modeset/src/nvkms-lut.c new file mode 100644 index 0000000..69658f7 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-lut.c @@ -0,0 +1,458 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-lut.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "nvkms-dma.h" +#include "nvkms-surface.h" +#include "nvkms-private.h" +#include "nvkms-utils.h" +#include "nvkms-headsurface.h" +#include "nvos.h" + +#include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */ + +static void FreeLutSurfaceEvo(NVDevEvoPtr pDevEvo, NVSurfaceEvoPtr pSurfEvo) +{ + if (pSurfEvo == NULL) { + return; + } + + nvAssert(pSurfEvo->rmRefCnt == 1); + nvAssert(pSurfEvo->structRefCnt == 1); + + nvEvoUnregisterSurface(pDevEvo, + pDevEvo->pNvKmsOpenDev, + pSurfEvo->owner.surfaceHandle, + TRUE /* skipUpdate */, + FALSE /* skipSync */); +} + +static NVSurfaceEvoPtr RegisterLutSurfaceEvo(NVDevEvoPtr pDevEvo, NvU32 memoryHandle) +{ + struct NvKmsRegisterSurfaceParams registerSurfaceParams = { }; + const NVEvoApiHandlesRec *pSurfaceHandles; + NvU64 size = (sizeof(NVEvoLutDataRec) + 63) & ~63; + NVSurfaceEvoPtr pSurfEvo = NULL; + + registerSurfaceParams.request.deviceHandle = pDevEvo->deviceHandle; + registerSurfaceParams.request.useFd = FALSE; + registerSurfaceParams.request.rmClient = nvEvoGlobal.clientHandle; + + registerSurfaceParams.request.planes[0].u.rmObject = memoryHandle; + registerSurfaceParams.request.planes[0].offset = 0; + registerSurfaceParams.request.planes[0].pitch = (size + 255) & ~255; + registerSurfaceParams.request.planes[0].rmObjectSizeInBytes = size; + + registerSurfaceParams.request.widthInPixels = (size + 7) >> 3; /* TODO: Check on this */ + registerSurfaceParams.request.heightInPixels = 1; + + registerSurfaceParams.request.layout = NvKmsSurfaceMemoryLayoutPitch; + registerSurfaceParams.request.format = NvKmsSurfaceMemoryFormatR16G16B16A16; + + registerSurfaceParams.request.noDisplayHardwareAccess = FALSE; + registerSurfaceParams.request.noDisplayCaching = FALSE; + + registerSurfaceParams.request.isoType = NVKMS_MEMORY_ISO; + registerSurfaceParams.request.log2GobsPerBlockY = 0; + + /* + * Although the caller may want a GPU mapping, we pass + * NvHsMapPermissionsNone so failing the mapping doesn't fail the surface + * creation.
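+ *
+ * Callers that do need a mapping (e.g. AllocLutSurfaceEvoInVidmem()
+ * below) establish the CPU and GPU mappings themselves after the
+ * surface has been registered.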
+ */ + nvEvoRegisterSurface(pDevEvo, + pDevEvo->pNvKmsOpenDev, + ®isterSurfaceParams, + NvHsMapPermissionsNone); + + if (registerSurfaceParams.reply.surfaceHandle == 0) { + return NULL; + } + + pSurfaceHandles = nvGetSurfaceHandlesFromOpenDevConst(pDevEvo->pNvKmsOpenDev); + pSurfEvo = + nvEvoGetSurfaceFromHandle(pDevEvo, + pSurfaceHandles, + registerSurfaceParams.reply.surfaceHandle, + FALSE /* isUsedByCursorChannel */, + TRUE /* isUsedByLayerChannel */); + return pSurfEvo; +} + +static NVSurfaceEvoPtr AllocLutSurfaceEvoInVidmem(NVDevEvoPtr pDevEvo) +{ + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + NvU32 ret = NVOS_STATUS_ERROR_GENERIC; + NvU32 attr = 0, attr2 = 0; + NvU32 allocFlags = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN | + NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE; + NvU64 size = 0, alignment = 4096; + NvU32 memoryHandle = 0; + NVSurfaceEvoPtr pSurfEvo = NULL; + + memoryHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (memoryHandle == 0) { + goto fail; + } + + size = (sizeof(NVEvoLutDataRec) + 63) & ~63; + attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, attr); + attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _DEFAULT, attr2); + attr2 = FLD_SET_DRF(OS32, _ATTR2, _ISO, _YES, attr2); + + alignment = NV_MAX(alignment, NV_EVO_SURFACE_ALIGNMENT); + if (alignment != 0) { + allocFlags |= NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE; + } + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.size = size; + memAllocParams.attr = attr; + memAllocParams.attr2 = attr2; + memAllocParams.flags = allocFlags; + memAllocParams.alignment = alignment; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + NV01_MEMORY_LOCAL_USER, + &memAllocParams); + + /* If we failed the allocation above, abort */ + if (ret != NVOS_STATUS_SUCCESS) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle); + + goto fail; + } + + pSurfEvo = RegisterLutSurfaceEvo(pDevEvo, memoryHandle); + /* + * nvEvoRegisterSurface dups the memory handle, so we free the one we + * just created. + */ + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle); + memoryHandle = 0; + + if (pSurfEvo == NULL) { + goto fail; + } + + /* + * Map the surface for the CPU. This is only done by nvEvoRegisterSurface + * for NISO surfaces, so it must be done manually here. + */ + if (!nvRmEvoMapVideoMemory(pDevEvo, + pSurfEvo->planes[0].rmHandle, + size, pSurfEvo->cpuAddress, + SUBDEVICE_MASK_ALL)) { + goto fail; + } + + /* + * The GPU mapping is only needed for prefetching LUT surfaces for DIFR. + * It isn't worth failing alone but we want to keep gpuAddress coherent. 
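+ *
+ * Hence the fallback just below: a failed GPU mapping is normalized
+ * to 0 rather than unwinding the whole surface allocation.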
+ */ + pSurfEvo->gpuAddress = nvHsMapSurfaceToDevice(pDevEvo, + pSurfEvo->planes[0].rmHandle, + size, + NvHsMapPermissionsReadOnly); + + if (pSurfEvo->gpuAddress == NV_HS_BAD_GPU_ADDRESS) { + pSurfEvo->gpuAddress = 0ULL; + } + + return pSurfEvo; + +fail: + /* An error occurred -- free the surface */ + FreeLutSurfaceEvo(pDevEvo, pSurfEvo); + + return NULL; +} + +static NVSurfaceEvoPtr AllocLutSurfaceEvoInSysmem(NVDevEvoPtr pDevEvo) +{ + NvU32 memoryHandle = 0; + void *pBase = NULL; + NvU64 size = (sizeof(NVEvoLutDataRec) + 63) & ~63; + NVSurfaceEvoPtr pSurfEvo = NULL; + NvU32 ret = 0; + + memoryHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + if (memoryHandle == 0) { + goto fail; + } + + /* Allocate the LUT memory from sysmem */ + if (!nvRmAllocSysmem(pDevEvo, memoryHandle, NULL, &pBase, size, + NVKMS_MEMORY_ISO)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Unable to allocate LUT memory from sysmem"); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle); + + goto fail; + } + + pSurfEvo = RegisterLutSurfaceEvo(pDevEvo, memoryHandle); + + /* + * nvEvoRegisterSurface dups the memory handle, so we can free the one we + * just created. + */ + nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + pBase, + 0 /* flags */); + + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle); + memoryHandle = 0; + + if (pSurfEvo == NULL) { + goto fail; + } + + ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSurfEvo->planes[0].rmHandle, + 0, /* offset */ + size, + &pBase, + 0 /* flags */); + + if (ret != NVOS_STATUS_SUCCESS) { + goto fail; + } + + /* + * AllocLutSurfaceEvoInSysmem() will only be called if + * pDevEvo->requiresAllAllocationsInSysmem is TRUE. NVKMS will only set this + * cap bit for SOC display devices, and these devices should only have one + * subdevice. + */ + nvAssert(pDevEvo->numSubDevices == 1); + pSurfEvo->cpuAddress[0] = pBase; + + return pSurfEvo; + +fail: + /* An error occurred -- free the surface */ + FreeLutSurfaceEvo(pDevEvo, pSurfEvo); + + return NULL; +} + +static NVSurfaceEvoPtr AllocLutSurfaceEvo(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo->requiresAllAllocationsInSysmem) { + return AllocLutSurfaceEvoInSysmem(pDevEvo); + } else { + return AllocLutSurfaceEvoInVidmem(pDevEvo); + } +} + +NvBool nvSetTmoLutSurfaceEvo(NVDevEvoPtr pDevEvo, + NVFlipChannelEvoHwState *pHwState) +{ + if (pHwState->hdrStaticMetadata.enabled) { + if (!pHwState->tmoLut.pLutSurfaceEvo) { + pHwState->tmoLut.pLutSurfaceEvo = AllocLutSurfaceEvo(pDevEvo); + if (!pHwState->tmoLut.pLutSurfaceEvo) { + return FALSE; + } + } + } else { + // Will be freed via nvEvoDecrementSurfaceRefCnts() and + // nvFreeUnrefedTmoLutSurfacesEvo() on old state + pHwState->tmoLut.pLutSurfaceEvo = NULL; + } + + return TRUE; +} + +/* + * After the flip dereferences its TMO surfaces, or when it fails after TMO + * surface allocation, any unused TMO surfaces are left with 1 refcount. If + * these TMO surfaces are owned by the pDevEvo's pNvKmsOpenDev, then they have + * been allocated within this file and need to be freed. If not, they have been + * allocated by an NvKms client, which is in charge of freeing them. + * + * Only call FreeLutSurfaceEvo in the first case. 
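+ *
+ * Illustrative example: a TMO LUT allocated by nvSetTmoLutSurfaceEvo()
+ * above is owned by pDevEvo->pNvKmsOpenDev and is freed here once its
+ * structRefCnt drops to 1; a client-registered LUT surface is only
+ * cleared from the flip state and left for the client to unregister.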
+ */ +void nvFreeUnrefedTmoLutSurfacesEvo(NVDevEvoPtr pDevEvo, + NVFlipEvoHwState *pFlipState, + NvU32 head) +{ + NvU32 layer; + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pFlipState->layer[layer].tmoLut.pLutSurfaceEvo != NULL && + pFlipState->layer[layer].tmoLut.pLutSurfaceEvo->structRefCnt <= 1) { + + if (pFlipState->layer[layer].tmoLut.pLutSurfaceEvo->owner.pOpenDev == + pDevEvo->pNvKmsOpenDev) { + + FreeLutSurfaceEvo(pDevEvo, + pFlipState->layer[layer].tmoLut.pLutSurfaceEvo); + } + pFlipState->layer[layer].tmoLut.pLutSurfaceEvo = NULL; + } + } +} + +void nvInvalidateDefaultLut(NVDevEvoPtr pDevEvo) +{ + NvU32 sd; + + for (sd = 0; sd < NVKMS_MAX_SUBDEVICES; sd++) { + pDevEvo->lut.defaultBaseLUTState[sd] = + pDevEvo->lut.defaultOutputLUTState[sd] = + NvKmsLUTStateUninitialized; + } +} + +NvBool nvAllocLutSurfacesEvo(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 apiHead, dispIndex, i; + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + for (i = 0; i < ARRAY_LEN(pDevEvo->lut.apiHead[apiHead].LUT); i++) { + pDevEvo->lut.apiHead[apiHead].LUT[i] = AllocLutSurfaceEvo(pDevEvo); + + if (pDevEvo->lut.apiHead[apiHead].LUT[i] == NULL) { + nvFreeLutSurfacesEvo(pDevEvo); + return FALSE; + } + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + // No palette has been loaded yet, so disable the LUT. + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate = FALSE; + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curBaseLutEnabled = FALSE; + pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curOutputLutEnabled = FALSE; + } + } + + // Zero-initialize the LUT notifier state - ensure there's no stale data + nvkms_memset(&pDevEvo->lut.notifierState, 0, + sizeof(pDevEvo->lut.notifierState)); + + if (pDevEvo->hal->caps.needDefaultLutSurface) { + pDevEvo->lut.defaultLut = AllocLutSurfaceEvo(pDevEvo); + if (pDevEvo->lut.defaultLut == NULL) { + nvFreeLutSurfacesEvo(pDevEvo); + return FALSE; + } + + nvInvalidateDefaultLut(pDevEvo); + + pDevEvo->hal->InitDefaultLut(pDevEvo); + } + + return TRUE; +} + +void nvFreeLutSurfacesEvo(NVDevEvoPtr pDevEvo) +{ + NvU32 head, i, dispIndex, apiHead; + NVDispEvoPtr pDispEvo; + + /* Cancel any queued LUT update timers */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + nvCancelLutUpdateEvo(pDispEvo, apiHead); + } + } + + /* wait for any outstanding LUT updates before freeing the surface */ + if (pDevEvo->core) { + nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__); + } + + /* Clear the current lut surface stored in the hardware head state */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (head = 0; head < pDevEvo->numHeads; head++) { + pDispEvo->headState[head].lut.pCurrSurface = NULL; + pDispEvo->headState[head].lut.baseLutEnabled = FALSE; + pDispEvo->headState[head].lut.outputLutEnabled = FALSE; + } + } + + if (pDevEvo->lut.defaultLut != NULL) { + FreeLutSurfaceEvo(pDevEvo, pDevEvo->lut.defaultLut); + pDevEvo->lut.defaultLut = NULL; + } + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + for (i = 0; i < ARRAY_LEN(pDevEvo->lut.apiHead[apiHead].LUT); i++) { + if (pDevEvo->lut.apiHead[apiHead].LUT[i] != NULL) { + FreeLutSurfaceEvo(pDevEvo, pDevEvo->lut.apiHead[apiHead].LUT[i]); + pDevEvo->lut.apiHead[apiHead].LUT[i] = NULL; + } + } + } +} + +void nvUploadDataToLutSurfaceEvo(NVSurfaceEvoPtr pSurfEvo, + const NVEvoLutDataRec *pLUTBuffer, + NVDispEvoPtr pDispEvo) +{ + const NvU32* data = (const
NvU32*)pLUTBuffer; + size_t size = sizeof(*pLUTBuffer); + const int sd = pDispEvo->displayOwner; + NvU32 *dst; + const NvU32 *src; + int dword; + + if (pSurfEvo == NULL) { + nvAssert(pSurfEvo); + return; + } + + nvAssert(pSurfEvo->cpuAddress[sd]); + + /* The size to copy should not be larger than the surface. */ + nvAssert(size <= pSurfEvo->planes[0].rmObjectSizeInBytes); + + /* The source, destination, and size should be 4-byte aligned. */ + nvAssert((((NvUPtr)data) & 0x3) == 0); + nvAssert((((NvUPtr)pSurfEvo->cpuAddress[sd]) & 0x3) == 0); + nvAssert((size % 4) == 0); + + src = data; + dst = (NvU32*)pSurfEvo->cpuAddress[sd]; + + for (dword = 0; dword < (size/4); dword++) { + *(dst++) = *(src++); + } +} diff --git a/src/nvidia-modeset/src/nvkms-modepool.c b/src/nvidia-modeset/src/nvkms-modepool.c new file mode 100644 index 0000000..d8ad154 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-modepool.c @@ -0,0 +1,2091 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-modepool.h" +#include "nvkms-types.h" +#include "nvkms-dpy.h" +#include "nvkms-hdmi.h" +#include "nvkms-utils.h" +#include "nvkms-3dvision.h" +#include "nvkms-evo.h" +#include "nvkms-ioctl.h" +#include "nvkms-modetimings.h" + +#include "nv_mode_timings_utils.h" +#include "nv_vasprintf.h" + +#include "nvkms-prealloc.h" + +#include "nvkms-api.h" + +#include "dp/nvdp-connector-event-sink.h" + +typedef struct { + enum NvKmsModeSource source; + NvBool patchedStereoTimings; + NvBool dscPassThrough; +} EvoValidateModeFlags; + +static NvBool +ValidateModeIndexEdid(NVDpyEvoPtr pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsValidateModeIndexReply *pReply, + NVEvoInfoStringPtr pInfoString, + const NvU32 requestedModeIndex, + NvU32 *pCurrentModeIndex); +static NvBool +ValidateModeIndexVesa(NVDpyEvoPtr pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsValidateModeIndexReply *pReply, + NVEvoInfoStringPtr pInfoString, + const NvU32 requestedModeIndex, + NvU32 *pCurrentModeIndex); + +static void LogModeValidationEnd(const NVDispEvoRec *pDispEvo, + NVEvoInfoStringPtr pInfoString, + const char *failureReasonFormat, ...) 
+ __attribute__ ((format (printf, 3, 4))); + +static NvBool ConstructModeTimingsMetaData( + NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsMode *pKmsMode, + EvoValidateModeFlags *pFlags, + NVDispHeadInfoFrameStateEvoRec *pInfoFrameState); + +static NvBool ValidateMode(NVDpyEvoPtr pDpyEvo, + const struct NvKmsMode *pKmsMode, + const EvoValidateModeFlags *flags, + const struct NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString, + struct NvKmsModeValidationValidSyncs *pValidSyncs, + struct NvKmsUsageBounds *pModeUsage); + +#define NV_MAX_MODE_NAME_LEN 64 +#define NV_MAX_MODE_DESCRIPTION_LEN 128 + +/* A single frequency, at its longest, will have the format: "aaa.bbb" */ +#define NV_MAX_FREQUENCY_STRING_LEN 8 + +/* A range element, at its longest, will have the format: "aaa.bbb-ccc.ddd, " */ +#define NV_MAX_RANGE_ELEMENT_STRING_LEN 18 +#define NV_MAX_RANGE_STRING_LEN \ + (NV_MAX_RANGE_ELEMENT_STRING_LEN * NVKMS_MAX_VALID_SYNC_RANGES) + + +void +nvValidateModeIndex(NVDpyEvoPtr pDpyEvo, + const struct NvKmsValidateModeIndexRequest *pRequest, + struct NvKmsValidateModeIndexReply *pReply) +{ + const struct NvKmsModeValidationParams *pParams = &pRequest->modeValidation; + const NvU32 requestedModeIndex = pRequest->modeIndex; + NVEvoInfoStringRec infoString; + NvU32 currentModeIndex = 0; + NvBool done; + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + nvInitInfoString(&infoString, nvKmsNvU64ToPointer(pRequest->pInfoString), + pRequest->infoStringSize); + + done = ValidateModeIndexEdid(pDpyEvo, pParams, pReply, &infoString, + requestedModeIndex, ¤tModeIndex); + if (done) { + goto out; + } + + done = ValidateModeIndexVesa(pDpyEvo, pParams, pReply, &infoString, + requestedModeIndex, ¤tModeIndex); + if (done) { + goto out; + } + + pReply->end = 1; + return; + +out: + if (pRequest->infoStringSize > 0) { + /* Add 1 for the final '\0' */ + nvAssert((infoString.length + 1) <= pRequest->infoStringSize); + pReply->infoStringLenWritten = infoString.length + 1; + } +} + + +void +nvValidateModeEvo(NVDpyEvoPtr pDpyEvo, + const struct NvKmsValidateModeRequest *pRequest, + struct NvKmsValidateModeReply *pReply) +{ + NVEvoInfoStringRec infoString; + struct NvKmsMode kmsMode = { + .timings = pRequest->mode.timings, + }; + EvoValidateModeFlags evoFlags; + NVDispHeadInfoFrameStateEvoRec dummyInfoFrameState; + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + if (!ConstructModeTimingsMetaData(pDpyEvo, + &pRequest->modeValidation, + &kmsMode, + &evoFlags, + &dummyInfoFrameState)) { + pReply->valid = FALSE; + return; + } + + nvInitInfoString(&infoString, nvKmsNvU64ToPointer(pRequest->pInfoString), + pRequest->infoStringSize); + + pReply->valid = ValidateMode(pDpyEvo, + &kmsMode, + &evoFlags, + &pRequest->modeValidation, + &infoString, + &pReply->validSyncs, + &pReply->modeUsage); + + if (infoString.length > 0) { + /* Add 1 for the final '\0' */ + nvAssert((infoString.length + 1) <= pRequest->infoStringSize); + pReply->infoStringLenWritten = infoString.length + 1; + } +} + + +/*! + * Determine whether this mode is HDMI 3D by checking the HDMI 3D + * support map parsed from the CEA-861 EDID extension. + * + * Currently only frame packed 3D modes are supported, as we rely on + * Kepler's HW support for this mode. + * + * If hdmi 3D is supported, then only one of hdmi3D or hdmi3DAvailable + * will be returned true, based on if it was requested. 
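+ *
+ * Concretely: when a frame packed entry for the mode's VIC is found,
+ * *hdmi3D is TRUE iff pParams->stereoMode == NVKMS_STEREO_HDMI_3D,
+ * and *hdmi3DAvailable is TRUE otherwise; both are FALSE when no
+ * entry matches.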
+ */ +static void GetHdmi3DValue(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const NVT_TIMING *pTiming, + NvBool *hdmi3D, + NvBool *hdmi3DAvailable) +{ + /* This should only be used in paths where we have a valid parsed EDID. */ + + nvAssert(pDpyEvo->parsedEdid.valid); + + if ((NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status) == + NVT_TYPE_EDID_861ST) && + nvDpyEvoSupportsHdmi3D(pDpyEvo)) { + + const NVT_EDID_INFO *pInfo = &pDpyEvo->parsedEdid.info; + int i; + + for (i = 0; i < pInfo->Hdmi3Dsupport.total; i++) { + HDMI3DDETAILS hdmi3DMap = pInfo->Hdmi3Dsupport.map[i]; + NvU32 vic = NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status); + if ((vic == hdmi3DMap.Vic) && + (hdmi3DMap.StereoStructureMask & + NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK)) { + *hdmi3D = pParams->stereoMode == NVKMS_STEREO_HDMI_3D; + *hdmi3DAvailable = pParams->stereoMode != NVKMS_STEREO_HDMI_3D; + return; + } + } + } + + *hdmi3D = FALSE; + *hdmi3DAvailable = FALSE; +} + +/* + * DP 1.3 decimated YUV 4:2:0 mode is required if: + * + * - The GPU and monitor both support it. + * - Either the monitor doesn't support RGB 4:4:4 scanout of this mode, or + * the user prefers YUV 4:2:0 scanout when possible. + */ +static NvBool DpYuv420Required(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const NVT_TIMING *pTiming) +{ + const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + const NvBool monitorSupports444 = + IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.rgb444.bpcs); + + if (!pDevEvo->caps.supportsDP13) { + // The GPU doesn't support YUV420. + return FALSE; + } + + if (!nvDPLibDpyIsYuv420ModeSupported(pDpyEvo)) { + // The dpy doesn't support YUV420. + return FALSE; + } + + if (monitorSupports444) { + // The GPU and monitor both support YUV420 and RGB444; use RGB444 + // by default, but allow the user to prefer YUV420 mode in this + // decision. + return pParams->preferYUV420; + } else { + // The GPU and monitor both support YUV420, and the monitor doesn't + // support RGB444, so we have to fall back to YUV420. + return TRUE; + } +} + +/* + * Return whether this mode requires SW, HW, or no YUV 4:2:0 compression given + * this GPU, display, connector type, and user preference. + */ +static enum NvYuv420Mode GetYUV420Value( + const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const NVT_TIMING *pTiming) +{ + if (!IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.yuv420.bpcs) || + ((pTiming->HSyncWidth & 1) != 0) || + ((pTiming->HFrontPorch & 1) != 0) || + ((pTiming->HVisible & 1) != 0) || + ((pTiming->HTotal & 1) != 0) || + ((pTiming->VVisible & 1) != 0)) { + // If this mode doesn't support YUV420, then the GPU caps or + // user preference doesn't matter. + return NV_YUV420_MODE_NONE; + } else if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + if (DpYuv420Required(pDpyEvo, pParams, pTiming)) { + return NV_YUV420_MODE_SW; + } else { + return NV_YUV420_MODE_NONE; + } + } else if (nvDpyIsHdmiEvo(pDpyEvo)) { + /* + * YUV 4:2:0 compression is necessary for HDMI 2.0 4K@60hz modes + * unless the GPU and display both support HDMI 2.0 4K@60hz + * uncompressed RGB 4:4:4 (6G mode). A mode validation override + * may be used to allow RGB 4:4:4 mode if the GPU supports it + * even if the display doesn't claim support in the EDID. 
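+ *
+ * For a rough sense of scale (illustrative numbers): 3840x2160@60Hz
+ * needs a pixel clock of about 594 MHz as RGB 4:4:4, but only about
+ * 297 MHz once decimated to YUV 4:2:0; the same factor-of-two
+ * reduction is applied by the pixel clock checks in
+ * ValidateModeTimings() below.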
+ */ + if (!nvHdmi204k60HzRGB444Allowed(pDpyEvo, pParams, pTiming) || + pParams->preferYUV420) { + + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + // XXX assume the heads have equal capabilities + // XXX assume the gpus have equal capabilities + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[0]; + NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities; + NVEvoHeadCaps *pHeadCaps = &pEvoCaps->head[0]; + + if (pHeadCaps->supportsHDMIYUV420HW) { + return NV_YUV420_MODE_HW; + } else { + return NV_YUV420_MODE_SW; + } + } else { + return NV_YUV420_MODE_NONE; + } + } else { + return NV_YUV420_MODE_NONE; + } +} + + +/*! + * Scan through the EDID-specified modes, counting each one. If the + * count reaches requestedModeIndex, then validate that mode. + * + * \param[in] pDpyEvo The dpy whose EDID's modes are considered. + * \param[in] pParams The NvKmsModeValidationParams. + * \param[out] pReply The NvKmsValidateModeIndexReply; if we found + * requestedModeIndex, pReply->valid will store if + * the mode was valid. + * \param[in] requestedModeIndex The index of the mode we are looking for. + * \param[in,out] pCurrentModeIndex A running total of the number of modes + * we have considered. This will be incremented + * by the number of modes considered. + * + * \return If we found the mode with index == requestedModeIndex, + * return TRUE. Otherwise, return FALSE. + */ +static NvBool +ValidateModeIndexEdid(NVDpyEvoPtr pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsValidateModeIndexReply *pReply, + NVEvoInfoStringPtr pInfoString, + const NvU32 requestedModeIndex, + NvU32 *pCurrentModeIndex) +{ + const char *description; + int i; + NvBool is3DVisionStereo = nvIs3DVisionStereoEvo(pParams->stereoMode); + + /* if no EDID, we have nothing to do here */ + + if (!pDpyEvo->parsedEdid.valid) { + return FALSE; + } + + /* Scan through all EDID modes. */ + + for (i = 0; i < pDpyEvo->parsedEdid.info.total_timings; i++) { + + NVT_TIMING timing = pDpyEvo->parsedEdid.info.timing[i]; + EvoValidateModeFlags flags; + struct NvKmsMode kmsMode = { }; + NvBool hdmi3D = FALSE; + + /* Skip this mode if it was marked invalid by nvtiming. */ + + if (timing.etc.status == 0) { + continue; + } + + /* + * If *pCurrentModeIndex matches requestedModeIndex, then + * validate the mode. Otherwise, go on to the next mode. + */ + if (*pCurrentModeIndex != requestedModeIndex) { + (*pCurrentModeIndex)++; + continue; + } + + nvkms_memset(&flags, 0, sizeof(flags)); + flags.source = NvKmsModeSourceEdid; + + /* patch the mode for 3DVision */ + if (is3DVisionStereo && + pDpyEvo->stereo3DVision.requiresModetimingPatching && + nvPatch3DVisionModeTimingsEvo(&timing, pDpyEvo, pInfoString)) { + flags.patchedStereoTimings = TRUE; + } + + if ((NVT_GET_TIMING_STATUS_TYPE(timing.etc.status) == + NVT_TYPE_EDID_861ST) && + (NVT_GET_CEA_FORMAT(timing.etc.status) > 0) && + (timing.etc.name[0] != '\0')) { + description = (const char *) timing.etc.name; + } else { + description = NULL; + } + + /* convert from the EDID's NVT_TIMING to NvModeTimings */ + + NVT_TIMINGtoNvModeTimings(&timing, &kmsMode.timings); + + /* + * Determine whether this mode is a HDMI 3D by checking the HDMI 3D + * support map parsed from the CEA-861 EDID extension. + * + * Currently only frame packed 3D modes are supported, as we rely on + * Kepler's HW support for this mode. 
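+ *
+ * Frame packing effectively doubles the vertical total and the pixel
+ * clock, which is why the logging and refresh checks later in this
+ * file halve those values again for HDMI 3D modes ("doubled for
+ * HDMI 3D").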
+ */ + GetHdmi3DValue(pDpyEvo, pParams, &timing, &hdmi3D, + &pReply->hdmi3DAvailable); + nvKmsUpdateNvModeTimingsForHdmi3D(&kmsMode.timings, hdmi3D); + + if (!!(timing.etc.flag & NVT_FLAG_DISPLAYID_T7_DSC_PASSTHRU)) { + flags.dscPassThrough = TRUE; + } + + kmsMode.timings.yuv420Mode = GetYUV420Value(pDpyEvo, pParams, &timing); + + /* validate the mode */ + + pReply->valid = ValidateMode(pDpyEvo, + &kmsMode, + &flags, + pParams, + pInfoString, + &pReply->validSyncs, + &pReply->modeUsage); + + /* + * The client did not request hdmi3D, but this mode supports hdmi3D. + * Re-validate the mode with hdmi3D enabled. If that passes, report + * to the client that the mode could be used with hdmi3D if they choose + * later. + */ + if (pReply->valid && pReply->hdmi3DAvailable) { + /* + * Use dummy validSyncs and modeUsage so the original result isn't + * affected. + * + * Create a temporary KMS mode so that we can enable hdmi3D in it + * without perturbing the currently validated mode. + * + * Put all of this in a temporary heap allocation, to conserve + * stack. + */ + struct workArea { + struct NvKmsModeValidationValidSyncs stereoValidSyncs; + struct NvKmsUsageBounds stereoModeUsage; + struct NvKmsMode stereoKmsMode; + } *pWorkArea = nvCalloc(1, sizeof(*pWorkArea)); + + if (pWorkArea == NULL) { + pReply->hdmi3DAvailable = FALSE; + } else { + pWorkArea->stereoKmsMode = kmsMode; + nvKmsUpdateNvModeTimingsForHdmi3D( + &pWorkArea->stereoKmsMode.timings, TRUE); + + pReply->hdmi3DAvailable = + ValidateMode(pDpyEvo, + &pWorkArea->stereoKmsMode, + &flags, + pParams, + pInfoString, + &pWorkArea->stereoValidSyncs, + &pWorkArea->stereoModeUsage); + nvFree(pWorkArea); + } + } + + /* + * if this is a detailed timing, then flag it as such; this + * will be used later when searching for the AutoSelect mode + */ + + if (NVT_GET_TIMING_STATUS_TYPE(timing.etc.status) == + NVT_TYPE_EDID_DTD) { + + /* + * if the EDID indicates that the first detailed timing is + * preferred, then flag it as such; this will be used + * later when searching for the AutoSelect mode + * + * Note that the sequence number counts from 1 + */ + + if ((pDpyEvo->parsedEdid.info.u.feature_ver_1_3.preferred_timing_is_native) && + NVT_GET_TIMING_STATUS_SEQ(timing.etc.status) == 1) { + + pReply->preferredMode = TRUE; + } + } + + /* + * If the NVT_TIMING was patched for 3DVision above, then the + * NvModeTimings generated from it, when passed to + * nvFindEdidNVT_TIMING() during nvValidateModeForModeset(), + * won't match the original EDID NVT_TIMING. Rebuild + * NvModeTimings based on the original (non-3DVision-patched) + * NVT_TIMING from the EDID, and return that to the client. + * When the NvModeTimings is passed to + * nvValidateModeForModeset(), the 3DVision patching will be + * performed again.
+ */ + if (flags.patchedStereoTimings) { + enum NvYuv420Mode yuv420Mode = kmsMode.timings.yuv420Mode; + hdmi3D = kmsMode.timings.hdmi3D; + + NVT_TIMINGtoNvModeTimings(&pDpyEvo->parsedEdid.info.timing[i], + &kmsMode.timings); + kmsMode.timings.yuv420Mode = yuv420Mode; + + nvKmsUpdateNvModeTimingsForHdmi3D(&kmsMode.timings, hdmi3D); + } + + pReply->mode.timings = kmsMode.timings; + pReply->source = NvKmsModeSourceEdid; + + if (description != NULL) { + nvAssert(nvkms_strlen(description) < sizeof(pReply->description)); + nvkms_strncpy(pReply->description, description, + sizeof(pReply->description)); + pReply->description[sizeof(pReply->description) - 1] = '\0'; + } + + nvBuildModeName(kmsMode.timings.hVisible, kmsMode.timings.vVisible, + pReply->mode.name, sizeof(pReply->mode.name)); + return TRUE; + } + + /* No matching mode found. */ + return FALSE; +} + + +// NOTE: does not include timings for 848x480, 1280x768, 1360x768, +// 1400x1050, 1440x900, 1680x1050, 1920x1200 + +static const NvModeTimings VesaModesTable[] = { +#define VESA_MODES_TABLE_ENTRY(_RRx1k, \ + _pixelClockHz, \ + _hVisible, _hSyncStart, \ + _hSyncEnd, _hTotal, \ + _vVisible, _vSyncStart, \ + _vSyncEnd, _vTotal, \ + _hSyncPos, _hSyncNeg, \ + _vSyncPos, _vSyncNeg) \ + { .pixelClockHz = _pixelClockHz, \ + .RRx1k = _RRx1k, \ + .hVisible = _hVisible, .hSyncStart = _hSyncStart, \ + .hSyncEnd = _hSyncEnd, .hTotal = _hTotal, \ + .hSkew = 0, \ + .vVisible = _vVisible, .vSyncStart = _vSyncStart, \ + .vSyncEnd = _vSyncEnd, .vTotal = _vTotal, \ + .sizeMM = { \ + .w = 0, .h = 0 }, \ + .interlaced = FALSE, .doubleScan = FALSE, \ + .hSyncPos = _hSyncPos, .hSyncNeg = _hSyncNeg, \ + .vSyncPos = _vSyncPos, .vSyncNeg = _vSyncNeg, \ + .hdmi3D = FALSE, \ + .yuv420Mode = NV_YUV420_MODE_NONE, } + + // VESA Standard 640x350 @ 85Hz + VESA_MODES_TABLE_ENTRY( + 85080, 31500000, + 640, 672, 736, 832, + 350, 382, 385, 445, + TRUE, FALSE, FALSE, TRUE ), + + // VESA Standard 640x400 @ 85Hz + VESA_MODES_TABLE_ENTRY( + 85080, 31500000, + 640, 672, 736, 832, + 400, 401, 404, 445, + FALSE, TRUE, TRUE, FALSE ), + + // VESA Standard 720x400 @ 85Hz + VESA_MODES_TABLE_ENTRY( + 85039, 35500000, + 720, 756, 828, 936, + 400, 401, 404, 446, + FALSE, TRUE, TRUE, FALSE ), + + // Industry Standard 640x480 @ 60Hz + VESA_MODES_TABLE_ENTRY( + 59940, 25175000, + 640, 656, 752, 800, + 480, 490, 492, 525, + FALSE, TRUE, FALSE, TRUE ), + + // VESA Standard 640x480 @ 72Hz + VESA_MODES_TABLE_ENTRY( + 72809, 31500000, + 640, 664, 704, 832, + 480, 489, 492, 520, + FALSE, TRUE, FALSE, TRUE ), + + // VESA Standard 640x480 @ 75Hz + VESA_MODES_TABLE_ENTRY( + 75000, 31500000, + 640, 656, 720, 840, + 480, 481, 484, 500, + FALSE, TRUE, FALSE, TRUE ), + + // VESA Standard 640x480 @ 85Hz + VESA_MODES_TABLE_ENTRY( + 85008, 36000000, + 640, 696, 752, 832, + 480, 481, 484, 509, + FALSE, TRUE, FALSE, TRUE ), + + // VESA Standard 800x600 @ 56Hz + VESA_MODES_TABLE_ENTRY( + 56250, 36000000, + 800, 824, 896, 1024, + 600, 601, 603, 625, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 800x600 @ 60Hz + VESA_MODES_TABLE_ENTRY( + 60317, 40000000, + 800, 840, 968, 1056, + 600, 601, 605, 628, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 800x600 @ 72Hz + VESA_MODES_TABLE_ENTRY( + 72188, 50000000, + 800, 856, 976, 1040, + 600, 637, 643, 666, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 800x600 @ 75Hz + VESA_MODES_TABLE_ENTRY( + 75000, 49500000, + 800, 816, 896, 1056, + 600, 601, 604, 625, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 800x600 @ 85Hz + VESA_MODES_TABLE_ENTRY( + 85137, 
56300000, + 800, 832, 896, 1048, + 600, 601, 604, 631, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 1024x768i @ 87Hz + VESA_MODES_TABLE_ENTRY( + 86958, 44900000, + 1024, 1032, 1208, 1264, + 768, 768, 776, 817, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 1024x768 @ 60Hz + VESA_MODES_TABLE_ENTRY( + 60004, 65000000, + 1024, 1048, 1184, 1344, + 768, 771, 777, 806, + FALSE, TRUE, FALSE, TRUE ), + + // VESA Standard 1024x768 @ 70Hz + VESA_MODES_TABLE_ENTRY( + 70069, 75000000, + 1024, 1048, 1184, 1328, + 768, 771, 777, 806, + FALSE, TRUE, FALSE, TRUE ), + + // VESA Standard 1024x768 @ 75Hz + VESA_MODES_TABLE_ENTRY( + 75029, 78750000, + 1024, 1040, 1136, 1312, + 768, 769, 772, 800, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 1024x768 @ 85Hz + VESA_MODES_TABLE_ENTRY( + 84997, 94500000, + 1024, 1072, 1168, 1376, + 768, 769, 772, 808, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 1152x864 @ 75Hz + VESA_MODES_TABLE_ENTRY( + 75000, 108000000, + 1152, 1216, 1344, 1600, + 864, 865, 868, 900, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 1280x960 @ 60Hz + VESA_MODES_TABLE_ENTRY( + 60000, 108000000, + 1280, 1376, 1488, 1800, + 960, 961, 964, 1000, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 1280x960 @ 85Hz + VESA_MODES_TABLE_ENTRY( + 85002, 148500000, + 1280, 1344, 1504, 1728, + 960, 961, 964, 1011, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 1280x1024 @ 60Hz + VESA_MODES_TABLE_ENTRY( + 60020, 108000000, + 1280, 1328, 1440, 1688, + 1024, 1025, 1028, 1066, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 1280x1024 @ 75Hz + VESA_MODES_TABLE_ENTRY( + 75025, 135000000, + 1280, 1296, 1440, 1688, + 1024, 1025, 1028, 1066, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 1280x1024 @ 85Hz + VESA_MODES_TABLE_ENTRY( + 85024, 157500000, + 1280, 1344, 1504, 1728, + 1024, 1025, 1028, 1072, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 1600x1200 @ 60Hz + VESA_MODES_TABLE_ENTRY( + 60000, 162000000, + 1600, 1664, 1856, 2160, + 1200, 1201, 1204, 1250, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 1600x1200 @ 65Hz + VESA_MODES_TABLE_ENTRY( + 65000, 175500000, + 1600, 1664, 1856, 2160, + 1200, 1201, 1204, 1250, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 1600x1200 @ 70Hz + VESA_MODES_TABLE_ENTRY( + 70000, 189000000, + 1600, 1664, 1856, 2160, + 1200, 1201, 1204, 1250, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 1600x1200 @ 75Hz + VESA_MODES_TABLE_ENTRY( + 75000, 202500000, + 1600, 1664, 1856, 2160, + 1200, 1201, 1204, 1250, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 1600x1200 @ 85Hz + VESA_MODES_TABLE_ENTRY( + 85000, 229500000, + 1600, 1664, 1856, 2160, + 1200, 1201, 1204, 1250, + TRUE, FALSE, TRUE, FALSE ), + + // VESA Standard 1792x1344 @ 60Hz + VESA_MODES_TABLE_ENTRY( + 60014, 204800000, + 1792, 1920, 2120, 2448, + 1344, 1345, 1348, 1394, + FALSE, TRUE, TRUE, FALSE ), + + // VESA Standard 1792x1344 @ 75Hz + VESA_MODES_TABLE_ENTRY( + 74997, 261000000, + 1792, 1888, 2104, 2456, + 1344, 1345, 1348, 1417, + FALSE, TRUE, TRUE, FALSE ), + + // VESA Standard 1856x1392 @ 60Hz + VESA_MODES_TABLE_ENTRY( + 60009, 218300000, + 1856, 1952, 2176, 2528, + 1392, 1393, 1396, 1439, + FALSE, TRUE, TRUE, FALSE ), + + // VESA Standard 1856x1392 @ 75Hz + VESA_MODES_TABLE_ENTRY( + 75000, 288000000, + 1856, 1984, 2208, 2560, + 1392, 1393, 1396, 1500, + FALSE, TRUE, TRUE, FALSE ), + + // VESA Standard 1920x1440 @ 60Hz + VESA_MODES_TABLE_ENTRY( + 60000, 234000000, + 1920, 2048, 2256, 2600, + 1440, 1441, 1444, 1500, + FALSE, TRUE, TRUE, FALSE ), + + // VESA Standard 
1920x1440 @ 75Hz + VESA_MODES_TABLE_ENTRY( + 75000, 297000000, + 1920, 2064, 2288, 2640, + 1440, 1441, 1444, 1500, + FALSE, TRUE, TRUE, FALSE ), +#undef VESA_MODES_TABLE_ENTRY +}; + + +/*! + * Scan through the VESA Standard modes, counting each one. If the + * count reaches requestedModeIndex, then validate that mode. + * + * \param[in] pDpyEvo The dpy for whom the modes are considered. + * \param[in] pParams The NvKmsModeValidationParams. + * \param[out] pReply The NvKmsValidateModeIndexReply; if we found + * requestedModeIndex, pReply->valid will store if + * the mode was valid. + * \param[in] requestedModeIndex The index of the mode we are looking for. + * \param[in,out] pCurrentModeIndex A running total of the number of modes + * we have considered. This will be incremented + * by the number of modes considered. + * + * \return If we found the mode with index == requestedModeIndex, + * return TRUE. Otherwise, return FALSE. + */ +static NvBool +ValidateModeIndexVesa(NVDpyEvoPtr pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsValidateModeIndexReply *pReply, + NVEvoInfoStringPtr pInfoString, + const NvU32 requestedModeIndex, + NvU32 *pCurrentModeIndex) +{ + int i; + + for (i = 0; i < ARRAY_LEN(VesaModesTable); i++) { + struct NvKmsMode kmsMode = { }; + EvoValidateModeFlags flags; + + /* + * If *pCurrentModeIndex matches requestedModeIndex, then + * validate the mode. Otherwise, go on to the next mode. + */ + if (*pCurrentModeIndex != requestedModeIndex) { + (*pCurrentModeIndex)++; + continue; + } + + kmsMode.timings = VesaModesTable[i]; + + nvkms_memset(&flags, 0, sizeof(flags)); + flags.source = NvKmsModeSourceVesa; + + /* is this mode valid? */ + pReply->valid = ValidateMode(pDpyEvo, + &kmsMode, + &flags, + pParams, + pInfoString, + &pReply->validSyncs, + &pReply->modeUsage); + + pReply->mode.timings = kmsMode.timings; + pReply->source = NvKmsModeSourceVesa; + + nvBuildModeName(VesaModesTable[i].hVisible, + VesaModesTable[i].vVisible, + pReply->mode.name, sizeof(pReply->mode.name)); + return TRUE; + } + + /* No matching mode found. */ + return FALSE; +} + + +/*! + * Return if the given NvModeTimings match any entry in VesaModesTable[]. + */ +static NvBool IsVesaMode(const NvModeTimings *pModeTimings, + const struct NvKmsModeValidationParams *pParams) +{ + int i; + + for (i = 0; i < ARRAY_LEN(VesaModesTable); i++) { + if (NvModeTimingsMatch(&VesaModesTable[i], pModeTimings, + TRUE /* ignoreSizeMM */, + ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_RRX1K_CHECK) != 0x0) + /* ignoreRRx1k */)) { + return TRUE; + } + } + + return FALSE; +} + + +/*! + * Write to pInfoString with information about the current mode that + * we are validating; called from the beginning of ValidateMode(); + * LogModeValidationEnd() should be called at the end of + * ValidateMode() to report whether the mode was validated. + */ + +static void LogModeValidationBegin(NVEvoInfoStringPtr pInfoString, + const NvModeTimings *pModeTimings) +{ + nvEvoLogInfoString(pInfoString, "%d x %d @ %d Hz%s", + pModeTimings->hVisible, + pModeTimings->vVisible, + NV_U32_KHZ_TO_HZ(pModeTimings->RRx1k), + pModeTimings->hdmi3D ? " (HDMI 3D)" : ""); + + nvEvoLogModeValidationModeTimings(pInfoString, pModeTimings); +} + + +/*! + * Append to pInfoString with any mode validation failure. + */ +static void LogModeValidationEnd(const NVDispEvoRec *pDispEvo, + NVEvoInfoStringPtr pInfoString, + const char *failureReasonFormat, ...) 
+{ + /* expand any varargs, and print the mode validation result */ + + if (failureReasonFormat) { + char *buf; + NV_VSNPRINTF(buf, failureReasonFormat); + nvEvoLogInfoString(pInfoString, + "Mode is rejected: %s.", + buf ? buf : "Unknown failure"); + nvFree(buf); + } +} + +/*! + * Print mode timings to the NVEvoInfoStringPtr. + */ +void nvEvoLogModeValidationModeTimings(NVEvoInfoStringPtr + pInfoString, + const NvModeTimings *pModeTimings) +{ + const char *extra; + NvU32 hdmi3DPixelClock = HzToKHz(pModeTimings->pixelClockHz); + + if (pModeTimings->hdmi3D) { + hdmi3DPixelClock /= 2; + } + + nvEvoLogInfoString(pInfoString, " Pixel Clock : " + NV_FMT_DIV_1000_POINT_2 " MHz%s", + NV_VA_DIV_1000_POINT_2(hdmi3DPixelClock), + pModeTimings->hdmi3D ? " (HDMI 3D)" : ""); + + nvEvoLogInfoString(pInfoString, " HRes, HSyncStart : %4d, %4d", + pModeTimings->hVisible, + pModeTimings->hSyncStart); + + nvEvoLogInfoString(pInfoString, " HSyncEnd, HTotal : %4d, %4d", + pModeTimings->hSyncEnd, + pModeTimings->hTotal); + + nvEvoLogInfoString(pInfoString, " VRes, VSyncStart : %4d, %4d", + pModeTimings->vVisible, + pModeTimings->vSyncStart); + + nvEvoLogInfoString(pInfoString, " VSyncEnd, VTotal : %4d, %4d", + pModeTimings->vSyncEnd, + pModeTimings->vTotal); + + nvEvoLogInfoString(pInfoString, " Sync Polarity : %s%s%s%s", + pModeTimings->hSyncPos ? "+H " : "", + pModeTimings->hSyncNeg ? "-H " : "", + pModeTimings->vSyncPos ? "+V " : "", + pModeTimings->vSyncNeg ? "-V " : ""); + + + if (pModeTimings->interlaced && pModeTimings->doubleScan) { + extra = "Interlace DoubleScan"; + } else if (pModeTimings->interlaced) { + extra = "Interlace"; + } else if (pModeTimings->doubleScan) { + extra = "DoubleScan"; + } else { + extra = NULL; + } + + if (extra) { + nvEvoLogInfoString(pInfoString, " Extra : %s", extra); + } +} + + +/*! + * Adjust the given value by the given percentage, using integer math. + * + * The 'percentage' argument is multiplied by 100 by the caller. E.g., + * + * percentage=50 ==> 50% + * percentage=110 ==> 110% + * + * So, divide by 100.0: + * + * value * percentage / 100 + */ +static NvU32 Percentage(const NvU32 value, const NvU32 percentage) +{ + return axb_div_c(value, percentage, 100); +} + +/*! + * Write the given frequency to the given buffer. + * + * The frequency value is assumed to have been multiplied by 1000, + * such that 'value % 1000' gives the fractional part, and value/1000 + * gives the integer part. + * + * The buffer is assumed to be (at least) NV_MAX_FREQUENCY_STRING_LEN + * bytes long. + * + * Note that to meet the size assumptions made in the + * NV_MAX_FREQUENCY_STRING_LEN definition, the integer portion of the + * frequency value is clamped to 3 digits. + */ +static int +FrequencyToString(const NvU32 value, char *buffer) +{ + int n = nvkms_snprintf(buffer, NV_MAX_FREQUENCY_STRING_LEN, + "%d.%03d", + /* mod 1000, to limit to 3 digits */ + (value / 1000) % 1000, + value % 1000); + + buffer[NV_MAX_FREQUENCY_STRING_LEN - 1] = '\0'; + + return n; +} + +/*! + * Write the given NvKmsModeValidationFrequencyRanges to the given buffer.
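+ *
+ * Example output (illustrative): ranges of 30000..80000 and
+ * 50000..50000 are rendered as "30.000-80.000, 50.000".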
+ */ +static void +RangesToString(const struct NvKmsModeValidationFrequencyRanges *pRanges, + char buffer[NV_MAX_RANGE_STRING_LEN]) +{ + char *s; + int i, n; + + s = buffer; + + for (i = 0; i < pRanges->numRanges; i++) { + if (pRanges->range[i].high == pRanges->range[i].low) { + s += FrequencyToString(pRanges->range[i].high, s); + } else { + char highString[NV_MAX_FREQUENCY_STRING_LEN]; + char lowString[NV_MAX_FREQUENCY_STRING_LEN]; + + FrequencyToString(pRanges->range[i].high, highString); + FrequencyToString(pRanges->range[i].low, lowString); + + n = buffer + NV_MAX_RANGE_STRING_LEN - s; + s += nvkms_snprintf(s, n, "%s-%s", lowString, highString); + } + + if (i < (pRanges->numRanges - 1)) { + n = buffer + NV_MAX_RANGE_STRING_LEN - s; + s += nvkms_snprintf(s, n, ", "); + } + } + + buffer[NV_MAX_RANGE_STRING_LEN - 1] = '\0'; +} + +static NvBool ValidateModeTimings( + NVDpyEvoPtr pDpyEvo, + const struct NvKmsMode *pKmsMode, + const EvoValidateModeFlags *flags, + const struct NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString, + struct NvKmsModeValidationValidSyncs *pValidSyncs) +{ + int i; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 overrides = pParams->overrides; + const NvBool is3DVisionStereo = nvIs3DVisionStereoEvo(pParams->stereoMode); + const char *modeName = pKmsMode->name; + const NvModeTimings *pModeTimings = &pKmsMode->timings; + char localModeName[NV_MAX_MODE_NAME_LEN]; + + if (modeName[0] == '\0') { + nvBuildModeName(pModeTimings->hVisible, pModeTimings->vVisible, + localModeName, sizeof(localModeName)); + modeName = localModeName; + } + + /* Compute the validSyncs to use during validation. */ + + *pValidSyncs = pParams->validSyncs; + nvDpySetValidSyncsEvo(pDpyEvo, pValidSyncs); + + if (pModeTimings->interlaced) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + if (!pEvoSubDev->capabilities.misc.supportsInterlaced) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Interlaced modes are not supported on this GPU"); + return FALSE; + } + } + + if ((flags->source != NvKmsModeSourceEdid) && + (overrides & NVKMS_MODE_VALIDATION_ALLOW_NON_EDID_MODES) == 0) { + + NvBool continuousFrequency = TRUE; + + /* + * EDID 1.3 defines the "GTF Supported" flag like this: + * + * If this bit is set to 1, the display supports timings based + * on the GTF standard. + * + * We interpret this to mean that if the bit is not set, then + * the display device only supports modes listed in the EDID. + */ + if (pDpyEvo->parsedEdid.valid && + (pDpyEvo->parsedEdid.info.version == NVT_EDID_VER_1_3)) { + continuousFrequency = + pDpyEvo->parsedEdid.info.u.feature_ver_1_3.support_gtf; + } + + /* + * EDID 1.4 Release A, Revision 2; Note 5 in section 3.6.4: + * + * If bit 0 is set to 0, then the display is non-continuous + * frequency (multi-mode) and is only specified to accept the + * video timing formats that are listed in BASE EDID and + * certain EXTENSION Blocks. 
+ */ + if (pDpyEvo->parsedEdid.valid && + (pDpyEvo->parsedEdid.info.version >= NVT_EDID_VER_1_4)) { + if (pDpyEvo->parsedEdid.info.input.isDigital) { + continuousFrequency = + pDpyEvo->parsedEdid.info.u.feature_ver_1_4_digital.continuous_frequency; + } else { + continuousFrequency = + pDpyEvo->parsedEdid.info.u.feature_ver_1_4_analog.continuous_frequency; + } + } + + if (!continuousFrequency) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Only EDID-provided modes are allowed on %s (continuous frequency modes not allowed)", + pDpyEvo->name); + return FALSE; + } + + /* + * By default, we only allow EDID modes when driving digital + * protocol. + */ + if (pDpyEvo->parsedEdid.valid && + pDpyEvo->parsedEdid.info.input.isDigital) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Only EDID-provided modes are allowed on %s", + pDpyEvo->name); + return FALSE; + } + } + + /* Throw out modes that will break downstream assumptions */ + + if ((overrides & NVKMS_MODE_VALIDATION_NO_TOTAL_SIZE_CHECK) == 0) { + + if (pModeTimings->hVisible > pModeTimings->hSyncStart) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's visible horizontal size (%d) exceeds the horizontal sync start (%d)", + pModeTimings->hVisible, + pModeTimings->hSyncStart); + return FALSE; + } + + if (pModeTimings->hSyncStart > pModeTimings->hSyncEnd) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's horizontal sync start (%d) exceeds the horizontal sync end (%d)", + pModeTimings->hSyncStart, + pModeTimings->hSyncEnd); + return FALSE; + } + + if (pModeTimings->hSyncEnd > pModeTimings->hTotal) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's horizontal sync end (%d) exceeds the horizontal total size (%d)", + pModeTimings->hSyncEnd, + pModeTimings->hTotal); + return FALSE; + } + + if (pModeTimings->vVisible > pModeTimings->vSyncStart) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's visible vertical size (%d) exceeds the vertical sync start (%d)", + pModeTimings->vVisible, + pModeTimings->vSyncStart); + return FALSE; + } + + if (pModeTimings->vSyncStart > pModeTimings->vSyncEnd) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's vertical sync start (%d) exceeds the vertical sync end (%d)", + pModeTimings->vSyncStart, + pModeTimings->vSyncEnd); + return FALSE; + } + + if (pModeTimings->vSyncEnd > pModeTimings->vTotal) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's vertical sync end (%d) exceeds the vertical total size (%d)", + pModeTimings->vSyncEnd, + pModeTimings->vTotal); + return FALSE; + } + } + + /* reject modes with too high pclk */ + + if ((overrides & NVKMS_MODE_VALIDATION_NO_MAX_PCLK_CHECK) == 0) { + + NvU32 maxPixelClockKHz = pDpyEvo->maxPixelClockKHz; + NvU32 realPixelClock = HzToKHz(pModeTimings->pixelClockHz); + if (pModeTimings->yuv420Mode != NV_YUV420_MODE_NONE) { + realPixelClock /= 2; + } + + if (realPixelClock > maxPixelClockKHz) { + NvU32 hdmi3DPixelClock = realPixelClock; + + if (pModeTimings->hdmi3D) { + hdmi3DPixelClock /= 2; + } + + if (is3DVisionStereo && + pDpyEvo->stereo3DVision.requiresModetimingPatching && + (realPixelClock - maxPixelClockKHz < 5000)) { + + nvAssert(!pModeTimings->hdmi3D); + + nvEvoLogInfoString(pInfoString, + "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz) is slightly higher than Display Device maximum (" NV_FMT_DIV_1000_POINT_1 " MHz), but is within tolerance for 3D Vision Stereo.", + NV_VA_DIV_1000_POINT_1(realPixelClock), + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); + + } else { + + 
LogModeValidationEnd(pDispEvo, pInfoString, + "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz%s) too high for Display Device (Max: " NV_FMT_DIV_1000_POINT_1 " MHz)", + NV_VA_DIV_1000_POINT_1(hdmi3DPixelClock), + pModeTimings->hdmi3D ? + ", doubled for HDMI 3D" : "", + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); + return FALSE; + } + } + } + + /* check against the EDID's max pclk */ + + if ((overrides & NVKMS_MODE_VALIDATION_NO_EDID_MAX_PCLK_CHECK) == 0) { + + NvU32 realPixelClock = HzToKHz(pModeTimings->pixelClockHz); + if (pModeTimings->yuv420Mode != NV_YUV420_MODE_NONE) { + realPixelClock /= 2; + } + + if (pDpyEvo->parsedEdid.valid && + (pDpyEvo->parsedEdid.limits.max_pclk_10khz != 0) && + (realPixelClock > + (pDpyEvo->parsedEdid.limits.max_pclk_10khz * 10))) { + + NvU32 hdmi3DPixelClock = realPixelClock; + NvU32 maxPixelClockKHz = pDpyEvo->parsedEdid.limits.max_pclk_10khz * 10; + + if (pModeTimings->hdmi3D) { + hdmi3DPixelClock /= 2; + } + + /* + * If this mode came from the EDID, then something is odd + * (see bug 336963); print a warning, but continue + */ + + if (is3DVisionStereo && + pDpyEvo->stereo3DVision.requiresModetimingPatching && + (realPixelClock - maxPixelClockKHz < 5000)) { + + nvAssert(!pModeTimings->hdmi3D); + + nvEvoLogInfoString(pInfoString, + "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz) is slightly higher than EDID specified maximum (" NV_FMT_DIV_1000_POINT_1 " MHz), but is within tolerance for 3D Vision Stereo.", + NV_VA_DIV_1000_POINT_1(realPixelClock), + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); + + } else if ((flags->source == NvKmsModeSourceEdid) && + ((overrides & + NVKMS_MODE_VALIDATION_OBEY_EDID_CONTRADICTIONS) == 0)) { + nvEvoLogInfoString(pInfoString, + "The EDID for %s contradicts itself: mode \"%s\" is specified in the EDID; " + "however, the EDID's reported maximum PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz) would exclude this mode's PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz%s); " + "ignoring EDID maximum PixelClock check for mode \"%s\".", + pDpyEvo->name, modeName, + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz), + NV_VA_DIV_1000_POINT_1(hdmi3DPixelClock), + pModeTimings->hdmi3D ? + ", doubled for HDMI 3D" : "", + modeName); + } else { + + LogModeValidationEnd(pDispEvo, pInfoString, + "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz%s) too high for EDID (EDID Max: " NV_FMT_DIV_1000_POINT_1" MHz)", + NV_VA_DIV_1000_POINT_1(hdmi3DPixelClock), + pModeTimings->hdmi3D ? + ", doubled for HDMI 3D" : "", + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); + return FALSE; + } + } + } + + /* check the mode against the max size */ + + if ((overrides & NVKMS_MODE_VALIDATION_NO_MAX_SIZE_CHECK) == 0) { + + const NvU32 maxHeight = pDevEvo->caps.maxRasterHeight; + const NvU32 maxWidth = pDevEvo->caps.maxRasterWidth; + + NvU16 realHTotal = pModeTimings->hTotal; + if (pModeTimings->yuv420Mode == NV_YUV420_MODE_SW) { + realHTotal /= 2; + } + + // With YUV420 modes, we want to use the real half-width hTotal + // for validation, but report the full-width value in the log. 
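+        /*
+         * For example (illustrative numbers): a YUV 4:2:0 SW mode with
+         * an hTotal of 4400 is validated against maxWidth as 2200,
+         * while the log message below still reports the full 4400.
+         */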
+ if ((realHTotal > maxWidth) || + (pModeTimings->vTotal > maxHeight)) { + + LogModeValidationEnd(pDispEvo, pInfoString, + "Mode total size (%u x %u), with visible size (%u x %u), larger than maximum size (%u x %u)", + pModeTimings->hTotal, + pModeTimings->vTotal, + pModeTimings->hVisible, + pModeTimings->vVisible, + maxWidth, maxHeight); + return FALSE; + } + } + + /* check against the frequency information */ + + if ((overrides & NVKMS_MODE_VALIDATION_NO_HORIZ_SYNC_CHECK) == 0) { + if (pValidSyncs->horizSyncHz.numRanges > 0) { + NvU32 hSync = axb_div_c(pModeTimings->pixelClockHz, 1, + pModeTimings->hTotal); + + for (i = 0; i < pValidSyncs->horizSyncHz.numRanges; i++) { + NvU32 low = pValidSyncs->horizSyncHz.range[i].low; + NvU32 high = pValidSyncs->horizSyncHz.range[i].high; + if ((hSync > Percentage(low, 99)) && + (hSync < Percentage(high, 101))) { + break; + } + } + + /* + * Now see whether we ran out of sync ranges without + * finding a match + */ + + if (i == pValidSyncs->horizSyncHz.numRanges) { + + char rangeString[NV_MAX_RANGE_STRING_LEN]; + char hSyncString[NV_MAX_FREQUENCY_STRING_LEN]; + + RangesToString(&pValidSyncs->horizSyncHz, rangeString); + FrequencyToString(hSync, hSyncString); + + /* + * If this mode came from the EDID and the valid + * HorizSync ranges (which excluded this timing) also + * came from the EDID, then something is odd (see bug + * 336963); print a warning, but continue. + */ + + if ((flags->source == NvKmsModeSourceEdid) && + (pValidSyncs->horizSyncHz.source == + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_EDID) && + ((overrides & + NVKMS_MODE_VALIDATION_OBEY_EDID_CONTRADICTIONS) == 0)) { + + nvEvoLogInfoString(pInfoString, + "The EDID for %s contradicts itself: mode \"%s\" is specified in the EDID; " + "however, the EDID's valid HorizSync range (%s kHz) would exclude this mode's HorizSync (%s kHz); " + "ignoring HorizSync check for mode \"%s\".", + pDpyEvo->name, modeName, + rangeString, hSyncString, modeName); + } else { + + LogModeValidationEnd(pDispEvo, pInfoString, + "HorizSync (%s kHz) out of range (%s kHz)", hSyncString, rangeString); + return FALSE; + } + } + } + } + + if ((overrides & NVKMS_MODE_VALIDATION_NO_VERT_REFRESH_CHECK) == 0) { + + if (pValidSyncs->vertRefreshHz1k.numRanges > 0) { + + /* + * note: we expect RRx1k to be field rate for interlaced + * modes, (undoubled) frame rate for doublescan modes, and + * (doubled) frame rate for HDMI 3D modes. + */ + NvU32 vRefresh = pModeTimings->RRx1k; + + for (i = 0; i < pValidSyncs->vertRefreshHz1k.numRanges; i++) { + NvU32 low = pValidSyncs->vertRefreshHz1k.range[i].low; + NvU32 high = pValidSyncs->vertRefreshHz1k.range[i].high; + + if ((vRefresh > Percentage(low, 99)) && + (vRefresh < Percentage(high, 101))) { + break; + } + } + + /* + * Now see whether we ran out of refresh ranges without + * finding a match + */ + + if (i == pValidSyncs->vertRefreshHz1k.numRanges) { + + char rangeString[NV_MAX_RANGE_STRING_LEN]; + char vRefreshString[NV_MAX_FREQUENCY_STRING_LEN]; + + if (pModeTimings->hdmi3D) { + vRefresh /= 2; + } + + RangesToString(&pValidSyncs->vertRefreshHz1k, + rangeString); + FrequencyToString(vRefresh, vRefreshString); + + /* + * If this mode came from the EDID and the valid + * VertRefresh ranges (which excluded this timing) + * also came from the EDID, then something is odd (see + * bug 336963); print a warning, but continue. 
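+             *
+             * (The range comparison above allows roughly 1% tolerance
+             * at each end: hSync must only be above 99% of a range's
+             * low bound and below 101% of its high bound.  For example,
+             * a 67.8 kHz hSync passes a 30-67.5 kHz range, since
+             * 67.8 < 67.5 * 1.01.)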
+ */ + + if ((flags->source == NvKmsModeSourceEdid) && + (pValidSyncs->vertRefreshHz1k.source == + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_EDID) && + ((overrides & + NVKMS_MODE_VALIDATION_OBEY_EDID_CONTRADICTIONS) == 0)) { + + nvEvoLogInfoString(pInfoString, + "The EDID for %s contradicts itself: mode \"%s\" is specified in the EDID; " + "however, the EDID's valid VertRefresh range (%s Hz) would exclude this mode's VertRefresh (%s Hz%s); " + "ignoring VertRefresh check for mode \"%s\".", + pDpyEvo->name, modeName, + rangeString, vRefreshString, + pModeTimings->hdmi3D ? ", doubled for HDMI 3D" : "", + modeName); + } else { + + LogModeValidationEnd(pDispEvo, pInfoString, + "VertRefresh (%s Hz%s) out of range (%s Hz)", vRefreshString, + pModeTimings->hdmi3D ? ", doubled for HDMI 3D" : "", + rangeString); + return FALSE; + } + } + } + } + + /* + * If 3D Vision Stereo is enabled, and the pDpy requires patched + * stereo modetimings, and these modetimings are not patched, then + * reject the mode, unless the mode validation override "AllowNon3DVModes" + * has been set. + */ + + if ((overrides & NVKMS_MODE_VALIDATION_ALLOW_NON_3DVISION_MODES) == 0) { + if (is3DVisionStereo && + pDpyEvo->stereo3DVision.requiresModetimingPatching && + !flags->patchedStereoTimings) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Mode not compatible with 3D Vision Stereo"); + return FALSE; + } + } + + /* + * If HDMI 3D is enabled and supported, reject non-HDMI 3D modes unless the + * mode validation override "AllowNonHDMI3DModes" has been set. + */ + if (((overrides & NVKMS_MODE_VALIDATION_ALLOW_NON_HDMI3D_MODES) == 0) && + (pParams->stereoMode == NVKMS_STEREO_HDMI_3D) && + nvDpyEvoSupportsHdmi3D(pDpyEvo) && + !pModeTimings->hdmi3D) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Mode not compatible with HDMI 3D"); + return FALSE; + } + + if (pModeTimings->hdmi3D && pModeTimings->interlaced) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Interlaced frame packed HDMI 3D modes are not supported."); + return FALSE; + } + + if (pModeTimings->interlaced && + nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo) && + (overrides & NVKMS_MODE_VALIDATION_ALLOW_DP_INTERLACED) == 0) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Interlaced modes are not supported over DisplayPort"); + return FALSE; + } + + if (pModeTimings->interlaced && + (overrides & NVKMS_MODE_VALIDATION_NO_INTERLACED_MODES)) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Interlaced modes are not allowed"); + return FALSE; + } + + if (pModeTimings->interlaced && + pParams->stereoMode != NVKMS_STEREO_DISABLED) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Interlaced modes are not allowed with stereo"); + return FALSE; + } + + if (flags->dscPassThrough && + (pParams->dscMode == NVKMS_DSC_MODE_FORCE_DISABLE)) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Mode is only supported with DSC pass-through, but DSC is force disabled"); + return FALSE; + } + + return TRUE; +} + +/* + * Log to the InfoString with information about this + * particular ViewPort. + */ + +static +void LogViewPort(NVEvoInfoStringPtr pInfoString, + const NVHwModeTimingsEvo timings[NVKMS_MAX_HEADS_PER_DISP], + const NvU32 numHeads) +{ + NvU32 head; + char str[64] = { }, *s = NULL; + + nvAssert(numHeads <= 2); + + nvEvoLogInfoString(pInfoString, + "DualHead Mode: %s", (numHeads > 1) ? 
"Yes" : "No"); + + /* print the viewport name, size, and taps */ + nvkms_memset(str, 0, sizeof(str)); + for (head = 0, s = str; head < numHeads; head++) { + const struct NvKmsRect viewPortOut = + nvEvoViewPortOutClientView(&timings[head]); + size_t n = str + sizeof(str) - s; + s += nvkms_snprintf(s, n, "%s%dx%d+%d+%d", (s != str) ? ", " : "", + viewPortOut.width, viewPortOut.height, + viewPortOut.x, viewPortOut.x); + } + nvEvoLogInfoString(pInfoString, + "Viewport %s", str); + + nvkms_memset(str, 0, sizeof(str)); + for (head = 0, s = str; head < numHeads; head++) { + const NVHwModeViewPortEvo *pViewPort = &timings[head].viewPort; + size_t n = str + sizeof(str) - s; + s += nvkms_snprintf(s, n, "%s%d", (s != str) ? ", " : "", + NVEvoScalerTapsToNum(pViewPort->hTaps)); + } + nvEvoLogInfoString(pInfoString, + " Horizontal Taps %s", str); + + nvkms_memset(str, 0, sizeof(str)); + for (head = 0, s = str; head < numHeads; head++) { + const NVHwModeViewPortEvo *pViewPort = &timings[head].viewPort; + size_t n = str + sizeof(str) - s; + s += nvkms_snprintf(s, n, "%s%d", (s != str) ? ", " : "", + NVEvoScalerTapsToNum(pViewPort->vTaps)); + } + nvEvoLogInfoString(pInfoString, + " Vertical Taps %s", str); +} + +/* + * Validate pModeTimings for use on pDpy. If the mode is valid, use + * pDev->disp.ConstructHwModeTimings() to assign pHwModeTimings and + * return TRUE. + */ +static NvBool ValidateMode(NVDpyEvoPtr pDpyEvo, + const struct NvKmsMode *pKmsMode, + const EvoValidateModeFlags *flags, + const struct NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString, + struct NvKmsModeValidationValidSyncs *pValidSyncs, + struct NvKmsUsageBounds *pModeUsage) +{ + const char *modeName = pKmsMode->name; + const NvModeTimings *pModeTimings = &pKmsMode->timings; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvBool b2Heads1Or = FALSE; + char localModeName[NV_MAX_MODE_NAME_LEN]; + + NVHwModeTimingsEvo *pTimingsEvo = + nvPreallocGet(pDevEvo, + PREALLOC_TYPE_VALIDATE_MODE_HW_MODE_TIMINGS, + sizeof(*pTimingsEvo)); + HDMI_FRL_CONFIG *pHdmiFrlConfig = + nvPreallocGet(pDevEvo, + PREALLOC_TYPE_VALIDATE_MODE_HDMI_FRL_CONFIG, + sizeof(*pHdmiFrlConfig)); + NVDscInfoEvoRec *pDscInfo = + nvPreallocGet(pDevEvo, + PREALLOC_TYPE_VALIDATE_MODE_DSC_INFO, + sizeof(*pDscInfo)); + NVHwModeTimingsEvo *impOutTimings = + nvPreallocGet(pDevEvo, + PREALLOC_TYPE_VALIDATE_MODE_IMP_OUT_HW_MODE_TIMINGS, + sizeof(*impOutTimings) * + NVKMS_MAX_HEADS_PER_DISP); + NvU32 impOutNumHeads = 0x0; + NvU32 head; + NvBool ret = FALSE; + + const NvKmsDpyOutputColorFormatInfo supportedColorFormats = + nvDpyGetOutputColorFormatInfo(pDpyEvo); + NVDpyAttributeColor dpyColor; + + if (modeName[0] == '\0') { + nvBuildModeName(pModeTimings->hVisible, pModeTimings->vVisible, + localModeName, sizeof(localModeName)); + modeName = localModeName; + } + + /* Initialize the EVO hwModeTimings structure */ + + nvkms_memset(pTimingsEvo, 0, sizeof(*pTimingsEvo)); + nvkms_memset(pHdmiFrlConfig, 0, sizeof(*pHdmiFrlConfig)); + nvkms_memset(pDscInfo, 0, sizeof(*pDscInfo)); + nvkms_memset(impOutTimings, 0, sizeof(*impOutTimings) * NVKMS_MAX_HEADS_PER_DISP); + + /* begin logging of ModeValidation for this mode */ + + LogModeValidationBegin(pInfoString, pModeTimings); + + if (!ValidateModeTimings(pDpyEvo, pKmsMode, flags, pParams, + pInfoString, pValidSyncs)) { + goto done; + } + + nvEvoLogInfoString(pInfoString, + "DSCPassThrough: %s", flags->dscPassThrough ? 
"Yes" : "No"); + + if (pTimingsEvo->yuv420Mode != NV_YUV420_MODE_NONE) { + dpyColor.format = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420; + dpyColor.bpc = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8; + dpyColor.range = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED; + dpyColor.colorimetry = NVKMS_OUTPUT_COLORIMETRY_DEFAULT; + } else if (!nvGetDefaultDpyColor(&supportedColorFormats, &dpyColor)) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Failed to get default color space and Bpc"); + goto done; + } + + /* + * we made it past the rest of mode validation; now construct the + * hw modetimings to use for this mode; we do this here so that we + * can report any failures as part of the mode validation + * reporting. + * + * XXX For certain modes like doublescan, interlaced, and YUV 4:2:0 + * emulated mode, the timings stored in the pTimingsEvo constructed + * here are different than the timings in pModeTimings used for validation + * earlier in this function. + * + * In certain cases (like pclk validation for YUV 4:2:0 modes, which store + * a doubled pclk in pModeTimings and the real pclk in pTimingsEvo) we + * want to use the pTimingsEvo value for validation in this function. + * It may make sense to restructure this function so pTimingsEvo + * construction happens earlier, then the pTimingsEvo values are used + * for the remaining validation. + */ + + if (!nvConstructHwModeTimingsEvo(pDpyEvo, + pKmsMode, + NULL, /* pViewPortSizeIn */ + NULL, /* pViewPortOut */ + flags->dscPassThrough, + &dpyColor, + pTimingsEvo, + pParams, + pInfoString)) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Unable to construct hardware-specific mode " + "timings"); + goto done; + } + + b2Heads1Or = nvEvoUse2Heads1OR(pDpyEvo, pTimingsEvo, pParams); + + if (nvDpyIsHdmiEvo(pDpyEvo)) { + if (!nvHdmiFrlQueryConfig(pDpyEvo, + &pKmsMode->timings, + pTimingsEvo, + &dpyColor, + b2Heads1Or, + pParams, + pHdmiFrlConfig, + pDscInfo)) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Unable to determine HDMI 2.1 Fixed Rate Link configuration."); + goto done; + } + } else { + if (!nvDPValidateModeEvo(pDpyEvo, pTimingsEvo, &dpyColor, b2Heads1Or, + pDscInfo, pParams)) { + LogModeValidationEnd(pDispEvo, + pInfoString, "DP Bandwidth check failed"); + goto done; + } + } + + /* + * Check ViewPortIn dimensions and ensure valid h/vTaps can be assigned. + */ + if (!nvValidateHwModeTimingsViewPort(pDevEvo, + /* XXX assume the gpus have equal capabilities */ + &pDevEvo->gpus[0].capabilities.head[0].scalerCaps, + pTimingsEvo, pInfoString)) { + goto done; + } + + + /* Run the raster timings through IMP checking. */ + if (!nvConstructHwModeTimingsImpCheckEvo(pDpyEvo->pConnectorEvo, + pTimingsEvo, + pDscInfo, + b2Heads1Or, + &dpyColor, + pParams, + impOutTimings, + &impOutNumHeads, + pInfoString)) { + LogModeValidationEnd(pDispEvo, pInfoString, + "GPU extended capability check failed"); + goto done; + } + + nvAssert(impOutNumHeads > 0); + + /* Log modevalidation information about the viewport. */ + + LogViewPort(pInfoString, impOutTimings, impOutNumHeads); + + /* + * Copy out the usage bounds that passed validation; note we intersect + * the usage bounds across the hardware heads that would be used with + * this apiHead, accumulating the results in pModeUsage. 
+     */
+    for (head = 0; head < impOutNumHeads; head++) {
+        if (head == 0) {
+            *pModeUsage = impOutTimings[0].viewPort.possibleUsage;
+        } else {
+            struct NvKmsUsageBounds *pTmpUsageBounds =
+                nvPreallocGet(pDevEvo,
+                              PREALLOC_TYPE_VALIDATE_MODE_TMP_USAGE_BOUNDS,
+                              sizeof(*pTmpUsageBounds));
+
+            nvIntersectUsageBounds(pModeUsage,
+                                   &impOutTimings[head].viewPort.possibleUsage,
+                                   pTmpUsageBounds);
+            *pModeUsage = *pTmpUsageBounds;
+
+            nvPreallocRelease(pDevEvo, PREALLOC_TYPE_VALIDATE_MODE_TMP_USAGE_BOUNDS);
+        }
+    }
+
+    /* Whew, if we got this far, the mode is valid. */
+
+    LogModeValidationEnd(pDispEvo, pInfoString, NULL);
+
+    ret = TRUE;
+
+done:
+    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_VALIDATE_MODE_HW_MODE_TIMINGS);
+    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_VALIDATE_MODE_HDMI_FRL_CONFIG);
+    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_VALIDATE_MODE_DSC_INFO);
+    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_VALIDATE_MODE_IMP_OUT_HW_MODE_TIMINGS);
+
+    return ret;
+}
+
+
+/*!
+ * Return whether the given NVT_TIMING and NvModeTimings match.
+ */
+static NvBool NVT_TIMINGmatchesNvModeTimings
+(
+    const NVT_TIMING *pTiming,
+    const NvModeTimings *pModeTimings,
+    const struct NvKmsModeValidationParams *pParams
+)
+{
+    NvModeTimings tmpModeTimings;
+
+    NVT_TIMINGtoNvModeTimings(pTiming, &tmpModeTimings);
+
+    return NvModeTimingsMatch(&tmpModeTimings, pModeTimings,
+                              TRUE /* ignoreSizeMM */,
+                              ((pParams->overrides &
+                                NVKMS_MODE_VALIDATION_NO_RRX1K_CHECK) != 0x0)
+                              /* ignoreRRx1k */);
+}
+
+
+/*!
+ * Find the NVT_TIMING from the dpy's EDID that matches the pModeTimings.
+ */
+const NVT_TIMING *nvFindEdidNVT_TIMING
+(
+    const NVDpyEvoRec *pDpyEvo,
+    const NvModeTimings *pModeTimings,
+    const struct NvKmsModeValidationParams *pParams
+)
+{
+    NvModeTimings tmpModeTimings;
+    int i;
+
+    if (!pDpyEvo->parsedEdid.valid) {
+        return NULL;
+    }
+
+    tmpModeTimings = *pModeTimings;
+
+    /*
+     * Revert any modeTimings modifications that were done for hdmi3D
+     * in ValidateModeIndexEdid(), so that the modeTimings can be
+     * compared with the NVT_TIMINGs in the parsed EDID.
+     */
+    nvKmsUpdateNvModeTimingsForHdmi3D(&tmpModeTimings, FALSE);
+
+    /*
+     * The NVT_TIMINGs we compare against below won't have yuv420 set
+     * (hdmi3D was already reverted above); clear the yuv420 flag in
+     * tmpModeTimings so that we can do a more meaningful comparison.
+     */
+    tmpModeTimings.yuv420Mode = NV_YUV420_MODE_NONE;
+
+    for (i = 0; i < pDpyEvo->parsedEdid.info.total_timings; i++) {
+        const NVT_TIMING *pTiming = &pDpyEvo->parsedEdid.info.timing[i];
+        if (NVT_TIMINGmatchesNvModeTimings(pTiming, &tmpModeTimings, pParams) &&
+            /*
+             * Only consider the mode a match if the yuv420
+             * configuration of pTiming would match pModeTimings.
+             */
+            (pModeTimings->yuv420Mode ==
+             GetYUV420Value(pDpyEvo, pParams, pTiming))) {
+            return pTiming;
+        }
+    }
+
+    return NULL;
+}
+
+/*!
+ * Construct the mode-timings metadata required for the mode validation
+ * logic. This metadata includes EvoValidateModeFlags, patched stereo
+ * vision timings, etc.
+ *
+ * \param[in]     pDpyEvo          The dpy for whom the mode is considered.
+ * \param[in]     pParams          The NvKmsModeValidationParams.
+ * \param[in/out] pKmsMode         The NVKMS mode to be considered.
+ * \param[out]    pFlags           The EvoValidateModeFlags.
+ * \param[out]    pInfoFrameState  The InfoFrame state; may be NULL.
+ *
+ * \return  Return TRUE on success, with the mode timings patched and
+ *          the EvoValidateModeFlags and InfoFrame controls assigned;
+ *          otherwise return FALSE.
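+ *
+ *          (In the current implementation, FALSE is returned only when
+ *          the requested yuv420 or hdmi3D state is inconsistent with
+ *          the matched EDID timing.)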
+ */ +static NvBool ConstructModeTimingsMetaData( + NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsMode *pKmsMode, + EvoValidateModeFlags *pFlags, + NVDispHeadInfoFrameStateEvoRec *pInfoFrameState) +{ + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + EvoValidateModeFlags flags = { 0 }; + NVT_VIDEO_INFOFRAME_CTRL *pVideoInfoFrameCtrl = NULL; + NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL *pVendorInfoFrameCtrl = NULL; + NvModeTimings modeTimings = pKmsMode->timings; + const NVT_TIMING *pTiming; + + if (pInfoFrameState != NULL) { + pVideoInfoFrameCtrl = &pInfoFrameState->videoCtrl; + pVendorInfoFrameCtrl = &pInfoFrameState->vendorCtrl; + + nvkms_memset(pVideoInfoFrameCtrl, NVT_INFOFRAME_CTRL_DONTCARE, + sizeof(*pVideoInfoFrameCtrl)); + + nvkms_memset(pVendorInfoFrameCtrl, NVT_INFOFRAME_CTRL_DONTCARE, + sizeof(*pVendorInfoFrameCtrl)); + } + + flags.source = NvKmsModeSourceUnknown; + + /* Is this an EDID mode? */ + pTiming = nvFindEdidNVT_TIMING(pDpyEvo, &modeTimings, pParams); + + if (pTiming != NULL) { + NVT_TIMING timing = *pTiming; + const NvBool is3DVisionStereo = + nvIs3DVisionStereoEvo(pParams->stereoMode); + + flags.source = NvKmsModeSourceEdid; + + /* Patch the mode for 3DVision. */ + if (is3DVisionStereo && + pDpyEvo->stereo3DVision.requiresModetimingPatching && + nvPatch3DVisionModeTimingsEvo(&timing, pDpyEvo, + &dummyInfoString)) { + flags.patchedStereoTimings = TRUE; + + /* + * Replace the client's modeTimings with the version + * patched for 3DVision stereo. + */ + NVT_TIMINGtoNvModeTimings(&timing, &modeTimings); + + /* Restore the yuv420 and hdmi3D flags from the client's mode. */ + modeTimings.yuv420Mode = pKmsMode->timings.yuv420Mode; + + /* Re-apply adjustments for hdmi3D. */ + nvKmsUpdateNvModeTimingsForHdmi3D(&modeTimings, pKmsMode->timings.hdmi3D); + + } + + /* Validate yuv420. */ + if (modeTimings.yuv420Mode != + GetYUV420Value(pDpyEvo, pParams, &timing)) { + return FALSE; + } + + /* Validate hdmi3D. */ + NvBool hdmi3D = FALSE; + NvBool hdmi3DAvailable = FALSE; + GetHdmi3DValue(pDpyEvo, pParams, &timing, &hdmi3D, &hdmi3DAvailable); + if ((modeTimings.hdmi3D != hdmi3D) && !hdmi3DAvailable) { + return FALSE; + } + + if (!!(timing.etc.flag & NVT_FLAG_DISPLAYID_T7_DSC_PASSTHRU)) { + flags.dscPassThrough = TRUE; + } + + if (pParams->stereoMode == NVKMS_STEREO_HDMI_3D) { + if (!nvDpyEvoSupportsHdmi3D(pDpyEvo)) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "HDMI 3D mode is selected, but " + "HDMI 3D is not supported by %s; HDMI 3D may not function " + "properly. This might happen if no EDID is available for " + "%s, if the display is not connected over HDMI, or if the " + "display does not support HDMI 3D.", pDpyEvo->name, + pDpyEvo->name); + } else if (!modeTimings.hdmi3D) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "HDMI 3D mode is selected, but the " + "currently selected mode is incompatible with HDMI 3D. " + "HDMI 3D will be disabled."); + } + } + + /* + * Compute the infoFrame control; this will be assigned to + * pTimingsEvo after ValidateMode has written to it. + */ + if (nvDpyIsHdmiEvo(pDpyEvo)) { + NvTiming_ConstructVideoInfoframeCtrl(&timing, pVideoInfoFrameCtrl); + + if (pVendorInfoFrameCtrl != NULL) { + // Currently hardcoded to send infoframe necessary for HDMI 1.4a 4kx2k extended modes. 
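+                /*
+                 * HDMI 1.4a signals its extended (e.g. 4Kx2K) formats
+                 * through the vendor-specific infoframe's HDMI_VIC
+                 * field rather than the AVI infoframe VIC, so for
+                 * NVT_TYPE_HDMI_EXT timings enable the VSIF and copy
+                 * the HDMI_VIC from the timing's status field below.
+                 */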
+                if (NVT_GET_TIMING_STATUS_TYPE(timing.etc.status) == NVT_TYPE_HDMI_EXT) {
+                    pVendorInfoFrameCtrl->Enable = 1;
+                    pVendorInfoFrameCtrl->VSIFVersion = 14;
+                    pVendorInfoFrameCtrl->HDMIFormat = NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_EXT;
+                    pVendorInfoFrameCtrl->HDMI_VIC = NVT_GET_TIMING_STATUS_SEQ(timing.etc.status);
+                    pVendorInfoFrameCtrl->ThreeDStruc = NVT_HDMI_VS_BYTE5_HDMI_3DS_NA;
+                    pVendorInfoFrameCtrl->ThreeDDetail = NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_NA;
+                    pVendorInfoFrameCtrl->MetadataPresent = 0;
+                    pVendorInfoFrameCtrl->MetadataType = NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_NA;
+                } else {
+                    pVendorInfoFrameCtrl->Enable = 1;
+                    pVendorInfoFrameCtrl->VSIFVersion = 14;
+                    pVendorInfoFrameCtrl->HDMIFormat = NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_NONE;
+                }
+            }
+        }
+
+        goto done;
+    }
+
+    /* Otherwise, is this a VESA mode? */
+
+    if (IsVesaMode(&modeTimings, pParams)) {
+        flags.source = NvKmsModeSourceVesa;
+        goto done;
+    }
+
+    /*
+     * Otherwise, this must be a user-specified mode; no metadata changes
+     * are needed.
+     */
+
+done:
+    *pFlags = flags;
+    pKmsMode->timings = modeTimings;
+
+    return TRUE;
+}
+
+/*!
+ * Validate the NvKmsMode.
+ *
+ * \param[in]  pDpyEvo          The dpy for whom the mode is considered.
+ * \param[in]  pParams          The NvKmsModeValidationParams.
+ * \param[in]  pKmsMode         The mode to be considered.
+ * \param[in]  pViewPortSizeIn  The requested ViewPortIn size; may be NULL.
+ * \param[in]  pViewPortOut     The requested ViewPortOut; may be NULL.
+ * \param[in]  pDpyColor        The dpy color configuration to use.
+ * \param[out] pTimingsEvo      The EVO mode timings to be programmed in hardware.
+ * \param[out] pInfoFrameState  The InfoFrame state for the mode; may be NULL.
+ *
+ * \return  If the mode is valid, return TRUE and populate pTimingsEvo.
+ *          If the mode is not valid, return FALSE.
+ */
+NvBool nvValidateModeForModeset(NVDpyEvoRec *pDpyEvo,
+                                const struct NvKmsModeValidationParams *pParams,
+                                const struct NvKmsMode *pKmsMode,
+                                const struct NvKmsSize *pViewPortSizeIn,
+                                const struct NvKmsRect *pViewPortOut,
+                                NVDpyAttributeColor *pDpyColor,
+                                NVHwModeTimingsEvo *pTimingsEvo,
+                                NVDispHeadInfoFrameStateEvoRec *pInfoFrameState)
+{
+    EvoValidateModeFlags flags;
+    struct NvKmsMode kmsMode = *pKmsMode;
+    struct NvKmsModeValidationValidSyncs dummyValidSyncs;
+
+    nvkms_memset(pTimingsEvo, 0, sizeof(*pTimingsEvo));
+
+    if (!ConstructModeTimingsMetaData(pDpyEvo,
+                                      pParams,
+                                      &kmsMode,
+                                      &flags,
+                                      pInfoFrameState)) {
+        return FALSE;
+    }
+
+    /* Validate the (possibly patched) local copy of the mode. */
+    if (!ValidateModeTimings(pDpyEvo,
+                             &kmsMode,
+                             &flags,
+                             pParams,
+                             &dummyInfoString,
+                             &dummyValidSyncs)) {
+        return FALSE;
+    }
+
+    if (!nvConstructHwModeTimingsEvo(pDpyEvo,
+                                     &kmsMode,
+                                     pViewPortSizeIn,
+                                     pViewPortOut,
+                                     flags.dscPassThrough,
+                                     pDpyColor,
+                                     pTimingsEvo,
+                                     pParams,
+                                     &dummyInfoString)) {
+        return FALSE;
+    }
+
+    return TRUE;
+}
diff --git a/src/nvidia-modeset/src/nvkms-modeset.c b/src/nvidia-modeset/src/nvkms-modeset.c
new file mode 100644
index 0000000..d208cb5
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-modeset.c
@@ -0,0 +1,4292 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * The EVO modeset sequence is structured to minimize changes to the + * hardware from one modeset to the next, and to minimize the number + * of UPDATE methods that are programmed. + * + * Software state is tracked in three different structures: + * + * (1) NVDispEvo::headState - This is the NVKMS record of what has + * been programmed in the hardware, for all heads on the disp. + * + * (2) NvKmsSetModeRequest - This is the NVKMS client's description of + * what changes are requested. Note that clients can just request to + * change specific heads on specific disps. Other heads/disps should + * retain their current configuration across the modeset. + * + * (3) NVProposedModeSetHwState - This describes the hardware state + * that is desired at the end of the modeset. It is assigned by + * considering the current state (NVDispEvo::headState) and applying + * any client-requested changes (NvKmsSetModeRequest). + * + * The intended flow is: + * + * - Assign NVProposedModeSetHwState, given NVDispEvo::headState and + * NvKmsSetModeRequest, noting which heads are changing. + * - Check whether the proposed state is valid, and fail the modeset + * if anything about the proposed configuration is invalid. + * + * NOTE: Nothing before this point in the sequence should alter NVKMS + * software state, or program hardware. Also, to the extent + * possible, we should avoid failing the modeset after this point in + * the sequence, because this is when we start altering software and + * hardware state. + * + * - Notify RM that the modeset is starting. + * - Reset the EVO locking state machine. + * - For each disp: + * - For each head: + * - Shut down newly unused heads + * - For each head: + * - Apply the requested changes. + * - Send evo UPDATE method + * - For each head: + * - Perform post-UPDATE work + * - Update the EVO locking state machine. + * - Notify RM that the modeset is complete. + * - Populate the reply structure returned to the NVKMS client. + * + * + * TODO: + * - Would it be worthwhile to centralize SOR (re)assignment, disp-wide, + * in ApplyProposedModeSetHwStateOneDisp() between the calls to + * ApplyProposedModeSetStateOneApiHeadShutDown() and + * ApplyProposedModeSetHwStateOneHeadPreUpdate()? 
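+ *
+ * The functions below follow this flow: AssignProposedModeSetHwState()
+ * builds the proposed state from the current state and the client's
+ * request, the AssignProposedHwHeads*() helpers map api-heads onto
+ * hardware heads, and the Apply*() functions named above then program
+ * the hardware.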
+ */ + +#include "nvkms-evo.h" +#include "nvkms-types.h" +#include "nvkms-dpy.h" +#include "nvkms-rm.h" +#include "nvkms-hdmi.h" +#include "nvkms-hw-flip.h" +#include "nvkms-flip.h" +#include "nvkms-3dvision.h" +#include "nvkms-modepool.h" +#include "nvkms-prealloc.h" +#include "nvkms-private.h" +#include "nvkms-vrr.h" +#include "nvkms-lut.h" +#include "nvkms-dma.h" + +#include "dp/nvdp-connector.h" +#include "dp/nvdp-device.h" + +#include "nvkms-api.h" + +#include "nvkms-modeset.h" +#include "nvkms-modeset-types.h" +#include "nvkms-modeset-workarea.h" +#include "nvkms-attributes.h" +#include "nvkms-headsurface-config.h" + +static NvBool +GetColorSpaceAndColorRange( + const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + const struct NvKmsSetModeOneHeadRequest *pRequestHead, + NVDpyAttributeColor *pDpyColor); + +static void +ClearProposedModeSetHwState(const NVDevEvoRec *pDevEvo, + NVProposedModeSetHwState *pProposed, + const NvBool committed) +{ + const NVDispEvoRec *pDispEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 head; + + if (!committed) { + /* Free new allocated RM display IDs for changed heads */ + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposed->disp[dispIndex].apiHead[apiHead]; + + if (!pProposedApiHead->changed || + (pProposedApiHead->activeRmId == 0x0)) { + continue; + } + nvRmFreeDisplayId(pDispEvo, pProposedApiHead->activeRmId); + } + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposed->disp[dispIndex].head[head]; + nvDPLibFreeModesetState(pProposedHead->pDpLibModesetState); + } + } + + nvkms_memset(pProposed, 0 , sizeof(*pProposed)); +} + +/* + * Inherit the previous modeset state as part of this modeset if: + * - The requesting client is not the internal NVKMS client (i.e., this is not + * a console restore modeset). + * - There is no modeset ownership or sub-ownership change since the last modeset. + */ +static NvBool +InheritPreviousModesetState(const NVDevEvoRec *pDevEvo, + const struct NvKmsPerOpenDev *pCurrentModesetOpenDev) +{ + return (pCurrentModesetOpenDev != pDevEvo->pNvKmsOpenDev) && + !pDevEvo->modesetOwnerOrSubOwnerChanged; +} + +/*! + * Get the NVHwModeTimingsEvo for the mode requested by the client. + * + * NvKmsSetModeOneHeadRequest::mode specifies mode timings in a + * hardware-neutral format, along with mode validation parameters and + * the dpyIdList on which to set the mode. Validate the requested + * mode and compute NVHwModeTimingsEvo for it. + * + * \param[in] pDispEvo The disp of the dpyIdList and head. + * \param[in] pRequestHead The mode, mode validation parameters, dpyIdList, + * and head requested by the client. + * \param[out] pTimings The mode timings to program in the hardware. + * + * \return Return TRUE if the requested mode is valid and pTimings + * could be assigned. Otherwise, return FALSE. 
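+ *
+ * \param[in]  apiHead          The api-head the mode is requested for.
+ * \param[out] pDpyColor        The dpy color state chosen for the mode;
+ *                              may be NULL.
+ * \param[out] pInfoFrameState  The InfoFrame state for the mode; may be
+ *                              NULL.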
+ */ +NvBool +nvGetHwModeTimings(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + const struct NvKmsSetModeOneHeadRequest *pRequestHead, + NVHwModeTimingsEvo *pTimings, + NVDpyAttributeColor *pDpyColor, + NVDispHeadInfoFrameStateEvoRec *pInfoFrameState) +{ + NVDpyEvoPtr pDpyEvo; + NVDpyAttributeColor dpyColor = { }; + + if (nvDpyIdListIsEmpty(pRequestHead->dpyIdList)) { + return TRUE; + } + + pDpyEvo = nvGetOneArbitraryDpyEvo(pRequestHead->dpyIdList, pDispEvo); + + if (pDpyEvo == NULL) { + return FALSE; + } + + if (!GetColorSpaceAndColorRange(pDispEvo, apiHead, pRequestHead, + &dpyColor)) { + return FALSE; + } + + if (!nvValidateModeForModeset(pDpyEvo, + &pRequestHead->modeValidationParams, + &pRequestHead->mode, + &pRequestHead->viewPortSizeIn, + pRequestHead->viewPortOutSpecified ? + &pRequestHead->viewPortOut : NULL, + &dpyColor, + pTimings, + pInfoFrameState)) { + return FALSE; + } + + if (pDpyColor != NULL) { + *pDpyColor = dpyColor; + } + + return TRUE; +} + +static NvBool IsPreSyncptSpecified( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + const struct NvKmsFlipCommonParams *pParams) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pParams->layer[layer].syncObjects.specified && + pParams->layer[layer].syncObjects.val.useSyncpt && + pParams->layer[layer].syncObjects.val.u.syncpts.pre.type != + NVKMS_SYNCPT_TYPE_NONE) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool +GetColorSpaceAndColorRange( + const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + const struct NvKmsSetModeOneHeadRequest *pRequestHead, + NVDpyAttributeColor *pDpyColor) +{ + enum NvKmsOutputColorimetry colorimetry; + enum NvKmsDpyAttributeColorRangeValue requestedColorRange; + enum NvKmsDpyAttributeColorBpcValue requestedColorBpc; + enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace; + NVDpyEvoRec *pOneArbitraryDpyEvo = + nvGetOneArbitraryDpyEvo(pRequestHead->dpyIdList, pDispEvo); + + if (pRequestHead->colorSpaceSpecified) { + const NVDpyEvoRec *pDpyEvo; + + /* + * There could be multiple DPYs driven by this head. For each DPY, + * validate that the requested colorspace and color range is valid. + */ + FOR_ALL_EVO_DPYS(pDpyEvo, pRequestHead->dpyIdList, pDispEvo) { + if (!nvDpyValidateColorSpace(pDpyEvo, pRequestHead->colorSpace)) { + return FALSE; + } + } + + requestedColorSpace = pRequestHead->colorSpace; + } else { + requestedColorSpace = pOneArbitraryDpyEvo->requestedColorSpace; + } + + if (pRequestHead->colorRangeSpecified) { + requestedColorRange = pRequestHead->colorRange; + } else { + requestedColorRange = pOneArbitraryDpyEvo->requestedColorRange; + } + + if (pRequestHead->colorBpcSpecified) { + requestedColorBpc = pRequestHead->colorBpc; + } else { + requestedColorBpc = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN; + } + + if (pRequestHead->flip.colorimetry.specified) { + colorimetry = pRequestHead->flip.colorimetry.val; + } else { + colorimetry = + pDispEvo->apiHeadState[apiHead].attributes.color.colorimetry; + } + + /* + * Choose current colorSpace and colorRange based on the current mode + * timings and the requested color space and range. 
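+     *
+     * When the client did not specify a value above, the dpy's
+     * requested colorspace/range (or BPC_UNKNOWN for bpc, and the
+     * api-head's current colorimetry) is passed through, and
+     * nvChooseCurrentColorSpaceAndRangeEvo() resolves the final
+     * combination against the mode's YUV 4:2:0 state.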
+ */ + if (!nvChooseCurrentColorSpaceAndRangeEvo(pOneArbitraryDpyEvo, + pRequestHead->mode.timings.yuv420Mode, + colorimetry, + requestedColorSpace, + requestedColorBpc, + requestedColorRange, + &pDpyColor->format, + &pDpyColor->bpc, + &pDpyColor->range)) { + return FALSE; + } + pDpyColor->colorimetry = colorimetry; + + return TRUE; +} + +static NvBool AssignProposedModeSetColorSpaceAndColorRangeSpecified( + const struct NvKmsSetModeOneHeadRequest *pRequestHead, + NVProposedModeSetStateOneApiHead *pProposedApiHead) +{ + /* + * When colorspace is specified in modeset request, it should + * match the proposed colorspace. + */ + if (pRequestHead->colorSpaceSpecified) { + NvBool ret = FALSE; + switch (pProposedApiHead->attributes.color.format) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + ret = (pRequestHead->colorSpace == + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB); + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: + ret = (pRequestHead->colorSpace == + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr422); + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + ret = (pRequestHead->colorSpace == + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr444); + break; + default: + break; + } + if (!ret) { + return ret; + } + } + + /* + * When color bpc is specified in modeset request, it should + * match the proposed color bpc. + */ + if (pRequestHead->colorBpcSpecified && + (pProposedApiHead->attributes.color.bpc != pRequestHead->colorBpc)) { + return FALSE; + } + + /* + * When color range is specified in modeset request, it should + * match the proposed color range. + */ + if (pRequestHead->colorRangeSpecified && + (pProposedApiHead->attributes.color.range != pRequestHead->colorRange)) { + return FALSE; + } + + pProposedApiHead->colorSpaceSpecified = pRequestHead->colorSpaceSpecified; + pProposedApiHead->colorBpcSpecified = pRequestHead->colorBpcSpecified; + pProposedApiHead->colorRangeSpecified = pRequestHead->colorRangeSpecified; + return TRUE; +} + +/* count existing unchanged and new vrr heads */ +static NvU32 CountProposedVrrApiHeads(NVDevEvoPtr pDevEvo, + const struct NvKmsSetModeRequest *pRequest) +{ + NvU32 sd; + NVDispEvoPtr pDispEvo; + NvU32 numVRRApiHeads = 0; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 apiHead; + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + const struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[sd]; + const struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[apiHead]; + + if (((pRequest->requestedDispsBitMask & (1 << sd)) == 0) || + ((pRequestDisp->requestedHeadsBitMask & (1 << apiHead)) == 0)) { + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + + if (pApiHeadState->timings.vrr.type != NVKMS_DPY_VRR_TYPE_NONE) { + numVRRApiHeads++; + } + } else { + const NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pRequestHead->dpyIdList, pDispEvo); + + if (pDpyEvo == NULL) { + continue; + } + + if (nvGetAllowedDpyVrrType(pDpyEvo, + &pRequestHead->mode.timings, + pRequestHead->modeValidationParams.stereoMode, + pRequestHead->allowGsync, + pRequestHead->allowAdaptiveSync) != + NVKMS_DPY_VRR_TYPE_NONE) { + numVRRApiHeads++; + } + } + } + } + + return numVRRApiHeads; +} + +static void AdjustHwModeTimingsForVrr(const NVDispEvoRec *pDispEvo, + const struct NvKmsSetModeOneHeadRequest *pRequestHead, + const NvU32 prohibitAdaptiveSync, + NVHwModeTimingsEvo *pTimings) +{ + NVDpyEvoPtr pDpyEvo = + 
nvGetOneArbitraryDpyEvo(pRequestHead->dpyIdList, pDispEvo); + if (pDpyEvo == NULL) { + return; + } + + const NvBool allowGsync = pRequestHead->allowGsync; + const enum NvKmsAllowAdaptiveSync allowAdaptiveSync = + prohibitAdaptiveSync ? NVKMS_ALLOW_ADAPTIVE_SYNC_DISABLED : + pRequestHead->allowAdaptiveSync; + const NvU32 vrrOverrideMinRefreshRate = pRequestHead->vrrOverrideMinRefreshRate; + const enum NvKmsDpyVRRType vrrType = + nvGetAllowedDpyVrrType(pDpyEvo, + &pRequestHead->mode.timings, + pRequestHead->modeValidationParams.stereoMode, + allowGsync, + allowAdaptiveSync); + + nvAdjustHwModeTimingsForVrrEvo(pDpyEvo, + vrrType, + vrrOverrideMinRefreshRate, + pTimings); +} + +/* + * Return whether headSurface is allowed. But, only honor the requestor's + * setting if they have modeset owner permission. Otherwise, inherit the cached + * value in pDevEvo. + */ +NvBool nvGetAllowHeadSurfaceInNvKms(const NVDevEvoRec *pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsSetModeRequest *pRequest) +{ + if (nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { + return pRequest->allowHeadSurfaceInNvKms; + } + + return pDevEvo->allowHeadSurfaceInNvKms; +} + +static void +InitNVProposedModeSetStateOneApiHead( + const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + NVProposedModeSetStateOneApiHead *pProposedApiHead) +{ +#if defined(DEBUG) + const NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pDispEvo->apiHeadState[apiHead].activeDpys, + pDispEvo); +#endif + + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + return; + } + + nvAssert(pDispEvo->apiHeadState[apiHead].hwHeadsMask != 0x0); + nvAssert(pDpyEvo != NULL); + + pProposedApiHead->hwHeadsMask = + pDispEvo->apiHeadState[apiHead].hwHeadsMask; + pProposedApiHead->timings = + pDispEvo->apiHeadState[apiHead].timings; + pProposedApiHead->dpyIdList = + pDispEvo->apiHeadState[apiHead].activeDpys; + pProposedApiHead->attributes = + pDispEvo->apiHeadState[apiHead].attributes; + pProposedApiHead->changed = FALSE; + pProposedApiHead->hs10bpcHint = + pDispEvo->apiHeadState[apiHead].hs10bpcHint; + pProposedApiHead->infoFrame = + pDispEvo->apiHeadState[apiHead].infoFrame; + pProposedApiHead->tf = pDispEvo->apiHeadState[apiHead].tf; + pProposedApiHead->hdrInfoFrameOverride = + pDispEvo->apiHeadState[apiHead].hdrInfoFrameOverride; + pProposedApiHead->hdrStaticMetadataLayerMask = + pDispEvo->apiHeadState[apiHead].hdrStaticMetadataLayerMask; + pProposedApiHead->viewPortPointIn = + pDispEvo->apiHeadState[apiHead].viewPortPointIn; + + NvU32 head; + NvU32 hwHeadCount = 0; + FOR_EACH_EVO_HW_HEAD_IN_MASK(pProposedApiHead->hwHeadsMask, + head) { + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + + nvAssert(pDpyEvo->pConnectorEvo == + pHeadState->pConnectorEvo); + + if (hwHeadCount == 0) { + pProposedApiHead->dscInfo = pHeadState->dscInfo; + pProposedApiHead->activeRmId = pHeadState->activeRmId; + pProposedApiHead->modeValidationParams = + pHeadState->modeValidationParams; + } else { + nvAssert(nvkms_memcmp(&pProposedApiHead->dscInfo, + &pHeadState->dscInfo, + sizeof(pProposedApiHead->dscInfo)) == 0x0); + nvAssert(pProposedApiHead->activeRmId == + pHeadState->activeRmId); + nvAssert(nvkms_memcmp(&pProposedApiHead->modeValidationParams, + &pHeadState->modeValidationParams, + sizeof(pProposedApiHead->modeValidationParams)) == 0x0); + } + } +} + +static void +InitProposedModeSetHwState(const NVDevEvoRec *pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + NVProposedModeSetHwState *pProposed) +{ + NvU32 sd; + NVDispEvoPtr pDispEvo; 
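+
+    /*
+     * Start from a zeroed proposed state.  Below, heads are either
+     * marked dirty for shutdown (when the previous modeset state can
+     * not be inherited) or seeded from the current per-head hardware
+     * state.
+     */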
+
+    nvkms_memset(pProposed, 0, sizeof(*pProposed));
+
+    /*
+     * If the previous modeset state cannot be inherited, then
+     * initialize the proposed modeset state to shut down all heads.
+     */
+    if (!InheritPreviousModesetState(pDevEvo, pOpenDev)) {
+        FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+            for (NvU32 head = 0; head < pDevEvo->numHeads; head++) {
+                NvU32 layer;
+                NVFlipEvoHwState *pFlip = &pProposed->sd[sd].head[head].flip;
+                pFlip->dirty.tf = TRUE;
+                pFlip->dirty.hdrStaticMetadata = TRUE;
+                for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+                    pFlip->dirty.layer[layer] = TRUE;
+                }
+            }
+
+            for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
+                pProposed->disp[sd].apiHead[apiHead].changed = TRUE;
+            }
+        }
+    } else {
+        FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+            for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
+                NVProposedModeSetStateOneApiHead *pProposedApiHead =
+                    &pProposed->disp[sd].apiHead[apiHead];
+
+                InitNVProposedModeSetStateOneApiHead(pDispEvo, apiHead,
+                                                     pProposedApiHead);
+
+                NvU32 head;
+                FOR_EACH_EVO_HW_HEAD_IN_MASK(pProposedApiHead->hwHeadsMask,
+                                             head) {
+                    const NVDispHeadStateEvoRec *pHeadState =
+                        &pDispEvo->headState[head];
+                    NVProposedModeSetHwStateOneHead *pProposedHead =
+                        &pProposed->disp[sd].head[head];
+
+                    nvInitFlipEvoHwState(pDevEvo, sd, head,
+                                         &pProposed->sd[sd].head[head].flip);
+
+                    pProposedHead->mergeHeadSection =
+                        pHeadState->mergeHeadSection;
+                    pProposedHead->timings = pHeadState->timings;
+                    pProposedHead->pConnectorEvo = pHeadState->pConnectorEvo;
+                    pProposedHead->hdmiFrlConfig = pHeadState->hdmiFrlConfig;
+                    pProposedHead->audio = pHeadState->audio;
+                }
+            }
+        }
+    }
+}
+
+static NvBool
+AssignProposedModeSetNVFlipEvoHwState(
+    NVDevEvoRec *pDevEvo,
+    const struct NvKmsPerOpenDev *pOpenDev,
+    const NvU32 sd,
+    const NvU32 head,
+    const struct NvKmsSetModeOneHeadRequest *pRequestHead,
+    NVFlipEvoHwState *pFlip,
+    NVProposedModeSetHwStateOneHead *pProposedHead,
+    const NvBool commit)
+{
+    /*
+     * Clear the flipStates of all layers:
+     *
+     * The current flipState of the main layer may still contain
+     * old surfaces (e.g., headSurface) that are no longer
+     * desirable or compatible with the new modeset
+     * configuration.
+     *
+     * Function ApplyProposedModeSetHwStateOneHeadShutDown() clears
+     * pSdHeadState and disables all layers. It is not possible to
+     * re-apply the existing flipstates because hardware releases
+     * semaphores when layers get disabled; this results in a stuck
+     * channel if you re-apply the existing flipstate which has
+     * the old semaphore values.
+     */
+
+    nvClearFlipEvoHwState(pFlip);
+
+    if (commit) {
+        NvU32 layer;
+
+        pFlip->dirty.tf = TRUE;
+        pFlip->dirty.hdrStaticMetadata = TRUE;
+        pFlip->dirty.olut = TRUE;
+
+        for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+            pFlip->dirty.layer[layer] = TRUE;
+        }
+    }
+
+    /*!
+     * The modeset path should not request a pre-syncpt, as it will
+     * not progress: the modeset will update all of the Core and
+     * Window method state together, and wait for the Core
+     * completion notifier to signal. If any of the Window
+     * channels is waiting for a semaphore acquire, then this
+     * will stall the Core notifier as well, since the Core and
+     * Window channels are interlocked.
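+     *
+     * So, rather than risk an indefinite stall, reject any modeset
+     * request that specifies a pre-syncpt; the check below enforces
+     * this.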
+ */ + if (pDevEvo->supportsSyncpts && + IsPreSyncptSpecified(pDevEvo, head, &pRequestHead->flip)) { + return FALSE; + } + + if (!nvUpdateFlipEvoHwState(pOpenDev, + pDevEvo, + sd, + head, + &pRequestHead->flip, + &pProposedHead->timings, + pProposedHead->mergeHeadSection, + pFlip, + FALSE /* allowVrr */)) { + return FALSE; + } + + /* + * EVO3 hal simulates USE_CORE_LUT behavior. + * NVDisplay window channel does allow to change the input LUT + * on immediate flips, therefore force disable tearing + * if LUT is specified. + * + * XXX NVKMS TODO: Implement separate input programming for + * base and overlay layers and remove code block. + */ + if ((pRequestHead->flip.lut.input.specified || + pRequestHead->flip.lut.output.specified) && + !pDevEvo->hal->caps.supportsCoreLut) { + pFlip->layer[NVKMS_MAIN_LAYER].tearing = FALSE; + } + + return TRUE; +} + +static +NvBool AssignProposedHwHeadsForDsiConnector( + const NVDispEvoRec *pDispEvo, + NVProposedModeSetHwStateOneDisp *pProposedDisp, + NvU32 *pFreeHwHeadsMask) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU32 freeHwHeadsMask = *pFreeHwHeadsMask; + NvU32 ret = TRUE; + + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + const NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pProposedApiHead->dpyIdList, pDispEvo); + + if (!pProposedApiHead->changed || + nvDpyIdListIsEmpty(pProposedApiHead->dpyIdList) || + (pDpyEvo->pConnectorEvo->signalFormat != + NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI)) { + continue; + } + + if (pProposedApiHead->hwHeadsMask == 0x0) { + /* DSI supports only HEAD0 assignment */ + if ((freeHwHeadsMask & NVBIT(0)) != 0x0) { + nvAssert(pDevEvo->apiHead[apiHead].numLayers <= + pDevEvo->head[0].numLayers); + nvAssignHwHeadsMaskProposedApiHead(pProposedApiHead, 0x1); + freeHwHeadsMask &= ~pProposedApiHead->hwHeadsMask; + } else { + ret = FALSE; + } + } else { + nvAssert(pProposedApiHead->hwHeadsMask == 0x1); + } + + /* There can be only one DSI dpy */ + break; + } + + *pFreeHwHeadsMask &= freeHwHeadsMask; + return ret; +} + +static NvBool HeadIsFree(const NVDevEvoRec *pDevEvo, + const NvU32 apiHead, + const NvU32 freeHwHeadsMask, + const NvU32 head) +{ + if ((NVBIT(head) & freeHwHeadsMask) == 0x0) { + return FALSE; + } + + if (pDevEvo->apiHead[apiHead].numLayers > + pDevEvo->head[head].numLayers) { + return FALSE; + } + + return TRUE; +} + +static NvU32 GetFreeHeads(const NVDevEvoRec *pDevEvo, + const NvU32 apiHead, + const NVDpyEvoRec *pDpyEvo, + const NvU32 freeHwHeadsMask) +{ + NvU32 foundHead = NV_INVALID_HEAD; + + for (NvS32 head = (pDevEvo->numHeads - 1); head >= 0; head--) { + if (HeadIsFree(pDevEvo, apiHead, freeHwHeadsMask, head)) { + if ((foundHead == NV_INVALID_HEAD) || + (pDevEvo->head[head].numLayers < + pDevEvo->head[foundHead].numLayers)) { + foundHead = head; + } + } + } + + return foundHead; +} + +static +NvBool AssignProposedHwHeadsForDPSerializer( + const NVDispEvoRec *pDispEvo, + NVProposedModeSetHwStateOneDisp *pProposedDisp, + NvU32 *pFreeHwHeadsMask) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU32 freeHwHeadsMask = *pFreeHwHeadsMask; + NvU32 ret = TRUE; + NvU32 boundHead; + + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + const NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pProposedApiHead->dpyIdList, pDispEvo); + + if (!pProposedApiHead->changed || + 
nvDpyIdListIsEmpty(pProposedApiHead->dpyIdList) ||
+            !nvConnectorIsDPSerializer(pDpyEvo->pConnectorEvo)) {
+            continue;
+        }
+
+        if (pDpyEvo->pConnectorEvo->dpSerializerCaps.supportsMST) {
+            /* MST case: use the stream index assigned earlier */
+            boundHead = pDpyEvo->dp.serializerStreamIndex;
+        } else {
+            /*
+             * SST case: find a free head, in case there are multiple
+             * DP serializers in SST mode.
+             */
+            boundHead = GetFreeHeads(pDevEvo, apiHead, pDpyEvo,
+                                     freeHwHeadsMask);
+        }
+
+        if (pProposedApiHead->hwHeadsMask == 0x0) {
+            if ((freeHwHeadsMask & NVBIT(boundHead)) != 0x0) {
+                nvAssert(pDevEvo->apiHead[apiHead].numLayers <=
+                         pDevEvo->head[boundHead].numLayers);
+                nvAssignHwHeadsMaskProposedApiHead(
+                    pProposedApiHead, NVBIT(boundHead));
+                freeHwHeadsMask &= ~pProposedApiHead->hwHeadsMask;
+            } else {
+                ret = FALSE;
+                break;
+            }
+        } else {
+            nvAssert(pProposedApiHead->hwHeadsMask ==
+                     NVBIT(boundHead));
+        }
+    }
+
+    *pFreeHwHeadsMask &= freeHwHeadsMask;
+    return ret;
+}
+
+static NvU32 GetFree2Heads1ORHeadsMask(const NVDevEvoRec *pDevEvo,
+                                       const NvU32 apiHead,
+                                       const NvU32 freeHwHeadsMask)
+{
+    /*
+     * The hardware allows 2Heads1OR mode to be set up between
+     * head-0 and head-1, or head-2 and head-3, only.
+     */
+
+    if (HeadIsFree(pDevEvo, apiHead, freeHwHeadsMask, 0) &&
+        HeadIsFree(pDevEvo, apiHead, freeHwHeadsMask, 1)) {
+        return NVBIT(0) | NVBIT(1);
+    }
+
+    if (HeadIsFree(pDevEvo, apiHead, freeHwHeadsMask, 2) &&
+        HeadIsFree(pDevEvo, apiHead, freeHwHeadsMask, 3)) {
+        return NVBIT(2) | NVBIT(3);
+    }
+
+    return 0;
+}
+
+static NvBool AssignProposedHwHeadsGeneric(
+    const NVDispEvoRec *pDispEvo,
+    NVProposedModeSetHwStateOneDisp *pProposedDisp,
+    NvU32 *pFreeHwHeadsMask,
+    const NvBool b2Heads1Or)
+{
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    NvU32 freeHwHeadsMask = *pFreeHwHeadsMask;
+    NvU32 ret = TRUE;
+
+    for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
+        NVProposedModeSetStateOneApiHead *pProposedApiHead =
+            &pProposedDisp->apiHead[apiHead];
+        const NVDpyEvoRec *pDpyEvo =
+            nvGetOneArbitraryDpyEvo(pProposedApiHead->dpyIdList, pDispEvo);
+
+        if (!pProposedApiHead->changed ||
+            nvDpyIdListIsEmpty(pProposedApiHead->dpyIdList) ||
+            (pProposedApiHead->hwHeadsMask != 0x0)) {
+            continue;
+        }
+
+        /*
+         * The hardware-head assignment for the DSI and dp-serializer dpys
+         * should already be done.
+         */
+        nvAssert((pDpyEvo->pConnectorEvo->signalFormat !=
+                  NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) &&
+                 !nvConnectorIsDPSerializer(pDpyEvo->pConnectorEvo));
+
+        NvU32 foundHeadsMask = 0x0;
+
+        if (b2Heads1Or) {
+            if (!nvEvoUse2Heads1OR(
+                    pDpyEvo,
+                    &pProposedApiHead->timings,
+                    &pProposedApiHead->modeValidationParams)) {
+                continue;
+            }
+            foundHeadsMask = GetFree2Heads1ORHeadsMask(pDevEvo, apiHead,
+                                                       freeHwHeadsMask);
+        } else {
+            nvAssert(!nvEvoUse2Heads1OR(
+                        pDpyEvo,
+                        &pProposedApiHead->timings,
+                        &pProposedApiHead->modeValidationParams));
+
+            NvU32 foundHead = GetFreeHeads(pDevEvo, apiHead, pDpyEvo,
+                                           freeHwHeadsMask);
+            if (foundHead != NV_INVALID_HEAD) {
+                foundHeadsMask = NVBIT(foundHead);
+            }
+        }
+
+        if (foundHeadsMask != 0x0) {
+            nvAssignHwHeadsMaskProposedApiHead(
+                pProposedApiHead, foundHeadsMask);
+            freeHwHeadsMask &= ~pProposedApiHead->hwHeadsMask;
+        } else {
+            ret = FALSE;
+            goto done;
+        }
+    }
+
+done:
+    *pFreeHwHeadsMask &= freeHwHeadsMask;
+    return ret;
+}
+
+static void ClearHwHeadsMaskOneApiHead(const NVDispEvoRec *pDispEvo,
+                                       const NvU32 apiHead,
+                                       NVProposedModeSetHwState *pProposed)
+{
+    const NvU32 sd = pDispEvo->displayOwner;
+    NVProposedModeSetHwStateOneDisp *pProposedDisp =
+        &pProposed->disp[sd];
+    NVProposedModeSetStateOneApiHead *pProposedApiHead =
+        &pProposedDisp->apiHead[apiHead];
+    NvU32 head;
+
+    FOR_EACH_EVO_HW_HEAD_IN_MASK(pProposedApiHead->hwHeadsMask, head) {
+        nvkms_memset(&pProposedDisp->head[head],
+                     0, sizeof(pProposedDisp->head[head]));
+        nvkms_memset(&pProposed->sd[sd].head[head],
+                     0, sizeof(pProposed->sd[sd].head[head]));
+    }
+    nvAssignHwHeadsMaskProposedApiHead(pProposedApiHead, 0);
+}
+
+static
+void ClearIncompatibleHwHeadsMaskOneDisp(const NVDispEvoRec *pDispEvo,
+                                         NVProposedModeSetHwState *pProposed)
+{
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    const NvU32 sd = pDispEvo->displayOwner;
+    NVProposedModeSetHwStateOneDisp *pProposedDisp =
+        &pProposed->disp[sd];
+
+    for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
+        NVProposedModeSetStateOneApiHead *pProposedApiHead =
+            &pProposedDisp->apiHead[apiHead];
+        const NVDpyEvoRec *pDpyEvo =
+            nvGetOneArbitraryDpyEvo(pProposedApiHead->dpyIdList, pDispEvo);
+        if (!pProposedApiHead->changed || (pDpyEvo == NULL)) {
+            continue;
+        }
+
+        NvBool current2Heads1OrState =
+            (nvPopCount32(pProposedApiHead->hwHeadsMask) > 1);
+
+        const NvBool new2Heads1OrState =
+            nvEvoUse2Heads1OR(pDpyEvo,
+                              &pProposedApiHead->timings,
+                              &pProposedApiHead->modeValidationParams);
+
+        if (new2Heads1OrState != current2Heads1OrState) {
+            ClearHwHeadsMaskOneApiHead(pDispEvo, apiHead, pProposed);
+        }
+    }
+}
+
+static
+NvBool AssignProposedHwHeadsOneDisp(const NVDispEvoRec *pDispEvo,
+                                    NVProposedModeSetHwState *pProposed)
+{
+    const NvU32 sd = pDispEvo->displayOwner;
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    NVProposedModeSetHwStateOneDisp *pProposedDisp =
+        &pProposed->disp[sd];
+    NvU32 freeHwHeadsMask;
+    /*
+     * In the first pass, keep all existing api-head to
+     * hardware-head(s) assignments unchanged.
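+     *
+     * If that fails (for example, an api-head that now needs 2Heads1OR
+     * can not find a free head-0/head-1 or head-2/head-3 pair because
+     * an existing assignment holds one of those heads), the second
+     * pass releases the assignments of all changed api-heads and
+     * retries from scratch.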
+     */
+    NvU32 pass = 0;
+
+    ClearIncompatibleHwHeadsMaskOneDisp(pDispEvo, pProposed);
+
+repeatHwHeadsAssignment:
+    freeHwHeadsMask = NVBIT(pDevEvo->numHeads) - 1;
+    for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
+        const NVProposedModeSetStateOneApiHead *pProposedApiHead =
+            &pProposedDisp->apiHead[apiHead];
+        nvAssert((pProposedApiHead->hwHeadsMask == 0) ||
+                 !nvDpyIdListIsEmpty(pProposedApiHead->dpyIdList));
+        freeHwHeadsMask &= ~pProposedApiHead->hwHeadsMask;
+    }
+
+    if (!AssignProposedHwHeadsForDsiConnector(pDispEvo, pProposedDisp,
+                                              &freeHwHeadsMask) ||
+        !AssignProposedHwHeadsForDPSerializer(pDispEvo, pProposedDisp,
+                                              &freeHwHeadsMask) ||
+        !AssignProposedHwHeadsGeneric(pDispEvo, pProposedDisp,
+                                      &freeHwHeadsMask,
+                                      TRUE /* b2Heads1Or */) ||
+        !AssignProposedHwHeadsGeneric(pDispEvo, pProposedDisp,
+                                      &freeHwHeadsMask,
+                                      FALSE /* b2Heads1Or */)) {
+        if (pass == 1) {
+            return FALSE;
+        }
+
+        /*
+         * In the second pass, do a fresh hardware-head(s) assignment
+         * for all changed api-heads.
+         */
+        for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
+            if (pProposedDisp->apiHead[apiHead].changed) {
+                ClearHwHeadsMaskOneApiHead(pDispEvo, apiHead, pProposed);
+            }
+        }
+        pass = 1;
+        goto repeatHwHeadsAssignment;
+    }
+
+    return TRUE;
+}
+
+/*!
+ * Assign the NVProposedModeSetHwState structure.
+ *
+ * Use the current hardware state, and the requested changes in
+ * pRequest, to determine what the desired resulting hardware
+ * configuration for the device should be.
+ *
+ * \param[in]  pDevEvo    The device whose hardware state is to be changed.
+ * \param[in]  pOpenDev   The pOpenDev of the client doing the modeset.
+ * \param[in]  pRequest   The requested changes to apply to the hardware state.
+ * \param[out] pReply     The reply structure for the client; if we cannot
+ *                        apply some portion of pRequest, set the
+ *                        corresponding status field in pReply to a
+ *                        non-SUCCESS value.
+ * \param[out] pProposed  The proposed resulting hardware state.
+ *
+ * \return  If the requested changes could be applied to pProposed,
+ *          return TRUE.  If the requested changes could not be applied
+ *          to pProposed, set the corresponding status field in pReply
+ *          to a non-SUCCESS value and return FALSE.
+ */
+static NvBool
+AssignProposedModeSetHwState(NVDevEvoRec *pDevEvo,
+                             const struct NvKmsPerOpenDev *pOpenDev,
+                             const struct NvKmsSetModeRequest *pRequest,
+                             struct NvKmsSetModeReply *pReply,
+                             NVProposedModeSetHwState *pProposed)
+{
+    NvU32 sd;
+    NVDispEvoPtr pDispEvo;
+    NvBool ret = TRUE;
+    /* If more than one head will enable VRR on Pascal, disallow Adaptive-Sync */
+    const NvBool prohibitAdaptiveSync =
+        (!pDevEvo->hal->caps.supportsDisplayRate &&
+         (CountProposedVrrApiHeads(pDevEvo, pRequest) > 1));
+
+    /* Initialize pProposed with the current hardware configuration. */
+    InitProposedModeSetHwState(pDevEvo, pOpenDev, pProposed);
+
+    /* Update pProposed with the requested changes from the client.
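+     * Disps not named in requestedDispsBitMask, and heads not named in
+     * a disp's requestedHeadsBitMask, are left alone and keep their
+     * current configuration.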
*/ + + pProposed->allowHeadSurfaceInNvKms = + nvGetAllowHeadSurfaceInNvKms(pDevEvo, pOpenDev, pRequest); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + const struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[sd]; + + if ((pRequest->requestedDispsBitMask & (1 << sd)) == 0) { + continue; + } + + NVProposedModeSetHwStateOneDisp *pProposedDisp = + &pProposed->disp[sd]; + + pDispEvo = pDevEvo->pDispEvo[sd]; + + /* Construct the per api head proposed modeset state */ + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + const struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[apiHead]; + NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + const NVDpyEvoRec *pDpyEvo; + NvU32 layer; + + if ((pRequestDisp->requestedHeadsBitMask & (1 << apiHead)) == 0) { + /* + * Just leave the head alone so it keeps its current + * configuration. + */ + continue; + } + + pDpyEvo = nvGetOneArbitraryDpyEvo(pRequestHead->dpyIdList, pDispEvo); + if (pDpyEvo == NULL) { + /* + * If newDpyIdList is empty or does not find a valid dpy in + * newDpyIdList, then the head should be disabled. + * Clear the pProposedHead, so that no state leaks to the new + * configuration. + */ + ClearHwHeadsMaskOneApiHead(pDispEvo, apiHead, pProposed); + nvkms_memset(pProposedApiHead, 0, sizeof(*pProposedApiHead)); + pProposedApiHead->changed = TRUE; + continue; + } + + pProposedApiHead->changed = TRUE; + pProposedApiHead->dpyIdList = pRequestHead->dpyIdList; + pProposedApiHead->activeRmId = + nvRmAllocDisplayId(pDispEvo, pProposedApiHead->dpyIdList); + if (pProposedApiHead->activeRmId == 0x0) { + /* XXX Need separate error code? */ + pReply->disp[sd].head[apiHead].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_DPY; + ret = FALSE; + continue; + } + + if ((nvDpyGetPossibleApiHeadsMask(pDpyEvo) & NVBIT(apiHead)) == 0) { + pReply->disp[sd].head[apiHead].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_DPY; + ret = FALSE; + continue; + } + + /* + * Get the requested modetimings for this head. If that + * fails, record in the reply that getting the mode + * failed. In the case of failure, continue to the next + * head so that if additional heads fail, we can report + * more complete failure information to the client. 
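+             *
+             * (For example, if two heads both request invalid modes, both
+             * pReply->disp[sd].head[apiHead].status fields end up set to
+             * NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE in a single
+             * validation pass, rather than only the first failure being
+             * reported.)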
+ */ + if (!nvGetHwModeTimings(pDispEvo, + apiHead, + pRequestHead, + &pProposedApiHead->timings, + &pProposedApiHead->attributes.color, + &pProposedApiHead->infoFrame)) { + pReply->disp[sd].head[apiHead].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE; + ret = FALSE; + continue; + } + + if (!AssignProposedModeSetColorSpaceAndColorRangeSpecified( + pRequestHead, pProposedApiHead)) { + pReply->disp[sd].head[apiHead].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE; + ret = FALSE; + continue; + } + + AdjustHwModeTimingsForVrr(pDispEvo, + pRequestHead, + prohibitAdaptiveSync, + &pProposedApiHead->timings); + + pProposedApiHead->stereo.mode = + pRequestHead->modeValidationParams.stereoMode; + pProposedApiHead->stereo.isAegis = pDpyEvo->stereo3DVision.isAegis; + pProposedApiHead->infoFrame.hdTimings = + nvEvoIsHDQualityVideoTimings(&pProposedApiHead->timings); + + pProposedApiHead->modeValidationParams = + pRequestHead->modeValidationParams; + + pProposedApiHead->attributes.digitalSignal = + nvGetDefaultDpyAttributeDigitalSignalValue(pDpyEvo->pConnectorEvo); + if (pProposedApiHead->timings.protocol == + NVKMS_PROTOCOL_SOR_HDMI_FRL) { + nvAssert(pProposedApiHead->attributes.digitalSignal == + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_TMDS); + pProposedApiHead->attributes.digitalSignal = + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_HDMI_FRL; + } + + pProposedApiHead->attributes.dvc = + pDpyEvo->currentAttributes.dvc; + + /* Image sharpening is available when scaling is enabled. */ + pProposedApiHead->attributes.imageSharpening.available = + nvIsImageSharpeningAvailable(&pProposedApiHead->timings.viewPort); + pProposedApiHead->attributes.imageSharpening.value = + pDpyEvo->currentAttributes.imageSharpening.value; + + /* + * If InheritPreviousModesetState() returns FALSE, it implies that + * there was a modeset ownership change since the last modeset. If + * input/output lut not specified by the new modeset owner then + * keep them disabled by default. + */ + if (!InheritPreviousModesetState(pDevEvo, pOpenDev)) { + pProposedApiHead->lut = pRequestHead->flip.lut; + + if (!pRequestHead->flip.lut.input.specified) { + pProposedApiHead->lut.input.specified = TRUE; + pProposedApiHead->lut.input.end = 0; + } + + if (!pRequestHead->flip.lut.output.specified) { + pProposedApiHead->lut.output.specified = TRUE; + pProposedApiHead->lut.output.enabled = FALSE; + } + } else if (pRequestHead->flip.lut.input.specified) { + pProposedApiHead->lut = pRequestHead->flip.lut; + } else { + pProposedApiHead->lut.input.specified = FALSE; + } + + if (pRequestHead->flip.tf.specified) { + pProposedApiHead->tf = pRequestHead->flip.tf.val; + + // If enabling HDR TF... + // XXX HDR TODO: Handle other transfer functions + if (pProposedApiHead->tf == NVKMS_OUTPUT_TF_PQ) { + // Cannot be an SLI configuration. 
+ // XXX HDR TODO: Test SLI Mosaic + HDR and remove this check + if (pDevEvo->numSubDevices > 1) { + ret = FALSE; + pReply->disp[sd].head[apiHead].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_FLIP; + continue; + } + + /* NVKMS_OUTPUT_TF_PQ requires the RGB color space */ + if (pProposedApiHead->attributes.color.format != + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) { + ret = FALSE; + pReply->disp[sd].head[apiHead].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_FLIP; + continue; + } + } + } + + if (pRequestHead->flip.hdrInfoFrame.specified) { + pProposedApiHead->hdrInfoFrameOverride = + pRequestHead->flip.hdrInfoFrame.enabled; + } + + for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) { + if (pRequestHead->flip.layer[layer].hdr.specified) { + if (pRequestHead->flip.layer[layer].hdr.enabled) { + pProposedApiHead->hdrStaticMetadataLayerMask |= + 1 << layer; + } else { + pProposedApiHead->hdrStaticMetadataLayerMask &= + ~(1 << layer); + } + } + } + + // If enabling HDR signaling... + // XXX HDR TODO: Handle other colorimetries + if (pProposedApiHead->hdrInfoFrameOverride || + (pProposedApiHead->hdrStaticMetadataLayerMask != 0) || + (pProposedApiHead->attributes.color.colorimetry == + NVKMS_OUTPUT_COLORIMETRY_BT2100)) { + const NVDpyEvoRec *pDpyEvo; + + // All dpys on apiHead must support HDR. + FOR_ALL_EVO_DPYS(pDpyEvo, + pProposedApiHead->dpyIdList, + pDispEvo) { + if (!nvDpyIsHDRCapable(pDpyEvo)) { + ret = FALSE; + pReply->disp[sd].head[apiHead].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_FLIP; + break; + } + } + + if (ret == FALSE) { + continue; + } + } + + if (pRequestHead->flip.viewPortIn.specified) { + pProposedApiHead->viewPortPointIn = + pRequestHead->flip.viewPortIn.point; + } + } /* apiHead */ + + if (!AssignProposedHwHeadsOneDisp(pDispEvo, pProposed)) { + pReply->disp[sd].status = + NVKMS_SET_MODE_ONE_DISP_STATUS_FAILED_TO_ASSIGN_HARDWARE_HEADS; + ret = FALSE; + continue; + } + + /* + * Construct the per hardware head proposed modeset/flip state, and + * assign the dependant per api head modeset parameters. + */ + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + const struct NvKmsSetModeOneHeadRequest *pRequestHead; + NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + const NvU32 primaryHead = + nvGetPrimaryHwHeadFromMask(pProposedApiHead->hwHeadsMask); + const NvU32 numMergeHeadSections = + nvPopCount32(pProposedApiHead->hwHeadsMask); + const NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pProposedApiHead->dpyIdList, pDispEvo); + NVProposedModeSetHwStateOneHead *pProposedPrimaryHead; + NvU32 secondaryMergeHeadSection = 1; + NvU32 head; + + nvAssert((pProposedApiHead->hwHeadsMask != 0x0) || + (pDpyEvo == NULL)); + + if (!pProposedApiHead->changed || + (pProposedApiHead->hwHeadsMask == 0x0)) { + continue; + } + + nvAssert(primaryHead != NV_INVALID_HEAD); + pProposedPrimaryHead = &pProposedDisp->head[primaryHead]; + + nvAssert((pRequestDisp->requestedHeadsBitMask & (1 << apiHead)) != 0); + pRequestHead = &pRequestDisp->head[apiHead]; + + enum NvKmsSetModeOneHeadStatus status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_SUCCESS; + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pProposedApiHead->hwHeadsMask, head) { + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + + pProposedHead->mergeHeadSection = + (head == primaryHead) ? 
0 : (secondaryMergeHeadSection++); + + if (!nvEvoGetSingleMergeHeadSectionHwModeTimings( + &pProposedApiHead->timings, + numMergeHeadSections, + &pProposedHead->timings)) { + status = NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE; + break; + } + + /* + * Broadcast the connector and the requested flip state across + * the corresponding hardware heads. + */ + pProposedHead->pConnectorEvo = pDpyEvo->pConnectorEvo; + if (!AssignProposedModeSetNVFlipEvoHwState( + pDevEvo, + pOpenDev, + sd, + head, + &pRequest->disp[sd].head[apiHead], + &pProposed->sd[sd].head[head].flip, + pProposedHead, + pRequest->commit)) { + status = NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_FLIP; + break; + } + } + + if (status != NVKMS_SET_MODE_ONE_HEAD_STATUS_SUCCESS) { + pReply->disp[sd].head[apiHead].status = status; + ret = FALSE; + continue; /* next api head */ + } + + nvAssert(nvPopCount32(pProposedApiHead->hwHeadsMask) <= 2); + + /* + * Query the per api head HDMI FRL configuration, and pass it to + * the primary head. Save the dsc info into the per api head + * proposed modeset state, to broadcast it onto all hardware heads + * during modeset. + */ + if (!nvHdmiFrlQueryConfig(pDpyEvo, + &pRequestHead->mode.timings, + &pProposedApiHead->timings, + &pProposedApiHead->attributes.color, + (nvPopCount32(pProposedApiHead->hwHeadsMask) > 1) + /* b2Heads1Or */, + &pProposedApiHead->modeValidationParams, + &pProposedPrimaryHead->hdmiFrlConfig, + &pProposedApiHead->dscInfo)) { + pReply->disp[sd].head[apiHead].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE; + ret = FALSE; + continue; + } + + /* + * Construct the api head audio state, and pass it + * to the primary hardware head. + */ + nvHdmiDpConstructHeadAudioState(pProposedApiHead->activeRmId, + pDpyEvo, + &pProposedPrimaryHead->audio); + + /* + * If the modeset is flipping to a depth 30 surface, record this as + * a hint to headSurface, so it can also allocate its surfaces at + * depth 30. + */ + { + const NVFlipEvoHwState *pFlip = + &pProposed->sd[sd].head[primaryHead].flip; + const NVSurfaceEvoRec *pSurfaceEvo = + pFlip->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT]; + + pProposedApiHead->hs10bpcHint = + (pSurfaceEvo != NULL) && + ((pSurfaceEvo->format == NvKmsSurfaceMemoryFormatA2B10G10R10) || + (pSurfaceEvo->format == NvKmsSurfaceMemoryFormatX2B10G10R10)); + } + } /* apiHead */ + } /* pDispEvo */ + + if (!ret) { + ClearProposedModeSetHwState(pDevEvo, pProposed, FALSE /* committed */); + } + + return ret; +} + + +/*! + * Validate the proposed configuration on the specified disp using IMP. + * + * \param[in] pDispEvo The disp to which pProposedDisp is to be applied. + * \param[in] pProposed The requested configuration. + * \param[in] pProposedDisp The requested configuration for this disp. + * \param[out] pWorkArea The scratch space for the current modeset request. + * + * \return If pProposedDisp passes IMP, return TRUE. Otherwise, + * return FALSE. + */ +static NvBool +ValidateProposedModeSetHwStateOneDispImp(NVDispEvoPtr pDispEvo, + const NVProposedModeSetHwState + *pProposed, + NVProposedModeSetHwStateOneDisp + *pProposedDisp, + NVModeSetWorkArea *pWorkArea) +{ + NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP]; + NvBool skipImpCheck = TRUE, requireBootClocks = FALSE; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 modesetRequestedHeadsMask = 0; + NVEvoReallocateBandwidthMode reallocBandwidth = pDevEvo->isSOCDisplay ? 
+ NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE : + NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE; + + nvkms_memset(&timingsParams, 0, sizeof(timingsParams)); + + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + const NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + + const NvBool skipImpCheckThisHead = + (pProposedApiHead->modeValidationParams.overrides & + NVKMS_MODE_VALIDATION_NO_EXTENDED_GPU_CAPABILITIES_CHECK) != 0; + + const NvBool requireBootClocksThisHead = + (pProposedApiHead->modeValidationParams.overrides & + NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS) != 0; + + NvU32 head; + + if (pProposedApiHead->hwHeadsMask == 0x0) { + continue; + } + + /* + * Don't try to downgrade heads which are not marked as changed. + * This could lead to unchanged/not-requested heads hogging all + * the disp bandwidth and preventing otherwise possible modesets, + * but it fixes the cases where we could have downgraded unchanged/ + * not-requested heads without NVKMS clients knowing about it. + * Even if we add some mechanism through the modeset reply to notify + * clients about such a change, not all clients might be in a position + * to handle it. This seems to be a fair trade-off for Orin, as by + * default all heads are initialized with minimal usage bounds. + */ + if (pProposedApiHead->changed) { + modesetRequestedHeadsMask |= pProposedApiHead->hwHeadsMask; + } + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pProposedApiHead->hwHeadsMask, head) { + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + + timingsParams[head].pConnectorEvo = pProposedHead->pConnectorEvo; + timingsParams[head].activeRmId = pProposedApiHead->activeRmId; + timingsParams[head].pixelDepth = + nvEvoDpyColorToPixelDepth(&pProposedApiHead->attributes.color); + timingsParams[head].pTimings = &pProposedHead->timings; + timingsParams[head].enableDsc = (pProposedApiHead->dscInfo.type != + NV_DSC_INFO_EVO_TYPE_DISABLED); + timingsParams[head].dscSliceCount = + pProposedApiHead->dscInfo.sliceCount; + timingsParams[head].possibleDscSliceCountMask = + pProposedApiHead->dscInfo.possibleSliceCountMask; + nvAssert(nvPopCount32(pProposedApiHead->hwHeadsMask) <= 2); + timingsParams[head].b2Heads1Or = + (nvPopCount32(pProposedApiHead->hwHeadsMask) > 1); + timingsParams[head].pUsage = + &pProposedHead->timings.viewPort.guaranteedUsage; + } + + skipImpCheck = skipImpCheck && skipImpCheckThisHead; + requireBootClocks = requireBootClocks || requireBootClocksThisHead; + } + + if (skipImpCheck && + reallocBandwidth == NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE) { + return TRUE; + } + + if (!nvValidateImpOneDispDowngrade(pDispEvo, timingsParams, + requireBootClocks, + reallocBandwidth, + modesetRequestedHeadsMask)) { + return FALSE; + } + + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + NvU32 primaryHead = nvGetPrimaryHwHeadFromMask( + pProposedApiHead->hwHeadsMask); + NvU32 head; + + if (!pProposedApiHead->changed || + (primaryHead == NV_INVALID_HEAD) || + (pProposedApiHead->dscInfo.type == NV_DSC_INFO_EVO_TYPE_DISABLED)) { + continue; + } + + pProposedApiHead->dscInfo.sliceCount = + timingsParams[primaryHead].dscSliceCount; + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pProposedApiHead->hwHeadsMask, head) { + nvAssert(timingsParams[head].dscSliceCount == + pProposedApiHead->dscInfo.sliceCount); + } + } + + if (pDevEvo->isSOCDisplay) { + NvBool ret; + struct NvKmsUsageBounds 
*guaranteedAndProposed =
+            nvCalloc(1, sizeof(*guaranteedAndProposed) *
+                        NVKMS_MAX_HEADS_PER_DISP);
+        if (guaranteedAndProposed == NULL) {
+            return FALSE;
+        }
+
+        for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
+            const NVProposedModeSetStateOneApiHead *pProposedApiHead =
+                &pProposedDisp->apiHead[apiHead];
+            NvU32 head;
+
+            FOR_EACH_EVO_HW_HEAD_IN_MASK(pProposedApiHead->hwHeadsMask, head) {
+                NVProposedModeSetHwStateOneHead *pProposedHead =
+                    &pProposedDisp->head[head];
+                const struct NvKmsUsageBounds *pProposedUsage;
+
+                if (pProposedApiHead->changed) {
+                    pProposedUsage = &pProposed->sd[0].head[head].flip.usage;
+                } else {
+                    pProposedUsage =
+                        &pDevEvo->gpus[0].headState[head].preallocatedUsage;
+                }
+
+                nvUnionUsageBounds(&pProposedHead->timings.viewPort.guaranteedUsage,
+                                   pProposedUsage, &guaranteedAndProposed[head]);
+                timingsParams[head].pUsage = &guaranteedAndProposed[head];
+            }
+        }
+
+        ret = nvValidateImpOneDisp(pDispEvo, timingsParams,
+                                   requireBootClocks,
+                                   reallocBandwidth,
+                                   &pWorkArea->postModesetIsoBandwidthKBPS,
+                                   &pWorkArea->postModesetDramFloorKBPS,
+                                   0x0 /* changedHeadsMask */);
+
+        nvFree(guaranteedAndProposed);
+
+        if (!ret) {
+            return FALSE;
+        }
+
+        nvScheduleLowerDispBandwidthTimer(pDevEvo);
+    }
+
+    return TRUE;
+}
+
+static NvBool SkipDisplayPortBandwidthCheck(
+    const NVProposedModeSetStateOneApiHead *pProposedApiHead)
+{
+    return (pProposedApiHead->modeValidationParams.overrides &
+            NVKMS_MODE_VALIDATION_NO_DISPLAYPORT_BANDWIDTH_CHECK) != 0;
+}
+
+static NvBool DowngradeColorSpaceAndBpcOneHead(
+    const NVDispEvoRec *pDispEvo,
+    NVProposedModeSetStateOneApiHead *pProposedApiHead)
+{
+    NVDpyAttributeColor dpyColor = pProposedApiHead->attributes.color;
+    NVDpyEvoRec *pDpyEvo =
+        nvGetOneArbitraryDpyEvo(pProposedApiHead->dpyIdList,
+                                pDispEvo);
+    const NvKmsDpyOutputColorFormatInfo supportedColorFormats =
+        nvDpyGetOutputColorFormatInfo(pDpyEvo);
+
+    if (!nvDowngradeColorSpaceAndBpc(pDpyEvo, &supportedColorFormats, &dpyColor)) {
+        return FALSE;
+    }
+
+    if (pProposedApiHead->colorRangeSpecified &&
+        (dpyColor.range != pProposedApiHead->attributes.color.range)) {
+        return FALSE;
+    }
+
+    if (pProposedApiHead->colorBpcSpecified &&
+        (dpyColor.bpc != pProposedApiHead->attributes.color.bpc)) {
+        return FALSE;
+    }
+
+    if (pProposedApiHead->colorSpaceSpecified &&
+        (dpyColor.format != pProposedApiHead->attributes.color.format)) {
+        return FALSE;
+    }
+
+    pProposedApiHead->attributes.color = dpyColor;
+    return TRUE;
+}
+
+static NvBool DowngradeColorSpaceAndBpcOneDisp(
+    const NVDispEvoRec *pDispEvo,
+    NVProposedModeSetHwStateOneDisp *pProposedDisp,
+    const NVConnectorEvoRec *pConnectorEvo)
+{
+    NvBool ret = FALSE;
+    NvU32 apiHead;
+
+    /*
+     * In the DP-MST case, many heads can share the same connector and
+     * DP bandwidth; therefore it is necessary to validate and downgrade
+     * the DP pixel depth across all heads sharing the same connector
+     * before retrying.
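+     *
+     * (Illustrative example: two MST streams on one connector that each
+     * fit at 10 bpc individually may exceed the shared link bandwidth
+     * together; downgrading both heads to 8 bpc before the retry may
+     * make the combined configuration fit.)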
+ */ + for (apiHead = 0; apiHead < pDispEvo->pDevEvo->numApiHeads; apiHead++) { + NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + const NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pProposedApiHead->dpyIdList, pDispEvo); + + if (!pProposedApiHead->changed || + SkipDisplayPortBandwidthCheck(pProposedApiHead)) { + continue; + } + + if ((pDpyEvo != NULL) && + (pDpyEvo->pConnectorEvo == pConnectorEvo) && + DowngradeColorSpaceAndBpcOneHead(pDispEvo, pProposedApiHead)) { + ret = TRUE; + } + } + + return ret; +} + +static NvU32 SetDpLibImpParamsOneConnectorEvo( + NVProposedModeSetHwStateOneDisp *pProposedDisp, + const NVConnectorEvoRec *pConnectorEvo, + NVDpLibIsModePossibleParamsRec *pParams) +{ + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU32 attachedHeadsMask = 0x0; + + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + const NvU32 head = + nvGetPrimaryHwHeadFromMask(pProposedApiHead->hwHeadsMask); + const NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pProposedApiHead->dpyIdList, pDispEvo); + const NvBool b2Heads1Or = + nvPopCount32(pProposedApiHead->hwHeadsMask) > 1; + + nvAssert(nvPopCount32(pProposedApiHead->hwHeadsMask) <= 2); + + if ((pDpyEvo == NULL) || (pDpyEvo->pConnectorEvo != pConnectorEvo) || + SkipDisplayPortBandwidthCheck(pProposedApiHead)) { + continue; + } + + /* + * We know that `head` is valid since we have a non-null pDpyEvo; + * this means that either the client did not change this apiHead and the + * existing configuration already had a valid HW head assignment, + * or the client did change this apiHead with a requested dpy, + * in which case AssignProposedHwHeadsOneDisp() guarantees a valid + * assignment. + * + * Assert this so Coverity doesn't complain about NVBIT(head). + */ + + nvAssert(head != NV_INVALID_HEAD); + nvAssert((NVBIT(head) & attachedHeadsMask) == 0x0); + + pParams->head[head].displayId = pProposedApiHead->activeRmId; + pParams->head[head].dpyIdList = pProposedApiHead->dpyIdList; + pParams->head[head].colorSpace = pProposedApiHead->attributes.color.format; + pParams->head[head].colorBpc = pProposedApiHead->attributes.color.bpc; + pParams->head[head].pModeValidationParams = + &pProposedApiHead->modeValidationParams; + pParams->head[head].pTimings = &pProposedApiHead->timings; + pParams->head[head].b2Heads1Or = b2Heads1Or; + pParams->head[head].pDscInfo = &pProposedApiHead->dscInfo; + + attachedHeadsMask |= NVBIT(head); + } + + return attachedHeadsMask; +} + +static NvBool DowngradeColorSpaceAndBpcOneConnectorEvo( + const NVConnectorEvoRec *pConnectorEvo, + const NvU32 failedHeadMask, + NVProposedModeSetHwStateOneDisp *pProposedDisp) +{ + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + /* + * First, try to downgrade the pixel depth for the failed heads. + * If the pixel depth for the failed current heads is not possible + * to downgrade further then try to downgrade the pixel depth of + * other changed heads which are sharing same connector and + * dp-bandwidth. 
+ */ + + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + + if (((pProposedApiHead->hwHeadsMask & failedHeadMask) == 0x0) || + !pProposedApiHead->changed) { + continue; + } + + if (DowngradeColorSpaceAndBpcOneHead(pDispEvo, pProposedApiHead)) { + return TRUE; + } + } + + + if (DowngradeColorSpaceAndBpcOneDisp(pDispEvo, + pProposedDisp, + pConnectorEvo)) { + return TRUE; + } + + return FALSE; +} + +static NvBool ValidateProposedModeSetHwStateOneConnectorDPlib( + const NVConnectorEvoRec *pConnectorEvo, + NVProposedModeSetHwStateOneDisp *pProposedDisp, + const enum NVDpLibIsModePossibleQueryMode queryMode) +{ + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU32 attachedHeadsMask = 0x0, failedHeadMask = 0x0; + NvBool bResult = TRUE; + NVDpLibIsModePossibleParamsRec *pDpLibImpParams = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_DPLIB_IS_MODE_POSSIBLE_PARAMS, + sizeof(*pDpLibImpParams)); + + nvAssert(pDpLibImpParams != NULL); + + if (!nvConnectorUsesDPLib(pConnectorEvo)) { + goto done; + } + +tryAgain: + nvkms_memset(pDpLibImpParams, 0, sizeof(*pDpLibImpParams)); + + attachedHeadsMask = 0x0; + failedHeadMask = 0x0; + bResult = TRUE; + + attachedHeadsMask = SetDpLibImpParamsOneConnectorEvo(pProposedDisp, + pConnectorEvo, + pDpLibImpParams); + if (attachedHeadsMask == 0x0) { + goto done; + } + + pDpLibImpParams->queryMode = queryMode; + + bResult = nvDPLibIsModePossible(pConnectorEvo->pDpLibConnector, + pDpLibImpParams, + &failedHeadMask); + + if (!bResult) { + if (DowngradeColorSpaceAndBpcOneConnectorEvo(pConnectorEvo, + failedHeadMask, + pProposedDisp)) { + goto tryAgain; + } + + /* + * Cannot downgrade pixelDepth further -- + * This proposed mode-set is not possible on this DP link, so fail. + */ + } + +done: + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_DPLIB_IS_MODE_POSSIBLE_PARAMS); + return bResult; +} + +/*! + * Validate the DisplayPort bandwidth of the proposed disp configuration. + * + * \param[in] pDispEvo The disp to which pProposedDisp is to be applied. + * \param[in] pProposedDisp The requested configuration. + * + * \return If pProposedDisp passes the DP bandwidth check, return + * TRUE. Otherwise, return FALSE. + */ +static NvBool ValidateProposedModeSetHwStateOneDispDPlib( + NVDispEvoPtr pDispEvo, + NVProposedModeSetHwStateOneDisp *pProposedDisp, + const enum NVDpLibIsModePossibleQueryMode queryMode) +{ + NvBool dpIsModePossible = TRUE; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NVConnectorEvoRec *pConnectorEvo; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (!ValidateProposedModeSetHwStateOneConnectorDPlib(pConnectorEvo, + pProposedDisp, + queryMode)) { + /* + * The Dp link bandwidth check fails for an unchanged head -- + * This proposed mode-set is not possible on this DP link, so fail. 
+ */ + dpIsModePossible = FALSE; + break; + } + } + + if (dpIsModePossible && + (queryMode == NV_DP_LIB_IS_MODE_POSSIBLE_QUERY_MODE_POST_IMP)) { + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + const NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + const NvU32 primaryHead = + nvGetPrimaryHwHeadFromMask(pProposedApiHead->hwHeadsMask); + const NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pProposedApiHead->dpyIdList, pDispEvo); + NVProposedModeSetHwStateOneHead *pProposedPrimaryHead; + + nvAssert((pProposedApiHead->hwHeadsMask != 0x0) || + (pDpyEvo == NULL)); + + if ((pProposedApiHead->hwHeadsMask == 0x0)) { + continue; + } + + nvAssert(primaryHead != NV_INVALID_HEAD); + pProposedPrimaryHead = &pProposedDisp->head[primaryHead]; + + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + pProposedPrimaryHead->pDpLibModesetState = NULL; + continue; + } + + pProposedPrimaryHead->pDpLibModesetState = + nvDPLibCreateModesetState(pDispEvo, + primaryHead, + pProposedApiHead->activeRmId, + pProposedApiHead->dpyIdList, + pProposedApiHead->attributes.color.format, + pProposedApiHead->attributes.color.bpc, + &pProposedApiHead->timings, + &pProposedApiHead->dscInfo); + if (pProposedPrimaryHead->pDpLibModesetState == NULL) { + dpIsModePossible = FALSE; + goto done; + } + } + } + +done: + return dpIsModePossible; +} + +static NvBool VblankCallbackListsAreEmpty( + const NVDispVblankApiHeadState *pVblankApiHeadState) +{ + ct_assert(ARRAY_LEN(pVblankApiHeadState->vblankCallbackList) == 2); + + return (nvListIsEmpty(&pVblankApiHeadState->vblankCallbackList[0]) && + nvListIsEmpty(&pVblankApiHeadState->vblankCallbackList[1])); +} + +static void VBlankCallbackDeferredWork(void *dataPtr, NvU32 data32) +{ + NVDispVblankApiHeadState *pVblankApiHeadState = NULL; + NVVBlankCallbackPtr pVBlankCallbackTmp = NULL; + NVVBlankCallbackPtr pVBlankCallback = NULL; + NVDispEvoPtr pDispEvo = dataPtr; + NvU32 apiHead = data32; + + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + return; + } + + pVblankApiHeadState = &pDispEvo->vblankApiHeadState[apiHead]; + + /* + * Increment the vblankCount here, so that any callbacks in the list can + * rely on the same value. 
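+     *
+     * (VBlankCallback() only schedules this function from the RM callback
+     * via a zero-timeout timer, so the increment and all of the callback
+     * invocations below happen together in the deferred context, and every
+     * callback for a given vblank observes the same count.)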
+ */ + pVblankApiHeadState->vblankCount++; + + for (NvU32 i = 0; i < ARRAY_LEN(pVblankApiHeadState->vblankCallbackList); i++) { + + nvListForEachEntry_safe(pVBlankCallback, + pVBlankCallbackTmp, + &pVblankApiHeadState->vblankCallbackList[i], + vblankCallbackListEntry) { + pVBlankCallback->pCallback(pDispEvo, pVBlankCallback); + } + } +} + +static void VBlankCallback(void *pParam1, void *pParam2) +{ + const NvU32 apiHead = (NvU32)(NvUPtr)pParam2; + + (void) nvkms_alloc_timer_with_ref_ptr( + VBlankCallbackDeferredWork, + pParam1, /* ref_ptr to pDispEvo */ + apiHead, /* dataU32 */ + 0); /* timeout: schedule the work immediately */ +} + + +static void DisableVBlankCallbacks(const NVDevEvoRec *pDevEvo) +{ + NvU32 dispIndex; + NVDispEvoPtr pDispEvo; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + NvU32 apiHead; + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + + NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + + if (pApiHeadState->rmVBlankCallbackHandle != 0) { + nvRmRemoveVBlankCallback(pDispEvo, + pApiHeadState->rmVBlankCallbackHandle); + pApiHeadState->rmVBlankCallbackHandle = 0; + } + } + } +} + +static void EnableVBlankCallbacks(const NVDevEvoRec *pDevEvo) +{ + NvU32 dispIndex; + NVDispEvoPtr pDispEvo; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + NvU32 apiHead; + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + + NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + + const NVDispVblankApiHeadState *pVblankApiHeadState = + &pDispEvo->vblankApiHeadState[apiHead]; + + nvAssert(pApiHeadState->rmVBlankCallbackHandle == 0); + + if (VblankCallbackListsAreEmpty(pVblankApiHeadState)) { + continue; + } + + const NvU32 hwHead = + nvGetPrimaryHwHeadFromMask(pApiHeadState->hwHeadsMask); + + if (hwHead == NV_INVALID_HEAD) { + continue; + } + + pApiHeadState->rmVBlankCallbackHandle = + nvRmAddVBlankCallback(pDispEvo, hwHead, + VBlankCallback, (void *)(NvUPtr)apiHead); + } + } +} + +/*! + * Validate the proposed configuration on the specified disp. + * + * \param[in] pDispEvo The disp to which pProposedDisp is to be applied. + * \param[in] pProposedDisp The requested configuration. + * \param[out] pReplyDisp The reply structure for the client. + * \param[out] pWorkArea The scratch space for the current modeset request. + * + * \return If pProposedDisp is valid, return TRUE. Otherwise, set the + * appropriate status fields in pReplyDisp to non-SUCCESS, + * and return FALSE. + */ +static NvBool +ValidateProposedModeSetHwStateOneDisp( + NVDispEvoPtr pDispEvo, + const NVProposedModeSetHwState *pProposed, + NVProposedModeSetHwStateOneDisp *pProposedDisp, + struct NvKmsSetModeOneDispReply *pReplyDisp, + NVModeSetWorkArea *pWorkArea) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVDpyIdList dpyIdList; + + /* + * Check that the requested configuration of connectors can be + * driven simultaneously. + */ + dpyIdList = nvEmptyDpyIdList(); + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVDpyEvoPtr pDpyEvo; + FOR_ALL_EVO_DPYS(pDpyEvo, + pProposedDisp->apiHead[apiHead].dpyIdList, pDispEvo) { + dpyIdList = nvAddDpyIdToDpyIdList(pDpyEvo->pConnectorEvo->displayId, + dpyIdList); + } + } + + if (!nvRmIsPossibleToActivateDpyIdList(pDispEvo, dpyIdList)) { + pReplyDisp->status = NVKMS_SET_MODE_ONE_DISP_STATUS_INCOMPATIBLE_DPYS; + return FALSE; + } + + /* + * Check that no dpyId is used by multiple heads. 
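+     *
+     * (Illustrative example: if api-head 0 proposes {dpyA} and api-head 1
+     * proposes {dpyA, dpyB}, the intersection computed on the second
+     * iteration is {dpyA}, and the request fails with
+     * NVKMS_SET_MODE_ONE_DISP_STATUS_DUPLICATE_DPYS.)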
+ */ + dpyIdList = nvEmptyDpyIdList(); + for (NvU32 apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + /* + * Intersect the proposed dpys for this head with the + * accumulated list of dpys for this disp; if the intersection + * is not empty, a dpy is proposed to be used on multiple + * api heads. + */ + NVDpyIdList proposedDpyIdList = + pProposedDisp->apiHead[apiHead].dpyIdList; + NVDpyIdList intersectedDpyIdList = + nvIntersectDpyIdListAndDpyIdList(dpyIdList, proposedDpyIdList); + + if (!nvDpyIdListIsEmpty(intersectedDpyIdList)) { + pReplyDisp->status = NVKMS_SET_MODE_ONE_DISP_STATUS_DUPLICATE_DPYS; + return FALSE; + } + + dpyIdList = nvAddDpyIdListToDpyIdList(dpyIdList, proposedDpyIdList); + } + + /* + * Check ViewPortIn dimensions and ensure valid h/vTaps can be assigned. + */ + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NvU32 head; + NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pProposedApiHead->hwHeadsMask, head) { + /* XXX assume the gpus have equal capabilities */ + const NVEvoScalerCaps *pScalerCaps = + &pDevEvo->gpus[0].capabilities.head[head].scalerCaps; + const NVHwModeTimingsEvoPtr pTimings = &pProposedDisp->head[head].timings; + + if (!nvValidateHwModeTimingsViewPort(pDevEvo, pScalerCaps, pTimings, + &dummyInfoString)) { + pReplyDisp->head[apiHead].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE; + return FALSE; + } + } + } + + /* + * Check that the configuration fits DisplayPort bandwidth constraints. + */ + if (!ValidateProposedModeSetHwStateOneDispDPlib(pDispEvo, pProposedDisp, + NV_DP_LIB_IS_MODE_POSSIBLE_QUERY_MODE_PRE_IMP)) { + pReplyDisp->status = + NVKMS_SET_MODE_ONE_DISP_STATUS_FAILED_DISPLAY_PORT_BANDWIDTH_CHECK; + return FALSE; + } + + /* + * The pixelDepth value, which required to choose the dithering + * configuration, gets finalized as part of the DisplayPort bandwidth + * validation. + */ + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + const NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pProposedApiHead->dpyIdList, pDispEvo); + + if (!pProposedApiHead->changed || (pDpyEvo == NULL)) { + continue; + } + + nvChooseDitheringEvo(pDpyEvo->pConnectorEvo, + pProposedApiHead->attributes.color.bpc, + pProposedApiHead->attributes.color.colorimetry, + &pDpyEvo->requestedDithering, + &pProposedApiHead->attributes.dithering); + } + + /* + * Check that the configuration passes IMP. + */ + if (!ValidateProposedModeSetHwStateOneDispImp(pDispEvo, pProposed, + pProposedDisp, pWorkArea)) { + pReplyDisp->status = + NVKMS_SET_MODE_ONE_DISP_STATUS_FAILED_EXTENDED_GPU_CAPABILITIES_CHECK; + return FALSE; + } + + if (!ValidateProposedModeSetHwStateOneDispDPlib(pDispEvo, pProposedDisp, + NV_DP_LIB_IS_MODE_POSSIBLE_QUERY_MODE_POST_IMP)) { + pReplyDisp->status = + NVKMS_SET_MODE_ONE_DISP_STATUS_FAILED_DISPLAY_PORT_BANDWIDTH_CHECK; + return FALSE; + } + + return TRUE; +} + +/*! + * Validate the proposed flip configuration on the specified sub device. + * + * \param[in] pDispEvo The disp to which pProposedDisp is to be applied. + * \param[in] pProposed The requested configuration. + * \param[out] pProposedSd The requested flip configuration. + * + * \return If pProposedDisp is valid, return TRUE. Otherwise, set the + * appropriate status fields in pReplyDisp to non-SUCCESS, + * and return FALSE. 
+ */ +static NvBool +ValidateProposedFlipHwStateOneSubDev( + const NVDevEvoRec *pDevEvo, + const NVProposedModeSetHwStateOneDisp *pProposedDisp, + NVProposedModeSetHwStateOneSubDev *pProposedSd, + struct NvKmsSetModeOneDispReply *pReplyDisp) +{ + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + NvU32 head; + const NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + + if (!pProposedApiHead->changed || + nvDpyIdListIsEmpty(pProposedApiHead->dpyIdList)) { + continue; + } + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pProposedApiHead->hwHeadsMask, head) { + nvOverrideScalingUsageBounds( + pDevEvo, + head, + &pProposedSd->head[head].flip, + &pProposedDisp->head[head].timings.viewPort.possibleUsage); + + if (!nvValidateFlipEvoHwState(pDevEvo, + head, + &pProposedDisp->head[head].timings, + &pProposedSd->head[head].flip)) { + pReplyDisp->head[apiHead].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_FLIP; + return FALSE; + } + } + } + + return TRUE; +} + +/*! + * Validate the proposed configuration. + * + * \param[in] pDevEvo The device to which pProposed is to be applied. + * \param[in] pProposed The requested configuration. + * \param[out] pReply The reply structure for the client. + * \param[out] pWorkArea The scratch space for the current modeset request. + * + * \return If pProposed is valid, return TRUE. Otherwise, set the + * appropriate status fields in pReply to non-SUCCESS, + * and return FALSE. + */ +static NvBool +ValidateProposedModeSetHwState(NVDevEvoPtr pDevEvo, + NVProposedModeSetHwState *pProposed, + struct NvKmsSetModeReply *pReply, + NVModeSetWorkArea *pWorkArea) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + NvBool ret = FALSE; + NVProposedModeSetHwState *pActual = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_VALIDATE_PROPOSED_MODESET_HW_STATE, + sizeof(*pActual)); + + /* + * Copy the proposed modeset to a scratch area. During the process below, + * we may modify some parts of the timings. If all of validation succeeds, + * then we'll copy the modified version back out; if not, we don't want to + * touch the input. + */ + nvkms_memcpy(pActual, pProposed, sizeof(*pProposed)); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + NVProposedModeSetHwStateOneDisp *pProposedDisp = + &pActual->disp[dispIndex]; + NVProposedModeSetHwStateOneSubDev *pProposedSd = + &pActual->sd[pDispEvo->displayOwner]; + struct NvKmsSetModeOneDispReply *pReplyDisp; + + pReplyDisp = &pReply->disp[dispIndex]; + + if (!ValidateProposedModeSetHwStateOneDisp(pDispEvo, + pActual, + pProposedDisp, + pReplyDisp, + pWorkArea)) { + goto done; + } + + if (!ValidateProposedFlipHwStateOneSubDev(pDevEvo, + pProposedDisp, + pProposedSd, + pReplyDisp)) { + goto done; + } + } + + nvkms_memcpy(pProposed, pActual, sizeof(*pProposed)); + ret = TRUE; + +done: + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_VALIDATE_PROPOSED_MODESET_HW_STATE); + + return ret; +} + +/*! + * Ensure there is an SOR assigned for this pConnectorEvo, for use by + * the pending modeset. + * + * In DP-MST, multiple heads may use the same pConnectorEvo, and they + * should use the same SOR. + * + * When we call nvAssignSOREvo(), we have to tell RM which SORs have + * already been assigned and need to be excluded from consideration for + * the new SOR assignment request. 
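+ *
+ * (Worked example, illustrative only: if SORs 1 and 2 are already
+ * assigned on this subdevice (assignedSorMask == 0x6) and this
+ * connector's primary SOR is 2, then sorExcludeMask is computed below
+ * as 0x6 & ~NVBIT(2) == 0x2, so only SOR 1 is excluded and RM can
+ * re-use SOR 2 for a second DP-MST stream on the same connector.)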
+ */
+static void AssignSor(const NVDispEvoRec *pDispEvo,
+                      const NVProposedModeSetStateOneApiHead *pProposedApiHead,
+                      NVModeSetWorkArea *pWorkArea)
+{
+    const NvU32 sd = pDispEvo->displayOwner;
+    const NVDpyEvoRec *pDpyEvo =
+        nvGetOneArbitraryDpyEvo(pProposedApiHead->dpyIdList, pDispEvo);
+    const NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo;
+
+    const NvBool b2Heads1Or =
+        (nvPopCount32(pProposedApiHead->hwHeadsMask) > 1);
+    /*
+     * In the 2Heads1OR DP-MST case, NV0073_CTRL_CMD_DFP_ASSIGN_SOR needs
+     * to be called with the dynamic displayId.
+     */
+    const NvU32 displayId = (nvDpyEvoIsDPMST(pDpyEvo) && b2Heads1Or) ?
+        pProposedApiHead->activeRmId : nvDpyIdToNvU32(pConnectorEvo->displayId);
+    /*
+     * There are two use cases:
+     * 1. Two 2Heads1OR DP-MST streams over the same connector.
+     * 2. One legacy and one 2Heads1OR DP-MST stream over the same connector.
+     * For both of these use cases, NV0073_CTRL_CMD_DFP_ASSIGN_SOR will get
+     * called over the same connector multiple times.
+     *
+     * Two DP-MST streams over the same connector share the same primary
+     * SOR index.
+     *
+     * Remove the already-assigned primary SOR index from sorExcludeMask,
+     * so that NV0073_CTRL_CMD_DFP_ASSIGN_SOR can re-use it.
+     */
+    const NvU32 sorExcludeMask =
+        (pConnectorEvo->or.primary != NV_INVALID_OR) ?
+            (pWorkArea->sd[sd].assignedSorMask & ~NVBIT(pConnectorEvo->or.primary)) :
+            (pWorkArea->sd[sd].assignedSorMask);
+
+    if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
+        // Nothing to do!
+        return;
+    }
+
+    if (nvAssignSOREvo(pDpyEvo->pConnectorEvo, displayId, b2Heads1Or, sorExcludeMask)) {
+        nvAssert(pConnectorEvo->or.primary != NV_INVALID_OR);
+        pWorkArea->sd[sd].assignedSorMask |= nvConnectorGetORMaskEvo(pConnectorEvo);
+    } else {
+        nvAssert(!"Failed to assign SOR, this failure might cause hang!");
+    }
+}
+
+static void
+SetLinkHandOffOnDpDdcPartners(NVConnectorEvoRec *pConnectorEvo, NVDispEvoPtr pDispEvo, NvBool enable)
+{
+    NVConnectorEvoRec *pTmpConnectorEvo;
+    FOR_ALL_EVO_CONNECTORS(pTmpConnectorEvo, pDispEvo) {
+        if (nvDpyIdIsInDpyIdList(pTmpConnectorEvo->displayId,
+                                 pConnectorEvo->ddcPartnerDpyIdsList)) {
+            if (nvConnectorUsesDPLib(pTmpConnectorEvo)) {
+                nvDPSetLinkHandoff(pTmpConnectorEvo->pDpLibConnector, enable);
+            }
+        }
+    }
+}
+
+static void
+KickoffModesetUpdateState(
+    NVDispEvoPtr pDispEvo,
+    NVEvoModesetUpdateState *modesetUpdateState)
+{
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+
+    for (NvU32 head = 0; head < pDevEvo->numHeads; head++) {
+        nvEvoPreModesetRegisterFlipOccurredEvent(pDispEvo, head,
+                                                 modesetUpdateState);
+    }
+
+    if (!nvDpyIdListIsEmpty(modesetUpdateState->connectorIds)) {
+        NVConnectorEvoRec *pConnectorEvo;
+
+        FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
+            if (!nvDpyIdIsInDpyIdList(pConnectorEvo->displayId,
+                                      modesetUpdateState->connectorIds)) {
+                continue;
+            }
+
+            if (pConnectorEvo->pDpLibConnector != NULL) {
+                nvDPPreSetMode(pConnectorEvo->pDpLibConnector,
+                               modesetUpdateState);
+            } else if (nvConnectorIsDPSerializer(pConnectorEvo)) {
+                nvDPSerializerPreSetMode(pDispEvo, pConnectorEvo);
+            } else {
+                if (nvIsConnectorActiveEvo(pConnectorEvo)) {
+                    SetLinkHandOffOnDpDdcPartners(pConnectorEvo, pDispEvo, TRUE);
+                }
+            }
+        }
+    }
+
+    nvDoIMPUpdateEvo(pDispEvo,
+                     &modesetUpdateState->updateState);
+
+    if (!nvDpyIdListIsEmpty(modesetUpdateState->connectorIds)) {
+        NVConnectorEvoRec *pConnectorEvo;
+
+        FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
+            if (!nvDpyIdIsInDpyIdList(pConnectorEvo->displayId,
+                                      modesetUpdateState->connectorIds)) {
+                continue;
+            }
+
+            if (pConnectorEvo->pDpLibConnector != NULL) {
+                nvDPPostSetMode(pConnectorEvo->pDpLibConnector,
+                                modesetUpdateState);
+            } else if (nvConnectorIsDPSerializer(pConnectorEvo)) {
+                nvDPSerializerPostSetMode(pDispEvo, pConnectorEvo);
+            } else {
+                if (!nvIsConnectorActiveEvo(pConnectorEvo)) {
+                    SetLinkHandOffOnDpDdcPartners(pConnectorEvo, pDispEvo, FALSE);
+                }
+            }
+        }
+    }
+
+    for (NvU32 head = 0; head < pDevEvo->numHeads; head++) {
+        nvEvoPostModesetUnregisterFlipOccurredEvent(pDispEvo, head,
+                                                    modesetUpdateState);
+    }
+
+    nvkms_memset(modesetUpdateState, 0, sizeof(*modesetUpdateState));
+}
+
+/*!
+ * Determine if display devices driven by head are incompatible with newly
+ * activated display devices.
+ */
+static NvBool
+IsProposedModeSetStateOneApiHeadIncompatible(
+    NVDispEvoPtr pDispEvo,
+    NvU32 apiHead,
+    const
+    NVProposedModeSetHwStateOneDisp *pProposedDisp)
+{
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    const NVProposedModeSetStateOneApiHead *pProposedApiHead =
+        &pProposedDisp->apiHead[apiHead];
+    const NVDispApiHeadStateEvoRec *pApiHeadState =
+        &pDispEvo->apiHeadState[apiHead];
+    const NVDpyEvoRec *pDpyEvo =
+        nvGetOneArbitraryDpyEvo(pApiHeadState->activeDpys, pDispEvo);
+    const NvBool bCurrent2Heads1Or =
+        (nvPopCount32(pApiHeadState->hwHeadsMask) > 1);
+    const NvBool bProposed2Heads1Or =
+        (nvPopCount32(pProposedApiHead->hwHeadsMask) > 1);
+    nvAssert(pDpyEvo != NULL);
+
+    /*
+     * DisplayPort devices require an EVO update when detaching the head
+     * from the SOR, because DPlib performs link-training to powerdown
+     * the link.  So, always consider DisplayPort as incompatible.
+     *
+     * Consider this api-head incompatible if there is a change in the
+     * api-head to hardware-head(s) mapping.
+     *
+     * Mark the api-head incompatible if its current or proposed modeset
+     * state is using a 2Heads1OR configuration:
+     * Even if there is no change in the hardware heads or modetimings,
+     * it is not possible to do a modeset on an active 2Heads1OR api-head
+     * without shutting it down first.  The modeset code path is not ready
+     * to handle a glitchless 2Heads1OR modeset; for example,
+     * NV0073_CTRL_CMD_DFP_ASSIGN_SOR does not handle the assignment of
+     * secondary SORs if the display is already active, and returns
+     * incorrect information, which leads to a segfault in NVKMS.
+     */
+
+    if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo) ||
+        ((pProposedApiHead->hwHeadsMask != 0x0) &&
+         (pProposedApiHead->hwHeadsMask != pApiHeadState->hwHeadsMask)) ||
+        bCurrent2Heads1Or || bProposed2Heads1Or) {
+        return TRUE;
+    }
+
+    for (NvU32 tmpApiHead = 0; tmpApiHead < pDevEvo->numApiHeads; tmpApiHead++) {
+        const NVProposedModeSetStateOneApiHead *pTmpProposedApiHead =
+            &pProposedDisp->apiHead[tmpApiHead];
+        const NVDpyEvoRec *pDpyEvoTmp =
+            nvGetOneArbitraryDpyEvo(pTmpProposedApiHead->dpyIdList, pDispEvo);
+        NVDpyIdList dpyIdList;
+
+        if (!pTmpProposedApiHead->changed) {
+            continue;
+        }
+
+        /*
+         * DDC partners are incompatible with each other; only one should
+         * be active at a time.
+         */
+        if ((pDpyEvoTmp != NULL) &&
+            nvDpyIdIsInDpyIdList(pDpyEvoTmp->pConnectorEvo->displayId,
+                                 pDpyEvo->pConnectorEvo->ddcPartnerDpyIdsList)) {
+            return TRUE;
+        }
+
+        /*
+         * For the remaining tests, we compare apiHead against all other heads
+         * in the tmpApiHead loop.
+         */
+        if (tmpApiHead == apiHead) {
+            continue;
+        }
+
+        /*
+         * Consider this api-head incompatible if its current hardware heads
+         * are proposed to map onto a different api-head.
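+         *
+         * (For example, if this api-head currently drives hardware head 1
+         * (pApiHeadState->hwHeadsMask == 0x2) and another api-head's
+         * proposed hwHeadsMask also contains bit 1, the masks overlap and
+         * this api-head must be shut down first.)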
+         */
+        if ((pTmpProposedApiHead->hwHeadsMask &
+             pApiHeadState->hwHeadsMask) != 0x0) {
+            return TRUE;
+        }
+
+        /*
+         * Consider this api-head incompatible if its current
+         * dpy(s) are proposed to attach to a different api-head.
+         */
+        dpyIdList = nvIntersectDpyIdListAndDpyIdList(pTmpProposedApiHead->dpyIdList,
+                                                     pApiHeadState->activeDpys);
+        if (!nvDpyIdListIsEmpty(dpyIdList)) {
+            return TRUE;
+        }
+    }
+
+    return FALSE;
+}
+
+static void DisableActiveCoreRGSyncObjects(NVDispEvoRec *pDispEvo,
+                                           const NvU32 apiHead,
+                                           NVEvoUpdateState *pUpdateState)
+{
+    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    NVDispApiHeadStateEvoRec *pApiHeadState = &pDispEvo->apiHeadState[apiHead];
+
+    for (int i = 0; i < pApiHeadState->numVblankSyncObjectsCreated; i++) {
+        if (pApiHeadState->vblankSyncObjects[i].enabled) {
+            NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead);
+            nvAssert(head != NV_INVALID_HEAD);
+
+            /* hCtxDma of 0 indicates Disable. */
+            pDevEvo->hal->ConfigureVblankSyncObject(
+                pDevEvo,
+                0, /* rasterLine */
+                head,
+                i,
+                NULL, /* pSurfaceDesc */
+                pUpdateState);
+            pApiHeadState->vblankSyncObjects[i].enabled = FALSE;
+        }
+    }
+}
+
+static void
+ApplyProposedModeSetHwStateOneHeadShutDown(
+    NVDispEvoPtr pDispEvo,
+    NvU32 head,
+    const
+    NVProposedModeSetHwStateOneDisp
+    *pProposedDisp,
+    NVModeSetWorkArea *pWorkArea)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    const NvU32 sd = pDispEvo->displayOwner;
+    NVDispHeadStateEvoPtr pHeadState = &pDispEvo->headState[head];
+
+    nvHdmiDpEnableDisableAudio(pDispEvo, head, FALSE /* enable */);
+
+    nvEvoDetachConnector(pHeadState->pConnectorEvo, head,
+                         &pWorkArea->modesetUpdateState);
+
+    nvEvoDisableHwYUV420Packer(pDispEvo, head,
+                               &pWorkArea->modesetUpdateState.updateState);
+
+    pHeadState->pConnectorEvo = NULL;
+
+    pHeadState->bypassComposition = FALSE;
+    pHeadState->mergeHeadSection = 0;
+    nvkms_memset(&pHeadState->timings, 0, sizeof(pHeadState->timings));
+    pHeadState->activeRmId = 0;
+
+    nvkms_memset(&pHeadState->audio, 0, sizeof(pHeadState->audio));
+
+    nvkms_memset(&pHeadState->modeValidationParams, 0,
+                 sizeof(pHeadState->modeValidationParams));
+
+    nvkms_memset(&pDevEvo->gpus[sd].headState[head], 0,
+                 sizeof(pDevEvo->gpus[sd].headState[head]));
+
+    pDevEvo->gpus[sd].headState[head].cursor.cursorCompParams =
+        nvDefaultCursorCompositionParams(pDevEvo);
+
+    for (NvU32 layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+        nvEvoSetFlipOccurredEvent(pDispEvo,
+                                  head,
+                                  layer,
+                                  NULL,
+                                  &pWorkArea->modesetUpdateState);
+    }
+}
+
+/*!
+ * Send methods to shut down an api-head.
+ *
+ * \param[in,out]  pDispEvo       The disp of the head.
+ * \param[in]      apiHead        The api head to consider.
+ * \param[in]      pProposedDisp  The requested configuration of the display.
+ * \param[in,out]  modesetUpdateState  Structure tracking channels which need
+ *                                     to be updated/kicked off.
+ */
+static void
+ApplyProposedModeSetStateOneApiHeadShutDown(
+    NVDispEvoPtr pDispEvo,
+    NvU32 apiHead,
+    const
+    NVProposedModeSetHwStateOneDisp
+    *pProposedDisp,
+    NVModeSetWorkArea *pWorkArea)
+{
+    NVDpyEvoPtr pDpyEvo;
+    NVDispApiHeadStateEvoRec *pApiHeadState =
+        &pDispEvo->apiHeadState[apiHead];
+    NvU32 head;
+
+    /*
+     * If nothing changed about this head's configuration, then we
+     * should not shut it down.
+     */
+    if (!pProposedDisp->apiHead[apiHead].changed) {
+        return;
+    }
+
+    /*
+     * Otherwise, go through the shutdown process for any head that
+     * changed.  If NVProposedModeSetStateOneApiHead::dpyIdList is
+     * empty, then we'll leave it shut down.
If it is non-empty, then + * ApplyProposedModeSetHwStateOneHead{Pre,Post}Update() will + * update the head with its new configuration. + */ + + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + return; + } + + nvAssert(pWorkArea->sd[pDispEvo->displayOwner]. + apiHead[apiHead].oldActiveRmId == 0); + + if (nvPopCount32(pApiHeadState->hwHeadsMask) > 1) { + nvEvoDisableMergeMode(pDispEvo, pApiHeadState->hwHeadsMask, + &pWorkArea->modesetUpdateState.updateState); + } + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + + /* Track old activeRmId and free it after end modeset */ + if (pWorkArea->sd[pDispEvo->displayOwner].apiHead[apiHead].oldActiveRmId == 0) { + pWorkArea->sd[pDispEvo->displayOwner]. + apiHead[apiHead].oldActiveRmId = pHeadState->activeRmId; + } else { + nvAssert(pWorkArea->sd[pDispEvo->displayOwner]. + apiHead[apiHead].oldActiveRmId == pHeadState->activeRmId); + } + + ApplyProposedModeSetHwStateOneHeadShutDown(pDispEvo, head, + pProposedDisp, pWorkArea); + } + + pDpyEvo = nvGetOneArbitraryDpyEvo(pApiHeadState->activeDpys, pDispEvo); + + nvCancelSDRTransitionTimer(pDpyEvo); + + /* + * Identify and disable any active core RG sync objects. + * + * Note: the disable occurs at the hardware level; this intentionally does + * not clear the software state tracking the existence of these sync + * objects, which will be re-enabled at the hardware level in + * ApplyProposedModeSetStateOneApiHeadPreUpdate(), if the given head will be + * active after the modeset. + */ + DisableActiveCoreRGSyncObjects(pDispEvo, apiHead, + &pWorkArea->modesetUpdateState.updateState); + + nvDisable3DVisionAegis(pDpyEvo); + + /* Cancel any pending LUT updates. */ + nvCancelLutUpdateEvo(pDispEvo, apiHead); + + /* Clear software shadow state. */ + pWorkArea->sd[pDispEvo->displayOwner].changedDpyIdList = + nvAddDpyIdListToDpyIdList( + pApiHeadState->activeDpys, + pWorkArea->sd[pDispEvo->displayOwner].changedDpyIdList); + pApiHeadState->activeDpys = nvEmptyDpyIdList(); + nvkms_memset(&pApiHeadState->timings, 0, sizeof(pApiHeadState->timings)); + nvkms_memset(&pApiHeadState->stereo, 0, sizeof(pApiHeadState->stereo)); + + pDpyEvo->apiHead = NV_INVALID_HEAD; + nvAssignHwHeadsMaskApiHeadState(pApiHeadState, 0x0); +} + +static void +ApplyProposedModeSetStateOneDispFlip( + NVDispEvoPtr pDispEvo, + const NVProposedModeSetHwState *pProposed, + const NVProposedModeSetHwStateOneDisp *pProposedDisp, + NVEvoUpdateState *pUpdateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + const NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + const NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pProposedApiHead->dpyIdList, pDispEvo); + const NvU32 sd = pDispEvo->displayOwner; + NvU32 head; + + /* + * If nothing changed about this api-head's configuration or this + * api-head is disabled, then there is nothing to do. 
+ */ + if (!pProposedApiHead->changed || + (pProposedApiHead->hwHeadsMask == 0)) { + continue; + } + + nvAssert(pDpyEvo != NULL); + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pProposedApiHead->hwHeadsMask, head) { + nvSetUsageBoundsEvo(pDevEvo, sd, head, + &pProposed->sd[sd].head[head].flip.usage, + pUpdateState); + + nvFlipEvoOneHead(pDevEvo, sd, head, + &pDpyEvo->parsedEdid.info.hdr_static_metadata_info, + &pProposed->sd[sd].head[head].flip, + FALSE /* allowFlipLock */, + pUpdateState); + + if (pDevEvo->isSOCDisplay) { + pDevEvo->gpus[0].headState[head].preallocatedUsage = + pProposed->sd[sd].head[head].flip.usage; + } + } + + pDispEvo->apiHeadState[apiHead].tf = + pProposedApiHead->tf; + + pDispEvo->apiHeadState[apiHead].hdrInfoFrameOverride = + pProposedApiHead->hdrInfoFrameOverride; + + pDispEvo->apiHeadState[apiHead].hdrStaticMetadataLayerMask = + pProposedApiHead->hdrStaticMetadataLayerMask; + + pDispEvo->apiHeadState[apiHead].viewPortPointIn = + pProposedApiHead->viewPortPointIn; + } +} + +static void ReEnableActiveCoreRGSyncObjects(NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + NVEvoUpdateState *pUpdateState) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead); + NVDispApiHeadStateEvoRec *pApiHeadState = &pDispEvo->apiHeadState[apiHead]; + + nvAssert(head != NV_INVALID_HEAD); + + for (int i = 0; i < pApiHeadState->numVblankSyncObjectsCreated; i++) { + if (pApiHeadState->vblankSyncObjects[i].inUse) { + pDevEvo->hal->ConfigureVblankSyncObject( + pDevEvo, + pDispEvo->headState[head].timings.rasterBlankStart.y, + head, + i, + &pApiHeadState->vblankSyncObjects[i].evoSyncpt.surfaceDesc, + pUpdateState); + + pApiHeadState->vblankSyncObjects[i].enabled = TRUE; + } + } +} + +static void +ApplyProposedModeSetHwStateOneHeadPreUpdate( + NVDispEvoPtr pDispEvo, + const NVProposedModeSetStateOneApiHead *pProposedApiHead, + const NvU32 head, + const NvU32 isPrimaryHead, + const NVProposedModeSetHwStateOneHead *pProposedHead, + NvBool bypassComposition, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + NVDispHeadStateEvoPtr pHeadState = &pDispEvo->headState[head]; + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + + pHeadState->modeValidationParams = pProposedApiHead->modeValidationParams; + pHeadState->bypassComposition = bypassComposition; + pHeadState->activeRmId = pProposedApiHead->activeRmId; + pHeadState->pConnectorEvo = pProposedHead->pConnectorEvo; + pHeadState->mergeHeadSection = pProposedHead->mergeHeadSection; + pHeadState->timings = pProposedHead->timings; + pHeadState->dscInfo = pProposedApiHead->dscInfo; + pHeadState->hdmiFrlConfig = pProposedHead->hdmiFrlConfig; + pHeadState->pixelDepth = + nvEvoDpyColorToPixelDepth(&pProposedApiHead->attributes.color); + pHeadState->audio = pProposedHead->audio; + + nvEvoSetTimings(pDispEvo, head, updateState); + + nvSetDitheringEvo(pDispEvo, + head, + &pProposedApiHead->attributes.dithering, + updateState); + + nvEvoHeadSetControlOR(pDispEvo, + head, + &pProposedApiHead->attributes.color, + updateState); + + /* Update hardware's current colorSpace and colorRange */ + nvUpdateCurrentHardwareColorSpaceAndRangeEvo(pDispEvo, + head, + &pProposedApiHead->attributes.color, + updateState); + + nvEvoAttachConnector(pProposedHead->pConnectorEvo, + head, + isPrimaryHead, + pProposedHead->pDpLibModesetState, + pModesetUpdateState); + + nvSetViewPortsEvo(pDispEvo, head, updateState); + + nvSetImageSharpeningEvo( + pDispEvo, + head, + pProposedApiHead->attributes.imageSharpening.value, + 
updateState); + + + nvSetDVCEvo(pDispEvo, head, + pProposedApiHead->attributes.dvc, + updateState); + + + nvHdmiFrlSetConfig(pDispEvo, head); +} + +/*! + * Update the api heads to be modified on this disp. + * + * This should update the ASSY state of the head, but not trigger an + * UPDATE method. + * + * \param[in,out] pDispEvo The disp of the head. + * \param[in] apiHead The api head to consider. + * \param[in] pProposedHead The requested configuration of the head. + * \param[in,out] updateState Indicates which channels require UPDATEs + * \param[in] bypassComposition + * On Turing and newer, enable display + * composition pipeline bypass mode. + */ +static void +ApplyProposedModeSetStateOneApiHeadPreUpdate( + NVDispEvoPtr pDispEvo, + const NvU32 apiHead, + const NVProposedModeSetHwStateOneDisp *pProposedDisp, + NVModeSetWorkArea *pWorkArea, + NvBool bypassComposition) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVEvoModesetUpdateState *pModesetUpdateState = &pWorkArea->modesetUpdateState; + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVDispApiHeadStateEvoRec *pApiHeadState = &pDispEvo->apiHeadState[apiHead]; + const NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + const NvU32 proposedPrimaryHead = + nvGetPrimaryHwHeadFromMask(pProposedApiHead->hwHeadsMask); + NVDpyEvoPtr pDpyEvo = + nvGetOneArbitraryDpyEvo(pProposedApiHead->dpyIdList, pDispEvo); + NvU32 head; + + /* + * If nothing changed about this head's configuration, then there + * is nothing to do. + */ + if (!pProposedApiHead->changed) { + return; + } + + /* Check for disabled heads. */ + + if (nvDpyIdListIsEmpty(pProposedApiHead->dpyIdList)) { + /* + * ApplyProposedModeSetStateOneApiHeadShutDown() should have + * already been called for this head. + */ + nvAssert(!nvApiHeadIsActive(pDispEvo, apiHead)); + return; + } + + if (pDpyEvo == NULL) { + nvAssert(!"Invalid pDpyEvo"); + return; + } + + /* + * pDevEvo->apiHead[apiHead].numLayers is the number of layers which are + * visible to nvkms clients, and that number should be less than or equal + * to pDevEvo->head[proposedPrimaryHead].numLayers. + * + * If (pDevEvo->head[proposedPrimaryHead].numLayers > + * pDevEvo->apiHead[apiHead].numLayers) then the extra per hardware-head + * layers remain unused and there is no need to register the completion + * notifier callback for those extra per hardware-head layers. + */ + + nvAssert(pDevEvo->head[proposedPrimaryHead].numLayers >= + pDevEvo->apiHead[apiHead].numLayers); + + for (NvU32 layer = 0; layer < + pDevEvo->apiHead[apiHead].numLayers; layer++) { + nvEvoSetFlipOccurredEvent(pDispEvo, + proposedPrimaryHead, + layer, + pApiHeadState->flipOccurredEvent[layer]. + ref_ptr, + &pWorkArea->modesetUpdateState); + } + pApiHeadState->hwHeadsMask = pProposedApiHead->hwHeadsMask; + pDpyEvo->apiHead = apiHead; + + AssignSor(pDispEvo, pProposedApiHead, pWorkArea); + + nvDpyUpdateHdmiPreModesetEvo(pDpyEvo); + + /* + * Cache the list of active pDpys for this head, as well as the + * mode timings. 
+ */ + pApiHeadState->activeDpys = pProposedApiHead->dpyIdList; + pApiHeadState->timings = pProposedApiHead->timings; + pApiHeadState->stereo = pProposedApiHead->stereo; + pWorkArea->sd[pDispEvo->displayOwner].changedDpyIdList = + nvAddDpyIdListToDpyIdList( + pApiHeadState->activeDpys, + pWorkArea->sd[pDispEvo->displayOwner].changedDpyIdList); + + pApiHeadState->infoFrame = pProposedApiHead->infoFrame; + + nvSendHwModeTimingsToAegisEvo(pDispEvo, apiHead); + + /* Set LUT settings + * + * Don't set the LUT notifier because we're already waiting on a core + * notifier for the update. + */ + nvEvoSetLut(pDispEvo, apiHead, FALSE /* kickoff */, &pProposedApiHead->lut); + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pProposedApiHead->hwHeadsMask, head) { + const NvBool isPrimaryHead = (head == proposedPrimaryHead); + + nvAssert(pDpyEvo->pConnectorEvo == + pProposedDisp->head[head].pConnectorEvo); + + ApplyProposedModeSetHwStateOneHeadPreUpdate(pDispEvo, pProposedApiHead, + head, isPrimaryHead, &pProposedDisp->head[head], + bypassComposition, pModesetUpdateState); + } + + /* + * Re-enable any active sync objects, configuring them in accordance with + * the new timings. + */ + ReEnableActiveCoreRGSyncObjects(pDispEvo, apiHead, updateState); + + pApiHeadState->attributes = pProposedApiHead->attributes; + pApiHeadState->tf = pProposedApiHead->tf; + pApiHeadState->hdrInfoFrameOverride = + pProposedApiHead->hdrInfoFrameOverride; + pApiHeadState->hdrStaticMetadataLayerMask = + pProposedApiHead->hdrStaticMetadataLayerMask; + pApiHeadState->hs10bpcHint = pProposedApiHead->hs10bpcHint; + + if (nvPopCount32(pProposedApiHead->hwHeadsMask) > 1) { + nvEvoEnableMergeModePreModeset(pDispEvo, + pProposedApiHead->hwHeadsMask, + &pModesetUpdateState->updateState); + } +} + + +/*! + * Update the heads to be modified on this disp. + * + * PreUpdate() will have already been called on this head, and an + * UPDATE method sent. + * + * \param[in,out] pDispEvo The disp of the head. + * \param[in] apihead The api head to consider. + * \param[in] pProposedApiHead The requested configuration of the api head. + */ +static void +ApplyProposedModeSetStateOneApiHeadPostModesetUpdate( + NVDispEvoPtr pDispEvo, + NvU32 apiHead, + const NVProposedModeSetStateOneApiHead *pProposedApiHead, + NVEvoUpdateState *pUpdateState) +{ + NvU32 head; + NVDpyEvoRec *pDpyEvo; + NVDispApiHeadStateEvoRec *pApiHeadState = &pDispEvo->apiHeadState[apiHead]; + + /* + * If nothing changed about this head's configuration, then there + * is nothing to do. + */ + if (!pProposedApiHead->changed) { + return; + } + + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + return; + } + + if (nvPopCount32(pProposedApiHead->hwHeadsMask) > 1) { + nvEvoEnableMergeModePostModeset(pDispEvo, + pProposedApiHead->hwHeadsMask, + pUpdateState); + } + + pDpyEvo = nvGetOneArbitraryDpyEvo(pApiHeadState->activeDpys, pDispEvo); + nvAssert(pDpyEvo != NULL); + + nvUpdateInfoFrames(pDpyEvo); + + /* Perform 3D vision authentication */ + nv3DVisionAuthenticationEvo(pDispEvo, apiHead); + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + nvHdmiDpEnableDisableAudio(pDispEvo, head, TRUE /* enable */); + } +} + +/* + * Shut down all api-heads that are incompatible with pProposedDisp. This + * requires doing an update immediately. 
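+ *
+ * (For example, as implemented below, an api head whose old or new timings
+ * use HDMI FRL, or whose connector must be released to avoid exhausting
+ * SORs, is considered incompatible; such heads are shut down and their
+ * UPDATE kicked off before the main modeset is programmed.)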
+ */
+static void
+KickoffProposedModeSetStateIncompatibleApiHeadsShutDown(
+    NVDispEvoPtr pDispEvo,
+    const NVProposedModeSetHwStateOneDisp *pProposedDisp,
+    NVModeSetWorkArea *pWorkArea)
+{
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    NvBool foundIncompatibleHead = FALSE;
+    NvU32 clearHdmiFrlActiveRmId[NVKMS_MAX_HEADS_PER_DISP] = { };
+    NVDpyIdList proposedActiveConnectorsList = nvEmptyDpyIdList();
+    NVDpyIdList currActiveConnectorsList = nvEmptyDpyIdList();
+    NVDpyIdList proposedInactiveConnectorList, unionOfActiveConnectorList;
+
+    for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
+        const NVDpyEvoRec *pActiveDpyEvo =
+            nvGetOneArbitraryDpyEvo(pDispEvo->apiHeadState[apiHead].activeDpys,
+                                    pDispEvo);
+        const NVDpyEvoRec *pProposedDpyEvo =
+            nvGetOneArbitraryDpyEvo(pProposedDisp->apiHead[apiHead].dpyIdList,
+                                    pDispEvo);
+        NVDpyId activeConnectorId = (pActiveDpyEvo != NULL) ?
+            pActiveDpyEvo->pConnectorEvo->displayId :
+            nvInvalidDpyId();
+        NVDpyId proposedConnectorId = (pProposedDpyEvo != NULL) ?
+            pProposedDpyEvo->pConnectorEvo->displayId :
+            nvInvalidDpyId();
+
+        currActiveConnectorsList =
+            nvAddDpyIdToDpyIdList(activeConnectorId,
+                                  currActiveConnectorsList);
+
+        proposedActiveConnectorsList =
+            nvAddDpyIdToDpyIdList(proposedConnectorId,
+                                  proposedActiveConnectorsList);
+    }
+
+    proposedInactiveConnectorList =
+        nvDpyIdListMinusDpyIdList(currActiveConnectorsList,
+                                  proposedActiveConnectorsList);
+    unionOfActiveConnectorList =
+        nvAddDpyIdListToDpyIdList(proposedActiveConnectorsList,
+                                  currActiveConnectorsList);
+
+    for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
+        NvBool thisHeadIncompatible = FALSE;
+
+        if (!pProposedDisp->apiHead[apiHead].changed ||
+            !nvApiHeadIsActive(pDispEvo, apiHead)) {
+            continue;
+        }
+
+        const NVDpyEvoRec *pCurrDpyEvo =
+            nvGetOneArbitraryDpyEvo(pDispEvo->apiHeadState[apiHead].activeDpys,
+                                    pDispEvo);
+        const NVConnectorEvoRec *pCurrConnectorEvo =
+            pCurrDpyEvo->pConnectorEvo;
+
+
+        /*
+         * If the number of currently active connectors plus the number of
+         * proposed active connectors exceeds the number of heads, then the
+         * modeset risks running out of SORs. This is because the number of
+         * connectors > the number of SORs >= the number of heads.
+         *
+         * An SOR assignment failure during modeset causes display engine
+         * and/or kernel panics.
+         *
+         * In this situation, mark all the connectors which will not be
+         * active after the modeset as incompatible, and shut them down
+         * before triggering the modeset on the active connectors.
+         *
+         * [2Heads1OR] XXX This incompatibility check will not work because in
+         * the 2Heads1OR configuration two heads get attached to a single
+         * connector.
+         */
+        if (nvCountDpyIdsInDpyIdList(unionOfActiveConnectorList) >
+            pDispEvo->pDevEvo->numHeads &&
+            nvDpyIdIsInDpyIdList(pCurrConnectorEvo->displayId,
+                                 proposedInactiveConnectorList)) {
+            thisHeadIncompatible = TRUE;
+        }
+
+        /* If the *new* timings are FRL, then we need to shut down the head. */
+        if (pProposedDisp->apiHead[apiHead].timings.protocol ==
+            NVKMS_PROTOCOL_SOR_HDMI_FRL) {
+            thisHeadIncompatible = TRUE;
+        }
+
+        /* If the *old* timings are FRL, then we need to shut down the head
+         * and clear the FRL config. */
+        if (pDispEvo->apiHeadState[apiHead].timings.protocol ==
+            NVKMS_PROTOCOL_SOR_HDMI_FRL) {
+            NvU32 head;
+            thisHeadIncompatible = TRUE;
+            /* Cache the activeRmId since it will be cleared below, but
+             * we don't want to actually call into the HDMI library until
+             * afterwards.
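+             *
+             * (All hw heads in an api head's hwHeadsMask share a single
+             * activeRmId, which is why the loop below asserts that every
+             * head in the mask observes the same value.)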
*/ + FOR_EACH_EVO_HW_HEAD_IN_MASK( + pDispEvo->apiHeadState[apiHead].hwHeadsMask, + head) { + if (clearHdmiFrlActiveRmId[apiHead] == 0) { + clearHdmiFrlActiveRmId[apiHead] = + pDispEvo->headState[head].activeRmId; + } else { + nvAssert(clearHdmiFrlActiveRmId[apiHead] == + pDispEvo->headState[head].activeRmId); + } + } + } + + if (IsProposedModeSetStateOneApiHeadIncompatible(pDispEvo, + apiHead, + pProposedDisp)) { + thisHeadIncompatible = TRUE; + } + + if (!thisHeadIncompatible) { + continue; + } + + ApplyProposedModeSetStateOneApiHeadShutDown( + pDispEvo, + apiHead, + pProposedDisp, + pWorkArea); + + foundIncompatibleHead = TRUE; + } + + /* Submit UPDATE method and kick off, to shut down incompatible heads. */ + if (foundIncompatibleHead) { + KickoffModesetUpdateState(pDispEvo, &pWorkArea->modesetUpdateState); + for (NvU32 apiHead = 0; apiHead < pDevEvo->numHeads; apiHead++) { + if (clearHdmiFrlActiveRmId[apiHead] == 0) { + continue; + } + nvHdmiFrlClearConfig(pDispEvo, clearHdmiFrlActiveRmId[apiHead]); + } + } +} + +static void +KickoffProposedModeSetHwState( + NVDispEvoPtr pDispEvo, + const NVProposedModeSetHwState *pProposed, + const NVProposedModeSetHwStateOneDisp *pProposedDisp, + const NvBool bypassComposition, + NVModeSetWorkArea *pWorkArea) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVEvoModesetUpdateState *pModesetUpdateState = &pWorkArea->modesetUpdateState; + /* + * If there is a change in window ownership, decouple window channel flips + * and the core channel update that performs a modeset. + * + * This allows window channel flips to be instead interlocked with the core + * channel update that sets the window usage bounds, avoiding window + * invalid usage exceptions. + * + * See comment about NVDisplay error code 37, in + * function EvoInitWindowMapping3(). + */ + const NvBool decoupleFlipUpdates = + pModesetUpdateState->windowMappingChanged; + + /* Send methods to shut down any other unused heads, but don't update yet. */ + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + ApplyProposedModeSetStateOneApiHeadShutDown( + pDispEvo, + apiHead, + pProposedDisp, + pWorkArea); + } + + /* Apply pre-UPDATE modifications for any enabled heads. */ + for (NvU32 apiHead = 0; + apiHead < pDispEvo->pDevEvo->numApiHeads; apiHead++) { + ApplyProposedModeSetStateOneApiHeadPreUpdate( + pDispEvo, + apiHead, + pProposedDisp, + pWorkArea, + bypassComposition); + } + + if (!decoupleFlipUpdates) { + /* Merge modeset and flip state updates together */ + ApplyProposedModeSetStateOneDispFlip( + pDispEvo, + pProposed, + pProposedDisp, + &pModesetUpdateState->updateState); + } + + /* Submit UPDATE method and kick off. */ + KickoffModesetUpdateState(pDispEvo, + pModesetUpdateState); + + if (decoupleFlipUpdates) { + NVEvoUpdateState flipUpdateState = { }; + + ApplyProposedModeSetStateOneDispFlip( + pDispEvo, + pProposed, + pProposedDisp, + &flipUpdateState); + + pDevEvo->hal->Update(pDevEvo, + &flipUpdateState, + TRUE /* releaseElv */); + } + + nvRemoveUnusedHdmiDpAudioDevice(pDispEvo); + + { + NVEvoUpdateState updateState = { }; + + /* Apply post-MODESET-UPDATE modifications for any enabled api-heads. 
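+         * These are the steps -- infoframe updates, 3D vision
+         * authentication, and audio enablement -- that must wait until the
+         * core UPDATE above has been kicked off; see
+         * ApplyProposedModeSetStateOneApiHeadPostModesetUpdate().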
*/ + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + ApplyProposedModeSetStateOneApiHeadPostModesetUpdate( + pDispEvo, + apiHead, + &pProposedDisp->apiHead[apiHead], + &updateState); + } + + if (!nvIsUpdateStateEmpty(pDevEvo, &updateState)) { + pDevEvo->hal->Update(pDevEvo, &updateState, + TRUE /* releaseElv */); + } + } +} + +static void AllocatePostModesetDispBandwidth(NVDispEvoPtr pDispEvo, + NVModeSetWorkArea *pWorkArea) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU64 maxFrameTimeUsec = 0ULL; + NvU32 head; + + if (!pDevEvo->isSOCDisplay) { + return; + } + + if ((pDispEvo->isoBandwidthKBPS == pWorkArea->postModesetIsoBandwidthKBPS) && + (pDispEvo->dramFloorKBPS == pWorkArea->postModesetDramFloorKBPS)) { + return; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + NvU64 curFrameTimeUsec = 0ULL; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + curFrameTimeUsec = nvEvoFrametimeUsFromTimings(&pDispEvo->headState[head].timings); + maxFrameTimeUsec = NV_MAX(maxFrameTimeUsec, curFrameTimeUsec); + } + + nvkms_usleep(maxFrameTimeUsec * 2); + + if (!nvAllocateDisplayBandwidth(pDispEvo, + pWorkArea->postModesetIsoBandwidthKBPS, + pWorkArea->postModesetDramFloorKBPS)) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Unexpectedly failed to program post-modeset bandwidth!"); + } +} + +/*! + * Update the disp with the modifications described in pProposedDisp. + * + * \param[in] pDispEvo The disp to be modified. + * \param[in] pProposedDisp The requested configuration of the disp. + * \param[in] pWorkArea Preallocated scratch memory. + * \param[in] updateCoreFirst If true, avoid interlock with core: kick off + * the core channel and wait for a notifier + * before the rest of the channels for this update. + * \param[in] bypassComposition + * On Turing and newer, enable display composition + * pipeline bypass mode. + * + * This function is not allowed to fail. + */ +static void +ApplyProposedModeSetHwStateOneDisp( + NVDispEvoPtr pDispEvo, + const NVProposedModeSetHwState *pProposed, + const NVProposedModeSetHwStateOneDisp *pProposedDisp, + NVModeSetWorkArea *pWorkArea, + NvBool updateCoreFirst, + NvBool bypassComposition) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 head; + const NvU32 sd = pDispEvo->displayOwner; + + nvkms_memset(&pWorkArea->modesetUpdateState, 0, + sizeof(pWorkArea->modesetUpdateState)); + + /* Record the current flip state. */ + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + nvInitFlipEvoHwState(pDevEvo, sd, head, + &pWorkArea->sd[sd].head[head].oldState); + } + + if (updateCoreFirst) { + /* If this is the first core update, initialize the window -> head + * mapping. + * + * Hal ->InitWindowMapping() sets + * NVModesetUpdateState::windowMappingChanged true, if there is + * any change in window ownerships/assignment. This is necessary on + * GV100+ because of a series of unfortunate requirements. + * + * NVDisplay has two requirements that we need to honor: + * + * 1. You can't move a window from one head to another while the head + * is active. + * 2. You can't change window assignments in an update that's + * interlocked with the corresponding window channel. + * + * In addition, GV100 has an additional requirement: + * + * 3. You can't change window assignment for a head while it is + * active, but it's okay to assign windows in the same update that + * activates a head. 
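+         *
+         *    (Illustrative consequence of requirements 1-3: to move a
+         *    window from head 0 to head 1, head 0 must first be shut down
+         *    in one update, and the window can then be assigned to head 1
+         *    in the same update that activates head 1.)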
+         *
+         * If there is a change in window assignment, the task of not
+         * interlocking the core and respective window channels will be
+         * handled by NVEvoUpdateState::subdev[]::noCoreInterlockMask.
+         * ->InitWindowMapping() will set 'noCoreInterlockMask' and
+         * ->Update() will take care not to interlock the window channels
+         * specified in the mask with the core channel.
+         *
+         * The GOP driver and NVKMS assign window channels in the same way:
+         * window channels 2n and 2n+1 are guaranteed to get assigned to
+         * head n.
+         */
+        pDevEvo->hal->InitWindowMapping(pDispEvo, &pWorkArea->modesetUpdateState);
+    }
+
+    /*
+     * Temporarily lock to the max DRAM frequency to prevent mclk switch events
+     * from being requested. Display can't tolerate mclk switch events during
+     * modeset transitions. This max DRAM floor will be released after the Core
+     * notifier signals post-modeset in the AllocatePostModesetDispBandwidth()
+     * call below. This only needs to be done for Orin SOC display.
+     */
+    if (!nvAllocateDisplayBandwidth(pDispEvo,
+                                    pDispEvo->isoBandwidthKBPS,
+                                    NV_U32_MAX)) {
+        nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+                     "Unexpectedly failed to lock to max DRAM pre-modeset!");
+    }
+
+    KickoffProposedModeSetStateIncompatibleApiHeadsShutDown(
+        pDispEvo,
+        pProposedDisp,
+        pWorkArea);
+
+    KickoffProposedModeSetHwState(
+        pDispEvo,
+        pProposed,
+        pProposedDisp,
+        bypassComposition,
+        pWorkArea);
+
+    /*
+     * This function waits for 2 frames to make sure that the final IMP
+     * arbitration settings have been programmed by the post-SV3 worker thread
+     * in RM. Once these settings have taken effect, it's safe to release the
+     * max DRAM floor that was previously requested, and to program the ISO
+     * bandwidth that's required for the new mode. This only needs to be done
+     * for Orin SOC display.
+     */
+    AllocatePostModesetDispBandwidth(pDispEvo, pWorkArea);
+
+    /*
+     * Record the new flip state, then generate any flip events, and update
+     * surface reference counts.
+     */
+    for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
+        nvInitFlipEvoHwState(
+            pDevEvo, sd, head,
+            &pWorkArea->sd[sd].head[head].newState);
+        nvUpdateSurfacesFlipRefCount(
+            pDevEvo,
+            head,
+            &pWorkArea->sd[sd].head[head].newState,
+            NV_TRUE);
+        nvUpdateSurfacesFlipRefCount(
+            pDevEvo,
+            head,
+            &pWorkArea->sd[sd].head[head].oldState,
+            NV_FALSE);
+    }
+}
+
+
+/*!
+ * Initialize the pReply structure.
+ *
+ * Mark all of the heads and disps as successful. During the process
+ * of assigning and validating the proposed configuration, heads with
+ * an invalid requested configuration will have their reply status field
+ * changed to a non-success value.
+ *
+ * \param[in]  pRequest  The client's requested configuration. This
+ *                       indicates which heads on which disps the
+ *                       client requested changes on.
+ * \param[out] pReply    The reply to the client.
+ */
+static void
+InitializeReply(const NVDevEvoRec *pDevEvo,
+                const struct NvKmsSetModeRequest *pRequest,
+                struct NvKmsSetModeReply *pReply)
+{
+    NvU32 dispIndex;
+    NVDispEvoPtr pDispEvo;
+
+    nvkms_memset(pReply, 0, sizeof(*pReply));
+
+    pReply->status = NVKMS_SET_MODE_STATUS_SUCCESS;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+
+        NvU32 apiHead;
+
+        pReply->disp[dispIndex].status =
+            NVKMS_SET_MODE_ONE_DISP_STATUS_SUCCESS;
+
+        for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
+
+            pReply->disp[dispIndex].head[apiHead].status =
+                NVKMS_SET_MODE_ONE_HEAD_STATUS_SUCCESS;
+        }
+    }
+}
+
+
+/*!
+ * Validate the client-provided NvKmsSetModeRequest.
+ * + * Check basic validity of NvKmsSetModeRequest: e.g., that + * requestedDispsBitMask and requestedHeadsBitMask do not exceed the + * disps or heads of the pDevEvo. + * + * \param[in] pDevEvo The device that is to be modified. + * \param[in] pOpenDev The pOpenDev of the client doing the modeset. + * \param[in] pRequest The client's requested configuration. This + * indicates which heads on which disps the + * client requested changes on. + * \param[out] pReply The reply to the client. + + * \return If pRequest is valid, return TRUE. Otherwise, set the + * appropriate status fields in pReply to non-SUCCESS, + * and return FALSE. + */ +static NvBool +ValidateRequest(const NVDevEvoRec *pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply) +{ + NvU32 dispIndex, apiHead; + NvBool ret = TRUE; + + const struct NvKmsModesetPermissions *pPermissions = + nvGetModesetPermissionsFromOpenDev(pOpenDev); + + nvAssert(pOpenDev != NULL); + nvAssert(pPermissions != NULL); + + /* Check for invalid disps in requestedDispsBitMask. */ + if (nvHasBitAboveMax(pRequest->requestedDispsBitMask, + NVKMS_MAX_SUBDEVICES)) { + pReply->status = NVKMS_SET_MODE_STATUS_INVALID_REQUESTED_DISPS_BITMASK; + ret = FALSE; + } + + for (dispIndex = 0; dispIndex < NVKMS_MAX_SUBDEVICES; dispIndex++) { + + if ((pRequest->requestedDispsBitMask & (1 << dispIndex)) == 0) { + continue; + } + + if (dispIndex >= pDevEvo->nDispEvo) { + pReply->status = + NVKMS_SET_MODE_STATUS_INVALID_REQUESTED_DISPS_BITMASK; + ret = FALSE; + continue; + } + + const struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[dispIndex]; + + /* Check for invalid heads in requestedHeadsBitMask. */ + if (nvHasBitAboveMax(pRequestDisp->requestedHeadsBitMask, + NVKMS_MAX_HEADS_PER_DISP)) { + pReply->disp[dispIndex].status = + NVKMS_SET_MODE_ONE_DISP_STATUS_INVALID_REQUESTED_HEADS_BITMASK; + ret = FALSE; + } + + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + + if ((pRequestDisp->requestedHeadsBitMask & (1 << apiHead)) == 0) { + continue; + } + + if (apiHead >= pDevEvo->numApiHeads) { + pReply->disp[dispIndex].status = + NVKMS_SET_MODE_ONE_DISP_STATUS_INVALID_REQUESTED_HEADS_BITMASK; + ret = FALSE; + continue; + } + + const NVDpyIdList permDpyIdList = + pPermissions->disp[dispIndex].head[apiHead].dpyIdList; + + const struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[apiHead]; + + /* + * Does the client have permission to touch this head at + * all? + */ + if (pRequest->commit && nvDpyIdListIsEmpty(permDpyIdList)) { + pReply->disp[dispIndex].head[apiHead].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_PERMISSIONS; + ret = FALSE; + continue; + } + + /* + * pRequestHead->dpyIdList == EMPTY means the head is + * being shut down: no more to do for validation. + */ + if (nvDpyIdListIsEmpty(pRequestHead->dpyIdList)) { + continue; + } + + /* + * Does the client have permission to drive this dpyIdList + * with this head? + */ + if (pRequest->commit && + !nvDpyIdListIsASubSetofDpyIdList(pRequestHead->dpyIdList, + permDpyIdList)) { + pReply->disp[dispIndex].head[apiHead].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_PERMISSIONS; + ret = FALSE; + continue; + } + + /* + * Are all requested dpys in the list of valid dpys for this disp? 
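+             * For example, a request that names a dpy belonging to a
+             * different disp is not in this disp's validDisplays, so it
+             * fails here with NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_DPY
+             * rather than failing deeper in validation.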
+ */ + if (!nvDpyIdListIsASubSetofDpyIdList( + pRequestHead->dpyIdList, + pDevEvo->pDispEvo[dispIndex]->validDisplays)) { + pReply->disp[dispIndex].head[apiHead].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_DPY; + ret = FALSE; + continue; + } + + if (!nvValidateSetLutCommonParams(pDevEvo, &pRequestHead->flip.lut)) { + pReply->disp[dispIndex].head[apiHead].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_LUT; + ret = FALSE; + continue; + } + } + } + + return ret; +} + +static void FillPostSyncptReplyForModeset( + const NVDevEvoRec *pDevEvo, + NvU32 head, + const struct NvKmsFlipCommonParams *pFlipRequest, + struct NvKmsFlipCommonReplyOneHead *pFlipReply, + const NVFlipEvoHwState *pFlipState) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pFlipRequest->layer[layer].syncObjects.specified && + pFlipRequest->layer[layer].syncObjects.val.useSyncpt) { + nvFillPostSyncptReplyOneChannel( + pDevEvo->head[head].layer[layer], + pFlipRequest->layer[layer].syncObjects.val.u.syncpts.requestedPostType, + &pFlipReply->layer[layer].postSyncpt, + &pFlipState->layer[layer].syncObject); + } + } +} + +/*! + * Assign the NvKmsSetModeReply structure. + * + * After a modeset was successfully completed, update the pReply with + * information about the modeset that the client may need. + * + * \param[in] pDevEvo The device that was modified. + * \param[in] pRequest The client's requested configuration. This + * indicates which heads on which disps the + * client requested changes on. + * \param[out] pReply The reply to the client. + */ +static void +AssignReplySuccess(NVDevEvoRec *pDevEvo, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply, + const NVModeSetWorkArea *pWorkArea) +{ + NvU32 dispIndex; + NVDispEvoPtr pDispEvo; + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + const struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[dispIndex]; + + if ((pRequest->requestedDispsBitMask & (1 << dispIndex)) == 0) { + continue; + } + + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + const struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[apiHead]; + struct NvKmsSetModeOneHeadReply *pReplyHead = + &pReply->disp[dispIndex].head[apiHead]; + + if ((pRequestDisp->requestedHeadsBitMask & (1 << apiHead)) == 0) { + continue; + } + + pReplyHead->status = NVKMS_SET_MODE_ONE_HEAD_STATUS_SUCCESS; + + if (!nvDpyIdListIsEmpty(pRequestHead->dpyIdList)) { + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + const NvU32 primaryHwHead = + nvGetPrimaryHwHeadFromMask(pApiHeadState->hwHeadsMask); + + nvAssert((pApiHeadState->hwHeadsMask != 0x0) && + (primaryHwHead != NV_INVALID_HEAD)); + + struct NvKmsUsageBounds *pTmpUsageBounds = + nvPreallocGet(pDevEvo, + PREALLOC_TYPE_MODE_SET_REPLY_TMP_USAGE_BOUNDS, + sizeof(*pTmpUsageBounds)); + NvU32 head; + + pReplyHead->possibleUsage = pDispEvo->headState[primaryHwHead]. + timings.viewPort.possibleUsage; + pReplyHead->guaranteedUsage = pDispEvo->headState[primaryHwHead]. 
+ timings.viewPort.guaranteedUsage; + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + + nvIntersectUsageBounds(&pReplyHead->possibleUsage, + &pHeadState->timings.viewPort.possibleUsage, + pTmpUsageBounds); + pReplyHead->possibleUsage = *pTmpUsageBounds; + + nvIntersectUsageBounds(&pReplyHead->guaranteedUsage, + &pHeadState->timings.viewPort.guaranteedUsage, + pTmpUsageBounds); + pReplyHead->guaranteedUsage = *pTmpUsageBounds; + } + + nvPreallocRelease(pDevEvo, + PREALLOC_TYPE_MODE_SET_REPLY_TMP_USAGE_BOUNDS); + + pReplyHead->usingHeadSurface = + (pDispEvo->pHsChannel[apiHead] != NULL); + pReplyHead->vrrEnabled = + (pApiHeadState->timings.vrr.type != + NVKMS_DPY_VRR_TYPE_NONE); + pReplyHead->hwHead = primaryHwHead; + + FillPostSyncptReplyForModeset( + pDevEvo, + primaryHwHead, + &pRequestHead->flip, + &pReplyHead->flipReply, + &pWorkArea->sd[dispIndex].head[primaryHwHead].newState); + } else { + pReplyHead->hwHead = NV_INVALID_HEAD; + } + } + } +} + + +/*! + * Call RM to notify that a modeset is impending, or that the modeset has + * completed. + * + * \param[in] pDevEvo The device to modify. + * \param[in] pProposed The proposed resulting hardware state. + * \param[in] beginOrEnd Whether this is a begin call or an end call. + */ +static void +BeginEndModeset(NVDevEvoPtr pDevEvo, + const NVProposedModeSetHwState *pProposed, + enum NvKmsBeginEndModeset beginOrEnd) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + const NVProposedModeSetHwStateOneDisp *pProposedDisp = + &pProposed->disp[dispIndex]; + NvU32 apiHead, dpyMask = 0; + + /* Compute dpyMask: take all the dpyIds on this dispIndex. */ + for (apiHead = 0; apiHead < + ARRAY_LEN(pProposedDisp->apiHead); apiHead++) { + const NVProposedModeSetStateOneApiHead *pProposedApiHead = + &pProposedDisp->apiHead[apiHead]; + dpyMask |= nvDpyIdListToNvU32(pProposedApiHead->dpyIdList); + } + + nvRmBeginEndModeset(pDispEvo, beginOrEnd, dpyMask); + } +} + +/*! + * Idle all of the satellite channels. + * + * XXX NVKMS: use interlocked UPDATEs, instead, so that we don't + * have to busy-wait on the CPU. + * + * XXX NVKMS: we should idle all channels, not just base. + */ +static NvBool IdleAllSatelliteChannels(NVDevEvoRec *pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 head, sd; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + for (head = 0; head < pDevEvo->numHeads; head++) { + NvBool unused; + if (!nvRMIdleBaseChannel(pDevEvo, head, sd, &unused)) { + return FALSE; + } + } + } + + return TRUE; +} + +/*! 
+ * Helper function to assign and validate the proposed modeset.
+ */
+static NvBool
+AssignAndValidateProposedModeSet(NVDevEvoPtr pDevEvo,
+                                 struct NvKmsPerOpenDev *pOpenDev,
+                                 const struct NvKmsSetModeRequest *pRequest,
+                                 struct NvKmsSetModeReply *pReply,
+                                 NVProposedModeSetHwState *pProposed,
+                                 NVModeSetWorkArea *pWorkArea)
+{
+    NvBool ret = FALSE;
+    struct NvKmsSetModeRequest *pPatchedRequest = nvPreallocGet(pDevEvo,
+        PREALLOC_TYPE_HS_PATCHED_MODESET_REQUEST,
+        sizeof(*pPatchedRequest));
+
+    /* clear pPatchedRequest */
+    nvkms_memset(pPatchedRequest, 0, sizeof(*pPatchedRequest));
+
+    if (!nvHsConfigInitModeset(pDevEvo, pRequest, pReply, pOpenDev,
+                               &pWorkArea->hsConfig)) {
+        goto done;
+    }
+
+tryHsAgain:
+    {
+        NvU32 patchedApiHeadsMask[NVKMS_MAX_SUBDEVICES] = { };
+
+        if (!nvHsConfigAllocResources(pDevEvo, &pWorkArea->hsConfig)) {
+            goto done;
+        }
+
+        /* copy pRequest -> pPatchedRequest */
+        *pPatchedRequest = *pRequest;
+
+        /* modify pPatchedRequest for a headsurface config */
+        if (!nvHsConfigPatchSetModeRequest(pDevEvo, &pWorkArea->hsConfig,
+                                           pOpenDev, pPatchedRequest,
+                                           patchedApiHeadsMask)) {
+
+            nvHsConfigFreeResources(pDevEvo, &pWorkArea->hsConfig);
+
+            goto done;
+        }
+
+        /* assign pProposed from pPatchedRequest */
+        if (!AssignProposedModeSetHwState(pDevEvo, pOpenDev, pPatchedRequest,
+                                          pReply, pProposed)) {
+
+            nvHsConfigClearPatchedSetModeRequest(pDevEvo,
+                                                 pOpenDev,
+                                                 pPatchedRequest,
+                                                 patchedApiHeadsMask);
+            nvHsConfigFreeResources(pDevEvo, &pWorkArea->hsConfig);
+
+            goto done;
+        }
+
+        /* validate pProposed */
+        if (!ValidateProposedModeSetHwState(pDevEvo, pProposed, pReply,
+                                            pWorkArea)) {
+
+            ClearProposedModeSetHwState(pDevEvo, pProposed, FALSE);
+            nvHsConfigClearPatchedSetModeRequest(pDevEvo,
+                                                 pOpenDev,
+                                                 pPatchedRequest,
+                                                 patchedApiHeadsMask);
+            nvHsConfigFreeResources(pDevEvo, &pWorkArea->hsConfig);
+
+            /*
+             * If the pProposed assigned from the patched modeset request
+             * failed validation, downgrade the headSurface configuration and
+             * try again.
+             */
+            if (nvHsConfigDowngrade(pDevEvo, pRequest, &pWorkArea->hsConfig)) {
+                goto tryHsAgain;
+            }
+            goto done;
+        }
+
+        nvHsConfigClearPatchedSetModeRequest(pDevEvo, pOpenDev, pPatchedRequest,
+                                             patchedApiHeadsMask);
+        ret = TRUE;
+    }
+
+done:
+    nvPreallocRelease(pDevEvo,
+                      PREALLOC_TYPE_HS_PATCHED_MODESET_REQUEST);
+
+    return ret;
+}
+
+/*!
+ * Perform a modeset across the heads on the disps of the device.
+ *
+ * See the comments at the top of this source file for a description
+ * of the flow performed by this function.
+ *
+ * \param[in,out] pDevEvo   The device to be modified.
+ * \param[in]     pOpenDev  The pOpenDev of the client doing the modeset.
+ * \param[in]     pRequest  The client's requested configuration changes.
+ * \param[out]    pReply    The reply to the client.
+ * \param[in]     bypassComposition
+ *                          On Turing and newer, enable composition pipeline
+ *                          bypass mode.
+ * \param[in]     doRasterLock
+ *                          Rasterlock heads in the post-modeset routine.
+ *
+ * \return  Return TRUE if the modeset was successful. Otherwise,
+ *          return FALSE. If the modeset was not successful,
+ *          the state of the hardware and software should not
+ *          have been changed.
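+ *
+ * Illustrative caller sketch (hypothetical variable names; this mirrors how
+ * nvShutDownApiHeads() below constructs its request):
+ *
+ *     struct NvKmsSetModeRequest *req = ...;  // zero-initialized
+ *
+ *     req->commit = TRUE;
+ *     req->requestedDispsBitMask = NVBIT(0);
+ *     req->disp[0].requestedHeadsBitMask = NVBIT(0);
+ *     req->disp[0].head[0].dpyIdList = dpyIdList;  // empty list disables
+ *
+ *     ret = nvSetDispModeEvo(pDevEvo, pOpenDev, req, &reply,
+ *                            FALSE,  // bypassComposition
+ *                            TRUE);  // doRasterLock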
+ */ +NvBool nvSetDispModeEvo(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply, + NvBool bypassComposition, + NvBool doRasterLock) +{ + NvBool ret = FALSE; + NVProposedModeSetHwState *pProposed = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_PROPOSED_MODESET_HW_STATE, + sizeof(*pProposed)); + NvU32 dispIndex; + NVDispEvoPtr pDispEvo; + NvBool updateCoreFirst = FALSE; + NvBool committed = FALSE; + + NVModeSetWorkArea *pWorkArea = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_MODE_SET_WORK_AREA, + sizeof(*pWorkArea)); + + nvkms_memset(pProposed, 0, sizeof(*pProposed)); + nvkms_memset(pWorkArea, 0, sizeof(*pWorkArea)); + + nvAssert(pOpenDev != NULL); + + InitializeReply(pDevEvo, pRequest, pReply); + + if (!ValidateRequest(pDevEvo, pOpenDev, pRequest, pReply)) { + goto done; + } + + /* Disallow GC6 in anticipation of touching GPU/displays. */ + if (!nvRmSetGc6Allowed(pDevEvo, FALSE)) { + goto done; + } + + if (!AssignAndValidateProposedModeSet(pDevEvo, pOpenDev, pRequest, pReply, + pProposed, pWorkArea)) { + goto done; + } + + /* The requested configuration is valid. */ + + ret = TRUE; + + if (!pRequest->commit) { + goto done; + } + + /* All satellite channels must be idle. */ + + if (!IdleAllSatelliteChannels(pDevEvo)) { + ret = FALSE; + goto done; + } + + /* From this point, we should not fail. */ + + /* + * Disable stereo pin during console restore or modeset owner changes. + */ + if (!InheritPreviousModesetState(pDevEvo, pOpenDev)) { + NvU32 sd; + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 head; + for (head = 0; head < pDevEvo->numHeads; head++) { + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + nvSetStereoEvo(pDispEvo, head, FALSE); + } + } + } + + /* + * Tear down any existing headSurface config, restoring the pre-headSurface + * config. This must be done before fliplock is potentially re-enabled + * during nvEvoLockStatePostModeset below. + */ + + nvHsConfigStop(pDevEvo, &pWorkArea->hsConfig); + + nvEvoCancelPostFlipIMPTimer(pDevEvo); + + BeginEndModeset(pDevEvo, pProposed, BEGIN_MODESET); + + DisableVBlankCallbacks(pDevEvo); + + nvEvoLockStatePreModeset(pDevEvo); + + nvEvoRemoveOverlappingFlipLockRequestGroupsForModeset(pDevEvo, pRequest); + + nvDisableVrr(pDevEvo); + + updateCoreFirst = pDevEvo->coreInitMethodsPending; + pDevEvo->coreInitMethodsPending = FALSE; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + ApplyProposedModeSetHwStateOneDisp(pDispEvo, + pProposed, + &pProposed->disp[dispIndex], + pWorkArea, + updateCoreFirst, + bypassComposition); + } + + nvEnableVrr(pDevEvo); + + /* + * Cache whether HS in NVKMS is allowed, so we can make consistent + * decisions for future partial updates from non-modeset owners. + */ + pDevEvo->allowHeadSurfaceInNvKms = pProposed->allowHeadSurfaceInNvKms; + + nvEvoLockStatePostModeset(pDevEvo, doRasterLock); + + EnableVBlankCallbacks(pDevEvo); + + /* + * The modeset was successful: if headSurface was used as part of this + * modeset, record that in the pDevEvo. + */ + nvHsConfigStart(pDevEvo, &pWorkArea->hsConfig); + + BeginEndModeset(pDevEvo, pProposed, END_MODESET); + + AssignReplySuccess(pDevEvo, pRequest, pReply, pWorkArea); + + pDevEvo->skipConsoleRestore = FALSE; + + pDevEvo->modesetOwnerOrSubOwnerChanged = FALSE; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + /* + * In case of successful commit, update current attribute values and + * free old display IDs. 
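+         * (oldActiveRmId is only freed here, after the commit, so that a
+         * request which fails validation never releases RM display IDs
+         * that are still in use.)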
+ */ + NVDpyEvoRec *pDpyEvo; + + FOR_ALL_EVO_DPYS(pDpyEvo, + pWorkArea->sd[dispIndex].changedDpyIdList, + pDispEvo) { + nvDpyUpdateCurrentAttributes(pDpyEvo); + } + + for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + if (pWorkArea->sd[dispIndex].apiHead[apiHead].oldActiveRmId != 0x0) { + nvRmFreeDisplayId( + pDispEvo, + pWorkArea->sd[dispIndex].apiHead[apiHead].oldActiveRmId); + } + } + } + + committed = TRUE; + + /* fall through */ +done: + ClearProposedModeSetHwState(pDevEvo, pProposed, committed); + + /* If all heads are shut down, allow GC6. */ + if (nvAllHeadsInactive(pDevEvo)) { + nvRmSetGc6Allowed(pDevEvo, TRUE); + } + + /* + * nvHsConfigFreeResources() frees any headSurface resources no longer + * needed. On a successful modeset, nvHsConfigApply() will move resources + * from the hsConfig to the pDevEvo, and nvHsConfigFreeResources() will be a + * noop. + */ + nvHsConfigFreeResources(pDevEvo, &pWorkArea->hsConfig); + + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_MODE_SET_WORK_AREA); + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_PROPOSED_MODESET_HW_STATE); + return ret; +} + +/*! + * Register a callback to activate when vblank is reached on a given head. + * + * \param[in,out] pDispEvo The display engine to register the callback on. + * \param[in] apiHead The api head to register the callback on. + * \param[in] pCallback The function to call when vblank is reached on the + * provided pDispEvo+head combination. + * \param[in] pUserData A pointer to caller-provided custom data. + * \param[in] listIndex Which vblankCallbackList[] array to add this callback into. + * + * \return Returns a pointer to a NVVBlankCallbackRec structure if the + * registration was successful. Otherwise, return NULL. + */ +NVVBlankCallbackPtr +nvApiHeadRegisterVBlankCallback(NVDispEvoPtr pDispEvo, + const NvU32 apiHead, + NVVBlankCallbackProc pCallback, + void *pUserData, + NvU8 listIndex) +{ + /* + * All the hardware heads mapped on the input api head should be + * rasterlocked, and should trigger vblank callback exactly at same time; + * therefore it is sufficient to register vblank callback only with the + * primary hardware head. + */ + const NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead); + NVDispApiHeadStateEvoRec *pApiHeadState = &pDispEvo->apiHeadState[apiHead]; + NVDispVblankApiHeadState *pVblankApiHeadState = + &pDispEvo->vblankApiHeadState[apiHead]; + NVVBlankCallbackPtr pVBlankCallback = NULL; + + pVBlankCallback = nvCalloc(1, sizeof(*pVBlankCallback)); + if (pVBlankCallback == NULL) { + return NULL; + } + + pVBlankCallback->pCallback = pCallback; + pVBlankCallback->pUserData = pUserData; + pVBlankCallback->apiHead = apiHead; + + /* append to the tail of the list */ + nvListAppend(&pVBlankCallback->vblankCallbackListEntry, + &pVblankApiHeadState->vblankCallbackList[listIndex]); + + // If this is the first entry in the list, register the vblank callback + if ((head != NV_INVALID_HEAD) && + (pApiHeadState->rmVBlankCallbackHandle == 0)) { + pApiHeadState->rmVBlankCallbackHandle = + nvRmAddVBlankCallback(pDispEvo, head, VBlankCallback, + (void *)(NvUPtr)apiHead); + } + + return pVBlankCallback; +} + +/*! + * Un-register a vblank callback for a given api head. + * + * \param[in,out] pDispEvo The display engine to register the callback on. + * \param[in] pCallback A pointer to the NVVBlankCallbackRec to un-register. 
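+ *
+ * Illustrative pairing with the registration function above (hypothetical
+ * callback and data names):
+ *
+ *     NVVBlankCallbackPtr pCb =
+ *         nvApiHeadRegisterVBlankCallback(pDispEvo, apiHead,
+ *                                         MyVBlankProc, pMyData, 0);
+ *     ...
+ *     if (pCb != NULL) {
+ *         nvApiHeadUnregisterVBlankCallback(pDispEvo, pCb);
+ *     }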
+ * + */ +void nvApiHeadUnregisterVBlankCallback(NVDispEvoPtr pDispEvo, + NVVBlankCallbackPtr pCallback) +{ + const NvU32 apiHead = pCallback->apiHead; + NVDispApiHeadStateEvoRec *pApiHeadState = &pDispEvo->apiHeadState[apiHead]; + const NVDispVblankApiHeadState *pVblankApiHeadState = + &pDispEvo->vblankApiHeadState[apiHead]; + const NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead); + + nvListDel(&pCallback->vblankCallbackListEntry); + nvFree(pCallback); + + nvAssert((head != NV_INVALID_HEAD) || + (pApiHeadState->rmVBlankCallbackHandle == 0)); + + // If there are no more callbacks, disable the RM-level callback + if (VblankCallbackListsAreEmpty(pVblankApiHeadState) && + (head != NV_INVALID_HEAD) && + (pApiHeadState->rmVBlankCallbackHandle != 0)) { + nvRmRemoveVBlankCallback(pDispEvo, + pApiHeadState->rmVBlankCallbackHandle); + pApiHeadState->rmVBlankCallbackHandle = 0; + } +} + +/*! + * Perform a modeset that disables some or all api heads. + * + * \param[in] pDevEvo The device to shut down. + * \param[in] pOpenDev The pOpenDev of the client doing the disabling. + * \param[in] pTestFunc The pointer to test function, identifying heads + * targeted to shut down. If NULL then shut down + * all heads. + * \param[in] pData Data passed to the test function. + * \param[in] doRasterLock Allow rasterlock to be implicitly enabled + */ +void nvShutDownApiHeads(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NVShutDownApiHeadsTestFunc pTestFunc, + void *pData, + NvBool doRasterLock) +{ + if (pDevEvo->displayHandle != 0) { + struct NvKmsSetModeParams *params = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_SHUT_DOWN_HEADS_SET_MODE, + sizeof(*params)); + struct NvKmsSetModeRequest *req = NULL; + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + NvBool dirty = FALSE; + + nvkms_memset(params, 0, sizeof(*params)); + req = ¶ms->request; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 apiHead; + + req->requestedDispsBitMask |= NVBIT(dispIndex); + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + /* + * XXX pTestFunc isn't honored by nvSetDispModeEvo()'s + * InheritPreviousModesetState() logic. + */ + if (pTestFunc && !pTestFunc(pDispEvo, apiHead, pData)) { + continue; + } + + dirty = TRUE; + req->disp[dispIndex].requestedHeadsBitMask |= NVBIT(apiHead); + } + } + + if (dirty) { + req->commit = TRUE; + + /* + * XXX TODO: The coreInitMethodsPending flag indicates that the + * init_no_update methods which were pushed by the hardware during + * core channel allocation are still pending, it means this is + * first modeset after boot and the boot display/heads are still + * active. In theory, we could only shut down heads which satisfies + * pTestFunc() test but this fails because other heads active at + * boot do not have mode timing information populated during + * MarkConnectorBootHeadActive(), so nvSetDispMode() tries to + * program invalid modes on those heads. + * + * For now, just shut down all heads if any head satisfies + * pTestFunc() test. 
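+             *
+             * (Concretely: on the first modeset after boot, even a
+             * pTestFunc() that matches only api head 1 results in
+             * requestedHeadsBitMask = NVBIT(numApiHeads) - 1 below, i.e.
+             * every api head on the disp is shut down.)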
+ */ + if (pDevEvo->coreInitMethodsPending) { + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + req->disp[dispIndex].requestedHeadsBitMask |= + NVBIT(pDevEvo->numApiHeads) - 1; + } + } + + nvSetDispModeEvo(pDevEvo, pOpenDev, req, + ¶ms->reply, FALSE /* bypassComposition */, + doRasterLock); + } + + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_SHUT_DOWN_HEADS_SET_MODE); + } + + if (pTestFunc == NULL) { + nvAssertAllDpysAreInactive(pDevEvo); + } +} + +NVRgLine1CallbackPtr +nvApiHeadAddRgLine1Callback(NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + NVRgLine1CallbackProc pCallbackProc, + void *pUserData) +{ + /* + * All the hardware heads mapped on the input api head should be + * rasterlocked, and should trigger RgLine1 callback exactly at same time; + * therefore it is sufficient to register RgLine1 callback only with the + * primary hardware head. + */ + const NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead); + if (head == NV_INVALID_HEAD) { + return NULL; + } + return nvRmAddRgLine1Callback(pDispEvo, head, pCallbackProc, pUserData); +} + + +void nvApiHeadGetScanLine(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + NvU16 *pScanLine, + NvBool *pInBlankingPeriod) +{ + /* + * All the hardware heads mapped on the input api head should be + * rasterlocked; therefore it is sufficient to get scanline only for the + * primary hardware head. + */ + const NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead); + + nvAssert(head != NV_INVALID_HEAD); + pDispEvo->pDevEvo->hal->GetScanLine(pDispEvo, head, pScanLine, + pInBlankingPeriod); +} + diff --git a/src/nvidia-modeset/src/nvkms-pow.c b/src/nvidia-modeset/src/nvkms-pow.c new file mode 100644 index 0000000..6445f54 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-pow.c @@ -0,0 +1,468 @@ +/* + * ==================================================== + * Copyright (C) 2004 by Sun Microsystems, Inc. All rights reserved. + * + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== + */ + +/* + * ==================================================== + * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + * + * Developed at SunSoft, a Sun Microsystems, Inc. business. + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== + */ + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* Adapted from https://www.netlib.org/fdlibm/fdlibm.h 1.5 04/04/22 */ +/* Adapted from https://www.netlib.org/fdlibm/e_pow.c 1.5 04/04/22 SMI */ +/* Adapted from https://www.netlib.org/fdlibm/s_fabs.c 1.3 95/01/18 */ +/* Adapted from https://www.netlib.org/fdlibm/s_scalbn.c 1.3 95/01/18 */ +/* Adapted from https://www.netlib.org/fdlibm/s_copysign.c 1.3 95/01/18 */ + +#include "nvkms-softfloat.h" +#include "nv-float.h" + +static const float64_t +bp[] = {{0x3FF0000000000000}, {0x3FF8000000000000},}, // 1.0, 1.5 +dp_h[] = {{0x0000000000000000}, {0x3FE2B80340000000},}, // 5.84962487220764160156e-01 +dp_l[] = {{0x0000000000000000}, {0x3E4CFDEB43CFD006},}, // 1.35003920212974897128e-08 +nan = {0xFFF8000000000000}, // NaN +zero = {0x0000000000000000}, // 0.0 +quarter = {0x3FD0000000000000}, // 0.25 +third = {0x3FD5555555555555}, // 0.3333333333333333333333 +half = {0x3FE0000000000000}, // 0.5 +one = {0x3FF0000000000000}, // 1.0 +two = {0x4000000000000000}, // 2.0 +three = {0x4008000000000000}, // 3.0 +two53 = {0x4340000000000000}, // 9007199254740992.0 +two54 = {0x4350000000000000}, // 1.80143985094819840000e+16 +twom54 = {0x3C90000000000000}, // 5.55111512312578270212e-17 +huge = {0x7E37E43C8800759C}, // 1.0e300 +tiny = {0x01A56E1FC2F8F359}, // 1.0e-300 +/* poly coefs for (3/2)*(log(x)-2s-2/3*s**3 */ +L1 = {0x3FE3333333333303}, // 5.99999999999994648725e-01} +L2 = {0x3FDB6DB6DB6FABFF}, // 4.28571428578550184252e-01} +L3 = {0x3FD55555518F264D}, // 3.33333329818377432918e-01} +L4 = {0x3FD17460A91D4101}, // 2.72728123808534006489e-01} +L5 = {0x3FCD864A93C9DB65}, // 2.30660745775561754067e-01} +L6 = {0x3FCA7E284A454EEF}, // 2.06975017800338417784e-01} +P1 = {0x3FC555555555553E}, // 1.66666666666666019037e-01} +P2 = {0xBF66C16C16BEBD93}, // -2.77777777770155933842e-03} +P3 = {0x3F11566AAF25DE2C}, // 6.61375632143793436117e-05} +P4 = {0xBEBBBD41C5D26BF1}, // -1.65339022054652515390e-06} +P5 = {0x3E66376972BEA4D0}, // 4.13813679705723846039e-08} +lg2 = {0x3FE62E42FEFA39EF}, // 6.93147180559945286227e-01} +lg2_h = {0x3FE62E4300000000}, // 6.93147182464599609375e-01} +lg2_l = {0xBE205C610CA86C39}, // -1.90465429995776804525e-09} +ovt = {0x3C971547652B82FE}, // -(1024-log2(ovfl+.5ulp)), 8.0085662595372944372e-0017 +cp = {0x3FEEC709DC3A03FD}, // 2/(3ln2), 9.61796693925975554329e-01 +cp_h = {0x3FEEC709E0000000}, // (float)cp, 9.61796700954437255859e-01 +cp_l = {0xBE3E2FE0145B01F5}, // tail of cp_h, -7.02846165095275826516e-09 +ivln2 = {0x3FF71547652B82FE}, // 1/ln2, 1.44269504088896338700e+00 +ivln2_h = {0x3FF7154760000000}, // 24b 1/ln2, 1.44269502162933349609e+00 +ivln2_l = {0x3E54AE0BF85DDF44}; // 1/ln2 tail, 1.92596299112661746887e-08 + +// Little endian only +#define __HI(x) *(1+(int*)&x) +#define __LO(x) *(int*)&x +#define __HIp(x) *(1+(int*)x) +#define __LOp(x) *(int*)x + +/* + * F64_copysign(float64_t x, float64_t y) + * F64_copysign(x,y) returns a value with the magnitude of x and + * with the sign bit of y. 
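+ *
+ * (For example, F64_copysign(three, F64_negate(one)) yields -3.0; only the
+ * sign bit of y is read, so the magnitude of y is irrelevant.)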
+ */ +static float64_t F64_copysign(float64_t x, float64_t y) +{ + __HI(x) = (__HI(x) & 0x7fffffff) | (__HI(y) & 0x80000000); + return x; +} + +/* + * F64_scalbn (float64_t x, int n) + * F64_scalbn(x,n) returns x* 2**n computed by exponent + * manipulation rather than by actually performing an + * exponentiation or a multiplication. + */ +static float64_t F64_scalbn(float64_t x, int n) +{ + int k,hx,lx; + hx = __HI(x); + lx = __LO(x); + + k = (hx & 0x7ff00000) >> 20; // extract exponent + if (k == 0) { // 0 or subnormal x + if ((lx | (hx & 0x7fffffff)) == 0) { + return x; // +-0 + } + x = f64_mul(x, two54); + hx = __HI(x); + k = ((hx & 0x7ff00000) >> 20) - 54; + if (n < -50000) { + return f64_mul(tiny, x); // underflow + } + } + if (k == 0x7ff) { + return f64_add(x, x); // NaN or Inf + } + k = k + n; + if (k > 0x7fe) { + return f64_mul(huge, F64_copysign(huge,x)); // overflow + } + if (k > 0) { // normal result + __HI(x) = (hx & 0x800fffff) | (k << 20); + return x; + } + if (k <= -54) { + if (n > 50000) { // in case integer overflow in n+k + return f64_mul(huge, F64_copysign(huge,x)); // overflow + } + } else { + return f64_mul(tiny, F64_copysign(tiny,x)); // underflow + } + k += 54; // subnormal result + __HI(x) = (hx & 0x800fffff) | (k << 20); + return f64_mul(x, twom54); +} + + +/* + * F64_fabs(x) returns the absolute value of x. + */ +static float64_t F64_fabs(float64_t x) +{ + __HI(x) &= 0x7fffffff; + return x; +} + +/* + * nvKmsPow(x,y) return x**y + * + * n + * Method: Let x = 2 * (1+f) + * 1. Compute and return log2(x) in two pieces: + * log2(x) = w1 + w2, + * where w1 has 53-24 = 29 bit trailing zeros. + * 2. Perform y*log2(x) = n+y' by simulating muti-precision + * arithmetic, where |y'|<=0.5. + * 3. Return x**y = 2**n*exp(y'*log2) + * + * Special cases: + * 1. (anything) ** 0 is 1 + * 2. (anything) ** 1 is itself + * 3. (anything) ** NAN is NAN + * 4. NAN ** (anything except 0) is NAN + * 5. +-(|x| > 1) ** +INF is +INF + * 6. +-(|x| > 1) ** -INF is +0 + * 7. +-(|x| < 1) ** +INF is +0 + * 8. +-(|x| < 1) ** -INF is +INF + * 9. +-1 ** +-INF is NAN + * 10. +0 ** (+anything except 0, NAN) is +0 + * 11. -0 ** (+anything except 0, NAN, odd integer) is +0 + * 12. +0 ** (-anything except 0, NAN) is +INF + * 13. -0 ** (-anything except 0, NAN, odd integer) is +INF + * 14. -0 ** (odd integer) = -( +0 ** (odd integer) ) + * 15. +INF ** (+anything except 0,NAN) is +INF + * 16. +INF ** (-anything except 0,NAN) is +0 + * 17. -INF ** (anything) = -0 ** (-anything) + * 18. (-anything) ** (integer) is (-1)**(integer)*(+anything**integer) + * 19. (-anything except 0 and inf) ** (non-integer) is NAN + * + * Accuracy: + * pow(x,y) returns x**y nearly rounded. In particular, pow(integer,integer) + * always returns the correct integer provided it is representable. + */ +float64_t nvKmsPow(float64_t x, float64_t y) +{ + float64_t z, ax, z_h, z_l, p_h, p_l; + float64_t y1, t1, t2, r, s, t, u, v, w; + int i, j, k, yisint, n; + int hx, hy, ix, iy; + unsigned lx, ly; + + hx = __HI(x); + lx = __LO(x); + + hy = __HI(y); + ly = __LO(y); + + ix = hx & 0x7fffffff; + iy = hy & 0x7fffffff; + + /* y==zero: x**0 = 1 */ + if ((iy | ly) == 0) { + return one; + } + + /* +-NaN return x+y */ + if ((ix > 0x7ff00000) || ((ix == 0x7ff00000) && (lx != 0)) || + (iy > 0x7ff00000) || ((iy == 0x7ff00000) && (ly != 0))) { + return f64_add(x, y); + } + + /* + * Determine if y is an odd int when x < 0: + * yisint = 0 ... y is not an integer + * yisint = 1 ... y is an odd int + * yisint = 2 ... 
y is an even int + */ + yisint = 0; + if (hx < 0) { + if (iy >= 0x43400000) { + yisint = 2; // even integer y + } else if (iy >= 0x3ff00000) { + k = (iy >> 20) - 0x3ff; // exponent + if (k > 20) { + j = ly >> (52 - k); + if ((j << (52 - k)) == ly) { + yisint = 2 - (j & 1); + } + } else if (ly == 0) { + j = iy >> (20 - k); + if ((j << (20 -k )) == iy) { + yisint = 2 - (j & 1); + } + } + } + } + + /* special value of y */ + if (ly == 0) { + if (iy == 0x7ff00000) { // y is +-inf + if (((ix - 0x3ff00000) | lx) == 0) { // inf**+-1 is NaN + return f64_sub(y, y); + } else if (ix >= 0x3ff00000) { // (|x|>1)**+-inf = inf,0 + return (hy >= 0) ? y: zero; + } else { // (|x|<1)**-,+inf = inf,0 + return (hy < 0) ? F64_negate(y): zero; + } + } + if (iy == 0x3ff00000) { // y is +-1 + if (hy < 0) { + return f64_div(one, x); + } else { + return x; + } + } + if (hy == 0x40000000) { // y is 2 + return f64_mul(x, x); + } + if (hy == 0x3fe00000) { // y is 0.5 + if (hx >= 0) { // x >= +0 + return f64_sqrt(x); + } + } + } + + ax = F64_fabs(x); + /* special value of x */ + if (lx == 0) { + // x is +-0,+-inf,+-1 + if ((ix == 0x7ff00000) || (ix == 0) || (ix == 0x3ff00000)) { + z = ax; + if (hy < 0) { + z = f64_div(one, z); // z = (1/|x|) + } + if (hx < 0) { + if (((ix - 0x3ff00000) | yisint) == 0) { // (-1)**non-int is NaN + z = nan; + } else if (yisint == 1) { // (x<0)**odd = -(|x|**odd) + z = F64_negate(z); + } + } + return z; + } + } + + n = (hx >> 31) + 1; + + /* (x<0)**(non-int) is NaN */ + if ((n | yisint) == 0) { + return nan; + } + + s = one; // s (sign of result -ve**odd) = -1 else = 1 + if ((n | (yisint - 1)) == 0) { + s = F64_negate(one); // (-ve)**(odd int) + } + + /* |y| is huge */ + if (iy > 0x41e00000) { // if |y| > 2**31 + if (iy > 0x43f00000){ // if |y| > 2**64, must o/uflow + if (ix <= 0x3fefffff) { + return (hy < 0) ? f64_mul(huge, huge) : f64_mul(tiny, tiny); + } + if (ix >= 0x3ff00000) { + return (hy > 0) ? f64_mul(huge, huge) : f64_mul(tiny, tiny); + } + } + /* over/underflow if x is not close to one */ + if (ix < 0x3fefffff) { + return (hy < 0) ? f64_mul(f64_mul(s, huge), huge) : + f64_mul(f64_mul(s, tiny), tiny); + } + if (ix > 0x3ff00000) { + return (hy > 0) ? f64_mul(f64_mul(s, huge), huge) : + f64_mul(f64_mul(s, tiny), tiny); + } + /* + * now |1-x| is tiny <= 2**-20, suffice to compute + * log(x) by x-x^2/2+x^3/3-x^4/4 + */ + t = f64_sub(ax, one); // t has 20 trailing zeros + w = f64_mul(f64_mul(t, t), f64_sub(half, f64_mul(t, f64_sub(third, f64_mul(t, quarter))))); + u = f64_mul(ivln2_h, t); // ivln2_h has 21 sig. 
bits
+        v = f64_sub(f64_mul(t, ivln2_l), f64_mul(w, ivln2));
+        t1 = f64_add(u, v);
+        __LO(t1) = 0;
+        t2 = f64_sub(v, f64_sub(t1, u));
+    } else {
+        float64_t ss, s2, s_h, s_l, t_h, t_l;
+        n = 0;
+        /* take care subnormal number */
+        if (ix < 0x00100000) {
+            ax = f64_mul(ax, two53);
+            n -= 53;
+            ix = __HI(ax);
+        }
+        n += ((ix) >> 20) - 0x3ff;
+        j = ix & 0x000fffff;
+        /* determine interval */
+        ix = j | 0x3ff00000;                // normalize ix
+        if (j <= 0x3988E) {                 // |x| < sqrt(3/2)
+            k = 0;
+        } else if (j < 0xBB67A) {           // |x| < sqrt(3)
+            k = 1;
+        } else {
+            k = 0;
+            n += 1;
+            ix -= 0x00100000;
+        }
+        __HI(ax) = ix;
+
+        /* compute ss = s_h + s_l = (x-1)/(x+1) or (x-1.5)/(x+1.5) */
+        u = f64_sub(ax, bp[k]);             // bp[0]=1.0, bp[1]=1.5
+        v = f64_div(one, f64_add(ax, bp[k]));
+        ss = f64_mul(u, v);
+        s_h = ss;
+        __LO(s_h) = 0;
+        /* t_h = ax + bp[k] High */
+        t_h = zero;
+        __HI(t_h) = ((ix >> 1) | 0x20000000) + 0x00080000 + (k << 18);
+        t_l = f64_sub(ax, f64_sub(t_h, bp[k]));
+        s_l = f64_mul(v, f64_sub(f64_sub(u, f64_mul(s_h, t_h)), f64_mul(s_h, t_l)));
+        /* compute log(ax) */
+        s2 = f64_mul(ss, ss);
+        r = f64_mul(f64_mul(s2, s2), f64_add(L1, f64_mul(s2, f64_add(L2, f64_mul(s2, f64_add(L3, f64_mul(s2, f64_add(L4, f64_mul(s2, f64_add(L5, f64_mul(s2, L6)))))))))));
+        r = f64_add(r, f64_mul(s_l, f64_add(s_h, ss)));
+        s2 = f64_mul(s_h, s_h);
+        t_h = f64_add(f64_add(three, s2), r);
+        __LO(t_h) = 0;
+        t_l = f64_sub(r, (f64_sub(f64_sub(t_h, three), s2)));
+        /* u+v = ss*(1+...) */
+        u = f64_mul(s_h, t_h);
+        v = f64_add(f64_mul(s_l, t_h), f64_mul(t_l, ss));
+        /* 2/(3log2)*(ss+...) */
+        p_h = f64_add(u, v);
+        __LO(p_h) = 0;
+        p_l = f64_sub(v, f64_sub(p_h, u));
+        z_h = f64_mul(cp_h, p_h);           // cp_h+cp_l = 2/(3*log2)
+        z_l = f64_add(f64_add(f64_mul(cp_l, p_h), f64_mul(p_l, cp)), dp_l[k]);
+        /* log2(ax) = (ss+..)*2/(3*log2) = n + dp_h + z_h + z_l */
+        t = i32_to_f64(n);
+        t1 = f64_add(f64_add(f64_add(z_h, z_l), dp_h[k]), t);
+        __LO(t1) = 0;
+        t2 = f64_sub(z_l, f64_sub(f64_sub(f64_sub(t1, t), dp_h[k]), z_h));
+    }
+
+    /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */
+    y1 = y;
+    __LO(y1) = 0;
+    p_l = f64_add(f64_mul(f64_sub(y, y1), t1), f64_mul(y, t2));
+    p_h = f64_mul(y1, t1);
+    z = f64_add(p_l, p_h);
+    j = __HI(z);
+    i = __LO(z);
+    if (j >= 0x40900000) {                          // z >= 1024
+        if (((j - 0x40900000) | i) != 0) {          // if z > 1024
+            return f64_mul(f64_mul(s, huge), huge); // overflow
+        } else {
+            if (f64_lt(f64_sub(z, p_h), f64_add(p_l, ovt))) {
+                return f64_mul(f64_mul(s, huge), huge); // overflow
+            }
+        }
+    } else if ((j & 0x7fffffff) >= 0x4090cc00) {    // z <= -1075
+        if (((j - 0xc090cc00) | i) != 0) {          // z < -1075
+            return f64_mul(f64_mul(s, tiny), tiny); // underflow
+        } else {
+            if (f64_le(p_l, f64_sub(z, p_h))) {
+                return f64_mul(f64_mul(s, tiny), tiny); // underflow
+            }
+        }
+    }
+    /*
+     * compute 2**(p_h+p_l)
+     */
+    i = j & 0x7fffffff;
+    k = (i >> 20) - 0x3ff;
+    n = 0;
+    if (i > 0x3fe00000) {               // if |z| > 0.5, set n = [z+0.5]
+        n = j + (0x00100000 >> (k + 1));
+        k = ((n & 0x7fffffff) >> 20) - 0x3ff;   // new k for n
+        t = zero;
+        __HI(t) = (n & ~(0x000fffff >> k));
+        n = ((n & 0x000fffff) | 0x00100000) >> (20 - k);
+        if (j < 0) {
+            n = -n;
+        }
+        p_h = f64_sub(p_h, t);
+    }
+    t = f64_add(p_l, p_h);
+    __LO(t) = 0;
+    u = f64_mul(t, lg2_h);
+    v = f64_add(f64_mul(f64_sub(p_l, f64_sub(t, p_h)), lg2), f64_mul(t, lg2_l));
+    z = f64_add(u, v);
+    w = f64_sub(v, f64_sub(z, u));
+    t = f64_mul(z, z);
+    t1 = f64_sub(z, f64_mul(t, f64_add(P1, f64_mul(t, f64_add(P2, f64_mul(t, f64_add(P3, f64_mul(t, f64_add(P4, f64_mul(t, P5))))))))));
+    r = f64_sub(f64_div(f64_mul(z, t1), f64_sub(t1, two)), f64_add(w, f64_mul(z, w)));
+    z = f64_sub(one, f64_sub(r, z));
+    j = __HI(z);
+    j += (n << 20);
+    if ((j >> 20) <= 0) {
+        z = F64_scalbn(z,n);    // subnormal output
+    } else {
+        __HI(z) += (n << 20);
+    }
+    return f64_mul(s, z);
+}
diff --git a/src/nvidia-modeset/src/nvkms-prealloc.c b/src/nvidia-modeset/src/nvkms-prealloc.c
new file mode 100644
index 0000000..0c99497
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-prealloc.c
@@ -0,0 +1,164 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms-types.h"
+#include "nvkms-flip-workarea.h"
+#include "nvkms-modeset-types.h"
+#include "nvkms-modeset-workarea.h"
+#include "nvkms-setlut-workarea.h"
+#include "nvkms-prealloc.h"
+#include "nvkms-utils.h"
+
+#include "nvkms-api.h"
+
+#include <stddef.h> /* size_t */
+
+static size_t GetSizeForType(NVDevEvoPtr pDevEvo, enum NVPreallocType type)
+{
+    switch (type) {
+    case PREALLOC_TYPE_IMP_PARAMS:
+        return pDevEvo->hal->caps.impStructSize;
+    case PREALLOC_TYPE_SHUT_DOWN_HEADS_SET_MODE: /* fall through */
+    case PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE:
+        return sizeof(struct NvKmsSetModeParams);
+    case PREALLOC_TYPE_MODE_SET_WORK_AREA:
+        return sizeof(NVModeSetWorkArea);
+    case PREALLOC_TYPE_FLIP_WORK_AREA:
+        return sizeof(struct NvKmsFlipWorkArea);
+    case PREALLOC_TYPE_PROPOSED_MODESET_HW_STATE: /* fallthrough */
+    case PREALLOC_TYPE_VALIDATE_PROPOSED_MODESET_HW_STATE:
+        return sizeof(NVProposedModeSetHwState);
+    case PREALLOC_TYPE_VALIDATE_MODE_HW_MODE_TIMINGS: /* fallthrough */
+    case PREALLOC_TYPE_HS_INIT_CONFIG_HW_TIMINGS:
+        return sizeof(NVHwModeTimingsEvo);
+    case PREALLOC_TYPE_VALIDATE_MODE_HDMI_FRL_CONFIG:
+        return sizeof(HDMI_FRL_CONFIG);
+    case PREALLOC_TYPE_VALIDATE_MODE_DSC_INFO:
+        return sizeof(NVDscInfoEvoRec);
+    case PREALLOC_TYPE_HS_PATCHED_MODESET_REQUEST:
+        return sizeof(struct NvKmsSetModeRequest);
+    case PREALLOC_TYPE_MODE_SET_REPLY_TMP_USAGE_BOUNDS:
+        return sizeof(struct NvKmsUsageBounds);
+    case PREALLOC_TYPE_VALIDATE_MODE_IMP_OUT_HW_MODE_TIMINGS:
+        return sizeof(NVHwModeTimingsEvo) * NVKMS_MAX_HEADS_PER_DISP;
+    case PREALLOC_TYPE_VALIDATE_MODE_TMP_USAGE_BOUNDS:
+        return sizeof(struct NvKmsUsageBounds);
+    case PREALLOC_TYPE_DPLIB_IS_MODE_POSSIBLE_PARAMS:
+        return sizeof(NVDpLibIsModePossibleParamsRec);
+    case PREALLOC_TYPE_SET_LUT_WORK_AREA:
+        return sizeof(struct NvKmsSetLutWorkArea);
+    case PREALLOC_TYPE_MAX:
+        /* Not a real option, but added for -Wswitch-enum */
+        break;
+    }
+
+    nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                     "Unknown prealloc type %d in GetSizeForType.", type);
+
+    return 0;
+}
+
+void *nvPreallocGet(
+    NVDevEvoPtr pDevEvo,
+    enum NVPreallocType type,
+    size_t sizeCheck)
+{
+    struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc;
+    size_t size = GetSizeForType(pDevEvo, type);
+
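+    /*
+     * sizeCheck is the caller's compile-time sizeof() for the requested
+     * type; comparing it against the size allocated here catches callers
+     * that fall out of sync with GetSizeForType() (e.g., passing
+     * sizeof(NVModeSetWorkArea) while requesting a different prealloc
+     * type).
+     */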
if (size != sizeCheck) { + nvAssert(size == sizeCheck); + return NULL; + } + + if ((pPrealloc->used[type / 8] & NVBIT(type % 8)) != 0) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Prealloc type %d already used in nvPreallocGet.", type); + return NULL; + } + + /* Since these are preallocated, they should not be NULL. */ + if (pPrealloc->ptr[type] == NULL) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Prealloc type %d NULL in nvPreallocGet.", type); + } + + pPrealloc->used[type / 8] |= NVBIT(type % 8); + + return pPrealloc->ptr[type]; +} + +void nvPreallocRelease( + NVDevEvoPtr pDevEvo, + enum NVPreallocType type) +{ + struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc; + + if ((pPrealloc->used[type / 8] & NVBIT(type % 8)) == 0) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Prealloc type %d not used in nvPreallocRelease.", type); + } + + pPrealloc->used[type / 8] &= ~(NvU8)NVBIT(type % 8); +} + +NvBool nvPreallocAlloc(NVDevEvoPtr pDevEvo) +{ + struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc; + NvU32 type; + + for (type = 0; type < PREALLOC_TYPE_MAX; type++) { + size_t size = GetSizeForType(pDevEvo, type); + if (size == 0) { + goto fail; + } + pPrealloc->ptr[type] = nvAlloc(size); + if (pPrealloc->ptr[type] == NULL) { + goto fail; + } + } + + nvkms_memset(pPrealloc->used, 0, sizeof(pPrealloc->used)); + + return TRUE; + +fail: + nvPreallocFree(pDevEvo); + return FALSE; +} + +void nvPreallocFree(NVDevEvoPtr pDevEvo) +{ + struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc; + NvU32 type; + + for (type = 0; type < PREALLOC_TYPE_MAX; type++) { + if ((pDevEvo->prealloc.used[type / 8] & NVBIT(type % 8)) != 0) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Prealloc type %d still used in nvPreallocFree.", type); + } + + nvFree(pPrealloc->ptr[type]); + pPrealloc->ptr[type] = NULL; + } +} diff --git a/src/nvidia-modeset/src/nvkms-push.c b/src/nvidia-modeset/src/nvkms-push.c new file mode 100644 index 0000000..892fa2d --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-push.c @@ -0,0 +1,309 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "nvkms-push.h" +#include "nvkms-rmapi.h" +#include "nvkms-utils.h" + +#include "nvidia-push-methods.h" +#include "nvidia-push-utils.h" + +/* + * Wrapper functions needed by nvidia-push. 
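+ *
+ * Each wrapper below simply forwards to the corresponding nvRmApi* helper,
+ * supplying the global RM client handle (nvEvoGlobal.clientHandle) that
+ * nvkms uses for all RM calls; the pDevice argument is unused by these RM
+ * wrappers.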
+ */ +static NvU32 NvPushImportRmApiControl( + NvPushDevicePtr pDevice, + NvU32 hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize) +{ + return nvRmApiControl( + nvEvoGlobal.clientHandle, + hObject, + cmd, + pParams, + paramsSize); +} + +static NvU32 NvPushImportRmApiAlloc( + NvPushDevicePtr pDevice, + NvU32 hParent, + NvU32 hObject, + NvU32 hClass, + void *pAllocParams) +{ + return nvRmApiAlloc( + nvEvoGlobal.clientHandle, + hParent, + hObject, + hClass, + pAllocParams); +} + +static NvU32 NvPushImportRmApiFree( + NvPushDevicePtr pDevice, + NvU32 hParent, + NvU32 hObject) +{ + return nvRmApiFree( + nvEvoGlobal.clientHandle, + hParent, + hObject); +} + +static NvU32 NvPushImportRmApiMapMemoryDma( + NvPushDevicePtr pDevice, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset) +{ + return nvRmApiMapMemoryDma( + nvEvoGlobal.clientHandle, + hDevice, + hDma, + hMemory, + offset, + length, + flags, + pDmaOffset); +} + +static NvU32 NvPushImportRmApiUnmapMemoryDma( + NvPushDevicePtr pDevice, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU32 flags, + NvU64 dmaOffset) +{ + return nvRmApiUnmapMemoryDma( + nvEvoGlobal.clientHandle, + hDevice, + hDma, + hMemory, + flags, + dmaOffset); +} + +static NvU32 NvPushImportRmApiAllocMemory64( + NvPushDevicePtr pDevice, + NvU32 hParent, + NvU32 hMemory, + NvU32 hClass, + NvU32 flags, + void **ppAddress, + NvU64 *pLimit) +{ + return nvRmApiAllocMemory64( + nvEvoGlobal.clientHandle, + hParent, + hMemory, + hClass, + flags, + ppAddress, + pLimit); +} + +static NvU32 NvPushImportRmApiVidHeapControl( + NvPushDevicePtr pDevice, + void *pVidHeapControlParms) +{ + return nvRmApiVidHeapControl(pVidHeapControlParms); +} + +static NvU32 NvPushImportRmApiMapMemory( + NvPushDevicePtr pDevice, + NvU32 hDevice, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + void **ppLinearAddress, + NvU32 flags) +{ + return nvRmApiMapMemory( + nvEvoGlobal.clientHandle, + hDevice, + hMemory, + offset, + length, + ppLinearAddress, + flags); +} + +static NvU32 NvPushImportRmApiUnmapMemory( + NvPushDevicePtr pDevice, + NvU32 hDevice, + NvU32 hMemory, + void *pLinearAddress, + NvU32 flags) +{ + return nvRmApiUnmapMemory( + nvEvoGlobal.clientHandle, + hDevice, + hMemory, + pLinearAddress, + flags); +} + +static NvU64 NvPushImportGetMilliSeconds( + NvPushDevicePtr pDevice) +{ + return (nvkms_get_usec() + 500) / 1000; +} + +static void NvPushImportYield( + NvPushDevicePtr pDevice) +{ + nvkms_yield(); +} + +static NvBool NvPushImportWaitForEvent( + NvPushDevicePtr pDevice, + NvPushImportEvent *pEvent, + NvU64 timeout) +{ + return FALSE; +} + +static void NvPushImportEmptyEventFifo( + NvPushDevicePtr pDevice, + NvPushImportEvent *pEvent) +{ + return; +} + +static void NvPushImportChannelErrorOccurred( + NvPushChannelPtr pChannel, + NvU32 channelErrCode) +{ + /* XXX TODO: implement me */ +} + +static void NvPushImportPushbufferWrapped( + NvPushChannelPtr pChannel) +{ + /* XXX TODO: implement me */ +} + +static void NvPushImportLogError( + NvPushDevicePtr pDevice, + const char *fmt, ...) +{ + const NVDevEvoRec *pDevEvo = pDevice->hostDevice; + + nvAssert(pDevEvo); + + va_list ap; + va_start(ap, fmt); + nvVEvoLog(EVO_LOG_ERROR, pDevEvo->gpuLogIndex, fmt, ap); + va_end(ap); +} + +#if defined(DEBUG) +static void NvPushImportLogNvDiss( + NvPushChannelPtr pChannel, + const char *fmt, ...) 
+{
+    /* XXX TODO: implement me */
+}
+#endif /* DEBUG */
+
+static const NvPushImports NvKmsNvPushImports = {
+    NvPushImportRmApiControl,         /* rmApiControl */
+    NvPushImportRmApiAlloc,           /* rmApiAlloc */
+    NvPushImportRmApiFree,            /* rmApiFree */
+    NvPushImportRmApiMapMemoryDma,    /* rmApiMapMemoryDma */
+    NvPushImportRmApiUnmapMemoryDma,  /* rmApiUnmapMemoryDma */
+    NvPushImportRmApiAllocMemory64,   /* rmApiAllocMemory64 */
+    NvPushImportRmApiVidHeapControl,  /* rmApiVidHeapControl */
+    NvPushImportRmApiMapMemory,       /* rmApiMapMemory */
+    NvPushImportRmApiUnmapMemory,     /* rmApiUnmapMemory */
+    NvPushImportGetMilliSeconds,      /* getMilliSeconds */
+    NvPushImportYield,                /* yield */
+    NvPushImportWaitForEvent,         /* waitForEvent */
+    NvPushImportEmptyEventFifo,       /* emptyEventFifo */
+    NvPushImportChannelErrorOccurred, /* channelErrorOccurred */
+    NvPushImportPushbufferWrapped,    /* pushbufferWrapped */
+    NvPushImportLogError,             /* logError */
+#if defined(DEBUG)
+    NvPushImportLogNvDiss,            /* logNvDiss */
+#endif
+};
+
+NvBool nvAllocNvPushDevice(NVDevEvoPtr pDevEvo)
+{
+    NvPushAllocDeviceParams params = { };
+    NvU32 sd, h;
+
+    params.hostDevice = pDevEvo;
+    params.pImports = &NvKmsNvPushImports;
+    params.clientHandle = nvEvoGlobal.clientHandle;
+    params.subDevice[0].deviceHandle = pDevEvo->deviceHandle;
+    params.numSubDevices = pDevEvo->numSubDevices;
+
+    params.numClasses = pDevEvo->numClasses;
+    params.supportedClasses = pDevEvo->supportedClasses;
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        params.subDevice[sd].handle = pDevEvo->pSubDevices[sd]->handle;
+    }
+
+    params.amodel.config = NV_AMODEL_NONE;
+    params.isTegra = FALSE;
+    params.subDevice[0].gpuVASpace = pDevEvo->nvkmsGpuVASpace;
+
+    ct_assert(sizeof(params.handlePool) ==
+              sizeof(pDevEvo->nvPush.handlePool));
+
+    for (h = 0; h < ARRAY_LEN(pDevEvo->nvPush.handlePool); h++) {
+        pDevEvo->nvPush.handlePool[h] =
+            nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
+        params.handlePool[h] = pDevEvo->nvPush.handlePool[h];
+    }
+
+    if (!nvPushAllocDevice(&params, &pDevEvo->nvPush.device)) {
+        nvFreeNvPushDevice(pDevEvo);
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+void nvFreeNvPushDevice(NVDevEvoPtr pDevEvo)
+{
+    NvU32 h;
+
+    nvPushFreeDevice(&pDevEvo->nvPush.device);
+
+    for (h = 0; h < ARRAY_LEN(pDevEvo->nvPush.handlePool); h++) {
+        if (pDevEvo->nvPush.handlePool[h] != 0) {
+            nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
+                               pDevEvo->nvPush.handlePool[h]);
+            pDevEvo->nvPush.handlePool[h] = 0;
+        }
+    }
+}
diff --git a/src/nvidia-modeset/src/nvkms-rm.c b/src/nvidia-modeset/src/nvkms-rm.c
new file mode 100644
index 0000000..6cfa869
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-rm.c
@@ -0,0 +1,5816 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+
+#include "dp/nvdp-connector.h"
+#include "dp/nvdp-timer.h"
+#include "dp/nvdp-device.h"
+#include "nvkms-rm.h"
+#include "nvkms-rmapi.h"
+#include "g_nvkms-evo-states.h"
+#include "nvkms-event.h"
+#include "nvkms-dpy.h"
+#include "nvkms-types.h"
+#include "nvkms-evo.h"
+#include "nvkms-dma.h"
+#include "nvkms-utils.h"
+#include "nvkms-private.h"
+#include "nvkms-modeset.h"
+#include "nvkms-surface.h"
+#include "nvkms-vrr.h"
+
+#include "nvkms-push.h"
+#include "nvkms-difr.h"
+
+#include "nv_smg.h"
+
+#include "class/cl00c3.h" /* NV01_MEMORY_SYNCPOINT */
+#include "class/cl0005.h" /* NV01_EVENT */
+
+#include <class/cl0070.h> // NV01_MEMORY_VIRTUAL
+#include <class/cl0073.h> /* NV04_DISPLAY_COMMON */
+#include <class/cl003e.h> /* NV01_MEMORY_SYSTEM */
+#include <class/cl0076.h> /* NV01_MEMORY_FRAMEBUFFER_CONSOLE */
+#include <class/cl0080.h> /* NV01_DEVICE_0 */
+#include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */
+#include <class/cl2080.h> /* NV20_SUBDEVICE_0 */
+
+#include "class/clc37b.h" /* NVC37B_WINDOW_IMM_CHANNEL_DMA */
+#include "class/clc37e.h" /* NVC37E_WINDOW_CHANNEL_DMA */
+#include "class/clc57b.h" /* NVC57B_WINDOW_IMM_CHANNEL_DMA */
+#include "class/clc57e.h" /* NVC57E_WINDOW_CHANNEL_DMA */
+#include "class/clc67b.h" /* NVC67B_WINDOW_IMM_CHANNEL_DMA */
+#include "class/clc67e.h" /* NVC67E_WINDOW_CHANNEL_DMA */
+#include "class/clc97b.h" /* NVC97B_WINDOW_IMM_CHANNEL_DMA */
+#include "class/clc97e.h" /* NVC97E_WINDOW_CHANNEL_DMA */
+
+#include "class/cl917b.h" /* NV917B_OVERLAY_IMM_CHANNEL_PIO */
+
+#include "class/cl927c.h" /* NV927C_BASE_CHANNEL_DMA */
+
+#include "class/cl917e.h" /* NV917E_OVERLAY_CHANNEL_DMA */
+
+#include <ctrl/ctrl0000/ctrl0000gpu.h> /* NV0000_CTRL_GPU_* */
+#include <ctrl/ctrl0002.h> /* NV0002_CTRL_CMD_BIND_CONTEXTDMA */
+#include <ctrl/ctrl0073/ctrl0073dfp.h> /* NV0073_CTRL_CMD_DFP_GET_INFO */
+#include <ctrl/ctrl0073/ctrl0073dp.h> /* NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID */
+#include <ctrl/ctrl0073/ctrl0073specific.h> /* NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO */
+#include <ctrl/ctrl0073/ctrl0073system.h> /* NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED */
+#include <ctrl/ctrl0076.h> /* NV0076_CTRL_CMD_NOTIFY_CONSOLE_DISABLED */
+#include <ctrl/ctrl0080/ctrl0080gpu.h> /* NV0080_CTRL_CMD_GPU_SET_DISPLAY_OWNER */
+#include <ctrl/ctrl0080/ctrl0080unix.h> /* NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH */
+#include <ctrl/ctrl2080/ctrl2080bios.h> /* NV2080_CTRL_CMD_BIOS_GET_NBSI */
+#include <ctrl/ctrl2080/ctrl2080bus.h> /* NV2080_CTRL_CMD_BUS_GET_INFO */
+#include <ctrl/ctrl2080/ctrl2080event.h> /* NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION */
+#include <ctrl/ctrl2080/ctrl2080tmr.h> /* NV2080_CTRL_CMD_TIMER_GET_TIME */
+#include <ctrl/ctrl2080/ctrl2080unix.h> /* NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT */
+#include <ctrl/ctrl5070/ctrl5070chnc.h> /* NV5070_CTRL_CMD_SET_RMFREE_FLAGS */
+#include <ctrl/ctrl5070/ctrl5070or.h> /* NV5070_CTRL_CMD_SET_DAC_PWR */
+#include <ctrl/ctrl0000/ctrl0000system.h> /* NV0000_CTRL_CMD_SYSTEM_GET_APPROVAL_COOKIE */
+
+#include "nvos.h"
+
+#include "displayport/dpcd.h"
+
+#define NVKMS_SYNCPT_ID_INVALID (0xFFFFFFFF)
+
+static NvU32 GetLegacyConnectorType(NVDispEvoPtr pDispEvo, NVDpyId dpyId);
+
+static void RmFreeEvoChannel(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel);
+
+static NvBool EngineListCheckOneSubdevice(const NVEvoSubDeviceRec *pSubDevice,
+                                          NvU32 engineType)
+{
+    const NvU32 *engines = pSubDevice->supportedEngines;
+    int i;
+
+    for (i = 0; i < pSubDevice->numEngines; i++) {
+        if (engines[i] == engineType) {
+            return TRUE;
+        }
+    }
+
+    return FALSE;
+}
+
+static NvBool EngineListCheck(const NVDevEvoRec *pDevEvo, NvU32 engineType)
+{
+    int sd;
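+
+    /*
+     * An engine type counts as supported for the device only if every
+     * subdevice reports it: e.g., QueryGpuCapabilities() derives
+     * headSurface support from EngineListCheck(pDevEvo,
+     * NV2080_ENGINE_TYPE_GRAPHICS), which must hold for all GPUs in an
+     * SLI device.
+     */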
+ + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (!EngineListCheckOneSubdevice(pDevEvo->pSubDevices[sd], + engineType)) { + return FALSE; + } + } + + return TRUE; +} + +static NvBool QueryGpuCapabilities(NVDevEvoPtr pDevEvo) +{ + NvBool ctxDmaCoherentAllowedDev = FALSE; + NvBool ctxDmaNonCoherentAllowedDev = FALSE; + NvU32 ret, sd; + + NV0000_CTRL_GPU_GET_ID_INFO_PARAMS idInfoParams = { 0 }; + + /* Assume headSurface is supported if there is a graphics engine */ + pDevEvo->isHeadSurfaceSupported = + EngineListCheck(pDevEvo, NV2080_ENGINE_TYPE_GRAPHICS); + + /* ctxDma{,Non}CoherentAllowed */ + + /* simulationType */ + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS simParams = { 0 }; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + NV2080_CTRL_CMD_GPU_GET_SIMULATION_INFO, + &simParams, + sizeof(simParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + simParams.type = NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_NONE; + } + if (sd == 0) { + pDevEvo->simulationType = simParams.type; + } + nvAssert(pDevEvo->simulationType == simParams.type); + } + + /* mobile */ + + idInfoParams.gpuId = pDevEvo->pSubDevices[0]->gpuId; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_GPU_GET_ID_INFO, + &idInfoParams, sizeof(idInfoParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + pDevEvo->mobile = FALSE; + pDevEvo->isSOCDisplay = FALSE; + } else { + pDevEvo->mobile = + FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _MOBILE, _TRUE, + idInfoParams.gpuFlags); + + pDevEvo->isSOCDisplay = + FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _SOC, _TRUE, + idInfoParams.gpuFlags); + } + + /* TODO: This cap bit should be queried from RM */ + pDevEvo->requiresAllAllocationsInSysmem = pDevEvo->isSOCDisplay; + + /* + * Prohibit vblank_sem_control if: + * - the kernel interface layer says so, or + * - (RM-based) SLI mosaic is enabled (WAR for bug 4552673, until RM-based + * SLI is dropped) + */ + pDevEvo->supportsVblankSemControl = + nvkms_vblank_sem_control() && + !pDevEvo->sli.mosaic; + + /* ctxDma{,Non}CoherentAllowed */ + + if (!pDevEvo->isSOCDisplay) { + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NV2080_CTRL_BUS_GET_INFO_PARAMS busParams = { 0 }; + struct { + NV2080_CTRL_BUS_INFO coherentFlags; + NV2080_CTRL_BUS_INFO nonCoherentFlags; + } busInfoList = { { 0 } }; + + NvBool ctxDmaCoherentAllowed; + NvBool ctxDmaNonCoherentAllowed; + + busInfoList.coherentFlags.index = + NV2080_CTRL_BUS_INFO_INDEX_COHERENT_DMA_FLAGS; + busInfoList.nonCoherentFlags.index = + NV2080_CTRL_BUS_INFO_INDEX_NONCOHERENT_DMA_FLAGS; + + busParams.busInfoListSize = + sizeof(busInfoList) / sizeof(busInfoList.coherentFlags); + busParams.busInfoList = NV_PTR_TO_NvP64(&busInfoList); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + NV2080_CTRL_CMD_BUS_GET_INFO, + &busParams, sizeof(busParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + ctxDmaCoherentAllowed = + FLD_TEST_DRF(2080_CTRL_BUS_INFO, _COHERENT_DMA_FLAGS, + _CTXDMA, _TRUE, busInfoList.coherentFlags.data); + + ctxDmaNonCoherentAllowed = + FLD_TEST_DRF(2080_CTRL_BUS_INFO, _NONCOHERENT_DMA_FLAGS, + _CTXDMA, _TRUE, busInfoList.nonCoherentFlags.data); + + if (sd == 0) { + ctxDmaCoherentAllowedDev = ctxDmaCoherentAllowed; + ctxDmaNonCoherentAllowedDev = ctxDmaNonCoherentAllowed; + } else { + ctxDmaCoherentAllowedDev = + ctxDmaCoherentAllowedDev && ctxDmaCoherentAllowed; + ctxDmaNonCoherentAllowedDev = + 
ctxDmaNonCoherentAllowedDev && ctxDmaNonCoherentAllowed; + } + } + nvAssert(ctxDmaCoherentAllowedDev || ctxDmaNonCoherentAllowedDev); + + if (ctxDmaCoherentAllowedDev) { + pDevEvo->isoIOCoherencyModes.coherent = TRUE; + pDevEvo->nisoIOCoherencyModes.coherent = TRUE; + } + + if (ctxDmaNonCoherentAllowedDev) { + pDevEvo->isoIOCoherencyModes.noncoherent = TRUE; + pDevEvo->nisoIOCoherencyModes.noncoherent = TRUE; + } + } else { + /* + * On SOC display, NISO requests are IO-coherent and ISO + * requests are non-coherent. + */ + pDevEvo->isoIOCoherencyModes.noncoherent = TRUE; + pDevEvo->nisoIOCoherencyModes.coherent = TRUE; + } + + pDevEvo->supportsSyncpts = + nvkms_kernel_supports_syncpts() && + nvRmEvoClassListCheck(pDevEvo, NV01_MEMORY_SYNCPOINT); + + return TRUE; +} + + +static void FreeDisplay(NVDispEvoPtr pDispEvo) +{ + if (pDispEvo == NULL) { + return; + } + +#if defined(DEBUG) + NvU32 apiHead; + + for (apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->pSwapGroup); apiHead++) { + nvAssert(pDispEvo->pSwapGroup[apiHead] == NULL); + } + + for (apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->vblankApiHeadState); apiHead++) { + NvU32 i; + NVDispVblankApiHeadState *pVblankApiHeadState = + &pDispEvo->vblankApiHeadState[apiHead]; + + for (i = 0; i < ARRAY_LEN(pVblankApiHeadState->vblankCallbackList); i++) { + nvAssert(nvListIsEmpty(&pVblankApiHeadState->vblankCallbackList[i])); + } + + nvAssert(nvListIsEmpty(&pVblankApiHeadState->vblankSemControl.list)); + } +#endif + + nvAssert(nvListIsEmpty(&pDispEvo->dpyList)); + + nvkms_free_ref_ptr(pDispEvo->ref_ptr); + + nvInvalidateRasterLockGroupsEvo(); + nvFree(pDispEvo); +} + + +static inline NVDispEvoPtr AllocDisplay(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo = nvCalloc(1, sizeof(NVDispEvoRec)); + NvU32 apiHead; + + if (pDispEvo == NULL) { + goto fail; + } + + pDispEvo->pDevEvo = pDevEvo; + + nvListInit(&pDispEvo->dpyList); + nvListInit(&pDispEvo->connectorList); + + pDispEvo->framelock.server = nvInvalidDpyId(); + pDispEvo->framelock.clients = nvEmptyDpyIdList(); + pDispEvo->framelock.currentServerHead = NV_INVALID_HEAD; + + pDispEvo->ref_ptr = nvkms_alloc_ref_ptr(pDispEvo); + if (!pDispEvo->ref_ptr) { + goto fail; + } + + for (apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->vblankApiHeadState); apiHead++) { + NvU32 i; + NVDispVblankApiHeadState *pVblankApiHeadState = + &pDispEvo->vblankApiHeadState[apiHead]; + + for (i = 0; i < ARRAY_LEN(pVblankApiHeadState->vblankCallbackList); i++) { + nvListInit(&pVblankApiHeadState->vblankCallbackList[i]); + } + nvListInit(&pVblankApiHeadState->vblankSemControl.list); + } + + return pDispEvo; + +fail: + FreeDisplay(pDispEvo); + + return NULL; +} + + +static void FreeDisplays(NVDevEvoPtr pDevEvo) +{ + unsigned int sd; + + for (sd = 0; sd < pDevEvo->nDispEvo; sd++) { + FreeDisplay(pDevEvo->pDispEvo[sd]); + pDevEvo->pDispEvo[sd] = NULL; + } + pDevEvo->nDispEvo = 0; +} + + +/*! + * Allocate the NVDispRecs for the given pDev. + * + * \param[in,out] pDev The device for which to allocate Displays. 
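+ *
+ * \return  TRUE on success; on failure, any partially allocated
+ *          NVDispEvoRecs are freed before returning FALSE.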
+ */ +static NvBool AllocDisplays(NVDevEvoPtr pDevEvo) +{ + unsigned int sd; + + nvAssert(pDevEvo->nDispEvo == 0); + + pDevEvo->nDispEvo = pDevEvo->numSubDevices; + + for (sd = 0; sd < pDevEvo->nDispEvo; sd++) { + NVDispEvoPtr pDispEvo = AllocDisplay(pDevEvo); + + if (pDispEvo == NULL) { + goto fail; + } + + pDevEvo->pDispEvo[sd] = pDispEvo; + + pDispEvo->displayOwner = sd; + + pDispEvo->gpuLogIndex = pDevEvo->pSubDevices[sd]->gpuLogIndex; + } + + return TRUE; + +fail: + FreeDisplays(pDevEvo); + return FALSE; +} + +/* + * Get the (id) list of all supported display devices for this pDisp. + */ +static NvBool ProbeValidDisplays(NVDispEvoPtr pDispEvo) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS getSupportedParams = { 0 }; + NvU32 ret; + + pDispEvo->connectorIds = nvEmptyDpyIdList(); + pDispEvo->displayPortMSTIds = nvEmptyDpyIdList(); + pDispEvo->dynamicDpyIds = nvEmptyDpyIdList(); + pDispEvo->validDisplays = nvEmptyDpyIdList(); + + getSupportedParams.subDeviceInstance = pDispEvo->displayOwner; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, + &getSupportedParams, sizeof(getSupportedParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to get supported display device(s)"); + } else { + NVDpyIdList dpyIdList; + NVDpyId dpyId; + + // Grab only the static ids from the list. Dynamic ids are + // used to communicate with devices that are connected to + // a connector that has a static id. + dpyIdList = nvNvU32ToDpyIdList(getSupportedParams.displayMask); + + FOR_ALL_DPY_IDS(dpyId, dpyIdList) { + NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS getOrInfoParams = { 0 }; + getOrInfoParams.subDeviceInstance = pDispEvo->displayOwner; + getOrInfoParams.displayId = nvDpyIdToNvU32(dpyId); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, + &getOrInfoParams, + sizeof(getOrInfoParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to get supported display device(s)"); + } else { + if (!getOrInfoParams.bIsDispDynamic) { + pDispEvo->connectorIds = + nvAddDpyIdToDpyIdList(dpyId, pDispEvo->connectorIds); + } + } + } + } + + pDispEvo->validDisplays = pDispEvo->connectorIds; + + return TRUE; +} + +/*! + * Return TRUE if every pDispEvo on this pDevEvo has an empty validDisplays. + */ +static NvBool NoValidDisplays(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + unsigned int sd; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + if (!nvDpyIdListIsEmpty(pDispEvo->validDisplays)) { + return FALSE; + } + } + + return TRUE; +} + + +/* + * Find the NvKmsConnectorSignalFormat for the pConnectorEvo. + */ +static NvKmsConnectorSignalFormat +GetSignalFormat(const NVConnectorEvoRec *pConnectorEvo) +{ + // SignalFormat represents a weird combination of our OR type and protocol. 
+    switch (pConnectorEvo->or.type) {
+    case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC:
+        switch (pConnectorEvo->or.protocol) {
+        default:
+            nvAssert(!"Unexpected OR protocol for DAC");
+            // fall through
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT:
+            return NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA;
+        }
+
+    case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR:
+        switch (pConnectorEvo->or.protocol) {
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM:
+            return NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS;
+
+        default:
+            nvAssert(!"Unexpected OR protocol for SOR");
+            // fall through
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
+            return NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS;
+
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
+            return NVKMS_CONNECTOR_SIGNAL_FORMAT_DP;
+        }
+
+    case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR:
+        switch (pConnectorEvo->or.protocol) {
+        default:
+            nvAssert(!"Unexpected OR protocol for PIOR");
+            // fall through
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC:
+            return NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS;
+        }
+
+    case NV0073_CTRL_SPECIFIC_OR_TYPE_DSI:
+        switch (pConnectorEvo->or.protocol) {
+        default:
+            nvAssert(!"Unexpected OR protocol for DSI");
+            // fall through
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI:
+            return NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI;
+        }
+
+    default:
+        nvAssert(!"Unexpected OR type");
+        return NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN;
+    }
+
+    return NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN;
+}
+
+
+static NvU32 GetDfpInfo(const NVConnectorEvoRec *pConnectorEvo)
+{
+    NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+
+    NV0073_CTRL_DFP_GET_INFO_PARAMS params = { 0 };
+    NvU32 ret;
+
+    if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
+        return 0x0;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_DFP_GET_INFO,
+                         &params,
+                         sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, "Failed to query DFP info");
+        return 0x0;
+    }
+
+    return params.flags;
+}
+
+typedef struct _AllocConnectorDispDataRec {
+    NvU32 dfpIndex;
+    NvU32 crtIndex;
+    NvU32 typeIndices[NVKMS_CONNECTOR_TYPE_MAX + 1];
+} AllocConnectorDispDataRec;
+
+/*!
+ * Query and setup information for a connector.
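+ *
+ * This queries the OR (output resource) configuration and RM's connector
+ * data table, binds DP-capable connectors to the DisplayPort library, and
+ * assigns the per-type index used to build names like "DP-0" or "HDMI-1".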
+ */
+static NvBool AllocConnector(
+    NVDispEvoPtr pDispEvo,
+    NVDpyId dpyId,
+    AllocConnectorDispDataRec *pAllocConnectorDispData)
+{
+    NVConnectorEvoPtr pConnectorEvo = NULL;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS params = { 0 };
+    NvU32 ret;
+    NvBool isDP;
+
+    pConnectorEvo = nvCalloc(1, sizeof(*pConnectorEvo));
+
+    if (pConnectorEvo == NULL) {
+        return FALSE;
+    }
+
+    pConnectorEvo->pDispEvo = pDispEvo;
+    pConnectorEvo->displayId = dpyId;
+    pConnectorEvo->type = NVKMS_CONNECTOR_TYPE_UNKNOWN;
+    pConnectorEvo->physicalIndex = NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION;
+    pConnectorEvo->physicalLocation = NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION;
+    /* Query the output resource configuration */
+    nvRmGetConnectorORInfo(pConnectorEvo, FALSE);
+
+    isDP =
+        (pConnectorEvo->or.type ==
+         NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) &&
+        (pConnectorEvo->or.protocol ==
+         NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A ||
+         pConnectorEvo->or.protocol ==
+         NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B);
+
+    /* Determine the connector type. */
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(dpyId);
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA,
+                         &params,
+                         sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+                     "Failed to determine connector type for connector "
+                     NV_DPY_ID_PRINT_FORMAT, nvDpyIdToPrintFormat(dpyId));
+        goto fail;
+    } else {
+
+        static const struct {
+            NvU32 type0073;
+            NvKmsConnectorType typeNvKms;
+        } connectorTypeTable[] = {
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_EXT,
+              NVKMS_CONNECTOR_TYPE_DP },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_USB_C,
+              NVKMS_CONNECTOR_TYPE_USBC },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_INT,
+              NVKMS_CONNECTOR_TYPE_DP },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_MINI_EXT,
+              NVKMS_CONNECTOR_TYPE_DP },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_1,
+              NVKMS_CONNECTOR_TYPE_DP },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_2,
+              NVKMS_CONNECTOR_TYPE_DP },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VGA_15_PIN,
+              NVKMS_CONNECTOR_TYPE_VGA },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_SVIDEO,
+              NVKMS_CONNECTOR_TYPE_DVI_I },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_COMPOSITE,
+              NVKMS_CONNECTOR_TYPE_DVI_I },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I,
+              NVKMS_CONNECTOR_TYPE_DVI_I },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_D,
+              NVKMS_CONNECTOR_TYPE_DVI_D },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_ADC,
+              NVKMS_CONNECTOR_TYPE_ADC },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_1,
+              NVKMS_CONNECTOR_TYPE_DVI_I },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_2,
+              NVKMS_CONNECTOR_TYPE_DVI_I },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_SPWG,
+              NVKMS_CONNECTOR_TYPE_LVDS },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_OEM,
+              NVKMS_CONNECTOR_TYPE_LVDS },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_A,
+              NVKMS_CONNECTOR_TYPE_HDMI },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_C_MINI,
+              NVKMS_CONNECTOR_TYPE_HDMI },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VIRTUAL_WFD,
+              NVKMS_CONNECTOR_TYPE_UNKNOWN },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DSI,
+              NVKMS_CONNECTOR_TYPE_DSI },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_SERIALIZER,
+              NVKMS_CONNECTOR_TYPE_DP_SERIALIZER },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_STEREO_3PIN_DIN,
NVKMS_CONNECTOR_TYPE_UNKNOWN }, + { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_UNKNOWN, + NVKMS_CONNECTOR_TYPE_UNKNOWN }, + }; + + int i, j; + + for (i = 0; i < params.count; i++) { + for (j = 0; j < ARRAY_LEN(connectorTypeTable); j++) { + if (connectorTypeTable[j].type0073 == params.data[i].type) { + if (pConnectorEvo->type == NVKMS_CONNECTOR_TYPE_UNKNOWN) { + pConnectorEvo->type = connectorTypeTable[j].typeNvKms; + } else { + /* + * The only cases where we should see + * params.count > 1 (and thus attempt to + * assign pConnectorEvo->type multiple times) + * should be where all the + * NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_* + * values map to the same NvKmsConnectorType; + */ + nvAssert(pConnectorEvo->type == + connectorTypeTable[j].typeNvKms); + } + break; + } + } + if (j == ARRAY_LEN(connectorTypeTable)) { + nvAssert(!"Unhandled connector type!"); + } + + if (i == 0) { + pConnectorEvo->physicalIndex = params.data[i].index; + pConnectorEvo->physicalLocation = params.data[i].location; + } else { + nvAssert(pConnectorEvo->physicalIndex == params.data[i].index); + nvAssert(pConnectorEvo->physicalLocation == + params.data[i].location); + } + } + + pConnectorEvo->ddcPartnerDpyIdsList = nvNvU32ToDpyIdList(params.DDCPartners); + } + + /* If the connector type is unknown, ignore this connector. */ + if (pConnectorEvo->type == NVKMS_CONNECTOR_TYPE_UNKNOWN) { + nvFree(pConnectorEvo); + return TRUE; + } + + /* + * Ignore connectors that use DP protocol, but don't have a + * DP-compatible type. + */ + if (isDP && + ((pConnectorEvo->type != NVKMS_CONNECTOR_TYPE_DP) && + !nvConnectorIsDPSerializer(pConnectorEvo) && + (pConnectorEvo->type != NVKMS_CONNECTOR_TYPE_USBC))) { + nvFree(pConnectorEvo); + return TRUE; + } + + /* + * Bind connector to the DP lib if DP capable. Serializer + * connector is not managed by DP lib. + */ + if (isDP && + !nvConnectorIsDPSerializer(pConnectorEvo)) { + pConnectorEvo->pDpLibConnector = nvDPCreateConnector(pConnectorEvo); + if (!pConnectorEvo->pDpLibConnector) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to initialize DisplayPort support for " + NV_DPY_ID_PRINT_FORMAT, nvDpyIdToPrintFormat(dpyId)); + goto fail; + } + } + + pConnectorEvo->signalFormat = GetSignalFormat(pConnectorEvo); + + pConnectorEvo->dfpInfo = GetDfpInfo(pConnectorEvo); + + /* Assign connector indices. 
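+     *
+     * legacyTypeIndex counts connectors per legacy RM type (CRT vs. DFP),
+     * while typeIndex counts per NvKms connector type and feeds the
+     * "<type>-<index>" name constructed below.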
*/ + + pConnectorEvo->legacyType = + GetLegacyConnectorType(pDispEvo, pConnectorEvo->displayId); + + switch (pConnectorEvo->legacyType) { + case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT: + pConnectorEvo->legacyTypeIndex = + pAllocConnectorDispData->crtIndex++; + break; + case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP: + pConnectorEvo->legacyTypeIndex = + pAllocConnectorDispData->dfpIndex++; + break; + default: + nvAssert(!"Unknown connector type"); + break; + } + + nvAssert(pConnectorEvo->type < + ARRAY_LEN(pAllocConnectorDispData->typeIndices)); + pConnectorEvo->typeIndex = + pAllocConnectorDispData->typeIndices[pConnectorEvo->type]++; + + nvListAppend(&pConnectorEvo->connectorListEntry, &pDispEvo->connectorList); + + nvkms_snprintf(pConnectorEvo->name, sizeof(pConnectorEvo->name), "%s-%u", + NvKmsConnectorTypeString(pConnectorEvo->type), + pConnectorEvo->typeIndex); + + return TRUE; + +fail: + nvFree(pConnectorEvo); + return FALSE; +} + + +static void FreeConnectors(NVDispEvoPtr pDispEvo) +{ + NVConnectorEvoPtr pConnectorEvo, pConnectorEvoNext; + + nvListForEachEntry_safe(pConnectorEvo, pConnectorEvoNext, + &pDispEvo->connectorList, connectorListEntry) { + // Unbind DP lib from the connector + nvDPDestroyConnector(pConnectorEvo->pDpLibConnector); + pConnectorEvo->pDpLibConnector = NULL; + nvListDel(&pConnectorEvo->connectorListEntry); + nvFree(pConnectorEvo); + } +} + + +/*! + * Allocate and initialize the connector structs for the given pDisp. + * + * NOTE: Each Display ID in pDispEvo->connectorIds (aka the + * NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED mask) is a possible display + * connection to the GPU which is static after boot. + */ +static NvBool AllocConnectors(NVDispEvoPtr pDispEvo) +{ + NVDpyId dpyId; + NVConnectorEvoPtr pConnectorEvo; + AllocConnectorDispDataRec allocConnectorDispData = { }; + + nvAssert(nvListIsEmpty(&pDispEvo->connectorList)); + + if (nvDpyIdListIsEmpty(pDispEvo->connectorIds)) { + /* Allow boards with no connectors */ + return TRUE; + } + + /* Allocate the connectors */ + FOR_ALL_DPY_IDS(dpyId, pDispEvo->connectorIds) { + if (!AllocConnector(pDispEvo, dpyId, &allocConnectorDispData)) { + goto fail; + } + } + + /* + * Reassign pDispEvo->connectorIds, to exclude any connectors ignored above: + * AllocConnector() may return TRUE but not actually create a pConnectorEvo + * for some connectors reported by resman. + */ + pDispEvo->connectorIds = nvEmptyDpyIdList(); + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + pDispEvo->connectorIds = + nvAddDpyIdToDpyIdList(pConnectorEvo->displayId, + pDispEvo->connectorIds); + } + + pDispEvo->validDisplays = pDispEvo->connectorIds; + + return TRUE; + + fail: + FreeConnectors(pDispEvo); + return FALSE; +} + +static NvBool IsFlexibleWindowMapping(NvU32 windowHeadMask) +{ + return (windowHeadMask == + NV0073_CTRL_SPECIFIC_FLEXIBLE_HEAD_WINDOW_ASSIGNMENT); +} + +/*! + * Query the number of heads and save the result in pDevEvo->numHeads. + * Get window head assignment and save it in pDevEvo->headForWindow[win]. + * + * Query the number of heads on each pDisp of the pDev and limit to + * the minimum across all pDisps. Query the headMask on each pDisp and + * take the intersection across pDisps. Query the window-head assignment + * and if it is fully flexible, assign WINDOWs (2N) and (2N + 1) to HEAD N. + * Otherwise, use the queried assignment. + * + * Limit the number of heads to the number of bits in the headMask. Ignore + * the heads which don't have any windows assigned to them and heads which + * create holes in the headMask. 
If a head which has assigned windows gets + * pruned out, assign NV_INVALID_HEAD to those windows. + * + * \param[in,out] pDev This is the device pointer; the pDisps within + * it are used to query per-GPU information. + * The result is written to pDevEvo->numHeads. + * + * \return Return TRUE if numHeads are correctly queried and + * window-head assignment is done. + * Return FALSE if numHeads or window-head assignment + * could not be queried. + */ +static NvBool ProbeHeadCountAndWindowAssignment(NVDevEvoPtr pDevEvo) +{ + NvU32 numHeads = 0, headMask = 0; + NvU32 headsWithWindowsMask = 0; + int sd, head, numBits; + NVDispEvoPtr pDispEvo; + NvBool first = TRUE; + NvBool isFlexibleWindowMapping = NV_TRUE; + NvU32 win; + NvU32 ret; + + pDevEvo->numHeads = 0; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS numHeadsParams = { 0 }; + NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS headMaskParams = { 0 }; + NV0073_CTRL_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT_PARAMS winHeadAssignParams = { }; + + numHeadsParams.subDeviceInstance = sd; + numHeadsParams.flags = 0; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS, + &numHeadsParams, sizeof(numHeadsParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to get the number of heads"); + return FALSE; + } + + if (numHeadsParams.numHeads == 0) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "No heads found on board!"); + return FALSE; + } + + if (numHeadsParams.numHeads > NV_MAX_HEADS) + { + nvEvoLog(EVO_LOG_WARN, + "HW supports %d heads. Limiting to %d heads", + numHeadsParams.numHeads, NV_MAX_HEADS); + + numHeadsParams.numHeads = NV_MAX_HEADS; + } + + if (numHeads == 0) { + numHeads = numHeadsParams.numHeads; + } else { + if (numHeads != numHeadsParams.numHeads) { + NvU32 minNumHeads = + NV_MIN(numHeads, numHeadsParams.numHeads); + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Unexpected numbers of heads " + "(%d, %d); clamping to %d", + numHeads, numHeadsParams.numHeads, minNumHeads); + numHeads = minNumHeads; + } + } + + headMaskParams.subDeviceInstance = sd; + + ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK, + &headMaskParams, sizeof(headMaskParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to get head configuration"); + return FALSE; + } + + if (headMask == 0) { + headMask = headMaskParams.headMask; + } else { + if (headMask != headMaskParams.headMask) { + NvU32 intersectedHeadMask = + headMask & headMaskParams.headMask; + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Unexpected head configurations " + "(0x%02x, 0x%02x); limiting to 0x%02x", + headMask, headMaskParams.headMask, + intersectedHeadMask); + headMask = intersectedHeadMask; + } + } + + winHeadAssignParams.subDeviceInstance = sd; + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT, + &winHeadAssignParams, sizeof(winHeadAssignParams)); + + if (ret == NVOS_STATUS_SUCCESS) { + NvBool windowAssigned[NV_MAX_HEADS] = { FALSE }; + + for (win = 0; win < NVKMS_MAX_WINDOWS_PER_DISP; win++) { + NvU32 windowHeadMask = winHeadAssignParams.windowHeadMask[win]; + + if ((win == 0) && first) { + isFlexibleWindowMapping = IsFlexibleWindowMapping(windowHeadMask); + } else if (isFlexibleWindowMapping) { + /* + * Currently, if one window is completely flexible, 
then all are. + * In case of fully flexible window mapping, if windowHeadMask is + * zero for a window, then that window is not present in HW. + */ + nvAssert(!windowHeadMask || (isFlexibleWindowMapping == + IsFlexibleWindowMapping(windowHeadMask))); + } + + /* + * For custom window mapping, if windowHeadMask is 0, then head + * is not assigned to this window. For flexible window mapping, + * if windowHeadMask is 0, then the window is not present in HW. + */ + if (windowHeadMask == 0) { + continue; + } + + if (isFlexibleWindowMapping) { + /* + * TODO: For now assign WINDOWs (2N) and (2N + 1) to HEAD N when + * completely flexible window assignment is specified by window + * head assignment mask. + */ + head = win >> 1; + windowHeadMask = NVBIT_TYPE(head, NvU8); + nvAssert(head < numHeads); + } else { + // We don't support same window assigned to multiple heads. + nvAssert(ONEBITSET(windowHeadMask)); + + head = BIT_IDX_32(windowHeadMask); + } + + if (!nvkms_enable_overlay_layers() && windowAssigned[head]) { + continue; + } + + if (first) { + pDevEvo->headForWindow[win] = head; + headsWithWindowsMask |= windowHeadMask; + windowAssigned[head] = TRUE; + } else { + nvAssert(pDevEvo->headForWindow[win] == head); + } + } + } else if (ret != NVOS_STATUS_ERROR_NOT_SUPPORTED) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to get window-head assignment"); + return FALSE; + } else { + // Pre-Volta, we don't need to populate pDevEvo->headForWindow[] and + // each HW head has a window assigned. + headsWithWindowsMask = headMask; + } + + if (first) { + first = FALSE; + } + } + + /* Check whether heads which have windows assigned are actually present in HW */ + nvAssert(!(~headMask & headsWithWindowsMask)); + + /* Intersect heads present in HW with heads which have windows assigned */ + headMask &= headsWithWindowsMask; + + /* clamp numHeads to the number of bits in headMask */ + + numBits = nvPopCount32(headMask); + + /* for now, we only support headMask when it is tightly packed at 0 */ + + for (head = 0; head < numBits; head++) { + if ((headMask & (1 << head)) == 0) { + NvU32 modifiedHeadMask = (1 << head) - 1; + + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "The head configuration (0x%02x) " + "is unexpected; limiting to 0x%02x", headMask, + modifiedHeadMask); + + headMask = modifiedHeadMask; + numBits = head; + break; + } + } + + /* headMask should never increase numHeads */ + + if (numBits > numHeads) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "The head configuration (0x%02x) " + "is inconsistent with the number of heads (%d)", + headMask, numHeads); + } else if (numBits < numHeads) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Correcting number of heads for " + "current head configuration (0x%02x)", headMask); + numHeads = numBits; + } + + pDevEvo->numHeads = numHeads; + + /* + * If a head which has assigned windows gets pruned out, assign + * NV_INVALID_HEAD to those windows. + */ + for (win = 0; win < NVKMS_MAX_WINDOWS_PER_DISP; win++) { + if ((pDevEvo->headForWindow[win] == NV_INVALID_HEAD) || + (pDevEvo->headForWindow[win] < pDevEvo->numHeads)) { + continue; + } + pDevEvo->headForWindow[win] = NV_INVALID_HEAD; + } + + return TRUE; +} + +/*! + * Set a pConnectorEvo's software state based on the boot head assignment. 
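+ *
+ * The boot display is looked up by display ID; if RM reports the ID as
+ * dynamic, it is mapped back to its root-port connector before the head
+ * is marked active on that connector.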
+ */
+static void MarkConnectorBootHeadActive(NVDispEvoPtr pDispEvo, NvU32 head)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVDpyId displayId, rootPortId;
+    NVConnectorEvoPtr pConnectorEvo;
+    NVDispHeadStateEvoPtr pHeadState;
+    NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS params = { 0 };
+    NvU32 ret;
+
+    // Use the first displayId in the boot display list.
+    //
+    // TODO: What should we do if more than one dpy ID is listed for a boot
+    // display?
+    nvAssert(nvCountDpyIdsInDpyIdList(pDispEvo->vbiosDpyConfig[head]) == 1);
+    displayId = nvNextDpyIdInDpyIdListUnsorted(nvInvalidDpyId(),
+                                               pDispEvo->vbiosDpyConfig[head]);
+
+    // The displayId reported by RM could be a dynamic one. Find the root port
+    // for this ID.
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(displayId);
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
+                         &params, sizeof(params));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        return;
+    }
+
+    if (params.bIsDispDynamic) {
+        rootPortId = nvNvU32ToDpyId(params.rootPortId);
+    } else {
+        rootPortId = displayId;
+    }
+
+    pConnectorEvo = nvGetConnectorFromDisp(pDispEvo, rootPortId);
+    if (!pConnectorEvo) {
+        return;
+    }
+
+    if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
+                                   NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) {
+
+        nvAssert(params.index != NV_INVALID_OR);
+        if (params.index == NV_INVALID_OR) {
+            // If RM reported that a head is driving this dpyId, then there
+            // should be an SOR assigned. However, due to a bug in the way
+            // PDB_PROP_GPU_DISABLE_VGA_CONSOLE_RESTORATION_ON_RESUME is
+            // handled, RM can report an "active" head with no SOR assigned on
+            // certain specific GPUs. If that happens, just treat the head as
+            // disabled. See bug 1692425.
+            pDispEvo->vbiosDpyConfig[head] = nvEmptyDpyIdList();
+            return;
+        } else {
+            // Track the SOR assignment for this connector. See the comment in
+            // nvRmGetConnectorORInfo() for why this is deferred until now.
+            nvAssert(pConnectorEvo->or.primary == NV_INVALID_OR);
+            pConnectorEvo->or.primary = params.index;
+        }
+    }
+    nvAssert(pConnectorEvo->or.primary == params.index);
+
+    pHeadState = &pDispEvo->headState[head];
+
+    nvAssert(!nvHeadIsActive(pDispEvo, head));
+
+    pHeadState->pConnectorEvo = pConnectorEvo;
+    pHeadState->activeRmId = nvDpyIdToNvU32(displayId);
+
+    // Track the assigned head.
+    pConnectorEvo->or.ownerHeadMask[params.index] |= NVBIT(head);
+
+    nvEvoStateStartNoLock(&pDispEvo->pDevEvo->gpus[pDispEvo->displayOwner]);
+}
+
+/*!
+ * Query the vbios assignment of heads to display devices, and cache
+ * in pDispEvo->vbiosDpyConfig for later use by nvDPResume().
+ * + * \param[in,out] pDisp This is the GPU display pointer; the result is + * written to pDispEvo->vbiosDpyConfig + */ +static void GetVbiosHeadAssignmentOneDisp(NVDispEvoPtr pDispEvo) +{ + unsigned int head; + NvU32 ret = NVOS_STATUS_ERROR_GENERIC; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + nvkms_memset(&pDispEvo->vbiosDpyConfig, 0, + sizeof(pDispEvo->vbiosDpyConfig)); + + /* if there is no display, there is no origDpyConfig */ + + nvAssert(pDevEvo->displayCommonHandle != 0); + + /* + * get the vbios assignment of heads within the GPU, so that + * later when we do head assignment, we can try to preserve the + * existing assignment; see bug 208072 + */ + + for (head = 0; head < pDevEvo->numHeads; head++) { + NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS activeDpysParams = { 0 }; + + activeDpysParams.subDeviceInstance = pDispEvo->displayOwner; + activeDpysParams.head = head; + /* + * We want to check for active displays set by any low-level software + * such as VBIOS, not just those set by an RM client + */ + activeDpysParams.flags = + DRF_DEF(0073, _CTRL_SYSTEM_GET_ACTIVE_FLAGS, _CLIENT, _DISABLE); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, + &activeDpysParams, sizeof(activeDpysParams)); + + if (ret == NVOS_STATUS_SUCCESS) { + // XXX TODO: If this is a dynamic display ID, it's not necessarily + // correlated with the NVDpyId we'll assign to a dynamic pDpyEvo + // later. We should instead store this as an NvU32 and assign it as + // the activeRmId for a dynamic pDpyEvo that DPLib reports as being + // driven by the firmware group. See bug 1656584. + pDispEvo->vbiosDpyConfig[head] = + nvNvU32ToDpyIdList(activeDpysParams.displayId); + if (activeDpysParams.displayId != 0) { + MarkConnectorBootHeadActive(pDispEvo, head); + } + } + + nvAssert(ret == NVOS_STATUS_SUCCESS); + } +} + +static void GetVbiosHeadAssignment(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + GetVbiosHeadAssignmentOneDisp(pDispEvo); + } +} + +/*! + * Query the boot display device(s). + */ +static void ProbeBootDisplays(NVDispEvoPtr pDispEvo) +{ + NvU32 ret; + NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS bootParams = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + pDispEvo->bootDisplays = nvEmptyDpyIdList(); + + bootParams.subDeviceInstance = pDispEvo->displayOwner; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_BOOT_DISPLAYS, + &bootParams, sizeof(bootParams)); + + if (ret == NVOS_STATUS_SUCCESS) { + pDispEvo->bootDisplays = + nvNvU32ToDpyIdList(bootParams.bootDisplayMask); + } +} + +/*! + * Query the 0073 display common object capabilities. + */ +static NvBool ProbeDisplayCommonCaps(NVDevEvoPtr pDevEvo) +{ + NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS capsParams = { }; + NvU32 ret; + + ct_assert(sizeof(pDevEvo->commonCapsBits) == sizeof(capsParams.capsTbl)); + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2, + &capsParams, sizeof(capsParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to determine display common capabilities"); + return FALSE; + } + nvkms_memcpy(pDevEvo->commonCapsBits, capsParams.capsTbl, + sizeof(pDevEvo->commonCapsBits)); + + return TRUE; +} + + +/*! + * Query the variable refresh rate (G-SYNC) capability of a display. 
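+ *
+ * The query is a per-client RM control
+ * (NV0000_CTRL_SYSTEM_GET_VRR_COOKIE_PRESENT) that reports whether a VRR
+ * platform cookie is present; the result is cached in
+ * pDispEvo->vrr.hasPlatformCookie.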
+ */
+static void ProbeVRRCaps(NVDispEvoPtr pDispEvo)
+{
+    NV0000_CTRL_SYSTEM_GET_VRR_COOKIE_PRESENT_PARAMS params = { 0 };
+    NvU32 ret;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         nvEvoGlobal.clientHandle,
+                         NV0000_CTRL_SYSTEM_GET_VRR_COOKIE_PRESENT,
+                         &params, sizeof(params));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        return;
+    }
+
+    pDispEvo->vrr.hasPlatformCookie = params.bIsPresent;
+
+}
+
+
+static NvBool ReadDPCDReg(NVConnectorEvoPtr pConnectorEvo,
+                          NvU32 dpcdAddr,
+                          NvU8 *dpcdData)
+{
+    NV0073_CTRL_DP_AUXCH_CTRL_PARAMS params = { };
+    NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo;
+
+    params.subDeviceInstance = pConnectorEvo->pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
+
+    params.cmd = DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _AUX);
+    params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _READ);
+
+    params.addr = dpcdAddr;
+
+    /* Requested size is 0-based */
+    params.size = 0;
+
+    if (nvRmApiControl(nvEvoGlobal.clientHandle,
+                       pDevEvo->displayCommonHandle,
+                       NV0073_CTRL_CMD_DP_AUXCH_CTRL,
+                       &params, sizeof(params)) != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                    "AUX read failed for DPCD addr 0x%x",
+                    dpcdAddr);
+        return FALSE;
+    }
+
+    if (params.size != 1U) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                    "AUX read returned 0 bytes for DPCD addr 0x%x",
+                    dpcdAddr);
+        return FALSE;
+    }
+
+    *dpcdData = params.data[0];
+
+    return TRUE;
+}
+
+NvBool nvWriteDPCDReg(NVConnectorEvoPtr pConnectorEvo,
+                      NvU32 dpcdAddr,
+                      NvU8 dpcdData)
+{
+    NV0073_CTRL_DP_AUXCH_CTRL_PARAMS params = { };
+    NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo;
+
+    params.subDeviceInstance = pConnectorEvo->pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
+
+    params.cmd = DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _AUX);
+    params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _WRITE);
+
+    params.addr = dpcdAddr;
+    params.data[0] = dpcdData;
+
+    /* Requested size is 0-based */
+    params.size = 0;
+
+    if (nvRmApiControl(nvEvoGlobal.clientHandle,
+                       pDevEvo->displayCommonHandle,
+                       NV0073_CTRL_CMD_DP_AUXCH_CTRL,
+                       &params, sizeof(params)) != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                    "AUX write failed for DPCD addr 0x%x",
+                    dpcdAddr);
+        return FALSE;
+    }
+
+    if (params.size != 1U) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                    "Wrote 0 bytes for DPCD addr 0x%x",
+                    dpcdAddr);
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+static NvBool ReadDPSerializerCaps(NVConnectorEvoPtr pConnectorEvo)
+{
+    NVDpyIdList oneDpyIdList =
+        nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId);
+    NVDpyIdList connectedList;
+    NvU8 dpcdData = 0;
+
+    /*
+     * This call will not only confirm that the DP serializer is connected, but
+     * will also power on the corresponding DPAUX pads if the serializer is
+     * detected via NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE. The DPAUX pads
+     * need to be enabled for the DPCD reads below.
+     */
+    connectedList = nvRmGetConnectedDpys(pConnectorEvo->pDispEvo, oneDpyIdList);
+
+    /* DP serializer may not be connected, fake the display in such case */
+    if (nvDpyIdListIsEmpty(connectedList)) {
+        nvEvoLogDev(pConnectorEvo->pDispEvo->pDevEvo, EVO_LOG_INFO,
+                    "Serializer connector %s may not be connected, hardcoding connector parameters!",
+                    pConnectorEvo->name);
+        /* Hardcoding connector params */
+        pConnectorEvo->dpSerializerCaps.maxLinkBW =
+            NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS;
+        pConnectorEvo->dpSerializerCaps.maxLaneCount =
+            NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4;
+        pConnectorEvo->dpSerializerCaps.supportsMST = FALSE;
+        goto end;
+    }
+
+    if (!nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedList)) {
+        nvEvoLogDev(pConnectorEvo->pDispEvo->pDevEvo, EVO_LOG_ERROR,
+                    "Serializer connector %s is not present in DpyIdList!",
+                    pConnectorEvo->name);
+        return FALSE;
+    }
+
+    if (!ReadDPCDReg(pConnectorEvo, NV_DPCD_MAX_LINK_BANDWIDTH, &dpcdData)) {
+        return FALSE;
+    }
+    pConnectorEvo->dpSerializerCaps.maxLinkBW =
+        DRF_VAL(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, dpcdData);
+
+    if (!ReadDPCDReg(pConnectorEvo, NV_DPCD_MAX_LANE_COUNT, &dpcdData)) {
+        return FALSE;
+    }
+    pConnectorEvo->dpSerializerCaps.maxLaneCount =
+        DRF_VAL(_DPCD, _MAX_LANE_COUNT, _LANE, dpcdData);
+
+    if (!ReadDPCDReg(pConnectorEvo, NV_DPCD_MSTM, &dpcdData)) {
+        return FALSE;
+    }
+    pConnectorEvo->dpSerializerCaps.supportsMST =
+        FLD_TEST_DRF(_DPCD, _MSTM, _CAP, _YES, dpcdData);
+
+end:
+    return TRUE;
+}
+
+static NvBool AllocDPSerializerDpys(NVConnectorEvoPtr pConnectorEvo)
+{
+    NvBool supportsMST;
+    NvU32 numHeads;
+    NvU32 i;
+
+    if (!nvConnectorIsDPSerializer(pConnectorEvo)) {
+        return TRUE;
+    }
+
+    if (!ReadDPSerializerCaps(pConnectorEvo)) {
+        return FALSE;
+    }
+
+    supportsMST = pConnectorEvo->dpSerializerCaps.supportsMST;
+    numHeads = pConnectorEvo->pDispEvo->pDevEvo->numHeads;
+    for (i = 0; i < numHeads && supportsMST; i++) {
+        NVDpyEvoPtr pDpyEvo = NULL;
+        NvBool dynamicDpyCreated = FALSE;
+        char address[5] = { };
+
+        nvkms_snprintf(address, sizeof(address), "0.%d", i + 1);
+        pDpyEvo = nvGetDPMSTDpyEvo(pConnectorEvo, address,
+                                   &dynamicDpyCreated);
+        if ((pDpyEvo == NULL) || !dynamicDpyCreated) {
+            return FALSE;
+        }
+
+        pDpyEvo->dp.serializerStreamIndex = i;
+    }
+
+    return TRUE;
+}
+
+/*!
+ * Allocate the pDpyEvos for each connector on this pDispEvo.
+ */
+static NvBool AllocDpys(NVDispEvoPtr pDispEvo)
+{
+    NVConnectorEvoPtr pConnectorEvo;
+
+    // At this point, there should be no DisplayPort multistream devices.
+    nvAssert(nvDpyIdListsAreEqual(pDispEvo->validDisplays,
+                                  pDispEvo->connectorIds));
+    nvAssert(nvDpyIdListIsEmpty(pDispEvo->displayPortMSTIds));
+    nvAssert(nvDpyIdListIsEmpty(pDispEvo->dynamicDpyIds));
+
+    FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
+        NVDpyEvoPtr pDpyEvo;
+
+        pDpyEvo = nvAllocDpyEvo(pDispEvo, pConnectorEvo,
+                                pConnectorEvo->displayId, NULL);
+
+        if (pDpyEvo == NULL) {
+            nvAssert(!"Failed to allocate pDpy");
+            return FALSE;
+        }
+
+        if (!AllocDPSerializerDpys(pConnectorEvo)) {
+            nvAssert(!"Failed to allocate non DPLib managed dpys");
+            return FALSE;
+        }
+    }
+
+    return TRUE;
+}
+
+static void FreeDpys(NVDispEvoPtr pDispEvo)
+{
+    NVDpyEvoPtr pDpyEvo, pDpyEvoTmp;
+
+    nvListForEachEntry_safe(pDpyEvo, pDpyEvoTmp,
+                            &pDispEvo->dpyList, dpyListEntry) {
+        nvFreeDpyEvo(pDispEvo, pDpyEvo);
+    }
+}
+
+
+/*!
+ * Receive hotplug notification from resman.
+ *
+ * This function is registered as the kernel callback function from
+ * resman when an NV2080_NOTIFIERS_HOTPLUG event is generated.
+ * + * However, this function is called with resman's context (resman locks held, + * etc). Schedule deferred work, so that we can process the hotplug event + * without resman's encumbrances. + */ +static void ReceiveHotplugEvent(void *arg, void *pEventDataVoid, NvU32 hEvent, + NvU32 Data, NV_STATUS Status) +{ + (void) nvkms_alloc_timer_with_ref_ptr( + nvHandleHotplugEventDeferredWork, /* callback */ + arg, /* argument (this is a ref_ptr to a pDispEvo) */ + 0, /* dataU32 */ + 0); +} + +static void ReceiveDPIRQEvent(void *arg, void *pEventDataVoid, NvU32 hEvent, + NvU32 Data, NV_STATUS Status) +{ + // XXX The displayId of the connector that generated the event should be + // available here somewhere. We should figure out how to find that and + // plumb it through to nvHandleDPIRQEventDeferredWork. + (void) nvkms_alloc_timer_with_ref_ptr( + nvHandleDPIRQEventDeferredWork, /* callback */ + arg, /* argument (this is a ref_ptr to a pDispEvo) */ + 0, /* dataU32 */ + 0); +} + +NvBool nvRmRegisterCallback(const NVDevEvoRec *pDevEvo, + NVOS10_EVENT_KERNEL_CALLBACK_EX *cb, + struct nvkms_ref_ptr *ref_ptr, + NvU32 parentHandle, + NvU32 eventHandle, + Callback5ArgVoidReturn func, + NvU32 event) +{ + NV0005_ALLOC_PARAMETERS allocEventParams = { 0 }; + + cb->func = func; + cb->arg = ref_ptr; + + allocEventParams.hParentClient = nvEvoGlobal.clientHandle; + allocEventParams.hClass = NV01_EVENT_KERNEL_CALLBACK_EX; + allocEventParams.notifyIndex = event; + allocEventParams.data = NV_PTR_TO_NvP64(cb); + + return nvRmApiAlloc(nvEvoGlobal.clientHandle, + parentHandle, + eventHandle, + NV01_EVENT_KERNEL_CALLBACK_EX, + &allocEventParams) + == NVOS_STATUS_SUCCESS; +} + +static NvBool RegisterDispCallback(NVOS10_EVENT_KERNEL_CALLBACK_EX *cb, + NVDispEvoPtr pDispEvo, + NvU32 handle, + Callback5ArgVoidReturn func, + NvU32 event) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 subDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle; + + return nvRmRegisterCallback(pDevEvo, cb, pDispEvo->ref_ptr, subDevice, + handle, func, event); +} + +static void +DifrPrefetchEventDeferredWork(void *dataPtr, NvU32 dataU32) +{ + NVDevEvoPtr pDevEvo = dataPtr; + size_t l2CacheSize = (size_t)dataU32; + NvU32 status; + + nvAssert(pDevEvo->pDifrState); + + status = nvDIFRPrefetchSurfaces(pDevEvo->pDifrState, l2CacheSize); + nvDIFRSendPrefetchResponse(pDevEvo->pDifrState, status); +} + +static void DifrPrefetchEvent(void *arg, void *pEventDataVoid, + NvU32 hEvent, NvU32 Data, NV_STATUS Status) +{ + Nv2080LpwrDifrPrefetchNotification *notif = + (Nv2080LpwrDifrPrefetchNotification *)pEventDataVoid; + + (void)nvkms_alloc_timer_with_ref_ptr( + DifrPrefetchEventDeferredWork, /* callback */ + arg, /* argument (this is a ref_ptr to a pDevEvo) */ + notif->l2CacheSize, /* dataU32 */ + 0); /* timeout: schedule the work immediately */ +} + +enum NvKmsAllocDeviceStatus nvRmAllocDisplays(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + unsigned int sd; + enum NvKmsAllocDeviceStatus status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + NvU32 totalDispNumSubDevices = 0; + + pDevEvo->sli.bridge.present = FALSE; + + if (!QueryGpuCapabilities(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to query GPU capabilities"); + goto fail; + } + + if (pDevEvo->supportsSyncpts) { + pDevEvo->preSyncptTable = + nvCalloc(1, sizeof(NVEvoSyncpt) * NV_SYNCPT_GLOBAL_TABLE_LENGTH); + if (pDevEvo->preSyncptTable == NULL) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate memory for pre-syncpt table"); + goto fail; + } + } + + if 
(!AllocDisplays(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to allocate displays"); + goto fail; + } + + /* allocate the display common object for this device */ + + if (nvRmEvoClassListCheck(pDevEvo, NV04_DISPLAY_COMMON)) { + + pDevEvo->displayCommonHandle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->displayCommonHandle, + NV04_DISPLAY_COMMON, NULL) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to initialize the display " + "subsystem for the NVIDIA graphics device!"); + goto fail; + + } + } else { + /* + * Not supporting NV04_DISPLAY_COMMON is expected in some + * configurations: e.g., GF117 (an Optimus-only or "coproc" GPU), + * emulation netlists. Fail with "no hardware". + */ + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + + if (!ProbeDisplayCommonCaps(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + + if (!ProbeHeadCountAndWindowAssignment(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + if (!ProbeValidDisplays(pDispEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + + /* Keep track of connectors per pDisp and bind to DP lib if capable */ + if (!AllocConnectors(pDispEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + } + + /* + * If there are no valid display devices, fail with "no hardware". + */ + if (NoValidDisplays(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + + /* + * The number of numSubDevices across disps should equal the + * device's numSubDevices. + */ + totalDispNumSubDevices = 0; + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + totalDispNumSubDevices++; + } + + if (totalDispNumSubDevices != pDevEvo->numSubDevices) { + nvAssert(!"Number of disps' subdevices does not match device's"); + } + + /* + * Allocate an NV event for each pDispEvo on the corresponding + * subDevice, tied to the pDevEvo's OS event. + */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS setEventParams = { }; + NvU32 subDevice, ret; + + subDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle; + + pDispEvo->hotplugEventHandle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (!RegisterDispCallback(&pDispEvo->rmHotplugCallback, pDispEvo, + pDispEvo->hotplugEventHandle, + ReceiveHotplugEvent, + NV2080_NOTIFIERS_HOTPLUG)) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Failed to register display hotplug event"); + } + + // Enable hotplug notifications from this subdevice. + setEventParams.event = NV2080_NOTIFIERS_HOTPLUG; + setEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + if ((ret = nvRmApiControl(nvEvoGlobal.clientHandle, + subDevice, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &setEventParams, + sizeof(setEventParams))) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Failed to register display hotplug " + "handler: 0x%x\n", ret); + } + } + + // Allocate a handler for the DisplayPort "IRQ" event, which is signaled + // when there's a short interruption in the hotplug detect line. 
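+    //
+    // Note that ReceiveHotplugEvent() and ReceiveDPIRQEvent() above only
+    // schedule deferred work, because they run in resman's context.  As an
+    // illustrative sketch (hypothetical helper; the real handlers live
+    // elsewhere in NVKMS), a deferred handler has this shape:
+    //
+    //   static void ExampleDeferredWork(void *dataPtr, NvU32 dataU32)
+    //   {
+    //       NVDispEvoPtr pDispEvo = dataPtr;
+    //       /* resman's locks are no longer held here, so it is safe to
+    //        * re-query connector state. */
+    //       NVDpyIdList connected =
+    //           nvRmGetConnectedDpys(pDispEvo, pDispEvo->connectorIds);
+    //       (void)connected;
+    //   }
+    //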
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+        NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS setEventParams = { };
+        NvU32 subDevice, ret;
+
+        subDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle;
+
+        pDispEvo->DPIRQEventHandle =
+            nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
+
+        if (!RegisterDispCallback(&pDispEvo->rmDPIRQCallback, pDispEvo,
+                                  pDispEvo->DPIRQEventHandle, ReceiveDPIRQEvent,
+                                  NV2080_NOTIFIERS_DP_IRQ)) {
+            nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
+                        "Failed to register DisplayPort interrupt event");
+        }
+
+        // Enable DP IRQ notifications from this subdevice.
+        setEventParams.event = NV2080_NOTIFIERS_DP_IRQ;
+        setEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
+        if ((ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                                  subDevice,
+                                  NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION,
+                                  &setEventParams,
+                                  sizeof(setEventParams)))
+                != NVOS_STATUS_SUCCESS) {
+            nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
+                        "Failed to register DisplayPort interrupt "
+                        "handler: 0x%x\n", ret);
+        }
+    }
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+        ProbeBootDisplays(pDispEvo);
+
+        if (!AllocDpys(pDispEvo)) {
+            goto fail;
+        }
+
+        ProbeVRRCaps(pDispEvo);
+    }
+
+    nvAllocVrrEvo(pDevEvo);
+
+    return NVKMS_ALLOC_DEVICE_STATUS_SUCCESS;
+
+fail:
+    nvRmDestroyDisplays(pDevEvo);
+    return status;
+}
+
+
+void nvRmDestroyDisplays(NVDevEvoPtr pDevEvo)
+{
+    NvU32 ret;
+    NVDispEvoPtr pDispEvo;
+    int dispIndex;
+    NvS64 tmp;
+
+    nvFreeVrrEvo(pDevEvo);
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+
+        // Before freeing anything, dump anything left in the RM's DisplayPort
+        // AUX channel log.
+        if (pDispEvo->dpAuxLoggingEnabled) {
+            do {
+                ret = nvRmQueryDpAuxLog(pDispEvo, &tmp);
+            } while (ret && tmp);
+        }
+
+        // Free the DisplayPort IRQ event.
+        if (pDispEvo->DPIRQEventHandle != 0) {
+            nvRmApiFree(nvEvoGlobal.clientHandle,
+                        nvEvoGlobal.clientHandle,
+                        pDispEvo->DPIRQEventHandle);
+            nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
+                               pDispEvo->DPIRQEventHandle);
+            pDispEvo->DPIRQEventHandle = 0;
+        }
+
+        // Free the hotplug event.
+        /*
+         * XXX I wish I could cancel anything scheduled by
+         * ReceiveHotplugEvent() and ReceiveDPIRQEvent() for this pDispEvo...
+         */
+        if (pDispEvo->hotplugEventHandle != 0) {
+            nvRmApiFree(nvEvoGlobal.clientHandle,
+                        nvEvoGlobal.clientHandle,
+                        pDispEvo->hotplugEventHandle);
+            nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
+                               pDispEvo->hotplugEventHandle);
+            pDispEvo->hotplugEventHandle = 0;
+        }
+    }
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+        FreeDpys(pDispEvo);
+        FreeConnectors(pDispEvo);
+    }
+
+    FreeDisplays(pDevEvo);
+
+    nvFree(pDevEvo->preSyncptTable);
+    pDevEvo->preSyncptTable = NULL;
+
+    if (pDevEvo->displayCommonHandle != 0) {
+        ret = nvRmApiFree(nvEvoGlobal.clientHandle,
+                          pDevEvo->deviceHandle,
+                          pDevEvo->displayCommonHandle);
+        if (ret != NVOS_STATUS_SUCCESS) {
+            nvAssert(!"Free(displayCommonHandle) failed");
+        }
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
+                           pDevEvo->displayCommonHandle);
+        pDevEvo->displayCommonHandle = 0;
+    }
+}
+
+
+/*!
+ * Allocate a display ID that we use to talk to RM about the dpy(s) on a
+ * head.
+ *
+ * \param[in]  pDispEvo  The display system on which to allocate the ID.
+ * \param[in]  dpyList   The list of dpys.
+ *
+ * \return  The display ID, or 0 on failure.
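+ *
+ * (All dpys in dpyList must be on the same connector and must agree on
+ * whether they are DP MST; the checks at the top of the function return 0
+ * otherwise.)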
+ */
+NvU32 nvRmAllocDisplayId(const NVDispEvoRec *pDispEvo, const NVDpyIdList dpyList)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS params = { 0 };
+    const NVDpyEvoRec *pDpyEvo;
+    const NVConnectorEvoRec *pConnectorEvo = NULL;
+    NvBool isDPMST = NV_FALSE;
+    NvU32 ret;
+
+    FOR_ALL_EVO_DPYS(pDpyEvo, dpyList, pDispEvo) {
+        if (pConnectorEvo == NULL) {
+            /* First dpy from the list: record pConnectorEvo and isDPMST */
+            pConnectorEvo = pDpyEvo->pConnectorEvo;
+            isDPMST = nvDpyEvoIsDPMST(pDpyEvo);
+        }
+
+        if (pConnectorEvo != pDpyEvo->pConnectorEvo ||
+            isDPMST != nvDpyEvoIsDPMST(pDpyEvo)) {
+            return 0;
+        }
+    }
+
+    nvAssert(nvConnectorUsesDPLib(pConnectorEvo) || !isDPMST);
+
+    if (!isDPMST) {
+        /* For non-MST dpy(s), simply return the static display ID of the
+         * connector */
+        return nvDpyIdToNvU32(pConnectorEvo->displayId);
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID,
+                         &params, sizeof(params));
+
+    if (ret == NVOS_STATUS_SUCCESS) {
+        return params.displayIdAssigned;
+    } else {
+        nvEvoLogDisp(pDispEvo, EVO_LOG_WARN,
+                     "Failed to allocate display resource.");
+    }
+
+    return 0;
+}
+
+
+/*!
+ * Send DISPLAY_CHANGE to resman.
+ *
+ * This should be called before and after each mode change, with the display
+ * mask describing the NEW display configuration.
+ */
+void nvRmBeginEndModeset(NVDispEvoPtr pDispEvo,
+                         enum NvKmsBeginEndModeset beginOrEnd,
+                         NvU32 mask)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS bracketParams = { };
+    NvU32 ret;
+
+    bracketParams.subDeviceInstance = pDispEvo->displayOwner;
+    bracketParams.newDevices = mask;
+    bracketParams.properties = 0; /* this is currently unused */
+    switch (beginOrEnd) {
+    case BEGIN_MODESET:
+        bracketParams.enable = NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_START;
+        break;
+    case END_MODESET:
+        bracketParams.enable = NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_END;
+        break;
+    }
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE,
+                         &bracketParams,
+                         sizeof(bracketParams));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"Failed NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE");
+    }
+}
+
+
+/*!
+ * Free an RM display ID, if it was allocated dynamically.
+ *
+ * This function frees a display ID if it was allocated by
+ * nvRmAllocDisplayId. If the display ID is static, this function does
+ * nothing.
+ *
+ * From ctrl0073dp.h: You must not call this function while either the ARM
+ * or ASSEMBLY state cache refers to this display-id. The head must not be
+ * attached.
+ *
+ * \param[in]  pDispEvo   The display system on which to free the ID.
+ * \param[in]  displayId  The display ID to free.
+ */
+void nvRmFreeDisplayId(const NVDispEvoRec *pDispEvo, NvU32 displayId)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS params = { 0 };
+    NVDpyId dpyId = nvNvU32ToDpyId(displayId);
+    NvU32 ret;
+
+    /* Do nothing if the display ID is a static one.
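+     * (A static display ID is one that names a connector directly; dynamic
+     * IDs are the ones handed out by nvRmAllocDisplayId() for DP MST.)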
+     */
+    if (nvDpyIdIsInDpyIdList(dpyId, pDispEvo->connectorIds)) {
+        return;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = displayId;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID,
+                         &params, sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+                     "Failed to relinquish display resource.");
+    }
+}
+
+
+/*!
+ * Query Resman for the (broad) display device type.
+ */
+static NvU32 GetLegacyConnectorType(NVDispEvoPtr pDispEvo, NVDpyId dpyId)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS params = { 0 };
+    NvU32 ret;
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(dpyId);
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_SPECIFIC_GET_TYPE,
+                         &params, sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+                     "Failed to get the display device type.");
+        return NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_UNKNOWN;
+    }
+
+    nvAssert((params.displayType == NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) ||
+             (params.displayType == NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP));
+
+    return params.displayType;
+}
+
+
+/*!
+ * Query RM for the current OR properties of the given connector.
+ *
+ * If 'assertOnly' is TRUE, this function will only assert that the OR
+ * configuration has not changed.
+ */
+void nvRmGetConnectorORInfo(NVConnectorEvoPtr pConnectorEvo, NvBool assertOnly)
+{
+    NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS params = { 0 };
+    NvU32 ret;
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
+                         &params,
+                         sizeof(params));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+                     "Failed to determine output resource properties.");
+
+        if (assertOnly) {
+            return;
+        }
+        pConnectorEvo->or.type = NV0073_CTRL_SPECIFIC_OR_TYPE_DAC;
+        pConnectorEvo->or.primary = NV_INVALID_OR;
+        pConnectorEvo->or.secondaryMask = 0;
+        pConnectorEvo->or.protocol =
+            NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT;
+        pConnectorEvo->or.ditherType = NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF;
+        pConnectorEvo->or.ditherAlgo =
+            NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN;
+        pConnectorEvo->or.location = NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP;
+
+        return;
+    }
+
+    if (!assertOnly) {
+        pConnectorEvo->or.type = params.type;
+        if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
+                NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED) &&
+            params.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
+            // For the SOR crossbar, RM may report that multiple displayIDs own
+            // the same SOR. For example, it may report SOR 2 for both the
+            // DisplayPort and TMDS halves of a physical connector even though
+            // they have separate displayIds.
+            //
+            // All we really need to know is which SOR is assigned to the boot
+            // display, so we defer the query to MarkConnectorBootHeadActive().
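+            //
+            // Leave the OR unresolved in the meantime: or.primary is set to
+            // NV_INVALID_OR below until that boot-display query fills it in.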
+            pConnectorEvo->or.secondaryMask = 0x0;
+            pConnectorEvo->or.primary = NV_INVALID_OR;
+        } else {
+            pConnectorEvo->or.secondaryMask = 0x0;
+            pConnectorEvo->or.primary = params.index;
+        }
+        pConnectorEvo->or.protocol = params.protocol;
+        pConnectorEvo->or.ditherType = params.ditherType;
+        pConnectorEvo->or.ditherAlgo = params.ditherAlgo;
+        pConnectorEvo->or.location = params.location;
+    } else {
+        nvAssert(pConnectorEvo->or.type == params.type);
+        nvAssert(pConnectorEvo->or.primary == params.index);
+        nvAssert(pConnectorEvo->or.protocol == params.protocol);
+        nvAssert(pConnectorEvo->or.ditherType == params.ditherType);
+        nvAssert(pConnectorEvo->or.ditherAlgo == params.ditherAlgo);
+        nvAssert(pConnectorEvo->or.location == params.location);
+    }
+}
+
+/*!
+ * Query connector state, and retry if necessary.
+ */
+NVDpyIdList nvRmGetConnectedDpys(const NVDispEvoRec *pDispEvo,
+                                 NVDpyIdList dpyIdList)
+{
+    NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS params = { 0 };
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 ret;
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayMask = nvDpyIdListToNvU32(dpyIdList);
+    params.flags =
+        (DRF_DEF(0073_CTRL_SYSTEM,_GET_CONNECT_STATE_FLAGS,_METHOD,_DEFAULT) |
+         DRF_DEF(0073_CTRL_SYSTEM,_GET_CONNECT_STATE_FLAGS,_DDC,_DEFAULT) |
+         DRF_DEF(0073_CTRL_SYSTEM,_GET_CONNECT_STATE_FLAGS,_LOAD,_DEFAULT));
+
+    do {
+        params.retryTimeMs = 0;
+        ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                             pDevEvo->displayCommonHandle,
+                             NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE,
+                             &params,
+                             sizeof(params));
+
+        if (ret == NVOS_STATUS_ERROR_NOT_READY &&
+            params.retryTimeMs == 0) {
+            // Work around bug 970351: RM returns a zero retry time on platforms
+            // where the display driver is in user space. Use a conservative
+            // default. This code can be removed once this call is fixed in RM.
+            params.retryTimeMs = 20;
+        }
+
+        if (params.retryTimeMs > 0) {
+            nvkms_usleep(params.retryTimeMs * 1000);
+        } else {
+            nvkms_yield();
+        }
+    } while (params.retryTimeMs > 0);
+
+    if (ret == NVOS_STATUS_SUCCESS) {
+        return nvNvU32ToDpyIdList(params.displayMask);
+    } else {
+        nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+                     "Failed to detect connected display devices");
+        return nvEmptyDpyIdList();
+    }
+}
+
+/*!
+ * Notify the DP library that we are ready to proceed after a suspend/boot, and
+ * that it should initialize and start handling events.
+ */
+NvBool nvRmResumeDP(NVDevEvoPtr pDevEvo)
+{
+    NVDispEvoPtr pDispEvo;
+    int i;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
+        NVConnectorEvoPtr pConnectorEvo;
+        NVDpyIdList connectedIdsList =
+            nvRmGetConnectedDpys(pDispEvo, pDispEvo->connectorIds);
+
+        FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
+            NvBool plugged =
+                nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedIdsList);
+
+            if (!pConnectorEvo->pDpLibConnector) {
+                continue;
+            }
+
+            if (!nvDPResume(pConnectorEvo->pDpLibConnector, plugged)) {
+                goto failed;
+            }
+        }
+    }
+
+    return TRUE;
+
+failed:
+    nvRmPauseDP(pDevEvo);
+    return FALSE;
+}
+
+
+void nvRmPauseDP(NVDevEvoPtr pDevEvo)
+{
+    NVDispEvoPtr pDispEvo;
+    int i;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
+        NVConnectorEvoPtr pConnectorEvo;
+
+        FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
+            if (nvConnectorUsesDPLib(pConnectorEvo)) {
+                nvDPPause(pConnectorEvo->pDpLibConnector);
+            }
+        }
+    }
+}
+
+
+/*!
+ * This function is called whenever the DPMS level changes. On a CRT,
+ * you set the DPMS level by (dis/en)abling the hsync and vsync
+ * signals:
+ *
+ *   Hsync   Vsync   Mode
+ *   =====   =====   ====
+ *     1       1     Normal (on).
+ *     0       1     Standby -- RGB guns off, power supply on, tube filaments
+ *                   energized (screen saver mode).
+ *     1       0     Suspend -- RGB guns off, power supply off, tube filaments
+ *                   energized.
+ *     0       0     Power off -- small auxiliary circuit stays on to monitor
+ *                   the hsync/vsync signals to know when to wake up.
+ */
+NvBool nvRmSetDpmsEvo(NVDpyEvoPtr pDpyEvo, NvS64 value)
+{
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 ret;
+
+    if (nvDpyUsesDPLib(pDpyEvo)) {
+        nvDPDeviceSetPowerState(pDpyEvo,
+                                (value == NV_KMS_DPY_ATTRIBUTE_DPMS_ON));
+        return TRUE;
+    } else if (pDpyEvo->pConnectorEvo->legacyType !=
+               NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) {
+        NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS powerParams = { 0 };
+
+        powerParams.subDeviceInstance = pDispEvo->displayOwner;
+        powerParams.displayId = nvDpyEvoGetConnectorId(pDpyEvo);
+
+        powerParams.powerState = (value == NV_KMS_DPY_ATTRIBUTE_DPMS_ON) ?
+            NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_ON :
+            NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_OFF;
+
+        ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                             pDevEvo->displayCommonHandle,
+                             NV0073_CTRL_CMD_SPECIFIC_SET_MONITOR_POWER,
+                             &powerParams,
+                             sizeof(powerParams));
+
+        return (ret == NVOS_STATUS_SUCCESS);
+    } else {
+        NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
+        NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS powerParams = { { 0 }, 0 };
+
+        powerParams.base.subdeviceIndex = pDispEvo->displayOwner;
+        if (pConnectorEvo->or.primary == NV_INVALID_OR) {
+            nvAssert(pConnectorEvo->or.primary != NV_INVALID_OR);
+            return FALSE;
+        }
+        powerParams.orNumber = pConnectorEvo->or.primary;
+
+        switch (value) {
+        case NV_KMS_DPY_ATTRIBUTE_DPMS_ON:
+            powerParams.normalHSync =
+                DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _ENABLE);
+            powerParams.normalVSync =
+                DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _ENABLE);
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_DPMS_STANDBY:
+            powerParams.normalHSync =
+                DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _LO);
+            powerParams.normalVSync =
+                DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _ENABLE);
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_DPMS_SUSPEND:
+            powerParams.normalHSync =
+                DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _ENABLE);
+            powerParams.normalVSync =
+                DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _LO);
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_DPMS_OFF:
+            powerParams.normalHSync =
+                DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _LO);
+            powerParams.normalVSync =
+                DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _LO);
+            break;
+        default:
+            return FALSE;
+        }
+        // XXX These could probably be disabled too, in the DPMS_OFF case.
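+        //
+        // An illustrative sketch of what that might look like (assuming the
+        // corresponding _DISABLE/_OFF values exist in the 5070 ctrl
+        // definitions; untested):
+        //
+        //   if (value == NV_KMS_DPY_ATTRIBUTE_DPMS_OFF) {
+        //       powerParams.normalData =
+        //           DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_DATA, _DISABLE);
+        //       powerParams.normalPower =
+        //           DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_PWR, _OFF);
+        //   }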
+ powerParams.normalData = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_DATA, _ENABLE); + powerParams.normalPower = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_PWR, _ON); + + powerParams.flags = + DRF_DEF(5070, _CTRL_CMD_SET_DAC_PWR_FLAGS, _SPECIFIED_NORMAL, _YES); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_SET_DAC_PWR, + &powerParams, + sizeof(powerParams)); + + return (ret == NVOS_STATUS_SUCCESS); + } +} + + +NvBool nvRmAllocSysmem(NVDevEvoPtr pDevEvo, NvU32 memoryHandle, + NvU32 *ctxDmaFlags, void **ppBase, NvU64 size, + NvKmsMemoryIsoType isoType) +{ + NvU32 ret; + NvBool bufferAllocated = FALSE; + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + const NvKmsDispIOCoherencyModes *pIOCoherencyModes; + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + + memAllocParams.attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO); + + memAllocParams.size = size; + + if (isoType == NVKMS_MEMORY_NISO) { + memAllocParams.attr2 |= DRF_DEF(OS32, _ATTR2, _NISO_DISPLAY, _YES); + + pIOCoherencyModes = &pDevEvo->nisoIOCoherencyModes; + } else { + memAllocParams.attr2 |= DRF_DEF(OS32, _ATTR2, _ISO, _YES); + + pIOCoherencyModes = &pDevEvo->isoIOCoherencyModes; + } + + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI) | + DRF_DEF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS) | + DRF_DEF(OS32, _ATTR, _FORMAT, _PITCH); + + if (pIOCoherencyModes->noncoherent) { + // Model (3) + // - allocate USWC system memory + // - allocate ctx dma with NVOS03_FLAGS_CACHE_SNOOP_DISABLE + // - to sync CPU and GPU, flush CPU WC buffer + + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_COMBINE, + memAllocParams.attr); + + ret = nvRmApiAlloc( + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + NV01_MEMORY_SYSTEM, + &memAllocParams); + + if (ret == NVOS_STATUS_SUCCESS) { + bufferAllocated = TRUE; + if (ctxDmaFlags) { + *ctxDmaFlags |= DRF_DEF(OS03, _FLAGS, _CACHE_SNOOP, _DISABLE); + } + } else { + bufferAllocated = FALSE; + } + + } + + if (!bufferAllocated && pIOCoherencyModes->coherent) { + // Model (2b): Similar to existing PCI model + // - allocate cached (or USWC) system memory + // - allocate ctx DMA with NVOS03_FLAGS_CACHE_SNOOP_ENABLE + // ... + + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, + memAllocParams.attr); + + ret = nvRmApiAlloc( + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + NV01_MEMORY_SYSTEM, + &memAllocParams); + + if (ret == NVOS_STATUS_SUCCESS) { + bufferAllocated = TRUE; + if (ctxDmaFlags) { + *ctxDmaFlags |= DRF_DEF(OS03, _FLAGS, _CACHE_SNOOP, _ENABLE); + } + } else { + bufferAllocated = FALSE; + } + } + + if (bufferAllocated && ppBase) { + ret = nvRmApiMapMemory( + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + 0, /* offset */ + size, + ppBase, + 0 /* flags */); + + if (ret != NVOS_STATUS_SUCCESS) { + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle); + + bufferAllocated = FALSE; + } + } + + return bufferAllocated; +} + + +/*****************************************************************************/ +/* Alloc memory and a context dma, following the rules dictated by the + DMA coherence flags. 
*/ +/*****************************************************************************/ + +NvBool nvRmAllocEvoDma(NVDevEvoPtr pDevEvo, NVEvoDmaPtr pDma, + NvU64 limit, NvU32 ctxDmaFlags, NvU32 subDeviceMask) +{ + NvBool bufferAllocated = FALSE; + NvU32 memoryHandle = 0; + void *pBase = NULL; + + NvBool needBar1Mapping = FALSE; + + NVSurfaceDescriptor surfaceDesc; + NvU32 localCtxDmaFlags = ctxDmaFlags | + DRF_DEF(OS03, _FLAGS, _ACCESS, _READ_WRITE) | + DRF_DEF(OS03, _FLAGS, _HASH_TABLE, _DISABLE); + + NvU32 ret; + + nvkms_memset(pDma, 0, sizeof(*pDma)); + + memoryHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + /* + * On certain GPUs (GF100, GF104) there exists a hardware bug that forces + * us to put display NISO surfaces (pushbuffer, semaphores, notifiers + * accessed by EVO) in vidmem instead of sysmem. See bug 632241 for + * details. + */ + if (NV5070_CTRL_SYSTEM_GET_CAP(pDevEvo->capsBits, + NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY)) { + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + memAllocParams.type = NVOS32_TYPE_DMA; + memAllocParams.size = limit + 1; + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _4KB) | + DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM); + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + NV01_MEMORY_LOCAL_USER, + &memAllocParams); + + if (ret != NVOS_STATUS_SUCCESS) { + /* We can't fall back to any of the sysmem options below, due to + * the nature of the HW bug forcing us to use vidmem. */ + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Unable to allocate video memory for display"); + return FALSE; + } + + limit = memAllocParams.size - 1; + + /* We'll access these surfaces through IFB */ + pBase = NULL; + + bufferAllocated = TRUE; + needBar1Mapping = TRUE; + } + + if (!bufferAllocated) { + /* + * Setting NVKMS_MEMORY_NISO since nvRmAllocEvoDma() is currently only + * called to allocate pushbuffer and notifier memory. + */ + bufferAllocated = nvRmAllocSysmem(pDevEvo, memoryHandle, + &localCtxDmaFlags, &pBase, limit + 1, + NVKMS_MEMORY_NISO); + } + + if (!bufferAllocated) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle); + + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unable to allocate DMA memory"); + + return FALSE; + } + + // Create surface descriptor for this allocation. 
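+    // (On EVO hals the descriptor carries a ctxdma handle; RmAllocEvoChannel()
+    // below passes it as the pushbuffer ctxdma when allocating the display
+    // channel.)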
+ ret = pDevEvo->hal->AllocSurfaceDescriptor(pDevEvo, &surfaceDesc, memoryHandle, + localCtxDmaFlags, limit, + FALSE /* mapToDisplayRm */); + + if (ret != NVOS_STATUS_SUCCESS) { + if (pBase != NULL) { + nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + pBase, + 0); + } + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, memoryHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle); + + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to allocate surface descriptor"); + + return FALSE; + } + + pDma->memoryHandle = memoryHandle; + + pDma->surfaceDesc = surfaceDesc; + + pDma->limit = limit; + + if (needBar1Mapping) { + NvBool result; + + result = nvRmEvoMapVideoMemory(pDevEvo, memoryHandle, limit + 1, + pDma->subDeviceAddress, subDeviceMask); + + if (!result) { + nvRmFreeEvoDma(pDevEvo, pDma); + return FALSE; + } + } else { + int sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (((1 << sd) & subDeviceMask) == 0) { + continue; + } + + pDma->subDeviceAddress[sd] = pBase; + } + } + pDma->isBar1Mapping = needBar1Mapping; + + return TRUE; +} + +void nvRmFreeEvoDma(NVDevEvoPtr pDevEvo, NVEvoDmaPtr pDma) +{ + NvU32 ret; + + pDevEvo->hal->FreeSurfaceDescriptor(pDevEvo, + pDevEvo->deviceHandle, + &pDma->surfaceDesc); + + if (pDma->memoryHandle != 0) { + if (pDma->isBar1Mapping) { + nvRmEvoUnMapVideoMemory(pDevEvo, pDma->memoryHandle, + pDma->subDeviceAddress); + } else { + int sd = 0; + NvBool addressMapped = TRUE; + + /* If pDma->subDeviceAddress[sd] is non-NULL for multiple subdevices, + * assume they are the same. Unmap only one but set all of them to + * NULL. This matches the logic in nvRmAllocEvoDma(). + */ + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + if (addressMapped && pDma->subDeviceAddress[sd] != NULL) { + ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDma->memoryHandle, + pDma->subDeviceAddress[sd], + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to unmap memory"); + } + + addressMapped = FALSE; + } + + pDma->subDeviceAddress[sd] = NULL; + } + } + + ret = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, pDma->memoryHandle); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to free DMA memory"); + } + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDma->memoryHandle); + pDma->memoryHandle = 0; + + pDma->limit = 0; + + nvkms_memset(pDma->subDeviceAddress, 0, sizeof(pDma->subDeviceAddress)); + } +} + +/*****************************************************************************/ +/* RmAllocEvoChannel () + * Allocates the EVO channel and associated notifier surfaces and ctxdmas. + * Takes how big the DMA controls are (varies by class of channel) and which + * class to allocate. 
+ */ +/*****************************************************************************/ +static NVEvoChannelPtr +RmAllocEvoChannel(NVDevEvoPtr pDevEvo, + NVEvoChannelMask channelMask, + NvV32 instance, NvU32 class) +{ + NVEvoChannelPtr pChannel = NULL; + NVDmaBufferEvoPtr buffer = NULL; + int sd; + NvU32 ret; + + /* One 4k page is enough to map PUT and GET */ + const NvU64 dmaControlLen = 0x1000; + + nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(channelMask) == 1); + + /* Allocate the channel data structure */ + pChannel = nvCalloc(1, sizeof(*pChannel)); + + if (pChannel == NULL) { + goto fail; + } + + buffer = &pChannel->pb; + + pChannel->hwclass = class; + pChannel->instance = instance; + pChannel->channelMask = channelMask; + + pChannel->notifiersDma = nvCalloc(pDevEvo->numSubDevices, sizeof(NVEvoDma)); + + if (pChannel->notifiersDma == NULL) { + goto fail; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoDmaPtr pNotifiersDma = &pChannel->notifiersDma[sd]; + + void *pDmaDisplayChannel = NULL; + + // Allocation of the notifiers + if (!nvRmAllocEvoDma(pDevEvo, pNotifiersDma, + NV_DMA_EVO_NOTIFIER_SIZE - 1, + DRF_DEF(OS03, _FLAGS, _TYPE, _NOTIFIER), + 1 << sd)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Notifier DMA allocation failed"); + + goto fail; + } + + nvAssert(pNotifiersDma->subDeviceAddress[sd] != NULL); + + // Only allocate memory for one pushbuffer. + // All subdevices will share (via subdevice mask) + if (sd == 0) { + NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS ChannelAllocParams = { 0 }; + + NvU64 limit = NV_DMA_EVO_PUSH_BUFFER_SIZE - 1; + NVEvoDmaPtr pDma = &buffer->dma; + + // Allocation of the push buffer + if (!nvRmAllocEvoDma(pDevEvo, pDma, limit, 0, SUBDEVICE_MASK_ALL)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Display engine push buffer DMA allocation failed"); + + goto fail; + } + + if (!pDma->isBar1Mapping) { + buffer->base = pDma->subDeviceAddress[0]; + } else { + /* + * Allocate memory for a shadow copy in sysmem that we'll copy + * to vidmem via BAR1 at kickoff time. 
+ */ + buffer->base = nvCalloc(buffer->dma.limit + 1, 1); + if (buffer->base == NULL) { + goto fail; + } + } + + buffer->channel_handle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + // Channel instance (always 0 for CORE - head number otherwise) + ChannelAllocParams.channelInstance = instance; + // PB CtxDMA Handle + ChannelAllocParams.hObjectBuffer = buffer->dma.surfaceDesc.ctxDmaHandle; + // Initial offset within the PB + ChannelAllocParams.offset = 0; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + buffer->channel_handle, + class, + &ChannelAllocParams); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Display engine push buffer channel allocation failed: 0x%x (%s)", + ret, nvstatusToString(ret)); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + buffer->channel_handle); + buffer->channel_handle = 0; + + goto fail; + } + } + + ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + buffer->channel_handle, + 0, + dmaControlLen, + &pDmaDisplayChannel, + 0); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Display engine push buffer DMA mapping failed: 0x%x (%s)", + ret, nvstatusToString(ret)); + goto fail; + } + + buffer->control[sd] = pDmaDisplayChannel; + } + + /* Initialize the rest of the required push buffer information */ + buffer->buffer = buffer->base; + buffer->end = (NvU32 *)((char *)buffer->base + + NV_DMA_EVO_PUSH_BUFFER_SIZE - 8); + + /* + * Due to hardware bug 235044, we can not use the last 12 dwords of the + * core channel pushbuffer. Adjust offset_max appropriately. + * + * This bug is fixed in Volta and newer, so this workaround can be removed + * when Pascal support is dropped. See bug 3116066. + */ + buffer->offset_max = NV_DMA_EVO_PUSH_BUFFER_SIZE - + NV_DMA_EVO_PUSH_BUFFER_PAD_SIZE; + buffer->fifo_free_count = (buffer->offset_max >> 2) - 2; + buffer->put_offset = 0; + buffer->num_channels = pDevEvo->numSubDevices; + buffer->pDevEvo = pDevEvo; + buffer->currentSubDevMask = SUBDEVICE_MASK_ALL; + + pChannel->imm.type = NV_EVO_IMM_CHANNEL_NONE; + + pDevEvo->hal->InitChannel(pDevEvo, pChannel); + + return pChannel; + +fail: + + RmFreeEvoChannel(pDevEvo, pChannel); + + return NULL; +} + +static void FreeImmediateChannelPio(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + NVEvoPioChannel *pPio = pChannel->imm.u.pio; + int sd; + + nvAssert(pPio != NULL); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + if (!pPio->control[sd]) { + continue; + } + + if (nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pPio->handle, + pPio->control[sd], + 0)) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Failed to unmap immediate channel"); + } + pPio->control[sd] = NULL; + } + + if (pPio->handle) { + if (nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + pPio->handle)) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, "Failed to free immediate channel"); + } + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pPio->handle); + pPio->handle = 0; + } + + nvFree(pPio); + pChannel->imm.u.pio = NULL; +} + +static void FreeImmediateChannelDma(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + NVEvoChannelPtr pImmChannel = pChannel->imm.u.dma; + + RmFreeEvoChannel(pDevEvo, pImmChannel); + pChannel->imm.u.dma = NULL; +} + +static void FreeImmediateChannel(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + switch (pChannel->imm.type) { + case NV_EVO_IMM_CHANNEL_NONE: + return; + case NV_EVO_IMM_CHANNEL_PIO: + 
        FreeImmediateChannelPio(pDevEvo, pChannel);
+        break;
+    case NV_EVO_IMM_CHANNEL_DMA:
+        FreeImmediateChannelDma(pDevEvo, pChannel);
+        break;
+    }
+    pChannel->imm.type = NV_EVO_IMM_CHANNEL_NONE;
+}
+
+/*****************************************************************************/
+/* RmFreeEvoChannel ()
+ * Frees all of the stuff allocated in RmAllocEvoChannel */
+/*****************************************************************************/
+static void RmFreeEvoChannel(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel)
+{
+    int sd;
+
+    if (pChannel == NULL) {
+        return;
+    }
+
+    FreeImmediateChannel(pDevEvo, pChannel);
+
+    if (pChannel->completionNotifierEventHandle != 0) {
+
+        nvRmApiFree(nvEvoGlobal.clientHandle,
+                    pChannel->pb.channel_handle,
+                    pChannel->completionNotifierEventHandle);
+
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
+                           pChannel->completionNotifierEventHandle);
+
+        pChannel->completionNotifierEventHandle = 0;
+    }
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        if (pChannel->pb.control[sd]) {
+            if (nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
+                                   pDevEvo->pSubDevices[sd]->handle,
+                                   pChannel->pb.channel_handle,
+                                   pChannel->pb.control[sd],
+                                   0) != NVOS_STATUS_SUCCESS) {
+                nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                                 "Failed to unmap display engine channel memory");
+            }
+            pChannel->pb.control[sd] = NULL;
+        }
+    }
+
+    if (pChannel->pb.channel_handle != 0) {
+        // If NVKMS restored the console successfully, tell RM to leave the
+        // channels allocated to avoid shutting down the heads we just
+        // enabled.
+        //
+        // On EVO, only leave the core and base channels allocated. The
+        // other satellite channels shouldn't be active at the console.
+        //
+        // On nvdisplay, one or more window channels are also needed. Rather
+        // than try to figure out which ones are needed, just leave them all
+        // alone.
+        const NvBool isCore =
+            FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE,
+                           pChannel->channelMask);
+        const NvBool isBase =
+            (pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0;
+        const NvBool isWindow =
+            (pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0;
+        if ((isCore || isBase || isWindow) && pDevEvo->skipConsoleRestore) {
+            NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS params = { };
+
+            params.base.subdeviceIndex = pDevEvo->vtFbInfo.subDeviceInstance;
+            params.flags = NV5070_CTRL_SET_RMFREE_FLAGS_PRESERVE_HW;
+
+            if (nvRmApiControl(nvEvoGlobal.clientHandle,
+                               pDevEvo->displayHandle,
+                               NV5070_CTRL_CMD_SET_RMFREE_FLAGS,
+                               &params, sizeof(params))
+                    != NVOS_STATUS_SUCCESS) {
+                nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                                 "Failed to set the PRESERVE_HW flag");
+            }
+        }
+
+        if (nvRmApiFree(nvEvoGlobal.clientHandle,
+                        pDevEvo->displayHandle,
+                        pChannel->pb.channel_handle)
+                != NVOS_STATUS_SUCCESS) {
+            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                        "Failed to tear down display engine channel");
+        }
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
+                           pChannel->pb.channel_handle);
+        pChannel->pb.channel_handle = 0;
+    }
+
+    if (pChannel->pb.dma.isBar1Mapping) {
+        /* Pushbuffer is in vidmem. Free the shadow copy. */
+        nvFree(pChannel->pb.base);
+        pChannel->pb.base = NULL;
+    }
+
+    nvRmFreeEvoDma(pDevEvo, &pChannel->pb.dma);
+
+    if (pChannel->notifiersDma) {
+        for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+            nvRmFreeEvoDma(pDevEvo, &pChannel->notifiersDma[sd]);
+        }
+    }
+
+    nvFree(pChannel->notifiersDma);
+    pChannel->notifiersDma = NULL;
+
+    nvFree(pChannel);
+}
+
+static NvBool
+AllocImmediateChannelPio(NVDevEvoPtr pDevEvo,
+                         NVEvoChannelPtr pChannel,
+                         NvU32 class,
+                         NvU32 instance,
+                         NvU32 mapSize)
+{
+    NVEvoPioChannel *pPio = NULL;
+    NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
+    NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS params = { 0 };
+    NvU32 sd;
+
+    pPio = nvCalloc(1, sizeof(*pPio));
+
+    if (!pPio) {
+        return FALSE;
+    }
+
+    pChannel->imm.type = NV_EVO_IMM_CHANNEL_PIO;
+    pChannel->imm.u.pio = pPio;
+
+    params.channelInstance = instance;
+
+    if (nvRmApiAlloc(nvEvoGlobal.clientHandle,
+                     pDevEvo->displayHandle,
+                     handle,
+                     class,
+                     &params) != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                    "Failed to allocate immediate channel %d", instance);
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle);
+        return FALSE;
+    }
+
+    pPio->handle = handle;
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        void *pImm = NULL;
+
+        if (nvRmApiMapMemory(nvEvoGlobal.clientHandle,
+                             pDevEvo->pSubDevices[sd]->handle,
+                             pPio->handle,
+                             0,
+                             mapSize,
+                             &pImm,
+                             0) != NVOS_STATUS_SUCCESS) {
+            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                        "Failed to map immediate channel %d/%d",
+                        sd, instance);
+            return FALSE;
+        }
+
+        pPio->control[sd] = pImm;
+    }
+
+    return TRUE;
+}
+
+static NvBool
+AllocImmediateChannelDma(NVDevEvoPtr pDevEvo,
+                         NVEvoChannelPtr pChannel,
+                         NvU32 immClass)
+{
+    NVEvoChannelPtr pImmChannel = RmAllocEvoChannel(
+        pDevEvo,
+        DRF_DEF64(_EVO, _CHANNEL_MASK, _WINDOW_IMM, _ENABLE),
+        pChannel->instance, immClass);
+
+    if (!pImmChannel) {
+        return FALSE;
+    }
+
+    pChannel->imm.type = NV_EVO_IMM_CHANNEL_DMA;
+    pChannel->imm.u.dma = pImmChannel;
+
+    return TRUE;
+}
+
+NvBool nvRMAllocateBaseChannels(NVDevEvoPtr pDevEvo)
+{
+    int i;
+    NvU32 baseClass = 0;
+    NvU32 head;
+
+    static const NvU32 baseChannelDmaClasses[] = {
+        NV927C_BASE_CHANNEL_DMA,
+    };
+
+    for (i = 0; i < ARRAY_LEN(baseChannelDmaClasses); i++) {
+        if (nvRmEvoClassListCheck(pDevEvo, baseChannelDmaClasses[i])) {
+            baseClass = baseChannelDmaClasses[i];
+            break;
+        }
+    }
+
+    if (!baseClass) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unsupported base display class");
+        return FALSE;
+    }
+
+    for (head = 0; head < pDevEvo->numHeads; head++) {
+        pDevEvo->base[head] = RmAllocEvoChannel(
+            pDevEvo,
+            DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE),
+            head, baseClass);
+
+        if (!pDevEvo->base[head]) {
+            return FALSE;
+        }
+    }
+
+    return TRUE;
+}
+
+NvBool nvRMAllocateOverlayChannels(NVDevEvoPtr pDevEvo)
+{
+    NvU32 immMapSize;
+    NvU32 head;
+
+    if (!nvRmEvoClassListCheck(pDevEvo,
+                               NV917E_OVERLAY_CHANNEL_DMA)) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                    "Unsupported overlay display class");
+        return FALSE;
+    }
+
+    nvAssert(nvRmEvoClassListCheck(pDevEvo, NV917B_OVERLAY_IMM_CHANNEL_PIO));
+
+    /*
+     * EvoSetImmPointOut91() will interpret the PIO mapping as a pointer
+     * to GK104DispOverlayImmControlPio and access the SetPointsOut and
+     * Update fields, which is safe as long as SetPointsOut and Update are
+     * at consistent offsets.
+     */
+    nvAssert(offsetof(GK104DispOverlayImmControlPio, SetPointsOut) ==
+             NV917B_SET_POINTS_OUT(NVKMS_LEFT));
+    nvAssert(offsetof(GK104DispOverlayImmControlPio, Update) ==
+             NV917B_UPDATE);
+    immMapSize =
+        NV_MAX(NV917B_SET_POINTS_OUT(NVKMS_LEFT), NV917B_UPDATE) + sizeof(NvV32);
+
+    for (head = 0; head < pDevEvo->numHeads; head++) {
+        pDevEvo->overlay[head] = RmAllocEvoChannel(
+            pDevEvo,
+            DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _OVERLAY, head, _ENABLE),
+            head, NV917E_OVERLAY_CHANNEL_DMA);
+
+        if (!pDevEvo->overlay[head]) {
+            return FALSE;
+        }
+
+        if (!AllocImmediateChannelPio(pDevEvo, pDevEvo->overlay[head],
+                NV917B_OVERLAY_IMM_CHANNEL_PIO, head, immMapSize)) {
+            return FALSE;
+        }
+    }
+
+    return TRUE;
+}
+
+/*!
+ * This allocates a syncpt per channel; the syncpt is dedicated to that
+ * channel. NVKMS supports syncpts only on SoC devices, which have a
+ * single device/sub-device/disp, so sd can be 0.
+ */
+static NvBool AllocSyncpt(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel,
+                          NVEvoSyncpt *pEvoSyncptOut)
+{
+    NvU32 id;
+    NvKmsSyncPtOpParams params = { };
+    NvBool result;
+    NVSurfaceDescriptor surfaceDesc;
+
+    if (!pDevEvo->supportsSyncpts) {
+        return FALSE;
+    }
+
+    /*! Set syncpt id to invalid to avoid an unintended free */
+    pEvoSyncptOut->id = NVKMS_SYNCPT_ID_INVALID;
+
+    /*
+     * The HW engine on Orin is called HOST1X; all syncpts live in HOST1X's
+     * internal RAM. OP_ALLOC calls into the HOST1X driver and allocates a
+     * syncpt resource.
+     */
+    params.alloc.syncpt_name = "nvkms-fence";
+    result = nvkms_syncpt_op(NVKMS_SYNCPT_OP_ALLOC, &params);
+    if (!result) {
+        return FALSE;
+    }
+    id = params.alloc.id;
+
+    /* Post syncpt max val is tracked locally. Init the value here. */
+    params.read_minval.id = id;
+    result = nvkms_syncpt_op(NVKMS_SYNCPT_OP_READ_MINVAL, &params);
+    if (!result) {
+        goto failed;
+    }
+
+    result = nvRmEvoAllocAndBindSyncpt(pDevEvo, pChannel, id,
+                                       &surfaceDesc,
+                                       pEvoSyncptOut);
+    if (!result) {
+        goto failed;
+    }
+
+    /*! Populate syncpt values to return. */
+    pEvoSyncptOut->channelMask = pChannel->channelMask;
+    pEvoSyncptOut->syncptMaxVal = params.read_minval.minval;
+
+    return TRUE;
+
+failed:
+    /*! Put the syncpt back, since the operation failed */
+    params.put.id = id;
+    nvkms_syncpt_op(NVKMS_SYNCPT_OP_PUT, &params);
+    return FALSE;
+}
+
+static NvBool AllocPostSyncptPerChannel(NVDevEvoPtr pDevEvo,
+                                        NVEvoChannelPtr pChannel)
+{
+    if (!pDevEvo->supportsSyncpts) {
+        return TRUE;
+    }
+
+    return AllocSyncpt(pDevEvo, pChannel, &pChannel->postSyncpt);
+}
+
+NvBool nvRMAllocateWindowChannels(NVDevEvoPtr pDevEvo)
+{
+    int index;
+    NvU32 window;
+
+    static const struct {
+        NvU32 windowClass;
+        NvU32 immClass;
+    } windowChannelClasses[] = {
+        { NVC97E_WINDOW_CHANNEL_DMA,
+          NVC97B_WINDOW_IMM_CHANNEL_DMA },
+        { NVC67E_WINDOW_CHANNEL_DMA,
+          NVC67B_WINDOW_IMM_CHANNEL_DMA },
+        { NVC57E_WINDOW_CHANNEL_DMA,
+          NVC57B_WINDOW_IMM_CHANNEL_DMA },
+        { NVC37E_WINDOW_CHANNEL_DMA,
+          NVC37B_WINDOW_IMM_CHANNEL_DMA },
+    }, *c = NULL;
+
+    for (index = 0; index < ARRAY_LEN(windowChannelClasses); index++) {
+        if (nvRmEvoClassListCheck(pDevEvo,
+                                  windowChannelClasses[index].windowClass)) {
+
+            c = &windowChannelClasses[index];
+
+            nvAssert(nvRmEvoClassListCheck(pDevEvo, c->immClass));
+            break;
+        }
+    }
+
+    if (index >= ARRAY_LEN(windowChannelClasses)) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unsupported window display class");
+        return FALSE;
+    }
+
+    nvAssert(pDevEvo->numWindows <= ARRAY_LEN(pDevEvo->window));
+    for (window = 0; window < pDevEvo->numWindows; window++) {
+        pDevEvo->window[window] = RmAllocEvoChannel(
+            pDevEvo,
+            DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE),
+            window, c->windowClass);
+
+        if (!pDevEvo->window[window]) {
+            return FALSE;
+        }
+
+        if (!AllocImmediateChannelDma(pDevEvo, pDevEvo->window[window],
+                                      c->immClass)) {
+            return FALSE;
+        }
+
+        if (!AllocPostSyncptPerChannel(pDevEvo,
+                                       pDevEvo->window[window])) {
+            return FALSE;
+        }
+    }
+
+    return TRUE;
+}
+
+static void EvoFreeCoreChannel(NVDevEvoRec *pDevEvo, NVEvoChannel *pChannel)
+{
+    NvU32 sd;
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        NvU32 ret;
+
+        if (!pDevEvo->pSubDevices[sd]->pCoreDma) {
+            continue;
+        }
+
+        ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
+                                 pDevEvo->pSubDevices[sd]->handle,
+                                 pChannel->pb.channel_handle,
+                                 pDevEvo->pSubDevices[sd]->pCoreDma,
+                                 0);
+
+        if (ret != NVOS_STATUS_SUCCESS) {
+            nvEvoLogDevDebug(
+                pDevEvo,
+                EVO_LOG_ERROR,
+                "Failed to unmap NVDisplay core channel memory mapping for ARMed values");
+        }
+        pDevEvo->pSubDevices[sd]->pCoreDma = NULL;
+    }
+
+    RmFreeEvoChannel(pDevEvo, pChannel);
+}
+
+static NVEvoChannel* EvoAllocateCoreChannel(NVDevEvoRec *pDevEvo)
+{
+    NVEvoChannel *pChannel;
+    NvU32 sd;
+
+    pChannel =
+        RmAllocEvoChannel(pDevEvo,
+                          DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE),
+                          0,
+                          pDevEvo->coreChannelDma.coreChannelClass);
+
+    if (pChannel == NULL) {
+        goto failed;
+    }
+
+    nvkms_memset(&pDevEvo->lut.notifierState, 0,
+                 sizeof(pDevEvo->lut.notifierState));
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        NvU32 ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle,
+                                     pDevEvo->pSubDevices[sd]->handle,
+                                     pChannel->pb.channel_handle,
+                                     pDevEvo->coreChannelDma.dmaArmedOffset,
+                                     pDevEvo->coreChannelDma.dmaArmedSize,
+                                     (void**)&pDevEvo->pSubDevices[sd]->pCoreDma,
+                                     DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY));
+
+        if (ret != NVOS_STATUS_SUCCESS) {
+            nvEvoLogDev(
+                pDevEvo,
+                EVO_LOG_ERROR,
+                "Core channel memory mapping for ARMed values failed: 0x%x (%s)",
+                ret, nvstatusToString(ret));
+            goto failed;
+        }
+    }
+
+    return pChannel;
+
+failed:
+    if (pChannel != NULL) {
+        EvoFreeCoreChannel(pDevEvo, pChannel);
+    }
+    return NULL;
+}
+
+/* Pre-allocate the vblank syncpts, store in
NVDispHeadStateEvoRec. */ +void nvRmAllocCoreRGSyncpts(NVDevEvoPtr pDevEvo) +{ + + NVDispEvoPtr pDispEvo = NULL; + NvU32 syncptIdx = 0; + + if (!pDevEvo->supportsSyncpts || + !pDevEvo->hal->caps.supportsVblankSyncObjects) { + return; + } + + /* If Syncpts are supported, we're on Orin, which only has one display. */ + nvAssert(pDevEvo->nDispEvo == 1); + pDispEvo = pDevEvo->pDispEvo[0]; + + /* Initialize all heads' vblank sync object counts to zero. */ + for (int i = 0; i < pDevEvo->numApiHeads; i++) { + pDispEvo->apiHeadState[i].numVblankSyncObjectsCreated = 0; + } + + /* For each core RG syncpt index: */ + for (syncptIdx = 0; syncptIdx < NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD; + syncptIdx++) { + /* For each Head: */ + for (int i = 0; i < pDevEvo->numApiHeads; i++) { + NvBool result = FALSE; + NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[i]; + + result = + AllocSyncpt(pDevEvo, pDevEvo->core, + &pApiHeadState->vblankSyncObjects[syncptIdx].evoSyncpt); + if (!result) { + /* + * Stop trying to allocate more syncpts if none are + * available. + */ + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + "Failed to allocate Core RG Syncpoint at index %d " + "on Head %d.", syncptIdx, i); + return; + } + + /* Populate the index of the syncpt in the NVVblankSyncObjectRec. */ + pApiHeadState->vblankSyncObjects[syncptIdx].index = syncptIdx; + /* Update the count. */ + pApiHeadState->numVblankSyncObjectsCreated = syncptIdx + 1; + } + } +} + +NvBool nvRMSetupEvoCoreChannel(NVDevEvoPtr pDevEvo) +{ + NvU32 sd; + + pDevEvo->core = EvoAllocateCoreChannel(pDevEvo); + if (!pDevEvo->core) { + return FALSE; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + // Bind the core notifier surface descriptor + NvU32 ret = + pDevEvo->hal->BindSurfaceDescriptor( + pDevEvo, pDevEvo->core, + &pDevEvo->core->notifiersDma[sd].surfaceDesc); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to bind display engine notify surface descriptor: 0x%x (%s)", + ret, nvstatusToString(ret)); + nvRMFreeEvoCoreChannel(pDevEvo); + return FALSE; + } + } + + nvInitEvoSubDevMask(pDevEvo); + + /* + * XXX NVKMS TODO: Enable core channel event generation; see bug + * 1671139. + */ + + // Query the VBIOS head assignments. Note that this has to happen after the + // core channel is allocated or else RM will return incorrect information + // about dynamic display IDs it allocates for the boot display on DP MST + // devices. + GetVbiosHeadAssignment(pDevEvo); + + return TRUE; +} + +void nvRMFreeBaseChannels(NVDevEvoPtr pDevEvo) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + RmFreeEvoChannel(pDevEvo, pDevEvo->base[head]); + pDevEvo->base[head] = NULL; + } +} + +void nvRMFreeOverlayChannels(NVDevEvoPtr pDevEvo) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + RmFreeEvoChannel(pDevEvo, pDevEvo->overlay[head]); + pDevEvo->overlay[head] = NULL; + } +} + +void nvRMFreeWindowChannels(NVDevEvoPtr pDevEvo) +{ + NvU32 window; + + for (window = 0; window < pDevEvo->numWindows; window++) { + nvRmEvoFreeSyncpt(pDevEvo, &pDevEvo->window[window]->postSyncpt); + RmFreeEvoChannel(pDevEvo, pDevEvo->window[window]); + pDevEvo->window[window] = NULL; + } +} + +/* Frees the Core RG Syncpts. 
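+ * Only the numVblankSyncObjectsCreated entries that were actually allocated
+ * by nvRmAllocCoreRGSyncpts() are released, so this is safe to call after a
+ * partial allocation failure.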
*/ +void nvRmFreeCoreRGSyncpts(NVDevEvoPtr pDevEvo) +{ + + NVDispEvoPtr pDispEvo = NULL; + + if (!pDevEvo->supportsSyncpts || + !pDevEvo->hal->caps.supportsVblankSyncObjects) { + return; + } + + /* We can get here in teardown cases from alloc failures */ + if (pDevEvo->nDispEvo == 0) { + return; + } + + /* If Syncpts are supported, we're on Orin, which only has one display. */ + nvAssert(pDevEvo->nDispEvo == 1); + pDispEvo = pDevEvo->pDispEvo[0]; + + /* For each Head: */ + for (int i = 0; i < pDevEvo->numApiHeads; i++) { + /* Free all core RG syncpts. */ + NVDispApiHeadStateEvoRec *pApiHeadState = &pDispEvo->apiHeadState[i]; + for (int j = 0; j < pApiHeadState->numVblankSyncObjectsCreated; j++) { + nvAssert(!pApiHeadState->vblankSyncObjects[j].inUse); + nvRmEvoFreeSyncpt(pDevEvo, + &pApiHeadState->vblankSyncObjects[j].evoSyncpt); + } + pApiHeadState->numVblankSyncObjectsCreated = 0; + } +} + +void nvRMFreeEvoCoreChannel(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo->core != NULL) { + EvoFreeCoreChannel(pDevEvo, pDevEvo->core); + pDevEvo->core = NULL; + } +} + +/* Poll for an EVO channel on a particular subdevice to process all its methods */ +static NvBool SyncOneEvoChannel( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvU32 errorToken) +{ + NvBool isMethodPending; + NvU64 startTime = 0; + const NvU32 timeout = 2000000; // microseconds + + do { + if (!pDevEvo->hal->IsChannelMethodPending(pDevEvo, pChan, + sd, &isMethodPending)) { + return FALSE; + } + + if (!isMethodPending) { + break; + } + + if (nvExceedsTimeoutUSec(pDevEvo, &startTime, timeout)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Idling display engine timed out: 0x%08x:%d:%d:%d", + pChan->hwclass, pChan->instance, + sd, errorToken); + return FALSE; + } + + nvkms_yield(); + + } while (TRUE); + + return TRUE; +} + +/* Sync an EVO channel on all subdevices */ +NvBool nvRMSyncEvoChannel( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 errorToken) +{ + NvBool ret = TRUE; + + if (pChannel) { + NvU32 sd; + + nvDmaKickoffEvo(pChannel); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (!SyncOneEvoChannel(pDevEvo, pChannel, sd, errorToken)) { + ret = FALSE; + } + } + } + + return ret; +} + + +/* + * Wait for the requested base channel to be idle (no methods pending), and + * call STOP_BASE if the wait times out. + * + * stoppedBase will be TRUE if calling STOP_BASE was necessary and + * successful. 
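+ *
+ * An illustrative caller shape (hypothetical):
+ *
+ *   NvBool stoppedBase;
+ *   if (nvRMIdleBaseChannel(pDevEvo, head, sd, &stoppedBase)) {
+ *       if (stoppedBase) {
+ *           // STOP_BASE was issued; the channel did not idle on its own.
+ *       }
+ *   }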
+ */
+NvBool nvRMIdleBaseChannel(NVDevEvoPtr pDevEvo, NvU32 head, NvU32 sd,
+                           NvBool *stoppedBase)
+{
+    NVEvoChannelPtr pMainLayerChannel =
+        pDevEvo->head[head].layer[NVKMS_MAIN_LAYER];
+    NvU64 startTime = 0;
+    NvBool idleTimedOut = FALSE;
+    const NvU32 timeout = 2000000; // 2 seconds
+    NvBool isMethodPending = TRUE;
+    NvBool ret = TRUE;
+
+    *stoppedBase = FALSE;
+
+    do {
+        if (!pDevEvo->hal->IsChannelMethodPending(pDevEvo,
+                                                  pMainLayerChannel,
+                                                  sd,
+                                                  &isMethodPending)) {
+            break;
+        }
+
+        if (!isMethodPending) {
+            break;
+        }
+
+        if (nvExceedsTimeoutUSec(pDevEvo, &startTime, timeout)) {
+            idleTimedOut = TRUE;
+            break;
+        }
+
+        nvkms_yield();
+
+    } while (TRUE);
+
+    if (idleTimedOut) {
+        NVEvoIdleChannelState idleChannelState = { };
+
+        idleChannelState.subdev[sd].channelMask |= pMainLayerChannel->channelMask;
+        ret = pDevEvo->hal->ForceIdleSatelliteChannel(pDevEvo, &idleChannelState);
+
+        *stoppedBase = ret;
+    }
+
+    return ret;
+}
+
+
+NvBool nvRmEvoClassListCheck(const NVDevEvoRec *pDevEvo, NvU32 classID)
+{
+    const NvU32 *classes = pDevEvo->supportedClasses;
+
+    int i;
+
+    nvAssert(pDevEvo->numClasses > 0);
+
+    for (i = 0; i < pDevEvo->numClasses; i++) {
+        if (classes[i] == classID) {
+            return TRUE;
+        }
+    }
+
+    return FALSE;
+}
+
+/*!
+ * This API is used to register a syncpt object with RM. It involves:
+ * 1. Allocating a new NV01_MEMORY_SYNCPOINT syncpt object.
+ * 2. Allocating a new ctxdma descriptor for the syncpt object.
+ * 3. Binding the ctxdma entry to the channel.
+ */
+NvBool nvRmEvoAllocAndBindSyncpt(
+    NVDevEvoRec *pDevEvo,
+    NVEvoChannel *pChannel,
+    NvU32 id,
+    NVSurfaceDescriptor *pSurfaceDesc,
+    NVEvoSyncpt *pEvoSyncpt)
+{
+    NvU32 ret = FALSE;
+
+    NvU32 hSyncpt;
+    NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS syncptAllocParams = {0};
+
+    /*! Alloc SYNC object */
+    syncptAllocParams.syncpointId = id;
+
+    hSyncpt = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
+    if (hSyncpt == 0) {
+        goto skipEverythingAndFail;
+    }
+
+    ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
+                       pDevEvo->deviceHandle,
+                       hSyncpt,
+                       NV01_MEMORY_SYNCPOINT,
+                       &syncptAllocParams);
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"Failed to allocate syncpt object");
+        goto cleanHandleAndFail;
+    }
+
+    /*! Alloc surface descriptor for syncpt object */
+    ret = pDevEvo->hal->AllocSurfaceDescriptor(
+        pDevEvo, pSurfaceDesc, hSyncpt,
+        (DRF_DEF(OS03, _FLAGS, _HASH_TABLE, _DISABLE)),
+        65535 /* 64K-1 */,
+        FALSE /* mapToDisplayRm */);
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"Failed to allocate surface descriptor");
+        goto cleanSyncptHandleAndFail;
+    }
+
+    /*! Bind the surface descriptor to the syncpt object */
+    ret = pDevEvo->hal->BindSurfaceDescriptor(pDevEvo, pChannel, pSurfaceDesc);
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"Failed to bind surface descriptor");
+        goto cleanEverythingAndFail;
+    }
+
+    pEvoSyncpt->id = id;
+    pEvoSyncpt->surfaceDesc = *pSurfaceDesc;
+    pEvoSyncpt->hSyncpt = hSyncpt;
+    pEvoSyncpt->allocated = TRUE;
+
+    return TRUE;
+
+cleanEverythingAndFail:
+    pDevEvo->hal->FreeSurfaceDescriptor(pDevEvo,
+                                        pDevEvo->deviceHandle,
+                                        pSurfaceDesc);
+
+cleanSyncptHandleAndFail:
+    nvRmApiFree(nvEvoGlobal.clientHandle, pDevEvo->deviceHandle, hSyncpt);
+
+cleanHandleAndFail:
+    nvFreeUnixRmHandle(&pDevEvo->handleAllocator, hSyncpt);
+
+skipEverythingAndFail:
+    return FALSE;
+}
+
+void nvRmFreeSyncptHandle(
+    NVDevEvoRec *pDevEvo,
+    NVEvoSyncpt *pSyncpt)
+{
+    nvRmApiFree(nvEvoGlobal.clientHandle,
+                pDevEvo->deviceHandle,
+                pSyncpt->hSyncpt);
+    nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
+                       pSyncpt->hSyncpt);
+    pSyncpt->hSyncpt = 0;
+
+    pDevEvo->hal->FreeSurfaceDescriptor(pDevEvo,
+                                        pDevEvo->deviceHandle,
+                                        &pSyncpt->surfaceDesc);
+    pSyncpt->allocated = FALSE;
+}
+
+/*!
+ * This API is used to unregister the syncpt objects registered to the given
+ * channel. It searches the global table, removes this channel from each
+ * entry's channel mask, and frees any entry that is allocated but no longer
+ * referenced by any channel.
+ */
+void nvRmEvoFreePreSyncpt(
+    NVDevEvoRec *pDevEvo,
+    NVEvoChannel *pChannel)
+{
+    NvU32 i;
+    NvBool isChannelIdle = NV_FALSE;
+
+    if (pChannel == NULL) {
+        return;
+    }
+
+    if (!pDevEvo->supportsSyncpts) {
+        return;
+    }
+
+    if (pChannel->channelMask == 0) {
+        return;
+    }
+
+    pDevEvo->hal->IsChannelIdle(
+        pDevEvo, pChannel, 0, &isChannelIdle);
+
+    if (isChannelIdle == NV_FALSE) {
+        return;
+    }
+
+    /*! Find pre-syncpt and free it */
+    for (i = 0; i < NV_SYNCPT_GLOBAL_TABLE_LENGTH; i++) {
+
+        pDevEvo->preSyncptTable[i].channelMask &= ~pChannel->channelMask;
+        if (pDevEvo->preSyncptTable[i].channelMask == 0 &&
+            pDevEvo->preSyncptTable[i].allocated) {
+
+            /*! Free handles */
+            nvRmFreeSyncptHandle(pDevEvo, &pDevEvo->preSyncptTable[i]);
+        }
+    }
+}
+
+/*!
+ * This API is used to unregister the given syncpt object.
+ */
+void nvRmEvoFreeSyncpt(
+    NVDevEvoRec *pDevEvo,
+    NVEvoSyncpt *pEvoSyncpt)
+{
+    if ((pEvoSyncpt == NULL) || !pDevEvo->supportsSyncpts ||
+        (pEvoSyncpt->id == NVKMS_SYNCPT_ID_INVALID)) {
+        return;
+    }
+
+    /*! Put back the syncpt id reference to nvhost */
+    NvKmsSyncPtOpParams params = { };
+    params.put.id = pEvoSyncpt->id;
+    nvkms_syncpt_op(NVKMS_SYNCPT_OP_PUT, &params);
+
+    /*!
Free handles */ + nvRmFreeSyncptHandle(pDevEvo, pEvoSyncpt); +} + +void nvRmEvoUnMapVideoMemory(NVDevEvoPtr pDevEvo, NvU32 memoryHandle, + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES]) +{ + unsigned int sd; + NvU32 ret; + + if (memoryHandle == 0) { + return; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (subDeviceAddress[sd] != NULL) { + ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + memoryHandle, + subDeviceAddress[sd], + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"UnmapMemory() failed"); + } + } + + subDeviceAddress[sd] = NULL; + } +} + +NvBool nvRmEvoMapVideoMemory(NVDevEvoPtr pDevEvo, + NvU32 memoryHandle, NvU64 size, + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES], + NvU32 subDeviceMask) +{ + NvU32 ret; + + unsigned int sd; + + nvkms_memset(subDeviceAddress, 0, sizeof(void*) * NVKMS_MAX_SUBDEVICES); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + void *address = NULL; + + if (((1 << sd) & subDeviceMask) == 0) { + continue; + } + + ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + memoryHandle, + 0, + size, + &address, + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvRmEvoUnMapVideoMemory(pDevEvo, memoryHandle, subDeviceAddress); + return FALSE; + } + subDeviceAddress[sd] = address; + } + return TRUE; +} + +static NvBool GetClassList(NVDevEvoPtr pDevEvo) +{ + NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS classListParams = { 0 }; + NvU32 ret; + + classListParams.numClasses = 0; + classListParams.classList = NvP64_NULL; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_GPU_GET_CLASSLIST, + &classListParams, sizeof(classListParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + pDevEvo->supportedClasses = + nvCalloc(classListParams.numClasses, sizeof(NvU32)); + + if (pDevEvo->supportedClasses == NULL) { + return FALSE; + } + + classListParams.classList = NV_PTR_TO_NvP64(pDevEvo->supportedClasses); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_GPU_GET_CLASSLIST, + &classListParams, sizeof(classListParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvFree(pDevEvo->supportedClasses); + pDevEvo->supportedClasses = NULL; + return FALSE; + } + + pDevEvo->numClasses = classListParams.numClasses; + + return TRUE; +} + +static NvBool GetEngineListOneSubDevice(NVDevEvoPtr pDevEvo, NvU32 sd) +{ + NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS engineListParams = { 0 }; + NvU32 ret; + NVSubDeviceEvoPtr pSubDevice = pDevEvo->pSubDevices[sd]; + size_t length; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pSubDevice->handle, + NV2080_CTRL_CMD_GPU_GET_ENGINES_V2, + &engineListParams, sizeof(engineListParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + if (engineListParams.engineCount == 0) { + return TRUE; + } + + length = engineListParams.engineCount * sizeof(NvU32); + + pSubDevice->supportedEngines = nvAlloc(length); + + if (pSubDevice->supportedEngines == NULL) { + return FALSE; + } + + nvkms_memcpy(pSubDevice->supportedEngines, + engineListParams.engineList, + length); + pSubDevice->numEngines = engineListParams.engineCount; + + return TRUE; +} + +static NvBool GetEngineList(NVDevEvoPtr pDevEvo) +{ + int sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (!GetEngineListOneSubDevice(pDevEvo, sd)) { + return FALSE; + } + } + + return TRUE; +} + +static void FreeSubDevice(NVDevEvoPtr pDevEvo, NVSubDeviceEvoPtr pSubDevice) +{ + if (pSubDevice == NULL) { + return; + } 
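+    // Tear down in the reverse order of AllocSubDevice() below: the MIG
+    // subscription handles first, then the subdevice handle itself.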
+ + if (pDevEvo->deviceId.migDevice != NO_MIG_DEVICE) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->smg.gpuInstSubHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->smg.computeInstSubHandle); + } + + if (pSubDevice->handle != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSubDevice->handle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSubDevice->handle); + } + + if (pSubDevice->gpuString[0] != '\0') { + nvEvoLogDebug(EVO_LOG_INFO, "Freed %s", pSubDevice->gpuString); + } + + nvFree(pSubDevice->supportedEngines); + + nvFree(pSubDevice); +} + +static NVSubDeviceEvoPtr AllocSubDevice(NVDevEvoPtr pDevEvo, const NvU32 sd) +{ + NV2080_ALLOC_PARAMETERS subdevAllocParams = { 0 }; + NV2080_CTRL_GPU_GET_ID_PARAMS getIdParams = { 0 }; + NV2080_CTRL_GPU_GET_GID_INFO_PARAMS *pGidParams = NULL; + NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS pciInfoParams = { 0 }; + NvU32 ret; + const char *uuid; + + NVSubDeviceEvoPtr pSubDevice = nvCalloc(1, sizeof(*pSubDevice)); + + if (pSubDevice == NULL) { + goto failure; + } + + pSubDevice->handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + subdevAllocParams.subDeviceId = sd; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSubDevice->handle, + NV20_SUBDEVICE_0, + &subdevAllocParams); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to initialize subDevice"); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSubDevice->handle); + pSubDevice->handle = 0; + goto failure; + } + + if (pDevEvo->deviceId.migDevice != NO_MIG_DEVICE) { + pDevEvo->smg.gpuInstSubHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + pDevEvo->smg.computeInstSubHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (!pDevEvo->smg.gpuInstSubHandle || !pDevEvo->smg.computeInstSubHandle) { + goto failure; + } else { + if (!nvSMGSubscribeSubDevToPartition(&nvEvoGlobal.rmSmgContext, + pSubDevice->handle, + pDevEvo->deviceId.migDevice, + pDevEvo->smg.gpuInstSubHandle, + pDevEvo->smg.computeInstSubHandle)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unable to configure MIG (Multi-Instance GPU) partition"); + goto failure; + } + } + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pSubDevice->handle, + NV2080_CTRL_CMD_GPU_GET_ID, + &getIdParams, + sizeof(getIdParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to identify GPU"); + goto failure; + } + + pSubDevice->gpuId = getIdParams.gpuId; + + /* Query the UUID for the gpuString. */ + + pGidParams = nvCalloc(1, sizeof(*pGidParams)); + + if (pGidParams == NULL) { + goto failure; + } + + pGidParams->flags = + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _ASCII) | + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pSubDevice->handle, + NV2080_CTRL_CMD_GPU_GET_GID_INFO, + pGidParams, + sizeof(*pGidParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + /* If the query failed, make sure the UUID is cleared out. */ + nvkms_memset(pGidParams, 0, sizeof(*pGidParams)); + } + + /* Query the PCI bus address for the gpuString. */ + + pciInfoParams.gpuId = pSubDevice->gpuId; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_GPU_GET_PCI_INFO, + &pciInfoParams, sizeof(pciInfoParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + /* If the query failed, make sure the PCI bus address is cleared out. 
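+           (The gpuString constructed below then falls back to zeroed
+           domain/bus/slot values rather than printing stale data.)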
*/ + nvkms_memset(&pciInfoParams, 0, sizeof(pciInfoParams)); + } + + pSubDevice->gpuLogIndex = nvGetGpuLogIndex(); + + /* + * Create the gpuString, using this example format: + * GPU:0 (GPU-af2422f5-2719-29de-567f-ac899cf458c4) @ PCI:0000:01:00.0 + */ + if ((pGidParams->data[0] == '\0') || (pGidParams->length == 0)) { + uuid = ""; + } else { + uuid = (const char *) pGidParams->data; + } + + if (pDevEvo->deviceId.migDevice != NO_MIG_DEVICE) { + const nvMIGDeviceDescription *desc; + + ret = nvSMGGetDeviceById(&nvEvoGlobal.rmSmgContext, pDevEvo->deviceId.migDevice, &desc); + nvAssert(ret == NV_OK); + nvAssert(desc); + + uuid = desc->migUuid; + } + + nvkms_snprintf(pSubDevice->gpuString, sizeof(pSubDevice->gpuString), + "GPU:%d (%s) @ PCI:%04x:%02x:%02x.0", + pSubDevice->gpuLogIndex, uuid, + pciInfoParams.domain, + pciInfoParams.bus, + pciInfoParams.slot); + + pSubDevice->gpuString[sizeof(pSubDevice->gpuString) - 1] = '\0'; + + nvEvoLogDebug(EVO_LOG_INFO, "Allocated %s", pSubDevice->gpuString); + nvFree(pGidParams); + + return pSubDevice; + +failure: + FreeSubDevice(pDevEvo, pSubDevice); + nvFree(pGidParams); + + return NULL; +} + +static void CloseDevice(NVDevEvoPtr pDevEvo) +{ + NvU32 i; + + for (i = 0; i < ARRAY_LEN(pDevEvo->openedGpuIds); i++) { + const NvU32 gpuId = pDevEvo->openedGpuIds[i]; + + if (gpuId == NV0000_CTRL_GPU_INVALID_ID) { + break; + } + + nvkms_close_gpu(gpuId); + pDevEvo->openedGpuIds[i] = NV0000_CTRL_GPU_INVALID_ID; + } +} + +static NvBool OpenTegraDevice(NVDevEvoPtr pDevEvo) +{ + NV0000_CTRL_GPU_GET_ID_INFO_PARAMS params = { 0 }; + nv_gpu_info_t *gpu_info = NULL; + NvU32 ret, gpu_count = 0; + + nvAssert(pDevEvo->deviceId.rmDeviceId == NVKMS_DEVICE_ID_TEGRA); + + gpu_info = nvAlloc(NV_MAX_GPUS * sizeof(*gpu_info)); + if (gpu_info == NULL) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to allocate GPU ids arrays"); + goto fail; + } + + gpu_count = nvkms_enumerate_gpus(gpu_info); + if (gpu_count == 0) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "No NVIDIA GPUs found"); + goto fail; + } + + if (gpu_count != 1) { + // XXX If the system has both Tegra/iGPU and dGPU, it is not + // guaranteed to find the Tegra, so fail. 
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "More than one NVIDIA GPU found "
+                    "in a Tegra configuration where only Tegra is expected.");
+        goto fail;
+    }
+
+    if (!nvkms_open_gpu(gpu_info[0].gpu_id)) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to open GPU");
+        goto fail;
+    }
+
+    pDevEvo->openedGpuIds[0] = gpu_info[0].gpu_id;
+    params.gpuId = gpu_info[0].gpu_id;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         nvEvoGlobal.clientHandle,
+                         NV0000_CTRL_CMD_GPU_GET_ID_INFO,
+                         &params, sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to find GPU ID");
+        goto fail;
+    }
+
+    nvAssert(FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _SOC, _TRUE,
+                          params.gpuFlags));
+    pDevEvo->deviceId.rmDeviceId = params.deviceInstance;
+
+    nvFree(gpu_info);
+    return TRUE;
+
+fail:
+    nvFree(gpu_info);
+    CloseDevice(pDevEvo);
+    return FALSE;
+}
+
+static NvBool OpenDevice(NVDevEvoPtr pDevEvo)
+{
+    NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS idParams = { };
+    NvU32 ret, i, gpuIdIndex = 0;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         nvEvoGlobal.clientHandle,
+                         NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS,
+                         &idParams, sizeof(idParams));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to query attached GPUs");
+        goto fail;
+    }
+
+    ct_assert(ARRAY_LEN(pDevEvo->openedGpuIds) >= ARRAY_LEN(idParams.gpuIds));
+
+    for (i = 0; i < ARRAY_LEN(idParams.gpuIds); i++) {
+        NV0000_CTRL_GPU_GET_ID_INFO_PARAMS params = { 0 };
+        const NvU32 gpuId = idParams.gpuIds[i];
+
+        if (gpuId == NV0000_CTRL_GPU_INVALID_ID) {
+            break;
+        }
+
+        nvAssert(pDevEvo->openedGpuIds[gpuIdIndex] ==
+                 NV0000_CTRL_GPU_INVALID_ID);
+
+        params.gpuId = gpuId;
+
+        ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                             nvEvoGlobal.clientHandle,
+                             NV0000_CTRL_CMD_GPU_GET_ID_INFO,
+                             &params, sizeof(params));
+
+        if (ret != NVOS_STATUS_SUCCESS) {
+            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to find GPU ID");
+            goto fail;
+        }
+
+        if (pDevEvo->deviceId.rmDeviceId != params.deviceInstance) {
+            continue;
+        }
+
+        if (!nvkms_open_gpu(gpuId)) {
+            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to open GPU");
+            goto fail;
+        }
+
+        pDevEvo->openedGpuIds[gpuIdIndex++] = gpuId;
+    }
+
+    return TRUE;
+
+fail:
+    CloseDevice(pDevEvo);
+    return FALSE;
+}
+
+static void FreeGpuVASpace(NVDevEvoPtr pDevEvo)
+{
+    if (pDevEvo->nvkmsGpuVASpace != 0) {
+        nvRmApiFree(nvEvoGlobal.clientHandle,
+                    pDevEvo->deviceHandle,
+                    pDevEvo->nvkmsGpuVASpace);
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
+                           pDevEvo->nvkmsGpuVASpace);
+        pDevEvo->nvkmsGpuVASpace = 0;
+    }
+}
+
+static NvBool AllocGpuVASpace(NVDevEvoPtr pDevEvo)
+{
+    NvU32 ret;
+    NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS memoryVirtualParams = { };
+
+    pDevEvo->nvkmsGpuVASpace =
+        nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
+
+    memoryVirtualParams.offset = 0;
+    memoryVirtualParams.limit = 0;    // no limit on VA space
+    memoryVirtualParams.hVASpace = 0; // client's default VA space
+
+    ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
+                       pDevEvo->deviceHandle,
+                       pDevEvo->nvkmsGpuVASpace,
+                       NV01_MEMORY_VIRTUAL,
+                       &memoryVirtualParams);
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
+                           pDevEvo->nvkmsGpuVASpace);
+        pDevEvo->nvkmsGpuVASpace = 0;
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+static void NonStallInterruptCallback(
+    void *arg,
+    void *pEventDataVoid,
+    NvU32 hEvent,
+    NvU32 data,
+    NV_STATUS status)
+{
+    /*
+     * We are called within resman's locks. Schedule a separate callback to
+     * execute with the nvkms_lock.
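+     * A zero-delay nvkms timer runs outside resman's locks, in a context
+     * where it is safe for the handler to take the nvkms_lock.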
+ * + * XXX It might be nice to use a lighter-weight lock here to check if any + * requests are pending in any NvKmsDeferredRequestFifo before scheduling + * nvKmsServiceNonStallInterrupt(). + */ + + (void) nvkms_alloc_timer_with_ref_ptr( + nvKmsServiceNonStallInterrupt, /* callback */ + arg, /* argument (this is a ref_ptr to a pDevEvo) */ + 0, /* dataU32 */ + 0); /* usec */ +} + +static void UnregisterNonStallInterruptCallback(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo->nonStallInterrupt.handle != 0) { + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS + eventNotificationParams = { 0 }; + + eventNotificationParams.event = NV2080_NOTIFIERS_FIFO_EVENT_MTHD; + eventNotificationParams.action = + NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &eventNotificationParams, + sizeof(eventNotificationParams)); + + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + pDevEvo->nonStallInterrupt.handle); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->nonStallInterrupt.handle); + } + + pDevEvo->nonStallInterrupt.handle = 0; +} + +static NvBool RegisterNonStallInterruptCallback(NVDevEvoPtr pDevEvo) +{ + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS eventNotificationParams = { 0 }; + + pDevEvo->nonStallInterrupt.handle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (!nvRmRegisterCallback(pDevEvo, + &pDevEvo->nonStallInterrupt.callback, + pDevEvo->ref_ptr, + pDevEvo->pSubDevices[0]->handle, + pDevEvo->nonStallInterrupt.handle, + NonStallInterruptCallback, + NV2080_NOTIFIERS_FIFO_EVENT_MTHD | + NV01_EVENT_NONSTALL_INTR)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to register nonstall interrupt callback"); + goto failure_free_handle; + } + + // Setup event notification + eventNotificationParams.event = NV2080_NOTIFIERS_FIFO_EVENT_MTHD; + eventNotificationParams.action = + NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &eventNotificationParams, + sizeof(eventNotificationParams)) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to set nonstall interrupt notification"); + goto failure_free_callback_and_handle; + } + + return TRUE; + +failure_free_callback_and_handle: + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + pDevEvo->nonStallInterrupt.handle); +failure_free_handle: + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->nonStallInterrupt.handle); + pDevEvo->nonStallInterrupt.handle = 0; + return FALSE; +} + +NvBool nvRmAllocDeviceEvo(NVDevEvoPtr pDevEvo, + const struct NvKmsAllocDeviceRequest *pRequest) +{ + NV0080_ALLOC_PARAMETERS allocParams = { 0 }; + NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS getNumSubDevicesParams = { 0 }; + NvU32 ret, sd; + NvU32 handleSpace = pRequest->deviceId.rmDeviceId; + + if (nvEvoGlobal.clientHandle == 0) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Client handle not initialized"); + goto failure; + } + + /* + * RM deviceIds should be within [0,NV_MAX_DEVICES); check + * that the client provided a value in range, and add one when + * using deviceId as the per-device unique identifier in the + * RM handle allocator: the identifier is expected to be != 0. 
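+     * (The +1 presumably happens inside NVKMS_RM_HANDLE_SPACE_DEVICE(),
+     * used below.)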
+     */
+
+    if ((pRequest->deviceId.rmDeviceId >= NV_MAX_DEVICES) &&
+        (pRequest->deviceId.rmDeviceId != NVKMS_DEVICE_ID_TEGRA)) {
+        goto failure;
+    }
+
+    pDevEvo->dpTimer = nvDPAllocTimer(pDevEvo);
+    if (!pDevEvo->dpTimer) {
+        goto failure;
+    }
+
+    /*
+     * Pack the device ID and GPU instance ID into handleSpace.
+     */
+    if (pRequest->deviceId.migDevice != NO_MIG_DEVICE) {
+        const nvMIGDeviceDescription *migDesc;
+
+        if (nvSMGGetDeviceById(&nvEvoGlobal.rmSmgContext,
+                               pRequest->deviceId.migDevice,
+                               &migDesc) != NV_OK) {
+            goto failure;
+        }
+
+        /* NV_MAX_DEVICES is currently 32 so rmDeviceId should not take more
+         * than 5 bits. */
+        nvAssert((handleSpace & ~0x1f) == 0);
+
+        /* The gpuInstanceId (or swizzId) has no defined upper limit. But
+         * there is a soft max of 14 that follows from enumerating all the
+         * currently possible partitionings up to a maximum of 8 partitions.
+         * Further, there is a hard max derived from RM expecting to be able
+         * to maintain active swizzIds in a 64-bit mask. So if gpuInstanceId
+         * was >=64 a lot of things would blow up inside RM. Thus, we should
+         * expect to see gpuInstanceIds that fit in 6 bits. */
+        nvAssert((migDesc->gpuInstanceId & ~0x3f) == 0);
+
+        /* This packing now consumes the first 11 bits; e.g., rmDeviceId 2
+         * with gpuInstanceId 3 packs to (2 | (3 << 5)) == 0x62. */
+        handleSpace |= (migDesc->gpuInstanceId << 5);
+    }
+
+    /* In any case, the final value should fit in the lower 16 bits. The
+     * upper 16 bits will be used by the handle allocator. */
+    nvAssert((handleSpace & 0xffff0000) == 0);
+
+    if (!nvInitUnixRmHandleAllocator(
+            &pDevEvo->handleAllocator,
+            nvEvoGlobal.clientHandle,
+            NVKMS_RM_HANDLE_SPACE_DEVICE(handleSpace))) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to initialize handles");
+        goto failure;
+    }
+
+    pDevEvo->deviceHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
+
+    pDevEvo->deviceId.rmDeviceId = pRequest->deviceId.rmDeviceId;
+    pDevEvo->deviceId.migDevice = pRequest->deviceId.migDevice;
+    pDevEvo->sli.mosaic = pRequest->sliMosaic;
+
+    if (pRequest->deviceId.rmDeviceId == NVKMS_DEVICE_ID_TEGRA) {
+        /*
+         * On Tegra, the NVKMS client is not a desktop RM client, so
+         * enumerate and open the first GPU.
+         */
+        if (!OpenTegraDevice(pDevEvo)) {
+            goto failure;
+        }
+
+        /* OpenTegraDevice should have assigned the real device ID */
+        nvAssert(pDevEvo->deviceId.rmDeviceId != NVKMS_DEVICE_ID_TEGRA);
+    } else if (!OpenDevice(pDevEvo)) {
+        goto failure;
+    }
+
+    allocParams.deviceId = pDevEvo->deviceId.rmDeviceId;
+
+    /* Give NVKMS a private GPU virtual address space.
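+       Passing our own client handle as hClientShare is what requests a VA
+       space private to this client; an hClientShare of zero would select
+       RM's default global VA space.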
*/ + allocParams.hClientShare = nvEvoGlobal.clientHandle; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV01_DEVICE_0, + &allocParams); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to initialize device"); + goto failure; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES, + &getNumSubDevicesParams, + sizeof(getNumSubDevicesParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to determine number of GPUs"); + goto failure; + } + + ct_assert(NVKMS_MAX_SUBDEVICES == NV_MAX_SUBDEVICES); + if ((getNumSubDevicesParams.numSubDevices == 0) || + (getNumSubDevicesParams.numSubDevices > + ARRAY_LEN(pDevEvo->pSubDevices))) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unsupported number of GPUs: %d", + getNumSubDevicesParams.numSubDevices); + goto failure; + } + + pDevEvo->numSubDevices = getNumSubDevicesParams.numSubDevices; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + pDevEvo->pSubDevices[sd] = AllocSubDevice(pDevEvo, sd); + if (pDevEvo->pSubDevices[sd] == NULL) { + goto failure; + } + } + + pDevEvo->gpuLogIndex = pDevEvo->pSubDevices[0]->gpuLogIndex; + + if (!GetClassList(pDevEvo) || !GetEngineList(pDevEvo)) { + goto failure; + } + + if (!RegisterNonStallInterruptCallback(pDevEvo)) { + goto failure; + } + + if (nvRmEvoClassListCheck(pDevEvo, NV01_MEMORY_VIRTUAL)) { + if (!AllocGpuVASpace(pDevEvo)) { + goto failure; + } + + if (!nvAllocNvPushDevice(pDevEvo)) { + goto failure; + } + } + + return TRUE; + +failure: + nvRmFreeDeviceEvo(pDevEvo); + return FALSE; +} + +void nvRmFreeDeviceEvo(NVDevEvoPtr pDevEvo) +{ + NvU32 sd; + + nvFreeNvPushDevice(pDevEvo); + + FreeGpuVASpace(pDevEvo); + + UnregisterNonStallInterruptCallback(pDevEvo); + + nvFree(pDevEvo->supportedClasses); + pDevEvo->supportedClasses = NULL; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + FreeSubDevice(pDevEvo, pDevEvo->pSubDevices[sd]); + pDevEvo->pSubDevices[sd] = NULL; + } + + if (pDevEvo->deviceHandle != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDevEvo->deviceHandle); + pDevEvo->deviceHandle = 0; + } + + nvTearDownUnixRmHandleAllocator(&pDevEvo->handleAllocator); + + nvDPFreeTimer(pDevEvo->dpTimer); + pDevEvo->dpTimer = NULL; + + CloseDevice(pDevEvo); +} + +/* + * Set up DIFR notifier listener to drive framebuffer prefetching once the + * h/w gets idle enough. + */ +NvBool nvRmRegisterDIFREventHandler(NVDevEvoPtr pDevEvo) +{ + pDevEvo->difrPrefetchEventHandler = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (pDevEvo->difrPrefetchEventHandler != 0) { + NvBool registered; + + /* + * Allocate event callback. + */ + registered = nvRmRegisterCallback( + pDevEvo, + &pDevEvo->difrPrefetchCallback, + pDevEvo->ref_ptr, + pDevEvo->pSubDevices[0]->handle, + pDevEvo->difrPrefetchEventHandler, + DifrPrefetchEvent, + NV2080_NOTIFIERS_LPWR_DIFR_PREFETCH_REQUEST); + + /* + * Configure event notification. 
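+         * _ACTION_REPEAT keeps the notification armed after each event, so
+         * DIFR prefetch requests continue to be delivered until explicitly
+         * disabled.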
+ */ + if (registered) { + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS prefetchEventParams = { 0 }; + + prefetchEventParams.event = NV2080_NOTIFIERS_LPWR_DIFR_PREFETCH_REQUEST; + prefetchEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &prefetchEventParams, + sizeof(prefetchEventParams)) + == NVOS_STATUS_SUCCESS) { + return TRUE; + + } + } + nvRmUnregisterDIFREventHandler(pDevEvo); + } + return FALSE; +} + +void nvRmUnregisterDIFREventHandler(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo->difrPrefetchEventHandler != 0) { + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS prefetchEventParams = { 0 }; + + prefetchEventParams.event = NV2080_NOTIFIERS_LPWR_DIFR_PREFETCH_REQUEST; + prefetchEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + + nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &prefetchEventParams, + sizeof(prefetchEventParams)); + + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + pDevEvo->difrPrefetchEventHandler); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->difrPrefetchEventHandler); + pDevEvo->difrPrefetchEventHandler = 0; + } +} + + +/*! + * Determine whether all the dpys in the dpyIdList can be activated together. + * + * \param[in] pDispEvo The disp on which we search for a head. + * \param[in] dpyIdList The connectors to test. + * + * \return Return TRUE if all dpys can be driven simultaneously. + */ +NvBool nvRmIsPossibleToActivateDpyIdList(NVDispEvoPtr pDispEvo, + const NVDpyIdList dpyIdList) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS mapParams = { 0 }; + NvU32 ret = 0; + + /* Trivially accept an empty dpyIdList. */ + + if (nvDpyIdListIsEmpty(dpyIdList)) { + return TRUE; + } + + /* don't even try if EVO isn't initialized (e.g. during a VT switch) */ + + if (!pDevEvo->gpus) { + return FALSE; + } + + /* build a mask of all the displays to use */ + + mapParams.subDeviceInstance = pDispEvo->displayOwner; + + mapParams.displayMask = nvDpyIdListToNvU32(dpyIdList); + + /* ask RM for the head routing */ + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_HEAD_ROUTING_MAP, + &mapParams, + sizeof(mapParams)); + + if ((ret != NVOS_STATUS_SUCCESS) || (mapParams.displayMask == 0)) { + char *dpyIdListStr = nvGetDpyIdListStringEvo(pDispEvo, dpyIdList); + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "The requested configuration of display devices " + "(%s) is not supported on this GPU.", + nvSafeString(dpyIdListStr, "unknown")); + nvFree(dpyIdListStr); + + return FALSE; + } + + /* make sure we got everything we asked for */ + + if (mapParams.displayMask != nvDpyIdListToNvU32(dpyIdList)) { + char *requestedDpyIdListStr; + char *returnedDpyIdListStr; + + requestedDpyIdListStr = + nvGetDpyIdListStringEvo(pDispEvo, dpyIdList); + + returnedDpyIdListStr = + nvGetDpyIdListStringEvo(pDispEvo, + nvNvU32ToDpyIdList(mapParams.displayMask)); + + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "The requested configuration of display devices " + "(%s) is not supported on this GPU; " + "%s is recommended, instead.", + nvSafeString(requestedDpyIdListStr, "unknown"), + nvSafeString(returnedDpyIdListStr, "unknown")); + + nvFree(requestedDpyIdListStr); + nvFree(returnedDpyIdListStr); + + return FALSE; + } + + return TRUE; +} + + +/*! 
+ * Tell the RM to save or restore the console VT state.
+ *
+ * \param[in]  cmd  The save/restore action to request of RM.
+ *
+ * \return TRUE on success, FALSE on failure.
+ */
+NvBool nvRmVTSwitch(NVDevEvoPtr pDevEvo, NvU32 cmd)
+{
+    NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS params = { 0 };
+    NvU32 ret;
+
+    params.cmd = cmd;
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->deviceHandle,
+                         NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH,
+                         &params, sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+NvBool nvRmGetVTFBInfo(NVDevEvoPtr pDevEvo)
+{
+    NvU32 ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                               pDevEvo->deviceHandle,
+                               NV0080_CTRL_CMD_OS_UNIX_VT_GET_FB_INFO,
+                               &pDevEvo->vtFbInfo, sizeof(pDevEvo->vtFbInfo));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+/*!
+ * Import the current framebuffer console memory, for later use with
+ * NVKMS-based console restore.
+ *
+ * Note this relies on pDevEvo->vtFbInfo, populated by nvRmGetVTFBInfo().
+ *
+ * There are several cases in which NVKMS cannot perform console restore:
+ *
+ * - Anything other than linear frame buffer consoles (e.g., VGA text modes,
+ *   non-linear or paletted graphical modes, etc.). For those, resman cannot
+ *   query the framebuffer dimensions from the kernel,
+ *   NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE returns empty fbInfo
+ *   params, and consequently pDevEvo->vtFbInfo.width == 0.
+ *
+ * - Linear frame buffer console with an unaligned pitch. In this case,
+ *   nvEvoRegisterSurface() will fail: it has to ensure the surface
+ *   registration satisfies the EVO method interface requirement that the
+ *   pitch of PITCH-layout surfaces is a multiple of 256 bytes. Consequently,
+ *   pDevEvo->fbConsoleSurfaceHandle will be 0.
+ *
+ * - Depth 8 frame buffer consoles: these are color index, and cannot be
+ *   supported by NVKMS console restore because they require the VGA palette,
+ *   which exists in special RAM in the VGA core, so we can't name it with a
+ *   ctxdma that we can feed into EVO's LUT. The pFbInfo->depth switch below
+ *   will reject depth 8.
+ */
+void nvRmImportFbConsoleMemory(NVDevEvoPtr pDevEvo)
+{
+    NvU32 ret;
+    struct NvKmsRegisterSurfaceParams registration = { };
+    const NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pFbInfo = &pDevEvo->vtFbInfo;
+    NvHandle hMemory;
+
+    nvAssert(pDevEvo->fbConsoleSurfaceHandle == 0);
+
+    if (pFbInfo->width == 0) {
+        // No console memory to map.
+        return;
+    }
+
+    switch (pFbInfo->depth) {
+    case 15:
+        registration.request.format = NvKmsSurfaceMemoryFormatX1R5G5B5;
+        break;
+    case 16:
+        registration.request.format = NvKmsSurfaceMemoryFormatR5G6B5;
+        break;
+    case 32:
+        // That's a lie, it's really depth 24. Fall through.
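+        // (Both scan out as X8R8G8B8: 32 bits per pixel with the X byte
+        // ignored.)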
+    case 24:
+        registration.request.format = NvKmsSurfaceMemoryFormatX8R8G8B8;
+        break;
+    default:
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN,
+                         "Unsupported framebuffer console depth %d",
+                         pFbInfo->depth);
+        return;
+    }
+
+    hMemory = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
+    if (hMemory == 0) {
+        return;
+    }
+
+    ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
+                       pDevEvo->deviceHandle,
+                       hMemory,
+                       NV01_MEMORY_FRAMEBUFFER_CONSOLE,
+                       NULL);
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN,
+                         "Failed to map framebuffer console memory");
+        goto done;
+    }
+
+    registration.request.useFd = FALSE;
+    registration.request.rmClient = nvEvoGlobal.clientHandle;
+    registration.request.widthInPixels = pFbInfo->width;
+    registration.request.heightInPixels = pFbInfo->height;
+    registration.request.layout = NvKmsSurfaceMemoryLayoutPitch;
+
+    registration.request.planes[0].u.rmObject = hMemory;
+    registration.request.planes[0].pitch = pFbInfo->pitch;
+    registration.request.planes[0].rmObjectSizeInBytes =
+        (NvU64) pFbInfo->height * (NvU64) pFbInfo->pitch;
+
+    nvEvoRegisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev, &registration,
+                         NvHsMapPermissionsNone);
+
+    pDevEvo->fbConsoleSurfaceHandle = registration.reply.surfaceHandle;
+
+    // nvEvoRegisterSurface dups the handle, so we can free the one we just
+    // imported.
+    nvRmApiFree(nvEvoGlobal.clientHandle,
+                nvEvoGlobal.clientHandle,
+                hMemory);
+done:
+    nvFreeUnixRmHandle(&pDevEvo->handleAllocator, hMemory);
+}
+
+void nvRmUnmapFbConsoleMemory(NVDevEvoPtr pDevEvo)
+{
+    struct NvKmsPerOpenDev *pOpenDev = pDevEvo->pNvKmsOpenDev;
+    NVEvoApiHandlesRec *pOpenDevSurfaceHandles =
+        nvGetSurfaceHandlesFromOpenDev(pOpenDev);
+    NVSurfaceEvoPtr pSurfaceEvo =
+        nvEvoGetSurfaceFromHandle(pDevEvo,
+                                  pOpenDevSurfaceHandles,
+                                  pDevEvo->fbConsoleSurfaceHandle,
+                                  FALSE,
+                                  TRUE);
+    NvU32 status;
+
+    if (!pSurfaceEvo) {
+        return;
+    }
+
+    // Tell Resman that the surface mapping is no longer needed.
+    status = nvRmApiControl(nvEvoGlobal.clientHandle,
+                            pSurfaceEvo->planes[0].rmHandle,
+                            NV0076_CTRL_CMD_NOTIFY_CONSOLE_DISABLED,
+                            NULL, 0);
+    (void)status;
+    nvAssert(status == NVOS_STATUS_SUCCESS);
+
+    // Free the NVKMS surface.
+    nvEvoUnregisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev,
+                           pDevEvo->fbConsoleSurfaceHandle,
+                           TRUE /* skipUpdate */,
+                           FALSE /* skipSync */);
+    pDevEvo->fbConsoleSurfaceHandle = 0;
+}
+
+static void LogAuxPacket(const NVDispEvoRec *pDispEvo, const DPAUXPACKET *pkt)
+{
+    const char *req, *rep;
+    char str[DP_MAX_MSG_SIZE * 3 + 1];
+    char *p = str;
+    int i;
+
+    switch (DRF_VAL(_DP, _AUXLOGGER, _REQUEST_TYPE, pkt->auxEvents)) {
+    case NV_DP_AUXLOGGER_REQUEST_TYPE_AUXWR:
+        req = "auxwr";
+        break;
+    case NV_DP_AUXLOGGER_REQUEST_TYPE_AUXRD:
+        req = "auxrd";
+        break;
+    case NV_DP_AUXLOGGER_REQUEST_TYPE_MOTWR:
+        // MOT is "middle of transaction", which is just another type of i2c
+        // access.
+        req = "motwr";
+        break;
+    case NV_DP_AUXLOGGER_REQUEST_TYPE_I2CWR:
+        req = "i2cwr";
+        break;
+    case NV_DP_AUXLOGGER_REQUEST_TYPE_MOTRD:
+        req = "motrd";
+        break;
+    case NV_DP_AUXLOGGER_REQUEST_TYPE_I2CRD:
+        req = "i2crd";
+        break;
+    default:
+        // Only log I2C and AUX transactions.
+ return; + } + + switch (DRF_VAL(_DP, _AUXLOGGER, _REPLY_TYPE, pkt->auxEvents)) { + case NV_DP_AUXLOGGER_REPLY_TYPE_NULL: + rep = "none"; + break; + case NV_DP_AUXLOGGER_REPLY_TYPE_SB_ACK: + rep = "sb_ack"; + break; + case NV_DP_AUXLOGGER_REPLY_TYPE_RETRY: + rep = "retry"; + break; + case NV_DP_AUXLOGGER_REPLY_TYPE_TIMEOUT: + rep = "timeout"; + break; + case NV_DP_AUXLOGGER_REPLY_TYPE_DEFER: + rep = "defer"; + break; + case NV_DP_AUXLOGGER_REPLY_TYPE_DEFER_TO: + rep = "defer_to"; + break; + case NV_DP_AUXLOGGER_REPLY_TYPE_ACK: + rep = "ack"; + break; + case NV_DP_AUXLOGGER_REPLY_TYPE_ERROR: + rep = "error"; + break; + default: + case NV_DP_AUXLOGGER_REPLY_TYPE_UNKNOWN: + rep = "unknown"; + break; + } + + for (i = 0; i < pkt->auxMessageReplySize; i++) { + p += nvkms_snprintf(p, str + sizeof(str) - p, "%02x ", + pkt->auxPacket[i]); + } + + nvAssert(p < str + sizeof(str)); + *p = '\0'; + + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + "%04u: port %u @ 0x%05x: [%10u] %s %2u, [%10u] %-8s %s", + pkt->auxCount, pkt->auxOutPort, pkt->auxPortAddress, + pkt->auxRequestTimeStamp, req, + pkt->auxMessageReqSize, + pkt->auxReplyTimeStamp, rep, + str); +} + +/*! + * This "attribute" queries the RM DisplayPort AUX channel log and dumps it to + * the kernel log. It returns a value of TRUE if any RM AUX transactions were + * logged, and FALSE otherwise. + * + * This attribute is intended to be queried in a loop as long as it reads TRUE. + * + * \return TRUE if the query succeeded (even if no events were logged). + * \return FALSE if the query failed. + */ +NvBool nvRmQueryDpAuxLog(NVDispEvoRec *pDispEvo, NvS64 *pValue) +{ + NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS *pParams = + nvCalloc(sizeof(*pParams), 1); + NvU32 status; + int i; + NvBool ret = FALSE; + + pDispEvo->dpAuxLoggingEnabled = TRUE; + *pValue = FALSE; + + if (!pParams) { + return FALSE; + } + + pParams->subDeviceInstance = pDispEvo->displayOwner; + pParams->dpAuxBufferReadSize = MAX_LOGS_PER_POLL; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + pDispEvo->pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA, + pParams, sizeof(*pParams)); + if (status != NVOS_STATUS_SUCCESS) { + goto done; + } + + nvAssert(pParams->dpNumMessagesRead <= MAX_LOGS_PER_POLL); + for (i = 0; i < pParams->dpNumMessagesRead; i++) { + const DPAUXPACKET *pkt = &pParams->dpAuxBuffer[i]; + + switch (DRF_VAL(_DP, _AUXLOGGER, _EVENT_TYPE, pkt->auxEvents)) { + case NV_DP_AUXLOGGER_EVENT_TYPE_AUX: + LogAuxPacket(pDispEvo, pkt); + break; + case NV_DP_AUXLOGGER_EVENT_TYPE_HOT_PLUG: + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + "%04u: port %u [%10u] hotplug", + pkt->auxCount, pkt->auxOutPort, + pkt->auxRequestTimeStamp); + break; + case NV_DP_AUXLOGGER_EVENT_TYPE_HOT_UNPLUG: + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + "%04u: port %u [%10u] unplug", + pkt->auxCount, pkt->auxOutPort, + pkt->auxRequestTimeStamp); + break; + case NV_DP_AUXLOGGER_EVENT_TYPE_IRQ: + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + "%04u: port %u [%10u] irq", + pkt->auxCount, pkt->auxOutPort, + pkt->auxRequestTimeStamp); + break; + default: + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + "%04u: port %u [%10u] unknown event", + pkt->auxCount, pkt->auxOutPort, + pkt->auxRequestTimeStamp); + break; + } + + *pValue = TRUE; + } + + ret = TRUE; + +done: + nvFree(pParams); + return ret; +} + +/*! + * Return the GPU's current PTIMER, or 0 if the query fails. 
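+ * PTIMER is the GPU's free-running nanosecond-resolution clock; the value
+ * is returned in params.time_nsec below.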
+ */
+NvU64 nvRmGetGpuTime(NVDevEvoPtr pDevEvo)
+{
+    const NvU32 sd = 0;
+    NV2080_CTRL_TIMER_GET_TIME_PARAMS params;
+
+    NvU32 ret;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->pSubDevices[sd]->handle,
+                         NV2080_CTRL_CMD_TIMER_GET_TIME,
+                         &params, sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDebug(EVO_LOG_ERROR, "Failed to query GPU time, ret = %d", ret);
+        return 0;
+    }
+
+    return params.time_nsec;
+}
+
+NvBool nvRmSetGc6Allowed(NVDevEvoPtr pDevEvo, NvBool allowed)
+{
+    NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS params = { };
+    NvU32 sd;
+
+    if (allowed == pDevEvo->gc6Allowed) {
+        return TRUE;
+    }
+
+    params.action = allowed ? NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC :
+                              NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC;
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        NvU32 ret = nvRmApiControl(
+            nvEvoGlobal.clientHandle,
+            pDevEvo->pSubDevices[sd]->handle,
+            NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT,
+            &params, sizeof(params));
+        if (ret != NVOS_STATUS_SUCCESS) {
+            // XXX This is catastrophic, is there a good way to unravel?
+            nvEvoLogDevDebug(
+                pDevEvo, EVO_LOG_ERROR,
+                "Failed to modify GC6 blocker refcount, sd = %d, ret = %x",
+                sd, ret);
+            return FALSE;
+        }
+    }
+
+    pDevEvo->gc6Allowed = allowed;
+
+    /*
+     * If we are just now disallowing GC6, it's possible that we previously
+     * entered GC6 and invalidated display channel state. Re-initialize it here
+     * to ensure that future modesets are successful.
+     */
+    if (!allowed && pDevEvo->core) {
+        NvU32 channelIdx;
+
+        pDevEvo->hal->InitChannel(pDevEvo, pDevEvo->core);
+        pDevEvo->coreInitMethodsPending = TRUE;
+
+        for (channelIdx = 0; channelIdx < pDevEvo->numHeads; channelIdx++) {
+            // XXX We should InitChannel() for all per-head channels when coming
+            // out of GC6.
+            pDevEvo->hal->InitChannel(
+                pDevEvo, pDevEvo->head[channelIdx].layer[NVKMS_MAIN_LAYER]);
+        }
+    }
+
+    return TRUE;
+}
+
+typedef struct _NVRmRgLine1CallbackRec {
+    NVRgLine1CallbackRec base;
+    struct nvkms_ref_ptr *ref_ptr;
+    NvU32 rmHandle;
+    NVDispEvoRec *pDispEvo;
+    NvU32 head;
+} NVRmRgLine1CallbackRec;
+
+static void RGLine1ServiceInterrupt(void *dataPtr, NvU32 dataU32)
+{
+    NVRmRgLine1CallbackRec *pRmCallback = (NVRmRgLine1CallbackRec*)dataPtr;
+    pRmCallback->base.pCallbackProc(pRmCallback->pDispEvo, pRmCallback->head,
+                                    &pRmCallback->base);
+}
+
+/*!
+ * Receive RG line 1 interrupt notification from resman.
+ *
+ * This function is registered as the kernel callback function from resman when
+ * the RG line 1 interrupt is generated.
+ *
+ * This function is called within resman's context, so we schedule a zero-delay
+ * timer callback to process the swapgroup check and release without holding
+ * the resman lock.
+ */
+static void RGLine1InterruptCallback(NvU32 rgIntrLine, void *param1,
+                                     NvBool bIsIrqlIsr /* unused */)
+{
+    (void) nvkms_alloc_timer_with_ref_ptr(
+        RGLine1ServiceInterrupt, /* callback */
+        param1, /* argument (this is a ref_ptr to a NVRmRgLine1CallbackRec*) */
+        0,  /* dataU32 */
+        0); /* usec */
+}
+
+/*!
+ * Register an RM callback function for the RG line 1 interrupt.
+ *
+ * \param[in]  pDispEvo   The display on which to allocate the callback
+ *
+ * \param[in]  head       The head on which to allocate the callback
+ *
+ * \param[in]  pCallback  The callback function pointer to be registered
+ *
+ * \return Pointer to callback object on success, NULL on failure. This same
+ *         pointer must be used to unregister the callback.
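+ *         (It is the argument to pass to nvRmRemoveRgLine1Callback().)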
+ */
+NVRgLine1CallbackPtr
+nvRmAddRgLine1Callback(NVDispEvoRec *pDispEvo,
+                       NvU32 head,
+                       NVRgLine1CallbackProc pCallbackProc,
+                       void *pUserData)
+{
+    NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS rgLineParams = { 0 };
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
+    NvU32 ret;
+    NVRmRgLine1CallbackRec *pRmCallback = nvCalloc(1, sizeof(*pRmCallback));
+
+    if (pRmCallback == NULL) {
+        goto failed;
+    }
+
+    pRmCallback->ref_ptr = nvkms_alloc_ref_ptr(pRmCallback);
+    if (pRmCallback->ref_ptr == NULL) {
+        goto failed;
+    }
+    pRmCallback->base.pCallbackProc = pCallbackProc;
+    pRmCallback->base.pUserData = pUserData;
+    pRmCallback->rmHandle = handle;
+    pRmCallback->pDispEvo = pDispEvo;
+    pRmCallback->head = head;
+
+    rgLineParams.subDeviceInstance = pDispEvo->displayOwner;
+    rgLineParams.head = head;
+    rgLineParams.rgLineNum = 1;
+    rgLineParams.pCallbkFn = RGLine1InterruptCallback;
+    rgLineParams.pCallbkParams = pRmCallback->ref_ptr;
+
+    ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
+                       pDevEvo->displayCommonHandle,
+                       handle,
+                       NV0092_RG_LINE_CALLBACK,
+                       &rgLineParams);
+
+    if (ret == NVOS_STATUS_SUCCESS) {
+        return &pRmCallback->base;
+    }
+
+    nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                     "Failed to enable RG line interrupt, ret: %d", ret);
+    /* fall through */
+
+failed:
+    if (pRmCallback != NULL) {
+        nvkms_free_ref_ptr(pRmCallback->ref_ptr);
+        nvFree(pRmCallback);
+    }
+
+    nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle);
+
+    return NULL;
+}
+
+/*!
+ * Unregister an RM callback function previously registered with
+ * nvRmAddRgLine1Callback.
+ *
+ * \param[in]  pDispEvo   The display on which to unregister the
+ *                        callback
+ *
+ * \param[in]  pCallback  Pointer to the previously allocated
+ *                        callback object
+ */
+void nvRmRemoveRgLine1Callback(const NVDispEvoRec *pDispEvo,
+                               NVRgLine1CallbackPtr pCallback)
+{
+    NVRmRgLine1CallbackRec *pRmCallback;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 ret;
+
+    if (pCallback == NULL) {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                         "Failed to disable RG line interrupt, obj pointer NULL");
+        return;
+    }
+    pRmCallback = nv_container_of(pCallback, NVRmRgLine1CallbackRec, base);
+
+    ret = nvRmApiFree(nvEvoGlobal.clientHandle,
+                      pDevEvo->displayCommonHandle,
+                      pRmCallback->rmHandle);
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                         "Failed to disable RG line interrupt, ret: %d", ret);
+    }
+
+    nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pRmCallback->rmHandle);
+    nvkms_free_ref_ptr(pRmCallback->ref_ptr);
+    nvFree(pRmCallback);
+}
+
+/*!
+ * Register an RM callback function for the VBlank interrupt.
+ *
+ * \param[in]  pDispEvo   The display on which to allocate the callback
+ *
+ * \param[in]  head       The head on which to allocate the callback
+ *
+ * \param[in]  pCallback  The callback function pointer to be registered
+ *
+ * \return Handle to callback object on success, 0 on failure. This same
+ *         handle must be used to unregister the callback.
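+ *         (nvRmRemoveVBlankCallback() treats a handle of 0 as already
+ *         removed, so the failure value is safe to pass back to it.)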
+ */
+NvU32 nvRmAddVBlankCallback(
+    const NVDispEvoRec *pDispEvo,
+    NvU32 head,
+    OSVBLANKCALLBACKPROC pCallback,
+    void *pParam2)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    const NvU32 sd = pDispEvo->displayOwner;
+    NvU32 ret;
+    NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
+
+    NV_VBLANK_CALLBACK_ALLOCATION_PARAMETERS params = {
+        .pProc = pCallback,
+        .LogicalHead = head,
+        .pParm1 = pDispEvo->ref_ptr,
+        .pParm2 = pParam2,
+    };
+
+    ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
+                       pDevEvo->pSubDevices[sd]->handle,
+                       handle,
+                       NV9010_VBLANK_CALLBACK,
+                       &params);
+
+    if (ret == NVOS_STATUS_SUCCESS) {
+        return handle;
+    } else {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                         "Failed to enable VBlank callback, ret: %d", ret);
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle);
+        return 0;
+    }
+}
+
+/*!
+ * Unregister an RM callback function previously registered with
+ * nvRmAddVBlankCallback.
+ *
+ * \param[in]  pDispEvo              The display on which to unregister the
+ *                                   callback
+ *
+ * \param[in]  callbackObjectHandle  Handle to the previously allocated
+ *                                   callback object
+ */
+void nvRmRemoveVBlankCallback(const NVDispEvoRec *pDispEvo,
+                              NvU32 callbackObjectHandle)
+{
+    const NvU32 sd = pDispEvo->displayOwner;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 ret;
+
+    if (callbackObjectHandle == 0) {
+        // already removed
+        return;
+    }
+
+    ret = nvRmApiFree(nvEvoGlobal.clientHandle,
+                      pDevEvo->pSubDevices[sd]->handle,
+                      callbackObjectHandle);
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                         "Failed to disable VBlank callback, ret: %d", ret);
+    }
+
+    nvFreeUnixRmHandle(&pDevEvo->handleAllocator, callbackObjectHandle);
+}
+
+/*!
+ * Initialize the dynamic display mux on supported systems.
+ *
+ * \param[in]  pDpyEvo  The dpy on which to initialize the mux.
+ */
+static void MuxInit(const NVDpyEvoRec *pDpyEvo)
+{
+    NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS params = { 0 };
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+
+    NvU32 ret;
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyEvoGetConnectorId(pDpyEvo);
+
+    if (pDpyEvo->internal) {
+        /* Attempt to get the EDID from ACPI. This is required for internal
+         * displays only, as the internal mux initialization requires data
+         * from the internal panel's EDID, while the external mux can be
+         * initialized in the absence of a display, in which case there is
+         * obviously no EDID present. The EDID read is done via ACPI, in
+         * order to accommodate mux initialization while the internal panel
+         * is disconnected from the GPU. */
+
+        /* Map with hard-coded data for systems known to support dynamic mux
+         * switching. This is a poor-man's alternative to the WDDM driver's
+         * CDisplayMgr::NVInitializeACPIToDeviceMaskMap() */
+        NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS acpiMap = {
+            .mapTable = {
+                {.acpiId = 0x8001a420, .displayId = 0x1000, .dodIndex = 0},
+            }
+        };
+        NVEdidRec edid = { };
+        NVParsedEdidEvoRec *pParsedEdid = NULL;
+        NVEvoInfoStringRec infoString;
+
+        ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                             pDevEvo->displayCommonHandle,
+                             NV0073_CTRL_CMD_SPECIFIC_SET_ACPI_ID_MAPPING,
+                             &acpiMap, sizeof(acpiMap));
+
+        if (ret != NVOS_STATUS_SUCCESS) {
+            nvEvoLogDebug(EVO_LOG_ERROR, "Failed to set ACPI ID map.");
+            return;
+        }
+
+        nvInitInfoString(&infoString, NULL, 0);
+
+        /* Retrieve the internal panel's EDID from ACPI */
+        if (!nvDpyReadAndParseEdidEvo(pDpyEvo, NULL,
+                                      NVKMS_EDID_READ_MODE_ACPI,
+                                      &edid, &pParsedEdid,
+                                      &infoString)) {
+            /* EDID read is expected to fail on non-dynamic-mux systems. */
+            goto edid_done;
+        }
+
+        if (edid.length == 0 || pParsedEdid == NULL || !pParsedEdid->valid) {
+            goto edid_done;
+        }
+
+        params.manfId = pParsedEdid->info.manuf_id;
+        params.productId = pParsedEdid->info.product_id;
+
+edid_done:
+        nvFree(edid.buffer);
+        nvFree(pParsedEdid);
+
+        /* Internal mux initialization will fail without manfId/productId */
+        if (!params.manfId || !params.productId) {
+            return;
+        }
+    }
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_DFP_INIT_MUX_DATA,
+                         &params,
+                         sizeof(params));
+
+    if (ret == NVOS_STATUS_SUCCESS) {
+        pDispEvo->muxDisplays = nvAddDpyIdToDpyIdList(pDpyEvo->id,
+                                                      pDispEvo->muxDisplays);
+    } else {
+        nvEvoLogDebug(EVO_LOG_ERROR, "Failed to initialize mux on %s.",
+                      pDpyEvo->name);
+    }
+}
+
+static NVDpyIdList GetValidMuxDpys(NVDispEvoPtr pDispEvo)
+{
+    NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS params = { 0 };
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+
+    nvRmApiControl(nvEvoGlobal.clientHandle,
+                   pDispEvo->pDevEvo->displayCommonHandle,
+                   NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX,
+                   &params, sizeof(params));
+
+    return nvNvU32ToDpyIdList(params.muxDisplayMask);
+}
+
+void nvRmMuxInit(NVDevEvoPtr pDevEvo)
+{
+    NVDispEvoPtr pDispEvo;
+    int i;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
+        NVDpyIdList validMuxDpys = GetValidMuxDpys(pDispEvo);
+        NVDpyEvoPtr pDpyEvo;
+
+        FOR_ALL_EVO_DPYS(pDpyEvo, validMuxDpys, pDispEvo) {
+            MuxInit(pDpyEvo);
+        }
+    }
+}
+
+/*!
+ * Perform mux pre-switch operations
+ *
+ * \param[in]  pDpyEvo  The Dpy of the target mux
+ * \param[in]  state    The target mux state
+ *
+ * \return TRUE on success; FALSE on failure
+ */
+NvBool nvRmMuxPre(const NVDpyEvoRec *pDpyEvo, NvMuxState state)
+{
+    NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS params = { 0 };
+    NVDispEvoPtr pDispEvo;
+    NVDevEvoPtr pDevEvo;
+    NvU32 ret;
+
+    pDispEvo = pDpyEvo->pDispEvo;
+    pDevEvo = pDispEvo->pDevEvo;
+
+    if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
+        return FALSE;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
+    params.flags = DRF_DEF(0073_CTRL_DFP, _DISP_MUX_FLAGS, _SR_ENTER_SKIP, _NO);
+
+    if (state == MUX_STATE_DISCRETE) {
+        params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU;
+    } else if (state == MUX_STATE_INTEGRATED) {
+        params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU;
+    } else {
+        return FALSE;
+    }
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS,
+                         &params, sizeof(params));
+
+    nvEvoLogDebug(EVO_LOG_INFO, "RmMuxPre status %d", ret);
+
+    return ret == NVOS_STATUS_SUCCESS;
+}
+
+/*!
+ * Perform mux switch operation
+ *
+ * \param[in]  pDpyEvo  The Dpy of the target mux
+ * \param[in]  state    The target mux state
+ *
+ * \return TRUE on success; FALSE on failure
+ */
+NvBool nvRmMuxSwitch(const NVDpyEvoRec *pDpyEvo, NvMuxState state)
+{
+    NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS params = { 0 };
+    NVDispEvoPtr pDispEvo;
+    NVDevEvoPtr pDevEvo;
+    NvU32 ret;
+
+    pDispEvo = pDpyEvo->pDispEvo;
+    pDevEvo = pDispEvo->pDevEvo;
+
+    if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
+        return FALSE;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
+
+    if (state == MUX_STATE_DISCRETE) {
+        params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU;
+    } else if (state == MUX_STATE_INTEGRATED) {
+        params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU;
+    } else {
+        return FALSE;
+    }
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX,
+                         &params, sizeof(params));
+
+    nvEvoLogDebug(EVO_LOG_INFO, "RmMuxSwitch status %d", ret);
+
+    /*
+     * Force link training after waiting for the DP AUX link to settle.
+     * The delay duration comes from DFP_MUX_AUX_SETTLE_DELAY_MS_DEFAULT
+     * in drivers/resman/kernel/inc/dfpmux.h.
+     */
+    nvkms_usleep(100000);
+
+    if (pDpyEvo->internal && state == MUX_STATE_DISCRETE) {
+        nvAssert(nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo));
+        nvDPNotifyShortPulse(pDpyEvo->pConnectorEvo->pDpLibConnector);
+        nvDPFireExpiredTimers(pDevEvo);
+    }
+
+    return ret == NVOS_STATUS_SUCCESS;
+}
+
+/*!
+ * Perform mux post-switch operations
+ *
+ * \param[in]  pDpyEvo  The Dpy of the target mux
+ * \param[in]  state    The target mux state
+ *
+ * \return TRUE on success; FALSE on failure
+ */
+NvBool nvRmMuxPost(const NVDpyEvoRec *pDpyEvo, NvMuxState state)
+{
+    NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS params = { 0 };
+    NVDispEvoPtr pDispEvo;
+    NVDevEvoPtr pDevEvo;
+    NvU32 ret;
+
+    pDispEvo = pDpyEvo->pDispEvo;
+    pDevEvo = pDispEvo->pDevEvo;
+
+    if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
+        return FALSE;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
+    params.flags = DRF_DEF(0073_CTRL_DFP, _DISP_MUX_FLAGS, _SR_ENTER_SKIP, _NO);
+
+    if (state == MUX_STATE_DISCRETE) {
+        params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU;
+    } else if (state == MUX_STATE_INTEGRATED) {
+        params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU;
+    } else {
+        return FALSE;
+    }
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS,
+                         &params, sizeof(params));
+
+    nvEvoLogDebug(EVO_LOG_INFO, "RmMuxPost status %d", ret);
+
+    return ret == NVOS_STATUS_SUCCESS;
+}
+
+/*!
+ * Query the current state of a dynamic mux
+ *
+ * \param[in]  pDpyEvo  The Dpy of the target mux whose state is to be queried
+ *
+ * \return Mux state (either MUX_STATE_INTEGRATED or MUX_STATE_DISCRETE) on
+ *         success; MUX_STATE_UNKNOWN on failure.
+ */
+NvMuxState nvRmMuxState(const NVDpyEvoRec *pDpyEvo)
+{
+    NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS params = { 0 };
+    NVDispEvoPtr pDispEvo;
+    NVDevEvoPtr pDevEvo;
+
+    pDispEvo = pDpyEvo->pDispEvo;
+    pDevEvo = pDispEvo->pDevEvo;
+
+    if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
+        return MUX_STATE_UNKNOWN;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
+
+    if (NVOS_STATUS_SUCCESS == nvRmApiControl(nvEvoGlobal.clientHandle,
+                                              pDevEvo->displayCommonHandle,
+                                              NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS,
+                                              &params, sizeof(params))) {
+        if (FLD_TEST_DRF(0073_CTRL_DFP, _DISP_MUX, _STATE, _INTEGRATED_GPU,
+                         params.muxStatus)) {
+            return MUX_STATE_INTEGRATED;
+        }
+        if (FLD_TEST_DRF(0073_CTRL_DFP, _DISP_MUX, _STATE, _DISCRETE_GPU,
+                         params.muxStatus)) {
+            return MUX_STATE_DISCRETE;
+        }
+    }
+
+    return MUX_STATE_UNKNOWN;
+}
+
+void nvRmRegisterBacklight(NVDispEvoRec *pDispEvo)
+{
+    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS dispParams = { 0 };
+    NvU32 displayMask, displayId;
+    NvU32 brightness;
+
+    nvAssert(pDispEvo->backlightDevice == NULL);
+
+    dispParams.subDeviceInstance = pDispEvo->displayOwner;
+
+    if (nvRmApiControl(nvEvoGlobal.clientHandle,
+                       pDevEvo->displayCommonHandle,
+                       NV0073_CTRL_CMD_SYSTEM_GET_INTERNAL_DISPLAYS,
+                       &dispParams, sizeof(dispParams)) != NV_OK) {
+        return;
+    }
+
+    /* Find a display with a backlight */
+    displayMask = dispParams.availableInternalDisplaysMask;
+    for (; displayMask; displayMask &= ~LOWESTBIT(displayMask))
+    {
+        NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 };
+        NV_STATUS status;
+
+        displayId = LOWESTBIT(displayMask);
+
+        params.subDeviceInstance = pDispEvo->displayOwner;
+        params.displayId = displayId;
+        params.brightnessType = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100;
+
+        status = nvRmApiControl(nvEvoGlobal.clientHandle,
+                                pDevEvo->displayCommonHandle,
+                                NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
+                                &params, sizeof(params));
+
+        if (status == NV_OK)
+        {
+            brightness = params.brightness;
+            break;
+        }
+    }
+
+    if (displayMask == 0)
+    {
+        /* No internal display has backlight */
+        return;
+    }
+
+    pDispEvo->backlightDevice = nvkms_register_backlight(
+        pDevEvo->pSubDevices[pDispEvo->displayOwner]->gpuId,
+        displayId, pDispEvo,
+        brightness);
+}
+
+void nvRmUnregisterBacklight(NVDispEvoRec *pDispEvo)
+{
+    if (pDispEvo->backlightDevice != NULL) {
+        nvkms_unregister_backlight(pDispEvo->backlightDevice);
+    }
+    pDispEvo->backlightDevice = NULL;
+}
+
+NvU32 nvRmAllocAndBindSurfaceDescriptor(
+    NVDevEvoPtr pDevEvo,
+    NvU32 hMemory,
+    const enum NvKmsSurfaceMemoryLayout layout,
+    NvU64 limit,
+    NVSurfaceDescriptor *pSurfaceDesc,
+    NvBool mapToDisplayRm)
+{
+    NVSurfaceDescriptor surfaceDesc;
+    NvU32 flags = DRF_DEF(OS03, _FLAGS, _HASH_TABLE, _DISABLE);
+    NvU32 head, layer;
+    NvU32 ret;
+
+    switch (layout) {
+    case NvKmsSurfaceMemoryLayoutBlockLinear:
+        flags |= DRF_DEF(OS03, _FLAGS, _PTE_KIND, _BL);
+        break;
+    case NvKmsSurfaceMemoryLayoutPitch:
+        flags |= DRF_DEF(OS03, _FLAGS, _PTE_KIND, _PITCH);
+        break;
+    }
+
+    /* Each surface to be displayed needs its own surface descriptor */
+    nvAssert(pDevEvo->displayHandle != 0);
+    nvAssert(pDevEvo->core);
+    nvAssert(pDevEvo->core->pb.channel_handle);
+    nvAssert(hMemory);
+    nvAssert(limit);
+
+    ret =
+        pDevEvo->hal->AllocSurfaceDescriptor(pDevEvo, &surfaceDesc,
+                                             hMemory, flags, limit,
+                                             mapToDisplayRm);
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        return ret;
+    }
+
+    ret =
+        pDevEvo->hal->BindSurfaceDescriptor(pDevEvo,
+                                            pDevEvo->core,
+                                            &surfaceDesc);
+    if (ret != NVOS_STATUS_SUCCESS) {
+        goto free_this_handle_and_fail;
+    }
+
+    for (head = 0; head < pDevEvo->numHeads; head++) {
+        for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+            if (pDevEvo->head[head].layer[layer]) {
+                nvAssert(pDevEvo->head[head].layer[layer]->pb.channel_handle);
+
+                ret = pDevEvo->hal->BindSurfaceDescriptor(pDevEvo,
+                                                          pDevEvo->head[head].layer[layer],
+                                                          &surfaceDesc);
+                if (ret != NVOS_STATUS_SUCCESS) {
+                    goto free_this_handle_and_fail;
+                }
+            }
+        }
+    }
+
+    *pSurfaceDesc = surfaceDesc;
+
+    return NVOS_STATUS_SUCCESS;
+
+free_this_handle_and_fail:
+    pDevEvo->hal->FreeSurfaceDescriptor(pDevEvo,
+                                        nvEvoGlobal.clientHandle,
+                                        &surfaceDesc);
+    return ret;
+}
diff --git a/src/nvidia-modeset/src/nvkms-rmapi-dgpu.c b/src/nvidia-modeset/src/nvkms-rmapi-dgpu.c
new file mode 100644
index 0000000..2c65244
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-rmapi-dgpu.c
@@ -0,0 +1,284 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-kernel-rmapi-ops.h" +#include "nvidia-modeset-os-interface.h" + +#include "nvkms-rmapi.h" +#include "nv_assert.h" + +NvU32 nvRmApiAlloc( + NvU32 hClient, + NvU32 hParent, + NvU32 hObject, + NvU32 hClass, + void *pAllocParams) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_ALLOC; + + ops.params.alloc.hRoot = hClient; + ops.params.alloc.hObjectParent = hParent; + ops.params.alloc.hObjectNew = hObject; + ops.params.alloc.hClass = hClass; + ops.params.alloc.pAllocParms = NV_PTR_TO_NvP64(pAllocParams); + + nvkms_call_rm(&ops); + + return ops.params.alloc.status; +} + +NvU32 nvRmApiAllocMemory64( + NvU32 hClient, + NvU32 hParent, + NvU32 hMemory, + NvU32 hClass, + NvU32 flags, + void **ppAddress, + NvU64 *pLimit) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV01_ALLOC_MEMORY; + + ops.params.allocMemory64.hRoot = hClient; + ops.params.allocMemory64.hObjectParent = hParent; + ops.params.allocMemory64.hObjectNew = hMemory; + ops.params.allocMemory64.hClass = hClass; + ops.params.allocMemory64.flags = flags; + ops.params.allocMemory64.pMemory = NV_PTR_TO_NvP64(*ppAddress); + ops.params.allocMemory64.limit = *pLimit; + + nvkms_call_rm(&ops); + + *pLimit = ops.params.allocMemory64.limit; + *ppAddress = NvP64_VALUE(ops.params.allocMemory64.pMemory); + + return ops.params.allocMemory64.status; +} + +NvU32 nvRmApiControl( + NvU32 hClient, + NvU32 hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_CONTROL; + + ops.params.control.hClient = hClient; + ops.params.control.hObject = hObject; + ops.params.control.cmd = cmd; + ops.params.control.params = NV_PTR_TO_NvP64(pParams); + ops.params.control.paramsSize = paramsSize; + + nvkms_call_rm(&ops); + + return ops.params.control.status; +} + +NvU32 nvRmApiDupObject2( + NvU32 hClient, + NvU32 hParent, + NvU32 *hObjectDest, + NvU32 hClientSrc, + NvU32 hObjectSrc, + NvU32 flags) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_DUP_OBJECT; + + ops.params.dupObject.hClient = hClient; + ops.params.dupObject.hParent = hParent; + ops.params.dupObject.hObject = *hObjectDest; + ops.params.dupObject.hClientSrc = hClientSrc; + ops.params.dupObject.hObjectSrc = hObjectSrc; + ops.params.dupObject.flags = flags; + + nvkms_call_rm(&ops); + + *hObjectDest = ops.params.dupObject.hObject; + + return ops.params.dupObject.status; +} + +NvU32 nvRmApiDupObject( + NvU32 hClient, + NvU32 hParent, + NvU32 hObjectDest, + NvU32 hClientSrc, + NvU32 hObjectSrc, + NvU32 flags) +{ + NvU32 hObjectLocal = hObjectDest; + NvU32 ret = nvRmApiDupObject2(hClient, + hParent, + &hObjectLocal, + hClientSrc, + hObjectSrc, + flags); + + nvAssert(hObjectLocal == hObjectDest); + + return ret; +} + +NvU32 nvRmApiFree( + NvU32 hClient, + NvU32 hParent, + NvU32 hObject) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV01_FREE; + + ops.params.free.hRoot = hClient; + ops.params.free.hObjectParent = hParent; + ops.params.free.hObjectOld = hObject; + + nvkms_call_rm(&ops); + + return ops.params.free.status; +} + +NvU32 nvRmApiVidHeapControl( + void *pVidHeapControlParams) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + NVOS32_PARAMETERS *pParams = pVidHeapControlParams; + + ops.op = NV04_VID_HEAP_CONTROL; + 
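+    /*
+     * Unlike the other wrappers in this file, VID_HEAP_CONTROL hands RM the
+     * caller's NVOS32_PARAMETERS directly by pointer, and RM reports status
+     * in pParams->status rather than in the ops union.
+     */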
+ ops.params.pVidHeapControl = pParams; + + nvkms_call_rm(&ops); + + return pParams->status; +} + +NvU32 nvRmApiMapMemory( + NvU32 hClient, + NvU32 hDevice, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + void **ppLinearAddress, + NvU32 flags) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_MAP_MEMORY; + + ops.params.mapMemory.hClient = hClient; + ops.params.mapMemory.hDevice = hDevice; + ops.params.mapMemory.hMemory = hMemory; + ops.params.mapMemory.offset = offset; + ops.params.mapMemory.length = length; + ops.params.mapMemory.flags = flags; + + nvkms_call_rm(&ops); + + *ppLinearAddress = NvP64_VALUE(ops.params.mapMemory.pLinearAddress); + + return ops.params.mapMemory.status; +} + +NvU32 nvRmApiUnmapMemory( + NvU32 hClient, + NvU32 hDevice, + NvU32 hMemory, + const void *pLinearAddress, + NvU32 flags) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_UNMAP_MEMORY; + + ops.params.unmapMemory.hClient = hClient; + ops.params.unmapMemory.hDevice = hDevice; + ops.params.unmapMemory.hMemory = hMemory; + ops.params.unmapMemory.pLinearAddress = NV_PTR_TO_NvP64(pLinearAddress); + ops.params.unmapMemory.flags = flags; + + nvkms_call_rm(&ops); + + return ops.params.unmapMemory.status; +} + +NvU32 nvRmApiMapMemoryDma( + NvU32 hClient, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_MAP_MEMORY_DMA; + + ops.params.mapMemoryDma.hClient = hClient; + ops.params.mapMemoryDma.hDevice = hDevice; + ops.params.mapMemoryDma.hDma = hDma; + ops.params.mapMemoryDma.hMemory = hMemory; + ops.params.mapMemoryDma.offset = offset; + ops.params.mapMemoryDma.length = length; + ops.params.mapMemoryDma.flags = flags; + ops.params.mapMemoryDma.dmaOffset = *pDmaOffset; + + nvkms_call_rm(&ops); + + *pDmaOffset = ops.params.mapMemoryDma.dmaOffset; + + return ops.params.mapMemoryDma.status; +} + +NvU32 nvRmApiUnmapMemoryDma( + NvU32 hClient, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU32 flags, + NvU64 dmaOffset) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_UNMAP_MEMORY_DMA; + + ops.params.unmapMemoryDma.hClient = hClient; + ops.params.unmapMemoryDma.hDevice = hDevice; + ops.params.unmapMemoryDma.hDma = hDma; + ops.params.unmapMemoryDma.hMemory = hMemory; + ops.params.unmapMemoryDma.flags = flags; + ops.params.unmapMemoryDma.dmaOffset = dmaOffset; + + nvkms_call_rm(&ops); + + return ops.params.unmapMemoryDma.status; +} diff --git a/src/nvidia-modeset/src/nvkms-stereo.c b/src/nvidia-modeset/src/nvkms-stereo.c new file mode 100644 index 0000000..27cdd0a --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-stereo.c @@ -0,0 +1,62 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms-evo.h"
+#include "nvkms-stereo.h"
+
+NvBool nvSetStereo(const NVDispEvoRec *pDispEvo,
+                   const NvU32 apiHead,
+                   NvBool enable)
+{
+    NvU32 head;
+    NvU32 primaryHead =
+        nvGetPrimaryHwHead(pDispEvo, apiHead);
+
+    if (primaryHead == NV_INVALID_HEAD) {
+        return FALSE;
+    }
+
+    /* Only one head can drive stereo; make sure stereo is disabled
+     * on all the secondary hardware heads. */
+    FOR_EACH_EVO_HW_HEAD(pDispEvo, apiHead, head) {
+        if (head == primaryHead) {
+            continue;
+        }
+        if (!nvSetStereoEvo(pDispEvo, head, FALSE)) {
+            nvAssert(!"Failed to disable stereo on secondary head");
+        }
+    }
+
+    return nvSetStereoEvo(pDispEvo, primaryHead, enable);
+}
+
+NvBool nvGetStereo(const NVDispEvoRec *pDispEvo, const NvU32 apiHead)
+{
+    NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead);
+
+    if (head == NV_INVALID_HEAD) {
+        return FALSE;
+    }
+
+    return nvGetStereoEvo(pDispEvo, head);
+}
diff --git a/src/nvidia-modeset/src/nvkms-surface.c b/src/nvidia-modeset/src/nvkms-surface.c
new file mode 100644
index 0000000..977bd52
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-surface.c
@@ -0,0 +1,1384 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#include "nvkms-surface.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "nvkms-utils.h" +#include "nvkms-flip.h" +#include "nvkms-private.h" +#include "nvkms-headsurface.h" +#include "nvkms-headsurface-swapgroup.h" +#include "nvos.h" + +// NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD +#include "ctrl/ctrl0000/ctrl0000unix.h" +// NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM +#include "ctrl/ctrl0000/ctrl0000client.h" + +/* NV0041_CTRL_SURFACE_INFO */ +#include "ctrl/ctrl0041.h" + +/* NV01_MEMORY_SYSTEM_OS_DESCRIPTOR */ +#include "class/cl0071.h" + +static void CpuUnmapSurface( + NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo) +{ + const NvU32 planeIndex = 0; + NvU32 sd; + + if (pSurfaceEvo->planes[planeIndex].rmHandle == 0) { + return; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + if (pSurfaceEvo->cpuAddress[sd] != NULL) { + nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pSurfaceEvo->planes[planeIndex].rmHandle, + pSurfaceEvo->cpuAddress[sd], + 0); + pSurfaceEvo->cpuAddress[sd] = NULL; + } + } +} + +NvBool nvEvoCpuMapSurface( + NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo) +{ + const NvU32 planeIndex = 0; + NvU32 sd; + + /* + * We should only be called here with surfaces that contain a single plane. + */ + nvAssert(nvKmsGetSurfaceMemoryFormatInfo(pSurfaceEvo->format)->numPlanes == 1); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + NvU32 result = nvRmApiMapMemory( + nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pSurfaceEvo->planes[planeIndex].rmHandle, + 0, + pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes, + (void **) &pSurfaceEvo->cpuAddress[sd], + 0); + + if (result != NVOS_STATUS_SUCCESS) { + CpuUnmapSurface(pDevEvo, pSurfaceEvo); + return FALSE; + } + } + + return TRUE; +} + +static void FreeSurfaceEvoStruct(NVSurfaceEvoPtr pSurfaceEvo) +{ + if (pSurfaceEvo == NULL) { + return; + } + + nvAssert(!nvSurfaceEvoInAnyOpens(pSurfaceEvo)); + + nvAssert(pSurfaceEvo->structRefCnt == 0); + nvAssert(pSurfaceEvo->rmRefCnt == 0); + + nvFree(pSurfaceEvo); +} + +static void FreeSurfaceEvoRm(NVDevEvoPtr pDevEvo, NVSurfaceEvoPtr pSurfaceEvo) +{ + NvU64 structRefCnt; + NvU8 planeIndex; + + if ((pDevEvo == NULL) || (pSurfaceEvo == NULL)) { + return; + } + + nvAssert(pSurfaceEvo->rmRefCnt == 0); + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + pDevEvo->hal->FreeSurfaceDescriptor(pDevEvo, + nvEvoGlobal.clientHandle, + &pSurfaceEvo->planes[planeIndex].surfaceDesc); + } + + CpuUnmapSurface(pDevEvo, pSurfaceEvo); + + if (pSurfaceEvo->planes[0].rmHandle != 0) { + nvHsUnmapSurfaceFromDevice(pDevEvo, + pSurfaceEvo->planes[0].rmHandle, + pSurfaceEvo->gpuAddress); + } + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + + if (pSurfaceEvo->planes[planeIndex].rmHandle == 0) { + break; + } + + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSurfaceEvo->planes[planeIndex].rmHandle); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pSurfaceEvo->planes[planeIndex].rmHandle); + + pSurfaceEvo->planes[planeIndex].rmHandle = 0; + } + + /* + * The surface is now an orphan: clear the pSurfaceEvo, for + * everything other than its structRefCnt. The only operation + * that can be done on it is unregistration. 
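nvEvoCpuMapSurface() and CpuUnmapSurface() above form an all-or-nothing pair: if the per-subdevice mapping loop fails partway, everything mapped so far is torn down before failure is reported. The same unwind-on-partial-failure shape in portable C, purely illustrative, with malloc()/free() standing in for the RM map and unmap calls:

    #include <stdbool.h>
    #include <stdlib.h>

    static bool MapAll(void *maps[], int count)
    {
        for (int i = 0; i < count; i++) {
            maps[i] = malloc(64); /* stands in for nvRmApiMapMemory() */
            if (maps[i] == NULL) {
                /* Unwind the mappings that already succeeded. */
                while (i-- > 0) {
                    free(maps[i]); /* stands in for nvRmApiUnmapMemory() */
                    maps[i] = NULL;
                }
                return false;
            }
        }
        return true;
    }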
+     */
+    structRefCnt = pSurfaceEvo->structRefCnt;
+    nvkms_memset(pSurfaceEvo, 0, sizeof(*pSurfaceEvo));
+    pSurfaceEvo->structRefCnt = structRefCnt;
+}
+
+void nvEvoIncrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo)
+{
+    nvAssert(!nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo));
+
+    pSurfaceEvo->structRefCnt++;
+}
+
+void nvEvoDecrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo)
+{
+    nvAssert(pSurfaceEvo->structRefCnt >= 1);
+    pSurfaceEvo->structRefCnt--;
+
+    if (pSurfaceEvo->structRefCnt == 0) {
+        FreeSurfaceEvoStruct(pSurfaceEvo);
+    }
+}
+
+static NvBool ValidatePlaneProperties(
+    NVDevEvoPtr pDevEvo,
+    const struct NvKmsRegisterSurfaceRequest *pRequest)
+{
+    const NvKmsSurfaceMemoryFormatInfo *pFormatInfo =
+        nvKmsGetSurfaceMemoryFormatInfo(pRequest->format);
+    NvU8 planeIndex;
+
+    /*
+     * Reject all registration requests for multi-planar NISO surfaces:
+     * such a request is nonsensical.
+     */
+    if ((pRequest->isoType == NVKMS_MEMORY_NISO) &&
+        (pFormatInfo->numPlanes > 1)) {
+        return FALSE;
+    }
+
+    for (planeIndex = 0; planeIndex < pFormatInfo->numPlanes; planeIndex++) {
+
+        const NvU64 planeOffset = pRequest->planes[planeIndex].offset;
+        NvU64 planePitch = pRequest->planes[planeIndex].pitch;
+        NvU64 rmObjectSizeInBytes =
+            pRequest->planes[planeIndex].rmObjectSizeInBytes;
+        NvU64 widthInBytes;
+        NvU64 planeSizeInBytes;
+        NvU32 planeEffectiveLines = pRequest->heightInPixels;
+        NvU32 widthInPixels = pRequest->widthInPixels;
+
+        if ((planePitch == 0U) || (rmObjectSizeInBytes == 0U))
+        {
+            nvEvoLog(EVO_LOG_ERROR, "Invalid request parameters, planePitch or rmObjectSizeInBytes, passed during surface registration");
+            return FALSE;
+        }
+
+        if ((pRequest->isoType == NVKMS_MEMORY_ISO) &&
+            ((planeEffectiveLines == 0U) || (widthInPixels == 0U)))
+        {
+            nvEvoLog(EVO_LOG_ERROR, "Invalid request parameters, heightInPixels or widthInPixels, passed during surface registration for ISO surfaces");
+            return FALSE;
+        }
+
+        /* The offset must be 1KB-aligned. */
+        if ((planeOffset &
+             ((1 << NV_SURFACE_OFFSET_ALIGNMENT_SHIFT) - 1)) != 0) {
+            return FALSE;
+        }
+
+        /*
+         * Convert planePitch to units of bytes if it's currently specified in
+         * units of blocks. Each block is 64 bytes wide.
+         */
+        if (pRequest->layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
+            planePitch <<= NVKMS_BLOCK_LINEAR_LOG_GOB_WIDTH;
+        }
+
+        /*
+         * Convert width to bytes.
+         */
+        widthInBytes = widthInPixels;
+
+        if (pFormatInfo->isYUV) {
+            NvU8 divisor = 1;
+            NvU8 bytesPerBlock = pFormatInfo->yuv.storageBitsPerComponent >> 3;
+
+            switch (pFormatInfo->numPlanes) {
+            case 3:
+                /* planar */
+                if (planeIndex > 0) {
+                    divisor = pFormatInfo->yuv.horizChromaDecimationFactor;
+                }
+                break;
+
+            case 2:
+                /* semi-planar */
+                if (planeIndex > 0) {
+                    divisor = pFormatInfo->yuv.horizChromaDecimationFactor;
+                    bytesPerBlock *= 2;
+                }
+                break;
+
+            case 1:
+                /* 4:2:2 packed */
+                bytesPerBlock *= 2;
+            }
+
+            widthInBytes *= bytesPerBlock;
+            /* Dimensions of decimated planes of odd-width YUV surfaces are
+             * supposed to be rounded up */
+            widthInBytes = (widthInBytes + (divisor - 1)) / divisor;
+        } else {
+            widthInBytes *= pFormatInfo->rgb.bytesPerPixel;
+        }
+
+        /*
+         * Check that an entire line of pixels will fit in the pitch value
+         * specified.
+         */
+        if (widthInBytes > planePitch) {
+            return FALSE;
+        }
+
+        /*
+         * Check that the entire memory region occupied by this plane falls
+         * within the size of the underlying memory allocation.
+         *
+         * Force planeEffectiveLines to be even before dividing by
+         * vertChromaDecimationFactor. The height of the source fetch rectangle
+         * must be even anyway if there's vertical decimation.
+         */
+        if (planeIndex != 0 && pFormatInfo->isYUV &&
+            pFormatInfo->yuv.vertChromaDecimationFactor > 1) {
+            planeEffectiveLines = planeEffectiveLines & ~(0x1);
+            planeEffectiveLines /= pFormatInfo->yuv.vertChromaDecimationFactor;
+        }
+
+        planeSizeInBytes = planeEffectiveLines * planePitch;
+
+        if ((pRequest->isoType == NVKMS_MEMORY_ISO) &&
+            (planeSizeInBytes == 0U))
+        {
+            nvEvoLog(EVO_LOG_ERROR, "Plane size calculated during ISO surface registration is 0");
+            return FALSE;
+        }
+
+        if ((planeSizeInBytes > rmObjectSizeInBytes) ||
+            (planeOffset > (rmObjectSizeInBytes - planeSizeInBytes))) {
+            return FALSE;
+        }
+    }
+
+    return TRUE;
+}
+
+static NvBool ValidateRegisterSurfaceRequest(
+    NVDevEvoPtr pDevEvo,
+    const struct NvKmsRegisterSurfaceRequest *pRequest)
+{
+    const NvKmsSurfaceMemoryFormatInfo *pFormatInfo =
+        nvKmsGetSurfaceMemoryFormatInfo(pRequest->format);
+
+    /*
+     * The purpose of this check is to make sure the given format is valid and
+     * not some garbage number. It exists to check for format validity in the
+     * case where noDisplayHardwareAccess is TRUE.
+     */
+    if (pFormatInfo->depth == 0) {
+        return FALSE;
+    }
+
+    /*
+     * NvKmsSurfaceMemoryFormat has a few formats that we will never display.
+     * Head surface has several formats it wants to texture from but we won't
+     * (and can't) display surfaces with those formats. We should reject any
+     * attempt to register a surface that is marked for display and uses one of
+     * those formats.
+     */
+    if (!pRequest->noDisplayHardwareAccess) {
+        /*
+         * This isn't a perfect check since we can't predict which channel this
+         * surface will be used on, but we should definitely reject a format if
+         * it isn't usable on any channel.
+         */
+        NvBool usableOnAnyChannel = FALSE;
+        NvU8 layer;
+
+        for (layer = 0;
+             layer < ARRAY_LEN(pDevEvo->caps.layerCaps);
+             layer++) {
+
+            if (NVBIT64(pRequest->format) &
+                pDevEvo->caps.layerCaps[layer].supportedSurfaceMemoryFormats) {
+                usableOnAnyChannel = TRUE;
+                break;
+            }
+        }
+
+        if (!usableOnAnyChannel) {
+            return FALSE;
+        }
+
+        if (!pDevEvo->hal->ValidateWindowFormat(pRequest->format, NULL, NULL)) {
+            return FALSE;
+        }
+    }
+
+    if (!ValidatePlaneProperties(pDevEvo, pRequest)) {
+        return FALSE;
+    }
+
+    /* XXX Validate surface properties. */
+
+    return TRUE;
+}
+
+static NvBool ValidateSurfaceAllocation(
+    NVDevEvoPtr pDevEvo,
+    NvU64 rmObjectSizeInBytes,
+    const struct NvKmsRegisterSurfaceRequest *pRequest,
+    NvU32 rmHandle)
+{
+    NV0041_CTRL_GET_SURFACE_INFO_PARAMS surfaceInfoParams = {};
+    NV0041_CTRL_SURFACE_INFO surfaceInfo[3];
+    enum {
+        PHYS_SIZE_LO = 0,
+        PHYS_SIZE_HI,
+        ADDR_SPACE_TYPE,
+    };
+    NV_STATUS status;
+    NvU64 memSize;
+    /*
+     * Do not require vidmem on Tegra. Tegra has different capabilities: here
+     * we always say display is possible, so we never fail framebuffer
+     * creation.
+     *
+     * Do not require vidmem for surfaces that do not need access to display
+     * hardware.
+     *
+     * If the memory is not isochronous, the memory will not be scanned out to
+     * a display. The checks are not needed for such memory types.
+     */
+    NvBool requireVidmem = (pRequest->isoType == NVKMS_MEMORY_ISO) &&
+                           !pDevEvo->isSOCDisplay &&
+                           !pRequest->noDisplayHardwareAccess;
+
+    /*
+     * Check if the surface's actual size matches the request's sizeInBytes
+     * specification after duplicating the RM object under the NVKMS RM client.
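Two details of ValidatePlaneProperties() above are easiest to see with numbers. Decimated chroma widths of odd-width YUV surfaces round up via the usual (x + d - 1) / d idiom, and the final bounds check is phrased as offset > size - need rather than offset + need > size so that it cannot wrap. A self-contained sketch with illustrative values:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool FitsInObject(uint64_t offset, uint64_t need, uint64_t size)
    {
        if (need > size) {
            return false;
        }
        /* Equivalent to offset + need <= size, but cannot overflow. */
        return offset <= size - need;
    }

    static void PlaneMathExample(void)
    {
        /* 3-plane 4:2:0, 8-bit, odd width 1921: chroma width rounds up. */
        uint32_t width = 1921, divisor = 2;
        uint32_t chromaWidth = (width + (divisor - 1)) / divisor;
        assert(chromaWidth == 961); /* plain division would give 960 */

        assert(FitsInObject(0, 4096, 4096));
        assert(!FitsInObject(1, 4096, 4096));
        assert(!FitsInObject(UINT64_MAX, 4096, 4096)); /* no wrap-around */
    }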
+ */ + surfaceInfo[PHYS_SIZE_LO].index = NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE_LO; + surfaceInfo[PHYS_SIZE_HI].index = NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE_HI; + + /* + * Check if the memory we are registering this surface with is valid. We + * cannot scan out sysmem or compressed buffers. + * + * If we cannot use this memory for display it may be resident in sysmem + * or may belong to another GPU. + */ + surfaceInfo[ADDR_SPACE_TYPE].index = NV0041_CTRL_SURFACE_INFO_INDEX_ADDR_SPACE_TYPE; + + surfaceInfoParams.surfaceInfoListSize = requireVidmem ? 3 : 2; + surfaceInfoParams.surfaceInfoList = (NvP64)&surfaceInfo; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + rmHandle, + NV0041_CTRL_CMD_GET_SURFACE_INFO, + &surfaceInfoParams, + sizeof(surfaceInfoParams)); + if (status != NV_OK) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to get surface information of RM memory object 0x%x", + rmHandle); + return FALSE; + } + + memSize = NvU64_BUILD(surfaceInfo[PHYS_SIZE_HI].data, + surfaceInfo[PHYS_SIZE_LO].data); + if (memSize < rmObjectSizeInBytes) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Memory allocated is not large enough for the surface"); + return FALSE; + } + + if (requireVidmem && + surfaceInfo[ADDR_SPACE_TYPE].data != NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Memory used for surface not appropriate for scanout"); + return FALSE; + } + + return TRUE; +} + +void nvEvoRegisterSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + struct NvKmsRegisterSurfaceParams *pParams, + enum NvHsMapPermissions hsMapPermissions) +{ + NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDev(pOpenDev); + const struct NvKmsRegisterSurfaceRequest *pRequest = &pParams->request; + NVSurfaceEvoPtr pSurfaceEvo = NULL; + NvKmsSurfaceHandle surfaceHandle = 0; + NvU32 result; + NvU8 planeIndex; + NvBool nisoMemory = (pRequest->isoType == NVKMS_MEMORY_NISO); + + /* + * HeadSurface needs a CPU mapping of surfaces containing semaphores, in + * order to check, from the CPU, if a semaphore-interlocked flip is ready. + */ + const NvBool needCpuMapping = nisoMemory && pDevEvo->isHeadSurfaceSupported; + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + if (!ValidateRegisterSurfaceRequest(pDevEvo, pRequest)) { + goto fail; + } + + pSurfaceEvo = nvCalloc(1, sizeof(*pSurfaceEvo)); + + if (pSurfaceEvo == NULL) { + goto fail; + } + + pSurfaceEvo->format = pRequest->format; + + surfaceHandle = nvEvoCreateApiHandle(pOpenDevSurfaceHandles, pSurfaceEvo); + + if (surfaceHandle == 0) { + goto fail; + } + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + + const NvU32 planeRmHandle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (planeRmHandle == 0) { + goto fail; + } + + pSurfaceEvo->planes[planeIndex].rmHandle = planeRmHandle; + + if (pRequest->useFd) { + /* + * On T234, the 'fd' provided is allocated outside of RM whereas on + * dGPU it is allocated by RM. So we check whether the fd is associated + * with an nvidia character device, and if it is, then we consider that + * it belongs to RM. Based on whether it belongs to RM or not we need + * to call different mechanisms to import it. 
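The physical size in ValidateSurfaceAllocation() above comes back from RM as two 32-bit surface-info words that NvU64_BUILD() stitches together. The equivalent construction in plain C, shown only to make the hi/lo convention explicit:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t BuildU64(uint32_t hi, uint32_t lo)
    {
        return ((uint64_t)hi << 32) | lo;
    }

    static void BuildU64Example(void)
    {
        /* A 6 GiB allocation reports hi = 0x1, lo = 0x80000000. */
        assert(BuildU64(0x1, 0x80000000u) == 0x180000000ull);
    }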
+             */
+            if (nvkms_fd_is_nvidia_chardev(pRequest->planes[planeIndex].u.fd)) {
+                NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS importParams = { };
+                importParams.fd = pRequest->planes[planeIndex].u.fd;
+                importParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM;
+                importParams.object.data.rmObject.hDevice = pDevEvo->deviceHandle;
+                importParams.object.data.rmObject.hParent = pDevEvo->deviceHandle;
+                importParams.object.data.rmObject.hObject = planeRmHandle;
+
+                result = nvRmApiControl(nvEvoGlobal.clientHandle,
+                                        nvEvoGlobal.clientHandle,
+                                        NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD,
+                                        &importParams,
+                                        sizeof(importParams));
+            } else {
+                /*
+                 * If the 'fd' doesn't belong to resman, assume that it is
+                 * allocated by some other dmabuf allocator (like nvmap).
+                 */
+                NV_OS_DESC_MEMORY_ALLOCATION_PARAMS allocParams = { };
+
+                allocParams.type = NVOS32_TYPE_IMAGE;
+                allocParams.descriptor =
+                    (NvP64)(NvU64)(pRequest->planes[planeIndex].u.fd);
+                allocParams.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_FILE_HANDLE;
+                allocParams.limit = pRequest->planes[planeIndex].rmObjectSizeInBytes - 1;
+
+                allocParams.attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI,
+                                               allocParams.attr);
+                allocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE,
+                                                _NO, allocParams.attr2);
+
+                /*
+                 * The NVKMS client performing the import doesn't know what the
+                 * original CPU cache attributes are, so assume WRITE_BACK since
+                 * we only need RM to IOVA map the memory into display's address
+                 * space and the CPU cache attributes shouldn't really matter in
+                 * this case.
+                 */
+                allocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY,
+                                               _WRITE_BACK, allocParams.attr);
+                allocParams.flags = NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED;
+
+                switch (pRequest->layout) {
+                case NvKmsSurfaceMemoryLayoutBlockLinear:
+                    allocParams.attr =
+                        FLD_SET_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR,
+                                    allocParams.attr);
+                    break;
+
+                case NvKmsSurfaceMemoryLayoutPitch:
+                    allocParams.attr =
+                        FLD_SET_DRF(OS32, _ATTR, _FORMAT, _PITCH,
+                                    allocParams.attr);
+                    break;
+
+                default:
+                    nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Unknown layout");
+                    goto fail;
+                }
+
+                if (!pRequest->noDisplayHardwareAccess) {
+                    if (nisoMemory) {
+                        allocParams.attr2 =
+                            FLD_SET_DRF(OS32, _ATTR2, _NISO_DISPLAY, _YES,
+                                        allocParams.attr2);
+                    } else {
+                        allocParams.attr2 =
+                            FLD_SET_DRF(OS32, _ATTR2, _ISO, _YES,
+                                        allocParams.attr2);
+                    }
+                }
+
+                result = nvRmApiAlloc(nvEvoGlobal.clientHandle,
+                                      pDevEvo->deviceHandle,
+                                      planeRmHandle,
+                                      NV01_MEMORY_SYSTEM_OS_DESCRIPTOR,
+                                      &allocParams);
+
+                /*
+                 * Bug 200614156. RM doesn't support mapping osdesc objects into
+                 * the CPU's address space.
+                 */
+                nvAssert(!needCpuMapping);
+            }
+        } else {
+            /*
+             * If 'useFd' is not specified, the (rmClient, rmObject) tuple from
+             * the request is an object in the caller's RM client space.
+             * Call RM to dup the memory into nvkms's RM client.
+             */
+            result = nvRmApiDupObject(nvEvoGlobal.clientHandle,
+                                      pDevEvo->deviceHandle,
+                                      planeRmHandle,
+                                      pRequest->rmClient,
+                                      pRequest->planes[planeIndex].u.rmObject,
+                                      0);
+        }
+
+        if (result != NVOS_STATUS_SUCCESS) {
+            goto fail;
+        }
+
+        if (!ValidateSurfaceAllocation(pDevEvo,
+                                       pRequest->planes[planeIndex].rmObjectSizeInBytes,
+                                       pRequest,
+                                       planeRmHandle)) {
+            goto fail;
+        }
+
+        if (!pRequest->noDisplayHardwareAccess) {
+            NvU32 ret;
+
+            /*
+             * For the surfaces that need display HW access, if the 'fd' or the
+             * (rmClient, rmObject) tuple from the request is allocated from
+             * sysmem, irrespective of whether it is allocated by the same or a
+             * different GPU than the one nvkms is using for display or is
+             * allocated by an external allocator (like nvmap), map it for
+             * access by the GPU device that nvkms is using for display, using
+             * NV0041_CTRL_CMD_MAP_MEMORY_FOR_GPU_ACCESS. If the mapping is
+             * already created, the ctrl call will just refcount it.
+             */
+            if (pDevEvo->isSOCDisplay) {
+                pSurfaceEvo->mapToDisplayRm = TRUE;
+            }
+
+            ret =
+                nvRmAllocAndBindSurfaceDescriptor(
+                    pDevEvo,
+                    planeRmHandle,
+                    pRequest->layout,
+                    pRequest->planes[planeIndex].rmObjectSizeInBytes - 1,
+                    &pSurfaceEvo->planes[planeIndex].surfaceDesc,
+                    pSurfaceEvo->mapToDisplayRm);
+            if (ret != NVOS_STATUS_SUCCESS) {
+                goto fail;
+            }
+        }
+
+        pSurfaceEvo->planes[planeIndex].pitch =
+            pRequest->planes[planeIndex].pitch;
+        pSurfaceEvo->planes[planeIndex].offset =
+            pRequest->planes[planeIndex].offset;
+        pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes =
+            pRequest->planes[planeIndex].rmObjectSizeInBytes;
+    }
+
+    pSurfaceEvo->requireDisplayHardwareAccess = !pRequest->noDisplayHardwareAccess;
+    pSurfaceEvo->noDisplayCaching = pRequest->noDisplayCaching;
+
+    /*
+     * Map the surface into the GPU's virtual address space, for use with
+     * headSurface. If the surface may be used for semaphores, headSurface will
+     * need to write to it through the graphics channel. Force a writable GPU
+     * mapping.
+     *
+     * Map the first plane of the surface only into the GPU's address space.
+     * We would have already rejected multi-planar semaphore requests earlier.
+     */
+    if (nisoMemory) {
+        hsMapPermissions = NvHsMapPermissionsReadWrite;
+    }
+
+    pSurfaceEvo->gpuAddress = nvHsMapSurfaceToDevice(
+        pDevEvo,
+        pSurfaceEvo->planes[0].rmHandle,
+        pRequest->planes[0].rmObjectSizeInBytes,
+        hsMapPermissions);
+
+    if (pSurfaceEvo->gpuAddress == NV_HS_BAD_GPU_ADDRESS) {
+        goto fail;
+    }
+
+    /*
+     * Map the first plane of the surface only into the CPU's address space.
+     * This is the only valid plane since we would have already rejected
+     * multi-planar NISO surface requests earlier in
+     *
+     * nvEvoRegisterSurface() => ValidateRegisterSurfaceRequest() =>
+     *     ValidatePlaneProperties().
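The allocParams.attr and attr2 setup above leans on the FLD_SET_DRF() family, which read-modify-writes one named bitfield while leaving the rest of the word intact. Stripped of the DRF naming machinery, the idiom is plain mask-and-or; the field position and width below are made up for illustration:

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical 2-bit field at bits [5:4]. */
    #define EX_FIELD_SHIFT 4u
    #define EX_FIELD_MASK  (0x3u << EX_FIELD_SHIFT)

    static uint32_t SetExField(uint32_t word, uint32_t value)
    {
        return (word & ~EX_FIELD_MASK) |
               ((value << EX_FIELD_SHIFT) & EX_FIELD_MASK);
    }

    static void SetExFieldExample(void)
    {
        uint32_t attr = 0xffffffffu;
        attr = SetExField(attr, 0x2); /* only bits [5:4] change */
        assert(attr == 0xffffffefu);
    }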
+     */
+    if (needCpuMapping) {
+        if (!nvEvoCpuMapSurface(pDevEvo, pSurfaceEvo)) {
+            goto fail;
+        }
+    }
+
+    pSurfaceEvo->widthInPixels = pRequest->widthInPixels;
+    pSurfaceEvo->heightInPixels = pRequest->heightInPixels;
+    pSurfaceEvo->layout = pRequest->layout;
+    pSurfaceEvo->log2GobsPerBlockY = pRequest->log2GobsPerBlockY;
+    pSurfaceEvo->isoType = pRequest->isoType;
+
+    pSurfaceEvo->rmRefCnt = 1;
+    pSurfaceEvo->structRefCnt = 1;
+
+    pSurfaceEvo->owner.pOpenDev = pOpenDev;
+    pSurfaceEvo->owner.surfaceHandle = surfaceHandle;
+
+    pParams->reply.surfaceHandle = surfaceHandle;
+
+    return;
+
+fail:
+    nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle);
+
+    FreeSurfaceEvoRm(pDevEvo, pSurfaceEvo);
+    FreeSurfaceEvoStruct(pSurfaceEvo);
+}
+
+/* Temporary storage used by ClearSurfaceUsage{Collect,Apply}. */
+struct ClearSurfaceUsageCache {
+    struct {
+        struct {
+            NvBool flipToNull : 1;
+            NvBool flipSemaphoreToNull : 1;
+
+            NvBool needToIdle : 1;
+        } layer[NVKMS_MAX_LAYERS_PER_HEAD];
+
+        NvBool flipCursorToNull : 1;
+    } apiHead[NVKMS_MAX_SUBDEVICES][NVKMS_MAX_HEADS_PER_DISP];
+};
+
+/*
+ * Search for heads where the surfaces are used, and populate the structure
+ * pointed to by 'pCache' to indicate which channels need to be updated.
+ */
+static void
+ClearSurfaceUsageCollect(NVDevEvoPtr pDevEvo,
+                         NVSurfaceEvoPtr pSurfaceEvo,
+                         struct ClearSurfaceUsageCache *pCache)
+{
+    NVDispEvoPtr pDispEvo;
+    NvU32 apiHead, sd;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+
+        for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
+            NvU32 usageMaskOneHead = nvCollectSurfaceUsageMaskOneApiHead(pDispEvo,
+                apiHead, pSurfaceEvo);
+            NvU32 usageMaskMainLayer = DRF_IDX_VAL(_SURFACE,
+                _USAGE_MASK, _LAYER, NVKMS_MAIN_LAYER, usageMaskOneHead);
+            NvU32 layer;
+
+            /*
+             * XXX NVKMS TODO: flip across heads/subdevices for all scenarios
+             * that are flip locked.
+             */
+
+            if (FLD_TEST_DRF(_SURFACE, _USAGE_MASK_LAYER, _SEMAPHORE,
+                             _ENABLE, usageMaskMainLayer)) {
+                pCache->apiHead[sd][apiHead].layer[NVKMS_MAIN_LAYER].
+                    flipSemaphoreToNull = TRUE;
+            }
+
+            if (FLD_TEST_DRF(_SURFACE, _USAGE_MASK_LAYER, _NOTIFIER,
+                             _ENABLE, usageMaskMainLayer) ||
+                FLD_TEST_DRF(_SURFACE, _USAGE_MASK_LAYER, _SCANOUT,
+                             _ENABLE, usageMaskMainLayer)) {
+                pCache->apiHead[sd][apiHead].layer[NVKMS_MAIN_LAYER].
+                    flipToNull = TRUE;
+            }
+
+            for (layer = 0;
+                 layer < pDevEvo->apiHead[apiHead].numLayers; layer++) {
+                NvU32 usageMaskOneLayer = DRF_IDX_VAL(_SURFACE,
+                    _USAGE_MASK, _LAYER, layer, usageMaskOneHead);
+
+                if (layer == NVKMS_MAIN_LAYER) {
+                    continue;
+                }
+
+                if (usageMaskOneLayer != 0x0) {
+                    pCache->apiHead[sd][apiHead].layer[layer].
+                        flipToNull = TRUE;
+                }
+
+                if (pCache->apiHead[sd][apiHead].layer[NVKMS_MAIN_LAYER].
+                        flipToNull) {
+                    NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES] = { };
+                    /*
+                     * EVO requires that, when flipping the base channel
+                     * (aka main layer) to NULL, the overlay channel is also
+                     * flipped to NULL.
+                     */
+                    if ((pSurfaceEvos[NVKMS_LEFT] != NULL) ||
+                        (pSurfaceEvos[NVKMS_RIGHT] != NULL)) {
+                        pCache->apiHead[sd][apiHead].layer[layer].
+                            flipToNull = TRUE;
+                    }
+                }
+            }
+
+            if (FLD_TEST_DRF(_SURFACE, _USAGE_MASK, _CURSOR,
+                             _ENABLE, usageMaskOneHead) != 0x0) {
+                pCache->apiHead[sd][apiHead].flipCursorToNull = TRUE;
+            }
+        }
+    }
+}
+
+/*
+ * Do the hard work to babysit the hardware to ensure that any channels which
+ * need clearing have actually done so before proceeding to free memory and
+ * remove ctxdmas from the hash table.
+ *
+ * This is achieved in several steps:
+ * 1.
Issue a flip of any overlay layer to NULL -- these are processed + * separately since using one Flip request would interlock them, potentially + * exacerbating stuck channels by getting other channels stuck too. + * Pre-NVDisplay requires that, when flipping the core channel to NULL, + * all satellite channels are also flipped to NULL. The EVO2 hal takes care + * to enable/disable the core surface along with the base surface, + * therefore flip overlay to NULL before base. + * 2. Issue a flip of any main layer to NULL + * 3. Wait for any base/overlay layer that we expect to be idle to actually + * be idle. If they don't idle in a timely fashion, apply accelerators to + * forcibly idle any problematic channels. + * 4. Issue a flip of any core channels to NULL. + */ +static void +ClearSurfaceUsageApply(NVDevEvoPtr pDevEvo, + struct ClearSurfaceUsageCache *pCache, + NvBool skipUpdate) +{ + NVDispEvoPtr pDispEvo; + NvU32 apiHead, sd; + const NvU32 maxApiHeads = pDevEvo->numApiHeads * pDevEvo->numSubDevices; + struct NvKmsFlipRequestOneHead *pFlipApiHead = + nvCalloc(1, sizeof(*pFlipApiHead) * maxApiHeads); + NvU32 numFlipApiHeads = 0; + + if (pFlipApiHead == NULL) { + nvAssert(!"Failed to allocate memory"); + return; + } + + /* 1. Issue a flip of any overlay layer to NULL */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + + struct NvKmsFlipCommonParams *pRequestOneApiHead = + &pFlipApiHead[numFlipApiHeads].flip; + NvU32 layer; + NvBool found = FALSE; + + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + continue; + } + + for (layer = 0; + layer < pDevEvo->apiHead[apiHead].numLayers; layer++) { + + if (layer == NVKMS_MAIN_LAYER) { + continue; + } + + if (pCache->apiHead[sd][apiHead].layer[layer].flipToNull) { + pRequestOneApiHead->layer[layer].surface.specified = TRUE; + // No need to specify sizeIn/sizeOut as we are flipping NULL surface. + pRequestOneApiHead->layer[layer].compositionParams.specified = TRUE; + pRequestOneApiHead->layer[layer].syncObjects.specified = TRUE; + pRequestOneApiHead->layer[layer].completionNotifier.specified = TRUE; + + found = TRUE; + + pCache->apiHead[sd][apiHead].layer[layer].needToIdle = TRUE; + } + } + + if (found) { + pFlipApiHead[numFlipApiHeads].sd = sd; + pFlipApiHead[numFlipApiHeads].head = apiHead; + numFlipApiHeads++; + nvAssert(numFlipApiHeads <= maxApiHeads); + } + } + } + + if (numFlipApiHeads > 0) { + nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev, + pFlipApiHead, + numFlipApiHeads, + TRUE /* commit */, + NULL /* pReply */, + skipUpdate, + FALSE /* allowFlipLock */); + + nvkms_memset(pFlipApiHead, 0, + sizeof(pFlipApiHead[0]) * numFlipApiHeads); + numFlipApiHeads = 0; + } + + /* + * No need to idle the overlay layer before flipping the main channel to + * NULL, because the FlipOverlay90() function in the EVO2 hal makes sure + * that the overlay's flip to NULL is always interlocked with the core + * channel and the base (main layer) channel's flip to NULL can proceed only + * after completion of the overlay's flip to NULL (the base channel's flip + * to NULL interlocks with the core channel's flip to NULL). + */ + + /* 2. 
Issue a flip of any main layer to NULL */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + + struct NvKmsFlipCommonParams *pRequestOneApiHead = + &pFlipApiHead[numFlipApiHeads].flip; + NvBool found = FALSE; + + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + continue; + } + + if (pCache->apiHead[sd][apiHead].layer[NVKMS_MAIN_LAYER].flipToNull || + pCache->apiHead[sd][apiHead].layer[NVKMS_MAIN_LAYER].flipSemaphoreToNull) { + + if (pCache->apiHead[sd][apiHead].layer[NVKMS_MAIN_LAYER].flipToNull) { + pRequestOneApiHead->layer[NVKMS_MAIN_LAYER].surface.specified = TRUE; + // No need to specify sizeIn/sizeOut as we are flipping NULL surface. + pRequestOneApiHead->layer[NVKMS_MAIN_LAYER].completionNotifier.specified = TRUE; + + pCache->apiHead[sd][apiHead].layer[NVKMS_MAIN_LAYER].needToIdle = TRUE; + } + + /* XXX arguably we should also idle for this case, but we + * don't currently have a way to do so without also + * clearing the ISO surface */ + pRequestOneApiHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.useSyncpt = FALSE; + pRequestOneApiHead->layer[NVKMS_MAIN_LAYER].syncObjects.specified = TRUE; + + found = TRUE; + } + + if (found) { + pFlipApiHead[numFlipApiHeads].sd = sd; + pFlipApiHead[numFlipApiHeads].head = apiHead; + numFlipApiHeads++; + nvAssert(numFlipApiHeads <= maxApiHeads); + } + } + } + + if (numFlipApiHeads > 0) { + nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev, + pFlipApiHead, + numFlipApiHeads, + TRUE /* commit */, + NULL /* pReply */, + skipUpdate, + FALSE /* allowFlipLock */); + + nvkms_memset(pFlipApiHead, 0, + sizeof(pFlipApiHead[0]) * numFlipApiHeads); + numFlipApiHeads = 0; + } + + /* + * 3. Wait for any base/overlay layer that we expect to be idle to actually + * be idle. If they don't idle in a timely fashion, apply accelerators to + * forcibly idle any problematic channels. + */ + if (!skipUpdate) { + NvU32 layerMaskPerSdApiHead[NVKMS_MAX_SUBDEVICES] + [NVKMS_MAX_HEADS_PER_DISP] = { }; + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + for (NvU32 layer = 0; + layer < pDevEvo->apiHead[apiHead].numLayers; layer++) { + if (pCache->apiHead[sd][apiHead].layer[layer].needToIdle) { + layerMaskPerSdApiHead[sd][apiHead] |= NVBIT(layer); + } + } + } + } + nvIdleLayerChannels(pDevEvo, layerMaskPerSdApiHead); + } + + /* 4. Issue a flip of any core channels to NULL */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + continue; + } + + if (pCache->apiHead[sd][apiHead].flipCursorToNull) { + pFlipApiHead[numFlipApiHeads].flip.cursor.imageSpecified = TRUE; + pFlipApiHead[numFlipApiHeads].sd = sd; + pFlipApiHead[numFlipApiHeads].head = apiHead; + numFlipApiHeads++; + nvAssert(numFlipApiHeads <= maxApiHeads); + } + } + } + + if (numFlipApiHeads > 0) { + nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev, + pFlipApiHead, + numFlipApiHeads, + TRUE /* commit */, + NULL /* pReply */, + skipUpdate, + FALSE /* allowFlipLock */); + } + + nvFree(pFlipApiHead); +} + +/* + * This function unregisters/releases all of the surface handles remaining for + * the given pOpenDev. + * + * It duplicates some functionality of nvEvoUnregisterSurface() and + * nvEvoReleaseSurface(), but with an important difference: it processes the + * "clear surface usage" step for all surfaces up front, and only once that is + * complete it proceeds with freeing the surfaces. 
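ClearSurfaceUsageCollect() and ClearSurfaceUsageApply() above deliberately separate marking from acting: one pass records which (subdevice, head, layer) slots need a flip to NULL, and a second pass submits them through as few nvFlipEvo() calls as possible. A generic model of that two-pass batching, with sizes reduced for illustration:

    #include <stdbool.h>

    #define N_SLOTS 8 /* illustrative */

    typedef struct {
        bool needsWork[N_SLOTS];
    } UsageCache;

    /* Pass 1: only record; do not touch the hardware yet. */
    static void Collect(UsageCache *cache, int slot)
    {
        cache->needsWork[slot] = true;
    }

    /* Pass 2: gather everything recorded into one batched request. */
    static int Apply(const UsageCache *cache, int batch[N_SLOTS])
    {
        int n = 0;
        for (int i = 0; i < N_SLOTS; i++) {
            if (cache->needsWork[i]) {
                batch[n++] = i;
            }
        }
        return n; /* the caller submits 'batch' in a single request */
    }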
+ * + * In practice, this makes teardown much smoother than invoking those functions + * individually for each surface, particularly in the case that the hardware is + * stuck and needs accelerators. Consider the case where a client has + * registered several surfaces, and is flipping between two of them, and the + * hardware is stuck on a semaphore acquire that will never complete with + * several frames pending in the pushbuffer. If the first surface processed + * by nvEvoUnregisterSurface() happens to be the current "back buffer" (i.e., + * not the most recently pushed surface to be displayed), then + * nvEvoUnregisterSurface() will call ClearSurfaceUsage(), but it will find no + * channels to clear, and will proceed with nvEvoDecrementSurfaceRefCnts() + * which will call nvRMSyncEvoChannel() to drain any outstanding methods. Due + * to the stalled semaphore, nvRMSyncEvoChannel() will stall for 2 seconds, + * time out along with a nasty message to the kernel log, then we'll free the + * surface and remove its entry from the display hash table anyway. And that + * may happen several times until we finally call nvEvoUnregisterSurface() on + * the surface which is the most recently requested flip, where + * ClearSurfaceUsage() will finally get a chance to tear down the channel + * forcefully by using accelerators to skip the semaphore acquire. But, some + * of the methods which were outstanding and now get processed may reference a + * ctxdma which was already freed, triggering nasty Xid messages. + * + * By gathering up all the channels we can to find which ones to clear first, + * we have a much higher chance of avoiding these timeouts. + */ +void nvEvoFreeClientSurfaces(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NVEvoApiHandlesRec *pOpenDevSurfaceHandles) +{ + NvKmsGenericHandle surfaceHandle; + NVSurfaceEvoPtr pSurfaceEvo; + struct ClearSurfaceUsageCache cache = { }; + NvBool needApply = FALSE; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pOpenDevSurfaceHandles, + pSurfaceEvo, surfaceHandle) { + + if (nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { + /* + * If something besides the owner has an rmRefCnt reference, + * the surface might be in use by EVO; flip to NULL to attempt + * to free it. + */ + if (pSurfaceEvo->rmRefCnt > 1) { + ClearSurfaceUsageCollect(pDevEvo, pSurfaceEvo, &cache); + needApply = TRUE; + } + } + } + + if (needApply) { + ClearSurfaceUsageApply(pDevEvo, &cache, FALSE); + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pOpenDevSurfaceHandles, + pSurfaceEvo, surfaceHandle) { + const NvBool isOwner = + nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle); + + /* Remove the handle from the calling client's namespace. 
*/ + nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle); + + if (isOwner) { + nvEvoDecrementSurfaceRefCnts(pDevEvo, pSurfaceEvo); + } else { + nvEvoDecrementSurfaceStructRefCnt(pSurfaceEvo); + } + } + +} + +void nvEvoUnregisterSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NvKmsSurfaceHandle surfaceHandle, + NvBool skipUpdate, + NvBool skipSync) +{ + NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDev(pOpenDev); + NVSurfaceEvoPtr pSurfaceEvo; + + pSurfaceEvo = nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, + surfaceHandle); + if (pSurfaceEvo == NULL) { + return; + } + + if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN, + "Surface unregister attempted by non-owner; " + "non-owners must release the surface."); + return; + } + + /* + * If something besides the owner has an rmRefCnt reference, + * the surface might be in use by EVO; flip to NULL to attempt + * to free it. + */ + if (pSurfaceEvo->rmRefCnt > 1) { + struct ClearSurfaceUsageCache cache = { }; + + ClearSurfaceUsageCollect(pDevEvo, pSurfaceEvo, &cache); + ClearSurfaceUsageApply(pDevEvo, &cache, skipUpdate); + } + + /* Remove the handle from the calling client's namespace. */ + nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle); + + nvEvoDecrementSurfaceRefCntsWithSync(pDevEvo, pSurfaceEvo, skipSync); +} + +void nvEvoReleaseSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NvKmsSurfaceHandle surfaceHandle) +{ + NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDev(pOpenDev); + NVSurfaceEvoPtr pSurfaceEvo; + + pSurfaceEvo = nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, + surfaceHandle); + if (pSurfaceEvo == NULL) { + return; + } + + if (nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN, + "Surface release attempted by owner; " + "owners must unregister the surface."); + return; + } + + /* Remove the handle from the calling client's namespace. */ + nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle); + + nvEvoDecrementSurfaceStructRefCnt(pSurfaceEvo); +} + +void nvEvoIncrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo) +{ + nvAssert(!nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo)); + + pSurfaceEvo->rmRefCnt++; + pSurfaceEvo->structRefCnt++; +} + +void nvEvoDecrementSurfaceRefCnts(NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo) +{ + nvEvoDecrementSurfaceRefCntsWithSync(pDevEvo, pSurfaceEvo, NV_FALSE); +} + +void nvEvoDecrementSurfaceRefCntsWithSync(NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo, + NvBool skipSync) +{ + nvAssert(pSurfaceEvo->rmRefCnt >= 1); + pSurfaceEvo->rmRefCnt--; + + if (pSurfaceEvo->rmRefCnt == 0) { + /* + * Don't sync if this surface was registered as not requiring display + * hardware access, to WAR timeouts that result from OGL unregistering + * a deferred request fifo causing a sync here that may timeout if + * GLS hasn't had the opportunity to release semaphores with pending + * flips. 
(Bug 2050970) + */ + if (!skipSync && pSurfaceEvo->requireDisplayHardwareAccess) { + nvEvoClearSurfaceUsage(pDevEvo, pSurfaceEvo); + } + + FreeSurfaceEvoRm(pDevEvo, pSurfaceEvo); + } + + nvEvoDecrementSurfaceStructRefCnt(pSurfaceEvo); +} + +NvBool nvEvoSurfaceRefCntsTooLarge(const NVSurfaceEvoRec *pSurfaceEvo) +{ + return ((pSurfaceEvo->rmRefCnt == NV_U64_MAX) || + (pSurfaceEvo->structRefCnt == NV_U64_MAX)); +} + +static NVSurfaceEvoPtr GetSurfaceFromHandle( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvKmsSurfaceHandle surfaceHandle, + const NvBool isUsedByCursorChannel, + const NvBool isUsedByLayerChannel, + const NvBool requireDisplayHardwareAccess, + const NvBool maybeUsedBy3d) +{ + NVSurfaceEvoPtr pSurfaceEvo = + nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, surfaceHandle); + + nvAssert(requireDisplayHardwareAccess || (!isUsedByCursorChannel && !isUsedByLayerChannel)); + + if (pSurfaceEvo == NULL) { + return NULL; + } + + if (pSurfaceEvo->rmRefCnt == 0) { /* orphan */ + return NULL; + } + + if (requireDisplayHardwareAccess && !pSurfaceEvo->requireDisplayHardwareAccess) { + return NULL; + } + + /* Validate that the surface can be used as a cursor image */ + if (isUsedByCursorChannel && + !pDevEvo->hal->ValidateCursorSurface(pDevEvo, pSurfaceEvo)) { + return NULL; + } + + /* + * XXX If maybeUsedBy3d, the fetched surface may be used as a texture by the + * 3d engine. Previously, all surfaces were checked by + * nvEvoGetHeadSetStoragePitchValue() at registration time, and we don't + * know if nvEvoGetHeadSetStoragePitchValue() was protecting us from any + * surface dimensions that could cause trouble for the 3d engine. + */ + if (isUsedByLayerChannel || maybeUsedBy3d) { + NvU8 planeIndex; + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + if (nvEvoGetHeadSetStoragePitchValue( + pDevEvo, + pSurfaceEvo->layout, + pSurfaceEvo->planes[planeIndex].pitch) == 0) { + return NULL; + } + } + } + + return pSurfaceEvo; +} + +NVSurfaceEvoPtr nvEvoGetSurfaceFromHandle( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvKmsSurfaceHandle surfaceHandle, + const NvBool isUsedByCursorChannel, + const NvBool isUsedByLayerChannel) +{ + return GetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + surfaceHandle, + isUsedByCursorChannel, + isUsedByLayerChannel, + TRUE /* requireDisplayHardwareAccess */, + TRUE /* maybeUsedBy3d */); +} + +NVSurfaceEvoPtr nvEvoGetSurfaceFromHandleNoDispHWAccessOk( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvKmsSurfaceHandle surfaceHandle) +{ + return GetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + surfaceHandle, + FALSE /* isUsedByCursorChannel */, + FALSE /* isUsedByLayerChannel */, + FALSE /* requireDisplayHardwareAccess */, + TRUE /* maybeUsedBy3d */); +} + +NVSurfaceEvoPtr nvEvoGetSurfaceFromHandleNoHWAccess( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvKmsSurfaceHandle surfaceHandle) +{ + return GetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + surfaceHandle, + FALSE /* isUsedByCursorChannel */, + FALSE /* isUsedByLayerChannel */, + FALSE /* requireDisplayHardwareAccess */, + FALSE /* maybeUsedBy3d */); +} + +/*! + * Create a deferred request fifo, using the specified pSurfaceEvo. 
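The surface code above keeps two counters with different lifetimes: rmRefCnt pins the RM-side resources while structRefCnt pins the NVSurfaceEvoRec itself, which is why a surface can linger as an orphan struct after its RM objects are gone. A minimal model of that split, with free() standing in for FreeSurfaceEvoRm()/FreeSurfaceEvoStruct():

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct {
        uint64_t rmRefCnt;     /* guards the RM-side resources */
        uint64_t structRefCnt; /* guards this struct itself */
    } Obj;

    static void PutBoth(Obj *o)
    {
        if (--o->rmRefCnt == 0) {
            /* Release RM resources; the struct stays valid (an "orphan"). */
        }
        if (--o->structRefCnt == 0) {
            free(o); /* nothing may touch *o after this */
        }
    }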
+ */
+NVDeferredRequestFifoRec *nvEvoRegisterDeferredRequestFifo(
+    NVDevEvoPtr pDevEvo,
+    NVSurfaceEvoPtr pSurfaceEvo)
+{
+    NVDeferredRequestFifoRec *pDeferredRequestFifo;
+    NvU32 ret;
+
+    if (pSurfaceEvo->planes[0].rmObjectSizeInBytes <
+        sizeof(struct NvKmsDeferredRequestFifo)) {
+        return NULL;
+    }
+
+    /*
+     * XXX validate that the surface is in sysmem; can we query that from
+     * resman?
+     */
+
+    pDeferredRequestFifo = nvCalloc(1, sizeof(*pDeferredRequestFifo));
+
+    if (pDeferredRequestFifo == NULL) {
+        return NULL;
+    }
+
+    /* Get a CPU mapping of the surface. */
+
+    ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle,
+                           pDevEvo->deviceHandle,
+                           pSurfaceEvo->planes[0].rmHandle,
+                           0,
+                           sizeof(*pDeferredRequestFifo->fifo),
+                           (void **) &pDeferredRequestFifo->fifo,
+                           0);
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvFree(pDeferredRequestFifo);
+        return NULL;
+    }
+
+    pDeferredRequestFifo->pSurfaceEvo = pSurfaceEvo;
+
+    nvEvoIncrementSurfaceRefCnts(pSurfaceEvo);
+
+    return pDeferredRequestFifo;
+}
+
+/*!
+ * Free the deferred request fifo.
+ */
+void nvEvoUnregisterDeferredRequestFifo(
+    NVDevEvoPtr pDevEvo,
+    NVDeferredRequestFifoRec *pDeferredRequestFifo)
+{
+    nvAssert(pDeferredRequestFifo->fifo != NULL);
+    nvAssert(pDeferredRequestFifo->pSurfaceEvo != NULL);
+
+    nvHsLeaveSwapGroup(pDevEvo, pDeferredRequestFifo, FALSE /* teardown */);
+
+    nvRmApiUnmapMemory(
+        nvEvoGlobal.clientHandle,
+        pDevEvo->deviceHandle,
+        pDeferredRequestFifo->pSurfaceEvo->planes[0].rmHandle,
+        pDeferredRequestFifo->fifo,
+        0);
+
+    nvEvoDecrementSurfaceRefCnts(pDevEvo, pDeferredRequestFifo->pSurfaceEvo);
+
+    nvFree(pDeferredRequestFifo);
+}
diff --git a/src/nvidia-modeset/src/nvkms-utils-flip.c b/src/nvidia-modeset/src/nvkms-utils-flip.c
new file mode 100644
index 0000000..8338dd6
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-utils-flip.c
@@ -0,0 +1,399 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms-utils-flip.h"
+#include "nvkms-private.h"
+#include "nvkms-surface.h"
+#include "nvkms-cursor.h"
+#include "nvkms-sync.h"
+
+/*!
+ * Assign the elements in an NVSurfaceEvoPtr[NVKMS_MAX_EYES] array.
+ *
+ * Use NVEvoApiHandlesRec to translate an
+ * NvKmsSurfaceHandle[NVKMS_MAX_EYES] array into an
+ * NVSurfaceEvoPtr[NVKMS_MAX_EYES] array.
+ *
+ * \param[in]  pOpenDevSurfaceHandles  The client's surfaces.
+ * \param[in] surfaceHandles The handles naming surfaces. + * \param[out] pSurfaceEvos The surface pointers. + * + * \return Return TRUE if all surfaceHandles could be successfully + * translated into pSurfaceEvos. Otherwise, return FALSE. + */ +NvBool nvAssignSurfaceArray( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvKmsSurfaceHandle surfaceHandles[NVKMS_MAX_EYES], + const NvBool isUsedByCursorChannel, + const NvBool isUsedByLayerChannel, + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES]) +{ + NvU32 eye; + + nvkms_memset(pSurfaceEvos, 0, sizeof(NVSurfaceEvoRec *) * NVKMS_MAX_EYES); + + for (eye = 0; eye < NVKMS_MAX_EYES; eye++) { + if (surfaceHandles[eye] != 0) { + pSurfaceEvos[eye] = + nvEvoGetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + surfaceHandles[eye], + isUsedByCursorChannel, + isUsedByLayerChannel); + if ((pSurfaceEvos[eye] == NULL) || + (pSurfaceEvos[eye]->isoType != NVKMS_MEMORY_ISO)) { + return FALSE; + } + } + } + return TRUE; +} + + +/*! + * Assign the NVFlipNIsoSurfaceEvoHwState. + * + * Use the given NvKmsNIsoSurface to populate the + * NVFlipNIsoSurfaceEvoHwState. Validate that NvKmsNIsoSurface + * description is legitimate. + * + * \param[in] pDevEvo The device where the surface will be used. + * \param[in] pOpenDevSurfaceHandles The client's surfaces. + * \param[in] pParamsNIso The client's description of the NISO surface. + * \param[in] notifier Whether the NISO surface is a notifier. + * \param[in] pChannel The channel where the surface will be used. + * \param[out] pNIsoState The NVKMS presentation of the NISO surface. + * + * \return Return TRUE if the NVFlipNIsoSurfaceEvoHwState could be + * assigned and validated. Otherwise, return FALSE and leave + * the NVFlipNIsoSurfaceEvoHwState untouched. + */ +NvBool nvAssignNIsoEvoHwState( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const struct NvKmsNIsoSurface *pParamsNIso, + const NvBool notifier, /* TRUE=notifier; FALSE=semaphore */ + const NvU32 layer, + NVFlipNIsoSurfaceEvoHwState *pNIsoState) +{ + NVSurfaceEvoPtr pSurfaceEvo; + NvU32 elementSizeInBytes = 0, offsetInBytes, maxBytes; + + nvAssert(pParamsNIso->surfaceHandle != 0); + + pSurfaceEvo = + nvEvoGetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + pParamsNIso->surfaceHandle, + FALSE /* isUsedByCursorChannel */, + TRUE /* isUsedByLayerChannel */); + if (pSurfaceEvo == NULL) { + return FALSE; + } + + /* Attempt to validate the surface: */ + + /* Only pitch surfaces can be used */ + if (pSurfaceEvo->layout != NvKmsSurfaceMemoryLayoutPitch) { + return FALSE; + } + + if (pSurfaceEvo->isoType != NVKMS_MEMORY_NISO) { + return FALSE; + } + + if ((pParamsNIso->format != NVKMS_NISO_FORMAT_FOUR_WORD) && + (pParamsNIso->format != NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY) && + (pParamsNIso->format != NVKMS_NISO_FORMAT_LEGACY)) { + return FALSE; + } + + if ((pDevEvo->caps.validNIsoFormatMask & + (1 << pParamsNIso->format)) == 0) { + return FALSE; + } + + /* Check that the item fits within the surface. */ + switch (pParamsNIso->format) { + case NVKMS_NISO_FORMAT_FOUR_WORD: + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + elementSizeInBytes = 16; + break; + case NVKMS_NISO_FORMAT_LEGACY: + if (notifier) { + /* Legacy notifier size depends on the layer. */ + elementSizeInBytes = + pDevEvo->caps.legacyNotifierFormatSizeBytes[layer]; + } else { + /* Legacy semaphores are always 4 bytes. 
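Once the element size is known, the code that follows converts the client's offsetInWords to bytes and requires the element to fit both inside plane 0 of the surface and inside a single 4 KiB page. The arithmetic with concrete numbers, as a worked example rather than driver code:

    #include <assert.h>
    #include <stdint.h>

    static void NIsoBoundsExample(void)
    {
        uint16_t offsetInWords = 1020;             /* client-provided */
        uint32_t offsetInBytes = (uint32_t)offsetInWords * 4;
        uint32_t elementSize = 16;                 /* four-word format */
        uint32_t maxBytes = offsetInBytes + elementSize;

        assert(offsetInBytes == 4080);
        assert(maxBytes == 4096);                  /* just fits in a 4K page */
        /* offsetInWords = 1021 would give maxBytes = 4100 > 4096: reject. */
    }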
*/ + elementSizeInBytes = 4; + } + break; + } + +#if defined(DEBUG) + /* Assert that the size calculated by nvkms-sync library is the same as the + * one we derived from channel caps above. */ + if (notifier) { + NvBool overlay = (layer != NVKMS_MAIN_LAYER); + NvU32 libSize = nvKmsSizeOfNotifier(pParamsNIso->format, overlay); + nvAssert(libSize == elementSizeInBytes); + } else { + nvAssert(nvKmsSizeOfSemaphore(pParamsNIso->format) == elementSizeInBytes); + } +#endif + /* + * offsetInWords is an NvU16 and offsetInBytes is an NvU32, so + * neither of the expressions: + * offsetInWords * 4 + * offsetInBytes + elementSizeInBytes + * should ever overflow. + */ + + ct_assert(sizeof(pParamsNIso->offsetInWords) == 2); + + offsetInBytes = ((NvU32)pParamsNIso->offsetInWords) * 4; + + /* + * Compute the upper extent of the NISO element within the surface. + */ + + maxBytes = offsetInBytes + elementSizeInBytes; + + if (maxBytes > pSurfaceEvo->planes[0].rmObjectSizeInBytes) { + return FALSE; + } + + /* EVO expects the NISO element to fit within a 4k page. */ + + if (maxBytes > 4096) { + return FALSE; + } + + /* + * XXX NVKMS TODO: Check that the surface is in vidmem if + * NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY + */ + + pNIsoState->pSurfaceEvo = pSurfaceEvo; + pNIsoState->format = pParamsNIso->format; + pNIsoState->offsetInWords = pParamsNIso->offsetInWords; + + return TRUE; +} + +NvBool nvAssignCompletionNotifierEvoHwState( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const struct NvKmsCompletionNotifierDescription *pParamsNotif, + const NvU32 layer, + NVFlipCompletionNotifierEvoHwState *pNotif) +{ + NvBool ret; + + nvkms_memset(pNotif, 0, sizeof(*pNotif)); + + /* If no surface is specified, we should not use a notifier. */ + if (pParamsNotif->surface.surfaceHandle == 0) { + return TRUE; + } + + ret = nvAssignNIsoEvoHwState(pDevEvo, + pOpenDevSurfaceHandles, + &pParamsNotif->surface, + TRUE, /* notifier */ + layer, + &pNotif->surface); + if (ret) { + pNotif->awaken = pParamsNotif->awaken; + } + + return ret; +} + +NvBool nvAssignSemaphoreEvoHwState( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvU32 layer, + const NvU32 sd, + const struct NvKmsChannelSyncObjects *pChannelSyncObjects, + NVFlipSyncObjectEvoHwState *pFlipSyncObject) +{ + NvBool ret; + + nvAssert(!pChannelSyncObjects->useSyncpt); + + nvkms_memset(pFlipSyncObject, 0, sizeof(*pFlipSyncObject)); + + if (!pDevEvo->hal->caps.supportsIndependentAcqRelSemaphore) { + /*! acquire and release sema surface needs to be same */ + if (pChannelSyncObjects->u.semaphores.acquire.surface.surfaceHandle != + pChannelSyncObjects->u.semaphores.release.surface.surfaceHandle) { + return FALSE; + } + if (pChannelSyncObjects->u.semaphores.acquire.surface.format != + pChannelSyncObjects->u.semaphores.release.surface.format) { + return FALSE; + } + if (pChannelSyncObjects->u.semaphores.acquire.surface.offsetInWords != + pChannelSyncObjects->u.semaphores.release.surface.offsetInWords) { + return FALSE; + } + } + + /*! 
If no surface is specified, we should not use a semaphore.*/ + if (pChannelSyncObjects->u.semaphores.acquire.surface.surfaceHandle != 0) { + + ret = nvAssignNIsoEvoHwState( + pDevEvo, + pOpenDevSurfaceHandles, + &pChannelSyncObjects->u.semaphores.acquire.surface, + FALSE, /* notifier */ + layer, + &pFlipSyncObject->u.semaphores.acquireSurface); + if (ret) { + pFlipSyncObject->u.semaphores.acquireValue = + pChannelSyncObjects->u.semaphores.acquire.value; + } else { + return ret; + } + } + + /*! If no surface is specified, we should not use a semaphore.*/ + if (pChannelSyncObjects->u.semaphores.release.surface.surfaceHandle != 0) { + + ret = nvAssignNIsoEvoHwState( + pDevEvo, + pOpenDevSurfaceHandles, + &pChannelSyncObjects->u.semaphores.release.surface, + FALSE, /* notifier */ + layer, + &pFlipSyncObject->u.semaphores.releaseSurface); + if (ret) { + pFlipSyncObject->u.semaphores.releaseValue = + pChannelSyncObjects->u.semaphores.release.value; + } else { + return ret; + } + } + + return TRUE; +} + +NvBool nvValidatePerLayerCompParams( + const struct NvKmsCompositionParams *pCompParams, + const struct NvKmsCompositionCapabilities *pCaps, + NVSurfaceEvoPtr pSurfaceEvo) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = (pSurfaceEvo != NULL) ? + nvKmsGetSurfaceMemoryFormatInfo(pSurfaceEvo->format) : NULL; + const enum NvKmsCompositionColorKeySelect colorKeySelect = + pCompParams->colorKeySelect; + NvU32 match; + + if ((pCaps->supportedColorKeySelects & NVBIT(colorKeySelect)) == 0x0) { + return FALSE; + } + + NVKMS_COMPOSITION_FOR_MATCH_BITS(colorKeySelect, match) { + if ((pCaps->colorKeySelect[colorKeySelect].supportedBlendModes[match] & + NVBIT(pCompParams->blendingMode[match])) == 0x0) { + return FALSE; + } + + switch (pCompParams->blendingMode[match]) { + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA: + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA: + if (pCompParams->surfaceAlpha != 0) { + return FALSE; + } + break; + default: + break; + } + } + + /* Match and nomatch pixels should not use alpha blending mode at once. */ + if ((colorKeySelect != NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) && + (NvKmsIsCompositionModeUseAlpha(pCompParams->blendingMode[0])) && + (NvKmsIsCompositionModeUseAlpha(pCompParams->blendingMode[1]))) { + return FALSE; + } + + /* + * If surface is NULL, no further validation required. The composition + * parameters do not take effect if surface is NULL. + */ + if (pFormatInfo == NULL || pFormatInfo->isYUV) { + return TRUE; + } + + /* Disable color keying for 8 Bpp surfaces. 
*/ + if ((colorKeySelect == NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) || + (colorKeySelect == NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST)) { + + if (pFormatInfo->rgb.bytesPerPixel > 4) { + return FALSE; + } + } + + return TRUE; +} + +NvBool +nvAssignCursorSurface(const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const struct NvKmsSetCursorImageCommonParams *pImgParams, + NVSurfaceEvoPtr *pSurfaceEvo) + +{ + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDevConst(pOpenDev); + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES] = { }; + + if (!nvGetCursorImageSurfaces(pDevEvo, + pOpenDevSurfaceHandles, + pImgParams, + pSurfaceEvos)) { + return FALSE; + } + + /* XXX NVKMS TODO: add support for stereo cursor */ + if (pSurfaceEvos[NVKMS_RIGHT] != NULL) { + return FALSE; + } + + if (pSurfaceEvos[NVKMS_LEFT] != NULL) { + if (!nvValidatePerLayerCompParams(&pImgParams->cursorCompParams, + &pDevEvo->caps.cursorCompositionCaps, + pSurfaceEvos[NVKMS_LEFT])) { + return FALSE; + } + } + + *pSurfaceEvo = pSurfaceEvos[NVKMS_LEFT]; + + return TRUE; +} + diff --git a/src/nvidia-modeset/src/nvkms-utils.c b/src/nvidia-modeset/src/nvkms-utils.c new file mode 100644 index 0000000..6fd8235 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-utils.c @@ -0,0 +1,803 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-utils.h" +#include "nvkms-types.h" +#include "nv_mode_timings_utils.h" +#include "nv_vasprintf.h" + +#include "nv_list.h" /* for nv_container_of() */ + +void nvVEvoLog(NVEvoLogType logType, NvU8 gpuLogIndex, + const char *fmt, va_list ap) +{ + char *msg, prefix[10]; + const char *gpuPrefix = ""; + int level; + + switch (logType) { + default: + case EVO_LOG_INFO: level = NVKMS_LOG_LEVEL_INFO; break; + case EVO_LOG_WARN: level = NVKMS_LOG_LEVEL_WARN; break; + case EVO_LOG_ERROR: level = NVKMS_LOG_LEVEL_ERROR; break; + } + + msg = nv_vasprintf(fmt, ap); + if (msg == NULL) { + return; + } + + if (gpuLogIndex != NV_INVALID_GPU_LOG_INDEX) { + nvkms_snprintf(prefix, sizeof(prefix), "GPU:%d: ", gpuLogIndex); + gpuPrefix = prefix; + } + + nvkms_log(level, gpuPrefix, msg); + + nvFree(msg); +} + +void nvEvoLogDev(const NVDevEvoRec *pDevEvo, NVEvoLogType logType, + const char *fmt, ...) 
+{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, pDevEvo->gpuLogIndex, fmt, ap); + va_end(ap); +} + +void nvEvoLogDisp(const NVDispEvoRec *pDispEvo, NVEvoLogType logType, + const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, pDispEvo->gpuLogIndex, fmt, ap); + va_end(ap); +} + +void nvEvoLog(NVEvoLogType logType, const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, NV_INVALID_GPU_LOG_INDEX, fmt, ap); + va_end(ap); +} + +#if defined(DEBUG) + +void nvEvoLogDebug(NVEvoLogType logType, const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, NV_INVALID_GPU_LOG_INDEX, fmt, ap); + va_end(ap); +} + +void nvEvoLogDevDebug(const NVDevEvoRec *pDevEvo, NVEvoLogType logType, + const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, pDevEvo->gpuLogIndex, fmt, ap); + va_end(ap); +} + +void nvEvoLogDispDebug(const NVDispEvoRec *pDispEvo, NVEvoLogType logType, + const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, pDispEvo->gpuLogIndex, fmt, ap); + va_end(ap); +} + +#endif /* DEBUG */ + + +/*! + * Initialize the given NVEvoInfoString. + * + * Point the infoString at the specified character array. + */ +void nvInitInfoString(NVEvoInfoStringPtr pInfoString, + char *s, NvU16 totalLength) +{ + nvkms_memset(pInfoString, 0, sizeof(*pInfoString)); + pInfoString->s = s; + pInfoString->totalLength = totalLength; +} + + +/*! + * Append the text, described by 'format' and 'ap', to the infoString. + */ +static void LogInfoString(NVEvoInfoStringPtr pInfoString, + const char *format, va_list ap) +{ + char *s; + size_t size = pInfoString->totalLength - pInfoString->length; + int ret; + + if (pInfoString->s == NULL) { + return; + } + if (size <= 1) { + nvAssert(!"pInfoString too small"); + return; + } + + s = pInfoString->s + pInfoString->length; + + ret = nvkms_vsnprintf(s, size, format, ap); + + if (ret > 0) { + pInfoString->length += NV_MIN((size_t)ret, size - 1); + } + + /* + * If ret is larger than size, then we may need to increase + * totalLength to support logging everything that we are trying to + * log to this buffer. + */ + nvAssert(ret <= size); + + nvAssert(pInfoString->length < pInfoString->totalLength); + pInfoString->s[pInfoString->length] = '\0'; +} + + +/*! + * Append to the infoString, without any additions. + */ +void nvEvoLogInfoStringRaw(NVEvoInfoStringPtr pInfoString, + const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + LogInfoString(pInfoString, format, ap); + va_end(ap); +} + + +/*! + * Append to the infoString, appending a newline. + */ +void nvEvoLogInfoString(NVEvoInfoStringPtr pInfoString, + const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + LogInfoString(pInfoString, format, ap); + va_end(ap); + + nvEvoLogInfoStringRaw(pInfoString, "\n"); +} + + +/*! + * The NVEvoApiHandlesRec-related functions below are used to manage + * sets of NvKms API handles. For the various NvKms objects (e.g., + * devices, disps, connectors, surfaces) clients will specify the + * object by handle, and NVKMS will look up the corresponding object. + * + * We store a pointer to the object in a dynamically allocated array, + * and use the handle to look up the pointer in the array. + * + * Note that handles are 1-based (valid handles are in the range + * [1,numPointers], and 0 is an invalid handle), while indices to the + * corresponding pointers are 0-based (valid indices are in the range + * [0,numPointers-1]). 
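+ * (For example, handle 3 maps to pointers[2]; handle 0 never maps to any
+ * element.)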
Subtract 1 from the handle to get the index + * for the pointer. + */ + +/*! + * Increase the size of the NVEvoApiHandles::pointers array. + * + * Reallocate the pointers array, increasing by defaultSize. + * Initialize the new region of memory. + */ +static NvBool GrowApiHandlesPointersArray(NVEvoApiHandlesPtr pEvoApiHandles) +{ + NvU32 newNumPointers = + pEvoApiHandles->numPointers + pEvoApiHandles->defaultSize; + size_t oldSize = pEvoApiHandles->numPointers * sizeof(void *); + size_t newSize = newNumPointers * sizeof(void *); + void **newPointers; + + /* Check for wrap in the newNumPointers computation. */ + if (newSize <= oldSize) { + return FALSE; + } + + newPointers = nvRealloc(pEvoApiHandles->pointers, newSize); + + if (newPointers == NULL) { + return FALSE; + } + + nvkms_memset(&newPointers[pEvoApiHandles->numPointers], 0, newSize - oldSize); + + pEvoApiHandles->pointers = newPointers; + pEvoApiHandles->numPointers = newNumPointers; + + return TRUE; +} + + +/*! + * Attempt to shrink the NVEvoApiHandles::pointers array. + * + * If high elements in the array are unused, reduce the array size in + * multiples of defaultSize. + */ +static void ShrinkApiHandlesPointersArray(NVEvoApiHandlesPtr pEvoApiHandles) +{ + NvU32 index; + NvU32 newNumPointers; + void **newPointers; + + /* If the array is already as small as it can be, we are done. */ + + if (pEvoApiHandles->numPointers == pEvoApiHandles->defaultSize) { + return; + } + + /* Find the highest non-empty element. */ + + for (index = pEvoApiHandles->numPointers - 1; index > 0; index--) { + if (pEvoApiHandles->pointers[index] != NULL) { + break; + } + } + + /* + * Compute the new array size by rounding index up to the next + * multiple of defaultSize. + */ + newNumPointers = ((index / pEvoApiHandles->defaultSize) + 1) * + pEvoApiHandles->defaultSize; + + /* If the array is already that size, we are done. */ + + if (pEvoApiHandles->numPointers == newNumPointers) { + return; + } + + newPointers = + nvRealloc(pEvoApiHandles->pointers, newNumPointers * sizeof(void *)); + + if (newPointers != NULL) { + pEvoApiHandles->pointers = newPointers; + pEvoApiHandles->numPointers = newNumPointers; + } +} + + +/*! + * Return true if 'pointer' is already present in pEvoApiHandles + */ +NvBool nvEvoApiHandlePointerIsPresent(NVEvoApiHandlesPtr pEvoApiHandles, + void *pointer) +{ + NvU32 index; + + for (index = 0; index < pEvoApiHandles->numPointers; index++) { + if (pEvoApiHandles->pointers[index] == pointer) { + return TRUE; + } + } + + return FALSE; +} + + +/*! + * Create an NvKms API handle. + * + * Create an available handle from pEvoApiHandles, and associate + * 'pointer' with the handle. + */ +NvKmsGenericHandle +nvEvoCreateApiHandle(NVEvoApiHandlesPtr pEvoApiHandles, void *pointer) +{ + NvU32 index; + + if (pointer == NULL) { + return 0; + } + + for (index = 0; index < pEvoApiHandles->numPointers; index++) { + if (pEvoApiHandles->pointers[index] == NULL) { + goto availableIndex; + } + } + + /* + * Otherwise, there are no free elements in the pointers array: + * grow the array and try again. + */ + if (!GrowApiHandlesPointersArray(pEvoApiHandles)) { + return 0; + } + + /* fall through */ + +availableIndex: + + nvAssert(index < pEvoApiHandles->numPointers); + nvAssert(pEvoApiHandles->pointers[index] == NULL); + + pEvoApiHandles->pointers[index] = pointer; + + return index + 1; +} + + +/*! + * Retrieve a pointer that maps to an NvKms API handle. + * + * Return the pointer that nvEvoCreateApiHandle() associated with 'handle'. 
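+ *
+ * A minimal usage sketch (with a hypothetical object pointer 'pObj'),
+ * pairing this lookup with nvEvoCreateApiHandle() and
+ * nvEvoDestroyApiHandle():
+ *
+ *   NvKmsGenericHandle h = nvEvoCreateApiHandle(pEvoApiHandles, pObj);
+ *   nvAssert(nvEvoGetPointerFromApiHandle(pEvoApiHandles, h) == pObj);
+ *   nvEvoDestroyApiHandle(pEvoApiHandles, h);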
+ */ +void *nvEvoGetPointerFromApiHandle(const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsGenericHandle handle) +{ + NvU32 index; + + if (handle == 0) { + return NULL; + } + + index = handle - 1; + + if (index >= pEvoApiHandles->numPointers) { + return NULL; + } + + return pEvoApiHandles->pointers[index]; +} + + +/*! + * Retrieve a pointer that maps to the next NvKms API handle. + * + * This is intended to be used by the + * FOR_ALL_POINTERS_IN_EVO_API_HANDLES() macro. On the first + * iteration, *pHandle == 0, and this will return the first pointer it + * finds in the pointer array. The returned *pHandle will be the + * location to begin searching on the next iteration, and so on. + * + * Once there are no more non-zero elements in the pointer array, + * return NULL. + */ +void *nvEvoGetPointerFromApiHandleNext(const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsGenericHandle *pHandle) +{ + NvU32 index = *pHandle; + + for (; index < pEvoApiHandles->numPointers; index++) { + if (pEvoApiHandles->pointers[index] != NULL) { + *pHandle = index + 1; + return pEvoApiHandles->pointers[index]; + } + } + + return NULL; +} + + +/*! + * Remove an NvKms API handle. + * + * Clear the 'handle' entry, and its corresponding pointer, from pEvoApiHandles. + */ +void nvEvoDestroyApiHandle(NVEvoApiHandlesPtr pEvoApiHandles, + NvKmsGenericHandle handle) +{ + NvU32 index; + + if (handle == 0) { + return; + } + + index = handle - 1; + + if (index >= pEvoApiHandles->numPointers) { + return; + } + + pEvoApiHandles->pointers[index] = NULL; + + ShrinkApiHandlesPointersArray(pEvoApiHandles); +} + + +/* Only used in nvAssert, so only build into debug builds to avoid never-used + * warnings */ +#if defined(DEBUG) +/*! + * Return the number of non-NULL pointers in the pointer array. + */ +static NvU32 +CountApiHandles(const NVEvoApiHandlesRec *pEvoApiHandles) +{ + NvU32 index, count = 0; + + for (index = 0; index < pEvoApiHandles->numPointers; index++) { + if (pEvoApiHandles->pointers[index] != NULL) { + count++; + } + } + + return count; +} +#endif /* DEBUG */ + + +/*! + * Initialize the NVEvoApiHandlesRec. + * + * This should be called before any + * nvEvo{Create,GetPointerFrom,Destroy}ApiHandle() calls on this + * pEvoApiHandles. + * + * The pointer array for the pEvoApiHandles will be managed in + * multiples of 'defaultSize'. + */ +NvBool nvEvoInitApiHandles(NVEvoApiHandlesPtr pEvoApiHandles, NvU32 defaultSize) +{ + nvkms_memset(pEvoApiHandles, 0, sizeof(*pEvoApiHandles)); + + pEvoApiHandles->defaultSize = defaultSize; + + return GrowApiHandlesPointersArray(pEvoApiHandles); +} + + +/*! + * Free the NVEvoApiHandlesPtr resources. + */ +void nvEvoDestroyApiHandles(NVEvoApiHandlesPtr pEvoApiHandles) +{ + nvAssert(CountApiHandles(pEvoApiHandles) == 0); + + nvFree(pEvoApiHandles->pointers); + + nvkms_memset(pEvoApiHandles, 0, sizeof(*pEvoApiHandles)); +} + +NvU8 nvPixelDepthToBitsPerComponent(enum nvKmsPixelDepth pixelDepth) +{ + /* + * Note: The 444 formats have three components per pixel, thus we compute + * bpc as depth/3. The 422 formats effectively store two components per + * pixel, so we compute bpc for those as depth/2. 
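+     * For example: NVKMS_PIXEL_DEPTH_30_444 yields 30/3 = 10 bpc, and
+     * NVKMS_PIXEL_DEPTH_20_422 yields 20/2 = 10 bpc, matching the cases
+     * below.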
+ */ + switch (pixelDepth) { + case NVKMS_PIXEL_DEPTH_18_444: + return 6; + case NVKMS_PIXEL_DEPTH_24_444: + case NVKMS_PIXEL_DEPTH_16_422: + return 8; + case NVKMS_PIXEL_DEPTH_30_444: + case NVKMS_PIXEL_DEPTH_20_422: + return 10; + } + nvAssert(!"Unknown NVKMS_PIXEL_DEPTH"); + return 0; +} + +/* Import function required by nvBuildModeName() */ + +int nvBuildModeNameSnprintf(char *str, size_t size, const char *format, ...) +{ + va_list ap; + int ret; + + va_start(ap, format); + ret = nvkms_vsnprintf(str, size, format, ap); + va_end(ap); + + return ret; +} + +/* Import functions required by nv_vasprintf() */ + +void *nv_vasprintf_alloc(size_t size) +{ + return nvAlloc(size); +} + +void nv_vasprintf_free(void *ptr) +{ + nvFree(ptr); +} + +int nv_vasprintf_vsnprintf(char *str, size_t size, + const char *format, va_list ap) +{ + return nvkms_vsnprintf(str, size, format, ap); +} + +/* + * Track the size of each allocation, so that it can be passed to + * nvkms_free(). + */ +typedef struct { + size_t size; /* includes sizeof(nvkms_memory_info_t) */ + char data[] __attribute__((aligned(8))); +} nvkms_memory_info_t; + +void *nvInternalAlloc(size_t size, const NvBool zero) +{ + size_t totalSize = size + sizeof(nvkms_memory_info_t); + nvkms_memory_info_t *p; + + if (totalSize < size) { /* overflow in the above addition */ + return NULL; + } + + p = nvkms_alloc(totalSize, zero); + + if (p == NULL) { + return NULL; + } + + p->size = totalSize; + + return p->data; +} + +void *nvInternalRealloc(void *ptr, size_t size) +{ + nvkms_memory_info_t *p = NULL; + void *newptr; + + if (ptr == NULL) { + /* realloc with a ptr of NULL is equivalent to alloc. */ + return nvInternalAlloc(size, FALSE); + } + + if (size == 0) { + /* realloc with a size of 0 is equivalent to free. */ + nvInternalFree(ptr); + return NULL; + } + + p = nv_container_of(ptr, nvkms_memory_info_t, data); + + newptr = nvInternalAlloc(size, FALSE); + + if (newptr != NULL) { + size_t oldsize = p->size - sizeof(nvkms_memory_info_t); + size_t copysize = (size < oldsize) ? size : oldsize; + nvkms_memcpy(newptr, ptr, copysize); + nvInternalFree(ptr); + } + + return newptr; +} + +void nvInternalFree(void *ptr) +{ + nvkms_memory_info_t *p; + + if (ptr == NULL) { + return; + } + + p = nv_container_of(ptr, nvkms_memory_info_t, data); + + nvkms_free(p, p->size); +} + +char *nvInternalStrDup(const char *str) +{ + size_t len; + char *newstr; + + if (str == NULL) { + return NULL; + } + + len = nvkms_strlen(str) + 1; + + newstr = nvInternalAlloc(len, FALSE); + + if (newstr == NULL) { + return NULL; + } + + nvkms_memcpy(newstr, str, len); + + return newstr; +} + +/*! + * Look up the value of a key in the set of registry keys provided at device + * allocation time, copied from the client request during nvAllocDevEvo(). + * + * \param[in] pDevEvo The device with regkeys to be checked. + * + * \param[in] key The name of the key to look up. + * + * \param[out] val The value of the key, if the key was specified. + * + * \return Whether the key was specified in the registry. 
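+ *
+ * A typical call site looks like this sketch (the key name is purely
+ * hypothetical):
+ *
+ *   NvU32 val;
+ *
+ *   if (nvGetRegkeyValue(pDevEvo, "SomeRegkeyName", &val)) {
+ *       // val was explicitly provided by the client
+ *   }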
+ */ +NvBool nvGetRegkeyValue(const NVDevEvoRec *pDevEvo, + const char *key, NvU32 *val) +{ + int i; + + for (i = 0; i < ARRAY_LEN(pDevEvo->registryKeys); i++) { + if (nvkms_strcmp(key, pDevEvo->registryKeys[i].name) == 0) { + *val = pDevEvo->registryKeys[i].value; + return TRUE; + } + } + + return FALSE; +} + +#if defined(DEBUG) + +#include "nv_memory_tracker.h" + +void *nvDebugAlloc(size_t size, int line, const char *file) +{ + return nvMemoryTrackerTrackedAlloc(&nvEvoGlobal.debugMemoryAllocationList, + size, line, file); +} + +void *nvDebugCalloc(size_t nmemb, size_t size, int line, const char *file) +{ + return nvMemoryTrackerTrackedCalloc(&nvEvoGlobal.debugMemoryAllocationList, + nmemb, size, line, file); +} + +void *nvDebugRealloc(void *ptr, size_t size, int line, const char *file) +{ + return nvMemoryTrackerTrackedRealloc(&nvEvoGlobal.debugMemoryAllocationList, + ptr, size, line, file); +} + +void nvDebugFree(void *ptr) +{ + nvMemoryTrackerTrackedFree(ptr); +} + +char *nvDebugStrDup(const char *str, int line, const char *file) +{ + size_t size = nvkms_strlen(str); + char *newStr = nvDebugAlloc(size + 1, line, file); + + if (newStr == NULL) { + return NULL; + } + + nvkms_memcpy(newStr, str, size); + newStr[size] = '\0'; + + return newStr; +} + +void nvReportUnfreedAllocations(void) +{ + nvMemoryTrackerPrintUnfreedAllocations( + &nvEvoGlobal.debugMemoryAllocationList); +} + +void nvMemoryTrackerPrintf(const char *format, ...) +{ + va_list ap; + va_start(ap, format); + nvVEvoLog(EVO_LOG_WARN, NV_INVALID_GPU_LOG_INDEX, format, ap); + va_end(ap); +} + +void *nvMemoryTrackerAlloc(size_t size) +{ + return nvkms_alloc(size, FALSE); +} + +void nvMemoryTrackerFree(void *ptr, size_t size) +{ + nvkms_free(ptr, size); +} + +void nvMemoryTrackerMemset(void *s, int c, size_t n) +{ + nvkms_memset(s, c, n); +} + +void nvMemoryTrackerMemcpy(void *dest, const void *src, size_t n) +{ + nvkms_memcpy(dest, src, n); +} + +#endif /* DEBUG */ + +/* + * The C++ displayPort library source code introduces a reference to + * __cxa_pure_virtual. This should never actually get called, so + * simply assert. + */ +void __cxa_pure_virtual(void); + +void __cxa_pure_virtual(void) +{ + nvAssert(!"Pure virtual function called"); +} + +/* Import functions required by unix_rm_handle */ + +#if defined(DEBUG) + +void nvUnixRmHandleDebugAssert(const char *expString, + const char *filenameString, + const char *funcString, + const unsigned lineNumber) +{ + nvDebugAssert(expString, filenameString, funcString, lineNumber); +} + +void nvUnixRmHandleLogMsg(NvU32 level, const char *fmt, ...) 
+{ + + va_list ap; + va_start(ap, fmt); + + /* skip verbose messages */ + if (level < NV_UNIX_RM_HANDLE_DEBUG_VERBOSE) { + nvVEvoLog(EVO_LOG_WARN, NV_INVALID_GPU_LOG_INDEX, fmt, ap); + } + + va_end(ap); +} + +#endif /* DEBUG */ + +void *nvUnixRmHandleReallocMem(void *oldPtr, NvLength newSize) +{ + return nvRealloc(oldPtr, newSize); +} + +void nvUnixRmHandleFreeMem(void *ptr) +{ + nvFree(ptr); +} + +/* Import functions required by nv_assert */ + +#if defined(DEBUG) + +void nvDebugAssert(const char *expString, const char *filenameString, + const char *funcString, const unsigned int lineNumber) +{ + nvEvoLog(EVO_LOG_WARN, "NVKMS Assert @%s:%d:%s(): '%s'", + filenameString, lineNumber, funcString, expString); +} + +#endif /* DEBUG */ diff --git a/src/nvidia-modeset/src/nvkms-vblank-sem-control.c b/src/nvidia-modeset/src/nvkms-vblank-sem-control.c new file mode 100644 index 0000000..73577bb --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-vblank-sem-control.c @@ -0,0 +1,337 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-types.h" +#include "nvkms-utils.h" +#include "nvkms-surface.h" +#include "nvkms-modeset.h" +#include "nvkms-vblank-sem-control.h" + + +static void +VblankSemControlWrite( + NVVblankSemControlHeadEntry *pEntry, + NvU64 vblankCount, + NvBool bAccel) +{ + volatile struct NvKmsVblankSemControlDataOneHead *pData = + pEntry->pDataOneHead; + const NvU32 requestCounter = + bAccel ? pData->requestCounterAccel : pData->requestCounter; + + // + // Write the current vblankCount and GPU time, and release the + // semaphore. Be sure to release the semaphore last, so that consumers + // of these fields can use the semaphore to know when the other fields + // are ready. + // + pData->vblankCount = vblankCount; + + // + // Use gcc builtin to ensure the pData->semaphore write is ordered after the + // above. + // + __sync_synchronize(); + + pData->semaphore = requestCounter; + + // + // Record the request count and current vblankCount, for computation + // next time. 
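+    //
+    // A consumer is expected to mirror this ordering (sketch, assuming the
+    // client polls from the CPU): wait until pData->semaphore reaches the
+    // written request value, issue a read barrier (e.g.
+    // __sync_synchronize()), and only then read vblankCount.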
+ // + pEntry->previousRequestCounter = requestCounter; + pEntry->previousVblankCount = vblankCount; +} + +static void VblankSemControlCallback( + NVDispEvoRec *pDispEvo, + NVVBlankCallbackPtr pCallbackData) +{ + NVDispVblankApiHeadState *pVblankApiHeadState = pCallbackData->pUserData; + const NvU64 vblankCount = pVblankApiHeadState->vblankCount; + NVVblankSemControlHeadEntry *pEntry; + + nvListForEachEntry( + pEntry, &pVblankApiHeadState->vblankSemControl.list, listEntry) { + + volatile struct NvKmsVblankSemControlDataOneHead *pData = + pEntry->pDataOneHead; + NvU32 flags, swapInterval; + + const NvU32 requestCounter = pData->requestCounter; + + // + // Use gcc builtin to ensure the pData->requestCounter read is ordered + // before the below. + // + __sync_synchronize(); + + /* + * If this entry does not have a new request, skip it. But, still + * update the vblankCount so that the client always has access to the + * current vblankCount. + */ + if (requestCounter == pEntry->previousRequestCounter) { + pData->vblankCount = vblankCount; + continue; + } + + flags = pData->flags; + + swapInterval = DRF_VAL(KMS, _VBLANK_SEM_CONTROL, _SWAP_INTERVAL, flags); + + /* + * If the requested swapInterval is not satisfied, skip this entry. + */ + if (swapInterval != 0) { + if (vblankCount < (pEntry->previousVblankCount + swapInterval)) { + continue; + } + } + + VblankSemControlWrite(pEntry, vblankCount, FALSE /* bAccel */); + } +} + +static NvBool EnableVblankSemControlOneHead( + NVDispEvoRec *pDispEvo, + NvU32 apiHead, + NVVblankSemControl *pVblankSemControl, + struct NvKmsVblankSemControlDataOneHead *pDataOneHead) +{ + NVDispVblankApiHeadState *pVblankApiHeadState = &pDispEvo->vblankApiHeadState[apiHead]; + NVVblankSemControlHeadEntry *pEntry; + const NvBool isFirstEntry = + nvListIsEmpty(&pVblankApiHeadState->vblankSemControl.list); + + pEntry = &pVblankSemControl->headEntry[apiHead]; + + pEntry->pDataOneHead = pDataOneHead; + pEntry->previousRequestCounter = 0; + pEntry->previousVblankCount = pVblankApiHeadState->vblankCount; + + // + // If this is the first enabled vblank sem control on head, add a vblank + // callback. Note we specify addToFront=true, so that this callback is + // sequenced before any NotifyVblank callbacks (those use addToFront=false). + // + if (isFirstEntry) { + pVblankApiHeadState->vblankSemControl.pCallbackPtr = + nvApiHeadRegisterVBlankCallback(pDispEvo, + apiHead, + VblankSemControlCallback, + pVblankApiHeadState, + 0 /* listIndex */); + if (pVblankApiHeadState->vblankSemControl.pCallbackPtr == NULL) { + nvkms_memset(pEntry, 0, sizeof(*pEntry)); + return FALSE; + } + } + + nvListAdd(&pEntry->listEntry, &pVblankApiHeadState->vblankSemControl.list); + + return TRUE; +} + +static void DisableVblankSemControlOneHead( + NVDispEvoRec *pDispEvo, + NvU32 apiHead, + NVVblankSemControlHeadEntry *pEntry) +{ + NVDispVblankApiHeadState *pVblankApiHeadState = &pDispEvo->vblankApiHeadState[apiHead]; + + // + // Accelerate any pending semaphores before disabling the vblank sem control. + // + VblankSemControlWrite( + pEntry, pVblankApiHeadState->vblankCount, TRUE /* bAccel */); + + nvListDel(&pEntry->listEntry); + + // + // If that was the last enabled vblank sem control on head, delete the + // vblank callback. 
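+    // This mirrors EnableVblankSemControlOneHead(), which registered the
+    // callback when the first entry was added to the list.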
+ // + if (nvListIsEmpty(&pVblankApiHeadState->vblankSemControl.list)) { + nvApiHeadUnregisterVBlankCallback( + pDispEvo, pVblankApiHeadState->vblankSemControl.pCallbackPtr); + pVblankApiHeadState->vblankSemControl.pCallbackPtr = NULL; + } + + nvkms_memset(pEntry, 0, sizeof(*pEntry)); +} + +static NvBool EnableVblankSemControlValidate( + NVDevEvoRec *pDevEvo, + NVDispEvoRec *pDispEvo, + NVSurfaceEvoRec *pSurfaceEvo, + NvU64 surfaceOffset) +{ + if (!pDevEvo->supportsVblankSemControl) { + return FALSE; + } + + /* + * We cannot enable VblankSemControl if the requested offset within the + * surface is too large. + */ + if (A_plus_B_greater_than_C_U64( + surfaceOffset, + sizeof(struct NvKmsVblankSemControlData), + pSurfaceEvo->planes[0].rmObjectSizeInBytes)) { + return FALSE; + } + + /* + * NvKmsVblankSemControlData must be at least 8-byte aligned, so that GPU + * semaphore releases can write to 8-byte fields within it with natural + * alignment. + */ + if ((surfaceOffset % sizeof(NvU64)) != 0) { + return FALSE; + } + + if (nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo)) { + return FALSE; + } + + return TRUE; +} + +static void DisableVblankSemControl( + NVDispEvoPtr pDispEvo, + NVVblankSemControl *pVblankSemControl) +{ + NvU32 apiHead; + + for (apiHead = 0; apiHead < ARRAY_LEN(pVblankSemControl->headEntry); apiHead++) { + NVVblankSemControlHeadEntry *pEntry = + &pVblankSemControl->headEntry[apiHead]; + DisableVblankSemControlOneHead(pDispEvo, apiHead, pEntry); + } +} + +NvBool nvEvoDisableVblankSemControl( + NVDevEvoRec *pDevEvo, + NVVblankSemControl *pVblankSemControl) +{ + NVDispEvoPtr pDispEvo = pDevEvo->pDispEvo[pVblankSemControl->dispIndex]; + + if (!pDevEvo->supportsVblankSemControl) { + return FALSE; + } + + DisableVblankSemControl(pDispEvo, pVblankSemControl); + + nvEvoDecrementSurfaceRefCnts(pDevEvo, pVblankSemControl->pSurfaceEvo); + nvFree(pVblankSemControl); + return TRUE; +} + +NVVblankSemControl *nvEvoEnableVblankSemControl( + NVDevEvoRec *pDevEvo, + NVDispEvoRec *pDispEvo, + NVSurfaceEvoRec *pSurfaceEvo, + NvU64 surfaceOffset) +{ + struct NvKmsVblankSemControlData *pData; + NVVblankSemControl *pVblankSemControl; + NvU32 apiHead; + + if (!EnableVblankSemControlValidate(pDevEvo, pDispEvo, + pSurfaceEvo, surfaceOffset)) { + return NULL; + } + + /* + * Lazily map the surface; note we'll just leave the surface mapped after + * this point. + */ + if (pSurfaceEvo->cpuAddress[0] == NULL) { + if (!nvEvoCpuMapSurface(pDevEvo, pSurfaceEvo)) { + return NULL; + } + } + + pData = (struct NvKmsVblankSemControlData *) + (((NvU8 *) pSurfaceEvo->cpuAddress[0]) + surfaceOffset); + + pVblankSemControl = nvCalloc(1, sizeof(*pVblankSemControl)); + + if (pVblankSemControl == NULL) { + return NULL; + } + + pVblankSemControl->dispIndex = pDispEvo->displayOwner; + pVblankSemControl->pSurfaceEvo = pSurfaceEvo; + + for (apiHead = 0; apiHead < ARRAY_LEN(pVblankSemControl->headEntry); apiHead++) { + if (!EnableVblankSemControlOneHead(pDispEvo, + apiHead, + pVblankSemControl, + &pData->head[apiHead])) { + /* + * EnableVblankSemControlOneHead() failed for one head, but previous + * heads may have succeeded. Unroll by disabling vblank_sem_control + * for all heads where the pVblankSemControl is enabled. 
+ */ + DisableVblankSemControl(pDispEvo, pVblankSemControl); + nvFree(pVblankSemControl); + return NULL; + } + } + + nvEvoIncrementSurfaceRefCnts(pSurfaceEvo); + + return pVblankSemControl; +} + +NvBool nvEvoAccelVblankSemControls( + NVDevEvoPtr pDevEvo, + NVDispEvoRec *pDispEvo, + NvU32 apiHeadMask) +{ + NvU32 apiHead; + + if (!pDevEvo->supportsVblankSemControl) { + return FALSE; + } + + FOR_ALL_HEADS(apiHead, apiHeadMask) { + + NVDispVblankApiHeadState *pVblankApiHeadState = + &pDispEvo->vblankApiHeadState[apiHead]; + NVVblankSemControlHeadEntry *pEntry; + + nvListForEachEntry( + pEntry, &pVblankApiHeadState->vblankSemControl.list, listEntry) { + + VblankSemControlWrite( + pEntry, + pVblankApiHeadState->vblankCount, + TRUE /* bAccel */); + } + } + + return TRUE; +} diff --git a/src/nvidia-modeset/src/nvkms-vrr.c b/src/nvidia-modeset/src/nvkms-vrr.c new file mode 100644 index 0000000..4c8e26a --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-vrr.c @@ -0,0 +1,1112 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-dma.h" +#include "nvkms-evo.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "nvkms-vrr.h" +#include "dp/nvdp-connector-event-sink.h" +#include "nvkms-hdmi.h" +#include "nvkms-dpy.h" + +#include + + +/*! + * This file contains routines for handling Variable Refresh Rate (VRR) display + * mode, also known as G-SYNC (not to be confused with the feature formerly + * known as G-SYNC, which is now called Quadro Sync). + * + * VRR in NVKMS is handled in several phases: + * + * 1. During modeset, if NvKmsSetModeRequest::allowVrr is true and VRR-capable + * dpys are present, VRR is "enabled". This means that + * + * a. VRR is disabled, if it was enabled before. + * b. The raster timings are adjusted by extending the back porch by 2 + * lines. This signals to the monitor that it should enter G-SYNC mode. + * c. The mode is set. + * d. (EVO only) The RM VRR state machine is initialized, but left in + * "suspended" mode. + * e. Raster lock and frame lock are disabled. + * + * pDevEvo->vrr.enabled indicates whether VRR was enabled successfully at + * modeset time. + * + * 2. At flip time, for each apiHead, if pHeadState[apiHead].flip.allowVrr is true, + * VRR is "activated". + * + * a. Stall lock is enabled. + * b. 
(NVDisplay only) The RG is switched from continuous mode to one-shot
+ *       mode.
+ *    c. (EVO only) RM's VRR state machine is enabled.
+ *    d. (EVO only) The NVKMS client is told to release a special frame ready
+ *       semaphore which tells RM to unstall the head.
+ *    e. (NVDisplay only) The window channel flip is submitted with
+ *       NVC37E_UPDATE_RELEASE_ELV_TRUE to trigger an unstall when the frame is
+ *       ready.
+ *
+ *    pApiHeadState->vrr.active indicates whether VRR was activated successfully
+ *    on a particular apiHead at flip time.
+ *
+ * 3. Also at flip time, for each apiHead, if pHeadState[apiHead].flip.allowVrr is
+ *    false, VRR is "deactivated".
+ *
+ *    a. Stall lock is disabled.
+ *    b. (NVDisplay only) the RG is switched from one-shot mode to continuous
+ *       mode.
+ *    c. (EVO only) RM's VRR state machine is suspended.
+ */
+
+static NvBool SetVrrActivePriv(NVDevEvoPtr pDevEvo,
+                               const NvU32 applyAllowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES],
+                               const NvU32 vrrActiveApiHeadMasks[NVKMS_MAX_SUBDEVICES]);
+
+static void ConfigVrrPstateSwitch(NVDispEvoPtr pDispEvo,
+                                  NvBool vrrEnabled,
+                                  NvBool vrrState,
+                                  NvBool vrrDirty,
+                                  NvU32 head);
+
+
+/*!
+ * Allocate the VRR semaphore surface.
+ *
+ * Only one array of VRR semaphores is needed per "head group", which for our
+ * purposes means a pDevEvo. This array is allocated when the device is
+ * initialized and kept around for the lifetime of the pDevEvo.
+ */
+void nvAllocVrrEvo(NVDevEvoPtr pDevEvo)
+{
+    NvU32 handle;
+    NvU64 size = NVKMS_VRR_SEMAPHORE_SURFACE_SIZE;
+
+    /* On GPUs that support the HEAD_SET_DISPLAY_RATE method (nvdisplay), we
+     * don't need a VRR semaphore surface. */
+    if (pDevEvo->hal->caps.supportsDisplayRate) {
+        return;
+    }
+
+    handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
+
+    if (nvRmAllocSysmem(pDevEvo, handle, NULL, &pDevEvo->vrr.pSemaphores,
+                        size, NVKMS_MEMORY_NISO)) {
+        pDevEvo->vrr.semaphoreHandle = handle;
+    } else {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                    "Failed to allocate G-SYNC semaphore memory");
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle);
+    }
+}
+
+void nvFreeVrrEvo(NVDevEvoPtr pDevEvo)
+{
+    if (pDevEvo->vrr.semaphoreHandle != 0) {
+        if (pDevEvo->vrr.pSemaphores != NULL) {
+            nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
+                               pDevEvo->deviceHandle,
+                               pDevEvo->vrr.semaphoreHandle,
+                               pDevEvo->vrr.pSemaphores,
+                               0);
+            pDevEvo->vrr.pSemaphores = NULL;
+        }
+        nvRmApiFree(nvEvoGlobal.clientHandle, pDevEvo->deviceHandle,
+                    pDevEvo->vrr.semaphoreHandle);
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
+                           pDevEvo->vrr.semaphoreHandle);
+        pDevEvo->vrr.semaphoreHandle = 0;
+    }
+}
+
+NvBool nvExportVrrSemaphoreSurface(const NVDevEvoRec *pDevEvo, int fd)
+{
+    // Export the memory as an FD.
+    NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS exportParams = { };
+    const NvU32 hMemory = pDevEvo->vrr.semaphoreHandle;
+    NvU32 status;
+
+    if (hMemory == 0) {
+        return FALSE;
+    }
+
+    exportParams.fd = fd;
+    exportParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM;
+    exportParams.object.data.rmObject.hDevice = pDevEvo->deviceHandle;
+    exportParams.object.data.rmObject.hObject = hMemory;
+
+    status = nvRmApiControl(nvEvoGlobal.clientHandle,
+                            nvEvoGlobal.clientHandle,
+                            NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD,
+                            &exportParams, sizeof(exportParams));
+
+    return status == NVOS_STATUS_SUCCESS;
+}
+
+/*!
+ * Return TRUE if the dpy supports G-SYNC.
+ */ +static NvBool DpyIsGsync(const NVDpyEvoRec *pDpyEvo) +{ + return nvIsGsyncDpyVrrType(pDpyEvo->vrr.type); +} + +static NvBool AnyEnabledAdaptiveSyncDpys(const NVDevEvoRec *pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + + if (nvIsAdaptiveSyncDpyVrrType(pHeadState->timings.vrr.type)) { + return TRUE; + } + } + } + + return FALSE; +} + +static NvBool AnyActiveVrrHeads(const NVDevEvoRec *pDevEvo) +{ + NvU32 sd, apiHead; + NVDispEvoPtr pDispEvo; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + pDispEvo = pDevEvo->gpus[sd].pDispEvo; + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + continue; + } + + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + + if (!pApiHeadState->vrr.active) { + continue; + } + + return NV_TRUE; + } + } + return NV_FALSE; +} + +static NvBool DpyAllowsAdaptiveSync( + const NVDpyEvoRec *pDpyEvo, + const enum NvKmsAllowAdaptiveSync allowAdaptiveSync, + const NvModeTimings *pTimings) +{ + /* + * HDMI VRR and HDMI 3D both use the vendor specific infoframe in HW, + * so disallow HDMI VRR when attempting to set an HDMI 3D mode. + */ + + if (pTimings->hdmi3D) { + return FALSE; + } + + return ((allowAdaptiveSync == + NVKMS_ALLOW_ADAPTIVE_SYNC_DEFAULTLISTED_ONLY) && + (pDpyEvo->vrr.type == + NVKMS_DPY_VRR_TYPE_ADAPTIVE_SYNC_DEFAULTLISTED)) || + ((allowAdaptiveSync == NVKMS_ALLOW_ADAPTIVE_SYNC_ALL) && + nvDpyIsAdaptiveSync(pDpyEvo)); +} + +NvBool nvDispSupportsVrr(const NVDispEvoRec *pDispEvo) +{ + // Don't allow VRR if a framelock device is present. + // (In other words, don't allow G-SYNC with Quadro Sync). + return !pDispEvo->pFrameLockEvo; +} + +enum NvKmsDpyVRRType +nvGetAllowedDpyVrrType(const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pTimings, + enum NvKmsStereoMode stereoMode, + const NvBool allowGsync, + const enum NvKmsAllowAdaptiveSync allowAdaptiveSync) +{ + /* + * Mark these mode timings as indicating a VRR mode, even if the timings + * don't need to be adjusted; this is used to distinguish between VRR and + * non-VRR heads elsewhere. + */ + + if ((stereoMode == NVKMS_STEREO_DISABLED) && + ((allowGsync && DpyIsGsync(pDpyEvo)) || + DpyAllowsAdaptiveSync(pDpyEvo, allowAdaptiveSync, pTimings))) { + return pDpyEvo->vrr.type; + } + + return NVKMS_DPY_VRR_TYPE_NONE; +} + +static NvBool GetEdidTimeoutMicroseconds( + const NVDpyEvoRec *pDpyEvo, + const NVHwModeTimingsEvo *pTimings, + NvU32 *pEdidTimeoutMicroseconds) +{ + const NvU32 rr10kHz = nvGetRefreshRate10kHz(pTimings); + const NVParsedEdidEvoRec *pParsedEdid = &pDpyEvo->parsedEdid; + const NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo = NULL; + const NvU32 nominalRefreshRateHz = rr10kHz / 10000; // XXX round? + NVT_PROTOCOL sinkProtocol = NVT_PROTOCOL_UNKNOWN; + NvU32 fmin; + + /* + At this point, we have verified that we have a valid display, so + nominalRefreshRateHz should be != 0. Assert so Coverity doesn't complain + about potential divide by 0 later in the function. 
+ */ + nvAssert(nominalRefreshRateHz != 0); + + if (!pParsedEdid->valid) { + return FALSE; + } + + // XXX not sufficient; see what DD does in changelist 34157172 + if (nvDpyUsesDPLib(pDpyEvo)) { + sinkProtocol = NVT_PROTOCOL_DP; + } else if (nvDpyIsHdmiEvo(pDpyEvo)) { + sinkProtocol = NVT_PROTOCOL_HDMI; + } + + fmin = NvTiming_GetVrrFmin(&pParsedEdid->info, + pDisplayIdInfo, + nominalRefreshRateHz, + sinkProtocol); + + if (fmin == 0) { + if (pDpyEvo->internal && pDpyEvo->pDispEvo->vrr.hasPlatformCookie) { + + /* + * An internal notebook VRR panel must have a non-zero fmin. The + * recommendation from hardware is to use a default of fmin = + * rr/2.4. So, compute timeoutUsec as: + * + * timeoutUsec = 10^6 / fmin + * = 10^6 / (rr/2.4) + * = 10^6 * (2.4/rr) + * = 10^5 * 24 / rr + */ + *pEdidTimeoutMicroseconds = 2400000 / nominalRefreshRateHz; + return TRUE; + } + + if (nvIsGsyncDpyVrrType(pDpyEvo->vrr.type)) { + /* GSYNC can have fmin==0; i.e., the panel is self-refreshing. */ + *pEdidTimeoutMicroseconds = 0; + return TRUE; + } + + /* Otherwise, VRR is not possible. */ + return FALSE; + } + + *pEdidTimeoutMicroseconds = 1000000 / fmin; + + return TRUE; +} + +/*! Adjust mode timings as necessary for VRR. */ +void nvAdjustHwModeTimingsForVrrEvo(const NVDpyEvoRec *pDpyEvo, + const enum NvKmsDpyVRRType vrrType, + const NvU32 vrrOverrideMinRefreshRate, + NVHwModeTimingsEvoPtr pTimings) +{ + NvU32 timeoutMicroseconds; + NvU32 edidTimeoutMicroseconds; + + if (vrrType == NVKMS_DPY_VRR_TYPE_NONE) { + return; + } + + if (!GetEdidTimeoutMicroseconds(pDpyEvo, + pTimings, + &edidTimeoutMicroseconds)) { + return; + } + + // Allow overriding the EDID min refresh rate on Adaptive-Sync + // displays. + if (nvIsAdaptiveSyncDpyVrrType(vrrType) && vrrOverrideMinRefreshRate) { + NvU32 minMinRefreshRate, maxMinRefreshRate; + NvU32 clampedMinRefreshRate; + + nvGetDpyMinRefreshRateValidValues(pTimings, + vrrType, + edidTimeoutMicroseconds, + &minMinRefreshRate, + &maxMinRefreshRate); + + clampedMinRefreshRate = + NV_MAX(vrrOverrideMinRefreshRate, minMinRefreshRate); + + clampedMinRefreshRate = + NV_MIN(clampedMinRefreshRate, maxMinRefreshRate); + + timeoutMicroseconds = 1000000 / clampedMinRefreshRate; + } else { + timeoutMicroseconds = edidTimeoutMicroseconds; + } + + // Disallow VRR if the refresh rate is less than 110% of the VRR minimum + // refresh rate. + if (timeoutMicroseconds > 0 && + nvGetRefreshRate10kHz(pTimings) < + (((NvU64) 1000000 * 11000) / timeoutMicroseconds)) { + return; + } + + /* + * On G-SYNC panels, the back porch extension is used to indicate to + * the monitor that VRR is enabled. It is not necessary on + * Adaptive-Sync displays. 
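+     *
+     * For example, with hypothetical timings of rasterSize.y = 1125, the
+     * adjustment below yields 1127 total lines, with rasterBlankStart.y and
+     * rasterBlankEnd.y shifted by the same two lines.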
+ */ + if (nvIsGsyncDpyVrrType(vrrType)) { + pTimings->rasterSize.y += 2; + pTimings->rasterBlankEnd.y += 2; + pTimings->rasterBlankStart.y += 2; + } + + pTimings->vrr.timeoutMicroseconds = timeoutMicroseconds; + pTimings->vrr.type = vrrType; +} + +static void TellRMAboutVrrHead(NVDispEvoPtr pDispEvo, + NVDispHeadStateEvoRec *pHeadState, + NvBool vrrPossible) +{ + if (pHeadState->activeRmId != 0) { + NV0073_CTRL_SYSTEM_VRR_DISPLAY_INFO_PARAMS params = { }; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = pHeadState->activeRmId; + params.bAddition = vrrPossible; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDispEvo->pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_VRR_DISPLAY_INFO, + ¶ms, sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_WARN, + "NV0073_CTRL_CMD_SYSTEM_VRR_DISPLAY_INFO failed"); + } + } +} + +static void RmDisableVrr(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 head, dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (head = 0; head < pDevEvo->numHeads; head++) { + ConfigVrrPstateSwitch(pDispEvo, FALSE /* vrrEnabled */, + FALSE /* vrrState */, + TRUE /* vrrDirty */, + head); + } + } + nvAssert(pDevEvo->hal->caps.supportsDisplayRate); +} + +void nvDisableVrr(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 head, dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (head = 0; head < pDevEvo->numHeads; head++) { + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + + TellRMAboutVrrHead(pDispEvo, pHeadState, FALSE); + } + } + + if (!pDevEvo->vrr.enabled) { + return; + } + + // set vrr on all apiHeads to inactive + NvU32 fullApiHeadMasks[NVKMS_MAX_SUBDEVICES]; + NvU32 emptyApiHeadMasks[NVKMS_MAX_SUBDEVICES]; + nvkms_memset(fullApiHeadMasks, 0xFF, sizeof(fullApiHeadMasks)); + nvkms_memset(emptyApiHeadMasks, 0, sizeof(emptyApiHeadMasks)); + + SetVrrActivePriv(pDevEvo, fullApiHeadMasks, emptyApiHeadMasks); + RmDisableVrr(pDevEvo); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (head = 0; head < pDevEvo->numHeads; head++) { + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + + if ((pHeadState->pConnectorEvo != NULL) && + nvIsAdaptiveSyncDpyVrrType(pHeadState->timings.vrr.type)) { + if (nvConnectorUsesDPLib(pHeadState->pConnectorEvo)) { + nvDPLibSetAdaptiveSync(pDispEvo, head, FALSE); + } else { + nvHdmiSetVRR(pDispEvo, head, FALSE); + } + } + } + } + + pDevEvo->vrr.enabled = FALSE; + nvAssert(!AnyActiveVrrHeads(pDevEvo)); +} + +static NvBool AnyEnabledGsyncDpys(const NVDevEvoRec *pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + + if (nvIsGsyncDpyVrrType(pHeadState->timings.vrr.type)) { + return TRUE; + } + } + } + + return FALSE; +} + +static NvBool RmEnableVrr(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex, head; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (head = 0; head < pDevEvo->numHeads; head++) { + ConfigVrrPstateSwitch(pDispEvo, TRUE /* vrrEnabled */, + FALSE /* vrrState */, + TRUE/* vrrDirty */, + head); + } + } + nvAssert(pDevEvo->hal->caps.supportsDisplayRate); + return TRUE; +} + +void nvGetDpyMinRefreshRateValidValues( + const NVHwModeTimingsEvo *pTimings, + const enum NvKmsDpyVRRType vrrType, + const NvU32 
edidTimeoutMicroseconds, + NvU32 *minMinRefreshRate, + NvU32 *maxMinRefreshRate) +{ + NvU32 edidMinRefreshRate; + + if (nvIsAdaptiveSyncDpyVrrType(vrrType)) { + /* + * Adaptive-Sync monitors must always define a nonzero minimum refresh + * rate in the EDID, and a modeset may override this within a range + * of NVKMS_VRR_MIN_REFRESH_RATE_MAX_VARIANCE, as long as the minimum + * is not below 1hz and the maximum does not exceed the current + * refresh rate. + */ + NvU32 minTimeoutMicroseconds = + axb_div_c(pTimings->rasterSize.y * 1000, + pTimings->rasterSize.x, pTimings->pixelClock); + NvU32 maxRefreshRate = 1000000 / minTimeoutMicroseconds; + + nvAssert(edidTimeoutMicroseconds != 0); + + edidMinRefreshRate = + 1000000 / edidTimeoutMicroseconds; + + if (edidMinRefreshRate <= NVKMS_VRR_MIN_REFRESH_RATE_MAX_VARIANCE) { + *minMinRefreshRate = 1; + } else { + *minMinRefreshRate = edidMinRefreshRate - + NVKMS_VRR_MIN_REFRESH_RATE_MAX_VARIANCE; + } + + *maxMinRefreshRate = NV_MIN(maxRefreshRate, + edidMinRefreshRate + NVKMS_VRR_MIN_REFRESH_RATE_MAX_VARIANCE); + } else { + /* + * Non-Adaptive-Sync panels may not override the EDID-provided minimum + * refresh rate, which will be 1hz for most G-SYNC panels or 0hz for + * true self-refresh panels. + */ + edidMinRefreshRate = edidTimeoutMicroseconds ? + 1000000 / edidTimeoutMicroseconds : 0; + *minMinRefreshRate = *maxMinRefreshRate = edidMinRefreshRate; + } +} + +/*! + * Modify the VRR state to enable (but not activate) VRR at modeset time. + * + * This prepares VRR displays for VRR (through a DP MSA override for + * Adaptive-Sync and a backporch extension for G-SYNC) and sets up the RM + * VRR state machine (for pre-nvdisplay) but does not actually start VRR + * flipping until nvSetVrrActive() is called at flip time. + * + * \param[in] pDevEvo The device that is enabling VRR. + */ +void nvEnableVrr(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 head, dispIndex; + + nvAssert(!pDevEvo->vrr.enabled); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (head = 0; head < pDevEvo->numHeads; head++) { + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + + if ((pHeadState->pConnectorEvo != NULL) && + nvIsAdaptiveSyncDpyVrrType(pHeadState->timings.vrr.type)) { + if (nvConnectorUsesDPLib(pHeadState->pConnectorEvo)) { + nvDPLibSetAdaptiveSync(pDispEvo, head, TRUE); + } else { + nvHdmiSetVRR(pDispEvo, head, TRUE); + } + } + } + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (head = 0; head < pDevEvo->numHeads; head++) { + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + + // To allow VRR-based mclk switching, RM needs to know which heads + // are driving VRR displays capable of extending vblank. This + // includes all G-SYNC displays (regardless of whether the modeset + // indicates that G-SYNC is allowed) but only Adaptive-Sync + // displays which put the display into Adaptive-Sync mode by calling + // nvDPLibSetAdaptiveSync above. 
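+            // (pHeadState->timings.vrr.type is NVKMS_DPY_VRR_TYPE_NONE for any
+            // head where VRR was not allowed at modeset time, so those heads
+            // are reported here as not VRR-capable.)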
+ TellRMAboutVrrHead(pDispEvo, + pHeadState, + (pHeadState->timings.vrr.type != + NVKMS_DPY_VRR_TYPE_NONE)); + } + } + + if (!(AnyEnabledGsyncDpys(pDevEvo) || + AnyEnabledAdaptiveSyncDpys(pDevEvo))) { + return; + } + + if (!RmEnableVrr(pDevEvo)) { + return; + } + + pDevEvo->vrr.enabled = TRUE; +} + +static void ClearElvBlock(NVDispEvoPtr pDispEvo, NvU32 head) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS params = { }; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = pHeadState->activeRmId; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDispEvo->pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_CLEAR_ELV_BLOCK, + ¶ms, sizeof(params)) + != NVOS_STATUS_SUCCESS) { + nvAssert(!"CLEAR_ELV_BLOCK failed"); + } +} + +static void ConfigVrrPstateSwitch(NVDispEvoPtr pDispEvo, NvBool vrrEnabled, + NvBool vrrState, NvBool vrrDirty, NvU32 head) +{ + NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS params = { }; + NvU32 ret; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + + if (nvkms_disable_vrr_memclk_switch() || + (pTimings->vrr.type == NVKMS_DPY_VRR_TYPE_NONE)) { + return; + } + + /* + * An inactive head should always have pTimings->vrr.type == + * NVKMS_DPY_VRR_TYPE_NONE and therefore return early above. + */ + nvAssert(nvHeadIsActive(pDispEvo, head)); + + params.displayId = pHeadState->activeRmId; + params.bVrrEnabled = vrrEnabled; + params.bVrrState = vrrState; + params.bVrrDirty = vrrDirty; + + if (params.bVrrDirty) { + NvU64 frameTimeUs = axb_div_c(pTimings->rasterSize.y * 1000ULL, + pTimings->rasterSize.x, pTimings->pixelClock); + NvU64 timePerLineNs = (frameTimeUs * 1000ULL) / pTimings->rasterSize.y; + + NvU64 maxFrameTimeUs = pTimings->vrr.timeoutMicroseconds; + NvU64 maxVblankExtTimeNs = (maxFrameTimeUs - frameTimeUs) * 1000ULL; + + params.maxVblankExtension = maxVblankExtTimeNs / timePerLineNs; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDispEvo->pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_CONFIG_VRR_PSTATE_SWITCH, + ¶ms, sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_WARN, + "NV0073_CTRL_CMD_SYSTEM_CONFIG_VRR_PSTATE_SWITCH failed"); + } +} + +static void SetStallLockOneDisp(NVDispEvoPtr pDispEvo, NvU32 applyAllowVrrApiHeadMask, + NvU32 enableApiHeadMask) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvBool enableVrrOnHead[NVKMS_MAX_HEADS_PER_DISP]; + NVEvoUpdateState updateState = { }; + NvU32 apiHead, head; + + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + if (!(applyAllowVrrApiHeadMask & (1 << apiHead))) { + continue; + } + NvBool enable = enableApiHeadMask & (1 << apiHead); + if (!enable) { + continue; + } + FOR_EACH_EVO_HW_HEAD_IN_MASK(pDispEvo->apiHeadState[apiHead].hwHeadsMask, head) { + // ignores inactive heads + ConfigVrrPstateSwitch(pDispEvo, TRUE /* vrrEnabled */, + TRUE /* vrrState */, + FALSE/* vrrDirty */, + head); + } + } + + nvPushEvoSubDevMaskDisp(pDispEvo); + + // Make sure any pending updates that we didn't wait for previously have + // completed. 
+ nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__); + + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + if (!(applyAllowVrrApiHeadMask & (1 << apiHead))) { + continue; + } + + NvBool enable = enableApiHeadMask & (1 << apiHead); + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pDispEvo->apiHeadState[apiHead].hwHeadsMask, head) { + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NvU32 timeout = pHeadState->timings.vrr.timeoutMicroseconds; + + enableVrrOnHead[head] = ((pHeadState->timings.vrr.type != + NVKMS_DPY_VRR_TYPE_NONE) && enable); + + // ignores inactive heads + nvEvoArmLightweightSupervisor(pDispEvo, head, + enableVrrOnHead[head], TRUE); + if (!enableVrrOnHead[head]) { + ClearElvBlock(pDispEvo, head); + } + pDevEvo->hal->SetStallLock(pDispEvo, head, + enableVrrOnHead[head], + &updateState); + + if (pDevEvo->hal->caps.supportsDisplayRate) { + pDevEvo->hal->SetDisplayRate(pDispEvo, head, + enableVrrOnHead[head], + &updateState, + timeout); + } + } + } + + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + + + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + if (!(applyAllowVrrApiHeadMask & (1 << apiHead))) { + continue; + } + FOR_EACH_EVO_HW_HEAD_IN_MASK(pDispEvo->apiHeadState[apiHead].hwHeadsMask, head) { + nvEvoArmLightweightSupervisor(pDispEvo, head, + enableVrrOnHead[head], FALSE); + } + } + + nvPopEvoSubDevMask(pDevEvo); + + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + if (!(applyAllowVrrApiHeadMask & (1 << apiHead))) { + continue; + } + NvBool enable = enableApiHeadMask & (1 << apiHead); + if (enable) { + continue; + } + FOR_EACH_EVO_HW_HEAD_IN_MASK(pDispEvo->apiHeadState[apiHead].hwHeadsMask, head) { + ConfigVrrPstateSwitch(pDispEvo, TRUE /* vrrEnabled */, + FALSE /* vrrState */, + FALSE/* vrrDirty */, + head); + } + } +} + +static void SetStallLockOneDev(NVDevEvoPtr pDevEvo, + const NvU32 applyAllowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES], + const NvU32 enableApiHeadMasks[NVKMS_MAX_SUBDEVICES]) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + SetStallLockOneDisp(pDispEvo, + applyAllowVrrApiHeadMasks[dispIndex], enableApiHeadMasks[dispIndex]); + } +} + +/*! + * Modify the VRR state to activate or deactivate VRR on the heads of a pDevEvo. 
+ */ +static NvBool SetVrrActivePriv(NVDevEvoPtr pDevEvo, + const NvU32 applyAllowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES], + const NvU32 vrrActiveApiHeadMasks[NVKMS_MAX_SUBDEVICES]) +{ + NvU32 sd, apiHead; + NvU32 currVrrActiveApiHeadMasks[NVKMS_MAX_SUBDEVICES]; + NvBool isUpdate; + NVDispEvoPtr pDispEvo; + + nvkms_memset(currVrrActiveApiHeadMasks, 0, sizeof(currVrrActiveApiHeadMasks)); + isUpdate = NV_FALSE; + + if (!pDevEvo->vrr.enabled) { + return NV_TRUE; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + pDispEvo = pDevEvo->pDispEvo[sd]; + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + if (pDispEvo->apiHeadState[apiHead].vrr.active) { + currVrrActiveApiHeadMasks[sd] |= (1 << apiHead); + } + } + } + + // check if we are asking to update the existing activeMasks + for (sd = 0; sd < pDevEvo->numSubDevices; sd++){ + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + if (!(applyAllowVrrApiHeadMasks[sd] & (1 << apiHead))) { + continue; + } + if ((vrrActiveApiHeadMasks[sd] & (1 << apiHead)) != + (currVrrActiveApiHeadMasks[sd] & (1 << apiHead))) { + isUpdate = NV_TRUE; + break; + } + } + } + + if (!isUpdate) { + return NV_TRUE; + } + + SetStallLockOneDev(pDevEvo, applyAllowVrrApiHeadMasks, + vrrActiveApiHeadMasks); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + pDispEvo = pDevEvo->gpus[sd].pDispEvo; + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + if (!(applyAllowVrrApiHeadMasks[sd] & (1 << apiHead))) { + continue; + } + pDispEvo->apiHeadState[apiHead].vrr.active = + (vrrActiveApiHeadMasks[sd] & (1 << apiHead)) > 0; + } + } + pDevEvo->vrr.flipCounter = 0; + return NV_TRUE; +} + +void nvSetVrrActive(NVDevEvoPtr pDevEvo, + const NvU32 applyAllowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES], + const NvU32 vrrActiveApiHeadMasks[NVKMS_MAX_SUBDEVICES]) +{ + if (!SetVrrActivePriv(pDevEvo, applyAllowVrrApiHeadMasks, + vrrActiveApiHeadMasks)) { + nvDisableVrr(pDevEvo); + } +} + +/*! + * Override flip parameters for a head based on VRR state. + */ +void nvApplyVrrBaseFlipOverrides(const NVDispEvoRec *pDispEvo, NvU32 head, + const NVFlipChannelEvoHwState *pOld, + NVFlipChannelEvoHwState *pNew) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoRec *pHeadState = NULL; + + if (!pDevEvo->vrr.enabled) { + return; + } + + if (head != NV_INVALID_HEAD) { + pHeadState = &pDispEvo->headState[head]; + nvAssert(pDevEvo->head[head].layer[NVKMS_MAIN_LAYER]->caps.vrrTearingFlips); + } + + // Tell RM the real requested tearing mode so that it can honor + // __GL_SYNC_TO_VBLANK. + pNew->vrrTearing = pNew->tearing; + + // If this head is driving non-VRR displays, force swap interval to be 0. + // RM will block the flips as necessary using the pre-update trap methods + // based on the vrrTearing flag above. + if (pHeadState != NULL && + (pHeadState->timings.vrr.type == NVKMS_DPY_VRR_TYPE_NONE)) { + pNew->tearing = TRUE; + pNew->minPresentInterval = 0; + } + + // If oneshot mode is in use, and the previous flip was non-tearing with + // nonzero MIN_PRESENT_INTERVAL, and the new flip will be tearing with + // MIN_PRESENT_INTERVAL 0, then force this first new flip to be + // non-tearing to WAR bug 2406398 which causes these transitional flips + // to stall for up to the display's minimum refresh rate. 
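+    // In other words, only the transition {tearing = FALSE,
+    // minPresentInterval > 0} -> {tearing = TRUE, minPresentInterval == 0}
+    // is forced to be non-tearing; all other flips pass through unchanged.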
+ if ((pHeadState != NULL) && + (pHeadState->timings.vrr.type != NVKMS_DPY_VRR_TYPE_NONE) && + !pOld->tearing && + (pOld->minPresentInterval != 0) && + pNew->tearing && + (pNew->minPresentInterval == 0)) { + pNew->tearing = FALSE; + } +} + +void nvCancelVrrFrameReleaseTimers(NVDevEvoPtr pDevEvo, + const NvU32 applyAllowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES]) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex, apiHead; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvBool pendingCursorMotionUnflipped = NV_FALSE; + NvU32 applyAllowVrrApiHeadMask = applyAllowVrrApiHeadMasks[dispIndex]; + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + if (!(applyAllowVrrApiHeadMask & (1 << apiHead))) { + // This apiHead is not currently flipping: + // Check if this apiHead is pending cursor motion; + // if so, we don't want to cancel the unstall timer yet, + // as the cursor on this apiHead could then freeze up + pendingCursorMotionUnflipped = (pendingCursorMotionUnflipped) || + (pDispEvo->apiHeadState[apiHead].vrr.pendingCursorMotion); + } else { + // clear pendingCursorMotion for all flipped apiHeads + pDispEvo->apiHeadState[apiHead].vrr.pendingCursorMotion = NV_FALSE; + } + } + + if (!pendingCursorMotionUnflipped) { + nvkms_free_timer(pDispEvo->vrr.unstallTimer); + pDispEvo->vrr.unstallTimer = NULL; + } + } +} + +/* Get active vrr type used by the flips. */ +enum NvKmsVrrFlipType nvGetActiveVrrType(const NVDevEvoRec *pDevEvo) +{ + /* + * If VRR is active, and any connected display is G-SYNC, then report that + * this flip was a G-SYNC flip, otherwise report it as an Adaptive-Sync + * flip. + * + * XXX NVKMS TODO: We could be smarter about reporting whether this flip + * exclusively changed surfaces on Adaptive-Sync or G-SYNC heads. + */ + if (AnyActiveVrrHeads(pDevEvo)) { + if (AnyEnabledGsyncDpys(pDevEvo)) { + return NV_KMS_VRR_FLIP_GSYNC; + } else { + return NV_KMS_VRR_FLIP_ADAPTIVE_SYNC; + } + } + + return NV_KMS_VRR_FLIP_NON_VRR; +} + +/*! + * Get the next VRR semaphore index to be released + * by the client, increments the counter and handles wrapping. + */ +NvS32 nvIncVrrSemaphoreIndex(NVDevEvoPtr pDevEvo, + const NvU32 applyAllowVrrApiHeadMasks[NVKMS_MAX_SUBDEVICES]) +{ + NvS32 vrrSemaphoreIndex = -1; + + // If there are pending unstall timers (e.g. triggered by cursor motion), + // cancel them now. The flip that was just requested will trigger an + // unstall. 
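+    // (See nvTriggerVrrUnstallMoveCursor() for how these unstall timers are
+    // armed in response to cursor motion.)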
+ // NOTE: This call will not cancel the frame release timer in + // the case where there is a vrr active head that is pending cursor motion + // and not currently flipping, since we need to wait for the timer for that head + nvCancelVrrFrameReleaseTimers(pDevEvo, applyAllowVrrApiHeadMasks); + + if (AnyActiveVrrHeads(pDevEvo) && !pDevEvo->hal->caps.supportsDisplayRate) { + vrrSemaphoreIndex = pDevEvo->vrr.flipCounter++; + if (pDevEvo->vrr.flipCounter >= NVKMS_VRR_SEMAPHORE_SURFACE_COUNT) { + pDevEvo->vrr.flipCounter = 0; + } + } + + return vrrSemaphoreIndex; +} + +static void +VrrUnstallNow(NVDispEvoPtr pDispEvo) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 apiHead, head; + + nvAssert(pDevEvo->hal->caps.supportsDisplayRate); + + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + if (!pDispEvo->apiHeadState[apiHead].vrr.pendingCursorMotion) { + continue; + } + FOR_EACH_EVO_HW_HEAD_IN_MASK (pDispEvo->apiHeadState[apiHead].hwHeadsMask, head) { + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + pDevEvo->cursorHal->ReleaseElv(pDevEvo, pDispEvo->displayOwner, head); + } + pDispEvo->apiHeadState[apiHead].vrr.pendingCursorMotion = NV_FALSE; + } +} + +static void +VrrUnstallTimer(void *dataPtr, NvU32 dataU32) +{ + NVDispEvoPtr pDispEvo = dataPtr; + + VrrUnstallNow(pDispEvo); + pDispEvo->vrr.unstallTimer = NULL; +} + +/*! + * Schedule a timer to trigger a VRR unstall if no flip occurs soon. + * + * When VRR is active and something other than a flip (i.e. cursor motion) + * changes the screen, RM needs to be notified so that it can trigger a VRR + * unstall to present the new frame. However, if it does that immediately, then + * applications that flip in response to cursor motion will end up always + * flipping during the unstall, causing stutter. So instead, schedule a timeout + * for some time in the future in order to give the application some time to + * respond, but force a minimum refresh rate if it doesn't. + * + * On nvdisplay, this schedules an nvkms timer and uses a method to trigger an + * unstall. On EVO, it calls into RM to do something equivalent. + */ +void nvTriggerVrrUnstallMoveCursor(NVDispEvoPtr pDispEvo) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 timeoutMs = 33; // 30 fps + + NvU32 apiHead; + + if (!AnyActiveVrrHeads(pDevEvo)) { + return; + } + + { + if (!pDispEvo->vrr.unstallTimer) { + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + if (pDispEvo->apiHeadState[apiHead].vrr.active) { + pDispEvo->apiHeadState[apiHead].vrr.pendingCursorMotion = NV_TRUE; + } + } + pDispEvo->vrr.unstallTimer = + nvkms_alloc_timer(VrrUnstallTimer, pDispEvo, 0, timeoutMs * 1000); + } + } +} + +/*! + * Trigger a VRR unstall in response to a cursor image change. + */ +void nvTriggerVrrUnstallSetCursorImage(NVDispEvoPtr pDispEvo, + NvBool elvReleased) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + if (AnyActiveVrrHeads(pDevEvo)) { + if (!elvReleased) { + // On nvdisplay, no unstall is necessary if the cursor image update + // path did a releaseElv=true Update. + // + // But, if elv was not released, then force an immediate unstall + // now. 
+            VrrUnstallNow(pDispEvo);
+        }
+    }
+}
+
+void nvVrrSignalSemaphore(NVDevEvoPtr pDevEvo, NvS32 vrrSemaphoreIndex)
+{
+    NvU32 *pVrrSemaphores = (NvU32 *)pDevEvo->vrr.pSemaphores;
+
+    if (!pDevEvo->vrr.pSemaphores) {
+        return;
+    }
+
+    if (vrrSemaphoreIndex < 0) {
+        return;
+    }
+
+    if (vrrSemaphoreIndex >= NVKMS_VRR_SEMAPHORE_SURFACE_COUNT) {
+        return;
+    }
+
+    pVrrSemaphores[vrrSemaphoreIndex] = 1;
+}
diff --git a/src/nvidia-modeset/src/nvkms.c b/src/nvidia-modeset/src/nvkms.c
new file mode 100644
index 0000000..c0b5228
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms.c
@@ -0,0 +1,6901 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms.h"
+#include "nvkms-private.h"
+#include "nvkms-api.h"
+
+#include "nvkms-types.h"
+#include "nvkms-utils.h"
+#include "nvkms-console-restore.h"
+#include "nvkms-dpy.h"
+#include "nvkms-dma.h"
+#include "nvkms-evo.h"
+#include "nvkms-rm.h"
+#include "nvkms-rmapi.h"
+#include "nvkms-modepool.h"
+#include "nvkms-modeset.h"
+#include "nvkms-attributes.h"
+#include "nvkms-dpy-override.h"
+#include "nvkms-framelock.h"
+#include "nvkms-stereo.h"
+#include "nvkms-surface.h"
+#include "nvkms-3dvision.h"
+#include "nvkms-ioctl.h"
+#include "nvkms-vblank-sem-control.h"
+#include "nvkms-headsurface.h"
+#include "nvkms-headsurface-ioctl.h"
+#include "nvkms-headsurface-swapgroup.h"
+#include "nvkms-flip.h" /* nvFlipEvo */
+#include "nvkms-vrr.h"
+
+#include "dp/nvdp-connector.h"
+
+#include "nvUnixVersion.h" /* NV_VERSION_STRING */
+#include <class/cl0000.h> /* NV01_NULL_OBJECT/NV01_ROOT */
+
+#include "nv_list.h"
+#include "nv_smg.h"
+
+
+/*! \file
+ *
+ * This source file implements the API of NVKMS, built around open,
+ * close, and ioctl file operations.
+ *
+ * An NvKmsPerOpen is stored "per-open"; all API handles are specific
+ * to a per-open instance.  The NvKmsPerOpen is allocated during each
+ * nvKmsOpen() call, and freed during the corresponding nvKmsClose()
+ * call.
+ *
+ * An NvKmsPerOpenDev stores the API handles for the device and all
+ * the disps and connectors on the device.  It is allocated during
+ * nvKmsIoctl(ALLOC_DEVICE), and freed during nvKmsIoctl(FREE_DEVICE).
+ */
+
+
+/*
+ * When the NVKMS device file is opened, the per-open structure could
+ * be used for one of several actions, denoted by its "type".  The
+ * per-open type starts as Undefined. 
The per-open's first use + * defines its type. Once the type transitions from Undefined to + * anything, it can never transition to any other type. + */ +enum NvKmsPerOpenType { + /* + * The per-open is used for making ioctl calls to make requests of + * NVKMS. + */ + NvKmsPerOpenTypeIoctl, + + /* + * The per-open is used for granting access to a NVKMS registered + * surface. + */ + NvKmsPerOpenTypeGrantSurface, + + /* + * The per-open is used for granting permissions. + */ + NvKmsPerOpenTypeGrantPermissions, + + /* + * The per-open is used for granting access to a swap group + */ + NvKmsPerOpenTypeGrantSwapGroup, + + /* + * The per-open is used to unicast a specific event. + */ + NvKmsPerOpenTypeUnicastEvent, + + /* + * The per-open is currently undefined (this is the initial + * state). + */ + NvKmsPerOpenTypeUndefined, +}; + +enum NvKmsUnicastEventType { + /* Used by: + * NVKMS_IOCTL_JOIN_SWAP_GROUP */ + NvKmsUnicastEventTypeDeferredRequest, + + /* Used by: + * NVKMS_IOCTL_NOTIFY_VBLANK */ + NvKmsUnicastEventTypeVblankNotification, + + /* Undefined, this indicates the unicast fd is available for use. */ + NvKmsUnicastEventTypeUndefined, +}; + +struct NvKmsPerOpenConnector { + NVConnectorEvoPtr pConnectorEvo; + NvKmsConnectorHandle nvKmsApiHandle; +}; + +struct NvKmsPerOpenFrameLock { + NVFrameLockEvoPtr pFrameLockEvo; + int refCnt; + NvKmsFrameLockHandle nvKmsApiHandle; +}; + +struct NvKmsPerOpenDisp { + NVDispEvoPtr pDispEvo; + NvKmsDispHandle nvKmsApiHandle; + NvKmsFrameLockHandle frameLockHandle; + NVEvoApiHandlesRec connectorHandles; + struct NvKmsPerOpenConnector connector[NVKMS_MAX_CONNECTORS_PER_DISP]; + NVEvoApiHandlesRec vblankSyncObjectHandles[NVKMS_MAX_HEADS_PER_DISP]; + NVEvoApiHandlesRec vblankCallbackHandles[NVKMS_MAX_HEADS_PER_DISP]; + NVEvoApiHandlesRec vblankSemControlHandles; +}; + +struct NvKmsPerOpenDev { + NVDevEvoPtr pDevEvo; + NvKmsDeviceHandle nvKmsApiHandle; + NVEvoApiHandlesRec dispHandles; + NVEvoApiHandlesRec surfaceHandles; + struct NvKmsFlipPermissions flipPermissions; + struct NvKmsModesetPermissions modesetPermissions; + struct NvKmsPerOpenDisp disp[NVKMS_MAX_SUBDEVICES]; + NvBool isPrivileged; + NVEvoApiHandlesRec deferredRequestFifoHandles; + NVEvoApiHandlesRec swapGroupHandles; +}; + +struct NvKmsPerOpenEventListEntry { + NVListRec eventListEntry; + struct NvKmsEvent event; +}; + +struct NvKmsPerOpen { + nvkms_per_open_handle_t *pOpenKernel; + NvU32 pid; + enum NvKmsClientType clientType; + NVListRec perOpenListEntry; + NVListRec perOpenIoctlListEntry; + enum NvKmsPerOpenType type; + + union { + struct { + NVListRec eventList; + NvU32 eventInterestMask; + NVEvoApiHandlesRec devHandles; + NVEvoApiHandlesRec frameLockHandles; + } ioctl; + + struct { + NVSurfaceEvoPtr pSurfaceEvo; + } grantSurface; + + struct { + NVDevEvoPtr pDevEvo; + NVSwapGroupPtr pSwapGroup; + } grantSwapGroup; + + struct { + NVDevEvoPtr pDevEvo; + struct NvKmsPermissions permissions; + } grantPermissions; + + struct { + /* + * A unicast event NvKmsPerOpen is assigned to an object, so that + * that object can generate events on the unicast event. Store a + * pointer to that object, so that we can clear the pointer when the + * unicast event NvKmsPerOpen is closed. 
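+         *
+         * A rough lifecycle sketch (hypothetical sequence, for
+         * illustration):
+         *
+         *   1) The fd is opened; NvKmsPerOpen::type == Undefined.
+         *   2) The fd is registered with an object (e.g. passed to
+         *      NVKMS_IOCTL_JOIN_SWAP_GROUP); type becomes UnicastEvent
+         *      and unicastEvent.type identifies the user.
+         *   3) The fd is closed; the object's reference back to this
+         *      NvKmsPerOpen is cleared through the pointer stored here.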
+ */ + enum NvKmsUnicastEventType type; + union { + struct { + NVDeferredRequestFifoPtr pDeferredRequestFifo; + } deferred; + + struct { + NvKmsGenericHandle hCallback; + struct NvKmsPerOpenDisp *pOpenDisp; + NvU32 apiHead; + } vblankNotification; + } e; + } unicastEvent; + }; +}; + +static void AllocSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo); +static void FreeSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo); + +static void EnableAndSetupVblankSyncObjectForAllOpens(NVDevEvoRec *pDevEvo); +static void DisableAndCleanVblankSyncObjectForAllOpens(NVDevEvoRec *pDevEvo); + +static NVListRec perOpenList = NV_LIST_INIT(&perOpenList); +static NVListRec perOpenIoctlList = NV_LIST_INIT(&perOpenIoctlList); + +/*! + * Check if there is an NvKmsPerOpenDev on this NvKmsPerOpen that has + * the specified deviceId. + */ +static NvBool DeviceIdAlreadyPresent(struct NvKmsPerOpen *pOpen, struct NvKmsDeviceId deviceId) +{ + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, dev) { + if (pOpenDev->pDevEvo->isSOCDisplay && + (deviceId.rmDeviceId == NVKMS_DEVICE_ID_TEGRA)) { + return TRUE; + } else if (pOpenDev->pDevEvo->deviceId.rmDeviceId == deviceId.rmDeviceId && + pOpenDev->pDevEvo->deviceId.migDevice == deviceId.migDevice) { + return TRUE; + } + } + + return FALSE; +} + + +/*! + * Get the NvKmsPerOpenDev described by NvKmsPerOpen + deviceHandle. + */ +static struct NvKmsPerOpenDev *GetPerOpenDev( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle) +{ + if (pOpen == NULL) { + return NULL; + } + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.devHandles, deviceHandle); +} + + +/*! + * Get the NvKmsPerOpenDev and NvKmsPerOpenDisp described by + * NvKmsPerOpen + deviceHandle + dispHandle. + */ +static NvBool GetPerOpenDevAndDisp( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, + struct NvKmsPerOpenDev **ppOpenDev, + struct NvKmsPerOpenDisp **ppOpenDisp) +{ + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDev = GetPerOpenDev(pOpen, deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + pOpenDisp = nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles, + dispHandle); + + if (pOpenDisp == NULL) { + return FALSE; + } + + *ppOpenDev = pOpenDev; + *ppOpenDisp = pOpenDisp; + + return TRUE; +} + + +/*! + * Get the NvKmsPerOpenDisp described by NvKmsPerOpen + deviceHandle + + * dispHandle. + */ +static struct NvKmsPerOpenDisp *GetPerOpenDisp( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle) +{ + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, deviceHandle); + + if (pOpenDev == NULL) { + return NULL; + } + + return nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles, dispHandle); +} + + +/*! + * Get the NvKmsPerOpenConnector described by NvKmsPerOpen + + * deviceHandle + dispHandle + connectorHandle. 
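+ *
+ * (The lookup simply chains the per-open handle maps:
+ * pOpen -> NvKmsPerOpenDev -> NvKmsPerOpenDisp -> NvKmsPerOpenConnector;
+ * each step returns NULL for an unknown handle, and the NULL propagates
+ * to the caller.)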
+ */ +static struct NvKmsPerOpenConnector *GetPerOpenConnector( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, + const NvKmsConnectorHandle connectorHandle) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); + + if (pOpenDisp == NULL) { + return NULL; + } + + return nvEvoGetPointerFromApiHandle(&pOpenDisp->connectorHandles, + connectorHandle); +} + + +/*! + * Get the NVDpyEvoRec described by NvKmsPerOpen + deviceHandle + + * dispHandle + dpyId. + */ +static NVDpyEvoRec *GetPerOpenDpy( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, + const NVDpyId dpyId) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); + + if (pOpenDisp == NULL) { + return NULL; + } + + return nvGetDpyEvoFromDispEvo(pOpenDisp->pDispEvo, dpyId); +} + + +/*! + * Get the NvKmsPerOpenFrameLock described by pOpen + frameLockHandle. + */ +static struct NvKmsPerOpenFrameLock *GetPerOpenFrameLock( + const struct NvKmsPerOpen *pOpen, + NvKmsFrameLockHandle frameLockHandle) +{ + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles, + frameLockHandle); +} + + +/*! + * Free the NvKmsPerOpenFrameLock associated with this NvKmsPerOpenDisp. + * + * Multiple disps can be assigned to the same framelock object, so + * NvKmsPerOpenFrameLock is reference counted: the object is freed + * once all NvKmsPerOpenDisps remove their reference to it. + * + * \param[in,out] pOpen The per-open data, to which the + * NvKmsPerOpenFrameLock is assigned. + * \param[in,out] pOpenDisp The NvKmsPerOpenDisp whose corresponding + * NvKmsPerOpenFrameLock should be freed. + */ +static void FreePerOpenFrameLock(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDisp *pOpenDisp) +{ + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + pOpenFrameLock = + nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles, + pOpenDisp->frameLockHandle); + if (pOpenFrameLock == NULL) { + return; + } + + pOpenDisp->frameLockHandle = 0; + + pOpenFrameLock->refCnt--; + + if (pOpenFrameLock->refCnt != 0) { + return; + } + + nvEvoDestroyApiHandle(&pOpen->ioctl.frameLockHandles, + pOpenFrameLock->nvKmsApiHandle); + nvFree(pOpenFrameLock); +} + + +/*! + * Allocate and initialize an NvKmsPerOpenFrameLock. + * + * If the disp described by the specified NvKmsPerOpenDisp has a + * framelock object, allocate an NvKmsPerOpenFrameLock for it. + * + * Multiple disps can be assigned to the same framelock object, so + * NvKmsPerOpenFrameLock is reference counted: we first look to see if + * an NvKmsPerOpenFrameLock for this disp's framelock object already + * exists. If so, we increment its reference count. Otherwise, we + * allocate a new NvKmsPerOpenFrameLock. + * + * \param[in,out] pOpen The per-open data, to which the + * new NvKmsPerOpenFrameLock should be assigned. + * \param[in,out] pOpenDisp The NvKmsPerOpenDisp whose corresponding + * NvKmsPerOpenFrameLock should be allocated. 
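+ *
+ * Worked example (hypothetical configuration): two disps sharing one
+ * framelock board.  The first AllocPerOpenFrameLock() call allocates the
+ * NvKmsPerOpenFrameLock and leaves refCnt == 1; the second call finds
+ * the existing object in ioctl.frameLockHandles and bumps refCnt to 2.
+ * Each FreePerOpenFrameLock() then decrements refCnt, and the object is
+ * freed only when it reaches 0.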
+ */ +static NvBool AllocPerOpenFrameLock( + struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDisp *pOpenDisp) +{ + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + NVDispEvoPtr pDispEvo = pOpenDisp->pDispEvo; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvKmsGenericHandle handle; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (pFrameLockEvo == NULL) { + return TRUE; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles, + pOpenFrameLock, handle) { + if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) { + goto done; + } + } + + pOpenFrameLock = nvCalloc(1, sizeof(*pOpenFrameLock)); + + if (pOpenFrameLock == NULL) { + return FALSE; + } + + pOpenFrameLock->pFrameLockEvo = pFrameLockEvo; + pOpenFrameLock->nvKmsApiHandle = + nvEvoCreateApiHandle(&pOpen->ioctl.frameLockHandles, pOpenFrameLock); + + if (pOpenFrameLock->nvKmsApiHandle == 0) { + nvFree(pOpenFrameLock); + return FALSE; + } + +done: + pOpenDisp->frameLockHandle = pOpenFrameLock->nvKmsApiHandle; + pOpenFrameLock->refCnt++; + return TRUE; +} + + +/*! + * Get the NvKmsConnectorHandle that corresponds to the given + * NVConnectorEvoRec on the NvKmsPerOpen + deviceHandle + dispHandle. + */ +static NvKmsConnectorHandle ConnectorEvoToConnectorHandle( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, + const NVConnectorEvoRec *pConnectorEvo) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + struct NvKmsPerOpenConnector *pOpenConnector; + NvKmsGenericHandle connector; + + pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); + + if (pOpenDisp == NULL) { + return 0; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles, + pOpenConnector, connector) { + if (pOpenConnector->pConnectorEvo == pConnectorEvo) { + return pOpenConnector->nvKmsApiHandle; + } + } + + return 0; +} + + +/*! + * Get the NvKmsDeviceHandle and NvKmsDispHandle that corresponds to + * the given NVDispEvoRec on the NvKmsPerOpen. + */ +static NvBool DispEvoToDevAndDispHandles( + const struct NvKmsPerOpen *pOpen, + const NVDispEvoRec *pDispEvo, + NvKmsDeviceHandle *pDeviceHandle, + NvKmsDispHandle *pDispHandle) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, dev) { + + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle disp; + + if (pOpenDev->pDevEvo != pDevEvo) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, + pOpenDisp, disp) { + if (pOpenDisp->pDispEvo != pDispEvo) { + continue; + } + + *pDeviceHandle = pOpenDev->nvKmsApiHandle; + *pDispHandle = pOpenDisp->nvKmsApiHandle; + + return TRUE; + } + } + + return FALSE; +} + + +/*! + * Get the NvKmsPerOpenDev that corresponds to the given NVDevEvoRec + * on the NvKmsPerOpen. + */ +static struct NvKmsPerOpenDev *DevEvoToOpenDev( + const struct NvKmsPerOpen *pOpen, + const NVDevEvoRec *pDevEvo) +{ + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, dev) { + if (pOpenDev->pDevEvo == pDevEvo) { + return pOpenDev; + } + } + + return NULL; +} + + +/*! + * Get the NvKmsFrameLockHandle that corresponds to the given + * NVFrameLockEvoRec on the NvKmsPerOpen. 
+ */
+static NvBool FrameLockEvoToFrameLockHandle(
+    const struct NvKmsPerOpen *pOpen,
+    const NVFrameLockEvoRec *pFrameLockEvo,
+    NvKmsFrameLockHandle *pFrameLockHandle)
+{
+    struct NvKmsPerOpenFrameLock *pOpenFrameLock;
+    NvKmsGenericHandle handle;
+
+    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);
+
+    FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles,
+                                        pOpenFrameLock, handle) {
+
+        if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) {
+            *pFrameLockHandle = pOpenFrameLock->nvKmsApiHandle;
+            return TRUE;
+        }
+    }
+
+    return FALSE;
+}
+
+
+/*!
+ * Clear the specified NvKmsPerOpenConnector.
+ *
+ * \param[in,out]  pOpenDisp       The NvKmsPerOpenDisp to which the
+ *                                 NvKmsPerOpenConnector is assigned.
+ * \param[in,out]  pOpenConnector  The NvKmsPerOpenConnector to be cleared.
+ */
+static void ClearPerOpenConnector(
+    struct NvKmsPerOpenDisp *pOpenDisp,
+    struct NvKmsPerOpenConnector *pOpenConnector)
+{
+    nvEvoDestroyApiHandle(&pOpenDisp->connectorHandles,
+                          pOpenConnector->nvKmsApiHandle);
+    nvkms_memset(pOpenConnector, 0, sizeof(*pOpenConnector));
+}
+
+
+/*!
+ * Initialize an NvKmsPerOpenConnector.
+ *
+ * \param[in,out]  pOpenDisp       The NvKmsPerOpenDisp to which the
+ *                                 NvKmsPerOpenConnector is assigned.
+ * \param[in,out]  pOpenConnector  The NvKmsPerOpenConnector to initialize.
+ * \param[in]      pConnectorEvo   The connector that the NvKmsPerOpenConnector
+ *                                 corresponds to.
+ *
+ * \return  If the NvKmsPerOpenConnector is successfully initialized,
+ *          return TRUE.  Otherwise, return FALSE.
+ */
+static NvBool InitPerOpenConnector(
+    struct NvKmsPerOpenDisp *pOpenDisp,
+    struct NvKmsPerOpenConnector *pOpenConnector,
+    NVConnectorEvoPtr pConnectorEvo)
+{
+    pOpenConnector->nvKmsApiHandle =
+        nvEvoCreateApiHandle(&pOpenDisp->connectorHandles, pOpenConnector);
+
+    if (pOpenConnector->nvKmsApiHandle == 0) {
+        goto fail;
+    }
+
+    pOpenConnector->pConnectorEvo = pConnectorEvo;
+
+    return TRUE;
+
+fail:
+    ClearPerOpenConnector(pOpenDisp, pOpenConnector);
+    return FALSE;
+}
+
+/*!
+ * Clear the specified NvKmsPerOpenDisp.
+ *
+ * \param[in,out]  pOpen      The per-open data, to which the NvKmsPerOpenDev
+ *                            is assigned.
+ * \param[in,out]  pOpenDev   The NvKmsPerOpenDev to which the NvKmsPerOpenDisp
+ *                            is assigned.
+ * \param[in,out]  pOpenDisp  The NvKmsPerOpenDisp to be cleared.
+ */
+static void ClearPerOpenDisp(
+    struct NvKmsPerOpen *pOpen,
+    struct NvKmsPerOpenDev *pOpenDev,
+    struct NvKmsPerOpenDisp *pOpenDisp)
+{
+    struct NvKmsPerOpenConnector *pOpenConnector;
+    NvKmsGenericHandle connector;
+
+    NVVBlankCallbackPtr pCallbackData;
+    NvKmsGenericHandle callback;
+
+    FreePerOpenFrameLock(pOpen, pOpenDisp);
+
+    FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles,
+                                        pOpenConnector, connector) {
+        ClearPerOpenConnector(pOpenDisp, pOpenConnector);
+    }
+
+    /* Destroy the API handle structures. */
+    nvEvoDestroyApiHandles(&pOpenDisp->connectorHandles);
+
+    for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) {
+        nvEvoDestroyApiHandles(&pOpenDisp->vblankSyncObjectHandles[i]);
+
+        FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->vblankCallbackHandles[i],
+                                            pCallbackData, callback) {
+            nvRemoveUnicastEvent(pCallbackData->pUserData);
+        }
+        nvEvoDestroyApiHandles(&pOpenDisp->vblankCallbackHandles[i]);
+    }
+
+    nvEvoDestroyApiHandles(&pOpenDisp->vblankSemControlHandles);
+
+    nvEvoDestroyApiHandle(&pOpenDev->dispHandles, pOpenDisp->nvKmsApiHandle);
+
+    nvkms_memset(pOpenDisp, 0, sizeof(*pOpenDisp));
+}
+
+
+/*!
+ * Initialize an NvKmsPerOpenDisp.
+ *
+ * \param[in,out]  pOpen      The per-open data, to which the NvKmsPerOpenDev
+ *                            is assigned.
+ * \param[in,out]  pOpenDev   The NvKmsPerOpenDev to which the NvKmsPerOpenDisp
+ *                            is assigned. 
+ * \param[in,out] pOpenDisp The NvKmsPerOpenDisp to initialize. + * \param[in] pDispEvo The disp that the NvKmsPerOpenDisp corresponds to. + * + * \return If the NvKmsPerOpenDisp is successfully initialized, return TRUE. + * Otherwise, return FALSE. + */ +static NvBool InitPerOpenDisp( + struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev, + struct NvKmsPerOpenDisp *pOpenDisp, + NVDispEvoPtr pDispEvo) +{ + NVConnectorEvoPtr pConnectorEvo; + NvU32 connector; + + pOpenDisp->nvKmsApiHandle = + nvEvoCreateApiHandle(&pOpenDev->dispHandles, pOpenDisp); + + if (pOpenDisp->nvKmsApiHandle == 0) { + goto fail; + } + + pOpenDisp->pDispEvo = pDispEvo; + + if (nvListCount(&pDispEvo->connectorList) >= + ARRAY_LEN(pOpenDisp->connector)) { + nvAssert(!"More connectors on this disp than NVKMS can handle."); + goto fail; + } + + if (!nvEvoInitApiHandles(&pOpenDisp->connectorHandles, + ARRAY_LEN(pOpenDisp->connector))) { + goto fail; + } + + connector = 0; + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (!InitPerOpenConnector(pOpenDisp, &pOpenDisp->connector[connector], + pConnectorEvo)) { + goto fail; + } + connector++; + } + + /* Initialize the vblankSyncObjectHandles for each head. */ + for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) { + if (!nvEvoInitApiHandles(&pOpenDisp->vblankSyncObjectHandles[i], + NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) { + goto fail; + } + } + + /* Initialize the vblankCallbackHandles for each head. + * + * The initial value of VBLANK_SYNC_OBJECTS_PER_HEAD doesn't really apply + * here, but we need something. */ + for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) { + if (!nvEvoInitApiHandles(&pOpenDisp->vblankCallbackHandles[i], + NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) { + goto fail; + } + } + + /* Initialize the vblankSemControlHandles. + * + * The initial value of VBLANK_SYNC_OBJECTS_PER_HEAD doesn't really apply + * here, but we need something. */ + if (!nvEvoInitApiHandles(&pOpenDisp->vblankSemControlHandles, + NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) { + goto fail; + } + + if (!AllocPerOpenFrameLock(pOpen, pOpenDisp)) { + goto fail; + } + + return TRUE; + +fail: + ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp); + return FALSE; +} + +/*! + * Free any SwapGroups tracked by this pOpenDev. + */ +static void FreeSwapGroups(struct NvKmsPerOpenDev *pOpenDev) +{ + NVSwapGroupRec *pSwapGroup; + NvKmsSwapGroupHandle handle; + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->swapGroupHandles, + pSwapGroup, + handle) { + nvEvoDestroyApiHandle(&pOpenDev->swapGroupHandles, handle); + + if (nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { + nvHsFreeSwapGroup(pDevEvo, pSwapGroup); + } else { + nvHsDecrementSwapGroupRefCnt(pSwapGroup); + } + } +} + +/*! + * Check that the NvKmsPermissions make sense. + */ +static NvBool ValidateNvKmsPermissions( + const NVDevEvoRec *pDevEvo, + const struct NvKmsPermissions *pPermissions, + enum NvKmsClientType clientType) +{ + if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { + NvU32 d, h; + + for (d = 0; d < ARRAY_LEN(pPermissions->flip.disp); d++) { + for (h = 0; h < ARRAY_LEN(pPermissions->flip.disp[d].head); h++) { + + NvU8 layerMask = pPermissions->flip.disp[d].head[h].layerMask; + + if (layerMask == 0) { + continue; + } + + if (nvHasBitAboveMax(layerMask, pDevEvo->apiHead[h].numLayers)) { + return FALSE; + } + + /* + * If the above blocks didn't 'continue', then there + * are permissions specified for this disp+head. 
Is + * the specified disp+head in range for the current + * configuration? + */ + if (d >= pDevEvo->nDispEvo) { + return FALSE; + } + + if (h >= pDevEvo->numApiHeads) { + return FALSE; + } + } + } + } else if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_MODESET) { + NvU32 d, h; + + for (d = 0; d < ARRAY_LEN(pPermissions->flip.disp); d++) { + for (h = 0; h < ARRAY_LEN(pPermissions->flip.disp[d].head); h++) { + + NVDpyIdList dpyIdList = + pPermissions->modeset.disp[d].head[h].dpyIdList; + + if (nvDpyIdListIsEmpty(dpyIdList)) { + continue; + } + + /* + * If the above blocks didn't 'continue', then there + * are permissions specified for this disp+head. Is + * the specified disp+head in range for the current + * configuration? + */ + if (d >= pDevEvo->nDispEvo) { + return FALSE; + } + + if (h >= pDevEvo->numApiHeads) { + return FALSE; + } + } + } + } else if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_SUB_OWNER) { + + /* Only kapi uses this permission type, so disallow it from userspace */ + if (clientType != NVKMS_CLIENT_KERNEL_SPACE) { + return FALSE; + } + + } else { + return FALSE; + } + + return TRUE; +} + +/*! + * Assign pPermissions with the maximum permissions possible for + * the pDevEvo. + */ +static void AssignFullNvKmsFlipPermissions( + const NVDevEvoRec *pDevEvo, + struct NvKmsFlipPermissions *pPermissions) +{ + NvU32 dispIndex, apiHead; + + nvkms_memset(pPermissions, 0, sizeof(*pPermissions)); + + for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) { + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + pPermissions->disp[dispIndex].head[apiHead].layerMask = + NVBIT(pDevEvo->apiHead[apiHead].numLayers) - 1; + } + } +} + +static void AssignFullNvKmsModesetPermissions( + const NVDevEvoRec *pDevEvo, + struct NvKmsModesetPermissions *pPermissions) +{ + NvU32 dispIndex, apiHead; + + nvkms_memset(pPermissions, 0, sizeof(*pPermissions)); + + for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) { + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + pPermissions->disp[dispIndex].head[apiHead].dpyIdList = + nvAllDpyIdList(); + } + } +} + +static void AssignFullNvKmsPermissions( + struct NvKmsPerOpenDev *pOpenDev +) +{ + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; + + AssignFullNvKmsFlipPermissions(pDevEvo, &pOpenDev->flipPermissions); + AssignFullNvKmsModesetPermissions(pDevEvo, &pOpenDev->modesetPermissions); +} + +/*! + * Set the modeset owner to pOpenDev + * + * \param pOpenDev The per-open device structure for the new modeset owner. + * \return FALSE if there was already a modeset owner. TRUE otherwise. + */ +static NvBool GrabModesetOwnership(struct NvKmsPerOpenDev *pOpenDev) +{ + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; + + if (pDevEvo->modesetOwner == pOpenDev) { + return TRUE; + } + + if (pDevEvo->modesetOwner != NULL) { + return FALSE; + } + + /* + * If claiming modeset ownership, undo any SST forcing imposed by + * console restore. + */ + if (pOpenDev != pDevEvo->pNvKmsOpenDev) { + nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */); + } + + pDevEvo->modesetOwner = pOpenDev; + pDevEvo->modesetOwnerOrSubOwnerChanged = TRUE; + + AssignFullNvKmsPermissions(pOpenDev); + return TRUE; +} + +/* + * If not NULL, remove pRemoveFlip from pFlip. Returns true if there are still + * some remaining permissions. 
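+ *
+ * Example with hypothetical masks: if disp 0 / head 0 holds layerMask
+ * 0x7 and pRemoveFlip specifies 0x2, that head is left with 0x5 and the
+ * function returns TRUE; removing 0x7 instead leaves 0x0, and the
+ * function returns FALSE unless some other head still holds a layer.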
+ */ +static NvBool RemoveFlipPermissions(struct NvKmsFlipPermissions *pFlip, + const struct NvKmsFlipPermissions *pRemoveFlip) +{ + NvU32 d, h, dLen, hLen; + NvBool remainingPermissions = FALSE; + + dLen = ARRAY_LEN(pFlip->disp); + for (d = 0; d < dLen; d++) { + hLen = ARRAY_LEN(pFlip->disp[d].head); + for (h = 0; h < hLen; h++) { + + if (pRemoveFlip) { + pFlip->disp[d].head[h].layerMask &= + ~pRemoveFlip->disp[d].head[h].layerMask; + } + + remainingPermissions |= (pFlip->disp[d].head[h].layerMask != 0); + } + } + + return remainingPermissions; +} + +/* + * If not NULL, remove pRemoveModeset from pModeset. Returns true if there are + * still some remaining permissions. + */ +static NvBool RemoveModesetPermissions(struct NvKmsModesetPermissions *pModeset, + const struct NvKmsModesetPermissions *pRemoveModeset) +{ + NvU32 d, h, dLen, hLen; + NvBool remainingPermissions = FALSE; + + dLen = ARRAY_LEN(pModeset->disp); + for (d = 0; d < dLen; d++) { + hLen = ARRAY_LEN(pModeset->disp[d].head); + for (h = 0; h < hLen; h++) { + + if (pRemoveModeset) { + pModeset->disp[d].head[h].dpyIdList = nvDpyIdListMinusDpyIdList( + pModeset->disp[d].head[h].dpyIdList, + pRemoveModeset->disp[d].head[h].dpyIdList); + } + + remainingPermissions |= + !nvDpyIdListIsEmpty(pModeset->disp[d].head[h].dpyIdList); + } + } + + return remainingPermissions; +} + +/*! + * Clear permissions on the specified device for all NvKmsPerOpens. + * + * For NvKmsPerOpen::type==Ioctl, clear the permissions, except for the + * specified pOpenDevExclude. + * + * For NvKmsPerOpen::type==GrantPermissions, clear + * NvKmsPerOpen::grantPermissions and reset NvKmsPerOpen::type to + * Undefined. + */ +static void RevokePermissionsInternal( + const NvU32 typeBitmask, + NVDevEvoRec *pDevEvo, + const struct NvKmsPerOpenDev *pOpenDevExclude) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { + + if ((pOpen->type == NvKmsPerOpenTypeGrantPermissions) && + (pOpen->grantPermissions.pDevEvo == pDevEvo) && + (typeBitmask & NVBIT(pOpen->grantPermissions.permissions.type))) { + nvkms_memset(&pOpen->grantPermissions, 0, + sizeof(pOpen->grantPermissions)); + pOpen->type = NvKmsPerOpenTypeUndefined; + } + + if (pOpen->type == NvKmsPerOpenTypeIoctl) { + + struct NvKmsPerOpenDev *pOpenDev = + DevEvoToOpenDev(pOpen, pDevEvo); + + if (pOpenDev == NULL) { + continue; + } + + if (pOpenDev == pOpenDevExclude || pOpenDev->isPrivileged) { + continue; + } + + if (pOpenDev == pDevEvo->modesetSubOwner && + (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER))) { + FreeSwapGroups(pOpenDev); + pDevEvo->modesetSubOwner = NULL; + pDevEvo->modesetOwnerOrSubOwnerChanged = TRUE; + } + + /* + * Clients with sub-owner permission (or better) don't get flipping + * or modeset permission revoked. + */ + if (nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { + continue; + } + + if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING)) { + nvkms_memset(&pOpenDev->flipPermissions, 0, + sizeof(pOpenDev->flipPermissions)); + } + + if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET)) { + nvkms_memset(&pOpenDev->modesetPermissions, 0, + sizeof(pOpenDev->modesetPermissions)); + } + } + } +} + +static void RestoreConsole(NVDevEvoPtr pDevEvo) +{ + // Try to issue a modeset and flip to the framebuffer console surface. 
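+    //
+    // (nvkms_test_fail_alloc_core_channel() appears to be a fault-injection
+    // test hook; in normal operation it is expected to return FALSE, so the
+    // console restore path below is attempted first.)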
+ const NvBool bFail = nvkms_test_fail_alloc_core_channel( + FAIL_ALLOC_CORE_CHANNEL_RESTORE_CONSOLE); + + if (bFail || !nvEvoRestoreConsole(pDevEvo, TRUE /* allowMST */)) { + // If that didn't work, free the core channel to trigger RM's console + // restore code. + FreeSurfaceCtxDmasForAllOpens(pDevEvo); + DisableAndCleanVblankSyncObjectForAllOpens(pDevEvo); + nvFreeCoreChannelEvo(pDevEvo); + + // Reallocate the core channel right after freeing it. This makes sure + // that it's allocated and ready right away if another NVKMS client is + // started. + if ((!bFail) && nvAllocCoreChannelEvo(pDevEvo)) { + nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */); + EnableAndSetupVblankSyncObjectForAllOpens(pDevEvo); + AllocSurfaceCtxDmasForAllOpens(pDevEvo); + } else { + nvRevokeDevice(pDevEvo); + } + } +} + +/*! + * Release modeset ownership previously set by GrabModesetOwnership + * + * \param pOpenDev The per-open device structure relinquishing modeset + * ownership. + * \return FALSE if pOpenDev is not the modeset owner, TRUE otherwise. + */ +static NvBool ReleaseModesetOwnership(struct NvKmsPerOpenDev *pOpenDev) +{ + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; + + if (pDevEvo->modesetOwner != pOpenDev) { + // Only the current owner can release ownership. + return FALSE; + } + + FreeSwapGroups(pOpenDev); + + pDevEvo->modesetOwner = NULL; + pDevEvo->modesetOwnerOrSubOwnerChanged = TRUE; + pDevEvo->handleConsoleHotplugs = TRUE; + + RestoreConsole(pDevEvo); + RevokePermissionsInternal(NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) | + NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET) | + NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER), + pDevEvo, NULL /* pOpenDevExclude */); + return TRUE; +} + +/*! + * Free the specified NvKmsPerOpenDev. + * + * \param[in,out] pOpen The per-open data, to which the + * NvKmsPerOpenDev is assigned. + * \param[in,out] pOpenDev The NvKmsPerOpenDev to free. + */ +void nvFreePerOpenDev(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle disp; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (pOpenDev == NULL) { + return; + } + + nvEvoDestroyApiHandles(&pOpenDev->surfaceHandles); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, + pOpenDisp, disp) { + ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp); + } + + nvEvoDestroyApiHandles(&pOpenDev->dispHandles); + + nvEvoDestroyApiHandle(&pOpen->ioctl.devHandles, pOpenDev->nvKmsApiHandle); + + nvEvoDestroyApiHandles(&pOpenDev->deferredRequestFifoHandles); + + nvEvoDestroyApiHandles(&pOpenDev->swapGroupHandles); + + nvFree(pOpenDev); +} + + +/*! + * Allocate and initialize an NvKmsPerOpenDev. + * + * \param[in,out] pOpen The per-open data, to which the + * new NvKmsPerOpenDev should be assigned. + * \param[in] pDevEvo The device to which the new NvKmsPerOpenDev + * corresponds. + * \param[in] isPrivileged The NvKmsPerOpenDev is privileged which can + * do modeset anytime. + * + * \return On success, return a pointer to the new NvKmsPerOpenDev. + * On failure, return NULL. 
+ */ +struct NvKmsPerOpenDev *nvAllocPerOpenDev(struct NvKmsPerOpen *pOpen, + NVDevEvoPtr pDevEvo, NvBool isPrivileged) +{ + struct NvKmsPerOpenDev *pOpenDev = nvCalloc(1, sizeof(*pOpenDev)); + NVDispEvoPtr pDispEvo; + NvU32 disp; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (pOpenDev == NULL) { + goto fail; + } + + pOpenDev->nvKmsApiHandle = + nvEvoCreateApiHandle(&pOpen->ioctl.devHandles, pOpenDev); + + if (pOpenDev->nvKmsApiHandle == 0) { + goto fail; + } + + pOpenDev->pDevEvo = pDevEvo; + + if (!nvEvoInitApiHandles(&pOpenDev->dispHandles, + ARRAY_LEN(pOpenDev->disp))) { + goto fail; + } + + if (pDevEvo->nDispEvo > ARRAY_LEN(pOpenDev->disp)) { + nvAssert(!"More disps on this device than NVKMS can handle."); + goto fail; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, disp, pDevEvo) { + if (!InitPerOpenDisp(pOpen, pOpenDev, &pOpenDev->disp[disp], pDispEvo)) { + goto fail; + } + } + + if (!nvEvoInitApiHandles(&pOpenDev->surfaceHandles, 32)) { + goto fail; + } + + pOpenDev->isPrivileged = isPrivileged; + if (pOpenDev->isPrivileged) { + AssignFullNvKmsPermissions(pOpenDev); + } + + if (!nvEvoInitApiHandles(&pOpenDev->deferredRequestFifoHandles, 4)) { + goto fail; + } + + if (!nvEvoInitApiHandles(&pOpenDev->swapGroupHandles, 4)) { + goto fail; + } + + return pOpenDev; + +fail: + nvFreePerOpenDev(pOpen, pOpenDev); + return NULL; +} + + +/*! + * Assign NvKmsPerOpen::type. + * + * This succeeds only if NvKmsPerOpen::type is Undefined, or already + * has the requested type and allowRedundantAssignment is TRUE. + */ +static NvBool AssignNvKmsPerOpenType(struct NvKmsPerOpen *pOpen, + enum NvKmsPerOpenType type, + NvBool allowRedundantAssignment) +{ + if ((pOpen->type == type) && allowRedundantAssignment) { + return TRUE; + } + + if (pOpen->type != NvKmsPerOpenTypeUndefined) { + return FALSE; + } + + switch (type) { + case NvKmsPerOpenTypeIoctl: + nvListInit(&pOpen->ioctl.eventList); + + if (!nvEvoInitApiHandles(&pOpen->ioctl.devHandles, NV_MAX_DEVICES)) { + return FALSE; + } + + if (!nvEvoInitApiHandles(&pOpen->ioctl.frameLockHandles, 4)) { + nvEvoDestroyApiHandles(&pOpen->ioctl.devHandles); + return FALSE; + } + + nvListAppend(&pOpen->perOpenIoctlListEntry, &perOpenIoctlList); + break; + + case NvKmsPerOpenTypeGrantSurface: + /* Nothing to do, here. */ + break; + + case NvKmsPerOpenTypeGrantSwapGroup: + /* Nothing to do, here. */ + break; + + case NvKmsPerOpenTypeGrantPermissions: + /* Nothing to do, here. */ + break; + + case NvKmsPerOpenTypeUnicastEvent: + /* Nothing to do, here. */ + break; + + case NvKmsPerOpenTypeUndefined: + nvAssert(!"unexpected NvKmsPerOpenType"); + break; + } + + pOpen->type = type; + return TRUE; +} + +/*! + * Return whether the PerOpen can be used as a unicast event. + */ +static inline NvBool PerOpenIsValidForUnicastEvent( + const struct NvKmsPerOpen *pOpen) +{ + /* If the type is Undefined, it can be made a unicast event. */ + + if (pOpen->type == NvKmsPerOpenTypeUndefined) { + return TRUE; + } + + /* + * If the type is already UnicastEvent but there is no active user, it can + * be made a unicast event. + */ + if ((pOpen->type == NvKmsPerOpenTypeUnicastEvent) && + (pOpen->unicastEvent.type == NvKmsUnicastEventTypeUndefined)) { + return TRUE; + } + + return FALSE; +} + +/*! + * Allocate the specified device. 
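+ *
+ * Note, from the logic below: if the device was already allocated by
+ * another client, a compatible ALLOC_DEVICE request does not reallocate
+ * anything; it simply increments pDevEvo->allocRefCnt and fills in the
+ * reply from the existing device state.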
+ */ +static NvBool AllocDevice(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsAllocDeviceParams *pParams = pParamsVoid; + NVDevEvoPtr pDevEvo; + struct NvKmsPerOpenDev *pOpenDev; + NvU32 disp, apiHead; + NvU8 layer; + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + if (nvkms_strcmp(pParams->request.versionString, NV_VERSION_STRING) != 0) { + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_VERSION_MISMATCH; + return FALSE; + } + + /* + * It is an error to call NVKMS_IOCTL_ALLOC_DEVICE multiple times + * on the same device with the same fd. + */ + if (DeviceIdAlreadyPresent(pOpen, pParams->request.deviceId)) { + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; + return FALSE; + } + + pDevEvo = nvFindDevEvoByDeviceId(pParams->request.deviceId); + + if (pDevEvo == NULL) { + pDevEvo = nvAllocDevEvo(&pParams->request, &pParams->reply.status); + if (pDevEvo == NULL) { + return FALSE; + } + } else { + if (!pParams->request.tryInferSliMosaicFromExistingDevice && + (pDevEvo->sli.mosaic != pParams->request.sliMosaic)) { + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; + return FALSE; + } + pDevEvo->allocRefCnt++; + } + + pOpenDev = nvAllocPerOpenDev(pOpen, pDevEvo, FALSE /* isPrivileged */); + + if (pOpenDev == NULL) { + nvFreeDevEvo(pDevEvo); + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + return FALSE; + } + + /* Beyond this point, the function cannot fail. */ + + if (pParams->request.enableConsoleHotplugHandling) { + pDevEvo->handleConsoleHotplugs = TRUE; + } + + pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; + pParams->reply.subDeviceMask = + NV_TWO_N_MINUS_ONE(pDevEvo->numSubDevices); + pParams->reply.numHeads = pDevEvo->numApiHeads; + pParams->reply.numDisps = pDevEvo->nDispEvo; + + ct_assert(ARRAY_LEN(pParams->reply.dispHandles) == + ARRAY_LEN(pOpenDev->disp)); + + for (disp = 0; disp < ARRAY_LEN(pParams->reply.dispHandles); disp++) { + pParams->reply.dispHandles[disp] = pOpenDev->disp[disp].nvKmsApiHandle; + } + + pParams->reply.inputLutAppliesToBase = pDevEvo->caps.inputLutAppliesToBase; + + ct_assert(ARRAY_LEN(pParams->reply.layerCaps) == + ARRAY_LEN(pDevEvo->caps.layerCaps)); + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + pParams->reply.numLayers[apiHead] = pDevEvo->apiHead[apiHead].numLayers; + } + + for (layer = 0; + layer < ARRAY_LEN(pParams->reply.layerCaps); + layer++) { + pParams->reply.layerCaps[layer] = pDevEvo->caps.layerCaps[layer]; + } + pParams->reply.olutCaps = pDevEvo->caps.olut; + + pParams->reply.surfaceAlignment = NV_EVO_SURFACE_ALIGNMENT; + pParams->reply.requiresVrrSemaphores = !pDevEvo->hal->caps.supportsDisplayRate; + + pParams->reply.nIsoSurfacesInVidmemOnly = + !!NV5070_CTRL_SYSTEM_GET_CAP(pDevEvo->capsBits, + NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY); + + pParams->reply.requiresAllAllocationsInSysmem = + pDevEvo->requiresAllAllocationsInSysmem; + pParams->reply.supportsHeadSurface = pDevEvo->isHeadSurfaceSupported; + + pParams->reply.validNIsoFormatMask = pDevEvo->caps.validNIsoFormatMask; + + pParams->reply.maxWidthInBytes = pDevEvo->caps.maxWidthInBytes; + pParams->reply.maxWidthInPixels = pDevEvo->caps.maxWidthInPixels; + pParams->reply.maxHeightInPixels = pDevEvo->caps.maxHeight; + pParams->reply.cursorCompositionCaps = pDevEvo->caps.cursorCompositionCaps; + + pParams->reply.maxCursorSize = pDevEvo->cursorHal->caps.maxSize; + + /* NVKMS swap groups and warp&blend depends on headSurface functionality. 
*/ + pParams->reply.supportsSwapGroups = pDevEvo->isHeadSurfaceSupported; + pParams->reply.supportsWarpAndBlend = pDevEvo->isHeadSurfaceSupported; + + pParams->reply.validLayerRRTransforms = pDevEvo->caps.validLayerRRTransforms; + + pParams->reply.isoIOCoherencyModes = pDevEvo->isoIOCoherencyModes; + pParams->reply.nisoIOCoherencyModes = pDevEvo->nisoIOCoherencyModes; + + /* + * TODO: Replace the isSOCDisplay check with an RM query. See Bug 3689635. + */ + pParams->reply.displayIsGpuL2Coherent = !pDevEvo->isSOCDisplay; + + pParams->reply.supportsSyncpts = pDevEvo->supportsSyncpts; + + pParams->reply.supportsIndependentAcqRelSemaphore = + pDevEvo->hal->caps.supportsIndependentAcqRelSemaphore; + + pParams->reply.supportsVblankSyncObjects = + pDevEvo->hal->caps.supportsVblankSyncObjects; + + pParams->reply.supportsVblankSemControl = pDevEvo->supportsVblankSemControl; + + pParams->reply.supportsInputColorSpace = + pDevEvo->hal->caps.supportsInputColorSpace; + + pParams->reply.supportsInputColorRange = + pDevEvo->hal->caps.supportsInputColorRange; + + if (pOpen->clientType == NVKMS_CLIENT_KERNEL_SPACE) { + pParams->reply.vtFbBaseAddress = pDevEvo->vtFbInfo.baseAddress; + pParams->reply.vtFbSize = pDevEvo->vtFbInfo.size; + } + + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_SUCCESS; + + return TRUE; +} + +static void UnregisterDeferredRequestFifos(struct NvKmsPerOpenDev *pOpenDev) +{ + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvKmsGenericHandle handle; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->deferredRequestFifoHandles, + pDeferredRequestFifo, + handle) { + + nvEvoDestroyApiHandle(&pOpenDev->deferredRequestFifoHandles, handle); + + nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, + pDeferredRequestFifo); + } +} + +/* + * Forward declaration since this function is used by + * DisableRemainingVblankSyncObjects(). + */ +static void DisableAndCleanVblankSyncObject(NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + NVVblankSyncObjectRec *pVblankSyncObject, + NVEvoUpdateState *pUpdateState); + +static void DisableRemainingVblankSyncObjects(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle disp; + NVVblankSyncObjectRec *pVblankSyncObject; + NvKmsVblankSyncObjectHandle handle; + NvU32 apiHead = 0; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (pOpenDev == NULL) { + return; + } + + /* For each pOpenDisp: */ + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, + pOpenDisp, disp) { + /* + * A single update state can handle changes across multiple heads on a + * given Disp. + */ + NVEvoUpdateState updateState = { }; + + /* For each head: */ + for (apiHead = 0; apiHead < ARRAY_LEN(pOpenDisp->vblankSyncObjectHandles); apiHead++) { + NVEvoApiHandlesRec *pHandles = + &pOpenDisp->vblankSyncObjectHandles[apiHead]; + + /* For each still-active vblank sync object: */ + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pHandles, + pVblankSyncObject, handle) { + DisableAndCleanVblankSyncObject(pOpenDisp->pDispEvo, apiHead, + pVblankSyncObject, + &updateState); + /* Remove the handle from the map. */ + nvEvoDestroyApiHandle(pHandles, handle); + } + } + + if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) { + /* + * Instruct hardware to execute the staged commands from the + * ConfigureVblankSyncObject() calls (inherent in + * DisableAndCleanVblankSyncObject()) above. This will set up + * and wait for a notification that the hardware execution + * has completed. 
+ */ + nvEvoUpdateAndKickOff(pOpenDisp->pDispEvo, TRUE, &updateState, + TRUE); + } + } +} + +static void DisableRemainingVblankSemControls( + struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle dispHandle; + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, + pOpenDisp, + dispHandle) { + + NVVblankSemControl *pVblankSemControl; + NvKmsGenericHandle vblankSemControlHandle; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->vblankSemControlHandles, + pVblankSemControl, + vblankSemControlHandle) { + NvBool ret = + nvEvoDisableVblankSemControl(pDevEvo, pVblankSemControl); + + if (!ret) { + nvAssert(!"implicit disable of vblank sem control failed."); + } + nvEvoDestroyApiHandle(&pOpenDisp->vblankSemControlHandles, + vblankSemControlHandle); + } + } +} + +static void FreeDeviceReference(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev) +{ + /* Disable all client-owned vblank sync objects that still exist. */ + DisableRemainingVblankSyncObjects(pOpen, pOpenDev); + + DisableRemainingVblankSemControls(pOpen, pOpenDev); + + FreeSwapGroups(pOpenDev); + + UnregisterDeferredRequestFifos(pOpenDev); + + nvEvoFreeClientSurfaces(pOpenDev->pDevEvo, pOpenDev, + &pOpenDev->surfaceHandles); + + if (!nvFreeDevEvo(pOpenDev->pDevEvo)) { + // If this pOpenDev is the modeset owner, implicitly release it. Does + // nothing if this pOpenDev is not the modeset owner. + // + // If nvFreeDevEvo() freed the device, then it also implicitly released + // ownership. + ReleaseModesetOwnership(pOpenDev); + + nvAssert(pOpenDev->pDevEvo->modesetOwner != pOpenDev); + + // If this pOpenDev is the modeset sub-owner, implicitly release it. + if (pOpenDev->pDevEvo->modesetSubOwner == pOpenDev) { + pOpenDev->pDevEvo->modesetSubOwner = NULL; + pOpenDev->pDevEvo->modesetOwnerOrSubOwnerChanged = TRUE; + } + } + + nvFreePerOpenDev(pOpen, pOpenDev); +} + +/*! + * Free the specified device. + */ +static NvBool FreeDevice(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsFreeDeviceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + FreeDeviceReference(pOpen, pOpenDev); + + return TRUE; +} + + +/*! + * Get the disp data. This information should remain static for the + * lifetime of the disp. + */ +static NvBool QueryDisp(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryDispParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + const NVEvoSubDeviceRec *pSubDevice; + NVDispEvoPtr pDispEvo; + NvU32 connector; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pDispEvo = pOpenDisp->pDispEvo; + + // Don't include dynamic displays in validDpys. The data returned here is + // supposed to be static for the lifetime of the pDispEvo. 
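+    // For example, with hypothetical dpy sets: if validDisplays contains
+    // {DFP-0, DFP-1, DFP-2} and dynamicDpyIds contains {DFP-2}, then
+    // validDpys is reported as {DFP-0, DFP-1}.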
+    pParams->reply.validDpys =
+        nvDpyIdListMinusDpyIdList(pDispEvo->validDisplays,
+                                  pDispEvo->dynamicDpyIds);
+    pParams->reply.bootDpys = pDispEvo->bootDisplays;
+    pParams->reply.muxDpys = pDispEvo->muxDisplays;
+    pParams->reply.frameLockHandle = pOpenDisp->frameLockHandle;
+    pParams->reply.numConnectors = nvListCount(&pDispEvo->connectorList);
+
+    ct_assert(ARRAY_LEN(pParams->reply.connectorHandles) ==
+              ARRAY_LEN(pOpenDisp->connector));
+
+    for (connector = 0; connector < ARRAY_LEN(pParams->reply.connectorHandles);
+         connector++) {
+        pParams->reply.connectorHandles[connector] =
+            pOpenDisp->connector[connector].nvKmsApiHandle;
+    }
+
+    pSubDevice = pDispEvo->pDevEvo->pSubDevices[pDispEvo->displayOwner];
+    if (pSubDevice != NULL) {
+        ct_assert(sizeof(pParams->reply.gpuString) >=
+                  sizeof(pSubDevice->gpuString));
+        nvkms_memcpy(pParams->reply.gpuString, pSubDevice->gpuString,
+                     sizeof(pSubDevice->gpuString));
+    }
+
+    return TRUE;
+}
+
+
+/*!
+ * Get the connector static data.  This information should remain static
+ * for the lifetime of the connector.
+ */
+static NvBool QueryConnectorStaticData(struct NvKmsPerOpen *pOpen,
+                                       void *pParamsVoid)
+{
+    struct NvKmsQueryConnectorStaticDataParams *pParams = pParamsVoid;
+    struct NvKmsPerOpenConnector *pOpenConnector;
+    NVConnectorEvoPtr pConnectorEvo;
+
+    pOpenConnector = GetPerOpenConnector(pOpen,
+                                         pParams->request.deviceHandle,
+                                         pParams->request.dispHandle,
+                                         pParams->request.connectorHandle);
+    if (pOpenConnector == NULL) {
+        return FALSE;
+    }
+
+    nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));
+
+    pConnectorEvo = pOpenConnector->pConnectorEvo;
+
+    pParams->reply.dpyId = pConnectorEvo->displayId;
+    pParams->reply.isDP = nvConnectorUsesDPLib(pConnectorEvo) ||
+                          nvConnectorIsDPSerializer(pConnectorEvo);
+    pParams->reply.legacyTypeIndex = pConnectorEvo->legacyTypeIndex;
+    pParams->reply.type = pConnectorEvo->type;
+    pParams->reply.typeIndex = pConnectorEvo->typeIndex;
+    pParams->reply.signalFormat = pConnectorEvo->signalFormat;
+    pParams->reply.physicalIndex = pConnectorEvo->physicalIndex;
+    pParams->reply.physicalLocation = pConnectorEvo->physicalLocation;
+
+    pParams->reply.isLvds =
+        (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) &&
+        (pConnectorEvo->or.protocol ==
+         NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM);
+
+    pParams->reply.locationOnChip = (pConnectorEvo->or.location ==
+                                     NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP);
+    return TRUE;
+}
+
+
+/*!
+ * Get the connector dynamic data.  This information reflects changes to
+ * the connector over time (e.g. for DisplayPort MST devices).
+ */
+static NvBool QueryConnectorDynamicData(struct NvKmsPerOpen *pOpen,
+                                        void *pParamsVoid)
+{
+    struct NvKmsQueryConnectorDynamicDataParams *pParams = pParamsVoid;
+    struct NvKmsPerOpenConnector *pOpenConnector;
+    NVConnectorEvoPtr pConnectorEvo;
+    NVDispEvoPtr pDispEvo;
+    NVDpyEvoPtr pDpyEvo;
+
+    pOpenConnector = GetPerOpenConnector(pOpen,
+                                         pParams->request.deviceHandle,
+                                         pParams->request.dispHandle,
+                                         pParams->request.connectorHandle);
+    if (pOpenConnector == NULL) {
+        return FALSE;
+    }
+
+    nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));
+
+    pConnectorEvo = pOpenConnector->pConnectorEvo;
+    pDispEvo = pConnectorEvo->pDispEvo;
+
+    if (nvConnectorUsesDPLib(pConnectorEvo)) {
+        pParams->reply.detectComplete = pConnectorEvo->detectComplete;
+    } else {
+        pParams->reply.detectComplete = TRUE;
+    }
+
+    // Find the dynamic dpys on this connector. 
+ pParams->reply.dynamicDpyIdList = nvEmptyDpyIdList(); + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->dynamicDpyIds, pDispEvo) { + if (pDpyEvo->pConnectorEvo == pConnectorEvo) { + pParams->reply.dynamicDpyIdList = + nvAddDpyIdToDpyIdList(pDpyEvo->id, + pParams->reply.dynamicDpyIdList); + } + } + + return TRUE; +} + + +/*! + * Get the static data for the specified dpy. This information should + * remain static for the lifetime of the dpy. + */ +static NvBool QueryDpyStaticData(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryDpyStaticDataParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pParams->reply.connectorHandle = + ConnectorEvoToConnectorHandle(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pDpyEvo->pConnectorEvo); + /* + * All pConnectorEvos should have corresponding pOpenConnectors, + * so we should always be able to find the NvKmsConnectorHandle. + */ + nvAssert(pParams->reply.connectorHandle != 0); + + pParams->reply.type = pDpyEvo->pConnectorEvo->legacyType; + + if (pDpyEvo->dp.addressString != NULL) { + const size_t len = nvkms_strlen(pDpyEvo->dp.addressString) + 1; + nvkms_memcpy(pParams->reply.dpAddress, pDpyEvo->dp.addressString, + NV_MIN(sizeof(pParams->reply.dpAddress), len)); + pParams->reply.dpAddress[sizeof(pParams->reply.dpAddress) - 1] = '\0'; + } + + pParams->reply.mobileInternal = pDpyEvo->internal; + pParams->reply.isDpMST = nvDpyEvoIsDPMST(pDpyEvo); + pParams->reply.headMask = nvDpyGetPossibleApiHeadsMask(pDpyEvo); + + return TRUE; +} + + +/*! + * Get the dynamic data for the specified dpy. This information can + * change when a hotplug occurs. + */ +static NvBool QueryDpyDynamicData(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryDpyDynamicDataParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + return nvDpyGetDynamicData(pDpyEvo, pParams); +} + +/* Store a copy of the user's infoString pointer, so we can copy out to it when + * we're done. */ +struct InfoStringExtraUserStateCommon +{ + NvU64 userInfoString; +}; + +/* + * Allocate a kernel buffer to populate the infoString which will be copied out + * to userspace upon completion. + */ +static NvBool InfoStringPrepUserCommon( + NvU32 infoStringSize, + NvU64 *ppInfoString, + struct InfoStringExtraUserStateCommon *pExtra) +{ + char *kernelInfoString = NULL; + + if (infoStringSize == 0) { + *ppInfoString = 0; + return TRUE; + } + + if (!nvKmsNvU64AddressIsSafe(*ppInfoString)) { + return FALSE; + } + + if (infoStringSize > NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH) { + return FALSE; + } + + kernelInfoString = nvCalloc(1, infoStringSize); + if (kernelInfoString == NULL) { + return FALSE; + } + + pExtra->userInfoString = *ppInfoString; + *ppInfoString = nvKmsPointerToNvU64(kernelInfoString); + + return TRUE; +} + +/* + * Copy the infoString out to userspace and free the kernel-internal buffer. 
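+ *
+ * Together with InfoStringPrepUserCommon(), this implements a simple
+ * bounce buffer: "prep" validates the user pointer and substitutes a
+ * kernel allocation, the ioctl handler writes the info string into that
+ * kernel buffer, and "done" copies at most infoStringLenWritten bytes
+ * back to userspace and frees the buffer.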
+ */ +static NvBool InfoStringDoneUserCommon( + NvU32 infoStringSize, + NvU64 pInfoString, + NvU32 *infoStringLenWritten, + struct InfoStringExtraUserStateCommon *pExtra) +{ + char *kernelInfoString = nvKmsNvU64ToPointer(pInfoString); + int status; + NvBool ret; + + if ((infoStringSize == 0) || (*infoStringLenWritten == 0)) { + ret = TRUE; + goto done; + } + + nvAssert(*infoStringLenWritten <= infoStringSize); + + status = nvkms_copyout(pExtra->userInfoString, + kernelInfoString, + *infoStringLenWritten); + if (status == 0) { + ret = TRUE; + } else { + ret = FALSE; + *infoStringLenWritten = 0; + } + +done: + nvFree(kernelInfoString); + + return ret; +} + +struct NvKmsValidateModeIndexExtraUserState +{ + struct InfoStringExtraUserStateCommon common; +}; + +static NvBool ValidateModeIndexPrepUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; + struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid; + + return InfoStringPrepUserCommon( + pParams->request.infoStringSize, + &pParams->request.pInfoString, + &pExtra->common); +} + +static NvBool ValidateModeIndexDoneUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; + struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid; + + return InfoStringDoneUserCommon( + pParams->request.infoStringSize, + pParams->request.pInfoString, + &pParams->reply.infoStringLenWritten, + &pExtra->common); +} + +/*! + * Validate the requested mode. + */ +static NvBool ValidateModeIndex(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + nvValidateModeIndex(pDpyEvo, &pParams->request, &pParams->reply); + + return TRUE; +} + +struct NvKmsValidateModeExtraUserState +{ + struct InfoStringExtraUserStateCommon common; +}; + +static NvBool ValidateModePrepUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsValidateModeParams *pParams = pParamsVoid; + struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid; + + return InfoStringPrepUserCommon( + pParams->request.infoStringSize, + &pParams->request.pInfoString, + &pExtra->common); +} + +static NvBool ValidateModeDoneUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsValidateModeParams *pParams = pParamsVoid; + struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid; + + return InfoStringDoneUserCommon( + pParams->request.infoStringSize, + pParams->request.pInfoString, + &pParams->reply.infoStringLenWritten, + &pExtra->common); +} + +/*! + * Validate the requested mode. 
+ */ +static NvBool ValidateMode(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsValidateModeParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + nvValidateModeEvo(pDpyEvo, &pParams->request, &pParams->reply); + + return TRUE; +} + +static NvBool +CopyInOneLut(NvU64 pRampsUser, struct NvKmsLutRamps **ppRampsKernel) +{ + struct NvKmsLutRamps *pRampsKernel = NULL; + int status; + + if (pRampsUser == 0) { + return TRUE; + } + + if (!nvKmsNvU64AddressIsSafe(pRampsUser)) { + return FALSE; + } + + pRampsKernel = nvAlloc(sizeof(*pRampsKernel)); + if (!pRampsKernel) { + return FALSE; + } + + status = nvkms_copyin((char *)pRampsKernel, pRampsUser, + sizeof(*pRampsKernel)); + if (status != 0) { + nvFree(pRampsKernel); + return FALSE; + } + + *ppRampsKernel = pRampsKernel; + + return TRUE; +} + +static NvBool +CopyInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams) +{ + struct NvKmsLutRamps *pInputRamps = NULL; + struct NvKmsLutRamps *pOutputRamps = NULL; + + if (!CopyInOneLut(pCommonLutParams->input.pRamps, &pInputRamps)) { + goto fail; + } + if (!CopyInOneLut(pCommonLutParams->output.pRamps, &pOutputRamps)) { + goto fail; + } + + pCommonLutParams->input.pRamps = nvKmsPointerToNvU64(pInputRamps); + pCommonLutParams->output.pRamps = nvKmsPointerToNvU64(pOutputRamps); + + return TRUE; + +fail: + nvFree(pInputRamps); + nvFree(pOutputRamps); + return FALSE; +} + +static void +FreeCopiedInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams) +{ + struct NvKmsLutRamps *pInputRamps = + nvKmsNvU64ToPointer(pCommonLutParams->input.pRamps); + struct NvKmsLutRamps *pOutputRamps = + nvKmsNvU64ToPointer(pCommonLutParams->output.pRamps); + + nvFree(pInputRamps); + nvFree(pOutputRamps); +} + +/* No extra user state needed for SetMode; although we lose the user pointers + * for the LUT ramps after copying them in, that's okay because we don't need + * to copy them back out again. */ +struct NvKmsSetModeExtraUserState +{ +}; + +/*! + * Copy in any data referenced by pointer for the SetMode request. Currently + * this is only the LUT ramps. + */ +static NvBool SetModePrepUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsSetModeParams *pParams = pParamsVoid; + struct NvKmsSetModeRequest *pReq = &pParams->request; + NvU32 disp, apiHead, dispFailed, apiHeadFailed; + + /* Iterate over all of the common LUT ramp pointers embedded in the SetMode + * request, and copy in each one. */ + for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { + for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) { + struct NvKmsSetLutCommonParams *pCommonLutParams = + &pReq->disp[disp].head[apiHead].flip.lut; + + if (!CopyInLutParams(pCommonLutParams)) { + /* Remember how far we got through these loops before we + * failed, so that we can undo everything up to this point. */ + dispFailed = disp; + apiHeadFailed = apiHead; + goto fail; + } + } + } + + return TRUE; + +fail: + for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { + for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) { + struct NvKmsSetLutCommonParams *pCommonLutParams = + &pReq->disp[disp].head[apiHead].flip.lut; + + if (disp > dispFailed || + (disp == dispFailed && apiHead >= apiHeadFailed)) { + break; + } + + FreeCopiedInLutParams(pCommonLutParams); + } + } + + return FALSE; +} + +/*! 
+ * Free buffers allocated in SetModePrepUser. + */ +static NvBool SetModeDoneUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsSetModeParams *pParams = pParamsVoid; + struct NvKmsSetModeRequest *pReq = &pParams->request; + NvU32 disp, apiHead; + + for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { + for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) { + struct NvKmsSetLutCommonParams *pCommonLutParams = + &pReq->disp[disp].head[apiHead].flip.lut; + + FreeCopiedInLutParams(pCommonLutParams); + } + } + + return TRUE; +} + +/*! + * Perform a modeset on the device. + */ +static NvBool SetMode(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetModeParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + return nvSetDispModeEvo(pOpenDev->pDevEvo, pOpenDev, + &pParams->request, &pParams->reply, + FALSE /* bypassComposition */, + TRUE /* doRasterLock */); +} + +/*! + * Set the cursor image. + */ +static NvBool SetCursorImage(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetCursorImageParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDispEvoPtr pDispEvo; + + if (!GetPerOpenDevAndDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + &pOpenDev, + &pOpenDisp)) { + return FALSE; + } + + pDispEvo = pOpenDisp->pDispEvo; + + if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { + return FALSE; + } + + return nvHsIoctlSetCursorImage(pDispEvo, + pOpenDev, + &pOpenDev->surfaceHandles, + pParams->request.head, + &pParams->request.common); +} + +/*! + * Change the cursor position. + */ +static NvBool MoveCursor(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsMoveCursorParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDispEvoPtr pDispEvo; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + pDispEvo = pOpenDisp->pDispEvo; + + if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { + return FALSE; + } + + return nvHsIoctlMoveCursor(pDispEvo, + pParams->request.head, + &pParams->request.common); +} + +/* No extra user state needed for SetLut; although we lose the user pointers + * for the LUT ramps after copying them in, that's okay because we don't need + * to copy them back out again. */ +struct NvKmsSetLutExtraUserState +{ +}; + +/*! + * Copy in any data referenced by pointer for the SetLut request. Currently + * this is only the LUT ramps. + */ +static NvBool SetLutPrepUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsSetLutParams *pParams = pParamsVoid; + struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common; + + return CopyInLutParams(pCommonLutParams); +} + +/*! + * Free buffers allocated in SetLutPrepUser. + */ +static NvBool SetLutDoneUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsSetLutParams *pParams = pParamsVoid; + struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common; + + FreeCopiedInLutParams(pCommonLutParams); + + return TRUE; +} + +/*! + * Set the LUT on the specified head. 
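+ * Changing the LUT affects the entire head, so this requires permission
+ * to alter all layers on that head.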
+ */ +static NvBool SetLut(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetLutParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDevEvoPtr pDevEvo; + NVDispEvoPtr pDispEvo; + NvU8 allLayersMask; + + if (!GetPerOpenDevAndDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + &pOpenDev, + &pOpenDisp)) { + return FALSE; + } + + pDevEvo = pOpenDev->pDevEvo; + pDispEvo = pOpenDisp->pDispEvo; + + if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { + return FALSE; + } + + if (!nvValidateSetLutCommonParams(pDispEvo->pDevEvo, + &pParams->request.common)) { + return FALSE; + } + + /* Changing the LUTs requires permission to alter all layers. */ + allLayersMask = NVBIT(pDevEvo->apiHead[pParams->request.head].numLayers) - 1; + if (!nvCheckLayerPermissions(pOpenDev, pDevEvo, + pDispEvo->displayOwner, + pParams->request.head, + allLayersMask)) { + return FALSE; + } + + nvEvoSetLut(pDispEvo, + pParams->request.head, TRUE /* kickoff */, + &pParams->request.common); + + return TRUE; +} + +static NvBool CheckLutNotifier(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsCheckLutNotifierParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDispEvoPtr pDispEvo; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + pDispEvo = pOpenDisp->pDispEvo; + + if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { + return FALSE; + } + + if (pParams->request.waitForCompletion) { + nvEvoWaitForLUTNotifier(pDispEvo, pParams->request.head); + } + + pParams->reply.complete = nvEvoIsLUTNotifierComplete(pDispEvo, + pParams->request.head); + + return TRUE; +} + +/*! + * Return whether the specified head is idle. + */ +static NvBool IdleMainLayerChannelCheckIdleOneApiHead( + NVDispEvoPtr pDispEvo, + NvU32 apiHead) +{ + if (pDispEvo->pHsChannel[apiHead] != NULL) { + return nvHsIdleFlipQueue(pDispEvo->pHsChannel[apiHead], + FALSE /* force */); + } + return nvIdleMainLayerChannelCheckIdleOneApiHead(pDispEvo, apiHead); +} + +/*! + * Return whether all heads described in pRequest are idle. + * + * Note that we loop over all requested heads, rather than return FALSE once we + * find the first non-idle head, because checking for idle has side effects: in + * headSurface, checking for idle gives the headSurface flip queue the + * opportunity to proceed another frame. + */ +static NvBool IdleBaseChannelCheckIdle( + NVDevEvoPtr pDevEvo, + const struct NvKmsIdleBaseChannelRequest *pRequest, + struct NvKmsIdleBaseChannelReply *pReply) +{ + NvU32 apiHead, sd; + NVDispEvoPtr pDispEvo; + NvBool allIdle = TRUE; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + + NvBool idle; + + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + continue; + } + + if ((pRequest->subDevicesPerHead[apiHead] & NVBIT(sd)) == 0) { + continue; + } + + idle = IdleMainLayerChannelCheckIdleOneApiHead(pDispEvo, apiHead); + + if (!idle) { + pReply->stopSubDevicesPerHead[apiHead] |= NVBIT(sd); + } + allIdle = allIdle && idle; + } + } + + return allIdle; +} + +/*! + * Idle all requested heads. + * + * First, wait for the heads to idle naturally. If a timeout is exceeded, then + * force the non-idle heads to idle, and record these in pReply. 
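+ * Each head,sd pair that still needs to be stopped is reported in
+ * pReply->stopSubDevicesPerHead.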
+ */ +static NvBool IdleBaseChannelAll( + NVDevEvoPtr pDevEvo, + const struct NvKmsIdleBaseChannelRequest *pRequest, + struct NvKmsIdleBaseChannelReply *pReply) +{ + NvU64 startTime = 0; + + /* + * Each element in subDevicesPerHead[] must be large enough to hold one bit + * per subdevice. + */ + ct_assert(NVKMS_MAX_SUBDEVICES <= + (sizeof(pRequest->subDevicesPerHead[0]) * 8)); + + /* Loop until all head,sd pairs are idle, or we time out. */ + do { + const NvU32 timeout = 2000000; /* 2 seconds */ + + + /* + * Clear the pReply data, + * IdleBaseChannelCheckIdle() will fill it afresh. + */ + nvkms_memset(pReply, 0, sizeof(*pReply)); + + /* If all heads are idle, we are done. */ + if (IdleBaseChannelCheckIdle(pDevEvo, pRequest, pReply)) { + return TRUE; + } + + /* Break out of the loop if we exceed the timeout. */ + if (nvExceedsTimeoutUSec(pDevEvo, &startTime, timeout)) { + break; + } + + /* At least one head is not idle; yield, and try again. */ + nvkms_yield(); + + } while (TRUE); + + return TRUE; +} + + +/*! + * Wait for the requested base channels to be idle, returning whether + * stopping the base channels was necessary. + */ +static NvBool IdleBaseChannel(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsIdleBaseChannelParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + /* Only a modeset owner can idle base. */ + + if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { + return FALSE; + } + + return IdleBaseChannelAll(pOpenDev->pDevEvo, + &pParams->request, &pParams->reply); +} + +/* No extra user state needed for Flip; although we lose the user pointers + * for the LUT ramps after copying them in, that's okay because we don't need + * to copy them back out again. */ +struct NvKmsFlipExtraUserState +{ + // Nothing needed. +}; + +/*! + * Copy in any data referenced by pointer for the Flip request. Currently + * this is the flip head request array and the LUT ramps. + */ +static NvBool FlipPrepUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsFlipParams *pParams = pParamsVoid; + struct NvKmsFlipRequest *pRequest = &pParams->request; + struct NvKmsFlipRequestOneHead *pFlipHeadKernel = NULL; + NvU64 pFlipHeadUser = pRequest->pFlipHead; + size_t size; + NvU32 apiHead, apiHeadFailed; + int status; + + if (!nvKmsNvU64AddressIsSafe(pFlipHeadUser)) { + return FALSE; + } + + if (pRequest->numFlipHeads <= 0 || + pRequest->numFlipHeads > NV_MAX_FLIP_REQUEST_HEADS) { + return FALSE; + } + + size = sizeof(*pFlipHeadKernel) * pRequest->numFlipHeads; + pFlipHeadKernel = nvAlloc(size); + if (!pFlipHeadKernel) { + return FALSE; + } + + status = nvkms_copyin((char *)pFlipHeadKernel, pFlipHeadUser, size); + if (status != 0) { + nvFree(pFlipHeadKernel); + return FALSE; + } + + /* Iterate over all of the common LUT ramp pointers embedded in the Flip + * request, and copy in each one. */ + for (apiHead = 0; apiHead < pRequest->numFlipHeads; apiHead++) { + struct NvKmsSetLutCommonParams *pCommonLutParams = + &pFlipHeadKernel[apiHead].flip.lut; + + if (!CopyInLutParams(pCommonLutParams)) { + /* Remember how far we got through this loop before we + * failed, so that we can undo everything up to this point. 
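+             * The fail_lut path frees the LUT ramps already copied in for
+             * heads [0, apiHeadFailed).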
*/ + apiHeadFailed = apiHead; + goto fail_lut; + } + } + + pRequest->pFlipHead = nvKmsPointerToNvU64(pFlipHeadKernel); + + return TRUE; + +fail_lut: + for (apiHead = 0; apiHead < apiHeadFailed; apiHead++) { + struct NvKmsSetLutCommonParams *pCommonLutParams = + &pFlipHeadKernel[apiHead].flip.lut; + + FreeCopiedInLutParams(pCommonLutParams); + } + nvFree(pFlipHeadKernel); + return FALSE; +} + +/*! + * Free buffers allocated in FlipPrepUser. + */ +static NvBool FlipDoneUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsFlipParams *pParams = pParamsVoid; + struct NvKmsFlipRequest *pRequest = &pParams->request; + struct NvKmsFlipRequestOneHead *pFlipHead = nvKmsNvU64ToPointer(pRequest->pFlipHead); + NvU32 apiHead; + + for (apiHead = 0; apiHead < pRequest->numFlipHeads; apiHead++) { + struct NvKmsSetLutCommonParams *pCommonLutParams = + &pFlipHead[apiHead].flip.lut; + + FreeCopiedInLutParams(pCommonLutParams); + } + nvFree(pFlipHead); + /* The request is not copied back out to userspace (only the reply is), so + * we don't need to worry about restoring the user pointer */ + pRequest->pFlipHead = 0; + + return TRUE; +} + +/*! + * For each entry in the array pointed to by 'pFlipHead', of length + * 'numFlipHeads', verify that the sd and head values specified are within + * bounds and that there are no duplicates. + */ +static NvBool ValidateFlipHeads( + NVDevEvoPtr pDevEvo, + const struct NvKmsFlipRequestOneHead *pFlipHead, + NvU32 numFlipHeads) +{ + NvU32 i; + ct_assert(NVKMS_MAX_HEADS_PER_DISP <= 8); + NvU8 apiHeadsUsed[NVKMS_MAX_SUBDEVICES] = { }; + + for (i = 0; i < numFlipHeads; i++) { + const NvU32 sd = pFlipHead[i].sd; + const NvU32 apiHead = pFlipHead[i].head; + + if (sd >= pDevEvo->numSubDevices) { + return FALSE; + } + if (apiHead >= pDevEvo->numApiHeads) { + return FALSE; + } + if ((apiHeadsUsed[sd] & (1 << apiHead)) != 0) { + return FALSE; + } + apiHeadsUsed[sd] |= (1 << apiHead); + } + + return TRUE; +} + +/*! + * Flip the specified head. + */ +static NvBool Flip(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsFlipParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + NVDevEvoPtr pDevEvo = NULL; + const struct NvKmsFlipRequest *pRequest = &pParams->request; + const struct NvKmsFlipRequestOneHead *pFlipHead = + nvKmsNvU64ToPointer(pRequest->pFlipHead); + const NvU32 numFlipHeads = pRequest->numFlipHeads; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + pDevEvo = pOpenDev->pDevEvo; + + if (!ValidateFlipHeads(pDevEvo, pFlipHead, numFlipHeads)) { + return FALSE; + } + + return nvHsIoctlFlip(pDevEvo, pOpenDev, + pFlipHead, numFlipHeads, + pRequest->commit, + &pParams->reply); +} + + +/*! + * Record whether this client is interested in the specified dynamic + * dpy. + */ +static NvBool DeclareDynamicDpyInterest(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + /* XXX NVKMS TODO: implement me. */ + + return TRUE; +} + + +/*! + * Register a surface with the specified per-open + device. + */ +static NvBool RegisterSurface(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsRegisterSurfaceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + /* + * Only allow userspace clients to specify memory objects by FD. + * This prevents clients from specifying (hClient, hObject) tuples that + * really belong to other clients. 
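+     * Kernel-space clients are trusted, so they may still specify memory
+     * objects by (hClient, hObject) tuple.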
+ */ + if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE && + !pParams->request.useFd) { + return FALSE; + } + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + nvEvoRegisterSurface(pOpenDev->pDevEvo, pOpenDev, pParams, + NvHsMapPermissionsReadOnly); + return pParams->reply.surfaceHandle != 0; +} + + +/*! + * Unregister a surface from the specified per-open + device. + */ +static NvBool UnregisterSurface(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsUnregisterSurfaceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + /* Fail the ioctl if a non-privileged client sets this */ + if (pOpen->clientType != NVKMS_CLIENT_KERNEL_SPACE && + pParams->request.skipSync) { + return FALSE; + } + + nvEvoUnregisterSurface(pOpenDev->pDevEvo, pOpenDev, + pParams->request.surfaceHandle, + FALSE /* skipUpdate */, + pParams->request.skipSync); + return TRUE; +} + + +/*! + * Associate a surface with the NvKmsPerOpen specified by + * NvKmsGrantSurfaceParams::request::fd. + */ +static NvBool GrantSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsGrantSurfaceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + NVSurfaceEvoPtr pSurfaceEvo; + struct NvKmsPerOpen *pOpenFd; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + pSurfaceEvo = + nvEvoGetSurfaceFromHandleNoHWAccess(pOpenDev->pDevEvo, + &pOpenDev->surfaceHandles, + pParams->request.surfaceHandle); + if (pSurfaceEvo == NULL) { + return FALSE; + } + + if (nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo)) { + return FALSE; + } + + /* Only the owner of the surface can grant it to other clients. */ + + if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, + pParams->request.surfaceHandle)) { + return FALSE; + } + + pOpenFd = nvkms_get_per_open_data(pParams->request.fd); + + if (pOpenFd == NULL) { + return FALSE; + } + + if (!AssignNvKmsPerOpenType( + pOpenFd, NvKmsPerOpenTypeGrantSurface, FALSE)) { + return FALSE; + } + + nvEvoIncrementSurfaceStructRefCnt(pSurfaceEvo); + pOpenFd->grantSurface.pSurfaceEvo = pSurfaceEvo; + + return TRUE; +} + + +/*! + * Retrieve the surface and device associated with + * NvKmsAcquireSurfaceParams::request::fd, and give the client an + * NvKmsSurfaceHandle to the surface. + */ +static NvBool AcquireSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsAcquireSurfaceParams *pParams = pParamsVoid; + struct NvKmsPerOpen *pOpenFd; + struct NvKmsPerOpenDev *pOpenDev; + NvKmsSurfaceHandle surfaceHandle = 0; + + pOpenFd = nvkms_get_per_open_data(pParams->request.fd); + + if (pOpenFd == NULL) { + return FALSE; + } + + if (pOpenFd->type != NvKmsPerOpenTypeGrantSurface) { + return FALSE; + } + + nvAssert(pOpenFd->grantSurface.pSurfaceEvo != NULL); + + if (pOpenFd->grantSurface.pSurfaceEvo->rmRefCnt == 0) { /* orphan */ + return FALSE; + } + + if (nvEvoSurfaceRefCntsTooLarge(pOpenFd->grantSurface.pSurfaceEvo)) { + return FALSE; + } + + /* Since the surface isn't orphaned, it should have an owner, with a + * pOpenDev and a pDevEvo. Get the pOpenDev for the acquiring client that + * matches the owner's pDevEvo. 
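+     * If the acquiring client has not allocated that device, fail.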
*/ + nvAssert(pOpenFd->grantSurface.pSurfaceEvo->owner.pOpenDev->pDevEvo != NULL); + pOpenDev = DevEvoToOpenDev(pOpen, + pOpenFd->grantSurface.pSurfaceEvo->owner.pOpenDev->pDevEvo); + + if (pOpenDev == NULL) { + return FALSE; + } + + surfaceHandle = + nvEvoCreateApiHandle(&pOpenDev->surfaceHandles, + pOpenFd->grantSurface.pSurfaceEvo); + + if (surfaceHandle == 0) { + return FALSE; + } + + nvEvoIncrementSurfaceStructRefCnt(pOpenFd->grantSurface.pSurfaceEvo); + + pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; + pParams->reply.surfaceHandle = surfaceHandle; + + return TRUE; +} + +static NvBool ReleaseSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsReleaseSurfaceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + nvEvoReleaseSurface(pOpenDev->pDevEvo, pOpenDev, + pParams->request.surfaceHandle); + return TRUE; +} + + +/*! + * Associate a swap group with the NvKmsPerOpen specified by + * NvKmsGrantSwapGroupParams::request::fd. + */ +static NvBool GrantSwapGroup(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsGrantSwapGroupParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + NVSwapGroupRec *pSwapGroup; + struct NvKmsPerOpen *pOpenFd; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { + return FALSE; + } + + pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles, + pParams->request.swapGroupHandle); + + if (pSwapGroup == NULL) { + return FALSE; + } + + pOpenFd = nvkms_get_per_open_data(pParams->request.fd); + + if (pOpenFd == NULL) { + return FALSE; + } + + /* + * Increment the swap group refcnt while granting it so the SwapGroup + * won't be freed out from under the grant fd. To complement this, + * nvKmsClose() on NvKmsPerOpenTypeGrantSwapGroup calls + * DecrementSwapGroupRefCnt(). + */ + if (!nvHsIncrementSwapGroupRefCnt(pSwapGroup)) { + return FALSE; + } + + if (!AssignNvKmsPerOpenType( + pOpenFd, NvKmsPerOpenTypeGrantSwapGroup, FALSE)) { + nvHsDecrementSwapGroupRefCnt(pSwapGroup); + return FALSE; + } + + /* we must not fail beyond this point */ + + pOpenFd->grantSwapGroup.pSwapGroup = pSwapGroup; + + pOpenFd->grantSwapGroup.pDevEvo = pOpenDev->pDevEvo; + + return TRUE; +} + + +/*! + * Retrieve the swap group and device associated with + * NvKmsAcquireSwapGroupParams::request::fd, give the client an + * NvKmsSwapGroupHandle to the swap group, and increment the + * swap group's reference count. 
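+ * The reference is dropped when the client calls ReleaseSwapGroup().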
+ */ +static NvBool AcquireSwapGroup(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsAcquireSwapGroupParams *pParams = pParamsVoid; + struct NvKmsPerOpen *pOpenFd; + struct NvKmsPerOpenDev *pOpenDev; + NvKmsSwapGroupHandle swapGroupHandle = 0; + + pOpenFd = nvkms_get_per_open_data(pParams->request.fd); + + if (pOpenFd == NULL) { + return FALSE; + } + + if (pOpenFd->type != NvKmsPerOpenTypeGrantSwapGroup) { + return FALSE; + } + + /* + * pSwapGroup is only freed when its last reference goes away; if pOpenFd + * hasn't yet been closed, then its reference incremented in + * GrantSwapGroup() couldn't have been decremented in nvKmsClose() + */ + nvAssert(pOpenFd->grantSwapGroup.pSwapGroup != NULL); + nvAssert(pOpenFd->grantSwapGroup.pDevEvo != NULL); + + if (pOpenFd->grantSwapGroup.pSwapGroup->zombie) { + return FALSE; + } + + pOpenDev = DevEvoToOpenDev(pOpen, pOpenFd->grantSwapGroup.pDevEvo); + + if (pOpenDev == NULL) { + return FALSE; + } + + if (nvEvoApiHandlePointerIsPresent(&pOpenDev->swapGroupHandles, + pOpenFd->grantSwapGroup.pSwapGroup)) { + return FALSE; + } + + if (!nvHsIncrementSwapGroupRefCnt(pOpenFd->grantSwapGroup.pSwapGroup)) { + return FALSE; + } + + swapGroupHandle = + nvEvoCreateApiHandle(&pOpenDev->swapGroupHandles, + pOpenFd->grantSwapGroup.pSwapGroup); + + if (swapGroupHandle == 0) { + nvHsDecrementSwapGroupRefCnt(pOpenFd->grantSwapGroup.pSwapGroup); + return FALSE; + } + + /* we must not fail beyond this point */ + + pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; + pParams->reply.swapGroupHandle = swapGroupHandle; + + return TRUE; +} + + +/*! + * Free this client's reference to the swap group. + * + * This is meant to be called by clients that have acquired the swap group + * handle through AcquireSwapGroup(). + */ +static NvBool ReleaseSwapGroup(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsReleaseSwapGroupParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + NVSwapGroupRec *pSwapGroup; + NvKmsSwapGroupHandle handle = pParams->request.swapGroupHandle; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + /* + * This may operate on a swap group that has already been freed + * (pSwapGroup->zombie is TRUE). + */ + pSwapGroup = nvHsGetSwapGroupStruct(&pOpenDev->swapGroupHandles, + handle); + if (pSwapGroup == NULL) { + return FALSE; + } + + nvEvoDestroyApiHandle(&pOpenDev->swapGroupHandles, handle); + + nvHsDecrementSwapGroupRefCnt(pSwapGroup); + + return TRUE; +} + +/*! + * Change the value of the specified attribute. + */ +static NvBool SetDpyAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetDpyAttributeParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + return nvSetDpyAttributeEvo(pDpyEvo, pParams); +} + + +/*! + * Get the value of the specified attribute. + */ +static NvBool GetDpyAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetDpyAttributeParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + return nvGetDpyAttributeEvo(pDpyEvo, pParams); +} + + +/*! + * Get the valid values of the specified attribute. 
+ */ +static NvBool GetDpyAttributeValidValues(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetDpyAttributeValidValuesParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + return nvGetDpyAttributeValidValuesEvo(pDpyEvo, pParams); +} + + +/*! + * Set the value of the specified attribute. + */ +static NvBool SetDispAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetDispAttributeParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + return nvSetDispAttributeEvo(pOpenDisp->pDispEvo, pParams); +} + + +/*! + * Get the value of the specified attribute. + */ +static NvBool GetDispAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetDispAttributeParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + return nvGetDispAttributeEvo(pOpenDisp->pDispEvo, pParams); +} + + +/*! + * Get the valid values of the specified attribute. + */ +static NvBool GetDispAttributeValidValues(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetDispAttributeValidValuesParams *pParams = pParamsVoid; + + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + return nvGetDispAttributeValidValuesEvo(pOpenDisp->pDispEvo, pParams); +} + + +/*! + * Get information about the specified framelock device. 
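+ * Currently this reports the gpuIds attached to the framelock device.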
+ */ +static NvBool QueryFrameLock(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryFrameLockParams *pParams = pParamsVoid; + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + const NVFrameLockEvoRec *pFrameLockEvo; + NvU32 gpu; + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pOpenFrameLock = + GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); + + if (pOpenFrameLock == NULL) { + return FALSE; + } + + pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; + + ct_assert(ARRAY_LEN(pFrameLockEvo->gpuIds) <= + ARRAY_LEN(pParams->reply.gpuIds)); + + for (gpu = 0; gpu < pFrameLockEvo->nGpuIds; gpu++) { + pParams->reply.gpuIds[gpu] = pFrameLockEvo->gpuIds[gpu]; + } + + return TRUE; +} + + +static NvBool SetFrameLockAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetFrameLockAttributeParams *pParams = pParamsVoid; + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + NVFrameLockEvoRec *pFrameLockEvo; + + pOpenFrameLock = + GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); + + if (pOpenFrameLock == NULL) { + return FALSE; + } + + pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; + + return nvSetFrameLockAttributeEvo(pFrameLockEvo, pParams); +} + + +static NvBool GetFrameLockAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetFrameLockAttributeParams *pParams = pParamsVoid; + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + const NVFrameLockEvoRec *pFrameLockEvo; + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pOpenFrameLock = + GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); + + if (pOpenFrameLock == NULL) { + return FALSE; + } + + pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; + + return nvGetFrameLockAttributeEvo(pFrameLockEvo, pParams); +} + + +static NvBool GetFrameLockAttributeValidValues(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetFrameLockAttributeValidValuesParams *pParams = pParamsVoid; + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + const NVFrameLockEvoRec *pFrameLockEvo; + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pOpenFrameLock = + GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); + + if (pOpenFrameLock == NULL) { + return FALSE; + } + + pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; + + return nvGetFrameLockAttributeValidValuesEvo(pFrameLockEvo, pParams); +} + + +/*! + * Pop the next event off of the client's event queue. + */ +static NvBool GetNextEvent(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetNextEventParams *pParams = pParamsVoid; + struct NvKmsPerOpenEventListEntry *pEntry; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (nvListIsEmpty(&pOpen->ioctl.eventList)) { + pParams->reply.valid = FALSE; + return TRUE; + } + + pEntry = nvListFirstEntry(&pOpen->ioctl.eventList, + struct NvKmsPerOpenEventListEntry, + eventListEntry); + + pParams->reply.valid = TRUE; + pParams->reply.event = pEntry->event; + + nvListDel(&pEntry->eventListEntry); + + nvFree(pEntry); + + if (nvListIsEmpty(&pOpen->ioctl.eventList)) { + nvkms_event_queue_changed(pOpen->pOpenKernel, FALSE); + } + + return TRUE; +} + + +/*! + * Record the client's event interest for the specified device. 
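+ * Events whose type is set in interestMask are queued to this client's
+ * event list, to be retrieved with GetNextEvent().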
+ */ +static NvBool DeclareEventInterest(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsDeclareEventInterestParams *pParams = pParamsVoid; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + pOpen->ioctl.eventInterestMask = pParams->request.interestMask; + + return TRUE; +} + +static NvBool ClearUnicastEvent(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsClearUnicastEventParams *pParams = pParamsVoid; + struct NvKmsPerOpen *pOpenFd = NULL; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + pOpenFd = nvkms_get_per_open_data(pParams->request.unicastEventFd); + + if (pOpenFd == NULL) { + return FALSE; + } + + if (pOpenFd->type != NvKmsPerOpenTypeUnicastEvent) { + return FALSE; + } + + nvkms_event_queue_changed(pOpenFd->pOpenKernel, FALSE); + + return TRUE; +} + +static NvBool SetLayerPosition(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetLayerPositionParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + /* XXX NVKMS HEADSURFACE TODO: intercept */ + + return nvLayerSetPositionEvo(pOpenDev->pDevEvo, &pParams->request); +} + +static NvBool GrabOwnership(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsGrabOwnershipParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + // The only kind of ownership right now is modeset ownership. + return GrabModesetOwnership(pOpenDev); +} + +static NvBool ReleaseOwnership(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsReleaseOwnershipParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + // The only kind of ownership right now is modeset ownership. + return ReleaseModesetOwnership(pOpenDev); +} + +static NvBool GrantPermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsGrantPermissionsParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpen *pOpenFd; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + /* Only a modeset owner can grant permissions. 
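+     * (More precisely, any client holding sub-owner permission or better
+     * may grant.)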
 */
+
+    if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
+        return FALSE;
+    }
+
+    if (!ValidateNvKmsPermissions(pOpenDev->pDevEvo,
+                                  &pParams->request.permissions,
+                                  pOpen->clientType)) {
+        return FALSE;
+    }
+
+    pOpenFd = nvkms_get_per_open_data(pParams->request.fd);
+
+    if (pOpenFd == NULL) {
+        return FALSE;
+    }
+
+    if (!AssignNvKmsPerOpenType(
+            pOpenFd, NvKmsPerOpenTypeGrantPermissions, FALSE)) {
+        return FALSE;
+    }
+
+    pOpenFd->grantPermissions.permissions = pParams->request.permissions;
+
+    pOpenFd->grantPermissions.pDevEvo = pOpenDev->pDevEvo;
+
+    return TRUE;
+}
+
+static NvBool AcquirePermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
+{
+    struct NvKmsAcquirePermissionsParams *pParams = pParamsVoid;
+    struct NvKmsPerOpenDev *pOpenDev;
+    struct NvKmsPerOpen *pOpenFd;
+    const struct NvKmsPermissions *pPermissionsNew;
+    enum NvKmsPermissionsType type;
+
+    pOpenFd = nvkms_get_per_open_data(pParams->request.fd);
+
+    if (pOpenFd == NULL) {
+        return FALSE;
+    }
+
+    if (pOpenFd->type != NvKmsPerOpenTypeGrantPermissions) {
+        return FALSE;
+    }
+
+    pOpenDev = DevEvoToOpenDev(pOpen, pOpenFd->grantPermissions.pDevEvo);
+
+    if (pOpenDev == NULL) {
+        return FALSE;
+    }
+
+    type = pOpenFd->grantPermissions.permissions.type;
+
+    pPermissionsNew = &pOpenFd->grantPermissions.permissions;
+
+    if (type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) {
+        NvU32 d, h;
+
+        for (d = 0; d < ARRAY_LEN(pOpenDev->flipPermissions.disp); d++) {
+            for (h = 0; h < ARRAY_LEN(pOpenDev->flipPermissions.
+                                      disp[d].head); h++) {
+                pOpenDev->flipPermissions.disp[d].head[h].layerMask |=
+                    pPermissionsNew->flip.disp[d].head[h].layerMask;
+            }
+        }
+
+        pParams->reply.permissions.flip = pOpenDev->flipPermissions;
+
+    } else if (type == NV_KMS_PERMISSIONS_TYPE_MODESET) {
+        NvU32 d, h;
+
+        for (d = 0; d < ARRAY_LEN(pOpenDev->modesetPermissions.disp); d++) {
+            for (h = 0; h < ARRAY_LEN(pOpenDev->modesetPermissions.
+                                      disp[d].head); h++) {
+                pOpenDev->modesetPermissions.disp[d].head[h].dpyIdList =
+                    nvAddDpyIdListToDpyIdList(
+                        pOpenDev->modesetPermissions.disp[d].head[h].dpyIdList,
+                        pPermissionsNew->modeset.disp[d].head[h].dpyIdList);
+            }
+        }
+
+        pParams->reply.permissions.modeset = pOpenDev->modesetPermissions;
+
+    } else if (type == NV_KMS_PERMISSIONS_TYPE_SUB_OWNER) {
+
+        if (pOpenDev->pDevEvo->modesetSubOwner != NULL) {
+            /* There can be only one sub-owner */
+            return FALSE;
+        }
+
+        pOpenDev->pDevEvo->modesetSubOwner = pOpenDev;
+        pOpenDev->pDevEvo->modesetOwnerOrSubOwnerChanged = TRUE;
+        AssignFullNvKmsPermissions(pOpenDev);
+
+    } else {
+        /*
+         * GrantPermissions() should ensure that
+         * pOpenFd->grantPermissions.permissions.type is always valid.
+         */
+        nvAssert(!"AcquirePermissions validation failure");
+        return FALSE;
+    }
+
+    pParams->reply.permissions.type = type;
+    pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle;
+
+    return TRUE;
+}
+
+/*!
+ * Clear the set of permissions from pRevokingOpenDev.
+ *
+ * For NvKmsPerOpen::type==Ioctl, clear the revoked permissions from each
+ * open's NvKmsPerOpenDev; the revoking open itself and privileged opens
+ * are left untouched.
+ *
+ * For NvKmsPerOpen::type==GrantPermissions, clear from
+ * NvKmsPerOpen::grantPermissions, and reset NvKmsPerOpen::type to Undefined
+ * if it is empty.
+ */
+static NvBool RevokePermissionsSet(
+    struct NvKmsPerOpenDev *pRevokingOpenDev,
+    const struct NvKmsPermissions *pRevokingPermissions)
+{
+    const NVDevEvoRec *pDevEvo;
+    struct NvKmsPerOpen *pOpen;
+    const struct NvKmsFlipPermissions *pRemoveFlip;
+    const struct NvKmsModesetPermissions *pRemoveModeset;
+
+    // Only process valid permissions.
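+    // SUB_OWNER revocation is handled separately, via the bitmask path in
+    // RevokePermissions().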
+ if (pRevokingPermissions->type != NV_KMS_PERMISSIONS_TYPE_FLIPPING && + pRevokingPermissions->type != NV_KMS_PERMISSIONS_TYPE_MODESET) { + return FALSE; + } + + pDevEvo = pRevokingOpenDev->pDevEvo; + pRemoveFlip = + (pRevokingPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) + ? &pRevokingPermissions->flip + : NULL; + pRemoveModeset = + (pRevokingPermissions->type == NV_KMS_PERMISSIONS_TYPE_MODESET) + ? &pRevokingPermissions->modeset + : NULL; + + nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { + if ((pOpen->type == NvKmsPerOpenTypeGrantPermissions) && + (pOpen->grantPermissions.pDevEvo == pDevEvo)) { + NvBool remainingPermissions = FALSE; + struct NvKmsPermissions *pFdPermissions = + &pOpen->grantPermissions.permissions; + + if (pFdPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { + remainingPermissions = + RemoveFlipPermissions(&pFdPermissions->flip, pRemoveFlip); + } else { + remainingPermissions = RemoveModesetPermissions( + &pFdPermissions->modeset, pRemoveModeset); + } + + // Reset if it is empty. + if (!remainingPermissions) { + nvkms_memset(&pOpen->grantPermissions, 0, + sizeof(pOpen->grantPermissions)); + pOpen->type = NvKmsPerOpenTypeUndefined; + } + + } else if (pOpen->type == NvKmsPerOpenTypeIoctl) { + + struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); + if (pOpenDev == NULL) { + continue; + } + + if (pOpenDev == pRevokingOpenDev || pOpenDev->isPrivileged) { + continue; + } + + if (pRevokingPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { + RemoveFlipPermissions(&pOpenDev->flipPermissions, pRemoveFlip); + } else { + RemoveModesetPermissions(&pOpenDev->modesetPermissions, + pRemoveModeset); + } + } + } + + return TRUE; +} + +static NvBool IsHeadRevoked(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + void *pData) +{ + const struct NvKmsPermissions *pPermissions = pData; + + return !nvDpyIdListIsEmpty( + pPermissions->modeset.disp[pDispEvo->displayOwner].head[apiHead].dpyIdList); +} + +static void DisableStereoPin(struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsModesetPermissions *pModeset) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex, apiHead; + NvBool stereoEnabled; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pOpenDev->pDevEvo) { + for (apiHead = 0; apiHead < pOpenDev->pDevEvo->numApiHeads; apiHead++) { + const NVDpyIdList dpyIdList = + pModeset->disp[dispIndex].head[apiHead].dpyIdList; + if (!nvDpyIdListIsEmpty(dpyIdList)) { + stereoEnabled = nvGetStereo(pDispEvo, apiHead); + + if (stereoEnabled) { + nvSetStereo(pDispEvo, apiHead, FALSE); + } + } + } + } +} + +static NvBool RevokePermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsRevokePermissionsParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev = + GetPerOpenDev(pOpen, pParams->request.deviceHandle); + const NvU32 validBitmask = + NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) | + NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET) | + NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER); + + if (pOpenDev == NULL) { + return FALSE; + } + + /* Reject invalid bitmasks. */ + + if ((pParams->request.permissionsTypeBitmask & ~validBitmask) != 0) { + return FALSE; + } + + if ((pParams->request.permissionsTypeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER)) != 0) { + if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) { + /* Only the modeset owner can revoke sub-owner permissions. */ + return FALSE; + } + + /* + * When revoking ownership permissions, shut down all heads. + * + * This is necessary to keep the state of nvidia-drm in sync with NVKMS. 
+ * Otherwise, an NVKMS client can leave heads enabled when handing off + * control of the device back to nvidia-drm, and nvidia-drm's flip queue + * handling will get out of sync because it thinks all heads are + * disabled and does not expect flip events on those heads. + */ + nvShutDownApiHeads(pOpenDev->pDevEvo, pOpenDev, NULL /* pTestFunc */, + NULL /* pData */, + TRUE /* doRasterLock */); + } + + /* + * Only a client with sub-owner permissions (or better) can revoke other + * kinds of permissions. + */ + if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { + return FALSE; + } + + if (pParams->request.permissionsTypeBitmask > 0) { + // Old behavior, revoke all permissions of a type. + + /* Revoke permissions for everyone except the caller. */ + RevokePermissionsInternal(pParams->request.permissionsTypeBitmask, + pOpenDev->pDevEvo, + pOpenDev /* pOpenDevExclude */); + } else { + /* If not using bitmask, revoke using the set. */ + if (!RevokePermissionsSet(pOpenDev, &pParams->request.permissions)) { + return FALSE; + } + + /* + * When revoking ownership permissions, shut down those heads. + * + * This is necessary to keep the state of nvidia-drm in sync with NVKMS. + * Otherwise, an NVKMS client can leave heads enabled when handing off + * control of the device back to nvidia-drm, which prevents them from + * being able to be leased again. + */ + if (pParams->request.permissions.type == NV_KMS_PERMISSIONS_TYPE_MODESET) { + // Also disable stereo pins if enabled. + DisableStereoPin(pOpenDev, &pParams->request.permissions.modeset); + + nvShutDownApiHeads(pOpenDev->pDevEvo, pOpenDev, IsHeadRevoked, + &pParams->request.permissions, + TRUE /* doRasterLock */); + } + } + + return TRUE; +} + +static NvBool RegisterDeferredRequestFifo(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsRegisterDeferredRequestFifoParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + NVSurfaceEvoPtr pSurfaceEvo; + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvKmsDeferredRequestFifoHandle handle; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + pSurfaceEvo = nvEvoGetSurfaceFromHandleNoHWAccess( + pOpenDev->pDevEvo, + &pOpenDev->surfaceHandles, + pParams->request.surfaceHandle); + + if (pSurfaceEvo == NULL) { + return FALSE; + } + + /* + * WAR Bug 2050970: If a surface is unregistered and it wasn't registered + * with NvKmsRegisterSurfaceRequest::noDisplayHardwareAccess, then the call + * to nvRMSyncEvoChannel() in nvEvoDecrementSurfaceRefCnts() may hang + * if any flips in flight acquire on semaphore releases that haven't + * occurred yet. + * + * Since a ctxdma is not necessary for the deferred request fifo surface, + * we work around this by forcing all surfaces that will be registered as + * a deferred request fifo to be registered with + * noDisplayHardwareAccess==TRUE, then skip the idle in + * nvEvoDecrementSurfaceRefCnts() for these surfaces. 
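+     * That is why surfaces registered with requireDisplayHardwareAccess set
+     * are rejected just below.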
+ */
+    if (pSurfaceEvo->requireDisplayHardwareAccess) {
+        return FALSE;
+    }
+
+    pDeferredRequestFifo =
+        nvEvoRegisterDeferredRequestFifo(pOpenDev->pDevEvo, pSurfaceEvo);
+
+    if (pDeferredRequestFifo == NULL) {
+        return FALSE;
+    }
+
+    handle = nvEvoCreateApiHandle(&pOpenDev->deferredRequestFifoHandles,
+                                  pDeferredRequestFifo);
+
+    if (handle == 0) {
+        nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo,
+                                           pDeferredRequestFifo);
+        return FALSE;
+    }
+
+    pParams->reply.deferredRequestFifoHandle = handle;
+
+    return TRUE;
+}
+
+static NvBool UnregisterDeferredRequestFifo(struct NvKmsPerOpen *pOpen,
+                                            void *pParamsVoid)
+{
+    struct NvKmsUnregisterDeferredRequestFifoParams *pParams = pParamsVoid;
+    NvKmsDeferredRequestFifoHandle handle =
+        pParams->request.deferredRequestFifoHandle;
+    NVDeferredRequestFifoRec *pDeferredRequestFifo;
+    struct NvKmsPerOpenDev *pOpenDev =
+        GetPerOpenDev(pOpen, pParams->request.deviceHandle);
+
+    if (pOpenDev == NULL) {
+        return FALSE;
+    }
+
+    pDeferredRequestFifo =
+        nvEvoGetPointerFromApiHandle(
+            &pOpenDev->deferredRequestFifoHandles, handle);
+
+    if (pDeferredRequestFifo == NULL) {
+        return FALSE;
+    }
+
+    nvEvoDestroyApiHandle(&pOpenDev->deferredRequestFifoHandles, handle);
+
+    nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, pDeferredRequestFifo);
+
+    return TRUE;
+}
+
+/*!
+ * Get the CRC32 data for the specified dpy.
+ */
+static NvBool QueryDpyCRC32(struct NvKmsPerOpen *pOpen,
+                            void *pParamsVoid)
+{
+    struct NvKmsQueryDpyCRC32Params *pParams = pParamsVoid;
+    struct NvKmsPerOpenDev *pOpenDev;
+    struct NvKmsPerOpenDisp *pOpenDisp;
+    NVDispEvoPtr pDispEvo;
+    CRC32NotifierCrcOut crcOut;
+
+    if (!GetPerOpenDevAndDisp(pOpen,
+                              pParams->request.deviceHandle,
+                              pParams->request.dispHandle,
+                              &pOpenDev,
+                              &pOpenDisp)) {
+        return FALSE;
+    }
+
+    if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
+        // Only a current owner can query CRC32 values.
+        return FALSE;
+    }
+
+    pDispEvo = pOpenDisp->pDispEvo;
+
+    if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) {
+        return FALSE;
+    }
+
+    nvkms_memset(&(pParams->reply), 0, sizeof(pParams->reply));
+
+    // Since we only read one frame of CRCs, point the output pointers at the
+    // single set of values in the reply struct.
+    crcOut.rasterGeneratorCrc32 = &(pParams->reply.rasterGeneratorCrc32);
+    crcOut.compositorCrc32 = &(pParams->reply.compositorCrc32);
+    crcOut.outputCrc32 = &(pParams->reply.outputCrc32);
+
+    {
+        /*
+         * XXX[2Heads1OR] Is it sufficient to query CRC only for the primary
+         * hardware head?
+ */ + NvU32 head = nvGetPrimaryHwHead(pDispEvo, pParams->request.head); + + nvAssert(head != NV_INVALID_HEAD); + + if (!nvReadCRC32Evo(pDispEvo, head, &crcOut)) { + return FALSE; + } + } + + return TRUE; +} + +static NvBool AllocSwapGroup( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsAllocSwapGroupParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + NVSwapGroupRec *pSwapGroup; + NvKmsSwapGroupHandle handle; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { + return FALSE; + } + + pSwapGroup = nvHsAllocSwapGroup(pOpenDev->pDevEvo, &pParams->request); + + if (pSwapGroup == NULL) { + return FALSE; + } + + handle = nvEvoCreateApiHandle(&pOpenDev->swapGroupHandles, pSwapGroup); + + if (handle == 0) { + nvHsFreeSwapGroup(pOpenDev->pDevEvo, pSwapGroup); + return FALSE; + } + + pParams->reply.swapGroupHandle = handle; + + return TRUE; +} + +static NvBool FreeSwapGroup( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsFreeSwapGroupParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + NVSwapGroupRec *pSwapGroup; + NvKmsSwapGroupHandle handle = pParams->request.swapGroupHandle; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { + return FALSE; + } + + pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles, + handle); + if (pSwapGroup == NULL) { + return FALSE; + } + + nvEvoDestroyApiHandle(&pOpenDev->swapGroupHandles, handle); + + nvHsFreeSwapGroup(pOpenDev->pDevEvo, pSwapGroup); + + return TRUE; +} + +static NvBool JoinSwapGroup( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsJoinSwapGroupParams *pParams = pParamsVoid; + const struct NvKmsJoinSwapGroupRequestOneMember *pMember = + pParams->request.member; + NvU32 i, j; + NvBool anySwapGroupsPending = FALSE; + NVHsJoinSwapGroupWorkArea *pJoinSwapGroupWorkArea; + + if ((pParams->request.numMembers == 0) || + (pParams->request.numMembers > + ARRAY_LEN(pParams->request.member))) { + return FALSE; + } + + pJoinSwapGroupWorkArea = nvCalloc(pParams->request.numMembers, + sizeof(NVHsJoinSwapGroupWorkArea)); + + if (!pJoinSwapGroupWorkArea) { + return FALSE; + } + + /* + * When a client is joining multiple swap groups simultaneously, all of its + * deferred request fifos must enter the pendingJoined state if any of the + * swap groups it's joining have pending flips. Otherwise, this sequence + * can lead to a deadlock: + * + * - Client 0 joins DRF 0 to SG 0, DRF 1 to SG 1, with SG 0 and SG 1 + * fliplocked + * - Client 0 submits DRF 0 ready, SG 0 flips, but the flip won't complete + * and [Client 0.DRF 0] won't be released until SG 1 flips due to + * fliplock + * - Client 1 joins DRF 0 to SG 0, DRF 1 to SG 1 + * - Client 0 submits DRF 1 ready, but SG 1 doesn't flip because + * [Client 1.DRF 0] has joined. + * + * With the pendingJoined behavior, this sequence works as follows: + * + * - Client 0 joins DRF 0 to SG 0, DRF 1 to SG 1, with SG 0 and SG 1 + * fliplocked + * - Client 0 submits DRF 0 ready, SG 0 flips, but the flip won't complete + * and [Client 0.DRF 0] won't be released until SG 1 flips due to + * fliplock + * - Client 1 joins DRF 0 to SG 0, DRF 1 to SG 1, but both enter the + * pendingJoined state because [Client 0.DRF 0] has a pending flip. 
+ * - Client 0 submits DRF 1 ready, both swap groups flip, Client 0's + * DRFs are both released, and Client 1's DRFs both leave the + * pendingJoined state. + */ + for (i = 0; i < pParams->request.numMembers; i++) { + struct NvKmsPerOpenDev *pOpenDev; + NVSwapGroupRec *pSwapGroup; + NVDeferredRequestFifoRec *pDeferredRequestFifo; + struct NvKmsPerOpen *pEventOpenFd = NULL; + NvKmsDeviceHandle deviceHandle = pMember[i].deviceHandle; + NvKmsSwapGroupHandle swapGroupHandle = pMember[i].swapGroupHandle; + NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle = + pMember[i].deferredRequestFifoHandle; + + pOpenDev = GetPerOpenDev(pOpen, deviceHandle); + + if (pOpenDev == NULL) { + goto fail; + } + + pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles, + swapGroupHandle); + + if (pSwapGroup == NULL) { + goto fail; + } + + if (pSwapGroup->pendingFlip) { + anySwapGroupsPending = TRUE; + } + + /* + * In addition to the check for pending swap groups above, validate + * the remainder of the request now. + */ + + /* + * Prevent pSwapGroup->nMembers from overflowing NV_U32_MAX. + * + * Ideally we would want to count how many members are being added to + * each swap group in the request, but as an optimization, just verify + * that the number of {fifo, swapgroup} tuples joining would not + * overflow any swapgroup even if every one was joining the same + * swapgroup. + */ + if (NV_U32_MAX - pSwapGroup->nMembers < pParams->request.numMembers) { + goto fail; + } + + pDeferredRequestFifo = + nvEvoGetPointerFromApiHandle( + &pOpenDev->deferredRequestFifoHandles, + deferredRequestFifoHandle); + + if (pDeferredRequestFifo == NULL) { + goto fail; + } + + /* + * If the pDeferredRequestFifo is already a member of a SwapGroup, then + * fail. + */ + if (pDeferredRequestFifo->swapGroup.pSwapGroup != NULL) { + goto fail; + } + + if (pMember[i].unicastEvent.specified) { + pEventOpenFd = nvkms_get_per_open_data(pMember[i].unicastEvent.fd); + + if (pEventOpenFd == NULL) { + goto fail; + } + + if (!PerOpenIsValidForUnicastEvent(pEventOpenFd)) { + goto fail; + } + } + + /* + * We checked above that pDeferredRequestFifo is not currently a member + * of a SwapGroup, and that pEventOpenFd is currently valid to be used + * for a unicast event. However, if either of those were also + * specified for an earlier member for this request, then that won't + * hold: by the time *this* member is processed, the + * pDeferredRequestFifo would already be a member of a swapgroup, or + * the pEventOpenFd would already be in use. + * + * Validate that that doesn't happen. + */ + for (j = 0; j < i; j++) { + if (pJoinSwapGroupWorkArea[j].pDeferredRequestFifo == + pDeferredRequestFifo) { + goto fail; + } + if (pJoinSwapGroupWorkArea[j].pEventOpenFd == + pEventOpenFd) { + goto fail; + } + } + + pJoinSwapGroupWorkArea[i].pDevEvo = pOpenDev->pDevEvo; + pJoinSwapGroupWorkArea[i].pSwapGroup = pSwapGroup; + pJoinSwapGroupWorkArea[i].pDeferredRequestFifo = pDeferredRequestFifo; + pJoinSwapGroupWorkArea[i].pEventOpenFd = pEventOpenFd; + pJoinSwapGroupWorkArea[i].enabledHeadSurface = FALSE; + } + + if (!nvHsJoinSwapGroup(pJoinSwapGroupWorkArea, + pParams->request.numMembers, + anySwapGroupsPending)) { + goto fail; + } + + /* Beyond this point, the function cannot fail. 
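+     * For each member that specified a unicast event, bind its deferred
+     * request fifo to the event FD and convert that FD to the UnicastEvent
+     * type.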
*/ + + for (i = 0; i < pParams->request.numMembers; i++) { + struct NvKmsPerOpen *pEventOpenFd = + pJoinSwapGroupWorkArea[i].pEventOpenFd; + NVDeferredRequestFifoRec *pDeferredRequestFifo = + pJoinSwapGroupWorkArea[i].pDeferredRequestFifo; + + if (pEventOpenFd) { + pDeferredRequestFifo->swapGroup.pOpenUnicastEvent = pEventOpenFd; + + pEventOpenFd->unicastEvent.type = + NvKmsUnicastEventTypeDeferredRequest; + pEventOpenFd->unicastEvent.e.deferred.pDeferredRequestFifo = + pDeferredRequestFifo; + + pEventOpenFd->type = NvKmsPerOpenTypeUnicastEvent; + } + } + + nvFree(pJoinSwapGroupWorkArea); + return TRUE; + +fail: + nvFree(pJoinSwapGroupWorkArea); + return FALSE; +} + +static NvBool LeaveSwapGroup( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsLeaveSwapGroupParams *pParams = pParamsVoid; + const struct NvKmsLeaveSwapGroupRequestOneMember *pMember = + pParams->request.member; + NvU32 i; + + if ((pParams->request.numMembers == 0) || + (pParams->request.numMembers > + ARRAY_LEN(pParams->request.member))) { + return FALSE; + } + + /* + * Validate all handles passed by the caller and fail if any are invalid. + */ + for (i = 0; i < pParams->request.numMembers; i++) { + struct NvKmsPerOpenDev *pOpenDev; + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvKmsDeviceHandle deviceHandle = + pMember[i].deviceHandle; + NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle = + pMember[i].deferredRequestFifoHandle; + + pOpenDev = GetPerOpenDev(pOpen, deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + pDeferredRequestFifo = + nvEvoGetPointerFromApiHandle( + &pOpenDev->deferredRequestFifoHandles, + deferredRequestFifoHandle); + + if (pDeferredRequestFifo == NULL) { + return FALSE; + } + + if (pDeferredRequestFifo->swapGroup.pSwapGroup == NULL) { + return FALSE; + } + } + + /* Beyond this point, the function cannot fail. */ + + for (i = 0; i < pParams->request.numMembers; i++) { + struct NvKmsPerOpenDev *pOpenDev; + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvKmsDeviceHandle deviceHandle = + pMember[i].deviceHandle; + NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle = + pMember[i].deferredRequestFifoHandle; + + pOpenDev = GetPerOpenDev(pOpen, deviceHandle); + + pDeferredRequestFifo = + nvEvoGetPointerFromApiHandle( + &pOpenDev->deferredRequestFifoHandles, + deferredRequestFifoHandle); + + nvHsLeaveSwapGroup(pOpenDev->pDevEvo, pDeferredRequestFifo, + FALSE /* teardown */); + } + + return TRUE; +} + +static NvBool SetSwapGroupClipList( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetSwapGroupClipListParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + NVSwapGroupRec *pSwapGroup; + struct NvKmsRect *pClipList; + NvBool ret; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { + return FALSE; + } + + pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles, + pParams->request.swapGroupHandle); + + if (pSwapGroup == NULL) { + return FALSE; + } + + /* + * Create a copy of the passed-in pClipList, to be stored in pSwapGroup. + * Copy from the client using nvkms_copyin() or nvkms_memcpy(), depending on + * the clientType. + * + * We do not use the nvKmsIoctl() prepUser/doneUser infrastructure here + * because that would require creating two copies of pClipList in the + * user-space client case: one allocated in prepUser and freed in doneUser, + * and a second in nvHsSetSwapGroupClipList(). 
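+     * Instead, a single copy is made here; on success, ownership of it
+     * passes to nvHsSetSwapGroupClipList().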
+ */
+    if (pParams->request.nClips == 0) {
+        pClipList = NULL;
+    } else {
+        const size_t len = sizeof(struct NvKmsRect) * pParams->request.nClips;
+
+        if ((pParams->request.pClipList == 0) ||
+            !nvKmsNvU64AddressIsSafe(pParams->request.pClipList)) {
+            return FALSE;
+        }
+
+        pClipList = nvAlloc(len);
+
+        if (pClipList == NULL) {
+            return FALSE;
+        }
+
+        if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) {
+            int status =
+                nvkms_copyin(pClipList, pParams->request.pClipList, len);
+
+            if (status != 0) {
+                nvFree(pClipList);
+                return FALSE;
+            }
+        } else {
+            const void *pKernelPointer =
+                nvKmsNvU64ToPointer(pParams->request.pClipList);
+
+            nvkms_memcpy(pClipList, pKernelPointer, len);
+        }
+    }
+
+    ret = nvHsSetSwapGroupClipList(
+        pOpenDev->pDevEvo,
+        pSwapGroup,
+        pParams->request.nClips,
+        pClipList);
+
+    if (!ret) {
+        nvFree(pClipList);
+    }
+
+    return ret;
+}
+
+static NvBool SwitchMux(
+    struct NvKmsPerOpen *pOpen,
+    void *pParamsVoid)
+{
+    struct NvKmsSwitchMuxParams *pParams = pParamsVoid;
+    const struct NvKmsSwitchMuxRequest *r = &pParams->request;
+    struct NvKmsPerOpenDev *pOpenDev;
+    NVDpyEvoPtr pDpyEvo;
+
+    pDpyEvo = GetPerOpenDpy(pOpen, r->deviceHandle, r->dispHandle, r->dpyId);
+    if (pDpyEvo == NULL) {
+        return FALSE;
+    }
+
+    pOpenDev = GetPerOpenDev(pOpen, r->deviceHandle);
+    if (pOpenDev == NULL) {
+        return FALSE;
+    }
+
+    if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
+        return FALSE;
+    }
+
+    switch (pParams->request.operation) {
+    case NVKMS_SWITCH_MUX_PRE:
+        return nvRmMuxPre(pDpyEvo, r->state);
+    case NVKMS_SWITCH_MUX:
+        return nvRmMuxSwitch(pDpyEvo, r->state);
+    case NVKMS_SWITCH_MUX_POST:
+        return nvRmMuxPost(pDpyEvo, r->state);
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool GetMuxState(
+    struct NvKmsPerOpen *pOpen,
+    void *pParamsVoid)
+{
+    struct NvKmsGetMuxStateParams *pParams = pParamsVoid;
+    const struct NvKmsGetMuxStateRequest *r = &pParams->request;
+    NVDpyEvoPtr pDpyEvo;
+
+    pDpyEvo = GetPerOpenDpy(pOpen, r->deviceHandle, r->dispHandle, r->dpyId);
+    if (pDpyEvo == NULL) {
+        return FALSE;
+    }
+
+    pParams->reply.state = nvRmMuxState(pDpyEvo);
+
+    return pParams->reply.state != MUX_STATE_GET;
+}
+
+static NvBool ExportVrrSemaphoreSurface(
+    struct NvKmsPerOpen *pOpen,
+    void *pParamsVoid)
+{
+    struct NvKmsExportVrrSemaphoreSurfaceParams *pParams = pParamsVoid;
+    const struct NvKmsExportVrrSemaphoreSurfaceRequest *req = &pParams->request;
+    const struct NvKmsPerOpenDev *pOpenDev =
+        GetPerOpenDev(pOpen, pParams->request.deviceHandle);
+
+    if (pOpenDev == NULL) {
+        return FALSE;
+    }
+
+    return nvExportVrrSemaphoreSurface(pOpenDev->pDevEvo, req->memFd);
+}
+
+static void EnableAndSetupVblankSyncObject(NVDispEvoRec *pDispEvo,
+                                           const NvU32 apiHead,
+                                           NVVblankSyncObjectRec *pVblankSyncObject,
+                                           NVEvoUpdateState *pUpdateState)
+{
+    /*
+     * The core channel re-allocation code path may end up allocating fewer
+     * sync objects than are currently allocated and in use by NVKMS clients:
+     * a sync object is left with hCtxDma = 0 if the nvAllocCoreChannelEvo() ->
+     * InitApiHeadState() -> nvRmAllocCoreRGSyncpts() code path fails to
+     * re-allocate that sync object.
+ */ + if (nvApiHeadIsActive(pDispEvo, apiHead) && + (pVblankSyncObject->evoSyncpt.surfaceDesc.ctxDmaHandle != 0)) { + NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead); + + nvAssert(head != NV_INVALID_HEAD); + + pDispEvo->pDevEvo->hal->ConfigureVblankSyncObject( + pDispEvo->pDevEvo, + pDispEvo->headState[head].timings.rasterBlankStart.y, + head, + pVblankSyncObject->index, + &pVblankSyncObject->evoSyncpt.surfaceDesc, + pUpdateState); + + pVblankSyncObject->enabled = TRUE; + } + + pVblankSyncObject->inUse = TRUE; +} + +static void EnableAndSetupVblankSyncObjectForAllOpens(NVDevEvoRec *pDevEvo) +{ + /* + * An NVEvoUpdateState has disp-scope, and we will only have + * one disp when programming syncpts. + */ + NVEvoUpdateState updateState = { }; + struct NvKmsPerOpen *pOpen; + + if (!pDevEvo->supportsSyncpts || + !pDevEvo->hal->caps.supportsVblankSyncObjects) { + return; + } + + /* If Syncpts are supported, we're on Orin, which only has one display. */ + nvAssert(pDevEvo->nDispEvo == 1); + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle disp; + + if (pOpenDev == NULL) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, + pOpenDisp, disp) { + + nvAssert(pOpenDisp->pDispEvo == pDevEvo->pDispEvo[0]); + + for (NvU32 apiHead = 0; apiHead < + ARRAY_LEN(pOpenDisp->vblankSyncObjectHandles); apiHead++) { + NVEvoApiHandlesRec *pHandles = + &pOpenDisp->vblankSyncObjectHandles[apiHead]; + NVVblankSyncObjectRec *pVblankSyncObject; + NvKmsVblankSyncObjectHandle handle; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pHandles, + pVblankSyncObject, handle) { + EnableAndSetupVblankSyncObject(pOpenDisp->pDispEvo, apiHead, + pVblankSyncObject, + &updateState); + } + } + } + } + + if (!nvIsUpdateStateEmpty(pDevEvo, &updateState)) { + nvEvoUpdateAndKickOff(pDevEvo->pDispEvo[0], TRUE, &updateState, + TRUE); + } +} + +static NvBool EnableVblankSyncObject( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsEnableVblankSyncObjectParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp* pOpenDisp = NULL; + NVDispApiHeadStateEvoRec *pApiHeadState = NULL; + NVDevEvoPtr pDevEvo = NULL; + NvKmsVblankSyncObjectHandle vblankHandle = 0; + int freeVblankSyncObjectIdx = 0; + NvU32 apiHead = pParams->request.head; + NVVblankSyncObjectRec *vblankSyncObjects = NULL; + NVDispEvoPtr pDispEvo = NULL; + NVEvoUpdateState updateState = { }; + + /* Obtain the Head State. */ + pOpenDisp = GetPerOpenDisp(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + nvEvoLogDebug(EVO_LOG_ERROR, "Unable to GetPerOpenDisp."); + return FALSE; + } + + pDispEvo = pOpenDisp->pDispEvo; + pDevEvo = pDispEvo->pDevEvo; + + /* Ensure Vblank Sync Object API is supported on this chip. */ + if (!pDevEvo->supportsSyncpts || + !pDevEvo->hal->caps.supportsVblankSyncObjects) { + nvEvoLogDebug(EVO_LOG_ERROR, "Vblank Sync Object functionality is not " + "supported on this chip."); + return FALSE; + } + + /* Validate requested head because it comes from user input. */ + if (apiHead >= ARRAY_LEN(pDispEvo->apiHeadState)) { + nvEvoLogDebug(EVO_LOG_ERROR, "Invalid head requested, head=%d.", apiHead); + return FALSE; + } + pApiHeadState = &pDispEvo->apiHeadState[apiHead]; + vblankSyncObjects = pApiHeadState->vblankSyncObjects; + pDevEvo = pDispEvo->pDevEvo; + + /* + * Find the available sync object. 
Sync Objects with handle=0 are not in + * use. + */ + for (freeVblankSyncObjectIdx = 0; + freeVblankSyncObjectIdx < pApiHeadState->numVblankSyncObjectsCreated; + freeVblankSyncObjectIdx++) { + if (!vblankSyncObjects[freeVblankSyncObjectIdx].inUse) { + break; + } + } + if (freeVblankSyncObjectIdx == pApiHeadState->numVblankSyncObjectsCreated) { + return FALSE; + } + + /* Save the created vblank handle if it is valid. */ + vblankHandle = + nvEvoCreateApiHandle(&pOpenDisp->vblankSyncObjectHandles[apiHead], + &vblankSyncObjects[freeVblankSyncObjectIdx]); + if (vblankHandle == 0) { + nvEvoLogDebug(EVO_LOG_ERROR, "Unable to create vblank handle."); + return FALSE; + } + + EnableAndSetupVblankSyncObject(pDispEvo, apiHead, + &vblankSyncObjects[freeVblankSyncObjectIdx], + &updateState); + if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) { + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE); + } + + /* Populate the reply field. */ + pParams->reply.vblankHandle = vblankHandle; + /* Note: the syncpt ID is NOT the same as the vblank handle. */ + pParams->reply.syncptId = + pApiHeadState->vblankSyncObjects[freeVblankSyncObjectIdx].evoSyncpt.id; + + return TRUE; +} + +static void DisableAndCleanVblankSyncObject(NVDispEvoRec *pDispEvo, + const NvU32 apiHead, + NVVblankSyncObjectRec *pVblankSyncObject, + NVEvoUpdateState *pUpdateState) +{ + if (nvApiHeadIsActive(pDispEvo, apiHead)) { + NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead); + + nvAssert(head != NV_INVALID_HEAD); + + /* + * Instruct the hardware to disable the semaphore corresponding to this + * syncpt. The Update State will be populated. + * + * Note: Using dummy zero value for rasterLine because the disable + * codepath in ConfigureVblankSyncObject() does not use that argument. + */ + pDispEvo->pDevEvo->hal->ConfigureVblankSyncObject(pDispEvo->pDevEvo, + 0, /* rasterLine */ + head, + pVblankSyncObject->index, + NULL, /* pSurfaceDesc */ + pUpdateState); + /* + * Note: it is the caller's responsibility to call + * nvEvoUpdateAndKickOff(). + */ + } + + pVblankSyncObject->inUse = FALSE; + pVblankSyncObject->enabled = FALSE; +} + +static void DisableAndCleanVblankSyncObjectForAllOpens(NVDevEvoRec *pDevEvo) +{ + /* + * An NVEvoUpdateState has disp-scope, and we will only have + * one disp when programming syncpts. + */ + NVEvoUpdateState updateState = { }; + struct NvKmsPerOpen *pOpen; + + if (!pDevEvo->supportsSyncpts || + !pDevEvo->hal->caps.supportsVblankSyncObjects) { + return; + } + + /* If Syncpts are supported, we're on Orin, which only has one display. 
*/ + nvAssert(pDevEvo->nDispEvo == 1); + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle disp; + + if (pOpenDev == NULL) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, + pOpenDisp, disp) { + + nvAssert(pOpenDisp->pDispEvo == pDevEvo->pDispEvo[0]); + + for (NvU32 apiHead = 0; apiHead < + ARRAY_LEN(pOpenDisp->vblankSyncObjectHandles); apiHead++) { + NVEvoApiHandlesRec *pHandles = + &pOpenDisp->vblankSyncObjectHandles[apiHead]; + NVVblankSyncObjectRec *pVblankSyncObject; + NvKmsVblankSyncObjectHandle handle; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pHandles, + pVblankSyncObject, handle) { + DisableAndCleanVblankSyncObject(pOpenDisp->pDispEvo, apiHead, + pVblankSyncObject, + &updateState); + } + } + } + } + + if (!nvIsUpdateStateEmpty(pDevEvo, &updateState)) { + nvEvoUpdateAndKickOff(pDevEvo->pDispEvo[0], TRUE, &updateState, + TRUE); + } +} + +static NvBool DisableVblankSyncObject( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsDisableVblankSyncObjectParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp* pOpenDisp = + GetPerOpenDisp(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle); + NVVblankSyncObjectRec *pVblankSyncObject = NULL; + NvU32 apiHead = pParams->request.head; + NVDevEvoPtr pDevEvo = NULL; + NVEvoUpdateState updateState = { }; + + if (pOpenDisp == NULL) { + nvEvoLogDebug(EVO_LOG_ERROR, "Unable to GetPerOpenDisp."); + return FALSE; + } + + pDevEvo = pOpenDisp->pDispEvo->pDevEvo; + + /* Ensure Vblank Sync Object API is supported on this chip. */ + if (!pDevEvo->supportsSyncpts || + !pDevEvo->hal->caps.supportsVblankSyncObjects) { + nvEvoLogDebug(EVO_LOG_ERROR, "Vblank Sync Object functionality is not " + "supported on this chip."); + return FALSE; + } + + /* Validate requested head because it comes from user input. */ + if (apiHead >= ARRAY_LEN(pOpenDisp->pDispEvo->apiHeadState)) { + nvEvoLogDebug(EVO_LOG_ERROR, "Invalid head requested, head=%d.", apiHead); + return FALSE; + } + + /* Mark the indicated object as free. */ + pVblankSyncObject = + nvEvoGetPointerFromApiHandle(&pOpenDisp->vblankSyncObjectHandles[apiHead], + pParams->request.vblankHandle); + if (pVblankSyncObject == NULL) { + nvEvoLogDebug(EVO_LOG_ERROR, "unable to find object with provided " + "handle."); + return FALSE; + } + + DisableAndCleanVblankSyncObject(pOpenDisp->pDispEvo, apiHead, + pVblankSyncObject, &updateState); + + if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) { + /* + * Instruct hardware to execute the staged commands from the + * ConfigureVblankSyncObject() call inside of the + * DisableAndCleanVblankSyncObject() call above. This will set up and + * wait for a notification that the hardware execution has completed. + */ + nvEvoUpdateAndKickOff(pOpenDisp->pDispEvo, TRUE, &updateState, TRUE); + } + + /* Remove the handle from the map. */ + nvEvoDestroyApiHandle(&pOpenDisp->vblankSyncObjectHandles[apiHead], + pParams->request.vblankHandle); + + return TRUE; +} + +static void NotifyVblankCallback(NVDispEvoRec *pDispEvo, + NVVBlankCallbackPtr pCallbackData) +{ + struct NvKmsPerOpen *pEventOpenFd = pCallbackData->pUserData; + + /* + * NOTIFY_VBLANK events are single-shot so notify the unicast FD, then + * immediately unregister the callback. The unregister step is done in + * nvRemoveUnicastEvent which resets the unicast event data. 
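+     * The callback does not re-arm itself: a client that wants another
+     * notification must request one again via NVKMS_IOCTL_NOTIFY_VBLANK.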
+ */ + nvSendUnicastEvent(pEventOpenFd); + nvRemoveUnicastEvent(pEventOpenFd); +} + +static NvBool NotifyVblank( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsNotifyVblankParams *pParams = pParamsVoid; + struct NvKmsPerOpen *pEventOpenFd = NULL; + NVVBlankCallbackPtr pCallbackData = NULL; + struct NvKmsPerOpenDisp* pOpenDisp = + GetPerOpenDisp(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle); + + if (pOpenDisp == NULL) { + return NV_FALSE; + } + + const NvU32 apiHead = pParams->request.head; + + pEventOpenFd = nvkms_get_per_open_data(pParams->request.unicastEvent.fd); + + if (pEventOpenFd == NULL) { + return NV_FALSE; + } + + if (!PerOpenIsValidForUnicastEvent(pEventOpenFd)) { + return NV_FALSE; + } + + pEventOpenFd->type = NvKmsPerOpenTypeUnicastEvent; + + pCallbackData = nvApiHeadRegisterVBlankCallback(pOpenDisp->pDispEvo, + apiHead, + NotifyVblankCallback, + pEventOpenFd, + 1 /* listIndex */); + if (pCallbackData == NULL) { + return NV_FALSE; + } + + pEventOpenFd->unicastEvent.type = NvKmsUnicastEventTypeVblankNotification; + pEventOpenFd->unicastEvent.e.vblankNotification.pOpenDisp = pOpenDisp; + pEventOpenFd->unicastEvent.e.vblankNotification.apiHead = apiHead; + pEventOpenFd->unicastEvent.e.vblankNotification.hCallback + = nvEvoCreateApiHandle(&pOpenDisp->vblankCallbackHandles[apiHead], + pCallbackData); + + if (pEventOpenFd->unicastEvent.e.vblankNotification.hCallback == 0) { + nvApiHeadUnregisterVBlankCallback(pOpenDisp->pDispEvo, pCallbackData); + return NV_FALSE; + } + + return NV_TRUE; +} + +static NvBool SetFlipLockGroup( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetFlipLockGroupParams *pParams = pParamsVoid; + const struct NvKmsSetFlipLockGroupRequest *pRequest = &pParams->request; + /* Fill in this array as we look up the pDevEvo from the given device + * handles, so that later processing can use it without converting + * deviceHandle -> pDevEvo again. */ + NVDevEvoPtr pDevEvo[NV_MAX_SUBDEVICES] = { }; + NvU32 dev; + + /* Ensure we don't overrun the pDevEvo array. */ + ct_assert(ARRAY_LEN(pRequest->dev) == NV_MAX_SUBDEVICES); + + for (dev = 0; dev < ARRAY_LEN(pRequest->dev); dev++) { + const struct NvKmsSetFlipLockGroupOneDev *pRequestDev = + &pRequest->dev[dev]; + struct NvKmsPerOpenDev *pOpenDev = NULL; + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + NvU32 i; + + if (pRequestDev->requestedDispsBitMask == 0) { + break; + } + + pOpenDev = GetPerOpenDev(pOpen, pRequestDev->deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + pDevEvo[dev] = pOpenDev->pDevEvo; + + /* The caller must be the modeset owner for every specified device. */ + if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { + return FALSE; + } + + /* Do not allow the same device to be specified twice. */ + for (i = 0; i < dev; i++) { + if (pDevEvo[i] == pDevEvo[dev]) { + return FALSE; + } + } + + /* Check for invalid disps in requestedDispsBitMask. */ + if (nvHasBitAboveMax(pRequestDev->requestedDispsBitMask, + pDevEvo[dev]->nDispEvo)) { + return FALSE; + } + + /* Check for invalid heads in requestedHeadsBitMask. */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo[dev]) { + const NvU32 requestedHeadsBitMask = + pRequestDev->disp[dispIndex].requestedHeadsBitMask; + NvU32 apiHead; + + if (requestedHeadsBitMask == 0) { + return FALSE; + } + if (nvHasBitAboveMax(requestedHeadsBitMask, + pDevEvo[dev]->numHeads)) { + return FALSE; + } + + /* + * Verify that all API heads in requestedHeadsBitMask are active. 
+ * The requested fliplock group will be implicitly disabled if any of + * these heads are specified in a modeset. + */ + for (apiHead = 0; apiHead < pDevEvo[dev]->numHeads; apiHead++) { + if ((requestedHeadsBitMask & (1 << apiHead)) != 0) { + if (!nvApiHeadIsActive(pDispEvo, apiHead)) { + return FALSE; + } + } + } + } + } + + /* Verify that at least one device was specified */ + if (pDevEvo[0] == NULL) { + return FALSE; + } + + return nvSetFlipLockGroup(pDevEvo, pRequest); +} + +static NvBool EnableVblankSemControl( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsEnableVblankSemControlParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDevEvoPtr pDevEvo; + NVDispEvoRec *pDispEvo; + NVSurfaceEvoPtr pSurfaceEvo; + NVVblankSemControl *pVblankSemControl; + NvKmsVblankSemControlHandle vblankSemControlHandle; + + if (!GetPerOpenDevAndDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + &pOpenDev, + &pOpenDisp)) { + return FALSE; + } + + pDevEvo = pOpenDev->pDevEvo; + pDispEvo = pOpenDisp->pDispEvo; + + pSurfaceEvo = + nvEvoGetSurfaceFromHandleNoHWAccess( + pDevEvo, + &pOpenDev->surfaceHandles, + pParams->request.surfaceHandle); + + if (pSurfaceEvo == NULL) { + return FALSE; + } + + pVblankSemControl = nvEvoEnableVblankSemControl( + pDevEvo, + pDispEvo, + pSurfaceEvo, + pParams->request.surfaceOffset); + + if (pVblankSemControl == NULL) { + return FALSE; + } + + vblankSemControlHandle = + nvEvoCreateApiHandle(&pOpenDisp->vblankSemControlHandles, + pVblankSemControl); + + if (vblankSemControlHandle == 0) { + (void)nvEvoDisableVblankSemControl(pDevEvo, pVblankSemControl); + return FALSE; + } + + pParams->reply.vblankSemControlHandle = vblankSemControlHandle; + + return TRUE; +} + +static NvBool DisableVblankSemControl( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + const struct NvKmsDisableVblankSemControlParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDevEvoPtr pDevEvo; + NVVblankSemControl *pVblankSemControl; + NvBool ret; + + if (!GetPerOpenDevAndDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + &pOpenDev, + &pOpenDisp)) { + return FALSE; + } + + pDevEvo = pOpenDev->pDevEvo; + + pVblankSemControl = + nvEvoGetPointerFromApiHandle(&pOpenDisp->vblankSemControlHandles, + pParams->request.vblankSemControlHandle); + if (pVblankSemControl == NULL) { + return FALSE; + } + + ret = nvEvoDisableVblankSemControl(pDevEvo, pVblankSemControl); + + if (ret) { + nvEvoDestroyApiHandle(&pOpenDisp->vblankSemControlHandles, + pParams->request.vblankSemControlHandle); + } + + return ret; +} + +static NvBool AccelVblankSemControls( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + const struct NvKmsAccelVblankSemControlsParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDevEvoPtr pDevEvo; + NVDispEvoRec *pDispEvo; + + if (!GetPerOpenDevAndDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + &pOpenDev, + &pOpenDisp)) { + return FALSE; + } + + if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { + return FALSE; + } + + pDevEvo = pOpenDev->pDevEvo; + pDispEvo = pOpenDisp->pDispEvo; + + return nvEvoAccelVblankSemControls( + pDevEvo, + pDispEvo, + pParams->request.headMask); +} + +static NvBool VrrSignalSemaphore( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsPerOpenDev *pOpenDev; + + const struct 
NvKmsVrrSignalSemaphoreParams *pParams = pParamsVoid; + NvS32 vrrSemaphoreIndex = pParams->request.vrrSemaphoreIndex; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + if (pOpenDev == NULL) { + return FALSE; + } + + nvVrrSignalSemaphore(pOpenDev->pDevEvo, vrrSemaphoreIndex); + return TRUE; +} + +static NvBool FramebufferConsoleDisabled( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + const struct NvKmsFramebufferConsoleDisabledParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev = + GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { + return FALSE; + } + + if (pOpen->clientType != NVKMS_CLIENT_KERNEL_SPACE) { + return FALSE; + } + + nvRmUnmapFbConsoleMemory(pOpenDev->pDevEvo); + return TRUE; +} + +/*! + * Perform the ioctl operation requested by the client. + * + * \param[in,out] pOpenVoid The per-open data, allocated by + * nvKmsOpen(). + * \param[in] cmdOpaque The NVKMS_IOCTL_ operation to perform. + * \param[in,out] paramsAddress A pointer, in the client process's + * address space, to the parameter + * structure. This is cmd-specific. + * \param[in] paramSize The client-specified size of the params. + * + * \return Return TRUE if the ioctl operation was successfully + * performed. Otherwise, return FALSE. + */ +NvBool nvKmsIoctl( + void *pOpenVoid, + const NvU32 cmdOpaque, + const NvU64 paramsAddress, + const size_t paramSize) +{ + static const struct { + + NvBool (*proc)(struct NvKmsPerOpen *pOpen, void *pParamsVoid); + NvBool (*prepUser)(void *pParamsVoid, void *pExtraStateVoid); + NvBool (*doneUser)(void *pParamsVoid, void *pExtraStateVoid); + const size_t paramSize; + /* Size of extra state tracked for user parameters */ + const size_t extraSize; + + const size_t requestSize; + const size_t requestOffset; + + const size_t replySize; + const size_t replyOffset; + + } dispatch[] = { + +#define _ENTRY_WITH_USER(_cmd, _func, _prepUser, _doneUser, _extraSize) \ + [_cmd] = { \ + .proc = _func, \ + .prepUser = _prepUser, \ + .doneUser = _doneUser, \ + .paramSize = sizeof(struct NvKms##_func##Params), \ + .requestSize = sizeof(struct NvKms##_func##Request), \ + .requestOffset = offsetof(struct NvKms##_func##Params, request), \ + .replySize = sizeof(struct NvKms##_func##Reply), \ + .replyOffset = offsetof(struct NvKms##_func##Params, reply), \ + .extraSize = _extraSize, \ + } + +#define ENTRY(_cmd, _func) \ + _ENTRY_WITH_USER(_cmd, _func, NULL, NULL, 0) + +#define ENTRY_CUSTOM_USER(_cmd, _func) \ + _ENTRY_WITH_USER(_cmd, _func, \ + _func##PrepUser, _func##DoneUser, \ + sizeof(struct NvKms##_func##ExtraUserState)) + + ENTRY(NVKMS_IOCTL_ALLOC_DEVICE, AllocDevice), + ENTRY(NVKMS_IOCTL_FREE_DEVICE, FreeDevice), + ENTRY(NVKMS_IOCTL_QUERY_DISP, QueryDisp), + ENTRY(NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA, QueryConnectorStaticData), + ENTRY(NVKMS_IOCTL_QUERY_CONNECTOR_DYNAMIC_DATA, QueryConnectorDynamicData), + ENTRY(NVKMS_IOCTL_QUERY_DPY_STATIC_DATA, QueryDpyStaticData), + ENTRY(NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA, QueryDpyDynamicData), + ENTRY_CUSTOM_USER(NVKMS_IOCTL_VALIDATE_MODE_INDEX, ValidateModeIndex), + ENTRY_CUSTOM_USER(NVKMS_IOCTL_VALIDATE_MODE, ValidateMode), + ENTRY_CUSTOM_USER(NVKMS_IOCTL_SET_MODE, SetMode), + ENTRY(NVKMS_IOCTL_SET_CURSOR_IMAGE, SetCursorImage), + ENTRY(NVKMS_IOCTL_MOVE_CURSOR, MoveCursor), + ENTRY_CUSTOM_USER(NVKMS_IOCTL_SET_LUT, SetLut), + ENTRY(NVKMS_IOCTL_CHECK_LUT_NOTIFIER, CheckLutNotifier), + 
ENTRY(NVKMS_IOCTL_IDLE_BASE_CHANNEL, IdleBaseChannel), + ENTRY_CUSTOM_USER(NVKMS_IOCTL_FLIP, Flip), + ENTRY(NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST, + DeclareDynamicDpyInterest), + ENTRY(NVKMS_IOCTL_REGISTER_SURFACE, RegisterSurface), + ENTRY(NVKMS_IOCTL_UNREGISTER_SURFACE, UnregisterSurface), + ENTRY(NVKMS_IOCTL_GRANT_SURFACE, GrantSurface), + ENTRY(NVKMS_IOCTL_ACQUIRE_SURFACE, AcquireSurface), + ENTRY(NVKMS_IOCTL_RELEASE_SURFACE, ReleaseSurface), + ENTRY(NVKMS_IOCTL_SET_DPY_ATTRIBUTE, SetDpyAttribute), + ENTRY(NVKMS_IOCTL_GET_DPY_ATTRIBUTE, GetDpyAttribute), + ENTRY(NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES, + GetDpyAttributeValidValues), + ENTRY(NVKMS_IOCTL_SET_DISP_ATTRIBUTE, SetDispAttribute), + ENTRY(NVKMS_IOCTL_GET_DISP_ATTRIBUTE, GetDispAttribute), + ENTRY(NVKMS_IOCTL_GET_DISP_ATTRIBUTE_VALID_VALUES, + GetDispAttributeValidValues), + ENTRY(NVKMS_IOCTL_QUERY_FRAMELOCK, QueryFrameLock), + ENTRY(NVKMS_IOCTL_SET_FRAMELOCK_ATTRIBUTE, SetFrameLockAttribute), + ENTRY(NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE, GetFrameLockAttribute), + ENTRY(NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE_VALID_VALUES, + GetFrameLockAttributeValidValues), + ENTRY(NVKMS_IOCTL_GET_NEXT_EVENT, GetNextEvent), + ENTRY(NVKMS_IOCTL_DECLARE_EVENT_INTEREST, DeclareEventInterest), + ENTRY(NVKMS_IOCTL_CLEAR_UNICAST_EVENT, ClearUnicastEvent), + ENTRY(NVKMS_IOCTL_SET_LAYER_POSITION, SetLayerPosition), + ENTRY(NVKMS_IOCTL_GRAB_OWNERSHIP, GrabOwnership), + ENTRY(NVKMS_IOCTL_RELEASE_OWNERSHIP, ReleaseOwnership), + ENTRY(NVKMS_IOCTL_GRANT_PERMISSIONS, GrantPermissions), + ENTRY(NVKMS_IOCTL_ACQUIRE_PERMISSIONS, AcquirePermissions), + ENTRY(NVKMS_IOCTL_REVOKE_PERMISSIONS, RevokePermissions), + ENTRY(NVKMS_IOCTL_QUERY_DPY_CRC32, QueryDpyCRC32), + ENTRY(NVKMS_IOCTL_REGISTER_DEFERRED_REQUEST_FIFO, + RegisterDeferredRequestFifo), + ENTRY(NVKMS_IOCTL_UNREGISTER_DEFERRED_REQUEST_FIFO, + UnregisterDeferredRequestFifo), + ENTRY(NVKMS_IOCTL_ALLOC_SWAP_GROUP, AllocSwapGroup), + ENTRY(NVKMS_IOCTL_FREE_SWAP_GROUP, FreeSwapGroup), + ENTRY(NVKMS_IOCTL_JOIN_SWAP_GROUP, JoinSwapGroup), + ENTRY(NVKMS_IOCTL_LEAVE_SWAP_GROUP, LeaveSwapGroup), + ENTRY(NVKMS_IOCTL_SET_SWAP_GROUP_CLIP_LIST, SetSwapGroupClipList), + ENTRY(NVKMS_IOCTL_GRANT_SWAP_GROUP, GrantSwapGroup), + ENTRY(NVKMS_IOCTL_ACQUIRE_SWAP_GROUP, AcquireSwapGroup), + ENTRY(NVKMS_IOCTL_RELEASE_SWAP_GROUP, ReleaseSwapGroup), + ENTRY(NVKMS_IOCTL_SWITCH_MUX, SwitchMux), + ENTRY(NVKMS_IOCTL_GET_MUX_STATE, GetMuxState), + ENTRY(NVKMS_IOCTL_EXPORT_VRR_SEMAPHORE_SURFACE, ExportVrrSemaphoreSurface), + ENTRY(NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT, EnableVblankSyncObject), + ENTRY(NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT, DisableVblankSyncObject), + ENTRY(NVKMS_IOCTL_NOTIFY_VBLANK, NotifyVblank), + ENTRY(NVKMS_IOCTL_SET_FLIPLOCK_GROUP, SetFlipLockGroup), + ENTRY(NVKMS_IOCTL_ENABLE_VBLANK_SEM_CONTROL, EnableVblankSemControl), + ENTRY(NVKMS_IOCTL_DISABLE_VBLANK_SEM_CONTROL, DisableVblankSemControl), + ENTRY(NVKMS_IOCTL_ACCEL_VBLANK_SEM_CONTROLS, AccelVblankSemControls), + ENTRY(NVKMS_IOCTL_VRR_SIGNAL_SEMAPHORE, VrrSignalSemaphore), + ENTRY(NVKMS_IOCTL_FRAMEBUFFER_CONSOLE_DISABLED, FramebufferConsoleDisabled), + }; + + struct NvKmsPerOpen *pOpen = pOpenVoid; + void *pParamsKernelPointer; + NvBool ret; + enum NvKmsIoctlCommand cmd = cmdOpaque; + void *pExtraUserState = NULL; + + if (!AssignNvKmsPerOpenType(pOpen, NvKmsPerOpenTypeIoctl, TRUE)) { + return FALSE; + } + + if (cmd >= ARRAY_LEN(dispatch)) { + return FALSE; + } + + if (dispatch[cmd].proc == NULL) { + return FALSE; + } + + if (paramSize != 
dispatch[cmd].paramSize) { + return FALSE; + } + + if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) { + pParamsKernelPointer = nvCalloc(1, paramSize + dispatch[cmd].extraSize); + if (pParamsKernelPointer == NULL) { + return FALSE; + } + + if (dispatch[cmd].requestSize > 0) { + int status = + nvkms_copyin((char *) pParamsKernelPointer + + dispatch[cmd].requestOffset, + paramsAddress + dispatch[cmd].requestOffset, + dispatch[cmd].requestSize); + if (status != 0) { + nvFree(pParamsKernelPointer); + return FALSE; + } + } + + if (dispatch[cmd].prepUser) { + pExtraUserState = (char *)pParamsKernelPointer + paramSize; + + if (!dispatch[cmd].prepUser(pParamsKernelPointer, + pExtraUserState)) { + nvFree(pParamsKernelPointer); + return FALSE; + } + } + } else { + pParamsKernelPointer = nvKmsNvU64ToPointer(paramsAddress); + } + + ret = dispatch[cmd].proc(pOpen, pParamsKernelPointer); + + if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) { + + if (dispatch[cmd].doneUser) { + pExtraUserState = (char *)pParamsKernelPointer + paramSize; + + if (!dispatch[cmd].doneUser(pParamsKernelPointer, + pExtraUserState)) { + ret = FALSE; + } + } + + if (dispatch[cmd].replySize > 0) { + int status = + nvkms_copyout(paramsAddress + dispatch[cmd].replyOffset, + (char *) pParamsKernelPointer + + dispatch[cmd].replyOffset, + dispatch[cmd].replySize); + if (status != 0) { + ret = FALSE; + } + } + + nvFree(pParamsKernelPointer); + } + + return ret; +} + + +/*! + * Close callback. + * + * \param[in,out] pOpenVoid The per-open data, allocated by nvKmsOpen(). + */ +void nvKmsClose(void *pOpenVoid) +{ + struct NvKmsPerOpen *pOpen = pOpenVoid; + + if (pOpen == NULL) { + return; + } + + /* + * First remove the pOpen from global tracking. Otherwise, assertions can + * fail in the free paths below -- the assertions check that the object + * being freed is not tracked by any pOpen. + */ + nvListDel(&pOpen->perOpenListEntry); + + if (pOpen->type == NvKmsPerOpenTypeIoctl) { + + struct NvKmsPerOpenEventListEntry *pEntry, *pEntryTmp; + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, dev) { + FreeDeviceReference(pOpen, pOpenDev); + } + + nvEvoDestroyApiHandles(&pOpen->ioctl.frameLockHandles); + + nvEvoDestroyApiHandles(&pOpen->ioctl.devHandles); + + nvListForEachEntry_safe(pEntry, pEntryTmp, + &pOpen->ioctl.eventList, eventListEntry) { + nvListDel(&pEntry->eventListEntry); + nvFree(pEntry); + } + + nvListDel(&pOpen->perOpenIoctlListEntry); + } + + if (pOpen->type == NvKmsPerOpenTypeGrantSurface) { + nvAssert(pOpen->grantSurface.pSurfaceEvo != NULL); + nvEvoDecrementSurfaceStructRefCnt(pOpen->grantSurface.pSurfaceEvo); + } + + if (pOpen->type == NvKmsPerOpenTypeGrantSwapGroup) { + nvAssert(pOpen->grantSwapGroup.pSwapGroup != NULL); + nvHsDecrementSwapGroupRefCnt(pOpen->grantSwapGroup.pSwapGroup); + } + + if (pOpen->type == NvKmsPerOpenTypeUnicastEvent) { + nvRemoveUnicastEvent(pOpen); + } + + nvFree(pOpen); +} + + +/* + *Frees all references to a device + */ +void nvRevokeDevice(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo == NULL) { + return; + } + + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); + if (pOpenDev == NULL) { + continue; + } + if (pOpenDev == pDevEvo->pNvKmsOpenDev) { + // do not free the internal pOpenDev, as that is handled + // by nvFreeDevEvo + continue; + } + FreeDeviceReference(pOpen, pOpenDev); + } +} + +/*! 
+ * Open callback. + * + * Allocate, initialize, and return an opaque pointer to an NvKmsPerOpen. + * + * \return If successful, return an NvKmsPerOpen pointer. Otherwise, + * return NULL. + */ +void *nvKmsOpen( + NvU32 pid, + enum NvKmsClientType clientType, + nvkms_per_open_handle_t *pOpenKernel) +{ + struct NvKmsPerOpen *pOpen = nvCalloc(1, sizeof(*pOpen)); + + if (pOpen == NULL) { + goto fail; + } + + pOpen->pid = pid; + pOpen->clientType = clientType; + pOpen->type = NvKmsPerOpenTypeUndefined; + pOpen->pOpenKernel = pOpenKernel; + + nvListAppend(&pOpen->perOpenListEntry, &perOpenList); + + return pOpen; + +fail: + nvKmsClose(pOpen); + return NULL; +} + +extern const char *const pNV_KMS_ID; + +#if NVKMS_PROCFS_ENABLE + +static const char *ProcFsPerOpenTypeString( + enum NvKmsPerOpenType type) +{ + switch (type) { + case NvKmsPerOpenTypeIoctl: return "ioctl"; + case NvKmsPerOpenTypeGrantSurface: return "grantSurface"; + case NvKmsPerOpenTypeGrantSwapGroup: return "grantSwapGroup"; + case NvKmsPerOpenTypeGrantPermissions: return "grantPermissions"; + case NvKmsPerOpenTypeUnicastEvent: return "unicastEvent"; + case NvKmsPerOpenTypeUndefined: return "undefined"; + } + + return "unknown"; +} + +static const char *ProcFsUnicastEventTypeString( + enum NvKmsUnicastEventType type) +{ + switch (type) { + case NvKmsUnicastEventTypeDeferredRequest: return "DeferredRequest"; + case NvKmsUnicastEventTypeVblankNotification: return "VblankNotification"; + case NvKmsUnicastEventTypeUndefined: return "undefined"; + } + + return "unknown"; +} + +static const char *ProcFsPerOpenClientTypeString( + enum NvKmsClientType clientType) +{ + switch (clientType) { + case NVKMS_CLIENT_USER_SPACE: return "user-space"; + case NVKMS_CLIENT_KERNEL_SPACE: return "kernel-space"; + } + + return "unknown"; +} + +static const char *ProcFsPermissionsTypeString( + enum NvKmsPermissionsType permissionsType) +{ + switch (permissionsType) { + case NV_KMS_PERMISSIONS_TYPE_FLIPPING: return "flipping"; + case NV_KMS_PERMISSIONS_TYPE_MODESET: return "modeset"; + case NV_KMS_PERMISSIONS_TYPE_SUB_OWNER:return "sub-owner"; + } + + return "unknown"; +} + +static void +ProcFsPrintClients( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString) +{ + struct NvKmsPerOpen *pOpen; + NVEvoInfoStringRec infoString; + + nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { + + const char *extra = ""; + + nvInitInfoString(&infoString, buffer, size); + + if (pOpen == nvEvoGlobal.nvKmsPerOpen) { + extra = " (NVKMS-internal client)"; + } + + nvEvoLogInfoString(&infoString, + "Client (pOpen) : %p", pOpen); + nvEvoLogInfoString(&infoString, + " pid : %d%s", pOpen->pid, extra); + nvEvoLogInfoString(&infoString, + " clientType : %s", + ProcFsPerOpenClientTypeString(pOpen->clientType)); + nvEvoLogInfoString(&infoString, + " type : %s", + ProcFsPerOpenTypeString(pOpen->type)); + + if (pOpen->type == NvKmsPerOpenTypeIoctl) { + + NvKmsGenericHandle deviceHandle; + struct NvKmsPerOpenDev *pOpenDev; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, deviceHandle) { + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; + + nvEvoLogInfoString(&infoString, + " pDevEvo (deviceId:%02d) : %p", + pDevEvo->deviceId.rmDeviceId, pDevEvo); + nvEvoLogInfoString(&infoString, + " NvKmsDeviceHandle : %d", deviceHandle); + } + + } else if (pOpen->type == NvKmsPerOpenTypeGrantSurface) { + + NVSurfaceEvoPtr pSurfaceEvo = pOpen->grantSurface.pSurfaceEvo; + + nvEvoLogInfoString(&infoString, + " pSurfaceEvo : %p", 
pSurfaceEvo); + + } else if (pOpen->type == NvKmsPerOpenTypeGrantPermissions) { + + NVDevEvoPtr pDevEvo = pOpen->grantPermissions.pDevEvo; + const struct NvKmsPermissions *pPerms = + &pOpen->grantPermissions.permissions; + + nvEvoLogInfoString(&infoString, + " pDevEvo (deviceId:%02d) : %p", + pDevEvo->deviceId.rmDeviceId, pDevEvo); + + nvEvoLogInfoString(&infoString, + " PermissionsType : %s", + ProcFsPermissionsTypeString(pPerms->type)); + + if (pPerms->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { + NvU32 d, h; + + for (d = 0; d < ARRAY_LEN(pPerms->flip.disp); d++) { + for (h = 0; h < ARRAY_LEN(pPerms->flip.disp[d].head); h++) { + + const NvU8 layerMask = + pPerms->flip.disp[d].head[h].layerMask; + + if (layerMask == 0) { + continue; + } + + nvEvoLogInfoString(&infoString, + " disp:%02d, head:%02d : 0x%08x", d, h, + layerMask); + } + } + } else if (pPerms->type == NV_KMS_PERMISSIONS_TYPE_MODESET) { + NvU32 d, h; + + for (d = 0; d < ARRAY_LEN(pPerms->flip.disp); d++) { + for (h = 0; h < ARRAY_LEN(pPerms->flip.disp[d].head); h++) { + + NVDpyIdList dpyIdList = + pPerms->modeset.disp[d].head[h].dpyIdList; + NVDispEvoPtr pDispEvo; + char *dpys; + + if (nvDpyIdListIsEmpty(dpyIdList)) { + continue; + } + + pDispEvo = pDevEvo->pDispEvo[d]; + + dpys = nvGetDpyIdListStringEvo(pDispEvo, dpyIdList); + + if (dpys == NULL) { + continue; + } + + nvEvoLogInfoString(&infoString, + " disp:%02d, head:%02d : %s", d, h, dpys); + + nvFree(dpys); + } + } + } + } else if (pOpen->type == NvKmsPerOpenTypeGrantSwapGroup) { + + NVDevEvoPtr pDevEvo = pOpen->grantSwapGroup.pDevEvo; + + nvEvoLogInfoString(&infoString, + " pDevEvo (deviceId:%02d) : %p", + pDevEvo->deviceId.rmDeviceId, pDevEvo); + nvEvoLogInfoString(&infoString, + " pSwapGroup : %p", + pOpen->grantSwapGroup.pSwapGroup); + + } else if (pOpen->type == NvKmsPerOpenTypeUnicastEvent) { + nvEvoLogInfoString(&infoString, + " unicastEvent type : %s", + ProcFsUnicastEventTypeString(pOpen->unicastEvent.type)); + switch(pOpen->unicastEvent.type) { + case NvKmsUnicastEventTypeDeferredRequest: + nvEvoLogInfoString(&infoString, + " pDeferredRequestFifo : %p", + pOpen->unicastEvent.e.deferred.pDeferredRequestFifo); + break; + case NvKmsUnicastEventTypeVblankNotification: + nvEvoLogInfoString(&infoString, + " head : %x", + pOpen->unicastEvent.e.vblankNotification.apiHead); + break; + default: + break; + } + } + + nvEvoLogInfoString(&infoString, ""); + outString(data, buffer); + } +} + +static void PrintSurfacePlanes( + NVEvoInfoStringRec *pInfoString, + const NVSurfaceEvoRec *pSurfaceEvo) +{ + NvU8 planeIndex; + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + nvEvoLogInfoString(pInfoString, + "plane[%u] disp ctxDma:0x%08x pitch:%u offset:%" NvU64_fmtu + " rmObjectSizeInBytes:%" NvU64_fmtu, + planeIndex, + pSurfaceEvo->planes[planeIndex].surfaceDesc.ctxDmaHandle, + pSurfaceEvo->planes[planeIndex].pitch, + pSurfaceEvo->planes[planeIndex].offset, + pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes); + } +} + +static void PrintSurfaceClients( + NVEvoInfoStringRec *pInfoString, + const NVSurfaceEvoRec *pSurfaceEvo, + const NVDevEvoRec *pDevEvo) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + NvKmsGenericHandle deviceHandle; + struct NvKmsPerOpenDev *pOpenDev; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, deviceHandle) { + NvKmsGenericHandle surfaceHandle; + NVSurfaceEvoPtr pTmpSurfaceEvo; + + if (pOpenDev->pDevEvo != pDevEvo) { + continue; + } + + 
FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, + pTmpSurfaceEvo, surfaceHandle) { + if (pTmpSurfaceEvo != pSurfaceEvo) { + continue; + } + + nvEvoLogInfoString(pInfoString, + " pOpen : %p", pOpen); + nvEvoLogInfoString(pInfoString, + " pOpenDev : %p", pOpenDev); + nvEvoLogInfoString(pInfoString, + " NvKmsSurfaceHandle : %d", surfaceHandle); + } + } + } +} + +static void PrintSurface( + NVEvoInfoStringRec *pInfoString, + const NVSurfaceEvoRec *pSurfaceEvo, + const NVDevEvoRec *pDevEvo) +{ + NvU32 sd; + + nvEvoLogInfoString(pInfoString, + "pSurfaceEvo : %p", pSurfaceEvo); + nvEvoLogInfoString(pInfoString, + " pDevEvo (deviceId:%02d) : %p", pDevEvo->deviceId.rmDeviceId, pDevEvo); + nvEvoLogInfoString(pInfoString, + " owner : " + "pOpenDev:%p, NvKmsSurfaceHandle:%d", + pSurfaceEvo->owner.pOpenDev, + pSurfaceEvo->owner.surfaceHandle); + nvEvoLogInfoString(pInfoString, + " {width,height}InPixels : %d x %d", + pSurfaceEvo->widthInPixels, + pSurfaceEvo->heightInPixels); + nvEvoLogInfoString(pInfoString, + " misc : " + "log2GobsPerBlockY:%d", + pSurfaceEvo->log2GobsPerBlockY); + nvEvoLogInfoString(pInfoString, + " gpuAddress : 0x%016" NvU64_fmtx, + pSurfaceEvo->gpuAddress); + nvEvoLogInfoString(pInfoString, + " memory : layout:%s format:%s", + NvKmsSurfaceMemoryLayoutToString(pSurfaceEvo->layout), + nvKmsSurfaceMemoryFormatToString(pSurfaceEvo->format)); + nvEvoLogInfoString(pInfoString, + " refCnts : " + "rmRefCnt:%" NvU64_fmtx" structRefCnt:%" NvU64_fmtx, + pSurfaceEvo->rmRefCnt, + pSurfaceEvo->structRefCnt); + + PrintSurfacePlanes(pInfoString, pSurfaceEvo); + + nvEvoLogInfoString(pInfoString, + " clients :"); + + PrintSurfaceClients(pInfoString, pSurfaceEvo, pDevEvo); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (pSurfaceEvo->cpuAddress[sd] != NULL) { + nvEvoLogInfoString(pInfoString, + " cpuAddress[%02d] : %p", + sd, pSurfaceEvo->cpuAddress[sd]); + } + } + + nvEvoLogInfoString(pInfoString, ""); +} + +static void +ProcFsPrintSurfaces( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString) +{ + struct NvKmsPerOpen *pOpen; + NVEvoInfoStringRec infoString; + NvU32 i; + + for (i = 0; i < 2; i++) { + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + NvKmsGenericHandle deviceHandle; + struct NvKmsPerOpenDev *pOpenDev; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, deviceHandle) { + + NvKmsGenericHandle surfaceHandle; + NVSurfaceEvoPtr pSurfaceEvo; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, + pSurfaceEvo, + surfaceHandle) { + /* + * Because clients can grant surfaces between each + * other, a pSurfaceEvo could be in multiple clients' + * lists. So, we loop over all surfaces on all clients + * twice: the first time we print unique surfaces and set + * 'procFsFlag' to recognize duplicates. The second time, + * we clear 'procFsFlag'. 
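+                     * That leaves every flag cleared again, ready for
+                     * the next read of this procfs file.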
+ */ + if (i == 0) { + if (pSurfaceEvo->procFsFlag) { + continue; + } + + nvInitInfoString(&infoString, buffer, size); + PrintSurface(&infoString, pSurfaceEvo, + pOpenDev->pDevEvo); + outString(data, buffer); + + pSurfaceEvo->procFsFlag = TRUE; + } else { + pSurfaceEvo->procFsFlag = FALSE; + } + } + } + } + } +} + +static void +ProcFsPrintHeadSurface( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString) +{ + NVDevEvoPtr pDevEvo; + NVDispEvoPtr pDispEvo; + NvU32 dispIndex, apiHead; + NVEvoInfoStringRec infoString; + + FOR_ALL_EVO_DEVS(pDevEvo) { + + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + "pDevEvo (deviceId:%02d) : %p", + pDevEvo->deviceId.rmDeviceId, pDevEvo); + outString(data, buffer); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + " pDispEvo (dispIndex:%02d) : %p", + dispIndex, pDispEvo); + outString(data, buffer); + + for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { + nvInitInfoString(&infoString, buffer, size); + nvHsProcFs(&infoString, pDevEvo, dispIndex, apiHead); + nvEvoLogInfoString(&infoString, ""); + outString(data, buffer); + } + } + } +} + +static const char *SwapGroupPerEyeStereoString(const NvU32 request) +{ + const NvU32 value = + DRF_VAL(KMS, _DEFERRED_REQUEST, + _SWAP_GROUP_READY_PER_EYE_STEREO, request); + + switch (value) { + + case NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_PAIR: + return "PerPair"; + case NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_EYE: + return "PerEye"; + } + + return "Unknown"; +} + +static void ProcFsPrintOneDeferredRequestFifo( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString, + const NVDeferredRequestFifoRec *pDeferredRequestFifo, + const struct NvKmsPerOpen *pOpen, + const struct NvKmsPerOpenDev *pOpenDev, + const NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle) +{ + NVEvoInfoStringRec infoString; + + const struct NvKmsDeferredRequestFifo *fifo = pDeferredRequestFifo->fifo; + NvU32 i, prevI; + + nvInitInfoString(&infoString, buffer, size); + + nvEvoLogInfoString(&infoString, + "pDeferredRequestFifo : %p", pDeferredRequestFifo); + + nvEvoLogInfoString(&infoString, + " Client (pOpen) : %p", pOpen); + + nvEvoLogInfoString(&infoString, + " pOpenDev : %p", pOpenDev); + + nvEvoLogInfoString(&infoString, + " pSurfaceEvo : %p", pDeferredRequestFifo->pSurfaceEvo); + + nvEvoLogInfoString(&infoString, + " NvKms...RequestFifoHandle : %d", deferredRequestFifoHandle); + + if (pDeferredRequestFifo->swapGroup.pSwapGroup != NULL) { + + nvEvoLogInfoString(&infoString, + " swapGroup :"); + nvEvoLogInfoString(&infoString, + " pSwapGroup : %p", + pDeferredRequestFifo->swapGroup.pSwapGroup); + nvEvoLogInfoString(&infoString, + " pOpenUnicastEvent : %p", + pDeferredRequestFifo->swapGroup.pOpenUnicastEvent); + nvEvoLogInfoString(&infoString, + " ready : %d", + pDeferredRequestFifo->swapGroup.ready); + nvEvoLogInfoString(&infoString, + " semaphoreIndex : 0x%02x", + pDeferredRequestFifo->swapGroup.semaphoreIndex); + } + + nvEvoLogInfoString(&infoString, + " put : %d", fifo->put); + + nvEvoLogInfoString(&infoString, + " get : %d", fifo->get); + + outString(data, buffer); + + for (i = 0; i < ARRAY_LEN(fifo->request); i++) { + + const NvU32 request = fifo->request[i]; + const NvU32 opcode = DRF_VAL(KMS, _DEFERRED_REQUEST, _OPCODE, request); + const NvU32 semaphoreIndex = + DRF_VAL(KMS, _DEFERRED_REQUEST, 
_SEMAPHORE_INDEX, request); + + switch (opcode) { + + case NVKMS_DEFERRED_REQUEST_OPCODE_NOP: + break; + + case NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY: + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + " request[0x%02x] : " + "opcode:SWAP_GROUP_READY, semaphoreIndex:0x%02x, " + "perEyeStereo:%s", + i, semaphoreIndex, + SwapGroupPerEyeStereoString(request)); + outString(data, buffer); + break; + + default: + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + " request[0x%02x] : opcode:INVALID", i); + outString(data, buffer); + break; + } + } + + /* + * Print the fifo->semaphore[] array, but collapse multiple lines with + * duplicate values. + * + * To collapse duplicates, loop over all semaphore[] elements. If the + * current element is the same as semaphore[prev], continue. If they + * differ, print the value in semaphore[prev .. i-1], and update prev. + */ + prevI = 0; + + for (i = 1; i <= ARRAY_LEN(fifo->semaphore); i++) { + + const NvU32 prevValue = fifo->semaphore[prevI].data[0]; + + if (i != ARRAY_LEN(fifo->semaphore)) { + const NvU32 currentValue = fifo->semaphore[i].data[0]; + + /* + * If the value in this element matches the previous element, don't + * print anything, yet. + */ + if (currentValue == prevValue) { + continue; + } + } + + nvInitInfoString(&infoString, buffer, size); + + if (prevI == (i - 1)) { + nvEvoLogInfoString(&infoString, + " semaphore[0x%02x] : 0x%08x", + prevI, prevValue); + } else { + nvEvoLogInfoString(&infoString, + " semaphore[0x%02x..0x%02x] : 0x%08x", + prevI, i - 1, prevValue); + } + + outString(data, buffer); + + prevI = i; + } + + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, ""); + outString(data, buffer); +} + +static void +ProcFsPrintDeferredRequestFifos( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle devHandle; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES( + &pOpen->ioctl.devHandles, + pOpenDev, devHandle) { + + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvKmsGenericHandle fifoHandle; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES( + &pOpenDev->deferredRequestFifoHandles, + pDeferredRequestFifo, fifoHandle) { + + ProcFsPrintOneDeferredRequestFifo( + data, buffer, size, outString, + pDeferredRequestFifo, + pOpen, + pOpenDev, + fifoHandle); + } + } + } +} + +static void +ProcFsPrintDpyCrcs( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString) +{ + NVDevEvoPtr pDevEvo; + NVDispEvoPtr pDispEvo; + NvU32 dispIndex, head; + NVEvoInfoStringRec infoString; + + FOR_ALL_EVO_DEVS(pDevEvo) { + + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + "pDevEvo (deviceId:%02d) : %p", + pDevEvo->deviceId.rmDeviceId, pDevEvo); + outString(data, buffer); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + " pDispEvo (dispIndex:%02d) : %p", + dispIndex, pDispEvo); + outString(data, buffer); + + for (head = 0; head < pDevEvo->numHeads; head++) { + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + struct NvKmsDpyCRC32 compCrc; + struct NvKmsDpyCRC32 rgCrc; + struct NvKmsDpyCRC32 outputCrc; + CRC32NotifierCrcOut crcOut; + crcOut.compositorCrc32 = &compCrc; + crcOut.rasterGeneratorCrc32 = &rgCrc; 
+ crcOut.outputCrc32 = &outputCrc; + + if (pHeadState->pConnectorEvo == NULL) { + continue; + } + + nvInitInfoString(&infoString, buffer, size); + if (nvReadCRC32Evo(pDispEvo, head, &crcOut)) { + nvEvoLogInfoString(&infoString, + " head %d :", + head); + if (compCrc.supported) { + nvEvoLogInfoString(&infoString, + " compositor CRC : 0x%08x", + compCrc.value); + } else { + nvEvoLogInfoString(&infoString, + " compositor CRC : unsupported"); + } + if (rgCrc.supported) { + nvEvoLogInfoString(&infoString, + " raster generator CRC : 0x%08x", + rgCrc.value); + } else { + nvEvoLogInfoString(&infoString, + " raster generator CRC : unsupported"); + } + if (outputCrc.supported) { + nvEvoLogInfoString(&infoString, + " output CRC : 0x%08x", + outputCrc.value); + } else { + nvEvoLogInfoString(&infoString, + " output CRC : unsupported"); + } + } else { + nvEvoLogInfoString(&infoString, + " head %d : error", + head); + } + outString(data, buffer); + } + } + } +} + +static const char * +SignalFormatString(NvKmsConnectorSignalFormat signalFormat) +{ + switch (signalFormat) { + case NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA: return "VGA"; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS: return "LVDS"; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS: return "TMDS"; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_DP: return "DP"; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI: return "DSI"; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN: break; + } + + return "unknown"; +} + +static const char * +PixelDepthString(enum nvKmsPixelDepth pixelDepth) +{ + switch (pixelDepth) { + case NVKMS_PIXEL_DEPTH_18_444: return "18bpp 4:4:4"; + case NVKMS_PIXEL_DEPTH_24_444: return "24bpp 4:4:4"; + case NVKMS_PIXEL_DEPTH_30_444: return "30bpp 4:4:4"; + case NVKMS_PIXEL_DEPTH_20_422: return "20bpp 4:2:2"; + case NVKMS_PIXEL_DEPTH_16_422: return "16bpp 4:2:2"; + } + + return "unknown"; +} + +static void +ProcFsPrintHeads( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString) +{ + NVDevEvoPtr pDevEvo; + NVDispEvoPtr pDispEvo; + NvU32 dispIndex, head; + NVEvoInfoStringRec infoString; + + FOR_ALL_EVO_DEVS(pDevEvo) { + + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + "pDevEvo (deviceId:%02d) : %p", + pDevEvo->deviceId.rmDeviceId, pDevEvo); + outString(data, buffer); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + const NVLockGroup *pLockGroup = pDispEvo->pLockGroup; + + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + " pDispEvo (dispIndex:%02d) : %p", + dispIndex, pDispEvo); + if (pLockGroup != NULL) { + const NvBool flipLocked = nvIsLockGroupFlipLocked(pLockGroup); + nvEvoLogInfoString(&infoString, + " pLockGroup : %p", + pLockGroup); + nvEvoLogInfoString(&infoString, + " flipLock : %s", + flipLocked ? "yes" : "no"); + } + outString(data, buffer); + + if (pDevEvo->coreInitMethodsPending) { + /* If the core channel has been allocated but no mode has yet + * been set, pConnectorEvo will be non-NULL for heads being + * driven by the console, but data like the mode timings will + * be bogus. 
*/ + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, " (not yet initialized)"); + outString(data, buffer); + continue; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + const NVConnectorEvoRec *pConnectorEvo = + pHeadState->pConnectorEvo; + const NVHwModeTimingsEvo *pHwModeTimings = + &pHeadState->timings; + + nvInitInfoString(&infoString, buffer, size); + if (pConnectorEvo == NULL) { + nvEvoLogInfoString(&infoString, + " head %d : inactive", + head); + } else { + const NvU32 refreshRate10kHz = + nvGetRefreshRate10kHz(pHwModeTimings); + + nvEvoLogInfoString(&infoString, + " head %d : %s", + head, pConnectorEvo->name); + + nvEvoLogInfoString(&infoString, + " protocol : %s", + SignalFormatString(pConnectorEvo->signalFormat)); + + nvEvoLogInfoString(&infoString, + " mode : %u x %u @ %u.%04u Hz", + nvEvoVisibleWidth(pHwModeTimings), + nvEvoVisibleHeight(pHwModeTimings), + refreshRate10kHz / 10000, + refreshRate10kHz % 10000); + + nvEvoLogInfoString(&infoString, + " depth : %s", + PixelDepthString(pHeadState->pixelDepth)); + } + outString(data, buffer); + } + } + } +} + +#endif /* NVKMS_PROCFS_ENABLE */ + +void nvKmsGetProcFiles(const nvkms_procfs_file_t **ppProcFiles) +{ +#if NVKMS_PROCFS_ENABLE + static const nvkms_procfs_file_t procFiles[] = { + { "clients", ProcFsPrintClients }, + { "surfaces", ProcFsPrintSurfaces }, + { "headsurface", ProcFsPrintHeadSurface }, + { "deferred-request-fifos", ProcFsPrintDeferredRequestFifos }, + { "crcs", ProcFsPrintDpyCrcs }, + { "heads", ProcFsPrintHeads }, + { NULL, NULL }, + }; + + *ppProcFiles = procFiles; +#else + *ppProcFiles = NULL; +#endif +} + +static void FreeGlobalState(void) +{ + nvInvalidateRasterLockGroupsEvo(); + + nvKmsClose(nvEvoGlobal.nvKmsPerOpen); + nvEvoGlobal.nvKmsPerOpen = NULL; + + if (nvEvoGlobal.clientHandle != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle); + nvEvoGlobal.clientHandle = 0; + } + + nvClearDpyOverrides(); +} + +/* + * Wrappers to help SMG access NvKmsKAPI's RM context. 
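+ *
+ * These wrappers match the nvRMContext function pointer signatures but
+ * ignore the rmctx argument and simply forward to the module-global RM
+ * API entry points.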
+ */
+static NvU32 EvoGlobalRMControl(nvRMContextPtr rmctx, NvU32 client, NvU32 object, NvU32 cmd, void *params, NvU32 paramsSize)
+{
+    return nvRmApiControl(client, object, cmd, params, paramsSize);
+}
+
+static NvU32 EvoGlobalRMAlloc(nvRMContextPtr rmctx, NvU32 client, NvHandle parent, NvHandle object, NvU32 cls, void *allocParams)
+{
+    return nvRmApiAlloc(client, parent, object, cls, allocParams);
+}
+
+static NvU32 EvoGlobalRMFree(nvRMContextPtr rmctx, NvU32 client, NvHandle parent, NvHandle object)
+{
+    return nvRmApiFree(client, parent, object);
+}
+
+NvBool nvKmsModuleLoad(void)
+{
+    NvU32 ret = NVOS_STATUS_ERROR_GENERIC;
+
+    nvEvoLog(EVO_LOG_INFO, "Loading %s", pNV_KMS_ID);
+
+    ret = nvRmApiAlloc(NV01_NULL_OBJECT,
+                       NV01_NULL_OBJECT,
+                       NV01_NULL_OBJECT,
+                       NV01_ROOT,
+                       &nvEvoGlobal.clientHandle);
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLog(EVO_LOG_ERROR, "Failed to initialize client");
+        goto fail;
+    }
+
+    /* Initialize RM context */
+
+    nvEvoGlobal.rmSmgContext.clientHandle = nvEvoGlobal.clientHandle;
+    nvEvoGlobal.rmSmgContext.control = EvoGlobalRMControl;
+    nvEvoGlobal.rmSmgContext.alloc = EvoGlobalRMAlloc;
+    nvEvoGlobal.rmSmgContext.free = EvoGlobalRMFree;
+
+    nvEvoGlobal.nvKmsPerOpen = nvKmsOpen(0, NVKMS_CLIENT_KERNEL_SPACE, NULL);
+    if (!nvEvoGlobal.nvKmsPerOpen) {
+        nvEvoLog(EVO_LOG_ERROR, "Failed to initialize internal modeset client");
+        goto fail;
+    }
+
+    if (!AssignNvKmsPerOpenType(nvEvoGlobal.nvKmsPerOpen,
+                                NvKmsPerOpenTypeIoctl, FALSE)) {
+        goto fail;
+    }
+
+    return TRUE;
+fail:
+    FreeGlobalState();
+
+    return FALSE;
+}
+
+
+void nvKmsModuleUnload(void)
+{
+    FreeGlobalState();
+
+    nvAssert(nvListIsEmpty(&nvEvoGlobal.frameLockList));
+    nvAssert(nvListIsEmpty(&nvEvoGlobal.devList));
+#if defined(DEBUG)
+    nvReportUnfreedAllocations();
+#endif
+    nvEvoLog(EVO_LOG_INFO, "Unloading");
+}
+
+
+static void SendEvent(struct NvKmsPerOpen *pOpen,
+                      const struct NvKmsEvent *pEvent)
+{
+    struct NvKmsPerOpenEventListEntry *pEntry = nvAlloc(sizeof(*pEntry));
+
+    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);
+
+    if (pEntry == NULL) {
+        return;
+    }
+
+    pEntry->event = *pEvent;
+    nvListAppend(&pEntry->eventListEntry, &pOpen->ioctl.eventList);
+
+    nvkms_event_queue_changed(pOpen->pOpenKernel, TRUE);
+}
+
+static void ConsoleRestoreTimerFired(void *dataPtr, NvU32 dataU32)
+{
+    NVDevEvoPtr pDevEvo = dataPtr;
+
+    if (pDevEvo->modesetOwner == NULL && pDevEvo->handleConsoleHotplugs) {
+        pDevEvo->skipConsoleRestore = FALSE;
+        nvEvoRestoreConsole(pDevEvo, TRUE /* allowMST */);
+    }
+}
+
+/*!
+ * Generate a dpy event.
+ *
+ * \param[in]  pDpyEvo    The dpy for which the event should be generated.
+ * \param[in]  eventType  The NVKMS_EVENT_TYPE_ of the event to generate.
+ * \param[in]  attribute  The NvKmsDpyAttribute; only used for
+ *                        NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED.
+ * \param[in]  value      The NvKmsDpyAttribute value; only used for
+ *                        NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED.
+ */ +static void SendDpyEventEvo(const NVDpyEvoRec *pDpyEvo, + const NvU32 eventType, + const enum NvKmsDpyAttribute attribute, + const NvS64 value) +{ + struct NvKmsPerOpen *pOpen; + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + + struct NvKmsEvent event = { 0 }; + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + + if (!DispEvoToDevAndDispHandles(pOpen, pDispEvo, + &deviceHandle, &dispHandle)) { + continue; + } + + if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) { + continue; + } + + event.eventType = eventType; + + switch (eventType) { + + case NVKMS_EVENT_TYPE_DPY_CHANGED: + event.u.dpyChanged.deviceHandle = deviceHandle; + event.u.dpyChanged.dispHandle = dispHandle; + event.u.dpyChanged.dpyId = pDpyEvo->id; + break; + + case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED: + event.u.dynamicDpyConnected.deviceHandle = deviceHandle; + event.u.dynamicDpyConnected.dispHandle = dispHandle; + event.u.dynamicDpyConnected.dpyId = pDpyEvo->id; + break; + + case NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED: + event.u.dynamicDpyDisconnected.deviceHandle = deviceHandle; + event.u.dynamicDpyDisconnected.dispHandle = dispHandle; + event.u.dynamicDpyDisconnected.dpyId = pDpyEvo->id; + break; + + case NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED: + event.u.dpyAttributeChanged.deviceHandle = deviceHandle; + event.u.dpyAttributeChanged.dispHandle = dispHandle; + event.u.dpyAttributeChanged.dpyId = pDpyEvo->id; + event.u.dpyAttributeChanged.attribute = attribute; + event.u.dpyAttributeChanged.value = value; + break; + + default: + nvAssert(!"Bad eventType"); + return; + } + + SendEvent(pOpen, &event); + } + + if (eventType == NVKMS_EVENT_TYPE_DPY_CHANGED) { + NVDevEvoPtr pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + + if (pDevEvo->modesetOwner == NULL && pDevEvo->handleConsoleHotplugs) { + nvkms_free_timer(pDevEvo->consoleRestoreTimer); + pDevEvo->consoleRestoreTimer = + nvkms_alloc_timer(ConsoleRestoreTimerFired, pDevEvo, 0, 500); + } + } +} + +void nvSendDpyEventEvo(const NVDpyEvoRec *pDpyEvo, const NvU32 eventType) +{ + nvAssert(eventType != NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED); + SendDpyEventEvo(pDpyEvo, eventType, + 0 /* attribute (unused) */, + 0 /* value (unused) */ ); +} + +void nvSendDpyAttributeChangedEventEvo(const NVDpyEvoRec *pDpyEvo, + const enum NvKmsDpyAttribute attribute, + const NvS64 value) +{ + SendDpyEventEvo(pDpyEvo, + NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED, + attribute, value); +} + +void nvSendFrameLockAttributeChangedEventEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + const enum NvKmsFrameLockAttribute attribute, + const NvS64 value) +{ + struct NvKmsPerOpen *pOpen; + const NvU32 eventType = NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + + struct NvKmsEvent event = { 0 }; + NvKmsFrameLockHandle frameLockHandle; + + if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) { + continue; + } + + if (!FrameLockEvoToFrameLockHandle(pOpen, pFrameLockEvo, + &frameLockHandle)) { + continue; + } + + event.eventType = eventType; + event.u.frameLockAttributeChanged.frameLockHandle = frameLockHandle; + event.u.frameLockAttributeChanged.attribute = attribute; + event.u.frameLockAttributeChanged.value = value; + + SendEvent(pOpen, &event); + } +} + + +void nvSendFlipOccurredEventEvo(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead, const NvU32 layer) +{ + struct NvKmsPerOpen *pOpen; + const NvU32 eventType = 
NVKMS_EVENT_TYPE_FLIP_OCCURRED;
+
+    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {
+
+        struct NvKmsEvent event = { 0 };
+        NvKmsDeviceHandle deviceHandle;
+        NvKmsDispHandle dispHandle;
+
+        struct NvKmsPerOpenDev *pOpenDev;
+        const struct NvKmsFlipPermissions *pFlipPermissions;
+
+        pOpenDev = DevEvoToOpenDev(pOpen, pDispEvo->pDevEvo);
+
+        if (pOpenDev == NULL) {
+            continue;
+        }
+
+        if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) {
+            continue;
+        }
+
+        pFlipPermissions = &pOpenDev->flipPermissions;
+
+        if ((pFlipPermissions->disp[pDispEvo->displayOwner].
+             head[apiHead].layerMask & NVBIT(layer)) == 0x0) {
+            continue;
+        }
+
+        if (!DispEvoToDevAndDispHandles(pOpen, pDispEvo,
+                                        &deviceHandle, &dispHandle)) {
+            continue;
+        }
+
+        event.eventType = eventType;
+        event.u.flipOccurred.deviceHandle = deviceHandle;
+        event.u.flipOccurred.dispHandle = dispHandle;
+        event.u.flipOccurred.head = apiHead;
+        event.u.flipOccurred.layer = layer;
+
+        SendEvent(pOpen, &event);
+    }
+}
+
+void nvSendUnicastEvent(struct NvKmsPerOpen *pOpen)
+{
+    if (pOpen == NULL) {
+        return;
+    }
+
+    nvAssert(pOpen->type == NvKmsPerOpenTypeUnicastEvent);
+    nvAssert(pOpen->unicastEvent.type != NvKmsUnicastEventTypeUndefined);
+
+    nvkms_event_queue_changed(pOpen->pOpenKernel, TRUE);
+}
+
+void nvRemoveUnicastEvent(struct NvKmsPerOpen *pOpen)
+{
+    NVDeferredRequestFifoPtr pDeferredRequestFifo;
+    NvKmsGenericHandle callbackHandle;
+    NVVBlankCallbackPtr pCallbackData;
+    struct NvKmsPerOpenDisp *pOpenDisp;
+    NvU32 apiHead;
+
+    if (pOpen == NULL) {
+        return;
+    }
+
+    nvAssert(pOpen->type == NvKmsPerOpenTypeUnicastEvent);
+
+    switch(pOpen->unicastEvent.type)
+    {
+        case NvKmsUnicastEventTypeDeferredRequest:
+            pDeferredRequestFifo =
+                pOpen->unicastEvent.e.deferred.pDeferredRequestFifo;
+
+            pDeferredRequestFifo->swapGroup.pOpenUnicastEvent = NULL;
+            pOpen->unicastEvent.e.deferred.pDeferredRequestFifo = NULL;
+            break;
+        case NvKmsUnicastEventTypeVblankNotification:
+            /* grab fields from the unicast fd */
+            callbackHandle =
+                pOpen->unicastEvent.e.vblankNotification.hCallback;
+            pOpenDisp =
+                pOpen->unicastEvent.e.vblankNotification.pOpenDisp;
+            apiHead = pOpen->unicastEvent.e.vblankNotification.apiHead;
+
+            /* Unregister the vblank callback */
+            pCallbackData =
+                nvEvoGetPointerFromApiHandle(&pOpenDisp->vblankCallbackHandles[apiHead],
+                                             callbackHandle);
+
+            nvApiHeadUnregisterVBlankCallback(pOpenDisp->pDispEvo,
+                                              pCallbackData);
+
+            nvEvoDestroyApiHandle(&pOpenDisp->vblankCallbackHandles[apiHead],
+                                  callbackHandle);
+
+            /* invalidate the pOpen data */
+            pOpen->unicastEvent.e.vblankNotification.hCallback = 0;
+            pOpen->unicastEvent.e.vblankNotification.pOpenDisp = NULL;
+            pOpen->unicastEvent.e.vblankNotification.apiHead = NV_INVALID_HEAD;
+            break;
+        default:
+            nvAssert(!"Invalid Unicast Event Type!");
+            break;
+    }
+
+    pOpen->unicastEvent.type = NvKmsUnicastEventTypeUndefined;
+}
+
+static void AllocSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo)
+{
+    struct NvKmsPerOpen *pOpen;
+
+    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {
+        struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo);
+
+        NvKmsGenericHandle surfaceHandle;
+        NVSurfaceEvoPtr pSurfaceEvo;
+
+        if (pOpenDev == NULL) {
+            continue;
+        }
+
+        FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles,
+                                            pSurfaceEvo, surfaceHandle) {
+
+            NvU8 planeIndex;
+
+            if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) {
+                continue;
+            }
+
+            if (!pSurfaceEvo->requireDisplayHardwareAccess) {
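+                /*
+                 * Surfaces that never require display hardware access
+                 * keep a null surface descriptor; there is nothing to
+                 * re-bind for them.
+                 */
+                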
nvAssert(pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle == 0); + continue; + } + + /* + * Orphan surfaces should not get this far: they should + * fail the owner check above. + */ + nvAssert(pSurfaceEvo->rmRefCnt > 0); + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + NvU32 ret = + nvRmAllocAndBindSurfaceDescriptor( + pDevEvo, + pSurfaceEvo->planes[planeIndex].rmHandle, + pSurfaceEvo->layout, + pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes - 1, + &pSurfaceEvo->planes[planeIndex].surfaceDesc, + pSurfaceEvo->mapToDisplayRm); + if (ret != NVOS_STATUS_SUCCESS) { + FreeSurfaceCtxDmasForAllOpens(pDevEvo); + nvAssert(!"Failed to re-allocate surface descriptor"); + return; + } + } + } + } +} + + +static void FreeSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); + + NvKmsGenericHandle surfaceHandle; + NVSurfaceEvoPtr pSurfaceEvo; + + if (pOpenDev == NULL) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, + pSurfaceEvo, surfaceHandle) { + + NvU8 planeIndex; + + if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { + continue; + } + + /* + * Orphan surfaces should not get this far: they should + * fail the owner check above. + */ + nvAssert(pSurfaceEvo->rmRefCnt > 0); + + if (!pSurfaceEvo->requireDisplayHardwareAccess) { + nvAssert(pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle == 0); + continue; + } + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + pDevEvo->hal->FreeSurfaceDescriptor( + pDevEvo, + nvEvoGlobal.clientHandle, + &pSurfaceEvo->planes[planeIndex].surfaceDesc); + } + } + } +} + +#if defined(DEBUG) +NvBool nvSurfaceEvoInAnyOpens(const NVSurfaceEvoRec *pSurfaceEvo) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { + + if (pOpen->type == NvKmsPerOpenTypeIoctl) { + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, dev) { + + NvKmsGenericHandle surfaceHandleUnused; + NVSurfaceEvoPtr pSurfaceEvoTmp; + + if (pOpenDev == NULL) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, + pSurfaceEvoTmp, + surfaceHandleUnused) { + if (pSurfaceEvoTmp == pSurfaceEvo) { + return TRUE; + } + } + } + } else if ((pOpen->type == NvKmsPerOpenTypeGrantSurface) && + (pOpen->grantSurface.pSurfaceEvo == pSurfaceEvo)) { + return TRUE; + } + } + + return FALSE; +} +#endif + +const struct NvKmsFlipPermissions *nvGetFlipPermissionsFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev) +{ + nvAssert(pOpenDev != NULL); + return &pOpenDev->flipPermissions; +} + +const struct NvKmsModesetPermissions *nvGetModesetPermissionsFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev) +{ + nvAssert(pOpenDev != NULL); + return &pOpenDev->modesetPermissions; +} + +NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDev( + struct NvKmsPerOpenDev *pOpenDev) +{ + if (pOpenDev == NULL) { + return NULL; + } + + return &pOpenDev->surfaceHandles; +} + +const NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDevConst( + const struct NvKmsPerOpenDev *pOpenDev) +{ + if (pOpenDev == NULL) { + return NULL; + } + + return &pOpenDev->surfaceHandles; +} + +static int suspendCounter = 0; + +/* + * Suspend NVKMS. 
+ * + * This function is called by RM once per GPU, but NVKMS just counts the number + * of suspend calls so that it can deallocate the core channels on the first + * call to suspend(), and reallocate them on the last call to resume(). + */ +void nvKmsSuspend(NvU32 gpuId) +{ + if (suspendCounter == 0) { + NVDevEvoPtr pDevEvo; + + FOR_ALL_EVO_DEVS(pDevEvo) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Suspending"); + + /* + * Shut down all heads and skip console restore. + * + * This works around an RM bug where it fails to train DisplayPort + * links during resume if the system was suspended while heads were + * active. + * + * XXX TODO bug 1850734: In addition to fixing the above + * RM bug, NVKMS should clear pDispEvo head and connector state + * that becomes stale after suspend. Shutting the heads down here + * clears the relevant state explicitly. + */ + nvShutDownApiHeads(pDevEvo, pDevEvo->pNvKmsOpenDev, + NULL /* pTestFunc, shut down all heads */, + NULL /* pData */, + TRUE /* doRasterLock */); + pDevEvo->skipConsoleRestore = TRUE; + + DisableAndCleanVblankSyncObjectForAllOpens(pDevEvo); + + FreeSurfaceCtxDmasForAllOpens(pDevEvo); + + nvSuspendDevEvo(pDevEvo); + } + } + + suspendCounter++; +} + +void nvKmsResume(NvU32 gpuId) +{ + suspendCounter--; + + if (suspendCounter == 0) { + NVDevEvoPtr pDevEvo, pDevEvo_tmp; + FOR_ALL_EVO_DEVS_SAFE(pDevEvo, pDevEvo_tmp) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Resuming"); + + if (nvResumeDevEvo(pDevEvo)) { + nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */); + EnableAndSetupVblankSyncObjectForAllOpens(pDevEvo); + AllocSurfaceCtxDmasForAllOpens(pDevEvo); + + if (pDevEvo->modesetOwner == NULL) { + // Hardware state was lost, so we need to force a console + // restore. + pDevEvo->skipConsoleRestore = FALSE; + RestoreConsole(pDevEvo); + } + } + } + } +} + +static void ServiceOneDeferredRequestFifo( + NVDevEvoPtr pDevEvo, + NVDeferredRequestFifoRec *pDeferredRequestFifo) +{ + struct NvKmsDeferredRequestFifo *fifo = pDeferredRequestFifo->fifo; + NvU32 get, put; + + nvAssert(fifo != NULL); + + get = fifo->get; + put = fifo->put; + + if (put == get) { + return; + } + + if ((get >= ARRAY_LEN(fifo->request)) || + (put >= ARRAY_LEN(fifo->request))) { + return; + } + + while (get != put) { + + const NvU32 request = fifo->request[get]; + const NvU32 opcode = + DRF_VAL(KMS, _DEFERRED_REQUEST, _OPCODE, request); + + switch (opcode) { + + case NVKMS_DEFERRED_REQUEST_OPCODE_NOP: + break; + + case NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY: + nvHsSwapGroupReady( + pDevEvo, + pDeferredRequestFifo, + request); + break; + + default: + nvAssert(!"Invalid NVKMS deferred request opcode"); + break; + } + + get = (get + 1) % ARRAY_LEN(fifo->request); + } + + fifo->get = put; +} + +/*! + * Respond to a non-stall interrupt. 
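+ * + * Walk every ioctl client's deferred request FIFOs, service any pending requests, and then process any headSurface viewport flips that are now ready.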
+ */ +void nvKmsServiceNonStallInterrupt(void *dataPtr, NvU32 dataU32) +{ + NVDevEvoPtr pDevEvo = dataPtr; + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + + struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvKmsGenericHandle handle; + + if (pOpenDev == NULL) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES( + &pOpenDev->deferredRequestFifoHandles, + pDeferredRequestFifo, + handle) { + + ServiceOneDeferredRequestFifo(pDevEvo, pDeferredRequestFifo); + } + } + + nvHsProcessPendingViewportFlips(pDevEvo); +} + +NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness) +{ + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 }; + NV_STATUS status = NV_ERR_INVALID_STATE; + NVDispEvoRec *pDispEvo = drv_priv; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = display_id; + params.brightnessType = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS, + &params, sizeof(params)); + + if (status == NV_OK) { + *brightness = params.brightness; + } + + return status == NV_OK; +} + +NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness) +{ + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 }; + NV_STATUS status = NV_ERR_INVALID_STATE; + NVDispEvoRec *pDispEvo = drv_priv; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = display_id; + params.brightness = brightness; + params.brightnessType = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS, + &params, sizeof(params)); + + return status == NV_OK; +} + +NvBool nvKmsOpenDevHasSubOwnerPermissionOrBetter(const struct NvKmsPerOpenDev *pOpenDev) +{ + return pOpenDev->isPrivileged || + pOpenDev->pDevEvo->modesetOwner == pOpenDev || + pOpenDev->pDevEvo->modesetSubOwner == pOpenDev; +} diff --git a/src/nvidia-modeset/src/shaders/g_ampere_shader_info.h b/src/nvidia-modeset/src/shaders/g_ampere_shader_info.h new file mode 100644 index 0000000..2df3d7e --- /dev/null +++ b/src/nvidia-modeset/src/shaders/g_ampere_shader_info.h @@ -0,0 +1,328 @@ +// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.560.00.dev/gpu_drv/bugfix_main-17009' +// WARNING: This file is auto-generated! Do not hand-edit! +// Instead, edit the GLSL shaders and run 'unix-build nvmake @generate'.
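+// +// Each Nv3dProgramInfo entry below records the byte offset of a precompiled program within the program heap, its register count, and the hardware shader stage and bind group it targets; a constIndex of -1 indicates the program has no entry in the const buffer table (AmpereConstBufInfo in this file is empty).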
+ +#include "nvidia-3d-shaders.h" +#include "g_shader_names.h" + +ct_assert(NUM_PROGRAMS == 34); +static const Nv3dProgramInfo AmpereProgramInfo[NUM_PROGRAMS] = { + // nvidia_headsurface_vertex + { .offset = 0x00000000, + .registerCount = 15, + .type = NV3D_SHADER_TYPE_VERTEX, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_VERTEX_B, + .bindGroup = NV3D_HW_BIND_GROUP_VERTEX, + }, + + // nvidia_headsurface_fragment + { .offset = 0x00000300, + .registerCount = 13, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_customSampling + { .offset = 0x00000580, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_overlay + { .offset = 0x00004680, + .registerCount = 32, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_overlay_customSampling + { .offset = 0x00005200, + .registerCount = 32, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset + { .offset = 0x00007d00, + .registerCount = 15, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_customSampling + { .offset = 0x00008000, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_swapped + { .offset = 0x0000c180, + .registerCount = 18, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_swapped_customSampling + { .offset = 0x0000c500, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_overlay + { .offset = 0x00010700, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_overlay_customSampling + { .offset = 0x00011300, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_overlay_swapped + { .offset = 0x00013f80, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_overlay_swapped_customSampling + { .offset = 0x00014b00, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend + { .offset = 0x00017780, + .registerCount = 15, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + 
}, + + // nvidia_headsurface_fragment_blend_customSampling + { .offset = 0x00017a80, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_swapped + { .offset = 0x0001bc00, + .registerCount = 20, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_swapped_customSampling + { .offset = 0x0001bf80, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_overlay + { .offset = 0x00020180, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_overlay_customSampling + { .offset = 0x00020d80, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_overlay_swapped + { .offset = 0x00023980, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_overlay_swapped_customSampling + { .offset = 0x00024500, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset + { .offset = 0x00027180, + .registerCount = 20, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset_customSampling + { .offset = 0x00027500, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset_swapped + { .offset = 0x0002b680, + .registerCount = 19, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset_swapped_customSampling + { .offset = 0x0002ba00, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset_overlay + { .offset = 0x0002fc00, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset_overlay_customSampling + { .offset = 0x00030780, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset_overlay_swapped + { .offset = 0x00033400, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // 
nvidia_headsurface_fragment_blend_offset_overlay_swapped_customSampling + { .offset = 0x00033f80, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_yuv420 + { .offset = 0x00036c00, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_yuv420_overlay + { .offset = 0x00038300, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_pixelShift + { .offset = 0x0003a480, + .registerCount = 32, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_overlay_pixelShift + { .offset = 0x0003ac80, + .registerCount = 32, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_reversePrime + { .offset = 0x0003b880, + .registerCount = 13, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + +}; + + +static const Nv3dShaderConstBufInfo AmpereConstBufInfo[] = { +}; + +static const size_t AmpereConstBufSize = 0; +static const NvU32 AmpereConstBufSizeAlign = 256; + +// Total shader code size: 238.75 KB +static const size_t AmpereProgramHeapSize = 244480; +static const size_t AmpereShaderMaxLocalBytes = 0; +static const size_t AmpereShaderMaxStackBytes = 0; diff --git a/src/nvidia-modeset/src/shaders/g_ampere_shaders b/src/nvidia-modeset/src/shaders/g_ampere_shaders new file mode 100644 index 0000000000000000000000000000000000000000..6198e5f89f5c9c909c05ec92f82340546d580d5a GIT binary patch literal 244480 zcmeFaOKhB1mL~Ye9}fl@BqJCMlEH__2!`5l4hE&iNPr9siXlM-XD)hV7+zE+W&(8Rj@wdhqf7hQrKMV2S z@RQ&8*MC%h`7$eISp}c&FK4w6@xRrRENk_$>{rdK|ETfu<^PDk@%L-O3ollnOuc0XLr1vSm&&qY6a(@5yp2}w}*WL@# zI|r;QkM{bbatSRQ$M&jORs-I(*BjMpCWqUo@1E+b9l7?=pWbM0ZXx53cx!L&VZ`@G zd4?K~RsYvl^nKoQ^x*q{`o2EKKgZ?lKQyzq0e}0FzkgBAPMTRa;8ElsA^f-l?oa=Y zr~h=uUlxq>#89e#liwSU7Nkpi*)o)7x$?%NlHmW9F!b$>N01dN{)#a4>y1ZM!Osbw z5}p(MzY*SVW_8pjs#^Hy!7lWpwg63p?4Tbnu2g!X@`47c;Cq0p3(yb4KYxPql?C-h z_-5(w!+I4OeyujQTgjd`D~BIu zFXQ{HW-#>K^Gf9bmr5Kj(oc*J=xZLv-=u#2mdeDCMh3G~39OF$4V1EJkk4m5?;upu+4(WNg{~ycXMl{0j8%f9vE|Vs)Nh=>Nt<`)bbW zM}0SsS}$3f#*64%WOL#B>bOgE&|lC;ynQcvw=tbN_&MocH`v+B{!xe1{V?u>*V*XZ}(e>0&EH#UuakN>h; ztp75oWqlt1Y_o{(G>v|be>yQfD_hndkN-3okLYjyI2mu~U;n|X(eKNj7RMXcbH|IK z$<}v2#ZpxhzS)o7cpLgLJjinB2kQHy_eJ>5Id^@cI(VgMX%9E$|=qY*3-T z4G*@dpQw-aYUu03`qIju9AsCmd|%~HzR-Avz3k;J3J||*Ccnpt{Lubw&}ToRAN83& zw^DtL^szrJeS-3!r#D`sJr4cF_on}(UwGg1!k)eJ$Wk|68fP7VxMqJIGql z_n@zYZF~$KP#=c}4aYCgWBT5*@%8qPfmMe=|6hrJD1QR}p}*jF{{hEy!oT|)8sLF{ zr-}Za!9Vinp|2nP8&s(O!-Mtn5mo;A>toio8Gu1xiowm>NY5QU({_*;ojPKWJ{|4>D z@F4f?zctl9@M-(h*R=hn3~p>cu^+F~{$1Li;lYY;|9eyI1E03f@sPH^C4(N@Pwds} zwErIM+3=w4+yDMl`@pB|bACwMU)6*W+fVG_8ST?P9%{p5_9U!dxIV!6;`(8j;LUy= zhxv=+=i6lbm^~sM_5kgfeIX3{VKDTSFvicH!5&@DXn^s9{iXf8-ZYT*81Ud$lPkA_ ztPUdvxPPP7{Ga~ye}(xFFvjcK(2qlY?&f==4<_R}><#V14OKtXe5Cp(+mrpq_pbe0 ztqk@5p*^nLx3*B(!K!cXdef>WJ*s~v-(Yhdes^_c zH-+!_+EM^d|1GWTP5$_P@ZAk8__Koz-`*duvO5R69{y3YM|j)8{mb2ML0@3c4pBh- zGJDGT6Z!&urM_I2c%r_f@}m9#wiPb=L-KZ! 
z{0#aqen?+O;*a#T8&Mxf4`6mE=yP|gQPAg4FSbVo`YM~71^VWv=)-s>eSSXF^b_gJ z8*V;w`~qB{ufAEN@A1X+Re`=68OQ6Z>;d&Jp-<|b(3j{x`Il=wANhyRqs4f{Rmv)>=T zm#-G+{r(iauvaO)G7ky8d9)XzH|K^8KPUfso)`I9-0xpZZ#ScX3H$vAtHt@?$5Zse zex~%wd?oZYqCGSIwu}5-o#5}Q=Tn{^a6pJY*ejk7sQ={e*BHN}d{y*kCCv{%`v9jk z$$1g>_ZuyK@9;kzS9M-gapy(-(blHfAN#)kXvOO1M>wDAkG9zoe!ydXtv_nnJqGa) zmjSnpe}td%ynbhe26S2F_khoALUi>FMweQ&*E$t?aRue~+MH3ASpLO5z`N?|qTs#k z@T2U~P9?j#*xX&rzQlRyN6UMJTZHd7fp72O!pFHb(80RUyQKQi#todvY@b-4lztbz(*85O~U#ftu zCq|2lsOnh!QhVe@ga@P6BK4uA{x$26UkMBj(7TI$gSVF{^a&t=W&oX;R<1%w>g+6q&i=7>(i^r_aA?>xR#awd+hij zMU034<$ZpQAFTlNgZ6L89b}%{?~-tz&m&!_h;k9#UJ2n zEoz?QzG>WCC(J`fj)&e)78k9>+jZPG0H0+)^yObX0lwLcc-B{H6K~%)OMU1g^lO;q zCSTqc;J*z1xz>|B%@}6Gu)YRJ81wJHgS~l^9mRT)Rsj9W>#W(qk0c_IIZ02fb{KO$@JJ=GHQ z+&o%)$=2feV+kEj@!JJF?3*eO7ulbQ^Ih0CRUrEYe08kRX$=S_KV5@@&H9G(>M_<+ z_=(@Z_m6h4K6UhP{2X1n6kza==j*a>ogVaaH_Jaj-EW?R`J#pMD5~enzj!%E`9c3P z=|A^l25$?#|9=Y3$Gfq8;PcU5Ib{i*)p`@!7-*gHJvJA3r@G(UhRKm7Rv^)cbc^#T8YpX0}~L8Si- zen6gsN&N(Yrk~Ug%%7j+tPk@U_KUC=`~G)cQ}0f{xc|Mg-ys9C+}a;}x6k4Seb0}(`^Hbtk1vb*-M`!K7x)4F zJ7j%@@tE>sVn2R;f*!!4C-6h{C;Uj{aV9_d5~$f1_P=w9_4)DC8t?6u0Bb&jJbNSR z4}a+YX|vWE@9q}yr4_;sPkr8_d3<;u`mFC4SGWKY9{07l?_SZ`iS>cL!DvnD>(;mM zzi4(?pVrUMiC^vTd##`G{b1DY@O!sj2E5)Of$Ju}#ldJp@J4{Ug1Z6U6ue0o^l`j> z@_#%3A&ke?cx#L8`~JWEui#(w*AZWT)V2M03x%{lW83%<{@nIA+D{NZ<^E=?tMfbd z7yeoGN1NHEFOT^c{B;Kyz`wW$xS89Aknj`0tDCl>0gU_ld{olXMgM?*IsB!xKJn|v z(IR+-pM!6|Pk=wOH*Y*@$bQK`0`0$n{UsM*^3Qon*v{YO&jb4N53qjn^Bcw=o1J-zXQlNabW`f$QWJkD1^Kj@t|82c&U`wz-SzqiMy=8vZFK>T|Zqu=52J>1V& zpIhJFts4FQ`)?Be8-9PIX7qdfm*(FV>c2c^^n3iXT3j#i`%c~H_xPvg4;Q|#%v*mv z{?mH=9{tT9&qrAOrBfg1_vKH`KhM|4^+rzy*z}qHO)U@Od#+c%1xHYa)+4>~2J~g< z*PCbDuP;GA9sUL7!{h8&9{eAwrvKr7o#S(Ou;lm!dQAV%zh6iB6YvlH<$C@Bzfbsgze0tN z{5wtb`3(M%KMz$N`T+eHRA?W5llyg+PukC`98j_S$H{nko%TDlKgoV(6OigNeFcAj zXaD_u2Fs`I*EnGD^ZJvF$Jc59GVNP(zx$o3_JL2^XZf`KIU0nd{banK)Bdr_JO1CL zeHj<|AP0aLth^27w`l4lpdB(>8aBI;ph30*q3wYIaGP0=ZyUm=u6up zy=i;%G-&ww_7eMbMtg(zXzzyy%idmmKg-@J-e&`!w#V{mdkfCX*|#@s56$`6^oR4i z)+@>V`Cmw3f_}js9f$d^jdHoLe}KFX^?gboRN%VbzpDCVKQ-B2#oF`j-zrxM_dCxnw6Dz_FTO0G=4!phNO6hgZWTjy&dRLee(_Zx4ZUx?{;-RlX>|5)-p_*?Ju8$zFTvu&(k;f zZk^;GEV%jye>~3v&3K>wqXjPigTFxka)t85dJ0Sezj(fThyvo5*$0XX`T~9Bd~sRg zi~5qvlllqR<`49r@SCbX?l+%aJfD2x+Xp@UPPJN~51l{sc%zT=FX%h({=6FXiS)Fq z>o&hQ`nsw=()a1b^G$)iO08C)58R)ikMlq1^YhKbem!4``bzo$hw(wUURx@Rx5pRH zM+N$B%*_?(o3;lSpDBHl_os=zi@rJ2S8osMsz37Y@x}AiXK2um$Nqcs^MO9-fB$a1 z9$@O@_ony>dy&#R;m?uR<9HOk^@hz~PTrW#iu}BIK07A4&VIc8{V952pHg}>{Nwn@ z=c7FkJ@YLN&|uH&>VLw&SI>9-2MY@Y{``1~9@xW_9+{tno<_7sqNkw^VWeka;yyf? 
z4S%k+JByJq9jzOO%8iu-?@H}prXx~+$R z*M0YVruxWrou5>7^`}ie&L0M&r6t-=oe$~!WX|^cERXsIqlJ0eC%bRG2e=~xZ}+QD za9>?nfC0O@GH3VOI8Ty+wezGaz}Gs&uM_`(_uKV_RlsY6(O=w$cL;Y0>%M!V!KT%J z>i4K!=719n{?{LEXagi7bw1RN{lWP}Z?tLlpW_YZwYdLw-vfSrkNl~EKgX_q+((xy z+;GPBs&!6~5nfs0f+NC}c^sG|@Y)(C41=Fzei_xu9B?(tPxrmmDq(jYj{D#_9xz~u zVvN3P$fZ|Qkdg>Z5n#R)&Xe>R0Y)_(WnkLt~EpA3B}+&@EJ+%FHo z3bhA$fBaFk9m?yzxrXoEee;{d&jau8pYi^3JXik#zSbGnbKWo4tAwR9FP^k|Kbe~& zy%?{!f4&ENwe~|mD*vKc>iwi%k9gic*XD`0@A>`O~ZPN4WfT}oukdcCt;M4jxtuc^2NEK|YVi zSa0$Enm@p`Kj~!Bk4~>{_tVh!H&0A~Q2*mG)<3>H;1yaPeKgI*t$MV?kV7&E5>m>MC{RjQ9Z>#K2nfyZe{*6tm-`AJE zx8e79i|3R1m&yA2Yt}FOnABgXQhwn+1LLcBKaJ)ReVDqRp5p&{bhW>nz_tB33=s)@kSo;yZAHnyaw(*#B95CV!rPeS`WQ zY#QG4?=_2bX69=fx+ivsxFmi>ji%s+?Y5%TTb+1dp^ll$0h>6`rJe*ygUG97yG!}q)& zxlyt2O{oeI;z5mQ|l;?Q8MRsKeWw+n%-(5F)`91jc8s{-=J~@vm8_e%f zfA9L1(=Tfe^ish1IT+_Zh5gJ0@}>TLuJ0i)@pF>{>R9pyJf)9!%iD$h#`m)9(0_0H zZ?Zq7-{8L``X}@g`dHC=u;T1xK|jI&*0`(tIDbEjcmZ4=;|KoXe)^MfwF*vW2Y>Oh zNq!*S1lOM)ulq~zC%KniZhZ{6Em-%}py%zMVKgWA&7X`b2u~2{<$ZI{}l*?71IxLW0>f`9&m>lOJ!R%o9DR~I?IR}F?e zr~Twlc)%smFBtrTzin0KgLw3}5U=AOK|F-tcl-k%&mhL%*w%VP_xD&o`FI<;zsLT? z$LCNyj#I+6{)zE7+$bTcZ9#Xhx_)5^EdCyUl;xhIV26G zX89*5U&^H*guiZ<<{s5-eb_>IuJ4Yt-TA`wXAk$;+TLLOg<5D|kG43R9sUaN^MnKb z@WX|%y#8=ql1mpKZ+%JXs4)*l$!|ojH z`?Z}p(z}-}egJV*PO^tBd#{(*FR>0W;GsPU4YYk!0ChmD1WMm4+kvcz~NCGE#}VC?sItp$)@P3!MEf0mxBzv131 z+{XCb!~Ul`QGcU>4fnN|IqJWIyVM2v%huBRE@k2m>4m)W#|`s$;r7x!E6Dd(l;@6}w@3Lk zQ~BjNd|X(U-w)^VM>ZJsP0JVh2KjDDzW6qkFUlA8pMTE%wN6^BKFND$ImjDut-NXd z4f5XH6g?UE_0?W}*l2Dxy?l-T2pV-h|8E{`M*c#cz5LRpu-*Xw^T);hwiXZ{!s`dd zV?J7r`yJ7@X#5HEEjoUSKIq@h;%3y3%3)*q(sGn%D}S8ull4DdF8D8eR++~#W@VwYjfwyFVRoQPcwfazm-;y zALh%}!fGq4Ie7?fw^DxA$}8qjNB&)f=*lhgXCnL%=U_Mu)Y%T1@@WgK)xGuSuMyH z@>-4ZlROvYK$1zGl<$%|5AHd6&aK*s(QTfeK%R?>THkSe^E^0TS_WP=4!6TZQq}(a}+q*P5*#y*+!po~i!x|b(!r1LS|A7MV4!~Th)mcOz1 zHOJ2nM33TqRe8U8EExUkAL+W(;jh5{)o8B_kMLlZvajsAxAoxd^XPuX&(4>ynI zMZWaU`pf3NaQs03lKU6oxxb3|>KM0&{ILSzoxiHtGt$fQMlbE*Q5*epF!%@mYJ;79 zgFiC+KaBp$oW9cX4TD4ZMZm7S>wnQ-8TV)6=WXc+{F5Dj0fV02nEuz~H(-2^^Fe3~ z+VlQG<7JM76mRr;eY2q(H`Mjt5wQ&D{{&G2l@6A7|f0yxA0NeRymVYxL{~L4< zNbp}K@pV}LWl)>qpC#*k;uY8^`Ti+Xho89d)2}exL4yC(?49tCSN=GO7r^=+tQq~D z-qWHz`{1AHSMX1!z9s&h zv{t@8j7KYflK7Xh{K*$K-npN>$iETnDb{ED9Q>1u{=@7~(LXuyw>J7s|AT)r-CGO& zGyM<#$y5Hz+#bO{I@Q<0_on~RKbgl5N&GMBs{>i|n?QuRx+gsBF z5!*Y%{%M16`p)$L*Bg3&g!@Xa7yg1H)ZNdSeLBW`VDl5l$G1c~dk8$n8}Y$EdBtGN zN9Z5$&`07iJ`~@p6#Zeb-b1{pw&aoD*8X^KOO+p#xC#Wme`Eflcs{hZ$$ip69GvtH*K))kDdhJXP!jkm@xJSI3@AK8@&j)$pd*}!8-apyf zC(vVf$dh>IpWN1Z#O@=3*L(!lpp5KOFJEra!zLsDo})keCHj{b4EhIMnV%Rht1fep z#N*1Me>KYY?yS1^45*LryY1*7iTVd`DtvbE7w+BPaQf!TUoU|_vHjkioc=6Mfs~c`XT=kN=JIX%l$%(A2#^ooX0;0ZSSxABfako{>uH!6aJ2{SBLmO z{4#q9eWkv5|71-M(cf`Dkq7kyaJb(h{HE&vLoe_AX|f(df6kguKJo2Ce*I3f7@r9C z{Ltf#K8zpIhw(%IfsWKG(${WBeIq@9IRHct;cl~C(C0K>>g&)~Sza#O-(tQyRQ=Gm zFyD|qKi^IIuU4bJlb*b4;Z^;3t1r_(JM!=G#q&{te>X0S??wGf=$r6A&l7zYeRlj9 z_=nwJk-m%PtIs@tDBt%K&kE~vs4@86zgwbxF&OwW{F7;I6M8lLqP+7`9FL;6RO0}N z^s4@ZpBK+($NC=fBt4j44n1u0g?&otne>;=qdgEkc_Y{N7|(<+jOS&g{dmNBeXYQs zA5YN(dzjKAbCd9=8tsqhvHRr6pS1~nOXfq32N92dOurc~+b9H=!Jp}PT+L-}O(b97 zr~Q)`3;xY@?Eh6CWemMVR)knPU-)rvv1MpM&AFo$w@Z_)XXBLl32G-Sw z_vrK7ahpHu6_hX6Xs^s44ClKawKmq_U%bG0TF)7Ot5YV7`%1;%!uhBSuKW>x&3G@B zjV|z6h{shg6Rw4LT+PMMf9m&W+2X&m7Vy80_uZjCIIphb{j^R5NH5MadKS+Jz-^3= zJ-|y#!fU^LRy?lNhUO33-=n>Jh5QKT0i(4A**}MeKg1%->pWNVZMNi37`~s^d8#{) zmH)HO%LBfVY55c2xssMkq5gc4@=VTyb-pftPMz1b7Gcuu{P;QM%h3V?P^IU)<`3cg zPjG(QQ2Z`uF96RivOSBhM*hy{Fu_TG%dN79Q9u3ZIbb6{+j z;d~qC-+6;@JP%)+BOLrm-iKPM0UIA&FLDA+{Gs8#4mkQluabYMKXfr37rOJxcw89l zAXVcL^85HBxSRO$f2w#~_&$xtg}ezK-lzX^p8o(}i)V_*RW1{jj9=V?KXjgxUg%#T 
z9+%3$c!GFbWu2!Rp7oVVKMdbD5sypf<-;rok>bB3XE5)R6yFv0>fCr-qy+0b4ifY) zuTz>1racBBSMeA1-|VsBgFm#&+xY;>Prp~z$KiXP@4cfxL4K_NJ5%*TozZ@%zfL%* zfAYOcv3@(B#rM=V)PM76{UuAp^Fzpo^#^>>UwVN1U-al$T*UX)v3dXc^FzGPPz5m_ zm#ua@e%e1ei6?~mLcRglevbQ0`74wE!gGD#=!5)f$QMIX1%G4M>xp<=kaug`Y*IZF z|K?2kAN`vZ&n@^jkCCr}{?T6EBa4qAnC*`iT)raI3VT>X{0>fdYTx2<$$?n>v3Ohy z*wJ1^z8Sn1Im6!>@k0XrJU|3({C z-Y4mM`8)4ed4In4P4T?DH|}ydpVnV^-wOX)tKWZrn)ugZJb)fF$M(Iy^QQT~xcLS5 zbxZ7kyC1{;knd4Z-q-gmIj@I2k-xy|^Y||lfAg=&|1JqW7JtDX%%{AM#GR_?GtUc) z{?5R@c@oYir~I9#_`ehR4|#JuhV!>ytG{#)vHn6H{gmIluN`-RAKG~%^shJG1i212 z`8obVURY1`@49$%uD{054CiauzZ-waKET+2l0Oe7_?O1Vhd=b-J?hKwVAGACGyI)_ zKf(Ez_jhLb7k$hlIZCoBRWQ?tCWomq-0Y{{!kvqK_|_?`dNh3T^sA zdQ$x)J?sxGDrD^MU%Hwf&+~Vl(w}7fI{gX$U#36c_p#R>>T?{g@^{|hg8=+A-jneK zen9@cJKG#zwm+x-e^>F*4|aloax&h5?_Iy-_=Wm-Uvpyv>_hveZ}5Lk#vkZ~KeG9= zOW}NHBHlIRcSw31znuPJf0W7(B4&G&^f);9e-Xa5b*ZqQdv>w@eeT-_ z{nPr_E$CnW?)F9g&SZZ=zr>NyzZK;p`nPBx@iTt)Zf!Fjc--GTGk;7!pG@oL_O|@Z zXuq6(QomiiD(NTVt$O>}LjD_+4DqP&eh|ZpKeVrST#L#c82zD3df(&q3G2az=D!e+ zYkNcL&hMXt|D*B-S1c;8_3=6t%D?x+dUsy=lbnAu;Q9{fi~iAT@}KwdY4QG20pAsm z5%uG}lwcRXi~i7poqq=46~Q6C?9V3SakXTQegEhmU5v+7VgL{>n6ap}_*>eK5dNHS zSrTMr*?P`nO*4|q5ndOYkS{Abtk0L=VnL2{gfL}%V<7|lYIpJ3oK*9LI@;f!{e?$GN3(}VWH@CpB zV^<&Bo6~sZenIukb-}O{&;2jjgZ+cPGy1dEhkiYoui62YThDU;FoY=ZiRK&e8sqCU-iWNhcTYvPrQ>=QW)!xT7co7=J)?S=Z~298$-Y7 z-zIsqfd7{n&I@lOv#|i(SAM_WMzB~RF?dzV_Z-<`0;Fs>>JdE`Q_}zg4y|!^Kzg0U7EHls1o;$%G_rCl+x8=Vb_p%9lVSOZd z=)L5%%31QjdN8l+=zpp6BI@t{GWc7idA9v8*5kGE zqI=H({O38~OHtqV=TJV4PkkjT`VV0~$h%8nJ~97XyD#zcN4|AQ{1A-%q>aUHaXc(- zf@1Thmp^B13pXC#{~hDO`ICBnBEEMjj~wwsQhuK`9#Z-AkKBC|?H%O>|0&&{c=^0F zDW6IQ>w|0Mv*d&C3;z46FIoQxpZMSYTgp=_heV$9^`bnx^{f``6a8)LiF_fSc{@9z zJg?F|ts6fGYdj_I889E>y@dQjHQ!4sIA3hHS-;PR0RPn{Pr#HP0`=p3N9QG$ zKZ5aB%T4yj#cRNNL*dPAeS!WW{)62oaX%_}X@TdJI-d|+Zi^qptNxZ8=%PPwMdzFG zyI+Hc{!_SltIJPN|@ycTe^O<3ho|100CiJpI0@6~=A|5qOwjpERmIsfA~ z{JrtW^1H_OXBJ;6zCW}3t@!>dd2e<~|5uAAB!AfbX7GnS;Vr~{;_dwHe|B&k?P)#5 z^}PIB-Te{Q$9kVuv+U`FwO+34J!5>2X6N+=H>-G`X8!j2p6h`Mj_|J(eB&iDVe{2b@|f8+G4M|nWRkjJF|_hdYhD&m3Y=lppm z&)b=AN87P5eo8qs0gc>-K4HB+T2caL+h5W@yawBjAJ5n2A3kTcf-uG>?h}9>4y7+& zE5Ed=OZ>rMS{&Z|!GRx|KREhR$d}Fiocwhh9_K6AZ)v)ae>m5F*iUJ}#z~6bGx`yK zuD`!|zG)q&*!~yw4|vRvwYKsoKB~ZH0vwYLDeAB!i*ZqL=Z|8<^hbqkHGoLEZ;PYhdY1z`iHR^>H5w|9ASgH)HRuwLYO zC+eT&o1W#HmTu{OX_jvq=XIAgK7xPyEZ?;81O9EipHaRw+$ZpS4)-&&eA7w3bKGam z@=ep7mgf=B7tAlFFNC2lvwYK7FR$$EW9sd@`}kSD=~=#M&cEmTw|jp!6oP+S{n6ib zzG?1m@bl|Ma+Lb(I3g|v-d~JKfvzw&6D7Ni2LsC98D$f zWw$AhX$A zMd>d9c^2|ngCB^u#szwEU*B5e0dKs|S9l4BM|-0s<})|EyU+ieH)!sBi~Uo7Ts%3; zukG&h`9A1K)mwd{cZ1b=dKvG{?76d7h|f)b04x8T{o6Fy{QXeAOUVs`zn{-k=mMGk@HA2l5xj_=oo;r?CuLLor#)X6J}w3zN$7hU( zhVo|5&W{g=@+aUQ{Au|9Q{N~2`|a~rOTOWK?6mxVXZt>04(&-^QyFNKlBctieb=uZ-_?yV_) z0H4ys@+m#P^ZeDIFKv(ZJZP=i`Gb9d{~YpHL*J8llHZ?-4+nhO9?Pfgc`xm2 z_C6!cr$_m{kEZ0jZS8S>*ZPF>JI2o$@!?>Pj>CLK`|+)(7xiI$V*C*w;=@5h3FG}7 z+z4ax&uZsNqo#(HHeM;$>%uhZa?Sbf-?_f!k(DOUbUk!Vh(lZhN zGhd4KNc1el_pOBM3lsU$C*wJac>H7fhIm|s_-?;>{_565{_3`SZ-^#vKZyJl6Zxxg z9Y_*5h(G7~MOFW)e{i0G^S(CUS1P`N;tkBX{2=%v>O*`te?d@n= z`7#89-}OhWNWadTw%z+!e1h`{i!N*Z`SC|{op4_b{VLpVLw=0k zGn9!&dC33ckC6V*$8Xbpb`{^J`K%X!PxD#V=6-GomgmosSeAYGP6NY_r z`K&9|ANumjXPxG|=KXecfq46#`K)U-J|N0+5Gnpw7~dhi-)8rR$^ACj0~qre;~~6B z?qdDr&nN4w*}>#DgnJc#@%+Mi5Z?nI;;XF@w*5QG=Z{|n(e~9r9UMR=8l5JyCqO$_5gSUPZi) zv*Ww{$!4S<;}7q%m-IdP!TD^ga54v)|7}0v&I7TZ03P}Xa|3yX_->mmt-tua;=ADi zJ@Nj0@S7z5ANq5C{5U(W>_2$L`6Bp*_CYS&$NmTRnas}`>?!DjeN;kj7yk$QJ1%Dv zdeZn@{QfTYa|!;-B)-$vtbdCE$&STe)n8sCzy9X=tX(`E%1`{?jQj_GI9|i~*{{ei zrV}3e^TH(F9{O+cKwy>(=Ua|{qwN^qlk&Km^dJ0#d@&ss>I01aQ=UP-tPk>5K5NGF 
z0^ZB>G>M-8{vo~_^x?P3XYKP1yYqzKIKEpF|4-v(E5^q`e{r4z{tORx-FQDEz8ml- zNqje!PtH58Pn_pGOY|A_PuWAnI|%y1_D7~K+ipEW{e-?2^k*Wz+XeZoi~2)-ImFO| zj9mPmUX(x9KNufTzaTG+pPtD(+<&|A41Lo5_ht_T@Vm|RSEYAnH|B#Te0{5j{@Hm7 z_36%bnF^TYDj(vrDc?2l{TuaOL4Q*IL0*IJ?s9o{u*?1SbCkckY4lLv^u6Mj!5#rd zdCKn)AE>`L&$9ktevRidJs{(L?E3b^dC)fX13%CUeMfw>;(q&r_-N$!=lb6Goqsv0 zf7{!^9+CdLTf5kgM*6=uML+tR(l2%=^!xGQ=tX{R+3P^R;@|zDv)5UEL43M`JyU$Q z@*4T!?Iray*f-GM8eh5u&WHP7y5Q_Ff%1n_*F2Ldj>8_L%-LAmuS;_DV?ce70`&>yxpWi^{vn=8Ro6`p4|}8Kc7yirMU(Lt zl&>$zm#uusNxtli3K;88;xDNF-0~~C@%Ilj{(=oz_45_~EGW{D3Ybh^QT80vbo>!p zpX$CJ`dElBBG~fzSiHUHkD&Mqz}G&+=BadoMM2NSAF+5Qml%Hp`=vMFZ@~rg4&lE< zyrB;%O;`RZ_sh9$zofr^02uDTD4|XCx67qM{1U_~Mtl(`Z|XDkmx{z6;1^E-*OT}p zd+;x)Dc-n?U&4HT7T^7Zm zdO>`Z`QwxEnVmm=P@eI=%^so_$n)>}`1wuqT~3xiC;rLb+4!5qKgqCv9}Z>DXYo(q zzch<~0&^ACpEr+MoGvWBAnZ~92>I0Gc~7M}=4#6o5Z>iy^!D>;8xxYNkL!b@b%UM# zh5r;!`-b5>@`&kBTzSj~@Ygaply3ue<Cm3P-<-uiDV`_I;-BC=a_G-fs4v=oMt`bV{F4d(aD3YN62=?&1%FGzI4{6> z8$6iBKY0j-z~&e~X7Nv;FSGb3v-l^r-T!v-l@G-{$#qyP*ZF z*0&Dk0y@CmtwzD0>C@zX3g-)x`5mVGnJSy~hr;^U&L4Qce5m>{U)lNj1^zgV34ff1 z7Wjd_`eu>7$H{s4>(F=O!hD>(AW!_kX7Nux^YX*`YZm`x7XO6z4YT+suy3$8h^Nkz z@MGCKz=$WWc-gb~C!Rmc%hZC zfd-{_y_r!yfQnVo5erL@>KumuXc9;{Td$hoxOP*{y0PGFVF96KLq?q;txiB zO#Q(I`~!a1WY6+vgMxle?#KG0!GHro;}`OQf6Ao+z7GB=@F$w^|Jug|9^U)^XL~OP z|CNV0QSA}+I378G?f&Ho;ADK8|5qGuu-9>Xd;hO}J>U=fyX*ZG zjqm)n?8D&JKD&Rg@AmWk8-1(KkJo3(cqKhI_V)|>&$p)bpZFdk2>x4YKJkB=q-Wpg zq5Yt}x~2LP|EK*6Yi-W&dk@|ndyp@8?ZlGf9$@htucI1&8crt^FL z!;XJa`;gDWP7?p3#Ggq1MHc@={zmdI!u6{6N0NV05)b8b`WGedb555_$Y;t1@Vo8( ziOjv3@GtOw=tERjh>v3Ve`7qBBwmW>N#doPBLB#5884;&Xv5`a1Br^~vbMnkC-Y~z z$M`85vZp7Y7yeJP_$jmaDVRR6UY*5H;eIavH1UUj&i6x0weeI&l; z`?8)K@@)M@|LR%%6dEA#1N<@nmSO6zun8pA_jpex{49P7*YmUZDOja&Jbh5zj(g-iHEVjosM5Ii=U$R z@$?Uv#ZN)}+*$mTbo|WXr&KB!8QH;Kylm>c`CsAw#>ZFrmhmQL@l&`yk$(y9!)Eax z#EfJ8d%-_GvN((V&Ev0_|APDn^5x1Kk9On_6!Wdv`x&2a<&8%b;g#>@zaHTa)8c)3fsrTR+nw?Y^tkmBz*w*U|S4PpFnUeq7$#ds`& zd+U2b;-jzOuQtjydidD;BlA7eJH9@gM;?B-zIzGId3$rDe=l470OG5hWDi@|FMwY+ zk17VF@KkF0U>cQU@^2!J6g0tJ8cP~F| zR0bgU+RGB+*W4*pS_}KVtm?j3d;cW12l{H|efUp%Nv4O->t*;;J13zlzUe-f(K0Z+XxMJ~peSFc!Rr7Bl40?L`#-7THezv#1(Pw)V zwTJur^-TOebN#rd__@cwWP|ugJ{xU|cU4mTl<#NwPstblbp27U6XI8z{MLGj{PKKF z2{`q@#{ADNB+0>E=PHze$ZD- z^>0$XMgH{i)gJhPpRcd8Dtd!FJDKpWkY_O-^6G-UKLpd!AIkF+{AVIh=6A&pe>8b6 z^%8kbkH_6ztf#LbJ~Zvg8S+Z>S@LSI=0skX8bMwfkC&`JVLUEgLe2QycH?njQTlA- zk^F4DQeLCPUsU>#@LTd)?P)x){?jXv?}GW?JASqnb{knOmDjG}NguvHY&2FA`BpIg zcFiFU^=Z5>ntqwQTKVHfVf^Kf7tMk09Qh&s^fFPB$7wU4w6BPty@LthSa_V5^hdib zYad(|oWwV!fAA<<0Y8t=x3AqU^?C?ojCg0|CTD81e~8ytL-cP>#j=M_XulccGu%zy11N>wT=B{eD2}eb^7h2QNYRTy$$bEM=eXu_wn`PyTc~cR>H(+B%lyz*ooe z7S6Q51e}kO`00R|e?jp8sf5}uv@YGYUAMq_UEg6c<)6detfTWbvdtLzX44& z81!QO4q!Nxep>o*9Rvz_D$74(K zTfV@4V$2US(0g0+Rc~CQ`862dn|`7G5RXmiW>UP->+@BO%Oo(x8$W%%t1+-eJeD)8 z-`)@Q#tPz^eh+ma-ur7aK7FBnfW*U}2mAuP=C2&`T{2uh@WFrC&IdS8y}`GL-S{r{xH6vgH2y( z8O^_$?-wh@e2v-<`ubphz;Bd4N&H(`{^ScA|J?6Zly3n`R@pP|k6-fpu>PX;;m^i9 z>N9-~`7Mk7)$9*PVC>H#xKW_r^gsA#vupv+ z5Bf^j$77>D4i8Ekmc~!h_w(~xqTmVm2YqEeum}7w;op5ll17t`f2WE5LVmZw5BnMa z$e)M$17Ng2sL&n^540Wwe1`mhXZx71*uKgq?dKQem%O0;4#!uL&v6rwr_b~U__Te> zJ8i!ygBAM!I2nJh)Bnq~XTyUdXqEJLEE=? 
zy{33InQ!k-$Ie4N{B9cwukiEm{q?%eR|%8;TPs*^WP$#{ceOw`SatOe{#fbk4%R&U zBa5HA;o$z|2IYzMQ%U@!y*gxj#rYBX;`3WdJkj4Xl^68`FbNWW2*25=74+rl#q-H0 z`rhV~ey7I#laW5y^Fxm}droyq{IjKAk-l~*>Ko|+jGp2L{t@ohN(K5pP1a+O|5@{0 zfxb$!S)dR5r9;&Z`onxf`uuz+^^El8%TZrR58yC92-lm-1^OOeJRcS4yHQ>#&5I{Q2<|J+Ox3hOYnZI&f_hHH(^JjY~U(VFNt8XyctkVEn{+TN%KUd;>lCyoB z|1zHpS3Gv!xQBd}`Q|$ERc4H@^Cj*_KU$b0jQh|<<*)c?d9wpp@psU_!;ji?gmM3g zd~8je-)>aGeHit7v{noJ0{;u|(Q&?Af3z0+kMj-Ym$C05pBH<8Ywml%xS!B{>Mubs zf2a@nW6GKzX{OcQd(?!V=v`($ozNc6d&G`N|Khgh^FXhi7khefp1e$bap%p**MsXh z!0!AT@Y*6_m){HUI?t0VUlHxq`%p`@Xyb$DNfZ9xNRL`P-{SW=-*W!nSCj8Q{s`x1 zJ|8B=Lm_`A^n>S7L$Ffg5&i%8qoq8Q|I=l_xQ+MuGL=uW0ld$jseG5!AK+_^A;g>7 zA1^Ku&RHeKL+>Y5JD*hkOy<`tmw)KXzjy-t;-ZaT=GSC>3k~A!`)0}ID;j37!(6X0 ze;@S8V4UY+{YUl;ll+++?tn30Dt{(x$NJ9g-}5?|<6!a|%6%1oQUA>z<9pyk{IUjN zJ0C!K-nSLc1N3qDp8R}Ae}ep2|97V92fb&JN@I)Lw~f2jZFQAgy& z_-IVGmEw!}8wR7cD$sbr_tmkjPW|~a@@c68jc4ICdc%3_ITA_xUmFZC?1DpC1!guMdzP&kuW05s;|$ zLk;t}<_F48{NIZFM|qCNaQ^ly%3~W94t;zCzTb`gM}Gm^eQL;$SvUVFFOM|;C-~PJ zGd**mJ||!7C#^oJ6VBUweoV19IUi2s0ed!hkLe(j_ypevgbI9rK_BoZIR8R@JpWSp z&!Gl}ues&P^W6oQ9emp-*^au0~zPn9#!GmqC%AbS(cejil zmPdR28#~4L-fyA5_#n|Ql8gOOke|W)p7!kej*}ngb+G&s{rdmb&JOk$f9UKT@=KaO zrJsM#(!bAr`=EbX|8{n0z`Xt;-zW2L!k&U2%KLj$^iw?&`X}};*Eb{oMgJxT9Dc4p zw-o<8p?@+yxeDVSvp;9)C;1WcPbT^a{

(8=ySI2Sfh6y402JBR^(qytY=vm+M^6 z`1u3+yrKCoZv(fzFEbK?J1mVNuz zoU#9f>^b*aIKYI_#{awhnN}EtKj;3YDOD2wl>3{qE&$cvKf3~a)%o+FJmv$$<0{z+ zH2C}C9^kd6t!M~80jzxCC=3|)-IMVc{e6FoFJ}3S7`ZjavBjK-};C_2c6O#OY)gSxZ{D0~1TgpqQ zN-*y8=dhmA_pQF|e~&KFetco$hyJ@8vL|6Y9qn*A*AN+qW^#Ef(3w{%>Z4h1vFj|W7+T_oBCrPkxCO`NMdGKW|I( zImfr)-S(c4fWOB4Jjzzl(y`X(?)#!YV&V@B`un54%eRW}1+Q9uN!BlTzaHure0ibo z+Y`LIto^C8H)wx;U+v>O^SS70To&x?{|GyM!FC>>!@Gx~_VzEw>RbKeegyREehZ}F z6Y0PBjmhymf7R+3t@FnPKfb?tl=wr+Uv8C@T6^GMYhivhtDKmBGVDQX0o(0r3S&K9 z3o!iQ=3%?=YxSJJT-%K|z!;A;WN#Kfh=>07^1YVIbNoEt%X@imMf-cgm@j+zZuGar z_&aRO?{2M3uhy7RJALPS$(y#JY)%r#ZF@4M1E!aZ* z{C=>#8~r6I4-B4ne0kWb!w;eFkjKus{)-d-ys3Yv{n=UiT5ce#jI4MA06?KN$9_iVCka&z0|flpot0^$PpR zn@4*QFZn7nb|T-MdQqN>^$B^`vsya-R!l#Ge0SXYkDeQUTT$K`e=9lbU2Ti@C%$T#7?~E9Co$=fC&|9VR(`{EfEk z?M{GKEPpm(l-K^kf3NXa*`T~O&Xrdp4}4!8n|5me%04QxL?Ry?FBj}1?!$`q5f9W- z`S|rm&)LTXvqzMd#^2V2eXKR5zrjAvEfnRmuocf|DW7Id{15Wknv=h9kk87(rGk8# zaePP~TX8%HUWxBH08fs;{*?c4mFKg^Zhe6B*%s})hcP~P;lS?T=NzvU+Lr`h+u{Xu zSYM3hR=5H2FwUQAxcO9?yOdKvJp9wh0!6!qhLSN_9s{ko)h7w)_k>(#k! zUa$Ln^!+1kPY!&0I6rKO-`uaF|L}h|`$t&x&$V?u@9=1EljU9h57fW3$@S>2(aZKW zS>DkP`YKiWN5}Ktr6s~~y}Zr-1)YDgu_aUGc=gAq@>#y=3 zCcUqfpI!a`FDt+FWWMQ1_#FS^&%x4*f9__E1*Jn5e=$|U`wz$Whj#xN^S`J2i|Ksu zwGUb4iLKYLKk)v_%I}W(;f+UjKH&4i`~8gKGyexHEk1Mp<03=xnce*- zVda0XWEGEB{`bDw8`(GRUvhhYsQHKcE2r-#w=w>h|9!I(_BU9Ma=$|Tj_(_574cH# zl@DI`Nw&YCzpp#r-#`kC#k9oR(!F*9G!zbR?XYUag_6V~+y+>QH z{T$yjLI3r>&9A4xpPNU?{s;W2;dUAQHTqOvk{?j@bs9acZ@B)sTWS^8D|TK`-0wHy ze*V<<6Ne=>0RBoIyXNne;g2Q{w?8+3nrV4pJVPGKzl}Uuq{kM1uQ z`33(3zV{V>!+J8|7wkFS11^X4qU5=3^0fD4crVyovJ(Qx2lx9okBa$`3i5?~dU?bA z$0*NVas9c}4EsIE6Z?h6_ABH8e}Q;jgZFYA567PWsGs+>FwnZ0U zr+m8hJ=Xu7d9K$x0iK@+f3o$^Uh+N=^@G22#r6Hg?*o(iLEkIh2de&(#e)dr=^xhm z{`bB9F1)9jz5m1b;zi$ZD0?t_{|A1|-v42GIB@d=_D{S$1MKYI?ET;D{h#!~`yb#Qvvw3Ou{tw)kz5l}~!TrYU{om~U-w!k%@XrhQ1I*t4O};PWbxZQTaQ6Oh z1*$T8|L5X=&EEfIzr+Ql&V#UC%Rhko&g}hP;K%I!-|YRL<_~|~K70R{9nxGle=3ZR z+51271N&92$7k>V%>RP?ze(*m1Ym@hbh?nr{_k;AW-KV>Zzb}S=?8WZ~_u(&_z8@rEL-$RfBbLuA?dLHs68Vr11)B6F!{|NMba{T?i^}moW zM*jCTiZ;o=WBD{SKK8kPw($}3?<__Ae|lE_9jJ1T3T^wv$^1Jvj~3*f80UK(KHCC7 z^S|A8Ye4|G5Wh#`CyBrF{0j7CDcAcG>ibvs5MR*cw`F`f<>Q&Nx57SN-4VkfIDdP~ z$5SG;KE4g|<6yWI{PBSwT0S1pr)UaBFW*lo{=38DejfWvk&`h$58hMdx!D`P-mM}Y ziGE}N>SKJG;I9do@2?*2u>|<1^LWHlNY3N&zKHaGq4CD=F`m-z5wDW%2R!0e?Qr}4 zaQ{CtUfNVXnmO1njW_1!fxKCs4Je-v#%qpv5utwUuZgF6zx5WBj z|8m4*O~21ah*yd4JwN(KdVY1F{wN<%qXbiV9PG)_GRb%NE3hv)<82$B{Q5Y{KI4NW z$FD$t$OokAzHt0P|0m*89xbsss~`2>n+%x$u2;Y_G?>e)f+TuH1bOe&BsH z&7;)^{`HUe913vo-`8`jVR%1vw1)nL?{S{PbW3UZHv4boU4L8ri|cQ1tZ)u!@3ziA zdWhc%q`_#vH@5ik!sC1BtM%8%-yd82`2=tD`uP20h3mBX?0Y^>v-p*Od*htcq<9?v z5s&i~;a7OwnBqx0@IK#<#k)`1M}5R2o)hQ?z4)Fm{D)9q{{iFw1NP_fr>&wsy@&UM z2_D}=JNUi3@4MSI`u+FcOz6XnO{3rAzw8$4zYOw|`tvlNBI)Zijeh_A(~0p}*|Pq4 z{HIBLc=R`aoQya1ch%_ky!34i&&4Ai-}AYk(JwspwSebwHSo|k z;i<0$JfEKfpXzG?kNSv*z6X6JZ22(;52%mB17+z!{h)`|3V6g51^o7pfyE!x-@g(b zxsrG?S+vj*l+uxEwkL@S+>UG+GkM?YM(Dv;c*{`$E{_ z4+4h162|yJe1YEeObKH(ztR3(Z_=Jx{8_+*Tb5rYqgN;3{*6{KKMC~XP#+lmMjuQ@ zh!?BsW4?%~_AH!bX#Z9#E5?hx(EdXIBR_h%AAh$sen1cOlX$atz5TAUPXQ18A>Qm= zFV8xgzW*41_#W~l-sb0Cd)3(&`=0#&46H#Jv(Nj@hE`xX@Xx_NizXZ5_4e9Z>x!31 zJmdrZb3(w+!FM*?_;K*NyH1}Se1F^N7hu-^W>^0IUCIab4Z4;u*XQejJgs`)-kqIX z@gh9@?ke*&$9Ti{d&=LNdHQc@<~xx5S^sw#{xCc6`F;j}%=qou!LD!rN6j8##T!QX z{^f4B5T6(J><|USFSDndKcO$sSI#GwC7!4+sl2G4fNh0~{u6#v_5Yz?pXE<4w)da- z_95SXXKSlKAB2DC@kSr#XV8c7L;5-rf26P7i26W!0JB3upSxR)f^+zjKKR?3xM1QnR z&ftgfs{BGNJHH?v@gVDu+AdxUV4P14MmsAspo(Xr{6fqHgP*H!FuK&Dy_VMp;&%>4 zdu`577Vq#L;9Ygi;-xUZP{#PomOn`Og_idSD}T^^oUeY=-owf2)eiGlK;I5OTE9fN 
z8}bYF8i7Bw-=n_8qh@~~pZcR-)RvvM_xo2@gQ}c?BXxhA-@tB9Lf(y%%zjWgBdNZYJTJTP4Q^97>_2D?~m5o zH0a@cW7O?Po-D8Sm)FQ&j~|S90!H-_9_P!eqDN7cpJP58tsp>9hC=|4FUkJLc$QrR z6bkXx@%^&mPq}>b;P2`x_;VclPeL4w`jMZcTjbC3a?&5Os3iUf;4{+^9yC!4}JL;Pw@U=b(Q^r ze!BcZ%{KA&J@X4KE@C_mvmAdB|7AK1w)0e;W(<@3LaYoh=3nI(qPfF*kyZful-EhA zgUN5q-&gS${LRnbh7b9LRNl@bP(FWb=h?o#v*wd`e0{9{J5%+8UbG+TZxc@HZ@=<< z1Lw^k2;WmJQUA@OwU=zppO5#CO6YKkFUA*xeNzSEF#9u+Zyfeb70A8;Uma_7S_6W~ zPuHMev%da(Gaq9;g`fBXJmNiKed_3;{EjXmJeY$aj~ea^WZybH*lXq&Lfvnkg!!U{ z^C;?q@)4js^gomSb3bOV;?KtXLc6hj;PcU5? zJ@H5Rg|Z$JUYh;BQ_}hIAqjH&QhT%?>4&{VJgHpYQ$CzO$9+zJSdZPNyqM3%?f0<1 z0Uj`lgCG99akSFdXZ8GE`GvS#1m2(LeUq$b(4XG;GS?qz{WM3Y&-SDJ2YsvGe}CGK z`~ZK@{$(qV{TRkmf3#16kJW$Bhj?0-NRNxZh5aU%GYLJHI>S0{^)_IzfGCLhHHyXppmhKYyTq4GKi?S69Hl z4jLHx^G?bKu;ZV}hsEuD4*QM4-4(#ec@);KL)I7V2k<924+B5BKT-MQJmSuZTMu5- z_|3me_y>7R+7sx{V1hotq|f%p{Tus6ujlu({m5_dYmy(PZ>~R*_sTxx;rOBXbBFLg z#|P-?zt^$$eEq5Z;rqee0oXe{=sSCKMmz}M$q(ow`Ei5#nDFEJfPcWx@#EPb(tid& zAkP69e4Oeh2sHhqeqjEb$S<_7`SINRLeu)wE0G?rKPypRu-+?-Z}cDh1iL5?^v}i< z$pZ}eWB%z~-!HEJulH9VFx%gAeBIh-_Yd~nejWU=`aHj%CF2$Sqr40Ik+-t!ko86V zqxi7D^ZZuxaeY5U&%V(^`viJ=w^V=Jzo+_irabo9fn&+zw2Ay7$@$l>OrO}_zSF1P zoqlouduP8x24uOlKlpB+#Si+PA9weSpPnCI7WKP-x8E=D1LN_K^+kT*`;;FO`|;}& z^Z*t;fgh?r;YT8mGx^b%K+V3e|D8*$&yT0pcyF%+So0a=3I7Jl3qSN<`Gt0Oi}=zC zVTb4Zw^sr5Fyt3nT+ww7zsG$o?zdO87Bd*_ud)Bw#G@9yH)wWPpVrUMN#AOR-)sFW zd|UAu-Fg}Ch1WYI(8XIs{-6!P8&)6Su3*J$e9;8FDLCYd``KiEp{*^p@B2TUU&zPn zL?P|Z*fxGxbX$I*Jm!Zz<^E=?tMfbdSNVlDU49*u*Z#-ln-xFqp?ov94I#?EcmjBJ z(^fQq5l=5488`jze)15^7|V# zqu=AdH2=J?Ke;?-^n3iXBtJaA?=T%5+Vl9Q=FbzMaY!&(X=B z(U0~`zr%e!^9M!xxSlB9*YkKk;_*HDlVN>s`qUdQVSE+vHXg!#J?H-d-p0fE_w^jF zs2}=o`fK_h?&~=|hX*akFVJK9fBtB-9Wc+62UqoiadlAXn z5-@1~fCQ?5g)&HRA?Yy9LV<^tP6>i&NT=Zh6GDoVPyzNydsr59>sx_XEaq6?O_!-e zEg=+yMnhegLm$-%q9809gTi95GdJ${nfc#~$YfTxTCHjAi$fJH-JiR;nYp>SySdpZ z=x6Wi`F%jY2PFzTeujQ$fjonM%liY;=aJM0Js}^{67@rnpLCUfC};oneuDLb{^K$y zlC?m7icfev|$WsNVvAg>S9& zkMgj8)(`uyQXvHW2lM@c{uff;(El#=quC##{`4mG>Fls+=hOQu_@F$*hxJ2zmh#0u z3-rqc_#6fJoUE~dTHDK zR`sDeWUBmdeHZ&AxR3v>2#k|2=%a;OuQK})$fN2Tmb?3SNGM@<9}jsXjQJt=@kP17 zmGzkV?M7)J`Yp!#40!q;({V?KMebJsr#Fkm%zgy?J|a22z5{-k5^jGc%?JCl<^GD= zAMFA5|DLoT>_>w2@*MtPypV5Ck7QCd{#0mB=>d6Txmo{v6$#*O_8ZyrD%4LVC3P3$ z`1O16li!>B_;^hcz}fu@>Hi5xBje}yz3Q&8i{HNj{oWOIcUU(2y(F&fi2Sh};?0vkj&&$hX==n+c04(@gd3m?Qe7s&>UR=JOd~Ev%ztVoWoWT#X z<;a$+J%GOuKYJf9;^Xn_b-X+gKfoN2n@>CCP6oeUT)y69@GDg+8T=qTN7DYnp66pe z6F`6&3-nw%h> zp3;7R@8#>+LcRw-iO=^}_)B{%z-Q@vCtmaVK=4`X>-q+LLAWO45Ab>WdY8g}&(P;b zEBHVkhWISsuQ$Cu5`3DnA@uaAH_?88Pp}?>Kg;J0>3prO<0*(fmiu!W<0Lxgug&j; zKM`KNKkrBYeX-wB&iim_kCPPVC%B)jsXc`6<^CMkfhh9lF+o0OoM+(vwMYFa_nC4Y zQ}yRJXb5cI$r2W_Izr+R1!P7Z2_(qXrxs>-( zRmq>`^A9$;{@D8DUk?JVu;z2wz=4Y`NBg_GV6}xo&t{YDn|$H8e=bUY=Kcn7N%}MA zfq-kftS|4gUWvTmfe(IqUV;8g)lvpF^VyWMeJU_No>Gah84ubo*9eP$1o*@K?Ti=j zW__bQ$Nffl9>xB`^C-$kc)zU<9MA_?e}4WE(zn?AZOGUE`4hpG2_WTsiTmdnSTFs7 z|3CkzwCU>0eRdh&oBQl{SzbrEz29!(zPeKV0lub^iST}Ca#Af3mNT)}&pXLaD;4dL z_t*DOUMl~kt^c}RNPb#X`nmhAex~P%w!LM2LwbH-+hhCR zT4^8nhV5?=4%**Z+D}Sj4!`sm{NeXzzjW((L$;s(ep}Lc2IblJVL0!W1|&Zc@Vj-q zm-!y&**+gA%F8DtFMjNRPtPmgdcFwyqko{w`q=+qzCqs-;e}lvpdYuW58eCKn>1%p-<}7C?|JzBUGC?C z^3MbR(qFUv0TB+`FK&`v?!M$F7`6z6>bLj_Y6tSLa{v9aVkFhcKcWAwr$6W;=Z7FA zx|Do-$;k)}&g^+1_jg1`$FKYtp|YNLiM$LuZapAB?grxr{lH%=hne;Ovpw+B@t5s^ z-(O<<59xo7a!bFnp_fn4Z+f5G-Pxh!(?$QZ_hv-j?}t!+it{V9$NS{_JZ=om6FwYd z&mX@G=ns6)(`O&s_R(MbdEjp=;~xx&uvyP}o&);K4o5~medqlZ{7_E(?ENe22l(As zI?s6-$n!b)VLs7g-S!&wsiS%dXC!`*{`E6B&)UC4)#H{h4v9A^E?!M`K+=jAH_fc=E!-#}sZ zM}2l&y~O7(CH)Me|Ip->_5J@f0$LH<!jz{Y+C+!t8vXY~J_Ut%`@cdp2PCHCjX 
zC#6z|FYmJxTmCio7v_C=2c63PqQncVVE@!t+mhvH+;0?3JTs2xD&SJd-EY6XM|$}E z>#v>z|6;Kv!V4^9KT#?XHu)i8f49nn%SK;tebXCmsgL!jRHEr9`VI6$K6amv75rlS z1qzhV*Ti2SeGQCqo6ivVf{AL3W-($Uj;5fk$IdK+H!4{#oEY5bBd;{!x3-OoZ_)S8?RZ3j1- zF<^OriTQx_xFO?r^J%T#MY**PkZ;%LJH~wa|9kwB<@xkag)fMI@~<}k^7tnaHng*u z$afz91pZ5T{1ccH*l$CtW4+1apYSvi^l<)FaV17*V_$M&G^Y|xu{1faCalV$vKjF*BBD{ai{UY#VMzsvFGymC)s!~IFC<39QdPIf4@BbiPJyuALM(j1g4LdpFIAFIO`h! z>gD|t;t5S4Pz!S(md8Jtr=$nS0U_dfT^|1=`q=t|;k`l!@AWyo{#4_iRG!ijJ`Mbf z)St-v7ioSN?_Y%7M8>};h=(HnMS(xjSqu46O?;H-w(%z_n0Ri0|K|BQ@liD1y^qHd z#7lX>cq~D@6!_o$zVT9OkGFk%8j0t!xy=_D62Im3J;qPjmbWD0zw{jM3-b6Wpw7&_ zH_GFu@a0nIKS1|iocHJPQ+OZB_m@Hso_7O|^Y|%HptNVreb7lBKSlkMO+1M_ev0?E zx#0Z&5d_HE`_nvrihmCR{gTH|3F0||9@sxa-*7$3eRp1OFudHj?-eo810dHfVU=ReKkrsPh5#sSR=FxV7kF7s4<7Iwe z+rxR?(bKIF;(cEqSJ~e1=X-mp1|Px<$_kFF)8jP5_sU#FfX ztpDa8-c+N%LA=LeDh`Mef9C-FilqobN?>Ihc)TRhnb^T2N@oVlBN}c*7iOS}C z>FpNYheBn*6#czZ1M1%LLpwVkrRlJtjoqFa&B+xgG2asKeW`TrVA z{M^NF^e@VG`|4J@NddeDt&Y zJ@EYjU&dpV?+yJb#h#ZR;rH0k>o(+v^zV%iJipOC@T-LKx6I!xeUf-1k@)tGpZyKN z*YUF-N%>p&nT?0MQI~MP5r~fQke?r0`YS(~-xWXnQTf?S0)DQ}$I%EInCplSO?`3> zzXEv{el^%~z_0y=%YE- z`PphFcK+Wjl#Uwh?#Ry92UzbKoed;>w)zwNYx#Wiz&C!@X24b*ufUy1@VSVe$Beh? z*W+})xk-mI!{2k_m$zDB{Bgun&ct&}=e-_OsD3Z}uC*Be$IC;p&ww``?l66o!iZmv zc!WdJ*Th#uytTC+5j61_8IPRtE)i5m<%#hPS#I(V;{KyEBtJC2I@W_@jSnjQz2f@N z+7$lL|5))`6Z^)&CLX+jfzN>TeSXB+fb|VNz^}suaG){TL;Jv|%KAaPIEhbR)Amrm zT4jC72POR%ODZoGpHf-yar32Y;yr5nj2GwR3*XZ|E1^A+H-Z1O<1H@l7yj7x|E=SS z*FS&LF8v3@69nrDZ^$I%7kKBf#$%NI0qk#U?~DBp``PXX#Qul=koe&EUV@@(+mo5dPvG z@@=yaKfX7+){2>b8ceB%|{~yu5 z_`{&P0G>xG|94Ri`Hkm6z9Q&vuJ517f{XFP9*duJGw1inch2E_CitNL)%ff(JXenR zuw8y2`2?Wv<9U!j1K(He_?dqa{9=8MKgc)G<#;9E0M@Unr7!CXePkzjDENHA`2B?# ze_063`F#f+X=Ka>KP2i*T)d$(}q@m%AFN}KY%$#-b=&#}pmNca`oZ{r^e#UJ!bk6Q{G{gxg} zI?|c<*Jd=?us*Flz^=YTyVd$;{Mme$Wv*Q4@3zQCa>5S{41ADplIm8=@jc`X;*R!h z{1eIdmnm0#T|BmRA{c(p<;(kPouD*ZZv444etdG$;DEyKxtyv08}LE=a}F>p2gq{g zug&EVu$ey(m-1Df1^JH1pZiP1R6M(f+0R5vhIn)A67^m1pOlO@AMcAIGB)SWqAMBqx+tUykop{#|}cww)=b z`49hZ$eVmmdCioQx+r(~Ss|~PayE!^$UFFl{#4#weoM;Z?66?yg>semi}PEe{we4O zd1ZY5hx{<0-vhCf(B*ZZe|#3)KY)L?K@a^Y{Yalj@&~2Qv_yR{I~4oteEy-F{o8nY ztRM6rUzXqSlK%UgUqL=c7~?aGpUMx)!~V(du>X$9k7dUHESP`aWc)IJXNT)ne!sQS zKlmN?Pk9Ra-x7uB=#TT6H^iT_{e7Gd ziV5K6H_F5QSU>FVE%|UMztn$0d$b3{{q(i~W||D7M9-h!sYDOQLgFqZ)vHm)uYjSB>Rt{n2g za^w@C{+tx`Je=d#@4-)&oA)(w&&*#fm-VQ?_Kg1Sby_emG=Ie_%#U|#o_@eTiQ7#v z|JXj^8$~l84SdJW2Lpd-<<-CsHcV7`z-)hKjh4U5XE{~!9`;Or$>c_7od`s>zdJq6 zJ!Rn!&HHW(KiH|uc^qN3|K66gFZoS@@ASLOpB^1sL}aC)$m zPZ0X*i1=jJN63rKZ^?-U`JsIQdCTzAz-sXVKf>=WC zWXn~bQ(S-_<`37izKD;kaX<>z#mYFkdr zTm4nrT*CMA^=u(6f}g|(>&uaamA}wWAwIHv1^7f}Ju~wm7W)mK7`WDFJUp*YL;L(3 z`J=s_q0f(2@PR%I@ey?t;8XDWN%>jN^0T-^pWr+|@R7meKcEN3pCbQ?o_L@Y&`0uH z2InW~e5ZpcfuEMI=C>?M{uf(b!~S2|qgKQIANeyk<$jCbOMc6`p0~1G^0gHF`N|9W zUvEW46iL3Aza8Vgw&C+%puOpQx6V&B|IAgu+p?a-Y@g@9AH};{$WIxyZGOvAgBoA+ zwTw|--)1@PLrs3m)-DpfUBmsS$#2=)CXD+}lizZutmn;aU+Bez-uM9q{cDeR{CIG_ zUBi2KH-4OF;C|SAe;ePA0k4_w0por`?o)pw4eI(oH~DO+rlr4nOo4Fc0rOgs0_)+e zf|xg^KJx8IzE8mq=L^WUQ|9+VpI5}c*OK*7>&G}hY0G-#`s=KdzP|mqi4!9S;I<~ zVBE&re3_C@vw?D(KeLT|nB_9#qsbYh#7A3i5{}s<=0oz+vYt;$eoW@qEEfOL)_?sR z7C{dy{Uv{&!Z^Q!{YUc5g8Z4BzJRe_O8!jF2H1CG z8papbC~XF&J_hlx;V&|N)yMcAhgH#?pRDK6c-eo*&4cz(2PCt(EqH zZ`gjHaM1qpekI#J&Id67d{6m9`?roaWqT+734n2aivfq_f&T)^kuOUckogF>e8S}$ zKQR!tuP}$1C;Ky0G=jC!9_G`}m}%`7B9q)K8B$JieSi$4u93@B=?8&|jht z`U&*)26k464{K1`9HM>Zek3HH0pr)l{;*Z3d;6QB7` z)#+2_$AmwOtPkv8@?#b&yXcP^C6fG@@lFx>F-xM4&&iMZc;NBJe8PI8_jlwE*T)k{ z-+UA{&{z56@?*Xwz6Izvy}yR~v%`Uz-{<7VMEPmp-_82LdBlfH=U-n2 
z@&bOJm!I%s&>t+3@n-lX<6m3jfJ7ef{q!c&A%uxQ2A9F9WfB9Y%9dZ0#{z(1+e_@B;zuXvj{(%0VA14rgO1}j3%j~EA8S+Vn z8hbA*pP+x|WP2N&aPh&AA8!pS9)9TmtX=7xY;9%Bi)%D!tUO`;+m`ju<;N_o5ti~- z7|*=CMuSLTv45L&wr}iP*t?r`w&(pJ8g+he{4r2otMhxG-?PQ}ul6+BpU&F?8+#i0 zGdluzsJ~yg0e1!NT7CXY_=9EQqxE{>_?S=g>;LBSZPp&QjDHR~mHnByE~T0ADfc(l zUkC6R_cvQ`_~8%q``fE1FGjLoV12B|$$V4K!(=|)1KjV3c`WdAz+zA8_qgv~j{j%x z`+a;d&G#z)!7q6Kula;EKOfv3VTYvXMOYs|6PjrXR5#Hzq?Hs z_5!}5J~-Z^Ib_P||CkkZn3a-aTLI7QFxc@x;4SM7c^AY~M9pNAG6L{1cOCey`5Af&Rph6V;9R08EU&p#1 zv*Vkm-v5TYyBchCcIAi#rB^(I5I>n@ImS&wM3t<3M2OOMH)V zjvV;ud3+qNsehTZN8^Udr?HRaeiB{b6FCnNdgJoqB7fEL33}(x*z32B1Aj>Imy=Am zI%z<^PJL}7DxIo-GW0>Gj_r0igkhgo91MTBHFMsP7=O8*n9kxKhxu4R_GY0+2j!6e zBp#ddncLW(jpHOvx?<1E{JM(rk@vTJP5-#Hao`^d|F}jnwf=Dr=^xh{1^&hGk6WT2 z{2fI;EIps~-s-PPd2{}#;9uP9=ykOHzBlszk|^i-uv_o3A3S;rc?UoCF7#hq^5+fx zL($%?RR>@dWOmVflPGgo1P5 zbN_v0+Xp|>u(zPUcs_Q$eIb7*o`39bo@DmJw~oiYT=*+1XTaaRT9%*dwIzPn zqDnabx+-bYwedOlfU`an#+9JQ? zhv>(u*jJuE^?|;3Aiv0;c17s#_%o=Azp&#^x4xg@PutHA;m5$w2Z6i(JqO_B`IoNv z50`m9yD;k?&SyK+?-s`VEK#2*{EG8+1c&ypyfWYgv|m4~UEJq6cslQuY0va@eZ~1_ zhY1ng`2x;khb6#rK8yCJXrJZYU${A>{CIn|I^_AYwO7;QU1)G`zu^2(^G|cXit)q$ zUG*Pf!N1zeo?mx%Ie&~j3wwC9OZ#%9_yX?jvcAC|_?4NzdSU8gJSDzgSV(=rzuF^g z>mxtAoL77LluCLdZ1F2s<^0~*A8+*+M*IKP^Rr*z|NALOeEHAZM6sgq@bWKKYFPhW z|Nc+!t9<_T(7uQ6ddenFb^U!ndtdz*eEefQZ?XB5rP^ZQ@73*3*$_hvr={lWG4GnHrVA7npq=;Zr& zk6ga~Ck@ugQ+u{2zybtv6!LUDz!~Fx+ zt4a|*=(at54_F@)W_$7;tgibhz6XQ+%lk6B-vfOz`w!5kg4&$icZ zh<#7{<8GmowHMCYZ#VpY`b_QlqXOHr{1~;gef;s);r>|tSyuUh`3!!vejk2RoV|wq z?%4AO^v%5Ag#OIX3;qFoZ!7e5_FO=(7u@e|x17Bu{A?*d^*tWm>$RJ@LkB%^zkcg@ z_=eQ5_j4KkfDk{|uh9)^7#`a9tL+>ZEHID2+p ztY_cb^*5b8{)+soW&9(S{3~FeZb^IQeI5M`8Z;fe|3hEqzrEj^&YKPT--&&V_NVhk zBk-rd{(0HHF7kaRh6kYoe_&tt&L`&wRI%X#(IF6;aE zfJM2l-O=x1fA_^c?mM_z<^AoJ>o0hZC-f)(U@VgCNk$q($A^WGyovh>7$A%B0z_p|x?JDFdQhy49r{{BwaWA%s3 zyw`$!K>tz~tN$qAq?f4@Dfxw=?{)Nv=0Gs$*`TM*4{ayb44l9VBujcRX^7nW7 z`#WyPah`+yc>ezGU;F)?=G!>;{T=v`eSb%PTJq{t{oCNLmGRGoKTyUW_usKT?4pbO zZ@>O?_}d2lx7QdCrWASq+dqSU2+}vOOUlQq=YZ3||F&I7=ED;G*VUg^{BKKv|84et zAN+qa@An=oz28H;g15ilqkrv$?o7tts15(v%ir%!;4d4#-yKk8ARA9LeSa z>jUWjhkw5}&G_rAzTZ1odcQZVPf2f{8p-~6_5I!f7B`6(0(%(m^%|%T_z&@ZZ#ADt z{(diizeoSO{QVy0C-g`DelLH&7nM$fd`tQJJ+UX;dy4%1UUF_yYgqg&pGC{{JtN|F0?U@%;Ngjj!YL|7m_dpZ`zu`|0~W?kDiJ14znyz6tju zx?hp_AF$uwLA(-{`}}`RjW<9eK=OIin~blb`vd0xYajrhfnVYLeqL$Hd+Af`FJ3=K zc~RW!&HKCp^5-%C*7fx&-s^ncMtQp_`xDkjyfMV%C{~bv5B5FY2jTv7-f4>c!Sdg6 ze^6>8U&Hk-zehRp_qFk$^?FPg@j7VVhxyy^K0xBTNW5(E*Ke)SU%%n}@z>TEZ$RRg zi~ZfKD8g9p1#U@x9dDo4eEvSzLil!SC{k%pQHPC9p=Lw((Xn@q1+c2Jv@ZU8VfSBJZ4EU))1{ zK@(4v@dYIxPgUP46PEpbgy}t-i9BjP9ta+OE*}r8n{v&^gYuc?;}QHsJ1D+7f4v@y=X1zK4A3cx}A>lTEtI>GxW$_}X~=Cla1BEJu65*T(xl z!E5pC_h^sh#$WM7EZMHV7Xy?c&`fl(uW}@_uoCAJjhu{cL`p zrTi)ncCz_Z&I0)b|CZzDkv@-rkmdKZMERc`Zc`rtAFeMmTdw>=Is3Qy4yZpu{_ToF z@%#(s$2aM}&-pbw+_e4guJn)cuz&J9?0-)bmhV59f8V741M0Kcp}be+_`kK%Kgz@Y zSwHMwh=u=H`3>gxH|hT-^~3Biw&Q<)rGJ!%{Zn4U{@bE(egA>}_$K||rT&>6c5VOP zTj?L=VgH;DVgDPV(0%{Q@wl_|cSZj0Q{T@H<^3?}|NWKzQ6Bcs`4;xy;~xV02m0`w z@pHd$B*AG^pE&y@$p147$}vBouNH3oBK}`Xf2v|AYC#`WkN#<&amxm_L&L zr)}cx-j;ZZ$&EjS<|ZY=lL_VLG9{+{GJ`Lbs;jF#5rj zLw;Cp^8dt}0vg|1)Vf#6lx|&_3z^Nr41V`X{{&37{FV{1w-eUC- zot=!lK%X6sI4+Oth^51&=;y6n`Jui71e?MXO zMxVv=U5%$=&I8hiy=*?JAFbfa`>o{mkHu^PUm|P zk5$hz?*Sgm&@^5P^9MyE#5dD?K{)R{`l!WtXPPhQ0nTqf>g|mI_gy}rtr6iJmrp2Z zsQv=}&`;?+)%epK5BODkoJ0xmk@#{Fk1mc<3*)>B=hKX*h2L#_k9azWw<`1}d~QCW zb;h?@2)(60#uE21;y+6KBO5;v{40RKuKsk+*pDXuxO~69L4_^n7sxjc|3t>WarN;Y zq9^Ms?Mtb@)0g$c!L3cMFY^%3-Q)B=szU+T3)w-UsMo( zBG=!>4q;0_#HVZ#4)bX|B7#OA(*ERmdXP^@;%x@`ge1P^#rcE~?#t!}!hBfGCsgq9 
zT0#Gxe}r)Mw!Y*O!g-|4A0+vN5DwnL6Xp|IU;hE{lQUHKJ55eDwg{7GnolUq7sPx* zQS_I#{_E#>@365^VqKR{sNG|^e$RYD>+8@bvna-2g#MyB6xQ=oo@UH~d_rstFxFqm zCq(lI_M+8CHDYRD*c;r>UgH;j{n2ulPe|(Pc?9am&-DD-ws+oo{DEzc?SE^fec+4! zUHd)4LHlplugJd+gz!DZ678?%6DnZ9VR<&58T6GjAQa|!md7oC%# zoFBjW?VBp~1Li|Sd3ogX3F&-r`GiKkf7FkmkDdKQeCCNB|4_mpua(EC#@A#2l2546 zO3}X1NAd|pNdftUB;VVe^YRHzJpRzP$d4Dx_v8=P)033z@1f{loS$R=WA-oZJp-zP z9`?L(-fc{zdwehXgt$DT+@9xs8JwSEJjuxc?UAs3vwza_)bwGh?c49qQcn-i2lyOl z|JaXVKBe;s5njmnfj{DR?GqoPFR|aGc@yCCGB~g2_jeDpJ=@>s2fja9?^7ZiwBOwW zf~v1Ep4EIp=-tW#%l|rpO^Of9e61jVo|7NYpZmX478`pholj%7@6ONXlLi?g@E2D> zzdkyc+4D~F2QbFR^Jt7;`33s&d=B?9)4N@K9-K$Oex0#BuRlGmN^aDSZ^$Fx>x`ZEK;-~xL^yWnIwe)^D@$?40mg#}rjY)Pta3k%4VY=Vv{CaP~;U6MRveNrzs_j{NzYOLp@d3Xx z`;+%q?0vEi_P(XJbnor`6?`U&5A8wftM{aRZ|{flbS^(89Kb^K#ToO31m|C0sXTGK zsgbATPMY0s+?n)AfGF1fr{7Ik{V=ukxI0mLT6%n*mGAU!nr7&M`FOjn3kqSdqzi|BheYR)kQ|DwnE&!JG4E#hqL-Gqhj9>BzjYiq> zLYJ^9=lX~30p!``6I$=ebq>FWe+TZfyJCwejQ%%8e0~0)cAxEu{rrmfZS?uQ*w5l0 z(vx_Q#$HDJ#;raPH1QpgFKAofZRIE69f2i2q~sIY71-tX`sH#yp}`;=-)cS~_Jtpe zY~7#5CO#-�$;jPd(%QhPjdW!}ukikjW><@39`hzb4iVAn9`t@WzhpPlZ0u0rz%k zAp~SbNjdm@Z09+0nGDR=xd{&G`~U!?}J?i#(aT2t1#C)iT~FpJ7>_UI` zJp80WeuU*v7c4jD=_e9?H7v*WlI2*h$nO(z*9yzAe6k$tiIlIAzhOCULs)L_=j$}b z!g7YsMLEuEfj{tFQyBX*w8#9`0$crl7UZ91`F&(p=J%!?-)G?- zZdMe3TmHGm|99mFRmIai2{IB8928w_tV&5bu+)vG{{i^pLEM__CCnv}kWy?jLxF7TN zK~I%8cVAEWn;q_3dk);1+4l5&8|A0LeJ|^uekSvW_Cs7GL-D(=_U*HPpXh(tzD55k z-yL(GbHer6<41eqlKXm{5AMF6_KyD^?dyDS_w~$2lqsjWg*|NLH|zg^?>S#l4*6I4 zRrz=K^_-ux!wo|()K~ex_`V+XPeDI>U(fFY`aLL7;PEr`I}7Cb9Qu(ykEFixds?D? zm>uq2%s-T~e|tZ{`a%D3nG?$Q|16j<-=zOO^-r)rf_9xrdn&KWkFbB%5Bslh!s2J; zCzy}lr2hlzw|~lgJ?n@4SE&$!{)72`LH`S>Z|HxQ`f+w>;$KJ8o7AVl{rUSV^gww? z57rOysZjypXXz2>mkaPYlKP6zIr}l-7xqVd!~T}?(T4i#oc^ZwsqX{-p6|o|aOHkH z?2q-s{;XHH9bYzID)pf{V52#k|2=%a;OuQK})=tIaK<{Qf~-!UG* zkblDNJ|6N&81qB!<4aVq_+dS!e!Ee`@jZTazw(}_yTcOqD=1HI7K@qv2>5-(`hGvc z=7Rm1G#~8GmisGdf42Ykilxl`(aTHqE9BeLBbk)AAmE4bC_Nx=EH~?augV1?EQfrs z+}ID3vYyv+{Q5ok$#SE=<2Ap(LpkaH2}pxFx*qi^dm|dYFiE; zX>YA7{{6&H+8gZ%|1JC=8H_D{I6s`eSC#fGe$(%Yd;DSDv_JieH5lOj{(btxdO~N%;h<>j&^B{I0a`_pdK5Ur#=^ z{R5x0UoL0x+g*`oiVN_w_wgb=9=~4M%MzFvJ|=|leBU-R)EA@AwkS}lX`_g3f$eG%fjbiNeV{CpIAYs@Ey zpW!!_vn)L?U(Xiuz3LD8e;rv^`3wCN;v>`F%U`_a^?~5Cwj=t*;UnV@=oi@QpvSVk zn!Jyn4WXw`y_=;^upSCNGI;z4^uYMBAFk_oRDa0* zIh6^CPX5lfN?b3kep$Ug->B$()bDGL*YrTDgHLdNg8R;z+CwOp`}3kdpON#JvOmwj zettS1%6p?B^_iT?XW&Em#{ItF zL;OJh%z4Zsp3m%?e4Rk@1<#MR%=fqP{Z+s_x*kw|a9#uYhxuV}za8Xzk^Tk)(0?KH z2h8(0PI#NYC7rKTKo3`*&MQ@JXr=xu;=8@g4X^*c+vWEr-wg8?t`RPAJaXS$9T4WD z9EB?t!es~7w+TzWb7{Y%`AGGA0{vAs#hh|*y(;UWf$@H;MEd#r^-_hfIbQ@me2hrVV$X8b5$xZnQSNaNkGf4Sc-GykEM%l&qV7Pji&JLlYQ|7_3WkNHOZDc_S{ zT)$3gG>;Eu{nGnwz@QKJ3n<6_3M&y|-5;dJUrP8b_uIxFz@D#txt}>NOHPWkFT?iD zeofCK(}zpvb7xt50{v6{qJQjfFu#z0iwG}-KEQ7}uWfUw+*kdG{Tt0y)VKJA?{)b7 z-KF!&&x8Hfui1W)2nX#Kaal(KdblKs2#||%Ki4wkbWB9N8}&qFXw?25VM~G zKQ_d^68(IYD_>TC{J$&`~W}<4bBH1>}Agr&z9eBNMo)& z(&uCO6ZAjm9ls3Z0r2$x82HZ)M+Tqx^Q^p`^S%J}i67`8_t!C=l9PfAMA0+JNb8aX@7cW zoW&RLSiZ;jsNcRj8fWbB&*5LhaAJvT{|ej}Sp1vN|95_g+5O+SBL9`_uRcC0l|p=Z zU!B#+NkIaHn8UWK>HL`(J!DM@+p>K=_-tJ_y_xZ zrV_uQ6vlHPKKeYA_@F-Yllb?V_z{3h1@fareTw#B|HtGHNonp-TB45t|*J;mV z5>nn6f3Z;#c@~)dEfqO1Ht|#dS4(m}q2=(G2;!@JA@Niy)gYb<;-@rm-+sOAU$LhIE)_VRTV}p-{;_`B%*TBG zff)ApFEIb|_$OFOquET>lRW-OwCq2h$3MyApFrIl8he-fWj!yl_p5pQ6Tg3gJ(9;i zp}!E%7wr9H9{+^rGqS&cd|>|Yz*o0Pt{+k zfcPma*83XnH;)G_cK0{c$0pvZi=Pt2Q#q6PDZ8kT`)16qFYZAm8fOKG8FFFGiwoX)tnpC9pCE3P-a)+3H>fW0SKvQoU=HsaQQ@%ZMgl>t>eZUe$8-{5`PoXrldLchxfIeI>J}s`#1j} z`3Xy%I>OVGLm254Dh`e+h@PhLaT4Kg1<_x)e;5B5#E+_=IiVl@X&^s{hpzRb+YqvG 
zoWyGqZ%O0jTt#_Z;w>qBFFI;$*P7+%`kTV26x}J5jv6~_>x5+|*Ysy7Iiq`VV9 zTcY2yfPUa_{A?={`8D*-#ydlP+o!z&%Cs*_foz-Al#Dn3dko0d(83=@;3lH`#Ta0KhPif;C%jIfe-pqDa1!A zFPjH}yiguoJ`tRM3jUCXzR1HkD-Vbl7uO`Rn97szd%BtB?_@K}|AWn_67u^B6ABvs zA`V$GWx_>;!QWfQ6E9!LH(V)BBf+2i$9zrVaqkN2N8tbKtKff-pX}@&@^eaSk&Ty% zeBB#5e$@XJ_|xd8S3JKV4;!19pehffccV8xAWV6=Tj(tFSLpp4LBP;^vm@?8oCV+q z`SSt(Df8p>DfR6U;aSPvvIsU9}xd@ACy}- zeLPPjpK+is+sP9B6Er3K68Xn`1N~bU(ErwP8jK(GpC8D2C;FE0h*$WLEYW|w#GjGp z2l!hlZbrZ_!w;b^`D6Hj@oBm6V-Vm&er&ef`)J`uySc=VAif>>(Ng-W{=fI_j-KLZs?DAo{cT{g2bkS%`^@3_JCOg_FE@|*85p_wn=*QZDTehLnHFXD#^zcZRHQ z@L@i|VSqpJTciK^K-)w4jwBc}`EvlTNqqAymZN>dH%0(({s0R;%kd6t5)s>$!~YTi zzxlgu?RAYO=isvJXLLTFAK#GQdwl-1;~QRPzyGoA?_0+eKmY%xUHT7CBZl@YyeWc7 zV()5x6S2>+e!lZq<1nLp=oWJgUq0cTrCJB@W`fa6OgisA%8BSC8Xeug_7=_4&7= za4~+Y5AhRhT!QyU>2U%5oBamcJ0aolGwmTiIm4fRCgVXl{E21zAo(T{AH0nG;PSKm zBOkdil=da+S1o<$?+pI&d(unpw=naD_}RQ2KgOHgPk?^t z(3i)^FB*T8{4_kEoCKZG4gw=0`N&Yq{cU^GTg( zJZNcO%ema?`ys%{e@Xnp_R${8@!m+vB^-5F&T!f&N4yy+k14NVxsKoGqlEadKH|@y ze)@1v@we~E!g%8>zduy`O}WlTesAL2-yJCa_WLjO{ejp^{Ql;y;&018A7uG|&`|ts z`O84w`F(#~@weqKG~TOgztq(6*z#Yj`}R41o;5uz<86UHz~9zC%gQ6;)z3tPRUa@u zy#@qUI60ww%xrvld2ja#2#z)l{V6Y|9P?G>#n)GPiSQmzDf*{si=UFq8@I=yw*#?>Y2ai5FI){Lc>S7xNG0?BB+_qy7l`j|V17 ztL0xXKfX!-ea^4h;hOCqo0}O3Ci1BAgYvL{@;mH*R}_{T|FdBJeUtuW{>~0-w*POf z^bdZA{ZpR8{u`nYegDDyzM%h)?f57UcRBw}z6I2u-sk+D9md3m@J;HE*`dViM*Z)+ zzk(miL;NVuA%5$kkUf5Zez^d@qX55i@=*fcus_a^u)n4#c;8>3zrIO--(MLY=n?kE z^›@2e{FVKhQ^hf=e$N{J7LuZc!`6>0VS<6*_E!_IW`SW!^AJs=Jck$9NKM1>c zX^>aKHeTAg(RWy%ssC;?+M=#wg{SW^9%6L3&U^|yAG^8N$mA~r{~dpve#Mk=`39u< zAm6}pf9u*G?F07z-rjm9UqYBaB_VfY`cLk( zBq6wiKin+3@!^;Ko4qF*{d2#uA6Y9IPKWJv; zd3Oar%pc-s^HYj=c>H>0FE7LgFbCxD+Zizb<+rVU8vo+*^(cd1X}6uh53~13+K0Tk z^@#Y{^-#p$;}_Tcd?0>+IUt8$ZI}57J$~O{y~^Nsv%!3Wu#XFBZ>{PB+9v^iBJKfx zWiS7NUs>{Tc>Ee!`aQdRJ^NI~2l`-sruUnh4nN3u`moi@z<1ZFZ#@3rTcI!XM~J^n z&k%nJ;D3(4HS|@8kEn+LpOV)Xf=`e?g>bEz&Ho$P^PtbN zeM|k8!RJRS^og_hh&wFUU7E z)ciFnkE{7AH+S^}LCb46&*1>%y*17YcwVmO?WB3bY5u5aS82BInUl`f`k{6GJw8N2%bsRg)z z?@c~T$(IT7vG9cP&Wgn!pnq9|kY5n-&zgiq#xNg}pSpY|$d7pyUMP{@rW|_8v|J*go&4 z9E|zK^Mq^o3;t%;UoCg>$fUlWC!l`(4c4mU$2z9)aUKAx=6UOE)}NAqEt{f*0q zN%iUWBe>7f`zd}e@y=*Yq1>LwE#;3(P9$DP*gmfl(Vm`%rVnW@2H&4$^I2p5@VXN9 zZTzb>BD@g#0Ds)KZJ+_G&#+(Qd$6Frml{7s_VoWX*Nbf;9JF8K zeW~6DLSCrEd6&p{ZXI#&7IeMMSd_H>dc-` zGXJHHhvUb10dqOWd77c$yjCUyWxkOgcZ2am{y={eXWCPKaet}pad>DC{QeT-e<%u5`h@iPSn7k` zpjZ5Pz)!%;k4ga&dBFG6o7~PoO_dX!|z3yL7PW(VW;wSY3{BCShK=3o?c`pO` zI0wIsyu5CAU=Q>-KA+ETZG-K}eKO_aC&>31$OrVpj;xQ9|5xz&&M&c62Ib50LwX!h z;h6n@I!`?Rv3_Odx2``4>gs$0{*WKTe;e;?J3$5fZp-?U+}ZW}Bh*iB>?A^u*p`oK zARrb%g{SZ7{C4~An`?>mXXn?;U_N8~)9>!`w(fA3R?I8V|L%_B!}{niy*b*>?2q1$ zq9c4D<5m780IL>=4lGk@mS|IO#utUYe{{g&*{ny}6BqmqaJPYw6MVsQ{Yll@IY z_lFq&-(E#|Tb3fJkM%g251Vp+NBX?J2Y67>{UYJ#fOp&O`~>&uw|~OU_*40`@yo=2 z0PVf=So8UcfA0&zsxLLaBH@Xwcj7-gVSdG_z~VnUA^zIB;P=1(8TubbWB%zB_`D*1 zJ6w=P+B{=UiF_@}Y{>vlPr8~;GU@E54Uo|5lbAN|2UiQ@g4>hG&1#1|tB zd!eBE;5eZp8ctiJ^Jw~OD%2j=^x$+v31Kj>e{v^VnS5!9DP-lad_hxU#CsGh%9A0K4+gK~}> z{pKy>2A?Woy6oM!NsB_Vm0xKH&EpKlbl( zABunCPqKdjfA$kBuAAriuLA#7|H(013jA5u$5Ht-^j{uFr4Sy&Xm&9C_2BNz?>mX{ zUtWi9F)-$Dr8qJ4LHR)y`<3w<7VCZm^Dj<&?tE4JnWr0mzFtN7WOKp~Wd61bM~zZC zw*GkH&s?5n{U_HaSG<2E{FzHxdOj_?tpx`STce;nN6j{_EeRq?b7=soFX>D}*U={@a6m5|<>CVy~Z=pFd0 z3cXYB&nonuF422)Q{;>E6nc**avme}CB4%WE#rss*Q9rMbDE*|Xe{(~{;GW(@rVB7 zOY-|HkYAy9Ka%#}Lhl7gCGre<$CF+}`_1l$V_+-SYi2%he?7DHg&)vI;K%+q;Ro8k zbsW$~{8wcq3;5G(W%&W6T5t)Q5P_f5KYLT?@BH~U)L+~2qm>B1oIn2({hkH%6MnSxeaTz+@yODb^8bF| z-!1gs+9tg#7ts6G@j)PeJWtT|QTQqJPki}*L3vG1QUm&Ts*b*}UpjSE$m*kNRB`+O z{nyl>a`f-${?O5X-|Hixe^vZRMc!pTY<8SI2zx(?_xBxtU@sgsYE_YUNB=eNKQI2m 
[... remainder of base85-encoded GIT binary patch data (prebuilt shader binary; not human-readable) ...]

literal 0
HcmV?d00001

diff --git a/src/nvidia-modeset/src/shaders/g_hopper_shader_info.h b/src/nvidia-modeset/src/shaders/g_hopper_shader_info.h
new file mode 100644
index 0000000..5d78518
--- /dev/null
+++ b/src/nvidia-modeset/src/shaders/g_hopper_shader_info.h
@@ -0,0 +1,328 @@
+// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.560.00.dev/gpu_drv/bugfix_main-17009'
+// WARNING: This file is auto-generated! Do not hand-edit!
+// Instead, edit the GLSL shaders and run 'unix-build nvmake @generate'.
+
+#include "nvidia-3d-shaders.h"
+#include "g_shader_names.h"
+
+ct_assert(NUM_PROGRAMS == 34);
+static const Nv3dProgramInfo HopperProgramInfo[NUM_PROGRAMS] = {
+ // nvidia_headsurface_vertex
+ { .offset = 0x00000000,
+ .registerCount = 14,
+ .type = NV3D_SHADER_TYPE_VERTEX,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_VERTEX_B,
+ .bindGroup = NV3D_HW_BIND_GROUP_VERTEX,
+ },
+
+ // nvidia_headsurface_fragment
+ { .offset = 0x00000300,
+ .registerCount = 13,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_customSampling
+ { .offset = 0x00000580,
+ .registerCount = 40,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_overlay
+ { .offset = 0x00004680,
+ .registerCount = 32,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_overlay_customSampling
+ { .offset = 0x00005200,
+ .registerCount = 32,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_offset
+ { .offset = 0x00007d00,
+ .registerCount = 15,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_offset_customSampling
+ { .offset = 0x00008000,
+ .registerCount = 40,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_offset_swapped
+ { .offset = 0x0000c200,
+ .registerCount = 16,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_offset_swapped_customSampling
+ { .offset = 0x0000c580,
+ .registerCount = 40,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_offset_overlay
+ { .offset = 0x00010780,
+ .registerCount = 39,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_offset_overlay_customSampling
+ { .offset = 0x00011380,
+ .registerCount = 39,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup =
NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_overlay_swapped + { .offset = 0x00013f80, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_overlay_swapped_customSampling + { .offset = 0x00014b00, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend + { .offset = 0x00017780, + .registerCount = 15, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_customSampling + { .offset = 0x00017a80, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_swapped + { .offset = 0x0001bc00, + .registerCount = 18, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_swapped_customSampling + { .offset = 0x0001bf00, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_overlay + { .offset = 0x00020100, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_overlay_customSampling + { .offset = 0x00020d00, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_overlay_swapped + { .offset = 0x00023900, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_overlay_swapped_customSampling + { .offset = 0x00024480, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset + { .offset = 0x00027080, + .registerCount = 20, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset_customSampling + { .offset = 0x00027400, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset_swapped + { .offset = 0x0002b580, + .registerCount = 19, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset_swapped_customSampling + { .offset = 0x0002b900, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // 
nvidia_headsurface_fragment_blend_offset_overlay
+ { .offset = 0x0002fb00,
+ .registerCount = 39,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_blend_offset_overlay_customSampling
+ { .offset = 0x00030680,
+ .registerCount = 40,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_blend_offset_overlay_swapped
+ { .offset = 0x00033300,
+ .registerCount = 39,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_blend_offset_overlay_swapped_customSampling
+ { .offset = 0x00033f00,
+ .registerCount = 40,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_yuv420
+ { .offset = 0x00036b80,
+ .registerCount = 40,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_yuv420_overlay
+ { .offset = 0x00038200,
+ .registerCount = 40,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_pixelShift
+ { .offset = 0x0003a380,
+ .registerCount = 32,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_overlay_pixelShift
+ { .offset = 0x0003ab80,
+ .registerCount = 32,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+ // nvidia_headsurface_fragment_reversePrime
+ { .offset = 0x0003b780,
+ .registerCount = 13,
+ .type = NV3D_SHADER_TYPE_PIXEL,
+ .constIndex = -1,
+ .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+ .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+ },
+
+};
+
+
+static const Nv3dShaderConstBufInfo HopperConstBufInfo[] = {
+};
+
+static const size_t HopperConstBufSize = 0;
+static const NvU32 HopperConstBufSizeAlign = 256;
+
+// Total shader code size: 238.5 KB
+static const size_t HopperProgramHeapSize = 244224;
+static const size_t HopperShaderMaxLocalBytes = 0;
+static const size_t HopperShaderMaxStackBytes = 0;
diff --git a/src/nvidia-modeset/src/shaders/g_hopper_shaders b/src/nvidia-modeset/src/shaders/g_hopper_shaders
new file mode 100644
index 0000000000000000000000000000000000000000..fe594490f864e5ac5aa366efcdc7509e766ed230
GIT binary patch
literal 244224

[... 244224 bytes of base85-encoded prebuilt Hopper shader binary (not human-readable) ...]

literal 0
HcmV?d00001

diff --git a/src/nvidia-modeset/src/shaders/g_maxwell_shader_info.h b/src/nvidia-modeset/src/shaders/g_maxwell_shader_info.h
new file mode 100644
index 0000000..be520c6
--- /dev/null
+++ b/src/nvidia-modeset/src/shaders/g_maxwell_shader_info.h
@@ -0,0 +1,428 @@
+// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.560.00.dev/gpu_drv/bugfix_main-17009'
+// WARNING: This file is auto-generated! Do not hand-edit!
+// Instead, edit the GLSL shaders and run 'unix-build nvmake @generate'.
+ +#include "nvidia-3d-shaders.h" +#include "g_shader_names.h" + +ct_assert(NUM_PROGRAMS == 34); +static const Nv3dProgramInfo MaxwellProgramInfo[NUM_PROGRAMS] = { + // nvidia_headsurface_vertex + { .offset = 0x00000030, + .registerCount = 13, + .type = NV3D_SHADER_TYPE_VERTEX, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_VERTEX_B, + .bindGroup = NV3D_HW_BIND_GROUP_VERTEX, + }, + + // nvidia_headsurface_fragment + { .offset = 0x000001f0, + .registerCount = 15, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_customSampling + { .offset = 0x00000370, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = 0, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_overlay + { .offset = 0x000030f0, + .registerCount = 38, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_overlay_customSampling + { .offset = 0x00003930, + .registerCount = 32, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = 1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset + { .offset = 0x000057f0, + .registerCount = 16, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_customSampling + { .offset = 0x000059b0, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = 2, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_swapped + { .offset = 0x000087b0, + .registerCount = 16, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_swapped_customSampling + { .offset = 0x000089b0, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = 3, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_overlay + { .offset = 0x0000b7f0, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_overlay_customSampling + { .offset = 0x0000c0b0, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = 1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_overlay_swapped + { .offset = 0x0000dfb0, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_offset_overlay_swapped_customSampling + { .offset = 0x0000e7f0, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = 1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend + { .offset = 0x00010730, + .registerCount = 24, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + 
+ // nvidia_headsurface_fragment_blend_customSampling + { .offset = 0x000108f0, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = 2, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_swapped + { .offset = 0x000136b0, + .registerCount = 23, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_swapped_customSampling + { .offset = 0x000138b0, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = 3, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_overlay + { .offset = 0x000166b0, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_overlay_customSampling + { .offset = 0x00016f30, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = 1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_overlay_swapped + { .offset = 0x00018e30, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_overlay_swapped_customSampling + { .offset = 0x00019630, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = 1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset + { .offset = 0x0001b570, + .registerCount = 18, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset_customSampling + { .offset = 0x0001b770, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = 4, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset_swapped + { .offset = 0x0001e5b0, + .registerCount = 18, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset_swapped_customSampling + { .offset = 0x0001e7f0, + .registerCount = 39, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = 5, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset_overlay + { .offset = 0x00021670, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset_overlay_customSampling + { .offset = 0x00021ef0, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = 6, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_blend_offset_overlay_swapped + { .offset = 0x00023ef0, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // 
nvidia_headsurface_fragment_blend_offset_overlay_swapped_customSampling + { .offset = 0x00024770, + .registerCount = 40, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = 6, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_yuv420 + { .offset = 0x000267b0, + .registerCount = 48, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_yuv420_overlay + { .offset = 0x00027730, + .registerCount = 45, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_pixelShift + { .offset = 0x00028f70, + .registerCount = 32, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_overlay_pixelShift + { .offset = 0x000294f0, + .registerCount = 38, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + + // nvidia_headsurface_fragment_reversePrime + { .offset = 0x00029db0, + .registerCount = 15, + .type = NV3D_SHADER_TYPE_PIXEL, + .constIndex = -1, + .stage = NV3D_HW_SHADER_STAGE_PIXEL, + .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT, + }, + +}; + +static const NvU32 MaxwellConstantBuf0[] = { + 0x00000438, // 1.5134e-42 + 0x000007f0, // 2.84744e-42 + 0x00000d18, // 4.69715e-42 + 0x000012f8, // 6.80471e-42 + 0x00001478, // 7.3428e-42 + 0x00001630, // 7.95938e-42 + 0x00001a38, // 9.40552e-42 + 0x00001df0, // 1.07396e-41 + 0x000022f0, // 1.25332e-41 + 0x00002880, // 1.45287e-41 + 0x00002a00, // 1.50668e-41 + 0x00002bb8, // 1.56833e-41 + 0x3f400000, // 0.75 +}; +static const NvU32 MaxwellConstantBuf1[] = { + 0x000003d0, // 1.36767e-42 + 0x00000770, // 2.66807e-42 + 0x00000c58, // 4.4281e-42 + 0x00001240, // 6.54687e-42 + 0x000013c0, // 7.08497e-42 + 0x00001578, // 7.70154e-42 + 0x3f400000, // 0.75 +}; +static const NvU32 MaxwellConstantBuf2[] = { + 0x00000438, // 1.5134e-42 + 0x000007f0, // 2.84744e-42 + 0x00000d10, // 4.68594e-42 + 0x000012f8, // 6.80471e-42 + 0x00001478, // 7.3428e-42 + 0x00001630, // 7.95938e-42 + 0x00001a60, // 9.46157e-42 + 0x00001e18, // 1.07956e-41 + 0x00002318, // 1.25893e-41 + 0x000028b0, // 1.45959e-41 + 0x00002a30, // 1.5134e-41 + 0x00002be0, // 1.57394e-41 + 0x3f400000, // 0.75 +}; +static const NvU32 MaxwellConstantBuf3[] = { + 0x00000438, // 1.5134e-42 + 0x000007f0, // 2.84744e-42 + 0x00000d10, // 4.68594e-42 + 0x000012f8, // 6.80471e-42 + 0x00001478, // 7.3428e-42 + 0x00001630, // 7.95938e-42 + 0x00001aa0, // 9.55125e-42 + 0x00001e58, // 1.08853e-41 + 0x00002358, // 1.26789e-41 + 0x000028f0, // 1.46856e-41 + 0x00002a70, // 1.52237e-41 + 0x00002c20, // 1.58291e-41 + 0x3f400000, // 0.75 +}; +static const NvU32 MaxwellConstantBuf4[] = { + 0x00000440, // 1.52461e-42 + 0x000007f8, // 2.85865e-42 + 0x00000d18, // 4.69715e-42 + 0x00001300, // 6.81592e-42 + 0x00001480, // 7.35401e-42 + 0x00001638, // 7.97059e-42 + 0x00001a80, // 9.50641e-42 + 0x00001e38, // 1.08404e-41 + 0x00002338, // 1.26341e-41 + 0x000028d0, // 1.46408e-41 + 0x00002a50, // 1.51789e-41 + 0x00002c00, // 1.57842e-41 + 0x3f400000, // 0.75 +}; +static const NvU32 MaxwellConstantBuf5[] = { + 0x00000440, // 1.52461e-42 + 0x000007f8, // 2.85865e-42 + 0x00000d18, // 4.69715e-42 + 0x00001300, // 6.81592e-42 + 
0x00001480, // 7.35401e-42 + 0x00001638, // 7.97059e-42 + 0x00001ac0, // 9.59609e-42 + 0x00001e78, // 1.09301e-41 + 0x00002378, // 1.27238e-41 + 0x00002910, // 1.47304e-41 + 0x00002a90, // 1.52685e-41 + 0x00002c40, // 1.58739e-41 + 0x3f400000, // 0.75 +}; +static const NvU32 MaxwellConstantBuf6[] = { + 0x000003f0, // 1.41251e-42 + 0x000007a0, // 2.73533e-42 + 0x00000ca0, // 4.529e-42 + 0x00001280, // 6.63655e-42 + 0x00001400, // 7.17465e-42 + 0x000015b8, // 7.79122e-42 + 0x3f400000, // 0.75 +}; + +static const Nv3dShaderConstBufInfo MaxwellConstBufInfo[] = { + { MaxwellConstantBuf0, 0, 52 }, + { MaxwellConstantBuf1, 256, 28 }, + { MaxwellConstantBuf2, 512, 52 }, + { MaxwellConstantBuf3, 768, 52 }, + { MaxwellConstantBuf4, 1024, 52 }, + { MaxwellConstantBuf5, 1280, 52 }, + { MaxwellConstantBuf6, 1536, 28 }, +}; + +static const size_t MaxwellConstBufSize = 1792; +static const NvU32 MaxwellConstBufSizeAlign = 256; + +// Total shader code size: 167.75 KB +static const size_t MaxwellProgramHeapSize = 171776; +static const size_t MaxwellShaderMaxLocalBytes = 0; +static const size_t MaxwellShaderMaxStackBytes = 96; diff --git a/src/nvidia-modeset/src/shaders/g_maxwell_shaders b/src/nvidia-modeset/src/shaders/g_maxwell_shaders new file mode 100644 index 0000000000000000000000000000000000000000..c2c80736535a338fbb6c3f9235ce09222c0a21a9 GIT binary patch literal 171776 zcmeFaeT-b!btiaVz4z)>y;oK5)koKNbyasO>{fj{tJwS?&U!nR3iUD}b@42i~FnnPF2qNgIP;E!vuC!5Batn>Gc^0Bap znf|}{?ho8>k@HVb{mUJb4r`nb!hq9o5b*E581(L#+a2Q_KEvPr!yBCD23HblXD>$m za~F)`oX2Rk)-aC8`-8n77G83UNwE>UH67D!NoOWu-=JXKXb2(_21-tDz(I^uE#*&kNm{C4A)@@ zk@xnWzis&63|8)&d$UO*@GhFWaX)xr-6X4h#~l5CinKq@b`9Xo>jK#Rj$_VUFU#+4 z^8V?XlH@Cr&Wx7>K2^!rB&|!@iF?s?yEz_ubbhSddEu1FOFo<8ewH)XFX(^y zW-x!>d>`}c04}{xJQcXWOVE|H4}3)Wm%-M6=idQdqC7U4*Ns`ZPQRydaDD8LR5s|- ze2h((qm0dCe2o9D%`Z1(9P!xr(0-;R`GTZ*kSQ4-)z5^SUt9Kqz^o)|%YItRQ}@kn z>@Uu*EoalV9OJCgI2AtmJk9s?a)A1jhJYufTu%7u*}Tu|aC+H6JHv9?zZsj~$p`&c zKX>V=Y<8?1^}mAg(f94prWZV$a{J%yn)ANRf5EZoCg4H8*Zv#%bpOcFHNfXS<5e8^ zoY(R10Iu}=R_O8f*t|Q!KlESlT>joN_xO9;PfgS>n7`)lj^pt6w)yd>{_|ba%ozUe znE#i*H_bazZWz9~tQ}uY$7eRbNbtQA?A|vxe>$BT0}!Zohw-zwOsO#+UhV>a9N>4S z6PUz&=?8IV^FI#aqt~6mRnWPUIKP13fG_^a26Q19oZ;``V0%=K`Q@;$>G%1;aZU?^ zjZr!3JJ`3hUh!a)zXMM=ukiIln@{I#8sx{yQ9o0#`6VYX1m`dwD@XtD1Afr=?BG1N zzX&|B>-P+gzYBb!d=2=;zsqX<6*|974E&+*#2--FaK3HMnv2K&Is%Z}@k)w0YIM_@s{CiT{khH*){s-~GE=pUdce zQvAfY%`Wy;KA$!#V|4aT@D0YVoc5o$?Q=WV7iLHE4|rXa4kJ?=pMUK5=GEZEC!68F z<#DtIlrnv@q^4@^blk1(rpJ+MvKij^}`L1)5)45!d8n54MdFDv|#V33DG=DF2Tyq5T z!T7j-dp2c`6kdGN={gMe+?+8-(o~N9hy4-9|DNHV?_pi=JvNr!DExsR4F4eT8U9Ld z6#gAq?@sVvczkgO?d=2qX7gq42Xqb{oVz~4Z%dDG{&GKI=y5+k!uhm<^S3GIVj$}i zgr8=3>Hcg6Xczq{9VryP#{G=ZC+;scmOkNp=YE(zeYczOJeT`Z`V`1|-x2r(-tTaK zg#hi2^`rEq-TvytykcYNmBJ7F1NZ0qDegz;d7#TRH2W z)9;ErzqU#Fi+j1}YCkZ>6bQc^*XEA}y}vS_o&`6yh<;z&Gk%SK7rHimGv2zN`qqU^ zDs}v5Xx=&MI7^(SyhXstoB%hK<{B~Gsgea7!p3gbKakJtIwLmESl z-2L3kPyIE?|KRTe{G)ubhhe1Dg8w; z2<(5GF<=E!lU6O|!u8;HQF>P1ImB@HNzp%fF|FwDn?=bKXdWy#$!+IFg zzG?B(#L}M1#{tn9;Ln>u|6P)=;>RfeTTgjetP|)?O7t-utdDa!m2?hcU!Yw-bCv0f z@fNpe|3klY-#O6+Gxt33YUR40rt-D*yo}>!@FLTfd6A2^MUMT-yyfHW#1?)F!ByUO zhZd|{e15@}>-ZrTuzcyuIeg)emhUqB=jW}wJ1=tW!h*`L*q`6(ngx+p=jV^}{yTKY z%DcCM{yo}1heW=ePZm>s?88HcmQu7|8Na@J#N-Vw|0{OWIh=RV}ud4W@v=r`8S7kK3di{PJ5VxRP(jy`w&M}$AGA2ABA zIgvXT0?Y$&7W$Rx&RO}*A7I?5zb)_s|I7PN;nRU0isal{$Nv5=guhr=5cqZ={}e7B zo8}+zdzL5sPHLC-oiFfY_yB)=;vdH3N?ixWUzFpI`JV+@zx>=c&DrGQvC;mvd{J%A z_LHBp{V0E)m;UnkKc47^`6sOJp#=*MKM(j}-E_TleDev)?fSK7{qke*Um1h{w&-Ut zUZ#_GV!)5e1>UNMnae|s1^V~4?C*sE+eBQ2)uFD1C=ahf+eak=22_7Hv{}GQ*`JY4n 
z$GP9RL;LWLG(K4u;2rZnx?i;aPRH`iv9Wyed69>-|9sx^l^t29L$pqS&#?devlcFg zEMKW~`eyKDp8ugayWcwVLO*1`-Lde^%_sPd^(z!=giCE7{oC_r{ywdrlgD@TXY$y0 zNZtGrr?fw)yf=Rp`K`4s(tfT8rib=5wh2F67T} z9_`^=pQUzCzeVlj<$Nak8KWM?6`Sx6$^Ox7+C22PpwC1fv%gQ@ogU~b__uD?rpJP< zA29zTd{0aGHp|B`|8vj#Zr5?TS9m_n<_@R1);6c3{dI-;*Oc(F8R0Kk{$%I@5kPzJh|BztZ;2)TO!8oCxzwX>?qkM_Krw2>s67=HUU}=TYTd}#H z`f%HXX`~|OKjO%|@U%V6Bb1Tj8 z8tIRR`j3JwzaVso+b8`m=1Y2yFJPRk{}4T~_Fs;zezh$6)lx8}{}sJSv0&*)X-fZF z$-8{M7KIMXaR59+ zz4Vm+HxPYo4E&~YH&Oj7=8yV`=#xXy^X8`Xzxks5eL>p6`mPMuAN7|+uNzCd?*o3& zIx_q&L;opy-7fHlzSltCZ22zp$1(7Y@)#$lS24WI z`bsP+x3A}j>L)ON9yjzZ+^*uGp5H~%yNvM{l^pcH>HYMdmX81&xjjCgK%a9tm)li5 zrux(#(?^D1!1Qq@5B>-3>NrRIiRgdxwq2Ze6Z6;oFf+xUFnrN|E#5U}18Udmo$Bdt zv;JDo*J=U!VU(+080$ST{zAdw{&as;3vbH!fuDe{o~Jr)C|B_}(ys$=xV<>GdRyI} z+&_;G0@oOPwVl!Yt^Okry?Q!5ssF9Zey^vWnP_JWzDH@lEB!cb`;+J& z#-GG@uy0SV7YCG{TrUmvzbp61zElzYZ!xK|9-&hDHBNbagrCKHeR|eS^ne9Z-t#SA26MJJO%l z+$XraKA;r(?ZWpsul17w8p=XP)J8YW-IFlU&{& zP#b4#L+1S!@J|AYP;Ag`ZpKQ0j&EKna$GjSi#vdo(Z=nya zR9y2a;9P4O^J;FS|1FiQ{kUN>_g_l(*Z&sdN9cb+XK9@a zg-QLd(xX7&9uy|^zr`v2Z>91G{qF<(v*q8FJ_WLl%0CBrtN*P$ME^T8^9cPf&J~(h zE@%04rB@2SK=3cf{WjylOt8QHw^p-yS>P`SCgHDP`crS)eN(Mk{jkz8rSITdxPRBR zdSRtw+J8>)JLmop_g~GS9o|3F^v%-GY5KM~)Z2r~fDcjnrtQ0e|8Dp=w_kIuK9>C} zCg`WOZ|R+me-q~;!4Gtg+gExwY@g^J$sK9ietp>fiaoyxT%gD0(h~6V#Clji!)b}& z_gdTJ%d05wA8k}>{5=S59`yMyKxvIzJ7v>D>Wh>?ir^RH0^^~`$fAGf<#z+27iP_+CFTJxx{z+dI{cbL2^E0N# zaz>Kayy|x=6`Rk(?~UfyD4&=4S^aL7^hz{88GoJpL;x?<@0R)Zr`JROD)aw-`Ua<> z-!QIsh4blT)gd{-D*_%!iGKA-KU>=1d>Ae{eaLUVzvP&J^s~0yb&<#SbQ)>6wdQ&F z9n#wDe;>t(}uV?sY0MJR_}*7q@Q($7YO?_>H|);GNTR891=G1!HD z^|P$sd-sfhpW{J}8rRSIHIeT` zPpbZ2zz;sZ)UQna!eZ~PF{zP$w&tI~`dwU8|FJ~c(fS3^pQ?Tq@*wxm^!e#uHZ|$r z6Ztwkd;{}&#oA|~|8KQw$eVUu+EG6yZvUktCh%4nUt-Arq=yZ2SO@GQ!0#>RwIjxp z{n{z*UyJO`@vOlHH z!vCmg^X=dY*DJNhs)uF$9OUCj;2uR* z4;w{`fMaj@c5v-2bNh&)Jjs*TCn=UAqu_sj7jooYu*?2)O@UWQ;H2=N`8j|e{&Iw` zodEp-$6C;aDX*l#3%O~Y5|ODTd} z0)G$H|Kax&@<9|?d88zAc`5qpzVeN(3urOxWA*$lmF#*%W$0zI)Q^>;^!%%pZGTbO z_7h1zvcG$g^r!o?9RWUAFO@%O-Do|_wJVN+-VE!tkmH{^w_h_{;md9es`ITW3Hi{1@8lzc7E_0DsG~)_nd;kp-{mIwTkzw}$l__aZ27Gb{j%~Q#>IR>8U7B$hmik(!T8GpuXg!K zZns?i2B!=^$fw-im|ePE2A%>vf*#_Sc>QJ`DRO{{`*8 zL+gZjb)r8W^$+`HxihJk?UZ%>Fn*kmr5Nj$rS$`zVEq~mOV4fw3#^~>N~_$iUpmfd zy0pRR&EOENPi6-5q4Tyv^Fo?Xnz8oT6_nS&7ZBYeKB$j#x>`m%IFElU{AIDcVfy%e zy>9!3p7sAEeX7O3wo$!sY<_Q>zL#e(9`GAhzk144DmI_cIgIb+@>M&&#ohZE^k1rM zFg%J$jZ-%R{vgZ#*F1kOgYhEoXKGGA1OIFAvqYbH|G>Z4@h~prVQ1yk?gR4{>|^Zz zj{MK1|Mecz#~$pbtIYqUL@%66SvZmZKnnbc+5>-KaE0~0k3o+{`rl*0wZAYp{|xfv zFYTb5(q4ExWuWi<71-~c=TDiraFz3ZkIt17AMjwWviw2s&dTL~lS$kBa>hGy4a;=hdZcQpck2mfxBJDPGHzY6~V^HSH;z3q!G z#vKe+&F9@`|G|Rr4|g!Wm0j@H(7fZ%$wtrMASrz$6CLHWB>aC6o!rZA&o-_x9%KIZJ?Lk% zv(NP$d~erLpZ33u?~=jz0iR6!xh;e8(0rNUmo+bNx<~grKtD6)dCn_+hx&rywMX_b zzz_O)h8Ol$h2UB*onbig`GNLV%NBkBI=2(SB2Mck=Lg2$llCz^wo_Pn9fD>@s3!biGeUo-M;e1fvnq&A!hVx$){EOOKa|HkL?AAqsb71}+ z&sXV3Wa^wRn~cB#^aXI(3+8xz{K|2KZ=K+qEsKea!+y&^5V)9^mV_+R%v$mx@`KR(_o4h{NYLWAoocX6{ z{PpUasPOeS8s3|{KC$`lc>Zrv|Df-6)8Kr=WCcEUeSlwlzX4;%1mHJKv%tDFg&y6i zS@-C0j~kCLe=7Ll3(Gx;%~lU@i&)P{4JQ%On()B^X3HSbLPXG zDxF~Z4fra3FPAr2j*cSG*~bZgf!}9IKYyP2oqQg27vF=sxE85q~c7Wgh%B@hv`n zPbr_@;EzJ+SNh<0)Gti!-Mqiy-x!n^fj4yj$+mCp!6%k~4E$aJeyTk@DAx?@iK1=B zcfQ|&@udsrIUkgPKi0nw8=m;z9^)s~qxJ!_KO{PY`zn1JpWLq++Q-o^eP32Px$4*S zewU74=@9K-j4z{lk*e6$)t*lEsT}rpwyWcO=Jt!6t}W~NUV{E~1$K9}tLLi(Ke2yf z{?m4KwYOXNLH`^J2Ho1y;YYBX)R#59ouL5xaU$^d6Piov1%E^sljUr|S8&zTT@cA}6NEKN0dH z@Q>stqxV5lqDOOGJ3fXl_%C;4SN}ZWMY(M4>37J#=re7|mxO=4WlBj8QilG4^dRrT ze}meO%fUD4UX>Ya;9O+?Gv<$n;}g4jO6=`TcY;Cg}B)v@7L#@DwnApQ{d 
z6Xc#%Dm5(U`N3a;f1einIPhVj{u1~N(vQIJg1^7d8PlK8_y>#J{$k30@47bqp{!30 z_(%1DS6rX)mGZY}|3m*m`=9c8^GW03UX|L@RsVthcgd~}=b&BMSBeh|f6#}#fni|G ze!3)fdOjb)e_>y;-8<)eg70lj+qWO6YbBWe$#lc&olq=GPRFk zGyj1E{0HE`?J@t0@ugDMuC91ElX`4l{ZPgIINSZTy=nad;L7c~uFK<8{f^>u#r?&J zaWZ`Z{W9ri+1{<~svb+>i}kVaOrV7uny`1qA^TQwOv)uTTY-e|Z zkFkHmX#8M@02i#gg>&Fcj9>Sc;r@PH>dXE#oBxpN?Nlw@!XR)&jqJxt@22S-;A8FQ@E0ch z#{CRVH;YRDdc6Nl6~{98KW9KG?nzDA&r@PQcW{%MewWJq0;j5<#eG9_1ox@(&O_|y zp!Z}yt5&W3T=$RK&$NGoa%w+s9%4TiJu~KygK{+gT*~MBL^EQaEsH&z=MVZ&iGRrL zRtJ=Vz9lK1uhO^LfbygJQ}wC)Q>mhr?-~P2Pp+3|-ak>_Egj-~b3iHV>y?jdIn%eA z`EAbEozHUGNS)Jidw!H*ABX+C>HYzKuT))g1bH35fp|yob;z!cGzACvBhJWvp37PL zx!Z*O9N(wu3-E*CulJ{vz9{^4y>r307*5>YKK=_cGe4XCT$9;8T$NHuqy5 zVn0u%_P3wAuH83|+^B!7pHj)TJEU)=9^*Ot|6zUH66hV;b;WM3^iuinx(mJq z@>4_fAZgKmJYhf|f1?Bc{8f+y*uTLS(Cz)VtkdWMm?=IbYf?QcL;FtJ*7W9{>AJcc_3xz9?`=AFWe^khqKO+11<=Ko$t!~kM zh^o24zk~gq^LwQKyPU$Wp-)Erq-4KWsy2Os_`A<^%+M|lJ2H*4F5@TrIL7CtdHkm# z7gIUxxUl1sym}M-opToUC+8~Xz0?g(P5L#;zHMzin_&K2AM~H< zlRX{z9NE#4=F4ZK-1O+vG`?FqJAOz0Z6x1; ze}lb%#^aMz)|23VzxFCq%JB3L*`x6r1#gFWFSMNa;tbP4oTmR*r7ukHu zxnb`?`8%B6T!ek;a@R>QeiHuHFc0{ln;UFjzrBg|g?-TRb~w-S|JXe!=pP0g-vfLr z+1vFVRA~?V3hqBK|AYCJ_h|U8qjIUPw&UWKt%c}%LfH= z4@&Eot6=`(|xFtv*^%0D2+Fw z^;LhXcKdPplkm4PWq*hK!sB52{&XAmW1An{g93g-AHeNHf35cSqO>z%-zWRKS+(-H z*xzAK$2wa3yMg{+?H}#KHE!3>Ucvr2v99)ay$_}K_c%@EBl~;qupLhu=N8FD+aiZr z`#bLUhW{|fvpSdNqXIKXe_uMmF;aD)9_;M$gZPJDj~_yc&Z z5Zr6Ux<_Ofc@RK{o(%hF70#mqhR^~`z^-< z7?WGtvc4*x<;qvt|4G+X?e9g_UmwB!GmD>qb3$~YCh%qWL4NgRea;K~asP>%F9ojf%y;HuT7pw!&@4RIGjagRh;+biyl~_gh4FVV~~U6!O8H71&SHHou&B zH^A4u@BOaR1(P#lBh>{&%)?7w{naNssD#UA0?R2~Mz|2!E{l-PQ69w)f43 zw%$i;o7Vm(_QQJN4B&@)%^kMC9FOep^?qX12K@m2KNZ`0ZL#BW|Fr+bo;xdc+^*OY z-z5Ct{dbJ#TQ}COsC*3Uhiu<1;RI&?Q*UwFzI)$%8T3{3=IVc^{DIoX+F~Dz+be)| z=_BR#n0@TFYvPa?s8`n8?Ps{WDfZkw_=@0s_sU`yZil;kzE?{~v5&yNlYQ4Ti(8zZ ztv=57!%BkoP|o(TvzUMLakd|}E013!`|j+NeK!{SE+R!T928HPKLh;m2MKA!pD8?8 z{y}`aqWC?ie=hDvF`NMZdxRhAcQ6g7AJh2-`?tbN?ciOpZ^Qo0{%;kre|JM-JYQKa z`xcH*()uBOWiwo{e2>6M?cvvB=l<`$mdaGSJ%&%SnQ&UK9_^7Gd~Xru+=`N4E%0j6Z;HUEtLbIKkt9;l%gL8NUL6=$*Z)`CZ;e$bjV?`Jd02e`)uf zd684qFIxDel2Gu=mwlJ`$e$b^RPACj;Zgox3ZLWjdd<22S->wz01n_&sdSFhN@DG>>|X%*c{6?f zK2x%G*s5pkunND5`7+OY#=O93)jZGXH2jLzezgobGXcK@@+8rtlDW#M@*mtj=$9w- zOW~Imc(VTs@Za+`7@mrUBmRGd?c*vJWYxYW@cfbZ$EbZe$p{=X7QRp;uzx(_S24WG z84KTxy$@<27dMQJzYAk<#c`~_4*CY7vPsq&RklqYFbdS!LYJhV1UwQ$`AwL%al=J!j^!+}{{pu3>y-0rX>=*48kRr|~ z+P_EVD=xsJy%Xz0KR0)g^8pX@9~|g^ z=zNNCz7u}}dp7eCMT+kW`)_t|m-&kvzLWo39(fx-6#j9Sk7VFiXI{d0*v(cb&18uF zeEp3}wfQ?|0e>&Iu|@hLldAFe(fJSk2=oVq%o$jCF0Mt;kMxlSA@aWXuT!~)^~Cx_ z{tl-B^dyvjll&`CPWEbAPv|kgzoCAx2!2oM)B1|P!pKj-)_>IcqT)~W_cw_jyWjfh z`8XXv)=MV+5A?n}*6w%6|3Kr#(*DK%#9?CX@@nVS^ODQq|D$>dJzrJ-q48t&zP!Qx zVOsYT?#J8pxA9}W!-O}8pK|y(r;Z2z1B#!j@neB+|Z%2AU(8qLYlT+yT`1}akOI!{; z-s^_!XZSshL;M>a89x^BSNqhj>Y3CJL)L#7KUUAF<|uvu(eF%7;s-n|ek|g~qW!gH zii5C%RQ(*JebZ>td`IyEG)@4Q6MPvSr-_T~PphAU zD|!+xe;)F87W&Jh&@W2-fR5-PRX88p%ao4C4`@U0 z!}mVg8b5X$=cVYEu>bML{E#bt4#dX{%dtL)htBy&#gEnau6|(kO$uN2Z$SKt6~cq` zRQv$glli;GkHva(Uh8YT0F7(A2z;USQ2$TWXQ`h9@I&f@KLCA)-J5@p_??{ot;dhW z{Yx4T=qv0$kYj$z_yMXHiysm{pygfWc(hn2+dd_%{|9|~9q|)9s~__`>*t_&sQR6j z_w&aw7?o%Z{V+*3niP$7)$|388L?H@mOoc>|_PSe`m-2tVD6Fuespz&i}2PjT* zME=h>Sj?iJu z9|z`W{>=j7$0BZOD|ea4)gDkf^3RIBU*+!_Kh_mJq|)a{#gENPT+7~ojX#CiG5Ag%W;>R|d*8ia}m*%*$kMe)edvFT>rmS}}_@cTdha< zKMe2LPsR^WdKAd|1i4Q$ymWuIru-j*_!02~K>x?>>z>4q4dk4CSp3-FA@KuT_YwXN zQ+jQsR|-GHC)Wr4!uoN0`@rw-Dt>G%aj=wsz^}kU4qxx4&$8}XYr}LYR z>)Mfcu4D8GUA(hbC|ExZoP%uNnWlf!aSGHvp!Cln`Um(qVKTxG?4QC^+*-ZQPx?_gvA(cO))r8g@|3;VAo0d}$Ov 
zHf-DP{0<5G-?M)L?7xr;pg$tJKLnIDjUS6wF6jP~i9(V)M{#2{o~*_fczFC+*#8k< zEGO|}TM|FE)3M_?!urosuM4x(qf66_>`C6iI>H^N$(Qf_=EfFaxXyAhsBT8du9-~2%UIKq#jr|{Y0{WmB66#l%06XLY8y!RK>Kjz=-S@`DX5T^ifUDf`g=j(J_ zSB>YY{Lge;S6v694|IOvkI&Pdtl#_s=zJc{ZyigrqPvQcr-$Y*Ul;FPt`l0ZDukmB&$K%JsKC1C!d%~B` zNxWE&fBJgY%=O@*1$sFL>*Eo`iS5nXIJ0WsU6`}{8`i-bDT0rdc(FYGml5Ap;>zl} zz<*=Qq>(_2hYe2ZMK#{N-J5Bw0$aatdaAKP0325+sksXl(!qV|4< z__1ym=O*|vXV#|2Bz|mL_%h{#T-WkJ@E^7DW1TBJuTFc1(?V-o=5O~`#(+Nq{}!05 zY8=PnU7Wao#zE6oA1pZcRCt>|J%CmHyV_XLB8TA5ZS{i&8^d}lW?z)^08Xu}&u;bG>v+t7s1L!O0_Wwrw zY^!DU#4X0NQG6iElfJmsk~p)i;2&H6MB9H;;{5=}Sx;OX(Eb_ej~fH<^~mczxRUrI z>io*Udnyl`RxjHeY)sToH|%$f51TF8yw*=7o=~G~zb}ctRpHx|b{Db!w5}Temg~d5 z_&(A5c7ZokUIQLc%Jsot>%F=dct+m?;31`Zu-EsYB7^Zh=zbIZ5 zNguS2esoFh*#X#mp6}uP0cyXLMZ8()BUNvM@lLd#&HCKF;!!F4zwg_AISoDWzVCNg zeKFv}?WWUKPptfb;$u1en-ls+)xTC^@EaVz)u18r)3JJD?gBiMSvJZS~&0XW=M{#CV|4}bMFN^V48_*-? z2+mXbi(?N;P)~H z2k>9&pDu6(`#|kX-LEUi{W^`iST9_Y^|$n0$AuJ*9wR-m?howS0V!D4p@z2WP@AG+D8BmJ&QpH28hhY8^=TET&{V?L5mdd}z z--{Awb_72=?`b`;>W7B=O8PH0Z5&(0FK%zle^u%5*!-bbhTXdaJu%`~l^b7@@nL?L ze`P@Vlj}2y%imiAN=Njq9pB>lbq18;ep?piH<90|es@6mlk2tgGyHvaKeGW}S)kQ+gH(-iFzW zqyDkKtD%iItLvw59JP;S!@7YEaQhkuR`;jUx2)iQHcfUC@*f3#Li@F>y)XCh_U-=P z#Q8|@1HGFVztX)d(LJ=AEsV8K{$W$?*Cf8I`gfEoOTbt7Rn#n>o-Nfl&;Ff=uhT5s z?*)l78)t1E`1MzaKV@d&FGBuLt%Cg?Q5;m#U)EOcd%ImHX|A%qF+`j{1HI!+YlGvk z-jeuAUU&oks$YMjXm(hxsGET0j7#L7hV@1Ke=e_>Z7y$^<6M3#n7`A<{3;b&4nI2n zT{&y#!}G)Y6t&t7z~#hxEc2+zJmEinF9rX-ID41j)+(IgeVg?H_0MDa?)iWReILy8?(+RN-^X{p|Av(P>&{Yr#0Nn+@)7BW zA@`-T8yJthzsK!++nl%lTWjmZ^bYHT_5NEa++nzuBYX#Zfk&UAb8dM5t#zE`7Jm1Q z^tdgwk96$*8^@cCr{c^$A@|=Bi8l*9J~q;8d%nb()%$N{i8mX^tE|6Gk_}Gv{##7( zd4QKT&eSN*>}|RKRw&zevyfZxtT)A(#d;wf-hX4df%|W)mw5T9<{pMIcK=P^15>*V ze%B{GuJShxi8ni~#~q`;Ac!OXYTUn{sNaksSAc)j_fd79>iOfzyIVf&&*bMbc7Ke| zPxTj5KQi?X)A|kQ_YL9&HT`XlGpl}NT#oZk{mx2qKTY+#bU%#tM??AtpU?e!A}<$( zKPeohb$eyK+pQ6yYPX~vwtt{~)ei^WD&t8EJp$eL_&&w|_%!6}Jk|sI$CG_Ndl>kJ z_HMy+jb<*3HetOe->9oZ`-l;6uv)J{jJF3)knPlRqR+fq$=>$^SfFs zP4M;V&%^l5=eL#js#UuV#aX=H5$~hFowf2qan|;u@nv<0W=spK_qz8O2T9{0{jg&X4(@)A^(R=lQ`R;LGv*Bhuru{q1$cmz4Vu z2mb%_2mb%yV-NiQ!Ouf~Pxd3S-yQh>*Vyl7U;lq=Keh6Y*uP;b*FZLEs>?nAL8140szDe=X@P1wB!7hRIP&*3w>r?(!^Vr+u zA8bM!7YT8e_}+ou4}ksiM(XnV(EnfGnJ&mZgHwoKmD;^vaPNTaG5T%~?wi66d*TC6 zBEFNke9Gv(0gZE$NB<~a{t=B2^f$tNL&}E*@VAfS{s7`#Q_A*R*cT7(4_N*3!To{Z z{el1C;QqkD{Q>r`fd9b3{egq~17vq89^4aYG)$`>&ojA7l25{el?+@Hw z?A`w`m) zNF0~p{Q-&Ra&UhDa`?ghfuSGu&(ZyXPaWJJICuTv{=mWgf${sV2lodK?hlY6nGUT- z{p}Y&9{QQ<2KU<<#=aMXIQ^_&E+8Hb-FN+!jJfYTjc{cttFPAhH|}6n{QL2K5%k}O z2CMA%Z;;yfH>(`~=2OA7AJF?lIefSN|Eui(KOZg`Q{nfA?m2jWXd&E4QNA;8_0ny` z<0Abu#&>7s^1sQ<+Wd0TK7WzkAJTX_msW9q>8!pV^z}Ddw*GQg;`ng-;oq+cnv$t25_lP)t5A<90 z(kA$|6YHzxO@3EM?`htO?fX7WxmQ?kZ{z;%D{tsKA>XI_d*JW%-lV>>(v^2+u2Xz^ zyw5Y2*!Mu@!UTfiE9<2uQrHJBnGdmkzEs*nWUQ~h(UbRAenH~sJxTii%c)E+G+gho z22#{3KDIYU@6g<$`){wc&0?~G`yZu`%t^h)@Hzhdts3q>0=~UE@~D^e_Ab)9HHmqi z;m7pg`d33)HExj z_5t6TIm-F6c}Ci&C3$C${R`-Rw7|C}>yCFw`FF3<`%*8Vyt+C8cZJWJ6yF)|12yG6 z7KMASY2l5qGxz7l;n#b{-d_Uz2;SEfeogx>i^kWlkHK#RzvuT^0H3D7y=gwo^QxH>obJ*4BfDL^-*j5w0=p65 zQt^&Mj{(lbpkU1vyvyXAA@l|wv4$1!WWStge|MB_&zq~_qulAKUV$g}tyU+du z^%J=nuWUUl*AIpJ|NIL2v7)~6jJo(A9=1s}BKA@s4x zAH)8b+dY*;;UzRj?_+(8<*$167^hz~srzT~K2gw~gW>$^Z&acW^7p7dH&6RV*QZ*U zdy)1G`&TV_SEwCcW4L7H9ik`c{@C+Jyn6VO{T`mfIPE)P zL=X0Ac;9OG0^avI%5VwWc3)JoD+0gyY9I5@3f^ZbmVSihG{ZybM^@e&%1pg8#Q1~y zp3r~X|K@v=wHod#1AnQ!2mC3Q7dh`|HaN{@ci28j<>)^N34if>es>S|9*elANbmJh z`3mZn$~fPkckpT7;NE29;hq`AcaZntJj5rk@oTLe)NkT<8ovPb(?#@$yjMayNPQRO zh;u;k&p_WYX?KJB^IYjCK8|;M>HZqZseZg+4DKs>bpHn~q(107==11an{C(9XY&@~kKU8_iv|OF3l4e&V_rI{>9xO` 
z`;UEe`pCci_!qtZM&aOT|2slQGiLJrn_1y&(8(w1{hPG>j`0id(f7$b`L6H3==(Oa z5^pd+xZw~VgZN?}9KLUJ_JYYA-bB-{tY;6O=M)rUWrghLt6T3nF9ZH~&w}zn9)1FN z=O#cL(&76yIN#{~3x3~*?kD1X8>I(4KiJ=A#P1-5xQ)aYIR%?CeM5ZWRET!LKWQ9Y zhvH2mkNM&L7vclXpd9`Q8h1BLSv>wR-q&8ajQ#1@d@zIl5jP`~-{$@D*K|Jf_%JW* zpI~N*%VF>Tj^X$RJ;qO}$Nmqd|A$1^92*bt1M)tNN!fd8A3$8cKKT(KULU6#|3Js3 z``i^%^IQ6hk#_R9MyE87Y%jxB>V0<<{0l~j( zG%l*%vp+BK1UbG1%D*7-d^cGycj>8I&c^Yx>qqZr-jA7Y&Db~wJMx|l<1gyxCEjTy zaUjY?SK|kA|9OdTkWSls_+`28uJwK2;oqm_UcN{Az@y~9xx)2*d6&kMcPjAS1D7*= z!QZ)~cWFLP@ST~laRxO$#0=wKZ<$hx4>2=i-=le#;%{L5@l5c|0sQ(PSAY*;|2^in zb$lM)Ev4`N0CXN{dI0*5RN
2Q%`1jlMUd^<#-ghlDy$k;uN;Q5*4R}NOVVptW8|9h*ruUzKPn6HO zpEPG3ice?bA!&R)^uJ5-1_Ae7+W(3V41dsvoQrokN_aN{{1x#L8fTEtSMa~!{}6cV z-1W%&1ivGraR#-0%!k|eJfGWFJks_f@7E^U&-=gmkoRNI4f+QhxIN}SFh1Y6@dgzS z^Zsv0{5n~G8FxAT@rT4OwDG(ES8g|*c6ppXBK{8aN%5KLkHbOz57NQ2Q}4$>yumni z{#YN|ZeZhX=swW-r_;|)wClysa6ClD56lmL9Cyque3Ih~`k9YGr#~Lw1MCy}i!-(# zKjTb{U-y^qgYO($N5F3?J|K04_<#~`QQ_;whTGHq>3J&xUt3@C0(kZ7v_Gfg3%U|t z&>imY$ECim&*nd*dOH;B);GmK$od}Q{vd@ z{&FjSAN%?Ax;LN{`#kf%V!xkWzfJT9@=JO^`IGCO`}g^K+W!Qn@5=gn9^wn)eUzMM zyhg;Xl`{Tbaq8vjlEcPrRO=r>32*zEIuhsG7e{Bcl@=ATX@u3+lLCw+;J z5{=@cF#U8(kIDQf#rrxGzY_iE{3`>>BmN-0Q-tr;0i`4QR*U!edwoFZNxXCTHO_1O zhQt+YN?bwDeOk-8|4RNt++TGDau3?C*ZLFvmm`(a zzpz-eaRrqQxq=58|HUo-BKKEz_lX}4_@yg0z76O!(-Dnxqx1>;hx`*`Oh(D&VI{d*qpyP1Lh3-V?NzuG?T zkEwo=_xJ7*7@N0Qzf&^XR*yjM7dRwezIbN^{d>(|i{4Mogf<1g%)jIKfzN|KDA@0? zEA#Jal{47q^gaV*KEw~K&TMdA;|97iKaSsn_<Wz9Kj7qen@HEzaleA;OUggb zd9PCA_jNNB$Qy{$r}W85Z*#ey0|KF5m?!>i_H+#TfA5a`sQpIMgoQ zkMS$VIlg>Y0UXlQ{ye`AT&SD}MsKa9s9*f9Sg8SC(m(WXEZ0-N89R=GbA$W2WyhZs zQ+7OQdWQw$_DMgDb-ESc{T<+khxdSKT{sRA;I8;Qj6XPYoaG;0SBgJ4v%zu_#m8a( z77!Af31G$>3cXFbc2j8Sxij|AE&}^}M{jPnL45-++_8IuU;`Rzi>3CP!e;}2@ybyPgkFW9ggOCU5{TRHv%4uZ}!x)P{SjMqKd6i4? zJDlUA_=8~9`^1-)g?~`^jPWm%@ds0r^>Ms#{mKLNK|ioR4C1wuJm-M_pmaihKTGF> z)t~D7tK;$`)h`?1lexH7aiG6Ms_&dLexkmNaR@w@arnby$m*y(xIlij$cWu1aMEZxQd`04aZZ6S0@5H+t-(K&N zP2VHC9b9|M+&%*N1N#GV=ja|;@Tb;4eV5<=Z40~&R$(9ikkm-zy`uTE(r!hW;x=k;-; zCC(SYCz0O0MEnx;?KHm?)UP3aAkn=v%^T-;oU4Iw_c#}AfM0S9LBjnL+v15 z;a%ZhtJEI!_OJ`^xDc=K4@sZR^86>FtsCgfE-m zf6iWjet(0<&+&~hFVDNd`Mp)}pI9HfH^X_w|3jeXq>qM}0Qj}dJoLr%-izALThJp@ z{#bDBFUUS_khga88y}=J2p8EN{wvVWI!&51p6AxCh+{by6--!p!r{jS*4 zS8G9v^y71(y(j$^#cxIVYyoyl)N7P%yC02e{JS4Y`EkUprFy5zI5&`%#eV!I_5VHM z503JDqAC0Ft>9X%kM^rLfoZ(}zX>}r>^}pVpT;9rJ8fC)X=;C55PN-GwszfG+1i6G z{Lz0%4Ad*@<+8Oa*TU!c9N#0G3+!uZzbl82^Y_^*Qs5c*bJ*8lf1knck&k8{XFFuO zg7%RAD*P8{{BiGbwnvuRkH1KE$VlwL41d@+VzC#O#eS@K%K9xJk z`LD`D;M=QM_;o85z9m{;_%En`!)(REza8!|9M!%%D|XFlZ;Rp9Zl4qQS^I9IYT;U$ z?eq89wuPtKcY75J*M|ABz~Kc>_t^e^#PrPboL9Kc$bKB7-z9=;*L;J27nnchbOgT? 
zK>yC3#__Dk{vN`wvi+Xmr+jbl5cqWi3%@x$6GC=!g~Do%HGGE4lf=TW zQklfRQeyEhSGDkKRwnVUD)57U4dYF79Ddke)hYO8z=x8Z?Pf5?^t0Dn;{C00jYs@9 z@vlcrmgqJ3jj(C=eZGYA4S9~wi}riHO^yp3&mzS++-M+W`#9uplxIQT`-m4#@-@ld znGEPWey8@2`FT5@Z0|hw*V=k6-sbeSmA^9?v`62w^Hh#^w@82I(^eRaeK z{wT)*miy_@FL3-{vKLeQUjx2{_#mD?QjPzs_tkr55pY2KZwJ3a3i^fk_pm?1-+<0f zcW|8j0uK$2vp=BnH#y|3|KIVS!T&Gyp#NWQ5%duLH;eNJ{(s;P{@e5a1Ivl?aOgi- z1%1N)S{464)ic5V4F8o>qW*pA*IAjh{(OA@1@evhL#h9h@@Lm6J_G2R@?*+(sh{7i z+FTX>d6~NKS2N%Dzu=1GogQE9;d!SFsH&;PgZ#t(m8+30S3XVsVZs02CH~P9zRhc{GM}M-csO70nLj80VDV3sROiq7 z5x`IV@oM6SR~G*}^)FMuyK?vhpMybQ{q&%Jh5zFbPyO$L@Kc!jE9-S1zXKfiYL@?P zr|}(mzy7H?^1JJKmpIS-FX(f+{VDdx^m?DVNc^wDll707AO3(+^!^F`y|dR9|3jU> z@(Y>SNj?Gk?}_>A`_js|G!w&pZnGTkPzZ%Wo@&$?~f$(9Y`M-qfudD~*bDY+* z!vD+soAe#?&(@#g`L{FpJ@PvLu=yO%Khu70YW_Vs|Gjc`ku2i6`5vSA zznN-%gY|aV`gxY`XA+ci`+crorS}Qw{4b##>5PkVZr}RPRbA9aKI@{K+K-Pr{cz+z zSB+6W!ucQD^L-{q?Gyik_Mu;>=Ba(eEh$j@>^~>l42bGYD4MZ%+O5g24LA{bf!3(?XjM zz{l+8KUdXvYeX)M!2gi`gykRbFFCw-(uaOU`TuOVGwDB9U1a^d+81)r-x7a4^q<4K zYm^VF=%4)Of-B5FtN)x60`DG&|6Fy6%OCKcL%nhTxd8rTG(Pp83&np9{nGab{O6|q z=5(ASAFTlYRlZUGx$>0%9Q^6nf3E1*_7hG={&TTo%hi7_fH70UK|34oPy zmJiZJn|II-efPwlj^#Jd=N-}ay5d)-{v>?f4*^dJ2fru^q-rv2v}hkvhj`M;_ET(vssKUZb^>n&4C zb`0DDG{g6mFuu6@ll|v1;y+guzd3KP1bR5~FB$Wz1ARORJEFC3==wy1fPe3ZUtFd* zENA#k`_(1sVL8!XqyBS6v75XPcwp^M7QX7Aw`}y_(kQB``-NL6dzRo9!>kti9a6G|1tl$yz>cj){uP!<`eJ{Y9GOTcF7*&IQ}l} zTg4-7KkwkW%a~tR!TU}2!(;vg^bLQWH@of#kY zn-sl&YulT)4*;&*F5bK6_S8P3_&h`J-j9uQ=s%Z%{R#eT+U~S}3f70)O=qlqNcVxx z{{jEGBK+t0{ofwjSI{ooqudzlhj)7Y1padYt@5pxnoQuCxF;b9%inpmao^^7QT*zE=j6fNrKvuhZ+b0i~n*^~CV+8v{yD;+>$k_;*^r8Q?sx)suMGMFIP3oAcE{|OGc%9yp96m| z?mt&5S^qhuL$2Tf`&R%yZs`}fznP!ae-7sZ^{4N&DSaBkPw>kvKFR$&)PJt=5dXQ> zBmC!Z-ckRKv%mjbM({S{e3tw7Gxj}Rwhv(bD$hbU$8=KVSUrC>aUN3p2{`%v{O26O ze}{g6b@LpJ-@e8E#o9jnW^n+)e@^`I`22zYociS{eRISgPyOed*tY9RyV@R~&*T1c zGdIvb-r1b;pL6m%{5`5!xuG)cKS%mT=p(!0KbNW4{7vznn~fmv!7kiPUuFF><=@sA6| zk51`JM*Qg_@uN$o{O1htkL>DMz>)OxIcJ;w=-7V__Sp=z5BNn6)&Y8Z@c)35_|Jj< zkv^W}@{GU9sbgrp*64jl;{TE)OZ?&5dYbg|MflG_AK&Nki~pR1@8mz1;5+>46hDUk zbJgQ4{{(~=s#6LprK>c_2ywLi|A;bP4Pw4x-f&K2Q|6FMO<^DCs=fHn1 z9oRhX@v+~Wo1B^OpYwe5i*@nI{|~7#+w3oQTm0wT^cl`;+#S{rjQh`lZ`~d9pPShO zzcS`OSK6cDKiq$=1mq^ZvGjodo86zNJ?TF;6~AZ3v-M{ls1N)H{0!tK@|$yKY^wfp ze14sU{zm=UB-QtLxg7ON4A+Zmq?hUAokb(R)A}m+t6g`@zm5AZir<^Q!wdP4$D{sp zxDUj6PvrFQFb>Kw|2cjCvMTMUJ`(*~|2dEF`V&vZ^1fx5+r~Q5{ziXw*C~!&#&WrmhrUYewWIlH{t(~I*a(?Ez6(f$?k=E zQQpeA3?J;T^LbmYa;BcAT>b+5=S2Q(wXFYFYs!BvpSS)>dG#Y|-4OfS3j5DR;+GVO zTw0jfVSgF*pNk@l3;KtAX@~bY>{meak^fvIaE~JUu5T1A0-n9)+u}b*d6GLL_}P%$ z8Ap@;b1i{aByh?P&_DBETNeKe%uC?+6iyx7e_gKkKe=ki2Pm<_^H1?^CvIEf2j%ih5g#BogMXAj!^k3YEJsKsXrUT5BRV2 zswHsLb6EMef~;56f&5k2$A7M4`-?i0{qXqq@t=z#yS^%)VcqyWd8{kbXZ4>e^8Lmm zM&XV{!a8pg#h;Gx6ZZ9v_|@fS zC-LxB&}aF-Rj~eSo#vm|c3SY43+R4GpXIxP;Af}fGJo4_J;Z-**77k;yaP;pjQYpzz+W4z$Uo>2ce;m4H!sn=~e*%wl0;XjwYYv<<^p8)=|7I1#( zKUZJkysqB@?9il-eu6xEV85P+-kAL5TF@g?8j3ypi^Lz`{q{)g+PMF~cI>R!v-jfO zUztzO8nsWGj2)MgwP~@|d)qtPb=2QZ{p@i5^LXCf?A`w$;?p?97h&9w_P<+VKe`{&&_N@4)}=!2gc?$`1VRa14*dr&0gV1OGc4-{-*p4(AE@ah%`apAP)* z{(ttr>(gd^)Zc#bXW0La_-AAN$MOCP$Ir`&yg7jU>A=4~HjB*90Uq<-ziPJ8(ZxER)c>&3d^r1bCEr%9^d0c0xVRP;&%lr3;#yQfK1==+z58^(-s+{`|2ac^5AIo2iQhS~{wDd4 zGXK29{EtI=Bk)0~6qTP|ACUiZU*)4Qzewm0DUQ&!|3=~mC9+;hm%L;$evmtiAJqRH z@_*3#f($>b|1ds{=tb1t&gJk=())zd`VoyUeF$ed{=sWIbj^Ybpd_hY5BT_Yp0WY6)%^t^- z;qhU8Gm;M^%}L7oXUKo5Z*Gg8z(7CAykUZE_0z zBHu3v+DlvxJ>u&IdJp5Ig_BBSw%jKn31Y@8wZUl4r;d~AMM^-dG|Gss`+7pCKE zBF=@3kLqV~oX5R(_@Asm59cHPk1bEJ{FUYW+H#n-co$tH;vu?;!3I{l1m6eire#ehdBQJ(v9_THnDi z_46Xn!+yfQ)Ax6|9QyfA7Wz-<3ml14)Db=CP(Kse@fCD@Cio)Z0oyM?|JrTqZ_#G_ 
z>@8DD`YwHcu-&%)7IAy?KT@0`XF&QvwlAuFt4w+qcW?%D@#H%BqYle4zue#o=kxG` zrtgKp#;E)<#b>knuq@u~qH@HUS{be%>Z?D7m$To~qCZpkAf5~VehGShf-}~8qW=5f zw@2T@!8W(Aeij%vz#IM@{HbjJD@^|pPm8|?z#}SeINxSJ2=;Hn_%+@N`royF z5b0gu_XkverFhYBst@`Xfc`82AE{l?K-iz)r|Eu+;~;%s{8!cA0`uVZUDuyzU-2n+ z5kF)f|Am%!nf)!WsJ4Ad#{4(b-$MN?6c1UygYmV*-$L~hdHKE1D zLh(2+eirYN{)OQe@Oa$xHy#rINc5IznSa{q5f#3=A6ysq%YEkW1b;LY{|IXEvH4?t z8Lo)$!En`mr1N(uzQ?M=_W5PsWu z{>aZ{a_&TH))%~RQYwh1@Vt=9ihVzj4MM8%$|JwNcn+3!#!u{s| zkG*$~vE(}M1FLRT-MY8#qwf3t>>f=U!~!D6HoFO|F-?YcYY`zL9j|A4rv;LVq@D%V zSQ1t+2;c;5Wfm<1LPUEVjQ-f{JhFBhff165kRgNx*p^2g0x__P!({Y_K@3MSM{pFo zXGC(i4mo#!-|N1n$3r=>VQ+5#bGoW-opb6t-#OC-{lj|66>& zJE9cd5kQ6Qd zWAP*7InTd6PyEw;_ZGi5$T{#&P0DX~z*hv^#4umu`!W2?ud)AG-uK{7CVnWl@3Ne8 z1MnB&du@JbwS>PYKTNXU+Qu7`KlFp;ABF+Tzoqk+$-kxT-3ot$>&N*W!oSmQKY+go zxKi53&t_-w7d9S++CE|S3oI||pY3p#zbK3!fWLsfK;>}%#QwhT3;rV1c{bGc=KVS7 zXXyVTmluYI^A~xLm z+xKBS;Bqm}@%Py{XW!?m{`<;{+a2>4&5r;1y!8KjzfZ=4UCfizZUp7F{So>;8~xD{2VSL=bXVl!B@fmEc3fde6qN1#^<2F;K{{40i{K<-!cFB*k3@M zo@(bV^Zkyyiu_Mve-!lx|GNj?5_;ZkUFGM=wp_#ie#80oXPkt8hxv*k?9bBgJntrE zV%fhS{-ZqB$5ehYc@59OkL0ZKHC8Ei*HL~3{`?)_HJn6vk@jgLXPcjYErk7y^CyUB z%J*a768L<3B-gJHe-H2{z;E#Lc2nx#?Mgk4V|+n;9-M!c>rRKh8{;Sc?lj>W?mGXz zKd|+5i`MU0UpVdw_1_%f@yTtSCBe-MF96+~G8dd%Mu>~<5xaQZ8p9GyR!u@<+>Vy30{etIRb$1~5ZWrx`a@+*2f!|>L!TK_v$2IO_-*JcL z(GYwrw0>gz28|EYuC)H@pd6%Fzx*Tm9qSM9^?qU&w*j7`gY5J5KYcos{Tah;xCy~& zM2@6y#TV`-zI7hs(~96WypU7FYe0T?Q{V0<47cIBD~!|dqL9-YwEu|ptKl|W(p%s* zVjlm#{^);J{SRI0S6_d$ptz0Zfx~!>$@qe6vOgGS{&m>DSa0z9SJs#Fag(6qFa(L; zTh3qExvNld8wJH{SpO`{;Wiws(+ICY-$(YL3%n;?+dA3!a`Bw?&+z||uA7^rKj1mC z_ZiQFYvVEKP1e`^zTq-#9otfz1;3B+<%s30^(tt6%>OmvJ;J&Oeu^VU>Lf7iZ$2yb zt>$Ozd*eHL3-PNczI%KG{sH1WqJ24xE5>)!Mf~GQJ}^Kj(a> z17l8jkA@l-e(52p$F2Ahmmdsey}LIQ`|l;S=iaM}{FcHM*bSRo%S-aRb4&6&tN+oV ztXq%j`tO^f1%dJQe8_&4r6pUx;`_hVcb0VjI0&|xcYk?V*1Noaj{dW(^X{N< z0$f)YHkX%CAM{h!51jwYHNk%YT$SNAjPJi-=#5>MzUsEf|IT{@%dkbAkKcP#c6ywx-0s3eOby`TAp2BS^F)aVW-zYY(JrWmj~Ba9!pD*3)+p_ z3+LFaeuKhFg2q$BZRDopzmEQG@;9DnREF_w%x^`{Ro=D^S<1nUg?Z|2l0W+A@`$ca zZ4J~eAF4gh@f+R%|H`1mcJXisTt?y2vBA<$@HMv|@?Rc`{f2gMju&A^E1xvi|Lf3S zMWsr%Afp))P8Ob_i>%bt#In)Ryig4((^v_gZL48q$f!0gww!yr|+k_ z1L#K{^ZEe!5Ds$ydkFJ5y}yt3XEZ>5xbD#V_+6XwLwo_#uQ-Lukq~p{PPTWnL?l^h&hn{>az`RfMb`JK} z4*b!WzxaH3*k0w|oBuk8_Qmhk-mPPP$Mp-s zb1fh86<+(I`iW=!yU@RKQ|2|y+xV{2YdA;6sr7&J4}%At?_bvQCT2f?x18_4r2d0F zJ*Q&tpV#v!=k$EXSJXdZ@nd$?Z&ECZAGWA|SbM(|$-H3v$~b?({#+}+q~m#|;PL&k zju-ZRHJ16dqVs4i?asVk)p4Pogx&Vk=2}ht#NY?u^-W#vuSQ<#*U^RXeek~W{bpHG^S7FO5m$wuub};D zUN$~^ejodbA6LI`A9_R2>(C=g`F+^eV_tjc8Ql+|hm;-=e-`GCqWbm5eq#0G@%WAY z6-DANHoYj4KM4Lt9zP&I8$XQK9>V7N{D9)0BL7kv{$lte&2NMH?oab0rqjVb^*hs} zQu?1BntwS7Ke2b1|54^0$cOVyCVkG&{I8~u<>Y5)>=2AEY~NI3*c&+4hI%6-;vo=w z!|WT#mGg~bkMlJ>w(-3Z|8OSXY5{&?K0k-?3Ew}-SN+6#PR`^D-e}I(`azZWq7RX8 zy|vFcS?DRt*Zd!iJp8h#f2|2Wa+c%_Gauu*)jw`Z{cBB^%d!5&^+)^FYR_?b){a)+ z$@+*s0qZNp<iFnecchIIH#A!_8Z$z z@_v6r*Qd7X$!Ga~Z$#s&bOgF_e15mH~F`#|B9b?JAc4wzb1ZStADS4o$t3Q z@Dro{!-n{Y?fpvIV_y4e7xr=f(ySNmsr>NJ|8ixO zuakAL)6=CoUmv6O`zN|it1S2PwG(_lR(v(|>rb4s$DJPRf5LOOvF?Fvp2{S5*-v?^ zzJAx=@4JQ82KyVMc!m9_)%G^y5$M;n#^1RWIw49djVf+gC@9XuOkT>|<^g9g4PW(R!oZpMncep*;`7`)F z@bLH{^W@n0;?u!itOtT5>W?&8*|q27A1XxEb9|kt?Qxo>{1~5)__cWM!#}=*Yvz{) zzYXK%fyciUo~8F;AK^K!Gvpt}x-XgDMtz8%XNTsOLger9btbsXX)$@3{lT}CzrIBL zvG6~XGq}%wi13$ao*SRPYoE|{6WWLTxNYQ*RMwAVf2IBb{XX_*{kV_V`MadthaVpu z>GgxapFMw9PRey0Z?OLse6lm=@8Z13w~!B6zTlgiIe(WgZ}T4tyd6Ks{AqYG!;#IOpKHRu?;ze^E7(K(UEDMrnZ0l4^-Ahz zH9s$%Pow_P)be5HbNP18uBi4&{vm3&SGW58It^5vw&uh8AE=-CheLmZ^(2NLf&3Rg zp!i)lf056jJtf41GEP|2Lg~9i-{` z=2oorC=O)3$^J+9Kh2-`lCDd8=nwRJTi?Vz`MsTQGJm4+hnU~*ysl4+MOkmM{|x=B 
zsO!sKPu7t=T`&HS_D`T+#UlJd=-05%dHopcI`eyEe?q>sRvq<+e-Avw^t)mc^@E>> z){hkb7UT9R_aF6B#Ic;Sv-oInyaIU+ur6PL_&1P5iIK@88q);6UZphrDF{c7gp9@tohbt8vbV-UzA^h5we%zs>Id*7f{bu`BD4n#$Ma?;4%IpTd_Io`>}t^2Hz9Kj6LLb}06K z*YV>+{I@-^`-?p($JS>>Ew4CN4*UO*f47MC;&vlB;v7`#8lQh=`-SB1gv?i8yhr{^ z@V_2+&Z}NxZsT=AP36q~57_tTv^@qI7ic(%55@8y4$ADG?AAVm@=tH|MwFhC{YRF+ z=;d(eIpn{M^?TU=gZA%rZ^(OubAbKYEsCD9e6c?4^~CNn|1Ijnb^@;tvHxF#IZXD5 z>HR?KVfN#oCjQgjod1;d5B;aB{lV-kLwLoniy^f?ZTn`>IlrYd$~so%1yQaQl`@CpZ;*jQr2A zAAnyh7O%p7xVTv=T?IcL;>VV8ZTRjNfiEhq;`=Z@mv%V~f-{^lKQQ+%dJf!xi*k_) z&)tLZ-Sm+^%HLlW|7}+19d|B%{UbC!0iTf4^MZ!M2Tq@Pa9;>7{1E3Dz_zqb=@!Tt1 zM)|<+7x6o!nbIn!e?##5FMlK*x*vbx2R*Qw2!G)Mzc0A{1%99GT;6}-_0s~sFZSR9 zzrVomlik0-?=SHC3;e$BUt;}Y`1u8X|2ei}7x?|d;}0+J`{E~9;P+)ehS&EPKWtxS zf#1J|rQ8C)zrgP=@cRq={#QS_!0%&!Xo25%7Wn-Wu&Xz>7WjR^FD>x<*Z$_<_htVR zcHR32hO@^$3&#`0{vXBn!S3h^;{t47#n=6m7s9~}|DBE-=)Q^%`~dBrSUkbEDE`#z z&p44^=XIy6=K~0DPx~paIY-`Rd}~za{gSlq?-;%x`?a??c~R`6o_y>W@dM-hr{VsMrzWR(fAG^Xej+312G*iH>|5*; zr<0c#_<#5j4S#Vz{Ofq?q`24{ilmGhYXoLO5 z4pQNt*VUsLi~EGz&*42vm!eh20e{R-fgi_3e4yoMJ3-f7<_|mhSOYK)A^U4O!51j=+bXM^HZFzsKuRJ=;KZf&XE$1rE zwS09e(m0kG2j@6({fe_xTfdShq$0_;(v#G0Ny_iz{1N_`cf@f{{Dk8i95f1aUhO2# z*MOf6{;5W3m#;UmuiwG>RFp60cN?8OoU?uPP2#uyviF(4{R28b2>Z;= zEfx+6RF>O!p5penLHzKqANPlaXK}uw^vPjVz(ViUtx^jq{QIvsa=y1zIsh}(H{Ki+ zp5otqO!?-Hk^lZmB84bW{N9l!QoL6@axir9dyX&qddFEQtV{m--b(a5|NeFz=MN#@ zK?B!#uP`{c=-|DA^Cgxa+keoXqVrY0E;+x&so{Bk2>X!E?-~DNxkBe}D1OpTmCoa{ z{2-r3P0m|v)J1+RXMK`CKpSOq~0{oY)Ik^v7B5%;X+#OEKZ}2SJ)n&+!nB(~`DNCjy@Z z`b{Q$T%Jepr{el)^pD~GGlfj_3bK23tJ8Uo*H4Yw)0}?IN!&gAdLk8ehD+4mmCWOO zKhqd4QU9^_sa1xTs9%K6|IFW?aW*;S_CfpAoc}@PBJG31O4J?`{weyL_YaP$-kN-F zhcbSc+)Pe3e(?Su9yWfY zos)c>ay~I5C)OXl@9X%F^VfV%v0lgc3q7Xi1L#k=yu#N(YMaw^`ZDh?(sPtwh)93& z`xf!+;@n{o=LCr_fS#}8{ZbkC;Wr4V-{9P0#y`jWamHVXxVPA!C0B)ZiQc@{+mjA+gzUSNjYAObB;KN&d=%ncpDLFaqck2bKLi` ztH_7GZ-@K8pTl!p2YD)&#!q@q=NB>lVEmmwcPQhp^Iha z)sOObpYw-1A0>~&?2lV?J{srGh)$jQuf;VozWY=zbmY zMLY<)4#7`IabiO7c&=Y~W%WPRW&Wr55p*3Azaiqo*!<7mhx4ITJ-40^UcAR#-ACu~ z3H*GC2>HVPF&O zSl>Cm1m@2$kMDy1ht*Yn4*q~|JIo(2VEv@`D1XcozfI$kD|~v7X?z#Q;=6zskNiAq zIIqd6?O$8Hto|0qh5Hxu-|U-;#(^nux@r7dOk@clTS#@9Irldk0l2L3U|4_geopy9 zGs>e-E_%kl$K_i*-egkFv6uC{xV;|)F8@BM=h}Vp$KgCW^pD>+{&rvaH4ra>pR;^n zpLvtGE?*|Ss8)ri&-fLptbcGYP_&BoKm<2&qE^h@Tw%lMz;I4s7~ zXZhoMIDg*{IDgY4%Rl4)^i2M_;IsFMzascD$bs`?`vLU}0^#d3JKR*Pf(tHKiWs~4TZPD z`hk6aGWnUAeEs-Y<`ZQ7!1v*g`7ylw=b5K3NIeIi`ZPZV%pa5&r=*-9<<8XK`d1LZ zZk%dI$Zw8ckBWN#0Qxc`Uq5y@KkJ{qzpnC?_f0RLS3gVrbDp2iQ+_^g+`k{z_pN<) z{|>!(xgz5lDuF3#Dt?QOcYBB{PkLv9!Ts8Z(#iN%j}Q2MV?^nxtxWK*_}b*xR9?Q8^78rKllGj; zujHQK@@iGATabUFzB5yPIpcBtTg~6(`-sED^5piz_*|)g*97re0?-z|7F{~lz-sU{ z=I4|Cmq`jQAH~OcEld0;=nhl;3-ck%Ka~oY=fuXBvHZ2Yz0^-~y))k7=f85k@bY0i zLjlwL{xa@B|G1n|$)DkOvvJ4T!7ITH136g#&Uv%EeC4w6rA+ldF3^1vg?-(};|SbnNcUf~#*lcabs zhv={05MDks4hCG(Uui$DCH*D{gqP3O-&Q{QEtemL!pmplm6ac;-Ue7VlUz9;E+6`h z@!7uD+Rf}+%OB$b=O5z%@!;4vX8HT7|9-sA`TOJeE%%IXP=xgx#>2<+5BPPAk8eD_ zDGwj>_oIGZ9^)PA?Rm32e>^{bqT|$P-Gy~iy?l*RzlLkzXxaaQ@xPL}%J)-P7t;M| zu+7(>hJ1;iB&mEdZGROrG0taI<#)I0J0DQ|l~xM=EAWd)z_sq+{FeC_xqt5;$7b&y z`)5kdo~&<)j~;R4Z-tlMga5+uTj2%bFG2iuPGK+e@0fq@ORx{}av%G;{JdT{gMP03 zg5ck)Rk!)tc=tU0KIXpx|6YZE5BZS&4>`I1F4D~{@PqOAk_gW6wO^@Y{dr+CRl#}# zsf|x=a*v+}Sttd_3oMIhC7|dgR@kT+VGNzfep_eUiZ{*FPZt zGTQ03^7r|9v>V!md3+#u)8{dNU-bm9f4E(VzpuK@>!d0EKIp~!2d?hZ<6>t2KADsK zd&Fgdq44^Fqx{jGEAII>^=&aTX#u78d9 z?-L1p7x?>l`a(M9@56eK`THD6D+j2?`<~0=$iw+ig|v*zXxM|hwRew zJ^XuP`(>8DFERT*rWby{a?krRKRVz$r}dl*`-9S%_4`>GU&Q}u@lB@JkF>rhJJ2V@ zU#Yn8KO!}r8rDy|Z}DW!KWOn{O@9l@$Jf$w%32>D-_gG-TCYhQ7U((r0sKDrg)AOG 
zUGtgZ7yQ6^8S8UrgY_qde~I|52nR3y6ZrVok2@BRrSs4k&NoP3MgKSn|NZxv|H|UA zSR9s2NY6L7YQWF0zXAP!Nb1wf$hvh8{P$$1(fX74>0VMk}1wOZbO^ z58r41Wk=;@a>|TIeq?`LVEYU&XKB8CNx!q5k#!)>C-C^!RJrxrvW^RsPyXHTo6KJb zzF?MjR@a^Nto#o0`@s1g+NUXUZFWR{fy&qVUnct*$4^e;vf%ywX@2!gR@N;Um2W2V zwK@Aeqz-(L&4?V)UD=+o`QO?r)BbzT*&LN4>n)p~>uo77)1EDd>xcMPR-TQE?aVp$ zquTlm?Z)##wt{^#@Vi+YmILrlV*ep!`fBn&Kdqm<|Fgs6Wnbldu0PgamVY}V_HbuJ zDaAMZU9_LX2XUak+5T<5ZjLC$`l2uT$nwYh-|cuDzi_Od*vDfawPxi%*$@axgBmQ5C50lk)X=lI5<1^mp{8@gKKjk;IuinW@J8}QQbF>!- zhF`qVwEoc*`*c#iwAVTGZ{i=$WG3ZPd^#&XOL_shWHV9^YrpyO+Z5jn{f+C7@zeU9 zwO^xAN1Z6X)bd^U4^sikWxF7Q`^dkUL4OnZbNhIiRlY95P9nc~iR>l#%jx^*{W|#f zXxvNEcT=3RY#sKC&hJx=uiTcU=2wm(AEeXK{Y1xXtxveqel;@89I>gAL4Y zXdlFD;cL_XqZrT0AB|-K#*dD(1b;D|hqC?eHvGtR{d9=<^4Q;Ya4q}ZHy@`oj8=F* z8~fXLUF`3*h(8$q+%EecvA^A-aa{MeVc+EB{#vE8eTnwB8}fXO?bp|PspdE?48~9F zTc;ZGJKi6MeOPp^vAvkh%Kd_~%fEXiM0|U+Klp+VDvImgGeH4|V=M>o@Z6>wfq~Jxq{)d>BcbvG0<9AJ2Pv>^tJU zW=Zn>RHn|q`=LHR0p3@7@0l{@4Wwn=|9y+f{~p-~Px1F;=JtPYhu7;n$iIdenA!{S zo7v~Z{>_NK&-i?8pS7&}VYYv`to!Y8S@yl^W!VoC`J?=Z45(MP%4OL%t4GiCJbplX zD%c;h{kL-TFyHUhkV4O}zQFz%_Rp*MJ+3pohj|~dQ$c>Xw*9eq@G$QumOBq$qJ6}S z?&q@nvA+@Pes5X#e@#z${fX~yxA(ZeqmmVB*QKR1GC%A2yMgn|tRFq+S2=Au7dhqq zN9doe7gH+7rs^s0&m#X+WB0-|Zo&AI-wM}xf7ZU=?61mwoflPpvOn9biCina9lqb|h&*k7c2E(yHl5#6IeeAV z1KvMB?hKqS@wLgds{L_l{4SAP`_4D{ccJqKoKEDIfau>ph)fT)e~;x?+51THGrKpu zkNoM9AV>8RI<<$AU*(WSt=-*KtuU3?I-t>^=hyI&7I;(#Z`(O0W^v>j$ zy^s7FS&?5oJ1f6Jv?KD#iu@|tzfFF%=vjVVD2V(jm0A5O6-58CHIZMdGOK?zl^^gi ztT(M``JsQ+=H!=x9ZLIb%rA@QgTX5IZ{r1?*xzKo9(U3-USqzGTGH=xCCqQQ*7&D* zf0uc6;~rAX!_6jA_$zq*jr`LX-*>>bP3vn~f2UFy=kYt+|5#d*`lJWv(7zCWHQwX& z9a(>;Qpk_)r7-FokQ}7hZng zQ^=0Md_nKi{NaZieC;^fNQq|`zvuDw`iS9`ASmMUz~Mb{iFlC zh3p{410XGO{Dt$_zmfX`XNBc7bVXhmzrY`l{b$^#`O_PnU|iqP(Fw-$nSGPRwcz(o z`~~oPiF@&TgB6U2z|X8K@Oxx`{>{Vh!Q{kzIL1HLFg~Hb8h+3GOxS-0z9UgEe4gPl zD?P#E@%a<1Zw$|5_$9N?ZfJZ#v&YQtGJNFi`mhFkU8RaJ3Dt+I^s=HD%=T{qBq z(Ck1O-w7XScH5wau7Gi*HV_=strgV&N}_P2BKDl&1}jGd$3*dS2N<8sZYUFe2y#Ft zWIK%UJK(RG$K5rOA}{!FKA`pGr$X54gey@yyP# z@LL;fXBdtS^XmuB_X*#t_(DzX``JGN`5B(Bt~k1~;^zz>X1KX>^a#&`VJJ8|_+Nov zJnkEQE{tBlr@y+@2=F@^pX*}(c9OV{>!9(%kZ^MY{}sMw`xoPPvhxDtDgD6<7s>uL zd7^%7Um{be6vc0%zxNONiXXJ^H+vz~o3#@t{>=3ETYP4-S0~?Jpp0GMOnDqEt9>>3 zez6NoPeQfBCf`p}z^hwf^gO4HwA%mr{VlqW?@u?L=kM>N@Oxa__eZVg`TJ9y=jXnk z=Rdz(TjBXVQ>}AaQ-3}A_i268SeE%YtiVr*dB5uKB7gGFlm7M#>Es??8-A@w`o#D* z;7jp-0s2R{xDxQow63orc|5o$xKCaUqo+E$Y-=#IQqP2i%K)qxtHuE&h!f|G|HU%>U9p z55~V4E+2(_eir`*`{xV)y<_+{!@aRRB=bM<%lP4Fh3D^Rw8HbZ;obfr>?{A*`>x^M ze8s&P-tAV5^MTku2w{KG_+us!#`&06T`oi=kRa9!!iD? z=t};GH;-~A__x@V=Z1gF01q*ZUo-q$zAX1k5Uf6D|6LM}EV_1)52e21;duRp`RlU! 
zYdyuq-GhJ2i@)}~;zRDizoEXgA7S{nOlB7UmQj3J?mqapd|v$LIo)@tmORC;Vf;q@ z^6F1F{9AP%|K_^(g zgDL!5&i$No)|teqwfzlz&pz#^xNfjd{nqrz^3S~9y{!BH%{!3yKr*_`I54m!_i1*I<+5U>@b2a$8GxZ$f zOH$ZB0v>HrPw_X|_oIC{-(*VmcdQ@S_uqqmD+2$<@o!;2;QR2$`$=Am_CuW7paA?E zo=@rzvk!7XyG#8G?o9m+Px6uRZ^b$J7Jo$i+Z=w*Q~cZ*AL2bc*FGN+|E6)dZJa~@ z`@s1_?4K58d>hAqP&}OVPs5-6m*U?t9})j%{m1Y%R=(liGWWy3DQ*vqiHY(M{99=m z`l#U$|cxQHt@Y{1bd_-|vs;8oV@a5#!hCt@?=4$@`6h!@qBiC_S~6 z55CL4v-ex^dwksSuuYVTL`*#855uig6BeBbbHGPv#W=)@~=AX%nM#^u}>|JJ+@{;mB0{2S&wD&KVv$G@diZ&U6sbNN9^;?c5y zAK!25QTWz)oV0bQjla8?4=Mjb!8sKFhM@MWmoEJP?dH42e~$Gn<{!%+xK~U-z`rS; z&f3RtbvC}ail;ODn;T2Mp5|-$@%%iEf2-a^`G{jShktW(m-&9Cj&&B+4V8KP8~F=y zzpwbWR7I{0|JKW3y@!3>R`MGAkFm~H|3A$ygnzrq*TBDt|37sCGY&hXCP0;zrF4JQrD?fX#EL%pVoi8|BL-s;PZT!e-C_B1@=GWzcgMS z?|eYrc66P*2h8)Si zo^|&a2gm+*?60OMf565>l@NWg&2QJR^V~l^Ro#6FPNP0o|x7s$R7S$j`yLiPm#oRZ_#d@%(ib{ujfgX=?FkZGJ1UTrX~tUuDO=xasKc z?0sAJ+rI1+K8?#SDsIi<&|-ba_2Kay_zIlo;cH*l>Cr=&D5vmm7XPuP`ItYE^`G!> zKI!%69*s4=W0c)PJ5v7?{F@JRgZ39%522qT-%NUs`>WyKfS0>K_*cWf< zL$LQuPF&oOzx;8CgK-w^i-Z4e;Mo7 zjJA)(S2lfP`QiKWy&Gt+(_8HkrKh%9iiczU#Qu0!adElctRCJDcX<8Z&I=x`+xmBs zPaAl*@X|5ff57@Kulm{TdTige+V{b~^~8>8Ar3C#_znNoGQDqI;{02!S^OKWXYgDb3$`3#uX+4iv1s=N|CTJHzOct0 zG5nj_V`l#}8uFa+|7U@JOWu+13&>7@{aFwBdW?T-tn#(l1K&KG))?Jqkish69Uv{)a!?Vs(t zhLdDwZP9|K7l=s`5W`|0za2XU_QqDD)aweIeu=3I_rb}4&#qu{2bZO zSof2?g7_)_&+f>(9_f1X7U6rbKb~|1-rAA3i2<`oJuEAU{F|3LEN^%L+n!XI>UE0LglPLB29Zwh}x_?nb9@*=Mq>y!>=X$ zLJ3RJp5PfW{vFSexb+!LLrrykgu@>4-2B!Z^*5M*I6w*H|MC-(T09^0ueZs6W&S0L z?-O>QNAUMp`NrF1xTHT%Q-(o(ZlWSQ{alRe>YaRYPevb8R06rkfH_hqhT9lOMynn#?r)pH+CZ7v9 zy$1XYzIVc3BzP9!GFT3$*F2WbY34_w`nkxDeh=OyPA}_uM5{k|nfU%os2}-d_LR@a z+ULCT5}i}tA=}3^e!pGFi{5KC1-H>uyoS9WhvFBz4E`tjT{|my4J$uZ|4YvI7>ChT zyoSB+dFHqH&&+d(eHg=cu>36^ZZ{1-D*O(v^Z)YifdAzE>8|=!^O4jqul4&-&*#A( z^CHQ&(-GW8hxK!Eew41sf7a;;ZX@o5|2Od!@%}o=cSQci32%`%I)ibL_b<7AgiodG z?C2_|IpCq`+P*J8+U9HEYu34)5I<}Beeh24eZzJ5S-H0Nlj@Hn9x-}<75fG1U*q!8 z?tK5_p4^W{dz13v-<-#7)S)lq_u)T+e(?L?)#Cdh^oX7x(D^8gFBZ?!8u?HTJL-KF?R~WZ}Ot4ROFMo*PHVm&}{yWphw&FI-&%x~v|4S~Ac=ndBolmj- zy?NY5PW@XvzM}u<=5QNc@|(^TG18@3;Qs63@pJ4#vK}o8EUQp8e$e@gLgBz90I+?UhbrHxGJX{n5U^n@0DW z$~PbVp5q4>pT76|;RZy$VKn>wfJSn@`EftLpx+PqLB7^rNPm|4`Q?_3LpJ_k&gK26 z`SFjRpM`(l#y{n0`-<|n-O=&i#=rSE_G~|4|1lT-ee^fNMNkU-g9{bM^*hwQs9!$! zM|=%_FXuG}_yWRT-2TU@N&Comqu#tvOgua0U#0WC)~~F5w}t+N{7t@1SNM!f4mJ)? 
zeLr4!@kT2T9wVG{ZfCFX`}X~v5nZ3!iv7RE_q!uXC*zwJK&&XQH=^{^mYaNzukC${ zXP={X#_t9rN+<7k+E>`Gn2&ywpAScr>T}4y8;f7M=ZT-Y@806yTm1()2mUVjzdPV9 z0{&u{ukp2&AH#3`8vNNF{L;h|<@Q~cGe4*PHvpdzzPr^D9;5s)$^P$&{AJvMez5$* zFqn~lOXshawsR}|4X&TbpYxrBS$mN8g@EFaH z-yZP}L2s>`iTq^T0*?fj13riu`Hjad)gv#!_%)@!obQzU0#|s7?v)?52O&T7&l&k8 zle4%D(@&G1ng^9zXUNcYw^%5nsH~%?XZV@_l%Maxo5YxfthWzR$)x zU-jQtep}OzuJRdK|Fk&v_kN#@1G|_fr|~xg4X5$I#y>Z@z~3|ezNo<0`8j+C?SB$~ zPR==leTT1Zxy}hroAEj5FYUk5`WE&p_8sWj$36wx_NjJ`_6Orpv#B1*Wn|^p|NK9pgX1&qC{`klK~jUmcW#bPE6R zkLdS);&A*2c#jUU&)5Id@E>ks7XJZ%3_th%6~TXa;h&5DaNU(z{6`ed;6M2M=VAB{ z*v*6kY0Tk2ng>wFY5WJ{)hGN%O&9A9wr}6(b%*uiss4)rm(Cx__!Q3JKN^bvXxtC~ zf%O{mA2lSkb+XNG^ZdEdwEqG9w+VeD{-cJwi}t;^X?zCueoudI_zqjgs{T4H#cdqX z-!-*9JiY+`(a?HL_>Zt2n!+TY{xMerZSe>8mP z4zhv&BPIJ;hX0u7KN=?Kx!^w@f1~d#JtXxBhO*Ax8=Cz`_z&}euJC)yOY%GZ ze!_n&$?uO2W!-vop8sfBo(F;P_k75DpzjZsmSp|9r2Ei8u+9AWe0~S~N6X8y?p@Y- zchErpEMM5<`4#i~@-pg${+ero2Lt%5dHly;fdA;*)BHzEDz8B0lpB#8$o_);0sX;W zhV=I(mBaEJ{$miLzL0l+S=MjCDCGCwh5v)%Q|t{yK11a{3YJ7p;2-;t<`2ZD=XKv; zAo9%(zv-MUOykFO-7^%sKR1;1j;&7&SCh;A!OS{>=Wo= z{-pEUsob3WjW3$!XNi9yc)393YglSx)1#zo`fFh2~rv#)(QVXxDSkb-2wC?4}9kU`2cUC_>o3t0J{kD zc{D&iNIUdCerNcPHsyh~huoo?Lg-E;BpG@E_ne zV*AH+ukv?vJC`|qZv*-d{mQrZ_&Vd%+3pIJZ^-Q1j&qamTR*Y=l%nphVE=^oU2K0P zpOyL)CsKzH}O-lKf>|p5kJUD$^J+SzY~1PP2PujTh2eHdg`wZ zdypg9DLu#u_Q~zKbJzcV$H}o@6};N)2gUiH2=c7_7TLdlO!4W%-YWk-8(ro5O|&=p zYyW6P*6$6h*XTMX`w#s-AMJ8V`bGF)_?!O?**{b+m7n7Ilg>Owy17O;M|#fsjrOr~f5ooMYZR|>+JBtm z?>W6zX~_L*Q&RIUg14N1UsM0{<)Y1>(*EFw+-v@jz=Uv(!Y|9@0E(j{@?PP|F~L}`M09;Ya#E>_>ZeP zUes!^Y&jqeRAIAP)$QSk=KmR!NfbO>xAIk5O{}}p2_xpOTuU?yaAL~oR zUE%j*=okGignm(K{^Mfq_nfoLZz$_ql)pdiKhEXEe{6bD%>4)T?>XH5HmYD_Q`%XJu2n@=+OMj@fAMbhWrorAM3fYIQBU|vrkMP z%kew++9$9NDuv(W^KzE&J^tf@)VEOZIA7Ca8(%90&z<(4R%`GdXAbk9svT9E{r+lA z>>KL`)#}f3e)ImFS`&WWEctnNSiUwt*IV${0-sxG{Ep+}xvg_-enz{qybBGN>u>#w z>ks`Z6mYO+%KvL}nD?(hZ#bV?Q~bRqU+bT>+OsqHOs&7Ty=;FXoWl>-TasUW+|Qra z@4v@?{1o-`%iY=W5AuM61@`gW_{aV4Ed0k9&q*)OVVtMb#=o)uSpCt3?tkEUwm716 z;{Png@ACXw9#Kl?+r-|xP5cb#UsbjHiiH}#UoQP5r?=I=T&?_^tD8hXZ0&|;ZTE}WBOMt%=woUPjBN7{ z5Pp1|6P)NTx(ob+&<~bx9MAfX$K#H+b1D89*T>|`W}#c;~d5%v}ZX!+<#oDJivd9ah>$0(GdIJ#wWACbK1VS#=qloD!Ie$ z|7!IC{$tD=R8A9otke4+Hg3hLH*xbemt*p)#$xZ=JW;EO|CaR&_7n7z{krwK^ov44 z{L7Q}gFA!#%9#JC{Cr;g$%GH5{TCb0a;l%X{J)))Pvcu|)~{^i=Olk*PyXZa{tiYN z*ddm`jc+;C^IYrOGx^uZA3dcX_vWvDW7ug_|8{#ee;fC5?Uz~aa_y=7F}~F2@^93) z@jE_0N9$LoSy|=lTtn>kxK!usW3+z!MAs>*-?+@YFW{Gn<#({J|AhRnJuLSKf0ZxG z{dVpQp9j2EU%%_`_uXu9gZ-PW>I(Zki}{fKoVUX#xc{YEH!l)?DyO_Y6$f@Y{J*b+ zPkcc21HOu%7xesKRe1#2KaB4~Jam3usYp8Uf3M$gz>nW($oDn%JErjQ;D74-b@rE| z{}BHT`ca&^s_Xv&+b1dV>tbDyj&5=q74SXmAK>+2p-?=**TvE{r`cG)AMq!5;Qvjf z&hYtzbQdp>ICWqKJYa1M_PUky?BUZ} z@9si=@Gqs)d%v2-@3VxDhhH~EczLAweu+N2bQ#S^qBMXD07QS$QAdH0K{y{lNN%{JUw6kG8os@1He)ui?wg zUu*MYjp7!9_o$lQWB#KV|1Infr&apioXS0ylXHCUQUCoV_$P8Jkmq2H zQd&pmavKR+M{@kMlN~3{g}+Gg>5F{MV{A9uWdzrg(eLtPgR zV)P&K;-JwsxZZ zJ(@q3mqQt!wcQF;v^UOYas5dCPDnHAjhxEAIF>*B-{9{ve+U|naVEu>+J6)Ap9gmx zoF6rQkincE*W|xE5dUmJc@Bokqrv_EoxXEq8TK^%xr3tU=dd7t-nYnqkNUH}7xrmE z{U$^8^G@s!SrhUC9RKjZd;eYRX2w;$fa`8sVs zs^4=MUrHqz*K);Oo>!KO=Qt&N=03)^sJO@1J<11u+cr}2xAOPl{TAKFb-g6xUcUhU zA^UfqV0=#(;{*A1%Y{8T|3~p`FpjeRp#5-O?_UyM{e-{%SN7<-Nch<=z#_xj%u{#`pi0Dj$>^8=5N{R{uJtLGif z?+re_PtiGp7s3la#Q6c(a&qp`({qo;!~5x7@avv^2K)Dr8`|6VB`v_u&hjV!Gw|a@ zZ-uYTf1Vv}upgZD2li`jw4vt`HW*i)#x?Qr%K3xdk9_{X%Uqo~e-O@{Kk$f;8SM@G zi|`G8m?nLhoIl|2Vg12#^q-LTwZLz0=MPk$^nJ&7MW2F%=+|Gf^9L_eXMW({{rDRZ zzj$%}U~&F{_JtPb55)dnoIenI|D6!~pYI$;d@PLbi}MF-i}MFh9=de?a?6 zhX40QzX|)F_BUW(eF^LH#rXr+pV%K?oIk)k0v@FH1NM*DUpnD;T%13c^)D>WABdj_ 
z{%<~?vN(UR&HGJ@^9MM0usDAJKiA^?0qwuCe-QiMi}MG2yx+Arf1v!6LAW@7fc|0o zV~g_#i}MF$moLsA$o|G(_W1+bC&6|n_8&U;?9YG)iuY%p4ln$v^W^`5^P{u8lqujG zXg}tsHh}L(eCD+BQD#%JFO$PLVA`K~#kqm_!X0pP%J~F8E9VySx=+OM?=U~!?|j04 zR|mf3_qT99Q_8b*2zJil8SK9&_Rl*J&PC9QSeqeEaLH*Cl_iXr)#rcI5_CHUZU--Gj`Gv*#1;!Tv zzpyyJusFX!{%1QMusFZ4IKM#lkKw}>=NFX!W^sOjacPV53&0P;Pr5k2usFZ4IKR+$ z7Uvga-TB|}`Gx;=aem>eTZ{7xi}MTQ2VR_CSe##g!|k5G&)^jOA%8oE+`H%2;qecE zFGYNV0r(8*{H>i|2=tr*_;+s7`2q)g)#P7Z8J%E1^HKPh9r%-f3jB@k>(4mt=uCpH zlSjqx95Nrw$;V!!^QBi%m1KA+bY6WkCC}F?$dAq+z<&hhUhu2S@7JoxpYAstiMQ~A z1ONP!pxVOkiT`Jaa{*M(GJZ$b?0>@foEpwUaQp^m2M+ibZ={va()OQ?9};ny7^hKm zaE<``Z|{Y_bC=GyQhY%=pWr9pXGgk$^A^OT&;DP;A0@U_`n`{n-joKA7^NS=|K0Mnr995vQTxOz32GnaSAu*@{&o(>+8_K- zbiTyNrfzb3SUW7Myx$7fN8s0WhI;)mO+#%5kw4D&g(7#*ptGFg=wKp$=tmfe{3GIB zh5XanexTF&V2Peb&-4AI9NvSR2YFm0{hE{D_Q@^1I3T$n$>M!#r@@N@Y9HY@JnmaI zD%$PUH%(5g|KG>?ZhiQFPz^=s=SWqcSlK)*IRu4spF)RDz~J_DXDUTe!cCy ze%wz-FS2}0uF3AREQf>odTky0H|UAny2$^f-*~gLw0>z? zPKY1J`~+d44*kdZd);?yuUzKyoOHgBeqHd7xoAJz-%Ep+pT@zX z>1{q=IN{f#EZ#p@((z?krf4@rKZj_cX%D)SrQeDaw8U~m@vaKTOFhxoz;oIiZ{twjiymzS(<2cS!-U^@P^3<=F@BIJd(`fcPP)! zkEL?Ee4WnYcT|ryo-@DR7wP>}s*U_`oo?ef_!W|MJm>R^c#iT@b-a)B_31kL71Cq{ z&!JD8KfXUz#dFfH8s&d-E7_pu8efOrZ_@MATj>@(M;vMNL(6|1@B1#EBi|q)`Ty5C z-*)}4=KLg->A`=8*&ob*dZkwYuQJbf76&HjSo|HF4~2cw(SD0k{uheBV?2e3zw|cF zSNi$URhR6Bcy#q~N|O&kz8Hu9A@~>lH~Mb)&>6n=lHvL~_PO0qQsK9F#tHMQ+}|mF z#`=XW-Y0&NRGQ<#oLn2n z$)WSd;PF8J;rJ2(^$YY%KZYJ64JppK@Hcb+03QPgZNOiM_voBjQsU!0M(3ls|7(04 z#2MKk9D&8jvHf$4dt`BwEFKcoANrp%{)jSoco~Pl>89}?rt+x&I`}bi;7z9ISxz_C z^!&aP`SRR&4!ttTcM=~5@sRjB!E!&nmf?2b=PY02KeYHc7Ej0Ids*Y&jN|0E;;*sx z*-OZ^#pT60cIJ^V{?M;5-}5Vv?rK~e#Cb6OK`!4@K0()2oL2_f%sO`?pfudu=mm5_rfz&eyJg(=vI=-4dXNX65iYL(U2Zb!+gM(iLm18-~^An?e`1`Y2;Tb7R^M9cJ zh3q$H@~!5dWxhz(4?M>o1PcDT=<^(xrkHz<`AjDDhuHrRc`dj}h<}i#- z)&E}kQc;L+a}PhI!}(eNtmdz)d|`iK{xQA4%liGZ`epHQEKW{Id5F0E@qUTy@u_+t zzo%%tXZfQ4UM|ac=2)B@pVZqkAH6YQMNbveeg>pMs$5@ zD--;W=*OqG3L{ESZQY>rYp7qsJu3Gz0l$|>NSqw5|JSiTP6%(C^()kuc}!S85kI9| z7CuAkS6069-??Rpm*dUF$r*pYj>q_Zj+5gOk0;xAkU!18Bf37dRm^{v@0UiDPU3r& zm4CW2`UAdS^&jWonfz)ax<0j4&+ha6#)#6%`w18Q3FVcF%l!RKjgw>Vw=`Z3c#E9Z zf=kCLMdZtu}=35c0#ofN`uP{l(g^SQNfP<84KMooPSc7rsLq#{$(~D?jit z^x}Jq!7R_=z2Aq=!}q}sn#n(##b^V2#KyNk`@uN>EQTZGUkzq?4)4uh#=ZN+$FXtG zr|}N?S^j>s&ex^6I6M~LvRXQU{(<SgDUJ0;D6zN|lW^S{}=+%J?QO?WF@U#>sm z(^jh+@_kx=TyuSI!M}_8`5yoN^j4)pasbc0zpLx}oezlL+z-z2wUgZAv_|n}@I5xZ zq{*L(>qM@O_cyoP@Up%y>zg1;^4f&|5%Qt+c}#X9t>2gK()u9sLd?K;AN8PhdItIL zaD1Dk4~YLZ@J{f0p5;vIcW)ctPxuNQpVKS&-p#F`hTkC#qYX~MpG)>#A<4Wms8?cx zaT#6!d4NaJ;@>1fT%){963>yMAOAAVkK_0^l@q)!3aMR*ACde6vVJmtM2>%xh=m8y zQ68{Im|q_JKzX^3I5@83ynf)sv+-}jyj&*|cR22i>)xD+eRpJ=0|kM9+>bO!fCrh zcBS<%8|S9_CkD`|_&2;>n!JzchTpH;hab@?(YgQb#`Ulq4af{Ml!1@~cgYg6M|1SKa5We_Fnh$MrzG3Pd z`o~F}FZdq##qb>a4b=Z*=S$$1!#W*tph(Yaw7!0$@3gf(Q7G%+y>PNVbs@i(EG}zs zncr)&A5GWKsQ)?Ta}GmUhr+GI@@DyfzwW&9T1SzrUn56m$udJ27jsc z|8?dMYc@Szk2aNOE?e1y947H^IR4hd7+*^`UrKQfL7(+3Xx>$ZLjyR-7O{uk-`#@02q zp5gjoeG=)qCK}84lCBRNwok1;W>xO!u553Z|Ej6&6}7QG1CRSX{E2N@*F)d; zW%qXB2PgS)|Katg*lpwf(Gxp$Qoh)y=ddqJ{LWD{DWCYMt^6#t6O|uHJ*@ra%WqRW zIc`7nFS1X-D>&VLjYggNM91RaG$ZsceG<_sUl$|j0eCaZl%~Z# zp!vNH-e?;4a`Czp=b7vZ&-+z+AAHAm)IU(CYs}ZRn$!z?ioZ?# ziTr&Sp9=-Mukmk+8>s$N{F|~|o89Bo<$5aqjeM7Thtov$CZ`tvCWHMR`d->cmi=MW z6ZV_6Z&LSztbZKEcu)JpMfQ(iHx7Pl+E+?BvhSUNKbx*$zyGlV`;7Jj==VL`r+x1} zeotvJzHl>f1@Q~1Ut|A$06UUWiSu&uvA1Bq(f(>-K;!<&$10MpHM@Kb{~wJHy_E4` zH=K~|%R*uM67AQce6}yM4(3gK5B6{QevaZB;`$ZXmrr7Ux6zRMsow4d+UIq8wEs=} zm$JW`&B}Ag94koL-(BVXU0z>e{Xf+GxenWxC%2XxXzzmRKRqwidEd8FyUO>MTCxxP z7V$fye{7WN3EJ=cRQ;+;>529VU(@;!_UU;4)Al|4^8Jt5{#;MxbHDvh+ZVQdPumCO 
z@fZ7Zb>07L>;7D!F8iI>pX2ePT$laOV&w4rU9ZbNDD2N0v_DtYeY?8u-yi8cuj7O4 ze=c>guLu1q9eJ7e?FXF#Uw_SkeUI-eF1^hA_JzUA7is^qbwAMxXp{AwaM zmEU7y`Dyz$#`-CC^1S>^t|q@hV=O=3Umi9@UaiPsIW09rp1i+&_P9Tc3Vgq#a%{9j zKDX=s-Lt@-w1y(jPDkX}gS-UaAo@FiTxnl_sPgNn{NAGPhyD&!e#1HW9fU)yzmGc= z#fO+)bsWqG7@vwfzTteU%CGBODtjlA`sb{m?(Kelpxi4skGee@gZEHSqUS{)j(-@o<8xFgZhiM>M~h z{@VJ(@F(vA-vj+i9aVkLgwifcibrAp1j^4uFQY!MZiS(>ljw5^>)Rpp*YGEe@H)>^ zEsY;R_> &vN5gZjVZ4pPzSI&vJY4_>KB?D$lZBFFW67`E{M|==^q_>)&x+=W82R zLe)pMU*JE;mqV6YtzPFe+rfA)_6_+D9K@~S>uP_6pI4S|a!U3a*$4R$^DXR#TDT1! zW7v_f<9ER0Z1zPiyoP#T*sO+VC)gDgTCanDnb-fczLovfwQT+>eg}T^8vTBg;@^`0 zAXQ=i$L3Zx#&g&ixfsu3Ptg9(&g=a`h4caT2IeuWwJrjmunjxqB0XpS1LFari<=(d zAD};eZi7>LpY$V0p5SZY#f6=~@l8*U%cuQc8s8^8%6LCP`D1^T-cOtW{^8=L;R1|D z`GEM7(Lan&S>!+AQNI3Hv;R-Z_u#)n_-|2v#vdM0dkE+5nLjzeHQ8r|KLqYja5suS z#C*bdL&G0>(YD|Zbsh=fFCu#f{SW>Z#2-j*u-#Hq9_IS^d_UrkVElDqw`QGJKbe5t zN~tgTtUcmm{JBza46nE>`HpdlHvbjWZbO}$G_HJ1?X}{HbGHC~XKpWiPvD&4ev(MY z_woFO@ny+@K4H9S;JYbpI^YXOD*WDkzvgc9d{a-H;ndObj`;&<{(xUv#;uTX46*tP z2%qR+elYykgYbzq4w?VN@QH>`D(O7Z(ea4!aljvDOENAMOEND+(LUNw@rjUssU+i4 zK7!qX`?+Yq>09yR;4c6^#<{@y-%5O&)5yKfDcLuKPt3RW+3v|j`)ucAQ~PY^@cIq! z=M|rra|EB5ox&$R4*S-|rIasvP^b|O>l<%=EhKzmFXhxuToQcZoeShgL3|h17qeeG zOLC3&t%&_Pi%%R-yb<*KL4ySG^b<2~p3x^ z6dz{xZV}_#(v|a05Ix21d1UZBw@Yfc!zs-_F6NI=^&wiq?=X%HQ_rJ;$^I97-cs~D zCZun?IXLn>8hV0bJRtln>K`bc5%MjPd;`RrAvwaoiuvWZv(d-+hxyp__=xrww*Njz z6wIzizvF(>IU+bl)(^`ucmn!Nsu=6{U;^K$E}oM6ARe?M0F0YArjYx3gn$NDl- zxgAkCn7nS(U6zmG8B;xxPgik_kU#5p)PYzaw_ZwdjL|yJwrcW&2y?2mj}aK@kP&W0l(~!e0-DLP#!+{chIRX9hfqy3X zBwGiZHho#|nEX-=kzXq*>!U_W)_9WLGE zG}*Yy^3?T5A=SLb*XfqnB|!jrGk%cq#VOxk<@ikb5cg^S0r*jh&&%&){oO6^GF~xV zKF2skPv;RHAF#en2KYVfk*<$&kQz>r-lzC0yq^L5W0&IaB29b92j#GRM9&qk=+)t; z$xm>KTs}Rg{Rs3Yue!>q;S|T^7w9?ck|I3^PO*gik!txV!z=pk2HPcuQ{?w){=s}l z`I}v0_Q>b~@g4BukMwt+^A9h7q)ZO6KY0B32>3(9Yhb$sQKS zKgCb}w+`)V;618Gn4$d^ik~d_L+pFc5VCr@SlAbDU1_hgp0VJvaPe!0W#W{!se+>9u5mzW?-En&m&iA6k56 z;M(}P;Ssriq5hW@Z)3Q`mlTg^?X#B@fB1ReFKPeboZ@iC_(Rl}?(ZrtF$iYyhk@ez zd{6O*ic_@mlS$c^@O7Uco%Uw%i%H!ND(1znp4NRFd%xtm{CkVzQ^{e!@2Ls?u%!4y z+y5(PrG9K*)BHAvKdjYe@rO0m&%qj{w9kWhW;m&%@v$JkxF+~RoG-%odN2OaAJO=A z3g-hter|X}lW$&eiNOeZi{E)w*}t*xv-r%eFZU6rdtLFT?EeNXmah$87%L9Z-d}}& zQTyOr4FB%q&;z=*^0SI>TuICQzTyjceTna@E4~o=!SB<0m)>Xpzz@)Wp?7p0c)%Yr zJ}?P6P4Iyzf1lzl1DCo_{mS&g);EENkjf>*HPm@@HEZ>A?m92i_`l<`vho=}0QvdV zpOJihl`qWmL z^bEhNj40Lk$9P_KvFyVA+K5u%5Od$=YkR*jqHD;_Yv`jm>;L}H+-Xw zd+r>*(ajb(|FS!qKhNLruT{z$=+`GHelLD^a?9|8SpTwr!u3z^{bUX6Gx!-jjlcY9 z^3P!X9k_D8oyGbT__drT*Ni{J{1PS4@xFR1fM0;|hnIE#3idVpLw-x}MTSF+b1RHv zd?mcZ^ONBYOB!GKLHI-QXA-^%^~e5ph0Yt%_i6nr|6hC85*ycbp3i;E+&g#f%v=tK zGsEGX;U&d~NckbTMMMKdMGFBKqXKMaBMA^-Hz+`%Kv5T6Xc@Y2!7Q{$fdI;;Fl7{A z7G2nBQ3=^pXdTdtESw^MvI;Dvv5F#1Nkx{5K(YG$uNjh-oC4XTfM#=W=f2MQ&wu{s zcmCV5{t5oj)A%R88^?046K=AffcVE_@rNbv68F#KO9kWK0RAvk++kVqhB(jmS){nc zSn-G1A^f2OKN8&%dJfvMx32pKfp36+AJO;e5D%z(=okF*%!@!j!8efqVg7wsxrA|^ zzfecMiCWGd0DqVzWOo9eSU$=791&08{u=(UTnBv|{QeB> z{xEz3{;(7a{t(|Mgdy=5{2?;j>6@MZ4nx5omP(rk@P}a;?@Q27h6aKKG{pmq2 zl3!%scO(Cl@rOO}Pp2rZh5IT`z#rl}H0(E~hw+E%_x5_?zxI9u{;=2EQTegT_(St& zO^?6rY|N=5O9m4o=hDW#Xu zDSbrrBl~QN?OWU|_(R2E_SMfb{9#Y&Mz|L8^D2+ff876R^e>!0+*bOcF7+zWzoY&m z`0t_ncg#;u=-QhS`c;&^X5Xf@uYKY7o70;3HA9>k;oM-)GyNd{Y&`*2#&pMW#ghHz zZN^8y{nxmAK1P?Fsn9f!H0j zf!J&I{ba-4PU*f8+|SQ9`*3RNpxxHnAHQM$Q}BmVv_Bqnt@WRPKUBL1`|Q}BviH|? zTs6f%*3y4tdfEKdXgvG;p6Qy+-wmeU;rW}K6nSR&!!TXr=k?OF+{*s{5&kd&u8`>8 zq`WEc0pn;l_TvvHMefyWcn;}rdOvUmiZAFkYU{|CY@K&hmjHUWP0s;7;DO4A4m}UY zhKoFmKO7tD;69f(oZt^jO}qzo+6lt<-RryYaXB{otLwTK&qyFy4A3|>7*w)AA@rR3C*Jp%3T;zQFYZ3C}Umm!D=FO)yZ*Jc! 
z{#*1vL;opWOnGzrZn5RNDL(GM{mu}dm@DpU;O+>2uW^1ap7yXVF+YZLH2mUT2lgl7 z7jx1Zh(Aa>#4p|=`u~>rMXFEp+u|1s#V^i00l!!@B`-YF798W}@r#i!_(j1nJ_)}# zK>Uqxb$!TrqKDxZbEQw7-?@4)KU{o9`0{zR8=m49pW+uO5C0Uu_!PfL@nYB?Y#$na zF-6=Ga%IJEjKEJl#Vie`KE_i!sj+g zZhBtza0%bzA$#dpgpU9|#q!hh$)?y*$XEO!*5O{~UGgtt|JYfC2rr%qj&->8u^%J# zwqE{J_ZI|+KwZXh=&$4axikK;n~ts@BmWWci4guh*_+tME9c#glly7?pF;3a>r-&=UU=Ly|=6o%7%>dai|Wz#iU({9Q%={0a0I$F1kawg^9$rFI_sb723@*5rJe$@x&}4gB3_ ztDt|Z^q%m?3c}^pW~F_N?dQI0B*+UQUS#_KmXd9rU-0)`%I`&y=!Lliei3jXw*Tc8 z==V0pH?4e~wecMEGp|d#w^n?X!G7;9313XkNsIlgNQCU}-U5GZ+<4tJ(>3 zHkS4-;nOj`a3JFbbO~LdJ*w-(UR(@b}T8=YZeNy26Kk zMV|A%_a_{`t|(n>U(!PDEcXZg59Sy93zoNeT|T36utD__pZ^)3e+U0cJZ}L1oAhUE zUdBWF=fQuWFXLYcbl+4h#(cw%zD4y-uzuob-QPg}@-gYx1o@d*e`ECgA7cG3U*q@J z>g)X8*~(eIAN`1jV%yAEDq5k7|8D;|Jh#slK7BgxfqnVN>XSw7tpiqx}iAe%rc!YoI&PwX1vz z`VZ(l;=L?CGhg^VEi3=~r}6zZte;TktI%~9$J==RdF;;ve`Ehb9qWPChjWX+PwOt} z%6O1}q5UP`^RSw*B zvtL>T{=7Gk_qVams2_1G{It6W{s#Yt$7tPLT!D^yp5lVh@)hMD$Ns;`_cG;aux=1% z;Bl~i#r`O(m&bJ^aDGYk2_xknLkU>S=JN8U$~#~pERHKXYX)7{1oST z{k;1NN9Ui#{ivH6==$-WUF7`Kt>`VjzgNY5(be;_Rjeb_wLDVAc~91l`@!q37ZZP? zzHVia`4V*kSs!zJ?#KV^9Z62||EMpJIN&ml9b+8ai}wY8H?&W}c#Z86vp?=peGSA{ zkEz}+g`!ve82S|Yne7wwzf@Xd`{Y;*y*fd5jOfo#G5)u{1^elk=+9-!b3#up9m9HW zQG5>k0sS-xgx{y9!8f3b#kubP`3)Yw_IJhXDC;Nh&xd}n{vPfp>;E6&y!Q8SKP5hY zDzp9`?kDR9@VE8%a6dUdj{J@F*YkyX z(zX`yhAE>;B{PIr$%YTW4Ki&`hrSft@-<##RQR;^IyA8yZTVdEp=I|1OyRjYsSL<={{M zXklT;yF9-2@^S<3llb=|($BOnj^tjG_QgT&*?jW-A59n4)+Xy`H*tkquf47I3FIg6 zRn_VPt_K8sFz2;B568%3>;0|@xlHTUr~D9F)C<@C!+r(-EzjdT*1bQ!$a2k_mGMyi zya0V2#zXyDo<_eXuAcX&aUIJy$Yx1)Ew6!h)q z=ue&e6VM;>1$L*8`(En@xF7#}N91@f`VRNs&p%>)5d=Tx*7u!H{vG5G*RSw{E*}SO zjo<70f1&3gA7y`_4}a)2Th}6Q+)uH; z7tD)4O@24e?|g^PtA4}$?l|%{ef7Vw352g3qxWO}ZNGr|g_I|uI6m#qdkgx({KI7r zeAtG67{CsNeru6_`!IJ$RbIm`;&l}u-$MFB_7^p#Wxr9A>4*7Cuzuj*PESkR*Y+2&|3UlL^?AAO?^>t} z-V5uqy^8NA2Ts1nP+vIC_G@0d0eNr{@`L8bk4+yIDS#^Yzm^|o{lfSNlAp!#t^NDn zOn>ZZ{i0yFuS@8b_f9gMWd9rO-#SR2F7jui{adiFm0puNf8Fjj=Yd$hf>AU{=w$vX>>I@ApdXY!jBZ9J+n--3e{_^i4W*OuU!rwAj7~9AJ$`& z=(D`M$$r&Qk#jFn{R5(3L*-r4RQio$onyb{=jjKzggml+&%o$+_`3$CXN-PEPotx+ zd`9*?npD%ni?hBuh|3>tm@`b;ArK8p9F#1g;@A3MES;h28nm7g>jecXk&@a!9 z^Z)ie1EpV#4WtL?*Ek9}AHhFqY?1$q`RTB{O-v6o#E)V5@uO2NISPNKMDl;lf!42b zJ;>t}-@*F&KGkOsf5z+GVL6?2ceo|{nD#&0e!wO8WhD2js$ZZ#*giR?{y^Qs`~ZjG zEwem_{YLAL^Rnc zZ&Q4Bhy4x1Iacd<4$jm5l~ufd0rnE{RZ8PSKL|hR#6JAY!M^vrYdr2G7W=kZ!Sitc z9^&6G!N2q)&=d6-jtE^|%p%+$Up>$9E6}H_`YBG<*sfkw`dNG@iaL(!0(v3(5BcHi zc*MTnBwCWb1|EKQw=l z$}6se3O#B1j@K9P5w>4E@5#FALBCNPXRibN2K0B)eUsOn>5*Lb>mvTl-)EUreb_yu z2Y(6uDflXhO9g5l7`?c@!9Ca~b?^n~j=C%d_JK41fj+Yuw==sTofUh5>Hqhir;qiM z{ddH-Odgru3w@J6r#Zh9say)EPAurtPDGksWOLMNXJjzWHe zKVg62`&DCa@^RBY&h#0}kI#@k6#0EB%!=bz59mL&4|x7zf8)pcZ)xco>%Zk^wz)O_ z@`rYV(9P@xS)}Qj1(a-2L#_~CL z#$N;DKjkyCulxz2TizDBO{l(8`elRG$E^3vZZLa+>I-;mA4MS1|Lpqkqa_aOo*{l3 z&R@DIerE#u!0%Yndh~sj*l)aon-vx@;?1X@!cTM{`rXCHW9z2 zdXjkG-6;NS74bEG9*(=qkRy1n#YsV*Pe}h+{MJ`LFkD{a_u)M1V*nqze2Mk4#i64r z-j`lI|CgknK|hN(hm=P_9->;^VSPmUjpS<_i`?h$NM)c?at-`^4*1v`p9R<-W^q-- zWj4r9i_{(?`qBRXgzORI8zZ$p=G8v9jrpVTC5hN05$q7UE=|x6*deq&$-bXI3V*bT z`y;VKs`H}f_n!Z|hcW28O!|}Js^~Y_A8n1NO8ochuH%c|H9K7Uxnl8O>+j)yTKw1g zd$^xtif^|59_}ag|F6F_?OA^h_tWCP*5AYZ#PcbB3jKxtgKy1`u1Y@h#i4(!ezUKB zb9d+;ll=q#pe=U85xEa}Jn@rJzmM%h^D9tinC;_nwHHYLQT-aJyM3MWXJjYjYL9Sy z?%N~#!0duT?E#ZdMK9ub)hkmG;{>hF-ra_Lzint2Bw`mB9)Z{2cSio5<=@PIHvQ*z zVV)pjHmiB*z`cYxN%M@=FPy^vbQ~lnaM^X_U;dbnF@COJXnA*+zzoOvqKDSxH~4>U zQu0l`VrG`~+d>!5Vf$R|gSi>@Z@LRJ2kh{XeIW5;t9yi(pu^w1ZS=AFN3S4$C3Rw` zAB1nh{GdLJ*#$OlslU%Yfc!9enSL`mS$!Czm(i)A^sxFcRi&5Z(MR-we8u{#Chs#{ 
z0;R9f$?C%twLPZCTwUl^D^76yz~uKriu*yY`rHY&yV8XdTcqzueiz_RTbFKFN5{2@ zK7=2p_^tVaM~_19!ER=Gi1t3oFT__Y?s*pRQKFwyI|A$f*Tny?j2m5jrmOM$5Pz!i zhkSO2?TC)zVZeV}-`M=W!}J~TAJjAWZ`^&A<6;f8%71Ks1@1H-C&^u0M*R0R<-g(+ zs-GDoFUbE1_3!$Hqz^VC{u_Qj5dY)2pS-|!!(5K*_wnDLc!BMTeC~y-`|SwykNtl< z6ehuF|B%_6clG;;W>=XXZ2Uyy_x=O)*Z9FyO;y=QN!aIDODi=d7Fao!6hui*Q~IZnX#0nxXO)cbMxy_#3Be8SJjzo7Gn z@(QT$!})|=tv_h|ZFvRkui<>c$G@Zb1Daj>v3(2u)KvKe{N5{+_cwd~1l1SD`aoSm z&MWj4*H_j&!XDy7KKyUbyTZ6S)c@z!_s(*DfZ_{ezkz;|7xy~+zvoFlf#({} zzCiO7hBLdR_K}Z#LQ3;Tc?R%}?=SN86n~Wbf!K9{zs5L#+*@Ou496dV-y2YT2=-64 zzs7R2LF`=B}G5@g~uT=&$O zir=J*EdQ|&k&h97xxWhi8^`iL;@)yTl=cMlm-|ZJMa$dh*Yz`5UvPRBN5=DiLGq!I zKVbfBYyD))Lo@x*KdW14I7-`3Y3s}K>izyYkKgj$jL#zZkYD2GTOB)#Gv8fAejV|F zuGU5ERX4D%Q9r%=<`&7x-L=z%e~Gj9NBn=>lmDCTZ}KtW*9gB;%cLG&tDW)xm8Kk% zed9iiV#^1#+pqF@uO;W-kMMnmFaLonwdL6Am}csyfIej(^qInZP(G=pd5&gvm7hcM z`!wj&NBoY~cenL1|DUzceT;+W7x&wEr{f)_LtMGZt>RCH_1&f$4+KBfD&+a(UqJr0 zT5s@iuYq}od__S$K1Tm!{Hp(4Ec6MR(!&4c{CK@9^#K#miR?o^m6qx=ZeV`Opa=cm z%CB7|9Gcn5ygso0N~I3>7x^_FXBBwQ4eZmh{CBl?4gEo0S#b!c)5F)_BK+O=r{hv` zhmXVZBDY=x&*S^if4tvmKCFc2I>cYY#u~IW)puLv^WHSB$Mbv8U!=GE63*jT>$j#J ztrzGo%roQU{LVJ~*sJG%O8E`UkJrI@=$nq}9oM4gk$wSxK>m0W=ogOt_9fOICTC&) z{lCoLMxZYJz#nYiP~B7FUz5K;9P{xB;;()3cTVLl@G&eeSpLF)dXoI*_{tZ^U!pJh z&w!1M+v?Kgsy$IqxJQPE?N=_%g^}Pvu*H z_zJDVLh;l5ed@Pgcm5pa8~@KFYwV|%3X#92pQq$_CDQ$U4JY*(?lvHwAYZ~6xxOqd zzVo_ymHbTLuQ@*9{2d&B?Z`zqalswce@u zUz7B`5Af$&^~!-fuKA~ypX`R)EdNcv{+#R!$k!ugA8THyQhkHvH2f>(kGLv%p-8_I zUWI%k|KJ5$AHe?&k`2rk^1JpNtuIOSWgX&&v%NR5e(-$?jZ-GGB2VsiD8KuT8%#=k ztTGTen`=JuHuAq4BtP}JaV~Nzao^GWtE|t8`!*lP?i<{iKX%xEweOIlvlB!A70>Su zzdOFk^aHU+&qw^b>M}l#T%pg$6NdinAT$5N_#-PvOlQ-7MsL%bHlD-rNvj*%Q#x_} z;6uE>B=jEFx~L=lybAq|`XVKnFU!jf>z|^2C)F44mf+5U{`q*uZPeXje+&3gj*kqg zSYkL|&9~S;8y{@3AIANMeG$2%JnpLNay#x~ZV&svC5`)nI9!LB?}z-(G_No-*WtKt zF&}XY`4Ch!V|*}{yEmShxu7+ApeEqN&QYjSHF+I@iCll9Qyvj z0P>9NA&WoQ`~Y9U{mur%`Qi7Qzi4)f*(Vl%FkD`!ahl0c;x86|=;C{Pv>sZ?-@wtk zexZl&@zL>Du|6pN&`93m{|D+{wv(%TO!0^CgEH{NAUo(EHau<{ty?@lcN=qGTK%A$ zwcBI-f46-L7k=+2li@9!AI6U~+!Bwg>n6FKbQPVS;!u6C4LG{wN00c|hke*nc@$!% zNe>_owng~C!~AP>F*+H2% z`!u=1*HL_(__vMgi9W5S*k|?jLH^BZt9-uJ68=rIgZx`}7nvVw?i{zN`wF*GpLP_# zG5I|{zRCK4#$SNHs?{reoKjvDdZJd_T0b`o719gN*U*V;3**cjD2y@Y)vXbFO~?`=%0Len8iFNZ&h*_vZO!`d(pr Udfq0t{2jpm1ARaK4gUN80<#_n?*IS* literal 0 HcmV?d00001 diff --git a/src/nvidia-modeset/src/shaders/g_nvidia-headsurface-shader-info.h b/src/nvidia-modeset/src/shaders/g_nvidia-headsurface-shader-info.h new file mode 100644 index 0000000..05468fc --- /dev/null +++ b/src/nvidia-modeset/src/shaders/g_nvidia-headsurface-shader-info.h @@ -0,0 +1,341 @@ +static const struct NVHeadSurfaceShaderInfo { + NvBool yuv420 : 1; + NvBool blend : 1; + NvBool offset : 1; + NvBool blendAfterWarp : 1; + NvBool overlay : 1; + NvBool pixelShift : 1; + NvBool customSampling : 1; + NvBool reversePrime : 1; +} nvHeadSurfaceShaderInfo[] = { + { /* 0: nvidia_headsurface_fragment */ + .pixelShift = FALSE, + .yuv420 = FALSE, + .blend = FALSE, + .offset = FALSE, + .overlay = FALSE, + .blendAfterWarp = FALSE, + .customSampling = FALSE, + .reversePrime = FALSE, + }, + { /* 1: nvidia_headsurface_fragment_customSampling */ + .pixelShift = FALSE, + .yuv420 = FALSE, + .blend = FALSE, + .offset = FALSE, + .overlay = FALSE, + .blendAfterWarp = FALSE, + .customSampling = TRUE, + .reversePrime = FALSE, + }, + { /* 2: nvidia_headsurface_fragment_overlay */ + .pixelShift = FALSE, + .yuv420 = FALSE, + .blend = FALSE, + .offset = FALSE, + .overlay = TRUE, + .blendAfterWarp = FALSE, + .customSampling = FALSE, + .reversePrime = FALSE, + }, + { /* 3: 
+    { /* 3: nvidia_headsurface_fragment_overlay_customSampling */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = FALSE,
+        .offset = FALSE,
+        .overlay = TRUE,
+        .blendAfterWarp = FALSE,
+        .customSampling = TRUE,
+        .reversePrime = FALSE,
+    },
+    { /* 4: nvidia_headsurface_fragment_offset */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = FALSE,
+        .offset = TRUE,
+        .overlay = FALSE,
+        .blendAfterWarp = FALSE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 5: nvidia_headsurface_fragment_offset_customSampling */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = FALSE,
+        .offset = TRUE,
+        .overlay = FALSE,
+        .blendAfterWarp = FALSE,
+        .customSampling = TRUE,
+        .reversePrime = FALSE,
+    },
+    { /* 6: nvidia_headsurface_fragment_offset_swapped */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = FALSE,
+        .offset = TRUE,
+        .overlay = FALSE,
+        .blendAfterWarp = TRUE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 7: nvidia_headsurface_fragment_offset_swapped_customSampling */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = FALSE,
+        .offset = TRUE,
+        .overlay = FALSE,
+        .blendAfterWarp = TRUE,
+        .customSampling = TRUE,
+        .reversePrime = FALSE,
+    },
+    { /* 8: nvidia_headsurface_fragment_offset_overlay */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = FALSE,
+        .offset = TRUE,
+        .overlay = TRUE,
+        .blendAfterWarp = FALSE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 9: nvidia_headsurface_fragment_offset_overlay_customSampling */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = FALSE,
+        .offset = TRUE,
+        .overlay = TRUE,
+        .blendAfterWarp = FALSE,
+        .customSampling = TRUE,
+        .reversePrime = FALSE,
+    },
+    { /* 10: nvidia_headsurface_fragment_offset_overlay_swapped */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = FALSE,
+        .offset = TRUE,
+        .overlay = TRUE,
+        .blendAfterWarp = TRUE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 11: nvidia_headsurface_fragment_offset_overlay_swapped_customSampling */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = FALSE,
+        .offset = TRUE,
+        .overlay = TRUE,
+        .blendAfterWarp = TRUE,
+        .customSampling = TRUE,
+        .reversePrime = FALSE,
+    },
+    { /* 12: nvidia_headsurface_fragment_blend */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = FALSE,
+        .overlay = FALSE,
+        .blendAfterWarp = FALSE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 13: nvidia_headsurface_fragment_blend_customSampling */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = FALSE,
+        .overlay = FALSE,
+        .blendAfterWarp = FALSE,
+        .customSampling = TRUE,
+        .reversePrime = FALSE,
+    },
+    { /* 14: nvidia_headsurface_fragment_blend_swapped */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = FALSE,
+        .overlay = FALSE,
+        .blendAfterWarp = TRUE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 15: nvidia_headsurface_fragment_blend_swapped_customSampling */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = FALSE,
+        .overlay = FALSE,
+        .blendAfterWarp = TRUE,
+        .customSampling = TRUE,
+        .reversePrime = FALSE,
+    },
+    { /* 16: nvidia_headsurface_fragment_blend_overlay */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = FALSE,
+        .overlay = TRUE,
+        .blendAfterWarp = FALSE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 17: nvidia_headsurface_fragment_blend_overlay_customSampling */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = FALSE,
+        .overlay = TRUE,
+        .blendAfterWarp = FALSE,
+        .customSampling = TRUE,
+        .reversePrime = FALSE,
+    },
+    { /* 18: nvidia_headsurface_fragment_blend_overlay_swapped */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = FALSE,
+        .overlay = TRUE,
+        .blendAfterWarp = TRUE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 19: nvidia_headsurface_fragment_blend_overlay_swapped_customSampling */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = FALSE,
+        .overlay = TRUE,
+        .blendAfterWarp = TRUE,
+        .customSampling = TRUE,
+        .reversePrime = FALSE,
+    },
+    { /* 20: nvidia_headsurface_fragment_blend_offset */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = TRUE,
+        .overlay = FALSE,
+        .blendAfterWarp = FALSE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 21: nvidia_headsurface_fragment_blend_offset_customSampling */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = TRUE,
+        .overlay = FALSE,
+        .blendAfterWarp = FALSE,
+        .customSampling = TRUE,
+        .reversePrime = FALSE,
+    },
+    { /* 22: nvidia_headsurface_fragment_blend_offset_swapped */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = TRUE,
+        .overlay = FALSE,
+        .blendAfterWarp = TRUE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 23: nvidia_headsurface_fragment_blend_offset_swapped_customSampling */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = TRUE,
+        .overlay = FALSE,
+        .blendAfterWarp = TRUE,
+        .customSampling = TRUE,
+        .reversePrime = FALSE,
+    },
+    { /* 24: nvidia_headsurface_fragment_blend_offset_overlay */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = TRUE,
+        .overlay = TRUE,
+        .blendAfterWarp = FALSE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 25: nvidia_headsurface_fragment_blend_offset_overlay_customSampling */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = TRUE,
+        .overlay = TRUE,
+        .blendAfterWarp = FALSE,
+        .customSampling = TRUE,
+        .reversePrime = FALSE,
+    },
+    { /* 26: nvidia_headsurface_fragment_blend_offset_overlay_swapped */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = TRUE,
+        .overlay = TRUE,
+        .blendAfterWarp = TRUE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 27: nvidia_headsurface_fragment_blend_offset_overlay_swapped_customSampling */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = TRUE,
+        .offset = TRUE,
+        .overlay = TRUE,
+        .blendAfterWarp = TRUE,
+        .customSampling = TRUE,
+        .reversePrime = FALSE,
+    },
+    { /* 28: nvidia_headsurface_fragment_yuv420 */
+        .pixelShift = FALSE,
+        .yuv420 = TRUE,
+        .blend = FALSE,
+        .offset = FALSE,
+        .overlay = FALSE,
+        .blendAfterWarp = FALSE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 29: nvidia_headsurface_fragment_yuv420_overlay */
+        .pixelShift = FALSE,
+        .yuv420 = TRUE,
+        .blend = FALSE,
+        .offset = FALSE,
+        .overlay = TRUE,
+        .blendAfterWarp = FALSE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 30: nvidia_headsurface_fragment_pixelShift */
+        .pixelShift = TRUE,
+        .yuv420 = FALSE,
+        .blend = FALSE,
+        .offset = FALSE,
+        .overlay = FALSE,
+        .blendAfterWarp = FALSE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 31: nvidia_headsurface_fragment_overlay_pixelShift */
+        .pixelShift = TRUE,
+        .yuv420 = FALSE,
+        .blend = FALSE,
+        .offset = FALSE,
+        .overlay = TRUE,
+        .blendAfterWarp = FALSE,
+        .customSampling = FALSE,
+        .reversePrime = FALSE,
+    },
+    { /* 32: nvidia_headsurface_fragment_reversePrime */
+        .pixelShift = FALSE,
+        .yuv420 = FALSE,
+        .blend = FALSE,
+        .offset = FALSE,
+        .overlay = FALSE,
+        .blendAfterWarp = FALSE,
+        .customSampling = FALSE,
+        .reversePrime = TRUE,
+    },
+};
diff --git a/src/nvidia-modeset/src/shaders/g_pascal_shader_info.h b/src/nvidia-modeset/src/shaders/g_pascal_shader_info.h
new file mode 100644
index 0000000..6652ddd
--- /dev/null
+++ b/src/nvidia-modeset/src/shaders/g_pascal_shader_info.h
@@ -0,0 +1,428 @@
+// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.560.00.dev/gpu_drv/bugfix_main-17009'
+// WARNING: This file is auto-generated! Do not hand-edit!
+// Instead, edit the GLSL shaders and run 'unix-build nvmake @generate'.
+
+#include "nvidia-3d-shaders.h"
+#include "g_shader_names.h"
+
+ct_assert(NUM_PROGRAMS == 34);
+static const Nv3dProgramInfo PascalProgramInfo[NUM_PROGRAMS] = {
+    // nvidia_headsurface_vertex
+    { .offset = 0x00000030,
+      .registerCount = 13,
+      .type = NV3D_SHADER_TYPE_VERTEX,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_VERTEX_B,
+      .bindGroup = NV3D_HW_BIND_GROUP_VERTEX,
+    },
+
+    // nvidia_headsurface_fragment
+    { .offset = 0x000001f0,
+      .registerCount = 15,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_customSampling
+    { .offset = 0x00000370,
+      .registerCount = 39,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = 0,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_overlay
+    { .offset = 0x000030f0,
+      .registerCount = 38,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_overlay_customSampling
+    { .offset = 0x00003930,
+      .registerCount = 32,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = 1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset
+    { .offset = 0x000057f0,
+      .registerCount = 16,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_customSampling
+    { .offset = 0x000059b0,
+      .registerCount = 39,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = 2,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_swapped
+    { .offset = 0x000087b0,
+      .registerCount = 16,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_swapped_customSampling
+    { .offset = 0x000089b0,
+      .registerCount = 39,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = 3,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_overlay
+    { .offset = 0x0000b7f0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_overlay_customSampling
+    { .offset = 0x0000c0b0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = 1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_overlay_swapped
+    { .offset = 0x0000dfb0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_overlay_swapped_customSampling
+    { .offset = 0x0000e7f0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = 1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend
+    { .offset = 0x00010730,
+      .registerCount = 24,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_customSampling
+    { .offset = 0x000108f0,
+      .registerCount = 39,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = 2,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_swapped
+    { .offset = 0x000136b0,
+      .registerCount = 23,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_swapped_customSampling
+    { .offset = 0x000138b0,
+      .registerCount = 39,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = 3,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_overlay
+    { .offset = 0x000166b0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_overlay_customSampling
+    { .offset = 0x00016f30,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = 1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_overlay_swapped
+    { .offset = 0x00018e30,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_overlay_swapped_customSampling
+    { .offset = 0x00019630,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = 1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset
+    { .offset = 0x0001b570,
+      .registerCount = 18,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_customSampling
+    { .offset = 0x0001b770,
+      .registerCount = 39,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = 4,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_swapped
+    { .offset = 0x0001e5b0,
+      .registerCount = 18,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_swapped_customSampling
+    { .offset = 0x0001e7f0,
+      .registerCount = 39,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = 5,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_overlay
+    { .offset = 0x00021670,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_overlay_customSampling
+    { .offset = 0x00021ef0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = 6,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_overlay_swapped
+    { .offset = 0x00023ef0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_overlay_swapped_customSampling
+    { .offset = 0x00024770,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = 6,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_yuv420
+    { .offset = 0x000267b0,
+      .registerCount = 48,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_yuv420_overlay
+    { .offset = 0x00027730,
+      .registerCount = 45,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_pixelShift
+    { .offset = 0x00028f70,
+      .registerCount = 32,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_overlay_pixelShift
+    { .offset = 0x000294f0,
+      .registerCount = 38,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_reversePrime
+    { .offset = 0x00029db0,
+      .registerCount = 15,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+};
+
+static const NvU32 PascalConstantBuf0[] = {
+    0x00000438, // 1.5134e-42
+    0x000007f0, // 2.84744e-42
+    0x00000d18, // 4.69715e-42
+    0x000012f8, // 6.80471e-42
+    0x00001478, // 7.3428e-42
+    0x00001630, // 7.95938e-42
+    0x00001a38, // 9.40552e-42
+    0x00001df0, // 1.07396e-41
+    0x000022f0, // 1.25332e-41
+    0x00002880, // 1.45287e-41
+    0x00002a00, // 1.50668e-41
+    0x00002bb8, // 1.56833e-41
+    0x3f400000, // 0.75
+};
+static const NvU32 PascalConstantBuf1[] = {
+    0x000003d0, // 1.36767e-42
+    0x00000770, // 2.66807e-42
+    0x00000c58, // 4.4281e-42
+    0x00001240, // 6.54687e-42
+    0x000013c0, // 7.08497e-42
+    0x00001578, // 7.70154e-42
+    0x3f400000, // 0.75
+};
+static const NvU32 PascalConstantBuf2[] = {
+    0x00000438, // 1.5134e-42
+    0x000007f0, // 2.84744e-42
+    0x00000d10, // 4.68594e-42
+    0x000012f8, // 6.80471e-42
+    0x00001478, // 7.3428e-42
+    0x00001630, // 7.95938e-42
+    0x00001a60, // 9.46157e-42
+    0x00001e18, // 1.07956e-41
+    0x00002318, // 1.25893e-41
+    0x000028b0, // 1.45959e-41
+    0x00002a30, // 1.5134e-41
+    0x00002be0, // 1.57394e-41
+    0x3f400000, // 0.75
+};
+static const NvU32 PascalConstantBuf3[] = {
+    0x00000438, // 1.5134e-42
+    0x000007f0, // 2.84744e-42
+    0x00000d10, // 4.68594e-42
+    0x000012f8, // 6.80471e-42
+    0x00001478, // 7.3428e-42
+    0x00001630, // 7.95938e-42
+    0x00001aa0, // 9.55125e-42
+    0x00001e58, // 1.08853e-41
+    0x00002358, // 1.26789e-41
+    0x000028f0, // 1.46856e-41
+    0x00002a70, // 1.52237e-41
+    0x00002c20, // 1.58291e-41
+    0x3f400000, // 0.75
+};
+static const NvU32 PascalConstantBuf4[] = {
+    0x00000440, // 1.52461e-42
+    0x000007f8, // 2.85865e-42
+    0x00000d18, // 4.69715e-42
+    0x00001300, // 6.81592e-42
+    0x00001480, // 7.35401e-42
+    0x00001638, // 7.97059e-42
+    0x00001a80, // 9.50641e-42
+    0x00001e38, // 1.08404e-41
+    0x00002338, // 1.26341e-41
+    0x000028d0, // 1.46408e-41
+    0x00002a50, // 1.51789e-41
+    0x00002c00, // 1.57842e-41
+    0x3f400000, // 0.75
+};
+static const NvU32 PascalConstantBuf5[] = {
+    0x00000440, // 1.52461e-42
+    0x000007f8, // 2.85865e-42
+    0x00000d18, // 4.69715e-42
+    0x00001300, // 6.81592e-42
+    0x00001480, // 7.35401e-42
+    0x00001638, // 7.97059e-42
+    0x00001ac0, // 9.59609e-42
+    0x00001e78, // 1.09301e-41
+    0x00002378, // 1.27238e-41
+    0x00002910, // 1.47304e-41
+    0x00002a90, // 1.52685e-41
+    0x00002c40, // 1.58739e-41
+    0x3f400000, // 0.75
+};
+static const NvU32 PascalConstantBuf6[] = {
+    0x000003f0, // 1.41251e-42
+    0x000007a0, // 2.73533e-42
+    0x00000ca0, // 4.529e-42
+    0x00001280, // 6.63655e-42
+    0x00001400, // 7.17465e-42
+    0x000015b8, // 7.79122e-42
+    0x3f400000, // 0.75
+};
+
+static const Nv3dShaderConstBufInfo PascalConstBufInfo[] = {
+    { PascalConstantBuf0, 0, 52 },
+    { PascalConstantBuf1, 256, 28 },
+    { PascalConstantBuf2, 512, 52 },
+    { PascalConstantBuf3, 768, 52 },
+    { PascalConstantBuf4, 1024, 52 },
+    { PascalConstantBuf5, 1280, 52 },
+    { PascalConstantBuf6, 1536, 28 },
+};
+
+static const size_t PascalConstBufSize = 1792;
+static const NvU32 PascalConstBufSizeAlign = 256;
+
+// Total shader code size: 167.75 KB
+static const size_t PascalProgramHeapSize = 171776;
+static const size_t PascalShaderMaxLocalBytes = 0;
+static const size_t PascalShaderMaxStackBytes = 96;
diff --git a/src/nvidia-modeset/src/shaders/g_pascal_shaders b/src/nvidia-modeset/src/shaders/g_pascal_shaders
new file mode 100644
index 0000000000000000000000000000000000000000..befe4e183e38747cf1a1fa7b120d1f498dff763a
GIT binary patch
literal 171776
[171776-byte base85 shader payload corrupted in extraction; omitted]
zx%H}e9^a4t2uiIFDnk-$FRI$`3wK)i{vlISAK>3CHj*89H{&? zIg2_2=-tLE;17xqKtGGzb-M0M{=$AfB7f)oQ;d(E^G+e+MD>V)FN6H`RK5j>uh2Ri zD1Ms1PyOa=&Y#14$bM>WAoADr^OPK~M!LVR>ZCrygDT_`KN4o!zf7&4X*Vf&n);l%-Ym&bA z0sb78t{utanty8f$xgV<^5699PsqN2d_AV}p`>}CV(Duvr{P~Qe-uiR7mD;d;U&m7 z@(*61^#S}}Kij&Sb&^Tp0EAr$)oASHwxc;QX$BKQC zvzg{2?<4=aLGn|dTgXIi#qK@LzsmY7x^MGw!M(+;`D5e$t9^$Yot+r@uXuiE^xg4I zrXPrX@O;F-OD^T($PM)QcmnU=S;PA1r{;ecf28?{>1_JX=xut_#xowDw7Rifr4#25 z?%@47p?5>;q8{jHydU{ttA}cNxl#R7)bFJF;+-7aS>-Ok*!%!r!TnDAqxs?Yo4;swirFU?e=uBLsBxOfP~tBZf9T+Qe6${#@!!DFyLG9H z@A1*`SFt`Q{!oqI;Q#yTU$)}wd`$6&@PjZx_^VX9#>WZeRiP)!NS|~41K2mk0P_R77Dn{lWxO}fFVpuL)6?@d Rx#jNw{s#2@=-2q~{|7t8uA=|| literal 0 HcmV?d00001 diff --git a/src/nvidia-modeset/src/shaders/g_shader_names.h b/src/nvidia-modeset/src/shaders/g_shader_names.h new file mode 100644 index 0000000..fd5c97a --- /dev/null +++ b/src/nvidia-modeset/src/shaders/g_shader_names.h @@ -0,0 +1,46 @@ +// WARNING: This file is auto-generated! Do not hand-edit! +// Instead, edit the GLSL shaders and run 'unix-build nvmake @generate'. + +#ifndef __G_SHADER_NAMES_H__ +#define __G_SHADER_NAMES_H__ + +typedef enum { + PROGRAM_NVIDIA_HEADSURFACE_VERTEX, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_CUSTOMSAMPLING, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_OVERLAY, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_OVERLAY_CUSTOMSAMPLING, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_OFFSET, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_OFFSET_CUSTOMSAMPLING, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_OFFSET_SWAPPED, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_OFFSET_SWAPPED_CUSTOMSAMPLING, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_OFFSET_OVERLAY, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_OFFSET_OVERLAY_CUSTOMSAMPLING, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_OFFSET_OVERLAY_SWAPPED, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_OFFSET_OVERLAY_SWAPPED_CUSTOMSAMPLING, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND_CUSTOMSAMPLING, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND_SWAPPED, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND_SWAPPED_CUSTOMSAMPLING, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND_OVERLAY, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND_OVERLAY_CUSTOMSAMPLING, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND_OVERLAY_SWAPPED, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND_OVERLAY_SWAPPED_CUSTOMSAMPLING, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND_OFFSET, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND_OFFSET_CUSTOMSAMPLING, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND_OFFSET_SWAPPED, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND_OFFSET_SWAPPED_CUSTOMSAMPLING, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND_OFFSET_OVERLAY, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND_OFFSET_OVERLAY_CUSTOMSAMPLING, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND_OFFSET_OVERLAY_SWAPPED, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_BLEND_OFFSET_OVERLAY_SWAPPED_CUSTOMSAMPLING, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_YUV420, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_YUV420_OVERLAY, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_PIXELSHIFT, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_OVERLAY_PIXELSHIFT, + PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_REVERSEPRIME, +} ProgramName; + +#define NUM_PROGRAMS 34 + +#endif // __G_SHADER_NAMES_H__ diff --git a/src/nvidia-modeset/src/shaders/g_turing_shader_info.h b/src/nvidia-modeset/src/shaders/g_turing_shader_info.h new file mode 100644 index 0000000..661b21e --- /dev/null +++ b/src/nvidia-modeset/src/shaders/g_turing_shader_info.h @@ -0,0 +1,328 @@ +// Generated using 'Offline GLSL Shader Compiler Version 
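As an illustration of how the generated names above are meant to be consumed (this sketch is editorial and not part of the patch): NUM_PROGRAMS is emitted alongside the enum so that per-GPU tables can be sized and checked against it, and individual headSurface variants are selected by enumerator name. A minimal sketch, assuming g_shader_names.h is on the include path; the helper name is hypothetical:

/*
 * Minimal illustrative sketch, not driver code: pick between a headSurface
 * fragment program and its _CUSTOMSAMPLING twin by name. Assumes the
 * generated g_shader_names.h above is on the include path.
 */
#include <stdbool.h>
#include "g_shader_names.h"

static ProgramName PickHeadSurfaceFragmentProgram(bool customSampling)
{
    /* Most fragment variants have a _CUSTOMSAMPLING counterpart; selecting
     * by enumerator name avoids relying on their relative ordering. */
    return customSampling ?
        PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT_CUSTOMSAMPLING :
        PROGRAM_NVIDIA_HEADSURFACE_FRAGMENT;
}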
diff --git a/src/nvidia-modeset/src/shaders/g_turing_shader_info.h b/src/nvidia-modeset/src/shaders/g_turing_shader_info.h
new file mode 100644
index 0000000..661b21e
--- /dev/null
+++ b/src/nvidia-modeset/src/shaders/g_turing_shader_info.h
@@ -0,0 +1,328 @@
+// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.560.00.dev/gpu_drv/bugfix_main-17009'
+// WARNING: This file is auto-generated! Do not hand-edit!
+// Instead, edit the GLSL shaders and run 'unix-build nvmake @generate'.
+
+#include "nvidia-3d-shaders.h"
+#include "g_shader_names.h"
+
+ct_assert(NUM_PROGRAMS == 34);
+static const Nv3dProgramInfo TuringProgramInfo[NUM_PROGRAMS] = {
+    // nvidia_headsurface_vertex
+    { .offset = 0x00000000,
+      .registerCount = 15,
+      .type = NV3D_SHADER_TYPE_VERTEX,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_VERTEX_B,
+      .bindGroup = NV3D_HW_BIND_GROUP_VERTEX,
+    },
+
+    // nvidia_headsurface_fragment
+    { .offset = 0x00000280,
+      .registerCount = 13,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_customSampling
+    { .offset = 0x00000480,
+      .registerCount = 41,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_overlay
+    { .offset = 0x00004780,
+      .registerCount = 35,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_overlay_customSampling
+    { .offset = 0x00005280,
+      .registerCount = 35,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset
+    { .offset = 0x00007f00,
+      .registerCount = 15,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_customSampling
+    { .offset = 0x00008180,
+      .registerCount = 41,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_swapped
+    { .offset = 0x0000c500,
+      .registerCount = 18,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_swapped_customSampling
+    { .offset = 0x0000c800,
+      .registerCount = 41,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_overlay
+    { .offset = 0x00010b80,
+      .registerCount = 39,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_overlay_customSampling
+    { .offset = 0x00011700,
+      .registerCount = 38,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_overlay_swapped
+    { .offset = 0x00014400,
+      .registerCount = 39,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_overlay_swapped_customSampling
+    { .offset = 0x00014f80,
+      .registerCount = 38,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend
+    { .offset = 0x00017c80,
+      .registerCount = 15,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_customSampling
+    { .offset = 0x00017f00,
+      .registerCount = 41,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_swapped
+    { .offset = 0x0001c200,
+      .registerCount = 20,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_swapped_customSampling
+    { .offset = 0x0001c500,
+      .registerCount = 41,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_overlay
+    { .offset = 0x00020880,
+      .registerCount = 39,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_overlay_customSampling
+    { .offset = 0x00021400,
+      .registerCount = 38,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_overlay_swapped
+    { .offset = 0x00024100,
+      .registerCount = 39,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_overlay_swapped_customSampling
+    { .offset = 0x00024c00,
+      .registerCount = 38,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset
+    { .offset = 0x00027900,
+      .registerCount = 20,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_customSampling
+    { .offset = 0x00027c00,
+      .registerCount = 41,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_swapped
+    { .offset = 0x0002bf80,
+      .registerCount = 20,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_swapped_customSampling
+    { .offset = 0x0002c280,
+      .registerCount = 41,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_overlay
+    { .offset = 0x00030680,
+      .registerCount = 44,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_overlay_customSampling
+    { .offset = 0x00031200,
+      .registerCount = 43,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_overlay_swapped
+    { .offset = 0x00034000,
+      .registerCount = 45,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_overlay_swapped_customSampling
+    { .offset = 0x00034b80,
+      .registerCount = 45,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_yuv420
+    { .offset = 0x00037980,
+      .registerCount = 59,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_yuv420_overlay
+    { .offset = 0x00038f00,
+      .registerCount = 56,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_pixelShift
+    { .offset = 0x0003b080,
+      .registerCount = 45,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_overlay_pixelShift
+    { .offset = 0x0003b800,
+      .registerCount = 35,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_reversePrime
+    { .offset = 0x0003c400,
+      .registerCount = 13,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+};
+
+
+static const Nv3dShaderConstBufInfo TuringConstBufInfo[] = {
+};
+
+static const size_t TuringConstBufSize = 0;
+static const NvU32 TuringConstBufSizeAlign = 256;
+
+// Total shader code size: 241.5 KB
+static const size_t TuringProgramHeapSize = 247296;
+static const size_t TuringShaderMaxLocalBytes = 0;
+static const size_t TuringShaderMaxStackBytes = 0;
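To make the shape of this generated table concrete (this sketch is editorial, not part of the patch): each Nv3dProgramInfo entry records where one program's microcode begins inside the accompanying g_turing_shaders heap (TuringProgramHeapSize bytes in total), along with its register count, shader type, hardware stage, and bind group. A minimal, hypothetical lookup helper, assuming g_turing_shader_info.h and its includes are available:

/*
 * Minimal illustrative sketch, not driver code: resolve a program's
 * generated metadata. Assumes g_shader_names.h and g_turing_shader_info.h
 * (above) have been included into the same translation unit.
 */
#include <stddef.h>

static const Nv3dProgramInfo *GetTuringProgramInfo(ProgramName name)
{
    /* The ct_assert above keeps the enum and the table agreeing on
     * NUM_PROGRAMS, so a simple bounds check suffices here. */
    if ((int)name < 0 || (int)name >= NUM_PROGRAMS) {
        return NULL;
    }
    /* .offset is the byte position of this program's code within the
     * g_turing_shaders heap; .registerCount is its register usage. */
    return &TuringProgramInfo[name];
}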
diff --git a/src/nvidia-modeset/src/shaders/g_turing_shaders b/src/nvidia-modeset/src/shaders/g_turing_shaders
new file mode 100644
index 0000000000000000000000000000000000000000..8c7041c8da3d17c2e35670c6aec0e0abd9a7311e
GIT binary patch
literal 247296
[... 247296-byte base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/src/nvidia-modeset/src/shaders/g_volta_shader_info.h b/src/nvidia-modeset/src/shaders/g_volta_shader_info.h
new file mode 100644
index 0000000..6c048da
--- /dev/null
+++ b/src/nvidia-modeset/src/shaders/g_volta_shader_info.h
@@ -0,0 +1,328 @@
+// Generated using 'Offline GLSL Shader Compiler Version 13.0.0.0.560.00.dev/gpu_drv/bugfix_main-17009'
+// WARNING: This file is auto-generated! Do not hand-edit!
+// Instead, edit the GLSL shaders and run 'unix-build nvmake @generate'.
+
+#include "nvidia-3d-shaders.h"
+#include "g_shader_names.h"
+
+ct_assert(NUM_PROGRAMS == 34);
+static const Nv3dProgramInfo VoltaProgramInfo[NUM_PROGRAMS] = {
+    // nvidia_headsurface_vertex
+    { .offset = 0x00000030,
+      .registerCount = 15,
+      .type = NV3D_SHADER_TYPE_VERTEX,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_VERTEX_B,
+      .bindGroup = NV3D_HW_BIND_GROUP_VERTEX,
+    },
+
+    // nvidia_headsurface_fragment
+    { .offset = 0x000002b0,
+      .registerCount = 13,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_customSampling
+    { .offset = 0x000004b0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_overlay
+    { .offset = 0x000048b0,
+      .registerCount = 32,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_overlay_customSampling
+    { .offset = 0x000053b0,
+      .registerCount = 32,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset
+    { .offset = 0x00008030,
+      .registerCount = 15,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_customSampling
+    { .offset = 0x000082b0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_swapped
+    { .offset = 0x0000c730,
+      .registerCount = 18,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_swapped_customSampling
+    { .offset = 0x0000ca30,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_overlay
+    { .offset = 0x00010eb0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_overlay_customSampling
+    { .offset = 0x00011ab0,
+      .registerCount = 39,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_overlay_swapped
+    { .offset = 0x00014830,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_offset_overlay_swapped_customSampling
+    { .offset = 0x000153b0,
+      .registerCount = 39,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend
+    { .offset = 0x000181b0,
+      .registerCount = 15,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_customSampling
+    { .offset = 0x00018430,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_swapped
+    { .offset = 0x0001c830,
+      .registerCount = 20,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_swapped_customSampling
+    { .offset = 0x0001cb30,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_overlay
+    { .offset = 0x00020fb0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_overlay_customSampling
+    { .offset = 0x00021b30,
+      .registerCount = 39,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_overlay_swapped
+    { .offset = 0x000248b0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_overlay_swapped_customSampling
+    { .offset = 0x00025430,
+      .registerCount = 39,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset
+    { .offset = 0x000281b0,
+      .registerCount = 20,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_customSampling
+    { .offset = 0x000284b0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_swapped
+    { .offset = 0x0002c9b0,
+      .registerCount = 19,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_swapped_customSampling
+    { .offset = 0x0002ccb0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_overlay
+    { .offset = 0x000311b0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_overlay_customSampling
+    { .offset = 0x00031d30,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_overlay_swapped
+    { .offset = 0x00034b30,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_blend_offset_overlay_swapped_customSampling
+    { .offset = 0x000356b0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_yuv420
+    { .offset = 0x000384b0,
+      .registerCount = 48,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_yuv420_overlay
+    { .offset = 0x00039ab0,
+      .registerCount = 40,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_pixelShift
+    { .offset = 0x0003bc30,
+      .registerCount = 32,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_overlay_pixelShift
+    { .offset = 0x0003c3b0,
+      .registerCount = 32,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+    // nvidia_headsurface_fragment_reversePrime
+    { .offset = 0x0003cfb0,
+      .registerCount = 13,
+      .type = NV3D_SHADER_TYPE_PIXEL,
+      .constIndex = -1,
+      .stage = NV3D_HW_SHADER_STAGE_PIXEL,
+      .bindGroup = NV3D_HW_BIND_GROUP_FRAGMENT,
+    },
+
+};
+
+
+static const Nv3dShaderConstBufInfo VoltaConstBufInfo[] = {
+};
+
+static const size_t VoltaConstBufSize = 0;
+static const NvU32 VoltaConstBufSizeAlign = 256;
+
+// Total shader code size: 244.375 KB
+static const size_t VoltaProgramHeapSize = 250240;
+static const size_t VoltaShaderMaxLocalBytes = 0;
+static const size_t VoltaShaderMaxStackBytes = 0;
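Each entry above pairs a shader with the byte offset of its microcode inside the pre-linked program heap (VoltaProgramHeapSize bytes, shipped as the g_volta_shaders binary below), plus the register count and hardware stage needed to bind it. A minimal C sketch of that offset-lookup pattern follows, under stated assumptions: ProgramInfoSketch, ProgramCode(), and the zeroed stand-in heap are hypothetical names for illustration only (the real Nv3dProgramInfo type and enums live in nvidia-3d-shaders.h); the offsets and register counts are the ones from the generated table.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the generated table's shape; the real
 * Nv3dProgramInfo type and enums are defined in nvidia-3d-shaders.h. */
typedef struct {
    const char *name;   /* shader name, for the example only          */
    size_t offset;      /* byte offset of microcode in the program heap */
    int registerCount;  /* GPRs the program requires                  */
    int constIndex;     /* constant buffer index; -1 means none       */
} ProgramInfoSketch;

static const ProgramInfoSketch kPrograms[] = {
    { "nvidia_headsurface_vertex",   0x00000030, 15, -1 },
    { "nvidia_headsurface_fragment", 0x000002b0, 13, -1 },
};

/* Resolve a program's code pointer inside a mapped copy of the heap,
 * bounds-checked against the heap size from the generated header. */
static const void *
ProgramCode(const uint8_t *heapBase, size_t heapSize, size_t idx)
{
    const ProgramInfoSketch *p = &kPrograms[idx];
    return (p->offset < heapSize) ? heapBase + p->offset : NULL;
}

int main(void)
{
    static uint8_t heap[250240]; /* VoltaProgramHeapSize; zeroed stand-in */
    const void *code = ProgramCode(heap, sizeof(heap), 0);
    printf("%s: %d GPRs, microcode at heap offset 0x%zx%s\n",
           kPrograms[0].name, kPrograms[0].registerCount,
           kPrograms[0].offset, code ? "" : " (out of range)");
    return 0;
}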
diff --git a/src/nvidia-modeset/src/shaders/g_volta_shaders b/src/nvidia-modeset/src/shaders/g_volta_shaders
new file mode 100644
index 0000000000000000000000000000000000000000..a9a8e0c53e72419c899a9162c6f656ead02d8a34
GIT binary patch
literal 250240
[... base85-encoded shader binary data omitted ...]
literal 0
HcmV?d00001

diff --git a/src/nvidia-modeset/srcs.mk b/src/nvidia-modeset/srcs.mk
new file mode 100644
index 0000000..499506e
--- /dev/null
+++ b/src/nvidia-modeset/srcs.mk
@@ -0,0 +1,225 @@
+SRCS ?=
+SRCS_CXX ?=
+
+SRCS += ../common/shared/nvstatus/nvstatus.c
+SRCS += ../common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c
+SRCS += ../common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c
+SRCS += ../common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
+SRCS += ../common/softfloat/source/8086-SSE/s_f16UIToCommonNaN.c
+SRCS += ../common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
+SRCS += ../common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
+SRCS += ../common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
+SRCS += ../common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
+SRCS += ../common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
+SRCS += ../common/softfloat/source/f16_to_f32.c
+SRCS += ../common/softfloat/source/f32_add.c
+SRCS += ../common/softfloat/source/f32_div.c
+SRCS += ../common/softfloat/source/f32_eq.c
+SRCS += ../common/softfloat/source/f32_eq_signaling.c
+SRCS += ../common/softfloat/source/f32_isSignalingNaN.c
+SRCS += ../common/softfloat/source/f32_le.c
+SRCS += ../common/softfloat/source/f32_le_quiet.c
+SRCS += ../common/softfloat/source/f32_lt.c
+SRCS += ../common/softfloat/source/f32_lt_quiet.c
+SRCS += ../common/softfloat/source/f32_mul.c
+SRCS += ../common/softfloat/source/f32_mulAdd.c
+SRCS += ../common/softfloat/source/f32_rem.c
+SRCS += ../common/softfloat/source/f32_roundToInt.c
+SRCS += ../common/softfloat/source/f32_sqrt.c
+SRCS += ../common/softfloat/source/f32_sub.c
+SRCS += ../common/softfloat/source/f32_to_f16.c
+SRCS += ../common/softfloat/source/f32_to_f64.c
+SRCS += ../common/softfloat/source/f32_to_i32.c
+SRCS += ../common/softfloat/source/f32_to_i32_r_minMag.c
+SRCS +=
../common/softfloat/source/f32_to_i64.c +SRCS += ../common/softfloat/source/f32_to_i64_r_minMag.c +SRCS += ../common/softfloat/source/f32_to_ui32.c +SRCS += ../common/softfloat/source/f32_to_ui32_r_minMag.c +SRCS += ../common/softfloat/source/f32_to_ui64.c +SRCS += ../common/softfloat/source/f32_to_ui64_r_minMag.c +SRCS += ../common/softfloat/source/f64_add.c +SRCS += ../common/softfloat/source/f64_div.c +SRCS += ../common/softfloat/source/f64_eq.c +SRCS += ../common/softfloat/source/f64_eq_signaling.c +SRCS += ../common/softfloat/source/f64_isSignalingNaN.c +SRCS += ../common/softfloat/source/f64_le.c +SRCS += ../common/softfloat/source/f64_le_quiet.c +SRCS += ../common/softfloat/source/f64_lt.c +SRCS += ../common/softfloat/source/f64_lt_quiet.c +SRCS += ../common/softfloat/source/f64_mul.c +SRCS += ../common/softfloat/source/f64_mulAdd.c +SRCS += ../common/softfloat/source/f64_rem.c +SRCS += ../common/softfloat/source/f64_roundToInt.c +SRCS += ../common/softfloat/source/f64_sqrt.c +SRCS += ../common/softfloat/source/f64_sub.c +SRCS += ../common/softfloat/source/f64_to_f32.c +SRCS += ../common/softfloat/source/f64_to_i32.c +SRCS += ../common/softfloat/source/f64_to_i32_r_minMag.c +SRCS += ../common/softfloat/source/f64_to_i64.c +SRCS += ../common/softfloat/source/f64_to_i64_r_minMag.c +SRCS += ../common/softfloat/source/f64_to_ui32.c +SRCS += ../common/softfloat/source/f64_to_ui32_r_minMag.c +SRCS += ../common/softfloat/source/f64_to_ui64.c +SRCS += ../common/softfloat/source/f64_to_ui64_r_minMag.c +SRCS += ../common/softfloat/source/i32_to_f32.c +SRCS += ../common/softfloat/source/i32_to_f64.c +SRCS += ../common/softfloat/source/i64_to_f32.c +SRCS += ../common/softfloat/source/i64_to_f64.c +SRCS += ../common/softfloat/source/s_addMagsF32.c +SRCS += ../common/softfloat/source/s_addMagsF64.c +SRCS += ../common/softfloat/source/s_approxRecipSqrt32_1.c +SRCS += ../common/softfloat/source/s_approxRecipSqrt_1Ks.c +SRCS += ../common/softfloat/source/s_countLeadingZeros64.c +SRCS += ../common/softfloat/source/s_countLeadingZeros8.c +SRCS += ../common/softfloat/source/s_mul64To128.c +SRCS += ../common/softfloat/source/s_mulAddF32.c +SRCS += ../common/softfloat/source/s_mulAddF64.c +SRCS += ../common/softfloat/source/s_normRoundPackToF32.c +SRCS += ../common/softfloat/source/s_normRoundPackToF64.c +SRCS += ../common/softfloat/source/s_normSubnormalF16Sig.c +SRCS += ../common/softfloat/source/s_normSubnormalF32Sig.c +SRCS += ../common/softfloat/source/s_normSubnormalF64Sig.c +SRCS += ../common/softfloat/source/s_roundPackToF16.c +SRCS += ../common/softfloat/source/s_roundPackToF32.c +SRCS += ../common/softfloat/source/s_roundPackToF64.c +SRCS += ../common/softfloat/source/s_roundToI32.c +SRCS += ../common/softfloat/source/s_roundToI64.c +SRCS += ../common/softfloat/source/s_roundToUI32.c +SRCS += ../common/softfloat/source/s_roundToUI64.c +SRCS += ../common/softfloat/source/s_shiftRightJam128.c +SRCS += ../common/softfloat/source/s_subMagsF32.c +SRCS += ../common/softfloat/source/s_subMagsF64.c +SRCS += ../common/softfloat/source/softfloat_state.c +SRCS += ../common/softfloat/source/ui32_to_f32.c +SRCS += ../common/softfloat/source/ui32_to_f64.c +SRCS += ../common/softfloat/source/ui64_to_f32.c +SRCS += ../common/softfloat/source/ui64_to_f64.c +SRCS += ../common/src/nv_smg.c +SRCS_CXX += ../common/displayport/src/dp_auxretry.cpp +SRCS_CXX += ../common/displayport/src/dp_bitstream.cpp +SRCS_CXX += ../common/displayport/src/dp_buffer.cpp +SRCS_CXX += ../common/displayport/src/dp_configcaps.cpp 
+SRCS_CXX += ../common/displayport/src/dp_connectorimpl.cpp +SRCS_CXX += ../common/displayport/src/dp_crc.cpp +SRCS_CXX += ../common/displayport/src/dp_deviceimpl.cpp +SRCS_CXX += ../common/displayport/src/dp_discovery.cpp +SRCS_CXX += ../common/displayport/src/dp_edid.cpp +SRCS_CXX += ../common/displayport/src/dp_evoadapter.cpp +SRCS_CXX += ../common/displayport/src/dp_groupimpl.cpp +SRCS_CXX += ../common/displayport/src/dp_guid.cpp +SRCS_CXX += ../common/displayport/src/dp_linkconfig.cpp +SRCS_CXX += ../common/displayport/src/dp_list.cpp +SRCS_CXX += ../common/displayport/src/dp_merger.cpp +SRCS_CXX += ../common/displayport/src/dp_messagecodings.cpp +SRCS_CXX += ../common/displayport/src/dp_messageheader.cpp +SRCS_CXX += ../common/displayport/src/dp_messages.cpp +SRCS_CXX += ../common/displayport/src/dp_mst_edid.cpp +SRCS_CXX += ../common/displayport/src/dp_qse.cpp +SRCS_CXX += ../common/displayport/src/dp_splitter.cpp +SRCS_CXX += ../common/displayport/src/dp_sst_edid.cpp +SRCS_CXX += ../common/displayport/src/dp_timer.cpp +SRCS_CXX += ../common/displayport/src/dp_vrr.cpp +SRCS_CXX += ../common/displayport/src/dp_wardatabase.cpp +SRCS_CXX += ../common/displayport/src/dp_watermark.cpp +SRCS_CXX += ../common/displayport/src/dptestutil/dp_testmessage.cpp +SRCS += ../common/modeset/hdmipacket/nvhdmipkt.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_0073.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9171.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9271.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9471.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9571.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_C371.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_C671.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_C871.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_C971.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_CC71.c +SRCS += ../common/modeset/timing/nvt_cvt.c +SRCS += ../common/modeset/timing/nvt_displayid20.c +SRCS += ../common/modeset/timing/nvt_dmt.c +SRCS += ../common/modeset/timing/nvt_dsc_pps.c +SRCS += ../common/modeset/timing/nvt_edid.c +SRCS += ../common/modeset/timing/nvt_edidext_861.c +SRCS += ../common/modeset/timing/nvt_edidext_displayid.c +SRCS += ../common/modeset/timing/nvt_edidext_displayid20.c +SRCS += ../common/modeset/timing/nvt_gtf.c +SRCS += ../common/modeset/timing/nvt_ovt.c +SRCS += ../common/modeset/timing/nvt_tv.c +SRCS += ../common/modeset/timing/nvt_util.c +SRCS += ../common/unix/common/utils/nv_memory_tracker.c +SRCS += ../common/unix/common/utils/nv_mode_timings_utils.c +SRCS += ../common/unix/common/utils/nv_vasprintf.c +SRCS += ../common/unix/common/utils/unix_rm_handle.c +SRCS += ../common/unix/nvidia-3d/src/nvidia-3d-core.c +SRCS += ../common/unix/nvidia-3d/src/nvidia-3d-fermi.c +SRCS += ../common/unix/nvidia-3d/src/nvidia-3d-hopper.c +SRCS += ../common/unix/nvidia-3d/src/nvidia-3d-init.c +SRCS += ../common/unix/nvidia-3d/src/nvidia-3d-kepler.c +SRCS += ../common/unix/nvidia-3d/src/nvidia-3d-maxwell.c +SRCS += ../common/unix/nvidia-3d/src/nvidia-3d-pascal.c +SRCS += ../common/unix/nvidia-3d/src/nvidia-3d-surface.c +SRCS += ../common/unix/nvidia-3d/src/nvidia-3d-turing.c +SRCS += ../common/unix/nvidia-3d/src/nvidia-3d-vertex-arrays.c +SRCS += ../common/unix/nvidia-3d/src/nvidia-3d-volta.c +SRCS += ../common/unix/nvidia-push/src/nvidia-push-init.c +SRCS += ../common/unix/nvidia-push/src/nvidia-push.c +SRCS += kapi/src/nvkms-kapi-notifiers.c +SRCS += kapi/src/nvkms-kapi-sync.c +SRCS += kapi/src/nvkms-kapi.c +SRCS += 
lib/nvkms-format.c +SRCS += lib/nvkms-sync.c +SRCS_CXX += src/dp/nvdp-connector-event-sink.cpp +SRCS_CXX += src/dp/nvdp-connector.cpp +SRCS_CXX += src/dp/nvdp-device.cpp +SRCS_CXX += src/dp/nvdp-evo-interface.cpp +SRCS_CXX += src/dp/nvdp-host.cpp +SRCS_CXX += src/dp/nvdp-timer.cpp +SRCS += src/g_nvkms-evo-states.c +SRCS += src/nvkms-3dvision.c +SRCS += src/nvkms-attributes.c +SRCS += src/nvkms-conf.c +SRCS += src/nvkms-console-restore.c +SRCS += src/nvkms-ctxdma.c +SRCS += src/nvkms-cursor.c +SRCS += src/nvkms-cursor2.c +SRCS += src/nvkms-cursor3.c +SRCS += src/nvkms-difr.c +SRCS += src/nvkms-dma.c +SRCS += src/nvkms-dpy-override.c +SRCS += src/nvkms-dpy.c +SRCS += src/nvkms-event.c +SRCS += src/nvkms-evo.c +SRCS += src/nvkms-evo1.c +SRCS += src/nvkms-evo2.c +SRCS += src/nvkms-evo3.c +SRCS += src/nvkms-evo4.c +SRCS += src/nvkms-flip.c +SRCS += src/nvkms-framelock.c +SRCS += src/nvkms-hal.c +SRCS += src/nvkms-hdmi.c +SRCS += src/nvkms-headsurface-3d.c +SRCS += src/nvkms-headsurface-config.c +SRCS += src/nvkms-headsurface-ioctl.c +SRCS += src/nvkms-headsurface-matrix.c +SRCS += src/nvkms-headsurface-swapgroup.c +SRCS += src/nvkms-headsurface.c +SRCS += src/nvkms-hw-flip.c +SRCS += src/nvkms-hw-states.c +SRCS += src/nvkms-lut.c +SRCS += src/nvkms-modepool.c +SRCS += src/nvkms-modeset.c +SRCS += src/nvkms-pow.c +SRCS += src/nvkms-prealloc.c +SRCS += src/nvkms-push.c +SRCS += src/nvkms-rm.c +SRCS += src/nvkms-rmapi-dgpu.c +SRCS += src/nvkms-stereo.c +SRCS += src/nvkms-surface.c +SRCS += src/nvkms-utils-flip.c +SRCS += src/nvkms-utils.c +SRCS += src/nvkms-vblank-sem-control.c +SRCS += src/nvkms-vrr.c +SRCS += src/nvkms.c +SRCS += ../common/unix/xzminidec/src/xz_crc32.c +SRCS += ../common/unix/xzminidec/src/xz_dec_bcj.c +SRCS += ../common/unix/xzminidec/src/xz_dec_lzma2.c +SRCS += ../common/unix/xzminidec/src/xz_dec_stream.c diff --git a/src/nvidia/Makefile b/src/nvidia/Makefile new file mode 100644 index 0000000..e6bb857 --- /dev/null +++ b/src/nvidia/Makefile @@ -0,0 +1,217 @@ +########################################################################### +# Makefile for nv-kernel.o +########################################################################### + +NV_MODULE_LOGGING_NAME ?= nvidia + +VERSION_MK_DIR = ../../ + +include ../../utils.mk + +include srcs.mk + +############################################################################## +# Helper functions to determine the compiler type +############################################################################## +GET_COMPILER_TYPE = \ + $(shell $(VERSION_MK_DIR)/nv-compiler.sh type $(1)) +############################################################################## + +# The source files for nv-kernel.o are all SRCS and SRCS_CXX defined in srcs.mk, +# and the NVIDIA ID string +ALL_SRCS = $(SRCS) $(SRCS_CXX) +ALL_SRCS += $(NVIDSTRING) + +SRC_COMMON = ../common +CONDITIONAL_CFLAGS := + +CFLAGS += -include $(SRC_COMMON)/sdk/nvidia/inc/cpuopsys.h + +CFLAGS += -I kernel/inc +CFLAGS += -I interface +CFLAGS += -I $(SRC_COMMON)/sdk/nvidia/inc +CFLAGS += -I $(SRC_COMMON)/sdk/nvidia/inc/hw +CFLAGS += -I arch/nvalloc/common/inc +CFLAGS += -I arch/nvalloc/common/inc/gsp +CFLAGS += -I arch/nvalloc/common/inc/deprecated +CFLAGS += -I arch/nvalloc/unix/include +CFLAGS += -I inc +CFLAGS += -I inc/os +CFLAGS += -I $(SRC_COMMON)/shared/inc +CFLAGS += -I $(SRC_COMMON)/shared/msgq/inc +CFLAGS += -I $(SRC_COMMON)/inc + +CFLAGS += -I $(SRC_COMMON)/inc/swref +CFLAGS += -I $(SRC_COMMON)/inc/swref/published + +CFLAGS += -I generated +CFLAGS += -I 
$(SRC_COMMON)/nvswitch/kernel/inc +CFLAGS += -I $(SRC_COMMON)/nvswitch/interface +CFLAGS += -I $(SRC_COMMON)/nvswitch/common/inc/ +CFLAGS += -I $(SRC_COMMON)/inc/displayport +CFLAGS += -I $(SRC_COMMON)/nvlink/interface/ +CFLAGS += -I $(SRC_COMMON)/nvlink/inband/interface +CFLAGS += -I src/mm/uvm/interface +CFLAGS += -I inc/libraries +CFLAGS += -I src/libraries +CFLAGS += -I inc/kernel + +#if NV_USE_MBEDTLS +MBEDTLS_VERSION ?= 3.6.2 +CFLAGS += -I $(SRC_COMMON)/mbedtls/$(MBEDTLS_VERSION)/include +CFLAGS += -I $(SRC_COMMON)/mbedtls/$(MBEDTLS_VERSION)/nvidia +CFLAGS += -D"MBEDTLS_USER_CONFIG_FILE=" +#endif + +CFLAGS += -Werror-implicit-function-declaration +CFLAGS += -Wwrite-strings +CFLAGS += -Wundef +CFLAGS += -fno-common +CFLAGS += -ffreestanding +CFLAGS += -fno-stack-protector + +ifeq ($(TARGET_ARCH),x86_64) + CFLAGS += -msoft-float + CFLAGS += -mno-red-zone + CFLAGS += -mcmodel=kernel + CFLAGS += -mno-mmx + CFLAGS += -mno-sse + CFLAGS += -mno-sse2 + CFLAGS += -mno-3dnow +endif + +ifeq ($(TARGET_ARCH),aarch64) + CFLAGS += -mgeneral-regs-only + CFLAGS += -march=armv8-a + CFLAGS += -mstrict-align + CFLAGS += -ffixed-x18 + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mno-outline-atomics) +endif + +ifeq ($(TARGET_ARCH),riscv64) + CFLAGS += -march=rv64imac_zicsr_zifencei + CFLAGS += -mabi=lp64 + CFLAGS += -mcmodel=medany + CFLAGS += -mno-relax +endif + +CFLAGS += -fno-pic + +CFLAGS += -D_LANGUAGE_C +CFLAGS += -D__NO_CTYPE +CFLAGS += -DNVRM +CFLAGS += -DLOCK_VAL_ENABLED=0 +CFLAGS += -DPORT_ATOMIC_64_BIT_SUPPORTED=1 +CFLAGS += -DPORT_IS_KERNEL_BUILD=1 +CFLAGS += -DPORT_IS_CHECKED_BUILD=0 +CFLAGS += -DPORT_MODULE_atomic=1 +CFLAGS += -DPORT_MODULE_core=1 +CFLAGS += -DPORT_MODULE_cpu=1 +CFLAGS += -DPORT_MODULE_crypto=1 +CFLAGS += -DPORT_MODULE_debug=1 +CFLAGS += -DPORT_MODULE_memory=1 +CFLAGS += -DPORT_MODULE_safe=1 +CFLAGS += -DPORT_MODULE_string=1 +CFLAGS += -DPORT_MODULE_sync=1 +CFLAGS += -DPORT_MODULE_thread=1 +CFLAGS += -DPORT_MODULE_util=1 +CFLAGS += -DPORT_MODULE_example=0 +CFLAGS += -DPORT_MODULE_mmio=0 +CFLAGS += -DPORT_MODULE_time=0 +CFLAGS += -DRS_STANDALONE=0 +CFLAGS += -DRS_STANDALONE_TEST=0 +CFLAGS += -DRS_COMPATABILITY_MODE=1 +CFLAGS += -DRS_PROVIDES_API_STATE=0 +CFLAGS += -DNV_CONTAINERS_NO_TEMPLATES + +CFLAGS += -DNV_PRINTF_STRINGS_ALLOWED=1 +CFLAGS += -DNV_ASSERT_FAILED_USES_STRINGS=1 +CFLAGS += -DPORT_ASSERT_FAILED_USES_STRINGS=1 + +ifeq ($(DEBUG),1) + CFLAGS += -gsplit-dwarf +endif + +# Define how to perform dead code elimination: place each symbol in its own +# section at compile time, and garbage collect unreachable sections at link +# time. exports_link_command.txt tells the linker which symbols need to be +# exported from nv-kernel.o so the linker can determine which symbols are +# unreachable. +CFLAGS += -ffunction-sections +CFLAGS += -fdata-sections +NV_KERNEL_O_LDFLAGS += --gc-sections +EXPORTS_LINK_COMMAND = exports_link_command.txt + +ifeq ($(TARGET_ARCH),x86_64) + COMPILER_TYPE := $(call GET_COMPILER_TYPE, $(CC)) + ENDBR_SUPPORTED := $(call AS_HAS_INSTR, endbr64) + + FCF_SUPPORTED = + + # + # GCC flags -fcf-protection=branch and -mindirect-branch=extern-thunk can + # be used together after GCC version 9.4.0. See + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93654 for details. + # Check if GCC version is appropriate. + # + ifeq ($(COMPILER_TYPE),gcc) + FCF_SUPPORTED := \ + $(shell $(VERSION_MK_DIR)/nv-compiler.sh version_is_at_least $(CC) 90400) + endif + + # + # Clang version 14.0.0 is required for -fcf-protection=branch to work + # correctly. 
See commit + # https://github.com/llvm/llvm-project/commit/dfcf69770bc522b9e411c66454934a37c1f35332 + # + ifeq ($(COMPILER_TYPE),clang) + FCF_SUPPORTED := \ + $(shell $(VERSION_MK_DIR)/nv-compiler.sh version_is_at_least $(CC) 140000) + endif + + ifeq ($(FCF_SUPPORTED)-$(ENDBR_SUPPORTED),1-1) + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -fcf-protection=branch) + endif + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -fno-jump-tables) + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch-register) + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch=thunk-extern) +endif + +CFLAGS += $(CONDITIONAL_CFLAGS) + +CC_ONLY_CFLAGS += --std=gnu11 +CXX_ONLY_CFLAGS += --std=gnu++11 + +OBJS = $(call BUILD_OBJECT_LIST,$(ALL_SRCS)) + +# Define how to generate the NVIDIA ID string +$(eval $(call GENERATE_NVIDSTRING, \ + NVRM_ID, \ + UNIX Open Kernel Module, $(OBJS))) + +# Define how to build each object file from the corresponding source file. +$(foreach src, $(ALL_SRCS), $(eval $(call DEFINE_OBJECT_RULE,TARGET,$(src)))) + +NV_KERNEL_O = $(OUTPUTDIR)/nv-kernel.o + +.PHONY: all +all: $(NV_KERNEL_O) + +LINKER_SCRIPT = nv-kernel.ld + +NV_KERNEL_O_LDFLAGS += $(LDFLAGS) + +$(NV_KERNEL_O): $(OBJS) $(EXPORTS_LINK_COMMAND) $(LINKER_SCRIPT) + $(call quiet_cmd,LD) \ + $(NV_KERNEL_O_LDFLAGS) \ + -T $(LINKER_SCRIPT) \ + -r -o $(NV_KERNEL_O) $(OBJS) @$(EXPORTS_LINK_COMMAND) + $(call quiet_cmd,OBJCOPY) \ + --localize-symbol=memset \ + --localize-symbol=memcpy \ + --remove-section=.note.gnu.property \ + $@ + +.PHONY: clean +clean: + $(RM) -rf $(OUTPUTDIR) diff --git a/src/nvidia/arch/nvalloc/common/inc/nv-firmware.h b/src/nvidia/arch/nvalloc/common/inc/nv-firmware.h new file mode 100644 index 0000000..e77ea2f --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/nv-firmware.h @@ -0,0 +1,139 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef NV_FIRMWARE_H
+#define NV_FIRMWARE_H
+
+#include
+#include
+
+typedef enum
+{
+    NV_FIRMWARE_TYPE_GSP,
+    NV_FIRMWARE_TYPE_GSP_LOG,
+} nv_firmware_type_t;
+
+typedef enum
+{
+    NV_FIRMWARE_CHIP_FAMILY_NULL = 0,
+    NV_FIRMWARE_CHIP_FAMILY_TU10X = 1,
+    NV_FIRMWARE_CHIP_FAMILY_TU11X = 2,
+    NV_FIRMWARE_CHIP_FAMILY_GA100 = 3,
+    NV_FIRMWARE_CHIP_FAMILY_GA10X = 4,
+    NV_FIRMWARE_CHIP_FAMILY_AD10X = 5,
+    NV_FIRMWARE_CHIP_FAMILY_GH100 = 6,
+    NV_FIRMWARE_CHIP_FAMILY_GB10X = 8,
+    NV_FIRMWARE_CHIP_FAMILY_GB10Y = 11,
+    NV_FIRMWARE_CHIP_FAMILY_END,
+} nv_firmware_chip_family_t;
+
+static inline const char *nv_firmware_chip_family_to_string(
+    nv_firmware_chip_family_t fw_chip_family
+)
+{
+    switch (fw_chip_family) {
+        case NV_FIRMWARE_CHIP_FAMILY_GB10X: return "gb10x";
+        case NV_FIRMWARE_CHIP_FAMILY_GB10Y: return "gb10y";
+        case NV_FIRMWARE_CHIP_FAMILY_GH100: return "gh100";
+        case NV_FIRMWARE_CHIP_FAMILY_AD10X: return "ad10x";
+        case NV_FIRMWARE_CHIP_FAMILY_GA10X: return "ga10x";
+        case NV_FIRMWARE_CHIP_FAMILY_GA100: return "ga100";
+        case NV_FIRMWARE_CHIP_FAMILY_TU11X: return "tu11x";
+        case NV_FIRMWARE_CHIP_FAMILY_TU10X: return "tu10x";
+
+        case NV_FIRMWARE_CHIP_FAMILY_END:  // fall through
+        case NV_FIRMWARE_CHIP_FAMILY_NULL:
+            return "";
+    }
+    return "";
+}
+
+// The includer may optionally define
+// NV_FIRMWARE_FOR_NAME(name)
+// to return a platform-defined string for a given gsp_* or gsp_log_* name.
+//
+// The function nv_firmware_for_chip_family will then be available.
+#if defined(NV_FIRMWARE_FOR_NAME)
+static inline const char *nv_firmware_for_chip_family(
+    nv_firmware_type_t fw_type,
+    nv_firmware_chip_family_t fw_chip_family
+)
+{
+    if (fw_type == NV_FIRMWARE_TYPE_GSP)
+    {
+        switch (fw_chip_family)
+        {
+            case NV_FIRMWARE_CHIP_FAMILY_GB10X:  // fall through
+            case NV_FIRMWARE_CHIP_FAMILY_GB10Y:  // fall through
+            case NV_FIRMWARE_CHIP_FAMILY_GH100:  // fall through
+            case NV_FIRMWARE_CHIP_FAMILY_AD10X:  // fall through
+            case NV_FIRMWARE_CHIP_FAMILY_GA10X:
+                return NV_FIRMWARE_FOR_NAME("gsp_ga10x");
+
+            case NV_FIRMWARE_CHIP_FAMILY_GA100:  // fall through
+            case NV_FIRMWARE_CHIP_FAMILY_TU11X:  // fall through
+            case NV_FIRMWARE_CHIP_FAMILY_TU10X:
+                return NV_FIRMWARE_FOR_NAME("gsp_tu10x");
+
+            case NV_FIRMWARE_CHIP_FAMILY_END:  // fall through
+            case NV_FIRMWARE_CHIP_FAMILY_NULL:
+                return "";
+        }
+    }
+    else if (fw_type == NV_FIRMWARE_TYPE_GSP_LOG)
+    {
+        switch (fw_chip_family)
+        {
+            case NV_FIRMWARE_CHIP_FAMILY_GB10X:  // fall through
+            case NV_FIRMWARE_CHIP_FAMILY_GB10Y:  // fall through
+            case NV_FIRMWARE_CHIP_FAMILY_GH100:  // fall through
+            case NV_FIRMWARE_CHIP_FAMILY_AD10X:  // fall through
+            case NV_FIRMWARE_CHIP_FAMILY_GA10X:
+                return NV_FIRMWARE_FOR_NAME("gsp_log_ga10x");
+
+            case NV_FIRMWARE_CHIP_FAMILY_GA100:  // fall through
+            case NV_FIRMWARE_CHIP_FAMILY_TU11X:  // fall through
+            case NV_FIRMWARE_CHIP_FAMILY_TU10X:
+                return NV_FIRMWARE_FOR_NAME("gsp_log_tu10x");
+
+            case NV_FIRMWARE_CHIP_FAMILY_END:  // fall through
+            case NV_FIRMWARE_CHIP_FAMILY_NULL:
+                return "";
+        }
+    }
+    return "";
+}
+#endif  // defined(NV_FIRMWARE_FOR_NAME)
+
+// The includer may optionally define
+// NV_FIRMWARE_DECLARE_GSP(name)
+// which will then be invoked (at the top-level) for each
+// gsp_* (but not gsp_log_*) name.
+#if defined(NV_FIRMWARE_DECLARE_GSP)
+NV_FIRMWARE_DECLARE_GSP("gsp_ga10x")
+NV_FIRMWARE_DECLARE_GSP("gsp_tu10x")
+#endif  // defined(NV_FIRMWARE_DECLARE_GSP)
+
+#endif  // NV_FIRMWARE_H
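A minimal sketch, not part of the original patch, of how an includer might wire
up the NV_FIRMWARE_FOR_NAME() hook above; the "/lib/firmware/nvidia" prefix and
the ".bin" suffix are illustrative assumptions:

    /* Hypothetical platform glue, defined before including nv-firmware.h. */
    #define NV_FIRMWARE_FOR_NAME(name)  "/lib/firmware/nvidia/" name ".bin"
    #include "nv-firmware.h"

    /* nv_firmware_for_chip_family(NV_FIRMWARE_TYPE_GSP,
       NV_FIRMWARE_CHIP_FAMILY_AD10X) then evaluates, via string-literal
       concatenation, to "/lib/firmware/nvidia/gsp_ga10x.bin". */

diff --git a/src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h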
b/src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h new file mode 100644 index 0000000..1ff7df6 --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h @@ -0,0 +1,162 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file nvrangetypes.h + * @brief Range types and operator macros + * @note #include a header to define NvUxx and NvSxx before sourcing this file. + */ + +#ifndef _NVRANGETYPES_H_ +#define _NVRANGETYPES_H_ + + +// +// Define range types by convention +// +#define __NV_DEFINE_RANGE_TYPE(T) \ +typedef struct NvRange ## T \ +{ \ + Nv ## T min; \ + Nv ## T max; \ +} NvRange ## T; + + +__NV_DEFINE_RANGE_TYPE(U64) // NvRangeU64 +__NV_DEFINE_RANGE_TYPE(S64) // NvRangeS64 +__NV_DEFINE_RANGE_TYPE(U32) // NvRangeU32 +__NV_DEFINE_RANGE_TYPE(S32) // NvRangeS32 +__NV_DEFINE_RANGE_TYPE(U16) // NvRangeU16 +__NV_DEFINE_RANGE_TYPE(S16) // NvRangeS16 +__NV_DEFINE_RANGE_TYPE(U8) // NvRangeU8 +__NV_DEFINE_RANGE_TYPE(S8) // NvRangeS8 + + +// +// Operator macros +// +// Macros are named xxx_RANGE (rather than xxx_RANGEU32, etc.) since they work +// properly on ranges with any number of bits, signed or unsigned. +// + +#define NV_EQUAL_RANGE(r1, r2) ((r1).min == (r2).min && (r1).max == (r2).max) +#define NV_EMPTY_INCLUSIVE_RANGE(r) ((r).min > (r).max) +#define NV_EMPTY_EXCLUSIVE_RANGE(r) ((r).min + 1 > (r).max - 1) +#define NV_WITHIN_INCLUSIVE_RANGE(r, x) ((r).min <= (x) && (x) <= (r).max) +#define NV_WITHIN_EXCLUSIVE_RANGE(r, x) ((r).min < (x) && (x) < (r).max) +#define NV_IS_SUBSET_RANGE(r1, r2) ((r1).min >= (r2).min && (r2).max >= (r1).max) +#define NV_IS_SUPERSET_RANGE(r1, r2) ((r1).min <= (r2).min && (r2).max <= (r1).max) +#define NV_CENTER_OF_RANGE(r) ((r).min / 2 + ((r).max + 1) / 2) // Avoid overflow and rounding anomalies. +#define NV_IS_OVERLAPPING_RANGE(r1, r2) \ + (NV_WITHIN_INCLUSIVE_RANGE((r1), (r2).min) || \ + NV_WITHIN_INCLUSIVE_RANGE((r1), (r2).max)) + +#define NV_DISTANCE_FROM_RANGE(r, x) ((x) < (r).min? (r).min - (x): ((x) > (r).max? (x) - (r).max: 0)) +#define NV_VALUE_WITHIN_INCLUSIVE_RANGE(r, x) ((x) < (r).min? (r).min : ((x) > (r).max? (r).max : (x))) +#define NV_VALUE_WITHIN_EXCLUSIVE_RANGE(r, x) ((x) <= (r).min? (r).min + 1 : ((x) >= (r).max? 
(r).max - 1 : (x)))
+
+#define NV_INIT_RANGE(r, x, y) \
+do \
+{ \
+    (r).min = (x); \
+    (r).max = (y); \
+} while(0)
+
+#define NV_ASSIGN_DELTA_RANGE(r, x, d) \
+do \
+{ \
+    (r).min = (x) - (d); \
+    (r).max = (x) + (d); \
+} while(0)
+
+#define NV_ASSIGN_INTERSECTION_RANGE(r1, r2) \
+do \
+{ \
+    if ((r1).min < (r2).min) \
+        (r1).min = (r2).min; \
+    if ((r1).max > (r2).max) \
+        (r1).max = (r2).max; \
+} while(0)
+
+#define NV_ASSIGN_UNION_RANGE(r1, r2) \
+do \
+{ \
+    if ((r1).min > (r2).min) \
+        (r1).min = (r2).min; \
+    if ((r1).max < (r2).max) \
+        (r1).max = (r2).max; \
+} while(0)
+
+#define NV_MULTIPLY_RANGE(r, x) \
+do \
+{ \
+    (r).min *= (x); \
+    (r).max *= (x); \
+} while(0)
+
+#define NV_DIVIDE_FLOOR_RANGE(r, x) \
+do \
+{ \
+    (r).min /= (x); \
+    (r).max /= (x); \
+} while(0)
+
+#define NV_DIVIDE_CEILING_RANGE(r, x) \
+do \
+{ \
+    (r).min = ((r).min + (x) - 1) / (x); \
+    (r).max = ((r).max + (x) - 1) / (x); \
+} while(0)
+
+#define NV_DIVIDE_ROUND_RANGE(r, x) \
+do \
+{ \
+    (r).min = ((r).min + (x) / 2) / (x); \
+    (r).max = ((r).max + (x) / 2) / (x); \
+} while(0)
+
+#define NV_DIVIDE_WIDE_RANGE(r, x) \
+do \
+{ \
+    (r).min /= (x); \
+    (r).max = ((r).max + (x) - 1) / (x); \
+} while(0)
+
+#define NV_DIVIDE_NARROW_RANGE(r, x) \
+do \
+{ \
+    (r).min = ((r).min + (x) - 1) / (x); \
+    (r).max /= (x); \
+} while(0)
+
+#endif // _NVRANGETYPES_H_
+
diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-caps.h b/src/nvidia/arch/nvalloc/unix/include/nv-caps.h
new file mode 100644
index 0000000..35bbf7c
--- /dev/null
+++ b/src/nvidia/arch/nvalloc/unix/include/nv-caps.h
@@ -0,0 +1,94 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_CAPS_H_
+#define _NV_CAPS_H_
+
+#include
+
+/*
+ * Opaque OS-specific struct; on Linux, this has member
+ * 'struct proc_dir_entry'.
+ */
+typedef struct nv_cap nv_cap_t;
+
+/*
+ * Creates directory named "capabilities" under the provided path.
+ *
+ * @param[in] path Absolute path
+ *
+ * Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
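+ *
+ * A minimal usage sketch (illustrative only; the "/driver/nvidia" path and
+ * the "mig"/"config" names are assumptions, and the helpers used here are
+ * declared below):
+ *
+ *     nv_cap_t *root = nv_cap_init("/driver/nvidia");
+ *     nv_cap_t *dir  = nv_cap_create_dir_entry(root, "mig", 0555);
+ *     nv_cap_t *file = nv_cap_create_file_entry(dir, "config", 0400);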
+ */
+nv_cap_t* NV_API_CALL nv_cap_init(const char *path);
+
+/*
+ * Creates capability directory entry
+ *
+ * @param[in] parent_cap Parent capability directory
+ * @param[in] name Capability directory's name
+ * @param[in] mode Capability directory's access mode
+ *
+ * Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
+ */
+nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap, const char *name, int mode);
+
+/*
+ * Creates capability file entry
+ *
+ * @param[in] parent_cap Parent capability directory
+ * @param[in] name Capability file's name
+ * @param[in] mode Capability file's access mode
+ *
+ * Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
+ */
+nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap, const char *name, int mode);
+
+/*
+ * Destroys capability entry
+ *
+ * @param[in] cap Capability entry
+ */
+void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap);
+
+/*
+ * Validates and duplicates the provided file descriptor
+ *
+ * @param[in] cap Capability entry
+ * @param[in] fd File descriptor to be validated
+ *
+ * Returns duplicate fd upon success. Otherwise, returns -1.
+ */
+int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd);
+
+/*
+ * Closes file descriptor
+ *
+ * This function should be used to close duplicate file descriptors
+ * returned by nv_cap_validate_and_dup_fd.
+ *
+ * @param[in] fd File descriptor to be closed
+ *
+ */
+void NV_API_CALL nv_cap_close_fd(int fd);
+
+#endif /* _NV_CAPS_H_ */
diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-chardev-numbers.h b/src/nvidia/arch/nvalloc/unix/include/nv-chardev-numbers.h
new file mode 100644
index 0000000..54ca547
--- /dev/null
+++ b/src/nvidia/arch/nvalloc/unix/include/nv-chardev-numbers.h
@@ -0,0 +1,43 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _NV_CHARDEV_NUMBERS_H_
+#define _NV_CHARDEV_NUMBERS_H_
+
+// NVIDIA's reserved major character device number (Linux).
+#define NV_MAJOR_DEVICE_NUMBER 195 + +// Minor numbers 0 to 247 reserved for regular devices +#define NV_MINOR_DEVICE_NUMBER_REGULAR_MAX 247 + +// Minor numbers 248 to 253 currently unused + +// Minor number 254 reserved for the modeset device (provided by NVKMS) +#define NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE 254 + +// Minor number 255 reserved for the control device +#define NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE 255 + +#endif // _NV_CHARDEV_NUMBERS_H_ + diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h b/src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h new file mode 100644 index 0000000..a8c0c0a --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_GPU_INFO_H_ +#define _NV_GPU_INFO_H_ + +typedef struct { + NvU32 gpu_id; + + struct { + NvU32 domain; + NvU8 bus, slot, function; + } pci_info; + + /* + * opaque OS-specific pointer; on Linux, this is a pointer to the + * 'struct device' for the GPU. + */ + void *os_device_ptr; +} nv_gpu_info_t; + +#define NV_MAX_GPUS 32 + +#endif /* _NV_GPU_INFO_H_ */ diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-lockless-diag.h b/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-lockless-diag.h new file mode 100644 index 0000000..84e3fa7 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-lockless-diag.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NV_IOCTL_LOCKLESS_DIAG) +#define NV_IOCTL_LOCKLESS_DIAG + +#include +#include "ctrl/ctrl0000/ctrl0000nvd.h" + +typedef struct +{ + NvU32 cmd; // in + NvU32 status; // out + union // in/out + { + NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS getNvlogInfo; + NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS getNvlogBufferInfo; + NV0000_CTRL_NVD_GET_NVLOG_PARAMS getNvlog; + } params; +} NV_LOCKLESS_DIAGNOSTIC_PARAMS; + +#endif + diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numa.h b/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numa.h new file mode 100644 index 0000000..0af5267 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numa.h @@ -0,0 +1,81 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef NV_IOCTL_NUMA_H +#define NV_IOCTL_NUMA_H + +#include + +#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX) +#include +#elif defined (NV_KERNEL_INTERFACE_LAYER) && defined(NV_BSD) +#include +#else + +#include + +#if !defined(__aligned) +#define __aligned(n) __attribute__((aligned(n))) +#endif + +#endif + +#define NV_ESC_NUMA_INFO (NV_IOCTL_BASE + 15) +#define NV_ESC_SET_NUMA_STATUS (NV_IOCTL_BASE + 16) + +#define NV_IOCTL_NUMA_INFO_MAX_OFFLINE_ADDRESSES 64 +typedef struct offline_addresses +{ + uint64_t addresses[NV_IOCTL_NUMA_INFO_MAX_OFFLINE_ADDRESSES] __aligned(8); + uint32_t numEntries; +} nv_offline_addresses_t; + + +/* per-device NUMA memory info as assigned by the system */ +typedef struct nv_ioctl_numa_info +{ + int32_t nid; + int32_t status; + uint64_t memblock_size __aligned(8); + uint64_t numa_mem_addr __aligned(8); + uint64_t numa_mem_size __aligned(8); + uint8_t use_auto_online; + nv_offline_addresses_t offline_addresses __aligned(8); +} nv_ioctl_numa_info_t; + +/* set the status of the device NUMA memory */ +typedef struct nv_ioctl_set_numa_status +{ + int32_t status; +} nv_ioctl_set_numa_status_t; + +#define NV_IOCTL_NUMA_STATUS_DISABLED 0 +#define NV_IOCTL_NUMA_STATUS_OFFLINE 1 +#define NV_IOCTL_NUMA_STATUS_ONLINE_IN_PROGRESS 2 +#define NV_IOCTL_NUMA_STATUS_ONLINE 3 +#define NV_IOCTL_NUMA_STATUS_ONLINE_FAILED 4 +#define NV_IOCTL_NUMA_STATUS_OFFLINE_IN_PROGRESS 5 +#define NV_IOCTL_NUMA_STATUS_OFFLINE_FAILED 6 + +#endif diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h b/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h new file mode 100644 index 0000000..d0efa6f --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef NV_IOCTL_NUMBERS_H +#define NV_IOCTL_NUMBERS_H + +/* NOTE: using an ioctl() number > 55 will overflow! 
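+   (NV_IOCTL_BASE below is 200, and the Linux _IOC encoding allots 8 bits to
+   the command number, so 200 + 55 = 255 is the largest value that still fits.)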
*/ +#define NV_IOCTL_MAGIC 'F' +#define NV_IOCTL_BASE 200 +#define NV_ESC_CARD_INFO (NV_IOCTL_BASE + 0) +#define NV_ESC_REGISTER_FD (NV_IOCTL_BASE + 1) +#define NV_ESC_ALLOC_OS_EVENT (NV_IOCTL_BASE + 6) +#define NV_ESC_FREE_OS_EVENT (NV_IOCTL_BASE + 7) +#define NV_ESC_STATUS_CODE (NV_IOCTL_BASE + 9) +#define NV_ESC_CHECK_VERSION_STR (NV_IOCTL_BASE + 10) +#define NV_ESC_IOCTL_XFER_CMD (NV_IOCTL_BASE + 11) +#define NV_ESC_ATTACH_GPUS_TO_FD (NV_IOCTL_BASE + 12) +#define NV_ESC_QUERY_DEVICE_INTR (NV_IOCTL_BASE + 13) +#define NV_ESC_SYS_PARAMS (NV_IOCTL_BASE + 14) +#define NV_ESC_EXPORT_TO_DMABUF_FD (NV_IOCTL_BASE + 17) +#define NV_ESC_WAIT_OPEN_COMPLETE (NV_IOCTL_BASE + 18) + +#endif diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h b/src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h new file mode 100644 index 0000000..3a8e88f --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h @@ -0,0 +1,156 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef NV_IOCTL_H +#define NV_IOCTL_H + +#include +#include + +typedef struct { + NvU32 domain; /* PCI domain number */ + NvU8 bus; /* PCI bus number */ + NvU8 slot; /* PCI slot number */ + NvU8 function; /* PCI function number */ + NvU16 vendor_id; /* PCI vendor ID */ + NvU16 device_id; /* PCI device ID */ +} nv_pci_info_t; + +/* + * ioctl()'s with parameter structures too large for the + * _IOC cmd layout use the nv_ioctl_xfer_t structure + * and the NV_ESC_IOCTL_XFER_CMD ioctl() to pass the actual + * size and user argument pointer into the RM, which + * will then copy it to/from kernel space in separate steps. 
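+ *
+ * A rough user-space sketch (illustrative; NV_ESC_RM_CONTROL stands in for
+ * whatever oversized command is being wrapped, and the Linux _IOC size field
+ * this works around is only 14 bits wide):
+ *
+ *     nv_ioctl_xfer_t xfer;
+ *     xfer.cmd  = NV_ESC_RM_CONTROL;            // the real command
+ *     xfer.size = sizeof(params);               // actual parameter size
+ *     xfer.ptr  = (NvP64)(NvUPtr)&params;       // user pointer, 64-bit safe
+ *     ioctl(fd, _IOWR(NV_IOCTL_MAGIC, NV_ESC_IOCTL_XFER_CMD, nv_ioctl_xfer_t),
+ *           &xfer);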
+ */ +typedef struct nv_ioctl_xfer +{ + NvU32 cmd; + NvU32 size; + NvP64 ptr NV_ALIGN_BYTES(8); +} nv_ioctl_xfer_t; + +typedef struct nv_ioctl_card_info +{ + NvBool valid; + nv_pci_info_t pci_info; /* PCI config information */ + NvU32 gpu_id; + NvU16 interrupt_line; + NvU64 reg_address NV_ALIGN_BYTES(8); + NvU64 reg_size NV_ALIGN_BYTES(8); + NvU64 fb_address NV_ALIGN_BYTES(8); + NvU64 fb_size NV_ALIGN_BYTES(8); + NvU32 minor_number; + NvU8 dev_name[10]; /* device names such as vmgfx[0-32] for vmkernel */ +} nv_ioctl_card_info_t; + +/* alloc event */ +typedef struct nv_ioctl_alloc_os_event +{ + NvHandle hClient; + NvHandle hDevice; + NvU32 fd; + NvU32 Status; +} nv_ioctl_alloc_os_event_t; + +/* free event */ +typedef struct nv_ioctl_free_os_event +{ + NvHandle hClient; + NvHandle hDevice; + NvU32 fd; + NvU32 Status; +} nv_ioctl_free_os_event_t; + +/* status code */ +typedef struct nv_ioctl_status_code +{ + NvU32 domain; + NvU8 bus; + NvU8 slot; + NvU32 status; +} nv_ioctl_status_code_t; + +/* check version string */ +#define NV_RM_API_VERSION_STRING_LENGTH 64 + +typedef struct nv_ioctl_rm_api_version +{ + NvU32 cmd; + NvU32 reply; + char versionString[NV_RM_API_VERSION_STRING_LENGTH]; +} nv_ioctl_rm_api_version_t; + +#define NV_RM_API_VERSION_CMD_STRICT 0 +#define NV_RM_API_VERSION_CMD_RELAXED '1' +#define NV_RM_API_VERSION_CMD_QUERY '2' + +#define NV_RM_API_VERSION_REPLY_UNRECOGNIZED 0 +#define NV_RM_API_VERSION_REPLY_RECOGNIZED 1 + +typedef struct nv_ioctl_query_device_intr +{ + NvU32 intrStatus NV_ALIGN_BYTES(4); + NvU32 status; +} nv_ioctl_query_device_intr; + +/* system parameters that the kernel driver may use for configuration */ +typedef struct nv_ioctl_sys_params +{ + NvU64 memblock_size NV_ALIGN_BYTES(8); +} nv_ioctl_sys_params_t; + +typedef struct nv_ioctl_register_fd +{ + int ctl_fd; +} nv_ioctl_register_fd_t; + +#define NV_DMABUF_EXPORT_MAX_HANDLES 128 + +#define NV_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT 0 +#define NV_DMABUF_EXPORT_MAPPING_TYPE_FORCE_PCIE 1 + +typedef struct nv_ioctl_export_to_dma_buf_fd +{ + int fd; + NvHandle hClient; + NvU32 totalObjects; + NvU32 numObjects; + NvU32 index; + NvU64 totalSize NV_ALIGN_BYTES(8); + NvU8 mappingType; + NvBool bAllowMmap; + NvHandle handles[NV_DMABUF_EXPORT_MAX_HANDLES]; + NvU64 offsets[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8); + NvU64 sizes[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8); + NvU32 status; +} nv_ioctl_export_to_dma_buf_fd_t; + +typedef struct nv_ioctl_wait_open_complete +{ + int rc; + NvU32 adapterStatus; +} nv_ioctl_wait_open_complete_t; + +#endif diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h b/src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h new file mode 100644 index 0000000..d50a1e4 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_KERNEL_RMAPI_OPS_H_ +#define _NV_KERNEL_RMAPI_OPS_H_ + +/* + * Define the RMAPI provided to kernel-level RM clients. + * + * Kernel-level RM clients should populate nvidia_kernel_rmapi_ops_t + * by assigning nvidia_kernel_rmapi_ops_t::op and the corresponding + * parameter structure in nvidia_kernel_rmapi_ops_t's params union. + * Then, pass a pointer to the nvidia_kernel_rmapi_ops_t to + * rm_kernel_rmapi_op(). + */ + +#include "nvtypes.h" +#include "nvos.h" + +typedef struct { + NvU32 op; /* One of the NV0[14]_XXXX operations listed below. */ + + union { + NVOS00_PARAMETERS free; /* NV01_FREE */ + NVOS02_PARAMETERS allocMemory64; /* NV01_ALLOC_MEMORY */ + NVOS64_PARAMETERS alloc; /* NV04_ALLOC */ + NVOS32_PARAMETERS *pVidHeapControl; /* NV04_VID_HEAP_CONTROL */ + NVOS33_PARAMETERS mapMemory; /* NV04_MAP_MEMORY */ + NVOS34_PARAMETERS unmapMemory; /* NV04_UNMAP_MEMORY */ + NVOS39_PARAMETERS allocContextDma2; /* NV04_ALLOC_CONTEXT_DMA */ + NVOS46_PARAMETERS mapMemoryDma; /* NV04_MAP_MEMORY_DMA */ + NVOS47_PARAMETERS unmapMemoryDma; /* NV04_UNMAP_MEMORY_DMA */ + NVOS49_PARAMETERS bindContextDma; /* NV04_BIND_CONTEXT_DMA */ + NVOS54_PARAMETERS control; /* NV04_CONTROL*/ + NVOS55_PARAMETERS dupObject; /* NV04_DUP_OBJECT */ + NVOS57_PARAMETERS share; /* NV04_SHARE */ + NVOS61_PARAMETERS addVblankCallback; /* NV04_ADD_VBLANK_CALLBACK */ + } params; +} nvidia_kernel_rmapi_ops_t; + +#endif /* _NV_KERNEL_RMAPI_OPS_H_ */ diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-nb-regs.h b/src/nvidia/arch/nvalloc/unix/include/nv-nb-regs.h new file mode 100644 index 0000000..6282b35 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-nb-regs.h @@ -0,0 +1,65 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_NB_REGS_H_ +#define _NV_NB_REGS_H_ + +#include "nvdevid.h" + +typedef struct +{ + NvU32 subsystem_vendor_id; + NvU32 subsystem_device_id; + NvU16 gpu_device_id; +} nv_nb_id_t; + +typedef struct +{ + NvU32 vendor_id; + const char *name; + NvU32 data; +} nv_nb_reg_t; + +/* + * nb_id_table contains the OEM vendor ID, the subsystem ID and the + * GPU device ID of the notebooks for which we need to enable + * vendor specific registry keys. nb_reg_table contains the vendor + * specific registry key values. The initVendorSpecificRegistry() + * function compares the present notebooks OEM subsystem ID and the + * GPU device ID with the values present in id_tables. If a match + * is found, initVendorSpecificRegistry() extracts the vendor + * ID and sets any associated registry key listed in nb_reg_table. + */ + +static nv_nb_id_t nb_id_table[] = { + { PCI_VENDOR_ID_PC_PARTNER, 0x0620, 0x1284 }, // Acer GT 630 + { PCI_VENDOR_ID_PC_PARTNER, 0x0620, 0x124b }, // Acer GT 640 + { 0, 0, 0 } +}; + +static nv_nb_reg_t nb_reg_table[] = { + { PCI_VENDOR_ID_PC_PARTNER, "RmOverrideSupportChipsetAspm", 2 }, + { 0, NULL, 0 } +}; + +#endif //_NV_NB_REGS_H_ diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-priv.h b/src/nvidia/arch/nvalloc/unix/include/nv-priv.h new file mode 100644 index 0000000..0a6339d --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-priv.h @@ -0,0 +1,364 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_PRIV_H_
+#define _NV_PRIV_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#define NV_PRIV_REG_WR08(b,o,d) (*((volatile NvV8*)&(b)->Reg008[(o)/1])=(NvV8)(d))
+#define NV_PRIV_REG_WR16(b,o,d) (*((volatile NvV16*)&(b)->Reg016[(o)/2])=(NvV16)(d))
+#define NV_PRIV_REG_WR32(b,o,d) (*((volatile NvV32*)&(b)->Reg032[(o)/4])=(NvV32)(d))
+
+#define NV_PRIV_REG_RD08(b,o) ((b)->Reg008[(o)/1])
+#define NV_PRIV_REG_RD16(b,o) ((b)->Reg016[(o)/2])
+#define NV_PRIV_REG_RD32(b,o) ((b)->Reg032[(o)/4])
+
+struct OBJGPU;
+
+typedef struct
+{
+    NvBool baseValid;
+    VGAADDRDESC base;
+    NvBool workspaceBaseValid;
+    VGAADDRDESC workspaceBase;
+    NvU32 vesaMode;
+} nv_vga_t;
+
+/*
+* device state during Power Management
+*/
+typedef struct nv_pm_state_s
+{
+    NvU32 IntrEn;
+    NvBool InHibernate;
+} nv_pm_state_t;
+
+/*
+* data structure for the UNIX workqueues
+*/
+typedef struct nv_work_item_s
+{
+    NvU32 flags;
+    NvU32 gpuInstance;
+    union
+    {
+        OSWorkItemFunction *pGpuFunction;
+        OSSystemWorkItemFunction *pSystemFunction;
+    } func;
+    void *pData;
+} nv_work_item_t;
+
+#define NV_WORK_ITEM_FLAGS_NONE           0x0
+#define NV_WORK_ITEM_FLAGS_REQUIRES_GPU   0x1
+#define NV_WORK_ITEM_FLAGS_DONT_FREE_DATA 0x2
+
+#define INVALID_DISP_ID 0xFFFFFFFF
+#define MAX_DISP_ID_PER_ADAPTER 0x2
+
+typedef struct nv_i2c_adapter_entry_s
+{
+    void *pOsAdapter;
+    NvU32 port;
+    NvU32 displayId[MAX_DISP_ID_PER_ADAPTER];
+} nv_i2c_adapter_entry_t;
+
+#define NV_INIT_FLAG_HAL             0x0001
+#define NV_INIT_FLAG_HAL_COMPONENTS  0x0002
+#define NV_INIT_FLAG_GPU_STATE       0x0004
+#define NV_INIT_FLAG_GPU_STATE_LOAD  0x0008
+#define NV_INIT_FLAG_FIFO_WATCHDOG   0x0010
+#define NV_INIT_FLAG_CORE_LOGIC      0x0020
+#define NV_INIT_FLAG_GPUMGR_ATTACH   0x0040
+#define NV_INIT_FLAG_PUBLIC_I2C      0x0080
+#define NV_INIT_FLAG_SCALABILITY     0x0100
+#define NV_INIT_FLAG_DMA             0x0200
+
+#define MAX_I2C_ADAPTERS NV402C_CTRL_NUM_I2C_PORTS
+
+/*
+ * GPU dynamic power state machine.
+ *
+ * The GPU is in exactly one of these states at a time. Only certain state
+ * transitions are valid, as documented by the DAGs below.
+ *
+ * When in "instant idle" or COARSE mode:
+ *
+ *                            +------------------+
+ *                            v                  |
+ *   +---------+     +----------------+     +--------+
+ *   | UNKNOWN | --> | IDLE_INDICATED | --> | IN_USE |
+ *   +---------+     +----------------+     +--------+
+ *
+ * The transition from UNKNOWN to IDLE_INDICATED happens in
+ * rm_init_dynamic_power_management().
+ *
+ * Thereafter, transitions from IDLE_INDICATED to IN_USE happen when
+ * os_ref_dynamic_power() is called and the refcount transitions from 0 to 1;
+ * transitions from IN_USE to IDLE_INDICATED happen when
+ * os_unref_dynamic_power() is called and the refcount transitions from 1 to 0.
+ * Note that only calls to os_(un)ref_dynamic_power() with the mode == COARSE
+ * are considered in this mode; calls with mode == FINE are ignored. Since
+ * COARSE calls are placed only in rm_init_adapter/rm_shutdown_adapter, the GPU
+ * effectively stays in the IN_USE state any time any client has initialized
+ * it.
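+ *
+ * As a rough illustration (simplified pseudocode, not the actual driver
+ * code), the COARSE-mode refcounting described above amounts to:
+ *
+ *   ref(COARSE):   if (++refcount == 1) state = IN_USE;
+ *   unref(COARSE): if (--refcount == 0) state = IDLE_INDICATED;
+ *   ref(FINE) / unref(FINE): ignored in this mode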
+ *
+ *
+ * When in "deferred idle" or FINE mode, the GPU passes through the
+ * additional states IDLE_INSTANT and IDLE_SUSTAINED between IN_USE and
+ * IDLE_INDICATED. The valid transitions are:
+ *
+ *   UNKNOWN        --> IDLE_INDICATED  (rm_init_dynamic_power_management())
+ *   IDLE_INDICATED --> IN_USE          (os_ref_dynamic_power())
+ *   IDLE_INDICATED --> IDLE_INSTANT    (RmForceGpuNotIdle())
+ *   IN_USE         --> IDLE_INSTANT    (refcount drops from 1 to 0)
+ *   IDLE_INSTANT   --> IN_USE          (os_ref_dynamic_power())
+ *   IDLE_INSTANT   --> IDLE_SUSTAINED  (idle preconditions hold)
+ *   IDLE_SUSTAINED --> IN_USE          (os_ref_dynamic_power())
+ *   IDLE_SUSTAINED --> IDLE_INSTANT    (revoked mapping accessed, or idle
+ *                                       conditions no longer hold)
+ *   IDLE_SUSTAINED --> IDLE_INDICATED  (RmIndicateIdle() work item)
+ *
+ * As before, the transition from UNKNOWN to IDLE_INDICATED happens in
+ * rm_init_dynamic_power_management(). This is not ideal: it means the GPU may
+ * be powered down immediately upon loading the RM module, even if
+ * rm_init_adapter() is going to be called soon thereafter. However, we can't
+ * rely on deferred idle callbacks yet, since those currently rely on core RM
+ * being initialized.
+ *
+ * At the beginning of rm_init_adapter(), the GPU transitions to the IN_USE
+ * state; during the rm_init_adapter() sequence,
+ * RmInitDeferredDynamicPowerManagement() will be called, which will schedule
+ * timer callbacks and set the "deferred_idle_enabled" boolean.
+ *
+ * While in "deferred idle" mode, one of the callbacks
+ * timerCallbackForIdlePreConditions(), timerCallbackToIndicateIdle(), or
+ * RmIndicateIdle() should be scheduled when in the states:
+ * - IN_USE
+ * - IDLE_INSTANT
+ * - IDLE_SUSTAINED
+ * Note that since we may transition from IN_USE to IDLE_INSTANT rapidly (e.g.,
+ * for a series of RM calls), we don't attempt to schedule the callbacks and
+ * cancel them on each of these transitions. The
+ * timerCallbackForIdlePreConditions() callback will simply exit early if in
+ * the IN_USE state.
+ *
+ * As before, the GPU will remain in the IN_USE state until
+ * os_unref_dynamic_power() is called and the count transitions from 1 to 0
+ * (calls with mode == FINE are honored in this mode, and these transitions
+ * can happen frequently). When the refcount reaches 0, rather than going
+ * directly to the IDLE_INDICATED state, it transitions to the IDLE_INSTANT
+ * state.
+ *
+ * Then, when the next timerCallbackForIdlePreConditions() callback executes,
+ * if all preconditions are met, the state will transition to IDLE_SUSTAINED.
+ *
+ * If, when in the IDLE_SUSTAINED state, os_ref_dynamic_power() is called, the
+ * GPU will transition back to the IN_USE state, and will only return to the
+ * IDLE_INSTANT state once the refcount reaches 0 again. This ensures that
+ * there is a suitable delay between any activity that requires bumping the
+ * refcount and indicating idleness.
+ *
+ * If the timerCallbackForIdlePreConditions() callback executes again and the
+ * GPU is still in the IDLE_SUSTAINED state, userspace mappings will be revoked
+ * and the timerCallbackToIndicateIdle() callback will be scheduled.
+ *
+ * If, before the timerCallbackToIndicateIdle() callback executes, either
+ * os_ref_dynamic_power() is called or a mapping which has been revoked is
+ * accessed (which triggers the RmForceGpuNotIdle() callback), the GPU will
+ * transition back to the IN_USE or IDLE_INSTANT state, respectively.
+ * + * Then, when the timerCallbackToIndicateIdle() callback executes, if all + * mappings are still revoked, and the GPU is still in the IDLE_SUSTAINED + * state, and all GPU idleness preconditions remain satisfied, the + * RmIndicateIdle() work item will be enqueued. (Else, the GPU will transition + * back to the IDLE_INSTANT state and the callback for preconditions is + * scheduled again.) + * + * Finally, once the RmIndicateIdle() work item is called, if all of the same + * conditions still hold, the state will transition to IDLE_INDICATED. No + * callbacks will be scheduled from here; the callbacks for preconditions + * should be re-scheduled when transitioning out of the IDLE_INDICATED state. + * + * Once in the IDLE_INDICATED state, the kernel is free to call the RM to + * perform the GC6 entry sequence then turn off power to the GPU (although it + * may not, if the audio function is being used for example). + * + * There are two paths to exit the IDLE_INDICATED state: + * (a) If os_ref_dynamic_power() is called, in which case it transitions + * directly to the IN_USE state; + * (b) If RmForceGpuNotIdle() is called, in which case it transitions back to + * the IDLE_INSTANT state. + */ +typedef enum +{ + NV_DYNAMIC_POWER_STATE_UNKNOWN = 0, + + NV_DYNAMIC_POWER_STATE_IN_USE, + + NV_DYNAMIC_POWER_STATE_IDLE_INSTANT, + NV_DYNAMIC_POWER_STATE_IDLE_SUSTAINED, + NV_DYNAMIC_POWER_STATE_IDLE_INDICATED, +} nv_dynamic_power_state_t; + +typedef struct nv_dynamic_power_s +{ + /* + * mode is read without the mutex -- should be read-only outside of + * rm_init_dynamic_power_management, called during probe only. + */ + nv_dynamic_power_mode_t mode; + /* + * Whether to indicate idle immediately when the refcount reaches 0, or + * only go to the IDLE_INSTANT state, and expect timer callbacks to + * transition through IDLE_SUSTAINED -> IDLE_INDICATED. + */ + NvBool deferred_idle_enabled; + + nv_dynamic_power_state_t state; + NvS32 refcount; + + /* + * A word on lock ordering. These locks must be taken in the order: + * + * RM API lock > this dynamic_power mutex > RM GPUs lock + * + * Skipping any of those locks is fine (if they aren't required to protect + * whatever state is being accessed or modified), so long as the order is + * not violated. + */ + PORT_MUTEX *mutex; + + /* + * callback handles for deferred dynamic power management. + */ + NvP64 idle_precondition_check_event; + NvP64 indicate_idle_event; + NvBool idle_precondition_check_callback_scheduled; + + /* + * callback handle for kernel initiated gc6 entry/exit. + * these will be protected by the gpu lock. + */ + NvP64 remove_idle_holdoff; + NvBool b_idle_holdoff; + + /* + * flag set if the platform does not support fine grain dynamic power + * management. + */ + NvBool b_fine_not_supported; + + /* + * This flag is used to check if a workitem is queued for + * RmQueueIdleSustainedWorkitem(). + */ + NvBool b_idle_sustained_workitem_queued; + + /* + * Counter to track clients disallowing GCOFF. + */ + NvU32 clients_gcoff_disallow_refcount; + + /* + * Maximum FB allocation size which can be saved in system memory + * while doing GCOFF based dynamic PM. 
+     */
+    NvU64 gcoff_max_fb_size;
+
+    /*
+     * NVreg_DynamicPowerManagement regkey value set by the user
+     */
+    NvU32 dynamic_power_regkey;
+} nv_dynamic_power_t;
+
+typedef struct
+{
+    OBJGPU *pGpu;
+
+    NvU32 pmc_boot_0;
+
+    nv_vga_t vga;
+
+    NvU32 flags;
+    NvU32 status;
+
+    nv_i2c_adapter_entry_t i2c_adapters[MAX_I2C_ADAPTERS];
+
+    void *pVbiosCopy;
+    NvU32 vbiosSize;
+
+    nv_pm_state_t pm_state;
+
+    nv_reg_entry_t *pRegistry;
+
+    nv_dynamic_power_t dynamic_power;
+
+    /* Flag to check if the GPU needs 4K page isolation. */
+    NvBool b_4k_page_isolation_required;
+
+    /* Flag to check if GPU mobile config is enabled */
+    NvBool b_mobile_config_enabled;
+
+    /* Flag to check if S0ix-based power management is enabled. */
+    NvBool s0ix_pm_enabled;
+
+    /* Variable to track Dynamic Boost support */
+    int db_supported;
+
+    /*
+     * Maximum FB allocation size which can be saved in system memory
+     * during system suspend with S0ix-based power management.
+     */
+    NvU64 s0ix_gcoff_max_fb_size;
+
+    NvU32 pmc_boot_1;
+    NvU32 pmc_boot_42;
+
+    /*
+     * This flag is set if the upstream port has been configured ("D3cold Aux
+     * Power Limit" and "PERST# Assertion Delay") for GC6. This configuration
+     * is needed only for desktops.
+     */
+    NvBool gc6_upstream_port_configured;
+
+    /* This flag is set if the _PR3 ACPI method is available to support RTD3. */
+    NvBool pr3_acpi_method_present;
+} nv_priv_t;
+
+#define NV_SET_NV_PRIV(nv,p) ((nv)->priv = (p))
+#define NV_GET_NV_PRIV(nv) ((nv) ? (nv)->priv : NULL)
+
+/*
+ * Make sure the calling context has taken the RM API lock before using this
+ * macro.
+ */
+#define NV_GET_NV_PRIV_PGPU(nv) \
+    (NV_GET_NV_PRIV(nv) ? ((nv_priv_t *)NV_GET_NV_PRIV(nv))->pGpu : NULL)
+
+#endif // _NV_PRIV_H_
diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-reg.h b/src/nvidia/arch/nvalloc/unix/include/nv-reg.h
new file mode 100644
index 0000000..d0d60fc
--- /dev/null
+++ b/src/nvidia/arch/nvalloc/unix/include/nv-reg.h
@@ -0,0 +1,1093 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+//
+// This file holds Unix-specific NVIDIA driver options
+//
+
+#ifndef _RM_REG_H_
+#define _RM_REG_H_
+
+#include "nvtypes.h"
+#include "nv-firmware-registry.h"
+
+/*
+ * Use NV_REG_STRING to stringify a registry key wherever the key's name is
+ * needed in string form.
+ */
+
+#define __NV_REG_STRING(regkey) #regkey
+#define NV_REG_STRING(regkey) __NV_REG_STRING(regkey)
+
+/*
+ * Use NV_DEFINE_REG_ENTRY and NV_DEFINE_PARAMS_TABLE_ENTRY to simplify the
+ * definition of registry keys in the kernel module source code.
+ */
+
+#define __NV_REG_VAR(regkey) NVreg_##regkey
+
+#if defined(NV_MODULE_PARAMETER)
+#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
+    static NvU32 __NV_REG_VAR(regkey) = (default_value); \
+    NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
+#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
+    NvU32 __NV_REG_VAR(regkey) = (default_value); \
+    NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
+#else
+#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
+    static NvU32 __NV_REG_VAR(regkey) = (default_value)
+#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
+    NvU32 __NV_REG_VAR(regkey) = (default_value)
+#endif
+
+#if defined(NV_MODULE_STRING_PARAMETER)
+#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
+    char *__NV_REG_VAR(regkey) = (default_value); \
+    NV_MODULE_STRING_PARAMETER(__NV_REG_VAR(regkey))
+#else
+#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
+    char *__NV_REG_VAR(regkey) = (default_value)
+#endif
+
+#define NV_DEFINE_PARAMS_TABLE_ENTRY(regkey) \
+    { NV_REG_STRING(regkey), &__NV_REG_VAR(regkey) }
+
+/*
+ * Like NV_DEFINE_PARAMS_TABLE_ENTRY, but allows a mismatch between the name of
+ * the regkey and the name of the module parameter. When using this macro, the
+ * name of the parameter is passed to the extra "parameter" argument, and it is
+ * this name that must be used in the NV_DEFINE_REG_ENTRY() macro.
+ */
+
+#define NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(regkey, parameter) \
+    { NV_REG_STRING(regkey), &__NV_REG_VAR(parameter) }
+
+/*
+ *----------------- registry key definitions--------------------------
+ */
+
+/*
+ * Option: ModifyDeviceFiles
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will verify the validity
+ * of the NVIDIA device files in /dev and attempt to dynamically modify
+ * and/or (re-)create them, if necessary. If you don't wish for the NVIDIA
+ * driver to touch the device files, you can use this registry key to
+ * disable that behavior.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver and NVIDIA
+ * capability driver. Furthermore, the NVIDIA capability driver provides a
+ * modifiable /proc file entry (DeviceFileModify=0/1) to alter the behavior of
+ * this module parameter per device file.
+ *
+ * Possible Values:
+ *  0 = disable dynamic device file management
+ *  1 = enable dynamic device file management (default)
+ */
+
+#define __NV_MODIFY_DEVICE_FILES ModifyDeviceFiles
+#define NV_REG_MODIFY_DEVICE_FILES NV_REG_STRING(__NV_MODIFY_DEVICE_FILES)
+
+/*
+ * Option: DeviceFileUID
+ *
+ * Description:
+ *
+ * This registry key specifies the UID assigned to the NVIDIA device files
+ * created and/or modified by the NVIDIA driver when dynamic device file
+ * management is enabled.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver.
+ *
+ * The default UID is 0 ('root').
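+ *
+ * For example (all values illustrative), a modprobe configuration that
+ * restricts the device files to a "video" group (GID 44 on many
+ * distributions) could combine this key with the GID and mode keys below:
+ *
+ *   options nvidia NVreg_DeviceFileUID=0 NVreg_DeviceFileGID=44 \
+ *       NVreg_DeviceFileMode=0660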
+ */
+
+#define __NV_DEVICE_FILE_UID DeviceFileUID
+#define NV_REG_DEVICE_FILE_UID NV_REG_STRING(__NV_DEVICE_FILE_UID)
+
+/*
+ * Option: DeviceFileGID
+ *
+ * Description:
+ *
+ * This registry key specifies the GID assigned to the NVIDIA device files
+ * created and/or modified by the NVIDIA driver when dynamic device file
+ * management is enabled.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver.
+ *
+ * The default GID is 0 ('root').
+ */
+
+#define __NV_DEVICE_FILE_GID DeviceFileGID
+#define NV_REG_DEVICE_FILE_GID NV_REG_STRING(__NV_DEVICE_FILE_GID)
+
+/*
+ * Option: DeviceFileMode
+ *
+ * Description:
+ *
+ * This registry key specifies the device file mode assigned to the NVIDIA
+ * device files created and/or modified by the NVIDIA driver when dynamic
+ * device file management is enabled.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver.
+ *
+ * The default mode is 0666 (octal, rw-rw-rw-).
+ */
+
+#define __NV_DEVICE_FILE_MODE DeviceFileMode
+#define NV_REG_DEVICE_FILE_MODE NV_REG_STRING(__NV_DEVICE_FILE_MODE)
+
+/*
+ * Option: ResmanDebugLevel
+ *
+ * Default value: ~0
+ */
+
+#define __NV_RESMAN_DEBUG_LEVEL ResmanDebugLevel
+#define NV_REG_RESMAN_DEBUG_LEVEL NV_REG_STRING(__NV_RESMAN_DEBUG_LEVEL)
+
+/*
+ * Option: RmLogonRC
+ *
+ * Default value: 1
+ */
+
+#define __NV_RM_LOGON_RC RmLogonRC
+#define NV_REG_RM_LOGON_RC NV_REG_STRING(__NV_RM_LOGON_RC)
+
+/*
+ * Option: InitializeSystemMemoryAllocations
+ *
+ * Description:
+ *
+ * The NVIDIA Linux driver normally clears system memory it allocates
+ * for use with GPUs or within the driver stack. This is to ensure
+ * that potentially sensitive data is not rendered accessible by
+ * arbitrary user applications.
+ *
+ * Owners of single-user systems or similar trusted configurations may
+ * choose to disable the aforementioned clears using this option and
+ * potentially improve performance.
+ *
+ * Possible values:
+ *
+ *  1 = zero out system memory allocations (default)
+ *  0 = do not perform memory clears
+ */
+
+#define __NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
+    InitializeSystemMemoryAllocations
+#define NV_REG_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
+    NV_REG_STRING(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS)
+
+/*
+ * Option: RegistryDwords
+ *
+ * Description:
+ *
+ * This option accepts a semicolon-separated list of key=value pairs. Each
+ * key name is checked against the table of static options; if a match is
+ * found, the static option value is overridden, but invalid options remain
+ * invalid. Pairs that do not match an entry in the static option table
+ * are passed on to the RM directly.
+ *
+ * Format:
+ *
+ *   NVreg_RegistryDwords="<key=value>;<key=value>;..."
+ */
+
+#define __NV_REGISTRY_DWORDS RegistryDwords
+#define NV_REG_REGISTRY_DWORDS NV_REG_STRING(__NV_REGISTRY_DWORDS)
+
+/*
+ * Option: RegistryDwordsPerDevice
+ *
+ * Description:
+ *
+ * This option allows specifying registry keys per GPU device, providing
+ * registry control at per-GPU granularity. It accepts a semicolon-separated
+ * list of key=value pairs. The first key=value pair MUST be
+ * "pci=DDDD:BB:DD.F;", where DDDD is the domain, BB is the bus ID, DD is the
+ * device slot number and F is the function. This PCI BDF is used to identify
+ * the GPU to which the registry keys that follow are assigned.
+ * If a GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is NOT
+ * found, then all of the registry keys that follow are skipped until the next
+ * valid "pci=DDDD:BB:DD.F;" identifier is found.
+ * Following are the valid formats for the value of the "pci" string:
+ * 1) bus:slot             : Domain and function default to 0.
+ * 2) domain:bus:slot      : Function defaults to 0.
+ * 3) domain:bus:slot.func : Complete PCI dev id string.
+ *
+ * For each of the registry keys that follow, the key name is checked against
+ * the table of static options; if a match is found, the static option value
+ * is overridden, but invalid options remain invalid. Pairs that do not match
+ * an entry in the static option table are passed on to the RM directly.
+ *
+ * Format:
+ *
+ *   NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F;<key=value>;<key=value>;..; \
+ *                                  pci=DDDD:BB:DD.F;<key=value>;..;"
+ */
+
+#define __NV_REGISTRY_DWORDS_PER_DEVICE RegistryDwordsPerDevice
+#define NV_REG_REGISTRY_DWORDS_PER_DEVICE NV_REG_STRING(__NV_REGISTRY_DWORDS_PER_DEVICE)
+
+#define __NV_RM_MSG RmMsg
+#define NV_RM_MSG NV_REG_STRING(__NV_RM_MSG)
+
+/*
+ * Option: UsePageAttributeTable
+ *
+ * Description:
+ *
+ * Enable/disable use of the page attribute table (PAT) available in
+ * modern x86/x86-64 processors to set the effective memory type of memory
+ * mappings to write-combining (WC).
+ *
+ * If this option is enabled, an x86 processor with PAT support is present,
+ * and the host system's Linux kernel did not configure one of the PAT
+ * entries to indicate the WC memory type, the driver will change the second
+ * entry in the PAT from its default (write-through (WT)) to WC at module
+ * load time. If the kernel did update one of the PAT entries, the driver
+ * will not modify the PAT.
+ *
+ * In both cases, the driver will honor attempts to map memory with the WC
+ * memory type by selecting the appropriate PAT entry using the correct
+ * set of PTE flags.
+ *
+ * Possible values:
+ *
+ * ~0 = use the NVIDIA driver's default logic (default)
+ *  1 = enable use of the PAT for WC mappings.
+ *  0 = disable use of the PAT for WC mappings.
+ */
+
+#define __NV_USE_PAGE_ATTRIBUTE_TABLE UsePageAttributeTable
+#define NV_USE_PAGE_ATTRIBUTE_TABLE NV_REG_STRING(__NV_USE_PAGE_ATTRIBUTE_TABLE)
+
+/*
+ * Option: EnableMSI
+ *
+ * Description:
+ *
+ * When this option is enabled and the host kernel supports the MSI feature,
+ * the NVIDIA driver will enable the PCI-E MSI capability of GPUs that
+ * support this feature instead of using PCI-E wired interrupts.
+ *
+ * Possible Values:
+ *
+ *  0 = disable MSI interrupt
+ *  1 = enable MSI interrupt (default)
+ *
+ */
+
+#define __NV_ENABLE_MSI EnableMSI
+#define NV_REG_ENABLE_MSI NV_REG_STRING(__NV_ENABLE_MSI)
+
+/*
+ * Option: EnablePCIeGen3
+ *
+ * Description:
+ *
+ * Due to interoperability problems seen with Kepler PCIe Gen3 capable GPUs
+ * when configured on SandyBridge E desktop platforms, NVIDIA feels that
+ * delivering a reliable, high-quality experience is not currently possible in
+ * PCIe Gen3 mode on all PCIe Gen3 platforms. Therefore, Quadro, Tesla and
+ * NVS Kepler products operate in PCIe Gen2 mode by default. You may use this
+ * option to enable PCIe Gen3 support.
+ *
+ * This is completely unsupported!
+ *
+ * Possible Values:
+ *
+ *  0: disable PCIe Gen3 support (default)
+ *  1: enable PCIe Gen3 support
+ */
+
+#define __NV_ENABLE_PCIE_GEN3 EnablePCIeGen3
+#define NV_REG_ENABLE_PCIE_GEN3 NV_REG_STRING(__NV_ENABLE_PCIE_GEN3)
+
+/*
+ * Option: MemoryPoolSize
+ *
+ * Description:
+ *
+ * When set to a non-zero value, this option specifies the size of the
+ * memory pool, given as a multiple of 1 GB, created on VMware ESXi to
+ * satisfy any system memory allocations requested by the NVIDIA kernel
+ * module.
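+ *
+ * Example (illustrative): to request a 4 GB pool, the module parameter
+ * could plausibly be set on ESXi with:
+ *
+ *   esxcli system module parameters set -m nvidia -p "NVreg_MemoryPoolSize=4"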
+ */
+
+#define __NV_MEMORY_POOL_SIZE MemoryPoolSize
+#define NV_REG_MEMORY_POOL_SIZE NV_REG_STRING(__NV_MEMORY_POOL_SIZE)
+
+/*
+ * Option: KMallocHeapMaxSize
+ *
+ * Description:
+ *
+ * When set to a non-zero value, this option specifies the maximum size of the
+ * heap memory space reserved for kmalloc operations. Given as a
+ * multiple of 1 MB, created on VMware ESXi to satisfy any system memory
+ * allocations requested by the NVIDIA kernel module.
+ */
+
+#define __NV_KMALLOC_HEAP_MAX_SIZE KMallocHeapMaxSize
+#define NV_KMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_KMALLOC_HEAP_MAX_SIZE)
+
+/*
+ * Option: VMallocHeapMaxSize
+ *
+ * Description:
+ *
+ * When set to a non-zero value, this option specifies the maximum size of the
+ * heap memory space reserved for vmalloc operations. Given as a
+ * multiple of 1 MB, created on VMware ESXi to satisfy any system memory
+ * allocations requested by the NVIDIA kernel module.
+ */
+
+#define __NV_VMALLOC_HEAP_MAX_SIZE VMallocHeapMaxSize
+#define NV_VMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_VMALLOC_HEAP_MAX_SIZE)
+
+/*
+ * Option: IgnoreMMIOCheck
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will ignore the
+ * MMIO limit check during device probe on the VMware ESXi kernel. This is
+ * typically necessary when the VMware ESXi MMIO limit differs between a
+ * base version and its updates. Customers running updates can set this
+ * regkey to avoid probe failures.
+ */
+
+#define __NV_IGNORE_MMIO_CHECK IgnoreMMIOCheck
+#define NV_REG_IGNORE_MMIO_CHECK NV_REG_STRING(__NV_IGNORE_MMIO_CHECK)
+
+/*
+ * Option: pci
+ *
+ * Description:
+ *
+ * On Unix platforms, a per-GPU registry key assignment can be specified as:
+ * NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F,...",
+ * where DDDD:BB:DD.F refers to Domain:Bus:Device.Function.
+ * The "pci" key identifies what follows as a PCI BDF identifier, to which
+ * the subsequent registry keys are applied.
+ *
+ * This define is not used on non-UNIX platforms.
+ *
+ * Possible Formats for value:
+ *
+ * 1) bus:slot             : Domain and function default to 0.
+ * 2) domain:bus:slot      : Function defaults to 0.
+ * 3) domain:bus:slot.func : Complete PCI BDF identifier string.
+ */
+#define __NV_PCI_DEVICE_BDF pci
+#define NV_REG_PCI_DEVICE_BDF NV_REG_STRING(__NV_PCI_DEVICE_BDF)
+
+/*
+ * Option: EnableStreamMemOPs
+ *
+ * Description:
+ *
+ * When this option is enabled, the CUDA driver will enable support for
+ * CUDA Stream Memory Operations in user-mode applications, which are
+ * currently disabled by default due to limited support in developer tools.
+ *
+ * Note: this is treated as a hint. MemOPs may still be left disabled by the
+ * CUDA driver for other reasons.
+ *
+ * Possible Values:
+ *
+ *  0 = disable feature (default)
+ *  1 = enable feature
+ */
+#define __NV_ENABLE_STREAM_MEMOPS EnableStreamMemOPs
+#define NV_REG_ENABLE_STREAM_MEMOPS NV_REG_STRING(__NV_ENABLE_STREAM_MEMOPS)
+
+/*
+ * Option: EnableUserNUMAManagement
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will require the
+ * user-mode NVIDIA Persistence daemon to manage the onlining and offlining
+ * of its NUMA device memory.
+ *
+ * This option has no effect on platforms that do not support onlining
+ * device memory to a NUMA node (this feature is only supported on certain
+ * POWER9 systems).
+ *
+ * Possible Values:
+ *
+ *  0: disable user-mode NUMA management
+ *  1: enable user-mode NUMA management (default)
+ */
+#define __NV_ENABLE_USER_NUMA_MANAGEMENT EnableUserNUMAManagement
+#define NV_REG_ENABLE_USER_NUMA_MANAGEMENT NV_REG_STRING(__NV_ENABLE_USER_NUMA_MANAGEMENT)
+
+/*
+ * Option: CoherentGPUMemoryMode
+ *
+ * Description:
+ *
+ * This option can be set to control how GPU memory is accessed through
+ * the coherent link.
+ *
+ * This option has no effect on platforms that do not support onlining
+ * device memory to a NUMA node.
+ *
+ * Possible string values:
+ *
+ * "driver"          : disable onlining coherent memory to the OS as a NUMA
+ *                     node; the driver will manage it in this case
+ * "numa" (or unset) : enable onlining coherent memory to the OS as a NUMA
+ *                     node (default)
+ */
+#define __NV_COHERENT_GPU_MEMORY_MODE CoherentGPUMemoryMode
+#define NV_REG_COHERENT_GPU_MEMORY_MODE NV_REG_STRING(__NV_COHERENT_GPU_MEMORY_MODE)
+
+/*
+ * Option: GpuBlacklist
+ *
+ * Description:
+ *
+ * This option accepts a list of blacklisted GPUs, separated by commas, that
+ * cannot be attached or used. Each blacklisted GPU is identified by a UUID in
+ * the ASCII format with leading "GPU-". An exact match is required; no partial
+ * UUIDs. This regkey is deprecated and will be removed in the future. Use
+ * NV_REG_EXCLUDED_GPUS instead.
+ */
+#define __NV_GPU_BLACKLIST GpuBlacklist
+#define NV_REG_GPU_BLACKLIST NV_REG_STRING(__NV_GPU_BLACKLIST)
+
+/*
+ * Option: ExcludedGpus
+ *
+ * Description:
+ *
+ * This option accepts a list of excluded GPUs, separated by commas, that
+ * cannot be attached or used. Each excluded GPU is identified by a UUID in
+ * the ASCII format with leading "GPU-". An exact match is required; no partial
+ * UUIDs.
+ */
+#define __NV_EXCLUDED_GPUS ExcludedGpus
+#define NV_REG_EXCLUDED_GPUS NV_REG_STRING(__NV_EXCLUDED_GPUS)
+
+/*
+ * Option: NvLinkDisable
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will not attempt to
+ * initialize or train NVLink connections for any GPUs. A system reboot is
+ * required for changes to take effect.
+ *
+ * This option has no effect if no GPUs support NVLink.
+ *
+ * Possible Values:
+ *
+ *  0: Do not disable NVLink (default)
+ *  1: Disable NVLink
+ */
+#define __NV_NVLINK_DISABLE NvLinkDisable
+#define NV_REG_NVLINK_DISABLE NV_REG_STRING(__NV_NVLINK_DISABLE)
+
+/*
+ * Option: RestrictProfilingToAdminUsers
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will prevent users
+ * without administrative access (i.e., the CAP_SYS_ADMIN capability) from
+ * using GPU performance counters.
+ *
+ * Possible Values:
+ *
+ *  0: Do not restrict GPU counters (default)
+ *  1: Restrict GPU counters to system administrators only
+ */
+
+#define __NV_RM_PROFILING_ADMIN_ONLY RmProfilingAdminOnly
+#define __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER RestrictProfilingToAdminUsers
+#define NV_REG_RM_PROFILING_ADMIN_ONLY NV_REG_STRING(__NV_RM_PROFILING_ADMIN_ONLY)
+
+/*
+ * Option: TemporaryFilePath
+ *
+ * Description:
+ *
+ * When specified, this option changes the location in which the
+ * NVIDIA kernel module will create unnamed temporary files (e.g. to
+ * save the contents of video memory in). The indicated path must
+ * be a directory. By default, temporary files are created in /tmp.
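+ *
+ * Example (illustrative) modprobe configuration directing temporary files
+ * to /var/tmp instead:
+ *
+ *   options nvidia NVreg_TemporaryFilePath=/var/tmp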
+ */
+#define __NV_TEMPORARY_FILE_PATH TemporaryFilePath
+#define NV_REG_TEMPORARY_FILE_PATH NV_REG_STRING(__NV_TEMPORARY_FILE_PATH)
+
+/*
+ * Option: PreserveVideoMemoryAllocations
+ *
+ * If enabled, this option prompts the NVIDIA kernel module to save and
+ * restore all video memory allocations across system power management
+ * cycles, i.e. suspend/resume and hibernate/restore. Otherwise,
+ * only select allocations are preserved.
+ *
+ * Possible Values:
+ *
+ *  0: Preserve only select video memory allocations (default)
+ *  1: Preserve all video memory allocations
+ */
+#define __NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS PreserveVideoMemoryAllocations
+#define NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS \
+    NV_REG_STRING(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS)
+
+/*
+ * Option: EnableS0ixPowerManagement
+ *
+ * When this option is enabled, the NVIDIA driver will use S0ix-based
+ * power management for system suspend/resume, if both the platform and
+ * the GPU support S0ix.
+ *
+ * During system suspend, if S0ix is enabled and
+ * video memory usage is above the threshold configured by
+ * 'S0ixPowerManagementVideoMemoryThreshold', video memory will be kept
+ * in self-refresh mode while the rest of the GPU is powered down.
+ *
+ * Otherwise, the driver will copy video memory contents to system memory
+ * and power off the video memory along with the GPU.
+ *
+ * Possible Values:
+ *
+ *  0: Disable S0ix based power management (default)
+ *  1: Enable S0ix based power management
+ */
+
+#define __NV_ENABLE_S0IX_POWER_MANAGEMENT EnableS0ixPowerManagement
+#define NV_REG_ENABLE_S0IX_POWER_MANAGEMENT \
+    NV_REG_STRING(__NV_ENABLE_S0IX_POWER_MANAGEMENT)
+
+/*
+ * Option: S0ixPowerManagementVideoMemoryThreshold
+ *
+ * This option controls the threshold that the NVIDIA driver will use during
+ * S0ix-based system power management.
+ *
+ * When S0ix is enabled and the system is suspended, the driver will
+ * compare the amount of video memory in use with this threshold,
+ * to decide whether to keep video memory in self-refresh or copy video
+ * memory content to system memory.
+ *
+ * See the 'EnableS0ixPowerManagement' option.
+ *
+ * Values are expressed in Megabytes (1048576 bytes).
+ *
+ * The default value for this option is 256MB.
+ *
+ */
+#define __NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
+    S0ixPowerManagementVideoMemoryThreshold
+#define NV_REG_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
+    NV_REG_STRING(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
+
+/*
+ * Option: DynamicPowerManagement
+ *
+ * This option controls how aggressively the NVIDIA kernel module will manage
+ * GPU power through kernel interfaces.
+ *
+ * Possible Values:
+ *
+ *  0: Never allow the GPU to be powered down.
+ *  1: Power down the GPU when it is not initialized.
+ *  2: Power down the GPU after it has been inactive for some time.
+ *  3: (Default) Power down the GPU after a period of inactivity (i.e.,
+ *     mode 2) on Ampere or later notebooks. Otherwise, do not power down
+ *     the GPU.
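+ *
+ * Example (illustrative): to request fine-grained power management
+ * regardless of the notebook generation, a modprobe configuration file
+ * could contain:
+ *
+ *   options nvidia NVreg_DynamicPowerManagement=0x02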
+ */ +#define __NV_DYNAMIC_POWER_MANAGEMENT DynamicPowerManagement +#define NV_REG_DYNAMIC_POWER_MANAGEMENT \ + NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT) + +#define NV_REG_DYNAMIC_POWER_MANAGEMENT_NEVER 0 +#define NV_REG_DYNAMIC_POWER_MANAGEMENT_COARSE 1 +#define NV_REG_DYNAMIC_POWER_MANAGEMENT_FINE 2 +#define NV_REG_DYNAMIC_POWER_MANAGEMENT_DEFAULT 3 + +/* + * Option: DynamicPowerManagementVideoMemoryThreshold + * + * This option controls the threshold that the NVIDIA driver will use + * when selecting the dynamic power management scheme. + * + * When the driver detects that the GPU is idle, it will compare the amount + * of video memory in use with this threshold. + * + * If the current video memory usage is less than the threshold, the + * driver may preserve video memory contents in system memory and power off + * the video memory along with the GPU itself, if supported. Otherwise, + * the video memory will be kept in self-refresh mode while powering down + * the rest of the GPU, if supported. + * + * Values are expressed in Megabytes (1048576 bytes). + * + * If the requested value is greater than 200MB (the default), then it + * will be capped to 200MB. + */ +#define __NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \ + DynamicPowerManagementVideoMemoryThreshold +#define NV_REG_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \ + NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD) + +/* + * Option: TegraGpuPgMask + * + * This option controls the TPC/GPC/FBP power-gating mask for Tegra iGPU. + * + */ +#define __NV_TEGRA_GPU_PG_MASK TegraGpuPgMask +#define NV_REG_TEGRA_GPU_PG_MASK \ + NV_REG_STRING(__NV_TEGRA_GPU_PG_MASK) + +/* + * Option: RegisterPCIDriver + * + * Description: + * + * When this option is enabled, the NVIDIA driver will register with + * PCI subsystem. + * + * Possible values: + * + * 1 - register as PCI driver (default) + * 0 - do not register as PCI driver + */ + +#define __NV_REGISTER_PCI_DRIVER RegisterPCIDriver +#define NV_REG_REGISTER_PCI_DRIVER NV_REG_STRING(__NV_REGISTER_PCI_DRIVER) + +/* + * Option: RegisterPlatformDeviceDriver + * + * Description: + * + * When this option is enabled, the NVIDIA driver will register with + * platform subsystem. + * + * Possible values: + * + * 1 - register as platform driver (default) + * 0 - do not register as platform driver + */ + +#define __NV_REGISTER_PLATFORM_DEVICE_DRIVER RegisterPlatformDeviceDriver +#define NV_REG_REGISTER_PLATFORM_DEVICE_DRIVER NV_REG_STRING(__NV_REGISTER_PLATFORM_DEVICE_DRIVER) + +/* + * Option: EnablePCIERelaxedOrderingMode + * + * Description: + * + * When this option is enabled, the registry key RmSetPCIERelaxedOrdering will + * be set to NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE, causing + * every device to set the relaxed ordering bit to 1 in all outbound MWr + * transaction-layer packets. This is equivalent to setting the regkey to + * FORCE_ENABLE as a non-per-device registry key. + * + * Possible values: + * 0 - Do not enable PCIe TLP relaxed ordering bit-setting (default) + * 1 - Enable PCIe TLP relaxed ordering bit-setting + */ +#define __NV_ENABLE_PCIE_RELAXED_ORDERING_MODE EnablePCIERelaxedOrderingMode +#define NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE \ + NV_REG_STRING(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE) + +/* + * Option: EnableResizableBar + * + * Description: + * + * When this option is enabled, the NVIDIA driver will attempt to resize + * BAR1 to match framebuffer size, or the next largest available size on + * supported machines. 
+ * This is currently only implemented for Linux.
+ *
+ * Possible values:
+ *  0 - Do not enable PCI BAR resizing (default)
+ *  1 - Enable PCI BAR resizing
+ */
+#define __NV_ENABLE_RESIZABLE_BAR EnableResizableBar
+#define NV_REG_ENABLE_RESIZABLE_BAR NV_REG_STRING(__NV_ENABLE_RESIZABLE_BAR)
+
+/*
+ * Option: EnableGpuFirmware
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will enable use of GPU
+ * firmware.
+ *
+ * If this key is set globally for the system, the driver may still attempt
+ * to apply some policies to maintain uniform firmware modes across all
+ * GPUs. This may result in the driver failing initialization on some GPUs
+ * to maintain such a policy.
+ *
+ * If this key is set using NVreg_RegistryDwordsPerDevice, then the driver
+ * will attempt to honor whatever configuration is specified without applying
+ * additional policies. This may also result in failed GPU initializations if
+ * the configuration is not possible (for example, if the firmware is missing
+ * from the filesystem, or the GPU is not capable).
+ *
+ * NOTE: More details for this regkey can be found in nv-firmware-registry.h
+ */
+#define __NV_ENABLE_GPU_FIRMWARE EnableGpuFirmware
+#define NV_REG_ENABLE_GPU_FIRMWARE NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE)
+
+/*
+ * Option: EnableGpuFirmwareLogs
+ *
+ * When this option is enabled, the NVIDIA driver will send GPU firmware logs
+ * to the system log, when possible.
+ *
+ * NOTE: More details for this regkey can be found in nv-firmware-registry.h
+ */
+#define __NV_ENABLE_GPU_FIRMWARE_LOGS EnableGpuFirmwareLogs
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE_LOGS)
+
+/*
+ * Option: EnableDbgBreakpoint
+ *
+ * When this option is set to a non-zero value, and the kernel is configured
+ * appropriately, assertions within resman will trigger a CPU breakpoint (e.g.,
+ * INT3 on x86_64), assumed to be caught by an attached debugger.
+ *
+ * When this option is set to the value zero (the default), assertions within
+ * resman will print to the system log, but no CPU breakpoint will be triggered.
+ */
+#define __NV_ENABLE_DBG_BREAKPOINT EnableDbgBreakpoint
+
+
+/*
+ * Option: OpenRmEnableUnsupportedGpus
+ *
+ * This option to require opt-in for use of Open RM on non-Data Center
+ * GPUs is deprecated and no longer required. The kernel module parameter
+ * is left here, though ignored, for backwards compatibility.
+ */
+#define __NV_OPENRM_ENABLE_UNSUPPORTED_GPUS OpenRmEnableUnsupportedGpus
+
+/*
+ * Option: NVreg_DmaRemapPeerMmio
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will use device driver
+ * APIs provided by the Linux kernel for DMA-remapping part of a device's
+ * MMIO region to another device, creating, e.g., IOMMU mappings as necessary.
+ * When this option is disabled, the NVIDIA driver will instead only apply a
+ * fixed offset, which may be zero, to CPU physical addresses to produce the
+ * DMA address for the peer's MMIO region, and no IOMMU mappings will be
+ * created.
+ *
+ * This option only affects peer MMIO DMA mappings, and not system memory
+ * mappings.
+ *
+ * Possible Values:
+ *  0 = disable dynamic DMA remapping of peer MMIO regions
+ *  1 = enable dynamic DMA remapping of peer MMIO regions (default)
+ */
+#define __NV_DMA_REMAP_PEER_MMIO DmaRemapPeerMmio
+#define NV_DMA_REMAP_PEER_MMIO NV_REG_STRING(__NV_DMA_REMAP_PEER_MMIO)
+#define NV_DMA_REMAP_PEER_MMIO_DISABLE 0x00000000
+#define NV_DMA_REMAP_PEER_MMIO_ENABLE  0x00000001
+
+/*
+ * Option: NVreg_RmNvlinkBandwidthLinkCount
+ *
+ * Description:
+ *
+ * This option allows the user to reduce the GPU NVLink bandwidth to save
+ * power.
+ *
+ * This option is only for Blackwell and later GPUs with NVLink version 5.0.
+ */
+#define __NV_RM_NVLINK_BW_LINK_COUNT RmNvlinkBandwidthLinkCount
+#define NV_RM_NVLINK_BW_LINK_COUNT NV_REG_STRING(__NV_RM_NVLINK_BW_LINK_COUNT)
+
+/*
+ * Option: NVreg_RmNvlinkBandwidth
+ *
+ * Description:
+ *
+ * This option allows the user to reduce the NVLink P2P bandwidth to save
+ * power. The option takes a string value.
+ *
+ * Possible string values:
+ *   OFF:      0% bandwidth
+ *   MIN:      15%-25% bandwidth depending on the system's NVLink topology
+ *   HALF:     50% bandwidth
+ *   3QUARTER: 75% bandwidth
+ *   FULL:     100% bandwidth (default)
+ *
+ * This option is only for Hopper and later GPUs with NVLink version 4.0.
+ */
+#define __NV_RM_NVLINK_BW RmNvlinkBandwidth
+#define NV_RM_NVLINK_BW NV_REG_STRING(__NV_RM_NVLINK_BW)
+
+/*
+ * Option: NVreg_EnableNonblockingOpen
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will try to perform any
+ * required device initialization in the background when /dev/nvidiaN devices
+ * are opened with the flag O_NONBLOCK.
+ *
+ * Possible Values:
+ *  0 = O_NONBLOCK flag when opening devices is ignored
+ *  1 = O_NONBLOCK flag when opening devices results in background device
+ *      initialization (default)
+ */
+#define __NV_ENABLE_NONBLOCKING_OPEN EnableNonblockingOpen
+#define NV_ENABLE_NONBLOCKING_OPEN NV_REG_STRING(__NV_ENABLE_NONBLOCKING_OPEN)
+
+/*
+ * Option: NVreg_ImexChannelCount
+ *
+ * Description:
+ *
+ * This option allows users to specify the number of IMEX (import/export)
+ * channels. Within an IMEX domain, the channels allow sharing memory
+ * securely in a multi-user environment using the CUDA driver's fabric handle
+ * based APIs.
+ *
+ * An IMEX domain is either an OS instance or a group of securely
+ * connected OS instances using the NVIDIA IMEX daemon. The option must
+ * be set to the same value on each OS instance within the IMEX domain.
+ *
+ * An IMEX channel is a logical entity that is represented by a /dev node.
+ * The IMEX channels are global resources within the IMEX domain. When
+ * exporter and importer CUDA processes have been granted access to the
+ * same IMEX channel, they can securely share memory.
+ *
+ * Note that the NVIDIA driver will not attempt to create the /dev nodes. Thus,
+ * the related CUDA APIs will fail with an insufficient permission error until
+ * the /dev nodes are set up. The creation of these /dev nodes,
+ * /dev/nvidia-caps-imex-channels/channelN, must be handled by the
+ * administrator, where N is the minor number. The major number can be
+ * queried from /proc/devices.
+ *
+ * nvidia-modprobe CLI support is available to set up the /dev nodes.
+ * NVreg_ModifyDeviceFiles, NVreg_DeviceFileGID, NVreg_DeviceFileUID
+ * and NVreg_DeviceFileMode will be honored by nvidia-modprobe.
+ *
+ * Also, refer to the NVreg_CreateImexChannel0 option.
+ *
+ * Possible values:
+ *  0 - Disable IMEX using the CUDA driver's fabric handles.
+ *  N - N IMEX channels will be enabled in the driver to facilitate N
+ *      concurrent users. The default value is 2048 channels, and the
+ *      current maximum value is 20 bits wide, the same as the Linux dev_t
+ *      minor number limit.
+ */
+#define __NV_IMEX_CHANNEL_COUNT ImexChannelCount
+#define NV_REG_IMEX_CHANNEL_COUNT NV_REG_STRING(__NV_IMEX_CHANNEL_COUNT)
+
+/*
+ * Option: NVreg_CreateImexChannel0
+ *
+ * Description:
+ *
+ * This option allows users to specify whether the NVIDIA driver must create
+ * the IMEX channel 0 by default. The channel will be created automatically
+ * when the NVIDIA open GPU kernel module is loaded.
+ *
+ * Note that users are advised to enable this option only in trusted
+ * environments where it is acceptable for applications to share the same
+ * IMEX channel.
+ *
+ * For more details on IMEX channels, refer to the NVreg_ImexChannelCount
+ * option.
+ *
+ * Possible values:
+ *  0 - Do not create IMEX channel 0 (default).
+ *  1 - Create IMEX channel 0.
+ */
+#define __NV_CREATE_IMEX_CHANNEL_0 CreateImexChannel0
+#define NV_CREATE_IMEX_CHANNEL_0 NV_REG_STRING(__NV_CREATE_IMEX_CHANNEL_0)
+
+/*
+ * Option: NVreg_GrdmaPciTopoCheckOverride
+ *
+ * Description:
+ *
+ * This option allows users to override the PCI topology validation enforced by
+ * the GPU driver's dma-buf and nv-p2p subsystems.
+ *
+ * Possible values:
+ *  0 - Use the driver's topology check to allow or deny access (default).
+ *  1 - Override the driver's topology check to allow access.
+ *  2 - Override the driver's topology check to deny access.
+ */
+#define __NV_GRDMA_PCI_TOPO_CHECK_OVERRIDE GrdmaPciTopoCheckOverride
+#define NV_GRDMA_PCI_TOPO_CHECK_OVERRIDE NV_REG_STRING(__NV_GRDMA_PCI_TOPO_CHECK_OVERRIDE)
+#define NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_DEFAULT      0
+#define NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_ALLOW_ACCESS 1
+#define NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_DENY_ACCESS  2
+
+/*
+ * Option: NVreg_EnableSystemMemoryPools
+ *
+ * Description:
+ *
+ * This option controls the creation of system memory page pools for
+ * different page sizes. The pool for a given pageSize is enabled by setting
+ * the bit mask (pageSize >> NV_ENABLE_SYSTEM_MEMORY_POOLS_SHIFT) in this
+ * value. The pools keep memory cached once freed to speed up reallocation,
+ * and are shared by all adapters.
+ *
+ * This feature is only supported by the OpenRM driver.
+ *
+ * By default, the 4K, 64K and 2M page size pools are enabled
+ * (0x1 | 0x10 | 0x200 == 0x211).
+ */
+#define __NV_ENABLE_SYSTEM_MEMORY_POOLS EnableSystemMemoryPools
+#define NV_ENABLE_SYSTEM_MEMORY_POOLS NV_REG_STRING(__NV_ENABLE_SYSTEM_MEMORY_POOLS)
+#define NV_ENABLE_SYSTEM_MEMORY_POOLS_DEFAULT 0x00000211
+#define NV_ENABLE_SYSTEM_MEMORY_POOLS_SHIFT   12
+
+#if defined(NV_DEFINE_REGISTRY_KEY_TABLE)
+
+/*
+ *---------registry key parameter declarations--------------
+ */
+
+NV_DEFINE_REG_ENTRY(__NV_RESMAN_DEBUG_LEVEL, ~0);
+NV_DEFINE_REG_ENTRY(__NV_RM_LOGON_RC, 1);
+NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MODIFY_DEVICE_FILES, 1);
+NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_UID, 0);
+NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_GID, 0);
+NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_MODE, 0666);
+NV_DEFINE_REG_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS, 1);
+NV_DEFINE_REG_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE, ~0);
+NV_DEFINE_REG_ENTRY(__NV_ENABLE_PCIE_GEN3, 0);
+NV_DEFINE_REG_ENTRY(__NV_ENABLE_MSI, 1);
+NV_DEFINE_REG_ENTRY(__NV_ENABLE_STREAM_MEMOPS, 0);
+NV_DEFINE_REG_ENTRY(__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER, 1);
+NV_DEFINE_REG_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, 0);
+NV_DEFINE_REG_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT, 0);
+NV_DEFINE_REG_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD,
256); +NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT, 3); +NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 200); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE, NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS, NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG); +NV_DEFINE_REG_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS, 1); + +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_USER_NUMA_MANAGEMENT, 1); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MEMORY_POOL_SIZE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_KMALLOC_HEAP_MAX_SIZE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_VMALLOC_HEAP_MAX_SIZE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 1); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PLATFORM_DEVICE_DRIVER, 1); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_RESIZABLE_BAR, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_TEGRA_GPU_PG_MASK, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_NONBLOCKING_OPEN, 1); + +NV_DEFINE_REG_STRING_ENTRY(__NV_COHERENT_GPU_MEMORY_MODE, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS_PER_DEVICE, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_RM_MSG, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_GPU_BLACKLIST, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_TEMPORARY_FILE_PATH, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL); +NV_DEFINE_REG_ENTRY(__NV_DMA_REMAP_PEER_MMIO, NV_DMA_REMAP_PEER_MMIO_ENABLE); +NV_DEFINE_REG_STRING_ENTRY(__NV_RM_NVLINK_BW, NULL); +NV_DEFINE_REG_ENTRY(__NV_RM_NVLINK_BW_LINK_COUNT, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IMEX_CHANNEL_COUNT, 2048); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_CREATE_IMEX_CHANNEL_0, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_GRDMA_PCI_TOPO_CHECK_OVERRIDE, + NV_REG_GRDMA_PCI_TOPO_CHECK_OVERRIDE_DEFAULT); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_SYSTEM_MEMORY_POOLS, NV_ENABLE_SYSTEM_MEMORY_POOLS_DEFAULT); + +/* + *----------------registry database definition---------------------- + */ + +/* + * You can enable any of the registry options disabled by default by + * editing their respective entries in the table below. The last field + * determines if the option is considered valid - in order for the + * changes to take effect, you need to recompile and reload the NVIDIA + * kernel module. 
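+ *
+ * For reference, given the macro definitions above, a table entry such as
+ * NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_MSI) expands to:
+ *
+ *   { "EnableMSI", &NVreg_EnableMSI }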
+ */ +nv_parm_t nv_parms[] = { + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RESMAN_DEBUG_LEVEL), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RM_LOGON_RC), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MODIFY_DEVICE_FILES), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_UID), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_GID), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_MSI), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_GEN3), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MEMORY_POOL_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_KMALLOC_HEAP_MAX_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_VMALLOC_HEAP_MAX_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_IGNORE_MMIO_CHECK), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_STREAM_MEMOPS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_USER_NUMA_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_NVLINK_DISABLE), + NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(__NV_RM_PROFILING_ADMIN_ONLY, + __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_TEGRA_GPU_PG_MASK), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_RESIZABLE_BAR), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RM_NVLINK_BW_LINK_COUNT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_DBG_BREAKPOINT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DMA_REMAP_PEER_MMIO), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_IMEX_CHANNEL_COUNT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_CREATE_IMEX_CHANNEL_0), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_GRDMA_PCI_TOPO_CHECK_OVERRIDE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_SYSTEM_MEMORY_POOLS), + {NULL, NULL} +}; + +#elif defined(NVRM) + +extern nv_parm_t nv_parms[]; + +#endif /* NV_DEFINE_REGISTRY_KEY_TABLE */ + +#endif /* _RM_REG_H_ */ diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h b/src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h new file mode 100644 index 0000000..02b0156 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h @@ -0,0 +1,49 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_ +#define _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_ + +#include + +/* + * This is a wrapper for NVOS02_PARAMETERS with file descriptor + */ + +typedef struct +{ + NVOS02_PARAMETERS params; + int fd; +} nv_ioctl_nvos02_parameters_with_fd; + +/* + * This is a wrapper for NVOS33_PARAMETERS with file descriptor + */ +typedef struct +{ + NVOS33_PARAMETERS params; + int fd; +} nv_ioctl_nvos33_parameters_with_fd; + +#endif // _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_ + diff --git a/src/nvidia/arch/nvalloc/unix/include/nv.h b/src/nvidia/arch/nvalloc/unix/include/nv.h new file mode 100644 index 0000000..654d033 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv.h @@ -0,0 +1,1293 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _NV_H_ +#define _NV_H_ + + + +#include + +#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(__FreeBSD__) + #include // NULL +#elif defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX) + #include // NULL +#else + #include // NULL +#endif + +#include +#include "nv_stdarg.h" +#include +#include +#include +#include +#include +#include + +extern nv_cap_t *nvidia_caps_root; + +extern const NvBool nv_is_rm_firmware_supported_os; + +#include +#include + +#include + +#define GPU_UUID_LEN (16) + +/* + * Buffer size for an ASCII UUID: We need 2 digits per byte, plus space + * for "GPU", 5 dashes, and '\0' termination: + */ +#define GPU_UUID_ASCII_LEN (GPU_UUID_LEN * 2 + 9) + +/* + * #define an absolute maximum used as a sanity check for the + * NV_ESC_IOCTL_XFER_CMD ioctl() size argument. + */ +#define NV_ABSOLUTE_MAX_IOCTL_SIZE 16384 + +/* + * Solaris provides no more than 8 bits for the argument size in + * the ioctl() command encoding; make sure we don't exceed this + * limit. + */ +#define __NV_IOWR_ASSERT(type) ((sizeof(type) <= NV_PLATFORM_MAX_IOCTL_SIZE) ? 
1 : -1) +#define __NV_IOWR(nr, type) ({ \ + typedef char __NV_IOWR_TYPE_SIZE_ASSERT[__NV_IOWR_ASSERT(type)]; \ + _IOWR(NV_IOCTL_MAGIC, (nr), type); \ +}) + +#define NV_PCI_DEV_FMT "%04x:%02x:%02x.%x" +#define NV_PCI_DEV_FMT_ARGS(nv) (nv)->pci_info.domain, (nv)->pci_info.bus, \ + (nv)->pci_info.slot, (nv)->pci_info.function + +#define NV_RM_DEVICE_INTR_ADDRESS 0x100 + +#define NV_TEGRA_PCI_IGPU_PG_MASK_DEFAULT 0xFFFFFFFF + +/* + * Clock domain identifier, which is used for fetching the engine + * load backed by the specified clock domain for Tegra platforms + * conforming linux devfreq framework to realize dynamic frequency + * scaling. + */ +typedef enum _TEGRASOC_DEVFREQ_CLK +{ + TEGRASOC_DEVFREQ_CLK_GPC, + TEGRASOC_DEVFREQ_CLK_NVD, +} TEGRASOC_DEVFREQ_CLK; + +/*! + * @brief The order of the display clocks in the below defined enum + * should be synced with below mapping array and macro. + * All four should be updated simultaneously in case + * of removal or addition of clocks in below order. + * Also, TEGRASOC_WHICH_CLK_MAX is used in various places + * in below mentioned files. + * arch/nvalloc/unix/Linux/nv-linux.h + * + * arch/nvalloc/unix/src/os.c + * dispClkMapRmToOsArr[] = {...}; + * + * arch/nvalloc/unix/Linux/nv-clk.c + * osMapClk[] = {...}; + * + */ +typedef enum _TEGRASOC_WHICH_CLK +{ + TEGRASOC_WHICH_CLK_NVDISPLAYHUB, + TEGRASOC_WHICH_CLK_NVDISPLAY_DISP, + TEGRASOC_WHICH_CLK_NVDISPLAY_P0, + TEGRASOC_WHICH_CLK_NVDISPLAY_P1, + TEGRASOC_WHICH_CLK_NVDISPLAY_P2, + TEGRASOC_WHICH_CLK_NVDISPLAY_P3, + TEGRASOC_WHICH_CLK_NVDISPLAY_P4, + TEGRASOC_WHICH_CLK_NVDISPLAY_P5, + TEGRASOC_WHICH_CLK_NVDISPLAY_P6, + TEGRASOC_WHICH_CLK_NVDISPLAY_P7, + TEGRASOC_WHICH_CLK_DPAUX0, + TEGRASOC_WHICH_CLK_FUSE, + TEGRASOC_WHICH_CLK_DSIPLL_VCO, + TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN, + TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA, + TEGRASOC_WHICH_CLK_SPPLL0_VCO, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN, + TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN, + TEGRASOC_WHICH_CLK_SPPLL0_DIV27, + TEGRASOC_WHICH_CLK_SPPLL1_DIV27, + TEGRASOC_WHICH_CLK_SPPLL0_DIV10, + TEGRASOC_WHICH_CLK_SPPLL0_DIV25, + TEGRASOC_WHICH_CLK_SPPLL1_VCO, + TEGRASOC_WHICH_CLK_VPLL0_REF, + TEGRASOC_WHICH_CLK_VPLL0, + TEGRASOC_WHICH_CLK_VPLL1, + TEGRASOC_WHICH_CLK_VPLL2, + TEGRASOC_WHICH_CLK_VPLL3, + TEGRASOC_WHICH_CLK_VPLL4, + TEGRASOC_WHICH_CLK_VPLL5, + TEGRASOC_WHICH_CLK_VPLL6, + TEGRASOC_WHICH_CLK_VPLL7, + TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF, + TEGRASOC_WHICH_CLK_RG0, + TEGRASOC_WHICH_CLK_RG1, + TEGRASOC_WHICH_CLK_RG2, + TEGRASOC_WHICH_CLK_RG3, + TEGRASOC_WHICH_CLK_RG4, + TEGRASOC_WHICH_CLK_RG5, + TEGRASOC_WHICH_CLK_RG6, + TEGRASOC_WHICH_CLK_RG7, + TEGRASOC_WHICH_CLK_DISPPLL, + TEGRASOC_WHICH_CLK_DISPHUBPLL, + TEGRASOC_WHICH_CLK_DSI_LP, + TEGRASOC_WHICH_CLK_DSI_CORE, + TEGRASOC_WHICH_CLK_DSI_PIXEL, + TEGRASOC_WHICH_CLK_PRE_SOR0, + TEGRASOC_WHICH_CLK_PRE_SOR1, + TEGRASOC_WHICH_CLK_PRE_SOR2, + TEGRASOC_WHICH_CLK_PRE_SOR3, + TEGRASOC_WHICH_CLK_DP_LINKA_REF, + TEGRASOC_WHICH_CLK_DP_LINKB_REF, + TEGRASOC_WHICH_CLK_DP_LINKC_REF, + TEGRASOC_WHICH_CLK_DP_LINKD_REF, + TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT, + TEGRASOC_WHICH_CLK_SOR_LINKB_INPUT, + TEGRASOC_WHICH_CLK_SOR_LINKC_INPUT, + TEGRASOC_WHICH_CLK_SOR_LINKD_INPUT, + TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO, + TEGRASOC_WHICH_CLK_SOR_LINKB_AFIFO, + TEGRASOC_WHICH_CLK_SOR_LINKC_AFIFO, + TEGRASOC_WHICH_CLK_SOR_LINKD_AFIFO, + TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M, + TEGRASOC_WHICH_CLK_RG0_M, + TEGRASOC_WHICH_CLK_RG1_M, + TEGRASOC_WHICH_CLK_SOR0_M, + 
TEGRASOC_WHICH_CLK_SOR1_M, + TEGRASOC_WHICH_CLK_PLLHUB, + TEGRASOC_WHICH_CLK_SOR0, + TEGRASOC_WHICH_CLK_SOR1, + TEGRASOC_WHICH_CLK_SOR2, + TEGRASOC_WHICH_CLK_SOR3, + TEGRASOC_WHICH_CLK_SOR_PADA_INPUT, + TEGRASOC_WHICH_CLK_SOR_PADB_INPUT, + TEGRASOC_WHICH_CLK_SOR_PADC_INPUT, + TEGRASOC_WHICH_CLK_SOR_PADD_INPUT, + TEGRASOC_WHICH_CLK_SOR0_PAD, + TEGRASOC_WHICH_CLK_SOR1_PAD, + TEGRASOC_WHICH_CLK_SOR2_PAD, + TEGRASOC_WHICH_CLK_SOR3_PAD, + TEGRASOC_WHICH_CLK_PRE_SF0, + TEGRASOC_WHICH_CLK_SF0, + TEGRASOC_WHICH_CLK_SF1, + TEGRASOC_WHICH_CLK_SF2, + TEGRASOC_WHICH_CLK_SF3, + TEGRASOC_WHICH_CLK_SF4, + TEGRASOC_WHICH_CLK_SF5, + TEGRASOC_WHICH_CLK_SF6, + TEGRASOC_WHICH_CLK_SF7, + TEGRASOC_WHICH_CLK_DSI_PAD_INPUT, + TEGRASOC_WHICH_CLK_PRE_SOR0_REF, + TEGRASOC_WHICH_CLK_PRE_SOR1_REF, + TEGRASOC_WHICH_CLK_SOR0_PLL_REF, + TEGRASOC_WHICH_CLK_SOR1_PLL_REF, + TEGRASOC_WHICH_CLK_SOR2_PLL_REF, + TEGRASOC_WHICH_CLK_SOR3_PLL_REF, + TEGRASOC_WHICH_CLK_SOR0_REF, + TEGRASOC_WHICH_CLK_SOR1_REF, + TEGRASOC_WHICH_CLK_SOR2_REF, + TEGRASOC_WHICH_CLK_SOR3_REF, + TEGRASOC_WHICH_CLK_OSC, + TEGRASOC_WHICH_CLK_DSC, + TEGRASOC_WHICH_CLK_MAUD, + TEGRASOC_WHICH_CLK_AZA_2XBIT, + TEGRASOC_WHICH_CLK_AZA_BIT, + TEGRASOC_WHICH_CLK_MIPI_CAL, + TEGRASOC_WHICH_CLK_UART_FST_MIPI_CAL, + TEGRASOC_WHICH_CLK_SOR0_DIV, + TEGRASOC_WHICH_CLK_DISP_ROOT, + TEGRASOC_WHICH_CLK_HUB_ROOT, + TEGRASOC_WHICH_CLK_PLLA_DISP, + TEGRASOC_WHICH_CLK_PLLA_DISPHUB, + TEGRASOC_WHICH_CLK_PLLA, + TEGRASOC_WHICH_CLK_VPLLX_SOR0_MUXED, + TEGRASOC_WHICH_CLK_VPLLX_SOR1_MUXED, + TEGRASOC_WHICH_CLK_VPLLX_SOR2_MUXED, + TEGRASOC_WHICH_CLK_VPLLX_SOR3_MUXED, + TEGRASOC_WHICH_CLK_SF0_SOR, + TEGRASOC_WHICH_CLK_SF1_SOR, + TEGRASOC_WHICH_CLK_SF2_SOR, + TEGRASOC_WHICH_CLK_SF3_SOR, + TEGRASOC_WHICH_CLK_SF4_SOR, + TEGRASOC_WHICH_CLK_SF5_SOR, + TEGRASOC_WHICH_CLK_SF6_SOR, + TEGRASOC_WHICH_CLK_SF7_SOR, + TEGRASOC_WHICH_CLK_EMC, + TEGRASOC_WHICH_CLK_GPU_FIRST, + TEGRASOC_WHICH_CLK_GPU_SYS = TEGRASOC_WHICH_CLK_GPU_FIRST, + TEGRASOC_WHICH_CLK_GPU_NVD, + TEGRASOC_WHICH_CLK_GPU_UPROC, + TEGRASOC_WHICH_CLK_GPU_GPC0, + TEGRASOC_WHICH_CLK_GPU_GPC1, + TEGRASOC_WHICH_CLK_GPU_GPC2, + TEGRASOC_WHICH_CLK_GPU_LAST = TEGRASOC_WHICH_CLK_GPU_GPC2, + TEGRASOC_WHICH_CLK_MAX, // TEGRASOC_WHICH_CLK_MAX is defined for boundary checks only. +} TEGRASOC_WHICH_CLK; + +#ifdef NVRM + +extern const char *pNVRM_ID; + +/* + * ptr arithmetic convenience + */ + +typedef union +{ + volatile NvV8 Reg008[1]; + volatile NvV16 Reg016[1]; + volatile NvV32 Reg032[1]; +} nv_hwreg_t, * nv_phwreg_t; + + +#define NVRM_PCICFG_NUM_BARS 6 +#define NVRM_PCICFG_BAR_OFFSET(i) (0x10 + (i) * 4) +#define NVRM_PCICFG_BAR_REQTYPE_MASK 0x00000001 +#define NVRM_PCICFG_BAR_REQTYPE_MEMORY 0x00000000 +#define NVRM_PCICFG_BAR_MEMTYPE_MASK 0x00000006 +#define NVRM_PCICFG_BAR_MEMTYPE_64BIT 0x00000004 +#define NVRM_PCICFG_BAR_ADDR_MASK 0xfffffff0 + +#define NVRM_PCICFG_NUM_DWORDS 16 + +#define NV_GPU_NUM_BARS 3 +#define NV_GPU_BAR_INDEX_REGS 0 +#define NV_GPU_BAR_INDEX_FB 1 +#define NV_GPU_BAR_INDEX_IMEM 2 + +typedef struct +{ + NvU64 cpu_address; + NvU64 size; + NvU32 offset; + NvU32 *map; + nv_phwreg_t map_u; +} nv_aperture_t; + +typedef struct +{ + char *name; + NvU32 *data; +} nv_parm_t; + +#define NV_RM_PAGE_SHIFT 12 +#define NV_RM_PAGE_SIZE (1 << NV_RM_PAGE_SHIFT) +#define NV_RM_PAGE_MASK (NV_RM_PAGE_SIZE - 1) + +#define NV_RM_TO_OS_PAGE_SHIFT (os_page_shift - NV_RM_PAGE_SHIFT) +#define NV_RM_PAGES_TO_OS_PAGES(count) \ + ((((NvUPtr)(count)) >> NV_RM_TO_OS_PAGE_SHIFT) + \ + ((((count) & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) != 0) ? 
1 : 0)) + +#if defined(NVCPU_X86_64) +#define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 3) +#else +#define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 2) +#endif + +typedef struct nvidia_stack_s +{ + NvU32 size; + void *top; + NvU8 stack[NV_STACK_SIZE-16] __attribute__ ((aligned(16))); +} nvidia_stack_t; + +/* + * TODO: Remove once all UNIX layers have been converted to use nvidia_stack_t + */ +typedef nvidia_stack_t nv_stack_t; + +typedef struct nv_file_private_t nv_file_private_t; + +/* + * This is a wrapper for UNIX events. Unlike the events that will be + * returned to clients, it includes kernel-specific data, such as the + * file pointer. + */ +typedef struct nv_event_s +{ + NvHandle hParent; + NvHandle hObject; + NvU32 index; + NvU32 info32; + NvU16 info16; + nv_file_private_t *nvfp; /* per file-descriptor data pointer */ + NvU32 fd; + NvBool active; /* whether the event should be signaled */ + NvU32 refcount; /* count of associated RM events */ + struct nv_event_s *next; +} nv_event_t; + +typedef struct nv_kern_mapping_s +{ + void *addr; + NvU64 size; + NvU32 modeFlag; + struct nv_kern_mapping_s *next; +} nv_kern_mapping_t; + +typedef struct nv_usermap_access_params_s +{ + NvU64 addr; + NvU64 size; + NvU64 offset; + NvU64 *page_array; + NvU64 num_pages; + MemoryArea memArea; + NvU64 access_start; + NvU64 access_size; + NvBool contig; + NvU32 caching; +} nv_usermap_access_params_t; + +/* + * Stores the mapping context for each mapping. + */ +typedef struct nv_alloc_mapping_context_s { + void *alloc; + NvU64 page_index; + NvU64 *page_array; + NvU64 num_pages; + MemoryArea memArea; + NvU64 access_start; + NvU64 access_size; + NvU32 prot; + NvBool valid; + NvU32 caching; +} nv_alloc_mapping_context_t; + +typedef enum +{ + NV_SOC_IRQ_DISPLAY_TYPE = 0x1, + NV_SOC_IRQ_DPAUX_TYPE, + NV_SOC_IRQ_GPIO_TYPE, + NV_SOC_IRQ_HDACODEC_TYPE, + NV_SOC_IRQ_TCPC2DISP_TYPE, + NV_SOC_IRQ_HFRP0_TYPE, + NV_SOC_IRQ_HFRP1_TYPE, + NV_SOC_IRQ_INVALID_TYPE +} nv_soc_irq_type_t; + +/* + * Stores the interrupt number, interrupt type, and private data for + * each SoC interrupt. + */ +typedef struct nv_soc_irq_info_s { + NvU32 irq_num; + nv_soc_irq_type_t irq_type; + NvBool bh_pending; + union { + NvU32 gpio_num; + NvU32 dpaux_instance; + } irq_data; + NvS32 ref_count; +} nv_soc_irq_info_t; + +#define NV_MAX_SOC_IRQS 10 +#define NV_MAX_DPAUX_NUM_DEVICES 4 +#define NV_MAX_DPAUX_DEV_NAME_SIZE 10 + +#define NV_MAX_SOC_DPAUX_NUM_DEVICES 4 + +/* + * Per-device state. + */ + +/* DMA-capable device data, defined by kernel interface layer */ +typedef struct nv_dma_device nv_dma_device_t; + +typedef struct nv_phys_addr_range +{ + NvU64 addr; + NvU64 len; +} nv_phys_addr_range_t; + +typedef struct nv_state_t +{ + void *priv; /* private data */ + void *os_state; /* os-specific device state */ + + int flags; + + /* PCI config info */ + nv_pci_info_t pci_info; + NvU16 subsystem_id; + NvU16 subsystem_vendor; + NvU32 gpu_id; + NvU32 iovaspace_id; + struct + { + NvBool valid; + NvU8 uuid[GPU_UUID_LEN]; + NvBool pci_uuid_read_attempted; + NV_STATUS pci_uuid_status; + } nv_uuid_cache; + void *handle; + + NvU32 pci_cfg_space[NVRM_PCICFG_NUM_DWORDS]; + + /* physical characteristics */ + nv_aperture_t bars[NV_GPU_NUM_BARS]; + nv_aperture_t *regs; + nv_aperture_t *dpaux[NV_MAX_DPAUX_NUM_DEVICES]; + nv_aperture_t *hdacodec_regs; + nv_aperture_t *mipical_regs; + nv_aperture_t *hfrp0_regs; + nv_aperture_t *hfrp1_regs; + nv_aperture_t *fb, ud; + nv_aperture_t *simregs; + + NvU32 num_dpaux_instance; + NvU32 interrupt_line; + NvU32 dpaux_irqs[NV_MAX_DPAUX_NUM_DEVICES]; + char dpaux_devname[NV_MAX_DPAUX_NUM_DEVICES][NV_MAX_DPAUX_DEV_NAME_SIZE]; + nv_soc_irq_info_t soc_irq_info[NV_MAX_SOC_IRQS]; + NvS32 current_soc_irq; + NvU32 num_soc_irqs; + NvU32 hdacodec_irq; + NvU32 tcpc2disp_irq; + NvU32 hfrp0_irq; + NvU32 hfrp1_irq; + NvU8 *soc_dcb_blob; + NvU32 soc_dcb_size; + NvU32 disp_sw_soc_chip_id; + NvBool soc_is_dpalt_mode_supported; + NvBool soc_is_hfrp_supported; + + NvU64 dma_mask; + + NvBool is_tegra_pci_igpu; + NvBool supports_tegra_igpu_rg; + NvBool is_tegra_pci_igpu_rg_enabled; + NvU32 tegra_pci_igpu_pg_mask; + + NvBool primary_vga; + + NvU32 sim_env; + + NvU32 rc_timer_enabled; + + /* list of events allocated for this device */ + nv_event_t *event_list; + + /* lock to protect event_list */ + void *event_spinlock; + + nv_kern_mapping_t *kern_mappings; + + /* Kernel interface DMA device data */ + nv_dma_device_t *dma_dev; + nv_dma_device_t *niso_dma_dev; + + /* + * Per-GPU queue. The actual queue object is usually allocated in the + * arch-specific parent structure (e.g. nv_linux_state_t), and this + * pointer just points to it. + */ + struct os_work_queue *queue; + + /* For loading RM as a firmware (DCE or GSP) client */ + NvBool request_firmware; /* request firmware from the OS */ + NvBool request_fw_client_rm; /* attempt to init RM as a FW client */ + NvBool allow_fallback_to_monolithic_rm; /* allow fallback to monolithic RM if FW client RM doesn't work out */ + NvBool enable_firmware_logs; /* attempt to enable firmware log decoding/printing */ + + /* Variable to track if nvidia_remove is called */ + NvBool removed; + + NvBool console_device; + + /* Variable to track if the GPU is an external GPU */ + NvBool is_external_gpu; + + /* Variable to track if the regkey PreserveVideoMemoryAllocations is set */ + NvBool preserve_vidmem_allocations; + + /* Variable to force allocation of 32-bit addressable memory */ + NvBool force_dma32_alloc; + + /* PCI power state should be D0 during system suspend */ + NvBool d0_state_in_suspend; + + /* Current cyclestats client and context */ + NvU32 profiler_owner; + void *profiler_context; + + /* + * RMAPI objects to use in the OS layer to talk to core RM. + * + * Note that we only need to store one subdevice handle: in SLI, we will + * have a separate nv_state_t per physical GPU. + */ + struct { + NvHandle hClient; + NvHandle hDevice; + NvHandle hSubDevice; + NvHandle hI2C; + NvHandle hDisp; + } rmapi; + + /* Bool to check if dma-buf is supported */ + NvBool dma_buf_supported; + + /* Check if the NVPCF DSM function is implemented under NVPCF or GPU device scope */ + NvBool nvpcf_dsm_in_gpu_scope; + + /* Bool to check if the device received a shutdown notification */ + NvBool is_shutdown; + + /* Bool to check if the GPU has a coherent sysmem link */ + NvBool coherent; + + /* + * Bool to check if GPU memory is backed by struct page. + * False for non-coherent platforms. May also be false + * on coherent platforms if GPU memory is not onlined to the kernel. + */ + NvBool mem_has_struct_page; + + /* OS detected GPU has ATS capability */ + NvBool ats_support; + /* + * NUMA node ID of the CPU to which the GPU is attached. + * Holds NUMA_NO_NODE on platforms that don't support NUMA configuration.
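+ * (On Linux, NUMA_NO_NODE is defined as -1, so a negative value here
+ * means no NUMA node is associated with the GPU.)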
+ */ + NvS32 cpu_numa_node_id; + + struct { + /* Bool to check if ISO iommu enabled */ + NvBool iso_iommu_present; + /* Bool to check if NISO iommu enabled */ + NvBool niso_iommu_present; + /* Display SMMU Stream IDs */ + NvU32 dispIsoStreamId; + NvU32 dispNisoStreamId; + } iommus; + + /* Console is managed by drm drivers or NVKMS */ + NvBool client_managed_console; +} nv_state_t; + +#define NVFP_TYPE_NONE 0x0 +#define NVFP_TYPE_REFCOUNTED 0x1 +#define NVFP_TYPE_REGISTERED 0x2 + +struct nv_file_private_t +{ + NvHandle *handles; + NvU16 maxHandles; + NvU32 deviceInstance; + NvU32 gpuInstanceId; + NvU8 metadata[64]; + + nv_file_private_t *ctl_nvfp; + void *ctl_nvfp_priv; + NvU32 register_or_refcount; + + // + // True if a client or an event was ever allocated on this fd. + // If false, RMAPI cleanup is skipped. + // + NvBool bCleanupRmapi; +}; + +// Forward define the gpu ops structures +typedef struct gpuSession *nvgpuSessionHandle_t; +typedef struct gpuDevice *nvgpuDeviceHandle_t; +typedef struct gpuAddressSpace *nvgpuAddressSpaceHandle_t; +typedef struct gpuTsg *nvgpuTsgHandle_t; +typedef struct UvmGpuTsgAllocParams_tag nvgpuTsgAllocParams_t; +typedef struct gpuChannel *nvgpuChannelHandle_t; +typedef struct UvmGpuChannelInfo_tag *nvgpuChannelInfo_t; +typedef struct UvmGpuChannelAllocParams_tag nvgpuChannelAllocParams_t; +typedef struct UvmGpuCaps_tag *nvgpuCaps_t; +typedef struct UvmGpuCopyEnginesCaps_tag *nvgpuCesCaps_t; +typedef struct UvmGpuAddressSpaceInfo_tag *nvgpuAddressSpaceInfo_t; +typedef struct UvmGpuAllocInfo_tag *nvgpuAllocInfo_t; +typedef struct UvmGpuP2PCapsParams_tag *nvgpuP2PCapsParams_t; +typedef struct UvmGpuFbInfo_tag *nvgpuFbInfo_t; +typedef struct UvmGpuNvlinkInfo_tag *nvgpuNvlinkInfo_t; +typedef struct UvmGpuEccInfo_tag *nvgpuEccInfo_t; +typedef struct UvmGpuFaultInfo_tag *nvgpuFaultInfo_t; +typedef struct UvmGpuAccessCntrInfo_tag *nvgpuAccessCntrInfo_t; +typedef struct UvmGpuAccessCntrConfig_tag nvgpuAccessCntrConfig_t; +typedef struct UvmGpuInfo_tag nvgpuInfo_t; +typedef struct UvmGpuClientInfo_tag nvgpuClientInfo_t; +typedef struct UvmPmaAllocationOptions_tag *nvgpuPmaAllocationOptions_t; +typedef struct UvmPmaStatistics_tag *nvgpuPmaStatistics_t; +typedef struct UvmGpuMemoryInfo_tag *nvgpuMemoryInfo_t; +typedef struct UvmGpuExternalMappingInfo_tag *nvgpuExternalMappingInfo_t; +typedef struct UvmGpuExternalPhysAddrInfo_tag *nvgpuExternalPhysAddrInfo_t; +typedef struct UvmGpuChannelResourceInfo_tag *nvgpuChannelResourceInfo_t; +typedef struct UvmGpuChannelInstanceInfo_tag *nvgpuChannelInstanceInfo_t; +typedef struct UvmGpuChannelResourceBindParams_tag *nvgpuChannelResourceBindParams_t; +typedef struct UvmGpuPagingChannelAllocParams_tag nvgpuPagingChannelAllocParams_t; +typedef struct UvmGpuPagingChannel_tag *nvgpuPagingChannelHandle_t; +typedef struct UvmGpuPagingChannelInfo_tag *nvgpuPagingChannelInfo_t; +typedef enum UvmPmaGpuMemoryType_tag nvgpuGpuMemoryType_t; +typedef NV_STATUS (*nvPmaEvictPagesCallback)(void *, NvU64, NvU64 *, NvU32, NvU64, NvU64, nvgpuGpuMemoryType_t); +typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64, nvgpuGpuMemoryType_t); + +/* + * flags + */ + +#define NV_FLAG_OPEN 0x0001 +#define NV_FLAG_EXCLUDE 0x0002 +#define NV_FLAG_CONTROL 0x0004 +#define NV_FLAG_PCI_P2P_UNSUPPORTED_CHIPSET 0x0008 +#define NV_FLAG_SOC_DISPLAY 0x0010 +#define NV_FLAG_USES_MSI 0x0020 +#define NV_FLAG_USES_MSIX 0x0040 +#define NV_FLAG_PASSTHRU 0x0080 +#define NV_FLAG_SUSPENDED 0x0100 +/* To be set when an FLR needs to be triggered after device shut 
down. */ +#define NV_FLAG_TRIGGER_FLR 0x0400 +#define NV_FLAG_PERSISTENT_SW_STATE 0x0800 +#define NV_FLAG_IN_RECOVERY 0x1000 +#define NV_FLAG_PCI_REMOVE_IN_PROGRESS 0x2000 +#define NV_FLAG_UNBIND_LOCK 0x4000 +/* To be set when GPU is not present on the bus, to help device teardown */ +#define NV_FLAG_IN_SURPRISE_REMOVAL 0x8000 + +typedef enum +{ + NV_PM_ACTION_HIBERNATE, + NV_PM_ACTION_STANDBY, + NV_PM_ACTION_RESUME +} nv_pm_action_t; + +typedef enum +{ + NV_PM_ACTION_DEPTH_DEFAULT, + NV_PM_ACTION_DEPTH_MODESET, + NV_PM_ACTION_DEPTH_UVM +} nv_pm_action_depth_t; + +typedef enum +{ + NV_DYNAMIC_PM_NEVER, + NV_DYNAMIC_PM_COARSE, + NV_DYNAMIC_PM_FINE +} nv_dynamic_power_mode_t; + +typedef enum +{ + NV_POWER_STATE_IN_HIBERNATE, + NV_POWER_STATE_IN_STANDBY, + NV_POWER_STATE_RUNNING +} nv_power_state_t; + +typedef struct +{ + const char *vidmem_power_status; + const char *dynamic_power_status; + const char *gc6_support; + const char *gcoff_support; + const char *s0ix_status; + const char *db_support; +} nv_power_info_t; + +typedef enum +{ + NV_MEMORY_TYPE_SYSTEM, /* Memory mapped for ROM, SBIOS and physical RAM. */ + NV_MEMORY_TYPE_REGISTERS, + NV_MEMORY_TYPE_FRAMEBUFFER, + NV_MEMORY_TYPE_DEVICE_MMIO, /* All kinds of MMIO referred by NVRM e.g. BARs and MCFG of device */ +} nv_memory_type_t; + +#define NV_PRIMARY_VGA(nv) ((nv)->primary_vga) + +#define NV_IS_CTL_DEVICE(nv) ((nv)->flags & NV_FLAG_CONTROL) +#define NV_IS_SOC_DISPLAY_DEVICE(nv) \ + ((nv)->flags & NV_FLAG_SOC_DISPLAY) + +#define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv) \ + (((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0) + +/* + * For console setup by EFI GOP, the base address is BAR1. + * For console setup by VBIOS, the base address is BAR2 + 16MB. + */ +#define NV_IS_CONSOLE_MAPPED(nv, addr) \ + (((addr) == (nv)->bars[NV_GPU_BAR_INDEX_FB].cpu_address) || \ + ((addr) == ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000))) + +#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv) \ + ((nv)->iommus.iso_iommu_present) + +#define NV_SOC_IS_NISO_IOMMU_PRESENT(nv) \ + ((nv)->iommus.niso_iommu_present) +/* + * GPU add/remove events + */ +#define NV_SYSTEM_GPU_ADD_EVENT 0x9001 +#define NV_SYSTEM_GPU_REMOVE_EVENT 0x9002 + +/* + * NVIDIA ACPI sub-event IDs (event types) to be passed into + * to core NVIDIA driver for ACPI events. + */ +#define NV_SYSTEM_ACPI_EVENT_VALUE_DISPLAY_SWITCH_DEFAULT 0 +#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_UNDOCKED 0 +#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_DOCKED 1 + +#define NV_ACPI_NVIF_HANDLE_PRESENT 0x01 +#define NV_ACPI_DSM_HANDLE_PRESENT 0x02 +#define NV_ACPI_WMMX_HANDLE_PRESENT 0x04 + +#define NV_EVAL_ACPI_METHOD_NVIF 0x01 +#define NV_EVAL_ACPI_METHOD_WMMX 0x02 + +typedef enum { + NV_I2C_CMD_READ = 1, + NV_I2C_CMD_WRITE, + NV_I2C_CMD_SMBUS_READ, + NV_I2C_CMD_SMBUS_WRITE, + NV_I2C_CMD_SMBUS_QUICK_WRITE, + NV_I2C_CMD_SMBUS_QUICK_READ, + NV_I2C_CMD_SMBUS_BLOCK_READ, + NV_I2C_CMD_SMBUS_BLOCK_WRITE, + NV_I2C_CMD_BLOCK_READ, + NV_I2C_CMD_BLOCK_WRITE +} nv_i2c_cmd_t; + +// Flags needed by OSAllocPagesNode +#define NV_ALLOC_PAGES_NODE_NONE 0x0 +#define NV_ALLOC_PAGES_NODE_SKIP_RECLAIM 0x1 + +/* +** where we hide our nv_state_t * ... +*/ +#define NV_SET_NV_STATE(pgpu,p) ((pgpu)->pOsGpuInfo = (p)) +#define NV_GET_NV_STATE(pGpu) \ + (nv_state_t *)((pGpu) ? 
(pGpu)->pOsGpuInfo : NULL) + +static inline NvBool IS_REG_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length) +{ + return ((offset >= nv->regs->cpu_address) && + ((offset + (length - 1)) >= offset) && + ((offset + (length - 1)) <= (nv->regs->cpu_address + (nv->regs->size - 1)))); +} + +static inline NvBool IS_FB_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length) +{ + return ((nv->fb) && (nv->fb->size != 0) && + (offset >= nv->fb->cpu_address) && + ((offset + (length - 1)) >= offset) && + ((offset + (length - 1)) <= (nv->fb->cpu_address + (nv->fb->size - 1)))); +} + +static inline NvBool IS_UD_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length) +{ + return ((nv->ud.cpu_address != 0) && (nv->ud.size != 0) && + (offset >= nv->ud.cpu_address) && + ((offset + (length - 1)) >= offset) && + ((offset + (length - 1)) <= (nv->ud.cpu_address + (nv->ud.size - 1)))); +} + +static inline NvBool IS_IMEM_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length) +{ + return ((nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address != 0) && + (nv->bars[NV_GPU_BAR_INDEX_IMEM].size != 0) && + (offset >= nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address) && + ((offset + (length - 1)) >= offset) && + ((offset + (length - 1)) <= (nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + + (nv->bars[NV_GPU_BAR_INDEX_IMEM].size - 1)))); +} + +#define NV_RM_MAX_MSIX_LINES 8 + +#define NV_MAX_ISR_DELAY_US 20000 +#define NV_MAX_ISR_DELAY_MS (NV_MAX_ISR_DELAY_US / 1000) + +#define NV_TIMERCMP(a, b, CMP) \ + (((a)->tv_sec == (b)->tv_sec) ? \ + ((a)->tv_usec CMP (b)->tv_usec) : ((a)->tv_sec CMP (b)->tv_sec)) + +#define NV_TIMERADD(a, b, result) \ + { \ + (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \ + (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \ + if ((result)->tv_usec >= 1000000) \ + { \ + ++(result)->tv_sec; \ + (result)->tv_usec -= 1000000; \ + } \ + } + +#define NV_TIMERSUB(a, b, result) \ + { \ + (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \ + (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \ + if ((result)->tv_usec < 0) \ + { \ + --(result)->tv_sec; \ + (result)->tv_usec += 1000000; \ + } \ + } + +#define NV_TIMEVAL_TO_US(tv) ((NvU64)(tv).tv_sec * 1000000 + (tv).tv_usec) + +#ifndef NV_ALIGN_UP +#define NV_ALIGN_UP(v,g) (((v) + ((g) - 1)) & ~((g) - 1)) +#endif +#ifndef NV_ALIGN_DOWN +#define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1)) +#endif + + +/* + * driver internal interfaces + */ + +/* + * --------------------------------------------------------------------------- + * + * Function prototypes for UNIX specific OS interface. 
+ * + * --------------------------------------------------------------------------- + */ + +NvU32 NV_API_CALL nv_get_dev_minor (nv_state_t *); +void* NV_API_CALL nv_alloc_kernel_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, void **); +void NV_API_CALL nv_free_kernel_mapping (nv_state_t *, void *, void *, void *); +NV_STATUS NV_API_CALL nv_alloc_user_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, NvU32, NvU64 *, void **); +void NV_API_CALL nv_free_user_mapping (nv_state_t *, void *, NvU64, void *); +NV_STATUS NV_API_CALL nv_add_mapping_context_to_file (nv_state_t *, nv_usermap_access_params_t*, NvU32, void *, NvU64, NvU32); + +NvU64 NV_API_CALL nv_get_kern_phys_address (NvU64); +NvU64 NV_API_CALL nv_get_user_phys_address (NvU64); +nv_state_t* NV_API_CALL nv_get_adapter_state (NvU32, NvU8, NvU8); +nv_state_t* NV_API_CALL nv_get_ctl_state (void); + +void NV_API_CALL nv_set_dma_address_size (nv_state_t *, NvU32 ); + +NV_STATUS NV_API_CALL nv_alias_pages (nv_state_t *, NvU32, NvU64, NvU32, NvU32, NvU64, NvU64 *, NvBool, void **); +NV_STATUS NV_API_CALL nv_alloc_pages (nv_state_t *, NvU32, NvU64, NvBool, NvU32, NvBool, NvBool, NvS32, NvU64 *, void **); +NV_STATUS NV_API_CALL nv_free_pages (nv_state_t *, NvU32, NvBool, NvU32, void *); + +NV_STATUS NV_API_CALL nv_register_user_pages (nv_state_t *, NvU64, NvU64 *, void *, void **, NvBool); +void NV_API_CALL nv_unregister_user_pages (nv_state_t *, NvU64, void **, void **); + +NV_STATUS NV_API_CALL nv_register_peer_io_mem (nv_state_t *, NvU64 *, NvU64, void **); +void NV_API_CALL nv_unregister_peer_io_mem(nv_state_t *, void *); + +struct sg_table; + +NV_STATUS NV_API_CALL nv_register_sgt (nv_state_t *, NvU64 *, NvU64, NvU32, void **, + struct sg_table *, void *, NvBool); +void NV_API_CALL nv_unregister_sgt (nv_state_t *, struct sg_table **, void **, void *); +NV_STATUS NV_API_CALL nv_register_phys_pages (nv_state_t *, NvU64 *, NvU64, NvU32, void **); +void NV_API_CALL nv_unregister_phys_pages (nv_state_t *, void *); + +NV_STATUS NV_API_CALL nv_dma_map_sgt (nv_dma_device_t *, NvU64, NvU64 *, NvU32, void **); + +NV_STATUS NV_API_CALL nv_dma_map_alloc (nv_dma_device_t *, NvU64, NvU64 *, NvBool, void **); +NV_STATUS NV_API_CALL nv_dma_unmap_alloc (nv_dma_device_t *, NvU64, NvU64 *, void **); + +NV_STATUS NV_API_CALL nv_dma_map_peer (nv_dma_device_t *, nv_dma_device_t *, NvU8, NvU64, NvU64 *); +NV_STATUS NV_API_CALL nv_dma_map_non_pci_peer (nv_dma_device_t *, NvU64, NvU64 *); +void NV_API_CALL nv_dma_unmap_peer (nv_dma_device_t *, NvU64, NvU64); + +NV_STATUS NV_API_CALL nv_dma_map_mmio (nv_dma_device_t *, NvU64, NvU64 *); +void NV_API_CALL nv_dma_unmap_mmio (nv_dma_device_t *, NvU64, NvU64); + +void NV_API_CALL nv_dma_cache_invalidate (nv_dma_device_t *, void *); +NvBool NV_API_CALL nv_grdma_pci_topology_supported(nv_state_t *, nv_dma_device_t *); + +NvS32 NV_API_CALL nv_start_rc_timer (nv_state_t *); +NvS32 NV_API_CALL nv_stop_rc_timer (nv_state_t *); + +void NV_API_CALL nv_post_event (nv_event_t *, NvHandle, NvU32, NvU32, NvU16, NvBool); +NvS32 NV_API_CALL nv_get_event (nv_file_private_t *, nv_event_t *, NvU32 *); + +void* NV_API_CALL nv_i2c_add_adapter (nv_state_t *, NvU32); +void NV_API_CALL nv_i2c_del_adapter (nv_state_t *, void *); + +void NV_API_CALL nv_acpi_methods_init (NvU32 *); +void NV_API_CALL nv_acpi_methods_uninit (void); + +NV_STATUS NV_API_CALL nv_acpi_method (NvU32, NvU32, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *); +NV_STATUS NV_API_CALL nv_acpi_d3cold_dsm_for_upstream_port (nv_state_t *, NvU8 *, NvU32, NvU32, NvU32 *); 
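+
+/*
+ * Hypothetical helper, shown only to illustrate the overflow-safe range
+ * check used by IS_REG_OFFSET(), IS_FB_OFFSET(), and friends above; it is
+ * not used elsewhere in the driver. Expressing the end of the range as
+ * (offset + (length - 1)) and requiring that it did not wrap below
+ * 'offset' rejects lengths that would overflow NvU64: for example,
+ * offset = 0xFFFFFFFFFFFFF000 with length = 0x2000 fails the wraparound
+ * test instead of silently passing with a wrapped end address.
+ */
+static inline NvBool nv_offset_range_within(NvU64 base, NvU64 size,
+                                            NvU64 offset, NvU64 length)
+{
+    return ((size != 0) && (length != 0) &&
+            (offset >= base) &&
+            ((offset + (length - 1)) >= offset) && /* no NvU64 wraparound */
+            ((offset + (length - 1)) <= (base + (size - 1))));
+}
+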
+NV_STATUS NV_API_CALL nv_acpi_dsm_method (nv_state_t *, NvU8 *, NvU32, NvBool, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *); +NV_STATUS NV_API_CALL nv_acpi_ddc_method (nv_state_t *, void *, NvU32 *, NvBool); +NV_STATUS NV_API_CALL nv_acpi_dod_method (nv_state_t *, NvU32 *, NvU32 *); +NV_STATUS NV_API_CALL nv_acpi_rom_method (nv_state_t *, NvU32 *, NvU32 *); +NV_STATUS NV_API_CALL nv_acpi_get_powersource (NvU32 *); +NvBool NV_API_CALL nv_acpi_is_battery_present(void); + +NV_STATUS NV_API_CALL nv_acpi_mux_method (nv_state_t *, NvU32 *, NvU32, const char *); + +NV_STATUS NV_API_CALL nv_log_error (nv_state_t *, NvU32, const char *, va_list); + +NV_STATUS NV_API_CALL nv_set_primary_vga_status(nv_state_t *); +NvBool NV_API_CALL nv_requires_dma_remap (nv_state_t *); + +NvBool NV_API_CALL nv_is_rm_firmware_active(nv_state_t *); +const void*NV_API_CALL nv_get_firmware(nv_state_t *, nv_firmware_type_t, nv_firmware_chip_family_t, const void **, NvU32 *); +void NV_API_CALL nv_put_firmware(const void *); + +nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **); +void NV_API_CALL nv_put_file_private(void *); + +NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU64 *, NvU32 *, NvS32 *); +NV_STATUS NV_API_CALL nv_get_egm_info(nv_state_t *, NvU64 *, NvU64 *, NvS32 *); + +void NV_API_CALL nv_p2p_free_platform_data(void *data); + +NV_STATUS NV_API_CALL nv_revoke_gpu_mappings (nv_state_t *); +void NV_API_CALL nv_acquire_mmap_lock (nv_state_t *); +void NV_API_CALL nv_release_mmap_lock (nv_state_t *); +NvBool NV_API_CALL nv_get_all_mappings_revoked_locked (nv_state_t *); +void NV_API_CALL nv_set_safe_to_mmap_locked (nv_state_t *, NvBool); + +NV_STATUS NV_API_CALL nv_indicate_idle (nv_state_t *); +NV_STATUS NV_API_CALL nv_indicate_not_idle (nv_state_t *); +void NV_API_CALL nv_idle_holdoff (nv_state_t *); + +NvBool NV_API_CALL nv_dynamic_power_available (nv_state_t *); +void NV_API_CALL nv_audio_dynamic_power (nv_state_t *); + +void NV_API_CALL nv_control_soc_irqs (nv_state_t *, NvBool bEnable); +NV_STATUS NV_API_CALL nv_get_current_irq_priv_data(nv_state_t *, NvU32 *); + +NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap (int, int*); +int NV_API_CALL nv_cap_drv_init(void); +void NV_API_CALL nv_cap_drv_exit(void); +NvBool NV_API_CALL nv_is_gpu_accessible(nv_state_t *); +NvBool NV_API_CALL nv_match_gpu_os_info(nv_state_t *, void *); + +void NV_API_CALL nv_get_updated_emu_seg(NvU32 *start, NvU32 *end); +void NV_API_CALL nv_get_screen_info(nv_state_t *, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU32 *, NvU64 *); +void NV_API_CALL nv_set_gpu_pg_mask(nv_state_t *); + +struct dma_buf; +typedef struct nv_dma_buf nv_dma_buf_t; +struct drm_gem_object; + +NV_STATUS NV_API_CALL nv_dma_import_sgt (nv_dma_device_t *, struct sg_table *, struct drm_gem_object *); +void NV_API_CALL nv_dma_release_sgt(struct sg_table *, struct drm_gem_object *); +NV_STATUS NV_API_CALL nv_dma_import_dma_buf (nv_dma_device_t *, struct dma_buf *, NvBool, NvU32 *, struct sg_table **, nv_dma_buf_t **); +NV_STATUS NV_API_CALL nv_dma_import_from_fd (nv_dma_device_t *, NvS32, NvBool, NvU32 *, struct sg_table **, nv_dma_buf_t **); +void NV_API_CALL nv_dma_release_dma_buf (nv_dma_buf_t *); + +void NV_API_CALL nv_schedule_uvm_isr (nv_state_t *); + +NV_STATUS NV_API_CALL nv_schedule_uvm_drain_p2p (NvU8 *); +void NV_API_CALL nv_schedule_uvm_resume_p2p (NvU8 *); + +NvBool NV_API_CALL nv_platform_supports_s0ix (void); +NvBool NV_API_CALL nv_s2idle_pm_configured (void); + +NvBool NV_API_CALL 
nv_pci_tegra_register_power_domain (nv_state_t *, NvBool); +NvBool NV_API_CALL nv_pci_tegra_pm_init (nv_state_t *); +void NV_API_CALL nv_pci_tegra_pm_deinit (nv_state_t *); + +NvBool NV_API_CALL nv_is_chassis_notebook (void); +void NV_API_CALL nv_allow_runtime_suspend (nv_state_t *nv); +void NV_API_CALL nv_disallow_runtime_suspend (nv_state_t *nv); + +typedef void (*nvTegraDceClientIpcCallback)(NvU32, NvU32, NvU32, void *, void *); + +NV_STATUS NV_API_CALL nv_get_num_phys_pages (void *, NvU32 *); +NV_STATUS NV_API_CALL nv_get_phys_pages (void *, void *, NvU32 *); +void NV_API_CALL nv_get_disp_smmu_stream_ids (nv_state_t *, NvU32 *, NvU32 *); + +typedef struct TEGRA_IMP_IMPORT_DATA TEGRA_IMP_IMPORT_DATA; +typedef struct nv_i2c_msg_s nv_i2c_msg_t; + +NV_STATUS NV_API_CALL nv_bpmp_send_mrq (nv_state_t *, NvU32, const void *, NvU32, void *, NvU32, NvS32 *, NvS32 *); +NV_STATUS NV_API_CALL nv_i2c_transfer(nv_state_t *, NvU32, NvU8, nv_i2c_msg_t *, int); +void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *); +NV_STATUS NV_API_CALL nv_i2c_bus_status(nv_state_t *, NvU32, NvS32 *, NvS32 *); +NV_STATUS NV_API_CALL nv_imp_get_import_data (TEGRA_IMP_IMPORT_DATA *); +NV_STATUS NV_API_CALL nv_imp_enable_disable_rfl (nv_state_t *nv, NvBool bEnable); +NV_STATUS NV_API_CALL nv_imp_icc_set_bw (nv_state_t *nv, NvU32 avg_bw_kbps, NvU32 floor_bw_kbps); +NV_STATUS NV_API_CALL nv_get_num_dpaux_instances(nv_state_t *nv, NvU32 *num_instances); +NV_STATUS NV_API_CALL nv_get_tegra_brightness_level(nv_state_t *, NvU32 *); +NV_STATUS NV_API_CALL nv_set_tegra_brightness_level(nv_state_t *, NvU32); + +NV_STATUS NV_API_CALL nv_soc_device_reset (nv_state_t *); +NV_STATUS NV_API_CALL nv_soc_pm_powergate (nv_state_t *); +NV_STATUS NV_API_CALL nv_soc_pm_unpowergate (nv_state_t *); +NV_STATUS NV_API_CALL nv_gpio_get_pin_state(nv_state_t *, NvU32, NvU32 *); +void NV_API_CALL nv_gpio_set_pin_state(nv_state_t *, NvU32, NvU32); +NV_STATUS NV_API_CALL nv_gpio_set_pin_direction(nv_state_t *, NvU32, NvU32); +NV_STATUS NV_API_CALL nv_gpio_get_pin_direction(nv_state_t *, NvU32, NvU32 *); +NV_STATUS NV_API_CALL nv_gpio_get_pin_number(nv_state_t *, NvU32, NvU32 *); +NvBool NV_API_CALL nv_gpio_get_pin_interrupt_status(nv_state_t *, NvU32, NvU32); +NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt(nv_state_t *, NvU32, NvU32); +NvU32 NV_API_CALL nv_tegra_get_rm_interface_type(NvU32); +NV_STATUS NV_API_CALL nv_tegra_dce_register_ipc_client(NvU32, void *, nvTegraDceClientIpcCallback, NvU32 *); +NV_STATUS NV_API_CALL nv_tegra_dce_client_ipc_send_recv(NvU32, void *, NvU32); +NV_STATUS NV_API_CALL nv_tegra_dce_unregister_ipc_client(NvU32); +NV_STATUS NV_API_CALL nv_dsi_parse_panel_props(nv_state_t *, void *); +NvBool NV_API_CALL nv_dsi_is_panel_connected(nv_state_t *); +NV_STATUS NV_API_CALL nv_dsi_panel_enable(nv_state_t *, void *); +NV_STATUS NV_API_CALL nv_dsi_panel_reset(nv_state_t *, void *); +void NV_API_CALL nv_dsi_panel_disable(nv_state_t *, void *); +void NV_API_CALL nv_dsi_panel_cleanup(nv_state_t *, void *); +NV_STATUS NV_API_CALL nv_soc_mipi_cal_reset(nv_state_t *); +NvU32 NV_API_CALL nv_soc_fuse_register_read (NvU32 addr); +NvBool NV_API_CALL nv_get_hdcp_enabled(nv_state_t *nv); +NV_STATUS NV_API_CALL nv_get_valid_window_head_mask(nv_state_t *nv, NvU64 *); +NV_STATUS NV_API_CALL nv_dp_uphy_pll_init(nv_state_t *, NvU32, NvU32); +NV_STATUS NV_API_CALL nv_dp_uphy_pll_deinit(nv_state_t *); +NV_STATUS NV_API_CALL nv_soc_i2c_hsp_semaphore_acquire(NvU32 ownerId, NvBool bAcquire, NvU64 timeout); +typedef void 
(*nv_soc_tsec_cb_func_t)(void*, void*); +NvU32 NV_API_CALL nv_soc_tsec_send_cmd(void* cmd, nv_soc_tsec_cb_func_t cb_func, void* cb_context); +NvU32 NV_API_CALL nv_soc_tsec_event_register(nv_soc_tsec_cb_func_t cb_func, void* cb_context, NvBool is_init_event); +NvU32 NV_API_CALL nv_soc_tsec_event_unregister(NvBool is_init_event); +void* NV_API_CALL nv_soc_tsec_alloc_mem_desc(NvU32 num_bytes, NvU32 *flcn_addr); +void NV_API_CALL nv_soc_tsec_free_mem_desc(void *mem_desc); +NvBool NV_API_CALL nv_is_clk_enabled (nv_state_t *, TEGRASOC_WHICH_CLK); +NV_STATUS NV_API_CALL nv_set_parent (nv_state_t *, TEGRASOC_WHICH_CLK, TEGRASOC_WHICH_CLK); +NV_STATUS NV_API_CALL nv_get_parent (nv_state_t *, TEGRASOC_WHICH_CLK, TEGRASOC_WHICH_CLK*); +NV_STATUS NV_API_CALL nv_clk_get_handles (nv_state_t *); +void NV_API_CALL nv_clk_clear_handles (nv_state_t *); +NV_STATUS NV_API_CALL nv_enable_clk (nv_state_t *, TEGRASOC_WHICH_CLK); +void NV_API_CALL nv_disable_clk (nv_state_t *, TEGRASOC_WHICH_CLK); +NV_STATUS NV_API_CALL nv_get_curr_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *); +NV_STATUS NV_API_CALL nv_get_max_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *); +NV_STATUS NV_API_CALL nv_get_min_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *); +NV_STATUS NV_API_CALL nv_set_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32); + +/* + * --------------------------------------------------------------------------- + * + * Function prototypes for Resource Manager interface. + * + * --------------------------------------------------------------------------- + */ + +NvBool NV_API_CALL rm_init_rm (nvidia_stack_t *); +void NV_API_CALL rm_shutdown_rm (nvidia_stack_t *); +NvBool NV_API_CALL rm_init_private_state (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_free_private_state (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_init_adapter (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_disable_adapter (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_shutdown_adapter (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_exclude_adapter (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_acquire_api_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_release_api_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_acquire_gpu_lock (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_release_gpu_lock (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_acquire_all_gpus_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_release_all_gpus_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_ioctl (nvidia_stack_t *, nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32); +NvBool NV_API_CALL rm_isr (nvidia_stack_t *, nv_state_t *, NvU32 *); +void NV_API_CALL rm_isr_bh (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_isr_bh_unlocked (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_is_msix_allowed (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_wait_for_bar_firewall (nvidia_stack_t *, NvU32 domain, NvU8 bus, NvU8 device, NvU8 function, NvU16 devId, NvU16 subsystemId); +NV_STATUS NV_API_CALL rm_pmu_perfmon_get_load (nvidia_stack_t *, nv_state_t *, NvU32 *, TEGRASOC_DEVFREQ_CLK); +NV_STATUS NV_API_CALL rm_power_management (nvidia_stack_t *, nv_state_t *, nv_pm_action_t); +NV_STATUS NV_API_CALL rm_stop_user_channels (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_restart_user_channels (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_save_low_res_mode (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_get_vbios_version (nvidia_stack_t *, 
nv_state_t *, char *); +char* NV_API_CALL rm_get_gpu_uuid (nvidia_stack_t *, nv_state_t *); +const NvU8* NV_API_CALL rm_get_gpu_uuid_raw (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_set_rm_firmware_requested(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_get_firmware_version (nvidia_stack_t *, nv_state_t *, char *, NvLength); +void NV_API_CALL rm_cleanup_file_private (nvidia_stack_t *, nv_state_t *, nv_file_private_t *); +void NV_API_CALL rm_unbind_lock (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_read_registry_dword (nvidia_stack_t *, nv_state_t *, const char *, NvU32 *); +NV_STATUS NV_API_CALL rm_write_registry_dword (nvidia_stack_t *, nv_state_t *, const char *, NvU32); +NV_STATUS NV_API_CALL rm_write_registry_binary (nvidia_stack_t *, nv_state_t *, const char *, NvU8 *, NvU32); +NV_STATUS NV_API_CALL rm_write_registry_string (nvidia_stack_t *, nv_state_t *, const char *, const char *, NvU32); +void NV_API_CALL rm_parse_option_string (nvidia_stack_t *, const char *); +char* NV_API_CALL rm_remove_spaces (const char *); +char* NV_API_CALL rm_string_token (char **, const char); +void NV_API_CALL rm_vgpu_vfio_set_driver_vm(nvidia_stack_t *, NvBool); +NV_STATUS NV_API_CALL rm_get_adapter_status_external(nvidia_stack_t *, nv_state_t *); + +NV_STATUS NV_API_CALL rm_run_rc_callback (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_execute_work_item (nvidia_stack_t *, void *); +const char* NV_API_CALL rm_get_device_name (NvU16, NvU16, NvU16); + +NV_STATUS NV_API_CALL rm_is_supported_device (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_is_supported_pci_device(NvU8 pci_class, + NvU8 pci_subclass, + NvU16 vendor, + NvU16 device, + NvU16 subsystem_vendor, + NvU16 subsystem_device, + NvBool print_legacy_warning); + +void NV_API_CALL rm_i2c_remove_adapters (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_i2c_is_smbus_capable (nvidia_stack_t *, nv_state_t *, void *); +NV_STATUS NV_API_CALL rm_i2c_transfer (nvidia_stack_t *, nv_state_t *, void *, nv_i2c_cmd_t, NvU8, NvU8, NvU32, NvU8 *); + +NV_STATUS NV_API_CALL rm_perform_version_check (nvidia_stack_t *, void *, NvU32); + +void NV_API_CALL rm_power_source_change_event (nvidia_stack_t *, NvU32); + +void NV_API_CALL rm_request_dnotifier_state (nvidia_stack_t *, nv_state_t *); + +void NV_API_CALL rm_disable_gpu_state_persistence (nvidia_stack_t *sp, nv_state_t *); +NV_STATUS NV_API_CALL rm_p2p_init_mapping (nvidia_stack_t *, NvU64, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU64, NvU64, NvU64, NvU64, void (*)(void *), void *); +NV_STATUS NV_API_CALL rm_p2p_destroy_mapping (nvidia_stack_t *, NvU64); +NV_STATUS NV_API_CALL rm_p2p_get_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU8 **, void *, NvBool *); +NV_STATUS NV_API_CALL rm_p2p_get_gpu_info (nvidia_stack_t *, NvU64, NvU64, NvU8 **, void **); +NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, NvU64, void **, NvU64 *, NvU32 *, NvBool, void *, void *, void **, NvBool *); +NV_STATUS NV_API_CALL rm_p2p_register_callback (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *); +NV_STATUS NV_API_CALL rm_p2p_put_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, void *); +NV_STATUS NV_API_CALL rm_p2p_put_pages_persistent(nvidia_stack_t *, void *, void *, void *); +NV_STATUS NV_API_CALL rm_p2p_dma_map_pages (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU64, NvU32, NvU64 *, void **); +NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, 
NvHandle, NvHandle, + NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *, void **, + NvBool *, NvU32 *, NvBool *, nv_memory_type_t *); +void NV_API_CALL rm_dma_buf_undup_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle); +NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, + NvHandle, NvHandle, MemoryRange, + NvU8, void *, NvBool, MemoryArea *); +void NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *, + NvHandle, NvHandle, NvU8, void *, + NvBool, MemoryArea); +NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *, + nv_state_t *, NvHandle, NvHandle, + NvU8, NvHandle *, NvHandle *, + NvHandle *, void **, NvBool *, NvBool *); +void NV_API_CALL rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *); + +void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd); +NvBool NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id); +NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *); +NV_STATUS NV_API_CALL rm_gpu_handle_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *); +NvBool NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *); +NvBool NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *); +NvBool NV_API_CALL rm_init_event_locks(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_destroy_event_locks(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_get_gpu_numa_info(nvidia_stack_t *, nv_state_t *, nv_ioctl_numa_info_t *); +NV_STATUS NV_API_CALL rm_gpu_numa_online(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_gpu_numa_offline(nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_is_device_sequestered(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_check_for_gpu_surprise_removal(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_set_external_kernel_client_count(nvidia_stack_t *, nv_state_t *, NvBool); +NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup(nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_disable_iomap_wc(void); + +void NV_API_CALL rm_init_tegra_dynamic_power_management(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_init_dynamic_power_management(nvidia_stack_t *, nv_state_t *, NvBool); +void NV_API_CALL rm_cleanup_dynamic_power_management(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_enable_dynamic_power_management(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_ref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t); +void NV_API_CALL rm_unref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t); +NV_STATUS NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool, NvBool *); +void NV_API_CALL rm_get_power_info(nvidia_stack_t *, nv_state_t *, nv_power_info_t *); + +void NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32); +void NV_API_CALL rm_acpi_nvpcf_notify(nvidia_stack_t *); + +NvBool NV_API_CALL rm_is_altstack_in_use(void); + +void NV_API_CALL rm_notify_gpu_addition(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_notify_gpu_removal(nvidia_stack_t *, nv_state_t *); + +/* vGPU VFIO specific functions */ +NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, + NvU32 *, NvU32 *, NvU32); +NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16); +NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool); +NV_STATUS 
NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8); +NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, + NvU64 *, NvU64 *, NvU32 *, NvBool *, NvU8 *); +NV_STATUS NV_API_CALL nv_vgpu_update_sysfs_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU32 *); +NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU64 *); +NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *); +NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *, NvU32, NvBool *); +NV_STATUS NV_API_CALL nv_gpu_unbind_event(nvidia_stack_t *, NvU32, NvBool *); + +NV_STATUS NV_API_CALL nv_check_usermap_access_params(nv_state_t*, const nv_usermap_access_params_t*); +nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*); +void NV_API_CALL nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size); + +/* Callbacks should occur roughly every 10ms. */ +#define NV_SNAPSHOT_TIMER_HZ 100 +void NV_API_CALL nv_start_snapshot_timer(void (*snapshot_callback)(void *context)); +void NV_API_CALL nv_flush_snapshot_timer(void); +void NV_API_CALL nv_stop_snapshot_timer(void); + +static inline const NvU8 *nv_get_cached_uuid(nv_state_t *nv) +{ + return nv->nv_uuid_cache.valid ? nv->nv_uuid_cache.uuid : NULL; +} + +/* nano second resolution timer callback structure */ +typedef struct nv_nano_timer nv_nano_timer_t; + +/* nano timer functions */ +void NV_API_CALL nv_create_nano_timer(nv_state_t *, void *pTmrEvent, nv_nano_timer_t **); +void NV_API_CALL nv_start_nano_timer(nv_state_t *nv, nv_nano_timer_t *, NvU64 timens); +NV_STATUS NV_API_CALL rm_run_nano_timer_callback(nvidia_stack_t *, nv_state_t *, void *pTmrEvent); +void NV_API_CALL nv_cancel_nano_timer(nv_state_t *, nv_nano_timer_t *); +void NV_API_CALL nv_destroy_nano_timer(nv_state_t *nv, nv_nano_timer_t *); + +// Host1x specific functions. +NV_STATUS NV_API_CALL nv_get_syncpoint_aperture(NvU32, NvU64 *, NvU64 *, NvU32 *); + +#if defined(NVCPU_X86_64) + +static inline NvU64 nv_rdtsc(void) +{ + NvU64 val; + __asm__ __volatile__ ("rdtsc \t\n" + "shlq $0x20,%%rdx \t\n" + "orq %%rdx,%%rax \t\n" + : "=A" (val)); + return val; +} + +#endif + +#endif /* NVRM */ + +static inline int nv_count_bits(NvU64 word) +{ + NvU64 bits; + + bits = (word & 0x5555555555555555ULL) + ((word >> 1) & 0x5555555555555555ULL); + bits = (bits & 0x3333333333333333ULL) + ((bits >> 2) & 0x3333333333333333ULL); + bits = (bits & 0x0f0f0f0f0f0f0f0fULL) + ((bits >> 4) & 0x0f0f0f0f0f0f0f0fULL); + bits = (bits & 0x00ff00ff00ff00ffULL) + ((bits >> 8) & 0x00ff00ff00ff00ffULL); + bits = (bits & 0x0000ffff0000ffffULL) + ((bits >> 16) & 0x0000ffff0000ffffULL); + bits = (bits & 0x00000000ffffffffULL) + ((bits >> 32) & 0x00000000ffffffffULL); + + return (int)(bits); +} + +#endif diff --git a/src/nvidia/arch/nvalloc/unix/include/nv_escape.h b/src/nvidia/arch/nvalloc/unix/include/nv_escape.h new file mode 100644 index 0000000..3310292 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv_escape.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NV_ESCAPE_H_INCLUDED +#define NV_ESCAPE_H_INCLUDED + +#define NV_ESC_RM_ALLOC_MEMORY 0x27 +#define NV_ESC_RM_ALLOC_OBJECT 0x28 +#define NV_ESC_RM_FREE 0x29 +#define NV_ESC_RM_CONTROL 0x2A +#define NV_ESC_RM_ALLOC 0x2B +#define NV_ESC_RM_CONFIG_GET 0x32 +#define NV_ESC_RM_CONFIG_SET 0x33 +#define NV_ESC_RM_DUP_OBJECT 0x34 +#define NV_ESC_RM_SHARE 0x35 +#define NV_ESC_RM_CONFIG_GET_EX 0x37 +#define NV_ESC_RM_CONFIG_SET_EX 0x38 +#define NV_ESC_RM_I2C_ACCESS 0x39 +#define NV_ESC_RM_IDLE_CHANNELS 0x41 +#define NV_ESC_RM_VID_HEAP_CONTROL 0x4A +#define NV_ESC_RM_ACCESS_REGISTRY 0x4D +#define NV_ESC_RM_MAP_MEMORY 0x4E +#define NV_ESC_RM_UNMAP_MEMORY 0x4F +#define NV_ESC_RM_GET_EVENT_DATA 0x52 +#define NV_ESC_RM_ALLOC_CONTEXT_DMA2 0x54 +#define NV_ESC_RM_ADD_VBLANK_CALLBACK 0x56 +#define NV_ESC_RM_MAP_MEMORY_DMA 0x57 +#define NV_ESC_RM_UNMAP_MEMORY_DMA 0x58 +#define NV_ESC_RM_BIND_CONTEXT_DMA 0x59 +#define NV_ESC_RM_EXPORT_OBJECT_TO_FD 0x5C +#define NV_ESC_RM_IMPORT_OBJECT_FROM_FD 0x5D +#define NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO 0x5E +#define NV_ESC_RM_LOCKLESS_DIAGNOSTIC 0x5F + +#endif // NV_ESCAPE_H_INCLUDED diff --git a/src/nvidia/arch/nvalloc/unix/include/os-interface.h b/src/nvidia/arch/nvalloc/unix/include/os-interface.h new file mode 100644 index 0000000..b132861 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/os-interface.h @@ -0,0 +1,275 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* + * Os interface definitions needed by os-interface.c + */ + +#ifndef OS_INTERFACE_H +#define OS_INTERFACE_H + +/******************* Operating System Interface Routines *******************\ +* * +* Operating system wrapper functions used to abstract the OS. * +* * +\***************************************************************************/ + +#include +#include +#include "nv_stdarg.h" +#include +#include +#include +#include + +#include "rs_access.h" + + + +typedef struct +{ + NvU32 os_major_version; + NvU32 os_minor_version; + NvU32 os_build_number; + const char * os_build_version_str; + const char * os_build_date_plus_str; +}os_version_info; + +/* Each OS defines its own version of this opaque type */ +struct os_work_queue; + +/* Each OS defines its own version of this opaque type */ +typedef struct os_wait_queue os_wait_queue; + +/* + * --------------------------------------------------------------------------- + * + * Function prototypes for OS interface. + * + * --------------------------------------------------------------------------- + */ + +NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64); +void NV_API_CALL os_free_mem (void *); +NV_STATUS NV_API_CALL os_get_system_time (NvU32 *, NvU32 *); +NvU64 NV_API_CALL os_get_monotonic_time_ns (void); +NvU64 NV_API_CALL os_get_monotonic_time_ns_hr (void); +NvU64 NV_API_CALL os_get_monotonic_tick_resolution_ns (void); +NV_STATUS NV_API_CALL os_delay (NvU32); +NV_STATUS NV_API_CALL os_delay_us (NvU32); +NvU64 NV_API_CALL os_get_cpu_frequency (void); +NvU32 NV_API_CALL os_get_current_process (void); +void NV_API_CALL os_get_current_process_name (char *, NvU32); +NV_STATUS NV_API_CALL os_get_current_thread (NvU64 *); +char* NV_API_CALL os_string_copy (char *, const char *); +NvU32 NV_API_CALL os_string_length (const char *); +NvU32 NV_API_CALL os_strtoul (const char *, char **, NvU32); +NvS32 NV_API_CALL os_string_compare (const char *, const char *); +NvS32 NV_API_CALL os_snprintf (char *, NvU32, const char *, ...); +NvS32 NV_API_CALL os_vsnprintf (char *, NvU32, const char *, va_list); +void NV_API_CALL os_log_error (const char *, va_list); +void* NV_API_CALL os_mem_copy (void *, const void *, NvU32); +NV_STATUS NV_API_CALL os_memcpy_from_user (void *, const void *, NvU32); +NV_STATUS NV_API_CALL os_memcpy_to_user (void *, const void *, NvU32); +void* NV_API_CALL os_mem_set (void *, NvU8, NvU32); +NvS32 NV_API_CALL os_mem_cmp (const NvU8 *, const NvU8 *, NvU32); +void* NV_API_CALL os_pci_init_handle (NvU32, NvU8, NvU8, NvU8, NvU16 *, NvU16 *); +NV_STATUS NV_API_CALL os_pci_read_byte (void *, NvU32, NvU8 *); +NV_STATUS NV_API_CALL os_pci_read_word (void *, NvU32, NvU16 *); +NV_STATUS NV_API_CALL os_pci_read_dword (void *, NvU32, NvU32 *); +NV_STATUS NV_API_CALL os_pci_write_byte (void *, NvU32, NvU8); +NV_STATUS NV_API_CALL os_pci_write_word (void *, NvU32, NvU16); +NV_STATUS NV_API_CALL os_pci_write_dword (void *, NvU32, NvU32); +NvBool NV_API_CALL os_pci_remove_supported (void); +void NV_API_CALL os_pci_remove (void *); +void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32); +void NV_API_CALL os_unmap_kernel_space (void *, NvU64); +NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void); +NV_STATUS NV_API_CALL os_flush_user_cache (void); +void 
NV_API_CALL os_flush_cpu_write_combine_buffer(void); +NvU8 NV_API_CALL os_io_read_byte (NvU32); +NvU16 NV_API_CALL os_io_read_word (NvU32); +NvU32 NV_API_CALL os_io_read_dword (NvU32); +void NV_API_CALL os_io_write_byte (NvU32, NvU8); +void NV_API_CALL os_io_write_word (NvU32, NvU16); +void NV_API_CALL os_io_write_dword (NvU32, NvU32); +NvBool NV_API_CALL os_is_administrator (void); +NvBool NV_API_CALL os_check_access (RsAccessRight accessRight); +void NV_API_CALL os_dbg_init (void); +void NV_API_CALL os_dbg_breakpoint (void); +void NV_API_CALL os_dbg_set_level (NvU32); +NvU32 NV_API_CALL os_get_cpu_count (void); +NvU32 NV_API_CALL os_get_cpu_number (void); +void NV_API_CALL os_disable_console_access (void); +void NV_API_CALL os_enable_console_access (void); +NV_STATUS NV_API_CALL os_registry_init (void); +NvU64 NV_API_CALL os_get_max_user_va (void); +NV_STATUS NV_API_CALL os_schedule (void); +NV_STATUS NV_API_CALL os_alloc_spinlock (void **); +void NV_API_CALL os_free_spinlock (void *); +NvU64 NV_API_CALL os_acquire_spinlock (void *); +void NV_API_CALL os_release_spinlock (void *, NvU64); +NV_STATUS NV_API_CALL os_queue_work_item (struct os_work_queue *, void *); +NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *, NvBool); +NvBool NV_API_CALL os_is_queue_flush_ongoing (struct os_work_queue *); +NV_STATUS NV_API_CALL os_alloc_mutex (void **); +void NV_API_CALL os_free_mutex (void *); +NV_STATUS NV_API_CALL os_acquire_mutex (void *); +NV_STATUS NV_API_CALL os_cond_acquire_mutex (void *); +void NV_API_CALL os_release_mutex (void *); +void* NV_API_CALL os_alloc_semaphore (NvU32); +void NV_API_CALL os_free_semaphore (void *); +NV_STATUS NV_API_CALL os_acquire_semaphore (void *); +NV_STATUS NV_API_CALL os_cond_acquire_semaphore (void *); +NV_STATUS NV_API_CALL os_release_semaphore (void *); +void* NV_API_CALL os_alloc_rwlock (void); +void NV_API_CALL os_free_rwlock (void *); +NV_STATUS NV_API_CALL os_acquire_rwlock_read (void *); +NV_STATUS NV_API_CALL os_acquire_rwlock_write (void *); +NV_STATUS NV_API_CALL os_cond_acquire_rwlock_read (void *); +NV_STATUS NV_API_CALL os_cond_acquire_rwlock_write (void *); +void NV_API_CALL os_release_rwlock_read (void *); +void NV_API_CALL os_release_rwlock_write (void *); +NvBool NV_API_CALL os_semaphore_may_sleep (void); +NV_STATUS NV_API_CALL os_get_version_info (os_version_info*); +NV_STATUS NV_API_CALL os_get_is_openrm (NvBool *); +NvBool NV_API_CALL os_is_isr (void); +NvBool NV_API_CALL os_pat_supported (void); +void NV_API_CALL os_dump_stack (void); +NvBool NV_API_CALL os_is_efi_enabled (void); +NvBool NV_API_CALL os_is_xen_dom0 (void); +NvBool NV_API_CALL os_is_vgx_hyper (void); +NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32); +NvBool NV_API_CALL os_is_grid_supported (void); +NvU32 NV_API_CALL os_get_grid_csp_support (void); +void NV_API_CALL os_bug_check (NvU32, const char *); +NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32); +NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **); +NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *, NvU32); +NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *); +NV_STATUS NV_API_CALL os_get_euid (NvU32 *); +NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr); +NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *); +void NV_API_CALL os_add_record_for_crashLog (void *, NvU32); +void NV_API_CALL os_delete_record_for_crashLog (void *); +NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32); +NV_STATUS 
NV_API_CALL os_device_vm_present (void); +NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *); +NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *); +NV_STATUS NV_API_CALL os_get_page (NvU64 address); +NV_STATUS NV_API_CALL os_put_page (NvU64 address); +NvU32 NV_API_CALL os_get_page_refcount (NvU64 address); +NvU32 NV_API_CALL os_count_tail_pages (NvU64 address); +void NV_API_CALL os_free_pages_phys (NvU64, NvU32); +NV_STATUS NV_API_CALL os_open_temporary_file (void **); +void NV_API_CALL os_close_file (void *); +NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64); +NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU64); +NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **); +NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64); +NvBool NV_API_CALL os_is_nvswitch_present (void); +NV_STATUS NV_API_CALL os_get_random_bytes (NvU8 *, NvU16); +NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **); +void NV_API_CALL os_free_wait_queue (os_wait_queue *); +void NV_API_CALL os_wait_uninterruptible (os_wait_queue *); +void NV_API_CALL os_wait_interruptible (os_wait_queue *); +void NV_API_CALL os_wake_up (os_wait_queue *); +nv_cap_t* NV_API_CALL os_nv_cap_init (const char *); +nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry (nv_cap_t *, const char *, int); +nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *, int); +void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *); +int NV_API_CALL os_nv_cap_validate_and_dup_fd (const nv_cap_t *, int); +void NV_API_CALL os_nv_cap_close_fd (int); +NvS32 NV_API_CALL os_imex_channel_get (NvU64); +NvS32 NV_API_CALL os_imex_channel_count (void); +NV_STATUS NV_API_CALL os_tegra_igpu_perf_boost (void *, NvBool, NvU32); + +NV_STATUS NV_API_CALL os_get_tegra_platform (NvU32 *); +enum os_pci_req_atomics_type { + OS_INTF_PCIE_REQ_ATOMICS_32BIT, + OS_INTF_PCIE_REQ_ATOMICS_64BIT, + OS_INTF_PCIE_REQ_ATOMICS_128BIT +}; +NV_STATUS NV_API_CALL os_enable_pci_req_atomics (void *, enum os_pci_req_atomics_type); +void NV_API_CALL os_pci_trigger_flr(void *handle); +NV_STATUS NV_API_CALL os_get_numa_node_memory_usage (NvS32, NvU64 *, NvU64 *); +NV_STATUS NV_API_CALL os_numa_add_gpu_memory (void *, NvU64, NvU64, NvU32 *); +NV_STATUS NV_API_CALL os_numa_remove_gpu_memory (void *, NvU64, NvU64, NvU32); +NV_STATUS NV_API_CALL os_offline_page_at_address(NvU64 address); +void* NV_API_CALL os_get_pid_info(void); +void NV_API_CALL os_put_pid_info(void *pid_info); +NV_STATUS NV_API_CALL os_find_ns_pid(void *pid_info, NvU32 *ns_pid); +NvBool NV_API_CALL os_is_init_ns(void); +NV_STATUS NV_API_CALL os_iommu_sva_bind(void *arg, void **handle, NvU32 *pasid); +void NV_API_CALL os_iommu_sva_unbind(void *handle); + +extern NvU64 os_page_size; +extern NvU64 os_max_page_size; +extern NvU64 os_page_mask; +extern NvU8 os_page_shift; +extern NvBool os_cc_enabled; +extern NvBool os_cc_sev_snp_enabled; +extern NvBool os_cc_sme_enabled; +extern NvBool os_cc_snp_vtom_enabled; +extern NvBool os_cc_tdx_enabled; +extern NvBool os_dma_buf_enabled; +extern NvBool os_imex_channel_is_supported; + +/* + * --------------------------------------------------------------------------- + * + * Debug macros. 
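+ * (The levels below run from least severe, NV_DBG_INFO, to most
+ * severe, NV_DBG_ERRORS.)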
+ * + * --------------------------------------------------------------------------- + */ + +#define NV_DBG_INFO 0x0 +#define NV_DBG_SETUP 0x1 +#define NV_DBG_USERERRORS 0x2 +#define NV_DBG_WARNINGS 0x3 +#define NV_DBG_ERRORS 0x4 + + +void NV_API_CALL out_string(const char *str); +int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...); + +#define NV_DEV_PRINTF(debuglevel, nv, format, ... ) \ + nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format, NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__) + +#define NV_DEV_PRINTF_STATUS(debuglevel, nv, status, format, ... ) \ + nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format " (0x%x)\n", NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__, status) + +/* + * Fields for os_lock_user_pages flags parameter + */ +#define NV_LOCK_USER_PAGES_FLAGS_WRITE 0:0 +#define NV_LOCK_USER_PAGES_FLAGS_WRITE_NO 0x00000000 +#define NV_LOCK_USER_PAGES_FLAGS_WRITE_YES 0x00000001 + +// NV OS Tegra platform type defines +#define NV_OS_TEGRA_PLATFORM_SIM 0 +#define NV_OS_TEGRA_PLATFORM_FPGA 1 +#define NV_OS_TEGRA_PLATFORM_SILICON 2 + +#endif /* OS_INTERFACE_H */ diff --git a/src/nvidia/arch/nvalloc/unix/include/os_custom.h b/src/nvidia/arch/nvalloc/unix/include/os_custom.h new file mode 100644 index 0000000..1effaf7 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/os_custom.h @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _OS_CUSTOM_H_ +#define _OS_CUSTOM_H_ + +/*! + * @file os_custom.h + * @brief OS module specific definitions for this OS + */ + +#include + +// File modes, added for NVIDIA capabilities. 
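+//
+// These mirror the standard POSIX octal permission bits, so the usual
+// combinations are built by OR-ing them together; for example,
+// (OS_RUGO | OS_WUSR) == 0444 | 0200 == 0644 (world-readable, owner-writable),
+// and OS_RWXU alone resolves to 0700.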
+#define OS_RUSR 00400 // read permission, owner
+#define OS_WUSR 00200 // write permission, owner
+#define OS_XUSR 00100 // execute/search permission, owner
+#define OS_RWXU (OS_RUSR | OS_WUSR | OS_XUSR) // read, write, execute/search, owner
+#define OS_RGRP 00040 // read permission, group
+#define OS_WGRP 00020 // write permission, group
+#define OS_XGRP 00010 // execute/search permission, group
+#define OS_RWXG (OS_RGRP | OS_WGRP | OS_XGRP) // read, write, execute/search, group
+#define OS_ROTH 00004 // read permission, other
+#define OS_WOTH 00002 // write permission, other
+#define OS_XOTH 00001 // execute/search permission, other
+#define OS_RWXO (OS_ROTH | OS_WOTH | OS_XOTH) // read, write, execute/search, other
+#define OS_RUGO (OS_RUSR | OS_RGRP | OS_ROTH)
+#define OS_WUGO (OS_WUSR | OS_WGRP | OS_WOTH)
+#define OS_XUGO (OS_XUSR | OS_XGRP | OS_XOTH)
+
+// Trigger for collecting GPU state for later extraction.
+void RmLogGpuCrash(OBJGPU *);
+
+// This is a callback function in the miniport.
+// The argument is a device extension, and must be cast as such to be useful.
+typedef void (*MINIPORT_CALLBACK)(void*);
+
+NV_STATUS osPackageRegistry(OBJGPU *pGpu, PACKED_REGISTRY_TABLE *, NvU32 *);
+
+#endif // _OS_CUSTOM_H_
diff --git a/src/nvidia/arch/nvalloc/unix/include/osapi.h b/src/nvidia/arch/nvalloc/unix/include/osapi.h
new file mode 100644
index 0000000..9e38e81
--- /dev/null
+++ b/src/nvidia/arch/nvalloc/unix/include/osapi.h
@@ -0,0 +1,189 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _OSAPI_H_
+#define _OSAPI_H_
+
+#include "core/system.h"
+#include "gpu/gpu.h"
+
+#include <os-interface.h> // NV_DBG_ERRORS
+#include <nv.h>
+#include <nv-priv.h>
+
+#if defined(__use_altstack__)
+#if defined(QA_BUILD)
+//---------------------------------------------------------------------------
+//
+// 32 bit debug marker values.
+//
+//---------------------------------------------------------------------------
+
+#define NV_MARKER1 (NvU32)(('M' << 24) | ('R' << 16) | ('V' << 8) | 'N')
+#define NV_MARKER2 (NvU32)(('N' << 24) | ('V' << 16) | ('R' << 8) | 'M')
+
+//
+// The two macros below implement a simple alternate stack usage sanity
+// check for QA_BUILD RM builds.
NV_ALTSTACK_WRITE_MARKERS() fills +// altstacks with NV_MARKER1, which enables NV_ALTSTACK_CHECK_MARKERS() +// to determine the stack usage fairly reliably by looking for the +// first clobbered marker. If more than 7/8 of the alternate stack were +// used, NV_ALTSTACK_CHECK_MARKERS() prints an error and asserts. +// +#define NV_ALTSTACK_WRITE_MARKERS(sp) \ +{ \ + NvU32 i, *stack = (void *)(sp)->stack; \ + for (i = 0; i < ((sp)->size / sizeof(NvU32)); i++) \ + stack[i] = NV_MARKER1; \ +} + +#define NV_ALTSTACK_CHECK_MARKERS(sp) \ +{ \ + NvU32 i, *stack = (void *)(sp)->stack; \ + for (i = 0; i < ((sp)->size / sizeof(NvU32)); i++) \ + { \ + if (stack[i] != NV_MARKER1) \ + break; \ + } \ + if ((i * sizeof(NvU32)) < ((sp)->size / 8)) \ + { \ + nv_printf(NV_DBG_ERRORS, "NVRM: altstack: used %d of %d bytes!\n", \ + ((sp)->size - (i * (NvU32)sizeof(NvU32))), (sp)->size); \ + NV_ASSERT_PRECOMP((i * sizeof(NvU32)) >= ((sp)->size / 8)); \ + } \ +} +#else +#define NV_ALTSTACK_WRITE_MARKERS(sp) +#define NV_ALTSTACK_CHECK_MARKERS(sp) +#endif +#if defined(NVCPU_X86_64) +#define NV_ENTER_RM_RUNTIME(sp,fp) \ +{ \ + NV_ALTSTACK_WRITE_MARKERS(sp); \ + __asm__ __volatile__ ("movq %%rbp,%0" : "=r" (fp)); /* save %rbp */ \ + __asm__ __volatile__ ("movq %0,%%rbp" :: "r" ((sp)->top)); \ +} + +#define NV_EXIT_RM_RUNTIME(sp,fp) \ +{ \ + register void *__rbp __asm__ ("rbp"); \ + if (__rbp != (sp)->top) \ + { \ + nv_printf(NV_DBG_ERRORS, "NVRM: detected corrupted runtime stack!\n"); \ + NV_ASSERT_PRECOMP(__rbp == (sp)->top); \ + } \ + NV_ALTSTACK_CHECK_MARKERS(sp); \ + __asm__ __volatile__ ("movq %0,%%rbp" :: "r" (fp)); /* restore %rbp */ \ +} +#else +#error "gcc \"altstacks\" support is not implemented on this platform!" +#endif +#else +#define NV_ENTER_RM_RUNTIME(sp,fp) { (void)sp; (void)fp; } +#define NV_EXIT_RM_RUNTIME(sp,fp) +#endif + +void RmShutdownRm (void); + +NvBool RmInitPrivateState (nv_state_t *); +void RmFreePrivateState (nv_state_t *); + +NvBool RmInitAdapter (nv_state_t *); +NvBool RmPartiallyInitAdapter (nv_state_t *); +void RmShutdownAdapter (nv_state_t *); +void RmDisableAdapter (nv_state_t *); +void RmPartiallyDisableAdapter(nv_state_t *); +NV_STATUS RmGetAdapterStatus (nv_state_t *, NvU32 *); +NV_STATUS RmExcludeAdapter (nv_state_t *); + +NvBool RmGpuHasIOSpaceEnabled (nv_state_t *); + +void RmFreeUnusedClients (nv_state_t *, nv_file_private_t *); +NV_STATUS RmIoctl (nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32); + +void RmI2cAddGpuPorts(nv_state_t *); + +NV_STATUS RmInitX86EmuState(OBJGPU *); +void RmFreeX86EmuState(OBJGPU *); +NV_STATUS RmPowerSourceChangeEvent(nv_state_t *, NvU32); + +void RmRequestDNotifierState(nv_state_t *); + +const NvU8 *RmGetGpuUuidRaw(nv_state_t *); + +NV_STATUS nv_vbios_call(OBJGPU *, NvU32 *, NvU32 *); + +int amd_adv_spec_cache_feature(OBJOS *); +int amd_msr_c0011022_incompatible(OBJOS *); + +NV_STATUS rm_get_adapter_status (nv_state_t *, NvU32 *); + +void rm_client_free_os_events (NvHandle); + +NV_STATUS rm_create_mmap_context (NvHandle, NvHandle, NvHandle, NvP64, NvU64, NvU64, NvU32, NvU32); +NV_STATUS rm_update_device_mapping_info (NvHandle, NvHandle, NvHandle, void *, void *); + +NV_STATUS rm_access_registry (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvP64, NvU32, NvP64, NvU32 *, NvU32 *, NvU32 *); + +// registry management +NV_STATUS RmInitRegistry (void); +NV_STATUS RmDestroyRegistry (nv_state_t *); + +NV_STATUS RmWriteRegistryDword (nv_state_t *, const char *, NvU32 ); +NV_STATUS RmReadRegistryDword (nv_state_t *, const char *, NvU32 *); +NV_STATUS 
RmWriteRegistryString (nv_state_t *, const char *, const char *, NvU32); +NV_STATUS RmReadRegistryBinary (nv_state_t *, const char *, NvU8 *, NvU32 *); +NV_STATUS RmWriteRegistryBinary (nv_state_t *, const char *, NvU8 *, NvU32); +NV_STATUS RmReadRegistryString (nv_state_t *, const char *, NvU8 *, NvU32 *); + +NV_STATUS RmPackageRegistry (nv_state_t *, PACKED_REGISTRY_TABLE *, NvU32 *); + +NvBool RmIsNvifFunctionSupported(NvU32, NvU32); +void RmInitAcpiMethods (OBJOS *, OBJSYS *, OBJGPU *); +void RmUnInitAcpiMethods (OBJSYS *); + +void RmInflateOsToRmPageArray (RmPhysAddr *, NvU64); +void RmDeflateRmToOsPageArray (RmPhysAddr *, NvU64); + +void RmInitPowerManagement (nv_state_t *); +void RmDestroyPowerManagement (nv_state_t *); + +NV_STATUS RmPowerManagementTegra (OBJGPU *pGpu, nv_pm_action_t pmAction); + +NV_STATUS os_ref_dynamic_power (nv_state_t *, nv_dynamic_power_mode_t); +void os_unref_dynamic_power (nv_state_t *, nv_dynamic_power_mode_t); +void RmHandleDisplayChange (nvidia_stack_t *, nv_state_t *); +void RmUpdateGc6ConsoleRefCount (nv_state_t *, NvBool); + +NvBool rm_get_uefi_console_status (nv_state_t *); +NvU64 rm_get_uefi_console_size (nv_state_t *, NvU64 *); + +RM_API *RmUnixRmApiPrologue (nv_state_t *, THREAD_STATE_NODE *, NvU32 module); +void RmUnixRmApiEpilogue (nv_state_t *, THREAD_STATE_NODE *); + +static inline NvBool rm_is_system_notebook(void) +{ + return (nv_is_chassis_notebook() || nv_acpi_is_battery_present()); +} + +#endif // _OSAPI_H_ diff --git a/src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h b/src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h new file mode 100644 index 0000000..a070c4e --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _RMOBJEXPORTIMPORT_H_ +#define _RMOBJEXPORTIMPORT_H_ + +#include "nvstatus.h" + +typedef NvHandle RmObjExportHandle; + +NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject, + RmObjExportHandle *pDstObject, NvU32 *pDeviceInstance); + +void RmFreeObjExportHandle(RmObjExportHandle hObject); + +NV_STATUS RmImportObject(NvHandle hDstClient, NvHandle hDstParent, + NvHandle *phDstObject, RmObjExportHandle hSrcObject, + NvU8 *pObjectType); +#endif // _RMOBJEXPORTIMPORT_H_ + diff --git a/src/nvidia/arch/nvalloc/unix/src/escape.c b/src/nvidia/arch/nvalloc/unix/src/escape.c new file mode 100644 index 0000000..3de8b28 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/escape.c @@ -0,0 +1,869 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + + +//***************************** Module Header ********************************** +// +// This code is linked into the resource manager proper. It receives the +// ioctl from the resource manager's customer, unbundles the args and +// calls the correct resman routines. 
+//
+//******************************************************************************
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include <nvos.h>
+#include <class/cl0000.h> // NV01_ROOT
+#include <class/cl0001.h> // NV01_ROOT_NON_PRIV
+#include <class/cl0005.h> // NV01_EVENT
+#include <class/cl003e.h> // NV01_MEMORY_SYSTEM
+#include <class/cl0071.h> // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
+
+#include "rmapi/client_resource.h"
+#include "nvlog/nvlog.h"
+#include
+
+#define NV_CTL_DEVICE_ONLY(nv)                    \
+{                                                 \
+    if (((nv)->flags & NV_FLAG_CONTROL) == 0)     \
+    {                                             \
+        rmStatus = NV_ERR_INVALID_ARGUMENT;       \
+        goto done;                                \
+    }                                             \
+}
+
+#define NV_ACTUAL_DEVICE_ONLY(nv)                 \
+{                                                 \
+    if (((nv)->flags & NV_FLAG_CONTROL) != 0)     \
+    {                                             \
+        rmStatus = NV_ERR_INVALID_ARGUMENT;       \
+        goto done;                                \
+    }                                             \
+}
+
+static NV_STATUS RmGetDeviceFd(NVOS54_PARAMETERS *pApi, NvS32 *pFd,
+                               NvBool *pSkipDeviceRef)
+{
+    RMAPI_PARAM_COPY paramCopy;
+    void *pKernelParams;
+    NvU32 paramSize;
+    NV_STATUS status;
+
+    *pFd = -1;
+    *pSkipDeviceRef = NV_TRUE;
+
+    switch(pApi->cmd)
+    {
+        default:
+            return NV_OK;
+    }
+
+    RMAPI_PARAM_COPY_INIT(paramCopy, pKernelParams, pApi->params, paramSize, 1);
+
+    status = rmapiParamsAcquire(&paramCopy, NV_TRUE);
+    if (status != NV_OK)
+        return status;
+
+    switch(pApi->cmd)
+    {
+        default:
+            NV_ASSERT(0);
+            break;
+    }
+
+    NV_ASSERT(rmapiParamsRelease(&paramCopy) == NV_OK);
+
+    return status;
+}
+
+// Only return errors through pApi->status
+static void RmCreateOsDescriptor(NVOS32_PARAMETERS *pApi, API_SECURITY_INFO secInfo)
+{
+    NV_STATUS rmStatus;
+    NvBool writable;
+    NvU32 flags = 0;
+    NvU64 allocSize, pageCount, *pPteArray = NULL;
+    void *pDescriptor, *pPageArray = NULL;
+
+    pDescriptor = NvP64_VALUE(pApi->data.AllocOsDesc.descriptor);
+    if (((NvUPtr)pDescriptor & ~os_page_mask) != 0)
+    {
+        rmStatus = NV_ERR_NOT_SUPPORTED;
+        goto done;
+    }
+
+    // Check to prevent an NvU64 overflow
+    if ((pApi->data.AllocOsDesc.limit + 1) == 0)
+    {
+        rmStatus = NV_ERR_INVALID_LIMIT;
+        goto done;
+    }
+
+    allocSize = (pApi->data.AllocOsDesc.limit + 1);
+    pageCount = (1 + ((allocSize - 1) / os_page_size));
+
+    writable = FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_WRITE, pApi->data.AllocOsDesc.attr2);
+
+    flags = FLD_SET_DRF_NUM(_LOCK_USER_PAGES, _FLAGS, _WRITE, writable, flags);
+    rmStatus = os_lock_user_pages(pDescriptor, pageCount, &pPageArray, flags);
+    if (rmStatus == NV_OK)
+    {
+        pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPageArray;
+        pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY;
+    }
+    else if (rmStatus == NV_ERR_INVALID_ADDRESS)
+    {
+        rmStatus = os_lookup_user_io_memory(pDescriptor, pageCount, &pPteArray);
+        if (rmStatus == NV_OK)
+        {
+            pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPteArray;
+            pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY;
+        }
+    }
+    if (rmStatus != NV_OK)
+        goto done;
+
+    Nv04VidHeapControlWithSecInfo(pApi, secInfo);
+
+    if (pApi->status != NV_OK)
+    {
+        switch (pApi->data.AllocOsDesc.descriptorType)
+        {
+            default:
+                break;
+            case NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY:
+                os_unlock_user_pages(pageCount, pPageArray, flags);
+                break;
+        }
+    }
+
+done:
+    if (rmStatus != NV_OK)
+        pApi->status = rmStatus;
+}
+
+// Only return errors through pApi->status
+static void RmAllocOsDescriptor(NVOS02_PARAMETERS *pApi, API_SECURITY_INFO secInfo)
+{
+    NV_STATUS rmStatus = NV_OK;
+    NvU32 flags, attr, attr2;
+    NVOS32_PARAMETERS *pVidHeapParams;
+
+    if (!FLD_TEST_DRF(OS02, _FLAGS, _LOCATION, _PCI, pApi->flags) ||
+        !FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, pApi->flags))
+    {
+        rmStatus = NV_ERR_INVALID_FLAGS;
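+        // Only requests that target system memory (_LOCATION_PCI) and defer
+        // mapping (_MAPPING_NO_MAP) can be wrapped in an OS descriptor; any
+        // other flag combination is rejected up front.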
goto done; + } + + attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI); + + if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _CACHED, pApi->flags) || + FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK, pApi->flags)) + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, attr); + } + else if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, pApi->flags)) + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED, attr); + else { + rmStatus = NV_ERR_INVALID_FLAGS; + goto done; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, pApi->flags)) + attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, attr); + else + attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, attr); + + if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, pApi->flags)) + attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _YES); + else + attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO); + + if (FLD_TEST_DRF(OS02, _FLAGS, _MEMORY_PROTECTION, _UNPROTECTED, pApi->flags)) + attr2 = FLD_SET_DRF(OS32, _ATTR2, _MEMORY_PROTECTION, _UNPROTECTED, attr2); + + pVidHeapParams = portMemAllocNonPaged(sizeof(NVOS32_PARAMETERS)); + if (pVidHeapParams == NULL) + { + rmStatus = NV_ERR_NO_MEMORY; + goto done; + } + portMemSet(pVidHeapParams, 0, sizeof(NVOS32_PARAMETERS)); + + pVidHeapParams->hRoot = pApi->hRoot; + pVidHeapParams->hObjectParent = pApi->hObjectParent; + pVidHeapParams->function = NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR; + + flags = (NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED | + NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED); + + if (DRF_VAL(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, pApi->flags)) + attr2 = FLD_SET_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, attr2); + + // Currently CPU-RO memory implies GPU-RO as well + if (DRF_VAL(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, pApi->flags) || + DRF_VAL(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, pApi->flags)) + attr2 = FLD_SET_DRF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY, attr2); + + pVidHeapParams->data.AllocOsDesc.hMemory = pApi->hObjectNew; + pVidHeapParams->data.AllocOsDesc.flags = flags; + pVidHeapParams->data.AllocOsDesc.attr = attr; + pVidHeapParams->data.AllocOsDesc.attr2 = attr2; + pVidHeapParams->data.AllocOsDesc.descriptor = pApi->pMemory; + pVidHeapParams->data.AllocOsDesc.limit = pApi->limit; + pVidHeapParams->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_VIRTUAL_ADDRESS; + + RmCreateOsDescriptor(pVidHeapParams, secInfo); + + pApi->status = pVidHeapParams->status; + + portMemFree(pVidHeapParams); + +done: + if (rmStatus != NV_OK) + pApi->status = rmStatus; +} + +ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hRoot) == NV_OFFSETOF(NVOS64_PARAMETERS, hRoot)); +ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hObjectParent) == NV_OFFSETOF(NVOS64_PARAMETERS, hObjectParent)); +ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hObjectNew) == NV_OFFSETOF(NVOS64_PARAMETERS, hObjectNew)); +ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hClass) == NV_OFFSETOF(NVOS64_PARAMETERS, hClass)); +ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, pAllocParms) == NV_OFFSETOF(NVOS64_PARAMETERS, pAllocParms)); + +NV_STATUS RmIoctl( + nv_state_t *nv, + nv_file_private_t *nvfp, + NvU32 cmd, + void *data, + NvU32 dataSize +) +{ + NV_STATUS rmStatus = NV_ERR_GENERIC; + API_SECURITY_INFO secInfo = { }; + + secInfo.privLevel = osIsAdministrator() ? 
RS_PRIV_LEVEL_USER_ROOT : RS_PRIV_LEVEL_USER; + secInfo.paramLocation = PARAM_LOCATION_USER; + secInfo.pProcessToken = NULL; + secInfo.gpuOsInfo = NULL; + secInfo.clientOSInfo = nvfp->ctl_nvfp; + if (secInfo.clientOSInfo == NULL) + secInfo.clientOSInfo = nvfp; + + switch (cmd) + { + case NV_ESC_RM_ALLOC_MEMORY: + { + nv_ioctl_nvos02_parameters_with_fd *pApi; + NVOS02_PARAMETERS *pParms; + + if (dataSize != sizeof(*pApi)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + pApi = data; + pParms = &pApi->params; + + NV_ACTUAL_DEVICE_ONLY(nv); + + if (pParms->hClass == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR) + RmAllocOsDescriptor(pParms, secInfo); + else + { + NvU32 flags = pParms->flags; + + Nv01AllocMemoryWithSecInfo(pParms, secInfo); + + // + // If the system memory is going to be mapped immediately, + // create the mmap context for it now. + // + if ((pParms->hClass == NV01_MEMORY_SYSTEM) && + (!FLD_TEST_DRF(OS02, _FLAGS, _ALLOC, _NONE, flags)) && + (!FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, flags)) && + (pParms->status == NV_OK)) + { + if (rm_create_mmap_context(pParms->hRoot, + pParms->hObjectParent, pParms->hObjectNew, + pParms->pMemory, pParms->limit + 1, 0, + NV_MEMORY_DEFAULT, + pApi->fd) != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "could not create mmap context for %p\n", + NvP64_VALUE(pParms->pMemory)); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + } + } + + break; + } + + case NV_ESC_RM_ALLOC_OBJECT: + { + NVOS05_PARAMETERS *pApi = data; + + if (dataSize != sizeof(*pApi)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + NV_CTL_DEVICE_ONLY(nv); + + Nv01AllocObjectWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_ALLOC: + { + NVOS21_PARAMETERS *pApi = data; + NVOS64_PARAMETERS *pApiAccess = data; + NvBool bAccessApi = (dataSize == sizeof(NVOS64_PARAMETERS)); + + if ((dataSize != sizeof(*pApi)) && + (dataSize != sizeof(*pApiAccess))) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + switch (pApi->hClass) + { + case NV01_ROOT: + case NV01_ROOT_CLIENT: + case NV01_ROOT_NON_PRIV: + { + NV_CTL_DEVICE_ONLY(nv); + + // Force userspace client allocations to be the _CLIENT class. 
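+                    // (NV01_ROOT and NV01_ROOT_NON_PRIV requests from user
+                    //  space are deliberately downgraded here, so an ioctl
+                    //  can only ever allocate the unprivileged client class.)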
+                    pApi->hClass = NV01_ROOT_CLIENT;
+                    break;
+                }
+                case NV01_EVENT:
+                case NV01_EVENT_OS_EVENT:
+                case NV01_EVENT_KERNEL_CALLBACK:
+                case NV01_EVENT_KERNEL_CALLBACK_EX:
+                {
+                    break;
+                }
+                default:
+                {
+                    NV_CTL_DEVICE_ONLY(nv);
+                    break;
+                }
+            }
+
+            if (!bAccessApi)
+            {
+                Nv04AllocWithSecInfo(pApi, secInfo);
+            }
+            else
+            {
+                Nv04AllocWithAccessSecInfo(pApiAccess, secInfo);
+            }
+
+            break;
+        }
+
+        case NV_ESC_RM_FREE:
+        {
+            NVOS00_PARAMETERS *pApi = data;
+
+            if (dataSize != sizeof(*pApi))
+            {
+                rmStatus = NV_ERR_INVALID_ARGUMENT;
+                goto done;
+            }
+
+            NV_CTL_DEVICE_ONLY(nv);
+
+            Nv01FreeWithSecInfo(pApi, secInfo);
+
+            if (pApi->status == NV_OK &&
+                pApi->hObjectOld == pApi->hRoot)
+            {
+                rm_client_free_os_events(pApi->hRoot);
+            }
+
+            break;
+        }
+
+        case NV_ESC_RM_VID_HEAP_CONTROL:
+        {
+            NVOS32_PARAMETERS *pApi = data;
+
+            if (dataSize != sizeof(*pApi))
+            {
+                rmStatus = NV_ERR_INVALID_ARGUMENT;
+                goto done;
+            }
+
+            NV_CTL_DEVICE_ONLY(nv);
+
+            if (pApi->function == NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR)
+                RmCreateOsDescriptor(pApi, secInfo);
+            else
+                Nv04VidHeapControlWithSecInfo(pApi, secInfo);
+
+            break;
+        }
+
+        case NV_ESC_RM_I2C_ACCESS:
+        {
+            NVOS_I2C_ACCESS_PARAMS *pApi = data;
+
+            if (dataSize != sizeof(*pApi))
+            {
+                rmStatus = NV_ERR_INVALID_ARGUMENT;
+                goto done;
+            }
+
+            NV_ACTUAL_DEVICE_ONLY(nv);
+
+            Nv04I2CAccessWithSecInfo(pApi, secInfo);
+            break;
+        }
+
+        case NV_ESC_RM_IDLE_CHANNELS:
+        {
+            NVOS30_PARAMETERS *pApi = data;
+
+            if (dataSize != sizeof(*pApi))
+            {
+                rmStatus = NV_ERR_INVALID_ARGUMENT;
+                goto done;
+            }
+
+            NV_CTL_DEVICE_ONLY(nv);
+
+            Nv04IdleChannelsWithSecInfo(pApi, secInfo);
+            break;
+        }
+
+        case NV_ESC_RM_MAP_MEMORY:
+        {
+            nv_ioctl_nvos33_parameters_with_fd *pApi;
+            NVOS33_PARAMETERS *pParms;
+
+            if (dataSize != sizeof(*pApi))
+            {
+                rmStatus = NV_ERR_INVALID_ARGUMENT;
+                goto done;
+            }
+
+            pApi = data;
+            pParms = &pApi->params;
+
+            NV_CTL_DEVICE_ONLY(nv);
+
+            // Don't allow userspace to override the caching type
+            pParms->flags = FLD_SET_DRF(OS33, _FLAGS, _CACHING_TYPE, _DEFAULT, pParms->flags);
+            Nv04MapMemoryWithSecInfo(pParms, secInfo);
+
+            if (pParms->status == NV_OK)
+            {
+                pParms->status = rm_create_mmap_context(pParms->hClient,
+                                     pParms->hDevice, pParms->hMemory,
+                                     pParms->pLinearAddress, pParms->length,
+                                     pParms->offset,
+                                     DRF_VAL(OS33, _FLAGS, _CACHING_TYPE, pParms->flags),
+                                     pApi->fd);
+                if (pParms->status != NV_OK)
+                {
+                    NVOS34_PARAMETERS params;
+                    portMemSet(&params, 0, sizeof(params));
+                    params.hClient = pParms->hClient;
+                    params.hDevice = pParms->hDevice;
+                    params.hMemory = pParms->hMemory;
+                    params.pLinearAddress = pParms->pLinearAddress;
+                    params.flags = pParms->flags;
+                    Nv04UnmapMemoryWithSecInfo(&params, secInfo);
+                }
+            }
+            break;
+        }
+
+        case NV_ESC_RM_UNMAP_MEMORY:
+        {
+            NVOS34_PARAMETERS *pApi = data;
+
+            if (dataSize != sizeof(*pApi))
+            {
+                rmStatus = NV_ERR_INVALID_ARGUMENT;
+                goto done;
+            }
+
+            NV_CTL_DEVICE_ONLY(nv);
+
+            Nv04UnmapMemoryWithSecInfo(pApi, secInfo);
+            break;
+        }
+
+        case NV_ESC_RM_ACCESS_REGISTRY:
+        {
+            NVOS38_PARAMETERS *pApi = data;
+
+            if (dataSize != sizeof(*pApi))
+            {
+                rmStatus = NV_ERR_INVALID_ARGUMENT;
+                goto done;
+            }
+
+            NV_CTL_DEVICE_ONLY(nv);
+
+            pApi->status = rm_access_registry(pApi->hClient,
+                                              pApi->hObject,
+                                              pApi->AccessType,
+                                              pApi->pDevNode,
+                                              pApi->DevNodeLength,
+                                              pApi->pParmStr,
+                                              pApi->ParmStrLength,
+                                              pApi->pBinaryData,
+                                              &pApi->BinaryDataLength,
+                                              &pApi->Data,
+                                              &pApi->Entry);
+            break;
+        }
+
+        case NV_ESC_RM_ALLOC_CONTEXT_DMA2:
+        {
+            NVOS39_PARAMETERS *pApi = data;
+
+            if (dataSize != sizeof(*pApi))
+            {
+                rmStatus =
NV_ERR_INVALID_ARGUMENT; + goto done; + } + + NV_CTL_DEVICE_ONLY(nv); + + Nv04AllocContextDmaWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_BIND_CONTEXT_DMA: + { + NVOS49_PARAMETERS *pApi = data; + + if (dataSize != sizeof(*pApi)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + NV_CTL_DEVICE_ONLY(nv); + + Nv04BindContextDmaWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_MAP_MEMORY_DMA: + { + NVOS46_PARAMETERS *pApi = data; + + if (dataSize != sizeof(*pApi)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + NV_CTL_DEVICE_ONLY(nv); + + Nv04MapMemoryDmaWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_UNMAP_MEMORY_DMA: + { + NVOS47_PARAMETERS *pApi = data; + + if (dataSize != sizeof(*pApi)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + NV_CTL_DEVICE_ONLY(nv); + + Nv04UnmapMemoryDmaWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_DUP_OBJECT: + { + NVOS55_PARAMETERS *pApi = data; + + if (dataSize != sizeof(*pApi)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + NV_CTL_DEVICE_ONLY(nv); + + Nv04DupObjectWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_SHARE: + { + NVOS57_PARAMETERS *pApi = data; + + if (dataSize != sizeof(*pApi)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + NV_CTL_DEVICE_ONLY(nv); + + Nv04ShareWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_STATUS_CODE: + { + nv_state_t *pNv; + nv_ioctl_status_code_t *pApi = data; + + if (dataSize != sizeof(*pApi)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + NV_CTL_DEVICE_ONLY(nv); + + pNv = nv_get_adapter_state(pApi->domain, pApi->bus, pApi->slot); + if (pNv == NULL) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + rmStatus = rm_get_adapter_status(pNv, &pApi->status); + + if (rmStatus != NV_OK) + goto done; + + break; + } + + case NV_ESC_RM_CONTROL: + { + NVOS54_PARAMETERS *pApi = data; + void *priv = NULL; + nv_file_private_t *dev_nvfp = NULL; + NvS32 fd; + NvBool bSkipDeviceRef; + + if (dataSize != sizeof(*pApi)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + NV_CTL_DEVICE_ONLY(nv); + + rmStatus = RmGetDeviceFd(pApi, &fd, &bSkipDeviceRef); + if (rmStatus != NV_OK) + { + goto done; + } + + if (!bSkipDeviceRef) + { + dev_nvfp = nv_get_file_private(fd, NV_FALSE, &priv); + if (dev_nvfp == NULL) + { + rmStatus = NV_ERR_INVALID_DEVICE; + goto done; + } + + // Check to avoid cyclic dependency with NV_ESC_REGISTER_FD + if (!portAtomicCompareAndSwapU32(&dev_nvfp->register_or_refcount, + NVFP_TYPE_REFCOUNTED, + NVFP_TYPE_NONE)) + { + // Is this already refcounted... + if (dev_nvfp->register_or_refcount != NVFP_TYPE_REFCOUNTED) + { + nv_put_file_private(priv); + rmStatus = NV_ERR_IN_USE; + goto done; + } + } + + secInfo.gpuOsInfo = priv; + } + + Nv04ControlWithSecInfo(pApi, secInfo); + + if ((pApi->status != NV_OK) && (priv != NULL)) + { + // + // No need to reset `register_or_refcount` as it might be set + // for previous successful calls. We let it clear with FD close. 
+ // + nv_put_file_private(priv); + + secInfo.gpuOsInfo = NULL; + } + + break; + } + + case NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO: + { + NVOS56_PARAMETERS *pApi = data; + void *pOldCpuAddress; + void *pNewCpuAddress; + + if (dataSize != sizeof(*pApi)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + NV_CTL_DEVICE_ONLY(nv); + + pOldCpuAddress = NvP64_VALUE(pApi->pOldCpuAddress); + pNewCpuAddress = NvP64_VALUE(pApi->pNewCpuAddress); + + pApi->status = rm_update_device_mapping_info(pApi->hClient, + pApi->hDevice, + pApi->hMemory, + pOldCpuAddress, + pNewCpuAddress); + break; + } + + case NV_ESC_REGISTER_FD: + { + nv_ioctl_register_fd_t *params = data; + void *priv = NULL; + nv_file_private_t *ctl_nvfp; + + if (dataSize != sizeof(*params)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // LOCK: acquire API lock + rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + if (rmStatus != NV_OK) + goto done; + + // If there is already a ctl fd registered on this nvfp, fail. + if (nvfp->ctl_nvfp != NULL) + { + // UNLOCK: release API lock + rmapiLockRelease(); + rmStatus = NV_ERR_INVALID_STATE; + goto done; + } + + // + // Note that this call is valid for both "actual" devices and ctrl + // devices. In particular, NV_ESC_ALLOC_OS_EVENT can be used with + // both types of devices. + // But, the ctl_fd passed in should always correspond to a control FD. + // + ctl_nvfp = nv_get_file_private(params->ctl_fd, + NV_TRUE, /* require ctl fd */ + &priv); + if (ctl_nvfp == NULL) + { + // UNLOCK: release API lock + rmapiLockRelease(); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // Disallow self-referential links, and disallow links to FDs that + // themselves have a link. + if ((ctl_nvfp == nvfp) || (ctl_nvfp->ctl_nvfp != NULL)) + { + nv_put_file_private(priv); + // UNLOCK: release API lock + rmapiLockRelease(); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // Check to avoid cyclic dependency with device refcounting + if (!portAtomicCompareAndSwapU32(&nvfp->register_or_refcount, + NVFP_TYPE_REGISTERED, + NVFP_TYPE_NONE)) + { + nv_put_file_private(priv); + // UNLOCK: release API lock + rmapiLockRelease(); + rmStatus = NV_ERR_IN_USE; + goto done; + } + + // + // nvfp->ctl_nvfp is read outside the lock, so set it atomically. + // Note that once set, this can never be removed until the fd + // associated with nvfp is closed. We hold on to 'priv' until the + // fd is closed, too, to ensure that the fd associated with + // ctl_nvfp remains valid. + // + portAtomicSetSize(&nvfp->ctl_nvfp, ctl_nvfp); + nvfp->ctl_nvfp_priv = priv; + + // UNLOCK: release API lock + rmapiLockRelease(); + + // NOTE: nv_put_file_private(priv) is not called here. It MUST be + // called during cleanup of this nvfp. + rmStatus = NV_OK; + break; + } + + default: + { + NV_PRINTF(LEVEL_ERROR, "unknown NVRM ioctl command: 0x%x\n", cmd); + goto done; + } + } + + rmStatus = NV_OK; +done: + + return rmStatus; +} diff --git a/src/nvidia/arch/nvalloc/unix/src/exports-stubs.c b/src/nvidia/arch/nvalloc/unix/src/exports-stubs.c new file mode 100644 index 0000000..757b819 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/exports-stubs.c @@ -0,0 +1,316 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include "os/os.h" +#include +#include +#include +#include + +NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + return NV_OK; +} + +void NV_API_CALL rm_init_tegra_dynamic_power_management( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ +} + +void NV_API_CALL rm_init_dynamic_power_management( + nvidia_stack_t *sp, + nv_state_t *nv, + NvBool bPr3AcpiMethodPresent +) +{ +} + +void NV_API_CALL rm_cleanup_dynamic_power_management( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ +} + +void NV_API_CALL rm_enable_dynamic_power_management( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ +} + +NV_STATUS NV_API_CALL rm_ref_dynamic_power( + nvidia_stack_t *sp, + nv_state_t *nv, + nv_dynamic_power_mode_t mode +) +{ + return NV_OK; +} + +void NV_API_CALL rm_unref_dynamic_power( + nvidia_stack_t *sp, + nv_state_t *nv, + nv_dynamic_power_mode_t mode +) +{ +} + +NV_STATUS NV_API_CALL rm_transition_dynamic_power( + nvidia_stack_t *sp, + nv_state_t *nv, + NvBool bEnter, + NvBool *bTryAgain +) +{ + return NV_OK; +} + +void NV_API_CALL rm_get_power_info( + nvidia_stack_t *sp, + nv_state_t *pNv, + nv_power_info_t *powerInfo +) +{ + powerInfo->vidmem_power_status = "?"; + powerInfo->dynamic_power_status = "?"; + powerInfo->gc6_support = "?"; + powerInfo->gcoff_support = "?"; + powerInfo->s0ix_status = "?"; + powerInfo->db_support = "?"; +} + +NV_STATUS +subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams +) +{ + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *pParams +) +{ + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *pParams +) +{ + return NV_OK; +} + +void +RmUpdateGc6ConsoleRefCount +( + nv_state_t *nv, + NvBool bIncrease +) +{ +} + +void +RmInitPowerManagement +( + nv_state_t *nv +) +{ +} + +void +RmDestroyPowerManagement +( + nv_state_t *nv +) +{ +} + +void RmHandleDisplayChange +( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ +} + +NV_STATUS +os_ref_dynamic_power +( + nv_state_t *nv, + nv_dynamic_power_mode_t mode +) +{ + return NV_OK; +} + +void +os_unref_dynamic_power +( + nv_state_t *nv, + nv_dynamic_power_mode_t mode +) +{ +} + +void 
+osClientGcoffDisallowRefcount +( + OS_GPU_INFO *pArg1, + NvBool arg2 +) +{ +} + +NvU32 osGetDynamicPowerSupportMask(void) +{ + return 0; +} + +void osUnrefGpuAccessNeeded +( + OS_GPU_INFO *pOsGpuInfo +) +{ + return; +} + +NV_STATUS osRefGpuAccessNeeded +( + OS_GPU_INFO *pOsGpuInfo +) +{ + return NV_OK; +} + +NV_STATUS +deviceCtrlCmdOsUnixVTSwitch_IMPL +( + Device *pDevice, + NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams +) +{ + return NV_OK; +} + +NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL(Device *pDevice, + NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams) +{ + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_save_low_res_mode( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvBool NV_API_CALL rm_isr( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 *NeedBottomHalf +) +{ + *NeedBottomHalf = NV_FALSE; + return NV_TRUE; +} + +void NV_API_CALL rm_isr_bh( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ +} + +void NV_API_CALL rm_isr_bh_unlocked( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ +} + +NvBool NV_API_CALL rm_is_msix_allowed( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + return NV_FALSE; +} + +// Functions are in unix_intr.c -- that file is not compiled on DCE_CLIENT_RM enabled builds +NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 *faultsCopied +) +{ + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_gpu_handle_mmu_faults( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 *faultsCopied +) +{ + return NV_OK; +} + +NvBool NV_API_CALL rm_is_chipset_io_coherent +( + nvidia_stack_t *sp +) +{ + return NV_FALSE; +} + +NvBool NV_API_CALL rm_disable_iomap_wc(void) +{ + return NV_FALSE; +} + +NV_STATUS RmInitX86EmuState(OBJGPU *pGpu) +{ + return NV_OK; +} + +void RmFreeX86EmuState(OBJGPU *pGpu) +{ +} diff --git a/src/nvidia/arch/nvalloc/unix/src/gcc_helper.c b/src/nvidia/arch/nvalloc/unix/src/gcc_helper.c new file mode 100644 index 0000000..3c1037c --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/gcc_helper.c @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include <nvtypes.h>
+#include <os-interface.h>
+
+void* memset(void* s, int c, NvUPtr n)
+{
+    return os_mem_set(s, (NvU8)c, (NvU32)n);
+}
+
+void* memcpy(void* dest, const void* src, NvUPtr n)
+{
+    return os_mem_copy(dest, src, (NvU32)n);
+}
diff --git a/src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c b/src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c
new file mode 100644
index 0000000..9225cee
--- /dev/null
+++ b/src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c
@@ -0,0 +1,192 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvstatus.h"
+#include "os/os.h"
+#include "nv.h"
+#include "nv-hypervisor.h"
+
+HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void)
+{
+    return OS_HYPERVISOR_UNKNOWN;
+}
+
+NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(
+    nvidia_stack_t *sp,
+    nv_state_t *pNv,
+    NvU32 *numVgpuTypes,
+    NvU32 *vgpuTypeIds,
+    NvBool isVirtfn,
+    NvU8 devfn,
+    NvBool getCreatableTypes
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(
+    nvidia_stack_t *sp,
+    nv_state_t *pNv,
+    NvU8 cmd,
+    NvU32 domain,
+    NvU8 bus,
+    NvU8 slot,
+    NvU8 function,
+    NvBool isMdevAttached,
+    void *vf_pci_info
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
+    nvidia_stack_t *sp,
+    nv_state_t *pNv,
+    NvU32 vgpuTypeId,
+    char *buffer,
+    int type_info,
+    NvU8 devfn
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_vgpu_create_request(
+    nvidia_stack_t *sp,
+    nv_state_t *pNv,
+    const NvU8 *pVgpuDevName,
+    NvU32 vgpuTypeId,
+    NvU16 *vgpuId,
+    NvU32 *gpu_instance_id,
+    NvU32 *placement_id,
+    NvU32 gpuPciBdf
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_gpu_bind_event(
+    nvidia_stack_t *sp,
+    NvU32 gpuId,
+    NvBool *isEventNotified
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_gpu_unbind_event(
+    nvidia_stack_t *sp,
+    NvU32 gpuId,
+    NvBool *isEventNotified
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_vgpu_delete(
+    nvidia_stack_t *sp,
+    const NvU8 *pVgpuDevName,
+    NvU16 vgpuId
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS nv_vgpu_rm_get_bar_info(
+    OBJGPU *pGpu,
+    const NvU8 *pVgpuDevName,
+    NvU64 *bar_sizes,
+    NvU64 *sparse_offsets,
+    NvU64 *sparse_sizes,
+    NvU32 *sparse_count,
+    NvBool *isBar064bit,
+    NvU8 *config_params
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
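+/*
+ * Like the rest of this stubs file, the vGPU entry points above and below
+ * simply report NV_ERR_NOT_SUPPORTED (or a benign default) so that callers
+ * degrade gracefully on builds without hypervisor/vGPU support.
+ */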
+NV_STATUS NV_API_CALL nv_vgpu_get_bar_info( + nvidia_stack_t *sp, + nv_state_t *pNv, + const NvU8 *pVgpuDevName, + NvU64 *bar_sizes, + NvU64 *sparse_offsets, + NvU64 *sparse_sizes, + NvU32 *sparse_count, + NvBool *isBar064bit, + NvU8 *config_params +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_update_sysfs_info( + nvidia_stack_t *sp, + nv_state_t *pNv, + const NvU8 *pVgpuDevName, + NvU32 updateMode, + NvU32 *sysfs_val +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info( + nvidia_stack_t *sp, + nv_state_t *pNv, + const NvU8 *pVgpuDevName, + NvU64 *hbm_addr, + NvU64 *size +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvU32 osGetGridCspSupport(void) +{ + return 0; +} + +void initVGXSpecificRegistry(OBJGPU *pGpu) +{} + +NV_STATUS osIsVgpuVfioPresent(void) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osIsVfioPciCorePresent(void) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvBool hypervisorIsVgxHyper_IMPL(void) +{ + return NV_FALSE; +} + +NV_STATUS osIsVgpuDeviceVmPresent(void) +{ + return NV_ERR_NOT_SUPPORTED; +} diff --git a/src/nvidia/arch/nvalloc/unix/src/os.c b/src/nvidia/arch/nvalloc/unix/src/os.c new file mode 100644 index 0000000..4884cfe --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/os.c @@ -0,0 +1,5391 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include // NV device driver interface +#include +#include +#include +#include +#include +#include +#include +#include // NV_EVENT_BUFFER_BIND +#include + +#include +#include + +#include "gpu/gpu.h" +#include + +#include "nverror.h" + +#include "mem_mgr/io_vaspace.h" +#include +#include "kernel/diagnostics/xid_context.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "core/thread_state.h" +#include +#include +#include +#include +#include "virtualization/hypervisor/hypervisor.h" +#include "rmobjexportimport.h" +#include +#include "rmapi/rs_utils.h" +#include "rmapi/client_resource.h" +#include "os/dce_rm_client_ipc.h" +#include "mem_mgr/mem.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" + +#include "vgpu/vgpu_util.h" + +#include +#include +#include "gps.h" +#include "jt.h" + + + + +extern const char *ppOsBugCheckBugcodeStr[]; + + +ct_assert(NV_RM_PAGE_SIZE == RM_PAGE_SIZE); +ct_assert(NV_RM_PAGE_MASK == RM_PAGE_MASK); +ct_assert(NV_RM_PAGE_SHIFT == RM_PAGE_SHIFT); + +typedef struct +{ + NvU32 euid; + NvU32 pid; +} TOKEN_USER, *PTOKEN_USER; + +struct OS_RM_CAPS +{ + NvU32 count; + + // This should be the last element + nv_cap_t **caps; +}; + +NvBool osIsRaisedIRQL(void) +{ + return (!os_semaphore_may_sleep()); +} + +NvBool osIsISR(void) +{ + return os_is_isr(); +} + +NV_STATUS osGetDriverBlock +( + OS_GPU_INFO *pOsGpuInfo, + OS_DRIVER_BLOCK *pBlock +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvU64 osGetMonotonicTimeNs(void) +{ + return os_get_monotonic_time_ns(); +} + +NvU64 osGetMonotonicTickResolutionNs(void) +{ + return os_get_monotonic_tick_resolution_ns(); +} + +NV_STATUS osGetPerformanceCounter(NvU64 *pTimeInNs) +{ + *pTimeInNs = os_get_monotonic_time_ns_hr(); + return NV_OK; +} + +NV_STATUS osGetSystemTime( + NvU32 *pSeconds, + NvU32 *pMicroSeconds +) +{ + return os_get_system_time(pSeconds, pMicroSeconds); +} + +/*! + * @brief Get timestamp for logging. + * + * Everything that logs a time stamp should use this routine for consistency. + * + * The returned value is OS dependent. We want the time stamp to use + * KeQueryPerformanceCounter on Windows so it matches the DirectX timestamps. + * Linux uses microseconds since 1970 (osGetSystemTime), since matching DirectX + * is not a priority. + * + * osGetTimestampFreq returns the frequency required to decode the time stamps. + * + * @returns system dependent timestamp. + */ +NvU64 osGetTimestamp(void) +{ + NvU32 sec = 0; + NvU32 usec = 0; + osGetSystemTime(&sec, &usec); + return (NvU64)sec * 1000000 + usec; +} + +/*! + * @brief Get timestamp frequency. + * + * Timestamps are OS dependent. This call returns the frequency + * required to decode them. + * + * @returns Timestamp frequency. For example, 1000000 for MHz. + */ +NvU64 osGetTimestampFreq(void) +{ + return 1000000; +} + +NV_STATUS osDelay(NvU32 milliseconds) +{ + return os_delay(milliseconds); +} + +NV_STATUS osDelayUs(NvU32 microseconds) +{ + return os_delay_us(microseconds); +} + +NV_STATUS osDelayNs(NvU32 nanoseconds) +{ + NvU32 microseconds = NV_MAX(1, (nanoseconds / 1000)); + return os_delay_us(microseconds); +} + +NvU32 osGetCpuFrequency(void) +{ + /* convert os_get_cpu_frequency()'s return value from Hz to MHz */ + return ((NvU32)(os_get_cpu_frequency() / 1000000ULL)); +} + +void* osPciInitHandle( + NvU32 Domain, + NvU8 Bus, + NvU8 Slot, + NvU8 Function, + NvU16 *pVendor, + NvU16 *pDevice +) +{ + // + // Check if the BDF is for a GPU that's already been attached, for which + // we should already have a handle cached. 
This won't catch devices that + // have been probed but not yet attached, but that shouldn't be a common + // occurrence. + // + // More importantly, having this check here means we don't need to check + // a global list of devices in the kernel interface layer, which could + // have the implication of taking another lock, causing hairy lock + // ordering issues. + // + if (Function == 0) + { + OBJGPU *pGpu = gpumgrGetGpuFromBusInfo(Domain, Bus, Slot); + if (pGpu != NULL) + { + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + if (pVendor) *pVendor = nv->pci_info.vendor_id; + if (pDevice) *pDevice = nv->pci_info.device_id; + return nv->handle; + } + } + + return os_pci_init_handle(Domain, Bus, Slot, Function, pVendor, pDevice); +} + +NvU8 osPciReadByte( + void *pHandle, + NvU32 Offset +) +{ + NvU8 val; + os_pci_read_byte(pHandle, Offset, &val); + return val; +} + +NvU16 osPciReadWord( + void *pHandle, + NvU32 Offset +) +{ + NvU16 val; + os_pci_read_word(pHandle, Offset, &val); + return val; +} + +NvU32 osPciReadDword( + void *pHandle, + NvU32 Offset +) +{ + NvU32 val; + os_pci_read_dword(pHandle, Offset, &val); + return val; +} + +void osPciWriteByte( + void *pHandle, + NvU32 Offset, + NvU8 Value +) +{ + os_pci_write_byte(pHandle, Offset, Value); +} + +void osPciWriteWord( + void *pHandle, + NvU32 Offset, + NvU16 Value +) +{ + os_pci_write_word(pHandle, Offset, Value); +} + +void osPciWriteDword( + void *pHandle, + NvU32 Offset, + NvU32 Value +) +{ + os_pci_write_dword(pHandle, Offset, Value); +} + +void* osMapKernelSpace( + RmPhysAddr Start, + NvU64 Size, + NvU32 Mode, + NvU32 Protect +) +{ + NvU64 offset; + NvU8 *ptr; + + if (0 == Size) + { + NV_ASSERT(Size != 0); + return NULL; + } + + offset = (Start & ~os_page_mask); + Start &= os_page_mask; + + if (!portSafeAddU64(Size, offset, &Size) || + !portSafeAddU64(Size, ~os_page_mask, &Size)) + { + return NULL; + } + Size &= os_page_mask; + + ptr = os_map_kernel_space(Start, Size, Mode); + if (ptr != NULL) + return (ptr + offset); + + return NULL; +} + +void osUnmapKernelSpace( + void *pAddress, + NvU64 Size +) +{ + NvU64 offset; + NvUPtr ptr = (NvUPtr)pAddress; + + if (0 == Size) + { + NV_ASSERT(Size != 0); + return; + } + + offset = (ptr & ~os_page_mask); + ptr &= os_page_mask; + Size = ((Size + offset + ~os_page_mask) & os_page_mask); + os_unmap_kernel_space((void *)ptr, Size); +} + +NV_STATUS osGetNumMemoryPages +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 *pNumPages +) +{ + void *pAllocPrivate = NULL; + + pAllocPrivate = memdescGetMemData(pMemDesc); + if (pAllocPrivate == NULL) + { + NV_PRINTF(LEVEL_ERROR, "pAllocPrivate is NULL!\n"); + return NV_ERR_INVALID_STATE; + } + + return nv_get_num_phys_pages(pAllocPrivate, pNumPages); +} + +NV_STATUS osGetMemoryPages +( + MEMORY_DESCRIPTOR *pMemDesc, + void *pPages, + NvU32 *pNumPages +) +{ + void *pAllocPrivate = NULL; + + pAllocPrivate = memdescGetMemData(pMemDesc); + if (pAllocPrivate == NULL) + { + NV_PRINTF(LEVEL_ERROR, "pAllocPrivate is NULL!\n"); + return NV_ERR_INVALID_STATE; + } + + return nv_get_phys_pages(pAllocPrivate, pPages, pNumPages); +} + +NV_STATUS osMapSystemMemory +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 Offset, + NvU64 Length, + NvBool Kernel, + NvU32 Protect, + NvP64 *ppAddress, + NvP64 *ppPrivate +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + + NvU64 rootOffset = 0; + pMemDesc = memdescGetRootMemDesc(pMemDesc, &rootOffset); + Offset += rootOffset; + + RmPhysAddr userAddress; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + NV_STATUS rmStatus = NV_OK; + void *pAllocPrivate = NULL; + void 
*pAddress; + void *pPrivate = NULL; + NvU64 pageIndex; + NvU32 pageOffset = (Offset & ~os_page_mask); + + *ppAddress = NvP64_NULL; + *ppPrivate = NvP64_NULL; + + if ((Offset + Length) < Length) + return NV_ERR_INVALID_ARGUMENT; + if ((Offset + Length) > pMemDesc->Size) + return NV_ERR_INVALID_ARGUMENT; + + pageIndex = (Offset >> os_page_shift); + + pAllocPrivate = memdescGetMemData(pMemDesc); + if (!pAllocPrivate) + { + NV_PRINTF(LEVEL_ERROR, "pAllocPrivate is NULL!\n"); + return NV_ERR_INVALID_STATE; + } + + if (Kernel) + { + pAddress = nv_alloc_kernel_mapping(nv, pAllocPrivate, + pageIndex, pageOffset, Length, &pPrivate); + if (pAddress == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "failed to create system memory kernel mapping!\n"); + rmStatus = NV_ERR_GENERIC; + } + else + { + *ppAddress = NV_PTR_TO_NvP64(pAddress); + *ppPrivate = NV_PTR_TO_NvP64(pPrivate); + } + } + else + { + rmStatus = nv_alloc_user_mapping(nv, pAllocPrivate, + pageIndex, pageOffset, Length, Protect, &userAddress, + &pPrivate); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to create system memory user mapping!\n"); + } + else + { + *ppAddress = (NvP64)(userAddress); + *ppPrivate = NV_PTR_TO_NvP64(pPrivate); + } + } + + return rmStatus; +} + +void osUnmapSystemMemory +( + MEMORY_DESCRIPTOR *pMemDesc, + NvBool Kernel, + NvP64 pAddress, + NvP64 pPrivate +) +{ + void *pAllocPrivate; + OBJGPU *pGpu = pMemDesc->pGpu; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + pMemDesc = memdescGetRootMemDesc(pMemDesc, NULL); + pAllocPrivate = memdescGetMemData(pMemDesc); + + if (Kernel) + { + nv_free_kernel_mapping(nv, pAllocPrivate, NvP64_VALUE(pAddress), + NvP64_VALUE(pPrivate)); + } + else + { + nv_free_user_mapping(nv, pAllocPrivate, (NvU64)pAddress, + NvP64_VALUE(pPrivate)); + } + + if (pGpu != NULL && + pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) && + memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM && + pAllocPrivate != NULL) + { + nv_unregister_phys_pages(nv, pAllocPrivate); + memdescSetMemData(pMemDesc, NULL, NULL); + } +} + +void osIoWriteByte( + NvU32 Address, + NvU8 Value +) +{ + os_io_write_byte(Address, Value); +} + +NvU16 osIoReadWord( + NvU32 Address +) +{ + return os_io_read_word(Address); +} + +void osIoWriteWord( + NvU32 Address, + NvU16 Value +) +{ + os_io_write_word(Address, Value); +} + +NvU8 osIoReadByte( + NvU32 Address +) +{ + return os_io_read_byte(Address); +} + +NvBool osIsAdministrator(void) +{ + return os_is_administrator(); +} + +NvBool osCheckAccess(RsAccessRight accessRight) +{ + return os_check_access(accessRight); +} + +NvU32 osGetCurrentProcess(void) +{ + return os_get_current_process(); +} + +void osGetCurrentProcessName(char *ProcName, NvU32 Length) +{ + return os_get_current_process_name(ProcName, Length); +} + +NV_STATUS osGetCurrentThread(OS_THREAD_HANDLE *pThreadId) +{ + NV_STATUS rmStatus; + NvU64 threadId = 0; + + if (pThreadId == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + rmStatus = os_get_current_thread(&threadId); + if (rmStatus == NV_OK) + { + *pThreadId = threadId; + } + else + { + *pThreadId = 0; + } + + return rmStatus; +} + +void* osGetPidInfo(void) +{ + return os_get_pid_info(); +} + +void osPutPidInfo(void *pOsPidInfo) +{ + os_put_pid_info(pOsPidInfo); +} + +NV_STATUS osFindNsPid(void *pOsPidInfo, NvU32 *pNsPid) +{ + return os_find_ns_pid(pOsPidInfo, pNsPid); +} + +NvBool osIsInitNs(void) +{ + return os_is_init_ns(); +} + +NV_STATUS osAttachToProcess(void** ppProcessInfo, NvU32 ProcessId) +{ + // + // This function is used by RmUnmapMemory() to 
attach to the + // process for which a given device memory mapping was + // created, in order to be able to unmap it. On Linux/UNIX + // platforms, we can't "attach" to a random process, but + // since we don't create/destroy user mappings in the RM, we + // don't need to, either. + // + // Report success to the caller to keep RmUnmapMemory() from + // failing, and memory from being leaked as a result. + // + *ppProcessInfo = NULL; + return NV_OK; +} + +void osDetachFromProcess(void* pProcessInfo) +{ + // stub + return; +} + +NV_STATUS osAcquireRmSema(void *pSema) +{ + return NV_OK; +} + +NV_STATUS osCondAcquireRmSema(void *pSema) +{ + return NV_OK; +} + +NvU32 osReleaseRmSema(void *pSema, OBJGPU *pDpcGpu) +{ + return NV_SEMA_RELEASE_SUCCEED; +} + +void osSpinLoop(void) +{ + // Enable this code to get debug prints from Libos. +} + +NvU64 osGetMaxUserVa(void) +{ + return os_get_max_user_va(); +} + +NV_STATUS osSchedule(void) +{ + return os_schedule(); +} + +NV_STATUS osQueueWorkItem( + OBJGPU *pGpu, + OSWorkItemFunction pFunction, + void *pParams, + NvU32 flags +) +{ + nv_work_item_t *pWi; + nv_state_t *nv; + NV_STATUS status; + + pWi = portMemAllocNonPaged(sizeof(nv_work_item_t)); + + if (NULL == pWi) + { + return NV_ERR_NO_MEMORY; + } + + pWi->flags = NV_WORK_ITEM_FLAGS_REQUIRES_GPU; + if (flags & OS_QUEUE_WORKITEM_FLAGS_DONT_FREE_PARAMS) + pWi->flags |= NV_WORK_ITEM_FLAGS_DONT_FREE_DATA; + + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA; + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW; + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO; + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS; + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE; + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE; + + if (flags & OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY; + + if (flags & OS_QUEUE_WORKITEM_FLAGS_DROP_ON_UNLOAD_QUEUE_FLUSH) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_DROP_ON_UNLOAD_QUEUE_FLUSH; + + pWi->gpuInstance = gpuGetInstance(pGpu); + pWi->func.pGpuFunction = pFunction; + pWi->pData = pParams; + nv = NV_GET_NV_STATE(pGpu); + + status = os_queue_work_item(nv ? nv->queue : NULL, pWi); + + if (NV_OK != status) + { + portMemFree((void *)pWi); + } + + return status; +} + +NV_STATUS osQueueSystemWorkItem( + OSSystemWorkItemFunction pFunction, + void *pParams +) +{ + nv_work_item_t *pWi; + NV_STATUS status; + + pWi = portMemAllocNonPaged(sizeof(nv_work_item_t)); + + if (NULL == pWi) + { + return NV_ERR_NO_MEMORY; + } + + pWi->flags = NV_WORK_ITEM_FLAGS_NONE; + pWi->func.pSystemFunction = pFunction; + pWi->pData = pParams; + + status = os_queue_work_item(NULL, pWi); + + if (NV_OK != status) + { + portMemFree((void *)pWi); + } + + return status; +} + +void osQueueMMUFaultHandler(OBJGPU *pGpu) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + nv_schedule_uvm_isr(nv); +} + +NvBool osGpuSupportsAts(OBJGPU *pGpu) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + // Checks ATS support from both OS side and RM side. 
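+ // (nv->ats_support is the capability reported by the OS/platform layer; + // the PDB property is RM's own per-GPU determination.)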
+ return nv->ats_support && + pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED); +} + +NV_STATUS osQueueDrainP2PHandler(NvU8 *pUuid) +{ + return nv_schedule_uvm_drain_p2p(pUuid); +} + +void osQueueResumeP2PHandler(NvU8 *pUuid) +{ + nv_schedule_uvm_resume_p2p(pUuid); +} + +static inline nv_dma_device_t* osGetDmaDeviceForMemDesc( + OS_GPU_INFO *pOsGpuInfo, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + return (pOsGpuInfo->niso_dma_dev != NULL) && + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO) ? + pOsGpuInfo->niso_dma_dev : pOsGpuInfo->dma_dev; +} + +// +// Set the DMA address size for the given GPU. +// +// This is a global device setting, so care must be taken if it is to be +// modified outside of GPU initialization. At least on Linux, other drivers +// such as UVM may request their own DMA mappings for the same GPU after the +// GPU has been initialized. +// +void osDmaSetAddressSize( + OS_GPU_INFO *pOsGpuInfo, + NvU32 bits +) +{ + nv_set_dma_address_size(pOsGpuInfo, bits); +} + +static NV_STATUS osGetPagesInfo( + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 *pageSize, + NvU64 *osPageCount, + NvU64 *rmPageCount +) +{ + NvU64 osPageSize = osGetPageSize(); + NvU64 maxPageSize = NV_MAX(osPageSize, RM_PAGE_SIZE); + NvU64 alignedSize = NV_ALIGN_UP(pMemDesc->Size, maxPageSize); + + *osPageCount = alignedSize >> BIT_IDX_32(osPageSize); + *rmPageCount = alignedSize >> RM_PAGE_SHIFT; + *pageSize = memdescGetAdjustedPageSize(pMemDesc); + + // In the non-contiguous case, we need to protect against page array overflows. + if (!memdescGetContiguity(pMemDesc, AT_CPU)) + NV_ASSERT_OR_RETURN(*rmPageCount <= pMemDesc->pageArraySize, NV_ERR_INVALID_ARGUMENT); + + if (*osPageCount > NV_U32_MAX || *rmPageCount > NV_U32_MAX) + return NV_ERR_INVALID_LIMIT; + + return NV_OK; +} + +NV_STATUS osAllocPagesInternal( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPU *pGpu = pMemDesc->pGpu; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + void *pMemData = NULL; + NV_STATUS status; + NvS32 nodeId = NV0000_CTRL_NO_NUMA_NODE; + NV_ADDRESS_SPACE addrSpace = memdescGetAddressSpace(pMemDesc); + NvU64 pageSize; + NvU64 osPageCount; + NvU64 rmPageCount; + NvU32 cpuCacheAttrib = memdescGetCpuCacheAttrib(pMemDesc); + + memdescSetAddress(pMemDesc, NvP64_NULL); + memdescSetMemData(pMemDesc, NULL, NULL); + + // + // For carveout, the memory is already reserved, so we don't have + // to allocate it. + // + if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOC_FROM_SCANOUT_CARVEOUT) || + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED)) + { + // We only support scanout carveout with contiguous memory. 
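+ // (The scanout carveout is presumably a single fixed physical range, so a + // non-contiguous descriptor cannot be backed by it; such requests are + // rejected below with NV_ERR_NOT_SUPPORTED.)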
+ if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOC_FROM_SCANOUT_CARVEOUT) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + status = osGetPagesInfo(pMemDesc, &pageSize, &osPageCount, &rmPageCount); + if (status != NV_OK) + goto done; + + if (NV_RM_PAGE_SIZE < os_page_size && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + rmPageCount); + } + + status = nv_alias_pages( + NV_GET_NV_STATE(pGpu), + osPageCount, + pageSize, + memdescGetContiguity(pMemDesc, AT_CPU), + cpuCacheAttrib, + memdescGetGuestId(pMemDesc), + memdescGetPteArray(pMemDesc, AT_CPU), + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOC_FROM_SCANOUT_CARVEOUT), + &pMemData); + } + else + { + NvBool unencrypted = 0; + + if (nv && (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE))) + nv->force_dma32_alloc = NV_TRUE; + + // + // OBJGPU/nv may be NULL if constructing NV01_MEMORY_DEVICELESS. Assume that + // NV01_MEMORY_DEVICELESS won't be used for display. + // + if (pGpu != NULL && + pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY) && + addrSpace == ADDR_SYSMEM) + { + if (!memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO) && + !NV_SOC_IS_ISO_IOMMU_PRESENT(nv)) + { + NV_PRINTF(LEVEL_INFO, "Forcing physically contiguous flags for ISO\n"); + memdescSetContiguity(pMemDesc, AT_CPU, NV_TRUE); + } + + if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO) && + !NV_SOC_IS_NISO_IOMMU_PRESENT(nv)) + { + NV_PRINTF(LEVEL_INFO, "Forcing physically contiguous flags for NISO\n"); + memdescSetContiguity(pMemDesc, AT_CPU, NV_TRUE); + } + } + + if (addrSpace == ADDR_SYSMEM) + { + nodeId = memdescGetNumaNode(pMemDesc); + } + else if (addrSpace == ADDR_EGM) + { + nodeId = GPU_GET_MEMORY_MANAGER(pGpu)->localEgmNodeId; + } + + status = osGetPagesInfo(pMemDesc, &pageSize, &osPageCount, &rmPageCount); + if (status != NV_OK) + goto done; + + status = nv_alloc_pages( + NV_GET_NV_STATE(pGpu), + osPageCount, // TODO: This call needs to receive the page count param at the requested page size. + pageSize, + memdescGetContiguity(pMemDesc, AT_CPU), + cpuCacheAttrib, + pSys->getProperty(pSys, + PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS), + unencrypted, + nodeId, + memdescGetPteArray(pMemDesc, AT_CPU), + &pMemData); + + if (nv && nv->force_dma32_alloc) + nv->force_dma32_alloc = NV_FALSE; + } + + if (status != NV_OK) + { + goto done; + } + + // Guest allocated memory is already initialized + if (!memdescGetFlag(pMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED)) + { + NV_ASSERT_OK_OR_RETURN(memdescSetAllocSizeFields(pMemDesc, rmPageCount * RM_PAGE_SIZE, RM_PAGE_SIZE)); + } + + // + // If the OS layer doesn't think in RM page size, we need to inflate the + // PTE array into RM pages. 
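+ // For example, with 64K OS pages and 4K RM pages, each OS page entry in + // the array is expanded into sixteen 4K RM page entries.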
+ // + if (NV_RM_PAGE_SIZE < os_page_size && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmInflateOsToRmPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + memdescSetMemData(pMemDesc, pMemData, NULL); + + if ((pGpu != NULL) && IS_VIRTUAL(pGpu)) + NV_ASSERT_OK_OR_RETURN(vgpuUpdateGuestSysmemPfnBitMap(pGpu, pMemDesc, NV_TRUE)); +done: + return status; +} + +void osFreePagesInternal( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + NV_STATUS rmStatus; + + if ((pGpu != NULL) && IS_VIRTUAL(pGpu)) + NV_ASSERT_OR_RETURN_VOID(vgpuUpdateGuestSysmemPfnBitMap(pGpu, pMemDesc, NV_FALSE) == NV_OK); + + if (NV_RM_PAGE_SIZE < os_page_size && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + rmStatus = nv_free_pages(NV_GET_NV_STATE(pGpu), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetContiguity(pMemDesc, AT_CPU), + memdescGetCpuCacheAttrib(pMemDesc), + memdescGetMemData(pMemDesc)); + NV_ASSERT(rmStatus == NV_OK); +} + +NV_STATUS osLockMem( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + // Not supported on this OS. + DBG_BREAKPOINT(); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osUnlockMem( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + // Not supported on this OS. + DBG_BREAKPOINT(); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osMapPciMemoryAreaUser +( + OS_GPU_INFO *pOsGpuInfo, + MemoryArea memArea, + NvU32 protect, + NvU32 mode, + NvP64 *pVirtualAddress, + NvP64 *pPriv +) +{ + NV_STATUS status = NV_OK; + NvU64 origStart; + NvU64 origSize; + NvU64 diffStart; + + if (memArea.numRanges == 0) + { + *pVirtualAddress = (NvP64) NULL; + *pPriv = NULL; + return NV_OK; + } + + // Fix alignment insofar as we can (middle blocks can't be force-aligned this way) + origStart = memArea.pRanges[0].start; + memArea.pRanges[0].start = NV_ALIGN_DOWN64(origStart, os_page_size); + diffStart = origStart - memArea.pRanges[0].start; + memArea.pRanges[0].size += diffStart; + origSize = memArea.pRanges[memArea.numRanges - 1llu].size; + memArea.pRanges[memArea.numRanges - 1llu].size = NV_ALIGN_UP64(origSize, os_page_size); + + { + nv_usermap_access_params_t **ppNvuap, tNvuap; + NvU64 totalRangeSize = sizeof(MemoryRange) * memArea.numRanges; + + portMemSet(&tNvuap, 0, sizeof(nv_usermap_access_params_t)); + + tNvuap.memArea = memArea; + // access_size is only for caching, we can use os_page_size for now until linux has been properly plumbed + tNvuap.access_start = memArea.pRanges[0].start; + tNvuap.access_size = os_page_size; + + tNvuap.caching = mode; + tNvuap.contig = NV_TRUE; + + NV_ASSERT_OK_OR_RETURN(nv_check_usermap_access_params(pOsGpuInfo, &tNvuap)); + + ppNvuap = (nv_usermap_access_params_t **) tlsEntryAcquire(TLS_ENTRY_ID_MAPPING_CONTEXT); + NV_ASSERT_OR_RETURN(ppNvuap != NULL, NV_ERR_INVALID_STATE); + + NV_ASSERT_OK_OR_GOTO(status, + os_alloc_mem((void**) ppNvuap, sizeof(nv_alloc_mapping_context_t)), + free_tls); + + portMemCopy(*ppNvuap, sizeof(nv_usermap_access_params_t), + &tNvuap, sizeof(nv_usermap_access_params_t)); + + NV_ASSERT_OK_OR_GOTO(status, + os_alloc_mem((void**) &((*ppNvuap)->memArea.pRanges), totalRangeSize), + free_nvuap); + + portMemCopy((*ppNvuap)->memArea.pRanges, totalRangeSize, memArea.pRanges, totalRangeSize); + + *pVirtualAddress = (NvP64) (memArea.pRanges[0].start + diffStart); + goto unalign_and_return; + +free_nvuap: + os_free_mem(*ppNvuap); +free_tls: + tlsEntryRelease(TLS_ENTRY_ID_MAPPING_CONTEXT); + } +unalign_and_return: + 
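// Undo the page-alignment fixups applied above so the caller's MemoryArea + // is returned exactly as it was passed in. +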
memArea.pRanges[memArea.numRanges - 1llu].size = origSize; + memArea.pRanges[0].size -= diffStart; + memArea.pRanges[0].start = origStart; + return status; +} + +NV_STATUS osMapPciMemoryUser( + OS_GPU_INFO *pOsGpuInfo, + RmPhysAddr busAddress, + NvU64 length, + NvU32 Protect, + NvP64 *pVirtualAddress, + NvP64 *pPriv, + NvU32 modeFlag +) +{ + MemoryArea memArea; + MemoryRange memRange; + + memArea.numRanges = 1; + memArea.pRanges = &memRange; + + memRange.start = busAddress; + memRange.size = length; + + return osMapPciMemoryAreaUser(pOsGpuInfo, memArea, Protect, modeFlag, pVirtualAddress, pPriv); +} + +void osUnmapPciMemoryUser( + OS_GPU_INFO *pOsGpuInfo, + NvP64 virtualAddress, + NvU64 length, + NvP64 pPriv +) +{ +} + +NV_STATUS osMapPciMemoryKernelOld +( + OBJGPU *pGpu, + RmPhysAddr busAddress, + NvU64 length, + NvU32 Protect, + void **pVirtualAddress, + NvU32 modeFlag +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_kern_mapping_t *mapping; + + if (pVirtualAddress == NULL) + return NV_ERR_GENERIC; + + *pVirtualAddress = os_map_kernel_space(busAddress, length, modeFlag); + if (*pVirtualAddress == NULL) + return NV_ERR_GENERIC; + + mapping = portMemAllocNonPaged(sizeof(nv_kern_mapping_t)); + if (NULL == mapping) + { + os_unmap_kernel_space(*pVirtualAddress, length); + *pVirtualAddress = 0; + return NV_ERR_GENERIC; + } + + mapping->addr = *pVirtualAddress; + mapping->size = length; + mapping->modeFlag = modeFlag; + + mapping->next = nv->kern_mappings; + nv->kern_mappings = mapping; + + return NV_OK; +} + +NV_STATUS osMapPciMemoryKernel64 +( + OBJGPU *pGpu, + RmPhysAddr busAddress, + NvU64 length, + NvU32 Protect, + NvP64 *pVirtualAddress, + NvU32 modeFlag +) +{ + void *tmppVirtualAddress = NvP64_VALUE(pVirtualAddress); + NV_STATUS rc; + + rc = osMapPciMemoryKernelOld(pGpu, + busAddress, + length, + Protect, + &tmppVirtualAddress, + modeFlag); + + *pVirtualAddress = NV_PTR_TO_NvP64(tmppVirtualAddress); + + return rc; +} + +void osUnmapPciMemoryKernelOld +( + OBJGPU *pGpu, + void* virtualAddress +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_kern_mapping_t *mapping, *tmp; + + // this can happen, for example, during a call to RmShutdownAdapter() + // from a failed RmInitAdapter() + if (virtualAddress == NULL) + { + return; + } + + tmp = mapping = nv->kern_mappings; + while (mapping) + { + if (mapping->addr == virtualAddress) + { + if (mapping == nv->kern_mappings) + { + nv->kern_mappings = mapping->next; + } + else + { + tmp->next = mapping->next; + } + + os_unmap_kernel_space(mapping->addr, mapping->size); + + portMemFree(mapping); + return; + } + tmp = mapping; + mapping = mapping->next; + } + + DBG_BREAKPOINT(); +} + +void osUnmapPciMemoryKernel64 +( + OBJGPU *pGpu, + NvP64 virtualAddress +) +{ + osUnmapPciMemoryKernelOld(pGpu, NvP64_VALUE(virtualAddress)); +} + +NV_STATUS osMapGPU( + OBJGPU *pGpu, + RS_PRIV_LEVEL privLevel, + NvU64 offset, + NvU64 length, + NvU32 Protect, + NvP64 *pAddress, + NvP64 *pPriv +) +{ + NV_STATUS rmStatus = NV_OK; + + if (privLevel >= RS_PRIV_LEVEL_KERNEL) + { + if (!portSafeAddU64((NvUPtr)pGpu->deviceMappings[0].gpuNvAddr, offset, (NvU64*)pAddress)) + { + rmStatus = NV_ERR_INVALID_LIMIT; + } + } + else + { + RmPhysAddr busAddress; + if (!portSafeAddU64(pGpu->busInfo.gpuPhysAddr, offset, &busAddress)) + { + rmStatus = NV_ERR_INVALID_LIMIT; + } + else + { + rmStatus = osMapPciMemoryUser(pGpu->pOsGpuInfo, + busAddress, + length, + Protect, + pAddress, + pPriv, + NV_FALSE); + } + } + + return rmStatus; +} + +void osUnmapGPU( + OS_GPU_INFO *pOsGpuInfo, + 
RS_PRIV_LEVEL privLevel, + NvP64 address, + NvU64 length, + NvP64 priv +) +{ + if (privLevel < RS_PRIV_LEVEL_KERNEL) + { + osUnmapPciMemoryUser(pOsGpuInfo, address, length, priv); + } +} + +static void postEvent( + nv_event_t *event, + NvU32 hEvent, + NvU32 notifyIndex, + NvU32 info32, + NvU16 info16, + NvBool dataValid +) +{ + if (osReferenceObjectCount(event) != NV_OK) + return; + nv_post_event(event, hEvent, notifyIndex, + info32, info16, dataValid); + osDereferenceObjectCount(event); +} + +NvU32 osSetEvent +( + OBJGPU *pGpu, + NvP64 eventID +) +{ + nv_event_t *event = NvP64_VALUE(eventID); + postEvent(event, 0, 0, 0, 0, NV_FALSE); + return 1; +} + +NV_STATUS osNotifyEvent( + OBJGPU *pGpu, + PEVENTNOTIFICATION NotifyEvent, + NvU32 Method, + NvU32 Data, + NV_STATUS Status +) +{ + NV_STATUS rmStatus = NV_OK; + + // notify the event + switch (NotifyEvent->NotifyType) + { + case NV01_EVENT_OS_EVENT: + { + nv_event_t *event = NvP64_VALUE(NotifyEvent->Data); + postEvent(event, + NotifyEvent->hEvent, + NotifyEvent->NotifyIndex, + 0, 0, + NotifyEvent->bEventDataRequired); + break; + } + + // NOTE: NV01_EVENT_KERNEL_CALLBACK is deprecated. please use NV01_EVENT_KERNEL_CALLBACK_EX. + case NV01_EVENT_KERNEL_CALLBACK: + { + MINIPORT_CALLBACK callBackToMiniport = + (MINIPORT_CALLBACK)NvP64_VALUE(NotifyEvent->Data); + + // perform a direct callback to the miniport + if (callBackToMiniport) + callBackToMiniport(NV_GET_NV_STATE(pGpu)); + break; + } + + case NV01_EVENT_KERNEL_CALLBACK_EX: + { + NVOS10_EVENT_KERNEL_CALLBACK_EX *kc = (NVOS10_EVENT_KERNEL_CALLBACK_EX *)NvP64_VALUE(NotifyEvent->Data); + + // passes two arguments (arg, params) to the kernel callback instead of one (arg). + if (kc && kc->func) + { + kc->func(kc->arg, NULL, NotifyEvent->hEvent, Data, Status); + } + break; + } + + + default: + { + rmStatus = NV_ERR_GENERIC; + break; + } + } + + return rmStatus; + +} // end of osNotifyEvent() + +// Allow CPL Events to be callback or events +NV_STATUS osEventNotification +( + OBJGPU *pGpu, + PEVENTNOTIFICATION pNotifyEvent, + NvU32 notifyIndex, + void * pEventData, + NvU32 eventDataSize +) +{ + return osEventNotificationWithInfo(pGpu, pNotifyEvent, notifyIndex, 0, 0, + pEventData, eventDataSize); +} + +NV_STATUS osEventNotificationWithInfo +( + OBJGPU *pGpu, + PEVENTNOTIFICATION pNotifyEvent, + NvU32 notifyIndex, + NvU32 info32, + NvU16 info16, + void * pEventData, + NvU32 eventDataSize +) +{ + NV_STATUS rmStatus = NV_OK; + + // walk this object's event list and find any matches for this specific notify + for (; pNotifyEvent; pNotifyEvent = pNotifyEvent->Next) + { + // notifyIndex must match if request isn't for all + if ((notifyIndex != OS_EVENT_NOTIFICATION_INDEX_ALL) && + (pNotifyEvent->NotifyIndex != notifyIndex)) + { + continue; + } + + switch (pNotifyEvent->NotifyType) + { + case NV_EVENT_BUFFER_BIND: + case NV01_EVENT_WIN32_EVENT: + { + nv_event_t *event = NvP64_VALUE(pNotifyEvent->Data); + postEvent(event, + pNotifyEvent->hEvent, + pNotifyEvent->NotifyIndex, + info32, info16, + pNotifyEvent->bEventDataRequired); + break; + } + + // NOTE: NV01_EVENT_KERNEL_CALLBACK is deprecated. please use NV01_EVENT_KERNEL_CALLBACK_EX. 
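+ // (Unlike the _EX variant below, this legacy callback only signals the + // miniport; it does not deliver event data or a status code.)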
+ case NV01_EVENT_KERNEL_CALLBACK: + { + MINIPORT_CALLBACK callBackToMiniport = + (MINIPORT_CALLBACK)NvP64_VALUE(pNotifyEvent->Data); + + // perform a direct callback to the miniport + if (callBackToMiniport) + callBackToMiniport(NV_GET_NV_STATE(pGpu)); + break; + } + + case NV01_EVENT_KERNEL_CALLBACK_EX: + { + NVOS10_EVENT_KERNEL_CALLBACK_EX *kc = (NVOS10_EVENT_KERNEL_CALLBACK_EX *)NvP64_VALUE(pNotifyEvent->Data); + + if (kc && kc->func) + { + kc->func(kc->arg, pEventData, pNotifyEvent->hEvent, 0, NV_OK); + } + break; + } + + default: + break; + } + } + + return rmStatus; +} + +// Allow CPL Events to be callback or events +NV_STATUS osObjectEventNotification +( + NvHandle hClient, + NvHandle hObject, + NvU32 hClass, + PEVENTNOTIFICATION pNotifyEvent, + NvU32 notifyIndex, + void *pEventData, + NvU32 eventDataSize +) +{ + NV_STATUS rmStatus = NV_OK; + + NV_PRINTF(LEVEL_INFO, "%s()\n", __FUNCTION__); + // walk this object's event list and find any matches for this specific notify + for (; pNotifyEvent; pNotifyEvent = pNotifyEvent->Next) + { + // notifyIndex must match if request isn't for all + if ((notifyIndex != OS_EVENT_NOTIFICATION_INDEX_ALL) && + (pNotifyEvent->NotifyIndex != notifyIndex)) + { + continue; + } + + switch (pNotifyEvent->NotifyType) + { + case NV01_EVENT_OS_EVENT: + { + nv_event_t *event = NvP64_VALUE(pNotifyEvent->Data); + postEvent(event, + pNotifyEvent->hEvent, + pNotifyEvent->NotifyIndex, + 0, 0, + pNotifyEvent->bEventDataRequired); + break; + } + + case NV01_EVENT_KERNEL_CALLBACK_EX: + { + NVOS10_EVENT_KERNEL_CALLBACK_EX *kc = (NVOS10_EVENT_KERNEL_CALLBACK_EX *)NvP64_VALUE(pNotifyEvent->Data); + + if (kc && kc->func) + { + kc->func(kc->arg, pEventData, pNotifyEvent->hEvent, 0, NV_OK); + } + break; + } + + default: + break; + } + } + + return rmStatus; +} + +NV_STATUS osReferenceObjectCount(void *pEvent) +{ + nv_state_t *nv = nv_get_ctl_state(); + nv_event_t *event = pEvent; + + portSyncSpinlockAcquire(nv->event_spinlock); + // If event->active is false, don't allow any more reference + if (!event->active) + { + portSyncSpinlockRelease(nv->event_spinlock); + return NV_ERR_INVALID_EVENT; + } + ++event->refcount; + portSyncSpinlockRelease(nv->event_spinlock); + return NV_OK; +} + +NV_STATUS osDereferenceObjectCount(void *pOSEvent) +{ + nv_state_t *nv = nv_get_ctl_state(); + nv_event_t *event = pOSEvent; + + portSyncSpinlockAcquire(nv->event_spinlock); + NV_ASSERT(event->refcount > 0); + // If event->refcount == 0 but event->active is true, the client + // has not yet freed the OS event. free_os_event will free its + // memory when they do, or else when the client itself is freed. 
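+ // (Hence the memory is released by whichever side finishes last: the final + // dereference frees an already-inactive event, and free_os_event covers the + // case with no outstanding references.)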
+ if (--event->refcount == 0 && !event->active) + portMemFree(event); + portSyncSpinlockRelease(nv->event_spinlock); + + return NV_OK; +} + +NV_STATUS osUserHandleToKernelPtr(NvHandle hClient, NvP64 hEvent, NvP64 *pEvent) +{ + nv_state_t *nv = nv_get_ctl_state(); + NvU32 fd = (NvU64)hEvent; + NV_STATUS result; + + portSyncSpinlockAcquire(nv->event_spinlock); + nv_event_t *e = nv->event_list; + while (e != NULL) + { + if (e->fd == fd && e->hParent == hClient) + break; + e = e->next; + } + + if (e != NULL) + { + ++e->refcount; + *pEvent = NV_PTR_TO_NvP64(e); + result = NV_OK; + } + else + result = NV_ERR_OBJECT_NOT_FOUND; + portSyncSpinlockRelease(nv->event_spinlock); + + return result; +} + +NV_STATUS osFlushCpuCache(void) +{ + return os_flush_cpu_cache_all(); +} + +void osFlushCpuWriteCombineBuffer(void) +{ + os_flush_cpu_write_combine_buffer(); +} + + +// +// Evict GPU memory range from the CPU caches. +// +// On some platforms (e.g. P9+V100), the CPU can coherently cache GPU memory +// and RM takes advantage of that. Most everything is handled transparently, +// but there are two exceptions that require explicitly flushing any CPU cache +// lines of GPU memory. These are: +// +// 1) Flushing memory backing ACR regions before they get locked. +// +// Otherwise the cache could get flushed while the regions are locked causing a +// region violation physical fault. See more details in +// acrFlushRegionsFromGpuCoherentCpuCache_IMPL(). +// +// 2) Flushing all of FB before GPU reset (NVLink going down) +// +// Leaving cache entries on the CPU causes fatal errors when the CPU tries +// flushing them later while the link is down. See more details in +// nvlinkStatePostUnload_IMPL(). +// +void osFlushGpuCoherentCpuCacheRange +( + OS_GPU_INFO *pOsGpuInfo, + NvU64 cpuVirtual, + NvU64 size +) +{ + nv_flush_coherent_cpu_cache_range(pOsGpuInfo, cpuVirtual, size); +} + +void osErrorLogV(OBJGPU *pGpu, XidContext context, const char * pFormat, va_list arglist) +{ + NV_STATUS rmStatus; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if ((pFormat == NULL) || (*pFormat == '\0')) + { + return; + } + + rmStatus = nv_log_error(nv, context.xid, pFormat, arglist); + NV_ASSERT(rmStatus == NV_OK); +} + +void osErrorLog(OBJGPU *pGpu, NvU32 num, const char* pFormat, ...) 
+{ + va_list arglist; + va_start(arglist, pFormat); + osErrorLogV(pGpu, (XidContext){.xid = num}, pFormat, arglist); + va_end(arglist); +} + +NvU32 +osPollHotkeyState +( + OBJGPU *pGpu +) +{ + return 0; +} + +void osDevWriteReg008( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress, + NvV8 thisValue +) +{ + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + return; + } + + NV_PRIV_REG_WR08(pMapping->gpuNvAddr, thisAddress, thisValue); +} + +void osDevWriteReg016( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress, + NvV16 thisValue +) +{ + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + return; + } + + NV_PRIV_REG_WR16(pMapping->gpuNvAddr, thisAddress, thisValue); +} + +void osDevWriteReg032( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress, + NvV32 thisValue +) +{ + NvBool vgpuHandled = NV_FALSE; + + if (vgpuHandled) + { + return; + } + + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + return; + } + + NV_PRIV_REG_WR32(pMapping->gpuNvAddr, thisAddress, thisValue); +} + +NvU8 osDevReadReg008( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress +) +{ + NvU8 retval = 0; + + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + } + else + retval = NV_PRIV_REG_RD08(pMapping->gpuNvAddr, thisAddress); + + return retval; +} + +NvU16 osDevReadReg016( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress +) +{ + NvU16 retval = 0; + + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + } + else + retval = NV_PRIV_REG_RD16(pMapping->gpuNvAddr, thisAddress); + + return retval; +} + +NvU32 osDevReadReg032( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress +) +{ + NvU32 retval = 0; + NvBool vgpuHandled = NV_FALSE; + + if (vgpuHandled) + { + return retval; + } + + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + } + else + retval = NV_PRIV_REG_RD32(pMapping->gpuNvAddr, thisAddress); + + return retval; +} + +NV_STATUS osReadRegistryDwordBase( + OBJGPU *pGpu, + const char *regParmStr, + NvU32 *Data +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return RmReadRegistryDword(nv, regParmStr, Data); +} + +NV_STATUS osWriteRegistryDword( + OBJGPU *pGpu, + const char *regParmStr, + NvU32 Data +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return RmWriteRegistryDword(nv, regParmStr, Data); +} + +NV_STATUS osReadRegistryBinary( + OBJGPU *pGpu, + const char *regParmStr, + NvU8 *Data, + NvU32 *cbLen +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return RmReadRegistryBinary(nv, regParmStr, Data, cbLen); +} + +NV_STATUS osWriteRegistryBinary( + OBJGPU *pGpu, + const char *regParmStr, + NvU8 *Data, + NvU32 cbLen +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return RmWriteRegistryBinary(nv, regParmStr, Data, cbLen); +} + +NV_STATUS osWriteRegistryVolatile( + OBJGPU *pGpu, + const char *regParmStr, + NvU8 *Data, + NvU32 cbLen +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osReadRegistryVolatile +( + OBJGPU *pGpu, + const char *regParmStr, + NvU8 *Data, + NvU32 cbLen +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osReadRegistryVolatileSize +( + OBJGPU *pGpu, + const char *regParmStr, + NvU32 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osReadRegistryStringBase( + OBJGPU *pGpu, + const char *regParmStr, + NvU8 *buffer, + NvU32 
*pBufferLength +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return RmReadRegistryString(nv, regParmStr, buffer, pBufferLength); +} + +NvU32 osGetCpuCount(void) +{ + return os_get_cpu_count(); // Total number of logical CPUs. +} + +NvU32 osGetCurrentProcessorNumber(void) +{ + return os_get_cpu_number(); +} + +void osGetTimeoutParams(OBJGPU *pGpu, NvU32 *pTimeoutUs, NvU32 *pScale, NvU32 *pFlags) +{ + NvU32 gpuMode = gpuGetMode(pGpu); + + NV_ASSERT((NV_GPU_MODE_GRAPHICS_MODE == gpuMode) || + (NV_GPU_MODE_COMPUTE_MODE == gpuMode)); + + if (hypervisorIsVgxHyper()) + { + if (IS_GSP_CLIENT(pGpu) && pGpu->getProperty(pGpu, PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX) && + !pGpu->gspRmInitialized) + { + // + // For Hopper, 1.8 seconds is not enough to boot GSP-RM. + // To avoid this issue, a 4 second timeout is used during initialization, + // and it is then reduced back to 1.8 seconds once GSP-RM has been initialized. + // + *pTimeoutUs = 4 * 1000000; + } + else + { + // + // 1.8 seconds is chosen because it is 90% of the overall hard limit of 2.0 + // seconds imposed by WDDM driver rules. + // Currently the primary use case of VGX is Windows, so 1.8 seconds is the default. + // + *pTimeoutUs = 1.8 * 1000000; + } + } + else + { + switch (gpuMode) + { + default: + case NV_GPU_MODE_GRAPHICS_MODE: + *pTimeoutUs = 4 * 1000000; + break; + + case NV_GPU_MODE_COMPUTE_MODE: + *pTimeoutUs = 30 * 1000000; + break; + } + + } + + *pFlags = GPU_TIMEOUT_FLAGS_OSTIMER; + + *pScale = 1; + if (IS_EMULATION(pGpu) || IS_SIMULATION(pGpu)) + { + *pScale = 60; // 1s -> 1m + } + + return; +} + +void osFlushLog(void) +{ + // Not implemented +} + +static NvU32 _osGetTegraPlatform(void) +{ + NV_STATUS status; + NvU32 mode; + + status = os_get_tegra_platform(&mode); + if (status != NV_ERR_NOT_SUPPORTED) + { + return mode; + } + + return NV_OS_TEGRA_PLATFORM_SILICON; +} + +NvU32 osGetSimulationMode(void) +{ + NvU32 mode; + + switch (_osGetTegraPlatform()) + { + case NV_OS_TEGRA_PLATFORM_SIM: + mode = NV_SIM_MODE_CMODEL; + break; + case NV_OS_TEGRA_PLATFORM_FPGA: + mode = NV_SIM_MODE_TEGRA_FPGA; + break; + case NV_OS_TEGRA_PLATFORM_SILICON: + default: + mode = NV_SIM_MODE_HARDWARE; + break; + } + + return mode; +} + +NV_STATUS +cliresCtrlCmdOsUnixFlushUserCache_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *pAddressSpaceParams +) +{ + Memory *pMemory; + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 start, end; + NvBool bInvalidateOnly; + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + memGetByHandle(RES_GET_CLIENT(pRmCliRes), + pAddressSpaceParams->hObject, + &pMemory)); + + pMemDesc = pMemory->pMemDesc; + + if (memdescGetAddressSpace(pMemDesc) != ADDR_SYSMEM) + { + NV_PRINTF(LEVEL_ERROR, "%s: wrong address space %d\n", + __FUNCTION__, memdescGetAddressSpace(pMemDesc)); + return NV_ERR_INVALID_COMMAND; + } + + if (memdescGetCpuCacheAttrib(pMemDesc) != NV_MEMORY_CACHED) + { + NV_PRINTF(LEVEL_ERROR, "%s: wrong caching type %d\n", + __FUNCTION__, memdescGetCpuCacheAttrib(pMemDesc)); + return NV_ERR_INVALID_COMMAND; + } + + start = pAddressSpaceParams->offset; + end = start + pAddressSpaceParams->length; + + switch(pAddressSpaceParams->cacheOps) + { + case NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH_INVALIDATE: + case NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH: + bInvalidateOnly = NV_FALSE; + break; + + case NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_INVALIDATE: + bInvalidateOnly = NV_TRUE; + break; + + default: + NV_PRINTF(LEVEL_ERROR, "%s: cacheOps not specified\n", __FUNCTION__); + return 
NV_ERR_INVALID_COMMAND; + } + + if ((end - start) > pMemory->Length) + { + NV_PRINTF(LEVEL_ERROR, + "%s: end address 0x%llx exceeded buffer length: 0x%llx\n", + __FUNCTION__, end, pMemory->Length); + return NV_ERR_INVALID_LIMIT; + } + + if (bInvalidateOnly) + { + // + // XXX: this seems fishy - I'm not sure if invalidating by the kernel + // VA only as nv_dma_cache_invalidate() does here is sufficient for + // this control call. + // pAddressSpaceParams->internalOnly is expected to be the RM client + // VA for this control call; if we wanted to invalidate the user VA we + // could do so using that. + // + // For I/O coherent platforms this won't actually do anything. + // On non-I/O-coherent platforms, there's no need to do a second + // invalidation after the full flush. + // + nv_state_t *nv = NV_GET_NV_STATE(pMemDesc->pGpu); + if (nv->iovaspace_id != NV_IOVA_DOMAIN_NONE) + { + PIOVAMAPPING pIovaMapping = memdescGetIommuMap(pMemDesc, nv->iovaspace_id); + // + // This should only be called for devices that map memory descriptors + // through the nv-dma library, where the memory descriptor data + // contains all the kernel-specific context we need for the + // invalidation. + // + // (These checks match those in osIovaUnmap() leading up to + // nv_dma_unmap_alloc()). + // + if (pIovaMapping == NULL || + pIovaMapping->pOsData == NULL || + memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED) || + memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_PEER_IO_MEM)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + nv_dma_cache_invalidate(nv->dma_dev, pIovaMapping->pOsData); + } + else + { + return NV_ERR_INVALID_ARGUMENT; + } + } + else + { + return os_flush_user_cache(); + } + + return NV_OK; +} + +static NV_STATUS +_initializeExportObjectFd +( + nv_file_private_t *nvfp, + NvHandle hClient, + NvHandle hDevice, + NvU16 maxObjects, + NvU8 *metadata +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + Device *pDevice; + NvU32 deviceInstance = NV_MAX_DEVICES; + NvU32 gpuInstanceId = NV_U32_MAX; + + if (nvfp->handles != NULL) + { + return NV_ERR_STATE_IN_USE; + } + + if (hDevice != 0) + { + + status = serverutilGetResourceRef(hClient, hDevice, &pResourceRef); + if (status != NV_OK) + { + return status; + } + + pDevice = dynamicCast(pResourceRef->pResource, Device); + if (pDevice == NULL) + { + return NV_ERR_INVALID_PARAMETER; + } + + deviceInstance = pDevice->deviceInst; + } + + NV_ASSERT_OK_OR_RETURN(os_alloc_mem((void **)&nvfp->handles, + sizeof(nvfp->handles[0]) * maxObjects)); + + os_mem_set(nvfp->handles, 0, + sizeof(nvfp->handles[0]) * maxObjects); + + nvfp->maxHandles = maxObjects; + nvfp->deviceInstance = deviceInstance; + nvfp->gpuInstanceId = gpuInstanceId; + + if (metadata != NULL) + { + os_mem_copy(nvfp->metadata, metadata, sizeof(nvfp->metadata)); + } + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdOsUnixExportObjectToFd_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RmObjExportHandle hExportHandle = 0; + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + NV_STATUS status = NV_OK; + + /* + * This flag is intended to be implemented entirely in the rmapi library in + * userspace, we should never encounter it here. 
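+ * (If the flag does reach RM anyway, the request is rejected below with + * NV_ERR_INVALID_PARAMETER rather than silently ignored.)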
+ */ + if (FLD_TEST_DRF(0000_CTRL, _OS_UNIX_EXPORT_OBJECT_TO_FD_FLAGS, + _EMPTY_FD, _TRUE, pParams->flags)) + { + return NV_ERR_INVALID_PARAMETER; + } + + if (pParams->object.type != NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM || + pParams->fd == -1) + { + return NV_ERR_INVALID_PARAMETER; + } + + status = RmExportObject(hClient, + pParams->object.data.rmObject.hObject, + &hExportHandle, NULL); + if (status != NV_OK) + { + goto done; + } + NV_ASSERT(hExportHandle != 0); + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + status = _initializeExportObjectFd(nvfp, hClient, + pParams->object.data.rmObject.hDevice, + 1, NULL); + if (status != NV_OK) + { + goto done; + } + + nvfp->handles[0] = hExportHandle; + +done: + + if (status != NV_OK && hExportHandle != 0) + { + RmFreeObjExportHandle(hExportHandle); + } + + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +// This control call has been deprecated. It will be deleted soon. +NV_STATUS +cliresCtrlCmdOsUnixCreateExportObjectFd_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *pParams +) +{ + NV_STATUS status; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + + ct_assert(sizeof(nvfp->metadata) == sizeof(pParams->metadata)); + + if (pParams->maxObjects == 0) + { + return NV_ERR_INVALID_PARAMETER; + } + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + status = _initializeExportObjectFd(nvfp, hClient, pParams->hDevice, + pParams->maxObjects, pParams->metadata); + +done: + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +NV_STATUS +cliresCtrlCmdOsUnixExportObjectsToFd_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RmObjExportHandle *pExportHandle; + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + NV_STATUS status = NV_OK; + NvU32 i; + NvU32 deviceInstance; + NvU32 result; + NvHandle *exportHandles = NULL; + NvBool bFdSetup = NV_FALSE; + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + ct_assert(sizeof(nvfp->metadata) == sizeof(pParams->metadata)); + + /* Setup export FD if not done */ + if (nvfp->handles == NULL) + { + if (pParams->maxObjects == 0) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + status = _initializeExportObjectFd(nvfp, hClient, pParams->hDevice, + pParams->maxObjects, + pParams->metadata); + if (status != NV_OK) + { + goto done; + } + + bFdSetup = NV_TRUE; + } + + if ((nvfp->handles == NULL) || + (pParams->numObjects > + NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_MAX_OBJECTS)) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + if (!portSafeAddU32(pParams->numObjects, pParams->index, &result) || + (result > nvfp->maxHandles)) + { + status = NV_ERR_OUT_OF_RANGE; + goto done; + } + + status = os_alloc_mem((void **)&exportHandles, + sizeof(exportHandles[0]) * + pParams->numObjects); + if (status != NV_OK) + { + goto done; + } + + for (i = 0; i < pParams->numObjects; i++) + { + exportHandles[i] = 0; + + if (pParams->objects[i] == 0) + { + continue; + } + + status = 
RmExportObject(hClient, + pParams->objects[i], + &exportHandles[i], + &deviceInstance); + if (status != NV_OK) + { + goto done; + } + + NV_ASSERT(exportHandles[i] != 0); + + if (deviceInstance != nvfp->deviceInstance) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + } + + for (i = 0; i < pParams->numObjects; i++) + { + pExportHandle = &nvfp->handles[i + pParams->index]; + + // If the handle already exists in this position, free it + if (*pExportHandle != 0) + { + RmFreeObjExportHandle(*pExportHandle); + *pExportHandle = 0; + } + + *pExportHandle = exportHandles[i]; + } + +done: + + if ((status != NV_OK) && (exportHandles != NULL)) + { + for (i = 0; i < pParams->numObjects; i++) + { + if (exportHandles[i] != 0) + { + RmFreeObjExportHandle(exportHandles[i]); + } + } + } + + if (exportHandles != NULL) + { + os_free_mem(exportHandles); + } + + if ((status != NV_OK) && bFdSetup) + { + os_free_mem(nvfp->handles); + nvfp->handles = NULL; + nvfp->maxHandles = 0; + } + + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +NV_STATUS +cliresCtrlCmdOsUnixImportObjectFromFd_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + NV_STATUS status = NV_OK; + + if (pParams->object.type != NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM || + pParams->fd == -1) + { + return NV_ERR_INVALID_PARAMETER; + } + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + if ((nvfp->handles == NULL) || (nvfp->handles[0] == 0) || + (nvfp->maxHandles < 1)) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + status = RmImportObject(hClient, + pParams->object.data.rmObject.hParent, + &pParams->object.data.rmObject.hObject, + nvfp->handles[0], NULL); + +done: + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +NV_STATUS +cliresCtrlCmdOsUnixImportObjectsFromFd_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + NV_STATUS status = NV_OK; + NvU32 i = 0; + RmObjExportHandle hImportHandle; + NvU32 result; + RM_API *pRmApi; + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + if ((nvfp->handles == NULL) || + (pParams->numObjects > + NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_TO_FD_MAX_OBJECTS)) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + if (!portSafeAddU32(pParams->numObjects, pParams->index, &result) || + (result > nvfp->maxHandles)) + { + status = NV_ERR_OUT_OF_RANGE; + goto done; + } + + for (i = 0; i < pParams->numObjects; i++) + { + hImportHandle = nvfp->handles[i + pParams->index]; + + /* Nothing to import, just continue */ + if (hImportHandle == 0) + { + pParams->objectTypes[i] = \ + NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_NONE; + continue; + } + + status = RmImportObject(hClient, + pParams->hParent, + &pParams->objects[i], + hImportHandle, + &pParams->objectTypes[i]); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "%s: Unable to import handle (%x, %x, %x)\n", + __FUNCTION__, pParams->hParent, pParams->objects[i], hImportHandle); + goto done; + } + } + +done: + + if (status != NV_OK) + { 
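+ // Unwind: free, in reverse order, every object imported before the failure.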
+ pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + while (i > 0) + { + i--; + + if (pParams->objects[i] != 0) + { + pRmApi->Free(pRmApi, hClient, pParams->objects[i]); + } + } + } + + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +NV_STATUS +cliresCtrlCmdOsUnixGetExportObjectInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *pParams +) +{ + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + NV_STATUS status = NV_OK; + + if (pParams->fd < 0) + { + return NV_ERR_INVALID_PARAMETER; + } + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + if (nvfp->handles == NULL) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + pParams->maxObjects = nvfp->maxHandles; + pParams->deviceInstance = nvfp->deviceInstance; + pParams->gpuInstanceId = nvfp->gpuInstanceId; + + os_mem_copy(pParams->metadata, nvfp->metadata, sizeof(nvfp->metadata)); + +done: + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +/*! + * osAcpiDsm + * + * @brief Handles OS-specific _DSM method calls. + * + * Input parameters: + * @param[in] pGpu : OBJGPU pointer + * @param[in] acpiDsmFunction : ACPI DSM function + * @param[in] acpiDsmSubFunction : ACPI DSM subfunction + * @param[in/out] pInOut : in/out buffer; the caller should make sure the buffer is large enough. + * @param[in] pSize : on input, the size of the data the caller wants to read, in bytes; + * on output, the size of the valid data in pInOut, in bytes. + */ +NV_STATUS osCallACPI_DSM +( + OBJGPU *pGpu, + ACPI_DSM_FUNCTION acpiDsmFunction, + NvU32 acpiDsmSubFunction, + NvU32 *pInOut, + NvU16 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCallACPI_DOD +( + OBJGPU *pGpu, + NvU32 *pOut, + NvU32 *pSize +) +{ + NV_STATUS rmStatus; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if ((pOut == NULL) || (pSize == NULL)) + { + return NV_ERR_INVALID_POINTER; + } + + rmStatus = nv_acpi_dod_method(nv, pOut, pSize); + + return rmStatus; +} + +// +// osAcpiDdc +// +// Handles OS-specific _DDC method calls. _DDC is used to get the EDID from the SBIOS. +// +NV_STATUS osCallACPI_DDC +( + OBJGPU *pGpu, + NvU32 ulAcpiId, + NvU8 *pOutData, + NvU32 *pOutSize, + NvBool bReadMultiBlock +) +{ + NV_STATUS rmStatus; + + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if ((pOutData == NULL) || (pOutSize == NULL)) + { + return NV_ERR_INVALID_POINTER; + } + + portMemSet(pOutData, 0, *pOutSize); + + rmStatus = nv_acpi_ddc_method(nv, pOutData, pOutSize, bReadMultiBlock); + + return rmStatus; +} + +// osCallACPI_NVHG_ROM +// Makes an ACPI call into the SBIOS with the ROM method to get the display device's ROM data. 
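+// (Per the ACPI _ROM convention, pInData[0] carries the byte offset and +// pInData[1] the requested size; the size is validated below against +// ROM_METHOD_MAX_RETURN_BUFFER_SIZE.)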
+// +NV_STATUS osCallACPI_NVHG_ROM +( + OBJGPU *pGpu, + NvU32 *pInData, + NvU32 *pOutData +) +{ + NV_STATUS rmStatus; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if ((pOutData == NULL) || (pInData == NULL)) + { + return NV_ERR_INVALID_POINTER; + } + + if (pInData[1] > ROM_METHOD_MAX_RETURN_BUFFER_SIZE) + { + return NV_ERR_INVALID_ARGUMENT; + } + + rmStatus = nv_acpi_rom_method(nv, pInData, pOutData); + + return rmStatus; +} + +void osInitSystemStaticConfig(SYS_STATIC_CONFIG *pConfig) +{ + pConfig->bIsNotebook = rm_is_system_notebook(); + pConfig->bOsCCEnabled = os_cc_enabled; + pConfig->bOsCCSevSnpEnabled = os_cc_sev_snp_enabled; + pConfig->bOsCCSmeEnabled = os_cc_sme_enabled; + pConfig->bOsCCSnpVtomEnabled = os_cc_snp_vtom_enabled; + pConfig->bOsCCTdxEnabled = os_cc_tdx_enabled; +} + +NvU32 osApiLockAcquireConfigureFlags(NvU32 flags) +{ + return flags; +} + +NV_STATUS osGpuLocksQueueRelease(OBJGPU *pGpu, NvU32 dpcGpuLocksRelease) +{ + return NV_SEMA_RELEASE_FAILED; +} + +void osSyncWithRmDestroy(void) +{ +} + +void osSyncWithGpuDestroy(NvBool bEntry) +{ +} + +void osModifyGpuSwStatePersistence +( + OS_GPU_INFO *pOsGpuInfo, + NvBool bEnable +) +{ + if (bEnable) + { + pOsGpuInfo->flags |= NV_FLAG_PERSISTENT_SW_STATE; + } + else + { + pOsGpuInfo->flags &= ~NV_FLAG_PERSISTENT_SW_STATE; + } +} + +// +//osCallACPI_MXDS +// +//Handles OS specific MXDS function call. +// +NV_STATUS osCallACPI_MXDS +( + OBJGPU *pGpu, + NvU32 acpiId, + NvU32 *pInOut +) +{ + NV_STATUS rmStatus; + + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if (pInOut == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + rmStatus = nv_acpi_mux_method(nv, pInOut, acpiId, "MXDS"); + + return rmStatus; +} + +// +//osCallACPI_MXDM +// +//Handles OS specific MXDM function call. +// +NV_STATUS osCallACPI_MXDM +( + OBJGPU *pGpu, + NvU32 acpiId, + NvU32 *pInOut +) +{ + NV_STATUS rmStatus; + + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if (pInOut == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + rmStatus = nv_acpi_mux_method(nv, pInOut, acpiId, "MXDM"); + + return rmStatus; +} + +NV_STATUS osGetVersionDump(void * pVoid) +{ + return NV_OK; +} + +NV_STATUS osTegraiGpuPerfBoost(OBJGPU *pGpu, NvBool enable, NvU32 duration) +{ + return os_tegra_igpu_perf_boost(pGpu->pOsGpuInfo, enable, duration); +} + +NV_STATUS osGetVersion(NvU32 *majorVer, NvU32 *minorVer, NvU32 *buildNum, NvU16 *unusedPatchVersion, NvU16 *unusedProductType) +{ + os_version_info osVersionInfo; + NV_STATUS rmStatus; + + portMemSet(&osVersionInfo, 0, sizeof(osVersionInfo)); + + rmStatus = os_get_version_info(&osVersionInfo); + if (rmStatus == NV_OK) + { + if (majorVer) + *majorVer = osVersionInfo.os_major_version; + if (minorVer) + *minorVer = osVersionInfo.os_minor_version; + if (buildNum) + *buildNum = osVersionInfo.os_build_number; + } + + return rmStatus; +} + +NV_STATUS osGetIsOpenRM(NvBool *bOpenRm) +{ + return os_get_is_openrm(bOpenRm); +} + +NV_STATUS +osGetCarveoutInfo +( + NvU64 *pAddr, + NvU64 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osGetVPRInfo +( + NvU64 *pAddr, + NvU64 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osAllocInVPR +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osGetGenCarveout +( + NvU64 *pAddr, + NvU64 *pSize, + NvU32 id, + NvU64 align +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osI2CClosePorts +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 numPorts +) +{ + nv_i2c_unregister_clients(pOsGpuInfo); + return NV_OK; +} + +static NvU32 +i2cSwPortMapping( + nv_state_t *nv, + 
NvU32 physicalI2CPort +) +{ + NvU32 linuxI2CSwPort = NV_U32_MAX; + + /** + * For T23X, Linux Tegra I2C controller driver uses logical port(controller) number + * where logical port number of I2C1(Gen1) controller is 0, logical port number for + * I2C2(Gen2) controller is 1 and so on. But RM passes I2C physical port(controller) + * number i.e RM passes "1" for I2C1(Gen1), 2 for I2C2(Gen2), etc. So convert + * physical port number to logical port number(linuxI2CSwPort). + * + * For other chips, the above mentioned logic does not apply and we do not need + * conversion as the physical controller number was updated to 0 and RM passes the + * same. + */ + if ((nv->disp_sw_soc_chip_id == NV_CHIP_ID_T234) + ) + { + linuxI2CSwPort = physicalI2CPort - 1; + } + else + { + linuxI2CSwPort = physicalI2CPort; + } + + return linuxI2CSwPort; +} + +NV_STATUS +osI2CTransfer +( + OBJGPU *pGpu, + NvU32 Port, + NvU8 Address, + nv_i2c_msg_t *nv_i2c_msgs, + NvU32 count +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + if (NV_IS_SOC_DISPLAY_DEVICE(nv)) + { + Port = i2cSwPortMapping(nv, Port); + return nv_i2c_transfer(nv, Port, Address, + nv_i2c_msgs, count); + } + else + { + return NV_ERR_NOT_SUPPORTED; + } +} + +NV_STATUS +osTegraI2CGetBusState +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 port, + NvS32 *scl, + NvS32 *sda +) +{ + if (NV_IS_SOC_DISPLAY_DEVICE(pOsGpuInfo)) + { + port = i2cSwPortMapping(pOsGpuInfo, port); + return nv_i2c_bus_status(pOsGpuInfo, port, scl, sda); + } + else + { + return NV_ERR_NOT_SUPPORTED; + } +} + +NV_STATUS +osReadI2CBufferDirect +( + OBJGPU *pGpu, + NvU32 Port, + NvU8 Address, + void *pOutputBuffer, + NvU32 OutputSize, + void *pInputBuffer, + NvU32 InputSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osWriteI2CBufferDirect +( + OBJGPU *pGpu, + NvU32 Port, + NvU8 Address, + void *pOutputBuffer0, + NvU32 OutputSize0, + void *pOutputBuffer1, + NvU32 OutputSize1 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osGC6PowerControl +( + OBJGPU *pGpu, + NvU32 cmd, + NvU32 *pOut +) +{ + NV_STATUS status; + NvU32 inOut = cmd; + NvU16 rtnSize = sizeof(inOut); + + if (FLD_TEST_DRF(_JT_FUNC, _POWERCONTROL, _GPU_POWER_CONTROL, _GSS, inOut)) + { + if (!pOut) + { + return NV_ERR_INVALID_ARGUMENT; + } + } + + status = osCallACPI_DSM(pGpu, + ACPI_DSM_FUNCTION_JT, + JT_FUNC_POWERCONTROL, + &inOut, + &rtnSize); + + if ((status != NV_OK) || !pOut) + { + return status; + } + + *pOut = inOut; + + return NV_OK; +} + +static NvBool skipIovaMappingForTegra +( + PIOVAMAPPING pIovaMapping, + nv_state_t *nv +) +{ + // + // TODO: When ISO SMMU is not present, dma mapping of imported ISO memory + // causes crash during __clean_dcache_area_poc. dma mapping of ISO + // memory allocated by RM (via __get_free_pages) still works. + // Skip dma mapping of imported ISO memory to unblock Tegra Display in + // AV+L. Bug 200765629 + // + + NV_ASSERT(nv != NULL); + + if (NV_IS_SOC_DISPLAY_DEVICE(nv) && + !NV_SOC_IS_ISO_IOMMU_PRESENT(nv) && + !memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO) && + memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM)) + { + NV_PRINTF(LEVEL_INFO, "%s: Skip memdescMapIommu mapping\n", __FUNCTION__); + return NV_TRUE; + } + + return NV_FALSE; +} + +/*! + * @brief Map memory into an IOVA space according to the given mapping info. 
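+ * (Guest-allocated and carveout memory are passed through without any + * remapping, since the physical address is already the DMA address that the + * GPU will use.)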
+ * + * @param[in] pIovaMapping IOVA mapping info + * + * @return NV_ERR_NOT_SUPPORTED + */ +NV_STATUS +osIovaMap +( + PIOVAMAPPING pIovaMapping +) +{ + OBJGPU *pGpu; + nv_state_t *nv, *peer; + NV_STATUS status; + RmPhysAddr base; + NvBool bIsBar0; + PMEMORY_DESCRIPTOR pRootMemDesc; + NvBool bIsFbOffset = NV_FALSE; + NvBool bIsIndirectPeerMapping = NV_FALSE; + NvBool bIsContig; + NV_ADDRESS_SPACE addressSpace; + NvU32 osPageCount; + + if (pIovaMapping == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pGpu = gpumgrGetGpuFromId(pIovaMapping->iovaspaceId); + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pRootMemDesc = memdescGetRootMemDesc(pIovaMapping->pPhysMemDesc, NULL); + addressSpace = memdescGetAddressSpace(pIovaMapping->pPhysMemDesc); + if (gpumgrCheckIndirectPeer(pGpu, pRootMemDesc->pGpu) && + (addressSpace == ADDR_FBMEM)) + { + bIsIndirectPeerMapping = NV_TRUE; + } + + if ((addressSpace != ADDR_SYSMEM) && !bIsIndirectPeerMapping) + { + NV_PRINTF(LEVEL_INFO, + "%s passed memory descriptor in an unsupported address space (%s)\n", + __FUNCTION__, + memdescGetApertureString(memdescGetAddressSpace(pIovaMapping->pPhysMemDesc))); + return NV_ERR_NOT_SUPPORTED; + } + + // + // For guest-allocated memory, we don't actually want to do any remapping, + // since the physical address is already the DMA address to be used by the + // GPU. + // + // For carveout memory, we setup identity mapping, so physical + // address is same as the DMA address. + // + // + if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_ALLOC_FROM_SCANOUT_CARVEOUT) || + memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED)) + { + return NV_OK; + } + + nv = NV_GET_NV_STATE(pGpu); + + // + // Intercept peer IO type memory. These are contiguous allocations, so no + // need to adjust pages. + // + if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_PEER_IO_MEM)) + { + NV_ASSERT(memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU)); + + status = nv_dma_map_mmio(nv->dma_dev, + NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount), + &pIovaMapping->iovaArray[0]); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: failed to map peer IO mem (status = 0x%x)\n", + __FUNCTION__, status); + } + + return status; + } + + // + // We need to check against the "root" GPU, e.g., the GPU that owns this + // allocation. If we're trying to map one of its BARs for a peer, we need + // to handle it differently because it wouldn't have gone through our system + // memory page allocation paths, obviously, and wouldn't have alloc private + // data associated with it. 
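+ // (bIsBar0 and bIsFbOffset below detect that case by comparing the physical + // range against the root GPU's register and framebuffer apertures.)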
+ // + peer = NV_GET_NV_STATE(pRootMemDesc->pGpu); + bIsContig = memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU); + if (NV_RM_PAGE_SIZE < os_page_size && !bIsContig) + { + RmDeflateRmToOsPageArray(&pIovaMapping->iovaArray[0], + pIovaMapping->pPhysMemDesc->PageCount); + } + + base = memdescGetPhysAddr(pIovaMapping->pPhysMemDesc, AT_CPU, 0); + bIsBar0 = IS_REG_OFFSET(peer, base, pIovaMapping->pPhysMemDesc->Size); + + bIsFbOffset = IS_FB_OFFSET(peer, base, pIovaMapping->pPhysMemDesc->Size); + + void *pPriv = memdescGetMemData(pIovaMapping->pPhysMemDesc); + osPageCount = NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount); + + if (!bIsBar0 && !bIsFbOffset) + { + if (pPriv == NULL) + { + return NV_ERR_INVALID_STATE; + } + } + + if (skipIovaMappingForTegra(pIovaMapping, nv)) + { + return NV_OK; + } + + if (!bIsBar0 && (!bIsFbOffset || bIsIndirectPeerMapping)) + { + status = nv_dma_map_alloc( + osGetDmaDeviceForMemDesc(nv, pIovaMapping->pPhysMemDesc), + osPageCount, + &pIovaMapping->iovaArray[0], + bIsContig, &pPriv); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: failed to map allocation (status = 0x%x)\n", + __FUNCTION__, status); + return status; + } + + pIovaMapping->pOsData = pPriv; + } + else if (peer != nv) + { + status = nv_dma_map_peer(nv->dma_dev, peer->dma_dev, bIsBar0 ? 0 : 1, + osPageCount, &pIovaMapping->iovaArray[0]); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "%s: failed to map peer (base = 0x%llx, status = 0x%x)\n", + __FUNCTION__, base, status); + return status; + } + + // + // pOsData must be NULL to distinguish a peer DMA mapping from a + // system memory mapping in osIovaUnmap(), so make sure to set it + // accordingly here. + // + pIovaMapping->pOsData = NULL; + } + else + { + NV_PRINTF(LEVEL_INFO, "cannot map a GPU's BAR to itself\n"); + return NV_ERR_NOT_SUPPORTED; + } + + // + // If the OS layer doesn't think in RM page size, we need to inflate the + // PTE array into RM pages. + // + if (NV_RM_PAGE_SIZE < os_page_size && !bIsContig) + { + RmInflateOsToRmPageArray(&pIovaMapping->iovaArray[0], + pIovaMapping->pPhysMemDesc->PageCount); + } + + return NV_OK; +} + +/*! + * @brief Unmap memory from an IOVA space according to the given mapping info. + * + * This mapping info must have been previously mapped by osIovaMap(). + * + * @param[in] pIovaMapping IOVA mapping info + * + */ +void +osIovaUnmap +( + PIOVAMAPPING pIovaMapping +) +{ + OBJGPU *pGpu; + nv_state_t *nv; + void *pPriv; + NV_STATUS status; + + if (pIovaMapping == NULL) + { + return; + } + + pGpu = gpumgrGetGpuFromId(pIovaMapping->iovaspaceId); + if (pGpu == NULL) + { + return; + } + + // + // For guest-allocated or carveout memory, we never actually remapped the + // memory, so we shouldn't try to unmap it here. + // + if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_ALLOC_FROM_SCANOUT_CARVEOUT) || + memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED)) + { + return; + } + + nv = NV_GET_NV_STATE(pGpu); + + if (skipIovaMappingForTegra(pIovaMapping, nv)) + { + return; + } + + if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_PEER_IO_MEM)) + { + nv_dma_unmap_mmio(nv->dma_dev, + NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount), + pIovaMapping->iovaArray[0]); + + return; + } + + // + // TODO: Formalize the interface with the OS layers so we can use a common + // definition of OS_IOVA_MAPPING_DATA. 
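+ // (For now, pOsData is an opaque pointer: non-NULL identifies a DMA mapping + // created by nv_dma_map_alloc(), while NULL denotes a peer mapping; see the + // corresponding logic in osIovaMap().)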
+    //
+    pPriv = (void *)pIovaMapping->pOsData;
+
+    if (NV_RM_PAGE_SIZE < os_page_size &&
+        !memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU))
+    {
+        RmDeflateRmToOsPageArray(&pIovaMapping->iovaArray[0],
+                                 pIovaMapping->pPhysMemDesc->PageCount);
+    }
+
+    if (pPriv != NULL)
+    {
+        status = nv_dma_unmap_alloc(nv->dma_dev,
+                                    NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount),
+                                    &pIovaMapping->iovaArray[0], &pPriv);
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "%s: failed to unmap allocation (status = 0x%x)\n",
+                      __FUNCTION__, status);
+        }
+    }
+    else
+    {
+        nv_dma_unmap_peer(nv->dma_dev,
+                          NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount),
+                          pIovaMapping->iovaArray[0]);
+    }
+
+    //
+    // If the OS layer doesn't think in RM page size, we need to inflate the
+    // PTE array back into RM pages.
+    //
+    if (NV_RM_PAGE_SIZE < os_page_size &&
+        !memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU))
+    {
+        RmInflateOsToRmPageArray(&pIovaMapping->iovaArray[0],
+                                 pIovaMapping->pPhysMemDesc->PageCount);
+    }
+
+    pIovaMapping->pOsData = NULL;
+}
+
+/*!
+ * @brief Set the GPU Rail Voltage in Tegra SoC. Currently not supported.
+ *
+ * @param[in]  pGpu            GPU object pointer
+ * @param[in]  reqVoltageuV    Rail Voltage requested in uV
+ * @param[out] pSetVoltageuV   Rail Voltage set in uV
+ *
+ * @return NV_ERR_NOT_SUPPORTED
+ */
+NV_STATUS
+osSetGpuRailVoltage
+(
+    OBJGPU *pGpu,
+    NvU32   reqVoltageuV,
+    NvU32  *pSetVoltageuV
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+/*!
+ * @brief Get the GPU Rail Voltage in Tegra SoC. Currently not supported.
+ *
+ * @param[in]  pGpu         GPU object pointer
+ * @param[out] pVoltageuV   Rail Voltage in uV
+ *
+ * @return NV_ERR_NOT_SUPPORTED
+ */
+NV_STATUS
+osGetGpuRailVoltage
+(
+    OBJGPU *pGpu,
+    NvU32  *pVoltageuV
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+/*!
+ * @brief Bring down the system in a controlled manner on known error conditions.
+ *
+ * @param[in] bugCode    Error code / reason.
+ */
+void osBugCheck(NvU32 bugCode)
+{
+    if (bugCode > OS_BUG_CHECK_BUGCODE_LAST)
+    {
+        bugCode = OS_BUG_CHECK_BUGCODE_UNKNOWN;
+    }
+
+    os_bug_check(bugCode, ppOsBugCheckBugcodeStr[bugCode]);
+}
+
+/*!
+ * @brief Perform an action at assertion failure.
+ */
+void osAssertFailed(void)
+{
+    os_dump_stack();
+}
+
+/*!
+ * @brief Get the GPU Chip Info - Speedo and IDDQ values
+ *
+ * @param[in]  pGpu          GPU object pointer
+ * @param[out] pGpuSpeedoHv  Pointer to GPU Speedo value at high voltage corner.
+ * @param[out] pGpuSpeedoLv  Pointer to GPU Speedo value at low voltage corner.
+ * @param[out] pGpuIddq      Pointer to GPU Iddq Value
+ * @param[out] pChipSkuId    SKU ID for the chip
+ *
+ * @return NV_ERR_NOT_SUPPORTED
+ */
+NV_STATUS
+osGetChipInfo
+(
+    OBJGPU *pGpu,
+    NvU32  *pGpuSpeedoHv,
+    NvU32  *pGpuSpeedoLv,
+    NvU32  *pGpuIddq,
+    NvU32  *pChipSkuId
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+/*
+ * @brief Get the GPU Rail Voltage Info (i.e. Min, Max and StepSize) in Tegra SoC.
+ *
+ * @param[in]  pGpu             GPU object pointer
+ * @param[out] pMinVoltageuV    Minimum Voltage supported on the Rail in Micro Volts
+ * @param[out] pMaxVoltageuV    Maximum Voltage supported on the Rail in Micro Volts
+ * @param[out] pStepVoltageuV   Voltage Step-size supported on the Rail in Micro Volts
+ *
+ * @return NV_ERR_NOT_SUPPORTED
+ */
+NV_STATUS
+osGetGpuRailVoltageInfo
+(
+    OBJGPU *pGpu,
+    NvU32  *pMinVoltageuV,
+    NvU32  *pMaxVoltageuV,
+    NvU32  *pStepVoltageuV
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
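+
+/*
+ * Illustrative usage of the security token helpers below (hypothetical
+ * caller; freeing the returned token is the caller's responsibility):
+ *
+ *     PSECURITY_TOKEN pCurrentToken = osGetSecurityToken();
+ *     if (pCurrentToken != NULL)
+ *     {
+ *         status = osValidateClientTokens(pClientToken, pCurrentToken);
+ *         portMemFree(pCurrentToken);
+ *     }
+ */
+
+/*!
+ * @brief Get the current opaque security token.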
+ *
+ * On Linux, the security token comprises the process's effective UID and
+ * process ID.
+ *
+ * Note: This function allocates memory for the token. The onus is on the
+ * calling function to free the memory associated with the token once it's
+ * done with it.
+ *
+ * @return Pointer to the security token, or NULL on failure.
+ */
+PSECURITY_TOKEN
+osGetSecurityToken(void)
+{
+    NV_STATUS rmStatus;
+    TOKEN_USER *pTokenUser;
+
+    pTokenUser = portMemAllocNonPaged(sizeof(TOKEN_USER));
+    if (pTokenUser == NULL)
+    {
+        return NULL;
+    }
+    rmStatus = os_get_euid(&pTokenUser->euid);
+    if (rmStatus != NV_OK)
+    {
+        portMemFree(pTokenUser);
+        return NULL;
+    }
+
+    pTokenUser->pid = os_get_current_process();
+
+    return (PSECURITY_TOKEN)pTokenUser;
+}
+
+PUID_TOKEN
+osGetCurrentUidToken(void)
+{
+    NV_STATUS rmStatus;
+    NvU32 *pUidToken;
+
+    pUidToken = portMemAllocNonPaged(sizeof(NvU32));
+    if (pUidToken == NULL)
+    {
+        return NULL;
+    }
+
+    rmStatus = os_get_euid(pUidToken);
+    if (rmStatus != NV_OK)
+    {
+        portMemFree(pUidToken);
+        return NULL;
+    }
+
+    return (PUID_TOKEN)pUidToken;
+}
+
+/*!
+ * @brief Interface function to validate the token for the current client
+ *
+ * This function takes two tokens as parameters, validates them, and checks
+ * whether either the PID or the EUID from the client database matches the
+ * current PID or EUID.
+ *
+ * @param[in] pClientSecurityToken    security token cached in the client db
+ * @param[in] pCurrentSecurityToken   security token of the current client
+ * @return NV_OK if the validation is successful
+ *         NV_ERR_INVALID_CLIENT if the tokens do not match
+ *         NV_ERR_INVALID_POINTER if the tokens are invalid
+ */
+NV_STATUS
+osValidateClientTokens
+(
+    PSECURITY_TOKEN pClientSecurityToken,
+    PSECURITY_TOKEN pCurrentSecurityToken
+)
+{
+    PTOKEN_USER pClientTokenUser = (PTOKEN_USER)pClientSecurityToken;
+    PTOKEN_USER pCurrentTokenUser = (PTOKEN_USER)pCurrentSecurityToken;
+
+    if (pClientTokenUser == NULL || pCurrentTokenUser == NULL)
+        return NV_ERR_INVALID_POINTER;
+
+    if ((pClientTokenUser->euid != pCurrentTokenUser->euid) &&
+        (pClientTokenUser->pid != pCurrentTokenUser->pid))
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "NVRM: %s: Current security token doesn't match the one in the client database. "
+                  "Current EUID: %d, PID: %d; Client DB EUID: %d, PID: %d\n",
+                  __FUNCTION__, pCurrentTokenUser->euid, pCurrentTokenUser->pid,
+                  pClientTokenUser->euid, pClientTokenUser->pid);
+        return NV_ERR_INVALID_CLIENT;
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Interface function to compare the tokens for two clients
+ *
+ * This function takes two tokens as parameters, validates them, and checks
+ * whether the EUIDs of the two tokens match.
+ *
+ * @param[in] pUidToken1    Token to compare
+ * @param[in] pUidToken2    Token to compare
+ * @return NV_TRUE if the tokens match
+ *         NV_FALSE if the tokens do not match
+ */
+NvBool
+osUidTokensEqual
+(
+    PUID_TOKEN pUidToken1,
+    PUID_TOKEN pUidToken2
+)
+{
+    NvU32 *pTokenUser1 = (NvU32 *)pUidToken1;
+    NvU32 *pTokenUser2 = (NvU32 *)pUidToken2;
+
+    NV_ASSERT_OR_RETURN((pTokenUser1 != NULL), NV_FALSE);
+    NV_ASSERT_OR_RETURN((pTokenUser2 != NULL), NV_FALSE);
+
+    if (*pTokenUser1 != *pTokenUser2)
+    {
+        return NV_FALSE;
+    }
+
+    return NV_TRUE;
+}
+
+NvBool
+osRemoveGpuSupported
+(
+    void
+)
+{
+    return os_pci_remove_supported();
+}
+
+/*
+ * @brief Get the address ranges assigned to local or peer GPUs on a system that
+ *        supports hardware address translation services (ATS) over NVLink/C2C.
+ * + * @note + * - All address values are in the System Physical Address (SPA) space + * - Targets can either be "Local" (bIsPeer=False) or for a specified "Peer" + * (bIsPeer=True, peerIndex=#) GPU + * - Target address and mask values have a specified bit width, and represent + * the higher order bits above the target address granularity + * + * @param[in] pGpu GPU object pointer + * @param[out] pAddrSysPhys Pointer to hold SPA + * @param[out] pAddrWidth Address range width value pointer + * @param[out] pMask Mask value pointer + * @param[out] pMaskWidth Mask width value pointer + * @param[in] bIsPeer NV_TRUE if this is a peer, local GPU otherwise + * @param[in] peerIndex Peer index + * + * @return NV_OK or NV_ERR_NOT_SUPPORTED + * + * A return value of NV_ERR_NOT_SUPPORTED for the local GPU would + * indicate that the system does not support ATS over NVLink/C2C + */ +NV_STATUS +osGetAtsTargetAddressRange +( + OBJGPU *pGpu, + NvU64 *pAddrSysPhys, + NvU32 *pAddrWidth, + NvU32 *pMask, + NvU32 *pMaskWidth, + NvBool bIsPeer, + NvU32 peerIndex +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/* + * @brief Get the physical address in CPU address map and NUMA node id + * of the GPU memory. + * + * @note + * - The physical address is System Physical Address (SPA) in baremetal/host + * and Intermediate Physical Address(IPA) or Guest Physical Address(GPA) + * inside a VM. + * + * @param[in] pGpu GPU object pointer + * @param[out] pAddrPhys Pointer to hold the physical address of FB in + * CPU address map + * @param[out] pNodeId NUMA nodeID of respective GPU memory + * + * @return NV_OK or NV_ERR_NOT_SUPPORTED + * + */ +NV_STATUS +osGetFbNumaInfo +( + OBJGPU *pGpu, + NvU64 *pAddrPhys, + NvU64 *pAddrRsvdPhys, + NvS32 *pNodeId +) +{ + return NV_ERR_NOT_SUPPORTED; +} + + +/* + * @brief Verif only function to get the chiplib overrides for link connection + * state for all C2C links. + * + * If chiplib overrides exist, each link can either be enabled (1) or disabled (0) + * + * @param[in] pGpu GPU object pointer + * @param[in] maxLinks Size of pLinkConnection array + * @param[out] pLinkConnection array of pLinkConnection values to be populated by MODS + * + * @return NV_OK or NV_ERR_NOT_SUPPORTED (no overrides available) + */ +NV_STATUS +osGetForcedC2CConnection +( + OBJGPU *pGpu, + NvU32 maxLinks, + NvU32 *pLinkConnection +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS +osGetSmbiosTableInfo +( + const NvU8 *pMappedAddr, + NvU64 *pBaseAddr, + NvU64 *pLength, + NvU64 *pNumSubTypes, + NvU32 *pVersion +) +{ + *pBaseAddr = 0; + *pLength = 0; + *pNumSubTypes = 0; + *pVersion = 0; + + if (portMemCmp(pMappedAddr, "_SM3_", 5) == 0) + { + *pVersion = (pMappedAddr[7] << 8) | pMappedAddr[8]; + portMemCopy(pLength, 4, pMappedAddr + 12, 4); + portMemCopy(pBaseAddr, 8, pMappedAddr + 16, 8); + + *pNumSubTypes = *pLength / 4; + + return NV_OK; + } + + if (portMemCmp(pMappedAddr, "_SM_", 4) == 0) + { + *pVersion = (pMappedAddr[6] << 8) | pMappedAddr[7]; + + pMappedAddr += 16; + + if (portMemCmp(pMappedAddr, "_DMI_", 5) == 0) + { + portMemCopy(pLength, 2, pMappedAddr + 6, 2); + portMemCopy(pBaseAddr, 4, pMappedAddr + 8, 4); + portMemCopy(pNumSubTypes, 2, pMappedAddr + 12, 2); + + if (!*pVersion) + *pVersion = (pMappedAddr[14] & 0xF0) << 4 | + (pMappedAddr[14] & 0x0F); + + return NV_OK; + } + } + + return NV_ERR_INVALID_ADDRESS; +} + + +/* + * @brief Function to export SMBIOS table. Also, maps table in kernel-space. + * + * @param[out] ppBaseVAddr Base virtual address of SMBIOS table. 
+ * @param[out]  pLength        Size of SMBIOS table.
+ * @param[out]  pNumSubTypes   Count of structures (types) embedded in
+ *                             the SMBIOS table.
+ * @param[out]  pVersion       SMBIOS version
+ *
+ * @return NV_OK, NV_ERR_INSUFFICIENT_RESOURCES or NV_ERR_INVALID_ADDRESS
+ *         or errors from OS layer
+ */
+NV_STATUS
+osGetSmbiosTable
+(
+    void **ppBaseVAddr,
+    NvU64 *pLength,
+    NvU64 *pNumSubTypes,
+    NvU32 *pVersion
+)
+{
+    NV_STATUS status = NV_OK;
+    NvU64 physSmbiosAddr = ~0ull;
+    void *pMappedAddr = NULL;
+    NvU64 basePAddr = 0;
+
+    if (!NVCPU_IS_X86_64 && !NVCPU_IS_AARCH64)
+    {
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    status = os_get_smbios_header(&physSmbiosAddr);
+    if (status != NV_OK)
+    {
+        NV_PRINTF(LEVEL_INFO, "%s: Failed to query SMBIOS table with error: %x\n",
+                  __FUNCTION__, status);
+        return status;
+    }
+
+    NV_ASSERT(physSmbiosAddr != ~0ull);
+
+    pMappedAddr = osMapKernelSpace(physSmbiosAddr,
+                                   os_page_size,
+                                   NV_MEMORY_CACHED,
+                                   NV_PROTECT_READ_WRITE);
+    if (!pMappedAddr)
+    {
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+
+    status = osGetSmbiosTableInfo(pMappedAddr,
+                                  &basePAddr,
+                                  pLength,
+                                  pNumSubTypes,
+                                  pVersion);
+
+    osUnmapKernelSpace(pMappedAddr, os_page_size);
+
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    *ppBaseVAddr = osMapKernelSpace(basePAddr,
+                                    *pLength,
+                                    NV_MEMORY_CACHED,
+                                    NV_PROTECT_READ_WRITE);
+    if (!*ppBaseVAddr)
+    {
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+
+    return NV_OK;
+}
+
+/*
+ * @brief Function to free SMBIOS table mappings
+ *
+ * @param[in] pBaseVAddr   Base virtual address of SMBIOS table.
+ * @param[in] length       Size of SMBIOS table.
+ *
+ */
+void
+osPutSmbiosTable
+(
+    void *pBaseVAddr,
+    NvU64 length
+)
+{
+    osUnmapKernelSpace(pBaseVAddr, length);
+}
+
+NV_STATUS
+osGetAcpiRsdpFromUefi
+(
+    NvU32 *pRsdpAddr
+)
+{
+    return os_get_acpi_rsdp_from_uefi(pRsdpAddr);
+}
+
+/*
+ * @brief Returns NV_TRUE if an NvSwitch device is present in the system.
+ */
+NvBool
+osIsNvswitchPresent
+(
+    void
+)
+{
+    return os_is_nvswitch_present();
+}
+
+/*
+ * @brief Function to add a crashlog buffer entry.
+ *
+ * @param[in] pBuffer   virt_addr of nvlog buffer
+ * @param[in] length    size of nvlog buffer
+ */
+void
+osAddRecordForCrashLog
+(
+    void *pBuffer,
+    NvU32 length
+)
+{
+    os_add_record_for_crashLog(pBuffer, length);
+}
+
+/*
+ * @brief Function to delete a crashlog buffer entry.
+ *
+ * @param[in] pBuffer   virt_addr of nvlog buffer
+ */
+void
+osDeleteRecordForCrashLog
+(
+    void *pBuffer
+)
+{
+    os_delete_record_for_crashLog(pBuffer);
+}
+
+/*
+ * @brief Queries the sysfs interface to get the memblock size
+ * @param[out] memblock_size   Pointer to the memblock_size
+ */
+NV_STATUS
+osNumaMemblockSize
+(
+    NvU64 *memblock_size
+)
+{
+    return os_numa_memblock_size(memblock_size);
+}
+
+NvBool
+osNumaOnliningEnabled
+(
+    OS_GPU_INFO *pOsGpuInfo
+)
+{
+    NvS32 numaNodeId = NV0000_CTRL_NO_NUMA_NODE;
+
+    //
+    // Note that this numaNodeId value fetched from the Linux layer might not
+    // be accurate, since it is possible to override it with a regkey on some
+    // configs.
+    //
+    if (nv_get_device_memory_config(pOsGpuInfo, NULL, NULL, NULL, NULL,
+                                    &numaNodeId) != NV_OK)
+    {
+        return NV_FALSE;
+    }
+
+    return (numaNodeId != NV0000_CTRL_NO_NUMA_NODE);
+}
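+
+/*
+ * Illustrative call of the allocator below (hypothetical values; allocates
+ * 64 KiB on NUMA node 0, skipping direct reclaim):
+ *
+ *     NvU64 addr;
+ *     status = osAllocPagesNode(0, 64 * 1024,
+ *                               OS_ALLOC_PAGES_NODE_SKIP_RECLAIM, &addr);
+ */
+
+/*
+ * @brief Function to call the NUMA allocation entry point.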
+ * + * @param[in] nid NUMA node id + * @param[in] size Allocation size + * @param[in] flag Allocation flags + * @param[out] pAddress Ptr to the allocated physical address + */ +NV_STATUS +osAllocPagesNode +( + NvS32 nid, + NvLength size, + NvU32 flag, + NvU64 *pAddress +) +{ + NV_STATUS status = NV_OK; + NvU32 localFlag = NV_ALLOC_PAGES_NODE_NONE; + + if (pAddress == NULL || nid < 0 || size > NV_U32_MAX) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Translate the flags + if (flag & OS_ALLOC_PAGES_NODE_SKIP_RECLAIM) + { + localFlag |= NV_ALLOC_PAGES_NODE_SKIP_RECLAIM; + } + + status = os_alloc_pages_node(nid, (NvU32)size, localFlag, pAddress); + return status; +} + +void +osAllocAcquirePage +( + NvU64 pAddress, + NvU32 pageCount +) +{ + NvU32 i; + + for (i = 0; i < pageCount; i++) + { + os_get_page(pAddress + (i << os_page_shift)); + } +} + +void +osAllocReleasePage +( + NvU64 pAddress, + NvU32 pageCount +) +{ + NvU32 i; + + for (i = 0; i < pageCount; i++) + { + os_put_page(pAddress + (i << os_page_shift)); + } +} + +/* + * @brief Function to return refcount on a page + * @param[in] address The physical address of the page + */ +NvU32 +osGetPageRefcount +( + NvU64 pAddress +) +{ + return os_get_page_refcount(pAddress); +} + +/* + * @brief Function to return the number of tail pages if the address is + * referring to a compound page; For non-compound pages, 1 is returned. + * @param[in] address The physical address of the page + */ +NvU32 +osCountTailPages +( + NvU64 pAddress +) +{ + return os_count_tail_pages(pAddress); +} + +NvU64 +osGetPageSize(void) +{ + return os_page_size; +} + +NvU64 +osGetSupportedSysmemPageSizeMask(void) +{ + // + // We assume that the kernel can support all power-of-two pagesizes + // between os_page_size and os_max_page_size (inclusive). Return a + // bitmask containing all of those. 
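+    //
+    // Worked example (assuming os_page_size = 4 KiB and os_max_page_size =
+    // 64 KiB):
+    //
+    //     ((0x10000 << 1) - 1) = 0x1FFFF    all bits below 128 KiB
+    //     ~(0x1000 - 1)        = ~0xFFF     clears bits below 4 KiB
+    //     AND of the two       = 0x1F000    4K | 8K | 16K | 32K | 64K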
+    //
+    return (((os_max_page_size << 1) - 1) & (~(os_page_size - 1)));
+}
+
+NvU8
+osGetPageShift(void)
+{
+    return os_page_shift;
+}
+
+/*
+ * @brief Opens a new temporary file for reading and writing
+ *
+ * @param[out] ppFile   Pointer to receive the opaque file handle
+ *
+ * @returns NV_STATUS, NV_OK if success,
+            NV_ERR_GENERIC, if error
+            NV_ERR_NOT_SUPPORTED, for unsupported platforms
+ */
+NV_STATUS
+osOpenTemporaryFile
+(
+    void **ppFile
+)
+{
+    return os_open_temporary_file(ppFile);
+}
+
+/*
+ * @brief Closes the specified temporary file
+ *
+ * @param[in] pFile   Pointer to file
+ *
+ * @returns void
+ */
+void
+osCloseFile
+(
+    void *pFile
+)
+{
+    os_close_file(pFile);
+}
+
+/*
+ * @brief Writes the buffer to the specified file at the given offset
+ *
+ * @param[in] pFile     Pointer to file (void *)
+ * @param[in] pBuffer   Pointer to buffer from which to copy
+ * @param[in] size      Size of the copy
+ * @param[in] offset    Offset in the file
+ *
+ * @returns NV_STATUS, NV_OK if success,
+            NV_ERR_GENERIC, if error
+            NV_ERR_NOT_SUPPORTED, for unsupported platforms
+ */
+NV_STATUS
+osWriteToFile
+(
+    void *pFile,
+    NvU8 *pBuffer,
+    NvU64 size,
+    NvU64 offset
+)
+{
+    return os_write_file(pFile, pBuffer, size, offset);
+}
+
+/*
+ * @brief Reads from the specified file at the given offset
+ *
+ * @param[in] pFile     Pointer to file (void *)
+ * @param[in] pBuffer   Pointer to buffer to which the data is copied
+ * @param[in] size      Size of the copy
+ * @param[in] offset    Offset in the file
+ *
+ * @returns NV_STATUS, NV_OK if success,
+            NV_ERR_GENERIC, if error
+            NV_ERR_NOT_SUPPORTED, for unsupported platforms
+ */
+NV_STATUS
+osReadFromFile
+(
+    void *pFile,
+    NvU8 *pBuffer,
+    NvU64 size,
+    NvU64 offset
+)
+{
+    return os_read_file(pFile, pBuffer, size, offset);
+}
+
+/*
+ * @brief Unregisters caps from the capability framework.
+ *        The function assumes that the caps are allocated and stored in the
+ *        hierarchical order. If they aren't, the OS (Linux kernel) will warn
+ *        and leak the caps.
+ *
+ * @param[in] ppOsRmCaps   caps of interest
+ */
+void
+osRmCapUnregister
+(
+    OS_RM_CAPS **ppOsRmCaps
+)
+{
+    OS_RM_CAPS *pOsRmCaps = *ppOsRmCaps;
+    NvS32 i;
+
+    if (pOsRmCaps == NULL)
+    {
+        return;
+    }
+
+    for (i = pOsRmCaps->count - 1; i >= 0; i--)
+    {
+        if (pOsRmCaps->caps[i] != NULL)
+        {
+            os_nv_cap_destroy_entry(pOsRmCaps->caps[i]);
+        }
+    }
+
+    os_free_mem(pOsRmCaps->caps);
+    os_free_mem(pOsRmCaps);
+
+    *ppOsRmCaps = NULL;
+}
+
+static NV_STATUS
+_allocOsRmCaps
+(
+    OS_RM_CAPS **ppOsRmCaps,
+    NvU32        count
+)
+{
+    NV_STATUS   status;
+    OS_RM_CAPS *pOsRmCaps;
+
+    *ppOsRmCaps = NULL;
+
+    status = os_alloc_mem((void**)&pOsRmCaps, sizeof(OS_RM_CAPS));
+    if (status != NV_OK)
+        return status;
+
+    pOsRmCaps->count = count;
+
+    status = os_alloc_mem((void**)&pOsRmCaps->caps, sizeof(pOsRmCaps->caps[0]) * count);
+    if (status != NV_OK)
+    {
+        os_free_mem(pOsRmCaps);
+        return status;
+    }
+
+    os_mem_set(pOsRmCaps->caps, 0, sizeof(pOsRmCaps->caps[0]) * count);
+
+    *ppOsRmCaps = pOsRmCaps;
+    return NV_OK;
+}
+
+#define OS_RM_CAP_GPU_DIR     0
+#define OS_RM_CAP_GPU_MIG_DIR 1
+#define OS_RM_CAP_GPU_COUNT   2
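+
+/*
+ * The registration functions below build a small capability-entry hierarchy
+ * under the driver's caps root. Sketched layout (hypothetical GPU minor 0,
+ * GPU instance 1, compute instance 2):
+ *
+ *     gpu0/                 created by osRmCapRegisterGpu()
+ *         mig/
+ *             gi1/          created by osRmCapRegisterSmcPartition()
+ *                 access
+ *                 ci2/      created by osRmCapRegisterSmcExecutionPartition()
+ *                     access
+ */
+
+/*
+ * @brief Registers OBJGPU with the capability framework.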
+ * + * @param[in] pOsGpuInfo OS specific GPU information pointer + * @param[out] ppOsRmCaps GPU OS specific capabilities pointer + */ +NV_STATUS +osRmCapRegisterGpu +( + OS_GPU_INFO *pOsGpuInfo, + OS_RM_CAPS **ppOsRmCaps +) +{ + NvU32 minor = nv_get_dev_minor(pOsGpuInfo); + char name[16]; + NV_STATUS status; + OS_RM_CAPS *pOsRmCaps; + nv_cap_t *parent; + nv_cap_t *cap; + + // Return success on the unsupported platforms. + if (nvidia_caps_root == NULL) + { + return NV_OK; + } + + if (*ppOsRmCaps != NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_GPU_COUNT); + if (status != NV_OK) + return status; + + *ppOsRmCaps = pOsRmCaps; + + os_snprintf(name, sizeof(name), "gpu%u", minor); + name[sizeof(name) - 1] = '\0'; + parent = nvidia_caps_root; + + cap = os_nv_cap_create_dir_entry(parent, name, (OS_RUGO | OS_XUGO)); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup gpu%u directory\n", minor); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_GPU_DIR] = cap; + parent = cap; + + // TODO: Bug 2679591: Add MIG directory only if SMC is enabled. + // For now, always add "mig" directory. + cap = os_nv_cap_create_dir_entry(parent, "mig", (OS_RUGO | OS_XUGO)); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup mig directory\n"); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_GPU_MIG_DIR] = cap; + + return NV_OK; + +failed: + osRmCapUnregister(ppOsRmCaps); + + return status; +} + +#define OS_RM_CAP_SMC_PART_DIR 0 +#define OS_RM_CAP_SMC_PART_ACCESS_FILE 1 +#define OS_RM_CAP_SMC_PART_COUNT 2 + +/* + * @brief Registers SMC partition (a.k.a. GPU instance) with the capability + * framework + * + * @param[in] pGpuOsRmCaps GPU OS specific capabilities pointer + * @param[out] ppPartitionOsRmCaps OS specific capabilities pointer for SMC partition + * @param[in] swizzId SMC partition swizz ID + */ +NV_STATUS +osRmCapRegisterSmcPartition +( + OS_RM_CAPS *pGpuOsRmCaps, + OS_RM_CAPS **ppPartitionOsRmCaps, + NvU32 swizzId +) +{ + char name[16]; + NV_STATUS status; + nv_cap_t *parent; + nv_cap_t *cap; + OS_RM_CAPS *pOsRmCaps; + + // Return success as there is nothing to do. 
+ if (pGpuOsRmCaps == NULL) + { + return NV_OK; + } + + if (*ppPartitionOsRmCaps != NULL || swizzId >= NV_U32_MAX) + { + return NV_ERR_INVALID_ARGUMENT; + } + + parent = pGpuOsRmCaps->caps[OS_RM_CAP_GPU_MIG_DIR]; + if (parent == NULL) + { + return NV_ERR_INVALID_STATE; + } + + status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_SMC_PART_COUNT); + if (status != NV_OK) + return status; + + *ppPartitionOsRmCaps = pOsRmCaps; + + os_snprintf(name, sizeof(name), "gi%u", swizzId); + name[sizeof(name) - 1] = '\0'; + + cap = os_nv_cap_create_dir_entry(parent, name, OS_RUGO | OS_XUGO); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup gi%u directory\n", + swizzId); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_SMC_PART_DIR] = cap; + parent = cap; + + cap = os_nv_cap_create_file_entry(parent, "access", OS_RUGO); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup access file for ID:%u\n", + swizzId); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_SMC_PART_ACCESS_FILE] = cap; + + return NV_OK; + +failed: + osRmCapUnregister(ppPartitionOsRmCaps); + + return status; +} + +#define OS_RM_CAP_SMC_EXEC_PART_DIR 0 +#define OS_RM_CAP_SMC_EXEC_PART_ACCESS_FILE 1 +#define OS_RM_CAP_SMC_EXEC_PART_COUNT 2 + +/* + * @brief Registers SMC execution partition (a.k.a. compute instance) with the + * capability framework + * + * @param[in] pPartitionOsRmCaps OS specific capabilities pointer for SMC partition + * @param[out] ppExecPartitionOsRmCaps OS specific capabilities pointer for SMC execution partition + * @param[in] execPartitionId SMC execution partition ID + */ +NV_STATUS +osRmCapRegisterSmcExecutionPartition +( + OS_RM_CAPS *pPartitionOsRmCaps, + OS_RM_CAPS **ppExecPartitionOsRmCaps, + NvU32 execPartitionId +) +{ + char name[16]; + NV_STATUS status; + nv_cap_t *parent; + nv_cap_t *cap; + OS_RM_CAPS *pOsRmCaps; + + // Return success as there is nothing to do. 
+ if (pPartitionOsRmCaps == NULL) + { + return NV_OK; + } + + if ((*ppExecPartitionOsRmCaps != NULL) || (execPartitionId >= NV_U32_MAX)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + parent = pPartitionOsRmCaps->caps[OS_RM_CAP_SMC_PART_DIR]; + if (parent == NULL) + { + return NV_ERR_INVALID_STATE; + } + + status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_SMC_EXEC_PART_COUNT); + if (status != NV_OK) + { + return status; + } + + *ppExecPartitionOsRmCaps = pOsRmCaps; + + os_snprintf(name, sizeof(name), "ci%u", execPartitionId); + name[sizeof(name) - 1] = '\0'; + + cap = os_nv_cap_create_dir_entry(parent, name, OS_RUGO | OS_XUGO); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup ci%u directory\n", + execPartitionId); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_SMC_EXEC_PART_DIR] = cap; + parent = cap; + + cap = os_nv_cap_create_file_entry(parent, "access", OS_RUGO); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup access file for ID:%u\n", + execPartitionId); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_SMC_EXEC_PART_ACCESS_FILE] = cap; + + return NV_OK; + +failed: + osRmCapUnregister(ppExecPartitionOsRmCaps); + + return status; +} + +/* + * @brief Release the acquired capability + * + * @param[in] dupedCapDescriptor descriptor to be released + */ +void +osRmCapRelease +( + NvU64 dupedCapDescriptor +) +{ + if (dupedCapDescriptor == NV_U64_MAX) + { + return; + } + + os_nv_cap_close_fd((int)dupedCapDescriptor); +} + +#define OS_RM_CAP_SYS_MIG_DIR 0 +#define OS_RM_CAP_SYS_SMC_CONFIG_FILE 1 +#define OS_RM_CAP_SYS_SMC_MONITOR_FILE 2 +#define OS_RM_CAP_SYS_COUNT 3 + +NV_STATUS +osRmCapRegisterSys +( + OS_RM_CAPS **ppOsRmCaps +) +{ + nv_cap_t **ppCaps; + nv_cap_t *parent; + nv_cap_t *cap; + NV_STATUS status; + OS_RM_CAPS *pOsRmCaps; + + if (nvidia_caps_root == NULL) + return NV_ERR_NOT_SUPPORTED; + + status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_SYS_COUNT); + if (status != NV_OK) + return status; + + *ppOsRmCaps = pOsRmCaps; + + ppCaps = pOsRmCaps->caps; + + parent = os_nv_cap_create_dir_entry(nvidia_caps_root, "mig", OS_RUGO | OS_XUGO); + if (parent == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to create mig directory\n"); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + ppCaps[OS_RM_CAP_SYS_MIG_DIR] = parent; + + cap = os_nv_cap_create_file_entry(parent, "config", OS_RUSR); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to create mig config file\n"); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + ppCaps[OS_RM_CAP_SYS_SMC_CONFIG_FILE] = cap; + + cap = os_nv_cap_create_file_entry(parent, "monitor", OS_RUGO); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to create mig monitor file\n"); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + ppCaps[OS_RM_CAP_SYS_SMC_MONITOR_FILE] = cap; + + return NV_OK; + +failed: + osRmCapUnregister(ppOsRmCaps); + return status; +} + +/* + * @brief Acquire the requested capability + * + * @param[in] pOsRmCaps opaque pointer to the caps. + * @param[in] rmCap the capability to be acquired. + * @param[in] capDescriptor descriptor to be used for validation + * @param[out] dupedCapDescriptor returns duplicated descriptor if validation + * is successful + * + * Note: On Linux, duplicating fd is helpful to let administrators know about + * the capability users. See https://linux.die.net/man/8/lsof usage. 
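+ *
+ * Illustrative call sequence (hypothetical caller-supplied fd):
+ *
+ *     NvU64 dupedFd;
+ *     osRmCapInitDescriptor(&dupedFd);
+ *     status = osRmCapAcquire(pOsRmCaps, NV_RM_CAP_SMC_PARTITION_ACCESS,
+ *                             userFd, &dupedFd);
+ *     ...
+ *     osRmCapRelease(dupedFd);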
+ */ +NV_STATUS +osRmCapAcquire +( + OS_RM_CAPS *pOsRmCaps, + NvU32 rmCap, + NvU64 capDescriptor, + NvU64 *dupedCapDescriptor +) +{ + nv_cap_t *cap; + int fd = (int)capDescriptor; + int duped_fd; + NvU32 index; + NV_STATUS status; + + *dupedCapDescriptor = NV_U64_MAX; + + switch (rmCap) + { + case NV_RM_CAP_SMC_PARTITION_ACCESS: + { + index = OS_RM_CAP_SMC_PART_ACCESS_FILE; + break; + } + case NV_RM_CAP_EXT_FABRIC_MGMT: + { + status = nv_acquire_fabric_mgmt_cap(fd, &duped_fd); + if (status != NV_OK) + { + return status; + } + + goto done; + } + case NV_RM_CAP_SMC_EXEC_PARTITION_ACCESS: + { + index = OS_RM_CAP_SMC_EXEC_PART_ACCESS_FILE; + break; + } + case NV_RM_CAP_SYS_SMC_CONFIG: + { + index = OS_RM_CAP_SYS_SMC_CONFIG_FILE; + break; + } + case NV_RM_CAP_SYS_SMC_MONITOR: + { + index = OS_RM_CAP_SYS_SMC_MONITOR_FILE; + break; + } + default: + { + return NV_ERR_INVALID_ARGUMENT; + } + } + + if (pOsRmCaps == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (index >= pOsRmCaps->count) + { + return NV_ERR_INVALID_ARGUMENT; + } + + cap = pOsRmCaps->caps[index]; + + duped_fd = os_nv_cap_validate_and_dup_fd(cap, fd); + if (duped_fd < 0) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + +done: + *dupedCapDescriptor = duped_fd; + + return NV_OK; +} + +/* + * @brief Initializes capability descriptor + * + * @param[out] pCapDescriptor descriptor to be used + * + */ +void +osRmCapInitDescriptor +( + NvU64 *pCapDescriptor +) +{ + *pCapDescriptor = NV_U64_MAX; +} + +/* + * @brief Checks if IMEX channel support is present. + */ +NvBool +osImexChannelIsSupported(void) +{ + return os_imex_channel_is_supported; +} + +/* + * @brief Returns IMEX channel count. + */ +NvS32 +osImexChannelCount +( + void +) +{ + return os_imex_channel_count(); +} + +/* + * @brief Returns IMEX channel number. + * + * @param[in] descriptor OS specific descriptor to query channel number. + * + */ +NvS32 +osImexChannelGet(NvU64 descriptor) +{ + return os_imex_channel_get(descriptor); +} + +/* + * @brief Generates random bytes which can be used as a universally unique + * identifier. + * + * This function may sleep (interruptible). + * + * @param[out] pBytes Array of random bytes + * @param[in] numBytes Size of the array + */ +NV_STATUS +osGetRandomBytes +( + NvU8 *pBytes, + NvU16 numBytes +) +{ + return os_get_random_bytes(pBytes, numBytes); +} + +/* + * @brief Allocate wait queue + * + * @param[out] ppWq Wait queue + */ +NV_STATUS +osAllocWaitQueue +( + OS_WAIT_QUEUE **ppWq +) +{ + return os_alloc_wait_queue(ppWq); +} + +/* + * @brief Free wait queue + * + * @param[in] pWq Wait queue + */ +void +osFreeWaitQueue +( + OS_WAIT_QUEUE *pWq +) +{ + os_free_wait_queue(pWq); +} + +/* + * @brief Put thread to uninterruptible sleep + * + * @param[in] pWq Wait queue + */ +void +osWaitUninterruptible +( + OS_WAIT_QUEUE *pWq +) +{ + os_wait_uninterruptible(pWq); +} + +/* + * @brief Put thread to interruptible sleep + * + * @param[in] pWq Wait queue + */ +void +osWaitInterruptible +( + OS_WAIT_QUEUE *pWq +) +{ + os_wait_interruptible(pWq); +} + +/* + * @brief Wake up thread from uninterruptible sleep + * + * @param[in] pWq Wait queue + */ +void +osWakeUp +( + OS_WAIT_QUEUE *pWq +) +{ + os_wake_up(pWq); +} + +NV_STATUS +osReadPFPciConfigInVF +( + NvU32 addr, + NvU32 *data +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Callback function to notify RM when unix layer receives an event + * + * This function is basically a wrapper to call the Core RM layer and is + * being called from DCE KMD when an event is received from DCE RM. 
+ * + * @param[in] handle handle allocated for corresponding IPC type with DCE + * @param[in] interfaceType RM IPC interface type + * @param[in] length length of the message passed from DCE + * @param[in] data any specific data if present + * @param[in] usrCtx any specific user context if present + * + * @returns void + */ +static void +osTegraDceClientIpcCallback +( + NvU32 handle, + NvU32 interfaceType, + NvU32 length, + void *data, + void *usrCtx +) +{ + THREAD_STATE_NODE threadState; + NvU32 rmInterfaceType = nv_tegra_get_rm_interface_type(interfaceType); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (rmLocksAcquireAll(RM_LOCK_MODULES_KERNEL_RM_EVENTS) == NV_OK) + { + dceclientHandleAsyncRpcCallback(handle, rmInterfaceType, length, data, usrCtx); + + rmLocksReleaseAll(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); +} + +/*! + * @brief Performs IPC Client registration with DCE + * + * This function is basically a wrapper to call the unix/linux layer. + * + * @param[in] interfaceType RM IPC interface type + * @param[in] usrCtx any specific user context if present + * @param[out] clientId unique ID registered with DCE for IPC + * + * @returns NV_OK if successful, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * other errors as may be returned by subfunctions. + */ +NV_STATUS +osTegraDceRegisterIpcClient +( + NvU32 interfaceType, + void *usrCtx, + NvU32 *clientId +) +{ + if (interfaceType == DCE_CLIENT_RM_IPC_TYPE_SYNC) + return nv_tegra_dce_register_ipc_client(interfaceType, usrCtx, NULL, clientId); + else if (interfaceType == DCE_CLIENT_RM_IPC_TYPE_EVENT) + return nv_tegra_dce_register_ipc_client(interfaceType, usrCtx, osTegraDceClientIpcCallback, clientId); + else + return NV_ERR_INVALID_ARGUMENT; +} + +/*! + * @brief Performs IPC Client destroy with DCE + * + * This function is basically a wrapper to call the unix/linux layer. + * + * @param[in] clientId unique ID registered with DCE for IPC + * + * @returns NV_OK if successful, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * other errors as may be returned by subfunctions. + */ +NV_STATUS +osTegraDceUnregisterIpcClient +( + NvU32 clientId +) +{ + return nv_tegra_dce_unregister_ipc_client(clientId); +} + +/*! + * @brief Performs IPC Send/Receive to/from DCE + * + * This function is basically a wrapper to call the unix/linux layer. + * + * @param[in] clientId unique ID registered with DCE KMD for corresponding IPC type + * @param[in] msg structure to hold dce ipc message info + * @param[in] msgLength length of the message + * + * @returns NV_OK if successful, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * other errors as may be returned by subfunctions. + */ +NV_STATUS +osTegraDceClientIpcSendRecv +( + NvU32 clientId, + void *msg, + NvU32 msgLength +) +{ + return nv_tegra_dce_client_ipc_send_recv(clientId, msg, msgLength); +} + +/*! + * @brief Sends an MRQ (message-request) to BPMP + * + * The request, response, and ret parameters of this function correspond to the + * components of the tegra_bpmp_message struct, which BPMP uses to receive + * MRQs. 
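+ *
+ * For reference, a simplified sketch of that kernel-side struct (see
+ * include/soc/tegra/bpmp.h; field layout may differ across kernel versions):
+ *
+ *     struct tegra_bpmp_message {
+ *         unsigned int mrq;                                   // 'mrq' arg
+ *         struct { const void *data; size_t size; } tx;       // request
+ *         struct { void *data; size_t size; int ret; } rx;    // response/ret
+ *     };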
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] mrq MRQ_xxx ID specifying what is requested + * @param[in] pRequestData Pointer to request input data + * @param[in] requestDataSize Size of structure pointed to by pRequestData + * @param[out] pResponseData Pointer to response output data + * @param[in] responseDataSize Size of structure pointed to by pResponseData + * @param[out] ret MRQ return code (from "ret" element of + * tegra_bpmp_message struct) + * @param[out] apiRet Return code from tegra_bpmp_transfer call + * + * @returns NV_OK if successful, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, + * NV_ERR_INVALID_POINTER if the tegra_bpmp struct pointer could not + * be obtained from nv, or + * NV_ERR_GENERIC if the tegra_bpmp_transfer call failed (see apiRet + * for Linux error code). + */ +NV_STATUS +osTegraSocBpmpSendMrq +( + OBJGPU *pGpu, + NvU32 mrq, + const void *pRequestData, + NvU32 requestDataSize, + void *pResponseData, + NvU32 responseDataSize, + NvS32 *pRet, + NvS32 *pApiRet +) +{ + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_SOC_SDM)) + { + return NV_ERR_NOT_SUPPORTED; + } + else if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + return nv_bpmp_send_mrq(pGpu->pOsGpuInfo, + mrq, + pRequestData, + requestDataSize, + pResponseData, + responseDataSize, + pRet, + pApiRet); + } + else + { + return NV_ERR_NOT_SUPPORTED; + } +} + +/*! + * @brief Returns IMP-relevant data collected from other modules + * + * This function is basically a wrapper to call the unix/linux layer. + * + * @param[out] pTegraImpImportData Structure to receive the data + * + * @returns NV_OK if successful, + * NV_ERR_BUFFER_TOO_SMALL if the array in TEGRA_IMP_IMPORT_DATA is + * too small, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * other errors as may be returned by subfunctions. + */ +NV_STATUS +osTegraSocGetImpImportData +( + OBJGPU *pGpu, + TEGRA_IMP_IMPORT_DATA *pTegraImpImportData +) +{ + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + return nv_imp_get_import_data(pTegraImpImportData); + } + else + { + return NV_ERR_NOT_SUPPORTED; + } +} + +/*! + * @brief Tells BPMP whether or not RFL is valid + * + * Display HW generates an ok_to_switch signal which asserts when mempool + * occupancy is high enough to be able to turn off memory long enough to + * execute a dramclk frequency switch without underflowing display output. + * ok_to_switch drives the RFL ("request for latency") signal in the memory + * unit, and the switch sequencer waits for this signal to go active before + * starting a dramclk switch. However, if the signal is not valid (e.g., if + * display HW or SW has not been initialized yet), the switch sequencer ignores + * the signal. This API tells BPMP whether or not the signal is valid. + * + * @param[in] pOsGpuInfo Per GPU Linux state + * @param[in] bEnable True if RFL will be valid; false if invalid + * + * @returns NV_OK if successful, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * NV_ERR_GENERIC if some other kind of error occurred. + */ +NV_STATUS +osTegraSocEnableDisableRfl +( + OS_GPU_INFO *pOsGpuInfo, + NvBool bEnable +) +{ + if (NV_IS_SOC_DISPLAY_DEVICE(pOsGpuInfo)) + { + return nv_imp_enable_disable_rfl(pOsGpuInfo, bEnable); + } + else + { + return NV_ERR_NOT_SUPPORTED; + } +} + +/*! 
+ * @brief Allocates a specified amount of ISO memory bandwidth for display
+ *
+ * floorBandwidthKBPS is the minimum required (i.e., floor) dramclk frequency
+ * multiplied by the width of the pipe over which the display data will travel.
+ * (It is understood that the bandwidth calculated by multiplying the clock
+ * frequency by the pipe width will not be realistically achievable, due to
+ * overhead in the memory subsystem. The infrastructure will not actually use
+ * the bandwidth value, except to reverse the calculation to get the required
+ * dramclk frequency.)
+ *
+ * This function is basically a wrapper to call into the unix/linux layer.
+ *
+ * @param[in] pOsGpuInfo             OS specific GPU information pointer
+ * @param[in] averageBandwidthKBPS   Amount of ISO memory bandwidth requested
+ * @param[in] floorBandwidthKBPS     Min required dramclk freq * pipe width
+ *
+ * @returns NV_OK if successful,
+ *          NV_ERR_INSUFFICIENT_RESOURCES if one of the bandwidth values is too
+ *            high, and bandwidth cannot be allocated,
+ *          NV_ERR_NOT_SUPPORTED if the functionality is not available, or
+ *          NV_ERR_GENERIC if some other kind of error occurred.
+ */
+NV_STATUS
+osTegraAllocateDisplayBandwidth
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvU32        averageBandwidthKBPS,
+    NvU32        floorBandwidthKBPS
+)
+{
+    if (NV_IS_SOC_DISPLAY_DEVICE(pOsGpuInfo))
+    {
+        return nv_imp_icc_set_bw(pOsGpuInfo,
+                                 averageBandwidthKBPS,
+                                 floorBandwidthKBPS);
+    }
+    else
+    {
+        return NV_ERR_NOT_SUPPORTED;
+    }
+}
+
+/*!
+ * @brief Creates or sets up a platform specific nanosecond-resolution timer
+ *
+ * @param[in]  pOsGpuInfo   OS specific GPU information pointer
+ * @param[in]  pTmrEvent    Pointer to timer event information
+ * @param[out] pTimer       Pointer to hold the high resolution timer object
+ */
+NV_STATUS
+osCreateNanoTimer
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    void *pTmrEvent,
+    void **pTimer
+)
+{
+    nv_create_nano_timer(pOsGpuInfo, pTmrEvent, (nv_nano_timer_t **)pTimer);
+    return NV_OK;
+}
+
+/*!
+ * @brief Starts a platform specific nanosecond-resolution timer
+ *
+ * @param[in] pOsGpuInfo   OS specific GPU information pointer
+ * @param[in] pTimer       Pointer to the high resolution timer object
+ * @param[in] timeNs       Relative time in nanoseconds
+ */
+NV_STATUS
+osStartNanoTimer
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    void *pTimer,
+    NvU64 timeNs
+)
+{
+    nv_start_nano_timer(pOsGpuInfo, (nv_nano_timer_t *)pTimer, timeNs);
+    return NV_OK;
+}
+
+/*!
+ * @brief Cancels a platform specific nanosecond-resolution timer
+ *
+ * @param[in] pOsGpuInfo   OS specific GPU information pointer
+ * @param[in] pTimer       Pointer to the timer object
+ */
+NV_STATUS
+osCancelNanoTimer
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    void *pTimer
+)
+{
+    nv_cancel_nano_timer(pOsGpuInfo, (nv_nano_timer_t *)pTimer);
+    return NV_OK;
+}
+
+/*!
+ * @brief Destroys and cancels a platform specific nanosecond-resolution timer
+ *
+ * @param[in] pOsGpuInfo   OS specific GPU information pointer
+ * @param[in] pTimer       Pointer to the timer object
+ */
+NV_STATUS
+osDestroyNanoTimer
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    void *pTimer
+)
+{
+    nv_destroy_nano_timer(pOsGpuInfo, (nv_nano_timer_t *)pTimer);
+    return NV_OK;
+}
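+
+/*
+ * Illustrative nano-timer lifecycle using the helpers above (hypothetical
+ * event wiring):
+ *
+ *     void *pTimer;
+ *     if (osCreateNanoTimer(pOsGpuInfo, pTmrEvent, &pTimer) == NV_OK)
+ *     {
+ *         osStartNanoTimer(pOsGpuInfo, pTimer, 1000000);   // fire in 1 ms
+ *         ...
+ *         osCancelNanoTimer(pOsGpuInfo, pTimer);
+ *         osDestroyNanoTimer(pOsGpuInfo, pTimer);
+ *     }
+ */
+
+/*!
+ * @brief Get the number of dpAux instances.
+ * It is a wrapper function that calls into the unix/linux layer.
+ *
+ * @param[in]  pOsGpuInfo      OS specific GPU information pointer
+ * @param[out] pNumInstances   Number of dpAux instances.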
+ *
+ * @returns NV_STATUS, NV_OK if success,
+ *          NV_ERR_GENERIC, if error
+ *          NV_ERR_NOT_SUPPORTED, for unsupported platforms
+ */
+NV_STATUS
+osGetTegraNumDpAuxInstances
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvU32       *pNumInstances
+)
+{
+    if (NV_IS_SOC_DISPLAY_DEVICE(pOsGpuInfo))
+    {
+        return nv_get_num_dpaux_instances(pOsGpuInfo, pNumInstances);
+    }
+    else
+    {
+        return NV_ERR_NOT_SUPPORTED;
+    }
+}
+
+/*
+ * @brief Returns the private data of the current IRQ.
+ *        It is a wrapper function that calls into the unix/linux layer.
+ *
+ * @param[in]  pOsGpuInfo   OS specific GPU information pointer
+ * @param[out] pPrivData    Private data of the current IRQ
+ *
+ * @returns NV_STATUS, NV_OK if success,
+ *          NV_ERR_GENERIC, if error
+ *          NV_ERR_NOT_SUPPORTED, for unsupported platforms
+ */
+NV_STATUS
+osGetCurrentIrqPrivData
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvU32       *pPrivData
+)
+{
+    if (NV_IS_SOC_DISPLAY_DEVICE(pOsGpuInfo))
+    {
+        return nv_get_current_irq_priv_data(pOsGpuInfo, pPrivData);
+    }
+    else
+    {
+        return NV_ERR_NOT_SUPPORTED;
+    }
+}
+
+/*!
+ * @brief Get the brightness level.
+ *        It is a wrapper function that calls into the unix/linux layer.
+ *
+ * @param[in]  pOsGpuInfo   OS specific GPU information pointer
+ * @param[out] brightness   Pointer to the brightness level
+ *
+ * @returns NV_STATUS, NV_OK if success,
+ *          NV_ERR_GENERIC, if error
+ *          NV_ERR_NOT_SUPPORTED, for unsupported platforms
+ */
+NV_STATUS
+osGetTegraBrightnessLevel
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvU32       *brightness
+)
+{
+    if (NV_IS_SOC_DISPLAY_DEVICE(pOsGpuInfo))
+    {
+        return nv_get_tegra_brightness_level(pOsGpuInfo, brightness);
+    }
+    else
+    {
+        return NV_ERR_NOT_SUPPORTED;
+    }
+}
+
+/*!
+ * @brief Set the brightness level.
+ *        It is a wrapper function that calls into the unix/linux layer.
+ *
+ * @param[in] pOsGpuInfo   OS specific GPU information pointer
+ * @param[in] brightness   Brightness level
+ *
+ * @returns NV_STATUS, NV_OK if success,
+ *          NV_ERR_GENERIC, if error
+ *          NV_ERR_NOT_SUPPORTED, for unsupported platforms
+ */
+NV_STATUS
+osSetTegraBrightnessLevel
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvU32        brightness
+)
+{
+    if (NV_IS_SOC_DISPLAY_DEVICE(pOsGpuInfo))
+    {
+        return nv_set_tegra_brightness_level(pOsGpuInfo, brightness);
+    }
+    else
+    {
+        return NV_ERR_NOT_SUPPORTED;
+    }
+}
+
+/* @brief Gets syncpoint aperture information
+ *
+ * @param[in]  pOsGpuInfo    OS specific GPU information pointer
+ * @param[in]  syncpointId   Syncpoint ID
+ * @param[out] physAddr      Physical address of the syncpoint aperture
+ * @param[out] limit         Limit of the syncpoint aperture
+ * @param[out] offset        Offset within the syncpoint aperture
+ */
+NV_STATUS
+osGetSyncpointAperture
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvU32 syncpointId,
+    NvU64 *physAddr,
+    NvU64 *limit,
+    NvU32 *offset
+)
+{
+    return nv_get_syncpoint_aperture(syncpointId, physAddr, limit, offset);
+}
+
+/*!
+ * @brief Enable PCIe AtomicOp Requester Enable and return
+ *        the completer-side capabilities that the requester can send.
+ *
+ * @param[in]  pOsGpuInfo   OS_GPU_INFO OS specific GPU information pointer
+ * @param[out] pMask        Mask of supported atomic sizes, including one or more of:
+ *                          OS_PCIE_CAP_MASK_REQ_ATOMICS_32
+ *                          OS_PCIE_CAP_MASK_REQ_ATOMICS_64
+ *                          OS_PCIE_CAP_MASK_REQ_ATOMICS_128
+ *
+ * @returns NV_STATUS, NV_OK if success
+ *          NV_ERR_NOT_SUPPORTED if the platform doesn't support this
+ *            feature.
+ *          NV_ERR_GENERIC for any other error
+ */
+NV_STATUS
+osConfigurePcieReqAtomics
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvU32       *pMask
+)
+{
+    if (pMask)
+    {
+        *pMask = 0U;
+        if (pOsGpuInfo)
+        {
+            if (os_enable_pci_req_atomics(pOsGpuInfo->handle,
+                                          OS_INTF_PCIE_REQ_ATOMICS_32BIT) == NV_OK)
+                *pMask |= OS_PCIE_CAP_MASK_REQ_ATOMICS_32;
+            if (os_enable_pci_req_atomics(pOsGpuInfo->handle,
+                                          OS_INTF_PCIE_REQ_ATOMICS_64BIT) == NV_OK)
+                *pMask |= OS_PCIE_CAP_MASK_REQ_ATOMICS_64;
+            if (os_enable_pci_req_atomics(pOsGpuInfo->handle,
+                                          OS_INTF_PCIE_REQ_ATOMICS_128BIT) == NV_OK)
+                *pMask |= OS_PCIE_CAP_MASK_REQ_ATOMICS_128;
+
+            if (*pMask != 0)
+                return NV_OK;
+        }
+    }
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+/*!
+ * @brief Check whether the GPU is accessible
+ *
+ * @param[in] pGpu   GPU object pointer
+ *
+ * @returns NvBool, NV_TRUE if the GPU is accessible,
+ *          NV_FALSE otherwise
+ */
+NvBool
+osIsGpuAccessible
+(
+    OBJGPU *pGpu
+)
+{
+    return nv_is_gpu_accessible(NV_GET_NV_STATE(pGpu));
+}
+
+/*!
+ * @brief Check whether the GPU has received a shutdown notification from the OS
+ */
+NvBool
+osIsGpuShutdown
+(
+    OBJGPU *pGpu
+)
+{
+    nv_state_t *nv = NV_GET_NV_STATE(pGpu);
+    return nv ? nv->is_shutdown : NV_TRUE;
+}
+
+/*!
+ * @brief Check whether the GPU OS info matches
+ *
+ * @param[in] pGpu   GPU object pointer
+ *
+ * @returns NvBool, NV_TRUE if matched, NV_FALSE otherwise.
+ */
+NvBool
+osMatchGpuOsInfo
+(
+    OBJGPU *pGpu,
+    void   *pOsInfo
+)
+{
+    return nv_match_gpu_os_info(NV_GET_NV_STATE(pGpu), pOsInfo);
+}
+
+/*!
+ * @brief Release GPU OS info.
+ *
+ * @param[in] pOsInfo   GPU OS info pointer
+ *
+ * @returns void
+ */
+void
+osReleaseGpuOsInfo
+(
+    void *pOsInfo
+)
+{
+    nv_put_file_private(pOsInfo);
+}
+
+/*!
+ * @brief Get the free and total memory of a NUMA node, by NUMA node ID, from
+ *        the kernel.
+ *
+ * @param[in]  numaId               NUMA node ID.
+ * @param[out] free_memory_bytes    free memory in bytes.
+ * @param[out] total_memory_bytes   total memory in bytes.
+ *
+ */
+void
+osGetNumaMemoryUsage
+(
+    NvS32 numaId,
+    NvU64 *free_memory_bytes,
+    NvU64 *total_memory_bytes
+)
+{
+    NV_STATUS status = os_get_numa_node_memory_usage(numaId,
+                                                     free_memory_bytes,
+                                                     total_memory_bytes);
+    NV_ASSERT(status == NV_OK);
+}
+
+/*!
+ * @brief Add GPU memory as a NUMA node.
+ *
+ * @param[in/out] pOsGpuInfo    OS specific GPU information pointer
+ * @param[in]     offset        start offset of the partition within FB
+ * @param[in]     size          size of the partition
+ * @param[out]    pNumaNodeId   OS NUMA node id for the added memory.
+ *
+ * @returns NV_OK if all is okay. Otherwise an error-specific value.
+ *
+ */
+NV_STATUS
+osNumaAddGpuMemory
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvU64        offset,
+    NvU64        size,
+    NvU32       *pNumaNodeId
+)
+{
+    nv_state_t *nv = pOsGpuInfo;
+
+    return os_numa_add_gpu_memory(nv->handle, offset, size, pNumaNodeId);
+}
+
+/*!
+ * @brief Remove a particular SMC partition's GPU memory from the OS kernel.
+ *
+ * Removes GPU memory from the OS kernel that was earlier added as a NUMA node
+ * to the kernel, on platforms where the GPU is coherently connected to the CPU.
+ *
+ * @param[in/out] pOsGpuInfo   OS_GPU_INFO OS specific GPU information pointer
+ * @param[in]     offset       start offset of the partition within FB
+ * @param[in]     size         size of the partition
+ * @param[in]     numaNodeId   OS NUMA node id of the memory to be removed.
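+ *
+ * Illustrative pairing with osNumaAddGpuMemory() above (hypothetical values):
+ *
+ *     NvU32 nodeId;
+ *     if (osNumaAddGpuMemory(pOsGpuInfo, offset, size, &nodeId) == NV_OK)
+ *     {
+ *         ...
+ *         osNumaRemoveGpuMemory(pOsGpuInfo, offset, size, nodeId);
+ *     }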
+ * + */ +void +osNumaRemoveGpuMemory +( + OS_GPU_INFO *pOsGpuInfo, + NvU64 offset, + NvU64 size, + NvU32 numaNodeId +) +{ + nv_state_t *nv = pOsGpuInfo; + + NV_STATUS status = os_numa_remove_gpu_memory(nv->handle, offset, size, numaNodeId); + NV_ASSERT(status == NV_OK); + + return; +} + +NvBool +osDmabufIsSupported(void) +{ + return os_dma_buf_enabled; +} + +NV_STATUS +osGetEgmInfo +( + OBJGPU *pGpu, + NvU64 *pPhysAddr, + NvU64 *pSize, + NvS32 *pNodeId +) +{ + return nv_get_egm_info(NV_GET_NV_STATE(pGpu), pPhysAddr, pSize, pNodeId); +} + +/*! + * @brief Offline (i.e., blacklist) the page containing a given address from OS kernel. + * + * @param[in] address Address (SPA) of the page to be offlined + * + */ +NV_STATUS +osOfflinePageAtAddress +( + NvU64 address +) +{ + return os_offline_page_at_address(address); +} + +void osAllocatedRmClient(void *pOsInfo) +{ + nv_file_private_t* nvfp = (nv_file_private_t*)pOsInfo; + + if (nvfp != NULL) + nvfp->bCleanupRmapi = NV_TRUE; +} + +/*! + * @brief Update variable to indicate console managed by drm driver. + * + * @param[in] OBJGPU GPU object pointer + * + * @returns void + */ +void +osDisableConsoleManagement +( + OBJGPU *pGpu +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + nv->client_managed_console = NV_TRUE; +} diff --git a/src/nvidia/arch/nvalloc/unix/src/osapi.c b/src/nvidia/arch/nvalloc/unix/src/osapi.c new file mode 100644 index 0000000..91ac4b8 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/osapi.c @@ -0,0 +1,4818 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + + +#include +#include +#include +#include +#include +#include +#include +#include // Declares RmInitRm(). 
+#include "gpu/gpu.h" +#include "gps.h" + +#include +#include +#include +#include "kernel/gpu/mem_mgr/mem_mgr.h" + +#include +#include + +#include +#include +#include +#include +#include +#include "platform/sli/sli.h" + +#include "rmapi/exports.h" +#include "rmapi/rmapi_utils.h" +#include "rmapi/rs_utils.h" +#include "rmapi/resource_fwd_decls.h" +#include +#include +#include "nv-reg.h" +#include "nv-firmware-registry.h" +#include "core/hal_mgr.h" +#include "gpu/device/device.h" + +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" +#include "gpu/gpu_uuid.h" + +#include "ctrl/ctrl0000/ctrl0000system.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" +#include "ctrl/ctrl0073/ctrl0073system.h" +#include "ctrl/ctrl0073/ctrl0073specific.h" +#include "ctrl/ctrl2080/ctrl2080bios.h" +#include "ctrl/ctrl2080/ctrl2080fb.h" +#include "ctrl/ctrl2080/ctrl2080perf.h" +#include "ctrl/ctrl2080/ctrl2080gpu.h" +#include "ctrl/ctrl402c.h" + +#include "g_nv_name_released.h" // released chip entries from nvChipAutoFlags.h + +#include + +// +// If timer callback comes when PM resume is in progress, then it can't be +// serviced. The timer needs to be rescheduled in this case. This time controls +// the duration of rescheduling. +// +#define TIMER_RESCHED_TIME_DURING_PM_RESUME_NS (100 * 1000 * 1000) + +// +// Helper function which can be called before doing any RM control +// This function: +// +// a. Performs threadStateInit(). +// b. Acquires API lock. +// c. Checks if RMAPI client handle is valid (i.e. RM is initialized) and +// returns early if RMAPI client handle is invalid. +// d. Increments the dynamic power refcount. If GPU is in RTD3 suspended +// state, then it will wake-up the GPU. +// e. Returns the RMAPI interface handle. +// +// This function should be called only when caller doesn't have acquired API +// lock. Caller needs to use RmUnixRmApiEpilogue() after RM control, if +// RmUnixRmApiPrologue() is successful. +// +RM_API *RmUnixRmApiPrologue(nv_state_t *pNv, THREAD_STATE_NODE *pThreadNode, NvU32 module) +{ + threadStateInit(pThreadNode, THREAD_STATE_FLAGS_NONE); + + if ((rmapiLockAcquire(API_LOCK_FLAGS_NONE, module)) == NV_OK) + { + if ((pNv->rmapi.hClient != 0) && + (os_ref_dynamic_power(pNv, NV_DYNAMIC_PM_FINE) == NV_OK)) + { + return rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + } + + rmapiLockRelease(); + } + + threadStateFree(pThreadNode, THREAD_STATE_FLAGS_NONE); + + return NULL; +} + +// +// Helper function which can be called after doing RM control, if +// caller has used RmUnixRmApiPrologue() helper function. This function: +// +// a. Decrements the dynamic power refcount. +// b. Release API lock. +// c. Performs threadStateFree(). 
+// +void RmUnixRmApiEpilogue(nv_state_t *pNv, THREAD_STATE_NODE *pThreadNode) +{ + os_unref_dynamic_power(pNv, NV_DYNAMIC_PM_FINE); + rmapiLockRelease(); + threadStateFree(pThreadNode, THREAD_STATE_FLAGS_NONE); +} + +NvBool RmGpuHasIOSpaceEnabled(nv_state_t * nv) +{ + NvU16 val; + NvBool has_io; + os_pci_read_word(nv->handle, NV_CONFIG_PCI_NV_1, &val); + has_io = FLD_TEST_DRF(_CONFIG, _PCI_NV_1, _IO_SPACE, _ENABLED, val); + return has_io; +} + +// This is a stub function for unix +void osHandleDeferredRecovery( + OBJGPU *pGpu +) +{ + +} + +// This is a stub function for unix +NvBool osIsSwPreInitOnly +( + OS_GPU_INFO *pOsGpuInfo +) +{ + return NV_FALSE; +} + +const NvU8 * RmGetGpuUuidRaw( + nv_state_t *pNv +) +{ + NV_STATUS rmStatus; + OBJGPU *pGpu = NULL; + NvU32 gidFlags; + NvBool isApiLockTaken = NV_FALSE; + + if (pNv->nv_uuid_cache.valid) + return pNv->nv_uuid_cache.uuid; + + if (!rmapiLockIsOwner()) + { + rmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU); + if (rmStatus != NV_OK) + { + return NULL; + } + + isApiLockTaken = NV_TRUE; + } + + pGpu = NV_GET_NV_PRIV_PGPU(pNv); + + // + // PBI is not present in simulation and the loop inside + // pciPbiReadUuid takes up considerable amount of time in + // simulation environment during RM load. + // + if (pGpu && IS_SIMULATION(pGpu)) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + } + rmStatus = NV_ERR_NOT_SUPPORTED; + + if (rmStatus == NV_OK) + { + rmStatus = gpumgrSetUuid(pNv->gpu_id, pNv->nv_uuid_cache.uuid); + if (rmStatus != NV_OK) + { + goto err; + } + + pNv->nv_uuid_cache.valid = NV_TRUE; + goto done; + } + else if (rmStatus == NV_ERR_NOT_SUPPORTED) + { + nv_printf(NV_DBG_INFO, + "NVRM: PBI is not supported for GPU " NV_PCI_DEV_FMT "\n", + NV_PCI_DEV_FMT_ARGS(pNv)); + } + + gidFlags = DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_TYPE,_SHA1) + | DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_FORMAT,_BINARY); + + if (!pGpu) + goto err; + + rmStatus = gpuGetGidInfo(pGpu, NULL, NULL, gidFlags); + if (rmStatus != NV_OK) + goto err; + + if (!pGpu->gpuUuid.isInitialized) + goto err; + + // copy the uuid from the OBJGPU uuid cache + os_mem_copy(pNv->nv_uuid_cache.uuid, pGpu->gpuUuid.uuid, GPU_UUID_LEN); + pNv->nv_uuid_cache.valid = NV_TRUE; + +done: + if (isApiLockTaken) + { + rmapiLockRelease(); + } + + return pNv->nv_uuid_cache.uuid; + +err: + if (isApiLockTaken) + { + rmapiLockRelease(); + } + + return NULL; +} + +static NV_STATUS RmGpuUuidRawToString( + const NvU8 *pGidData, + char *pBuf, + NvU32 bufLen +) +{ + NvU8 *pGidString; + NvU32 GidStrlen; + NV_STATUS rmStatus; + NvU32 gidFlags; + + gidFlags = DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _ASCII) | + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1); + + rmStatus = transformGidToUserFriendlyString(pGidData, RM_SHA1_GID_SIZE, + &pGidString, &GidStrlen, + gidFlags, + RM_UUID_PREFIX_GPU); + if (rmStatus != NV_OK) + return rmStatus; + + if (bufLen >= GidStrlen) + portMemCopy(pBuf, bufLen, pGidString, GidStrlen); + else + rmStatus = NV_ERR_BUFFER_TOO_SMALL; + + portMemFree((void *)pGidString); + + return rmStatus; +} + +// This function should be called with the API and GPU locks already acquired. +void +RmLogGpuCrash(OBJGPU *pGpu) +{ + NvBool bGpuIsLost, bGpuIsConnected; + + // + // Re-evaluate whether or not the GPU is accessible. This could be called + // from a recovery context where the OS has re-enabled MMIO for the device. 
+ // This happens during EEH processing on IBM Power + Linux, and marking + // the device as connected again will allow rcdbAddRmGpuDump() to collect + // more GPU state. + // + bGpuIsLost = pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST); + bGpuIsConnected = pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED); + if (!bGpuIsConnected || bGpuIsLost) + { + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + NvU32 pmcBoot0 = NV_PRIV_REG_RD32(nv->regs->map_u, NV_PMC_BOOT_0); + if (pmcBoot0 == nvp->pmc_boot_0) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED, NV_TRUE); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_LOST, NV_FALSE); + } + } + + // Restore the disconnected properties, if they were reset + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED, bGpuIsConnected); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_LOST, bGpuIsLost); + + // Restore persistence mode to the way it was prior to the crash + osModifyGpuSwStatePersistence(pGpu->pOsGpuInfo, + pGpu->getProperty(pGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE)); +} + +static void free_os_event_under_lock(nv_event_t *event) +{ + event->active = NV_FALSE; + + // If refcount > 0, event will be freed by osDereferenceObjectCount + // when the last associated RM event is freed. + if (event->refcount == 0) + portMemFree(event); +} + +static void free_os_events( + nv_file_private_t *nvfp, + NvHandle client +) +{ + nv_state_t *nv = nv_get_ctl_state(); + nv_event_t **pprev; + + portSyncSpinlockAcquire(nv->event_spinlock); + + pprev = &nv->event_list; + while (*pprev != NULL) + { + nv_event_t *cur = *pprev; + // + // XXX We must be called from either rm_client_free_os_events() or + // RmFreeUnusedClients() for this to work. + // + if ((cur->hParent == client) || (cur->nvfp == nvfp)) + { + *pprev = cur->next; + free_os_event_under_lock(cur); + } + else + { + pprev = &cur->next; + } + } + + portSyncSpinlockRelease(nv->event_spinlock); +} + +static NV_STATUS get_os_event_data( + nv_file_private_t *nvfp, + NvP64 pEvent, + NvU32 *MoreEvents +) +{ + nv_event_t nv_event; + NvUnixEvent *nv_unix_event; + NV_STATUS status; + + status = os_alloc_mem((void**)&nv_unix_event, sizeof(NvUnixEvent)); + if (status != NV_OK) + return status; + + status = nv_get_event(nvfp, &nv_event, MoreEvents); + if (status != NV_OK) + { + status = NV_ERR_OPERATING_SYSTEM; + goto done; + } + + os_mem_set(nv_unix_event, 0, sizeof(NvUnixEvent)); + nv_unix_event->hObject = nv_event.hObject; + nv_unix_event->NotifyIndex = nv_event.index; + nv_unix_event->info32 = nv_event.info32; + nv_unix_event->info16 = nv_event.info16; + + status = os_memcpy_to_user(NvP64_VALUE(pEvent), nv_unix_event, sizeof(NvUnixEvent)); +done: + os_free_mem(nv_unix_event); + return status; +} + +void rm_client_free_os_events( + NvHandle client +) +{ + free_os_events(NULL, client); +} + +void RmFreeUnusedClients( + nv_state_t *nv, + nv_file_private_t *nvfp +) +{ + NvU32 *pClientList; + NvU32 numClients, i; + NV_STATUS status; + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + // + // The 'nvfp' pointer uniquely identifies an open instance in kernel space + // and the kernel interface layer guarantees that we are not called before + // the associated nvfp descriptor is closed. We can thus safely free + // abandoned clients with matching 'nvfp' pointers. 
+ // + status = rmapiGetClientHandlesFromOSInfo(nvfp, &pClientList, &numClients); + if (status != NV_OK) + { + numClients = 0; + } + + for (i = 0; i < numClients; ++i) + { + NV_PRINTF(LEVEL_INFO, "freeing abandoned client 0x%x\n", + pClientList[i]); + + } + + if (numClients != 0) + { + pRmApi->DisableClients(pRmApi, pClientList, numClients); + + portMemFree(pClientList); + } + + // Clean up any remaining events using this nvfp. + free_os_events(nvfp, 0); +} + +static void RmUnbindLock( + nv_state_t *nv +) +{ + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + + if ((pGpu == NULL) || (gpuGetUserClientCount(pGpu) == 0)) + { + nv->flags |= NV_FLAG_UNBIND_LOCK; + } +} + +static NV_STATUS allocate_os_event( + NvHandle hParent, + nv_file_private_t *nvfp, + NvU32 fd +) +{ + nv_state_t *nv = nv_get_ctl_state(); + NvU32 status = NV_OK; + nv_event_t *event; + + nv_event_t *new_event = portMemAllocNonPaged(sizeof(nv_event_t)); + if (new_event == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + new_event->hParent = hParent; + new_event->nvfp = nvfp; + new_event->fd = fd; + new_event->active = NV_TRUE; + new_event->refcount = 0; + + portSyncSpinlockAcquire(nv->event_spinlock); + for (event = nv->event_list; event; event = event->next) + { + // Only one event may be associated with a given fd. + if (event->hParent == hParent && event->fd == fd) + { + status = NV_ERR_INVALID_ARGUMENT; + portSyncSpinlockRelease(nv->event_spinlock); + goto done; + } + } + + new_event->next = nv->event_list; + nv->event_list = new_event; + nvfp->bCleanupRmapi = NV_TRUE; + portSyncSpinlockRelease(nv->event_spinlock); + +done: + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "allocated OS event:\n"); + NV_PRINTF(LEVEL_INFO, " hParent: 0x%x\n", hParent); + NV_PRINTF(LEVEL_INFO, " fd: %d\n", fd); + } + else + { + NV_PRINTF(LEVEL_ERROR, "failed to allocate OS event: 0x%08x\n", status); + status = NV_ERR_INSUFFICIENT_RESOURCES; + portMemFree(new_event); + } + + return status; +} + +static NV_STATUS free_os_event( + NvHandle hParent, + NvU32 fd +) +{ + nv_state_t *nv = nv_get_ctl_state(); + nv_event_t *event, *tmp; + NV_STATUS result; + + portSyncSpinlockAcquire(nv->event_spinlock); + tmp = event = nv->event_list; + while (event) + { + if ((event->fd == fd) && (event->hParent == hParent)) + { + if (event == nv->event_list) + nv->event_list = event->next; + else + tmp->next = event->next; + break; + } + tmp = event; + event = event->next; + } + + if (event != NULL) + { + free_os_event_under_lock(event); + result = NV_OK; + } + else + result = NV_ERR_INVALID_EVENT; + portSyncSpinlockRelease(nv->event_spinlock); + + if (result == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "freed OS event:\n"); + NV_PRINTF(LEVEL_INFO, " hParent: 0x%x\n", hParent); + NV_PRINTF(LEVEL_INFO, " fd: %d\n", fd); + } + else + { + NV_PRINTF(LEVEL_ERROR, "failed to find OS event:\n"); + NV_PRINTF(LEVEL_ERROR, " hParent: 0x%x\n", hParent); + NV_PRINTF(LEVEL_ERROR, " fd: %d\n", fd); + } + + return result; +} + +static void RmExecuteWorkItem( + void *pWorkItem +) +{ + nv_work_item_t *pWi = (nv_work_item_t *)pWorkItem; + NvU32 gpuMask = 0; + NvU32 releaseLocks = 0; + + if (!(pWi->flags & NV_WORK_ITEM_FLAGS_REQUIRES_GPU) && + ((pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS) || + (pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE) || + (pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE) || + (pWi->flags & OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY))) + { + // Requesting one of the GPU locks without providing a GPU instance + NV_ASSERT(0); + goto done; 
+ } + + // Get locks requested by workitem + if (NV_OK != workItemLocksAcquire(pWi->gpuInstance, pWi->flags, + &releaseLocks, &gpuMask)) + { + goto done; + } + + // Some work items may not require a valid GPU instance + if (pWi->flags & NV_WORK_ITEM_FLAGS_REQUIRES_GPU) + { + // Make sure that pGpu is present + OBJGPU *pGpu = gpumgrGetGpu(pWi->gpuInstance); + if (pGpu != NULL) + { + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + NV_ASSERT_OR_GOTO(nv != NULL, done); + + if (!((pWi->flags & OS_QUEUE_WORKITEM_FLAGS_DROP_ON_UNLOAD_QUEUE_FLUSH) && os_is_queue_flush_ongoing(nv->queue))) + { + pWi->func.pGpuFunction(pWi->gpuInstance, pWi->pData); + } + } + else + { + NV_PRINTF(LEVEL_ERROR, "Invalid GPU instance for workitem\n"); + } + } + else + { + if (!((pWi->flags & OS_QUEUE_WORKITEM_FLAGS_DROP_ON_UNLOAD_QUEUE_FLUSH) && os_is_queue_flush_ongoing(NULL))) + { + pWi->func.pSystemFunction(pWi->pData); + } + } + +done: + // Release any locks taken + workItemLocksRelease(releaseLocks, gpuMask); + + if ((pWi->pData != NULL) && + !(pWi->flags & NV_WORK_ITEM_FLAGS_DONT_FREE_DATA)) + { + portMemFree(pWi->pData); + } + + portMemFree((void *)pWi); +} + +static NV_STATUS RmAccessRegistry( + NvHandle hClient, + NvHandle hObject, + NvU32 AccessType, + NvP64 clientDevNodeAddress, + NvU32 DevNodeLength, + NvP64 clientParmStrAddress, + NvU32 ParmStrLength, + NvP64 clientBinaryDataAddress, + NvU32 *pBinaryDataLength, + NvU32 *Data, + NvU32 *Entry +) +{ + NvU32 gpuMask = 0, gpuInstance = 0; + OBJGPU *pGpu; + NvBool isDevice = NV_FALSE; + NV_STATUS RmStatus = NV_ERR_OPERATING_SYSTEM; + CLIENT_ENTRY *pClientEntry; + Device *pDevice; + Subdevice *pSubdevice; + + RMAPI_PARAM_COPY devNodeParamCopy; + NvU8 *tmpDevNode = NULL; + NvU32 copyOutDevNodeLength = 0; + + RMAPI_PARAM_COPY parmStrParamCopy; + char *tmpParmStr = NULL; + NvU32 copyOutParmStrLength = 0; + + RMAPI_PARAM_COPY binaryDataParamCopy; + NvU8 *tmpBinaryData = NULL; + NvU32 BinaryDataLength = 0; + NvU32 copyOutBinaryDataLength = 0; + + if (NV_OK != serverAcquireClient(&g_resServ, hClient, LOCK_ACCESS_WRITE, + &pClientEntry)) + { + return NV_ERR_INVALID_CLIENT; + } + + if (hClient == hObject) + { + pGpu = NULL; + } + else + { + RsClient *pClient = pClientEntry->pClient; + + RmStatus = deviceGetByHandle(pClient, hObject, &pDevice); + if (RmStatus != NV_OK) + { + RmStatus = subdeviceGetByHandle(pClient, hObject, &pSubdevice); + if (RmStatus != NV_OK) + goto done; + + RmStatus = rmGpuGroupLockAcquire(pSubdevice->subDeviceInst, + GPU_LOCK_GRP_SUBDEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU, + &gpuMask); + if (RmStatus != NV_OK) + { + gpuMask = 0; + goto done; + } + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + pGpu = GPU_RES_GET_GPU(pSubdevice); + } + else + { + RmStatus = rmGpuGroupLockAcquire(pDevice->deviceInst, + GPU_LOCK_GRP_DEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU, + &gpuMask); + if (RmStatus != NV_OK) + { + gpuMask = 0; + goto done; + } + + GPU_RES_SET_THREAD_BC_STATE(pDevice); + pGpu = GPU_RES_GET_GPU(pDevice); + isDevice = NV_TRUE; + } + } + + if (pBinaryDataLength) + { + BinaryDataLength = *pBinaryDataLength; + } + + // a passed-in devNode + if (DevNodeLength) + { + // the passed-in DevNodeLength does not account for '\0' + DevNodeLength++; + + if (DevNodeLength > NVOS38_MAX_REGISTRY_STRING_LENGTH) + { + RmStatus = NV_ERR_INVALID_STRING_LENGTH; + goto done; + } + + // get access to client's DevNode + RMAPI_PARAM_COPY_INIT(devNodeParamCopy, tmpDevNode, clientDevNodeAddress, DevNodeLength, 1); + devNodeParamCopy.flags |= 
RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + RmStatus = rmapiParamsAcquire(&devNodeParamCopy, NV_TRUE); + if (RmStatus != NV_OK) + { + RmStatus = NV_ERR_OPERATING_SYSTEM; + goto done; + } + } + + // a passed-in parmStr + if (ParmStrLength) + { + // the passed-in ParmStrLength does not account for '\0' + ParmStrLength++; + + if ((ParmStrLength == 0) || (ParmStrLength > NVOS38_MAX_REGISTRY_STRING_LENGTH)) + { + RmStatus = NV_ERR_INVALID_STRING_LENGTH; + goto done; + } + // get access to client's parmStr + RMAPI_PARAM_COPY_INIT(parmStrParamCopy, tmpParmStr, clientParmStrAddress, ParmStrLength, 1); + RmStatus = rmapiParamsAcquire(&parmStrParamCopy, NV_TRUE); + if (RmStatus != NV_OK) + { + RmStatus = NV_ERR_OPERATING_SYSTEM; + goto done; + } + if (tmpParmStr[ParmStrLength - 1] != '\0') + { + RmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + } + + if ((AccessType == NVOS38_ACCESS_TYPE_READ_BINARY) || + (AccessType == NVOS38_ACCESS_TYPE_WRITE_BINARY)) + { + if ((BinaryDataLength > NVOS38_MAX_REGISTRY_BINARY_LENGTH) || + (BinaryDataLength == 0)) + { + RmStatus = NV_ERR_INVALID_STRING_LENGTH; + goto done; + } + + // get access to client's binaryData + RMAPI_PARAM_COPY_INIT(binaryDataParamCopy, tmpBinaryData, clientBinaryDataAddress, BinaryDataLength, 1); + if (AccessType == NVOS38_ACCESS_TYPE_READ_BINARY) + binaryDataParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + RmStatus = rmapiParamsAcquire(&binaryDataParamCopy, NV_TRUE); + if (RmStatus != NV_OK) + { + RmStatus = NV_ERR_OPERATING_SYSTEM; + goto done; + } + } + + switch (AccessType) + { + case NVOS38_ACCESS_TYPE_READ_DWORD: + RmStatus = osReadRegistryDword(pGpu, + tmpParmStr, Data); + break; + + case NVOS38_ACCESS_TYPE_WRITE_DWORD: + if (isDevice && osIsAdministrator()) + { + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + RmStatus = osWriteRegistryDword(pGpu, + tmpParmStr, *Data); + + if (RmStatus != NV_OK) + goto done; + } + break; + } + + RmStatus = osWriteRegistryDword(pGpu, + tmpParmStr, *Data); + break; + + case NVOS38_ACCESS_TYPE_READ_BINARY: + RmStatus = osReadRegistryBinary(pGpu, + tmpParmStr, tmpBinaryData, &BinaryDataLength); + + if (RmStatus != NV_OK) + { + goto done; + } + + if (BinaryDataLength) + copyOutBinaryDataLength = BinaryDataLength; + + break; + + case NVOS38_ACCESS_TYPE_WRITE_BINARY: + if (isDevice && osIsAdministrator()) + { + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + RmStatus = osWriteRegistryBinary(pGpu, + tmpParmStr, tmpBinaryData, + BinaryDataLength); + + if (RmStatus != NV_OK) + goto done; + } + break; + } + + RmStatus = osWriteRegistryBinary(pGpu, + tmpParmStr, tmpBinaryData, + BinaryDataLength); + break; + + default: + RmStatus = NV_ERR_INVALID_ACCESS_TYPE; + } + + done: + if (gpuMask != 0) + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + + if (tmpDevNode != NULL) + { + // skip copyout on error + if ((RmStatus != NV_OK) || (copyOutDevNodeLength == 0)) + devNodeParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + devNodeParamCopy.paramsSize = copyOutDevNodeLength; + if (NV_OK != rmapiParamsRelease(&devNodeParamCopy)) + if (RmStatus == NV_OK) + RmStatus = NV_ERR_OPERATING_SYSTEM; + } + if (tmpParmStr != NULL) + { + // skip copyout on error + if ((RmStatus != NV_OK) || (copyOutParmStrLength == 0)) + parmStrParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + parmStrParamCopy.paramsSize = copyOutParmStrLength; + if (NV_OK != rmapiParamsRelease(&parmStrParamCopy)) + if (RmStatus == NV_OK) + RmStatus = NV_ERR_OPERATING_SYSTEM; + } + if 
(tmpBinaryData != NULL) + { + // skip copyout on error + if ((RmStatus != NV_OK) || (copyOutBinaryDataLength == 0)) + binaryDataParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + binaryDataParamCopy.paramsSize = copyOutBinaryDataLength; + if (NV_OK != rmapiParamsRelease(&binaryDataParamCopy)) + if (RmStatus == NV_OK) + RmStatus = NV_ERR_OPERATING_SYSTEM; + *pBinaryDataLength = copyOutBinaryDataLength; + } + + serverReleaseClient(&g_resServ, LOCK_ACCESS_WRITE, pClientEntry); + return RmStatus; +} + +static NV_STATUS RmUpdateDeviceMappingInfo( + NvHandle hClient, + NvHandle hDevice, + NvHandle hMappable, + void *pOldCpuAddress, + void *pNewCpuAddress +) +{ + NV_STATUS status; + CLIENT_ENTRY *pClientEntry; + RsClient *pClient; + RsResourceRef *pMappableRef; + RsCpuMapping *pCpuMapping; + Device *pDevice; + Subdevice *pSubdevice; + NvU32 gpuMask = 0; + + status = serverAcquireClient(&g_resServ, hClient, LOCK_ACCESS_WRITE, &pClientEntry); + if (status != NV_OK) + return status; + + pClient = pClientEntry->pClient; + + status = deviceGetByHandle(pClient, hDevice, &pDevice); + if (status != NV_OK) + { + status = subdeviceGetByHandle(pClient, hDevice, &pSubdevice); + if (status != NV_OK) + goto done; + + status = rmGpuGroupLockAcquire(pSubdevice->subDeviceInst, + GPU_LOCK_GRP_SUBDEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU, + &gpuMask); + if (status != NV_OK) + goto done; + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + } + else + { + status = rmGpuGroupLockAcquire(pDevice->deviceInst, + GPU_LOCK_GRP_DEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU, + &gpuMask); + if (status != NV_OK) + goto done; + + GPU_RES_SET_THREAD_BC_STATE(pDevice); + } + + status = clientGetResourceRef(pClient, hMappable, &pMappableRef); + if (status != NV_OK) + goto done; + + if ((objDynamicCastById(pMappableRef->pResource, classId(Memory)) == NULL) && + (objDynamicCastById(pMappableRef->pResource, classId(KernelChannel)) == NULL)) + { + status = NV_ERR_INVALID_OBJECT_HANDLE; + goto done; + } + + status = refFindCpuMappingWithFilter(pMappableRef, + NV_PTR_TO_NvP64(pOldCpuAddress), + serverutilMappingFilterCurrentUserProc, + &pCpuMapping); + if (status != NV_OK) + goto done; + + pCpuMapping->pLinearAddress = NV_PTR_TO_NvP64(pNewCpuAddress); + +done: + + if (gpuMask != 0) + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + + serverReleaseClient(&g_resServ, LOCK_ACCESS_WRITE, pClientEntry); + return status; +} + +static NV_STATUS RmPerformVersionCheck( + void *pData, + NvU32 dataSize +) +{ + nv_ioctl_rm_api_version_t *pParams; + char clientCh, rmCh; + const char *rmStr = NV_VERSION_STRING; + NvBool relaxed = NV_FALSE; + NvU32 i; + NvU32 procId; + char procName[32]; + + // + // rmStr (i.e., NV_VERSION_STRING) must be null-terminated and fit within + // NV_RM_API_VERSION_STRING_LENGTH, so that: + // + // (1) If the versions don't match, we can return rmStr in + // pParams->versionString. + // (2) The below loop is guaranteed to not overrun rmStr. + // + ct_assert(sizeof(NV_VERSION_STRING) <= NV_RM_API_VERSION_STRING_LENGTH); + + if (dataSize != sizeof(nv_ioctl_rm_api_version_t)) + return NV_ERR_INVALID_ARGUMENT; + + pParams = pData; + + // + // write the reply value, so that the client knows we recognized + // the request + // + pParams->reply = NV_RM_API_VERSION_REPLY_RECOGNIZED; + + // + // the client is just querying the version, not verifying against expected. 
+    //
+    if (pParams->cmd == NV_RM_API_VERSION_CMD_QUERY)
+    {
+        os_string_copy(pParams->versionString, rmStr);
+        return NV_OK;
+    }
+
+    //
+    // the client requested relaxed version checking; we will only
+    // compare the strings until the first decimal point.
+    //
+    if (pParams->cmd == NV_RM_API_VERSION_CMD_RELAXED)
+    {
+        relaxed = NV_TRUE;
+    }
+
+    for (i = 0; i < NV_RM_API_VERSION_STRING_LENGTH; i++)
+    {
+        clientCh = pParams->versionString[i];
+        rmCh = rmStr[i];
+
+        //
+        // fail if the current character is not the same
+        //
+        if (clientCh != rmCh)
+        {
+            break;
+        }
+
+        //
+        // if relaxed matching was requested, succeed when we find the
+        // first decimal point
+        //
+        if ((relaxed) && (clientCh == '.'))
+        {
+            return NV_OK;
+        }
+
+        //
+        // we found the end of the strings: succeed
+        //
+        if (clientCh == '\0')
+        {
+            return NV_OK;
+        }
+    }
+
+    //
+    // the version strings did not match: print an error message and
+    // copy the RM's version string into pParams->versionString, so
+    // that the client can report the mismatch; explicitly NULL
+    // terminate the client's string, since we cannot trust it
+    //
+    pParams->versionString[NV_RM_API_VERSION_STRING_LENGTH - 1] = '\0';
+
+    procId = os_get_current_process();
+    os_get_current_process_name(procName, sizeof(procName));
+
+    nv_printf(NV_DBG_ERRORS,
+              "NVRM: API mismatch: the client '%s' (pid %u)\n"
+              "NVRM: has the version %s, but this kernel module has\n"
+              "NVRM: the version %s. Please make sure that this\n"
+              "NVRM: kernel module and all NVIDIA driver components\n"
+              "NVRM: have the same version.\n",
+              procName, procId,
+              pParams->versionString, NV_VERSION_STRING);
+
+    os_string_copy(pParams->versionString, rmStr);
+
+    return NV_ERR_GENERIC;
+}
+
+NV_STATUS RmPowerSourceChangeEvent(
+    nv_state_t *pNv,
+    NvU32 event_val
+)
+{
+    NV2080_CTRL_PERF_SET_POWERSTATE_PARAMS params = {0};
+    RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
+
+    params.powerStateInfo.powerState = event_val ? NV2080_CTRL_PERF_POWER_SOURCE_BATTERY :
+                                                   NV2080_CTRL_PERF_POWER_SOURCE_AC;
+
+    return pRmApi->Control(pRmApi, pNv->rmapi.hClient,
+                           pNv->rmapi.hSubDevice,
+                           NV2080_CTRL_CMD_PERF_SET_POWERSTATE,
+                           &params, sizeof(params));
+}
+
+/*!
+ * @brief Function to request the latest D-Notifier status from the SBIOS.
+ *
+ * Handle certain scenarios (like a fresh boot or suspend/resume
+ * of the system) when RM is not available to receive the Dx notifiers.
+ * This function gets the latest D-Notifier status from the SBIOS
+ * when RM is ready to receive and handle those events.
+ * It uses the GPS_FUNC_REQUESTDXSTATE subfunction to request the
+ * current Dx state.
+ *
+ * @param[in] pNv nv_state_t pointer.
+ */
+void RmRequestDNotifierState(
+    nv_state_t *pNv
+)
+{
+    OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
+    NvU32 supportedFuncs = 0;
+    NvU16 dsmDataSize = sizeof(supportedFuncs);
+    NV_STATUS status = NV_OK;
+
+    status = osCallACPI_DSM(pGpu, ACPI_DSM_FUNCTION_GPS_2X,
+                            GPS_FUNC_REQUESTDXSTATE, &supportedFuncs,
+                            &dsmDataSize);
+    if (status != NV_OK)
+    {
+        //
+        // The call to the 'GPS_FUNC_REQUESTDXSTATE' subfunction may fail if
+        // the SBIOS/EC does not have the corresponding implementation.
+        //
+        NV_PRINTF(LEVEL_INFO,
+                  "%s: Failed to request Dx event update, status 0x%x\n",
+                  __FUNCTION__, status);
+    }
+}
+
+/*!
+ * @brief Deal with D-notifier events to apply a performance
+ * level based on the requested auxiliary power-state.
+ * Read the confluence page "D-Notifiers on Linux" for more details.
+ *
+ * @param[in] pNv nv_state_t pointer.
+ * @param[in] event_type NvU32 Event type.
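+ *
+ * @note The D1..D5 notifications map one-to-one to the auxiliary power
+ * states P0..P4 in the switch below; any other event type is ignored.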
+ */
+static void RmHandleDNotifierEvent(
+    nv_state_t *pNv,
+    NvU32 event_type
+)
+{
+    NV2080_CTRL_PERF_SET_AUX_POWER_STATE_PARAMS params = { 0 };
+    RM_API *pRmApi;
+    THREAD_STATE_NODE threadState;
+    NV_STATUS rmStatus = NV_OK;
+
+    switch (event_type)
+    {
+        case ACPI_NOTIFY_POWER_LEVEL_D1:
+            params.powerState = NV2080_CTRL_PERF_AUX_POWER_STATE_P0;
+            break;
+        case ACPI_NOTIFY_POWER_LEVEL_D2:
+            params.powerState = NV2080_CTRL_PERF_AUX_POWER_STATE_P1;
+            break;
+        case ACPI_NOTIFY_POWER_LEVEL_D3:
+            params.powerState = NV2080_CTRL_PERF_AUX_POWER_STATE_P2;
+            break;
+        case ACPI_NOTIFY_POWER_LEVEL_D4:
+            params.powerState = NV2080_CTRL_PERF_AUX_POWER_STATE_P3;
+            break;
+        case ACPI_NOTIFY_POWER_LEVEL_D5:
+            params.powerState = NV2080_CTRL_PERF_AUX_POWER_STATE_P4;
+            break;
+        default:
+            return;
+    }
+
+    pRmApi = RmUnixRmApiPrologue(pNv, &threadState, RM_LOCK_MODULES_ACPI);
+    if (pRmApi == NULL)
+    {
+        return;
+    }
+
+    rmStatus = pRmApi->Control(pRmApi, pNv->rmapi.hClient,
+                               pNv->rmapi.hSubDevice,
+                               NV2080_CTRL_CMD_PERF_SET_AUX_POWER_STATE,
+                               &params, sizeof(params));
+
+    RmUnixRmApiEpilogue(pNv, &threadState);
+
+    if (rmStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "%s: Failed to handle ACPI D-Notifier event, status=0x%x\n",
+                  __FUNCTION__, rmStatus);
+    }
+}
+
+static NV_STATUS
+RmDmabufVerifyMemHandle(
+    OBJGPU *pGpu,
+    NvHandle hSrcClient,
+    NvHandle hMemory,
+    NvU64 offset,
+    NvU64 size,
+    void *pGpuInstanceInfo,
+    MEMORY_DESCRIPTOR **ppMemDesc
+)
+{
+    NV_STATUS status;
+    RsClient *pClient = NULL;
+    RsResourceRef *pSrcMemoryRef = NULL;
+    Memory *pSrcMemory = NULL;
+    MEMORY_DESCRIPTOR *pMemDesc = NULL;
+
+    NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hSrcClient, &pClient));
+
+    status = clientGetResourceRef(pClient, hMemory, &pSrcMemoryRef);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    pSrcMemory = dynamicCast(pSrcMemoryRef->pResource, Memory);
+    if (pSrcMemory == NULL)
+    {
+        return NV_ERR_INVALID_OBJECT;
+    }
+
+    pMemDesc = pSrcMemory->pMemDesc;
+
+    // Check if hMemory belongs to the same pGpu
+    if ((pMemDesc->pGpu != pGpu) &&
+        (pSrcMemory->pGpu != pGpu))
+    {
+        return NV_ERR_INVALID_OBJECT_PARENT;
+    }
+
+    // Offset and size must be aligned to the OS page size
+    if (!NV_IS_ALIGNED64(offset, os_page_size) ||
+        !NV_IS_ALIGNED64(size, os_page_size))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    // Only supported for vidmem and sysmem (only for 0FB) handles
+    if ((memdescGetAddressSpace(pMemDesc) != ADDR_FBMEM) &&
+        (!pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB)))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    if ((size == 0) ||
+        (size > memdescGetSize(pMemDesc)) ||
+        (offset > (memdescGetSize(pMemDesc) - size)))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    *ppMemDesc = pMemDesc;
+
+    return NV_OK;
+}
+
+static NV_STATUS
+RmDmabufGetClientAndDevice(
+    OBJGPU *pGpu,
+    NvHandle hClient,
+    NvHandle hMemory,
+    NvU8 mappingType,
+    NvHandle *phClient,
+    NvHandle *phDevice,
+    NvHandle *phSubdevice,
+    void **ppGpuInstanceInfo
+)
+{
+    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
+
+    // No dma-buf support for SLI-enabled GPUs
+    if (IsSLIEnabled(pGpu))
+    {
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    //
+    // MAPPING_TYPE_FORCE_PCIE is to be used only on coherent systems with a
+    // direct PCIe connection between the exporter and importer.
+    // MIG is not a supported use-case for dma-buf on these systems.
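+    // Hence the early NV_ERR_NOT_SUPPORTED below; every other mapping type
+    // is served through the MemoryManager's internal client, device and
+    // subdevice handles.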
+ // + if (mappingType == NV_DMABUF_EXPORT_MAPPING_TYPE_FORCE_PCIE) + { + return NV_ERR_NOT_SUPPORTED; + } + + *phClient = pMemoryManager->hClient; + *phDevice = pMemoryManager->hDevice; + *phSubdevice = pMemoryManager->hSubdevice; + *ppGpuInstanceInfo = NULL; + + return NV_OK; +} + +static void +RmDmabufPutClientAndDevice( + OBJGPU *pGpu, + NvHandle hClient, + NvHandle hDevice, + NvHandle hSubdevice, + void *pGpuInstanceInfo +) +{ +} + +/* + * --------------------------------------------------------------------------- + * + * The routines below are part of the interface between the kernel interface + * layer and the kernel-agnostic portions of the resource manager. + * + * --------------------------------------------------------------------------- + */ + +NvBool NV_API_CALL rm_init_private_state( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + NvBool retval; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + retval = RmInitPrivateState(pNv); + + NV_EXIT_RM_RUNTIME(sp,fp); + + return retval; +} + +void NV_API_CALL rm_free_private_state( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + RmFreePrivateState(pNv); + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NvBool NV_API_CALL rm_init_adapter( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + NvBool retval = NV_FALSE; + void *fp; + NvBool bEnabled; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_DEVICE_INIT); + + // LOCK: acquire API lock + if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT) == NV_OK) + { + if (!((gpumgrQueryGpuDrainState(pNv->gpu_id, &bEnabled, NULL) == NV_OK) + && bEnabled)) + { + if (pNv->flags & NV_FLAG_PERSISTENT_SW_STATE) + { + retval = RmPartiallyInitAdapter(pNv); + } + else + { + retval = RmInitAdapter(pNv); + } + } + + // UNLOCK: release API lock + rmapiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return retval; +} + +void NV_API_CALL rm_disable_adapter( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_ASSERT_OK(os_flush_work_queue(pNv->queue, NV_TRUE)); + + // LOCK: acquire API lock + if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK) + { + if (pNv->flags & NV_FLAG_PERSISTENT_SW_STATE) + { + RmPartiallyDisableAdapter(pNv); + } + else + { + RmDisableAdapter(pNv); + } + + // UNLOCK: release API lock + rmapiLockRelease(); + } + + NV_ASSERT_OK(os_flush_work_queue(pNv->queue, NV_TRUE)); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +void NV_API_CALL rm_shutdown_adapter( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK) + { + RmShutdownAdapter(pNv); + + // UNLOCK: release API lock + rmapiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NV_STATUS NV_API_CALL rm_exclude_adapter( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + NV_STATUS rmStatus; + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + rmStatus = RmExcludeAdapter(pNv); + + 
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return rmStatus;
+}
+
+NV_STATUS NV_API_CALL rm_acquire_api_lock(
+    nvidia_stack_t *sp
+)
+{
+    THREAD_STATE_NODE threadState;
+    NV_STATUS rmStatus;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    // LOCK: acquire API lock
+    rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return rmStatus;
+}
+
+NV_STATUS NV_API_CALL rm_release_api_lock(
+    nvidia_stack_t *sp
+)
+{
+    THREAD_STATE_NODE threadState;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    // UNLOCK: release API lock
+    rmapiLockRelease();
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL rm_acquire_gpu_lock(
+    nvidia_stack_t *sp,
+    nv_state_t *nv
+)
+{
+    THREAD_STATE_NODE threadState;
+    NV_STATUS rmStatus;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    // LOCK: acquire GPU lock
+    rmStatus = rmDeviceGpuLocksAcquire(NV_GET_NV_PRIV_PGPU(nv),
+                                       GPUS_LOCK_FLAGS_NONE,
+                                       RM_LOCK_MODULES_OSAPI);
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return rmStatus;
+}
+
+NV_STATUS NV_API_CALL rm_release_gpu_lock(
+    nvidia_stack_t *sp,
+    nv_state_t *nv
+)
+{
+    THREAD_STATE_NODE threadState;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    // UNLOCK: release GPU lock
+    rmDeviceGpuLocksRelease(NV_GET_NV_PRIV_PGPU(nv), GPUS_LOCK_FLAGS_NONE, NULL);
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL rm_acquire_all_gpus_lock(
+    nvidia_stack_t *sp
+)
+{
+    THREAD_STATE_NODE threadState;
+    NV_STATUS rmStatus;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    // LOCK: acquire all GPUs lock
+    rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return rmStatus;
+}
+
+NV_STATUS NV_API_CALL rm_release_all_gpus_lock(
+    nvidia_stack_t *sp
+)
+{
+    THREAD_STATE_NODE threadState;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    // UNLOCK: release all GPUs lock
+    rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Handle the ACPI_NOTIFY_GPS_STATUS_CHANGE event.
+ *
+ * This function is called when the SBIOS triggers the GPS
+ * STATUS_CHANGE event; it issues the RM control call
+ * NV0000_CTRL_CMD_SYSTEM_GPS_CONTROL to initialize the GPS
+ * data from the SBIOS.
+ */
+static void RmHandleGPSStatusChange
+(
+    nv_state_t *pNv
+)
+{
+}
+
+/*!
+ * @brief Function to handle device-specific ACPI events.
+ *
+ * @param[in] sp nvidia_stack_t pointer.
+ * @param[in] nv nv_state_t pointer.
+ * @param[in] event_type NvU32 Event type.
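+ *
+ * Display-change probes, GPS status changes and the D1..D5 power
+ * notifiers are dispatched below; any other event type is flagged
+ * with an assert.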
+ */ +void NV_API_CALL rm_acpi_notify( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 event_type +) +{ + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + + switch (event_type) + { + case ACPI_VIDEO_NOTIFY_PROBE: + { + THREAD_STATE_NODE threadState; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + RmHandleDisplayChange(sp, nv); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + break; + } + + case ACPI_NOTIFY_GPS_STATUS_CHANGE: + RmHandleGPSStatusChange(nv); + break; + + case ACPI_NOTIFY_POWER_LEVEL_D1: /* fallthrough */ + case ACPI_NOTIFY_POWER_LEVEL_D2: /* fallthrough */ + case ACPI_NOTIFY_POWER_LEVEL_D3: /* fallthrough */ + case ACPI_NOTIFY_POWER_LEVEL_D4: /* fallthrough */ + case ACPI_NOTIFY_POWER_LEVEL_D5: + RmHandleDNotifierEvent(nv, event_type); + break; + + default: + NV_PRINTF(LEVEL_INFO, "No support for 0x%x event\n", event_type); + NV_ASSERT(0); + break; + } + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +static void nv_align_mmap_offset_length( + nv_usermap_access_params_t *nvuap) +{ + NvU64 page_size = os_page_size; + NvU64 end = nvuap->size + (nvuap->addr & (page_size - 1)); + + nvuap->memArea.numRanges = 1; + nvuap->memArea.pRanges[0].start = NV_ALIGN_DOWN(nvuap->addr, page_size); + nvuap->memArea.pRanges[0].size = NV_ALIGN_UP(end, page_size); + nvuap->offset = NV_ALIGN_DOWN(nvuap->offset, page_size); +} + +static inline NV_STATUS RmGetArrayMinMax( + NvU64 *array, + NvU64 count, + NvU64 *min, + NvU64 *max +) +{ + NvU64 i; + + if (array == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + *min = array[0]; + *max = array[0]; + + if (count == 1) + return NV_OK; + + for (i = 1; i < count; i++) + { + if (array[i] > *max) + *max = array[i]; + + if (array[i] < *min) + *min = array[i]; + } + + return NV_OK; +} + +static NV_STATUS RmGetAllocPrivate(RmClient *, NvU32, NvU64, NvU64, NvU32 *, void **, + NvU64 *); + +/* Must be called with the API lock and the GPU locks */ +static NV_STATUS RmCreateMmapContextLocked( + RmClient *pRmClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 address, + NvU64 size, + NvU64 offset, + NvU32 cachingType, + NvU32 fd +) +{ + NV_STATUS status = NV_OK; + void *pAllocPriv = NULL; + OBJGPU *pGpu = NULL; + RsClient *pClient = staticCast(pRmClient, RsClient); + NvBool bCoherentAtsCpuOffset = NV_FALSE; + NvBool bHostCoherentFbOffset = NV_FALSE; + nv_state_t *pNv = NULL; + NvU64 addr = (NvU64)address; + NvU32 prot = 0; + NvU64 pageIndex = 0; + nv_usermap_access_params_t *nvuap = NULL; + NvBool bClientMap = (pClient->hClient == hDevice); + + if (!bClientMap) + { + if (CliSetGpuContext(pClient->hClient, hDevice, &pGpu, NULL) != NV_OK) + { + Subdevice *pSubdevice; + + status = subdeviceGetByHandle(pClient, hDevice, &pSubdevice); + if (status != NV_OK) + return status; + + pGpu = GPU_RES_GET_GPU(pSubdevice); + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + } + } + nvuap = tlsEntryGet(TLS_ENTRY_ID_MAPPING_CONTEXT); + if (nvuap != NULL) + { + NV_ASSERT(tlsEntryRelease(TLS_ENTRY_ID_MAPPING_CONTEXT) == 0); + prot = NV_PROTECT_READ_WRITE; + // + // TODO: Add this to the mapping context so we don't need to do gymnastics + // to figure out what the correct pNv is + // + pNv = NV_GET_NV_STATE(pGpu); + goto add_ctx_to_file; + } + + NV_ASSERT_OK_OR_RETURN(os_alloc_mem((void**)&nvuap, sizeof(nv_usermap_access_params_t))); + portMemSet(nvuap, 0, sizeof(nv_usermap_access_params_t)); + + NV_ASSERT_OK_OR_GOTO(status, os_alloc_mem((void**)&(nvuap->memArea.pRanges), sizeof(MemoryRange)), free_nvuap); + + nvuap->addr = addr; + nvuap->size = size; + nvuap->offset = offset; + 
nvuap->caching = cachingType; + + // + // Assume the allocation is contiguous until RmGetMmapPteArray + // determines otherwise. + // + nvuap->contig = NV_TRUE; + nv_align_mmap_offset_length(nvuap); + + if (pGpu != NULL) + { + pNv = NV_GET_NV_STATE(pGpu); + } + // + // If no device is given, or the address isn't in the given device's BARs, + // validate this as a system memory mapping and associate it with the + // control device. + // + if ((pNv == NULL) || + (!IS_REG_OFFSET(pNv, addr, size) && + !IS_FB_OFFSET(pNv, addr, size) && + !(bCoherentAtsCpuOffset || bHostCoherentFbOffset) && + !IS_IMEM_OFFSET(pNv, addr, size))) + { + pNv = nv_get_ctl_state(); + + // + // Validate the mapping request by looking up the underlying sysmem + // allocation. + // + status = RmGetAllocPrivate(pRmClient, hMemory, addr, size, &prot, &pAllocPriv, + &pageIndex); + + if (status != NV_OK) + { + goto done; + } + } + +add_ctx_to_file: + status = nv_add_mapping_context_to_file(pNv, nvuap, prot, pAllocPriv, + pageIndex, fd); + +done: + os_free_mem(nvuap->memArea.pRanges); +free_nvuap: + os_free_mem(nvuap); + return status; +} + +// TODO: Bug 1802250: [uvm8] Use an alt stack in all functions in unix/src/osapi.c +NV_STATUS rm_create_mmap_context( + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 address, + NvU64 size, + NvU64 offset, + NvU32 cachingType, + NvU32 fd +) +{ + NV_STATUS rmStatus = NV_OK; + // LOCK: acquire API lock + if ((rmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI)) == NV_OK) + { + CLIENT_ENTRY *pClientEntry; + RmClient *pRmClient; + + if (NV_OK != serverutilAcquireClient(hClient, LOCK_ACCESS_READ, &pClientEntry, + &pRmClient)) + { + // UNLOCK: release API lock + rmapiLockRelease(); + return NV_ERR_INVALID_CLIENT; + } + + if (pRmClient->ProcID != osGetCurrentProcess()) + { + rmStatus = NV_ERR_INVALID_CLIENT; + } + // LOCK: acquire GPUs lock + else if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) == NV_OK) + { + rmStatus = RmCreateMmapContextLocked(pRmClient, hDevice, + hMemory, address, size, offset, + cachingType, fd); + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + serverutilReleaseClient(LOCK_ACCESS_READ, pClientEntry); + + // UNLOCK: release API lock + rmapiLockRelease(); + } + + return rmStatus; +} + +static NV_STATUS RmGetAllocPrivate( + RmClient *pRmClient, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + NvU32 *pProtection, + void **ppPrivate, + NvU64 *pPageIndex +) +{ + NV_STATUS rmStatus; + PMEMORY_DESCRIPTOR pMemDesc; + NvU32 pageOffset; + NvU64 pageCount; + NvU64 endingOffset; + RsResourceRef *pResourceRef; + RmResource *pRmResource; + void *pMemData; + NvBool bPeerIoMem; + NvBool bReadOnlyMem; + *pProtection = NV_PROTECT_READ_WRITE; + *ppPrivate = NULL; + + pageOffset = (offset & ~os_page_mask); + offset &= os_page_mask; + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + rmStatus = clientGetResourceRef(staticCast(pRmClient, RsClient), + hMemory, &pResourceRef); + if (rmStatus != NV_OK) + goto done; + + pRmResource = dynamicCast(pResourceRef->pResource, RmResource); + if (!pRmResource) + { + rmStatus = NV_ERR_INVALID_OBJECT; + goto done; + } + + rmStatus = rmresGetMemoryMappingDescriptor(pRmResource, &pMemDesc); + if (rmStatus != NV_OK) + goto done; + + bReadOnlyMem = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY); + bPeerIoMem = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_PEER_IO_MEM); + + if (!(pMemDesc->Allocated || bPeerIoMem || + 
(memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOC_FROM_SCANOUT_CARVEOUT)) || + (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOW_EXT_SYSMEM_USER_CPU_MAPPING)))) + { + NV_PRINTF(LEVEL_ERROR, "Mmap is not allowed\n"); + rmStatus = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + switch (memdescGetAddressSpace(pMemDesc)) + { + case ADDR_SYSMEM: + case ADDR_EGM: + break; + default: + rmStatus = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + pMemData = memdescGetMemData(pMemDesc); + if (pMemData == NULL) + { + rmStatus = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + rmStatus = os_match_mmap_offset(pMemData, offset, pPageIndex); + if (rmStatus != NV_OK) + goto done; + + if (!portSafeAddU64(pageOffset, length, &endingOffset)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + pageCount = (endingOffset / os_page_size); + + if (!portSafeAddU64(*pPageIndex + ((endingOffset % os_page_size) ? 1 : 0), + pageCount, &pageCount)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (pageCount > NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (bReadOnlyMem) + *pProtection = NV_PROTECT_READABLE; + + *ppPrivate = pMemData; + +done: + return rmStatus; +} + +NV_STATUS rm_get_adapter_status( + nv_state_t *pNv, + NvU32 *pStatus +) +{ + NV_STATUS rmStatus = NV_ERR_OPERATING_SYSTEM; + + // LOCK: acquire API lock + if (rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI) == NV_OK) + { + rmStatus = RmGetAdapterStatus(pNv, pStatus); + + // UNLOCK: release API lock + rmapiLockRelease(); + } + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_get_adapter_status_external( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + if (rm_get_adapter_status(pNv, &rmStatus) != NV_OK) + { + rmStatus = NV_ERR_OPERATING_SYSTEM; + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NvBool NV_API_CALL rm_init_rm( + nvidia_stack_t *sp +) +{ + NvBool retval; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + retval = RmInitRm(); + + NV_EXIT_RM_RUNTIME(sp,fp); + + return retval; +} + +void NV_API_CALL rm_shutdown_rm( + nvidia_stack_t *sp +) +{ + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + RmShutdownRm(); + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NvBool NV_API_CALL rm_init_event_locks( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + void *fp; + NvBool ret; + + NV_ENTER_RM_RUNTIME(sp,fp); + + pNv->event_spinlock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + ret = (pNv->event_spinlock != NULL); + + NV_EXIT_RM_RUNTIME(sp,fp); + return ret; +} + +void NV_API_CALL rm_destroy_event_locks( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + if (pNv && pNv->event_spinlock) + portSyncSpinlockDestroy(pNv->event_spinlock); + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +void NV_API_CALL rm_get_vbios_version( + nvidia_stack_t *sp, + nv_state_t *pNv, + char *vbiosString +) +{ + *vbiosString = '\0'; +} + +NV_STATUS NV_API_CALL rm_stop_user_channels( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_restart_user_channels( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_ioctl( + nvidia_stack_t *sp, + nv_state_t *pNv, + nv_file_private_t *nvfp, + NvU32 Command, + void *pData, + NvU32 dataSize +) +{ + NV_STATUS rmStatus = NV_OK; + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + // + // Some ioctls are handled 
entirely inside the OS layer and don't need to
+    // suffer the overhead of calling into RM core.
+    //
+    switch (Command)
+    {
+        case NV_ESC_ALLOC_OS_EVENT:
+        {
+            nv_ioctl_alloc_os_event_t *pApi = pData;
+
+            if (dataSize != sizeof(nv_ioctl_alloc_os_event_t))
+            {
+                rmStatus = NV_ERR_INVALID_ARGUMENT;
+                break;
+            }
+
+            pApi->Status = allocate_os_event(pApi->hClient, nvfp, pApi->fd);
+            break;
+        }
+        case NV_ESC_FREE_OS_EVENT:
+        {
+            nv_ioctl_free_os_event_t *pApi = pData;
+
+            if (dataSize != sizeof(nv_ioctl_free_os_event_t))
+            {
+                rmStatus = NV_ERR_INVALID_ARGUMENT;
+                break;
+            }
+
+            pApi->Status = free_os_event(pApi->hClient, pApi->fd);
+            break;
+        }
+        case NV_ESC_RM_GET_EVENT_DATA:
+        {
+            NVOS41_PARAMETERS *pApi = pData;
+
+            if (dataSize != sizeof(NVOS41_PARAMETERS))
+            {
+                rmStatus = NV_ERR_INVALID_ARGUMENT;
+                break;
+            }
+
+            pApi->status = get_os_event_data(nvfp,
+                                             pApi->pEvent,
+                                             &pApi->MoreEvents);
+            break;
+        }
+        default:
+        {
+            threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+            rmStatus = RmIoctl(pNv, nvfp, Command, pData, dataSize);
+            threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+            break;
+        }
+    }
+
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return rmStatus;
+}
+
+static void _deferredClientListFreeCallback(void *unused)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    NV_STATUS status = serverFreeDisabledClients(&g_resServ, 0, pSys->clientListDeferredFreeLimit);
+    //
+    // Possible return values:
+    //   NV_WARN_MORE_PROCESSING_REQUIRED - Iteration limit reached, need to call again
+    //   NV_ERR_IN_USE - Already running on another thread, try again later
+    // In both cases, schedule a worker to clean up anything that remains
+    //
+    if (status != NV_OK)
+    {
+        status = osQueueSystemWorkItem(_deferredClientListFreeCallback, unused);
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_NOTICE, "Failed to schedule deferred free callback. Freeing immediately.\n");
+            serverFreeDisabledClients(&g_resServ, 0, 0);
+        }
+    }
+}
+
+void NV_API_CALL rm_cleanup_file_private(
+    nvidia_stack_t *sp,
+    nv_state_t *pNv,
+    nv_file_private_t *nvfp
+)
+{
+    THREAD_STATE_NODE threadState;
+    void *fp;
+    RM_API *pRmApi;
+    RM_API_CONTEXT rmApiContext = {0};
+    NvU32 i;
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+
+    //
+    // Skip cleaning up this fd if:
+    // - no RMAPI clients and events were ever allocated on this fd
+    // - no RMAPI object handles were exported on this fd
+    // Access nvfp->handles without locking as fd cleanup is synchronised by the kernel
+    //
+    if (!nvfp->bCleanupRmapi && nvfp->handles == NULL)
+        goto done;
+
+    pRmApi = rmapiGetInterface(RMAPI_EXTERNAL);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+    threadStateSetTimeoutOverride(&threadState, 10 * 1000);
+
+    if (rmapiPrologue(pRmApi, &rmApiContext) != NV_OK) {
+        // Balance the threadStateInit() above before bailing out.
+        threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+        NV_EXIT_RM_RUNTIME(sp,fp);
+        return;
+    }
+
+    // LOCK: acquire API lock. Low priority so cleanup doesn't block active threads
+    if (rmapiLockAcquire(RMAPI_LOCK_FLAGS_LOW_PRIORITY, RM_LOCK_MODULES_OSAPI) == NV_OK)
+    {
+        // Unref any object which was exported on this file.
+        if (nvfp->handles != NULL)
+        {
+            for (i = 0; i < nvfp->maxHandles; i++)
+            {
+                if (nvfp->handles[i] == 0)
+                {
+                    continue;
+                }
+
+                RmFreeObjExportHandle(nvfp->handles[i]);
+                nvfp->handles[i] = 0;
+            }
+
+            os_free_mem(nvfp->handles);
+            nvfp->handles = NULL;
+            nvfp->maxHandles = 0;
+        }
+
+        // Disable any RM clients associated with this file.
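+        // This is a two-step teardown: RmFreeUnusedClients() only disables
+        // the matching clients (via pRmApi->DisableClients); the actual
+        // free happens in serverFreeDisabledClients(), either immediately
+        // below or from the deferred callback scheduled further down.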
+        RmFreeUnusedClients(pNv, nvfp);
+
+        // Unless configured otherwise, immediately free all disabled clients
+        if (!pSys->bUseDeferredClientListFree)
+            serverFreeDisabledClients(&g_resServ, RM_LOCK_STATES_API_LOCK_ACQUIRED, 0);
+
+        // UNLOCK: release API lock
+        rmapiLockRelease();
+    }
+
+    // Start the deferred free callback if necessary
+    if (pSys->bUseDeferredClientListFree)
+        _deferredClientListFreeCallback(NULL);
+
+    rmapiEpilogue(pRmApi, &rmApiContext);
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+
+done:
+    if (nvfp->ctl_nvfp != NULL)
+    {
+        nv_put_file_private(nvfp->ctl_nvfp_priv);
+        nvfp->ctl_nvfp = NULL;
+        nvfp->ctl_nvfp_priv = NULL;
+    }
+
+    NV_EXIT_RM_RUNTIME(sp,fp);
+}
+
+void NV_API_CALL rm_unbind_lock(
+    nvidia_stack_t *sp,
+    nv_state_t *pNv
+)
+{
+    THREAD_STATE_NODE threadState;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    // LOCK: acquire API lock
+    if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI) == NV_OK)
+    {
+        RmUnbindLock(pNv);
+
+        // UNLOCK: release API lock
+        rmapiLockRelease();
+    }
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+}
+
+NV_STATUS NV_API_CALL rm_read_registry_dword(
+    nvidia_stack_t *sp,
+    nv_state_t *nv,
+    const char *regParmStr,
+    NvU32 *Data
+)
+{
+    OBJGPU *pGpu = NULL;
+    NV_STATUS RmStatus;
+    void *fp;
+    NvBool isApiLockTaken = NV_FALSE;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+
+    //
+    // We can be called from different contexts:
+    //
+    // 1) early initialization without device state.
+    // 2) from outside the RM API (without the lock held)
+    //
+    // In context 1) the API lock is not needed, and
+    // in context 2) it needs to be acquired.
+    //
+    if (nv != NULL)
+    {
+        // LOCK: acquire API lock
+        if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI)) != NV_OK)
+        {
+            NV_EXIT_RM_RUNTIME(sp,fp);
+            return RmStatus;
+        }
+
+        isApiLockTaken = NV_TRUE;
+    }
+
+    pGpu = NV_GET_NV_PRIV_PGPU(nv);
+
+    // Skipping the NULL check as osReadRegistryDword takes care of it.
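+    // (In context 1) above, nv is NULL and NV_GET_NV_PRIV_PGPU() yields a
+    // NULL pGpu; osReadRegistryDword accepts that, presumably falling back
+    // to a device-independent lookup.)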
+ RmStatus = osReadRegistryDword(pGpu, regParmStr, Data); + + if (isApiLockTaken == NV_TRUE) + { + // UNLOCK: release API lock + rmapiLockRelease(); + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return RmStatus; +} + +NV_STATUS NV_API_CALL rm_write_registry_dword( + nvidia_stack_t *sp, + nv_state_t *nv, + const char *regParmStr, + NvU32 Data +) +{ + NV_STATUS RmStatus; + void *fp; + NvBool isApiLockTaken = NV_FALSE; + + NV_ENTER_RM_RUNTIME(sp,fp); + + if (nv != NULL) + { + // LOCK: acquire API lock + if ((RmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK) + { + NV_EXIT_RM_RUNTIME(sp,fp); + return RmStatus; + } + + isApiLockTaken = NV_TRUE; + } + + RmStatus = RmWriteRegistryDword(nv, regParmStr, Data); + + if (isApiLockTaken == NV_TRUE) + { + // UNLOCK: release API lock + rmapiLockRelease(); + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return RmStatus; +} + +NV_STATUS NV_API_CALL rm_write_registry_binary( + nvidia_stack_t *sp, + nv_state_t *nv, + const char *regParmStr, + NvU8 *Data, + NvU32 cbLen +) +{ + NV_STATUS RmStatus; + void *fp; + NvBool isApiLockTaken = NV_FALSE; + + NV_ENTER_RM_RUNTIME(sp,fp); + + if (nv != NULL) + { + // LOCK: acquire API lock + if ((RmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK) + { + NV_EXIT_RM_RUNTIME(sp,fp); + return RmStatus; + } + + isApiLockTaken = NV_TRUE; + } + + RmStatus = RmWriteRegistryBinary(nv, regParmStr, Data, cbLen); + + if (isApiLockTaken == NV_TRUE) + { + // UNLOCK: release API lock + rmapiLockRelease(); + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return RmStatus; +} + +NV_STATUS NV_API_CALL rm_write_registry_string( + nvidia_stack_t *sp, + nv_state_t *nv, + const char *regParmStr, + const char *string, + NvU32 stringLength +) +{ + NV_STATUS rmStatus; + void *fp; + NvBool isApiLockTaken = NV_FALSE; + + NV_ENTER_RM_RUNTIME(sp,fp); + + if (nv != NULL) + { + // LOCK: acquire API lock + if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK) + { + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; + } + + isApiLockTaken = NV_TRUE; + } + + rmStatus = RmWriteRegistryString(nv, regParmStr, string, (stringLength + 1)); + + if (isApiLockTaken == NV_TRUE) + { + // UNLOCK: release API lock + rmapiLockRelease(); + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +static NvBool NV_API_CALL rm_is_space(const char ch) +{ + // + // return true if it is a: + // ' ' : (space - decimal 32.) + // '\t' : (TAB - decimal 9) + // 'LF' : (Line feed, new line - decimal 10) + // 'VT' : (Vertical TAB - decimal 11) + // 'FF' : (Form feed, new page - decimal 12) + // '\r' : (carriage return - decimal 13) + // + return ((ch == ' ') || ((ch >= '\t') && (ch <= '\r'))); +} + +char* NV_API_CALL rm_remove_spaces(const char *in) +{ + unsigned int len = os_string_length(in) + 1; + const char *in_ptr; + char *out, *out_ptr; + + if (os_alloc_mem((void **)&out, len) != NV_OK) + return NULL; + + in_ptr = in; + out_ptr = out; + + while (*in_ptr != '\0') + { + if (!rm_is_space(*in_ptr)) + *out_ptr++ = *in_ptr; + in_ptr++; + } + *out_ptr = '\0'; + + return out; +} + +char* NV_API_CALL rm_string_token(char **strp, const char delim) +{ + char *s, *token; + + if ((strp == NULL) || (*strp == NULL)) + return NULL; + + s = token = *strp; + *strp = NULL; + + for (; *s != '\0'; s++) { + if (*s == delim) { + *s = '\0'; + *strp = ++s; + break; + } + } + + return token; +} + +// Parse string passed in NVRM as module parameter. 
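+// The expected form is a semicolon-separated list of name=value pairs,
+// for example (illustrative): "Key1=1;Key2=0x10". Values are parsed with
+// os_strtoul() using base 0, so decimal and 0x-prefixed hex both work.
+// Names matching an nv_parms[] entry update that parameter directly;
+// any other name is written to the RM registry as a dword.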
+void NV_API_CALL rm_parse_option_string(nvidia_stack_t *sp, const char *nvRegistryDwords)
+{
+    unsigned int i;
+    nv_parm_t *entry;
+    char *option_string = NULL;
+    char *ptr, *token;
+    char *name, *value;
+    NvU32 data;
+
+    if (nvRegistryDwords != NULL)
+    {
+        if ((option_string = rm_remove_spaces(nvRegistryDwords)) == NULL)
+        {
+            return;
+        }
+
+        ptr = option_string;
+
+        while ((token = rm_string_token(&ptr, ';')) != NULL)
+        {
+            if (!(name = rm_string_token(&token, '=')) || !os_string_length(name))
+            {
+                continue;
+            }
+
+            if (!(value = rm_string_token(&token, '=')) || !os_string_length(value))
+            {
+                continue;
+            }
+
+            if (rm_string_token(&token, '=') != NULL)
+            {
+                continue;
+            }
+
+            data = os_strtoul(value, NULL, 0);
+
+            for (i = 0; (entry = &nv_parms[i])->name != NULL; i++)
+            {
+                if (os_string_compare(entry->name, name) == 0)
+                    break;
+            }
+
+            if (!entry->name)
+                rm_write_registry_dword(sp, NULL, name, data);
+            else
+                *entry->data = data;
+        }
+
+        // Free the memory allocated by rm_remove_spaces()
+        os_free_mem(option_string);
+    }
+}
+
+NV_STATUS NV_API_CALL rm_run_rc_callback(
+    nvidia_stack_t *sp,
+    nv_state_t *nv
+)
+{
+    OBJGPU *pGpu;
+    void *fp;
+
+    /* make sure our timer's not still running when it shouldn't be */
+    if (nv == NULL)
+        return NV_ERR_GENERIC;
+
+    pGpu = NV_GET_NV_PRIV_PGPU(nv);
+    if (pGpu == NULL)
+        return NV_ERR_GENERIC;
+
+    if (nv->rc_timer_enabled == 0)
+        return NV_ERR_GENERIC;
+
+    if (!FULL_GPU_SANITY_CHECK(pGpu))
+    {
+        return NV_ERR_GENERIC;
+    }
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+
+    osRun1HzCallbacksNow(pGpu);
+
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return NV_OK;
+}
+
+static void _tmrEventServiceTimerWorkItem
+(
+    NvU32 gpuInstance,
+    void *pArgs
+)
+{
+    OBJGPU *pGpu = gpumgrGetGpu(gpuInstance);
+    OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
+    NV_STATUS status = NV_OK;
+
+    status = tmrEventServiceTimer(pGpu, pTmr, (TMR_EVENT *)pArgs);
+
+    if (status != NV_OK)
+        NV_PRINTF(LEVEL_ERROR, "Timer event failed from OS timer callback workitem with status 0x%x\n", status);
+}
+
+static NV_STATUS RmRunNanoTimerCallback(
+    OBJGPU *pGpu,
+    void *pTmrEvent
+)
+{
+    THREAD_STATE_NODE threadState;
+    NV_STATUS status = NV_OK;
+
+    threadStateInitISRAndDeferredIntHandler(&threadState, pGpu,
+        THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
+
+    //
+    // OS timers fire in ISR context, hence we schedule a workitem for timer
+    // event service. GPU timer events are also handled in the same manner,
+    // allowing us to use the same callback functions for both.
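+    // (The workitem below runs with the per-device GPU group lock held and
+    // with OS_QUEUE_WORKITEM_FLAGS_DONT_FREE_PARAMS set, so the TMR_EVENT
+    // stays owned by the timer code rather than the workitem queue.)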
+ // + status = osQueueWorkItem(pGpu, + _tmrEventServiceTimerWorkItem, + pTmrEvent, + (OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE | + OS_QUEUE_WORKITEM_FLAGS_DONT_FREE_PARAMS)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR,"Queuing workitem for timer event failed with status :0x%x\n", status); + } + + // Out of conflicting thread + threadStateFreeISRAndDeferredIntHandler(&threadState, + pGpu, THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER); + + return status; +} + +NV_STATUS NV_API_CALL rm_run_nano_timer_callback +( + nvidia_stack_t *sp, + nv_state_t *nv, + void *pTmrEvent +) +{ + NV_STATUS status; + OBJGPU *pGpu = NULL; + void *fp; + + if (nv == NULL) + return NV_ERR_GENERIC; + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + if (pGpu == NULL) + return NV_ERR_GENERIC; + + if (!FULL_GPU_SANITY_FOR_PM_RESUME(pGpu)) + { + return NV_ERR_GENERIC; + } + + NV_ENTER_RM_RUNTIME(sp,fp); + + status = RmRunNanoTimerCallback(pGpu, pTmrEvent); + + NV_EXIT_RM_RUNTIME(sp,fp); + + return status; +} + +void NV_API_CALL rm_execute_work_item( + nvidia_stack_t *sp, + void *pNvWorkItem +) +{ + void *fp; + THREAD_STATE_NODE threadState; + + NV_ENTER_RM_RUNTIME(sp, fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + RmExecuteWorkItem(pNvWorkItem); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp, fp); +} + +const char* NV_API_CALL rm_get_device_name( + NvU16 device, + NvU16 subsystem_vendor, + NvU16 subsystem_device +) +{ + unsigned int i; + const char *tmpName = NULL; + + for (i = 0; i < NV_ARRAY_ELEMENTS(sChipsReleased); i++) + { + // if the device ID doesn't match, go to the next entry + if (device != sChipsReleased[i].devID) + { + continue; + } + + // if the entry has 0 for the subsystem IDs, then the device + // ID match is sufficient, but continue scanning through + // sChipsReleased[] in case there is a subsystem ID match later + // in the table + if (sChipsReleased[i].subSystemVendorID == 0 && + sChipsReleased[i].subSystemID == 0) + { + tmpName = sChipsReleased[i].name; + continue; + } + + if (subsystem_vendor == sChipsReleased[i].subSystemVendorID && + subsystem_device == sChipsReleased[i].subSystemID) + { + tmpName = sChipsReleased[i].name; + break; + } + } + + return (tmpName != NULL) ? tmpName : "Unknown"; +} + +NV_STATUS rm_access_registry( + NvHandle hClient, + NvHandle hObject, + NvU32 AccessType, + NvP64 clientDevNodeAddress, + NvU32 DevNodeLength, + NvP64 clientParmStrAddress, + NvU32 ParmStrLength, + NvP64 clientBinaryDataAddress, + NvU32 *pBinaryDataLength, + NvU32 *Data, + NvU32 *Entry +) +{ + NV_STATUS RmStatus; + NvBool bReadOnly = (AccessType == NVOS38_ACCESS_TYPE_READ_DWORD) || + (AccessType == NVOS38_ACCESS_TYPE_READ_BINARY); + + // LOCK: acquire API lock + if ((RmStatus = rmapiLockAcquire(bReadOnly ? 
RMAPI_LOCK_FLAGS_READ : RMAPI_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_OSAPI)) == NV_OK) + { + RmStatus = RmAccessRegistry(hClient, + hObject, + AccessType, + clientDevNodeAddress, + DevNodeLength, + clientParmStrAddress, + ParmStrLength, + clientBinaryDataAddress, + pBinaryDataLength, + Data, + Entry); + + // UNLOCK: release API lock + rmapiLockRelease(); + } + + return RmStatus; +} + +NV_STATUS rm_update_device_mapping_info( + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + void *pOldCpuAddress, + void *pNewCpuAddress +) +{ + NV_STATUS RmStatus; + + // LOCK: acquire API lock + if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU)) == NV_OK) + { + RmStatus = RmUpdateDeviceMappingInfo(hClient, + hDevice, + hMemory, + pOldCpuAddress, + pNewCpuAddress); + + // UNLOCK: release API lock + rmapiLockRelease(); + } + + return RmStatus; +} + +static NvBool NV_API_CALL rm_is_legacy_device( + NvU16 device_id, + NvU16 subsystem_vendor, + NvU16 subsystem_device, + NvBool print_warning +) +{ + return NV_FALSE; +} + +static NvBool NV_API_CALL rm_is_legacy_arch( + NvU32 pmc_boot_0, + NvU32 pmc_boot_42 +) +{ + NvBool legacy = NV_FALSE; + + return legacy; +} + +NV_STATUS NV_API_CALL rm_is_supported_device( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + OBJSYS *pSys; + OBJHALMGR *pHalMgr; + GPUHWREG *reg_mapping; + NvU32 myHalPublicID; + void *fp; + NvU32 pmc_boot_0; + NvU32 pmc_boot_42; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pSys = SYS_GET_INSTANCE(); + pHalMgr = SYS_GET_HALMGR(pSys); + + reg_mapping = osMapKernelSpace(pNv->regs->cpu_address, + os_page_size, + NV_MEMORY_UNCACHED, + NV_PROTECT_READABLE); + + if (reg_mapping == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to map registers!\n"); + rmStatus = NV_ERR_OPERATING_SYSTEM; + goto threadfree; + } + pmc_boot_0 = NV_PRIV_REG_RD32(reg_mapping, NV_PMC_BOOT_0); + pmc_boot_42 = NV_PRIV_REG_RD32(reg_mapping, NV_PMC_BOOT_42); + + osUnmapKernelSpace(reg_mapping, os_page_size); + + if ((pmc_boot_0 == 0xFFFFFFFF) && (pmc_boot_42 == 0xFFFFFFFF)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: The NVIDIA GPU %04x:%02x:%02x.%x\n" + "NVRM: (PCI ID: %04x:%04x) installed in this system has\n" + "NVRM: fallen off the bus and is not responding to commands.\n", + pNv->pci_info.domain, pNv->pci_info.bus, pNv->pci_info.slot, + pNv->pci_info.function, pNv->pci_info.vendor_id, + pNv->pci_info.device_id); + rmStatus = NV_ERR_GPU_IS_LOST; + goto threadfree; + } + + /* + * For legacy architectures, rm_is_legacy_arch() prints "legacy" message. + * We do not want to print "unsupported" message for legacy architectures + * to avoid confusion. Also, the probe should not continue for legacy + * architectures. Hence, we set rmStatus to NV_ERR_NOT_SUPPORTED and + * goto threadfree. 
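+     * (A GPU that has fallen off the bus is caught earlier by the all-ones
+     * check on NV_PMC_BOOT_0/NV_PMC_BOOT_42, since PCI reads to a missing
+     * device return 0xFFFFFFFF.)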
+ */ + if (rm_is_legacy_arch(pmc_boot_0, pmc_boot_42)) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + goto threadfree; + } + + rmStatus = halmgrGetHalForGpu(pHalMgr, pmc_boot_0, pmc_boot_42, &myHalPublicID); + + if (rmStatus != NV_OK) + { + goto print_unsupported; + } + + goto threadfree; + +print_unsupported: + nv_printf(NV_DBG_ERRORS, + "NVRM: The NVIDIA GPU %04x:%02x:%02x.%x (PCI ID: %04x:%04x)\n" + "NVRM: installed in this system is not supported by the\n" + "NVRM: NVIDIA %s driver release.\n" + "NVRM: Please see 'Appendix A - Supported NVIDIA GPU Products'\n" + "NVRM: in this release's README, available on the operating system\n" + "NVRM: specific graphics driver download page at www.nvidia.com.\n", + pNv->pci_info.domain, pNv->pci_info.bus, pNv->pci_info.slot, + pNv->pci_info.function, pNv->pci_info.vendor_id, + pNv->pci_info.device_id, NV_VERSION_STRING); + +threadfree: + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NvBool NV_API_CALL rm_is_supported_pci_device( + NvU8 pci_class, + NvU8 pci_subclass, + NvU16 vendor, + NvU16 device, + NvU16 subsystem_vendor, + NvU16 subsystem_device, + NvBool print_legacy_warning +) +{ + const NvU16 nv_pci_vendor_id = 0x10DE; + const NvU16 nv_pci_id_riva_tnt = 0x0020; + const NvU8 nv_pci_class_display = 0x03; + const NvU8 nv_pci_subclass_display_vga = 0x00; + const NvU8 nv_pci_subclass_display_3d = 0x02; + + if (pci_class != nv_pci_class_display) + { + return NV_FALSE; + } + + if ((pci_subclass != nv_pci_subclass_display_vga) && + (pci_subclass != nv_pci_subclass_display_3d)) + { + return NV_FALSE; + } + + if (vendor != nv_pci_vendor_id) + { + return NV_FALSE; + } + + if (device < nv_pci_id_riva_tnt) + { + return NV_FALSE; + } + + if (rm_is_legacy_device( + device, + subsystem_vendor, + subsystem_device, + print_legacy_warning)) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +/* + * Performs the I2C transfers which are related with DP AUX channel + */ +static NV_STATUS RmDpAuxI2CTransfer +( + nv_state_t *pNv, + NvU32 displayId, + NvU8 addr, + NvU32 len, + NvU8 *pData, + NvBool bWrite +) +{ + NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS *pParams; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_STATUS status; + + if (len > NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE) + { + NV_PRINTF(LEVEL_ERROR, + "%s: requested I2C transfer length %u is greater than maximum supported length %u\n", + __FUNCTION__, len, NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE); + return NV_ERR_NOT_SUPPORTED; + } + + pParams = portMemAllocNonPaged(sizeof(*pParams)); + if (pParams == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemSet(pParams, 0, sizeof(*pParams)); + + pParams->subDeviceInstance = 0; + pParams->displayId = displayId; + pParams->addr = addr; + pParams->size = len; + pParams->bWrite = bWrite; + + if (bWrite) + { + portMemCopy(pParams->data, NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE, + pData, len); + } + + status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hDisp, + NV0073_CTRL_CMD_DP_AUXCH_I2C_TRANSFER_CTRL, + pParams, sizeof(*pParams)); + + if ((status == NV_OK) && !bWrite) + { + portMemCopy(pData, len, pParams->data, pParams->size); + } + + portMemFree(pParams); + + return status; +} + +/* + * Performs the I2C transfers which are not related with DP AUX channel + */ +static NV_STATUS RmNonDPAuxI2CTransfer +( + nv_state_t *pNv, + NvU8 portId, + nv_i2c_cmd_t type, + NvU8 addr, + NvU8 command, + NvU32 len, + NvU8 *pData +) +{ + 
NV402C_CTRL_I2C_TRANSACTION_PARAMS *params; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_STATUS rmStatus = NV_OK; + + params = portMemAllocNonPaged(sizeof(*params)); + if (params == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemSet(params, 0, sizeof(*params)); + + params->portId = portId; + // precondition our address (our stack requires this) + params->deviceAddress = addr << 1; + + switch (type) + { + case NV_I2C_CMD_WRITE: + params->transData.i2cBlockData.bWrite = NV_TRUE; + /* fall through*/ + + case NV_I2C_CMD_READ: + params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW; + params->transData.i2cBlockData.messageLength = len; + params->transData.i2cBlockData.pMessage = pData; + break; + + case NV_I2C_CMD_SMBUS_WRITE: + if (len == 2) + { + params->transData.smbusWordData.bWrite = NV_TRUE; + } + else + { + params->transData.smbusByteData.bWrite = NV_TRUE; + } + /* fall through*/ + + case NV_I2C_CMD_SMBUS_READ: + if (len == 2) + { + params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW; + params->transData.smbusWordData.message = pData[0] | ((NvU16)pData[1] << 8); + params->transData.smbusWordData.registerAddress = command; + } + else + { + params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW; + params->transData.smbusByteData.message = pData[0]; + params->transData.smbusByteData.registerAddress = command; + } + break; + + case NV_I2C_CMD_SMBUS_BLOCK_WRITE: + if (pData[0] >= len) { + portMemFree(params); + return NV_ERR_INVALID_ARGUMENT; + } + params->transData.smbusBlockData.bWrite = NV_TRUE; + params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW; + params->transData.smbusBlockData.registerAddress = command; + params->transData.smbusBlockData.messageLength = pData[0]; + params->transData.smbusBlockData.pMessage = pData + 1; + break; + + case NV_I2C_CMD_SMBUS_BLOCK_READ: + params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW; + params->transData.smbusBlockData.registerAddress = command; + params->transData.smbusBlockData.messageLength = len; + params->transData.smbusBlockData.pMessage = pData; + break; + + case NV_I2C_CMD_SMBUS_QUICK_WRITE: + params->transData.smbusQuickData.bWrite = NV_TRUE; + /* fall through*/ + + case NV_I2C_CMD_SMBUS_QUICK_READ: + params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW; + break; + + case NV_I2C_CMD_BLOCK_WRITE: + params->transData.i2cBufferData.bWrite = NV_TRUE; + /* fall through */ + + case NV_I2C_CMD_BLOCK_READ: + params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW; + params->transData.i2cBufferData.registerAddress = command; + params->transData.i2cBufferData.messageLength = len; + params->transData.i2cBufferData.pMessage = pData; + break; + + default: + portMemFree(params); + return NV_ERR_INVALID_ARGUMENT; + } + + rmStatus = pRmApi->Control(pRmApi, pNv->rmapi.hClient, + pNv->rmapi.hI2C, + NV402C_CTRL_CMD_I2C_TRANSACTION, + params, sizeof(*params)); + + // + // For NV_I2C_CMD_SMBUS_READ, copy the read data to original + // data buffer. 
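+    // The word transfer is little-endian in the buffer: the low byte of
+    // smbusWordData.message is returned in pData[0] and the high byte in
+    // pData[1], mirroring how the write path packed it above.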
+ // + if (rmStatus == NV_OK && type == NV_I2C_CMD_SMBUS_READ) + { + if (len == 2) + { + pData[0] = (params->transData.smbusWordData.message & 0xff); + pData[1] = params->transData.smbusWordData.message >> 8; + } + else + { + pData[0] = params->transData.smbusByteData.message; + } + } + + portMemFree(params); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_i2c_transfer( + nvidia_stack_t *sp, + nv_state_t *pNv, + void *pI2cAdapter, + nv_i2c_cmd_t type, + NvU8 addr, + NvU8 command, + NvU32 len, + NvU8 *pData +) +{ + THREAD_STATE_NODE threadState; + nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv); + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = NULL; + NvBool unlockApi = NV_FALSE; + NvBool unlockGpu = NV_FALSE; + NvU32 x; + void *fp; + NvU32 numDispId = 0; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (pNvp->flags & NV_INIT_FLAG_PUBLIC_I2C) + { + // LOCK: acquire API lock + if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK) + goto finish; + + unlockApi = NV_TRUE; + + // LOCK: acquire GPUs lock + if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK) + goto finish; + + unlockGpu = NV_TRUE; + } + + pGpu = NV_GET_NV_PRIV_PGPU(pNv); + + if (!pGpu) + { + rmStatus = NV_ERR_GENERIC; + goto finish; + } + + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + if (pNvp->i2c_adapters[x].pOsAdapter == pI2cAdapter) + { + break; + } + } + + if (x == MAX_I2C_ADAPTERS) + { + rmStatus = NV_ERR_GENERIC; + goto finish; + } + + for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++) + { + NvU32 displayId = pNvp->i2c_adapters[x].displayId[numDispId]; + + if (displayId == INVALID_DISP_ID) + { + continue; + } + + // Handle i2c-over-DpAux adapters separately from regular i2c adapters + if (displayId == 0) + { + rmStatus = RmNonDPAuxI2CTransfer(pNv, pNvp->i2c_adapters[x].port, + type, addr, command, len, pData); + } + else + { + if ((type != NV_I2C_CMD_READ) && (type != NV_I2C_CMD_WRITE)) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + goto semafinish; + } + + rmStatus = RmDpAuxI2CTransfer(pNv, displayId, addr, len, pData, + type == NV_I2C_CMD_WRITE); + } +semafinish: + if (rmStatus == NV_OK) + { + break; + } + } + +finish: + if (unlockGpu) + { + // UNLOCK: release GPU lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + if (unlockApi) + { + // UNLOCK: release API lock + rmapiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +static void rm_i2c_add_adapter( + nv_state_t *pNv, + NvU32 port, + NvU32 displayId +) +{ + NvU32 y, free; + nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv); + NvU32 numDispId = 0; + + for (y = 0, free = MAX_I2C_ADAPTERS; y < MAX_I2C_ADAPTERS; y++) + { + if (pNvp->i2c_adapters[y].pOsAdapter == NULL) + { + // Only find the first free entry, and ignore the rest + if (free == MAX_I2C_ADAPTERS) + { + free = y; + } + } + else if (pNvp->i2c_adapters[y].port == port) + { + break; + } + } + + if (y < MAX_I2C_ADAPTERS) + { + for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++) + { + if (pNvp->i2c_adapters[y].displayId[numDispId] == INVALID_DISP_ID) + { + pNvp->i2c_adapters[y].displayId[numDispId] = displayId; + break; + } + else + { + NV_PRINTF(LEVEL_INFO, + "%s: adapter already exists (port=0x%x, displayId=0x%x)\n", + __FUNCTION__, port, + pNvp->i2c_adapters[y].displayId[numDispId]); + } + } + + if (numDispId == MAX_DISP_ID_PER_ADAPTER) + { + NV_PRINTF(LEVEL_ERROR, + "%s: no more free display Id 
entries in adapter\n", + __FUNCTION__); + } + + return; + } + + if (free == MAX_I2C_ADAPTERS) + { + NV_PRINTF(LEVEL_ERROR, "%s: no more free adapter entries exist\n", + __FUNCTION__); + return; + } + + pNvp->i2c_adapters[free].pOsAdapter = nv_i2c_add_adapter(pNv, port); + pNvp->i2c_adapters[free].port = port; + // When port is added, numDispId will be 0. + pNvp->i2c_adapters[free].displayId[numDispId] = displayId; +} + +void RmI2cAddGpuPorts(nv_state_t * pNv) +{ + NvU32 x = 0; + nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv); + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + NvU32 displayMask; + NV_STATUS status; + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv); + NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS systemGetSupportedParams = { 0 }; + + if (IS_VIRTUAL(pGpu) || NV_IS_SOC_DISPLAY_DEVICE(pNv)) + return; + + // Make displayId as Invalid. + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + NvU32 numDispId; + + for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++) + { + pNvp->i2c_adapters[x].displayId[numDispId] = INVALID_DISP_ID; + } + } + + // First, set up the regular i2c adapters - one per i2c port + if (pNv->rmapi.hI2C != 0) + { + NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS i2cPortInfoParams = { 0 }; + + status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hI2C, + NV402C_CTRL_CMD_I2C_GET_PORT_INFO, + &i2cPortInfoParams, sizeof(i2cPortInfoParams)); + + if (status == NV_OK) + { + for (x = 0; x < NV_ARRAY_ELEMENTS(i2cPortInfoParams.info); x++) + { + // + // Check if this port is implemented and RM I2C framework has + // validated this port. Only limited amount of ports can + // be added to the OS framework. + // + if (FLD_TEST_DRF(402C_CTRL, _I2C_GET_PORT_INFO, _IMPLEMENTED, + _YES, i2cPortInfoParams.info[x]) && + FLD_TEST_DRF(402C_CTRL, _I2C_GET_PORT_INFO, _VALID, + _YES, i2cPortInfoParams.info[x])) + { + rm_i2c_add_adapter(pNv, x, 0); + } + } + } + } + + // + // Now set up the i2c-over-DpAux adapters - one per DP OD + // + // 1. Perform NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS RM control which + // will return the mask for all the display ID's. + // 2. Loop for all the display ID's and do + // NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO RM control call. For each + // output resource, check for the following requirements: + // a. It must be DisplayPort. + // b. It must be internal to the GPU (ie, not on the board) + // c. It must be directly connected to the physical connector (ie, no DP + // 1.2 multistream ODs). + // 3. Perform NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID RM control for + // getting the I2C port data. + // + // With these restrictions, we should only end up with at most one OD + // per DP connector. 
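+    // NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID reports the DDC port, with
+    // NV0073_CTRL_SPECIFIC_I2C_PORT_NONE meaning "no port"; the returned ID
+    // is 1-based, so (ddcPortId - 1) below converts it to the 0-based port
+    // numbering used by the regular i2c adapters above.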
+ // + + if (pNv->rmapi.hDisp == 0) + { + return; + } + + systemGetSupportedParams.subDeviceInstance = 0; + status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hDisp, + NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, + &systemGetSupportedParams, sizeof(systemGetSupportedParams)); + + if (status != NV_OK) + { + return; + } + + for (displayMask = systemGetSupportedParams.displayMask; + displayMask != 0; + displayMask &= ~LOWESTBIT(displayMask)) + { + NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS orInfoParams = { 0 }; + NvU32 displayId = LOWESTBIT(displayMask); + + orInfoParams.subDeviceInstance = 0; + orInfoParams.displayId = displayId; + + status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hDisp, + NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, + &orInfoParams, sizeof(orInfoParams)); + + if ((status == NV_OK) && + (orInfoParams.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) && + ((orInfoParams.protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A) || + (orInfoParams.protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B)) && + (orInfoParams.location == NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP) && + (!orInfoParams.bIsDispDynamic)) + { + NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS i2cPortIdParams = { 0 }; + + i2cPortIdParams.subDeviceInstance = 0; + i2cPortIdParams.displayId = displayId; + + status = pRmApi->Control(pRmApi, + pNv->rmapi.hClient, + pNv->rmapi.hDisp, + NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID, + &i2cPortIdParams, + sizeof(i2cPortIdParams)); + + if ((status == NV_OK) && + (i2cPortIdParams.ddcPortId != NV0073_CTRL_SPECIFIC_I2C_PORT_NONE)) + { + rm_i2c_add_adapter(pNv, i2cPortIdParams.ddcPortId - 1, displayId); + } + } + } +} + +void NV_API_CALL rm_i2c_remove_adapters( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + // + // Cycle through all adapter entries, and first remove the adapter + // from the list from the kernel, then remove the i2c adapter + // list once that is completed. This should only be used from exit + // module time. Otherwise it could fail to remove some of the + // kernel adapters and subsequent transfer requests would result + // in crashes. 
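+    // nv_i2c_del_adapter() detaches the adapter from the OS i2c core before
+    // the RM-side bookkeeping is cleared, so no new transfer should arrive
+    // for an entry that is being torn down.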
+    //
+    NvU32 x = 0;
+    nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv);
+    NvU32 numDispId;
+
+    for (x = 0; x < MAX_I2C_ADAPTERS; x++)
+    {
+        if (pNvp->i2c_adapters[x].pOsAdapter != NULL)
+        {
+            nv_i2c_del_adapter(pNv, pNvp->i2c_adapters[x].pOsAdapter);
+
+            pNvp->i2c_adapters[x].pOsAdapter = NULL;
+            pNvp->i2c_adapters[x].port = 0;
+            for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++)
+            {
+                pNvp->i2c_adapters[x].displayId[numDispId] = INVALID_DISP_ID;
+            }
+        }
+    }
+}
+
+NvBool NV_API_CALL rm_i2c_is_smbus_capable(
+    nvidia_stack_t *sp,
+    nv_state_t *pNv,
+    void *pI2cAdapter
+)
+{
+    THREAD_STATE_NODE threadState;
+    nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv);
+    NV_STATUS rmStatus = NV_OK;
+    OBJGPU *pGpu = NULL;
+    NvBool unlock = NV_FALSE;
+    NvU32 x;
+    NvBool ret = NV_FALSE;
+    void *fp;
+    NvU32 numDispId = 0;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    if (pNvp->flags & NV_INIT_FLAG_PUBLIC_I2C)
+    {
+        // LOCK: acquire API lock
+        if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK)
+            goto semafinish;
+
+        unlock = NV_TRUE;
+    }
+
+    pGpu = NV_GET_NV_PRIV_PGPU(pNv);
+
+    if (!pGpu)
+    {
+        goto semafinish;
+    }
+
+    for (x = 0; x < MAX_I2C_ADAPTERS; x++)
+    {
+        if (pNvp->i2c_adapters[x].pOsAdapter == pI2cAdapter)
+        {
+            break;
+        }
+    }
+
+    if (x == MAX_I2C_ADAPTERS)
+    {
+        goto semafinish;
+    }
+
+    // We do not support SMBus functions on i2c-over-DPAUX adapters
+    for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++)
+    {
+        if (pNvp->i2c_adapters[x].displayId[numDispId] == 0x0)
+        {
+            ret = NV_TRUE;
+        }
+    }
+
+semafinish:
+    if (unlock)
+    {
+        // UNLOCK: release API lock
+        rmapiLockRelease();
+    }
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return ret;
+}
+
+NV_STATUS NV_API_CALL rm_perform_version_check(
+    nvidia_stack_t *sp,
+    void *pData,
+    NvU32 dataSize
+)
+{
+    THREAD_STATE_NODE threadState;
+    NV_STATUS rmStatus;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    rmStatus = RmPerformVersionCheck(pData, dataSize);
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return rmStatus;
+}
+
+//
+// Handles the Power Source Change event (AC/DC) for Notebooks.
+// Notebooks from Maxwell onwards have only one GPU, so this function grabs
+// the first GPU from GpuMgr and calls the subdevice RmControl.
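+// The bracketing below is deliberate: the API lock is taken first, then a
+// fine-grained dynamic power reference is held so the GPU stays powered
+// while the GPU lock is acquired and the event is delivered.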
+// +void NV_API_CALL rm_power_source_change_event( + nvidia_stack_t *sp, + NvU32 event_val +) +{ + THREAD_STATE_NODE threadState; + void *fp; + nv_state_t *nv; + NV_STATUS rmStatus = NV_OK; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_EVENT)) == NV_OK) + { + OBJGPU *pGpu = gpumgrGetGpu(0); + if (pGpu != NULL) + { + nv = NV_GET_NV_STATE(pGpu); + if ((rmStatus = os_ref_dynamic_power(nv, NV_DYNAMIC_PM_FINE)) == + NV_OK) + { + // LOCK: acquire GPU lock + if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_EVENT)) == + NV_OK) + { + rmStatus = RmPowerSourceChangeEvent(nv, event_val); + + // UNLOCK: release GPU lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + os_unref_dynamic_power(nv, NV_DYNAMIC_PM_FINE); + } + // UNLOCK: release API lock + rmapiLockRelease(); + } + } + + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: Failed to handle Power Source change event, status=0x%x\n", + __FUNCTION__, rmStatus); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +void NV_API_CALL rm_request_dnotifier_state( + nv_stack_t *sp, + nv_state_t *pNv +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(pNv); + + if (nvp->b_mobile_config_enabled) + { + THREAD_STATE_NODE threadState; + void *fp; + GPU_MASK gpuMask; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + if ((rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_ACPI)) == NV_OK) + { + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv); + + // LOCK: acquire per device lock + if ((pGpu != NULL) && + ((rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_SUBDEVICE, + GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_ACPI, + &gpuMask)) == NV_OK)) + { + RmRequestDNotifierState(pNv); + + // UNLOCK: release per device lock + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + } + + // UNLOCK: release API lock + rmapiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + } +} + +NV_STATUS NV_API_CALL rm_p2p_dma_map_pages( + nvidia_stack_t *sp, + nv_dma_device_t *peer, + NvU8 *pGpuUuid, + NvU64 pageSize, + NvU32 pageCount, + NvU64 *pDmaAddresses, + void **ppPriv +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_p2p_get_gpu_info( + nvidia_stack_t *sp, + NvU64 gpuVirtualAddress, + NvU64 length, + NvU8 **ppGpuUuid, + void **ppGpuInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent( + nvidia_stack_t *sp, + NvU64 gpuVirtualAddress, + NvU64 length, + void **p2pObject, + NvU64 *pPhysicalAddresses, + NvU32 *pEntries, + NvBool bForcePcie, + void *pPlatformData, + void *pGpuInfo, + void **ppMigInfo, + NvBool *pMemCpuCacheable +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_p2p_get_pages( + nvidia_stack_t *sp, + NvU64 p2pToken, + NvU32 vaSpaceToken, + NvU64 gpuVirtualAddress, + NvU64 length, + NvU64 *pPhysicalAddresses, + NvU32 *pWreqMbH, + NvU32 *pRreqMbH, + NvU32 *pEntries, + NvU8 **ppGpuUuid, + void *pPlatformData, + NvBool *pMemCpuCacheable +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_p2p_register_callback( + nvidia_stack_t *sp, + NvU64 p2pToken, + NvU64 gpuVirtualAddress, + NvU64 length, + void *pPlatformData, + void (*pFreeCallback)(void *pData), + void *pData +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL 
rm_p2p_put_pages_persistent( + nvidia_stack_t *sp, + void *p2pObject, + void *pKey, + void *pMigInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_p2p_put_pages( + nvidia_stack_t *sp, + NvU64 p2pToken, + NvU32 vaSpaceToken, + NvU64 gpuVirtualAddress, + void *pKey +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +char* NV_API_CALL rm_get_gpu_uuid( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + NV_STATUS rmStatus; + const NvU8 *pGid; + char *pGidString; + + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // Allocate space for the ASCII string + rmStatus = os_alloc_mem((void **)&pGidString, GPU_UUID_ASCII_LEN); + if (rmStatus != NV_OK) + { + pGidString = NULL; + goto done; + } + + // Get the raw UUID; note the pGid is cached, so we do not need to free it + pGid = RmGetGpuUuidRaw(nv); + + if (pGid != NULL) + { + // Convert the raw UUID to ASCII + rmStatus = RmGpuUuidRawToString(pGid, pGidString, GPU_UUID_ASCII_LEN); + if (rmStatus != NV_OK) + { + os_free_mem(pGidString); + pGidString = NULL; + } + } + else + { + const char *pTmpString = "GPU-???????\?-???\?-???\?-???\?-????????????"; + + portStringCopy(pGidString, GPU_UUID_ASCII_LEN, pTmpString, + portStringLength(pTmpString) + 1); + } + +done: + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return pGidString; +} + +// +// This function will return the UUID in the binary format +// +const NvU8 * NV_API_CALL rm_get_gpu_uuid_raw( + nvidia_stack_t *sp, + nv_state_t *nv) +{ + THREAD_STATE_NODE threadState; + void *fp; + const NvU8 *pGid; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGid = RmGetGpuUuidRaw(nv); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return pGid; +} + +static void rm_set_firmware_logs( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + void *fp; + NvU32 enableFirmwareLogsRegVal = NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG; + + NV_ENTER_RM_RUNTIME(sp,fp); + + (void) RmReadRegistryDword(nv, NV_REG_ENABLE_GPU_FIRMWARE_LOGS, + &enableFirmwareLogsRegVal); + nv->enable_firmware_logs = gpumgrGetRmFirmwareLogsEnabled(enableFirmwareLogsRegVal); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +void NV_API_CALL rm_set_rm_firmware_requested( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + nv->request_firmware = NV_TRUE; + nv->allow_fallback_to_monolithic_rm = NV_FALSE; + + // Check if we want firmware logs + if (nv->request_firmware) + rm_set_firmware_logs(sp, nv); +} + +// +// This function will be called by nv_procfs_read_gpu_info(). +// nv_procfs_read_gpu_info() will not print the 'GPU Firmware:' field at +// all if the 'version' string is empty. +// +// If GSP is enabled (firmware was requested), this function needs to return +// the firmware version or "NA" in case of any errors. +// +// If GSP is not enabled (firmware was not requested), this function needs to +// return the empty string, regardless of error cases. 
+//
+void NV_API_CALL rm_get_firmware_version(
+    nvidia_stack_t *sp,
+    nv_state_t *nv,
+    char *version,
+    NvLength version_length
+)
+{
+    NV2080_CTRL_GSP_GET_FEATURES_PARAMS params = { 0 };
+    RM_API *pRmApi;
+    THREAD_STATE_NODE threadState;
+    NV_STATUS rmStatus = NV_OK;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+
+    pRmApi = RmUnixRmApiPrologue(nv, &threadState, RM_LOCK_MODULES_GPU);
+    if (pRmApi != NULL)
+    {
+        rmStatus = pRmApi->Control(pRmApi,
+                                   nv->rmapi.hClient,
+                                   nv->rmapi.hSubDevice,
+                                   NV2080_CTRL_CMD_GSP_GET_FEATURES,
+                                   &params,
+                                   sizeof(params));
+
+        RmUnixRmApiEpilogue(nv, &threadState);
+    }
+    else
+    {
+        rmStatus = NV_ERR_INVALID_STATE;
+    }
+
+    if (rmStatus != NV_OK)
+    {
+        if (RMCFG_FEATURE_GSP_CLIENT_RM && nv->request_firmware)
+        {
+            const char *pTmpString = "N/A";
+            portStringCopy(version, version_length, pTmpString, portStringLength(pTmpString) + 1);
+        }
+        NV_PRINTF(LEVEL_INFO,
+                  "%s: Failed to query gpu build versions, status=0x%x\n",
+                  __FUNCTION__,
+                  rmStatus);
+        goto finish;
+    }
+    portMemCopy(version, version_length, params.firmwareVersion, sizeof(params.firmwareVersion));
+
+finish:
+    NV_EXIT_RM_RUNTIME(sp,fp);
+}
+
+//
+// disable GPU SW state persistence
+//
+
+void NV_API_CALL rm_disable_gpu_state_persistence(nvidia_stack_t *sp, nv_state_t *nv)
+{
+    THREAD_STATE_NODE threadState;
+    OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv);
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    pGpu->setProperty(pGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE, NV_FALSE);
+    osModifyGpuSwStatePersistence(pGpu->pOsGpuInfo, NV_FALSE);
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+}
+
+void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd)
+{
+    nvidia_kernel_rmapi_ops_t *ops = ops_cmd;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+
+    switch (ops->op)
+    {
+        case NV01_FREE:
+            Nv01FreeKernel(&ops->params.free);
+            break;
+
+        case NV01_ALLOC_MEMORY:
+            Nv01AllocMemoryKernel(&ops->params.allocMemory64);
+            break;
+
+        case NV04_ALLOC:
+            Nv04AllocWithAccessKernel(&ops->params.alloc);
+            break;
+
+        case NV04_VID_HEAP_CONTROL:
+            Nv04VidHeapControlKernel(ops->params.pVidHeapControl);
+            break;
+
+        case NV04_MAP_MEMORY:
+        {
+            //
+            // We need to free NVUAP for MEM_SPACE_USER mappings,
+            // since we're not going through the regular path
+            //
+            nv_usermap_access_params_t *pNvuap = NULL;
+            Nv04MapMemoryKernel(&ops->params.mapMemory);
+            pNvuap = (nv_usermap_access_params_t *) tlsEntryGet(TLS_ENTRY_ID_MAPPING_CONTEXT);
+            if (pNvuap != NULL)
+            {
+                NV_ASSERT(tlsEntryRelease(TLS_ENTRY_ID_MAPPING_CONTEXT) == 0);
+                os_free_mem(pNvuap->memArea.pRanges);
+                os_free_mem(pNvuap);
+            }
+            break;
+        }
+
+        case NV04_UNMAP_MEMORY:
+            Nv04UnmapMemoryKernel(&ops->params.unmapMemory);
+            break;
+
+        case NV04_ALLOC_CONTEXT_DMA:
+            Nv04AllocContextDmaKernel(&ops->params.allocContextDma2);
+            break;
+
+        case NV04_MAP_MEMORY_DMA:
+            Nv04MapMemoryDmaKernel(&ops->params.mapMemoryDma);
+            break;
+
+        case NV04_UNMAP_MEMORY_DMA:
+            Nv04UnmapMemoryDmaKernel(&ops->params.unmapMemoryDma);
+            break;
+
+        case NV04_BIND_CONTEXT_DMA:
+            Nv04BindContextDmaKernel(&ops->params.bindContextDma);
+            break;
+
+        case NV04_CONTROL:
+            Nv04ControlKernel(&ops->params.control);
+            break;
+
+        case NV04_DUP_OBJECT:
+            Nv04DupObjectKernel(&ops->params.dupObject);
+            break;
+
+        case NV04_SHARE:
+            Nv04ShareKernel(&ops->params.share);
+            break;
+
+        case NV04_ADD_VBLANK_CALLBACK:
+            Nv04AddVblankCallbackKernel(&ops->params.addVblankCallback);
+            break;
+    }
+
+    NV_EXIT_RM_RUNTIME(sp,fp);
+}
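/*
 * Editorial sketch: nearly every rm_* entry point above and below follows
 * the same bracketing discipline -- enter the RM runtime, initialize thread
 * state, take the API lock (and, where needed, a GPU lock), do the work,
 * then unwind in reverse order. The skeleton below illustrates that
 * pattern; rm_example_entry_point is a hypothetical name, not part of the
 * driver.
 */
#if 0 /* illustrative only */
NV_STATUS NV_API_CALL rm_example_entry_point(
    nvidia_stack_t *sp,
    nv_state_t *nv
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus;
    void *fp;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) == NV_OK)
    {
        // ... RMAPI work goes here, optionally under a GPU lock ...

        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}
#endif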
+
+//
+// ACPI method (NVIF/_DSM/WMMX/MXM*/etc.) initialization
+//
+void RmInitAcpiMethods(OBJOS *pOS, OBJSYS *pSys, OBJGPU *pGpu)
+{
+    NvU32 handlesPresent;
+
+    if (pSys->getProperty(pSys, PDB_PROP_SYS_NVIF_INIT_DONE))
+        return;
+
+    nv_acpi_methods_init(&handlesPresent);
+
+}
+
+//
+// ACPI method (NVIF/_DSM/WMMX/MXM*/etc.) teardown
+//
+void RmUnInitAcpiMethods(OBJSYS *pSys)
+{
+    pSys->setProperty(pSys, PDB_PROP_SYS_NVIF_INIT_DONE, NV_FALSE);
+
+    nv_acpi_methods_uninit();
+}
+
+//
+// Converts an array of OS page addresses to an array of RM page addresses. This
+// assumes that:
+// (1) The pteArray is at least pageCount entries large,
+// (2) The pageCount is given in RM pages, and
+// (3) The OS page entries start at index 0.
+//
+void RmInflateOsToRmPageArray(RmPhysAddr *pteArray, NvU64 pageCount)
+{
+    NvUPtr osPageIdx, osPageOffset;
+    NvU64 i;
+
+    //
+    // We can do the translation in place by moving backwards, since there
+    // will always be more RM pages than OS pages
+    //
+    for (i = pageCount - 1; i != NV_U64_MAX; i--)
+    {
+        osPageIdx = i >> NV_RM_TO_OS_PAGE_SHIFT;
+        osPageOffset = (i & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) *
+                       NV_RM_PAGE_SIZE;
+        pteArray[i] = pteArray[osPageIdx] + osPageOffset;
+    }
+}
+
+void RmDeflateRmToOsPageArray(RmPhysAddr *pteArray, NvU64 pageCount)
+{
+    NvU64 i;
+
+    for (i = 0; i < NV_RM_PAGES_TO_OS_PAGES(pageCount); i++)
+    {
+        pteArray[i] = pteArray[(i << NV_RM_TO_OS_PAGE_SHIFT)];
+    }
+
+    // Zero out the rest of the addresses, which are now invalid
+    portMemSet(pteArray + i, 0, sizeof(*pteArray) * (pageCount - i));
+}
+
+NvBool NV_API_CALL
+rm_get_device_remove_flag
+(
+    nvidia_stack_t * sp,
+    NvU32 gpu_id
+)
+{
+    THREAD_STATE_NODE threadState;
+    void *fp;
+    NvBool bRemove;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    if (gpumgrQueryGpuDrainState(gpu_id, NULL, &bRemove) != NV_OK)
+    {
+        bRemove = NV_FALSE;
+    }
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+    return bRemove;
+}
+
+NvBool NV_API_CALL
+rm_gpu_need_4k_page_isolation
+(
+    nv_state_t *nv
+)
+{
+    nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
+
+    return nvp->b_4k_page_isolation_required;
+}
+
+//
+// This API updates only the following fields in nv_ioctl_numa_info_t:
+// - nid
+// - numa_mem_addr
+// - numa_mem_size
+// - offline_addresses
+//
+// The rest of the fields should be updated by the caller.
+NV_STATUS NV_API_CALL rm_get_gpu_numa_info(
+    nvidia_stack_t *sp,
+    nv_state_t *nv,
+    nv_ioctl_numa_info_t *numa_info
+)
+{
+    if (numa_info == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    numa_info->nid = NV0000_CTRL_NO_NUMA_NODE;
+    numa_info->numa_mem_addr = 0;
+    numa_info->numa_mem_size = 0;
+    numa_info->offline_addresses.numEntries = 0;
+
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL rm_gpu_numa_online(
+    nvidia_stack_t *sp,
+    nv_state_t *nv
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+NV_STATUS NV_API_CALL rm_gpu_numa_offline(
+    nvidia_stack_t *sp,
+    nv_state_t *nv
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+//
+// A device is considered "sequestered" if it has drain state enabled for it.
+// The kernel interface layer can use this to check the drain state of a device
+// in paths outside of initialization, e.g., when clients attempt to reference
+// count the device.
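+// Note that drain state is only queried here; if the query fails, the
+// device is conservatively reported as not sequestered (bDrain stays
+// NV_FALSE below).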
+// +NvBool NV_API_CALL rm_is_device_sequestered( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + void *fp; + NvBool bDrain = NV_FALSE; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // + // If gpumgrQueryGpuDrainState succeeds, bDrain will be set as needed. + // If gpumgrQueryGpuDrainState fails, bDrain will stay false; we assume + // that if core RM can't tell us the drain state, it must not be + // attached and the "sequestered" question is not relevant. + // + (void) gpumgrQueryGpuDrainState(pNv->gpu_id, &bDrain, NULL); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + return bDrain; +} + +void NV_API_CALL rm_check_for_gpu_surprise_removal( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + THREAD_STATE_NODE threadState; + void *fp; + NV_STATUS rmStatus; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock. + if ((rmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU)) == NV_OK) + { + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + + if ((rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_GPU)) == NV_OK) + { + osHandleGpuLost(pGpu); + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + + // UNLOCK: release api lock + rmapiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NV_STATUS NV_API_CALL rm_set_external_kernel_client_count( + nvidia_stack_t *sp, + nv_state_t *pNv, + NvBool bIncr +) +{ + THREAD_STATE_NODE threadState; + void *fp; + OBJGPU *pGpu; + NV_STATUS rmStatus = NV_OK; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGpu = NV_GET_NV_PRIV_PGPU(pNv); + + if (pGpu != NULL) + { + rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU); + if (rmStatus == NV_OK) + { + rmStatus = gpuSetExternalKernelClientCount(pGpu, bIncr); + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NvBool rm_get_uefi_console_status( + nv_state_t *nv +) +{ + NvU32 fbWidth, fbHeight, fbDepth, fbPitch; + NvU64 fbSize; + NvU64 fbBaseAddress = 0; + NvBool bConsoleDevice = NV_FALSE; + + // + // nv_get_screen_info() will return dimensions and an address for + // any fbdev driver (e.g., efifb, vesafb, etc). + // + nv_get_screen_info(nv, &fbBaseAddress, &fbWidth, &fbHeight, &fbDepth, &fbPitch, &fbSize); + + bConsoleDevice = (fbSize != 0); + + return bConsoleDevice; +} + +NvU64 rm_get_uefi_console_size( + nv_state_t *nv, + NvU64 *pFbBaseAddress +) +{ + NvU32 fbWidth, fbHeight, fbDepth, fbPitch; + NvU64 fbSize; + + fbSize = fbWidth = fbHeight = fbDepth = fbPitch = 0; + + // + // nv_get_screen_info() will return dimensions and an address for + // any fbdev driver (e.g., efifb, vesafb, etc). + // + nv_get_screen_info(nv, pFbBaseAddress, &fbWidth, &fbHeight, &fbDepth, &fbPitch, &fbSize); + + return fbSize; +} + +// +// Verifies the handle, offset and size and dups hMemory. +// Must be called with API lock and GPU lock held. 
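+// On success it also reports how the importer may use the memory: the CPU
+// cache attribute, whether it is read-only, whether mmap is permitted
+// (zero-FB/iGPU parts only), and a coarse memory type (system, framebuffer,
+// or peer MMIO).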
+// +NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hSrcClient, + NvHandle hDstClient, + NvHandle hDevice, + NvHandle hSubdevice, + void *pGpuInstanceInfo, + NvHandle hMemory, + NvU64 offset, + NvU64 size, + NvHandle *phMemoryDuped, + void **ppMemInfo, + NvBool *pbCanMmap, + NvU32 *pCacheType, + NvBool *pbReadOnlyMem, + nv_memory_type_t *pMemoryType +) +{ + MEMORY_DESCRIPTOR *pMemDesc; + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + OBJGPU *pGpu; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + + NV_ASSERT(rmapiLockIsOwner()); + + NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu))); + + rmStatus = RmDmabufVerifyMemHandle(pGpu, hSrcClient, hMemory, + offset, size, pGpuInstanceInfo, + &pMemDesc); + if (rmStatus == NV_OK) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hMemoryDuped = 0; + + rmStatus = pRmApi->DupObject(pRmApi, + hDstClient, + hDevice, + &hMemoryDuped, + hSrcClient, + hMemory, + 0); + if (rmStatus == NV_OK) + { + *phMemoryDuped = hMemoryDuped; + } + else if (rmStatus == NV_ERR_INVALID_OBJECT_PARENT) + { + hMemoryDuped = 0; + + // If duping under Device fails, try duping under Subdevice + rmStatus = pRmApi->DupObject(pRmApi, + hDstClient, + hSubdevice, + &hMemoryDuped, + hSrcClient, + hMemory, + 0); + if (rmStatus == NV_OK) + { + *phMemoryDuped = hMemoryDuped; + } + } + *ppMemInfo = (void *) pMemDesc; + + *pCacheType = memdescGetCpuCacheAttrib(pMemDesc); + *pbReadOnlyMem = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY); + + if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_PEER_IO_MEM)) + { + *pMemoryType = NV_MEMORY_TYPE_DEVICE_MMIO; + } + else if (memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) + { + *pMemoryType = NV_MEMORY_TYPE_SYSTEM; + } + else + { + // TODO: Assume NV_MEMORY_TYPE_FRAMEBUFFER is for now. + // Add Proper handling for NV_MEMORY_TYPE_REGISTERS. + *pMemoryType = NV_MEMORY_TYPE_FRAMEBUFFER; + } + + // mmap is allowed only for 0FB chips (iGPU) + *pbCanMmap = pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +// +// Frees dup'd hMemory. +// Must be called with API lock and GPU lock held. +// +void NV_API_CALL rm_dma_buf_undup_mem_handle( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hClient, + NvHandle hMemory +) +{ + THREAD_STATE_NODE threadState; + RM_API *pRmApi; + OBJGPU *pGpu; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + + NV_ASSERT(rmapiLockIsOwner()); + + NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu))); + + pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + pRmApi->Free(pRmApi, hClient, hMemory); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +// +// Maps a handle to system physical addresses: +// C2C for coherent platforms with DEFAULT mapping type +// BAR1(static & dynamic) for non-coherent platforms and for +// coherent platforms with mapping type FORCE_PCIE +// Must be called with API lock and GPU lock held for dynamic BAR1. 
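+// In this build the map and unmap paths are stubbed out: mapping returns
+// NV_ERR_NOT_SUPPORTED and unmapping is a no-op.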
+// +NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hClient, + NvHandle hMemory, + MemoryRange memRange, + NvU8 mappingType, + void *pMemInfo, + NvBool bStaticPhysAddrs, + MemoryArea *pMemArea +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// +// Unmaps a handle that was mapped to system physical addresses. +// Must be called with API lock and GPU lock held for dynamic BAR1. +// +void NV_API_CALL rm_dma_buf_unmap_mem_handle( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hClient, + NvHandle hMemory, + NvU8 mappingType, + void *pMemInfo, + NvBool bStaticPhysAddrs, + MemoryArea memArea +) +{ +} + +NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hClient, + NvHandle hMemory, + NvU8 mappingType, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubdevice, + void **ppGpuInstanceInfo, + NvBool *pbStaticPhysAddrs, + NvBool *pbAcquireReleaseAllGpuLockOnDup +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + if (rmStatus == NV_OK) + { + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + + rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + if (rmStatus == NV_OK) + { + rmStatus = RmDmabufGetClientAndDevice(pGpu, hClient, hMemory, mappingType, + phClient, phDevice, + phSubdevice, ppGpuInstanceInfo); + if (rmStatus == NV_OK) + { + *pbStaticPhysAddrs = NV_FALSE; + } + + *pbAcquireReleaseAllGpuLockOnDup = pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB); + + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + + // UNLOCK: release API lock + rmapiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +void NV_API_CALL rm_dma_buf_put_client_and_device( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hClient, + NvHandle hDevice, + NvHandle hSubdevice, + void *pGpuInstanceInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + if (rmStatus == NV_OK) + { + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + + rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + if (rmStatus == NV_OK) + { + RmDmabufPutClientAndDevice(pGpu, hClient, hDevice, hSubdevice, + pGpuInstanceInfo); + + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + + // UNLOCK: release API lock + rmapiLockRelease(); + } + NV_ASSERT_OK(rmStatus); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +// +// Fetches GSP ucode data for usage during RM Init +// NOTE: Used only on VMWware +// + +void NV_API_CALL rm_vgpu_vfio_set_driver_vm( + nvidia_stack_t *sp, + NvBool is_driver_vm +) +{ + OBJSYS *pSys; + OBJHYPERVISOR *pHypervisor; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + pSys = SYS_GET_INSTANCE(); + pHypervisor = SYS_GET_HYPERVISOR(pSys); + + pHypervisor->setProperty(pHypervisor, PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED, is_driver_vm); + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NvBool NV_API_CALL rm_is_altstack_in_use(void) +{ +#if defined(__use_altstack__) + return NV_TRUE; +#else + return NV_FALSE; +#endif +} 
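/*
 * Editorial sketch: the dma-buf helpers above are intended to be used in
 * matched pairs by the kernel interface layer.
 * rm_dma_buf_get_client_and_device() yields the client/device/subdevice
 * handles under which rm_dma_buf_dup_mem_handle() can dup the exporter's
 * memory, and rm_dma_buf_put_client_and_device() returns them. Error
 * handling is elided, all input handles are assumed valid, and the API/GPU
 * locking required around the dup call (see the comments above) is omitted.
 */
#if 0 /* illustrative only */
{
    NvHandle hRmClient, hDevice, hSubdevice;
    void    *pGpuInstanceInfo;
    NvBool   bStaticPhysAddrs, bAllGpuLockOnDup;

    if (rm_dma_buf_get_client_and_device(sp, nv, hClient, hMemory,
                                         mappingType, &hRmClient, &hDevice,
                                         &hSubdevice, &pGpuInstanceInfo,
                                         &bStaticPhysAddrs,
                                         &bAllGpuLockOnDup) == NV_OK)
    {
        // ... dup via rm_dma_buf_dup_mem_handle(), then map/use/unmap ...

        rm_dma_buf_put_client_and_device(sp, nv, hRmClient, hDevice,
                                         hSubdevice, pGpuInstanceInfo);
    }
}
#endif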
+ +void NV_API_CALL rm_acpi_nvpcf_notify( + nvidia_stack_t *sp +) +{ + void *fp; + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus = NV_OK; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_EVENT)) == NV_OK) + { + OBJGPU *pGpu = gpumgrGetGpu(0); + if (pGpu != NULL) + { + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + if ((rmStatus = os_ref_dynamic_power(nv, NV_DYNAMIC_PM_FINE)) == + NV_OK) + { + gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_NVPCF_EVENTS, + NULL, 0, 0, 0); + } + os_unref_dynamic_power(nv, NV_DYNAMIC_PM_FINE); + } + rmapiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +static void rm_notify_gpu_addition_removal_helper( + nv_state_t *nv, + NvBool bBind) +{ + THREAD_STATE_NODE threadState; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI) == NV_OK) + { + NV0000_CTRL_SYSTEM_EVENT_DATA_GPU_BIND_UNBIND eventData = { 0 }; + + eventData.gpuId = nv->gpu_id; + eventData.bBind = bBind; + + CliAddSystemEvent(NV0000_NOTIFIERS_GPU_BIND_UNBIND_EVENT, &eventData, NULL); + rmapiLockRelease(); + } + else + { + NV_PRINTF(LEVEL_ERROR, "Fail to acquire rmApi lock. Skip notification."); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); +} + +void NV_API_CALL rm_notify_gpu_addition( + nvidia_stack_t *sp, + nv_state_t *nv) +{ + void *fp = NULL; + + NV_ENTER_RM_RUNTIME(sp,fp); + rm_notify_gpu_addition_removal_helper(nv, NV_TRUE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +void NV_API_CALL rm_notify_gpu_removal( + nvidia_stack_t *sp, + nv_state_t *nv) +{ + void *fp = NULL; + + NV_ENTER_RM_RUNTIME(sp,fp); + rm_notify_gpu_addition_removal_helper(nv, NV_FALSE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NvBool NV_API_CALL rm_wait_for_bar_firewall( + nvidia_stack_t * sp, + NvU32 domain, + NvU8 bus, + NvU8 device, + NvU8 function, + NvU16 devId, + NvU16 subsystemId +) +{ + NvBool ret; + void *fp = NULL; + + // no state set up yet for threadstate or RM locks + NV_ENTER_RM_RUNTIME(sp,fp); + ret = gpumgrWaitForBarFirewall(domain, bus, device, function, devId, subsystemId); + NV_EXIT_RM_RUNTIME(sp,fp); + + return ret; +} + +static NvU32 devfreq_clk_to_domain( + TEGRASOC_DEVFREQ_CLK devfreqClk +) +{ + NvU32 clkDomain = NV2080_CTRL_CLK_DOMAIN_TEGRA_UNDEFINED; + switch (devfreqClk) + { + case TEGRASOC_DEVFREQ_CLK_GPC: + clkDomain = NV2080_CTRL_CLK_DOMAIN_TEGRA_GPCCLK; + break; + + case TEGRASOC_DEVFREQ_CLK_NVD: + clkDomain = NV2080_CTRL_CLK_DOMAIN_TEGRA_NVDCLK; + break; + + default: + break; + } + + return clkDomain; +} + +NV_STATUS NV_API_CALL rm_pmu_perfmon_get_load( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 *load, + TEGRASOC_DEVFREQ_CLK devfreqClk +) +{ + NV2080_CTRL_PERF_GET_TEGRA_PERFMON_SAMPLE_PARAMS params = { 0 }; + NvU32 clkDomain = devfreq_clk_to_domain(devfreqClk); + RM_API *pRmApi; + NV_STATUS status; + void *fp; + + if (clkDomain == NV2080_CTRL_CLK_DOMAIN_TEGRA_UNDEFINED) + { + return NV_ERR_INVALID_ARGUMENT; + } + + NV_ENTER_RM_RUNTIME(sp, fp); + + if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI) == NV_OK) + { + pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + if (pRmApi == NULL) + { + status = NV_ERR_INVALID_STATE; + rmapiLockRelease(); + goto rm_pmu_perfmon_get_load_exit; + } + + params.clkDomain = clkDomain; + status = pRmApi->Control(pRmApi, nv->rmapi.hClient, nv->rmapi.hSubDevice, + 
                                 NV2080_CTRL_CMD_PERF_GET_TEGRA_PERFMON_SAMPLE,
+                                 &params, sizeof(params));
+        if (status == NV_OK)
+        {
+            *load = params.clkPercentBusy;
+        }
+
+        rmapiLockRelease();
+    }
+    else
+    {
+        status = NV_ERR_INVALID_STATE;
+    }
+
+rm_pmu_perfmon_get_load_exit:
+    NV_EXIT_RM_RUNTIME(sp, fp);
+
+    return status;
+}
diff --git a/src/nvidia/arch/nvalloc/unix/src/osinit.c b/src/nvidia/arch/nvalloc/unix/src/osinit.c
new file mode 100644
index 0000000..65faa8c
--- /dev/null
+++ b/src/nvidia/arch/nvalloc/unix/src/osinit.c
@@ -0,0 +1,2043 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**************************************************************************************************************
+*
+* Description:
+* UNIX-general, device-independent initialization code for
+* the resource manager.
+* +* +**************************************************************************************************************/ + +#include +#include // NV device driver interface +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "gpu/gpu.h" +#include +#include "nverror.h" +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "gpu_mgr/gpu_db.h" +#include +#include +#include +#include + +#include +// RMCONFIG: need definition of REGISTER_ALL_HALS() +#include "g_hal_register.h" + +typedef enum +{ + RM_INIT_OK, + + /* general os errors */ + RM_INIT_REG_SETUP_FAILED = 0x10, + RM_INIT_SYS_ENVIRONMENT_FAILED, + + /* gpu errors */ + RM_INIT_GPU_GPUMGR_ALLOC_GPU_FAILED = 0x20, + RM_INIT_GPU_GPUMGR_CREATE_DEV_FAILED, + RM_INIT_GPU_GPUMGR_ATTACH_GPU_FAILED, + RM_INIT_GPU_PRE_INIT_FAILED, + RM_INIT_GPU_STATE_INIT_FAILED, + RM_INIT_GPU_LOAD_FAILED, + RM_INIT_GPU_DMA_CONFIGURATION_FAILED, + RM_INIT_GPU_GPUMGR_EXPANDED_VISIBILITY_FAILED, + + /* vbios errors */ + RM_INIT_VBIOS_FAILED = 0x30, + RM_INIT_VBIOS_POST_FAILED, + RM_INIT_VBIOS_X86EMU_FAILED, + + /* scalability errors */ + RM_INIT_SCALABILITY_FAILED = 0x40, + + /* general core rm errors */ + RM_INIT_WATCHDOG_FAILED, + RM_FIFO_GET_UD_BAR1_MAP_INFO_FAILED, + RM_GPUDB_REGISTER_FAILED, + + RM_INIT_ALLOC_RMAPI_FAILED, + RM_INIT_GPUINFO_WITH_RMAPI_FAILED, + + /* rm firmware errors */ + RM_INIT_FIRMWARE_POLICY_FAILED = 0x60, + RM_INIT_FIRMWARE_FETCH_FAILED, + RM_INIT_FIRMWARE_INIT_FAILED, + + RM_INIT_MAX_FAILURES +} rm_init_status; + +typedef rm_init_status RM_INIT_STATUS; + +typedef struct { + RM_INIT_STATUS initStatus; + NV_STATUS rmStatus; + NvU32 line; +} UNIX_STATUS; + +#define INIT_UNIX_STATUS { RM_INIT_OK, NV_OK, 0 } +#define RM_INIT_SUCCESS(init) ((init) == RM_INIT_OK) + +#define RM_SET_ERROR(status, err) { (status).initStatus = (err); \ + (status).line = __LINE__; } + + +static inline NvU64 nv_encode_pci_info(nv_pci_info_t *pci_info) +{ + return gpuEncodeDomainBusDevice(pci_info->domain, pci_info->bus, pci_info->slot); +} + +static inline NvU32 nv_generate_id_from_pci_info(nv_pci_info_t *pci_info) +{ + return gpuGenerate32BitId(pci_info->domain, pci_info->bus, pci_info->slot); +} + +static void nv_set_probed_gpu_flags(nv_state_t *nv) +{ + NvU32 flags = 0; + + if (NV_IS_SOC_DISPLAY_DEVICE(nv)) + { + flags |= DRF_DEF(0000, _CTRL_GPU_PROBED_ID_FLAGS, _SOC_DISPLAY, _TRUE); + } + + gpumgrSetProbedFlags(nv->gpu_id, flags); +} + +static inline void nv_os_map_kernel_space(nv_state_t *nv, nv_aperture_t *aperture) +{ + NV_ASSERT(aperture->map == NULL); + + // let's start off assuming a standard device and map the registers + // normally. 
It is unfortunate to hard-code the register size here, but we don't + // want to fail trying to map all of a multi-devices' register space + aperture->map = osMapKernelSpace(aperture->cpu_address, + aperture->size, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + aperture->map_u = (nv_phwreg_t)aperture->map; +} + +// local prototypes +static void initVendorSpecificRegistry(OBJGPU *, NvU16); +static void initUnixSpecificRegistry(OBJGPU *); + +NV_STATUS osRmInitRm(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NV_STATUS status; + + if (os_is_efi_enabled()) + { + pSys->setProperty(pSys, PDB_PROP_SYS_IS_UEFI, NV_TRUE); + } + + // have to init this before the debug subsystem, which will + // try to check the value of ResmanDebugLevel + RmInitRegistry(); + + // init the debug subsystem if necessary + os_dbg_init(); + nvDbgInitRmMsg(NULL); + + // Force nvlog reinit since module params are now available + NVLOG_UPDATE(); + + // Register all supported hals + status = REGISTER_ALL_HALS(); + if (status != NV_OK) + { + RmDestroyRegistry(NULL); + return status; + } + + // Setup any ThreadState defaults + threadStateInitSetupFlags(THREAD_STATE_SETUP_FLAGS_ENABLED | + THREAD_STATE_SETUP_FLAGS_TIMEOUT_ENABLED | + THREAD_STATE_SETUP_FLAGS_SLI_LOGIC_ENABLED | + THREAD_STATE_SETUP_FLAGS_DO_NOT_INCLUDE_SLEEP_TIME_ENABLED); + + return NV_OK; +} + +void RmShutdownRm(void) +{ + NV_PRINTF(LEVEL_INFO, "shutdown rm\n"); + + RmDestroyRegistry(NULL); + + // Free objects created with RmInitRm, including the system object + RmDestroyRm(); +} + +// +// osAttachGpu +// +// This routine is used as a callback by the gpumgrAttachGpu +// interface to allow os-dependent code to set up any state +// before engine construction begins. +// +NV_STATUS osAttachGpu( + OBJGPU *pGpu, + void *pOsGpuInfo +) +{ + nv_state_t *nv = (nv_state_t *)pOsGpuInfo; + nv_priv_t *nvp; + + nvp = NV_GET_NV_PRIV(nv); + + nvp->pGpu = pGpu; + + NV_SET_NV_STATE(pGpu, (void *)nv); + + initUnixSpecificRegistry(pGpu); + + // Assign default values to Registry keys for VGX + if (os_is_vgx_hyper()) + { + initVGXSpecificRegistry(pGpu); + } + + return NV_OK; +} + +NV_STATUS osDpcAttachGpu( + OBJGPU *pGpu, + void *pOsGpuInfo +) +{ + return NV_OK; // Nothing to do for unix +} + +void osDpcDetachGpu( + OBJGPU *pGpu +) +{ + return; // Nothing to do for unix +} + +NV_STATUS +osHandleGpuLost +( + OBJGPU *pGpu +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + NvU32 pmc_boot_0; + + // Determine if we've already run the handler + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED)) + { + return NV_OK; + } + + pmc_boot_0 = NV_PRIV_REG_RD32(nv->regs->map_u, NV_PMC_BOOT_0); + if (pmc_boot_0 != nvp->pmc_boot_0) + { + // + // This doesn't support PEX Reset and Recovery yet. + // This will help to prevent accessing registers of a GPU + // which has fallen off the bus. + // + nvErrorLog_va((void *)pGpu, ROBUST_CHANNEL_GPU_HAS_FALLEN_OFF_THE_BUS, + "GPU has fallen off the bus."); + + gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_GPU_UNAVAILABLE, NULL, + 0, ROBUST_CHANNEL_GPU_HAS_FALLEN_OFF_THE_BUS, 0); + + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "GPU has fallen off the bus.\n"); + + if (pGpu->boardInfo != NULL && pGpu->boardInfo->serialNumber[0] != '\0') + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "GPU serial number is %s.\n", + pGpu->boardInfo->serialNumber); + } + + gpuSetDisconnectedProperties(pGpu); + + // Initiate a crash dump immediately. + RmLogGpuCrash(pGpu); + + // Set SURPRISE_REMOVAL flag for eGPU to help in device removal. 
+ if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_EXTERNAL_GPU)) + { + nv->flags |= NV_FLAG_IN_SURPRISE_REMOVAL; + } + + DBG_BREAKPOINT(); + } + + return NV_OK; +} + +/* + * Initialize the required GPU information by doing RMAPI control calls + * and store the same in the UNIX specific data structures. + */ +static NV_STATUS +RmInitGpuInfoWithRmApi +( + OBJGPU *pGpu +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams = { 0 }; + NV_STATUS status; + + // LOCK: acquire GPUs lock + status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT); + if (status != NV_OK) + { + return status; + } + + pGpuInfoParams = portMemAllocNonPaged(sizeof(*pGpuInfoParams)); + if (pGpuInfoParams == NULL) + { + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + return NV_ERR_NO_MEMORY; + } + + + portMemSet(pGpuInfoParams, 0, sizeof(*pGpuInfoParams)); + + pGpuInfoParams->gpuInfoListSize = 4; + pGpuInfoParams->gpuInfoList[0].index = NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED; + pGpuInfoParams->gpuInfoList[1].index = NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED; + pGpuInfoParams->gpuInfoList[2].index = NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY; + pGpuInfoParams->gpuInfoList[3].index = NV2080_CTRL_GPU_INFO_INDEX_COHERENT_GPU_MEMORY_MODE; + + status = pRmApi->Control(pRmApi, nv->rmapi.hClient, + nv->rmapi.hSubDevice, + NV2080_CTRL_CMD_GPU_GET_INFO_V2, + pGpuInfoParams, sizeof(*pGpuInfoParams)); + + if (status == NV_OK) + { + nvp->b_4k_page_isolation_required = + (pGpuInfoParams->gpuInfoList[0].data == + NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED_YES); + nvp->b_mobile_config_enabled = + (pGpuInfoParams->gpuInfoList[1].data == + NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED_YES); + nv->dma_buf_supported = + (pGpuInfoParams->gpuInfoList[2].data == + NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY_YES); + } + + nv->coherent = + (pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB)); + + // + // If coherent GPU memory mode is NONE, then GPU memory has struct page + // on coherent platforms and no struct page on non-coherent ones. + // If the mode is enabled and _NUMA, then GPU memory has struct page. + // If the mode is enabled and _DRIVER, then GPU memory doesn't have struct page. + // Tegra iGPU also falls under struct page category, although + // COHERENT_GPU_MEMORY_MODE doesn't apply there. + // + if (pGpuInfoParams->gpuInfoList[3].data == + NV2080_CTRL_GPU_INFO_INDEX_COHERENT_GPU_MEMORY_MODE_NONE) + { + nv->mem_has_struct_page = nv->coherent; + } + else + { + // If mode is not _NONE, we're already on a PDB_PROP_GPU_COHERENT_CPU_MAPPING platform. 
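+        // Net effect of the mode handling:
+        //   mode NONE   -> struct page iff nv->coherent
+        //   mode NUMA   -> struct page
+        //   mode DRIVER -> struct page only for zero-FB parts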
+ nv->mem_has_struct_page = pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB) || + (pGpuInfoParams->gpuInfoList[3].data == + NV2080_CTRL_GPU_INFO_INDEX_COHERENT_GPU_MEMORY_MODE_NUMA); + } + + portMemFree(pGpuInfoParams); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + return status; +} + +static void RmSetSocDispDeviceMappings( + GPUATTACHARG *gpuAttachArg, + nv_state_t *nv +) +{ + gpuAttachArg->socDeviceArgs.deviceMapping[SOC_DEV_MAPPING_DISP].gpuNvAddr = (GPUHWREG*) nv->regs->map; + gpuAttachArg->socDeviceArgs.deviceMapping[SOC_DEV_MAPPING_DISP].gpuNvPAddr = nv->regs->cpu_address; + gpuAttachArg->socDeviceArgs.deviceMapping[SOC_DEV_MAPPING_DISP].gpuNvLength = (NvU32) nv->regs->size; +} + +static void RmSetSocDpauxDeviceMappings( + GPUATTACHARG *gpuAttachArg, + nv_state_t *nv +) +{ +} + +static void RmSetSocHdacodecDeviceMappings( + GPUATTACHARG *gpuAttachArg, + nv_state_t *nv +) +{ +} + +static void RmSetSocMipiCalDeviceMappings( + GPUATTACHARG *gpuAttachArg, + nv_state_t *nv +) +{ + gpuAttachArg->socDeviceArgs.deviceMapping[SOC_DEV_MAPPING_MIPICAL].gpuNvAddr = (GPUHWREG*) nv->mipical_regs->map; + gpuAttachArg->socDeviceArgs.deviceMapping[SOC_DEV_MAPPING_MIPICAL].gpuNvPAddr = nv->mipical_regs->cpu_address; + gpuAttachArg->socDeviceArgs.deviceMapping[SOC_DEV_MAPPING_MIPICAL].gpuNvLength = nv->mipical_regs->size; +} + +static void RmSetSocHfrpDeviceMappings( + GPUATTACHARG *gpuAttachArg, + nv_state_t *nv +) +{ +} + +static void +osInitNvMapping( + nv_state_t *nv, + NvU32 *pDeviceReference, + UNIX_STATUS *status +) +{ + OBJGPU *pGpu; + OBJSYS *pSys = SYS_GET_INSTANCE(); + GPUATTACHARG *gpuAttachArg; + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + NvU32 deviceInstance; + NvU32 data = 0; + NvU32 dispIsoStreamId; + NvU32 dispNisoStreamId; + + NV_PRINTF(LEVEL_INFO, "osInitNvMapping:\n"); + + // allocate the next available gpu device number + status->rmStatus = gpumgrAllocGpuInstance(pDeviceReference); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot get valid gpu instance\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_GPUMGR_ALLOC_GPU_FAILED); + return; + } + + // RM_BASIC_LOCK_MODEL: allocate GPU lock + status->rmStatus = rmGpuLockAlloc(*pDeviceReference); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** cannot allocate GPU lock\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_GPUMGR_ALLOC_GPU_FAILED); + return; + } + + // attach default single-entry broadcast device for this gpu + status->rmStatus = gpumgrCreateDevice(&deviceInstance, NVBIT(*pDeviceReference), NULL); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot attach bc gpu\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_GPUMGR_CREATE_DEV_FAILED); + // RM_BASIC_LOCK_MODEL: free GPU lock + rmGpuLockFree(*pDeviceReference); + return; + } + + // init attach state + gpuAttachArg = portMemAllocNonPaged(sizeof(GPUATTACHARG)); + if (gpuAttachArg == NULL) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot allocate gpuAttachArg\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_GPUMGR_ALLOC_GPU_FAILED); + // RM_BASIC_LOCK_MODEL: free GPU lock + rmGpuLockFree(*pDeviceReference); + return; + } + + portMemSet(gpuAttachArg, 0, sizeof(GPUATTACHARG)); + + if (NV_IS_SOC_DISPLAY_DEVICE(nv)) + { + gpuAttachArg->socDeviceArgs.specified = NV_TRUE; + + RmSetSocDispDeviceMappings(gpuAttachArg, nv); + + RmSetSocDpauxDeviceMappings(gpuAttachArg, nv); + + RmSetSocHdacodecDeviceMappings(gpuAttachArg, nv); + + RmSetSocMipiCalDeviceMappings(gpuAttachArg, nv); + + RmSetSocHfrpDeviceMappings(gpuAttachArg, nv); + 
gpuAttachArg->socDeviceArgs.socChipId0 = nv->disp_sw_soc_chip_id; + + gpuAttachArg->socDeviceArgs.iovaspaceId = nv->iovaspace_id; + } + else + { + gpuAttachArg->fbPhysAddr = nv->fb->cpu_address; + gpuAttachArg->fbBaseAddr = (GPUHWREG*) 0; // not mapped + gpuAttachArg->devPhysAddr = nv->regs->cpu_address; + gpuAttachArg->regBaseAddr = (GPUHWREG*) nv->regs->map; + gpuAttachArg->intLine = 0; // don't know yet + gpuAttachArg->instPhysAddr = nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address; + gpuAttachArg->instBaseAddr = (GPUHWREG*) 0; // not mapped + + gpuAttachArg->regLength = nv->regs->size; + gpuAttachArg->fbLength = nv->fb->size; + gpuAttachArg->instLength = nv->bars[NV_GPU_BAR_INDEX_IMEM].size; + + gpuAttachArg->iovaspaceId = nv->iovaspace_id; + gpuAttachArg->cpuNumaNodeId = nv->cpu_numa_node_id; + + if (nv->iovaspace_id != NV_IOVA_DOMAIN_NONE) + { + // Default - PCIe GPUs are connected via NISO IOMMU + nv->iommus.iso_iommu_present = NV_FALSE; + nv->iommus.niso_iommu_present = NV_TRUE; + } + } + + // + // we need this to check if we are running on virtual GPU + // in gpuBindHal function later. + // + gpuAttachArg->nvDomainBusDeviceFunc = nv_encode_pci_info(&nv->pci_info); + + gpuAttachArg->bRequestFwClientRm = nv->request_fw_client_rm; + + gpuAttachArg->pOsAttachArg = (void *)nv; + + // use gpu manager to attach gpu + status->rmStatus = gpumgrAttachGpu(*pDeviceReference, gpuAttachArg); + portMemFree(gpuAttachArg); + if (status->rmStatus != NV_OK) + { + gpumgrDestroyDevice(deviceInstance); + RM_SET_ERROR(*status, RM_INIT_GPU_GPUMGR_ATTACH_GPU_FAILED); + NV_PRINTF(LEVEL_ERROR, "*** Cannot attach gpu\n"); + // RM_BASIC_LOCK_MODEL: free GPU lock + rmGpuLockFree(*pDeviceReference); + return; + } + nvp->flags |= NV_INIT_FLAG_GPUMGR_ATTACH; + + pGpu = gpumgrGetGpu(*pDeviceReference); + + sysInitRegistryOverrides(pSys); + + sysApplyLockingPolicy(pSys); + + pGpu->busInfo.IntLine = nv->interrupt_line; + + if (nv->fb != NULL) + { + pGpu->registerAccess.gpuFbAddr = (GPUHWREG*) nv->fb->map; + pGpu->busInfo.gpuPhysFbAddr = nv->fb->cpu_address; + } + + // set default parent gpu + gpumgrSetParentGPU(pGpu, pGpu); + + NV_PRINTF(LEVEL_INFO, "device instance : %d\n", *pDeviceReference); + NV_PRINTF(LEVEL_INFO, "NV regs using linear address : 0x%p\n", + pGpu->deviceMappings[SOC_DEV_MAPPING_DISP].gpuNvAddr); + NV_PRINTF(LEVEL_INFO, + "NV fb using linear address : 0x%p\n", pGpu->registerAccess.gpuFbAddr); + + pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED, NV_TRUE); + pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS, NV_TRUE); + + if (!os_is_vgx_hyper()) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT, NV_TRUE); + } + else + { + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT, NV_TRUE); + } + } + + if ((osReadRegistryDword(NULL, + NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, + &data) == NV_OK) && data) + { + + nv->preserve_vidmem_allocations = NV_TRUE; + } + + // Check if SMMU can be enabled on PushBuffer Aperture + nv_get_disp_smmu_stream_ids(nv, &dispIsoStreamId, &dispNisoStreamId); + if (dispNisoStreamId != NV_U32_MAX) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_DISP_PB_REQUIRES_SMMU_BYPASS, NV_FALSE); + } + else + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_DISP_PB_REQUIRES_SMMU_BYPASS, NV_TRUE); + } + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TRIGGER_PCIE_FLR)) + { + nv->flags |= NV_FLAG_TRIGGER_FLR; + } +} + +static NV_STATUS +RmInitNvHal( + nv_state_t *nv, + NvU32 deviceReference, + UNIX_STATUS *status +) +{ + OBJGPU *pGpu = 
gpumgrGetGpu(deviceReference); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + + PORT_UNREFERENCED_VARIABLE(pGpu); + + nvp->flags |= NV_INIT_FLAG_HAL; + + nvp->flags |= NV_INIT_FLAG_HAL_COMPONENTS; + + return NV_OK; +} + +#define NV_DBG_PRINT_VGA_STATUS(nv, src) \ + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "%s reports GPU is %s VGA\n", \ + src, NV_PRIMARY_VGA(nv) ? "primary" : "not primary"); + +static void +RmAssignPrimaryVga( + nv_state_t *nv, + OBJGPU *pGpu +) +{ + // + // Check with the OS for the primary VGA status of the adapter. If it knows + // definitively (nv_set_primary_vga_status() returns NV_OK), then we should + // use that value. + // + // Otherwise, check the I/O access and VGA decoding along the path from the + // adapter to the root. We expect that the primary VGA will be the only + // non-3D controller with these properties enabled along the entire path. + // + if (nv_set_primary_vga_status(nv) != NV_OK) + { + } + else + { + NV_DBG_PRINT_VGA_STATUS(nv, "OS"); + } +} + +static void +RmDeterminePrimaryDevice(OBJGPU *pGpu) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + return; + } + + // Skip updating nv->primary_vga while RM is recovering after GPU reset + if (nv->flags & NV_FLAG_IN_RECOVERY) + { + return; + } + + nv->primary_vga = NV_FALSE; + + // + // In case of Passthru, GPU will always be secondary + // + if (IS_PASSTHRU(pGpu)) + { + return; + } + + // + // In case of VIRTUAL GPU, there is no display, hence it will be secondary + // + if (IS_VIRTUAL(pGpu)) + { + return; + } + + RmAssignPrimaryVga(nv, pGpu); + + NV_DEV_PRINTF(NV_DBG_SETUP, nv, " is %s VGA\n", + !!nv->primary_vga ? "primary" : "not primary"); + + // + // If GPU is driving any frame buffer console(vesafb, efifb etc) + // mark the console as client driven and GPU as Primary. + // + nv->client_managed_console = rm_get_uefi_console_status(nv); + + NV_DEV_PRINTF(NV_DBG_SETUP, nv, " is %s UEFI console device\n", + nv->client_managed_console ? 
"primary" : "not primary"); + + pGpu->setProperty(pGpu, PDB_PROP_GPU_PRIMARY_DEVICE, + (nv->client_managed_console || !!nv->primary_vga)); +} + +static NV_STATUS +RmInitDeviceDma( + nv_state_t *nv +) +{ + if (nv->iovaspace_id != NV_IOVA_DOMAIN_NONE) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + OBJVASPACE *pIOVAS; + NV_STATUS status = vmmCreateVaspace(pVmm, IO_VASPACE_A, + nv->iovaspace_id, 0, 0ULL, ~0ULL, + 0ULL, 0ULL, + NULL, VASPACE_FLAGS_ENABLE_VMM, + &pIOVAS); + if (status != NV_OK) + { + return status; + } + } + + return NV_OK; +} + +static void +RmTeardownDeviceDma( + nv_state_t *nv +) +{ + if (nv->iovaspace_id != NV_IOVA_DOMAIN_NONE) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + OBJVASPACE *pIOVAS; + + if (NV_OK == vmmGetVaspaceFromId(pVmm, nv->iovaspace_id, IO_VASPACE_A, &pIOVAS)) + { + vmmDestroyVaspace(pVmm, pIOVAS); + } + } +} + +static void +RmEnableDeviceClks(nv_state_t *nv) +{ + NvU32 freqKHz = 0; + NvU32 i; + NV_STATUS ret; + + for (i = TEGRASOC_WHICH_CLK_GPU_FIRST; i <= TEGRASOC_WHICH_CLK_GPU_LAST; i++) + { + ret = nv_get_max_freq(nv, i, &freqKHz); + if (ret != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "NVRM: Max Freq fetch failed for Clk:%d\n", i); + continue; + } + + ret = nv_enable_clk(nv, i); + if (ret != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "NVRM: Clk prepare enable failed for Clk:%d\n", i); + continue; + } + + ret = nv_set_freq(nv, i, freqKHz); + if (ret != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "NVRM: Set Freq failed for Clk:%d\n", i); + } + else + { + NV_PRINTF(LEVEL_INFO, "NVRM: Set Freq:%d for Clk:%d\n", freqKHz, i); + } + } +} + +static void +RmDisableDeviceClks(nv_state_t *nv) +{ + NvU32 i; + + for (i = TEGRASOC_WHICH_CLK_GPU_FIRST; i <= TEGRASOC_WHICH_CLK_GPU_LAST; i++) + { + nv_disable_clk(nv, i); + } +} + +static void +RmInitNvDevice( + NvU32 deviceReference, + UNIX_STATUS *status +) +{ + // set the device context + OBJGPU *pGpu = gpumgrGetGpu(deviceReference); + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + + NV_PRINTF(LEVEL_INFO, "RmInitNvDevice:\n"); + + NV_PRINTF(LEVEL_INFO, + "device instance : 0x%08x\n", deviceReference); + + // initialize all engines -- calls back osInitMapping() + status->rmStatus = gpumgrStatePreInitGpu(pGpu); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot pre-initialize the device\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_PRE_INIT_FAILED); + return; + } + + status->rmStatus = gpumgrStateInitGpu(pGpu); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot initialize the device\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_STATE_INIT_FAILED); + return; + } + nvp->flags |= NV_INIT_FLAG_GPU_STATE; + + status->rmStatus = gpumgrStateLoadGpu(pGpu, GPU_STATE_DEFAULT); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "*** Cannot load state into the device\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_LOAD_FAILED); + return; + } + nvp->flags |= NV_INIT_FLAG_GPU_STATE_LOAD; + + return; +} + +static void RmTeardownDpauxRegisters( + nv_state_t *nv +) +{ +} + +static void RmTeardownHdacodecRegisters( + nv_state_t *nv +) +{ +} + +static void RmTeardownMipiCalRegisters( + nv_state_t *nv +) +{ + if (nv->mipical_regs && nv->mipical_regs->map) + { + osUnmapKernelSpace(nv->mipical_regs->map, + nv->mipical_regs->size); + nv->mipical_regs->map = NULL; + } +} + +static void RmTeardownHfrpRegisters( + nv_state_t *nv +) +{ +} + +static NV_STATUS +RmTeardownRegisters( + nv_state_t *nv +) +{ + NV_DEV_PRINTF(NV_DBG_SETUP, 
nv, "Tearing down registers\n"); + + if (nv->regs && nv->regs->map) + { + osUnmapKernelSpace(nv->regs->map, nv->regs->size); + nv->regs->map = 0; + nv->regs->map_u = NULL; + } + + RmTeardownDpauxRegisters(nv); + + RmTeardownHdacodecRegisters(nv); + + RmTeardownMipiCalRegisters(nv); + + RmTeardownHfrpRegisters(nv); + + return NV_OK; +} + +static NV_STATUS +RmSetupDpauxRegisters( + nv_state_t *nv, + UNIX_STATUS *status +) +{ + + return NV_OK; +} + +static NV_STATUS +RmSetupHdacodecRegisters( + nv_state_t *nv, + UNIX_STATUS *status +) +{ + + return NV_OK; +} + +static NV_STATUS +RmSetupMipiCalRegisters( + nv_state_t *nv, + UNIX_STATUS *status +) +{ + if (nv->mipical_regs != NULL) + { + nv_os_map_kernel_space(nv, nv->mipical_regs); + if (nv->mipical_regs->map == NULL) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to map mipical registers!!\n"); + RM_SET_ERROR(*status, RM_INIT_REG_SETUP_FAILED); + status->rmStatus = NV_ERR_OPERATING_SYSTEM; + return NV_ERR_GENERIC; + } + } + + if (nv->mipical_regs != NULL) + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, " MIPICAL: " NvP64_fmt " " NvP64_fmt " 0x%p\n", + nv->mipical_regs->cpu_address, nv->mipical_regs->size, nv->mipical_regs->map); + } + + return NV_OK; +} + +static NV_STATUS +RmSetupHfrpRegisters( + nv_state_t *nv, + UNIX_STATUS *status +) +{ + + return NV_OK; +} + +static void +RmSetupRegisters( + nv_state_t *nv, + UNIX_STATUS *status +) +{ + NV_STATUS ret; + + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "RmSetupRegisters for 0x%x:0x%x\n", + nv->pci_info.vendor_id, nv->pci_info.device_id); + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "pci config info:\n"); + NV_DEV_PRINTF(NV_DBG_SETUP, nv, " registers look like: 0x%" NvU64_fmtx " 0x%" NvU64_fmtx, + nv->regs->cpu_address, nv->regs->size); + + if (nv->fb != NULL) + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, " fb looks like: 0x%" NvU64_fmtx " 0x%" NvU64_fmtx "\n", + nv->fb->cpu_address, nv->fb->size); + } + + nv_os_map_kernel_space(nv, nv->regs); + + if (nv->regs->map == NULL) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to map regs registers!!\n"); + RM_SET_ERROR(*status, RM_INIT_REG_SETUP_FAILED); + status->rmStatus = NV_ERR_OPERATING_SYSTEM; + return; + } + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "Successfully mapped framebuffer and registers\n"); + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "final mappings:\n"); + NV_DEV_PRINTF(NV_DBG_SETUP, nv, " regs: 0x%" NvU64_fmtx " 0x%" NvU64_fmtx " 0x%p\n", + nv->regs->cpu_address, nv->regs->size, nv->regs->map); + + ret = RmSetupDpauxRegisters(nv, status); + if (ret != NV_OK) + goto err_unmap_disp_regs; + + ret = RmSetupHdacodecRegisters(nv, status); + if (ret != NV_OK) + { + RmTeardownDpauxRegisters(nv); + goto err_unmap_disp_regs; + } + + ret = RmSetupMipiCalRegisters(nv, status); + if (ret != NV_OK) + { + RmTeardownHdacodecRegisters(nv); + RmTeardownDpauxRegisters(nv); + goto err_unmap_disp_regs; + } + + ret = RmSetupHfrpRegisters(nv, status); + if (ret != NV_OK) + { + RmTeardownMipiCalRegisters(nv); + RmTeardownHdacodecRegisters(nv); + RmTeardownDpauxRegisters(nv); + goto err_unmap_disp_regs; + } + + return; + +err_unmap_disp_regs: + if (nv->regs && nv->regs->map) + { + osUnmapKernelSpace(nv->regs->map, nv->regs->size); + nv->regs->map = 0; + } + + return; +} + +NvBool RmInitPrivateState( + nv_state_t *pNv +) +{ + nv_priv_t *nvp; + NvU32 gpuId; + NvU32 socChipId0 = 0; + NvU32 pmc_boot_0 = 0; + NvU32 pmc_boot_1 = 0; + NvU32 pmc_boot_42 = 0; + NvU32 dmaAddrWidth = 0; + + NV_SET_NV_PRIV(pNv, NULL); + + if (NV_IS_SOC_DISPLAY_DEVICE(pNv)) + socChipId0 = pNv->disp_sw_soc_chip_id; + else + { + pNv->regs->map_u 
= os_map_kernel_space(pNv->regs->cpu_address,
+                                               os_page_size,
+                                               NV_MEMORY_UNCACHED);
+        if (pNv->regs->map_u == NULL)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "failed to map GPU registers (DISABLE_INTERRUPTS).\n");
+            return NV_FALSE;
+        }
+
+        pmc_boot_0 = NV_PRIV_REG_RD32(pNv->regs->map_u, NV_PMC_BOOT_0);
+        pmc_boot_1 = NV_PRIV_REG_RD32(pNv->regs->map_u, NV_PMC_BOOT_1);
+        pmc_boot_42 = NV_PRIV_REG_RD32(pNv->regs->map_u, NV_PMC_BOOT_42);
+
+        os_unmap_kernel_space(pNv->regs->map_u, os_page_size);
+        pNv->regs->map_u = NULL;
+    }
+
+    if (os_alloc_mem((void **)&nvp, sizeof(*nvp)) != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "failed to allocate private device state.\n");
+        return NV_FALSE;
+    }
+
+    gpuId = nv_generate_id_from_pci_info(&pNv->pci_info);
+
+    if (gpumgrRegisterGpuId(gpuId, nv_encode_pci_info(&pNv->pci_info)) != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "failed to register GPU with GPU manager.\n");
+        os_free_mem(nvp);
+        return NV_FALSE;
+    }
+
+    pNv->gpu_id = gpuId;
+
+    nv_set_probed_gpu_flags(pNv);
+
+    pNv->iovaspace_id = nv_requires_dma_remap(pNv) ? gpuId :
+                                                     NV_IOVA_DOMAIN_NONE;
+    pNv->cpu_numa_node_id = NV0000_CTRL_NO_NUMA_NODE;
+
+    // Get the GpuArch instance for this architecture to determine the DMA address width.
+    GpuArch *pGpuArch = gpumgrGetGpuArch(pmc_boot_42, socChipId0, TEGRA_CHIP_TYPE_DEFAULT);
+    if (pGpuArch == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "failed to get GpuArch for 0x%x/0x%x.\n",
+                  pmc_boot_42, socChipId0);
+        gpumgrUnregisterGpuId(gpuId);
+        os_free_mem(nvp);
+        return NV_FALSE;
+    }
+
+    dmaAddrWidth = gpuarchGetDmaAddrWidth(pGpuArch);
+    if (dmaAddrWidth == 0)
+    {
+        dmaAddrWidth = gpuarchGetSystemPhysAddrWidth(pGpuArch);
+    }
+    nv_set_dma_address_size(pNv, dmaAddrWidth);
+
+    pNv->is_tegra_pci_igpu = !NV_IS_SOC_DISPLAY_DEVICE(pNv) && gpuarchIsZeroFb(pGpuArch);
+    // Only certain Tegra PCI iGPUs support Rail-Gating
+    pNv->supports_tegra_igpu_rg = pNv->is_tegra_pci_igpu && gpuarchSupportsIgpuRg(pGpuArch);
+
+    os_mem_set(nvp, 0, sizeof(*nvp));
+    nvp->status = NV_ERR_INVALID_STATE;
+    nvp->pmc_boot_0 = pmc_boot_0;
+    nvp->pmc_boot_1 = pmc_boot_1;
+    nvp->pmc_boot_42 = pmc_boot_42;
+    nvp->db_supported = -1;
+    NV_SET_NV_PRIV(pNv, nvp);
+
+    return NV_TRUE;
+}
+
+void RmClearPrivateState(
+    nv_state_t *pNv
+)
+{
+    nv_priv_t *nvp = NV_GET_NV_PRIV(pNv);
+    NvU32 status;
+    void *pVbiosCopy = NULL;
+    void *pRegistryCopy = NULL;
+    NvU32 vbiosSize;
+    nv_i2c_adapter_entry_t i2c_adapters[MAX_I2C_ADAPTERS];
+    nv_dynamic_power_t dynamicPowerCopy;
+    NvU32 x = 0;
+    NvU32 pmc_boot_0, pmc_boot_1, pmc_boot_42;
+    NvBool pr3_acpi_method_present = NV_FALSE;
+    int db_supported;
+
+    //
+    // Do not clear private state after GPU resets; it is used while
+    // recovering the GPU. Only clear the pGpu pointer, which is
+    // restored during the next initialization cycle.
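+    // Everything else below is deliberately snapshotted, wiped with
+    // portMemSet(), and then restored: the VBIOS copy, registry, dynamic
+    // power state and PMC_BOOT values must survive the wipe so that recovery
+    // and the next init cycle can reuse them.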
+ // + if (pNv->flags & NV_FLAG_IN_RECOVERY) + { + nvp->pGpu = NULL; + } + + status = nvp->status; + pVbiosCopy = nvp->pVbiosCopy; + vbiosSize = nvp->vbiosSize; + pRegistryCopy = nvp->pRegistry; + dynamicPowerCopy = nvp->dynamic_power; + pmc_boot_0 = nvp->pmc_boot_0; + pmc_boot_1 = nvp->pmc_boot_1; + pmc_boot_42 = nvp->pmc_boot_42; + pr3_acpi_method_present = nvp->pr3_acpi_method_present; + db_supported = nvp->db_supported; + + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + i2c_adapters[x] = nvp->i2c_adapters[x]; + } + + portMemSet(nvp, 0, sizeof(nv_priv_t)); + + nvp->status = status; + nvp->pVbiosCopy = pVbiosCopy; + nvp->vbiosSize = vbiosSize; + nvp->pRegistry = pRegistryCopy; + nvp->dynamic_power = dynamicPowerCopy; + nvp->pmc_boot_0 = pmc_boot_0; + nvp->pmc_boot_1 = pmc_boot_1; + nvp->pmc_boot_42 = pmc_boot_42; + nvp->pr3_acpi_method_present = pr3_acpi_method_present; + nvp->db_supported = db_supported; + + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + nvp->i2c_adapters[x] = i2c_adapters[x]; + } + + nvp->flags |= NV_INIT_FLAG_PUBLIC_I2C; +} + +void RmFreePrivateState( + nv_state_t *pNv +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(pNv); + + gpumgrUnregisterGpuId(pNv->gpu_id); + + RmDestroyRegistry(pNv); + + if (nvp != NULL) + { + portMemFree(nvp->pVbiosCopy); + os_free_mem(nvp); + } + + NV_SET_NV_PRIV(pNv, NULL); +} + +NvBool RmPartiallyInitAdapter( + nv_state_t *nv +) +{ + NV_PRINTF(LEVEL_INFO, "%s: %04x:%02x:%02x.0\n", __FUNCTION__, + nv->pci_info.domain, nv->pci_info.bus, nv->pci_info.slot); + + nv_start_rc_timer(nv); + + return NV_TRUE; +} + +static NV_STATUS +RmInitX86Emu( + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + PORT_UNREFERENCED_VARIABLE(nv); + +#if NVCPU_IS_X86_64 + status = RmInitX86EmuState(pGpu); +#else + // We don't expect a "primary VGA" adapter on non-amd64 platforms + NV_ASSERT(!NV_PRIMARY_VGA(nv)); +#endif + + return status; +} + +static NV_STATUS RmRegisterGpudb( + OBJGPU *pGpu +) +{ + NV_STATUS rmStatus; + const NvU8 *pGid; + nv_state_t *pNv = NV_GET_NV_STATE(pGpu); + + pGid = RmGetGpuUuidRaw(pNv); + if (pGid == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get UUID\n"); + return NV_ERR_OPERATING_SYSTEM; + } + + rmStatus = gpudbRegisterGpu(pGid, &pGpu->gpuClData.upstreamPort.addr, + pGpu->busInfo.nvDomainBusDeviceFunc); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to register GPU with GPU data base\n"); + } + + return rmStatus; +} + +static void RmUnixFreeRmApi( + nv_state_t *nv +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + if (nv->rmapi.hClient != 0) + { + pRmApi->Free(pRmApi, nv->rmapi.hClient, nv->rmapi.hClient); + } + + portMemSet(&nv->rmapi, 0, sizeof(nv->rmapi)); +} + +static NvBool RmUnixAllocRmApi( + nv_state_t *nv, + NvU32 deviceId +) +{ + NV0080_ALLOC_PARAMETERS deviceParams = { 0 }; + NV2080_ALLOC_PARAMETERS subDeviceParams = { 0 }; + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + portMemSet(&nv->rmapi, 0, sizeof(nv->rmapi)); + + if (pRmApi->AllocWithHandle( + pRmApi, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_ROOT, + &nv->rmapi.hClient, + sizeof(nv->rmapi.hClient)) != NV_OK) + { + goto fail; + } + + // + // Any call to rmapiDelPendingDevices() will internally delete the UNIX OS + // layer RMAPI handles. Set this flag to preserve these handles. These + // handles will be freed explicitly by RmUnixFreeRmApi(). 
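+    // If setting the flag fails, we take the common fail: path below, which
+    // tears the partially built handle hierarchy down via RmUnixFreeRmApi().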
+    //
+    if (!rmclientSetClientFlagsByHandle(nv->rmapi.hClient,
+                                        RMAPI_CLIENT_FLAG_RM_INTERNAL_CLIENT))
+    {
+        goto fail;
+    }
+
+    deviceParams.deviceId = deviceId;
+
+    if (pRmApi->Alloc(
+            pRmApi,
+            nv->rmapi.hClient,
+            nv->rmapi.hClient,
+            &nv->rmapi.hDevice,
+            NV01_DEVICE_0,
+            &deviceParams,
+            sizeof(deviceParams)) != NV_OK)
+    {
+        goto fail;
+    }
+
+    subDeviceParams.subDeviceId = 0;
+
+    if (pRmApi->Alloc(
+            pRmApi,
+            nv->rmapi.hClient,
+            nv->rmapi.hDevice,
+            &nv->rmapi.hSubDevice,
+            NV20_SUBDEVICE_0,
+            &subDeviceParams,
+            sizeof(subDeviceParams)) != NV_OK)
+    {
+        goto fail;
+    }
+
+    //
+    // The NV40_I2C allocation is expected to fail if it is disabled in the
+    // RM config.
+    //
+    if (pRmApi->Alloc(
+            pRmApi,
+            nv->rmapi.hClient,
+            nv->rmapi.hSubDevice,
+            &nv->rmapi.hI2C,
+            NV40_I2C,
+            NULL,
+            0) != NV_OK)
+    {
+        nv->rmapi.hI2C = 0;
+    }
+
+    //
+    // The NV04_DISPLAY_COMMON allocation is expected to fail on a displayless
+    // system. The nv->rmapi.hDisp value needs to be checked before doing
+    // display related control calls.
+    //
+    if (pRmApi->Alloc(
+            pRmApi,
+            nv->rmapi.hClient,
+            nv->rmapi.hDevice,
+            &nv->rmapi.hDisp,
+            NV04_DISPLAY_COMMON,
+            NULL,
+            0) != NV_OK)
+    {
+        nv->rmapi.hDisp = 0;
+    }
+
+    return NV_TRUE;
+
+fail:
+    RmUnixFreeRmApi(nv);
+    return NV_FALSE;
+}
+
+static void _checkP2pChipsetSupport(
+    nv_state_t *nv
+)
+{
+}
+
+NvBool RmInitAdapter(
+    nv_state_t *nv
+)
+{
+    NvU32 devicereference = 0;
+    UNIX_STATUS status = INIT_UNIX_STATUS;
+    nv_priv_t *nvp;
+    NvBool retVal = NV_FALSE;
+    OBJSYS *pSys;
+    OBJGPU *pGpu = NULL;
+    OBJOS *pOS;
+    KernelDisplay *pKernelDisplay;
+    const void *gspFwHandle = NULL;
+    const void *gspFwLogHandle = NULL;
+    NvBool consoleDisabled = NV_FALSE;
+
+    NV_DEV_PRINTF(NV_DBG_SETUP, nv, "RmInitAdapter\n");
+
+    nv->flags &= ~NV_FLAG_PASSTHRU;
+    nv->flags &= ~NV_FLAG_PCI_P2P_UNSUPPORTED_CHIPSET;
+
+    RmSetupRegisters(nv, &status);
+    if (! RM_INIT_SUCCESS(status.initStatus) )
+        goto failed;
+
+    nvp = NV_GET_NV_PRIV(nv);
+    nvp->status = NV_ERR_OPERATING_SYSTEM;
+
+    status.rmStatus = RmInitDeviceDma(nv);
+    if (status.rmStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Cannot configure the device for DMA\n");
+        RM_SET_ERROR(status, RM_INIT_GPU_DMA_CONFIGURATION_FAILED);
+        goto shutdown;
+    }
+
+    nvp->flags |= NV_INIT_FLAG_DMA;
+
+    pSys = SYS_GET_INSTANCE();
+
+    //
+    // Get firmware from the OS, if requested, and decide if RM will run as a
+    // firmware client.
+    //
+    if (nv->request_firmware)
+    {
+        if (!NV_IS_SOC_DISPLAY_DEVICE(nv))
+        {
+        }
+        else
+        {
+            nv->request_fw_client_rm = NV_TRUE;
+        }
+    }
+
+    //
+    // Initialization path requires expanded GPU visibility in GPUMGR
+    // in order to access the GPU undergoing initialization.
+    //
+    status.rmStatus = gpumgrThreadEnableExpandedGpuVisibility();
+    if (status.rmStatus != NV_OK)
+    {
+        RM_SET_ERROR(status, RM_INIT_GPU_GPUMGR_EXPANDED_VISIBILITY_FAILED);
+        goto shutdown;
+    }
+
+    // initialize the RM device register mapping
+    osInitNvMapping(nv, &devicereference, &status);
+    if (! RM_INIT_SUCCESS(status.initStatus) )
+    {
+        switch (status.rmStatus)
+        {
+            case NV_ERR_NOT_SUPPORTED:
+                nvp->status = NV_ERR_NOT_SUPPORTED;
+                break;
+        }
+        NV_PRINTF(LEVEL_ERROR,
+                  "osInitNvMapping failed, bailing out of RmInitAdapter\n");
+        goto shutdown;
+    }
+
+    //
+    // now we can have a pdev for the first time...
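+    // (osInitNvMapping() above attached the GPU to the GPU manager, so
+    // gpumgrGetGpu() can now return a valid OBJGPU for this device)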
+ // + pGpu = gpumgrGetGpu(devicereference); + + pOS = SYS_GET_OS(pSys); + + // Boot GSP-RM proxy through COT command either via FSP or SEC2 + if (!IS_GSP_CLIENT(pGpu) && !IS_VIRTUAL(pGpu)) + { + status.rmStatus = gpuBootGspRmProxy(pGpu); + if (status.rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "GSP-RM proxy boot command failed.\n"); + RM_SET_ERROR(status, RM_INIT_FIRMWARE_INIT_FAILED); + goto shutdown; + } + } + + RmDeterminePrimaryDevice(pGpu); + + RmInitAcpiMethods(pOS, pSys, pGpu); + + // + // For GPU driving console, disable console access here, to ensure no console + // writes through BAR1 can interfere with physical RM's setup of BAR1 + // + if (nv->client_managed_console) + { + os_disable_console_access(); + consoleDisabled = NV_TRUE; + } + + // This needs to run before GSP-RM is booted, or else it will timeout + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_CLKS_IN_TEGRA_SOC)) + { + NV_PRINTF(LEVEL_INFO, "Enable Clocks to Max\n"); + RmEnableDeviceClks(nv); + } + + // + // If GSP fw RM support is enabled then start the GSP microcode + // (including the task running the full instance of the RM) and + // exchange the necessary initial RPC messages before continuing + // with GPU initialization here. + // + if (IS_GSP_CLIENT(pGpu)) + { + } + else if (IS_DCE_CLIENT(pGpu)) + { + status.rmStatus = dceclientDceRmInit(pGpu, GPU_GET_DCECLIENTRM(pGpu), NV_TRUE); + if (status.rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Cannot initialize DCE firmware RM\n"); + RM_SET_ERROR(status, RM_INIT_FIRMWARE_INIT_FAILED); + goto shutdown; + } + } + else if (nv->request_fw_client_rm) + { + // We were expecting to enable GSP-RM but something went wrong. + if (!nv->allow_fallback_to_monolithic_rm) + { + RM_SET_ERROR(status, RM_INIT_FIRMWARE_POLICY_FAILED); + goto shutdown; + } + else + { + NV_PRINTF(LEVEL_NOTICE, "Falling back to monolithic RM\n"); + } + } + + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + if (pKernelDisplay != NULL) + { + kdispSetWarPurgeSatellitesOnCoreFree(pKernelDisplay, NV_TRUE); + } + + if (IS_PASSTHRU(pGpu)) + nv->flags |= NV_FLAG_PASSTHRU; + + RmInitNvHal(nv, devicereference, &status); + if (!RM_INIT_SUCCESS(status.initStatus)) + { + NV_PRINTF(LEVEL_ERROR, + "RmInitNvHal() failed, bailing out of RmInitAdapter!\n"); + goto shutdown; + } + + status.rmStatus = RmInitX86Emu(pGpu); + if (status.rmStatus != NV_OK) + { + RM_SET_ERROR(status, RM_INIT_VBIOS_X86EMU_FAILED); + NV_PRINTF(LEVEL_ERROR, + "RmInitX86Emu failed, bailing out of RmInitAdapter\n"); + goto shutdown; + } + + + initVendorSpecificRegistry(pGpu, nv->pci_info.device_id); + + // finally, initialize the device + RmInitNvDevice(devicereference, &status); + if (! RM_INIT_SUCCESS(status.initStatus) ) + { + NV_PRINTF(LEVEL_ERROR, + "RmInitNvDevice failed, bailing out of RmInitAdapter\n"); + switch (status.rmStatus) + { + case NV_ERR_INSUFFICIENT_POWER: + nvp->status = NV_ERR_INSUFFICIENT_POWER; + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "GPU does not have the necessary power cables connected.\n"); + break; + } + goto shutdown; + } + + if (consoleDisabled) + { + os_enable_console_access(); + consoleDisabled = NV_FALSE; + } + + // + // Expanded GPU visibility in GPUMGR is no longer needed once the + // GPU is initialized. 
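+    // This balances the gpumgrThreadEnableExpandedGpuVisibility() call made
+    // just before osInitNvMapping() above.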
+ // + gpumgrThreadDisableExpandedGpuVisibility(); + + // LOCK: acquire GPUs lock + status.rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_INIT); + if (status.rmStatus != NV_OK) + { + goto shutdown; + } + + status.rmStatus = osVerifySystemEnvironment(pGpu); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + if (status.rmStatus != NV_OK) + { + RM_SET_ERROR(status, RM_INIT_SYS_ENVIRONMENT_FAILED); + switch (status.rmStatus) + { + case NV_ERR_IRQ_NOT_FIRING: + nvp->status = NV_ERR_IRQ_NOT_FIRING; + break; + } + NV_PRINTF(LEVEL_ERROR, "osVerifySystemEnvironment failed, bailing!\n"); + goto shutdown; + } + + nv_start_rc_timer(nv); + + nvp->status = NV_OK; + + if (!RmUnixAllocRmApi(nv, devicereference)) { + RM_SET_ERROR(status, RM_INIT_ALLOC_RMAPI_FAILED); + status.rmStatus = NV_ERR_GENERIC; + goto shutdown; + } + + if (!NV_IS_SOC_DISPLAY_DEVICE(nv)) + { + status.rmStatus = RmInitGpuInfoWithRmApi(pGpu); + if (status.rmStatus != NV_OK) + { + RM_SET_ERROR(status, RM_INIT_GPUINFO_WITH_RMAPI_FAILED); + goto shutdown; + } + } + + // i2c only on master device?? + RmI2cAddGpuPorts(nv); + nvp->flags |= NV_INIT_FLAG_PUBLIC_I2C; + + nv->flags &= ~NV_FLAG_IN_RECOVERY; + + pOS->setProperty(pOS, PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED, NV_TRUE); + + RmInitPowerManagement(nv); + + if (!NV_IS_SOC_DISPLAY_DEVICE(nv)) + { + status.rmStatus = RmRegisterGpudb(pGpu); + if (status.rmStatus != NV_OK) + { + RM_SET_ERROR(status, RM_GPUDB_REGISTER_FAILED); + goto shutdown; + } + } + + if (nvp->b_mobile_config_enabled) + { + NvU32 ac_plugged = 0; + if (nv_acpi_get_powersource(&ac_plugged) == NV_OK) + { + // LOCK: acquire GPU lock + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_NONE) == NV_OK) + { + // + // As we have already acquired the API Lock here, we are calling + // RmPowerSourceChangeEvent directly instead of rm_power_source_change_event. + // + RmPowerSourceChangeEvent(nv, !ac_plugged); + + // UNLOCK: release GPU lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + } + } + + _checkP2pChipsetSupport(nv); + + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "RmInitAdapter succeeded!\n"); + + retVal = NV_TRUE; + goto done; + + shutdown: + nv->flags &= ~NV_FLAG_IN_RECOVERY; + + gpumgrThreadDisableExpandedGpuVisibility(); + + if (consoleDisabled) + { + os_enable_console_access(); + } + + // call ShutdownAdapter to undo anything we've done above + RmShutdownAdapter(nv); + + failed: + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "RmInitAdapter failed! (0x%x:0x%x:%d)\n", + status.initStatus, status.rmStatus, status.line); + +done: + nv_put_firmware(gspFwHandle); + nv_put_firmware(gspFwLogHandle); + + return retVal; +} + +void RmShutdownAdapter( + nv_state_t *nv +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + NV_STATUS rmStatus; + + if ((pGpu != NULL) && (nvp->flags & NV_INIT_FLAG_GPUMGR_ATTACH)) + { + NvU32 gpuInstance = gpuGetInstance(pGpu); + NvU32 deviceInstance = gpuGetDeviceInstance(pGpu); + OBJSYS *pSys = SYS_GET_INSTANCE(); + + RmUnixFreeRmApi(nv); + + nv->ud.cpu_address = 0; + nv->ud.size = 0; + + // + // LOCK: lock all clients in case of eGPU hot unplug, which + // will not wait for all existing RM clients to stop using the GPU. 
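+        // The matching serverUnlockAllClients() call is made further below,
+        // once the GPUs lock has been released.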
+ // + if (!nv->is_external_gpu || serverLockAllClients(&g_resServ) == NV_OK) + { + // LOCK: acquire GPUs lock + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK) + { + // + // Shutdown path requires expanded GPU visibility in GPUMGR in order + // to access the GPU undergoing shutdown which may not be fully + // initialized, and to continue accessing the GPU undergoing shutdown + // after state destroy. + // + NV_ASSERT_OK(gpumgrThreadEnableExpandedGpuVisibility()); + + RmDestroyPowerManagement(nv); + + rmapiSetDelPendingClientResourcesFromGpuMask(NVBIT(gpuInstance)); + rmapiDelPendingDevices(NVBIT(gpuInstance)); + + os_disable_console_access(); + + if (nvp->flags & NV_INIT_FLAG_GPU_STATE_LOAD) + { + rmStatus = gpuStateUnload(pGpu, GPU_STATE_DEFAULT); + NV_ASSERT(rmStatus == NV_OK); + } + + if (nvp->flags & NV_INIT_FLAG_GPU_STATE) + { + rmStatus = gpuStateDestroy(pGpu); + NV_ASSERT(rmStatus == NV_OK); + } + + if (IS_DCE_CLIENT(pGpu)) + { + rmStatus = dceclientDceRmInit(pGpu, GPU_GET_DCECLIENTRM(pGpu), NV_FALSE); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "DCE firmware RM Shutdown failure\n"); + } + } + + os_enable_console_access(); + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_CLKS_IN_TEGRA_SOC)) + { + NV_PRINTF(LEVEL_INFO, "Disable Clocks\n"); + RmDisableDeviceClks(nv); + } + + //if (nvp->flags & NV_INIT_FLAG_HAL) + // destroyHal(pDev); + +#if NVCPU_IS_X86_64 + RmFreeX86EmuState(pGpu); +#endif + + gpumgrDetachGpu(gpuInstance); + gpumgrDestroyDevice(deviceInstance); + + // + // Expanded GPU visibility in GPUMGR is no longer needed once the + // GPU is removed from GPUMGR. + // + gpumgrThreadDisableExpandedGpuVisibility(); + + if (nvp->flags & NV_INIT_FLAG_DMA) + { + RmTeardownDeviceDma(nv); + } + + RmClearPrivateState(nv); + + RmUnInitAcpiMethods(pSys); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + // RM_BASIC_LOCK_MODEL: free GPU lock + rmGpuLockFree(deviceInstance); + } + + // UNLOCK: unlock all clients for eGPU hot unplug path + if (nv->is_external_gpu) + serverUnlockAllClients(&g_resServ); + } + } + else + { + RmClearPrivateState(nv); + } + + RmTeardownRegisters(nv); +} + +void RmPartiallyDisableAdapter( + nv_state_t *nv +) +{ + NV_PRINTF(LEVEL_INFO, "%s: RM is in SW Persistence mode\n", __FUNCTION__); + + nv_stop_rc_timer(nv); +} + +void RmDisableAdapter( + nv_state_t *nv +) +{ + NV_STATUS rmStatus; + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + NvU32 gpuMask; + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET)) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_TIMEOUT_RECOVERY, NV_TRUE); + nv->flags |= NV_FLAG_IN_RECOVERY; + } + + // + // LOCK: lock all clients in case of eGPU hot unplug, which + // will not wait for all existing RM clients to stop using the GPU. + // + if (!nv->is_external_gpu || serverLockAllClients(&g_resServ) == NV_OK) + { + + // LOCK: acquire GPUs lock + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK) + { + // + // Free the client allocated resources. + // + // This needs to happen prior to tearing down SLI state when SLI is enabled. + // + // Note this doesn't free RM internal resource allocations. Those are + // freed during (gpumgrUpdateSLIConfig->...->)gpuStateUnload. + // + // We need to free resources for all GPUs linked in a group as + // gpumgrUpdateSLIConfig will teardown GPU state for the entire set. 
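+            // For example, a two-GPU SLI pair with instances 0 and 1 yields
+            // gpuMask == (NVBIT(0) | NVBIT(1)), so pending client resources
+            // for both GPUs are freed here.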
+ // + gpuMask = gpumgrGetGpuMask(pGpu); + + rmapiSetDelPendingClientResourcesFromGpuMask(gpuMask); + rmapiDelPendingDevices(gpuMask); + + nv_stop_rc_timer(nv); + + os_disable_console_access(); + + if (nvp->flags & NV_INIT_FLAG_GPU_STATE_LOAD) + { + rmStatus = gpuStateUnload(pGpu, GPU_STATE_DEFAULT); + NV_ASSERT(rmStatus == NV_OK); + nvp->flags &= ~NV_INIT_FLAG_GPU_STATE_LOAD; + } + + os_enable_console_access(); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + // UNLOCK: unlock all clients for eGPU hot unplug path + if (nv->is_external_gpu) + serverUnlockAllClients(&g_resServ); + } +} + +NV_STATUS RmGetAdapterStatus( + nv_state_t *pNv, + NvU32 *pStatus +) +{ + // + // This status is determined in RmInitAdapter(); the glue layer + // requests it when the adapter failed to initialize to learn + // more about the error condition. This is currently limited to + // osVerifySystemEnvironment() failures. + // + nv_priv_t *nvp; + + nvp = NV_GET_NV_PRIV(pNv); + if (nvp == NULL) + { + return NV_ERR_INVALID_STATE; + } + + *pStatus = nvp->status; + return NV_OK; +} + +static void initVendorSpecificRegistry( + OBJGPU *pGpu, + NvU16 device_id +) +{ + NV_STATUS rmStatus; + NvU32 i; + NvU32 subsystem_id; + NvU32 subsystem_vendor_id; + NvU32 subsystem_device_id; + NvU32 vendor_id = 0; + + if (!pGpu) + return; + + if (pGpu->bIsSOC) + return; + + rmStatus = GPU_BUS_CFG_RD32(pGpu, + NV_CONFIG_PCI_NV_11, &subsystem_id); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: Cannot read NV_CONFIG_PCI_NV_11\n", __FUNCTION__); + return; + } + + subsystem_vendor_id = (subsystem_id & 0xffff); + subsystem_device_id = (subsystem_id >> 16); + + for (i = 0; (nb_id_table[i].subsystem_vendor_id) != 0; i++) + { + if ((nb_id_table[i].subsystem_vendor_id == subsystem_vendor_id) && + (nb_id_table[i].subsystem_device_id == subsystem_device_id) && + (nb_id_table[i].gpu_device_id == device_id)) + { + vendor_id = subsystem_vendor_id; + break; + } + } + + if (vendor_id != 0) + { + for (i = 0; nb_reg_table[i].vendor_id != 0; i++) + { + if (nb_reg_table[i].vendor_id == vendor_id) + { + osWriteRegistryDword(pGpu, nb_reg_table[i].name, + nb_reg_table[i].data); + } + } + } +} + +static void initUnixSpecificRegistry( + OBJGPU *pGpu +) +{ + // By default, enable GPU reset on Unix + osWriteRegistryDword(pGpu, "RMSecBusResetEnable", 1); + osWriteRegistryDword(pGpu, "RMForcePcieConfigSave", 1); + +} + +void +osRemoveGpu( + NvU32 domain, + NvU8 bus, + NvU8 device +) +{ + void *handle; + + handle = os_pci_init_handle(domain, bus, device, 0, NULL, NULL); + if (handle != NULL) + { + os_pci_remove(handle); + } +} + +NV_STATUS RmExcludeAdapter( + nv_state_t *nv +) +{ + return NV_ERR_NOT_SUPPORTED; +} diff --git a/src/nvidia/arch/nvalloc/unix/src/osmemdesc.c b/src/nvidia/arch/nvalloc/unix/src/osmemdesc.c new file mode 100644 index 0000000..ba6d52b --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/osmemdesc.c @@ -0,0 +1,1218 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************* OS Memory Descriptor APIS *****************************\ +* * +* This contains routines to create and destroy OS memory descriptor * +* * +****************************************************************************/ + +#include // NV device driver interface +#include +#include +#include +#include +#include +#include + +static NV_STATUS osCreateOsDescriptorFromPageArray(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void **); +static void osDestroyOsDescriptorPageArray(PMEMORY_DESCRIPTOR); + +static NV_STATUS osCreateOsDescriptorFromIoMemory(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void**); +static void osDestroyOsDescriptorFromIoMemory(PMEMORY_DESCRIPTOR); + +static NV_STATUS osCreateOsDescriptorFromPhysAddr(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void**); +static void osDestroyOsDescriptorFromPhysAddr(PMEMORY_DESCRIPTOR); + +static NV_STATUS osCreateOsDescriptorFromFileHandle(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void**); +static NV_STATUS osCreateOsDescriptorFromDmaBufPtr(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void**); +static void osDestroyOsDescriptorFromDmaBuf(PMEMORY_DESCRIPTOR); +static NV_STATUS osCreateOsDescriptorFromSgtPtr(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void**); +static void osDestroyOsDescriptorFromSgt(PMEMORY_DESCRIPTOR); + +static NV_STATUS osCheckGpuBarsOverlapAddrRange(NvRangeU64 addrRange); + +NV_STATUS +osCreateMemFromOsDescriptor +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + NvU32 descriptorType, + RS_PRIV_LEVEL privilegeLevel +) +{ + RmClient* pClient; + NV_STATUS rmStatus; + void *pPrivate; + + pClient = serverutilGetClientUnderLock(hClient); + if ((pDescriptor == NvP64_NULL) || + (*pLimit == 0) || + (pClient == NULL)) + { + return NV_ERR_INVALID_PARAM_STRUCT; + } + + // + // For the sake of simplicity, unmatched RM and OS page + // sizes are not currently supported in this path, except for + // aarch64. + // + // Also, the nvmap handle is sent which can be any random number so + // the virtual address alignment sanity check can't be done here. 
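+    // (aarch64 kernels may use an OS page size larger than NV_RM_PAGE_SIZE,
+    // e.g. 64 KiB pages; that case is exempted here and handled by the
+    // RmInflateOsToRmPageArray()/RmDeflateRmToOsPageArray() helpers used
+    // elsewhere in this file.)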
+ // + if (!NVCPU_IS_AARCH64 && + (NV_RM_PAGE_SIZE != os_page_size)) + { + return NV_ERR_NOT_SUPPORTED; + } + + // + // The two checks below use cached privilege because they + // concern the privilege level of the client, and not the + // privilege level of the calling context which may be + // overridden to KERNEL at some internal callsites. + // + + // + // The RM cannot obtain a table of physical addresses + // for a kernel virtual address range on all of + // the supported UNIX platforms. Since this path is + // not really compelling for kernel allocations on any + // of those platforms, it is not supported. + // For UVM, they could have pre-allocated sysmem to register + // with RM so we put in an exception for that case. + // + if ((rmclientGetCachedPrivilege(pClient) >= RS_PRIV_LEVEL_KERNEL) && + (descriptorType != NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR) && + (descriptorType != NVOS32_DESCRIPTOR_TYPE_OS_FILE_HANDLE) && + (descriptorType != NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR) && + (descriptorType != NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR)) + { + return NV_ERR_NOT_SUPPORTED; + } + + // + // NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR and + // NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR can only be utilized by kernel space + // rm-clients. + // + if ((rmclientGetCachedPrivilege(pClient) < RS_PRIV_LEVEL_KERNEL) && + ((descriptorType == NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR) || + (descriptorType == NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR))) + { + return NV_ERR_NOT_SUPPORTED; + } + + switch (descriptorType) + { + case NVOS32_DESCRIPTOR_TYPE_VIRTUAL_ADDRESS: + rmStatus = NV_ERR_NOT_SUPPORTED; + break; + case NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR: + if (privilegeLevel < RS_PRIV_LEVEL_KERNEL) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + break; + } + rmStatus = osCreateOsDescriptorFromPhysAddr(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + case NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY: + rmStatus = osCreateOsDescriptorFromIoMemory(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + case NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY: + rmStatus = osCreateOsDescriptorFromPageArray(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + case NVOS32_DESCRIPTOR_TYPE_OS_FILE_HANDLE: + rmStatus = osCreateOsDescriptorFromFileHandle(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + case NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR: + rmStatus = osCreateOsDescriptorFromDmaBufPtr(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + case NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR: + rmStatus = osCreateOsDescriptorFromSgtPtr(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + default: + rmStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + + return rmStatus; +} + +static NV_STATUS +osCreateMemdescFromPages +( + OBJGPU *pGpu, + NvU64 size, + NvU32 flags, + NvU32 cacheType, + MEMORY_DESCRIPTOR **ppMemDesc, + void *pImportPriv, + void **ppPrivate +) +{ + NV_STATUS rmStatus = NV_OK; + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 memdescFlags = MEMDESC_FLAGS_NONE; + NvU32 gpuCachedFlags; + NvBool bUnprotected = NV_FALSE; + NvU64 osPageCount; + + // + // Align size up to os page size. This is important in + // order to support submemdesc mappings at native page size. + // Once dynamic tracking is enabled this will not be needed + // as all submemory will get tracked at its native page size. 
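+    // (For instance, with a 4 KiB OS page size a 6000-byte request is
+    // rounded up to 8192 bytes, giving osPageCount == 2.)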
+ // + size = NV_ALIGN_UP64(size, os_page_size); + + osPageCount = size >> os_page_shift; + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, flags)) + { + memdescFlags |= MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO; + } + + rmStatus = memdescCreate(ppMemDesc, pGpu, size, 0, + NV_MEMORY_NONCONTIGUOUS, ADDR_SYSMEM, + cacheType, memdescFlags); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, flags)) + gpuCachedFlags = NV_MEMORY_CACHED; + else + gpuCachedFlags = NV_MEMORY_UNCACHED; + + pMemDesc = *ppMemDesc; + rmStatus = nv_register_user_pages(NV_GET_NV_STATE(pGpu), + osPageCount, + memdescGetPteArray(pMemDesc, AT_CPU), pImportPriv, + ppPrivate, bUnprotected); + if (rmStatus != NV_OK) + { + memdescDestroy(pMemDesc); + return rmStatus; + } + + memdescSetGpuCacheAttrib(pMemDesc, gpuCachedFlags); + memdescSetAddress(pMemDesc, NvP64_NULL); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_FALSE); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM, NV_TRUE); + + if (!NV_IS_ALIGNED64(memdescGetPhysAddr(pMemDesc, AT_CPU, 0), os_page_size)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + NV_ASSERT_OR_GOTO(0, cleanup); + } + + NV_ASSERT_OK_OR_GOTO(rmStatus, memdescSetAllocSizeFields(pMemDesc, size, NV_RM_PAGE_SIZE), cleanup); + + + // + // If the OS layer doesn't think in RM page size, we need to inflate the + // PTE array into RM pages. + // + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmInflateOsToRmPageArray(memdescGetPteArray(pMemDesc, AT_CPU), pMemDesc->PageCount); + } + + // + // memdescMapIommu() requires the OS-private data to be set on the memory + // descriptor, but we don't want to wire up the teardown callback just yet: + // that callback needs to unpin the pages, but that will already be done + // as part of failure handling further up the stack if memdescMapIommu() + // fails. So we only set up the priv-data cleanup callback once we're sure + // this call will succeed. + // + memdescSetMemData(pMemDesc, *ppPrivate, NULL); + + rmStatus = memdescMapIommu(pMemDesc, pGpu->busInfo.iovaspaceId); +cleanup: + if (rmStatus != NV_OK) + { + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + nv_unregister_user_pages(NV_GET_NV_STATE(pGpu), + osPageCount, + NULL /* import_priv */, ppPrivate); + memdescDestroy(pMemDesc); + return rmStatus; + } + + return NV_OK; +} + +static NV_STATUS +osCreateOsDescriptorFromPageArray +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus; + + *ppPrivate = NvP64_VALUE(pDescriptor); + + // + // Since the only type of memory permitted in this path + // is anonymous user memory, certain restrictions + // apply for the allocation flags: + // + // 1) anonymous memory is write-back cacheable, hence + // the _COHERENCY flag must match. + // + // 2) the RM has no control over the location of the + // associated pages in memory and thus cannot + // honor requests for contiguous memory. + // + // These restrictions are enforced here to avoid subtle + // bugs later on. 
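+    // As an illustration (one valid combination, not the only one), a flags
+    // word containing
+    //     DRF_DEF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK) |
+    //     DRF_DEF(OS02, _FLAGS, _PHYSICALITY, _NONCONTIGUOUS)
+    // passes both checks below.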
+    //
+    if ((!FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _CACHED, flags) &&
+         !FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK, flags)) ||
+        FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, flags))
+    {
+        return NV_ERR_INVALID_FLAGS;
+    }
+
+    rmStatus = osCreateMemdescFromPages(pGpu, (*pLimit + 1), flags,
+                                        NV_MEMORY_CACHED, ppMemDesc,
+                                        NULL /* pImportPriv */, ppPrivate);
+    if (rmStatus != NV_OK)
+    {
+        return rmStatus;
+    }
+
+    // All is well - wire up the cleanup callback now
+    memdescSetMemData(*ppMemDesc, memdescGetMemData(*ppMemDesc),
+                      osDestroyOsDescriptorPageArray);
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Checks if the given address range overlaps with the BARs for any of
+ * the GPUs.
+ */
+static NV_STATUS
+osCheckGpuBarsOverlapAddrRange
+(
+    NvRangeU64 addrRange
+)
+{
+    NvRangeU64 gpuPhysAddrRange;
+    NvRangeU64 gpuPhysFbAddrRange;
+    NvRangeU64 gpuPhysInstAddrRange;
+    NvU32 gpuInstance;
+    OBJGPU *pGpu;
+    NvU32 gpuMask;
+    NV_STATUS rmStatus;
+
+    rmStatus = gpumgrGetGpuAttachInfo(NULL, &gpuMask);
+    NV_ASSERT_OR_RETURN(rmStatus == NV_OK, rmStatus);
+
+    gpuInstance = 0;
+    while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
+    {
+        if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB))
+        {
+            continue;
+        }
+
+        NV_INIT_RANGE(gpuPhysFbAddrRange, gpumgrGetGpuPhysFbAddr(pGpu),
+                      gpumgrGetGpuPhysFbAddr(pGpu) + pGpu->fbLength -1);
+
+        NV_INIT_RANGE(gpuPhysAddrRange, pGpu->busInfo.gpuPhysAddr,
+                      pGpu->busInfo.gpuPhysAddr + pGpu->deviceMappings[0].gpuNvLength -1);
+
+        NV_INIT_RANGE(gpuPhysInstAddrRange, pGpu->busInfo.gpuPhysInstAddr,
+                      pGpu->busInfo.gpuPhysInstAddr + pGpu->instLength -1);
+
+        if (NV_IS_OVERLAPPING_RANGE(gpuPhysFbAddrRange, addrRange) ||
+            NV_IS_OVERLAPPING_RANGE(gpuPhysAddrRange, addrRange) ||
+            NV_IS_OVERLAPPING_RANGE(gpuPhysInstAddrRange, addrRange))
+        {
+            return NV_ERR_INVALID_ADDRESS;
+        }
+    }
+
+    return NV_OK;
+}
+
+static NvU64
+_doWarBug4040336
+(
+    OBJGPU *pGpu,
+    NvU64 addr
+)
+{
+    if (gpuIsWarBug4040336Enabled(pGpu))
+    {
+        if ((addr & 0xffffffff00000000ULL) == 0x7fff00000000ULL)
+        {
+            addr = addr & 0xffffffffULL;
+        }
+    }
+    return addr;
+}
+
+static NV_STATUS
+osCreateOsDescriptorFromIoMemory
+(
+    OBJGPU *pGpu,
+    NvP64 pDescriptor,
+    NvHandle hClient,
+    NvU32 flags,
+    NvU64 *pLimit,
+    MEMORY_DESCRIPTOR **ppMemDesc,
+    void **ppPrivate
+)
+{
+    NV_STATUS rmStatus;
+    NvU32 gpuCachedFlags;
+    MEMORY_DESCRIPTOR *pMemDesc;
+    NvU64 *pPteArray;
+    NvRangeU64 physAddrRange;
+    NvU64 *base = 0;
+    NvBool bAllowMmap;
+    NvU64 size;
+
+    //
+    // Unlike the page array path, this one deals exclusively
+    // with I/O memory, which is expected to be contiguous
+    // physically, and which may only be accessed with uncached
+    // transactions.
+    //
+    if (!FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, flags) ||
+        !FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, flags))
+    {
+        return NV_ERR_INVALID_FLAGS;
+    }
+
+    //
+    // The _PEER_MAP_OVERRIDE flag is controlled by the RM and not the client.
+    //
+    // RM will set the _PEER_MAP_OVERRIDE_REQUIRED flag itself for I/O memory
+    // imported with RmVidHeapControl.
+    //
+    if (FLD_TEST_DRF(OS02, _FLAGS, _PEER_MAP_OVERRIDE, _REQUIRED, flags))
+    {
+        return NV_ERR_INSUFFICIENT_PERMISSIONS;
+    }
+
+    bAllowMmap = !FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NEVER_MAP, flags);
+
+    base = (void *)(NvUPtr)pDescriptor;
+
+    //
+    // There is an architectural deadlock scenario involved when full-duplex P2P
+    // is enabled over BAR1. See #3 in the description of bug 1571948, which
+    // explains the classic deadlock. So, make sure to error out usermode's
+    // memory registration if a memory range falls within any of the available
+    // GPUs' BAR windows.
+    //
+    physAddrRange.min = *base;
+    physAddrRange.max = *base + *pLimit;
+
+    rmStatus = osCheckGpuBarsOverlapAddrRange(physAddrRange);
+    if (rmStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "%s(): phys range 0x%016llx-0x%016llx overlaps with GPU BARs",
+                  __FUNCTION__, physAddrRange.min, physAddrRange.max);
+        return rmStatus;
+    }
+
+    //
+    // BF3's PCIe MMIO bus address at 0x800000000000 (CPU PA 0x7fff00000000) is
+    // too high for Ampere to address. As a result, BF3's bus address is
+    // moved to < 4GB. Now, the CPU PA and the bus address are no longer 1:1
+    // and need to be adjusted.
+    //
+    *base = _doWarBug4040336(pGpu, *base);
+
+    size = NV_ALIGN_UP64(*pLimit + 1, os_page_size);
+
+    rmStatus = memdescCreate(ppMemDesc, pGpu, size, 0,
+                             NV_MEMORY_CONTIGUOUS, ADDR_SYSMEM,
+                             NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE);
+    if (rmStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "%s(): error %d while attempting to create the MMIO mapping\n",
+                  __FUNCTION__, rmStatus);
+        return rmStatus;
+    }
+
+    pMemDesc = *ppMemDesc;
+
+    if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, flags))
+        gpuCachedFlags = NV_MEMORY_CACHED;
+    else
+        gpuCachedFlags = NV_MEMORY_UNCACHED;
+
+    memdescSetGpuCacheAttrib(pMemDesc, gpuCachedFlags);
+    memdescSetAddress(pMemDesc, NvP64_NULL);
+    memdescSetMemData(pMemDesc, NULL, NULL);
+    memdescSetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_FALSE);
+    memdescSetFlag(pMemDesc, MEMDESC_FLAGS_PEER_IO_MEM, NV_TRUE);
+
+    pPteArray = memdescGetPteArray(pMemDesc, AT_CPU);
+    pPteArray[0] = *base;
+
+    *ppPrivate = NULL;
+
+    if (!NV_IS_ALIGNED64(pPteArray[0], os_page_size))
+    {
+        rmStatus = NV_ERR_INVALID_ARGUMENT;
+        NV_ASSERT_OR_GOTO(0, cleanup);
+    }
+    NV_ASSERT_OK_OR_GOTO(rmStatus, memdescSetAllocSizeFields(pMemDesc, size, NV_RM_PAGE_SIZE), cleanup);
+
+    if (bAllowMmap)
+    {
+        rmStatus = nv_register_peer_io_mem(NV_GET_NV_STATE(pGpu), pPteArray,
+                                           NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount),
+                                           ppPrivate);
+        if (rmStatus != NV_OK)
+        {
+            memdescDestroy(pMemDesc);
+            return rmStatus;
+        }
+    }
+
+    memdescSetMemData(pMemDesc, *ppPrivate, NULL);
+
+    //
+    // memdescMapIommu() requires the OS-private data to be set on the memory
+    // descriptor, but we don't want to wire up the teardown callback just yet:
+    // that callback needs to unpin the pages, but that will already be done
+    // as part of failure handling further up the stack if memdescMapIommu()
+    // fails. So we only set up the priv-data cleanup callback once we're sure
+    // this call will succeed.
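+    // (The same two-phase sequence - memdescSetMemData() without a release
+    // callback, memdescMapIommu(), then re-registering the data with the
+    // callback - recurs in the page-array, phys-addr and DMA-BUF/SGT paths
+    // in this file.)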
+ // + rmStatus = memdescMapIommu(pMemDesc, pGpu->busInfo.iovaspaceId); +cleanup: + if (rmStatus != NV_OK) + { + if (*ppPrivate != NULL) + { + nv_unregister_peer_io_mem(NV_GET_NV_STATE(pGpu), *ppPrivate); + } + memdescDestroy(pMemDesc); + return rmStatus; + } + + // All is well - wire up the cleanup callback now + memdescSetMemData(pMemDesc, memdescGetMemData(pMemDesc), + osDestroyOsDescriptorFromIoMemory); + + return NV_OK; +} + +static NV_STATUS +osCreateOsDescriptorFromPhysAddr +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 *pPteArray; + NvU64 base = 0; + NvU32 cache_type = NV_MEMORY_CACHED; + NvU64 memdescFlags = MEMDESC_FLAGS_NONE; + NvU64 *pPhys_addrs; + NvU64 num_os_pages; + NvU32 idx; + NvU64 size; + + // Currently only work with contiguous sysmem allocations + if (!FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, flags)) + { + return NV_ERR_INVALID_FLAGS; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_TYPE_SYNCPOINT, _APERTURE, flags)) + { + // Syncpoint memory is uncached, DMA mapping needs to skip CPU sync. + cache_type = NV_MEMORY_UNCACHED; + + // + // Syncpoint memory is NISO. Don't attempt to IOMMU map if the NISO + // IOMMU isn't enabled. + // + if (!NV_SOC_IS_NISO_IOMMU_PRESENT(nv)) + { + memdescFlags |= MEMDESC_FLAGS_SKIP_IOMMU_MAPPING; + } + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, flags)) + { + memdescFlags |= MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO; + } + + base = (NvU64)pDescriptor; + size = NV_ALIGN_UP64((*pLimit + 1), os_page_size); + + NV_ASSERT_OR_RETURN(NV_IS_ALIGNED64(base, os_page_size), NV_ERR_INVALID_ARGUMENT); + + rmStatus = memdescCreate(ppMemDesc, pGpu, size, 0, + NV_MEMORY_CONTIGUOUS, ADDR_SYSMEM, + cache_type, memdescFlags); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): error %d while creating memdesc for kernel memory\n", + __FUNCTION__, rmStatus); + return rmStatus; + } + + pMemDesc = *ppMemDesc; + + memdescSetAddress(pMemDesc, NvP64_NULL); + memdescSetMemData(pMemDesc, NULL, NULL); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM, NV_TRUE); + + pPteArray = memdescGetPteArray(pMemDesc, AT_CPU); + pPteArray[0] = base; + + num_os_pages = size >> os_page_shift; + pPhys_addrs = portMemAllocNonPaged(sizeof(NvU64) * num_os_pages); + if (pPhys_addrs == NULL) + goto cleanup_memdesc; + + for (idx = 0; idx < num_os_pages; idx++) + { + pPhys_addrs[idx] = base + (idx * os_page_size); + } + + *ppPrivate = NULL; + rmStatus = nv_register_phys_pages(nv, pPhys_addrs, num_os_pages, + memdescGetCpuCacheAttrib(pMemDesc), + ppPrivate); + if (rmStatus != NV_OK) + goto cleanup_memdesc; + + NV_ASSERT_OK_OR_RETURN(memdescSetAllocSizeFields(pMemDesc, size, NV_RM_PAGE_SIZE)); + + // If IOMMU skip flag wasn't set earlier, create IOVA mapping. + if (!memdescGetFlag(pMemDesc, MEMDESC_FLAGS_SKIP_IOMMU_MAPPING)) + { + // + // memdescMapIommu() requires the OS-private data to be set on the memory + // descriptor, but we don't want to wire up the teardown callback just yet: + // that callback needs to unpin the pages, but that will already be done + // as part of failure handling further up the stack if memdescMapIommu() + // fails. So we only set up the priv-data cleanup callback once we're sure + // this call will succeed. 
+        //
+        memdescSetMemData(pMemDesc, *ppPrivate, NULL);
+
+        rmStatus = memdescMapIommu(pMemDesc, pGpu->busInfo.iovaspaceId);
+        if (rmStatus != NV_OK)
+            goto cleanup_pages;
+    }
+
+    // All is well - wire up the cleanup callback now
+    memdescSetMemData(pMemDesc, *ppPrivate,
+                      osDestroyOsDescriptorFromPhysAddr);
+
+    portMemFree(pPhys_addrs);
+
+    return NV_OK;
+
+cleanup_pages:
+    if (*ppPrivate != NULL)
+    {
+        nv_unregister_phys_pages(NV_GET_NV_STATE(pGpu), *ppPrivate);
+    }
+
+cleanup_memdesc:
+    memdescDestroy(pMemDesc);
+
+    portMemFree(pPhys_addrs);
+
+    return rmStatus;
+}
+
+static NV_STATUS
+_createMemdescFromDmaBufSgtHelper
+(
+    OBJGPU *pGpu,
+    NvU32 flags,
+    void *pImportPriv,
+    struct sg_table *pImportSgt,
+    NvU32 size,
+    MEMORY_DESCRIPTOR **ppMemDesc,
+    void **ppPrivate,
+    MEM_DATA_RELEASE_CALL_BACK *pMemDataReleaseCallback
+)
+{
+    NV_STATUS rmStatus = NV_OK;
+    NvU32 cacheType = NV_MEMORY_UNCACHED;
+    MEMORY_DESCRIPTOR *pMemDesc;
+    NvU64 memdescFlags = MEMDESC_FLAGS_NONE;
+    NvU32 gpuCachedFlags;
+    NvBool isPeerMmio = NV_FALSE;
+    NvU64 osPageCount;
+
+    //
+    // Align size up to os page size. This is important in
+    // order to support submemdesc mappings at native page size.
+    // Once dynamic tracking is enabled this will not be needed
+    // as all submemory will get tracked at its native page size.
+    //
+    size = NV_ALIGN_UP64(size, os_page_size);
+
+    osPageCount = size >> os_page_shift;
+
+    NV_ASSERT((pMemDataReleaseCallback == osDestroyOsDescriptorFromDmaBuf) ||
+              (pMemDataReleaseCallback == osDestroyOsDescriptorFromSgt));
+
+    if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_COMBINE, flags))
+    {
+        cacheType = NV_MEMORY_WRITECOMBINED;
+    }
+    else if (!FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, flags))
+    {
+        cacheType = NV_MEMORY_CACHED;
+    }
+
+    if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, flags))
+    {
+        gpuCachedFlags = NV_MEMORY_CACHED;
+    }
+    else
+    {
+        gpuCachedFlags = NV_MEMORY_UNCACHED;
+    }
+
+    if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_TYPE_SYNCPOINT, _APERTURE, flags))
+    {
+        // Syncpoint memory is uncached.
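+        // Any caller-requested CPU- or GPU-cacheable mapping is rejected
+        // below for syncpoint apertures.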
+ if ((cacheType != NV_MEMORY_UNCACHED) || + (gpuCachedFlags != NV_MEMORY_UNCACHED)) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): Error: Syncpoint memory region should be uncached!!!\n", + __FUNCTION__); + return NV_ERR_INVALID_FLAGS; + } + + isPeerMmio = NV_TRUE; + + NV_PRINTF(LEVEL_INFO, + "%s(): Syncpoint type sgt!\n", __FUNCTION__); + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, flags)) + { + memdescFlags |= MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, _YES, flags)) + { + memdescFlags |= MEMDESC_FLAGS_USER_READ_ONLY; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, _YES, flags)) + { + memdescFlags |= MEMDESC_FLAGS_DEVICE_READ_ONLY; + } + + rmStatus = memdescCreate(ppMemDesc, pGpu, size, 0, + NV_MEMORY_NONCONTIGUOUS, ADDR_SYSMEM, + cacheType, memdescFlags); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + pMemDesc = *ppMemDesc; + + memdescSetGpuCacheAttrib(pMemDesc, gpuCachedFlags); + memdescSetAddress(pMemDesc, NvP64_NULL); + memdescSetMemData(pMemDesc, NULL, NULL); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_FALSE); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM, NV_TRUE); + + *ppPrivate = NULL; + rmStatus = nv_register_sgt(NV_GET_NV_STATE(pGpu), + memdescGetPteArray(pMemDesc, AT_CPU), + osPageCount, + memdescGetCpuCacheAttrib(pMemDesc), + ppPrivate, + pImportSgt, + pImportPriv, + isPeerMmio); + if (rmStatus != NV_OK) + { + memdescDestroy(pMemDesc); + return rmStatus; + } + + + if (!NV_IS_ALIGNED64(memdescGetPhysAddr(pMemDesc, AT_CPU, 0), os_page_size)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + NV_ASSERT_OR_GOTO(0, cleanup); + } + + NV_ASSERT_OK_OR_GOTO(rmStatus, memdescSetAllocSizeFields(pMemDesc, size, NV_RM_PAGE_SIZE), cleanup); + + // + // If the OS layer doesn't think in RM page size, we need to inflate the + // PTE array into RM pages. + // + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmInflateOsToRmPageArray(memdescGetPteArray(pMemDesc, AT_CPU), pMemDesc->PageCount); + } + + memdescSetMemData(*ppMemDesc, *ppPrivate, NULL); + + // + // memdescMapIommu() requires the OS-private data to be set on the memory + // descriptor, but we don't want to wire up the teardown callback just yet: + // that callback does teardown that will already be done as part of failure + // handling further up the stack if memdescMapIommu() fails. So we only + // setup the priv-data cleanup callback once we're sure this call will + // succeed. 
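+    // On failure, the deflate + nv_unregister_sgt() sequence below undoes
+    // the registration performed earlier in this function.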
+ // + rmStatus = memdescMapIommu(*ppMemDesc, pGpu->busInfo.iovaspaceId); +cleanup: + if (rmStatus != NV_OK) + { + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + if (*ppPrivate != NULL) + { + nv_unregister_sgt(NV_GET_NV_STATE(pGpu), &pImportSgt, + (void **) &pImportPriv, *ppPrivate); + } + memdescDestroy(pMemDesc); + return rmStatus; + } + + // All is well - wire up the cleanup callback now + memdescSetMemData(*ppMemDesc, *ppPrivate, pMemDataReleaseCallback); + + return rmStatus; +} + +static NV_STATUS +_createMemdescFromDmaBuf +( + OBJGPU *pGpu, + NvU32 flags, + nv_dma_buf_t *pImportPriv, + struct sg_table *pImportSgt, + NvU32 size, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus = + _createMemdescFromDmaBufSgtHelper(pGpu, flags, pImportPriv, pImportSgt, + size, ppMemDesc, ppPrivate, + osDestroyOsDescriptorFromDmaBuf); + if (rmStatus != NV_OK) + { + nv_dma_release_dma_buf(pImportPriv); + } + + return rmStatus; +} + +static NV_STATUS +_createMemdescFromSgt +( + OBJGPU *pGpu, + NvU32 flags, + struct drm_gem_object *pImportPrivGem, + struct sg_table *pImportSgt, + NvU32 size, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus = + _createMemdescFromDmaBufSgtHelper(pGpu, flags, pImportPrivGem, + pImportSgt, size, ppMemDesc, + ppPrivate, + osDestroyOsDescriptorFromSgt); + if (rmStatus != NV_OK) + { + nv_dma_release_sgt(pImportSgt, pImportPrivGem); + } + + return rmStatus; +} + +static nv_dma_device_t *GetDmaDeviceForImport +( + nv_state_t *nv, + NvU32 flags +) +{ + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, flags) && + (nv->niso_dma_dev != NULL)) + { + return nv->niso_dma_dev; + } + else + { + return nv->dma_dev; + } +} + +static NV_STATUS +osCreateOsDescriptorFromFileHandle +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus = NV_OK; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_dma_device_t *dma_dev = NULL; + NvU32 size = 0; + nv_dma_buf_t *pImportPriv = NULL; + struct sg_table *pImportSgt = NULL; + NvBool bRoDeviceMap = NV_FALSE; + NvS32 fd; + + fd = (NvS32)((NvU64)pDescriptor); + if ((NvU64)fd != (NvU64)pDescriptor) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): fd must fit within a signed 32-bit integer!\n", + __FUNCTION__); + return NV_ERR_INVALID_ARGUMENT; + } + + dma_dev = GetDmaDeviceForImport(nv, flags); + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, _YES, flags)) + { + bRoDeviceMap = NV_TRUE; + NV_PRINTF(LEVEL_INFO, + "%s(): RO DMA Mapping - flags [%x]!\n", + __FUNCTION__, flags); + } + + rmStatus = nv_dma_import_from_fd(dma_dev, fd, bRoDeviceMap, &size, + &pImportSgt, &pImportPriv); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): Error (%d) while trying to import fd!\n", + __FUNCTION__, rmStatus); + return rmStatus; + } + + return _createMemdescFromDmaBuf(pGpu, flags, pImportPriv, + pImportSgt, + size, ppMemDesc, ppPrivate); +} + +static NV_STATUS +osCreateOsDescriptorFromSgtPtr +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus = NV_OK; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR_PARAMETERS *params = + (NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR_PARAMETERS*)((NvUPtr) pDescriptor); + + struct 
sg_table *sgt = params->sgt; + struct drm_gem_object *gem = params->gem; + + rmStatus = nv_dma_import_sgt(nv->dma_dev, sgt, gem); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): Error (%d) while trying to import sgt!\n", + __FUNCTION__, rmStatus); + return rmStatus; + } + + return _createMemdescFromSgt(pGpu, flags, gem, sgt, + (*pLimit + 1), ppMemDesc, ppPrivate); +} + +static NV_STATUS +osCreateOsDescriptorFromDmaBufPtr +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus = NV_OK; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_dma_device_t *dma_dev = NULL; + NvU32 size = 0; + nv_dma_buf_t *pImportPriv = NULL; + struct sg_table *pImportSgt = NULL; + void *dmaBuf = (void*)((NvUPtr)pDescriptor); + NvBool bRoDeviceMap = NV_FALSE; + + dma_dev = GetDmaDeviceForImport(nv, flags); + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, _YES, flags)) + { + bRoDeviceMap = NV_TRUE; + NV_PRINTF(LEVEL_INFO, + "%s(): RO DMA Mapping - flags [%x]!\n", + __FUNCTION__, flags); + } + + rmStatus = nv_dma_import_dma_buf(dma_dev, dmaBuf, bRoDeviceMap, &size, + &pImportSgt, &pImportPriv); + if (rmStatus != NV_OK) + { + NV_PRINTF_COND(rmStatus == NV_ERR_NOT_SUPPORTED, LEVEL_INFO, LEVEL_ERROR, + "Error (%d) while trying to import dma_buf!\n", rmStatus); + return rmStatus; + } + + return _createMemdescFromDmaBuf(pGpu, flags, pImportPriv, + pImportSgt, + size, ppMemDesc, ppPrivate); +} + +static void +osDestroyOsDescriptorFromPhysAddr +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + void *pPrivate; + + pPrivate = memdescGetMemData(pMemDesc); + NV_ASSERT(pPrivate != NULL); + + if (!memdescGetFlag(pMemDesc, MEMDESC_FLAGS_SKIP_IOMMU_MAPPING)) + memdescUnmapIommu(pMemDesc, pGpu->busInfo.iovaspaceId); + + nv_unregister_phys_pages(NV_GET_NV_STATE(pGpu), pPrivate); +} + +static void +osDestroyOsDescriptorFromIoMemory +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + void *pPrivate = memdescGetMemData(pMemDesc); + + if (pPrivate == NULL) + { + return; + } + + nv_unregister_peer_io_mem(NV_GET_NV_STATE(pGpu), pPrivate); +} + +static void +osDestroyOsDescriptorPageArray +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + NvU64 osPageCount = NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount); + NV_STATUS status; + void *pPrivate; + + pPrivate = memdescGetMemData(pMemDesc); + + NV_ASSERT(pPrivate != NULL); + + // + // TODO: Bug 1811006: Notably skip any IOMMU mapping management as the + // pMemDesc->pGpu might have been torn down already and the pGpu passed in + // doesn't necessarily have IOMMU mappings. For now just allow memdescDestroy() + // to clean up whatever is there (this may not work correctly either if any + // of the IOMMU mappings have outlasted their VASPACEs). This should + // be cleaned up once the fix for bug 1811006 is known. + // + + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + nv_unregister_user_pages(NV_GET_NV_STATE(pGpu), osPageCount, + NULL /* import_priv */, &pPrivate); + + if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_FOREIGN_PAGE)) + { + os_free_mem(pPrivate); + } + else + { + // + // We use MEMDESC_FLAGS_USER_READ_ONLY because this reflects the + // NVOS02_FLAGS_ALLOC_USER_READ_ONLY flag value passed into + // os_lock_user_pages(). 
That flag also results in + // MEMDESC_FLAGS_DEVICE_READ_ONLY being set. + // + NvBool writable = !memdescGetFlag(pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY); + NvU32 flags = DRF_NUM(_LOCK_USER_PAGES, _FLAGS, _WRITE, writable); + status = os_unlock_user_pages(osPageCount, pPrivate, flags); + NV_ASSERT(status == NV_OK); + } +} + +static void +osDestroyOsDescriptorFromDmaBuf +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + void *pPrivate = memdescGetMemData(pMemDesc); + + struct sg_table *pImportSgt; + void *pImportPriv; + + /* + * Unmap IOMMU now or we will get a kernel crash when it is unmapped after + * pImportSgt is freed. + */ + memdescUnmapIommu(pMemDesc, pGpu->busInfo.iovaspaceId); + + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + nv_unregister_sgt(NV_GET_NV_STATE(pGpu), &pImportSgt, + &pImportPriv, pPrivate); + + /* + * pImportSgt doesn't need to be passed to nv_dma_release_dma_buf() because + * the DMA-BUF associated with pImportPriv already has a reference to the + * SGT. + */ + + nv_dma_release_dma_buf(pImportPriv); +} + +static void +osDestroyOsDescriptorFromSgt +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + void *pPrivate = memdescGetMemData(pMemDesc); + + struct sg_table *pImportSgt; + struct drm_gem_object *pImportPrivGem; + + NV_ASSERT(pPrivate != NULL); + + /* + * Unmap IOMMU now or we will get a kernel crash when it is unmapped after + * pImportSgt is freed. + */ + memdescUnmapIommu(pMemDesc, pGpu->busInfo.iovaspaceId); + + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + nv_unregister_sgt(NV_GET_NV_STATE(pGpu), &pImportSgt, + (void **) &pImportPrivGem, pPrivate); + + nv_dma_release_sgt(pImportSgt, pImportPrivGem); +} diff --git a/src/nvidia/arch/nvalloc/unix/src/osunix.c b/src/nvidia/arch/nvalloc/unix/src/osunix.c new file mode 100644 index 0000000..46d3388 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/osunix.c @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+
+/***************************** HW State Routines ***************************\
+*                                                                           *
+* Fills in OS-specific function pointers for the Unix OS object.            *
+*                                                                           *
+\***************************************************************************/
+
+#include
+
+void
+osInitObjOS(OBJOS *pOS)
+{
+    pOS->setProperty(pOS, PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT, NV_TRUE);
+    pOS->setProperty(pOS, PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE, NV_TRUE);
+    pOS->setProperty(pOS, PDB_PROP_OS_LIMIT_GPU_RESET, NV_TRUE);
+}
diff --git a/src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c b/src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c
new file mode 100644
index 0000000..489ec90
--- /dev/null
+++ b/src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c
@@ -0,0 +1,161 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include
+
+#include
+#include
+#include
+
+NV_STATUS
+RmPowerManagementTegra(
+    OBJGPU *pGpu,
+    nv_pm_action_t pmAction
+)
+{
+    //
+    // Default to NV_OK. There may be cases where resman is loaded, but
+    // no devices are allocated (we're still at the console). In these
+    // cases, it's fine to let the system do whatever it wants.
+    //
+    NV_STATUS rmStatus = NV_OK;
+
+    if (pGpu)
+    {
+        nv_state_t *nv = NV_GET_NV_STATE(pGpu);
+        nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
+
+        switch (pmAction)
+        {
+            case NV_PM_ACTION_HIBERNATE:
+                nvp->pm_state.InHibernate = NV_TRUE;
+                pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH, NV_TRUE);
+                rmStatus = gpuStateUnload(pGpu, GPU_STATE_FLAGS_PM_HIBERNATE |
+                                                GPU_STATE_FLAGS_PRESERVING |
+                                                GPU_STATE_FLAGS_PM_TRANSITION);
+                pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_HIBERNATE, NV_TRUE);
+                break;
+
+            case NV_PM_ACTION_STANDBY:
+                nvp->pm_state.InHibernate = NV_FALSE;
+                pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH, NV_TRUE);
+                rmStatus = gpuStateUnload(pGpu, GPU_STATE_FLAGS_PM_SUSPEND |
+                                                GPU_STATE_FLAGS_PRESERVING |
+                                                GPU_STATE_FLAGS_PM_TRANSITION);
+                pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_STANDBY, NV_TRUE);
+                pGpu->bInD3Cold = NV_TRUE;
+                break;
+
+            case NV_PM_ACTION_RESUME:
+                pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH, NV_TRUE);
+
+                if (nvp->pm_state.InHibernate)
+                {
+                    rmStatus = gpuStateLoad(pGpu, GPU_STATE_FLAGS_PM_HIBERNATE |
+                                                  GPU_STATE_FLAGS_PRESERVING |
+                                                  GPU_STATE_FLAGS_PM_TRANSITION);
+                    pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_HIBERNATE, NV_FALSE);
+                }
+                else
+                {
+                    rmStatus = gpuStateLoad(pGpu, GPU_STATE_FLAGS_PM_SUSPEND |
+                                                  GPU_STATE_FLAGS_PRESERVING |
+                                                  GPU_STATE_FLAGS_PM_TRANSITION);
+                    pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_STANDBY, NV_FALSE);
+                    pGpu->bInD3Cold = NV_FALSE;
+                }
+
+                pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH, NV_FALSE);
+                pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH, NV_FALSE);
+                break;
+
+            default:
+                rmStatus = NV_ERR_INVALID_ARGUMENT;
+                break;
+        }
+    }
+
+    return rmStatus;
+}
+
+// FIXME: needed for openrm-orin: refer to bug 5044365
+NV_STATUS NV_API_CALL rm_power_management(
+    nvidia_stack_t *sp,
+    nv_state_t *pNv,
+    nv_pm_action_t pmAction
+)
+{
+    THREAD_STATE_NODE threadState;
+    NV_STATUS rmStatus = NV_OK;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    NV_ASSERT_OK(os_flush_work_queue(pNv->queue, pmAction != NV_PM_ACTION_RESUME));
+
+    // LOCK: acquire API lock
+    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DYN_POWER)) == NV_OK)
+    {
+        OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
+
+        if (pGpu != NULL)
+        {
+            // LOCK: acquire GPUs lock
+            if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DYN_POWER)) == NV_OK)
+            {
+                rmStatus = RmPowerManagementTegra(pGpu, pmAction);
+
+                //
+                // RmPowerManagementTegra() is most likely to fail due to
+                // gpuStateUnload() failures deep in the RM's GPU power
+                // management paths. However, those paths make no
+                // attempt to unwind in case of errors. Rather, they
+                // soldier on and simply report an error at the very end.
+                // GPU software state meanwhile will indicate the GPU
+                // has been suspended.
+                //
+                // Sadly, in case of an error during suspend/hibernate,
+                // the only path forward here is to attempt to resume the
+                // GPU, accepting that the odds of success will vary.
+ // + if (rmStatus != NV_OK && pmAction != NV_PM_ACTION_RESUME) + { + RmPowerManagementTegra(pGpu, NV_PM_ACTION_RESUME); + } + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + } + // UNLOCK: release API lock + rmapiLockRelease(); + } + + NV_ASSERT_OK(os_flush_work_queue(pNv->queue, pmAction != NV_PM_ACTION_RESUME)); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} diff --git a/src/nvidia/arch/nvalloc/unix/src/registry.c b/src/nvidia/arch/nvalloc/unix/src/registry.c new file mode 100644 index 0000000..c822bd9 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/registry.c @@ -0,0 +1,524 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include + +#if defined(DEBUG_REGISTRY) +#define DBG_REG_PRINTF(a, ...) \ + NV_PRINTF(LEVEL_INFO, a, ##__VA_ARGS__) +#else +#define DBG_REG_PRINTF(a, ...) 
+#endif + +static NvS32 stringCaseCompare( + const char *string1, + const char *string2 +) +{ + NvU8 c1, c2; + + do + { + c1 = *string1, c2 = *string2; + if (c1 >= 'A' && c1 <= 'Z') + c1 += ('a' - 'A'); + if (c2 >= 'A' && c2 <= 'Z') + c2 += ('a' - 'A'); + string1++, string2++; + } + while ((c1 == c2) && (c1 != '\0')); + + return (c1 - c2); +} + +static nv_reg_entry_t *the_registry = NULL; + +static nv_reg_entry_t* regCreateNewRegistryKey( + nv_state_t *nv, + const char *regParmStr +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + nv_reg_entry_t *new_reg = NULL; + char *new_ParmStr = NULL; + NvU32 parm_size; + + if (regParmStr == NULL) + { + DBG_BREAKPOINT(); + return NULL; + } + + new_reg = portMemAllocNonPaged(sizeof(nv_reg_entry_t)); + if (NULL == new_reg) + { + NV_PRINTF(LEVEL_ERROR, "failed to grow registry\n"); + return NULL; + } + + portMemSet(new_reg, 0, sizeof(nv_reg_entry_t)); + + if (regParmStr != NULL) + { + parm_size = (portStringLength(regParmStr) + 1); + new_ParmStr = portMemAllocNonPaged(parm_size); + if (NULL == new_ParmStr) + { + NV_PRINTF(LEVEL_ERROR, "failed to allocate registry param string\n"); + portMemFree(new_reg); + return NULL; + } + + NV_ASSERT(parm_size <= NVOS38_MAX_REGISTRY_STRING_LENGTH); + + if (portMemCopy(new_ParmStr, parm_size, regParmStr, parm_size) == NULL) + { + NV_PRINTF(LEVEL_ERROR, "failed to copy registry param string\n"); + portMemFree(new_ParmStr); + portMemFree(new_reg); + return NULL; + } + } + + new_reg->regParmStr = new_ParmStr; + new_reg->type = NV_REGISTRY_ENTRY_TYPE_UNKNOWN; + + if (nvp != NULL) + { + new_reg->next = nvp->pRegistry; + nvp->pRegistry = new_reg; + DBG_REG_PRINTF("local registry now at 0x%p\n", nvp->pRegistry); + } + else + { + new_reg->next = the_registry; + the_registry = new_reg; + DBG_REG_PRINTF("global registry now at 0x%p\n", the_registry); + } + + return new_reg; +} + +static NV_STATUS regFreeEntry(nv_reg_entry_t *tmp) +{ + portMemFree(tmp->regParmStr); + tmp->regParmStr = NULL; + { + portMemFree(tmp->pdata); + tmp->pdata = NULL; + tmp->len = 0; + } + portMemFree(tmp); + + return NV_OK; +} + +static nv_reg_entry_t* regFindRegistryEntry( + nv_state_t *nv, + const char *regParmStr, + NvU32 type, + NvBool *bGlobalEntry +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + nv_reg_entry_t *tmp; + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + if (nvp != NULL) + { + tmp = nvp->pRegistry; + DBG_REG_PRINTF(" local registry at 0x%p\n", tmp); + + while ((tmp != NULL) && (tmp->regParmStr != NULL)) + { + DBG_REG_PRINTF(" Testing against %s\n", + tmp->regParmStr); + if ((stringCaseCompare(tmp->regParmStr, regParmStr) == 0) && + (type == tmp->type)) + { + DBG_REG_PRINTF(" found a match!\n"); + if (bGlobalEntry) + *bGlobalEntry = NV_FALSE; + return tmp; + } + tmp = tmp->next; + } + } + + tmp = the_registry; + DBG_REG_PRINTF(" global registry at 0x%p\n", tmp); + + while ((tmp != NULL) && (tmp->regParmStr != NULL)) + { + DBG_REG_PRINTF(" Testing against %s\n", + tmp->regParmStr); + if ((stringCaseCompare(tmp->regParmStr, regParmStr) == 0) && + (type == tmp->type)) + { + DBG_REG_PRINTF(" found a match!\n"); + if (bGlobalEntry) + *bGlobalEntry = NV_TRUE; + return tmp; + } + tmp = tmp->next; + } + + DBG_REG_PRINTF(" no match\n"); + return NULL; +} + +NV_STATUS RmWriteRegistryDword( + nv_state_t *nv, + const char *regParmStr, + NvU32 Data +) +{ + nv_reg_entry_t *tmp; + NvBool bGlobalEntry; + + if (regParmStr == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s -> 0x%x\n", __FUNCTION__, regParmStr, Data); + + tmp 
= regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_DWORD, &bGlobalEntry); + + // If we found an entry and we were looking for a global entry and + // found a global, or we were looking for a per-GPU entry and found a + // per-GPU entry + if (tmp != NULL && + ((nv == NULL && bGlobalEntry) || + (nv != NULL && !bGlobalEntry))) + { + tmp->data = Data; + + if (stringCaseCompare(regParmStr, "ResmanDebugLevel") == 0) + { + os_dbg_set_level(Data); + } + + return NV_OK; + } + + tmp = regCreateNewRegistryKey(nv, regParmStr); + if (tmp == NULL) + return NV_ERR_GENERIC; + + tmp->type = NV_REGISTRY_ENTRY_TYPE_DWORD; + tmp->data = Data; + + return NV_OK; +} + +NV_STATUS RmReadRegistryDword( + nv_state_t *nv, + const char *regParmStr, + NvU32 *Data +) +{ + nv_reg_entry_t *tmp; + + if ((regParmStr == NULL) || (Data == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_DWORD, NULL); + if (tmp == NULL) + { + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_BINARY, NULL); + if ((tmp != NULL) && (tmp->len >= sizeof(NvU32))) + { + *Data = *(NvU32 *)tmp->pdata; + } + else + { + DBG_REG_PRINTF(" not found\n"); + return NV_ERR_GENERIC; + } + } + else + { + *Data = tmp->data; + } + + DBG_REG_PRINTF(" found in the_registry: 0x%x\n", *Data); + + return NV_OK; +} + +NV_STATUS RmReadRegistryBinary( + nv_state_t *nv, + const char *regParmStr, + NvU8 *Data, + NvU32 *cbLen +) +{ + nv_reg_entry_t *tmp; + NV_STATUS status; + + if ((regParmStr == NULL) || (Data == NULL) || (cbLen == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_BINARY, NULL); + if (tmp == NULL) + { + DBG_REG_PRINTF(" not found\n"); + return NV_ERR_GENERIC; + } + + DBG_REG_PRINTF(" found\n"); + + if (*cbLen >= tmp->len) + { + portMemCopy((NvU8 *)Data, *cbLen, (NvU8 *)tmp->pdata, tmp->len); + *cbLen = tmp->len; + status = NV_OK; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "buffer (length: %u) is too small (data length: %u)\n", + *cbLen, tmp->len); + status = NV_ERR_GENERIC; + } + + return status; +} + +NV_STATUS RmWriteRegistryBinary( + nv_state_t *nv, + const char *regParmStr, + NvU8 *Data, + NvU32 cbLen +) +{ + nv_reg_entry_t *tmp; + NvBool bGlobalEntry; + + if ((regParmStr == NULL) || (Data == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_BINARY, &bGlobalEntry); + + // If we found an entry and we were looking for a global entry and + // found a global, or we were looking for a per-GPU entry and found a + // per-GPU entry + if (tmp != NULL && + ((nv == NULL && bGlobalEntry) || + (nv != NULL && !bGlobalEntry))) + { + if (tmp->pdata != NULL) + { + portMemFree(tmp->pdata); + tmp->pdata = NULL; + tmp->len = 0; + } + } + else + { + tmp = regCreateNewRegistryKey(nv, regParmStr); + if (tmp == NULL) + { + NV_PRINTF(LEVEL_ERROR, "failed to create binary registry entry\n"); + return NV_ERR_GENERIC; + } + } + + tmp->pdata = portMemAllocNonPaged(cbLen); + if (NULL == tmp->pdata) + { + NV_PRINTF(LEVEL_ERROR, "failed to write binary registry entry\n"); + return NV_ERR_GENERIC; + } + + tmp->type = NV_REGISTRY_ENTRY_TYPE_BINARY; + tmp->len = cbLen; + portMemCopy((NvU8 *)tmp->pdata, tmp->len, (NvU8 *)Data, cbLen); + + return NV_OK; +} + +NV_STATUS 
RmWriteRegistryString( + nv_state_t *nv, + const char *regParmStr, + const char *buffer, + NvU32 bufferLength +) +{ + nv_reg_entry_t *tmp; + NvBool bGlobalEntry; + + if ((regParmStr == NULL) || (buffer == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_STRING, &bGlobalEntry); + + // If we found an entry and we were looking for a global entry and + // found a global, or we were looking for a per-GPU entry and found a + // per-GPU entry + if (tmp != NULL && + ((nv == NULL && bGlobalEntry) || + (nv != NULL && !bGlobalEntry))) + { + if (tmp->pdata != NULL) + { + portMemFree(tmp->pdata); + tmp->len = 0; + tmp->pdata = NULL; + } + } + else + { + tmp = regCreateNewRegistryKey(nv, regParmStr); + if (tmp == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "failed to allocate a string registry entry!\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + } + + tmp->pdata = portMemAllocNonPaged(bufferLength); + if (tmp->pdata == NULL) + { + NV_PRINTF(LEVEL_ERROR, "failed to write a string registry entry!\n"); + return NV_ERR_NO_MEMORY; + } + + tmp->type = NV_REGISTRY_ENTRY_TYPE_STRING; + tmp->len = bufferLength; + portMemCopy((void *)tmp->pdata, tmp->len, buffer, (bufferLength - 1)); + tmp->pdata[bufferLength-1] = '\0'; + + return NV_OK; +} + +NV_STATUS RmReadRegistryString( + nv_state_t *nv, + const char *regParmStr, + NvU8 *buffer, + NvU32 *pBufferLength +) +{ + NvU32 bufferLength; + nv_reg_entry_t *tmp; + + if ((regParmStr == NULL) || (buffer == NULL) || (pBufferLength == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + bufferLength = *pBufferLength; + *pBufferLength = 0; + *buffer = '\0'; + + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_STRING, NULL); + if (tmp == NULL) + { + return NV_ERR_GENERIC; + } + + if (bufferLength >= tmp->len) + { + portMemCopy((void *)buffer, bufferLength, (void *)tmp->pdata, tmp->len); + *pBufferLength = tmp->len; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "buffer (length: %u) is too small (data length: %u)\n", + bufferLength, tmp->len); + return NV_ERR_BUFFER_TOO_SMALL; + } + + return NV_OK; +} + +NV_STATUS RmInitRegistry(void) +{ + NV_STATUS rmStatus; + + rmStatus = os_registry_init(); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to initialize the OS registry!\n"); + } + + return rmStatus; +} + +NV_STATUS RmDestroyRegistry(nv_state_t *nv) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + nv_reg_entry_t *tmp; + + if (nvp != NULL) + { + tmp = nvp->pRegistry; + nvp->pRegistry = NULL; + } + else + { + tmp = the_registry; + the_registry = NULL; + } + + while (tmp != NULL) + { + nv_reg_entry_t *entry = tmp; + tmp = tmp->next; + regFreeEntry(entry); + } + + return NV_OK; +} + diff --git a/src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c b/src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c new file mode 100644 index 0000000..ed965f1 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c @@ -0,0 +1,604 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*!
+ * @file
+ *
+ * @brief Provides the RmExportObject, RmImportObject, RmFreeObjExportHandle
+ * and RmGetExportObjectInfo interfaces:
+ *
+ * These interfaces allow rm clients to export their objects into
+ * a unique RmObjExportHandle which another rm client could
+ * import, even if the source rm client gets destroyed.
+ *
+ * RM's device instance may get destroyed asynchronously, in which
+ * case exported objects residing on that device instance also get
+ * destroyed. This means the object can no longer be imported, but the
+ * RmObjExportHandle into which it had been exported remains valid;
+ * it simply can never resolve to any other object.
+ *
+ * There are no init/fini routines; it is the responsibility of the
+ * rest of RM's eco-system to make sure that all RmObjExportHandles get
+ * freed during driver unload.
+ *
+ * The API lock is expected to be held before calling into
+ * rmobjexportimport.c; do not hold the GPU or any other lock.
+ */
+
+#include "rmobjexportimport.h"
+#include "nvlimits.h"
+#include "gpu/device/device.h"
+
+#include "containers/map.h"
+#include "rmapi/rmapi.h"
+#include "rmapi/rs_utils.h"
+
+#include "class/cl0080.h"
+#include "class/cl2080.h"
+#include
+#include
+
+//
+// A reference to an RmObjExportHandle
+// generated by function RmGenerateObjExportHandle().
+//
+MAKE_MAP(RmObjExportHandleMap, NvU8);
+
+//
+// Memory allocator
+//
+PORT_MEM_ALLOCATOR *pMemAllocator;
+
+//
+// Map RmObjExportHandle -> RmObjExportHandleRef
+//
+RmObjExportHandleMap objExportHandleMap;
+
+//
+// Rm client used to dup an object exported to an RmObjExportHandle. The minimal
+// requirement for duping is to have a device object allocated. This rm client
+// is simply like any other external rm client and has no special handling.
+//
+// We keep this rm client just like any other external rm client: if a
+// gpu/device gets powered-down/uninitialized, rm objects allocated by
+// external rm clients and located on that gpu/device get freed (the
+// os-layer does that). In that way, code in this file doesn't need to worry
+// about freeing exported objects located on that gpu/device.
+//
+NvHandle hObjExportRmClient;
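
The interfaces documented above compose in a fixed sequence: export dups the source object under the internal export client, import dups it again under the destination client, and the export handle is released independently of both. A minimal sketch of a caller follows; the wrapper name and its handle parameters are hypothetical, while RmExportObject, RmImportObject and RmFreeObjExportHandle are the interfaces added by this change, and the API lock must already be held as the file comment requires.

    // Hedged sketch: share an object between two RM clients via an export
    // handle. All four input handles are assumed to be valid, and this
    // relies on the declarations in rmobjexportimport.h.
    static NV_STATUS shareObjectAcrossClients(
        NvHandle hSrcClient, NvHandle hSrcObject,
        NvHandle hDstClient, NvHandle hDstParent,
        NvHandle *phDstObject)
    {
        RmObjExportHandle hExport = 0;
        NvU32 deviceInstance;
        NvU8 objectType;
        NV_STATUS status;

        // Dup the source object into a client-independent export handle;
        // it survives even if hSrcClient is subsequently destroyed.
        status = RmExportObject(hSrcClient, hSrcObject, &hExport, &deviceInstance);
        if (status != NV_OK)
            return status;

        // Dup the exported object under the destination client/parent.
        status = RmImportObject(hDstClient, hDstParent, phDstObject,
                                hExport, &objectType);

        // Drop the export-side reference regardless of the import result.
        RmFreeObjExportHandle(hExport);
        return status;
    }
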
+//
+// Tracker for device and subdevice handles. For now only one subdevice
+// (instance 0) is supported per device.
+//
+typedef struct
+{
+    NvHandle hRmDevice;
+    NvHandle hRmSubDevice;
+    NvHandle hGpuInstSub;
+} RmObjExportDevice;
+MAKE_MAP(RmObjExportDeviceMap, RmObjExportDevice);
+
+RmObjExportDeviceMap objExportDeviceMap;
+
+//
+// Usage reference counter for the static objects in this file: the rm client
+// used to dup exported objects, the memory allocator, the maps, etc.
+//
+NvU64 objExportImportRefCount;
+
+//
+// Static functions for internal use by code in this file.
+//
+static NV_STATUS RmRefObjExportImport (void);
+static void RmUnrefObjExportImport (void);
+
+static RmObjExportHandle RmGenerateObjExportHandle (void);
+static NV_STATUS RmUnrefObjExportHandle (RmObjExportHandle hObject);
+
+//
+// Free the RmObjExportHandle.
+//
+static NV_STATUS RmUnrefObjExportHandle(RmObjExportHandle hObject)
+{
+    RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
+
+    void *pHandleRef = mapFind(&objExportHandleMap, hObject);
+    if (pHandleRef == NULL)
+    {
+        return NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    if (pRmApi->Free(pRmApi,
+                     hObjExportRmClient,
+                     (NvHandle)mapKey(&objExportHandleMap, pHandleRef)) != NV_OK)
+    {
+        NV_PRINTF(LEVEL_WARNING,
+                  "Exported object being freed was a zombie in %s\n",
+                  __FUNCTION__);
+    }
+
+    mapRemove(&objExportHandleMap, pHandleRef);
+
+    return NV_OK;
+}
+
+//
+// Generate a unique RmObjExportHandle.
+//
+static RmObjExportHandle RmGenerateObjExportHandle(void)
+{
+    //
+    // The object export handle belongs to the range 0 to
+    // (MAX_OBJ_EXPORT_HANDLES - 1).
+    //
+    // Handle 0 is considered an invalid object handle; this function generates
+    // handles in the range 1 to (MAX_OBJ_EXPORT_HANDLES - 1).
+    //
+    #define MAX_OBJ_EXPORT_HANDLES 0x80000
+
+    static NvHandle hObjExportHandleNext = 1;
+
+    RmObjExportHandle hStartHandle = hObjExportHandleNext;
+    RmObjExportHandle hObject = 0;
+
+    do
+    {
+        void *pHandleRef;
+
+        hObject = hObjExportHandleNext++;
+        /* Reset hObjExportHandleNext to the next valid handle */
+        if (hObjExportHandleNext == MAX_OBJ_EXPORT_HANDLES) {
+            hObjExportHandleNext = 1;
+        }
+
+        pHandleRef = mapFind(&objExportHandleMap, hObject);
+
+        if (hObject != hObjExportRmClient && pHandleRef == NULL)
+        {
+            break;
+        }
+        else
+        {
+            hObject = 0;
+        }
+
+    } while (hObjExportHandleNext != hStartHandle);
+
+    if (hObject != 0)
+    {
+        void *pHandleRef = mapInsertNew(&objExportHandleMap, hObject);
+
+        if (pHandleRef == NULL)
+        {
+            hObject = 0;
+        }
+    }
+
+    return hObject;
+}
+
+//
+// Validate that the given hObject is not one of our internally used handles.
+//
+// Note that mapFind(&objExportHandleMap, hObject) could still fail; that is the
+// caller's responsibility.
+//
+static NvBool RmValidateHandleAgainstInternalHandles(RmObjExportHandle hObject)
+{
+    RmObjExportDeviceMapIter iter;
+
+    //
+    // No external RmObjExportHandle could be valid if hObjExportRmClient has
+    // not been allocated yet, or if it is equal to any of the handles used
+    // internally by code in this file.
+    //
+    if (objExportImportRefCount == 0 || hObjExportRmClient == 0 ||
+        hObject == hObjExportRmClient)
+    {
+        return NV_FALSE;
+    }
+
+    iter = mapIterAll(&objExportDeviceMap);
+    while (mapIterNext(&iter))
+    {
+        RmObjExportDevice *pRmObjExportDevice = iter.pValue;
+
+        if (pRmObjExportDevice->hRmDevice != 0 &&
+            (hObject == pRmObjExportDevice->hRmDevice ||
+             hObject == pRmObjExportDevice->hRmSubDevice ||
+             hObject == pRmObjExportDevice->hGpuInstSub))
+        {
+            return NV_FALSE;
+        }
+    }
+
+    return NV_TRUE;
+}
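
RmGenerateObjExportHandle() above walks the handle space circularly: it starts at the next candidate, wraps from MAX_OBJ_EXPORT_HANDLES back to 1 (handle 0 stays reserved as invalid), and gives up only after a full revolution. The following is a stand-alone model of just that search, with a toy bitmap standing in for mapFind() and the internal-handle checks; it is illustrative only, not part of the patch.

    /* Hedged sketch of the circular handle search; compiles on its own. */
    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_HANDLES 0x80000u          /* mirrors MAX_OBJ_EXPORT_HANDLES */

    static uint8_t usedBitmap[MAX_HANDLES / 8];

    static bool handleInUse(uint32_t h) { return (usedBitmap[h >> 3] >> (h & 7)) & 1; }
    static void markUsed(uint32_t h)    { usedBitmap[h >> 3] |= (uint8_t)(1u << (h & 7)); }

    /* Returns a free handle in [1, MAX_HANDLES), or 0 if all are taken. */
    static uint32_t generateHandle(void)
    {
        static uint32_t next = 1;
        uint32_t start = next;

        do {
            uint32_t h = next++;
            if (next == MAX_HANDLES)
                next = 1;                 /* wrap past the reserved handle 0 */
            if (!handleInUse(h))
                return h;
        } while (next != start);

        return 0;                         /* handle space exhausted */
    }

    int main(void)
    {
        uint32_t a = generateHandle(); markUsed(a);
        uint32_t b = generateHandle(); markUsed(b);
        printf("%u %u\n", (unsigned)a, (unsigned)b);  /* prints: 1 2 */
        return 0;
    }
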
+
+//
+// Increment the reference count of the static objects internally
+// used by code in this file.
+//
+static NV_STATUS RmRefObjExportImport(void)
+{
+    NV_STATUS rmStatus = NV_OK;
+    RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
+
+    if ((objExportImportRefCount++) != 0)
+    {
+        NV_ASSERT(hObjExportRmClient != 0);
+        NV_ASSERT(pMemAllocator != NULL);
+        return NV_OK;
+    }
+
+    rmStatus = pRmApi->AllocWithHandle(pRmApi,
+                                       NV01_NULL_OBJECT,
+                                       NV01_NULL_OBJECT,
+                                       NV01_NULL_OBJECT,
+                                       NV01_ROOT,
+                                       &hObjExportRmClient,
+                                       sizeof(hObjExportRmClient));
+    if (rmStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Unable to alloc root in %s\n", __FUNCTION__);
+        goto failed;
+    }
+
+    pMemAllocator = portMemAllocatorCreateNonPaged();
+
+    if (pMemAllocator == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Failed to alloc memory allocator in %s\n",
+                  __FUNCTION__);
+        // Without this, we would fall through to "failed" with NV_OK.
+        rmStatus = NV_ERR_NO_MEMORY;
+        goto failed;
+    }
+
+    mapInit(&objExportHandleMap, pMemAllocator);
+    mapInit(&objExportDeviceMap, pMemAllocator);
+
+    return NV_OK;
+
+failed:
+
+    RmUnrefObjExportImport();
+
+    return rmStatus;
+}
+
+//
+// Decrement the reference count of the static objects internally used by code
+// in this file, and free them if the reference count reaches zero.
+//
+static void RmUnrefObjExportImport(void)
+{
+    RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
+
+    if ((--objExportImportRefCount) != 0)
+    {
+        return;
+    }
+
+    if (pMemAllocator != NULL)
+    {
+        mapDestroy(&objExportDeviceMap);
+        mapDestroy(&objExportHandleMap);
+
+        portMemAllocatorRelease(pMemAllocator);
+        pMemAllocator = NULL;
+    }
+
+    if (hObjExportRmClient != 0)
+    {
+        NV_STATUS rmStatus = pRmApi->Free(pRmApi,
+                                          hObjExportRmClient,
+                                          hObjExportRmClient);
+
+        NV_ASSERT(rmStatus == NV_OK);
+        hObjExportRmClient = 0;
+    }
+}
+
+NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject,
+                         RmObjExportHandle *pDstObject, NvU32 *pDeviceInstance)
+{
+    RmObjExportHandle hDstObject;
+    NvU32 deviceInstance = NV_MAX_DEVICES;
+    NvBool bClientAsDstParent = NV_FALSE;
+    NV_STATUS status;
+    RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
+    NvU64 deviceMapIdx = 0;
+    RmObjExportDevice *pObjExportDevice = NULL;
+    RsResourceRef *pSrcResourceRef;
+    RsResourceRef *pDeviceRef;
+
+    if (pDstObject == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    //
+    // Find the device instance on which the rm object exists.
+    //
+    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
+        serverutilGetResourceRef(hSrcClient, hSrcObject, &pSrcResourceRef));
+
+    status = refFindAncestorOfType(pSrcResourceRef, classId(Device), &pDeviceRef);
+    if (status == NV_OK)
+    {
+        Device *pDevice = dynamicCast(pDeviceRef->pResource, Device);
+        deviceMapIdx = pDevice->deviceInst;
+        deviceInstance = pDevice->deviceInst;
+        pObjExportDevice = mapFind(&objExportDeviceMap, deviceMapIdx);
+    }
+    else
+    {
+        bClientAsDstParent = NV_TRUE;
+    }
+
+    status = RmRefObjExportImport();
+
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    if (!bClientAsDstParent &&
+        ((pObjExportDevice == NULL) ||
+         serverutilValidateNewResourceHandle(hObjExportRmClient,
+                                             pObjExportDevice->hRmDevice)))
+    {
+        //
+        // Device object has not been created, or it got destroyed in the
+        // teardown path of device instance destruction; allocate a fresh
+        // device object.
+ // + NV0080_ALLOC_PARAMETERS params; + NV2080_ALLOC_PARAMETERS subdevParams; + + if (pObjExportDevice == NULL) + { + pObjExportDevice = mapInsertNew(&objExportDeviceMap, deviceMapIdx); + + pObjExportDevice->hRmDevice = RmGenerateObjExportHandle(); + pObjExportDevice->hRmSubDevice = RmGenerateObjExportHandle(); + pObjExportDevice->hGpuInstSub = NV01_NULL_OBJECT; + + if (pObjExportDevice->hRmDevice == 0 || + pObjExportDevice->hRmSubDevice == 0) + { + NV_PRINTF(LEVEL_ERROR, "Failed to allocate object handles in %s\n", + __FUNCTION__); + + mapRemove(&objExportDeviceMap, pObjExportDevice); + + status = NV_ERR_NO_MEMORY; + goto done; + } + } + + portMemSet(¶ms, 0, sizeof(NV0080_ALLOC_PARAMETERS)); + + params.deviceId = deviceInstance; + params.hClientShare = hObjExportRmClient; + + status = pRmApi->AllocWithHandle(pRmApi, + hObjExportRmClient, + hObjExportRmClient, + pObjExportDevice->hRmDevice, + NV01_DEVICE_0, + ¶ms, + sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to alloc device in %s\n", + __FUNCTION__); + goto done; + } + + portMemSet(&subdevParams, 0, sizeof(NV2080_ALLOC_PARAMETERS)); + + subdevParams.subDeviceId = 0; + + status = pRmApi->AllocWithHandle(pRmApi, + hObjExportRmClient, + pObjExportDevice->hRmDevice, + pObjExportDevice->hRmSubDevice, + NV20_SUBDEVICE_0, + &subdevParams, + sizeof(subdevParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to alloc subdevice in %s\n", + __FUNCTION__); + + (void) pRmApi->Free(pRmApi, hObjExportRmClient, + pObjExportDevice->hRmDevice); + goto done; + } + + } + + hDstObject = RmGenerateObjExportHandle(); + + if (hDstObject == 0) + { + NV_PRINTF(LEVEL_ERROR, "Failed to allocate object handle in %s\n", + __FUNCTION__); + status = NV_ERR_NO_MEMORY; + goto done; + } + + // If duping under device handle fails, try subdevice handle. + status = pRmApi->DupObject(pRmApi, + hObjExportRmClient, + bClientAsDstParent ? hObjExportRmClient : + pObjExportDevice->hRmDevice, + &hDstObject, + hSrcClient, + hSrcObject, + 0 /* flags */); + if (status != NV_OK) + { + if (!bClientAsDstParent && (status == NV_ERR_INVALID_OBJECT_PARENT)) + { + NV_PRINTF(LEVEL_INFO, + "pRmApi->DupObject(Dev, failed due to invalid parent in %s." 
+                      " Now attempting DupObject with Subdev handle.\n",
+                      __FUNCTION__);
+
+            status = pRmApi->DupObject(pRmApi,
+                                       hObjExportRmClient,
+                                       pObjExportDevice->hRmSubDevice,
+                                       &hDstObject,
+                                       hSrcClient,
+                                       hSrcObject,
+                                       0 /* flags */);
+            if (status != NV_OK)
+            {
+                RmUnrefObjExportHandle(hDstObject);
+
+                NV_PRINTF(LEVEL_ERROR,
+                          "pRmApi->DupObject(Subdev, failed with error code 0x%x in %s\n",
+                          status, __FUNCTION__);
+                goto done;
+            }
+        }
+        else
+        {
+            RmUnrefObjExportHandle(hDstObject);
+
+            NV_PRINTF(LEVEL_ERROR,
+                      "pRmApi->DupObject(Dev, failed with error code 0x%x in %s\n",
+                      status, __FUNCTION__);
+            goto done;
+        }
+    }
+
+    if (pDeviceInstance != NULL)
+    {
+        *pDeviceInstance = deviceInstance;
+    }
+
+    *pDstObject = hDstObject;
+
+done:
+    if (status != NV_OK)
+    {
+        RmUnrefObjExportImport();
+    }
+
+    return status;
+}
+
+void RmFreeObjExportHandle(RmObjExportHandle hObject)
+{
+    if (!RmValidateHandleAgainstInternalHandles(hObject))
+    {
+        NV_PRINTF(LEVEL_ERROR, "Invalid handle to exported object in %s\n",
+                  __FUNCTION__);
+        return;
+    }
+
+    RmUnrefObjExportHandle(hObject);
+
+    RmUnrefObjExportImport();
+}
+
+NV_STATUS RmImportObject(NvHandle hDstClient, NvHandle hDstParent,
+                         NvHandle *phDstObject, RmObjExportHandle hSrcObject,
+                         NvU8 *pObjectType)
+{
+    NV_STATUS status;
+    NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS params;
+    RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
+
+    if (!RmValidateHandleAgainstInternalHandles(hSrcObject))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    if (mapFind(&objExportHandleMap, hSrcObject) == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    if (pObjectType != NULL)
+    {
+        params.hObject = hSrcObject;
+        //
+        // rmApiGetEffectiveAddrSpace expects mapping flags to be set as DIRECT
+        // for GPU-cacheable sysmem on pre-Ampere chips. We are not doing any
+        // mapping here, so pass this as a workaround to get the expected
+        // address space.
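
The FLD_SET_DRF() call below, like the FLD_TEST_DRF() checks used throughout this change, belongs to the DRF family of bitfield macros from nvmisc.h, which address register and flag fields by name rather than by hand-written shifts and masks. A simplified, self-contained model of the idea follows; the field layout and all names are invented for illustration and are not the real NV_OS33 encoding or the actual nvmisc.h definitions.

    /* Hedged sketch of DRF-style named-bitfield helpers. */
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed toy layout: a MAPPING field occupying bits 1:0 of a flags word. */
    #define MYDEV_FLAGS_MAPPING_SHIFT  0
    #define MYDEV_FLAGS_MAPPING_MASK   (0x3u << MYDEV_FLAGS_MAPPING_SHIFT)
    #define MYDEV_FLAGS_MAPPING_DIRECT 0x1u

    /* FLD_SET_DRF-style: clear the field in v, then OR in the named constant. */
    static inline uint32_t fldSetMapping(uint32_t v, uint32_t c)
    {
        return (v & ~MYDEV_FLAGS_MAPPING_MASK) |
               (c << MYDEV_FLAGS_MAPPING_SHIFT);
    }

    /* FLD_TEST_DRF-style: does the field currently hold the named constant? */
    static inline int fldTestMapping(uint32_t v, uint32_t c)
    {
        return ((v & MYDEV_FLAGS_MAPPING_MASK) >> MYDEV_FLAGS_MAPPING_SHIFT) == c;
    }

    int main(void)
    {
        uint32_t flags = fldSetMapping(0, MYDEV_FLAGS_MAPPING_DIRECT);
        printf("%d\n", fldTestMapping(flags, MYDEV_FLAGS_MAPPING_DIRECT)); /* 1 */
        return 0;
    }
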
+ // + params.mapFlags = FLD_SET_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, 0); + params.addrSpaceType = \ + NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID; + + status = pRmApi->Control(pRmApi, hObjExportRmClient, hObjExportRmClient, + NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE, + ¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GET_ADDR_SPACE_TYPE failed with error code 0x%x in %s\n", + status, __FUNCTION__); + return status; + } + + switch (params.addrSpaceType) + { + case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_SYSMEM: + *pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_SYSMEM; + break; + case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM: + *pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_VIDMEM; + break; + case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC: + *pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_FABRIC; + break; +#if defined(NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC_MC) && \ + defined(NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_FABRIC_MC) + case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC_MC: + *pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_FABRIC_MC; + break; +#endif + default: + NV_ASSERT_OK_OR_RETURN(NV_ERR_INVALID_ARGUMENT); + } + } + + status = pRmApi->DupObject(pRmApi, hDstClient, hDstParent, phDstObject, + hObjExportRmClient, hSrcObject, + 0 /* flags */); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "pRmApi->DupObject(pRmApi, failed with error code 0x%x in %s\n", + status, __FUNCTION__); + return status; + } + + return NV_OK; +} diff --git a/src/nvidia/exports_link_command.txt b/src/nvidia/exports_link_command.txt new file mode 100644 index 0000000..591a318 --- /dev/null +++ b/src/nvidia/exports_link_command.txt @@ -0,0 +1,108 @@ +--undefined=rm_disable_adapter +--undefined=rm_execute_work_item +--undefined=rm_free_private_state +--undefined=rm_cleanup_file_private +--undefined=rm_unbind_lock +--undefined=rm_get_device_name +--undefined=rm_get_vbios_version +--undefined=rm_get_gpu_uuid +--undefined=rm_get_gpu_uuid_raw +--undefined=rm_set_rm_firmware_requested +--undefined=rm_get_firmware_version +--undefined=rm_i2c_remove_adapters +--undefined=rm_i2c_is_smbus_capable +--undefined=rm_i2c_transfer +--undefined=rm_init_adapter +--undefined=rm_init_private_state +--undefined=rm_init_rm +--undefined=rm_vgpu_vfio_set_driver_vm +--undefined=rm_ioctl +--undefined=rm_is_supported_device +--undefined=rm_is_supported_pci_device +--undefined=rm_isr +--undefined=rm_isr_bh +--undefined=rm_isr_bh_unlocked +--undefined=rm_is_msix_allowed +--undefined=rm_wait_for_bar_firewall +--undefined=rm_perform_version_check +--undefined=rm_pmu_perfmon_get_load +--undefined=rm_power_management +--undefined=rm_stop_user_channels +--undefined=rm_restart_user_channels +--undefined=rm_read_registry_dword +--undefined=rm_run_rc_callback +--undefined=rm_run_nano_timer_callback +--undefined=rm_save_low_res_mode +--undefined=rm_shutdown_adapter +--undefined=rm_exclude_adapter +--undefined=rm_acquire_api_lock +--undefined=rm_release_api_lock +--undefined=rm_acquire_gpu_lock +--undefined=rm_release_gpu_lock +--undefined=rm_acquire_all_gpus_lock +--undefined=rm_release_all_gpus_lock +--undefined=rm_shutdown_rm +--undefined=rm_power_source_change_event +--undefined=rm_request_dnotifier_state +--undefined=rm_write_registry_binary +--undefined=rm_write_registry_dword +--undefined=rm_write_registry_string +--undefined=rm_parse_option_string +--undefined=rm_remove_spaces +--undefined=rm_string_token 
+--undefined=rm_get_adapter_status_external +--undefined=rm_disable_gpu_state_persistence +--undefined=pNVRM_ID +--undefined=rm_p2p_get_pages +--undefined=rm_p2p_get_pages_persistent +--undefined=rm_p2p_get_gpu_info +--undefined=rm_p2p_register_callback +--undefined=rm_p2p_put_pages +--undefined=rm_p2p_put_pages_persistent +--undefined=rm_p2p_dma_map_pages +--undefined=rm_dma_buf_dup_mem_handle +--undefined=rm_dma_buf_undup_mem_handle +--undefined=rm_dma_buf_map_mem_handle +--undefined=rm_dma_buf_unmap_mem_handle +--undefined=rm_dma_buf_get_client_and_device +--undefined=rm_dma_buf_put_client_and_device +--undefined=rm_kernel_rmapi_op +--undefined=nv_get_hypervisor_type +--undefined=rm_gpu_copy_mmu_faults +--undefined=rm_gpu_handle_mmu_faults +--undefined=rm_gpu_need_4k_page_isolation +--undefined=rm_is_chipset_io_coherent +--undefined=rm_get_device_remove_flag +--undefined=rm_init_event_locks +--undefined=rm_destroy_event_locks +--undefined=rm_get_gpu_numa_info +--undefined=rm_gpu_numa_online +--undefined=rm_gpu_numa_offline +--undefined=rm_is_device_sequestered +--undefined=nv_vgpu_create_request +--undefined=nv_vgpu_delete +--undefined=nv_vgpu_get_bar_info +--undefined=nv_vgpu_update_sysfs_info +--undefined=nv_vgpu_get_hbm_info +--undefined=nv_vgpu_get_type_ids +--undefined=nv_vgpu_get_type_info +--undefined=nv_vgpu_process_vf_info +--undefined=nv_gpu_bind_event +--undefined=nv_gpu_unbind_event +--undefined=rm_check_for_gpu_surprise_removal +--undefined=rm_set_external_kernel_client_count +--undefined=rm_schedule_gpu_wakeup +--undefined=rm_init_tegra_dynamic_power_management +--undefined=rm_init_dynamic_power_management +--undefined=rm_cleanup_dynamic_power_management +--undefined=rm_enable_dynamic_power_management +--undefined=rm_ref_dynamic_power +--undefined=rm_unref_dynamic_power +--undefined=rm_transition_dynamic_power +--undefined=rm_acpi_notify +--undefined=rm_get_power_info +--undefined=rm_disable_iomap_wc +--undefined=rm_is_altstack_in_use +--undefined=rm_acpi_nvpcf_notify +--undefined=rm_notify_gpu_addition +--undefined=rm_notify_gpu_removal diff --git a/src/nvidia/generated/g_allclasses.h b/src/nvidia/generated/g_allclasses.h new file mode 100644 index 0000000..f2d6429 --- /dev/null +++ b/src/nvidia/generated/g_allclasses.h @@ -0,0 +1,280 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/* + * g_allclasses.h + * + * Pull in all class headers or class number declarations. + * The class list is generated by chip-config from Classes.pm + * + * NOTE: this file may be included multiple times + * + */ + +#if defined(SDK_ALL_CLASSES_INCLUDE_FULL_HEADER) + +#include // NV01_ROOT +#include // NV01_ROOT_NON_PRIV +#include // NV01_ROOT_CLIENT +#include // NV01_DEVICE_0 +#include // NV20_SUBDEVICE_0 +#include // NV01_CONTEXT_DMA +#include // NV01_MEMORY_SYSTEM +#include // NV01_MEMORY_SYNCPOINT +#include // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR +#include // IO_VASPACE_A +#include // NV01_EVENT +#include // NV01_EVENT_KERNEL_CALLBACK +#include // NV01_EVENT_OS_EVENT +#include // NV01_EVENT_KERNEL_CALLBACK_EX +#include // LOCK_STRESS_OBJECT +#include // LOCK_TEST_RELAXED_DUP_OBJECT +#include // NVC372_DISPLAY_SW +#include // NVC673_DISP_CAPABILITIES +#include // NVC973_DISP_CAPABILITIES +#include // NVCC73_DISP_CAPABILITIES +#include // NV04_DISPLAY_COMMON +#include // NVC670_DISPLAY +#include // NVC671_DISP_SF_USER +#include // NVC67A_CURSOR_IMM_CHANNEL_PIO +#include // NVC67B_WINDOW_IMM_CHANNEL_DMA +#include // NVC67D_CORE_CHANNEL_DMA +#include // NVC67E_WINDOW_CHANNEL_DMA +#include // NVC77F_ANY_CHANNEL_DMA +#include // NVC970_DISPLAY +#include // NVC971_DISP_SF_USER +#include // NVC97A_CURSOR_IMM_CHANNEL_PIO +#include // NVC97B_WINDOW_IMM_CHANNEL_DMA +#include // NVC97D_CORE_CHANNEL_DMA +#include // NVC97E_WINDOW_CHANNEL_DMA +#include // NVCC70_DISPLAY +#include // NVCC71_DISP_SF_USER +#include // NVCC7A_CURSOR_IMM_CHANNEL_PIO +#include // NVCC7B_WINDOW_IMM_CHANNEL_DMA +#include // NVCC7D_CORE_CHANNEL_DMA +#include // NVCC7E_WINDOW_CHANNEL_DMA +#include // GF100_HDACODEC + + +#else // defined(SDK_ALL_CLASSES_INCLUDE_FULL_HEADER) + + +#ifndef NV01_ROOT +#define NV01_ROOT (0x00000000) +#endif +#ifndef NV1_ROOT +#define NV1_ROOT (0x00000000) // alias +#endif +#ifndef NV01_NULL_OBJECT +#define NV01_NULL_OBJECT (0x00000000) // alias +#endif +#ifndef NV1_NULL_OBJECT +#define NV1_NULL_OBJECT (0x00000000) // alias +#endif + +#ifndef NV01_ROOT_NON_PRIV +#define NV01_ROOT_NON_PRIV (0x00000001) +#endif +#ifndef NV1_ROOT_NON_PRIV +#define NV1_ROOT_NON_PRIV (0x00000001) // alias +#endif + +#ifndef NV01_ROOT_CLIENT +#define NV01_ROOT_CLIENT (0x00000041) +#endif + +#ifndef NV01_DEVICE_0 +#define NV01_DEVICE_0 (0x00000080) +#endif + +#ifndef NV20_SUBDEVICE_0 +#define NV20_SUBDEVICE_0 (0x00002080) +#endif + +#ifndef NV01_CONTEXT_DMA +#define NV01_CONTEXT_DMA (0x00000002) +#endif + +#ifndef NV01_MEMORY_SYSTEM +#define NV01_MEMORY_SYSTEM (0x0000003e) +#endif +#ifndef NV1_MEMORY_SYSTEM +#define NV1_MEMORY_SYSTEM (0x0000003e) // alias +#endif + +#ifndef NV01_MEMORY_SYNCPOINT +#define NV01_MEMORY_SYNCPOINT (0x000000c3) +#endif + +#ifndef NV01_MEMORY_SYSTEM_OS_DESCRIPTOR +#define NV01_MEMORY_SYSTEM_OS_DESCRIPTOR (0x00000071) +#endif + +#ifndef IO_VASPACE_A +#define IO_VASPACE_A (0x000000f2) +#endif + +#ifndef NV01_EVENT +#define NV01_EVENT (0x00000005) +#endif +#ifndef NV1_EVENT +#define NV1_EVENT (0x00000005) // alias +#endif + +#ifndef NV01_EVENT_KERNEL_CALLBACK +#define NV01_EVENT_KERNEL_CALLBACK (0x00000078) +#endif +#ifndef NV1_EVENT_KERNEL_CALLBACK +#define NV1_EVENT_KERNEL_CALLBACK (0x00000078) // alias +#endif + +#ifndef NV01_EVENT_OS_EVENT +#define NV01_EVENT_OS_EVENT (0x00000079) +#endif +#ifndef NV1_EVENT_OS_EVENT +#define NV1_EVENT_OS_EVENT (0x00000079) // alias +#endif +#ifndef NV01_EVENT_WIN32_EVENT +#define NV01_EVENT_WIN32_EVENT (0x00000079) // alias +#endif +#ifndef 
NV1_EVENT_WIN32_EVENT +#define NV1_EVENT_WIN32_EVENT (0x00000079) // alias +#endif + +#ifndef NV01_EVENT_KERNEL_CALLBACK_EX +#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e) +#endif +#ifndef NV1_EVENT_KERNEL_CALLBACK_EX +#define NV1_EVENT_KERNEL_CALLBACK_EX (0x0000007e) // alias +#endif + +#ifndef LOCK_STRESS_OBJECT +#define LOCK_STRESS_OBJECT (0x00000100) +#endif + +#ifndef LOCK_TEST_RELAXED_DUP_OBJECT +#define LOCK_TEST_RELAXED_DUP_OBJECT (0x00000101) +#endif + +#ifndef NVC372_DISPLAY_SW +#define NVC372_DISPLAY_SW (0x0000c372) +#endif + +#ifndef NVC673_DISP_CAPABILITIES +#define NVC673_DISP_CAPABILITIES (0x0000c673) +#endif + +#ifndef NVC973_DISP_CAPABILITIES +#define NVC973_DISP_CAPABILITIES (0x0000c973) +#endif + +#ifndef NVCC73_DISP_CAPABILITIES +#define NVCC73_DISP_CAPABILITIES (0x0000cc73) +#endif + +#ifndef NV04_DISPLAY_COMMON +#define NV04_DISPLAY_COMMON (0x00000073) +#endif + +#ifndef NVC670_DISPLAY +#define NVC670_DISPLAY (0x0000c670) +#endif + +#ifndef NVC671_DISP_SF_USER +#define NVC671_DISP_SF_USER (0x0000c671) +#endif + +#ifndef NVC67A_CURSOR_IMM_CHANNEL_PIO +#define NVC67A_CURSOR_IMM_CHANNEL_PIO (0x0000c67a) +#endif + +#ifndef NVC67B_WINDOW_IMM_CHANNEL_DMA +#define NVC67B_WINDOW_IMM_CHANNEL_DMA (0x0000c67b) +#endif + +#ifndef NVC67D_CORE_CHANNEL_DMA +#define NVC67D_CORE_CHANNEL_DMA (0x0000c67d) +#endif + +#ifndef NVC67E_WINDOW_CHANNEL_DMA +#define NVC67E_WINDOW_CHANNEL_DMA (0x0000c67e) +#endif + +#ifndef NVC77F_ANY_CHANNEL_DMA +#define NVC77F_ANY_CHANNEL_DMA (0x0000c77f) +#endif + +#ifndef NVC970_DISPLAY +#define NVC970_DISPLAY (0x0000c970) +#endif + +#ifndef NVC971_DISP_SF_USER +#define NVC971_DISP_SF_USER (0x0000c971) +#endif + +#ifndef NVC97A_CURSOR_IMM_CHANNEL_PIO +#define NVC97A_CURSOR_IMM_CHANNEL_PIO (0x0000c97a) +#endif + +#ifndef NVC97B_WINDOW_IMM_CHANNEL_DMA +#define NVC97B_WINDOW_IMM_CHANNEL_DMA (0x0000c97b) +#endif + +#ifndef NVC97D_CORE_CHANNEL_DMA +#define NVC97D_CORE_CHANNEL_DMA (0x0000c97d) +#endif + +#ifndef NVC97E_WINDOW_CHANNEL_DMA +#define NVC97E_WINDOW_CHANNEL_DMA (0x0000c97e) +#endif + +#ifndef NVCC70_DISPLAY +#define NVCC70_DISPLAY (0x0000cc70) +#endif + +#ifndef NVCC71_DISP_SF_USER +#define NVCC71_DISP_SF_USER (0x0000cc71) +#endif + +#ifndef NVCC7A_CURSOR_IMM_CHANNEL_PIO +#define NVCC7A_CURSOR_IMM_CHANNEL_PIO (0x0000cc7a) +#endif + +#ifndef NVCC7B_WINDOW_IMM_CHANNEL_DMA +#define NVCC7B_WINDOW_IMM_CHANNEL_DMA (0x0000cc7b) +#endif + +#ifndef NVCC7D_CORE_CHANNEL_DMA +#define NVCC7D_CORE_CHANNEL_DMA (0x0000cc7d) +#endif + +#ifndef NVCC7E_WINDOW_CHANNEL_DMA +#define NVCC7E_WINDOW_CHANNEL_DMA (0x0000cc7e) +#endif + +#ifndef GF100_HDACODEC +#define GF100_HDACODEC (0x000090ec) +#endif + + +#endif // defined(SDK_ALL_CLASSES_INCLUDE_FULL_HEADER) diff --git a/src/nvidia/generated/g_binary_api_nvoc.c b/src/nvidia/generated/g_binary_api_nvoc.c new file mode 100644 index 0000000..c2e1c5d --- /dev/null +++ b/src/nvidia/generated/g_binary_api_nvoc.c @@ -0,0 +1,1077 @@ +#define NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_binary_api_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xb7a47c = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; 
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
+
+// Forward declarations for BinaryApi
+void __nvoc_init__GpuResource(GpuResource*);
+void __nvoc_init__BinaryApi(BinaryApi*);
+void __nvoc_init_funcTable_BinaryApi(BinaryApi*);
+NV_STATUS __nvoc_ctor_BinaryApi(BinaryApi*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+void __nvoc_init_dataField_BinaryApi(BinaryApi*);
+void __nvoc_dtor_BinaryApi(BinaryApi*);
+
+// Structures used within RTTI (run-time type information)
+extern const struct NVOC_CASTINFO __nvoc_castinfo__BinaryApi;
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info__BinaryApi;
+
+// Down-thunk(s) to bridge BinaryApi methods from ancestors (if any)
+NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2
+NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2
+NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super
+NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super
+NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super
+NV_STATUS __nvoc_down_thunk_BinaryApi_gpuresControl(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+
+// Up-thunk(s) to bridge BinaryApi methods to ancestors (if any)
+NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2
+void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2
+NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2
+NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2
+void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2
+NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided); // super
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super
+void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct GpuResource *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super
+NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource); // super
+void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiMap(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiUnmap(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // this
+NvBool __nvoc_up_thunk_GpuResource_binapiShareCallback(struct BinaryApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiGetRegBaseOffsetAndSize(struct BinaryApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiGetMapAddrSpace(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiInternalControlForward(struct BinaryApi *pGpuResource, NvU32 command, void *pParams, NvU32 size); // this
+NvHandle __nvoc_up_thunk_GpuResource_binapiGetInternalObjectHandle(struct BinaryApi *pGpuResource); // this
+NvBool __nvoc_up_thunk_RmResource_binapiAccessCallback(struct BinaryApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this
+NV_STATUS __nvoc_up_thunk_RmResource_binapiGetMemInterMapParams(struct BinaryApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this
+NV_STATUS __nvoc_up_thunk_RmResource_binapiCheckMemInterUnmap(struct BinaryApi *pRmResource, NvBool bSubdeviceHandleProvided); // this
+NV_STATUS __nvoc_up_thunk_RmResource_binapiGetMemoryMappingDescriptor(struct BinaryApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this
+NV_STATUS __nvoc_up_thunk_RmResource_binapiControlSerialization_Prologue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+void __nvoc_up_thunk_RmResource_binapiControlSerialization_Epilogue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NV_STATUS __nvoc_up_thunk_RmResource_binapiControl_Prologue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+void __nvoc_up_thunk_RmResource_binapiControl_Epilogue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NvBool __nvoc_up_thunk_RsResource_binapiCanCopy(struct BinaryApi *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_binapiIsDuplicate(struct BinaryApi *pResource, NvHandle hMemory, NvBool *pDuplicate); // this
+void __nvoc_up_thunk_RsResource_binapiPreDestruct(struct BinaryApi *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_binapiControlFilter(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NvBool __nvoc_up_thunk_RsResource_binapiIsPartialUnmapSupported(struct BinaryApi *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_binapiMapTo(struct BinaryApi *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this
+NV_STATUS __nvoc_up_thunk_RsResource_binapiUnmapFrom(struct BinaryApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this
+NvU32 __nvoc_up_thunk_RsResource_binapiGetRefCount(struct BinaryApi *pResource); // this
+void __nvoc_up_thunk_RsResource_binapiAddAdditionalDependants(struct RsClient *pClient, struct BinaryApi *pResource, RsResourceRef *pReference); // this
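All of the thunk declarations above come down to one mechanical trick: NVOC embeds each superclass as a member of the derived struct, so hopping between class views is pure pointer arithmetic. A minimal self-contained sketch of that adjustment, using hypothetical Base/Derived types rather than the real NVOC structs:

#include <stddef.h>

typedef struct Base    { int baseField; } Base;
typedef struct Derived { int derivedField; Base base; /* embedded superclass */ } Derived;

/* Up-thunk direction: derived -> embedded base is just "+ offsetof". */
static Base *toBase(Derived *pDerived) {
    return (Base *)((unsigned char *)pDerived + offsetof(Derived, base));
}

/* Down-thunk direction: base -> enclosing derived is "- offsetof",
 * i.e. the classic container_of idiom. */
static Derived *toDerived(Base *pBase) {
    return (Derived *)((unsigned char *)pBase - offsetof(Derived, base));
}

__nvoc_down_thunk_BinaryApi_gpuresControl, defined further below, subtracts NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource) in exactly this way, while every up-thunk adds the corresponding offset before delegating.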
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(BinaryApi),
+        /*classId=*/ classId(BinaryApi),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "BinaryApi",
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_BinaryApi,
+    /*pCastInfo=*/ &__nvoc_castinfo__BinaryApi,
+    /*pExportInfo=*/ &__nvoc_export_info__BinaryApi
+};
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+static const struct NVOC_METADATA__BinaryApi __nvoc_metadata__BinaryApi = {
+    .rtti.pClassDef = &__nvoc_class_def_BinaryApi, // (binapi) this
+    .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_BinaryApi,
+    .rtti.offset = 0,
+    .metadata__GpuResource.rtti.pClassDef = &__nvoc_class_def_GpuResource, // (gpures) super
+    .metadata__GpuResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__GpuResource.rtti.offset = NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource),
+    .metadata__GpuResource.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2
+    .metadata__GpuResource.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__GpuResource.metadata__RmResource.rtti.offset = NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource),
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
+    .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3
+    .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
+
+    .vtable.__binapiControl__ = &binapiControl_IMPL, // virtual override (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControl__ = &__nvoc_down_thunk_BinaryApi_gpuresControl, // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_GpuResource_resControl, // virtual
+    .vtable.__binapiMap__ = &__nvoc_up_thunk_GpuResource_binapiMap, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresMap__ = &gpuresMap_IMPL, // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_GpuResource_resMap, // virtual
+    .vtable.__binapiUnmap__ = &__nvoc_up_thunk_GpuResource_binapiUnmap, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresUnmap__ = &gpuresUnmap_IMPL, // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_GpuResource_resUnmap, // virtual
+    .vtable.__binapiShareCallback__ = &__nvoc_up_thunk_GpuResource_binapiShareCallback, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresShareCallback__ = &gpuresShareCallback_IMPL, // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresShareCallback__ = &__nvoc_down_thunk_GpuResource_rmresShareCallback, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual
+    .vtable.__binapiGetRegBaseOffsetAndSize__ = &__nvoc_up_thunk_GpuResource_binapiGetRegBaseOffsetAndSize, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetRegBaseOffsetAndSize__ = &gpuresGetRegBaseOffsetAndSize_IMPL, // virtual
+    .vtable.__binapiGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_binapiGetMapAddrSpace, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL, // virtual
+    .vtable.__binapiInternalControlForward__ = &__nvoc_up_thunk_GpuResource_binapiInternalControlForward, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL, // virtual
+    .vtable.__binapiGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_binapiGetInternalObjectHandle, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL, // virtual
+    .vtable.__binapiAccessCallback__ = &__nvoc_up_thunk_RmResource_binapiAccessCallback, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresAccessCallback__ = &__nvoc_up_thunk_RmResource_gpuresAccessCallback, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual
+    .vtable.__binapiGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_binapiGetMemInterMapParams, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual
+    .vtable.__binapiCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_binapiCheckMemInterUnmap, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual
+    .vtable.__binapiGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_binapiGetMemoryMappingDescriptor, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual
+    .vtable.__binapiControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_binapiControlSerialization_Prologue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual
+    .vtable.__binapiControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_binapiControlSerialization_Epilogue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual
+    .vtable.__binapiControl_Prologue__ = &__nvoc_up_thunk_RmResource_binapiControl_Prologue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Prologue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual
+    .vtable.__binapiControl_Epilogue__ = &__nvoc_up_thunk_RmResource_binapiControl_Epilogue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Epilogue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual
+    .vtable.__binapiCanCopy__ = &__nvoc_up_thunk_RsResource_binapiCanCopy, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresCanCopy__ = &__nvoc_up_thunk_RsResource_gpuresCanCopy, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual
+    .vtable.__binapiIsDuplicate__ = &__nvoc_up_thunk_RsResource_binapiIsDuplicate, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpuresIsDuplicate, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual
+    .vtable.__binapiPreDestruct__ = &__nvoc_up_thunk_RsResource_binapiPreDestruct, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresPreDestruct__ = &__nvoc_up_thunk_RsResource_gpuresPreDestruct, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual
+    .vtable.__binapiControlFilter__ = &__nvoc_up_thunk_RsResource_binapiControlFilter, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControlFilter__ = &__nvoc_up_thunk_RsResource_gpuresControlFilter, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual
+    .vtable.__binapiIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_binapiIsPartialUnmapSupported, // inline virtual inherited (res) base (gpures) body
+    .metadata__GpuResource.vtable.__gpuresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body
+    .vtable.__binapiMapTo__ = &__nvoc_up_thunk_RsResource_binapiMapTo, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresMapTo__ = &__nvoc_up_thunk_RsResource_gpuresMapTo, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual
+    .vtable.__binapiUnmapFrom__ = &__nvoc_up_thunk_RsResource_binapiUnmapFrom, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpuresUnmapFrom, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual
+    .vtable.__binapiGetRefCount__ = &__nvoc_up_thunk_RsResource_binapiGetRefCount, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetRefCount__ = &__nvoc_up_thunk_RsResource_gpuresGetRefCount, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual
+    .vtable.__binapiAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_binapiAddAdditionalDependants, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual
+};
+
+
+// Dynamic down-casting information
+const struct NVOC_CASTINFO __nvoc_castinfo__BinaryApi = {
+    .numRelatives = 6,
+    .relatives = {
+        &__nvoc_metadata__BinaryApi.rtti, // [0]: (binapi) this
+        &__nvoc_metadata__BinaryApi.metadata__GpuResource.rtti, // [1]: (gpures) super
+        &__nvoc_metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.rtti, // [2]: (rmres) super^2
+        &__nvoc_metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3
+        &__nvoc_metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4
+        &__nvoc_metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3
+    }
+};
+
+// 1 down-thunk(s) defined to bridge methods in BinaryApi from superclasses
+
+// binapiControl: virtual override (res) base (gpures)
+NV_STATUS __nvoc_down_thunk_BinaryApi_gpuresControl(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return binapiControl((struct BinaryApi *)(((unsigned char *) pResource) - NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource)), pCallContext, pParams);
+}
+
+
+// 24 up-thunk(s) defined to bridge methods in BinaryApi to superclasses
+
+// binapiMap: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiMap(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
+    return gpuresMap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource)), pCallContext, pParams, pCpuMapping);
+}
+
+// binapiUnmap: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiUnmap(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
+    return gpuresUnmap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource)), pCallContext, pCpuMapping);
+}
+
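The castinfo table emitted above is what keeps dynamicCast cheap at run time: six RTTI records, each pairing a class definition with the offset of that base inside BinaryApi. A rough self-contained sketch of the lookup, with hypothetical ClassDef/Rtti/CastInfo types standing in for the real NVOC internals:

#include <stddef.h>

typedef struct ClassDef { const char *name; } ClassDef;
typedef struct Rtti     { const ClassDef *pClassDef; size_t offset; } Rtti;
typedef struct CastInfo { unsigned numRelatives; const Rtti *relatives[6]; } CastInfo;

/* pBase points at some embedded base view; pBaseRtti records how far into the
 * most-derived object that view lives. Re-base to the derived start, then walk
 * the relatives; on a class match, apply that relative's offset. */
static void *sketchDynamicCast(void *pBase, const Rtti *pBaseRtti,
                               const CastInfo *pCastInfo, const ClassDef *pTarget)
{
    unsigned char *pDerived = (unsigned char *)pBase - pBaseRtti->offset;
    unsigned i;
    for (i = 0; i < pCastInfo->numRelatives; i++)
        if (pCastInfo->relatives[i]->pClassDef == pTarget)
            return pDerived + pCastInfo->relatives[i]->offset;
    return NULL; /* unrelated class */
}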
+// binapiShareCallback: virtual inherited (gpures) base (gpures)
+NvBool __nvoc_up_thunk_GpuResource_binapiShareCallback(struct BinaryApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return gpuresShareCallback((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource)), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+// binapiGetRegBaseOffsetAndSize: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiGetRegBaseOffsetAndSize(struct BinaryApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
+    return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource)), pGpu, pOffset, pSize);
+}
+
+// binapiGetMapAddrSpace: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiGetMapAddrSpace(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
+    return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource)), pCallContext, mapFlags, pAddrSpace);
+}
+
+// binapiInternalControlForward: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiInternalControlForward(struct BinaryApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
+    return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource)), command, pParams, size);
+}
+
+// binapiGetInternalObjectHandle: virtual inherited (gpures) base (gpures)
+NvHandle __nvoc_up_thunk_GpuResource_binapiGetInternalObjectHandle(struct BinaryApi *pGpuResource) {
+    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource)));
+}
+
+// binapiAccessCallback: virtual inherited (rmres) base (gpures)
+NvBool __nvoc_up_thunk_RmResource_binapiAccessCallback(struct BinaryApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight);
+}
+
+// binapiGetMemInterMapParams: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_binapiGetMemInterMapParams(struct BinaryApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pParams);
+}
+
+// binapiCheckMemInterUnmap: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_binapiCheckMemInterUnmap(struct BinaryApi *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), bSubdeviceHandleProvided);
+}
+
+// binapiGetMemoryMappingDescriptor: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_binapiGetMemoryMappingDescriptor(struct BinaryApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), ppMemDesc);
+}
+
+// binapiControlSerialization_Prologue: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_binapiControlSerialization_Prologue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// binapiControlSerialization_Epilogue: virtual inherited (rmres) base (gpures)
+void __nvoc_up_thunk_RmResource_binapiControlSerialization_Epilogue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// binapiControl_Prologue: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_binapiControl_Prologue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// binapiControl_Epilogue: virtual inherited (rmres) base (gpures)
+void __nvoc_up_thunk_RmResource_binapiControl_Epilogue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// binapiCanCopy: virtual inherited (res) base (gpures)
+NvBool __nvoc_up_thunk_RsResource_binapiCanCopy(struct BinaryApi *pResource) {
+    return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// binapiIsDuplicate: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_binapiIsDuplicate(struct BinaryApi *pResource, NvHandle hMemory, NvBool *pDuplicate) {
+    return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate);
+}
+
+// binapiPreDestruct: virtual inherited (res) base (gpures)
+void __nvoc_up_thunk_RsResource_binapiPreDestruct(struct BinaryApi *pResource) {
+    resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// binapiControlFilter: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_binapiControlFilter(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// binapiIsPartialUnmapSupported: inline virtual inherited (res) base (gpures) body
+NvBool __nvoc_up_thunk_RsResource_binapiIsPartialUnmapSupported(struct BinaryApi *pResource) {
+    return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// binapiMapTo: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_binapiMapTo(struct BinaryApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// binapiUnmapFrom: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_binapiUnmapFrom(struct BinaryApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// binapiGetRefCount: virtual inherited (res) base (gpures)
+NvU32 __nvoc_up_thunk_RsResource_binapiGetRefCount(struct BinaryApi *pResource) {
+    return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// binapiAddAdditionalDependants: virtual inherited (res) base (gpures)
+void __nvoc_up_thunk_RsResource_binapiAddAdditionalDependants(struct RsClient *pClient, struct BinaryApi *pResource, RsResourceRef *pReference) {
+    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference);
+}
+
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__BinaryApi =
+{
+    /*numEntries=*/ 0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_GpuResource(GpuResource*);
+void __nvoc_dtor_BinaryApi(BinaryApi *pThis) {
+    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_BinaryApi(BinaryApi *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_BinaryApi(BinaryApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_BinaryApi_fail_GpuResource;
+    __nvoc_init_dataField_BinaryApi(pThis);
+
+    status = __nvoc_binapiConstruct(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_BinaryApi_fail__init;
+    goto __nvoc_ctor_BinaryApi_exit; // Success
+
+__nvoc_ctor_BinaryApi_fail__init:
+    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+__nvoc_ctor_BinaryApi_fail_GpuResource:
+__nvoc_ctor_BinaryApi_exit:
+
+    return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_BinaryApi_1(BinaryApi *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+} // End __nvoc_init_funcTable_BinaryApi_1
+
+
+// Initialize vtable(s) for 25 virtual method(s).
+void __nvoc_init_funcTable_BinaryApi(BinaryApi *pThis) {
+    __nvoc_init_funcTable_BinaryApi_1(pThis);
+}
+
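The constructor above uses the classic goto unwind ladder: each successfully constructed stage gets a matching failure label, so an error tears down exactly what was already built, in reverse order. A minimal standalone sketch of the pattern (hypothetical acquireA/acquireB stages, not NVOC code):

typedef int STATUS;
#define OK 0

static STATUS acquireA(void) { return OK; }
static STATUS acquireB(void) { return OK; }
static void   releaseA(void) { }

static STATUS construct(void)
{
    STATUS status;

    status = acquireA();            /* stage 1 */
    if (status != OK) goto fail_A;  /* nothing to undo yet */

    status = acquireB();            /* stage 2 */
    if (status != OK) goto fail_B;  /* must undo stage 1 */

    return OK;

fail_B:
    releaseA();
fail_A:
    return status;
}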
+// Initialize newly constructed object.
+void __nvoc_init__BinaryApi(BinaryApi *pThis) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4
+    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3
+    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3
+    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; // (rmres) super^2
+    pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; // (gpures) super
+    pThis->__nvoc_pbase_BinaryApi = pThis; // (binapi) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__GpuResource(&pThis->__nvoc_base_GpuResource);
+
+    // Pointer(s) to metadata structures(s)
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource; // (res) super^3
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__BinaryApi.metadata__GpuResource.metadata__RmResource; // (rmres) super^2
+    pThis->__nvoc_base_GpuResource.__nvoc_metadata_ptr = &__nvoc_metadata__BinaryApi.metadata__GpuResource; // (gpures) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__BinaryApi; // (binapi) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_BinaryApi(pThis);
+}
+
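Note the ordering inside __nvoc_init__BinaryApi: the recursive call lets GpuResource wire up its own metadata pointers first, and the assignments that follow deliberately overwrite them, so every embedded base ends up pointing at the most-derived metadata (and therefore at the BinaryApi vtable). A tiny runnable sketch of that overwrite, with hypothetical Meta/Base/Derived types:

#include <stdio.h>

typedef struct Meta { const char *name; } Meta;
typedef struct Base { const Meta *pMeta; } Base;
typedef struct Derived { Base base; } Derived;

static const Meta baseMeta    = { "Base" };
static const Meta derivedMeta = { "Derived" };

static void initBase(Base *pBase) { pBase->pMeta = &baseMeta; }

static void initDerived(Derived *pThis) {
    initBase(&pThis->base);           /* base init runs first...              */
    pThis->base.pMeta = &derivedMeta; /* ...then the derived class gets the   */
}                                     /* last word, exactly as above          */

int main(void) {
    Derived d;
    initDerived(&d);
    printf("%s\n", d.base.pMeta->name); /* prints "Derived" */
    return 0;
}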
+NV_STATUS __nvoc_objCreate_BinaryApi(BinaryApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    BinaryApi *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(BinaryApi), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(BinaryApi));
+
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent if there is one unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__BinaryApi(pThis);
+    status = __nvoc_ctor_BinaryApi(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_objCreate_BinaryApi_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_BinaryApi_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
+    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+        portMemSet(pThis, 0, sizeof(BinaryApi));
+    else
+    {
+        portMemFree(pThis);
+        *ppThis = NULL;
+    }
+
+    // coverity[leaked_storage:FALSE]
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_BinaryApi(BinaryApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+    status = __nvoc_objCreate_BinaryApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+    return status;
+}
+
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check__0x1c0579 = 1;
+#endif
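The dynamic creator exists so that a single NVOC_DYNAMIC_OBJ_CREATE signature can serve every class: each per-class wrapper pops its own typed constructor arguments back out of the va_list in an agreed order. A self-contained sketch of the same convention, with a hypothetical createWidget factory:

#include <stdarg.h>

typedef int STATUS;

static STATUS createWidget(void **ppObj, int width, int height) {
    (void)ppObj; (void)width; (void)height;
    return 0; /* stand-in for the real typed constructor */
}

static STATUS createDynamic(void **ppObj, ...) {
    va_list args;
    va_start(args, ppObj);
    int width  = va_arg(args, int);  /* caller must pass arguments */
    int height = va_arg(args, int);  /* in the agreed order        */
    va_end(args);
    return createWidget(ppObj, width, height);
}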
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApiPrivileged;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi;
+
+// Forward declarations for BinaryApiPrivileged
+void __nvoc_init__BinaryApi(BinaryApi*);
+void __nvoc_init__BinaryApiPrivileged(BinaryApiPrivileged*);
+void __nvoc_init_funcTable_BinaryApiPrivileged(BinaryApiPrivileged*);
+NV_STATUS __nvoc_ctor_BinaryApiPrivileged(BinaryApiPrivileged*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+void __nvoc_init_dataField_BinaryApiPrivileged(BinaryApiPrivileged*);
+void __nvoc_dtor_BinaryApiPrivileged(BinaryApiPrivileged*);
+
+// Structures used within RTTI (run-time type information)
+extern const struct NVOC_CASTINFO __nvoc_castinfo__BinaryApiPrivileged;
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info__BinaryApiPrivileged;
+
+// Down-thunk(s) to bridge BinaryApiPrivileged methods from ancestors (if any)
+NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^3
+NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^3
+NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super^2
+NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super^2
+NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2
+NV_STATUS __nvoc_down_thunk_BinaryApi_gpuresControl(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NV_STATUS __nvoc_down_thunk_BinaryApiPrivileged_binapiControl(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+
+// Up-thunk(s) to bridge BinaryApiPrivileged methods to ancestors (if any)
+NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^3
+void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^3
+NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^3
+NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^3
+void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^3
+NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided); // super^2
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super^2
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2
+void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct GpuResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2
+NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource); // super^2
+void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference); // super^2
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiMap(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiUnmap(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super
+NvBool __nvoc_up_thunk_GpuResource_binapiShareCallback(struct BinaryApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiGetRegBaseOffsetAndSize(struct BinaryApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiGetMapAddrSpace(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiInternalControlForward(struct BinaryApi *pGpuResource, NvU32 command, void *pParams, NvU32 size); // super
+NvHandle __nvoc_up_thunk_GpuResource_binapiGetInternalObjectHandle(struct BinaryApi *pGpuResource); // super
+NvBool __nvoc_up_thunk_RmResource_binapiAccessCallback(struct BinaryApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super
+NV_STATUS __nvoc_up_thunk_RmResource_binapiGetMemInterMapParams(struct BinaryApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super
+NV_STATUS __nvoc_up_thunk_RmResource_binapiCheckMemInterUnmap(struct BinaryApi *pRmResource, NvBool bSubdeviceHandleProvided); // super
+NV_STATUS __nvoc_up_thunk_RmResource_binapiGetMemoryMappingDescriptor(struct BinaryApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super
+NV_STATUS __nvoc_up_thunk_RmResource_binapiControlSerialization_Prologue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+void __nvoc_up_thunk_RmResource_binapiControlSerialization_Epilogue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NV_STATUS __nvoc_up_thunk_RmResource_binapiControl_Prologue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+void __nvoc_up_thunk_RmResource_binapiControl_Epilogue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NvBool __nvoc_up_thunk_RsResource_binapiCanCopy(struct BinaryApi *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_binapiIsDuplicate(struct BinaryApi *pResource, NvHandle hMemory, NvBool *pDuplicate); // super
+void __nvoc_up_thunk_RsResource_binapiPreDestruct(struct BinaryApi *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_binapiControlFilter(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NvBool __nvoc_up_thunk_RsResource_binapiIsPartialUnmapSupported(struct BinaryApi *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_binapiMapTo(struct BinaryApi *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super
+NV_STATUS __nvoc_up_thunk_RsResource_binapiUnmapFrom(struct BinaryApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super
+NvU32 __nvoc_up_thunk_RsResource_binapiGetRefCount(struct BinaryApi *pResource); // super
+void __nvoc_up_thunk_RsResource_binapiAddAdditionalDependants(struct RsClient *pClient, struct BinaryApi *pResource, RsResourceRef *pReference); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiprivMap(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiprivUnmap(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // this
+NvBool __nvoc_up_thunk_GpuResource_binapiprivShareCallback(struct BinaryApiPrivileged *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiprivGetRegBaseOffsetAndSize(struct BinaryApiPrivileged *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiprivGetMapAddrSpace(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiprivInternalControlForward(struct BinaryApiPrivileged *pGpuResource, NvU32 command, void *pParams, NvU32 size); // this
+NvHandle __nvoc_up_thunk_GpuResource_binapiprivGetInternalObjectHandle(struct BinaryApiPrivileged *pGpuResource); // this
+NvBool __nvoc_up_thunk_RmResource_binapiprivAccessCallback(struct BinaryApiPrivileged *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this
+NV_STATUS __nvoc_up_thunk_RmResource_binapiprivGetMemInterMapParams(struct BinaryApiPrivileged *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this
+NV_STATUS __nvoc_up_thunk_RmResource_binapiprivCheckMemInterUnmap(struct BinaryApiPrivileged *pRmResource, NvBool bSubdeviceHandleProvided); // this
+NV_STATUS __nvoc_up_thunk_RmResource_binapiprivGetMemoryMappingDescriptor(struct BinaryApiPrivileged *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this
+NV_STATUS __nvoc_up_thunk_RmResource_binapiprivControlSerialization_Prologue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+void __nvoc_up_thunk_RmResource_binapiprivControlSerialization_Epilogue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NV_STATUS __nvoc_up_thunk_RmResource_binapiprivControl_Prologue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+void __nvoc_up_thunk_RmResource_binapiprivControl_Epilogue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NvBool __nvoc_up_thunk_RsResource_binapiprivCanCopy(struct BinaryApiPrivileged *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_binapiprivIsDuplicate(struct BinaryApiPrivileged *pResource, NvHandle hMemory, NvBool *pDuplicate); // this
+void __nvoc_up_thunk_RsResource_binapiprivPreDestruct(struct BinaryApiPrivileged *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_binapiprivControlFilter(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NvBool __nvoc_up_thunk_RsResource_binapiprivIsPartialUnmapSupported(struct BinaryApiPrivileged *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_binapiprivMapTo(struct BinaryApiPrivileged *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this
+NV_STATUS __nvoc_up_thunk_RsResource_binapiprivUnmapFrom(struct BinaryApiPrivileged *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this
+NvU32 __nvoc_up_thunk_RsResource_binapiprivGetRefCount(struct BinaryApiPrivileged *pResource); // this
+void __nvoc_up_thunk_RsResource_binapiprivAddAdditionalDependants(struct RsClient *pClient, struct BinaryApiPrivileged *pResource, RsResourceRef *pReference); // this
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApiPrivileged =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(BinaryApiPrivileged),
+        /*classId=*/ classId(BinaryApiPrivileged),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "BinaryApiPrivileged",
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_BinaryApiPrivileged,
+    /*pCastInfo=*/ &__nvoc_castinfo__BinaryApiPrivileged,
+    /*pExportInfo=*/ &__nvoc_export_info__BinaryApiPrivileged
+};
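BinaryApiPrivileged illustrates the cheapest kind of NVOC subclass: it overrides a single virtual (control, via __nvoc_down_thunk_BinaryApiPrivileged_binapiControl) and inherits every other slot through a fresh set of up-thunks. A compact sketch of the underlying idea, with hypothetical Vtbl/baseVtbl/privVtbl names:

/* A subclass vtable starts as a copy of the parent's and replaces one slot. */
typedef struct Vtbl {
    int (*control)(void *self);
    int (*map)(void *self);
} Vtbl;

static int baseControl(void *self) { (void)self; return 0; }
static int baseMap(void *self)     { (void)self; return 0; }
static const Vtbl baseVtbl = { baseControl, baseMap };

static int privControl(void *self) { (void)self; return 1; /* e.g. privilege check first */ }
static const Vtbl privVtbl = { privControl, baseMap }; /* map inherited unchanged */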
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+static const struct NVOC_METADATA__BinaryApiPrivileged __nvoc_metadata__BinaryApiPrivileged = {
+    .rtti.pClassDef = &__nvoc_class_def_BinaryApiPrivileged, // (binapipriv) this
+    .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_BinaryApiPrivileged,
+    .rtti.offset = 0,
+    .metadata__BinaryApi.rtti.pClassDef = &__nvoc_class_def_BinaryApi, // (binapi) super
+    .metadata__BinaryApi.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__BinaryApi.rtti.offset = NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi),
+    .metadata__BinaryApi.metadata__GpuResource.rtti.pClassDef = &__nvoc_class_def_GpuResource, // (gpures) super^2
+    .metadata__BinaryApi.metadata__GpuResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__BinaryApi.metadata__GpuResource.rtti.offset = NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource),
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^3
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.rtti.offset = NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource),
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^4
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^5
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^4
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
+
+    .vtable.__binapiprivControl__ = &binapiprivControl_IMPL, // virtual override (res) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiControl__ = &__nvoc_down_thunk_BinaryApiPrivileged_binapiControl, // virtual override (res) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresControl__ = &__nvoc_down_thunk_BinaryApi_gpuresControl, // virtual override (res) base (rmres)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_GpuResource_resControl, // virtual
+    .vtable.__binapiprivMap__ = &__nvoc_up_thunk_GpuResource_binapiprivMap, // virtual inherited (gpures) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiMap__ = &__nvoc_up_thunk_GpuResource_binapiMap, // virtual inherited (gpures) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresMap__ = &gpuresMap_IMPL, // virtual override (res) base (rmres)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_GpuResource_resMap, // virtual
+    .vtable.__binapiprivUnmap__ = &__nvoc_up_thunk_GpuResource_binapiprivUnmap, // virtual inherited (gpures) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiUnmap__ = &__nvoc_up_thunk_GpuResource_binapiUnmap, // virtual inherited (gpures) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresUnmap__ = &gpuresUnmap_IMPL, // virtual override (res) base (rmres)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_GpuResource_resUnmap, // virtual
+    .vtable.__binapiprivShareCallback__ = &__nvoc_up_thunk_GpuResource_binapiprivShareCallback, // virtual inherited (gpures) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiShareCallback__ = &__nvoc_up_thunk_GpuResource_binapiShareCallback, // virtual inherited (gpures) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresShareCallback__ = &gpuresShareCallback_IMPL, // virtual override (res) base (rmres)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresShareCallback__ = &__nvoc_down_thunk_GpuResource_rmresShareCallback, // virtual override (res) base (res)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual
+    .vtable.__binapiprivGetRegBaseOffsetAndSize__ = &__nvoc_up_thunk_GpuResource_binapiprivGetRegBaseOffsetAndSize, // virtual inherited (gpures) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiGetRegBaseOffsetAndSize__ = &__nvoc_up_thunk_GpuResource_binapiGetRegBaseOffsetAndSize, // virtual inherited (gpures) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresGetRegBaseOffsetAndSize__ = &gpuresGetRegBaseOffsetAndSize_IMPL, // virtual
+    .vtable.__binapiprivGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_binapiprivGetMapAddrSpace, // virtual inherited (gpures) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_binapiGetMapAddrSpace, // virtual inherited (gpures) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL, // virtual
+    .vtable.__binapiprivInternalControlForward__ = &__nvoc_up_thunk_GpuResource_binapiprivInternalControlForward, // virtual inherited (gpures) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiInternalControlForward__ = &__nvoc_up_thunk_GpuResource_binapiInternalControlForward, // virtual inherited (gpures) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL, // virtual
+    .vtable.__binapiprivGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_binapiprivGetInternalObjectHandle, // virtual inherited (gpures) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_binapiGetInternalObjectHandle, // virtual inherited (gpures) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL, // virtual
+    .vtable.__binapiprivAccessCallback__ = &__nvoc_up_thunk_RmResource_binapiprivAccessCallback, // virtual inherited (rmres) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiAccessCallback__ = &__nvoc_up_thunk_RmResource_binapiAccessCallback, // virtual inherited (rmres) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresAccessCallback__ = &__nvoc_up_thunk_RmResource_gpuresAccessCallback, // virtual inherited (rmres) base (rmres)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual
+    .vtable.__binapiprivGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_binapiprivGetMemInterMapParams, // virtual inherited (rmres) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_binapiGetMemInterMapParams, // virtual inherited (rmres) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams, // virtual inherited (rmres) base (rmres)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual
+    .vtable.__binapiprivCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_binapiprivCheckMemInterUnmap, // virtual inherited (rmres) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_binapiCheckMemInterUnmap, // virtual inherited (rmres) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap, // virtual inherited (rmres) base (rmres)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual
+    .vtable.__binapiprivGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_binapiprivGetMemoryMappingDescriptor, // virtual inherited (rmres) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_binapiGetMemoryMappingDescriptor, // virtual inherited (rmres) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual
+    .vtable.__binapiprivControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_binapiprivControlSerialization_Prologue, // virtual inherited (rmres) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_binapiControlSerialization_Prologue, // virtual inherited (rmres) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue, // virtual inherited (rmres) base (rmres)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual
+    .vtable.__binapiprivControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_binapiprivControlSerialization_Epilogue, // virtual inherited (rmres) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_binapiControlSerialization_Epilogue, // virtual inherited (rmres) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual
+    .vtable.__binapiprivControl_Prologue__ = &__nvoc_up_thunk_RmResource_binapiprivControl_Prologue, // virtual inherited (rmres) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiControl_Prologue__ = &__nvoc_up_thunk_RmResource_binapiControl_Prologue, // virtual inherited (rmres) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Prologue, // virtual inherited (rmres) base (rmres)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual
+    .vtable.__binapiprivControl_Epilogue__ = &__nvoc_up_thunk_RmResource_binapiprivControl_Epilogue, // virtual inherited (rmres) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiControl_Epilogue__ = &__nvoc_up_thunk_RmResource_binapiControl_Epilogue, // virtual inherited (rmres) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Epilogue, // virtual inherited (rmres) base (rmres)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual
+    .vtable.__binapiprivCanCopy__ = &__nvoc_up_thunk_RsResource_binapiprivCanCopy, // virtual inherited (res) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiCanCopy__ = &__nvoc_up_thunk_RsResource_binapiCanCopy, // virtual inherited (res) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresCanCopy__ = &__nvoc_up_thunk_RsResource_gpuresCanCopy, // virtual inherited (res) base (rmres)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res)
+    .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual
+    .vtable.__binapiprivIsDuplicate__ = &__nvoc_up_thunk_RsResource_binapiprivIsDuplicate, // virtual inherited (res) base (binapi)
+    .metadata__BinaryApi.vtable.__binapiIsDuplicate__ = &__nvoc_up_thunk_RsResource_binapiIsDuplicate, // virtual inherited (res) base (gpures)
+    .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpuresIsDuplicate, // virtual
inherited (res) base (rmres) + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__binapiprivPreDestruct__ = &__nvoc_up_thunk_RsResource_binapiprivPreDestruct, // virtual inherited (res) base (binapi) + .metadata__BinaryApi.vtable.__binapiPreDestruct__ = &__nvoc_up_thunk_RsResource_binapiPreDestruct, // virtual inherited (res) base (gpures) + .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresPreDestruct__ = &__nvoc_up_thunk_RsResource_gpuresPreDestruct, // virtual inherited (res) base (rmres) + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__binapiprivControlFilter__ = &__nvoc_up_thunk_RsResource_binapiprivControlFilter, // virtual inherited (res) base (binapi) + .metadata__BinaryApi.vtable.__binapiControlFilter__ = &__nvoc_up_thunk_RsResource_binapiControlFilter, // virtual inherited (res) base (gpures) + .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresControlFilter__ = &__nvoc_up_thunk_RsResource_gpuresControlFilter, // virtual inherited (res) base (rmres) + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__binapiprivIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_binapiprivIsPartialUnmapSupported, // inline virtual inherited (res) base (binapi) body + .metadata__BinaryApi.vtable.__binapiIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_binapiIsPartialUnmapSupported, // inline virtual inherited (res) base (gpures) body + .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__binapiprivMapTo__ = &__nvoc_up_thunk_RsResource_binapiprivMapTo, // virtual inherited (res) base (binapi) + .metadata__BinaryApi.vtable.__binapiMapTo__ = &__nvoc_up_thunk_RsResource_binapiMapTo, // virtual inherited (res) base (gpures) + .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresMapTo__ = &__nvoc_up_thunk_RsResource_gpuresMapTo, // virtual inherited (res) base (rmres) + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + 
.vtable.__binapiprivUnmapFrom__ = &__nvoc_up_thunk_RsResource_binapiprivUnmapFrom, // virtual inherited (res) base (binapi) + .metadata__BinaryApi.vtable.__binapiUnmapFrom__ = &__nvoc_up_thunk_RsResource_binapiUnmapFrom, // virtual inherited (res) base (gpures) + .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpuresUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__binapiprivGetRefCount__ = &__nvoc_up_thunk_RsResource_binapiprivGetRefCount, // virtual inherited (res) base (binapi) + .metadata__BinaryApi.vtable.__binapiGetRefCount__ = &__nvoc_up_thunk_RsResource_binapiGetRefCount, // virtual inherited (res) base (gpures) + .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresGetRefCount__ = &__nvoc_up_thunk_RsResource_gpuresGetRefCount, // virtual inherited (res) base (rmres) + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__binapiprivAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_binapiprivAddAdditionalDependants, // virtual inherited (res) base (binapi) + .metadata__BinaryApi.vtable.__binapiAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_binapiAddAdditionalDependants, // virtual inherited (res) base (gpures) + .metadata__BinaryApi.metadata__GpuResource.vtable.__gpuresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__BinaryApiPrivileged = { + .numRelatives = 7, + .relatives = { + &__nvoc_metadata__BinaryApiPrivileged.rtti, // [0]: (binapipriv) this + &__nvoc_metadata__BinaryApiPrivileged.metadata__BinaryApi.rtti, // [1]: (binapi) super + &__nvoc_metadata__BinaryApiPrivileged.metadata__BinaryApi.metadata__GpuResource.rtti, // [2]: (gpures) super^2 + &__nvoc_metadata__BinaryApiPrivileged.metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.rtti, // [3]: (rmres) super^3 + &__nvoc_metadata__BinaryApiPrivileged.metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti, // [4]: (res) super^4 + &__nvoc_metadata__BinaryApiPrivileged.metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [5]: (obj) super^5 + &__nvoc_metadata__BinaryApiPrivileged.metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti, // [6]: (rmrescmn) super^4 + } +}; + +// 1 down-thunk(s) defined to bridge methods in BinaryApiPrivileged from superclasses + +// binapiprivControl: 
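The castinfo table above is what backs run-time downcasting: each rtti entry pairs a class definition with the byte offset of that base inside BinaryApiPrivileged. A minimal sketch of the lookup it enables follows; the field names match the initializers above, but the helper itself (sketchCastToBase) is illustrative and not the driver's actual __nvoc_dynamicCast implementation:

    // Walk the relatives table; on a class match, the recorded offset turns the
    // derived pointer into a pointer to that embedded base.
    static void *sketchCastToBase(void *pDerived,
                                  const struct NVOC_CASTINFO *pCastInfo,
                                  const struct NVOC_CLASS_DEF *pTargetClass)
    {
        NvU32 i;
        for (i = 0; i < pCastInfo->numRelatives; i++)
        {
            const struct NVOC_RTTI *pRtti = pCastInfo->relatives[i];
            if (pRtti->pClassDef == pTargetClass)
                return (void *)((NvU8 *)pDerived + pRtti->offset);
        }
        return NULL; // unrelated class: the cast fails
    }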
+// 1 down-thunk(s) defined to bridge methods in BinaryApiPrivileged from superclasses
+
+// binapiprivControl: virtual override (res) base (binapi)
+NV_STATUS __nvoc_down_thunk_BinaryApiPrivileged_binapiControl(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return binapiprivControl((struct BinaryApiPrivileged *)(((unsigned char *) pResource) - NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi)), pCallContext, pParams);
+}
+
+
+// 24 up-thunk(s) defined to bridge methods in BinaryApiPrivileged to superclasses
+
+// binapiprivMap: virtual inherited (gpures) base (binapi)
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiprivMap(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
+    return gpuresMap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource)), pCallContext, pParams, pCpuMapping);
+}
+
+// binapiprivUnmap: virtual inherited (gpures) base (binapi)
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiprivUnmap(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
+    return gpuresUnmap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource)), pCallContext, pCpuMapping);
+}
+
+// binapiprivShareCallback: virtual inherited (gpures) base (binapi)
+NvBool __nvoc_up_thunk_GpuResource_binapiprivShareCallback(struct BinaryApiPrivileged *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return gpuresShareCallback((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource)), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+// binapiprivGetRegBaseOffsetAndSize: virtual inherited (gpures) base (binapi)
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiprivGetRegBaseOffsetAndSize(struct BinaryApiPrivileged *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
+    return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource)), pGpu, pOffset, pSize);
+}
+
+// binapiprivGetMapAddrSpace: virtual inherited (gpures) base (binapi)
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiprivGetMapAddrSpace(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
+    return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource)), pCallContext, mapFlags, pAddrSpace);
+}
+
+// binapiprivInternalControlForward: virtual inherited (gpures) base (binapi)
+NV_STATUS __nvoc_up_thunk_GpuResource_binapiprivInternalControlForward(struct BinaryApiPrivileged *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
+    return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource)), command, pParams, size);
+}
+
+// binapiprivGetInternalObjectHandle: virtual inherited (gpures) base (binapi)
+NvHandle __nvoc_up_thunk_GpuResource_binapiprivGetInternalObjectHandle(struct BinaryApiPrivileged *pGpuResource) {
+    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource)));
+}
+
+// binapiprivAccessCallback: virtual inherited (rmres) base (binapi)
+NvBool __nvoc_up_thunk_RmResource_binapiprivAccessCallback(struct BinaryApiPrivileged *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight);
+}
+
+// binapiprivGetMemInterMapParams: virtual inherited (rmres) base (binapi)
+NV_STATUS __nvoc_up_thunk_RmResource_binapiprivGetMemInterMapParams(struct BinaryApiPrivileged *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pParams);
+}
+
+// binapiprivCheckMemInterUnmap: virtual inherited (rmres) base (binapi)
+NV_STATUS __nvoc_up_thunk_RmResource_binapiprivCheckMemInterUnmap(struct BinaryApiPrivileged *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource)), bSubdeviceHandleProvided);
+}
+
+// binapiprivGetMemoryMappingDescriptor: virtual inherited (rmres) base (binapi)
+NV_STATUS __nvoc_up_thunk_RmResource_binapiprivGetMemoryMappingDescriptor(struct BinaryApiPrivileged *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource)), ppMemDesc);
+}
+
+// binapiprivControlSerialization_Prologue: virtual inherited (rmres) base (binapi)
+NV_STATUS __nvoc_up_thunk_RmResource_binapiprivControlSerialization_Prologue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// binapiprivControlSerialization_Epilogue: virtual inherited (rmres) base (binapi)
+void __nvoc_up_thunk_RmResource_binapiprivControlSerialization_Epilogue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// binapiprivControl_Prologue: virtual inherited (rmres) base (binapi)
+NV_STATUS __nvoc_up_thunk_RmResource_binapiprivControl_Prologue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// binapiprivControl_Epilogue: virtual inherited (rmres) base (binapi)
+void __nvoc_up_thunk_RmResource_binapiprivControl_Epilogue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// binapiprivCanCopy: virtual inherited (res) base (binapi)
+NvBool __nvoc_up_thunk_RsResource_binapiprivCanCopy(struct BinaryApiPrivileged *pResource) {
+    return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// binapiprivIsDuplicate: virtual inherited (res) base (binapi)
+NV_STATUS __nvoc_up_thunk_RsResource_binapiprivIsDuplicate(struct BinaryApiPrivileged *pResource, NvHandle hMemory, NvBool *pDuplicate) {
+    return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate);
+}
+
+// binapiprivPreDestruct: virtual inherited (res) base (binapi)
+void __nvoc_up_thunk_RsResource_binapiprivPreDestruct(struct BinaryApiPrivileged *pResource) {
+    resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// binapiprivControlFilter: virtual inherited (res) base (binapi)
+NV_STATUS __nvoc_up_thunk_RsResource_binapiprivControlFilter(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// binapiprivIsPartialUnmapSupported: inline virtual inherited (res) base (binapi) body
+NvBool __nvoc_up_thunk_RsResource_binapiprivIsPartialUnmapSupported(struct BinaryApiPrivileged *pResource) {
+    return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// binapiprivMapTo: virtual inherited (res) base (binapi)
+NV_STATUS __nvoc_up_thunk_RsResource_binapiprivMapTo(struct BinaryApiPrivileged *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// binapiprivUnmapFrom: virtual inherited (res) base (binapi)
+NV_STATUS __nvoc_up_thunk_RsResource_binapiprivUnmapFrom(struct BinaryApiPrivileged *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// binapiprivGetRefCount: virtual inherited (res) base (binapi)
+NvU32 __nvoc_up_thunk_RsResource_binapiprivGetRefCount(struct BinaryApiPrivileged *pResource) {
+    return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// binapiprivAddAdditionalDependants: virtual inherited (res) base (binapi)
+void __nvoc_up_thunk_RsResource_binapiprivAddAdditionalDependants(struct RsClient *pClient, struct BinaryApiPrivileged *pResource, RsResourceRef *pReference) {
+    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference);
+}
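Every thunk above reduces to a fixed pointer adjustment: an up-thunk adds the NV_OFFSETOF of the embedded base to reach it from the derived object, and the down-thunk subtracts the same offset to recover the derived object, in the same spirit as the kernel's container_of() idiom. A self-contained toy illustration (hypothetical Outer/Inner types, not driver classes):

    #include <stddef.h>

    struct Inner { int x; };
    struct Outer { int tag; struct Inner base; };

    // "Up" direction, as in the __nvoc_up_thunk_* functions: derived -> base.
    static struct Inner *toInner(struct Outer *pOuter)
    {
        return (struct Inner *)((unsigned char *)pOuter + offsetof(struct Outer, base));
    }

    // "Down" direction, as in __nvoc_down_thunk_BinaryApiPrivileged_binapiControl:
    // base -> derived, by subtracting the same offset.
    static struct Outer *toOuter(struct Inner *pInner)
    {
        return (struct Outer *)((unsigned char *)pInner - offsetof(struct Outer, base));
    }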
+
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__BinaryApiPrivileged =
+{
+    /*numEntries=*/ 0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_BinaryApi(BinaryApi*);
+void __nvoc_dtor_BinaryApiPrivileged(BinaryApiPrivileged *pThis) {
+    __nvoc_dtor_BinaryApi(&pThis->__nvoc_base_BinaryApi);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_BinaryApiPrivileged(BinaryApiPrivileged *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_BinaryApi(BinaryApi* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_BinaryApiPrivileged(BinaryApiPrivileged *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_BinaryApi(&pThis->__nvoc_base_BinaryApi, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_BinaryApiPrivileged_fail_BinaryApi;
+    __nvoc_init_dataField_BinaryApiPrivileged(pThis);
+
+    status = __nvoc_binapiprivConstruct(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_BinaryApiPrivileged_fail__init;
+    goto __nvoc_ctor_BinaryApiPrivileged_exit; // Success
+
+__nvoc_ctor_BinaryApiPrivileged_fail__init:
+    __nvoc_dtor_BinaryApi(&pThis->__nvoc_base_BinaryApi);
+__nvoc_ctor_BinaryApiPrivileged_fail_BinaryApi:
+__nvoc_ctor_BinaryApiPrivileged_exit:
+
+    return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_BinaryApiPrivileged_1(BinaryApiPrivileged *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+} // End __nvoc_init_funcTable_BinaryApiPrivileged_1
+
+
+// Initialize vtable(s) for 25 virtual method(s).
+void __nvoc_init_funcTable_BinaryApiPrivileged(BinaryApiPrivileged *pThis) {
+    __nvoc_init_funcTable_BinaryApiPrivileged_1(pThis);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__BinaryApiPrivileged(BinaryApiPrivileged *pThis) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^5
+    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^4
+    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^4
+    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource; // (rmres) super^3
+    pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource; // (gpures) super^2
+    pThis->__nvoc_pbase_BinaryApi = &pThis->__nvoc_base_BinaryApi; // (binapi) super
+    pThis->__nvoc_pbase_BinaryApiPrivileged = pThis; // (binapipriv) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__BinaryApi(&pThis->__nvoc_base_BinaryApi);
+
+    // Pointer(s) to metadata structures(s)
+    pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__BinaryApiPrivileged.metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^5
+    pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__BinaryApiPrivileged.metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource; // (res) super^4
+    pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__BinaryApiPrivileged.metadata__BinaryApi.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^4
+    pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__BinaryApiPrivileged.metadata__BinaryApi.metadata__GpuResource.metadata__RmResource; // (rmres) super^3
+    pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_metadata_ptr = &__nvoc_metadata__BinaryApiPrivileged.metadata__BinaryApi.metadata__GpuResource; // (gpures) super^2
+    pThis->__nvoc_base_BinaryApi.__nvoc_metadata_ptr = &__nvoc_metadata__BinaryApiPrivileged.metadata__BinaryApi; // (binapi) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__BinaryApiPrivileged; // (binapipriv) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_BinaryApiPrivileged(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_BinaryApiPrivileged(BinaryApiPrivileged **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    BinaryApiPrivileged *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(BinaryApiPrivileged), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(BinaryApiPrivileged));
+
+    pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent if there is one, unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__BinaryApiPrivileged(pThis);
+    status = __nvoc_ctor_BinaryApiPrivileged(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_objCreate_BinaryApiPrivileged_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_BinaryApiPrivileged_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
+    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+        portMemSet(pThis, 0, sizeof(BinaryApiPrivileged));
+    else
+    {
+        portMemFree(pThis);
+        *ppThis = NULL;
+    }
+
+    // coverity[leaked_storage:FALSE]
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_BinaryApiPrivileged(BinaryApiPrivileged **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+    status = __nvoc_objCreate_BinaryApiPrivileged(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+    return status;
+}
+
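For reference, a caller drives the generated allocator above roughly as follows; this is a hedged sketch only (pParent, pCallContext and pParams come from the resource-server allocation path, which is outside this hunk):

    BinaryApiPrivileged *pBinApiPriv = NULL;
    NV_STATUS status;

    // Allocates and zero-fills the object, links it under pParent, wires the
    // metadata/vtable pointers, then runs the constructor chain.
    status = __nvoc_objCreate_BinaryApiPrivileged(&pBinApiPriv, pParent,
                                                  0 /* createFlags */,
                                                  pCallContext, pParams);
    if (status != NV_OK)
        return status; // on failure the object was already unlinked and freed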
diff --git a/src/nvidia/generated/g_binary_api_nvoc.h b/src/nvidia/generated/g_binary_api_nvoc.h
new file mode 100644
index 0000000..64cb4bb
--- /dev/null
+++ b/src/nvidia/generated/g_binary_api_nvoc.h
@@ -0,0 +1,589 @@
+
+#ifndef _G_BINARY_API_NVOC_H_
+#define _G_BINARY_API_NVOC_H_
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+#include "g_binary_api_nvoc.h"
+
+#ifndef BINARY_API_H
+#define BINARY_API_H
+
+#include "core/core.h"
+#include "rmapi/resource.h"
+#include "gpu/gpu_resource.h"
+#include "resserv/rs_resource.h"
+#include "rmapi/control.h"
+
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__BinaryApi;
+struct NVOC_METADATA__GpuResource;
+struct NVOC_VTABLE__BinaryApi;
+
+
+struct BinaryApi {
+
+    // Metadata starts with RTTI structure.
+    union {
+         const struct NVOC_METADATA__BinaryApi *__nvoc_metadata_ptr;
+         const struct NVOC_RTTI *__nvoc_rtti;
+    };
+
+    // Parent (i.e. superclass or base class) objects
+    struct GpuResource __nvoc_base_GpuResource;
+
+    // Ancestor object pointers for `staticCast` feature
+    struct Object *__nvoc_pbase_Object; // obj super^4
+    struct RsResource *__nvoc_pbase_RsResource; // res super^3
+    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3
+    struct RmResource *__nvoc_pbase_RmResource; // rmres super^2
+    struct GpuResource *__nvoc_pbase_GpuResource; // gpures super
+    struct BinaryApi *__nvoc_pbase_BinaryApi; // binapi
+};
+
+
+// Vtable with 25 per-class function pointers
+struct NVOC_VTABLE__BinaryApi {
+    NV_STATUS (*__binapiControl__)(struct BinaryApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual override (res) base (gpures)
+    NV_STATUS (*__binapiMap__)(struct BinaryApi * /*this*/, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures)
+    NV_STATUS (*__binapiUnmap__)(struct BinaryApi * /*this*/, struct CALL_CONTEXT *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures)
+    NvBool (*__binapiShareCallback__)(struct BinaryApi * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (gpures) base (gpures)
+    NV_STATUS (*__binapiGetRegBaseOffsetAndSize__)(struct BinaryApi * /*this*/, struct OBJGPU *, NvU32 *, NvU32 *); // virtual inherited (gpures) base (gpures)
+    NV_STATUS (*__binapiGetMapAddrSpace__)(struct BinaryApi * /*this*/, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (gpures) base (gpures)
+    NV_STATUS (*__binapiInternalControlForward__)(struct BinaryApi * /*this*/, NvU32, void *, NvU32); // virtual inherited (gpures) base (gpures)
+    NvHandle (*__binapiGetInternalObjectHandle__)(struct BinaryApi * /*this*/); // virtual inherited (gpures) base (gpures)
+    NvBool (*__binapiAccessCallback__)(struct BinaryApi * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (gpures)
+    NV_STATUS (*__binapiGetMemInterMapParams__)(struct BinaryApi * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (gpures)
+    NV_STATUS (*__binapiCheckMemInterUnmap__)(struct BinaryApi * /*this*/, NvBool); // virtual inherited (rmres) base (gpures)
+    NV_STATUS (*__binapiGetMemoryMappingDescriptor__)(struct BinaryApi * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (gpures)
+    NV_STATUS (*__binapiControlSerialization_Prologue__)(struct BinaryApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures)
+    void (*__binapiControlSerialization_Epilogue__)(struct BinaryApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures)
+    NV_STATUS (*__binapiControl_Prologue__)(struct BinaryApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures)
+    void (*__binapiControl_Epilogue__)(struct BinaryApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures)
+    NvBool (*__binapiCanCopy__)(struct BinaryApi * /*this*/); // virtual inherited (res) base (gpures)
+    NV_STATUS (*__binapiIsDuplicate__)(struct BinaryApi * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (gpures)
+    void (*__binapiPreDestruct__)(struct BinaryApi * /*this*/); // virtual inherited (res) base (gpures)
+    NV_STATUS (*__binapiControlFilter__)(struct BinaryApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (gpures)
+    NvBool (*__binapiIsPartialUnmapSupported__)(struct BinaryApi * /*this*/); // inline virtual inherited (res) base (gpures) body
+    NV_STATUS (*__binapiMapTo__)(struct BinaryApi * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (gpures)
+    NV_STATUS (*__binapiUnmapFrom__)(struct BinaryApi * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (gpures)
+    NvU32 (*__binapiGetRefCount__)(struct BinaryApi * /*this*/); // virtual inherited (res) base (gpures)
+    void (*__binapiAddAdditionalDependants__)(struct RsClient *, struct BinaryApi * /*this*/, RsResourceRef *); // virtual inherited (res) base (gpures)
+};
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__BinaryApi {
+    const struct NVOC_RTTI rtti;
+    const struct NVOC_METADATA__GpuResource metadata__GpuResource;
+    const struct NVOC_VTABLE__BinaryApi vtable;
+};
+
+#ifndef __NVOC_CLASS_BinaryApi_TYPEDEF__
+#define __NVOC_CLASS_BinaryApi_TYPEDEF__
+typedef struct BinaryApi BinaryApi;
+#endif /* __NVOC_CLASS_BinaryApi_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_BinaryApi
+#define __nvoc_class_id_BinaryApi 0xb7a47c
+#endif /* __nvoc_class_id_BinaryApi */
+
+// Casting support
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi;
+
+#define __staticCast_BinaryApi(pThis) \
+    ((pThis)->__nvoc_pbase_BinaryApi)
+
+#ifdef __nvoc_binary_api_h_disabled
+#define __dynamicCast_BinaryApi(pThis) ((BinaryApi*) NULL)
+#else //__nvoc_binary_api_h_disabled
+#define __dynamicCast_BinaryApi(pThis) \
+    ((BinaryApi*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(BinaryApi)))
+#endif //__nvoc_binary_api_h_disabled
+
+NV_STATUS __nvoc_objCreateDynamic_BinaryApi(BinaryApi**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_BinaryApi(BinaryApi**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+#define __objCreate_BinaryApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
+    __nvoc_objCreate_BinaryApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
+
+
+// Wrapper macros
+#define binapiControl_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__binapiControl__
+#define binapiControl(pResource, pCallContext, pParams) binapiControl_DISPATCH(pResource, pCallContext, pParams)
+#define binapiMap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresMap__
+#define binapiMap(pGpuResource, pCallContext, pParams, pCpuMapping) binapiMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
+#define binapiUnmap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresUnmap__
+#define binapiUnmap(pGpuResource, pCallContext, pCpuMapping) binapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
+#define binapiShareCallback_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresShareCallback__
+#define binapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) binapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
+#define binapiGetRegBaseOffsetAndSize_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetRegBaseOffsetAndSize__
+#define binapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) binapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
+#define binapiGetMapAddrSpace_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetMapAddrSpace__
+#define binapiGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) binapiGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
+#define binapiInternalControlForward_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresInternalControlForward__
+#define binapiInternalControlForward(pGpuResource, command, pParams, size) binapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
+#define binapiGetInternalObjectHandle_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__
+#define binapiGetInternalObjectHandle(pGpuResource) binapiGetInternalObjectHandle_DISPATCH(pGpuResource)
+#define binapiAccessCallback_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__
+#define binapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) binapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
+#define binapiGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__
+#define binapiGetMemInterMapParams(pRmResource, pParams) binapiGetMemInterMapParams_DISPATCH(pRmResource, pParams)
+#define binapiCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__
+#define binapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) binapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
+#define binapiGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__
+#define binapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) binapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
+#define binapiControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__
+#define binapiControlSerialization_Prologue(pResource, pCallContext, pParams) binapiControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams)
+#define binapiControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__
+#define binapiControlSerialization_Epilogue(pResource, pCallContext, pParams) binapiControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams)
+#define binapiControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__
+#define binapiControl_Prologue(pResource, pCallContext, pParams) binapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
+#define binapiControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__
+#define binapiControl_Epilogue(pResource, pCallContext, pParams) binapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
+#define binapiCanCopy_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__
+#define binapiCanCopy(pResource) binapiCanCopy_DISPATCH(pResource)
+#define binapiIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__
+#define binapiIsDuplicate(pResource, hMemory, pDuplicate) binapiIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate)
+#define binapiPreDestruct_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__
+#define binapiPreDestruct(pResource) binapiPreDestruct_DISPATCH(pResource)
+#define binapiControlFilter_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__
+#define binapiControlFilter(pResource, pCallContext, pParams) binapiControlFilter_DISPATCH(pResource, pCallContext, pParams)
+#define binapiIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__
+#define binapiIsPartialUnmapSupported(pResource) binapiIsPartialUnmapSupported_DISPATCH(pResource)
+#define binapiMapTo_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__
+#define binapiMapTo(pResource, pParams) binapiMapTo_DISPATCH(pResource, pParams)
+#define binapiUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__
+#define binapiUnmapFrom(pResource, pParams) binapiUnmapFrom_DISPATCH(pResource, pParams)
+#define binapiGetRefCount_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__
+#define binapiGetRefCount(pResource) binapiGetRefCount_DISPATCH(pResource)
+#define binapiAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__
+#define binapiAddAdditionalDependants(pClient, pResource, pReference) binapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
+
+// Dispatch functions
+static inline NV_STATUS binapiControl_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__nvoc_metadata_ptr->vtable.__binapiControl__(pResource, pCallContext, pParams);
+}
+
+static inline NV_STATUS binapiMap_DISPATCH(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
+    return pGpuResource->__nvoc_metadata_ptr->vtable.__binapiMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
+}
+
+static inline NV_STATUS binapiUnmap_DISPATCH(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
+    return pGpuResource->__nvoc_metadata_ptr->vtable.__binapiUnmap__(pGpuResource, pCallContext, pCpuMapping);
+}
+
+static inline NvBool binapiShareCallback_DISPATCH(struct BinaryApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return pGpuResource->__nvoc_metadata_ptr->vtable.__binapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
+}
+
+static inline NV_STATUS binapiGetRegBaseOffsetAndSize_DISPATCH(struct BinaryApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
+    return pGpuResource->__nvoc_metadata_ptr->vtable.__binapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize);
+}
+
+static inline NV_STATUS binapiGetMapAddrSpace_DISPATCH(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
+    return pGpuResource->__nvoc_metadata_ptr->vtable.__binapiGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
+}
+
+static inline NV_STATUS binapiInternalControlForward_DISPATCH(struct BinaryApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
+    return pGpuResource->__nvoc_metadata_ptr->vtable.__binapiInternalControlForward__(pGpuResource, command, pParams, size);
+}
+
+static inline NvHandle binapiGetInternalObjectHandle_DISPATCH(struct BinaryApi *pGpuResource) {
+    return pGpuResource->__nvoc_metadata_ptr->vtable.__binapiGetInternalObjectHandle__(pGpuResource);
+}
+
+static inline NvBool binapiAccessCallback_DISPATCH(struct BinaryApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return pResource->__nvoc_metadata_ptr->vtable.__binapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
+}
+
+static inline NV_STATUS binapiGetMemInterMapParams_DISPATCH(struct BinaryApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return pRmResource->__nvoc_metadata_ptr->vtable.__binapiGetMemInterMapParams__(pRmResource, pParams);
+}
+
+static inline NV_STATUS binapiCheckMemInterUnmap_DISPATCH(struct BinaryApi *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return pRmResource->__nvoc_metadata_ptr->vtable.__binapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
+}
+
+static inline NV_STATUS binapiGetMemoryMappingDescriptor_DISPATCH(struct BinaryApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return pRmResource->__nvoc_metadata_ptr->vtable.__binapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
+}
+
+static inline NV_STATUS binapiControlSerialization_Prologue_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__nvoc_metadata_ptr->vtable.__binapiControlSerialization_Prologue__(pResource, pCallContext, pParams);
+}
+
+static inline void binapiControlSerialization_Epilogue_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    pResource->__nvoc_metadata_ptr->vtable.__binapiControlSerialization_Epilogue__(pResource, pCallContext, pParams);
+}
+
+static inline NV_STATUS binapiControl_Prologue_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__nvoc_metadata_ptr->vtable.__binapiControl_Prologue__(pResource, pCallContext, pParams);
+}
+
+static inline void binapiControl_Epilogue_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    pResource->__nvoc_metadata_ptr->vtable.__binapiControl_Epilogue__(pResource, pCallContext, pParams);
+}
+
+static inline NvBool binapiCanCopy_DISPATCH(struct BinaryApi *pResource) {
+    return pResource->__nvoc_metadata_ptr->vtable.__binapiCanCopy__(pResource);
+}
+
+static inline NV_STATUS binapiIsDuplicate_DISPATCH(struct BinaryApi *pResource, NvHandle hMemory, NvBool *pDuplicate) {
+    return pResource->__nvoc_metadata_ptr->vtable.__binapiIsDuplicate__(pResource, hMemory, pDuplicate);
+}
+
+static inline void binapiPreDestruct_DISPATCH(struct BinaryApi *pResource) {
+    pResource->__nvoc_metadata_ptr->vtable.__binapiPreDestruct__(pResource);
+}
+
+static inline NV_STATUS binapiControlFilter_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__nvoc_metadata_ptr->vtable.__binapiControlFilter__(pResource, pCallContext, pParams);
+}
+
+static inline NvBool binapiIsPartialUnmapSupported_DISPATCH(struct BinaryApi *pResource) {
+    return pResource->__nvoc_metadata_ptr->vtable.__binapiIsPartialUnmapSupported__(pResource);
+}
+
+static inline NV_STATUS binapiMapTo_DISPATCH(struct BinaryApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return pResource->__nvoc_metadata_ptr->vtable.__binapiMapTo__(pResource, pParams);
+}
+
+static inline NV_STATUS binapiUnmapFrom_DISPATCH(struct BinaryApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return pResource->__nvoc_metadata_ptr->vtable.__binapiUnmapFrom__(pResource, pParams);
+}
+
+static inline NvU32 binapiGetRefCount_DISPATCH(struct BinaryApi *pResource) {
+    return pResource->__nvoc_metadata_ptr->vtable.__binapiGetRefCount__(pResource);
+}
+
+static inline void binapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct BinaryApi *pResource, RsResourceRef *pReference) {
+    pResource->__nvoc_metadata_ptr->vtable.__binapiAddAdditionalDependants__(pClient, pResource, pReference);
+}
+
+NV_STATUS binapiControl_IMPL(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
+
+NV_STATUS binapiConstruct_IMPL(struct BinaryApi *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+
+#define __nvoc_binapiConstruct(arg_pResource, arg_pCallContext, arg_pParams) binapiConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams)
+#undef PRIVATE_FIELD
+
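The wrapper macros and _DISPATCH helpers above are the C spelling of a virtual call. What a call site reduces to can be sketched as follows (comments only, illustrative):

    // A call such as
    //     status = binapiControl(pBinApi, pCallContext, pParams);
    // expands through binapiControl_DISPATCH into one indirect call:
    //     pBinApi->__nvoc_metadata_ptr->vtable.__binapiControl__(pBinApi, pCallContext, pParams);
    // For a standalone BinaryApi that slot holds binapiControl_IMPL; when the
    // BinaryApi is embedded in a BinaryApiPrivileged, the metadata initializer in
    // the .c hunk above points the same slot at
    // __nvoc_down_thunk_BinaryApiPrivileged_binapiControl, so the privileged
    // override runs without any change at the call site.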
+
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__BinaryApiPrivileged;
+struct NVOC_METADATA__BinaryApi;
+struct NVOC_VTABLE__BinaryApiPrivileged;
+
+
+struct BinaryApiPrivileged {
+
+    // Metadata starts with RTTI structure.
+    union {
+         const struct NVOC_METADATA__BinaryApiPrivileged *__nvoc_metadata_ptr;
+         const struct NVOC_RTTI *__nvoc_rtti;
+    };
+
+    // Parent (i.e. superclass or base class) objects
+    struct BinaryApi __nvoc_base_BinaryApi;
+
+    // Ancestor object pointers for `staticCast` feature
+    struct Object *__nvoc_pbase_Object; // obj super^5
+    struct RsResource *__nvoc_pbase_RsResource; // res super^4
+    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^4
+    struct RmResource *__nvoc_pbase_RmResource; // rmres super^3
+    struct GpuResource *__nvoc_pbase_GpuResource; // gpures super^2
+    struct BinaryApi *__nvoc_pbase_BinaryApi; // binapi super
+    struct BinaryApiPrivileged *__nvoc_pbase_BinaryApiPrivileged; // binapipriv
+};
+
+
+// Vtable with 25 per-class function pointers
+struct NVOC_VTABLE__BinaryApiPrivileged {
+    NV_STATUS (*__binapiprivControl__)(struct BinaryApiPrivileged * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual override (res) base (binapi)
+    NV_STATUS (*__binapiprivMap__)(struct BinaryApiPrivileged * /*this*/, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); // virtual inherited (gpures) base (binapi)
+    NV_STATUS (*__binapiprivUnmap__)(struct BinaryApiPrivileged * /*this*/, struct CALL_CONTEXT *, struct RsCpuMapping *); // virtual inherited (gpures) base (binapi)
+    NvBool (*__binapiprivShareCallback__)(struct BinaryApiPrivileged * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (gpures) base (binapi)
+    NV_STATUS (*__binapiprivGetRegBaseOffsetAndSize__)(struct BinaryApiPrivileged * /*this*/, struct OBJGPU *, NvU32 *, NvU32 *); // virtual inherited (gpures) base (binapi)
+    NV_STATUS (*__binapiprivGetMapAddrSpace__)(struct BinaryApiPrivileged * /*this*/, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (gpures) base (binapi)
+    NV_STATUS (*__binapiprivInternalControlForward__)(struct BinaryApiPrivileged * /*this*/, NvU32, void *, NvU32); // virtual inherited (gpures) base (binapi)
+    NvHandle (*__binapiprivGetInternalObjectHandle__)(struct BinaryApiPrivileged * /*this*/); // virtual inherited (gpures) base (binapi)
+    NvBool (*__binapiprivAccessCallback__)(struct BinaryApiPrivileged * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (binapi)
+    NV_STATUS (*__binapiprivGetMemInterMapParams__)(struct BinaryApiPrivileged * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (binapi)
+    NV_STATUS (*__binapiprivCheckMemInterUnmap__)(struct BinaryApiPrivileged * /*this*/, NvBool); // virtual inherited (rmres) base (binapi)
+    NV_STATUS (*__binapiprivGetMemoryMappingDescriptor__)(struct BinaryApiPrivileged * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (binapi)
+    NV_STATUS (*__binapiprivControlSerialization_Prologue__)(struct BinaryApiPrivileged * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (binapi)
+    void (*__binapiprivControlSerialization_Epilogue__)(struct BinaryApiPrivileged * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (binapi)
+    NV_STATUS (*__binapiprivControl_Prologue__)(struct BinaryApiPrivileged * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (binapi)
+    void (*__binapiprivControl_Epilogue__)(struct BinaryApiPrivileged * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (binapi)
+    NvBool (*__binapiprivCanCopy__)(struct BinaryApiPrivileged * /*this*/); // virtual inherited (res) base (binapi)
+    NV_STATUS (*__binapiprivIsDuplicate__)(struct BinaryApiPrivileged * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (binapi)
+    void (*__binapiprivPreDestruct__)(struct BinaryApiPrivileged * /*this*/); // virtual inherited (res) base (binapi)
+    NV_STATUS (*__binapiprivControlFilter__)(struct BinaryApiPrivileged * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (binapi)
+    NvBool (*__binapiprivIsPartialUnmapSupported__)(struct BinaryApiPrivileged * /*this*/); // inline virtual inherited (res) base (binapi) body
+    NV_STATUS (*__binapiprivMapTo__)(struct BinaryApiPrivileged * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (binapi)
+    NV_STATUS (*__binapiprivUnmapFrom__)(struct BinaryApiPrivileged * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (binapi)
+    NvU32 (*__binapiprivGetRefCount__)(struct BinaryApiPrivileged * /*this*/); // virtual inherited (res) base (binapi)
+    void (*__binapiprivAddAdditionalDependants__)(struct RsClient *, struct BinaryApiPrivileged * /*this*/, RsResourceRef *); // virtual inherited (res) base (binapi)
+};
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__BinaryApiPrivileged {
+    const struct NVOC_RTTI rtti;
+    const struct NVOC_METADATA__BinaryApi metadata__BinaryApi;
+    const struct NVOC_VTABLE__BinaryApiPrivileged vtable;
+};
+
+#ifndef __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__
+#define __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__
+typedef struct BinaryApiPrivileged BinaryApiPrivileged;
+#endif /* __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_BinaryApiPrivileged
+#define __nvoc_class_id_BinaryApiPrivileged 0x1c0579
+#endif /* __nvoc_class_id_BinaryApiPrivileged */
+
+// Casting support
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApiPrivileged;
+
+#define __staticCast_BinaryApiPrivileged(pThis) \
+    ((pThis)->__nvoc_pbase_BinaryApiPrivileged)
+
+#ifdef __nvoc_binary_api_h_disabled
+#define __dynamicCast_BinaryApiPrivileged(pThis) ((BinaryApiPrivileged*) NULL)
+#else //__nvoc_binary_api_h_disabled
+#define __dynamicCast_BinaryApiPrivileged(pThis) \
+    ((BinaryApiPrivileged*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(BinaryApiPrivileged)))
+#endif //__nvoc_binary_api_h_disabled
+
+NV_STATUS __nvoc_objCreateDynamic_BinaryApiPrivileged(BinaryApiPrivileged**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_BinaryApiPrivileged(BinaryApiPrivileged**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+#define __objCreate_BinaryApiPrivileged(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
+    __nvoc_objCreate_BinaryApiPrivileged((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
+
+
+// Wrapper macros
+#define binapiprivControl_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__binapiprivControl__
+#define binapiprivControl(pResource, pCallContext, pParams) binapiprivControl_DISPATCH(pResource, pCallContext, pParams)
+#define binapiprivMap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresMap__
+#define binapiprivMap(pGpuResource, pCallContext, pParams, pCpuMapping) binapiprivMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
+#define binapiprivUnmap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresUnmap__
+#define binapiprivUnmap(pGpuResource, pCallContext, pCpuMapping) binapiprivUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
+#define binapiprivShareCallback_FNPTR(pGpuResource) pGpuResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresShareCallback__
+#define binapiprivShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) binapiprivShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
+#define binapiprivGetRegBaseOffsetAndSize_FNPTR(pGpuResource) pGpuResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetRegBaseOffsetAndSize__
+#define binapiprivGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) binapiprivGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
+#define binapiprivGetMapAddrSpace_FNPTR(pGpuResource) pGpuResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetMapAddrSpace__
+#define binapiprivGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) binapiprivGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
+#define binapiprivInternalControlForward_FNPTR(pGpuResource) pGpuResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresInternalControlForward__
+#define binapiprivInternalControlForward(pGpuResource, command, pParams, size) binapiprivInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
+#define binapiprivGetInternalObjectHandle_FNPTR(pGpuResource) pGpuResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__
+#define binapiprivGetInternalObjectHandle(pGpuResource) binapiprivGetInternalObjectHandle_DISPATCH(pGpuResource)
+#define binapiprivAccessCallback_FNPTR(pResource) pResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__
+#define binapiprivAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) binapiprivAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
+#define binapiprivGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__
+#define binapiprivGetMemInterMapParams(pRmResource, pParams) binapiprivGetMemInterMapParams_DISPATCH(pRmResource, pParams)
+#define binapiprivCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__
+#define binapiprivCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) binapiprivCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
+#define binapiprivGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__
+#define binapiprivGetMemoryMappingDescriptor(pRmResource, ppMemDesc) binapiprivGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
+#define binapiprivControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__
+#define
binapiprivControlSerialization_Prologue(pResource, pCallContext, pParams) binapiprivControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define binapiprivControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define binapiprivControlSerialization_Epilogue(pResource, pCallContext, pParams) binapiprivControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define binapiprivControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define binapiprivControl_Prologue(pResource, pCallContext, pParams) binapiprivControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define binapiprivControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define binapiprivControl_Epilogue(pResource, pCallContext, pParams) binapiprivControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define binapiprivCanCopy_FNPTR(pResource) pResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define binapiprivCanCopy(pResource) binapiprivCanCopy_DISPATCH(pResource) +#define binapiprivIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define binapiprivIsDuplicate(pResource, hMemory, pDuplicate) binapiprivIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define binapiprivPreDestruct_FNPTR(pResource) pResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define binapiprivPreDestruct(pResource) binapiprivPreDestruct_DISPATCH(pResource) +#define binapiprivControlFilter_FNPTR(pResource) pResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define binapiprivControlFilter(pResource, pCallContext, pParams) binapiprivControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define binapiprivIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define binapiprivIsPartialUnmapSupported(pResource) binapiprivIsPartialUnmapSupported_DISPATCH(pResource) +#define binapiprivMapTo_FNPTR(pResource) pResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define binapiprivMapTo(pResource, pParams) binapiprivMapTo_DISPATCH(pResource, pParams) +#define binapiprivUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define binapiprivUnmapFrom(pResource, pParams) binapiprivUnmapFrom_DISPATCH(pResource, pParams) +#define binapiprivGetRefCount_FNPTR(pResource) pResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define binapiprivGetRefCount(pResource) 
binapiprivGetRefCount_DISPATCH(pResource) +#define binapiprivAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define binapiprivAddAdditionalDependants(pClient, pResource, pReference) binapiprivAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NV_STATUS binapiprivControl_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__binapiprivControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS binapiprivMap_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__binapiprivMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS binapiprivUnmap_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__binapiprivUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NvBool binapiprivShareCallback_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__binapiprivShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS binapiprivGetRegBaseOffsetAndSize_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__binapiprivGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS binapiprivGetMapAddrSpace_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__binapiprivGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS binapiprivInternalControlForward_DISPATCH(struct BinaryApiPrivileged *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__binapiprivInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NvHandle binapiprivGetInternalObjectHandle_DISPATCH(struct BinaryApiPrivileged *pGpuResource) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__binapiprivGetInternalObjectHandle__(pGpuResource); +} + +static inline NvBool binapiprivAccessCallback_DISPATCH(struct BinaryApiPrivileged *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__binapiprivAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS binapiprivGetMemInterMapParams_DISPATCH(struct BinaryApiPrivileged *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__binapiprivGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS binapiprivCheckMemInterUnmap_DISPATCH(struct BinaryApiPrivileged *pRmResource, NvBool bSubdeviceHandleProvided) { + return 
pRmResource->__nvoc_metadata_ptr->vtable.__binapiprivCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS binapiprivGetMemoryMappingDescriptor_DISPATCH(struct BinaryApiPrivileged *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__binapiprivGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS binapiprivControlSerialization_Prologue_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__binapiprivControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void binapiprivControlSerialization_Epilogue_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__binapiprivControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS binapiprivControl_Prologue_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__binapiprivControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void binapiprivControl_Epilogue_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__binapiprivControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool binapiprivCanCopy_DISPATCH(struct BinaryApiPrivileged *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__binapiprivCanCopy__(pResource); +} + +static inline NV_STATUS binapiprivIsDuplicate_DISPATCH(struct BinaryApiPrivileged *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__binapiprivIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void binapiprivPreDestruct_DISPATCH(struct BinaryApiPrivileged *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__binapiprivPreDestruct__(pResource); +} + +static inline NV_STATUS binapiprivControlFilter_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__binapiprivControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool binapiprivIsPartialUnmapSupported_DISPATCH(struct BinaryApiPrivileged *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__binapiprivIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS binapiprivMapTo_DISPATCH(struct BinaryApiPrivileged *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__binapiprivMapTo__(pResource, pParams); +} + +static inline NV_STATUS binapiprivUnmapFrom_DISPATCH(struct BinaryApiPrivileged *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__binapiprivUnmapFrom__(pResource, pParams); +} + +static inline NvU32 binapiprivGetRefCount_DISPATCH(struct BinaryApiPrivileged *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__binapiprivGetRefCount__(pResource); +} + +static inline void binapiprivAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct BinaryApiPrivileged *pResource, RsResourceRef *pReference) { + 
pResource->__nvoc_metadata_ptr->vtable.__binapiprivAddAdditionalDependants__(pClient, pResource, pReference); +} + +NV_STATUS binapiprivControl_IMPL(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +NV_STATUS binapiprivConstruct_IMPL(struct BinaryApiPrivileged *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_binapiprivConstruct(arg_pResource, arg_pCallContext, arg_pParams) binapiprivConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif + + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_BINARY_API_NVOC_H_ diff --git a/src/nvidia/generated/g_chips2halspec.h b/src/nvidia/generated/g_chips2halspec.h new file mode 100644 index 0000000..7fdb068 --- /dev/null +++ b/src/nvidia/generated/g_chips2halspec.h @@ -0,0 +1,3 @@ + +#include "g_chips2halspec_nvoc.h" + diff --git a/src/nvidia/generated/g_chips2halspec_nvoc.c b/src/nvidia/generated/g_chips2halspec_nvoc.c new file mode 100644 index 0000000..ef31dd7 --- /dev/null +++ b/src/nvidia/generated/g_chips2halspec_nvoc.c @@ -0,0 +1,86 @@ +#define NVOC_CHIPS2HALSPEC_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_chips2halspec_nvoc.h" + +void __nvoc_init_halspec_ChipHal(ChipHal *pChipHal, NvU32 arch, NvU32 impl, NvU32 hidrev) +{ + // T234D + if(arch == 0x0 && impl == 0x0 && hidrev == 0x235) + { + pChipHal->__nvoc_HalVarIdx = 108; + } + // T264D + else if(arch == 0x0 && impl == 0x0 && hidrev == 0x265) + { + pChipHal->__nvoc_HalVarIdx = 110; + } + // T256D + else if(arch == 0x0 && impl == 0x0 && hidrev == 0x257) + { + pChipHal->__nvoc_HalVarIdx = 111; + } +} + +void __nvoc_init_halspec_TegraChipHal(TegraChipHal *pTegraChipHal, TEGRA_CHIP_TYPE tegraType) +{ + // TEGRA_PCIE + if(tegraType == 0x0) + { + pTegraChipHal->__nvoc_HalVarIdx = 0; + } + // TEGRA_SOC + else if(tegraType == 0x1) + { + pTegraChipHal->__nvoc_HalVarIdx = 1; + } +} + +void __nvoc_init_halspec_RmVariantHal(RmVariantHal *pRmVariantHal, RM_RUNTIME_VARIANT rmVariant) +{ + // PF_KERNEL_ONLY + if(rmVariant == 0x2) + { + pRmVariantHal->__nvoc_HalVarIdx = 1; + } +} + +void __nvoc_init_halspec_DispIpHal(DispIpHal *pDispIpHal, NvU32 ipver) +{ + // DISPv0402 + if(ipver == 0x4020000) + { + pDispIpHal->__nvoc_HalVarIdx = 12; + } + // DISPv0501 + else if(ipver == 0x5010000) + { + pDispIpHal->__nvoc_HalVarIdx = 15; + } + // DISPv0504 + else if(ipver == 0x5040000) + { + pDispIpHal->__nvoc_HalVarIdx = 18; + } +} + +void __nvoc_init_halspec_DpuIpHal(DpuIpHal *pDpuIpHal, NvU32 ipver) +{ + // DPUv0000 + if(ipver == 0x0) + { + pDpuIpHal->__nvoc_HalVarIdx = 5; + } +} + diff --git a/src/nvidia/generated/g_chips2halspec_nvoc.h b/src/nvidia/generated/g_chips2halspec_nvoc.h new file mode 100644 index 0000000..121324d --- /dev/null +++ b/src/nvidia/generated/g_chips2halspec_nvoc.h @@ -0,0 +1,165 @@ + +#ifndef _G_CHIPS2HALSPEC_NVOC_H_ +#define _G_CHIPS2HALSPEC_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + 
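+// Illustrative usage sketch (not generated code; the caller context is
+// assumed): each __nvoc_init_halspec_* function in g_chips2halspec_nvoc.c
+// maps a chip or runtime identifier onto a __nvoc_HalVarIdx, which NVOC then
+// uses to index the per-variant HAL function tables:
+//
+//     ChipHal      chipHal;
+//     RmVariantHal rmVariantHal;
+//
+//     // hidrev 0x235 matches the T234D entry (variant index 108).
+//     __nvoc_init_halspec_ChipHal(&chipHal, 0x0, 0x0, 0x235);
+//
+//     // PF_KERNEL_ONLY (0x2) is the only runtime variant wired up here (index 1).
+//     __nvoc_init_halspec_RmVariantHal(&rmVariantHal,
+//                                      RM_RUNTIME_VARIANT_PF_KERNEL_ONLY);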
+#pragma once
+#include "g_chips2halspec_nvoc.h"
+
+#ifndef _CHIPS_2_HALSPEC_H_
+#define _CHIPS_2_HALSPEC_H_
+
+#include "nvtypes.h"
+#include "rmconfig.h"
+
+// Several WARs that are only visible to the NVOC compiler
+
+#define GPUHAL_ARCH(x) NV_PMC_BOOT_42_ARCHITECTURE_##x
+#define GPUHAL_IMPL(x) NV_PMC_BOOT_42_IMPLEMENTATION_##x
+
+// Create alias 'group' to provide a concise syntax
+#define group variant_group
+
+// Used in a hal block to indicate that the function isn't wired to any enabled chips
+#define __disabled__ false
+
+struct ChipHal {
+    unsigned short __nvoc_HalVarIdx;
+};
+typedef struct ChipHal ChipHal;
+void __nvoc_init_halspec_ChipHal(ChipHal*, NvU32, NvU32, NvU32);
+
+/*
+ * Tegra Chip Type Halspec
+ *
+ * For legacy iGPUs, we have two types of Tegra chips in Chips.pm:
+ * TEGRA_DGPU : The iGPU core inside the Tegra SoC chip with a PCIE interface.
+ *              Its behavior is more like a dGPU, so such chips are generally
+ *              added to the dGPU (CLASSIC_GPUS) chip family, e.g. GA10B.
+ *              This is generally the test chip used in MODS arch validation,
+ *              which shares the test infrastructure with dGPUs.
+ *
+ * TEGRA : The SoC chip. These chips do not share the dGPU HAL for the
+ *         PCIE-related implementation.
+ *
+ * Tegra chips after the Ampere arch use a PCIE interface that connects the
+ * iGPU to the SoC for BAR and control (interrupt) accesses.
+ * The code for TEGRA_CHIP_TYPE_PCIE and TEGRA_CHIP_TYPE_SOC mostly shares
+ * the same dGPU arch-specific HAL, except for manual differences due to the
+ * latency of manual updates between the nvgpu (standalone iGPU / full-chip
+ * verification) and nvmobile (SoC) trees.
+ */
+typedef enum _TEGRA_CHIP_TYPE {
+    // The default TEGRA_CHIP_TYPE is TEGRA_PCIE
+    TEGRA_CHIP_TYPE_DEFAULT = 0,
+    TEGRA_CHIP_TYPE_SOC = 1,
+} TEGRA_CHIP_TYPE;
+
+struct TegraChipHal {
+    unsigned short __nvoc_HalVarIdx;
+};
+typedef struct TegraChipHal TegraChipHal;
+void __nvoc_init_halspec_TegraChipHal(TegraChipHal*, TEGRA_CHIP_TYPE);
+
+/*
+ * RM Runtime Variant Halspec
+ *
+ * One group of HAL variants that presents two perspectives:
+ *
+ * Operating Environment Perspective: VF / PF / UCODE
+ *     VF | PF | UCODE = true
+ *     VF & PF & UCODE = false
+ *
+ *     VF    : RM is running in a vGPU guest environment. Equivalent to IS_VIRTUAL(pGpu).
+ *     PF    : RM is running on the host/bare metal in a standard PCIE environment.
+ *     UCODE : RM is running on a microcontroller.
+ *
+ * Functionality-Based Perspective: KERNEL_ONLY / PHYSICAL_ONLY / MONOLITHIC
+ *     KERNEL_ONLY | PHYSICAL_ONLY | MONOLITHIC = true
+ *     KERNEL_ONLY & PHYSICAL_ONLY & MONOLITHIC = false
+ *
+ *     KERNEL_ONLY   : RM does not own the HW; the physical part is offloaded to ucode.
+ *     PHYSICAL_ONLY : RM owns the HW but does not expose services to RM clients.
+ *     MONOLITHIC    : RM owns both the interface to the client and the underlying HW.
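+ *
+ * For example, a CPU-side RM driving a GSP-offloaded GPU runs as the
+ * PF_KERNEL_ONLY variant, while GSP-RM itself runs as the UCODE
+ * (PHYSICAL_ONLY) variant.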
+ *
+ * Note: GSP Client (IS_GSP_CLIENT(pGpu)) maps to "PF_KERNEL_ONLY"
+ *       DCE Client maps to "PF_KERNEL_ONLY & T234D"
+ *
+ *
+ *                     HAL Variants
+ *   +--------+       +----------------+
+ *   |   VF   | <-----|       VF       |--+
+ *   +--------+       +----------------+  |   +---------------+
+ *                                        |-->|  KERNEL_ONLY  |
+ *                    +----------------+  |   +---------------+
+ *                 +--| PF_KERNEL_ONLY |--+
+ *   +--------+    |  +----------------+
+ *   |   PF   | <--|
+ *   +--------+    |  +----------------+      +---------------+
+ *                 +--| PF_MONOLITHIC  |----->|  MONOLITHIC   |
+ *                    +----------------+      +---------------+
+ *
+ *   +--------+       +----------------+      +---------------+
+ *   |  UCODE | <-----|     UCODE      |----->| PHYSICAL_ONLY |
+ *   +--------+       +----------------+      +---------------+
+ *
+ */
+typedef enum _RM_RUNTIME_VARIANT {
+    RM_RUNTIME_VARIANT_VF = 1,
+    RM_RUNTIME_VARIANT_PF_KERNEL_ONLY = 2,
+    RM_RUNTIME_VARIANT_PF_MONOLITHIC = 3,
+    RM_RUNTIME_VARIANT_UCODE = 4,
+} RM_RUNTIME_VARIANT;
+
+struct RmVariantHal {
+    unsigned short __nvoc_HalVarIdx;
+};
+typedef struct RmVariantHal RmVariantHal;
+void __nvoc_init_halspec_RmVariantHal(RmVariantHal*, RM_RUNTIME_VARIANT);
+
+/* DISP IP versions */
+struct DispIpHal {
+    unsigned short __nvoc_HalVarIdx;
+};
+typedef struct DispIpHal DispIpHal;
+void __nvoc_init_halspec_DispIpHal(DispIpHal*, NvU32);
+
+/* The 'delete' rules for DispIpHal and ChipHal */
+// delete DISPv0402 & ~T234D;
+// delete ~DISPv0402 & T234D;
+// delete DISPv0501 & ~T264D;
+// delete ~DISPv0501 & T264D;
+// delete DISPv0504 & ~T256D;
+// delete ~DISPv0504 & T256D;
+
+
+/* DPU IP versions */
+struct DpuIpHal {
+    unsigned short __nvoc_HalVarIdx;
+};
+typedef struct DpuIpHal DpuIpHal;
+void __nvoc_init_halspec_DpuIpHal(DpuIpHal*, NvU32);
+
+/* The 'delete' rules for DpuIpHal and ChipHal */
+
+
+#undef group
+#endif /* _CHIPS_2_HALSPEC_H_ */
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _G_CHIPS2HALSPEC_NVOC_H_
diff --git a/src/nvidia/generated/g_client_nvoc.c b/src/nvidia/generated/g_client_nvoc.c
new file mode 100644
index 0000000..0c66cbe
--- /dev/null
+++ b/src/nvidia/generated/g_client_nvoc.c
@@ -0,0 +1,511 @@
+#define NVOC_CLIENT_H_PRIVATE_ACCESS_ALLOWED
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+#include "nvtypes.h"
+#include "nvport/nvport.h"
+#include "nvport/inline/util_valist.h"
+#include "utils/nvassert.h"
+#include "g_client_nvoc.h"
+
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check__0x21d236 = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_UserInfo;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared;
+
+// Forward declarations for UserInfo
+void __nvoc_init__RsShared(RsShared*);
+void __nvoc_init__UserInfo(UserInfo*);
+void __nvoc_init_funcTable_UserInfo(UserInfo*);
+NV_STATUS __nvoc_ctor_UserInfo(UserInfo*);
+void __nvoc_init_dataField_UserInfo(UserInfo*);
+void __nvoc_dtor_UserInfo(UserInfo*);
+
+// Structures used within RTTI (run-time type information)
+extern const struct NVOC_CASTINFO __nvoc_castinfo__UserInfo;
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info__UserInfo;
+
+// Down-thunk(s) to bridge UserInfo methods from ancestors (if any)
+
+// Up-thunk(s) to bridge UserInfo methods to ancestors (if any)
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_UserInfo =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(UserInfo),
+        /*classId=*/ classId(UserInfo),
+        /*providerId=*/
&__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "UserInfo", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_UserInfo, + /*pCastInfo=*/ &__nvoc_castinfo__UserInfo, + /*pExportInfo=*/ &__nvoc_export_info__UserInfo +}; + + +// Metadata with per-class RTTI with ancestor(s) +static const struct NVOC_METADATA__UserInfo __nvoc_metadata__UserInfo = { + .rtti.pClassDef = &__nvoc_class_def_UserInfo, // (userinfo) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_UserInfo, + .rtti.offset = 0, + .metadata__RsShared.rtti.pClassDef = &__nvoc_class_def_RsShared, // (shr) super + .metadata__RsShared.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RsShared.rtti.offset = NV_OFFSETOF(UserInfo, __nvoc_base_RsShared), + .metadata__RsShared.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^2 + .metadata__RsShared.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RsShared.metadata__Object.rtti.offset = NV_OFFSETOF(UserInfo, __nvoc_base_RsShared.__nvoc_base_Object), +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__UserInfo = { + .numRelatives = 3, + .relatives = { + &__nvoc_metadata__UserInfo.rtti, // [0]: (userinfo) this + &__nvoc_metadata__UserInfo.metadata__RsShared.rtti, // [1]: (shr) super + &__nvoc_metadata__UserInfo.metadata__RsShared.metadata__Object.rtti, // [2]: (obj) super^2 + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__UserInfo = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsShared(RsShared*); +void __nvoc_dtor_UserInfo(UserInfo *pThis) { + __nvoc_userinfoDestruct(pThis); + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_UserInfo(UserInfo *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsShared(RsShared* ); +NV_STATUS __nvoc_ctor_UserInfo(UserInfo *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared); + if (status != NV_OK) goto __nvoc_ctor_UserInfo_fail_RsShared; + __nvoc_init_dataField_UserInfo(pThis); + + status = __nvoc_userinfoConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_UserInfo_fail__init; + goto __nvoc_ctor_UserInfo_exit; // Success + +__nvoc_ctor_UserInfo_fail__init: + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); +__nvoc_ctor_UserInfo_fail_RsShared: +__nvoc_ctor_UserInfo_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_UserInfo_1(UserInfo *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_UserInfo_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_UserInfo(UserInfo *pThis) { + __nvoc_init_funcTable_UserInfo_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__UserInfo(UserInfo *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object; // (obj) super^2 + pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared; // (shr) super + pThis->__nvoc_pbase_UserInfo = pThis; // (userinfo) this + + // Recurse to superclass initialization function(s). 
+    __nvoc_init__RsShared(&pThis->__nvoc_base_RsShared);
+
+    // Pointer(s) to metadata structure(s)
+    pThis->__nvoc_base_RsShared.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__UserInfo.metadata__RsShared.metadata__Object;    // (obj) super^2
+    pThis->__nvoc_base_RsShared.__nvoc_metadata_ptr = &__nvoc_metadata__UserInfo.metadata__RsShared;    // (shr) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__UserInfo;    // (userinfo) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_UserInfo(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_UserInfo(UserInfo **ppThis, Dynamic *pParent, NvU32 createFlags)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    UserInfo *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(UserInfo), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(UserInfo));
+
+    pThis->__nvoc_base_RsShared.__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent if there is one, unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__UserInfo(pThis);
+    status = __nvoc_ctor_UserInfo(pThis);
+    if (status != NV_OK) goto __nvoc_objCreate_UserInfo_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_UserInfo_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
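+    // (The __nvoc_ctor_UserInfo failure path has already torn down any base
+    // classes it constructed, so this label only releases, or re-zeroes, the
+    // object memory itself.)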
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(UserInfo)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_UserInfo(UserInfo **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_UserInfo(ppThis, pParent, createFlags); + + return status; +} + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xb23d83 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClient; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClient; + +// Forward declarations for RmClient +void __nvoc_init__RsClient(RsClient*); +void __nvoc_init__RmClient(RmClient*); +void __nvoc_init_funcTable_RmClient(RmClient*); +NV_STATUS __nvoc_ctor_RmClient(RmClient*, struct PORT_MEM_ALLOCATOR *arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_RmClient(RmClient*); +void __nvoc_dtor_RmClient(RmClient*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__RmClient; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__RmClient; + +// Down-thunk(s) to bridge RmClient methods from ancestors (if any) +NV_STATUS __nvoc_down_thunk_RmClient_clientValidate(struct RsClient *pClient, const API_SECURITY_INFO *pSecInfo); // this +NV_STATUS __nvoc_down_thunk_RmClient_clientValidateLocks(struct RsClient *pClient, struct RsServer *pServer, const struct CLIENT_ENTRY *pClientEntry); // this +NV_STATUS __nvoc_down_thunk_RmClient_clientFreeResource(struct RsClient *pClient, struct RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_down_thunk_RmClient_clientInterMap(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_down_thunk_RmClient_clientInterUnmap(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams); // this +NV_STATUS __nvoc_down_thunk_RmClient_clientPostProcessPendingFreeList(struct RsClient *pClient, struct RsResourceRef **ppFirstLowPriRef); // this +RS_PRIV_LEVEL __nvoc_down_thunk_RmClient_clientGetCachedPrivilege(struct RsClient *pClient); // this +NvBool __nvoc_down_thunk_RmClient_clientIsAdmin(struct RsClient *pClient, RS_PRIV_LEVEL privLevel); // this + +// Up-thunk(s) to bridge RmClient methods to ancestors (if any) +NV_STATUS __nvoc_up_thunk_RsClient_rmclientDestructResourceRef(struct RmClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, API_SECURITY_INFO *pSecInfo); // this +NV_STATUS __nvoc_up_thunk_RsClient_rmclientUnmapMemory(struct RmClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo); // this +NV_STATUS __nvoc_up_thunk_RsClient_rmclientValidateNewResourceHandle(struct RmClient *pClient, NvHandle hResource, NvBool bRestrict); // this +NV_STATUS __nvoc_up_thunk_RsClient_rmclientShareResource(struct RmClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_RmClient = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmClient), + /*classId=*/ classId(RmClient), + /*providerId=*/ 
&__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmClient", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RmClient, + /*pCastInfo=*/ &__nvoc_castinfo__RmClient, + /*pExportInfo=*/ &__nvoc_export_info__RmClient +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__RmClient __nvoc_metadata__RmClient = { + .rtti.pClassDef = &__nvoc_class_def_RmClient, // (rmclient) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmClient, + .rtti.offset = 0, + .metadata__RsClient.rtti.pClassDef = &__nvoc_class_def_RsClient, // (client) super + .metadata__RsClient.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RsClient.rtti.offset = NV_OFFSETOF(RmClient, __nvoc_base_RsClient), + .metadata__RsClient.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^2 + .metadata__RsClient.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RsClient.metadata__Object.rtti.offset = NV_OFFSETOF(RmClient, __nvoc_base_RsClient.__nvoc_base_Object), + + .vtable.__rmclientValidate__ = &rmclientValidate_IMPL, // virtual override (client) base (client) + .metadata__RsClient.vtable.__clientValidate__ = &__nvoc_down_thunk_RmClient_clientValidate, // virtual + .vtable.__rmclientValidateLocks__ = &rmclientValidateLocks_IMPL, // virtual override (client) base (client) + .metadata__RsClient.vtable.__clientValidateLocks__ = &__nvoc_down_thunk_RmClient_clientValidateLocks, // virtual + .vtable.__rmclientFreeResource__ = &rmclientFreeResource_IMPL, // virtual override (client) base (client) + .metadata__RsClient.vtable.__clientFreeResource__ = &__nvoc_down_thunk_RmClient_clientFreeResource, // virtual + .vtable.__rmclientInterMap__ = &rmclientInterMap_IMPL, // virtual override (client) base (client) + .metadata__RsClient.vtable.__clientInterMap__ = &__nvoc_down_thunk_RmClient_clientInterMap, // virtual + .vtable.__rmclientInterUnmap__ = &rmclientInterUnmap_IMPL, // virtual override (client) base (client) + .metadata__RsClient.vtable.__clientInterUnmap__ = &__nvoc_down_thunk_RmClient_clientInterUnmap, // virtual + .vtable.__rmclientPostProcessPendingFreeList__ = &rmclientPostProcessPendingFreeList_IMPL, // virtual override (client) base (client) + .metadata__RsClient.vtable.__clientPostProcessPendingFreeList__ = &__nvoc_down_thunk_RmClient_clientPostProcessPendingFreeList, // virtual + .vtable.__rmclientGetCachedPrivilege__ = &rmclientGetCachedPrivilege_IMPL, // virtual override (client) base (client) + .metadata__RsClient.vtable.__clientGetCachedPrivilege__ = &__nvoc_down_thunk_RmClient_clientGetCachedPrivilege, // virtual + .vtable.__rmclientIsAdmin__ = &rmclientIsAdmin_IMPL, // virtual override (client) base (client) + .metadata__RsClient.vtable.__clientIsAdmin__ = &__nvoc_down_thunk_RmClient_clientIsAdmin, // virtual + .vtable.__rmclientDestructResourceRef__ = &__nvoc_up_thunk_RsClient_rmclientDestructResourceRef, // virtual inherited (client) base (client) + .metadata__RsClient.vtable.__clientDestructResourceRef__ = &clientDestructResourceRef_IMPL, // virtual + .vtable.__rmclientUnmapMemory__ = &__nvoc_up_thunk_RsClient_rmclientUnmapMemory, // virtual inherited (client) base (client) + .metadata__RsClient.vtable.__clientUnmapMemory__ = &clientUnmapMemory_IMPL, // virtual + .vtable.__rmclientValidateNewResourceHandle__ = &__nvoc_up_thunk_RsClient_rmclientValidateNewResourceHandle, // virtual inherited (client) base (client) + .metadata__RsClient.vtable.__clientValidateNewResourceHandle__ = 
&clientValidateNewResourceHandle_IMPL, // virtual + .vtable.__rmclientShareResource__ = &__nvoc_up_thunk_RsClient_rmclientShareResource, // virtual inherited (client) base (client) + .metadata__RsClient.vtable.__clientShareResource__ = &clientShareResource_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__RmClient = { + .numRelatives = 3, + .relatives = { + &__nvoc_metadata__RmClient.rtti, // [0]: (rmclient) this + &__nvoc_metadata__RmClient.metadata__RsClient.rtti, // [1]: (client) super + &__nvoc_metadata__RmClient.metadata__RsClient.metadata__Object.rtti, // [2]: (obj) super^2 + } +}; + +// 8 down-thunk(s) defined to bridge methods in RmClient from superclasses + +// rmclientValidate: virtual override (client) base (client) +NV_STATUS __nvoc_down_thunk_RmClient_clientValidate(struct RsClient *pClient, const API_SECURITY_INFO *pSecInfo) { + return rmclientValidate((struct RmClient *)(((unsigned char *) pClient) - NV_OFFSETOF(RmClient, __nvoc_base_RsClient)), pSecInfo); +} + +// rmclientValidateLocks: virtual override (client) base (client) +NV_STATUS __nvoc_down_thunk_RmClient_clientValidateLocks(struct RsClient *pClient, struct RsServer *pServer, const struct CLIENT_ENTRY *pClientEntry) { + return rmclientValidateLocks((struct RmClient *)(((unsigned char *) pClient) - NV_OFFSETOF(RmClient, __nvoc_base_RsClient)), pServer, pClientEntry); +} + +// rmclientFreeResource: virtual override (client) base (client) +NV_STATUS __nvoc_down_thunk_RmClient_clientFreeResource(struct RsClient *pClient, struct RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams) { + return rmclientFreeResource((struct RmClient *)(((unsigned char *) pClient) - NV_OFFSETOF(RmClient, __nvoc_base_RsClient)), pServer, pParams); +} + +// rmclientInterMap: virtual override (client) base (client) +NV_STATUS __nvoc_down_thunk_RmClient_clientInterMap(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams) { + return rmclientInterMap((struct RmClient *)(((unsigned char *) pClient) - NV_OFFSETOF(RmClient, __nvoc_base_RsClient)), pMapperRef, pMappableRef, pParams); +} + +// rmclientInterUnmap: virtual override (client) base (client) +NV_STATUS __nvoc_down_thunk_RmClient_clientInterUnmap(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams) { + return rmclientInterUnmap((struct RmClient *)(((unsigned char *) pClient) - NV_OFFSETOF(RmClient, __nvoc_base_RsClient)), pMapperRef, pParams); +} + +// rmclientPostProcessPendingFreeList: virtual override (client) base (client) +NV_STATUS __nvoc_down_thunk_RmClient_clientPostProcessPendingFreeList(struct RsClient *pClient, struct RsResourceRef **ppFirstLowPriRef) { + return rmclientPostProcessPendingFreeList((struct RmClient *)(((unsigned char *) pClient) - NV_OFFSETOF(RmClient, __nvoc_base_RsClient)), ppFirstLowPriRef); +} + +// rmclientGetCachedPrivilege: virtual override (client) base (client) +RS_PRIV_LEVEL __nvoc_down_thunk_RmClient_clientGetCachedPrivilege(struct RsClient *pClient) { + return rmclientGetCachedPrivilege((struct RmClient *)(((unsigned char *) pClient) - NV_OFFSETOF(RmClient, __nvoc_base_RsClient))); +} + +// rmclientIsAdmin: virtual override (client) base (client) +NvBool __nvoc_down_thunk_RmClient_clientIsAdmin(struct RsClient *pClient, RS_PRIV_LEVEL privLevel) { + return rmclientIsAdmin((struct RmClient *)(((unsigned char *) pClient) - NV_OFFSETOF(RmClient, __nvoc_base_RsClient)), 
privLevel); +} + + +// 4 up-thunk(s) defined to bridge methods in RmClient to superclasses + +// rmclientDestructResourceRef: virtual inherited (client) base (client) +NV_STATUS __nvoc_up_thunk_RsClient_rmclientDestructResourceRef(struct RmClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, API_SECURITY_INFO *pSecInfo) { + return clientDestructResourceRef((struct RsClient *)(((unsigned char *) pClient) + NV_OFFSETOF(RmClient, __nvoc_base_RsClient)), pServer, pResourceRef, pLockInfo, pSecInfo); +} + +// rmclientUnmapMemory: virtual inherited (client) base (client) +NV_STATUS __nvoc_up_thunk_RsClient_rmclientUnmapMemory(struct RmClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo) { + return clientUnmapMemory((struct RsClient *)(((unsigned char *) pClient) + NV_OFFSETOF(RmClient, __nvoc_base_RsClient)), pResourceRef, pLockInfo, ppCpuMapping, pSecInfo); +} + +// rmclientValidateNewResourceHandle: virtual inherited (client) base (client) +NV_STATUS __nvoc_up_thunk_RsClient_rmclientValidateNewResourceHandle(struct RmClient *pClient, NvHandle hResource, NvBool bRestrict) { + return clientValidateNewResourceHandle((struct RsClient *)(((unsigned char *) pClient) + NV_OFFSETOF(RmClient, __nvoc_base_RsClient)), hResource, bRestrict); +} + +// rmclientShareResource: virtual inherited (client) base (client) +NV_STATUS __nvoc_up_thunk_RsClient_rmclientShareResource(struct RmClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) { + return clientShareResource((struct RsClient *)(((unsigned char *) pClient) + NV_OFFSETOF(RmClient, __nvoc_base_RsClient)), pResourceRef, pSharePolicy, pCallContext); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__RmClient = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsClient(RsClient*); +void __nvoc_dtor_RmClient(RmClient *pThis) { + __nvoc_rmclientDestruct(pThis); + __nvoc_dtor_RsClient(&pThis->__nvoc_base_RsClient); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RmClient(RmClient *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsClient(RsClient* , struct PORT_MEM_ALLOCATOR *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_RmClient(RmClient *pThis, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsClient(&pThis->__nvoc_base_RsClient, arg_pAllocator, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RmClient_fail_RsClient; + __nvoc_init_dataField_RmClient(pThis); + + status = __nvoc_rmclientConstruct(pThis, arg_pAllocator, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RmClient_fail__init; + goto __nvoc_ctor_RmClient_exit; // Success + +__nvoc_ctor_RmClient_fail__init: + __nvoc_dtor_RsClient(&pThis->__nvoc_base_RsClient); +__nvoc_ctor_RmClient_fail_RsClient: +__nvoc_ctor_RmClient_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_RmClient_1(RmClient *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_RmClient_1 + + +// Initialize vtable(s) for 12 virtual method(s). +void __nvoc_init_funcTable_RmClient(RmClient *pThis) { + __nvoc_init_funcTable_RmClient_1(pThis); +} + +// Initialize newly constructed object. 
+void __nvoc_init__RmClient(RmClient *pThis) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsClient.__nvoc_base_Object;    // (obj) super^2
+    pThis->__nvoc_pbase_RsClient = &pThis->__nvoc_base_RsClient;    // (client) super
+    pThis->__nvoc_pbase_RmClient = pThis;    // (rmclient) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__RsClient(&pThis->__nvoc_base_RsClient);
+
+    // Pointer(s) to metadata structure(s)
+    pThis->__nvoc_base_RsClient.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__RmClient.metadata__RsClient.metadata__Object;    // (obj) super^2
+    pThis->__nvoc_base_RsClient.__nvoc_metadata_ptr = &__nvoc_metadata__RmClient.metadata__RsClient;    // (client) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__RmClient;    // (rmclient) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_RmClient(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_RmClient(RmClient **ppThis, Dynamic *pParent, NvU32 createFlags, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    RmClient *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(RmClient), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(RmClient));
+
+    pThis->__nvoc_base_RsClient.__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent if there is one, unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_RsClient.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_RsClient.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__RmClient(pThis);
+    status = __nvoc_ctor_RmClient(pThis, arg_pAllocator, arg_pParams);
+    if (status != NV_OK) goto __nvoc_objCreate_RmClient_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_RmClient_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_RsClient.__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(RmClient)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RmClient(RmClient **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct PORT_MEM_ALLOCATOR * arg_pAllocator = va_arg(args, struct PORT_MEM_ALLOCATOR *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RmClient(ppThis, pParent, createFlags, arg_pAllocator, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_client_nvoc.h b/src/nvidia/generated/g_client_nvoc.h new file mode 100644 index 0000000..725ac5f --- /dev/null +++ b/src/nvidia/generated/g_client_nvoc.h @@ -0,0 +1,430 @@ + +#ifndef _G_CLIENT_NVOC_H_ +#define _G_CLIENT_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once +#include "g_client_nvoc.h" + +#ifndef _CLIENT_H_ +#define _CLIENT_H_ + +#include "ctrl/ctrl0000/ctrl0000proc.h" // NV_PROC_NAME_MAX_LENGTH +#include "containers/btree.h" +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "resserv/rs_client.h" +#include "rmapi/resource.h" +#include "rmapi/event.h" +#include "nvsecurityinfo.h" + +// event information definitions +typedef struct _def_client_system_event_info CLI_SYSTEM_EVENT_INFO, *PCLI_SYSTEM_EVENT_INFO; + +/** + * This ref-counted object is shared by all clients that were registered under + * the same user and is used to identify clients from the same user. + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. 
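+// Illustrative sketch (not generated code): given a hypothetical private
+// member declared as
+//
+//     NvU32 PRIVATE_FIELD(refCount);
+//
+// the matching C source file defines NVOC_CLIENT_H_PRIVATE_ACCESS_ALLOWED
+// before including this header, so the declaration expands to plain
+// `NvU32 refCount;` there. Every other translation unit goes through
+// NVOC_PRIVATE_FIELD, which is assumed to rewrite the identifier so that
+// accesses like `pClient->refCount` fail to compile outside the owning file.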
+#ifdef NVOC_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__UserInfo; +struct NVOC_METADATA__RsShared; + + +struct UserInfo { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__UserInfo *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct RsShared __nvoc_base_RsShared; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^2 + struct RsShared *__nvoc_pbase_RsShared; // shr super + struct UserInfo *__nvoc_pbase_UserInfo; // userinfo + + // Data members + PUID_TOKEN pUidToken; +}; + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__UserInfo { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__RsShared metadata__RsShared; +}; + +#ifndef __NVOC_CLASS_UserInfo_TYPEDEF__ +#define __NVOC_CLASS_UserInfo_TYPEDEF__ +typedef struct UserInfo UserInfo; +#endif /* __NVOC_CLASS_UserInfo_TYPEDEF__ */ + +#ifndef __nvoc_class_id_UserInfo +#define __nvoc_class_id_UserInfo 0x21d236 +#endif /* __nvoc_class_id_UserInfo */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_UserInfo; + +#define __staticCast_UserInfo(pThis) \ + ((pThis)->__nvoc_pbase_UserInfo) + +#ifdef __nvoc_client_h_disabled +#define __dynamicCast_UserInfo(pThis) ((UserInfo*) NULL) +#else //__nvoc_client_h_disabled +#define __dynamicCast_UserInfo(pThis) \ + ((UserInfo*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(UserInfo))) +#endif //__nvoc_client_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_UserInfo(UserInfo**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_UserInfo(UserInfo**, Dynamic*, NvU32); +#define __objCreate_UserInfo(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_UserInfo((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros + +// Dispatch functions +NV_STATUS userinfoConstruct_IMPL(struct UserInfo *arg_pUserInfo); + +#define __nvoc_userinfoConstruct(arg_pUserInfo) userinfoConstruct_IMPL(arg_pUserInfo) +void userinfoDestruct_IMPL(struct UserInfo *pUserInfo); + +#define __nvoc_userinfoDestruct(pUserInfo) userinfoDestruct_IMPL(pUserInfo) +#undef PRIVATE_FIELD + + +// Flags for RmClient +#define RMAPI_CLIENT_FLAG_RM_INTERNAL_CLIENT 0x00000001 +#define RMAPI_CLIENT_FLAG_DELETE_PENDING 0x00000002 + +// Values for client debugger state +#define RMAPI_CLIENT_DEBUGGER_STATE_NOT_SET 0x00000000 +#define RMAPI_CLIENT_DEBUGGER_STATE_COMPUTE_ACTIVE 0x00000001 +#define RMAPI_CLIENT_DEBUGGER_STATE_DEBUG_ACTIVE 0x00000002 + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__RmClient; +struct NVOC_METADATA__RsClient; +struct NVOC_VTABLE__RmClient; + + +struct RmClient { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__RmClient *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct RsClient __nvoc_base_RsClient; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^2 + struct RsClient *__nvoc_pbase_RsClient; // client super + struct RmClient *__nvoc_pbase_RmClient; // rmclient + + // Data members + RS_PRIV_LEVEL cachedPrivilege; + NvBool bIsRootNonPriv; + NvU32 ProcID; + NvU32 SubProcessID; + char SubProcessName[100]; + NvBool bIsSubProcessDisabled; + NvU32 Flags; + NvU32 ClientDebuggerState; + void *pOSInfo; + void *pOsPidInfo; + char name[100]; + CLI_SYSTEM_EVENT_INFO CliSysEventInfo; + PSECURITY_TOKEN pSecurityToken; + struct UserInfo *pUserInfo; + NvBool bIsClientVirtualMode; + NvS32 imexChannel; + PNODE pCliSyncGpuBoostTree; + NvS32 lockStressCounter; +}; + + +// Vtable with 12 per-class function pointers +struct NVOC_VTABLE__RmClient { + NV_STATUS (*__rmclientValidate__)(struct RmClient * /*this*/, const API_SECURITY_INFO *); // virtual override (client) base (client) + NV_STATUS (*__rmclientValidateLocks__)(struct RmClient * /*this*/, struct RsServer *, const struct CLIENT_ENTRY *); // virtual override (client) base (client) + NV_STATUS (*__rmclientFreeResource__)(struct RmClient * /*this*/, struct RsServer *, struct RS_RES_FREE_PARAMS_INTERNAL *); // virtual override (client) base (client) + NV_STATUS (*__rmclientInterMap__)(struct RmClient * /*this*/, struct RsResourceRef *, struct RsResourceRef *, struct RS_INTER_MAP_PARAMS *); // virtual override (client) base (client) + NV_STATUS (*__rmclientInterUnmap__)(struct RmClient * /*this*/, struct RsResourceRef *, struct RS_INTER_UNMAP_PARAMS *); // virtual override (client) base (client) + NV_STATUS (*__rmclientPostProcessPendingFreeList__)(struct RmClient * /*this*/, struct RsResourceRef **); // virtual override (client) base (client) + RS_PRIV_LEVEL (*__rmclientGetCachedPrivilege__)(struct RmClient * /*this*/); // virtual override (client) base (client) + NvBool (*__rmclientIsAdmin__)(struct RmClient * /*this*/, RS_PRIV_LEVEL); // virtual override (client) base (client) + NV_STATUS (*__rmclientDestructResourceRef__)(struct RmClient * /*this*/, RsServer *, struct RsResourceRef *, struct RS_LOCK_INFO *, API_SECURITY_INFO *); // virtual inherited (client) base (client) + NV_STATUS (*__rmclientUnmapMemory__)(struct RmClient * /*this*/, struct RsResourceRef *, struct RS_LOCK_INFO *, struct RsCpuMapping **, API_SECURITY_INFO *); // virtual inherited (client) base (client) + NV_STATUS (*__rmclientValidateNewResourceHandle__)(struct RmClient * /*this*/, NvHandle, NvBool); // virtual inherited (client) base (client) + NV_STATUS (*__rmclientShareResource__)(struct RmClient * /*this*/, struct RsResourceRef *, RS_SHARE_POLICY *, struct CALL_CONTEXT *); // virtual inherited (client) base (client) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__RmClient { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__RsClient metadata__RsClient; + const struct NVOC_VTABLE__RmClient vtable; +}; + +#ifndef __NVOC_CLASS_RmClient_TYPEDEF__ +#define __NVOC_CLASS_RmClient_TYPEDEF__ +typedef struct RmClient RmClient; +#endif /* __NVOC_CLASS_RmClient_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmClient +#define __nvoc_class_id_RmClient 0xb23d83 +#endif /* __nvoc_class_id_RmClient */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClient; + +#define __staticCast_RmClient(pThis) \ + ((pThis)->__nvoc_pbase_RmClient) + +#ifdef __nvoc_client_h_disabled +#define 
__dynamicCast_RmClient(pThis) ((RmClient*) NULL) +#else //__nvoc_client_h_disabled +#define __dynamicCast_RmClient(pThis) \ + ((RmClient*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmClient))) +#endif //__nvoc_client_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_RmClient(RmClient**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RmClient(RmClient**, Dynamic*, NvU32, struct PORT_MEM_ALLOCATOR *arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_RmClient(ppNewObj, pParent, createFlags, arg_pAllocator, arg_pParams) \ + __nvoc_objCreate_RmClient((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pAllocator, arg_pParams) + + +// Wrapper macros +#define rmclientValidate_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__rmclientValidate__ +#define rmclientValidate(pClient, pSecInfo) rmclientValidate_DISPATCH(pClient, pSecInfo) +#define rmclientValidateLocks_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__rmclientValidateLocks__ +#define rmclientValidateLocks(pClient, pServer, pClientEntry) rmclientValidateLocks_DISPATCH(pClient, pServer, pClientEntry) +#define rmclientFreeResource_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__rmclientFreeResource__ +#define rmclientFreeResource(pClient, pServer, pParams) rmclientFreeResource_DISPATCH(pClient, pServer, pParams) +#define rmclientInterMap_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__rmclientInterMap__ +#define rmclientInterMap(pClient, pMapperRef, pMappableRef, pParams) rmclientInterMap_DISPATCH(pClient, pMapperRef, pMappableRef, pParams) +#define rmclientInterUnmap_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__rmclientInterUnmap__ +#define rmclientInterUnmap(pClient, pMapperRef, pParams) rmclientInterUnmap_DISPATCH(pClient, pMapperRef, pParams) +#define rmclientPostProcessPendingFreeList_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__rmclientPostProcessPendingFreeList__ +#define rmclientPostProcessPendingFreeList(pClient, ppFirstLowPriRef) rmclientPostProcessPendingFreeList_DISPATCH(pClient, ppFirstLowPriRef) +#define rmclientGetCachedPrivilege_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__rmclientGetCachedPrivilege__ +#define rmclientGetCachedPrivilege(pClient) rmclientGetCachedPrivilege_DISPATCH(pClient) +#define rmclientIsAdmin_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__rmclientIsAdmin__ +#define rmclientIsAdmin(pClient, privLevel) rmclientIsAdmin_DISPATCH(pClient, privLevel) +#define rmclientDestructResourceRef_FNPTR(pClient) pClient->__nvoc_base_RsClient.__nvoc_metadata_ptr->vtable.__clientDestructResourceRef__ +#define rmclientDestructResourceRef(pClient, pServer, pResourceRef, pLockInfo, pSecInfo) rmclientDestructResourceRef_DISPATCH(pClient, pServer, pResourceRef, pLockInfo, pSecInfo) +#define rmclientUnmapMemory_FNPTR(pClient) pClient->__nvoc_base_RsClient.__nvoc_metadata_ptr->vtable.__clientUnmapMemory__ +#define rmclientUnmapMemory(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo) rmclientUnmapMemory_DISPATCH(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo) +#define rmclientValidateNewResourceHandle_FNPTR(pClient) pClient->__nvoc_base_RsClient.__nvoc_metadata_ptr->vtable.__clientValidateNewResourceHandle__ +#define rmclientValidateNewResourceHandle(pClient, hResource, bRestrict) rmclientValidateNewResourceHandle_DISPATCH(pClient, hResource, bRestrict) +#define rmclientShareResource_FNPTR(pClient) pClient->__nvoc_base_RsClient.__nvoc_metadata_ptr->vtable.__clientShareResource__ +#define 
rmclientShareResource(pClient, pResourceRef, pSharePolicy, pCallContext) rmclientShareResource_DISPATCH(pClient, pResourceRef, pSharePolicy, pCallContext) + +// Dispatch functions +static inline NV_STATUS rmclientValidate_DISPATCH(struct RmClient *pClient, const API_SECURITY_INFO *pSecInfo) { + return pClient->__nvoc_metadata_ptr->vtable.__rmclientValidate__(pClient, pSecInfo); +} + +static inline NV_STATUS rmclientValidateLocks_DISPATCH(struct RmClient *pClient, struct RsServer *pServer, const struct CLIENT_ENTRY *pClientEntry) { + return pClient->__nvoc_metadata_ptr->vtable.__rmclientValidateLocks__(pClient, pServer, pClientEntry); +} + +static inline NV_STATUS rmclientFreeResource_DISPATCH(struct RmClient *pClient, struct RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams) { + return pClient->__nvoc_metadata_ptr->vtable.__rmclientFreeResource__(pClient, pServer, pParams); +} + +static inline NV_STATUS rmclientInterMap_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams) { + return pClient->__nvoc_metadata_ptr->vtable.__rmclientInterMap__(pClient, pMapperRef, pMappableRef, pParams); +} + +static inline NV_STATUS rmclientInterUnmap_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams) { + return pClient->__nvoc_metadata_ptr->vtable.__rmclientInterUnmap__(pClient, pMapperRef, pParams); +} + +static inline NV_STATUS rmclientPostProcessPendingFreeList_DISPATCH(struct RmClient *pClient, struct RsResourceRef **ppFirstLowPriRef) { + return pClient->__nvoc_metadata_ptr->vtable.__rmclientPostProcessPendingFreeList__(pClient, ppFirstLowPriRef); +} + +static inline RS_PRIV_LEVEL rmclientGetCachedPrivilege_DISPATCH(struct RmClient *pClient) { + return pClient->__nvoc_metadata_ptr->vtable.__rmclientGetCachedPrivilege__(pClient); +} + +static inline NvBool rmclientIsAdmin_DISPATCH(struct RmClient *pClient, RS_PRIV_LEVEL privLevel) { + return pClient->__nvoc_metadata_ptr->vtable.__rmclientIsAdmin__(pClient, privLevel); +} + +static inline NV_STATUS rmclientDestructResourceRef_DISPATCH(struct RmClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, API_SECURITY_INFO *pSecInfo) { + return pClient->__nvoc_metadata_ptr->vtable.__rmclientDestructResourceRef__(pClient, pServer, pResourceRef, pLockInfo, pSecInfo); +} + +static inline NV_STATUS rmclientUnmapMemory_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo) { + return pClient->__nvoc_metadata_ptr->vtable.__rmclientUnmapMemory__(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo); +} + +static inline NV_STATUS rmclientValidateNewResourceHandle_DISPATCH(struct RmClient *pClient, NvHandle hResource, NvBool bRestrict) { + return pClient->__nvoc_metadata_ptr->vtable.__rmclientValidateNewResourceHandle__(pClient, hResource, bRestrict); +} + +static inline NV_STATUS rmclientShareResource_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) { + return pClient->__nvoc_metadata_ptr->vtable.__rmclientShareResource__(pClient, pResourceRef, pSharePolicy, pCallContext); +} + +NV_STATUS rmclientValidate_IMPL(struct RmClient *pClient, const API_SECURITY_INFO *pSecInfo); + +NV_STATUS rmclientValidateLocks_IMPL(struct RmClient *pClient, struct RsServer *pServer, const 
struct CLIENT_ENTRY *pClientEntry); + +NV_STATUS rmclientFreeResource_IMPL(struct RmClient *pClient, struct RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams); + +NV_STATUS rmclientInterMap_IMPL(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams); + +NV_STATUS rmclientInterUnmap_IMPL(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams); + +NV_STATUS rmclientPostProcessPendingFreeList_IMPL(struct RmClient *pClient, struct RsResourceRef **ppFirstLowPriRef); + +RS_PRIV_LEVEL rmclientGetCachedPrivilege_IMPL(struct RmClient *pClient); + +NvBool rmclientIsAdmin_IMPL(struct RmClient *pClient, RS_PRIV_LEVEL privLevel); + +NV_STATUS rmclientConstruct_IMPL(struct RmClient *arg_pClient, struct PORT_MEM_ALLOCATOR *arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_rmclientConstruct(arg_pClient, arg_pAllocator, arg_pParams) rmclientConstruct_IMPL(arg_pClient, arg_pAllocator, arg_pParams) +void rmclientDestruct_IMPL(struct RmClient *pClient); + +#define __nvoc_rmclientDestruct(pClient) rmclientDestruct_IMPL(pClient) +void rmclientSetClientFlags_IMPL(struct RmClient *pClient, NvU32 clientFlags); + +#ifdef __nvoc_client_h_disabled +static inline void rmclientSetClientFlags(struct RmClient *pClient, NvU32 clientFlags) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); +} +#else //__nvoc_client_h_disabled +#define rmclientSetClientFlags(pClient, clientFlags) rmclientSetClientFlags_IMPL(pClient, clientFlags) +#endif //__nvoc_client_h_disabled + +void *rmclientGetSecurityToken_IMPL(struct RmClient *pClient); + +#ifdef __nvoc_client_h_disabled +static inline void *rmclientGetSecurityToken(struct RmClient *pClient) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); + return NULL; +} +#else //__nvoc_client_h_disabled +#define rmclientGetSecurityToken(pClient) rmclientGetSecurityToken_IMPL(pClient) +#endif //__nvoc_client_h_disabled + +NvBool rmclientIsCapableOrAdmin_IMPL(struct RmClient *pClient, NvU32 capability, RS_PRIV_LEVEL privLevel); + +#ifdef __nvoc_client_h_disabled +static inline NvBool rmclientIsCapableOrAdmin(struct RmClient *pClient, NvU32 capability, RS_PRIV_LEVEL privLevel) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); + return NV_FALSE; +} +#else //__nvoc_client_h_disabled +#define rmclientIsCapableOrAdmin(pClient, capability, privLevel) rmclientIsCapableOrAdmin_IMPL(pClient, capability, privLevel) +#endif //__nvoc_client_h_disabled + +NvBool rmclientIsCapable_IMPL(struct RmClient *pClient, NvU32 capability); + +#ifdef __nvoc_client_h_disabled +static inline NvBool rmclientIsCapable(struct RmClient *pClient, NvU32 capability) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); + return NV_FALSE; +} +#else //__nvoc_client_h_disabled +#define rmclientIsCapable(pClient, capability) rmclientIsCapable_IMPL(pClient, capability) +#endif //__nvoc_client_h_disabled + +#undef PRIVATE_FIELD + + +MAKE_LIST(RmClientList, RmClient*); +extern RmClientList g_clientListBehindGpusLock; +MAKE_LIST(UserInfoList, UserInfo*); +extern UserInfoList g_userInfoList; +MAKE_MULTIMAP(OsInfoMap, RmClient*); +extern OsInfoMap g_osInfoList; + + +// +// Convenience rmclientXxxByHandle utilities. Ideally, code would operate on +// pClient directly instead of hClient; these are provided for compatibility +// with hClient-heavy code.
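+//
+// For example (hypothetical caller, illustration only): code that holds only
+// an hClient can run the user-client security check without first resolving
+// the RmClient pointer:
+//
+//     API_SECURITY_INFO secInfo;   // assumed to be filled in by the caller
+//     NV_STATUS status = NV_OK;
+//     if (!rmclientIsKernelOnlyByHandle(hClient))
+//         status = rmclientUserClientSecurityCheckByHandle(hClient, &secInfo);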
+// +NvBool rmclientIsKernelOnlyByHandle(NvHandle hClient); +NvBool rmclientSetClientFlagsByHandle(NvHandle hClient, NvU32 clientFlags); +void rmclientPromoteDebuggerStateByHandle(NvHandle hClient, NvU32 newMinimumState); +void *rmclientGetSecurityTokenByHandle(NvHandle hClient); +NV_STATUS rmclientUserClientSecurityCheckByHandle(NvHandle hClient, const API_SECURITY_INFO *pSecInfo); + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_CLIENT_NVOC_H_ diff --git a/src/nvidia/generated/g_client_resource_nvoc.c b/src/nvidia/generated/g_client_resource_nvoc.c new file mode 100644 index 0000000..883f234 --- /dev/null +++ b/src/nvidia/generated/g_client_resource_nvoc.c @@ -0,0 +1,1627 @@ +#define NVOC_CLIENT_RESOURCE_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_client_resource_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x37a701 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClientResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClientResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +// Forward declarations for RmClientResource +void __nvoc_init__RsClientResource(RsClientResource*); +void __nvoc_init__RmResourceCommon(RmResourceCommon*); +void __nvoc_init__Notifier(Notifier*); +void __nvoc_init__RmClientResource(RmClientResource*); +void __nvoc_init_funcTable_RmClientResource(RmClientResource*); +NV_STATUS __nvoc_ctor_RmClientResource(RmClientResource*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_RmClientResource(RmClientResource*); +void __nvoc_dtor_RmClientResource(RmClientResource*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__RmClientResource; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__RmClientResource; + +// Down-thunk(s) to bridge RmClientResource methods from ancestors (if any) +PEVENTNOTIFICATION * __nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier); // super +struct NotifShare * __nvoc_down_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier); // super +void __nvoc_down_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare); // super +NV_STATUS __nvoc_down_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super +NV_STATUS __nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super +NvBool __nvoc_down_thunk_RmClientResource_resAccessCallback(struct RsResource *pRmCliRes, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NvBool __nvoc_down_thunk_RmClientResource_resShareCallback(struct 
RsResource *pRmCliRes, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_down_thunk_RmClientResource_resControl_Prologue(struct RsResource *pRmCliRes, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_down_thunk_RmClientResource_resControl_Epilogue(struct RsResource *pRmCliRes, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this + +// Up-thunk(s) to bridge RmClientResource methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_clientresCanCopy(struct RsClientResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_clientresIsDuplicate(struct RsClientResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_clientresPreDestruct(struct RsClientResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_clientresControl(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_clientresControlFilter(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_clientresControlSerialization_Prologue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RsResource_clientresControlSerialization_Epilogue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_clientresControl_Prologue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RsResource_clientresControl_Epilogue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_clientresMap(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_up_thunk_RsResource_clientresUnmap(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_up_thunk_RsResource_clientresIsPartialUnmapSupported(struct RsClientResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_clientresMapTo(struct RsClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_clientresUnmapFrom(struct RsClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_clientresGetRefCount(struct RsClientResource *pResource); // super +NvBool __nvoc_up_thunk_RsResource_clientresAccessCallback(struct RsClientResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NvBool __nvoc_up_thunk_RsResource_clientresShareCallback(struct RsClientResource *pResource, struct RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +void __nvoc_up_thunk_RsResource_clientresAddAdditionalDependants(struct RsClient *pClient, struct RsClientResource *pResource, RsResourceRef *pReference); // super +NvBool __nvoc_up_thunk_RsResource_cliresCanCopy(struct RmClientResource *pResource); // this 
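+
+// Illustrative sketch (hypothetical ExampleBase/ExampleDerived types; not
+// part of the generated file): each up-/down-thunk declared here bridges
+// between an object and a base object that is embedded in it by value, so
+// the conversion is plain pointer arithmetic over the embedded member's
+// offset:
+//
+//     typedef struct { int b; } ExampleBase;
+//     typedef struct { int d; ExampleBase base; } ExampleDerived;
+//
+//     // Down-thunk: recover the enclosing derived object from its base.
+//     static inline ExampleDerived *exampleFromBase(ExampleBase *pBase)
+//     {
+//         return (ExampleDerived *)((unsigned char *)pBase -
+//                                   NV_OFFSETOF(ExampleDerived, base));
+//     }
+//
+//     // Up-thunk: move from the derived object to its embedded base.
+//     static inline ExampleBase *exampleToBase(ExampleDerived *pDerived)
+//     {
+//         return (ExampleBase *)((unsigned char *)pDerived +
+//                                NV_OFFSETOF(ExampleDerived, base));
+//     }
+//
+// The generated thunk definitions further down perform exactly this
+// NV_OFFSETOF arithmetic over the __nvoc_base_* members.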
+NV_STATUS __nvoc_up_thunk_RsResource_cliresIsDuplicate(struct RmClientResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_cliresPreDestruct(struct RmClientResource *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_cliresControl(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_cliresControlFilter(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_cliresControlSerialization_Prologue(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RsResource_cliresControlSerialization_Epilogue(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_cliresMap(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_RsResource_cliresUnmap(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_RsResource_cliresIsPartialUnmapSupported(struct RmClientResource *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_cliresMapTo(struct RmClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_cliresUnmapFrom(struct RmClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_cliresGetRefCount(struct RmClientResource *pResource); // this +void __nvoc_up_thunk_RsResource_cliresAddAdditionalDependants(struct RsClient *pClient, struct RmClientResource *pResource, RsResourceRef *pReference); // this +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_cliresGetNotificationListPtr(struct RmClientResource *pNotifier); // this +struct NotifShare * __nvoc_up_thunk_Notifier_cliresGetNotificationShare(struct RmClientResource *pNotifier); // this +void __nvoc_up_thunk_Notifier_cliresSetNotificationShare(struct RmClientResource *pNotifier, struct NotifShare *pNotifShare); // this +NV_STATUS __nvoc_up_thunk_Notifier_cliresUnregisterEvent(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // this +NV_STATUS __nvoc_up_thunk_Notifier_cliresGetOrAllocNotifShare(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_RmClientResource = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmClientResource), + /*classId=*/ classId(RmClientResource), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmClientResource", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RmClientResource, + /*pCastInfo=*/ &__nvoc_castinfo__RmClientResource, + /*pExportInfo=*/ &__nvoc_export_info__RmClientResource +}; + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClientResource[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*pFunc=*/ (void (*)(void)) NULL, +#else + 
/*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetCpuInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*flags=*/ 0x10bu, + /*accessRight=*/0x0u, + /*methodId=*/ 0x102u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetCpuInfo" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetVrrCookiePresent_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x107u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_VRR_COOKIE_PRESENT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetVrrCookiePresent" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetClassList_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x108u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetClassList" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x105u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetLockTimes_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x105u) + /*flags=*/ 0x105u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x109u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetLockTimes" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemNotifyEvent_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x110u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemNotifyEvent" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemDebugCtrlRmMsg_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x121u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemDebugCtrlRmMsg" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetVgxSystemInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x133u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetVgxSystemInfo" 
+#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetPrivilegedStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x135u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetPrivilegedStatus" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetFabricStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x136u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetFabricStatus" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetRmInstanceId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*flags=*/ 0x10bu, + /*accessRight=*/0x0u, + /*methodId=*/ 0x139u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetRmInstanceId" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemNVPCFGetPowerModeInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x13bu, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemNVPCFGetPowerModeInfo" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemSyncExternalFabricMgmt_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x13cu, + /*paramSize=*/ sizeof(NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemSyncExternalFabricMgmt" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x107u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetClientDatabaseInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x107u) + /*flags=*/ 0x107u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x13du, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetClientDatabaseInfo" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10509u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetBuildVersionV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10509u) + /*flags=*/ 0x10509u, + 
/*accessRight=*/0x0u, + /*methodId=*/ 0x13eu, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetBuildVersionV2" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemRmctrlCacheModeCtrl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*flags=*/ 0x7u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x13fu, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemRmctrlCacheModeCtrl" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetFeatures_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x1f0u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetFeatures" +#endif + }, + { /* [16] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetAttachedIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*flags=*/ 0x10bu, + /*accessRight=*/0x0u, + /*methodId=*/ 0x201u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetAttachedIds" +#endif + }, + { /* [17] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetIdInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u) + /*flags=*/ 0x10109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x202u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_ID_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetIdInfo" +#endif + }, + { /* [18] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetInitStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x203u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetInitStatus" +#endif + }, + { /* [19] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetDeviceIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*flags=*/ 0x10bu, + /*accessRight=*/0x0u, + /*methodId=*/ 0x204u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetDeviceIds" +#endif + }, + { /* [20] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
cliresCtrlCmdGpuGetIdInfoV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x205u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetIdInfoV2" +#endif + }, + { /* [21] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetProbedIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x214u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetProbedIds" +#endif + }, + { /* [22] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuAttachIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u) + /*flags=*/ 0x10109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x215u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_ATTACH_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuAttachIds" +#endif + }, + { /* [23] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuDetachIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x216u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_DETACH_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuDetachIds" +#endif + }, + { /* [24] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetPciInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x21bu, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetPciInfo" +#endif + }, + { /* [25] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetUuidInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x274u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetUuidInfo" +#endif + }, + { /* [26] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetUuidFromGpuId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x275u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetUuidFromGpuId" +#endif + }, + { /* [27] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void 
(*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuModifyGpuDrainState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x278u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuModifyGpuDrainState" +#endif + }, + { /* [28] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuQueryGpuDrainState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x279u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuQueryGpuDrainState" +#endif + }, + { /* [29] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x509u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetMemOpEnable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x509u) + /*flags=*/ 0x509u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x27bu, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetMemOpEnable" +#endif + }, + { /* [30] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xbu) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuDisableNvlinkInit_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xbu) + /*flags=*/ 0xbu, + /*accessRight=*/0x0u, + /*methodId=*/ 0x281u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuDisableNvlinkInit" +#endif + }, + { /* [31] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdLegacyConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x282u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdLegacyConfig" +#endif + }, + { /* [32] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdPushUcodeImage_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x285u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_PUSH_UCODE_IMAGE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdPushUcodeImage" +#endif + }, + { /* [33] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetActiveDeviceIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*flags=*/ 0x10bu, + /*accessRight=*/0x0u, + /*methodId=*/ 0x288u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_ACTIVE_DEVICE_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ 
"cliresCtrlCmdGpuGetActiveDeviceIds" +#endif + }, + { /* [34] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuAsyncAttachId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x289u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_ASYNC_ATTACH_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuAsyncAttachId" +#endif + }, + { /* [35] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuWaitAttachId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x290u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_WAIT_ATTACH_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuWaitAttachId" +#endif + }, + { /* [36] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGsyncGetAttachedIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u) + /*flags=*/ 0x108u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x301u, + /*paramSize=*/ sizeof(NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGsyncGetAttachedIds" +#endif + }, + { /* [37] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGsyncGetIdInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x302u, + /*paramSize=*/ sizeof(NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGsyncGetIdInfo" +#endif + }, + { /* [38] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdEventSetNotification_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x501u, + /*paramSize=*/ sizeof(NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdEventSetNotification" +#endif + }, + { /* [39] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdEventGetSystemEventData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x502u, + /*paramSize=*/ sizeof(NV0000_CTRL_GET_SYSTEM_EVENT_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdEventGetSystemEventData" +#endif + }, + { /* [40] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSetSubProcessID_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u) + /*flags=*/ 0x10109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x901u, + /*paramSize=*/ sizeof(NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS), + 
/*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSetSubProcessID" +#endif + }, + { /* [41] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdDisableSubProcessUserdIsolation_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u) + /*flags=*/ 0x10109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x902u, + /*paramSize=*/ sizeof(NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdDisableSubProcessUserdIsolation" +#endif + }, + { /* [42] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientGetAddrSpaceType_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd01u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientGetAddrSpaceType" +#endif + }, + { /* [43] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientGetHandleInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd02u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientGetHandleInfo" +#endif + }, + { /* [44] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientGetAccessRights_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd03u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientGetAccessRights" +#endif + }, + { /* [45] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientSetInheritedSharePolicy_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd04u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientSetInheritedSharePolicy" +#endif + }, + { /* [46] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientGetChildHandle_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd05u, + /*paramSize=*/ sizeof(NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientGetChildHandle" +#endif + }, + { /* [47] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
cliresCtrlCmdClientShareObject_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd06u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientShareObject" +#endif + }, + { /* [48] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdObjectsAreDuplicates_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd07u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_OBJECTS_ARE_DUPLICATES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdObjectsAreDuplicates" +#endif + }, + { /* [49] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixFlushUserCache_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d02u, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixFlushUserCache" +#endif + }, + { /* [50] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixExportObjectToFd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d05u, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixExportObjectToFd" +#endif + }, + { /* [51] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixImportObjectFromFd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d06u, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixImportObjectFromFd" +#endif + }, + { /* [52] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixGetExportObjectInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*flags=*/ 0x10bu, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d08u, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixGetExportObjectInfo" +#endif + }, + { /* [53] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixCreateExportObjectFd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d0au, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + 
/*func=*/ "cliresCtrlCmdOsUnixCreateExportObjectFd" +#endif + }, + { /* [54] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixExportObjectsToFd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d0bu, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixExportObjectsToFd" +#endif + }, + { /* [55] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixImportObjectsFromFd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d0cu, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixImportObjectsFromFd" +#endif + }, + +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__RmClientResource __nvoc_metadata__RmClientResource = { + .rtti.pClassDef = &__nvoc_class_def_RmClientResource, // (clires) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmClientResource, + .rtti.offset = 0, + .metadata__RsClientResource.rtti.pClassDef = &__nvoc_class_def_RsClientResource, // (clientres) super + .metadata__RsClientResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RsClientResource.rtti.offset = NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource), + .metadata__RsClientResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^2 + .metadata__RsClientResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RsClientResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource), + .metadata__RsClientResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^3 + .metadata__RsClientResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RsClientResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super + .metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(RmClientResource, __nvoc_base_RmResourceCommon), + .metadata__Notifier.rtti.pClassDef = &__nvoc_class_def_Notifier, // (notify) super + .metadata__Notifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Notifier.rtti.offset = NV_OFFSETOF(RmClientResource, __nvoc_base_Notifier), + .metadata__Notifier.metadata__INotifier.rtti.pClassDef = &__nvoc_class_def_INotifier, // (inotify) super^2 + .metadata__Notifier.metadata__INotifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Notifier.metadata__INotifier.rtti.offset = NV_OFFSETOF(RmClientResource, __nvoc_base_Notifier.__nvoc_base_INotifier), + + .vtable.__cliresAccessCallback__ = &cliresAccessCallback_IMPL, // virtual override (res) base (clientres) + .metadata__RsClientResource.vtable.__clientresAccessCallback__ = 
&__nvoc_up_thunk_RsResource_clientresAccessCallback, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmClientResource_resAccessCallback, // virtual
+    .vtable.__cliresShareCallback__ = &cliresShareCallback_IMPL, // virtual override (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresShareCallback__ = &__nvoc_up_thunk_RsResource_clientresShareCallback, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmClientResource_resShareCallback, // virtual
+    .vtable.__cliresControl_Prologue__ = &cliresControl_Prologue_IMPL, // virtual override (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresControl_Prologue__ = &__nvoc_up_thunk_RsResource_clientresControl_Prologue, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmClientResource_resControl_Prologue, // virtual
+    .vtable.__cliresControl_Epilogue__ = &cliresControl_Epilogue_IMPL, // virtual override (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresControl_Epilogue__ = &__nvoc_up_thunk_RsResource_clientresControl_Epilogue, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmClientResource_resControl_Epilogue, // virtual
+    .vtable.__cliresCanCopy__ = &__nvoc_up_thunk_RsResource_cliresCanCopy, // virtual inherited (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresCanCopy__ = &__nvoc_up_thunk_RsResource_clientresCanCopy, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual
+    .vtable.__cliresIsDuplicate__ = &__nvoc_up_thunk_RsResource_cliresIsDuplicate, // virtual inherited (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresIsDuplicate__ = &__nvoc_up_thunk_RsResource_clientresIsDuplicate, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual
+    .vtable.__cliresPreDestruct__ = &__nvoc_up_thunk_RsResource_cliresPreDestruct, // virtual inherited (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresPreDestruct__ = &__nvoc_up_thunk_RsResource_clientresPreDestruct, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual
+    .vtable.__cliresControl__ = &__nvoc_up_thunk_RsResource_cliresControl, // virtual inherited (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresControl__ = &__nvoc_up_thunk_RsResource_clientresControl, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resControl__ = &resControl_IMPL, // virtual
+    .vtable.__cliresControlFilter__ = &__nvoc_up_thunk_RsResource_cliresControlFilter, // virtual inherited (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresControlFilter__ = &__nvoc_up_thunk_RsResource_clientresControlFilter, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual
+    .vtable.__cliresControlSerialization_Prologue__ = &__nvoc_up_thunk_RsResource_cliresControlSerialization_Prologue, // virtual inherited (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresControlSerialization_Prologue__ = &__nvoc_up_thunk_RsResource_clientresControlSerialization_Prologue, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &resControlSerialization_Prologue_IMPL, // virtual
+    .vtable.__cliresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RsResource_cliresControlSerialization_Epilogue, // virtual inherited (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RsResource_clientresControlSerialization_Epilogue, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &resControlSerialization_Epilogue_IMPL, // virtual
+    .vtable.__cliresMap__ = &__nvoc_up_thunk_RsResource_cliresMap, // virtual inherited (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresMap__ = &__nvoc_up_thunk_RsResource_clientresMap, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resMap__ = &resMap_IMPL, // virtual
+    .vtable.__cliresUnmap__ = &__nvoc_up_thunk_RsResource_cliresUnmap, // virtual inherited (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresUnmap__ = &__nvoc_up_thunk_RsResource_clientresUnmap, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resUnmap__ = &resUnmap_IMPL, // virtual
+    .vtable.__cliresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_cliresIsPartialUnmapSupported, // inline virtual inherited (res) base (clientres) body
+    .metadata__RsClientResource.vtable.__clientresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_clientresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body
+    .vtable.__cliresMapTo__ = &__nvoc_up_thunk_RsResource_cliresMapTo, // virtual inherited (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresMapTo__ = &__nvoc_up_thunk_RsResource_clientresMapTo, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual
+    .vtable.__cliresUnmapFrom__ = &__nvoc_up_thunk_RsResource_cliresUnmapFrom, // virtual inherited (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresUnmapFrom__ = &__nvoc_up_thunk_RsResource_clientresUnmapFrom, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual
+    .vtable.__cliresGetRefCount__ = &__nvoc_up_thunk_RsResource_cliresGetRefCount, // virtual inherited (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresGetRefCount__ = &__nvoc_up_thunk_RsResource_clientresGetRefCount, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual
+    .vtable.__cliresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_cliresAddAdditionalDependants, // virtual inherited (res) base (clientres)
+    .metadata__RsClientResource.vtable.__clientresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_clientresAddAdditionalDependants, // virtual inherited (res) base (res)
+    .metadata__RsClientResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual
+    .vtable.__cliresGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_cliresGetNotificationListPtr, // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyGetNotificationListPtr__ = &notifyGetNotificationListPtr_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationListPtr__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr, // pure virtual
+    .vtable.__cliresGetNotificationShare__ = &__nvoc_up_thunk_Notifier_cliresGetNotificationShare, // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyGetNotificationShare__ = &notifyGetNotificationShare_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationShare, // pure virtual
+    .vtable.__cliresSetNotificationShare__ = &__nvoc_up_thunk_Notifier_cliresSetNotificationShare, // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifySetNotificationShare__ = &notifySetNotificationShare_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifySetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifySetNotificationShare, // pure virtual
+    .vtable.__cliresUnregisterEvent__ = &__nvoc_up_thunk_Notifier_cliresUnregisterEvent, // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyUnregisterEvent__ = &notifyUnregisterEvent_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyUnregisterEvent__ = &__nvoc_down_thunk_Notifier_inotifyUnregisterEvent, // pure virtual
+    .vtable.__cliresGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_cliresGetOrAllocNotifShare, // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyGetOrAllocNotifShare__ = &notifyGetOrAllocNotifShare_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetOrAllocNotifShare__ = &__nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare, // pure virtual
+};
+
+
+// Dynamic down-casting information
+const struct NVOC_CASTINFO __nvoc_castinfo__RmClientResource = {
+    .numRelatives = 7,
+    .relatives = {
+        &__nvoc_metadata__RmClientResource.rtti, // [0]: (clires) this
+        &__nvoc_metadata__RmClientResource.metadata__RsClientResource.rtti, // [1]: (clientres) super
+        &__nvoc_metadata__RmClientResource.metadata__RsClientResource.metadata__RsResource.rtti, // [2]: (res) super^2
+        &__nvoc_metadata__RmClientResource.metadata__RsClientResource.metadata__RsResource.metadata__Object.rtti, // [3]: (obj) super^3
+        &__nvoc_metadata__RmClientResource.metadata__RmResourceCommon.rtti, // [4]: (rmrescmn) super
+        &__nvoc_metadata__RmClientResource.metadata__Notifier.rtti, // [5]: (notify) super
+        &__nvoc_metadata__RmClientResource.metadata__Notifier.metadata__INotifier.rtti, // [6]: (inotify) super^2
+    }
+};
+
+// 4 down-thunk(s) defined to bridge methods in RmClientResource from superclasses
+
+// cliresAccessCallback: virtual override (res) base (clientres)
+NvBool __nvoc_down_thunk_RmClientResource_resAccessCallback(struct RsResource *pRmCliRes, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return cliresAccessCallback((struct RmClientResource *)(((unsigned char *) pRmCliRes) - NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)), pInvokingClient, pAllocParams, accessRight);
+}
+
+// cliresShareCallback: virtual override (res) base (clientres)
+NvBool __nvoc_down_thunk_RmClientResource_resShareCallback(struct RsResource *pRmCliRes, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return cliresShareCallback((struct RmClientResource *)(((unsigned char *) pRmCliRes) - NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+// cliresControl_Prologue: virtual override (res) base (clientres)
+NV_STATUS __nvoc_down_thunk_RmClientResource_resControl_Prologue(struct RsResource *pRmCliRes, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return cliresControl_Prologue((struct RmClientResource *)(((unsigned char *) pRmCliRes) - NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// cliresControl_Epilogue: virtual override (res) base (clientres)
+void __nvoc_down_thunk_RmClientResource_resControl_Epilogue(struct RsResource *pRmCliRes, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    cliresControl_Epilogue((struct RmClientResource *)(((unsigned char *) pRmCliRes) - NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+
+// 19 up-thunk(s) defined to bridge methods in RmClientResource to superclasses
+
+// cliresCanCopy: virtual inherited (res) base (clientres)
+NvBool __nvoc_up_thunk_RsResource_cliresCanCopy(struct RmClientResource *pResource) {
+    return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)));
+}
+
+// cliresIsDuplicate: virtual inherited (res) base (clientres)
+NV_STATUS __nvoc_up_thunk_RsResource_cliresIsDuplicate(struct RmClientResource *pResource, NvHandle hMemory, NvBool *pDuplicate) {
+    return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)), hMemory, pDuplicate);
+}
+
+// cliresPreDestruct: virtual inherited (res) base (clientres)
+void __nvoc_up_thunk_RsResource_cliresPreDestruct(struct RmClientResource *pResource) {
+    resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)));
+}
+
+// cliresControl: virtual inherited (res) base (clientres)
+NV_STATUS __nvoc_up_thunk_RsResource_cliresControl(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControl((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// cliresControlFilter: virtual inherited (res) base (clientres)
+NV_STATUS __nvoc_up_thunk_RsResource_cliresControlFilter(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// cliresControlSerialization_Prologue: virtual inherited (res) base (clientres)
+NV_STATUS __nvoc_up_thunk_RsResource_cliresControlSerialization_Prologue(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControlSerialization_Prologue((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// cliresControlSerialization_Epilogue: virtual inherited (res) base (clientres)
+void __nvoc_up_thunk_RsResource_cliresControlSerialization_Epilogue(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    resControlSerialization_Epilogue((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// cliresMap: virtual inherited (res) base (clientres)
+NV_STATUS __nvoc_up_thunk_RsResource_cliresMap(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
+    return resMap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)), pCallContext, pParams, pCpuMapping);
+}
+
+// cliresUnmap: virtual inherited (res) base (clientres)
+NV_STATUS __nvoc_up_thunk_RsResource_cliresUnmap(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
+    return resUnmap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)), pCallContext, pCpuMapping);
+}
+
+// cliresIsPartialUnmapSupported: inline virtual inherited (res) base (clientres) body
+NvBool __nvoc_up_thunk_RsResource_cliresIsPartialUnmapSupported(struct RmClientResource *pResource) {
+    return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)));
+}
+
+// cliresMapTo: virtual inherited (res) base (clientres)
+NV_STATUS __nvoc_up_thunk_RsResource_cliresMapTo(struct RmClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)), pParams);
+}
+
+// cliresUnmapFrom: virtual inherited (res) base (clientres)
+NV_STATUS __nvoc_up_thunk_RsResource_cliresUnmapFrom(struct RmClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)), pParams);
+}
+
+// cliresGetRefCount: virtual inherited (res) base (clientres)
+NvU32 __nvoc_up_thunk_RsResource_cliresGetRefCount(struct RmClientResource *pResource) {
+    return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)));
+}
+
+// cliresAddAdditionalDependants: virtual inherited (res) base (clientres)
+void __nvoc_up_thunk_RsResource_cliresAddAdditionalDependants(struct RsClient *pClient, struct RmClientResource *pResource, RsResourceRef *pReference) {
+    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource)), pReference);
+}
+
+// cliresGetNotificationListPtr: virtual inherited (notify) base (notify)
+PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_cliresGetNotificationListPtr(struct RmClientResource *pNotifier) {
+    return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(RmClientResource, __nvoc_base_Notifier)));
+}
+
+// cliresGetNotificationShare: virtual inherited (notify) base (notify)
+struct NotifShare * __nvoc_up_thunk_Notifier_cliresGetNotificationShare(struct RmClientResource *pNotifier) {
+    return notifyGetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(RmClientResource, __nvoc_base_Notifier)));
+}
+
+// cliresSetNotificationShare: virtual inherited (notify) base (notify)
+void __nvoc_up_thunk_Notifier_cliresSetNotificationShare(struct RmClientResource *pNotifier, struct NotifShare *pNotifShare) {
+    notifySetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(RmClientResource, __nvoc_base_Notifier)), pNotifShare);
+}
+
+// cliresUnregisterEvent: virtual inherited (notify) base (notify)
+NV_STATUS __nvoc_up_thunk_Notifier_cliresUnregisterEvent(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
+    return notifyUnregisterEvent((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(RmClientResource, __nvoc_base_Notifier)), hNotifierClient, hNotifierResource, hEventClient, hEvent);
+}
+
+// cliresGetOrAllocNotifShare: virtual inherited (notify) base (notify)
+NV_STATUS __nvoc_up_thunk_Notifier_cliresGetOrAllocNotifShare(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
+    return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(RmClientResource, __nvoc_base_Notifier)), hNotifierClient, hNotifierResource, ppNotifShare);
+}
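Both thunk families above reduce to constant offset arithmetic over embedded base subobjects: an up-thunk adds NV_OFFSETOF(...) to move from the derived object to a base it contains, and a down-thunk subtracts the same offset to recover the derived object from a base pointer. A minimal standalone sketch of the pattern, using toy types in place of the RM structs and offsetof standing in for NV_OFFSETOF:

    #include <stddef.h>
    #include <stdio.h>

    struct Base    { int id; };
    struct Derived { int tag; struct Base base; };

    /* up-thunk direction: derived -> embedded base */
    static struct Base *toBase(struct Derived *d) {
        return (struct Base *)(((unsigned char *)d) + offsetof(struct Derived, base));
    }

    /* down-thunk direction: base -> enclosing derived */
    static struct Derived *toDerived(struct Base *b) {
        return (struct Derived *)(((unsigned char *)b) - offsetof(struct Derived, base));
    }

    int main(void) {
        struct Derived d = { .tag = 7, .base = { .id = 42 } };
        printf("%d %d\n", toBase(&d)->id, toDerived(toBase(&d))->tag); /* 42 7 */
        return 0;
    }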
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__RmClientResource =
+{
+    /*numEntries=*/ 56,
+    /*pExportEntries=*/ __nvoc_exported_method_def_RmClientResource
+};
+
+void __nvoc_dtor_RsClientResource(RsClientResource*);
+void __nvoc_dtor_RmResourceCommon(RmResourceCommon*);
+void __nvoc_dtor_Notifier(Notifier*);
+void __nvoc_dtor_RmClientResource(RmClientResource *pThis) {
+    __nvoc_cliresDestruct(pThis);
+    __nvoc_dtor_RsClientResource(&pThis->__nvoc_base_RsClientResource);
+    __nvoc_dtor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon);
+    __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_RmClientResource(RmClientResource *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_RsClientResource(RsClientResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_RmResourceCommon(RmResourceCommon* );
+NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *);
+NV_STATUS __nvoc_ctor_RmClientResource(RmClientResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_RsClientResource(&pThis->__nvoc_base_RsClientResource, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_RmClientResource_fail_RsClientResource;
+    status = __nvoc_ctor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon);
+    if (status != NV_OK) goto __nvoc_ctor_RmClientResource_fail_RmResourceCommon;
+    status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext);
+    if (status != NV_OK) goto __nvoc_ctor_RmClientResource_fail_Notifier;
+    __nvoc_init_dataField_RmClientResource(pThis);
+
+    status = __nvoc_cliresConstruct(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_RmClientResource_fail__init;
+    goto __nvoc_ctor_RmClientResource_exit; // Success
+
+__nvoc_ctor_RmClientResource_fail__init:
+    __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
+__nvoc_ctor_RmClientResource_fail_Notifier:
+    __nvoc_dtor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon);
+__nvoc_ctor_RmClientResource_fail_RmResourceCommon:
+    __nvoc_dtor_RsClientResource(&pThis->__nvoc_base_RsClientResource);
+__nvoc_ctor_RmClientResource_fail_RsClientResource:
+__nvoc_ctor_RmClientResource_exit:
+
+    return status;
+}
+
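The constructor above uses the standard C idiom for partial-construction unwinding: one failure label per base constructor, laid out in reverse construction order, so a failure at step N tears down exactly steps 1..N-1 and nothing else. A runnable reduction of that control flow (toy init/deinit functions, not the RM ones):

    #include <stdio.h>
    typedef int STATUS;
    #define OK 0

    static STATUS init1(void) { return OK; }
    static STATUS init2(void) { return OK; }
    static STATUS init3(void) { return -1; }        /* simulate a failure at step 3 */
    static void deinit1(void) { puts("undo 1"); }
    static void deinit2(void) { puts("undo 2"); }

    static STATUS ctor(void) {
        STATUS status;
        if ((status = init1()) != OK) goto fail_1;
        if ((status = init2()) != OK) goto fail_2;
        if ((status = init3()) != OK) goto fail_3;
        return OK;                  /* fully constructed */
    fail_3:
        deinit2();                  /* unwind in reverse order of construction */
    fail_2:
        deinit1();
    fail_1:
        return status;              /* nothing (left) to undo */
    }

    int main(void) { return ctor() == OK ? 0 : 1; }  /* prints "undo 2" then "undo 1" */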
+// Vtable initialization
+static void __nvoc_init_funcTable_RmClientResource_1(RmClientResource *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+
+    // cliresCtrlCmdSystemGetCpuInfo -- exported (id=0x102)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu)
+    pThis->__cliresCtrlCmdSystemGetCpuInfo__ = &cliresCtrlCmdSystemGetCpuInfo_IMPL;
+#endif
+
+    // cliresCtrlCmdSystemGetFeatures -- exported (id=0x1f0)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__cliresCtrlCmdSystemGetFeatures__ = &cliresCtrlCmdSystemGetFeatures_IMPL;
+#endif
+
+    // cliresCtrlCmdSystemGetBuildVersionV2 -- exported (id=0x13e)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10509u)
+    pThis->__cliresCtrlCmdSystemGetBuildVersionV2__ = &cliresCtrlCmdSystemGetBuildVersionV2_IMPL;
+#endif
+
+    // cliresCtrlCmdSystemGetLockTimes -- exported (id=0x109)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x105u)
+    pThis->__cliresCtrlCmdSystemGetLockTimes__ = &cliresCtrlCmdSystemGetLockTimes_IMPL;
+#endif
+
+    // cliresCtrlCmdSystemGetClassList -- exported (id=0x108)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__cliresCtrlCmdSystemGetClassList__ = &cliresCtrlCmdSystemGetClassList_IMPL;
+#endif
+
+    // cliresCtrlCmdSystemNotifyEvent -- exported (id=0x110)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__cliresCtrlCmdSystemNotifyEvent__ = &cliresCtrlCmdSystemNotifyEvent_IMPL;
+#endif
+
+    // cliresCtrlCmdSystemDebugCtrlRmMsg -- exported (id=0x121)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__cliresCtrlCmdSystemDebugCtrlRmMsg__ = &cliresCtrlCmdSystemDebugCtrlRmMsg_IMPL;
+#endif
+
+    // cliresCtrlCmdSystemGetVgxSystemInfo -- exported (id=0x133)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__cliresCtrlCmdSystemGetVgxSystemInfo__ = &cliresCtrlCmdSystemGetVgxSystemInfo_IMPL;
+#endif
+
+    // cliresCtrlCmdSystemGetPrivilegedStatus -- exported (id=0x135)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__cliresCtrlCmdSystemGetPrivilegedStatus__ = &cliresCtrlCmdSystemGetPrivilegedStatus_IMPL;
+#endif
+
+    // cliresCtrlCmdSystemGetFabricStatus -- exported (id=0x136)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__cliresCtrlCmdSystemGetFabricStatus__ = &cliresCtrlCmdSystemGetFabricStatus_IMPL;
+#endif
+
+    // cliresCtrlCmdSystemGetRmInstanceId -- exported (id=0x139)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu)
+    pThis->__cliresCtrlCmdSystemGetRmInstanceId__ = &cliresCtrlCmdSystemGetRmInstanceId_IMPL;
+#endif
+
+    // cliresCtrlCmdSystemGetClientDatabaseInfo -- exported (id=0x13d)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x107u)
+    pThis->__cliresCtrlCmdSystemGetClientDatabaseInfo__ = &cliresCtrlCmdSystemGetClientDatabaseInfo_IMPL;
+#endif
+
+    // cliresCtrlCmdSystemRmctrlCacheModeCtrl -- exported (id=0x13f)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
+    pThis->__cliresCtrlCmdSystemRmctrlCacheModeCtrl__ = &cliresCtrlCmdSystemRmctrlCacheModeCtrl_IMPL;
+#endif
+
+    // cliresCtrlCmdClientGetAddrSpaceType -- exported (id=0xd01)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__cliresCtrlCmdClientGetAddrSpaceType__ = &cliresCtrlCmdClientGetAddrSpaceType_IMPL;
+#endif
+
+    // cliresCtrlCmdClientGetHandleInfo -- exported (id=0xd02)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__cliresCtrlCmdClientGetHandleInfo__ = &cliresCtrlCmdClientGetHandleInfo_IMPL;
+#endif
+
+    // cliresCtrlCmdClientGetAccessRights -- exported (id=0xd03)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__cliresCtrlCmdClientGetAccessRights__ = &cliresCtrlCmdClientGetAccessRights_IMPL;
+#endif
+
+    // cliresCtrlCmdClientSetInheritedSharePolicy -- exported (id=0xd04)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__cliresCtrlCmdClientSetInheritedSharePolicy__ = &cliresCtrlCmdClientSetInheritedSharePolicy_IMPL;
+#endif
+
+    // cliresCtrlCmdClientShareObject -- exported (id=0xd06)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__cliresCtrlCmdClientShareObject__ = &cliresCtrlCmdClientShareObject_IMPL;
+#endif
+
+    // cliresCtrlCmdClientGetChildHandle -- exported (id=0xd05)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__cliresCtrlCmdClientGetChildHandle__ = &cliresCtrlCmdClientGetChildHandle_IMPL;
+#endif
+
+    // cliresCtrlCmdObjectsAreDuplicates -- exported (id=0xd07)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__cliresCtrlCmdObjectsAreDuplicates__ = &cliresCtrlCmdObjectsAreDuplicates_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuGetAttachedIds -- exported (id=0x201)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu)
+    pThis->__cliresCtrlCmdGpuGetAttachedIds__ = &cliresCtrlCmdGpuGetAttachedIds_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuGetIdInfo -- exported (id=0x202)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u)
+    pThis->__cliresCtrlCmdGpuGetIdInfo__ = &cliresCtrlCmdGpuGetIdInfo_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuGetIdInfoV2 -- exported (id=0x205)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__cliresCtrlCmdGpuGetIdInfoV2__ = &cliresCtrlCmdGpuGetIdInfoV2_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuGetInitStatus -- exported (id=0x203)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__cliresCtrlCmdGpuGetInitStatus__ = &cliresCtrlCmdGpuGetInitStatus_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuGetDeviceIds -- exported (id=0x204)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu)
+    pThis->__cliresCtrlCmdGpuGetDeviceIds__ = &cliresCtrlCmdGpuGetDeviceIds_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuGetActiveDeviceIds -- exported (id=0x288)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu)
+    pThis->__cliresCtrlCmdGpuGetActiveDeviceIds__ = &cliresCtrlCmdGpuGetActiveDeviceIds_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuGetProbedIds -- exported (id=0x214)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__cliresCtrlCmdGpuGetProbedIds__ = &cliresCtrlCmdGpuGetProbedIds_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuAttachIds -- exported (id=0x215)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u)
+    pThis->__cliresCtrlCmdGpuAttachIds__ = &cliresCtrlCmdGpuAttachIds_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuAsyncAttachId -- exported (id=0x289)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__cliresCtrlCmdGpuAsyncAttachId__ = &cliresCtrlCmdGpuAsyncAttachId_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuWaitAttachId -- exported (id=0x290)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__cliresCtrlCmdGpuWaitAttachId__ = &cliresCtrlCmdGpuWaitAttachId_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuDetachIds -- exported (id=0x216)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__cliresCtrlCmdGpuDetachIds__ = &cliresCtrlCmdGpuDetachIds_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuGetPciInfo -- exported (id=0x21b)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__cliresCtrlCmdGpuGetPciInfo__ = &cliresCtrlCmdGpuGetPciInfo_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuGetUuidInfo -- exported (id=0x274)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__cliresCtrlCmdGpuGetUuidInfo__ = &cliresCtrlCmdGpuGetUuidInfo_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuGetUuidFromGpuId -- exported (id=0x275)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__cliresCtrlCmdGpuGetUuidFromGpuId__ = &cliresCtrlCmdGpuGetUuidFromGpuId_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuModifyGpuDrainState -- exported (id=0x278)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
+    pThis->__cliresCtrlCmdGpuModifyGpuDrainState__ = &cliresCtrlCmdGpuModifyGpuDrainState_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuQueryGpuDrainState -- exported (id=0x279)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__cliresCtrlCmdGpuQueryGpuDrainState__ = &cliresCtrlCmdGpuQueryGpuDrainState_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuGetMemOpEnable -- exported (id=0x27b)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x509u)
+    pThis->__cliresCtrlCmdGpuGetMemOpEnable__ = &cliresCtrlCmdGpuGetMemOpEnable_IMPL;
+#endif
+
+    // cliresCtrlCmdGpuDisableNvlinkInit -- exported (id=0x281)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xbu)
+    pThis->__cliresCtrlCmdGpuDisableNvlinkInit__ = &cliresCtrlCmdGpuDisableNvlinkInit_IMPL;
+#endif
+
+    // cliresCtrlCmdLegacyConfig -- exported (id=0x282)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__cliresCtrlCmdLegacyConfig__ = &cliresCtrlCmdLegacyConfig_IMPL;
+#endif
+
+    // cliresCtrlCmdPushUcodeImage -- exported (id=0x285)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__cliresCtrlCmdPushUcodeImage__ = &cliresCtrlCmdPushUcodeImage_IMPL;
+#endif
+
+    // cliresCtrlCmdSystemGetVrrCookiePresent -- exported (id=0x107)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
+    pThis->__cliresCtrlCmdSystemGetVrrCookiePresent__ = &cliresCtrlCmdSystemGetVrrCookiePresent_IMPL;
+#endif
+
+    // cliresCtrlCmdGsyncGetAttachedIds -- exported (id=0x301)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u)
+    pThis->__cliresCtrlCmdGsyncGetAttachedIds__ = &cliresCtrlCmdGsyncGetAttachedIds_IMPL;
+#endif
+
+    // cliresCtrlCmdGsyncGetIdInfo -- exported (id=0x302)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__cliresCtrlCmdGsyncGetIdInfo__ = &cliresCtrlCmdGsyncGetIdInfo_IMPL;
+#endif
+
+    // cliresCtrlCmdEventSetNotification -- exported (id=0x501)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__cliresCtrlCmdEventSetNotification__ = &cliresCtrlCmdEventSetNotification_IMPL;
+#endif
+
+    // cliresCtrlCmdEventGetSystemEventData -- exported (id=0x502)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__cliresCtrlCmdEventGetSystemEventData__ = &cliresCtrlCmdEventGetSystemEventData_IMPL;
+#endif
+
+    // cliresCtrlCmdOsUnixExportObjectToFd -- exported (id=0x3d05)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__cliresCtrlCmdOsUnixExportObjectToFd__ = &cliresCtrlCmdOsUnixExportObjectToFd_IMPL;
+#endif
+
+    // cliresCtrlCmdOsUnixImportObjectFromFd -- exported (id=0x3d06)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__cliresCtrlCmdOsUnixImportObjectFromFd__ = &cliresCtrlCmdOsUnixImportObjectFromFd_IMPL;
+#endif
+
+    // cliresCtrlCmdOsUnixGetExportObjectInfo -- exported (id=0x3d08)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu)
+    pThis->__cliresCtrlCmdOsUnixGetExportObjectInfo__ = &cliresCtrlCmdOsUnixGetExportObjectInfo_IMPL;
+#endif
+
+    // cliresCtrlCmdOsUnixCreateExportObjectFd -- exported (id=0x3d0a)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__cliresCtrlCmdOsUnixCreateExportObjectFd__ = &cliresCtrlCmdOsUnixCreateExportObjectFd_IMPL;
+#endif
+
+    // cliresCtrlCmdOsUnixExportObjectsToFd -- exported (id=0x3d0b)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__cliresCtrlCmdOsUnixExportObjectsToFd__ = &cliresCtrlCmdOsUnixExportObjectsToFd_IMPL;
+#endif
+
+    // cliresCtrlCmdOsUnixImportObjectsFromFd -- exported (id=0x3d0c)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__cliresCtrlCmdOsUnixImportObjectsFromFd__ = &cliresCtrlCmdOsUnixImportObjectsFromFd_IMPL;
+#endif
+
+    // cliresCtrlCmdOsUnixFlushUserCache -- exported (id=0x3d02)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__cliresCtrlCmdOsUnixFlushUserCache__ = &cliresCtrlCmdOsUnixFlushUserCache_IMPL;
+#endif
+
+    // cliresCtrlCmdSetSubProcessID -- exported (id=0x901)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u)
+    pThis->__cliresCtrlCmdSetSubProcessID__ = &cliresCtrlCmdSetSubProcessID_IMPL;
+#endif
+
+    // cliresCtrlCmdDisableSubProcessUserdIsolation -- exported (id=0x902)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u)
+    pThis->__cliresCtrlCmdDisableSubProcessUserdIsolation__ = &cliresCtrlCmdDisableSubProcessUserdIsolation_IMPL;
+#endif
+
+    // cliresCtrlCmdSystemNVPCFGetPowerModeInfo -- exported (id=0x13b)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__cliresCtrlCmdSystemNVPCFGetPowerModeInfo__ = &cliresCtrlCmdSystemNVPCFGetPowerModeInfo_IMPL;
+#endif
+
+    // cliresCtrlCmdSystemSyncExternalFabricMgmt -- exported (id=0x13c)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
+    pThis->__cliresCtrlCmdSystemSyncExternalFabricMgmt__ = &cliresCtrlCmdSystemSyncExternalFabricMgmt_IMPL;
+#endif
+} // End __nvoc_init_funcTable_RmClientResource_1 with approximately 56 basic block(s).
+
+
+// Initialize vtable(s) for 79 virtual method(s).
+void __nvoc_init_funcTable_RmClientResource(RmClientResource *pThis) {
+
+    // Initialize vtable(s) with 56 per-object function pointer(s).
+    __nvoc_init_funcTable_RmClientResource_1(pThis);
+}
+
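The split announced by the "79 virtual method(s)" comment is visible here: the 56 exported control handlers are installed as per-object pointers on pThis (so the NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG guards can omit them per build), while the remaining 23 classic virtuals live once per class in the const metadata and are reached through __nvoc_metadata_ptr. A compilable miniature of the two call paths, with toy names in place of the RM ones:

    #include <stdio.h>

    struct Vtable { int (*getRefCount)(void *self); };   /* shared, one per class */
    struct Meta   { struct Vtable vtable; };

    struct Obj {
        const struct Meta *meta;                 /* like __nvoc_metadata_ptr      */
        int (*ctrlCmd)(struct Obj *self);        /* like the per-object slots     */
        int refCount;
    };

    static int getRefCountImpl(void *self) { return ((struct Obj *)self)->refCount; }
    static int ctrlCmdImpl(struct Obj *self)  { return self->refCount * 2; }

    static const struct Meta g_meta = { .vtable = { .getRefCount = getRefCountImpl } };

    int main(void) {
        struct Obj o = { .meta = &g_meta, .ctrlCmd = ctrlCmdImpl, .refCount = 3 };
        /* per-class dispatch vs. per-object dispatch */
        printf("%d %d\n", o.meta->vtable.getRefCount(&o), o.ctrlCmd(&o)); /* 3 6 */
        return 0;
    }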
+// Initialize newly constructed object.
+void __nvoc_init__RmClientResource(RmClientResource *pThis) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^3
+    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource; // (res) super^2
+    pThis->__nvoc_pbase_RsClientResource = &pThis->__nvoc_base_RsClientResource; // (clientres) super
+    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResourceCommon; // (rmrescmn) super
+    pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; // (inotify) super^2
+    pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; // (notify) super
+    pThis->__nvoc_pbase_RmClientResource = pThis; // (clires) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__RsClientResource(&pThis->__nvoc_base_RsClientResource);
+    __nvoc_init__RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon);
+    __nvoc_init__Notifier(&pThis->__nvoc_base_Notifier);
+
+    // Pointer(s) to metadata structures(s)
+    pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__RmClientResource.metadata__RsClientResource.metadata__RsResource.metadata__Object; // (obj) super^3
+    pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__RmClientResource.metadata__RsClientResource.metadata__RsResource; // (res) super^2
+    pThis->__nvoc_base_RsClientResource.__nvoc_metadata_ptr = &__nvoc_metadata__RmClientResource.metadata__RsClientResource; // (clientres) super
+    pThis->__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__RmClientResource.metadata__RmResourceCommon; // (rmrescmn) super
+    pThis->__nvoc_base_Notifier.__nvoc_base_INotifier.__nvoc_metadata_ptr = &__nvoc_metadata__RmClientResource.metadata__Notifier.metadata__INotifier; // (inotify) super^2
+    pThis->__nvoc_base_Notifier.__nvoc_metadata_ptr = &__nvoc_metadata__RmClientResource.metadata__Notifier; // (notify) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__RmClientResource; // (clires) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_RmClientResource(pThis);
+}
+
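The __nvoc_pbase_* assignments above are what make staticCast constant-time: each ancestor pointer is precomputed once at init, and a cast is then a single field read (compare __staticCast_RmClientResource in the header below). A self-contained toy of the idea, with invented names rather than the RM types:

    #include <assert.h>

    struct Res    { int dummy; };
    struct Clires {
        struct Res  base_res;    /* embedded base subobject */
        struct Res *pbase_res;   /* precomputed ancestor pointer, filled in by init */
    };

    static void initClires(struct Clires *pThis) {
        /* mirrors the pThis->__nvoc_pbase_* assignments above */
        pThis->pbase_res = &pThis->base_res;
    }

    /* "staticCast" then reduces to a field read -- no search, no RTTI walk */
    #define STATIC_CAST_RES(p) ((p)->pbase_res)

    int main(void) {
        struct Clires c;
        initClires(&c);
        assert(STATIC_CAST_RES(&c) == &c.base_res);
        return 0;
    }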
+NV_STATUS __nvoc_objCreate_RmClientResource(RmClientResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    RmClientResource *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(RmClientResource), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(RmClientResource));
+
+    pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent if there is one unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__RmClientResource(pThis);
+    status = __nvoc_ctor_RmClientResource(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_objCreate_RmClientResource_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_RmClientResource_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
+    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+        portMemSet(pThis, 0, sizeof(RmClientResource));
+    else
+    {
+        portMemFree(pThis);
+        *ppThis = NULL;
+    }
+
+    // coverity[leaked_storage:FALSE]
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_RmClientResource(RmClientResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+    status = __nvoc_objCreate_RmClientResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+    return status;
+}
+
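__nvoc_objCreateDynamic_RmClientResource is a va_list trampoline: it pops the constructor arguments in declaration order and forwards them to the typed creator, which is what lets a generic factory instantiate any NVOC class from varargs. A runnable toy of that pattern, with ints standing in for the CALL_CONTEXT and alloc-params pointers:

    #include <stdarg.h>
    #include <stdio.h>

    /* typed creator, like __nvoc_objCreate_RmClientResource */
    static int createTyped(int **ppOut, int ctx, int params) {
        static int obj;
        obj = ctx + params;
        *ppOut = &obj;
        return 0;
    }

    /* va_list trampoline, like __nvoc_objCreateDynamic_RmClientResource:
     * pop the arguments in declaration order, then forward them. */
    static int createDynamic(int **ppOut, va_list args) {
        int ctx    = va_arg(args, int);
        int params = va_arg(args, int);
        return createTyped(ppOut, ctx, params);
    }

    static int createVarargs(int **ppOut, ...) {
        va_list args;
        va_start(args, ppOut);
        int rc = createDynamic(ppOut, args);
        va_end(args);
        return rc;
    }

    int main(void) {
        int *p = NULL;
        if (createVarargs(&p, 40, 2) == 0)
            printf("%d\n", *p);   /* 42 */
        return 0;
    }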
diff --git a/src/nvidia/generated/g_client_resource_nvoc.h b/src/nvidia/generated/g_client_resource_nvoc.h
new file mode 100644
index 0000000..d7c341a
--- /dev/null
+++ b/src/nvidia/generated/g_client_resource_nvoc.h
@@ -0,0 +1,857 @@
+
+#ifndef _G_CLIENT_RESOURCE_NVOC_H_
+#define _G_CLIENT_RESOURCE_NVOC_H_
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+#include "g_client_resource_nvoc.h"
+
+
+#ifndef _CLIENT_RESOURCE_H_
+#define _CLIENT_RESOURCE_H_
+
+#include "resserv/resserv.h"
+#include "nvoc/prelude.h"
+#include "resserv/rs_client.h"
+#include "rmapi/resource.h"
+#include "rmapi/event.h"
+#include "rmapi/control.h"
+
+#include "ctrl/ctrl0000/ctrl0000gpu.h"
+#include "ctrl/ctrl0000/ctrl0000gpuacct.h"
+#include "ctrl/ctrl0000/ctrl0000gsync.h"
+#include "ctrl/ctrl0000/ctrl0000diag.h"
+#include "ctrl/ctrl0000/ctrl0000event.h"
+#include "ctrl/ctrl0000/ctrl0000nvd.h"
+#include "ctrl/ctrl0000/ctrl0000proc.h"
+#include "ctrl/ctrl0000/ctrl0000syncgpuboost.h"
+#include "ctrl/ctrl0000/ctrl0000vgpu.h"
+#include "ctrl/ctrl0000/ctrl0000client.h"
+
+/* include appropriate os-specific command header */
+#if defined(NV_UNIX) || defined(NV_QNX)
+#include "ctrl/ctrl0000/ctrl0000unix.h"
+#endif
+
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_CLIENT_RESOURCE_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__RmClientResource;
+struct NVOC_METADATA__RsClientResource;
+struct NVOC_METADATA__RmResourceCommon;
+struct NVOC_METADATA__Notifier;
+struct NVOC_VTABLE__RmClientResource;
+
+
+struct RmClientResource {
+
+    // Metadata starts with RTTI structure.
+    union {
+         const struct NVOC_METADATA__RmClientResource *__nvoc_metadata_ptr;
+         const struct NVOC_RTTI *__nvoc_rtti;
+    };
+
+    // Parent (i.e. superclass or base class) objects
+    struct RsClientResource __nvoc_base_RsClientResource;
+    struct RmResourceCommon __nvoc_base_RmResourceCommon;
+    struct Notifier __nvoc_base_Notifier;
+
+    // Ancestor object pointers for `staticCast` feature
+    struct Object *__nvoc_pbase_Object; // obj super^3
+    struct RsResource *__nvoc_pbase_RsResource; // res super^2
+    struct RsClientResource *__nvoc_pbase_RsClientResource; // clientres super
+    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super
+    struct INotifier *__nvoc_pbase_INotifier; // inotify super^2
+    struct Notifier *__nvoc_pbase_Notifier; // notify super
+    struct RmClientResource *__nvoc_pbase_RmClientResource; // clires
+
+    // Vtable with 56 per-object function pointers
+    NV_STATUS (*__cliresCtrlCmdSystemGetCpuInfo__)(struct RmClientResource * /*this*/, NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *); // exported (id=0x102)
+    NV_STATUS (*__cliresCtrlCmdSystemGetFeatures__)(struct RmClientResource * /*this*/, NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS *); // exported (id=0x1f0)
+    NV_STATUS (*__cliresCtrlCmdSystemGetBuildVersionV2__)(struct RmClientResource * /*this*/, NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *); // exported (id=0x13e)
+    NV_STATUS (*__cliresCtrlCmdSystemGetLockTimes__)(struct RmClientResource * /*this*/, NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS *); // exported (id=0x109)
+    NV_STATUS (*__cliresCtrlCmdSystemGetClassList__)(struct RmClientResource * /*this*/, NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *); // exported (id=0x108)
+    NV_STATUS (*__cliresCtrlCmdSystemNotifyEvent__)(struct RmClientResource * /*this*/, NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS *); // exported (id=0x110)
+    NV_STATUS (*__cliresCtrlCmdSystemDebugCtrlRmMsg__)(struct RmClientResource * /*this*/, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS *); // exported (id=0x121)
+    NV_STATUS (*__cliresCtrlCmdSystemGetVgxSystemInfo__)(struct RmClientResource * /*this*/, NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS *); // exported (id=0x133)
+    NV_STATUS (*__cliresCtrlCmdSystemGetPrivilegedStatus__)(struct RmClientResource * /*this*/, NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS *); // exported (id=0x135)
+    NV_STATUS (*__cliresCtrlCmdSystemGetFabricStatus__)(struct RmClientResource * /*this*/, NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS *); // exported (id=0x136)
+    NV_STATUS (*__cliresCtrlCmdSystemGetRmInstanceId__)(struct RmClientResource * /*this*/, NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS *); // exported (id=0x139)
+    NV_STATUS (*__cliresCtrlCmdSystemGetClientDatabaseInfo__)(struct RmClientResource * /*this*/, NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS *); // exported (id=0x13d)
+    NV_STATUS (*__cliresCtrlCmdSystemRmctrlCacheModeCtrl__)(struct RmClientResource * /*this*/, NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_PARAMS *); // exported (id=0x13f)
+    NV_STATUS (*__cliresCtrlCmdClientGetAddrSpaceType__)(struct RmClientResource * /*this*/, NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS *); // exported (id=0xd01)
+    NV_STATUS (*__cliresCtrlCmdClientGetHandleInfo__)(struct RmClientResource * /*this*/, NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS *); // exported (id=0xd02)
+    NV_STATUS (*__cliresCtrlCmdClientGetAccessRights__)(struct RmClientResource * /*this*/, NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS *); // exported (id=0xd03)
+    NV_STATUS (*__cliresCtrlCmdClientSetInheritedSharePolicy__)(struct RmClientResource * /*this*/, NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS *); // exported (id=0xd04)
+    NV_STATUS (*__cliresCtrlCmdClientShareObject__)(struct RmClientResource * /*this*/, NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS *); // exported (id=0xd06)
+    NV_STATUS (*__cliresCtrlCmdClientGetChildHandle__)(struct RmClientResource * /*this*/, NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS *); // exported (id=0xd05)
+    NV_STATUS (*__cliresCtrlCmdObjectsAreDuplicates__)(struct RmClientResource * /*this*/, NV0000_CTRL_CLIENT_OBJECTS_ARE_DUPLICATES_PARAMS *); // exported (id=0xd07)
+    NV_STATUS (*__cliresCtrlCmdGpuGetAttachedIds__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *); // exported (id=0x201)
+    NV_STATUS (*__cliresCtrlCmdGpuGetIdInfo__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *); // exported (id=0x202)
+    NV_STATUS (*__cliresCtrlCmdGpuGetIdInfoV2__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *); // exported (id=0x205)
+    NV_STATUS (*__cliresCtrlCmdGpuGetInitStatus__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *); // exported (id=0x203)
+    NV_STATUS (*__cliresCtrlCmdGpuGetDeviceIds__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS *); // exported (id=0x204)
+    NV_STATUS (*__cliresCtrlCmdGpuGetActiveDeviceIds__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_GET_ACTIVE_DEVICE_IDS_PARAMS *); // exported (id=0x288)
+    NV_STATUS (*__cliresCtrlCmdGpuGetProbedIds__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *); // exported (id=0x214)
+    NV_STATUS (*__cliresCtrlCmdGpuAttachIds__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_ATTACH_IDS_PARAMS *); // exported (id=0x215)
+    NV_STATUS (*__cliresCtrlCmdGpuAsyncAttachId__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_ASYNC_ATTACH_ID_PARAMS *); // exported (id=0x289)
+    NV_STATUS (*__cliresCtrlCmdGpuWaitAttachId__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_WAIT_ATTACH_ID_PARAMS *); // exported (id=0x290)
+    NV_STATUS (*__cliresCtrlCmdGpuDetachIds__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_DETACH_IDS_PARAMS *); // exported (id=0x216)
+    NV_STATUS (*__cliresCtrlCmdGpuGetPciInfo__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS *); // exported (id=0x21b)
+    NV_STATUS (*__cliresCtrlCmdGpuGetUuidInfo__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS *); // exported (id=0x274)
+    NV_STATUS (*__cliresCtrlCmdGpuGetUuidFromGpuId__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS *); // exported (id=0x275)
+    NV_STATUS (*__cliresCtrlCmdGpuModifyGpuDrainState__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS *); // exported (id=0x278)
+    NV_STATUS (*__cliresCtrlCmdGpuQueryGpuDrainState__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS *); // exported (id=0x279)
+    NV_STATUS (*__cliresCtrlCmdGpuGetMemOpEnable__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS *); // exported (id=0x27b)
+    NV_STATUS (*__cliresCtrlCmdGpuDisableNvlinkInit__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS *); // exported (id=0x281)
+    NV_STATUS (*__cliresCtrlCmdLegacyConfig__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS *); // exported (id=0x282)
+    NV_STATUS (*__cliresCtrlCmdPushUcodeImage__)(struct RmClientResource * /*this*/, NV0000_CTRL_GPU_PUSH_UCODE_IMAGE_PARAMS *); // exported (id=0x285)
+    NV_STATUS (*__cliresCtrlCmdSystemGetVrrCookiePresent__)(struct RmClientResource * /*this*/, NV0000_CTRL_SYSTEM_GET_VRR_COOKIE_PRESENT_PARAMS *); // exported (id=0x107)
+    NV_STATUS (*__cliresCtrlCmdGsyncGetAttachedIds__)(struct RmClientResource * /*this*/, NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS *); // exported (id=0x301)
+    NV_STATUS (*__cliresCtrlCmdGsyncGetIdInfo__)(struct RmClientResource * /*this*/, NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS *); // exported (id=0x302)
+    NV_STATUS (*__cliresCtrlCmdEventSetNotification__)(struct RmClientResource * /*this*/, NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS *); // exported (id=0x501)
+    NV_STATUS (*__cliresCtrlCmdEventGetSystemEventData__)(struct RmClientResource * /*this*/, NV0000_CTRL_GET_SYSTEM_EVENT_DATA_PARAMS *); // exported (id=0x502)
+    NV_STATUS (*__cliresCtrlCmdOsUnixExportObjectToFd__)(struct RmClientResource * /*this*/, NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *); // exported (id=0x3d05)
+    NV_STATUS (*__cliresCtrlCmdOsUnixImportObjectFromFd__)(struct RmClientResource * /*this*/, NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *); // exported (id=0x3d06)
+    NV_STATUS (*__cliresCtrlCmdOsUnixGetExportObjectInfo__)(struct RmClientResource * /*this*/, NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *); // exported (id=0x3d08)
+    NV_STATUS (*__cliresCtrlCmdOsUnixCreateExportObjectFd__)(struct RmClientResource * /*this*/, NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *); // exported (id=0x3d0a)
+    NV_STATUS (*__cliresCtrlCmdOsUnixExportObjectsToFd__)(struct RmClientResource * /*this*/, NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *); // exported (id=0x3d0b)
+    NV_STATUS (*__cliresCtrlCmdOsUnixImportObjectsFromFd__)(struct RmClientResource * /*this*/, NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *); // exported (id=0x3d0c)
+    NV_STATUS (*__cliresCtrlCmdOsUnixFlushUserCache__)(struct RmClientResource * /*this*/, NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *); // exported (id=0x3d02)
+    NV_STATUS (*__cliresCtrlCmdSetSubProcessID__)(struct RmClientResource * /*this*/, NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS *); // exported (id=0x901)
+    NV_STATUS (*__cliresCtrlCmdDisableSubProcessUserdIsolation__)(struct RmClientResource * /*this*/, NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS *); // exported (id=0x902)
+    NV_STATUS (*__cliresCtrlCmdSystemNVPCFGetPowerModeInfo__)(struct RmClientResource * /*this*/, NV0000_CTRL_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *); // exported (id=0x13b)
+    NV_STATUS (*__cliresCtrlCmdSystemSyncExternalFabricMgmt__)(struct RmClientResource * /*this*/, NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS *); // exported (id=0x13c)
+};
+
+
+// Vtable with 23 per-class function pointers
+struct NVOC_VTABLE__RmClientResource {
+    NvBool (*__cliresAccessCallback__)(struct RmClientResource * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual override (res) base (clientres)
+    NvBool (*__cliresShareCallback__)(struct RmClientResource * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual override (res) base (clientres)
+    NV_STATUS (*__cliresControl_Prologue__)(struct RmClientResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual override (res) base (clientres)
+    void (*__cliresControl_Epilogue__)(struct RmClientResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual override (res) base (clientres)
+    NvBool (*__cliresCanCopy__)(struct RmClientResource * /*this*/); // virtual inherited (res) base (clientres)
+    NV_STATUS (*__cliresIsDuplicate__)(struct RmClientResource * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (clientres)
+    void (*__cliresPreDestruct__)(struct RmClientResource * /*this*/); // virtual inherited (res) base (clientres)
+    NV_STATUS (*__cliresControl__)(struct RmClientResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (clientres)
+    NV_STATUS (*__cliresControlFilter__)(struct RmClientResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (clientres)
+    NV_STATUS (*__cliresControlSerialization_Prologue__)(struct RmClientResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (clientres)
+    void (*__cliresControlSerialization_Epilogue__)(struct RmClientResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (clientres)
+    NV_STATUS (*__cliresMap__)(struct RmClientResource * /*this*/, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (res) base (clientres)
+    NV_STATUS (*__cliresUnmap__)(struct RmClientResource * /*this*/, struct CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (res) base (clientres)
+    NvBool (*__cliresIsPartialUnmapSupported__)(struct RmClientResource * /*this*/); // inline virtual inherited (res) base (clientres) body
+    NV_STATUS (*__cliresMapTo__)(struct RmClientResource * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (clientres)
+    NV_STATUS (*__cliresUnmapFrom__)(struct RmClientResource * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (clientres)
+    NvU32 (*__cliresGetRefCount__)(struct RmClientResource * /*this*/); // virtual inherited (res) base (clientres)
+    void (*__cliresAddAdditionalDependants__)(struct RsClient *, struct RmClientResource * /*this*/, RsResourceRef *); // virtual inherited (res) base (clientres)
+    PEVENTNOTIFICATION * (*__cliresGetNotificationListPtr__)(struct RmClientResource * /*this*/); // virtual inherited (notify) base (notify)
+    struct NotifShare * (*__cliresGetNotificationShare__)(struct RmClientResource * /*this*/); // virtual inherited (notify) base (notify)
+    void (*__cliresSetNotificationShare__)(struct RmClientResource * /*this*/, struct NotifShare *); // virtual inherited (notify) base (notify)
+    NV_STATUS (*__cliresUnregisterEvent__)(struct RmClientResource * /*this*/, NvHandle, NvHandle, NvHandle, NvHandle); // virtual inherited (notify) base (notify)
+    NV_STATUS (*__cliresGetOrAllocNotifShare__)(struct RmClientResource * /*this*/, NvHandle, NvHandle, struct NotifShare **); // virtual inherited (notify) base (notify)
+};
+
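The wrapper macros below route every call through a *_DISPATCH helper defined later in this header. Sketched here is the shape those helpers take, assuming the convention visible in the _FNPTR macros: per-class virtuals are fetched through the shared metadata, exported controls through the per-object slot. (A hand-written reduction for orientation, not a copy of the generated definitions.)

    // Per-class virtual: the pointer comes from the shared, const metadata.
    static inline NvBool cliresCanCopy_DISPATCH(struct RmClientResource *pResource) {
        return pResource->__nvoc_metadata_ptr->vtable.__cliresCanCopy__(pResource);
    }

    // Exported control: the pointer comes from the object itself.
    static inline NV_STATUS cliresCtrlCmdSystemGetCpuInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *pCpuInfoParams) {
        return pRmCliRes->__cliresCtrlCmdSystemGetCpuInfo__(pRmCliRes, pCpuInfoParams);
    }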
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__RmClientResource {
+    const struct NVOC_RTTI rtti;
+    const struct NVOC_METADATA__RsClientResource metadata__RsClientResource;
+    const struct NVOC_METADATA__RmResourceCommon metadata__RmResourceCommon;
+    const struct NVOC_METADATA__Notifier metadata__Notifier;
+    const struct NVOC_VTABLE__RmClientResource vtable;
+};
+
+#ifndef __NVOC_CLASS_RmClientResource_TYPEDEF__
+#define __NVOC_CLASS_RmClientResource_TYPEDEF__
+typedef struct RmClientResource RmClientResource;
+#endif /* __NVOC_CLASS_RmClientResource_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_RmClientResource
+#define __nvoc_class_id_RmClientResource 0x37a701
+#endif /* __nvoc_class_id_RmClientResource */
+
+// Casting support
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClientResource;
+
+#define __staticCast_RmClientResource(pThis) \
+    ((pThis)->__nvoc_pbase_RmClientResource)
+
+#ifdef __nvoc_client_resource_h_disabled
+#define __dynamicCast_RmClientResource(pThis) ((RmClientResource*) NULL)
+#else //__nvoc_client_resource_h_disabled
+#define __dynamicCast_RmClientResource(pThis) \
+    ((RmClientResource*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmClientResource)))
+#endif //__nvoc_client_resource_h_disabled
+
+NV_STATUS __nvoc_objCreateDynamic_RmClientResource(RmClientResource**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_RmClientResource(RmClientResource**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+#define __objCreate_RmClientResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
+    __nvoc_objCreate_RmClientResource((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
+
+
+// Wrapper macros
+#define cliresAccessCallback_FNPTR(pRmCliRes) pRmCliRes->__nvoc_metadata_ptr->vtable.__cliresAccessCallback__
+#define cliresAccessCallback(pRmCliRes, pInvokingClient, pAllocParams, accessRight) cliresAccessCallback_DISPATCH(pRmCliRes, pInvokingClient, pAllocParams, accessRight)
+#define cliresShareCallback_FNPTR(pRmCliRes) pRmCliRes->__nvoc_metadata_ptr->vtable.__cliresShareCallback__
+#define cliresShareCallback(pRmCliRes, pInvokingClient, pParentRef, pSharePolicy) cliresShareCallback_DISPATCH(pRmCliRes, pInvokingClient, pParentRef, pSharePolicy)
+#define cliresControl_Prologue_FNPTR(pRmCliRes) pRmCliRes->__nvoc_metadata_ptr->vtable.__cliresControl_Prologue__
+#define cliresControl_Prologue(pRmCliRes, pCallContext, pParams) cliresControl_Prologue_DISPATCH(pRmCliRes, pCallContext, pParams)
+#define cliresControl_Epilogue_FNPTR(pRmCliRes) pRmCliRes->__nvoc_metadata_ptr->vtable.__cliresControl_Epilogue__
+#define cliresControl_Epilogue(pRmCliRes, pCallContext, pParams) cliresControl_Epilogue_DISPATCH(pRmCliRes, pCallContext, pParams)
+#define cliresCtrlCmdSystemGetCpuInfo_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemGetCpuInfo__
+#define cliresCtrlCmdSystemGetCpuInfo(pRmCliRes, pCpuInfoParams) cliresCtrlCmdSystemGetCpuInfo_DISPATCH(pRmCliRes, pCpuInfoParams)
+#define cliresCtrlCmdSystemGetFeatures_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemGetFeatures__
+#define cliresCtrlCmdSystemGetFeatures(pRmCliRes, pParams) cliresCtrlCmdSystemGetFeatures_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdSystemGetBuildVersionV2_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemGetBuildVersionV2__
+#define cliresCtrlCmdSystemGetBuildVersionV2(pRmCliRes, pParams) cliresCtrlCmdSystemGetBuildVersionV2_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdSystemGetLockTimes_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemGetLockTimes__
+#define cliresCtrlCmdSystemGetLockTimes(pRmCliRes, pParams) cliresCtrlCmdSystemGetLockTimes_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdSystemGetClassList_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemGetClassList__
+#define cliresCtrlCmdSystemGetClassList(pRmCliRes, pParams) cliresCtrlCmdSystemGetClassList_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdSystemNotifyEvent_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemNotifyEvent__
+#define cliresCtrlCmdSystemNotifyEvent(pRmCliRes, pParams) cliresCtrlCmdSystemNotifyEvent_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdSystemDebugCtrlRmMsg_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemDebugCtrlRmMsg__
+#define cliresCtrlCmdSystemDebugCtrlRmMsg(pRmCliRes, pParams) cliresCtrlCmdSystemDebugCtrlRmMsg_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdSystemGetVgxSystemInfo_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemGetVgxSystemInfo__
+#define cliresCtrlCmdSystemGetVgxSystemInfo(pRmCliRes, pParams) cliresCtrlCmdSystemGetVgxSystemInfo_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdSystemGetPrivilegedStatus_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemGetPrivilegedStatus__
+#define cliresCtrlCmdSystemGetPrivilegedStatus(pRmCliRes, pParams) cliresCtrlCmdSystemGetPrivilegedStatus_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdSystemGetFabricStatus_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemGetFabricStatus__
+#define cliresCtrlCmdSystemGetFabricStatus(pRmCliRes, pParams) cliresCtrlCmdSystemGetFabricStatus_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdSystemGetRmInstanceId_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemGetRmInstanceId__
+#define cliresCtrlCmdSystemGetRmInstanceId(pRmCliRes, pRmInstanceIdParams) cliresCtrlCmdSystemGetRmInstanceId_DISPATCH(pRmCliRes, pRmInstanceIdParams)
+#define cliresCtrlCmdSystemGetClientDatabaseInfo_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemGetClientDatabaseInfo__
+#define cliresCtrlCmdSystemGetClientDatabaseInfo(pRmCliRes, pParams) cliresCtrlCmdSystemGetClientDatabaseInfo_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdSystemRmctrlCacheModeCtrl_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemRmctrlCacheModeCtrl__
+#define cliresCtrlCmdSystemRmctrlCacheModeCtrl(pRmCliRes, pParams) cliresCtrlCmdSystemRmctrlCacheModeCtrl_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdClientGetAddrSpaceType_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdClientGetAddrSpaceType__
+#define cliresCtrlCmdClientGetAddrSpaceType(pRmCliRes, pParams) cliresCtrlCmdClientGetAddrSpaceType_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdClientGetHandleInfo_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdClientGetHandleInfo__
+#define cliresCtrlCmdClientGetHandleInfo(pRmCliRes, pParams) cliresCtrlCmdClientGetHandleInfo_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdClientGetAccessRights_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdClientGetAccessRights__
+#define cliresCtrlCmdClientGetAccessRights(pRmCliRes, pParams) cliresCtrlCmdClientGetAccessRights_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdClientSetInheritedSharePolicy_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdClientSetInheritedSharePolicy__
+#define cliresCtrlCmdClientSetInheritedSharePolicy(pRmCliRes, pParams) cliresCtrlCmdClientSetInheritedSharePolicy_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdClientShareObject_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdClientShareObject__
+#define cliresCtrlCmdClientShareObject(pRmCliRes, pParams) cliresCtrlCmdClientShareObject_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdClientGetChildHandle_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdClientGetChildHandle__
+#define cliresCtrlCmdClientGetChildHandle(pRmCliRes, pParams) cliresCtrlCmdClientGetChildHandle_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdObjectsAreDuplicates_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdObjectsAreDuplicates__
+#define cliresCtrlCmdObjectsAreDuplicates(pRmCliRes, pParams) cliresCtrlCmdObjectsAreDuplicates_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdGpuGetAttachedIds_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuGetAttachedIds__
+#define cliresCtrlCmdGpuGetAttachedIds(pRmCliRes, pGpuAttachedIds) cliresCtrlCmdGpuGetAttachedIds_DISPATCH(pRmCliRes, pGpuAttachedIds)
+#define cliresCtrlCmdGpuGetIdInfo_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuGetIdInfo__
+#define cliresCtrlCmdGpuGetIdInfo(pRmCliRes, pGpuIdInfoParams) cliresCtrlCmdGpuGetIdInfo_DISPATCH(pRmCliRes, pGpuIdInfoParams)
+#define cliresCtrlCmdGpuGetIdInfoV2_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuGetIdInfoV2__
+#define cliresCtrlCmdGpuGetIdInfoV2(pRmCliRes, pGpuIdInfoParams) cliresCtrlCmdGpuGetIdInfoV2_DISPATCH(pRmCliRes, pGpuIdInfoParams)
+#define cliresCtrlCmdGpuGetInitStatus_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuGetInitStatus__
+#define cliresCtrlCmdGpuGetInitStatus(pRmCliRes, pGpuInitStatusParams) cliresCtrlCmdGpuGetInitStatus_DISPATCH(pRmCliRes, pGpuInitStatusParams)
+#define cliresCtrlCmdGpuGetDeviceIds_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuGetDeviceIds__
+#define cliresCtrlCmdGpuGetDeviceIds(pRmCliRes, pDeviceIdsParams) cliresCtrlCmdGpuGetDeviceIds_DISPATCH(pRmCliRes, pDeviceIdsParams)
+#define cliresCtrlCmdGpuGetActiveDeviceIds_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuGetActiveDeviceIds__
+#define cliresCtrlCmdGpuGetActiveDeviceIds(pRmCliRes, pActiveDeviceIdsParams) cliresCtrlCmdGpuGetActiveDeviceIds_DISPATCH(pRmCliRes, pActiveDeviceIdsParams)
+#define cliresCtrlCmdGpuGetProbedIds_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuGetProbedIds__
+#define cliresCtrlCmdGpuGetProbedIds(pRmCliRes, pGpuProbedIds) cliresCtrlCmdGpuGetProbedIds_DISPATCH(pRmCliRes, pGpuProbedIds)
+#define cliresCtrlCmdGpuAttachIds_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuAttachIds__
+#define cliresCtrlCmdGpuAttachIds(pRmCliRes, pGpuAttachIds) cliresCtrlCmdGpuAttachIds_DISPATCH(pRmCliRes, pGpuAttachIds)
+#define cliresCtrlCmdGpuAsyncAttachId_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuAsyncAttachId__
+#define cliresCtrlCmdGpuAsyncAttachId(pRmCliRes, pAsyncAttachIdParams) cliresCtrlCmdGpuAsyncAttachId_DISPATCH(pRmCliRes, pAsyncAttachIdParams)
+#define cliresCtrlCmdGpuWaitAttachId_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuWaitAttachId__
+#define cliresCtrlCmdGpuWaitAttachId(pRmCliRes, pWaitAttachIdParams) cliresCtrlCmdGpuWaitAttachId_DISPATCH(pRmCliRes, pWaitAttachIdParams)
+#define cliresCtrlCmdGpuDetachIds_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuDetachIds__
+#define cliresCtrlCmdGpuDetachIds(pRmCliRes, pGpuDetachIds) cliresCtrlCmdGpuDetachIds_DISPATCH(pRmCliRes, pGpuDetachIds)
+#define cliresCtrlCmdGpuGetPciInfo_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuGetPciInfo__
+#define cliresCtrlCmdGpuGetPciInfo(pRmCliRes, pPciInfoParams) cliresCtrlCmdGpuGetPciInfo_DISPATCH(pRmCliRes, pPciInfoParams)
+#define cliresCtrlCmdGpuGetUuidInfo_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuGetUuidInfo__
+#define cliresCtrlCmdGpuGetUuidInfo(pRmCliRes, pParams) cliresCtrlCmdGpuGetUuidInfo_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdGpuGetUuidFromGpuId_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuGetUuidFromGpuId__
+#define cliresCtrlCmdGpuGetUuidFromGpuId(pRmCliRes, pParams) cliresCtrlCmdGpuGetUuidFromGpuId_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdGpuModifyGpuDrainState_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuModifyGpuDrainState__
+#define cliresCtrlCmdGpuModifyGpuDrainState(pRmCliRes, pParams) cliresCtrlCmdGpuModifyGpuDrainState_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdGpuQueryGpuDrainState_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuQueryGpuDrainState__
+#define cliresCtrlCmdGpuQueryGpuDrainState(pRmCliRes, pParams) cliresCtrlCmdGpuQueryGpuDrainState_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdGpuGetMemOpEnable_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuGetMemOpEnable__
+#define cliresCtrlCmdGpuGetMemOpEnable(pRmCliRes, pMemOpEnableParams) cliresCtrlCmdGpuGetMemOpEnable_DISPATCH(pRmCliRes, pMemOpEnableParams)
+#define cliresCtrlCmdGpuDisableNvlinkInit_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGpuDisableNvlinkInit__
+#define cliresCtrlCmdGpuDisableNvlinkInit(pRmCliRes, pParams) cliresCtrlCmdGpuDisableNvlinkInit_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdLegacyConfig_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdLegacyConfig__
+#define cliresCtrlCmdLegacyConfig(pRmCliRes, pParams) cliresCtrlCmdLegacyConfig_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdPushUcodeImage_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdPushUcodeImage__
+#define cliresCtrlCmdPushUcodeImage(pRmCliRes, pParams) cliresCtrlCmdPushUcodeImage_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdSystemGetVrrCookiePresent_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemGetVrrCookiePresent__
+#define cliresCtrlCmdSystemGetVrrCookiePresent(pRmCliRes, pParams) cliresCtrlCmdSystemGetVrrCookiePresent_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdGsyncGetAttachedIds_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGsyncGetAttachedIds__
+#define cliresCtrlCmdGsyncGetAttachedIds(pRmCliRes, pGsyncAttachedIds) cliresCtrlCmdGsyncGetAttachedIds_DISPATCH(pRmCliRes, pGsyncAttachedIds)
+#define cliresCtrlCmdGsyncGetIdInfo_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdGsyncGetIdInfo__
+#define cliresCtrlCmdGsyncGetIdInfo(pRmCliRes, pGsyncIdInfoParams) cliresCtrlCmdGsyncGetIdInfo_DISPATCH(pRmCliRes, pGsyncIdInfoParams)
+#define cliresCtrlCmdEventSetNotification_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdEventSetNotification__
+#define cliresCtrlCmdEventSetNotification(pRmCliRes, pEventSetNotificationParams) cliresCtrlCmdEventSetNotification_DISPATCH(pRmCliRes, pEventSetNotificationParams)
+#define cliresCtrlCmdEventGetSystemEventData_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdEventGetSystemEventData__
+#define cliresCtrlCmdEventGetSystemEventData(pRmCliRes, pSystemEventDataParams) cliresCtrlCmdEventGetSystemEventData_DISPATCH(pRmCliRes, pSystemEventDataParams)
+#define cliresCtrlCmdOsUnixExportObjectToFd_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdOsUnixExportObjectToFd__
+#define cliresCtrlCmdOsUnixExportObjectToFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixExportObjectToFd_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdOsUnixImportObjectFromFd_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdOsUnixImportObjectFromFd__
+#define cliresCtrlCmdOsUnixImportObjectFromFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixImportObjectFromFd_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdOsUnixGetExportObjectInfo_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdOsUnixGetExportObjectInfo__
+#define cliresCtrlCmdOsUnixGetExportObjectInfo(pRmCliRes, pParams) cliresCtrlCmdOsUnixGetExportObjectInfo_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdOsUnixCreateExportObjectFd_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdOsUnixCreateExportObjectFd__
+#define cliresCtrlCmdOsUnixCreateExportObjectFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixCreateExportObjectFd_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdOsUnixExportObjectsToFd_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdOsUnixExportObjectsToFd__
+#define cliresCtrlCmdOsUnixExportObjectsToFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixExportObjectsToFd_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdOsUnixImportObjectsFromFd_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdOsUnixImportObjectsFromFd__
+#define cliresCtrlCmdOsUnixImportObjectsFromFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixImportObjectsFromFd_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdOsUnixFlushUserCache_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdOsUnixFlushUserCache__
+#define cliresCtrlCmdOsUnixFlushUserCache(pRmCliRes, pAddressSpaceParams) cliresCtrlCmdOsUnixFlushUserCache_DISPATCH(pRmCliRes, pAddressSpaceParams)
+#define cliresCtrlCmdSetSubProcessID_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSetSubProcessID__
+#define cliresCtrlCmdSetSubProcessID(pRmCliRes, pParams) cliresCtrlCmdSetSubProcessID_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdDisableSubProcessUserdIsolation_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdDisableSubProcessUserdIsolation__
+#define cliresCtrlCmdDisableSubProcessUserdIsolation(pRmCliRes, pParams) cliresCtrlCmdDisableSubProcessUserdIsolation_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdSystemNVPCFGetPowerModeInfo_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemNVPCFGetPowerModeInfo__
+#define cliresCtrlCmdSystemNVPCFGetPowerModeInfo(pRmCliRes, pParams) cliresCtrlCmdSystemNVPCFGetPowerModeInfo_DISPATCH(pRmCliRes, pParams)
+#define cliresCtrlCmdSystemSyncExternalFabricMgmt_FNPTR(pRmCliRes) pRmCliRes->__cliresCtrlCmdSystemSyncExternalFabricMgmt__
+#define cliresCtrlCmdSystemSyncExternalFabricMgmt(pRmCliRes, pExtFabricMgmtParams) cliresCtrlCmdSystemSyncExternalFabricMgmt_DISPATCH(pRmCliRes, pExtFabricMgmtParams)
+#define cliresCanCopy_FNPTR(pResource) pResource->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__
+#define cliresCanCopy(pResource) cliresCanCopy_DISPATCH(pResource)
+#define cliresIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__
+#define cliresIsDuplicate(pResource, hMemory, pDuplicate) cliresIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate)
+#define cliresPreDestruct_FNPTR(pResource) pResource->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__
+#define cliresPreDestruct(pResource) cliresPreDestruct_DISPATCH(pResource)
+#define cliresControl_FNPTR(pResource) pResource->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControl__
+#define cliresControl(pResource, pCallContext, pParams) cliresControl_DISPATCH(pResource, pCallContext, pParams)
+#define cliresControlFilter_FNPTR(pResource) pResource->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__
+#define cliresControlFilter(pResource, pCallContext, pParams) cliresControlFilter_DISPATCH(pResource, pCallContext, pParams)
+#define cliresControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlSerialization_Prologue__
+#define
cliresControlSerialization_Prologue(pResource, pCallContext, pParams) cliresControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define cliresControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlSerialization_Epilogue__ +#define cliresControlSerialization_Epilogue(pResource, pCallContext, pParams) cliresControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define cliresMap_FNPTR(pResource) pResource->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMap__ +#define cliresMap(pResource, pCallContext, pParams, pCpuMapping) cliresMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define cliresUnmap_FNPTR(pResource) pResource->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmap__ +#define cliresUnmap(pResource, pCallContext, pCpuMapping) cliresUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define cliresIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define cliresIsPartialUnmapSupported(pResource) cliresIsPartialUnmapSupported_DISPATCH(pResource) +#define cliresMapTo_FNPTR(pResource) pResource->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define cliresMapTo(pResource, pParams) cliresMapTo_DISPATCH(pResource, pParams) +#define cliresUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define cliresUnmapFrom(pResource, pParams) cliresUnmapFrom_DISPATCH(pResource, pParams) +#define cliresGetRefCount_FNPTR(pResource) pResource->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define cliresGetRefCount(pResource) cliresGetRefCount_DISPATCH(pResource) +#define cliresAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define cliresAddAdditionalDependants(pClient, pResource, pReference) cliresAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define cliresGetNotificationListPtr_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationListPtr__ +#define cliresGetNotificationListPtr(pNotifier) cliresGetNotificationListPtr_DISPATCH(pNotifier) +#define cliresGetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationShare__ +#define cliresGetNotificationShare(pNotifier) cliresGetNotificationShare_DISPATCH(pNotifier) +#define cliresSetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifySetNotificationShare__ +#define cliresSetNotificationShare(pNotifier, pNotifShare) cliresSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define cliresUnregisterEvent_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyUnregisterEvent__ +#define cliresUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) cliresUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define cliresGetOrAllocNotifShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetOrAllocNotifShare__ +#define 
cliresGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) cliresGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) + +// Dispatch functions +static inline NvBool cliresAccessCallback_DISPATCH(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pRmCliRes->__nvoc_metadata_ptr->vtable.__cliresAccessCallback__(pRmCliRes, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool cliresShareCallback_DISPATCH(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pRmCliRes->__nvoc_metadata_ptr->vtable.__cliresShareCallback__(pRmCliRes, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS cliresControl_Prologue_DISPATCH(struct RmClientResource *pRmCliRes, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pRmCliRes->__nvoc_metadata_ptr->vtable.__cliresControl_Prologue__(pRmCliRes, pCallContext, pParams); +} + +static inline void cliresControl_Epilogue_DISPATCH(struct RmClientResource *pRmCliRes, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pRmCliRes->__nvoc_metadata_ptr->vtable.__cliresControl_Epilogue__(pRmCliRes, pCallContext, pParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemGetCpuInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *pCpuInfoParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetCpuInfo__(pRmCliRes, pCpuInfoParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemGetFeatures_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetFeatures__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemGetBuildVersionV2_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetBuildVersionV2__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemGetLockTimes_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetLockTimes__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemGetClassList_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetClassList__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemNotifyEvent_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemNotifyEvent__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemDebugCtrlRmMsg_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemDebugCtrlRmMsg__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemGetVgxSystemInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetVgxSystemInfo__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemGetPrivilegedStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS *pParams) { + return 
pRmCliRes->__cliresCtrlCmdSystemGetPrivilegedStatus__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemGetFabricStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetFabricStatus__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemGetRmInstanceId_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS *pRmInstanceIdParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetRmInstanceId__(pRmCliRes, pRmInstanceIdParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemGetClientDatabaseInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetClientDatabaseInfo__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemRmctrlCacheModeCtrl_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemRmctrlCacheModeCtrl__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdClientGetAddrSpaceType_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdClientGetAddrSpaceType__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdClientGetHandleInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdClientGetHandleInfo__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdClientGetAccessRights_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdClientGetAccessRights__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdClientSetInheritedSharePolicy_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdClientSetInheritedSharePolicy__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdClientShareObject_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdClientShareObject__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdClientGetChildHandle_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdClientGetChildHandle__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdObjectsAreDuplicates_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_OBJECTS_ARE_DUPLICATES_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdObjectsAreDuplicates__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdGpuGetAttachedIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuAttachedIds) { + return pRmCliRes->__cliresCtrlCmdGpuGetAttachedIds__(pRmCliRes, pGpuAttachedIds); +} + +static inline NV_STATUS cliresCtrlCmdGpuGetIdInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *pGpuIdInfoParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetIdInfo__(pRmCliRes, pGpuIdInfoParams); +} + +static inline NV_STATUS cliresCtrlCmdGpuGetIdInfoV2_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *pGpuIdInfoParams) { + return 
pRmCliRes->__cliresCtrlCmdGpuGetIdInfoV2__(pRmCliRes, pGpuIdInfoParams); +} + +static inline NV_STATUS cliresCtrlCmdGpuGetInitStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *pGpuInitStatusParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetInitStatus__(pRmCliRes, pGpuInitStatusParams); +} + +static inline NV_STATUS cliresCtrlCmdGpuGetDeviceIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS *pDeviceIdsParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetDeviceIds__(pRmCliRes, pDeviceIdsParams); +} + +static inline NV_STATUS cliresCtrlCmdGpuGetActiveDeviceIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ACTIVE_DEVICE_IDS_PARAMS *pActiveDeviceIdsParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetActiveDeviceIds__(pRmCliRes, pActiveDeviceIdsParams); +} + +static inline NV_STATUS cliresCtrlCmdGpuGetProbedIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds) { + return pRmCliRes->__cliresCtrlCmdGpuGetProbedIds__(pRmCliRes, pGpuProbedIds); +} + +static inline NV_STATUS cliresCtrlCmdGpuAttachIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_ATTACH_IDS_PARAMS *pGpuAttachIds) { + return pRmCliRes->__cliresCtrlCmdGpuAttachIds__(pRmCliRes, pGpuAttachIds); +} + +static inline NV_STATUS cliresCtrlCmdGpuAsyncAttachId_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_ASYNC_ATTACH_ID_PARAMS *pAsyncAttachIdParams) { + return pRmCliRes->__cliresCtrlCmdGpuAsyncAttachId__(pRmCliRes, pAsyncAttachIdParams); +} + +static inline NV_STATUS cliresCtrlCmdGpuWaitAttachId_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_WAIT_ATTACH_ID_PARAMS *pWaitAttachIdParams) { + return pRmCliRes->__cliresCtrlCmdGpuWaitAttachId__(pRmCliRes, pWaitAttachIdParams); +} + +static inline NV_STATUS cliresCtrlCmdGpuDetachIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DETACH_IDS_PARAMS *pGpuDetachIds) { + return pRmCliRes->__cliresCtrlCmdGpuDetachIds__(pRmCliRes, pGpuDetachIds); +} + +static inline NV_STATUS cliresCtrlCmdGpuGetPciInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS *pPciInfoParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetPciInfo__(pRmCliRes, pPciInfoParams); +} + +static inline NV_STATUS cliresCtrlCmdGpuGetUuidInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetUuidInfo__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdGpuGetUuidFromGpuId_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetUuidFromGpuId__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdGpuModifyGpuDrainState_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdGpuModifyGpuDrainState__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdGpuQueryGpuDrainState_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdGpuQueryGpuDrainState__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdGpuGetMemOpEnable_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS *pMemOpEnableParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetMemOpEnable__(pRmCliRes, pMemOpEnableParams); +} + +static inline NV_STATUS 
cliresCtrlCmdGpuDisableNvlinkInit_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdGpuDisableNvlinkInit__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdLegacyConfig_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdLegacyConfig__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdPushUcodeImage_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_PUSH_UCODE_IMAGE_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdPushUcodeImage__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemGetVrrCookiePresent_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_VRR_COOKIE_PRESENT_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetVrrCookiePresent__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdGsyncGetAttachedIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS *pGsyncAttachedIds) { + return pRmCliRes->__cliresCtrlCmdGsyncGetAttachedIds__(pRmCliRes, pGsyncAttachedIds); +} + +static inline NV_STATUS cliresCtrlCmdGsyncGetIdInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS *pGsyncIdInfoParams) { + return pRmCliRes->__cliresCtrlCmdGsyncGetIdInfo__(pRmCliRes, pGsyncIdInfoParams); +} + +static inline NV_STATUS cliresCtrlCmdEventSetNotification_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pEventSetNotificationParams) { + return pRmCliRes->__cliresCtrlCmdEventSetNotification__(pRmCliRes, pEventSetNotificationParams); +} + +static inline NV_STATUS cliresCtrlCmdEventGetSystemEventData_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GET_SYSTEM_EVENT_DATA_PARAMS *pSystemEventDataParams) { + return pRmCliRes->__cliresCtrlCmdEventGetSystemEventData__(pRmCliRes, pSystemEventDataParams); +} + +static inline NV_STATUS cliresCtrlCmdOsUnixExportObjectToFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdOsUnixExportObjectToFd__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdOsUnixImportObjectFromFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdOsUnixImportObjectFromFd__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdOsUnixGetExportObjectInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdOsUnixGetExportObjectInfo__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdOsUnixCreateExportObjectFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdOsUnixCreateExportObjectFd__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdOsUnixExportObjectsToFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdOsUnixExportObjectsToFd__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdOsUnixImportObjectsFromFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdOsUnixImportObjectsFromFd__(pRmCliRes, pParams); +} + +static inline 
NV_STATUS cliresCtrlCmdOsUnixFlushUserCache_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *pAddressSpaceParams) { + return pRmCliRes->__cliresCtrlCmdOsUnixFlushUserCache__(pRmCliRes, pAddressSpaceParams); +} + +static inline NV_STATUS cliresCtrlCmdSetSubProcessID_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSetSubProcessID__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdDisableSubProcessUserdIsolation_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdDisableSubProcessUserdIsolation__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemNVPCFGetPowerModeInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemNVPCFGetPowerModeInfo__(pRmCliRes, pParams); +} + +static inline NV_STATUS cliresCtrlCmdSystemSyncExternalFabricMgmt_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS *pExtFabricMgmtParams) { + return pRmCliRes->__cliresCtrlCmdSystemSyncExternalFabricMgmt__(pRmCliRes, pExtFabricMgmtParams); +} + +static inline NvBool cliresCanCopy_DISPATCH(struct RmClientResource *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__cliresCanCopy__(pResource); +} + +static inline NV_STATUS cliresIsDuplicate_DISPATCH(struct RmClientResource *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__cliresIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void cliresPreDestruct_DISPATCH(struct RmClientResource *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__cliresPreDestruct__(pResource); +} + +static inline NV_STATUS cliresControl_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__cliresControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS cliresControlFilter_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__cliresControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS cliresControlSerialization_Prologue_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__cliresControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void cliresControlSerialization_Epilogue_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__cliresControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS cliresMap_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__cliresMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS cliresUnmap_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return 
pResource->__nvoc_metadata_ptr->vtable.__cliresUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool cliresIsPartialUnmapSupported_DISPATCH(struct RmClientResource *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__cliresIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS cliresMapTo_DISPATCH(struct RmClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__cliresMapTo__(pResource, pParams); +} + +static inline NV_STATUS cliresUnmapFrom_DISPATCH(struct RmClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__cliresUnmapFrom__(pResource, pParams); +} + +static inline NvU32 cliresGetRefCount_DISPATCH(struct RmClientResource *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__cliresGetRefCount__(pResource); +} + +static inline void cliresAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct RmClientResource *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__cliresAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline PEVENTNOTIFICATION * cliresGetNotificationListPtr_DISPATCH(struct RmClientResource *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__cliresGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare * cliresGetNotificationShare_DISPATCH(struct RmClientResource *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__cliresGetNotificationShare__(pNotifier); +} + +static inline void cliresSetNotificationShare_DISPATCH(struct RmClientResource *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvoc_metadata_ptr->vtable.__cliresSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS cliresUnregisterEvent_DISPATCH(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvoc_metadata_ptr->vtable.__cliresUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS cliresGetOrAllocNotifShare_DISPATCH(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__nvoc_metadata_ptr->vtable.__cliresGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NvBool cliresAccessCallback_IMPL(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); + +NvBool cliresShareCallback_IMPL(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); + +NV_STATUS cliresControl_Prologue_IMPL(struct RmClientResource *pRmCliRes, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +void cliresControl_Epilogue_IMPL(struct RmClientResource *pRmCliRes, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +NV_STATUS cliresCtrlCmdSystemGetCpuInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *pCpuInfoParams); + +NV_STATUS cliresCtrlCmdSystemGetFeatures_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdSystemGetBuildVersionV2_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *pParams); + +NV_STATUS 
cliresCtrlCmdSystemGetLockTimes_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdSystemGetClassList_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdSystemNotifyEvent_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdSystemDebugCtrlRmMsg_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdSystemGetVgxSystemInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdSystemGetPrivilegedStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdSystemGetFabricStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdSystemGetRmInstanceId_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS *pRmInstanceIdParams); + +NV_STATUS cliresCtrlCmdSystemGetClientDatabaseInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdSystemRmctrlCacheModeCtrl_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdClientGetAddrSpaceType_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdClientGetHandleInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdClientGetAccessRights_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdClientSetInheritedSharePolicy_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdClientShareObject_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdClientGetChildHandle_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdObjectsAreDuplicates_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_OBJECTS_ARE_DUPLICATES_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdGpuGetAttachedIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuAttachedIds); + +NV_STATUS cliresCtrlCmdGpuGetIdInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *pGpuIdInfoParams); + +NV_STATUS cliresCtrlCmdGpuGetIdInfoV2_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *pGpuIdInfoParams); + +NV_STATUS cliresCtrlCmdGpuGetInitStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *pGpuInitStatusParams); + +NV_STATUS cliresCtrlCmdGpuGetDeviceIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS *pDeviceIdsParams); + +NV_STATUS cliresCtrlCmdGpuGetActiveDeviceIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ACTIVE_DEVICE_IDS_PARAMS *pActiveDeviceIdsParams); + +NV_STATUS cliresCtrlCmdGpuGetProbedIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds); + +NV_STATUS cliresCtrlCmdGpuAttachIds_IMPL(struct RmClientResource *pRmCliRes, 
NV0000_CTRL_GPU_ATTACH_IDS_PARAMS *pGpuAttachIds); + +NV_STATUS cliresCtrlCmdGpuAsyncAttachId_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_ASYNC_ATTACH_ID_PARAMS *pAsyncAttachIdParams); + +NV_STATUS cliresCtrlCmdGpuWaitAttachId_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_WAIT_ATTACH_ID_PARAMS *pWaitAttachIdParams); + +NV_STATUS cliresCtrlCmdGpuDetachIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DETACH_IDS_PARAMS *pGpuDetachIds); + +NV_STATUS cliresCtrlCmdGpuGetPciInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS *pPciInfoParams); + +NV_STATUS cliresCtrlCmdGpuGetUuidInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdGpuGetUuidFromGpuId_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdGpuModifyGpuDrainState_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdGpuQueryGpuDrainState_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdGpuGetMemOpEnable_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS *pMemOpEnableParams); + +NV_STATUS cliresCtrlCmdGpuDisableNvlinkInit_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdLegacyConfig_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdPushUcodeImage_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_PUSH_UCODE_IMAGE_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdSystemGetVrrCookiePresent_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_VRR_COOKIE_PRESENT_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdGsyncGetAttachedIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS *pGsyncAttachedIds); + +NV_STATUS cliresCtrlCmdGsyncGetIdInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS *pGsyncIdInfoParams); + +NV_STATUS cliresCtrlCmdEventSetNotification_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pEventSetNotificationParams); + +NV_STATUS cliresCtrlCmdEventGetSystemEventData_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GET_SYSTEM_EVENT_DATA_PARAMS *pSystemEventDataParams); + +NV_STATUS cliresCtrlCmdOsUnixExportObjectToFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdOsUnixImportObjectFromFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdOsUnixGetExportObjectInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdOsUnixCreateExportObjectFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdOsUnixExportObjectsToFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdOsUnixImportObjectsFromFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdOsUnixFlushUserCache_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *pAddressSpaceParams); + 
+NV_STATUS cliresCtrlCmdSetSubProcessID_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdDisableSubProcessUserdIsolation_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdSystemNVPCFGetPowerModeInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams); + +NV_STATUS cliresCtrlCmdSystemSyncExternalFabricMgmt_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS *pExtFabricMgmtParams); + +NV_STATUS cliresConstruct_IMPL(struct RmClientResource *arg_pRmCliRes, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_cliresConstruct(arg_pRmCliRes, arg_pCallContext, arg_pParams) cliresConstruct_IMPL(arg_pRmCliRes, arg_pCallContext, arg_pParams) +void cliresDestruct_IMPL(struct RmClientResource *pRmCliRes); + +#define __nvoc_cliresDestruct(pRmCliRes) cliresDestruct_IMPL(pRmCliRes) +#undef PRIVATE_FIELD + + +NV_STATUS CliGetSystemP2pCaps(NvU32 *gpuIds, + NvU32 gpuCount, + NvU32 *p2pCaps, + NvU32 *p2pOptimalReadCEs, + NvU32 *p2pOptimalWriteCEs, + NvU8 *p2pCapsStatus, + NvU32 *pBusPeerIds, + NvU32 *pBusEgmPeerIds); + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_CLIENT_RESOURCE_NVOC_H_ diff --git a/src/nvidia/generated/g_code_coverage_mgr_nvoc.c b/src/nvidia/generated/g_code_coverage_mgr_nvoc.c new file mode 100644 index 0000000..439237e --- /dev/null +++ b/src/nvidia/generated/g_code_coverage_mgr_nvoc.c @@ -0,0 +1,204 @@ +#define NVOC_CODE_COVERAGE_MGR_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_code_coverage_mgr_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x62cbfb = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_CodeCoverageManager; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +// Forward declarations for CodeCoverageManager +void __nvoc_init__Object(Object*); +void __nvoc_init__CodeCoverageManager(CodeCoverageManager*); +void __nvoc_init_funcTable_CodeCoverageManager(CodeCoverageManager*); +NV_STATUS __nvoc_ctor_CodeCoverageManager(CodeCoverageManager*); +void __nvoc_init_dataField_CodeCoverageManager(CodeCoverageManager*); +void __nvoc_dtor_CodeCoverageManager(CodeCoverageManager*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__CodeCoverageManager; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__CodeCoverageManager; + +// Down-thunk(s) to bridge CodeCoverageManager methods from ancestors (if any) + +// Up-thunk(s) to bridge CodeCoverageManager methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_CodeCoverageManager = +{ + /*classInfo=*/ { + /*size=*/ sizeof(CodeCoverageManager), + /*classId=*/ classId(CodeCoverageManager), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "CodeCoverageManager", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_CodeCoverageManager, + /*pCastInfo=*/ &__nvoc_castinfo__CodeCoverageManager, + /*pExportInfo=*/ 
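For orientation, the generated header above repeats one three-layer shape per method: a `_FNPTR` accessor macro, a `_DISPATCH` inline that indirects through a function pointer, and a `_IMPL` that supplies the body. A minimal, self-contained C sketch of that shape follows; the `Foo` type and all names in it are hypothetical stand-ins, not part of the patch:

#include <stdio.h>

typedef struct Foo Foo;

/* Per-class vtable reached through a shared metadata pointer, mirroring
 * the __nvoc_metadata_ptr->vtable.__clires*__ chains above. */
struct FooVtable {
    int (*__fooGetValue__)(Foo *);
};

struct FooMetadata {
    struct FooVtable vtable;
};

struct Foo {
    const struct FooMetadata *__nvoc_metadata_ptr;
    int value;
};

/* _IMPL: the concrete method body the vtable slot points at. */
static int fooGetValue_IMPL(Foo *pFoo) {
    return pFoo->value;
}

/* _DISPATCH: virtual call through the metadata vtable. */
static inline int fooGetValue_DISPATCH(Foo *pFoo) {
    return pFoo->__nvoc_metadata_ptr->vtable.__fooGetValue__(pFoo);
}

/* Call-site macros, mirroring the _FNPTR / plain-name pairs above. */
#define fooGetValue_FNPTR(pFoo) ((pFoo)->__nvoc_metadata_ptr->vtable.__fooGetValue__)
#define fooGetValue(pFoo) fooGetValue_DISPATCH(pFoo)

static const struct FooMetadata g_fooMetadata = {
    .vtable = { .__fooGetValue__ = fooGetValue_IMPL },
};

int main(void) {
    Foo f = { .__nvoc_metadata_ptr = &g_fooMetadata, .value = 42 };
    printf("%d\n", fooGetValue(&f));   /* prints 42 via the vtable slot */
    return 0;
}

Keeping the vtable in shared per-class metadata rather than in each object means every instance carries a single pointer, and overriding a method for a subclass only requires pointing that metadata at a different table.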
diff --git a/src/nvidia/generated/g_code_coverage_mgr_nvoc.c b/src/nvidia/generated/g_code_coverage_mgr_nvoc.c
new file mode 100644
index 0000000..439237e
--- /dev/null
+++ b/src/nvidia/generated/g_code_coverage_mgr_nvoc.c
@@ -0,0 +1,204 @@
+#define NVOC_CODE_COVERAGE_MGR_H_PRIVATE_ACCESS_ALLOWED
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+#include "nvtypes.h"
+#include "nvport/nvport.h"
+#include "nvport/inline/util_valist.h"
+#include "utils/nvassert.h"
+#include "g_code_coverage_mgr_nvoc.h"
+
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check__0x62cbfb = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_CodeCoverageManager;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+
+// Forward declarations for CodeCoverageManager
+void __nvoc_init__Object(Object*);
+void __nvoc_init__CodeCoverageManager(CodeCoverageManager*);
+void __nvoc_init_funcTable_CodeCoverageManager(CodeCoverageManager*);
+NV_STATUS __nvoc_ctor_CodeCoverageManager(CodeCoverageManager*);
+void __nvoc_init_dataField_CodeCoverageManager(CodeCoverageManager*);
+void __nvoc_dtor_CodeCoverageManager(CodeCoverageManager*);
+
+// Structures used within RTTI (run-time type information)
+extern const struct NVOC_CASTINFO __nvoc_castinfo__CodeCoverageManager;
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info__CodeCoverageManager;
+
+// Down-thunk(s) to bridge CodeCoverageManager methods from ancestors (if any)
+
+// Up-thunk(s) to bridge CodeCoverageManager methods to ancestors (if any)
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_CodeCoverageManager =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(CodeCoverageManager),
+        /*classId=*/ classId(CodeCoverageManager),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "CodeCoverageManager",
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_CodeCoverageManager,
+    /*pCastInfo=*/ &__nvoc_castinfo__CodeCoverageManager,
+    /*pExportInfo=*/ &__nvoc_export_info__CodeCoverageManager
+};
+
+
+// Metadata with per-class RTTI with ancestor(s)
+static const struct NVOC_METADATA__CodeCoverageManager __nvoc_metadata__CodeCoverageManager = {
+    .rtti.pClassDef = &__nvoc_class_def_CodeCoverageManager, // (codecovmgr) this
+    .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_CodeCoverageManager,
+    .rtti.offset = 0,
+    .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super
+    .metadata__Object.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__Object.rtti.offset = NV_OFFSETOF(CodeCoverageManager, __nvoc_base_Object),
+};
+
+
+// Dynamic down-casting information
+const struct NVOC_CASTINFO __nvoc_castinfo__CodeCoverageManager = {
+    .numRelatives = 2,
+    .relatives = {
+        &__nvoc_metadata__CodeCoverageManager.rtti, // [0]: (codecovmgr) this
+        &__nvoc_metadata__CodeCoverageManager.metadata__Object.rtti, // [1]: (obj) super
+    }
+};
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__CodeCoverageManager =
+{
+    /*numEntries=*/ 0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_Object(Object*);
+void __nvoc_dtor_CodeCoverageManager(CodeCoverageManager *pThis) {
+    __nvoc_codecovmgrDestruct(pThis);
+    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_CodeCoverageManager(CodeCoverageManager *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_Object(Object* );
+NV_STATUS __nvoc_ctor_CodeCoverageManager(CodeCoverageManager *pThis) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
+    if (status != NV_OK) goto __nvoc_ctor_CodeCoverageManager_fail_Object;
+    __nvoc_init_dataField_CodeCoverageManager(pThis);
+
+    status = __nvoc_codecovmgrConstruct(pThis);
+    if (status != NV_OK) goto __nvoc_ctor_CodeCoverageManager_fail__init;
+    goto __nvoc_ctor_CodeCoverageManager_exit; // Success
+
+__nvoc_ctor_CodeCoverageManager_fail__init:
+    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
+__nvoc_ctor_CodeCoverageManager_fail_Object:
+__nvoc_ctor_CodeCoverageManager_exit:
+
+    return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_CodeCoverageManager_1(CodeCoverageManager *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+} // End __nvoc_init_funcTable_CodeCoverageManager_1
+
+
+// Initialize vtable(s): Nothing to do for empty vtables
+void __nvoc_init_funcTable_CodeCoverageManager(CodeCoverageManager *pThis) {
+    __nvoc_init_funcTable_CodeCoverageManager_1(pThis);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__CodeCoverageManager(CodeCoverageManager *pThis) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super
+    pThis->__nvoc_pbase_CodeCoverageManager = pThis; // (codecovmgr) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__Object(&pThis->__nvoc_base_Object);
+
+    // Pointer(s) to metadata structures(s)
+    pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__CodeCoverageManager.metadata__Object; // (obj) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__CodeCoverageManager; // (codecovmgr) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_CodeCoverageManager(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_CodeCoverageManager(CodeCoverageManager **ppThis, Dynamic *pParent, NvU32 createFlags)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    CodeCoverageManager *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(CodeCoverageManager), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(CodeCoverageManager));
+
+    pThis->__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent if there is one unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__CodeCoverageManager(pThis);
+    status = __nvoc_ctor_CodeCoverageManager(pThis);
+    if (status != NV_OK) goto __nvoc_objCreate_CodeCoverageManager_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_CodeCoverageManager_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
+    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+        portMemSet(pThis, 0, sizeof(CodeCoverageManager));
+    else
+    {
+        portMemFree(pThis);
+        *ppThis = NULL;
+    }
+
+    // coverity[leaked_storage:FALSE]
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_CodeCoverageManager(CodeCoverageManager **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+
+    status = __nvoc_objCreate_CodeCoverageManager(ppThis, pParent, createFlags);
+
+    return status;
+}
+
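A hedged usage sketch of the creation path generated above; the caller function, its `pParent` argument, and the GPU-instance value 0 are illustrative assumptions, not code from the patch. It uses only entry points declared in this patch (`__objCreate_CodeCoverageManager` and the `codecovmgr*` wrappers from g_code_coverage_mgr_nvoc.h, shown next):

static NV_STATUS exampleCreateAndUseCodeCoverageManager(Dynamic *pParent)
{
    CodeCoverageManager *pCodeCovMgr = NULL;

    // Expands to __nvoc_objCreate_CodeCoverageManager(): allocate, zero,
    // link under pParent, set up RTTI/metadata, then run the constructor.
    NV_STATUS status = __objCreate_CodeCoverageManager(&pCodeCovMgr, pParent, 0);
    if (status != NV_OK)
        return status;

    // Register, then fetch, the task_rm coverage buffer for GPU instance 0.
    codecovmgrRegisterCoverageBuffer(pCodeCovMgr, GFID_TASK_RM, 0,
                                     BULLSEYE_TASK_RM_COVERAGE_SIZE);
    return (codecovmgrGetCoverageBuffer(pCodeCovMgr, GFID_TASK_RM, 0) != NULL)
               ? NV_OK : NV_ERR_NO_MEMORY;
}

Note the cleanup path in the generated `__nvoc_objCreate_*` above: on constructor failure the object is unlinked from its parent and freed there, so the caller only has to check the returned status.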
diff --git a/src/nvidia/generated/g_code_coverage_mgr_nvoc.h b/src/nvidia/generated/g_code_coverage_mgr_nvoc.h
new file mode 100644
index 0000000..b7552c4
--- /dev/null
+++ b/src/nvidia/generated/g_code_coverage_mgr_nvoc.h
@@ -0,0 +1,226 @@
+
+#ifndef _G_CODE_COVERAGE_MGR_NVOC_H_
+#define _G_CODE_COVERAGE_MGR_NVOC_H_
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+#include "g_code_coverage_mgr_nvoc.h"
+
+#ifndef __CODE_COVERAGE_MGR_H__
+#define __CODE_COVERAGE_MGR_H__
+
+#include "gpu/gpu.h"
+#include "core/core.h"
+
+#define GFID_TASK_RM 0
+#define BULLSEYE_TASK_VGPU_COVERAGE_SIZE (32 << 10)
+#define BULLSEYE_TASK_RM_COVERAGE_SIZE (3 << 20)
+#define MAX_PARTITIONS_WITH_CODE_COVERAGE (32)
+#define BULLSEYE_GSP_RM_COVERAGE_SIZE \
+    (BULLSEYE_TASK_RM_COVERAGE_SIZE) + (MAX_PARTITIONS_WITH_CODE_COVERAGE * BULLSEYE_TASK_VGPU_COVERAGE_SIZE)
+
+typedef struct
+{
+    NvLength length;
+    NvU8 *dataBuffer;
+} GSP_BULLSEYE_OUTPUT_BUFFER;
+
+typedef struct
+{
+    NvU32 gfid; // 0 for task_rm, 1-32 (inclusive) for task_vgpu objects
+    NvU32 gpuInstance;
+    NvU8 *pCoverageData;
+    NvLength bufferLength;
+} GSP_BULLSEYE_COVERAGE_DATA;
+
+// list storing 33 (or more) coverage buffers for task_rm and task_vgpu
+MAKE_LIST(GSP_BULLSEYE_COVERAGE_DATA_LIST, GSP_BULLSEYE_COVERAGE_DATA);
+
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_CODE_COVERAGE_MGR_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+
+
+// Metadata with per-class RTTI with ancestor(s)
+struct NVOC_METADATA__CodeCoverageManager;
+struct NVOC_METADATA__Object;
+
+
+struct CodeCoverageManager {
+
+    // Metadata starts with RTTI structure.
+    union {
+         const struct NVOC_METADATA__CodeCoverageManager *__nvoc_metadata_ptr;
+         const struct NVOC_RTTI *__nvoc_rtti;
+    };
+
+    // Parent (i.e. superclass or base class) objects
+    struct Object __nvoc_base_Object;
+
+    // Ancestor object pointers for `staticCast` feature
+    struct Object *__nvoc_pbase_Object; // obj super
+    struct CodeCoverageManager *__nvoc_pbase_CodeCoverageManager; // codecovmgr
+
+    // Data members
+    GSP_BULLSEYE_COVERAGE_DATA_LIST covDataList;
+    GSP_BULLSEYE_OUTPUT_BUFFER bullseyeOutputBuffer;
+};
+
+
+// Metadata with per-class RTTI with ancestor(s)
+struct NVOC_METADATA__CodeCoverageManager {
+    const struct NVOC_RTTI rtti;
+    const struct NVOC_METADATA__Object metadata__Object;
+};
+
+#ifndef __NVOC_CLASS_CodeCoverageManager_TYPEDEF__
+#define __NVOC_CLASS_CodeCoverageManager_TYPEDEF__
+typedef struct CodeCoverageManager CodeCoverageManager;
+#endif /* __NVOC_CLASS_CodeCoverageManager_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_CodeCoverageManager
+#define __nvoc_class_id_CodeCoverageManager 0x62cbfb
+#endif /* __nvoc_class_id_CodeCoverageManager */
+
+// Casting support
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_CodeCoverageManager;
+
+#define __staticCast_CodeCoverageManager(pThis) \
+    ((pThis)->__nvoc_pbase_CodeCoverageManager)
+
+#ifdef __nvoc_code_coverage_mgr_h_disabled
+#define __dynamicCast_CodeCoverageManager(pThis) ((CodeCoverageManager*) NULL)
+#else //__nvoc_code_coverage_mgr_h_disabled
+#define __dynamicCast_CodeCoverageManager(pThis) \
+    ((CodeCoverageManager*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(CodeCoverageManager)))
+#endif //__nvoc_code_coverage_mgr_h_disabled
+
+NV_STATUS __nvoc_objCreateDynamic_CodeCoverageManager(CodeCoverageManager**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_CodeCoverageManager(CodeCoverageManager**, Dynamic*, NvU32);
+#define __objCreate_CodeCoverageManager(ppNewObj, pParent, createFlags) \
+    __nvoc_objCreate_CodeCoverageManager((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
+
+
+// Wrapper macros
+
+// Dispatch functions
+NV_STATUS codecovmgrConstruct_IMPL(struct CodeCoverageManager *arg_pCodeCovMgr);
+
+#define __nvoc_codecovmgrConstruct(arg_pCodeCovMgr) codecovmgrConstruct_IMPL(arg_pCodeCovMgr)
+void codecovmgrDestruct_IMPL(struct CodeCoverageManager *pCodeCovMgr);
+
+#define __nvoc_codecovmgrDestruct(pCodeCovMgr) codecovmgrDestruct_IMPL(pCodeCovMgr)
+GSP_BULLSEYE_COVERAGE_DATA *codecovmgrGetCoverageNode_IMPL(struct CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance);

+#ifdef __nvoc_code_coverage_mgr_h_disabled
+static inline GSP_BULLSEYE_COVERAGE_DATA *codecovmgrGetCoverageNode(struct CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance) {
+    NV_ASSERT_FAILED_PRECOMP("CodeCoverageManager was disabled!");
+    return NULL;
+}
+#else //__nvoc_code_coverage_mgr_h_disabled
+#define codecovmgrGetCoverageNode(pCodeCovMgr, gfid, gpuInstance) codecovmgrGetCoverageNode_IMPL(pCodeCovMgr, gfid, gpuInstance)
+#endif //__nvoc_code_coverage_mgr_h_disabled
+
+NvU8 *codecovmgrGetCoverageBuffer_IMPL(struct CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance);
+
+#ifdef __nvoc_code_coverage_mgr_h_disabled
+static inline NvU8 *codecovmgrGetCoverageBuffer(struct CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance) {
+    NV_ASSERT_FAILED_PRECOMP("CodeCoverageManager was disabled!");
+    return NULL;
+}
+#else //__nvoc_code_coverage_mgr_h_disabled
+#define codecovmgrGetCoverageBuffer(pCodeCovMgr, gfid, gpuInstance) codecovmgrGetCoverageBuffer_IMPL(pCodeCovMgr, gfid, gpuInstance)
+#endif //__nvoc_code_coverage_mgr_h_disabled
+
+void codecovmgrMergeCoverage_IMPL(struct CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance, NvU8 *pSysmemBuffer);
+
+#ifdef __nvoc_code_coverage_mgr_h_disabled
+static inline void codecovmgrMergeCoverage(struct CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance, NvU8 *pSysmemBuffer) {
+    NV_ASSERT_FAILED_PRECOMP("CodeCoverageManager was disabled!");
+}
+#else //__nvoc_code_coverage_mgr_h_disabled
+#define codecovmgrMergeCoverage(pCodeCovMgr, gfid, gpuInstance, pSysmemBuffer) codecovmgrMergeCoverage_IMPL(pCodeCovMgr, gfid, gpuInstance, pSysmemBuffer)
+#endif //__nvoc_code_coverage_mgr_h_disabled
+
+void codecovmgrResetCoverage_IMPL(struct CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance);
+
+#ifdef __nvoc_code_coverage_mgr_h_disabled
+static inline void codecovmgrResetCoverage(struct CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance) {
+    NV_ASSERT_FAILED_PRECOMP("CodeCoverageManager was disabled!");
+}
+#else //__nvoc_code_coverage_mgr_h_disabled
+#define codecovmgrResetCoverage(pCodeCovMgr, gfid, gpuInstance) codecovmgrResetCoverage_IMPL(pCodeCovMgr, gfid, gpuInstance)
+#endif //__nvoc_code_coverage_mgr_h_disabled
+
+void codecovmgrRegisterCoverageBuffer_IMPL(struct CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance, NvU64 bufferSize);
+
+#ifdef __nvoc_code_coverage_mgr_h_disabled
+static inline void codecovmgrRegisterCoverageBuffer(struct CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance, NvU64 bufferSize) {
+    NV_ASSERT_FAILED_PRECOMP("CodeCoverageManager was disabled!");
+}
+#else //__nvoc_code_coverage_mgr_h_disabled
+#define codecovmgrRegisterCoverageBuffer(pCodeCovMgr, gfid, gpuInstance, bufferSize) codecovmgrRegisterCoverageBuffer_IMPL(pCodeCovMgr, gfid, gpuInstance, bufferSize)
+#endif //__nvoc_code_coverage_mgr_h_disabled
+
+void codecovmgrDeregisterCoverageBuffer_IMPL(struct CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance);
+
+#ifdef __nvoc_code_coverage_mgr_h_disabled
+static inline void codecovmgrDeregisterCoverageBuffer(struct CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance) {
+    NV_ASSERT_FAILED_PRECOMP("CodeCoverageManager was disabled!");
+}
+#else //__nvoc_code_coverage_mgr_h_disabled
+#define codecovmgrDeregisterCoverageBuffer(pCodeCovMgr, gfid, gpuInstance) codecovmgrDeregisterCoverageBuffer_IMPL(pCodeCovMgr, gfid, gpuInstance)
+#endif //__nvoc_code_coverage_mgr_h_disabled
+
+#undef PRIVATE_FIELD
+
+
+#endif //__CODE_COVERAGE_MGR_H__
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _G_CODE_COVERAGE_MGR_NVOC_H_
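The header above repeats the "disabled class" stub pattern for every wrapper: when the `*_h_disabled` guard is defined, call sites still compile, but land in an asserting stub instead of the real `_IMPL`. A minimal standalone illustration follows; the `Widget` type, guard name, and function are hypothetical, not from the patch:

#include <assert.h>
#include <stddef.h>

typedef struct Widget Widget;

/* The real implementation, compiled only when the feature is enabled. */
unsigned char *widgetGetBuffer_IMPL(Widget *pWidget, unsigned int id);

#ifdef __example_widget_h_disabled
/* Disabled build: same signature, asserting no-op (stands in for
 * NV_ASSERT_FAILED_PRECOMP in the generated header). */
static inline unsigned char *widgetGetBuffer(Widget *pWidget, unsigned int id) {
    assert(!"Widget was disabled!");
    return NULL;
}
#else //__example_widget_h_disabled
/* Enabled build: the public name forwards straight to the _IMPL. */
#define widgetGetBuffer(pWidget, id) widgetGetBuffer_IMPL(pWidget, id)
#endif //__example_widget_h_disabled

The design keeps every caller's code unconditional; only this one header decides, at compile time, whether a call is real or a loud failure.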
*pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +PEVENTNOTIFICATION * __nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier); // super +struct NotifShare * __nvoc_down_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier); // super +void __nvoc_down_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare); // super +NV_STATUS __nvoc_down_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super +NV_STATUS __nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super +NV_STATUS __nvoc_down_thunk_ContextDma_resMapTo(struct RsResource *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_down_thunk_ContextDma_resUnmapFrom(struct RsResource *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams); // this + +// Up-thunk(s) to bridge ContextDma methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super +NvBool __nvoc_up_thunk_RmResource_ctxdmaAccessCallback(struct ContextDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NvBool __nvoc_up_thunk_RmResource_ctxdmaShareCallback(struct ContextDma *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_RmResource_ctxdmaGetMemInterMapParams(struct ContextDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_ctxdmaCheckMemInterUnmap(struct ContextDma *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_ctxdmaGetMemoryMappingDescriptor(struct ContextDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this 
+NV_STATUS __nvoc_up_thunk_RmResource_ctxdmaControlSerialization_Prologue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_ctxdmaControlSerialization_Epilogue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_ctxdmaControl_Prologue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_ctxdmaControl_Epilogue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_ctxdmaCanCopy(struct ContextDma *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_ctxdmaIsDuplicate(struct ContextDma *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_ctxdmaPreDestruct(struct ContextDma *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_ctxdmaControl(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_ctxdmaControlFilter(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_ctxdmaMap(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_RsResource_ctxdmaUnmap(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_RsResource_ctxdmaIsPartialUnmapSupported(struct ContextDma *pResource); // this +NvU32 __nvoc_up_thunk_RsResource_ctxdmaGetRefCount(struct ContextDma *pResource); // this +void __nvoc_up_thunk_RsResource_ctxdmaAddAdditionalDependants(struct RsClient *pClient, struct ContextDma *pResource, RsResourceRef *pReference); // this +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_ctxdmaGetNotificationListPtr(struct ContextDma *pNotifier); // this +struct NotifShare * __nvoc_up_thunk_Notifier_ctxdmaGetNotificationShare(struct ContextDma *pNotifier); // this +void __nvoc_up_thunk_Notifier_ctxdmaSetNotificationShare(struct ContextDma *pNotifier, struct NotifShare *pNotifShare); // this +NV_STATUS __nvoc_up_thunk_Notifier_ctxdmaUnregisterEvent(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // this +NV_STATUS __nvoc_up_thunk_Notifier_ctxdmaGetOrAllocNotifShare(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_ContextDma = +{ + /*classInfo=*/ { + /*size=*/ sizeof(ContextDma), + /*classId=*/ classId(ContextDma), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "ContextDma", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_ContextDma, + /*pCastInfo=*/ &__nvoc_castinfo__ContextDma, + /*pExportInfo=*/ &__nvoc_export_info__ContextDma +}; + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_ContextDma[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + 
/*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ctxdmaCtrlCmdUpdateContextdma_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20101u, + /*paramSize=*/ sizeof(NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ContextDma.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ctxdmaCtrlCmdUpdateContextdma" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ctxdmaCtrlCmdBindContextdma_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20102u, + /*paramSize=*/ sizeof(NV0002_CTRL_BIND_CONTEXTDMA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ContextDma.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ctxdmaCtrlCmdBindContextdma" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ctxdmaCtrlCmdUnbindContextdma_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20103u, + /*paramSize=*/ sizeof(NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ContextDma.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ctxdmaCtrlCmdUnbindContextdma" +#endif + }, + +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__ContextDma __nvoc_metadata__ContextDma = { + .rtti.pClassDef = &__nvoc_class_def_ContextDma, // (ctxdma) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_ContextDma, + .rtti.offset = 0, + .metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super + .metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.rtti.offset = NV_OFFSETOF(ContextDma, __nvoc_base_RmResource), + .metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^2 + .metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^3 + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^2 + .metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + .metadata__Notifier.rtti.pClassDef = &__nvoc_class_def_Notifier, // (notify) super + .metadata__Notifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Notifier.rtti.offset = NV_OFFSETOF(ContextDma, __nvoc_base_Notifier), + .metadata__Notifier.metadata__INotifier.rtti.pClassDef = &__nvoc_class_def_INotifier, // (inotify) super^2 + .metadata__Notifier.metadata__INotifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Notifier.metadata__INotifier.rtti.offset = NV_OFFSETOF(ContextDma, 
__nvoc_base_Notifier.__nvoc_base_INotifier), + + .vtable.__ctxdmaMapTo__ = &ctxdmaMapTo_IMPL, // virtual override (res) base (rmres) + .metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &__nvoc_down_thunk_ContextDma_resMapTo, // virtual + .vtable.__ctxdmaUnmapFrom__ = &ctxdmaUnmapFrom_IMPL, // virtual override (res) base (rmres) + .metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &__nvoc_down_thunk_ContextDma_resUnmapFrom, // virtual + .vtable.__ctxdmaAccessCallback__ = &__nvoc_up_thunk_RmResource_ctxdmaAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__ctxdmaShareCallback__ = &__nvoc_up_thunk_RmResource_ctxdmaShareCallback, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__ctxdmaGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_ctxdmaGetMemInterMapParams, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + .vtable.__ctxdmaCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_ctxdmaCheckMemInterUnmap, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual + .vtable.__ctxdmaGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_ctxdmaGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual + .vtable.__ctxdmaControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_ctxdmaControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__ctxdmaControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_ctxdmaControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__ctxdmaControl_Prologue__ = &__nvoc_up_thunk_RmResource_ctxdmaControl_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = 
&__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__ctxdmaControl_Epilogue__ = &__nvoc_up_thunk_RmResource_ctxdmaControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__ctxdmaCanCopy__ = &__nvoc_up_thunk_RsResource_ctxdmaCanCopy, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__ctxdmaIsDuplicate__ = &__nvoc_up_thunk_RsResource_ctxdmaIsDuplicate, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__ctxdmaPreDestruct__ = &__nvoc_up_thunk_RsResource_ctxdmaPreDestruct, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__ctxdmaControl__ = &__nvoc_up_thunk_RsResource_ctxdmaControl, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &resControl_IMPL, // virtual + .vtable.__ctxdmaControlFilter__ = &__nvoc_up_thunk_RsResource_ctxdmaControlFilter, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__ctxdmaMap__ = &__nvoc_up_thunk_RsResource_ctxdmaMap, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &resMap_IMPL, // virtual + .vtable.__ctxdmaUnmap__ = &__nvoc_up_thunk_RsResource_ctxdmaUnmap, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &resUnmap_IMPL, // virtual + .vtable.__ctxdmaIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_ctxdmaIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__ctxdmaGetRefCount__ = &__nvoc_up_thunk_RsResource_ctxdmaGetRefCount, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresGetRefCount__ = 
&__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) +    .metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual +    .vtable.__ctxdmaAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_ctxdmaAddAdditionalDependants, // virtual inherited (res) base (rmres) +    .metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) +    .metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +    .vtable.__ctxdmaGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_ctxdmaGetNotificationListPtr, // virtual inherited (notify) base (notify) +    .metadata__Notifier.vtable.__notifyGetNotificationListPtr__ = &notifyGetNotificationListPtr_IMPL, // virtual override (inotify) base (inotify) +    .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationListPtr__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr, // pure virtual +    .vtable.__ctxdmaGetNotificationShare__ = &__nvoc_up_thunk_Notifier_ctxdmaGetNotificationShare, // virtual inherited (notify) base (notify) +    .metadata__Notifier.vtable.__notifyGetNotificationShare__ = &notifyGetNotificationShare_IMPL, // virtual override (inotify) base (inotify) +    .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationShare, // pure virtual +    .vtable.__ctxdmaSetNotificationShare__ = &__nvoc_up_thunk_Notifier_ctxdmaSetNotificationShare, // virtual inherited (notify) base (notify) +    .metadata__Notifier.vtable.__notifySetNotificationShare__ = &notifySetNotificationShare_IMPL, // virtual override (inotify) base (inotify) +    .metadata__Notifier.metadata__INotifier.vtable.__inotifySetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifySetNotificationShare, // pure virtual +    .vtable.__ctxdmaUnregisterEvent__ = &__nvoc_up_thunk_Notifier_ctxdmaUnregisterEvent, // virtual inherited (notify) base (notify) +    .metadata__Notifier.vtable.__notifyUnregisterEvent__ = &notifyUnregisterEvent_IMPL, // virtual override (inotify) base (inotify) +    .metadata__Notifier.metadata__INotifier.vtable.__inotifyUnregisterEvent__ = &__nvoc_down_thunk_Notifier_inotifyUnregisterEvent, // pure virtual +    .vtable.__ctxdmaGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_ctxdmaGetOrAllocNotifShare, // virtual inherited (notify) base (notify) +    .metadata__Notifier.vtable.__notifyGetOrAllocNotifShare__ = &notifyGetOrAllocNotifShare_IMPL, // virtual override (inotify) base (inotify) +    .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetOrAllocNotifShare__ = &__nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare, // pure virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__ContextDma = { +    .numRelatives = 7, +    .relatives = { +        &__nvoc_metadata__ContextDma.rtti,    // [0]: (ctxdma) this +        &__nvoc_metadata__ContextDma.metadata__RmResource.rtti,    // [1]: (rmres) super +        &__nvoc_metadata__ContextDma.metadata__RmResource.metadata__RsResource.rtti,    // [2]: (res) super^2 +        &__nvoc_metadata__ContextDma.metadata__RmResource.metadata__RsResource.metadata__Object.rtti,    // [3]: (obj) super^3 +        &__nvoc_metadata__ContextDma.metadata__RmResource.metadata__RmResourceCommon.rtti,    // [4]: (rmrescmn) super^2 +        &__nvoc_metadata__ContextDma.metadata__Notifier.rtti,    // [5]: (notify) super +        &__nvoc_metadata__ContextDma.metadata__Notifier.metadata__INotifier.rtti,    //
[6]: (inotify) super^2 + } +}; + +// 2 down-thunk(s) defined to bridge methods in ContextDma from superclasses + +// ctxdmaMapTo: virtual override (res) base (rmres) +NV_STATUS __nvoc_down_thunk_ContextDma_resMapTo(struct RsResource *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams) { + return ctxdmaMapTo((struct ContextDma *)(((unsigned char *) pContextDma) - NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// ctxdmaUnmapFrom: virtual override (res) base (rmres) +NV_STATUS __nvoc_down_thunk_ContextDma_resUnmapFrom(struct RsResource *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams) { + return ctxdmaUnmapFrom((struct ContextDma *)(((unsigned char *) pContextDma) - NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + + +// 24 up-thunk(s) defined to bridge methods in ContextDma to superclasses + +// ctxdmaAccessCallback: virtual inherited (rmres) base (rmres) +NvBool __nvoc_up_thunk_RmResource_ctxdmaAccessCallback(struct ContextDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// ctxdmaShareCallback: virtual inherited (rmres) base (rmres) +NvBool __nvoc_up_thunk_RmResource_ctxdmaShareCallback(struct ContextDma *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// ctxdmaGetMemInterMapParams: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_ctxdmaGetMemInterMapParams(struct ContextDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource)), pParams); +} + +// ctxdmaCheckMemInterUnmap: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_ctxdmaCheckMemInterUnmap(struct ContextDma *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource)), bSubdeviceHandleProvided); +} + +// ctxdmaGetMemoryMappingDescriptor: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_ctxdmaGetMemoryMappingDescriptor(struct ContextDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource)), ppMemDesc); +} + +// ctxdmaControlSerialization_Prologue: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_ctxdmaControlSerialization_Prologue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// ctxdmaControlSerialization_Epilogue: virtual inherited (rmres) base (rmres) +void __nvoc_up_thunk_RmResource_ctxdmaControlSerialization_Epilogue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL 
*pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// ctxdmaControl_Prologue: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_ctxdmaControl_Prologue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// ctxdmaControl_Epilogue: virtual inherited (rmres) base (rmres) +void __nvoc_up_thunk_RmResource_ctxdmaControl_Epilogue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// ctxdmaCanCopy: virtual inherited (res) base (rmres) +NvBool __nvoc_up_thunk_RsResource_ctxdmaCanCopy(struct ContextDma *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// ctxdmaIsDuplicate: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_ctxdmaIsDuplicate(struct ContextDma *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate); +} + +// ctxdmaPreDestruct: virtual inherited (res) base (rmres) +void __nvoc_up_thunk_RsResource_ctxdmaPreDestruct(struct ContextDma *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// ctxdmaControl: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_ctxdmaControl(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// ctxdmaControlFilter: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_ctxdmaControlFilter(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// ctxdmaMap: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_ctxdmaMap(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams, pCpuMapping); +} + +// ctxdmaUnmap: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_ctxdmaUnmap(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pCpuMapping); +} + +// 
ctxdmaIsPartialUnmapSupported: inline virtual inherited (res) base (rmres) body +NvBool __nvoc_up_thunk_RsResource_ctxdmaIsPartialUnmapSupported(struct ContextDma *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// ctxdmaGetRefCount: virtual inherited (res) base (rmres) +NvU32 __nvoc_up_thunk_RsResource_ctxdmaGetRefCount(struct ContextDma *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// ctxdmaAddAdditionalDependants: virtual inherited (res) base (rmres) +void __nvoc_up_thunk_RsResource_ctxdmaAddAdditionalDependants(struct RsClient *pClient, struct ContextDma *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + +// ctxdmaGetNotificationListPtr: virtual inherited (notify) base (notify) +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_ctxdmaGetNotificationListPtr(struct ContextDma *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(ContextDma, __nvoc_base_Notifier))); +} + +// ctxdmaGetNotificationShare: virtual inherited (notify) base (notify) +struct NotifShare * __nvoc_up_thunk_Notifier_ctxdmaGetNotificationShare(struct ContextDma *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(ContextDma, __nvoc_base_Notifier))); +} + +// ctxdmaSetNotificationShare: virtual inherited (notify) base (notify) +void __nvoc_up_thunk_Notifier_ctxdmaSetNotificationShare(struct ContextDma *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(ContextDma, __nvoc_base_Notifier)), pNotifShare); +} + +// ctxdmaUnregisterEvent: virtual inherited (notify) base (notify) +NV_STATUS __nvoc_up_thunk_Notifier_ctxdmaUnregisterEvent(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(ContextDma, __nvoc_base_Notifier)), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +// ctxdmaGetOrAllocNotifShare: virtual inherited (notify) base (notify) +NV_STATUS __nvoc_up_thunk_Notifier_ctxdmaGetOrAllocNotifShare(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(ContextDma, __nvoc_base_Notifier)), hNotifierClient, hNotifierResource, ppNotifShare); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__ContextDma = +{ + /*numEntries=*/ 3, + /*pExportEntries=*/ __nvoc_exported_method_def_ContextDma +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_ContextDma(ContextDma *pThis) { + __nvoc_ctxdmaDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_ContextDma(ContextDma *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS 
__nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_ContextDma(ContextDma *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ContextDma_fail_RmResource; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_ContextDma_fail_Notifier; + __nvoc_init_dataField_ContextDma(pThis); + + status = __nvoc_ctxdmaConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ContextDma_fail__init; + goto __nvoc_ctor_ContextDma_exit; // Success + +__nvoc_ctor_ContextDma_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_ContextDma_fail_Notifier: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_ContextDma_fail_RmResource: +__nvoc_ctor_ContextDma_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_ContextDma_1(ContextDma *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + // ctxdmaCtrlCmdUpdateContextdma -- exported (id=0x20101) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__ctxdmaCtrlCmdUpdateContextdma__ = &ctxdmaCtrlCmdUpdateContextdma_IMPL; +#endif + + // ctxdmaCtrlCmdBindContextdma -- exported (id=0x20102) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + pThis->__ctxdmaCtrlCmdBindContextdma__ = &ctxdmaCtrlCmdBindContextdma_IMPL; +#endif + + // ctxdmaCtrlCmdUnbindContextdma -- exported (id=0x20103) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + pThis->__ctxdmaCtrlCmdUnbindContextdma__ = &ctxdmaCtrlCmdUnbindContextdma_IMPL; +#endif +} // End __nvoc_init_funcTable_ContextDma_1 with approximately 3 basic block(s). + + +// Initialize vtable(s) for 29 virtual method(s). +void __nvoc_init_funcTable_ContextDma(ContextDma *pThis) { + + // Initialize vtable(s) with 3 per-object function pointer(s). + __nvoc_init_funcTable_ContextDma_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__ContextDma(ContextDma *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^3 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^2 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^2 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; // (rmres) super + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; // (inotify) super^2 + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; // (notify) super + pThis->__nvoc_pbase_ContextDma = pThis; // (ctxdma) this + + // Recurse to superclass initialization function(s). 
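+    // The base-class inits below run first; the metadata-pointer assignments that follow then repoint each embedded base at the aggregate __nvoc_metadata__ContextDma, so a dispatch through any base class resolves to ContextDma's overriding vtable entries.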
+    __nvoc_init__RmResource(&pThis->__nvoc_base_RmResource); +    __nvoc_init__Notifier(&pThis->__nvoc_base_Notifier); + +    // Pointer(s) to metadata structure(s) +    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__ContextDma.metadata__RmResource.metadata__RsResource.metadata__Object;    // (obj) super^3 +    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__ContextDma.metadata__RmResource.metadata__RsResource;    // (res) super^2 +    pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__ContextDma.metadata__RmResource.metadata__RmResourceCommon;    // (rmrescmn) super^2 +    pThis->__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__ContextDma.metadata__RmResource;    // (rmres) super +    pThis->__nvoc_base_Notifier.__nvoc_base_INotifier.__nvoc_metadata_ptr = &__nvoc_metadata__ContextDma.metadata__Notifier.metadata__INotifier;    // (inotify) super^2 +    pThis->__nvoc_base_Notifier.__nvoc_metadata_ptr = &__nvoc_metadata__ContextDma.metadata__Notifier;    // (notify) super +    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__ContextDma;    // (ctxdma) this + +    // Initialize per-object vtables. +    __nvoc_init_funcTable_ContextDma(pThis); +} + +NV_STATUS __nvoc_objCreate_ContextDma(ContextDma **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ +    NV_STATUS status; +    Object *pParentObj = NULL; +    ContextDma *pThis; + +    // Assign `pThis`, allocating memory unless suppressed by flag. +    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(ContextDma), (void**)&pThis, (void**)ppThis); +    if (status != NV_OK) +        return status; + +    // Zero is the initial value for everything. +    portMemSet(pThis, 0, sizeof(ContextDma)); + +    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + +    // Link the child into the parent if there is one, unless flagged not to do so. +    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) +    { +        pParentObj = dynamicCast(pParent, Object); +        objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); +    } +    else +    { +        pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; +    } + +    __nvoc_init__ContextDma(pThis); +    status = __nvoc_ctor_ContextDma(pThis, arg_pCallContext, arg_pParams); +    if (status != NV_OK) goto __nvoc_objCreate_ContextDma_cleanup; + +    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. +    *ppThis = pThis; + +    return NV_OK; + +__nvoc_objCreate_ContextDma_cleanup: + +    // Unlink the child from the parent if it was linked above. +    if (pParentObj != NULL) +        objRemoveChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + +    // Do not call destructors here since the constructor already called them.
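+    // For in-place construction the caller owns the storage, so only scrub the partially constructed object; otherwise free the heap allocation and clear *ppThis.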
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(ContextDma)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_ContextDma(ContextDma **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_ContextDma(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_context_dma_nvoc.h b/src/nvidia/generated/g_context_dma_nvoc.h new file mode 100644 index 0000000..c16a488 --- /dev/null +++ b/src/nvidia/generated/g_context_dma_nvoc.h @@ -0,0 +1,469 @@ + +#ifndef _G_CONTEXT_DMA_NVOC_H_ +#define _G_CONTEXT_DMA_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_context_dma_nvoc.h" + +#ifndef CONTEXT_DMA_H +#define CONTEXT_DMA_H + +#include "core/core.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "rmapi/resource.h" +#include "rmapi/event.h" +#include "ctrl/ctrl0002.h" +#include "rmapi/control.h" // for macro RMCTRL_EXPORT etc. +#include "nvlimits.h" + + +struct Device; + +#ifndef __NVOC_CLASS_Device_TYPEDEF__ +#define __NVOC_CLASS_Device_TYPEDEF__ +typedef struct Device Device; +#endif /* __NVOC_CLASS_Device_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Device +#define __nvoc_class_id_Device 0xe0ac20 +#endif /* __nvoc_class_id_Device */ + + + +struct Memory; + +#ifndef __NVOC_CLASS_Memory_TYPEDEF__ +#define __NVOC_CLASS_Memory_TYPEDEF__ +typedef struct Memory Memory; +#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Memory +#define __nvoc_class_id_Memory 0x4789f2 +#endif /* __nvoc_class_id_Memory */ + + + +/*! 
+ * RM internal class representing NV01_CONTEXT_DMA + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_CONTEXT_DMA_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__ContextDma; +struct NVOC_METADATA__RmResource; +struct NVOC_METADATA__Notifier; +struct NVOC_VTABLE__ContextDma; + + +struct ContextDma { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__ContextDma *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct RmResource __nvoc_base_RmResource; + struct Notifier __nvoc_base_Notifier; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^3 + struct RsResource *__nvoc_pbase_RsResource; // res super^2 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^2 + struct RmResource *__nvoc_pbase_RmResource; // rmres super + struct INotifier *__nvoc_pbase_INotifier; // inotify super^2 + struct Notifier *__nvoc_pbase_Notifier; // notify super + struct ContextDma *__nvoc_pbase_ContextDma; // ctxdma + + // Vtable with 3 per-object function pointers + NV_STATUS (*__ctxdmaCtrlCmdUpdateContextdma__)(struct ContextDma * /*this*/, NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *); // exported (id=0x20101) + NV_STATUS (*__ctxdmaCtrlCmdBindContextdma__)(struct ContextDma * /*this*/, NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *); // exported (id=0x20102) + NV_STATUS (*__ctxdmaCtrlCmdUnbindContextdma__)(struct ContextDma * /*this*/, NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *); // exported (id=0x20103) + + // Data members + NvU32 Class; + NvU32 Flags; + NvBool bReadOnly; + NvU32 CacheSnoop; + NvU32 Type; + NvU64 Limit; + NV_ADDRESS_SPACE AddressSpace; + NvBool bUnicast; + void *KernelVAddr[8]; + void *KernelPriv; + NvU64 FbAperture[8]; + NvU64 FbApertureLen[8]; + struct Memory *pMemory; + struct MEMORY_DESCRIPTOR *pMemDesc; + NvU32 Instance[8]; + NvU32 InstRefCount[8]; + struct OBJGPU *pGpu; + struct Device *pDevice; +}; + + +// Vtable with 26 per-class function pointers +struct NVOC_VTABLE__ContextDma { + NV_STATUS (*__ctxdmaMapTo__)(struct ContextDma * /*this*/, struct RS_RES_MAP_TO_PARAMS *); // virtual override (res) base (rmres) + NV_STATUS (*__ctxdmaUnmapFrom__)(struct ContextDma * /*this*/, struct RS_RES_UNMAP_FROM_PARAMS *); // virtual override (res) base (rmres) + NvBool (*__ctxdmaAccessCallback__)(struct ContextDma * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (rmres) + NvBool (*__ctxdmaShareCallback__)(struct ContextDma * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__ctxdmaGetMemInterMapParams__)(struct ContextDma * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__ctxdmaCheckMemInterUnmap__)(struct ContextDma * /*this*/, NvBool); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__ctxdmaGetMemoryMappingDescriptor__)(struct ContextDma * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__ctxdmaControlSerialization_Prologue__)(struct ContextDma * /*this*/, struct CALL_CONTEXT *, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + void (*__ctxdmaControlSerialization_Epilogue__)(struct ContextDma * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__ctxdmaControl_Prologue__)(struct ContextDma * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + void (*__ctxdmaControl_Epilogue__)(struct ContextDma * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + NvBool (*__ctxdmaCanCopy__)(struct ContextDma * /*this*/); // virtual inherited (res) base (rmres) + NV_STATUS (*__ctxdmaIsDuplicate__)(struct ContextDma * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (rmres) + void (*__ctxdmaPreDestruct__)(struct ContextDma * /*this*/); // virtual inherited (res) base (rmres) + NV_STATUS (*__ctxdmaControl__)(struct ContextDma * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (rmres) + NV_STATUS (*__ctxdmaControlFilter__)(struct ContextDma * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (rmres) + NV_STATUS (*__ctxdmaMap__)(struct ContextDma * /*this*/, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (res) base (rmres) + NV_STATUS (*__ctxdmaUnmap__)(struct ContextDma * /*this*/, struct CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (res) base (rmres) + NvBool (*__ctxdmaIsPartialUnmapSupported__)(struct ContextDma * /*this*/); // inline virtual inherited (res) base (rmres) body + NvU32 (*__ctxdmaGetRefCount__)(struct ContextDma * /*this*/); // virtual inherited (res) base (rmres) + void (*__ctxdmaAddAdditionalDependants__)(struct RsClient *, struct ContextDma * /*this*/, RsResourceRef *); // virtual inherited (res) base (rmres) + PEVENTNOTIFICATION * (*__ctxdmaGetNotificationListPtr__)(struct ContextDma * /*this*/); // virtual inherited (notify) base (notify) + struct NotifShare * (*__ctxdmaGetNotificationShare__)(struct ContextDma * /*this*/); // virtual inherited (notify) base (notify) + void (*__ctxdmaSetNotificationShare__)(struct ContextDma * /*this*/, struct NotifShare *); // virtual inherited (notify) base (notify) + NV_STATUS (*__ctxdmaUnregisterEvent__)(struct ContextDma * /*this*/, NvHandle, NvHandle, NvHandle, NvHandle); // virtual inherited (notify) base (notify) + NV_STATUS (*__ctxdmaGetOrAllocNotifShare__)(struct ContextDma * /*this*/, NvHandle, NvHandle, struct NotifShare **); // virtual inherited (notify) base (notify) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__ContextDma { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__RmResource metadata__RmResource; + const struct NVOC_METADATA__Notifier metadata__Notifier; + const struct NVOC_VTABLE__ContextDma vtable; +}; + +#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__ +#define __NVOC_CLASS_ContextDma_TYPEDEF__ +typedef struct ContextDma ContextDma; +#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ContextDma +#define __nvoc_class_id_ContextDma 0x88441b +#endif /* __nvoc_class_id_ContextDma */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ContextDma; + +#define __staticCast_ContextDma(pThis) \ + ((pThis)->__nvoc_pbase_ContextDma) + +#ifdef __nvoc_context_dma_h_disabled +#define __dynamicCast_ContextDma(pThis) 
((ContextDma*) NULL) +#else //__nvoc_context_dma_h_disabled +#define __dynamicCast_ContextDma(pThis) \ + ((ContextDma*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(ContextDma))) +#endif //__nvoc_context_dma_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_ContextDma(ContextDma**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_ContextDma(ContextDma**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_ContextDma(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_ContextDma((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define ctxdmaMapTo_FNPTR(pContextDma) pContextDma->__nvoc_metadata_ptr->vtable.__ctxdmaMapTo__ +#define ctxdmaMapTo(pContextDma, pParams) ctxdmaMapTo_DISPATCH(pContextDma, pParams) +#define ctxdmaUnmapFrom_FNPTR(pContextDma) pContextDma->__nvoc_metadata_ptr->vtable.__ctxdmaUnmapFrom__ +#define ctxdmaUnmapFrom(pContextDma, pParams) ctxdmaUnmapFrom_DISPATCH(pContextDma, pParams) +#define ctxdmaCtrlCmdUpdateContextdma_FNPTR(pContextDma) pContextDma->__ctxdmaCtrlCmdUpdateContextdma__ +#define ctxdmaCtrlCmdUpdateContextdma(pContextDma, pUpdateCtxtDmaParams) ctxdmaCtrlCmdUpdateContextdma_DISPATCH(pContextDma, pUpdateCtxtDmaParams) +#define ctxdmaCtrlCmdBindContextdma_FNPTR(pContextDma) pContextDma->__ctxdmaCtrlCmdBindContextdma__ +#define ctxdmaCtrlCmdBindContextdma(pContextDma, pBindCtxtDmaParams) ctxdmaCtrlCmdBindContextdma_DISPATCH(pContextDma, pBindCtxtDmaParams) +#define ctxdmaCtrlCmdUnbindContextdma_FNPTR(pContextDma) pContextDma->__ctxdmaCtrlCmdUnbindContextdma__ +#define ctxdmaCtrlCmdUnbindContextdma(pContextDma, pUnbindCtxtDmaParams) ctxdmaCtrlCmdUnbindContextdma_DISPATCH(pContextDma, pUnbindCtxtDmaParams) +#define ctxdmaAccessCallback_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define ctxdmaAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) ctxdmaAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define ctxdmaShareCallback_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define ctxdmaShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) ctxdmaShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define ctxdmaGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define ctxdmaGetMemInterMapParams(pRmResource, pParams) ctxdmaGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define ctxdmaCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define ctxdmaCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) ctxdmaCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define ctxdmaGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define ctxdmaGetMemoryMappingDescriptor(pRmResource, ppMemDesc) ctxdmaGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define ctxdmaControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define ctxdmaControlSerialization_Prologue(pResource, pCallContext, pParams) 
ctxdmaControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define ctxdmaControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define ctxdmaControlSerialization_Epilogue(pResource, pCallContext, pParams) ctxdmaControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define ctxdmaControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define ctxdmaControl_Prologue(pResource, pCallContext, pParams) ctxdmaControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define ctxdmaControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define ctxdmaControl_Epilogue(pResource, pCallContext, pParams) ctxdmaControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define ctxdmaCanCopy_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define ctxdmaCanCopy(pResource) ctxdmaCanCopy_DISPATCH(pResource) +#define ctxdmaIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define ctxdmaIsDuplicate(pResource, hMemory, pDuplicate) ctxdmaIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define ctxdmaPreDestruct_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define ctxdmaPreDestruct(pResource) ctxdmaPreDestruct_DISPATCH(pResource) +#define ctxdmaControl_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControl__ +#define ctxdmaControl(pResource, pCallContext, pParams) ctxdmaControl_DISPATCH(pResource, pCallContext, pParams) +#define ctxdmaControlFilter_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define ctxdmaControlFilter(pResource, pCallContext, pParams) ctxdmaControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define ctxdmaMap_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMap__ +#define ctxdmaMap(pResource, pCallContext, pParams, pCpuMapping) ctxdmaMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define ctxdmaUnmap_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmap__ +#define ctxdmaUnmap(pResource, pCallContext, pCpuMapping) ctxdmaUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define ctxdmaIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define ctxdmaIsPartialUnmapSupported(pResource) ctxdmaIsPartialUnmapSupported_DISPATCH(pResource) +#define ctxdmaGetRefCount_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define ctxdmaGetRefCount(pResource) ctxdmaGetRefCount_DISPATCH(pResource) +#define ctxdmaAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define ctxdmaAddAdditionalDependants(pClient, pResource, pReference) ctxdmaAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define ctxdmaGetNotificationListPtr_FNPTR(pNotifier) 
pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationListPtr__ +#define ctxdmaGetNotificationListPtr(pNotifier) ctxdmaGetNotificationListPtr_DISPATCH(pNotifier) +#define ctxdmaGetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationShare__ +#define ctxdmaGetNotificationShare(pNotifier) ctxdmaGetNotificationShare_DISPATCH(pNotifier) +#define ctxdmaSetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifySetNotificationShare__ +#define ctxdmaSetNotificationShare(pNotifier, pNotifShare) ctxdmaSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define ctxdmaUnregisterEvent_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyUnregisterEvent__ +#define ctxdmaUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) ctxdmaUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define ctxdmaGetOrAllocNotifShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetOrAllocNotifShare__ +#define ctxdmaGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) ctxdmaGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) + +// Dispatch functions +static inline NV_STATUS ctxdmaMapTo_DISPATCH(struct ContextDma *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams) { + return pContextDma->__nvoc_metadata_ptr->vtable.__ctxdmaMapTo__(pContextDma, pParams); +} + +static inline NV_STATUS ctxdmaUnmapFrom_DISPATCH(struct ContextDma *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pContextDma->__nvoc_metadata_ptr->vtable.__ctxdmaUnmapFrom__(pContextDma, pParams); +} + +static inline NV_STATUS ctxdmaCtrlCmdUpdateContextdma_DISPATCH(struct ContextDma *pContextDma, NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *pUpdateCtxtDmaParams) { + return pContextDma->__ctxdmaCtrlCmdUpdateContextdma__(pContextDma, pUpdateCtxtDmaParams); +} + +static inline NV_STATUS ctxdmaCtrlCmdBindContextdma_DISPATCH(struct ContextDma *pContextDma, NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *pBindCtxtDmaParams) { + return pContextDma->__ctxdmaCtrlCmdBindContextdma__(pContextDma, pBindCtxtDmaParams); +} + +static inline NV_STATUS ctxdmaCtrlCmdUnbindContextdma_DISPATCH(struct ContextDma *pContextDma, NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *pUnbindCtxtDmaParams) { + return pContextDma->__ctxdmaCtrlCmdUnbindContextdma__(pContextDma, pUnbindCtxtDmaParams); +} + +static inline NvBool ctxdmaAccessCallback_DISPATCH(struct ContextDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__ctxdmaAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool ctxdmaShareCallback_DISPATCH(struct ContextDma *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__ctxdmaShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS ctxdmaGetMemInterMapParams_DISPATCH(struct ContextDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__ctxdmaGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS ctxdmaCheckMemInterUnmap_DISPATCH(struct ContextDma *pRmResource, NvBool 
bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__ctxdmaCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS ctxdmaGetMemoryMappingDescriptor_DISPATCH(struct ContextDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__ctxdmaGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS ctxdmaControlSerialization_Prologue_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__ctxdmaControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void ctxdmaControlSerialization_Epilogue_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__ctxdmaControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS ctxdmaControl_Prologue_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__ctxdmaControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void ctxdmaControl_Epilogue_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__ctxdmaControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool ctxdmaCanCopy_DISPATCH(struct ContextDma *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__ctxdmaCanCopy__(pResource); +} + +static inline NV_STATUS ctxdmaIsDuplicate_DISPATCH(struct ContextDma *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__ctxdmaIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void ctxdmaPreDestruct_DISPATCH(struct ContextDma *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__ctxdmaPreDestruct__(pResource); +} + +static inline NV_STATUS ctxdmaControl_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__ctxdmaControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS ctxdmaControlFilter_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__ctxdmaControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS ctxdmaMap_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__ctxdmaMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS ctxdmaUnmap_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__ctxdmaUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool ctxdmaIsPartialUnmapSupported_DISPATCH(struct ContextDma *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__ctxdmaIsPartialUnmapSupported__(pResource); +} + +static inline NvU32 ctxdmaGetRefCount_DISPATCH(struct ContextDma *pResource) { + return 
pResource->__nvoc_metadata_ptr->vtable.__ctxdmaGetRefCount__(pResource); +} + +static inline void ctxdmaAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct ContextDma *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__ctxdmaAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline PEVENTNOTIFICATION * ctxdmaGetNotificationListPtr_DISPATCH(struct ContextDma *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__ctxdmaGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare * ctxdmaGetNotificationShare_DISPATCH(struct ContextDma *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__ctxdmaGetNotificationShare__(pNotifier); +} + +static inline void ctxdmaSetNotificationShare_DISPATCH(struct ContextDma *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvoc_metadata_ptr->vtable.__ctxdmaSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS ctxdmaUnregisterEvent_DISPATCH(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvoc_metadata_ptr->vtable.__ctxdmaUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS ctxdmaGetOrAllocNotifShare_DISPATCH(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__nvoc_metadata_ptr->vtable.__ctxdmaGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS ctxdmaMapTo_IMPL(struct ContextDma *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams); + +NV_STATUS ctxdmaUnmapFrom_IMPL(struct ContextDma *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams); + +NV_STATUS ctxdmaCtrlCmdUpdateContextdma_IMPL(struct ContextDma *pContextDma, NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *pUpdateCtxtDmaParams); + +NV_STATUS ctxdmaCtrlCmdBindContextdma_IMPL(struct ContextDma *pContextDma, NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *pBindCtxtDmaParams); + +NV_STATUS ctxdmaCtrlCmdUnbindContextdma_IMPL(struct ContextDma *pContextDma, NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *pUnbindCtxtDmaParams); + +NV_STATUS ctxdmaConstruct_IMPL(struct ContextDma *arg_pCtxdma, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_ctxdmaConstruct(arg_pCtxdma, arg_pCallContext, arg_pParams) ctxdmaConstruct_IMPL(arg_pCtxdma, arg_pCallContext, arg_pParams) +void ctxdmaDestruct_IMPL(struct ContextDma *pCtxdma); + +#define __nvoc_ctxdmaDestruct(pCtxdma) ctxdmaDestruct_IMPL(pCtxdma) +NV_STATUS ctxdmaValidate_IMPL(struct ContextDma *pContextDma, NvU64 start, NvU64 len); + +#ifdef __nvoc_context_dma_h_disabled +static inline NV_STATUS ctxdmaValidate(struct ContextDma *pContextDma, NvU64 start, NvU64 len) { + NV_ASSERT_FAILED_PRECOMP("ContextDma was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_context_dma_h_disabled +#define ctxdmaValidate(pContextDma, start, len) ctxdmaValidate_IMPL(pContextDma, start, len) +#endif //__nvoc_context_dma_h_disabled + +NV_STATUS ctxdmaGetKernelVA_IMPL(struct ContextDma *pContextDma, NvU64 start, NvU64 len, void **arg4, NvU32 VA_idx); + +#ifdef __nvoc_context_dma_h_disabled +static inline NV_STATUS ctxdmaGetKernelVA(struct ContextDma *pContextDma, NvU64 start, NvU64 len, void **arg4, NvU32 VA_idx) { + NV_ASSERT_FAILED_PRECOMP("ContextDma was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else 
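/*
 * Illustrative sketch (not part of the generated patch): the ctxdma wrapper
 * macros and _DISPATCH inlines above all follow one mechanical pattern — a
 * public name such as ctxdmaGetRefCount() expands to a _DISPATCH inline that
 * makes an indirect call through the object's __nvoc_metadata_ptr vtable,
 * which was bound to a concrete _IMPL at class-definition time. A minimal
 * standalone model of that chain, using hypothetical names (Widget,
 * widgetGetRefCount), compiles as plain C:
 */
#include <stdio.h>

struct Widget;

/* Per-class vtable: one function pointer per virtual method. */
struct WidgetVtable {
    unsigned int (*__widgetGetRefCount__)(struct Widget *);
};

/* Per-class metadata; the generated code also embeds RTTI here. */
struct WidgetMetadata {
    struct WidgetVtable vtable;
};

/* Every instance begins with a pointer to its class metadata. */
struct Widget {
    const struct WidgetMetadata *__nvoc_metadata_ptr;
    unsigned int refCount;
};

/* Mirrors ctxdmaGetRefCount_DISPATCH(): indirect call via the metadata. */
static inline unsigned int widgetGetRefCount_DISPATCH(struct Widget *pThis) {
    return pThis->__nvoc_metadata_ptr->vtable.__widgetGetRefCount__(pThis);
}
/* Mirrors the ctxdmaGetRefCount(...) wrapper macro. */
#define widgetGetRefCount(pThis) widgetGetRefCount_DISPATCH(pThis)

/* Concrete implementation, bound into the vtable below. */
static unsigned int widgetGetRefCount_IMPL(struct Widget *pThis) {
    return pThis->refCount;
}

static const struct WidgetMetadata g_widgetMetadata = {
    .vtable.__widgetGetRefCount__ = &widgetGetRefCount_IMPL,
};

int main(void) {
    struct Widget w = { .__nvoc_metadata_ptr = &g_widgetMetadata, .refCount = 3 };
    printf("%u\n", widgetGetRefCount(&w)); /* prints 3 */
    return 0;
}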
//__nvoc_context_dma_h_disabled +#define ctxdmaGetKernelVA(pContextDma, start, len, arg4, VA_idx) ctxdmaGetKernelVA_IMPL(pContextDma, start, len, arg4, VA_idx) +#endif //__nvoc_context_dma_h_disabled + +NvBool ctxdmaIsBound_IMPL(struct ContextDma *pContextDma); + +#ifdef __nvoc_context_dma_h_disabled +static inline NvBool ctxdmaIsBound(struct ContextDma *pContextDma) { + NV_ASSERT_FAILED_PRECOMP("ContextDma was disabled!"); + return NV_FALSE; +} +#else //__nvoc_context_dma_h_disabled +#define ctxdmaIsBound(pContextDma) ctxdmaIsBound_IMPL(pContextDma) +#endif //__nvoc_context_dma_h_disabled + +NV_STATUS ctxdmaGetByHandle_IMPL(struct RsClient *pClient, NvHandle hContextDma, struct ContextDma **arg3); + +#define ctxdmaGetByHandle(pClient, hContextDma, arg3) ctxdmaGetByHandle_IMPL(pClient, hContextDma, arg3) +#undef PRIVATE_FIELD + + +// **************************************************************************** +// Deprecated Definitions +// **************************************************************************** + +#if RM_STRICT_CONFIG_EMIT_DEPRECATED_CONTEXT_DMA_DEFINITIONS == 1 + +/** + * @warning This function is deprecated! Please use ctxdmaGetByHandle. + */ +NV_STATUS CliGetContextDma(NvHandle hClient, NvHandle hContextDma, struct ContextDma **); + +#endif + +#endif /* CONTEXT_DMA_H */ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_CONTEXT_DMA_NVOC_H_ diff --git a/src/nvidia/generated/g_dce_client_nvoc.c b/src/nvidia/generated/g_dce_client_nvoc.c new file mode 100644 index 0000000..08f6e59 --- /dev/null +++ b/src/nvidia/generated/g_dce_client_nvoc.c @@ -0,0 +1,325 @@ +#define NVOC_DCE_CLIENT_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_dce_client_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x61649c = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJDCECLIENTRM; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +// Forward declarations for OBJDCECLIENTRM +void __nvoc_init__OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init__OBJDCECLIENTRM(OBJDCECLIENTRM*); +void __nvoc_init_funcTable_OBJDCECLIENTRM(OBJDCECLIENTRM*); +NV_STATUS __nvoc_ctor_OBJDCECLIENTRM(OBJDCECLIENTRM*); +void __nvoc_init_dataField_OBJDCECLIENTRM(OBJDCECLIENTRM*); +void __nvoc_dtor_OBJDCECLIENTRM(OBJDCECLIENTRM*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__OBJDCECLIENTRM; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJDCECLIENTRM; + +// Down-thunk(s) to bridge OBJDCECLIENTRM methods from ancestors (if any) +NV_STATUS __nvoc_down_thunk_OBJDCECLIENTRM_engstateConstructEngine(struct OBJGPU *arg1, struct OBJENGSTATE *arg_this, ENGDESCRIPTOR arg3); // this +void __nvoc_down_thunk_OBJDCECLIENTRM_engstateStateDestroy(struct OBJGPU *arg1, struct OBJENGSTATE *arg_this); // this +NV_STATUS __nvoc_down_thunk_OBJDCECLIENTRM_engstateStateLoad(struct OBJGPU *arg1, struct OBJENGSTATE *arg_this, NvU32 arg3); // this +NV_STATUS __nvoc_down_thunk_OBJDCECLIENTRM_engstateStateUnload(struct OBJGPU *arg1, struct OBJENGSTATE *arg_this, NvU32 arg3); // this + +// Up-thunk(s) to bridge OBJDCECLIENTRM methods to ancestors 
(if any) +void __nvoc_up_thunk_OBJENGSTATE_dceclientInitMissing(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStatePreInitLocked(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStatePreInitUnlocked(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStateInitLocked(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStateInitUnlocked(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStatePreLoad(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg3); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStatePostLoad(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg3); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStatePreUnload(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg3); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStatePostUnload(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg3); // this +NvBool __nvoc_up_thunk_OBJENGSTATE_dceclientIsPresent(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJDCECLIENTRM = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJDCECLIENTRM), + /*classId=*/ classId(OBJDCECLIENTRM), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJDCECLIENTRM", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJDCECLIENTRM, + /*pCastInfo=*/ &__nvoc_castinfo__OBJDCECLIENTRM, + /*pExportInfo=*/ &__nvoc_export_info__OBJDCECLIENTRM +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__OBJDCECLIENTRM __nvoc_metadata__OBJDCECLIENTRM = { + .rtti.pClassDef = &__nvoc_class_def_OBJDCECLIENTRM, // (dceclient) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJDCECLIENTRM, + .rtti.offset = 0, + .metadata__OBJENGSTATE.rtti.pClassDef = &__nvoc_class_def_OBJENGSTATE, // (engstate) super + .metadata__OBJENGSTATE.rtti.dtor = &__nvoc_destructFromBase, + .metadata__OBJENGSTATE.rtti.offset = NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE), + .metadata__OBJENGSTATE.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^2 + .metadata__OBJENGSTATE.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__OBJENGSTATE.metadata__Object.rtti.offset = NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), + + .vtable.__dceclientConstructEngine__ = &dceclientConstructEngine_IMPL, // virtual override (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateConstructEngine__ = &__nvoc_down_thunk_OBJDCECLIENTRM_engstateConstructEngine, // virtual + .vtable.__dceclientStateDestroy__ = &dceclientStateDestroy_IMPL, // virtual override (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateDestroy__ = &__nvoc_down_thunk_OBJDCECLIENTRM_engstateStateDestroy, // virtual + .vtable.__dceclientStateLoad__ = &dceclientStateLoad_IMPL, // virtual override (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateLoad__ = &__nvoc_down_thunk_OBJDCECLIENTRM_engstateStateLoad, // virtual + .vtable.__dceclientStateUnload__ = &dceclientStateUnload_IMPL, // virtual override (engstate) base (engstate) + 
.metadata__OBJENGSTATE.vtable.__engstateStateUnload__ = &__nvoc_down_thunk_OBJDCECLIENTRM_engstateStateUnload, // virtual + .vtable.__dceclientInitMissing__ = &__nvoc_up_thunk_OBJENGSTATE_dceclientInitMissing, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateInitMissing__ = &engstateInitMissing_IMPL, // virtual + .vtable.__dceclientStatePreInitLocked__ = &__nvoc_up_thunk_OBJENGSTATE_dceclientStatePreInitLocked, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePreInitLocked__ = &engstateStatePreInitLocked_IMPL, // virtual + .vtable.__dceclientStatePreInitUnlocked__ = &__nvoc_up_thunk_OBJENGSTATE_dceclientStatePreInitUnlocked, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePreInitUnlocked__ = &engstateStatePreInitUnlocked_IMPL, // virtual + .vtable.__dceclientStateInitLocked__ = &__nvoc_up_thunk_OBJENGSTATE_dceclientStateInitLocked, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateInitLocked__ = &engstateStateInitLocked_IMPL, // virtual + .vtable.__dceclientStateInitUnlocked__ = &__nvoc_up_thunk_OBJENGSTATE_dceclientStateInitUnlocked, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateInitUnlocked__ = &engstateStateInitUnlocked_IMPL, // virtual + .vtable.__dceclientStatePreLoad__ = &__nvoc_up_thunk_OBJENGSTATE_dceclientStatePreLoad, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePreLoad__ = &engstateStatePreLoad_IMPL, // virtual + .vtable.__dceclientStatePostLoad__ = &__nvoc_up_thunk_OBJENGSTATE_dceclientStatePostLoad, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePostLoad__ = &engstateStatePostLoad_IMPL, // virtual + .vtable.__dceclientStatePreUnload__ = &__nvoc_up_thunk_OBJENGSTATE_dceclientStatePreUnload, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePreUnload__ = &engstateStatePreUnload_IMPL, // virtual + .vtable.__dceclientStatePostUnload__ = &__nvoc_up_thunk_OBJENGSTATE_dceclientStatePostUnload, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePostUnload__ = &engstateStatePostUnload_IMPL, // virtual + .vtable.__dceclientIsPresent__ = &__nvoc_up_thunk_OBJENGSTATE_dceclientIsPresent, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateIsPresent__ = &engstateIsPresent_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__OBJDCECLIENTRM = { + .numRelatives = 3, + .relatives = { + &__nvoc_metadata__OBJDCECLIENTRM.rtti, // [0]: (dceclient) this + &__nvoc_metadata__OBJDCECLIENTRM.metadata__OBJENGSTATE.rtti, // [1]: (engstate) super + &__nvoc_metadata__OBJDCECLIENTRM.metadata__OBJENGSTATE.metadata__Object.rtti, // [2]: (obj) super^2 + } +}; + +// 4 down-thunk(s) defined to bridge methods in OBJDCECLIENTRM from superclasses + +// dceclientConstructEngine: virtual override (engstate) base (engstate) +NV_STATUS __nvoc_down_thunk_OBJDCECLIENTRM_engstateConstructEngine(struct OBJGPU *arg1, struct OBJENGSTATE *arg_this, ENGDESCRIPTOR arg3) { + return dceclientConstructEngine(arg1, (struct OBJDCECLIENTRM *)(((unsigned char *) arg_this) - NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE)), arg3); +} + +// dceclientStateDestroy: virtual override (engstate) base (engstate) +void 
__nvoc_down_thunk_OBJDCECLIENTRM_engstateStateDestroy(struct OBJGPU *arg1, struct OBJENGSTATE *arg_this) { + dceclientStateDestroy(arg1, (struct OBJDCECLIENTRM *)(((unsigned char *) arg_this) - NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE))); +} + +// dceclientStateLoad: virtual override (engstate) base (engstate) +NV_STATUS __nvoc_down_thunk_OBJDCECLIENTRM_engstateStateLoad(struct OBJGPU *arg1, struct OBJENGSTATE *arg_this, NvU32 arg3) { + return dceclientStateLoad(arg1, (struct OBJDCECLIENTRM *)(((unsigned char *) arg_this) - NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE)), arg3); +} + +// dceclientStateUnload: virtual override (engstate) base (engstate) +NV_STATUS __nvoc_down_thunk_OBJDCECLIENTRM_engstateStateUnload(struct OBJGPU *arg1, struct OBJENGSTATE *arg_this, NvU32 arg3) { + return dceclientStateUnload(arg1, (struct OBJDCECLIENTRM *)(((unsigned char *) arg_this) - NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE)), arg3); +} + + +// 10 up-thunk(s) defined to bridge methods in OBJDCECLIENTRM to superclasses + +// dceclientInitMissing: virtual inherited (engstate) base (engstate) +void __nvoc_up_thunk_OBJENGSTATE_dceclientInitMissing(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE))); +} + +// dceclientStatePreInitLocked: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStatePreInitLocked(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE))); +} + +// dceclientStatePreInitUnlocked: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStatePreInitUnlocked(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE))); +} + +// dceclientStateInitLocked: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStateInitLocked(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE))); +} + +// dceclientStateInitUnlocked: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStateInitUnlocked(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE))); +} + +// dceclientStatePreLoad: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStatePreLoad(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg3) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE)), arg3); +} + +// dceclientStatePostLoad: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStatePostLoad(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg3) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE)), arg3); +} + +// 
dceclientStatePreUnload: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStatePreUnload(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg3) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE)), arg3); +} + +// dceclientStatePostUnload: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_dceclientStatePostUnload(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg3) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE)), arg3); +} + +// dceclientIsPresent: virtual inherited (engstate) base (engstate) +NvBool __nvoc_up_thunk_OBJENGSTATE_dceclientIsPresent(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE))); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJDCECLIENTRM = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) { + __nvoc_dceclientDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_OBJDCECLIENTRM_fail_OBJENGSTATE; + __nvoc_init_dataField_OBJDCECLIENTRM(pThis); + goto __nvoc_ctor_OBJDCECLIENTRM_exit; // Success + +__nvoc_ctor_OBJDCECLIENTRM_fail_OBJENGSTATE: +__nvoc_ctor_OBJDCECLIENTRM_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_OBJDCECLIENTRM_1(OBJDCECLIENTRM *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_OBJDCECLIENTRM_1 + + +// Initialize vtable(s) for 14 virtual method(s). +void __nvoc_init_funcTable_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) { + __nvoc_init_funcTable_OBJDCECLIENTRM_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; // (obj) super^2 + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; // (engstate) super + pThis->__nvoc_pbase_OBJDCECLIENTRM = pThis; // (dceclient) this + + // Recurse to superclass initialization function(s). + __nvoc_init__OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__OBJDCECLIENTRM.metadata__OBJENGSTATE.metadata__Object; // (obj) super^2 + pThis->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr = &__nvoc_metadata__OBJDCECLIENTRM.metadata__OBJENGSTATE; // (engstate) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__OBJDCECLIENTRM; // (dceclient) this + + // Initialize per-object vtables. 
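/*
 * Illustrative sketch (not part of the generated patch): the down-thunks
 * above recover the derived object from a base-class pointer by subtracting
 * the base's offset inside the derived struct (NV_OFFSETOF), and the
 * up-thunks add that same offset back. This is ordinary container_of-style
 * pointer arithmetic; a self-contained model with hypothetical Base/Derived
 * types:
 */
#include <assert.h>
#include <stddef.h>

struct Base    { int baseData; };
struct Derived { struct Base base; int derivedData; };

/* Down-thunk direction, Base* -> Derived*, as in
 * __nvoc_down_thunk_OBJDCECLIENTRM_engstateStateLoad() above. */
static struct Derived *derivedFromBase(struct Base *pBase) {
    return (struct Derived *)((unsigned char *)pBase -
                              offsetof(struct Derived, base));
}

/* Up-thunk direction, Derived* -> Base*, as in
 * __nvoc_up_thunk_OBJENGSTATE_dceclientStatePreLoad() above. */
static struct Base *baseFromDerived(struct Derived *pDerived) {
    return (struct Base *)((unsigned char *)pDerived +
                           offsetof(struct Derived, base));
}

int main(void) {
    struct Derived d = { .base = { 1 }, .derivedData = 2 };
    assert(derivedFromBase(&d.base) == &d);
    assert(baseFromDerived(&d) == &d.base);
    return 0;
}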
+ __nvoc_init_funcTable_OBJDCECLIENTRM(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJDCECLIENTRM(OBJDCECLIENTRM **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + OBJDCECLIENTRM *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(OBJDCECLIENTRM), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(OBJDCECLIENTRM)); + + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__OBJDCECLIENTRM(pThis); + status = __nvoc_ctor_OBJDCECLIENTRM(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJDCECLIENTRM_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_OBJDCECLIENTRM_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(OBJDCECLIENTRM)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJDCECLIENTRM(OBJDCECLIENTRM **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJDCECLIENTRM(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_dce_client_nvoc.h b/src/nvidia/generated/g_dce_client_nvoc.h new file mode 100644 index 0000000..6501362 --- /dev/null +++ b/src/nvidia/generated/g_dce_client_nvoc.h @@ -0,0 +1,327 @@ + +#ifndef _G_DCE_CLIENT_NVOC_H_ +#define _G_DCE_CLIENT_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
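/*
 * Illustrative sketch (not part of the generated patch):
 * __nvoc_objCreate_OBJDCECLIENTRM() above follows the standard NVOC creation
 * sequence — allocate unless the caller passed an in-place flag, zero the
 * storage, optionally link into a parent, wire up metadata pointers, then run
 * the constructor, unwinding on failure. A simplified model of that control
 * flow, with hypothetical names (Thing, thingCreate, CREATE_FLAG_IN_PLACE):
 */
#include <stdlib.h>
#include <string.h>

#define CREATE_FLAG_IN_PLACE 0x1 /* models NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT */

struct Thing { int initialized; };

static int thingCtor(struct Thing *t) { t->initialized = 1; return 0; /* 0 == success */ }

static int thingCreate(struct Thing **ppThis, unsigned createFlags) {
    struct Thing *pThis;

    /* Allocate unless constructing in place (caller supplied the storage). */
    if (createFlags & CREATE_FLAG_IN_PLACE)
        pThis = *ppThis;
    else if ((pThis = malloc(sizeof(*pThis))) == NULL)
        return -1; /* models NV_ERR_NO_MEMORY */

    /* Zero is the initial value for everything, as in the generated code. */
    memset(pThis, 0, sizeof(*pThis));

    if (thingCtor(pThis) != 0) {
        /* Unwind: scrub caller-owned storage, or free what we allocated. */
        if (createFlags & CREATE_FLAG_IN_PLACE) {
            memset(pThis, 0, sizeof(*pThis));
        } else {
            free(pThis);
            *ppThis = NULL;
        }
        return -1;
    }

    *ppThis = pThis;
    return 0;
}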
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_dce_client_nvoc.h" + +#ifndef _DCE_CLIENT_H_ +#define _DCE_CLIENT_H_ + +/*! + * @file dce_client.h + * @brief Provides definitions for all DceClient data structures and interfaces. + */ + +#include "gpu/eng_state.h" +#include "core/core.h" +#include "gpu/rpc/objrpc.h" +#include "os/dce_rm_client_ipc.h" + +/*! + * Temporary alias of DceClient to OBJDCECLIENTRM + */ +#define DceClient OBJDCECLIENTRM + +/*! + * Defines the structure used to contain all generic information related to + * the DceClient. + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_DCE_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__OBJDCECLIENTRM; +struct NVOC_METADATA__OBJENGSTATE; +struct NVOC_VTABLE__OBJDCECLIENTRM; + + +struct OBJDCECLIENTRM { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__OBJDCECLIENTRM *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^2 + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; // engstate super + struct OBJDCECLIENTRM *__nvoc_pbase_OBJDCECLIENTRM; // dceclient + + // Data members + struct OBJRPC *pRpc; + NvU32 clientId[2]; + NvU32 hInternalClient; +}; + + +// Vtable with 14 per-class function pointers +struct NVOC_VTABLE__OBJDCECLIENTRM { + NV_STATUS (*__dceclientConstructEngine__)(struct OBJGPU *, struct OBJDCECLIENTRM * /*this*/, ENGDESCRIPTOR); // virtual override (engstate) base (engstate) + void (*__dceclientStateDestroy__)(struct OBJGPU *, struct OBJDCECLIENTRM * /*this*/); // virtual override (engstate) base (engstate) + NV_STATUS (*__dceclientStateLoad__)(struct OBJGPU *, struct OBJDCECLIENTRM * /*this*/, NvU32); // virtual override (engstate) base (engstate) + NV_STATUS (*__dceclientStateUnload__)(struct OBJGPU *, struct OBJDCECLIENTRM * /*this*/, NvU32); // virtual override (engstate) base (engstate) + void (*__dceclientInitMissing__)(struct OBJGPU *, struct OBJDCECLIENTRM * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__dceclientStatePreInitLocked__)(struct OBJGPU *, struct OBJDCECLIENTRM * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__dceclientStatePreInitUnlocked__)(struct OBJGPU *, struct OBJDCECLIENTRM * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__dceclientStateInitLocked__)(struct OBJGPU *, struct OBJDCECLIENTRM * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__dceclientStateInitUnlocked__)(struct OBJGPU *, struct OBJDCECLIENTRM * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__dceclientStatePreLoad__)(struct OBJGPU *, struct OBJDCECLIENTRM * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__dceclientStatePostLoad__)(struct 
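/*
 * Illustrative sketch (not part of the generated patch): the PRIVATE_FIELD()
 * wrapper defined above is a compile-time access guard. The one .c file that
 * defines NVOC_DCE_CLIENT_H_PRIVATE_ACCESS_ALLOWED before including this
 * header sees the plain field name; every other translation unit sees a
 * mangled name, so stray references fail to compile. One way to model the
 * idea (an assumption — the real NVOC_PRIVATE_FIELD mechanism may differ),
 * using the hypothetical macro MY_PRIVATE_FIELD:
 */
#ifdef MYCLASS_PRIVATE_ACCESS_ALLOWED
#define MY_PRIVATE_FIELD(x) x
#else
#define MY_PRIVATE_FIELD(x) x##__do_not_touch /* field exists only under this name */
#endif

struct MyClass {
    int MY_PRIVATE_FIELD(secret);
};

/* In the owning .c file (which defines MYCLASS_PRIVATE_ACCESS_ALLOWED):
 *     pObj->secret = 42;   // compiles
 * In any other translation unit:
 *     pObj->secret = 42;   // error: no member named 'secret'
 */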
OBJGPU *, struct OBJDCECLIENTRM * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__dceclientStatePreUnload__)(struct OBJGPU *, struct OBJDCECLIENTRM * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__dceclientStatePostUnload__)(struct OBJGPU *, struct OBJDCECLIENTRM * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NvBool (*__dceclientIsPresent__)(struct OBJGPU *, struct OBJDCECLIENTRM * /*this*/); // virtual inherited (engstate) base (engstate) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__OBJDCECLIENTRM { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__OBJENGSTATE metadata__OBJENGSTATE; + const struct NVOC_VTABLE__OBJDCECLIENTRM vtable; +}; + +#ifndef __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ +#define __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ +typedef struct OBJDCECLIENTRM OBJDCECLIENTRM; +#endif /* __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDCECLIENTRM +#define __nvoc_class_id_OBJDCECLIENTRM 0x61649c +#endif /* __nvoc_class_id_OBJDCECLIENTRM */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJDCECLIENTRM; + +#define __staticCast_OBJDCECLIENTRM(pThis) \ + ((pThis)->__nvoc_pbase_OBJDCECLIENTRM) + +#ifdef __nvoc_dce_client_h_disabled +#define __dynamicCast_OBJDCECLIENTRM(pThis) ((OBJDCECLIENTRM*) NULL) +#else //__nvoc_dce_client_h_disabled +#define __dynamicCast_OBJDCECLIENTRM(pThis) \ + ((OBJDCECLIENTRM*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJDCECLIENTRM))) +#endif //__nvoc_dce_client_h_disabled + +// Property macros +#define PDB_PROP_DCECLIENT_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_DCECLIENT_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_OBJDCECLIENTRM(OBJDCECLIENTRM**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJDCECLIENTRM(OBJDCECLIENTRM**, Dynamic*, NvU32); +#define __objCreate_OBJDCECLIENTRM(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJDCECLIENTRM((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros +#define dceclientConstructEngine_FNPTR(arg_this) arg_this->__nvoc_metadata_ptr->vtable.__dceclientConstructEngine__ +#define dceclientConstructEngine(arg1, arg_this, arg3) dceclientConstructEngine_DISPATCH(arg1, arg_this, arg3) +#define dceclientStateDestroy_FNPTR(arg_this) arg_this->__nvoc_metadata_ptr->vtable.__dceclientStateDestroy__ +#define dceclientStateDestroy(arg1, arg_this) dceclientStateDestroy_DISPATCH(arg1, arg_this) +#define dceclientStateLoad_FNPTR(arg_this) arg_this->__nvoc_metadata_ptr->vtable.__dceclientStateLoad__ +#define dceclientStateLoad(arg1, arg_this, arg3) dceclientStateLoad_DISPATCH(arg1, arg_this, arg3) +#define dceclientStateUnload_FNPTR(arg_this) arg_this->__nvoc_metadata_ptr->vtable.__dceclientStateUnload__ +#define dceclientStateUnload(arg1, arg_this, arg3) dceclientStateUnload_DISPATCH(arg1, arg_this, arg3) +#define dceclientInitMissing_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateInitMissing__ +#define dceclientInitMissing(pGpu, pEngstate) dceclientInitMissing_DISPATCH(pGpu, pEngstate) +#define dceclientStatePreInitLocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreInitLocked__ +#define dceclientStatePreInitLocked(pGpu, pEngstate) dceclientStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define 
dceclientStatePreInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreInitUnlocked__ +#define dceclientStatePreInitUnlocked(pGpu, pEngstate) dceclientStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define dceclientStateInitLocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStateInitLocked__ +#define dceclientStateInitLocked(pGpu, pEngstate) dceclientStateInitLocked_DISPATCH(pGpu, pEngstate) +#define dceclientStateInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStateInitUnlocked__ +#define dceclientStateInitUnlocked(pGpu, pEngstate) dceclientStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define dceclientStatePreLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreLoad__ +#define dceclientStatePreLoad(pGpu, pEngstate, arg3) dceclientStatePreLoad_DISPATCH(pGpu, pEngstate, arg3) +#define dceclientStatePostLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePostLoad__ +#define dceclientStatePostLoad(pGpu, pEngstate, arg3) dceclientStatePostLoad_DISPATCH(pGpu, pEngstate, arg3) +#define dceclientStatePreUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreUnload__ +#define dceclientStatePreUnload(pGpu, pEngstate, arg3) dceclientStatePreUnload_DISPATCH(pGpu, pEngstate, arg3) +#define dceclientStatePostUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePostUnload__ +#define dceclientStatePostUnload(pGpu, pEngstate, arg3) dceclientStatePostUnload_DISPATCH(pGpu, pEngstate, arg3) +#define dceclientIsPresent_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateIsPresent__ +#define dceclientIsPresent(pGpu, pEngstate) dceclientIsPresent_DISPATCH(pGpu, pEngstate) + +// Dispatch functions +static inline NV_STATUS dceclientConstructEngine_DISPATCH(struct OBJGPU *arg1, struct OBJDCECLIENTRM *arg_this, ENGDESCRIPTOR arg3) { + return arg_this->__nvoc_metadata_ptr->vtable.__dceclientConstructEngine__(arg1, arg_this, arg3); +} + +static inline void dceclientStateDestroy_DISPATCH(struct OBJGPU *arg1, struct OBJDCECLIENTRM *arg_this) { + arg_this->__nvoc_metadata_ptr->vtable.__dceclientStateDestroy__(arg1, arg_this); +} + +static inline NV_STATUS dceclientStateLoad_DISPATCH(struct OBJGPU *arg1, struct OBJDCECLIENTRM *arg_this, NvU32 arg3) { + return arg_this->__nvoc_metadata_ptr->vtable.__dceclientStateLoad__(arg1, arg_this, arg3); +} + +static inline NV_STATUS dceclientStateUnload_DISPATCH(struct OBJGPU *arg1, struct OBJDCECLIENTRM *arg_this, NvU32 arg3) { + return arg_this->__nvoc_metadata_ptr->vtable.__dceclientStateUnload__(arg1, arg_this, arg3); +} + +static inline void dceclientInitMissing_DISPATCH(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate) { + pEngstate->__nvoc_metadata_ptr->vtable.__dceclientInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS dceclientStatePreInitLocked_DISPATCH(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__dceclientStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS dceclientStatePreInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__dceclientStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS 
dceclientStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__dceclientStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS dceclientStateInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__dceclientStateInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS dceclientStatePreLoad_DISPATCH(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__dceclientStatePreLoad__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS dceclientStatePostLoad_DISPATCH(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__dceclientStatePostLoad__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS dceclientStatePreUnload_DISPATCH(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__dceclientStatePreUnload__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS dceclientStatePostUnload_DISPATCH(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__dceclientStatePostUnload__(pGpu, pEngstate, arg3); +} + +static inline NvBool dceclientIsPresent_DISPATCH(struct OBJGPU *pGpu, struct OBJDCECLIENTRM *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__dceclientIsPresent__(pGpu, pEngstate); +} + +NV_STATUS dceclientConstructEngine_IMPL(struct OBJGPU *arg1, struct OBJDCECLIENTRM *arg2, ENGDESCRIPTOR arg3); + +void dceclientStateDestroy_IMPL(struct OBJGPU *arg1, struct OBJDCECLIENTRM *arg2); + +NV_STATUS dceclientStateLoad_IMPL(struct OBJGPU *arg1, struct OBJDCECLIENTRM *arg2, NvU32 arg3); + +NV_STATUS dceclientStateUnload_IMPL(struct OBJGPU *arg1, struct OBJDCECLIENTRM *arg2, NvU32 arg3); + +void dceclientDestruct_IMPL(struct OBJDCECLIENTRM *arg1); + +#define __nvoc_dceclientDestruct(arg1) dceclientDestruct_IMPL(arg1) +NV_STATUS dceclientInitRpcInfra_IMPL(struct OBJGPU *arg1, struct OBJDCECLIENTRM *arg2); + +#ifdef __nvoc_dce_client_h_disabled +static inline NV_STATUS dceclientInitRpcInfra(struct OBJGPU *arg1, struct OBJDCECLIENTRM *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_dce_client_h_disabled +#define dceclientInitRpcInfra(arg1, arg2) dceclientInitRpcInfra_IMPL(arg1, arg2) +#endif //__nvoc_dce_client_h_disabled + +void dceclientDeinitRpcInfra_IMPL(struct OBJDCECLIENTRM *arg1); + +#ifdef __nvoc_dce_client_h_disabled +static inline void dceclientDeinitRpcInfra(struct OBJDCECLIENTRM *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!"); +} +#else //__nvoc_dce_client_h_disabled +#define dceclientDeinitRpcInfra(arg1) dceclientDeinitRpcInfra_IMPL(arg1) +#endif //__nvoc_dce_client_h_disabled + +NV_STATUS dceclientDceRmInit_IMPL(struct OBJGPU *arg1, struct OBJDCECLIENTRM *arg2, NvBool arg3); + +#ifdef __nvoc_dce_client_h_disabled +static inline NV_STATUS dceclientDceRmInit(struct OBJGPU *arg1, struct OBJDCECLIENTRM *arg2, NvBool arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_dce_client_h_disabled +#define dceclientDceRmInit(arg1, arg2, arg3) dceclientDceRmInit_IMPL(arg1, arg2, arg3) +#endif //__nvoc_dce_client_h_disabled + +NV_STATUS dceclientSendRpc_IMPL(struct OBJDCECLIENTRM *arg1, void *arg2, NvU32 arg3); + +#ifdef 
__nvoc_dce_client_h_disabled +static inline NV_STATUS dceclientSendRpc(struct OBJDCECLIENTRM *arg1, void *arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_dce_client_h_disabled +#define dceclientSendRpc(arg1, arg2, arg3) dceclientSendRpc_IMPL(arg1, arg2, arg3) +#endif //__nvoc_dce_client_h_disabled + +#undef PRIVATE_FIELD + + +NV_STATUS rpcRmApiControl_dce(RM_API *pRmApi, + NvHandle hClient, NvHandle hObject, + NvU32 cmd, void *pParamStructPtr, + NvU32 paramsSize); +NV_STATUS rpcRmApiAlloc_dce(RM_API *pRmApi, + NvHandle hClient, NvHandle hParent, + NvHandle hObject, NvU32 hClass, + void *pAllocParams, NvU32 allocParamsSize); +NV_STATUS rpcRmApiDupObject_dce(RM_API *pRmApi, NvHandle hClient, + NvHandle hParent, NvHandle *phObject, NvHandle hClientSrc, + NvHandle hObjectSrc, NvU32 flags); +NV_STATUS rpcRmApiFree_dce(RM_API *pRmApi, NvHandle hClient, NvHandle hObject); +NV_STATUS rpcDceRmInit_dce(RM_API *pRmApi, NvBool bInit); +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_DCE_CLIENT_NVOC_H_ diff --git a/src/nvidia/generated/g_device_nvoc.c b/src/nvidia/generated/g_device_nvoc.c new file mode 100644 index 0000000..684b3e2 --- /dev/null +++ b/src/nvidia/generated/g_device_nvoc.c @@ -0,0 +1,826 @@ +#define NVOC_DEVICE_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_device_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xe0ac20 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Device; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +// Forward declarations for Device +void __nvoc_init__GpuResource(GpuResource*); +void __nvoc_init__Device(Device*, RmHalspecOwner *pRmhalspecowner); +void __nvoc_init_funcTable_Device(Device*, RmHalspecOwner *pRmhalspecowner); +NV_STATUS __nvoc_ctor_Device(Device*, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_Device(Device*, RmHalspecOwner *pRmhalspecowner); +void __nvoc_dtor_Device(Device*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__Device; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__Device; + +// Down-thunk(s) to bridge Device methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void 
__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_down_thunk_Device_gpuresControl(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_down_thunk_Device_gpuresInternalControlForward(struct GpuResource *pDevice, NvU32 command, void *pParams, NvU32 size); // this + +// Up-thunk(s) to bridge Device methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2 +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2 +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2 +NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource 
*pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource); // super +void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference); // super +NV_STATUS __nvoc_up_thunk_GpuResource_deviceMap(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_GpuResource_deviceUnmap(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_GpuResource_deviceShareCallback(struct Device *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_GpuResource_deviceGetRegBaseOffsetAndSize(struct Device *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // this +NV_STATUS __nvoc_up_thunk_GpuResource_deviceGetMapAddrSpace(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this +NvHandle __nvoc_up_thunk_GpuResource_deviceGetInternalObjectHandle(struct Device *pGpuResource); // this +NvBool __nvoc_up_thunk_RmResource_deviceAccessCallback(struct Device *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NV_STATUS __nvoc_up_thunk_RmResource_deviceGetMemInterMapParams(struct Device *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS 
__nvoc_up_thunk_RmResource_deviceCheckMemInterUnmap(struct Device *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_deviceGetMemoryMappingDescriptor(struct Device *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_deviceControlSerialization_Prologue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_deviceControlSerialization_Epilogue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_deviceControl_Prologue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_deviceControl_Epilogue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_deviceCanCopy(struct Device *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_deviceIsDuplicate(struct Device *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_devicePreDestruct(struct Device *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_deviceControlFilter(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_deviceIsPartialUnmapSupported(struct Device *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_deviceMapTo(struct Device *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_deviceUnmapFrom(struct Device *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_deviceGetRefCount(struct Device *pResource); // this +void __nvoc_up_thunk_RsResource_deviceAddAdditionalDependants(struct RsClient *pClient, struct Device *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_Device = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Device), + /*classId=*/ classId(Device), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Device", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Device, + /*pCastInfo=*/ &__nvoc_castinfo__Device, + /*pExportInfo=*/ &__nvoc_export_info__Device +}; + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetClasslist_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*flags=*/ 0x10bu, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800201u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetClasslist" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x509u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetNumSubdevices_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x509u) + /*flags=*/ 0x509u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800280u, + /*paramSize=*/ 
sizeof(NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetNumSubdevices" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + /*flags=*/ 0x5u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800287u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuModifyGpuSwStatePersistence" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800288u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuQueryGpuSwStatePersistence" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetVirtualizationMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800289u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetVirtualizationMode" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuVirtualizationSwitchToVga_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800290u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuVirtualizationSwitchToVga" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1010bu) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetClasslistV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1010bu) + /*flags=*/ 0x1010bu, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800292u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetClasslistV2" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*flags=*/ 0x10bu, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800293u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetFindSubDeviceHandle" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40049u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetBrandCaps_DISPATCH, +#endif // 
+ /*flags=*/ 0x40049u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x800294u,
+ /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "deviceCtrlCmdGpuGetBrandCaps"
+#endif
+ },
+ { /* [9] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+ /*flags=*/ 0x44u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x800296u,
+ /*paramSize=*/ sizeof(NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "deviceCtrlCmdGpuSetVgpuVfBar1Size"
+#endif
+ },
+ { /* [10] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuSetVgpuHeterogeneousMode_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+ /*flags=*/ 0x8u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x800297u,
+ /*paramSize=*/ sizeof(NV0080_CTRL_GPU_SET_VGPU_HETEROGENEOUS_MODE_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "deviceCtrlCmdGpuSetVgpuHeterogeneousMode"
+#endif
+ },
+ { /* [11] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetVgpuHeterogeneousMode_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+ /*flags=*/ 0x8u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x800298u,
+ /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_VGPU_HETEROGENEOUS_MODE_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "deviceCtrlCmdGpuGetVgpuHeterogeneousMode"
+#endif
+ },
+ { /* [12] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) deviceCtrlCmdOsUnixVTSwitch_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
+ /*flags=*/ 0x1u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x801e01u,
+ /*paramSize=*/ sizeof(NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "deviceCtrlCmdOsUnixVTSwitch"
+#endif
+ },
+ { /* [13] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) deviceCtrlCmdOsUnixVTGetFBInfo_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
+ /*flags=*/ 0x1u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x801e02u,
+ /*paramSize=*/ sizeof(NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "deviceCtrlCmdOsUnixVTGetFBInfo"
+#endif
+ },
+
+};
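Editor's note: the table above is consumed at control-call time through __nvoc_export_info__Device, defined later in this file. Below is a minimal sketch of how a dispatcher could resolve a command against such a table; the helper name, the NVOC_EXPORTED_METHOD_DEF type name, and the exact field layout are assumptions, and only the fields visible in the initializers (pFunc, flags, accessRight, methodId, paramSize, pClassInfo) are taken from the generated code.

/* Hypothetical sketch, not part of the patch. */
static const struct NVOC_EXPORTED_METHOD_DEF *
exampleExportedMethodLookup(const struct NVOC_EXPORT_INFO *pInfo, NvU32 cmd, NvU32 paramSize)
{
    NvU32 i;
    for (i = 0; i < pInfo->numEntries; i++)
    {
        const struct NVOC_EXPORTED_METHOD_DEF *pEntry = &pInfo->pExportEntries[i];

        // Entries compiled out by NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG keep
        // their methodId but carry a NULL pFunc, so they must be skipped.
        if (pEntry->methodId == cmd && pEntry->pFunc != NULL && pEntry->paramSize == paramSize)
            return pEntry;
    }
    return NULL; // unknown or disabled control command
}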
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+static const struct NVOC_METADATA__Device __nvoc_metadata__Device = {
+ .rtti.pClassDef = &__nvoc_class_def_Device, // (device) this
+ .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Device,
+ .rtti.offset = 0,
+ .metadata__GpuResource.rtti.pClassDef = &__nvoc_class_def_GpuResource, // (gpures) super
+ .metadata__GpuResource.rtti.dtor = &__nvoc_destructFromBase,
+ .metadata__GpuResource.rtti.offset = NV_OFFSETOF(Device, __nvoc_base_GpuResource),
+ .metadata__GpuResource.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2
+ .metadata__GpuResource.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase,
+ .metadata__GpuResource.metadata__RmResource.rtti.offset = NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource),
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase,
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase,
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
+ .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3
+ .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase,
+ .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
+
+ .vtable.__deviceControl__ = &deviceControl_IMPL, // virtual override (res) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresControl__ = &__nvoc_down_thunk_Device_gpuresControl, // virtual override (res) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_GpuResource_resControl, // virtual
+ .vtable.__deviceInternalControlForward__ = &deviceInternalControlForward_IMPL, // virtual override (gpures) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresInternalControlForward__ = &__nvoc_down_thunk_Device_gpuresInternalControlForward, // virtual
+ .vtable.__deviceMap__ = &__nvoc_up_thunk_GpuResource_deviceMap, // virtual inherited (gpures) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresMap__ = &gpuresMap_IMPL, // virtual override (res) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_GpuResource_resMap, // virtual
+ .vtable.__deviceUnmap__ = &__nvoc_up_thunk_GpuResource_deviceUnmap, // virtual inherited (gpures) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresUnmap__ = &gpuresUnmap_IMPL, // virtual override (res) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_GpuResource_resUnmap, // virtual
+ .vtable.__deviceShareCallback__ = &__nvoc_up_thunk_GpuResource_deviceShareCallback, // virtual inherited (gpures) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresShareCallback__ = &gpuresShareCallback_IMPL, // virtual override (res) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresShareCallback__ = &__nvoc_down_thunk_GpuResource_rmresShareCallback, // virtual override (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual
+ .vtable.__deviceGetRegBaseOffsetAndSize__ = &__nvoc_up_thunk_GpuResource_deviceGetRegBaseOffsetAndSize, // virtual inherited (gpures) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresGetRegBaseOffsetAndSize__ = &gpuresGetRegBaseOffsetAndSize_IMPL, // virtual
+ .vtable.__deviceGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_deviceGetMapAddrSpace, // virtual inherited (gpures) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL, // virtual
+ .vtable.__deviceGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_deviceGetInternalObjectHandle, // virtual inherited (gpures) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL, // virtual
+ .vtable.__deviceAccessCallback__ = &__nvoc_up_thunk_RmResource_deviceAccessCallback, // virtual inherited (rmres) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresAccessCallback__ = &__nvoc_up_thunk_RmResource_gpuresAccessCallback, // virtual inherited (rmres) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual
+ .vtable.__deviceGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_deviceGetMemInterMapParams, // virtual inherited (rmres) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams, // virtual inherited (rmres) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual
+ .vtable.__deviceCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_deviceCheckMemInterUnmap, // virtual inherited (rmres) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap, // virtual inherited (rmres) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual
+ .vtable.__deviceGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_deviceGetMemoryMappingDescriptor, // virtual inherited (rmres) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual
+ .vtable.__deviceControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_deviceControlSerialization_Prologue, // virtual inherited (rmres) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue, // virtual inherited (rmres) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual
+ .vtable.__deviceControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_deviceControlSerialization_Epilogue, // virtual inherited (rmres) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual
+ .vtable.__deviceControl_Prologue__ = &__nvoc_up_thunk_RmResource_deviceControl_Prologue, // virtual inherited (rmres) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Prologue, // virtual inherited (rmres) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual
+ .vtable.__deviceControl_Epilogue__ = &__nvoc_up_thunk_RmResource_deviceControl_Epilogue, // virtual inherited (rmres) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Epilogue, // virtual inherited (rmres) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual
+ .vtable.__deviceCanCopy__ = &__nvoc_up_thunk_RsResource_deviceCanCopy, // virtual inherited (res) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresCanCopy__ = &__nvoc_up_thunk_RsResource_gpuresCanCopy, // virtual inherited (res) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual
+ .vtable.__deviceIsDuplicate__ = &__nvoc_up_thunk_RsResource_deviceIsDuplicate, // virtual inherited (res) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpuresIsDuplicate, // virtual inherited (res) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual
+ .vtable.__devicePreDestruct__ = &__nvoc_up_thunk_RsResource_devicePreDestruct, // virtual inherited (res) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresPreDestruct__ = &__nvoc_up_thunk_RsResource_gpuresPreDestruct, // virtual inherited (res) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual
+ .vtable.__deviceControlFilter__ = &__nvoc_up_thunk_RsResource_deviceControlFilter, // virtual inherited (res) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresControlFilter__ = &__nvoc_up_thunk_RsResource_gpuresControlFilter, // virtual inherited (res) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual
+ .vtable.__deviceIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_deviceIsPartialUnmapSupported, // inline virtual inherited (res) base (gpures) body
+ .metadata__GpuResource.vtable.__gpuresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body
+ .vtable.__deviceMapTo__ = &__nvoc_up_thunk_RsResource_deviceMapTo, // virtual inherited (res) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresMapTo__ = &__nvoc_up_thunk_RsResource_gpuresMapTo, // virtual inherited (res) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual
+ .vtable.__deviceUnmapFrom__ = &__nvoc_up_thunk_RsResource_deviceUnmapFrom, // virtual inherited (res) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpuresUnmapFrom, // virtual inherited (res) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual
+ .vtable.__deviceGetRefCount__ = &__nvoc_up_thunk_RsResource_deviceGetRefCount, // virtual inherited (res) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresGetRefCount__ = &__nvoc_up_thunk_RsResource_gpuresGetRefCount, // virtual inherited (res) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual
+ .vtable.__deviceAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_deviceAddAdditionalDependants, // virtual inherited (res) base (gpures)
+ .metadata__GpuResource.vtable.__gpuresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants, // virtual inherited (res) base (rmres)
+ .metadata__GpuResource.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res)
+ .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual
+};
+
+
+// Dynamic down-casting information
+const struct NVOC_CASTINFO __nvoc_castinfo__Device = {
+ .numRelatives = 6,
+ .relatives = {
+ &__nvoc_metadata__Device.rtti, // [0]: (device) this
+ &__nvoc_metadata__Device.metadata__GpuResource.rtti, // [1]: (gpures) super
+ &__nvoc_metadata__Device.metadata__GpuResource.metadata__RmResource.rtti, // [2]: (rmres) super^2
+ &__nvoc_metadata__Device.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3
+ &__nvoc_metadata__Device.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4
+ &__nvoc_metadata__Device.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3
+ }
+};
+
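Editor's note: a minimal sketch of how a dynamic down-cast can be answered from the castinfo table above: scan the relatives for a matching class definition, then rebase the object pointer by that relative's recorded offset. The helper name and the direct NVOC_RTTI field access are assumptions for illustration only.

/* Hypothetical sketch, not part of the patch. pObj is assumed to point at
 * the most-derived object (here, the Device). */
static void *exampleDynamicCast(void *pObj, const struct NVOC_CASTINFO *pCastInfo,
                                const struct NVOC_CLASS_DEF *pTargetClass)
{
    NvU32 i;
    for (i = 0; i < pCastInfo->numRelatives; i++)
    {
        const struct NVOC_RTTI *pRtti = pCastInfo->relatives[i];

        // rtti.offset records where each ancestor lives inside Device, so
        // the cast is just a byte-offset rebase of the object pointer.
        if (pRtti->pClassDef == pTargetClass)
            return (void *)((unsigned char *)pObj + pRtti->offset);
    }
    return NULL; // pTargetClass is neither this class nor an ancestor
}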
+// 2 down-thunk(s) defined to bridge methods in Device from superclasses
+
+// deviceControl: virtual override (res) base (gpures)
+NV_STATUS __nvoc_down_thunk_Device_gpuresControl(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ return deviceControl((struct Device *)(((unsigned char *) pResource) - NV_OFFSETOF(Device, __nvoc_base_GpuResource)), pCallContext, pParams);
+}
+
+// deviceInternalControlForward: virtual override (gpures) base (gpures)
+NV_STATUS __nvoc_down_thunk_Device_gpuresInternalControlForward(struct GpuResource *pDevice, NvU32 command, void *pParams, NvU32 size) {
+ return deviceInternalControlForward((struct Device *)(((unsigned char *) pDevice) - NV_OFFSETOF(Device, __nvoc_base_GpuResource)), command, pParams, size);
+}
+
+
+// 23 up-thunk(s) defined to bridge methods in Device to superclasses
+
+// deviceMap: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_deviceMap(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
+ return gpuresMap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource)), pCallContext, pParams, pCpuMapping);
+}
+
+// deviceUnmap: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_deviceUnmap(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
+ return gpuresUnmap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource)), pCallContext, pCpuMapping);
+}
+
+// deviceShareCallback: virtual inherited (gpures) base (gpures)
+NvBool __nvoc_up_thunk_GpuResource_deviceShareCallback(struct Device *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+ return gpuresShareCallback((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource)), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+// deviceGetRegBaseOffsetAndSize: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_deviceGetRegBaseOffsetAndSize(struct Device *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
+ return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource)), pGpu, pOffset, pSize);
+}
+
+// deviceGetMapAddrSpace: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_deviceGetMapAddrSpace(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
+ return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource)), pCallContext, mapFlags, pAddrSpace);
+}
+
+// deviceGetInternalObjectHandle: virtual inherited (gpures) base (gpures)
+NvHandle __nvoc_up_thunk_GpuResource_deviceGetInternalObjectHandle(struct Device *pGpuResource) {
+ return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource)));
+}
+
+// deviceAccessCallback: virtual inherited (rmres) base (gpures)
+NvBool __nvoc_up_thunk_RmResource_deviceAccessCallback(struct Device *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+ return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight);
+}
+
+// deviceGetMemInterMapParams: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_deviceGetMemInterMapParams(struct Device *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+ return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pParams);
+}
+
+// deviceCheckMemInterUnmap: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_deviceCheckMemInterUnmap(struct Device *pRmResource, NvBool bSubdeviceHandleProvided) {
+ return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource)), bSubdeviceHandleProvided);
+}
+
+// deviceGetMemoryMappingDescriptor: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_deviceGetMemoryMappingDescriptor(struct Device *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+ return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource)), ppMemDesc);
+}
+
+// deviceControlSerialization_Prologue: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_deviceControlSerialization_Prologue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// deviceControlSerialization_Epilogue: virtual inherited (rmres) base (gpures)
+void __nvoc_up_thunk_RmResource_deviceControlSerialization_Epilogue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// deviceControl_Prologue: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_deviceControl_Prologue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// deviceControl_Epilogue: virtual inherited (rmres) base (gpures)
+void __nvoc_up_thunk_RmResource_deviceControl_Epilogue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// deviceCanCopy: virtual inherited (res) base (gpures)
+NvBool __nvoc_up_thunk_RsResource_deviceCanCopy(struct Device *pResource) {
+ return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// deviceIsDuplicate: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_deviceIsDuplicate(struct Device *pResource, NvHandle hMemory, NvBool *pDuplicate) {
+ return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate);
+}
+
+// devicePreDestruct: virtual inherited (res) base (gpures)
+void __nvoc_up_thunk_RsResource_devicePreDestruct(struct Device *pResource) {
+ resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// deviceControlFilter: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_deviceControlFilter(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// deviceIsPartialUnmapSupported: inline virtual inherited (res) base (gpures) body
+NvBool __nvoc_up_thunk_RsResource_deviceIsPartialUnmapSupported(struct Device *pResource) {
+ return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// deviceMapTo: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_deviceMapTo(struct Device *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+ return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// deviceUnmapFrom: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_deviceUnmapFrom(struct Device *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+ return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// deviceGetRefCount: virtual inherited (res) base (gpures)
+NvU32 __nvoc_up_thunk_RsResource_deviceGetRefCount(struct Device *pResource) {
+ return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// deviceAddAdditionalDependants: virtual inherited (res) base (gpures)
+void __nvoc_up_thunk_RsResource_deviceAddAdditionalDependants(struct RsClient *pClient, struct Device *pResource, RsResourceRef *pReference) {
+ resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference);
+}
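Editor's note: every thunk above is the same two-line idiom: rebase the object pointer across the embedded-base boundary, then tail-call the target method. Reduced to a self-contained toy below, with hypothetical names and assuming <stddef.h> for offsetof.

#include <stddef.h>

struct Base    { int x; };
struct Derived { struct Base base; int y; };

/* Up direction (derived -> base): add the member offset,
 * as in the "+ NV_OFFSETOF(...)" up-thunks. */
static struct Base *derivedToBase(struct Derived *pDerived)
{
    return (struct Base *)((unsigned char *)pDerived + offsetof(struct Derived, base));
}

/* Down direction (base -> derived): subtract the member offset,
 * as in the "- NV_OFFSETOF(...)" down-thunks. */
static struct Derived *baseToDerived(struct Base *pBase)
{
    return (struct Derived *)((unsigned char *)pBase - offsetof(struct Derived, base));
}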
+
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__Device =
+{
+ /*numEntries=*/ 14,
+ /*pExportEntries=*/ __nvoc_exported_method_def_Device
+};
+
+void __nvoc_dtor_GpuResource(GpuResource*);
+void __nvoc_dtor_Device(Device *pThis) {
+ __nvoc_deviceDestruct(pThis);
+ __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+ PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_Device(Device *pThis, RmHalspecOwner *pRmhalspecowner) {
+ RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
+ const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
+ PORT_UNREFERENCED_VARIABLE(pThis);
+ PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
+ PORT_UNREFERENCED_VARIABLE(rmVariantHal);
+ PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
+}
+
+NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_Device(Device *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+ NV_STATUS status = NV_OK;
+ status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
+ if (status != NV_OK) goto __nvoc_ctor_Device_fail_GpuResource;
+ __nvoc_init_dataField_Device(pThis, pRmhalspecowner);
+
+ status = __nvoc_deviceConstruct(pThis, arg_pCallContext, arg_pParams);
+ if (status != NV_OK) goto __nvoc_ctor_Device_fail__init;
+ goto __nvoc_ctor_Device_exit; // Success
+
+__nvoc_ctor_Device_fail__init:
+ __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+__nvoc_ctor_Device_fail_GpuResource:
+__nvoc_ctor_Device_exit:
+
+ return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_Device_1(Device *pThis, RmHalspecOwner *pRmhalspecowner) {
+ RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
+ const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
+ PORT_UNREFERENCED_VARIABLE(pThis);
+ PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
+ PORT_UNREFERENCED_VARIABLE(rmVariantHal);
+ PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
+
+ // deviceCtrlCmdGpuGetClasslist -- exported (id=0x800201)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu)
+ pThis->__deviceCtrlCmdGpuGetClasslist__ = &deviceCtrlCmdGpuGetClasslist_IMPL;
+#endif
+
+ // deviceCtrlCmdGpuGetClasslistV2 -- exported (id=0x800292)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1010bu)
+ pThis->__deviceCtrlCmdGpuGetClasslistV2__ = &deviceCtrlCmdGpuGetClasslistV2_IMPL;
+#endif
+
+ // deviceCtrlCmdGpuGetNumSubdevices -- exported (id=0x800280)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x509u)
+ pThis->__deviceCtrlCmdGpuGetNumSubdevices__ = &deviceCtrlCmdGpuGetNumSubdevices_IMPL;
+#endif
+
+ // deviceCtrlCmdGpuModifyGpuSwStatePersistence -- exported (id=0x800287)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u)
+ pThis->__deviceCtrlCmdGpuModifyGpuSwStatePersistence__ = &deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL;
+#endif
+
+ // deviceCtrlCmdGpuQueryGpuSwStatePersistence -- exported (id=0x800288)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+ pThis->__deviceCtrlCmdGpuQueryGpuSwStatePersistence__ = &deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL;
+#endif
+
+ // deviceCtrlCmdGpuGetVirtualizationMode -- exported (id=0x800289)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+ pThis->__deviceCtrlCmdGpuGetVirtualizationMode__ = &deviceCtrlCmdGpuGetVirtualizationMode_IMPL;
+#endif
+
+ // deviceCtrlCmdGpuSetVgpuVfBar1Size -- exported (id=0x800296)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+ pThis->__deviceCtrlCmdGpuSetVgpuVfBar1Size__ = &deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL;
+#endif
+
+ // deviceCtrlCmdGpuGetBrandCaps -- halified (singleton optimized) exported (id=0x800294) body
+ pThis->__deviceCtrlCmdGpuGetBrandCaps__ = &deviceCtrlCmdGpuGetBrandCaps_5baef9;
+
+ // deviceCtrlCmdGpuVirtualizationSwitchToVga -- exported (id=0x800290)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+ pThis->__deviceCtrlCmdGpuVirtualizationSwitchToVga__ = &deviceCtrlCmdGpuVirtualizationSwitchToVga_IMPL;
+#endif
+
+ // deviceCtrlCmdGpuSetVgpuHeterogeneousMode -- exported (id=0x800297)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+ pThis->__deviceCtrlCmdGpuSetVgpuHeterogeneousMode__ = &deviceCtrlCmdGpuSetVgpuHeterogeneousMode_IMPL;
+#endif
+
+ // deviceCtrlCmdGpuGetVgpuHeterogeneousMode -- exported (id=0x800298)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+ pThis->__deviceCtrlCmdGpuGetVgpuHeterogeneousMode__ = &deviceCtrlCmdGpuGetVgpuHeterogeneousMode_IMPL;
+#endif
+
+ // deviceCtrlCmdGpuGetFindSubDeviceHandle -- exported (id=0x800293)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu)
+ pThis->__deviceCtrlCmdGpuGetFindSubDeviceHandle__ = &deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL;
+#endif
+
+ // deviceCtrlCmdOsUnixVTSwitch -- exported (id=0x801e01)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
+ pThis->__deviceCtrlCmdOsUnixVTSwitch__ = &deviceCtrlCmdOsUnixVTSwitch_IMPL;
+#endif
+
+ // deviceCtrlCmdOsUnixVTGetFBInfo -- exported (id=0x801e02)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
+ pThis->__deviceCtrlCmdOsUnixVTGetFBInfo__ = &deviceCtrlCmdOsUnixVTGetFBInfo_IMPL;
+#endif
+} // End __nvoc_init_funcTable_Device_1 with approximately 14 basic block(s).
+
+
+// Initialize vtable(s) for 39 virtual method(s).
+void __nvoc_init_funcTable_Device(Device *pThis, RmHalspecOwner *pRmhalspecowner) {
+
+ // Initialize vtable(s) with 14 per-object function pointer(s).
+ __nvoc_init_funcTable_Device_1(pThis, pRmhalspecowner);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__Device(Device *pThis, RmHalspecOwner *pRmhalspecowner) {
+
+ // Initialize pointers to inherited data.
+ pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4
+ pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3
+ pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3
+ pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; // (rmres) super^2
+ pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; // (gpures) super
+ pThis->__nvoc_pbase_Device = pThis; // (device) this
+
+ // Recurse to superclass initialization function(s).
+ __nvoc_init__GpuResource(&pThis->__nvoc_base_GpuResource);
+
+ // Pointer(s) to metadata structures(s)
+ pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__Device.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4
+ pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__Device.metadata__GpuResource.metadata__RmResource.metadata__RsResource; // (res) super^3
+ pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__Device.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3
+ pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__Device.metadata__GpuResource.metadata__RmResource; // (rmres) super^2
+ pThis->__nvoc_base_GpuResource.__nvoc_metadata_ptr = &__nvoc_metadata__Device.metadata__GpuResource; // (gpures) super
+ pThis->__nvoc_metadata_ptr = &__nvoc_metadata__Device; // (device) this
+
+ // Initialize per-object vtables.
+ __nvoc_init_funcTable_Device(pThis, pRmhalspecowner);
+}
+
+NV_STATUS __nvoc_objCreate_Device(Device **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+ NV_STATUS status;
+ Object *pParentObj = NULL;
+ Device *pThis;
+ RmHalspecOwner *pRmhalspecowner;
+
+ // Assign `pThis`, allocating memory unless suppressed by flag.
+ status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(Device), (void**)&pThis, (void**)ppThis);
+ if (status != NV_OK)
+ return status;
+
+ // Zero is the initial value for everything.
+ portMemSet(pThis, 0, sizeof(Device));
+
+ pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+ // pParent must be a valid object that derives from a halspec owner class.
+ NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_ARGUMENT);
+
+ // Link the child into the parent unless flagged not to do so.
+ if (!(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+ {
+ pParentObj = dynamicCast(pParent, Object);
+ objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+ }
+ else
+ {
+ pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+ }
+
+ if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL)
+ pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent);
+ NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT);
+
+ __nvoc_init__Device(pThis, pRmhalspecowner);
+ status = __nvoc_ctor_Device(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams);
+ if (status != NV_OK) goto __nvoc_objCreate_Device_cleanup;
+
+ // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+ *ppThis = pThis;
+
+ return NV_OK;
+
+__nvoc_objCreate_Device_cleanup:
+
+ // Unlink the child from the parent if it was linked above.
+ if (pParentObj != NULL)
+ objRemoveChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+ // Do not call destructors here since the constructor already called them.
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+ portMemSet(pThis, 0, sizeof(Device));
+ else
+ {
+ portMemFree(pThis);
+ *ppThis = NULL;
+ }
+
+ // coverity[leaked_storage:FALSE]
+ return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_Device(Device **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+ NV_STATUS status;
+ struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+ struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+ status = __nvoc_objCreate_Device(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+ return status;
+}
+
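Editor's note: a minimal caller-side sketch of the allocation entry point defined above. The parent object and parameter values are hypothetical; only the __nvoc_objCreate_Device signature and its cleanup behavior are taken from the patch.

/* Hypothetical sketch, not part of the patch. */
static NV_STATUS exampleCreateDevice(Dynamic *pParent,
                                     struct CALL_CONTEXT *pCallContext,
                                     struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams)
{
    Device *pDevice = NULL;

    // createFlags = 0: heap-allocate and link the new child under pParent.
    NV_STATUS status = __nvoc_objCreate_Device(&pDevice, pParent, 0, pCallContext, pParams);
    if (status != NV_OK)
        return status; // the cleanup label above already freed (or zeroed) the object

    // ... use pDevice; teardown eventually runs __nvoc_dtor_Device ...
    return NV_OK;
}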
diff --git a/src/nvidia/generated/g_device_nvoc.h b/src/nvidia/generated/g_device_nvoc.h
new file mode 100644
index 0000000..336d179
--- /dev/null
+++ b/src/nvidia/generated/g_device_nvoc.h
@@ -0,0 +1,594 @@
+
+#ifndef _G_DEVICE_NVOC_H_
+#define _G_DEVICE_NVOC_H_
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+#include "g_device_nvoc.h"
+
+#ifndef _DEVICE_H_
+#define _DEVICE_H_
+
+#include "core/core.h"
+
+#include "resserv/resserv.h"
+#include "nvoc/prelude.h"
+#include "nvoc/utility.h"
+#include "resserv/rs_resource.h"
+#include "rmapi/control.h"
+#include "containers/btree.h"
+
+#include "gpu/gpu_halspec.h"
+#include "gpu/gpu_resource.h"
+#include "mem_mgr/vaspace.h"
+
+#include "ctrl/ctrl0080.h" // rmcontrol params
+
+// Forward declaration
+struct KERNEL_HOST_VGPU_DEVICE;
+
+struct OBJVASPACE;
+
+#ifndef __NVOC_CLASS_OBJVASPACE_TYPEDEF__
+#define __NVOC_CLASS_OBJVASPACE_TYPEDEF__
+typedef struct OBJVASPACE OBJVASPACE;
+#endif /* __NVOC_CLASS_OBJVASPACE_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJVASPACE
+#define __nvoc_class_id_OBJVASPACE 0x6c347f
+#endif /* __nvoc_class_id_OBJVASPACE */
+
+
+
+/**
+ * A device consists of one or more GPUs. Devices provide broadcast
+ * semantics; that is, operations involving a device are applied to all GPUs
+ * in the device.
+ */
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_DEVICE_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
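Editor's note: the exact definition of NVOC_PRIVATE_FIELD is not shown in this patch; name mangling is one common way such a macro produces diagnostics, and it is only an assumption in the sketch below.

/* Hypothetical sketch, not part of the patch. */
// #define NVOC_PRIVATE_FIELD(x)  x##_PRIVATE          /* assumed definition */
//
// struct Example { NvU32 PRIVATE_FIELD(secret); };
//
// In the matching C source file, which defines the _PRIVATE_ACCESS_ALLOWED
// macro before including the header, PRIVATE_FIELD(secret) expands to
// `secret`, so `pExample->secret` compiles. Any other source file sees the
// mangled name instead, and `pExample->secret` fails to compile.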
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__Device;
+struct NVOC_METADATA__GpuResource;
+struct NVOC_VTABLE__Device;
+
+
+struct Device {
+
+ // Metadata starts with RTTI structure.
+ union {
+ const struct NVOC_METADATA__Device *__nvoc_metadata_ptr;
+ const struct NVOC_RTTI *__nvoc_rtti;
+ };
+
+ // Parent (i.e. superclass or base class) objects
+ struct GpuResource __nvoc_base_GpuResource;
+
+ // Ancestor object pointers for `staticCast` feature
+ struct Object *__nvoc_pbase_Object; // obj super^4
+ struct RsResource *__nvoc_pbase_RsResource; // res super^3
+ struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3
+ struct RmResource *__nvoc_pbase_RmResource; // rmres super^2
+ struct GpuResource *__nvoc_pbase_GpuResource; // gpures super
+ struct Device *__nvoc_pbase_Device; // device
+
+ // Vtable with 14 per-object function pointers
+ NV_STATUS (*__deviceCtrlCmdGpuGetClasslist__)(struct Device * /*this*/, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *); // exported (id=0x800201)
+ NV_STATUS (*__deviceCtrlCmdGpuGetClasslistV2__)(struct Device * /*this*/, NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *); // exported (id=0x800292)
+ NV_STATUS (*__deviceCtrlCmdGpuGetNumSubdevices__)(struct Device * /*this*/, NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *); // exported (id=0x800280)
+ NV_STATUS (*__deviceCtrlCmdGpuModifyGpuSwStatePersistence__)(struct Device * /*this*/, NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *); // exported (id=0x800287)
+ NV_STATUS (*__deviceCtrlCmdGpuQueryGpuSwStatePersistence__)(struct Device * /*this*/, NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *); // exported (id=0x800288)
+ NV_STATUS (*__deviceCtrlCmdGpuGetVirtualizationMode__)(struct Device * /*this*/, NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *); // exported (id=0x800289)
+ NV_STATUS (*__deviceCtrlCmdGpuSetVgpuVfBar1Size__)(struct Device * /*this*/, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *); // exported (id=0x800296)
+ NV_STATUS (*__deviceCtrlCmdGpuGetBrandCaps__)(struct Device * /*this*/, NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *); // halified (singleton optimized) exported (id=0x800294) body
+ NV_STATUS (*__deviceCtrlCmdGpuVirtualizationSwitchToVga__)(struct Device * /*this*/); // exported (id=0x800290)
+ NV_STATUS (*__deviceCtrlCmdGpuSetVgpuHeterogeneousMode__)(struct Device * /*this*/, NV0080_CTRL_GPU_SET_VGPU_HETEROGENEOUS_MODE_PARAMS *); // exported (id=0x800297)
+ NV_STATUS (*__deviceCtrlCmdGpuGetVgpuHeterogeneousMode__)(struct Device * /*this*/, NV0080_CTRL_GPU_GET_VGPU_HETEROGENEOUS_MODE_PARAMS *); // exported (id=0x800298)
+ NV_STATUS (*__deviceCtrlCmdGpuGetFindSubDeviceHandle__)(struct Device * /*this*/, NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *); // exported (id=0x800293)
+ NV_STATUS (*__deviceCtrlCmdOsUnixVTSwitch__)(struct Device * /*this*/, NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *); // exported (id=0x801e01)
+ NV_STATUS (*__deviceCtrlCmdOsUnixVTGetFBInfo__)(struct Device * /*this*/, NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *); // exported (id=0x801e02)
+
+ // Data members
+ NvU32 deviceInst;
+ NvU32 PerfReqCnt;
+ PNODE DevMemoryTable;
+ NvBool bSliGpuBoostSyncActivate;
+ NvBool bPerfOptpActive;
+ NvU32 nPerfOptpRefCnt;
+ NvU32 nCudaLimitRefCnt;
+ struct OBJVASPACE *pVASpace;
+ NvHandle hClientShare;
+ NvHandle hTargetClient;
+ NvHandle hTargetDevice;
+ NvU32 deviceAllocFlags;
+ NvU32 deviceInternalAllocFlags;
+ NvU64 vaStartInternal;
+ NvU64 vaLimitInternal;
+ NvU64 vaSize;
+ NvU32 vaMode;
+ NvU32 defaultVidmemPhysicalityOverride;
+ struct KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
+};
+
+
+// Vtable with 25 per-class function pointers
+struct NVOC_VTABLE__Device {
+ NV_STATUS (*__deviceControl__)(struct Device * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual override (res) base (gpures)
+ NV_STATUS (*__deviceInternalControlForward__)(struct Device * /*this*/, NvU32, void *, NvU32); // virtual override (gpures) base (gpures)
+ NV_STATUS (*__deviceMap__)(struct Device * /*this*/, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures)
+ NV_STATUS (*__deviceUnmap__)(struct Device * /*this*/, struct CALL_CONTEXT *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures)
+ NvBool (*__deviceShareCallback__)(struct Device * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (gpures) base (gpures)
+ NV_STATUS (*__deviceGetRegBaseOffsetAndSize__)(struct Device * /*this*/, struct OBJGPU *, NvU32 *, NvU32 *); // virtual inherited (gpures) base (gpures)
+ NV_STATUS (*__deviceGetMapAddrSpace__)(struct Device * /*this*/, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (gpures) base (gpures)
+ NvHandle (*__deviceGetInternalObjectHandle__)(struct Device * /*this*/); // virtual inherited (gpures) base (gpures)
+ NvBool (*__deviceAccessCallback__)(struct Device * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (gpures)
+ NV_STATUS (*__deviceGetMemInterMapParams__)(struct Device * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (gpures)
+ NV_STATUS (*__deviceCheckMemInterUnmap__)(struct Device * /*this*/, NvBool); // virtual inherited (rmres) base (gpures)
+ NV_STATUS (*__deviceGetMemoryMappingDescriptor__)(struct Device * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (gpures)
+ NV_STATUS (*__deviceControlSerialization_Prologue__)(struct Device * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures)
+ void (*__deviceControlSerialization_Epilogue__)(struct Device * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures)
+ NV_STATUS (*__deviceControl_Prologue__)(struct Device * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures)
+ void (*__deviceControl_Epilogue__)(struct Device * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures)
+ NvBool (*__deviceCanCopy__)(struct Device * /*this*/); // virtual inherited (res) base (gpures)
+ NV_STATUS (*__deviceIsDuplicate__)(struct Device * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (gpures)
+ void (*__devicePreDestruct__)(struct Device * /*this*/); // virtual inherited (res) base (gpures)
+ NV_STATUS (*__deviceControlFilter__)(struct Device * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (gpures)
+ NvBool (*__deviceIsPartialUnmapSupported__)(struct Device * /*this*/); // inline virtual inherited (res) base (gpures) body
+ NV_STATUS (*__deviceMapTo__)(struct Device * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (gpures)
+ NV_STATUS (*__deviceUnmapFrom__)(struct Device * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (gpures)
+ NvU32 (*__deviceGetRefCount__)(struct Device * /*this*/); // virtual inherited (res) base (gpures)
+ void (*__deviceAddAdditionalDependants__)(struct RsClient *, struct Device * /*this*/, RsResourceRef *); // virtual inherited (res) base (gpures)
+};
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__Device {
+ const struct NVOC_RTTI rtti;
+ const struct NVOC_METADATA__GpuResource metadata__GpuResource;
+ const struct NVOC_VTABLE__Device vtable;
+};
+
+#ifndef __NVOC_CLASS_Device_TYPEDEF__
+#define __NVOC_CLASS_Device_TYPEDEF__
+typedef struct Device Device;
+#endif /* __NVOC_CLASS_Device_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Device
+#define __nvoc_class_id_Device 0xe0ac20
+#endif /* __nvoc_class_id_Device */
+
+// Casting support
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Device;
+
+#define __staticCast_Device(pThis) \
+ ((pThis)->__nvoc_pbase_Device)
+
+#ifdef __nvoc_device_h_disabled
+#define __dynamicCast_Device(pThis) ((Device*) NULL)
+#else //__nvoc_device_h_disabled
+#define __dynamicCast_Device(pThis) \
+ ((Device*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Device)))
+#endif //__nvoc_device_h_disabled
+
+NV_STATUS __nvoc_objCreateDynamic_Device(Device**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_Device(Device**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+#define __objCreate_Device(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
+ __nvoc_objCreate_Device((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
+
+
+// Wrapper macros
+#define deviceControl_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__deviceControl__
+#define deviceControl(pResource, pCallContext, pParams) deviceControl_DISPATCH(pResource, pCallContext, pParams)
+#define deviceInternalControlForward_FNPTR(pDevice) pDevice->__nvoc_metadata_ptr->vtable.__deviceInternalControlForward__
+#define deviceInternalControlForward(pDevice, command, pParams, size) deviceInternalControlForward_DISPATCH(pDevice, command, pParams, size)
+#define deviceCtrlCmdGpuGetClasslist_FNPTR(pDevice) pDevice->__deviceCtrlCmdGpuGetClasslist__
+#define deviceCtrlCmdGpuGetClasslist(pDevice, pClassListParams) deviceCtrlCmdGpuGetClasslist_DISPATCH(pDevice, pClassListParams)
+#define deviceCtrlCmdGpuGetClasslistV2_FNPTR(pDevice) pDevice->__deviceCtrlCmdGpuGetClasslistV2__
+#define deviceCtrlCmdGpuGetClasslistV2(pDevice, pParams) deviceCtrlCmdGpuGetClasslistV2_DISPATCH(pDevice, pParams)
+#define deviceCtrlCmdGpuGetNumSubdevices_FNPTR(pDevice) pDevice->__deviceCtrlCmdGpuGetNumSubdevices__
+#define deviceCtrlCmdGpuGetNumSubdevices(pDevice, pSubDeviceCountParams) deviceCtrlCmdGpuGetNumSubdevices_DISPATCH(pDevice, pSubDeviceCountParams)
+#define deviceCtrlCmdGpuModifyGpuSwStatePersistence_FNPTR(pDevice) pDevice->__deviceCtrlCmdGpuModifyGpuSwStatePersistence__
+#define deviceCtrlCmdGpuModifyGpuSwStatePersistence(pDevice, pParams) deviceCtrlCmdGpuModifyGpuSwStatePersistence_DISPATCH(pDevice, pParams)
+#define deviceCtrlCmdGpuQueryGpuSwStatePersistence_FNPTR(pDevice) pDevice->__deviceCtrlCmdGpuQueryGpuSwStatePersistence__
+#define deviceCtrlCmdGpuQueryGpuSwStatePersistence(pDevice, pParams) deviceCtrlCmdGpuQueryGpuSwStatePersistence_DISPATCH(pDevice, pParams)
+#define deviceCtrlCmdGpuGetVirtualizationMode_FNPTR(pDevice) pDevice->__deviceCtrlCmdGpuGetVirtualizationMode__
+#define deviceCtrlCmdGpuGetVirtualizationMode(pDevice, pParams) deviceCtrlCmdGpuGetVirtualizationMode_DISPATCH(pDevice, pParams)
+#define deviceCtrlCmdGpuSetVgpuVfBar1Size_FNPTR(pDevice) pDevice->__deviceCtrlCmdGpuSetVgpuVfBar1Size__
+#define deviceCtrlCmdGpuSetVgpuVfBar1Size(pDevice, pParams) deviceCtrlCmdGpuSetVgpuVfBar1Size_DISPATCH(pDevice, pParams)
+#define deviceCtrlCmdGpuGetBrandCaps_FNPTR(pDevice) pDevice->__deviceCtrlCmdGpuGetBrandCaps__
+#define deviceCtrlCmdGpuGetBrandCaps(pDevice, pParams) deviceCtrlCmdGpuGetBrandCaps_DISPATCH(pDevice, pParams)
+#define deviceCtrlCmdGpuGetBrandCaps_HAL(pDevice, pParams) deviceCtrlCmdGpuGetBrandCaps_DISPATCH(pDevice, pParams)
+#define deviceCtrlCmdGpuVirtualizationSwitchToVga_FNPTR(pDevice) pDevice->__deviceCtrlCmdGpuVirtualizationSwitchToVga__
+#define deviceCtrlCmdGpuVirtualizationSwitchToVga(pDevice) deviceCtrlCmdGpuVirtualizationSwitchToVga_DISPATCH(pDevice)
+#define deviceCtrlCmdGpuSetVgpuHeterogeneousMode_FNPTR(pDevice) pDevice->__deviceCtrlCmdGpuSetVgpuHeterogeneousMode__
+#define deviceCtrlCmdGpuSetVgpuHeterogeneousMode(pDevice, pParams) deviceCtrlCmdGpuSetVgpuHeterogeneousMode_DISPATCH(pDevice, pParams)
+#define deviceCtrlCmdGpuGetVgpuHeterogeneousMode_FNPTR(pDevice) pDevice->__deviceCtrlCmdGpuGetVgpuHeterogeneousMode__
+#define deviceCtrlCmdGpuGetVgpuHeterogeneousMode(pDevice, pParams) deviceCtrlCmdGpuGetVgpuHeterogeneousMode_DISPATCH(pDevice, pParams)
+#define deviceCtrlCmdGpuGetFindSubDeviceHandle_FNPTR(pDevice) pDevice->__deviceCtrlCmdGpuGetFindSubDeviceHandle__
+#define deviceCtrlCmdGpuGetFindSubDeviceHandle(pDevice, pParams) deviceCtrlCmdGpuGetFindSubDeviceHandle_DISPATCH(pDevice, pParams)
+#define deviceCtrlCmdOsUnixVTSwitch_FNPTR(pDevice) pDevice->__deviceCtrlCmdOsUnixVTSwitch__
+#define deviceCtrlCmdOsUnixVTSwitch(pDevice, pParams) deviceCtrlCmdOsUnixVTSwitch_DISPATCH(pDevice, pParams)
+#define deviceCtrlCmdOsUnixVTGetFBInfo_FNPTR(pDevice) pDevice->__deviceCtrlCmdOsUnixVTGetFBInfo__
+#define deviceCtrlCmdOsUnixVTGetFBInfo(pDevice, pParams) deviceCtrlCmdOsUnixVTGetFBInfo_DISPATCH(pDevice, pParams)
+#define deviceMap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresMap__
+#define deviceMap(pGpuResource, pCallContext, pParams, pCpuMapping) deviceMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
+#define deviceUnmap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresUnmap__
+#define deviceUnmap(pGpuResource, pCallContext, pCpuMapping) deviceUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
+#define deviceShareCallback_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresShareCallback__
+#define deviceShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) deviceShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
+#define deviceGetRegBaseOffsetAndSize_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetRegBaseOffsetAndSize__
+#define deviceGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) deviceGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
+#define deviceGetMapAddrSpace_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetMapAddrSpace__
+#define deviceGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) deviceGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
+#define deviceGetInternalObjectHandle_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__
+#define deviceGetInternalObjectHandle(pGpuResource) deviceGetInternalObjectHandle_DISPATCH(pGpuResource)
+#define deviceAccessCallback_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__
+#define deviceAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) deviceAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
+#define deviceGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__
+#define deviceGetMemInterMapParams(pRmResource, pParams) deviceGetMemInterMapParams_DISPATCH(pRmResource, pParams)
+#define deviceCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__
+#define deviceCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) deviceCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
+#define deviceGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__
+#define deviceGetMemoryMappingDescriptor(pRmResource, ppMemDesc) deviceGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
+#define deviceControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__
+#define deviceControlSerialization_Prologue(pResource, pCallContext, pParams) deviceControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams)
+#define deviceControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__
+#define deviceControlSerialization_Epilogue(pResource, pCallContext, pParams) deviceControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams)
+#define deviceControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__
+#define deviceControl_Prologue(pResource, pCallContext, pParams) deviceControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
+#define deviceControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__
+#define deviceControl_Epilogue(pResource, pCallContext, pParams) deviceControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
+#define deviceCanCopy_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__
+#define deviceCanCopy(pResource) deviceCanCopy_DISPATCH(pResource)
+#define deviceIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__
+#define deviceIsDuplicate(pResource, hMemory, pDuplicate) deviceIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate)
deviceIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define devicePreDestruct_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define devicePreDestruct(pResource) devicePreDestruct_DISPATCH(pResource) +#define deviceControlFilter_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define deviceControlFilter(pResource, pCallContext, pParams) deviceControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define deviceIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define deviceIsPartialUnmapSupported(pResource) deviceIsPartialUnmapSupported_DISPATCH(pResource) +#define deviceMapTo_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define deviceMapTo(pResource, pParams) deviceMapTo_DISPATCH(pResource, pParams) +#define deviceUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define deviceUnmapFrom(pResource, pParams) deviceUnmapFrom_DISPATCH(pResource, pParams) +#define deviceGetRefCount_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define deviceGetRefCount(pResource) deviceGetRefCount_DISPATCH(pResource) +#define deviceAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define deviceAddAdditionalDependants(pClient, pResource, pReference) deviceAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NV_STATUS deviceControl_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__deviceControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS deviceInternalControlForward_DISPATCH(struct Device *pDevice, NvU32 command, void *pParams, NvU32 size) { + return pDevice->__nvoc_metadata_ptr->vtable.__deviceInternalControlForward__(pDevice, command, pParams, size); +} + +static inline NV_STATUS deviceCtrlCmdGpuGetClasslist_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *pClassListParams) { + return pDevice->__deviceCtrlCmdGpuGetClasslist__(pDevice, pClassListParams); +} + +static inline NV_STATUS deviceCtrlCmdGpuGetClasslistV2_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuGetClasslistV2__(pDevice, pParams); +} + +static inline NV_STATUS deviceCtrlCmdGpuGetNumSubdevices_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *pSubDeviceCountParams) { + return pDevice->__deviceCtrlCmdGpuGetNumSubdevices__(pDevice, pSubDeviceCountParams); +} + +static inline NV_STATUS deviceCtrlCmdGpuModifyGpuSwStatePersistence_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuModifyGpuSwStatePersistence__(pDevice, pParams); +} + +static inline NV_STATUS 
deviceCtrlCmdGpuQueryGpuSwStatePersistence_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuQueryGpuSwStatePersistence__(pDevice, pParams); +} + +static inline NV_STATUS deviceCtrlCmdGpuGetVirtualizationMode_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuGetVirtualizationMode__(pDevice, pParams); +} + +static inline NV_STATUS deviceCtrlCmdGpuSetVgpuVfBar1Size_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuSetVgpuVfBar1Size__(pDevice, pParams); +} + +static inline NV_STATUS deviceCtrlCmdGpuGetBrandCaps_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuGetBrandCaps__(pDevice, pParams); +} + +static inline NV_STATUS deviceCtrlCmdGpuVirtualizationSwitchToVga_DISPATCH(struct Device *pDevice) { + return pDevice->__deviceCtrlCmdGpuVirtualizationSwitchToVga__(pDevice); +} + +static inline NV_STATUS deviceCtrlCmdGpuSetVgpuHeterogeneousMode_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_SET_VGPU_HETEROGENEOUS_MODE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuSetVgpuHeterogeneousMode__(pDevice, pParams); +} + +static inline NV_STATUS deviceCtrlCmdGpuGetVgpuHeterogeneousMode_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_VGPU_HETEROGENEOUS_MODE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuGetVgpuHeterogeneousMode__(pDevice, pParams); +} + +static inline NV_STATUS deviceCtrlCmdGpuGetFindSubDeviceHandle_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *pParams) { + return pDevice->__deviceCtrlCmdGpuGetFindSubDeviceHandle__(pDevice, pParams); +} + +static inline NV_STATUS deviceCtrlCmdOsUnixVTSwitch_DISPATCH(struct Device *pDevice, NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdOsUnixVTSwitch__(pDevice, pParams); +} + +static inline NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_DISPATCH(struct Device *pDevice, NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdOsUnixVTGetFBInfo__(pDevice, pParams); +} + +static inline NV_STATUS deviceMap_DISPATCH(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__deviceMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS deviceUnmap_DISPATCH(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__deviceUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NvBool deviceShareCallback_DISPATCH(struct Device *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__deviceShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS deviceGetRegBaseOffsetAndSize_DISPATCH(struct Device *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__deviceGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS deviceGetMapAddrSpace_DISPATCH(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, 
NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__deviceGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle deviceGetInternalObjectHandle_DISPATCH(struct Device *pGpuResource) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__deviceGetInternalObjectHandle__(pGpuResource); +} + +static inline NvBool deviceAccessCallback_DISPATCH(struct Device *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__deviceAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS deviceGetMemInterMapParams_DISPATCH(struct Device *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__deviceGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS deviceCheckMemInterUnmap_DISPATCH(struct Device *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__deviceCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS deviceGetMemoryMappingDescriptor_DISPATCH(struct Device *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__deviceGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS deviceControlSerialization_Prologue_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__deviceControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void deviceControlSerialization_Epilogue_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__deviceControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS deviceControl_Prologue_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__deviceControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void deviceControl_Epilogue_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__deviceControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool deviceCanCopy_DISPATCH(struct Device *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__deviceCanCopy__(pResource); +} + +static inline NV_STATUS deviceIsDuplicate_DISPATCH(struct Device *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__deviceIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void devicePreDestruct_DISPATCH(struct Device *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__devicePreDestruct__(pResource); +} + +static inline NV_STATUS deviceControlFilter_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__deviceControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool deviceIsPartialUnmapSupported_DISPATCH(struct Device *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__deviceIsPartialUnmapSupported__(pResource); +} 
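+// Illustrative note (editor's sketch, not NVOC output): the *_FNPTR/*_DISPATCH
+// macro pairs above are the public face of NVOC's hand-rolled vtable. Callers
+// use the short macro and never dereference __nvoc_metadata_ptr themselves.
+// A hypothetical caller would look like this:
+//
+//     // Sketch only; assumes pDevice was built by the NVOC object-creation
+//     // path, so __nvoc_metadata_ptr and its vtable entries are populated.
+//     static NvBool exampleDeviceIsCopyable(struct Device *pDevice)
+//     {
+//         // deviceCanCopy() expands to deviceCanCopy_DISPATCH(pDevice), which
+//         // calls through vtable.__deviceCanCopy__ in __nvoc_metadata_ptr;
+//         // subclasses override the pointer, never the call site.
+//         return deviceCanCopy(pDevice);
+//     }
+//
+// The matching deviceCanCopy_FNPTR(pDevice) form yields the resolved function
+// pointer itself, for code that needs to compare or forward it.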
+ +static inline NV_STATUS deviceMapTo_DISPATCH(struct Device *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__deviceMapTo__(pResource, pParams); +} + +static inline NV_STATUS deviceUnmapFrom_DISPATCH(struct Device *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__deviceUnmapFrom__(pResource, pParams); +} + +static inline NvU32 deviceGetRefCount_DISPATCH(struct Device *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__deviceGetRefCount__(pResource); +} + +static inline void deviceAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Device *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__deviceAddAdditionalDependants__(pClient, pResource, pReference); +} + +NV_STATUS deviceControl_IMPL(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +NV_STATUS deviceInternalControlForward_IMPL(struct Device *pDevice, NvU32 command, void *pParams, NvU32 size); + +NV_STATUS deviceCtrlCmdGpuGetClasslist_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *pClassListParams); + +NV_STATUS deviceCtrlCmdGpuGetClasslistV2_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *pParams); + +NV_STATUS deviceCtrlCmdGpuGetNumSubdevices_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *pSubDeviceCountParams); + +NV_STATUS deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *pParams); + +NV_STATUS deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *pParams); + +NV_STATUS deviceCtrlCmdGpuGetVirtualizationMode_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *pParams); + +NV_STATUS deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdGpuGetBrandCaps_5baef9(struct Device *pDevice, NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *pParams) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +NV_STATUS deviceCtrlCmdGpuVirtualizationSwitchToVga_IMPL(struct Device *pDevice); + +NV_STATUS deviceCtrlCmdGpuSetVgpuHeterogeneousMode_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_SET_VGPU_HETEROGENEOUS_MODE_PARAMS *pParams); + +NV_STATUS deviceCtrlCmdGpuGetVgpuHeterogeneousMode_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_VGPU_HETEROGENEOUS_MODE_PARAMS *pParams); + +NV_STATUS deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *pParams); + +NV_STATUS deviceCtrlCmdOsUnixVTSwitch_IMPL(struct Device *pDevice, NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams); + +NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL(struct Device *pDevice, NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams); + +static inline NV_STATUS deviceSetDefaultVASpace(struct Device *pDevice, NvHandle hVASpace) { + return NV_OK; +} + +NV_STATUS deviceConstruct_IMPL(struct Device *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_deviceConstruct(arg_pResource, arg_pCallContext, arg_pParams) deviceConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +void deviceDestruct_IMPL(struct Device *pResource); + +#define __nvoc_deviceDestruct(pResource) deviceDestruct_IMPL(pResource) +NV_STATUS 
deviceGetByHandle_IMPL(struct RsClient *pClient, NvHandle hDevice, struct Device **ppDevice);
+
+#define deviceGetByHandle(pClient, hDevice, ppDevice) deviceGetByHandle_IMPL(pClient, hDevice, ppDevice)
+NV_STATUS deviceGetByInstance_IMPL(struct RsClient *pClient, NvU32 deviceInstance, struct Device **ppDevice);
+
+#define deviceGetByInstance(pClient, deviceInstance, ppDevice) deviceGetByInstance_IMPL(pClient, deviceInstance, ppDevice)
+NV_STATUS deviceGetByGpu_IMPL(struct RsClient *pClient, struct OBJGPU *pGpu, NvBool bAnyInGroup, struct Device **ppDevice);
+
+#define deviceGetByGpu(pClient, pGpu, bAnyInGroup, ppDevice) deviceGetByGpu_IMPL(pClient, pGpu, bAnyInGroup, ppDevice)
+NV_STATUS deviceGetDefaultVASpace_IMPL(struct Device *pDevice, struct OBJVASPACE **ppVAS);
+
+#ifdef __nvoc_device_h_disabled
+static inline NV_STATUS deviceGetDefaultVASpace(struct Device *pDevice, struct OBJVASPACE **ppVAS) {
+    NV_ASSERT_FAILED_PRECOMP("Device was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_device_h_disabled
+#define deviceGetDefaultVASpace(pDevice, ppVAS) deviceGetDefaultVASpace_IMPL(pDevice, ppVAS)
+#endif //__nvoc_device_h_disabled
+
+NV_STATUS deviceSetClientShare_IMPL(struct Device *pDevice, NvHandle hClientShare, NvU64 vaSize, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 deviceAllocFlags);
+
+#ifdef __nvoc_device_h_disabled
+static inline NV_STATUS deviceSetClientShare(struct Device *pDevice, NvHandle hClientShare, NvU64 vaSize, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 deviceAllocFlags) {
+    NV_ASSERT_FAILED_PRECOMP("Device was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_device_h_disabled
+#define deviceSetClientShare(pDevice, hClientShare, vaSize, vaStartInternal, vaLimitInternal, deviceAllocFlags) deviceSetClientShare_IMPL(pDevice, hClientShare, vaSize, vaStartInternal, vaLimitInternal, deviceAllocFlags)
+#endif //__nvoc_device_h_disabled
+
+void deviceRemoveFromClientShare_IMPL(struct Device *pDevice);
+
+#ifdef __nvoc_device_h_disabled
+static inline void deviceRemoveFromClientShare(struct Device *pDevice) {
+    NV_ASSERT_FAILED_PRECOMP("Device was disabled!");
+}
+#else //__nvoc_device_h_disabled
+#define deviceRemoveFromClientShare(pDevice) deviceRemoveFromClientShare_IMPL(pDevice)
+#endif //__nvoc_device_h_disabled
+
+#undef PRIVATE_FIELD
+
+
+// ****************************************************************************
+// Deprecated Definitions
+// ****************************************************************************
+
+/**
+ * WARNING: This function is deprecated and its use is *strongly* discouraged
+ * (especially for new code!)
+ *
+ * From the function name (CliSetGpuContext) it appears to be a simple accessor,
+ * but it violates expectations by modifying the SLI BC threadstate (calls to
+ * GPU_RES_SET_THREAD_BC_STATE). This can be dangerous if not carefully managed
+ * by the caller.
+ *
+ * Instead of using this routine, please use deviceGetByHandle and then call
+ * GPU_RES_GET_GPU, GPU_RES_GET_GPUGRP, and GPU_RES_SET_THREAD_BC_STATE as needed.
+ *
+ * Note that GPU_RES_GET_GPU supports returning a pGpu for pDevice,
+ * pSubdevice, the base pResource type, and any resource that inherits from
+ * GpuResource. That is, instead of using CliSetGpuContext or
+ * CliSetSubDeviceContext, please use the following pattern to look up the pGpu:
+ *
+ *     OBJGPU *pGpu = GPU_RES_GET_GPU(pResource or pResourceRef->pResource);
+ *
+ * To set the threadstate, please use:
+ *
+ *     GPU_RES_SET_THREAD_BC_STATE(pResource or pResourceRef->pResource);
+ */
+NV_STATUS CliSetGpuContext(NvHandle, NvHandle, OBJGPU **, struct OBJGPUGRP **);
+
+/**
+ * WARNING: This function is deprecated! Please use gpuGetByRef().
+ */
+OBJGPU *CliGetGpuFromContext(RsResourceRef *pContextRef, NvBool *pbBroadcast);
+
+/**
+ * WARNING: This function is deprecated! Please use gpuGetByHandle().
+ */
+OBJGPU *CliGetGpuFromHandle(NvHandle hClient, NvHandle hResource, NvBool *pbBroadcast);
+
+
+#endif
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _G_DEVICE_NVOC_H_
diff --git a/src/nvidia/generated/g_disp_capabilities_nvoc.c b/src/nvidia/generated/g_disp_capabilities_nvoc.c
new file mode 100644
index 0000000..3f4b40b
--- /dev/null
+++ b/src/nvidia/generated/g_disp_capabilities_nvoc.c
@@ -0,0 +1,518 @@
+#define NVOC_DISP_CAPABILITIES_H_PRIVATE_ACCESS_ALLOWED
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+#include "nvtypes.h"
+#include "nvport/nvport.h"
+#include "nvport/inline/util_valist.h"
+#include "utils/nvassert.h"
+#include "g_disp_capabilities_nvoc.h"
+
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check__0x99db3e = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCapabilities;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
+
+// Forward declarations for DispCapabilities
+void __nvoc_init__GpuResource(GpuResource*);
+void __nvoc_init__DispCapabilities(DispCapabilities*);
+void __nvoc_init_funcTable_DispCapabilities(DispCapabilities*);
+NV_STATUS __nvoc_ctor_DispCapabilities(DispCapabilities*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+void __nvoc_init_dataField_DispCapabilities(DispCapabilities*);
+void __nvoc_dtor_DispCapabilities(DispCapabilities*);
+
+// Structures used within RTTI (run-time type information)
+extern const struct NVOC_CASTINFO __nvoc_castinfo__DispCapabilities;
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info__DispCapabilities;
+
+// Down-thunk(s) to bridge DispCapabilities methods from ancestors (if any)
+NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2
+NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2
+NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS
__nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_down_thunk_DispCapabilities_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // this + +// Up-thunk(s) to bridge DispCapabilities methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2 +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2 +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2 +NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR 
**ppMemDesc); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource); // super +void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference); // super +NV_STATUS __nvoc_up_thunk_GpuResource_dispcapControl(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_GpuResource_dispcapMap(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_GpuResource_dispcapUnmap(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_GpuResource_dispcapShareCallback(struct DispCapabilities *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_GpuResource_dispcapGetMapAddrSpace(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this +NV_STATUS __nvoc_up_thunk_GpuResource_dispcapInternalControlForward(struct DispCapabilities *pGpuResource, NvU32 command, void *pParams, NvU32 size); // this +NvHandle __nvoc_up_thunk_GpuResource_dispcapGetInternalObjectHandle(struct DispCapabilities *pGpuResource); // this +NvBool __nvoc_up_thunk_RmResource_dispcapAccessCallback(struct DispCapabilities *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispcapGetMemInterMapParams(struct DispCapabilities *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispcapCheckMemInterUnmap(struct DispCapabilities *pRmResource, NvBool 
bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispcapGetMemoryMappingDescriptor(struct DispCapabilities *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispcapControlSerialization_Prologue(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_dispcapControlSerialization_Epilogue(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispcapControl_Prologue(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_dispcapControl_Epilogue(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_dispcapCanCopy(struct DispCapabilities *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispcapIsDuplicate(struct DispCapabilities *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_dispcapPreDestruct(struct DispCapabilities *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispcapControlFilter(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_dispcapIsPartialUnmapSupported(struct DispCapabilities *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispcapMapTo(struct DispCapabilities *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispcapUnmapFrom(struct DispCapabilities *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_dispcapGetRefCount(struct DispCapabilities *pResource); // this +void __nvoc_up_thunk_RsResource_dispcapAddAdditionalDependants(struct RsClient *pClient, struct DispCapabilities *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispCapabilities = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispCapabilities), + /*classId=*/ classId(DispCapabilities), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispCapabilities", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispCapabilities, + /*pCastInfo=*/ &__nvoc_castinfo__DispCapabilities, + /*pExportInfo=*/ &__nvoc_export_info__DispCapabilities +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__DispCapabilities __nvoc_metadata__DispCapabilities = { + .rtti.pClassDef = &__nvoc_class_def_DispCapabilities, // (dispcap) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispCapabilities, + .rtti.offset = 0, + .metadata__GpuResource.rtti.pClassDef = &__nvoc_class_def_GpuResource, // (gpures) super + .metadata__GpuResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.rtti.offset = NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource), + .metadata__GpuResource.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2 + .metadata__GpuResource.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.rtti.offset = NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource), + 
.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3 + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4 + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3 + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + + .vtable.__dispcapGetRegBaseOffsetAndSize__ = &dispcapGetRegBaseOffsetAndSize_IMPL, // virtual override (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_down_thunk_DispCapabilities_gpuresGetRegBaseOffsetAndSize, // virtual + .vtable.__dispcapControl__ = &__nvoc_up_thunk_GpuResource_dispcapControl, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl__ = &gpuresControl_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_GpuResource_resControl, // virtual + .vtable.__dispcapMap__ = &__nvoc_up_thunk_GpuResource_dispcapMap, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresMap__ = &gpuresMap_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_GpuResource_resMap, // virtual + .vtable.__dispcapUnmap__ = &__nvoc_up_thunk_GpuResource_dispcapUnmap, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresUnmap__ = &gpuresUnmap_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_GpuResource_resUnmap, // virtual + .vtable.__dispcapShareCallback__ = &__nvoc_up_thunk_GpuResource_dispcapShareCallback, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresShareCallback__ = &gpuresShareCallback_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresShareCallback__ = 
&__nvoc_down_thunk_GpuResource_rmresShareCallback, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__dispcapGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_dispcapGetMapAddrSpace, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL, // virtual + .vtable.__dispcapInternalControlForward__ = &__nvoc_up_thunk_GpuResource_dispcapInternalControlForward, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL, // virtual + .vtable.__dispcapGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_dispcapGetInternalObjectHandle, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL, // virtual + .vtable.__dispcapAccessCallback__ = &__nvoc_up_thunk_RmResource_dispcapAccessCallback, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresAccessCallback__ = &__nvoc_up_thunk_RmResource_gpuresAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__dispcapGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_dispcapGetMemInterMapParams, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + .vtable.__dispcapCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispcapCheckMemInterUnmap, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual + .vtable.__dispcapGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_dispcapGetMemoryMappingDescriptor, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual + .vtable.__dispcapControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispcapControlSerialization_Prologue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ 
= &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__dispcapControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispcapControlSerialization_Epilogue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__dispcapControl_Prologue__ = &__nvoc_up_thunk_RmResource_dispcapControl_Prologue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__dispcapControl_Epilogue__ = &__nvoc_up_thunk_RmResource_dispcapControl_Epilogue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__dispcapCanCopy__ = &__nvoc_up_thunk_RsResource_dispcapCanCopy, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresCanCopy__ = &__nvoc_up_thunk_RsResource_gpuresCanCopy, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__dispcapIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispcapIsDuplicate, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpuresIsDuplicate, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__dispcapPreDestruct__ = &__nvoc_up_thunk_RsResource_dispcapPreDestruct, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresPreDestruct__ = &__nvoc_up_thunk_RsResource_gpuresPreDestruct, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + 
.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__dispcapControlFilter__ = &__nvoc_up_thunk_RsResource_dispcapControlFilter, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresControlFilter__ = &__nvoc_up_thunk_RsResource_gpuresControlFilter, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__dispcapIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_dispcapIsPartialUnmapSupported, // inline virtual inherited (res) base (gpures) body + .metadata__GpuResource.vtable.__gpuresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__dispcapMapTo__ = &__nvoc_up_thunk_RsResource_dispcapMapTo, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresMapTo__ = &__nvoc_up_thunk_RsResource_gpuresMapTo, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__dispcapUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispcapUnmapFrom, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpuresUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__dispcapGetRefCount__ = &__nvoc_up_thunk_RsResource_dispcapGetRefCount, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetRefCount__ = &__nvoc_up_thunk_RsResource_gpuresGetRefCount, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__dispcapAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispcapAddAdditionalDependants, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + 
.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__DispCapabilities = { + .numRelatives = 6, + .relatives = { + &__nvoc_metadata__DispCapabilities.rtti, // [0]: (dispcap) this + &__nvoc_metadata__DispCapabilities.metadata__GpuResource.rtti, // [1]: (gpures) super + &__nvoc_metadata__DispCapabilities.metadata__GpuResource.metadata__RmResource.rtti, // [2]: (rmres) super^2 + &__nvoc_metadata__DispCapabilities.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3 + &__nvoc_metadata__DispCapabilities.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4 + &__nvoc_metadata__DispCapabilities.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3 + } +}; + +// 1 down-thunk(s) defined to bridge methods in DispCapabilities from superclasses + +// dispcapGetRegBaseOffsetAndSize: virtual override (gpures) base (gpures) +NV_STATUS __nvoc_down_thunk_DispCapabilities_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return dispcapGetRegBaseOffsetAndSize((struct DispCapabilities *)(((unsigned char *) pDispCapabilities) - NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource)), pGpu, pOffset, pSize); +} + + +// 24 up-thunk(s) defined to bridge methods in DispCapabilities to superclasses + +// dispcapControl: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_dispcapControl(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource)), pCallContext, pParams); +} + +// dispcapMap: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_dispcapMap(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource)), pCallContext, pParams, pCpuMapping); +} + +// dispcapUnmap: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_dispcapUnmap(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource)), pCallContext, pCpuMapping); +} + +// dispcapShareCallback: virtual inherited (gpures) base (gpures) +NvBool __nvoc_up_thunk_GpuResource_dispcapShareCallback(struct DispCapabilities *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// dispcapGetMapAddrSpace: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_dispcapGetMapAddrSpace(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return 
gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource)), pCallContext, mapFlags, pAddrSpace); +} + +// dispcapInternalControlForward: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_dispcapInternalControlForward(struct DispCapabilities *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource)), command, pParams, size); +} + +// dispcapGetInternalObjectHandle: virtual inherited (gpures) base (gpures) +NvHandle __nvoc_up_thunk_GpuResource_dispcapGetInternalObjectHandle(struct DispCapabilities *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource))); +} + +// dispcapAccessCallback: virtual inherited (rmres) base (gpures) +NvBool __nvoc_up_thunk_RmResource_dispcapAccessCallback(struct DispCapabilities *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// dispcapGetMemInterMapParams: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_dispcapGetMemInterMapParams(struct DispCapabilities *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pParams); +} + +// dispcapCheckMemInterUnmap: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_dispcapCheckMemInterUnmap(struct DispCapabilities *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource)), bSubdeviceHandleProvided); +} + +// dispcapGetMemoryMappingDescriptor: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_dispcapGetMemoryMappingDescriptor(struct DispCapabilities *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource)), ppMemDesc); +} + +// dispcapControlSerialization_Prologue: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_dispcapControlSerialization_Prologue(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispcapControlSerialization_Epilogue: virtual inherited (rmres) base (gpures) +void __nvoc_up_thunk_RmResource_dispcapControlSerialization_Epilogue(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCapabilities, 
__nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispcapControl_Prologue: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_dispcapControl_Prologue(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispcapControl_Epilogue: virtual inherited (rmres) base (gpures) +void __nvoc_up_thunk_RmResource_dispcapControl_Epilogue(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispcapCanCopy: virtual inherited (res) base (gpures) +NvBool __nvoc_up_thunk_RsResource_dispcapCanCopy(struct DispCapabilities *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispcapIsDuplicate: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_dispcapIsDuplicate(struct DispCapabilities *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate); +} + +// dispcapPreDestruct: virtual inherited (res) base (gpures) +void __nvoc_up_thunk_RsResource_dispcapPreDestruct(struct DispCapabilities *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispcapControlFilter: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_dispcapControlFilter(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// dispcapIsPartialUnmapSupported: inline virtual inherited (res) base (gpures) body +NvBool __nvoc_up_thunk_RsResource_dispcapIsPartialUnmapSupported(struct DispCapabilities *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispcapMapTo: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_dispcapMapTo(struct DispCapabilities *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// dispcapUnmapFrom: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_dispcapUnmapFrom(struct DispCapabilities *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCapabilities, 
__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// dispcapGetRefCount: virtual inherited (res) base (gpures) +NvU32 __nvoc_up_thunk_RsResource_dispcapGetRefCount(struct DispCapabilities *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispcapAddAdditionalDependants: virtual inherited (res) base (gpures) +void __nvoc_up_thunk_RsResource_dispcapAddAdditionalDependants(struct RsClient *pClient, struct DispCapabilities *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__DispCapabilities = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_DispCapabilities(DispCapabilities *pThis) { + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispCapabilities(DispCapabilities *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_DispCapabilities(DispCapabilities *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispCapabilities_fail_GpuResource; + __nvoc_init_dataField_DispCapabilities(pThis); + + status = __nvoc_dispcapConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispCapabilities_fail__init; + goto __nvoc_ctor_DispCapabilities_exit; // Success + +__nvoc_ctor_DispCapabilities_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_DispCapabilities_fail_GpuResource: +__nvoc_ctor_DispCapabilities_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_DispCapabilities_1(DispCapabilities *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_DispCapabilities_1 + + +// Initialize vtable(s) for 25 virtual method(s). +void __nvoc_init_funcTable_DispCapabilities(DispCapabilities *pThis) { + __nvoc_init_funcTable_DispCapabilities_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__DispCapabilities(DispCapabilities *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; // (rmres) super^2 + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; // (gpures) super + pThis->__nvoc_pbase_DispCapabilities = pThis; // (dispcap) this + + // Recurse to superclass initialization function(s). 
+ __nvoc_init__GpuResource(&pThis->__nvoc_base_GpuResource); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__DispCapabilities.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4 + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispCapabilities.metadata__GpuResource.metadata__RmResource.metadata__RsResource; // (res) super^3 + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__DispCapabilities.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispCapabilities.metadata__GpuResource.metadata__RmResource; // (rmres) super^2 + pThis->__nvoc_base_GpuResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispCapabilities.metadata__GpuResource; // (gpures) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__DispCapabilities; // (dispcap) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_DispCapabilities(pThis); +} + +NV_STATUS __nvoc_objCreate_DispCapabilities(DispCapabilities **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + DispCapabilities *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(DispCapabilities), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(DispCapabilities)); + + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__DispCapabilities(pThis); + status = __nvoc_ctor_DispCapabilities(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispCapabilities_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_DispCapabilities_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. 
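+    // The failure path mirrors the allocation policy above: for in-place
+    // construction the caller owns the storage, so it is only re-zeroed;
+    // otherwise the heap allocation is freed and *ppThis is cleared so the
+    // caller never sees a dangling pointer.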
+    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+        portMemSet(pThis, 0, sizeof(DispCapabilities));
+    else
+    {
+        portMemFree(pThis);
+        *ppThis = NULL;
+    }
+
+    // coverity[leaked_storage:FALSE]
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_DispCapabilities(DispCapabilities **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+    status = __nvoc_objCreate_DispCapabilities(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+    return status;
+}
+
diff --git a/src/nvidia/generated/g_disp_capabilities_nvoc.h b/src/nvidia/generated/g_disp_capabilities_nvoc.h
new file mode 100644
index 0000000..67fcbce
--- /dev/null
+++ b/src/nvidia/generated/g_disp_capabilities_nvoc.h
@@ -0,0 +1,333 @@
+
+#ifndef _G_DISP_CAPABILITIES_NVOC_H_
+#define _G_DISP_CAPABILITIES_NVOC_H_
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************************************************************
+*
+* Description:
+* This file contains functions for managing the DispCapabilities class.
+*
+******************************************************************************/
+
+#pragma once
+#include "g_disp_capabilities_nvoc.h"
+
+#ifndef DISP_CAPABILITIES_H
+#define DISP_CAPABILITIES_H
+
+#include "gpu/gpu_resource.h"
+
+/*!
+ * RM internal class representing NVXXXX_DISP_CAPABILITIES
+ */
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
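For illustration only (not part of the generated header): a stripped-down sketch of the PRIVATE_FIELD pattern the comment above describes. The real NVOC_PRIVATE_FIELD definition lives in the NVOC runtime headers; the token-pasting shown here is an assumption, used only to make the compile-time diagnostic concrete.

    /* Sketch: how a PRIVATE_FIELD-style macro can gate field access. */
    #ifdef MYCLASS_PRIVATE_ACCESS_ALLOWED
    #define PRIVATE_FIELD(x) x             /* owning .c file sees the real name */
    #else
    #define PRIVATE_FIELD(x) x##_PRIVATE   /* other files get a mangled name */
    #endif

    struct MyClass {
        int PRIVATE_FIELD(refCount);       /* refCount or refCount_PRIVATE */
    };

With this, a reference to pObj->refCount from a file that did not define MYCLASS_PRIVATE_ACCESS_ALLOWED fails to compile, which is the diagnostic the comment refers to.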
+#ifdef NVOC_DISP_CAPABILITIES_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DispCapabilities; +struct NVOC_METADATA__GpuResource; +struct NVOC_VTABLE__DispCapabilities; + + +struct DispCapabilities { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__DispCapabilities *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct GpuResource __nvoc_base_GpuResource; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^4 + struct RsResource *__nvoc_pbase_RsResource; // res super^3 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^2 + struct GpuResource *__nvoc_pbase_GpuResource; // gpures super + struct DispCapabilities *__nvoc_pbase_DispCapabilities; // dispcap + + // Data members + NvU32 ControlOffset; + NvU32 ControlLength; +}; + + +// Vtable with 25 per-class function pointers +struct NVOC_VTABLE__DispCapabilities { + NV_STATUS (*__dispcapGetRegBaseOffsetAndSize__)(struct DispCapabilities * /*this*/, struct OBJGPU *, NvU32 *, NvU32 *); // virtual override (gpures) base (gpures) + NV_STATUS (*__dispcapControl__)(struct DispCapabilities * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__dispcapMap__)(struct DispCapabilities * /*this*/, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__dispcapUnmap__)(struct DispCapabilities * /*this*/, struct CALL_CONTEXT *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures) + NvBool (*__dispcapShareCallback__)(struct DispCapabilities * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__dispcapGetMapAddrSpace__)(struct DispCapabilities * /*this*/, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__dispcapInternalControlForward__)(struct DispCapabilities * /*this*/, NvU32, void *, NvU32); // virtual inherited (gpures) base (gpures) + NvHandle (*__dispcapGetInternalObjectHandle__)(struct DispCapabilities * /*this*/); // virtual inherited (gpures) base (gpures) + NvBool (*__dispcapAccessCallback__)(struct DispCapabilities * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__dispcapGetMemInterMapParams__)(struct DispCapabilities * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__dispcapCheckMemInterUnmap__)(struct DispCapabilities * /*this*/, NvBool); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__dispcapGetMemoryMappingDescriptor__)(struct DispCapabilities * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__dispcapControlSerialization_Prologue__)(struct DispCapabilities * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + void (*__dispcapControlSerialization_Epilogue__)(struct DispCapabilities * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base 
(gpures) + NV_STATUS (*__dispcapControl_Prologue__)(struct DispCapabilities * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + void (*__dispcapControl_Epilogue__)(struct DispCapabilities * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + NvBool (*__dispcapCanCopy__)(struct DispCapabilities * /*this*/); // virtual inherited (res) base (gpures) + NV_STATUS (*__dispcapIsDuplicate__)(struct DispCapabilities * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (gpures) + void (*__dispcapPreDestruct__)(struct DispCapabilities * /*this*/); // virtual inherited (res) base (gpures) + NV_STATUS (*__dispcapControlFilter__)(struct DispCapabilities * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (gpures) + NvBool (*__dispcapIsPartialUnmapSupported__)(struct DispCapabilities * /*this*/); // inline virtual inherited (res) base (gpures) body + NV_STATUS (*__dispcapMapTo__)(struct DispCapabilities * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (gpures) + NV_STATUS (*__dispcapUnmapFrom__)(struct DispCapabilities * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (gpures) + NvU32 (*__dispcapGetRefCount__)(struct DispCapabilities * /*this*/); // virtual inherited (res) base (gpures) + void (*__dispcapAddAdditionalDependants__)(struct RsClient *, struct DispCapabilities * /*this*/, RsResourceRef *); // virtual inherited (res) base (gpures) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DispCapabilities { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__GpuResource metadata__GpuResource; + const struct NVOC_VTABLE__DispCapabilities vtable; +}; + +#ifndef __NVOC_CLASS_DispCapabilities_TYPEDEF__ +#define __NVOC_CLASS_DispCapabilities_TYPEDEF__ +typedef struct DispCapabilities DispCapabilities; +#endif /* __NVOC_CLASS_DispCapabilities_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispCapabilities +#define __nvoc_class_id_DispCapabilities 0x99db3e +#endif /* __nvoc_class_id_DispCapabilities */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCapabilities; + +#define __staticCast_DispCapabilities(pThis) \ + ((pThis)->__nvoc_pbase_DispCapabilities) + +#ifdef __nvoc_disp_capabilities_h_disabled +#define __dynamicCast_DispCapabilities(pThis) ((DispCapabilities*) NULL) +#else //__nvoc_disp_capabilities_h_disabled +#define __dynamicCast_DispCapabilities(pThis) \ + ((DispCapabilities*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispCapabilities))) +#endif //__nvoc_disp_capabilities_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_DispCapabilities(DispCapabilities**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispCapabilities(DispCapabilities**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_DispCapabilities(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispCapabilities((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define dispcapGetRegBaseOffsetAndSize_FNPTR(pDispCapabilities) pDispCapabilities->__nvoc_metadata_ptr->vtable.__dispcapGetRegBaseOffsetAndSize__ +#define dispcapGetRegBaseOffsetAndSize(pDispCapabilities, pGpu, pOffset, pSize) 
dispcapGetRegBaseOffsetAndSize_DISPATCH(pDispCapabilities, pGpu, pOffset, pSize) +#define dispcapControl_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresControl__ +#define dispcapControl(pGpuResource, pCallContext, pParams) dispcapControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dispcapMap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresMap__ +#define dispcapMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispcapMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispcapUnmap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresUnmap__ +#define dispcapUnmap(pGpuResource, pCallContext, pCpuMapping) dispcapUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define dispcapShareCallback_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresShareCallback__ +#define dispcapShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispcapShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispcapGetMapAddrSpace_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetMapAddrSpace__ +#define dispcapGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispcapGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispcapInternalControlForward_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresInternalControlForward__ +#define dispcapInternalControlForward(pGpuResource, command, pParams, size) dispcapInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispcapGetInternalObjectHandle_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__ +#define dispcapGetInternalObjectHandle(pGpuResource) dispcapGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dispcapAccessCallback_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define dispcapAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispcapAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispcapGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define dispcapGetMemInterMapParams(pRmResource, pParams) dispcapGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispcapCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define dispcapCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispcapCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispcapGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define dispcapGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispcapGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispcapControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define dispcapControlSerialization_Prologue(pResource, 
pCallContext, pParams) dispcapControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispcapControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define dispcapControlSerialization_Epilogue(pResource, pCallContext, pParams) dispcapControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispcapControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define dispcapControl_Prologue(pResource, pCallContext, pParams) dispcapControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispcapControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define dispcapControl_Epilogue(pResource, pCallContext, pParams) dispcapControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispcapCanCopy_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define dispcapCanCopy(pResource) dispcapCanCopy_DISPATCH(pResource) +#define dispcapIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define dispcapIsDuplicate(pResource, hMemory, pDuplicate) dispcapIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define dispcapPreDestruct_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define dispcapPreDestruct(pResource) dispcapPreDestruct_DISPATCH(pResource) +#define dispcapControlFilter_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define dispcapControlFilter(pResource, pCallContext, pParams) dispcapControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispcapIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define dispcapIsPartialUnmapSupported(pResource) dispcapIsPartialUnmapSupported_DISPATCH(pResource) +#define dispcapMapTo_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define dispcapMapTo(pResource, pParams) dispcapMapTo_DISPATCH(pResource, pParams) +#define dispcapUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define dispcapUnmapFrom(pResource, pParams) dispcapUnmapFrom_DISPATCH(pResource, pParams) +#define dispcapGetRefCount_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define dispcapGetRefCount(pResource) dispcapGetRefCount_DISPATCH(pResource) +#define dispcapAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define dispcapAddAdditionalDependants(pClient, pResource, pReference) dispcapAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions 
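For orientation, an illustrative sketch (not part of the patch): each wrapper macro above simply forwards to the matching _DISPATCH inline defined below, which makes the virtual call through the per-object metadata vtable. A hypothetical call site, assuming the generated header is included (exampleGetRefCount is our own name; only dispcapGetRefCount comes from the header):

    /* Illustrative only: dispcapGetRefCount is the generated wrapper macro. */
    static NvU32 exampleGetRefCount(struct DispCapabilities *pDispCap)
    {
        /* Expands to dispcapGetRefCount_DISPATCH(pDispCap), i.e. an indirect
         * call through pDispCap->__nvoc_metadata_ptr->vtable.__dispcapGetRefCount__. */
        return dispcapGetRefCount(pDispCap);
    }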
+static inline NV_STATUS dispcapGetRegBaseOffsetAndSize_DISPATCH(struct DispCapabilities *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pDispCapabilities->__nvoc_metadata_ptr->vtable.__dispcapGetRegBaseOffsetAndSize__(pDispCapabilities, pGpu, pOffset, pSize); +} + +static inline NV_STATUS dispcapControl_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispcapControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispcapMap_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispcapMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispcapUnmap_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispcapUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NvBool dispcapShareCallback_DISPATCH(struct DispCapabilities *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispcapShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispcapGetMapAddrSpace_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispcapGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS dispcapInternalControlForward_DISPATCH(struct DispCapabilities *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispcapInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NvHandle dispcapGetInternalObjectHandle_DISPATCH(struct DispCapabilities *pGpuResource) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispcapGetInternalObjectHandle__(pGpuResource); +} + +static inline NvBool dispcapAccessCallback_DISPATCH(struct DispCapabilities *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcapAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS dispcapGetMemInterMapParams_DISPATCH(struct DispCapabilities *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispcapGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispcapCheckMemInterUnmap_DISPATCH(struct DispCapabilities *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispcapCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispcapGetMemoryMappingDescriptor_DISPATCH(struct DispCapabilities *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispcapGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispcapControlSerialization_Prologue_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcapControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void dispcapControlSerialization_Epilogue_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__dispcapControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispcapControl_Prologue_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcapControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void dispcapControl_Epilogue_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__dispcapControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool dispcapCanCopy_DISPATCH(struct DispCapabilities *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcapCanCopy__(pResource); +} + +static inline NV_STATUS dispcapIsDuplicate_DISPATCH(struct DispCapabilities *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcapIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void dispcapPreDestruct_DISPATCH(struct DispCapabilities *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__dispcapPreDestruct__(pResource); +} + +static inline NV_STATUS dispcapControlFilter_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcapControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool dispcapIsPartialUnmapSupported_DISPATCH(struct DispCapabilities *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcapIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS dispcapMapTo_DISPATCH(struct DispCapabilities *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcapMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispcapUnmapFrom_DISPATCH(struct DispCapabilities *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcapUnmapFrom__(pResource, pParams); +} + +static inline NvU32 dispcapGetRefCount_DISPATCH(struct DispCapabilities *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcapGetRefCount__(pResource); +} + +static inline void dispcapAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispCapabilities *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__dispcapAddAdditionalDependants__(pClient, pResource, pReference); +} + +NV_STATUS dispcapGetRegBaseOffsetAndSize_IMPL(struct DispCapabilities *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); + +NV_STATUS dispcapConstruct_IMPL(struct DispCapabilities *arg_pDispCapabilities, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_dispcapConstruct(arg_pDispCapabilities, arg_pCallContext, arg_pParams) dispcapConstruct_IMPL(arg_pDispCapabilities, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif // DISP_CAPABILITIES_H + +#ifdef __cplusplus +} // 
extern "C" +#endif + +#endif // _G_DISP_CAPABILITIES_NVOC_H_ diff --git a/src/nvidia/generated/g_disp_channel_nvoc.c b/src/nvidia/generated/g_disp_channel_nvoc.c new file mode 100644 index 0000000..08f958a --- /dev/null +++ b/src/nvidia/generated/g_disp_channel_nvoc.c @@ -0,0 +1,1853 @@ +#define NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_disp_channel_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xbd2ff3 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +// Forward declarations for DispChannel +void __nvoc_init__GpuResource(GpuResource*); +void __nvoc_init__Notifier(Notifier*); +void __nvoc_init__DispChannel(DispChannel*); +void __nvoc_init_funcTable_DispChannel(DispChannel*); +NV_STATUS __nvoc_ctor_DispChannel(DispChannel*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams, NvU32 arg_isDma); +void __nvoc_init_dataField_DispChannel(DispChannel*); +void __nvoc_dtor_DispChannel(DispChannel*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__DispChannel; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__DispChannel; + +// Down-thunk(s) to bridge DispChannel methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // 
super +NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +PEVENTNOTIFICATION * __nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier); // super +struct NotifShare * __nvoc_down_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier); // super +void __nvoc_down_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare); // super +NV_STATUS __nvoc_down_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super +NV_STATUS __nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super +NV_STATUS __nvoc_down_thunk_DispChannel_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // this + +// Up-thunk(s) to bridge DispChannel methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2 +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2 +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2 +NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct 
MEMORY_DESCRIPTOR **ppMemDesc); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource); // super +void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference); // super +NV_STATUS __nvoc_up_thunk_GpuResource_dispchnControl(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_GpuResource_dispchnMap(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_GpuResource_dispchnUnmap(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_GpuResource_dispchnShareCallback(struct DispChannel *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_GpuResource_dispchnGetMapAddrSpace(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this +NV_STATUS __nvoc_up_thunk_GpuResource_dispchnInternalControlForward(struct DispChannel *pGpuResource, NvU32 command, void *pParams, NvU32 size); // this +NvHandle __nvoc_up_thunk_GpuResource_dispchnGetInternalObjectHandle(struct DispChannel *pGpuResource); // this +NvBool __nvoc_up_thunk_RmResource_dispchnAccessCallback(struct DispChannel *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispchnGetMemInterMapParams(struct DispChannel *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispchnCheckMemInterUnmap(struct DispChannel *pRmResource, NvBool bSubdeviceHandleProvided); // 
this +NV_STATUS __nvoc_up_thunk_RmResource_dispchnGetMemoryMappingDescriptor(struct DispChannel *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispchnControlSerialization_Prologue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_dispchnControlSerialization_Epilogue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispchnControl_Prologue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_dispchnControl_Epilogue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_dispchnCanCopy(struct DispChannel *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispchnIsDuplicate(struct DispChannel *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_dispchnPreDestruct(struct DispChannel *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispchnControlFilter(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_dispchnIsPartialUnmapSupported(struct DispChannel *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispchnMapTo(struct DispChannel *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispchnUnmapFrom(struct DispChannel *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_dispchnGetRefCount(struct DispChannel *pResource); // this +void __nvoc_up_thunk_RsResource_dispchnAddAdditionalDependants(struct RsClient *pClient, struct DispChannel *pResource, RsResourceRef *pReference); // this +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispchnGetNotificationListPtr(struct DispChannel *pNotifier); // this +struct NotifShare * __nvoc_up_thunk_Notifier_dispchnGetNotificationShare(struct DispChannel *pNotifier); // this +void __nvoc_up_thunk_Notifier_dispchnSetNotificationShare(struct DispChannel *pNotifier, struct NotifShare *pNotifShare); // this +NV_STATUS __nvoc_up_thunk_Notifier_dispchnUnregisterEvent(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // this +NV_STATUS __nvoc_up_thunk_Notifier_dispchnGetOrAllocNotifShare(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispChannel), + /*classId=*/ classId(DispChannel), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispChannel", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispChannel, + /*pCastInfo=*/ &__nvoc_castinfo__DispChannel, + /*pExportInfo=*/ &__nvoc_export_info__DispChannel +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__DispChannel __nvoc_metadata__DispChannel = { + .rtti.pClassDef = &__nvoc_class_def_DispChannel, // (dispchn) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispChannel, + .rtti.offset = 0, + 
.metadata__GpuResource.rtti.pClassDef = &__nvoc_class_def_GpuResource, // (gpures) super + .metadata__GpuResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.rtti.offset = NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource), + .metadata__GpuResource.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2 + .metadata__GpuResource.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.rtti.offset = NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource), + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3 + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4 + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3 + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + .metadata__Notifier.rtti.pClassDef = &__nvoc_class_def_Notifier, // (notify) super + .metadata__Notifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Notifier.rtti.offset = NV_OFFSETOF(DispChannel, __nvoc_base_Notifier), + .metadata__Notifier.metadata__INotifier.rtti.pClassDef = &__nvoc_class_def_INotifier, // (inotify) super^2 + .metadata__Notifier.metadata__INotifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Notifier.metadata__INotifier.rtti.offset = NV_OFFSETOF(DispChannel, __nvoc_base_Notifier.__nvoc_base_INotifier), + + .vtable.__dispchnGetRegBaseOffsetAndSize__ = &dispchnGetRegBaseOffsetAndSize_IMPL, // virtual override (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_down_thunk_DispChannel_gpuresGetRegBaseOffsetAndSize, // virtual + .vtable.__dispchnControl__ = &__nvoc_up_thunk_GpuResource_dispchnControl, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl__ = &gpuresControl_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_GpuResource_resControl, // virtual + .vtable.__dispchnMap__ = &__nvoc_up_thunk_GpuResource_dispchnMap, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresMap__ = &gpuresMap_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresMap__ = 
&__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_GpuResource_resMap, // virtual + .vtable.__dispchnUnmap__ = &__nvoc_up_thunk_GpuResource_dispchnUnmap, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresUnmap__ = &gpuresUnmap_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_GpuResource_resUnmap, // virtual + .vtable.__dispchnShareCallback__ = &__nvoc_up_thunk_GpuResource_dispchnShareCallback, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresShareCallback__ = &gpuresShareCallback_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresShareCallback__ = &__nvoc_down_thunk_GpuResource_rmresShareCallback, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__dispchnGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_dispchnGetMapAddrSpace, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL, // virtual + .vtable.__dispchnInternalControlForward__ = &__nvoc_up_thunk_GpuResource_dispchnInternalControlForward, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL, // virtual + .vtable.__dispchnGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_dispchnGetInternalObjectHandle, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL, // virtual + .vtable.__dispchnAccessCallback__ = &__nvoc_up_thunk_RmResource_dispchnAccessCallback, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresAccessCallback__ = &__nvoc_up_thunk_RmResource_gpuresAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__dispchnGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_dispchnGetMemInterMapParams, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + .vtable.__dispchnCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispchnCheckMemInterUnmap, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual + 
.vtable.__dispchnGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_dispchnGetMemoryMappingDescriptor, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual + .vtable.__dispchnControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispchnControlSerialization_Prologue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__dispchnControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispchnControlSerialization_Epilogue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__dispchnControl_Prologue__ = &__nvoc_up_thunk_RmResource_dispchnControl_Prologue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__dispchnControl_Epilogue__ = &__nvoc_up_thunk_RmResource_dispchnControl_Epilogue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__dispchnCanCopy__ = &__nvoc_up_thunk_RsResource_dispchnCanCopy, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresCanCopy__ = &__nvoc_up_thunk_RsResource_gpuresCanCopy, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + 
.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__dispchnIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispchnIsDuplicate, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpuresIsDuplicate, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__dispchnPreDestruct__ = &__nvoc_up_thunk_RsResource_dispchnPreDestruct, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresPreDestruct__ = &__nvoc_up_thunk_RsResource_gpuresPreDestruct, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__dispchnControlFilter__ = &__nvoc_up_thunk_RsResource_dispchnControlFilter, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresControlFilter__ = &__nvoc_up_thunk_RsResource_gpuresControlFilter, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__dispchnIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_dispchnIsPartialUnmapSupported, // inline virtual inherited (res) base (gpures) body + .metadata__GpuResource.vtable.__gpuresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__dispchnMapTo__ = &__nvoc_up_thunk_RsResource_dispchnMapTo, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresMapTo__ = &__nvoc_up_thunk_RsResource_gpuresMapTo, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__dispchnUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispchnUnmapFrom, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpuresUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + 
+    .vtable.__dispchnGetRefCount__ = &__nvoc_up_thunk_RsResource_dispchnGetRefCount, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetRefCount__ = &__nvoc_up_thunk_RsResource_gpuresGetRefCount, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual
+    .vtable.__dispchnAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispchnAddAdditionalDependants, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual
+    .vtable.__dispchnGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_dispchnGetNotificationListPtr, // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyGetNotificationListPtr__ = &notifyGetNotificationListPtr_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationListPtr__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr, // pure virtual
+    .vtable.__dispchnGetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispchnGetNotificationShare, // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyGetNotificationShare__ = &notifyGetNotificationShare_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationShare, // pure virtual
+    .vtable.__dispchnSetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispchnSetNotificationShare, // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifySetNotificationShare__ = &notifySetNotificationShare_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifySetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifySetNotificationShare, // pure virtual
+    .vtable.__dispchnUnregisterEvent__ = &__nvoc_up_thunk_Notifier_dispchnUnregisterEvent, // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyUnregisterEvent__ = &notifyUnregisterEvent_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyUnregisterEvent__ = &__nvoc_down_thunk_Notifier_inotifyUnregisterEvent, // pure virtual
+    .vtable.__dispchnGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_dispchnGetOrAllocNotifShare, // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyGetOrAllocNotifShare__ = &notifyGetOrAllocNotifShare_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetOrAllocNotifShare__ = &__nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare, // pure virtual
+};
+
+
+// Dynamic down-casting information
+const struct NVOC_CASTINFO __nvoc_castinfo__DispChannel = {
+    .numRelatives = 8,
+    .relatives = {
+        &__nvoc_metadata__DispChannel.rtti, // [0]: (dispchn) this
+        &__nvoc_metadata__DispChannel.metadata__GpuResource.rtti, // [1]: (gpures) super
+        &__nvoc_metadata__DispChannel.metadata__GpuResource.metadata__RmResource.rtti, // [2]: (rmres) super^2
+        &__nvoc_metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3
+        &__nvoc_metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4
+        &__nvoc_metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3
+        &__nvoc_metadata__DispChannel.metadata__Notifier.rtti, // [6]: (notify) super
+        &__nvoc_metadata__DispChannel.metadata__Notifier.metadata__INotifier.rtti, // [7]: (inotify) super^2
+    }
+};
+
+// 1 down-thunk(s) defined to bridge methods in DispChannel from superclasses
+
+// dispchnGetRegBaseOffsetAndSize: virtual override (gpures) base (gpures)
+NV_STATUS __nvoc_down_thunk_DispChannel_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
+    return dispchnGetRegBaseOffsetAndSize((struct DispChannel *)(((unsigned char *) pDispChannel) - NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource)), pGpu, pOffset, pSize);
+}
+
+
+// 29 up-thunk(s) defined to bridge methods in DispChannel to superclasses
+
+// dispchnControl: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnControl(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return gpuresControl((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource)), pCallContext, pParams);
+}
+
+// dispchnMap: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnMap(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
+    return gpuresMap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource)), pCallContext, pParams, pCpuMapping);
+}
+
+// dispchnUnmap: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnUnmap(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
+    return gpuresUnmap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource)), pCallContext, pCpuMapping);
+}
+
+// dispchnShareCallback: virtual inherited (gpures) base (gpures)
+NvBool __nvoc_up_thunk_GpuResource_dispchnShareCallback(struct DispChannel *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return gpuresShareCallback((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource)), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+// dispchnGetMapAddrSpace: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnGetMapAddrSpace(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
+    return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource)), pCallContext, mapFlags, pAddrSpace);
+}
+
+// dispchnInternalControlForward: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnInternalControlForward(struct DispChannel *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
+    return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource)), command, pParams, size);
+}
+
+// dispchnGetInternalObjectHandle: virtual inherited (gpures) base (gpures)
+NvHandle __nvoc_up_thunk_GpuResource_dispchnGetInternalObjectHandle(struct DispChannel *pGpuResource) {
+    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource)));
+}
+
+// dispchnAccessCallback: virtual inherited (rmres) base (gpures)
+NvBool __nvoc_up_thunk_RmResource_dispchnAccessCallback(struct DispChannel *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight);
+}
+
+// dispchnGetMemInterMapParams: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnGetMemInterMapParams(struct DispChannel *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pParams);
+}
+
+// dispchnCheckMemInterUnmap: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnCheckMemInterUnmap(struct DispChannel *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource)), bSubdeviceHandleProvided);
+}
+
+// dispchnGetMemoryMappingDescriptor: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnGetMemoryMappingDescriptor(struct DispChannel *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource)), ppMemDesc);
+}
+
+// dispchnControlSerialization_Prologue: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnControlSerialization_Prologue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// dispchnControlSerialization_Epilogue: virtual inherited (rmres) base (gpures)
+void __nvoc_up_thunk_RmResource_dispchnControlSerialization_Epilogue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// dispchnControl_Prologue: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnControl_Prologue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// dispchnControl_Epilogue: virtual inherited (rmres) base (gpures)
+void __nvoc_up_thunk_RmResource_dispchnControl_Epilogue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// dispchnCanCopy: virtual inherited (res) base (gpures)
+NvBool __nvoc_up_thunk_RsResource_dispchnCanCopy(struct DispChannel *pResource) {
+    return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// dispchnIsDuplicate: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnIsDuplicate(struct DispChannel *pResource, NvHandle hMemory, NvBool *pDuplicate) {
+    return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate);
+}
+
+// dispchnPreDestruct: virtual inherited (res) base (gpures)
+void __nvoc_up_thunk_RsResource_dispchnPreDestruct(struct DispChannel *pResource) {
+    resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// dispchnControlFilter: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnControlFilter(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// dispchnIsPartialUnmapSupported: inline virtual inherited (res) base (gpures) body
+NvBool __nvoc_up_thunk_RsResource_dispchnIsPartialUnmapSupported(struct DispChannel *pResource) {
+    return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// dispchnMapTo: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnMapTo(struct DispChannel *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// dispchnUnmapFrom: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnUnmapFrom(struct DispChannel *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// dispchnGetRefCount: virtual inherited (res) base (gpures)
+NvU32 __nvoc_up_thunk_RsResource_dispchnGetRefCount(struct DispChannel *pResource) {
+    return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// dispchnAddAdditionalDependants: virtual inherited (res) base (gpures)
+void __nvoc_up_thunk_RsResource_dispchnAddAdditionalDependants(struct RsClient *pClient, struct DispChannel *pResource, RsResourceRef *pReference) {
+    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference);
+}
+
+// dispchnGetNotificationListPtr: virtual inherited (notify) base (notify)
+PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispchnGetNotificationListPtr(struct DispChannel *pNotifier) {
+    return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispChannel, __nvoc_base_Notifier)));
+}
+
+// dispchnGetNotificationShare: virtual inherited (notify) base (notify)
+struct NotifShare * __nvoc_up_thunk_Notifier_dispchnGetNotificationShare(struct DispChannel *pNotifier) {
+    return notifyGetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispChannel, __nvoc_base_Notifier)));
+}
+
+// dispchnSetNotificationShare: virtual inherited (notify) base (notify)
+void __nvoc_up_thunk_Notifier_dispchnSetNotificationShare(struct DispChannel *pNotifier, struct NotifShare *pNotifShare) {
+    notifySetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispChannel, __nvoc_base_Notifier)), pNotifShare);
+}
+
+// dispchnUnregisterEvent: virtual inherited (notify) base (notify)
+NV_STATUS __nvoc_up_thunk_Notifier_dispchnUnregisterEvent(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
+    return notifyUnregisterEvent((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispChannel, __nvoc_base_Notifier)), hNotifierClient, hNotifierResource, hEventClient, hEvent);
+}
+
+// dispchnGetOrAllocNotifShare: virtual inherited (notify) base (notify)
+NV_STATUS __nvoc_up_thunk_Notifier_dispchnGetOrAllocNotifShare(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
+    return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispChannel, __nvoc_base_Notifier)), hNotifierClient, hNotifierResource, ppNotifShare);
+}
+
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__DispChannel =
+{
+    /*numEntries=*/ 0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_GpuResource(GpuResource*);
+void __nvoc_dtor_Notifier(Notifier*);
+void __nvoc_dtor_DispChannel(DispChannel *pThis) {
+    __nvoc_dispchnDestruct(pThis);
+    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+    __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_DispChannel(DispChannel *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *);
+NV_STATUS __nvoc_ctor_DispChannel(DispChannel *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, NvU32 arg_isDma) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_DispChannel_fail_GpuResource;
+    status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext);
+    if (status != NV_OK) goto __nvoc_ctor_DispChannel_fail_Notifier;
+    __nvoc_init_dataField_DispChannel(pThis);
+
+    status = __nvoc_dispchnConstruct(pThis, arg_pCallContext, arg_pParams, arg_isDma);
+    if (status != NV_OK) goto __nvoc_ctor_DispChannel_fail__init;
+    goto __nvoc_ctor_DispChannel_exit; // Success
+
+__nvoc_ctor_DispChannel_fail__init:
+    __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
+__nvoc_ctor_DispChannel_fail_Notifier:
+    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+__nvoc_ctor_DispChannel_fail_GpuResource:
+__nvoc_ctor_DispChannel_exit:
+
+    return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_DispChannel_1(DispChannel *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+} // End __nvoc_init_funcTable_DispChannel_1
+
+
+// Initialize vtable(s) for 30 virtual method(s).
+void __nvoc_init_funcTable_DispChannel(DispChannel *pThis) {
+    __nvoc_init_funcTable_DispChannel_1(pThis);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__DispChannel(DispChannel *pThis) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4
+    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3
+    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3
+    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; // (rmres) super^2
+    pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; // (gpures) super
+    pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; // (inotify) super^2
+    pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; // (notify) super
+    pThis->__nvoc_pbase_DispChannel = pThis; // (dispchn) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__GpuResource(&pThis->__nvoc_base_GpuResource);
+    __nvoc_init__Notifier(&pThis->__nvoc_base_Notifier);
+
+    // Pointer(s) to metadata structures(s)
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource; // (res) super^3
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannel.metadata__GpuResource.metadata__RmResource; // (rmres) super^2
+    pThis->__nvoc_base_GpuResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannel.metadata__GpuResource; // (gpures) super
+    pThis->__nvoc_base_Notifier.__nvoc_base_INotifier.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannel.metadata__Notifier.metadata__INotifier; // (inotify) super^2
+    pThis->__nvoc_base_Notifier.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannel.metadata__Notifier; // (notify) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__DispChannel; // (dispchn) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_DispChannel(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_DispChannel(DispChannel **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, NvU32 arg_isDma)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    DispChannel *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(DispChannel), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(DispChannel));
+
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent if there is one unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__DispChannel(pThis);
+    status = __nvoc_ctor_DispChannel(pThis, arg_pCallContext, arg_pParams, arg_isDma);
+    if (status != NV_OK) goto __nvoc_objCreate_DispChannel_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_DispChannel_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
+    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+        portMemSet(pThis, 0, sizeof(DispChannel));
+    else
+    {
+        portMemFree(pThis);
+        *ppThis = NULL;
+    }
+
+    // coverity[leaked_storage:FALSE]
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_DispChannel(DispChannel **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+    NvU32 arg_isDma = va_arg(args, NvU32);
+
+    status = __nvoc_objCreate_DispChannel(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams, arg_isDma);
+
+    return status;
+}
+
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check__0x10dec3 = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelPio;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel;
+
+// Forward declarations for DispChannelPio
+void __nvoc_init__DispChannel(DispChannel*);
+void __nvoc_init__DispChannelPio(DispChannelPio*);
+void __nvoc_init_funcTable_DispChannelPio(DispChannelPio*);
+NV_STATUS __nvoc_ctor_DispChannelPio(DispChannelPio*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+void __nvoc_init_dataField_DispChannelPio(DispChannelPio*);
+void __nvoc_dtor_DispChannelPio(DispChannelPio*);
+
+// Structures used within RTTI (run-time type information)
+extern const struct NVOC_CASTINFO __nvoc_castinfo__DispChannelPio;
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info__DispChannelPio;
+
+// Down-thunk(s) to bridge DispChannelPio methods from ancestors (if any)
+NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^3
+NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^3
+NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super^2
+NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super^2
+NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2
+PEVENTNOTIFICATION * __nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier); // super^2
+struct NotifShare * __nvoc_down_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier); // super^2
+void __nvoc_down_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare); // super^2
+NV_STATUS __nvoc_down_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super^2
+NV_STATUS __nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super^2
+NV_STATUS __nvoc_down_thunk_DispChannel_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // super
+
+// Up-thunk(s) to bridge DispChannelPio methods to ancestors (if any)
+NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^3
+void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^3
+NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^3
+NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^3
+void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^3
+NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided); // super^2
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super^2
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2
+void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct GpuResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2
+NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource); // super^2
+void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference); // super^2
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnControl(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnMap(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnUnmap(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super
+NvBool __nvoc_up_thunk_GpuResource_dispchnShareCallback(struct DispChannel *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnGetMapAddrSpace(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnInternalControlForward(struct DispChannel *pGpuResource, NvU32 command, void *pParams, NvU32 size); // super
+NvHandle __nvoc_up_thunk_GpuResource_dispchnGetInternalObjectHandle(struct DispChannel *pGpuResource); // super
+NvBool __nvoc_up_thunk_RmResource_dispchnAccessCallback(struct DispChannel *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnGetMemInterMapParams(struct DispChannel *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnCheckMemInterUnmap(struct DispChannel *pRmResource, NvBool bSubdeviceHandleProvided); // super
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnGetMemoryMappingDescriptor(struct DispChannel *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnControlSerialization_Prologue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+void __nvoc_up_thunk_RmResource_dispchnControlSerialization_Epilogue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnControl_Prologue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+void __nvoc_up_thunk_RmResource_dispchnControl_Epilogue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NvBool __nvoc_up_thunk_RsResource_dispchnCanCopy(struct DispChannel *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnIsDuplicate(struct DispChannel *pResource, NvHandle hMemory, NvBool *pDuplicate); // super
+void __nvoc_up_thunk_RsResource_dispchnPreDestruct(struct DispChannel *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnControlFilter(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NvBool __nvoc_up_thunk_RsResource_dispchnIsPartialUnmapSupported(struct DispChannel *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnMapTo(struct DispChannel *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnUnmapFrom(struct DispChannel *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super
+NvU32 __nvoc_up_thunk_RsResource_dispchnGetRefCount(struct DispChannel *pResource); // super
+void __nvoc_up_thunk_RsResource_dispchnAddAdditionalDependants(struct RsClient *pClient, struct DispChannel *pResource, RsResourceRef *pReference); // super
+PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispchnGetNotificationListPtr(struct DispChannel *pNotifier); // super
+struct NotifShare * __nvoc_up_thunk_Notifier_dispchnGetNotificationShare(struct DispChannel *pNotifier); // super
+void __nvoc_up_thunk_Notifier_dispchnSetNotificationShare(struct DispChannel *pNotifier, struct NotifShare *pNotifShare); // super
+NV_STATUS __nvoc_up_thunk_Notifier_dispchnUnregisterEvent(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super
+NV_STATUS __nvoc_up_thunk_Notifier_dispchnGetOrAllocNotifShare(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super
+NV_STATUS __nvoc_up_thunk_DispChannel_dispchnpioGetRegBaseOffsetAndSize(struct DispChannelPio *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnpioControl(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnpioMap(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnpioUnmap(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // this
+NvBool __nvoc_up_thunk_GpuResource_dispchnpioShareCallback(struct DispChannelPio *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnpioGetMapAddrSpace(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnpioInternalControlForward(struct DispChannelPio *pGpuResource, NvU32 command, void *pParams, NvU32 size); // this
+NvHandle __nvoc_up_thunk_GpuResource_dispchnpioGetInternalObjectHandle(struct DispChannelPio *pGpuResource); // this
+NvBool __nvoc_up_thunk_RmResource_dispchnpioAccessCallback(struct DispChannelPio *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnpioGetMemInterMapParams(struct DispChannelPio *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnpioCheckMemInterUnmap(struct DispChannelPio *pRmResource, NvBool bSubdeviceHandleProvided); // this
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnpioGetMemoryMappingDescriptor(struct DispChannelPio *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnpioControlSerialization_Prologue(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+void __nvoc_up_thunk_RmResource_dispchnpioControlSerialization_Epilogue(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnpioControl_Prologue(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+void __nvoc_up_thunk_RmResource_dispchnpioControl_Epilogue(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NvBool __nvoc_up_thunk_RsResource_dispchnpioCanCopy(struct DispChannelPio *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnpioIsDuplicate(struct DispChannelPio *pResource, NvHandle hMemory, NvBool *pDuplicate); // this
+void __nvoc_up_thunk_RsResource_dispchnpioPreDestruct(struct DispChannelPio *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnpioControlFilter(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NvBool __nvoc_up_thunk_RsResource_dispchnpioIsPartialUnmapSupported(struct DispChannelPio *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnpioMapTo(struct DispChannelPio *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnpioUnmapFrom(struct DispChannelPio *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this
+NvU32 __nvoc_up_thunk_RsResource_dispchnpioGetRefCount(struct DispChannelPio *pResource); // this
+void __nvoc_up_thunk_RsResource_dispchnpioAddAdditionalDependants(struct RsClient *pClient, struct DispChannelPio *pResource, RsResourceRef *pReference); // this
+PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispchnpioGetNotificationListPtr(struct DispChannelPio *pNotifier); // this
+struct NotifShare * __nvoc_up_thunk_Notifier_dispchnpioGetNotificationShare(struct DispChannelPio *pNotifier); // this
+void __nvoc_up_thunk_Notifier_dispchnpioSetNotificationShare(struct DispChannelPio *pNotifier, struct NotifShare *pNotifShare); // this
+NV_STATUS __nvoc_up_thunk_Notifier_dispchnpioUnregisterEvent(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // this
+NV_STATUS __nvoc_up_thunk_Notifier_dispchnpioGetOrAllocNotifShare(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // this
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelPio =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(DispChannelPio),
+        /*classId=*/ classId(DispChannelPio),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "DispChannelPio",
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispChannelPio,
+    /*pCastInfo=*/ &__nvoc_castinfo__DispChannelPio,
+    /*pExportInfo=*/ &__nvoc_export_info__DispChannelPio
+};
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+static const struct NVOC_METADATA__DispChannelPio __nvoc_metadata__DispChannelPio = {
+    .rtti.pClassDef = &__nvoc_class_def_DispChannelPio, // (dispchnpio) this
+    .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispChannelPio,
+    .rtti.offset = 0,
+    .metadata__DispChannel.rtti.pClassDef = &__nvoc_class_def_DispChannel, // (dispchn) super
+    .metadata__DispChannel.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__DispChannel.rtti.offset = NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel),
+    .metadata__DispChannel.metadata__GpuResource.rtti.pClassDef = &__nvoc_class_def_GpuResource, // (gpures) super^2
+    .metadata__DispChannel.metadata__GpuResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__DispChannel.metadata__GpuResource.rtti.offset = NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource),
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^3
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.rtti.offset = NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource),
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^4
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^5
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^4
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
+    .metadata__DispChannel.metadata__Notifier.rtti.pClassDef = &__nvoc_class_def_Notifier, // (notify) super^2
+    .metadata__DispChannel.metadata__Notifier.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__DispChannel.metadata__Notifier.rtti.offset = NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_Notifier),
+    .metadata__DispChannel.metadata__Notifier.metadata__INotifier.rtti.pClassDef = &__nvoc_class_def_INotifier, // (inotify) super^3
+    .metadata__DispChannel.metadata__Notifier.metadata__INotifier.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__DispChannel.metadata__Notifier.metadata__INotifier.rtti.offset = NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_base_INotifier),
+
+    .vtable.__dispchnpioGetRegBaseOffsetAndSize__ = &__nvoc_up_thunk_DispChannel_dispchnpioGetRegBaseOffsetAndSize, // virtual inherited (dispchn) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnGetRegBaseOffsetAndSize__ = &dispchnGetRegBaseOffsetAndSize_IMPL, // virtual override (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_down_thunk_DispChannel_gpuresGetRegBaseOffsetAndSize, // virtual
+    .vtable.__dispchnpioControl__ = &__nvoc_up_thunk_GpuResource_dispchnpioControl, // virtual inherited (gpures) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnControl__ = &__nvoc_up_thunk_GpuResource_dispchnControl, // virtual inherited (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresControl__ = &gpuresControl_IMPL, // virtual override (res) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_GpuResource_resControl, // virtual
+    .vtable.__dispchnpioMap__ = &__nvoc_up_thunk_GpuResource_dispchnpioMap, // virtual inherited (gpures) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnMap__ = &__nvoc_up_thunk_GpuResource_dispchnMap, // virtual inherited (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresMap__ = &gpuresMap_IMPL, // virtual override (res) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_GpuResource_resMap, // virtual
+    .vtable.__dispchnpioUnmap__ = &__nvoc_up_thunk_GpuResource_dispchnpioUnmap, // virtual inherited (gpures) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnUnmap__ = &__nvoc_up_thunk_GpuResource_dispchnUnmap, // virtual inherited (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresUnmap__ = &gpuresUnmap_IMPL, // virtual override (res) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_GpuResource_resUnmap, // virtual
+    .vtable.__dispchnpioShareCallback__ = &__nvoc_up_thunk_GpuResource_dispchnpioShareCallback, // virtual inherited (gpures) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnShareCallback__ = &__nvoc_up_thunk_GpuResource_dispchnShareCallback, // virtual inherited (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresShareCallback__ = &gpuresShareCallback_IMPL, // virtual override (res) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresShareCallback__ = &__nvoc_down_thunk_GpuResource_rmresShareCallback, // virtual override (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual
+    .vtable.__dispchnpioGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_dispchnpioGetMapAddrSpace, // virtual inherited (gpures) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_dispchnGetMapAddrSpace, // virtual inherited (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL, // virtual
+    .vtable.__dispchnpioInternalControlForward__ = &__nvoc_up_thunk_GpuResource_dispchnpioInternalControlForward, // virtual inherited (gpures) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnInternalControlForward__ = &__nvoc_up_thunk_GpuResource_dispchnInternalControlForward, // virtual inherited (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL, // virtual
+    .vtable.__dispchnpioGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_dispchnpioGetInternalObjectHandle, // virtual inherited (gpures) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_dispchnGetInternalObjectHandle, // virtual inherited (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL, // virtual
+    .vtable.__dispchnpioAccessCallback__ = &__nvoc_up_thunk_RmResource_dispchnpioAccessCallback, // virtual inherited (rmres) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnAccessCallback__ = &__nvoc_up_thunk_RmResource_dispchnAccessCallback, // virtual inherited (rmres) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresAccessCallback__ = &__nvoc_up_thunk_RmResource_gpuresAccessCallback, // virtual inherited (rmres) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual
+    .vtable.__dispchnpioGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_dispchnpioGetMemInterMapParams, // virtual inherited (rmres) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_dispchnGetMemInterMapParams, // virtual inherited (rmres) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams, // virtual inherited (rmres) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual
+    .vtable.__dispchnpioCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispchnpioCheckMemInterUnmap, // virtual inherited (rmres) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispchnCheckMemInterUnmap, // virtual inherited (rmres) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap, // virtual inherited (rmres) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual
+    .vtable.__dispchnpioGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_dispchnpioGetMemoryMappingDescriptor, // virtual inherited (rmres) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_dispchnGetMemoryMappingDescriptor, // virtual inherited (rmres) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual
+    .vtable.__dispchnpioControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispchnpioControlSerialization_Prologue, // virtual inherited (rmres) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispchnControlSerialization_Prologue, // virtual inherited (rmres) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue, // virtual inherited (rmres) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual
+    .vtable.__dispchnpioControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispchnpioControlSerialization_Epilogue, // virtual inherited (rmres) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispchnControlSerialization_Epilogue, // virtual inherited (rmres) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual
+    .vtable.__dispchnpioControl_Prologue__ = &__nvoc_up_thunk_RmResource_dispchnpioControl_Prologue, // virtual inherited (rmres) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnControl_Prologue__ = &__nvoc_up_thunk_RmResource_dispchnControl_Prologue, // virtual inherited (rmres) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Prologue, // virtual inherited (rmres) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual
+    .vtable.__dispchnpioControl_Epilogue__ = &__nvoc_up_thunk_RmResource_dispchnpioControl_Epilogue, // virtual inherited (rmres) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnControl_Epilogue__ = &__nvoc_up_thunk_RmResource_dispchnControl_Epilogue, // virtual inherited (rmres) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Epilogue, // virtual inherited (rmres) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual
+    .vtable.__dispchnpioCanCopy__ = &__nvoc_up_thunk_RsResource_dispchnpioCanCopy, // virtual inherited (res) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnCanCopy__ = &__nvoc_up_thunk_RsResource_dispchnCanCopy, // virtual inherited (res) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresCanCopy__ = &__nvoc_up_thunk_RsResource_gpuresCanCopy, // virtual inherited (res) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual
+    .vtable.__dispchnpioIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispchnpioIsDuplicate, // virtual inherited (res) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispchnIsDuplicate, // virtual inherited (res) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpuresIsDuplicate, // virtual inherited (res) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual
+    .vtable.__dispchnpioPreDestruct__ = &__nvoc_up_thunk_RsResource_dispchnpioPreDestruct, // virtual inherited (res) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnPreDestruct__ = &__nvoc_up_thunk_RsResource_dispchnPreDestruct, // virtual inherited (res) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresPreDestruct__ = &__nvoc_up_thunk_RsResource_gpuresPreDestruct, // virtual inherited (res) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual
+    .vtable.__dispchnpioControlFilter__ = &__nvoc_up_thunk_RsResource_dispchnpioControlFilter, // virtual inherited (res) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnControlFilter__ = &__nvoc_up_thunk_RsResource_dispchnControlFilter, // virtual inherited (res) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresControlFilter__ = &__nvoc_up_thunk_RsResource_gpuresControlFilter, // virtual inherited (res) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual
+    .vtable.__dispchnpioIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_dispchnpioIsPartialUnmapSupported, // inline virtual inherited (res) base (dispchn) body
+    .metadata__DispChannel.vtable.__dispchnIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_dispchnIsPartialUnmapSupported, // inline virtual inherited (res) base (gpures) body
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body
+    .vtable.__dispchnpioMapTo__ = &__nvoc_up_thunk_RsResource_dispchnpioMapTo, // virtual inherited (res) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnMapTo__ = &__nvoc_up_thunk_RsResource_dispchnMapTo, // virtual inherited (res) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresMapTo__ = &__nvoc_up_thunk_RsResource_gpuresMapTo, // virtual inherited (res) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual
+    .vtable.__dispchnpioUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispchnpioUnmapFrom, // virtual inherited (res) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispchnUnmapFrom, // virtual inherited (res) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpuresUnmapFrom, // virtual inherited (res) base (rmres)
.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__dispchnpioGetRefCount__ = &__nvoc_up_thunk_RsResource_dispchnpioGetRefCount, // virtual inherited (res) base (dispchn) + .metadata__DispChannel.vtable.__dispchnGetRefCount__ = &__nvoc_up_thunk_RsResource_dispchnGetRefCount, // virtual inherited (res) base (gpures) + .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresGetRefCount__ = &__nvoc_up_thunk_RsResource_gpuresGetRefCount, // virtual inherited (res) base (rmres) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__dispchnpioAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispchnpioAddAdditionalDependants, // virtual inherited (res) base (dispchn) + .metadata__DispChannel.vtable.__dispchnAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispchnAddAdditionalDependants, // virtual inherited (res) base (gpures) + .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual + .vtable.__dispchnpioGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_dispchnpioGetNotificationListPtr, // virtual inherited (notify) base (dispchn) + .metadata__DispChannel.vtable.__dispchnGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_dispchnGetNotificationListPtr, // virtual inherited (notify) base (notify) + .metadata__DispChannel.metadata__Notifier.vtable.__notifyGetNotificationListPtr__ = ¬ifyGetNotificationListPtr_IMPL, // virtual override (inotify) base (inotify) + .metadata__DispChannel.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationListPtr__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr, // pure virtual + .vtable.__dispchnpioGetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispchnpioGetNotificationShare, // virtual inherited (notify) base (dispchn) + .metadata__DispChannel.vtable.__dispchnGetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispchnGetNotificationShare, // virtual inherited (notify) base (notify) + .metadata__DispChannel.metadata__Notifier.vtable.__notifyGetNotificationShare__ = ¬ifyGetNotificationShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__DispChannel.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationShare, // pure virtual + .vtable.__dispchnpioSetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispchnpioSetNotificationShare, // virtual inherited (notify) base (dispchn) + .metadata__DispChannel.vtable.__dispchnSetNotificationShare__ = 
&__nvoc_up_thunk_Notifier_dispchnSetNotificationShare, // virtual inherited (notify) base (notify) + .metadata__DispChannel.metadata__Notifier.vtable.__notifySetNotificationShare__ = ¬ifySetNotificationShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__DispChannel.metadata__Notifier.metadata__INotifier.vtable.__inotifySetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifySetNotificationShare, // pure virtual + .vtable.__dispchnpioUnregisterEvent__ = &__nvoc_up_thunk_Notifier_dispchnpioUnregisterEvent, // virtual inherited (notify) base (dispchn) + .metadata__DispChannel.vtable.__dispchnUnregisterEvent__ = &__nvoc_up_thunk_Notifier_dispchnUnregisterEvent, // virtual inherited (notify) base (notify) + .metadata__DispChannel.metadata__Notifier.vtable.__notifyUnregisterEvent__ = ¬ifyUnregisterEvent_IMPL, // virtual override (inotify) base (inotify) + .metadata__DispChannel.metadata__Notifier.metadata__INotifier.vtable.__inotifyUnregisterEvent__ = &__nvoc_down_thunk_Notifier_inotifyUnregisterEvent, // pure virtual + .vtable.__dispchnpioGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_dispchnpioGetOrAllocNotifShare, // virtual inherited (notify) base (dispchn) + .metadata__DispChannel.vtable.__dispchnGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_dispchnGetOrAllocNotifShare, // virtual inherited (notify) base (notify) + .metadata__DispChannel.metadata__Notifier.vtable.__notifyGetOrAllocNotifShare__ = ¬ifyGetOrAllocNotifShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__DispChannel.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetOrAllocNotifShare__ = &__nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare, // pure virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__DispChannelPio = { + .numRelatives = 9, + .relatives = { + &__nvoc_metadata__DispChannelPio.rtti, // [0]: (dispchnpio) this + &__nvoc_metadata__DispChannelPio.metadata__DispChannel.rtti, // [1]: (dispchn) super + &__nvoc_metadata__DispChannelPio.metadata__DispChannel.metadata__GpuResource.rtti, // [2]: (gpures) super^2 + &__nvoc_metadata__DispChannelPio.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.rtti, // [3]: (rmres) super^3 + &__nvoc_metadata__DispChannelPio.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti, // [4]: (res) super^4 + &__nvoc_metadata__DispChannelPio.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [5]: (obj) super^5 + &__nvoc_metadata__DispChannelPio.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti, // [6]: (rmrescmn) super^4 + &__nvoc_metadata__DispChannelPio.metadata__DispChannel.metadata__Notifier.rtti, // [7]: (notify) super^2 + &__nvoc_metadata__DispChannelPio.metadata__DispChannel.metadata__Notifier.metadata__INotifier.rtti, // [8]: (inotify) super^3 + } +}; + +// 30 up-thunk(s) defined to bridge methods in DispChannelPio to superclasses + +// dispchnpioGetRegBaseOffsetAndSize: virtual inherited (dispchn) base (dispchn) +NV_STATUS __nvoc_up_thunk_DispChannel_dispchnpioGetRegBaseOffsetAndSize(struct DispChannelPio *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return dispchnGetRegBaseOffsetAndSize((struct DispChannel *)(((unsigned char *) pDispChannel) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel)), pGpu, pOffset, pSize); +} + +// dispchnpioControl: virtual inherited (gpures) base (dispchn) +NV_STATUS 
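Every up-thunk in this file uses the same mechanism: it accepts the derived type, shifts the object pointer by the compile-time offset of the embedded base-class struct (NV_OFFSETOF plays the role of the standard offsetof here), and delegates to the base implementation. A minimal standalone sketch of the pattern, with invented toy names (ToyBase/ToyChild are not part of the driver):

    #include <stddef.h>

    /* Toy stand-ins for the generated classes. */
    struct ToyBase  { int refCount; };
    struct ToyChild { struct ToyBase base; int extra; }; /* base embedded at a known offset */

    static int toyBaseGetRefCount(struct ToyBase *p) { return p->refCount; }

    /* Up-thunk: shift the derived pointer to the embedded base subobject,
     * then call the base implementation. */
    static int toyChildGetRefCount(struct ToyChild *p) {
        return toyBaseGetRefCount(
            (struct ToyBase *)((unsigned char *)p + offsetof(struct ToyChild, base)));
    }

For a base placed first the offset happens to be zero, but spelling it out keeps the same pattern correct for bases that are not first in the layout, such as the Notifier subobject above.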
+// dispchnpioControl: virtual inherited (gpures) base (dispchn)
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnpioControl(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return gpuresControl((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource)), pCallContext, pParams);
+}
+
+// dispchnpioMap: virtual inherited (gpures) base (dispchn)
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnpioMap(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
+    return gpuresMap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource)), pCallContext, pParams, pCpuMapping);
+}
+
+// dispchnpioUnmap: virtual inherited (gpures) base (dispchn)
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnpioUnmap(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
+    return gpuresUnmap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource)), pCallContext, pCpuMapping);
+}
+
+// dispchnpioShareCallback: virtual inherited (gpures) base (dispchn)
+NvBool __nvoc_up_thunk_GpuResource_dispchnpioShareCallback(struct DispChannelPio *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return gpuresShareCallback((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource)), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+// dispchnpioGetMapAddrSpace: virtual inherited (gpures) base (dispchn)
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnpioGetMapAddrSpace(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
+    return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource)), pCallContext, mapFlags, pAddrSpace);
+}
+
+// dispchnpioInternalControlForward: virtual inherited (gpures) base (dispchn)
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnpioInternalControlForward(struct DispChannelPio *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
+    return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource)), command, pParams, size);
+}
+
+// dispchnpioGetInternalObjectHandle: virtual inherited (gpures) base (dispchn)
+NvHandle __nvoc_up_thunk_GpuResource_dispchnpioGetInternalObjectHandle(struct DispChannelPio *pGpuResource) {
+    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource)));
+}
+
+// dispchnpioAccessCallback: virtual inherited (rmres) base (dispchn)
+NvBool __nvoc_up_thunk_RmResource_dispchnpioAccessCallback(struct DispChannelPio *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight);
+}
+// dispchnpioGetMemInterMapParams: virtual inherited (rmres) base (dispchn)
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnpioGetMemInterMapParams(struct DispChannelPio *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pParams);
+}
+
+// dispchnpioCheckMemInterUnmap: virtual inherited (rmres) base (dispchn)
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnpioCheckMemInterUnmap(struct DispChannelPio *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), bSubdeviceHandleProvided);
+}
+
+// dispchnpioGetMemoryMappingDescriptor: virtual inherited (rmres) base (dispchn)
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnpioGetMemoryMappingDescriptor(struct DispChannelPio *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), ppMemDesc);
+}
+
+// dispchnpioControlSerialization_Prologue: virtual inherited (rmres) base (dispchn)
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnpioControlSerialization_Prologue(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// dispchnpioControlSerialization_Epilogue: virtual inherited (rmres) base (dispchn)
+void __nvoc_up_thunk_RmResource_dispchnpioControlSerialization_Epilogue(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// dispchnpioControl_Prologue: virtual inherited (rmres) base (dispchn)
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnpioControl_Prologue(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// dispchnpioControl_Epilogue: virtual inherited (rmres) base (dispchn)
+void __nvoc_up_thunk_RmResource_dispchnpioControl_Epilogue(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// dispchnpioCanCopy: virtual inherited (res) base (dispchn)
+NvBool __nvoc_up_thunk_RsResource_dispchnpioCanCopy(struct DispChannelPio *pResource) {
+    return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+// dispchnpioIsDuplicate: virtual inherited (res) base (dispchn)
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnpioIsDuplicate(struct DispChannelPio *pResource, NvHandle hMemory, NvBool *pDuplicate) {
+    return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate);
+}
+
+// dispchnpioPreDestruct: virtual inherited (res) base (dispchn)
+void __nvoc_up_thunk_RsResource_dispchnpioPreDestruct(struct DispChannelPio *pResource) {
+    resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// dispchnpioControlFilter: virtual inherited (res) base (dispchn)
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnpioControlFilter(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// dispchnpioIsPartialUnmapSupported: inline virtual inherited (res) base (dispchn) body
+NvBool __nvoc_up_thunk_RsResource_dispchnpioIsPartialUnmapSupported(struct DispChannelPio *pResource) {
+    return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// dispchnpioMapTo: virtual inherited (res) base (dispchn)
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnpioMapTo(struct DispChannelPio *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// dispchnpioUnmapFrom: virtual inherited (res) base (dispchn)
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnpioUnmapFrom(struct DispChannelPio *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// dispchnpioGetRefCount: virtual inherited (res) base (dispchn)
+NvU32 __nvoc_up_thunk_RsResource_dispchnpioGetRefCount(struct DispChannelPio *pResource) {
+    return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// dispchnpioAddAdditionalDependants: virtual inherited (res) base (dispchn)
+void __nvoc_up_thunk_RsResource_dispchnpioAddAdditionalDependants(struct RsClient *pClient, struct DispChannelPio *pResource, RsResourceRef *pReference) {
+    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference);
+}
+// dispchnpioGetNotificationListPtr: virtual inherited (notify) base (dispchn)
+PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispchnpioGetNotificationListPtr(struct DispChannelPio *pNotifier) {
+    return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_Notifier)));
+}
+
+// dispchnpioGetNotificationShare: virtual inherited (notify) base (dispchn)
+struct NotifShare * __nvoc_up_thunk_Notifier_dispchnpioGetNotificationShare(struct DispChannelPio *pNotifier) {
+    return notifyGetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_Notifier)));
+}
+
+// dispchnpioSetNotificationShare: virtual inherited (notify) base (dispchn)
+void __nvoc_up_thunk_Notifier_dispchnpioSetNotificationShare(struct DispChannelPio *pNotifier, struct NotifShare *pNotifShare) {
+    notifySetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_Notifier)), pNotifShare);
+}
+
+// dispchnpioUnregisterEvent: virtual inherited (notify) base (dispchn)
+NV_STATUS __nvoc_up_thunk_Notifier_dispchnpioUnregisterEvent(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
+    return notifyUnregisterEvent((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_Notifier)), hNotifierClient, hNotifierResource, hEventClient, hEvent);
+}
+
+// dispchnpioGetOrAllocNotifShare: virtual inherited (notify) base (dispchn)
+NV_STATUS __nvoc_up_thunk_Notifier_dispchnpioGetOrAllocNotifShare(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
+    return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_Notifier)), hNotifierClient, hNotifierResource, ppNotifShare);
+}
+
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__DispChannelPio =
+{
+    /*numEntries=*/ 0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_DispChannel(DispChannel*);
+void __nvoc_dtor_DispChannelPio(DispChannelPio *pThis) {
+    __nvoc_dtor_DispChannel(&pThis->__nvoc_base_DispChannel);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_DispChannelPio(DispChannelPio *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_DispChannel(DispChannel* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, NvU32);
+NV_STATUS __nvoc_ctor_DispChannelPio(DispChannelPio *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_DispChannel(&pThis->__nvoc_base_DispChannel, arg_pCallContext, arg_pParams, NV_FALSE);
+    if (status != NV_OK) goto __nvoc_ctor_DispChannelPio_fail_DispChannel;
+    __nvoc_init_dataField_DispChannelPio(pThis);
+
+    status = __nvoc_dispchnpioConstruct(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_DispChannelPio_fail__init;
+    goto __nvoc_ctor_DispChannelPio_exit; // Success
+
+__nvoc_ctor_DispChannelPio_fail__init:
+    __nvoc_dtor_DispChannel(&pThis->__nvoc_base_DispChannel);
+__nvoc_ctor_DispChannelPio_fail_DispChannel:
+__nvoc_ctor_DispChannelPio_exit:
+
+    return status;
+}
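The generated constructor above uses the classic C goto-based unwinding idiom: each construction step gets a matching fail label, and a failure jumps to the label that tears down exactly the steps that already succeeded, in reverse order. A minimal sketch of the idiom with invented names (buildA/buildB/teardownA are illustrative only):

    #include <stdio.h>

    /* Invented stand-ins: each step returns 0 on success, -1 on failure. */
    static int  buildA(void)    { return 0; }
    static int  buildB(void)    { return -1; }
    static void teardownA(void) { puts("teardownA"); }

    /* goto-based unwinding: on failure, fall through only the teardowns
     * for the steps that already succeeded. */
    static int construct(void) {
        int status;
        if ((status = buildA()) != 0) goto fail_A;
        if ((status = buildB()) != 0) goto fail_B;
        return 0;        /* success: keep everything */
    fail_B:
        teardownA();     /* undo step A because B failed */
    fail_A:
        return status;
    }

In __nvoc_ctor_DispChannelPio the two steps are the DispChannel base constructor and __nvoc_dispchnpioConstruct, so only the base destructor needs to run when the latter fails.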
+// Vtable initialization
+static void __nvoc_init_funcTable_DispChannelPio_1(DispChannelPio *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+} // End __nvoc_init_funcTable_DispChannelPio_1
+
+
+// Initialize vtable(s) for 30 virtual method(s).
+void __nvoc_init_funcTable_DispChannelPio(DispChannelPio *pThis) {
+    __nvoc_init_funcTable_DispChannelPio_1(pThis);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__DispChannelPio(DispChannelPio *pThis) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^5
+    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^4
+    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^4
+    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource; // (rmres) super^3
+    pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource; // (gpures) super^2
+    pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_base_INotifier; // (inotify) super^3
+    pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier; // (notify) super^2
+    pThis->__nvoc_pbase_DispChannel = &pThis->__nvoc_base_DispChannel; // (dispchn) super
+    pThis->__nvoc_pbase_DispChannelPio = pThis; // (dispchnpio) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__DispChannel(&pThis->__nvoc_base_DispChannel);
+
+    // Pointer(s) to metadata structures(s)
+    pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelPio.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^5
+    pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelPio.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource; // (res) super^4
+    pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelPio.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^4
+    pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelPio.metadata__DispChannel.metadata__GpuResource.metadata__RmResource; // (rmres) super^3
+    pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelPio.metadata__DispChannel.metadata__GpuResource; // (gpures) super^2
+    pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_base_INotifier.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelPio.metadata__DispChannel.metadata__Notifier.metadata__INotifier; // (inotify) super^3
+    pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelPio.metadata__DispChannel.metadata__Notifier; // (notify) super^2
+    pThis->__nvoc_base_DispChannel.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelPio.metadata__DispChannel; // (dispchn) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelPio; // (dispchnpio) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_DispChannelPio(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_DispChannelPio(DispChannelPio **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    DispChannelPio *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(DispChannelPio), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(DispChannelPio));
+
+    pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent if there is one unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__DispChannelPio(pThis);
+    status = __nvoc_ctor_DispChannelPio(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_objCreate_DispChannelPio_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_DispChannelPio_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
+    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+        portMemSet(pThis, 0, sizeof(DispChannelPio));
+    else
+    {
+        portMemFree(pThis);
+        *ppThis = NULL;
+    }
+
+    // coverity[leaked_storage:FALSE]
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_DispChannelPio(DispChannelPio **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+    status = __nvoc_objCreate_DispChannelPio(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+    return status;
+}
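The __nvoc_objCreateDynamic_* wrapper exists so that every class can be created through one uniform function-pointer signature (the objCreatefn field in the class definition below) even though each class's constructor takes different arguments: the wrapper pulls its own arguments out of a va_list. A minimal sketch of this dispatch style, with invented names (CreateFn, createWidget, objCreateDynamic are illustrative only):

    #include <stdarg.h>
    #include <stdio.h>

    /* Uniform factory signature: every class-specific creator matches it. */
    typedef int (*CreateFn)(void **ppObj, va_list args);

    static int createWidget(void **ppObj, va_list args) {
        int width  = va_arg(args, int);   /* this class expects two ints */
        int height = va_arg(args, int);
        printf("widget %dx%d\n", width, height);
        *ppObj = NULL;                    /* allocation elided in this sketch */
        return 0;
    }

    static int objCreateDynamic(CreateFn fn, void **ppObj, ...) {
        va_list args;
        va_start(args, ppObj);
        int status = fn(ppObj, args);     /* callee pulls its own arguments */
        va_end(args);
        return status;
    }

    int main(void) {
        void *p;
        return objCreateDynamic(createWidget, &p, 640, 480);
    }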
+
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check__0xfe3d2e = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelDma;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel;
+
+// Forward declarations for DispChannelDma
+void __nvoc_init__DispChannel(DispChannel*);
+void __nvoc_init__DispChannelDma(DispChannelDma*);
+void __nvoc_init_funcTable_DispChannelDma(DispChannelDma*);
+NV_STATUS __nvoc_ctor_DispChannelDma(DispChannelDma*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+void __nvoc_init_dataField_DispChannelDma(DispChannelDma*);
+void __nvoc_dtor_DispChannelDma(DispChannelDma*);
+
+// Structures used within RTTI (run-time type information)
+extern const struct NVOC_CASTINFO __nvoc_castinfo__DispChannelDma;
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info__DispChannelDma;
+
+// Down-thunk(s) to bridge DispChannelDma methods from ancestors (if any)
+NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^3
+NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^3
+NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super^2
+NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super^2
+NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2
+PEVENTNOTIFICATION * __nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier); // super^2
+struct NotifShare * __nvoc_down_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier); // super^2
+void __nvoc_down_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare); // super^2
+NV_STATUS __nvoc_down_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super^2
+NV_STATUS __nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super^2
+NV_STATUS __nvoc_down_thunk_DispChannel_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // super
+
+// Up-thunk(s) to bridge DispChannelDma methods to ancestors (if any)
+NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^3
+void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^3
+NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^3
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^3
+NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^3
+void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^3
+NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided); // super^2
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super^2
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2
+void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct GpuResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2
+NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource); // super^2
+void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference); // super^2
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnControl(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnMap(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnUnmap(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super
+NvBool __nvoc_up_thunk_GpuResource_dispchnShareCallback(struct DispChannel *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnGetMapAddrSpace(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchnInternalControlForward(struct DispChannel *pGpuResource, NvU32 command, void *pParams, NvU32 size); // super
+NvHandle __nvoc_up_thunk_GpuResource_dispchnGetInternalObjectHandle(struct DispChannel *pGpuResource); // super
+NvBool __nvoc_up_thunk_RmResource_dispchnAccessCallback(struct DispChannel *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnGetMemInterMapParams(struct DispChannel *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnCheckMemInterUnmap(struct DispChannel *pRmResource, NvBool bSubdeviceHandleProvided); // super
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnGetMemoryMappingDescriptor(struct DispChannel *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnControlSerialization_Prologue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+void __nvoc_up_thunk_RmResource_dispchnControlSerialization_Epilogue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NV_STATUS __nvoc_up_thunk_RmResource_dispchnControl_Prologue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+void __nvoc_up_thunk_RmResource_dispchnControl_Epilogue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NvBool __nvoc_up_thunk_RsResource_dispchnCanCopy(struct DispChannel *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnIsDuplicate(struct DispChannel *pResource, NvHandle hMemory, NvBool *pDuplicate); // super
+void __nvoc_up_thunk_RsResource_dispchnPreDestruct(struct DispChannel *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnControlFilter(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NvBool __nvoc_up_thunk_RsResource_dispchnIsPartialUnmapSupported(struct DispChannel *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnMapTo(struct DispChannel *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super
+NV_STATUS __nvoc_up_thunk_RsResource_dispchnUnmapFrom(struct DispChannel *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super
+NvU32 __nvoc_up_thunk_RsResource_dispchnGetRefCount(struct DispChannel *pResource); // super
+void __nvoc_up_thunk_RsResource_dispchnAddAdditionalDependants(struct RsClient *pClient, struct DispChannel *pResource, RsResourceRef *pReference); // super
+PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispchnGetNotificationListPtr(struct DispChannel *pNotifier); // super
+struct NotifShare * __nvoc_up_thunk_Notifier_dispchnGetNotificationShare(struct DispChannel *pNotifier); // super
+void __nvoc_up_thunk_Notifier_dispchnSetNotificationShare(struct DispChannel *pNotifier, struct NotifShare *pNotifShare); // super
+NV_STATUS __nvoc_up_thunk_Notifier_dispchnUnregisterEvent(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super
+NV_STATUS __nvoc_up_thunk_Notifier_dispchnGetOrAllocNotifShare(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super
+NV_STATUS __nvoc_up_thunk_DispChannel_dispchndmaGetRegBaseOffsetAndSize(struct DispChannelDma *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchndmaControl(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchndmaMap(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchndmaUnmap(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // this
+NvBool __nvoc_up_thunk_GpuResource_dispchndmaShareCallback(struct DispChannelDma *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchndmaGetMapAddrSpace(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_dispchndmaInternalControlForward(struct DispChannelDma *pGpuResource, NvU32 command, void *pParams, NvU32 size); // this
+NvHandle __nvoc_up_thunk_GpuResource_dispchndmaGetInternalObjectHandle(struct DispChannelDma *pGpuResource); // this
+NvBool __nvoc_up_thunk_RmResource_dispchndmaAccessCallback(struct DispChannelDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this
+NV_STATUS __nvoc_up_thunk_RmResource_dispchndmaGetMemInterMapParams(struct DispChannelDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this
+NV_STATUS __nvoc_up_thunk_RmResource_dispchndmaCheckMemInterUnmap(struct DispChannelDma *pRmResource, NvBool bSubdeviceHandleProvided); // this
+NV_STATUS __nvoc_up_thunk_RmResource_dispchndmaGetMemoryMappingDescriptor(struct DispChannelDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this
+NV_STATUS __nvoc_up_thunk_RmResource_dispchndmaControlSerialization_Prologue(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+void __nvoc_up_thunk_RmResource_dispchndmaControlSerialization_Epilogue(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NV_STATUS __nvoc_up_thunk_RmResource_dispchndmaControl_Prologue(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+void __nvoc_up_thunk_RmResource_dispchndmaControl_Epilogue(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NvBool __nvoc_up_thunk_RsResource_dispchndmaCanCopy(struct DispChannelDma *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_dispchndmaIsDuplicate(struct DispChannelDma *pResource, NvHandle hMemory, NvBool *pDuplicate); // this
+void __nvoc_up_thunk_RsResource_dispchndmaPreDestruct(struct DispChannelDma *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_dispchndmaControlFilter(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NvBool __nvoc_up_thunk_RsResource_dispchndmaIsPartialUnmapSupported(struct DispChannelDma *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_dispchndmaMapTo(struct DispChannelDma *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this
+NV_STATUS __nvoc_up_thunk_RsResource_dispchndmaUnmapFrom(struct DispChannelDma *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this
+NvU32 __nvoc_up_thunk_RsResource_dispchndmaGetRefCount(struct DispChannelDma *pResource); // this
+void __nvoc_up_thunk_RsResource_dispchndmaAddAdditionalDependants(struct RsClient *pClient, struct DispChannelDma *pResource, RsResourceRef *pReference); // this
+PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispchndmaGetNotificationListPtr(struct DispChannelDma *pNotifier); // this
+struct NotifShare * __nvoc_up_thunk_Notifier_dispchndmaGetNotificationShare(struct DispChannelDma *pNotifier); // this
+void __nvoc_up_thunk_Notifier_dispchndmaSetNotificationShare(struct DispChannelDma *pNotifier, struct NotifShare *pNotifShare); // this
+NV_STATUS __nvoc_up_thunk_Notifier_dispchndmaUnregisterEvent(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // this
+NV_STATUS __nvoc_up_thunk_Notifier_dispchndmaGetOrAllocNotifShare(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // this
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelDma =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(DispChannelDma),
+        /*classId=*/ classId(DispChannelDma),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "DispChannelDma",
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispChannelDma,
+    /*pCastInfo=*/ &__nvoc_castinfo__DispChannelDma,
+    /*pExportInfo=*/ &__nvoc_export_info__DispChannelDma
+};
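As with DispChannelPio above, the class definition and the metadata that follows are emitted once per class as const data: every DispChannelDma instance shares the same descriptor through its __nvoc_metadata_ptr rather than carrying per-object vtables. A minimal sketch of that layout, with invented names (Meta, Obj, g_metaObj are illustrative only):

    #include <stdio.h>

    /* Per-class metadata lives in one const (rodata) instance. */
    struct Meta { const char *name; unsigned size; };

    /* Each object stores only a pointer to its class descriptor. */
    struct Obj { const struct Meta *meta; int payload; };

    static const struct Meta g_metaObj = { "Obj", (unsigned)sizeof(struct Obj) };

    int main(void) {
        struct Obj a = { &g_metaObj, 1 }, b = { &g_metaObj, 2 };
        /* Both objects share the same class descriptor. */
        printf("%s %s shared=%d\n", a.meta->name, b.meta->name, a.meta == b.meta);
        return 0;
    }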
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+static const struct NVOC_METADATA__DispChannelDma __nvoc_metadata__DispChannelDma = {
+    .rtti.pClassDef = &__nvoc_class_def_DispChannelDma,    // (dispchndma) this
+    .rtti.dtor      = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispChannelDma,
+    .rtti.offset    = 0,
+    .metadata__DispChannel.rtti.pClassDef = &__nvoc_class_def_DispChannel,    // (dispchn) super
+    .metadata__DispChannel.rtti.dtor      = &__nvoc_destructFromBase,
+    .metadata__DispChannel.rtti.offset    = NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel),
+    .metadata__DispChannel.metadata__GpuResource.rtti.pClassDef = &__nvoc_class_def_GpuResource,    // (gpures) super^2
+    .metadata__DispChannel.metadata__GpuResource.rtti.dtor      = &__nvoc_destructFromBase,
+    .metadata__DispChannel.metadata__GpuResource.rtti.offset    = NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource),
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource,    // (rmres) super^3
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.rtti.dtor      = &__nvoc_destructFromBase,
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.rtti.offset    = NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource),
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource,    // (res) super^4
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.dtor      = &__nvoc_destructFromBase,
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.offset    = NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object,    // (obj) super^5
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor      = &__nvoc_destructFromBase,
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset    = NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon,    // (rmrescmn) super^4
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor      = &__nvoc_destructFromBase,
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.offset    = NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
+    .metadata__DispChannel.metadata__Notifier.rtti.pClassDef = &__nvoc_class_def_Notifier,    // (notify) super^2
+    .metadata__DispChannel.metadata__Notifier.rtti.dtor      = &__nvoc_destructFromBase,
+    .metadata__DispChannel.metadata__Notifier.rtti.offset    = NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_Notifier),
+    .metadata__DispChannel.metadata__Notifier.metadata__INotifier.rtti.pClassDef = &__nvoc_class_def_INotifier,    // (inotify) super^3
+    .metadata__DispChannel.metadata__Notifier.metadata__INotifier.rtti.dtor      = &__nvoc_destructFromBase,
+    .metadata__DispChannel.metadata__Notifier.metadata__INotifier.rtti.offset    = NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_base_INotifier),
+
+    .vtable.__dispchndmaGetRegBaseOffsetAndSize__ = &__nvoc_up_thunk_DispChannel_dispchndmaGetRegBaseOffsetAndSize, // virtual inherited (dispchn) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnGetRegBaseOffsetAndSize__ = &dispchnGetRegBaseOffsetAndSize_IMPL, // virtual override (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_down_thunk_DispChannel_gpuresGetRegBaseOffsetAndSize, // virtual
+    .vtable.__dispchndmaControl__ = &__nvoc_up_thunk_GpuResource_dispchndmaControl, // virtual inherited (gpures) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnControl__ = &__nvoc_up_thunk_GpuResource_dispchnControl, // virtual inherited (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresControl__ = &gpuresControl_IMPL, // virtual override (res) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_GpuResource_resControl, // virtual
+    .vtable.__dispchndmaMap__ = &__nvoc_up_thunk_GpuResource_dispchndmaMap, // virtual inherited (gpures) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnMap__ = &__nvoc_up_thunk_GpuResource_dispchnMap, // virtual inherited (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresMap__ = &gpuresMap_IMPL, // virtual override (res) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_GpuResource_resMap, // virtual
+    .vtable.__dispchndmaUnmap__ = &__nvoc_up_thunk_GpuResource_dispchndmaUnmap, // virtual inherited (gpures) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnUnmap__ = &__nvoc_up_thunk_GpuResource_dispchnUnmap, // virtual inherited (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresUnmap__ = &gpuresUnmap_IMPL, // virtual override (res) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_GpuResource_resUnmap, // virtual
+    .vtable.__dispchndmaShareCallback__ = &__nvoc_up_thunk_GpuResource_dispchndmaShareCallback, // virtual inherited (gpures) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnShareCallback__ = &__nvoc_up_thunk_GpuResource_dispchnShareCallback, // virtual inherited (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresShareCallback__ = &gpuresShareCallback_IMPL, // virtual override (res) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresShareCallback__ = &__nvoc_down_thunk_GpuResource_rmresShareCallback, // virtual override (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual
+    .vtable.__dispchndmaGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_dispchndmaGetMapAddrSpace, // virtual inherited (gpures) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_dispchnGetMapAddrSpace, // virtual inherited (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL, // virtual
+    .vtable.__dispchndmaInternalControlForward__ = &__nvoc_up_thunk_GpuResource_dispchndmaInternalControlForward, // virtual inherited (gpures) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnInternalControlForward__ = &__nvoc_up_thunk_GpuResource_dispchnInternalControlForward, // virtual inherited (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL, // virtual
+    .vtable.__dispchndmaGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_dispchndmaGetInternalObjectHandle, // virtual inherited (gpures) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_dispchnGetInternalObjectHandle, // virtual inherited (gpures) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL, // virtual
+    .vtable.__dispchndmaAccessCallback__ = &__nvoc_up_thunk_RmResource_dispchndmaAccessCallback, // virtual inherited (rmres) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnAccessCallback__ = &__nvoc_up_thunk_RmResource_dispchnAccessCallback, // virtual inherited (rmres) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresAccessCallback__ = &__nvoc_up_thunk_RmResource_gpuresAccessCallback, // virtual inherited (rmres) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual
+    .vtable.__dispchndmaGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_dispchndmaGetMemInterMapParams, // virtual inherited (rmres) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_dispchnGetMemInterMapParams, // virtual inherited (rmres) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams, // virtual inherited (rmres) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual
+    .vtable.__dispchndmaCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispchndmaCheckMemInterUnmap, // virtual inherited (rmres) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispchnCheckMemInterUnmap, // virtual inherited (rmres) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap, // virtual inherited (rmres) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual
+    .vtable.__dispchndmaGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_dispchndmaGetMemoryMappingDescriptor, // virtual inherited (rmres) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_dispchnGetMemoryMappingDescriptor, // virtual inherited (rmres) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual
+    .vtable.__dispchndmaControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispchndmaControlSerialization_Prologue, // virtual inherited (rmres) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispchnControlSerialization_Prologue, // virtual inherited (rmres) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue, // virtual inherited (rmres) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual
+    .vtable.__dispchndmaControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispchndmaControlSerialization_Epilogue, // virtual inherited (rmres) base (dispchn)
+    .metadata__DispChannel.vtable.__dispchnControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispchnControlSerialization_Epilogue, // virtual inherited (rmres) base (gpures)
+    .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres)
+    .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res)
.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__dispchndmaControl_Prologue__ = &__nvoc_up_thunk_RmResource_dispchndmaControl_Prologue, // virtual inherited (rmres) base (dispchn) + .metadata__DispChannel.vtable.__dispchnControl_Prologue__ = &__nvoc_up_thunk_RmResource_dispchnControl_Prologue, // virtual inherited (rmres) base (gpures) + .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__dispchndmaControl_Epilogue__ = &__nvoc_up_thunk_RmResource_dispchndmaControl_Epilogue, // virtual inherited (rmres) base (dispchn) + .metadata__DispChannel.vtable.__dispchnControl_Epilogue__ = &__nvoc_up_thunk_RmResource_dispchnControl_Epilogue, // virtual inherited (rmres) base (gpures) + .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__dispchndmaCanCopy__ = &__nvoc_up_thunk_RsResource_dispchndmaCanCopy, // virtual inherited (res) base (dispchn) + .metadata__DispChannel.vtable.__dispchnCanCopy__ = &__nvoc_up_thunk_RsResource_dispchnCanCopy, // virtual inherited (res) base (gpures) + .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresCanCopy__ = &__nvoc_up_thunk_RsResource_gpuresCanCopy, // virtual inherited (res) base (rmres) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__dispchndmaIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispchndmaIsDuplicate, // virtual inherited (res) base (dispchn) + .metadata__DispChannel.vtable.__dispchnIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispchnIsDuplicate, // virtual inherited (res) base (gpures) + .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpuresIsDuplicate, // virtual inherited (res) base (rmres) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__dispchndmaPreDestruct__ = &__nvoc_up_thunk_RsResource_dispchndmaPreDestruct, // virtual inherited (res) base (dispchn) + 
.metadata__DispChannel.vtable.__dispchnPreDestruct__ = &__nvoc_up_thunk_RsResource_dispchnPreDestruct, // virtual inherited (res) base (gpures) + .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresPreDestruct__ = &__nvoc_up_thunk_RsResource_gpuresPreDestruct, // virtual inherited (res) base (rmres) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__dispchndmaControlFilter__ = &__nvoc_up_thunk_RsResource_dispchndmaControlFilter, // virtual inherited (res) base (dispchn) + .metadata__DispChannel.vtable.__dispchnControlFilter__ = &__nvoc_up_thunk_RsResource_dispchnControlFilter, // virtual inherited (res) base (gpures) + .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresControlFilter__ = &__nvoc_up_thunk_RsResource_gpuresControlFilter, // virtual inherited (res) base (rmres) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__dispchndmaIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_dispchndmaIsPartialUnmapSupported, // inline virtual inherited (res) base (dispchn) body + .metadata__DispChannel.vtable.__dispchnIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_dispchnIsPartialUnmapSupported, // inline virtual inherited (res) base (gpures) body + .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__dispchndmaMapTo__ = &__nvoc_up_thunk_RsResource_dispchndmaMapTo, // virtual inherited (res) base (dispchn) + .metadata__DispChannel.vtable.__dispchnMapTo__ = &__nvoc_up_thunk_RsResource_dispchnMapTo, // virtual inherited (res) base (gpures) + .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresMapTo__ = &__nvoc_up_thunk_RsResource_gpuresMapTo, // virtual inherited (res) base (rmres) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__dispchndmaUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispchndmaUnmapFrom, // virtual inherited (res) base (dispchn) + .metadata__DispChannel.vtable.__dispchnUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispchnUnmapFrom, // virtual inherited (res) base (gpures) + .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpuresUnmapFrom, // virtual inherited (res) base (rmres) + 
.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__dispchndmaGetRefCount__ = &__nvoc_up_thunk_RsResource_dispchndmaGetRefCount, // virtual inherited (res) base (dispchn) + .metadata__DispChannel.vtable.__dispchnGetRefCount__ = &__nvoc_up_thunk_RsResource_dispchnGetRefCount, // virtual inherited (res) base (gpures) + .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresGetRefCount__ = &__nvoc_up_thunk_RsResource_gpuresGetRefCount, // virtual inherited (res) base (rmres) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__dispchndmaAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispchndmaAddAdditionalDependants, // virtual inherited (res) base (dispchn) + .metadata__DispChannel.vtable.__dispchnAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispchnAddAdditionalDependants, // virtual inherited (res) base (gpures) + .metadata__DispChannel.metadata__GpuResource.vtable.__gpuresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual + .vtable.__dispchndmaGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_dispchndmaGetNotificationListPtr, // virtual inherited (notify) base (dispchn) + .metadata__DispChannel.vtable.__dispchnGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_dispchnGetNotificationListPtr, // virtual inherited (notify) base (notify) + .metadata__DispChannel.metadata__Notifier.vtable.__notifyGetNotificationListPtr__ = &notifyGetNotificationListPtr_IMPL, // virtual override (inotify) base (inotify) + .metadata__DispChannel.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationListPtr__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr, // pure virtual + .vtable.__dispchndmaGetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispchndmaGetNotificationShare, // virtual inherited (notify) base (dispchn) + .metadata__DispChannel.vtable.__dispchnGetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispchnGetNotificationShare, // virtual inherited (notify) base (notify) + .metadata__DispChannel.metadata__Notifier.vtable.__notifyGetNotificationShare__ = &notifyGetNotificationShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__DispChannel.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationShare, // pure virtual + .vtable.__dispchndmaSetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispchndmaSetNotificationShare, // virtual inherited (notify) base (dispchn) + .metadata__DispChannel.vtable.__dispchnSetNotificationShare__ = 
&__nvoc_up_thunk_Notifier_dispchnSetNotificationShare, // virtual inherited (notify) base (notify) + .metadata__DispChannel.metadata__Notifier.vtable.__notifySetNotificationShare__ = &notifySetNotificationShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__DispChannel.metadata__Notifier.metadata__INotifier.vtable.__inotifySetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifySetNotificationShare, // pure virtual + .vtable.__dispchndmaUnregisterEvent__ = &__nvoc_up_thunk_Notifier_dispchndmaUnregisterEvent, // virtual inherited (notify) base (dispchn) + .metadata__DispChannel.vtable.__dispchnUnregisterEvent__ = &__nvoc_up_thunk_Notifier_dispchnUnregisterEvent, // virtual inherited (notify) base (notify) + .metadata__DispChannel.metadata__Notifier.vtable.__notifyUnregisterEvent__ = &notifyUnregisterEvent_IMPL, // virtual override (inotify) base (inotify) + .metadata__DispChannel.metadata__Notifier.metadata__INotifier.vtable.__inotifyUnregisterEvent__ = &__nvoc_down_thunk_Notifier_inotifyUnregisterEvent, // pure virtual + .vtable.__dispchndmaGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_dispchndmaGetOrAllocNotifShare, // virtual inherited (notify) base (dispchn) + .metadata__DispChannel.vtable.__dispchnGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_dispchnGetOrAllocNotifShare, // virtual inherited (notify) base (notify) + .metadata__DispChannel.metadata__Notifier.vtable.__notifyGetOrAllocNotifShare__ = &notifyGetOrAllocNotifShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__DispChannel.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetOrAllocNotifShare__ = &__nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare, // pure virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__DispChannelDma = { + .numRelatives = 9, + .relatives = { + &__nvoc_metadata__DispChannelDma.rtti, // [0]: (dispchndma) this + &__nvoc_metadata__DispChannelDma.metadata__DispChannel.rtti, // [1]: (dispchn) super + &__nvoc_metadata__DispChannelDma.metadata__DispChannel.metadata__GpuResource.rtti, // [2]: (gpures) super^2 + &__nvoc_metadata__DispChannelDma.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.rtti, // [3]: (rmres) super^3 + &__nvoc_metadata__DispChannelDma.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti, // [4]: (res) super^4 + &__nvoc_metadata__DispChannelDma.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [5]: (obj) super^5 + &__nvoc_metadata__DispChannelDma.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti, // [6]: (rmrescmn) super^4 + &__nvoc_metadata__DispChannelDma.metadata__DispChannel.metadata__Notifier.rtti, // [7]: (notify) super^2 + &__nvoc_metadata__DispChannelDma.metadata__DispChannel.metadata__Notifier.metadata__INotifier.rtti, // [8]: (inotify) super^3 + } +}; + +// 30 up-thunk(s) defined to bridge methods in DispChannelDma to superclasses + +// dispchndmaGetRegBaseOffsetAndSize: virtual inherited (dispchn) base (dispchn) +NV_STATUS __nvoc_up_thunk_DispChannel_dispchndmaGetRegBaseOffsetAndSize(struct DispChannelDma *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return dispchnGetRegBaseOffsetAndSize((struct DispChannel *)(((unsigned char *) pDispChannel) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel)), pGpu, pOffset, pSize); +} + +// dispchndmaControl: virtual inherited (gpures) base (dispchn) +NV_STATUS 
__nvoc_up_thunk_GpuResource_dispchndmaControl(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource)), pCallContext, pParams); +} + +// dispchndmaMap: virtual inherited (gpures) base (dispchn) +NV_STATUS __nvoc_up_thunk_GpuResource_dispchndmaMap(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource)), pCallContext, pParams, pCpuMapping); +} + +// dispchndmaUnmap: virtual inherited (gpures) base (dispchn) +NV_STATUS __nvoc_up_thunk_GpuResource_dispchndmaUnmap(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource)), pCallContext, pCpuMapping); +} + +// dispchndmaShareCallback: virtual inherited (gpures) base (dispchn) +NvBool __nvoc_up_thunk_GpuResource_dispchndmaShareCallback(struct DispChannelDma *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// dispchndmaGetMapAddrSpace: virtual inherited (gpures) base (dispchn) +NV_STATUS __nvoc_up_thunk_GpuResource_dispchndmaGetMapAddrSpace(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource)), pCallContext, mapFlags, pAddrSpace); +} + +// dispchndmaInternalControlForward: virtual inherited (gpures) base (dispchn) +NV_STATUS __nvoc_up_thunk_GpuResource_dispchndmaInternalControlForward(struct DispChannelDma *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource)), command, pParams, size); +} + +// dispchndmaGetInternalObjectHandle: virtual inherited (gpures) base (dispchn) +NvHandle __nvoc_up_thunk_GpuResource_dispchndmaGetInternalObjectHandle(struct DispChannelDma *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource))); +} + +// dispchndmaAccessCallback: virtual inherited (rmres) base (dispchn) +NvBool __nvoc_up_thunk_RmResource_dispchndmaAccessCallback(struct DispChannelDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// dispchndmaGetMemInterMapParams: virtual inherited (rmres) base (dispchn) 
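+// All of the up-thunks in this group share one shape: rebase the
+// DispChannelDma pointer onto the embedded base-class struct with
+// NV_OFFSETOF, then tail-call the base implementation. As an illustrative
+// sketch only (hypothetical helper name, not part of the generated file),
+// the rmres-level adjustment used below is equivalent to:
+//
+//   static inline struct RmResource *
+//   dispchndmaAsRmResource(struct DispChannelDma *pThis)
+//   {
+//       return (struct RmResource *)((unsigned char *)pThis +
+//           NV_OFFSETOF(DispChannelDma,
+//               __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource));
+//   }
+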
+NV_STATUS __nvoc_up_thunk_RmResource_dispchndmaGetMemInterMapParams(struct DispChannelDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pParams); +} + +// dispchndmaCheckMemInterUnmap: virtual inherited (rmres) base (dispchn) +NV_STATUS __nvoc_up_thunk_RmResource_dispchndmaCheckMemInterUnmap(struct DispChannelDma *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), bSubdeviceHandleProvided); +} + +// dispchndmaGetMemoryMappingDescriptor: virtual inherited (rmres) base (dispchn) +NV_STATUS __nvoc_up_thunk_RmResource_dispchndmaGetMemoryMappingDescriptor(struct DispChannelDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), ppMemDesc); +} + +// dispchndmaControlSerialization_Prologue: virtual inherited (rmres) base (dispchn) +NV_STATUS __nvoc_up_thunk_RmResource_dispchndmaControlSerialization_Prologue(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispchndmaControlSerialization_Epilogue: virtual inherited (rmres) base (dispchn) +void __nvoc_up_thunk_RmResource_dispchndmaControlSerialization_Epilogue(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispchndmaControl_Prologue: virtual inherited (rmres) base (dispchn) +NV_STATUS __nvoc_up_thunk_RmResource_dispchndmaControl_Prologue(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispchndmaControl_Epilogue: virtual inherited (rmres) base (dispchn) +void __nvoc_up_thunk_RmResource_dispchndmaControl_Epilogue(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispchndmaCanCopy: virtual inherited (res) base (dispchn) +NvBool __nvoc_up_thunk_RsResource_dispchndmaCanCopy(struct DispChannelDma *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// 
dispchndmaIsDuplicate: virtual inherited (res) base (dispchn) +NV_STATUS __nvoc_up_thunk_RsResource_dispchndmaIsDuplicate(struct DispChannelDma *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate); +} + +// dispchndmaPreDestruct: virtual inherited (res) base (dispchn) +void __nvoc_up_thunk_RsResource_dispchndmaPreDestruct(struct DispChannelDma *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispchndmaControlFilter: virtual inherited (res) base (dispchn) +NV_STATUS __nvoc_up_thunk_RsResource_dispchndmaControlFilter(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// dispchndmaIsPartialUnmapSupported: inline virtual inherited (res) base (dispchn) body +NvBool __nvoc_up_thunk_RsResource_dispchndmaIsPartialUnmapSupported(struct DispChannelDma *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispchndmaMapTo: virtual inherited (res) base (dispchn) +NV_STATUS __nvoc_up_thunk_RsResource_dispchndmaMapTo(struct DispChannelDma *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// dispchndmaUnmapFrom: virtual inherited (res) base (dispchn) +NV_STATUS __nvoc_up_thunk_RsResource_dispchndmaUnmapFrom(struct DispChannelDma *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// dispchndmaGetRefCount: virtual inherited (res) base (dispchn) +NvU32 __nvoc_up_thunk_RsResource_dispchndmaGetRefCount(struct DispChannelDma *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispchndmaAddAdditionalDependants: virtual inherited (res) base (dispchn) +void __nvoc_up_thunk_RsResource_dispchndmaAddAdditionalDependants(struct RsClient *pClient, struct DispChannelDma *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + +// dispchndmaGetNotificationListPtr: virtual inherited (notify) base (dispchn) +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispchndmaGetNotificationListPtr(struct DispChannelDma *pNotifier) { + return 
notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_Notifier))); +} + +// dispchndmaGetNotificationShare: virtual inherited (notify) base (dispchn) +struct NotifShare * __nvoc_up_thunk_Notifier_dispchndmaGetNotificationShare(struct DispChannelDma *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_Notifier))); +} + +// dispchndmaSetNotificationShare: virtual inherited (notify) base (dispchn) +void __nvoc_up_thunk_Notifier_dispchndmaSetNotificationShare(struct DispChannelDma *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_Notifier)), pNotifShare); +} + +// dispchndmaUnregisterEvent: virtual inherited (notify) base (dispchn) +NV_STATUS __nvoc_up_thunk_Notifier_dispchndmaUnregisterEvent(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_Notifier)), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +// dispchndmaGetOrAllocNotifShare: virtual inherited (notify) base (dispchn) +NV_STATUS __nvoc_up_thunk_Notifier_dispchndmaGetOrAllocNotifShare(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_Notifier)), hNotifierClient, hNotifierResource, ppNotifShare); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__DispChannelDma = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_DispChannel(DispChannel*); +void __nvoc_dtor_DispChannelDma(DispChannelDma *pThis) { + __nvoc_dtor_DispChannel(&pThis->__nvoc_base_DispChannel); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispChannelDma(DispChannelDma *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_DispChannel(DispChannel* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, NvU32); +NV_STATUS __nvoc_ctor_DispChannelDma(DispChannelDma *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_DispChannel(&pThis->__nvoc_base_DispChannel, arg_pCallContext, arg_pParams, NV_TRUE); + if (status != NV_OK) goto __nvoc_ctor_DispChannelDma_fail_DispChannel; + __nvoc_init_dataField_DispChannelDma(pThis); + + status = __nvoc_dispchndmaConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispChannelDma_fail__init; + goto __nvoc_ctor_DispChannelDma_exit; // Success + +__nvoc_ctor_DispChannelDma_fail__init: + __nvoc_dtor_DispChannel(&pThis->__nvoc_base_DispChannel); +__nvoc_ctor_DispChannelDma_fail_DispChannel: +__nvoc_ctor_DispChannelDma_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_DispChannelDma_1(DispChannelDma *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_DispChannelDma_1 + + +// Initialize vtable(s) for 30 virtual method(s). 
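+// Because this class is generated with metadata version 2, the 30 virtual
+// method pointers live in the shared, read-only __nvoc_metadata__DispChannelDma
+// vtable rather than in each object instance, which is why the per-object
+// initializer below has no pointers left to assign. Dispatch reads the
+// pointer through the object's metadata pointer, e.g. (illustrative call):
+//
+//   pThis->__nvoc_metadata_ptr->vtable.__dispchndmaGetRefCount__(pThis);
+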
+void __nvoc_init_funcTable_DispChannelDma(DispChannelDma *pThis) { + __nvoc_init_funcTable_DispChannelDma_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__DispChannelDma(DispChannelDma *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^5 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^4 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^4 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource; // (rmres) super^3 + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource; // (gpures) super^2 + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_base_INotifier; // (inotify) super^3 + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier; // (notify) super^2 + pThis->__nvoc_pbase_DispChannel = &pThis->__nvoc_base_DispChannel; // (dispchn) super + pThis->__nvoc_pbase_DispChannelDma = pThis; // (dispchndma) this + + // Recurse to superclass initialization function(s). + __nvoc_init__DispChannel(&pThis->__nvoc_base_DispChannel); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelDma.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^5 + pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelDma.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RsResource; // (res) super^4 + pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelDma.metadata__DispChannel.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^4 + pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelDma.metadata__DispChannel.metadata__GpuResource.metadata__RmResource; // (rmres) super^3 + pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelDma.metadata__DispChannel.metadata__GpuResource; // (gpures) super^2 + pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_base_INotifier.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelDma.metadata__DispChannel.metadata__Notifier.metadata__INotifier; // (inotify) super^3 + pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelDma.metadata__DispChannel.metadata__Notifier; // (notify) super^2 + pThis->__nvoc_base_DispChannel.__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelDma.metadata__DispChannel; // (dispchn) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__DispChannelDma; // (dispchndma) this + + // Initialize per-object vtables. 
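+ // Note: the ancestor pointers and __nvoc_metadata_ptr wiring above complete
+ // before the constructor runs (see __nvoc_objCreate_DispChannelDma), so
+ // dispatch through the metadata pointer is already usable during construction.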
+ __nvoc_init_funcTable_DispChannelDma(pThis); +} + +NV_STATUS __nvoc_objCreate_DispChannelDma(DispChannelDma **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + DispChannelDma *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(DispChannelDma), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(DispChannelDma)); + + pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__DispChannelDma(pThis); + status = __nvoc_ctor_DispChannelDma(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispChannelDma_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_DispChannelDma_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(DispChannelDma)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispChannelDma(DispChannelDma **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispChannelDma(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_disp_channel_nvoc.h b/src/nvidia/generated/g_disp_channel_nvoc.h new file mode 100644 index 0000000..f32b28d --- /dev/null +++ b/src/nvidia/generated/g_disp_channel_nvoc.h @@ -0,0 +1,1059 @@ + +#ifndef _G_DISP_CHANNEL_NVOC_H_ +#define _G_DISP_CHANNEL_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing DispChannel and its derived classes. +* +******************************************************************************/ + +#pragma once +#include "g_disp_channel_nvoc.h" + +#ifndef DISP_CHANNEL_H +#define DISP_CHANNEL_H + +#include "gpu/gpu_resource.h" +#include "rmapi/event.h" + + +struct ContextDma; + +#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__ +#define __NVOC_CLASS_ContextDma_TYPEDEF__ +typedef struct ContextDma ContextDma; +#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ContextDma +#define __nvoc_class_id_ContextDma 0x88441b +#endif /* __nvoc_class_id_ContextDma */ + + + +struct DispObject; + +#ifndef __NVOC_CLASS_DispObject_TYPEDEF__ +#define __NVOC_CLASS_DispObject_TYPEDEF__ +typedef struct DispObject DispObject; +#endif /* __NVOC_CLASS_DispObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispObject +#define __nvoc_class_id_DispObject 0x999839 +#endif /* __nvoc_class_id_DispObject */ + + + +/*! + * Base class for display channels + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DispChannel; +struct NVOC_METADATA__GpuResource; +struct NVOC_METADATA__Notifier; +struct NVOC_VTABLE__DispChannel; + + +struct DispChannel { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__DispChannel *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct GpuResource __nvoc_base_GpuResource; + struct Notifier __nvoc_base_Notifier; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^4 + struct RsResource *__nvoc_pbase_RsResource; // res super^3 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^2 + struct GpuResource *__nvoc_pbase_GpuResource; // gpures super + struct INotifier *__nvoc_pbase_INotifier; // inotify super^2 + struct Notifier *__nvoc_pbase_Notifier; // notify super + struct DispChannel *__nvoc_pbase_DispChannel; // dispchn + + // Data members + struct DispObject *pDispObject; + NvU32 DispClass; + NvU32 InstanceNumber; + NvP64 pControl; + NvP64 pPriv; + NvU32 ControlOffset; + NvU32 ControlLength; + NvBool bIsDma; +}; + + +// Vtable with 30 per-class function pointers +struct NVOC_VTABLE__DispChannel { + NV_STATUS (*__dispchnGetRegBaseOffsetAndSize__)(struct DispChannel * /*this*/, struct OBJGPU *, NvU32 *, NvU32 *); // virtual override (gpures) base (gpures) + NV_STATUS (*__dispchnControl__)(struct DispChannel * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__dispchnMap__)(struct DispChannel * /*this*/, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__dispchnUnmap__)(struct DispChannel * /*this*/, struct CALL_CONTEXT *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures) + NvBool (*__dispchnShareCallback__)(struct DispChannel * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__dispchnGetMapAddrSpace__)(struct DispChannel * /*this*/, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__dispchnInternalControlForward__)(struct DispChannel * /*this*/, NvU32, void *, NvU32); // virtual inherited (gpures) base (gpures) + NvHandle (*__dispchnGetInternalObjectHandle__)(struct DispChannel * /*this*/); // virtual inherited (gpures) base (gpures) + NvBool (*__dispchnAccessCallback__)(struct DispChannel * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__dispchnGetMemInterMapParams__)(struct DispChannel * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__dispchnCheckMemInterUnmap__)(struct DispChannel * /*this*/, NvBool); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__dispchnGetMemoryMappingDescriptor__)(struct DispChannel * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__dispchnControlSerialization_Prologue__)(struct DispChannel * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + void (*__dispchnControlSerialization_Epilogue__)(struct DispChannel * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__dispchnControl_Prologue__)(struct DispChannel * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + void (*__dispchnControl_Epilogue__)(struct DispChannel * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual 
inherited (rmres) base (gpures) + NvBool (*__dispchnCanCopy__)(struct DispChannel * /*this*/); // virtual inherited (res) base (gpures) + NV_STATUS (*__dispchnIsDuplicate__)(struct DispChannel * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (gpures) + void (*__dispchnPreDestruct__)(struct DispChannel * /*this*/); // virtual inherited (res) base (gpures) + NV_STATUS (*__dispchnControlFilter__)(struct DispChannel * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (gpures) + NvBool (*__dispchnIsPartialUnmapSupported__)(struct DispChannel * /*this*/); // inline virtual inherited (res) base (gpures) body + NV_STATUS (*__dispchnMapTo__)(struct DispChannel * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (gpures) + NV_STATUS (*__dispchnUnmapFrom__)(struct DispChannel * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (gpures) + NvU32 (*__dispchnGetRefCount__)(struct DispChannel * /*this*/); // virtual inherited (res) base (gpures) + void (*__dispchnAddAdditionalDependants__)(struct RsClient *, struct DispChannel * /*this*/, RsResourceRef *); // virtual inherited (res) base (gpures) + PEVENTNOTIFICATION * (*__dispchnGetNotificationListPtr__)(struct DispChannel * /*this*/); // virtual inherited (notify) base (notify) + struct NotifShare * (*__dispchnGetNotificationShare__)(struct DispChannel * /*this*/); // virtual inherited (notify) base (notify) + void (*__dispchnSetNotificationShare__)(struct DispChannel * /*this*/, struct NotifShare *); // virtual inherited (notify) base (notify) + NV_STATUS (*__dispchnUnregisterEvent__)(struct DispChannel * /*this*/, NvHandle, NvHandle, NvHandle, NvHandle); // virtual inherited (notify) base (notify) + NV_STATUS (*__dispchnGetOrAllocNotifShare__)(struct DispChannel * /*this*/, NvHandle, NvHandle, struct NotifShare **); // virtual inherited (notify) base (notify) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DispChannel { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__GpuResource metadata__GpuResource; + const struct NVOC_METADATA__Notifier metadata__Notifier; + const struct NVOC_VTABLE__DispChannel vtable; +}; + +#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__ +#define __NVOC_CLASS_DispChannel_TYPEDEF__ +typedef struct DispChannel DispChannel; +#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannel +#define __nvoc_class_id_DispChannel 0xbd2ff3 +#endif /* __nvoc_class_id_DispChannel */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel; + +#define __staticCast_DispChannel(pThis) \ + ((pThis)->__nvoc_pbase_DispChannel) + +#ifdef __nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannel(pThis) ((DispChannel*) NULL) +#else //__nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannel(pThis) \ + ((DispChannel*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispChannel))) +#endif //__nvoc_disp_channel_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_DispChannel(DispChannel**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispChannel(DispChannel**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams, NvU32 arg_isDma); +#define __objCreate_DispChannel(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams, arg_isDma) \ + __nvoc_objCreate_DispChannel((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams, 
arg_isDma) + + +// Wrapper macros +#define dispchnGetRegBaseOffsetAndSize_FNPTR(pDispChannel) pDispChannel->__nvoc_metadata_ptr->vtable.__dispchnGetRegBaseOffsetAndSize__ +#define dispchnGetRegBaseOffsetAndSize(pDispChannel, pGpu, pOffset, pSize) dispchnGetRegBaseOffsetAndSize_DISPATCH(pDispChannel, pGpu, pOffset, pSize) +#define dispchnControl_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresControl__ +#define dispchnControl(pGpuResource, pCallContext, pParams) dispchnControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dispchnMap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresMap__ +#define dispchnMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispchnMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispchnUnmap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresUnmap__ +#define dispchnUnmap(pGpuResource, pCallContext, pCpuMapping) dispchnUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define dispchnShareCallback_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresShareCallback__ +#define dispchnShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispchnShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispchnGetMapAddrSpace_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetMapAddrSpace__ +#define dispchnGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispchnGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispchnInternalControlForward_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresInternalControlForward__ +#define dispchnInternalControlForward(pGpuResource, command, pParams, size) dispchnInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispchnGetInternalObjectHandle_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__ +#define dispchnGetInternalObjectHandle(pGpuResource) dispchnGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dispchnAccessCallback_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define dispchnAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispchnAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispchnGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define dispchnGetMemInterMapParams(pRmResource, pParams) dispchnGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispchnCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define dispchnCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispchnCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispchnGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define dispchnGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispchnGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define 
dispchnControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define dispchnControlSerialization_Prologue(pResource, pCallContext, pParams) dispchnControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define dispchnControlSerialization_Epilogue(pResource, pCallContext, pParams) dispchnControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define dispchnControl_Prologue(pResource, pCallContext, pParams) dispchnControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define dispchnControl_Epilogue(pResource, pCallContext, pParams) dispchnControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnCanCopy_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define dispchnCanCopy(pResource) dispchnCanCopy_DISPATCH(pResource) +#define dispchnIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define dispchnIsDuplicate(pResource, hMemory, pDuplicate) dispchnIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define dispchnPreDestruct_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define dispchnPreDestruct(pResource) dispchnPreDestruct_DISPATCH(pResource) +#define dispchnControlFilter_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define dispchnControlFilter(pResource, pCallContext, pParams) dispchnControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispchnIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define dispchnIsPartialUnmapSupported(pResource) dispchnIsPartialUnmapSupported_DISPATCH(pResource) +#define dispchnMapTo_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define dispchnMapTo(pResource, pParams) dispchnMapTo_DISPATCH(pResource, pParams) +#define dispchnUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define dispchnUnmapFrom(pResource, pParams) dispchnUnmapFrom_DISPATCH(pResource, pParams) +#define dispchnGetRefCount_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define dispchnGetRefCount(pResource) dispchnGetRefCount_DISPATCH(pResource) +#define dispchnAddAdditionalDependants_FNPTR(pResource) 
pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define dispchnAddAdditionalDependants(pClient, pResource, pReference) dispchnAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispchnGetNotificationListPtr_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationListPtr__ +#define dispchnGetNotificationListPtr(pNotifier) dispchnGetNotificationListPtr_DISPATCH(pNotifier) +#define dispchnGetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationShare__ +#define dispchnGetNotificationShare(pNotifier) dispchnGetNotificationShare_DISPATCH(pNotifier) +#define dispchnSetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifySetNotificationShare__ +#define dispchnSetNotificationShare(pNotifier, pNotifShare) dispchnSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispchnUnregisterEvent_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyUnregisterEvent__ +#define dispchnUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispchnUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispchnGetOrAllocNotifShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetOrAllocNotifShare__ +#define dispchnGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispchnGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) + +// Dispatch functions +static inline NV_STATUS dispchnGetRegBaseOffsetAndSize_DISPATCH(struct DispChannel *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pDispChannel->__nvoc_metadata_ptr->vtable.__dispchnGetRegBaseOffsetAndSize__(pDispChannel, pGpu, pOffset, pSize); +} + +static inline NV_STATUS dispchnControl_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchnControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnMap_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchnMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispchnUnmap_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchnUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NvBool dispchnShareCallback_DISPATCH(struct DispChannel *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchnShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispchnGetMapAddrSpace_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchnGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS 
dispchnInternalControlForward_DISPATCH(struct DispChannel *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchnInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NvHandle dispchnGetInternalObjectHandle_DISPATCH(struct DispChannel *pGpuResource) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchnGetInternalObjectHandle__(pGpuResource); +} + +static inline NvBool dispchnAccessCallback_DISPATCH(struct DispChannel *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS dispchnGetMemInterMapParams_DISPATCH(struct DispChannel *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispchnGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispchnCheckMemInterUnmap_DISPATCH(struct DispChannel *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispchnCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispchnGetMemoryMappingDescriptor_DISPATCH(struct DispChannel *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispchnGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispchnControlSerialization_Prologue_DISPATCH(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void dispchnControlSerialization_Epilogue_DISPATCH(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__dispchnControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnControl_Prologue_DISPATCH(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void dispchnControl_Epilogue_DISPATCH(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__dispchnControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool dispchnCanCopy_DISPATCH(struct DispChannel *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnCanCopy__(pResource); +} + +static inline NV_STATUS dispchnIsDuplicate_DISPATCH(struct DispChannel *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void dispchnPreDestruct_DISPATCH(struct DispChannel *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__dispchnPreDestruct__(pResource); +} + +static inline NV_STATUS dispchnControlFilter_DISPATCH(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnControlFilter__(pResource, pCallContext, pParams); +} + +static inline 
NvBool dispchnIsPartialUnmapSupported_DISPATCH(struct DispChannel *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS dispchnMapTo_DISPATCH(struct DispChannel *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispchnUnmapFrom_DISPATCH(struct DispChannel *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnUnmapFrom__(pResource, pParams); +} + +static inline NvU32 dispchnGetRefCount_DISPATCH(struct DispChannel *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnGetRefCount__(pResource); +} + +static inline void dispchnAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispChannel *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__dispchnAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline PEVENTNOTIFICATION * dispchnGetNotificationListPtr_DISPATCH(struct DispChannel *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispchnGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare * dispchnGetNotificationShare_DISPATCH(struct DispChannel *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispchnGetNotificationShare__(pNotifier); +} + +static inline void dispchnSetNotificationShare_DISPATCH(struct DispChannel *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvoc_metadata_ptr->vtable.__dispchnSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS dispchnUnregisterEvent_DISPATCH(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispchnUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS dispchnGetOrAllocNotifShare_DISPATCH(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispchnGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS dispchnGetRegBaseOffsetAndSize_IMPL(struct DispChannel *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); + +NV_STATUS dispchnConstruct_IMPL(struct DispChannel *arg_pDispChannel, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams, NvU32 arg_isDma); + +#define __nvoc_dispchnConstruct(arg_pDispChannel, arg_pCallContext, arg_pParams, arg_isDma) dispchnConstruct_IMPL(arg_pDispChannel, arg_pCallContext, arg_pParams, arg_isDma) +void dispchnDestruct_IMPL(struct DispChannel *pDispChannel); + +#define __nvoc_dispchnDestruct(pDispChannel) dispchnDestruct_IMPL(pDispChannel) +void dispchnSetRegBaseOffsetAndSize_IMPL(struct DispChannel *pDispChannel, struct OBJGPU *pGpu); + +#ifdef __nvoc_disp_channel_h_disabled +static inline void dispchnSetRegBaseOffsetAndSize(struct DispChannel *pDispChannel, struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!"); +} +#else //__nvoc_disp_channel_h_disabled +#define dispchnSetRegBaseOffsetAndSize(pDispChannel, pGpu) dispchnSetRegBaseOffsetAndSize_IMPL(pDispChannel, pGpu) +#endif //__nvoc_disp_channel_h_disabled + +NV_STATUS dispchnGrabChannel_IMPL(struct DispChannel *pDispChannel, NvHandle hClient, 
NvHandle hParent, NvHandle hChannel, NvU32 hClass, void *pAllocParms);
+
+#ifdef __nvoc_disp_channel_h_disabled
+static inline NV_STATUS dispchnGrabChannel(struct DispChannel *pDispChannel, NvHandle hClient, NvHandle hParent, NvHandle hChannel, NvU32 hClass, void *pAllocParms) {
+    NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_disp_channel_h_disabled
+#define dispchnGrabChannel(pDispChannel, hClient, hParent, hChannel, hClass, pAllocParms) dispchnGrabChannel_IMPL(pDispChannel, hClient, hParent, hChannel, hClass, pAllocParms)
+#endif //__nvoc_disp_channel_h_disabled
+
+NV_STATUS dispchnBindCtx_IMPL(struct OBJGPU *pGpu, struct ContextDma *pContextDma, NvHandle hDispChannel);
+
+#define dispchnBindCtx(pGpu, pContextDma, hDispChannel) dispchnBindCtx_IMPL(pGpu, pContextDma, hDispChannel)
+NV_STATUS dispchnUnbindCtx_IMPL(struct OBJGPU *pGpu, struct ContextDma *pContextDma, NvHandle hDispChannel);
+
+#define dispchnUnbindCtx(pGpu, pContextDma, hDispChannel) dispchnUnbindCtx_IMPL(pGpu, pContextDma, hDispChannel)
+void dispchnUnbindCtxFromAllChannels_IMPL(struct OBJGPU *pGpu, struct ContextDma *pContextDma);
+
+#define dispchnUnbindCtxFromAllChannels(pGpu, pContextDma) dispchnUnbindCtxFromAllChannels_IMPL(pGpu, pContextDma)
+void dispchnUnbindAllCtx_IMPL(struct OBJGPU *pGpu, struct DispChannel *pDispChannel);
+
+#ifdef __nvoc_disp_channel_h_disabled
+static inline void dispchnUnbindAllCtx(struct OBJGPU *pGpu, struct DispChannel *pDispChannel) {
+    NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!");
+}
+#else //__nvoc_disp_channel_h_disabled
+#define dispchnUnbindAllCtx(pGpu, pDispChannel) dispchnUnbindAllCtx_IMPL(pGpu, pDispChannel)
+#endif //__nvoc_disp_channel_h_disabled
+
+NV_STATUS dispchnGetByHandle_IMPL(struct RsClient *pClient, NvHandle hDisplayChannel, struct DispChannel **ppDispChannel);
+
+#define dispchnGetByHandle(pClient, hDisplayChannel, ppDispChannel) dispchnGetByHandle_IMPL(pClient, hDisplayChannel, ppDispChannel)
+#undef PRIVATE_FIELD
+
+
+/*!
+ * RM internal class representing XXX_XXX_CHANNEL_PIO
+ */
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
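For readers who have not met this NVOC convention before: the patch never shows the definition of NVOC_PRIVATE_FIELD, so the sketch below is an assumption about how such a gate can work, not the driver's actual macro. The idea is a compile-time name switch: the one .c file that defines the *_PRIVATE_ACCESS_ALLOWED symbol sees the real field name, while every other translation unit sees a mangled one, so stray cross-file accesses fail to compile.

    /* Illustrative sketch only -- NVOC_PRIVATE_FIELD's real definition is not
     * in this patch; the x##_PRIVATE mangling below is hypothetical. */
    #define NVOC_PRIVATE_FIELD(x) x##_PRIVATE

    struct Example {
        int PRIVATE_FIELD(refCount);
    };
    /* In the owning .c file (which defines the _PRIVATE_ACCESS_ALLOWED guard
     * before including the header), PRIVATE_FIELD(refCount) expands to
     * refCount; elsewhere it expands to refCount_PRIVATE, so a reference such
     * as pExample->refCount is a compile error in any other source file. */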
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__DispChannelPio;
+struct NVOC_METADATA__DispChannel;
+struct NVOC_VTABLE__DispChannelPio;
+
+
+struct DispChannelPio {
+
+    // Metadata starts with RTTI structure.
+    union {
+        const struct NVOC_METADATA__DispChannelPio *__nvoc_metadata_ptr;
+        const struct NVOC_RTTI *__nvoc_rtti;
+    };
+
+    // Parent (i.e. superclass or base class) objects
+    struct DispChannel __nvoc_base_DispChannel;
+
+    // Ancestor object pointers for `staticCast` feature
+    struct Object *__nvoc_pbase_Object;    // obj super^5
+    struct RsResource *__nvoc_pbase_RsResource;    // res super^4
+    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;    // rmrescmn super^4
+    struct RmResource *__nvoc_pbase_RmResource;    // rmres super^3
+    struct GpuResource *__nvoc_pbase_GpuResource;    // gpures super^2
+    struct INotifier *__nvoc_pbase_INotifier;    // inotify super^3
+    struct Notifier *__nvoc_pbase_Notifier;    // notify super^2
+    struct DispChannel *__nvoc_pbase_DispChannel;    // dispchn super
+    struct DispChannelPio *__nvoc_pbase_DispChannelPio;    // dispchnpio
+};
+
+
+// Vtable with 30 per-class function pointers
+struct NVOC_VTABLE__DispChannelPio {
+    NV_STATUS (*__dispchnpioGetRegBaseOffsetAndSize__)(struct DispChannelPio * /*this*/, struct OBJGPU *, NvU32 *, NvU32 *);  // virtual inherited (dispchn) base (dispchn)
+    NV_STATUS (*__dispchnpioControl__)(struct DispChannelPio * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);  // virtual inherited (gpures) base (dispchn)
+    NV_STATUS (*__dispchnpioMap__)(struct DispChannelPio * /*this*/, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);  // virtual inherited (gpures) base (dispchn)
+    NV_STATUS (*__dispchnpioUnmap__)(struct DispChannelPio * /*this*/, struct CALL_CONTEXT *, struct RsCpuMapping *);  // virtual inherited (gpures) base (dispchn)
+    NvBool (*__dispchnpioShareCallback__)(struct DispChannelPio * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);  // virtual inherited (gpures) base (dispchn)
+    NV_STATUS (*__dispchnpioGetMapAddrSpace__)(struct DispChannelPio * /*this*/, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);  // virtual inherited (gpures) base (dispchn)
+    NV_STATUS (*__dispchnpioInternalControlForward__)(struct DispChannelPio * /*this*/, NvU32, void *, NvU32);  // virtual inherited (gpures) base (dispchn)
+    NvHandle (*__dispchnpioGetInternalObjectHandle__)(struct DispChannelPio * /*this*/);  // virtual inherited (gpures) base (dispchn)
+    NvBool (*__dispchnpioAccessCallback__)(struct DispChannelPio * /*this*/, struct RsClient *, void *, RsAccessRight);  // virtual inherited (rmres) base (dispchn)
+    NV_STATUS (*__dispchnpioGetMemInterMapParams__)(struct DispChannelPio * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *);  // virtual inherited (rmres) base (dispchn)
+    NV_STATUS (*__dispchnpioCheckMemInterUnmap__)(struct DispChannelPio * /*this*/, NvBool);  // virtual inherited (rmres) base (dispchn)
+    NV_STATUS (*__dispchnpioGetMemoryMappingDescriptor__)(struct DispChannelPio * /*this*/, struct MEMORY_DESCRIPTOR **);  // virtual inherited (rmres) base (dispchn)
+    NV_STATUS (*__dispchnpioControlSerialization_Prologue__)(struct DispChannelPio * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);  // virtual inherited (rmres) base (dispchn)
+    void (*__dispchnpioControlSerialization_Epilogue__)(struct DispChannelPio * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);  // virtual inherited (rmres) base (dispchn)
+    NV_STATUS (*__dispchnpioControl_Prologue__)(struct DispChannelPio * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);  // virtual inherited (rmres) base (dispchn)
+    void (*__dispchnpioControl_Epilogue__)(struct DispChannelPio * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);  // virtual inherited (rmres) base (dispchn)
+    NvBool
(*__dispchnpioCanCopy__)(struct DispChannelPio * /*this*/); // virtual inherited (res) base (dispchn) + NV_STATUS (*__dispchnpioIsDuplicate__)(struct DispChannelPio * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (dispchn) + void (*__dispchnpioPreDestruct__)(struct DispChannelPio * /*this*/); // virtual inherited (res) base (dispchn) + NV_STATUS (*__dispchnpioControlFilter__)(struct DispChannelPio * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (dispchn) + NvBool (*__dispchnpioIsPartialUnmapSupported__)(struct DispChannelPio * /*this*/); // inline virtual inherited (res) base (dispchn) body + NV_STATUS (*__dispchnpioMapTo__)(struct DispChannelPio * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (dispchn) + NV_STATUS (*__dispchnpioUnmapFrom__)(struct DispChannelPio * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (dispchn) + NvU32 (*__dispchnpioGetRefCount__)(struct DispChannelPio * /*this*/); // virtual inherited (res) base (dispchn) + void (*__dispchnpioAddAdditionalDependants__)(struct RsClient *, struct DispChannelPio * /*this*/, RsResourceRef *); // virtual inherited (res) base (dispchn) + PEVENTNOTIFICATION * (*__dispchnpioGetNotificationListPtr__)(struct DispChannelPio * /*this*/); // virtual inherited (notify) base (dispchn) + struct NotifShare * (*__dispchnpioGetNotificationShare__)(struct DispChannelPio * /*this*/); // virtual inherited (notify) base (dispchn) + void (*__dispchnpioSetNotificationShare__)(struct DispChannelPio * /*this*/, struct NotifShare *); // virtual inherited (notify) base (dispchn) + NV_STATUS (*__dispchnpioUnregisterEvent__)(struct DispChannelPio * /*this*/, NvHandle, NvHandle, NvHandle, NvHandle); // virtual inherited (notify) base (dispchn) + NV_STATUS (*__dispchnpioGetOrAllocNotifShare__)(struct DispChannelPio * /*this*/, NvHandle, NvHandle, struct NotifShare **); // virtual inherited (notify) base (dispchn) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DispChannelPio { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__DispChannel metadata__DispChannel; + const struct NVOC_VTABLE__DispChannelPio vtable; +}; + +#ifndef __NVOC_CLASS_DispChannelPio_TYPEDEF__ +#define __NVOC_CLASS_DispChannelPio_TYPEDEF__ +typedef struct DispChannelPio DispChannelPio; +#endif /* __NVOC_CLASS_DispChannelPio_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannelPio +#define __nvoc_class_id_DispChannelPio 0x10dec3 +#endif /* __nvoc_class_id_DispChannelPio */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelPio; + +#define __staticCast_DispChannelPio(pThis) \ + ((pThis)->__nvoc_pbase_DispChannelPio) + +#ifdef __nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannelPio(pThis) ((DispChannelPio*) NULL) +#else //__nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannelPio(pThis) \ + ((DispChannelPio*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispChannelPio))) +#endif //__nvoc_disp_channel_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_DispChannelPio(DispChannelPio**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispChannelPio(DispChannelPio**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_DispChannelPio(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispChannelPio((ppNewObj), staticCast((pParent), Dynamic), 
(createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define dispchnpioGetRegBaseOffsetAndSize_FNPTR(pDispChannel) pDispChannel->__nvoc_base_DispChannel.__nvoc_metadata_ptr->vtable.__dispchnGetRegBaseOffsetAndSize__ +#define dispchnpioGetRegBaseOffsetAndSize(pDispChannel, pGpu, pOffset, pSize) dispchnpioGetRegBaseOffsetAndSize_DISPATCH(pDispChannel, pGpu, pOffset, pSize) +#define dispchnpioControl_FNPTR(pGpuResource) pGpuResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresControl__ +#define dispchnpioControl(pGpuResource, pCallContext, pParams) dispchnpioControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dispchnpioMap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresMap__ +#define dispchnpioMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispchnpioMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispchnpioUnmap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresUnmap__ +#define dispchnpioUnmap(pGpuResource, pCallContext, pCpuMapping) dispchnpioUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define dispchnpioShareCallback_FNPTR(pGpuResource) pGpuResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresShareCallback__ +#define dispchnpioShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispchnpioShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispchnpioGetMapAddrSpace_FNPTR(pGpuResource) pGpuResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetMapAddrSpace__ +#define dispchnpioGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispchnpioGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispchnpioInternalControlForward_FNPTR(pGpuResource) pGpuResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresInternalControlForward__ +#define dispchnpioInternalControlForward(pGpuResource, command, pParams, size) dispchnpioInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispchnpioGetInternalObjectHandle_FNPTR(pGpuResource) pGpuResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__ +#define dispchnpioGetInternalObjectHandle(pGpuResource) dispchnpioGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dispchnpioAccessCallback_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define dispchnpioAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispchnpioAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispchnpioGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define dispchnpioGetMemInterMapParams(pRmResource, pParams) dispchnpioGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispchnpioCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define dispchnpioCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) 
dispchnpioCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispchnpioGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define dispchnpioGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispchnpioGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispchnpioControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define dispchnpioControlSerialization_Prologue(pResource, pCallContext, pParams) dispchnpioControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnpioControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define dispchnpioControlSerialization_Epilogue(pResource, pCallContext, pParams) dispchnpioControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnpioControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define dispchnpioControl_Prologue(pResource, pCallContext, pParams) dispchnpioControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnpioControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define dispchnpioControl_Epilogue(pResource, pCallContext, pParams) dispchnpioControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnpioCanCopy_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define dispchnpioCanCopy(pResource) dispchnpioCanCopy_DISPATCH(pResource) +#define dispchnpioIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define dispchnpioIsDuplicate(pResource, hMemory, pDuplicate) dispchnpioIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define dispchnpioPreDestruct_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define dispchnpioPreDestruct(pResource) dispchnpioPreDestruct_DISPATCH(pResource) +#define dispchnpioControlFilter_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define dispchnpioControlFilter(pResource, pCallContext, pParams) dispchnpioControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispchnpioIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define dispchnpioIsPartialUnmapSupported(pResource) dispchnpioIsPartialUnmapSupported_DISPATCH(pResource) +#define dispchnpioMapTo_FNPTR(pResource) 
pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define dispchnpioMapTo(pResource, pParams) dispchnpioMapTo_DISPATCH(pResource, pParams) +#define dispchnpioUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define dispchnpioUnmapFrom(pResource, pParams) dispchnpioUnmapFrom_DISPATCH(pResource, pParams) +#define dispchnpioGetRefCount_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define dispchnpioGetRefCount(pResource) dispchnpioGetRefCount_DISPATCH(pResource) +#define dispchnpioAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define dispchnpioAddAdditionalDependants(pClient, pResource, pReference) dispchnpioAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispchnpioGetNotificationListPtr_FNPTR(pNotifier) pNotifier->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationListPtr__ +#define dispchnpioGetNotificationListPtr(pNotifier) dispchnpioGetNotificationListPtr_DISPATCH(pNotifier) +#define dispchnpioGetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationShare__ +#define dispchnpioGetNotificationShare(pNotifier) dispchnpioGetNotificationShare_DISPATCH(pNotifier) +#define dispchnpioSetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifySetNotificationShare__ +#define dispchnpioSetNotificationShare(pNotifier, pNotifShare) dispchnpioSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispchnpioUnregisterEvent_FNPTR(pNotifier) pNotifier->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyUnregisterEvent__ +#define dispchnpioUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispchnpioUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispchnpioGetOrAllocNotifShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetOrAllocNotifShare__ +#define dispchnpioGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispchnpioGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) + +// Dispatch functions +static inline NV_STATUS dispchnpioGetRegBaseOffsetAndSize_DISPATCH(struct DispChannelPio *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pDispChannel->__nvoc_metadata_ptr->vtable.__dispchnpioGetRegBaseOffsetAndSize__(pDispChannel, pGpu, pOffset, pSize); +} + +static inline NV_STATUS dispchnpioControl_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchnpioControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnpioMap_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct 
RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchnpioMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispchnpioUnmap_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchnpioUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NvBool dispchnpioShareCallback_DISPATCH(struct DispChannelPio *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchnpioShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispchnpioGetMapAddrSpace_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchnpioGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS dispchnpioInternalControlForward_DISPATCH(struct DispChannelPio *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchnpioInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NvHandle dispchnpioGetInternalObjectHandle_DISPATCH(struct DispChannelPio *pGpuResource) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchnpioGetInternalObjectHandle__(pGpuResource); +} + +static inline NvBool dispchnpioAccessCallback_DISPATCH(struct DispChannelPio *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnpioAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS dispchnpioGetMemInterMapParams_DISPATCH(struct DispChannelPio *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispchnpioGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispchnpioCheckMemInterUnmap_DISPATCH(struct DispChannelPio *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispchnpioCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispchnpioGetMemoryMappingDescriptor_DISPATCH(struct DispChannelPio *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispchnpioGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispchnpioControlSerialization_Prologue_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnpioControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void dispchnpioControlSerialization_Epilogue_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__dispchnpioControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnpioControl_Prologue_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return 
pResource->__nvoc_metadata_ptr->vtable.__dispchnpioControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void dispchnpioControl_Epilogue_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__dispchnpioControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool dispchnpioCanCopy_DISPATCH(struct DispChannelPio *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnpioCanCopy__(pResource); +} + +static inline NV_STATUS dispchnpioIsDuplicate_DISPATCH(struct DispChannelPio *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnpioIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void dispchnpioPreDestruct_DISPATCH(struct DispChannelPio *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__dispchnpioPreDestruct__(pResource); +} + +static inline NV_STATUS dispchnpioControlFilter_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnpioControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool dispchnpioIsPartialUnmapSupported_DISPATCH(struct DispChannelPio *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnpioIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS dispchnpioMapTo_DISPATCH(struct DispChannelPio *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnpioMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispchnpioUnmapFrom_DISPATCH(struct DispChannelPio *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnpioUnmapFrom__(pResource, pParams); +} + +static inline NvU32 dispchnpioGetRefCount_DISPATCH(struct DispChannelPio *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchnpioGetRefCount__(pResource); +} + +static inline void dispchnpioAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispChannelPio *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__dispchnpioAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline PEVENTNOTIFICATION * dispchnpioGetNotificationListPtr_DISPATCH(struct DispChannelPio *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispchnpioGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare * dispchnpioGetNotificationShare_DISPATCH(struct DispChannelPio *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispchnpioGetNotificationShare__(pNotifier); +} + +static inline void dispchnpioSetNotificationShare_DISPATCH(struct DispChannelPio *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvoc_metadata_ptr->vtable.__dispchnpioSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS dispchnpioUnregisterEvent_DISPATCH(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispchnpioUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS dispchnpioGetOrAllocNotifShare_DISPATCH(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { 
+ return pNotifier->__nvoc_metadata_ptr->vtable.__dispchnpioGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS dispchnpioConstruct_IMPL(struct DispChannelPio *arg_pDispChannelPio, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_dispchnpioConstruct(arg_pDispChannelPio, arg_pCallContext, arg_pParams) dispchnpioConstruct_IMPL(arg_pDispChannelPio, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +/*! + * RM internal class representing XXX_XXX_CHANNEL_DMA + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DispChannelDma; +struct NVOC_METADATA__DispChannel; +struct NVOC_VTABLE__DispChannelDma; + + +struct DispChannelDma { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__DispChannelDma *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct DispChannel __nvoc_base_DispChannel; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^5 + struct RsResource *__nvoc_pbase_RsResource; // res super^4 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^4 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^3 + struct GpuResource *__nvoc_pbase_GpuResource; // gpures super^2 + struct INotifier *__nvoc_pbase_INotifier; // inotify super^3 + struct Notifier *__nvoc_pbase_Notifier; // notify super^2 + struct DispChannel *__nvoc_pbase_DispChannel; // dispchn super + struct DispChannelDma *__nvoc_pbase_DispChannelDma; // dispchndma +}; + + +// Vtable with 30 per-class function pointers +struct NVOC_VTABLE__DispChannelDma { + NV_STATUS (*__dispchndmaGetRegBaseOffsetAndSize__)(struct DispChannelDma * /*this*/, struct OBJGPU *, NvU32 *, NvU32 *); // virtual inherited (dispchn) base (dispchn) + NV_STATUS (*__dispchndmaControl__)(struct DispChannelDma * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (gpures) base (dispchn) + NV_STATUS (*__dispchndmaMap__)(struct DispChannelDma * /*this*/, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); // virtual inherited (gpures) base (dispchn) + NV_STATUS (*__dispchndmaUnmap__)(struct DispChannelDma * /*this*/, struct CALL_CONTEXT *, struct RsCpuMapping *); // virtual inherited (gpures) base (dispchn) + NvBool (*__dispchndmaShareCallback__)(struct DispChannelDma * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (gpures) base (dispchn) + NV_STATUS (*__dispchndmaGetMapAddrSpace__)(struct DispChannelDma * /*this*/, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (gpures) base (dispchn) + NV_STATUS (*__dispchndmaInternalControlForward__)(struct DispChannelDma * /*this*/, NvU32, void *, NvU32); // virtual inherited (gpures) base (dispchn) + NvHandle (*__dispchndmaGetInternalObjectHandle__)(struct DispChannelDma * /*this*/); // virtual inherited (gpures) base (dispchn) + NvBool (*__dispchndmaAccessCallback__)(struct DispChannelDma * /*this*/, struct RsClient *, void 
*, RsAccessRight); // virtual inherited (rmres) base (dispchn) + NV_STATUS (*__dispchndmaGetMemInterMapParams__)(struct DispChannelDma * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (dispchn) + NV_STATUS (*__dispchndmaCheckMemInterUnmap__)(struct DispChannelDma * /*this*/, NvBool); // virtual inherited (rmres) base (dispchn) + NV_STATUS (*__dispchndmaGetMemoryMappingDescriptor__)(struct DispChannelDma * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (dispchn) + NV_STATUS (*__dispchndmaControlSerialization_Prologue__)(struct DispChannelDma * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (dispchn) + void (*__dispchndmaControlSerialization_Epilogue__)(struct DispChannelDma * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (dispchn) + NV_STATUS (*__dispchndmaControl_Prologue__)(struct DispChannelDma * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (dispchn) + void (*__dispchndmaControl_Epilogue__)(struct DispChannelDma * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (dispchn) + NvBool (*__dispchndmaCanCopy__)(struct DispChannelDma * /*this*/); // virtual inherited (res) base (dispchn) + NV_STATUS (*__dispchndmaIsDuplicate__)(struct DispChannelDma * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (dispchn) + void (*__dispchndmaPreDestruct__)(struct DispChannelDma * /*this*/); // virtual inherited (res) base (dispchn) + NV_STATUS (*__dispchndmaControlFilter__)(struct DispChannelDma * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (dispchn) + NvBool (*__dispchndmaIsPartialUnmapSupported__)(struct DispChannelDma * /*this*/); // inline virtual inherited (res) base (dispchn) body + NV_STATUS (*__dispchndmaMapTo__)(struct DispChannelDma * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (dispchn) + NV_STATUS (*__dispchndmaUnmapFrom__)(struct DispChannelDma * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (dispchn) + NvU32 (*__dispchndmaGetRefCount__)(struct DispChannelDma * /*this*/); // virtual inherited (res) base (dispchn) + void (*__dispchndmaAddAdditionalDependants__)(struct RsClient *, struct DispChannelDma * /*this*/, RsResourceRef *); // virtual inherited (res) base (dispchn) + PEVENTNOTIFICATION * (*__dispchndmaGetNotificationListPtr__)(struct DispChannelDma * /*this*/); // virtual inherited (notify) base (dispchn) + struct NotifShare * (*__dispchndmaGetNotificationShare__)(struct DispChannelDma * /*this*/); // virtual inherited (notify) base (dispchn) + void (*__dispchndmaSetNotificationShare__)(struct DispChannelDma * /*this*/, struct NotifShare *); // virtual inherited (notify) base (dispchn) + NV_STATUS (*__dispchndmaUnregisterEvent__)(struct DispChannelDma * /*this*/, NvHandle, NvHandle, NvHandle, NvHandle); // virtual inherited (notify) base (dispchn) + NV_STATUS (*__dispchndmaGetOrAllocNotifShare__)(struct DispChannelDma * /*this*/, NvHandle, NvHandle, struct NotifShare **); // virtual inherited (notify) base (dispchn) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DispChannelDma { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__DispChannel metadata__DispChannel; + const struct NVOC_VTABLE__DispChannelDma vtable; 
+}; + +#ifndef __NVOC_CLASS_DispChannelDma_TYPEDEF__ +#define __NVOC_CLASS_DispChannelDma_TYPEDEF__ +typedef struct DispChannelDma DispChannelDma; +#endif /* __NVOC_CLASS_DispChannelDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannelDma +#define __nvoc_class_id_DispChannelDma 0xfe3d2e +#endif /* __nvoc_class_id_DispChannelDma */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelDma; + +#define __staticCast_DispChannelDma(pThis) \ + ((pThis)->__nvoc_pbase_DispChannelDma) + +#ifdef __nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannelDma(pThis) ((DispChannelDma*) NULL) +#else //__nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannelDma(pThis) \ + ((DispChannelDma*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispChannelDma))) +#endif //__nvoc_disp_channel_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_DispChannelDma(DispChannelDma**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispChannelDma(DispChannelDma**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_DispChannelDma(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispChannelDma((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define dispchndmaGetRegBaseOffsetAndSize_FNPTR(pDispChannel) pDispChannel->__nvoc_base_DispChannel.__nvoc_metadata_ptr->vtable.__dispchnGetRegBaseOffsetAndSize__ +#define dispchndmaGetRegBaseOffsetAndSize(pDispChannel, pGpu, pOffset, pSize) dispchndmaGetRegBaseOffsetAndSize_DISPATCH(pDispChannel, pGpu, pOffset, pSize) +#define dispchndmaControl_FNPTR(pGpuResource) pGpuResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresControl__ +#define dispchndmaControl(pGpuResource, pCallContext, pParams) dispchndmaControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dispchndmaMap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresMap__ +#define dispchndmaMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispchndmaMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispchndmaUnmap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresUnmap__ +#define dispchndmaUnmap(pGpuResource, pCallContext, pCpuMapping) dispchndmaUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define dispchndmaShareCallback_FNPTR(pGpuResource) pGpuResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresShareCallback__ +#define dispchndmaShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispchndmaShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispchndmaGetMapAddrSpace_FNPTR(pGpuResource) pGpuResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetMapAddrSpace__ +#define dispchndmaGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispchndmaGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispchndmaInternalControlForward_FNPTR(pGpuResource) pGpuResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresInternalControlForward__ +#define dispchndmaInternalControlForward(pGpuResource, command, pParams, size) 
dispchndmaInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispchndmaGetInternalObjectHandle_FNPTR(pGpuResource) pGpuResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__ +#define dispchndmaGetInternalObjectHandle(pGpuResource) dispchndmaGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dispchndmaAccessCallback_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define dispchndmaAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispchndmaAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispchndmaGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define dispchndmaGetMemInterMapParams(pRmResource, pParams) dispchndmaGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispchndmaCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define dispchndmaCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispchndmaCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispchndmaGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define dispchndmaGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispchndmaGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispchndmaControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define dispchndmaControlSerialization_Prologue(pResource, pCallContext, pParams) dispchndmaControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispchndmaControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define dispchndmaControlSerialization_Epilogue(pResource, pCallContext, pParams) dispchndmaControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispchndmaControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define dispchndmaControl_Prologue(pResource, pCallContext, pParams) dispchndmaControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispchndmaControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define dispchndmaControl_Epilogue(pResource, pCallContext, pParams) dispchndmaControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispchndmaCanCopy_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define dispchndmaCanCopy(pResource) dispchndmaCanCopy_DISPATCH(pResource) +#define dispchndmaIsDuplicate_FNPTR(pResource) 
pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define dispchndmaIsDuplicate(pResource, hMemory, pDuplicate) dispchndmaIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define dispchndmaPreDestruct_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define dispchndmaPreDestruct(pResource) dispchndmaPreDestruct_DISPATCH(pResource) +#define dispchndmaControlFilter_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define dispchndmaControlFilter(pResource, pCallContext, pParams) dispchndmaControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispchndmaIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define dispchndmaIsPartialUnmapSupported(pResource) dispchndmaIsPartialUnmapSupported_DISPATCH(pResource) +#define dispchndmaMapTo_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define dispchndmaMapTo(pResource, pParams) dispchndmaMapTo_DISPATCH(pResource, pParams) +#define dispchndmaUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define dispchndmaUnmapFrom(pResource, pParams) dispchndmaUnmapFrom_DISPATCH(pResource, pParams) +#define dispchndmaGetRefCount_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define dispchndmaGetRefCount(pResource) dispchndmaGetRefCount_DISPATCH(pResource) +#define dispchndmaAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define dispchndmaAddAdditionalDependants(pClient, pResource, pReference) dispchndmaAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispchndmaGetNotificationListPtr_FNPTR(pNotifier) pNotifier->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationListPtr__ +#define dispchndmaGetNotificationListPtr(pNotifier) dispchndmaGetNotificationListPtr_DISPATCH(pNotifier) +#define dispchndmaGetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationShare__ +#define dispchndmaGetNotificationShare(pNotifier) dispchndmaGetNotificationShare_DISPATCH(pNotifier) +#define dispchndmaSetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifySetNotificationShare__ +#define dispchndmaSetNotificationShare(pNotifier, pNotifShare) dispchndmaSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispchndmaUnregisterEvent_FNPTR(pNotifier) pNotifier->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyUnregisterEvent__ +#define dispchndmaUnregisterEvent(pNotifier, hNotifierClient, 
hNotifierResource, hEventClient, hEvent) dispchndmaUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispchndmaGetOrAllocNotifShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetOrAllocNotifShare__ +#define dispchndmaGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispchndmaGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) + +// Dispatch functions +static inline NV_STATUS dispchndmaGetRegBaseOffsetAndSize_DISPATCH(struct DispChannelDma *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pDispChannel->__nvoc_metadata_ptr->vtable.__dispchndmaGetRegBaseOffsetAndSize__(pDispChannel, pGpu, pOffset, pSize); +} + +static inline NV_STATUS dispchndmaControl_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchndmaControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchndmaMap_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchndmaMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispchndmaUnmap_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchndmaUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NvBool dispchndmaShareCallback_DISPATCH(struct DispChannelDma *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchndmaShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispchndmaGetMapAddrSpace_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchndmaGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS dispchndmaInternalControlForward_DISPATCH(struct DispChannelDma *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchndmaInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NvHandle dispchndmaGetInternalObjectHandle_DISPATCH(struct DispChannelDma *pGpuResource) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispchndmaGetInternalObjectHandle__(pGpuResource); +} + +static inline NvBool dispchndmaAccessCallback_DISPATCH(struct DispChannelDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchndmaAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS dispchndmaGetMemInterMapParams_DISPATCH(struct DispChannelDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispchndmaGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispchndmaCheckMemInterUnmap_DISPATCH(struct DispChannelDma *pRmResource, NvBool 
bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispchndmaCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispchndmaGetMemoryMappingDescriptor_DISPATCH(struct DispChannelDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispchndmaGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispchndmaControlSerialization_Prologue_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchndmaControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void dispchndmaControlSerialization_Epilogue_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__dispchndmaControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchndmaControl_Prologue_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchndmaControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void dispchndmaControl_Epilogue_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__dispchndmaControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool dispchndmaCanCopy_DISPATCH(struct DispChannelDma *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchndmaCanCopy__(pResource); +} + +static inline NV_STATUS dispchndmaIsDuplicate_DISPATCH(struct DispChannelDma *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchndmaIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void dispchndmaPreDestruct_DISPATCH(struct DispChannelDma *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__dispchndmaPreDestruct__(pResource); +} + +static inline NV_STATUS dispchndmaControlFilter_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchndmaControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool dispchndmaIsPartialUnmapSupported_DISPATCH(struct DispChannelDma *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchndmaIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS dispchndmaMapTo_DISPATCH(struct DispChannelDma *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchndmaMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispchndmaUnmapFrom_DISPATCH(struct DispChannelDma *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchndmaUnmapFrom__(pResource, pParams); +} + +static inline NvU32 dispchndmaGetRefCount_DISPATCH(struct DispChannelDma *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispchndmaGetRefCount__(pResource); +} + +static inline void dispchndmaAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispChannelDma *pResource, RsResourceRef *pReference) { + 
pResource->__nvoc_metadata_ptr->vtable.__dispchndmaAddAdditionalDependants__(pClient, pResource, pReference);
+}
+
+static inline PEVENTNOTIFICATION * dispchndmaGetNotificationListPtr_DISPATCH(struct DispChannelDma *pNotifier) {
+    return pNotifier->__nvoc_metadata_ptr->vtable.__dispchndmaGetNotificationListPtr__(pNotifier);
+}
+
+static inline struct NotifShare * dispchndmaGetNotificationShare_DISPATCH(struct DispChannelDma *pNotifier) {
+    return pNotifier->__nvoc_metadata_ptr->vtable.__dispchndmaGetNotificationShare__(pNotifier);
+}
+
+static inline void dispchndmaSetNotificationShare_DISPATCH(struct DispChannelDma *pNotifier, struct NotifShare *pNotifShare) {
+    pNotifier->__nvoc_metadata_ptr->vtable.__dispchndmaSetNotificationShare__(pNotifier, pNotifShare);
+}
+
+static inline NV_STATUS dispchndmaUnregisterEvent_DISPATCH(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
+    return pNotifier->__nvoc_metadata_ptr->vtable.__dispchndmaUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
+}
+
+static inline NV_STATUS dispchndmaGetOrAllocNotifShare_DISPATCH(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
+    return pNotifier->__nvoc_metadata_ptr->vtable.__dispchndmaGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
+}
+
+NV_STATUS dispchndmaConstruct_IMPL(struct DispChannelDma *arg_pDispChannelDma, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+
+#define __nvoc_dispchndmaConstruct(arg_pDispChannelDma, arg_pCallContext, arg_pParams) dispchndmaConstruct_IMPL(arg_pDispChannelDma, arg_pCallContext, arg_pParams)
+#undef PRIVATE_FIELD
+
+
+#endif // DISP_CHANNEL_H
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _G_DISP_CHANNEL_NVOC_H_
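Before the patch moves on to the next generated file, note what the wrapper layer in this header amounts to: each public name (dispchndmaUnregisterEvent and the rest) expands to a *_DISPATCH inline that loads the per-class vtable through __nvoc_metadata_ptr and makes an indirect call, the C analogue of a C++ virtual call, while the *_FNPTR macros expose the inherited slot an override would occupy. A minimal caller sketch follows; it is illustrative only, and exampleUnregister with its arguments is hypothetical, not taken from the patch.

    /* Illustrative use of the generated wrappers; pChan is assumed to be an
     * already-constructed DispChannelDma. */
    static NV_STATUS exampleUnregister(struct DispChannelDma *pChan,
                                       NvHandle hClient, NvHandle hRes,
                                       NvHandle hEvtClient, NvHandle hEvt)
    {
        // Expands to dispchndmaUnregisterEvent_DISPATCH(), which routes the
        // call through pChan->__nvoc_metadata_ptr->vtable -- an indirect call
        // resolved at run time, like a C++ virtual method.
        return dispchndmaUnregisterEvent(pChan, hClient, hRes, hEvtClient, hEvt);
    }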
__nvoc_export_info__DisplayInstanceMemory; + +// Down-thunk(s) to bridge DisplayInstanceMemory methods from ancestors (if any) + +// Up-thunk(s) to bridge DisplayInstanceMemory methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayInstanceMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DisplayInstanceMemory), + /*classId=*/ classId(DisplayInstanceMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DisplayInstanceMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DisplayInstanceMemory, + /*pCastInfo=*/ &__nvoc_castinfo__DisplayInstanceMemory, + /*pExportInfo=*/ &__nvoc_export_info__DisplayInstanceMemory +}; + + +// Metadata with per-class RTTI with ancestor(s) +static const struct NVOC_METADATA__DisplayInstanceMemory __nvoc_metadata__DisplayInstanceMemory = { + .rtti.pClassDef = &__nvoc_class_def_DisplayInstanceMemory, // (instmem) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DisplayInstanceMemory, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(DisplayInstanceMemory, __nvoc_base_Object), +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__DisplayInstanceMemory = { + .numRelatives = 2, + .relatives = { + &__nvoc_metadata__DisplayInstanceMemory.rtti, // [0]: (instmem) this + &__nvoc_metadata__DisplayInstanceMemory.metadata__Object.rtti, // [1]: (obj) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__DisplayInstanceMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_DisplayInstanceMemory(DisplayInstanceMemory *pThis) { + __nvoc_instmemDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) { + DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal; + const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(dispIpHal); + PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_DisplayInstanceMemory_fail_Object; + __nvoc_init_dataField_DisplayInstanceMemory(pThis, pRmhalspecowner); + + status = __nvoc_instmemConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_DisplayInstanceMemory_fail__init; + goto __nvoc_ctor_DisplayInstanceMemory_exit; // Success + +__nvoc_ctor_DisplayInstanceMemory_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_DisplayInstanceMemory_fail_Object: +__nvoc_ctor_DisplayInstanceMemory_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_DisplayInstanceMemory_1(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) { + DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal; + const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + 
PORT_UNREFERENCED_VARIABLE(dispIpHal); + PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx); +} // End __nvoc_init_funcTable_DisplayInstanceMemory_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_DisplayInstanceMemory_1(pThis, pRmhalspecowner); +} + +// Initialize newly constructed object. +void __nvoc_init__DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_DisplayInstanceMemory = pThis; // (instmem) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Object(&pThis->__nvoc_base_Object); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__DisplayInstanceMemory.metadata__Object; // (obj) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__DisplayInstanceMemory; // (instmem) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_DisplayInstanceMemory(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_DisplayInstanceMemory(DisplayInstanceMemory **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + DisplayInstanceMemory *pThis; + RmHalspecOwner *pRmhalspecowner; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(DisplayInstanceMemory), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(DisplayInstanceMemory)); + + pThis->__nvoc_base_Object.createFlags = createFlags; + + // pParent must be a valid object that derives from a halspec owner class. + NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_ARGUMENT); + + // Link the child into the parent unless flagged not to do so. + if (!(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init__DisplayInstanceMemory(pThis, pRmhalspecowner); + status = __nvoc_ctor_DisplayInstanceMemory(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_DisplayInstanceMemory_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_DisplayInstanceMemory_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. 
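+ // Cleanup detail: the memory behind pThis came from __nvoc_handleObjCreateMemAlloc() above. With NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT the caller supplied that storage, so it is only scrubbed back to zero; otherwise it was allocated here and is freed, with *ppThis cleared so the caller never observes the partially constructed object.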
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(DisplayInstanceMemory)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DisplayInstanceMemory(DisplayInstanceMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_DisplayInstanceMemory(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_disp_inst_mem_nvoc.h b/src/nvidia/generated/g_disp_inst_mem_nvoc.h new file mode 100644 index 0000000..cbc8e9a --- /dev/null +++ b/src/nvidia/generated/g_disp_inst_mem_nvoc.h @@ -0,0 +1,403 @@ + +#ifndef _G_DISP_INST_MEM_NVOC_H_ +#define _G_DISP_INST_MEM_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once +#include "g_disp_inst_mem_nvoc.h" + +#ifndef DISPLAY_INSTANCE_MEMORY_H +#define DISPLAY_INSTANCE_MEMORY_H + +/* ------------------------ Includes --------------------------------------- */ +#include "nvtypes.h" +#include "nvoc/utility.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "gpu/mem_mgr/mem_desc.h" + +/* ------------------------ Forward Declaration ---------------------------- */ +typedef struct OBJEHEAP OBJEHEAP; + +struct DispChannel; + +#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__ +#define __NVOC_CLASS_DispChannel_TYPEDEF__ +typedef struct DispChannel DispChannel; +#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannel +#define __nvoc_class_id_DispChannel 0xbd2ff3 +#endif /* __nvoc_class_id_DispChannel */ + + + +struct ContextDma; + +#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__ +#define __NVOC_CLASS_ContextDma_TYPEDEF__ +typedef struct ContextDma ContextDma; +#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ContextDma +#define __nvoc_class_id_ContextDma 0x88441b +#endif /* __nvoc_class_id_ContextDma */ + + + +/* ------------------------ Macros & Defines ------------------------------- */ +#define KERNEL_DISPLAY_GET_INST_MEM(p) ((p)->pInst) +#define DISP_INST_MEM_ALIGN 0x10000 + +/* ------------------------ Types definitions ------------------------------ */ +/*! + * A software hash table entry + */ +typedef struct +{ + struct ContextDma *pContextDma; + struct DispChannel *pDispChannel; +} SW_HASH_TABLE_ENTRY; + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_DISP_INST_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__DisplayInstanceMemory; +struct NVOC_METADATA__Object; + + +struct DisplayInstanceMemory { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__DisplayInstanceMemory *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Object __nvoc_base_Object; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct DisplayInstanceMemory *__nvoc_pbase_DisplayInstanceMemory; // instmem + + // Data members + NV_ADDRESS_SPACE instMemAddrSpace; + NvU32 instMemAttr; + NvU64 instMemBase; + NvU32 instMemSize; + MEMORY_DESCRIPTOR *pAllocedInstMemDesc; + MEMORY_DESCRIPTOR *pInstMemDesc; + void *pInstMem; + NvU32 nHashTableEntries; + NvU32 hashTableBaseAddr; + SW_HASH_TABLE_ENTRY *pHashTable; + OBJEHEAP *pInstHeap; +}; + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__DisplayInstanceMemory { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; +}; + +#ifndef __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ +#define __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ +typedef struct DisplayInstanceMemory DisplayInstanceMemory; +#endif /* __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DisplayInstanceMemory +#define __nvoc_class_id_DisplayInstanceMemory 0x8223e2 +#endif /* __nvoc_class_id_DisplayInstanceMemory */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayInstanceMemory; + +#define __staticCast_DisplayInstanceMemory(pThis) \ + ((pThis)->__nvoc_pbase_DisplayInstanceMemory) + +#ifdef __nvoc_disp_inst_mem_h_disabled +#define __dynamicCast_DisplayInstanceMemory(pThis) ((DisplayInstanceMemory*) NULL) +#else //__nvoc_disp_inst_mem_h_disabled +#define __dynamicCast_DisplayInstanceMemory(pThis) \ + ((DisplayInstanceMemory*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DisplayInstanceMemory))) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_DisplayInstanceMemory(DisplayInstanceMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DisplayInstanceMemory(DisplayInstanceMemory**, Dynamic*, NvU32); +#define __objCreate_DisplayInstanceMemory(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_DisplayInstanceMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros + +// Dispatch functions +void instmemGetSize_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 *pTotalInstMemSize, NvU32 *pHashTableSize); + + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline void instmemGetSize(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 *pTotalInstMemSize, NvU32 *pHashTableSize) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemGetSize(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize) instmemGetSize_v03_00(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemGetSize_HAL(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize) instmemGetSize(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize) + +NvU32 instmemGetHashTableBaseAddr_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem); + + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NvU32 instmemGetHashTableBaseAddr(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return 0; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemGetHashTableBaseAddr(pGpu, pInstMem) instmemGetHashTableBaseAddr_v03_00(pGpu, pInstMem) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemGetHashTableBaseAddr_HAL(pGpu, pInstMem) instmemGetHashTableBaseAddr(pGpu, 
pInstMem) + +NvBool instmemIsValid_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 offset); + + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NvBool instmemIsValid(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 offset) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_FALSE; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemIsValid(pGpu, pInstMem, offset) instmemIsValid_v03_00(pGpu, pInstMem, offset) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemIsValid_HAL(pGpu, pInstMem, offset) instmemIsValid(pGpu, pInstMem, offset) + +NvU32 instmemGenerateHashTableData_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 hClient, NvU32 offset, NvU32 dispChannelNum); + + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NvU32 instmemGenerateHashTableData(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 hClient, NvU32 offset, NvU32 dispChannelNum) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return 0; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemGenerateHashTableData(pGpu, pInstMem, hClient, offset, dispChannelNum) instmemGenerateHashTableData_v03_00(pGpu, pInstMem, hClient, offset, dispChannelNum) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemGenerateHashTableData_HAL(pGpu, pInstMem, hClient, offset, dispChannelNum) instmemGenerateHashTableData(pGpu, pInstMem, hClient, offset, dispChannelNum) + +NV_STATUS instmemHashFunc_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvHandle hClient, NvHandle hContextDma, NvU32 dispChannelNum, NvU32 *result); + + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemHashFunc(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvHandle hClient, NvHandle hContextDma, NvU32 dispChannelNum, NvU32 *result) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemHashFunc(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result) instmemHashFunc_v03_00(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemHashFunc_HAL(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result) instmemHashFunc(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result) + +NV_STATUS instmemCommitContextDma_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma); + + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemCommitContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemCommitContextDma(pGpu, pInstMem, pContextDma) instmemCommitContextDma_v03_00(pGpu, pInstMem, pContextDma) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemCommitContextDma_HAL(pGpu, pInstMem, pContextDma) instmemCommitContextDma(pGpu, pInstMem, pContextDma) + +static inline void instmemDecommitContextDma_b3696a(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) { + return; +} + + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline void instmemDecommitContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); +} +#else 
//__nvoc_disp_inst_mem_h_disabled +#define instmemDecommitContextDma(pGpu, pInstMem, pContextDma) instmemDecommitContextDma_b3696a(pGpu, pInstMem, pContextDma) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemDecommitContextDma_HAL(pGpu, pInstMem, pContextDma) instmemDecommitContextDma(pGpu, pInstMem, pContextDma) + +NV_STATUS instmemUpdateContextDma_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, NvU64 *pNewAddress, NvU64 *pNewLimit, NvHandle hMemory, NvU32 comprInfo); + + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemUpdateContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, NvU64 *pNewAddress, NvU64 *pNewLimit, NvHandle hMemory, NvU32 comprInfo) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemUpdateContextDma(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo) instmemUpdateContextDma_v03_00(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemUpdateContextDma_HAL(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo) instmemUpdateContextDma(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo) + +NV_STATUS instmemConstruct_IMPL(struct DisplayInstanceMemory *arg_pInstMem); + +#define __nvoc_instmemConstruct(arg_pInstMem) instmemConstruct_IMPL(arg_pInstMem) +void instmemDestruct_IMPL(struct DisplayInstanceMemory *pInstMem); + +#define __nvoc_instmemDestruct(pInstMem) instmemDestruct_IMPL(pInstMem) +NV_STATUS instmemStateInitLocked_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemStateInitLocked(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemStateInitLocked(pGpu, pInstMem) instmemStateInitLocked_IMPL(pGpu, pInstMem) +#endif //__nvoc_disp_inst_mem_h_disabled + +void instmemStateDestroy_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline void instmemStateDestroy(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemStateDestroy(pGpu, pInstMem) instmemStateDestroy_IMPL(pGpu, pInstMem) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemStateLoad_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemStateLoad(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemStateLoad(pGpu, pInstMem, flags) instmemStateLoad_IMPL(pGpu, pInstMem, flags) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemStateUnload_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemStateUnload(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return 
NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemStateUnload(pGpu, pInstMem, flags) instmemStateUnload_IMPL(pGpu, pInstMem, flags) +#endif //__nvoc_disp_inst_mem_h_disabled + +void instmemSetMemory_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NV_ADDRESS_SPACE dispInstMemAddrSpace, NvU32 dispInstMemAttr, NvU64 dispInstMemBase, NvU32 dispInstMemSize); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline void instmemSetMemory(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NV_ADDRESS_SPACE dispInstMemAddrSpace, NvU32 dispInstMemAttr, NvU64 dispInstMemBase, NvU32 dispInstMemSize) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemSetMemory(pGpu, pInstMem, dispInstMemAddrSpace, dispInstMemAttr, dispInstMemBase, dispInstMemSize) instmemSetMemory_IMPL(pGpu, pInstMem, dispInstMemAddrSpace, dispInstMemAttr, dispInstMemBase, dispInstMemSize) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemBindContextDma_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemBindContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemBindContextDma(pGpu, pInstMem, pContextDma, pDispChannel) instmemBindContextDma_IMPL(pGpu, pInstMem, pContextDma, pDispChannel) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemUnbindContextDma_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemUnbindContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemUnbindContextDma(pGpu, pInstMem, pContextDma, pDispChannel) instmemUnbindContextDma_IMPL(pGpu, pInstMem, pContextDma, pDispChannel) +#endif //__nvoc_disp_inst_mem_h_disabled + +void instmemUnbindContextDmaFromAllChannels_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline void instmemUnbindContextDmaFromAllChannels(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemUnbindContextDmaFromAllChannels(pGpu, pInstMem, pContextDma) instmemUnbindContextDmaFromAllChannels_IMPL(pGpu, pInstMem, pContextDma) +#endif //__nvoc_disp_inst_mem_h_disabled + +void instmemUnbindDispChannelContextDmas_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct DispChannel *pDispChannel); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline void instmemUnbindDispChannelContextDmas(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemUnbindDispChannelContextDmas(pGpu, 
pInstMem, pDispChannel) instmemUnbindDispChannelContextDmas_IMPL(pGpu, pInstMem, pDispChannel) +#endif //__nvoc_disp_inst_mem_h_disabled + +#undef PRIVATE_FIELD + + +#endif // DISPLAY_INSTANCE_MEMORY_H + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_DISP_INST_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_disp_objs_nvoc.c b/src/nvidia/generated/g_disp_objs_nvoc.c new file mode 100644 index 0000000..b28dda7 --- /dev/null +++ b/src/nvidia/generated/g_disp_objs_nvoc.c @@ -0,0 +1,5578 @@ +#define NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_disp_objs_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xe9980c = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +// Forward declarations for DisplayApi +void __nvoc_init__RmResource(RmResource*); +void __nvoc_init__Notifier(Notifier*); +void __nvoc_init__DisplayApi(DisplayApi*, RmHalspecOwner *pRmhalspecowner); +void __nvoc_init_funcTable_DisplayApi(DisplayApi*, RmHalspecOwner *pRmhalspecowner); +NV_STATUS __nvoc_ctor_DisplayApi(DisplayApi*, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_DisplayApi(DisplayApi*, RmHalspecOwner *pRmhalspecowner); +void __nvoc_dtor_DisplayApi(DisplayApi*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__DisplayApi; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__DisplayApi; + +// Down-thunk(s) to bridge DisplayApi methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +PEVENTNOTIFICATION * __nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier); // super +struct NotifShare * 
__nvoc_down_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier); // super +void __nvoc_down_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare); // super +NV_STATUS __nvoc_down_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super +NV_STATUS __nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super +NV_STATUS __nvoc_down_thunk_DisplayApi_resControl(struct RsResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_down_thunk_DisplayApi_rmresControl_Prologue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // this +void __nvoc_down_thunk_DisplayApi_rmresControl_Epilogue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // this + +// Up-thunk(s) to bridge DisplayApi methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super +NvBool __nvoc_up_thunk_RmResource_dispapiAccessCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NvBool __nvoc_up_thunk_RmResource_dispapiShareCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispapiGetMemInterMapParams(struct DisplayApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispapiCheckMemInterUnmap(struct DisplayApi *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispapiGetMemoryMappingDescriptor(struct 
DisplayApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispapiControlSerialization_Prologue(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_dispapiControlSerialization_Epilogue(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_dispapiCanCopy(struct DisplayApi *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispapiIsDuplicate(struct DisplayApi *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_dispapiPreDestruct(struct DisplayApi *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispapiControlFilter(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispapiMap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispapiUnmap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_RsResource_dispapiIsPartialUnmapSupported(struct DisplayApi *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispapiMapTo(struct DisplayApi *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispapiUnmapFrom(struct DisplayApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_dispapiGetRefCount(struct DisplayApi *pResource); // this +void __nvoc_up_thunk_RsResource_dispapiAddAdditionalDependants(struct RsClient *pClient, struct DisplayApi *pResource, RsResourceRef *pReference); // this +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispapiGetNotificationListPtr(struct DisplayApi *pNotifier); // this +struct NotifShare * __nvoc_up_thunk_Notifier_dispapiGetNotificationShare(struct DisplayApi *pNotifier); // this +void __nvoc_up_thunk_Notifier_dispapiSetNotificationShare(struct DisplayApi *pNotifier, struct NotifShare *pNotifShare); // this +NV_STATUS __nvoc_up_thunk_Notifier_dispapiUnregisterEvent(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // this +NV_STATUS __nvoc_up_thunk_Notifier_dispapiGetOrAllocNotifShare(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DisplayApi), + /*classId=*/ classId(DisplayApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DisplayApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DisplayApi, + /*pCastInfo=*/ &__nvoc_castinfo__DisplayApi, + /*pExportInfo=*/ &__nvoc_export_info__DisplayApi +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__DisplayApi __nvoc_metadata__DisplayApi = { + .rtti.pClassDef = &__nvoc_class_def_DisplayApi, // (dispapi) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DisplayApi, + .rtti.offset = 0, + .metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super + .metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.rtti.offset 
= NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource), + .metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^2 + .metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^3 + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^2 + .metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + .metadata__Notifier.rtti.pClassDef = &__nvoc_class_def_Notifier, // (notify) super + .metadata__Notifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Notifier.rtti.offset = NV_OFFSETOF(DisplayApi, __nvoc_base_Notifier), + .metadata__Notifier.metadata__INotifier.rtti.pClassDef = &__nvoc_class_def_INotifier, // (inotify) super^2 + .metadata__Notifier.metadata__INotifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Notifier.metadata__INotifier.rtti.offset = NV_OFFSETOF(DisplayApi, __nvoc_base_Notifier.__nvoc_base_INotifier), + + .vtable.__dispapiControl__ = &dispapiControl_IMPL, // virtual override (res) base (rmres) + .metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_DisplayApi_resControl, // virtual + .vtable.__dispapiControl_Prologue__ = &dispapiControl_Prologue_IMPL, // virtual override (res) base (rmres) + .metadata__RmResource.vtable.__rmresControl_Prologue__ = &__nvoc_down_thunk_DisplayApi_rmresControl_Prologue, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__dispapiControl_Epilogue__ = &dispapiControl_Epilogue_IMPL, // virtual override (res) base (rmres) + .metadata__RmResource.vtable.__rmresControl_Epilogue__ = &__nvoc_down_thunk_DisplayApi_rmresControl_Epilogue, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__dispapiAccessCallback__ = &__nvoc_up_thunk_RmResource_dispapiAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__dispapiShareCallback__ = &__nvoc_up_thunk_RmResource_dispapiShareCallback, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = 
&__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__dispapiGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_dispapiGetMemInterMapParams, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + .vtable.__dispapiCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispapiCheckMemInterUnmap, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual + .vtable.__dispapiGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_dispapiGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual + .vtable.__dispapiControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispapiControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__dispapiControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispapiControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__dispapiCanCopy__ = &__nvoc_up_thunk_RsResource_dispapiCanCopy, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__dispapiIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispapiIsDuplicate, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__dispapiPreDestruct__ = &__nvoc_up_thunk_RsResource_dispapiPreDestruct, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__dispapiControlFilter__ = &__nvoc_up_thunk_RsResource_dispapiControlFilter, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__dispapiMap__ = &__nvoc_up_thunk_RsResource_dispapiMap, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &resMap_IMPL, // 
virtual + .vtable.__dispapiUnmap__ = &__nvoc_up_thunk_RsResource_dispapiUnmap, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &resUnmap_IMPL, // virtual + .vtable.__dispapiIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_dispapiIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__dispapiMapTo__ = &__nvoc_up_thunk_RsResource_dispapiMapTo, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__dispapiUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispapiUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__dispapiGetRefCount__ = &__nvoc_up_thunk_RsResource_dispapiGetRefCount, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__dispapiAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispapiAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual + .vtable.__dispapiGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_dispapiGetNotificationListPtr, // virtual inherited (notify) base (notify) + .metadata__Notifier.vtable.__notifyGetNotificationListPtr__ = &notifyGetNotificationListPtr_IMPL, // virtual override (inotify) base (inotify) + .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationListPtr__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr, // pure virtual + .vtable.__dispapiGetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispapiGetNotificationShare, // virtual inherited (notify) base (notify) + .metadata__Notifier.vtable.__notifyGetNotificationShare__ = &notifyGetNotificationShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationShare, // pure virtual + .vtable.__dispapiSetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispapiSetNotificationShare, // virtual inherited (notify) base (notify) + .metadata__Notifier.vtable.__notifySetNotificationShare__ = &notifySetNotificationShare_IMPL, // virtual override (inotify) base (inotify) + 
.metadata__Notifier.metadata__INotifier.vtable.__inotifySetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifySetNotificationShare, // pure virtual + .vtable.__dispapiUnregisterEvent__ = &__nvoc_up_thunk_Notifier_dispapiUnregisterEvent, // virtual inherited (notify) base (notify) + .metadata__Notifier.vtable.__notifyUnregisterEvent__ = &notifyUnregisterEvent_IMPL, // virtual override (inotify) base (inotify) + .metadata__Notifier.metadata__INotifier.vtable.__inotifyUnregisterEvent__ = &__nvoc_down_thunk_Notifier_inotifyUnregisterEvent, // pure virtual + .vtable.__dispapiGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_dispapiGetOrAllocNotifShare, // virtual inherited (notify) base (notify) + .metadata__Notifier.vtable.__notifyGetOrAllocNotifShare__ = &notifyGetOrAllocNotifShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetOrAllocNotifShare__ = &__nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare, // pure virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__DisplayApi = { + .numRelatives = 7, + .relatives = { + &__nvoc_metadata__DisplayApi.rtti, // [0]: (dispapi) this + &__nvoc_metadata__DisplayApi.metadata__RmResource.rtti, // [1]: (rmres) super + &__nvoc_metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti, // [2]: (res) super^2 + &__nvoc_metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [3]: (obj) super^3 + &__nvoc_metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti, // [4]: (rmrescmn) super^2 + &__nvoc_metadata__DisplayApi.metadata__Notifier.rtti, // [5]: (notify) super + &__nvoc_metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti, // [6]: (inotify) super^2 + } +}; + +// 3 down-thunk(s) defined to bridge methods in DisplayApi from superclasses + +// dispapiControl: virtual override (res) base (rmres) +NV_STATUS __nvoc_down_thunk_DisplayApi_resControl(struct RsResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return dispapiControl((struct DisplayApi *)(((unsigned char *) pDisplayApi) - NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// dispapiControl_Prologue: virtual override (res) base (rmres) +NV_STATUS __nvoc_down_thunk_DisplayApi_rmresControl_Prologue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return dispapiControl_Prologue((struct DisplayApi *)(((unsigned char *) pDisplayApi) - NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource)), pCallContext, pRsParams); +} + +// dispapiControl_Epilogue: virtual override (res) base (rmres) +void __nvoc_down_thunk_DisplayApi_rmresControl_Epilogue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + dispapiControl_Epilogue((struct DisplayApi *)(((unsigned char *) pDisplayApi) - NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource)), pCallContext, pRsParams); +} + + +// 23 up-thunk(s) defined to bridge methods in DisplayApi to superclasses + +// dispapiAccessCallback: virtual inherited (rmres) base (rmres) +NvBool __nvoc_up_thunk_RmResource_dispapiAccessCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DisplayApi, 
__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// dispapiShareCallback: virtual inherited (rmres) base (rmres) +NvBool __nvoc_up_thunk_RmResource_dispapiShareCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// dispapiGetMemInterMapParams: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_dispapiGetMemInterMapParams(struct DisplayApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource)), pParams); +} + +// dispapiCheckMemInterUnmap: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_dispapiCheckMemInterUnmap(struct DisplayApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource)), bSubdeviceHandleProvided); +} + +// dispapiGetMemoryMappingDescriptor: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_dispapiGetMemoryMappingDescriptor(struct DisplayApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource)), ppMemDesc); +} + +// dispapiControlSerialization_Prologue: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_dispapiControlSerialization_Prologue(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispapiControlSerialization_Epilogue: virtual inherited (rmres) base (rmres) +void __nvoc_up_thunk_RmResource_dispapiControlSerialization_Epilogue(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispapiCanCopy: virtual inherited (res) base (rmres) +NvBool __nvoc_up_thunk_RsResource_dispapiCanCopy(struct DisplayApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispapiIsDuplicate: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_dispapiIsDuplicate(struct DisplayApi *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate); +} + +// dispapiPreDestruct: virtual inherited (res) base (rmres) +void __nvoc_up_thunk_RsResource_dispapiPreDestruct(struct DisplayApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispapiControlFilter: virtual inherited (res) base (rmres) +NV_STATUS 
__nvoc_up_thunk_RsResource_dispapiControlFilter(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// dispapiMap: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_dispapiMap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams, pCpuMapping); +} + +// dispapiUnmap: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_dispapiUnmap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pCpuMapping); +} + +// dispapiIsPartialUnmapSupported: inline virtual inherited (res) base (rmres) body +NvBool __nvoc_up_thunk_RsResource_dispapiIsPartialUnmapSupported(struct DisplayApi *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispapiMapTo: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_dispapiMapTo(struct DisplayApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// dispapiUnmapFrom: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_dispapiUnmapFrom(struct DisplayApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// dispapiGetRefCount: virtual inherited (res) base (rmres) +NvU32 __nvoc_up_thunk_RsResource_dispapiGetRefCount(struct DisplayApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispapiAddAdditionalDependants: virtual inherited (res) base (rmres) +void __nvoc_up_thunk_RsResource_dispapiAddAdditionalDependants(struct RsClient *pClient, struct DisplayApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + +// dispapiGetNotificationListPtr: virtual inherited (notify) base (notify) +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispapiGetNotificationListPtr(struct DisplayApi *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DisplayApi, __nvoc_base_Notifier))); +} + +// dispapiGetNotificationShare: virtual inherited (notify) base (notify) +struct NotifShare * __nvoc_up_thunk_Notifier_dispapiGetNotificationShare(struct DisplayApi *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DisplayApi, __nvoc_base_Notifier))); +} + +// dispapiSetNotificationShare: 
virtual inherited (notify) base (notify) +void __nvoc_up_thunk_Notifier_dispapiSetNotificationShare(struct DisplayApi *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DisplayApi, __nvoc_base_Notifier)), pNotifShare); +} + +// dispapiUnregisterEvent: virtual inherited (notify) base (notify) +NV_STATUS __nvoc_up_thunk_Notifier_dispapiUnregisterEvent(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DisplayApi, __nvoc_base_Notifier)), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +// dispapiGetOrAllocNotifShare: virtual inherited (notify) base (notify) +NV_STATUS __nvoc_up_thunk_Notifier_dispapiGetOrAllocNotifShare(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DisplayApi, __nvoc_base_Notifier)), hNotifierClient, hNotifierResource, ppNotifShare); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__DisplayApi = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_DisplayApi(DisplayApi *pThis) { + __nvoc_dispapiDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DisplayApi(DisplayApi *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_DisplayApi(DisplayApi *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DisplayApi_fail_RmResource; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_DisplayApi_fail_Notifier; + __nvoc_init_dataField_DisplayApi(pThis, pRmhalspecowner); + + status = __nvoc_dispapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DisplayApi_fail__init; + goto __nvoc_ctor_DisplayApi_exit; // Success + +__nvoc_ctor_DisplayApi_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_DisplayApi_fail_Notifier: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_DisplayApi_fail_RmResource: +__nvoc_ctor_DisplayApi_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_DisplayApi_1(DisplayApi *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = 
(unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} // End __nvoc_init_funcTable_DisplayApi_1 + + +// Initialize vtable(s) for 26 virtual method(s). +void __nvoc_init_funcTable_DisplayApi(DisplayApi *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_DisplayApi_1(pThis, pRmhalspecowner); +} + +// Initialize newly constructed object. +void __nvoc_init__DisplayApi(DisplayApi *pThis, RmHalspecOwner *pRmhalspecowner) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^3 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^2 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^2 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; // (rmres) super + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; // (inotify) super^2 + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; // (notify) super + pThis->__nvoc_pbase_DisplayApi = pThis; // (dispapi) this + + // Recurse to superclass initialization function(s). + __nvoc_init__RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init__Notifier(&pThis->__nvoc_base_Notifier); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^3 + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__DisplayApi.metadata__RmResource.metadata__RsResource; // (res) super^2 + pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^2 + pThis->__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__DisplayApi.metadata__RmResource; // (rmres) super + pThis->__nvoc_base_Notifier.__nvoc_base_INotifier.__nvoc_metadata_ptr = &__nvoc_metadata__DisplayApi.metadata__Notifier.metadata__INotifier; // (inotify) super^2 + pThis->__nvoc_base_Notifier.__nvoc_metadata_ptr = &__nvoc_metadata__DisplayApi.metadata__Notifier; // (notify) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__DisplayApi; // (dispapi) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_DisplayApi(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_DisplayApi(DisplayApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + DisplayApi *pThis; + RmHalspecOwner *pRmhalspecowner; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(DisplayApi), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(DisplayApi)); + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // pParent must be a valid object that derives from a halspec owner class. 
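+ // Note: if pParent is not itself an RmHalspecOwner, the lookup below walks + // the ancestor chain via objFindAncestorOfType(); the halspec owner supplies + // the RmVariantHal index consumed by __nvoc_init_dataField_DisplayApi() and + // the vtable-initialization helpers above.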
+ NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_ARGUMENT); + + // Link the child into the parent unless flagged not to do so. + if (!(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init__DisplayApi(pThis, pRmhalspecowner); + status = __nvoc_ctor_DisplayApi(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DisplayApi_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_DisplayApi_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(DisplayApi)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DisplayApi(DisplayApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DisplayApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x999839 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispObject; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +// Forward declarations for DispObject +void __nvoc_init__DisplayApi(DisplayApi*, RmHalspecOwner *pRmhalspecowner); +void __nvoc_init__DispObject(DispObject*, RmHalspecOwner *pRmhalspecowner); +void __nvoc_init_funcTable_DispObject(DispObject*, RmHalspecOwner *pRmhalspecowner); +NV_STATUS __nvoc_ctor_DispObject(DispObject*, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_DispObject(DispObject*, RmHalspecOwner *pRmhalspecowner); +void __nvoc_dtor_DispObject(DispObject*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__DispObject; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__DispObject; + +// Down-thunk(s) to bridge DispObject methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient 
*pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +PEVENTNOTIFICATION * __nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier); // super^2 +struct NotifShare * __nvoc_down_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier); // super^2 +void __nvoc_down_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare); // super^2 +NV_STATUS __nvoc_down_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super^2 +NV_STATUS __nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super^2 +NV_STATUS __nvoc_down_thunk_DisplayApi_resControl(struct RsResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_DisplayApi_rmresControl_Prologue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // super +void __nvoc_down_thunk_DisplayApi_rmresControl_Epilogue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // super + +// Up-thunk(s) to bridge DispObject methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2 +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2 +NV_STATUS 
__nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2 +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2 +NvBool __nvoc_up_thunk_RmResource_dispapiAccessCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NvBool __nvoc_up_thunk_RmResource_dispapiShareCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_up_thunk_RmResource_dispapiGetMemInterMapParams(struct DisplayApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_dispapiCheckMemInterUnmap(struct DisplayApi *pRmResource, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_up_thunk_RmResource_dispapiGetMemoryMappingDescriptor(struct DisplayApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super +NV_STATUS __nvoc_up_thunk_RmResource_dispapiControlSerialization_Prologue(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_dispapiControlSerialization_Epilogue(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_dispapiCanCopy(struct DisplayApi *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiIsDuplicate(struct DisplayApi *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_dispapiPreDestruct(struct DisplayApi *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiControlFilter(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiMap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiUnmap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_up_thunk_RsResource_dispapiIsPartialUnmapSupported(struct DisplayApi *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiMapTo(struct DisplayApi *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiUnmapFrom(struct DisplayApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_dispapiGetRefCount(struct DisplayApi *pResource); // super +void __nvoc_up_thunk_RsResource_dispapiAddAdditionalDependants(struct RsClient *pClient, struct DisplayApi *pResource, RsResourceRef *pReference); // super +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispapiGetNotificationListPtr(struct DisplayApi *pNotifier); // super +struct NotifShare * __nvoc_up_thunk_Notifier_dispapiGetNotificationShare(struct DisplayApi *pNotifier); // super +void __nvoc_up_thunk_Notifier_dispapiSetNotificationShare(struct DisplayApi *pNotifier, struct NotifShare *pNotifShare); // super +NV_STATUS 
__nvoc_up_thunk_Notifier_dispapiUnregisterEvent(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super +NV_STATUS __nvoc_up_thunk_Notifier_dispapiGetOrAllocNotifShare(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super +NV_STATUS __nvoc_up_thunk_DisplayApi_dispobjControl(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_DisplayApi_dispobjControl_Prologue(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // this +void __nvoc_up_thunk_DisplayApi_dispobjControl_Epilogue(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // this +NvBool __nvoc_up_thunk_RmResource_dispobjAccessCallback(struct DispObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NvBool __nvoc_up_thunk_RmResource_dispobjShareCallback(struct DispObject *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispobjGetMemInterMapParams(struct DispObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispobjCheckMemInterUnmap(struct DispObject *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispobjGetMemoryMappingDescriptor(struct DispObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispobjControlSerialization_Prologue(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_dispobjControlSerialization_Epilogue(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_dispobjCanCopy(struct DispObject *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispobjIsDuplicate(struct DispObject *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_dispobjPreDestruct(struct DispObject *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispobjControlFilter(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispobjMap(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispobjUnmap(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_RsResource_dispobjIsPartialUnmapSupported(struct DispObject *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispobjMapTo(struct DispObject *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispobjUnmapFrom(struct DispObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_dispobjGetRefCount(struct DispObject *pResource); // this +void __nvoc_up_thunk_RsResource_dispobjAddAdditionalDependants(struct RsClient *pClient, struct DispObject *pResource, RsResourceRef *pReference); // this 
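// --- Illustrative aside (editorial; not part of the generated diff): the up- and down-thunks declared above all follow a single pattern, which this minimal, self-contained sketch reproduces with hypothetical toy types (ToyBase, ToyDerived, toyUpThunkGetRefCount). A thunk converts a derived-class pointer into a pointer to an embedded base-class member by adding the member's byte offset, exactly what the generated thunks do with NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi).
#include <stddef.h>

typedef struct ToyBase    { int refCount; }            ToyBase;
typedef struct ToyDerived { int extra; ToyBase base; } ToyDerived;

// The real implementation is defined against the base class...
static int toyBaseGetRefCount(ToyBase *pBase) { return pBase->refCount; }

// ...and a generated-style up-thunk bridges a derived pointer to it by
// offsetting to the embedded base, mirroring the NV_OFFSETOF casts above.
static int toyUpThunkGetRefCount(ToyDerived *pDerived) {
    return toyBaseGetRefCount(
        (ToyBase *)(((unsigned char *)pDerived) + offsetof(ToyDerived, base)));
}
// --- End illustrative aside.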
+PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispobjGetNotificationListPtr(struct DispObject *pNotifier); // this +struct NotifShare * __nvoc_up_thunk_Notifier_dispobjGetNotificationShare(struct DispObject *pNotifier); // this +void __nvoc_up_thunk_Notifier_dispobjSetNotificationShare(struct DispObject *pNotifier, struct NotifShare *pNotifShare); // this +NV_STATUS __nvoc_up_thunk_Notifier_dispobjUnregisterEvent(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // this +NV_STATUS __nvoc_up_thunk_Notifier_dispobjGetOrAllocNotifShare(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispObject = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispObject), + /*classId=*/ classId(DispObject), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispObject", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispObject, + /*pCastInfo=*/ &__nvoc_castinfo__DispObject, + /*pExportInfo=*/ &__nvoc_export_info__DispObject +}; + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_DispObject[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetRmFreeFlags_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*flags=*/ 0x40u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700117u, + /*paramSize=*/ sizeof(NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetRmFreeFlags" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdIMPSetGetParameter_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700118u, + /*paramSize=*/ sizeof(NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdIMPSetGetParameter" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700202u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgStatus" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgUnderflowProp_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700203u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgUnderflowProp" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void 
(*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetRgUnderflowProp_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700204u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetRgUnderflowProp" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgFliplockProp_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700205u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgFliplockProp" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetRgFliplockProp_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*flags=*/ 0x40u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700206u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetRgFliplockProp" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgConnectedLockpinStateless_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x5070020au, + /*paramSize=*/ sizeof(NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgConnectedLockpinStateless" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgScanLine_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x5070020cu, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgScanLine" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetSorOpMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700422u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetSorOpMode" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetSorOpMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700423u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ 
"dispobjCtrlCmdSetSorOpMode" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetSorFlushMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700457u, + /*paramSize=*/ sizeof(NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetSorFlushMode" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSystemGetCapsV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700709u, + /*paramSize=*/ sizeof(NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSystemGetCapsV2" +#endif + }, + +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__DispObject __nvoc_metadata__DispObject = { + .rtti.pClassDef = &__nvoc_class_def_DispObject, // (dispobj) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispObject, + .rtti.offset = 0, + .metadata__DisplayApi.rtti.pClassDef = &__nvoc_class_def_DisplayApi, // (dispapi) super + .metadata__DisplayApi.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DisplayApi.rtti.offset = NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi), + .metadata__DisplayApi.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2 + .metadata__DisplayApi.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DisplayApi.metadata__RmResource.rtti.offset = NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource), + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3 + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4 + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3 + .metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + .metadata__DisplayApi.metadata__Notifier.rtti.pClassDef = &__nvoc_class_def_Notifier, // (notify) super^2 + .metadata__DisplayApi.metadata__Notifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DisplayApi.metadata__Notifier.rtti.offset = NV_OFFSETOF(DispObject, 
__nvoc_base_DisplayApi.__nvoc_base_Notifier), + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti.pClassDef = &__nvoc_class_def_INotifier, // (inotify) super^3 + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti.offset = NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier), + + .vtable.__dispobjControl__ = &__nvoc_up_thunk_DisplayApi_dispobjControl, // virtual inherited (dispapi) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiControl__ = &dispapiControl_IMPL, // virtual override (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_DisplayApi_resControl, // virtual + .vtable.__dispobjControl_Prologue__ = &__nvoc_up_thunk_DisplayApi_dispobjControl_Prologue, // virtual inherited (dispapi) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiControl_Prologue__ = &dispapiControl_Prologue_IMPL, // virtual override (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControl_Prologue__ = &__nvoc_down_thunk_DisplayApi_rmresControl_Prologue, // virtual override (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__dispobjControl_Epilogue__ = &__nvoc_up_thunk_DisplayApi_dispobjControl_Epilogue, // virtual inherited (dispapi) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiControl_Epilogue__ = &dispapiControl_Epilogue_IMPL, // virtual override (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &__nvoc_down_thunk_DisplayApi_rmresControl_Epilogue, // virtual override (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__dispobjAccessCallback__ = &__nvoc_up_thunk_RmResource_dispobjAccessCallback, // virtual inherited (rmres) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiAccessCallback__ = &__nvoc_up_thunk_RmResource_dispapiAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__dispobjShareCallback__ = &__nvoc_up_thunk_RmResource_dispobjShareCallback, // virtual inherited (rmres) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiShareCallback__ = &__nvoc_up_thunk_RmResource_dispapiShareCallback, // virtual inherited (rmres) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL, // virtual override (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__dispobjGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_dispobjGetMemInterMapParams, // virtual inherited (rmres) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiGetMemInterMapParams__ = 
&__nvoc_up_thunk_RmResource_dispapiGetMemInterMapParams, // virtual inherited (rmres) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + .vtable.__dispobjCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispobjCheckMemInterUnmap, // virtual inherited (rmres) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispapiCheckMemInterUnmap, // virtual inherited (rmres) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual + .vtable.__dispobjGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_dispobjGetMemoryMappingDescriptor, // virtual inherited (rmres) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_dispapiGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual + .vtable.__dispobjControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispobjControlSerialization_Prologue, // virtual inherited (rmres) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispapiControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__dispobjControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispobjControlSerialization_Epilogue, // virtual inherited (rmres) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispapiControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__dispobjCanCopy__ = &__nvoc_up_thunk_RsResource_dispobjCanCopy, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiCanCopy__ = &__nvoc_up_thunk_RsResource_dispapiCanCopy, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__dispobjIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispobjIsDuplicate, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispapiIsDuplicate, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + 
.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__dispobjPreDestruct__ = &__nvoc_up_thunk_RsResource_dispobjPreDestruct, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiPreDestruct__ = &__nvoc_up_thunk_RsResource_dispapiPreDestruct, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__dispobjControlFilter__ = &__nvoc_up_thunk_RsResource_dispobjControlFilter, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiControlFilter__ = &__nvoc_up_thunk_RsResource_dispapiControlFilter, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__dispobjMap__ = &__nvoc_up_thunk_RsResource_dispobjMap, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiMap__ = &__nvoc_up_thunk_RsResource_dispapiMap, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &resMap_IMPL, // virtual + .vtable.__dispobjUnmap__ = &__nvoc_up_thunk_RsResource_dispobjUnmap, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiUnmap__ = &__nvoc_up_thunk_RsResource_dispapiUnmap, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &resUnmap_IMPL, // virtual + .vtable.__dispobjIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_dispobjIsPartialUnmapSupported, // inline virtual inherited (res) base (dispapi) body + .metadata__DisplayApi.vtable.__dispapiIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_dispapiIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__dispobjMapTo__ = &__nvoc_up_thunk_RsResource_dispobjMapTo, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiMapTo__ = &__nvoc_up_thunk_RsResource_dispapiMapTo, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__dispobjUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispobjUnmapFrom, // virtual 
inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispapiUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__dispobjGetRefCount__ = &__nvoc_up_thunk_RsResource_dispobjGetRefCount, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiGetRefCount__ = &__nvoc_up_thunk_RsResource_dispapiGetRefCount, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__dispobjAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispobjAddAdditionalDependants, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispapiAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual + .vtable.__dispobjGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_dispobjGetNotificationListPtr, // virtual inherited (notify) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_dispapiGetNotificationListPtr, // virtual inherited (notify) base (notify) + .metadata__DisplayApi.metadata__Notifier.vtable.__notifyGetNotificationListPtr__ = &notifyGetNotificationListPtr_IMPL, // virtual override (inotify) base (inotify) + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationListPtr__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr, // pure virtual + .vtable.__dispobjGetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispobjGetNotificationShare, // virtual inherited (notify) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiGetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispapiGetNotificationShare, // virtual inherited (notify) base (notify) + .metadata__DisplayApi.metadata__Notifier.vtable.__notifyGetNotificationShare__ = &notifyGetNotificationShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationShare, // pure virtual + .vtable.__dispobjSetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispobjSetNotificationShare, // virtual inherited (notify) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiSetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispapiSetNotificationShare, // virtual inherited (notify) base (notify) + .metadata__DisplayApi.metadata__Notifier.vtable.__notifySetNotificationShare__ = &notifySetNotificationShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifySetNotificationShare__ =
&__nvoc_down_thunk_Notifier_inotifySetNotificationShare, // pure virtual + .vtable.__dispobjUnregisterEvent__ = &__nvoc_up_thunk_Notifier_dispobjUnregisterEvent, // virtual inherited (notify) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiUnregisterEvent__ = &__nvoc_up_thunk_Notifier_dispapiUnregisterEvent, // virtual inherited (notify) base (notify) + .metadata__DisplayApi.metadata__Notifier.vtable.__notifyUnregisterEvent__ = &notifyUnregisterEvent_IMPL, // virtual override (inotify) base (inotify) + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyUnregisterEvent__ = &__nvoc_down_thunk_Notifier_inotifyUnregisterEvent, // pure virtual + .vtable.__dispobjGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_dispobjGetOrAllocNotifShare, // virtual inherited (notify) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_dispapiGetOrAllocNotifShare, // virtual inherited (notify) base (notify) + .metadata__DisplayApi.metadata__Notifier.vtable.__notifyGetOrAllocNotifShare__ = &notifyGetOrAllocNotifShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetOrAllocNotifShare__ = &__nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare, // pure virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__DispObject = { + .numRelatives = 8, + .relatives = { + &__nvoc_metadata__DispObject.rtti, // [0]: (dispobj) this + &__nvoc_metadata__DispObject.metadata__DisplayApi.rtti, // [1]: (dispapi) super + &__nvoc_metadata__DispObject.metadata__DisplayApi.metadata__RmResource.rtti, // [2]: (rmres) super^2 + &__nvoc_metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3 + &__nvoc_metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4 + &__nvoc_metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3 + &__nvoc_metadata__DispObject.metadata__DisplayApi.metadata__Notifier.rtti, // [6]: (notify) super^2 + &__nvoc_metadata__DispObject.metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti, // [7]: (inotify) super^3 + } +}; + +// 26 up-thunk(s) defined to bridge methods in DispObject to superclasses + +// dispobjControl: virtual inherited (dispapi) base (dispapi) +NV_STATUS __nvoc_up_thunk_DisplayApi_dispobjControl(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return dispapiControl((struct DisplayApi *)(((unsigned char *) pDisplayApi) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi)), pCallContext, pParams); +} + +// dispobjControl_Prologue: virtual inherited (dispapi) base (dispapi) +NV_STATUS __nvoc_up_thunk_DisplayApi_dispobjControl_Prologue(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return dispapiControl_Prologue((struct DisplayApi *)(((unsigned char *) pDisplayApi) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi)), pCallContext, pRsParams); +} + +// dispobjControl_Epilogue: virtual inherited (dispapi) base (dispapi) +void __nvoc_up_thunk_DisplayApi_dispobjControl_Epilogue(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + dispapiControl_Epilogue((struct DisplayApi *)(((unsigned char *)
pDisplayApi) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi)), pCallContext, pRsParams); +} + +// dispobjAccessCallback: virtual inherited (rmres) base (dispapi) +NvBool __nvoc_up_thunk_RmResource_dispobjAccessCallback(struct DispObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// dispobjShareCallback: virtual inherited (rmres) base (dispapi) +NvBool __nvoc_up_thunk_RmResource_dispobjShareCallback(struct DispObject *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// dispobjGetMemInterMapParams: virtual inherited (rmres) base (dispapi) +NV_STATUS __nvoc_up_thunk_RmResource_dispobjGetMemInterMapParams(struct DispObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), pParams); +} + +// dispobjCheckMemInterUnmap: virtual inherited (rmres) base (dispapi) +NV_STATUS __nvoc_up_thunk_RmResource_dispobjCheckMemInterUnmap(struct DispObject *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), bSubdeviceHandleProvided); +} + +// dispobjGetMemoryMappingDescriptor: virtual inherited (rmres) base (dispapi) +NV_STATUS __nvoc_up_thunk_RmResource_dispobjGetMemoryMappingDescriptor(struct DispObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), ppMemDesc); +} + +// dispobjControlSerialization_Prologue: virtual inherited (rmres) base (dispapi) +NV_STATUS __nvoc_up_thunk_RmResource_dispobjControlSerialization_Prologue(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispobjControlSerialization_Epilogue: virtual inherited (rmres) base (dispapi) +void __nvoc_up_thunk_RmResource_dispobjControlSerialization_Epilogue(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispobjCanCopy: virtual inherited (res) base (dispapi) +NvBool __nvoc_up_thunk_RsResource_dispobjCanCopy(struct DispObject *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispobjIsDuplicate: virtual inherited (res) base (dispapi) +NV_STATUS 
__nvoc_up_thunk_RsResource_dispobjIsDuplicate(struct DispObject *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate); +} + +// dispobjPreDestruct: virtual inherited (res) base (dispapi) +void __nvoc_up_thunk_RsResource_dispobjPreDestruct(struct DispObject *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispobjControlFilter: virtual inherited (res) base (dispapi) +NV_STATUS __nvoc_up_thunk_RsResource_dispobjControlFilter(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// dispobjMap: virtual inherited (res) base (dispapi) +NV_STATUS __nvoc_up_thunk_RsResource_dispobjMap(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams, pCpuMapping); +} + +// dispobjUnmap: virtual inherited (res) base (dispapi) +NV_STATUS __nvoc_up_thunk_RsResource_dispobjUnmap(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pCpuMapping); +} + +// dispobjIsPartialUnmapSupported: inline virtual inherited (res) base (dispapi) body +NvBool __nvoc_up_thunk_RsResource_dispobjIsPartialUnmapSupported(struct DispObject *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispobjMapTo: virtual inherited (res) base (dispapi) +NV_STATUS __nvoc_up_thunk_RsResource_dispobjMapTo(struct DispObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// dispobjUnmapFrom: virtual inherited (res) base (dispapi) +NV_STATUS __nvoc_up_thunk_RsResource_dispobjUnmapFrom(struct DispObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// dispobjGetRefCount: virtual inherited (res) base (dispapi) +NvU32 __nvoc_up_thunk_RsResource_dispobjGetRefCount(struct DispObject *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispobjAddAdditionalDependants: virtual inherited (res) base (dispapi) +void __nvoc_up_thunk_RsResource_dispobjAddAdditionalDependants(struct RsClient *pClient, struct DispObject *pResource, 
RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + +// dispobjGetNotificationListPtr: virtual inherited (notify) base (dispapi) +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispobjGetNotificationListPtr(struct DispObject *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_Notifier))); +} + +// dispobjGetNotificationShare: virtual inherited (notify) base (dispapi) +struct NotifShare * __nvoc_up_thunk_Notifier_dispobjGetNotificationShare(struct DispObject *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_Notifier))); +} + +// dispobjSetNotificationShare: virtual inherited (notify) base (dispapi) +void __nvoc_up_thunk_Notifier_dispobjSetNotificationShare(struct DispObject *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_Notifier)), pNotifShare); +} + +// dispobjUnregisterEvent: virtual inherited (notify) base (dispapi) +NV_STATUS __nvoc_up_thunk_Notifier_dispobjUnregisterEvent(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_Notifier)), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +// dispobjGetOrAllocNotifShare: virtual inherited (notify) base (dispapi) +NV_STATUS __nvoc_up_thunk_Notifier_dispobjGetOrAllocNotifShare(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_Notifier)), hNotifierClient, hNotifierResource, ppNotifShare); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__DispObject = +{ + /*numEntries=*/ 13, + /*pExportEntries=*/ __nvoc_exported_method_def_DispObject +}; + +void __nvoc_dtor_DisplayApi(DisplayApi*); +void __nvoc_dtor_DispObject(DispObject *pThis) { + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispObject(DispObject *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_DisplayApi(DisplayApi* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_DispObject(DispObject *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto 
__nvoc_ctor_DispObject_fail_DisplayApi; + __nvoc_init_dataField_DispObject(pThis, pRmhalspecowner); + + status = __nvoc_dispobjConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispObject_fail__init; + goto __nvoc_ctor_DispObject_exit; // Success + +__nvoc_ctor_DispObject_fail__init: + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); +__nvoc_ctor_DispObject_fail_DisplayApi: +__nvoc_ctor_DispObject_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_DispObject_1(DispObject *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // dispobjCtrlCmdSetRmFreeFlags -- exported (id=0x50700117) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + pThis->__dispobjCtrlCmdSetRmFreeFlags__ = &dispobjCtrlCmdSetRmFreeFlags_IMPL; +#endif + + // dispobjCtrlCmdIMPSetGetParameter -- exported (id=0x50700118) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__dispobjCtrlCmdIMPSetGetParameter__ = &dispobjCtrlCmdIMPSetGetParameter_IMPL; +#endif + + // dispobjCtrlCmdGetRgStatus -- exported (id=0x50700202) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispobjCtrlCmdGetRgStatus__ = &dispobjCtrlCmdGetRgStatus_IMPL; +#endif + + // dispobjCtrlCmdGetRgUnderflowProp -- exported (id=0x50700203) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__dispobjCtrlCmdGetRgUnderflowProp__ = &dispobjCtrlCmdGetRgUnderflowProp_IMPL; +#endif + + // dispobjCtrlCmdSetRgUnderflowProp -- exported (id=0x50700204) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__dispobjCtrlCmdSetRgUnderflowProp__ = &dispobjCtrlCmdSetRgUnderflowProp_IMPL; +#endif + + // dispobjCtrlCmdGetRgFliplockProp -- exported (id=0x50700205) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__dispobjCtrlCmdGetRgFliplockProp__ = &dispobjCtrlCmdGetRgFliplockProp_IMPL; +#endif + + // dispobjCtrlCmdSetRgFliplockProp -- exported (id=0x50700206) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + pThis->__dispobjCtrlCmdSetRgFliplockProp__ = &dispobjCtrlCmdSetRgFliplockProp_IMPL; +#endif + + // dispobjCtrlCmdGetRgConnectedLockpinStateless -- exported (id=0x5070020a) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__dispobjCtrlCmdGetRgConnectedLockpinStateless__ = &dispobjCtrlCmdGetRgConnectedLockpinStateless_IMPL; +#endif + + // dispobjCtrlCmdGetRgScanLine -- exported (id=0x5070020c) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__dispobjCtrlCmdGetRgScanLine__ = &dispobjCtrlCmdGetRgScanLine_IMPL; +#endif + + // dispobjCtrlCmdGetSorOpMode -- exported (id=0x50700422) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispobjCtrlCmdGetSorOpMode__ = &dispobjCtrlCmdGetSorOpMode_IMPL; +#endif + + // dispobjCtrlCmdSetSorOpMode -- exported (id=0x50700423) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispobjCtrlCmdSetSorOpMode__ = &dispobjCtrlCmdSetSorOpMode_IMPL; +#endif + + // dispobjCtrlCmdSetSorFlushMode -- exported (id=0x50700457) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispobjCtrlCmdSetSorFlushMode__ = &dispobjCtrlCmdSetSorFlushMode_IMPL; +#endif + + // dispobjCtrlCmdSystemGetCapsV2 -- exported (id=0x50700709) +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__dispobjCtrlCmdSystemGetCapsV2__ = &dispobjCtrlCmdSystemGetCapsV2_IMPL; +#endif +} // End __nvoc_init_funcTable_DispObject_1 with approximately 13 basic block(s). + + +// Initialize vtable(s) for 39 virtual method(s). +void __nvoc_init_funcTable_DispObject(DispObject *pThis, RmHalspecOwner *pRmhalspecowner) { + + // Initialize vtable(s) with 13 per-object function pointer(s). + __nvoc_init_funcTable_DispObject_1(pThis, pRmhalspecowner); +} + +// Initialize newly constructed object. +void __nvoc_init__DispObject(DispObject *pThis, RmHalspecOwner *pRmhalspecowner) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource; // (rmres) super^2 + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier; // (inotify) super^3 + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier; // (notify) super^2 + pThis->__nvoc_pbase_DisplayApi = &pThis->__nvoc_base_DisplayApi; // (dispapi) super + pThis->__nvoc_pbase_DispObject = pThis; // (dispobj) this + + // Recurse to superclass initialization function(s). + __nvoc_init__DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4 + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource; // (res) super^3 + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispObject.metadata__DisplayApi.metadata__RmResource; // (rmres) super^2 + pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier.__nvoc_metadata_ptr = &__nvoc_metadata__DispObject.metadata__DisplayApi.metadata__Notifier.metadata__INotifier; // (inotify) super^3 + pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr = &__nvoc_metadata__DispObject.metadata__DisplayApi.metadata__Notifier; // (notify) super^2 + pThis->__nvoc_base_DisplayApi.__nvoc_metadata_ptr = &__nvoc_metadata__DispObject.metadata__DisplayApi; // (dispapi) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__DispObject; // (dispobj) this + + // Initialize per-object vtables. 
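+    // (Note: only the 13 exported control handlers are stored as per-object pointers by this call; the remaining 26 virtuals are reached through the shared, static __nvoc_metadata__DispObject vtable wired up above.)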
+ __nvoc_init_funcTable_DispObject(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_DispObject(DispObject **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + DispObject *pThis; + RmHalspecOwner *pRmhalspecowner; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(DispObject), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(DispObject)); + + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // pParent must be a valid object that derives from a halspec owner class. + NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_ARGUMENT); + + // Link the child into the parent unless flagged not to do so. + if (!(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init__DispObject(pThis, pRmhalspecowner); + status = __nvoc_ctor_DispObject(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispObject_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_DispObject_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. 
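+    // (By this point the goto-based failure path in __nvoc_ctor_DispObject has already unwound any partially constructed bases, so the cleanup below only releases or re-zeroes the object's memory.)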
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(DispObject)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispObject(DispObject **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispObject(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x36aa0b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NvDispApi; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispObject; + +// Forward declarations for NvDispApi +void __nvoc_init__DispObject(DispObject*, RmHalspecOwner *pRmhalspecowner); +void __nvoc_init__NvDispApi(NvDispApi*, RmHalspecOwner *pRmhalspecowner); +void __nvoc_init_funcTable_NvDispApi(NvDispApi*); +NV_STATUS __nvoc_ctor_NvDispApi(NvDispApi*, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_NvDispApi(NvDispApi*); +void __nvoc_dtor_NvDispApi(NvDispApi*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__NvDispApi; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__NvDispApi; + +// Down-thunk(s) to bridge NvDispApi methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^3 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^3 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3 +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3 +PEVENTNOTIFICATION * __nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier); // super^3 +struct NotifShare * __nvoc_down_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier); // super^3 +void __nvoc_down_thunk_Notifier_inotifySetNotificationShare(struct INotifier 
*pNotifier, struct NotifShare *pNotifShare); // super^3 +NV_STATUS __nvoc_down_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super^3 +NV_STATUS __nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super^3 +NV_STATUS __nvoc_down_thunk_DisplayApi_resControl(struct RsResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_DisplayApi_rmresControl_Prologue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // super^2 +void __nvoc_down_thunk_DisplayApi_rmresControl_Epilogue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // super^2 + +// Up-thunk(s) to bridge NvDispApi methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^3 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^3 +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^3 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^3 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^3 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^3 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^3 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^3 +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^3 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^3 +NvBool __nvoc_up_thunk_RmResource_dispapiAccessCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_up_thunk_RmResource_dispapiShareCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_up_thunk_RmResource_dispapiGetMemInterMapParams(struct DisplayApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RmResource_dispapiCheckMemInterUnmap(struct DisplayApi *pRmResource, NvBool bSubdeviceHandleProvided); // super^2 +NV_STATUS __nvoc_up_thunk_RmResource_dispapiGetMemoryMappingDescriptor(struct DisplayApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super^2 +NV_STATUS 
__nvoc_up_thunk_RmResource_dispapiControlSerialization_Prologue(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_up_thunk_RmResource_dispapiControlSerialization_Epilogue(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NvBool __nvoc_up_thunk_RsResource_dispapiCanCopy(struct DisplayApi *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_dispapiIsDuplicate(struct DisplayApi *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2 +void __nvoc_up_thunk_RsResource_dispapiPreDestruct(struct DisplayApi *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_dispapiControlFilter(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_dispapiMap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_dispapiUnmap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2 +NvBool __nvoc_up_thunk_RsResource_dispapiIsPartialUnmapSupported(struct DisplayApi *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_dispapiMapTo(struct DisplayApi *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_dispapiUnmapFrom(struct DisplayApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2 +NvU32 __nvoc_up_thunk_RsResource_dispapiGetRefCount(struct DisplayApi *pResource); // super^2 +void __nvoc_up_thunk_RsResource_dispapiAddAdditionalDependants(struct RsClient *pClient, struct DisplayApi *pResource, RsResourceRef *pReference); // super^2 +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispapiGetNotificationListPtr(struct DisplayApi *pNotifier); // super^2 +struct NotifShare * __nvoc_up_thunk_Notifier_dispapiGetNotificationShare(struct DisplayApi *pNotifier); // super^2 +void __nvoc_up_thunk_Notifier_dispapiSetNotificationShare(struct DisplayApi *pNotifier, struct NotifShare *pNotifShare); // super^2 +NV_STATUS __nvoc_up_thunk_Notifier_dispapiUnregisterEvent(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super^2 +NV_STATUS __nvoc_up_thunk_Notifier_dispapiGetOrAllocNotifShare(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super^2 +NV_STATUS __nvoc_up_thunk_DisplayApi_dispobjControl(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_DisplayApi_dispobjControl_Prologue(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // super +void __nvoc_up_thunk_DisplayApi_dispobjControl_Epilogue(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // super +NvBool __nvoc_up_thunk_RmResource_dispobjAccessCallback(struct DispObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NvBool __nvoc_up_thunk_RmResource_dispobjShareCallback(struct DispObject *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS 
__nvoc_up_thunk_RmResource_dispobjGetMemInterMapParams(struct DispObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_dispobjCheckMemInterUnmap(struct DispObject *pRmResource, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_up_thunk_RmResource_dispobjGetMemoryMappingDescriptor(struct DispObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super +NV_STATUS __nvoc_up_thunk_RmResource_dispobjControlSerialization_Prologue(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_dispobjControlSerialization_Epilogue(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_dispobjCanCopy(struct DispObject *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispobjIsDuplicate(struct DispObject *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_dispobjPreDestruct(struct DispObject *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispobjControlFilter(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispobjMap(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispobjUnmap(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_up_thunk_RsResource_dispobjIsPartialUnmapSupported(struct DispObject *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispobjMapTo(struct DispObject *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispobjUnmapFrom(struct DispObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_dispobjGetRefCount(struct DispObject *pResource); // super +void __nvoc_up_thunk_RsResource_dispobjAddAdditionalDependants(struct RsClient *pClient, struct DispObject *pResource, RsResourceRef *pReference); // super +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispobjGetNotificationListPtr(struct DispObject *pNotifier); // super +struct NotifShare * __nvoc_up_thunk_Notifier_dispobjGetNotificationShare(struct DispObject *pNotifier); // super +void __nvoc_up_thunk_Notifier_dispobjSetNotificationShare(struct DispObject *pNotifier, struct NotifShare *pNotifShare); // super +NV_STATUS __nvoc_up_thunk_Notifier_dispobjUnregisterEvent(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super +NV_STATUS __nvoc_up_thunk_Notifier_dispobjGetOrAllocNotifShare(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super +NV_STATUS __nvoc_up_thunk_DisplayApi_nvdispapiControl(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_DisplayApi_nvdispapiControl_Prologue(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // this +void __nvoc_up_thunk_DisplayApi_nvdispapiControl_Epilogue(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // this 
+NvBool __nvoc_up_thunk_RmResource_nvdispapiAccessCallback(struct NvDispApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NvBool __nvoc_up_thunk_RmResource_nvdispapiShareCallback(struct NvDispApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_RmResource_nvdispapiGetMemInterMapParams(struct NvDispApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_nvdispapiCheckMemInterUnmap(struct NvDispApi *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_nvdispapiGetMemoryMappingDescriptor(struct NvDispApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_nvdispapiControlSerialization_Prologue(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_nvdispapiControlSerialization_Epilogue(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_nvdispapiCanCopy(struct NvDispApi *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_nvdispapiIsDuplicate(struct NvDispApi *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_nvdispapiPreDestruct(struct NvDispApi *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_nvdispapiControlFilter(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_nvdispapiMap(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_RsResource_nvdispapiUnmap(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_RsResource_nvdispapiIsPartialUnmapSupported(struct NvDispApi *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_nvdispapiMapTo(struct NvDispApi *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_nvdispapiUnmapFrom(struct NvDispApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_nvdispapiGetRefCount(struct NvDispApi *pResource); // this +void __nvoc_up_thunk_RsResource_nvdispapiAddAdditionalDependants(struct RsClient *pClient, struct NvDispApi *pResource, RsResourceRef *pReference); // this +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_nvdispapiGetNotificationListPtr(struct NvDispApi *pNotifier); // this +struct NotifShare * __nvoc_up_thunk_Notifier_nvdispapiGetNotificationShare(struct NvDispApi *pNotifier); // this +void __nvoc_up_thunk_Notifier_nvdispapiSetNotificationShare(struct NvDispApi *pNotifier, struct NotifShare *pNotifShare); // this +NV_STATUS __nvoc_up_thunk_Notifier_nvdispapiUnregisterEvent(struct NvDispApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // this +NV_STATUS __nvoc_up_thunk_Notifier_nvdispapiGetOrAllocNotifShare(struct NvDispApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_NvDispApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(NvDispApi), + /*classId=*/ classId(NvDispApi), + /*providerId=*/ 
&__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "NvDispApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_NvDispApi, + /*pCastInfo=*/ &__nvoc_castinfo__NvDispApi, + /*pExportInfo=*/ &__nvoc_export_info__NvDispApi +}; + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_NvDispApi[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdIdleChannel_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700101u, + /*paramSize=*/ sizeof(NVC370_CTRL_IDLE_CHANNEL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdIdleChannel" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdSetAccl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700102u, + /*paramSize=*/ sizeof(NVC370_CTRL_SET_ACCL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdSetAccl" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdGetAccl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700103u, + /*paramSize=*/ sizeof(NVC370_CTRL_GET_ACCL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdGetAccl" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdGetChannelInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*flags=*/ 0x40u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700104u, + /*paramSize=*/ sizeof(NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdGetChannelInfo" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdChannelCancelFlip_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700105u, + /*paramSize=*/ sizeof(NVC370_CTRL_CHANNEL_CANCEL_FLIP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdChannelCancelFlip" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdGetLockpinsCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700201u, + /*paramSize=*/ sizeof(NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdGetLockpinsCaps" +#endif + }, + { /* [6] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdSetSwaprdyGpioWar_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*flags=*/ 0x40u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700202u, + /*paramSize=*/ sizeof(NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdSetSwaprdyGpioWar" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700602u, + /*paramSize=*/ sizeof(NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides" +#endif + }, + +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__NvDispApi __nvoc_metadata__NvDispApi = { + .rtti.pClassDef = &__nvoc_class_def_NvDispApi, // (nvdispapi) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_NvDispApi, + .rtti.offset = 0, + .metadata__DispObject.rtti.pClassDef = &__nvoc_class_def_DispObject, // (dispobj) super + .metadata__DispObject.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DispObject.rtti.offset = NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject), + .metadata__DispObject.metadata__DisplayApi.rtti.pClassDef = &__nvoc_class_def_DisplayApi, // (dispapi) super^2 + .metadata__DispObject.metadata__DisplayApi.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DispObject.metadata__DisplayApi.rtti.offset = NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi), + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^3 + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.rtti.offset = NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource), + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^4 + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^5 + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // 
(rmrescmn) super^4 + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + .metadata__DispObject.metadata__DisplayApi.metadata__Notifier.rtti.pClassDef = &__nvoc_class_def_Notifier, // (notify) super^3 + .metadata__DispObject.metadata__DisplayApi.metadata__Notifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DispObject.metadata__DisplayApi.metadata__Notifier.rtti.offset = NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier), + .metadata__DispObject.metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti.pClassDef = &__nvoc_class_def_INotifier, // (inotify) super^4 + .metadata__DispObject.metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DispObject.metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti.offset = NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier), + + .vtable.__nvdispapiControl__ = &__nvoc_up_thunk_DisplayApi_nvdispapiControl, // virtual inherited (dispapi) base (dispobj) + .metadata__DispObject.vtable.__dispobjControl__ = &__nvoc_up_thunk_DisplayApi_dispobjControl, // virtual inherited (dispapi) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiControl__ = &dispapiControl_IMPL, // virtual override (res) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_DisplayApi_resControl, // virtual + .vtable.__nvdispapiControl_Prologue__ = &__nvoc_up_thunk_DisplayApi_nvdispapiControl_Prologue, // virtual inherited (dispapi) base (dispobj) + .metadata__DispObject.vtable.__dispobjControl_Prologue__ = &__nvoc_up_thunk_DisplayApi_dispobjControl_Prologue, // virtual inherited (dispapi) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiControl_Prologue__ = &dispapiControl_Prologue_IMPL, // virtual override (res) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresControl_Prologue__ = &__nvoc_down_thunk_DisplayApi_rmresControl_Prologue, // virtual override (res) base (res) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__nvdispapiControl_Epilogue__ = &__nvoc_up_thunk_DisplayApi_nvdispapiControl_Epilogue, // virtual inherited (dispapi) base (dispobj) + .metadata__DispObject.vtable.__dispobjControl_Epilogue__ = &__nvoc_up_thunk_DisplayApi_dispobjControl_Epilogue, // virtual inherited (dispapi) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiControl_Epilogue__ = &dispapiControl_Epilogue_IMPL, // virtual override (res) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &__nvoc_down_thunk_DisplayApi_rmresControl_Epilogue, // virtual override (res) base (res) + 
.metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__nvdispapiAccessCallback__ = &__nvoc_up_thunk_RmResource_nvdispapiAccessCallback, // virtual inherited (rmres) base (dispobj) + .metadata__DispObject.vtable.__dispobjAccessCallback__ = &__nvoc_up_thunk_RmResource_dispobjAccessCallback, // virtual inherited (rmres) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiAccessCallback__ = &__nvoc_up_thunk_RmResource_dispapiAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__nvdispapiShareCallback__ = &__nvoc_up_thunk_RmResource_nvdispapiShareCallback, // virtual inherited (rmres) base (dispobj) + .metadata__DispObject.vtable.__dispobjShareCallback__ = &__nvoc_up_thunk_RmResource_dispobjShareCallback, // virtual inherited (rmres) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiShareCallback__ = &__nvoc_up_thunk_RmResource_dispapiShareCallback, // virtual inherited (rmres) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL, // virtual override (res) base (res) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__nvdispapiGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_nvdispapiGetMemInterMapParams, // virtual inherited (rmres) base (dispobj) + .metadata__DispObject.vtable.__dispobjGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_dispobjGetMemInterMapParams, // virtual inherited (rmres) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_dispapiGetMemInterMapParams, // virtual inherited (rmres) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + .vtable.__nvdispapiCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_nvdispapiCheckMemInterUnmap, // virtual inherited (rmres) base (dispobj) + .metadata__DispObject.vtable.__dispobjCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispobjCheckMemInterUnmap, // virtual inherited (rmres) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispapiCheckMemInterUnmap, // virtual inherited (rmres) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual + .vtable.__nvdispapiGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_nvdispapiGetMemoryMappingDescriptor, // virtual inherited (rmres) base (dispobj) + .metadata__DispObject.vtable.__dispobjGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_dispobjGetMemoryMappingDescriptor, // virtual inherited (rmres) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiGetMemoryMappingDescriptor__ = 
&__nvoc_up_thunk_RmResource_dispapiGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual + .vtable.__nvdispapiControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_nvdispapiControlSerialization_Prologue, // virtual inherited (rmres) base (dispobj) + .metadata__DispObject.vtable.__dispobjControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispobjControlSerialization_Prologue, // virtual inherited (rmres) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispapiControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__nvdispapiControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_nvdispapiControlSerialization_Epilogue, // virtual inherited (rmres) base (dispobj) + .metadata__DispObject.vtable.__dispobjControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispobjControlSerialization_Epilogue, // virtual inherited (rmres) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispapiControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__nvdispapiCanCopy__ = &__nvoc_up_thunk_RsResource_nvdispapiCanCopy, // virtual inherited (res) base (dispobj) + .metadata__DispObject.vtable.__dispobjCanCopy__ = &__nvoc_up_thunk_RsResource_dispobjCanCopy, // virtual inherited (res) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiCanCopy__ = &__nvoc_up_thunk_RsResource_dispapiCanCopy, // virtual inherited (res) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__nvdispapiIsDuplicate__ = &__nvoc_up_thunk_RsResource_nvdispapiIsDuplicate, // virtual inherited (res) base (dispobj) + .metadata__DispObject.vtable.__dispobjIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispobjIsDuplicate, // virtual inherited (res) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispapiIsDuplicate, // virtual inherited (res) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + 
.metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__nvdispapiPreDestruct__ = &__nvoc_up_thunk_RsResource_nvdispapiPreDestruct, // virtual inherited (res) base (dispobj) + .metadata__DispObject.vtable.__dispobjPreDestruct__ = &__nvoc_up_thunk_RsResource_dispobjPreDestruct, // virtual inherited (res) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiPreDestruct__ = &__nvoc_up_thunk_RsResource_dispapiPreDestruct, // virtual inherited (res) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__nvdispapiControlFilter__ = &__nvoc_up_thunk_RsResource_nvdispapiControlFilter, // virtual inherited (res) base (dispobj) + .metadata__DispObject.vtable.__dispobjControlFilter__ = &__nvoc_up_thunk_RsResource_dispobjControlFilter, // virtual inherited (res) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiControlFilter__ = &__nvoc_up_thunk_RsResource_dispapiControlFilter, // virtual inherited (res) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__nvdispapiMap__ = &__nvoc_up_thunk_RsResource_nvdispapiMap, // virtual inherited (res) base (dispobj) + .metadata__DispObject.vtable.__dispobjMap__ = &__nvoc_up_thunk_RsResource_dispobjMap, // virtual inherited (res) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiMap__ = &__nvoc_up_thunk_RsResource_dispapiMap, // virtual inherited (res) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &resMap_IMPL, // virtual + .vtable.__nvdispapiUnmap__ = &__nvoc_up_thunk_RsResource_nvdispapiUnmap, // virtual inherited (res) base (dispobj) + .metadata__DispObject.vtable.__dispobjUnmap__ = &__nvoc_up_thunk_RsResource_dispobjUnmap, // virtual inherited (res) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiUnmap__ = &__nvoc_up_thunk_RsResource_dispapiUnmap, // virtual inherited (res) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &resUnmap_IMPL, // virtual + .vtable.__nvdispapiIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_nvdispapiIsPartialUnmapSupported, // inline virtual inherited (res) base (dispobj) body + .metadata__DispObject.vtable.__dispobjIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_dispobjIsPartialUnmapSupported, // inline virtual inherited (res) base (dispapi) body + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiIsPartialUnmapSupported__ = 
&__nvoc_up_thunk_RsResource_dispapiIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__nvdispapiMapTo__ = &__nvoc_up_thunk_RsResource_nvdispapiMapTo, // virtual inherited (res) base (dispobj) + .metadata__DispObject.vtable.__dispobjMapTo__ = &__nvoc_up_thunk_RsResource_dispobjMapTo, // virtual inherited (res) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiMapTo__ = &__nvoc_up_thunk_RsResource_dispapiMapTo, // virtual inherited (res) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__nvdispapiUnmapFrom__ = &__nvoc_up_thunk_RsResource_nvdispapiUnmapFrom, // virtual inherited (res) base (dispobj) + .metadata__DispObject.vtable.__dispobjUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispobjUnmapFrom, // virtual inherited (res) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispapiUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__nvdispapiGetRefCount__ = &__nvoc_up_thunk_RsResource_nvdispapiGetRefCount, // virtual inherited (res) base (dispobj) + .metadata__DispObject.vtable.__dispobjGetRefCount__ = &__nvoc_up_thunk_RsResource_dispobjGetRefCount, // virtual inherited (res) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiGetRefCount__ = &__nvoc_up_thunk_RsResource_dispapiGetRefCount, // virtual inherited (res) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__nvdispapiAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_nvdispapiAddAdditionalDependants, // virtual inherited (res) base (dispobj) + .metadata__DispObject.vtable.__dispobjAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispobjAddAdditionalDependants, // virtual inherited (res) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispapiAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__DispObject.metadata__DisplayApi.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + 
.metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual + .vtable.__nvdispapiGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_nvdispapiGetNotificationListPtr, // virtual inherited (notify) base (dispobj) + .metadata__DispObject.vtable.__dispobjGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_dispobjGetNotificationListPtr, // virtual inherited (notify) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_dispapiGetNotificationListPtr, // virtual inherited (notify) base (notify) + .metadata__DispObject.metadata__DisplayApi.metadata__Notifier.vtable.__notifyGetNotificationListPtr__ = &notifyGetNotificationListPtr_IMPL, // virtual override (inotify) base (inotify) + .metadata__DispObject.metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationListPtr__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr, // pure virtual + .vtable.__nvdispapiGetNotificationShare__ = &__nvoc_up_thunk_Notifier_nvdispapiGetNotificationShare, // virtual inherited (notify) base (dispobj) + .metadata__DispObject.vtable.__dispobjGetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispobjGetNotificationShare, // virtual inherited (notify) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiGetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispapiGetNotificationShare, // virtual inherited (notify) base (notify) + .metadata__DispObject.metadata__DisplayApi.metadata__Notifier.vtable.__notifyGetNotificationShare__ = &notifyGetNotificationShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__DispObject.metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationShare, // pure virtual + .vtable.__nvdispapiSetNotificationShare__ = &__nvoc_up_thunk_Notifier_nvdispapiSetNotificationShare, // virtual inherited (notify) base (dispobj) + .metadata__DispObject.vtable.__dispobjSetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispobjSetNotificationShare, // virtual inherited (notify) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiSetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispapiSetNotificationShare, // virtual inherited (notify) base (notify) + .metadata__DispObject.metadata__DisplayApi.metadata__Notifier.vtable.__notifySetNotificationShare__ = &notifySetNotificationShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__DispObject.metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifySetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifySetNotificationShare, // pure virtual + .vtable.__nvdispapiUnregisterEvent__ = &__nvoc_up_thunk_Notifier_nvdispapiUnregisterEvent, // virtual inherited (notify) base (dispobj) + .metadata__DispObject.vtable.__dispobjUnregisterEvent__ = &__nvoc_up_thunk_Notifier_dispobjUnregisterEvent, // virtual inherited (notify) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiUnregisterEvent__ = &__nvoc_up_thunk_Notifier_dispapiUnregisterEvent, // virtual inherited (notify) base (notify) + .metadata__DispObject.metadata__DisplayApi.metadata__Notifier.vtable.__notifyUnregisterEvent__ = &notifyUnregisterEvent_IMPL, // virtual override (inotify) base (inotify) + 
.metadata__DispObject.metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyUnregisterEvent__ = &__nvoc_down_thunk_Notifier_inotifyUnregisterEvent, // pure virtual + .vtable.__nvdispapiGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_nvdispapiGetOrAllocNotifShare, // virtual inherited (notify) base (dispobj) + .metadata__DispObject.vtable.__dispobjGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_dispobjGetOrAllocNotifShare, // virtual inherited (notify) base (dispapi) + .metadata__DispObject.metadata__DisplayApi.vtable.__dispapiGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_dispapiGetOrAllocNotifShare, // virtual inherited (notify) base (notify) + .metadata__DispObject.metadata__DisplayApi.metadata__Notifier.vtable.__notifyGetOrAllocNotifShare__ = &notifyGetOrAllocNotifShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__DispObject.metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetOrAllocNotifShare__ = &__nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare, // pure virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__NvDispApi = { + .numRelatives = 9, + .relatives = { + &__nvoc_metadata__NvDispApi.rtti, // [0]: (nvdispapi) this + &__nvoc_metadata__NvDispApi.metadata__DispObject.rtti, // [1]: (dispobj) super + &__nvoc_metadata__NvDispApi.metadata__DispObject.metadata__DisplayApi.rtti, // [2]: (dispapi) super^2 + &__nvoc_metadata__NvDispApi.metadata__DispObject.metadata__DisplayApi.metadata__RmResource.rtti, // [3]: (rmres) super^3 + &__nvoc_metadata__NvDispApi.metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti, // [4]: (res) super^4 + &__nvoc_metadata__NvDispApi.metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [5]: (obj) super^5 + &__nvoc_metadata__NvDispApi.metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti, // [6]: (rmrescmn) super^4 + &__nvoc_metadata__NvDispApi.metadata__DispObject.metadata__DisplayApi.metadata__Notifier.rtti, // [7]: (notify) super^3 + &__nvoc_metadata__NvDispApi.metadata__DispObject.metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti, // [8]: (inotify) super^4 + } +}; + +// 26 up-thunk(s) defined to bridge methods in NvDispApi to superclasses + +// nvdispapiControl: virtual inherited (dispapi) base (dispobj) +NV_STATUS __nvoc_up_thunk_DisplayApi_nvdispapiControl(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return dispapiControl((struct DisplayApi *)(((unsigned char *) pDisplayApi) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi)), pCallContext, pParams); +} + +// nvdispapiControl_Prologue: virtual inherited (dispapi) base (dispobj) +NV_STATUS __nvoc_up_thunk_DisplayApi_nvdispapiControl_Prologue(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return dispapiControl_Prologue((struct DisplayApi *)(((unsigned char *) pDisplayApi) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi)), pCallContext, pRsParams); +} + +// nvdispapiControl_Epilogue: virtual inherited (dispapi) base (dispobj) +void __nvoc_up_thunk_DisplayApi_nvdispapiControl_Epilogue(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + dispapiControl_Epilogue((struct DisplayApi *)(((unsigned 
char *) pDisplayApi) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi)), pCallContext, pRsParams); +} + +// nvdispapiAccessCallback: virtual inherited (rmres) base (dispobj) +NvBool __nvoc_up_thunk_RmResource_nvdispapiAccessCallback(struct NvDispApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// nvdispapiShareCallback: virtual inherited (rmres) base (dispobj) +NvBool __nvoc_up_thunk_RmResource_nvdispapiShareCallback(struct NvDispApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// nvdispapiGetMemInterMapParams: virtual inherited (rmres) base (dispobj) +NV_STATUS __nvoc_up_thunk_RmResource_nvdispapiGetMemInterMapParams(struct NvDispApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource)), pParams); +} + +// nvdispapiCheckMemInterUnmap: virtual inherited (rmres) base (dispobj) +NV_STATUS __nvoc_up_thunk_RmResource_nvdispapiCheckMemInterUnmap(struct NvDispApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource)), bSubdeviceHandleProvided); +} + +// nvdispapiGetMemoryMappingDescriptor: virtual inherited (rmres) base (dispobj) +NV_STATUS __nvoc_up_thunk_RmResource_nvdispapiGetMemoryMappingDescriptor(struct NvDispApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource)), ppMemDesc); +} + +// nvdispapiControlSerialization_Prologue: virtual inherited (rmres) base (dispobj) +NV_STATUS __nvoc_up_thunk_RmResource_nvdispapiControlSerialization_Prologue(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// nvdispapiControlSerialization_Epilogue: virtual inherited (rmres) base (dispobj) +void __nvoc_up_thunk_RmResource_nvdispapiControlSerialization_Epilogue(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// nvdispapiCanCopy: virtual inherited (res) base (dispobj) +NvBool __nvoc_up_thunk_RsResource_nvdispapiCanCopy(struct NvDispApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + 
NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// nvdispapiIsDuplicate: virtual inherited (res) base (dispobj) +NV_STATUS __nvoc_up_thunk_RsResource_nvdispapiIsDuplicate(struct NvDispApi *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate); +} + +// nvdispapiPreDestruct: virtual inherited (res) base (dispobj) +void __nvoc_up_thunk_RsResource_nvdispapiPreDestruct(struct NvDispApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// nvdispapiControlFilter: virtual inherited (res) base (dispobj) +NV_STATUS __nvoc_up_thunk_RsResource_nvdispapiControlFilter(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// nvdispapiMap: virtual inherited (res) base (dispobj) +NV_STATUS __nvoc_up_thunk_RsResource_nvdispapiMap(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams, pCpuMapping); +} + +// nvdispapiUnmap: virtual inherited (res) base (dispobj) +NV_STATUS __nvoc_up_thunk_RsResource_nvdispapiUnmap(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pCpuMapping); +} + +// nvdispapiIsPartialUnmapSupported: inline virtual inherited (res) base (dispobj) body +NvBool __nvoc_up_thunk_RsResource_nvdispapiIsPartialUnmapSupported(struct NvDispApi *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// nvdispapiMapTo: virtual inherited (res) base (dispobj) +NV_STATUS __nvoc_up_thunk_RsResource_nvdispapiMapTo(struct NvDispApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// nvdispapiUnmapFrom: virtual inherited (res) base (dispobj) +NV_STATUS __nvoc_up_thunk_RsResource_nvdispapiUnmapFrom(struct NvDispApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// nvdispapiGetRefCount: virtual inherited (res) base (dispobj) +NvU32 __nvoc_up_thunk_RsResource_nvdispapiGetRefCount(struct NvDispApi 
*pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// nvdispapiAddAdditionalDependants: virtual inherited (res) base (dispobj) +void __nvoc_up_thunk_RsResource_nvdispapiAddAdditionalDependants(struct RsClient *pClient, struct NvDispApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + +// nvdispapiGetNotificationListPtr: virtual inherited (notify) base (dispobj) +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_nvdispapiGetNotificationListPtr(struct NvDispApi *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier))); +} + +// nvdispapiGetNotificationShare: virtual inherited (notify) base (dispobj) +struct NotifShare * __nvoc_up_thunk_Notifier_nvdispapiGetNotificationShare(struct NvDispApi *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier))); +} + +// nvdispapiSetNotificationShare: virtual inherited (notify) base (dispobj) +void __nvoc_up_thunk_Notifier_nvdispapiSetNotificationShare(struct NvDispApi *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier)), pNotifShare); +} + +// nvdispapiUnregisterEvent: virtual inherited (notify) base (dispobj) +NV_STATUS __nvoc_up_thunk_Notifier_nvdispapiUnregisterEvent(struct NvDispApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier)), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +// nvdispapiGetOrAllocNotifShare: virtual inherited (notify) base (dispobj) +NV_STATUS __nvoc_up_thunk_Notifier_nvdispapiGetOrAllocNotifShare(struct NvDispApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier)), hNotifierClient, hNotifierResource, ppNotifShare); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__NvDispApi = +{ + /*numEntries=*/ 8, + /*pExportEntries=*/ __nvoc_exported_method_def_NvDispApi +}; + +void __nvoc_dtor_DispObject(DispObject*); +void __nvoc_dtor_NvDispApi(NvDispApi *pThis) { + __nvoc_dtor_DispObject(&pThis->__nvoc_base_DispObject); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_NvDispApi(NvDispApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_DispObject(DispObject* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_NvDispApi(NvDispApi *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * 
arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_DispObject(&pThis->__nvoc_base_DispObject, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_NvDispApi_fail_DispObject; + __nvoc_init_dataField_NvDispApi(pThis); + + status = __nvoc_nvdispapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_NvDispApi_fail__init; + goto __nvoc_ctor_NvDispApi_exit; // Success + +__nvoc_ctor_NvDispApi_fail__init: + __nvoc_dtor_DispObject(&pThis->__nvoc_base_DispObject); +__nvoc_ctor_NvDispApi_fail_DispObject: +__nvoc_ctor_NvDispApi_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_NvDispApi_1(NvDispApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + // nvdispapiCtrlCmdIdleChannel -- exported (id=0xc3700101) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__nvdispapiCtrlCmdIdleChannel__ = &nvdispapiCtrlCmdIdleChannel_IMPL; +#endif + + // nvdispapiCtrlCmdSetAccl -- exported (id=0xc3700102) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__nvdispapiCtrlCmdSetAccl__ = &nvdispapiCtrlCmdSetAccl_IMPL; +#endif + + // nvdispapiCtrlCmdGetAccl -- exported (id=0xc3700103) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__nvdispapiCtrlCmdGetAccl__ = &nvdispapiCtrlCmdGetAccl_IMPL; +#endif + + // nvdispapiCtrlCmdGetChannelInfo -- exported (id=0xc3700104) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + pThis->__nvdispapiCtrlCmdGetChannelInfo__ = &nvdispapiCtrlCmdGetChannelInfo_IMPL; +#endif + + // nvdispapiCtrlCmdChannelCancelFlip -- exported (id=0xc3700105) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__nvdispapiCtrlCmdChannelCancelFlip__ = &nvdispapiCtrlCmdChannelCancelFlip_IMPL; +#endif + + // nvdispapiCtrlCmdSetSwaprdyGpioWar -- exported (id=0xc3700202) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + pThis->__nvdispapiCtrlCmdSetSwaprdyGpioWar__ = &nvdispapiCtrlCmdSetSwaprdyGpioWar_IMPL; +#endif + + // nvdispapiCtrlCmdGetLockpinsCaps -- exported (id=0xc3700201) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__nvdispapiCtrlCmdGetLockpinsCaps__ = &nvdispapiCtrlCmdGetLockpinsCaps_IMPL; +#endif + + // nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides -- exported (id=0xc3700602) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides__ = &nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_IMPL; +#endif +} // End __nvoc_init_funcTable_NvDispApi_1 with approximately 8 basic block(s). + + +// Initialize vtable(s) for 34 virtual method(s). +void __nvoc_init_funcTable_NvDispApi(NvDispApi *pThis) { + + // Initialize vtable(s) with 8 per-object function pointer(s). + __nvoc_init_funcTable_NvDispApi_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__NvDispApi(NvDispApi *pThis, RmHalspecOwner *pRmhalspecowner) { + + // Initialize pointers to inherited data. 
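+ // Each __nvoc_pbase_* pointer assigned below aliases a base-class structure embedded by value inside NvDispApi, so an up-cast to any ancestor type resolves to a fixed compile-time offset within the same allocation rather than a runtime lookup. 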
+ pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^5 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^4 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^4 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource; // (rmres) super^3 + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier; // (inotify) super^4 + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier; // (notify) super^3 + pThis->__nvoc_pbase_DisplayApi = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi; // (dispapi) super^2 + pThis->__nvoc_pbase_DispObject = &pThis->__nvoc_base_DispObject; // (dispobj) super + pThis->__nvoc_pbase_NvDispApi = pThis; // (nvdispapi) this + + // Recurse to superclass initialization function(s). + __nvoc_init__DispObject(&pThis->__nvoc_base_DispObject, pRmhalspecowner); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__NvDispApi.metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^5 + pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__NvDispApi.metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RsResource; // (res) super^4 + pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__NvDispApi.metadata__DispObject.metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^4 + pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__NvDispApi.metadata__DispObject.metadata__DisplayApi.metadata__RmResource; // (rmres) super^3 + pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier.__nvoc_metadata_ptr = &__nvoc_metadata__NvDispApi.metadata__DispObject.metadata__DisplayApi.metadata__Notifier.metadata__INotifier; // (inotify) super^4 + pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr = &__nvoc_metadata__NvDispApi.metadata__DispObject.metadata__DisplayApi.metadata__Notifier; // (notify) super^3 + pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_metadata_ptr = &__nvoc_metadata__NvDispApi.metadata__DispObject.metadata__DisplayApi; // (dispapi) super^2 + pThis->__nvoc_base_DispObject.__nvoc_metadata_ptr = &__nvoc_metadata__NvDispApi.metadata__DispObject; // (dispobj) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__NvDispApi; // (nvdispapi) this + + // Initialize per-object vtables. 
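+ // Only the per-object function pointers (the exported control methods) live on the instance itself; the remaining virtuals are reached through the constant __nvoc_metadata__NvDispApi vtables wired up above. 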
+ __nvoc_init_funcTable_NvDispApi(pThis); +} + +NV_STATUS __nvoc_objCreate_NvDispApi(NvDispApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + NvDispApi *pThis; + RmHalspecOwner *pRmhalspecowner; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(NvDispApi), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(NvDispApi)); + + pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // pParent must be a valid object that derives from a halspec owner class. + NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_ARGUMENT); + + // Link the child into the parent unless flagged not to do so. + if (!(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init__NvDispApi(pThis, pRmhalspecowner); + status = __nvoc_ctor_NvDispApi(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_NvDispApi_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_NvDispApi_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. 
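+ // A failed __nvoc_ctor_NvDispApi has already unwound whatever base classes it constructed, so cleanup here is limited to unlinking the object from its parent and releasing (or re-zeroing) the allocation. 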
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(NvDispApi)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_NvDispApi(NvDispApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_NvDispApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x6aa5e2 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSwObj; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +// Forward declarations for DispSwObj +void __nvoc_init__DisplayApi(DisplayApi*, RmHalspecOwner *pRmhalspecowner); +void __nvoc_init__DispSwObj(DispSwObj*, RmHalspecOwner *pRmhalspecowner); +void __nvoc_init_funcTable_DispSwObj(DispSwObj*); +NV_STATUS __nvoc_ctor_DispSwObj(DispSwObj*, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_DispSwObj(DispSwObj*); +void __nvoc_dtor_DispSwObj(DispSwObj*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__DispSwObj; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__DispSwObj; + +// Down-thunk(s) to bridge DispSwObj methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +PEVENTNOTIFICATION * __nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier); // super^2 +struct NotifShare * __nvoc_down_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier); // super^2 +void __nvoc_down_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare); // super^2 +NV_STATUS 
__nvoc_down_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super^2 +NV_STATUS __nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super^2 +NV_STATUS __nvoc_down_thunk_DisplayApi_resControl(struct RsResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_DisplayApi_rmresControl_Prologue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // super +void __nvoc_down_thunk_DisplayApi_rmresControl_Epilogue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // super + +// Up-thunk(s) to bridge DispSwObj methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2 +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2 +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2 +NvBool __nvoc_up_thunk_RmResource_dispapiAccessCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NvBool __nvoc_up_thunk_RmResource_dispapiShareCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_up_thunk_RmResource_dispapiGetMemInterMapParams(struct DisplayApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_dispapiCheckMemInterUnmap(struct DisplayApi *pRmResource, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_up_thunk_RmResource_dispapiGetMemoryMappingDescriptor(struct DisplayApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super +NV_STATUS __nvoc_up_thunk_RmResource_dispapiControlSerialization_Prologue(struct DisplayApi *pResource, struct CALL_CONTEXT 
*pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_dispapiControlSerialization_Epilogue(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_dispapiCanCopy(struct DisplayApi *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiIsDuplicate(struct DisplayApi *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_dispapiPreDestruct(struct DisplayApi *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiControlFilter(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiMap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiUnmap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_up_thunk_RsResource_dispapiIsPartialUnmapSupported(struct DisplayApi *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiMapTo(struct DisplayApi *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiUnmapFrom(struct DisplayApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_dispapiGetRefCount(struct DisplayApi *pResource); // super +void __nvoc_up_thunk_RsResource_dispapiAddAdditionalDependants(struct RsClient *pClient, struct DisplayApi *pResource, RsResourceRef *pReference); // super +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispapiGetNotificationListPtr(struct DisplayApi *pNotifier); // super +struct NotifShare * __nvoc_up_thunk_Notifier_dispapiGetNotificationShare(struct DisplayApi *pNotifier); // super +void __nvoc_up_thunk_Notifier_dispapiSetNotificationShare(struct DisplayApi *pNotifier, struct NotifShare *pNotifShare); // super +NV_STATUS __nvoc_up_thunk_Notifier_dispapiUnregisterEvent(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super +NV_STATUS __nvoc_up_thunk_Notifier_dispapiGetOrAllocNotifShare(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super +NV_STATUS __nvoc_up_thunk_DisplayApi_dispswobjControl(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_DisplayApi_dispswobjControl_Prologue(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // this +void __nvoc_up_thunk_DisplayApi_dispswobjControl_Epilogue(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // this +NvBool __nvoc_up_thunk_RmResource_dispswobjAccessCallback(struct DispSwObj *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NvBool __nvoc_up_thunk_RmResource_dispswobjShareCallback(struct DispSwObj *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispswobjGetMemInterMapParams(struct DispSwObj *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS 
__nvoc_up_thunk_RmResource_dispswobjCheckMemInterUnmap(struct DispSwObj *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispswobjGetMemoryMappingDescriptor(struct DispSwObj *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispswobjControlSerialization_Prologue(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_dispswobjControlSerialization_Epilogue(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_dispswobjCanCopy(struct DispSwObj *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispswobjIsDuplicate(struct DispSwObj *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_dispswobjPreDestruct(struct DispSwObj *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispswobjControlFilter(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispswobjMap(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispswobjUnmap(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_RsResource_dispswobjIsPartialUnmapSupported(struct DispSwObj *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispswobjMapTo(struct DispSwObj *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispswobjUnmapFrom(struct DispSwObj *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_dispswobjGetRefCount(struct DispSwObj *pResource); // this +void __nvoc_up_thunk_RsResource_dispswobjAddAdditionalDependants(struct RsClient *pClient, struct DispSwObj *pResource, RsResourceRef *pReference); // this +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispswobjGetNotificationListPtr(struct DispSwObj *pNotifier); // this +struct NotifShare * __nvoc_up_thunk_Notifier_dispswobjGetNotificationShare(struct DispSwObj *pNotifier); // this +void __nvoc_up_thunk_Notifier_dispswobjSetNotificationShare(struct DispSwObj *pNotifier, struct NotifShare *pNotifShare); // this +NV_STATUS __nvoc_up_thunk_Notifier_dispswobjUnregisterEvent(struct DispSwObj *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // this +NV_STATUS __nvoc_up_thunk_Notifier_dispswobjGetOrAllocNotifShare(struct DispSwObj *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispSwObj = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispSwObj), + /*classId=*/ classId(DispSwObj), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispSwObj", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispSwObj, + /*pCastInfo=*/ &__nvoc_castinfo__DispSwObj, + /*pExportInfo=*/ &__nvoc_export_info__DispSwObj +}; + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_DispSwObj[] = +{ + { /* [0] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispswobjCtrlCmdIsModePossible_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3720101u, + /*paramSize=*/ sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispSwObj.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispswobjCtrlCmdIsModePossible" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispswobjCtrlCmdIsModePossibleOrSettings_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3720102u, + /*paramSize=*/ sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispSwObj.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispswobjCtrlCmdIsModePossibleOrSettings" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispswobjCtrlCmdVideoAdaptiveRefreshRate_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3720103u, + /*paramSize=*/ sizeof(NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispSwObj.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispswobjCtrlCmdVideoAdaptiveRefreshRate" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x49u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispswobjCtrlCmdGetActiveViewportPointIn_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x49u) + /*flags=*/ 0x49u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3720104u, + /*paramSize=*/ sizeof(NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispSwObj.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispswobjCtrlCmdGetActiveViewportPointIn" +#endif + }, + +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__DispSwObj __nvoc_metadata__DispSwObj = { + .rtti.pClassDef = &__nvoc_class_def_DispSwObj, // (dispswobj) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispSwObj, + .rtti.offset = 0, + .metadata__DisplayApi.rtti.pClassDef = &__nvoc_class_def_DisplayApi, // (dispapi) super + .metadata__DisplayApi.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DisplayApi.rtti.offset = NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi), + .metadata__DisplayApi.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2 + .metadata__DisplayApi.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DisplayApi.metadata__RmResource.rtti.offset = NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource), + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3 + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4 + 
.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3 + .metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + .metadata__DisplayApi.metadata__Notifier.rtti.pClassDef = &__nvoc_class_def_Notifier, // (notify) super^2 + .metadata__DisplayApi.metadata__Notifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DisplayApi.metadata__Notifier.rtti.offset = NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_Notifier), + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti.pClassDef = &__nvoc_class_def_INotifier, // (inotify) super^3 + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti.offset = NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier), + + .vtable.__dispswobjControl__ = &__nvoc_up_thunk_DisplayApi_dispswobjControl, // virtual inherited (dispapi) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiControl__ = &dispapiControl_IMPL, // virtual override (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_DisplayApi_resControl, // virtual + .vtable.__dispswobjControl_Prologue__ = &__nvoc_up_thunk_DisplayApi_dispswobjControl_Prologue, // virtual inherited (dispapi) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiControl_Prologue__ = &dispapiControl_Prologue_IMPL, // virtual override (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControl_Prologue__ = &__nvoc_down_thunk_DisplayApi_rmresControl_Prologue, // virtual override (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__dispswobjControl_Epilogue__ = &__nvoc_up_thunk_DisplayApi_dispswobjControl_Epilogue, // virtual inherited (dispapi) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiControl_Epilogue__ = &dispapiControl_Epilogue_IMPL, // virtual override (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &__nvoc_down_thunk_DisplayApi_rmresControl_Epilogue, // virtual override (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__dispswobjAccessCallback__ = &__nvoc_up_thunk_RmResource_dispswobjAccessCallback, // virtual inherited (rmres) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiAccessCallback__ = &__nvoc_up_thunk_RmResource_dispapiAccessCallback, // virtual inherited (rmres) base (rmres) + 
.metadata__DisplayApi.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__dispswobjShareCallback__ = &__nvoc_up_thunk_RmResource_dispswobjShareCallback, // virtual inherited (rmres) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiShareCallback__ = &__nvoc_up_thunk_RmResource_dispapiShareCallback, // virtual inherited (rmres) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL, // virtual override (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__dispswobjGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_dispswobjGetMemInterMapParams, // virtual inherited (rmres) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_dispapiGetMemInterMapParams, // virtual inherited (rmres) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + .vtable.__dispswobjCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispswobjCheckMemInterUnmap, // virtual inherited (rmres) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispapiCheckMemInterUnmap, // virtual inherited (rmres) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual + .vtable.__dispswobjGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_dispswobjGetMemoryMappingDescriptor, // virtual inherited (rmres) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_dispapiGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual + .vtable.__dispswobjControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispswobjControlSerialization_Prologue, // virtual inherited (rmres) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispapiControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__dispswobjControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispswobjControlSerialization_Epilogue, // virtual inherited (rmres) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispapiControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + 
.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__dispswobjCanCopy__ = &__nvoc_up_thunk_RsResource_dispswobjCanCopy, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiCanCopy__ = &__nvoc_up_thunk_RsResource_dispapiCanCopy, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__dispswobjIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispswobjIsDuplicate, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispapiIsDuplicate, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__dispswobjPreDestruct__ = &__nvoc_up_thunk_RsResource_dispswobjPreDestruct, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiPreDestruct__ = &__nvoc_up_thunk_RsResource_dispapiPreDestruct, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__dispswobjControlFilter__ = &__nvoc_up_thunk_RsResource_dispswobjControlFilter, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiControlFilter__ = &__nvoc_up_thunk_RsResource_dispapiControlFilter, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__dispswobjMap__ = &__nvoc_up_thunk_RsResource_dispswobjMap, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiMap__ = &__nvoc_up_thunk_RsResource_dispapiMap, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &resMap_IMPL, // virtual + .vtable.__dispswobjUnmap__ = &__nvoc_up_thunk_RsResource_dispswobjUnmap, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiUnmap__ = &__nvoc_up_thunk_RsResource_dispapiUnmap, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &resUnmap_IMPL, // virtual + .vtable.__dispswobjIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_dispswobjIsPartialUnmapSupported, // inline virtual inherited 
(res) base (dispapi) body + .metadata__DisplayApi.vtable.__dispapiIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_dispapiIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__dispswobjMapTo__ = &__nvoc_up_thunk_RsResource_dispswobjMapTo, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiMapTo__ = &__nvoc_up_thunk_RsResource_dispapiMapTo, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__dispswobjUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispswobjUnmapFrom, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispapiUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__dispswobjGetRefCount__ = &__nvoc_up_thunk_RsResource_dispswobjGetRefCount, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiGetRefCount__ = &__nvoc_up_thunk_RsResource_dispapiGetRefCount, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__dispswobjAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispswobjAddAdditionalDependants, // virtual inherited (res) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispapiAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__DisplayApi.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual + .vtable.__dispswobjGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_dispswobjGetNotificationListPtr, // virtual inherited (notify) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_dispapiGetNotificationListPtr, // virtual inherited (notify) base (notify) + .metadata__DisplayApi.metadata__Notifier.vtable.__notifyGetNotificationListPtr__ = &notifyGetNotificationListPtr_IMPL, // virtual override (inotify) base (inotify) + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationListPtr__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr, // pure virtual + .vtable.__dispswobjGetNotificationShare__ = 
&__nvoc_up_thunk_Notifier_dispswobjGetNotificationShare, // virtual inherited (notify) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiGetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispapiGetNotificationShare, // virtual inherited (notify) base (notify) + .metadata__DisplayApi.metadata__Notifier.vtable.__notifyGetNotificationShare__ = &notifyGetNotificationShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationShare, // pure virtual + .vtable.__dispswobjSetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispswobjSetNotificationShare, // virtual inherited (notify) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiSetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispapiSetNotificationShare, // virtual inherited (notify) base (notify) + .metadata__DisplayApi.metadata__Notifier.vtable.__notifySetNotificationShare__ = &notifySetNotificationShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifySetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifySetNotificationShare, // pure virtual + .vtable.__dispswobjUnregisterEvent__ = &__nvoc_up_thunk_Notifier_dispswobjUnregisterEvent, // virtual inherited (notify) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiUnregisterEvent__ = &__nvoc_up_thunk_Notifier_dispapiUnregisterEvent, // virtual inherited (notify) base (notify) + .metadata__DisplayApi.metadata__Notifier.vtable.__notifyUnregisterEvent__ = &notifyUnregisterEvent_IMPL, // virtual override (inotify) base (inotify) + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyUnregisterEvent__ = &__nvoc_down_thunk_Notifier_inotifyUnregisterEvent, // pure virtual + .vtable.__dispswobjGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_dispswobjGetOrAllocNotifShare, // virtual inherited (notify) base (dispapi) + .metadata__DisplayApi.vtable.__dispapiGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_dispapiGetOrAllocNotifShare, // virtual inherited (notify) base (notify) + .metadata__DisplayApi.metadata__Notifier.vtable.__notifyGetOrAllocNotifShare__ = &notifyGetOrAllocNotifShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetOrAllocNotifShare__ = &__nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare, // pure virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__DispSwObj = { + .numRelatives = 8, + .relatives = { + &__nvoc_metadata__DispSwObj.rtti, // [0]: (dispswobj) this + &__nvoc_metadata__DispSwObj.metadata__DisplayApi.rtti, // [1]: (dispapi) super + &__nvoc_metadata__DispSwObj.metadata__DisplayApi.metadata__RmResource.rtti, // [2]: (rmres) super^2 + &__nvoc_metadata__DispSwObj.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3 + &__nvoc_metadata__DispSwObj.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4 + &__nvoc_metadata__DispSwObj.metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3 + &__nvoc_metadata__DispSwObj.metadata__DisplayApi.metadata__Notifier.rtti, // [6]: (notify) super^2 + &__nvoc_metadata__DispSwObj.metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti, // [7]: (inotify) super^3 + } +}; + +// 26 up-thunk(s) defined to bridge
methods in DispSwObj to superclasses + +// dispswobjControl: virtual inherited (dispapi) base (dispapi) +NV_STATUS __nvoc_up_thunk_DisplayApi_dispswobjControl(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return dispapiControl((struct DisplayApi *)(((unsigned char *) pDisplayApi) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi)), pCallContext, pParams); +} + +// dispswobjControl_Prologue: virtual inherited (dispapi) base (dispapi) +NV_STATUS __nvoc_up_thunk_DisplayApi_dispswobjControl_Prologue(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return dispapiControl_Prologue((struct DisplayApi *)(((unsigned char *) pDisplayApi) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi)), pCallContext, pRsParams); +} + +// dispswobjControl_Epilogue: virtual inherited (dispapi) base (dispapi) +void __nvoc_up_thunk_DisplayApi_dispswobjControl_Epilogue(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + dispapiControl_Epilogue((struct DisplayApi *)(((unsigned char *) pDisplayApi) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi)), pCallContext, pRsParams); +} + +// dispswobjAccessCallback: virtual inherited (rmres) base (dispapi) +NvBool __nvoc_up_thunk_RmResource_dispswobjAccessCallback(struct DispSwObj *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// dispswobjShareCallback: virtual inherited (rmres) base (dispapi) +NvBool __nvoc_up_thunk_RmResource_dispswobjShareCallback(struct DispSwObj *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// dispswobjGetMemInterMapParams: virtual inherited (rmres) base (dispapi) +NV_STATUS __nvoc_up_thunk_RmResource_dispswobjGetMemInterMapParams(struct DispSwObj *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), pParams); +} + +// dispswobjCheckMemInterUnmap: virtual inherited (rmres) base (dispapi) +NV_STATUS __nvoc_up_thunk_RmResource_dispswobjCheckMemInterUnmap(struct DispSwObj *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), bSubdeviceHandleProvided); +} + +// dispswobjGetMemoryMappingDescriptor: virtual inherited (rmres) base (dispapi) +NV_STATUS __nvoc_up_thunk_RmResource_dispswobjGetMemoryMappingDescriptor(struct DispSwObj *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), ppMemDesc); +} + +// dispswobjControlSerialization_Prologue: virtual inherited (rmres) base (dispapi) +NV_STATUS __nvoc_up_thunk_RmResource_dispswobjControlSerialization_Prologue(struct 
DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispswobjControlSerialization_Epilogue: virtual inherited (rmres) base (dispapi) +void __nvoc_up_thunk_RmResource_dispswobjControlSerialization_Epilogue(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispswobjCanCopy: virtual inherited (res) base (dispapi) +NvBool __nvoc_up_thunk_RsResource_dispswobjCanCopy(struct DispSwObj *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispswobjIsDuplicate: virtual inherited (res) base (dispapi) +NV_STATUS __nvoc_up_thunk_RsResource_dispswobjIsDuplicate(struct DispSwObj *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate); +} + +// dispswobjPreDestruct: virtual inherited (res) base (dispapi) +void __nvoc_up_thunk_RsResource_dispswobjPreDestruct(struct DispSwObj *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispswobjControlFilter: virtual inherited (res) base (dispapi) +NV_STATUS __nvoc_up_thunk_RsResource_dispswobjControlFilter(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// dispswobjMap: virtual inherited (res) base (dispapi) +NV_STATUS __nvoc_up_thunk_RsResource_dispswobjMap(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams, pCpuMapping); +} + +// dispswobjUnmap: virtual inherited (res) base (dispapi) +NV_STATUS __nvoc_up_thunk_RsResource_dispswobjUnmap(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pCpuMapping); +} + +// dispswobjIsPartialUnmapSupported: inline virtual inherited (res) base (dispapi) body +NvBool __nvoc_up_thunk_RsResource_dispswobjIsPartialUnmapSupported(struct DispSwObj *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispswobjMapTo: virtual inherited (res) base (dispapi) +NV_STATUS 
__nvoc_up_thunk_RsResource_dispswobjMapTo(struct DispSwObj *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// dispswobjUnmapFrom: virtual inherited (res) base (dispapi) +NV_STATUS __nvoc_up_thunk_RsResource_dispswobjUnmapFrom(struct DispSwObj *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// dispswobjGetRefCount: virtual inherited (res) base (dispapi) +NvU32 __nvoc_up_thunk_RsResource_dispswobjGetRefCount(struct DispSwObj *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispswobjAddAdditionalDependants: virtual inherited (res) base (dispapi) +void __nvoc_up_thunk_RsResource_dispswobjAddAdditionalDependants(struct RsClient *pClient, struct DispSwObj *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + +// dispswobjGetNotificationListPtr: virtual inherited (notify) base (dispapi) +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispswobjGetNotificationListPtr(struct DispSwObj *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_Notifier))); +} + +// dispswobjGetNotificationShare: virtual inherited (notify) base (dispapi) +struct NotifShare * __nvoc_up_thunk_Notifier_dispswobjGetNotificationShare(struct DispSwObj *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_Notifier))); +} + +// dispswobjSetNotificationShare: virtual inherited (notify) base (dispapi) +void __nvoc_up_thunk_Notifier_dispswobjSetNotificationShare(struct DispSwObj *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_Notifier)), pNotifShare); +} + +// dispswobjUnregisterEvent: virtual inherited (notify) base (dispapi) +NV_STATUS __nvoc_up_thunk_Notifier_dispswobjUnregisterEvent(struct DispSwObj *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_Notifier)), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +// dispswobjGetOrAllocNotifShare: virtual inherited (notify) base (dispapi) +NV_STATUS __nvoc_up_thunk_Notifier_dispswobjGetOrAllocNotifShare(struct DispSwObj *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_Notifier)), hNotifierClient, hNotifierResource, ppNotifShare); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__DispSwObj = +{ + /*numEntries=*/ 4, + 
/*pExportEntries=*/ __nvoc_exported_method_def_DispSwObj +}; + +void __nvoc_dtor_DisplayApi(DisplayApi*); +void __nvoc_dtor_DispSwObj(DispSwObj *pThis) { + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispSwObj(DispSwObj *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_DisplayApi(DisplayApi* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_DispSwObj(DispSwObj *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispSwObj_fail_DisplayApi; + __nvoc_init_dataField_DispSwObj(pThis); + + status = __nvoc_dispswobjConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispSwObj_fail__init; + goto __nvoc_ctor_DispSwObj_exit; // Success + +__nvoc_ctor_DispSwObj_fail__init: + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); +__nvoc_ctor_DispSwObj_fail_DisplayApi: +__nvoc_ctor_DispSwObj_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_DispSwObj_1(DispSwObj *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + // dispswobjCtrlCmdIsModePossible -- exported (id=0xc3720101) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__dispswobjCtrlCmdIsModePossible__ = &dispswobjCtrlCmdIsModePossible_IMPL; +#endif + + // dispswobjCtrlCmdIsModePossibleOrSettings -- exported (id=0xc3720102) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__dispswobjCtrlCmdIsModePossibleOrSettings__ = &dispswobjCtrlCmdIsModePossibleOrSettings_IMPL; +#endif + + // dispswobjCtrlCmdVideoAdaptiveRefreshRate -- exported (id=0xc3720103) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__dispswobjCtrlCmdVideoAdaptiveRefreshRate__ = &dispswobjCtrlCmdVideoAdaptiveRefreshRate_IMPL; +#endif + + // dispswobjCtrlCmdGetActiveViewportPointIn -- exported (id=0xc3720104) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x49u) + pThis->__dispswobjCtrlCmdGetActiveViewportPointIn__ = &dispswobjCtrlCmdGetActiveViewportPointIn_IMPL; +#endif +} // End __nvoc_init_funcTable_DispSwObj_1 with approximately 4 basic block(s). + + +// Initialize vtable(s) for 30 virtual method(s). +void __nvoc_init_funcTable_DispSwObj(DispSwObj *pThis) { + + // Initialize vtable(s) with 4 per-object function pointer(s). + __nvoc_init_funcTable_DispSwObj_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__DispSwObj(DispSwObj *pThis, RmHalspecOwner *pRmhalspecowner) { + + // Initialize pointers to inherited data. 
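/*
 * Illustrative sketch (editorial, not emitted by NVOC): the __nvoc_pbase_*
 * pointers assigned below cache each ancestor subobject so an up-cast is a
 * single load, whereas dynamicCast() resolves a cast by walking the
 * NVOC_CASTINFO relatives table defined above. The RTTI field names in the
 * loop are assumptions about the layout, not a definitive implementation:
 *
 *     // O(1) up-cast via the cached base pointer set up below
 *     struct Notifier *pNotify = pThis->__nvoc_pbase_Notifier;
 *
 *     // dynamicCast()-style walk over the castinfo relatives (sketch)
 *     for (NvU32 i = 0; i < pCastInfo->numRelatives; i++)
 *         if (pCastInfo->relatives[i]->pClassDef == pTargetClassDef)
 *             return (Dynamic *)((NvU8 *)pThis + pCastInfo->relatives[i]->offset);
 */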
+ pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource; // (rmres) super^2 + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier; // (inotify) super^3 + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier; // (notify) super^2 + pThis->__nvoc_pbase_DisplayApi = &pThis->__nvoc_base_DisplayApi; // (dispapi) super + pThis->__nvoc_pbase_DispSwObj = pThis; // (dispswobj) this + + // Recurse to superclass initialization function(s). + __nvoc_init__DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__DispSwObj.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4 + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispSwObj.metadata__DisplayApi.metadata__RmResource.metadata__RsResource; // (res) super^3 + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__DispSwObj.metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispSwObj.metadata__DisplayApi.metadata__RmResource; // (rmres) super^2 + pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier.__nvoc_metadata_ptr = &__nvoc_metadata__DispSwObj.metadata__DisplayApi.metadata__Notifier.metadata__INotifier; // (inotify) super^3 + pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr = &__nvoc_metadata__DispSwObj.metadata__DisplayApi.metadata__Notifier; // (notify) super^2 + pThis->__nvoc_base_DisplayApi.__nvoc_metadata_ptr = &__nvoc_metadata__DispSwObj.metadata__DisplayApi; // (dispapi) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__DispSwObj; // (dispswobj) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_DispSwObj(pThis); +} + +NV_STATUS __nvoc_objCreate_DispSwObj(DispSwObj **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + DispSwObj *pThis; + RmHalspecOwner *pRmhalspecowner; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(DispSwObj), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(DispSwObj)); + + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // pParent must be a valid object that derives from a halspec owner class. + NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_ARGUMENT); + + // Link the child into the parent unless flagged not to do so. 
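/*
 * Hedged usage sketch (caller-side names are assumed): a resource-server
 * alloc path would reach this constructor roughly as follows, with
 * createFlags == 0 giving the default behavior of linking the new object
 * under pParent and locating the RmHalspecOwner through it:
 *
 *     DispSwObj *pDispSwObj = NULL;
 *     NV_STATUS status = __nvoc_objCreate_DispSwObj(&pDispSwObj, pParent,
 *                                                   0, pCallContext, pParams);
 *     if (status != NV_OK)
 *         return status;
 *
 * Passing NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY skips the child link
 * below and uses pParent only for the halspec-owner lookup.
 */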
+ if (!(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init__DispSwObj(pThis, pRmhalspecowner); + status = __nvoc_ctor_DispSwObj(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispSwObj_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_DispSwObj_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(DispSwObj)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispSwObj(DispSwObj **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispSwObj(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x41f4f2 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +// Forward declarations for DispCommon +void __nvoc_init__DisplayApi(DisplayApi*, RmHalspecOwner *pRmhalspecowner); +void __nvoc_init__DispCommon(DispCommon*, RmHalspecOwner *pRmhalspecowner); +void __nvoc_init_funcTable_DispCommon(DispCommon*); +NV_STATUS __nvoc_ctor_DispCommon(DispCommon*, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_DispCommon(DispCommon*); +void __nvoc_dtor_DispCommon(DispCommon*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__DispCommon; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__DispCommon; + +// Down-thunk(s) to bridge DispCommon methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource 
*pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +PEVENTNOTIFICATION * __nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier); // super^2 +struct NotifShare * __nvoc_down_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier); // super^2 +void __nvoc_down_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare); // super^2 +NV_STATUS __nvoc_down_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super^2 +NV_STATUS __nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super^2 +NV_STATUS __nvoc_down_thunk_DisplayApi_resControl(struct RsResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_DisplayApi_rmresControl_Prologue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // super +void __nvoc_down_thunk_DisplayApi_rmresControl_Epilogue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // super + +// Up-thunk(s) to bridge DispCommon methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2 +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct 
RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2 +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2 +NvBool __nvoc_up_thunk_RmResource_dispapiAccessCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NvBool __nvoc_up_thunk_RmResource_dispapiShareCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_up_thunk_RmResource_dispapiGetMemInterMapParams(struct DisplayApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_dispapiCheckMemInterUnmap(struct DisplayApi *pRmResource, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_up_thunk_RmResource_dispapiGetMemoryMappingDescriptor(struct DisplayApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super +NV_STATUS __nvoc_up_thunk_RmResource_dispapiControlSerialization_Prologue(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_dispapiControlSerialization_Epilogue(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_dispapiCanCopy(struct DisplayApi *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiIsDuplicate(struct DisplayApi *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_dispapiPreDestruct(struct DisplayApi *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiControlFilter(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiMap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiUnmap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_up_thunk_RsResource_dispapiIsPartialUnmapSupported(struct DisplayApi *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiMapTo(struct DisplayApi *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_dispapiUnmapFrom(struct DisplayApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_dispapiGetRefCount(struct DisplayApi *pResource); // super +void __nvoc_up_thunk_RsResource_dispapiAddAdditionalDependants(struct RsClient *pClient, struct DisplayApi *pResource, RsResourceRef *pReference); // super +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispapiGetNotificationListPtr(struct DisplayApi *pNotifier); // super +struct NotifShare * __nvoc_up_thunk_Notifier_dispapiGetNotificationShare(struct DisplayApi *pNotifier); // super +void __nvoc_up_thunk_Notifier_dispapiSetNotificationShare(struct DisplayApi *pNotifier, struct NotifShare *pNotifShare); // super +NV_STATUS __nvoc_up_thunk_Notifier_dispapiUnregisterEvent(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super +NV_STATUS 
__nvoc_up_thunk_Notifier_dispapiGetOrAllocNotifShare(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super +NV_STATUS __nvoc_up_thunk_DisplayApi_dispcmnControl(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_DisplayApi_dispcmnControl_Prologue(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // this +void __nvoc_up_thunk_DisplayApi_dispcmnControl_Epilogue(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); // this +NvBool __nvoc_up_thunk_RmResource_dispcmnAccessCallback(struct DispCommon *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NvBool __nvoc_up_thunk_RmResource_dispcmnShareCallback(struct DispCommon *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispcmnGetMemInterMapParams(struct DispCommon *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispcmnCheckMemInterUnmap(struct DispCommon *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispcmnGetMemoryMappingDescriptor(struct DispCommon *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispcmnControlSerialization_Prologue(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_dispcmnControlSerialization_Epilogue(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_dispcmnCanCopy(struct DispCommon *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispcmnIsDuplicate(struct DispCommon *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_dispcmnPreDestruct(struct DispCommon *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispcmnControlFilter(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispcmnMap(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispcmnUnmap(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_RsResource_dispcmnIsPartialUnmapSupported(struct DispCommon *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispcmnMapTo(struct DispCommon *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispcmnUnmapFrom(struct DispCommon *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_dispcmnGetRefCount(struct DispCommon *pResource); // this +void __nvoc_up_thunk_RsResource_dispcmnAddAdditionalDependants(struct RsClient *pClient, struct DispCommon *pResource, RsResourceRef *pReference); // this +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispcmnGetNotificationListPtr(struct DispCommon *pNotifier); // this +struct NotifShare * __nvoc_up_thunk_Notifier_dispcmnGetNotificationShare(struct 
DispCommon *pNotifier); // this +void __nvoc_up_thunk_Notifier_dispcmnSetNotificationShare(struct DispCommon *pNotifier, struct NotifShare *pNotifShare); // this +NV_STATUS __nvoc_up_thunk_Notifier_dispcmnUnregisterEvent(struct DispCommon *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // this +NV_STATUS __nvoc_up_thunk_Notifier_dispcmnGetOrAllocNotifShare(struct DispCommon *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispCommon = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispCommon), + /*classId=*/ classId(DispCommon), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispCommon", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispCommon, + /*pCastInfo=*/ &__nvoc_castinfo__DispCommon, + /*pExportInfo=*/ &__nvoc_export_info__DispCommon +}; + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_DispCommon[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetCapsV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730101u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetCapsV2" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4au) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetNumHeads_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4au) + /*flags=*/ 0x4au, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730102u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetNumHeads" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetScanline_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730104u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetScanline" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x82004au) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetSuppported_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x82004au) + /*flags=*/ 0x82004au, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730107u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetSuppported" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x848u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetConnectState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x848u) + /*flags=*/ 0x848u, + 
/*accessRight=*/0x0u, + /*methodId=*/ 0x730108u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetConnectState" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetHotplugConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730109u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetHotplugConfig" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetHeadRoutingMap_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73010bu, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetHeadRoutingMap" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetActive_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73010cu, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetActive" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemValidateSrm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730118u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_VALIDATE_SRM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemValidateSrm" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetSrmStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730119u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_SRM_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetSrmStatus" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemHdcpRevocationCheck_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73011bu, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_HDCP_REVOCATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemHdcpRevocationCheck" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ 
(void (*)(void)) dispcmnCtrlCmdSystemUpdateSrm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73011cu, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_UPDATE_SRM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemUpdateSrm" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetBootDisplays_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73011eu, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetBootDisplays" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetHotplugUnplugState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73012du, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetHotplugUnplugState" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemArmLightweightSupervisor_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73012fu, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemArmLightweightSupervisor" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemConfigVrrPstateSwitch_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730134u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemConfigVrrPstateSwitch" +#endif + }, + { /* [16] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*flags=*/ 0x40u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73013du, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemQueryDisplayIdsWithMux" +#endif + }, + { /* [17] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemAllocateDisplayBandwidth_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730143u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS), + /*pClassInfo=*/ 
&(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemAllocateDisplayBandwidth" +#endif + }, + { /* [18] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetHotplugEventConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730144u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetHotplugEventConfig" +#endif + }, + { /* [19] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemSetHotplugEventConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730145u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemSetHotplugEventConfig" +#endif + }, + { /* [20] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemCheckSidebandI2cSupport_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*flags=*/ 0x40u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73014bu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemCheckSidebandI2cSupport" +#endif + }, + { /* [21] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemInternalAllocateDisplayBandwidth_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc4u) + /*flags=*/ 0xc4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730157u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_INTERNAL_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemInternalAllocateDisplayBandwidth" +#endif + }, + { /* [22] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetI2cPortid_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730211u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetI2cPortid" +#endif + }, + { /* [23] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x820046u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetType_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x820046u) + /*flags=*/ 0x820046u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730240u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetType" +#endif + }, + { /* [24] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ 
(void (*)(void)) dispcmnCtrlCmdSpecificFakeDevice_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730243u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificFakeDevice" +#endif + }, + { /* [25] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetEdidV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730245u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetEdidV2" +#endif + }, + { /* [26] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetEdidV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730246u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetEdidV2" +#endif + }, + { /* [27] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetConnectorData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730250u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetConnectorData" +#endif + }, + { /* [28] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetHdcpRepeaterInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730260u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_HDCP_REPEATER_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetHdcpRepeaterInfo" +#endif + }, + { /* [29] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetHdmiEnable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730273u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetHdmiEnable" +#endif + }, + { /* [30] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificCtrlHdmi_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730274u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ 
"dispcmnCtrlCmdSpecificCtrlHdmi" +#endif + }, + { /* [31] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetHdmiAudioMutestream_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730275u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetHdmiAudioMutestream" +#endif + }, + { /* [32] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetHdcpState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730280u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_HDCP_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetHdcpState" +#endif + }, + { /* [33] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetHdcpDiagnostics_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730281u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_HDCP_DIAGNOSTICS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetHdcpDiagnostics" +#endif + }, + { /* [34] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificHdcpCtrl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730282u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_HDCP_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificHdcpCtrl" +#endif + }, + { /* [35] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetAllHeadMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730287u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetAllHeadMask" +#endif + }, + { /* [36] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetOdPacket_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730288u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetOdPacket" +#endif + }, + { /* [37] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetOdPacketCtrl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*flags=*/ 0x40u, + /*accessRight=*/0x0u, + 
/*methodId=*/ 0x730289u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetOdPacketCtrl" +#endif + }, + { /* [38] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetPclkLimit_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*flags=*/ 0x40u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73028au, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetPclkLimit" +#endif + }, + { /* [39] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x46u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificOrGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x46u) + /*flags=*/ 0x46u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73028bu, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificOrGetInfo" +#endif + }, + { /* [40] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificHdcpKsvListValidate_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73028du, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_HDCP_KSVLIST_VALIDATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificHdcpKsvListValidate" +#endif + }, + { /* [41] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificHdcpUpdate_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73028eu, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_HDCP_UPDATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificHdcpUpdate" +#endif + }, + { /* [42] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetHdmiSinkCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730293u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetHdmiSinkCaps" +#endif + }, + { /* [43] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetMonitorPower_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*flags=*/ 0x40u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730295u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetMonitorPower" +#endif + }, + { /* [44] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else 
+ /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73029au, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig" +#endif + }, + { /* [45] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetRegionalCrcs_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a0u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetRegionalCrcs" +#endif + }, + { /* [46] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificApplyEdidOverrideV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*flags=*/ 0x40u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a1u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificApplyEdidOverrideV2" +#endif + }, + { /* [47] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetHdmiGpuCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*flags=*/ 0x40u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a2u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetHdmiGpuCaps" +#endif + }, + { /* [48] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificDisplayChange_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*flags=*/ 0x40u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a4u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificDisplayChange" +#endif + }, + { /* [49] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetHdmiScdcData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a6u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetHdmiScdcData" +#endif + }, + { /* [50] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificIsDirectmodeDisplay_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + /*flags=*/ 0x40u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a7u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS), + /*pClassInfo=*/ 
&(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdSpecificIsDirectmodeDisplay"
+#endif
+    },
+    { /* [51] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x7302a8u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation"
+#endif
+    },
+    { /* [52] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetSharedGenericPacket_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x7302a9u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdSpecificSetSharedGenericPacket"
+#endif
+    },
+    { /* [53] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x7302aau,
+        /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdSpecificAcquireSharedGenericPacket"
+#endif
+    },
+    { /* [54] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x7302abu,
+        /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdSpecificReleaseSharedGenericPacket"
+#endif
+    },
+    { /* [55] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificDispI2cReadWrite_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x7302acu,
+        /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdSpecificDispI2cReadWrite"
+#endif
+    },
+    { /* [56] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetValidHeadWindowAssignment_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+        /*flags=*/ 0x48u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x7302adu,
+        /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdSpecificGetValidHeadWindowAssignment"
+#endif
+    },
+    { /* [57] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificDefaultAdaptivesyncDisplay_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
+        /*flags=*/ 0x40u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x7302aeu,
+        /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_DEFAULT_ADAPTIVESYNC_DISPLAY_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdSpecificDefaultAdaptivesyncDisplay"
+#endif
+    },
+    { /* [58] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdInternalGetHotplugUnplugState_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+        /*flags=*/ 0xc0u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x730401u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdInternalGetHotplugUnplugState"
+#endif
+    },
+    { /* [59] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdFrlConfigMacroPad_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x730502u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdFrlConfigMacroPad"
+#endif
+    },
+    { /* [60] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4au)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetInfo_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4au)
+        /*flags=*/ 0x4au,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731140u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DFP_GET_INFO_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDfpGetInfo"
+#endif
+    },
+    { /* [61] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetDisplayportDongleInfo_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+        /*flags=*/ 0x48u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731142u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDfpGetDisplayportDongleInfo"
+#endif
+    },
+    { /* [62] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpSetEldAudioCaps_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731144u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDfpSetEldAudioCaps"
+#endif
+    },
+    { /* [63] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpUpdateDynamicDfpCache_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x73114eu,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDfpUpdateDynamicDfpCache"
+#endif
+    },
+    { /* [64] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpSetAudioEnable_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
+        /*flags=*/ 0x40u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731150u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDfpSetAudioEnable"
+#endif
+    },
+    { /* [65] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpAssignSor_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731152u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDfpAssignSor"
+#endif
+    },
+    { /* [66] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetPadlinkMask_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731153u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDfpGetPadlinkMask"
+#endif
+    },
+    { /* [67] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpConfigTwoHeadOneOr_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731156u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDfpConfigTwoHeadOneOr"
+#endif
+    },
+    { /* [68] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpDscCrcControl_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731157u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDfpDscCrcControl"
+#endif
+    },
+    { /* [69] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpInitMuxData_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
+        /*flags=*/ 0x40u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731158u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDfpInitMuxData"
+#endif
+    },
+    { /* [70] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetDsiModeTiming_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731166u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDfpGetDsiModeTiming"
+#endif
+    },
+    { /* [71] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x46u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetFixedModeTiming_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x46u)
+        /*flags=*/ 0x46u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731172u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDfpGetFixedModeTiming"
+#endif
+    },
+    { /* [72] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4au)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpEdpDriverUnload_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4au)
+        /*flags=*/ 0x4au,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731176u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DFP_EDP_DRIVER_UNLOAD_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDfpEdpDriverUnload"
+#endif
+    },
+    { /* [73] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemSetRegionRamRectangles_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731177u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_SYSTEM_SET_REGION_RAM_RECTANGLES_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdSystemSetRegionRamRectangles"
+#endif
+    },
+    { /* [74] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemConfigureSafetyInterrupts_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731178u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_SYSTEM_CONFIGURE_SAFETY_INTERRUPTS_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdSystemConfigureSafetyInterrupts"
+#endif
+    },
+    { /* [75] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpSetForceBlackPixels_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+        /*flags=*/ 0x48u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731179u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DFP_SET_FORCE_BLACK_PIXELS_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDfpSetForceBlackPixels"
+#endif
+    },
+    { /* [76] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x844u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpAuxchCtrl_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x844u)
+        /*flags=*/ 0x844u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731341u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_AUXCH_CTRL_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpAuxchCtrl"
+#endif
+    },
+    { /* [77] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x844u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpCtrl_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x844u)
+        /*flags=*/ 0x844u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731343u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_CTRL_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpCtrl"
+#endif
+    },
+    { /* [78] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetLaneData_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731345u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_LANE_DATA_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpGetLaneData"
+#endif
+    },
+    { /* [79] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetLaneData_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731346u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_LANE_DATA_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpSetLaneData"
+#endif
+    },
+    { /* [80] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetTestpattern_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731347u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpSetTestpattern"
+#endif
+    },
+    { /* [81] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetTestpattern_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731348u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpGetTestpattern"
+#endif
+    },
+    { /* [82] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731351u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data"
+#endif
+    },
+    { /* [83] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731352u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data"
+#endif
+    },
+    { /* [84] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpMainLinkCtrl_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731356u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpMainLinkCtrl"
+#endif
+    },
+    { /* [85] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetAudioMuteStream_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731359u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpSetAudioMuteStream"
+#endif
+    },
+    { /* [86] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpTopologyAllocateDisplayId_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x73135bu,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpTopologyAllocateDisplayId"
+#endif
+    },
+    { /* [87] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpTopologyFreeDisplayId_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x73135cu,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpTopologyFreeDisplayId"
+#endif
+    },
+    { /* [88] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetLinkConfig_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731360u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpGetLinkConfig"
+#endif
+    },
+    { /* [89] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetEDPData_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731361u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_EDP_DATA_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpGetEDPData"
+#endif
+    },
+    { /* [90] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigStream_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731362u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpConfigStream"
+#endif
+    },
+    { /* [91] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetRateGov_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731363u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpSetRateGov"
+#endif
+    },
+    { /* [92] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetManualDisplayPort_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731365u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpSetManualDisplayPort"
+#endif
+    },
+    { /* [93] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetEcf_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731366u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_ECF_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpSetEcf"
+#endif
+    },
+    { /* [94] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSendACT_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731367u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpSendACT"
+#endif
+    },
+    { /* [95] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x820046u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetCaps_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x820046u)
+        /*flags=*/ 0x820046u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731369u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpGetCaps"
+#endif
+    },
+    { /* [96] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGenerateFakeInterrupt_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
+        /*flags=*/ 0x4u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x73136bu,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpGenerateFakeInterrupt"
+#endif
+    },
+    { /* [97] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigRadScratchReg_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x73136cu,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpConfigRadScratchReg"
+#endif
+    },
+    { /* [98] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigSingleHeadMultiStream_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x73136eu,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpConfigSingleHeadMultiStream"
+#endif
+    },
+    { /* [99] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetTriggerSelect_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x73136fu,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpSetTriggerSelect"
+#endif
+    },
+    { /* [100] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetTriggerAll_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731370u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpSetTriggerAll"
+#endif
+    },
+    { /* [101] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetAuxLogData_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731373u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpGetAuxLogData"
+#endif
+    },
+    { /* [102] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigIndexedLinkRates_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731377u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpConfigIndexedLinkRates"
+#endif
+    },
+    { /* [103] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetStereoMSAProperties_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731378u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpSetStereoMSAProperties"
+#endif
+    },
+    { /* [104] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigureFec_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x73137au,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpConfigureFec"
+#endif
+    },
+    { /* [105] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigMacroPad_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x73137bu,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpConfigMacroPad"
+#endif
+    },
+    { /* [106] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetGenericInfoframe_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x73137eu,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpGetGenericInfoframe"
+#endif
+    },
+    { /* [107] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetMsaAttributes_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x73137fu,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpGetMsaAttributes"
+#endif
+    },
+    { /* [108] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetMSAPropertiesv2_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731381u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_V2_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpSetMSAPropertiesv2"
+#endif
+    },
+    { /* [109] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetLevelInfoTableData_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731387u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_SET_LEVEL_INFO_TABLE_DATA_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpSetLevelInfoTableData"
+#endif
+    },
+    { /* [110] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetLevelInfoTableData_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x731388u,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_LEVEL_INFO_TABLE_DATA_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDpGetLevelInfoTableData"
+#endif
+    },
+    { /* [111] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDPGetCableIDInfoFromMacro_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+        /*flags=*/ 0x44u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x73138du,
+        /*paramSize=*/ sizeof(NV0073_CTRL_DP_USBC_CABLEID_INFO_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "dispcmnCtrlCmdDPGetCableIDInfoFromMacro"
+#endif
+    },
+
+};
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+static const struct NVOC_METADATA__DispCommon __nvoc_metadata__DispCommon = {
+    .rtti.pClassDef = &__nvoc_class_def_DispCommon, // (dispcmn) this
+    .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispCommon,
+    .rtti.offset = 0,
+    .metadata__DisplayApi.rtti.pClassDef = &__nvoc_class_def_DisplayApi, // (dispapi) super
+    .metadata__DisplayApi.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__DisplayApi.rtti.offset = NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi),
+    .metadata__DisplayApi.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2
+    .metadata__DisplayApi.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__DisplayApi.metadata__RmResource.rtti.offset = NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource),
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource),
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
+    .metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3
+    .metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
+    .metadata__DisplayApi.metadata__Notifier.rtti.pClassDef = &__nvoc_class_def_Notifier, // (notify) super^2
+    .metadata__DisplayApi.metadata__Notifier.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__DisplayApi.metadata__Notifier.rtti.offset = NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_Notifier),
+    .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti.pClassDef = &__nvoc_class_def_INotifier, // (inotify) super^3
+    .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti.offset = NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier),
+
+    .vtable.__dispcmnControl__ = &__nvoc_up_thunk_DisplayApi_dispcmnControl, // virtual inherited (dispapi) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiControl__ = &dispapiControl_IMPL, // virtual override (res) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_DisplayApi_resControl, // virtual
+    .vtable.__dispcmnControl_Prologue__ = &__nvoc_up_thunk_DisplayApi_dispcmnControl_Prologue, // virtual inherited (dispapi) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiControl_Prologue__ = &dispapiControl_Prologue_IMPL, // virtual override (res) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControl_Prologue__ = &__nvoc_down_thunk_DisplayApi_rmresControl_Prologue, // virtual override (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual
+    .vtable.__dispcmnControl_Epilogue__ = &__nvoc_up_thunk_DisplayApi_dispcmnControl_Epilogue, // virtual inherited (dispapi) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiControl_Epilogue__ = &dispapiControl_Epilogue_IMPL, // virtual override (res) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &__nvoc_down_thunk_DisplayApi_rmresControl_Epilogue, // virtual override (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual
+    .vtable.__dispcmnAccessCallback__ = &__nvoc_up_thunk_RmResource_dispcmnAccessCallback, // virtual inherited (rmres) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiAccessCallback__ = &__nvoc_up_thunk_RmResource_dispapiAccessCallback, // virtual inherited (rmres) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual
+    .vtable.__dispcmnShareCallback__ = &__nvoc_up_thunk_RmResource_dispcmnShareCallback, // virtual inherited (rmres) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiShareCallback__ = &__nvoc_up_thunk_RmResource_dispapiShareCallback, // virtual inherited (rmres) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL, // virtual override (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual
+    .vtable.__dispcmnGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_dispcmnGetMemInterMapParams, // virtual inherited (rmres) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_dispapiGetMemInterMapParams, // virtual inherited (rmres) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual
+    .vtable.__dispcmnCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispcmnCheckMemInterUnmap, // virtual inherited (rmres) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispapiCheckMemInterUnmap, // virtual inherited (rmres) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual
+    .vtable.__dispcmnGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_dispcmnGetMemoryMappingDescriptor, // virtual inherited (rmres) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_dispapiGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual
+    .vtable.__dispcmnControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispcmnControlSerialization_Prologue, // virtual inherited (rmres) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispapiControlSerialization_Prologue, // virtual inherited (rmres) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual
+    .vtable.__dispcmnControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispcmnControlSerialization_Epilogue, // virtual inherited (rmres) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispapiControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual
+    .vtable.__dispcmnCanCopy__ = &__nvoc_up_thunk_RsResource_dispcmnCanCopy, // virtual inherited (res) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiCanCopy__ = &__nvoc_up_thunk_RsResource_dispapiCanCopy, // virtual inherited (res) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual
+    .vtable.__dispcmnIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispcmnIsDuplicate, // virtual inherited (res) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispapiIsDuplicate, // virtual inherited (res) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual
+    .vtable.__dispcmnPreDestruct__ = &__nvoc_up_thunk_RsResource_dispcmnPreDestruct, // virtual inherited (res) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiPreDestruct__ = &__nvoc_up_thunk_RsResource_dispapiPreDestruct, // virtual inherited (res) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual
+    .vtable.__dispcmnControlFilter__ = &__nvoc_up_thunk_RsResource_dispcmnControlFilter, // virtual inherited (res) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiControlFilter__ = &__nvoc_up_thunk_RsResource_dispapiControlFilter, // virtual inherited (res) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual
+    .vtable.__dispcmnMap__ = &__nvoc_up_thunk_RsResource_dispcmnMap, // virtual inherited (res) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiMap__ = &__nvoc_up_thunk_RsResource_dispapiMap, // virtual inherited (res) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &resMap_IMPL, // virtual
+    .vtable.__dispcmnUnmap__ = &__nvoc_up_thunk_RsResource_dispcmnUnmap, // virtual inherited (res) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiUnmap__ = &__nvoc_up_thunk_RsResource_dispapiUnmap, // virtual inherited (res) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &resUnmap_IMPL, // virtual
+    .vtable.__dispcmnIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_dispcmnIsPartialUnmapSupported, // inline virtual inherited (res) base (dispapi) body
+    .metadata__DisplayApi.vtable.__dispapiIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_dispapiIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body
+    .vtable.__dispcmnMapTo__ = &__nvoc_up_thunk_RsResource_dispcmnMapTo, // virtual inherited (res) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiMapTo__ = &__nvoc_up_thunk_RsResource_dispapiMapTo, // virtual inherited (res) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual
+    .vtable.__dispcmnUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispcmnUnmapFrom, // virtual inherited (res) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispapiUnmapFrom, // virtual inherited (res) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual
+    .vtable.__dispcmnGetRefCount__ = &__nvoc_up_thunk_RsResource_dispcmnGetRefCount, // virtual inherited (res) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiGetRefCount__ = &__nvoc_up_thunk_RsResource_dispapiGetRefCount, // virtual inherited (res) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual
+    .vtable.__dispcmnAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispcmnAddAdditionalDependants, // virtual inherited (res) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispapiAddAdditionalDependants, // virtual inherited (res) base (rmres)
+    .metadata__DisplayApi.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res)
+    .metadata__DisplayApi.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual
+    .vtable.__dispcmnGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_dispcmnGetNotificationListPtr, // virtual inherited (notify) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_dispapiGetNotificationListPtr, // virtual inherited (notify) base (notify)
+    .metadata__DisplayApi.metadata__Notifier.vtable.__notifyGetNotificationListPtr__ = &notifyGetNotificationListPtr_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationListPtr__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr, // pure virtual
+    .vtable.__dispcmnGetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispcmnGetNotificationShare, // virtual inherited (notify) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiGetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispapiGetNotificationShare, // virtual inherited (notify) base (notify)
+    .metadata__DisplayApi.metadata__Notifier.vtable.__notifyGetNotificationShare__ = &notifyGetNotificationShare_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationShare, // pure virtual
+    .vtable.__dispcmnSetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispcmnSetNotificationShare, // virtual inherited (notify) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiSetNotificationShare__ = &__nvoc_up_thunk_Notifier_dispapiSetNotificationShare, // virtual inherited (notify) base (notify)
+    .metadata__DisplayApi.metadata__Notifier.vtable.__notifySetNotificationShare__ = &notifySetNotificationShare_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifySetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifySetNotificationShare, // pure virtual
+    .vtable.__dispcmnUnregisterEvent__ = &__nvoc_up_thunk_Notifier_dispcmnUnregisterEvent, // virtual inherited (notify) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiUnregisterEvent__ = &__nvoc_up_thunk_Notifier_dispapiUnregisterEvent, // virtual inherited (notify) base (notify)
+    .metadata__DisplayApi.metadata__Notifier.vtable.__notifyUnregisterEvent__ = &notifyUnregisterEvent_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyUnregisterEvent__ = &__nvoc_down_thunk_Notifier_inotifyUnregisterEvent, // pure virtual
+    .vtable.__dispcmnGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_dispcmnGetOrAllocNotifShare, // virtual inherited (notify) base (dispapi)
+    .metadata__DisplayApi.vtable.__dispapiGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_dispapiGetOrAllocNotifShare, // virtual inherited (notify) base (notify)
+    .metadata__DisplayApi.metadata__Notifier.vtable.__notifyGetOrAllocNotifShare__ = &notifyGetOrAllocNotifShare_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__DisplayApi.metadata__Notifier.metadata__INotifier.vtable.__inotifyGetOrAllocNotifShare__ = &__nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare, // pure virtual
+};
+
+
+// Dynamic down-casting information
+const struct NVOC_CASTINFO __nvoc_castinfo__DispCommon = {
+    .numRelatives = 8,
+    .relatives = {
+        &__nvoc_metadata__DispCommon.rtti, // [0]: (dispcmn) this
+        &__nvoc_metadata__DispCommon.metadata__DisplayApi.rtti, // [1]: (dispapi) super
+        &__nvoc_metadata__DispCommon.metadata__DisplayApi.metadata__RmResource.rtti, // [2]: (rmres) super^2
+        &__nvoc_metadata__DispCommon.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3
+        &__nvoc_metadata__DispCommon.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4
+        &__nvoc_metadata__DispCommon.metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3
+        &__nvoc_metadata__DispCommon.metadata__DisplayApi.metadata__Notifier.rtti, // [6]: (notify) super^2
+        &__nvoc_metadata__DispCommon.metadata__DisplayApi.metadata__Notifier.metadata__INotifier.rtti, // [7]: (inotify) super^3
+    }
+};
+
+// 26 up-thunk(s) defined to bridge methods in DispCommon to superclasses
+
+// dispcmnControl: virtual inherited (dispapi) base (dispapi)
+NV_STATUS __nvoc_up_thunk_DisplayApi_dispcmnControl(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return dispapiControl((struct DisplayApi *)(((unsigned char *) pDisplayApi) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi)), pCallContext, pParams);
+}
+
+// dispcmnControl_Prologue: virtual inherited (dispapi) base (dispapi)
+NV_STATUS __nvoc_up_thunk_DisplayApi_dispcmnControl_Prologue(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) {
+    return dispapiControl_Prologue((struct DisplayApi *)(((unsigned char *) pDisplayApi) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi)), pCallContext, pRsParams);
+}
+
+// dispcmnControl_Epilogue: virtual inherited (dispapi) base (dispapi)
+void __nvoc_up_thunk_DisplayApi_dispcmnControl_Epilogue(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) {
+    dispapiControl_Epilogue((struct DisplayApi *)(((unsigned char *) pDisplayApi) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi)), pCallContext, pRsParams);
+}
+
+// dispcmnAccessCallback: virtual inherited (rmres) base (dispapi)
+NvBool __nvoc_up_thunk_RmResource_dispcmnAccessCallback(struct DispCommon *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight);
+}
+
+// dispcmnShareCallback: virtual inherited (rmres) base (dispapi)
+NvBool __nvoc_up_thunk_RmResource_dispcmnShareCallback(struct DispCommon *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return rmresShareCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+// dispcmnGetMemInterMapParams: virtual inherited (rmres) base (dispapi)
+NV_STATUS __nvoc_up_thunk_RmResource_dispcmnGetMemInterMapParams(struct DispCommon *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), pParams);
+}
+
+// dispcmnCheckMemInterUnmap: virtual inherited (rmres) base (dispapi)
+NV_STATUS __nvoc_up_thunk_RmResource_dispcmnCheckMemInterUnmap(struct DispCommon *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), bSubdeviceHandleProvided);
+}
+
+// dispcmnGetMemoryMappingDescriptor: virtual inherited (rmres) base (dispapi)
+NV_STATUS __nvoc_up_thunk_RmResource_dispcmnGetMemoryMappingDescriptor(struct DispCommon *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), ppMemDesc);
+}
+
+// dispcmnControlSerialization_Prologue: virtual inherited (rmres) base (dispapi)
+NV_STATUS __nvoc_up_thunk_RmResource_dispcmnControlSerialization_Prologue(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// dispcmnControlSerialization_Epilogue: virtual inherited (rmres) base (dispapi)
+void __nvoc_up_thunk_RmResource_dispcmnControlSerialization_Epilogue(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// dispcmnCanCopy: virtual inherited (res) base (dispapi)
+NvBool __nvoc_up_thunk_RsResource_dispcmnCanCopy(struct DispCommon *pResource) {
+    return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// dispcmnIsDuplicate: virtual inherited (res) base (dispapi)
+NV_STATUS __nvoc_up_thunk_RsResource_dispcmnIsDuplicate(struct DispCommon *pResource, NvHandle hMemory, NvBool *pDuplicate) {
+    return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate);
+}
+
+// dispcmnPreDestruct: virtual inherited (res) base (dispapi)
+void __nvoc_up_thunk_RsResource_dispcmnPreDestruct(struct DispCommon *pResource) {
+    resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// dispcmnControlFilter: virtual inherited (res) base (dispapi)
+NV_STATUS __nvoc_up_thunk_RsResource_dispcmnControlFilter(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// dispcmnMap: virtual inherited (res) base (dispapi)
+NV_STATUS __nvoc_up_thunk_RsResource_dispcmnMap(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
+    return resMap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams, pCpuMapping);
+}
+
+// dispcmnUnmap: virtual inherited (res) base (dispapi)
+NV_STATUS __nvoc_up_thunk_RsResource_dispcmnUnmap(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
+    return resUnmap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pCpuMapping);
+}
+
+// dispcmnIsPartialUnmapSupported: inline virtual inherited (res) base (dispapi) body
+NvBool __nvoc_up_thunk_RsResource_dispcmnIsPartialUnmapSupported(struct DispCommon *pResource) {
+    return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// dispcmnMapTo: virtual inherited (res) base (dispapi)
+NV_STATUS __nvoc_up_thunk_RsResource_dispcmnMapTo(struct DispCommon *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// dispcmnUnmapFrom: virtual inherited (res) base (dispapi)
+NV_STATUS __nvoc_up_thunk_RsResource_dispcmnUnmapFrom(struct DispCommon *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// dispcmnGetRefCount: virtual inherited (res) base (dispapi)
+NvU32 __nvoc_up_thunk_RsResource_dispcmnGetRefCount(struct DispCommon *pResource) {
+    return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// dispcmnAddAdditionalDependants: virtual inherited (res) base (dispapi)
+void __nvoc_up_thunk_RsResource_dispcmnAddAdditionalDependants(struct RsClient *pClient, struct DispCommon *pResource, RsResourceRef *pReference) {
+    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference);
+}
+
+// dispcmnGetNotificationListPtr: virtual inherited (notify) base (dispapi)
+PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_dispcmnGetNotificationListPtr(struct DispCommon *pNotifier) {
+    return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_Notifier)));
+}
+
+// dispcmnGetNotificationShare: virtual inherited (notify) base (dispapi)
+struct NotifShare * __nvoc_up_thunk_Notifier_dispcmnGetNotificationShare(struct DispCommon *pNotifier) {
+    return notifyGetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_Notifier)));
+}
+
+// dispcmnSetNotificationShare: virtual inherited (notify) base (dispapi)
+void __nvoc_up_thunk_Notifier_dispcmnSetNotificationShare(struct DispCommon *pNotifier, struct NotifShare *pNotifShare) {
+    notifySetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_Notifier)), pNotifShare);
+}
+
+// dispcmnUnregisterEvent: virtual inherited (notify) base (dispapi)
+NV_STATUS __nvoc_up_thunk_Notifier_dispcmnUnregisterEvent(struct DispCommon *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
+    return notifyUnregisterEvent((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_Notifier)), hNotifierClient, hNotifierResource, hEventClient, hEvent);
+}
+
+// dispcmnGetOrAllocNotifShare: virtual inherited (notify) base (dispapi)
+NV_STATUS __nvoc_up_thunk_Notifier_dispcmnGetOrAllocNotifShare(struct DispCommon *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
+    return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_Notifier)), hNotifierClient, hNotifierResource, ppNotifShare);
+}
+
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__DispCommon =
+{
+    /*numEntries=*/ 112,
+    /*pExportEntries=*/ __nvoc_exported_method_def_DispCommon
+};
+
+void __nvoc_dtor_DisplayApi(DisplayApi*);
+void __nvoc_dtor_DispCommon(DispCommon *pThis) {
+    __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_DispCommon(DispCommon *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_DisplayApi(DisplayApi* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_DispCommon(DispCommon *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_DispCommon_fail_DisplayApi;
+    __nvoc_init_dataField_DispCommon(pThis);
+
+    status = __nvoc_dispcmnConstruct(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_DispCommon_fail__init;
+    goto __nvoc_ctor_DispCommon_exit; // Success
+
+__nvoc_ctor_DispCommon_fail__init:
+    __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi);
+__nvoc_ctor_DispCommon_fail_DisplayApi:
+__nvoc_ctor_DispCommon_exit:
+
+    return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_DispCommon_1(DispCommon *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+
+    // dispcmnCtrlCmdSpecificGetHdcpState -- exported (id=0x730280)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSpecificGetHdcpState__ = &dispcmnCtrlCmdSpecificGetHdcpState_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSpecificHdcpCtrl -- exported (id=0x730282)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSpecificHdcpCtrl__ = &dispcmnCtrlCmdSpecificHdcpCtrl_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSpecificGetHdcpRepeaterInfo -- exported (id=0x730260)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSpecificGetHdcpRepeaterInfo__ = &dispcmnCtrlCmdSpecificGetHdcpRepeaterInfo_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSpecificGetHdcpDiagnostics -- exported (id=0x730281)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSpecificGetHdcpDiagnostics__ = &dispcmnCtrlCmdSpecificGetHdcpDiagnostics_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSpecificHdcpKsvListValidate -- exported (id=0x73028d)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSpecificHdcpKsvListValidate__ = &dispcmnCtrlCmdSpecificHdcpKsvListValidate_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSpecificHdcpUpdate -- exported (id=0x73028e)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSpecificHdcpUpdate__ = &dispcmnCtrlCmdSpecificHdcpUpdate_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemValidateSrm -- exported (id=0x730118)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSystemValidateSrm__ = &dispcmnCtrlCmdSystemValidateSrm_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemGetSrmStatus -- exported (id=0x730119)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSystemGetSrmStatus__ = &dispcmnCtrlCmdSystemGetSrmStatus_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemHdcpRevocationCheck -- exported (id=0x73011b)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSystemHdcpRevocationCheck__ = &dispcmnCtrlCmdSystemHdcpRevocationCheck_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemUpdateSrm -- exported (id=0x73011c)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSystemUpdateSrm__ = &dispcmnCtrlCmdSystemUpdateSrm_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemGetCapsV2 -- exported (id=0x730101)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+    pThis->__dispcmnCtrlCmdSystemGetCapsV2__ = &dispcmnCtrlCmdSystemGetCapsV2_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemGetNumHeads -- exported (id=0x730102)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4au)
+    pThis->__dispcmnCtrlCmdSystemGetNumHeads__ = &dispcmnCtrlCmdSystemGetNumHeads_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemGetScanline -- exported (id=0x730104)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+    pThis->__dispcmnCtrlCmdSystemGetScanline__ = &dispcmnCtrlCmdSystemGetScanline_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemGetSuppported -- exported (id=0x730107)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x82004au)
+    pThis->__dispcmnCtrlCmdSystemGetSuppported__ = &dispcmnCtrlCmdSystemGetSuppported_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemGetConnectState -- exported (id=0x730108)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x848u)
+    pThis->__dispcmnCtrlCmdSystemGetConnectState__ = &dispcmnCtrlCmdSystemGetConnectState_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemGetHotplugUnplugState -- exported (id=0x73012d)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
+    pThis->__dispcmnCtrlCmdSystemGetHotplugUnplugState__ = &dispcmnCtrlCmdSystemGetHotplugUnplugState_IMPL;
+#endif
+
+    // dispcmnCtrlCmdInternalGetHotplugUnplugState -- exported (id=0x730401)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__dispcmnCtrlCmdInternalGetHotplugUnplugState__ = &dispcmnCtrlCmdInternalGetHotplugUnplugState_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemGetHeadRoutingMap -- exported (id=0x73010b)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+    pThis->__dispcmnCtrlCmdSystemGetHeadRoutingMap__ = &dispcmnCtrlCmdSystemGetHeadRoutingMap_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemGetActive -- exported (id=0x73010c)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+    pThis->__dispcmnCtrlCmdSystemGetActive__ = &dispcmnCtrlCmdSystemGetActive_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemGetBootDisplays -- exported (id=0x73011e)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSystemGetBootDisplays__ = &dispcmnCtrlCmdSystemGetBootDisplays_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemQueryDisplayIdsWithMux -- exported (id=0x73013d)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
+    pThis->__dispcmnCtrlCmdSystemQueryDisplayIdsWithMux__ = &dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemCheckSidebandI2cSupport -- exported (id=0x73014b)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u)
+    pThis->__dispcmnCtrlCmdSystemCheckSidebandI2cSupport__ = &dispcmnCtrlCmdSystemCheckSidebandI2cSupport_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemAllocateDisplayBandwidth -- exported (id=0x730143)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
+    pThis->__dispcmnCtrlCmdSystemAllocateDisplayBandwidth__ = &dispcmnCtrlCmdSystemAllocateDisplayBandwidth_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemInternalAllocateDisplayBandwidth -- exported (id=0x730157)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc4u)
+    pThis->__dispcmnCtrlCmdSystemInternalAllocateDisplayBandwidth__ = &dispcmnCtrlCmdSystemInternalAllocateDisplayBandwidth_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemGetHotplugConfig -- exported (id=0x730109)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSystemGetHotplugConfig__ = &dispcmnCtrlCmdSystemGetHotplugConfig_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemGetHotplugEventConfig -- exported (id=0x730144)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSystemGetHotplugEventConfig__ = &dispcmnCtrlCmdSystemGetHotplugEventConfig_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemSetHotplugEventConfig -- exported (id=0x730145)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSystemSetHotplugEventConfig__ = &dispcmnCtrlCmdSystemSetHotplugEventConfig_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemArmLightweightSupervisor -- exported (id=0x73012f)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSystemArmLightweightSupervisor__ = &dispcmnCtrlCmdSystemArmLightweightSupervisor_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemSetRegionRamRectangles -- exported (id=0x731177)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSystemSetRegionRamRectangles__ = &dispcmnCtrlCmdSystemSetRegionRamRectangles_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemConfigureSafetyInterrupts -- exported (id=0x731178)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSystemConfigureSafetyInterrupts__ = &dispcmnCtrlCmdSystemConfigureSafetyInterrupts_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSystemConfigVrrPstateSwitch -- exported (id=0x730134)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__dispcmnCtrlCmdSystemConfigVrrPstateSwitch__ = &dispcmnCtrlCmdSystemConfigVrrPstateSwitch_IMPL;
+#endif
+
+    // dispcmnCtrlCmdSpecificGetType
-- exported (id=0x730240) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x820046u) + pThis->__dispcmnCtrlCmdSpecificGetType__ = &dispcmnCtrlCmdSpecificGetType_IMPL; +#endif + + // dispcmnCtrlCmdSpecificGetEdidV2 -- exported (id=0x730245) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificGetEdidV2__ = &dispcmnCtrlCmdSpecificGetEdidV2_IMPL; +#endif + + // dispcmnCtrlCmdSpecificSetEdidV2 -- exported (id=0x730246) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificSetEdidV2__ = &dispcmnCtrlCmdSpecificSetEdidV2_IMPL; +#endif + + // dispcmnCtrlCmdSpecificFakeDevice -- exported (id=0x730243) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificFakeDevice__ = &dispcmnCtrlCmdSpecificFakeDevice_IMPL; +#endif + + // dispcmnCtrlCmdSpecificGetConnectorData -- exported (id=0x730250) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__dispcmnCtrlCmdSpecificGetConnectorData__ = &dispcmnCtrlCmdSpecificGetConnectorData_IMPL; +#endif + + // dispcmnCtrlCmdSpecificSetHdmiEnable -- exported (id=0x730273) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificSetHdmiEnable__ = &dispcmnCtrlCmdSpecificSetHdmiEnable_IMPL; +#endif + + // dispcmnCtrlCmdSpecificCtrlHdmi -- exported (id=0x730274) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificCtrlHdmi__ = &dispcmnCtrlCmdSpecificCtrlHdmi_IMPL; +#endif + + // dispcmnCtrlCmdSpecificGetAllHeadMask -- exported (id=0x730287) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificGetAllHeadMask__ = &dispcmnCtrlCmdSpecificGetAllHeadMask_IMPL; +#endif + + // dispcmnCtrlCmdSpecificSetOdPacket -- exported (id=0x730288) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificSetOdPacket__ = &dispcmnCtrlCmdSpecificSetOdPacket_IMPL; +#endif + + // dispcmnCtrlCmdSpecificAcquireSharedGenericPacket -- exported (id=0x7302aa) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificAcquireSharedGenericPacket__ = &dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_IMPL; +#endif + + // dispcmnCtrlCmdSpecificSetSharedGenericPacket -- exported (id=0x7302a9) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificSetSharedGenericPacket__ = &dispcmnCtrlCmdSpecificSetSharedGenericPacket_IMPL; +#endif + + // dispcmnCtrlCmdSpecificReleaseSharedGenericPacket -- exported (id=0x7302ab) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificReleaseSharedGenericPacket__ = &dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_IMPL; +#endif + + // dispcmnCtrlCmdSpecificSetOdPacketCtrl -- exported (id=0x730289) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + pThis->__dispcmnCtrlCmdSpecificSetOdPacketCtrl__ = &dispcmnCtrlCmdSpecificSetOdPacketCtrl_IMPL; +#endif + + // dispcmnCtrlCmdSpecificOrGetInfo -- exported (id=0x73028b) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x46u) + pThis->__dispcmnCtrlCmdSpecificOrGetInfo__ = &dispcmnCtrlCmdSpecificOrGetInfo_IMPL; +#endif + + // dispcmnCtrlCmdSpecificGetPclkLimit -- exported (id=0x73028a) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + pThis->__dispcmnCtrlCmdSpecificGetPclkLimit__ = &dispcmnCtrlCmdSpecificGetPclkLimit_IMPL; +#endif + + // dispcmnCtrlCmdSpecificSetHdmiSinkCaps -- exported (id=0x730293) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificSetHdmiSinkCaps__ = &dispcmnCtrlCmdSpecificSetHdmiSinkCaps_IMPL; 
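+    // Note: every entry in this table follows the same guard pattern. The
+    // hex mask passed to NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG (0x44u,
+    // 0x48u, and so on) is the export-flag mask of the method named in the
+    // comment above it; when the build configuration disables any of those
+    // flag bits, the preprocessor drops the assignment and the function
+    // pointer keeps the zero value established by the portMemSet() in
+    // __nvoc_objCreate_DispCommon below.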
+#endif + + // dispcmnCtrlCmdSpecificSetMonitorPower -- exported (id=0x730295) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + pThis->__dispcmnCtrlCmdSpecificSetMonitorPower__ = &dispcmnCtrlCmdSpecificSetMonitorPower_IMPL; +#endif + + // dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig -- exported (id=0x73029a) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig__ = &dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_IMPL; +#endif + + // dispcmnCtrlCmdSpecificApplyEdidOverrideV2 -- exported (id=0x7302a1) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + pThis->__dispcmnCtrlCmdSpecificApplyEdidOverrideV2__ = &dispcmnCtrlCmdSpecificApplyEdidOverrideV2_IMPL; +#endif + + // dispcmnCtrlCmdSpecificGetI2cPortid -- exported (id=0x730211) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__dispcmnCtrlCmdSpecificGetI2cPortid__ = &dispcmnCtrlCmdSpecificGetI2cPortid_IMPL; +#endif + + // dispcmnCtrlCmdSpecificGetHdmiGpuCaps -- exported (id=0x7302a2) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + pThis->__dispcmnCtrlCmdSpecificGetHdmiGpuCaps__ = &dispcmnCtrlCmdSpecificGetHdmiGpuCaps_IMPL; +#endif + + // dispcmnCtrlCmdSpecificGetHdmiScdcData -- exported (id=0x7302a6) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificGetHdmiScdcData__ = &dispcmnCtrlCmdSpecificGetHdmiScdcData_IMPL; +#endif + + // dispcmnCtrlCmdSpecificIsDirectmodeDisplay -- exported (id=0x7302a7) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + pThis->__dispcmnCtrlCmdSpecificIsDirectmodeDisplay__ = &dispcmnCtrlCmdSpecificIsDirectmodeDisplay_IMPL; +#endif + + // dispcmnCtrlCmdSpecificDefaultAdaptivesyncDisplay -- exported (id=0x7302ae) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + pThis->__dispcmnCtrlCmdSpecificDefaultAdaptivesyncDisplay__ = &dispcmnCtrlCmdSpecificDefaultAdaptivesyncDisplay_IMPL; +#endif + + // dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation -- exported (id=0x7302a8) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation__ = &dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_IMPL; +#endif + + // dispcmnCtrlCmdSpecificDispI2cReadWrite -- exported (id=0x7302ac) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificDispI2cReadWrite__ = &dispcmnCtrlCmdSpecificDispI2cReadWrite_IMPL; +#endif + + // dispcmnCtrlCmdSpecificGetValidHeadWindowAssignment -- exported (id=0x7302ad) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__dispcmnCtrlCmdSpecificGetValidHeadWindowAssignment__ = &dispcmnCtrlCmdSpecificGetValidHeadWindowAssignment_IMPL; +#endif + + // dispcmnCtrlCmdSpecificSetHdmiAudioMutestream -- exported (id=0x730275) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificSetHdmiAudioMutestream__ = &dispcmnCtrlCmdSpecificSetHdmiAudioMutestream_IMPL; +#endif + + // dispcmnCtrlCmdSpecificDisplayChange -- exported (id=0x7302a4) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + pThis->__dispcmnCtrlCmdSpecificDisplayChange__ = &dispcmnCtrlCmdSpecificDisplayChange_IMPL; +#endif + + // dispcmnCtrlCmdDfpEdpDriverUnload -- exported (id=0x731176) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4au) + pThis->__dispcmnCtrlCmdDfpEdpDriverUnload__ = &dispcmnCtrlCmdDfpEdpDriverUnload_IMPL; +#endif + + // dispcmnCtrlCmdDfpSetForceBlackPixels -- exported (id=0x731179) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__dispcmnCtrlCmdDfpSetForceBlackPixels__ = 
&dispcmnCtrlCmdDfpSetForceBlackPixels_IMPL; +#endif + + // dispcmnCtrlCmdDfpGetInfo -- exported (id=0x731140) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4au) + pThis->__dispcmnCtrlCmdDfpGetInfo__ = &dispcmnCtrlCmdDfpGetInfo_IMPL; +#endif + + // dispcmnCtrlCmdDfpGetDisplayportDongleInfo -- exported (id=0x731142) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + pThis->__dispcmnCtrlCmdDfpGetDisplayportDongleInfo__ = &dispcmnCtrlCmdDfpGetDisplayportDongleInfo_IMPL; +#endif + + // dispcmnCtrlCmdDfpSetEldAudioCaps -- exported (id=0x731144) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDfpSetEldAudioCaps__ = &dispcmnCtrlCmdDfpSetEldAudioCaps_IMPL; +#endif + + // dispcmnCtrlCmdDfpSetAudioEnable -- exported (id=0x731150) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + pThis->__dispcmnCtrlCmdDfpSetAudioEnable__ = &dispcmnCtrlCmdDfpSetAudioEnable_IMPL; +#endif + + // dispcmnCtrlCmdDfpUpdateDynamicDfpCache -- exported (id=0x73114e) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDfpUpdateDynamicDfpCache__ = &dispcmnCtrlCmdDfpUpdateDynamicDfpCache_IMPL; +#endif + + // dispcmnCtrlCmdDfpAssignSor -- exported (id=0x731152) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDfpAssignSor__ = &dispcmnCtrlCmdDfpAssignSor_IMPL; +#endif + + // dispcmnCtrlCmdDfpDscCrcControl -- exported (id=0x731157) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDfpDscCrcControl__ = &dispcmnCtrlCmdDfpDscCrcControl_IMPL; +#endif + + // dispcmnCtrlCmdDfpInitMuxData -- exported (id=0x731158) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40u) + pThis->__dispcmnCtrlCmdDfpInitMuxData__ = &dispcmnCtrlCmdDfpInitMuxData_IMPL; +#endif + + // dispcmnCtrlCmdDfpGetDsiModeTiming -- exported (id=0x731166) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDfpGetDsiModeTiming__ = &dispcmnCtrlCmdDfpGetDsiModeTiming_IMPL; +#endif + + // dispcmnCtrlCmdDfpConfigTwoHeadOneOr -- exported (id=0x731156) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDfpConfigTwoHeadOneOr__ = &dispcmnCtrlCmdDfpConfigTwoHeadOneOr_IMPL; +#endif + + // dispcmnCtrlCmdDfpGetPadlinkMask -- exported (id=0x731153) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDfpGetPadlinkMask__ = &dispcmnCtrlCmdDfpGetPadlinkMask_IMPL; +#endif + + // dispcmnCtrlCmdDfpGetFixedModeTiming -- exported (id=0x731172) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x46u) + pThis->__dispcmnCtrlCmdDfpGetFixedModeTiming__ = &dispcmnCtrlCmdDfpGetFixedModeTiming_IMPL; +#endif + + // dispcmnCtrlCmdDpAuxchCtrl -- exported (id=0x731341) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x844u) + pThis->__dispcmnCtrlCmdDpAuxchCtrl__ = &dispcmnCtrlCmdDpAuxchCtrl_IMPL; +#endif + + // dispcmnCtrlCmdDpCtrl -- exported (id=0x731343) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x844u) + pThis->__dispcmnCtrlCmdDpCtrl__ = &dispcmnCtrlCmdDpCtrl_IMPL; +#endif + + // dispcmnCtrlCmdDpGetLaneData -- exported (id=0x731345) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpGetLaneData__ = &dispcmnCtrlCmdDpGetLaneData_IMPL; +#endif + + // dispcmnCtrlCmdDpSetLaneData -- exported (id=0x731346) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpSetLaneData__ = &dispcmnCtrlCmdDpSetLaneData_IMPL; +#endif + + // dispcmnCtrlCmdDpGetTestpattern -- exported (id=0x731348) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpGetTestpattern__ = 
&dispcmnCtrlCmdDpGetTestpattern_IMPL; +#endif + + // dispcmnCtrlCmdDpSetTestpattern -- exported (id=0x731347) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpSetTestpattern__ = &dispcmnCtrlCmdDpSetTestpattern_IMPL; +#endif + + // dispcmnCtrlCmdDpMainLinkCtrl -- exported (id=0x731356) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpMainLinkCtrl__ = &dispcmnCtrlCmdDpMainLinkCtrl_IMPL; +#endif + + // dispcmnCtrlCmdDpSetAudioMuteStream -- exported (id=0x731359) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpSetAudioMuteStream__ = &dispcmnCtrlCmdDpSetAudioMuteStream_IMPL; +#endif + + // dispcmnCtrlCmdDpGetLinkConfig -- exported (id=0x731360) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpGetLinkConfig__ = &dispcmnCtrlCmdDpGetLinkConfig_IMPL; +#endif + + // dispcmnCtrlCmdDpGetEDPData -- exported (id=0x731361) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpGetEDPData__ = &dispcmnCtrlCmdDpGetEDPData_IMPL; +#endif + + // dispcmnCtrlCmdDpTopologyAllocateDisplayId -- exported (id=0x73135b) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpTopologyAllocateDisplayId__ = &dispcmnCtrlCmdDpTopologyAllocateDisplayId_IMPL; +#endif + + // dispcmnCtrlCmdDpTopologyFreeDisplayId -- exported (id=0x73135c) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpTopologyFreeDisplayId__ = &dispcmnCtrlCmdDpTopologyFreeDisplayId_IMPL; +#endif + + // dispcmnCtrlCmdDpConfigStream -- exported (id=0x731362) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpConfigStream__ = &dispcmnCtrlCmdDpConfigStream_IMPL; +#endif + + // dispcmnCtrlCmdDpConfigSingleHeadMultiStream -- exported (id=0x73136e) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpConfigSingleHeadMultiStream__ = &dispcmnCtrlCmdDpConfigSingleHeadMultiStream_IMPL; +#endif + + // dispcmnCtrlCmdDpSetRateGov -- exported (id=0x731363) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpSetRateGov__ = &dispcmnCtrlCmdDpSetRateGov_IMPL; +#endif + + // dispcmnCtrlCmdDpSendACT -- exported (id=0x731367) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpSendACT__ = &dispcmnCtrlCmdDpSendACT_IMPL; +#endif + + // dispcmnCtrlCmdDpSetManualDisplayPort -- exported (id=0x731365) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpSetManualDisplayPort__ = &dispcmnCtrlCmdDpSetManualDisplayPort_IMPL; +#endif + + // dispcmnCtrlCmdDpGetCaps -- exported (id=0x731369) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x820046u) + pThis->__dispcmnCtrlCmdDpGetCaps__ = &dispcmnCtrlCmdDpGetCaps_IMPL; +#endif + + // dispcmnCtrlCmdDpSetMSAPropertiesv2 -- exported (id=0x731381) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpSetMSAPropertiesv2__ = &dispcmnCtrlCmdDpSetMSAPropertiesv2_IMPL; +#endif + + // dispcmnCtrlCmdDpSetStereoMSAProperties -- exported (id=0x731378) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpSetStereoMSAProperties__ = &dispcmnCtrlCmdDpSetStereoMSAProperties_IMPL; +#endif + + // dispcmnCtrlCmdDpGenerateFakeInterrupt -- exported (id=0x73136b) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__dispcmnCtrlCmdDpGenerateFakeInterrupt__ = &dispcmnCtrlCmdDpGenerateFakeInterrupt_IMPL; +#endif + + // dispcmnCtrlCmdDpConfigRadScratchReg -- exported (id=0x73136c) +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpConfigRadScratchReg__ = &dispcmnCtrlCmdDpConfigRadScratchReg_IMPL; +#endif + + // dispcmnCtrlCmdDpSetTriggerSelect -- exported (id=0x73136f) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpSetTriggerSelect__ = &dispcmnCtrlCmdDpSetTriggerSelect_IMPL; +#endif + + // dispcmnCtrlCmdDpSetTriggerAll -- exported (id=0x731370) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpSetTriggerAll__ = &dispcmnCtrlCmdDpSetTriggerAll_IMPL; +#endif + + // dispcmnCtrlCmdDpGetAuxLogData -- exported (id=0x731373) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpGetAuxLogData__ = &dispcmnCtrlCmdDpGetAuxLogData_IMPL; +#endif + + // dispcmnCtrlCmdDpConfigIndexedLinkRates -- exported (id=0x731377) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpConfigIndexedLinkRates__ = &dispcmnCtrlCmdDpConfigIndexedLinkRates_IMPL; +#endif + + // dispcmnCtrlCmdDpConfigureFec -- exported (id=0x73137a) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpConfigureFec__ = &dispcmnCtrlCmdDpConfigureFec_IMPL; +#endif + + // dispcmnCtrlCmdDpGetGenericInfoframe -- exported (id=0x73137e) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpGetGenericInfoframe__ = &dispcmnCtrlCmdDpGetGenericInfoframe_IMPL; +#endif + + // dispcmnCtrlCmdDpGetMsaAttributes -- exported (id=0x73137f) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpGetMsaAttributes__ = &dispcmnCtrlCmdDpGetMsaAttributes_IMPL; +#endif + + // dispcmnCtrlCmdFrlConfigMacroPad -- exported (id=0x730502) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdFrlConfigMacroPad__ = &dispcmnCtrlCmdFrlConfigMacroPad_IMPL; +#endif + + // dispcmnCtrlCmdDpConfigMacroPad -- exported (id=0x73137b) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpConfigMacroPad__ = &dispcmnCtrlCmdDpConfigMacroPad_IMPL; +#endif + + // dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data -- exported (id=0x731351) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data__ = &dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_IMPL; +#endif + + // dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data -- exported (id=0x731352) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data__ = &dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_IMPL; +#endif + + // dispcmnCtrlCmdDpSetLevelInfoTableData -- exported (id=0x731387) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpSetLevelInfoTableData__ = &dispcmnCtrlCmdDpSetLevelInfoTableData_IMPL; +#endif + + // dispcmnCtrlCmdDpGetLevelInfoTableData -- exported (id=0x731388) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpGetLevelInfoTableData__ = &dispcmnCtrlCmdDpGetLevelInfoTableData_IMPL; +#endif + + // dispcmnCtrlCmdDpSetEcf -- exported (id=0x731366) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDpSetEcf__ = &dispcmnCtrlCmdDpSetEcf_IMPL; +#endif + + // dispcmnCtrlCmdDPGetCableIDInfoFromMacro -- exported (id=0x73138d) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdDPGetCableIDInfoFromMacro__ = &dispcmnCtrlCmdDPGetCableIDInfoFromMacro_IMPL; +#endif + + // dispcmnCtrlCmdSpecificGetRegionalCrcs -- 
exported (id=0x7302a0) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__dispcmnCtrlCmdSpecificGetRegionalCrcs__ = &dispcmnCtrlCmdSpecificGetRegionalCrcs_IMPL; +#endif +} // End __nvoc_init_funcTable_DispCommon_1 with approximately 112 basic block(s). + + +// Initialize vtable(s) for 138 virtual method(s). +void __nvoc_init_funcTable_DispCommon(DispCommon *pThis) { + + // Initialize vtable(s) with 112 per-object function pointer(s). + __nvoc_init_funcTable_DispCommon_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__DispCommon(DispCommon *pThis, RmHalspecOwner *pRmhalspecowner) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource; // (rmres) super^2 + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier; // (inotify) super^3 + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier; // (notify) super^2 + pThis->__nvoc_pbase_DisplayApi = &pThis->__nvoc_base_DisplayApi; // (dispapi) super + pThis->__nvoc_pbase_DispCommon = pThis; // (dispcmn) this + + // Recurse to superclass initialization function(s). + __nvoc_init__DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__DispCommon.metadata__DisplayApi.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4 + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispCommon.metadata__DisplayApi.metadata__RmResource.metadata__RsResource; // (res) super^3 + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__DispCommon.metadata__DisplayApi.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispCommon.metadata__DisplayApi.metadata__RmResource; // (rmres) super^2 + pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier.__nvoc_metadata_ptr = &__nvoc_metadata__DispCommon.metadata__DisplayApi.metadata__Notifier.metadata__INotifier; // (inotify) super^3 + pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr = &__nvoc_metadata__DispCommon.metadata__DisplayApi.metadata__Notifier; // (notify) super^2 + pThis->__nvoc_base_DisplayApi.__nvoc_metadata_ptr = &__nvoc_metadata__DispCommon.metadata__DisplayApi; // (dispapi) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__DispCommon; // (dispcmn) this + + // Initialize per-object vtables. 
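+    // The wiring above follows the usual NVOC construction order: set the
+    // __nvoc_pbase_* ancestor pointers, recurse into the superclass
+    // initializer, then point each embedded base at its slice of the shared
+    // per-class metadata. The pbase pointers are what let staticCast()
+    // resolve without any runtime type check; for example,
+    // staticCast(pThis, RsResource) becomes a plain read of
+    // pThis->__nvoc_pbase_RsResource (compare the __staticCast_DisplayApi
+    // macro in the header below). The call that follows performs the final
+    // step, filling in the per-object function pointers: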
+ __nvoc_init_funcTable_DispCommon(pThis); +} + +NV_STATUS __nvoc_objCreate_DispCommon(DispCommon **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + DispCommon *pThis; + RmHalspecOwner *pRmhalspecowner; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(DispCommon), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(DispCommon)); + + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // pParent must be a valid object that derives from a halspec owner class. + NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_ARGUMENT); + + // Link the child into the parent unless flagged not to do so. + if (!(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init__DispCommon(pThis, pRmhalspecowner); + status = __nvoc_ctor_DispCommon(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispCommon_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_DispCommon_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(DispCommon)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispCommon(DispCommon **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispCommon(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_disp_objs_nvoc.h b/src/nvidia/generated/g_disp_objs_nvoc.h new file mode 100644 index 0000000..0ddc78d --- /dev/null +++ b/src/nvidia/generated/g_disp_objs_nvoc.h @@ -0,0 +1,2846 @@ + +#ifndef _G_DISP_OBJS_NVOC_H_ +#define _G_DISP_OBJS_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing the display - both Disp and DispCommon +* entries with their insides (DispChannelList and DispDmaControlList) +* +******************************************************************************/ + +#pragma once +#include "g_disp_objs_nvoc.h" + +#ifndef DISP_OBJS_H +#define DISP_OBJS_H + +#include "rmapi/event.h" +#include "rmapi/resource.h" + +#include "gpu/gpu_halspec.h" + +#include "ctrl/ctrl0073.h" +#include "ctrl/ctrl0073/ctrl0073system.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" +#include "ctrl/ctrl0073/ctrl0073specific.h" +#include "ctrl/ctrl5070/ctrl5070or.h" +#include "ctrl/ctrl5070/ctrl5070system.h" +#include "ctrl/ctrlc370/ctrlc370chnc.h" +#include "ctrl/ctrlc370/ctrlc370event.h" +#include "ctrl/ctrlc370/ctrlc370rg.h" +#include "ctrl/ctrlc370/ctrlc370or.h" +#include "ctrl/ctrlc370/ctrlc370verif.h" +#include "ctrl/ctrlc372/ctrlc372base.h" +#include "ctrl/ctrlc372/ctrlc372chnc.h" + +// **************************************************************************** +// Type definitions +// **************************************************************************** + +struct OBJGPU; +struct Device; +struct Memory; +struct RsResource; +struct RmResource; + +struct DispChannel; + +#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__ +#define __NVOC_CLASS_DispChannel_TYPEDEF__ +typedef struct DispChannel DispChannel; +#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannel +#define __nvoc_class_id_DispChannel 0xbd2ff3 +#endif /* __nvoc_class_id_DispChannel */ + + + +#define DISPAPI_GET_GPU(pDispRes) staticCast(pDispRes, DisplayApi)->pGpuInRmctrl + +#define DISPAPI_GET_GPUGRP(pDispRes) staticCast(pDispRes, DisplayApi)->pGpuGrp + +/*! + * Base class for many of display's RsResource subclasses + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. 
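+// A minimal illustrative sketch of the mechanism (the class Foo and the
+// field name `secret` below are hypothetical, not part of this header):
+//
+//     struct Foo {
+//         NvU32 PRIVATE_FIELD(secret);
+//     };
+//
+// In the matching C source file, which defines the *_PRIVATE_ACCESS_ALLOWED
+// guard before including this header, PRIVATE_FIELD(secret) expands to
+// `secret`, so `pFoo->secret` compiles normally. In every other source
+// file the macro routes through NVOC_PRIVATE_FIELD, which yields a name
+// other than `secret`, so the same `pFoo->secret` reference fails to
+// compile -- the diagnostic referred to above.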
+#ifdef NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DisplayApi; +struct NVOC_METADATA__RmResource; +struct NVOC_METADATA__Notifier; +struct NVOC_VTABLE__DisplayApi; + + +struct DisplayApi { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__DisplayApi *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct RmResource __nvoc_base_RmResource; + struct Notifier __nvoc_base_Notifier; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^3 + struct RsResource *__nvoc_pbase_RsResource; // res super^2 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^2 + struct RmResource *__nvoc_pbase_RmResource; // rmres super + struct INotifier *__nvoc_pbase_INotifier; // inotify super^2 + struct Notifier *__nvoc_pbase_Notifier; // notify super + struct DisplayApi *__nvoc_pbase_DisplayApi; // dispapi + + // Data members + struct OBJGPU *pGpuInRmctrl; + struct OBJGPUGRP *pGpuGrp; + NvBool bBcResource; + NvU32 *pNotifyActions[8]; + NvU32 numNotifiers; +}; + + +// Vtable with 26 per-class function pointers +struct NVOC_VTABLE__DisplayApi { + NV_STATUS (*__dispapiControl__)(struct DisplayApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual override (res) base (rmres) + NV_STATUS (*__dispapiControl_Prologue__)(struct DisplayApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual override (res) base (rmres) + void (*__dispapiControl_Epilogue__)(struct DisplayApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual override (res) base (rmres) + NvBool (*__dispapiAccessCallback__)(struct DisplayApi * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (rmres) + NvBool (*__dispapiShareCallback__)(struct DisplayApi * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__dispapiGetMemInterMapParams__)(struct DisplayApi * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__dispapiCheckMemInterUnmap__)(struct DisplayApi * /*this*/, NvBool); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__dispapiGetMemoryMappingDescriptor__)(struct DisplayApi * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__dispapiControlSerialization_Prologue__)(struct DisplayApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + void (*__dispapiControlSerialization_Epilogue__)(struct DisplayApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + NvBool (*__dispapiCanCopy__)(struct DisplayApi * /*this*/); // virtual inherited (res) base (rmres) + NV_STATUS (*__dispapiIsDuplicate__)(struct DisplayApi * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (rmres) + void (*__dispapiPreDestruct__)(struct DisplayApi * /*this*/); // virtual inherited (res) base (rmres) + NV_STATUS (*__dispapiControlFilter__)(struct DisplayApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base 
(rmres) + NV_STATUS (*__dispapiMap__)(struct DisplayApi * /*this*/, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (res) base (rmres) + NV_STATUS (*__dispapiUnmap__)(struct DisplayApi * /*this*/, struct CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (res) base (rmres) + NvBool (*__dispapiIsPartialUnmapSupported__)(struct DisplayApi * /*this*/); // inline virtual inherited (res) base (rmres) body + NV_STATUS (*__dispapiMapTo__)(struct DisplayApi * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (rmres) + NV_STATUS (*__dispapiUnmapFrom__)(struct DisplayApi * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (rmres) + NvU32 (*__dispapiGetRefCount__)(struct DisplayApi * /*this*/); // virtual inherited (res) base (rmres) + void (*__dispapiAddAdditionalDependants__)(struct RsClient *, struct DisplayApi * /*this*/, RsResourceRef *); // virtual inherited (res) base (rmres) + PEVENTNOTIFICATION * (*__dispapiGetNotificationListPtr__)(struct DisplayApi * /*this*/); // virtual inherited (notify) base (notify) + struct NotifShare * (*__dispapiGetNotificationShare__)(struct DisplayApi * /*this*/); // virtual inherited (notify) base (notify) + void (*__dispapiSetNotificationShare__)(struct DisplayApi * /*this*/, struct NotifShare *); // virtual inherited (notify) base (notify) + NV_STATUS (*__dispapiUnregisterEvent__)(struct DisplayApi * /*this*/, NvHandle, NvHandle, NvHandle, NvHandle); // virtual inherited (notify) base (notify) + NV_STATUS (*__dispapiGetOrAllocNotifShare__)(struct DisplayApi * /*this*/, NvHandle, NvHandle, struct NotifShare **); // virtual inherited (notify) base (notify) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DisplayApi { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__RmResource metadata__RmResource; + const struct NVOC_METADATA__Notifier metadata__Notifier; + const struct NVOC_VTABLE__DisplayApi vtable; +}; + +#ifndef __NVOC_CLASS_DisplayApi_TYPEDEF__ +#define __NVOC_CLASS_DisplayApi_TYPEDEF__ +typedef struct DisplayApi DisplayApi; +#endif /* __NVOC_CLASS_DisplayApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DisplayApi +#define __nvoc_class_id_DisplayApi 0xe9980c +#endif /* __nvoc_class_id_DisplayApi */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +#define __staticCast_DisplayApi(pThis) \ + ((pThis)->__nvoc_pbase_DisplayApi) + +#ifdef __nvoc_disp_objs_h_disabled +#define __dynamicCast_DisplayApi(pThis) ((DisplayApi*) NULL) +#else //__nvoc_disp_objs_h_disabled +#define __dynamicCast_DisplayApi(pThis) \ + ((DisplayApi*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DisplayApi))) +#endif //__nvoc_disp_objs_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_DisplayApi(DisplayApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DisplayApi(DisplayApi**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_DisplayApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DisplayApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define dispapiControl_FNPTR(pDisplayApi) pDisplayApi->__nvoc_metadata_ptr->vtable.__dispapiControl__ +#define dispapiControl(pDisplayApi, pCallContext, pParams) dispapiControl_DISPATCH(pDisplayApi, pCallContext, pParams) +#define dispapiControl_Prologue_FNPTR(pDisplayApi) 
pDisplayApi->__nvoc_metadata_ptr->vtable.__dispapiControl_Prologue__ +#define dispapiControl_Prologue(pDisplayApi, pCallContext, pRsParams) dispapiControl_Prologue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispapiControl_Epilogue_FNPTR(pDisplayApi) pDisplayApi->__nvoc_metadata_ptr->vtable.__dispapiControl_Epilogue__ +#define dispapiControl_Epilogue(pDisplayApi, pCallContext, pRsParams) dispapiControl_Epilogue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispapiAccessCallback_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define dispapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispapiShareCallback_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define dispapiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) dispapiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispapiGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define dispapiGetMemInterMapParams(pRmResource, pParams) dispapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispapiCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define dispapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispapiGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define dispapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispapiControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define dispapiControlSerialization_Prologue(pResource, pCallContext, pParams) dispapiControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispapiControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define dispapiControlSerialization_Epilogue(pResource, pCallContext, pParams) dispapiControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispapiCanCopy_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define dispapiCanCopy(pResource) dispapiCanCopy_DISPATCH(pResource) +#define dispapiIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define dispapiIsDuplicate(pResource, hMemory, pDuplicate) dispapiIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define dispapiPreDestruct_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define dispapiPreDestruct(pResource) dispapiPreDestruct_DISPATCH(pResource) +#define dispapiControlFilter_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define dispapiControlFilter(pResource, pCallContext, pParams) dispapiControlFilter_DISPATCH(pResource, 
pCallContext, pParams) +#define dispapiMap_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMap__ +#define dispapiMap(pResource, pCallContext, pParams, pCpuMapping) dispapiMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define dispapiUnmap_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmap__ +#define dispapiUnmap(pResource, pCallContext, pCpuMapping) dispapiUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define dispapiIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define dispapiIsPartialUnmapSupported(pResource) dispapiIsPartialUnmapSupported_DISPATCH(pResource) +#define dispapiMapTo_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define dispapiMapTo(pResource, pParams) dispapiMapTo_DISPATCH(pResource, pParams) +#define dispapiUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define dispapiUnmapFrom(pResource, pParams) dispapiUnmapFrom_DISPATCH(pResource, pParams) +#define dispapiGetRefCount_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define dispapiGetRefCount(pResource) dispapiGetRefCount_DISPATCH(pResource) +#define dispapiAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define dispapiAddAdditionalDependants(pClient, pResource, pReference) dispapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispapiGetNotificationListPtr_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationListPtr__ +#define dispapiGetNotificationListPtr(pNotifier) dispapiGetNotificationListPtr_DISPATCH(pNotifier) +#define dispapiGetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationShare__ +#define dispapiGetNotificationShare(pNotifier) dispapiGetNotificationShare_DISPATCH(pNotifier) +#define dispapiSetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifySetNotificationShare__ +#define dispapiSetNotificationShare(pNotifier, pNotifShare) dispapiSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispapiUnregisterEvent_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyUnregisterEvent__ +#define dispapiUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispapiUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispapiGetOrAllocNotifShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetOrAllocNotifShare__ +#define dispapiGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispapiGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) + +// Dispatch functions +static inline NV_STATUS dispapiControl_DISPATCH(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDisplayApi->__nvoc_metadata_ptr->vtable.__dispapiControl__(pDisplayApi, pCallContext, 
pParams); +} + +static inline NV_STATUS dispapiControl_Prologue_DISPATCH(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return pDisplayApi->__nvoc_metadata_ptr->vtable.__dispapiControl_Prologue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline void dispapiControl_Epilogue_DISPATCH(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + pDisplayApi->__nvoc_metadata_ptr->vtable.__dispapiControl_Epilogue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline NvBool dispapiAccessCallback_DISPATCH(struct DisplayApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__dispapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool dispapiShareCallback_DISPATCH(struct DisplayApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__dispapiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispapiGetMemInterMapParams_DISPATCH(struct DisplayApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispapiCheckMemInterUnmap_DISPATCH(struct DisplayApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispapiGetMemoryMappingDescriptor_DISPATCH(struct DisplayApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispapiControlSerialization_Prologue_DISPATCH(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispapiControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void dispapiControlSerialization_Epilogue_DISPATCH(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__dispapiControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool dispapiCanCopy_DISPATCH(struct DisplayApi *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispapiCanCopy__(pResource); +} + +static inline NV_STATUS dispapiIsDuplicate_DISPATCH(struct DisplayApi *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__dispapiIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void dispapiPreDestruct_DISPATCH(struct DisplayApi *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__dispapiPreDestruct__(pResource); +} + +static inline NV_STATUS dispapiControlFilter_DISPATCH(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispapiMap_DISPATCH(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS 
*pParams, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__dispapiMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispapiUnmap_DISPATCH(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__dispapiUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool dispapiIsPartialUnmapSupported_DISPATCH(struct DisplayApi *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispapiIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS dispapiMapTo_DISPATCH(struct DisplayApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispapiUnmapFrom_DISPATCH(struct DisplayApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispapiUnmapFrom__(pResource, pParams); +} + +static inline NvU32 dispapiGetRefCount_DISPATCH(struct DisplayApi *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispapiGetRefCount__(pResource); +} + +static inline void dispapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DisplayApi *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__dispapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline PEVENTNOTIFICATION * dispapiGetNotificationListPtr_DISPATCH(struct DisplayApi *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispapiGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare * dispapiGetNotificationShare_DISPATCH(struct DisplayApi *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispapiGetNotificationShare__(pNotifier); +} + +static inline void dispapiSetNotificationShare_DISPATCH(struct DisplayApi *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvoc_metadata_ptr->vtable.__dispapiSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS dispapiUnregisterEvent_DISPATCH(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispapiUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS dispapiGetOrAllocNotifShare_DISPATCH(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispapiGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS dispapiSetUnicastAndSynchronize_KERNEL(struct DisplayApi *pDisplayApi, struct OBJGPUGRP *pGpuGroup, struct OBJGPU **ppGpu, OBJDISP **ppDisp, NvU32 subDeviceInstance); + + +#ifdef __nvoc_disp_objs_h_disabled +static inline NV_STATUS dispapiSetUnicastAndSynchronize(struct DisplayApi *pDisplayApi, struct OBJGPUGRP *pGpuGroup, struct OBJGPU **ppGpu, OBJDISP **ppDisp, NvU32 subDeviceInstance) { + NV_ASSERT_FAILED_PRECOMP("DisplayApi was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_objs_h_disabled +#define dispapiSetUnicastAndSynchronize(pDisplayApi, pGpuGroup, ppGpu, ppDisp, subDeviceInstance) dispapiSetUnicastAndSynchronize_KERNEL(pDisplayApi, pGpuGroup, ppGpu, ppDisp, subDeviceInstance) +#endif //__nvoc_disp_objs_h_disabled + +#define 
dispapiSetUnicastAndSynchronize_HAL(pDisplayApi, pGpuGroup, ppGpu, ppDisp, subDeviceInstance) dispapiSetUnicastAndSynchronize(pDisplayApi, pGpuGroup, ppGpu, ppDisp, subDeviceInstance) + +NV_STATUS dispapiControl_IMPL(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +NV_STATUS dispapiControl_Prologue_IMPL(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); + +void dispapiControl_Epilogue_IMPL(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); + +NV_STATUS dispapiConstruct_IMPL(struct DisplayApi *arg_pDisplayApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_dispapiConstruct(arg_pDisplayApi, arg_pCallContext, arg_pParams) dispapiConstruct_IMPL(arg_pDisplayApi, arg_pCallContext, arg_pParams) +void dispapiDestruct_IMPL(struct DisplayApi *pDisplayApi); + +#define __nvoc_dispapiDestruct(pDisplayApi) dispapiDestruct_IMPL(pDisplayApi) +NV_STATUS dispapiCtrlCmdEventSetNotification_IMPL(struct DisplayApi *pDisplayApi, NV0073_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams); + +#ifdef __nvoc_disp_objs_h_disabled +static inline NV_STATUS dispapiCtrlCmdEventSetNotification(struct DisplayApi *pDisplayApi, NV0073_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams) { + NV_ASSERT_FAILED_PRECOMP("DisplayApi was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_objs_h_disabled +#define dispapiCtrlCmdEventSetNotification(pDisplayApi, pSetEventParams) dispapiCtrlCmdEventSetNotification_IMPL(pDisplayApi, pSetEventParams) +#endif //__nvoc_disp_objs_h_disabled + +NV_STATUS dispapiValidateRmctrlPriv_IMPL(struct OBJGPU *pGpu); + +#define dispapiValidateRmctrlPriv(pGpu) dispapiValidateRmctrlPriv_IMPL(pGpu) +#undef PRIVATE_FIELD + + +/*! + * RM internal class representing XXX_DISPLAY. Parent for all other display + * resources (channels, etc). Allocated under a device or subdevice. + * + * Only one instance of this class is allowed per-GPU. Multi-instance restrictions + * are enforced by resource_list.h + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DispObject; +struct NVOC_METADATA__DisplayApi; +struct NVOC_VTABLE__DispObject; + + +struct DispObject { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__DispObject *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct DisplayApi __nvoc_base_DisplayApi; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^4 + struct RsResource *__nvoc_pbase_RsResource; // res super^3 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^2 + struct INotifier *__nvoc_pbase_INotifier; // inotify super^3 + struct Notifier *__nvoc_pbase_Notifier; // notify super^2 + struct DisplayApi *__nvoc_pbase_DisplayApi; // dispapi super + struct DispObject *__nvoc_pbase_DispObject; // dispobj + + // Vtable with 13 per-object function pointers + NV_STATUS (*__dispobjCtrlCmdSetRmFreeFlags__)(struct DispObject * /*this*/, NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS *); // exported (id=0x50700117) + NV_STATUS (*__dispobjCtrlCmdIMPSetGetParameter__)(struct DispObject * /*this*/, NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS *); // exported (id=0x50700118) + NV_STATUS (*__dispobjCtrlCmdGetRgStatus__)(struct DispObject * /*this*/, NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS *); // exported (id=0x50700202) + NV_STATUS (*__dispobjCtrlCmdGetRgUnderflowProp__)(struct DispObject * /*this*/, NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS *); // exported (id=0x50700203) + NV_STATUS (*__dispobjCtrlCmdSetRgUnderflowProp__)(struct DispObject * /*this*/, NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS *); // exported (id=0x50700204) + NV_STATUS (*__dispobjCtrlCmdGetRgFliplockProp__)(struct DispObject * /*this*/, NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS *); // exported (id=0x50700205) + NV_STATUS (*__dispobjCtrlCmdSetRgFliplockProp__)(struct DispObject * /*this*/, NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS *); // exported (id=0x50700206) + NV_STATUS (*__dispobjCtrlCmdGetRgConnectedLockpinStateless__)(struct DispObject * /*this*/, NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS *); // exported (id=0x5070020a) + NV_STATUS (*__dispobjCtrlCmdGetRgScanLine__)(struct DispObject * /*this*/, NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS *); // exported (id=0x5070020c) + NV_STATUS (*__dispobjCtrlCmdGetSorOpMode__)(struct DispObject * /*this*/, NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS *); // exported (id=0x50700422) + NV_STATUS (*__dispobjCtrlCmdSetSorOpMode__)(struct DispObject * /*this*/, NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS *); // exported (id=0x50700423) + NV_STATUS (*__dispobjCtrlCmdSetSorFlushMode__)(struct DispObject * /*this*/, NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS *); // exported (id=0x50700457) + NV_STATUS (*__dispobjCtrlCmdSystemGetCapsV2__)(struct DispObject * /*this*/, NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *); // exported (id=0x50700709) + + // Data members + NvU32 rmFreeFlags; +}; + + +// Vtable with 26 per-class function pointers +struct NVOC_VTABLE__DispObject { + NV_STATUS (*__dispobjControl__)(struct DispObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (dispapi) base (dispapi) + NV_STATUS (*__dispobjControl_Prologue__)(struct DispObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (dispapi) base (dispapi) + void (*__dispobjControl_Epilogue__)(struct DispObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (dispapi) base (dispapi) + NvBool (*__dispobjAccessCallback__)(struct DispObject * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (dispapi) + NvBool (*__dispobjShareCallback__)(struct 
DispObject * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (rmres) base (dispapi) + NV_STATUS (*__dispobjGetMemInterMapParams__)(struct DispObject * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (dispapi) + NV_STATUS (*__dispobjCheckMemInterUnmap__)(struct DispObject * /*this*/, NvBool); // virtual inherited (rmres) base (dispapi) + NV_STATUS (*__dispobjGetMemoryMappingDescriptor__)(struct DispObject * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (dispapi) + NV_STATUS (*__dispobjControlSerialization_Prologue__)(struct DispObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (dispapi) + void (*__dispobjControlSerialization_Epilogue__)(struct DispObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (dispapi) + NvBool (*__dispobjCanCopy__)(struct DispObject * /*this*/); // virtual inherited (res) base (dispapi) + NV_STATUS (*__dispobjIsDuplicate__)(struct DispObject * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (dispapi) + void (*__dispobjPreDestruct__)(struct DispObject * /*this*/); // virtual inherited (res) base (dispapi) + NV_STATUS (*__dispobjControlFilter__)(struct DispObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (dispapi) + NV_STATUS (*__dispobjMap__)(struct DispObject * /*this*/, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (res) base (dispapi) + NV_STATUS (*__dispobjUnmap__)(struct DispObject * /*this*/, struct CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (res) base (dispapi) + NvBool (*__dispobjIsPartialUnmapSupported__)(struct DispObject * /*this*/); // inline virtual inherited (res) base (dispapi) body + NV_STATUS (*__dispobjMapTo__)(struct DispObject * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (dispapi) + NV_STATUS (*__dispobjUnmapFrom__)(struct DispObject * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (dispapi) + NvU32 (*__dispobjGetRefCount__)(struct DispObject * /*this*/); // virtual inherited (res) base (dispapi) + void (*__dispobjAddAdditionalDependants__)(struct RsClient *, struct DispObject * /*this*/, RsResourceRef *); // virtual inherited (res) base (dispapi) + PEVENTNOTIFICATION * (*__dispobjGetNotificationListPtr__)(struct DispObject * /*this*/); // virtual inherited (notify) base (dispapi) + struct NotifShare * (*__dispobjGetNotificationShare__)(struct DispObject * /*this*/); // virtual inherited (notify) base (dispapi) + void (*__dispobjSetNotificationShare__)(struct DispObject * /*this*/, struct NotifShare *); // virtual inherited (notify) base (dispapi) + NV_STATUS (*__dispobjUnregisterEvent__)(struct DispObject * /*this*/, NvHandle, NvHandle, NvHandle, NvHandle); // virtual inherited (notify) base (dispapi) + NV_STATUS (*__dispobjGetOrAllocNotifShare__)(struct DispObject * /*this*/, NvHandle, NvHandle, struct NotifShare **); // virtual inherited (notify) base (dispapi) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DispObject { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__DisplayApi metadata__DisplayApi; + const struct NVOC_VTABLE__DispObject vtable; +}; + +#ifndef __NVOC_CLASS_DispObject_TYPEDEF__ +#define __NVOC_CLASS_DispObject_TYPEDEF__ +typedef struct DispObject DispObject; +#endif /* 
__NVOC_CLASS_DispObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispObject +#define __nvoc_class_id_DispObject 0x999839 +#endif /* __nvoc_class_id_DispObject */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispObject; + +#define __staticCast_DispObject(pThis) \ + ((pThis)->__nvoc_pbase_DispObject) + +#ifdef __nvoc_disp_objs_h_disabled +#define __dynamicCast_DispObject(pThis) ((DispObject*) NULL) +#else //__nvoc_disp_objs_h_disabled +#define __dynamicCast_DispObject(pThis) \ + ((DispObject*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispObject))) +#endif //__nvoc_disp_objs_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_DispObject(DispObject**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispObject(DispObject**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_DispObject(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispObject((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define dispobjCtrlCmdSetRmFreeFlags_FNPTR(pDispObject) pDispObject->__dispobjCtrlCmdSetRmFreeFlags__ +#define dispobjCtrlCmdSetRmFreeFlags(pDispObject, pParams) dispobjCtrlCmdSetRmFreeFlags_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdIMPSetGetParameter_FNPTR(pDispObject) pDispObject->__dispobjCtrlCmdIMPSetGetParameter__ +#define dispobjCtrlCmdIMPSetGetParameter(pDispObject, pImpSetGetParams) dispobjCtrlCmdIMPSetGetParameter_DISPATCH(pDispObject, pImpSetGetParams) +#define dispobjCtrlCmdGetRgStatus_FNPTR(pDispObject) pDispObject->__dispobjCtrlCmdGetRgStatus__ +#define dispobjCtrlCmdGetRgStatus(pDispObject, pParams) dispobjCtrlCmdGetRgStatus_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetRgUnderflowProp_FNPTR(pDispObject) pDispObject->__dispobjCtrlCmdGetRgUnderflowProp__ +#define dispobjCtrlCmdGetRgUnderflowProp(pDispObject, pParams) dispobjCtrlCmdGetRgUnderflowProp_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetRgUnderflowProp_FNPTR(pDispObject) pDispObject->__dispobjCtrlCmdSetRgUnderflowProp__ +#define dispobjCtrlCmdSetRgUnderflowProp(pDispObject, pParams) dispobjCtrlCmdSetRgUnderflowProp_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetRgFliplockProp_FNPTR(pDispObject) pDispObject->__dispobjCtrlCmdGetRgFliplockProp__ +#define dispobjCtrlCmdGetRgFliplockProp(pDispObject, pParams) dispobjCtrlCmdGetRgFliplockProp_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetRgFliplockProp_FNPTR(pDispObject) pDispObject->__dispobjCtrlCmdSetRgFliplockProp__ +#define dispobjCtrlCmdSetRgFliplockProp(pDispObject, pParams) dispobjCtrlCmdSetRgFliplockProp_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetRgConnectedLockpinStateless_FNPTR(pDispObject) pDispObject->__dispobjCtrlCmdGetRgConnectedLockpinStateless__ +#define dispobjCtrlCmdGetRgConnectedLockpinStateless(pDispObject, pParams) dispobjCtrlCmdGetRgConnectedLockpinStateless_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetRgScanLine_FNPTR(pDispObject) pDispObject->__dispobjCtrlCmdGetRgScanLine__ +#define dispobjCtrlCmdGetRgScanLine(pDispObject, pParams) dispobjCtrlCmdGetRgScanLine_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetSorOpMode_FNPTR(pDispObject) pDispObject->__dispobjCtrlCmdGetSorOpMode__ +#define dispobjCtrlCmdGetSorOpMode(pDispObject, pParams) dispobjCtrlCmdGetSorOpMode_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetSorOpMode_FNPTR(pDispObject) 
pDispObject->__dispobjCtrlCmdSetSorOpMode__ +#define dispobjCtrlCmdSetSorOpMode(pDispObject, pParams) dispobjCtrlCmdSetSorOpMode_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetSorFlushMode_FNPTR(pDispObject) pDispObject->__dispobjCtrlCmdSetSorFlushMode__ +#define dispobjCtrlCmdSetSorFlushMode(pDispObject, pParams) dispobjCtrlCmdSetSorFlushMode_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSystemGetCapsV2_FNPTR(pDispObject) pDispObject->__dispobjCtrlCmdSystemGetCapsV2__ +#define dispobjCtrlCmdSystemGetCapsV2(pDispObject, pCapsParams) dispobjCtrlCmdSystemGetCapsV2_DISPATCH(pDispObject, pCapsParams) +#define dispobjControl_FNPTR(pDisplayApi) pDisplayApi->__nvoc_base_DisplayApi.__nvoc_metadata_ptr->vtable.__dispapiControl__ +#define dispobjControl(pDisplayApi, pCallContext, pParams) dispobjControl_DISPATCH(pDisplayApi, pCallContext, pParams) +#define dispobjControl_Prologue_FNPTR(pDisplayApi) pDisplayApi->__nvoc_base_DisplayApi.__nvoc_metadata_ptr->vtable.__dispapiControl_Prologue__ +#define dispobjControl_Prologue(pDisplayApi, pCallContext, pRsParams) dispobjControl_Prologue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispobjControl_Epilogue_FNPTR(pDisplayApi) pDisplayApi->__nvoc_base_DisplayApi.__nvoc_metadata_ptr->vtable.__dispapiControl_Epilogue__ +#define dispobjControl_Epilogue(pDisplayApi, pCallContext, pRsParams) dispobjControl_Epilogue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispobjAccessCallback_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define dispobjAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispobjAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispobjShareCallback_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define dispobjShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) dispobjShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispobjGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define dispobjGetMemInterMapParams(pRmResource, pParams) dispobjGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispobjCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define dispobjCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispobjCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispobjGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define dispobjGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispobjGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispobjControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define dispobjControlSerialization_Prologue(pResource, pCallContext, pParams) dispobjControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispobjControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ 
+#define dispobjControlSerialization_Epilogue(pResource, pCallContext, pParams) dispobjControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispobjCanCopy_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define dispobjCanCopy(pResource) dispobjCanCopy_DISPATCH(pResource) +#define dispobjIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define dispobjIsDuplicate(pResource, hMemory, pDuplicate) dispobjIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define dispobjPreDestruct_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define dispobjPreDestruct(pResource) dispobjPreDestruct_DISPATCH(pResource) +#define dispobjControlFilter_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define dispobjControlFilter(pResource, pCallContext, pParams) dispobjControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispobjMap_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMap__ +#define dispobjMap(pResource, pCallContext, pParams, pCpuMapping) dispobjMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define dispobjUnmap_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmap__ +#define dispobjUnmap(pResource, pCallContext, pCpuMapping) dispobjUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define dispobjIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define dispobjIsPartialUnmapSupported(pResource) dispobjIsPartialUnmapSupported_DISPATCH(pResource) +#define dispobjMapTo_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define dispobjMapTo(pResource, pParams) dispobjMapTo_DISPATCH(pResource, pParams) +#define dispobjUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define dispobjUnmapFrom(pResource, pParams) dispobjUnmapFrom_DISPATCH(pResource, pParams) +#define dispobjGetRefCount_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define dispobjGetRefCount(pResource) dispobjGetRefCount_DISPATCH(pResource) +#define dispobjAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define dispobjAddAdditionalDependants(pClient, pResource, pReference) dispobjAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispobjGetNotificationListPtr_FNPTR(pNotifier) pNotifier->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationListPtr__ +#define dispobjGetNotificationListPtr(pNotifier) dispobjGetNotificationListPtr_DISPATCH(pNotifier) +#define dispobjGetNotificationShare_FNPTR(pNotifier) 
pNotifier->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationShare__ +#define dispobjGetNotificationShare(pNotifier) dispobjGetNotificationShare_DISPATCH(pNotifier) +#define dispobjSetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifySetNotificationShare__ +#define dispobjSetNotificationShare(pNotifier, pNotifShare) dispobjSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispobjUnregisterEvent_FNPTR(pNotifier) pNotifier->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyUnregisterEvent__ +#define dispobjUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispobjUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispobjGetOrAllocNotifShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetOrAllocNotifShare__ +#define dispobjGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispobjGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) + +// Dispatch functions +static inline NV_STATUS dispobjCtrlCmdSetRmFreeFlags_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetRmFreeFlags__(pDispObject, pParams); +} + +static inline NV_STATUS dispobjCtrlCmdIMPSetGetParameter_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS *pImpSetGetParams) { + return pDispObject->__dispobjCtrlCmdIMPSetGetParameter__(pDispObject, pImpSetGetParams); +} + +static inline NV_STATUS dispobjCtrlCmdGetRgStatus_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgStatus__(pDispObject, pParams); +} + +static inline NV_STATUS dispobjCtrlCmdGetRgUnderflowProp_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgUnderflowProp__(pDispObject, pParams); +} + +static inline NV_STATUS dispobjCtrlCmdSetRgUnderflowProp_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetRgUnderflowProp__(pDispObject, pParams); +} + +static inline NV_STATUS dispobjCtrlCmdGetRgFliplockProp_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgFliplockProp__(pDispObject, pParams); +} + +static inline NV_STATUS dispobjCtrlCmdSetRgFliplockProp_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetRgFliplockProp__(pDispObject, pParams); +} + +static inline NV_STATUS dispobjCtrlCmdGetRgConnectedLockpinStateless_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgConnectedLockpinStateless__(pDispObject, pParams); +} + +static inline NV_STATUS dispobjCtrlCmdGetRgScanLine_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgScanLine__(pDispObject, pParams); +} + +static inline NV_STATUS dispobjCtrlCmdGetSorOpMode_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS 
*pParams) { + return pDispObject->__dispobjCtrlCmdGetSorOpMode__(pDispObject, pParams); +} + +static inline NV_STATUS dispobjCtrlCmdSetSorOpMode_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetSorOpMode__(pDispObject, pParams); +} + +static inline NV_STATUS dispobjCtrlCmdSetSorFlushMode_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetSorFlushMode__(pDispObject, pParams); +} + +static inline NV_STATUS dispobjCtrlCmdSystemGetCapsV2_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *pCapsParams) { + return pDispObject->__dispobjCtrlCmdSystemGetCapsV2__(pDispObject, pCapsParams); +} + +static inline NV_STATUS dispobjControl_DISPATCH(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDisplayApi->__nvoc_metadata_ptr->vtable.__dispobjControl__(pDisplayApi, pCallContext, pParams); +} + +static inline NV_STATUS dispobjControl_Prologue_DISPATCH(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return pDisplayApi->__nvoc_metadata_ptr->vtable.__dispobjControl_Prologue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline void dispobjControl_Epilogue_DISPATCH(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + pDisplayApi->__nvoc_metadata_ptr->vtable.__dispobjControl_Epilogue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline NvBool dispobjAccessCallback_DISPATCH(struct DispObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__dispobjAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool dispobjShareCallback_DISPATCH(struct DispObject *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__dispobjShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispobjGetMemInterMapParams_DISPATCH(struct DispObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispobjGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispobjCheckMemInterUnmap_DISPATCH(struct DispObject *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispobjCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispobjGetMemoryMappingDescriptor_DISPATCH(struct DispObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispobjGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispobjControlSerialization_Prologue_DISPATCH(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispobjControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void dispobjControlSerialization_Epilogue_DISPATCH(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + 
pResource->__nvoc_metadata_ptr->vtable.__dispobjControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool dispobjCanCopy_DISPATCH(struct DispObject *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispobjCanCopy__(pResource); +} + +static inline NV_STATUS dispobjIsDuplicate_DISPATCH(struct DispObject *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__dispobjIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void dispobjPreDestruct_DISPATCH(struct DispObject *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__dispobjPreDestruct__(pResource); +} + +static inline NV_STATUS dispobjControlFilter_DISPATCH(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispobjControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispobjMap_DISPATCH(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__dispobjMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispobjUnmap_DISPATCH(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__dispobjUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool dispobjIsPartialUnmapSupported_DISPATCH(struct DispObject *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispobjIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS dispobjMapTo_DISPATCH(struct DispObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispobjMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispobjUnmapFrom_DISPATCH(struct DispObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispobjUnmapFrom__(pResource, pParams); +} + +static inline NvU32 dispobjGetRefCount_DISPATCH(struct DispObject *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispobjGetRefCount__(pResource); +} + +static inline void dispobjAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispObject *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__dispobjAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline PEVENTNOTIFICATION * dispobjGetNotificationListPtr_DISPATCH(struct DispObject *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispobjGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare * dispobjGetNotificationShare_DISPATCH(struct DispObject *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispobjGetNotificationShare__(pNotifier); +} + +static inline void dispobjSetNotificationShare_DISPATCH(struct DispObject *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvoc_metadata_ptr->vtable.__dispobjSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS dispobjUnregisterEvent_DISPATCH(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispobjUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS 
dispobjGetOrAllocNotifShare_DISPATCH(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispobjGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS dispobjConstructHal_IMPL(struct DispObject *pDispObject, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + + +#ifdef __nvoc_disp_objs_h_disabled +static inline NV_STATUS dispobjConstructHal(struct DispObject *pDispObject, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("DispObject was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_objs_h_disabled +#define dispobjConstructHal(pDispObject, pCallContext, pParams) dispobjConstructHal_IMPL(pDispObject, pCallContext, pParams) +#endif //__nvoc_disp_objs_h_disabled + +#define dispobjConstructHal_HAL(pDispObject, pCallContext, pParams) dispobjConstructHal(pDispObject, pCallContext, pParams) + +NV_STATUS dispobjCtrlCmdSetRmFreeFlags_IMPL(struct DispObject *pDispObject, NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS *pParams); + +NV_STATUS dispobjCtrlCmdIMPSetGetParameter_IMPL(struct DispObject *pDispObject, NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS *pImpSetGetParams); + +NV_STATUS dispobjCtrlCmdGetRgStatus_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS *pParams); + +NV_STATUS dispobjCtrlCmdGetRgUnderflowProp_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS *pParams); + +NV_STATUS dispobjCtrlCmdSetRgUnderflowProp_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS *pParams); + +NV_STATUS dispobjCtrlCmdGetRgFliplockProp_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS *pParams); + +NV_STATUS dispobjCtrlCmdSetRgFliplockProp_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS *pParams); + +NV_STATUS dispobjCtrlCmdGetRgConnectedLockpinStateless_IMPL(struct DispObject *pDispObject, NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS *pParams); + +NV_STATUS dispobjCtrlCmdGetRgScanLine_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS *pParams); + +NV_STATUS dispobjCtrlCmdGetSorOpMode_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS *pParams); + +NV_STATUS dispobjCtrlCmdSetSorOpMode_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS *pParams); + +NV_STATUS dispobjCtrlCmdSetSorFlushMode_IMPL(struct DispObject *pDispObject, NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS *pParams); + +NV_STATUS dispobjCtrlCmdSystemGetCapsV2_IMPL(struct DispObject *pDispObject, NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *pCapsParams); + +NV_STATUS dispobjConstruct_IMPL(struct DispObject *arg_pDispObject, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_dispobjConstruct(arg_pDispObject, arg_pCallContext, arg_pParams) dispobjConstruct_IMPL(arg_pDispObject, arg_pCallContext, arg_pParams) +NV_STATUS dispobjGetByHandle_IMPL(struct RsClient *pClient, NvHandle hDispObject, struct DispObject **ppDispObject); + +#define dispobjGetByHandle(pClient, hDispObject, ppDispObject) dispobjGetByHandle_IMPL(pClient, hDispObject, ppDispObject) +NV_STATUS dispobjGetByDevice_IMPL(struct RsClient *pClient, struct Device *pDevice, struct DispObject **ppDispObject); + +#define dispobjGetByDevice(pClient, pDevice, 
ppDispObject) dispobjGetByDevice_IMPL(pClient, pDevice, ppDispObject) +void dispobjClearRmFreeFlags_IMPL(struct DispObject *pDispObject); + +#ifdef __nvoc_disp_objs_h_disabled +static inline void dispobjClearRmFreeFlags(struct DispObject *pDispObject) { + NV_ASSERT_FAILED_PRECOMP("DispObject was disabled!"); +} +#else //__nvoc_disp_objs_h_disabled +#define dispobjClearRmFreeFlags(pDispObject) dispobjClearRmFreeFlags_IMPL(pDispObject) +#endif //__nvoc_disp_objs_h_disabled + +NvBool dispobjGetRmFreeFlags_IMPL(struct DispObject *pDispObject); + +#ifdef __nvoc_disp_objs_h_disabled +static inline NvBool dispobjGetRmFreeFlags(struct DispObject *pDispObject) { + NV_ASSERT_FAILED_PRECOMP("DispObject was disabled!"); + return NV_FALSE; +} +#else //__nvoc_disp_objs_h_disabled +#define dispobjGetRmFreeFlags(pDispObject) dispobjGetRmFreeFlags_IMPL(pDispObject) +#endif //__nvoc_disp_objs_h_disabled + +#undef PRIVATE_FIELD + + +/*! + * RM internal class representing NvDisp's XXX_DISPLAY (C370, C570...etc). Parent for + * all other display resources (channels, etc). Allocated under a device or subdevice. + * + * Only one instance of this class is allowed per-GPU. Multi-instance restrictions + * are enforced by resource_list.h + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__NvDispApi; +struct NVOC_METADATA__DispObject; +struct NVOC_VTABLE__NvDispApi; + + +struct NvDispApi { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__NvDispApi *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct DispObject __nvoc_base_DispObject; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^5 + struct RsResource *__nvoc_pbase_RsResource; // res super^4 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^4 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^3 + struct INotifier *__nvoc_pbase_INotifier; // inotify super^4 + struct Notifier *__nvoc_pbase_Notifier; // notify super^3 + struct DisplayApi *__nvoc_pbase_DisplayApi; // dispapi super^2 + struct DispObject *__nvoc_pbase_DispObject; // dispobj super + struct NvDispApi *__nvoc_pbase_NvDispApi; // nvdispapi + + // Vtable with 8 per-object function pointers + NV_STATUS (*__nvdispapiCtrlCmdIdleChannel__)(struct NvDispApi * /*this*/, NVC370_CTRL_IDLE_CHANNEL_PARAMS *); // exported (id=0xc3700101) + NV_STATUS (*__nvdispapiCtrlCmdSetAccl__)(struct NvDispApi * /*this*/, NVC370_CTRL_SET_ACCL_PARAMS *); // exported (id=0xc3700102) + NV_STATUS (*__nvdispapiCtrlCmdGetAccl__)(struct NvDispApi * /*this*/, NVC370_CTRL_GET_ACCL_PARAMS *); // exported (id=0xc3700103) + NV_STATUS (*__nvdispapiCtrlCmdGetChannelInfo__)(struct NvDispApi * /*this*/, NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS *); // exported (id=0xc3700104) + NV_STATUS (*__nvdispapiCtrlCmdChannelCancelFlip__)(struct NvDispApi * /*this*/, NVC370_CTRL_CHANNEL_CANCEL_FLIP_PARAMS *); // exported (id=0xc3700105) + NV_STATUS (*__nvdispapiCtrlCmdSetSwaprdyGpioWar__)(struct NvDispApi * /*this*/, NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS *); // exported (id=0xc3700202) + NV_STATUS (*__nvdispapiCtrlCmdGetLockpinsCaps__)(struct NvDispApi * /*this*/, NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS *); // exported (id=0xc3700201) + NV_STATUS (*__nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides__)(struct NvDispApi * /*this*/, NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS *); // exported (id=0xc3700602) +}; + + +// Vtable with 26 per-class function pointers +struct NVOC_VTABLE__NvDispApi { + NV_STATUS (*__nvdispapiControl__)(struct NvDispApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (dispapi) base (dispobj) + NV_STATUS (*__nvdispapiControl_Prologue__)(struct NvDispApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (dispapi) base (dispobj) + void (*__nvdispapiControl_Epilogue__)(struct NvDispApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (dispapi) base (dispobj) + NvBool (*__nvdispapiAccessCallback__)(struct NvDispApi * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (dispobj) + NvBool (*__nvdispapiShareCallback__)(struct NvDispApi * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (rmres) base (dispobj) + NV_STATUS (*__nvdispapiGetMemInterMapParams__)(struct NvDispApi * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (dispobj) + NV_STATUS (*__nvdispapiCheckMemInterUnmap__)(struct NvDispApi * /*this*/, NvBool); // virtual inherited (rmres) base (dispobj) + NV_STATUS (*__nvdispapiGetMemoryMappingDescriptor__)(struct NvDispApi * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (dispobj) + NV_STATUS (*__nvdispapiControlSerialization_Prologue__)(struct NvDispApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (dispobj) + void 
(*__nvdispapiControlSerialization_Epilogue__)(struct NvDispApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (dispobj) + NvBool (*__nvdispapiCanCopy__)(struct NvDispApi * /*this*/); // virtual inherited (res) base (dispobj) + NV_STATUS (*__nvdispapiIsDuplicate__)(struct NvDispApi * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (dispobj) + void (*__nvdispapiPreDestruct__)(struct NvDispApi * /*this*/); // virtual inherited (res) base (dispobj) + NV_STATUS (*__nvdispapiControlFilter__)(struct NvDispApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (dispobj) + NV_STATUS (*__nvdispapiMap__)(struct NvDispApi * /*this*/, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (res) base (dispobj) + NV_STATUS (*__nvdispapiUnmap__)(struct NvDispApi * /*this*/, struct CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (res) base (dispobj) + NvBool (*__nvdispapiIsPartialUnmapSupported__)(struct NvDispApi * /*this*/); // inline virtual inherited (res) base (dispobj) body + NV_STATUS (*__nvdispapiMapTo__)(struct NvDispApi * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (dispobj) + NV_STATUS (*__nvdispapiUnmapFrom__)(struct NvDispApi * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (dispobj) + NvU32 (*__nvdispapiGetRefCount__)(struct NvDispApi * /*this*/); // virtual inherited (res) base (dispobj) + void (*__nvdispapiAddAdditionalDependants__)(struct RsClient *, struct NvDispApi * /*this*/, RsResourceRef *); // virtual inherited (res) base (dispobj) + PEVENTNOTIFICATION * (*__nvdispapiGetNotificationListPtr__)(struct NvDispApi * /*this*/); // virtual inherited (notify) base (dispobj) + struct NotifShare * (*__nvdispapiGetNotificationShare__)(struct NvDispApi * /*this*/); // virtual inherited (notify) base (dispobj) + void (*__nvdispapiSetNotificationShare__)(struct NvDispApi * /*this*/, struct NotifShare *); // virtual inherited (notify) base (dispobj) + NV_STATUS (*__nvdispapiUnregisterEvent__)(struct NvDispApi * /*this*/, NvHandle, NvHandle, NvHandle, NvHandle); // virtual inherited (notify) base (dispobj) + NV_STATUS (*__nvdispapiGetOrAllocNotifShare__)(struct NvDispApi * /*this*/, NvHandle, NvHandle, struct NotifShare **); // virtual inherited (notify) base (dispobj) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__NvDispApi { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__DispObject metadata__DispObject; + const struct NVOC_VTABLE__NvDispApi vtable; +}; + +#ifndef __NVOC_CLASS_NvDispApi_TYPEDEF__ +#define __NVOC_CLASS_NvDispApi_TYPEDEF__ +typedef struct NvDispApi NvDispApi; +#endif /* __NVOC_CLASS_NvDispApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvDispApi +#define __nvoc_class_id_NvDispApi 0x36aa0b +#endif /* __nvoc_class_id_NvDispApi */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NvDispApi; + +#define __staticCast_NvDispApi(pThis) \ + ((pThis)->__nvoc_pbase_NvDispApi) + +#ifdef __nvoc_disp_objs_h_disabled +#define __dynamicCast_NvDispApi(pThis) ((NvDispApi*) NULL) +#else //__nvoc_disp_objs_h_disabled +#define __dynamicCast_NvDispApi(pThis) \ + ((NvDispApi*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(NvDispApi))) +#endif //__nvoc_disp_objs_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_NvDispApi(NvDispApi**, Dynamic*, NvU32, va_list); + +NV_STATUS 
__nvoc_objCreate_NvDispApi(NvDispApi**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_NvDispApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_NvDispApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define nvdispapiCtrlCmdIdleChannel_FNPTR(pNvDispApi) pNvDispApi->__nvdispapiCtrlCmdIdleChannel__ +#define nvdispapiCtrlCmdIdleChannel(pNvDispApi, pParams) nvdispapiCtrlCmdIdleChannel_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdSetAccl_FNPTR(pNvDispApi) pNvDispApi->__nvdispapiCtrlCmdSetAccl__ +#define nvdispapiCtrlCmdSetAccl(pNvDispApi, pParams) nvdispapiCtrlCmdSetAccl_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdGetAccl_FNPTR(pNvDispApi) pNvDispApi->__nvdispapiCtrlCmdGetAccl__ +#define nvdispapiCtrlCmdGetAccl(pNvDispApi, pParams) nvdispapiCtrlCmdGetAccl_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdGetChannelInfo_FNPTR(pNvDispApi) pNvDispApi->__nvdispapiCtrlCmdGetChannelInfo__ +#define nvdispapiCtrlCmdGetChannelInfo(pNvDispApi, pParams) nvdispapiCtrlCmdGetChannelInfo_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdChannelCancelFlip_FNPTR(pNvDispApi) pNvDispApi->__nvdispapiCtrlCmdChannelCancelFlip__ +#define nvdispapiCtrlCmdChannelCancelFlip(pNvDispApi, pParams) nvdispapiCtrlCmdChannelCancelFlip_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdSetSwaprdyGpioWar_FNPTR(pNvDispApi) pNvDispApi->__nvdispapiCtrlCmdSetSwaprdyGpioWar__ +#define nvdispapiCtrlCmdSetSwaprdyGpioWar(pNvDispApi, pParams) nvdispapiCtrlCmdSetSwaprdyGpioWar_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdGetLockpinsCaps_FNPTR(pNvDispApi) pNvDispApi->__nvdispapiCtrlCmdGetLockpinsCaps__ +#define nvdispapiCtrlCmdGetLockpinsCaps(pNvDispApi, pParams) nvdispapiCtrlCmdGetLockpinsCaps_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_FNPTR(pNvDispApi) pNvDispApi->__nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides__ +#define nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides(pNvDispApi, pParams) nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_DISPATCH(pNvDispApi, pParams) +#define nvdispapiControl_FNPTR(pDisplayApi) pDisplayApi->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_metadata_ptr->vtable.__dispapiControl__ +#define nvdispapiControl(pDisplayApi, pCallContext, pParams) nvdispapiControl_DISPATCH(pDisplayApi, pCallContext, pParams) +#define nvdispapiControl_Prologue_FNPTR(pDisplayApi) pDisplayApi->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_metadata_ptr->vtable.__dispapiControl_Prologue__ +#define nvdispapiControl_Prologue(pDisplayApi, pCallContext, pRsParams) nvdispapiControl_Prologue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define nvdispapiControl_Epilogue_FNPTR(pDisplayApi) pDisplayApi->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_metadata_ptr->vtable.__dispapiControl_Epilogue__ +#define nvdispapiControl_Epilogue(pDisplayApi, pCallContext, pRsParams) nvdispapiControl_Epilogue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define nvdispapiAccessCallback_FNPTR(pResource) pResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define nvdispapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) nvdispapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define nvdispapiShareCallback_FNPTR(pResource) 
pResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define nvdispapiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) nvdispapiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define nvdispapiGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define nvdispapiGetMemInterMapParams(pRmResource, pParams) nvdispapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define nvdispapiCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define nvdispapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) nvdispapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define nvdispapiGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define nvdispapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) nvdispapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define nvdispapiControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define nvdispapiControlSerialization_Prologue(pResource, pCallContext, pParams) nvdispapiControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define nvdispapiControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define nvdispapiControlSerialization_Epilogue(pResource, pCallContext, pParams) nvdispapiControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define nvdispapiCanCopy_FNPTR(pResource) pResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define nvdispapiCanCopy(pResource) nvdispapiCanCopy_DISPATCH(pResource) +#define nvdispapiIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define nvdispapiIsDuplicate(pResource, hMemory, pDuplicate) nvdispapiIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define nvdispapiPreDestruct_FNPTR(pResource) pResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define nvdispapiPreDestruct(pResource) nvdispapiPreDestruct_DISPATCH(pResource) +#define nvdispapiControlFilter_FNPTR(pResource) pResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define nvdispapiControlFilter(pResource, pCallContext, pParams) nvdispapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define nvdispapiMap_FNPTR(pResource) pResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMap__ +#define nvdispapiMap(pResource, pCallContext, pParams, pCpuMapping) nvdispapiMap_DISPATCH(pResource, 
pCallContext, pParams, pCpuMapping) +#define nvdispapiUnmap_FNPTR(pResource) pResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmap__ +#define nvdispapiUnmap(pResource, pCallContext, pCpuMapping) nvdispapiUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define nvdispapiIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define nvdispapiIsPartialUnmapSupported(pResource) nvdispapiIsPartialUnmapSupported_DISPATCH(pResource) +#define nvdispapiMapTo_FNPTR(pResource) pResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define nvdispapiMapTo(pResource, pParams) nvdispapiMapTo_DISPATCH(pResource, pParams) +#define nvdispapiUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define nvdispapiUnmapFrom(pResource, pParams) nvdispapiUnmapFrom_DISPATCH(pResource, pParams) +#define nvdispapiGetRefCount_FNPTR(pResource) pResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define nvdispapiGetRefCount(pResource) nvdispapiGetRefCount_DISPATCH(pResource) +#define nvdispapiAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define nvdispapiAddAdditionalDependants(pClient, pResource, pReference) nvdispapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define nvdispapiGetNotificationListPtr_FNPTR(pNotifier) pNotifier->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationListPtr__ +#define nvdispapiGetNotificationListPtr(pNotifier) nvdispapiGetNotificationListPtr_DISPATCH(pNotifier) +#define nvdispapiGetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationShare__ +#define nvdispapiGetNotificationShare(pNotifier) nvdispapiGetNotificationShare_DISPATCH(pNotifier) +#define nvdispapiSetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifySetNotificationShare__ +#define nvdispapiSetNotificationShare(pNotifier, pNotifShare) nvdispapiSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define nvdispapiUnregisterEvent_FNPTR(pNotifier) pNotifier->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyUnregisterEvent__ +#define nvdispapiUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) nvdispapiUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define nvdispapiGetOrAllocNotifShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetOrAllocNotifShare__ +#define nvdispapiGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) nvdispapiGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, 
hNotifierResource, ppNotifShare) + +// Dispatch functions +static inline NV_STATUS nvdispapiCtrlCmdIdleChannel_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_IDLE_CHANNEL_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdIdleChannel__(pNvDispApi, pParams); +} + +static inline NV_STATUS nvdispapiCtrlCmdSetAccl_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_SET_ACCL_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdSetAccl__(pNvDispApi, pParams); +} + +static inline NV_STATUS nvdispapiCtrlCmdGetAccl_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_GET_ACCL_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdGetAccl__(pNvDispApi, pParams); +} + +static inline NV_STATUS nvdispapiCtrlCmdGetChannelInfo_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdGetChannelInfo__(pNvDispApi, pParams); +} + +static inline NV_STATUS nvdispapiCtrlCmdChannelCancelFlip_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_CHANNEL_CANCEL_FLIP_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdChannelCancelFlip__(pNvDispApi, pParams); +} + +static inline NV_STATUS nvdispapiCtrlCmdSetSwaprdyGpioWar_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdSetSwaprdyGpioWar__(pNvDispApi, pParams); +} + +static inline NV_STATUS nvdispapiCtrlCmdGetLockpinsCaps_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdGetLockpinsCaps__(pNvDispApi, pParams); +} + +static inline NV_STATUS nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides__(pNvDispApi, pParams); +} + +static inline NV_STATUS nvdispapiControl_DISPATCH(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDisplayApi->__nvoc_metadata_ptr->vtable.__nvdispapiControl__(pDisplayApi, pCallContext, pParams); +} + +static inline NV_STATUS nvdispapiControl_Prologue_DISPATCH(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return pDisplayApi->__nvoc_metadata_ptr->vtable.__nvdispapiControl_Prologue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline void nvdispapiControl_Epilogue_DISPATCH(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + pDisplayApi->__nvoc_metadata_ptr->vtable.__nvdispapiControl_Epilogue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline NvBool nvdispapiAccessCallback_DISPATCH(struct NvDispApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__nvdispapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool nvdispapiShareCallback_DISPATCH(struct NvDispApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__nvdispapiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS nvdispapiGetMemInterMapParams_DISPATCH(struct NvDispApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return 
pRmResource->__nvoc_metadata_ptr->vtable.__nvdispapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS nvdispapiCheckMemInterUnmap_DISPATCH(struct NvDispApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__nvdispapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS nvdispapiGetMemoryMappingDescriptor_DISPATCH(struct NvDispApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__nvdispapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS nvdispapiControlSerialization_Prologue_DISPATCH(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__nvdispapiControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void nvdispapiControlSerialization_Epilogue_DISPATCH(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__nvdispapiControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool nvdispapiCanCopy_DISPATCH(struct NvDispApi *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__nvdispapiCanCopy__(pResource); +} + +static inline NV_STATUS nvdispapiIsDuplicate_DISPATCH(struct NvDispApi *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__nvdispapiIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void nvdispapiPreDestruct_DISPATCH(struct NvDispApi *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__nvdispapiPreDestruct__(pResource); +} + +static inline NV_STATUS nvdispapiControlFilter_DISPATCH(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__nvdispapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS nvdispapiMap_DISPATCH(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__nvdispapiMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS nvdispapiUnmap_DISPATCH(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__nvdispapiUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool nvdispapiIsPartialUnmapSupported_DISPATCH(struct NvDispApi *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__nvdispapiIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS nvdispapiMapTo_DISPATCH(struct NvDispApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__nvdispapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS nvdispapiUnmapFrom_DISPATCH(struct NvDispApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__nvdispapiUnmapFrom__(pResource, pParams); +} + +static inline NvU32 nvdispapiGetRefCount_DISPATCH(struct NvDispApi *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__nvdispapiGetRefCount__(pResource); +} + +static inline void nvdispapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct NvDispApi *pResource, RsResourceRef 
*pReference) {
+    pResource->__nvoc_metadata_ptr->vtable.__nvdispapiAddAdditionalDependants__(pClient, pResource, pReference);
+}
+
+static inline PEVENTNOTIFICATION * nvdispapiGetNotificationListPtr_DISPATCH(struct NvDispApi *pNotifier) {
+    return pNotifier->__nvoc_metadata_ptr->vtable.__nvdispapiGetNotificationListPtr__(pNotifier);
+}
+
+static inline struct NotifShare * nvdispapiGetNotificationShare_DISPATCH(struct NvDispApi *pNotifier) {
+    return pNotifier->__nvoc_metadata_ptr->vtable.__nvdispapiGetNotificationShare__(pNotifier);
+}
+
+static inline void nvdispapiSetNotificationShare_DISPATCH(struct NvDispApi *pNotifier, struct NotifShare *pNotifShare) {
+    pNotifier->__nvoc_metadata_ptr->vtable.__nvdispapiSetNotificationShare__(pNotifier, pNotifShare);
+}
+
+static inline NV_STATUS nvdispapiUnregisterEvent_DISPATCH(struct NvDispApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
+    return pNotifier->__nvoc_metadata_ptr->vtable.__nvdispapiUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
+}
+
+static inline NV_STATUS nvdispapiGetOrAllocNotifShare_DISPATCH(struct NvDispApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
+    return pNotifier->__nvoc_metadata_ptr->vtable.__nvdispapiGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
+}
+
+NV_STATUS nvdispapiCtrlCmdIdleChannel_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_IDLE_CHANNEL_PARAMS *pParams);
+
+NV_STATUS nvdispapiCtrlCmdSetAccl_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_SET_ACCL_PARAMS *pParams);
+
+NV_STATUS nvdispapiCtrlCmdGetAccl_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_GET_ACCL_PARAMS *pParams);
+
+NV_STATUS nvdispapiCtrlCmdGetChannelInfo_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS *pParams);
+
+NV_STATUS nvdispapiCtrlCmdChannelCancelFlip_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_CHANNEL_CANCEL_FLIP_PARAMS *pParams);
+
+NV_STATUS nvdispapiCtrlCmdSetSwaprdyGpioWar_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS *pParams);
+
+NV_STATUS nvdispapiCtrlCmdGetLockpinsCaps_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS *pParams);
+
+NV_STATUS nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS *pParams);
+
+NV_STATUS nvdispapiConstruct_IMPL(struct NvDispApi *arg_pNvdispApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+
+#define __nvoc_nvdispapiConstruct(arg_pNvdispApi, arg_pCallContext, arg_pParams) nvdispapiConstruct_IMPL(arg_pNvdispApi, arg_pCallContext, arg_pParams)
+#undef PRIVATE_FIELD
+
+
+/*!
+ * RM internal class representing XXX_DISPLAY_SW
+ *
+ * With NvDisplay, classes are divided into HW and SW classes. The HW class
+ * provides the interface for registers/methods; the SW class provides rmctrls.
+ * Clients can use multiple SW classes on a chip, but only one HW class.
+ * NVC372_DISPLAY_SW is the SW class of the NvDisplay family of chips.
+ *
+ * Multi-instance restrictions are enforced by resource_list.h
+ */
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
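+// --- Editorial sketch (not part of the generated header) --------------------
+// A minimal illustration of the PRIVATE_FIELD convention, assuming a
+// hypothetical private member declared as `NvU32 PRIVATE_FIELD(refreshCount);`
+// inside a class body. The matching C source file defines
+// NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED before including this header, so the
+// macro below expands to the bare name `refreshCount` and the field is usable
+// there. Every other translation unit gets the NVOC_PRIVATE_FIELD(...)
+// expansion instead, so an out-of-class reference triggers a diagnostic rather
+// than silently reaching into private state.
+// -----------------------------------------------------------------------------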
+#ifdef NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DispSwObj; +struct NVOC_METADATA__DisplayApi; +struct NVOC_VTABLE__DispSwObj; + + +struct DispSwObj { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__DispSwObj *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct DisplayApi __nvoc_base_DisplayApi; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^4 + struct RsResource *__nvoc_pbase_RsResource; // res super^3 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^2 + struct INotifier *__nvoc_pbase_INotifier; // inotify super^3 + struct Notifier *__nvoc_pbase_Notifier; // notify super^2 + struct DisplayApi *__nvoc_pbase_DisplayApi; // dispapi super + struct DispSwObj *__nvoc_pbase_DispSwObj; // dispswobj + + // Vtable with 4 per-object function pointers + NV_STATUS (*__dispswobjCtrlCmdIsModePossible__)(struct DispSwObj * /*this*/, NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *); // exported (id=0xc3720101) + NV_STATUS (*__dispswobjCtrlCmdIsModePossibleOrSettings__)(struct DispSwObj * /*this*/, NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS *); // exported (id=0xc3720102) + NV_STATUS (*__dispswobjCtrlCmdVideoAdaptiveRefreshRate__)(struct DispSwObj * /*this*/, NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS *); // exported (id=0xc3720103) + NV_STATUS (*__dispswobjCtrlCmdGetActiveViewportPointIn__)(struct DispSwObj * /*this*/, NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS *); // exported (id=0xc3720104) +}; + + +// Vtable with 26 per-class function pointers +struct NVOC_VTABLE__DispSwObj { + NV_STATUS (*__dispswobjControl__)(struct DispSwObj * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (dispapi) base (dispapi) + NV_STATUS (*__dispswobjControl_Prologue__)(struct DispSwObj * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (dispapi) base (dispapi) + void (*__dispswobjControl_Epilogue__)(struct DispSwObj * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (dispapi) base (dispapi) + NvBool (*__dispswobjAccessCallback__)(struct DispSwObj * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (dispapi) + NvBool (*__dispswobjShareCallback__)(struct DispSwObj * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (rmres) base (dispapi) + NV_STATUS (*__dispswobjGetMemInterMapParams__)(struct DispSwObj * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (dispapi) + NV_STATUS (*__dispswobjCheckMemInterUnmap__)(struct DispSwObj * /*this*/, NvBool); // virtual inherited (rmres) base (dispapi) + NV_STATUS (*__dispswobjGetMemoryMappingDescriptor__)(struct DispSwObj * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (dispapi) + NV_STATUS (*__dispswobjControlSerialization_Prologue__)(struct DispSwObj * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (dispapi) + void (*__dispswobjControlSerialization_Epilogue__)(struct DispSwObj * /*this*/, struct CALL_CONTEXT *, 
struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (dispapi) + NvBool (*__dispswobjCanCopy__)(struct DispSwObj * /*this*/); // virtual inherited (res) base (dispapi) + NV_STATUS (*__dispswobjIsDuplicate__)(struct DispSwObj * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (dispapi) + void (*__dispswobjPreDestruct__)(struct DispSwObj * /*this*/); // virtual inherited (res) base (dispapi) + NV_STATUS (*__dispswobjControlFilter__)(struct DispSwObj * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (dispapi) + NV_STATUS (*__dispswobjMap__)(struct DispSwObj * /*this*/, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (res) base (dispapi) + NV_STATUS (*__dispswobjUnmap__)(struct DispSwObj * /*this*/, struct CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (res) base (dispapi) + NvBool (*__dispswobjIsPartialUnmapSupported__)(struct DispSwObj * /*this*/); // inline virtual inherited (res) base (dispapi) body + NV_STATUS (*__dispswobjMapTo__)(struct DispSwObj * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (dispapi) + NV_STATUS (*__dispswobjUnmapFrom__)(struct DispSwObj * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (dispapi) + NvU32 (*__dispswobjGetRefCount__)(struct DispSwObj * /*this*/); // virtual inherited (res) base (dispapi) + void (*__dispswobjAddAdditionalDependants__)(struct RsClient *, struct DispSwObj * /*this*/, RsResourceRef *); // virtual inherited (res) base (dispapi) + PEVENTNOTIFICATION * (*__dispswobjGetNotificationListPtr__)(struct DispSwObj * /*this*/); // virtual inherited (notify) base (dispapi) + struct NotifShare * (*__dispswobjGetNotificationShare__)(struct DispSwObj * /*this*/); // virtual inherited (notify) base (dispapi) + void (*__dispswobjSetNotificationShare__)(struct DispSwObj * /*this*/, struct NotifShare *); // virtual inherited (notify) base (dispapi) + NV_STATUS (*__dispswobjUnregisterEvent__)(struct DispSwObj * /*this*/, NvHandle, NvHandle, NvHandle, NvHandle); // virtual inherited (notify) base (dispapi) + NV_STATUS (*__dispswobjGetOrAllocNotifShare__)(struct DispSwObj * /*this*/, NvHandle, NvHandle, struct NotifShare **); // virtual inherited (notify) base (dispapi) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DispSwObj { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__DisplayApi metadata__DisplayApi; + const struct NVOC_VTABLE__DispSwObj vtable; +}; + +#ifndef __NVOC_CLASS_DispSwObj_TYPEDEF__ +#define __NVOC_CLASS_DispSwObj_TYPEDEF__ +typedef struct DispSwObj DispSwObj; +#endif /* __NVOC_CLASS_DispSwObj_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSwObj +#define __nvoc_class_id_DispSwObj 0x6aa5e2 +#endif /* __nvoc_class_id_DispSwObj */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSwObj; + +#define __staticCast_DispSwObj(pThis) \ + ((pThis)->__nvoc_pbase_DispSwObj) + +#ifdef __nvoc_disp_objs_h_disabled +#define __dynamicCast_DispSwObj(pThis) ((DispSwObj*) NULL) +#else //__nvoc_disp_objs_h_disabled +#define __dynamicCast_DispSwObj(pThis) \ + ((DispSwObj*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispSwObj))) +#endif //__nvoc_disp_objs_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_DispSwObj(DispSwObj**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispSwObj(DispSwObj**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL 
*arg_pParams); +#define __objCreate_DispSwObj(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispSwObj((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define dispswobjCtrlCmdIsModePossible_FNPTR(pDispSwObj) pDispSwObj->__dispswobjCtrlCmdIsModePossible__ +#define dispswobjCtrlCmdIsModePossible(pDispSwObj, pParams) dispswobjCtrlCmdIsModePossible_DISPATCH(pDispSwObj, pParams) +#define dispswobjCtrlCmdIsModePossibleOrSettings_FNPTR(pDispSwObj) pDispSwObj->__dispswobjCtrlCmdIsModePossibleOrSettings__ +#define dispswobjCtrlCmdIsModePossibleOrSettings(pDispSwObj, pParams) dispswobjCtrlCmdIsModePossibleOrSettings_DISPATCH(pDispSwObj, pParams) +#define dispswobjCtrlCmdVideoAdaptiveRefreshRate_FNPTR(pDispSwObj) pDispSwObj->__dispswobjCtrlCmdVideoAdaptiveRefreshRate__ +#define dispswobjCtrlCmdVideoAdaptiveRefreshRate(pDispSwObj, pParams) dispswobjCtrlCmdVideoAdaptiveRefreshRate_DISPATCH(pDispSwObj, pParams) +#define dispswobjCtrlCmdGetActiveViewportPointIn_FNPTR(pDispSwObj) pDispSwObj->__dispswobjCtrlCmdGetActiveViewportPointIn__ +#define dispswobjCtrlCmdGetActiveViewportPointIn(pDispSwObj, pParams) dispswobjCtrlCmdGetActiveViewportPointIn_DISPATCH(pDispSwObj, pParams) +#define dispswobjControl_FNPTR(pDisplayApi) pDisplayApi->__nvoc_base_DisplayApi.__nvoc_metadata_ptr->vtable.__dispapiControl__ +#define dispswobjControl(pDisplayApi, pCallContext, pParams) dispswobjControl_DISPATCH(pDisplayApi, pCallContext, pParams) +#define dispswobjControl_Prologue_FNPTR(pDisplayApi) pDisplayApi->__nvoc_base_DisplayApi.__nvoc_metadata_ptr->vtable.__dispapiControl_Prologue__ +#define dispswobjControl_Prologue(pDisplayApi, pCallContext, pRsParams) dispswobjControl_Prologue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispswobjControl_Epilogue_FNPTR(pDisplayApi) pDisplayApi->__nvoc_base_DisplayApi.__nvoc_metadata_ptr->vtable.__dispapiControl_Epilogue__ +#define dispswobjControl_Epilogue(pDisplayApi, pCallContext, pRsParams) dispswobjControl_Epilogue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispswobjAccessCallback_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define dispswobjAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispswobjAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispswobjShareCallback_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define dispswobjShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) dispswobjShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispswobjGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define dispswobjGetMemInterMapParams(pRmResource, pParams) dispswobjGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispswobjCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define dispswobjCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispswobjCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispswobjGetMemoryMappingDescriptor_FNPTR(pRmResource) 
pRmResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define dispswobjGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispswobjGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispswobjControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define dispswobjControlSerialization_Prologue(pResource, pCallContext, pParams) dispswobjControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispswobjControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define dispswobjControlSerialization_Epilogue(pResource, pCallContext, pParams) dispswobjControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispswobjCanCopy_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define dispswobjCanCopy(pResource) dispswobjCanCopy_DISPATCH(pResource) +#define dispswobjIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define dispswobjIsDuplicate(pResource, hMemory, pDuplicate) dispswobjIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define dispswobjPreDestruct_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define dispswobjPreDestruct(pResource) dispswobjPreDestruct_DISPATCH(pResource) +#define dispswobjControlFilter_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define dispswobjControlFilter(pResource, pCallContext, pParams) dispswobjControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispswobjMap_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMap__ +#define dispswobjMap(pResource, pCallContext, pParams, pCpuMapping) dispswobjMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define dispswobjUnmap_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmap__ +#define dispswobjUnmap(pResource, pCallContext, pCpuMapping) dispswobjUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define dispswobjIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define dispswobjIsPartialUnmapSupported(pResource) dispswobjIsPartialUnmapSupported_DISPATCH(pResource) +#define dispswobjMapTo_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define dispswobjMapTo(pResource, pParams) dispswobjMapTo_DISPATCH(pResource, pParams) +#define dispswobjUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define dispswobjUnmapFrom(pResource, pParams) dispswobjUnmapFrom_DISPATCH(pResource, pParams) +#define dispswobjGetRefCount_FNPTR(pResource) 
pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define dispswobjGetRefCount(pResource) dispswobjGetRefCount_DISPATCH(pResource) +#define dispswobjAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define dispswobjAddAdditionalDependants(pClient, pResource, pReference) dispswobjAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispswobjGetNotificationListPtr_FNPTR(pNotifier) pNotifier->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationListPtr__ +#define dispswobjGetNotificationListPtr(pNotifier) dispswobjGetNotificationListPtr_DISPATCH(pNotifier) +#define dispswobjGetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationShare__ +#define dispswobjGetNotificationShare(pNotifier) dispswobjGetNotificationShare_DISPATCH(pNotifier) +#define dispswobjSetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifySetNotificationShare__ +#define dispswobjSetNotificationShare(pNotifier, pNotifShare) dispswobjSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispswobjUnregisterEvent_FNPTR(pNotifier) pNotifier->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyUnregisterEvent__ +#define dispswobjUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispswobjUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispswobjGetOrAllocNotifShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetOrAllocNotifShare__ +#define dispswobjGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispswobjGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) + +// Dispatch functions +static inline NV_STATUS dispswobjCtrlCmdIsModePossible_DISPATCH(struct DispSwObj *pDispSwObj, NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *pParams) { + return pDispSwObj->__dispswobjCtrlCmdIsModePossible__(pDispSwObj, pParams); +} + +static inline NV_STATUS dispswobjCtrlCmdIsModePossibleOrSettings_DISPATCH(struct DispSwObj *pDispSwObj, NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS *pParams) { + return pDispSwObj->__dispswobjCtrlCmdIsModePossibleOrSettings__(pDispSwObj, pParams); +} + +static inline NV_STATUS dispswobjCtrlCmdVideoAdaptiveRefreshRate_DISPATCH(struct DispSwObj *pDispSwObj, NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS *pParams) { + return pDispSwObj->__dispswobjCtrlCmdVideoAdaptiveRefreshRate__(pDispSwObj, pParams); +} + +static inline NV_STATUS dispswobjCtrlCmdGetActiveViewportPointIn_DISPATCH(struct DispSwObj *pDispSwObj, NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS *pParams) { + return pDispSwObj->__dispswobjCtrlCmdGetActiveViewportPointIn__(pDispSwObj, pParams); +} + +static inline NV_STATUS dispswobjControl_DISPATCH(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDisplayApi->__nvoc_metadata_ptr->vtable.__dispswobjControl__(pDisplayApi, pCallContext, pParams); +} + +static inline NV_STATUS dispswobjControl_Prologue_DISPATCH(struct DispSwObj *pDisplayApi, struct 
CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return pDisplayApi->__nvoc_metadata_ptr->vtable.__dispswobjControl_Prologue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline void dispswobjControl_Epilogue_DISPATCH(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + pDisplayApi->__nvoc_metadata_ptr->vtable.__dispswobjControl_Epilogue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline NvBool dispswobjAccessCallback_DISPATCH(struct DispSwObj *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__dispswobjAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool dispswobjShareCallback_DISPATCH(struct DispSwObj *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__dispswobjShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispswobjGetMemInterMapParams_DISPATCH(struct DispSwObj *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispswobjGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispswobjCheckMemInterUnmap_DISPATCH(struct DispSwObj *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispswobjCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispswobjGetMemoryMappingDescriptor_DISPATCH(struct DispSwObj *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispswobjGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispswobjControlSerialization_Prologue_DISPATCH(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispswobjControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void dispswobjControlSerialization_Epilogue_DISPATCH(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__dispswobjControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool dispswobjCanCopy_DISPATCH(struct DispSwObj *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispswobjCanCopy__(pResource); +} + +static inline NV_STATUS dispswobjIsDuplicate_DISPATCH(struct DispSwObj *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__dispswobjIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void dispswobjPreDestruct_DISPATCH(struct DispSwObj *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__dispswobjPreDestruct__(pResource); +} + +static inline NV_STATUS dispswobjControlFilter_DISPATCH(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispswobjControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispswobjMap_DISPATCH(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return 
pResource->__nvoc_metadata_ptr->vtable.__dispswobjMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispswobjUnmap_DISPATCH(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__dispswobjUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool dispswobjIsPartialUnmapSupported_DISPATCH(struct DispSwObj *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispswobjIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS dispswobjMapTo_DISPATCH(struct DispSwObj *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispswobjMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispswobjUnmapFrom_DISPATCH(struct DispSwObj *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispswobjUnmapFrom__(pResource, pParams); +} + +static inline NvU32 dispswobjGetRefCount_DISPATCH(struct DispSwObj *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispswobjGetRefCount__(pResource); +} + +static inline void dispswobjAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispSwObj *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__dispswobjAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline PEVENTNOTIFICATION * dispswobjGetNotificationListPtr_DISPATCH(struct DispSwObj *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispswobjGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare * dispswobjGetNotificationShare_DISPATCH(struct DispSwObj *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispswobjGetNotificationShare__(pNotifier); +} + +static inline void dispswobjSetNotificationShare_DISPATCH(struct DispSwObj *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvoc_metadata_ptr->vtable.__dispswobjSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS dispswobjUnregisterEvent_DISPATCH(struct DispSwObj *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispswobjUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS dispswobjGetOrAllocNotifShare_DISPATCH(struct DispSwObj *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispswobjGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS dispswobjCtrlCmdIsModePossible_IMPL(struct DispSwObj *pDispSwObj, NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *pParams); + +NV_STATUS dispswobjCtrlCmdIsModePossibleOrSettings_IMPL(struct DispSwObj *pDispSwObj, NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS *pParams); + +NV_STATUS dispswobjCtrlCmdVideoAdaptiveRefreshRate_IMPL(struct DispSwObj *pDispSwObj, NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS *pParams); + +NV_STATUS dispswobjCtrlCmdGetActiveViewportPointIn_IMPL(struct DispSwObj *pDispSwObj, NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS *pParams); + +NV_STATUS dispswobjConstruct_IMPL(struct DispSwObj *arg_pDispSwObj, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_dispswobjConstruct(arg_pDispSwObj, arg_pCallContext, arg_pParams) 
dispswobjConstruct_IMPL(arg_pDispSwObj, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +/*! + * RM internal class representing XXX_DISPLAY_COMMON (class id: 0x0073) + * + * Only one instance of this class is allowed per-GPU. Multi-instance restrictions + * are enforced by resource_list.h + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DispCommon; +struct NVOC_METADATA__DisplayApi; +struct NVOC_VTABLE__DispCommon; + + +struct DispCommon { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__DispCommon *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct DisplayApi __nvoc_base_DisplayApi; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^4 + struct RsResource *__nvoc_pbase_RsResource; // res super^3 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^2 + struct INotifier *__nvoc_pbase_INotifier; // inotify super^3 + struct Notifier *__nvoc_pbase_Notifier; // notify super^2 + struct DisplayApi *__nvoc_pbase_DisplayApi; // dispapi super + struct DispCommon *__nvoc_pbase_DispCommon; // dispcmn + + // Vtable with 112 per-object function pointers + NV_STATUS (*__dispcmnCtrlCmdSpecificGetHdcpState__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_GET_HDCP_STATE_PARAMS *); // exported (id=0x730280) + NV_STATUS (*__dispcmnCtrlCmdSpecificHdcpCtrl__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_HDCP_CTRL_PARAMS *); // exported (id=0x730282) + NV_STATUS (*__dispcmnCtrlCmdSpecificGetHdcpRepeaterInfo__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_GET_HDCP_REPEATER_INFO_PARAMS *); // exported (id=0x730260) + NV_STATUS (*__dispcmnCtrlCmdSpecificGetHdcpDiagnostics__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_GET_HDCP_DIAGNOSTICS_PARAMS *); // exported (id=0x730281) + NV_STATUS (*__dispcmnCtrlCmdSpecificHdcpKsvListValidate__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_HDCP_KSVLIST_VALIDATE_PARAMS *); // exported (id=0x73028d) + NV_STATUS (*__dispcmnCtrlCmdSpecificHdcpUpdate__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_HDCP_UPDATE_PARAMS *); // exported (id=0x73028e) + NV_STATUS (*__dispcmnCtrlCmdSystemValidateSrm__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_VALIDATE_SRM_PARAMS *); // exported (id=0x730118) + NV_STATUS (*__dispcmnCtrlCmdSystemGetSrmStatus__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_GET_SRM_STATUS_PARAMS *); // exported (id=0x730119) + NV_STATUS (*__dispcmnCtrlCmdSystemHdcpRevocationCheck__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_HDCP_REVOCATE_PARAMS *); // exported (id=0x73011b) + NV_STATUS (*__dispcmnCtrlCmdSystemUpdateSrm__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_UPDATE_SRM_PARAMS *); // exported (id=0x73011c) + NV_STATUS (*__dispcmnCtrlCmdSystemGetCapsV2__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *); // exported (id=0x730101) + NV_STATUS (*__dispcmnCtrlCmdSystemGetNumHeads__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *); // 
exported (id=0x730102) + NV_STATUS (*__dispcmnCtrlCmdSystemGetScanline__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS *); // exported (id=0x730104) + NV_STATUS (*__dispcmnCtrlCmdSystemGetSuppported__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *); // exported (id=0x730107) + NV_STATUS (*__dispcmnCtrlCmdSystemGetConnectState__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *); // exported (id=0x730108) + NV_STATUS (*__dispcmnCtrlCmdSystemGetHotplugUnplugState__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *); // exported (id=0x73012d) + NV_STATUS (*__dispcmnCtrlCmdInternalGetHotplugUnplugState__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *); // exported (id=0x730401) + NV_STATUS (*__dispcmnCtrlCmdSystemGetHeadRoutingMap__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS *); // exported (id=0x73010b) + NV_STATUS (*__dispcmnCtrlCmdSystemGetActive__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *); // exported (id=0x73010c) + NV_STATUS (*__dispcmnCtrlCmdSystemGetBootDisplays__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS *); // exported (id=0x73011e) + NV_STATUS (*__dispcmnCtrlCmdSystemQueryDisplayIdsWithMux__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS *); // exported (id=0x73013d) + NV_STATUS (*__dispcmnCtrlCmdSystemCheckSidebandI2cSupport__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS *); // exported (id=0x73014b) + NV_STATUS (*__dispcmnCtrlCmdSystemAllocateDisplayBandwidth__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS *); // exported (id=0x730143) + NV_STATUS (*__dispcmnCtrlCmdSystemInternalAllocateDisplayBandwidth__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_INTERNAL_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS *); // exported (id=0x730157) + NV_STATUS (*__dispcmnCtrlCmdSystemGetHotplugConfig__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS *); // exported (id=0x730109) + NV_STATUS (*__dispcmnCtrlCmdSystemGetHotplugEventConfig__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *); // exported (id=0x730144) + NV_STATUS (*__dispcmnCtrlCmdSystemSetHotplugEventConfig__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *); // exported (id=0x730145) + NV_STATUS (*__dispcmnCtrlCmdSystemArmLightweightSupervisor__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS *); // exported (id=0x73012f) + NV_STATUS (*__dispcmnCtrlCmdSystemSetRegionRamRectangles__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_SYSTEM_SET_REGION_RAM_RECTANGLES_PARAMS *); // exported (id=0x731177) + NV_STATUS (*__dispcmnCtrlCmdSystemConfigureSafetyInterrupts__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_SYSTEM_CONFIGURE_SAFETY_INTERRUPTS_PARAMS *); // exported (id=0x731178) + NV_STATUS (*__dispcmnCtrlCmdSystemConfigVrrPstateSwitch__)(struct DispCommon * /*this*/, NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS *); // exported (id=0x730134) + NV_STATUS (*__dispcmnCtrlCmdSpecificGetType__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS *); // exported (id=0x730240) + NV_STATUS (*__dispcmnCtrlCmdSpecificGetEdidV2__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *); // exported (id=0x730245) + NV_STATUS 
(*__dispcmnCtrlCmdSpecificSetEdidV2__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS *); // exported (id=0x730246) + NV_STATUS (*__dispcmnCtrlCmdSpecificFakeDevice__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS *); // exported (id=0x730243) + NV_STATUS (*__dispcmnCtrlCmdSpecificGetConnectorData__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *); // exported (id=0x730250) + NV_STATUS (*__dispcmnCtrlCmdSpecificSetHdmiEnable__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *); // exported (id=0x730273) + NV_STATUS (*__dispcmnCtrlCmdSpecificCtrlHdmi__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS *); // exported (id=0x730274) + NV_STATUS (*__dispcmnCtrlCmdSpecificGetAllHeadMask__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *); // exported (id=0x730287) + NV_STATUS (*__dispcmnCtrlCmdSpecificSetOdPacket__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *); // exported (id=0x730288) + NV_STATUS (*__dispcmnCtrlCmdSpecificAcquireSharedGenericPacket__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS *); // exported (id=0x7302aa) + NV_STATUS (*__dispcmnCtrlCmdSpecificSetSharedGenericPacket__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS *); // exported (id=0x7302a9) + NV_STATUS (*__dispcmnCtrlCmdSpecificReleaseSharedGenericPacket__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS *); // exported (id=0x7302ab) + NV_STATUS (*__dispcmnCtrlCmdSpecificSetOdPacketCtrl__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS *); // exported (id=0x730289) + NV_STATUS (*__dispcmnCtrlCmdSpecificOrGetInfo__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *); // exported (id=0x73028b) + NV_STATUS (*__dispcmnCtrlCmdSpecificGetPclkLimit__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS *); // exported (id=0x73028a) + NV_STATUS (*__dispcmnCtrlCmdSpecificSetHdmiSinkCaps__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *); // exported (id=0x730293) + NV_STATUS (*__dispcmnCtrlCmdSpecificSetMonitorPower__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS *); // exported (id=0x730295) + NV_STATUS (*__dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS *); // exported (id=0x73029a) + NV_STATUS (*__dispcmnCtrlCmdSpecificApplyEdidOverrideV2__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS *); // exported (id=0x7302a1) + NV_STATUS (*__dispcmnCtrlCmdSpecificGetI2cPortid__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS *); // exported (id=0x730211) + NV_STATUS (*__dispcmnCtrlCmdSpecificGetHdmiGpuCaps__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS *); // exported (id=0x7302a2) + NV_STATUS (*__dispcmnCtrlCmdSpecificGetHdmiScdcData__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS *); // exported (id=0x7302a6) + NV_STATUS (*__dispcmnCtrlCmdSpecificIsDirectmodeDisplay__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS *); // exported (id=0x7302a7) + NV_STATUS (*__dispcmnCtrlCmdSpecificDefaultAdaptivesyncDisplay__)(struct DispCommon * /*this*/, 
NV0073_CTRL_SPECIFIC_DEFAULT_ADAPTIVESYNC_DISPLAY_PARAMS *); // exported (id=0x7302ae) + NV_STATUS (*__dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *); // exported (id=0x7302a8) + NV_STATUS (*__dispcmnCtrlCmdSpecificDispI2cReadWrite__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS *); // exported (id=0x7302ac) + NV_STATUS (*__dispcmnCtrlCmdSpecificGetValidHeadWindowAssignment__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT_PARAMS *); // exported (id=0x7302ad) + NV_STATUS (*__dispcmnCtrlCmdSpecificSetHdmiAudioMutestream__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS *); // exported (id=0x730275) + NV_STATUS (*__dispcmnCtrlCmdSpecificDisplayChange__)(struct DispCommon * /*this*/, NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS *); // exported (id=0x7302a4) + NV_STATUS (*__dispcmnCtrlCmdDfpEdpDriverUnload__)(struct DispCommon * /*this*/, NV0073_CTRL_DFP_EDP_DRIVER_UNLOAD_PARAMS *); // exported (id=0x731176) + NV_STATUS (*__dispcmnCtrlCmdDfpSetForceBlackPixels__)(struct DispCommon * /*this*/, NV0073_CTRL_DFP_SET_FORCE_BLACK_PIXELS_PARAMS *); // exported (id=0x731179) + NV_STATUS (*__dispcmnCtrlCmdDfpGetInfo__)(struct DispCommon * /*this*/, NV0073_CTRL_DFP_GET_INFO_PARAMS *); // exported (id=0x731140) + NV_STATUS (*__dispcmnCtrlCmdDfpGetDisplayportDongleInfo__)(struct DispCommon * /*this*/, NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS *); // exported (id=0x731142) + NV_STATUS (*__dispcmnCtrlCmdDfpSetEldAudioCaps__)(struct DispCommon * /*this*/, NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *); // exported (id=0x731144) + NV_STATUS (*__dispcmnCtrlCmdDfpSetAudioEnable__)(struct DispCommon * /*this*/, NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *); // exported (id=0x731150) + NV_STATUS (*__dispcmnCtrlCmdDfpUpdateDynamicDfpCache__)(struct DispCommon * /*this*/, NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS *); // exported (id=0x73114e) + NV_STATUS (*__dispcmnCtrlCmdDfpAssignSor__)(struct DispCommon * /*this*/, NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *); // exported (id=0x731152) + NV_STATUS (*__dispcmnCtrlCmdDfpDscCrcControl__)(struct DispCommon * /*this*/, NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS *); // exported (id=0x731157) + NV_STATUS (*__dispcmnCtrlCmdDfpInitMuxData__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS *); // exported (id=0x731158) + NV_STATUS (*__dispcmnCtrlCmdDfpGetDsiModeTiming__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS *); // exported (id=0x731166) + NV_STATUS (*__dispcmnCtrlCmdDfpConfigTwoHeadOneOr__)(struct DispCommon * /*this*/, NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS *); // exported (id=0x731156) + NV_STATUS (*__dispcmnCtrlCmdDfpGetPadlinkMask__)(struct DispCommon * /*this*/, NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS *); // exported (id=0x731153) + NV_STATUS (*__dispcmnCtrlCmdDfpGetFixedModeTiming__)(struct DispCommon * /*this*/, NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS *); // exported (id=0x731172) + NV_STATUS (*__dispcmnCtrlCmdDpAuxchCtrl__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *); // exported (id=0x731341) + NV_STATUS (*__dispcmnCtrlCmdDpCtrl__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_CTRL_PARAMS *); // exported (id=0x731343) + NV_STATUS (*__dispcmnCtrlCmdDpGetLaneData__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_LANE_DATA_PARAMS *); // exported (id=0x731345) + 
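+    // Editorial note (not emitted by NVOC): the "exported (id=0x...)" tags on
+    // these entries appear to carry the NV0073_CTRL_CMD_* control value each
+    // pointer serves: the display class (0x0073) in the upper bits, then the
+    // command category (e.g. 0x01 system, 0x02 specific, 0x11 dfp, 0x13 dp)
+    // and the command index in the low byte -- e.g. 0x731345 above for the DP
+    // get-lane-data control.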
NV_STATUS (*__dispcmnCtrlCmdDpSetLaneData__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_LANE_DATA_PARAMS *); // exported (id=0x731346) + NV_STATUS (*__dispcmnCtrlCmdDpGetTestpattern__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS *); // exported (id=0x731348) + NV_STATUS (*__dispcmnCtrlCmdDpSetTestpattern__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS *); // exported (id=0x731347) + NV_STATUS (*__dispcmnCtrlCmdDpMainLinkCtrl__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS *); // exported (id=0x731356) + NV_STATUS (*__dispcmnCtrlCmdDpSetAudioMuteStream__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *); // exported (id=0x731359) + NV_STATUS (*__dispcmnCtrlCmdDpGetLinkConfig__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS *); // exported (id=0x731360) + NV_STATUS (*__dispcmnCtrlCmdDpGetEDPData__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_GET_EDP_DATA_PARAMS *); // exported (id=0x731361) + NV_STATUS (*__dispcmnCtrlCmdDpTopologyAllocateDisplayId__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *); // exported (id=0x73135b) + NV_STATUS (*__dispcmnCtrlCmdDpTopologyFreeDisplayId__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *); // exported (id=0x73135c) + NV_STATUS (*__dispcmnCtrlCmdDpConfigStream__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *); // exported (id=0x731362) + NV_STATUS (*__dispcmnCtrlCmdDpConfigSingleHeadMultiStream__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS *); // exported (id=0x73136e) + NV_STATUS (*__dispcmnCtrlCmdDpSetRateGov__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS *); // exported (id=0x731363) + NV_STATUS (*__dispcmnCtrlCmdDpSendACT__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS *); // exported (id=0x731367) + NV_STATUS (*__dispcmnCtrlCmdDpSetManualDisplayPort__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *); // exported (id=0x731365) + NV_STATUS (*__dispcmnCtrlCmdDpGetCaps__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *); // exported (id=0x731369) + NV_STATUS (*__dispcmnCtrlCmdDpSetMSAPropertiesv2__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_V2_PARAMS *); // exported (id=0x731381) + NV_STATUS (*__dispcmnCtrlCmdDpSetStereoMSAProperties__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS *); // exported (id=0x731378) + NV_STATUS (*__dispcmnCtrlCmdDpGenerateFakeInterrupt__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS *); // exported (id=0x73136b) + NV_STATUS (*__dispcmnCtrlCmdDpConfigRadScratchReg__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS *); // exported (id=0x73136c) + NV_STATUS (*__dispcmnCtrlCmdDpSetTriggerSelect__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS *); // exported (id=0x73136f) + NV_STATUS (*__dispcmnCtrlCmdDpSetTriggerAll__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS *); // exported (id=0x731370) + NV_STATUS (*__dispcmnCtrlCmdDpGetAuxLogData__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS *); // exported (id=0x731373) + NV_STATUS (*__dispcmnCtrlCmdDpConfigIndexedLinkRates__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS 
*); // exported (id=0x731377) + NV_STATUS (*__dispcmnCtrlCmdDpConfigureFec__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS *); // exported (id=0x73137a) + NV_STATUS (*__dispcmnCtrlCmdDpGetGenericInfoframe__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS *); // exported (id=0x73137e) + NV_STATUS (*__dispcmnCtrlCmdDpGetMsaAttributes__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS *); // exported (id=0x73137f) + NV_STATUS (*__dispcmnCtrlCmdFrlConfigMacroPad__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_PARAMS *); // exported (id=0x730502) + NV_STATUS (*__dispcmnCtrlCmdDpConfigMacroPad__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS *); // exported (id=0x73137b) + NV_STATUS (*__dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *); // exported (id=0x731351) + NV_STATUS (*__dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *); // exported (id=0x731352) + NV_STATUS (*__dispcmnCtrlCmdDpSetLevelInfoTableData__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_SET_LEVEL_INFO_TABLE_DATA_PARAMS *); // exported (id=0x731387) + NV_STATUS (*__dispcmnCtrlCmdDpGetLevelInfoTableData__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_GET_LEVEL_INFO_TABLE_DATA_PARAMS *); // exported (id=0x731388) + NV_STATUS (*__dispcmnCtrlCmdDpSetEcf__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_DP_SET_ECF_PARAMS *); // exported (id=0x731366) + NV_STATUS (*__dispcmnCtrlCmdDPGetCableIDInfoFromMacro__)(struct DispCommon * /*this*/, NV0073_CTRL_DP_USBC_CABLEID_INFO_PARAMS *); // exported (id=0x73138d) + NV_STATUS (*__dispcmnCtrlCmdSpecificGetRegionalCrcs__)(struct DispCommon * /*this*/, NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS *); // exported (id=0x7302a0) + + // Data members + NvU32 hotPlugMaskToBeReported; + NvU32 hotUnplugMaskToBeReported; +}; + + +// Vtable with 26 per-class function pointers +struct NVOC_VTABLE__DispCommon { + NV_STATUS (*__dispcmnControl__)(struct DispCommon * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (dispapi) base (dispapi) + NV_STATUS (*__dispcmnControl_Prologue__)(struct DispCommon * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (dispapi) base (dispapi) + void (*__dispcmnControl_Epilogue__)(struct DispCommon * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (dispapi) base (dispapi) + NvBool (*__dispcmnAccessCallback__)(struct DispCommon * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (dispapi) + NvBool (*__dispcmnShareCallback__)(struct DispCommon * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (rmres) base (dispapi) + NV_STATUS (*__dispcmnGetMemInterMapParams__)(struct DispCommon * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (dispapi) + NV_STATUS (*__dispcmnCheckMemInterUnmap__)(struct DispCommon * /*this*/, NvBool); // virtual inherited (rmres) base (dispapi) + NV_STATUS (*__dispcmnGetMemoryMappingDescriptor__)(struct DispCommon * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (dispapi) + NV_STATUS (*__dispcmnControlSerialization_Prologue__)(struct DispCommon 
* /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (dispapi) + void (*__dispcmnControlSerialization_Epilogue__)(struct DispCommon * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (dispapi) + NvBool (*__dispcmnCanCopy__)(struct DispCommon * /*this*/); // virtual inherited (res) base (dispapi) + NV_STATUS (*__dispcmnIsDuplicate__)(struct DispCommon * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (dispapi) + void (*__dispcmnPreDestruct__)(struct DispCommon * /*this*/); // virtual inherited (res) base (dispapi) + NV_STATUS (*__dispcmnControlFilter__)(struct DispCommon * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (dispapi) + NV_STATUS (*__dispcmnMap__)(struct DispCommon * /*this*/, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (res) base (dispapi) + NV_STATUS (*__dispcmnUnmap__)(struct DispCommon * /*this*/, struct CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (res) base (dispapi) + NvBool (*__dispcmnIsPartialUnmapSupported__)(struct DispCommon * /*this*/); // inline virtual inherited (res) base (dispapi) body + NV_STATUS (*__dispcmnMapTo__)(struct DispCommon * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (dispapi) + NV_STATUS (*__dispcmnUnmapFrom__)(struct DispCommon * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (dispapi) + NvU32 (*__dispcmnGetRefCount__)(struct DispCommon * /*this*/); // virtual inherited (res) base (dispapi) + void (*__dispcmnAddAdditionalDependants__)(struct RsClient *, struct DispCommon * /*this*/, RsResourceRef *); // virtual inherited (res) base (dispapi) + PEVENTNOTIFICATION * (*__dispcmnGetNotificationListPtr__)(struct DispCommon * /*this*/); // virtual inherited (notify) base (dispapi) + struct NotifShare * (*__dispcmnGetNotificationShare__)(struct DispCommon * /*this*/); // virtual inherited (notify) base (dispapi) + void (*__dispcmnSetNotificationShare__)(struct DispCommon * /*this*/, struct NotifShare *); // virtual inherited (notify) base (dispapi) + NV_STATUS (*__dispcmnUnregisterEvent__)(struct DispCommon * /*this*/, NvHandle, NvHandle, NvHandle, NvHandle); // virtual inherited (notify) base (dispapi) + NV_STATUS (*__dispcmnGetOrAllocNotifShare__)(struct DispCommon * /*this*/, NvHandle, NvHandle, struct NotifShare **); // virtual inherited (notify) base (dispapi) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DispCommon { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__DisplayApi metadata__DisplayApi; + const struct NVOC_VTABLE__DispCommon vtable; +}; + +#ifndef __NVOC_CLASS_DispCommon_TYPEDEF__ +#define __NVOC_CLASS_DispCommon_TYPEDEF__ +typedef struct DispCommon DispCommon; +#endif /* __NVOC_CLASS_DispCommon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispCommon +#define __nvoc_class_id_DispCommon 0x41f4f2 +#endif /* __nvoc_class_id_DispCommon */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCommon; + +#define __staticCast_DispCommon(pThis) \ + ((pThis)->__nvoc_pbase_DispCommon) + +#ifdef __nvoc_disp_objs_h_disabled +#define __dynamicCast_DispCommon(pThis) ((DispCommon*) NULL) +#else //__nvoc_disp_objs_h_disabled +#define __dynamicCast_DispCommon(pThis) \ + ((DispCommon*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispCommon))) +#endif //__nvoc_disp_objs_h_disabled + 
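+// --- Editorial sketch (illustrative only, not produced by NVOC) --------------
+// How the casting helpers above are typically used. The function name and its
+// argument are hypothetical, and the body assumes the usual NVOC runtime
+// definitions of staticCast()/classInfo() pulled in by this header.
+static inline DispCommon *exampleLookupDispCommon(Dynamic *pAny)
+{
+    // Run-time checked downcast: __dynamicCast_DispCommon walks the RTTI
+    // reachable through __nvoc_metadata_ptr and yields NULL on a mismatch
+    // (or unconditionally when __nvoc_disp_objs_h_disabled is defined).
+    DispCommon *pDispCommon = __dynamicCast_DispCommon(pAny);
+
+    // By contrast, __staticCast_DispCommon is only a read of the precomputed
+    // __nvoc_pbase_DispCommon ancestor pointer, so it is safe solely when the
+    // object's class is already known.
+    return pDispCommon;
+}
+// -----------------------------------------------------------------------------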
+NV_STATUS __nvoc_objCreateDynamic_DispCommon(DispCommon**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispCommon(DispCommon**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_DispCommon(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispCommon((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define dispcmnCtrlCmdSpecificGetHdcpState_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificGetHdcpState__ +#define dispcmnCtrlCmdSpecificGetHdcpState(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetHdcpState_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificHdcpCtrl_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificHdcpCtrl__ +#define dispcmnCtrlCmdSpecificHdcpCtrl(pDispCommon, pParams) dispcmnCtrlCmdSpecificHdcpCtrl_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetHdcpRepeaterInfo_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificGetHdcpRepeaterInfo__ +#define dispcmnCtrlCmdSpecificGetHdcpRepeaterInfo(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetHdcpRepeaterInfo_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetHdcpDiagnostics_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificGetHdcpDiagnostics__ +#define dispcmnCtrlCmdSpecificGetHdcpDiagnostics(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetHdcpDiagnostics_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificHdcpKsvListValidate_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificHdcpKsvListValidate__ +#define dispcmnCtrlCmdSpecificHdcpKsvListValidate(pDispCommon, pKsvListValidateParams) dispcmnCtrlCmdSpecificHdcpKsvListValidate_DISPATCH(pDispCommon, pKsvListValidateParams) +#define dispcmnCtrlCmdSpecificHdcpUpdate_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificHdcpUpdate__ +#define dispcmnCtrlCmdSpecificHdcpUpdate(pDispCommon, pHdcpUpdateParams) dispcmnCtrlCmdSpecificHdcpUpdate_DISPATCH(pDispCommon, pHdcpUpdateParams) +#define dispcmnCtrlCmdSystemValidateSrm_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemValidateSrm__ +#define dispcmnCtrlCmdSystemValidateSrm(pDispCommon, pParams) dispcmnCtrlCmdSystemValidateSrm_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemGetSrmStatus_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemGetSrmStatus__ +#define dispcmnCtrlCmdSystemGetSrmStatus(pDispCommon, pParams) dispcmnCtrlCmdSystemGetSrmStatus_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemHdcpRevocationCheck_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemHdcpRevocationCheck__ +#define dispcmnCtrlCmdSystemHdcpRevocationCheck(pDispCommon, pParams) dispcmnCtrlCmdSystemHdcpRevocationCheck_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemUpdateSrm_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemUpdateSrm__ +#define dispcmnCtrlCmdSystemUpdateSrm(pDispCommon, pParams) dispcmnCtrlCmdSystemUpdateSrm_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemGetCapsV2_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemGetCapsV2__ +#define dispcmnCtrlCmdSystemGetCapsV2(pDispCommon, pCapsParams) dispcmnCtrlCmdSystemGetCapsV2_DISPATCH(pDispCommon, pCapsParams) +#define dispcmnCtrlCmdSystemGetNumHeads_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemGetNumHeads__ +#define dispcmnCtrlCmdSystemGetNumHeads(pDispCommon, pNumHeadsParams) dispcmnCtrlCmdSystemGetNumHeads_DISPATCH(pDispCommon, pNumHeadsParams) +#define 
dispcmnCtrlCmdSystemGetScanline_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemGetScanline__ +#define dispcmnCtrlCmdSystemGetScanline(pDispCommon, pScanlineParams) dispcmnCtrlCmdSystemGetScanline_DISPATCH(pDispCommon, pScanlineParams) +#define dispcmnCtrlCmdSystemGetSuppported_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemGetSuppported__ +#define dispcmnCtrlCmdSystemGetSuppported(pDispCommon, pSupportedParams) dispcmnCtrlCmdSystemGetSuppported_DISPATCH(pDispCommon, pSupportedParams) +#define dispcmnCtrlCmdSystemGetConnectState_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemGetConnectState__ +#define dispcmnCtrlCmdSystemGetConnectState(pDispCommon, pConnectParams) dispcmnCtrlCmdSystemGetConnectState_DISPATCH(pDispCommon, pConnectParams) +#define dispcmnCtrlCmdSystemGetHotplugUnplugState_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemGetHotplugUnplugState__ +#define dispcmnCtrlCmdSystemGetHotplugUnplugState(pDispCommon, pHotplugParams) dispcmnCtrlCmdSystemGetHotplugUnplugState_DISPATCH(pDispCommon, pHotplugParams) +#define dispcmnCtrlCmdInternalGetHotplugUnplugState_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdInternalGetHotplugUnplugState__ +#define dispcmnCtrlCmdInternalGetHotplugUnplugState(pDispCommon, pHotplugParams) dispcmnCtrlCmdInternalGetHotplugUnplugState_DISPATCH(pDispCommon, pHotplugParams) +#define dispcmnCtrlCmdSystemGetHeadRoutingMap_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemGetHeadRoutingMap__ +#define dispcmnCtrlCmdSystemGetHeadRoutingMap(pDispCommon, pMapParams) dispcmnCtrlCmdSystemGetHeadRoutingMap_DISPATCH(pDispCommon, pMapParams) +#define dispcmnCtrlCmdSystemGetActive_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemGetActive__ +#define dispcmnCtrlCmdSystemGetActive(pDispCommon, pActiveParams) dispcmnCtrlCmdSystemGetActive_DISPATCH(pDispCommon, pActiveParams) +#define dispcmnCtrlCmdSystemGetBootDisplays_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemGetBootDisplays__ +#define dispcmnCtrlCmdSystemGetBootDisplays(pDispCommon, pParams) dispcmnCtrlCmdSystemGetBootDisplays_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemQueryDisplayIdsWithMux__ +#define dispcmnCtrlCmdSystemQueryDisplayIdsWithMux(pDispCommon, pParams) dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemCheckSidebandI2cSupport_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemCheckSidebandI2cSupport__ +#define dispcmnCtrlCmdSystemCheckSidebandI2cSupport(pDispCommon, pParams) dispcmnCtrlCmdSystemCheckSidebandI2cSupport_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemAllocateDisplayBandwidth_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemAllocateDisplayBandwidth__ +#define dispcmnCtrlCmdSystemAllocateDisplayBandwidth(pDispCommon, pParams) dispcmnCtrlCmdSystemAllocateDisplayBandwidth_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemInternalAllocateDisplayBandwidth_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemInternalAllocateDisplayBandwidth__ +#define dispcmnCtrlCmdSystemInternalAllocateDisplayBandwidth(pDispCommon, pParams) dispcmnCtrlCmdSystemInternalAllocateDisplayBandwidth_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemGetHotplugConfig_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemGetHotplugConfig__ +#define dispcmnCtrlCmdSystemGetHotplugConfig(pDispCommon, pHotplugParams) dispcmnCtrlCmdSystemGetHotplugConfig_DISPATCH(pDispCommon, pHotplugParams) +#define 
dispcmnCtrlCmdSystemGetHotplugEventConfig_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemGetHotplugEventConfig__ +#define dispcmnCtrlCmdSystemGetHotplugEventConfig(pDispCommon, pParams) dispcmnCtrlCmdSystemGetHotplugEventConfig_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemSetHotplugEventConfig_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemSetHotplugEventConfig__ +#define dispcmnCtrlCmdSystemSetHotplugEventConfig(pDispCommon, pParams) dispcmnCtrlCmdSystemSetHotplugEventConfig_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemArmLightweightSupervisor_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemArmLightweightSupervisor__ +#define dispcmnCtrlCmdSystemArmLightweightSupervisor(pDispCommon, pParams) dispcmnCtrlCmdSystemArmLightweightSupervisor_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemSetRegionRamRectangles_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemSetRegionRamRectangles__ +#define dispcmnCtrlCmdSystemSetRegionRamRectangles(pDispCommon, pParams) dispcmnCtrlCmdSystemSetRegionRamRectangles_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemConfigureSafetyInterrupts_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemConfigureSafetyInterrupts__ +#define dispcmnCtrlCmdSystemConfigureSafetyInterrupts(pDispCommon, pParams) dispcmnCtrlCmdSystemConfigureSafetyInterrupts_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemConfigVrrPstateSwitch_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSystemConfigVrrPstateSwitch__ +#define dispcmnCtrlCmdSystemConfigVrrPstateSwitch(pDispCommon, pParams) dispcmnCtrlCmdSystemConfigVrrPstateSwitch_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetType_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificGetType__ +#define dispcmnCtrlCmdSpecificGetType(pDispCommon, pDisplayTypeParams) dispcmnCtrlCmdSpecificGetType_DISPATCH(pDispCommon, pDisplayTypeParams) +#define dispcmnCtrlCmdSpecificGetEdidV2_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificGetEdidV2__ +#define dispcmnCtrlCmdSpecificGetEdidV2(pDispCommon, pEdidParams) dispcmnCtrlCmdSpecificGetEdidV2_DISPATCH(pDispCommon, pEdidParams) +#define dispcmnCtrlCmdSpecificSetEdidV2_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificSetEdidV2__ +#define dispcmnCtrlCmdSpecificSetEdidV2(pDispCommon, pEdidParams) dispcmnCtrlCmdSpecificSetEdidV2_DISPATCH(pDispCommon, pEdidParams) +#define dispcmnCtrlCmdSpecificFakeDevice_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificFakeDevice__ +#define dispcmnCtrlCmdSpecificFakeDevice(pDispCommon, pTestParams) dispcmnCtrlCmdSpecificFakeDevice_DISPATCH(pDispCommon, pTestParams) +#define dispcmnCtrlCmdSpecificGetConnectorData_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificGetConnectorData__ +#define dispcmnCtrlCmdSpecificGetConnectorData(pDispCommon, pConnectorParams) dispcmnCtrlCmdSpecificGetConnectorData_DISPATCH(pDispCommon, pConnectorParams) +#define dispcmnCtrlCmdSpecificSetHdmiEnable_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiEnable__ +#define dispcmnCtrlCmdSpecificSetHdmiEnable(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetHdmiEnable_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificCtrlHdmi_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificCtrlHdmi__ +#define dispcmnCtrlCmdSpecificCtrlHdmi(pDispCommon, pParams) dispcmnCtrlCmdSpecificCtrlHdmi_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetAllHeadMask_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificGetAllHeadMask__ +#define 
dispcmnCtrlCmdSpecificGetAllHeadMask(pDispCommon, pAllHeadMaskParams) dispcmnCtrlCmdSpecificGetAllHeadMask_DISPATCH(pDispCommon, pAllHeadMaskParams) +#define dispcmnCtrlCmdSpecificSetOdPacket_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificSetOdPacket__ +#define dispcmnCtrlCmdSpecificSetOdPacket(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetOdPacket_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificAcquireSharedGenericPacket__ +#define dispcmnCtrlCmdSpecificAcquireSharedGenericPacket(pDispCommon, pParams) dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetSharedGenericPacket_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificSetSharedGenericPacket__ +#define dispcmnCtrlCmdSpecificSetSharedGenericPacket(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetSharedGenericPacket_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificReleaseSharedGenericPacket__ +#define dispcmnCtrlCmdSpecificReleaseSharedGenericPacket(pDispCommon, pParams) dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetOdPacketCtrl_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificSetOdPacketCtrl__ +#define dispcmnCtrlCmdSpecificSetOdPacketCtrl(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetOdPacketCtrl_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificOrGetInfo_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificOrGetInfo__ +#define dispcmnCtrlCmdSpecificOrGetInfo(pDispCommon, pParams) dispcmnCtrlCmdSpecificOrGetInfo_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetPclkLimit_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificGetPclkLimit__ +#define dispcmnCtrlCmdSpecificGetPclkLimit(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetPclkLimit_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetHdmiSinkCaps_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiSinkCaps__ +#define dispcmnCtrlCmdSpecificSetHdmiSinkCaps(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetHdmiSinkCaps_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetMonitorPower_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificSetMonitorPower__ +#define dispcmnCtrlCmdSpecificSetMonitorPower(pDispCommon, setMonitorPowerParams) dispcmnCtrlCmdSpecificSetMonitorPower_DISPATCH(pDispCommon, setMonitorPowerParams) +#define dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig__ +#define dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificApplyEdidOverrideV2_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificApplyEdidOverrideV2__ +#define dispcmnCtrlCmdSpecificApplyEdidOverrideV2(pDispCommon, pEdidOverrideParams) dispcmnCtrlCmdSpecificApplyEdidOverrideV2_DISPATCH(pDispCommon, pEdidOverrideParams) +#define dispcmnCtrlCmdSpecificGetI2cPortid_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificGetI2cPortid__ +#define dispcmnCtrlCmdSpecificGetI2cPortid(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetI2cPortid_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetHdmiGpuCaps_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificGetHdmiGpuCaps__ +#define 
dispcmnCtrlCmdSpecificGetHdmiGpuCaps(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetHdmiGpuCaps_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetHdmiScdcData_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificGetHdmiScdcData__ +#define dispcmnCtrlCmdSpecificGetHdmiScdcData(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetHdmiScdcData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificIsDirectmodeDisplay_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificIsDirectmodeDisplay__ +#define dispcmnCtrlCmdSpecificIsDirectmodeDisplay(pDispCommon, pParams) dispcmnCtrlCmdSpecificIsDirectmodeDisplay_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificDefaultAdaptivesyncDisplay_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificDefaultAdaptivesyncDisplay__ +#define dispcmnCtrlCmdSpecificDefaultAdaptivesyncDisplay(pDispCommon, pParams) dispcmnCtrlCmdSpecificDefaultAdaptivesyncDisplay_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation__ +#define dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificDispI2cReadWrite_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificDispI2cReadWrite__ +#define dispcmnCtrlCmdSpecificDispI2cReadWrite(pDispCommon, pParams) dispcmnCtrlCmdSpecificDispI2cReadWrite_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetValidHeadWindowAssignment_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificGetValidHeadWindowAssignment__ +#define dispcmnCtrlCmdSpecificGetValidHeadWindowAssignment(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetValidHeadWindowAssignment_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetHdmiAudioMutestream_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiAudioMutestream__ +#define dispcmnCtrlCmdSpecificSetHdmiAudioMutestream(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetHdmiAudioMutestream_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificDisplayChange_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificDisplayChange__ +#define dispcmnCtrlCmdSpecificDisplayChange(pDispCommon, pParams) dispcmnCtrlCmdSpecificDisplayChange_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpEdpDriverUnload_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDfpEdpDriverUnload__ +#define dispcmnCtrlCmdDfpEdpDriverUnload(pDispCommon, pParams) dispcmnCtrlCmdDfpEdpDriverUnload_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpSetForceBlackPixels_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDfpSetForceBlackPixels__ +#define dispcmnCtrlCmdDfpSetForceBlackPixels(pDispCommon, pParams) dispcmnCtrlCmdDfpSetForceBlackPixels_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetInfo_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDfpGetInfo__ +#define dispcmnCtrlCmdDfpGetInfo(pDispCommon, pParams) dispcmnCtrlCmdDfpGetInfo_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetDisplayportDongleInfo_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDfpGetDisplayportDongleInfo__ +#define dispcmnCtrlCmdDfpGetDisplayportDongleInfo(pDispCommon, pParams) dispcmnCtrlCmdDfpGetDisplayportDongleInfo_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpSetEldAudioCaps_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDfpSetEldAudioCaps__ +#define dispcmnCtrlCmdDfpSetEldAudioCaps(pDispCommon, pEldAudioCapsParams) 
dispcmnCtrlCmdDfpSetEldAudioCaps_DISPATCH(pDispCommon, pEldAudioCapsParams) +#define dispcmnCtrlCmdDfpSetAudioEnable_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDfpSetAudioEnable__ +#define dispcmnCtrlCmdDfpSetAudioEnable(pDispCommon, pParams) dispcmnCtrlCmdDfpSetAudioEnable_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpUpdateDynamicDfpCache_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDfpUpdateDynamicDfpCache__ +#define dispcmnCtrlCmdDfpUpdateDynamicDfpCache(pDispCommon, pParams) dispcmnCtrlCmdDfpUpdateDynamicDfpCache_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpAssignSor_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDfpAssignSor__ +#define dispcmnCtrlCmdDfpAssignSor(pDispCommon, pParams) dispcmnCtrlCmdDfpAssignSor_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpDscCrcControl_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDfpDscCrcControl__ +#define dispcmnCtrlCmdDfpDscCrcControl(pDispCommon, pParams) dispcmnCtrlCmdDfpDscCrcControl_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpInitMuxData_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDfpInitMuxData__ +#define dispcmnCtrlCmdDfpInitMuxData(pDispCommon, pParams) dispcmnCtrlCmdDfpInitMuxData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetDsiModeTiming_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDfpGetDsiModeTiming__ +#define dispcmnCtrlCmdDfpGetDsiModeTiming(pDispCommon, pParams) dispcmnCtrlCmdDfpGetDsiModeTiming_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpConfigTwoHeadOneOr_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDfpConfigTwoHeadOneOr__ +#define dispcmnCtrlCmdDfpConfigTwoHeadOneOr(pDispCommon, pParams) dispcmnCtrlCmdDfpConfigTwoHeadOneOr_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetPadlinkMask_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDfpGetPadlinkMask__ +#define dispcmnCtrlCmdDfpGetPadlinkMask(pDispCommon, pParams) dispcmnCtrlCmdDfpGetPadlinkMask_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetFixedModeTiming_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDfpGetFixedModeTiming__ +#define dispcmnCtrlCmdDfpGetFixedModeTiming(pDispCommon, pParams) dispcmnCtrlCmdDfpGetFixedModeTiming_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpAuxchCtrl_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpAuxchCtrl__ +#define dispcmnCtrlCmdDpAuxchCtrl(pDispCommon, pAuxchCtrlParams) dispcmnCtrlCmdDpAuxchCtrl_DISPATCH(pDispCommon, pAuxchCtrlParams) +#define dispcmnCtrlCmdDpCtrl_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpCtrl__ +#define dispcmnCtrlCmdDpCtrl(pDispCommon, pParams) dispcmnCtrlCmdDpCtrl_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetLaneData_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpGetLaneData__ +#define dispcmnCtrlCmdDpGetLaneData(pDispCommon, pParams) dispcmnCtrlCmdDpGetLaneData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetLaneData_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpSetLaneData__ +#define dispcmnCtrlCmdDpSetLaneData(pDispCommon, pParams) dispcmnCtrlCmdDpSetLaneData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetTestpattern_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpGetTestpattern__ +#define dispcmnCtrlCmdDpGetTestpattern(pDispCommon, pParams) dispcmnCtrlCmdDpGetTestpattern_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetTestpattern_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpSetTestpattern__ +#define dispcmnCtrlCmdDpSetTestpattern(pDispCommon, pParams) dispcmnCtrlCmdDpSetTestpattern_DISPATCH(pDispCommon, pParams) +#define 
dispcmnCtrlCmdDpMainLinkCtrl_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpMainLinkCtrl__ +#define dispcmnCtrlCmdDpMainLinkCtrl(pDispCommon, pParams) dispcmnCtrlCmdDpMainLinkCtrl_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetAudioMuteStream_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpSetAudioMuteStream__ +#define dispcmnCtrlCmdDpSetAudioMuteStream(pDispCommon, pParams) dispcmnCtrlCmdDpSetAudioMuteStream_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetLinkConfig_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpGetLinkConfig__ +#define dispcmnCtrlCmdDpGetLinkConfig(pDispCommon, pParams) dispcmnCtrlCmdDpGetLinkConfig_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetEDPData_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpGetEDPData__ +#define dispcmnCtrlCmdDpGetEDPData(pDispCommon, pParams) dispcmnCtrlCmdDpGetEDPData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpTopologyAllocateDisplayId_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpTopologyAllocateDisplayId__ +#define dispcmnCtrlCmdDpTopologyAllocateDisplayId(pDispCommon, pParams) dispcmnCtrlCmdDpTopologyAllocateDisplayId_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpTopologyFreeDisplayId_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpTopologyFreeDisplayId__ +#define dispcmnCtrlCmdDpTopologyFreeDisplayId(pDispCommon, pParams) dispcmnCtrlCmdDpTopologyFreeDisplayId_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpConfigStream_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpConfigStream__ +#define dispcmnCtrlCmdDpConfigStream(pDispCommon, pParams) dispcmnCtrlCmdDpConfigStream_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpConfigSingleHeadMultiStream_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpConfigSingleHeadMultiStream__ +#define dispcmnCtrlCmdDpConfigSingleHeadMultiStream(pDispCommon, pParams) dispcmnCtrlCmdDpConfigSingleHeadMultiStream_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetRateGov_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpSetRateGov__ +#define dispcmnCtrlCmdDpSetRateGov(pDispCommon, pParams) dispcmnCtrlCmdDpSetRateGov_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSendACT_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpSendACT__ +#define dispcmnCtrlCmdDpSendACT(pDispCommon, pParams) dispcmnCtrlCmdDpSendACT_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetManualDisplayPort_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpSetManualDisplayPort__ +#define dispcmnCtrlCmdDpSetManualDisplayPort(pDispCommon, pParams) dispcmnCtrlCmdDpSetManualDisplayPort_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetCaps_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpGetCaps__ +#define dispcmnCtrlCmdDpGetCaps(pDispCommon, pParams) dispcmnCtrlCmdDpGetCaps_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetMSAPropertiesv2_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpSetMSAPropertiesv2__ +#define dispcmnCtrlCmdDpSetMSAPropertiesv2(pDispCommon, pParams) dispcmnCtrlCmdDpSetMSAPropertiesv2_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetStereoMSAProperties_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpSetStereoMSAProperties__ +#define dispcmnCtrlCmdDpSetStereoMSAProperties(pDispCommon, pParams) dispcmnCtrlCmdDpSetStereoMSAProperties_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGenerateFakeInterrupt_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpGenerateFakeInterrupt__ +#define dispcmnCtrlCmdDpGenerateFakeInterrupt(pDispCommon, pParams) 
dispcmnCtrlCmdDpGenerateFakeInterrupt_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpConfigRadScratchReg_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpConfigRadScratchReg__ +#define dispcmnCtrlCmdDpConfigRadScratchReg(pDispCommon, pParams) dispcmnCtrlCmdDpConfigRadScratchReg_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetTriggerSelect_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpSetTriggerSelect__ +#define dispcmnCtrlCmdDpSetTriggerSelect(pDispCommon, pTriggerSelectParams) dispcmnCtrlCmdDpSetTriggerSelect_DISPATCH(pDispCommon, pTriggerSelectParams) +#define dispcmnCtrlCmdDpSetTriggerAll_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpSetTriggerAll__ +#define dispcmnCtrlCmdDpSetTriggerAll(pDispCommon, pTriggerAllParams) dispcmnCtrlCmdDpSetTriggerAll_DISPATCH(pDispCommon, pTriggerAllParams) +#define dispcmnCtrlCmdDpGetAuxLogData_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpGetAuxLogData__ +#define dispcmnCtrlCmdDpGetAuxLogData(pDispCommon, pDpAuxBufferWrapper) dispcmnCtrlCmdDpGetAuxLogData_DISPATCH(pDispCommon, pDpAuxBufferWrapper) +#define dispcmnCtrlCmdDpConfigIndexedLinkRates_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpConfigIndexedLinkRates__ +#define dispcmnCtrlCmdDpConfigIndexedLinkRates(pDispCommon, pParams) dispcmnCtrlCmdDpConfigIndexedLinkRates_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpConfigureFec_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpConfigureFec__ +#define dispcmnCtrlCmdDpConfigureFec(pDispCommon, pParams) dispcmnCtrlCmdDpConfigureFec_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetGenericInfoframe_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpGetGenericInfoframe__ +#define dispcmnCtrlCmdDpGetGenericInfoframe(pDispCommon, pParams) dispcmnCtrlCmdDpGetGenericInfoframe_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetMsaAttributes_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpGetMsaAttributes__ +#define dispcmnCtrlCmdDpGetMsaAttributes(pDispCommon, pParams) dispcmnCtrlCmdDpGetMsaAttributes_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdFrlConfigMacroPad_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdFrlConfigMacroPad__ +#define dispcmnCtrlCmdFrlConfigMacroPad(pDispCommon, pParams) dispcmnCtrlCmdFrlConfigMacroPad_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpConfigMacroPad_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpConfigMacroPad__ +#define dispcmnCtrlCmdDpConfigMacroPad(pDispCommon, pParams) dispcmnCtrlCmdDpConfigMacroPad_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data__ +#define dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data(pDispCommon, pParams) dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data__ +#define dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data(pDispCommon, pParams) dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetLevelInfoTableData_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpSetLevelInfoTableData__ +#define dispcmnCtrlCmdDpSetLevelInfoTableData(pDispCommon, pParams) dispcmnCtrlCmdDpSetLevelInfoTableData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetLevelInfoTableData_FNPTR(pDispCommon) 
pDispCommon->__dispcmnCtrlCmdDpGetLevelInfoTableData__ +#define dispcmnCtrlCmdDpGetLevelInfoTableData(pDispCommon, pParams) dispcmnCtrlCmdDpGetLevelInfoTableData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetEcf_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDpSetEcf__ +#define dispcmnCtrlCmdDpSetEcf(pDispCommon, pCtrlEcfParams) dispcmnCtrlCmdDpSetEcf_DISPATCH(pDispCommon, pCtrlEcfParams) +#define dispcmnCtrlCmdDPGetCableIDInfoFromMacro_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdDPGetCableIDInfoFromMacro__ +#define dispcmnCtrlCmdDPGetCableIDInfoFromMacro(pDispCommon, pParams) dispcmnCtrlCmdDPGetCableIDInfoFromMacro_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetRegionalCrcs_FNPTR(pDispCommon) pDispCommon->__dispcmnCtrlCmdSpecificGetRegionalCrcs__ +#define dispcmnCtrlCmdSpecificGetRegionalCrcs(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetRegionalCrcs_DISPATCH(pDispCommon, pParams) +#define dispcmnControl_FNPTR(pDisplayApi) pDisplayApi->__nvoc_base_DisplayApi.__nvoc_metadata_ptr->vtable.__dispapiControl__ +#define dispcmnControl(pDisplayApi, pCallContext, pParams) dispcmnControl_DISPATCH(pDisplayApi, pCallContext, pParams) +#define dispcmnControl_Prologue_FNPTR(pDisplayApi) pDisplayApi->__nvoc_base_DisplayApi.__nvoc_metadata_ptr->vtable.__dispapiControl_Prologue__ +#define dispcmnControl_Prologue(pDisplayApi, pCallContext, pRsParams) dispcmnControl_Prologue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispcmnControl_Epilogue_FNPTR(pDisplayApi) pDisplayApi->__nvoc_base_DisplayApi.__nvoc_metadata_ptr->vtable.__dispapiControl_Epilogue__ +#define dispcmnControl_Epilogue(pDisplayApi, pCallContext, pRsParams) dispcmnControl_Epilogue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispcmnAccessCallback_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define dispcmnAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispcmnAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispcmnShareCallback_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define dispcmnShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) dispcmnShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispcmnGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define dispcmnGetMemInterMapParams(pRmResource, pParams) dispcmnGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispcmnCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define dispcmnCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispcmnCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispcmnGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define dispcmnGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispcmnGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispcmnControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define 
dispcmnControlSerialization_Prologue(pResource, pCallContext, pParams) dispcmnControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispcmnControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define dispcmnControlSerialization_Epilogue(pResource, pCallContext, pParams) dispcmnControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispcmnCanCopy_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define dispcmnCanCopy(pResource) dispcmnCanCopy_DISPATCH(pResource) +#define dispcmnIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define dispcmnIsDuplicate(pResource, hMemory, pDuplicate) dispcmnIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define dispcmnPreDestruct_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define dispcmnPreDestruct(pResource) dispcmnPreDestruct_DISPATCH(pResource) +#define dispcmnControlFilter_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define dispcmnControlFilter(pResource, pCallContext, pParams) dispcmnControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispcmnMap_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMap__ +#define dispcmnMap(pResource, pCallContext, pParams, pCpuMapping) dispcmnMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define dispcmnUnmap_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmap__ +#define dispcmnUnmap(pResource, pCallContext, pCpuMapping) dispcmnUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define dispcmnIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define dispcmnIsPartialUnmapSupported(pResource) dispcmnIsPartialUnmapSupported_DISPATCH(pResource) +#define dispcmnMapTo_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define dispcmnMapTo(pResource, pParams) dispcmnMapTo_DISPATCH(pResource, pParams) +#define dispcmnUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define dispcmnUnmapFrom(pResource, pParams) dispcmnUnmapFrom_DISPATCH(pResource, pParams) +#define dispcmnGetRefCount_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define dispcmnGetRefCount(pResource) dispcmnGetRefCount_DISPATCH(pResource) +#define dispcmnAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define dispcmnAddAdditionalDependants(pClient, pResource, pReference) dispcmnAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define 
dispcmnGetNotificationListPtr_FNPTR(pNotifier) pNotifier->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationListPtr__ +#define dispcmnGetNotificationListPtr(pNotifier) dispcmnGetNotificationListPtr_DISPATCH(pNotifier) +#define dispcmnGetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationShare__ +#define dispcmnGetNotificationShare(pNotifier) dispcmnGetNotificationShare_DISPATCH(pNotifier) +#define dispcmnSetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifySetNotificationShare__ +#define dispcmnSetNotificationShare(pNotifier, pNotifShare) dispcmnSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispcmnUnregisterEvent_FNPTR(pNotifier) pNotifier->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyUnregisterEvent__ +#define dispcmnUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispcmnUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispcmnGetOrAllocNotifShare_FNPTR(pNotifier) pNotifier->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetOrAllocNotifShare__ +#define dispcmnGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispcmnGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) + +// Dispatch functions +static inline NV_STATUS dispcmnCtrlCmdSpecificGetHdcpState_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDCP_STATE_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetHdcpState__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificHdcpCtrl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_HDCP_CTRL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificHdcpCtrl__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetHdcpRepeaterInfo_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDCP_REPEATER_INFO_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetHdcpRepeaterInfo__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetHdcpDiagnostics_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDCP_DIAGNOSTICS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetHdcpDiagnostics__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificHdcpKsvListValidate_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_HDCP_KSVLIST_VALIDATE_PARAMS *pKsvListValidateParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificHdcpKsvListValidate__(pDispCommon, pKsvListValidateParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificHdcpUpdate_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_HDCP_UPDATE_PARAMS *pHdcpUpdateParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificHdcpUpdate__(pDispCommon, pHdcpUpdateParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemValidateSrm_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_VALIDATE_SRM_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemValidateSrm__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemGetSrmStatus_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SRM_STATUS_PARAMS *pParams) { + return 
pDispCommon->__dispcmnCtrlCmdSystemGetSrmStatus__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemHdcpRevocationCheck_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_HDCP_REVOCATE_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemHdcpRevocationCheck__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemUpdateSrm_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_UPDATE_SRM_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemUpdateSrm__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemGetCapsV2_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *pCapsParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetCapsV2__(pDispCommon, pCapsParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemGetNumHeads_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *pNumHeadsParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetNumHeads__(pDispCommon, pNumHeadsParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemGetScanline_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS *pScanlineParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetScanline__(pDispCommon, pScanlineParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemGetSuppported_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *pSupportedParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetSuppported__(pDispCommon, pSupportedParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemGetConnectState_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *pConnectParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetConnectState__(pDispCommon, pConnectParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemGetHotplugUnplugState_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *pHotplugParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetHotplugUnplugState__(pDispCommon, pHotplugParams); +} + +static inline NV_STATUS dispcmnCtrlCmdInternalGetHotplugUnplugState_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *pHotplugParams) { + return pDispCommon->__dispcmnCtrlCmdInternalGetHotplugUnplugState__(pDispCommon, pHotplugParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemGetHeadRoutingMap_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS *pMapParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetHeadRoutingMap__(pDispCommon, pMapParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemGetActive_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *pActiveParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetActive__(pDispCommon, pActiveParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemGetBootDisplays_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetBootDisplays__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemQueryDisplayIdsWithMux__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemCheckSidebandI2cSupport_DISPATCH(struct DispCommon *pDispCommon, 
NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemCheckSidebandI2cSupport__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemAllocateDisplayBandwidth_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemAllocateDisplayBandwidth__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemInternalAllocateDisplayBandwidth_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_INTERNAL_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemInternalAllocateDisplayBandwidth__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemGetHotplugConfig_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS *pHotplugParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetHotplugConfig__(pDispCommon, pHotplugParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemGetHotplugEventConfig_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetHotplugEventConfig__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemSetHotplugEventConfig_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemSetHotplugEventConfig__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemArmLightweightSupervisor_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemArmLightweightSupervisor__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemSetRegionRamRectangles_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_SET_REGION_RAM_RECTANGLES_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemSetRegionRamRectangles__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemConfigureSafetyInterrupts_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_CONFIGURE_SAFETY_INTERRUPTS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemConfigureSafetyInterrupts__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSystemConfigVrrPstateSwitch_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemConfigVrrPstateSwitch__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetType_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS *pDisplayTypeParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetType__(pDispCommon, pDisplayTypeParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetEdidV2_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *pEdidParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetEdidV2__(pDispCommon, pEdidParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetEdidV2_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS *pEdidParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetEdidV2__(pDispCommon, pEdidParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificFakeDevice_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS *pTestParams) { + return 
pDispCommon->__dispcmnCtrlCmdSpecificFakeDevice__(pDispCommon, pTestParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetConnectorData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *pConnectorParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetConnectorData__(pDispCommon, pConnectorParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetHdmiEnable_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiEnable__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificCtrlHdmi_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificCtrlHdmi__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetAllHeadMask_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *pAllHeadMaskParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetAllHeadMask__(pDispCommon, pAllHeadMaskParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetOdPacket_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetOdPacket__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificAcquireSharedGenericPacket__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetSharedGenericPacket_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetSharedGenericPacket__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificReleaseSharedGenericPacket__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetOdPacketCtrl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetOdPacketCtrl__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificOrGetInfo_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificOrGetInfo__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetPclkLimit_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetPclkLimit__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetHdmiSinkCaps_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiSinkCaps__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetMonitorPower_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS *setMonitorPowerParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetMonitorPower__(pDispCommon, setMonitorPowerParams); +} + +static inline NV_STATUS 
dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificApplyEdidOverrideV2_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS *pEdidOverrideParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificApplyEdidOverrideV2__(pDispCommon, pEdidOverrideParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetI2cPortid_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetI2cPortid__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetHdmiGpuCaps_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetHdmiGpuCaps__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetHdmiScdcData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetHdmiScdcData__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificIsDirectmodeDisplay_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificIsDirectmodeDisplay__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificDefaultAdaptivesyncDisplay_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_DEFAULT_ADAPTIVESYNC_DISPLAY_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificDefaultAdaptivesyncDisplay__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificDispI2cReadWrite_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificDispI2cReadWrite__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetValidHeadWindowAssignment_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetValidHeadWindowAssignment__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetHdmiAudioMutestream_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiAudioMutestream__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificDisplayChange_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificDisplayChange__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDfpEdpDriverUnload_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_EDP_DRIVER_UNLOAD_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpEdpDriverUnload__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDfpSetForceBlackPixels_DISPATCH(struct DispCommon 
*pDispCommon, NV0073_CTRL_DFP_SET_FORCE_BLACK_PIXELS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpSetForceBlackPixels__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDfpGetInfo_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_INFO_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetInfo__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDfpGetDisplayportDongleInfo_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetDisplayportDongleInfo__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDfpSetEldAudioCaps_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *pEldAudioCapsParams) { + return pDispCommon->__dispcmnCtrlCmdDfpSetEldAudioCaps__(pDispCommon, pEldAudioCapsParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDfpSetAudioEnable_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpSetAudioEnable__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDfpUpdateDynamicDfpCache_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpUpdateDynamicDfpCache__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDfpAssignSor_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpAssignSor__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDfpDscCrcControl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpDscCrcControl__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDfpInitMuxData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpInitMuxData__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDfpGetDsiModeTiming_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetDsiModeTiming__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDfpConfigTwoHeadOneOr_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpConfigTwoHeadOneOr__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDfpGetPadlinkMask_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetPadlinkMask__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDfpGetFixedModeTiming_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetFixedModeTiming__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpAuxchCtrl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *pAuxchCtrlParams) { + return pDispCommon->__dispcmnCtrlCmdDpAuxchCtrl__(pDispCommon, pAuxchCtrlParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpCtrl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_CTRL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpCtrl__(pDispCommon, pParams); +} + +static inline NV_STATUS 
dispcmnCtrlCmdDpGetLaneData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_LANE_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetLaneData__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpSetLaneData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_LANE_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetLaneData__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpGetTestpattern_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetTestpattern__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpSetTestpattern_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetTestpattern__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpMainLinkCtrl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpMainLinkCtrl__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpSetAudioMuteStream_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetAudioMuteStream__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpGetLinkConfig_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetLinkConfig__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpGetEDPData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_EDP_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetEDPData__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpTopologyAllocateDisplayId_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpTopologyAllocateDisplayId__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpTopologyFreeDisplayId_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpTopologyFreeDisplayId__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpConfigStream_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigStream__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpConfigSingleHeadMultiStream_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigSingleHeadMultiStream__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpSetRateGov_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetRateGov__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpSendACT_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSendACT__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpSetManualDisplayPort_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetManualDisplayPort__(pDispCommon, pParams); +} + +static inline 
NV_STATUS dispcmnCtrlCmdDpGetCaps_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetCaps__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpSetMSAPropertiesv2_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_V2_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetMSAPropertiesv2__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpSetStereoMSAProperties_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetStereoMSAProperties__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpGenerateFakeInterrupt_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGenerateFakeInterrupt__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpConfigRadScratchReg_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigRadScratchReg__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpSetTriggerSelect_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS *pTriggerSelectParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetTriggerSelect__(pDispCommon, pTriggerSelectParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpSetTriggerAll_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS *pTriggerAllParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetTriggerAll__(pDispCommon, pTriggerAllParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpGetAuxLogData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS *pDpAuxBufferWrapper) { + return pDispCommon->__dispcmnCtrlCmdDpGetAuxLogData__(pDispCommon, pDpAuxBufferWrapper); +} + +static inline NV_STATUS dispcmnCtrlCmdDpConfigIndexedLinkRates_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigIndexedLinkRates__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpConfigureFec_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigureFec__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpGetGenericInfoframe_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetGenericInfoframe__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpGetMsaAttributes_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetMsaAttributes__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdFrlConfigMacroPad_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdFrlConfigMacroPad__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpConfigMacroPad_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigMacroPad__(pDispCommon, pParams); +} + +static inline NV_STATUS 
dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpSetLevelInfoTableData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_LEVEL_INFO_TABLE_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetLevelInfoTableData__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpGetLevelInfoTableData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_LEVEL_INFO_TABLE_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetLevelInfoTableData__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDpSetEcf_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_ECF_PARAMS *pCtrlEcfParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetEcf__(pDispCommon, pCtrlEcfParams); +} + +static inline NV_STATUS dispcmnCtrlCmdDPGetCableIDInfoFromMacro_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_USBC_CABLEID_INFO_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDPGetCableIDInfoFromMacro__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetRegionalCrcs_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetRegionalCrcs__(pDispCommon, pParams); +} + +static inline NV_STATUS dispcmnControl_DISPATCH(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDisplayApi->__nvoc_metadata_ptr->vtable.__dispcmnControl__(pDisplayApi, pCallContext, pParams); +} + +static inline NV_STATUS dispcmnControl_Prologue_DISPATCH(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return pDisplayApi->__nvoc_metadata_ptr->vtable.__dispcmnControl_Prologue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline void dispcmnControl_Epilogue_DISPATCH(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + pDisplayApi->__nvoc_metadata_ptr->vtable.__dispcmnControl_Epilogue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline NvBool dispcmnAccessCallback_DISPATCH(struct DispCommon *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcmnAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool dispcmnShareCallback_DISPATCH(struct DispCommon *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcmnShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispcmnGetMemInterMapParams_DISPATCH(struct DispCommon *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return 
pRmResource->__nvoc_metadata_ptr->vtable.__dispcmnGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispcmnCheckMemInterUnmap_DISPATCH(struct DispCommon *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispcmnCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispcmnGetMemoryMappingDescriptor_DISPATCH(struct DispCommon *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispcmnGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispcmnControlSerialization_Prologue_DISPATCH(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcmnControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void dispcmnControlSerialization_Epilogue_DISPATCH(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__dispcmnControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool dispcmnCanCopy_DISPATCH(struct DispCommon *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcmnCanCopy__(pResource); +} + +static inline NV_STATUS dispcmnIsDuplicate_DISPATCH(struct DispCommon *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcmnIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void dispcmnPreDestruct_DISPATCH(struct DispCommon *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__dispcmnPreDestruct__(pResource); +} + +static inline NV_STATUS dispcmnControlFilter_DISPATCH(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcmnControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispcmnMap_DISPATCH(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcmnMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispcmnUnmap_DISPATCH(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcmnUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool dispcmnIsPartialUnmapSupported_DISPATCH(struct DispCommon *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcmnIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS dispcmnMapTo_DISPATCH(struct DispCommon *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcmnMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispcmnUnmapFrom_DISPATCH(struct DispCommon *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcmnUnmapFrom__(pResource, pParams); +} + +static inline NvU32 dispcmnGetRefCount_DISPATCH(struct DispCommon *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispcmnGetRefCount__(pResource); +} + +static inline void dispcmnAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispCommon *pResource, RsResourceRef *pReference) { + 
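// Note: NVOC emulates C++ virtual dispatch in plain C. The statement below forwards through the shared per-class vtable reached via __nvoc_metadata_ptr, whereas the NV0073 control-command wrappers earlier in this header forward through per-object function pointers (pDispCommon->__name__), which allows those entry points to be bound per object. +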
pResource->__nvoc_metadata_ptr->vtable.__dispcmnAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline PEVENTNOTIFICATION * dispcmnGetNotificationListPtr_DISPATCH(struct DispCommon *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispcmnGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare * dispcmnGetNotificationShare_DISPATCH(struct DispCommon *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispcmnGetNotificationShare__(pNotifier); +} + +static inline void dispcmnSetNotificationShare_DISPATCH(struct DispCommon *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvoc_metadata_ptr->vtable.__dispcmnSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS dispcmnUnregisterEvent_DISPATCH(struct DispCommon *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispcmnUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS dispcmnGetOrAllocNotifShare_DISPATCH(struct DispCommon *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__nvoc_metadata_ptr->vtable.__dispcmnGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetHdcpState_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDCP_STATE_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificHdcpCtrl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_HDCP_CTRL_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificGetHdcpRepeaterInfo_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDCP_REPEATER_INFO_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificGetHdcpDiagnostics_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDCP_DIAGNOSTICS_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificHdcpKsvListValidate_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_HDCP_KSVLIST_VALIDATE_PARAMS *pKsvListValidateParams); + +NV_STATUS dispcmnCtrlCmdSpecificHdcpUpdate_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_HDCP_UPDATE_PARAMS *pHdcpUpdateParams); + +NV_STATUS dispcmnCtrlCmdSystemValidateSrm_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_VALIDATE_SRM_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSystemGetSrmStatus_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SRM_STATUS_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSystemHdcpRevocationCheck_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_HDCP_REVOCATE_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSystemUpdateSrm_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_UPDATE_SRM_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSystemGetCapsV2_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *pCapsParams); + +NV_STATUS dispcmnCtrlCmdSystemGetNumHeads_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *pNumHeadsParams); + +NV_STATUS dispcmnCtrlCmdSystemGetScanline_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS *pScanlineParams); + +NV_STATUS dispcmnCtrlCmdSystemGetSuppported_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *pSupportedParams); + +NV_STATUS dispcmnCtrlCmdSystemGetConnectState_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *pConnectParams); + +NV_STATUS 
dispcmnCtrlCmdSystemGetHotplugUnplugState_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *pHotplugParams); + +NV_STATUS dispcmnCtrlCmdInternalGetHotplugUnplugState_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *pHotplugParams); + +NV_STATUS dispcmnCtrlCmdSystemGetHeadRoutingMap_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS *pMapParams); + +NV_STATUS dispcmnCtrlCmdSystemGetActive_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *pActiveParams); + +NV_STATUS dispcmnCtrlCmdSystemGetBootDisplays_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSystemCheckSidebandI2cSupport_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSystemAllocateDisplayBandwidth_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSystemInternalAllocateDisplayBandwidth_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_INTERNAL_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSystemGetHotplugConfig_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS *pHotplugParams); + +NV_STATUS dispcmnCtrlCmdSystemGetHotplugEventConfig_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSystemSetHotplugEventConfig_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSystemArmLightweightSupervisor_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSystemSetRegionRamRectangles_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_SET_REGION_RAM_RECTANGLES_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSystemConfigureSafetyInterrupts_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_CONFIGURE_SAFETY_INTERRUPTS_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSystemConfigVrrPstateSwitch_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificGetType_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS *pDisplayTypeParams); + +NV_STATUS dispcmnCtrlCmdSpecificGetEdidV2_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *pEdidParams); + +NV_STATUS dispcmnCtrlCmdSpecificSetEdidV2_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS *pEdidParams); + +NV_STATUS dispcmnCtrlCmdSpecificFakeDevice_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS *pTestParams); + +NV_STATUS dispcmnCtrlCmdSpecificGetConnectorData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *pConnectorParams); + +NV_STATUS dispcmnCtrlCmdSpecificSetHdmiEnable_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificCtrlHdmi_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificGetAllHeadMask_IMPL(struct DispCommon *pDispCommon, 
NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *pAllHeadMaskParams); + +NV_STATUS dispcmnCtrlCmdSpecificSetOdPacket_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificSetSharedGenericPacket_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificSetOdPacketCtrl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificOrGetInfo_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificGetPclkLimit_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificSetHdmiSinkCaps_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificSetMonitorPower_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS *setMonitorPowerParams); + +NV_STATUS dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificApplyEdidOverrideV2_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS *pEdidOverrideParams); + +NV_STATUS dispcmnCtrlCmdSpecificGetI2cPortid_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificGetHdmiGpuCaps_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificGetHdmiScdcData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificIsDirectmodeDisplay_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificDefaultAdaptivesyncDisplay_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_DEFAULT_ADAPTIVESYNC_DISPLAY_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificDispI2cReadWrite_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificGetValidHeadWindowAssignment_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificSetHdmiAudioMutestream_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificDisplayChange_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDfpEdpDriverUnload_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_EDP_DRIVER_UNLOAD_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDfpSetForceBlackPixels_IMPL(struct DispCommon *pDispCommon, 
NV0073_CTRL_DFP_SET_FORCE_BLACK_PIXELS_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDfpGetInfo_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_INFO_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDfpGetDisplayportDongleInfo_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDfpSetEldAudioCaps_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *pEldAudioCapsParams); + +NV_STATUS dispcmnCtrlCmdDfpSetAudioEnable_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDfpUpdateDynamicDfpCache_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDfpAssignSor_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDfpDscCrcControl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDfpInitMuxData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDfpGetDsiModeTiming_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDfpConfigTwoHeadOneOr_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDfpGetPadlinkMask_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDfpGetFixedModeTiming_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpAuxchCtrl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *pAuxchCtrlParams); + +NV_STATUS dispcmnCtrlCmdDpCtrl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_CTRL_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpGetLaneData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_LANE_DATA_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpSetLaneData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_LANE_DATA_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpGetTestpattern_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpSetTestpattern_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpMainLinkCtrl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpSetAudioMuteStream_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpGetLinkConfig_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpGetEDPData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_EDP_DATA_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpTopologyAllocateDisplayId_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpTopologyFreeDisplayId_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpConfigStream_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpConfigSingleHeadMultiStream_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS 
*pParams); + +NV_STATUS dispcmnCtrlCmdDpSetRateGov_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpSendACT_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpSetManualDisplayPort_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpGetCaps_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpSetMSAPropertiesv2_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_V2_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpSetStereoMSAProperties_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpGenerateFakeInterrupt_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpConfigRadScratchReg_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpSetTriggerSelect_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS *pTriggerSelectParams); + +NV_STATUS dispcmnCtrlCmdDpSetTriggerAll_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS *pTriggerAllParams); + +NV_STATUS dispcmnCtrlCmdDpGetAuxLogData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS *pDpAuxBufferWrapper); + +NV_STATUS dispcmnCtrlCmdDpConfigIndexedLinkRates_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpConfigureFec_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpGetGenericInfoframe_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpGetMsaAttributes_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdFrlConfigMacroPad_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_FRL_CONFIG_MACRO_PAD_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpConfigMacroPad_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpSetLevelInfoTableData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_LEVEL_INFO_TABLE_DATA_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpGetLevelInfoTableData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_LEVEL_INFO_TABLE_DATA_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdDpSetEcf_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_ECF_PARAMS *pCtrlEcfParams); + +NV_STATUS dispcmnCtrlCmdDPGetCableIDInfoFromMacro_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_USBC_CABLEID_INFO_PARAMS *pParams); + +NV_STATUS dispcmnCtrlCmdSpecificGetRegionalCrcs_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS *pParams); + +NV_STATUS dispcmnConstruct_IMPL(struct DispCommon *arg_pDispCommon, 
struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_dispcmnConstruct(arg_pDispCommon, arg_pCallContext, arg_pParams) dispcmnConstruct_IMPL(arg_pDispCommon, arg_pCallContext, arg_pParams) +NV_STATUS dispcmnGetByHandle_IMPL(struct RsClient *pClient, NvHandle hDispCommon, struct DispCommon **ppDispCommon); + +#define dispcmnGetByHandle(pClient, hDispCommon, ppDispCommon) dispcmnGetByHandle_IMPL(pClient, hDispCommon, ppDispCommon) +void dispcmnGetByDevice_IMPL(struct RsClient *pClient, NvHandle hDevice, struct DispCommon **ppDispCommon); + +#define dispcmnGetByDevice(pClient, hDevice, ppDispCommon) dispcmnGetByDevice_IMPL(pClient, hDevice, ppDispCommon) +#undef PRIVATE_FIELD + + +// **************************************************************************** +// Deprecated Definitions +// **************************************************************************** + +/** + * @warning This function is deprecated! Please use dispchnGetByHandle. + */ +NV_STATUS CliFindDispChannelInfo(NvHandle, NvHandle, struct DispChannel **ppDispChannel, NvHandle*); + +#endif // DISP_OBJS_H + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_DISP_OBJS_NVOC_H_ diff --git a/src/nvidia/generated/g_disp_sf_user_nvoc.c b/src/nvidia/generated/g_disp_sf_user_nvoc.c new file mode 100644 index 0000000..b67bfd6 --- /dev/null +++ b/src/nvidia/generated/g_disp_sf_user_nvoc.c @@ -0,0 +1,518 @@ +#define NVOC_DISP_SF_USER_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_disp_sf_user_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xba7439 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSfUser; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +// Forward declarations for DispSfUser +void __nvoc_init__GpuResource(GpuResource*); +void __nvoc_init__DispSfUser(DispSfUser*); +void __nvoc_init_funcTable_DispSfUser(DispSfUser*); +NV_STATUS __nvoc_ctor_DispSfUser(DispSfUser*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_DispSfUser(DispSfUser*); +void __nvoc_dtor_DispSfUser(DispSfUser*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__DispSfUser; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__DispSfUser; + +// Down-thunk(s) to bridge DispSfUser methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_down_thunk_DispSfUser_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // this + +// Up-thunk(s) to bridge DispSfUser methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2 +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2 +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2 +NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS 
__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource); // super +void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference); // super +NV_STATUS __nvoc_up_thunk_GpuResource_dispsfControl(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_GpuResource_dispsfMap(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_GpuResource_dispsfUnmap(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_GpuResource_dispsfShareCallback(struct DispSfUser *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_GpuResource_dispsfGetMapAddrSpace(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this +NV_STATUS __nvoc_up_thunk_GpuResource_dispsfInternalControlForward(struct DispSfUser *pGpuResource, NvU32 command, void *pParams, NvU32 size); // this +NvHandle __nvoc_up_thunk_GpuResource_dispsfGetInternalObjectHandle(struct DispSfUser *pGpuResource); // this +NvBool __nvoc_up_thunk_RmResource_dispsfAccessCallback(struct DispSfUser *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NV_STATUS 
__nvoc_up_thunk_RmResource_dispsfGetMemInterMapParams(struct DispSfUser *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispsfCheckMemInterUnmap(struct DispSfUser *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispsfGetMemoryMappingDescriptor(struct DispSfUser *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispsfControlSerialization_Prologue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_dispsfControlSerialization_Epilogue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_dispsfControl_Prologue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_dispsfControl_Epilogue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_dispsfCanCopy(struct DispSfUser *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispsfIsDuplicate(struct DispSfUser *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_dispsfPreDestruct(struct DispSfUser *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispsfControlFilter(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_dispsfIsPartialUnmapSupported(struct DispSfUser *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispsfMapTo(struct DispSfUser *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_dispsfUnmapFrom(struct DispSfUser *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_dispsfGetRefCount(struct DispSfUser *pResource); // this +void __nvoc_up_thunk_RsResource_dispsfAddAdditionalDependants(struct RsClient *pClient, struct DispSfUser *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispSfUser = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispSfUser), + /*classId=*/ classId(DispSfUser), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispSfUser", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispSfUser, + /*pCastInfo=*/ &__nvoc_castinfo__DispSfUser, + /*pExportInfo=*/ &__nvoc_export_info__DispSfUser +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__DispSfUser __nvoc_metadata__DispSfUser = { + .rtti.pClassDef = &__nvoc_class_def_DispSfUser, // (dispsf) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispSfUser, + .rtti.offset = 0, + .metadata__GpuResource.rtti.pClassDef = &__nvoc_class_def_GpuResource, // (gpures) super + .metadata__GpuResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.rtti.offset = NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource), + .metadata__GpuResource.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2 + .metadata__GpuResource.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.rtti.offset = NV_OFFSETOF(DispSfUser, 
__nvoc_base_GpuResource.__nvoc_base_RmResource), + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3 + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4 + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3 + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + + .vtable.__dispsfGetRegBaseOffsetAndSize__ = &dispsfGetRegBaseOffsetAndSize_IMPL, // virtual override (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_down_thunk_DispSfUser_gpuresGetRegBaseOffsetAndSize, // virtual + .vtable.__dispsfControl__ = &__nvoc_up_thunk_GpuResource_dispsfControl, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl__ = &gpuresControl_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_GpuResource_resControl, // virtual + .vtable.__dispsfMap__ = &__nvoc_up_thunk_GpuResource_dispsfMap, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresMap__ = &gpuresMap_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_GpuResource_resMap, // virtual + .vtable.__dispsfUnmap__ = &__nvoc_up_thunk_GpuResource_dispsfUnmap, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresUnmap__ = &gpuresUnmap_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_GpuResource_resUnmap, // virtual + .vtable.__dispsfShareCallback__ = &__nvoc_up_thunk_GpuResource_dispsfShareCallback, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresShareCallback__ = &gpuresShareCallback_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresShareCallback__ = 
&__nvoc_down_thunk_GpuResource_rmresShareCallback, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__dispsfGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_dispsfGetMapAddrSpace, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL, // virtual + .vtable.__dispsfInternalControlForward__ = &__nvoc_up_thunk_GpuResource_dispsfInternalControlForward, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL, // virtual + .vtable.__dispsfGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_dispsfGetInternalObjectHandle, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL, // virtual + .vtable.__dispsfAccessCallback__ = &__nvoc_up_thunk_RmResource_dispsfAccessCallback, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresAccessCallback__ = &__nvoc_up_thunk_RmResource_gpuresAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__dispsfGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_dispsfGetMemInterMapParams, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + .vtable.__dispsfCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_dispsfCheckMemInterUnmap, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual + .vtable.__dispsfGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_dispsfGetMemoryMappingDescriptor, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual + .vtable.__dispsfControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_dispsfControlSerialization_Prologue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = 
&__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__dispsfControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_dispsfControlSerialization_Epilogue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__dispsfControl_Prologue__ = &__nvoc_up_thunk_RmResource_dispsfControl_Prologue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__dispsfControl_Epilogue__ = &__nvoc_up_thunk_RmResource_dispsfControl_Epilogue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__dispsfCanCopy__ = &__nvoc_up_thunk_RsResource_dispsfCanCopy, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresCanCopy__ = &__nvoc_up_thunk_RsResource_gpuresCanCopy, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__dispsfIsDuplicate__ = &__nvoc_up_thunk_RsResource_dispsfIsDuplicate, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpuresIsDuplicate, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__dispsfPreDestruct__ = &__nvoc_up_thunk_RsResource_dispsfPreDestruct, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresPreDestruct__ = &__nvoc_up_thunk_RsResource_gpuresPreDestruct, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + 
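// Note on reading this initializer: every virtual method gets one slot per class level. Slots for inherited methods point at __nvoc_up_thunk_* adapters that shift the object pointer to the embedded base struct before delegating; overriding slots point at *_IMPL functions or __nvoc_down_thunk_* adapters, so a call resolves to the same body from any level of the hierarchy. Slots annotated "inline virtual ... body" (such as resIsPartialUnmapSupported_d69453) reference small generated stubs rather than hand-written *_IMPL functions. +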
.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__dispsfControlFilter__ = &__nvoc_up_thunk_RsResource_dispsfControlFilter, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresControlFilter__ = &__nvoc_up_thunk_RsResource_gpuresControlFilter, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__dispsfIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_dispsfIsPartialUnmapSupported, // inline virtual inherited (res) base (gpures) body + .metadata__GpuResource.vtable.__gpuresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__dispsfMapTo__ = &__nvoc_up_thunk_RsResource_dispsfMapTo, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresMapTo__ = &__nvoc_up_thunk_RsResource_gpuresMapTo, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__dispsfUnmapFrom__ = &__nvoc_up_thunk_RsResource_dispsfUnmapFrom, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpuresUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__dispsfGetRefCount__ = &__nvoc_up_thunk_RsResource_dispsfGetRefCount, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetRefCount__ = &__nvoc_up_thunk_RsResource_gpuresGetRefCount, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__dispsfAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_dispsfAddAdditionalDependants, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + 
.metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__DispSfUser = { + .numRelatives = 6, + .relatives = { + &__nvoc_metadata__DispSfUser.rtti, // [0]: (dispsf) this + &__nvoc_metadata__DispSfUser.metadata__GpuResource.rtti, // [1]: (gpures) super + &__nvoc_metadata__DispSfUser.metadata__GpuResource.metadata__RmResource.rtti, // [2]: (rmres) super^2 + &__nvoc_metadata__DispSfUser.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3 + &__nvoc_metadata__DispSfUser.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4 + &__nvoc_metadata__DispSfUser.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3 + } +}; + +// 1 down-thunk(s) defined to bridge methods in DispSfUser from superclasses + +// dispsfGetRegBaseOffsetAndSize: virtual override (gpures) base (gpures) +NV_STATUS __nvoc_down_thunk_DispSfUser_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return dispsfGetRegBaseOffsetAndSize((struct DispSfUser *)(((unsigned char *) pDispSfUser) - NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource)), pGpu, pOffset, pSize); +} + + +// 24 up-thunk(s) defined to bridge methods in DispSfUser to superclasses + +// dispsfControl: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_dispsfControl(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource)), pCallContext, pParams); +} + +// dispsfMap: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_dispsfMap(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource)), pCallContext, pParams, pCpuMapping); +} + +// dispsfUnmap: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_dispsfUnmap(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource)), pCallContext, pCpuMapping); +} + +// dispsfShareCallback: virtual inherited (gpures) base (gpures) +NvBool __nvoc_up_thunk_GpuResource_dispsfShareCallback(struct DispSfUser *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// dispsfGetMapAddrSpace: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_dispsfGetMapAddrSpace(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource)), pCallContext, mapFlags, 
pAddrSpace); +} + +// dispsfInternalControlForward: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_dispsfInternalControlForward(struct DispSfUser *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource)), command, pParams, size); +} + +// dispsfGetInternalObjectHandle: virtual inherited (gpures) base (gpures) +NvHandle __nvoc_up_thunk_GpuResource_dispsfGetInternalObjectHandle(struct DispSfUser *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource))); +} + +// dispsfAccessCallback: virtual inherited (rmres) base (gpures) +NvBool __nvoc_up_thunk_RmResource_dispsfAccessCallback(struct DispSfUser *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// dispsfGetMemInterMapParams: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_dispsfGetMemInterMapParams(struct DispSfUser *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pParams); +} + +// dispsfCheckMemInterUnmap: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_dispsfCheckMemInterUnmap(struct DispSfUser *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource)), bSubdeviceHandleProvided); +} + +// dispsfGetMemoryMappingDescriptor: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_dispsfGetMemoryMappingDescriptor(struct DispSfUser *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource)), ppMemDesc); +} + +// dispsfControlSerialization_Prologue: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_dispsfControlSerialization_Prologue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispsfControlSerialization_Epilogue: virtual inherited (rmres) base (gpures) +void __nvoc_up_thunk_RmResource_dispsfControlSerialization_Epilogue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispsfControl_Prologue: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_dispsfControl_Prologue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) 
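/* Note: each thunk in this file adjusts the object pointer by NV_OFFSETOF before delegating. Up-thunks like this one add the offset to reach the embedded base struct; the down-thunk above subtracts it to recover the derived object from a base pointer (the container_of idiom). */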
{ + return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispsfControl_Epilogue: virtual inherited (rmres) base (gpures) +void __nvoc_up_thunk_RmResource_dispsfControl_Epilogue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// dispsfCanCopy: virtual inherited (res) base (gpures) +NvBool __nvoc_up_thunk_RsResource_dispsfCanCopy(struct DispSfUser *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispsfIsDuplicate: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_dispsfIsDuplicate(struct DispSfUser *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate); +} + +// dispsfPreDestruct: virtual inherited (res) base (gpures) +void __nvoc_up_thunk_RsResource_dispsfPreDestruct(struct DispSfUser *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispsfControlFilter: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_dispsfControlFilter(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// dispsfIsPartialUnmapSupported: inline virtual inherited (res) base (gpures) body +NvBool __nvoc_up_thunk_RsResource_dispsfIsPartialUnmapSupported(struct DispSfUser *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispsfMapTo: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_dispsfMapTo(struct DispSfUser *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// dispsfUnmapFrom: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_dispsfUnmapFrom(struct DispSfUser *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// dispsfGetRefCount: virtual inherited (res) base (gpures) +NvU32 __nvoc_up_thunk_RsResource_dispsfGetRefCount(struct DispSfUser *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// dispsfAddAdditionalDependants: virtual inherited (res) 
base (gpures)
+void __nvoc_up_thunk_RsResource_dispsfAddAdditionalDependants(struct RsClient *pClient, struct DispSfUser *pResource, RsResourceRef *pReference) {
+ resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference);
+}
+
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__DispSfUser =
+{
+ /*numEntries=*/ 0,
+ /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_GpuResource(GpuResource*);
+void __nvoc_dtor_DispSfUser(DispSfUser *pThis) {
+ __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+ PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_DispSfUser(DispSfUser *pThis) {
+ PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_DispSfUser(DispSfUser *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+ NV_STATUS status = NV_OK;
+ status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
+ if (status != NV_OK) goto __nvoc_ctor_DispSfUser_fail_GpuResource;
+ __nvoc_init_dataField_DispSfUser(pThis);
+
+ status = __nvoc_dispsfConstruct(pThis, arg_pCallContext, arg_pParams);
+ if (status != NV_OK) goto __nvoc_ctor_DispSfUser_fail__init;
+ goto __nvoc_ctor_DispSfUser_exit; // Success
+
+__nvoc_ctor_DispSfUser_fail__init:
+ __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+__nvoc_ctor_DispSfUser_fail_GpuResource:
+__nvoc_ctor_DispSfUser_exit:
+
+ return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_DispSfUser_1(DispSfUser *pThis) {
+ PORT_UNREFERENCED_VARIABLE(pThis);
+} // End __nvoc_init_funcTable_DispSfUser_1
+
+
+// Initialize vtable(s) for 25 virtual method(s).
+void __nvoc_init_funcTable_DispSfUser(DispSfUser *pThis) {
+ __nvoc_init_funcTable_DispSfUser_1(pThis);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__DispSfUser(DispSfUser *pThis) {
+
+ // Initialize pointers to inherited data.
+ pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4
+ pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3
+ pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3
+ pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; // (rmres) super^2
+ pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; // (gpures) super
+ pThis->__nvoc_pbase_DispSfUser = pThis; // (dispsf) this
+
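The six `__nvoc_pbase_*` assignments above cache the address of every ancestor subobject, so a static cast to any base class costs a single field load; the `__nvoc_up_thunk_*` functions earlier in this file compute the same addresses arithmetically at call time. A minimal editorial sketch of the equivalence (the helper function and `pSf` are hypothetical, not part of this patch):

// Editorial sketch (not generated code): three equivalent ways to reach the
// RsResource ancestor of a fully initialized DispSfUser.
static struct RsResource *dispsfToRsResource_sketch(struct DispSfUser *pSf)
{
    // Cached by __nvoc_init__DispSfUser above.
    struct RsResource *viaCache  = pSf->__nvoc_pbase_RsResource;
    // Direct member access through the nested base-class layout.
    struct RsResource *viaMember = &pSf->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
    // Offset arithmetic, exactly what the __nvoc_up_thunk_* functions emit.
    struct RsResource *viaOffset = (struct RsResource *)(((unsigned char *) pSf) +
        NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource));
    return (viaCache == viaMember && viaMember == viaOffset) ? viaCache : NULL;
}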
+ // Recurse to superclass initialization function(s).
+ __nvoc_init__GpuResource(&pThis->__nvoc_base_GpuResource);
+
+ // Pointer(s) to metadata structure(s)
+ pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__DispSfUser.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4
+ pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispSfUser.metadata__GpuResource.metadata__RmResource.metadata__RsResource; // (res) super^3
+ pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__DispSfUser.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3
+ pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispSfUser.metadata__GpuResource.metadata__RmResource; // (rmres) super^2
+ pThis->__nvoc_base_GpuResource.__nvoc_metadata_ptr = &__nvoc_metadata__DispSfUser.metadata__GpuResource; // (gpures) super
+ pThis->__nvoc_metadata_ptr = &__nvoc_metadata__DispSfUser; // (dispsf) this
+
+ // Initialize per-object vtables.
+ __nvoc_init_funcTable_DispSfUser(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_DispSfUser(DispSfUser **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+ NV_STATUS status;
+ Object *pParentObj = NULL;
+ DispSfUser *pThis;
+
+ // Assign `pThis`, allocating memory unless suppressed by flag.
+ status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(DispSfUser), (void**)&pThis, (void**)ppThis);
+ if (status != NV_OK)
+ return status;
+
+ // Zero is the initial value for everything.
+ portMemSet(pThis, 0, sizeof(DispSfUser));
+
+ pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+ // Link the child into the parent, if there is one, unless flagged not to do so.
+ if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+ {
+ pParentObj = dynamicCast(pParent, Object);
+ objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+ }
+ else
+ {
+ pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+ }
+
+ __nvoc_init__DispSfUser(pThis);
+ status = __nvoc_ctor_DispSfUser(pThis, arg_pCallContext, arg_pParams);
+ if (status != NV_OK) goto __nvoc_objCreate_DispSfUser_cleanup;
+
+ // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+ *ppThis = pThis;
+
+ return NV_OK;
+
+__nvoc_objCreate_DispSfUser_cleanup:
+
+ // Unlink the child from the parent if it was linked above.
+ if (pParentObj != NULL)
+ objRemoveChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+ // Do not call destructors here since the constructor already called them.
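To make the cleanup contract explicit (descriptive comments only, derived from `__nvoc_ctor_DispSfUser` above):

// - If the constructor failed, its goto chain has already destructed every
//   base that was successfully constructed (the
//   __nvoc_ctor_DispSfUser_fail__init label calls __nvoc_dtor_GpuResource),
//   so only unlinking and storage release remain to be done here.
// - With NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT the storage is caller-owned,
//   so it is re-zeroed below rather than freed.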
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+ portMemSet(pThis, 0, sizeof(DispSfUser));
+ else
+ {
+ portMemFree(pThis);
+ *ppThis = NULL;
+ }
+
+ // coverity[leaked_storage:FALSE]
+ return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_DispSfUser(DispSfUser **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+ NV_STATUS status;
+ struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+ struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+ status = __nvoc_objCreate_DispSfUser(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+ return status;
+}
+
diff --git a/src/nvidia/generated/g_disp_sf_user_nvoc.h b/src/nvidia/generated/g_disp_sf_user_nvoc.h
new file mode 100644
index 0000000..ee70e6a
--- /dev/null
+++ b/src/nvidia/generated/g_disp_sf_user_nvoc.h
@@ -0,0 +1,333 @@
+
+#ifndef _G_DISP_SF_USER_NVOC_H_
+#define _G_DISP_SF_USER_NVOC_H_
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************************************************************
+*
+* Description:
+* This file contains functions managing the DispSfUser class.
+*
+******************************************************************************/
+
+#pragma once
+#include "g_disp_sf_user_nvoc.h"
+
+#ifndef DISP_SF_USER_H
+#define DISP_SF_USER_H
+
+#include "gpu/gpu_resource.h"
+
+/*!
+ * RM internal class representing NVXXXX_DISP_SF_USER
+ */
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_DISP_SF_USER_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__DispSfUser;
+struct NVOC_METADATA__GpuResource;
+struct NVOC_VTABLE__DispSfUser;
+
+
+struct DispSfUser {
+
+ // Metadata starts with RTTI structure.
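The union that follows overlays the version-2 metadata pointer with the legacy `__nvoc_rtti` pointer; the aliasing works because `NVOC_METADATA__DispSfUser` (defined later in this header) begins with its `NVOC_RTTI` member, so both views reach an RTTI record first. A hedged sketch of what the wrapper macros further below cost at run time (the helper function and `pSf` are hypothetical, not part of this patch):

// Editorial sketch (not generated code): a wrapper such as dispsfCanCopy(pSf)
// expands to dispsfCanCopy_DISPATCH(pSf), i.e. one pointer chase into the
// shared, const per-class metadata rather than into a per-object vtable.
static NvBool dispsfCanCopy_sketch(struct DispSfUser *pSf)
{
    return pSf->__nvoc_metadata_ptr->vtable.__dispsfCanCopy__(pSf);
}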
+ union { + const struct NVOC_METADATA__DispSfUser *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct GpuResource __nvoc_base_GpuResource; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^4 + struct RsResource *__nvoc_pbase_RsResource; // res super^3 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^2 + struct GpuResource *__nvoc_pbase_GpuResource; // gpures super + struct DispSfUser *__nvoc_pbase_DispSfUser; // dispsf + + // Data members + NvU32 ControlOffset; + NvU32 ControlLength; +}; + + +// Vtable with 25 per-class function pointers +struct NVOC_VTABLE__DispSfUser { + NV_STATUS (*__dispsfGetRegBaseOffsetAndSize__)(struct DispSfUser * /*this*/, struct OBJGPU *, NvU32 *, NvU32 *); // virtual override (gpures) base (gpures) + NV_STATUS (*__dispsfControl__)(struct DispSfUser * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__dispsfMap__)(struct DispSfUser * /*this*/, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__dispsfUnmap__)(struct DispSfUser * /*this*/, struct CALL_CONTEXT *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures) + NvBool (*__dispsfShareCallback__)(struct DispSfUser * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__dispsfGetMapAddrSpace__)(struct DispSfUser * /*this*/, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__dispsfInternalControlForward__)(struct DispSfUser * /*this*/, NvU32, void *, NvU32); // virtual inherited (gpures) base (gpures) + NvHandle (*__dispsfGetInternalObjectHandle__)(struct DispSfUser * /*this*/); // virtual inherited (gpures) base (gpures) + NvBool (*__dispsfAccessCallback__)(struct DispSfUser * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__dispsfGetMemInterMapParams__)(struct DispSfUser * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__dispsfCheckMemInterUnmap__)(struct DispSfUser * /*this*/, NvBool); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__dispsfGetMemoryMappingDescriptor__)(struct DispSfUser * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__dispsfControlSerialization_Prologue__)(struct DispSfUser * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + void (*__dispsfControlSerialization_Epilogue__)(struct DispSfUser * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__dispsfControl_Prologue__)(struct DispSfUser * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + void (*__dispsfControl_Epilogue__)(struct DispSfUser * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + NvBool (*__dispsfCanCopy__)(struct DispSfUser * /*this*/); // virtual inherited (res) base (gpures) + NV_STATUS (*__dispsfIsDuplicate__)(struct DispSfUser * 
/*this*/, NvHandle, NvBool *); // virtual inherited (res) base (gpures) + void (*__dispsfPreDestruct__)(struct DispSfUser * /*this*/); // virtual inherited (res) base (gpures) + NV_STATUS (*__dispsfControlFilter__)(struct DispSfUser * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (gpures) + NvBool (*__dispsfIsPartialUnmapSupported__)(struct DispSfUser * /*this*/); // inline virtual inherited (res) base (gpures) body + NV_STATUS (*__dispsfMapTo__)(struct DispSfUser * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (gpures) + NV_STATUS (*__dispsfUnmapFrom__)(struct DispSfUser * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (gpures) + NvU32 (*__dispsfGetRefCount__)(struct DispSfUser * /*this*/); // virtual inherited (res) base (gpures) + void (*__dispsfAddAdditionalDependants__)(struct RsClient *, struct DispSfUser * /*this*/, RsResourceRef *); // virtual inherited (res) base (gpures) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__DispSfUser { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__GpuResource metadata__GpuResource; + const struct NVOC_VTABLE__DispSfUser vtable; +}; + +#ifndef __NVOC_CLASS_DispSfUser_TYPEDEF__ +#define __NVOC_CLASS_DispSfUser_TYPEDEF__ +typedef struct DispSfUser DispSfUser; +#endif /* __NVOC_CLASS_DispSfUser_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSfUser +#define __nvoc_class_id_DispSfUser 0xba7439 +#endif /* __nvoc_class_id_DispSfUser */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSfUser; + +#define __staticCast_DispSfUser(pThis) \ + ((pThis)->__nvoc_pbase_DispSfUser) + +#ifdef __nvoc_disp_sf_user_h_disabled +#define __dynamicCast_DispSfUser(pThis) ((DispSfUser*) NULL) +#else //__nvoc_disp_sf_user_h_disabled +#define __dynamicCast_DispSfUser(pThis) \ + ((DispSfUser*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispSfUser))) +#endif //__nvoc_disp_sf_user_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_DispSfUser(DispSfUser**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispSfUser(DispSfUser**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_DispSfUser(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispSfUser((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define dispsfGetRegBaseOffsetAndSize_FNPTR(pDispSfUser) pDispSfUser->__nvoc_metadata_ptr->vtable.__dispsfGetRegBaseOffsetAndSize__ +#define dispsfGetRegBaseOffsetAndSize(pDispSfUser, pGpu, pOffset, pSize) dispsfGetRegBaseOffsetAndSize_DISPATCH(pDispSfUser, pGpu, pOffset, pSize) +#define dispsfControl_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresControl__ +#define dispsfControl(pGpuResource, pCallContext, pParams) dispsfControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dispsfMap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresMap__ +#define dispsfMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispsfMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispsfUnmap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresUnmap__ +#define dispsfUnmap(pGpuResource, pCallContext, pCpuMapping) dispsfUnmap_DISPATCH(pGpuResource, pCallContext, 
pCpuMapping) +#define dispsfShareCallback_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresShareCallback__ +#define dispsfShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispsfShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispsfGetMapAddrSpace_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetMapAddrSpace__ +#define dispsfGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispsfGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispsfInternalControlForward_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresInternalControlForward__ +#define dispsfInternalControlForward(pGpuResource, command, pParams, size) dispsfInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispsfGetInternalObjectHandle_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__ +#define dispsfGetInternalObjectHandle(pGpuResource) dispsfGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dispsfAccessCallback_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define dispsfAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispsfAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispsfGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define dispsfGetMemInterMapParams(pRmResource, pParams) dispsfGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispsfCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define dispsfCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispsfCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispsfGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define dispsfGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispsfGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispsfControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define dispsfControlSerialization_Prologue(pResource, pCallContext, pParams) dispsfControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispsfControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define dispsfControlSerialization_Epilogue(pResource, pCallContext, pParams) dispsfControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispsfControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define dispsfControl_Prologue(pResource, pCallContext, pParams) dispsfControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispsfControl_Epilogue_FNPTR(pResource) 
pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define dispsfControl_Epilogue(pResource, pCallContext, pParams) dispsfControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispsfCanCopy_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define dispsfCanCopy(pResource) dispsfCanCopy_DISPATCH(pResource) +#define dispsfIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define dispsfIsDuplicate(pResource, hMemory, pDuplicate) dispsfIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define dispsfPreDestruct_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define dispsfPreDestruct(pResource) dispsfPreDestruct_DISPATCH(pResource) +#define dispsfControlFilter_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define dispsfControlFilter(pResource, pCallContext, pParams) dispsfControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispsfIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define dispsfIsPartialUnmapSupported(pResource) dispsfIsPartialUnmapSupported_DISPATCH(pResource) +#define dispsfMapTo_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define dispsfMapTo(pResource, pParams) dispsfMapTo_DISPATCH(pResource, pParams) +#define dispsfUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define dispsfUnmapFrom(pResource, pParams) dispsfUnmapFrom_DISPATCH(pResource, pParams) +#define dispsfGetRefCount_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define dispsfGetRefCount(pResource) dispsfGetRefCount_DISPATCH(pResource) +#define dispsfAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define dispsfAddAdditionalDependants(pClient, pResource, pReference) dispsfAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NV_STATUS dispsfGetRegBaseOffsetAndSize_DISPATCH(struct DispSfUser *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pDispSfUser->__nvoc_metadata_ptr->vtable.__dispsfGetRegBaseOffsetAndSize__(pDispSfUser, pGpu, pOffset, pSize); +} + +static inline NV_STATUS dispsfControl_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispsfControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispsfMap_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispsfMap__(pGpuResource, pCallContext, 
pParams, pCpuMapping); +} + +static inline NV_STATUS dispsfUnmap_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispsfUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NvBool dispsfShareCallback_DISPATCH(struct DispSfUser *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispsfShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispsfGetMapAddrSpace_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispsfGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS dispsfInternalControlForward_DISPATCH(struct DispSfUser *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispsfInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NvHandle dispsfGetInternalObjectHandle_DISPATCH(struct DispSfUser *pGpuResource) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__dispsfGetInternalObjectHandle__(pGpuResource); +} + +static inline NvBool dispsfAccessCallback_DISPATCH(struct DispSfUser *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__dispsfAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS dispsfGetMemInterMapParams_DISPATCH(struct DispSfUser *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispsfGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispsfCheckMemInterUnmap_DISPATCH(struct DispSfUser *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispsfCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispsfGetMemoryMappingDescriptor_DISPATCH(struct DispSfUser *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__dispsfGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispsfControlSerialization_Prologue_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispsfControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void dispsfControlSerialization_Epilogue_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__dispsfControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispsfControl_Prologue_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispsfControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void dispsfControl_Epilogue_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + 
pResource->__nvoc_metadata_ptr->vtable.__dispsfControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool dispsfCanCopy_DISPATCH(struct DispSfUser *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispsfCanCopy__(pResource); +} + +static inline NV_STATUS dispsfIsDuplicate_DISPATCH(struct DispSfUser *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__dispsfIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void dispsfPreDestruct_DISPATCH(struct DispSfUser *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__dispsfPreDestruct__(pResource); +} + +static inline NV_STATUS dispsfControlFilter_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispsfControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool dispsfIsPartialUnmapSupported_DISPATCH(struct DispSfUser *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispsfIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS dispsfMapTo_DISPATCH(struct DispSfUser *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispsfMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispsfUnmapFrom_DISPATCH(struct DispSfUser *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__dispsfUnmapFrom__(pResource, pParams); +} + +static inline NvU32 dispsfGetRefCount_DISPATCH(struct DispSfUser *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__dispsfGetRefCount__(pResource); +} + +static inline void dispsfAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispSfUser *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__dispsfAddAdditionalDependants__(pClient, pResource, pReference); +} + +NV_STATUS dispsfGetRegBaseOffsetAndSize_IMPL(struct DispSfUser *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); + +NV_STATUS dispsfConstruct_IMPL(struct DispSfUser *arg_pDispSfUser, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_dispsfConstruct(arg_pDispSfUser, arg_pCallContext, arg_pParams) dispsfConstruct_IMPL(arg_pDispSfUser, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif // DISP_SF_USER_H + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_DISP_SF_USER_NVOC_H_ diff --git a/src/nvidia/generated/g_eng_desc_nvoc.h b/src/nvidia/generated/g_eng_desc_nvoc.h new file mode 100644 index 0000000..025e92e --- /dev/null +++ b/src/nvidia/generated/g_eng_desc_nvoc.h @@ -0,0 +1,1792 @@ + +#ifndef _G_ENG_DESC_NVOC_H_ +#define _G_ENG_DESC_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+#include "g_eng_desc_nvoc.h"
+
+#ifndef _ENG_DESC_H_
+#define _ENG_DESC_H_
+
+
+#include "core/core.h"
+
+//
+// Engine descriptors
+//
+// An ENGDESCRIPTOR carries both an NVOC_CLASS_ID and an instance ID. For example,
+// to specify the engine CE1, use MKENGDESC(classId(OBJCE), 1).
+//
+#define ENGDESC_CLASS 31:8
+#define ENGDESC_INST 7:0
+
+#define MKENGDESC(class, inst) ((((NvU32)(class)) << SF_SHIFT(ENGDESC_CLASS)) | \
+ ((inst ) << SF_SHIFT(ENGDESC_INST )))
+
+#define ENGDESC_FIELD(desc, field) (((desc) >> SF_SHIFT(ENGDESC ## field)) & \
+ SF_MASK(ENGDESC ## field))
+
+typedef NvU32 ENGDESCRIPTOR;
+
+
+//
+// Class declarations to get classIds for use with ENGDESCRIPTOR
+//
+
+struct OBJINVALID;
+
+#ifndef __NVOC_CLASS_OBJINVALID_TYPEDEF__
+#define __NVOC_CLASS_OBJINVALID_TYPEDEF__
+typedef struct OBJINVALID OBJINVALID;
+#endif /* __NVOC_CLASS_OBJINVALID_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJINVALID
+#define __nvoc_class_id_OBJINVALID 0xb33b15
+#endif /* __nvoc_class_id_OBJINVALID */
+
+ // classId only. Not a real class
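A worked example of the packing above, using the CE1 engine named in the header comment (and assuming `SF_SHIFT`/`SF_MASK` carry their usual resman meaning of a field's low bit index and its unshifted mask):

// Editorial worked example (not generated code):
//   classId(OBJCE) is 0x793ceb (declared further below); ENGDESC_CLASS is
//   bits 31:8 and ENGDESC_INST is bits 7:0, so:
//     MKENGDESC(classId(OBJCE), 1)      == (0x793ceb << 8) | 1 == 0x793ceb01
//     ENGDESC_FIELD(0x793ceb01, _CLASS) == 0x793ceb
//     ENGDESC_FIELD(0x793ceb01, _INST)  == 1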
+ +struct OBJSWENG; + +#ifndef __NVOC_CLASS_OBJSWENG_TYPEDEF__ +#define __NVOC_CLASS_OBJSWENG_TYPEDEF__ +typedef struct OBJSWENG OBJSWENG; +#endif /* __NVOC_CLASS_OBJSWENG_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSWENG +#define __nvoc_class_id_OBJSWENG 0x95a6f5 +#endif /* __nvoc_class_id_OBJSWENG */ + + + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + + +struct Falcon; + +#ifndef __NVOC_CLASS_Falcon_TYPEDEF__ +#define __NVOC_CLASS_Falcon_TYPEDEF__ +typedef struct Falcon Falcon; +#endif /* __NVOC_CLASS_Falcon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Falcon +#define __nvoc_class_id_Falcon 0xdc5264 +#endif /* __nvoc_class_id_Falcon */ + + + +struct OBJMC; + +#ifndef __NVOC_CLASS_OBJMC_TYPEDEF__ +#define __NVOC_CLASS_OBJMC_TYPEDEF__ +typedef struct OBJMC OBJMC; +#endif /* __NVOC_CLASS_OBJMC_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJMC +#define __nvoc_class_id_OBJMC 0x9aad0e +#endif /* __nvoc_class_id_OBJMC */ + + + +struct KernelMc; + +#ifndef __NVOC_CLASS_KernelMc_TYPEDEF__ +#define __NVOC_CLASS_KernelMc_TYPEDEF__ +typedef struct KernelMc KernelMc; +#endif /* __NVOC_CLASS_KernelMc_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelMc +#define __nvoc_class_id_KernelMc 0x3827ff +#endif /* __nvoc_class_id_KernelMc */ + + + +struct PrivRing; + +#ifndef __NVOC_CLASS_PrivRing_TYPEDEF__ +#define __NVOC_CLASS_PrivRing_TYPEDEF__ +typedef struct PrivRing PrivRing; +#endif /* __NVOC_CLASS_PrivRing_TYPEDEF__ */ + +#ifndef __nvoc_class_id_PrivRing +#define __nvoc_class_id_PrivRing 0x4c57c4 +#endif /* __nvoc_class_id_PrivRing */ + + + +struct SwIntr; + +#ifndef __NVOC_CLASS_SwIntr_TYPEDEF__ +#define __NVOC_CLASS_SwIntr_TYPEDEF__ +typedef struct SwIntr SwIntr; +#endif /* __NVOC_CLASS_SwIntr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SwIntr +#define __nvoc_class_id_SwIntr 0x5ca633 +#endif /* __nvoc_class_id_SwIntr */ + + + +struct MemorySystem; + +#ifndef __NVOC_CLASS_MemorySystem_TYPEDEF__ +#define __NVOC_CLASS_MemorySystem_TYPEDEF__ +typedef struct MemorySystem MemorySystem; +#endif /* __NVOC_CLASS_MemorySystem_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemorySystem +#define __nvoc_class_id_MemorySystem 0x174e21 +#endif /* __nvoc_class_id_MemorySystem */ + + + +struct KernelMemorySystem; + +#ifndef __NVOC_CLASS_KernelMemorySystem_TYPEDEF__ +#define __NVOC_CLASS_KernelMemorySystem_TYPEDEF__ +typedef struct KernelMemorySystem KernelMemorySystem; +#endif /* __NVOC_CLASS_KernelMemorySystem_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelMemorySystem +#define __nvoc_class_id_KernelMemorySystem 0x7faff1 +#endif /* __nvoc_class_id_KernelMemorySystem */ + + + +struct MemoryManager; + +#ifndef __NVOC_CLASS_MemoryManager_TYPEDEF__ +#define __NVOC_CLASS_MemoryManager_TYPEDEF__ +typedef struct MemoryManager MemoryManager; +#endif /* __NVOC_CLASS_MemoryManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryManager +#define __nvoc_class_id_MemoryManager 0x22ad47 +#endif /* __nvoc_class_id_MemoryManager */ + + + +struct OBJFBFLCN; + +#ifndef __NVOC_CLASS_OBJFBFLCN_TYPEDEF__ +#define __NVOC_CLASS_OBJFBFLCN_TYPEDEF__ +typedef struct OBJFBFLCN OBJFBFLCN; +#endif /* __NVOC_CLASS_OBJFBFLCN_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFBFLCN +#define __nvoc_class_id_OBJFBFLCN 0x8a20bf +#endif /* __nvoc_class_id_OBJFBFLCN */ + + + +struct OBJHSHUBMANAGER; + +#ifndef
__NVOC_CLASS_OBJHSHUBMANAGER_TYPEDEF__ +#define __NVOC_CLASS_OBJHSHUBMANAGER_TYPEDEF__ +typedef struct OBJHSHUBMANAGER OBJHSHUBMANAGER; +#endif /* __NVOC_CLASS_OBJHSHUBMANAGER_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHSHUBMANAGER +#define __nvoc_class_id_OBJHSHUBMANAGER 0xbb32b9 +#endif /* __nvoc_class_id_OBJHSHUBMANAGER */ + + + +struct Hshub; + +#ifndef __NVOC_CLASS_Hshub_TYPEDEF__ +#define __NVOC_CLASS_Hshub_TYPEDEF__ +typedef struct Hshub Hshub; +#endif /* __NVOC_CLASS_Hshub_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Hshub +#define __nvoc_class_id_Hshub 0x5b3331 +#endif /* __nvoc_class_id_Hshub */ + + + +struct OBJTMR; + +#ifndef __NVOC_CLASS_OBJTMR_TYPEDEF__ +#define __NVOC_CLASS_OBJTMR_TYPEDEF__ +typedef struct OBJTMR OBJTMR; +#endif /* __NVOC_CLASS_OBJTMR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJTMR +#define __nvoc_class_id_OBJTMR 0x9ddede +#endif /* __nvoc_class_id_OBJTMR */ + + + +struct VirtMemAllocator; + +#ifndef __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ +#define __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ +typedef struct VirtMemAllocator VirtMemAllocator; +#endif /* __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VirtMemAllocator +#define __nvoc_class_id_VirtMemAllocator 0x899e48 +#endif /* __nvoc_class_id_VirtMemAllocator */ + + + +struct Graphics; + +#ifndef __NVOC_CLASS_Graphics_TYPEDEF__ +#define __NVOC_CLASS_Graphics_TYPEDEF__ +typedef struct Graphics Graphics; +#endif /* __NVOC_CLASS_Graphics_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Graphics +#define __nvoc_class_id_Graphics 0xd334df +#endif /* __nvoc_class_id_Graphics */ + + + +struct OBJGR; + +#ifndef __NVOC_CLASS_OBJGR_TYPEDEF__ +#define __NVOC_CLASS_OBJGR_TYPEDEF__ +typedef struct OBJGR OBJGR; +#endif /* __NVOC_CLASS_OBJGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGR +#define __nvoc_class_id_OBJGR 0xb0940a +#endif /* __nvoc_class_id_OBJGR */ + + // classId only. Not a real class. 
Bug 200664045 + +struct GraphicsManager; + +#ifndef __NVOC_CLASS_GraphicsManager_TYPEDEF__ +#define __NVOC_CLASS_GraphicsManager_TYPEDEF__ +typedef struct GraphicsManager GraphicsManager; +#endif /* __NVOC_CLASS_GraphicsManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GraphicsManager +#define __nvoc_class_id_GraphicsManager 0x2f465a +#endif /* __nvoc_class_id_GraphicsManager */ + + + +struct KernelGraphicsManager; + +#ifndef __NVOC_CLASS_KernelGraphicsManager_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphicsManager_TYPEDEF__ +typedef struct KernelGraphicsManager KernelGraphicsManager; +#endif /* __NVOC_CLASS_KernelGraphicsManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphicsManager +#define __nvoc_class_id_KernelGraphicsManager 0xd22179 +#endif /* __nvoc_class_id_KernelGraphicsManager */ + + + +struct MIGManager; + +#ifndef __NVOC_CLASS_MIGManager_TYPEDEF__ +#define __NVOC_CLASS_MIGManager_TYPEDEF__ +typedef struct MIGManager MIGManager; +#endif /* __NVOC_CLASS_MIGManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MIGManager +#define __nvoc_class_id_MIGManager 0xfd75d0 +#endif /* __nvoc_class_id_MIGManager */ + + + +struct KernelMIGManager; + +#ifndef __NVOC_CLASS_KernelMIGManager_TYPEDEF__ +#define __NVOC_CLASS_KernelMIGManager_TYPEDEF__ +typedef struct KernelMIGManager KernelMIGManager; +#endif /* __NVOC_CLASS_KernelMIGManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelMIGManager +#define __nvoc_class_id_KernelMIGManager 0x01c1bf +#endif /* __nvoc_class_id_KernelMIGManager */ + + + +struct SMDebugger; + +#ifndef __NVOC_CLASS_SMDebugger_TYPEDEF__ +#define __NVOC_CLASS_SMDebugger_TYPEDEF__ +typedef struct SMDebugger SMDebugger; +#endif /* __NVOC_CLASS_SMDebugger_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SMDebugger +#define __nvoc_class_id_SMDebugger 0x12018b +#endif /* __nvoc_class_id_SMDebugger */ + + + +struct KernelGraphics; + +#ifndef __NVOC_CLASS_KernelGraphics_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphics_TYPEDEF__ +typedef struct KernelGraphics KernelGraphics; +#endif /* __NVOC_CLASS_KernelGraphics_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphics +#define __nvoc_class_id_KernelGraphics 0xea3fa9 +#endif /* __nvoc_class_id_KernelGraphics */ + + + +struct KernelFifo; + +#ifndef __NVOC_CLASS_KernelFifo_TYPEDEF__ +#define __NVOC_CLASS_KernelFifo_TYPEDEF__ +typedef struct KernelFifo KernelFifo; +#endif /* __NVOC_CLASS_KernelFifo_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelFifo +#define __nvoc_class_id_KernelFifo 0xf3e155 +#endif /* __nvoc_class_id_KernelFifo */ + + + +struct OBJFIFO; + +#ifndef __NVOC_CLASS_OBJFIFO_TYPEDEF__ +#define __NVOC_CLASS_OBJFIFO_TYPEDEF__ +typedef struct OBJFIFO OBJFIFO; +#endif /* __NVOC_CLASS_OBJFIFO_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFIFO +#define __nvoc_class_id_OBJFIFO 0xb02365 +#endif /* __nvoc_class_id_OBJFIFO */ + + + +struct OBJOS; + +#ifndef __NVOC_CLASS_OBJOS_TYPEDEF__ +#define __NVOC_CLASS_OBJOS_TYPEDEF__ +typedef struct OBJOS OBJOS; +#endif /* __NVOC_CLASS_OBJOS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOS +#define __nvoc_class_id_OBJOS 0xaa1d70 +#endif /* __nvoc_class_id_OBJOS */ + + + +struct OBJBUS; + +#ifndef __NVOC_CLASS_OBJBUS_TYPEDEF__ +#define __NVOC_CLASS_OBJBUS_TYPEDEF__ +typedef struct OBJBUS OBJBUS; +#endif /* __NVOC_CLASS_OBJBUS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJBUS +#define __nvoc_class_id_OBJBUS 0xcc4c31 +#endif /* __nvoc_class_id_OBJBUS */ + + + +struct KernelBus; + +#ifndef __NVOC_CLASS_KernelBus_TYPEDEF__ +#define __NVOC_CLASS_KernelBus_TYPEDEF__ +typedef struct KernelBus KernelBus; +#endif /* 
__NVOC_CLASS_KernelBus_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelBus +#define __nvoc_class_id_KernelBus 0xd2ac57 +#endif /* __nvoc_class_id_KernelBus */ + + + +struct OBJINFOROM; + +#ifndef __NVOC_CLASS_OBJINFOROM_TYPEDEF__ +#define __NVOC_CLASS_OBJINFOROM_TYPEDEF__ +typedef struct OBJINFOROM OBJINFOROM; +#endif /* __NVOC_CLASS_OBJINFOROM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJINFOROM +#define __nvoc_class_id_OBJINFOROM 0x0e1639 +#endif /* __nvoc_class_id_OBJINFOROM */ + + + +struct Perf; + +#ifndef __NVOC_CLASS_Perf_TYPEDEF__ +#define __NVOC_CLASS_Perf_TYPEDEF__ +typedef struct Perf Perf; +#endif /* __NVOC_CLASS_Perf_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Perf +#define __nvoc_class_id_Perf 0xed0b65 +#endif /* __nvoc_class_id_Perf */ + + + +struct KernelPerf; + +#ifndef __NVOC_CLASS_KernelPerf_TYPEDEF__ +#define __NVOC_CLASS_KernelPerf_TYPEDEF__ +typedef struct KernelPerf KernelPerf; +#endif /* __NVOC_CLASS_KernelPerf_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelPerf +#define __nvoc_class_id_KernelPerf 0xc53a57 +#endif /* __nvoc_class_id_KernelPerf */ + + + +struct Pxuc; + +#ifndef __NVOC_CLASS_Pxuc_TYPEDEF__ +#define __NVOC_CLASS_Pxuc_TYPEDEF__ +typedef struct Pxuc Pxuc; +#endif /* __NVOC_CLASS_Pxuc_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Pxuc +#define __nvoc_class_id_Pxuc 0xba06f5 +#endif /* __nvoc_class_id_Pxuc */ + + + +struct OBJBIF; + +#ifndef __NVOC_CLASS_OBJBIF_TYPEDEF__ +#define __NVOC_CLASS_OBJBIF_TYPEDEF__ +typedef struct OBJBIF OBJBIF; +#endif /* __NVOC_CLASS_OBJBIF_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJBIF +#define __nvoc_class_id_OBJBIF 0xd1c956 +#endif /* __nvoc_class_id_OBJBIF */ + + + +struct KernelBif; + +#ifndef __NVOC_CLASS_KernelBif_TYPEDEF__ +#define __NVOC_CLASS_KernelBif_TYPEDEF__ +typedef struct KernelBif KernelBif; +#endif /* __NVOC_CLASS_KernelBif_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelBif +#define __nvoc_class_id_KernelBif 0xdbe523 +#endif /* __nvoc_class_id_KernelBif */ + + + +struct OBJSF; + +#ifndef __NVOC_CLASS_OBJSF_TYPEDEF__ +#define __NVOC_CLASS_OBJSF_TYPEDEF__ +typedef struct OBJSF OBJSF; +#endif /* __NVOC_CLASS_OBJSF_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSF +#define __nvoc_class_id_OBJSF 0x0bd720 +#endif /* __nvoc_class_id_OBJSF */ + + + +struct OBJGPIO; + +#ifndef __NVOC_CLASS_OBJGPIO_TYPEDEF__ +#define __NVOC_CLASS_OBJGPIO_TYPEDEF__ +typedef struct OBJGPIO OBJGPIO; +#endif /* __NVOC_CLASS_OBJGPIO_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPIO +#define __nvoc_class_id_OBJGPIO 0x05c7b5 +#endif /* __nvoc_class_id_OBJGPIO */ + + + +struct ClockManager; + +#ifndef __NVOC_CLASS_ClockManager_TYPEDEF__ +#define __NVOC_CLASS_ClockManager_TYPEDEF__ +typedef struct ClockManager ClockManager; +#endif /* __NVOC_CLASS_ClockManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ClockManager +#define __nvoc_class_id_ClockManager 0xbcadd3 +#endif /* __nvoc_class_id_ClockManager */ + + + +struct KernelDisplay; + +#ifndef __NVOC_CLASS_KernelDisplay_TYPEDEF__ +#define __NVOC_CLASS_KernelDisplay_TYPEDEF__ +typedef struct KernelDisplay KernelDisplay; +#endif /* __NVOC_CLASS_KernelDisplay_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelDisplay +#define __nvoc_class_id_KernelDisplay 0x55952e +#endif /* __nvoc_class_id_KernelDisplay */ + + + +struct OBJDISP; + +#ifndef __NVOC_CLASS_OBJDISP_TYPEDEF__ +#define __NVOC_CLASS_OBJDISP_TYPEDEF__ +typedef struct OBJDISP OBJDISP; +#endif /* __NVOC_CLASS_OBJDISP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDISP +#define __nvoc_class_id_OBJDISP 0xd1755e +#endif /* __nvoc_class_id_OBJDISP */ + + + +struct OBJDPU; + 
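Every class in this list repeats the same guard stanza; a descriptive note on its purpose (the `classId()` linkage is an assumption inferred from the MKENGDESC usage shown earlier in this header):

// The paired guards make these declarations idempotent, so any number of
// generated headers can declare the same class without redefinition:
//   __NVOC_CLASS_<name>_TYPEDEF__  guards the typedef,
//   __nvoc_class_id_<name>         guards the numeric class ID, presumably
//                                  what classId(<name>) resolves to.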
+#ifndef __NVOC_CLASS_OBJDPU_TYPEDEF__ +#define __NVOC_CLASS_OBJDPU_TYPEDEF__ +typedef struct OBJDPU OBJDPU; +#endif /* __NVOC_CLASS_OBJDPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDPU +#define __nvoc_class_id_OBJDPU 0x23486d +#endif /* __nvoc_class_id_OBJDPU */ + + + +struct OBJDIP; + +#ifndef __NVOC_CLASS_OBJDIP_TYPEDEF__ +#define __NVOC_CLASS_OBJDIP_TYPEDEF__ +typedef struct OBJDIP OBJDIP; +#endif /* __NVOC_CLASS_OBJDIP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDIP +#define __nvoc_class_id_OBJDIP 0x1cc271 +#endif /* __nvoc_class_id_OBJDIP */ + + + +struct Fan; + +#ifndef __NVOC_CLASS_Fan_TYPEDEF__ +#define __NVOC_CLASS_Fan_TYPEDEF__ +typedef struct Fan Fan; +#endif /* __NVOC_CLASS_Fan_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Fan +#define __nvoc_class_id_Fan 0xadd018 +#endif /* __nvoc_class_id_Fan */ + + + +struct DisplayInstanceMemory; + +#ifndef __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ +#define __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ +typedef struct DisplayInstanceMemory DisplayInstanceMemory; +#endif /* __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DisplayInstanceMemory +#define __nvoc_class_id_DisplayInstanceMemory 0x8223e2 +#endif /* __nvoc_class_id_DisplayInstanceMemory */ + + + +struct KernelHead; + +#ifndef __NVOC_CLASS_KernelHead_TYPEDEF__ +#define __NVOC_CLASS_KernelHead_TYPEDEF__ +typedef struct KernelHead KernelHead; +#endif /* __NVOC_CLASS_KernelHead_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelHead +#define __nvoc_class_id_KernelHead 0x0145e6 +#endif /* __nvoc_class_id_KernelHead */ + + + +struct OBJVOLT; + +#ifndef __NVOC_CLASS_OBJVOLT_TYPEDEF__ +#define __NVOC_CLASS_OBJVOLT_TYPEDEF__ +typedef struct OBJVOLT OBJVOLT; +#endif /* __NVOC_CLASS_OBJVOLT_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVOLT +#define __nvoc_class_id_OBJVOLT 0xa68120 +#endif /* __nvoc_class_id_OBJVOLT */ + + + +struct Intr; + +#ifndef __NVOC_CLASS_Intr_TYPEDEF__ +#define __NVOC_CLASS_Intr_TYPEDEF__ +typedef struct Intr Intr; +#endif /* __NVOC_CLASS_Intr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Intr +#define __nvoc_class_id_Intr 0xc06e44 +#endif /* __nvoc_class_id_Intr */ + + + +struct OBJHDA; + +#ifndef __NVOC_CLASS_OBJHDA_TYPEDEF__ +#define __NVOC_CLASS_OBJHDA_TYPEDEF__ +typedef struct OBJHDA OBJHDA; +#endif /* __NVOC_CLASS_OBJHDA_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHDA +#define __nvoc_class_id_OBJHDA 0xd3bfb4 +#endif /* __nvoc_class_id_OBJHDA */ + + + +struct I2c; + +#ifndef __NVOC_CLASS_I2c_TYPEDEF__ +#define __NVOC_CLASS_I2c_TYPEDEF__ +typedef struct I2c I2c; +#endif /* __NVOC_CLASS_I2c_TYPEDEF__ */ + +#ifndef __nvoc_class_id_I2c +#define __nvoc_class_id_I2c 0x48e035 +#endif /* __nvoc_class_id_I2c */ + + + +struct KernelRc; + +#ifndef __NVOC_CLASS_KernelRc_TYPEDEF__ +#define __NVOC_CLASS_KernelRc_TYPEDEF__ +typedef struct KernelRc KernelRc; +#endif /* __NVOC_CLASS_KernelRc_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelRc +#define __nvoc_class_id_KernelRc 0x4888db +#endif /* __nvoc_class_id_KernelRc */ + + + +struct OBJRC; + +#ifndef __NVOC_CLASS_OBJRC_TYPEDEF__ +#define __NVOC_CLASS_OBJRC_TYPEDEF__ +typedef struct OBJRC OBJRC; +#endif /* __NVOC_CLASS_OBJRC_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJRC +#define __nvoc_class_id_OBJRC 0x42d150 +#endif /* __nvoc_class_id_OBJRC */ + + + +struct OBJSOR; + +#ifndef __NVOC_CLASS_OBJSOR_TYPEDEF__ +#define __NVOC_CLASS_OBJSOR_TYPEDEF__ +typedef struct OBJSOR OBJSOR; +#endif /* __NVOC_CLASS_OBJSOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSOR +#define __nvoc_class_id_OBJSOR 0x5ccbfa +#endif /* 
__nvoc_class_id_OBJSOR */ + + + +struct OBJDAC; + +#ifndef __NVOC_CLASS_OBJDAC_TYPEDEF__ +#define __NVOC_CLASS_OBJDAC_TYPEDEF__ +typedef struct OBJDAC OBJDAC; +#endif /* __NVOC_CLASS_OBJDAC_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDAC +#define __nvoc_class_id_OBJDAC 0x4b1802 +#endif /* __nvoc_class_id_OBJDAC */ + + + +struct OBJPIOR; + +#ifndef __NVOC_CLASS_OBJPIOR_TYPEDEF__ +#define __NVOC_CLASS_OBJPIOR_TYPEDEF__ +typedef struct OBJPIOR OBJPIOR; +#endif /* __NVOC_CLASS_OBJPIOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJPIOR +#define __nvoc_class_id_OBJPIOR 0x0128a3 +#endif /* __nvoc_class_id_OBJPIOR */ + + + +struct OBJHEAD; + +#ifndef __NVOC_CLASS_OBJHEAD_TYPEDEF__ +#define __NVOC_CLASS_OBJHEAD_TYPEDEF__ +typedef struct OBJHEAD OBJHEAD; +#endif /* __NVOC_CLASS_OBJHEAD_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHEAD +#define __nvoc_class_id_OBJHEAD 0x74dd86 +#endif /* __nvoc_class_id_OBJHEAD */ + + + +struct OBJVGA; + +#ifndef __NVOC_CLASS_OBJVGA_TYPEDEF__ +#define __NVOC_CLASS_OBJVGA_TYPEDEF__ +typedef struct OBJVGA OBJVGA; +#endif /* __NVOC_CLASS_OBJVGA_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVGA +#define __nvoc_class_id_OBJVGA 0x84e0bc +#endif /* __nvoc_class_id_OBJVGA */ + + + +struct Stereo; + +#ifndef __NVOC_CLASS_Stereo_TYPEDEF__ +#define __NVOC_CLASS_Stereo_TYPEDEF__ +typedef struct Stereo Stereo; +#endif /* __NVOC_CLASS_Stereo_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Stereo +#define __nvoc_class_id_Stereo 0xbbc45d +#endif /* __nvoc_class_id_Stereo */ + + + +struct OBJOR; + +#ifndef __NVOC_CLASS_OBJOR_TYPEDEF__ +#define __NVOC_CLASS_OBJOR_TYPEDEF__ +typedef struct OBJOR OBJOR; +#endif /* __NVOC_CLASS_OBJOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOR +#define __nvoc_class_id_OBJOR 0x215d6b +#endif /* __nvoc_class_id_OBJOR */ + + + +struct OBJBSP; + +#ifndef __NVOC_CLASS_OBJBSP_TYPEDEF__ +#define __NVOC_CLASS_OBJBSP_TYPEDEF__ +typedef struct OBJBSP OBJBSP; +#endif /* __NVOC_CLASS_OBJBSP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJBSP +#define __nvoc_class_id_OBJBSP 0x8f99e1 +#endif /* __nvoc_class_id_OBJBSP */ + + + +struct OBJCIPHER; + +#ifndef __NVOC_CLASS_OBJCIPHER_TYPEDEF__ +#define __NVOC_CLASS_OBJCIPHER_TYPEDEF__ +typedef struct OBJCIPHER OBJCIPHER; +#endif /* __NVOC_CLASS_OBJCIPHER_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJCIPHER +#define __nvoc_class_id_OBJCIPHER 0x8dd911 +#endif /* __nvoc_class_id_OBJCIPHER */ + + + +struct OBJFUSE; + +#ifndef __NVOC_CLASS_OBJFUSE_TYPEDEF__ +#define __NVOC_CLASS_OBJFUSE_TYPEDEF__ +typedef struct OBJFUSE OBJFUSE; +#endif /* __NVOC_CLASS_OBJFUSE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFUSE +#define __nvoc_class_id_OBJFUSE 0x95ba71 +#endif /* __nvoc_class_id_OBJFUSE */ + + + +struct Jtag; + +#ifndef __NVOC_CLASS_Jtag_TYPEDEF__ +#define __NVOC_CLASS_Jtag_TYPEDEF__ +typedef struct Jtag Jtag; +#endif /* __NVOC_CLASS_Jtag_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Jtag +#define __nvoc_class_id_Jtag 0xd73cf9 +#endif /* __nvoc_class_id_Jtag */ + + + +struct OBJHDCP; + +#ifndef __NVOC_CLASS_OBJHDCP_TYPEDEF__ +#define __NVOC_CLASS_OBJHDCP_TYPEDEF__ +typedef struct OBJHDCP OBJHDCP; +#endif /* __NVOC_CLASS_OBJHDCP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHDCP +#define __nvoc_class_id_OBJHDCP 0x426d44 +#endif /* __nvoc_class_id_OBJHDCP */ + + + +struct OBJHDMI; + +#ifndef __NVOC_CLASS_OBJHDMI_TYPEDEF__ +#define __NVOC_CLASS_OBJHDMI_TYPEDEF__ +typedef struct OBJHDMI OBJHDMI; +#endif /* __NVOC_CLASS_OBJHDMI_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHDMI +#define __nvoc_class_id_OBJHDMI 0x2213b6 +#endif /* __nvoc_class_id_OBJHDMI */ + + + +struct 
Therm; + +#ifndef __NVOC_CLASS_Therm_TYPEDEF__ +#define __NVOC_CLASS_Therm_TYPEDEF__ +typedef struct Therm Therm; +#endif /* __NVOC_CLASS_Therm_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Therm +#define __nvoc_class_id_Therm 0x6c1e56 +#endif /* __nvoc_class_id_Therm */ + + + +struct OBJSEQ; + +#ifndef __NVOC_CLASS_OBJSEQ_TYPEDEF__ +#define __NVOC_CLASS_OBJSEQ_TYPEDEF__ +typedef struct OBJSEQ OBJSEQ; +#endif /* __NVOC_CLASS_OBJSEQ_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSEQ +#define __nvoc_class_id_OBJSEQ 0x45da4a +#endif /* __nvoc_class_id_OBJSEQ */ + + + +struct OBJDPAUX; + +#ifndef __NVOC_CLASS_OBJDPAUX_TYPEDEF__ +#define __NVOC_CLASS_OBJDPAUX_TYPEDEF__ +typedef struct OBJDPAUX OBJDPAUX; +#endif /* __NVOC_CLASS_OBJDPAUX_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDPAUX +#define __nvoc_class_id_OBJDPAUX 0xfd2ab9 +#endif /* __nvoc_class_id_OBJDPAUX */ + + + +struct Pmu; + +#ifndef __NVOC_CLASS_Pmu_TYPEDEF__ +#define __NVOC_CLASS_Pmu_TYPEDEF__ +typedef struct Pmu Pmu; +#endif /* __NVOC_CLASS_Pmu_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Pmu +#define __nvoc_class_id_Pmu 0xf3d722 +#endif /* __nvoc_class_id_Pmu */ + + + +struct KernelPmu; + +#ifndef __NVOC_CLASS_KernelPmu_TYPEDEF__ +#define __NVOC_CLASS_KernelPmu_TYPEDEF__ +typedef struct KernelPmu KernelPmu; +#endif /* __NVOC_CLASS_KernelPmu_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelPmu +#define __nvoc_class_id_KernelPmu 0xab9d7d +#endif /* __nvoc_class_id_KernelPmu */ + + +struct GCX; + +#ifndef __NVOC_CLASS_GCX_TYPEDEF__ +#define __NVOC_CLASS_GCX_TYPEDEF__ +typedef struct GCX GCX; +#endif /* __NVOC_CLASS_GCX_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GCX +#define __nvoc_class_id_GCX 0x4507c4 +#endif /* __nvoc_class_id_GCX */ + + +struct Lpwr; + +#ifndef __NVOC_CLASS_Lpwr_TYPEDEF__ +#define __NVOC_CLASS_Lpwr_TYPEDEF__ +typedef struct Lpwr Lpwr; +#endif /* __NVOC_CLASS_Lpwr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Lpwr +#define __nvoc_class_id_Lpwr 0x112230 +#endif /* __nvoc_class_id_Lpwr */ + + + +struct OBJISOHUB; + +#ifndef __NVOC_CLASS_OBJISOHUB_TYPEDEF__ +#define __NVOC_CLASS_OBJISOHUB_TYPEDEF__ +typedef struct OBJISOHUB OBJISOHUB; +#endif /* __NVOC_CLASS_OBJISOHUB_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJISOHUB +#define __nvoc_class_id_OBJISOHUB 0x7c5e0d +#endif /* __nvoc_class_id_OBJISOHUB */ + + + +struct Pmgr; + +#ifndef __NVOC_CLASS_Pmgr_TYPEDEF__ +#define __NVOC_CLASS_Pmgr_TYPEDEF__ +typedef struct Pmgr Pmgr; +#endif /* __NVOC_CLASS_Pmgr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Pmgr +#define __nvoc_class_id_Pmgr 0x894574 +#endif /* __nvoc_class_id_Pmgr */ + + + +struct OBJHDACODEC; + +#ifndef __NVOC_CLASS_OBJHDACODEC_TYPEDEF__ +#define __NVOC_CLASS_OBJHDACODEC_TYPEDEF__ +typedef struct OBJHDACODEC OBJHDACODEC; +#endif /* __NVOC_CLASS_OBJHDACODEC_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHDACODEC +#define __nvoc_class_id_OBJHDACODEC 0xa576e2 +#endif /* __nvoc_class_id_OBJHDACODEC */ + + + +struct Spi; + +#ifndef __NVOC_CLASS_Spi_TYPEDEF__ +#define __NVOC_CLASS_Spi_TYPEDEF__ +typedef struct Spi Spi; +#endif /* __NVOC_CLASS_Spi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Spi +#define __nvoc_class_id_Spi 0x824313 +#endif /* __nvoc_class_id_Spi */ + + + +struct OBJUVM; + +#ifndef __NVOC_CLASS_OBJUVM_TYPEDEF__ +#define __NVOC_CLASS_OBJUVM_TYPEDEF__ +typedef struct OBJUVM OBJUVM; +#endif /* __NVOC_CLASS_OBJUVM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJUVM +#define __nvoc_class_id_OBJUVM 0xf9a17d +#endif /* __nvoc_class_id_OBJUVM */ + + + +struct OBJSEC2; + +#ifndef __NVOC_CLASS_OBJSEC2_TYPEDEF__ +#define __NVOC_CLASS_OBJSEC2_TYPEDEF__ +typedef 
struct OBJSEC2 OBJSEC2; +#endif /* __NVOC_CLASS_OBJSEC2_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSEC2 +#define __nvoc_class_id_OBJSEC2 0x28c408 +#endif /* __nvoc_class_id_OBJSEC2 */ + + + +struct OBJPMS; + +#ifndef __NVOC_CLASS_OBJPMS_TYPEDEF__ +#define __NVOC_CLASS_OBJPMS_TYPEDEF__ +typedef struct OBJPMS OBJPMS; +#endif /* __NVOC_CLASS_OBJPMS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJPMS +#define __nvoc_class_id_OBJPMS 0x9e3810 +#endif /* __nvoc_class_id_OBJPMS */ + + + +struct OBJENGSTATE; + +#ifndef __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ +#define __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ +typedef struct OBJENGSTATE OBJENGSTATE; +#endif /* __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJENGSTATE +#define __nvoc_class_id_OBJENGSTATE 0x7a7ed6 +#endif /* __nvoc_class_id_OBJENGSTATE */ + + + +struct OBJLSFM; + +#ifndef __NVOC_CLASS_OBJLSFM_TYPEDEF__ +#define __NVOC_CLASS_OBJLSFM_TYPEDEF__ +typedef struct OBJLSFM OBJLSFM; +#endif /* __NVOC_CLASS_OBJLSFM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJLSFM +#define __nvoc_class_id_OBJLSFM 0x9a25e4 +#endif /* __nvoc_class_id_OBJLSFM */ + + + +struct OBJACR; + +#ifndef __NVOC_CLASS_OBJACR_TYPEDEF__ +#define __NVOC_CLASS_OBJACR_TYPEDEF__ +typedef struct OBJACR OBJACR; +#endif /* __NVOC_CLASS_OBJACR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJACR +#define __nvoc_class_id_OBJACR 0xdb32a1 +#endif /* __nvoc_class_id_OBJACR */ + + + +struct OBJGPULOG; + +#ifndef __NVOC_CLASS_OBJGPULOG_TYPEDEF__ +#define __NVOC_CLASS_OBJGPULOG_TYPEDEF__ +typedef struct OBJGPULOG OBJGPULOG; +#endif /* __NVOC_CLASS_OBJGPULOG_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPULOG +#define __nvoc_class_id_OBJGPULOG 0xdd19be +#endif /* __nvoc_class_id_OBJGPULOG */ + + + +struct KernelNvlink; + +#ifndef __NVOC_CLASS_KernelNvlink_TYPEDEF__ +#define __NVOC_CLASS_KernelNvlink_TYPEDEF__ +typedef struct KernelNvlink KernelNvlink; +#endif /* __NVOC_CLASS_KernelNvlink_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelNvlink +#define __nvoc_class_id_KernelNvlink 0xce6818 +#endif /* __nvoc_class_id_KernelNvlink */ + + + +struct Nvlink; + +#ifndef __NVOC_CLASS_Nvlink_TYPEDEF__ +#define __NVOC_CLASS_Nvlink_TYPEDEF__ +typedef struct Nvlink Nvlink; +#endif /* __NVOC_CLASS_Nvlink_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Nvlink +#define __nvoc_class_id_Nvlink 0x790a3c +#endif /* __nvoc_class_id_Nvlink */ + + + +struct KernelHwpm; + +#ifndef __NVOC_CLASS_KernelHwpm_TYPEDEF__ +#define __NVOC_CLASS_KernelHwpm_TYPEDEF__ +typedef struct KernelHwpm KernelHwpm; +#endif /* __NVOC_CLASS_KernelHwpm_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelHwpm +#define __nvoc_class_id_KernelHwpm 0xc8c00f +#endif /* __nvoc_class_id_KernelHwpm */ + + + +struct OBJHWPM; + +#ifndef __NVOC_CLASS_OBJHWPM_TYPEDEF__ +#define __NVOC_CLASS_OBJHWPM_TYPEDEF__ +typedef struct OBJHWPM OBJHWPM; +#endif /* __NVOC_CLASS_OBJHWPM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHWPM +#define __nvoc_class_id_OBJHWPM 0x97e43b +#endif /* __nvoc_class_id_OBJHWPM */ + + + +struct OBJGPUMON; + +#ifndef __NVOC_CLASS_OBJGPUMON_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUMON_TYPEDEF__ +typedef struct OBJGPUMON OBJGPUMON; +#endif /* __NVOC_CLASS_OBJGPUMON_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUMON +#define __nvoc_class_id_OBJGPUMON 0x2b424b +#endif /* __nvoc_class_id_OBJGPUMON */ + + + +struct OBJGRIDDISPLAYLESS; + +#ifndef __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ +#define __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ +typedef struct OBJGRIDDISPLAYLESS OBJGRIDDISPLAYLESS; +#endif /* __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ */ + +#ifndef 
__nvoc_class_id_OBJGRIDDISPLAYLESS +#define __nvoc_class_id_OBJGRIDDISPLAYLESS 0x20fd5a +#endif /* __nvoc_class_id_OBJGRIDDISPLAYLESS */ + + + +struct FECS; + +#ifndef __NVOC_CLASS_FECS_TYPEDEF__ +#define __NVOC_CLASS_FECS_TYPEDEF__ +typedef struct FECS FECS; +#endif /* __NVOC_CLASS_FECS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_FECS +#define __nvoc_class_id_FECS 0x5ee8dc +#endif /* __nvoc_class_id_FECS */ + + + +struct GPCCS; + +#ifndef __NVOC_CLASS_GPCCS_TYPEDEF__ +#define __NVOC_CLASS_GPCCS_TYPEDEF__ +typedef struct GPCCS GPCCS; +#endif /* __NVOC_CLASS_GPCCS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GPCCS +#define __nvoc_class_id_GPCCS 0x4781e8 +#endif /* __nvoc_class_id_GPCCS */ + + + +struct OBJCE; + +#ifndef __NVOC_CLASS_OBJCE_TYPEDEF__ +#define __NVOC_CLASS_OBJCE_TYPEDEF__ +typedef struct OBJCE OBJCE; +#endif /* __NVOC_CLASS_OBJCE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJCE +#define __nvoc_class_id_OBJCE 0x793ceb +#endif /* __nvoc_class_id_OBJCE */ + + + +struct KernelCE; + +#ifndef __NVOC_CLASS_KernelCE_TYPEDEF__ +#define __NVOC_CLASS_KernelCE_TYPEDEF__ +typedef struct KernelCE KernelCE; +#endif /* __NVOC_CLASS_KernelCE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelCE +#define __nvoc_class_id_KernelCE 0x242aca +#endif /* __nvoc_class_id_KernelCE */ + + + +struct OBJMSENC; + +#ifndef __NVOC_CLASS_OBJMSENC_TYPEDEF__ +#define __NVOC_CLASS_OBJMSENC_TYPEDEF__ +typedef struct OBJMSENC OBJMSENC; +#endif /* __NVOC_CLASS_OBJMSENC_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJMSENC +#define __nvoc_class_id_OBJMSENC 0xe97b6c +#endif /* __nvoc_class_id_OBJMSENC */ + + + +struct OBJNVJPG; + +#ifndef __NVOC_CLASS_OBJNVJPG_TYPEDEF__ +#define __NVOC_CLASS_OBJNVJPG_TYPEDEF__ +typedef struct OBJNVJPG OBJNVJPG; +#endif /* __NVOC_CLASS_OBJNVJPG_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJNVJPG +#define __nvoc_class_id_OBJNVJPG 0x2b3a54 +#endif /* __nvoc_class_id_OBJNVJPG */ + + + +struct OBJVMMU; + +#ifndef __NVOC_CLASS_OBJVMMU_TYPEDEF__ +#define __NVOC_CLASS_OBJVMMU_TYPEDEF__ +typedef struct OBJVMMU OBJVMMU; +#endif /* __NVOC_CLASS_OBJVMMU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVMMU +#define __nvoc_class_id_OBJVMMU 0xdf8918 +#endif /* __nvoc_class_id_OBJVMMU */ + + + +struct Gsp; + +#ifndef __NVOC_CLASS_Gsp_TYPEDEF__ +#define __NVOC_CLASS_Gsp_TYPEDEF__ +typedef struct Gsp Gsp; +#endif /* __NVOC_CLASS_Gsp_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Gsp +#define __nvoc_class_id_Gsp 0xda3de4 +#endif /* __nvoc_class_id_Gsp */ + + + +struct OBJFSP; + +#ifndef __NVOC_CLASS_OBJFSP_TYPEDEF__ +#define __NVOC_CLASS_OBJFSP_TYPEDEF__ +typedef struct OBJFSP OBJFSP; +#endif /* __NVOC_CLASS_OBJFSP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFSP +#define __nvoc_class_id_OBJFSP 0xd39158 +#endif /* __nvoc_class_id_OBJFSP */ + + + +struct KernelFsp; + +#ifndef __NVOC_CLASS_KernelFsp_TYPEDEF__ +#define __NVOC_CLASS_KernelFsp_TYPEDEF__ +typedef struct KernelFsp KernelFsp; +#endif /* __NVOC_CLASS_KernelFsp_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelFsp +#define __nvoc_class_id_KernelFsp 0x87fb96 +#endif /* __nvoc_class_id_KernelFsp */ + + + +struct OBJOFA; + +#ifndef __NVOC_CLASS_OBJOFA_TYPEDEF__ +#define __NVOC_CLASS_OBJOFA_TYPEDEF__ +typedef struct OBJOFA OBJOFA; +#endif /* __NVOC_CLASS_OBJOFA_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOFA +#define __nvoc_class_id_OBJOFA 0xdd7bab +#endif /* __nvoc_class_id_OBJOFA */ + + + +struct KernelIoctrl; + +#ifndef __NVOC_CLASS_KernelIoctrl_TYPEDEF__ +#define __NVOC_CLASS_KernelIoctrl_TYPEDEF__ +typedef struct KernelIoctrl KernelIoctrl; +#endif /* __NVOC_CLASS_KernelIoctrl_TYPEDEF__ 
*/ + +#ifndef __nvoc_class_id_KernelIoctrl +#define __nvoc_class_id_KernelIoctrl 0x880c7d +#endif /* __nvoc_class_id_KernelIoctrl */ + + + +struct Ioctrl; + +#ifndef __NVOC_CLASS_Ioctrl_TYPEDEF__ +#define __NVOC_CLASS_Ioctrl_TYPEDEF__ +typedef struct Ioctrl Ioctrl; +#endif /* __NVOC_CLASS_Ioctrl_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Ioctrl +#define __nvoc_class_id_Ioctrl 0x11ce10 +#endif /* __nvoc_class_id_Ioctrl */ + + + +struct KernelSec2; + +#ifndef __NVOC_CLASS_KernelSec2_TYPEDEF__ +#define __NVOC_CLASS_KernelSec2_TYPEDEF__ +typedef struct KernelSec2 KernelSec2; +#endif /* __NVOC_CLASS_KernelSec2_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelSec2 +#define __nvoc_class_id_KernelSec2 0x2f36c9 +#endif /* __nvoc_class_id_KernelSec2 */ + + + +struct KernelGsp; + +#ifndef __NVOC_CLASS_KernelGsp_TYPEDEF__ +#define __NVOC_CLASS_KernelGsp_TYPEDEF__ +typedef struct KernelGsp KernelGsp; +#endif /* __NVOC_CLASS_KernelGsp_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGsp +#define __nvoc_class_id_KernelGsp 0x311d4e +#endif /* __nvoc_class_id_KernelGsp */ + + + +struct OBJDCECLIENTRM; + +#ifndef __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ +#define __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ +typedef struct OBJDCECLIENTRM OBJDCECLIENTRM; +#endif /* __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDCECLIENTRM +#define __nvoc_class_id_OBJDCECLIENTRM 0x61649c +#endif /* __nvoc_class_id_OBJDCECLIENTRM */ + + + +struct OBJDISPMACRO; + +#ifndef __NVOC_CLASS_OBJDISPMACRO_TYPEDEF__ +#define __NVOC_CLASS_OBJDISPMACRO_TYPEDEF__ +typedef struct OBJDISPMACRO OBJDISPMACRO; +#endif /* __NVOC_CLASS_OBJDISPMACRO_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDISPMACRO +#define __nvoc_class_id_OBJDISPMACRO 0xa1cad2 +#endif /* __nvoc_class_id_OBJDISPMACRO */ + + + +struct Nne; + +#ifndef __NVOC_CLASS_Nne_TYPEDEF__ +#define __NVOC_CLASS_Nne_TYPEDEF__ +typedef struct Nne Nne; +#endif /* __NVOC_CLASS_Nne_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Nne +#define __nvoc_class_id_Nne 0x2487e2 +#endif /* __nvoc_class_id_Nne */ + + + +struct Oob; + +#ifndef __NVOC_CLASS_Oob_TYPEDEF__ +#define __NVOC_CLASS_Oob_TYPEDEF__ +typedef struct Oob Oob; +#endif /* __NVOC_CLASS_Oob_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Oob +#define __nvoc_class_id_Oob 0x98b919 +#endif /* __nvoc_class_id_Oob */ + + + +struct OBJDSI; + +#ifndef __NVOC_CLASS_OBJDSI_TYPEDEF__ +#define __NVOC_CLASS_OBJDSI_TYPEDEF__ +typedef struct OBJDSI OBJDSI; +#endif /* __NVOC_CLASS_OBJDSI_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDSI +#define __nvoc_class_id_OBJDSI 0x2e9a64 +#endif /* __nvoc_class_id_OBJDSI */ + + + +struct OBJDCB; + +#ifndef __NVOC_CLASS_OBJDCB_TYPEDEF__ +#define __NVOC_CLASS_OBJDCB_TYPEDEF__ +typedef struct OBJDCB OBJDCB; +#endif /* __NVOC_CLASS_OBJDCB_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDCB +#define __nvoc_class_id_OBJDCB 0xf931d4 +#endif /* __nvoc_class_id_OBJDCB */ + + + +struct KernelGmmu; + +#ifndef __NVOC_CLASS_KernelGmmu_TYPEDEF__ +#define __NVOC_CLASS_KernelGmmu_TYPEDEF__ +typedef struct KernelGmmu KernelGmmu; +#endif /* __NVOC_CLASS_KernelGmmu_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGmmu +#define __nvoc_class_id_KernelGmmu 0x29362f +#endif /* __nvoc_class_id_KernelGmmu */ + + + +struct OBJGMMU; + +#ifndef __NVOC_CLASS_OBJGMMU_TYPEDEF__ +#define __NVOC_CLASS_OBJGMMU_TYPEDEF__ +typedef struct OBJGMMU OBJGMMU; +#endif /* __NVOC_CLASS_OBJGMMU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGMMU +#define __nvoc_class_id_OBJGMMU 0xd7a41d +#endif /* __nvoc_class_id_OBJGMMU */ + + +#define ENG_CLASS_INVALID classId(OBJINVALID) +#define 
ENG_CLASS_SW classId(OBJSWENG) +#define ENG_CLASS_GPU classId(OBJGPU) +#define ENG_CLASS_FLCN classId(Falcon) +#define ENG_CLASS_MC classId(OBJMC) +#define ENG_CLASS_KERNEL_MC classId(KernelMc) +#define ENG_CLASS_PRIV_RING classId(PrivRing) +#define ENG_CLASS_SW_INTR classId(SwIntr) +#define ENG_CLASS_MEMORY_SYSTEM classId(MemorySystem) +#define ENG_CLASS_KERNEL_MEMORY_SYSTEM classId(KernelMemorySystem) +#define ENG_CLASS_MEMORY_MANAGER classId(MemoryManager) +#define ENG_CLASS_FBFLCN classId(OBJFBFLCN) +#define ENG_CLASS_TMR classId(OBJTMR) +#define ENG_CLASS_DMA classId(VirtMemAllocator) +#define ENG_CLASS_KERNEL_FIFO classId(KernelFifo) +#define ENG_CLASS_FIFO classId(OBJFIFO) +#define ENG_CLASS_OS classId(OBJOS) +#define ENG_CLASS_BUS classId(OBJBUS) +#define ENG_CLASS_KERNEL_BUS classId(KernelBus) +#define ENG_CLASS_INFOROM classId(OBJINFOROM) +#define ENG_CLASS_PERF classId(Perf) +#define ENG_CLASS_KERNEL_PERF classId(KernelPerf) +#define ENG_CLASS_PXUC classId(Pxuc) +#define ENG_CLASS_BIF classId(OBJBIF) +#define ENG_CLASS_KERNEL_BIF classId(KernelBif) +#define ENG_CLASS_HSHUBMANAGER classId(OBJHSHUBMANAGER) +#define ENG_CLASS_SF classId(OBJSF) +#define ENG_CLASS_GPIO classId(OBJGPIO) +#define ENG_CLASS_CLK classId(ClockManager) +#define ENG_CLASS_KERNEL_DISPLAY classId(KernelDisplay) +#define ENG_CLASS_DISP classId(OBJDISP) +#define ENG_CLASS_DPU classId(OBJDPU) +#define ENG_CLASS_DIP classId(OBJDIP) +#define ENG_CLASS_FAN classId(Fan) +#define ENG_CLASS_INST classId(DisplayInstanceMemory) +#define ENG_CLASS_KERNEL_HEAD classId(KernelHead) +#define ENG_CLASS_VOLT classId(OBJVOLT) +#define ENG_CLASS_INTR classId(Intr) +#define ENG_CLASS_HDA classId(OBJHDA) +#define ENG_CLASS_I2C classId(I2c) +#define ENG_CLASS_KERNEL_RC classId(KernelRc) +#define ENG_CLASS_RC classId(OBJRC) +#define ENG_CLASS_SOR classId(OBJSOR) +#define ENG_CLASS_DAC classId(OBJDAC) +#define ENG_CLASS_PIOR classId(OBJPIOR) +#define ENG_CLASS_HEAD classId(OBJHEAD) +#define ENG_CLASS_VGA classId(OBJVGA) +#define ENG_CLASS_STEREO classId(Stereo) +#define ENG_CLASS_OR classId(OBJOR) +#define ENG_CLASS_NVDEC classId(OBJBSP) +#define ENG_CLASS_CIPHER classId(OBJCIPHER) +#define ENG_CLASS_FUSE classId(OBJFUSE) +#define ENG_CLASS_JTAG classId(Jtag) +#define ENG_CLASS_HDCP classId(OBJHDCP) +#define ENG_CLASS_HDMI classId(OBJHDMI) +#define ENG_CLASS_THERM classId(Therm) +#define ENG_CLASS_SEQ classId(OBJSEQ) +#define ENG_CLASS_DPAUX classId(OBJDPAUX) +#define ENG_CLASS_PMU classId(Pmu) +#define ENG_CLASS_KERNEL_PMU classId(KernelPmu) +#define ENG_CLASS_GCX classId(GCX) +#define ENG_CLASS_LPWR classId(Lpwr) +#define ENG_CLASS_ISOHUB classId(OBJISOHUB) +#define ENG_CLASS_PMGR classId(Pmgr) +#define ENG_CLASS_HDACODEC classId(OBJHDACODEC) +#define ENG_CLASS_SPI classId(Spi) +#define ENG_CLASS_UVM classId(OBJUVM) +#define ENG_CLASS_SEC2 classId(OBJSEC2) +#define ENG_CLASS_PMS classId(OBJPMS) +#define ENG_CLASS_ENGSTATE classId(OBJENGSTATE) +#define ENG_CLASS_LSFM classId(OBJLSFM) +#define ENG_CLASS_ACR classId(OBJACR) +#define ENG_CLASS_GPULOG classId(OBJGPULOG) +#define ENG_CLASS_NVLINK classId(Nvlink) +#define ENG_CLASS_HWPM classId(OBJHWPM) +#define ENG_CLASS_KERNEL_HWPM classId(KernelHwpm) +#define ENG_CLASS_GPUMON classId(OBJGPUMON) +#define ENG_CLASS_GRIDDISPLAYLESS classId(OBJGRIDDISPLAYLESS) +#define ENG_CLASS_VMMU classId(OBJVMMU) +#define ENG_CLASS_NVJPG classId(OBJNVJPG) +#define ENG_CLASS_GSP classId(Gsp) +#define ENG_CLASS_FSP classId(OBJFSP) +#define ENG_CLASS_KERNEL_FSP classId(KernelFsp) +#define 
ENG_CLASS_KERNEL_GSP classId(KernelGsp) +#define ENG_CLASS_KERNEL_SEC2 classId(KernelSec2) +#define ENG_CLASS_DISPMACRO classId(OBJDISPMACRO) +#define ENG_CLASS_NNE classId(OBJNNE) +#define ENG_CLASS_OOB classId(Oob) +#define ENG_CLASS_DSI classId(OBJDSI) +#define ENG_CLASS_DCECLIENTRM classId(OBJDCECLIENTRM) +#define ENG_CLASS_DCB classId(OBJDCB) +#define ENG_CLASS_KERNEL_NVLINK classId(KernelNvlink) +#define ENG_CLASS_GMMU classId(OBJGMMU) +#define ENG_CLASS_KERNEL_GMMU classId(KernelGmmu) +#define ENG_CLASS_CE classId(OBJCE) +#define ENG_CLASS_NVENC classId(OBJMSENC) +#define ENG_CLASS_NVDEC classId(OBJBSP) +#define ENG_CLASS_GR classId(Graphics) +#define ENG_CLASS_NVJPEG classId(OBJNVJPG) +#define ENG_CLASS_FECS classId(FECS) +#define ENG_CLASS_GPCCS classId(GPCCS) +#define ENG_CLASS_IOCTRL classId(Ioctrl) +#define ENG_CLASS_HSHUB classId(Hshub) +#define ENG_CLASS_KERNEL_IOCTRL classId(KernelIoctrl) +#define ENG_CLASS_OFA classId(OBJOFA) + + +// +// Engine tags to be used by both RM/HAL to reference specific engines. +// +// These values are used in the engine descriptor table +// as well as in the class descriptor table. +// +#define ENG_INVALID MKENGDESC(ENG_CLASS_INVALID, 0) +#define ENG_SW MKENGDESC(ENG_CLASS_SW, 0) +#define ENG_GPU MKENGDESC(ENG_CLASS_GPU, 0) +#define ENG_FLCN MKENGDESC(ENG_CLASS_FLCN, 0) +#define ENG_MC MKENGDESC(ENG_CLASS_MC, 0) +#define ENG_KERNEL_MC MKENGDESC(ENG_CLASS_KERNEL_MC, 0) +#define ENG_PRIV_RING MKENGDESC(ENG_CLASS_PRIV_RING, 0) +#define ENG_SW_INTR MKENGDESC(ENG_CLASS_SW_INTR, 0) +#define ENG_MEMORY_SYSTEM MKENGDESC(ENG_CLASS_MEMORY_SYSTEM, 0) +#define ENG_KERNEL_MEMORY_SYSTEM MKENGDESC(ENG_CLASS_KERNEL_MEMORY_SYSTEM, 0) +#define ENG_MEMORY_MANAGER MKENGDESC(ENG_CLASS_MEMORY_MANAGER, 0) +#define ENG_FBFLCN MKENGDESC(ENG_CLASS_FBFLCN, 0) +#define ENG_TMR MKENGDESC(ENG_CLASS_TMR, 0) +#define ENG_DMA MKENGDESC(ENG_CLASS_DMA, 0) +#define ENG_KERNEL_FIFO MKENGDESC(ENG_CLASS_KERNEL_FIFO, 0) +#define ENG_FIFO MKENGDESC(ENG_CLASS_FIFO, 0) +#define ENG_OS MKENGDESC(ENG_CLASS_OS, 0) +#define ENG_BUS MKENGDESC(ENG_CLASS_BUS, 0) +#define ENG_KERNEL_BUS MKENGDESC(ENG_CLASS_KERNEL_BUS, 0) +#define ENG_INFOROM MKENGDESC(ENG_CLASS_INFOROM, 0) +#define ENG_PERF MKENGDESC(ENG_CLASS_PERF, 0) +#define ENG_KERNEL_PERF MKENGDESC(ENG_CLASS_KERNEL_PERF, 0) +#define ENG_PXUC MKENGDESC(ENG_CLASS_PXUC, 0) +#define ENG_BIF MKENGDESC(ENG_CLASS_BIF, 0) +#define ENG_KERNEL_BIF MKENGDESC(ENG_CLASS_KERNEL_BIF, 0) +#define ENG_HSHUBMANAGER MKENGDESC(ENG_CLASS_HSHUBMANAGER, 0) +#define ENG_SF MKENGDESC(ENG_CLASS_SF, 0) +#define ENG_GPIO MKENGDESC(ENG_CLASS_GPIO, 0) +#define ENG_CLK MKENGDESC(ENG_CLASS_CLK, 0) +#define ENG_KERNEL_DISPLAY MKENGDESC(ENG_CLASS_KERNEL_DISPLAY, 0) +#define ENG_DISP MKENGDESC(ENG_CLASS_DISP, 0) +#define ENG_DPU MKENGDESC(ENG_CLASS_DPU, 0) +#define ENG_DIP MKENGDESC(ENG_CLASS_DIP, 0) +#define ENG_FAN MKENGDESC(ENG_CLASS_FAN, 0) +#define ENG_INST MKENGDESC(ENG_CLASS_INST, 0) +#define ENG_KERNEL_HEAD MKENGDESC(ENG_CLASS_KERNEL_HEAD, 0) +#define ENG_VOLT MKENGDESC(ENG_CLASS_VOLT, 0) +#define ENG_INTR MKENGDESC(ENG_CLASS_INTR, 0) +#define ENG_HDA MKENGDESC(ENG_CLASS_HDA, 0) +#define ENG_I2C MKENGDESC(ENG_CLASS_I2C, 0) +#define ENG_KERNEL_RC MKENGDESC(ENG_CLASS_KERNEL_RC, 0) +#define ENG_RC MKENGDESC(ENG_CLASS_RC, 0) +#define ENG_SOR MKENGDESC(ENG_CLASS_SOR, 0) +#define ENG_DAC MKENGDESC(ENG_CLASS_DAC, 0) +#define ENG_PIOR MKENGDESC(ENG_CLASS_PIOR, 0) +#define ENG_HEAD MKENGDESC(ENG_CLASS_HEAD, 0) +#define ENG_VGA MKENGDESC(ENG_CLASS_VGA, 0) +#define ENG_STEREO 
MKENGDESC(ENG_CLASS_STEREO, 0)
+#define ENG_OR MKENGDESC(ENG_CLASS_OR, 0)
+#define ENG_CIPHER MKENGDESC(ENG_CLASS_CIPHER, 0)
+#define ENG_FUSE MKENGDESC(ENG_CLASS_FUSE, 0)
+#define ENG_JTAG MKENGDESC(ENG_CLASS_JTAG, 0)
+#define ENG_HDCP MKENGDESC(ENG_CLASS_HDCP, 0)
+#define ENG_HDMI MKENGDESC(ENG_CLASS_HDMI, 0)
+#define ENG_THERM MKENGDESC(ENG_CLASS_THERM, 0)
+#define ENG_SEQ MKENGDESC(ENG_CLASS_SEQ, 0)
+#define ENG_DPAUX MKENGDESC(ENG_CLASS_DPAUX, 0)
+#define ENG_PMU MKENGDESC(ENG_CLASS_PMU, 0)
+#define ENG_KERNEL_PMU MKENGDESC(ENG_CLASS_KERNEL_PMU, 0)
+#define ENG_GCX MKENGDESC(ENG_CLASS_GCX, 0)
+#define ENG_LPWR MKENGDESC(ENG_CLASS_LPWR, 0)
+#define ENG_ISOHUB MKENGDESC(ENG_CLASS_ISOHUB, 0)
+#define ENG_PMGR MKENGDESC(ENG_CLASS_PMGR, 0)
+#define ENG_HDACODEC MKENGDESC(ENG_CLASS_HDACODEC, 0)
+#define ENG_SPI MKENGDESC(ENG_CLASS_SPI, 0)
+#define ENG_UVM MKENGDESC(ENG_CLASS_UVM, 0)
+#define ENG_SEC2 MKENGDESC(ENG_CLASS_SEC2, 0)
+#define ENG_PMS MKENGDESC(ENG_CLASS_PMS, 0)
+#define ENG_ENGSTATE MKENGDESC(ENG_CLASS_ENGSTATE, 0)
+#define ENG_LSFM MKENGDESC(ENG_CLASS_LSFM, 0)
+#define ENG_ACR MKENGDESC(ENG_CLASS_ACR, 0)
+#define ENG_GPULOG MKENGDESC(ENG_CLASS_GPULOG, 0)
+#define ENG_NVLINK MKENGDESC(ENG_CLASS_NVLINK, 0)
+#define ENG_HWPM MKENGDESC(ENG_CLASS_HWPM, 0)
+#define ENG_KERNEL_HWPM MKENGDESC(ENG_CLASS_KERNEL_HWPM, 0)
+#define ENG_GPUMON MKENGDESC(ENG_CLASS_GPUMON, 0)
+#define ENG_GRIDDISPLAYLESS MKENGDESC(ENG_CLASS_GRIDDISPLAYLESS, 0)
+#define ENG_VMMU MKENGDESC(ENG_CLASS_VMMU, 0)
+#define ENG_NVJPG MKENGDESC(ENG_CLASS_NVJPG, 0)
+#define ENG_GSP MKENGDESC(ENG_CLASS_GSP, 0)
+#define ENG_FSP MKENGDESC(ENG_CLASS_FSP, 0)
+#define ENG_KERNEL_FSP MKENGDESC(ENG_CLASS_KERNEL_FSP, 0)
+#define ENG_KERNEL_GSP MKENGDESC(ENG_CLASS_KERNEL_GSP, 0)
+#define ENG_KERNEL_SEC2 MKENGDESC(ENG_CLASS_KERNEL_SEC2, 0)
+#define ENG_DISPMACRO MKENGDESC(ENG_CLASS_DISPMACRO, 0)
+#define ENG_NNE MKENGDESC(ENG_CLASS_NNE, 0)
+#define ENG_OOB MKENGDESC(ENG_CLASS_OOB, 0)
+#define ENG_DSI MKENGDESC(ENG_CLASS_DSI, 0)
+#define ENG_DCECLIENTRM MKENGDESC(ENG_CLASS_DCECLIENTRM, 0)
+#define ENG_DCB MKENGDESC(ENG_CLASS_DCB, 0)
+#define ENG_KERNEL_NVLINK MKENGDESC(ENG_CLASS_KERNEL_NVLINK, 0)
+#define ENG_GMMU MKENGDESC(ENG_CLASS_GMMU, 0)
+#define ENG_KERNEL_GMMU MKENGDESC(ENG_CLASS_KERNEL_GMMU, 0)
+
+// Indexed GSPLITE Engine Tag Reference
+
+// Indexed CE engine tag reference
+#define ENG_CE(x) MKENGDESC(ENG_CLASS_CE, x)
+#define ENG_CE__SIZE_1 20
+#define IS_CE(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(OBJCE))
+#define GET_CE_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST)
+
+// Indexed MSENC Engine Tag Reference
+#define ENG_NVENC(x) MKENGDESC(ENG_CLASS_NVENC, x)
+#define ENG_NVENC__SIZE_1 4
+#define IS_MSENC(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(OBJMSENC))
+#define GET_MSENC_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST)
+
+// Indexed BSP/NVDEC Engine Tag Reference
+#define ENG_NVDEC(x) MKENGDESC(ENG_CLASS_NVDEC, x)
+#define ENG_NVDEC__SIZE_1 8
+#define IS_NVDEC(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(OBJBSP))
+#define GET_NVDEC_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST)
+
+// Indexed GR engine tag reference
+#define ENG_GR(x) MKENGDESC(ENG_CLASS_GR, x)
+#define ENG_GR__SIZE_1 8
+#define IS_GR(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(Graphics))
+#define GET_GR_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST)
+
+// Indexed NVJPEG Engine Tag Reference
+#define ENG_NVJPEG(x) MKENGDESC(ENG_CLASS_NVJPEG, x)
+#define ENG_NVJPEG__SIZE_1 8
+#define IS_NVJPEG(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(OBJNVJPG))
+#define GET_NVJPEG_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST)
+
+// Indexed FECS engine tag reference
+#define ENG_FECS(x) MKENGDESC(ENG_CLASS_FECS, x)
+#define ENG_FECS__SIZE_1 8
+#define IS_FECS(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(FECS))
+#define GET_FECS_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST)
+
+// Indexed GPCCS engine tag reference
+#define ENG_GPCCS(x) MKENGDESC(ENG_CLASS_GPCCS, x)
+#define ENG_GPCCS__SIZE_1 8
+#define IS_GPCCS(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(GPCCS))
+#define GET_GPCCS_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST)
+
+// Indexed IOCTRL engine tag reference
+#define ENG_IOCTRL(x) MKENGDESC(ENG_CLASS_IOCTRL, x)
+#define ENG_IOCTRL__SIZE_1 3
+#define IS_IOCTRL(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(Ioctrl))
+#define GET_IOCTRL_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST)
+
+// Indexed HSHUB engine tag reference
+#define ENG_HSHUB(x) MKENGDESC(ENG_CLASS_HSHUB, x)
+#define ENG_HSHUB__SIZE_1 12
+#define IS_HSHUB(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(Hshub))
+#define GET_HSHUB_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST)
+
+// Indexed IOCTRL engine tag reference
+#define ENG_KERNEL_IOCTRL(x) MKENGDESC(ENG_CLASS_KERNEL_IOCTRL, x)
+#define ENG_KERNEL_IOCTRL__SIZE_1 3
+#define IS_KERNEL_IOCTRL(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(KernelIoctrl))
+#define GET_KERNEL_IOCTRL_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST)
+
+// Indexed OFA engine tag reference
+#define ENG_OFA(x) MKENGDESC(ENG_CLASS_OFA, x)
+#define ENG_OFA__SIZE_1 2
+#define IS_OFA(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(OBJOFA))
+#define GET_OFA_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST)
+
+#endif // _ENG_DESC_H_
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _G_ENG_DESC_NVOC_H_
diff --git a/src/nvidia/generated/g_eng_state_nvoc.c b/src/nvidia/generated/g_eng_state_nvoc.c
new file mode 100644
index 0000000..3f60154
--- /dev/null
+++ b/src/nvidia/generated/g_eng_state_nvoc.c
@@ -0,0 +1,214 @@
+#define NVOC_ENG_STATE_H_PRIVATE_ACCESS_ALLOWED
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+#include "nvtypes.h"
+#include "nvport/nvport.h"
+#include "nvport/inline/util_valist.h"
+#include "utils/nvassert.h"
+#include "g_eng_state_nvoc.h"
+
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check__0x7a7ed6 = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+
+// Forward declarations for OBJENGSTATE
+void __nvoc_init__Object(Object*);
+void __nvoc_init__OBJENGSTATE(OBJENGSTATE*);
+void __nvoc_init_funcTable_OBJENGSTATE(OBJENGSTATE*);
+NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE*);
+void __nvoc_init_dataField_OBJENGSTATE(OBJENGSTATE*);
+void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*);
+
+// Structures used within RTTI (run-time type information)
+extern const struct NVOC_CASTINFO __nvoc_castinfo__OBJENGSTATE;
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJENGSTATE;
+
+// Down-thunk(s) to bridge OBJENGSTATE methods from ancestors (if any)
+
+// Up-thunk(s) to bridge OBJENGSTATE methods to ancestors (if any)
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(OBJENGSTATE),
+        /*classId=*/ classId(OBJENGSTATE),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "OBJENGSTATE",
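// Annotation (illustrative sketch, not generated code), recapping the
// g_eng_desc_nvoc.h hunk that ends above: an ENGDESCRIPTOR packs an NVOC
// class id together with an instance index; MKENGDESC builds one and
// ENGDESC_FIELD extracts a field (the bit layout is defined elsewhere in
// eng_desc.h, outside this hunk). Assumed usage for an indexed engine:
//
//     ENGDESCRIPTOR desc = ENG_CE(3);      // copy engine, instance 3
//     if (IS_CE(desc))                     // class field == classId(OBJCE)
//     {
//         NvU32 idx = GET_CE_IDX(desc);    // recovers the instance, 3
//         // valid instances are bounded by ENG_CE__SIZE_1 (20 for CE)
//     }
//
// Non-indexed engines, e.g. ENG_THERM, are simply MKENGDESC(class, 0).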
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJENGSTATE,
+    /*pCastInfo=*/ &__nvoc_castinfo__OBJENGSTATE,
+    /*pExportInfo=*/ &__nvoc_export_info__OBJENGSTATE
+};
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+static const struct NVOC_METADATA__OBJENGSTATE __nvoc_metadata__OBJENGSTATE = {
+    .rtti.pClassDef = &__nvoc_class_def_OBJENGSTATE,    // (engstate) this
+    .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJENGSTATE,
+    .rtti.offset = 0,
+    .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object,    // (obj) super
+    .metadata__Object.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__Object.rtti.offset = NV_OFFSETOF(OBJENGSTATE, __nvoc_base_Object),
+
+    .vtable.__engstateConstructEngine__ = &engstateConstructEngine_IMPL,    // virtual
+    .vtable.__engstateInitMissing__ = &engstateInitMissing_IMPL,    // virtual
+    .vtable.__engstateStatePreInitLocked__ = &engstateStatePreInitLocked_IMPL,    // virtual
+    .vtable.__engstateStatePreInitUnlocked__ = &engstateStatePreInitUnlocked_IMPL,    // virtual
+    .vtable.__engstateStateInitLocked__ = &engstateStateInitLocked_IMPL,    // virtual
+    .vtable.__engstateStateInitUnlocked__ = &engstateStateInitUnlocked_IMPL,    // virtual
+    .vtable.__engstateStatePreLoad__ = &engstateStatePreLoad_IMPL,    // virtual
+    .vtable.__engstateStateLoad__ = &engstateStateLoad_IMPL,    // virtual
+    .vtable.__engstateStatePostLoad__ = &engstateStatePostLoad_IMPL,    // virtual
+    .vtable.__engstateStatePreUnload__ = &engstateStatePreUnload_IMPL,    // virtual
+    .vtable.__engstateStateUnload__ = &engstateStateUnload_IMPL,    // virtual
+    .vtable.__engstateStatePostUnload__ = &engstateStatePostUnload_IMPL,    // virtual
+    .vtable.__engstateStateDestroy__ = &engstateStateDestroy_IMPL,    // virtual
+    .vtable.__engstateIsPresent__ = &engstateIsPresent_IMPL,    // virtual
+};
+
+
+// Dynamic down-casting information
+const struct NVOC_CASTINFO __nvoc_castinfo__OBJENGSTATE = {
+    .numRelatives = 2,
+    .relatives = {
+        &__nvoc_metadata__OBJENGSTATE.rtti,    // [0]: (engstate) this
+        &__nvoc_metadata__OBJENGSTATE.metadata__Object.rtti,    // [1]: (obj) super
+    }
+};
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJENGSTATE =
+{
+    /*numEntries=*/ 0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_Object(Object*);
+void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE *pThis) {
+    __nvoc_engstateDestruct(pThis);
+    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_OBJENGSTATE(OBJENGSTATE *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_Object(Object* );
+NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE *pThis) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
+    if (status != NV_OK) goto __nvoc_ctor_OBJENGSTATE_fail_Object;
+    __nvoc_init_dataField_OBJENGSTATE(pThis);
+    goto __nvoc_ctor_OBJENGSTATE_exit; // Success
+
+__nvoc_ctor_OBJENGSTATE_fail_Object:
+__nvoc_ctor_OBJENGSTATE_exit:
+
+    return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_OBJENGSTATE_1(OBJENGSTATE *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+} // End __nvoc_init_funcTable_OBJENGSTATE_1
+
+
+// Initialize vtable(s) for 14 virtual method(s).
+void __nvoc_init_funcTable_OBJENGSTATE(OBJENGSTATE *pThis) {
+    __nvoc_init_funcTable_OBJENGSTATE_1(pThis);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__OBJENGSTATE(OBJENGSTATE *pThis) {
+
+    // Initialize pointers to inherited data.
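    // Annotation (illustrative sketch, not generated code): the assignments
    // below, together with the metadata pointers set a few lines further
    // down, are what make NVOC's virtual dispatch work. Once this function
    // has run, a wrapper such as engstateStateLoad(pGpu, pEngstate, flags)
    // expands to engstateStateLoad_DISPATCH(), which resolves the call as:
    //
    //     pEngstate->__nvoc_metadata_ptr->vtable.__engstateStateLoad__(pGpu, pEngstate, flags);
    //
    // The metadata (RTTI plus vtable) is one shared const per-class
    // structure, so each object stores only a single pointer; a subclass
    // that overrides a method points at its own NVOC_METADATA__* instance
    // carrying different vtable entries.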
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;    // (obj) super
+    pThis->__nvoc_pbase_OBJENGSTATE = pThis;    // (engstate) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__Object(&pThis->__nvoc_base_Object);
+
+    // Pointer(s) to metadata structures(s)
+    pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__OBJENGSTATE.metadata__Object;    // (obj) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__OBJENGSTATE;    // (engstate) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_OBJENGSTATE(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_OBJENGSTATE(OBJENGSTATE **ppThis, Dynamic *pParent, NvU32 createFlags)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    OBJENGSTATE *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(OBJENGSTATE), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(OBJENGSTATE));
+
+    pThis->__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent if there is one unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__OBJENGSTATE(pThis);
+    status = __nvoc_ctor_OBJENGSTATE(pThis);
+    if (status != NV_OK) goto __nvoc_objCreate_OBJENGSTATE_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_OBJENGSTATE_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
+    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+        portMemSet(pThis, 0, sizeof(OBJENGSTATE));
+    else
+    {
+        portMemFree(pThis);
+        *ppThis = NULL;
+    }
+
+    // coverity[leaked_storage:FALSE]
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_OBJENGSTATE(OBJENGSTATE **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+
+    status = __nvoc_objCreate_OBJENGSTATE(ppThis, pParent, createFlags);
+
+    return status;
+}
+
diff --git a/src/nvidia/generated/g_eng_state_nvoc.h b/src/nvidia/generated/g_eng_state_nvoc.h
new file mode 100644
index 0000000..f4b5379
--- /dev/null
+++ b/src/nvidia/generated/g_eng_state_nvoc.h
@@ -0,0 +1,413 @@
+
+#ifndef _G_ENG_STATE_NVOC_H_
+#define _G_ENG_STATE_NVOC_H_
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once +#include "g_eng_state_nvoc.h" + +#ifndef _ENG_STATE_H_ +#define _ENG_STATE_H_ + +/*! + * @file eng_state.h + * @brief Provides definitions for all OBJENGSTATE data structures and interfaces. + */ + +#include "core/core.h" +#include "nvoc/object.h" +#include "gpu/eng_desc.h" + +typedef enum ENGSTATE_STATE +{ + ENGSTATE_STATE_UNDEFINED = 0, + ENGSTATE_STATE_CONSTRUCT, + ENGSTATE_STATE_PRE_INIT, + ENGSTATE_STATE_INIT, + ENGSTATE_STATE_PRE_LOAD, + ENGSTATE_STATE_LOAD, + ENGSTATE_STATE_POST_LOAD, + ENGSTATE_STATE_PRE_UNLOAD, + ENGSTATE_STATE_UNLOAD, + ENGSTATE_STATE_POST_UNLOAD, + ENGSTATE_STATE_DESTROY, + ENGSTATE_STATE_COUNT // Keep this last +} ENGSTATE_STATE; + +// Stats data stored for every state transition +typedef struct ENGSTATE_STATS +{ + NvS32 memoryAllocCount; + NvS32 memoryAllocSize; + NvU32 transitionTimeUs; +} ENGSTATE_STATS; + +// Temporary transition data, not stored +typedef struct ENGSTATE_TRANSITION_DATA +{ + NvS64 memoryAllocCount; + NvS64 memoryAllocSize; + NvU64 transitionStartTimeNs; +} ENGSTATE_TRANSITION_DATA; + + +#define ENG_GET_FIFO(p) (engstateGetFifo(staticCast((p), OBJENGSTATE))) +#define ENG_GET_ENG_DESC(p) (staticCast((p), OBJENGSTATE)->engDesc) + + +/*! + * Defines the structure used to contain all generic information related to + * the OBJENGSTATE. + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_ENG_STATE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__OBJENGSTATE; +struct NVOC_METADATA__Object; +struct NVOC_VTABLE__OBJENGSTATE; + + +struct OBJENGSTATE { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__OBJENGSTATE *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Object __nvoc_base_Object; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; // engstate + + // 1 PDB property + NvBool PDB_PROP_ENGSTATE_IS_MISSING; + + // Data members + ENGDESCRIPTOR engDesc; + struct OBJGPU *pGpu; + ENGSTATE_STATE currentState; + ENGSTATE_STATS stats[11]; + char name[100]; +}; + + +// Vtable with 14 per-class function pointers +struct NVOC_VTABLE__OBJENGSTATE { + NV_STATUS (*__engstateConstructEngine__)(struct OBJGPU *, struct OBJENGSTATE * /*this*/, ENGDESCRIPTOR); // virtual + void (*__engstateInitMissing__)(struct OBJGPU *, struct OBJENGSTATE * /*this*/); // virtual + NV_STATUS (*__engstateStatePreInitLocked__)(struct OBJGPU *, struct OBJENGSTATE * /*this*/); // virtual + NV_STATUS (*__engstateStatePreInitUnlocked__)(struct OBJGPU *, struct OBJENGSTATE * /*this*/); // virtual + NV_STATUS (*__engstateStateInitLocked__)(struct OBJGPU *, struct OBJENGSTATE * /*this*/); // virtual + NV_STATUS (*__engstateStateInitUnlocked__)(struct OBJGPU *, struct OBJENGSTATE * /*this*/); // virtual + NV_STATUS (*__engstateStatePreLoad__)(struct OBJGPU *, struct OBJENGSTATE * /*this*/, NvU32); // virtual + NV_STATUS (*__engstateStateLoad__)(struct OBJGPU *, struct OBJENGSTATE * /*this*/, NvU32); // virtual + NV_STATUS (*__engstateStatePostLoad__)(struct OBJGPU *, struct OBJENGSTATE * /*this*/, NvU32); // virtual + NV_STATUS (*__engstateStatePreUnload__)(struct OBJGPU *, struct OBJENGSTATE * /*this*/, NvU32); // virtual + NV_STATUS (*__engstateStateUnload__)(struct OBJGPU *, struct OBJENGSTATE * /*this*/, NvU32); // virtual + NV_STATUS (*__engstateStatePostUnload__)(struct OBJGPU *, struct OBJENGSTATE * /*this*/, NvU32); // virtual + void (*__engstateStateDestroy__)(struct OBJGPU *, struct OBJENGSTATE * /*this*/); // virtual + NvBool (*__engstateIsPresent__)(struct OBJGPU *, struct OBJENGSTATE * /*this*/); // virtual +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__OBJENGSTATE { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; + const struct NVOC_VTABLE__OBJENGSTATE vtable; +}; + +#ifndef __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ +#define __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ +typedef struct OBJENGSTATE OBJENGSTATE; +#endif /* __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJENGSTATE +#define __nvoc_class_id_OBJENGSTATE 0x7a7ed6 +#endif /* __nvoc_class_id_OBJENGSTATE */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +#define __staticCast_OBJENGSTATE(pThis) \ + ((pThis)->__nvoc_pbase_OBJENGSTATE) + +#ifdef __nvoc_eng_state_h_disabled +#define __dynamicCast_OBJENGSTATE(pThis) ((OBJENGSTATE*) NULL) +#else //__nvoc_eng_state_h_disabled +#define __dynamicCast_OBJENGSTATE(pThis) \ + ((OBJENGSTATE*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJENGSTATE))) +#endif //__nvoc_eng_state_h_disabled + +// Property macros +#define PDB_PROP_ENGSTATE_IS_MISSING_BASE_CAST +#define PDB_PROP_ENGSTATE_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_OBJENGSTATE(OBJENGSTATE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJENGSTATE(OBJENGSTATE**, Dynamic*, NvU32); +#define __objCreate_OBJENGSTATE(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJENGSTATE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros +#define 
engstateConstructEngine_FNPTR(pEngstate) pEngstate->__nvoc_metadata_ptr->vtable.__engstateConstructEngine__ +#define engstateConstructEngine(pGpu, pEngstate, arg3) engstateConstructEngine_DISPATCH(pGpu, pEngstate, arg3) +#define engstateInitMissing_FNPTR(pEngstate) pEngstate->__nvoc_metadata_ptr->vtable.__engstateInitMissing__ +#define engstateInitMissing(pGpu, pEngstate) engstateInitMissing_DISPATCH(pGpu, pEngstate) +#define engstateStatePreInitLocked_FNPTR(pEngstate) pEngstate->__nvoc_metadata_ptr->vtable.__engstateStatePreInitLocked__ +#define engstateStatePreInitLocked(pGpu, pEngstate) engstateStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define engstateStatePreInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_metadata_ptr->vtable.__engstateStatePreInitUnlocked__ +#define engstateStatePreInitUnlocked(pGpu, pEngstate) engstateStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define engstateStateInitLocked_FNPTR(pEngstate) pEngstate->__nvoc_metadata_ptr->vtable.__engstateStateInitLocked__ +#define engstateStateInitLocked(pGpu, pEngstate) engstateStateInitLocked_DISPATCH(pGpu, pEngstate) +#define engstateStateInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_metadata_ptr->vtable.__engstateStateInitUnlocked__ +#define engstateStateInitUnlocked(pGpu, pEngstate) engstateStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define engstateStatePreLoad_FNPTR(pEngstate) pEngstate->__nvoc_metadata_ptr->vtable.__engstateStatePreLoad__ +#define engstateStatePreLoad(pGpu, pEngstate, arg3) engstateStatePreLoad_DISPATCH(pGpu, pEngstate, arg3) +#define engstateStateLoad_FNPTR(pEngstate) pEngstate->__nvoc_metadata_ptr->vtable.__engstateStateLoad__ +#define engstateStateLoad(pGpu, pEngstate, arg3) engstateStateLoad_DISPATCH(pGpu, pEngstate, arg3) +#define engstateStatePostLoad_FNPTR(pEngstate) pEngstate->__nvoc_metadata_ptr->vtable.__engstateStatePostLoad__ +#define engstateStatePostLoad(pGpu, pEngstate, arg3) engstateStatePostLoad_DISPATCH(pGpu, pEngstate, arg3) +#define engstateStatePreUnload_FNPTR(pEngstate) pEngstate->__nvoc_metadata_ptr->vtable.__engstateStatePreUnload__ +#define engstateStatePreUnload(pGpu, pEngstate, arg3) engstateStatePreUnload_DISPATCH(pGpu, pEngstate, arg3) +#define engstateStateUnload_FNPTR(pEngstate) pEngstate->__nvoc_metadata_ptr->vtable.__engstateStateUnload__ +#define engstateStateUnload(pGpu, pEngstate, arg3) engstateStateUnload_DISPATCH(pGpu, pEngstate, arg3) +#define engstateStatePostUnload_FNPTR(pEngstate) pEngstate->__nvoc_metadata_ptr->vtable.__engstateStatePostUnload__ +#define engstateStatePostUnload(pGpu, pEngstate, arg3) engstateStatePostUnload_DISPATCH(pGpu, pEngstate, arg3) +#define engstateStateDestroy_FNPTR(pEngstate) pEngstate->__nvoc_metadata_ptr->vtable.__engstateStateDestroy__ +#define engstateStateDestroy(pGpu, pEngstate) engstateStateDestroy_DISPATCH(pGpu, pEngstate) +#define engstateIsPresent_FNPTR(pEngstate) pEngstate->__nvoc_metadata_ptr->vtable.__engstateIsPresent__ +#define engstateIsPresent(pGpu, pEngstate) engstateIsPresent_DISPATCH(pGpu, pEngstate) + +// Dispatch functions +static inline NV_STATUS engstateConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate, ENGDESCRIPTOR arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__engstateConstructEngine__(pGpu, pEngstate, arg3); +} + +static inline void engstateInitMissing_DISPATCH(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate) { + pEngstate->__nvoc_metadata_ptr->vtable.__engstateInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS 
engstateStatePreInitLocked_DISPATCH(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__engstateStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS engstateStatePreInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__engstateStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS engstateStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__engstateStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS engstateStateInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__engstateStateInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS engstateStatePreLoad_DISPATCH(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__engstateStatePreLoad__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS engstateStateLoad_DISPATCH(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__engstateStateLoad__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS engstateStatePostLoad_DISPATCH(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__engstateStatePostLoad__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS engstateStatePreUnload_DISPATCH(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__engstateStatePreUnload__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS engstateStateUnload_DISPATCH(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__engstateStateUnload__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS engstateStatePostUnload_DISPATCH(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__engstateStatePostUnload__(pGpu, pEngstate, arg3); +} + +static inline void engstateStateDestroy_DISPATCH(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate) { + pEngstate->__nvoc_metadata_ptr->vtable.__engstateStateDestroy__(pGpu, pEngstate); +} + +static inline NvBool engstateIsPresent_DISPATCH(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__engstateIsPresent__(pGpu, pEngstate); +} + +NV_STATUS engstateConstructEngine_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate, ENGDESCRIPTOR arg3); + +void engstateInitMissing_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate); + +NV_STATUS engstateStatePreInitLocked_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate); + +NV_STATUS engstateStatePreInitUnlocked_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate); + +NV_STATUS engstateStateInitLocked_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate); + +NV_STATUS engstateStateInitUnlocked_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate); + +NV_STATUS engstateStatePreLoad_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate, NvU32 arg3); + +NV_STATUS engstateStateLoad_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate, NvU32 arg3); + +NV_STATUS engstateStatePostLoad_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate, NvU32 arg3); + +NV_STATUS engstateStatePreUnload_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE 
*pEngstate, NvU32 arg3); + +NV_STATUS engstateStateUnload_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate, NvU32 arg3); + +NV_STATUS engstateStatePostUnload_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate, NvU32 arg3); + +void engstateStateDestroy_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate); + +NvBool engstateIsPresent_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate); + +void engstateDestruct_IMPL(struct OBJENGSTATE *pEngstate); + +#define __nvoc_engstateDestruct(pEngstate) engstateDestruct_IMPL(pEngstate) +NV_STATUS engstateStatePreInit_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate); + +#ifdef __nvoc_eng_state_h_disabled +static inline NV_STATUS engstateStatePreInit(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_eng_state_h_disabled +#define engstateStatePreInit(pGpu, pEngstate) engstateStatePreInit_IMPL(pGpu, pEngstate) +#endif //__nvoc_eng_state_h_disabled + +NV_STATUS engstateStateInit_IMPL(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate); + +#ifdef __nvoc_eng_state_h_disabled +static inline NV_STATUS engstateStateInit(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_eng_state_h_disabled +#define engstateStateInit(pGpu, pEngstate) engstateStateInit_IMPL(pGpu, pEngstate) +#endif //__nvoc_eng_state_h_disabled + +ENGDESCRIPTOR engstateGetDescriptor_IMPL(struct OBJENGSTATE *pEngstate); + +#ifdef __nvoc_eng_state_h_disabled +static inline ENGDESCRIPTOR engstateGetDescriptor(struct OBJENGSTATE *pEngstate) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + ENGDESCRIPTOR ret; + portMemSet(&ret, 0, sizeof(ENGDESCRIPTOR)); + return ret; +} +#else //__nvoc_eng_state_h_disabled +#define engstateGetDescriptor(pEngstate) engstateGetDescriptor_IMPL(pEngstate) +#endif //__nvoc_eng_state_h_disabled + +struct OBJFIFO *engstateGetFifo_IMPL(struct OBJENGSTATE *pEngstate); + +#ifdef __nvoc_eng_state_h_disabled +static inline struct OBJFIFO *engstateGetFifo(struct OBJENGSTATE *pEngstate) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + return NULL; +} +#else //__nvoc_eng_state_h_disabled +#define engstateGetFifo(pEngstate) engstateGetFifo_IMPL(pEngstate) +#endif //__nvoc_eng_state_h_disabled + +NV_STATUS engstateConstructBase_IMPL(struct OBJENGSTATE *arg1, struct OBJGPU *arg2, ENGDESCRIPTOR arg3); + +#ifdef __nvoc_eng_state_h_disabled +static inline NV_STATUS engstateConstructBase(struct OBJENGSTATE *arg1, struct OBJGPU *arg2, ENGDESCRIPTOR arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_eng_state_h_disabled +#define engstateConstructBase(arg1, arg2, arg3) engstateConstructBase_IMPL(arg1, arg2, arg3) +#endif //__nvoc_eng_state_h_disabled + +void engstateLogStateTransitionPre_IMPL(struct OBJENGSTATE *arg1, ENGSTATE_STATE arg2, ENGSTATE_TRANSITION_DATA *arg3); + +#ifdef __nvoc_eng_state_h_disabled +static inline void engstateLogStateTransitionPre(struct OBJENGSTATE *arg1, ENGSTATE_STATE arg2, ENGSTATE_TRANSITION_DATA *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); +} +#else //__nvoc_eng_state_h_disabled +#define engstateLogStateTransitionPre(arg1, arg2, arg3) engstateLogStateTransitionPre_IMPL(arg1, arg2, arg3) +#endif //__nvoc_eng_state_h_disabled + +void engstateLogStateTransitionPost_IMPL(struct OBJENGSTATE *arg1, ENGSTATE_STATE 
arg2, ENGSTATE_TRANSITION_DATA *arg3); + +#ifdef __nvoc_eng_state_h_disabled +static inline void engstateLogStateTransitionPost(struct OBJENGSTATE *arg1, ENGSTATE_STATE arg2, ENGSTATE_TRANSITION_DATA *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); +} +#else //__nvoc_eng_state_h_disabled +#define engstateLogStateTransitionPost(arg1, arg2, arg3) engstateLogStateTransitionPost_IMPL(arg1, arg2, arg3) +#endif //__nvoc_eng_state_h_disabled + +const char *engstateGetName_IMPL(struct OBJENGSTATE *arg1); + +#ifdef __nvoc_eng_state_h_disabled +static inline const char *engstateGetName(struct OBJENGSTATE *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + return NULL; +} +#else //__nvoc_eng_state_h_disabled +#define engstateGetName(arg1) engstateGetName_IMPL(arg1) +#endif //__nvoc_eng_state_h_disabled + +#undef PRIVATE_FIELD + + +#endif // _ENG_STATE_H_ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_ENG_STATE_NVOC_H_ diff --git a/src/nvidia/generated/g_event_buffer_nvoc.c b/src/nvidia/generated/g_event_buffer_nvoc.c new file mode 100644 index 0000000..e34561e --- /dev/null +++ b/src/nvidia/generated/g_event_buffer_nvoc.c @@ -0,0 +1,526 @@ +#define NVOC_EVENT_BUFFER_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_event_buffer_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x63502b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_EventBuffer; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +// Forward declarations for EventBuffer +void __nvoc_init__RmResource(RmResource*); +void __nvoc_init__EventBuffer(EventBuffer*); +void __nvoc_init_funcTable_EventBuffer(EventBuffer*); +NV_STATUS __nvoc_ctor_EventBuffer(EventBuffer*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_EventBuffer(EventBuffer*); +void __nvoc_dtor_EventBuffer(EventBuffer*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__EventBuffer; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__EventBuffer; + +// Down-thunk(s) to bridge EventBuffer methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource 
*pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super + +// Up-thunk(s) to bridge EventBuffer methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super +NvBool __nvoc_up_thunk_RmResource_eventbufferAccessCallback(struct EventBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NvBool __nvoc_up_thunk_RmResource_eventbufferShareCallback(struct EventBuffer *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_RmResource_eventbufferGetMemInterMapParams(struct EventBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_eventbufferCheckMemInterUnmap(struct EventBuffer *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_eventbufferGetMemoryMappingDescriptor(struct EventBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_eventbufferControlSerialization_Prologue(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_eventbufferControlSerialization_Epilogue(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_eventbufferControl_Prologue(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_eventbufferControl_Epilogue(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool 
__nvoc_up_thunk_RsResource_eventbufferCanCopy(struct EventBuffer *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_eventbufferIsDuplicate(struct EventBuffer *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_eventbufferPreDestruct(struct EventBuffer *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_eventbufferControl(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_eventbufferControlFilter(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_eventbufferMap(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_RsResource_eventbufferUnmap(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_RsResource_eventbufferIsPartialUnmapSupported(struct EventBuffer *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_eventbufferMapTo(struct EventBuffer *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_eventbufferUnmapFrom(struct EventBuffer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_eventbufferGetRefCount(struct EventBuffer *pResource); // this +void __nvoc_up_thunk_RsResource_eventbufferAddAdditionalDependants(struct RsClient *pClient, struct EventBuffer *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_EventBuffer = +{ + /*classInfo=*/ { + /*size=*/ sizeof(EventBuffer), + /*classId=*/ classId(EventBuffer), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "EventBuffer", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_EventBuffer, + /*pCastInfo=*/ &__nvoc_castinfo__EventBuffer, + /*pExportInfo=*/ &__nvoc_export_info__EventBuffer +}; + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_EventBuffer[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdEnableEvent_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cd0101u, + /*paramSize=*/ sizeof(NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "eventbuffertBufferCtrlCmdEnableEvent" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdUpdateGet_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cd0102u, + /*paramSize=*/ sizeof(NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "eventbuffertBufferCtrlCmdUpdateGet" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdFlush_IMPL, 
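        // Annotation (illustrative, not generated code): each entry in this
        // exported-method table binds one RM control command to its handler.
        // The methodId carries the class in its high bits (the 0x90cd prefix
        // shared by every entry here is the event-buffer class) and the
        // command number in its low bits, so a control request for
        // NV_EVENT_BUFFER_CTRL_CMD_FLUSH would be routed roughly as:
        //
        //     control call with cmd 0x90cd0104 on an EventBuffer handle
        //       -> look up 0x90cd0104 in __nvoc_exported_method_def_EventBuffer
        //       -> validate the caller against /*flags=*/ 0x8u and the
        //          declared /*paramSize=*/ (0 here: flush takes no params)
        //       -> invoke eventbuffertBufferCtrlCmdFlush_IMPL
        //
        // The precise meaning of the flag bits is defined by the resource
        // server, outside this hunk.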
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cd0104u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "eventbuffertBufferCtrlCmdFlush" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cd0105u, + /*paramSize=*/ sizeof(NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "eventbuffertBufferCtrlCmdPostTelemetryEvent" +#endif + }, + +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__EventBuffer __nvoc_metadata__EventBuffer = { + .rtti.pClassDef = &__nvoc_class_def_EventBuffer, // (eventbuffer) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_EventBuffer, + .rtti.offset = 0, + .metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super + .metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.rtti.offset = NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource), + .metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^2 + .metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^3 + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^2 + .metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + + .vtable.__eventbufferAccessCallback__ = &__nvoc_up_thunk_RmResource_eventbufferAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__eventbufferShareCallback__ = &__nvoc_up_thunk_RmResource_eventbufferShareCallback, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__eventbufferGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_eventbufferGetMemInterMapParams, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + 
.vtable.__eventbufferCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_eventbufferCheckMemInterUnmap, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual + .vtable.__eventbufferGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_eventbufferGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual + .vtable.__eventbufferControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_eventbufferControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__eventbufferControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_eventbufferControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__eventbufferControl_Prologue__ = &__nvoc_up_thunk_RmResource_eventbufferControl_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__eventbufferControl_Epilogue__ = &__nvoc_up_thunk_RmResource_eventbufferControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__eventbufferCanCopy__ = &__nvoc_up_thunk_RsResource_eventbufferCanCopy, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__eventbufferIsDuplicate__ = &__nvoc_up_thunk_RsResource_eventbufferIsDuplicate, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__eventbufferPreDestruct__ = &__nvoc_up_thunk_RsResource_eventbufferPreDestruct, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__eventbufferControl__ = &__nvoc_up_thunk_RsResource_eventbufferControl, // virtual inherited (res) base (rmres) + 
.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &resControl_IMPL, // virtual + .vtable.__eventbufferControlFilter__ = &__nvoc_up_thunk_RsResource_eventbufferControlFilter, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__eventbufferMap__ = &__nvoc_up_thunk_RsResource_eventbufferMap, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &resMap_IMPL, // virtual + .vtable.__eventbufferUnmap__ = &__nvoc_up_thunk_RsResource_eventbufferUnmap, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &resUnmap_IMPL, // virtual + .vtable.__eventbufferIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_eventbufferIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__eventbufferMapTo__ = &__nvoc_up_thunk_RsResource_eventbufferMapTo, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__eventbufferUnmapFrom__ = &__nvoc_up_thunk_RsResource_eventbufferUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__eventbufferGetRefCount__ = &__nvoc_up_thunk_RsResource_eventbufferGetRefCount, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__eventbufferAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_eventbufferAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__EventBuffer = { + .numRelatives = 5, + .relatives = { + &__nvoc_metadata__EventBuffer.rtti, // [0]: (eventbuffer) this + 
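+        // The relatives are listed in a fixed order -- the class itself at
+        // index 0, then ancestors outward -- with each NVOC_RTTI carrying
+        // the byte offset of that base within EventBuffer. A dynamic
+        // down-cast can then be a linear scan plus a pointer adjustment,
+        // roughly (a sketch, not the actual __nvoc_dynamicCast body):
+        //
+        //     for (i = 0; i < pCastInfo->numRelatives; i++)
+        //         if (pCastInfo->relatives[i]->pClassDef == pTargetClassDef)
+        //             return (void *)((NvU8 *)pBase + pCastInfo->relatives[i]->offset);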
&__nvoc_metadata__EventBuffer.metadata__RmResource.rtti, // [1]: (rmres) super + &__nvoc_metadata__EventBuffer.metadata__RmResource.metadata__RsResource.rtti, // [2]: (res) super^2 + &__nvoc_metadata__EventBuffer.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [3]: (obj) super^3 + &__nvoc_metadata__EventBuffer.metadata__RmResource.metadata__RmResourceCommon.rtti, // [4]: (rmrescmn) super^2 + } +}; + +// 21 up-thunk(s) defined to bridge methods in EventBuffer to superclasses + +// eventbufferAccessCallback: virtual inherited (rmres) base (rmres) +NvBool __nvoc_up_thunk_RmResource_eventbufferAccessCallback(struct EventBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// eventbufferShareCallback: virtual inherited (rmres) base (rmres) +NvBool __nvoc_up_thunk_RmResource_eventbufferShareCallback(struct EventBuffer *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// eventbufferGetMemInterMapParams: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_eventbufferGetMemInterMapParams(struct EventBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource)), pParams); +} + +// eventbufferCheckMemInterUnmap: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_eventbufferCheckMemInterUnmap(struct EventBuffer *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource)), bSubdeviceHandleProvided); +} + +// eventbufferGetMemoryMappingDescriptor: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_eventbufferGetMemoryMappingDescriptor(struct EventBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource)), ppMemDesc); +} + +// eventbufferControlSerialization_Prologue: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_eventbufferControlSerialization_Prologue(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// eventbufferControlSerialization_Epilogue: virtual inherited (rmres) base (rmres) +void __nvoc_up_thunk_RmResource_eventbufferControlSerialization_Epilogue(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// eventbufferControl_Prologue: virtual inherited (rmres) base (rmres) +NV_STATUS 
__nvoc_up_thunk_RmResource_eventbufferControl_Prologue(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// eventbufferControl_Epilogue: virtual inherited (rmres) base (rmres) +void __nvoc_up_thunk_RmResource_eventbufferControl_Epilogue(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// eventbufferCanCopy: virtual inherited (res) base (rmres) +NvBool __nvoc_up_thunk_RsResource_eventbufferCanCopy(struct EventBuffer *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// eventbufferIsDuplicate: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_eventbufferIsDuplicate(struct EventBuffer *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate); +} + +// eventbufferPreDestruct: virtual inherited (res) base (rmres) +void __nvoc_up_thunk_RsResource_eventbufferPreDestruct(struct EventBuffer *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// eventbufferControl: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_eventbufferControl(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// eventbufferControlFilter: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_eventbufferControlFilter(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// eventbufferMap: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_eventbufferMap(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams, pCpuMapping); +} + +// eventbufferUnmap: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_eventbufferUnmap(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pCpuMapping); +} + +// eventbufferIsPartialUnmapSupported: inline virtual inherited (res) base (rmres) body +NvBool __nvoc_up_thunk_RsResource_eventbufferIsPartialUnmapSupported(struct EventBuffer 
*pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// eventbufferMapTo: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_eventbufferMapTo(struct EventBuffer *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// eventbufferUnmapFrom: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_eventbufferUnmapFrom(struct EventBuffer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// eventbufferGetRefCount: virtual inherited (res) base (rmres) +NvU32 __nvoc_up_thunk_RsResource_eventbufferGetRefCount(struct EventBuffer *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// eventbufferAddAdditionalDependants: virtual inherited (res) base (rmres) +void __nvoc_up_thunk_RsResource_eventbufferAddAdditionalDependants(struct RsClient *pClient, struct EventBuffer *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__EventBuffer = +{ + /*numEntries=*/ 4, + /*pExportEntries=*/ __nvoc_exported_method_def_EventBuffer +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_EventBuffer(EventBuffer *pThis) { + __nvoc_eventbufferDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_EventBuffer(EventBuffer *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_EventBuffer(EventBuffer *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_EventBuffer_fail_RmResource; + __nvoc_init_dataField_EventBuffer(pThis); + + status = __nvoc_eventbufferConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_EventBuffer_fail__init; + goto __nvoc_ctor_EventBuffer_exit; // Success + +__nvoc_ctor_EventBuffer_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_EventBuffer_fail_RmResource: +__nvoc_ctor_EventBuffer_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_EventBuffer_1(EventBuffer *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + // eventbuffertBufferCtrlCmdEnableEvent -- exported (id=0x90cd0101) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + pThis->__eventbuffertBufferCtrlCmdEnableEvent__ = &eventbuffertBufferCtrlCmdEnableEvent_IMPL; +#endif + + // eventbuffertBufferCtrlCmdUpdateGet -- exported (id=0x90cd0102) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + pThis->__eventbuffertBufferCtrlCmdUpdateGet__ = 
&eventbuffertBufferCtrlCmdUpdateGet_IMPL; +#endif + + // eventbuffertBufferCtrlCmdFlush -- exported (id=0x90cd0104) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + pThis->__eventbuffertBufferCtrlCmdFlush__ = &eventbuffertBufferCtrlCmdFlush_IMPL; +#endif + + // eventbuffertBufferCtrlCmdPostTelemetryEvent -- exported (id=0x90cd0105) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + pThis->__eventbuffertBufferCtrlCmdPostTelemetryEvent__ = &eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL; +#endif +} // End __nvoc_init_funcTable_EventBuffer_1 with approximately 4 basic block(s). + + +// Initialize vtable(s) for 25 virtual method(s). +void __nvoc_init_funcTable_EventBuffer(EventBuffer *pThis) { + + // Initialize vtable(s) with 4 per-object function pointer(s). + __nvoc_init_funcTable_EventBuffer_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__EventBuffer(EventBuffer *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^3 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^2 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^2 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; // (rmres) super + pThis->__nvoc_pbase_EventBuffer = pThis; // (eventbuffer) this + + // Recurse to superclass initialization function(s). + __nvoc_init__RmResource(&pThis->__nvoc_base_RmResource); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__EventBuffer.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^3 + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__EventBuffer.metadata__RmResource.metadata__RsResource; // (res) super^2 + pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__EventBuffer.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^2 + pThis->__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__EventBuffer.metadata__RmResource; // (rmres) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__EventBuffer; // (eventbuffer) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_EventBuffer(pThis); +} + +NV_STATUS __nvoc_objCreate_EventBuffer(EventBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + EventBuffer *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(EventBuffer), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(EventBuffer)); + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. 
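+    // (The linkage below is mirrored by the cleanup label at the end of this
+    // function: a child attached here with objAddChild is detached with
+    // objRemoveChild if construction fails. With no parent, or with
+    // NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY set, the object is left
+    // unparented.)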
+ if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__EventBuffer(pThis); + status = __nvoc_ctor_EventBuffer(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_EventBuffer_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_EventBuffer_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(EventBuffer)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_EventBuffer(EventBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_EventBuffer(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_event_buffer_nvoc.h b/src/nvidia/generated/g_event_buffer_nvoc.h new file mode 100644 index 0000000..7cd3e29 --- /dev/null +++ b/src/nvidia/generated/g_event_buffer_nvoc.h @@ -0,0 +1,386 @@ + +#ifndef _G_EVENT_BUFFER_NVOC_H_ +#define _G_EVENT_BUFFER_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#pragma once +#include "g_event_buffer_nvoc.h" + +#ifndef _EVENT_BUFFER_H_ +#define _EVENT_BUFFER_H_ + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "rmapi/event.h" +#include "rmapi/resource.h" +#include "ctrl/ctrl90cd.h" +#include "eventbufferproducer.h" + + +struct Memory; + +#ifndef __NVOC_CLASS_Memory_TYPEDEF__ +#define __NVOC_CLASS_Memory_TYPEDEF__ +typedef struct Memory Memory; +#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Memory +#define __nvoc_class_id_Memory 0x4789f2 +#endif /* __nvoc_class_id_Memory */ + + + +typedef struct +{ + // + // Addr: user RO address + // Priv: return cookie to be passed to unmap + // + NvP64 headerAddr; + NvP64 headerPriv; + NvP64 recordBuffAddr; + NvP64 recordBuffPriv; + NvP64 vardataBuffAddr; + NvP64 vardataBuffPriv; +} EVENT_BUFFER_MAP_INFO; + +// This class shares buffers between kernel and usermode + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_EVENT_BUFFER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__EventBuffer; +struct NVOC_METADATA__RmResource; +struct NVOC_VTABLE__EventBuffer; + + +struct EventBuffer { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__EventBuffer *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct RmResource __nvoc_base_RmResource; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^3 + struct RsResource *__nvoc_pbase_RsResource; // res super^2 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^2 + struct RmResource *__nvoc_pbase_RmResource; // rmres super + struct EventBuffer *__nvoc_pbase_EventBuffer; // eventbuffer + + // Vtable with 4 per-object function pointers + NV_STATUS (*__eventbuffertBufferCtrlCmdEnableEvent__)(struct EventBuffer * /*this*/, NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *); // exported (id=0x90cd0101) + NV_STATUS (*__eventbuffertBufferCtrlCmdUpdateGet__)(struct EventBuffer * /*this*/, NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *); // exported (id=0x90cd0102) + NV_STATUS (*__eventbuffertBufferCtrlCmdFlush__)(struct EventBuffer * /*this*/); // exported (id=0x90cd0104) + NV_STATUS (*__eventbuffertBufferCtrlCmdPostTelemetryEvent__)(struct EventBuffer * /*this*/, NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *); // exported (id=0x90cd0105) + + // Data members + struct MEMORY_DESCRIPTOR *pHeaderDesc; + struct MEMORY_DESCRIPTOR *pRecordBufDesc; + struct MEMORY_DESCRIPTOR *pVardataBufDesc; + NvHandle hSubDevice; + NvU32 subDeviceInst; + EVENT_BUFFER_MAP_INFO kernelMapInfo; + EVENT_BUFFER_MAP_INFO clientMapInfo; + NvHandle hClient; + NvU16 seqNo; + NvBool bNotifyPending; + PEVENTNOTIFICATION pListeners; + EVENT_BUFFER_PRODUCER_INFO producerInfo; + struct Memory *pHeader; + struct Memory *pRecord; + struct Memory *pVardata; + NvHandle hInternalClient; + NvHandle hInternalDevice; + NvHandle hInternalSubdevice; + NvHandle hInternalHeader; + NvHandle hInternalBuffer; +}; + + +// Vtable with 21 per-class function pointers +struct NVOC_VTABLE__EventBuffer { + NvBool (*__eventbufferAccessCallback__)(struct EventBuffer * /*this*/, struct RsClient *, void 
*, RsAccessRight); // virtual inherited (rmres) base (rmres) + NvBool (*__eventbufferShareCallback__)(struct EventBuffer * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__eventbufferGetMemInterMapParams__)(struct EventBuffer * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__eventbufferCheckMemInterUnmap__)(struct EventBuffer * /*this*/, NvBool); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__eventbufferGetMemoryMappingDescriptor__)(struct EventBuffer * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__eventbufferControlSerialization_Prologue__)(struct EventBuffer * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + void (*__eventbufferControlSerialization_Epilogue__)(struct EventBuffer * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__eventbufferControl_Prologue__)(struct EventBuffer * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + void (*__eventbufferControl_Epilogue__)(struct EventBuffer * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + NvBool (*__eventbufferCanCopy__)(struct EventBuffer * /*this*/); // virtual inherited (res) base (rmres) + NV_STATUS (*__eventbufferIsDuplicate__)(struct EventBuffer * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (rmres) + void (*__eventbufferPreDestruct__)(struct EventBuffer * /*this*/); // virtual inherited (res) base (rmres) + NV_STATUS (*__eventbufferControl__)(struct EventBuffer * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (rmres) + NV_STATUS (*__eventbufferControlFilter__)(struct EventBuffer * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (rmres) + NV_STATUS (*__eventbufferMap__)(struct EventBuffer * /*this*/, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (res) base (rmres) + NV_STATUS (*__eventbufferUnmap__)(struct EventBuffer * /*this*/, struct CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (res) base (rmres) + NvBool (*__eventbufferIsPartialUnmapSupported__)(struct EventBuffer * /*this*/); // inline virtual inherited (res) base (rmres) body + NV_STATUS (*__eventbufferMapTo__)(struct EventBuffer * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (rmres) + NV_STATUS (*__eventbufferUnmapFrom__)(struct EventBuffer * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (rmres) + NvU32 (*__eventbufferGetRefCount__)(struct EventBuffer * /*this*/); // virtual inherited (res) base (rmres) + void (*__eventbufferAddAdditionalDependants__)(struct RsClient *, struct EventBuffer * /*this*/, RsResourceRef *); // virtual inherited (res) base (rmres) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__EventBuffer { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__RmResource metadata__RmResource; + const struct NVOC_VTABLE__EventBuffer vtable; +}; + +#ifndef __NVOC_CLASS_EventBuffer_TYPEDEF__ +#define __NVOC_CLASS_EventBuffer_TYPEDEF__ +typedef struct EventBuffer EventBuffer; +#endif /* 
__NVOC_CLASS_EventBuffer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_EventBuffer +#define __nvoc_class_id_EventBuffer 0x63502b +#endif /* __nvoc_class_id_EventBuffer */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_EventBuffer; + +#define __staticCast_EventBuffer(pThis) \ + ((pThis)->__nvoc_pbase_EventBuffer) + +#ifdef __nvoc_event_buffer_h_disabled +#define __dynamicCast_EventBuffer(pThis) ((EventBuffer*) NULL) +#else //__nvoc_event_buffer_h_disabled +#define __dynamicCast_EventBuffer(pThis) \ + ((EventBuffer*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(EventBuffer))) +#endif //__nvoc_event_buffer_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_EventBuffer(EventBuffer**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_EventBuffer(EventBuffer**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_EventBuffer(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_EventBuffer((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define eventbuffertBufferCtrlCmdEnableEvent_FNPTR(pEventBuffer) pEventBuffer->__eventbuffertBufferCtrlCmdEnableEvent__ +#define eventbuffertBufferCtrlCmdEnableEvent(pEventBuffer, pEnableParams) eventbuffertBufferCtrlCmdEnableEvent_DISPATCH(pEventBuffer, pEnableParams) +#define eventbuffertBufferCtrlCmdUpdateGet_FNPTR(pEventBuffer) pEventBuffer->__eventbuffertBufferCtrlCmdUpdateGet__ +#define eventbuffertBufferCtrlCmdUpdateGet(pEventBuffer, pUpdateParams) eventbuffertBufferCtrlCmdUpdateGet_DISPATCH(pEventBuffer, pUpdateParams) +#define eventbuffertBufferCtrlCmdFlush_FNPTR(pEventBuffer) pEventBuffer->__eventbuffertBufferCtrlCmdFlush__ +#define eventbuffertBufferCtrlCmdFlush(pEventBuffer) eventbuffertBufferCtrlCmdFlush_DISPATCH(pEventBuffer) +#define eventbuffertBufferCtrlCmdPostTelemetryEvent_FNPTR(pEventBuffer) pEventBuffer->__eventbuffertBufferCtrlCmdPostTelemetryEvent__ +#define eventbuffertBufferCtrlCmdPostTelemetryEvent(pEventBuffer, pPostTelemetryEvent) eventbuffertBufferCtrlCmdPostTelemetryEvent_DISPATCH(pEventBuffer, pPostTelemetryEvent) +#define eventbufferAccessCallback_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define eventbufferAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) eventbufferAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define eventbufferShareCallback_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define eventbufferShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) eventbufferShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define eventbufferGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define eventbufferGetMemInterMapParams(pRmResource, pParams) eventbufferGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define eventbufferCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define eventbufferCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) eventbufferCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define eventbufferGetMemoryMappingDescriptor_FNPTR(pRmResource) 
pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define eventbufferGetMemoryMappingDescriptor(pRmResource, ppMemDesc) eventbufferGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define eventbufferControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define eventbufferControlSerialization_Prologue(pResource, pCallContext, pParams) eventbufferControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define eventbufferControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define eventbufferControlSerialization_Epilogue(pResource, pCallContext, pParams) eventbufferControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define eventbufferControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define eventbufferControl_Prologue(pResource, pCallContext, pParams) eventbufferControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define eventbufferControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define eventbufferControl_Epilogue(pResource, pCallContext, pParams) eventbufferControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define eventbufferCanCopy_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define eventbufferCanCopy(pResource) eventbufferCanCopy_DISPATCH(pResource) +#define eventbufferIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define eventbufferIsDuplicate(pResource, hMemory, pDuplicate) eventbufferIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define eventbufferPreDestruct_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define eventbufferPreDestruct(pResource) eventbufferPreDestruct_DISPATCH(pResource) +#define eventbufferControl_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControl__ +#define eventbufferControl(pResource, pCallContext, pParams) eventbufferControl_DISPATCH(pResource, pCallContext, pParams) +#define eventbufferControlFilter_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define eventbufferControlFilter(pResource, pCallContext, pParams) eventbufferControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define eventbufferMap_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMap__ +#define eventbufferMap(pResource, pCallContext, pParams, pCpuMapping) eventbufferMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define eventbufferUnmap_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmap__ +#define eventbufferUnmap(pResource, pCallContext, pCpuMapping) eventbufferUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define eventbufferIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define 
eventbufferIsPartialUnmapSupported(pResource) eventbufferIsPartialUnmapSupported_DISPATCH(pResource) +#define eventbufferMapTo_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define eventbufferMapTo(pResource, pParams) eventbufferMapTo_DISPATCH(pResource, pParams) +#define eventbufferUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define eventbufferUnmapFrom(pResource, pParams) eventbufferUnmapFrom_DISPATCH(pResource, pParams) +#define eventbufferGetRefCount_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define eventbufferGetRefCount(pResource) eventbufferGetRefCount_DISPATCH(pResource) +#define eventbufferAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define eventbufferAddAdditionalDependants(pClient, pResource, pReference) eventbufferAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NV_STATUS eventbuffertBufferCtrlCmdEnableEvent_DISPATCH(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *pEnableParams) { + return pEventBuffer->__eventbuffertBufferCtrlCmdEnableEvent__(pEventBuffer, pEnableParams); +} + +static inline NV_STATUS eventbuffertBufferCtrlCmdUpdateGet_DISPATCH(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *pUpdateParams) { + return pEventBuffer->__eventbuffertBufferCtrlCmdUpdateGet__(pEventBuffer, pUpdateParams); +} + +static inline NV_STATUS eventbuffertBufferCtrlCmdFlush_DISPATCH(struct EventBuffer *pEventBuffer) { + return pEventBuffer->__eventbuffertBufferCtrlCmdFlush__(pEventBuffer); +} + +static inline NV_STATUS eventbuffertBufferCtrlCmdPostTelemetryEvent_DISPATCH(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *pPostTelemetryEvent) { + return pEventBuffer->__eventbuffertBufferCtrlCmdPostTelemetryEvent__(pEventBuffer, pPostTelemetryEvent); +} + +static inline NvBool eventbufferAccessCallback_DISPATCH(struct EventBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__eventbufferAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool eventbufferShareCallback_DISPATCH(struct EventBuffer *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__eventbufferShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS eventbufferGetMemInterMapParams_DISPATCH(struct EventBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__eventbufferGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS eventbufferCheckMemInterUnmap_DISPATCH(struct EventBuffer *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__eventbufferCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS eventbufferGetMemoryMappingDescriptor_DISPATCH(struct EventBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return 
pRmResource->__nvoc_metadata_ptr->vtable.__eventbufferGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS eventbufferControlSerialization_Prologue_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__eventbufferControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void eventbufferControlSerialization_Epilogue_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__eventbufferControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS eventbufferControl_Prologue_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__eventbufferControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void eventbufferControl_Epilogue_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__eventbufferControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool eventbufferCanCopy_DISPATCH(struct EventBuffer *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__eventbufferCanCopy__(pResource); +} + +static inline NV_STATUS eventbufferIsDuplicate_DISPATCH(struct EventBuffer *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__eventbufferIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void eventbufferPreDestruct_DISPATCH(struct EventBuffer *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__eventbufferPreDestruct__(pResource); +} + +static inline NV_STATUS eventbufferControl_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__eventbufferControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS eventbufferControlFilter_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__eventbufferControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS eventbufferMap_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__eventbufferMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS eventbufferUnmap_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__eventbufferUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool eventbufferIsPartialUnmapSupported_DISPATCH(struct EventBuffer *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__eventbufferIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS eventbufferMapTo_DISPATCH(struct EventBuffer *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__eventbufferMapTo__(pResource, pParams); +} + +static inline NV_STATUS eventbufferUnmapFrom_DISPATCH(struct EventBuffer *pResource, 
RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__eventbufferUnmapFrom__(pResource, pParams); +} + +static inline NvU32 eventbufferGetRefCount_DISPATCH(struct EventBuffer *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__eventbufferGetRefCount__(pResource); +} + +static inline void eventbufferAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct EventBuffer *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__eventbufferAddAdditionalDependants__(pClient, pResource, pReference); +} + +NV_STATUS eventbuffertBufferCtrlCmdEnableEvent_IMPL(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *pEnableParams); + +NV_STATUS eventbuffertBufferCtrlCmdUpdateGet_IMPL(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *pUpdateParams); + +NV_STATUS eventbuffertBufferCtrlCmdFlush_IMPL(struct EventBuffer *pEventBuffer); + +NV_STATUS eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *pPostTelemetryEvent); + +NV_STATUS eventbufferConstruct_IMPL(struct EventBuffer *arg_pEventBuffer, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_eventbufferConstruct(arg_pEventBuffer, arg_pCallContext, arg_pParams) eventbufferConstruct_IMPL(arg_pEventBuffer, arg_pCallContext, arg_pParams) +void eventbufferDestruct_IMPL(struct EventBuffer *pEventBuffer); + +#define __nvoc_eventbufferDestruct(pEventBuffer) eventbufferDestruct_IMPL(pEventBuffer) +#undef PRIVATE_FIELD + + +NV_STATUS eventBufferAdd(struct EventBuffer *pEventBuffer, void* pEventData, NvU32 recordType, NvBool* bNotify, NvP64 *pHandle); + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_EVENT_BUFFER_NVOC_H_ diff --git a/src/nvidia/generated/g_event_nvoc.c b/src/nvidia/generated/g_event_nvoc.c new file mode 100644 index 0000000..ee0e8dd --- /dev/null +++ b/src/nvidia/generated/g_event_nvoc.c @@ -0,0 +1,920 @@ +#define NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_event_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xd5f150 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NotifShare; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +// Forward declarations for NotifShare +void __nvoc_init__RsShared(RsShared*); +void __nvoc_init__NotifShare(NotifShare*); +void __nvoc_init_funcTable_NotifShare(NotifShare*); +NV_STATUS __nvoc_ctor_NotifShare(NotifShare*); +void __nvoc_init_dataField_NotifShare(NotifShare*); +void __nvoc_dtor_NotifShare(NotifShare*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__NotifShare; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__NotifShare; + +// Down-thunk(s) to bridge NotifShare methods from ancestors (if any) + +// Up-thunk(s) to bridge NotifShare methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_NotifShare = +{ + /*classInfo=*/ { + /*size=*/ sizeof(NotifShare), + /*classId=*/ classId(NotifShare), + 
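+        // classId(NotifShare) must be unique across all NVOC classes; the
+        // DEBUG-only __nvoc_class_id_uniqueness_check__0xd5f150 global
+        // defined above encodes the id in a symbol name, so an accidental
+        // collision between two classes surfaces as a duplicate-symbol link
+        // error rather than a silent bad cast. (A reading of the pattern,
+        // inferred from the per-class check variables in this file.)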
/*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "NotifShare", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_NotifShare, + /*pCastInfo=*/ &__nvoc_castinfo__NotifShare, + /*pExportInfo=*/ &__nvoc_export_info__NotifShare +}; + + +// Metadata with per-class RTTI with ancestor(s) +static const struct NVOC_METADATA__NotifShare __nvoc_metadata__NotifShare = { + .rtti.pClassDef = &__nvoc_class_def_NotifShare, // (shrnotif) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_NotifShare, + .rtti.offset = 0, + .metadata__RsShared.rtti.pClassDef = &__nvoc_class_def_RsShared, // (shr) super + .metadata__RsShared.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RsShared.rtti.offset = NV_OFFSETOF(NotifShare, __nvoc_base_RsShared), + .metadata__RsShared.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^2 + .metadata__RsShared.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RsShared.metadata__Object.rtti.offset = NV_OFFSETOF(NotifShare, __nvoc_base_RsShared.__nvoc_base_Object), +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__NotifShare = { + .numRelatives = 3, + .relatives = { + &__nvoc_metadata__NotifShare.rtti, // [0]: (shrnotif) this + &__nvoc_metadata__NotifShare.metadata__RsShared.rtti, // [1]: (shr) super + &__nvoc_metadata__NotifShare.metadata__RsShared.metadata__Object.rtti, // [2]: (obj) super^2 + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__NotifShare = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsShared(RsShared*); +void __nvoc_dtor_NotifShare(NotifShare *pThis) { + __nvoc_shrnotifDestruct(pThis); + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_NotifShare(NotifShare *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsShared(RsShared* ); +NV_STATUS __nvoc_ctor_NotifShare(NotifShare *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared); + if (status != NV_OK) goto __nvoc_ctor_NotifShare_fail_RsShared; + __nvoc_init_dataField_NotifShare(pThis); + + status = __nvoc_shrnotifConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_NotifShare_fail__init; + goto __nvoc_ctor_NotifShare_exit; // Success + +__nvoc_ctor_NotifShare_fail__init: + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); +__nvoc_ctor_NotifShare_fail_RsShared: +__nvoc_ctor_NotifShare_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_NotifShare_1(NotifShare *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_NotifShare_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_NotifShare(NotifShare *pThis) { + __nvoc_init_funcTable_NotifShare_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__NotifShare(NotifShare *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object; // (obj) super^2 + pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared; // (shr) super + pThis->__nvoc_pbase_NotifShare = pThis; // (shrnotif) this + + // Recurse to superclass initialization function(s). 
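+    // (Order matters here: the base init below installs the metadata
+    // pointers a standalone RsShared would use, and the assignments that
+    // follow overwrite them with the NotifShare-rooted metadata, so the
+    // most-derived view always wins.)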
+ __nvoc_init__RsShared(&pThis->__nvoc_base_RsShared); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_RsShared.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__NotifShare.metadata__RsShared.metadata__Object; // (obj) super^2 + pThis->__nvoc_base_RsShared.__nvoc_metadata_ptr = &__nvoc_metadata__NotifShare.metadata__RsShared; // (shr) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__NotifShare; // (shrnotif) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_NotifShare(pThis); +} + +NV_STATUS __nvoc_objCreate_NotifShare(NotifShare **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + NotifShare *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(NotifShare), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(NotifShare)); + + pThis->__nvoc_base_RsShared.__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__NotifShare(pThis); + status = __nvoc_ctor_NotifShare(pThis); + if (status != NV_OK) goto __nvoc_objCreate_NotifShare_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_NotifShare_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. 
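+    // (__nvoc_ctor_NotifShare unwinds through its fail labels and destructs
+    // any base it had already constructed, so this path only needs to unlink
+    // the object and release or re-zero its storage.)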
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(NotifShare)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_NotifShare(NotifShare **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_NotifShare(ppThis, pParent, createFlags); + + return status; +} + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xa4ecfc = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Event; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +// Forward declarations for Event +void __nvoc_init__RmResource(RmResource*); +void __nvoc_init__Event(Event*); +void __nvoc_init_funcTable_Event(Event*); +NV_STATUS __nvoc_ctor_Event(Event*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_Event(Event*); +void __nvoc_dtor_Event(Event*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__Event; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__Event; + +// Down-thunk(s) to bridge Event methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super + +// Up-thunk(s) to bridge Event methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct 
RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super +NvBool __nvoc_up_thunk_RmResource_eventAccessCallback(struct Event *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NvBool __nvoc_up_thunk_RmResource_eventShareCallback(struct Event *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_RmResource_eventGetMemInterMapParams(struct Event *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_eventCheckMemInterUnmap(struct Event *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_eventGetMemoryMappingDescriptor(struct Event *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_eventControlSerialization_Prologue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_eventControlSerialization_Epilogue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_eventControl_Prologue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_eventControl_Epilogue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_eventCanCopy(struct Event *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_eventIsDuplicate(struct Event *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_eventPreDestruct(struct Event *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_eventControl(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_eventControlFilter(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_eventMap(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_RsResource_eventUnmap(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_RsResource_eventIsPartialUnmapSupported(struct Event *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_eventMapTo(struct Event *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_eventUnmapFrom(struct Event *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_eventGetRefCount(struct Event *pResource); // 
this +void __nvoc_up_thunk_RsResource_eventAddAdditionalDependants(struct RsClient *pClient, struct Event *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_Event = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Event), + /*classId=*/ classId(Event), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Event", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Event, + /*pCastInfo=*/ &__nvoc_castinfo__Event, + /*pExportInfo=*/ &__nvoc_export_info__Event +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__Event __nvoc_metadata__Event = { + .rtti.pClassDef = &__nvoc_class_def_Event, // (event) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Event, + .rtti.offset = 0, + .metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super + .metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.rtti.offset = NV_OFFSETOF(Event, __nvoc_base_RmResource), + .metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^2 + .metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^3 + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^2 + .metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + + .vtable.__eventAccessCallback__ = &__nvoc_up_thunk_RmResource_eventAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__eventShareCallback__ = &__nvoc_up_thunk_RmResource_eventShareCallback, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__eventGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_eventGetMemInterMapParams, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + .vtable.__eventCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_eventCheckMemInterUnmap, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual + .vtable.__eventGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_eventGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres) + 
.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual + .vtable.__eventControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_eventControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__eventControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_eventControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__eventControl_Prologue__ = &__nvoc_up_thunk_RmResource_eventControl_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__eventControl_Epilogue__ = &__nvoc_up_thunk_RmResource_eventControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__eventCanCopy__ = &__nvoc_up_thunk_RsResource_eventCanCopy, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__eventIsDuplicate__ = &__nvoc_up_thunk_RsResource_eventIsDuplicate, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__eventPreDestruct__ = &__nvoc_up_thunk_RsResource_eventPreDestruct, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__eventControl__ = &__nvoc_up_thunk_RsResource_eventControl, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &resControl_IMPL, // virtual + .vtable.__eventControlFilter__ = &__nvoc_up_thunk_RsResource_eventControlFilter, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + 
.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__eventMap__ = &__nvoc_up_thunk_RsResource_eventMap, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &resMap_IMPL, // virtual + .vtable.__eventUnmap__ = &__nvoc_up_thunk_RsResource_eventUnmap, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &resUnmap_IMPL, // virtual + .vtable.__eventIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_eventIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__eventMapTo__ = &__nvoc_up_thunk_RsResource_eventMapTo, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__eventUnmapFrom__ = &__nvoc_up_thunk_RsResource_eventUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__eventGetRefCount__ = &__nvoc_up_thunk_RsResource_eventGetRefCount, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__eventAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_eventAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__Event = { + .numRelatives = 5, + .relatives = { + &__nvoc_metadata__Event.rtti, // [0]: (event) this + &__nvoc_metadata__Event.metadata__RmResource.rtti, // [1]: (rmres) super + &__nvoc_metadata__Event.metadata__RmResource.metadata__RsResource.rtti, // [2]: (res) super^2 + &__nvoc_metadata__Event.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [3]: (obj) super^3 + &__nvoc_metadata__Event.metadata__RmResource.metadata__RmResourceCommon.rtti, // [4]: (rmrescmn) super^2 + } +}; + +// 21 up-thunk(s) defined to bridge methods in Event to superclasses + +// eventAccessCallback: virtual inherited (rmres) base (rmres) +NvBool __nvoc_up_thunk_RmResource_eventAccessCallback(struct Event *pResource, struct RsClient *pInvokingClient, void 
*pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// eventShareCallback: virtual inherited (rmres) base (rmres) +NvBool __nvoc_up_thunk_RmResource_eventShareCallback(struct Event *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// eventGetMemInterMapParams: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_eventGetMemInterMapParams(struct Event *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource)), pParams); +} + +// eventCheckMemInterUnmap: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_eventCheckMemInterUnmap(struct Event *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource)), bSubdeviceHandleProvided); +} + +// eventGetMemoryMappingDescriptor: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_eventGetMemoryMappingDescriptor(struct Event *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource)), ppMemDesc); +} + +// eventControlSerialization_Prologue: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_eventControlSerialization_Prologue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// eventControlSerialization_Epilogue: virtual inherited (rmres) base (rmres) +void __nvoc_up_thunk_RmResource_eventControlSerialization_Epilogue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// eventControl_Prologue: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_eventControl_Prologue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// eventControl_Epilogue: virtual inherited (rmres) base (rmres) +void __nvoc_up_thunk_RmResource_eventControl_Epilogue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// eventCanCopy: virtual inherited (res) base (rmres) +NvBool __nvoc_up_thunk_RsResource_eventCanCopy(struct Event *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *) 
pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// eventIsDuplicate: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_eventIsDuplicate(struct Event *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate); +} + +// eventPreDestruct: virtual inherited (res) base (rmres) +void __nvoc_up_thunk_RsResource_eventPreDestruct(struct Event *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// eventControl: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_eventControl(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// eventControlFilter: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_eventControlFilter(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// eventMap: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_eventMap(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams, pCpuMapping); +} + +// eventUnmap: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_eventUnmap(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pCpuMapping); +} + +// eventIsPartialUnmapSupported: inline virtual inherited (res) base (rmres) body +NvBool __nvoc_up_thunk_RsResource_eventIsPartialUnmapSupported(struct Event *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// eventMapTo: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_eventMapTo(struct Event *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// eventUnmapFrom: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_eventUnmapFrom(struct Event *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// eventGetRefCount: virtual inherited (res) base (rmres) +NvU32 __nvoc_up_thunk_RsResource_eventGetRefCount(struct Event *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// 
eventAddAdditionalDependants: virtual inherited (res) base (rmres)
+void __nvoc_up_thunk_RsResource_eventAddAdditionalDependants(struct RsClient *pClient, struct Event *pResource, RsResourceRef *pReference) {
+    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource)), pReference);
+}
+
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__Event =
+{
+    /*numEntries=*/     0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_RmResource(RmResource*);
+void __nvoc_dtor_Event(Event *pThis) {
+    __nvoc_eventDestruct(pThis);
+    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_Event(Event *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_Event(Event *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_Event_fail_RmResource;
+    __nvoc_init_dataField_Event(pThis);
+
+    status = __nvoc_eventConstruct(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_Event_fail__init;
+    goto __nvoc_ctor_Event_exit; // Success
+
+__nvoc_ctor_Event_fail__init:
+    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
+__nvoc_ctor_Event_fail_RmResource:
+__nvoc_ctor_Event_exit:
+
+    return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_Event_1(Event *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+} // End __nvoc_init_funcTable_Event_1
+
+
+// Initialize vtable(s) for 21 virtual method(s).
+void __nvoc_init_funcTable_Event(Event *pThis) {
+    __nvoc_init_funcTable_Event_1(pThis);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__Event(Event *pThis) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;    // (obj) super^3
+    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource;    // (res) super^2
+    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;    // (rmrescmn) super^2
+    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource;    // (rmres) super
+    pThis->__nvoc_pbase_Event = pThis;    // (event) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__RmResource(&pThis->__nvoc_base_RmResource);
+
+    // Pointer(s) to metadata structure(s)
+    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__Event.metadata__RmResource.metadata__RsResource.metadata__Object;    // (obj) super^3
+    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__Event.metadata__RmResource.metadata__RsResource;    // (res) super^2
+    pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__Event.metadata__RmResource.metadata__RmResourceCommon;    // (rmrescmn) super^2
+    pThis->__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__Event.metadata__RmResource;    // (rmres) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__Event;    // (event) this
+
+    // Initialize per-object vtables.
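+    // Editor's sketch (illustrative comment, not generator output): once the
+    // metadata pointers above are set, ancestor access and virtual dispatch
+    // both work on a constructed pEvent, e.g.:
+    //
+    //     struct RsResource *pRes = staticCast(pEvent, RsResource);  // fixed-offset pbase pointer
+    //     struct Event *pSame = dynamicCast(pRes, Event);            // RTTI walk; NULL on mismatch
+    //     NvU32 refs = eventGetRefCount(pEvent);                     // dispatches through the vtable thunks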
+    __nvoc_init_funcTable_Event(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_Event(Event **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    Event *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(Event), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(Event));
+
+    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent, if there is one, unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__Event(pThis);
+    status = __nvoc_ctor_Event(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_objCreate_Event_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_Event_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
+    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+        portMemSet(pThis, 0, sizeof(Event));
+    else
+    {
+        portMemFree(pThis);
+        *ppThis = NULL;
+    }
+
+    // coverity[leaked_storage:FALSE]
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_Event(Event **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+    status = __nvoc_objCreate_Event(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+    return status;
+}
+
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check__0xf8f965 = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier;
+
+// Forward declarations for INotifier
+void __nvoc_init__INotifier(INotifier*);
+void __nvoc_init_funcTable_INotifier(INotifier*);
+NV_STATUS __nvoc_ctor_INotifier(INotifier*, struct CALL_CONTEXT *arg_pCallContext);
+void __nvoc_init_dataField_INotifier(INotifier*);
+void __nvoc_dtor_INotifier(INotifier*);
+
+// Structures used within RTTI (run-time type information)
+extern const struct NVOC_CASTINFO __nvoc_castinfo__INotifier;
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info__INotifier;
+
+// Down-thunk(s) to bridge INotifier methods from ancestors (if any)
+
+// Up-thunk(s) to bridge INotifier methods to ancestors (if any)
+
+// Not instantiable because it's not derived from class "Object"
+// Not instantiable because it's an abstract class with the following pure virtual functions:
+//  inotifyGetNotificationListPtr
+//  inotifySetNotificationShare
+//  inotifyGetNotificationShare
+//  inotifyUnregisterEvent
+//  inotifyGetOrAllocNotifShare
+const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier =
+{
+    /*classInfo=*/ {
+        /*size=*/      sizeof(INotifier),
+        /*classId=*/   classId(INotifier),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/      "INotifier",
+#endif
+    },
+    /*objCreatefn=*/  (NVOC_DYNAMIC_OBJ_CREATE) NULL,
+    /*pCastInfo=*/    &__nvoc_castinfo__INotifier,
+    /*pExportInfo=*/  &__nvoc_export_info__INotifier
+};
+
+
+// Metadata with per-class RTTI and vtable
+static const struct NVOC_METADATA__INotifier __nvoc_metadata__INotifier = {
+    .rtti.pClassDef = &__nvoc_class_def_INotifier,    // (inotify) this
+    .rtti.dtor      = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_INotifier,
+    .rtti.offset    = 0,
+
+    .vtable.__inotifyGetNotificationListPtr__ = NULL,    // pure virtual
+    .vtable.__inotifySetNotificationShare__ = NULL,    // pure virtual
+    .vtable.__inotifyGetNotificationShare__ = NULL,    // pure virtual
+    .vtable.__inotifyUnregisterEvent__ = NULL,    // pure virtual
+    .vtable.__inotifyGetOrAllocNotifShare__ = NULL,    // pure virtual
+};
+
+
+// Dynamic down-casting information
+const struct NVOC_CASTINFO __nvoc_castinfo__INotifier = {
+    .numRelatives = 1,
+    .relatives = {
+        &__nvoc_metadata__INotifier.rtti,    // [0]: (inotify) this
+    }
+};
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__INotifier =
+{
+    /*numEntries=*/     0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_INotifier(INotifier *pThis) {
+    __nvoc_inotifyDestruct(pThis);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_INotifier(INotifier *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_INotifier(INotifier *pThis, struct CALL_CONTEXT * arg_pCallContext) {
+    NV_STATUS status = NV_OK;
+    __nvoc_init_dataField_INotifier(pThis);
+
+    status = __nvoc_inotifyConstruct(pThis, arg_pCallContext);
+    if (status != NV_OK) goto __nvoc_ctor_INotifier_fail__init;
+    goto __nvoc_ctor_INotifier_exit; // Success
+
+__nvoc_ctor_INotifier_fail__init:
+__nvoc_ctor_INotifier_exit:
+
+    return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_INotifier_1(INotifier *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+} // End __nvoc_init_funcTable_INotifier_1
+
+
+// Initialize vtable(s) for 5 virtual method(s).
+void __nvoc_init_funcTable_INotifier(INotifier *pThis) {
+    __nvoc_init_funcTable_INotifier_1(pThis);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__INotifier(INotifier *pThis) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_INotifier = pThis;    // (inotify) this
+
+    // Pointer(s) to metadata structure(s)
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__INotifier;    // (inotify) this
+
+    // Initialize per-object vtables.
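+    // Editor's note (illustrative, not generator output): all five INotifier
+    // vtable slots above are NULL (pure virtual), so an INotifier is only ever
+    // used as an embedded base whose slots a concrete subclass such as
+    // Notifier fills through down-thunks. A call like
+    //
+    //     struct NotifShare *pShare = inotifyGetNotificationShare(pINotifier);
+    //
+    // (assuming the usual generated dispatch wrapper, by the same pattern as
+    // the Event dispatchers in the header below) lands in
+    // __nvoc_down_thunk_Notifier_inotifyGetNotificationShare, which adjusts
+    // the pointer back to the enclosing Notifier and calls
+    // notifyGetNotificationShare_IMPL.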
+ __nvoc_init_funcTable_INotifier(pThis); +} + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xa8683b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +// Forward declarations for Notifier +void __nvoc_init__INotifier(INotifier*); +void __nvoc_init__Notifier(Notifier*); +void __nvoc_init_funcTable_Notifier(Notifier*); +NV_STATUS __nvoc_ctor_Notifier(Notifier*, struct CALL_CONTEXT *arg_pCallContext); +void __nvoc_init_dataField_Notifier(Notifier*); +void __nvoc_dtor_Notifier(Notifier*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__Notifier; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__Notifier; + +// Down-thunk(s) to bridge Notifier methods from ancestors (if any) +PEVENTNOTIFICATION * __nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier); // this +struct NotifShare * __nvoc_down_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier); // this +void __nvoc_down_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare); // this +NV_STATUS __nvoc_down_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // this +NV_STATUS __nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // this + +// Up-thunk(s) to bridge Notifier methods to ancestors (if any) + +// Not instantiable because it's not derived from class "Object" +const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Notifier), + /*classId=*/ classId(Notifier), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Notifier", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo__Notifier, + /*pExportInfo=*/ &__nvoc_export_info__Notifier +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__Notifier __nvoc_metadata__Notifier = { + .rtti.pClassDef = &__nvoc_class_def_Notifier, // (notify) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Notifier, + .rtti.offset = 0, + .metadata__INotifier.rtti.pClassDef = &__nvoc_class_def_INotifier, // (inotify) super + .metadata__INotifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__INotifier.rtti.offset = NV_OFFSETOF(Notifier, __nvoc_base_INotifier), + + .vtable.__notifyGetNotificationListPtr__ = ¬ifyGetNotificationListPtr_IMPL, // virtual override (inotify) base (inotify) + .metadata__INotifier.vtable.__inotifyGetNotificationListPtr__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr, // pure virtual + .vtable.__notifyGetNotificationShare__ = ¬ifyGetNotificationShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__INotifier.vtable.__inotifyGetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationShare, // pure virtual + .vtable.__notifySetNotificationShare__ = ¬ifySetNotificationShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__INotifier.vtable.__inotifySetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifySetNotificationShare, // pure virtual + .vtable.__notifyUnregisterEvent__ = ¬ifyUnregisterEvent_IMPL, // virtual override (inotify) base (inotify) + 
.metadata__INotifier.vtable.__inotifyUnregisterEvent__ = &__nvoc_down_thunk_Notifier_inotifyUnregisterEvent, // pure virtual + .vtable.__notifyGetOrAllocNotifShare__ = ¬ifyGetOrAllocNotifShare_IMPL, // virtual override (inotify) base (inotify) + .metadata__INotifier.vtable.__inotifyGetOrAllocNotifShare__ = &__nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare, // pure virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__Notifier = { + .numRelatives = 2, + .relatives = { + &__nvoc_metadata__Notifier.rtti, // [0]: (notify) this + &__nvoc_metadata__Notifier.metadata__INotifier.rtti, // [1]: (inotify) super + } +}; + +// 5 down-thunk(s) defined to bridge methods in Notifier from superclasses + +// notifyGetNotificationListPtr: virtual override (inotify) base (inotify) +PEVENTNOTIFICATION * __nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *) pNotifier) - NV_OFFSETOF(Notifier, __nvoc_base_INotifier))); +} + +// notifyGetNotificationShare: virtual override (inotify) base (inotify) +struct NotifShare * __nvoc_down_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) - NV_OFFSETOF(Notifier, __nvoc_base_INotifier))); +} + +// notifySetNotificationShare: virtual override (inotify) base (inotify) +void __nvoc_down_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) - NV_OFFSETOF(Notifier, __nvoc_base_INotifier)), pNotifShare); +} + +// notifyUnregisterEvent: virtual override (inotify) base (inotify) +NV_STATUS __nvoc_down_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *) pNotifier) - NV_OFFSETOF(Notifier, __nvoc_base_INotifier)), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +// notifyGetOrAllocNotifShare: virtual override (inotify) base (inotify) +NV_STATUS __nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *) pNotifier) - NV_OFFSETOF(Notifier, __nvoc_base_INotifier)), hNotifierClient, hNotifierResource, ppNotifShare); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__Notifier = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_INotifier(INotifier*); +void __nvoc_dtor_Notifier(Notifier *pThis) { + __nvoc_notifyDestruct(pThis); + __nvoc_dtor_INotifier(&pThis->__nvoc_base_INotifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Notifier(Notifier *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_INotifier(INotifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_Notifier(Notifier *pThis, struct CALL_CONTEXT * arg_pCallContext) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_INotifier(&pThis->__nvoc_base_INotifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_Notifier_fail_INotifier; + __nvoc_init_dataField_Notifier(pThis); + + status = __nvoc_notifyConstruct(pThis, arg_pCallContext); + if (status != NV_OK) goto 
__nvoc_ctor_Notifier_fail__init;
+    goto __nvoc_ctor_Notifier_exit; // Success
+
+__nvoc_ctor_Notifier_fail__init:
+    __nvoc_dtor_INotifier(&pThis->__nvoc_base_INotifier);
+__nvoc_ctor_Notifier_fail_INotifier:
+__nvoc_ctor_Notifier_exit:
+
+    return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_Notifier_1(Notifier *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+} // End __nvoc_init_funcTable_Notifier_1
+
+
+// Initialize vtable(s) for 5 virtual method(s).
+void __nvoc_init_funcTable_Notifier(Notifier *pThis) {
+    __nvoc_init_funcTable_Notifier_1(pThis);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__Notifier(Notifier *pThis) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_INotifier;    // (inotify) super
+    pThis->__nvoc_pbase_Notifier = pThis;    // (notify) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__INotifier(&pThis->__nvoc_base_INotifier);
+
+    // Pointer(s) to metadata structure(s)
+    pThis->__nvoc_base_INotifier.__nvoc_metadata_ptr = &__nvoc_metadata__Notifier.metadata__INotifier;    // (inotify) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__Notifier;    // (notify) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_Notifier(pThis);
+}
+
diff --git a/src/nvidia/generated/g_event_nvoc.h b/src/nvidia/generated/g_event_nvoc.h
new file mode 100644
index 0000000..e356584
--- /dev/null
+++ b/src/nvidia/generated/g_event_nvoc.h
@@ -0,0 +1,752 @@
+
+#ifndef _G_EVENT_NVOC_H_
+#define _G_EVENT_NVOC_H_
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ +#pragma once +#include "g_event_nvoc.h" + +#ifndef _EVENT_H_ +#define _EVENT_H_ + +#include "ctrl/ctrl0000/ctrl0000event.h" // NV0000_NOTIFIERS_MAXCOUNT + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "resserv/rs_server.h" +#include "rmapi/resource.h" +#include "kernel/gpu/gpu_engine_type.h" + +// Opaque callback memory type for interfacing the scheduling API +typedef struct TMR_EVENT TMR_EVENT; + +struct EVENTNOTIFICATION +{ + NvHandle hEventClient; + NvHandle hEvent; + NvU32 subdeviceInst; + NvU32 NotifyIndex; // NVnnnn_NOTIFIERS_xyz + NvU32 NotifyType; // Event class. NV01_EVENT_OS_EVENT for example. + NvBool bUserOsEventHandle; // Event was allocated from user app. + NvBool bBroadcastEvent; // Wait for all subdevices before sending event. + NvBool bClientRM; // Event was allocated from client RM. + NvBool bSubdeviceSpecificEvent; // SubdeviceSpecificValue is valid. + NvU32 SubdeviceSpecificValue; // NV0005_NOTIFY_INDEX_SUBDEVICE + NvBool bEventDataRequired; // nv_post_event allocates memory for Data. + NvBool bNonStallIntrEvent; + NvU32 NotifyTriggerCount; // Used with bBroadcastEvent. + NvP64 Data; + OBJGPU *pGpu; // Store to free dynamic memory on teardown. + TMR_EVENT *pTmrEvent; // Store to free dynamic memory on teardown. + struct EVENTNOTIFICATION *Next; +}; +typedef struct EVENTNOTIFICATION EVENTNOTIFICATION, *PEVENTNOTIFICATION; + + +struct INotifier; + +#ifndef __NVOC_CLASS_INotifier_TYPEDEF__ +#define __NVOC_CLASS_INotifier_TYPEDEF__ +typedef struct INotifier INotifier; +#endif /* __NVOC_CLASS_INotifier_TYPEDEF__ */ + +#ifndef __nvoc_class_id_INotifier +#define __nvoc_class_id_INotifier 0xf8f965 +#endif /* __nvoc_class_id_INotifier */ + + + +MAKE_LIST(SystemEventQueueList, NV0000_CTRL_GET_SYSTEM_EVENT_DATA_PARAMS); + +struct _def_client_system_event_info +{ + SystemEventQueueList eventQueue; + NvU32 notifyActions[NV0000_NOTIFIERS_MAXCOUNT]; +}; + +/** + * This class represents data that is shared between one notifier and any + * events that are registered with the notifier. + * + * Instances of this class are ref-counted and will be kept alive until + * the notifier and all of its events have been freed. + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__NotifShare; +struct NVOC_METADATA__RsShared; + + +struct NotifShare { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__NotifShare *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct RsShared __nvoc_base_RsShared; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^2 + struct RsShared *__nvoc_pbase_RsShared; // shr super + struct NotifShare *__nvoc_pbase_NotifShare; // shrnotif + + // Data members + struct INotifier *pNotifier; + NvHandle hNotifierClient; + NvHandle hNotifierResource; + EVENTNOTIFICATION *pEventList; +}; + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__NotifShare { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__RsShared metadata__RsShared; +}; + +#ifndef __NVOC_CLASS_NotifShare_TYPEDEF__ +#define __NVOC_CLASS_NotifShare_TYPEDEF__ +typedef struct NotifShare NotifShare; +#endif /* __NVOC_CLASS_NotifShare_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NotifShare +#define __nvoc_class_id_NotifShare 0xd5f150 +#endif /* __nvoc_class_id_NotifShare */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NotifShare; + +#define __staticCast_NotifShare(pThis) \ + ((pThis)->__nvoc_pbase_NotifShare) + +#ifdef __nvoc_event_h_disabled +#define __dynamicCast_NotifShare(pThis) ((NotifShare*) NULL) +#else //__nvoc_event_h_disabled +#define __dynamicCast_NotifShare(pThis) \ + ((NotifShare*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(NotifShare))) +#endif //__nvoc_event_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_NotifShare(NotifShare**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_NotifShare(NotifShare**, Dynamic*, NvU32); +#define __objCreate_NotifShare(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_NotifShare((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros + +// Dispatch functions +NV_STATUS shrnotifConstruct_IMPL(struct NotifShare *arg_pNotifShare); + +#define __nvoc_shrnotifConstruct(arg_pNotifShare) shrnotifConstruct_IMPL(arg_pNotifShare) +void shrnotifDestruct_IMPL(struct NotifShare *pNotifShare); + +#define __nvoc_shrnotifDestruct(pNotifShare) shrnotifDestruct_IMPL(pNotifShare) +#undef PRIVATE_FIELD + + +/** + * This class represents event notification consumers + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__Event; +struct NVOC_METADATA__RmResource; +struct NVOC_VTABLE__Event; + + +struct Event { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__Event *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct RmResource __nvoc_base_RmResource; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^3 + struct RsResource *__nvoc_pbase_RsResource; // res super^2 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^2 + struct RmResource *__nvoc_pbase_RmResource; // rmres super + struct Event *__nvoc_pbase_Event; // event + + // Data members + struct NotifShare *pNotifierShare; + NvHandle hNotifierClient; + NvHandle hNotifierResource; + NvHandle hEvent; +}; + + +// Vtable with 21 per-class function pointers +struct NVOC_VTABLE__Event { + NvBool (*__eventAccessCallback__)(struct Event * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (rmres) + NvBool (*__eventShareCallback__)(struct Event * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__eventGetMemInterMapParams__)(struct Event * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__eventCheckMemInterUnmap__)(struct Event * /*this*/, NvBool); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__eventGetMemoryMappingDescriptor__)(struct Event * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__eventControlSerialization_Prologue__)(struct Event * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + void (*__eventControlSerialization_Epilogue__)(struct Event * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__eventControl_Prologue__)(struct Event * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + void (*__eventControl_Epilogue__)(struct Event * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + NvBool (*__eventCanCopy__)(struct Event * /*this*/); // virtual inherited (res) base (rmres) + NV_STATUS (*__eventIsDuplicate__)(struct Event * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (rmres) + void (*__eventPreDestruct__)(struct Event * /*this*/); // virtual inherited (res) base (rmres) + NV_STATUS (*__eventControl__)(struct Event * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (rmres) + NV_STATUS (*__eventControlFilter__)(struct Event * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (rmres) + NV_STATUS (*__eventMap__)(struct Event * /*this*/, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (res) base (rmres) + NV_STATUS (*__eventUnmap__)(struct Event * /*this*/, struct CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (res) base (rmres) + NvBool (*__eventIsPartialUnmapSupported__)(struct Event * /*this*/); // inline virtual inherited (res) base (rmres) body + NV_STATUS (*__eventMapTo__)(struct Event * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (rmres) + NV_STATUS (*__eventUnmapFrom__)(struct Event * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (rmres) + NvU32 (*__eventGetRefCount__)(struct Event * /*this*/); // virtual inherited (res) base (rmres) + void (*__eventAddAdditionalDependants__)(struct RsClient *, struct 
Event * /*this*/, RsResourceRef *); // virtual inherited (res) base (rmres) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__Event { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__RmResource metadata__RmResource; + const struct NVOC_VTABLE__Event vtable; +}; + +#ifndef __NVOC_CLASS_Event_TYPEDEF__ +#define __NVOC_CLASS_Event_TYPEDEF__ +typedef struct Event Event; +#endif /* __NVOC_CLASS_Event_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Event +#define __nvoc_class_id_Event 0xa4ecfc +#endif /* __nvoc_class_id_Event */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Event; + +#define __staticCast_Event(pThis) \ + ((pThis)->__nvoc_pbase_Event) + +#ifdef __nvoc_event_h_disabled +#define __dynamicCast_Event(pThis) ((Event*) NULL) +#else //__nvoc_event_h_disabled +#define __dynamicCast_Event(pThis) \ + ((Event*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Event))) +#endif //__nvoc_event_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_Event(Event**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Event(Event**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_Event(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_Event((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define eventAccessCallback_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define eventAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) eventAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define eventShareCallback_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define eventShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) eventShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define eventGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define eventGetMemInterMapParams(pRmResource, pParams) eventGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define eventCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define eventCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) eventCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define eventGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define eventGetMemoryMappingDescriptor(pRmResource, ppMemDesc) eventGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define eventControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define eventControlSerialization_Prologue(pResource, pCallContext, pParams) eventControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define eventControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define eventControlSerialization_Epilogue(pResource, pCallContext, pParams) eventControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define 
eventControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define eventControl_Prologue(pResource, pCallContext, pParams) eventControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define eventControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define eventControl_Epilogue(pResource, pCallContext, pParams) eventControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define eventCanCopy_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define eventCanCopy(pResource) eventCanCopy_DISPATCH(pResource) +#define eventIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define eventIsDuplicate(pResource, hMemory, pDuplicate) eventIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define eventPreDestruct_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define eventPreDestruct(pResource) eventPreDestruct_DISPATCH(pResource) +#define eventControl_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControl__ +#define eventControl(pResource, pCallContext, pParams) eventControl_DISPATCH(pResource, pCallContext, pParams) +#define eventControlFilter_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define eventControlFilter(pResource, pCallContext, pParams) eventControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define eventMap_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMap__ +#define eventMap(pResource, pCallContext, pParams, pCpuMapping) eventMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define eventUnmap_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmap__ +#define eventUnmap(pResource, pCallContext, pCpuMapping) eventUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define eventIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define eventIsPartialUnmapSupported(pResource) eventIsPartialUnmapSupported_DISPATCH(pResource) +#define eventMapTo_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define eventMapTo(pResource, pParams) eventMapTo_DISPATCH(pResource, pParams) +#define eventUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define eventUnmapFrom(pResource, pParams) eventUnmapFrom_DISPATCH(pResource, pParams) +#define eventGetRefCount_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define eventGetRefCount(pResource) eventGetRefCount_DISPATCH(pResource) +#define eventAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define eventAddAdditionalDependants(pClient, pResource, pReference) eventAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NvBool 
eventAccessCallback_DISPATCH(struct Event *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__eventAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool eventShareCallback_DISPATCH(struct Event *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__eventShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS eventGetMemInterMapParams_DISPATCH(struct Event *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__eventGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS eventCheckMemInterUnmap_DISPATCH(struct Event *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__eventCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS eventGetMemoryMappingDescriptor_DISPATCH(struct Event *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__eventGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS eventControlSerialization_Prologue_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__eventControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void eventControlSerialization_Epilogue_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__eventControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS eventControl_Prologue_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__eventControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void eventControl_Epilogue_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__eventControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool eventCanCopy_DISPATCH(struct Event *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__eventCanCopy__(pResource); +} + +static inline NV_STATUS eventIsDuplicate_DISPATCH(struct Event *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__eventIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void eventPreDestruct_DISPATCH(struct Event *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__eventPreDestruct__(pResource); +} + +static inline NV_STATUS eventControl_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__eventControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS eventControlFilter_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__eventControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS 
eventMap_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__eventMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS eventUnmap_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__eventUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool eventIsPartialUnmapSupported_DISPATCH(struct Event *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__eventIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS eventMapTo_DISPATCH(struct Event *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__eventMapTo__(pResource, pParams); +} + +static inline NV_STATUS eventUnmapFrom_DISPATCH(struct Event *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__eventUnmapFrom__(pResource, pParams); +} + +static inline NvU32 eventGetRefCount_DISPATCH(struct Event *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__eventGetRefCount__(pResource); +} + +static inline void eventAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Event *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__eventAddAdditionalDependants__(pClient, pResource, pReference); +} + +NV_STATUS eventConstruct_IMPL(struct Event *arg_pEvent, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_eventConstruct(arg_pEvent, arg_pCallContext, arg_pParams) eventConstruct_IMPL(arg_pEvent, arg_pCallContext, arg_pParams) +void eventDestruct_IMPL(struct Event *pEvent); + +#define __nvoc_eventDestruct(pEvent) eventDestruct_IMPL(pEvent) +NV_STATUS eventInit_IMPL(struct Event *pEvent, struct CALL_CONTEXT *pCallContext, NvHandle hNotifierClient, NvHandle hNotifierResource, PEVENTNOTIFICATION **pppEventNotification); + +#ifdef __nvoc_event_h_disabled +static inline NV_STATUS eventInit(struct Event *pEvent, struct CALL_CONTEXT *pCallContext, NvHandle hNotifierClient, NvHandle hNotifierResource, PEVENTNOTIFICATION **pppEventNotification) { + NV_ASSERT_FAILED_PRECOMP("Event was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_event_h_disabled +#define eventInit(pEvent, pCallContext, hNotifierClient, hNotifierResource, pppEventNotification) eventInit_IMPL(pEvent, pCallContext, hNotifierClient, hNotifierResource, pppEventNotification) +#endif //__nvoc_event_h_disabled + +NV_STATUS eventGetByHandle_IMPL(struct RsClient *pClient, NvHandle hEvent, NvU32 *pNotifyIndex); + +#define eventGetByHandle(pClient, hEvent, pNotifyIndex) eventGetByHandle_IMPL(pClient, hEvent, pNotifyIndex) +#undef PRIVATE_FIELD + + +/** + * Mix-in interface for resources that send notifications to events + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable +struct NVOC_METADATA__INotifier; +struct NVOC_VTABLE__INotifier; + + +struct INotifier { + + // Metadata starts with RTTI structure. 
+ union { + const struct NVOC_METADATA__INotifier *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Ancestor object pointers for `staticCast` feature + struct INotifier *__nvoc_pbase_INotifier; // inotify +}; + + +// Vtable with 5 per-class function pointers +struct NVOC_VTABLE__INotifier { + PEVENTNOTIFICATION * (*__inotifyGetNotificationListPtr__)(struct INotifier * /*this*/); // pure virtual + void (*__inotifySetNotificationShare__)(struct INotifier * /*this*/, struct NotifShare *); // pure virtual + struct NotifShare * (*__inotifyGetNotificationShare__)(struct INotifier * /*this*/); // pure virtual + NV_STATUS (*__inotifyUnregisterEvent__)(struct INotifier * /*this*/, NvHandle, NvHandle, NvHandle, NvHandle); // pure virtual + NV_STATUS (*__inotifyGetOrAllocNotifShare__)(struct INotifier * /*this*/, NvHandle, NvHandle, struct NotifShare **); // pure virtual +}; + +// Metadata with per-class RTTI and vtable +struct NVOC_METADATA__INotifier { + const struct NVOC_RTTI rtti; + const struct NVOC_VTABLE__INotifier vtable; +}; + +#ifndef __NVOC_CLASS_INotifier_TYPEDEF__ +#define __NVOC_CLASS_INotifier_TYPEDEF__ +typedef struct INotifier INotifier; +#endif /* __NVOC_CLASS_INotifier_TYPEDEF__ */ + +#ifndef __nvoc_class_id_INotifier +#define __nvoc_class_id_INotifier 0xf8f965 +#endif /* __nvoc_class_id_INotifier */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +#define __staticCast_INotifier(pThis) \ + ((pThis)->__nvoc_pbase_INotifier) + +#ifdef __nvoc_event_h_disabled +#define __dynamicCast_INotifier(pThis) ((INotifier*) NULL) +#else //__nvoc_event_h_disabled +#define __dynamicCast_INotifier(pThis) \ + ((INotifier*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(INotifier))) +#endif //__nvoc_event_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_INotifier(INotifier**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_INotifier(INotifier**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext); +#define __objCreate_INotifier(ppNewObj, pParent, createFlags, arg_pCallContext) \ + __nvoc_objCreate_INotifier((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext) + + +// Wrapper macros +#define inotifyGetNotificationListPtr_FNPTR(pNotifier) pNotifier->__nvoc_metadata_ptr->vtable.__inotifyGetNotificationListPtr__ +#define inotifyGetNotificationListPtr(pNotifier) inotifyGetNotificationListPtr_DISPATCH(pNotifier) +#define inotifySetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_metadata_ptr->vtable.__inotifySetNotificationShare__ +#define inotifySetNotificationShare(pNotifier, pNotifShare) inotifySetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define inotifyGetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_metadata_ptr->vtable.__inotifyGetNotificationShare__ +#define inotifyGetNotificationShare(pNotifier) inotifyGetNotificationShare_DISPATCH(pNotifier) +#define inotifyUnregisterEvent_FNPTR(pNotifier) pNotifier->__nvoc_metadata_ptr->vtable.__inotifyUnregisterEvent__ +#define inotifyUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) inotifyUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define inotifyGetOrAllocNotifShare_FNPTR(pNotifier) pNotifier->__nvoc_metadata_ptr->vtable.__inotifyGetOrAllocNotifShare__ +#define inotifyGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) inotifyGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) + 
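+// Aside: every *_FNPTR / *_DISPATCH pair above follows the same NVOC pattern:
+// the call resolves through the object's shared, per-class metadata rather
+// than a per-object function table. Below is a minimal, self-contained sketch
+// of that pattern; all names in it (Animal, AnimalVtable, animalSpeak_DISPATCH)
+// are illustrative stand-ins, not part of this header, and the block is kept
+// out of the build with #if 0.
+#if 0
+struct Animal;
+
+struct AnimalVtable {                           // per-class function table
+    void (*speak)(struct Animal *);
+};
+
+struct AnimalMetadata {                         // one const instance per class
+    struct AnimalVtable vtable;
+};
+
+struct Animal {                                 // plays the role of INotifier
+    const struct AnimalMetadata *metadata_ptr;  // like __nvoc_metadata_ptr
+};
+
+// Analogous to inotifySetNotificationShare_DISPATCH() above: one extra
+// indirection through the class metadata, no per-object vtable storage.
+static inline void animalSpeak_DISPATCH(struct Animal *pAnimal) {
+    pAnimal->metadata_ptr->vtable.speak(pAnimal);
+}
+#endif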
+// Dispatch functions +static inline PEVENTNOTIFICATION * inotifyGetNotificationListPtr_DISPATCH(struct INotifier *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__inotifyGetNotificationListPtr__(pNotifier); +} + +static inline void inotifySetNotificationShare_DISPATCH(struct INotifier *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvoc_metadata_ptr->vtable.__inotifySetNotificationShare__(pNotifier, pNotifShare); +} + +static inline struct NotifShare * inotifyGetNotificationShare_DISPATCH(struct INotifier *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__inotifyGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS inotifyUnregisterEvent_DISPATCH(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvoc_metadata_ptr->vtable.__inotifyUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS inotifyGetOrAllocNotifShare_DISPATCH(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__nvoc_metadata_ptr->vtable.__inotifyGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS inotifyConstruct_IMPL(struct INotifier *arg_pNotifier, struct CALL_CONTEXT *arg_pCallContext); + +#define __nvoc_inotifyConstruct(arg_pNotifier, arg_pCallContext) inotifyConstruct_IMPL(arg_pNotifier, arg_pCallContext) +void inotifyDestruct_IMPL(struct INotifier *pNotifier); + +#define __nvoc_inotifyDestruct(pNotifier) inotifyDestruct_IMPL(pNotifier) +PEVENTNOTIFICATION inotifyGetNotificationList_IMPL(struct INotifier *pNotifier); + +#ifdef __nvoc_event_h_disabled +static inline PEVENTNOTIFICATION inotifyGetNotificationList(struct INotifier *pNotifier) { + NV_ASSERT_FAILED_PRECOMP("INotifier was disabled!"); + return NULL; +} +#else //__nvoc_event_h_disabled +#define inotifyGetNotificationList(pNotifier) inotifyGetNotificationList_IMPL(pNotifier) +#endif //__nvoc_event_h_disabled + +#undef PRIVATE_FIELD + + +/** + * Basic implementation for event notification mix-in + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__Notifier; +struct NVOC_METADATA__INotifier; +struct NVOC_VTABLE__Notifier; + + +struct Notifier { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__Notifier *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct INotifier __nvoc_base_INotifier; + + // Ancestor object pointers for `staticCast` feature + struct INotifier *__nvoc_pbase_INotifier; // inotify super + struct Notifier *__nvoc_pbase_Notifier; // notify + + // Data members + struct NotifShare *pNotifierShare; +}; + + +// Vtable with 5 per-class function pointers +struct NVOC_VTABLE__Notifier { + PEVENTNOTIFICATION * (*__notifyGetNotificationListPtr__)(struct Notifier * /*this*/); // virtual override (inotify) base (inotify) + struct NotifShare * (*__notifyGetNotificationShare__)(struct Notifier * /*this*/); // virtual override (inotify) base (inotify) + void (*__notifySetNotificationShare__)(struct Notifier * /*this*/, struct NotifShare *); // virtual override (inotify) base (inotify) + NV_STATUS (*__notifyUnregisterEvent__)(struct Notifier * /*this*/, NvHandle, NvHandle, NvHandle, NvHandle); // virtual override (inotify) base (inotify) + NV_STATUS (*__notifyGetOrAllocNotifShare__)(struct Notifier * /*this*/, NvHandle, NvHandle, struct NotifShare **); // virtual override (inotify) base (inotify) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__Notifier { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__INotifier metadata__INotifier; + const struct NVOC_VTABLE__Notifier vtable; +}; + +#ifndef __NVOC_CLASS_Notifier_TYPEDEF__ +#define __NVOC_CLASS_Notifier_TYPEDEF__ +typedef struct Notifier Notifier; +#endif /* __NVOC_CLASS_Notifier_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Notifier +#define __nvoc_class_id_Notifier 0xa8683b +#endif /* __nvoc_class_id_Notifier */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +#define __staticCast_Notifier(pThis) \ + ((pThis)->__nvoc_pbase_Notifier) + +#ifdef __nvoc_event_h_disabled +#define __dynamicCast_Notifier(pThis) ((Notifier*) NULL) +#else //__nvoc_event_h_disabled +#define __dynamicCast_Notifier(pThis) \ + ((Notifier*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Notifier))) +#endif //__nvoc_event_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_Notifier(Notifier**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Notifier(Notifier**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext); +#define __objCreate_Notifier(ppNewObj, pParent, createFlags, arg_pCallContext) \ + __nvoc_objCreate_Notifier((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext) + + +// Wrapper macros +#define notifyGetNotificationListPtr_FNPTR(pNotifier) pNotifier->__nvoc_metadata_ptr->vtable.__notifyGetNotificationListPtr__ +#define notifyGetNotificationListPtr(pNotifier) notifyGetNotificationListPtr_DISPATCH(pNotifier) +#define notifyGetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_metadata_ptr->vtable.__notifyGetNotificationShare__ +#define notifyGetNotificationShare(pNotifier) notifyGetNotificationShare_DISPATCH(pNotifier) +#define notifySetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_metadata_ptr->vtable.__notifySetNotificationShare__ +#define notifySetNotificationShare(pNotifier, pNotifShare) notifySetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define notifyUnregisterEvent_FNPTR(pNotifier) pNotifier->__nvoc_metadata_ptr->vtable.__notifyUnregisterEvent__ +#define notifyUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) notifyUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define notifyGetOrAllocNotifShare_FNPTR(pNotifier) 
pNotifier->__nvoc_metadata_ptr->vtable.__notifyGetOrAllocNotifShare__ +#define notifyGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) notifyGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) + +// Dispatch functions +static inline PEVENTNOTIFICATION * notifyGetNotificationListPtr_DISPATCH(struct Notifier *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__notifyGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare * notifyGetNotificationShare_DISPATCH(struct Notifier *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__notifyGetNotificationShare__(pNotifier); +} + +static inline void notifySetNotificationShare_DISPATCH(struct Notifier *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvoc_metadata_ptr->vtable.__notifySetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS notifyUnregisterEvent_DISPATCH(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvoc_metadata_ptr->vtable.__notifyUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS notifyGetOrAllocNotifShare_DISPATCH(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__nvoc_metadata_ptr->vtable.__notifyGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +PEVENTNOTIFICATION *notifyGetNotificationListPtr_IMPL(struct Notifier *pNotifier); + +struct NotifShare *notifyGetNotificationShare_IMPL(struct Notifier *pNotifier); + +void notifySetNotificationShare_IMPL(struct Notifier *pNotifier, struct NotifShare *pNotifShare); + +NV_STATUS notifyUnregisterEvent_IMPL(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); + +NV_STATUS notifyGetOrAllocNotifShare_IMPL(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); + +NV_STATUS notifyConstruct_IMPL(struct Notifier *arg_pNotifier, struct CALL_CONTEXT *arg_pCallContext); + +#define __nvoc_notifyConstruct(arg_pNotifier, arg_pCallContext) notifyConstruct_IMPL(arg_pNotifier, arg_pCallContext) +void notifyDestruct_IMPL(struct Notifier *pNotifier); + +#define __nvoc_notifyDestruct(pNotifier) notifyDestruct_IMPL(pNotifier) +#undef PRIVATE_FIELD + + +void CliAddSystemEvent(NvU32, void *, NvBool *); +void CliDelObjectEvents(RsResourceRef *pResourceRef); +NvBool CliGetEventInfo(NvHandle hClient, NvHandle hEvent, struct Event **ppEvent); +NV_STATUS CliGetEventNotificationList(NvHandle hClient, NvHandle hObject, + struct INotifier **ppNotifier, + PEVENTNOTIFICATION **pppEventNotification); + +NV_STATUS registerEventNotification(PEVENTNOTIFICATION*, struct RsClient *, NvHandle, NvHandle, NvU32, NvU32, NvP64, NvBool); +NV_STATUS unregisterEventNotification(PEVENTNOTIFICATION*, NvHandle, NvHandle, NvHandle); +NV_STATUS unregisterEventNotificationWithData(PEVENTNOTIFICATION *, NvHandle, NvHandle, NvHandle, NvBool, NvP64); +NV_STATUS bindEventNotificationToSubdevice(PEVENTNOTIFICATION, NvHandle, NvU32); +NV_STATUS engineNonStallIntrNotify(OBJGPU *, RM_ENGINE_TYPE); +NV_STATUS notifyEvents(OBJGPU*, EVENTNOTIFICATION*, NvU32, NvU32, NvU32, NV_STATUS, NvU32); +NV_STATUS engineNonStallIntrNotifyEvent(OBJGPU *, RM_ENGINE_TYPE, NvHandle); + +typedef struct 
GpuEngineEventNotificationList GpuEngineEventNotificationList; + +NV_STATUS gpuEngineEventNotificationListCreate(OBJGPU *, GpuEngineEventNotificationList **); +void gpuEngineEventNotificationListDestroy(OBJGPU *, GpuEngineEventNotificationList *); + +// System Event Queue helpers +void eventSystemInitEventQueue(SystemEventQueueList *pQueue); +NV_STATUS eventSystemEnqueueEvent(SystemEventQueueList *pQueue, NvU32 event, void *pEventData); +NV_STATUS eventSystemDequeueEvent(SystemEventQueueList *pQueue, NV0000_CTRL_GET_SYSTEM_EVENT_DATA_PARAMS *pEvent); +void eventSystemClearEventQueue(SystemEventQueueList *pQueue); + +#endif // _EVENT_H_ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_EVENT_NVOC_H_ diff --git a/src/nvidia/generated/g_generic_engine_nvoc.c b/src/nvidia/generated/g_generic_engine_nvoc.c new file mode 100644 index 0000000..d6e7794 --- /dev/null +++ b/src/nvidia/generated/g_generic_engine_nvoc.c @@ -0,0 +1,519 @@ +#define NVOC_GENERIC_ENGINE_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_generic_engine_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x4bc329 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GenericEngineApi; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +// Forward declarations for GenericEngineApi +void __nvoc_init__GpuResource(GpuResource*); +void __nvoc_init__GenericEngineApi(GenericEngineApi*); +void __nvoc_init_funcTable_GenericEngineApi(GenericEngineApi*); +NV_STATUS __nvoc_ctor_GenericEngineApi(GenericEngineApi*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_GenericEngineApi(GenericEngineApi*); +void __nvoc_dtor_GenericEngineApi(GenericEngineApi*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__GenericEngineApi; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__GenericEngineApi; + +// Down-thunk(s) to bridge GenericEngineApi methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); 
// super^2 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_down_thunk_GenericEngineApi_gpuresMap(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_down_thunk_GenericEngineApi_gpuresGetMapAddrSpace(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this +NV_STATUS __nvoc_down_thunk_GenericEngineApi_gpuresControl(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this + +// Up-thunk(s) to bridge GenericEngineApi methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2 +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2 +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2 +NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS 
__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource); // super +void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference); // super +NV_STATUS __nvoc_up_thunk_GpuResource_genapiUnmap(struct GenericEngineApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_GpuResource_genapiShareCallback(struct GenericEngineApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_GpuResource_genapiGetRegBaseOffsetAndSize(struct GenericEngineApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // this +NV_STATUS __nvoc_up_thunk_GpuResource_genapiInternalControlForward(struct GenericEngineApi *pGpuResource, NvU32 command, void *pParams, NvU32 size); // this +NvHandle __nvoc_up_thunk_GpuResource_genapiGetInternalObjectHandle(struct GenericEngineApi *pGpuResource); // this +NvBool __nvoc_up_thunk_RmResource_genapiAccessCallback(struct GenericEngineApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NV_STATUS __nvoc_up_thunk_RmResource_genapiGetMemInterMapParams(struct GenericEngineApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_genapiCheckMemInterUnmap(struct GenericEngineApi *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_genapiGetMemoryMappingDescriptor(struct GenericEngineApi *pRmResource, struct 
MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_genapiControlSerialization_Prologue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_genapiControlSerialization_Epilogue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_genapiControl_Prologue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_genapiControl_Epilogue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_genapiCanCopy(struct GenericEngineApi *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_genapiIsDuplicate(struct GenericEngineApi *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_genapiPreDestruct(struct GenericEngineApi *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_genapiControlFilter(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_genapiIsPartialUnmapSupported(struct GenericEngineApi *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_genapiMapTo(struct GenericEngineApi *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_genapiUnmapFrom(struct GenericEngineApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_genapiGetRefCount(struct GenericEngineApi *pResource); // this +void __nvoc_up_thunk_RsResource_genapiAddAdditionalDependants(struct RsClient *pClient, struct GenericEngineApi *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_GenericEngineApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GenericEngineApi), + /*classId=*/ classId(GenericEngineApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GenericEngineApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GenericEngineApi, + /*pCastInfo=*/ &__nvoc_castinfo__GenericEngineApi, + /*pExportInfo=*/ &__nvoc_export_info__GenericEngineApi +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__GenericEngineApi __nvoc_metadata__GenericEngineApi = { + .rtti.pClassDef = &__nvoc_class_def_GenericEngineApi, // (genapi) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GenericEngineApi, + .rtti.offset = 0, + .metadata__GpuResource.rtti.pClassDef = &__nvoc_class_def_GpuResource, // (gpures) super + .metadata__GpuResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.rtti.offset = NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource), + .metadata__GpuResource.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2 + .metadata__GpuResource.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.rtti.offset = NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource), + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3 + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.dtor = 
&__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4 + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3 + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + + .vtable.__genapiMap__ = &genapiMap_IMPL, // virtual override (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresMap__ = &__nvoc_down_thunk_GenericEngineApi_gpuresMap, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_GpuResource_resMap, // virtual + .vtable.__genapiGetMapAddrSpace__ = &genapiGetMapAddrSpace_IMPL, // virtual override (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMapAddrSpace__ = &__nvoc_down_thunk_GenericEngineApi_gpuresGetMapAddrSpace, // virtual + .vtable.__genapiControl__ = &genapiControl_IMPL, // virtual override (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl__ = &__nvoc_down_thunk_GenericEngineApi_gpuresControl, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_GpuResource_resControl, // virtual + .vtable.__genapiUnmap__ = &__nvoc_up_thunk_GpuResource_genapiUnmap, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresUnmap__ = &gpuresUnmap_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_GpuResource_resUnmap, // virtual + .vtable.__genapiShareCallback__ = &__nvoc_up_thunk_GpuResource_genapiShareCallback, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresShareCallback__ = &gpuresShareCallback_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresShareCallback__ = &__nvoc_down_thunk_GpuResource_rmresShareCallback, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__genapiGetRegBaseOffsetAndSize__ = 
&__nvoc_up_thunk_GpuResource_genapiGetRegBaseOffsetAndSize, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetRegBaseOffsetAndSize__ = &gpuresGetRegBaseOffsetAndSize_IMPL, // virtual + .vtable.__genapiInternalControlForward__ = &__nvoc_up_thunk_GpuResource_genapiInternalControlForward, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL, // virtual + .vtable.__genapiGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_genapiGetInternalObjectHandle, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL, // virtual + .vtable.__genapiAccessCallback__ = &__nvoc_up_thunk_RmResource_genapiAccessCallback, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresAccessCallback__ = &__nvoc_up_thunk_RmResource_gpuresAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__genapiGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_genapiGetMemInterMapParams, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + .vtable.__genapiCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_genapiCheckMemInterUnmap, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual + .vtable.__genapiGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_genapiGetMemoryMappingDescriptor, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual + .vtable.__genapiControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_genapiControlSerialization_Prologue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__genapiControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_genapiControlSerialization_Epilogue, // virtual inherited (rmres) base (gpures) + 
.metadata__GpuResource.vtable.__gpuresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__genapiControl_Prologue__ = &__nvoc_up_thunk_RmResource_genapiControl_Prologue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__genapiControl_Epilogue__ = &__nvoc_up_thunk_RmResource_genapiControl_Epilogue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__genapiCanCopy__ = &__nvoc_up_thunk_RsResource_genapiCanCopy, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresCanCopy__ = &__nvoc_up_thunk_RsResource_gpuresCanCopy, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__genapiIsDuplicate__ = &__nvoc_up_thunk_RsResource_genapiIsDuplicate, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpuresIsDuplicate, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__genapiPreDestruct__ = &__nvoc_up_thunk_RsResource_genapiPreDestruct, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresPreDestruct__ = &__nvoc_up_thunk_RsResource_gpuresPreDestruct, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__genapiControlFilter__ = &__nvoc_up_thunk_RsResource_genapiControlFilter, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresControlFilter__ = 
&__nvoc_up_thunk_RsResource_gpuresControlFilter, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__genapiIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_genapiIsPartialUnmapSupported, // inline virtual inherited (res) base (gpures) body + .metadata__GpuResource.vtable.__gpuresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__genapiMapTo__ = &__nvoc_up_thunk_RsResource_genapiMapTo, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresMapTo__ = &__nvoc_up_thunk_RsResource_gpuresMapTo, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__genapiUnmapFrom__ = &__nvoc_up_thunk_RsResource_genapiUnmapFrom, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpuresUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__genapiGetRefCount__ = &__nvoc_up_thunk_RsResource_genapiGetRefCount, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetRefCount__ = &__nvoc_up_thunk_RsResource_gpuresGetRefCount, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__genapiAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_genapiAddAdditionalDependants, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__GenericEngineApi = { + .numRelatives = 6, + .relatives = { + &__nvoc_metadata__GenericEngineApi.rtti, // [0]: (genapi) this + 
&__nvoc_metadata__GenericEngineApi.metadata__GpuResource.rtti, // [1]: (gpures) super + &__nvoc_metadata__GenericEngineApi.metadata__GpuResource.metadata__RmResource.rtti, // [2]: (rmres) super^2 + &__nvoc_metadata__GenericEngineApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3 + &__nvoc_metadata__GenericEngineApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4 + &__nvoc_metadata__GenericEngineApi.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3 + } +}; + +// 3 down-thunk(s) defined to bridge methods in GenericEngineApi from superclasses + +// genapiMap: virtual override (res) base (gpures) +NV_STATUS __nvoc_down_thunk_GenericEngineApi_gpuresMap(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return genapiMap((struct GenericEngineApi *)(((unsigned char *) pGenericEngineApi) - NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource)), pCallContext, pParams, pCpuMapping); +} + +// genapiGetMapAddrSpace: virtual override (gpures) base (gpures) +NV_STATUS __nvoc_down_thunk_GenericEngineApi_gpuresGetMapAddrSpace(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return genapiGetMapAddrSpace((struct GenericEngineApi *)(((unsigned char *) pGenericEngineApi) - NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource)), pCallContext, mapFlags, pAddrSpace); +} + +// genapiControl: virtual override (res) base (gpures) +NV_STATUS __nvoc_down_thunk_GenericEngineApi_gpuresControl(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return genapiControl((struct GenericEngineApi *)(((unsigned char *) pGenericEngineApi) - NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource)), pCallContext, pParams); +} + + +// 22 up-thunk(s) defined to bridge methods in GenericEngineApi to superclasses + +// genapiUnmap: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_genapiUnmap(struct GenericEngineApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource)), pCallContext, pCpuMapping); +} + +// genapiShareCallback: virtual inherited (gpures) base (gpures) +NvBool __nvoc_up_thunk_GpuResource_genapiShareCallback(struct GenericEngineApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// genapiGetRegBaseOffsetAndSize: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_genapiGetRegBaseOffsetAndSize(struct GenericEngineApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource)), pGpu, pOffset, pSize); +} + +// genapiInternalControlForward: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_genapiInternalControlForward(struct GenericEngineApi *pGpuResource, NvU32 
command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource)), command, pParams, size); +} + +// genapiGetInternalObjectHandle: virtual inherited (gpures) base (gpures) +NvHandle __nvoc_up_thunk_GpuResource_genapiGetInternalObjectHandle(struct GenericEngineApi *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource))); +} + +// genapiAccessCallback: virtual inherited (rmres) base (gpures) +NvBool __nvoc_up_thunk_RmResource_genapiAccessCallback(struct GenericEngineApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// genapiGetMemInterMapParams: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_genapiGetMemInterMapParams(struct GenericEngineApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pParams); +} + +// genapiCheckMemInterUnmap: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_genapiCheckMemInterUnmap(struct GenericEngineApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), bSubdeviceHandleProvided); +} + +// genapiGetMemoryMappingDescriptor: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_genapiGetMemoryMappingDescriptor(struct GenericEngineApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), ppMemDesc); +} + +// genapiControlSerialization_Prologue: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_genapiControlSerialization_Prologue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// genapiControlSerialization_Epilogue: virtual inherited (rmres) base (gpures) +void __nvoc_up_thunk_RmResource_genapiControlSerialization_Epilogue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// genapiControl_Prologue: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_genapiControl_Prologue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + 
NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// genapiControl_Epilogue: virtual inherited (rmres) base (gpures) +void __nvoc_up_thunk_RmResource_genapiControl_Epilogue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// genapiCanCopy: virtual inherited (res) base (gpures) +NvBool __nvoc_up_thunk_RsResource_genapiCanCopy(struct GenericEngineApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// genapiIsDuplicate: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_genapiIsDuplicate(struct GenericEngineApi *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate); +} + +// genapiPreDestruct: virtual inherited (res) base (gpures) +void __nvoc_up_thunk_RsResource_genapiPreDestruct(struct GenericEngineApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// genapiControlFilter: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_genapiControlFilter(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// genapiIsPartialUnmapSupported: inline virtual inherited (res) base (gpures) body +NvBool __nvoc_up_thunk_RsResource_genapiIsPartialUnmapSupported(struct GenericEngineApi *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// genapiMapTo: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_genapiMapTo(struct GenericEngineApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// genapiUnmapFrom: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_genapiUnmapFrom(struct GenericEngineApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// genapiGetRefCount: virtual inherited (res) base (gpures) +NvU32 __nvoc_up_thunk_RsResource_genapiGetRefCount(struct GenericEngineApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// 
genapiAddAdditionalDependants: virtual inherited (res) base (gpures) +void __nvoc_up_thunk_RsResource_genapiAddAdditionalDependants(struct RsClient *pClient, struct GenericEngineApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__GenericEngineApi = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_GenericEngineApi(GenericEngineApi *pThis) { + __nvoc_genapiDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GenericEngineApi(GenericEngineApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_GenericEngineApi(GenericEngineApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GenericEngineApi_fail_GpuResource; + __nvoc_init_dataField_GenericEngineApi(pThis); + + status = __nvoc_genapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GenericEngineApi_fail__init; + goto __nvoc_ctor_GenericEngineApi_exit; // Success + +__nvoc_ctor_GenericEngineApi_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_GenericEngineApi_fail_GpuResource: +__nvoc_ctor_GenericEngineApi_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_GenericEngineApi_1(GenericEngineApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_GenericEngineApi_1 + + +// Initialize vtable(s) for 25 virtual method(s). +void __nvoc_init_funcTable_GenericEngineApi(GenericEngineApi *pThis) { + __nvoc_init_funcTable_GenericEngineApi_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__GenericEngineApi(GenericEngineApi *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; // (rmres) super^2 + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; // (gpures) super + pThis->__nvoc_pbase_GenericEngineApi = pThis; // (genapi) this + + // Recurse to superclass initialization function(s). 
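+    // Base-class initialization runs first so that the metadata pointer
+    // assignments below take precedence: they rebind every embedded base to
+    // this class's metadata chain, whose vtable carries the overrides.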
+    __nvoc_init__GpuResource(&pThis->__nvoc_base_GpuResource);
+
+    // Pointer(s) to metadata structure(s)
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__GenericEngineApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__GenericEngineApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource; // (res) super^3
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__GenericEngineApi.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__GenericEngineApi.metadata__GpuResource.metadata__RmResource; // (rmres) super^2
+    pThis->__nvoc_base_GpuResource.__nvoc_metadata_ptr = &__nvoc_metadata__GenericEngineApi.metadata__GpuResource; // (gpures) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__GenericEngineApi; // (genapi) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_GenericEngineApi(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_GenericEngineApi(GenericEngineApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    GenericEngineApi *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(GenericEngineApi), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(GenericEngineApi));
+
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent, if there is one, unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__GenericEngineApi(pThis);
+    status = __nvoc_ctor_GenericEngineApi(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_objCreate_GenericEngineApi_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_GenericEngineApi_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
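+    // (On the heap path below, pThis is freed before returning, which is why
+    // the failing return is annotated for Coverity as not leaking storage.)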
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(GenericEngineApi)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_GenericEngineApi(GenericEngineApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_GenericEngineApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_generic_engine_nvoc.h b/src/nvidia/generated/g_generic_engine_nvoc.h new file mode 100644 index 0000000..122b694 --- /dev/null +++ b/src/nvidia/generated/g_generic_engine_nvoc.h @@ -0,0 +1,330 @@ + +#ifndef _G_GENERIC_ENGINE_NVOC_H_ +#define _G_GENERIC_ENGINE_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once +#include "g_generic_engine_nvoc.h" + +#ifndef _GENERICENGINEAPI_H_ +#define _GENERICENGINEAPI_H_ + +#include "gpu/gpu_resource.h" + +/*! + * RM internal class providing a generic engine API to RM clients (e.g.: + * GF100_SUBDEVICE_GRAPHICS and GF100_SUBDEVICE_FB). Classes are primarily used + * for exposing BAR0 mappings and controls. + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_GENERIC_ENGINE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__GenericEngineApi; +struct NVOC_METADATA__GpuResource; +struct NVOC_VTABLE__GenericEngineApi; + + +struct GenericEngineApi { + + // Metadata starts with RTTI structure. 
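+    // The union that follows lets legacy RTTI-based consumers and newer
+    // metadata-based consumers share one field: NVOC_RTTI is the first member
+    // of NVOC_METADATA__GenericEngineApi, so both pointers alias the same
+    // address, i.e. (const void *)__nvoc_metadata_ptr == (const void *)__nvoc_rtti.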
+ union { + const struct NVOC_METADATA__GenericEngineApi *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct GpuResource __nvoc_base_GpuResource; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^4 + struct RsResource *__nvoc_pbase_RsResource; // res super^3 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^2 + struct GpuResource *__nvoc_pbase_GpuResource; // gpures super + struct GenericEngineApi *__nvoc_pbase_GenericEngineApi; // genapi +}; + + +// Vtable with 25 per-class function pointers +struct NVOC_VTABLE__GenericEngineApi { + NV_STATUS (*__genapiMap__)(struct GenericEngineApi * /*this*/, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); // virtual override (res) base (gpures) + NV_STATUS (*__genapiGetMapAddrSpace__)(struct GenericEngineApi * /*this*/, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual override (gpures) base (gpures) + NV_STATUS (*__genapiControl__)(struct GenericEngineApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual override (res) base (gpures) + NV_STATUS (*__genapiUnmap__)(struct GenericEngineApi * /*this*/, struct CALL_CONTEXT *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures) + NvBool (*__genapiShareCallback__)(struct GenericEngineApi * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__genapiGetRegBaseOffsetAndSize__)(struct GenericEngineApi * /*this*/, struct OBJGPU *, NvU32 *, NvU32 *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__genapiInternalControlForward__)(struct GenericEngineApi * /*this*/, NvU32, void *, NvU32); // virtual inherited (gpures) base (gpures) + NvHandle (*__genapiGetInternalObjectHandle__)(struct GenericEngineApi * /*this*/); // virtual inherited (gpures) base (gpures) + NvBool (*__genapiAccessCallback__)(struct GenericEngineApi * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__genapiGetMemInterMapParams__)(struct GenericEngineApi * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__genapiCheckMemInterUnmap__)(struct GenericEngineApi * /*this*/, NvBool); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__genapiGetMemoryMappingDescriptor__)(struct GenericEngineApi * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__genapiControlSerialization_Prologue__)(struct GenericEngineApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + void (*__genapiControlSerialization_Epilogue__)(struct GenericEngineApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__genapiControl_Prologue__)(struct GenericEngineApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + void (*__genapiControl_Epilogue__)(struct GenericEngineApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + NvBool (*__genapiCanCopy__)(struct GenericEngineApi * /*this*/); // virtual inherited (res) base (gpures) + 
NV_STATUS (*__genapiIsDuplicate__)(struct GenericEngineApi * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (gpures) + void (*__genapiPreDestruct__)(struct GenericEngineApi * /*this*/); // virtual inherited (res) base (gpures) + NV_STATUS (*__genapiControlFilter__)(struct GenericEngineApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (gpures) + NvBool (*__genapiIsPartialUnmapSupported__)(struct GenericEngineApi * /*this*/); // inline virtual inherited (res) base (gpures) body + NV_STATUS (*__genapiMapTo__)(struct GenericEngineApi * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (gpures) + NV_STATUS (*__genapiUnmapFrom__)(struct GenericEngineApi * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (gpures) + NvU32 (*__genapiGetRefCount__)(struct GenericEngineApi * /*this*/); // virtual inherited (res) base (gpures) + void (*__genapiAddAdditionalDependants__)(struct RsClient *, struct GenericEngineApi * /*this*/, RsResourceRef *); // virtual inherited (res) base (gpures) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__GenericEngineApi { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__GpuResource metadata__GpuResource; + const struct NVOC_VTABLE__GenericEngineApi vtable; +}; + +#ifndef __NVOC_CLASS_GenericEngineApi_TYPEDEF__ +#define __NVOC_CLASS_GenericEngineApi_TYPEDEF__ +typedef struct GenericEngineApi GenericEngineApi; +#endif /* __NVOC_CLASS_GenericEngineApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GenericEngineApi +#define __nvoc_class_id_GenericEngineApi 0x4bc329 +#endif /* __nvoc_class_id_GenericEngineApi */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GenericEngineApi; + +#define __staticCast_GenericEngineApi(pThis) \ + ((pThis)->__nvoc_pbase_GenericEngineApi) + +#ifdef __nvoc_generic_engine_h_disabled +#define __dynamicCast_GenericEngineApi(pThis) ((GenericEngineApi*) NULL) +#else //__nvoc_generic_engine_h_disabled +#define __dynamicCast_GenericEngineApi(pThis) \ + ((GenericEngineApi*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GenericEngineApi))) +#endif //__nvoc_generic_engine_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_GenericEngineApi(GenericEngineApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GenericEngineApi(GenericEngineApi**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_GenericEngineApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_GenericEngineApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define genapiMap_FNPTR(pGenericEngineApi) pGenericEngineApi->__nvoc_metadata_ptr->vtable.__genapiMap__ +#define genapiMap(pGenericEngineApi, pCallContext, pParams, pCpuMapping) genapiMap_DISPATCH(pGenericEngineApi, pCallContext, pParams, pCpuMapping) +#define genapiGetMapAddrSpace_FNPTR(pGenericEngineApi) pGenericEngineApi->__nvoc_metadata_ptr->vtable.__genapiGetMapAddrSpace__ +#define genapiGetMapAddrSpace(pGenericEngineApi, pCallContext, mapFlags, pAddrSpace) genapiGetMapAddrSpace_DISPATCH(pGenericEngineApi, pCallContext, mapFlags, pAddrSpace) +#define genapiControl_FNPTR(pGenericEngineApi) pGenericEngineApi->__nvoc_metadata_ptr->vtable.__genapiControl__ +#define genapiControl(pGenericEngineApi, pCallContext, pParams) genapiControl_DISPATCH(pGenericEngineApi, 
pCallContext, pParams) +#define genapiUnmap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresUnmap__ +#define genapiUnmap(pGpuResource, pCallContext, pCpuMapping) genapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define genapiShareCallback_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresShareCallback__ +#define genapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) genapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define genapiGetRegBaseOffsetAndSize_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetRegBaseOffsetAndSize__ +#define genapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) genapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define genapiInternalControlForward_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresInternalControlForward__ +#define genapiInternalControlForward(pGpuResource, command, pParams, size) genapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define genapiGetInternalObjectHandle_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__ +#define genapiGetInternalObjectHandle(pGpuResource) genapiGetInternalObjectHandle_DISPATCH(pGpuResource) +#define genapiAccessCallback_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define genapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) genapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define genapiGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define genapiGetMemInterMapParams(pRmResource, pParams) genapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define genapiCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define genapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) genapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define genapiGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define genapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) genapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define genapiControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define genapiControlSerialization_Prologue(pResource, pCallContext, pParams) genapiControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define genapiControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define genapiControlSerialization_Epilogue(pResource, pCallContext, pParams) genapiControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define genapiControl_Prologue_FNPTR(pResource) 
pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define genapiControl_Prologue(pResource, pCallContext, pParams) genapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define genapiControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define genapiControl_Epilogue(pResource, pCallContext, pParams) genapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define genapiCanCopy_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define genapiCanCopy(pResource) genapiCanCopy_DISPATCH(pResource) +#define genapiIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define genapiIsDuplicate(pResource, hMemory, pDuplicate) genapiIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define genapiPreDestruct_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define genapiPreDestruct(pResource) genapiPreDestruct_DISPATCH(pResource) +#define genapiControlFilter_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define genapiControlFilter(pResource, pCallContext, pParams) genapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define genapiIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define genapiIsPartialUnmapSupported(pResource) genapiIsPartialUnmapSupported_DISPATCH(pResource) +#define genapiMapTo_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define genapiMapTo(pResource, pParams) genapiMapTo_DISPATCH(pResource, pParams) +#define genapiUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define genapiUnmapFrom(pResource, pParams) genapiUnmapFrom_DISPATCH(pResource, pParams) +#define genapiGetRefCount_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define genapiGetRefCount(pResource) genapiGetRefCount_DISPATCH(pResource) +#define genapiAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define genapiAddAdditionalDependants(pClient, pResource, pReference) genapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NV_STATUS genapiMap_DISPATCH(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGenericEngineApi->__nvoc_metadata_ptr->vtable.__genapiMap__(pGenericEngineApi, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS genapiGetMapAddrSpace_DISPATCH(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return 
pGenericEngineApi->__nvoc_metadata_ptr->vtable.__genapiGetMapAddrSpace__(pGenericEngineApi, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS genapiControl_DISPATCH(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGenericEngineApi->__nvoc_metadata_ptr->vtable.__genapiControl__(pGenericEngineApi, pCallContext, pParams); +} + +static inline NV_STATUS genapiUnmap_DISPATCH(struct GenericEngineApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__genapiUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NvBool genapiShareCallback_DISPATCH(struct GenericEngineApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__genapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS genapiGetRegBaseOffsetAndSize_DISPATCH(struct GenericEngineApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__genapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS genapiInternalControlForward_DISPATCH(struct GenericEngineApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__genapiInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NvHandle genapiGetInternalObjectHandle_DISPATCH(struct GenericEngineApi *pGpuResource) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__genapiGetInternalObjectHandle__(pGpuResource); +} + +static inline NvBool genapiAccessCallback_DISPATCH(struct GenericEngineApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__genapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS genapiGetMemInterMapParams_DISPATCH(struct GenericEngineApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__genapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS genapiCheckMemInterUnmap_DISPATCH(struct GenericEngineApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__genapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS genapiGetMemoryMappingDescriptor_DISPATCH(struct GenericEngineApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__genapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS genapiControlSerialization_Prologue_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__genapiControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void genapiControlSerialization_Epilogue_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__genapiControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS 
genapiControl_Prologue_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__genapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void genapiControl_Epilogue_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__genapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool genapiCanCopy_DISPATCH(struct GenericEngineApi *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__genapiCanCopy__(pResource); +} + +static inline NV_STATUS genapiIsDuplicate_DISPATCH(struct GenericEngineApi *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__genapiIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void genapiPreDestruct_DISPATCH(struct GenericEngineApi *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__genapiPreDestruct__(pResource); +} + +static inline NV_STATUS genapiControlFilter_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__genapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool genapiIsPartialUnmapSupported_DISPATCH(struct GenericEngineApi *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__genapiIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS genapiMapTo_DISPATCH(struct GenericEngineApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__genapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS genapiUnmapFrom_DISPATCH(struct GenericEngineApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__genapiUnmapFrom__(pResource, pParams); +} + +static inline NvU32 genapiGetRefCount_DISPATCH(struct GenericEngineApi *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__genapiGetRefCount__(pResource); +} + +static inline void genapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GenericEngineApi *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__genapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +NV_STATUS genapiMap_IMPL(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); + +NV_STATUS genapiGetMapAddrSpace_IMPL(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); + +NV_STATUS genapiControl_IMPL(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +NV_STATUS genapiConstruct_IMPL(struct GenericEngineApi *arg_pGenericEngineApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_genapiConstruct(arg_pGenericEngineApi, arg_pCallContext, arg_pParams) genapiConstruct_IMPL(arg_pGenericEngineApi, arg_pCallContext, arg_pParams) +void genapiDestruct_IMPL(struct GenericEngineApi *pGenericEngineApi); + +#define __nvoc_genapiDestruct(pGenericEngineApi) genapiDestruct_IMPL(pGenericEngineApi) +#undef PRIVATE_FIELD + + +#endif // _GENERICENGINEAPI_H_ + +#ifdef __cplusplus +} // extern "C" 
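+// A typical call path through the wrapper macros above, for illustration
+// (variable names hypothetical): genapiControl(pGenApi, pCallContext, pParams)
+// expands to genapiControl_DISPATCH(), which invokes the __genapiControl__
+// slot of the per-class vtable reached through __nvoc_metadata_ptr.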
+#endif + +#endif // _G_GENERIC_ENGINE_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_access_nvoc.c b/src/nvidia/generated/g_gpu_access_nvoc.c new file mode 100644 index 0000000..3e87705 --- /dev/null +++ b/src/nvidia/generated/g_gpu_access_nvoc.c @@ -0,0 +1,568 @@ +#define NVOC_GPU_ACCESS_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_access_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x40549c = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_IoAperture; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RegisterAperture; + +// Forward declarations for IoAperture +void __nvoc_init__Object(Object*); +void __nvoc_init__RegisterAperture(RegisterAperture*); +void __nvoc_init__IoAperture(IoAperture*); +void __nvoc_init_funcTable_IoAperture(IoAperture*); +NV_STATUS __nvoc_ctor_IoAperture(IoAperture*, struct IoAperture *arg_pParentAperture, OBJGPU *arg_pGpu, NvU32 arg_deviceIndex, NvU32 arg_deviceInstance, DEVICE_MAPPING *arg_pMapping, NvU32 arg_mappingStartAddr, NvU32 arg_offset, NvU32 arg_length); +void __nvoc_init_dataField_IoAperture(IoAperture*); +void __nvoc_dtor_IoAperture(IoAperture*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__IoAperture; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__IoAperture; + +// Down-thunk(s) to bridge IoAperture methods from ancestors (if any) +NvU8 __nvoc_down_thunk_IoAperture_regaprtReadReg08(struct RegisterAperture *pAperture, NvU32 addr); // this +NvU16 __nvoc_down_thunk_IoAperture_regaprtReadReg16(struct RegisterAperture *pAperture, NvU32 addr); // this +NvU32 __nvoc_down_thunk_IoAperture_regaprtReadReg32(struct RegisterAperture *pAperture, NvU32 addr); // this +void __nvoc_down_thunk_IoAperture_regaprtWriteReg08(struct RegisterAperture *pAperture, NvU32 addr, NvV8 value); // this +void __nvoc_down_thunk_IoAperture_regaprtWriteReg16(struct RegisterAperture *pAperture, NvU32 addr, NvV16 value); // this +void __nvoc_down_thunk_IoAperture_regaprtWriteReg32(struct RegisterAperture *pAperture, NvU32 addr, NvV32 value); // this +void __nvoc_down_thunk_IoAperture_regaprtWriteReg32Uc(struct RegisterAperture *pAperture, NvU32 addr, NvV32 value); // this +NvBool __nvoc_down_thunk_IoAperture_regaprtIsRegValid(struct RegisterAperture *pAperture, NvU32 addr); // this + +// Up-thunk(s) to bridge IoAperture methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_IoAperture = +{ + /*classInfo=*/ { + /*size=*/ sizeof(IoAperture), + /*classId=*/ classId(IoAperture), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "IoAperture", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_IoAperture, + /*pCastInfo=*/ &__nvoc_castinfo__IoAperture, + /*pExportInfo=*/ &__nvoc_export_info__IoAperture +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__IoAperture __nvoc_metadata__IoAperture = { + .rtti.pClassDef = &__nvoc_class_def_IoAperture, // (ioaprt) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_IoAperture, + .rtti.offset = 0, + 
.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(IoAperture, __nvoc_base_Object), + .metadata__RegisterAperture.rtti.pClassDef = &__nvoc_class_def_RegisterAperture, // (regaprt) super + .metadata__RegisterAperture.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RegisterAperture.rtti.offset = NV_OFFSETOF(IoAperture, __nvoc_base_RegisterAperture), + + .vtable.__ioaprtReadReg08__ = &ioaprtReadReg08_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtReadReg08__ = &__nvoc_down_thunk_IoAperture_regaprtReadReg08, // pure virtual + .vtable.__ioaprtReadReg16__ = &ioaprtReadReg16_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtReadReg16__ = &__nvoc_down_thunk_IoAperture_regaprtReadReg16, // pure virtual + .vtable.__ioaprtReadReg32__ = &ioaprtReadReg32_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtReadReg32__ = &__nvoc_down_thunk_IoAperture_regaprtReadReg32, // pure virtual + .vtable.__ioaprtWriteReg08__ = &ioaprtWriteReg08_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtWriteReg08__ = &__nvoc_down_thunk_IoAperture_regaprtWriteReg08, // pure virtual + .vtable.__ioaprtWriteReg16__ = &ioaprtWriteReg16_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtWriteReg16__ = &__nvoc_down_thunk_IoAperture_regaprtWriteReg16, // pure virtual + .vtable.__ioaprtWriteReg32__ = &ioaprtWriteReg32_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtWriteReg32__ = &__nvoc_down_thunk_IoAperture_regaprtWriteReg32, // pure virtual + .vtable.__ioaprtWriteReg32Uc__ = &ioaprtWriteReg32Uc_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtWriteReg32Uc__ = &__nvoc_down_thunk_IoAperture_regaprtWriteReg32Uc, // pure virtual + .vtable.__ioaprtIsRegValid__ = &ioaprtIsRegValid_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtIsRegValid__ = &__nvoc_down_thunk_IoAperture_regaprtIsRegValid, // pure virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__IoAperture = { + .numRelatives = 3, + .relatives = { + &__nvoc_metadata__IoAperture.rtti, // [0]: (ioaprt) this + &__nvoc_metadata__IoAperture.metadata__Object.rtti, // [1]: (obj) super + &__nvoc_metadata__IoAperture.metadata__RegisterAperture.rtti, // [2]: (regaprt) super + } +}; + +// 8 down-thunk(s) defined to bridge methods in IoAperture from superclasses + +// ioaprtReadReg08: virtual override (regaprt) base (regaprt) +NvU8 __nvoc_down_thunk_IoAperture_regaprtReadReg08(struct RegisterAperture *pAperture, NvU32 addr) { + return ioaprtReadReg08((struct IoAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(IoAperture, __nvoc_base_RegisterAperture)), addr); +} + +// ioaprtReadReg16: virtual override (regaprt) base (regaprt) +NvU16 __nvoc_down_thunk_IoAperture_regaprtReadReg16(struct RegisterAperture *pAperture, NvU32 addr) { + return ioaprtReadReg16((struct IoAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(IoAperture, __nvoc_base_RegisterAperture)), addr); +} + +// ioaprtReadReg32: virtual override (regaprt) base (regaprt) +NvU32 __nvoc_down_thunk_IoAperture_regaprtReadReg32(struct RegisterAperture *pAperture, NvU32 addr) { + 
return ioaprtReadReg32((struct IoAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(IoAperture, __nvoc_base_RegisterAperture)), addr); +} + +// ioaprtWriteReg08: virtual override (regaprt) base (regaprt) +void __nvoc_down_thunk_IoAperture_regaprtWriteReg08(struct RegisterAperture *pAperture, NvU32 addr, NvV8 value) { + ioaprtWriteReg08((struct IoAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(IoAperture, __nvoc_base_RegisterAperture)), addr, value); +} + +// ioaprtWriteReg16: virtual override (regaprt) base (regaprt) +void __nvoc_down_thunk_IoAperture_regaprtWriteReg16(struct RegisterAperture *pAperture, NvU32 addr, NvV16 value) { + ioaprtWriteReg16((struct IoAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(IoAperture, __nvoc_base_RegisterAperture)), addr, value); +} + +// ioaprtWriteReg32: virtual override (regaprt) base (regaprt) +void __nvoc_down_thunk_IoAperture_regaprtWriteReg32(struct RegisterAperture *pAperture, NvU32 addr, NvV32 value) { + ioaprtWriteReg32((struct IoAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(IoAperture, __nvoc_base_RegisterAperture)), addr, value); +} + +// ioaprtWriteReg32Uc: virtual override (regaprt) base (regaprt) +void __nvoc_down_thunk_IoAperture_regaprtWriteReg32Uc(struct RegisterAperture *pAperture, NvU32 addr, NvV32 value) { + ioaprtWriteReg32Uc((struct IoAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(IoAperture, __nvoc_base_RegisterAperture)), addr, value); +} + +// ioaprtIsRegValid: virtual override (regaprt) base (regaprt) +NvBool __nvoc_down_thunk_IoAperture_regaprtIsRegValid(struct RegisterAperture *pAperture, NvU32 addr) { + return ioaprtIsRegValid((struct IoAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(IoAperture, __nvoc_base_RegisterAperture)), addr); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__IoAperture = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_RegisterAperture(RegisterAperture*); +void __nvoc_dtor_IoAperture(IoAperture *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + __nvoc_dtor_RegisterAperture(&pThis->__nvoc_base_RegisterAperture); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_IoAperture(IoAperture *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_RegisterAperture(RegisterAperture* ); +NV_STATUS __nvoc_ctor_IoAperture(IoAperture *pThis, struct IoAperture * arg_pParentAperture, OBJGPU * arg_pGpu, NvU32 arg_deviceIndex, NvU32 arg_deviceInstance, DEVICE_MAPPING * arg_pMapping, NvU32 arg_mappingStartAddr, NvU32 arg_offset, NvU32 arg_length) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_IoAperture_fail_Object; + status = __nvoc_ctor_RegisterAperture(&pThis->__nvoc_base_RegisterAperture); + if (status != NV_OK) goto __nvoc_ctor_IoAperture_fail_RegisterAperture; + __nvoc_init_dataField_IoAperture(pThis); + + status = __nvoc_ioaprtConstruct(pThis, arg_pParentAperture, arg_pGpu, arg_deviceIndex, arg_deviceInstance, arg_pMapping, arg_mappingStartAddr, arg_offset, arg_length); + if (status != NV_OK) goto __nvoc_ctor_IoAperture_fail__init; + goto __nvoc_ctor_IoAperture_exit; // Success + +__nvoc_ctor_IoAperture_fail__init: + __nvoc_dtor_RegisterAperture(&pThis->__nvoc_base_RegisterAperture); +__nvoc_ctor_IoAperture_fail_RegisterAperture: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_IoAperture_fail_Object: 
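+    // The fail labels are ordered so control falls through the destructor
+    // call of every base that was constructed before the failure, unwinding
+    // in reverse construction order (RegisterAperture, then Object).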
+__nvoc_ctor_IoAperture_exit:
+
+    return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_IoAperture_1(IoAperture *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+} // End __nvoc_init_funcTable_IoAperture_1
+
+
+// Initialize vtable(s) for 8 virtual method(s).
+void __nvoc_init_funcTable_IoAperture(IoAperture *pThis) {
+    __nvoc_init_funcTable_IoAperture_1(pThis);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__IoAperture(IoAperture *pThis) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super
+    pThis->__nvoc_pbase_RegisterAperture = &pThis->__nvoc_base_RegisterAperture; // (regaprt) super
+    pThis->__nvoc_pbase_IoAperture = pThis; // (ioaprt) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__Object(&pThis->__nvoc_base_Object);
+    __nvoc_init__RegisterAperture(&pThis->__nvoc_base_RegisterAperture);
+
+    // Pointer(s) to metadata structure(s)
+    pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__IoAperture.metadata__Object; // (obj) super
+    pThis->__nvoc_base_RegisterAperture.__nvoc_metadata_ptr = &__nvoc_metadata__IoAperture.metadata__RegisterAperture; // (regaprt) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__IoAperture; // (ioaprt) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_IoAperture(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_IoAperture(IoAperture **ppThis, Dynamic *pParent, NvU32 createFlags, struct IoAperture * arg_pParentAperture, OBJGPU * arg_pGpu, NvU32 arg_deviceIndex, NvU32 arg_deviceInstance, DEVICE_MAPPING * arg_pMapping, NvU32 arg_mappingStartAddr, NvU32 arg_offset, NvU32 arg_length)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    IoAperture *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(IoAperture), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(IoAperture));
+
+    pThis->__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent, if there is one, unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__IoAperture(pThis);
+    status = __nvoc_ctor_IoAperture(pThis, arg_pParentAperture, arg_pGpu, arg_deviceIndex, arg_deviceInstance, arg_pMapping, arg_mappingStartAddr, arg_offset, arg_length);
+    if (status != NV_OK) goto __nvoc_objCreate_IoAperture_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_IoAperture_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
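+    // (With NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT the caller owns the
+    // storage, so it is only scrubbed back to zero; heap-backed objects are
+    // freed and the caller's pointer cleared.)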
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(IoAperture)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_IoAperture(IoAperture **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct IoAperture * arg_pParentAperture = va_arg(args, struct IoAperture *); + OBJGPU * arg_pGpu = va_arg(args, OBJGPU *); + NvU32 arg_deviceIndex = va_arg(args, NvU32); + NvU32 arg_deviceInstance = va_arg(args, NvU32); + DEVICE_MAPPING * arg_pMapping = va_arg(args, DEVICE_MAPPING *); + NvU32 arg_mappingStartAddr = va_arg(args, NvU32); + NvU32 arg_offset = va_arg(args, NvU32); + NvU32 arg_length = va_arg(args, NvU32); + + status = __nvoc_objCreate_IoAperture(ppThis, pParent, createFlags, arg_pParentAperture, arg_pGpu, arg_deviceIndex, arg_deviceInstance, arg_pMapping, arg_mappingStartAddr, arg_offset, arg_length); + + return status; +} + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x6d0f88 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SwBcAperture; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RegisterAperture; + +// Forward declarations for SwBcAperture +void __nvoc_init__Object(Object*); +void __nvoc_init__RegisterAperture(RegisterAperture*); +void __nvoc_init__SwBcAperture(SwBcAperture*); +void __nvoc_init_funcTable_SwBcAperture(SwBcAperture*); +NV_STATUS __nvoc_ctor_SwBcAperture(SwBcAperture*, struct IoAperture *arg_pApertures, NvU32 arg_numApertures); +void __nvoc_init_dataField_SwBcAperture(SwBcAperture*); +void __nvoc_dtor_SwBcAperture(SwBcAperture*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__SwBcAperture; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__SwBcAperture; + +// Down-thunk(s) to bridge SwBcAperture methods from ancestors (if any) +NvU8 __nvoc_down_thunk_SwBcAperture_regaprtReadReg08(struct RegisterAperture *pAperture, NvU32 addr); // this +NvU16 __nvoc_down_thunk_SwBcAperture_regaprtReadReg16(struct RegisterAperture *pAperture, NvU32 addr); // this +NvU32 __nvoc_down_thunk_SwBcAperture_regaprtReadReg32(struct RegisterAperture *pAperture, NvU32 addr); // this +void __nvoc_down_thunk_SwBcAperture_regaprtWriteReg08(struct RegisterAperture *pAperture, NvU32 addr, NvV8 value); // this +void __nvoc_down_thunk_SwBcAperture_regaprtWriteReg16(struct RegisterAperture *pAperture, NvU32 addr, NvV16 value); // this +void __nvoc_down_thunk_SwBcAperture_regaprtWriteReg32(struct RegisterAperture *pAperture, NvU32 addr, NvV32 value); // this +void __nvoc_down_thunk_SwBcAperture_regaprtWriteReg32Uc(struct RegisterAperture *pAperture, NvU32 addr, NvV32 value); // this +NvBool __nvoc_down_thunk_SwBcAperture_regaprtIsRegValid(struct RegisterAperture *pAperture, NvU32 addr); // this + +// Up-thunk(s) to bridge SwBcAperture methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_SwBcAperture = +{ + /*classInfo=*/ { + /*size=*/ sizeof(SwBcAperture), + /*classId=*/ classId(SwBcAperture), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "SwBcAperture", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_SwBcAperture, + /*pCastInfo=*/ &__nvoc_castinfo__SwBcAperture, + /*pExportInfo=*/ &__nvoc_export_info__SwBcAperture +}; + + +// Metadata with per-class RTTI and vtable with 
ancestor(s) +static const struct NVOC_METADATA__SwBcAperture __nvoc_metadata__SwBcAperture = { + .rtti.pClassDef = &__nvoc_class_def_SwBcAperture, // (swbcaprt) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_SwBcAperture, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(SwBcAperture, __nvoc_base_Object), + .metadata__RegisterAperture.rtti.pClassDef = &__nvoc_class_def_RegisterAperture, // (regaprt) super + .metadata__RegisterAperture.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RegisterAperture.rtti.offset = NV_OFFSETOF(SwBcAperture, __nvoc_base_RegisterAperture), + + .vtable.__swbcaprtReadReg08__ = &swbcaprtReadReg08_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtReadReg08__ = &__nvoc_down_thunk_SwBcAperture_regaprtReadReg08, // pure virtual + .vtable.__swbcaprtReadReg16__ = &swbcaprtReadReg16_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtReadReg16__ = &__nvoc_down_thunk_SwBcAperture_regaprtReadReg16, // pure virtual + .vtable.__swbcaprtReadReg32__ = &swbcaprtReadReg32_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtReadReg32__ = &__nvoc_down_thunk_SwBcAperture_regaprtReadReg32, // pure virtual + .vtable.__swbcaprtWriteReg08__ = &swbcaprtWriteReg08_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtWriteReg08__ = &__nvoc_down_thunk_SwBcAperture_regaprtWriteReg08, // pure virtual + .vtable.__swbcaprtWriteReg16__ = &swbcaprtWriteReg16_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtWriteReg16__ = &__nvoc_down_thunk_SwBcAperture_regaprtWriteReg16, // pure virtual + .vtable.__swbcaprtWriteReg32__ = &swbcaprtWriteReg32_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtWriteReg32__ = &__nvoc_down_thunk_SwBcAperture_regaprtWriteReg32, // pure virtual + .vtable.__swbcaprtWriteReg32Uc__ = &swbcaprtWriteReg32Uc_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtWriteReg32Uc__ = &__nvoc_down_thunk_SwBcAperture_regaprtWriteReg32Uc, // pure virtual + .vtable.__swbcaprtIsRegValid__ = &swbcaprtIsRegValid_IMPL, // virtual override (regaprt) base (regaprt) + .metadata__RegisterAperture.vtable.__regaprtIsRegValid__ = &__nvoc_down_thunk_SwBcAperture_regaprtIsRegValid, // pure virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__SwBcAperture = { + .numRelatives = 3, + .relatives = { + &__nvoc_metadata__SwBcAperture.rtti, // [0]: (swbcaprt) this + &__nvoc_metadata__SwBcAperture.metadata__Object.rtti, // [1]: (obj) super + &__nvoc_metadata__SwBcAperture.metadata__RegisterAperture.rtti, // [2]: (regaprt) super + } +}; + +// 8 down-thunk(s) defined to bridge methods in SwBcAperture from superclasses + +// swbcaprtReadReg08: virtual override (regaprt) base (regaprt) +NvU8 __nvoc_down_thunk_SwBcAperture_regaprtReadReg08(struct RegisterAperture *pAperture, NvU32 addr) { + return swbcaprtReadReg08((struct SwBcAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(SwBcAperture, __nvoc_base_RegisterAperture)), addr); +} + +// swbcaprtReadReg16: virtual override (regaprt) base (regaprt) +NvU16 __nvoc_down_thunk_SwBcAperture_regaprtReadReg16(struct RegisterAperture *pAperture, 
NvU32 addr) { + return swbcaprtReadReg16((struct SwBcAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(SwBcAperture, __nvoc_base_RegisterAperture)), addr); +} + +// swbcaprtReadReg32: virtual override (regaprt) base (regaprt) +NvU32 __nvoc_down_thunk_SwBcAperture_regaprtReadReg32(struct RegisterAperture *pAperture, NvU32 addr) { + return swbcaprtReadReg32((struct SwBcAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(SwBcAperture, __nvoc_base_RegisterAperture)), addr); +} + +// swbcaprtWriteReg08: virtual override (regaprt) base (regaprt) +void __nvoc_down_thunk_SwBcAperture_regaprtWriteReg08(struct RegisterAperture *pAperture, NvU32 addr, NvV8 value) { + swbcaprtWriteReg08((struct SwBcAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(SwBcAperture, __nvoc_base_RegisterAperture)), addr, value); +} + +// swbcaprtWriteReg16: virtual override (regaprt) base (regaprt) +void __nvoc_down_thunk_SwBcAperture_regaprtWriteReg16(struct RegisterAperture *pAperture, NvU32 addr, NvV16 value) { + swbcaprtWriteReg16((struct SwBcAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(SwBcAperture, __nvoc_base_RegisterAperture)), addr, value); +} + +// swbcaprtWriteReg32: virtual override (regaprt) base (regaprt) +void __nvoc_down_thunk_SwBcAperture_regaprtWriteReg32(struct RegisterAperture *pAperture, NvU32 addr, NvV32 value) { + swbcaprtWriteReg32((struct SwBcAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(SwBcAperture, __nvoc_base_RegisterAperture)), addr, value); +} + +// swbcaprtWriteReg32Uc: virtual override (regaprt) base (regaprt) +void __nvoc_down_thunk_SwBcAperture_regaprtWriteReg32Uc(struct RegisterAperture *pAperture, NvU32 addr, NvV32 value) { + swbcaprtWriteReg32Uc((struct SwBcAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(SwBcAperture, __nvoc_base_RegisterAperture)), addr, value); +} + +// swbcaprtIsRegValid: virtual override (regaprt) base (regaprt) +NvBool __nvoc_down_thunk_SwBcAperture_regaprtIsRegValid(struct RegisterAperture *pAperture, NvU32 addr) { + return swbcaprtIsRegValid((struct SwBcAperture *)(((unsigned char *) pAperture) - NV_OFFSETOF(SwBcAperture, __nvoc_base_RegisterAperture)), addr); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__SwBcAperture = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_RegisterAperture(RegisterAperture*); +void __nvoc_dtor_SwBcAperture(SwBcAperture *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + __nvoc_dtor_RegisterAperture(&pThis->__nvoc_base_RegisterAperture); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_SwBcAperture(SwBcAperture *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_RegisterAperture(RegisterAperture* ); +NV_STATUS __nvoc_ctor_SwBcAperture(SwBcAperture *pThis, struct IoAperture * arg_pApertures, NvU32 arg_numApertures) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_SwBcAperture_fail_Object; + status = __nvoc_ctor_RegisterAperture(&pThis->__nvoc_base_RegisterAperture); + if (status != NV_OK) goto __nvoc_ctor_SwBcAperture_fail_RegisterAperture; + __nvoc_init_dataField_SwBcAperture(pThis); + + status = __nvoc_swbcaprtConstruct(pThis, arg_pApertures, arg_numApertures); + if (status != NV_OK) goto __nvoc_ctor_SwBcAperture_fail__init; + goto __nvoc_ctor_SwBcAperture_exit; // Success + +__nvoc_ctor_SwBcAperture_fail__init: + 
__nvoc_dtor_RegisterAperture(&pThis->__nvoc_base_RegisterAperture);
+__nvoc_ctor_SwBcAperture_fail_RegisterAperture:
+    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
+__nvoc_ctor_SwBcAperture_fail_Object:
+__nvoc_ctor_SwBcAperture_exit:
+
+    return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_SwBcAperture_1(SwBcAperture *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+} // End __nvoc_init_funcTable_SwBcAperture_1
+
+
+// Initialize vtable(s) for 8 virtual method(s).
+void __nvoc_init_funcTable_SwBcAperture(SwBcAperture *pThis) {
+    __nvoc_init_funcTable_SwBcAperture_1(pThis);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__SwBcAperture(SwBcAperture *pThis) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super
+    pThis->__nvoc_pbase_RegisterAperture = &pThis->__nvoc_base_RegisterAperture; // (regaprt) super
+    pThis->__nvoc_pbase_SwBcAperture = pThis; // (swbcaprt) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__Object(&pThis->__nvoc_base_Object);
+    __nvoc_init__RegisterAperture(&pThis->__nvoc_base_RegisterAperture);
+
+    // Pointer(s) to metadata structure(s)
+    pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__SwBcAperture.metadata__Object; // (obj) super
+    pThis->__nvoc_base_RegisterAperture.__nvoc_metadata_ptr = &__nvoc_metadata__SwBcAperture.metadata__RegisterAperture; // (regaprt) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__SwBcAperture; // (swbcaprt) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_SwBcAperture(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_SwBcAperture(SwBcAperture **ppThis, Dynamic *pParent, NvU32 createFlags, struct IoAperture * arg_pApertures, NvU32 arg_numApertures)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    SwBcAperture *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(SwBcAperture), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(SwBcAperture));
+
+    pThis->__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent, if there is one, unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__SwBcAperture(pThis);
+    status = __nvoc_ctor_SwBcAperture(pThis, arg_pApertures, arg_numApertures);
+    if (status != NV_OK) goto __nvoc_objCreate_SwBcAperture_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_SwBcAperture_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
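+    // (Cleanup mirrors __nvoc_objCreate_IoAperture above: the storage is
+    // re-zeroed for in-place construction or freed otherwise, and the
+    // constructor's failure status is returned.)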
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(SwBcAperture)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_SwBcAperture(SwBcAperture **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct IoAperture * arg_pApertures = va_arg(args, struct IoAperture *); + NvU32 arg_numApertures = va_arg(args, NvU32); + + status = __nvoc_objCreate_SwBcAperture(ppThis, pParent, createFlags, arg_pApertures, arg_numApertures); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_access_nvoc.h b/src/nvidia/generated/g_gpu_access_nvoc.h new file mode 100644 index 0000000..b3f557e --- /dev/null +++ b/src/nvidia/generated/g_gpu_access_nvoc.h @@ -0,0 +1,764 @@ + +#ifndef _G_GPU_ACCESS_NVOC_H_ +#define _G_GPU_ACCESS_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once +#include "g_gpu_access_nvoc.h" + +#ifndef _GPU_ACCESS_H_ +#define _GPU_ACCESS_H_ + +#include "core/core.h" +#include "nvoc/object.h" +#include "ioaccess/ioaccess.h" +#include "gpu/gpu_device_mapping.h" + +// Go straight at the memory or hardware. +#define MEM_RD08(a) (*(const volatile NvU8 *)(a)) +#define MEM_RD16(a) (*(const volatile NvU16 *)(a)) +#define MEM_RD32(a) (*(const volatile NvU32 *)(a)) +#define MEM_RD64(a) (*(const volatile NvU64 *)(a)) +#define MEM_WR08(a, d) do { *(volatile NvU8 *)(a) = (d); } while (0) +#define MEM_WR16(a, d) do { *(volatile NvU16 *)(a) = (d); } while (0) +#define MEM_WR32(a, d) do { *(volatile NvU32 *)(a) = (d); } while (0) +#define MEM_WR64(a, d) do { *(volatile NvU64 *)(a) = (d); } while (0) + +// +// Define the signature of the register filter callback function +// +// flags can be optionally used for filters to decide whether to actually +// touch HW or not. flags should be OR'ed every time a new filter is found. 
(see objgpu.c) +// +typedef void (*GpuWriteRegCallback)(OBJGPU *, void *, NvU32 addr, NvU32 val, NvU32 accessSize, NvU32 flags); +typedef NvU32 (*GpuReadRegCallback)(OBJGPU *, void *, NvU32 addr, NvU32 accessSize, NvU32 flags); + +union GPUHWREG +{ + volatile NvV8 Reg008[1]; + volatile NvV16 Reg016[1]; + volatile NvV32 Reg032[1]; +}; + +typedef union GPUHWREG GPUHWREG; + +// +// Register filter record +// +// If REGISTER_FILTER_FLAGS_READ is set, then that means that the base RegRead +// function will not read the register, so the provided read callback function +// is expected to read the register and return the value. +// +// If REGISTER_FILTER_FLAGS_WRITE is set, then that means that the base RegWrite +// function will not write the register, so the provided callback write function +// is expected to write the given value to the register. +// +// It is an error to specify REGISTER_FILTER_FLAGS_READ and not provide a +// read callback function. +// +// It is an error to specify REGISTER_FILTER_FLAGS_WRITE and not provide a +// write callback function. +// +#define REGISTER_FILTER_FLAGS_READ (NVBIT(0)) +#define REGISTER_FILTER_FLAGS_WRITE (NVBIT(1)) +// filter is in the list but it is invalid and should be removed +#define REGISTER_FILTER_FLAGS_INVALID (NVBIT(2)) + +#define REGISTER_FILTER_FLAGS_VIRTUAL (0) +#define REGISTER_FILTER_FLAGS_READ_WRITE (REGISTER_FILTER_FLAGS_READ | REGISTER_FILTER_FLAGS_WRITE) + +// Do not warn if attempting to add a filter on GSP [CORERM-5356] +#define REGISTER_FILTER_FLAGS_NO_GSP_WARNING (NVBIT(3)) + +typedef struct REGISTER_FILTER REGISTER_FILTER; + +struct REGISTER_FILTER +{ + REGISTER_FILTER *pNext; //!< pointer to next filter + NvU32 flags; //!< attributes of this filter + DEVICE_INDEX devIndex; //!< filter device + NvU32 devInstance; //!< filter device instance + NvU32 rangeStart; //!< filter range start (can overlap) + NvU32 rangeEnd; //!< filter range end (can overlap) + GpuWriteRegCallback pWriteCallback; //!< callback for write + GpuReadRegCallback pReadCallback; //!< callback for read + void *pParam; //!< pointer to param which gets passed to callbacks +}; + +typedef struct { + REGISTER_FILTER *pRegFilterList; // Active filters + REGISTER_FILTER *pRegFilterRecycleList; // Inactive filters + PORT_SPINLOCK * pRegFilterLock; // Thread-safe list management + NvU32 regFilterRefCnt; // Thread-safe list management + NvBool bRegFilterNeedRemove; // Thread-safe list garbage collection +} DEVICE_REGFILTER_INFO; + +typedef struct DEVICE_MAPPING +{ + GPUHWREG *gpuNvAddr; // CPU Virtual Address + RmPhysAddr gpuNvPAddr; // Physical Base Address + NvU32 gpuNvLength; // Length of the Aperture + NvU32 gpuNvSaveLength; + NvU32 gpuDeviceEnum; // Device ID NV_DEVID_* + NvU32 refCount; // refCount for the device map. + DEVICE_REGFILTER_INFO devRegFilterInfo; // register filter range list +} DEVICE_MAPPING; + +typedef struct +{ + // Pointer to GPU linked to this RegisterAccess object + OBJGPU *pGpu; + + // HW register access tools + GPUHWREG *gpuFbAddr; + GPUHWREG *gpuInstAddr; + + // Register access profiling + NvU32 regReadCount; + NvU32 regWriteCount; +} RegisterAccess; + +/*! Init register IO access path */ +NV_STATUS regAccessConstruct(RegisterAccess *, OBJGPU *pGpu); + +/*! Shutdown register IO access path */ +void regAccessDestruct(RegisterAccess *); + +/*! Writes to 8 bit register */ +void regWrite008(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV8); + +/*! 
Writes to 16 bit register */
+void regWrite016(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV16);
+
+/*! Writes to 32 bit register, with thread state on the stack */
+void regWrite032(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV32, THREAD_STATE_NODE *);
+
+/*! Unicast register access, with thread state on the stack */
+void regWrite032Unicast(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV32, THREAD_STATE_NODE *);
+
+/*! Reads from 8 bit register */
+NvU8 regRead008(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32);
+
+/*! Reads from 16 bit register */
+NvU16 regRead016(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32);
+
+/*! Reads from 32 bit register, with thread state on the stack */
+NvU32 regRead032(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, THREAD_STATE_NODE *);
+
+/*! Reads from 32 bit register and checks bit mask, with thread state on the stack */
+NvU32 regCheckRead032(RegisterAccess *, NvU32, NvU32, THREAD_STATE_NODE *);
+
+/*! Reads 32 bit register and polls bit field for specific value */
+NV_STATUS regRead032_AndPoll(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvU32);
+
+/*! Adds a register filter */
+NV_STATUS regAddRegisterFilter(RegisterAccess *, NvU32, DEVICE_INDEX, NvU32, NvU32, NvU32, GpuWriteRegCallback, GpuReadRegCallback, void *, REGISTER_FILTER **);
+
+/*! Removes register filter */
+void regRemoveRegisterFilter(RegisterAccess *, REGISTER_FILTER *);
+
+/*! Check status of read return value for GPU/bus errors */
+void regCheckAndLogReadFailure(RegisterAccess *, NvU32 addr, NvU32 mask, NvU32 value);
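+
+//
+// Illustrative sketch (not part of the generated header): installing a
+// write filter with regAddRegisterFilter() as declared above. The callback
+// name, the register range, and the mapping of the NvU32 arguments onto
+// devInstance/rangeStart/rangeEnd (inferred from the REGISTER_FILTER
+// fields) are assumptions for illustration only.
+//
+// static void myWriteFilter(OBJGPU *pGpu, void *pParam, NvU32 addr,
+//                           NvU32 val, NvU32 accessSize, NvU32 flags)
+// {
+//     // REGISTER_FILTER_FLAGS_WRITE is set, so the base RegWrite path
+//     // does not touch HW; this callback must perform (or deliberately
+//     // drop) the write itself.
+// }
+//
+// REGISTER_FILTER *pFilter;
+// NV_STATUS status = regAddRegisterFilter(
+//     GPU_GET_REGISTER_ACCESS(pGpu), REGISTER_FILTER_FLAGS_WRITE,
+//     DEVICE_INDEX_GPU, 0, /*rangeStart*/ 0x1000, /*rangeEnd*/ 0x1FFF,
+//     myWriteFilter, NULL, /*pParam*/ NULL, &pFilter);
+//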
+//
+// GPU register I/O macros.
+//
+
+//
+// GPU neutral macros typically used for register I/O.
+//
+#define GPU_DRF_SHIFT(drf) ((0?drf) % 32)
+#define GPU_DRF_MASK(drf) (0xFFFFFFFF>>(31-((1?drf) % 32)+((0?drf) % 32)))
+#define GPU_DRF_DEF(d,r,f,c) ((NV ## d ## r ## f ## c)<<GPU_DRF_SHIFT(NV ## d ## r ## f))
+#define GPU_DRF_NUM(d,r,f,n) (((n)&GPU_DRF_MASK(NV ## d ## r ## f))<<GPU_DRF_SHIFT(NV ## d ## r ## f))
+#define GPU_DRF_VAL(d,r,f,v) (((v)>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define GPU_DRF_SHIFTMASK(drf) (GPU_DRF_MASK(drf)<<(GPU_DRF_SHIFT(drf)))
+#define GPU_DRF_WIDTH(drf) ((1?drf) - (0?drf) + 1)
+
+
+// Device independent macros
+// Multiple device instance macros
+
+#define REG_INST_RD08(g,dev,inst,a) regRead008(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a)
+#define REG_INST_RD16(g,dev,inst,a) regRead016(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a)
+#define REG_INST_RD32(g,dev,inst,a) regRead032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, NULL)
+
+#define REG_INST_WR08(g,dev,inst,a,v) regWrite008(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v)
+#define REG_INST_WR16(g,dev,inst,a,v) regWrite016(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v)
+#define REG_INST_WR32(g,dev,inst,a,v) regWrite032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v, NULL)
+#define REG_INST_WR32_UC(g,dev,inst,a,v) regWrite032Unicast(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v, NULL)
+
+#define REG_INST_RD32_EX(g,dev,inst,a,t) regRead032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, t)
+#define REG_INST_WR32_EX(g,dev,inst,a,v,t) regWrite032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v, t)
+
+#define REG_INST_DEVIDX_RD32_EX(g,devidx,inst,a,t) regRead032(GPU_GET_REGISTER_ACCESS(g), devidx, inst, a, t)
+#define REG_INST_DEVIDX_WR32_EX(g,devidx,inst,a,v,t) regWrite032(GPU_GET_REGISTER_ACCESS(g), devidx, inst, a, v, t)
+
+// Get the address of a register given the Aperture and offset.
+#define REG_GET_ADDR(ap, offset) ioaprtGetRegAddr(ap, offset)
+
+// GPU macros defined in terms of DEV_ macros
+#define GPU_REG_RD08(g,a) REG_INST_RD08(g,GPU,0,a)
+#define GPU_REG_RD16(g,a) REG_INST_RD16(g,GPU,0,a)
+#define GPU_REG_RD32(g,a) REG_INST_RD32(g,GPU,0,a)
+
+//
+// These UNCHECKED macros are provided for extenuating circumstances to avoid the 0xbadf
+// sanity checking done by the usual register read utilities and must not be used generally
+//
+//
+#define GPU_REG_RD08_UNCHECKED(g,a) osDevReadReg008(g, gpuGetDeviceMapping(g, DEVICE_INDEX_GPU, 0), a)
+#define GPU_REG_RD32_UNCHECKED(g,a) osDevReadReg032(g, gpuGetDeviceMapping(g, DEVICE_INDEX_GPU, 0), a)
+
+#define GPU_CHECK_REG_RD32(g,a,m) regCheckRead032(GPU_GET_REGISTER_ACCESS(g),a,m,NULL)
+#define GPU_REG_RD32_AND_POLL(g,r,m,v) regRead032_AndPoll(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_GPU, r, m, v)
+
+#define GPU_REG_WR08(g,a,v) REG_INST_WR08(g,GPU,0,a,v)
+#define GPU_REG_WR16(g,a,v) REG_INST_WR16(g,GPU,0,a,v)
+#define GPU_REG_WR32(g,a,v) REG_INST_WR32(g,GPU,0,a,v)
+#define GPU_REG_WR32_UC(g,a,v) REG_INST_WR32_UC(g,GPU,0,a,v)
+
+// GPU macros for SR-IOV
+#define GPU_VREG_RD32(g, a) GPU_REG_RD32(g, g->sriovState.virtualRegPhysOffset + a)
+#define GPU_VREG_WR32(g, a, v) GPU_REG_WR32(g, g->sriovState.virtualRegPhysOffset + a, v)
+#define GPU_VREG_RD32_EX(g,a,t) REG_INST_RD32_EX(g, GPU, 0, g->sriovState.virtualRegPhysOffset + a, t)
+#define GPU_VREG_WR32_EX(g,a,v,t) REG_INST_WR32_EX(g, GPU, 0, g->sriovState.virtualRegPhysOffset + a, v, t)
+#define GPU_VREG_FLD_WR_DRF_DEF(g,d,r,f,c) GPU_VREG_WR32(g, NV##d##r,(GPU_VREG_RD32(g,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
+#define GPU_VREG_RD_DRF(g,d,r,f) (((GPU_VREG_RD32(g, NV ## d ## r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+
+#define VREG_INST_RD32(g,dev,inst,a) regRead032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, g->sriovState.virtualRegPhysOffset + a, NULL)
+#define VREG_INST_WR32(g,dev,inst,a,v) regWrite032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, g->sriovState.virtualRegPhysOffset + a, v, NULL)
+#define GPU_VREG_FLD_WR_DRF_NUM(g,d,r,f,n) VREG_INST_WR32(g,GPU,0,NV##d##r,(VREG_INST_RD32(g,GPU,0,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
+#define GPU_VREG_RD08(g, a) GPU_REG_RD08(g, g->sriovState.virtualRegPhysOffset + a)
+
+#define GPU_VREG_IDX_RD_DRF(g,d,r,i,f) (((GPU_VREG_RD32(g, NV ## d ## r(i)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define GPU_VREG_FLD_IDX_WR_DRF_DEF(g,d,r,i,f,c) GPU_VREG_WR32(g, NV##d##r(i),(GPU_VREG_RD32(g,NV##d##r(i))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
+
+#ifdef GPU_REGISTER_ACCESS_DUMP
+#undef GPU_VREG_RD32
+#undef GPU_VREG_WR32
+#define GPU_VREG_RD32(g, a) gpuRegRd32_dumpinfo(__FUNCTION__,#a,"(VREG)",g, g->sriovState.virtualRegPhysOffset + a)
+#define GPU_VREG_WR32(g, a, v) gpuRegWr32_dumpinfo(__FUNCTION__,#a,"(VREG)",g, g->sriovState.virtualRegPhysOffset + a, v)
+
+#endif // GPU_REGISTER_ACCESS_DUMP
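+
+//
+// Illustrative worked example (not part of the generated header): given a
+// hypothetical field defined as a high:low bit-range pair, e.g.
+//   #define NV_PMC_ENABLE_PGRAPH          12:12
+//   #define NV_PMC_ENABLE_PGRAPH_ENABLED  0x00000001
+// the ternary trick in the GPU_DRF_* macros above selects the endpoints:
+// (0?12:12) evaluates to the low bit and (1?12:12) to the high bit, so
+//   GPU_DRF_SHIFT(NV_PMC_ENABLE_PGRAPH) == 12
+//   GPU_DRF_MASK(NV_PMC_ENABLE_PGRAPH)  == 0x1
+// GPU_DRF_VAL(_PMC, _ENABLE, _PGRAPH, reg32) extracts just that field from
+// a 32-bit value, and GPU_VREG_FLD_WR_DRF_DEF(g, _PMC, _ENABLE, _PGRAPH,
+// _ENABLED) performs a read-modify-write that touches only those bits.
+//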
+
+//
+// Macros for register I/O
+//
+#define GPU_FLD_WR_DRF_NUM(g,d,r,f,n) REG_INST_WR32(g,GPU,0,NV##d##r,(REG_INST_RD32(g,GPU,0,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
+#define GPU_FLD_WR_DRF_DEF(g,d,r,f,c) REG_INST_WR32(g,GPU,0,NV##d##r,(REG_INST_RD32(g,GPU,0,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
+#define GPU_REG_RD_DRF(g,d,r,f) (((GPU_REG_RD32(g, NV ## d ## r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define GPU_FLD_TEST_DRF_DEF(g,d,r,f,c) (GPU_REG_RD_DRF(g, d, r, f) == NV##d##r##f##c)
+#define GPU_FLD_TEST_DRF_NUM(g,d,r,f,n) (GPU_REG_RD_DRF(g, d, r, f) == n)
+#define GPU_FLD_IDX_TEST_DRF_DEF(g,d,r,f,c,i) (GPU_REG_IDX_RD_DRF(g, d, r, i, f) == NV##d##r##f##c)
+#define GPU_FLD_2IDX_TEST_DRF_DEF(g,d,r,f,c,i,j) (GPU_REG_2IDX_RD_DRF(g, d, r, i, j, f) == NV##d##r##f##c)
+
+#define GPU_REG_RD_DRF_EX(g,d,r,f,t) (((GPU_REG_RD32_EX(g, NV ## d ## r, t))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+
+#define GPU_FLD_WR_DRF_NUM_EX(g,d,r,f,n,t) REG_INST_WR32_EX(g,GPU,0,NV##d##r,(REG_INST_RD32_EX(g,GPU,0,NV##d##r,t)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n),t)
+#define GPU_FLD_WR_DRF_DEF_EX(g,d,r,f,c,t) REG_INST_WR32_EX(g,GPU,0,NV##d##r,(REG_INST_RD32_EX(g,GPU,0,NV##d##r,t)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c),t)
+#define GPU_REG_IDX_RD_DRF(g,d,r,i,f) (((GPU_REG_RD32(g, NV ## d ## r(i)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define GPU_REG_2IDX_RD_DRF(g,d,r,i,j,f) (((GPU_REG_RD32(g, NV ## d ## r(i, j)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define GPU_REG_RD_DRF_IDX(g,d,r,f,i) (((GPU_REG_RD32(g, NV ## d ## r))>>GPU_DRF_SHIFT(NV ## d ## r ## f(i)))&GPU_DRF_MASK(NV ## d ## r ## f(i)))
+#define GPU_REG_IDX_OFFSET_RD_DRF(g,d,r,i,o,f) (((GPU_REG_RD32(g, NV ## d ## r(i,o)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+
+//
+// Macros that abstract the use of bif object to access GPU bus config registers
+// This is the preferred set >= NV50
+//
+#define GPU_BUS_CFG_RD32(g,r,d) gpuReadBusConfigReg_HAL(g, r, d)
+#define GPU_BUS_CFG_WR32(g,r,d) gpuWriteBusConfigReg_HAL(g, r, d)
+#define GPU_BUS_CFG_FLD_WR_DRF_DEF(g,x,d,r,f,c) GPU_BUS_CFG_WR32(g, NV##d##r,(x &~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
+
+// Single field (SF) macros
+#define SF_INDEX(sf) ((0?sf)/32)
+#define SF_SHIFT(sf) ((0?sf)&31)
+#define SF_MASK(sf) (0xFFFFFFFF>>(31-(1?sf)+(0?sf)))
+#define SF_SHIFTMASK(sf) (SF_MASK(sf) << SF_SHIFT(sf))
+#define SF_DEF(s,f,c) ((NV ## s ## f ## c)<<SF_SHIFT(NV ## s ## f))
+#define SF_IDX_DEF(s,f,c,i) ((NV ## s ## f ## c)<<SF_SHIFT(NV ## s ## f(i)))
+#define SF_NUM(s,f,n) (((n)&SF_MASK(NV ## s ## f))<<SF_SHIFT(NV ## s ## f))
+#define SF_IDX_NUM(s,f,n,i) (((n)&SF_MASK(NV ## s ## f(i)))<<SF_SHIFT(NV ## s ## f(i)))
+#define SF_VAL(s,f,v) (((v)>>SF_SHIFT(NV ## s ## f))&SF_MASK(NV ## s ## f))
+#define SF_WIDTH(sf) ((1?sf) - (0?sf) + 1)
+// This macro parses multi-word/array defines
+#define SF_ARR32_VAL(s,f,arr) \
+    (((arr)[SF_INDEX(NV ## s ## f)] >> SF_SHIFT(NV ## s ## f)) & SF_MASK(NV ## s ## f))
+#define FLD_SF_DEF(s,f,d,l) ((l)&~(SF_MASK(NV##s##f) << SF_SHIFT(NV##s##f)))| SF_DEF(s,f,d)
+#define FLD_SF_NUM(s,f,n,l) ((l)&~(SF_MASK(NV##s##f) << SF_SHIFT(NV##s##f)))| SF_NUM(s,f,n)
+#define FLD_SF_IDX_DEF(s,f,c,i,l) (((l) & ~SF_SHIFTMASK(NV ## s ## f(i))) | SF_IDX_DEF(s,f,c,i))
+#define FLD_SF_IDX_NUM(s,f,n,i,l) (((l) & ~SF_SHIFTMASK(NV ## s ## f(i))) | SF_IDX_NUM(s,f,n,i))
+
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_GPU_ACCESS_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__IoAperture;
+struct NVOC_METADATA__Object;
+struct NVOC_METADATA__RegisterAperture;
+struct NVOC_VTABLE__IoAperture;
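+
+//
+// Illustrative note (not part of the generated header): NVOC emulates
+// C++-style virtual dispatch in C. IoAperture below embeds its base
+// classes (Object, RegisterAperture) by value, and each instance carries a
+// single __nvoc_metadata_ptr into static per-class metadata, so a wrapper
+// call such as
+//
+//     NvU32 val = ioaprtReadReg32(pAperture, addr);
+//
+// expands (see the dispatch functions further below) to
+//
+//     pAperture->__nvoc_metadata_ptr->vtable.__ioaprtReadReg32__(pAperture, addr);
+//
+// i.e. one pointer chase plus an indirect call, with no function pointers
+// stored per object.
+//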
+
+struct IoAperture {
+
+    // Metadata starts with RTTI structure.
+    union {
+        const struct NVOC_METADATA__IoAperture *__nvoc_metadata_ptr;
+        const struct NVOC_RTTI *__nvoc_rtti;
+    };
+
+    // Parent (i.e. superclass or base class) objects
+    struct Object __nvoc_base_Object;
+    struct RegisterAperture __nvoc_base_RegisterAperture;
+
+    // Ancestor object pointers for `staticCast` feature
+    struct Object *__nvoc_pbase_Object; // obj super
+    struct RegisterAperture *__nvoc_pbase_RegisterAperture; // regaprt super
+    struct IoAperture *__nvoc_pbase_IoAperture; // ioaprt
+
+    // Data members
+    OBJGPU *pGpu;
+    NvU32 deviceIndex;
+    NvU32 deviceInstance;
+    DEVICE_MAPPING *pMapping;
+    NvU32 mappingStartAddr;
+    NvU32 baseAddress;
+    NvU32 length;
+};
+
+
+// Vtable with 8 per-class function pointers
+struct NVOC_VTABLE__IoAperture {
+    NvU8 (*__ioaprtReadReg08__)(struct IoAperture * /*this*/, NvU32); // virtual override (regaprt) base (regaprt)
+    NvU16 (*__ioaprtReadReg16__)(struct IoAperture * /*this*/, NvU32); // virtual override (regaprt) base (regaprt)
+    NvU32 (*__ioaprtReadReg32__)(struct IoAperture * /*this*/, NvU32); // virtual override (regaprt) base (regaprt)
+    void (*__ioaprtWriteReg08__)(struct IoAperture * /*this*/, NvU32, NvV8); // virtual override (regaprt) base (regaprt)
+    void (*__ioaprtWriteReg16__)(struct IoAperture * /*this*/, NvU32, NvV16); // virtual override (regaprt) base (regaprt)
+    void (*__ioaprtWriteReg32__)(struct IoAperture * /*this*/, NvU32, NvV32); // virtual override (regaprt) base (regaprt)
+    void (*__ioaprtWriteReg32Uc__)(struct IoAperture * /*this*/, NvU32, NvV32); // virtual override (regaprt) base (regaprt)
+    NvBool (*__ioaprtIsRegValid__)(struct IoAperture * /*this*/, NvU32); // virtual override (regaprt) base (regaprt)
+};
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__IoAperture {
+    const struct NVOC_RTTI rtti;
+    const struct NVOC_METADATA__Object metadata__Object;
+    const struct NVOC_METADATA__RegisterAperture metadata__RegisterAperture;
+    const struct NVOC_VTABLE__IoAperture vtable;
+};
+
+#ifndef __NVOC_CLASS_IoAperture_TYPEDEF__
+#define __NVOC_CLASS_IoAperture_TYPEDEF__
+typedef struct IoAperture IoAperture;
+#endif /* __NVOC_CLASS_IoAperture_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_IoAperture
+#define __nvoc_class_id_IoAperture 0x40549c
+#endif /* __nvoc_class_id_IoAperture */
+
+// Casting support
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_IoAperture;
+
+#define __staticCast_IoAperture(pThis) \
+    ((pThis)->__nvoc_pbase_IoAperture)
+
+#ifdef __nvoc_gpu_access_h_disabled
+#define __dynamicCast_IoAperture(pThis) ((IoAperture*) NULL)
+#else //__nvoc_gpu_access_h_disabled
+#define __dynamicCast_IoAperture(pThis) \
+    ((IoAperture*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(IoAperture)))
+#endif //__nvoc_gpu_access_h_disabled
+
+NV_STATUS __nvoc_objCreateDynamic_IoAperture(IoAperture**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_IoAperture(IoAperture**, Dynamic*, NvU32, struct IoAperture *arg_pParentAperture, OBJGPU *arg_pGpu, NvU32 arg_deviceIndex, NvU32 arg_deviceInstance, DEVICE_MAPPING *arg_pMapping, NvU32 arg_mappingStartAddr, NvU32 arg_offset, NvU32 arg_length);
+#define __objCreate_IoAperture(ppNewObj, pParent, createFlags, arg_pParentAperture, arg_pGpu, arg_deviceIndex, arg_deviceInstance, arg_pMapping, arg_mappingStartAddr, arg_offset, arg_length) \
+    __nvoc_objCreate_IoAperture((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pParentAperture, arg_pGpu, arg_deviceIndex, arg_deviceInstance, arg_pMapping, arg_mappingStartAddr, arg_offset, arg_length)
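+
+//
+// Illustrative sketch (not part of the generated header): creating an
+// IoAperture through the generated wrapper above. The parent object,
+// parent aperture, mapping, and the offset/length values are hypothetical
+// placeholders.
+//
+// IoAperture *pAperture = NULL;
+// NV_STATUS status = __objCreate_IoAperture(
+//     &pAperture, pParentObj, 0 /*createFlags*/,
+//     pParentAperture, pGpu, DEVICE_INDEX_GPU, 0 /*deviceInstance*/,
+//     pMapping, 0 /*mappingStartAddr*/, 0x0 /*offset*/, 0x1000 /*length*/);
+//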
+
+// Wrapper macros
+#define ioaprtReadReg08_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__ioaprtReadReg08__
+#define ioaprtReadReg08(pAperture, addr) ioaprtReadReg08_DISPATCH(pAperture, addr)
+#define ioaprtReadReg16_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__ioaprtReadReg16__
+#define ioaprtReadReg16(pAperture, addr) ioaprtReadReg16_DISPATCH(pAperture, addr)
+#define ioaprtReadReg32_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__ioaprtReadReg32__
+#define ioaprtReadReg32(pAperture, addr) ioaprtReadReg32_DISPATCH(pAperture, addr)
+#define ioaprtWriteReg08_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__ioaprtWriteReg08__
+#define ioaprtWriteReg08(pAperture, addr, value) ioaprtWriteReg08_DISPATCH(pAperture, addr, value)
+#define ioaprtWriteReg16_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__ioaprtWriteReg16__
+#define ioaprtWriteReg16(pAperture, addr, value) ioaprtWriteReg16_DISPATCH(pAperture, addr, value)
+#define ioaprtWriteReg32_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__ioaprtWriteReg32__
+#define ioaprtWriteReg32(pAperture, addr, value) ioaprtWriteReg32_DISPATCH(pAperture, addr, value)
+#define ioaprtWriteReg32Uc_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__ioaprtWriteReg32Uc__
+#define ioaprtWriteReg32Uc(pAperture, addr, value) ioaprtWriteReg32Uc_DISPATCH(pAperture, addr, value)
+#define ioaprtIsRegValid_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__ioaprtIsRegValid__
+#define ioaprtIsRegValid(pAperture, addr) ioaprtIsRegValid_DISPATCH(pAperture, addr)
+
+// Dispatch functions
+static inline NvU8 ioaprtReadReg08_DISPATCH(struct IoAperture *pAperture, NvU32 addr) {
+    return pAperture->__nvoc_metadata_ptr->vtable.__ioaprtReadReg08__(pAperture, addr);
+}
+
+static inline NvU16 ioaprtReadReg16_DISPATCH(struct IoAperture *pAperture, NvU32 addr) {
+    return pAperture->__nvoc_metadata_ptr->vtable.__ioaprtReadReg16__(pAperture, addr);
+}
+
+static inline NvU32 ioaprtReadReg32_DISPATCH(struct IoAperture *pAperture, NvU32 addr) {
+    return pAperture->__nvoc_metadata_ptr->vtable.__ioaprtReadReg32__(pAperture, addr);
+}
+
+static inline void ioaprtWriteReg08_DISPATCH(struct IoAperture *pAperture, NvU32 addr, NvV8 value) {
+    pAperture->__nvoc_metadata_ptr->vtable.__ioaprtWriteReg08__(pAperture, addr, value);
+}
+
+static inline void ioaprtWriteReg16_DISPATCH(struct IoAperture *pAperture, NvU32 addr, NvV16 value) {
+    pAperture->__nvoc_metadata_ptr->vtable.__ioaprtWriteReg16__(pAperture, addr, value);
+}
+
+static inline void ioaprtWriteReg32_DISPATCH(struct IoAperture *pAperture, NvU32 addr, NvV32 value) {
+    pAperture->__nvoc_metadata_ptr->vtable.__ioaprtWriteReg32__(pAperture, addr, value);
+}
+
+static inline void ioaprtWriteReg32Uc_DISPATCH(struct IoAperture *pAperture, NvU32 addr, NvV32 value) {
+    pAperture->__nvoc_metadata_ptr->vtable.__ioaprtWriteReg32Uc__(pAperture, addr, value);
+}
+
+static inline NvBool ioaprtIsRegValid_DISPATCH(struct IoAperture *pAperture, NvU32 addr) {
+    return pAperture->__nvoc_metadata_ptr->vtable.__ioaprtIsRegValid__(pAperture, addr);
+}
+
+NvU8 ioaprtReadReg08_IMPL(struct IoAperture *pAperture, NvU32 addr);
+
+NvU16 ioaprtReadReg16_IMPL(struct IoAperture *pAperture, NvU32 addr);
+
+NvU32 ioaprtReadReg32_IMPL(struct IoAperture *pAperture, NvU32 addr);
+
+void ioaprtWriteReg08_IMPL(struct IoAperture *pAperture, NvU32 addr, NvV8 value);
+
+void ioaprtWriteReg16_IMPL(struct IoAperture *pAperture, NvU32 addr, NvV16 value);
+
+void ioaprtWriteReg32_IMPL(struct IoAperture *pAperture, NvU32 addr, NvV32 value);
+
+void
ioaprtWriteReg32Uc_IMPL(struct IoAperture *pAperture, NvU32 addr, NvV32 value); + +NvBool ioaprtIsRegValid_IMPL(struct IoAperture *pAperture, NvU32 addr); + +static inline NvU32 ioaprtGetRegAddr(struct IoAperture *pAperture, NvU32 addr) { + return pAperture->baseAddress + addr; +} + +static inline NvU32 ioaprtGetBaseAddr(struct IoAperture *pAperture) { + return pAperture->baseAddress; +} + +static inline NvU32 ioaprtGetLength(struct IoAperture *pAperture) { + return pAperture->length; +} + +static inline NvBool ioaprtIsInitialized(struct IoAperture *pAperture) { + return pAperture->length != 0; +} + +static inline NvBool ioaprtIsAddressInRange(struct IoAperture *pAperture, NvU32 addr) { + return (addr >= pAperture->baseAddress) && (addr < (pAperture->baseAddress + pAperture->length)); +} + +NV_STATUS ioaprtConstruct_IMPL(struct IoAperture *arg_pAperture, struct IoAperture *arg_pParentAperture, OBJGPU *arg_pGpu, NvU32 arg_deviceIndex, NvU32 arg_deviceInstance, DEVICE_MAPPING *arg_pMapping, NvU32 arg_mappingStartAddr, NvU32 arg_offset, NvU32 arg_length); + +#define __nvoc_ioaprtConstruct(arg_pAperture, arg_pParentAperture, arg_pGpu, arg_deviceIndex, arg_deviceInstance, arg_pMapping, arg_mappingStartAddr, arg_offset, arg_length) ioaprtConstruct_IMPL(arg_pAperture, arg_pParentAperture, arg_pGpu, arg_deviceIndex, arg_deviceInstance, arg_pMapping, arg_mappingStartAddr, arg_offset, arg_length) +#undef PRIVATE_FIELD + + +// In-place construct wrapper +NV_STATUS ioaprtInit(struct IoAperture *pAperture, struct IoAperture *pParentAperture, NvU32 offset, NvU32 length); + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_GPU_ACCESS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__SwBcAperture; +struct NVOC_METADATA__Object; +struct NVOC_METADATA__RegisterAperture; +struct NVOC_VTABLE__SwBcAperture; + + +struct SwBcAperture { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__SwBcAperture *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Object __nvoc_base_Object; + struct RegisterAperture __nvoc_base_RegisterAperture; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct RegisterAperture *__nvoc_pbase_RegisterAperture; // regaprt super + struct SwBcAperture *__nvoc_pbase_SwBcAperture; // swbcaprt + + // Data members + struct IoAperture *pApertures; + NvU32 numApertures; +}; + + +// Vtable with 8 per-class function pointers +struct NVOC_VTABLE__SwBcAperture { + NvU8 (*__swbcaprtReadReg08__)(struct SwBcAperture * /*this*/, NvU32); // virtual override (regaprt) base (regaprt) + NvU16 (*__swbcaprtReadReg16__)(struct SwBcAperture * /*this*/, NvU32); // virtual override (regaprt) base (regaprt) + NvU32 (*__swbcaprtReadReg32__)(struct SwBcAperture * /*this*/, NvU32); // virtual override (regaprt) base (regaprt) + void (*__swbcaprtWriteReg08__)(struct SwBcAperture * /*this*/, NvU32, NvV8); // virtual override (regaprt) base (regaprt) + void (*__swbcaprtWriteReg16__)(struct SwBcAperture * /*this*/, NvU32, NvV16); // virtual override (regaprt) base (regaprt) + void (*__swbcaprtWriteReg32__)(struct SwBcAperture * /*this*/, NvU32, NvV32); // virtual override (regaprt) base (regaprt) + void (*__swbcaprtWriteReg32Uc__)(struct SwBcAperture * /*this*/, NvU32, NvV32); // virtual override (regaprt) base (regaprt) + NvBool (*__swbcaprtIsRegValid__)(struct SwBcAperture * /*this*/, NvU32); // virtual override (regaprt) base (regaprt) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__SwBcAperture { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; + const struct NVOC_METADATA__RegisterAperture metadata__RegisterAperture; + const struct NVOC_VTABLE__SwBcAperture vtable; +}; + +#ifndef __NVOC_CLASS_SwBcAperture_TYPEDEF__ +#define __NVOC_CLASS_SwBcAperture_TYPEDEF__ +typedef struct SwBcAperture SwBcAperture; +#endif /* __NVOC_CLASS_SwBcAperture_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SwBcAperture +#define __nvoc_class_id_SwBcAperture 0x6d0f88 +#endif /* __nvoc_class_id_SwBcAperture */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SwBcAperture; + +#define __staticCast_SwBcAperture(pThis) \ + ((pThis)->__nvoc_pbase_SwBcAperture) + +#ifdef __nvoc_gpu_access_h_disabled +#define __dynamicCast_SwBcAperture(pThis) ((SwBcAperture*) NULL) +#else //__nvoc_gpu_access_h_disabled +#define __dynamicCast_SwBcAperture(pThis) \ + ((SwBcAperture*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(SwBcAperture))) +#endif //__nvoc_gpu_access_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_SwBcAperture(SwBcAperture**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_SwBcAperture(SwBcAperture**, Dynamic*, NvU32, struct IoAperture *arg_pApertures, NvU32 arg_numApertures); +#define __objCreate_SwBcAperture(ppNewObj, pParent, createFlags, arg_pApertures, arg_numApertures) \ + __nvoc_objCreate_SwBcAperture((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pApertures, arg_numApertures) + + +// Wrapper macros +#define swbcaprtReadReg08_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtReadReg08__ +#define swbcaprtReadReg08(pAperture, addr) swbcaprtReadReg08_DISPATCH(pAperture, addr) +#define swbcaprtReadReg16_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtReadReg16__ +#define swbcaprtReadReg16(pAperture, addr) swbcaprtReadReg16_DISPATCH(pAperture, addr) +#define swbcaprtReadReg32_FNPTR(pAperture) 
pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtReadReg32__ +#define swbcaprtReadReg32(pAperture, addr) swbcaprtReadReg32_DISPATCH(pAperture, addr) +#define swbcaprtWriteReg08_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtWriteReg08__ +#define swbcaprtWriteReg08(pAperture, addr, value) swbcaprtWriteReg08_DISPATCH(pAperture, addr, value) +#define swbcaprtWriteReg16_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtWriteReg16__ +#define swbcaprtWriteReg16(pAperture, addr, value) swbcaprtWriteReg16_DISPATCH(pAperture, addr, value) +#define swbcaprtWriteReg32_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtWriteReg32__ +#define swbcaprtWriteReg32(pAperture, addr, value) swbcaprtWriteReg32_DISPATCH(pAperture, addr, value) +#define swbcaprtWriteReg32Uc_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtWriteReg32Uc__ +#define swbcaprtWriteReg32Uc(pAperture, addr, value) swbcaprtWriteReg32Uc_DISPATCH(pAperture, addr, value) +#define swbcaprtIsRegValid_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtIsRegValid__ +#define swbcaprtIsRegValid(pAperture, addr) swbcaprtIsRegValid_DISPATCH(pAperture, addr) + +// Dispatch functions +static inline NvU8 swbcaprtReadReg08_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr) { + return pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtReadReg08__(pAperture, addr); +} + +static inline NvU16 swbcaprtReadReg16_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr) { + return pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtReadReg16__(pAperture, addr); +} + +static inline NvU32 swbcaprtReadReg32_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr) { + return pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtReadReg32__(pAperture, addr); +} + +static inline void swbcaprtWriteReg08_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr, NvV8 value) { + pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtWriteReg08__(pAperture, addr, value); +} + +static inline void swbcaprtWriteReg16_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr, NvV16 value) { + pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtWriteReg16__(pAperture, addr, value); +} + +static inline void swbcaprtWriteReg32_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr, NvV32 value) { + pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtWriteReg32__(pAperture, addr, value); +} + +static inline void swbcaprtWriteReg32Uc_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr, NvV32 value) { + pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtWriteReg32Uc__(pAperture, addr, value); +} + +static inline NvBool swbcaprtIsRegValid_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr) { + return pAperture->__nvoc_metadata_ptr->vtable.__swbcaprtIsRegValid__(pAperture, addr); +} + +NvU8 swbcaprtReadReg08_IMPL(struct SwBcAperture *pAperture, NvU32 addr); + +NvU16 swbcaprtReadReg16_IMPL(struct SwBcAperture *pAperture, NvU32 addr); + +NvU32 swbcaprtReadReg32_IMPL(struct SwBcAperture *pAperture, NvU32 addr); + +void swbcaprtWriteReg08_IMPL(struct SwBcAperture *pAperture, NvU32 addr, NvV8 value); + +void swbcaprtWriteReg16_IMPL(struct SwBcAperture *pAperture, NvU32 addr, NvV16 value); + +void swbcaprtWriteReg32_IMPL(struct SwBcAperture *pAperture, NvU32 addr, NvV32 value); + +void swbcaprtWriteReg32Uc_IMPL(struct SwBcAperture *pAperture, NvU32 addr, NvV32 value); + +NvBool swbcaprtIsRegValid_IMPL(struct SwBcAperture *pAperture, NvU32 addr); + +NV_STATUS swbcaprtConstruct_IMPL(struct SwBcAperture *arg_pAperture, struct IoAperture *arg_pApertures, 
NvU32 arg_numApertures); + +#define __nvoc_swbcaprtConstruct(arg_pAperture, arg_pApertures, arg_numApertures) swbcaprtConstruct_IMPL(arg_pAperture, arg_pApertures, arg_numApertures) +#undef PRIVATE_FIELD + + +#endif // _GPU_ACCESS_H_ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_GPU_ACCESS_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_arch_nvoc.c b/src/nvidia/generated/g_gpu_arch_nvoc.c new file mode 100644 index 0000000..87d99f2 --- /dev/null +++ b/src/nvidia/generated/g_gpu_arch_nvoc.c @@ -0,0 +1,243 @@ +#define NVOC_GPU_ARCH_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_arch_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x4b33af = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuArch; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuHalspecOwner; + +// Forward declarations for GpuArch +void __nvoc_init__Object(Object*); +void __nvoc_init__GpuHalspecOwner(GpuHalspecOwner*, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + TEGRA_CHIP_TYPE TegraChipHal_tegraType); +void __nvoc_init__GpuArch(GpuArch*, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + TEGRA_CHIP_TYPE TegraChipHal_tegraType); +void __nvoc_init_funcTable_GpuArch(GpuArch*); +NV_STATUS __nvoc_ctor_GpuArch(GpuArch*, NvU32 arg_chipArch, NvU32 arg_chipImpl, NvU32 arg_hidrev, TEGRA_CHIP_TYPE arg_tegraType); +void __nvoc_init_dataField_GpuArch(GpuArch*); +void __nvoc_dtor_GpuArch(GpuArch*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__GpuArch; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__GpuArch; + +// Down-thunk(s) to bridge GpuArch methods from ancestors (if any) + +// Up-thunk(s) to bridge GpuArch methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_GpuArch = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GpuArch), + /*classId=*/ classId(GpuArch), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GpuArch", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuArch, + /*pCastInfo=*/ &__nvoc_castinfo__GpuArch, + /*pExportInfo=*/ &__nvoc_export_info__GpuArch +}; + + +// Metadata with per-class RTTI with ancestor(s) +static const struct NVOC_METADATA__GpuArch __nvoc_metadata__GpuArch = { + .rtti.pClassDef = &__nvoc_class_def_GpuArch, // (gpuarch) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuArch, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(GpuArch, __nvoc_base_Object), + .metadata__GpuHalspecOwner.rtti.pClassDef = &__nvoc_class_def_GpuHalspecOwner, // (gpuhalspecowner) super + .metadata__GpuHalspecOwner.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuHalspecOwner.rtti.offset = NV_OFFSETOF(GpuArch, __nvoc_base_GpuHalspecOwner), +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__GpuArch = { + .numRelatives = 3, + .relatives = { + &__nvoc_metadata__GpuArch.rtti, // [0]: (gpuarch) this + 
&__nvoc_metadata__GpuArch.metadata__Object.rtti, // [1]: (obj) super + &__nvoc_metadata__GpuArch.metadata__GpuHalspecOwner.rtti, // [2]: (gpuhalspecowner) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__GpuArch = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_GpuHalspecOwner(GpuHalspecOwner*); +void __nvoc_dtor_GpuArch(GpuArch *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + __nvoc_dtor_GpuHalspecOwner(&pThis->__nvoc_base_GpuHalspecOwner); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GpuArch(GpuArch *pThis) { + ChipHal *chipHal = &staticCast(pThis, GpuHalspecOwner)->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_GpuHalspecOwner(GpuHalspecOwner* ); +NV_STATUS __nvoc_ctor_GpuArch(GpuArch *pThis, NvU32 arg_chipArch, NvU32 arg_chipImpl, NvU32 arg_hidrev, TEGRA_CHIP_TYPE arg_tegraType) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_GpuArch_fail_Object; + status = __nvoc_ctor_GpuHalspecOwner(&pThis->__nvoc_base_GpuHalspecOwner); + if (status != NV_OK) goto __nvoc_ctor_GpuArch_fail_GpuHalspecOwner; + __nvoc_init_dataField_GpuArch(pThis); + + status = __nvoc_gpuarchConstruct(pThis, arg_chipArch, arg_chipImpl, arg_hidrev, arg_tegraType); + if (status != NV_OK) goto __nvoc_ctor_GpuArch_fail__init; + goto __nvoc_ctor_GpuArch_exit; // Success + +__nvoc_ctor_GpuArch_fail__init: + __nvoc_dtor_GpuHalspecOwner(&pThis->__nvoc_base_GpuHalspecOwner); +__nvoc_ctor_GpuArch_fail_GpuHalspecOwner: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_GpuArch_fail_Object: +__nvoc_ctor_GpuArch_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_GpuArch_1(GpuArch *pThis) { + ChipHal *chipHal = &staticCast(pThis, GpuHalspecOwner)->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} // End __nvoc_init_funcTable_GpuArch_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_GpuArch(GpuArch *pThis) { + __nvoc_init_funcTable_GpuArch_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__GpuArch(GpuArch *pThis, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + TEGRA_CHIP_TYPE TegraChipHal_tegraType) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_GpuHalspecOwner = &pThis->__nvoc_base_GpuHalspecOwner; // (gpuhalspecowner) super + pThis->__nvoc_pbase_GpuArch = pThis; // (gpuarch) this + + // Recurse to superclass initialization function(s). 
+ __nvoc_init__Object(&pThis->__nvoc_base_Object); + __nvoc_init__GpuHalspecOwner(&pThis->__nvoc_base_GpuHalspecOwner, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, TegraChipHal_tegraType); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__GpuArch.metadata__Object; // (obj) super + pThis->__nvoc_base_GpuHalspecOwner.__nvoc_metadata_ptr = &__nvoc_metadata__GpuArch.metadata__GpuHalspecOwner; // (gpuhalspecowner) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__GpuArch; // (gpuarch) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_GpuArch(pThis); +} + +NV_STATUS __nvoc_objCreate_GpuArch(GpuArch **ppThis, Dynamic *pParent, NvU32 createFlags, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + TEGRA_CHIP_TYPE TegraChipHal_tegraType, NvU32 arg_chipArch, NvU32 arg_chipImpl, NvU32 arg_hidrev, TEGRA_CHIP_TYPE arg_tegraType) +{ + NV_STATUS status; + Object *pParentObj = NULL; + GpuArch *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(GpuArch), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(GpuArch)); + + pThis->__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__GpuArch(pThis, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, TegraChipHal_tegraType); + status = __nvoc_ctor_GpuArch(pThis, arg_chipArch, arg_chipImpl, arg_hidrev, arg_tegraType); + if (status != NV_OK) goto __nvoc_objCreate_GpuArch_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_GpuArch_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. 
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(GpuArch)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_GpuArch(GpuArch **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + NvU32 ChipHal_arch = va_arg(args, NvU32); + NvU32 ChipHal_impl = va_arg(args, NvU32); + NvU32 ChipHal_hidrev = va_arg(args, NvU32); + TEGRA_CHIP_TYPE TegraChipHal_tegraType = va_arg(args, TEGRA_CHIP_TYPE); + NvU32 arg_chipArch = va_arg(args, NvU32); + NvU32 arg_chipImpl = va_arg(args, NvU32); + NvU32 arg_hidrev = va_arg(args, NvU32); + TEGRA_CHIP_TYPE arg_tegraType = va_arg(args, TEGRA_CHIP_TYPE); + + status = __nvoc_objCreate_GpuArch(ppThis, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, TegraChipHal_tegraType, arg_chipArch, arg_chipImpl, arg_hidrev, arg_tegraType); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_arch_nvoc.h b/src/nvidia/generated/g_gpu_arch_nvoc.h new file mode 100644 index 0000000..23ef46e --- /dev/null +++ b/src/nvidia/generated/g_gpu_arch_nvoc.h @@ -0,0 +1,206 @@ + +#ifndef _G_GPU_ARCH_NVOC_H_ +#define _G_GPU_ARCH_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once +#include "g_gpu_arch_nvoc.h" + +#ifndef _GPU_ARCH_H_ +#define _GPU_ARCH_H_ + +#include "gpu/gpu_halspec.h" +#include "nvoc/object.h" + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_GPU_ARCH_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__GpuArch; +struct NVOC_METADATA__Object; +struct NVOC_METADATA__GpuHalspecOwner; + + +struct GpuArch { + + // Metadata starts with RTTI structure. 
+ union { + const struct NVOC_METADATA__GpuArch *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct Object __nvoc_base_Object; + struct GpuHalspecOwner __nvoc_base_GpuHalspecOwner; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct GpuHalspecOwner *__nvoc_pbase_GpuHalspecOwner; // gpuhalspecowner super + struct GpuArch *__nvoc_pbase_GpuArch; // gpuarch + + // Data members + NvU32 chipArch; + NvU32 chipImpl; + NvU32 hidrev; + TEGRA_CHIP_TYPE tegraType; +}; + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__GpuArch { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; + const struct NVOC_METADATA__GpuHalspecOwner metadata__GpuHalspecOwner; +}; + +#ifndef __NVOC_CLASS_GpuArch_TYPEDEF__ +#define __NVOC_CLASS_GpuArch_TYPEDEF__ +typedef struct GpuArch GpuArch; +#endif /* __NVOC_CLASS_GpuArch_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuArch +#define __nvoc_class_id_GpuArch 0x4b33af +#endif /* __nvoc_class_id_GpuArch */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuArch; + +#define __staticCast_GpuArch(pThis) \ + ((pThis)->__nvoc_pbase_GpuArch) + +#ifdef __nvoc_gpu_arch_h_disabled +#define __dynamicCast_GpuArch(pThis) ((GpuArch*) NULL) +#else //__nvoc_gpu_arch_h_disabled +#define __dynamicCast_GpuArch(pThis) \ + ((GpuArch*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuArch))) +#endif //__nvoc_gpu_arch_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_GpuArch(GpuArch**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GpuArch(GpuArch**, Dynamic*, NvU32, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + TEGRA_CHIP_TYPE TegraChipHal_tegraType, NvU32 arg_chipArch, NvU32 arg_chipImpl, NvU32 arg_hidrev, TEGRA_CHIP_TYPE arg_tegraType); +#define __objCreate_GpuArch(ppNewObj, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, TegraChipHal_tegraType, arg_chipArch, arg_chipImpl, arg_hidrev, arg_tegraType) \ + __nvoc_objCreate_GpuArch((ppNewObj), staticCast((pParent), Dynamic), (createFlags), ChipHal_arch, ChipHal_impl, ChipHal_hidrev, TegraChipHal_tegraType, arg_chipArch, arg_chipImpl, arg_hidrev, arg_tegraType) + + +// Wrapper macros + +// Dispatch functions +NvU32 gpuarchGetSystemPhysAddrWidth_T234D(struct GpuArch *pGpuArch); + + +#ifdef __nvoc_gpu_arch_h_disabled +static inline NvU32 gpuarchGetSystemPhysAddrWidth(struct GpuArch *pGpuArch) { + NV_ASSERT_FAILED_PRECOMP("GpuArch was disabled!"); + return 0; +} +#else //__nvoc_gpu_arch_h_disabled +#define gpuarchGetSystemPhysAddrWidth(pGpuArch) gpuarchGetSystemPhysAddrWidth_T234D(pGpuArch) +#endif //__nvoc_gpu_arch_h_disabled + +#define gpuarchGetSystemPhysAddrWidth_HAL(pGpuArch) gpuarchGetSystemPhysAddrWidth(pGpuArch) + +static inline NvU32 gpuarchGetDmaAddrWidth_4a4dee(struct GpuArch *pGpuArch) { + return 0; +} + + +#ifdef __nvoc_gpu_arch_h_disabled +static inline NvU32 gpuarchGetDmaAddrWidth(struct GpuArch *pGpuArch) { + NV_ASSERT_FAILED_PRECOMP("GpuArch was disabled!"); + return 0; +} +#else //__nvoc_gpu_arch_h_disabled +#define gpuarchGetDmaAddrWidth(pGpuArch) gpuarchGetDmaAddrWidth_4a4dee(pGpuArch) +#endif //__nvoc_gpu_arch_h_disabled + +#define gpuarchGetDmaAddrWidth_HAL(pGpuArch) gpuarchGetDmaAddrWidth(pGpuArch) + +static inline NvBool gpuarchIsZeroFb_491d52(struct GpuArch *pGpuArch) { + return ((NvBool)(0 != 0)); +} + + +#ifdef __nvoc_gpu_arch_h_disabled +static 
inline NvBool gpuarchIsZeroFb(struct GpuArch *pGpuArch) { + NV_ASSERT_FAILED_PRECOMP("GpuArch was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_arch_h_disabled +#define gpuarchIsZeroFb(pGpuArch) gpuarchIsZeroFb_491d52(pGpuArch) +#endif //__nvoc_gpu_arch_h_disabled + +#define gpuarchIsZeroFb_HAL(pGpuArch) gpuarchIsZeroFb(pGpuArch) + +static inline NvBool gpuarchSupportsIgpuRg_491d52(struct GpuArch *pGpuArch) { + return ((NvBool)(0 != 0)); +} + + +#ifdef __nvoc_gpu_arch_h_disabled +static inline NvBool gpuarchSupportsIgpuRg(struct GpuArch *pGpuArch) { + NV_ASSERT_FAILED_PRECOMP("GpuArch was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_arch_h_disabled +#define gpuarchSupportsIgpuRg(pGpuArch) gpuarchSupportsIgpuRg_491d52(pGpuArch) +#endif //__nvoc_gpu_arch_h_disabled + +#define gpuarchSupportsIgpuRg_HAL(pGpuArch) gpuarchSupportsIgpuRg(pGpuArch) + +NV_STATUS gpuarchConstruct_IMPL(struct GpuArch *arg_pGpuArch, NvU32 arg_chipArch, NvU32 arg_chipImpl, NvU32 arg_hidrev, TEGRA_CHIP_TYPE arg_tegraType); + +#define __nvoc_gpuarchConstruct(arg_pGpuArch, arg_chipArch, arg_chipImpl, arg_hidrev, arg_tegraType) gpuarchConstruct_IMPL(arg_pGpuArch, arg_chipArch, arg_chipImpl, arg_hidrev, arg_tegraType) +#undef PRIVATE_FIELD + + +#endif // _GPU_ARCH_H_ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_GPU_ARCH_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_class_list.c b/src/nvidia/generated/g_gpu_class_list.c new file mode 100644 index 0000000..ffcebfe --- /dev/null +++ b/src/nvidia/generated/g_gpu_class_list.c @@ -0,0 +1,137 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include // NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE + + +const NvU32 * +gpuGetGenericClassList_IMPL(OBJGPU *pGpu, NvU32 *pNumClassDescriptors) +{ + static const NvU32 genericClassDescriptorList[] = { + LOCK_STRESS_OBJECT, + LOCK_TEST_RELAXED_DUP_OBJECT, + NV01_CONTEXT_DMA, + NV01_DEVICE_0, + NV01_EVENT, + NV01_EVENT_KERNEL_CALLBACK, + NV01_EVENT_KERNEL_CALLBACK_EX, + NV01_EVENT_OS_EVENT, + NV01_MEMORY_SYNCPOINT, + NV01_MEMORY_SYSTEM, + NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, + NV01_ROOT, + NV01_ROOT_CLIENT, + NV01_ROOT_NON_PRIV, + NV20_SUBDEVICE_0, + }; + *pNumClassDescriptors = 15; + return genericClassDescriptorList; +} + +const NvU32 * +gpuGetNoEngClassList_T234D(OBJGPU *pGpu, NvU32 *pNumClassDescriptors) +{ + *pNumClassDescriptors = 0; + return NULL; +} + +const CLASSDESCRIPTOR * +gpuGetEngClassDescriptorList_T234D(OBJGPU *pGpu, NvU32 *pNumClassDescriptors) +{ + static const CLASSDESCRIPTOR halT234DClassDescriptorList[] = { + { GF100_HDACODEC, ENG_HDACODEC }, + { IO_VASPACE_A, ENG_INVALID }, + { NV04_DISPLAY_COMMON, ENG_KERNEL_DISPLAY }, + { NVC372_DISPLAY_SW, ENG_KERNEL_DISPLAY }, + { NVC670_DISPLAY, ENG_KERNEL_DISPLAY }, + { NVC671_DISP_SF_USER, ENG_KERNEL_DISPLAY }, + { NVC673_DISP_CAPABILITIES, ENG_KERNEL_DISPLAY }, + { NVC67A_CURSOR_IMM_CHANNEL_PIO, ENG_KERNEL_DISPLAY }, + { NVC67B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC67D_CORE_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC67E_WINDOW_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC77F_ANY_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + }; + *pNumClassDescriptors = NV_ARRAY_ELEMENTS(halT234DClassDescriptorList); + return halT234DClassDescriptorList; +} + +const NvU32 * +gpuGetNoEngClassList_T264D(OBJGPU *pGpu, NvU32 *pNumClassDescriptors) +{ + *pNumClassDescriptors = 0; + return NULL; +} + +const CLASSDESCRIPTOR * +gpuGetEngClassDescriptorList_T264D(OBJGPU *pGpu, NvU32 *pNumClassDescriptors) +{ + static const CLASSDESCRIPTOR halT264DClassDescriptorList[] = { + { GF100_HDACODEC, ENG_HDACODEC }, + { IO_VASPACE_A, ENG_INVALID }, + { NV04_DISPLAY_COMMON, ENG_KERNEL_DISPLAY }, + { NVC372_DISPLAY_SW, ENG_KERNEL_DISPLAY }, + { NVC970_DISPLAY, ENG_KERNEL_DISPLAY }, + { NVC971_DISP_SF_USER, ENG_KERNEL_DISPLAY }, + { NVC973_DISP_CAPABILITIES, ENG_KERNEL_DISPLAY }, + { NVC97A_CURSOR_IMM_CHANNEL_PIO, ENG_KERNEL_DISPLAY }, + { NVC97B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC97D_CORE_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC97E_WINDOW_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + }; + *pNumClassDescriptors = NV_ARRAY_ELEMENTS(halT264DClassDescriptorList); + return halT264DClassDescriptorList; +} + +const NvU32 * +gpuGetNoEngClassList_T256D(OBJGPU *pGpu, NvU32 *pNumClassDescriptors) +{ + *pNumClassDescriptors = 0; + return NULL; +} + +const CLASSDESCRIPTOR * +gpuGetEngClassDescriptorList_T256D(OBJGPU *pGpu, NvU32 *pNumClassDescriptors) +{ + static const CLASSDESCRIPTOR halT256DClassDescriptorList[] = { + { GF100_HDACODEC, ENG_HDACODEC }, + { IO_VASPACE_A, ENG_INVALID }, + { NV04_DISPLAY_COMMON, ENG_KERNEL_DISPLAY }, + { NVC372_DISPLAY_SW, ENG_KERNEL_DISPLAY }, + { NVCC70_DISPLAY, ENG_KERNEL_DISPLAY }, + { NVCC71_DISP_SF_USER, ENG_KERNEL_DISPLAY }, + { NVCC73_DISP_CAPABILITIES, ENG_KERNEL_DISPLAY }, + { NVCC7A_CURSOR_IMM_CHANNEL_PIO, ENG_KERNEL_DISPLAY }, + { NVCC7B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVCC7D_CORE_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVCC7E_WINDOW_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + }; + *pNumClassDescriptors = NV_ARRAY_ELEMENTS(halT256DClassDescriptorList); + return 
halT256DClassDescriptorList; +} + +ct_assert(NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE >= (15 /* generic */ + 12 /* T234D */)); diff --git a/src/nvidia/generated/g_gpu_db_nvoc.c b/src/nvidia/generated/g_gpu_db_nvoc.c new file mode 100644 index 0000000..0eea743 --- /dev/null +++ b/src/nvidia/generated/g_gpu_db_nvoc.c @@ -0,0 +1,204 @@ +#define NVOC_GPU_DB_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_db_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xcdd250 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuDb; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +// Forward declarations for GpuDb +void __nvoc_init__Object(Object*); +void __nvoc_init__GpuDb(GpuDb*); +void __nvoc_init_funcTable_GpuDb(GpuDb*); +NV_STATUS __nvoc_ctor_GpuDb(GpuDb*); +void __nvoc_init_dataField_GpuDb(GpuDb*); +void __nvoc_dtor_GpuDb(GpuDb*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__GpuDb; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__GpuDb; + +// Down-thunk(s) to bridge GpuDb methods from ancestors (if any) + +// Up-thunk(s) to bridge GpuDb methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_GpuDb = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GpuDb), + /*classId=*/ classId(GpuDb), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GpuDb", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuDb, + /*pCastInfo=*/ &__nvoc_castinfo__GpuDb, + /*pExportInfo=*/ &__nvoc_export_info__GpuDb +}; + + +// Metadata with per-class RTTI with ancestor(s) +static const struct NVOC_METADATA__GpuDb __nvoc_metadata__GpuDb = { + .rtti.pClassDef = &__nvoc_class_def_GpuDb, // (gpudb) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuDb, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(GpuDb, __nvoc_base_Object), +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__GpuDb = { + .numRelatives = 2, + .relatives = { + &__nvoc_metadata__GpuDb.rtti, // [0]: (gpudb) this + &__nvoc_metadata__GpuDb.metadata__Object.rtti, // [1]: (obj) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__GpuDb = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_GpuDb(GpuDb *pThis) { + __nvoc_gpudbDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GpuDb(GpuDb *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_GpuDb(GpuDb *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_GpuDb_fail_Object; + __nvoc_init_dataField_GpuDb(pThis); + + status = __nvoc_gpudbConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_GpuDb_fail__init; + goto __nvoc_ctor_GpuDb_exit; // Success + +__nvoc_ctor_GpuDb_fail__init: + 
__nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_GpuDb_fail_Object: +__nvoc_ctor_GpuDb_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_GpuDb_1(GpuDb *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_GpuDb_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_GpuDb(GpuDb *pThis) { + __nvoc_init_funcTable_GpuDb_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__GpuDb(GpuDb *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_GpuDb = pThis; // (gpudb) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Object(&pThis->__nvoc_base_Object); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__GpuDb.metadata__Object; // (obj) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__GpuDb; // (gpudb) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_GpuDb(pThis); +} + +NV_STATUS __nvoc_objCreate_GpuDb(GpuDb **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + GpuDb *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(GpuDb), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(GpuDb)); + + pThis->__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__GpuDb(pThis); + status = __nvoc_ctor_GpuDb(pThis); + if (status != NV_OK) goto __nvoc_objCreate_GpuDb_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_GpuDb_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(GpuDb)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_GpuDb(GpuDb **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_GpuDb(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_db_nvoc.h b/src/nvidia/generated/g_gpu_db_nvoc.h new file mode 100644 index 0000000..44e1c9b --- /dev/null +++ b/src/nvidia/generated/g_gpu_db_nvoc.h @@ -0,0 +1,205 @@ + +#ifndef _G_GPU_DB_NVOC_H_ +#define _G_GPU_DB_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
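The GpuDb boilerplate above shows the construction contract NVOC generates for every class in this patch: the base Object is constructed first, derived state second, and a failure unwinds only what was already built, in reverse order, through the goto labels. A minimal sketch of that idiom in plain C, with hypothetical Base/Derived types standing in for Object/GpuDb (not the generated code itself):

    #include <stdlib.h>

    typedef int STATUS;
    #define OK  0
    #define ERR 1

    typedef struct { int refCount; } Base;
    typedef struct { Base base; void *pPayload; } Derived;

    static STATUS baseCtor(Base *pBase) { pBase->refCount = 1; return OK; }
    static void   baseDtor(Base *pBase) { pBase->refCount = 0; }

    /* Mirrors the shape of __nvoc_ctor_GpuDb: super first, derived second,
     * and the fail labels destroy exactly what was built before the error. */
    static STATUS derivedCtor(Derived *pThis)
    {
        STATUS status = baseCtor(&pThis->base);
        if (status != OK) goto fail_base;

        pThis->pPayload = malloc(64);
        if (pThis->pPayload == NULL) { status = ERR; goto fail_payload; }

        return OK;

    fail_payload:
        baseDtor(&pThis->base);   /* undo the base ctor only */
    fail_base:
        return status;
    }

The same shape repeats below for OBJGPUGRP, the halspec owners, and GpuManagementApi; only the class-specific construct/destruct calls differ.
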
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once +#include "g_gpu_db_nvoc.h" + +#ifndef GPU_DB_H +#define GPU_DB_H + +#include "core/core.h" +#include "nvoc/object.h" +#include "containers/list.h" +#include "gpu/gpu_uuid.h" +#include "gpu/gpu_shared_data_map.h" + +typedef struct NBADDR NBADDR; + +// **************************************************************************** +// Type Definitions +// **************************************************************************** +// +// The GPU database object is used to encapsulate the GPUINFO +// + +/*! + * @brief Compute policy data for a GPU + * Saved policy information for a GPU that can be retrieved later + */ +typedef struct GPU_COMPUTE_POLICY_INFO +{ + // + // Timeslice config for channels/TSG's on a runlist. The timeslice configs + // are restricted to four levels : default, short, medium and long. + // + NvU32 timeslice; + // Future policies to be added here +} GPU_COMPUTE_POLICY_INFO; + +typedef struct +{ + NvU32 domain; + NvU8 bus; + NvU8 device; + NvU8 function; + NvBool bValid; +} PCI_PORT_INFO; + +#define GPUDB_CLK_PROP_TOP_POLS_COUNT 1 + +/*! + * @brief Clock Propagation Topology Policies control data + */ +typedef struct +{ + NvU8 chosenIdx[GPUDB_CLK_PROP_TOP_POLS_COUNT]; +} GPU_CLK_PROP_TOP_POLS_CONTROL; + +typedef struct +{ + NvU8 uuid[RM_SHA1_GID_SIZE]; + PCI_PORT_INFO pciPortInfo; + PCI_PORT_INFO upstreamPciPortInfo; + GPU_COMPUTE_POLICY_INFO policyInfo; + NvBool bShutdownState; + GPU_CLK_PROP_TOP_POLS_CONTROL clkPropTopPolsControl; + GPU_DB_RUSD_SETTINGS rusd; +} GPU_INFO_LIST_NODE, *PGPU_INFO_LIST_NODE; + +MAKE_LIST(GpuInfoList, GPU_INFO_LIST_NODE); + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_GPU_DB_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__GpuDb; +struct NVOC_METADATA__Object; + + +struct GpuDb { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__GpuDb *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Object __nvoc_base_Object; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct GpuDb *__nvoc_pbase_GpuDb; // gpudb + + // Data members + GpuInfoList gpuList; + PORT_MUTEX *pLock; +}; + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__GpuDb { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; +}; + +#ifndef __NVOC_CLASS_GpuDb_TYPEDEF__ +#define __NVOC_CLASS_GpuDb_TYPEDEF__ +typedef struct GpuDb GpuDb; +#endif /* __NVOC_CLASS_GpuDb_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuDb +#define __nvoc_class_id_GpuDb 0xcdd250 +#endif /* __nvoc_class_id_GpuDb */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuDb; + +#define __staticCast_GpuDb(pThis) \ + ((pThis)->__nvoc_pbase_GpuDb) + +#ifdef __nvoc_gpu_db_h_disabled +#define __dynamicCast_GpuDb(pThis) ((GpuDb*) NULL) +#else //__nvoc_gpu_db_h_disabled +#define __dynamicCast_GpuDb(pThis) \ + ((GpuDb*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuDb))) +#endif //__nvoc_gpu_db_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_GpuDb(GpuDb**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GpuDb(GpuDb**, Dynamic*, NvU32); +#define __objCreate_GpuDb(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_GpuDb((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros + +// Dispatch functions +NV_STATUS gpudbConstruct_IMPL(struct GpuDb *arg_pGpuDb); + +#define __nvoc_gpudbConstruct(arg_pGpuDb) gpudbConstruct_IMPL(arg_pGpuDb) +void gpudbDestruct_IMPL(struct GpuDb *pGpuDb); + +#define __nvoc_gpudbDestruct(pGpuDb) gpudbDestruct_IMPL(pGpuDb) +#undef PRIVATE_FIELD + + +NV_STATUS gpudbRegisterGpu(const NvU8 *pUuid, const NBADDR *pUpstreamPortPciInfo, NvU64 pciInfo); +NV_STATUS gpudbSetGpuComputePolicyConfig(const NvU8 *uuid, NvU32 policyType, GPU_COMPUTE_POLICY_INFO *policyInfo); +NV_STATUS gpudbGetGpuComputePolicyConfigs(const NvU8 *uuid, GPU_COMPUTE_POLICY_INFO *policyInfo); +NV_STATUS gpudbSetClockPoliciesControl(const NvU8 *uuid, GPU_CLK_PROP_TOP_POLS_CONTROL *pControl); +NV_STATUS gpudbGetClockPoliciesControl(const NvU8 *uuid, GPU_CLK_PROP_TOP_POLS_CONTROL *pControl); +NV_STATUS gpudbSetShutdownState(const NvU8 *pUuid); +NV_STATUS gpudbSetRusdSettings(const NvU8 *uuid, GPU_DB_RUSD_SETTINGS *pRusd); +NV_STATUS gpudbGetRusdSettings(const NvU8 *uuid, GPU_DB_RUSD_SETTINGS *pRusd); +#endif // GPU_DB_H + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_GPU_DB_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_group_nvoc.c b/src/nvidia/generated/g_gpu_group_nvoc.c new file mode 100644 index 0000000..6f2905c --- /dev/null +++ b/src/nvidia/generated/g_gpu_group_nvoc.c @@ -0,0 +1,198 @@ +#define NVOC_GPU_GROUP_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_group_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xe40531 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUGRP; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +// Forward declarations for OBJGPUGRP +void __nvoc_init__Object(Object*); +void __nvoc_init__OBJGPUGRP(OBJGPUGRP*); +void 
__nvoc_init_funcTable_OBJGPUGRP(OBJGPUGRP*); +NV_STATUS __nvoc_ctor_OBJGPUGRP(OBJGPUGRP*); +void __nvoc_init_dataField_OBJGPUGRP(OBJGPUGRP*); +void __nvoc_dtor_OBJGPUGRP(OBJGPUGRP*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__OBJGPUGRP; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJGPUGRP; + +// Down-thunk(s) to bridge OBJGPUGRP methods from ancestors (if any) + +// Up-thunk(s) to bridge OBJGPUGRP methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUGRP = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJGPUGRP), + /*classId=*/ classId(OBJGPUGRP), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJGPUGRP", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPUGRP, + /*pCastInfo=*/ &__nvoc_castinfo__OBJGPUGRP, + /*pExportInfo=*/ &__nvoc_export_info__OBJGPUGRP +}; + + +// Metadata with per-class RTTI with ancestor(s) +static const struct NVOC_METADATA__OBJGPUGRP __nvoc_metadata__OBJGPUGRP = { + .rtti.pClassDef = &__nvoc_class_def_OBJGPUGRP, // (gpugrp) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPUGRP, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(OBJGPUGRP, __nvoc_base_Object), +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__OBJGPUGRP = { + .numRelatives = 2, + .relatives = { + &__nvoc_metadata__OBJGPUGRP.rtti, // [0]: (gpugrp) this + &__nvoc_metadata__OBJGPUGRP.metadata__Object.rtti, // [1]: (obj) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJGPUGRP = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJGPUGRP(OBJGPUGRP *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJGPUGRP(OBJGPUGRP *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJGPUGRP(OBJGPUGRP *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJGPUGRP_fail_Object; + __nvoc_init_dataField_OBJGPUGRP(pThis); + goto __nvoc_ctor_OBJGPUGRP_exit; // Success + +__nvoc_ctor_OBJGPUGRP_fail_Object: +__nvoc_ctor_OBJGPUGRP_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_OBJGPUGRP_1(OBJGPUGRP *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_OBJGPUGRP_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_OBJGPUGRP(OBJGPUGRP *pThis) { + __nvoc_init_funcTable_OBJGPUGRP_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__OBJGPUGRP(OBJGPUGRP *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_OBJGPUGRP = pThis; // (gpugrp) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Object(&pThis->__nvoc_base_Object); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__OBJGPUGRP.metadata__Object; // (obj) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__OBJGPUGRP; // (gpugrp) this + + // Initialize per-object vtables. 
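The RTTI metadata initialized here records, for each ancestor, its byte offset inside the derived object (NV_OFFSETOF(OBJGPUGRP, __nvoc_base_Object) above), so a down-cast can recover the derived pointer from a base pointer with plain pointer arithmetic after a type check. A rough container_of-style sketch of that mechanism, using invented Rtti/Obj/GpuGrp types rather than the real NVOC ones:

    #include <stddef.h>
    #include <stdio.h>

    typedef struct { const char *className; } Rtti;
    typedef struct { const Rtti *pRtti; } Obj;          /* base class            */
    typedef struct { int gpuMask; Obj base; } GpuGrp;   /* base at nonzero offset */

    static const Rtti gpuGrpRtti = { "GpuGrp" };

    /* Down-cast by subtracting the ancestor's recorded offset after an
     * identity check -- the arithmetic NVOC's cast-info tables drive. */
    static GpuGrp *castToGpuGrp(Obj *pObj)
    {
        if (pObj == NULL || pObj->pRtti != &gpuGrpRtti)
            return NULL;                                /* wrong dynamic type */
        return (GpuGrp *)((char *)pObj - offsetof(GpuGrp, base));
    }

    int main(void)
    {
        GpuGrp grp = { 0x3, { &gpuGrpRtti } };
        GpuGrp *pBack = castToGpuGrp(&grp.base);        /* round-trip the cast */
        printf("%s mask=0x%x\n", pBack->base.pRtti->className, pBack->gpuMask);
        return 0;
    }
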
+ __nvoc_init_funcTable_OBJGPUGRP(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJGPUGRP(OBJGPUGRP **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + OBJGPUGRP *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(OBJGPUGRP), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(OBJGPUGRP)); + + pThis->__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__OBJGPUGRP(pThis); + status = __nvoc_ctor_OBJGPUGRP(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJGPUGRP_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_OBJGPUGRP_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(OBJGPUGRP)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJGPUGRP(OBJGPUGRP **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJGPUGRP(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_group_nvoc.h b/src/nvidia/generated/g_gpu_group_nvoc.h new file mode 100644 index 0000000..8b99ec7 --- /dev/null +++ b/src/nvidia/generated/g_gpu_group_nvoc.h @@ -0,0 +1,352 @@ + +#ifndef _G_GPU_GROUP_NVOC_H_ +#define _G_GPU_GROUP_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once +#include "g_gpu_group_nvoc.h" + +#ifndef GPU_GROUP_H +#define GPU_GROUP_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Defines and structures used for GPUGRP Object. * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "nvoc/object.h" +#include "nvlimits.h" + +struct OBJVASPACE; +struct OBJGPU; + +/*! + * @brief Specialization of @ref FOR_EACH_INDEX_IN_MASK for looping + * over each GPU in an instance bitmask and processing the GPU in + * unicast mode. + * + * @note This macro is constructed to handle 'continue' and 'break' + * statements but not 'return.' Do NOT return directly from the loop - + * use status variable and 'break' to safely abort. + * + * @param[in] maskWidth bit-width of the mask (allowed: 8, 16, 32, 64) + * @param[in,out] pGpu Local GPU variable to use. + * @param[in] mask GPU instance bitmask. + */ +#define FOR_EACH_GPU_IN_MASK_UC(maskWidth, pSys, pGpu, mask) \ +{ \ + NvU32 gpuInstance; \ + NvBool bOrigBcState = NV_FALSE; \ + NvBool bEntryBcState = NV_FALSE; \ + OBJGPU *pEntryGpu = pGpu; \ + pGpu = NULL; \ + if (pEntryGpu != NULL) \ + { \ + bEntryBcState = gpumgrGetBcEnabledStatus(pEntryGpu); \ + } \ + FOR_EACH_INDEX_IN_MASK(maskWidth, gpuInstance, mask) \ + { \ + if (NULL != pGpu) /* continue */ \ + { \ + gpumgrSetBcEnabledStatus(pGpu, bOrigBcState); \ + } \ + pGpu = gpumgrGetGpu(gpuInstance); \ + if (pGpu == NULL) \ + { /* We should never hit this assert */ \ + NV_ASSERT(0); /* But it occurs very rarely */ \ + continue; /* It needs to be debugged */ \ + } \ + bOrigBcState = gpumgrGetBcEnabledStatus(pGpu); \ + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); \ + +#define FOR_EACH_GPU_IN_MASK_UC_END \ + } \ + FOR_EACH_INDEX_IN_MASK_END \ + if (NULL != pGpu) /* break */ \ + { \ + gpumgrSetBcEnabledStatus(pGpu, bOrigBcState); \ + pGpu = NULL; \ + } \ + if (pEntryGpu != NULL) \ + { \ + NV_ASSERT(bEntryBcState == gpumgrGetBcEnabledStatus(pEntryGpu)); \ + pGpu = pEntryGpu; \ + } \ +} + +typedef struct _def_vid_link_node +{ + /*! + * GPU instance for this node + */ + NvU32 gpuInstance; + /*! + * DrPort that receives data from Child GPU + */ + NvU32 ParentDrPort; + /*! + * DrPort that sources data to a Parent GPU + */ + NvU32 ChildDrPort; +} SLILINKNODE; + + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_GPU_GROUP_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJGPUGRP; +struct NVOC_METADATA__Object; + + +struct OBJGPUGRP { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__OBJGPUGRP *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Object __nvoc_base_Object; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct OBJGPUGRP *__nvoc_pbase_OBJGPUGRP; // gpugrp + + // Data members + NvU32 gpuMask; + NvU32 gpuSliLinkMask; + NvU32 linkingGpuMask; + NvU32 attachedGpuMaskAtLinking; + SLILINKNODE SliLinkOrder[8]; + NvU32 ConnectionCount; + NvU32 flags; + NvU32 displayFlags; + NvBool bcEnabled; + struct OBJGPU *parentGpu; + struct OBJVASPACE *pGlobalVASpace; +}; + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJGPUGRP { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; +}; + +#ifndef __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +typedef struct OBJGPUGRP OBJGPUGRP; +#endif /* __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUGRP +#define __nvoc_class_id_OBJGPUGRP 0xe40531 +#endif /* __nvoc_class_id_OBJGPUGRP */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUGRP; + +#define __staticCast_OBJGPUGRP(pThis) \ + ((pThis)->__nvoc_pbase_OBJGPUGRP) + +#ifdef __nvoc_gpu_group_h_disabled +#define __dynamicCast_OBJGPUGRP(pThis) ((OBJGPUGRP*) NULL) +#else //__nvoc_gpu_group_h_disabled +#define __dynamicCast_OBJGPUGRP(pThis) \ + ((OBJGPUGRP*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPUGRP))) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_OBJGPUGRP(OBJGPUGRP**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJGPUGRP(OBJGPUGRP**, Dynamic*, NvU32); +#define __objCreate_OBJGPUGRP(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJGPUGRP((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros + +// Dispatch functions +NV_STATUS gpugrpCreate_IMPL(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask); + +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpCreate(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpCreate(pGpuGrp, gpuMask) gpugrpCreate_IMPL(pGpuGrp, gpuMask) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS gpugrpDestroy_IMPL(struct OBJGPUGRP *pGpuGrp); + +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpDestroy(struct OBJGPUGRP *pGpuGrp) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpDestroy(pGpuGrp) gpugrpDestroy_IMPL(pGpuGrp) +#endif //__nvoc_gpu_group_h_disabled + +NvU32 gpugrpGetGpuMask_IMPL(struct OBJGPUGRP *pGpuGrp); + +#ifdef __nvoc_gpu_group_h_disabled +static inline NvU32 gpugrpGetGpuMask(struct OBJGPUGRP *pGpuGrp) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return 0; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpGetGpuMask(pGpuGrp) gpugrpGetGpuMask_IMPL(pGpuGrp) +#endif //__nvoc_gpu_group_h_disabled + +void gpugrpSetGpuMask_IMPL(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask); + +#ifdef __nvoc_gpu_group_h_disabled +static inline void gpugrpSetGpuMask(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpSetGpuMask(pGpuGrp, gpuMask) gpugrpSetGpuMask_IMPL(pGpuGrp, gpuMask) +#endif //__nvoc_gpu_group_h_disabled + +NvBool gpugrpGetBcEnabledState_IMPL(struct OBJGPUGRP *pGpuGrp); + +#ifdef __nvoc_gpu_group_h_disabled +static 
inline NvBool gpugrpGetBcEnabledState(struct OBJGPUGRP *pGpuGrp) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpGetBcEnabledState(pGpuGrp) gpugrpGetBcEnabledState_IMPL(pGpuGrp) +#endif //__nvoc_gpu_group_h_disabled + +void gpugrpSetBcEnabledState_IMPL(struct OBJGPUGRP *pGpuGrp, NvBool bcState); + +#ifdef __nvoc_gpu_group_h_disabled +static inline void gpugrpSetBcEnabledState(struct OBJGPUGRP *pGpuGrp, NvBool bcState) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpSetBcEnabledState(pGpuGrp, bcState) gpugrpSetBcEnabledState_IMPL(pGpuGrp, bcState) +#endif //__nvoc_gpu_group_h_disabled + +void gpugrpSetParentGpu_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pParentGpu); + +#ifdef __nvoc_gpu_group_h_disabled +static inline void gpugrpSetParentGpu(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pParentGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpSetParentGpu(pGpuGrp, pParentGpu) gpugrpSetParentGpu_IMPL(pGpuGrp, pParentGpu) +#endif //__nvoc_gpu_group_h_disabled + +struct OBJGPU *gpugrpGetParentGpu_IMPL(struct OBJGPUGRP *pGpuGrp); + +#ifdef __nvoc_gpu_group_h_disabled +static inline struct OBJGPU *gpugrpGetParentGpu(struct OBJGPUGRP *pGpuGrp) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NULL; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpGetParentGpu(pGpuGrp) gpugrpGetParentGpu_IMPL(pGpuGrp) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS gpugrpCreateGlobalVASpace_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu, NvU32 vaspaceClass, NvU64 vaStart, NvU64 vaEnd, NvU32 vaspaceFlags, struct OBJVASPACE **ppGlobalVAS); + +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpCreateGlobalVASpace(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu, NvU32 vaspaceClass, NvU64 vaStart, NvU64 vaEnd, NvU32 vaspaceFlags, struct OBJVASPACE **ppGlobalVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpCreateGlobalVASpace(pGpuGrp, pGpu, vaspaceClass, vaStart, vaEnd, vaspaceFlags, ppGlobalVAS) gpugrpCreateGlobalVASpace_IMPL(pGpuGrp, pGpu, vaspaceClass, vaStart, vaEnd, vaspaceFlags, ppGlobalVAS) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS gpugrpDestroyGlobalVASpace_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpDestroyGlobalVASpace(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpDestroyGlobalVASpace(pGpuGrp, pGpu) gpugrpDestroyGlobalVASpace_IMPL(pGpuGrp, pGpu) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS gpugrpGetGlobalVASpace_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJVASPACE **ppGlobalVAS); + +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpGetGlobalVASpace(struct OBJGPUGRP *pGpuGrp, struct OBJVASPACE **ppGlobalVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpGetGlobalVASpace(pGpuGrp, ppGlobalVAS) gpugrpGetGlobalVASpace_IMPL(pGpuGrp, ppGlobalVAS) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS gpugrpGetGpuFromSubDeviceInstance_IMPL(struct OBJGPUGRP *pGpuGrp, NvU32 subDeviceInst, struct OBJGPU **ppGpu); + +#ifdef 
__nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpGetGpuFromSubDeviceInstance(struct OBJGPUGRP *pGpuGrp, NvU32 subDeviceInst, struct OBJGPU **ppGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpGetGpuFromSubDeviceInstance(pGpuGrp, subDeviceInst, ppGpu) gpugrpGetGpuFromSubDeviceInstance_IMPL(pGpuGrp, subDeviceInst, ppGpu) +#endif //__nvoc_gpu_group_h_disabled + +#undef PRIVATE_FIELD + + +#endif // GPU_GROUP_H + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_GPU_GROUP_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_halspec_nvoc.c b/src/nvidia/generated/g_gpu_halspec_nvoc.c new file mode 100644 index 0000000..5267cf3 --- /dev/null +++ b/src/nvidia/generated/g_gpu_halspec_nvoc.c @@ -0,0 +1,238 @@ +#define NVOC_GPU_HALSPEC_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_halspec_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x34a6d6 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner; + +// Forward declarations for RmHalspecOwner +void __nvoc_init__RmHalspecOwner(RmHalspecOwner*, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver); +void __nvoc_init_funcTable_RmHalspecOwner(RmHalspecOwner*); +NV_STATUS __nvoc_ctor_RmHalspecOwner(RmHalspecOwner*); +void __nvoc_init_dataField_RmHalspecOwner(RmHalspecOwner*); +void __nvoc_dtor_RmHalspecOwner(RmHalspecOwner*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__RmHalspecOwner; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__RmHalspecOwner; + +// Down-thunk(s) to bridge RmHalspecOwner methods from ancestors (if any) + +// Up-thunk(s) to bridge RmHalspecOwner methods to ancestors (if any) + +// Not instantiable because it's not derived from class "Object" +const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmHalspecOwner), + /*classId=*/ classId(RmHalspecOwner), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmHalspecOwner", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo__RmHalspecOwner, + /*pExportInfo=*/ &__nvoc_export_info__RmHalspecOwner +}; + + +// Metadata with per-class RTTI +static const struct NVOC_METADATA__RmHalspecOwner __nvoc_metadata__RmHalspecOwner = { + .rtti.pClassDef = &__nvoc_class_def_RmHalspecOwner, // (rmhalspecowner) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmHalspecOwner, + .rtti.offset = 0, +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__RmHalspecOwner = { + .numRelatives = 1, + .relatives = { + &__nvoc_metadata__RmHalspecOwner.rtti, // [0]: (rmhalspecowner) this + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__RmHalspecOwner = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmHalspecOwner(RmHalspecOwner *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RmHalspecOwner(RmHalspecOwner *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmHalspecOwner(RmHalspecOwner *pThis) { + 
NV_STATUS status = NV_OK; + __nvoc_init_dataField_RmHalspecOwner(pThis); + goto __nvoc_ctor_RmHalspecOwner_exit; // Success + +__nvoc_ctor_RmHalspecOwner_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_RmHalspecOwner_1(RmHalspecOwner *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_RmHalspecOwner_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_RmHalspecOwner(RmHalspecOwner *pThis) { + __nvoc_init_funcTable_RmHalspecOwner_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__RmHalspecOwner(RmHalspecOwner *pThis, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_RmHalspecOwner = pThis; // (rmhalspecowner) this + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__RmHalspecOwner; // (rmhalspecowner) this + + // Initialize halspec data. + __nvoc_init_halspec_RmVariantHal(&pThis->rmVariantHal, RmVariantHal_rmVariant); + __nvoc_init_halspec_DispIpHal(&pThis->dispIpHal, DispIpHal_ipver); + + // Initialize per-object vtables. + __nvoc_init_funcTable_RmHalspecOwner(pThis); +} + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x74bc71 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuHalspecOwner; + +// Forward declarations for GpuHalspecOwner +void __nvoc_init__GpuHalspecOwner(GpuHalspecOwner*, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + TEGRA_CHIP_TYPE TegraChipHal_tegraType); +void __nvoc_init_funcTable_GpuHalspecOwner(GpuHalspecOwner*); +NV_STATUS __nvoc_ctor_GpuHalspecOwner(GpuHalspecOwner*); +void __nvoc_init_dataField_GpuHalspecOwner(GpuHalspecOwner*); +void __nvoc_dtor_GpuHalspecOwner(GpuHalspecOwner*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__GpuHalspecOwner; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__GpuHalspecOwner; + +// Down-thunk(s) to bridge GpuHalspecOwner methods from ancestors (if any) + +// Up-thunk(s) to bridge GpuHalspecOwner methods to ancestors (if any) + +// Not instantiable because it's not derived from class "Object" +const struct NVOC_CLASS_DEF __nvoc_class_def_GpuHalspecOwner = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GpuHalspecOwner), + /*classId=*/ classId(GpuHalspecOwner), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GpuHalspecOwner", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo__GpuHalspecOwner, + /*pExportInfo=*/ &__nvoc_export_info__GpuHalspecOwner +}; + + +// Metadata with per-class RTTI +static const struct NVOC_METADATA__GpuHalspecOwner __nvoc_metadata__GpuHalspecOwner = { + .rtti.pClassDef = &__nvoc_class_def_GpuHalspecOwner, // (gpuhalspecowner) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuHalspecOwner, + .rtti.offset = 0, +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__GpuHalspecOwner = { + .numRelatives = 1, + .relatives = { + &__nvoc_metadata__GpuHalspecOwner.rtti, // [0]: (gpuhalspecowner) this + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__GpuHalspecOwner = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuHalspecOwner(GpuHalspecOwner *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GpuHalspecOwner(GpuHalspecOwner *pThis) { + 
PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuHalspecOwner(GpuHalspecOwner *pThis) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_GpuHalspecOwner(pThis); + goto __nvoc_ctor_GpuHalspecOwner_exit; // Success + +__nvoc_ctor_GpuHalspecOwner_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_GpuHalspecOwner_1(GpuHalspecOwner *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_GpuHalspecOwner_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_GpuHalspecOwner(GpuHalspecOwner *pThis) { + __nvoc_init_funcTable_GpuHalspecOwner_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__GpuHalspecOwner(GpuHalspecOwner *pThis, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + TEGRA_CHIP_TYPE TegraChipHal_tegraType) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_GpuHalspecOwner = pThis; // (gpuhalspecowner) this + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__GpuHalspecOwner; // (gpuhalspecowner) this + + // Initialize halspec data. + __nvoc_init_halspec_ChipHal(&pThis->chipHal, ChipHal_arch, ChipHal_impl, ChipHal_hidrev); + __nvoc_init_halspec_TegraChipHal(&pThis->tegraChipHal, TegraChipHal_tegraType); + + // Initialize per-object vtables. + __nvoc_init_funcTable_GpuHalspecOwner(pThis); +} + diff --git a/src/nvidia/generated/g_gpu_halspec_nvoc.h b/src/nvidia/generated/g_gpu_halspec_nvoc.h new file mode 100644 index 0000000..992dd2d --- /dev/null +++ b/src/nvidia/generated/g_gpu_halspec_nvoc.h @@ -0,0 +1,195 @@ + +#ifndef _G_GPU_HALSPEC_NVOC_H_ +#define _G_GPU_HALSPEC_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
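__nvoc_init__RmHalspecOwner and __nvoc_init__GpuHalspecOwner above only record which variant is active (RM runtime variant and display IP version for one; chip arch/impl/hidrev and Tegra chip type for the other); HAL-varying methods elsewhere are bound against those records. A loose illustration of version-keyed binding with invented names and a made-up version value (the NVC67D/NVC97D display classes come from the descriptor lists earlier in this patch):

    #include <stdio.h>

    typedef unsigned int IpVer;   /* stand-in for the NvU32 ipver argument */

    typedef struct {
        const char *(*getCoreDisplayClass)(void);   /* one HAL-varying op */
    } DispHal;

    static const char *coreClass_T234D(void) { return "NVC67D_CORE_CHANNEL_DMA"; }
    static const char *coreClass_T264D(void) { return "NVC97D_CORE_CHANNEL_DMA"; }

    /* Bind the variant once, up front; hot paths then call through the
     * table without re-checking the version. */
    static void initDispHal(DispHal *pHal, IpVer ipver)
    {
        if (ipver == 0x0402)                /* hypothetical version key */
            pHal->getCoreDisplayClass = coreClass_T234D;
        else
            pHal->getCoreDisplayClass = coreClass_T264D;
    }

    int main(void)
    {
        DispHal hal;
        initDispHal(&hal, 0x0402);
        printf("%s\n", hal.getCoreDisplayClass());
        return 0;
    }
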
+ */ + +#pragma once +#include "g_gpu_halspec_nvoc.h" + +#ifndef GPU_HALSPEC_H +#define GPU_HALSPEC_H + +#include "g_chips2halspec.h" // NVOC halspec, generated by rmconfig.pl + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_GPU_HALSPEC_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI +struct NVOC_METADATA__RmHalspecOwner; + + +struct RmHalspecOwner { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__RmHalspecOwner *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Ancestor object pointers for `staticCast` feature + struct RmHalspecOwner *__nvoc_pbase_RmHalspecOwner; // rmhalspecowner + + // Data members + struct RmVariantHal rmVariantHal; + struct DispIpHal dispIpHal; +}; + + +// Metadata with per-class RTTI +struct NVOC_METADATA__RmHalspecOwner { + const struct NVOC_RTTI rtti; +}; + +#ifndef __NVOC_CLASS_RmHalspecOwner_TYPEDEF__ +#define __NVOC_CLASS_RmHalspecOwner_TYPEDEF__ +typedef struct RmHalspecOwner RmHalspecOwner; +#endif /* __NVOC_CLASS_RmHalspecOwner_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmHalspecOwner +#define __nvoc_class_id_RmHalspecOwner 0x34a6d6 +#endif /* __nvoc_class_id_RmHalspecOwner */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner; + +#define __staticCast_RmHalspecOwner(pThis) \ + ((pThis)->__nvoc_pbase_RmHalspecOwner) + +#ifdef __nvoc_gpu_halspec_h_disabled +#define __dynamicCast_RmHalspecOwner(pThis) ((RmHalspecOwner*) NULL) +#else //__nvoc_gpu_halspec_h_disabled +#define __dynamicCast_RmHalspecOwner(pThis) \ + ((RmHalspecOwner*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmHalspecOwner))) +#endif //__nvoc_gpu_halspec_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_RmHalspecOwner(RmHalspecOwner**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RmHalspecOwner(RmHalspecOwner**, Dynamic*, NvU32, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver); +#define __objCreate_RmHalspecOwner(ppNewObj, pParent, createFlags, RmVariantHal_rmVariant, DispIpHal_ipver) \ + __nvoc_objCreate_RmHalspecOwner((ppNewObj), staticCast((pParent), Dynamic), (createFlags), RmVariantHal_rmVariant, DispIpHal_ipver) + +#undef PRIVATE_FIELD + + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_GPU_HALSPEC_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI +struct NVOC_METADATA__GpuHalspecOwner; + + +struct GpuHalspecOwner { + + // Metadata starts with RTTI structure. 
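The "Metadata starts with RTTI structure" comment marks an invariant every NVOC class in this patch shares: the metadata/RTTI pointer is the first member, so generic code can reach class metadata through any object pointer without knowing its concrete type. A small sketch of that layout trick, with hypothetical Meta/Foo/Bar types:

    #include <stdio.h>

    typedef struct { const char *name; } Meta;

    /* Every "class" starts with a pointer to its metadata... */
    typedef struct { const Meta *pMeta; int a; } Foo;
    typedef struct { const Meta *pMeta; double b; } Bar;

    static const Meta fooMeta = { "Foo" };
    static const Meta barMeta = { "Bar" };

    /* ...so one generic helper can name any object: a pointer to a struct
     * also points at its first member. */
    static const char *className(const void *pObj)
    {
        return (*(const Meta *const *)pObj)->name;
    }

    int main(void)
    {
        Foo f = { &fooMeta, 1 };
        Bar b = { &barMeta, 2.0 };
        printf("%s %s\n", className(&f), className(&b));   /* Foo Bar */
        return 0;
    }
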
+ union { + const struct NVOC_METADATA__GpuHalspecOwner *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Ancestor object pointers for `staticCast` feature + struct GpuHalspecOwner *__nvoc_pbase_GpuHalspecOwner; // gpuhalspecowner + + // Data members + struct ChipHal chipHal; + struct TegraChipHal tegraChipHal; +}; + + +// Metadata with per-class RTTI +struct NVOC_METADATA__GpuHalspecOwner { + const struct NVOC_RTTI rtti; +}; + +#ifndef __NVOC_CLASS_GpuHalspecOwner_TYPEDEF__ +#define __NVOC_CLASS_GpuHalspecOwner_TYPEDEF__ +typedef struct GpuHalspecOwner GpuHalspecOwner; +#endif /* __NVOC_CLASS_GpuHalspecOwner_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuHalspecOwner +#define __nvoc_class_id_GpuHalspecOwner 0x74bc71 +#endif /* __nvoc_class_id_GpuHalspecOwner */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuHalspecOwner; + +#define __staticCast_GpuHalspecOwner(pThis) \ + ((pThis)->__nvoc_pbase_GpuHalspecOwner) + +#ifdef __nvoc_gpu_halspec_h_disabled +#define __dynamicCast_GpuHalspecOwner(pThis) ((GpuHalspecOwner*) NULL) +#else //__nvoc_gpu_halspec_h_disabled +#define __dynamicCast_GpuHalspecOwner(pThis) \ + ((GpuHalspecOwner*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuHalspecOwner))) +#endif //__nvoc_gpu_halspec_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_GpuHalspecOwner(GpuHalspecOwner**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GpuHalspecOwner(GpuHalspecOwner**, Dynamic*, NvU32, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + TEGRA_CHIP_TYPE TegraChipHal_tegraType); +#define __objCreate_GpuHalspecOwner(ppNewObj, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, TegraChipHal_tegraType) \ + __nvoc_objCreate_GpuHalspecOwner((ppNewObj), staticCast((pParent), Dynamic), (createFlags), ChipHal_arch, ChipHal_impl, ChipHal_hidrev, TegraChipHal_tegraType) + +#undef PRIVATE_FIELD + + +#endif // GPU_HALSPEC_H + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_GPU_HALSPEC_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_mgmt_api_nvoc.c b/src/nvidia/generated/g_gpu_mgmt_api_nvoc.c new file mode 100644 index 0000000..0e5bfaa --- /dev/null +++ b/src/nvidia/generated/g_gpu_mgmt_api_nvoc.c @@ -0,0 +1,466 @@ +#define NVOC_GPU_MGMT_API_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_mgmt_api_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x376305 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuManagementApi; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +// Forward declarations for GpuManagementApi +void __nvoc_init__RmResource(RmResource*); +void __nvoc_init__GpuManagementApi(GpuManagementApi*); +void __nvoc_init_funcTable_GpuManagementApi(GpuManagementApi*); +NV_STATUS __nvoc_ctor_GpuManagementApi(GpuManagementApi*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_GpuManagementApi(GpuManagementApi*); +void 
__nvoc_dtor_GpuManagementApi(GpuManagementApi*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__GpuManagementApi; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__GpuManagementApi; + +// Down-thunk(s) to bridge GpuManagementApi methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super + +// Up-thunk(s) to bridge GpuManagementApi methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super +NvBool __nvoc_up_thunk_RmResource_gpumgmtapiAccessCallback(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NvBool __nvoc_up_thunk_RmResource_gpumgmtapiShareCallback(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS 
__nvoc_up_thunk_RmResource_gpumgmtapiGetMemInterMapParams(struct GpuManagementApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_gpumgmtapiCheckMemInterUnmap(struct GpuManagementApi *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_gpumgmtapiGetMemoryMappingDescriptor(struct GpuManagementApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_gpumgmtapiControlSerialization_Prologue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_gpumgmtapiControlSerialization_Epilogue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_gpumgmtapiControl_Prologue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_gpumgmtapiControl_Epilogue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_gpumgmtapiCanCopy(struct GpuManagementApi *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_gpumgmtapiIsDuplicate(struct GpuManagementApi *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_gpumgmtapiPreDestruct(struct GpuManagementApi *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_gpumgmtapiControl(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_gpumgmtapiControlFilter(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_gpumgmtapiMap(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_RsResource_gpumgmtapiUnmap(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_RsResource_gpumgmtapiIsPartialUnmapSupported(struct GpuManagementApi *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_gpumgmtapiMapTo(struct GpuManagementApi *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_gpumgmtapiUnmapFrom(struct GpuManagementApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_gpumgmtapiGetRefCount(struct GpuManagementApi *pResource); // this +void __nvoc_up_thunk_RsResource_gpumgmtapiAddAdditionalDependants(struct RsClient *pClient, struct GpuManagementApi *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_GpuManagementApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GpuManagementApi), + /*classId=*/ classId(GpuManagementApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GpuManagementApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuManagementApi, + /*pCastInfo=*/ &__nvoc_castinfo__GpuManagementApi, + /*pExportInfo=*/ &__nvoc_export_info__GpuManagementApi +}; + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + 
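The export table that follows is the glue between an RM control command and its implementation: each NVOC_EXPORTED_METHOD_DEF entry carries the handler pointer (compiled out to NULL when disabled by flags), the command ID (0x200101 for gpumgmtapiCtrlCmdSetShutdownState), access flags, and the expected parameter-struct size, which a dispatcher can validate before calling through. A simplified, hypothetical dispatcher over an entry of that shape (the real RM lookup is more involved; the 16-byte size is made up):

    #include <stddef.h>

    typedef unsigned int NvU32;
    typedef int NV_STATUS;
    #define NV_OK                        0
    #define NV_ERR_NOT_SUPPORTED         1
    #define NV_ERR_INVALID_PARAM_STRUCT  2

    typedef struct {
        NV_STATUS (*pFunc)(void *pParams);   /* NULL when disabled by flags    */
        NvU32     methodId;                  /* control command, e.g. 0x200101 */
        size_t    paramSize;                 /* expected parameter struct size */
    } MethodDef;

    static NV_STATUS setShutdownState(void *pParams) { (void)pParams; return NV_OK; }

    static const MethodDef methodTable[] = {
        { setShutdownState, 0x200101u, 16 },
    };

    /* Linear scan plus size check before indirecting through the table. */
    static NV_STATUS dispatchControl(NvU32 cmd, void *pParams, size_t size)
    {
        size_t i;
        for (i = 0; i < sizeof(methodTable) / sizeof(methodTable[0]); i++) {
            if (methodTable[i].methodId != cmd)
                continue;
            if (methodTable[i].pFunc == NULL)
                return NV_ERR_NOT_SUPPORTED;         /* compiled out            */
            if (methodTable[i].paramSize != size)
                return NV_ERR_INVALID_PARAM_STRUCT;  /* reject wrong-size input */
            return methodTable[i].pFunc(pParams);
        }
        return NV_ERR_NOT_SUPPORTED;
    }

A caller would invoke dispatchControl(0x200101, &params, sizeof(params)) after copying the parameter struct in from the client, which is why the size field sits next to the handler in the table.
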
+static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_GpuManagementApi[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) gpumgmtapiCtrlCmdSetShutdownState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*flags=*/ 0x7u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x200101u, + /*paramSize=*/ sizeof(NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_GpuManagementApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "gpumgmtapiCtrlCmdSetShutdownState" +#endif + }, + +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__GpuManagementApi __nvoc_metadata__GpuManagementApi = { + .rtti.pClassDef = &__nvoc_class_def_GpuManagementApi, // (gpumgmtapi) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuManagementApi, + .rtti.offset = 0, + .metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super + .metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.rtti.offset = NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource), + .metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^2 + .metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^3 + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^2 + .metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + + .vtable.__gpumgmtapiAccessCallback__ = &__nvoc_up_thunk_RmResource_gpumgmtapiAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__gpumgmtapiShareCallback__ = &__nvoc_up_thunk_RmResource_gpumgmtapiShareCallback, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__gpumgmtapiGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpumgmtapiGetMemInterMapParams, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + .vtable.__gpumgmtapiCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpumgmtapiCheckMemInterUnmap, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = 
&rmresCheckMemInterUnmap_IMPL, // virtual + .vtable.__gpumgmtapiGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpumgmtapiGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual + .vtable.__gpumgmtapiControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpumgmtapiControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__gpumgmtapiControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpumgmtapiControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__gpumgmtapiControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpumgmtapiControl_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__gpumgmtapiControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpumgmtapiControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__gpumgmtapiCanCopy__ = &__nvoc_up_thunk_RsResource_gpumgmtapiCanCopy, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__gpumgmtapiIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpumgmtapiIsDuplicate, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__gpumgmtapiPreDestruct__ = &__nvoc_up_thunk_RsResource_gpumgmtapiPreDestruct, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__gpumgmtapiControl__ = &__nvoc_up_thunk_RsResource_gpumgmtapiControl, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &resControl_IMPL, // virtual + 
.vtable.__gpumgmtapiControlFilter__ = &__nvoc_up_thunk_RsResource_gpumgmtapiControlFilter, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__gpumgmtapiMap__ = &__nvoc_up_thunk_RsResource_gpumgmtapiMap, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &resMap_IMPL, // virtual + .vtable.__gpumgmtapiUnmap__ = &__nvoc_up_thunk_RsResource_gpumgmtapiUnmap, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &resUnmap_IMPL, // virtual + .vtable.__gpumgmtapiIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpumgmtapiIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__gpumgmtapiMapTo__ = &__nvoc_up_thunk_RsResource_gpumgmtapiMapTo, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__gpumgmtapiUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpumgmtapiUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__gpumgmtapiGetRefCount__ = &__nvoc_up_thunk_RsResource_gpumgmtapiGetRefCount, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__gpumgmtapiAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpumgmtapiAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__GpuManagementApi = { + .numRelatives = 5, + .relatives = { + &__nvoc_metadata__GpuManagementApi.rtti, // [0]: (gpumgmtapi) this + &__nvoc_metadata__GpuManagementApi.metadata__RmResource.rtti, // [1]: (rmres) super + &__nvoc_metadata__GpuManagementApi.metadata__RmResource.metadata__RsResource.rtti, // [2]: (res) super^2 + 
&__nvoc_metadata__GpuManagementApi.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [3]: (obj) super^3 + &__nvoc_metadata__GpuManagementApi.metadata__RmResource.metadata__RmResourceCommon.rtti, // [4]: (rmrescmn) super^2 + } +}; + +// 21 up-thunk(s) defined to bridge methods in GpuManagementApi to superclasses + +// gpumgmtapiAccessCallback: virtual inherited (rmres) base (rmres) +NvBool __nvoc_up_thunk_RmResource_gpumgmtapiAccessCallback(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// gpumgmtapiShareCallback: virtual inherited (rmres) base (rmres) +NvBool __nvoc_up_thunk_RmResource_gpumgmtapiShareCallback(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// gpumgmtapiGetMemInterMapParams: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_gpumgmtapiGetMemInterMapParams(struct GpuManagementApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource)), pParams); +} + +// gpumgmtapiCheckMemInterUnmap: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_gpumgmtapiCheckMemInterUnmap(struct GpuManagementApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource)), bSubdeviceHandleProvided); +} + +// gpumgmtapiGetMemoryMappingDescriptor: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_gpumgmtapiGetMemoryMappingDescriptor(struct GpuManagementApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource)), ppMemDesc); +} + +// gpumgmtapiControlSerialization_Prologue: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_gpumgmtapiControlSerialization_Prologue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// gpumgmtapiControlSerialization_Epilogue: virtual inherited (rmres) base (rmres) +void __nvoc_up_thunk_RmResource_gpumgmtapiControlSerialization_Epilogue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// gpumgmtapiControl_Prologue: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_gpumgmtapiControl_Prologue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// gpumgmtapiControl_Epilogue: virtual inherited (rmres) base (rmres) +void __nvoc_up_thunk_RmResource_gpumgmtapiControl_Epilogue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// gpumgmtapiCanCopy: virtual inherited (res) base (rmres) +NvBool __nvoc_up_thunk_RsResource_gpumgmtapiCanCopy(struct GpuManagementApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// gpumgmtapiIsDuplicate: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_gpumgmtapiIsDuplicate(struct GpuManagementApi *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate); +} + +// gpumgmtapiPreDestruct: virtual inherited (res) base (rmres) +void __nvoc_up_thunk_RsResource_gpumgmtapiPreDestruct(struct GpuManagementApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// gpumgmtapiControl: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_gpumgmtapiControl(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// gpumgmtapiControlFilter: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_gpumgmtapiControlFilter(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// gpumgmtapiMap: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_gpumgmtapiMap(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams, pCpuMapping); +} + +// gpumgmtapiUnmap: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_gpumgmtapiUnmap(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pCpuMapping); +} + +// gpumgmtapiIsPartialUnmapSupported: inline virtual inherited (res) base (rmres) body +NvBool __nvoc_up_thunk_RsResource_gpumgmtapiIsPartialUnmapSupported(struct GpuManagementApi *pResource) { + return resIsPartialUnmapSupported((struct 
RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// gpumgmtapiMapTo: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_gpumgmtapiMapTo(struct GpuManagementApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// gpumgmtapiUnmapFrom: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_gpumgmtapiUnmapFrom(struct GpuManagementApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// gpumgmtapiGetRefCount: virtual inherited (res) base (rmres) +NvU32 __nvoc_up_thunk_RsResource_gpumgmtapiGetRefCount(struct GpuManagementApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// gpumgmtapiAddAdditionalDependants: virtual inherited (res) base (rmres) +void __nvoc_up_thunk_RsResource_gpumgmtapiAddAdditionalDependants(struct RsClient *pClient, struct GpuManagementApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__GpuManagementApi = +{ + /*numEntries=*/ 1, + /*pExportEntries=*/ __nvoc_exported_method_def_GpuManagementApi +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_GpuManagementApi(GpuManagementApi *pThis) { + __nvoc_gpumgmtapiDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GpuManagementApi(GpuManagementApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_GpuManagementApi(GpuManagementApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GpuManagementApi_fail_RmResource; + __nvoc_init_dataField_GpuManagementApi(pThis); + + status = __nvoc_gpumgmtapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GpuManagementApi_fail__init; + goto __nvoc_ctor_GpuManagementApi_exit; // Success + +__nvoc_ctor_GpuManagementApi_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_GpuManagementApi_fail_RmResource: +__nvoc_ctor_GpuManagementApi_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_GpuManagementApi_1(GpuManagementApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + // gpumgmtapiCtrlCmdSetShutdownState -- exported (id=0x200101) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + pThis->__gpumgmtapiCtrlCmdSetShutdownState__ = &gpumgmtapiCtrlCmdSetShutdownState_IMPL; +#endif +} // End __nvoc_init_funcTable_GpuManagementApi_1 with approximately 1 basic block(s). + + +// Initialize vtable(s) for 22 virtual method(s). 
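/*
 * [Editorial note -- not NVOC output] Of the 22 virtual methods, the 21
 * inherited ones live in the shared, read-only per-class vtable inside
 * __nvoc_metadata__GpuManagementApi above; only the exported control is a
 * per-object function pointer, installed by the function below. A hedged
 * sketch of how a client reaches it -- the client/object handles are
 * hypothetical, and the command value is assumed to match methodId 0x200101
 * from the export table:
 *
 *     NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS params = { 0 };
 *     NV_STATUS status = pRmApi->Control(pRmApi, hClient, hGpuMgmtApi,
 *                                        NV0020_CTRL_CMD_GPU_MGMT_SET_SHUTDOWN_STATE,
 *                                        &params, sizeof(params));
 *
 * The resource server checks the command against
 * __nvoc_exported_method_def_GpuManagementApi (flags, accessRight, paramSize)
 * and then dispatches into gpumgmtapiCtrlCmdSetShutdownState_IMPL through the
 * pointer set below.
 */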
+void __nvoc_init_funcTable_GpuManagementApi(GpuManagementApi *pThis) { + + // Initialize vtable(s) with 1 per-object function pointer(s). + __nvoc_init_funcTable_GpuManagementApi_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__GpuManagementApi(GpuManagementApi *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^3 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^2 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^2 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; // (rmres) super + pThis->__nvoc_pbase_GpuManagementApi = pThis; // (gpumgmtapi) this + + // Recurse to superclass initialization function(s). + __nvoc_init__RmResource(&pThis->__nvoc_base_RmResource); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__GpuManagementApi.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^3 + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__GpuManagementApi.metadata__RmResource.metadata__RsResource; // (res) super^2 + pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__GpuManagementApi.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^2 + pThis->__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__GpuManagementApi.metadata__RmResource; // (rmres) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__GpuManagementApi; // (gpumgmtapi) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_GpuManagementApi(pThis); +} + +NV_STATUS __nvoc_objCreate_GpuManagementApi(GpuManagementApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + GpuManagementApi *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(GpuManagementApi), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(GpuManagementApi)); + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__GpuManagementApi(pThis); + status = __nvoc_ctor_GpuManagementApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_GpuManagementApi_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_GpuManagementApi_cleanup: + + // Unlink the child from the parent if it was linked above. 
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
+    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+        portMemSet(pThis, 0, sizeof(GpuManagementApi));
+    else
+    {
+        portMemFree(pThis);
+        *ppThis = NULL;
+    }
+
+    // coverity[leaked_storage:FALSE]
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_GpuManagementApi(GpuManagementApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+    status = __nvoc_objCreate_GpuManagementApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+    return status;
+}
+
diff --git a/src/nvidia/generated/g_gpu_mgmt_api_nvoc.h b/src/nvidia/generated/g_gpu_mgmt_api_nvoc.h
new file mode 100644
index 0000000..ab75465
--- /dev/null
+++ b/src/nvidia/generated/g_gpu_mgmt_api_nvoc.h
@@ -0,0 +1,313 @@
+
+#ifndef _G_GPU_MGMT_API_NVOC_H_
+#define _G_GPU_MGMT_API_NVOC_H_
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+#include "g_gpu_mgmt_api_nvoc.h"
+
+#ifndef GPU_MGMT_API_H
+#define GPU_MGMT_API_H
+
+#include "rmapi/resource.h"
+#include "ctrl/ctrl0020.h"
+
+// ****************************************************************************
+// Type Definitions
+// ****************************************************************************
+
+//
+// GpuManagementApi class information
+//
+// This is a global GPU class that will help us route IOCTLs to probed
+// and persistent GPU state
+//
+
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
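//
// A minimal illustration of the mechanism (the field name is hypothetical):
// a declaration such as
//
//     NvU32 PRIVATE_FIELD(internalState);
//
// expands to a plain `internalState` member only in the translation unit
// that defines NVOC_GPU_MGMT_API_H_PRIVATE_ACCESS_ALLOWED (see the #ifdef
// below); every other source file gets the NVOC_PRIVATE_FIELD() expansion,
// which alters the member name so that out-of-class accesses are diagnosed
// at compile time.
//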
+#ifdef NVOC_GPU_MGMT_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__GpuManagementApi; +struct NVOC_METADATA__RmResource; +struct NVOC_VTABLE__GpuManagementApi; + + +struct GpuManagementApi { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__GpuManagementApi *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct RmResource __nvoc_base_RmResource; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^3 + struct RsResource *__nvoc_pbase_RsResource; // res super^2 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^2 + struct RmResource *__nvoc_pbase_RmResource; // rmres super + struct GpuManagementApi *__nvoc_pbase_GpuManagementApi; // gpumgmtapi + + // Vtable with 1 per-object function pointer + NV_STATUS (*__gpumgmtapiCtrlCmdSetShutdownState__)(struct GpuManagementApi * /*this*/, NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *); // exported (id=0x200101) +}; + + +// Vtable with 21 per-class function pointers +struct NVOC_VTABLE__GpuManagementApi { + NvBool (*__gpumgmtapiAccessCallback__)(struct GpuManagementApi * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (rmres) + NvBool (*__gpumgmtapiShareCallback__)(struct GpuManagementApi * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__gpumgmtapiGetMemInterMapParams__)(struct GpuManagementApi * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__gpumgmtapiCheckMemInterUnmap__)(struct GpuManagementApi * /*this*/, NvBool); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__gpumgmtapiGetMemoryMappingDescriptor__)(struct GpuManagementApi * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__gpumgmtapiControlSerialization_Prologue__)(struct GpuManagementApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + void (*__gpumgmtapiControlSerialization_Epilogue__)(struct GpuManagementApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__gpumgmtapiControl_Prologue__)(struct GpuManagementApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + void (*__gpumgmtapiControl_Epilogue__)(struct GpuManagementApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + NvBool (*__gpumgmtapiCanCopy__)(struct GpuManagementApi * /*this*/); // virtual inherited (res) base (rmres) + NV_STATUS (*__gpumgmtapiIsDuplicate__)(struct GpuManagementApi * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (rmres) + void (*__gpumgmtapiPreDestruct__)(struct GpuManagementApi * /*this*/); // virtual inherited (res) base (rmres) + NV_STATUS (*__gpumgmtapiControl__)(struct GpuManagementApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (rmres) + NV_STATUS (*__gpumgmtapiControlFilter__)(struct GpuManagementApi * /*this*/, struct CALL_CONTEXT *, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (rmres) + NV_STATUS (*__gpumgmtapiMap__)(struct GpuManagementApi * /*this*/, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (res) base (rmres) + NV_STATUS (*__gpumgmtapiUnmap__)(struct GpuManagementApi * /*this*/, struct CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (res) base (rmres) + NvBool (*__gpumgmtapiIsPartialUnmapSupported__)(struct GpuManagementApi * /*this*/); // inline virtual inherited (res) base (rmres) body + NV_STATUS (*__gpumgmtapiMapTo__)(struct GpuManagementApi * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (rmres) + NV_STATUS (*__gpumgmtapiUnmapFrom__)(struct GpuManagementApi * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (rmres) + NvU32 (*__gpumgmtapiGetRefCount__)(struct GpuManagementApi * /*this*/); // virtual inherited (res) base (rmres) + void (*__gpumgmtapiAddAdditionalDependants__)(struct RsClient *, struct GpuManagementApi * /*this*/, RsResourceRef *); // virtual inherited (res) base (rmres) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__GpuManagementApi { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__RmResource metadata__RmResource; + const struct NVOC_VTABLE__GpuManagementApi vtable; +}; + +#ifndef __NVOC_CLASS_GpuManagementApi_TYPEDEF__ +#define __NVOC_CLASS_GpuManagementApi_TYPEDEF__ +typedef struct GpuManagementApi GpuManagementApi; +#endif /* __NVOC_CLASS_GpuManagementApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuManagementApi +#define __nvoc_class_id_GpuManagementApi 0x376305 +#endif /* __nvoc_class_id_GpuManagementApi */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuManagementApi; + +#define __staticCast_GpuManagementApi(pThis) \ + ((pThis)->__nvoc_pbase_GpuManagementApi) + +#ifdef __nvoc_gpu_mgmt_api_h_disabled +#define __dynamicCast_GpuManagementApi(pThis) ((GpuManagementApi*) NULL) +#else //__nvoc_gpu_mgmt_api_h_disabled +#define __dynamicCast_GpuManagementApi(pThis) \ + ((GpuManagementApi*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuManagementApi))) +#endif //__nvoc_gpu_mgmt_api_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_GpuManagementApi(GpuManagementApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GpuManagementApi(GpuManagementApi**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_GpuManagementApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_GpuManagementApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define gpumgmtapiCtrlCmdSetShutdownState_FNPTR(pGpuMgmt) pGpuMgmt->__gpumgmtapiCtrlCmdSetShutdownState__ +#define gpumgmtapiCtrlCmdSetShutdownState(pGpuMgmt, pParams) gpumgmtapiCtrlCmdSetShutdownState_DISPATCH(pGpuMgmt, pParams) +#define gpumgmtapiAccessCallback_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define gpumgmtapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) gpumgmtapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define gpumgmtapiShareCallback_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define gpumgmtapiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) 
gpumgmtapiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define gpumgmtapiGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define gpumgmtapiGetMemInterMapParams(pRmResource, pParams) gpumgmtapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define gpumgmtapiCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define gpumgmtapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) gpumgmtapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define gpumgmtapiGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define gpumgmtapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) gpumgmtapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define gpumgmtapiControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define gpumgmtapiControlSerialization_Prologue(pResource, pCallContext, pParams) gpumgmtapiControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define gpumgmtapiControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define gpumgmtapiControlSerialization_Epilogue(pResource, pCallContext, pParams) gpumgmtapiControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define gpumgmtapiControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define gpumgmtapiControl_Prologue(pResource, pCallContext, pParams) gpumgmtapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define gpumgmtapiControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define gpumgmtapiControl_Epilogue(pResource, pCallContext, pParams) gpumgmtapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define gpumgmtapiCanCopy_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define gpumgmtapiCanCopy(pResource) gpumgmtapiCanCopy_DISPATCH(pResource) +#define gpumgmtapiIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define gpumgmtapiIsDuplicate(pResource, hMemory, pDuplicate) gpumgmtapiIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define gpumgmtapiPreDestruct_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define gpumgmtapiPreDestruct(pResource) gpumgmtapiPreDestruct_DISPATCH(pResource) +#define gpumgmtapiControl_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControl__ +#define gpumgmtapiControl(pResource, pCallContext, pParams) gpumgmtapiControl_DISPATCH(pResource, pCallContext, pParams) +#define gpumgmtapiControlFilter_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define gpumgmtapiControlFilter(pResource, pCallContext, pParams) gpumgmtapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define gpumgmtapiMap_FNPTR(pResource) 
pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMap__ +#define gpumgmtapiMap(pResource, pCallContext, pParams, pCpuMapping) gpumgmtapiMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define gpumgmtapiUnmap_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmap__ +#define gpumgmtapiUnmap(pResource, pCallContext, pCpuMapping) gpumgmtapiUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define gpumgmtapiIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define gpumgmtapiIsPartialUnmapSupported(pResource) gpumgmtapiIsPartialUnmapSupported_DISPATCH(pResource) +#define gpumgmtapiMapTo_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define gpumgmtapiMapTo(pResource, pParams) gpumgmtapiMapTo_DISPATCH(pResource, pParams) +#define gpumgmtapiUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define gpumgmtapiUnmapFrom(pResource, pParams) gpumgmtapiUnmapFrom_DISPATCH(pResource, pParams) +#define gpumgmtapiGetRefCount_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define gpumgmtapiGetRefCount(pResource) gpumgmtapiGetRefCount_DISPATCH(pResource) +#define gpumgmtapiAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define gpumgmtapiAddAdditionalDependants(pClient, pResource, pReference) gpumgmtapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NV_STATUS gpumgmtapiCtrlCmdSetShutdownState_DISPATCH(struct GpuManagementApi *pGpuMgmt, NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *pParams) { + return pGpuMgmt->__gpumgmtapiCtrlCmdSetShutdownState__(pGpuMgmt, pParams); +} + +static inline NvBool gpumgmtapiAccessCallback_DISPATCH(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool gpumgmtapiShareCallback_DISPATCH(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS gpumgmtapiGetMemInterMapParams_DISPATCH(struct GpuManagementApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS gpumgmtapiCheckMemInterUnmap_DISPATCH(struct GpuManagementApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS gpumgmtapiGetMemoryMappingDescriptor_DISPATCH(struct GpuManagementApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS 
gpumgmtapiControlSerialization_Prologue_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void gpumgmtapiControlSerialization_Epilogue_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS gpumgmtapiControl_Prologue_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void gpumgmtapiControl_Epilogue_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool gpumgmtapiCanCopy_DISPATCH(struct GpuManagementApi *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiCanCopy__(pResource); +} + +static inline NV_STATUS gpumgmtapiIsDuplicate_DISPATCH(struct GpuManagementApi *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void gpumgmtapiPreDestruct_DISPATCH(struct GpuManagementApi *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiPreDestruct__(pResource); +} + +static inline NV_STATUS gpumgmtapiControl_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS gpumgmtapiControlFilter_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS gpumgmtapiMap_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS gpumgmtapiUnmap_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool gpumgmtapiIsPartialUnmapSupported_DISPATCH(struct GpuManagementApi *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS gpumgmtapiMapTo_DISPATCH(struct GpuManagementApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS gpumgmtapiUnmapFrom_DISPATCH(struct GpuManagementApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return 
pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiUnmapFrom__(pResource, pParams); +} + +static inline NvU32 gpumgmtapiGetRefCount_DISPATCH(struct GpuManagementApi *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiGetRefCount__(pResource); +} + +static inline void gpumgmtapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GpuManagementApi *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__gpumgmtapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +NV_STATUS gpumgmtapiCtrlCmdSetShutdownState_IMPL(struct GpuManagementApi *pGpuMgmt, NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *pParams); + +NV_STATUS gpumgmtapiConstruct_IMPL(struct GpuManagementApi *arg_pGpuMgmt, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_gpumgmtapiConstruct(arg_pGpuMgmt, arg_pCallContext, arg_pParams) gpumgmtapiConstruct_IMPL(arg_pGpuMgmt, arg_pCallContext, arg_pParams) +void gpumgmtapiDestruct_IMPL(struct GpuManagementApi *pGpuMgmt); + +#define __nvoc_gpumgmtapiDestruct(pGpuMgmt) gpumgmtapiDestruct_IMPL(pGpuMgmt) +#undef PRIVATE_FIELD + + +#endif // GPU_MGMT_API_H + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_GPU_MGMT_API_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_mgr_nvoc.c b/src/nvidia/generated/g_gpu_mgr_nvoc.c new file mode 100644 index 0000000..e0c3c73 --- /dev/null +++ b/src/nvidia/generated/g_gpu_mgr_nvoc.c @@ -0,0 +1,204 @@ +#define NVOC_GPU_MGR_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_mgr_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xcf1b25 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUMGR; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +// Forward declarations for OBJGPUMGR +void __nvoc_init__Object(Object*); +void __nvoc_init__OBJGPUMGR(OBJGPUMGR*); +void __nvoc_init_funcTable_OBJGPUMGR(OBJGPUMGR*); +NV_STATUS __nvoc_ctor_OBJGPUMGR(OBJGPUMGR*); +void __nvoc_init_dataField_OBJGPUMGR(OBJGPUMGR*); +void __nvoc_dtor_OBJGPUMGR(OBJGPUMGR*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__OBJGPUMGR; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJGPUMGR; + +// Down-thunk(s) to bridge OBJGPUMGR methods from ancestors (if any) + +// Up-thunk(s) to bridge OBJGPUMGR methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUMGR = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJGPUMGR), + /*classId=*/ classId(OBJGPUMGR), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJGPUMGR", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPUMGR, + /*pCastInfo=*/ &__nvoc_castinfo__OBJGPUMGR, + /*pExportInfo=*/ &__nvoc_export_info__OBJGPUMGR +}; + + +// Metadata with per-class RTTI with ancestor(s) +static const struct NVOC_METADATA__OBJGPUMGR __nvoc_metadata__OBJGPUMGR = { + .rtti.pClassDef = &__nvoc_class_def_OBJGPUMGR, // (gpumgr) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPUMGR, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + 
.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(OBJGPUMGR, __nvoc_base_Object), +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__OBJGPUMGR = { + .numRelatives = 2, + .relatives = { + &__nvoc_metadata__OBJGPUMGR.rtti, // [0]: (gpumgr) this + &__nvoc_metadata__OBJGPUMGR.metadata__Object.rtti, // [1]: (obj) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJGPUMGR = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJGPUMGR(OBJGPUMGR *pThis) { + __nvoc_gpumgrDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJGPUMGR(OBJGPUMGR *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJGPUMGR(OBJGPUMGR *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJGPUMGR_fail_Object; + __nvoc_init_dataField_OBJGPUMGR(pThis); + + status = __nvoc_gpumgrConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_OBJGPUMGR_fail__init; + goto __nvoc_ctor_OBJGPUMGR_exit; // Success + +__nvoc_ctor_OBJGPUMGR_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJGPUMGR_fail_Object: +__nvoc_ctor_OBJGPUMGR_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_OBJGPUMGR_1(OBJGPUMGR *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_OBJGPUMGR_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_OBJGPUMGR(OBJGPUMGR *pThis) { + __nvoc_init_funcTable_OBJGPUMGR_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__OBJGPUMGR(OBJGPUMGR *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_OBJGPUMGR = pThis; // (gpumgr) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Object(&pThis->__nvoc_base_Object); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__OBJGPUMGR.metadata__Object; // (obj) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__OBJGPUMGR; // (gpumgr) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_OBJGPUMGR(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJGPUMGR(OBJGPUMGR **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + OBJGPUMGR *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(OBJGPUMGR), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(OBJGPUMGR)); + + pThis->__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. 
+ if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__OBJGPUMGR(pThis); + status = __nvoc_ctor_OBJGPUMGR(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJGPUMGR_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_OBJGPUMGR_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(OBJGPUMGR)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJGPUMGR(OBJGPUMGR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJGPUMGR(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_mgr_nvoc.h b/src/nvidia/generated/g_gpu_mgr_nvoc.h new file mode 100644 index 0000000..a4da607 --- /dev/null +++ b/src/nvidia/generated/g_gpu_mgr_nvoc.h @@ -0,0 +1,623 @@ + +#ifndef _G_GPU_MGR_NVOC_H_ +#define _G_GPU_MGR_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#pragma once +#include "g_gpu_mgr_nvoc.h" + + +#ifndef _GPUMGR_H_ +#define _GPUMGR_H_ + +// +// GPU Manager Defines and Structures +// + +struct OBJGPU; +#include "core/core.h" +#include "core/system.h" +#include "nvlimits.h" +#include "gpu_mgr/gpu_group.h" +#include "gpu/gpu_uuid.h" +#include "gpu/gpu_device_mapping.h" +#include "gpu/gpu_access.h" +#include "gpu/gpu_arch.h" +#include "ctrl/ctrl0000/ctrl0000gpu.h" +#include "ctrl/ctrl2080/ctrl2080ce.h" +#include "ctrl/ctrl2080/ctrl2080internal.h" +#include "nvoc/utility.h" + +#include "utils/nvbitvector.h" +TYPEDEF_BITVECTOR(MC_ENGINE_BITVECTOR); + +#define GPUMGR_MAX_GPU_INSTANCES 8 +#define GPUMGR_MAX_COMPUTE_INSTANCES 8 + +// +// Terminology: +// GPU -> entity sitting on the bus +// Device -> broadcast semantics; maps to one or more GPUs +// Subdevice -> unicast semantics; maps to a single GPU +// + + +//////////////////////////////////////////////////////////////////////////////// +// DO NOT ADD NEW STUBS HERE // +//////////////////////////////////////////////////////////////////////////////// +#define gpumgrGetGpuLinkCount(deviceInstance) ((NvU32) 0) +#define gpumgrGetSliLinkOutputMaskFromGpu(pGpu) ((NvU32) 0) +#define gpumgrGetVidLinkOutputMaskFromGpu(pGpu) ((NvU32) 0) +#define gpumgrGetSliLinkOrderCount(pGpu) ((NvU32) 0) +#define gpumgrGetSliLinkConnectionCount(pGpu) ((NvU32) 0) +#define gpumgrGetSLIConfig(gpuInstance, onlyWithSliLink) ((NvU32) 0) +#define gpumgrDisableVidLink(pGpu, head, max_dr_port) +#define gpumgrGetBcEnabledStatus(g) (NV_FALSE) +#define gpumgrGetBcEnabledStatusEx(g, t) (NV_FALSE) +#define gpumgrSetBcEnabledStatus(g, b) do { NvBool b2 = b; (void)b2; } while (0) +#define gpumgrSLILoopReentrancy(pGpu, l, r, i, pFuncStr) +#define gpumgrSLILoopReentrancyPop(pGpu) ((NvU32)0) +#define gpumgrSLILoopReentrancyPush(pGpu, sliLoopReentrancy) do { NvU32 x = sliLoopReentrancy; (void)x; } while(0) + +#define gpumgrPinsetToPinsetTableIndex(pinset, pPinsetIndex) (NV_ERR_NOT_SUPPORTED) + +typedef struct +{ + NvU32 gpuId; + NvU32 flags; + NvU64 gpuDomainBusDevice; + NvBool bInitAttempted; + NvBool bDrainState; // no new client connections to this GPU + NvBool bRemoveIdle; // remove this GPU once it's idle (detached) + NvBool bExcluded; // this gpu is marked as excluded; do not use + NvBool bUuidValid; // cached uuid is valid + NvBool bSkipHwNvlinkDisable; //skip HW registers configuration for disabled links + NV2080_CTRL_NVLINK_LINK_MASK initDisabledNvlinks; + NV_STATUS initStatus; + NvU8 uuid[RM_SHA1_GID_SIZE]; + OS_RM_CAPS *pOsRmCaps; // "Opaque" pointer to os-specific capabilities +} PROBEDGPU; + +#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_FLIPS 11:4 +#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_TIME 12:12 +#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_TIME_INVALID 0x0000000 +#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_TIME_VALID 0x0000001 + +/*! + * Structure for tracking resources allocated for saving primary GPU's VBIOS + * state. This is used for TDR/fullchip reset recovery. The GPU object gets + * destroyed, so the data belongs here. 
+ */
+typedef struct _def_gpumgr_save_vbios_state
+{
+    RmPhysAddr vgaWorkspaceVidMemBase;
+} GPUMGR_SAVE_VBIOS_STATE;
+
+#define __staticCast_OBJGPUMGR(pThis) \
+    ((pThis)->__nvoc_pbase_OBJGPUMGR)
+
+#ifdef __nvoc_gpu_mgr_h_disabled
+#define __dynamicCast_OBJGPUMGR(pThis) ((OBJGPUMGR*) NULL)
+#else //__nvoc_gpu_mgr_h_disabled
+#define __dynamicCast_OBJGPUMGR(pThis) \
+    ((OBJGPUMGR*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPUMGR)))
+#endif //__nvoc_gpu_mgr_h_disabled
+
+NV_STATUS __nvoc_objCreateDynamic_OBJGPUMGR(OBJGPUMGR**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_OBJGPUMGR(OBJGPUMGR**, Dynamic*, NvU32);
+#define __objCreate_OBJGPUMGR(ppNewObj, pParent, createFlags) \
+    __nvoc_objCreate_OBJGPUMGR((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
+
+
+// Wrapper macros
+
+// Dispatch functions
+NV_STATUS gpumgrInitPcieP2PCapsCache_IMPL(struct OBJGPUMGR *pGpuMgr);
+
+
+#define gpumgrInitPcieP2PCapsCache(pGpuMgr) gpumgrInitPcieP2PCapsCache_IMPL(pGpuMgr)
+#define gpumgrInitPcieP2PCapsCache_HAL(pGpuMgr) gpumgrInitPcieP2PCapsCache(pGpuMgr)
+
+void gpumgrDestroyPcieP2PCapsCache_IMPL(struct OBJGPUMGR *pGpuMgr);
+
+
+#define gpumgrDestroyPcieP2PCapsCache(pGpuMgr) gpumgrDestroyPcieP2PCapsCache_IMPL(pGpuMgr)
+#define gpumgrDestroyPcieP2PCapsCache_HAL(pGpuMgr) gpumgrDestroyPcieP2PCapsCache(pGpuMgr)
+
+NV_STATUS gpumgrStorePcieP2PCapsCache_IMPL(NvU32 gpuMask, NvU8 p2pWriteCapStatus, NvU8 p2pReadCapStatus);
+
+
+#define gpumgrStorePcieP2PCapsCache(gpuMask, p2pWriteCapStatus, p2pReadCapStatus) gpumgrStorePcieP2PCapsCache_IMPL(gpuMask, p2pWriteCapStatus, p2pReadCapStatus)
+#define gpumgrStorePcieP2PCapsCache_HAL(gpuMask, p2pWriteCapStatus, p2pReadCapStatus) gpumgrStorePcieP2PCapsCache(gpuMask, p2pWriteCapStatus, p2pReadCapStatus)
+
+void gpumgrRemovePcieP2PCapsFromCache_IMPL(NvU32 gpuId);
+
+
+#define gpumgrRemovePcieP2PCapsFromCache(gpuId) gpumgrRemovePcieP2PCapsFromCache_IMPL(gpuId)
+#define gpumgrRemovePcieP2PCapsFromCache_HAL(gpuId) gpumgrRemovePcieP2PCapsFromCache(gpuId)
+
+NvBool gpumgrGetPcieP2PCapsFromCache_IMPL(NvU32 gpuMask, NvU8 *pP2PWriteCapStatus, NvU8 *pP2PReadCapStatus);
+
+
+#define gpumgrGetPcieP2PCapsFromCache(gpuMask, pP2PWriteCapStatus, pP2PReadCapStatus) gpumgrGetPcieP2PCapsFromCache_IMPL(gpuMask, pP2PWriteCapStatus, pP2PReadCapStatus)
+#define gpumgrGetPcieP2PCapsFromCache_HAL(gpuMask, pP2PWriteCapStatus, pP2PReadCapStatus) gpumgrGetPcieP2PCapsFromCache(gpuMask, pP2PWriteCapStatus, pP2PReadCapStatus)
+
+static inline void gpumgrAddSystemNvlinkTopo(NvU64 DomainBusDevice) {
+    return;
+}
+
+static inline NvBool gpumgrGetSystemNvlinkTopo(NvU64 DomainBusDevice, struct NVLINK_TOPOLOGY_PARAMS *pTopoParams) {
+    return NV_FALSE;
+}
+
+static inline void gpumgrUpdateSystemNvlinkTopo(NvU64 DomainBusDevice, struct NVLINK_TOPOLOGY_PARAMS *pTopoParams) {
+    return;
+}
+
+static inline NVLINK_UNCONTAINED_ERROR_RECOVERY_INFO *gpumgrGetNvlinkRecoveryInfo(NvU64 DomainBusDevice) {
+    return ((void *)0);
+}
+
+static inline NV_STATUS gpumgrSetGpuInitDisabledNvlinks(NvU32 gpuId, NvU32 mask, NV2080_CTRL_NVLINK_LINK_MASK *pLinks, NvBool bSkipHwNvlinkDisable) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+static inline NV_STATUS gpumgrGetGpuInitDisabledNvlinks(NvU32 gpuId, NV2080_CTRL_NVLINK_LINK_MASK *pLinks, NvBool *pbSkipHwNvlinkDisable) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+static inline NvU8 gpumgrGetGpuNvlinkBwMode(void) {
+    return (1);
+}
+
+static inline NvU8 gpumgrGetGpuNvlinkBwModeScope(void) {
+    return (0);
+}
+
+static inline void gpumgrSetGpuNvlinkBwModeFromRegistry(struct OBJGPU *pGpu) {
+    return;
+}
+
+static inline NV_STATUS
gpumgrSetGpuNvlinkBwMode(NvU8 mode) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS gpumgrSetGpuNvlinkBwModePerGpu(struct OBJGPU *pGpu, NvU8 mode) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NvBool gpumgrCheckIndirectPeer(struct OBJGPU *pGpu, struct OBJGPU *pRemoteGpu) { + return NV_FALSE; +} + +static inline void gpumgrAddSystemMIGInstanceTopo(NvU64 domainBusDevice) { + return; +} + +static inline NvBool gpumgrGetSystemMIGInstanceTopo(NvU64 domainBusDevice, struct GPUMGR_SAVE_MIG_INSTANCE_TOPOLOGY **ppTopoParams) { + return NV_FALSE; +} + +static inline NvBool gpumgrIsSystemMIGEnabled(NvU64 domainBusDevice) { + return NV_FALSE; +} + +static inline void gpumgrSetSystemMIGEnabled(NvU64 domainBusDevice, NvBool bMIGEnabled) { + return; +} + +static inline void gpumgrUnregisterRmCapsForMIGGI(NvU64 gpuDomainBusDevice) { + return; +} + +static inline void gpumgrCacheCreateGpuInstance(struct OBJGPU *pGpu, NvU32 swizzId) { + return; +} + +static inline void gpumgrCacheDestroyGpuInstance(struct OBJGPU *pGpu, NvU32 swizzId) { + return; +} + +static inline void gpumgrCacheCreateComputeInstance(struct OBJGPU *pGpu, NvU32 swizzId, NvU32 ciId) { + return; +} + +static inline void gpumgrCacheDestroyComputeInstance(struct OBJGPU *pGpu, NvU32 swizzId, NvU32 ciId) { + return; +} + +static inline void gpumgrCacheSetMIGEnabled(struct OBJGPU *pGpu, NvBool bMIGEnabled) { + return; +} + +static inline void gpumgrUpdateBoardId(struct OBJGPU *arg1) { + return; +} + +static inline void gpumgrServiceInterrupts(NvU32 arg1, MC_ENGINE_BITVECTOR *arg2, NvBool arg3) { + return; +} + +NV_STATUS gpumgrConstruct_IMPL(struct OBJGPUMGR *arg_); + +#define __nvoc_gpumgrConstruct(arg_) gpumgrConstruct_IMPL(arg_) +void gpumgrDestruct_IMPL(struct OBJGPUMGR *arg1); + +#define __nvoc_gpumgrDestruct(arg1) gpumgrDestruct_IMPL(arg1) +NV_STATUS gpumgrCacheGetActiveDeviceIds_IMPL(NV0000_CTRL_GPU_GET_ACTIVE_DEVICE_IDS_PARAMS *pActiveDeviceIdsParams); + +#define gpumgrCacheGetActiveDeviceIds(pActiveDeviceIdsParams) gpumgrCacheGetActiveDeviceIds_IMPL(pActiveDeviceIdsParams) +struct GpuArch *gpumgrGetGpuArch_IMPL(NvU32 pmcBoot42, NvU32 socChipId0, TEGRA_CHIP_TYPE tegraType); + +#define gpumgrGetGpuArch(pmcBoot42, socChipId0, tegraType) gpumgrGetGpuArch_IMPL(pmcBoot42, socChipId0, tegraType) +#undef PRIVATE_FIELD + + +typedef struct { + NvBool specified; // Set this flag when using this struct + NvBool bIsIGPU; // Set this flag for iGPU + + DEVICE_MAPPING deviceMapping[DEVICE_INDEX_MAX]; // Register Aperture mapping + NvU32 socChipId0; // Chip ID used for HAL binding + NvU32 iovaspaceId; // SMMU client ID +} SOCGPUATTACHARG; + +// +// Packages up system/bus state for attach process. +// +typedef struct GPUATTACHARG +{ + GPUHWREG *regBaseAddr; + GPUHWREG *fbBaseAddr; + GPUHWREG *instBaseAddr; + RmPhysAddr devPhysAddr; + RmPhysAddr fbPhysAddr; + RmPhysAddr instPhysAddr; + RmPhysAddr ioPhysAddr; + NvU64 nvDomainBusDeviceFunc; + NvU32 regLength; + NvU64 fbLength; + NvU32 instLength; + NvU32 intLine; + void *pOsAttachArg; + NvBool bIsSOC; + NvU32 socDeviceCount; + DEVICE_MAPPING socDeviceMappings[GPU_MAX_DEVICE_MAPPINGS]; + NvU32 socId; + NvU32 socSubId; + NvU32 socChipId0; + NvU32 iovaspaceId; + NvBool bRequestFwClientRm; + NvS32 cpuNumaNodeId; + + // + // The SOC-specific fields above are legacy fields that were added for + // ARCH MODS iGPU verification. There is a plan to deprecate these fields as + // part of an effort to clean up the existing iGPU code in RM. 
+ // + // Starting with T234D+, the SOCGPUATTACHARG field below will be used to + // pass the required attach info for a single SOC device from the RM OS + // layer to core RM. + // + SOCGPUATTACHARG socDeviceArgs; +} GPUATTACHARG; + +typedef struct WindowsFirmwarePolicyArg +{ + NvU32 devId; + NvU32 ssId; + NvU32 fuseIsQuadro; + NvBool bIsTccOrMcdm; +} WindowsFirmwarePolicyArg; + +NV_STATUS gpumgrThreadEnableExpandedGpuVisibility(void); +void gpumgrThreadDisableExpandedGpuVisibility(void); +NvBool gpumgrThreadHasExpandedGpuVisibility(void); + +NV_STATUS gpumgrGetGpuAttachInfo(NvU32 *pGpuCnt, NvU32 *pGpuMask); +NV_STATUS gpumgrGetProbedGpuIds(NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *); +NV_STATUS gpumgrGetProbedGpuDomainBusDevice(NvU32 gpuId, NvU64 *gpuDomainBusDevice); +void gpumgrSetProbedFlags(NvU32 gpuId, NvU32 flags); +NV_STATUS gpumgrGetAttachedGpuIds(NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *); +NV_STATUS gpumgrGetGpuIdInfo(NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *); +NV_STATUS gpumgrGetGpuIdInfoV2(NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *); +void gpumgrSetGpuId(OBJGPU*, NvU32 gpuId); +NV_STATUS gpumgrGetGpuInitStatus(NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *); +void gpumgrSetGpuInitStatus(NvU32 gpuId, NV_STATUS status); +OBJGPU* gpumgrGetGpuFromId(NvU32 gpuId); +OBJGPU* gpumgrGetGpuFromUuid(const NvU8 *pGpuUuid, NvU32 flags); +OBJGPU* gpumgrGetGpuFromBusInfo(NvU32 domain, NvU8 bus, NvU8 device); +NvU32 gpumgrGetDefaultPrimaryGpu(NvU32 gpuMask); +NV_STATUS gpumgrAllocGpuInstance(NvU32 *pDeviceInstance); +NV_STATUS gpumgrRegisterGpuId(NvU32 gpuId, NvU64 gpuDomainBusDevice); +NV_STATUS gpumgrUnregisterGpuId(NvU32 gpuId); +NV_STATUS gpumgrExcludeGpuId(NvU32 gpuId); +NV_STATUS gpumgrSetUuid(NvU32 gpuId, NvU8 *uuid); +NV_STATUS gpumgrGetGpuUuidInfo(NvU32 gpuId, NvU8 **ppUuidStr, NvU32 *pUuidStrLen, NvU32 uuidFlags); +// gpumgrGetRmFirmwarePolicy() and gpumgrGetRmFirmwareLogsEnabled() contain +// all logic for deciding the policies for loading firmwares, and so need to be +// compiled for all platforms besides those actually running the firmwares +void gpumgrGetRmFirmwarePolicy(NvU32 pmcBoot42, NvBool bIsVirtualWithSriov, NvBool bIsSoc, + NvU32 enableFirmwareRegVal, NvBool *pbRequestFirmware, + NvBool *pbAllowFallbackToMonolithicRm, + WindowsFirmwarePolicyArg *pWinRmFwPolicyArg); +NvBool gpumgrGetRmFirmwareLogsEnabled(NvU32 enableFirmwareLogsRegVal); +NvBool gpumgrIsDeviceRmFirmwareCapable(NvU32 pmcBoot42, NvBool bIsVirtualWithSriov, + NvBool bIsSoc, NvBool *pbEnableByDefault, + WindowsFirmwarePolicyArg *pWinRmFwPolicyArg); +NvBool gpumgrIsVgxRmFirmwareCapableChip(NvU32 pmcBoot42); +NV_STATUS gpumgrAttachGpu(NvU32 deviceInstance, GPUATTACHARG *); +NV_STATUS gpumgrDetachGpu(NvU32 deviceInstance); +OBJGPU* gpumgrGetNextGpu(NvU32 gpuMask, NvU32 *pStartIndex); +NV_STATUS gpumgrStatePreInitGpu(OBJGPU*); +NV_STATUS gpumgrStateInitGpu(OBJGPU*); +NV_STATUS gpumgrStateLoadGpu(OBJGPU*, NvU32); +NV_STATUS gpumgrAllocDeviceInstance(NvU32 *pDeviceInstance); +NV_STATUS gpumgrCreateDevice(NvU32 *pDeviceInstance, NvU32 gpuMask, NvU32 *pGpuIdsOrdinal); +NV_STATUS gpumgrDestroyDevice(NvU32 deviceInstance); +NvU32 gpumgrGetDeviceInstanceMask(void); +NvU32 gpumgrGetDeviceGpuMask(NvU32 deviceInstance); +NV_STATUS gpumgrIsDeviceInstanceValid(NvU32 deviceInstance); +NvU32 gpumgrGetPrimaryForDevice(NvU32 deviceInstance); +NvBool gpumgrIsSubDeviceInstanceValid(NvU32 subDeviceInstance); +NvBool gpumgrIsDeviceEnabled(NvU32 deviceInstance); +NvU32 gpumgrGetGpuMask(OBJGPU *pGpu); +OBJGPU* gpumgrGetGpu(NvU32 deviceInstance); 
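+// Illustrative usage sketch (hypothetical loop, not part of this header):
+// gpumgrGetNextGpu() walks the GPUs selected by gpuMask, advancing
+// *pStartIndex on each call until the mask is exhausted:
+//     NvU32   gpuIndex = 0;
+//     OBJGPU *pGpu;
+//     while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex)) != NULL)
+//     {
+//         // per-GPU work here
+//     }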
+OBJGPU* gpumgrGetSomeGpu(void); +NvU32 gpumgrGetSubDeviceCount(NvU32 gpuMask); +NvU32 gpumgrGetSubDeviceCountFromGpu(OBJGPU *pGpu); +NvU32 gpumgrGetSubDeviceMaxValuePlus1(OBJGPU *pGpu); +NvU32 gpumgrGetSubDeviceInstanceFromGpu(OBJGPU *pGpu); +OBJGPU* gpumgrGetParentGPU(OBJGPU *pGpu); +void gpumgrSetParentGPU(OBJGPU *pGpu, OBJGPU *pParentGpu); +NvBool gpumgrIsGpuDisplayParent(OBJGPU*); +OBJGPU* gpumgrGetDisplayParent(OBJGPU*); +NV_STATUS gpumgrGetGpuLockAndDrPorts(OBJGPU*, OBJGPU*, NvU32 *, NvU32 *); +NV_STATUS gpumgrGetBootPrimary(OBJGPU **ppGpu); +OBJGPU* gpumgrGetMGpu(void); +RmPhysAddr gpumgrGetGpuPhysFbAddr(OBJGPU*); +OBJGPU* gpumgrGetGpuFromSubDeviceInst(NvU32, NvU32); +NV_STATUS gpumgrAddDeviceInstanceToGpus(NvU32 gpuMask); +NV_STATUS gpumgrRemoveDeviceInstanceFromGpus(NvU32 gpuMask); +NV_STATUS gpumgrConstructGpuGrpObject(struct OBJGPUMGR *pGpuMgr, NvU32 gpuMask, struct OBJGPUGRP **ppGpuGrp); +struct OBJGPUGRP* gpumgrGetGpuGrpFromGpu(OBJGPU *pGpu); +struct OBJGPUGRP* gpumgrGetGpuGrpFromInstance(NvU32 gpugrpInstance); +NV_STATUS gpumgrModifyGpuDrainState(NvU32 gpuId, NvBool bEnable, NvBool bRemove, NvBool bLinkDisable); +NV_STATUS gpumgrQueryGpuDrainState(NvU32 gpuId, NvBool *pBEnable, NvBool *pBRemove); +NvBool gpumgrIsGpuPointerValid(OBJGPU *pGpu); +NvBool gpumgrIsGpuPointerAttached(OBJGPU *pGpu); +NvU32 gpumgrGetGrpMaskFromGpuInst(NvU32 gpuInst); +void gpumgrAddDeviceMaskToGpuInstTable(NvU32 gpuMask); +void gpumgrClearDeviceMaskFromGpuInstTable(NvU32 gpuMask); +NvBool gpumgrSetGpuAcquire(OBJGPU *pGpu); +void gpumgrSetGpuRelease(void); +NvU8 gpumgrGetGpuBridgeType(void); +NvBool gpumgrAreAllGpusInOffloadMode(void); +NvBool gpumgrIsSafeToReadGpuInfo(void); +NvBool gpumgrIsDeviceMsixAllowed(RmPhysAddr bar0BaseAddr, NvU32 pmcBoot1, NvU32 pmcBoot42); +NvBool gpumgrWaitForBarFirewall(NvU32 domain, NvU8 bus, NvU8 device, NvU8 function, NvU16 devId, NvU16 subsystemId); + +// +// gpumgrIsSubDeviceCountOne +// +static NV_INLINE NvBool +gpumgrIsSubDeviceCountOne(NvU32 gpuMask) +{ + // + // A fast version of gpumgrGetSubDeviceCount(gpumask) == 1. + // Make sure it returns 0 for gpuMask==0, just like gpumgrGetSubDeviceCount(0)!!! 
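+    // Note: (gpuMask & (gpuMask - 1)) clears the lowest set bit, so the
+    // expression below is NV_TRUE exactly when gpuMask has a single bit set
+    // (a power of two): 0x08 -> NV_TRUE, 0x0A -> NV_FALSE, and 0x00 is
+    // rejected by the explicit gpuMask != 0 check.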
+ // + return gpuMask != 0 && (gpuMask&(gpuMask-1)) == 0; +} + +// +// gpumgrIsParentGPU +// +static NV_INLINE NvBool +gpumgrIsParentGPU(OBJGPU *pGpu) +{ + return gpumgrGetParentGPU(pGpu) == pGpu; +} + +#endif // _GPUMGR_H_ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_GPU_MGR_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_nvoc.c b/src/nvidia/generated/g_gpu_nvoc.c new file mode 100644 index 0000000..38c6a35 --- /dev/null +++ b/src/nvidia/generated/g_gpu_nvoc.c @@ -0,0 +1,698 @@ +#define NVOC_GPU_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x7ef3cb = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuHalspecOwner; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE; + +// Forward declarations for OBJGPU +void __nvoc_init__Object(Object*); +void __nvoc_init__GpuHalspecOwner(GpuHalspecOwner*, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + TEGRA_CHIP_TYPE TegraChipHal_tegraType); +void __nvoc_init__RmHalspecOwner(RmHalspecOwner*, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver); +void __nvoc_init__OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_init__OBJGPU(OBJGPU*, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + TEGRA_CHIP_TYPE TegraChipHal_tegraType, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver); +void __nvoc_init_funcTable_OBJGPU(OBJGPU*); +NV_STATUS __nvoc_ctor_OBJGPU(OBJGPU*, NvU32 arg_gpuInstance, NvU32 arg_gpuId, NvUuid *arg_pUuid, struct GpuArch *arg_pGpuArch); +void __nvoc_init_dataField_OBJGPU(OBJGPU*); +void __nvoc_dtor_OBJGPU(OBJGPU*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__OBJGPU; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJGPU; + +// Down-thunk(s) to bridge OBJGPU methods from ancestors (if any) + +// Up-thunk(s) to bridge OBJGPU methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJGPU), + /*classId=*/ classId(OBJGPU), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJGPU", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPU, + /*pCastInfo=*/ &__nvoc_castinfo__OBJGPU, + /*pExportInfo=*/ &__nvoc_export_info__OBJGPU +}; + + +// Metadata with per-class RTTI with ancestor(s) +static const struct NVOC_METADATA__OBJGPU __nvoc_metadata__OBJGPU = { + .rtti.pClassDef = &__nvoc_class_def_OBJGPU, // (gpu) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPU, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(OBJGPU, __nvoc_base_Object), + .metadata__GpuHalspecOwner.rtti.pClassDef = &__nvoc_class_def_GpuHalspecOwner, // (gpuhalspecowner) super + .metadata__GpuHalspecOwner.rtti.dtor = 
&__nvoc_destructFromBase, + .metadata__GpuHalspecOwner.rtti.offset = NV_OFFSETOF(OBJGPU, __nvoc_base_GpuHalspecOwner), + .metadata__RmHalspecOwner.rtti.pClassDef = &__nvoc_class_def_RmHalspecOwner, // (rmhalspecowner) super + .metadata__RmHalspecOwner.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmHalspecOwner.rtti.offset = NV_OFFSETOF(OBJGPU, __nvoc_base_RmHalspecOwner), + .metadata__OBJTRACEABLE.rtti.pClassDef = &__nvoc_class_def_OBJTRACEABLE, // (traceable) super + .metadata__OBJTRACEABLE.rtti.dtor = &__nvoc_destructFromBase, + .metadata__OBJTRACEABLE.rtti.offset = NV_OFFSETOF(OBJGPU, __nvoc_base_OBJTRACEABLE), +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__OBJGPU = { + .numRelatives = 5, + .relatives = { + &__nvoc_metadata__OBJGPU.rtti, // [0]: (gpu) this + &__nvoc_metadata__OBJGPU.metadata__Object.rtti, // [1]: (obj) super + &__nvoc_metadata__OBJGPU.metadata__GpuHalspecOwner.rtti, // [2]: (gpuhalspecowner) super + &__nvoc_metadata__OBJGPU.metadata__RmHalspecOwner.rtti, // [3]: (rmhalspecowner) super + &__nvoc_metadata__OBJGPU.metadata__OBJTRACEABLE.rtti, // [4]: (traceable) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJGPU = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_GpuHalspecOwner(GpuHalspecOwner*); +void __nvoc_dtor_RmHalspecOwner(RmHalspecOwner*); +void __nvoc_dtor_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_dtor_OBJGPU(OBJGPU *pThis) { + __nvoc_gpuDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + __nvoc_dtor_GpuHalspecOwner(&pThis->__nvoc_base_GpuHalspecOwner); + __nvoc_dtor_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner); + __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJGPU(OBJGPU *pThis) { + ChipHal *chipHal = &staticCast(pThis, GpuHalspecOwner)->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &staticCast(pThis, RmHalspecOwner)->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + pThis->setProperty(pThis, PDB_PROP_GPU_IS_CONNECTED, NV_TRUE); + + // NVOC Property Hal field -- PDB_PROP_GPU_KEEP_WPR_ACROSS_GC6_SUPPORTED + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_KEEP_WPR_ACROSS_GC6_SUPPORTED, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */ + { + pThis->setProperty(pThis, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY, NV_TRUE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_ATS_SUPPORTED + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_ATS_SUPPORTED, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_SC7_SUPPORTED + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_SC7_SUPPORTED, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_RTD3_RG_SUPPORTED + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_RTD3_RG_SUPPORTED, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_TRIGGER_PCIE_FLR + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_TRIGGER_PCIE_FLR, 
NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_CLKS_IN_TEGRA_SOC + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_CLKS_IN_TEGRA_SOC, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_PREINITIALIZED_WPR_REGION + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_PREINITIALIZED_WPR_REGION, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_ZERO_FB + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_ZERO_FB, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_MIG_SUPPORTED + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_MIG_SUPPORTED, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_MIG_MIRROR_HOST_CI_ON_GUEST + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_MIG_MIRROR_HOST_CI_ON_GUEST, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_MIG_SUPPORTS_SPLIT_CE_RANGES + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_MIG_SUPPORTS_SPLIT_CE_RANGES, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_MIG_GFX_SUPPORTED + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_MIG_GFX_SUPPORTED, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_MIG_TIMESLICING_SUPPORTED + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_MIG_TIMESLICING_SUPPORTED, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_IS_COT_ENABLED + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_IS_COT_ENABLED, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_FW_WPR_OFFSET_SET_BY_ACR + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_FW_WPR_OFFSET_SET_BY_ACR, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_SRIOV_SYSMEM_DIRTY_PAGE_TRACKING_ENABLED + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_SRIOV_SYSMEM_DIRTY_PAGE_TRACKING_ENABLED, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK, NV_TRUE); + } + pThis->setProperty(pThis, PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE, NV_TRUE); + + // NVOC Property Hal field -- PDB_PROP_GPU_CC_FEATURE_CAPABLE + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_CC_FEATURE_CAPABLE, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_APM_FEATURE_CAPABLE + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_APM_FEATURE_CAPABLE, NV_FALSE); + } + + // NVOC Property Hal field -- 
PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */ + { + pThis->setProperty(pThis, PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF, NV_TRUE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_IS_SOC_SDM + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_IS_SOC_SDM, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_DISP_PB_REQUIRES_SMMU_BYPASS + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_DISP_PB_REQUIRES_SMMU_BYPASS, NV_TRUE); + } + pThis->setProperty(pThis, PDB_PROP_GPU_FASTPATH_SEQ_ENABLED, NV_FALSE); + pThis->setProperty(pThis, PDB_PROP_GPU_RECOVERY_DRAIN_P2P_REQUIRED, NV_FALSE); + + // NVOC Property Hal field -- PDB_PROP_GPU_REUSE_INIT_CONTING_MEM + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_REUSE_INIT_CONTING_MEM, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_RUSD_POLLING_SUPPORT_MONOLITHIC + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_RUSD_POLLING_SUPPORT_MONOLITHIC, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_RUSD_DISABLE_CLK_PUBLIC_DOMAIN_INFO + // default + { + pThis->setProperty(pThis, PDB_PROP_GPU_RUSD_DISABLE_CLK_PUBLIC_DOMAIN_INFO, NV_FALSE); + } + pThis->setProperty(pThis, PDB_PROP_GPU_RECOVERY_REBOOT_REQUIRED, NV_FALSE); + pThis->setProperty(pThis, PDB_PROP_GPU_ALLOC_ISO_SYS_MEM_FROM_CARVEOUT, NV_FALSE); + + pThis->deviceInstance = 32; + + // Hal field -- isVirtual + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->isVirtual = NV_FALSE; + } + + // Hal field -- isGspClient + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */ + { + pThis->isGspClient = NV_FALSE; + } + } + + // Hal field -- isDceClient + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */ + { + pThis->isDceClient = NV_TRUE; + } + } + + pThis->bIsDebugModeEnabled = NV_FALSE; + + pThis->numOfMclkLockRequests = 0U; + + pThis->bUseRegisterAccessMap = !(0); + + pThis->boardInfo = ((void *)0); + + // Hal field -- gpuGroupCount + // default + { + pThis->gpuGroupCount = 1; + } + + pThis->bIsMigRm = NV_FALSE; + + // Hal field -- bUnifiedMemorySpaceEnabled + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */ + { + pThis->bUnifiedMemorySpaceEnabled = NV_TRUE; + } + + // Hal field -- bWarBug200577889SriovHeavyEnabled + pThis->bWarBug200577889SriovHeavyEnabled = NV_FALSE; + + // Hal field -- bNonPowerOf2ChannelCountSupported + pThis->bNonPowerOf2ChannelCountSupported = NV_FALSE; + + // Hal field -- bWarBug4347206PowerCycleOnUnload + // default + { + pThis->bWarBug4347206PowerCycleOnUnload = NV_FALSE; + } + + // Hal field -- bNeed4kPageIsolation + // default + { + 
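+        // Note on the ChipHal guards in this function: chipHal_HalVarIdx is
+        // treated as an index into a bitset, where (idx >> 5) selects a 32-bit
+        // word and (1UL << (idx & 0x1f)) a bit within that word. The mask
+        // 0x0000d000UL in word 3 therefore covers variant indices 108, 110 and
+        // 111, which the generated comments identify as T234D | T264D | T256D.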
pThis->bNeed4kPageIsolation = NV_FALSE; + } + + // Hal field -- bInstLoc47bitPaWar + // default + { + pThis->bInstLoc47bitPaWar = NV_FALSE; + } + + // Hal field -- bIsBarPteInSysmemSupported + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */ + { + pThis->bIsBarPteInSysmemSupported = NV_TRUE; + } + + // Hal field -- bClientRmAllocatedCtxBuffer + // default + { + pThis->bClientRmAllocatedCtxBuffer = NV_FALSE; + } + + // Hal field -- bInstanceMemoryAlwaysCached + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */ + { + pThis->bInstanceMemoryAlwaysCached = NV_TRUE; + } + + // Hal field -- bComputePolicyTimesliceSupported + // default + { + pThis->bComputePolicyTimesliceSupported = NV_FALSE; + } + + // Hal field -- bSriovCapable + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */ + { + pThis->bSriovCapable = NV_TRUE; + } + + // Hal field -- bRecheckSliSupportAtResume + // default + { + pThis->bRecheckSliSupportAtResume = NV_FALSE; + } + + // Hal field -- bGpuNvEncAv1Supported + // default + { + pThis->bGpuNvEncAv1Supported = NV_FALSE; + } + + pThis->bIsGspOwnedFaultBuffersEnabled = NV_FALSE; + + // Hal field -- bVfResizableBAR1Supported + // default + { + pThis->bVfResizableBAR1Supported = NV_FALSE; + } + + // Hal field -- bVoltaHubIntrSupported + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */ + { + pThis->bVoltaHubIntrSupported = NV_TRUE; + } + + // Hal field -- bUsePmcDeviceEnableForHostEngine + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */ + { + pThis->bUsePmcDeviceEnableForHostEngine = NV_TRUE; + } + + pThis->bBlockNewWorkload = NV_FALSE; +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_GpuHalspecOwner(GpuHalspecOwner* ); +NV_STATUS __nvoc_ctor_RmHalspecOwner(RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_OBJTRACEABLE(OBJTRACEABLE* ); +NV_STATUS __nvoc_ctor_OBJGPU(OBJGPU *pThis, NvU32 arg_gpuInstance, NvU32 arg_gpuId, NvUuid * arg_pUuid, struct GpuArch * arg_pGpuArch) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_Object; + status = __nvoc_ctor_GpuHalspecOwner(&pThis->__nvoc_base_GpuHalspecOwner); + if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_GpuHalspecOwner; + status = __nvoc_ctor_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner); + if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_RmHalspecOwner; + status = __nvoc_ctor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_OBJTRACEABLE; + __nvoc_init_dataField_OBJGPU(pThis); + + status = __nvoc_gpuConstruct(pThis, arg_gpuInstance, arg_gpuId, arg_pUuid, arg_pGpuArch); + if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail__init; + goto __nvoc_ctor_OBJGPU_exit; // Success + +__nvoc_ctor_OBJGPU_fail__init: + __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); +__nvoc_ctor_OBJGPU_fail_OBJTRACEABLE: + __nvoc_dtor_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner); +__nvoc_ctor_OBJGPU_fail_RmHalspecOwner: + __nvoc_dtor_GpuHalspecOwner(&pThis->__nvoc_base_GpuHalspecOwner); +__nvoc_ctor_OBJGPU_fail_GpuHalspecOwner: + 
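+    // (Each fail label unwinds the base classes constructed before the
+    // failure, in reverse order of construction.)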
__nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJGPU_fail_Object: +__nvoc_ctor_OBJGPU_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_OBJGPU_1(OBJGPU *pThis) { + ChipHal *chipHal = &staticCast(pThis, GpuHalspecOwner)->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &staticCast(pThis, RmHalspecOwner)->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // gpuGetIdInfo -- halified (3 hals) body + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00001000UL) )) /* ChipHal: T234D */ + { + pThis->__gpuGetIdInfo__ = &gpuGetIdInfo_T234D; + } + else if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00004000UL) )) /* ChipHal: T264D */ + { + pThis->__gpuGetIdInfo__ = &gpuGetIdInfo_T264D; + } + else + { + pThis->__gpuGetIdInfo__ = &gpuGetIdInfo_T256D; + } + + // gpuGetChildrenOrder -- halified (2 hals) + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00008000UL) )) /* ChipHal: T256D */ + { + pThis->__gpuGetChildrenOrder__ = &gpuGetChildrenOrder_T256D; + } + else + { + pThis->__gpuGetChildrenOrder__ = &gpuGetChildrenOrder_T234D; + } + + // gpuGetChildrenPresent -- halified (2 hals) + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00008000UL) )) /* ChipHal: T256D */ + { + pThis->__gpuGetChildrenPresent__ = &gpuGetChildrenPresent_T256D; + } + else + { + pThis->__gpuGetChildrenPresent__ = &gpuGetChildrenPresent_T234D; + } + + // gpuGetEngClassDescriptorList -- halified (3 hals) + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00001000UL) )) /* ChipHal: T234D */ + { + pThis->__gpuGetEngClassDescriptorList__ = &gpuGetEngClassDescriptorList_T234D; + } + else if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00004000UL) )) /* ChipHal: T264D */ + { + pThis->__gpuGetEngClassDescriptorList__ = &gpuGetEngClassDescriptorList_T264D; + } + else + { + pThis->__gpuGetEngClassDescriptorList__ = &gpuGetEngClassDescriptorList_T256D; + } + + // gpuGetNoEngClassList -- halified (3 hals) + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00001000UL) )) /* ChipHal: T234D */ + { + pThis->__gpuGetNoEngClassList__ = &gpuGetNoEngClassList_T234D; + } + else if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00004000UL) )) /* ChipHal: T264D */ + { + pThis->__gpuGetNoEngClassList__ = &gpuGetNoEngClassList_T264D; + } + else + { + pThis->__gpuGetNoEngClassList__ = &gpuGetNoEngClassList_T256D; + } +} // End __nvoc_init_funcTable_OBJGPU_1 with approximately 13 basic block(s). + + +// Initialize vtable(s) for 5 virtual method(s). +void __nvoc_init_funcTable_OBJGPU(OBJGPU *pThis) { + + // Initialize vtable(s) with 5 per-object function pointer(s). + __nvoc_init_funcTable_OBJGPU_1(pThis); +} + +// Initialize newly constructed object. 
+void __nvoc_init__OBJGPU(OBJGPU *pThis,
+        NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev,
+        TEGRA_CHIP_TYPE TegraChipHal_tegraType,
+        RM_RUNTIME_VARIANT RmVariantHal_rmVariant,
+        NvU32 DispIpHal_ipver) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;    // (obj) super
+    pThis->__nvoc_pbase_GpuHalspecOwner = &pThis->__nvoc_base_GpuHalspecOwner;    // (gpuhalspecowner) super
+    pThis->__nvoc_pbase_RmHalspecOwner = &pThis->__nvoc_base_RmHalspecOwner;    // (rmhalspecowner) super
+    pThis->__nvoc_pbase_OBJTRACEABLE = &pThis->__nvoc_base_OBJTRACEABLE;    // (traceable) super
+    pThis->__nvoc_pbase_OBJGPU = pThis;    // (gpu) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__Object(&pThis->__nvoc_base_Object);
+    __nvoc_init__GpuHalspecOwner(&pThis->__nvoc_base_GpuHalspecOwner, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, TegraChipHal_tegraType);
+    __nvoc_init__RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner, RmVariantHal_rmVariant, DispIpHal_ipver);
+    __nvoc_init__OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE);
+
+    // Pointer(s) to metadata structure(s)
+    pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__OBJGPU.metadata__Object;    // (obj) super
+    pThis->__nvoc_base_GpuHalspecOwner.__nvoc_metadata_ptr = &__nvoc_metadata__OBJGPU.metadata__GpuHalspecOwner;    // (gpuhalspecowner) super
+    pThis->__nvoc_base_RmHalspecOwner.__nvoc_metadata_ptr = &__nvoc_metadata__OBJGPU.metadata__RmHalspecOwner;    // (rmhalspecowner) super
+    pThis->__nvoc_base_OBJTRACEABLE.__nvoc_metadata_ptr = &__nvoc_metadata__OBJGPU.metadata__OBJTRACEABLE;    // (traceable) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__OBJGPU;    // (gpu) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_OBJGPU(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU **ppThis, Dynamic *pParent, NvU32 createFlags,
+        NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev,
+        TEGRA_CHIP_TYPE TegraChipHal_tegraType,
+        RM_RUNTIME_VARIANT RmVariantHal_rmVariant,
+        NvU32 DispIpHal_ipver, NvU32 arg_gpuInstance, NvU32 arg_gpuId, NvUuid * arg_pUuid, struct GpuArch * arg_pGpuArch)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    OBJGPU *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(OBJGPU), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(OBJGPU));
+
+    pThis->__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent if there is one unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__OBJGPU(pThis, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, TegraChipHal_tegraType, RmVariantHal_rmVariant, DispIpHal_ipver);
+    status = __nvoc_ctor_OBJGPU(pThis, arg_gpuInstance, arg_gpuId, arg_pUuid, arg_pGpuArch);
+    if (status != NV_OK) goto __nvoc_objCreate_OBJGPU_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_OBJGPU_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
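+    // (When NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set the caller owns
+    // the storage, so the cleanup below zeroes the object in place rather
+    // than freeing it.)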
+ if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(OBJGPU)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJGPU(OBJGPU **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + NvU32 ChipHal_arch = va_arg(args, NvU32); + NvU32 ChipHal_impl = va_arg(args, NvU32); + NvU32 ChipHal_hidrev = va_arg(args, NvU32); + TEGRA_CHIP_TYPE TegraChipHal_tegraType = va_arg(args, TEGRA_CHIP_TYPE); + RM_RUNTIME_VARIANT RmVariantHal_rmVariant = va_arg(args, RM_RUNTIME_VARIANT); + NvU32 DispIpHal_ipver = va_arg(args, NvU32); + NvU32 arg_gpuInstance = va_arg(args, NvU32); + NvU32 arg_gpuId = va_arg(args, NvU32); + NvUuid * arg_pUuid = va_arg(args, NvUuid *); + struct GpuArch * arg_pGpuArch = va_arg(args, struct GpuArch *); + + status = __nvoc_objCreate_OBJGPU(ppThis, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, TegraChipHal_tegraType, RmVariantHal_rmVariant, DispIpHal_ipver, arg_gpuInstance, arg_gpuId, arg_pUuid, arg_pGpuArch); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_nvoc.h b/src/nvidia/generated/g_gpu_nvoc.h new file mode 100644 index 0000000..6e8d613 --- /dev/null +++ b/src/nvidia/generated/g_gpu_nvoc.h @@ -0,0 +1,5062 @@ + +#ifndef _G_GPU_NVOC_H_ +#define _G_GPU_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once +#include "g_gpu_nvoc.h" + +#ifndef _OBJGPU_H_ +#define _OBJGPU_H_ + +/*! + * @file + * @brief Resource Manager Defines and Structures: Defines and structures used for the GPU Object. + */ + +/*! + * + * Forward declaration of SEQSCRIPT - here because it is used by many clients + * and we don't want objseq.h to have to be included everywhere, so adding this + * here. 
See NVCR 12827752
+ *
+ */
+typedef struct _SEQSCRIPT    SEQSCRIPT, *PSEQSCRIPT;
+
+typedef struct GPUATTACHARG GPUATTACHARG;
+
+/*
+ * WARNING -- Avoid including headers in gpu.h
+ *   A change in gpu.h and headers included by gpu.h triggers recompilation of most RM
+ *   files in an incremental build. We should keep the list of included headers as
+ *   short as possible.
+ *   In particular, a GPU child module should not have its object header included here.
+ *   A child module generally includes the header of its parent. A child module header
+ *   included by the parent module affects all the sibling modules.
+ */
+#include "ctrl/ctrl0000/ctrl0000system.h"
+#include "ctrl/ctrl2080/ctrl2080internal.h" // NV2080_CTRL_CMD_INTERNAL_MAX_BSPS/NVENCS
+#include "ctrl/ctrl2080/ctrl2080bus.h"      // NV2080_CTRL_BUS_INFO
+#include "class/cl2080.h"
+
+#include "nvlimits.h"
+#include "utils/nv_enum.h"
+
+#include "gpu/gpu_arch.h"
+#include "gpu/gpu_timeout.h"
+#include "gpu/gpu_access.h"
+#include "gpu/gpu_shared_data_map.h"
+#include "gpu/kern_gpu_power.h"
+
+#include "platform/acpi_common.h"
+#include "gpu/gpu_acpi_data.h"
+
+#include "core/core.h"
+#include "core/hal.h"
+#include "core/system.h"
+#include "diagnostics/traceable.h"
+#include "gpu/gpu_halspec.h"
+#include "gpu/gpu_resource_desc.h"
+#include "gpu/gpu_uuid.h"
+#include "gpu/mem_mgr/mem_desc.h"
+#include "kernel/gpu/error_cont.h"
+#include "kernel/gpu/gpu_engine_type.h"
+#include "nvoc/utility.h"
+#include "prereq_tracker/prereq_tracker.h"
+#include "libraries/containers/vector.h"
+
+#include "class/cl00de.h"
+
+#include "rmapi/control.h"
+#include "rmapi/event.h"
+#include "rmapi/rmapi.h"
+
+#include "published/nv_arch.h"
+
+#include "g_rmconfig_util.h" // prototypes for rmconfig utility functions, eg: rmcfg_IsGK104()
+
+// TODO - the forward declaration of OS_GPU_INFO should be simplified
+typedef struct nv_state_t OS_GPU_INFO;
+
+
+struct OBJGMMU;
+
+#ifndef __NVOC_CLASS_OBJGMMU_TYPEDEF__
+#define __NVOC_CLASS_OBJGMMU_TYPEDEF__
+typedef struct OBJGMMU OBJGMMU;
+#endif /* __NVOC_CLASS_OBJGMMU_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJGMMU
+#define __nvoc_class_id_OBJGMMU 0xd7a41d
+#endif /* __nvoc_class_id_OBJGMMU */
+
+
+
+struct OBJGRIDDISPLAYLESS;
+
+#ifndef __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__
+#define __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__
+typedef struct OBJGRIDDISPLAYLESS OBJGRIDDISPLAYLESS;
+#endif /* __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJGRIDDISPLAYLESS
+#define __nvoc_class_id_OBJGRIDDISPLAYLESS 0x20fd5a
+#endif /* __nvoc_class_id_OBJGRIDDISPLAYLESS */
+
+
+
+struct OBJHOSTENG;
+
+#ifndef __NVOC_CLASS_OBJHOSTENG_TYPEDEF__
+#define __NVOC_CLASS_OBJHOSTENG_TYPEDEF__
+typedef struct OBJHOSTENG OBJHOSTENG;
+#endif /* __NVOC_CLASS_OBJHOSTENG_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJHOSTENG
+#define __nvoc_class_id_OBJHOSTENG 0xb356e7
+#endif /* __nvoc_class_id_OBJHOSTENG */
+
+
+
+struct OBJPMU_CLIENT_IMPLEMENTER;
+
+#ifndef __NVOC_CLASS_OBJPMU_CLIENT_IMPLEMENTER_TYPEDEF__
+#define __NVOC_CLASS_OBJPMU_CLIENT_IMPLEMENTER_TYPEDEF__
+typedef struct OBJPMU_CLIENT_IMPLEMENTER OBJPMU_CLIENT_IMPLEMENTER;
+#endif /* __NVOC_CLASS_OBJPMU_CLIENT_IMPLEMENTER_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJPMU_CLIENT_IMPLEMENTER
+#define __nvoc_class_id_OBJPMU_CLIENT_IMPLEMENTER 0x88cace
+#endif /* __nvoc_class_id_OBJPMU_CLIENT_IMPLEMENTER */
+
+
+
+struct OBJINTRABLE;
+
+#ifndef __NVOC_CLASS_OBJINTRABLE_TYPEDEF__
+#define __NVOC_CLASS_OBJINTRABLE_TYPEDEF__
+typedef struct OBJINTRABLE OBJINTRABLE;
+#endif /*
__NVOC_CLASS_OBJINTRABLE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJINTRABLE +#define __nvoc_class_id_OBJINTRABLE 0x31ccb7 +#endif /* __nvoc_class_id_OBJINTRABLE */ + + + +struct OBJVBIOS; + +#ifndef __NVOC_CLASS_OBJVBIOS_TYPEDEF__ +#define __NVOC_CLASS_OBJVBIOS_TYPEDEF__ +typedef struct OBJVBIOS OBJVBIOS; +#endif /* __NVOC_CLASS_OBJVBIOS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVBIOS +#define __nvoc_class_id_OBJVBIOS 0x5dc772 +#endif /* __nvoc_class_id_OBJVBIOS */ + + + +struct NvDebugDump; + +#ifndef __NVOC_CLASS_NvDebugDump_TYPEDEF__ +#define __NVOC_CLASS_NvDebugDump_TYPEDEF__ +typedef struct NvDebugDump NvDebugDump; +#endif /* __NVOC_CLASS_NvDebugDump_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvDebugDump +#define __nvoc_class_id_NvDebugDump 0x7e80a2 +#endif /* __nvoc_class_id_NvDebugDump */ + + + +struct GpuMutexMgr; + +#ifndef __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ +#define __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ +typedef struct GpuMutexMgr GpuMutexMgr; +#endif /* __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuMutexMgr +#define __nvoc_class_id_GpuMutexMgr 0x9d93b2 +#endif /* __nvoc_class_id_GpuMutexMgr */ + + + +struct KernelFalcon; + +#ifndef __NVOC_CLASS_KernelFalcon_TYPEDEF__ +#define __NVOC_CLASS_KernelFalcon_TYPEDEF__ +typedef struct KernelFalcon KernelFalcon; +#endif /* __NVOC_CLASS_KernelFalcon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelFalcon +#define __nvoc_class_id_KernelFalcon 0xb6b1af +#endif /* __nvoc_class_id_KernelFalcon */ + + + +struct KernelVideoEngine; + +#ifndef __NVOC_CLASS_KernelVideoEngine_TYPEDEF__ +#define __NVOC_CLASS_KernelVideoEngine_TYPEDEF__ +typedef struct KernelVideoEngine KernelVideoEngine; +#endif /* __NVOC_CLASS_KernelVideoEngine_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelVideoEngine +#define __nvoc_class_id_KernelVideoEngine 0x9e2f3e +#endif /* __nvoc_class_id_KernelVideoEngine */ + + + +struct KernelChannel; + +#ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__ +#define __NVOC_CLASS_KernelChannel_TYPEDEF__ +typedef struct KernelChannel KernelChannel; +#endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannel +#define __nvoc_class_id_KernelChannel 0x5d8d70 +#endif /* __nvoc_class_id_KernelChannel */ + + + +struct GenericKernelFalcon; + +#ifndef __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ +#define __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ +typedef struct GenericKernelFalcon GenericKernelFalcon; +#endif /* __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GenericKernelFalcon +#define __nvoc_class_id_GenericKernelFalcon 0xabcf08 +#endif /* __nvoc_class_id_GenericKernelFalcon */ + + + + +struct Subdevice; + +#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__ +#define __NVOC_CLASS_Subdevice_TYPEDEF__ +typedef struct Subdevice Subdevice; +#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Subdevice +#define __nvoc_class_id_Subdevice 0x4b01b3 +#endif /* __nvoc_class_id_Subdevice */ + + + +struct Device; + +#ifndef __NVOC_CLASS_Device_TYPEDEF__ +#define __NVOC_CLASS_Device_TYPEDEF__ +typedef struct Device Device; +#endif /* __NVOC_CLASS_Device_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Device +#define __nvoc_class_id_Device 0xe0ac20 +#endif /* __nvoc_class_id_Device */ + + + +struct RsClient; + +#ifndef __NVOC_CLASS_RsClient_TYPEDEF__ +#define __NVOC_CLASS_RsClient_TYPEDEF__ +typedef struct RsClient RsClient; +#endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsClient +#define __nvoc_class_id_RsClient 0x8f87e5 +#endif /* __nvoc_class_id_RsClient */ + + +struct 
Memory; + +#ifndef __NVOC_CLASS_Memory_TYPEDEF__ +#define __NVOC_CLASS_Memory_TYPEDEF__ +typedef struct Memory Memory; +#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Memory +#define __nvoc_class_id_Memory 0x4789f2 +#endif /* __nvoc_class_id_Memory */ + + + +#ifndef PARTITIONID_INVALID +#define PARTITIONID_INVALID 0xFFFFFFFF +#endif +typedef struct MIG_INSTANCE_REF MIG_INSTANCE_REF; +typedef struct NV2080_CTRL_GPU_REG_OP NV2080_CTRL_GPU_REG_OP; +typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS + NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS; +typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS + NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS; +typedef struct NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS + NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS; + +typedef enum +{ + BRANDING_TYPE_NONE, + BRANDING_TYPE_QUADRO_GENERIC, + BRANDING_TYPE_QUADRO_AD, + BRANDING_TYPE_NVS_NVIDIA, // "NVIDIA NVS" + BRANDING_TYPE_VGX, +} BRANDING_TYPE; + +typedef enum +{ + COMPUTE_BRANDING_TYPE_NONE, + COMPUTE_BRANDING_TYPE_TESLA, +} COMPUTE_BRANDING_TYPE; + +#define OOR_ARCH_DEF(x) \ + NV_ENUM_ENTRY(x, OOR_ARCH_X86_64, 0x00000000) \ + NV_ENUM_ENTRY(x, OOR_ARCH_PPC64LE, 0x00000001) \ + NV_ENUM_ENTRY(x, OOR_ARCH_ARM, 0x00000002) \ + NV_ENUM_ENTRY(x, OOR_ARCH_AARCH64, 0x00000003) \ + NV_ENUM_ENTRY(x, OOR_ARCH_NONE, 0x00000004) + +NV_ENUM_DEF(OOR_ARCH, OOR_ARCH_DEF) + +typedef struct +{ + NvU32 classId; + NvU32 flags; +} GPUCHILDORDER; + +typedef struct +{ + NvU32 classId; + NvU32 instances; + + /*! + * Pointer to the @ref NVOC_CLASS_INFO for the concrete class to instantiate + * for this child. + */ + const NVOC_CLASS_INFO *pClassInfo; +} GPUCHILDPRESENT; + +/*! + * @brief Generates an entry for a list of @ref GPUCHILDPRESENT objects for a + * class of the given name + * + * @param[in] _childClassName + * Name of the class for the entry + * @param[in] _instances + * Number of instances of the child that may be present; see + * @ref GPUCHILDPRESENT::instances + * + * @return An entry suitable for a list of @ref GPUCHILDPRESENT for the given + * child of @ref OBJGPU + */ +#define GPU_CHILD_PRESENT(_childClassName, _instances) \ + GPU_CHILD_PRESENT_POLYMORPHIC(_childClassName, (_instances), _childClassName) + +/*! + * @brief Generates an entry for a list of @ref GPUCHILDPRESENT objects that + * allows the @ref OBJGPU child to instantiate a sub-class of the base + * @ref OBJGPU child class. + * + * @details The intention of this macro is to allow a list of + * @ref GPUCHILDPRESENT to essentially state "this child should be + * present with this concrete class type". This allows for different + * @ref GPUCHILDPRESENT lists to request different classes with + * different behavior via sub-classes, for the same basic @ref OBJGPU + * child. + * + * @param[in] _childClassName + * Name of the base class at which @ref OBJGPU points + * @param[in] _instances + * Number of instances of the child that may be present; see + * @ref GPUCHILDPRESENT::instances + * @param[in] _concreteClassName + * Name of the sub-class of _childClassName that should actually be + * instantiated + * + * @return An entry suitable for a list of @ref GPUCHILDPRESENT for the given + * child of @ref OBJGPU with the given concrete class type. 
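+ *
+ * @par Example (illustrative; BaseChild/DerivedChild are hypothetical names):
+ *     GPU_CHILD_PRESENT_POLYMORPHIC(BaseChild, 1, DerivedChild)
+ * expands to an initializer with .classId = classId(BaseChild),
+ * .instances = 1, and .pClassInfo = classInfo(DerivedChild).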
+ */
+#define GPU_CHILD_PRESENT_POLYMORPHIC(_childClassName, _instances, _concreteClassName) \
+    {                                                  \
+        .classId = classId(_childClassName),           \
+        .instances = (_instances),                     \
+        .pClassInfo = classInfo(_concreteClassName)    \
+    }
+
+// GPU Child Order Flags
+#define GCO_LIST_INIT     NVBIT(0)    // entry is used for init ordering (DO NOT USE)
+#define GCO_LIST_LOAD     NVBIT(1)    // entry is used for load and postload ordering (DO NOT USE)
+#define GCO_LIST_UNLOAD   NVBIT(2)    // entry is used for unload and preunload ordering (DO NOT USE)
+#define GCO_LIST_DESTROY  NVBIT(3)    // entry is used for destroy order (DO NOT USE)
+#define GCO_LIST_ALL      (GCO_LIST_INIT | GCO_LIST_LOAD | GCO_LIST_UNLOAD | GCO_LIST_DESTROY)
+                          // ^ entry is used for all list types (RECOMMENDED)
+#define GCO_ALL           (GCO_LIST_ALL)
+
+
+typedef struct
+{
+    NvU32 childIndex;
+} GPU_CHILD_ITER;
+
+//
+// Object 'get' macros for GPU relative object retrievals.
+//
+
+OBJGPU *gpuEngineGetGpu(struct Object *pObject);
+// OBJGPU should be an ancestor of the param object, otherwise this will return NULL
+#define ENG_GET_GPU(p)            gpuEngineGetGpu(staticCast(p, Object))
+
+// GPU_GET_FIFO_UC is autogenerated, returns per Gpu pFifo.
+#define GPU_GET_FIFO(p)           GPU_GET_FIFO_UC(p)
+
+// GPU_GET_KERNEL_FIFO_UC is autogenerated, returns per Gpu pKernelFifo.
+#define GPU_GET_KERNEL_FIFO(p)    gpuGetKernelFifoShared(p)
+
+#define GPU_GET_HEAP(p)           (RMCFG_MODULE_HEAP ? MEMORY_MANAGER_GET_HEAP(GPU_GET_MEMORY_MANAGER(p)) : NULL)
+
+#define GPU_GET_HAL(p)            (RMCFG_MODULE_HAL ? (p)->pHal : NULL)
+
+#define GPU_GET_OS(p)             (RMCFG_MODULE_OS ? (p)->pOS : NULL)   // TBD: replace with SYS_GET_OS
+#define GPU_QUICK_PATH_GET_OS(p)  GPU_GET_OS(p)                         // TBD: remove
+
+#define GPU_GET_REGISTER_ACCESS(g) (&(g)->registerAccess)
+
+// Returns the pRmApi that routes to the physical driver, either via RPC or local calls
+#define GPU_GET_PHYSICAL_RMAPI(g)  (&(g)->physicalRmApi)
+
+//
+// Defines and helpers for encoding and decoding PCI domain, bus and device.
+//
+// Ideally these would live in objbus.h (or somewhere else more appropriate) and
+// not gpu/gpu.h, but keep them here for now while support for 32-bit domains is
+// being added as part of bug 1904645.
+//
+
+// DRF macros for GPUBUSINFO::nvDomainBusDeviceFunc
+#define NVGPU_BUSDEVICE_DOMAIN  63:32
+#define NVGPU_BUSDEVICE_BUS     15:8
+#define NVGPU_BUSDEVICE_DEVICE  7:0
+
+static NV_INLINE NvU32 gpuDecodeDomain(NvU64 gpuDomainBusDevice)
+{
+    return (NvU32)DRF_VAL64(GPU, _BUSDEVICE, _DOMAIN, gpuDomainBusDevice);
+}
+
+static NV_INLINE NvU8 gpuDecodeBus(NvU64 gpuDomainBusDevice)
+{
+    return (NvU8)DRF_VAL64(GPU, _BUSDEVICE, _BUS, gpuDomainBusDevice);
+}
+
+static NV_INLINE NvU8 gpuDecodeDevice(NvU64 gpuDomainBusDevice)
+{
+    return (NvU8)DRF_VAL64(GPU, _BUSDEVICE, _DEVICE, gpuDomainBusDevice);
+}
+
+static NV_INLINE NvU64 gpuEncodeDomainBusDevice(NvU32 domain, NvU8 bus, NvU8 device)
+{
+    return DRF_NUM64(GPU, _BUSDEVICE, _DOMAIN, domain) |
+           DRF_NUM64(GPU, _BUSDEVICE, _BUS, bus) |
+           DRF_NUM64(GPU, _BUSDEVICE, _DEVICE, device);
+}
+
+static NV_INLINE NvU32 gpuEncodeBusDevice(NvU8 bus, NvU8 device)
+{
+    NvU64 busDevice = gpuEncodeDomainBusDevice(0, bus, device);
+
+    // Bus and device are guaranteed to fit in the lower 32bits
+    return (NvU32)busDevice;
+}
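+// Worked example (illustrative): gpuEncodeDomainBusDevice(0x1, 0x02, 0x03)
+// packs the domain into bits 63:32 and bus/device into bits 15:8 / 7:0 per
+// the DRF ranges above, yielding 0x0000000100000203ULL;
+// gpuDecodeDomain()/gpuDecodeBus()/gpuDecodeDevice() recover the fields.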
+#define GPU_LOG_AND_NOTIFY_INFOROM_XID_WITHOUT_INFO(pGpu, xidSuffix, ...)      \
+do {                                                                           \
+    nvErrorLog_va((void *)pGpu, INFOROM_##xidSuffix, __VA_ARGS__);             \
+    gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_##xidSuffix, NULL, 0, 0, 0);\
+} while (0)
+
+//
+// Generate a 32-bit id from domain, bus and device tuple.
+//
+NvU32 gpuGenerate32BitId(NvU32 domain, NvU8 bus, NvU8 device);
+
+//
+// Generate a 32-bit id from a physical address
+//
+NvU32 gpuGenerate32BitIdFromPhysAddr(RmPhysAddr addr);
+
+//
+// Helpers for getting domain, bus and device of a GPU
+//
+// Ideally these would be inline functions, but NVOC doesn't support that today,
+// tracked in bug 1905882
+//
+#define gpuGetDBDF(pGpu)     ((pGpu)->busInfo.nvDomainBusDeviceFunc)
+#define gpuGetDomain(pGpu)   gpuDecodeDomain((pGpu)->busInfo.nvDomainBusDeviceFunc)
+#define gpuGetBus(pGpu)      gpuDecodeBus((pGpu)->busInfo.nvDomainBusDeviceFunc)
+#define gpuGetDevice(pGpu)   gpuDecodeDevice((pGpu)->busInfo.nvDomainBusDeviceFunc)
+#define gpuIsDBDFValid(pGpu) ((pGpu)->busInfo.bNvDomainBusDeviceFuncValid)
+
+
+#undef NVGPU_BUSDEVICE_DOMAIN
+#undef NVGPU_BUSDEVICE_BUS
+#undef NVGPU_BUSDEVICE_DEVICE
+
+//
+// One extra nibble should be added to the architecture version read from the
+// PMC boot register to represent the architecture number in RM.
+//
+#define GPU_ARCH_SHIFT 0x4
+
+// Registry key for inst mem modification defines
+#define INSTMEM_TAG_MASK (0xf0000000)
+#define INSTMEM_TAG(a)   ((INSTMEM_TAG_MASK & (a)) >> 28)
+
+
+typedef struct
+{
+
+    NvU32 PCIDeviceID;
+    NvU32 Manufacturer;
+    NvU32 PCISubDeviceID;
+    NvU32 PCIRevisionID;
+    NvU32 Subrevision;
+
+} GPUIDINFO;
+
+
+typedef struct
+{
+    NvU32 impl;
+    NvU32 arch;
+    NvU32 majorRev;
+    NvU32 minorRev;
+    NvU32 minorExtRev;
+} PMCBOOT0;
+
+typedef struct
+{
+    NvU32 impl;
+    NvU32 arch;
+    NvU32 majorRev;
+    NvU32 minorRev;
+    NvU32 minorExtRev;
+} PMCBOOT42;
+
+//
+// Random collection of bus-related configuration state.
+//
+typedef struct
+{
+    RmPhysAddr gpuPhysAddr;
+    RmPhysAddr gpuPhysFbAddr;
+    RmPhysAddr gpuPhysInstAddr;
+    RmPhysAddr gpuPhysIoAddr;
+    NvU32      iovaspaceId;
+    NvU32      IntLine;
+    NvU32      IsrHooked;
+    NvU64      nvDomainBusDeviceFunc;
+    NvBool     bNvDomainBusDeviceFuncValid;
+    OOR_ARCH   oorArch;
+} GPUBUSINFO;
+
+typedef struct
+{
+    CLASSDESCRIPTOR *pClasses;
+    NvU32           *pSuppressClasses;
+    NvU32            numClasses;
+    NvBool           bSuppressRead;
+} GpuClassDb;
+
+typedef struct
+{
+    CLASSDESCRIPTOR *pClassDescriptors;
+    NvU32            numClassDescriptors;
+
+    ENGDESCRIPTOR   *pEngineInitDescriptors;
+    ENGDESCRIPTOR   *pEngineDestroyDescriptors;
+    ENGDESCRIPTOR   *pEngineLoadDescriptors;
+    ENGDESCRIPTOR   *pEngineUnloadDescriptors;
+    NvU32            numEngineDescriptors;
+} GpuEngineOrder;
+
+//
+// PCI Express Support
+//
+typedef struct NBADDR
+{
+    NvU32  domain;
+    NvU8   bus;
+    NvU8   device;
+    NvU8   func;
+    NvU8   valid;
+    void  *handle;
+} NBADDR;
+
+typedef struct +{ + NBADDR addr; + void *vAddr; // virtual address of the port, if it has been mapped.
Not used starting with Win10 BuildXXXXX + NvU32 PCIECapPtr; // offset of the PCIE capptr in the NB + // Capability register set in enhanced configuration space + // + NvU32 PCIEErrorCapPtr; // offset of the Advanced Error Reporting Capability register set + NvU32 PCIEVCCapPtr; // offset of the Virtual Channel (VC) Capability register set + NvU32 PCIEL1SsCapPtr; // Offset of the L1 Substates Capabilities + NvU32 PCIEAcsCapPtr; // Offset of the ACS redirect Capabilities + NvU16 DeviceID, VendorID; // device and vendor ID for port +} PORTDATA; + +typedef struct // GPU specific data for core logic object, stored in GPU object +{ + PORTDATA upstreamPort; // the upstream port info for the GPU + // If there is a switch this is equal to boardDownstreamPort + // If there is no switch this is equal to rootPort + PORTDATA rootPort; // The root port of the PCI-E root complex + PORTDATA boardUpstreamPort; // If there is no BR03 this is equal to rootPort. + PORTDATA boardDownstreamPort; // If there is no BR03 these data are not set. +} GPUCLDATA; + + +// +// Flags for gpuStateLoad() and gpuStateUnload() routines. Flags *must* be used +// symmetrically across an Unload/Load pair. +// +#define GPU_STATE_FLAGS_PRESERVING NVBIT(0) // GPU state is preserved +#define GPU_STATE_FLAGS_VGA_TRANSITION NVBIT(1) // To be used with GPU_STATE_FLAGS_PRESERVING. +#define GPU_STATE_FLAGS_PM_TRANSITION NVBIT(2) // To be used with GPU_STATE_FLAGS_PRESERVING. +#define GPU_STATE_FLAGS_PM_SUSPEND NVBIT(3) +#define GPU_STATE_FLAGS_PM_HIBERNATE NVBIT(4) +#define GPU_STATE_FLAGS_GC6_TRANSITION NVBIT(5) // To be used with GPU_STATE_FLAGS_PRESERVING. +#define GPU_STATE_FLAGS_FAST_UNLOAD NVBIT(6) // Used during windows restart, skips stateDestroy steps +#define GPU_STATE_DEFAULT 0 // Default flags for destructive state loads + // and unloads + +struct OBJHWBC; +typedef struct hwbc_list +{ + struct OBJHWBC *pHWBC; + struct hwbc_list *pNext; +} HWBC_LIST; + +/*! + * GFID allocation state + */ +typedef enum +{ + GFID_FREE = 0, + GFID_ALLOCATED = 1, + GFID_INVALIDATED = 2, +} GFID_ALLOC_STATUS; + +typedef struct SRIOV_P2P_INFO +{ + NvU32 gfid; + NvBool bAllowP2pAccess; + NvU32 accessRefCount; + NvU32 destRefCount; +} SRIOV_P2P_INFO, *PSRIOV_P2P_INFO; + +typedef struct +{ + NvU32 peerGpuId; + NvU32 peerGpuInstance; + NvU32 p2pCaps; + NvU32 p2pOptimalReadCEs; + NvU32 p2pOptimalWriteCEs; + NvU8 p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE]; + NvU32 busPeerId; + NvU32 busEgmPeerId; +} GPU_P2P_PEER_GPU_CAPS; + +// +// typedef of private struct used in OBJGPU's data field +// + +typedef struct +{ + NvBool isInitialized; + NvU8 uuid[RM_SHA1_GID_SIZE]; +} _GPU_UUID; + +typedef struct +{ + NvBool bValid; + NvU8 id; +} _GPU_PCIE_PEER_CLIQUE; + +typedef struct +{ + NvU32 platformId; // used to identify soc + NvU32 implementationId; // soc-specific + NvU32 revisionId; // soc-revision + NvU32 chipId; // platform (architecture) + implementation + PMCBOOT0 pmcBoot0; + PMCBOOT42 pmcBoot42; + NvU8 subRevision; // sub-revision (NV_FUSE_OPT_SUBREVISION on GPU) +} _GPU_CHIP_INFO; + + +// Engine Database +typedef struct +{ + NvU32 size; + RM_ENGINE_TYPE *pType; + NvBool bValid; +} _GPU_ENGINE_DB; + +#define MAX_NUM_BARS (8) +// SRIOV state +typedef struct +{ + /*! + * Total number of VFs available in this GPU + */ + NvU32 totalVFs; + + /*! + * First VF Offset + */ + NvU32 firstVFOffset; + + /*! + * Max GFID possible + */ + NvU32 maxGfid; + + /*! + * Physical offset of Virtual BAR0 register. 
Stores the offset if the GPU is a physical function, else 0
+     */
+    NvU32 virtualRegPhysOffset;
+
+    /*!
+     * Allocated GFIDs. Used to ensure that plugins don't use the same GFID
+     * for multiple VFs.
+     */
+    NvU8 *pAllocatedGfids;
+
+    /*!
+     * The sizes of the BAR regions on the VF
+     */
+    NvU64 vfBarSize[MAX_NUM_BARS];
+
+    /*!
+     * First PF's BAR addresses
+     */
+    NvU64 firstVFBarAddress[MAX_NUM_BARS];
+
+    /*!
+     * If the VF BARs are 64-bit addressable
+     */
+    NvBool b64bitVFBar0;
+    NvBool b64bitVFBar1;
+    NvBool b64bitVFBar2;
+
+    /*!
+     * GFID used for P2P access
+     */
+    PSRIOV_P2P_INFO pP2PInfo;
+    NvBool          bP2PAllocated;
+    NvU32           maxP2pGfid;
+    NvU32           p2pFabricPartitionId;
+} _GPU_SRIOV_STATE;
+
+// Max # of instances for GPU children
+#define GPU_MAX_CES         20
+#define GPU_MAX_GRS         8
+#define GPU_MAX_FIFOS       1
+#define GPU_MAX_MSENCS      NV2080_CTRL_CMD_INTERNAL_MAX_MSENCS
+#define GPU_MAX_NVDECS      NV2080_CTRL_CMD_INTERNAL_MAX_BSPS
+#define GPU_MAX_NVJPGS      8
+#define GPU_MAX_HSHUBS      12
+#define GPU_MAX_OFAS        2
+#define GPU_MAX_GSPLITES    4
+//
+// Macro defines for OBJGPU fields -- macro defines inside the NVOC class block
+// are gone after the NVOC preprocessing stage, so macros used outside gpu/gpu.h
+// should not be defined inside the class block.
+//
+
+//
+// Maximum number of Falcon objects that can be allocated on one GPU.
+// This is purely a software limit and can be raised freely as more are added.
+//
+#define GPU_MAX_FALCON_ENGINES \
+    ENG_IOCTRL__SIZE_1     + \
+    ENG_GPCCS__SIZE_1      + \
+    ENG_FECS__SIZE_1       + \
+    ENG_NVJPEG__SIZE_1     + \
+    ENG_NVDEC__SIZE_1      + \
+    ENG_NVENC__SIZE_1      + \
+    32
+
+#define GPU_MAX_VIDEO_ENGINES  \
+    (ENG_NVJPEG__SIZE_1    + \
+     ENG_NVDEC__SIZE_1     + \
+     ENG_NVENC__SIZE_1     + \
+     ENG_OFA__SIZE_1)
+
+// for OBJGPU::pRmCtrlDeferredCmd
+#define MAX_DEFERRED_CMDS 2
+
+// for OBJGPU::computeModeRefCount
+#define NV_GPU_MODE_GRAPHICS_MODE                   0x00000001
+#define NV_GPU_MODE_COMPUTE_MODE                    0x00000002
+#define NV_GPU_COMPUTE_REFCOUNT_COMMAND_INCREMENT   0x0000000a
+#define NV_GPU_COMPUTE_REFCOUNT_COMMAND_DECREMENT   0x0000000b
+
+//
+// Structure to hold information obtained from
+// parsing the DEVICE_INFO2 table during init.
+//
+typedef struct
+{
+    NvU32 faultId;
+    NvU32 instanceId;
+    NvU32 typeEnum;
+    NvU32 resetId;
+    NvU32 devicePriBase;
+    NvU32 isEngine;
+    NvU32 rlEngId;
+    NvU32 runlistPriBase;
+    NvU32 groupId;
+    NvU32 ginTargetId;
+    NvU32 deviceBroadcastPriBase;
+    NvU32 groupLocalInstanceId;
+} DEVICE_INFO2_ENTRY;
+
+MAKE_VECTOR(DeviceInfoEntryVec, DEVICE_INFO2_ENTRY);
+
+#define DEVICE_INFO_INSTANCE_ID_ANY            (-1)
+#define DEVICE_INFO_GLOBAL_INSTANCE_ID_ANY     DEVICE_INFO_INSTANCE_ID_ANY
+#define DEVICE_INFO_DIE_LOCAL_INSTANCE_ID_ANY  DEVICE_INFO_INSTANCE_ID_ANY
+
+#define MAX_GROUP_COUNT 2
+
+#define NV_GPU_INTERNAL_DEVICE_HANDLE    0xABCD0080
+#define NV_GPU_INTERNAL_SUBDEVICE_HANDLE 0xABCD2080
+
+//
+// NV GPU simulation mode defines
+// Keep in sync with os.h SIM MODE defines until osGetSimulationMode is deprecated.
+// +#ifndef NV_SIM_MODE_DEFS +#define NV_SIM_MODE_DEFS +#define NV_SIM_MODE_HARDWARE 0U +#define NV_SIM_MODE_RTL 1U +#define NV_SIM_MODE_CMODEL 2U +#define NV_SIM_MODE_MODS_AMODEL 3U +#define NV_SIM_MODE_TEGRA_FPGA 4U +#define NV_SIM_MODE_INVALID (~0x0U) +#endif + +#define GPU_IS_NVSWITCH_DETECTED(pGpu) \ + (pGpu->nvswitchSupport == NV2080_CTRL_PMGR_MODULE_INFO_NVSWITCH_SUPPORTED) + + +typedef struct GspTraceBuffer GspTraceBuffer; + +typedef enum +{ + SCHED_POLICY_DEFAULT = 0, + SCHED_POLICY_BEST_EFFORT = 1, + SCHED_POLICY_VGPU_EQUAL_SHARE = 2, + SCHED_POLICY_VGPU_FIXED_SHARE = 3, +} SCHED_POLICY; + +// Scratch bits +#define NV_PBUS_SW_RESET_BITS_SCRATCH_REG 30 + +typedef enum +{ + NV_PM_DEPTH_NONE = 0, + NV_PM_DEPTH_OS_LAYER = 1, + NV_PM_DEPTH_SR_META = 2, + NV_PM_DEPTH_STATE_LOAD = 3, +} NV_PM_DEPTH; + +// Recovery action init value +#define GPU_RECOVERY_ACTION_UNKNOWN 0xff + +typedef enum +{ + // NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_PROVIDER_TYPE_OPCODE + THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_PROVIDER_TYPE = 0, + // NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGET_TYPE_OPCODE + THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_TYPE = 1, + // NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_READING_RANGE_OPCODE + THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_DEFAULT_MIN_TEMP = 2, + // NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_READING_RANGE_OPCODE + THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_DEFAULT_MAX_TEMP = 3, + // NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_PROVIDER_OPCODE + THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_PROVIDER_INDEX = 4, + // NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_TARGET_OPCODE + THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_INDEX = 5, + + // Used to determine number of enum entries. Should always be last. + THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_LAST = 6 +} THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY; + +// +// The actual GPU object definition +// + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_GPU_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJGPU; +struct NVOC_METADATA__Object; +struct NVOC_METADATA__GpuHalspecOwner; +struct NVOC_METADATA__RmHalspecOwner; +struct NVOC_METADATA__OBJTRACEABLE; + +union __nvoc_inner_struc_OBJGPU_1__ { + struct { + struct MemoryManager *pMemoryManager; + struct KernelDisplay *pKernelDisplay; + struct OBJTMR *pTmr; + struct OBJDCECLIENTRM *pDceclientrm; + } named; + Dynamic *pChild[4]; +}; + +struct __nvoc_inner_struc_OBJGPU_2__ { + NvU32 numSensors; + NvBool bNumSensorsCached; + struct { + NvU32 cache[6]; + NvBool bIsCached[6]; + } sensors[4]; +}; + + + +struct OBJGPU { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__OBJGPU *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Object __nvoc_base_Object; + struct GpuHalspecOwner __nvoc_base_GpuHalspecOwner; + struct RmHalspecOwner __nvoc_base_RmHalspecOwner; + struct OBJTRACEABLE __nvoc_base_OBJTRACEABLE; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct GpuHalspecOwner *__nvoc_pbase_GpuHalspecOwner; // gpuhalspecowner super + struct RmHalspecOwner *__nvoc_pbase_RmHalspecOwner; // rmhalspecowner super + struct OBJTRACEABLE *__nvoc_pbase_OBJTRACEABLE; // traceable super + struct OBJGPU *__nvoc_pbase_OBJGPU; // gpu + + // Vtable with 5 per-object function pointers + void (*__gpuGetIdInfo__)(struct OBJGPU * /*this*/); // halified (3 hals) body + const GPUCHILDORDER * (*__gpuGetChildrenOrder__)(struct OBJGPU * /*this*/, NvU32 *); // halified (2 hals) + const GPUCHILDPRESENT * (*__gpuGetChildrenPresent__)(struct OBJGPU * /*this*/, NvU32 *); // halified (2 hals) + const CLASSDESCRIPTOR * (*__gpuGetEngClassDescriptorList__)(struct OBJGPU * /*this*/, NvU32 *); // halified (3 hals) + const NvU32 * (*__gpuGetNoEngClassList__)(struct OBJGPU * /*this*/, NvU32 *); // halified (3 hals) + + // 118 PDB properties + NvBool PDB_PROP_GPU_IN_STANDBY; + NvBool PDB_PROP_GPU_IN_HIBERNATE; + NvBool PDB_PROP_GPU_IN_PM_CODEPATH; + NvBool PDB_PROP_GPU_IN_PM_RESUME_CODEPATH; + NvBool PDB_PROP_GPU_STATE_INITIALIZED; + NvBool PDB_PROP_GPU_EMULATION; + NvBool PDB_PROP_GPU_PRIMARY_DEVICE; + NvBool PDB_PROP_GPU_HYBRID_MGPU; + NvBool PDB_PROP_GPU_ALTERNATE_TREE_ENABLED; + NvBool PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS; + NvBool PDB_PROP_GPU_3D_CONTROLLER; + NvBool PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM; + NvBool PDB_PROP_GPU_IS_CONNECTED; + NvBool PDB_PROP_GPU_BROKEN_FB; + NvBool PDB_PROP_GPU_IN_FULLCHIP_RESET; + NvBool PDB_PROP_GPU_IN_SECONDARY_BUS_RESET; + NvBool PDB_PROP_GPU_IN_GC6_RESET; + NvBool PDB_PROP_GPU_IS_GEMINI; + NvBool PDB_PROP_GPU_PERSISTENT_SW_STATE; + NvBool PDB_PROP_GPU_COHERENT_CPU_MAPPING; + NvBool PDB_PROP_GPU_IS_LOST; + NvBool PDB_PROP_GPU_IN_TIMEOUT_RECOVERY; + NvBool PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT; + NvBool PDB_PROP_GPU_KEEP_WPR_ACROSS_GC6_SUPPORTED; + NvBool PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY; + NvBool PDB_PROP_GPU_ATS_SUPPORTED; + NvBool PDB_PROP_GPU_SC7_SUPPORTED; + NvBool PDB_PROP_GPU_RTD3_RG_SUPPORTED; + NvBool PDB_PROP_GPU_TRIGGER_PCIE_FLR; + NvBool PDB_PROP_GPU_CLKS_IN_TEGRA_SOC; + NvBool PDB_PROP_GPU_PREINITIALIZED_WPR_REGION; + NvBool PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING; + NvBool PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE; + NvBool PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE; + NvBool PDB_PROP_GPU_IS_UEFI; + NvBool PDB_PROP_GPU_IS_EFI_INIT; + NvBool PDB_PROP_GPU_ZERO_FB; + NvBool PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE; + NvBool PDB_PROP_GPU_MIG_SUPPORTED; + NvBool PDB_PROP_GPU_MIG_MIRROR_HOST_CI_ON_GUEST; + NvBool PDB_PROP_GPU_MIG_SUPPORTS_SPLIT_CE_RANGES; + NvBool PDB_PROP_GPU_MIG_GFX_SUPPORTED; + NvBool PDB_PROP_GPU_MIG_TIMESLICING_SUPPORTED; + NvBool PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED; + NvBool PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED; + NvBool PDB_PROP_GPU_IS_COT_ENABLED; + NvBool PDB_PROP_GPU_FW_WPR_OFFSET_SET_BY_ACR; + NvBool PDB_PROP_GPU_SRIOV_SYSMEM_DIRTY_PAGE_TRACKING_ENABLED; + NvBool PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE; + NvBool PDB_PROP_GPU_SWRL_GRANULAR_LOCKING; + NvBool PDB_PROP_GPU_IN_SLI_LINK_CODEPATH; + NvBool PDB_PROP_GPU_IS_PLX_PRESENT; + NvBool PDB_PROP_GPU_IS_BR03_PRESENT; + NvBool PDB_PROP_GPU_IS_BR04_PRESENT; + NvBool PDB_PROP_GPU_BEHIND_BRIDGE; + NvBool 
PDB_PROP_GPU_BEHIND_BR03; + NvBool PDB_PROP_GPU_BEHIND_BR04; + NvBool PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED; + NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED; + NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED; + NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY; + NvBool PDB_PROP_GPU_RM_UNLINKED_SLI; + NvBool PDB_PROP_GPU_SLI_LINK_ACTIVE; + NvBool PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST; + NvBool PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH; + NvBool PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL; + NvBool PDB_PROP_GPU_IS_MOBILE; + NvBool PDB_PROP_GPU_IS_EXTERNAL_GPU; + NvBool PDB_PROP_GPU_RTD3_GC6_SUPPORTED; + NvBool PDB_PROP_GPU_RTD3_GC6_ACTIVE; + NvBool PDB_PROP_GPU_FAST_GC6_ACTIVE; + NvBool PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED; + NvBool PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA; + NvBool PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED; + NvBool PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED; + NvBool PDB_PROP_GPU_GCOFF_STATE_ENTERING; + NvBool PDB_PROP_GPU_GCOFF_STATE_ENTERED; + NvBool PDB_PROP_GPU_RG_STATE_ENTERING; + NvBool PDB_PROP_GPU_RG_STATE_ENTERED; + NvBool PDB_PROP_GPU_ACCOUNTING_ON; + NvBool PDB_PROP_GPU_INACCESSIBLE; + NvBool PDB_PROP_GPU_NVLINK_SYSMEM; + NvBool PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK; + NvBool PDB_PROP_GPU_C2C_SYSMEM; + NvBool PDB_PROP_GPU_IN_TCC_MODE; + NvBool PDB_PROP_GPU_SUPPORTS_TDR_EVENT; + NvBool PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE; + NvBool PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K; + NvBool PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT; + NvBool PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT; + NvBool PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS; + NvBool PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU; + NvBool PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA; + NvBool PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE; + NvBool PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED; + NvBool PDB_PROP_GPU_NV_USERMODE_ENABLED; + NvBool PDB_PROP_GPU_IN_FATAL_ERROR; + NvBool PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE; + NvBool PDB_PROP_GPU_VGA_ENABLED; + NvBool PDB_PROP_GPU_IS_MXM_3X; + NvBool PDB_PROP_GPU_GSYNC_III_ATTACHED; + NvBool PDB_PROP_GPU_QSYNC_II_ATTACHED; + NvBool PDB_PROP_GPU_CC_FEATURE_CAPABLE; + NvBool PDB_PROP_GPU_APM_FEATURE_CAPABLE; + NvBool PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX; + NvBool PDB_PROP_GPU_SKIP_TABLE_CE_MAP; + NvBool PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF; + NvBool PDB_PROP_GPU_IS_SOC_SDM; + NvBool PDB_PROP_GPU_DISP_PB_REQUIRES_SMMU_BYPASS; + NvBool PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL; + NvBool PDB_PROP_GPU_FASTPATH_SEQ_ENABLED; + NvBool PDB_PROP_GPU_PREPARING_FULLCHIP_RESET; + NvBool PDB_PROP_GPU_RECOVERY_DRAIN_P2P_REQUIRED; + NvBool PDB_PROP_GPU_REUSE_INIT_CONTING_MEM; + NvBool PDB_PROP_GPU_RUSD_POLLING_SUPPORT_MONOLITHIC; + NvBool PDB_PROP_GPU_RUSD_DISABLE_CLK_PUBLIC_DOMAIN_INFO; + NvBool PDB_PROP_GPU_RECOVERY_REBOOT_REQUIRED; + NvBool PDB_PROP_GPU_ALLOC_ISO_SYS_MEM_FROM_CARVEOUT; + + // Data members + NvU32 moduleId; + NvU8 nvswitchSupport; + NV2080_CTRL_GPU_RECOVERY_ACTION currentRecoveryAction; + struct GpuArch *pGpuArch; + OS_GPU_INFO *pOsGpuInfo; + OS_RM_CAPS *pOsRmCaps; + NvU32 halImpl; + void *hPci; + GpuEngineEventNotificationList *engineNonstallIntrEventNotifications[84]; + NvBool bIsSOC; + NvU32 gpuInstance; + NvU32 gpuDisabled; + NvU32 gpuId; + NvU32 boardId; + NvU32 deviceInstance; + NvU32 subdeviceInstance; + NvS32 numaNodeId; + NvS32 cpuNumaNodeId; + _GPU_UUID gpuUuid; + NvU32 gpuPhysicalId; + NvU32 gpuTerminatedLinkMask; + NvBool gpuLinkTerminationEnabled; + NvBool gspRmInitialized; + NV_PM_DEPTH powerManagementDepth; + 
_GPU_PCIE_PEER_CLIQUE pciePeerClique; + NvU16 virtualConfigBits; + NvBool bGspNocatEnabled; + NvU32 i2cPortForExtdev; + GPUIDINFO idInfo; + _GPU_CHIP_INFO chipInfo; + GPUBUSINFO busInfo; + const GPUCHILDPRESENT *pChildrenPresent; + NvU32 numChildrenPresent; + GpuEngineOrder engineOrder; + GpuClassDb classDB; + NvU32 chipId0; + NvU32 chipId1; + NvU32 pmcEnable; + NvU32 pmcRmOwnsIntrMask; + NvBool testIntr; + NvU32 numCEs; + NvU32 ceFaultMethodBufferSize; + NvS32 lockStressCounter; + NvBool isVirtual; + NvBool isGspClient; + NvBool isDceClient; + NvU64 fbLength; + NvU32 instLength; + NvBool instSetViaAttachArg; + NvU64 activeFBIOs; + NvU64 gpuVbiosPostTime; + NvU32 uefiScanoutSurfaceSizeInMB; + NvU32 gpuDeviceMapCount; + DEVICE_MAPPING deviceMappings[60]; + struct IoAperture *pIOApertures[13]; + DEVICE_MAPPING *pDeviceMappingsByDeviceInstance[13]; + void *gpuCfgAddr; + TIMEOUT_DATA timeoutData; + NvU32 bug5203024OverrideTimeouts; + NvU32 computeModeRules; + NvS32 computeModeRefCount; + NvHandle hComputeModeReservation; + NvBool bIsDebugModeEnabled; + NvU64 lastCallbackTime; + volatile NvU32 bCallbackQueued; + NvU32 masterFromSLIConfig; + NvU32 sliStatus; + NvBool bIsRTD3Gc6D3HotTransition; + NvBool bIsRTD3GcoffD3HotTransition; + NvU32 simMode; + struct OBJOS *pOS; + struct OBJHAL *pHal; + union __nvoc_inner_struc_OBJGPU_1__ children; + HWBC_LIST *pHWBCList; + GPUCLDATA gpuClData; + _GPU_ENGINE_DB engineDB; + NvU32 engineDBSize; + NvU32 instCacheOverride; + NvS32 numOfMclkLockRequests; + NvU32 netlistNum; + RmCtrlDeferredCmd pRmCtrlDeferredCmd[2]; + ACPI_DATA acpi; + ACPI_METHOD_DATA acpiMethodData; + NvBool bSystemHasMux; + NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS backLightMethodData; + struct Falcon *constructedFalcons[71]; + NvU32 numConstructedFalcons; + struct GenericKernelFalcon *genericKernelFalcons[71]; + NvU32 numGenericKernelFalcons; + struct KernelVideoEngine *kernelVideoEngines[22]; + NvU32 numKernelVideoEngines; + NvU8 *pUserRegisterAccessMap; + NvU8 *pUnrestrictedRegisterAccessMap; + NvU32 userRegisterAccessMapSize; + struct PrereqTracker *pPrereqTracker; + RegisterAccess registerAccess; + NvBool bUseRegisterAccessMap; + NvU32 *pRegopOffsetScratchBuffer; + NvU32 *pRegopOffsetAddrScratchBuffer; + NvU32 regopScratchBufferMaxOffsets; + _GPU_SRIOV_STATE sriovState; + NvU64 vmmuSegmentSize; + NvHandle hDefaultClientShare; + NvHandle hDefaultClientShareDevice; + NvHandle hDefaultClientShareSubDevice; + NvU32 externalKernelClientCount; + DEVICE_INFO2_ENTRY *pDeviceInfoTable; + NvU32 numDeviceInfoEntries; + NvHandle hInternalClient; + NvHandle hInternalDevice; + NvHandle hInternalSubdevice; + NvHandle hInternalLockStressClient; + struct Subdevice *pCachedSubdevice; + struct RsClient *pCachedRsClient; + RM_API physicalRmApi; + struct Subdevice **pSubdeviceBackReferences; + NvU32 numSubdeviceBackReferences; + NvU32 maxSubdeviceBackReferences; + NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pChipInfo; + NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *boardInfo; + GpuSharedDataMap userSharedData; + NvU32 gpuGroupCount; + NvBool bBar2MovedByVtd; + NvBool bBar1Is64Bit; + NvBool bSurpriseRemovalSupported; + NvBool bTwoStageRcRecoveryEnabled; + NvBool bReplayableTraceEnabled; + NvBool bInD3Cold; + NvBool bIsSimulation; + NvBool bIsModsAmodel; + NvBool bIsFmodel; + NvBool bIsRtlsim; + NvBool bIsPassthru; + NvBool bIsVirtualWithSriov; + NvBool bIsMigRm; + NvU32 P2PPeerGpuCount; + GPU_P2P_PEER_GPU_CAPS P2PPeerGpuCaps[32]; + NvBool bIsSelfHosted; + NvBool bStateLoading; + NvBool 
bStateUnloading; + NvBool bStateLoaded; + NvBool bFullyConstructed; + NvBool bRecoveryMarginPresent; + NvBool bBf3WarBug4040336Enabled; + NvBool bUnifiedMemorySpaceEnabled; + NvBool bSriovEnabled; + NvBool bWarBug200577889SriovHeavyEnabled; + NvBool bNonPowerOf2ChannelCountSupported; + NvBool bWarBug4347206PowerCycleOnUnload; + NvBool bCacheOnlyMode; + NvBool bNeed4kPageIsolation; + NvBool bSplitVasManagementServerClientRm; + NvU32 instLocOverrides; + NvU32 instLocOverrides2; + NvU32 instLocOverrides3; + NvU32 instLocOverrides4; + NvBool bInstLoc47bitPaWar; + NvU32 instVprOverrides; + NvU32 optimizeUseCaseOverride; + NvS16 videoCtxswLogConsumerCount; + THREAD_STATE_NODE *pDpcThreadState; + struct OBJVASPACE *pFabricVAS; + NvBool bPipelinedPteMemEnabled; + NvBool bIsBarPteInSysmemSupported; + NvBool bRegUsesGlobalSurfaceOverrides; + NvBool bClientRmAllocatedCtxBuffer; + NvBool bEccPageRetirementWithSliAllowed; + NvBool bInstanceMemoryAlwaysCached; + NvBool bUseRpcSimEscapes; + NvBool bRmProfilingPrivileged; + NvBool bGeforceSmb; + NvBool bIsGeforce; + NvBool bIsQuadro; + NvBool bIsQuadroAD; + NvBool bIsVgx; + NvBool bIsNvidiaNvs; + NvBool bIsTitan; + NvBool bIsTesla; + NvBool bComputePolicyTimesliceSupported; + RmPhysAddr simAccessBufPhysAddr; + RmPhysAddr notifyOpSharedSurfacePhysAddr; + NvBool bVgpuGspPluginOffloadEnabled; + NvBool bSriovCapable; + NvBool bRecheckSliSupportAtResume; + NvBool bGpuNvEncAv1Supported; + NvBool bIsGspOwnedFaultBuffersEnabled; + NvBool bVfResizableBAR1Supported; + NvBool bVoltaHubIntrSupported; + NvBool bUsePmcDeviceEnableForHostEngine; + NvBool bBlockNewWorkload; + struct __nvoc_inner_struc_OBJGPU_2__ thermalSystemExecuteV2Cache; + _GPU_GC6_STATE gc6State; + NvU32 numUserKernelChannel; +}; + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJGPU { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; + const struct NVOC_METADATA__GpuHalspecOwner metadata__GpuHalspecOwner; + const struct NVOC_METADATA__RmHalspecOwner metadata__RmHalspecOwner; + const struct NVOC_METADATA__OBJTRACEABLE metadata__OBJTRACEABLE; +}; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU; + +#define __staticCast_OBJGPU(pThis) \ + ((pThis)->__nvoc_pbase_OBJGPU) + +#ifdef __nvoc_gpu_h_disabled +#define __dynamicCast_OBJGPU(pThis) ((OBJGPU*) NULL) +#else //__nvoc_gpu_h_disabled +#define __dynamicCast_OBJGPU(pThis) \ + ((OBJGPU*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPU))) +#endif //__nvoc_gpu_h_disabled + +// Property macros +#define PDB_PROP_GPU_RTD3_GC6_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_RTD3_GC6_SUPPORTED_BASE_NAME PDB_PROP_GPU_RTD3_GC6_SUPPORTED +#define PDB_PROP_GPU_IS_EFI_INIT_BASE_CAST +#define PDB_PROP_GPU_IS_EFI_INIT_BASE_NAME PDB_PROP_GPU_IS_EFI_INIT +#define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU_BASE_CAST +#define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU_BASE_NAME PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU +#define PDB_PROP_GPU_SKIP_TABLE_CE_MAP_BASE_CAST +#define PDB_PROP_GPU_SKIP_TABLE_CE_MAP_BASE_NAME PDB_PROP_GPU_SKIP_TABLE_CE_MAP +#define PDB_PROP_GPU_REUSE_INIT_CONTING_MEM_BASE_CAST +#define PDB_PROP_GPU_REUSE_INIT_CONTING_MEM_BASE_NAME PDB_PROP_GPU_REUSE_INIT_CONTING_MEM 
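+//
+// The *_BASE_CAST/*_BASE_NAME pairs in this run let the generic NVOC property
+// accessors resolve a PDB property name to the OBJGPU field that backs it.
+// A hedged usage sketch (the accessor is the usual NVOC getProperty pattern;
+// its exact expansion lives in the NVOC runtime headers):
+//
+//   if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST))
+//       return NV_ERR_GPU_IS_LOST;
+//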
+#define PDB_PROP_GPU_IN_FATAL_ERROR_BASE_CAST +#define PDB_PROP_GPU_IN_FATAL_ERROR_BASE_NAME PDB_PROP_GPU_IN_FATAL_ERROR +#define PDB_PROP_GPU_VGA_ENABLED_BASE_CAST +#define PDB_PROP_GPU_VGA_ENABLED_BASE_NAME PDB_PROP_GPU_VGA_ENABLED +#define PDB_PROP_GPU_COHERENT_CPU_MAPPING_BASE_CAST +#define PDB_PROP_GPU_COHERENT_CPU_MAPPING_BASE_NAME PDB_PROP_GPU_COHERENT_CPU_MAPPING +#define PDB_PROP_GPU_IN_STANDBY_BASE_CAST +#define PDB_PROP_GPU_IN_STANDBY_BASE_NAME PDB_PROP_GPU_IN_STANDBY +#define PDB_PROP_GPU_IS_COT_ENABLED_BASE_CAST +#define PDB_PROP_GPU_IS_COT_ENABLED_BASE_NAME PDB_PROP_GPU_IS_COT_ENABLED +#define PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED_BASE_CAST +#define PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED +#define PDB_PROP_GPU_SLI_LINK_ACTIVE_BASE_CAST +#define PDB_PROP_GPU_SLI_LINK_ACTIVE_BASE_NAME PDB_PROP_GPU_SLI_LINK_ACTIVE +#define PDB_PROP_GPU_RG_STATE_ENTERING_BASE_CAST +#define PDB_PROP_GPU_RG_STATE_ENTERING_BASE_NAME PDB_PROP_GPU_RG_STATE_ENTERING +#define PDB_PROP_GPU_TRIGGER_PCIE_FLR_BASE_CAST +#define PDB_PROP_GPU_TRIGGER_PCIE_FLR_BASE_NAME PDB_PROP_GPU_TRIGGER_PCIE_FLR +#define PDB_PROP_GPU_IN_GC6_RESET_BASE_CAST +#define PDB_PROP_GPU_IN_GC6_RESET_BASE_NAME PDB_PROP_GPU_IN_GC6_RESET +#define PDB_PROP_GPU_MIG_TIMESLICING_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_MIG_TIMESLICING_SUPPORTED_BASE_NAME PDB_PROP_GPU_MIG_TIMESLICING_SUPPORTED +#define PDB_PROP_GPU_3D_CONTROLLER_BASE_CAST +#define PDB_PROP_GPU_3D_CONTROLLER_BASE_NAME PDB_PROP_GPU_3D_CONTROLLER +#define PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING_BASE_CAST +#define PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING_BASE_NAME PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING +#define PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL_BASE_CAST +#define PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL_BASE_NAME PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL +#define PDB_PROP_GPU_RM_UNLINKED_SLI_BASE_CAST +#define PDB_PROP_GPU_RM_UNLINKED_SLI_BASE_NAME PDB_PROP_GPU_RM_UNLINKED_SLI +#define PDB_PROP_GPU_IS_UEFI_BASE_CAST +#define PDB_PROP_GPU_IS_UEFI_BASE_NAME PDB_PROP_GPU_IS_UEFI +#define PDB_PROP_GPU_IN_SECONDARY_BUS_RESET_BASE_CAST +#define PDB_PROP_GPU_IN_SECONDARY_BUS_RESET_BASE_NAME PDB_PROP_GPU_IN_SECONDARY_BUS_RESET +#define PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT_BASE_CAST +#define PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT_BASE_NAME PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT +#define PDB_PROP_GPU_RECOVERY_DRAIN_P2P_REQUIRED_BASE_CAST +#define PDB_PROP_GPU_RECOVERY_DRAIN_P2P_REQUIRED_BASE_NAME PDB_PROP_GPU_RECOVERY_DRAIN_P2P_REQUIRED +#define PDB_PROP_GPU_IS_CONNECTED_BASE_CAST +#define PDB_PROP_GPU_IS_CONNECTED_BASE_NAME PDB_PROP_GPU_IS_CONNECTED +#define PDB_PROP_GPU_RTD3_GC6_ACTIVE_BASE_CAST +#define PDB_PROP_GPU_RTD3_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_RTD3_GC6_ACTIVE +#define PDB_PROP_GPU_CC_FEATURE_CAPABLE_BASE_CAST +#define PDB_PROP_GPU_CC_FEATURE_CAPABLE_BASE_NAME PDB_PROP_GPU_CC_FEATURE_CAPABLE +#define PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT_BASE_CAST +#define PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT_BASE_NAME PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT +#define PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED_BASE_NAME PDB_PROP_GPU_UNIX_DYNAMIC_POWER_SUPPORTED +#define PDB_PROP_GPU_GCOFF_STATE_ENTERING_BASE_CAST +#define PDB_PROP_GPU_GCOFF_STATE_ENTERING_BASE_NAME PDB_PROP_GPU_GCOFF_STATE_ENTERING +#define PDB_PROP_GPU_RUSD_POLLING_SUPPORT_MONOLITHIC_BASE_CAST +#define PDB_PROP_GPU_RUSD_POLLING_SUPPORT_MONOLITHIC_BASE_NAME 
PDB_PROP_GPU_RUSD_POLLING_SUPPORT_MONOLITHIC +#define PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE_BASE_CAST +#define PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE_BASE_NAME PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE +#define PDB_PROP_GPU_ACCOUNTING_ON_BASE_CAST +#define PDB_PROP_GPU_ACCOUNTING_ON_BASE_NAME PDB_PROP_GPU_ACCOUNTING_ON +#define PDB_PROP_GPU_FAST_GC6_ACTIVE_BASE_CAST +#define PDB_PROP_GPU_FAST_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_FAST_GC6_ACTIVE +#define PDB_PROP_GPU_RUSD_DISABLE_CLK_PUBLIC_DOMAIN_INFO_BASE_CAST +#define PDB_PROP_GPU_RUSD_DISABLE_CLK_PUBLIC_DOMAIN_INFO_BASE_NAME PDB_PROP_GPU_RUSD_DISABLE_CLK_PUBLIC_DOMAIN_INFO +#define PDB_PROP_GPU_KEEP_WPR_ACROSS_GC6_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_KEEP_WPR_ACROSS_GC6_SUPPORTED_BASE_NAME PDB_PROP_GPU_KEEP_WPR_ACROSS_GC6_SUPPORTED +#define PDB_PROP_GPU_GCOFF_STATE_ENTERED_BASE_CAST +#define PDB_PROP_GPU_GCOFF_STATE_ENTERED_BASE_NAME PDB_PROP_GPU_GCOFF_STATE_ENTERED +#define PDB_PROP_GPU_IN_FULLCHIP_RESET_BASE_CAST +#define PDB_PROP_GPU_IN_FULLCHIP_RESET_BASE_NAME PDB_PROP_GPU_IN_FULLCHIP_RESET +#define PDB_PROP_GPU_NV_USERMODE_ENABLED_BASE_CAST +#define PDB_PROP_GPU_NV_USERMODE_ENABLED_BASE_NAME PDB_PROP_GPU_NV_USERMODE_ENABLED +#define PDB_PROP_GPU_IN_SLI_LINK_CODEPATH_BASE_CAST +#define PDB_PROP_GPU_IN_SLI_LINK_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_SLI_LINK_CODEPATH +#define PDB_PROP_GPU_IS_GEMINI_BASE_CAST +#define PDB_PROP_GPU_IS_GEMINI_BASE_NAME PDB_PROP_GPU_IS_GEMINI +#define PDB_PROP_GPU_STATE_INITIALIZED_BASE_CAST +#define PDB_PROP_GPU_STATE_INITIALIZED_BASE_NAME PDB_PROP_GPU_STATE_INITIALIZED +#define PDB_PROP_GPU_GSYNC_III_ATTACHED_BASE_CAST +#define PDB_PROP_GPU_GSYNC_III_ATTACHED_BASE_NAME PDB_PROP_GPU_GSYNC_III_ATTACHED +#define PDB_PROP_GPU_QSYNC_II_ATTACHED_BASE_CAST +#define PDB_PROP_GPU_QSYNC_II_ATTACHED_BASE_NAME PDB_PROP_GPU_QSYNC_II_ATTACHED +#define PDB_PROP_GPU_MIG_SUPPORTS_SPLIT_CE_RANGES_BASE_CAST +#define PDB_PROP_GPU_MIG_SUPPORTS_SPLIT_CE_RANGES_BASE_NAME PDB_PROP_GPU_MIG_SUPPORTS_SPLIT_CE_RANGES +#define PDB_PROP_GPU_IS_SOC_SDM_BASE_CAST +#define PDB_PROP_GPU_IS_SOC_SDM_BASE_NAME PDB_PROP_GPU_IS_SOC_SDM +#define PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM_BASE_CAST +#define PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM_BASE_NAME PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED +#define PDB_PROP_GPU_IS_EXTERNAL_GPU_BASE_CAST +#define PDB_PROP_GPU_IS_EXTERNAL_GPU_BASE_NAME PDB_PROP_GPU_IS_EXTERNAL_GPU +#define PDB_PROP_GPU_ALLOC_ISO_SYS_MEM_FROM_CARVEOUT_BASE_CAST +#define PDB_PROP_GPU_ALLOC_ISO_SYS_MEM_FROM_CARVEOUT_BASE_NAME PDB_PROP_GPU_ALLOC_ISO_SYS_MEM_FROM_CARVEOUT +#define PDB_PROP_GPU_EMULATION_BASE_CAST +#define PDB_PROP_GPU_EMULATION_BASE_NAME PDB_PROP_GPU_EMULATION +#define PDB_PROP_GPU_APM_FEATURE_CAPABLE_BASE_CAST +#define PDB_PROP_GPU_APM_FEATURE_CAPABLE_BASE_NAME PDB_PROP_GPU_APM_FEATURE_CAPABLE +#define PDB_PROP_GPU_SC7_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_SC7_SUPPORTED_BASE_NAME PDB_PROP_GPU_SC7_SUPPORTED +#define PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST_BASE_CAST +#define PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST_BASE_NAME PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST +#define PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED_BASE_NAME PDB_PROP_GPU_LEGACY_GCOFF_SUPPORTED +#define PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL_BASE_CAST +#define 
PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL_BASE_NAME PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL +#define PDB_PROP_GPU_INACCESSIBLE_BASE_CAST +#define PDB_PROP_GPU_INACCESSIBLE_BASE_NAME PDB_PROP_GPU_INACCESSIBLE +#define PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH_BASE_CAST +#define PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH_BASE_NAME PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH +#define PDB_PROP_GPU_IN_PM_RESUME_CODEPATH_BASE_CAST +#define PDB_PROP_GPU_IN_PM_RESUME_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_PM_RESUME_CODEPATH +#define PDB_PROP_GPU_FASTPATH_SEQ_ENABLED_BASE_CAST +#define PDB_PROP_GPU_FASTPATH_SEQ_ENABLED_BASE_NAME PDB_PROP_GPU_FASTPATH_SEQ_ENABLED +#define PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY_BASE_CAST +#define PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY_BASE_NAME PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY +#define PDB_PROP_GPU_IN_TCC_MODE_BASE_CAST +#define PDB_PROP_GPU_IN_TCC_MODE_BASE_NAME PDB_PROP_GPU_IN_TCC_MODE +#define PDB_PROP_GPU_C2C_SYSMEM_BASE_CAST +#define PDB_PROP_GPU_C2C_SYSMEM_BASE_NAME PDB_PROP_GPU_C2C_SYSMEM +#define PDB_PROP_GPU_HYBRID_MGPU_BASE_CAST +#define PDB_PROP_GPU_HYBRID_MGPU_BASE_NAME PDB_PROP_GPU_HYBRID_MGPU +#define PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED_BASE_NAME PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED +#define PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE_BASE_CAST +#define PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE +#define PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED_BASE_NAME PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED +#define PDB_PROP_GPU_RG_STATE_ENTERED_BASE_CAST +#define PDB_PROP_GPU_RG_STATE_ENTERED_BASE_NAME PDB_PROP_GPU_RG_STATE_ENTERED +#define PDB_PROP_GPU_MIG_GFX_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_MIG_GFX_SUPPORTED_BASE_NAME PDB_PROP_GPU_MIG_GFX_SUPPORTED +#define PDB_PROP_GPU_IS_PLX_PRESENT_BASE_CAST +#define PDB_PROP_GPU_IS_PLX_PRESENT_BASE_NAME PDB_PROP_GPU_IS_PLX_PRESENT +#define PDB_PROP_GPU_NVLINK_SYSMEM_BASE_CAST +#define PDB_PROP_GPU_NVLINK_SYSMEM_BASE_NAME PDB_PROP_GPU_NVLINK_SYSMEM +#define PDB_PROP_GPU_SRIOV_SYSMEM_DIRTY_PAGE_TRACKING_ENABLED_BASE_CAST +#define PDB_PROP_GPU_SRIOV_SYSMEM_DIRTY_PAGE_TRACKING_ENABLED_BASE_NAME PDB_PROP_GPU_SRIOV_SYSMEM_DIRTY_PAGE_TRACKING_ENABLED +#define PDB_PROP_GPU_IS_MOBILE_BASE_CAST +#define PDB_PROP_GPU_IS_MOBILE_BASE_NAME PDB_PROP_GPU_IS_MOBILE +#define PDB_PROP_GPU_ALTERNATE_TREE_ENABLED_BASE_CAST +#define PDB_PROP_GPU_ALTERNATE_TREE_ENABLED_BASE_NAME PDB_PROP_GPU_ALTERNATE_TREE_ENABLED +#define PDB_PROP_GPU_RTD3_RG_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_RTD3_RG_SUPPORTED_BASE_NAME PDB_PROP_GPU_RTD3_RG_SUPPORTED +#define PDB_PROP_GPU_PERSISTENT_SW_STATE_BASE_CAST +#define PDB_PROP_GPU_PERSISTENT_SW_STATE_BASE_NAME PDB_PROP_GPU_PERSISTENT_SW_STATE +#define PDB_PROP_GPU_CLKS_IN_TEGRA_SOC_BASE_CAST +#define PDB_PROP_GPU_CLKS_IN_TEGRA_SOC_BASE_NAME PDB_PROP_GPU_CLKS_IN_TEGRA_SOC +#define PDB_PROP_GPU_IN_PM_CODEPATH_BASE_CAST +#define PDB_PROP_GPU_IN_PM_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_PM_CODEPATH +#define PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE_BASE_CAST +#define PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE_BASE_NAME PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_CAST +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED +#define PDB_PROP_GPU_BEHIND_BR03_BASE_CAST +#define PDB_PROP_GPU_BEHIND_BR03_BASE_NAME PDB_PROP_GPU_BEHIND_BR03 +#define PDB_PROP_GPU_BEHIND_BR04_BASE_CAST +#define 
PDB_PROP_GPU_BEHIND_BR04_BASE_NAME PDB_PROP_GPU_BEHIND_BR04 +#define PDB_PROP_GPU_MIG_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_MIG_SUPPORTED_BASE_NAME PDB_PROP_GPU_MIG_SUPPORTED +#define PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE_BASE_CAST +#define PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE_BASE_NAME PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE +#define PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE_BASE_CAST +#define PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE_BASE_NAME PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE +#define PDB_PROP_GPU_MIG_MIRROR_HOST_CI_ON_GUEST_BASE_CAST +#define PDB_PROP_GPU_MIG_MIRROR_HOST_CI_ON_GUEST_BASE_NAME PDB_PROP_GPU_MIG_MIRROR_HOST_CI_ON_GUEST +#define PDB_PROP_GPU_IN_HIBERNATE_BASE_CAST +#define PDB_PROP_GPU_IN_HIBERNATE_BASE_NAME PDB_PROP_GPU_IN_HIBERNATE +#define PDB_PROP_GPU_BROKEN_FB_BASE_CAST +#define PDB_PROP_GPU_BROKEN_FB_BASE_NAME PDB_PROP_GPU_BROKEN_FB +#define PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT_BASE_CAST +#define PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT_BASE_NAME PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT +#define PDB_PROP_GPU_IN_TIMEOUT_RECOVERY_BASE_CAST +#define PDB_PROP_GPU_IN_TIMEOUT_RECOVERY_BASE_NAME PDB_PROP_GPU_IN_TIMEOUT_RECOVERY +#define PDB_PROP_GPU_RECOVERY_REBOOT_REQUIRED_BASE_CAST +#define PDB_PROP_GPU_RECOVERY_REBOOT_REQUIRED_BASE_NAME PDB_PROP_GPU_RECOVERY_REBOOT_REQUIRED +#define PDB_PROP_GPU_SUPPORTS_TDR_EVENT_BASE_CAST +#define PDB_PROP_GPU_SUPPORTS_TDR_EVENT_BASE_NAME PDB_PROP_GPU_SUPPORTS_TDR_EVENT +#define PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA_BASE_CAST +#define PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA_BASE_NAME PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA +#define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA_BASE_CAST +#define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA_BASE_NAME PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA +#define PDB_PROP_GPU_IS_BR03_PRESENT_BASE_CAST +#define PDB_PROP_GPU_IS_BR03_PRESENT_BASE_NAME PDB_PROP_GPU_IS_BR03_PRESENT +#define PDB_PROP_GPU_IS_BR04_PRESENT_BASE_CAST +#define PDB_PROP_GPU_IS_BR04_PRESENT_BASE_NAME PDB_PROP_GPU_IS_BR04_PRESENT +#define PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE_BASE_CAST +#define PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE_BASE_NAME PDB_PROP_GPU_OPTIMUS_GOLD_CFG_SPACE_RESTORE +#define PDB_PROP_GPU_FW_WPR_OFFSET_SET_BY_ACR_BASE_CAST +#define PDB_PROP_GPU_FW_WPR_OFFSET_SET_BY_ACR_BASE_NAME PDB_PROP_GPU_FW_WPR_OFFSET_SET_BY_ACR +#define PDB_PROP_GPU_IS_MXM_3X_BASE_CAST +#define PDB_PROP_GPU_IS_MXM_3X_BASE_NAME PDB_PROP_GPU_IS_MXM_3X +#define PDB_PROP_GPU_PREINITIALIZED_WPR_REGION_BASE_CAST +#define PDB_PROP_GPU_PREINITIALIZED_WPR_REGION_BASE_NAME PDB_PROP_GPU_PREINITIALIZED_WPR_REGION +#define PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS_BASE_CAST +#define PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS_BASE_NAME PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS +#define PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF_BASE_CAST +#define PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF_BASE_NAME PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF +#define PDB_PROP_GPU_PREPARING_FULLCHIP_RESET_BASE_CAST +#define PDB_PROP_GPU_PREPARING_FULLCHIP_RESET_BASE_NAME PDB_PROP_GPU_PREPARING_FULLCHIP_RESET +#define PDB_PROP_GPU_DISP_PB_REQUIRES_SMMU_BYPASS_BASE_CAST +#define PDB_PROP_GPU_DISP_PB_REQUIRES_SMMU_BYPASS_BASE_NAME PDB_PROP_GPU_DISP_PB_REQUIRES_SMMU_BYPASS +#define PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED_BASE_CAST +#define PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED_BASE_NAME PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED +#define PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED_BASE_NAME PDB_PROP_GPU_RTD3_GCOFF_SUPPORTED +#define 
PDB_PROP_GPU_ZERO_FB_BASE_CAST +#define PDB_PROP_GPU_ZERO_FB_BASE_NAME PDB_PROP_GPU_ZERO_FB +#define PDB_PROP_GPU_SWRL_GRANULAR_LOCKING_BASE_CAST +#define PDB_PROP_GPU_SWRL_GRANULAR_LOCKING_BASE_NAME PDB_PROP_GPU_SWRL_GRANULAR_LOCKING +#define PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK_BASE_CAST +#define PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK_BASE_NAME PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK +#define PDB_PROP_GPU_ATS_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_ATS_SUPPORTED_BASE_NAME PDB_PROP_GPU_ATS_SUPPORTED +#define PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS_BASE_CAST +#define PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS_BASE_NAME PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS +#define PDB_PROP_GPU_PRIMARY_DEVICE_BASE_CAST +#define PDB_PROP_GPU_PRIMARY_DEVICE_BASE_NAME PDB_PROP_GPU_PRIMARY_DEVICE +#define PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE_BASE_CAST +#define PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE_BASE_NAME PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE +#define PDB_PROP_GPU_BEHIND_BRIDGE_BASE_CAST +#define PDB_PROP_GPU_BEHIND_BRIDGE_BASE_NAME PDB_PROP_GPU_BEHIND_BRIDGE +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY_BASE_CAST +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY +#define PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K_BASE_CAST +#define PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K_BASE_NAME PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K +#define PDB_PROP_GPU_IS_LOST_BASE_CAST +#define PDB_PROP_GPU_IS_LOST_BASE_NAME PDB_PROP_GPU_IS_LOST +#define PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX_BASE_CAST +#define PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX_BASE_NAME PDB_PROP_GPU_EXTENDED_GSP_RM_INITIALIZATION_TIMEOUT_FOR_VGX + +NV_STATUS __nvoc_objCreateDynamic_OBJGPU(OBJGPU**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU**, Dynamic*, NvU32, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + TEGRA_CHIP_TYPE TegraChipHal_tegraType, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver, NvU32 arg_gpuInstance, NvU32 arg_gpuId, NvUuid *arg_pUuid, struct GpuArch *arg_pGpuArch); +#define __objCreate_OBJGPU(ppNewObj, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, TegraChipHal_tegraType, RmVariantHal_rmVariant, DispIpHal_ipver, arg_gpuInstance, arg_gpuId, arg_pUuid, arg_pGpuArch) \ + __nvoc_objCreate_OBJGPU((ppNewObj), staticCast((pParent), Dynamic), (createFlags), ChipHal_arch, ChipHal_impl, ChipHal_hidrev, TegraChipHal_tegraType, RmVariantHal_rmVariant, DispIpHal_ipver, arg_gpuInstance, arg_gpuId, arg_pUuid, arg_pGpuArch) + + +// Wrapper macros +#define gpuGetIdInfo_FNPTR(pGpu) pGpu->__gpuGetIdInfo__ +#define gpuGetIdInfo(pGpu) gpuGetIdInfo_DISPATCH(pGpu) +#define gpuGetIdInfo_HAL(pGpu) gpuGetIdInfo_DISPATCH(pGpu) +#define gpuGetChildrenOrder_FNPTR(pGpu) pGpu->__gpuGetChildrenOrder__ +#define gpuGetChildrenOrder(pGpu, pNumEntries) gpuGetChildrenOrder_DISPATCH(pGpu, pNumEntries) +#define gpuGetChildrenOrder_HAL(pGpu, pNumEntries) gpuGetChildrenOrder_DISPATCH(pGpu, pNumEntries) +#define gpuGetChildrenPresent_FNPTR(pGpu) pGpu->__gpuGetChildrenPresent__ +#define gpuGetChildrenPresent(pGpu, pNumEntries) gpuGetChildrenPresent_DISPATCH(pGpu, pNumEntries) +#define gpuGetChildrenPresent_HAL(pGpu, pNumEntries) gpuGetChildrenPresent_DISPATCH(pGpu, pNumEntries) +#define gpuGetEngClassDescriptorList_FNPTR(pGpu) pGpu->__gpuGetEngClassDescriptorList__ +#define gpuGetEngClassDescriptorList(pGpu, arg2) 
gpuGetEngClassDescriptorList_DISPATCH(pGpu, arg2) +#define gpuGetEngClassDescriptorList_HAL(pGpu, arg2) gpuGetEngClassDescriptorList_DISPATCH(pGpu, arg2) +#define gpuGetNoEngClassList_FNPTR(pGpu) pGpu->__gpuGetNoEngClassList__ +#define gpuGetNoEngClassList(pGpu, arg2) gpuGetNoEngClassList_DISPATCH(pGpu, arg2) +#define gpuGetNoEngClassList_HAL(pGpu, arg2) gpuGetNoEngClassList_DISPATCH(pGpu, arg2) + +// Dispatch functions +static inline void gpuGetIdInfo_DISPATCH(struct OBJGPU *pGpu) { + pGpu->__gpuGetIdInfo__(pGpu); +} + +static inline const GPUCHILDORDER * gpuGetChildrenOrder_DISPATCH(struct OBJGPU *pGpu, NvU32 *pNumEntries) { + return pGpu->__gpuGetChildrenOrder__(pGpu, pNumEntries); +} + +static inline const GPUCHILDPRESENT * gpuGetChildrenPresent_DISPATCH(struct OBJGPU *pGpu, NvU32 *pNumEntries) { + return pGpu->__gpuGetChildrenPresent__(pGpu, pNumEntries); +} + +static inline const CLASSDESCRIPTOR * gpuGetEngClassDescriptorList_DISPATCH(struct OBJGPU *pGpu, NvU32 *arg2) { + return pGpu->__gpuGetEngClassDescriptorList__(pGpu, arg2); +} + +static inline const NvU32 * gpuGetNoEngClassList_DISPATCH(struct OBJGPU *pGpu, NvU32 *arg2) { + return pGpu->__gpuGetNoEngClassList__(pGpu, arg2); +} + +static inline NV_STATUS gpuConstructPhysical_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuConstructPhysical(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuConstructPhysical(pGpu) gpuConstructPhysical_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuConstructPhysical_HAL(pGpu) gpuConstructPhysical(pGpu) + +static inline void gpuDestructPhysical_b3696a(struct OBJGPU *pGpu) { + return; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDestructPhysical(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDestructPhysical(pGpu) gpuDestructPhysical_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDestructPhysical_HAL(pGpu) gpuDestructPhysical(pGpu) + +NV_STATUS gpuStatePreInit_IMPL(struct OBJGPU *pGpu); + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuStatePreInit(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuStatePreInit(pGpu) gpuStatePreInit_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuStatePreInit_HAL(pGpu) gpuStatePreInit(pGpu) + +NV_STATUS gpuStateLoad_IMPL(struct OBJGPU *pGpu, NvU32 arg2); + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuStateLoad(struct OBJGPU *pGpu, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuStateLoad(pGpu, arg2) gpuStateLoad_IMPL(pGpu, arg2) +#endif //__nvoc_gpu_h_disabled + +#define gpuStateLoad_HAL(pGpu, arg2) gpuStateLoad(pGpu, arg2) + +NV_STATUS gpuStateDestroy_IMPL(struct OBJGPU *pGpu); + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuStateDestroy(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuStateDestroy(pGpu) gpuStateDestroy_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuStateDestroy_HAL(pGpu) gpuStateDestroy(pGpu) + +static inline NV_STATUS gpuStateInitStartedSatisfy_56cd7a(struct OBJGPU *pGpu, struct PrereqTracker *pPrereqTracker) { + return 
NV_OK; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuStateInitStartedSatisfy(struct OBJGPU *pGpu, struct PrereqTracker *pPrereqTracker) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuStateInitStartedSatisfy(pGpu, pPrereqTracker) gpuStateInitStartedSatisfy_56cd7a(pGpu, pPrereqTracker) +#endif //__nvoc_gpu_h_disabled + +#define gpuStateInitStartedSatisfy_HAL(pGpu, pPrereqTracker) gpuStateInitStartedSatisfy(pGpu, pPrereqTracker) + +static inline void gpuStateInitStartedRetract_b3696a(struct OBJGPU *pGpu, struct PrereqTracker *pPrereqTracker) { + return; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuStateInitStartedRetract(struct OBJGPU *pGpu, struct PrereqTracker *pPrereqTracker) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuStateInitStartedRetract(pGpu, pPrereqTracker) gpuStateInitStartedRetract_b3696a(pGpu, pPrereqTracker) +#endif //__nvoc_gpu_h_disabled + +#define gpuStateInitStartedRetract_HAL(pGpu, pPrereqTracker) gpuStateInitStartedRetract(pGpu, pPrereqTracker) + +static inline NV_STATUS gpuPowerManagementEnterPreUnloadPhysical_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuPowerManagementEnterPreUnloadPhysical(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuPowerManagementEnterPreUnloadPhysical(pGpu) gpuPowerManagementEnterPreUnloadPhysical_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuPowerManagementEnterPreUnloadPhysical_HAL(pGpu) gpuPowerManagementEnterPreUnloadPhysical(pGpu) + +static inline NV_STATUS gpuPowerManagementEnterPostUnloadPhysical_56cd7a(struct OBJGPU *pGpu, NvU32 newLevel) { + return NV_OK; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuPowerManagementEnterPostUnloadPhysical(struct OBJGPU *pGpu, NvU32 newLevel) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuPowerManagementEnterPostUnloadPhysical(pGpu, newLevel) gpuPowerManagementEnterPostUnloadPhysical_56cd7a(pGpu, newLevel) +#endif //__nvoc_gpu_h_disabled + +#define gpuPowerManagementEnterPostUnloadPhysical_HAL(pGpu, newLevel) gpuPowerManagementEnterPostUnloadPhysical(pGpu, newLevel) + +static inline NV_STATUS gpuPowerManagementResumePreLoadPhysical_56cd7a(struct OBJGPU *pGpu, NvU32 oldLevel, NvU32 flags) { + return NV_OK; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuPowerManagementResumePreLoadPhysical(struct OBJGPU *pGpu, NvU32 oldLevel, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuPowerManagementResumePreLoadPhysical(pGpu, oldLevel, flags) gpuPowerManagementResumePreLoadPhysical_56cd7a(pGpu, oldLevel, flags) +#endif //__nvoc_gpu_h_disabled + +#define gpuPowerManagementResumePreLoadPhysical_HAL(pGpu, oldLevel, flags) gpuPowerManagementResumePreLoadPhysical(pGpu, oldLevel, flags) + +static inline NV_STATUS gpuPowerManagementResumePostLoadPhysical_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuPowerManagementResumePostLoadPhysical(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define 
gpuPowerManagementResumePostLoadPhysical(pGpu) gpuPowerManagementResumePostLoadPhysical_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuPowerManagementResumePostLoadPhysical_HAL(pGpu) gpuPowerManagementResumePostLoadPhysical(pGpu) + +NV_STATUS gpuConstructDeviceInfoTable_FWCLIENT(struct OBJGPU *pGpu); + +static inline NV_STATUS gpuConstructDeviceInfoTable_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuConstructDeviceInfoTable(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuConstructDeviceInfoTable(pGpu) gpuConstructDeviceInfoTable_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuConstructDeviceInfoTable_HAL(pGpu) gpuConstructDeviceInfoTable(pGpu) + +static inline NV_STATUS gpuInitializeMemDescFromPromotedCtx_46f6a7(struct OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, NvU64 gpuPhysAddr, NvU64 size, NvU32 physAttr, NvBool bIsCallingContextVgpuPlugin) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitializeMemDescFromPromotedCtx(struct OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, NvU64 gpuPhysAddr, NvU64 size, NvU32 physAttr, NvBool bIsCallingContextVgpuPlugin) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitializeMemDescFromPromotedCtx(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin) gpuInitializeMemDescFromPromotedCtx_46f6a7(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin) +#endif //__nvoc_gpu_h_disabled + +#define gpuInitializeMemDescFromPromotedCtx_HAL(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin) gpuInitializeMemDescFromPromotedCtx(pGpu, ppMemDesc, gpuPhysAddr, size, physAttr, bIsCallingContextVgpuPlugin) + +NV_STATUS gpuGetNameString_T234D(struct OBJGPU *pGpu, NvU32 arg2, void *arg3); + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetNameString(struct OBJGPU *pGpu, NvU32 arg2, void *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetNameString(pGpu, arg2, arg3) gpuGetNameString_T234D(pGpu, arg2, arg3) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetNameString_HAL(pGpu, arg2, arg3) gpuGetNameString(pGpu, arg2, arg3) + +NV_STATUS gpuGetShortNameString_T234D(struct OBJGPU *pGpu, NvU8 *arg2); + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetShortNameString(struct OBJGPU *pGpu, NvU8 *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetShortNameString(pGpu, arg2) gpuGetShortNameString_T234D(pGpu, arg2) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetShortNameString_HAL(pGpu, arg2) gpuGetShortNameString(pGpu, arg2) + +static inline NvBool gpuCheckEngine_88bc07(struct OBJGPU *pGpu, ENGDESCRIPTOR desc) { + return NV_TRUE; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuCheckEngine(struct OBJGPU *pGpu, ENGDESCRIPTOR desc) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuCheckEngine(pGpu, desc) gpuCheckEngine_88bc07(pGpu, desc) +#endif //__nvoc_gpu_h_disabled + +#define gpuCheckEngine_HAL(pGpu, desc) gpuCheckEngine(pGpu, desc) + +NvBool gpuCheckEngineWithOrderList_KERNEL(struct OBJGPU *pGpu, ENGDESCRIPTOR 
desc, NvBool bCheckEngineOrder); + + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuCheckEngineWithOrderList(struct OBJGPU *pGpu, ENGDESCRIPTOR desc, NvBool bCheckEngineOrder) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuCheckEngineWithOrderList(pGpu, desc, bCheckEngineOrder) gpuCheckEngineWithOrderList_KERNEL(pGpu, desc, bCheckEngineOrder) +#endif //__nvoc_gpu_h_disabled + +#define gpuCheckEngineWithOrderList_HAL(pGpu, desc, bCheckEngineOrder) gpuCheckEngineWithOrderList(pGpu, desc, bCheckEngineOrder) + +NvBool gpuIsSliLinkSupported_IMPL(struct OBJGPU *pGpu); + + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsSliLinkSupported(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsSliLinkSupported(pGpu) gpuIsSliLinkSupported_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuIsSliLinkSupported_HAL(pGpu) gpuIsSliLinkSupported(pGpu) + +static inline void gpuSetThreadBcState_b3696a(struct OBJGPU *pGpu, NvBool arg2) { + return; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuSetThreadBcState(struct OBJGPU *pGpu, NvBool arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuSetThreadBcState(pGpu, arg2) gpuSetThreadBcState_b3696a(pGpu, arg2) +#endif //__nvoc_gpu_h_disabled + +#define gpuSetThreadBcState_HAL(pGpu, arg2) gpuSetThreadBcState(pGpu, arg2) + +static inline void gpuDeterminePersistantIllumSettings_b3696a(struct OBJGPU *pGpu) { + return; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDeterminePersistantIllumSettings(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDeterminePersistantIllumSettings(pGpu) gpuDeterminePersistantIllumSettings_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDeterminePersistantIllumSettings_HAL(pGpu) gpuDeterminePersistantIllumSettings(pGpu) + +static inline NV_STATUS gpuInitSliIllumination_46f6a7(struct OBJGPU *pGpu) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitSliIllumination(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitSliIllumination(pGpu) gpuInitSliIllumination_46f6a7(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuInitSliIllumination_HAL(pGpu) gpuInitSliIllumination(pGpu) + +NV_STATUS gpuBuildGenericKernelFalconList_IMPL(struct OBJGPU *pGpu); + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuBuildGenericKernelFalconList(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuBuildGenericKernelFalconList(pGpu) gpuBuildGenericKernelFalconList_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuBuildGenericKernelFalconList_HAL(pGpu) gpuBuildGenericKernelFalconList(pGpu) + +void gpuDestroyGenericKernelFalconList_IMPL(struct OBJGPU *pGpu); + + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDestroyGenericKernelFalconList(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDestroyGenericKernelFalconList(pGpu) gpuDestroyGenericKernelFalconList_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDestroyGenericKernelFalconList_HAL(pGpu) 
gpuDestroyGenericKernelFalconList(pGpu) + +NV_STATUS gpuBuildKernelVideoEngineList_IMPL(struct OBJGPU *pGpu); + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuBuildKernelVideoEngineList(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuBuildKernelVideoEngineList(pGpu) gpuBuildKernelVideoEngineList_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuBuildKernelVideoEngineList_HAL(pGpu) gpuBuildKernelVideoEngineList(pGpu) + +NV_STATUS gpuInitVideoLogging_IMPL(struct OBJGPU *pGpu); + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitVideoLogging(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitVideoLogging(pGpu) gpuInitVideoLogging_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuInitVideoLogging_HAL(pGpu) gpuInitVideoLogging(pGpu) + +void gpuFreeVideoLogging_IMPL(struct OBJGPU *pGpu); + + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuFreeVideoLogging(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuFreeVideoLogging(pGpu) gpuFreeVideoLogging_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuFreeVideoLogging_HAL(pGpu) gpuFreeVideoLogging(pGpu) + +void gpuDestroyKernelVideoEngineList_IMPL(struct OBJGPU *pGpu); + + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDestroyKernelVideoEngineList(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDestroyKernelVideoEngineList(pGpu) gpuDestroyKernelVideoEngineList_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDestroyKernelVideoEngineList_HAL(pGpu) gpuDestroyKernelVideoEngineList(pGpu) + +struct GenericKernelFalcon *gpuGetGenericKernelFalconForEngine_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg2); + + +#ifdef __nvoc_gpu_h_disabled +static inline struct GenericKernelFalcon *gpuGetGenericKernelFalconForEngine(struct OBJGPU *pGpu, ENGDESCRIPTOR arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetGenericKernelFalconForEngine(pGpu, arg2) gpuGetGenericKernelFalconForEngine_IMPL(pGpu, arg2) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetGenericKernelFalconForEngine_HAL(pGpu, arg2) gpuGetGenericKernelFalconForEngine(pGpu, arg2) + +void gpuRegisterGenericKernelFalconIntrService_IMPL(struct OBJGPU *pGpu, void *pRecords); + + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuRegisterGenericKernelFalconIntrService(struct OBJGPU *pGpu, void *pRecords) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuRegisterGenericKernelFalconIntrService(pGpu, pRecords) gpuRegisterGenericKernelFalconIntrService_IMPL(pGpu, pRecords) +#endif //__nvoc_gpu_h_disabled + +#define gpuRegisterGenericKernelFalconIntrService_HAL(pGpu, pRecords) gpuRegisterGenericKernelFalconIntrService(pGpu, pRecords) + +static inline void gpuGetHwDefaults_b3696a(struct OBJGPU *pGpu) { + return; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuGetHwDefaults(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuGetHwDefaults(pGpu) gpuGetHwDefaults_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetHwDefaults_HAL(pGpu) gpuGetHwDefaults(pGpu) + +RmPhysAddr gpuGetDmaEndAddress_IMPL(struct 
OBJGPU *pGpu); + + +#ifdef __nvoc_gpu_h_disabled +static inline RmPhysAddr gpuGetDmaEndAddress(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + RmPhysAddr ret; + portMemSet(&ret, 0, sizeof(RmPhysAddr)); + return ret; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetDmaEndAddress(pGpu) gpuGetDmaEndAddress_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetDmaEndAddress_HAL(pGpu) gpuGetDmaEndAddress(pGpu) + +static inline NV_STATUS gpuReadPBusScratch_395e98(struct OBJGPU *pGpu, NvU8 index) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuReadPBusScratch(struct OBJGPU *pGpu, NvU8 index) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuReadPBusScratch(pGpu, index) gpuReadPBusScratch_395e98(pGpu, index) +#endif //__nvoc_gpu_h_disabled + +#define gpuReadPBusScratch_HAL(pGpu, index) gpuReadPBusScratch(pGpu, index) + +static inline void gpuWritePBusScratch_d44104(struct OBJGPU *pGpu, NvU8 index, NvU32 data) { + return; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuWritePBusScratch(struct OBJGPU *pGpu, NvU8 index, NvU32 data) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuWritePBusScratch(pGpu, index, data) gpuWritePBusScratch_d44104(pGpu, index, data) +#endif //__nvoc_gpu_h_disabled + +#define gpuWritePBusScratch_HAL(pGpu, index, data) gpuWritePBusScratch(pGpu, index, data) + +static inline NV_STATUS gpuSetResetScratchBit_46f6a7(struct OBJGPU *pGpu, NvBool bResetRequired) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSetResetScratchBit(struct OBJGPU *pGpu, NvBool bResetRequired) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSetResetScratchBit(pGpu, bResetRequired) gpuSetResetScratchBit_46f6a7(pGpu, bResetRequired) +#endif //__nvoc_gpu_h_disabled + +#define gpuSetResetScratchBit_HAL(pGpu, bResetRequired) gpuSetResetScratchBit(pGpu, bResetRequired) + +static inline NV_STATUS gpuGetResetScratchBit_46f6a7(struct OBJGPU *pGpu, NvBool *pbResetRequired) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetResetScratchBit(struct OBJGPU *pGpu, NvBool *pbResetRequired) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetResetScratchBit(pGpu, pbResetRequired) gpuGetResetScratchBit_46f6a7(pGpu, pbResetRequired) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetResetScratchBit_HAL(pGpu, pbResetRequired) gpuGetResetScratchBit(pGpu, pbResetRequired) + +static inline NV_STATUS gpuSetStateResetRequired_46f6a7(struct OBJGPU *pGpu, NvU32 exceptType) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSetStateResetRequired(struct OBJGPU *pGpu, NvU32 exceptType) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSetStateResetRequired(pGpu, exceptType) gpuSetStateResetRequired_46f6a7(pGpu, exceptType) +#endif //__nvoc_gpu_h_disabled + +#define gpuSetStateResetRequired_HAL(pGpu, exceptType) gpuSetStateResetRequired(pGpu, exceptType) + +NV_STATUS gpuResetRequiredStateChanged_FWCLIENT(struct OBJGPU *pGpu, NvBool newState); + + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS 
gpuResetRequiredStateChanged(struct OBJGPU *pGpu, NvBool newState) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuResetRequiredStateChanged(pGpu, newState) gpuResetRequiredStateChanged_FWCLIENT(pGpu, newState)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuResetRequiredStateChanged_HAL(pGpu, newState) gpuResetRequiredStateChanged(pGpu, newState)
+
+NV_STATUS gpuMarkDeviceForReset_IMPL(struct OBJGPU *pGpu);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuMarkDeviceForReset(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuMarkDeviceForReset(pGpu) gpuMarkDeviceForReset_IMPL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuMarkDeviceForReset_HAL(pGpu) gpuMarkDeviceForReset(pGpu)
+
+NV_STATUS gpuUnmarkDeviceForReset_IMPL(struct OBJGPU *pGpu);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuUnmarkDeviceForReset(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuUnmarkDeviceForReset(pGpu) gpuUnmarkDeviceForReset_IMPL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuUnmarkDeviceForReset_HAL(pGpu) gpuUnmarkDeviceForReset(pGpu)
+
+NV_STATUS gpuIsDeviceMarkedForReset_IMPL(struct OBJGPU *pGpu, NvBool *pbResetRequired);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuIsDeviceMarkedForReset(struct OBJGPU *pGpu, NvBool *pbResetRequired) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuIsDeviceMarkedForReset(pGpu, pbResetRequired) gpuIsDeviceMarkedForReset_IMPL(pGpu, pbResetRequired)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuIsDeviceMarkedForReset_HAL(pGpu, pbResetRequired) gpuIsDeviceMarkedForReset(pGpu, pbResetRequired)
+
+static inline NV_STATUS gpuSetDrainAndResetScratchBit_46f6a7(struct OBJGPU *pGpu, NvBool bDrainRecommended) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuSetDrainAndResetScratchBit(struct OBJGPU *pGpu, NvBool bDrainRecommended) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuSetDrainAndResetScratchBit(pGpu, bDrainRecommended) gpuSetDrainAndResetScratchBit_46f6a7(pGpu, bDrainRecommended)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuSetDrainAndResetScratchBit_HAL(pGpu, bDrainRecommended) gpuSetDrainAndResetScratchBit(pGpu, bDrainRecommended)
+
+static inline NV_STATUS gpuGetDrainAndResetScratchBit_46f6a7(struct OBJGPU *pGpu, NvBool *pbDrainRecommended) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuGetDrainAndResetScratchBit(struct OBJGPU *pGpu, NvBool *pbDrainRecommended) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetDrainAndResetScratchBit(pGpu, pbDrainRecommended) gpuGetDrainAndResetScratchBit_46f6a7(pGpu, pbDrainRecommended)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetDrainAndResetScratchBit_HAL(pGpu, pbDrainRecommended) gpuGetDrainAndResetScratchBit(pGpu, pbDrainRecommended)
+
+NV_STATUS gpuMarkDeviceForDrainAndReset_IMPL(struct OBJGPU *pGpu);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuMarkDeviceForDrainAndReset(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuMarkDeviceForDrainAndReset(pGpu) gpuMarkDeviceForDrainAndReset_IMPL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuMarkDeviceForDrainAndReset_HAL(pGpu) gpuMarkDeviceForDrainAndReset(pGpu)
+
+NV_STATUS gpuUnmarkDeviceForDrainAndReset_IMPL(struct OBJGPU *pGpu);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuUnmarkDeviceForDrainAndReset(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuUnmarkDeviceForDrainAndReset(pGpu) gpuUnmarkDeviceForDrainAndReset_IMPL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuUnmarkDeviceForDrainAndReset_HAL(pGpu) gpuUnmarkDeviceForDrainAndReset(pGpu)
+
+NV_STATUS gpuIsDeviceMarkedForDrainAndReset_IMPL(struct OBJGPU *pGpu, NvBool *pbDrainRecommended);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuIsDeviceMarkedForDrainAndReset(struct OBJGPU *pGpu, NvBool *pbDrainRecommended) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuIsDeviceMarkedForDrainAndReset(pGpu, pbDrainRecommended) gpuIsDeviceMarkedForDrainAndReset_IMPL(pGpu, pbDrainRecommended)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuIsDeviceMarkedForDrainAndReset_HAL(pGpu, pbDrainRecommended) gpuIsDeviceMarkedForDrainAndReset(pGpu, pbDrainRecommended)
+
+void gpuGetRecoveryAction_IMPL(struct OBJGPU *pGpu, NV2080_CTRL_GPU_GET_RECOVERY_ACTION_PARAMS *pParams);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuGetRecoveryAction(struct OBJGPU *pGpu, NV2080_CTRL_GPU_GET_RECOVERY_ACTION_PARAMS *pParams) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetRecoveryAction(pGpu, pParams) gpuGetRecoveryAction_IMPL(pGpu, pParams)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetRecoveryAction_HAL(pGpu, pParams) gpuGetRecoveryAction(pGpu, pParams)
+
+static inline void gpuRefreshRecoveryAction_b3696a(struct OBJGPU *pGpu, NvBool inLock) {
+    return;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuRefreshRecoveryAction(struct OBJGPU *pGpu, NvBool inLock) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuRefreshRecoveryAction(pGpu, inLock) gpuRefreshRecoveryAction_b3696a(pGpu, inLock)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuRefreshRecoveryAction_HAL(pGpu, inLock) gpuRefreshRecoveryAction(pGpu, inLock)
+
+static inline void gpuSetRecoveryDrainP2P_b3696a(struct OBJGPU *pGpu, NvBool bDrainP2P) {
+    return;
+}
+
+void gpuSetRecoveryDrainP2P_KERNEL(struct OBJGPU *pGpu, NvBool bDrainP2P);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuSetRecoveryDrainP2P(struct OBJGPU *pGpu, NvBool bDrainP2P) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuSetRecoveryDrainP2P(pGpu, bDrainP2P) gpuSetRecoveryDrainP2P_KERNEL(pGpu, bDrainP2P)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuSetRecoveryDrainP2P_HAL(pGpu, bDrainP2P) gpuSetRecoveryDrainP2P(pGpu, bDrainP2P)
+
+void gpuLogOobXidMessage_KERNEL(struct OBJGPU *pGpu, NvU32 xid, const char *string, NvU32 len);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuLogOobXidMessage(struct OBJGPU *pGpu, NvU32 xid, const char *string, NvU32 len) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuLogOobXidMessage(pGpu, xid, string, len) gpuLogOobXidMessage_KERNEL(pGpu, xid, string, len)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuLogOobXidMessage_HAL(pGpu, xid, string, len) gpuLogOobXidMessage(pGpu, xid, string, len)
+
+static inline NV_STATUS gpuPrivSecInitRegistryOverrides_56cd7a(struct OBJGPU *pGpu) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuPrivSecInitRegistryOverrides(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuPrivSecInitRegistryOverrides(pGpu) gpuPrivSecInitRegistryOverrides_56cd7a(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuPrivSecInitRegistryOverrides_HAL(pGpu) gpuPrivSecInitRegistryOverrides(pGpu)
+
+NV_STATUS gpuPowerOff_KERNEL(struct OBJGPU *pGpu);
+
+NV_STATUS gpuPowerOff_IMPL(struct OBJGPU *pGpu);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuPowerOff(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuPowerOff(pGpu) gpuPowerOff_KERNEL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuPowerOff_HAL(pGpu) gpuPowerOff(pGpu)
+
+NV_STATUS gpuPowerOn_KERNEL(struct OBJGPU *pGpu);
+
+NV_STATUS gpuPowerOn_IMPL(struct OBJGPU *pGpu);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuPowerOn(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuPowerOn(pGpu) gpuPowerOn_KERNEL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuPowerOn_HAL(pGpu) gpuPowerOn(pGpu)
+
+static inline NV_STATUS gpuPowerOffHda_46f6a7(struct OBJGPU *pGpu) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuPowerOffHda(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuPowerOffHda(pGpu) gpuPowerOffHda_46f6a7(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuPowerOffHda_HAL(pGpu) gpuPowerOffHda(pGpu)
+
+static inline NV_STATUS gpuPowerOnHda_46f6a7(struct OBJGPU *pGpu) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuPowerOnHda(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuPowerOnHda(pGpu) gpuPowerOnHda_46f6a7(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuPowerOnHda_HAL(pGpu) gpuPowerOnHda(pGpu)
+
+static inline NvU32 gpuGetBusIntfType_f222ee(struct OBJGPU *pGpu) {
+    return (8);
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvU32 gpuGetBusIntfType(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return 0;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetBusIntfType(pGpu) gpuGetBusIntfType_f222ee(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetBusIntfType_HAL(pGpu) gpuGetBusIntfType(pGpu)
+
+static inline NV_STATUS gpuWriteBusConfigReg_46f6a7(struct OBJGPU *pGpu, NvU32 index, NvU32 value) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuWriteBusConfigReg(struct OBJGPU *pGpu, NvU32 index, NvU32 value) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuWriteBusConfigReg(pGpu, index, value) gpuWriteBusConfigReg_46f6a7(pGpu, index, value)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuWriteBusConfigReg_HAL(pGpu, index, value) gpuWriteBusConfigReg(pGpu, index, value)
+
+static inline NV_STATUS gpuReadBusConfigReg_46f6a7(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuReadBusConfigReg(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuReadBusConfigReg(pGpu, index, data) gpuReadBusConfigReg_46f6a7(pGpu, index, data)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuReadBusConfigReg_HAL(pGpu, index, data) gpuReadBusConfigReg(pGpu, index, data)
+
+static inline NV_STATUS gpuReadBusConfigRegEx_5baef9(struct OBJGPU *pGpu, NvU32 index, NvU32 *data, THREAD_STATE_NODE *pThreadState) {
+    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuReadBusConfigRegEx(struct OBJGPU *pGpu, NvU32 index, NvU32 *data, THREAD_STATE_NODE *pThreadState) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuReadBusConfigRegEx(pGpu, index, data, pThreadState) gpuReadBusConfigRegEx_5baef9(pGpu, index, data, pThreadState)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuReadBusConfigRegEx_HAL(pGpu, index, data, pThreadState) gpuReadBusConfigRegEx(pGpu, index, data, pThreadState)
+
+static inline NV_STATUS gpuReadFunctionConfigReg_5baef9(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 *data) {
+    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuReadFunctionConfigReg(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 *data) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuReadFunctionConfigReg(pGpu, function, reg, data) gpuReadFunctionConfigReg_5baef9(pGpu, function, reg, data)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuReadFunctionConfigReg_HAL(pGpu, function, reg, data) gpuReadFunctionConfigReg(pGpu, function, reg, data)
+
+static inline NV_STATUS gpuWriteFunctionConfigReg_5baef9(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data) {
+    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuWriteFunctionConfigReg(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuWriteFunctionConfigReg(pGpu, function, reg, data) gpuWriteFunctionConfigReg_5baef9(pGpu, function, reg, data)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuWriteFunctionConfigReg_HAL(pGpu, function, reg, data) gpuWriteFunctionConfigReg(pGpu, function, reg, data)
+
+static inline NV_STATUS gpuWriteFunctionConfigRegEx_5baef9(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data, THREAD_STATE_NODE *pThreadState) {
+    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuWriteFunctionConfigRegEx(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data, THREAD_STATE_NODE *pThreadState) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuWriteFunctionConfigRegEx(pGpu, function, reg, data, pThreadState) gpuWriteFunctionConfigRegEx_5baef9(pGpu, function, reg, data, pThreadState)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuWriteFunctionConfigRegEx_HAL(pGpu, function, reg, data, pThreadState) gpuWriteFunctionConfigRegEx(pGpu, function, reg, data, pThreadState)
+
+static inline NV_STATUS gpuReadPassThruConfigReg_46f6a7(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuReadPassThruConfigReg(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuReadPassThruConfigReg(pGpu, index, data) gpuReadPassThruConfigReg_46f6a7(pGpu, index, data)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuReadPassThruConfigReg_HAL(pGpu, index, data) gpuReadPassThruConfigReg(pGpu, index, data)
+
+static inline NV_STATUS gpuConfigAccessSanityCheck_56cd7a(struct OBJGPU *pGpu) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuConfigAccessSanityCheck(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuConfigAccessSanityCheck(pGpu) gpuConfigAccessSanityCheck_56cd7a(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuConfigAccessSanityCheck_HAL(pGpu) gpuConfigAccessSanityCheck(pGpu)
+
+NV_STATUS gpuReadBusConfigCycle_GM107(struct OBJGPU *pGpu, NvU32 hwDefAddr, NvU32 *pData);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuReadBusConfigCycle(struct OBJGPU *pGpu, NvU32 hwDefAddr, NvU32 *pData) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuReadBusConfigCycle(pGpu, hwDefAddr, pData) gpuReadBusConfigCycle_GM107(pGpu, hwDefAddr, pData)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuReadBusConfigCycle_HAL(pGpu, hwDefAddr, pData) gpuReadBusConfigCycle(pGpu, hwDefAddr, pData)
+
+NV_STATUS gpuWriteBusConfigCycle_GM107(struct OBJGPU *pGpu, NvU32 hwDefAddr, NvU32 value);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuWriteBusConfigCycle(struct OBJGPU *pGpu, NvU32 hwDefAddr, NvU32 value) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuWriteBusConfigCycle(pGpu, hwDefAddr, value) gpuWriteBusConfigCycle_GM107(pGpu, hwDefAddr, value)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuWriteBusConfigCycle_HAL(pGpu, hwDefAddr, value) gpuWriteBusConfigCycle(pGpu, hwDefAddr, value)
+
+static inline NV_STATUS gpuReadPcieConfigCycle_46f6a7(struct OBJGPU *pGpu, NvU32 hwDefAddr, NvU32 *pData, NvU8 func) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuReadPcieConfigCycle(struct OBJGPU *pGpu, NvU32 hwDefAddr, NvU32 *pData, NvU8 func) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuReadPcieConfigCycle(pGpu, hwDefAddr, pData, func) gpuReadPcieConfigCycle_46f6a7(pGpu, hwDefAddr, pData, func)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuReadPcieConfigCycle_HAL(pGpu, hwDefAddr, pData, func) gpuReadPcieConfigCycle(pGpu, hwDefAddr, pData, func)
+
+static inline NV_STATUS gpuWritePcieConfigCycle_46f6a7(struct OBJGPU *pGpu, NvU32 hwDefAddr, NvU32 value, NvU8 func) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuWritePcieConfigCycle(struct OBJGPU *pGpu, NvU32 hwDefAddr, NvU32 value, NvU8 func) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuWritePcieConfigCycle(pGpu, hwDefAddr, value, func) gpuWritePcieConfigCycle_46f6a7(pGpu, hwDefAddr, value, func)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuWritePcieConfigCycle_HAL(pGpu, hwDefAddr, value, func) gpuWritePcieConfigCycle(pGpu, hwDefAddr, value, func)
+
+static inline void gpuGetPcieExtCfgDvsecInfo_b3696a(struct OBJGPU *pGpu, NvU32 hwDefAddr, NvU32 *pVenId, NvU32 *pDvsecLen) {
+    return;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuGetPcieExtCfgDvsecInfo(struct OBJGPU *pGpu, NvU32 hwDefAddr, NvU32 *pVenId, NvU32 *pDvsecLen) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetPcieExtCfgDvsecInfo(pGpu, hwDefAddr, pVenId, pDvsecLen) gpuGetPcieExtCfgDvsecInfo_b3696a(pGpu, hwDefAddr, pVenId, pDvsecLen)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetPcieExtCfgDvsecInfo_HAL(pGpu, hwDefAddr, pVenId, pDvsecLen) gpuGetPcieExtCfgDvsecInfo(pGpu, hwDefAddr, pVenId, pDvsecLen)
+
+static inline NvU32 gpuGetPlatformPowerDomain_4a4dee(struct OBJGPU *pGpu) {
+    return 0;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvU32 gpuGetPlatformPowerDomain(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return 0;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetPlatformPowerDomain(pGpu) gpuGetPlatformPowerDomain_4a4dee(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetPlatformPowerDomain_HAL(pGpu) gpuGetPlatformPowerDomain(pGpu)
+
+static inline void gpuUpdateIdInfo_b3696a(struct OBJGPU *pGpu) {
+    return;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuUpdateIdInfo(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuUpdateIdInfo(pGpu) gpuUpdateIdInfo_b3696a(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuUpdateIdInfo_HAL(pGpu) gpuUpdateIdInfo(pGpu)
+
+static inline NvU32 gpuGetDeviceIDList_4a4dee(struct OBJGPU *pGpu, DEVICE_ID_MAPPING **arg2) {
+    return 0;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvU32 gpuGetDeviceIDList(struct OBJGPU *pGpu, DEVICE_ID_MAPPING **arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return 0;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetDeviceIDList(pGpu, arg2) gpuGetDeviceIDList_4a4dee(pGpu, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetDeviceIDList_HAL(pGpu, arg2) gpuGetDeviceIDList(pGpu, arg2)
+
+NV_STATUS gpuGenGidData_SOC(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuGenGidData(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGenGidData(pGpu, pGidData, gidSize, gidFlags) gpuGenGidData_SOC(pGpu, pGidData, gidSize, gidFlags)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGenGidData_HAL(pGpu, pGidData, gidSize, gidFlags) gpuGenGidData(pGpu, pGidData, gidSize, gidFlags)
+
+NvU8 gpuGetChipSubRev_FWCLIENT(struct OBJGPU *pGpu);
+
+static inline NvU8 gpuGetChipSubRev_4a4dee(struct OBJGPU *pGpu) {
+    return 0;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvU8 gpuGetChipSubRev(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return 0;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetChipSubRev(pGpu) gpuGetChipSubRev_FWCLIENT(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetChipSubRev_HAL(pGpu) gpuGetChipSubRev(pGpu)
+
+static inline NV_STATUS gpuGetSkuInfo_92bfc3(struct OBJGPU *pGpu, NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS *pParams) {
+    NV_ASSERT_PRECOMP(0);
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuGetSkuInfo(struct OBJGPU *pGpu, NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS *pParams) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetSkuInfo(pGpu, pParams) gpuGetSkuInfo_92bfc3(pGpu, pParams)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetSkuInfo_HAL(pGpu, pParams) gpuGetSkuInfo(pGpu, pParams)
+
+static inline NV_STATUS gpuPerformUniversalValidation_56cd7a(struct OBJGPU *pGpu) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuPerformUniversalValidation(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuPerformUniversalValidation(pGpu) gpuPerformUniversalValidation_56cd7a(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuPerformUniversalValidation_HAL(pGpu) gpuPerformUniversalValidation(pGpu)
+
+static inline NvU32 gpuGetVirtRegPhysOffset_4a4dee(struct OBJGPU *pGpu) {
+    return 0;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvU32 gpuGetVirtRegPhysOffset(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return 0;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetVirtRegPhysOffset(pGpu) gpuGetVirtRegPhysOffset_4a4dee(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetVirtRegPhysOffset_HAL(pGpu) gpuGetVirtRegPhysOffset(pGpu)
+
+NV_STATUS gpuGetRegBaseOffset_FWCLIENT(struct OBJGPU *pGpu, NvU32 arg2, NvU32 *arg3);
+
+static inline NV_STATUS gpuGetRegBaseOffset_46f6a7(struct OBJGPU *pGpu, NvU32 arg2, NvU32 *arg3) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuGetRegBaseOffset(struct OBJGPU *pGpu, NvU32 arg2, NvU32 *arg3) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetRegBaseOffset(pGpu, arg2, arg3) gpuGetRegBaseOffset_FWCLIENT(pGpu, arg2, arg3)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetRegBaseOffset_HAL(pGpu, arg2, arg3) gpuGetRegBaseOffset(pGpu, arg2, arg3)
+
+static inline void gpuHandleSanityCheckRegReadError_b3696a(struct OBJGPU *pGpu, NvU32 addr, NvU32 value) {
+    return;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuHandleSanityCheckRegReadError(struct OBJGPU *pGpu, NvU32 addr, NvU32 value) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuHandleSanityCheckRegReadError(pGpu, addr, value) gpuHandleSanityCheckRegReadError_b3696a(pGpu, addr, value)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuHandleSanityCheckRegReadError_HAL(pGpu, addr, value) gpuHandleSanityCheckRegReadError(pGpu, addr, value)
+
+static inline void gpuHandleSecFault_b3696a(struct OBJGPU *pGpu) {
+    return;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuHandleSecFault(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuHandleSecFault(pGpu) gpuHandleSecFault_b3696a(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuHandleSecFault_HAL(pGpu) gpuHandleSecFault(pGpu)
+
+static inline void gpuGetSanityCheckRegReadError_b3696a(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString) {
+    return;
+}
+
+
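The short hexadecimal suffixes used throughout (_56cd7a, _46f6a7, _b3696a, _3dd2c9, _4a4dee, and similar) appear to be NVOC-generated identifiers for shared default bodies: within this header, _56cd7a defaults return NV_OK, _46f6a7 return NV_ERR_NOT_SUPPORTED, _b3696a are empty void stubs, _3dd2c9 return NV_FALSE, and _4a4dee return 0. Callers are expected to treat the "not supported" default as an absent feature rather than a hard failure; a sketch, with a hypothetical wrapper name:

    /* Sketch: tolerating the _46f6a7 "not supported" default. On chips where
       gpuPowerOffHda() is bound to gpuPowerOffHda_46f6a7(), there is simply
       no HDA power control to perform. exampleSuspendHdaController() is
       illustrative, not part of this patch. */
    static NV_STATUS exampleSuspendHdaController(struct OBJGPU *pGpu)
    {
        NV_STATUS status = gpuPowerOffHda_HAL(pGpu);
        if (status == NV_ERR_NOT_SUPPORTED)
            return NV_OK;   /* feature absent on this configuration */
        return status;
    }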
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuGetSanityCheckRegReadError(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetSanityCheckRegReadError(pGpu, value, pErrorString) gpuGetSanityCheckRegReadError_b3696a(pGpu, value, pErrorString)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetSanityCheckRegReadError_HAL(pGpu, value, pErrorString) gpuGetSanityCheckRegReadError(pGpu, value, pErrorString)
+
+static inline NV_STATUS gpuSanityCheckVirtRegAccess_56cd7a(struct OBJGPU *pGpu, NvU32 arg2) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuSanityCheckVirtRegAccess(struct OBJGPU *pGpu, NvU32 arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuSanityCheckVirtRegAccess(pGpu, arg2) gpuSanityCheckVirtRegAccess_56cd7a(pGpu, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuSanityCheckVirtRegAccess_HAL(pGpu, arg2) gpuSanityCheckVirtRegAccess(pGpu, arg2)
+
+NV_STATUS gpuInitRegistryOverrides_KERNEL(struct OBJGPU *pGpu);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuInitRegistryOverrides(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuInitRegistryOverrides(pGpu) gpuInitRegistryOverrides_KERNEL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuInitRegistryOverrides_HAL(pGpu) gpuInitRegistryOverrides(pGpu)
+
+NV_STATUS gpuInitInstLocOverrides_IMPL(struct OBJGPU *pGpu);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuInitInstLocOverrides(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuInitInstLocOverrides(pGpu) gpuInitInstLocOverrides_IMPL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuInitInstLocOverrides_HAL(pGpu) gpuInitInstLocOverrides(pGpu)
+
+static inline NV_STATUS gpuInitSriov_56cd7a(struct OBJGPU *pGpu) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuInitSriov(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuInitSriov(pGpu) gpuInitSriov_56cd7a(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuInitSriov_HAL(pGpu) gpuInitSriov(pGpu)
+
+static inline NV_STATUS gpuDeinitSriov_56cd7a(struct OBJGPU *pGpu) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuDeinitSriov(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuDeinitSriov(pGpu) gpuDeinitSriov_56cd7a(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuDeinitSriov_HAL(pGpu) gpuDeinitSriov(pGpu)
+
+static inline NV_STATUS gpuMnocMboxSyncRecv_46f6a7(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port, RMTIMEOUT *pTimeout, void *pMsgAddr, NvU32 *pMsgSize) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuMnocMboxSyncRecv(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port, RMTIMEOUT *pTimeout, void *pMsgAddr, NvU32 *pMsgSize) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuMnocMboxSyncRecv(pGpu, pMboxAperture, port, pTimeout, pMsgAddr, pMsgSize) gpuMnocMboxSyncRecv_46f6a7(pGpu, pMboxAperture, port, pTimeout, pMsgAddr, pMsgSize)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuMnocMboxSyncRecv_HAL(pGpu, pMboxAperture, port, pTimeout, pMsgAddr, pMsgSize) gpuMnocMboxSyncRecv(pGpu, pMboxAperture, port, pTimeout, pMsgAddr, pMsgSize)
+
+static inline NV_STATUS gpuMnocMboxSend_46f6a7(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port, RMTIMEOUT *pTimeout, void *pMsgAddr, NvU32 msgSize) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuMnocMboxSend(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port, RMTIMEOUT *pTimeout, void *pMsgAddr, NvU32 msgSize) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuMnocMboxSend(pGpu, pMboxAperture, port, pTimeout, pMsgAddr, msgSize) gpuMnocMboxSend_46f6a7(pGpu, pMboxAperture, port, pTimeout, pMsgAddr, msgSize)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuMnocMboxSend_HAL(pGpu, pMboxAperture, port, pTimeout, pMsgAddr, msgSize) gpuMnocMboxSend(pGpu, pMboxAperture, port, pTimeout, pMsgAddr, msgSize)
+
+static inline NV_STATUS gpuMnocMboxRecv_46f6a7(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port, void *pMsgAddr, NvU32 *pMsgSize) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuMnocMboxRecv(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port, void *pMsgAddr, NvU32 *pMsgSize) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuMnocMboxRecv(pGpu, pMboxAperture, port, pMsgAddr, pMsgSize) gpuMnocMboxRecv_46f6a7(pGpu, pMboxAperture, port, pMsgAddr, pMsgSize)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuMnocMboxRecv_HAL(pGpu, pMboxAperture, port, pMsgAddr, pMsgSize) gpuMnocMboxRecv(pGpu, pMboxAperture, port, pMsgAddr, pMsgSize)
+
+static inline NvBool gpuMnocMboxIsMsgAvailable_3dd2c9(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port) {
+    return NV_FALSE;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvBool gpuMnocMboxIsMsgAvailable(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuMnocMboxIsMsgAvailable(pGpu, pMboxAperture, port) gpuMnocMboxIsMsgAvailable_3dd2c9(pGpu, pMboxAperture, port)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuMnocMboxIsMsgAvailable_HAL(pGpu, pMboxAperture, port) gpuMnocMboxIsMsgAvailable(pGpu, pMboxAperture, port)
+
+static inline void gpuMnocMboxInterruptEnable_b3696a(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port) {
+    return;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuMnocMboxInterruptEnable(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuMnocMboxInterruptEnable(pGpu, pMboxAperture, port) gpuMnocMboxInterruptEnable_b3696a(pGpu, pMboxAperture, port)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuMnocMboxInterruptEnable_HAL(pGpu, pMboxAperture, port) gpuMnocMboxInterruptEnable(pGpu, pMboxAperture, port)
+
+static inline void gpuMnocMboxInterruptDisable_b3696a(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port) {
+    return;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuMnocMboxInterruptDisable(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuMnocMboxInterruptDisable(pGpu, pMboxAperture, port) gpuMnocMboxInterruptDisable_b3696a(pGpu, pMboxAperture, port)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuMnocMboxInterruptDisable_HAL(pGpu, pMboxAperture, port) gpuMnocMboxInterruptDisable(pGpu, pMboxAperture, port)
+
+static inline NvBool gpuMnocMboxInterruptRaised_3dd2c9(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port) {
+    return NV_FALSE;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvBool gpuMnocMboxInterruptRaised(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuMnocMboxInterruptRaised(pGpu, pMboxAperture, port) gpuMnocMboxInterruptRaised_3dd2c9(pGpu, pMboxAperture, port)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuMnocMboxInterruptRaised_HAL(pGpu, pMboxAperture, port) gpuMnocMboxInterruptRaised(pGpu, pMboxAperture, port)
+
+static inline void gpuMnocMboxInterruptClear_b3696a(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port) {
+    return;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuMnocMboxInterruptClear(struct OBJGPU *pGpu, struct IoAperture *pMboxAperture, NvU32 port) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuMnocMboxInterruptClear(pGpu, pMboxAperture, port) gpuMnocMboxInterruptClear_b3696a(pGpu, pMboxAperture, port)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuMnocMboxInterruptClear_HAL(pGpu, pMboxAperture, port) gpuMnocMboxInterruptClear(pGpu, pMboxAperture, port)
+
+static inline NvU32 gpuMnocMboxMinMessageSize_15a734(struct OBJGPU *pGpu) {
+    return 0U;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvU32 gpuMnocMboxMinMessageSize(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return 0;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuMnocMboxMinMessageSize(pGpu) gpuMnocMboxMinMessageSize_15a734(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuMnocMboxMinMessageSize_HAL(pGpu) gpuMnocMboxMinMessageSize(pGpu)
+
+static inline NvU32 gpuMnocMboxMaxMessageSize_15a734(struct OBJGPU *pGpu) {
+    return 0U;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvU32 gpuMnocMboxMaxMessageSize(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return 0;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuMnocMboxMaxMessageSize(pGpu) gpuMnocMboxMaxMessageSize_15a734(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuMnocMboxMaxMessageSize_HAL(pGpu) gpuMnocMboxMaxMessageSize(pGpu)
+
+static inline NV_STATUS gpuCreateDefaultClientShare_56cd7a(struct OBJGPU *pGpu) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuCreateDefaultClientShare(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuCreateDefaultClientShare(pGpu) gpuCreateDefaultClientShare_56cd7a(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuCreateDefaultClientShare_HAL(pGpu) gpuCreateDefaultClientShare(pGpu)
+
+static inline void gpuDestroyDefaultClientShare_b3696a(struct OBJGPU *pGpu) {
+    return;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuDestroyDefaultClientShare(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuDestroyDefaultClientShare(pGpu) gpuDestroyDefaultClientShare_b3696a(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuDestroyDefaultClientShare_HAL(pGpu) gpuDestroyDefaultClientShare(pGpu)
+
+static inline NvBool gpuFuseSupportsDisplay_88bc07(struct OBJGPU *pGpu) {
+    return NV_TRUE;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvBool gpuFuseSupportsDisplay(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuFuseSupportsDisplay(pGpu) gpuFuseSupportsDisplay_88bc07(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuFuseSupportsDisplay_HAL(pGpu) gpuFuseSupportsDisplay(pGpu)
+
+void gpuGetTerminatedLinkMask_GA100(struct OBJGPU *pGpu, NvU32 arg2);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuGetTerminatedLinkMask(struct OBJGPU *pGpu, NvU32 arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetTerminatedLinkMask(pGpu, arg2) gpuGetTerminatedLinkMask_GA100(pGpu, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetTerminatedLinkMask_HAL(pGpu, arg2) gpuGetTerminatedLinkMask(pGpu, arg2)
+
+static inline NV_STATUS gpuJtVersionSanityCheck_56cd7a(struct OBJGPU *pGpu) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuJtVersionSanityCheck(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuJtVersionSanityCheck(pGpu) gpuJtVersionSanityCheck_56cd7a(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuJtVersionSanityCheck_HAL(pGpu) gpuJtVersionSanityCheck(pGpu)
+
+NV_STATUS gpuValidateRmctrlCmd_T234D(struct OBJGPU *pGpu, NvU32 cmd);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuValidateRmctrlCmd(struct OBJGPU *pGpu, NvU32 cmd) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuValidateRmctrlCmd(pGpu, cmd) gpuValidateRmctrlCmd_T234D(pGpu, cmd)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuValidateRmctrlCmd_HAL(pGpu, cmd) gpuValidateRmctrlCmd(pGpu, cmd)
+
+NV_STATUS gpuValidateBusInfoIndex_T234D(struct OBJGPU *pGpu, NvU32 index);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuValidateBusInfoIndex(struct OBJGPU *pGpu, NvU32 index) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuValidateBusInfoIndex(pGpu, index) gpuValidateBusInfoIndex_T234D(pGpu, index)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuValidateBusInfoIndex_HAL(pGpu, index) gpuValidateBusInfoIndex(pGpu, index)
+
+NvBool gpuIsSystemRebootRequired_FWCLIENT(struct OBJGPU *pGpu);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvBool gpuIsSystemRebootRequired(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuIsSystemRebootRequired(pGpu) gpuIsSystemRebootRequired_FWCLIENT(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuIsSystemRebootRequired_HAL(pGpu) gpuIsSystemRebootRequired(pGpu)
+
+static inline void gpuDetermineSelfHostedMode_b3696a(struct OBJGPU *pGpu) {
+    return;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuDetermineSelfHostedMode(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuDetermineSelfHostedMode(pGpu) gpuDetermineSelfHostedMode_b3696a(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
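The suffix each dispatch macro selects is the static HAL binding for this build: _IMPL is the common implementation, _KERNEL and _FWCLIENT appear to pick the kernel-RM and GSP-firmware-client paths, and chip suffixes such as _GM107, _GA100, or the Tegra display variants _T234D/_T256D/_T264D pick per-chip code. Because the binding is a plain macro, a _HAL call costs nothing extra at runtime; it expands to a direct function call. A sketch of a call site, with a hypothetical wrapper:

    /* Sketch: in this build gpuValidateRmctrlCmd() expands to
       gpuValidateRmctrlCmd_T234D(), so this is a statically bound call.
       exampleCheckCtrlCmd() is illustrative, not part of this patch. */
    static NV_STATUS exampleCheckCtrlCmd(struct OBJGPU *pGpu, NvU32 cmd)
    {
        NV_STATUS status = gpuValidateRmctrlCmd_HAL(pGpu, cmd);
        if (status != NV_OK)
            return status;  /* command rejected for this SoC */
        return NV_OK;       /* safe to dispatch the control call */
    }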
+#define gpuDetermineSelfHostedMode_HAL(pGpu) gpuDetermineSelfHostedMode(pGpu)
+
+static inline NvU32 gpuDetermineSelfHostedSocType_997682(struct OBJGPU *pGpu) {
+    return NV0000_CTRL_SYSTEM_SH_SOC_TYPE_NA;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvU32 gpuDetermineSelfHostedSocType(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return 0;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuDetermineSelfHostedSocType(pGpu) gpuDetermineSelfHostedSocType_997682(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuDetermineSelfHostedSocType_HAL(pGpu) gpuDetermineSelfHostedSocType(pGpu)
+
+static inline NvBool gpuValidateMIGSupport_72a2e1(struct OBJGPU *pGpu) {
+    NV_ASSERT_PRECOMP(0);
+    return NV_FALSE;
+}
+
+NvBool gpuValidateMIGSupport_KERNEL(struct OBJGPU *pGpu);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvBool gpuValidateMIGSupport(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuValidateMIGSupport(pGpu) gpuValidateMIGSupport_KERNEL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuValidateMIGSupport_HAL(pGpu) gpuValidateMIGSupport(pGpu)
+
+NV_STATUS gpuInitOptimusSettings_IMPL(struct OBJGPU *pGpu);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuInitOptimusSettings(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuInitOptimusSettings(pGpu) gpuInitOptimusSettings_IMPL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuInitOptimusSettings_HAL(pGpu) gpuInitOptimusSettings(pGpu)
+
+NV_STATUS gpuDeinitOptimusSettings_IMPL(struct OBJGPU *pGpu);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuDeinitOptimusSettings(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuDeinitOptimusSettings(pGpu) gpuDeinitOptimusSettings_IMPL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuDeinitOptimusSettings_HAL(pGpu) gpuDeinitOptimusSettings(pGpu)
+
+static inline NV_STATUS gpuSetCacheOnlyModeOverrides_56cd7a(struct OBJGPU *pGpu) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuSetCacheOnlyModeOverrides(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuSetCacheOnlyModeOverrides(pGpu) gpuSetCacheOnlyModeOverrides_56cd7a(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuSetCacheOnlyModeOverrides_HAL(pGpu) gpuSetCacheOnlyModeOverrides(pGpu)
+
+NV_STATUS gpuGetCeFaultMethodBufferSize_KERNEL(struct OBJGPU *arg1, NvU32 *arg2);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuGetCeFaultMethodBufferSize(struct OBJGPU *arg1, NvU32 *arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetCeFaultMethodBufferSize(arg1, arg2) gpuGetCeFaultMethodBufferSize_KERNEL(arg1, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetCeFaultMethodBufferSize_HAL(arg1, arg2) gpuGetCeFaultMethodBufferSize(arg1, arg2)
+
+static inline NV_STATUS gpuSetVFBarSizes_46f6a7(struct OBJGPU *pGpu, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *arg2) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuSetVFBarSizes(struct OBJGPU *pGpu, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuSetVFBarSizes(pGpu, arg2) gpuSetVFBarSizes_46f6a7(pGpu, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuSetVFBarSizes_HAL(pGpu, arg2) gpuSetVFBarSizes(pGpu, arg2)
+
+static inline NvBool gpuIsSliCapableWithoutDisplay_3dd2c9(struct OBJGPU *pGpu) {
+    return NV_FALSE;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvBool gpuIsSliCapableWithoutDisplay(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuIsSliCapableWithoutDisplay(pGpu) gpuIsSliCapableWithoutDisplay_3dd2c9(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuIsSliCapableWithoutDisplay_HAL(pGpu) gpuIsSliCapableWithoutDisplay(pGpu)
+
+static inline GPU_P2P_PEER_GPU_CAPS *gpuFindP2PPeerGpuCapsByGpuId_80f438(struct OBJGPU *pGpu, NvU32 peerGpuId) {
+    NV_ASSERT_OR_RETURN_PRECOMP(0, ((void *)0));
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline GPU_P2P_PEER_GPU_CAPS *gpuFindP2PPeerGpuCapsByGpuId(struct OBJGPU *pGpu, NvU32 peerGpuId) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NULL;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuFindP2PPeerGpuCapsByGpuId(pGpu, peerGpuId) gpuFindP2PPeerGpuCapsByGpuId_80f438(pGpu, peerGpuId)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuFindP2PPeerGpuCapsByGpuId_HAL(pGpu, peerGpuId) gpuFindP2PPeerGpuCapsByGpuId(pGpu, peerGpuId)
+
+static inline NvBool gpuIsCtxBufAllocInPmaSupported_3dd2c9(struct OBJGPU *pGpu) {
+    return NV_FALSE;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvBool gpuIsCtxBufAllocInPmaSupported(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuIsCtxBufAllocInPmaSupported(pGpu) gpuIsCtxBufAllocInPmaSupported_3dd2c9(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuIsCtxBufAllocInPmaSupported_HAL(pGpu) gpuIsCtxBufAllocInPmaSupported(pGpu)
+
+static inline NV_STATUS gpuLoadFailurePathTest_56cd7a(struct OBJGPU *pGpu, NvU32 engStage, NvU32 engDescIdx, NvBool bStopTest) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuLoadFailurePathTest(struct OBJGPU *pGpu, NvU32 engStage, NvU32 engDescIdx, NvBool bStopTest) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuLoadFailurePathTest(pGpu, engStage, engDescIdx, bStopTest) gpuLoadFailurePathTest_56cd7a(pGpu, engStage, engDescIdx, bStopTest)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuLoadFailurePathTest_HAL(pGpu, engStage, engDescIdx, bStopTest) gpuLoadFailurePathTest(pGpu, engStage, engDescIdx, bStopTest)
+
+static inline const NV_ERROR_CONT_STATE_TABLE *gpuGetErrorContStateTableAndSize_11d6dc(struct OBJGPU *pGpu, NvU32 *pTableSize) {
+    NV_ASSERT_OR_RETURN_PRECOMP(0, ((void *)0));
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline const NV_ERROR_CONT_STATE_TABLE *gpuGetErrorContStateTableAndSize(struct OBJGPU *pGpu, NvU32 *pTableSize) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NULL;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetErrorContStateTableAndSize(pGpu, pTableSize) gpuGetErrorContStateTableAndSize_11d6dc(pGpu, pTableSize)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetErrorContStateTableAndSize_HAL(pGpu, pTableSize) gpuGetErrorContStateTableAndSize(pGpu, pTableSize)
+
+static inline NV_STATUS gpuUpdateErrorContainmentState_f91eed(struct OBJGPU *pGpu, NV_ERROR_CONT_ERR_ID arg2, NV_ERROR_CONT_LOCATION arg3, NvU32 *arg4) {
+    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_OK);
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuUpdateErrorContainmentState(struct OBJGPU *pGpu, NV_ERROR_CONT_ERR_ID arg2, NV_ERROR_CONT_LOCATION arg3, NvU32 *arg4) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuUpdateErrorContainmentState(pGpu, arg2, arg3, arg4) gpuUpdateErrorContainmentState_f91eed(pGpu, arg2, arg3, arg4)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuUpdateErrorContainmentState_HAL(pGpu, arg2, arg3, arg4) gpuUpdateErrorContainmentState(pGpu, arg2, arg3, arg4)
+
+static inline NV_STATUS gpuSetPartitionErrorAttribution_c04480(struct OBJGPU *pGpu, NV_ERROR_CONT_ERR_ID arg2, NV_ERROR_CONT_LOCATION arg3, NvU32 arg4) {
+    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuSetPartitionErrorAttribution(struct OBJGPU *pGpu, NV_ERROR_CONT_ERR_ID arg2, NV_ERROR_CONT_LOCATION arg3, NvU32 arg4) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuSetPartitionErrorAttribution(pGpu, arg2, arg3, arg4) gpuSetPartitionErrorAttribution_c04480(pGpu, arg2, arg3, arg4)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuSetPartitionErrorAttribution_HAL(pGpu, arg2, arg3, arg4) gpuSetPartitionErrorAttribution(pGpu, arg2, arg3, arg4)
+
+static inline NV_STATUS gpuCreateRusdMemory_56cd7a(struct OBJGPU *pGpu) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuCreateRusdMemory(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuCreateRusdMemory(pGpu) gpuCreateRusdMemory_56cd7a(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuCreateRusdMemory_HAL(pGpu) gpuCreateRusdMemory(pGpu)
+
+void gpuDestroyRusdMemory_IMPL(struct OBJGPU *pGpu);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline void gpuDestroyRusdMemory(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuDestroyRusdMemory(pGpu) gpuDestroyRusdMemory_IMPL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuDestroyRusdMemory_HAL(pGpu) gpuDestroyRusdMemory(pGpu)
+
+static inline NvBool gpuCheckEccCounts_d69453(struct OBJGPU *pGpu) {
+    return NV_FALSE;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvBool gpuCheckEccCounts(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuCheckEccCounts(pGpu) gpuCheckEccCounts_d69453(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuCheckEccCounts_HAL(pGpu) gpuCheckEccCounts(pGpu)
+
+static inline NV_STATUS gpuWaitForGfwBootComplete_5baef9(struct OBJGPU *pGpu) {
+    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuWaitForGfwBootComplete(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuWaitForGfwBootComplete(pGpu) gpuWaitForGfwBootComplete_5baef9(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuWaitForGfwBootComplete_HAL(pGpu) gpuWaitForGfwBootComplete(pGpu)
+
+static inline NvU32 gpuGetFirstAsyncLce_54c809(struct OBJGPU *pGpu) {
+    return (11);
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvU32 gpuGetFirstAsyncLce(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return 0;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetFirstAsyncLce(pGpu) gpuGetFirstAsyncLce_54c809(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetFirstAsyncLce_HAL(pGpu) gpuGetFirstAsyncLce(pGpu)
+
+static inline NvBool gpuIsInternalSkuFuseEnabled_3dd2c9(struct OBJGPU *pGpu) {
+    return NV_FALSE;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvBool gpuIsInternalSkuFuseEnabled(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuIsInternalSkuFuseEnabled(pGpu) gpuIsInternalSkuFuseEnabled_3dd2c9(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuIsInternalSkuFuseEnabled_HAL(pGpu) gpuIsInternalSkuFuseEnabled(pGpu)
+
+static inline NV_STATUS gpuRequireGrCePresence_56cd7a(struct OBJGPU *pGpu, ENGDESCRIPTOR engDesc, NvBool *pIsEngineRequired) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuRequireGrCePresence(struct OBJGPU *pGpu, ENGDESCRIPTOR engDesc, NvBool *pIsEngineRequired) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuRequireGrCePresence(pGpu, engDesc, pIsEngineRequired) gpuRequireGrCePresence_56cd7a(pGpu, engDesc, pIsEngineRequired)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuRequireGrCePresence_HAL(pGpu, engDesc, pIsEngineRequired) gpuRequireGrCePresence(pGpu, engDesc, pIsEngineRequired)
+
+static inline NV_STATUS gpuSocGetSecureRegionInfo_46f6a7(struct OBJGPU *pGpu, NvU32 srIndex, NvU64 *pBase, NvU64 *pSize) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuSocGetSecureRegionInfo(struct OBJGPU *pGpu, NvU32 srIndex, NvU64 *pBase, NvU64 *pSize) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuSocGetSecureRegionInfo(pGpu, srIndex, pBase, pSize) gpuSocGetSecureRegionInfo_46f6a7(pGpu, srIndex, pBase, pSize)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuSocGetSecureRegionInfo_HAL(pGpu, srIndex, pBase, pSize) gpuSocGetSecureRegionInfo(pGpu, srIndex, pBase, pSize)
+
+static inline NvU32 gpuGetDefaultResetFSMStateTransitionUs_4d4998(struct OBJGPU *pGpu) {
+    return 10;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvU32 gpuGetDefaultResetFSMStateTransitionUs(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return 0;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetDefaultResetFSMStateTransitionUs(pGpu) gpuGetDefaultResetFSMStateTransitionUs_4d4998(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetDefaultResetFSMStateTransitionUs_HAL(pGpu) gpuGetDefaultResetFSMStateTransitionUs(pGpu)
+
+static inline NvBool gpuGetIsCmpSku_72a2e1(struct OBJGPU *pGpu) {
+    NV_ASSERT_PRECOMP(0);
+    return NV_FALSE;
+}
+
+static inline NvBool gpuGetIsCmpSku_3dd2c9(struct OBJGPU *pGpu) {
+    return NV_FALSE;
+}
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvBool gpuGetIsCmpSku(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetIsCmpSku(pGpu) gpuGetIsCmpSku_72a2e1(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuGetIsCmpSku_HAL(pGpu) gpuGetIsCmpSku(pGpu)
+
+NV_STATUS gpuRusdRequestPermanentDataPoll_IMPL(struct OBJGPU *pGpu);
+
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuRusdRequestPermanentDataPoll(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuRusdRequestPermanentDataPoll(pGpu) gpuRusdRequestPermanentDataPoll_IMPL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+#define gpuRusdRequestPermanentDataPoll_HAL(pGpu) gpuRusdRequestPermanentDataPoll(pGpu)
+
+void gpuGetIdInfo_T234D(struct OBJGPU *pGpu);
+
+void gpuGetIdInfo_T264D(struct OBJGPU *pGpu);
+
+void gpuGetIdInfo_T256D(struct OBJGPU *pGpu);
+
+const GPUCHILDORDER *gpuGetChildrenOrder_T234D(struct OBJGPU *pGpu, NvU32 *pNumEntries);
+
+const GPUCHILDORDER *gpuGetChildrenOrder_T256D(struct OBJGPU *pGpu, NvU32 *pNumEntries);
+
+const GPUCHILDPRESENT *gpuGetChildrenPresent_T234D(struct OBJGPU *pGpu, NvU32 *pNumEntries);
+
+const GPUCHILDPRESENT *gpuGetChildrenPresent_T256D(struct OBJGPU *pGpu, NvU32 *pNumEntries);
+
+const CLASSDESCRIPTOR *gpuGetEngClassDescriptorList_T234D(struct OBJGPU *pGpu, NvU32 *arg2);
+
+const CLASSDESCRIPTOR *gpuGetEngClassDescriptorList_T264D(struct OBJGPU *pGpu, NvU32 *arg2);
+
+const CLASSDESCRIPTOR *gpuGetEngClassDescriptorList_T256D(struct OBJGPU *pGpu, NvU32 *arg2);
+
+const NvU32 *gpuGetNoEngClassList_T234D(struct OBJGPU *pGpu, NvU32 *arg2);
+
+const NvU32 *gpuGetNoEngClassList_T264D(struct OBJGPU *pGpu, NvU32 *arg2);
+
+const NvU32 *gpuGetNoEngClassList_T256D(struct OBJGPU *pGpu, NvU32 *arg2);
+
+static inline void gpuServiceInterruptsAllGpus(struct OBJGPU *pGpu) {
+    return;
+}
+
+static inline ENGDESCRIPTOR *gpuGetInitEngineDescriptors(struct OBJGPU *pGpu) {
+    return pGpu->engineOrder.pEngineInitDescriptors;
+}
+
+static inline ENGDESCRIPTOR *gpuGetLoadEngineDescriptors(struct OBJGPU *pGpu) {
+    return pGpu->engineOrder.pEngineLoadDescriptors;
+}
+
+static inline ENGDESCRIPTOR *gpuGetUnloadEngineDescriptors(struct OBJGPU *pGpu) {
+    return pGpu->engineOrder.pEngineUnloadDescriptors;
+}
+
+static inline ENGDESCRIPTOR *gpuGetDestroyEngineDescriptors(struct OBJGPU *pGpu) {
+    return pGpu->engineOrder.pEngineDestroyDescriptors;
+}
+
+static inline NvU32 gpuGetNumEngDescriptors(struct OBJGPU *pGpu) {
+    return pGpu->engineOrder.numEngineDescriptors;
+}
+
+static inline NvU32 gpuGetMode(struct OBJGPU *pGpu) {
+    return pGpu->computeModeRefCount > 0 ? 2 : 1;
+}
+
+static inline ACPI_DSM_FUNCTION gpuGetDispStatusHotplugFunc(struct OBJGPU *pGpu) {
+    return pGpu->acpi.dispStatusHotplugFunc;
+}
+
+static inline ACPI_DSM_FUNCTION gpuGetDispStatusConfigFunc(struct OBJGPU *pGpu) {
+    return pGpu->acpi.dispStatusConfigFunc;
+}
+
+static inline ACPI_DSM_FUNCTION gpuGetPerfPostPowerStateFunc(struct OBJGPU *pGpu) {
+    return pGpu->acpi.perfPostPowerStateFunc;
+}
+
+static inline ACPI_DSM_FUNCTION gpuGetStereo3dStateActiveFunc(struct OBJGPU *pGpu) {
+    return pGpu->acpi.stereo3dStateActiveFunc;
+}
+
+static inline NvU32 gpuGetPmcBoot0(struct OBJGPU *pGpu) {
+    return pGpu->chipId0;
+}
+
+static inline NV_STATUS gpuGetSparseTextureComputeMode(struct OBJGPU *pGpu, NvU32 *arg2, NvU32 *arg3, NvU32 *arg4) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+static inline NV_STATUS gpuSetSparseTextureComputeMode(struct OBJGPU *pGpu, NvU32 arg2) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+static inline struct OBJFIFO *gpuGetFifoShared(struct OBJGPU *pGpu) {
+    return ((void *)0);
+}
+
+static inline struct KernelFifo *gpuGetKernelFifoShared(struct OBJGPU *pGpu) {
+    return ((void *)0);
+}
+
+static inline struct OBJHOSTENG *gpuGetHosteng(struct OBJGPU *pGpu, ENGDESCRIPTOR arg2) {
+    return ((void *)0);
+}
+
+static inline NV_STATUS gpuConstructUserRegisterAccessMap(struct OBJGPU *pGpu) {
+    return NV_OK;
+}
+
+static inline NV_STATUS gpuInitRegisterAccessMap(struct OBJGPU *pGpu, NvU8 *arg2, NvU32 arg3, const NvU8 *arg4, const NvU32 arg5) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+static inline NV_STATUS gpuSetUserRegisterAccessPermissions(struct OBJGPU *pGpu, NvU32 offset, NvU32 size, NvBool bAllow) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+static inline NV_STATUS gpuSetUserRegisterAccessPermissionsInBulk(struct OBJGPU *pGpu, const NvU32 *regOffsetsAndSizesArr, NvU32 arrSizeBytes, NvBool bAllow) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+static inline NvBool gpuGetUserRegisterAccessPermissions(struct OBJGPU *pGpu, NvU32 offset) {
+    return NV_FALSE;
+}
+
+static inline void gpuDumpCallbackRegister(struct OBJGPU *pGpu) {
+    return;
+}
+
+static inline NvU32 gpuGetChipMajRev(struct OBJGPU *pGpu) {
+    return pGpu->chipInfo.pmcBoot42.majorRev;
+}
+
+static inline NvU32 gpuGetChipMinRev(struct OBJGPU *pGpu) {
+    return pGpu->chipInfo.pmcBoot42.minorRev;
+}
+
+static inline NvBool gpuIsMaskRevisionA01(struct OBJGPU *pGpu) {
+    return (gpuGetChipMajRev(pGpu) == 10) && (gpuGetChipMinRev(pGpu) == 1);
+}
+
+static inline NvU32 gpuGetChipImpl(struct OBJGPU *pGpu) {
+    return pGpu->chipInfo.implementationId;
+}
+
+static inline NvU32 gpuGetChipArch(struct OBJGPU *pGpu) {
+    return pGpu->chipInfo.platformId;
+}
+
+static inline NvU32 gpuGetChipId(struct OBJGPU *pGpu) {
+    return pGpu->chipInfo.chipId;
+}
+
+static inline NvU32 gpuGetChipMinExtRev(struct OBJGPU *pGpu) {
+    return pGpu->chipInfo.pmcBoot42.minorExtRev;
+}
+
+static inline NvU64 gpuGetVmmuSegmentSize(struct OBJGPU *pGpu) {
+    return pGpu->vmmuSegmentSize;
+}
+
+static inline NvU32 gpuGetNumChildren(struct OBJGPU *pGpu) {
+    return ((sizeof (pGpu->children.pChild) / sizeof ((pGpu->children.pChild)[0])));
+}
+
+static inline Dynamic *gpuGetChild(struct OBJGPU *pGpu, NvU32 idx) {
+    return pGpu->children.pChild[idx];
+}
+
+static inline const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *gpuGetChipInfo(struct OBJGPU *pGpu) {
+    return pGpu->pChipInfo;
+}
+
+static inline NvBool gpuIsBar2MovedByVtd(struct OBJGPU *pGpu) {
+    return pGpu->bBar2MovedByVtd;
+}
+
+static inline NvBool gpuIsBar1Size64Bit(struct OBJGPU *pGpu) {
+    return pGpu->bBar1Is64Bit;
+}
+
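The inline accessors above and the NvBool getters that follow read OBJGPU state directly, with no disabled-stub or HAL indirection, so they can be called as plain functions. For example, a hypothetical walk over the per-GPU child object table using gpuGetNumChildren() and gpuGetChild():

    /* Sketch: counting constructed child objects. exampleCountChildren() is
       illustrative, not part of this patch; treating NULL as an unpopulated
       pChild slot is an assumption. */
    static NvU32 exampleCountChildren(struct OBJGPU *pGpu)
    {
        NvU32 i, count = 0;

        for (i = 0; i < gpuGetNumChildren(pGpu); i++)
        {
            if (gpuGetChild(pGpu, i) != NULL)
                count++;
        }
        return count;
    }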
+static inline NvBool gpuIsSurpriseRemovalSupported(struct OBJGPU *pGpu) {
+    return pGpu->bSurpriseRemovalSupported;
+}
+
+static inline NvBool gpuIsReplayableTraceEnabled(struct OBJGPU *pGpu) {
+    return pGpu->bReplayableTraceEnabled;
+}
+
+static inline NvBool gpuIsStateLoading(struct OBJGPU *pGpu) {
+    return pGpu->bStateLoading;
+}
+
+static inline NvBool gpuIsStateUnloading(struct OBJGPU *pGpu) {
+    return pGpu->bStateUnloading;
+}
+
+static inline NvBool gpuIsStateLoaded(struct OBJGPU *pGpu) {
+    return pGpu->bStateLoaded;
+}
+
+static inline NvBool gpuIsFullyConstructed(struct OBJGPU *pGpu) {
+    return pGpu->bFullyConstructed;
+}
+
+static inline NvBool gpuIsUnifiedMemorySpaceEnabled(struct OBJGPU *pGpu) {
+    return pGpu->bUnifiedMemorySpaceEnabled;
+}
+
+static inline NvBool gpuIsWarBug4040336Enabled(struct OBJGPU *pGpu) {
+    return pGpu->bBf3WarBug4040336Enabled;
+}
+
+static inline NvBool gpuIsSriovEnabled(struct OBJGPU *pGpu) {
+    return pGpu->bSriovEnabled;
+}
+
+static inline NvBool gpuIsCacheOnlyModeEnabled(struct OBJGPU *pGpu) {
+    return pGpu->bCacheOnlyMode;
+}
+
+static inline NvBool gpuIsSplitVasManagementServerClientRmEnabled(struct OBJGPU *pGpu) {
+    return pGpu->bSplitVasManagementServerClientRm;
+}
+
+static inline NvBool gpuIsWarBug200577889SriovHeavyEnabled(struct OBJGPU *pGpu) {
+    return pGpu->bWarBug200577889SriovHeavyEnabled;
+}
+
+static inline NvBool gpuIsPipelinedPteMemEnabled(struct OBJGPU *pGpu) {
+    return pGpu->bPipelinedPteMemEnabled;
+}
+
+static inline NvBool gpuIsBarPteInSysmemSupported(struct OBJGPU *pGpu) {
+    return pGpu->bIsBarPteInSysmemSupported;
+}
+
+static inline NvBool gpuIsRegUsesGlobalSurfaceOverridesEnabled(struct OBJGPU *pGpu) {
+    return pGpu->bRegUsesGlobalSurfaceOverrides;
+}
+
+static inline NvBool gpuIsTwoStageRcRecoveryEnabled(struct OBJGPU *pGpu) {
+    return pGpu->bTwoStageRcRecoveryEnabled;
+}
+
+static inline NvBool gpuIsInD3Cold(struct OBJGPU *pGpu) {
+    return pGpu->bInD3Cold;
+}
+
+static inline NvBool gpuIsClientRmAllocatedCtxBufferEnabled(struct OBJGPU *pGpu) {
+    return pGpu->bClientRmAllocatedCtxBuffer;
+}
+
+static inline NvBool gpuIsEccPageRetirementWithSliAllowed(struct OBJGPU *pGpu) {
+    return pGpu->bEccPageRetirementWithSliAllowed;
+}
+
+static inline NvBool gpuIsInstanceMemoryAlwaysCached(struct OBJGPU *pGpu) {
+    return pGpu->bInstanceMemoryAlwaysCached;
+}
+
+static inline NvBool gpuIsRmProfilingPrivileged(struct OBJGPU *pGpu) {
+    return pGpu->bRmProfilingPrivileged;
+}
+
+static inline NvBool gpuIsGeforceSmb(struct OBJGPU *pGpu) {
+    return pGpu->bGeforceSmb;
+}
+
+static inline NvBool gpuIsGeforceBranded(struct OBJGPU *pGpu) {
+    return pGpu->bIsGeforce;
+}
+
+static inline NvBool gpuIsQuadroBranded(struct OBJGPU *pGpu) {
+    return pGpu->bIsQuadro;
+}
+
+static inline NvBool gpuIsVgxBranded(struct OBJGPU *pGpu) {
+    return pGpu->bIsVgx;
+}
+
+static inline NvBool gpuIsNvidiaNvsBranded(struct OBJGPU *pGpu) {
+    return pGpu->bIsNvidiaNvs;
+}
+
+static inline NvBool gpuIsTitanBranded(struct OBJGPU *pGpu) {
+    return pGpu->bIsTitan;
+}
+
+static inline NvBool gpuIsTeslaBranded(struct OBJGPU *pGpu) {
+    return pGpu->bIsTesla;
+}
+
+static inline NvBool gpuIsComputePolicyTimesliceSupported(struct OBJGPU *pGpu) {
+    return pGpu->bComputePolicyTimesliceSupported;
+}
+
+static inline NvBool gpuIsSriovCapable(struct OBJGPU *pGpu) {
+    return pGpu->bSriovCapable;
+}
+
+static inline NvBool gpuIsNonPowerOf2ChannelCountSupported(struct OBJGPU *pGpu) {
+    return pGpu->bNonPowerOf2ChannelCountSupported;
+}
+
+static inline NvBool gpuIsVfResizableBAR1Supported(struct OBJGPU *pGpu) {
+    return pGpu->bVfResizableBAR1Supported;
+}
+
+static inline NvBool gpuIsVoltaHubIntrSupported(struct OBJGPU *pGpu) {
+    return pGpu->bVoltaHubIntrSupported;
+}
+
+static inline NvBool gpuIsUsePmcDeviceEnableForHostEngineEnabled(struct OBJGPU *pGpu) {
+    return pGpu->bUsePmcDeviceEnableForHostEngine;
+}
+
+static inline NvBool gpuIsSelfHosted(struct OBJGPU *pGpu) {
+    return pGpu->bIsSelfHosted;
+}
+
+static inline NvBool gpuIsGspOwnedFaultBuffersEnabled(struct OBJGPU *pGpu) {
+    return pGpu->bIsGspOwnedFaultBuffersEnabled;
+}
+
+NV_STATUS gpuConstruct_IMPL(struct OBJGPU *arg_pGpu, NvU32 arg_gpuInstance, NvU32 arg_gpuId, NvUuid *arg_pUuid, struct GpuArch *arg_pGpuArch);
+
+#define __nvoc_gpuConstruct(arg_pGpu, arg_gpuInstance, arg_gpuId, arg_pUuid, arg_pGpuArch) gpuConstruct_IMPL(arg_pGpu, arg_gpuInstance, arg_gpuId, arg_pUuid, arg_pGpuArch)
+NV_STATUS gpuBindHalLegacy_IMPL(struct OBJGPU *pGpu, NvU32 chipId0, NvU32 chipId1, NvU32 socChipId0);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuBindHalLegacy(struct OBJGPU *pGpu, NvU32 chipId0, NvU32 chipId1, NvU32 socChipId0) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuBindHalLegacy(pGpu, chipId0, chipId1, socChipId0) gpuBindHalLegacy_IMPL(pGpu, chipId0, chipId1, socChipId0)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuPostConstruct_IMPL(struct OBJGPU *pGpu, GPUATTACHARG *arg2);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuPostConstruct(struct OBJGPU *pGpu, GPUATTACHARG *arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuPostConstruct(pGpu, arg2) gpuPostConstruct_IMPL(pGpu, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+void gpuDestruct_IMPL(struct OBJGPU *pGpu);
+
+#define __nvoc_gpuDestruct(pGpu) gpuDestruct_IMPL(pGpu)
+NV_STATUS gpuStateInit_IMPL(struct OBJGPU *pGpu);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuStateInit(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuStateInit(pGpu) gpuStateInit_IMPL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuStateUnload_IMPL(struct OBJGPU *pGpu, NvU32 arg2);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuStateUnload(struct OBJGPU *pGpu, NvU32 arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuStateUnload(pGpu, arg2) gpuStateUnload_IMPL(pGpu, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuInitDispIpHal_IMPL(struct OBJGPU *pGpu, NvU32 ipver);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuInitDispIpHal(struct OBJGPU *pGpu, NvU32 ipver) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuInitDispIpHal(pGpu, ipver) gpuInitDispIpHal_IMPL(pGpu, ipver)
+#endif //__nvoc_gpu_h_disabled
+
+NvBool gpuIsImplementation_IMPL(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg2);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvBool gpuIsImplementation(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuIsImplementation(pGpu, arg2) gpuIsImplementation_IMPL(pGpu, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+NvBool gpuIsImplementationOrBetter_IMPL(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg2);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvBool gpuIsImplementationOrBetter(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuIsImplementationOrBetter(pGpu, arg2) gpuIsImplementationOrBetter_IMPL(pGpu, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+NvBool gpuIsGpuFullPower_IMPL(struct OBJGPU *pGpu);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvBool gpuIsGpuFullPower(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuIsGpuFullPower(pGpu) gpuIsGpuFullPower_IMPL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+NvBool gpuIsGpuFullPowerForPmResume_IMPL(struct OBJGPU *pGpu);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvBool gpuIsGpuFullPowerForPmResume(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuIsGpuFullPowerForPmResume(pGpu) gpuIsGpuFullPowerForPmResume_IMPL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuBuildClassDB_IMPL(struct OBJGPU *pGpu);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuBuildClassDB(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuBuildClassDB(pGpu) gpuBuildClassDB_IMPL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuDestroyClassDB_IMPL(struct OBJGPU *pGpu);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuDestroyClassDB(struct OBJGPU *pGpu) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuDestroyClassDB(pGpu) gpuDestroyClassDB_IMPL(pGpu)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuDeleteEngineFromClassDB_IMPL(struct OBJGPU *pGpu, NvU32 arg2);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuDeleteEngineFromClassDB(struct OBJGPU *pGpu, NvU32 arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuDeleteEngineFromClassDB(pGpu, arg2) gpuDeleteEngineFromClassDB_IMPL(pGpu, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuDeleteEngineOnPreInit_IMPL(struct OBJGPU *pGpu, NvU32 arg2);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuDeleteEngineOnPreInit(struct OBJGPU *pGpu, NvU32 arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuDeleteEngineOnPreInit(pGpu, arg2) gpuDeleteEngineOnPreInit_IMPL(pGpu, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuAddClassToClassDBByEngTag_IMPL(struct OBJGPU *pGpu, NvU32 arg2);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuAddClassToClassDBByEngTag(struct OBJGPU *pGpu, NvU32 arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuAddClassToClassDBByEngTag(pGpu, arg2) gpuAddClassToClassDBByEngTag_IMPL(pGpu, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuAddClassToClassDBByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg2);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuAddClassToClassDBByClassId(struct OBJGPU *pGpu, NvU32 arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuAddClassToClassDBByClassId(pGpu, arg2) gpuAddClassToClassDBByClassId_IMPL(pGpu, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuAddClassToClassDBByEngTagClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg2, NvU32 arg3);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuAddClassToClassDBByEngTagClassId(struct OBJGPU *pGpu, NvU32 arg2, NvU32 arg3) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuAddClassToClassDBByEngTagClassId(pGpu, arg2, arg3) gpuAddClassToClassDBByEngTagClassId_IMPL(pGpu, arg2, arg3)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuDeleteClassFromClassDBByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg2);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuDeleteClassFromClassDBByClassId(struct OBJGPU *pGpu, NvU32 arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuDeleteClassFromClassDBByClassId(pGpu, arg2) gpuDeleteClassFromClassDBByClassId_IMPL(pGpu, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuDeleteClassFromClassDBByEngTag_IMPL(struct OBJGPU *pGpu, NvU32 arg2);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuDeleteClassFromClassDBByEngTag(struct OBJGPU *pGpu, NvU32 arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuDeleteClassFromClassDBByEngTag(pGpu, arg2) gpuDeleteClassFromClassDBByEngTag_IMPL(pGpu, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuDeleteClassFromClassDBByEngTagClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg2, NvU32 arg3);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuDeleteClassFromClassDBByEngTagClassId(struct OBJGPU *pGpu, NvU32 arg2, NvU32 arg3) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuDeleteClassFromClassDBByEngTagClassId(pGpu, arg2, arg3) gpuDeleteClassFromClassDBByEngTagClassId_IMPL(pGpu, arg2, arg3)
+#endif //__nvoc_gpu_h_disabled
+
+NvBool gpuIsClassSupported_IMPL(struct OBJGPU *pGpu, NvU32 arg2);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NvBool gpuIsClassSupported(struct OBJGPU *pGpu, NvU32 arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuIsClassSupported(pGpu, arg2) gpuIsClassSupported_IMPL(pGpu, arg2)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuGetClassByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg2, CLASSDESCRIPTOR **arg3);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuGetClassByClassId(struct OBJGPU *pGpu, NvU32 arg2, CLASSDESCRIPTOR **arg3) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetClassByClassId(pGpu, arg2, arg3) gpuGetClassByClassId_IMPL(pGpu, arg2, arg3)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuGetClassByEngineAndClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg2, NvU32 arg3, CLASSDESCRIPTOR **arg4);
+
+#ifdef __nvoc_gpu_h_disabled
+static inline NV_STATUS gpuGetClassByEngineAndClassId(struct OBJGPU *pGpu, NvU32 arg2, NvU32 arg3, CLASSDESCRIPTOR **arg4) {
+    NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_h_disabled
+#define gpuGetClassByEngineAndClassId(pGpu, arg2, arg3, arg4) gpuGetClassByEngineAndClassId_IMPL(pGpu, arg2, arg3, arg4)
+#endif //__nvoc_gpu_h_disabled
+
+NV_STATUS gpuGetClassList_IMPL(struct OBJGPU *pGpu, NvU32 *arg2, NvU32
*arg3, NvU32 arg4); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetClassList(struct OBJGPU *pGpu, NvU32 *arg2, NvU32 *arg3, NvU32 arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetClassList(pGpu, arg2, arg3, arg4) gpuGetClassList_IMPL(pGpu, arg2, arg3, arg4) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuConstructEngineTable_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuConstructEngineTable(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuConstructEngineTable(pGpu) gpuConstructEngineTable_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +void gpuDestroyEngineTable_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDestroyEngineTable(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDestroyEngineTable(pGpu) gpuDestroyEngineTable_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuUpdateEngineTable_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuUpdateEngineTable(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuUpdateEngineTable(pGpu) gpuUpdateEngineTable_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuCheckEngineTable_IMPL(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg2); + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuCheckEngineTable(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuCheckEngineTable(pGpu, arg2) gpuCheckEngineTable_IMPL(pGpu, arg2) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuXlateEngDescToClientEngineId_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg2, RM_ENGINE_TYPE *arg3); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuXlateEngDescToClientEngineId(struct OBJGPU *pGpu, ENGDESCRIPTOR arg2, RM_ENGINE_TYPE *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuXlateEngDescToClientEngineId(pGpu, arg2, arg3) gpuXlateEngDescToClientEngineId_IMPL(pGpu, arg2, arg3) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuXlateClientEngineIdToEngDesc_IMPL(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg2, ENGDESCRIPTOR *arg3); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuXlateClientEngineIdToEngDesc(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg2, ENGDESCRIPTOR *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuXlateClientEngineIdToEngDesc(pGpu, arg2, arg3) gpuXlateClientEngineIdToEngDesc_IMPL(pGpu, arg2, arg3) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetFlcnFromClientEngineId_IMPL(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg2, struct Falcon **arg3); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetFlcnFromClientEngineId(struct OBJGPU *pGpu, RM_ENGINE_TYPE arg2, struct Falcon **arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetFlcnFromClientEngineId(pGpu, arg2, arg3) gpuGetFlcnFromClientEngineId_IMPL(pGpu, arg2, arg3) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsEngDescSupported_IMPL(struct OBJGPU 
*pGpu, NvU32 arg2); + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsEngDescSupported(struct OBJGPU *pGpu, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsEngDescSupported(pGpu, arg2) gpuIsEngDescSupported_IMPL(pGpu, arg2) +#endif //__nvoc_gpu_h_disabled + +RM_ENGINE_TYPE gpuGetRmEngineType_IMPL(NvU32 index); + +#define gpuGetRmEngineType(index) gpuGetRmEngineType_IMPL(index) +void gpuGetRmEngineTypeList_IMPL(NvU32 *pNv2080EngineList, NvU32 engineCount, RM_ENGINE_TYPE *pRmEngineList); + +#define gpuGetRmEngineTypeList(pNv2080EngineList, engineCount, pRmEngineList) gpuGetRmEngineTypeList_IMPL(pNv2080EngineList, engineCount, pRmEngineList) +NvU32 gpuGetNv2080EngineType_IMPL(RM_ENGINE_TYPE index); + +#define gpuGetNv2080EngineType(index) gpuGetNv2080EngineType_IMPL(index) +void gpuGetNv2080EngineTypeList_IMPL(RM_ENGINE_TYPE *pRmEngineList, NvU32 engineCount, NvU32 *pNv2080EngineList); + +#define gpuGetNv2080EngineTypeList(pRmEngineList, engineCount, pNv2080EngineList) gpuGetNv2080EngineTypeList_IMPL(pRmEngineList, engineCount, pNv2080EngineList) +NV_STATUS gpuGetRmEngineTypeCapMask_IMPL(NvU32 *NV2080EngineTypeCap, NvU32 capSize, NvU32 *RmEngineTypeCap); + +#define gpuGetRmEngineTypeCapMask(NV2080EngineTypeCap, capSize, RmEngineTypeCap) gpuGetRmEngineTypeCapMask_IMPL(NV2080EngineTypeCap, capSize, RmEngineTypeCap) +const char *gpuRmEngineTypeToString_IMPL(RM_ENGINE_TYPE engineType, NvBool bNvPrintfStr); + +#define gpuRmEngineTypeToString(engineType, bNvPrintfStr) gpuRmEngineTypeToString_IMPL(engineType, bNvPrintfStr) +NvU32 gpuGetGpuMask_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetGpuMask(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetGpuMask(pGpu) gpuGetGpuMask_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +void gpuChangeComputeModeRefCount_IMPL(struct OBJGPU *pGpu, NvU32 arg2); + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuChangeComputeModeRefCount(struct OBJGPU *pGpu, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuChangeComputeModeRefCount(pGpu, arg2) gpuChangeComputeModeRefCount_IMPL(pGpu, arg2) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuEnterShutdown_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuEnterShutdown(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuEnterShutdown(pGpu) gpuEnterShutdown_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSanityCheck_IMPL(struct OBJGPU *pGpu, NvU32 arg2, NvU32 *arg3); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSanityCheck(struct OBJGPU *pGpu, NvU32 arg2, NvU32 *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSanityCheck(pGpu, arg2, arg3) gpuSanityCheck_IMPL(pGpu, arg2, arg3) +#endif //__nvoc_gpu_h_disabled + +DEVICE_MAPPING *gpuGetDeviceMapping_IMPL(struct OBJGPU *pGpu, DEVICE_INDEX arg2, NvU32 arg3); + +#ifdef __nvoc_gpu_h_disabled +static inline DEVICE_MAPPING *gpuGetDeviceMapping(struct OBJGPU *pGpu, DEVICE_INDEX arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetDeviceMapping(pGpu, arg2, arg3) 
gpuGetDeviceMapping_IMPL(pGpu, arg2, arg3) +#endif //__nvoc_gpu_h_disabled + +DEVICE_MAPPING *gpuGetDeviceMappingFromDeviceID_IMPL(struct OBJGPU *pGpu, NvU32 arg2, NvU32 arg3); + +#ifdef __nvoc_gpu_h_disabled +static inline DEVICE_MAPPING *gpuGetDeviceMappingFromDeviceID(struct OBJGPU *pGpu, NvU32 arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetDeviceMappingFromDeviceID(pGpu, arg2, arg3) gpuGetDeviceMappingFromDeviceID_IMPL(pGpu, arg2, arg3) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetGidInfo_IMPL(struct OBJGPU *pGpu, NvU8 **ppGidString, NvU32 *pGidStrlen, NvU32 gidFlags); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetGidInfo(struct OBJGPU *pGpu, NvU8 **ppGidString, NvU32 *pGidStrlen, NvU32 gidFlags) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetGidInfo(pGpu, ppGidString, pGidStrlen, gidFlags) gpuGetGidInfo_IMPL(pGpu, ppGidString, pGidStrlen, gidFlags) +#endif //__nvoc_gpu_h_disabled + +void gpuSetDisconnectedProperties_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuSetDisconnectedProperties(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuSetDisconnectedProperties(pGpu) gpuSetDisconnectedProperties_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuAddConstructedFalcon_IMPL(struct OBJGPU *pGpu, struct Falcon *arg2); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuAddConstructedFalcon(struct OBJGPU *pGpu, struct Falcon *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuAddConstructedFalcon(pGpu, arg2) gpuAddConstructedFalcon_IMPL(pGpu, arg2) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuRemoveConstructedFalcon_IMPL(struct OBJGPU *pGpu, struct Falcon *arg2); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuRemoveConstructedFalcon(struct OBJGPU *pGpu, struct Falcon *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuRemoveConstructedFalcon(pGpu, arg2) gpuRemoveConstructedFalcon_IMPL(pGpu, arg2) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetConstructedFalcon_IMPL(struct OBJGPU *pGpu, NvU32 arg2, struct Falcon **arg3); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetConstructedFalcon(struct OBJGPU *pGpu, NvU32 arg2, struct Falcon **arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetConstructedFalcon(pGpu, arg2, arg3) gpuGetConstructedFalcon_IMPL(pGpu, arg2, arg3) +#endif //__nvoc_gpu_h_disabled + +struct OBJENGSTATE *gpuGetEngstate_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg2); + +#ifdef __nvoc_gpu_h_disabled +static inline struct OBJENGSTATE *gpuGetEngstate(struct OBJGPU *pGpu, ENGDESCRIPTOR arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetEngstate(pGpu, arg2) gpuGetEngstate_IMPL(pGpu, arg2) +#endif //__nvoc_gpu_h_disabled + +struct OBJENGSTATE *gpuGetEngstateNoShare_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg2); + +#ifdef __nvoc_gpu_h_disabled +static inline struct OBJENGSTATE *gpuGetEngstateNoShare(struct OBJGPU *pGpu, ENGDESCRIPTOR arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; 
+} +#else //__nvoc_gpu_h_disabled +#define gpuGetEngstateNoShare(pGpu, arg2) gpuGetEngstateNoShare_IMPL(pGpu, arg2) +#endif //__nvoc_gpu_h_disabled + +void *gpuGetNextChildOfTypeUnsafe_IMPL(struct OBJGPU *pGpu, GPU_CHILD_ITER *pIt, NvU32 classId); + +#ifdef __nvoc_gpu_h_disabled +static inline void *gpuGetNextChildOfTypeUnsafe(struct OBJGPU *pGpu, GPU_CHILD_ITER *pIt, NvU32 classId) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetNextChildOfTypeUnsafe(pGpu, pIt, classId) gpuGetNextChildOfTypeUnsafe_IMPL(pGpu, pIt, classId) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetGfidState_IMPL(struct OBJGPU *pGpu, NvU32 gfid, GFID_ALLOC_STATUS *pState); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetGfidState(struct OBJGPU *pGpu, NvU32 gfid, GFID_ALLOC_STATUS *pState) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetGfidState(pGpu, gfid, pState) gpuGetGfidState_IMPL(pGpu, gfid, pState) +#endif //__nvoc_gpu_h_disabled + +void gpuSetGfidUsage_IMPL(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse); + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuSetGfidUsage(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuSetGfidUsage(pGpu, gfid, bInUse) gpuSetGfidUsage_IMPL(pGpu, gfid, bInUse) +#endif //__nvoc_gpu_h_disabled + +void gpuSetGfidInvalidated_IMPL(struct OBJGPU *pGpu, NvU32 gfid); + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuSetGfidInvalidated(struct OBJGPU *pGpu, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuSetGfidInvalidated(pGpu, gfid) gpuSetGfidInvalidated_IMPL(pGpu, gfid) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSetExternalKernelClientCount_IMPL(struct OBJGPU *pGpu, NvBool bIncr); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSetExternalKernelClientCount(struct OBJGPU *pGpu, NvBool bIncr) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSetExternalKernelClientCount(pGpu, bIncr) gpuSetExternalKernelClientCount_IMPL(pGpu, bIncr) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsInUse_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsInUse(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsInUse(pGpu) gpuIsInUse_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NvU32 gpuGetUserClientCount_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetUserClientCount(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetUserClientCount(pGpu) gpuGetUserClientCount_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NvU32 gpuGetExternalClientCount_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetExternalClientCount(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetExternalClientCount(pGpu) gpuGetExternalClientCount_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +void gpuNotifySubDeviceEvent_IMPL(struct OBJGPU *pGpu, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16); + +#ifdef 
__nvoc_gpu_h_disabled +static inline void gpuNotifySubDeviceEvent(struct OBJGPU *pGpu, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuNotifySubDeviceEvent(pGpu, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) gpuNotifySubDeviceEvent_IMPL(pGpu, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuRegisterSubdevice_IMPL(struct OBJGPU *pGpu, struct Subdevice *pSubdevice); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuRegisterSubdevice(struct OBJGPU *pGpu, struct Subdevice *pSubdevice) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuRegisterSubdevice(pGpu, pSubdevice) gpuRegisterSubdevice_IMPL(pGpu, pSubdevice) +#endif //__nvoc_gpu_h_disabled + +void gpuUnregisterSubdevice_IMPL(struct OBJGPU *pGpu, struct Subdevice *pSubdevice); + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuUnregisterSubdevice(struct OBJGPU *pGpu, struct Subdevice *pSubdevice) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuUnregisterSubdevice(pGpu, pSubdevice) gpuUnregisterSubdevice_IMPL(pGpu, pSubdevice) +#endif //__nvoc_gpu_h_disabled + +void gpuGspPluginTriggeredEvent_IMPL(struct OBJGPU *pGpu, NvU32 gfid, NvU32 notifyIndex); + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuGspPluginTriggeredEvent(struct OBJGPU *pGpu, NvU32 gfid, NvU32 notifyIndex) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuGspPluginTriggeredEvent(pGpu, gfid, notifyIndex) gpuGspPluginTriggeredEvent_IMPL(pGpu, gfid, notifyIndex) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetProcWithObject_IMPL(struct OBJGPU *pGpu, NvU32 elementID, NvU32 internalClassId, NvU32 *pPidArray, NvU32 *pPidArrayCount, MIG_INSTANCE_REF *pRef); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetProcWithObject(struct OBJGPU *pGpu, NvU32 elementID, NvU32 internalClassId, NvU32 *pPidArray, NvU32 *pPidArrayCount, MIG_INSTANCE_REF *pRef) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetProcWithObject(pGpu, elementID, internalClassId, pPidArray, pPidArrayCount, pRef) gpuGetProcWithObject_IMPL(pGpu, elementID, internalClassId, pPidArray, pPidArrayCount, pRef) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuFindClientInfoWithPidIterator_IMPL(struct OBJGPU *pGpu, NvU32 pid, NvU32 subPid, NvU32 internalClassId, NV2080_CTRL_GPU_PID_INFO_DATA *pData, NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo, MIG_INSTANCE_REF *pRef, NvBool bGlobalInfo); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuFindClientInfoWithPidIterator(struct OBJGPU *pGpu, NvU32 pid, NvU32 subPid, NvU32 internalClassId, NV2080_CTRL_GPU_PID_INFO_DATA *pData, NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo, MIG_INSTANCE_REF *pRef, NvBool bGlobalInfo) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuFindClientInfoWithPidIterator(pGpu, pid, subPid, internalClassId, pData, pSmcInfo, pRef, bGlobalInfo) gpuFindClientInfoWithPidIterator_IMPL(pGpu, pid, subPid, internalClassId, pData, pSmcInfo, pRef, bGlobalInfo) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsOnTheBus_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled 
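+// Every wrapper in this header follows the same pattern: when OBJGPU support
+// is compiled out (__nvoc_gpu_h_disabled), the wrapper becomes an asserting
+// stub that returns a benign failure value, so call sites compile unchanged.
+// A minimal caller sketch (illustrative only, not part of the generated
+// header; assumes a valid pGpu):
+//
+//     if (!gpuIsOnTheBus(pGpu))       // stub yields NV_FALSE when disabled
+//     {
+//         return NV_ERR_GPU_IS_LOST;  // treat the GPU as fallen off the bus
+//     }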
+static inline NvBool gpuIsOnTheBus(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsOnTheBus(pGpu) gpuIsOnTheBus_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuEnterStandby_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuEnterStandby(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuEnterStandby(pGpu) gpuEnterStandby_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuEnterHibernate_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuEnterHibernate(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuEnterHibernate(pGpu) gpuEnterHibernate_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuResumeFromStandby_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuResumeFromStandby(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuResumeFromStandby(pGpu) gpuResumeFromStandby_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuResumeFromHibernate_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuResumeFromHibernate(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuResumeFromHibernate(pGpu) gpuResumeFromHibernate_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuCheckSysmemAccess_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuCheckSysmemAccess(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuCheckSysmemAccess(pGpu) gpuCheckSysmemAccess_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuBootGspRmProxy_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuBootGspRmProxy(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuBootGspRmProxy(pGpu) gpuBootGspRmProxy_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsPciBusFamily_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsPciBusFamily(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsPciBusFamily(pGpu) gpuIsPciBusFamily_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +void gpuInitChipInfo_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuInitChipInfo(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuInitChipInfo(pGpu) gpuInitChipInfo_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetChipDetails_IMPL(struct OBJGPU *pGpu, NV2080_CTRL_GPU_GET_CHIP_DETAILS_PARAMS *arg2); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetChipDetails(struct OBJGPU *pGpu, NV2080_CTRL_GPU_GET_CHIP_DETAILS_PARAMS *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetChipDetails(pGpu, arg2) 
gpuGetChipDetails_IMPL(pGpu, arg2) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSanityCheckRegRead_IMPL(struct OBJGPU *pGpu, NvU32 addr, NvU32 size, void *pValue); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSanityCheckRegRead(struct OBJGPU *pGpu, NvU32 addr, NvU32 size, void *pValue) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSanityCheckRegRead(pGpu, addr, size, pValue) gpuSanityCheckRegRead_IMPL(pGpu, addr, size, pValue) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSanityCheckRegisterAccess_IMPL(struct OBJGPU *pGpu, NvU32 addr, NvU32 *pRetVal); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSanityCheckRegisterAccess(struct OBJGPU *pGpu, NvU32 addr, NvU32 *pRetVal) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSanityCheckRegisterAccess(pGpu, addr, pRetVal) gpuSanityCheckRegisterAccess_IMPL(pGpu, addr, pRetVal) +#endif //__nvoc_gpu_h_disabled + +const NvU32 *gpuGetGenericClassList_IMPL(struct OBJGPU *pGpu, NvU32 *arg2); + +#ifdef __nvoc_gpu_h_disabled +static inline const NvU32 *gpuGetGenericClassList(struct OBJGPU *pGpu, NvU32 *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetGenericClassList(pGpu, arg2) gpuGetGenericClassList_IMPL(pGpu, arg2) +#endif //__nvoc_gpu_h_disabled + +void gpuSetRecoveryRebootRequired_IMPL(struct OBJGPU *pGpu, NvBool bRebootRequired, NvBool bBlockNewWorkload); + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuSetRecoveryRebootRequired(struct OBJGPU *pGpu, NvBool bRebootRequired, NvBool bBlockNewWorkload) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuSetRecoveryRebootRequired(pGpu, bRebootRequired, bBlockNewWorkload) gpuSetRecoveryRebootRequired_IMPL(pGpu, bRebootRequired, bBlockNewWorkload) +#endif //__nvoc_gpu_h_disabled + +#undef PRIVATE_FIELD + + +// Look up pGpu associated with a pResourceRef +NV_STATUS gpuGetByRef (RsResourceRef *pContextRef, NvBool *pbBroadcast, struct OBJGPU **ppGpu); + +// Look up pGpu associated with a hResource +NV_STATUS gpuGetByHandle(struct RsClient *pClient, NvHandle hResource, NvBool *pbBroadcast, struct OBJGPU **ppGpu); + +#define GPU_GFID_PF (0) +#define IS_GFID_PF(gfid) (((NvU32)(gfid)) == GPU_GFID_PF) +#define IS_GFID_VF(gfid) (((NvU32)(gfid)) != GPU_GFID_PF) +// Invalid P2P GFID +#define INVALID_P2P_GFID (0xFFFFFFFF) +#define INVALID_FABRIC_PARTITION_ID (0xFFFFFFFF) + +// +// Generates GPU child accessor macros (i.e.: GPU_GET_{ENG}) +// +#define GPU_CHILD_SINGLE_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \ + static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu) { return pGpu->children.named.gpuField; } \ + ct_assert(numInstances == 1); + +#define GPU_CHILD_MULTI_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \ + static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu, NvU32 index) { return index < numInstances ? 
pGpu->children.named.gpuField[index] : NULL; }
+
+#include "gpu/gpu_child_list.h"
+
+static NV_FORCEINLINE struct Graphics *GPU_GET_GR(struct OBJGPU *pGpu) { return NULL; }
+
+// Temporary stubs
+#if RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS
+#define GPU_CHILD_LIST_DISABLED_ONLY
+#define GPU_CHILD_SINGLE_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \
+ static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu) { return NULL; }
+
+#define GPU_CHILD_MULTI_INST(className, accessorName, numInstances, bConstructEarly, gpuField) \
+ static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu, NvU32 index) { return NULL; }
+
+#include "gpu/gpu_child_list.h"
+#endif // RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS
+
+
+// Type-safe wrapper for child iteration
+#define GPU_GET_NEXT_CHILD_OF_TYPE(pGpu, pIt, className) \
+ ((className*)gpuGetNextChildOfTypeUnsafe(pGpu, pIt, classId(className)))
+
+
+//
+// Inline functions
+//
+
+//
+// This function returns the subdevice mask for a GPU.
+// For non-SLI, subdeviceInstance is 0, so this
+// function always returns 1.
+//
+
+static NV_INLINE NvU32
+gpuGetSubdeviceMask
+(
+ struct OBJGPU *pGpu
+)
+{
+
+ return 1 << pGpu->subdeviceInstance;
+}
+
+static NV_INLINE NvU32
+gpuGetInstance
+(
+ struct OBJGPU *pGpu
+)
+{
+ return pGpu->gpuInstance;
+}
+
+static NV_INLINE NvU32
+gpuGetDeviceInstance
+(
+ struct OBJGPU *pGpu
+)
+{
+ return pGpu->deviceInstance;
+}
+
+NV_INLINE
+static NvU32 gpuGetNumCEs(struct OBJGPU *pGpu)
+{
+ return pGpu->numCEs;
+}
+
+// TODO: make `const` after bug 4292180 is fixed
+static NV_INLINE /* const */ struct GpuArch *
+gpuGetArch(struct OBJGPU *pGpu)
+{
+ return pGpu->pGpuArch;
+}
+
+//
+// Per-GPU mode flag macros. In general these macros should not be
+// used, and all code paths should be the same in all environments.
+// However, a tweak is occasionally needed to work around a limitation
+// or improve speed on non-hardware platforms. IS_RTLSIM is normally
+// handled by the IS_SIMULATION case and should almost never be used.
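+// As a sketch (illustrative, not code from this change), a simulation-only
+// tweak gated by these macros would look like:
+//
+//     if (IS_SIMULATION(pGpu))
+//     {
+//         timeoutUs *= 100;   // hypothetical: stretch timeouts on fmodel/RTL
+//     }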
+//
+// IS_EMULATION     actual emulation hardware
+// IS_SIMULATION    fmodel or RTL simulation
+// IS_MODS_AMODEL   amodel under mods for trace player
+// IS_LIVE_AMODEL   amodel under windows for 3D drivers (removed)
+// IS_RTLSIM        RTL simulation
+// IS_SILICON       Real hardware
+// IS_VIRTUAL       RM is running within a guest VM
+// IS_GSP_CLIENT    RM is a GSP client with GPU support offloaded to GSP
+// IS_DCE_CLIENT    RM is a DCE client with GPU support offloaded to DCE
+//
+
+#define IS_EMULATION(pGpu) ((pGpu)->getProperty((pGpu), PDB_PROP_GPU_EMULATION))
+#define IS_SIMULATION(pGpu) (pGpu->bIsSimulation)
+#define IS_MODS_AMODEL(pGpu) (pGpu->bIsModsAmodel)
+#define IS_FMODEL(pGpu) (pGpu->bIsFmodel)
+#define IS_RTLSIM(pGpu) (pGpu->bIsRtlsim)
+#define IS_SILICON(pGpu) (!(IS_EMULATION(pGpu) || IS_SIMULATION(pGpu)))
+#define IS_PASSTHRU(pGpu) ((pGpu)->bIsPassthru)
+#define IS_GSP_CLIENT(pGpu) (RMCFG_FEATURE_GSP_CLIENT_RM && (pGpu)->isGspClient)
+#define IS_DCE_CLIENT(pGpu) (RMCFG_FEATURE_DCE_CLIENT_RM && (pGpu)->isDceClient)
+#define IS_FW_CLIENT(pGpu) (IS_GSP_CLIENT(pGpu) || IS_DCE_CLIENT(pGpu))
+#define IS_VIRTUAL(pGpu) ((pGpu)->isVirtual)
+#define IS_VIRTUAL_WITH_SRIOV(pGpu) ((pGpu)->bIsVirtualWithSriov)
+#define IS_VIRTUAL_WITH_HEAVY_SRIOV(pGpu) (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))
+#define IS_VIRTUAL_WITH_FULL_SRIOV(pGpu) (IS_VIRTUAL_WITH_SRIOV(pGpu) && !IS_VIRTUAL_WITH_HEAVY_SRIOV(pGpu))
+#define IS_VIRTUAL_WITHOUT_SRIOV(pGpu) (IS_VIRTUAL(pGpu) && !IS_VIRTUAL_WITH_SRIOV(pGpu))
+#define IS_SRIOV_HEAVY(pGpu) (gpuIsWarBug200577889SriovHeavyEnabled(pGpu))
+#define IS_SRIOV_HEAVY_GUEST(pGpu) ((IS_VIRTUAL_WITH_SRIOV(pGpu)) && IS_SRIOV_HEAVY(pGpu))
+#define IS_SRIOV_FULL_GUEST(pGpu) ((IS_VIRTUAL_WITH_SRIOV(pGpu)) && !IS_SRIOV_HEAVY(pGpu))
+#define IS_SRIOV_HEAVY_HOST(pGpu) ((hypervisorIsVgxHyper()) && IS_SRIOV_HEAVY(pGpu))
+#define IS_SRIOV_FULL_HOST(pGpu) ((hypervisorIsVgxHyper()) && gpuIsSriovEnabled(pGpu) && !IS_SRIOV_HEAVY(pGpu))
+#define IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) ((pGpu)->bVgpuGspPluginOffloadEnabled)
+#define IS_SRIOV_WITH_VGPU_GSP_ENABLED(pGpu) (gpuIsSriovEnabled(pGpu) && IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && !IS_SRIOV_HEAVY(pGpu))
+#define IS_SRIOV_WITH_VGPU_GSP_DISABLED(pGpu) (gpuIsSriovEnabled(pGpu) && !IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && !IS_SRIOV_HEAVY(pGpu))
+#define IS_MIG_RM(pGpu) ((pGpu)->bIsMigRm)
+
+NV_STATUS gpuCtrlExecRegOps(struct OBJGPU *, struct Graphics *, NvHandle, NvHandle, NV2080_CTRL_GPU_REG_OP *, NvU32, NvBool);
+NV_STATUS gpuValidateRegOps(struct OBJGPU *, NV2080_CTRL_GPU_REG_OP *, NvU32, NvBool, NvBool, NvBool);
+
+// GPU Sanity Check Flags
+#define GPU_SANITY_CHECK_FLAGS_BOOT_0 NVBIT(0)
+#define GPU_SANITY_CHECK_FLAGS_OFF_BY_N NVBIT(1)
+#define GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH NVBIT(2)
+#define GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED NVBIT(3)
+#define GPU_SANITY_CHECK_FLAGS_FB NVBIT(4)
+
+#define GPU_SANITY_CHECK_FLAGS_NONE 0x0
+#define GPU_SANITY_CHECK_FLAGS_ALL 0xffffffff
+
+//
+// Macro for checking if GPU is in reset.
+//
+#define API_GPU_IN_RESET_SANITY_CHECK(pGpu) \
+ ((NULL == pGpu) || \
+ pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) || \
+ pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) || \
+ pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) || \
+ pGpu->getProperty(pGpu, PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING))
+
+//
+// Macro for checking if GPU is still connected.
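+// A typical guard at the top of an API entry point (illustrative sketch):
+//
+//     if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu))
+//     {
+//         return NV_ERR_GPU_IS_LOST;
+//     }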
+//
+#define API_GPU_ATTACHED_SANITY_CHECK(pGpu) \
+ ((NULL != pGpu) && \
+ pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) && \
+ !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET))
+
+//
+// Macro for checking if GPU has Full Sanity
+//
+#define FULL_GPU_SANITY_CHECK(pGpu) \
+ ((NULL != pGpu) && \
+ gpuIsGpuFullPower(pGpu) && \
+ pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) && \
+ !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) && \
+ !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) && \
+ !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) && \
+ !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST) && \
+ gpuCheckSysmemAccess(pGpu))
+
+//
+// Macro for checking if GPU has Full Sanity for PM resume
+//
+#define FULL_GPU_SANITY_FOR_PM_RESUME(pGpu) \
+ ((NULL != pGpu) && \
+ gpuIsGpuFullPowerForPmResume(pGpu) && \
+ pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) && \
+ !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) && \
+ !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) && \
+ !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) && \
+ !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST))
+
+//
+// Macro for checking if GPU is in the recovery path
+//
+#define API_GPU_IN_RECOVERY_SANITY_CHECK(pGpu) \
+ ((NULL == pGpu) || \
+ pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TIMEOUT_RECOVERY))
+
+//******************************************************************************
+// POWER SANITY CHECKS
+//******************************************************************************
+//
+// Make sure the GPU is at full power or resuming from the D3 state; otherwise,
+// bail out of the calling function. An exception is made for systems that
+// support the surprise removal feature. See bugs 440565, 479003, and 499228.
+// DO NOT IGNORE OR REMOVE THIS ASSERT. If you have problems with it, please
+// talk to cplummer.
+//
+// bAllowWithoutSysmemAccess: Allow this RM control when sysmem access is not
+// available from the GPU. Should be NV_TRUE only for
+// NV2080_CTRL_CMD_BUS_SYSMEM_ACCESS.
+//
+// On systems supporting surprise removal, if the GPU is in D3 cold
+// and still attached, we consider it a true D3 cold state
+// and return NOT_FULL_POWER. See bug 1679965.
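+//
+// Illustrative use in a control handler (a sketch; myCtrlHandler is
+// hypothetical):
+//
+//     static NV_STATUS myCtrlHandler(struct OBJGPU *pGpu)
+//     {
+//         API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE);
+//         // reached only at full power with sysmem access available
+//         return NV_OK;
+//     }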
+//
+
+#define API_GPU_FULL_POWER_SANITY_CHECK(pGpu, bGpuAccess, bAllowWithoutSysmemAccess) \
+ if ((!gpuIsGpuFullPower(pGpu)) && \
+ (!(pGpu)->getProperty((pGpu), \
+ PDB_PROP_GPU_IN_PM_RESUME_CODEPATH))) \
+ { \
+ DBG_BREAKPOINT(); \
+ if (bGpuAccess || (!gpuIsSurpriseRemovalSupported(pGpu))) \
+ { \
+ return NV_ERR_GPU_NOT_FULL_POWER; \
+ } \
+ else if (gpuIsSurpriseRemovalSupported(pGpu) && \
+ (pGpu)->getProperty((pGpu), PDB_PROP_GPU_IS_CONNECTED)) \
+ { \
+ return NV_ERR_GPU_NOT_FULL_POWER; \
+ } \
+ } \
+ if (!(bAllowWithoutSysmemAccess) && !gpuCheckSysmemAccess(pGpu)) \
+ { \
+ return NV_ERR_GPU_NOT_FULL_POWER; \
+ }
+
+#define API_GPU_FULL_POWER_SANITY_CHECK_OR_GOTO(pGpu, bGpuAccess, bAllowWithoutSysmemAccess, status, tag) \
+ if ((!gpuIsGpuFullPower(pGpu)) && \
+ (!(pGpu)->getProperty((pGpu), \
+ PDB_PROP_GPU_IN_PM_RESUME_CODEPATH))) \
+ { \
+ DBG_BREAKPOINT(); \
+ if (bGpuAccess || (!gpuIsSurpriseRemovalSupported(pGpu))) \
+ { \
+ status = NV_ERR_GPU_NOT_FULL_POWER; \
+ goto tag; \
+ } \
+ else if (gpuIsSurpriseRemovalSupported(pGpu) && \
+ (pGpu)->getProperty((pGpu), PDB_PROP_GPU_IS_CONNECTED)) \
+ { \
+ status = NV_ERR_GPU_NOT_FULL_POWER; \
+ goto tag; \
+ } \
+ } \
+ if (!(bAllowWithoutSysmemAccess) && !gpuCheckSysmemAccess(pGpu)) \
+ { \
+ status = NV_ERR_GPU_NOT_FULL_POWER; \
+ goto tag; \
+ }
+
+//
+// Identifiers for gpuGetRegBaseOffset HAL interface.
+//
+#define NV_REG_BASE_GR (0x00000001)
+#define NV_REG_BASE_TIMER (0x00000002)
+#define NV_REG_BASE_MASTER (0x00000003)
+#define NV_REG_BASE_USERMODE (0x00000004)
+#define NV_REG_BASE_LAST NV_REG_BASE_USERMODE
+ct_assert(NV_REG_BASE_LAST < NV2080_CTRL_INTERNAL_GET_CHIP_INFO_REG_BASE_MAX);
+
+#define GPU_READ_PRI_ERROR_MASK 0xFFF00000
+#define GPU_READ_PRI_ERROR_CODE 0xBAD00000
+
+//
+// Define for invalid register value. The GPU could have fallen off the bus or
+// the GPU could be in reset.
+//
+#define GPU_REG_VALUE_INVALID 0xFFFFFFFF
+
+typedef struct _vgpu_static_info VGPU_STATIC_INFO;
+typedef struct GspStaticConfigInfo_t GspStaticConfigInfo;
+
+// Static info getters
+VGPU_STATIC_INFO *gpuGetStaticInfo(struct OBJGPU *pGpu);
+#define GPU_GET_STATIC_INFO(pGpu) gpuGetStaticInfo(pGpu)
+GspStaticConfigInfo *gpuGetGspStaticInfo(struct OBJGPU *pGpu);
+#define GPU_GET_GSP_STATIC_INFO(pGpu) gpuGetGspStaticInfo(pGpu)
+
+NV_STATUS gpuSimEscapeWrite(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, NvU32 Value);
+NV_STATUS gpuSimEscapeWriteBuffer(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, void* pBuffer);
+NV_STATUS gpuSimEscapeRead(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, NvU32 *Value);
+NV_STATUS gpuSimEscapeReadBuffer(struct OBJGPU *, const char *path, NvU32 Index, NvU32 Size, void* pBuffer);
+
+//
+// This function needs to be called when OBJGPU is not created. HAL
+// infrastructure can't be used for this case, so it has been added manually.
+// It will be invoked directly by gpumgrIsDeviceMsixAllowed().
+//
+NvBool gpuIsMsixAllowed_TU102(RmPhysAddr bar0BaseAddr);
+
+//
+// This function needs to be called when OBJGPU is not created. HAL
+// infrastructure can't be used for this case, so it has been added manually.
+// It will be invoked directly by gpumgrWaitForBarFirewall().
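+// A sketch of the pre-OBJGPU calling pattern these helpers share
+// (illustrative; bar0BaseAddr would come from PCI config space before any
+// OBJGPU exists):
+//
+//     if (gpuIsMsixAllowed_TU102(bar0BaseAddr))
+//     {
+//         // MSI-X may be requested for this device
+//     }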
+// + +// Define for PMC reset delay +#define NV_PMC_RESET_DELAY_US 2 + +#endif // _OBJGPU_H_ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_GPU_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_resource_nvoc.c b/src/nvidia/generated/g_gpu_resource_nvoc.c new file mode 100644 index 0000000..d06c5d1 --- /dev/null +++ b/src/nvidia/generated/g_gpu_resource_nvoc.c @@ -0,0 +1,441 @@ +#define NVOC_GPU_RESOURCE_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_resource_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x5d5d9f = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +// Forward declarations for GpuResource +void __nvoc_init__RmResource(RmResource*); +void __nvoc_init__GpuResource(GpuResource*); +void __nvoc_init_funcTable_GpuResource(GpuResource*); +NV_STATUS __nvoc_ctor_GpuResource(GpuResource*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_GpuResource(GpuResource*); +void __nvoc_dtor_GpuResource(GpuResource*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__GpuResource; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__GpuResource; + +// Down-thunk(s) to bridge GpuResource methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // 
this +NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this + +// Up-thunk(s) to bridge GpuResource methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super +NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource); // this +NV_STATUS 
__nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct GpuResource *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource); // this +void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GpuResource), + /*classId=*/ classId(GpuResource), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GpuResource", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuResource, + /*pCastInfo=*/ &__nvoc_castinfo__GpuResource, + /*pExportInfo=*/ &__nvoc_export_info__GpuResource +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__GpuResource __nvoc_metadata__GpuResource = { + .rtti.pClassDef = &__nvoc_class_def_GpuResource, // (gpures) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuResource, + .rtti.offset = 0, + .metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super + .metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.rtti.offset = NV_OFFSETOF(GpuResource, __nvoc_base_RmResource), + .metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^2 + .metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^3 + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^2 + .metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + + .vtable.__gpuresControl__ = &gpuresControl_IMPL, // virtual override (res) base (rmres) + .metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_GpuResource_resControl, // virtual + .vtable.__gpuresMap__ = &gpuresMap_IMPL, // virtual override (res) base (rmres) + .metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_GpuResource_resMap, // virtual + .vtable.__gpuresUnmap__ = &gpuresUnmap_IMPL, // virtual 
override (res) base (rmres) + .metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_GpuResource_resUnmap, // virtual + .vtable.__gpuresShareCallback__ = &gpuresShareCallback_IMPL, // virtual override (res) base (rmres) + .metadata__RmResource.vtable.__rmresShareCallback__ = &__nvoc_down_thunk_GpuResource_rmresShareCallback, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__gpuresGetRegBaseOffsetAndSize__ = &gpuresGetRegBaseOffsetAndSize_IMPL, // virtual + .vtable.__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL, // virtual + .vtable.__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL, // virtual + .vtable.__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL, // virtual + .vtable.__gpuresAccessCallback__ = &__nvoc_up_thunk_RmResource_gpuresAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__gpuresGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + .vtable.__gpuresCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual + .vtable.__gpuresGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual + .vtable.__gpuresControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__gpuresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__gpuresControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__gpuresControl_Epilogue__ = 
&__nvoc_up_thunk_RmResource_gpuresControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__gpuresCanCopy__ = &__nvoc_up_thunk_RsResource_gpuresCanCopy, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__gpuresIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpuresIsDuplicate, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__gpuresPreDestruct__ = &__nvoc_up_thunk_RsResource_gpuresPreDestruct, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__gpuresControlFilter__ = &__nvoc_up_thunk_RsResource_gpuresControlFilter, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__gpuresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__gpuresMapTo__ = &__nvoc_up_thunk_RsResource_gpuresMapTo, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__gpuresUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpuresUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__gpuresGetRefCount__ = &__nvoc_up_thunk_RsResource_gpuresGetRefCount, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__gpuresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = 
+
+
+// Dynamic down-casting information
+const struct NVOC_CASTINFO __nvoc_castinfo__GpuResource = {
+ .numRelatives = 5,
+ .relatives = {
+ &__nvoc_metadata__GpuResource.rtti, // [0]: (gpures) this
+ &__nvoc_metadata__GpuResource.metadata__RmResource.rtti, // [1]: (rmres) super
+ &__nvoc_metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti, // [2]: (res) super^2
+ &__nvoc_metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [3]: (obj) super^3
+ &__nvoc_metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti, // [4]: (rmrescmn) super^2
+ }
+};
+
+// 4 down-thunk(s) defined to bridge methods in GpuResource from superclasses
+
+// gpuresControl: virtual override (res) base (rmres)
+NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ return gpuresControl((struct GpuResource *)(((unsigned char *) pGpuResource) - NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// gpuresMap: virtual override (res) base (rmres)
+NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
+ return gpuresMap((struct GpuResource *)(((unsigned char *) pGpuResource) - NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams, pCpuMapping);
+}
+
+// gpuresUnmap: virtual override (res) base (rmres)
+NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
+ return gpuresUnmap((struct GpuResource *)(((unsigned char *) pGpuResource) - NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pCpuMapping);
+}
+
+// gpuresShareCallback: virtual override (res) base (rmres)
+NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+ return gpuresShareCallback((struct GpuResource *)(((unsigned char *) pGpuResource) - NV_OFFSETOF(GpuResource, __nvoc_base_RmResource)), pInvokingClient, pParentRef, pSharePolicy);
+}
+
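[Editor's note] All of the thunks in this file, in both directions, are the same one-line pointer adjustment: a down-thunk takes a base-class pointer and subtracts the offset of the embedded base to recover the derived object, and an up-thunk adds the offset to reach the embedded base (the familiar container_of idiom). A standalone sketch with hypothetical types, assuming NV_OFFSETOF behaves like C's offsetof:

    #include <stddef.h>

    struct Base    { int b; };
    struct Derived { int d; struct Base base; };

    /* Down: Base* -> Derived*, as in the "- NV_OFFSETOF(...)" thunks above. */
    static struct Derived *fromBase(struct Base *pBase) {
        return (struct Derived *)((unsigned char *)pBase -
                                  offsetof(struct Derived, base));
    }

    /* Up: Derived* -> embedded Base*, the "+ NV_OFFSETOF(...)" direction
     * used by the up-thunks that follow. */
    static struct Base *toBase(struct Derived *pDerived) {
        return (struct Base *)((unsigned char *)pDerived +
                               offsetof(struct Derived, base));
    }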
+
+// 17 up-thunk(s) defined to bridge methods in GpuResource to superclasses
+
+// gpuresAccessCallback: virtual inherited (rmres) base (rmres)
+NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+ return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight);
+}
+
+// gpuresGetMemInterMapParams: virtual inherited (rmres) base (rmres)
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+ return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource)), pParams);
+}
+
+// gpuresCheckMemInterUnmap: virtual inherited (rmres) base (rmres)
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided) {
+ return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource)), bSubdeviceHandleProvided);
+}
+
+// gpuresGetMemoryMappingDescriptor: virtual inherited (rmres) base (rmres)
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+ return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource)), ppMemDesc);
+}
+
+// gpuresControlSerialization_Prologue: virtual inherited (rmres) base (rmres)
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// gpuresControlSerialization_Epilogue: virtual inherited (rmres) base (rmres)
+void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// gpuresControl_Prologue: virtual inherited (rmres) base (rmres)
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// gpuresControl_Epilogue: virtual inherited (rmres) base (rmres)
+void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// gpuresCanCopy: virtual inherited (res) base (rmres)
+NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource) {
+ return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// gpuresIsDuplicate: virtual inherited (res) base (rmres)
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate) {
+ return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate);
+}
+
+// gpuresPreDestruct: virtual inherited (res) base (rmres)
+void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource) {
+ resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// gpuresControlFilter: virtual inherited (res) base (rmres)
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// gpuresIsPartialUnmapSupported: inline virtual inherited (res) base (rmres) body
+NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct GpuResource *pResource) {
+ return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// gpuresMapTo: virtual inherited (res) base (rmres)
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+ return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// gpuresUnmapFrom: virtual inherited (res) base (rmres)
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+ return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// gpuresGetRefCount: virtual inherited (res) base (rmres)
+NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource) {
+ return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// gpuresAddAdditionalDependants: virtual inherited (res) base (rmres)
+void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference) {
+ resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource)), pReference);
+}
+
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__GpuResource =
+{
+ /*numEntries=*/ 0,
+ /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_RmResource(RmResource*);
+void __nvoc_dtor_GpuResource(GpuResource *pThis) {
+ __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
+ PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_GpuResource(GpuResource *pThis) {
+ PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_GpuResource(GpuResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+ NV_STATUS status = NV_OK;
+ status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams);
+ if (status != NV_OK) goto __nvoc_ctor_GpuResource_fail_RmResource;
+ __nvoc_init_dataField_GpuResource(pThis);
+
+ status = __nvoc_gpuresConstruct(pThis, arg_pCallContext, arg_pParams);
+ if (status != NV_OK) goto __nvoc_ctor_GpuResource_fail__init;
+ goto __nvoc_ctor_GpuResource_exit; // Success
+
+__nvoc_ctor_GpuResource_fail__init:
+ __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
+__nvoc_ctor_GpuResource_fail_RmResource:
+__nvoc_ctor_GpuResource_exit:
+
+ return status;
+}
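[Editor's note] __nvoc_ctor_GpuResource above is the standard goto-unwind idiom for constructors in C: each failure label undoes exactly the steps that had already succeeded, in reverse order. Reduced to its skeleton (Thing, stepA/undoStepA and stepB here are hypothetical):

    NV_STATUS construct(Thing *pThis)
    {
        NV_STATUS status;

        status = stepA(pThis);                 /* e.g. base-class ctor */
        if (status != NV_OK) goto fail_stepA;

        status = stepB(pThis);                 /* e.g. this class's ctor */
        if (status != NV_OK) goto fail_stepB;

        return NV_OK;

    fail_stepB:
        undoStepA(pThis);                      /* unwind only what succeeded */
    fail_stepA:
        return status;
    }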
+
+// Vtable initialization
+static void __nvoc_init_funcTable_GpuResource_1(GpuResource *pThis) {
+ PORT_UNREFERENCED_VARIABLE(pThis);
+} // End __nvoc_init_funcTable_GpuResource_1
+
+
+// Initialize vtable(s) for 25 virtual method(s).
+void __nvoc_init_funcTable_GpuResource(GpuResource *pThis) {
+ __nvoc_init_funcTable_GpuResource_1(pThis);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__GpuResource(GpuResource *pThis) {
+
+ // Initialize pointers to inherited data.
+ pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^3
+ pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^2
+ pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^2
+ pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; // (rmres) super
+ pThis->__nvoc_pbase_GpuResource = pThis; // (gpures) this
+
+ // Recurse to superclass initialization function(s).
+ __nvoc_init__RmResource(&pThis->__nvoc_base_RmResource);
+
+ // Pointer(s) to metadata structures(s)
+ pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^3
+ pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__GpuResource.metadata__RmResource.metadata__RsResource; // (res) super^2
+ pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^2
+ pThis->__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__GpuResource.metadata__RmResource; // (rmres) super
+ pThis->__nvoc_metadata_ptr = &__nvoc_metadata__GpuResource; // (gpures) this
+
+ // Initialize per-object vtables.
+ __nvoc_init_funcTable_GpuResource(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_GpuResource(GpuResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+ NV_STATUS status;
+ Object *pParentObj = NULL;
+ GpuResource *pThis;
+
+ // Assign `pThis`, allocating memory unless suppressed by flag.
+ status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(GpuResource), (void**)&pThis, (void**)ppThis);
+ if (status != NV_OK)
+ return status;
+
+ // Zero is the initial value for everything.
+ portMemSet(pThis, 0, sizeof(GpuResource));
+
+ pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+ // Link the child into the parent if there is one unless flagged not to do so.
+ if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+ {
+ pParentObj = dynamicCast(pParent, Object);
+ objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+ }
+ else
+ {
+ pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+ }
+
+ __nvoc_init__GpuResource(pThis);
+ status = __nvoc_ctor_GpuResource(pThis, arg_pCallContext, arg_pParams);
+ if (status != NV_OK) goto __nvoc_objCreate_GpuResource_cleanup;
+
+ // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+ *ppThis = pThis;
+
+ return NV_OK;
+
+__nvoc_objCreate_GpuResource_cleanup:
+
+ // Unlink the child from the parent if it was linked above.
+ if (pParentObj != NULL)
+ objRemoveChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+ // Do not call destructors here since the constructor already called them.
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+ portMemSet(pThis, 0, sizeof(GpuResource));
+ else
+ {
+ portMemFree(pThis);
+ *ppThis = NULL;
+ }
+
+ // coverity[leaked_storage:FALSE]
+ return status;
+}
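[Editor's note] Note the division of labor in the creation path above: allocation and zero-fill first, then parent linkage, then __nvoc_init__GpuResource to wire up the metadata pointers, and only then the constructor chain; on constructor failure the object is unlinked and freed here without re-running destructors, since the constructor already unwound itself. A hedged sketch of a call site (pParent, pCallContext and pParams are assumed to come from the caller):

    GpuResource *pGpuRes = NULL;
    NV_STATUS status = __nvoc_objCreate_GpuResource(&pGpuRes, pParent,
                                                    0 /* createFlags */,
                                                    pCallContext, pParams);
    if (status != NV_OK)
        return status;   /* on failure, the cleanup path above already ran */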
+
+NV_STATUS __nvoc_objCreateDynamic_GpuResource(GpuResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+ NV_STATUS status;
+ struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+ struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+ status = __nvoc_objCreate_GpuResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+ return status;
+}
+
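[Editor's note] __nvoc_objCreateDynamic_GpuResource exists so that one generic entry point (the objCreatefn recorded in each class definition) can construct any class: the variadic constructor arguments are recovered from a va_list in the same order the caller pushed them. The idiom in isolation, with a hypothetical Widget type:

    #include <stdarg.h>

    struct Widget;  /* hypothetical */
    int makeWidget(struct Widget **ppThis, int size, char *name);

    /* Unpack ctor arguments from a va_list, in the order they were pushed. */
    static int makeWidgetV(struct Widget **ppThis, va_list args)
    {
        int   size = va_arg(args, int);
        char *name = va_arg(args, char *);
        return makeWidget(ppThis, size, name);
    }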
diff --git a/src/nvidia/generated/g_gpu_resource_nvoc.h b/src/nvidia/generated/g_gpu_resource_nvoc.h
new file mode 100644
index 0000000..6cb479f
--- /dev/null
+++ b/src/nvidia/generated/g_gpu_resource_nvoc.h
@@ -0,0 +1,428 @@
+
+#ifndef _G_GPU_RESOURCE_NVOC_H_
+#define _G_GPU_RESOURCE_NVOC_H_
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+#include "g_gpu_resource_nvoc.h"
+
+#ifndef _GPURESOURCE_H_
+#define _GPURESOURCE_H_
+
+#include "core/core.h"
+#include "gpu/mem_mgr/mem_desc.h"
+
+#include "rmapi/resource.h"
+
+
+struct OBJGPU;
+
+#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__
+#define __NVOC_CLASS_OBJGPU_TYPEDEF__
+typedef struct OBJGPU OBJGPU;
+#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJGPU
+#define __nvoc_class_id_OBJGPU 0x7ef3cb
+#endif /* __nvoc_class_id_OBJGPU */
+
+
+
+struct Device;
+
+#ifndef __NVOC_CLASS_Device_TYPEDEF__
+#define __NVOC_CLASS_Device_TYPEDEF__
+typedef struct Device Device;
+#endif /* __NVOC_CLASS_Device_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Device
+#define __nvoc_class_id_Device 0xe0ac20
+#endif /* __nvoc_class_id_Device */
+
+
+
+struct Subdevice;
+
+#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__
+#define __NVOC_CLASS_Subdevice_TYPEDEF__
+typedef struct Subdevice Subdevice;
+#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Subdevice
+#define __nvoc_class_id_Subdevice 0x4b01b3
+#endif /* __nvoc_class_id_Subdevice */
+
+
+
+#define GPU_RES_GET_GPU(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pGpu
+#define GPU_RES_GET_GPUGRP(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pGpuGrp
+#define GPU_RES_GET_DEVICE(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pDevice
+#define GPU_RES_GET_SUBDEVICE(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pSubdevice
+
+#define GPU_RES_SET_THREAD_BC_STATE(pRes) PORT_UNREFERENCED_VARIABLE(pRes)
+
+/*!
+ * Abstract base class for common CPU mapping operations
+ */
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_GPU_RESOURCE_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
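[Editor's note] The PRIVATE_FIELD mechanism described in the comment above is compile-time access control by renaming: the class's own translation unit defines the access macro before including this header and sees the real field names, while every other translation unit gets mangled names, so stray references fail to compile. A simplified sketch (the real NVOC_PRIVATE_FIELD mangling may differ):

    #ifdef MYCLASS_PRIVATE_ACCESS_ALLOWED   /* defined only by myclass.c */
    #define PRIVATE_FIELD(x) x
    #else
    #define PRIVATE_FIELD(x) x##_PRIVATE    /* simplified mangling */
    #endif

    struct MyClass {
        int PRIVATE_FIELD(secret);  /* "secret" inside myclass.c; elsewhere
                                     * the member is "secret_PRIVATE", so code
                                     * that writes pObj->secret won't compile */
    };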
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__GpuResource;
+struct NVOC_METADATA__RmResource;
+struct NVOC_VTABLE__GpuResource;
+
+
+struct GpuResource {
+
+ // Metadata starts with RTTI structure.
+ union {
+ const struct NVOC_METADATA__GpuResource *__nvoc_metadata_ptr;
+ const struct NVOC_RTTI *__nvoc_rtti;
+ };
+
+ // Parent (i.e. superclass or base class) objects
+ struct RmResource __nvoc_base_RmResource;
+
+ // Ancestor object pointers for `staticCast` feature
+ struct Object *__nvoc_pbase_Object; // obj super^3
+ struct RsResource *__nvoc_pbase_RsResource; // res super^2
+ struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^2
+ struct RmResource *__nvoc_pbase_RmResource; // rmres super
+ struct GpuResource *__nvoc_pbase_GpuResource; // gpures
+
+ // Data members
+ struct OBJGPUGRP *pGpuGrp;
+ struct OBJGPU *pGpu;
+ struct Device *pDevice;
+ struct Subdevice *pSubdevice;
+ NvBool bBcResource;
+};
+
+
+// Vtable with 25 per-class function pointers
+struct NVOC_VTABLE__GpuResource {
+ NV_STATUS (*__gpuresControl__)(struct GpuResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual override (res) base (rmres)
+ NV_STATUS (*__gpuresMap__)(struct GpuResource * /*this*/, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); // virtual override (res) base (rmres)
+ NV_STATUS (*__gpuresUnmap__)(struct GpuResource * /*this*/, struct CALL_CONTEXT *, struct RsCpuMapping *); // virtual override (res) base (rmres)
+ NvBool (*__gpuresShareCallback__)(struct GpuResource * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual override (res) base (rmres)
+ NV_STATUS (*__gpuresGetRegBaseOffsetAndSize__)(struct GpuResource * /*this*/, struct OBJGPU *, NvU32 *, NvU32 *); // virtual
+ NV_STATUS (*__gpuresGetMapAddrSpace__)(struct GpuResource * /*this*/, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual
+ NV_STATUS (*__gpuresInternalControlForward__)(struct GpuResource * /*this*/, NvU32, void *, NvU32); // virtual
+ NvHandle (*__gpuresGetInternalObjectHandle__)(struct GpuResource * /*this*/); // virtual
+ NvBool (*__gpuresAccessCallback__)(struct GpuResource * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (rmres)
+ NV_STATUS (*__gpuresGetMemInterMapParams__)(struct GpuResource * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (rmres)
+ NV_STATUS (*__gpuresCheckMemInterUnmap__)(struct GpuResource * /*this*/, NvBool); // virtual inherited (rmres) base (rmres)
+ NV_STATUS (*__gpuresGetMemoryMappingDescriptor__)(struct GpuResource * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (rmres)
+ NV_STATUS (*__gpuresControlSerialization_Prologue__)(struct GpuResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres)
+ void (*__gpuresControlSerialization_Epilogue__)(struct GpuResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres)
+ NV_STATUS (*__gpuresControl_Prologue__)(struct GpuResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres)
+ void (*__gpuresControl_Epilogue__)(struct GpuResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres)
+ NvBool (*__gpuresCanCopy__)(struct GpuResource * /*this*/); // virtual inherited (res) base (rmres)
+ NV_STATUS (*__gpuresIsDuplicate__)(struct GpuResource * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (rmres)
+ void (*__gpuresPreDestruct__)(struct GpuResource * /*this*/); // virtual inherited (res) base (rmres)
+ NV_STATUS (*__gpuresControlFilter__)(struct GpuResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (rmres)
+ NvBool (*__gpuresIsPartialUnmapSupported__)(struct GpuResource * /*this*/); // inline virtual inherited (res) base (rmres) body
+ NV_STATUS (*__gpuresMapTo__)(struct GpuResource * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (rmres)
+ NV_STATUS (*__gpuresUnmapFrom__)(struct GpuResource * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (rmres)
+ NvU32 (*__gpuresGetRefCount__)(struct GpuResource * /*this*/); // virtual inherited (res) base (rmres)
+ void (*__gpuresAddAdditionalDependants__)(struct RsClient *, struct GpuResource * /*this*/, RsResourceRef *); // virtual inherited (res) base (rmres)
+};
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__GpuResource {
+ const struct NVOC_RTTI rtti;
+ const struct NVOC_METADATA__RmResource metadata__RmResource;
+ const struct NVOC_VTABLE__GpuResource vtable;
+};
+
+#ifndef __NVOC_CLASS_GpuResource_TYPEDEF__
+#define __NVOC_CLASS_GpuResource_TYPEDEF__
+typedef struct GpuResource GpuResource;
+#endif /* __NVOC_CLASS_GpuResource_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_GpuResource
+#define __nvoc_class_id_GpuResource 0x5d5d9f
+#endif /* __nvoc_class_id_GpuResource */
+
+// Casting support
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
+
+#define __staticCast_GpuResource(pThis) \
+ ((pThis)->__nvoc_pbase_GpuResource)
+
+#ifdef __nvoc_gpu_resource_h_disabled
+#define __dynamicCast_GpuResource(pThis) ((GpuResource*) NULL)
+#else //__nvoc_gpu_resource_h_disabled
+#define __dynamicCast_GpuResource(pThis) \
+ ((GpuResource*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuResource)))
+#endif //__nvoc_gpu_resource_h_disabled
+
+NV_STATUS __nvoc_objCreateDynamic_GpuResource(GpuResource**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_GpuResource(GpuResource**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+#define __objCreate_GpuResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
+ __nvoc_objCreate_GpuResource((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
+
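[Editor's note] For callers, the casting support above boils down to two idioms: staticCast walks the __nvoc_pbase_* pointers with no runtime check, while dynamicCast consults the NVOC_CASTINFO relatives list and, in the usual NVOC convention, yields NULL on a mismatch. A hedged usage sketch (pResource is assumed to be some RsResource-derived object):

    /* Checked downcast: NULL if pResource is not in fact a GpuResource. */
    GpuResource *pGpuRes = dynamicCast(pResource, GpuResource);
    if (pGpuRes == NULL)
        return NV_ERR_INVALID_OBJECT;

    /* Unchecked upcast: a pointer walk through the __nvoc_pbase_* fields. */
    struct RsResource *pRes = staticCast(pGpuRes, RsResource);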
+
+// Wrapper macros
+#define gpuresControl_FNPTR(pGpuResource) pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresControl__
+#define gpuresControl(pGpuResource, pCallContext, pParams) gpuresControl_DISPATCH(pGpuResource, pCallContext, pParams)
+#define gpuresMap_FNPTR(pGpuResource) pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresMap__
+#define gpuresMap(pGpuResource, pCallContext, pParams, pCpuMapping) gpuresMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
+#define gpuresUnmap_FNPTR(pGpuResource) pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresUnmap__
+#define gpuresUnmap(pGpuResource, pCallContext, pCpuMapping) gpuresUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
+#define gpuresShareCallback_FNPTR(pGpuResource) pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresShareCallback__
+#define gpuresShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) gpuresShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
+#define gpuresGetRegBaseOffsetAndSize_FNPTR(pGpuResource) pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresGetRegBaseOffsetAndSize__
+#define gpuresGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) gpuresGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
+#define gpuresGetMapAddrSpace_FNPTR(pGpuResource) pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresGetMapAddrSpace__
+#define gpuresGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) gpuresGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
+#define gpuresInternalControlForward_FNPTR(pGpuResource) pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresInternalControlForward__
+#define gpuresInternalControlForward(pGpuResource, command, pParams, size) gpuresInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
+#define gpuresGetInternalObjectHandle_FNPTR(pGpuResource) pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__
+#define gpuresGetInternalObjectHandle(pGpuResource) gpuresGetInternalObjectHandle_DISPATCH(pGpuResource)
+#define gpuresAccessCallback_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__
+#define gpuresAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) gpuresAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
+#define gpuresGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__
+#define gpuresGetMemInterMapParams(pRmResource, pParams) gpuresGetMemInterMapParams_DISPATCH(pRmResource, pParams)
+#define gpuresCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__
+#define gpuresCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) gpuresCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
+#define gpuresGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__
+#define gpuresGetMemoryMappingDescriptor(pRmResource, ppMemDesc) gpuresGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
+#define gpuresControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__
+#define gpuresControlSerialization_Prologue(pResource, pCallContext, pParams) gpuresControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams)
+#define gpuresControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__
+#define gpuresControlSerialization_Epilogue(pResource, pCallContext, pParams) gpuresControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams)
+#define gpuresControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__
+#define gpuresControl_Prologue(pResource, pCallContext, pParams) gpuresControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
+#define gpuresControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__
+#define gpuresControl_Epilogue(pResource, pCallContext, pParams) gpuresControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
+#define gpuresCanCopy_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__
+#define gpuresCanCopy(pResource) gpuresCanCopy_DISPATCH(pResource)
+#define gpuresIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__
+#define gpuresIsDuplicate(pResource, hMemory, pDuplicate) gpuresIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate)
+#define gpuresPreDestruct_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__
+#define gpuresPreDestruct(pResource) gpuresPreDestruct_DISPATCH(pResource)
+#define gpuresControlFilter_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__
+#define gpuresControlFilter(pResource, pCallContext, pParams) gpuresControlFilter_DISPATCH(pResource, pCallContext, pParams)
+#define gpuresIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__
+#define gpuresIsPartialUnmapSupported(pResource) gpuresIsPartialUnmapSupported_DISPATCH(pResource)
+#define gpuresMapTo_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__
+#define gpuresMapTo(pResource, pParams) gpuresMapTo_DISPATCH(pResource, pParams)
+#define gpuresUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__
+#define gpuresUnmapFrom(pResource, pParams) gpuresUnmapFrom_DISPATCH(pResource, pParams)
+#define gpuresGetRefCount_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__
+#define gpuresGetRefCount(pResource) gpuresGetRefCount_DISPATCH(pResource)
+#define gpuresAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__
+#define gpuresAddAdditionalDependants(pClient, pResource, pReference) gpuresAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
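[Editor's note] Each wrapper macro above hides a virtual call: the _FNPTR form names the slot, and the plain form forwards to a _DISPATCH inline (defined next) that loads the metadata pointer and calls through the vtable, so a virtual call costs two loads and an indirect call. In spirit:

    /* What a wrapped call expands to, in spirit: */
    status = gpuresControl(pGpuRes, pCallContext, pParams);
    /* ... is ... */
    status = pGpuRes->__nvoc_metadata_ptr->vtable.__gpuresControl__(
                 pGpuRes, pCallContext, pParams);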
+
+// Dispatch functions
+static inline NV_STATUS gpuresControl_DISPATCH(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ return pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresControl__(pGpuResource, pCallContext, pParams);
+}
+
+static inline NV_STATUS gpuresMap_DISPATCH(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
+ return pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
+}
+
+static inline NV_STATUS gpuresUnmap_DISPATCH(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
+ return pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresUnmap__(pGpuResource, pCallContext, pCpuMapping);
+}
+
+static inline NvBool gpuresShareCallback_DISPATCH(struct GpuResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+ return pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
+}
+
+static inline NV_STATUS gpuresGetRegBaseOffsetAndSize_DISPATCH(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
+ return pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize);
+}
+
+static inline NV_STATUS gpuresGetMapAddrSpace_DISPATCH(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
+ return pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
+}
+
+static inline NV_STATUS gpuresInternalControlForward_DISPATCH(struct GpuResource *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
+ return pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresInternalControlForward__(pGpuResource, command, pParams, size);
+}
+
+static inline NvHandle gpuresGetInternalObjectHandle_DISPATCH(struct GpuResource *pGpuResource) {
+ return pGpuResource->__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__(pGpuResource);
+}
+
+static inline NvBool gpuresAccessCallback_DISPATCH(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+ return pResource->__nvoc_metadata_ptr->vtable.__gpuresAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
+}
+
+static inline NV_STATUS gpuresGetMemInterMapParams_DISPATCH(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+ return pRmResource->__nvoc_metadata_ptr->vtable.__gpuresGetMemInterMapParams__(pRmResource, pParams);
+}
+
+static inline NV_STATUS gpuresCheckMemInterUnmap_DISPATCH(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided) {
+ return pRmResource->__nvoc_metadata_ptr->vtable.__gpuresCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
+}
+
+static inline NV_STATUS gpuresGetMemoryMappingDescriptor_DISPATCH(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+ return pRmResource->__nvoc_metadata_ptr->vtable.__gpuresGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
+}
+
+static inline NV_STATUS gpuresControlSerialization_Prologue_DISPATCH(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ return pResource->__nvoc_metadata_ptr->vtable.__gpuresControlSerialization_Prologue__(pResource, pCallContext, pParams);
+}
+
+static inline void gpuresControlSerialization_Epilogue_DISPATCH(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ pResource->__nvoc_metadata_ptr->vtable.__gpuresControlSerialization_Epilogue__(pResource, pCallContext, pParams);
+}
+
+static inline NV_STATUS gpuresControl_Prologue_DISPATCH(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ return pResource->__nvoc_metadata_ptr->vtable.__gpuresControl_Prologue__(pResource, pCallContext, pParams);
+}
+
+static inline void gpuresControl_Epilogue_DISPATCH(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ pResource->__nvoc_metadata_ptr->vtable.__gpuresControl_Epilogue__(pResource, pCallContext, pParams);
+}
+
+static inline NvBool gpuresCanCopy_DISPATCH(struct GpuResource *pResource) {
+ return pResource->__nvoc_metadata_ptr->vtable.__gpuresCanCopy__(pResource);
+}
+
+static inline NV_STATUS gpuresIsDuplicate_DISPATCH(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate) {
+ return pResource->__nvoc_metadata_ptr->vtable.__gpuresIsDuplicate__(pResource, hMemory, pDuplicate);
+}
+
+static inline void gpuresPreDestruct_DISPATCH(struct GpuResource *pResource) {
+ pResource->__nvoc_metadata_ptr->vtable.__gpuresPreDestruct__(pResource);
+}
+
+static inline NV_STATUS gpuresControlFilter_DISPATCH(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ return pResource->__nvoc_metadata_ptr->vtable.__gpuresControlFilter__(pResource, pCallContext, pParams);
+}
+
+static inline NvBool gpuresIsPartialUnmapSupported_DISPATCH(struct GpuResource *pResource) {
+ return pResource->__nvoc_metadata_ptr->vtable.__gpuresIsPartialUnmapSupported__(pResource);
+}
+
+static inline NV_STATUS gpuresMapTo_DISPATCH(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+ return pResource->__nvoc_metadata_ptr->vtable.__gpuresMapTo__(pResource, pParams);
+}
+
+static inline NV_STATUS gpuresUnmapFrom_DISPATCH(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+ return pResource->__nvoc_metadata_ptr->vtable.__gpuresUnmapFrom__(pResource, pParams);
+}
+
+static inline NvU32 gpuresGetRefCount_DISPATCH(struct GpuResource *pResource) {
+ return pResource->__nvoc_metadata_ptr->vtable.__gpuresGetRefCount__(pResource);
+}
+
+static inline void gpuresAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference) {
+ pResource->__nvoc_metadata_ptr->vtable.__gpuresAddAdditionalDependants__(pClient, pResource, pReference);
+}
+
+NV_STATUS gpuresControl_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
+
+NV_STATUS gpuresMap_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping);
+
+NV_STATUS gpuresUnmap_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping);
+
+NvBool gpuresShareCallback_IMPL(struct GpuResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy);
+
+NV_STATUS gpuresGetRegBaseOffsetAndSize_IMPL(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize);
+
+NV_STATUS gpuresGetMapAddrSpace_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace);
+
+NV_STATUS gpuresInternalControlForward_IMPL(struct GpuResource *pGpuResource, NvU32 command, void *pParams, NvU32 size);
+
+NvHandle gpuresGetInternalObjectHandle_IMPL(struct GpuResource *pGpuResource);
+
+NV_STATUS gpuresConstruct_IMPL(struct GpuResource *arg_pGpuResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+
+#define __nvoc_gpuresConstruct(arg_pGpuResource, arg_pCallContext, arg_pParams) gpuresConstruct_IMPL(arg_pGpuResource, arg_pCallContext, arg_pParams)
+NV_STATUS gpuresCopyConstruct_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams);
+
+#ifdef __nvoc_gpu_resource_h_disabled
+static inline NV_STATUS gpuresCopyConstruct(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) {
+ NV_ASSERT_FAILED_PRECOMP("GpuResource was disabled!");
+ return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_gpu_resource_h_disabled
+#define gpuresCopyConstruct(pGpuResource, pCallContext, pParams) gpuresCopyConstruct_IMPL(pGpuResource, pCallContext, pParams)
+#endif //__nvoc_gpu_resource_h_disabled
+
+void gpuresSetGpu_IMPL(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvBool bBcResource);
+
+#ifdef __nvoc_gpu_resource_h_disabled
+static inline void gpuresSetGpu(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvBool bBcResource) {
+ NV_ASSERT_FAILED_PRECOMP("GpuResource was disabled!");
+}
+#else //__nvoc_gpu_resource_h_disabled
+#define gpuresSetGpu(pGpuResource, pGpu, bBcResource) gpuresSetGpu_IMPL(pGpuResource, pGpu, bBcResource)
+#endif //__nvoc_gpu_resource_h_disabled
+
+void gpuresControlSetup_IMPL(struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, struct GpuResource *pGpuResource);
+
+#ifdef __nvoc_gpu_resource_h_disabled
+static inline void gpuresControlSetup(struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, struct GpuResource *pGpuResource) {
+ NV_ASSERT_FAILED_PRECOMP("GpuResource was disabled!");
+}
+#else //__nvoc_gpu_resource_h_disabled
+#define gpuresControlSetup(pParams, pGpuResource) gpuresControlSetup_IMPL(pParams, pGpuResource)
+#endif //__nvoc_gpu_resource_h_disabled
+
+NV_STATUS gpuresGetByHandle_IMPL(struct RsClient *pClient, NvHandle hResource, struct GpuResource **ppGpuResource);
+
+#define gpuresGetByHandle(pClient, hResource, ppGpuResource) gpuresGetByHandle_IMPL(pClient, hResource, ppGpuResource)
+NV_STATUS gpuresGetByDeviceOrSubdeviceHandle_IMPL(struct RsClient *pClient, NvHandle hResource, struct GpuResource **ppGpuResource);
+
+#define gpuresGetByDeviceOrSubdeviceHandle(pClient, hResource, ppGpuResource) gpuresGetByDeviceOrSubdeviceHandle_IMPL(pClient, hResource, ppGpuResource)
+#undef PRIVATE_FIELD
+
+
+#endif // _GPURESOURCE_H_
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _G_GPU_RESOURCE_NVOC_H_
diff --git a/src/nvidia/generated/g_gpu_user_shared_data_nvoc.c b/src/nvidia/generated/g_gpu_user_shared_data_nvoc.c
new file mode 100644
index 0000000..9ba3b07
--- /dev/null
+++ b/src/nvidia/generated/g_gpu_user_shared_data_nvoc.c
@@ -0,0 +1,558 @@
+#define NVOC_GPU_USER_SHARED_DATA_H_PRIVATE_ACCESS_ALLOWED
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+#include "nvtypes.h"
+#include "nvport/nvport.h"
+#include "nvport/inline/util_valist.h"
+#include "utils/nvassert.h"
+#include "g_gpu_user_shared_data_nvoc.h"
+
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check__0x5e7d1f = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuUserSharedData;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory;
+
+// Forward declarations for GpuUserSharedData
+void __nvoc_init__Memory(Memory*);
+void __nvoc_init__GpuUserSharedData(GpuUserSharedData*);
+void __nvoc_init_funcTable_GpuUserSharedData(GpuUserSharedData*);
+NV_STATUS __nvoc_ctor_GpuUserSharedData(GpuUserSharedData*, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+void __nvoc_init_dataField_GpuUserSharedData(GpuUserSharedData*);
+void __nvoc_dtor_GpuUserSharedData(GpuUserSharedData*);
+
+// Structures used within RTTI (run-time type information)
+extern const struct NVOC_CASTINFO __nvoc_castinfo__GpuUserSharedData;
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info__GpuUserSharedData;
+
+// Down-thunk(s) to bridge GpuUserSharedData methods from ancestors (if any)
+NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2
+NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2
+NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_down_thunk_Memory_resIsDuplicate(struct RsResource *pMemory, NvHandle hMemory, NvBool *pDuplicate); // super
+NV_STATUS __nvoc_down_thunk_Memory_resControl(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NV_STATUS __nvoc_down_thunk_Memory_resMap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super
+NV_STATUS __nvoc_down_thunk_Memory_resUnmap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super
+NV_STATUS __nvoc_down_thunk_Memory_rmresGetMemInterMapParams(struct RmResource *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super
+NV_STATUS __nvoc_down_thunk_Memory_rmresCheckMemInterUnmap(struct RmResource *pMemory, NvBool bSubdeviceHandleProvided); // super
+NV_STATUS __nvoc_down_thunk_Memory_rmresGetMemoryMappingDescriptor(struct RmResource *pMemory, MEMORY_DESCRIPTOR **ppMemDesc); // super
+NvBool __nvoc_down_thunk_GpuUserSharedData_resCanCopy(struct RsResource *pData); // this
+
+// Up-thunk(s) to bridge GpuUserSharedData methods to ancestors (if any)
+NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2
+void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2
+NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2
+NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2
+void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2
+NvBool __nvoc_up_thunk_RmResource_memAccessCallback(struct Memory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super
+NvBool __nvoc_up_thunk_RmResource_memShareCallback(struct Memory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super
+NV_STATUS __nvoc_up_thunk_RmResource_memControlSerialization_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+void __nvoc_up_thunk_RmResource_memControlSerialization_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NV_STATUS __nvoc_up_thunk_RmResource_memControl_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+void __nvoc_up_thunk_RmResource_memControl_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NvBool __nvoc_up_thunk_RsResource_memCanCopy(struct Memory *pResource); // super
+void __nvoc_up_thunk_RsResource_memPreDestruct(struct Memory *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_memControlFilter(struct Memory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NvBool __nvoc_up_thunk_RsResource_memIsPartialUnmapSupported(struct Memory *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_memMapTo(struct Memory *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super
+NV_STATUS __nvoc_up_thunk_RsResource_memUnmapFrom(struct Memory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super
+NvU32 __nvoc_up_thunk_RsResource_memGetRefCount(struct Memory *pResource); // super
+void __nvoc_up_thunk_RsResource_memAddAdditionalDependants(struct RsClient *pClient, struct Memory *pResource, RsResourceRef *pReference); // super
+NV_STATUS __nvoc_up_thunk_Memory_gpushareddataIsDuplicate(struct GpuUserSharedData *pMemory, NvHandle hMemory, NvBool *pDuplicate); // this
+NV_STATUS __nvoc_up_thunk_Memory_gpushareddataGetMapAddrSpace(struct GpuUserSharedData *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this
+NV_STATUS __nvoc_up_thunk_Memory_gpushareddataControl(struct GpuUserSharedData *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NV_STATUS __nvoc_up_thunk_Memory_gpushareddataMap(struct GpuUserSharedData *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this
+NV_STATUS __nvoc_up_thunk_Memory_gpushareddataUnmap(struct GpuUserSharedData *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this
+NV_STATUS __nvoc_up_thunk_Memory_gpushareddataGetMemInterMapParams(struct GpuUserSharedData *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this
+NV_STATUS __nvoc_up_thunk_Memory_gpushareddataCheckMemInterUnmap(struct GpuUserSharedData *pMemory, NvBool bSubdeviceHandleProvided); // this
+NV_STATUS __nvoc_up_thunk_Memory_gpushareddataGetMemoryMappingDescriptor(struct GpuUserSharedData *pMemory, MEMORY_DESCRIPTOR **ppMemDesc); // this
+NV_STATUS __nvoc_up_thunk_Memory_gpushareddataCheckCopyPermissions(struct GpuUserSharedData *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice); // this
+NV_STATUS __nvoc_up_thunk_Memory_gpushareddataIsReady(struct GpuUserSharedData *pMemory, NvBool bCopyConstructorContext); // this
+NvBool __nvoc_up_thunk_Memory_gpushareddataIsGpuMapAllowed(struct GpuUserSharedData *pMemory, struct OBJGPU *pGpu); // this
+NvBool __nvoc_up_thunk_Memory_gpushareddataIsExportAllowed(struct GpuUserSharedData *pMemory); // this
+NvBool __nvoc_up_thunk_RmResource_gpushareddataAccessCallback(struct GpuUserSharedData *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this
+NvBool __nvoc_up_thunk_RmResource_gpushareddataShareCallback(struct GpuUserSharedData *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this
+NV_STATUS __nvoc_up_thunk_RmResource_gpushareddataControlSerialization_Prologue(struct GpuUserSharedData *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+void __nvoc_up_thunk_RmResource_gpushareddataControlSerialization_Epilogue(struct GpuUserSharedData *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NV_STATUS __nvoc_up_thunk_RmResource_gpushareddataControl_Prologue(struct GpuUserSharedData *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+void __nvoc_up_thunk_RmResource_gpushareddataControl_Epilogue(struct GpuUserSharedData *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+void __nvoc_up_thunk_RsResource_gpushareddataPreDestruct(struct GpuUserSharedData *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_gpushareddataControlFilter(struct GpuUserSharedData *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NvBool __nvoc_up_thunk_RsResource_gpushareddataIsPartialUnmapSupported(struct GpuUserSharedData *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_gpushareddataMapTo(struct GpuUserSharedData *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this
+NV_STATUS __nvoc_up_thunk_RsResource_gpushareddataUnmapFrom(struct GpuUserSharedData *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this
+NvU32 __nvoc_up_thunk_RsResource_gpushareddataGetRefCount(struct GpuUserSharedData *pResource); // this
+void __nvoc_up_thunk_RsResource_gpushareddataAddAdditionalDependants(struct RsClient *pClient, struct GpuUserSharedData *pResource, RsResourceRef *pReference); // this
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_GpuUserSharedData =
+{
+ /*classInfo=*/ {
+ /*size=*/ sizeof(GpuUserSharedData),
+ /*classId=*/ classId(GpuUserSharedData),
+ /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*name=*/ "GpuUserSharedData",
+#endif
+ },
+ /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuUserSharedData,
+ /*pCastInfo=*/ &__nvoc_castinfo__GpuUserSharedData,
+ /*pExportInfo=*/ &__nvoc_export_info__GpuUserSharedData
+};
+
+#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG)
+#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0)
+#endif
+
+static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_GpuUserSharedData[] =
+{
+ { /* [0] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) gpushareddataCtrlCmdRequestDataPoll_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+ /*flags=*/ 0x8u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0xde0001u,
+ /*paramSize=*/ sizeof(NV00DE_CTRL_REQUEST_DATA_POLL_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_GpuUserSharedData.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "gpushareddataCtrlCmdRequestDataPoll"
+#endif
+ },
+
+};
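[Editor's note] The exported-method table above is how a control command reaches its handler: at dispatch time the resource server matches the incoming command identifier against methodId (0xde0001 here), checks flags, access rights and parameter size from the same entry, and then invokes pFunc. A hedged sketch of such a lookup (findExportedMethod is a hypothetical helper, not the real dispatcher):

    static const struct NVOC_EXPORTED_METHOD_DEF *
    findExportedMethod(const struct NVOC_EXPORTED_METHOD_DEF *pTable,
                       NvU32 numEntries, NvU32 methodId)
    {
        NvU32 i;
        for (i = 0; i < numEntries; i++)
        {
            if (pTable[i].methodId == methodId)
                return &pTable[i];   /* caller then validates paramSize etc. */
        }
        return NULL;                 /* unknown control command */
    }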
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+static const struct NVOC_METADATA__GpuUserSharedData __nvoc_metadata__GpuUserSharedData = {
+ .rtti.pClassDef = &__nvoc_class_def_GpuUserSharedData, // (gpushareddata) this
+ .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuUserSharedData,
+ .rtti.offset = 0,
+ .metadata__Memory.rtti.pClassDef = &__nvoc_class_def_Memory, // (mem) super
+ .metadata__Memory.rtti.dtor = &__nvoc_destructFromBase,
+ .metadata__Memory.rtti.offset = NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory),
+ .metadata__Memory.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2
+ .metadata__Memory.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase,
+ .metadata__Memory.metadata__RmResource.rtti.offset = NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource),
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase,
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource),
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase,
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
+ .metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3
+ .metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase,
+ .metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
+
+ .vtable.__gpushareddataCanCopy__ = &gpushareddataCanCopy_IMPL, // virtual override (res) base (mem)
+ .metadata__Memory.vtable.__memCanCopy__ = &__nvoc_up_thunk_RsResource_memCanCopy, // virtual inherited (res) base (rmres)
+ .metadata__Memory.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res)
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &__nvoc_down_thunk_GpuUserSharedData_resCanCopy, // virtual
+ .vtable.__gpushareddataIsDuplicate__ = &__nvoc_up_thunk_Memory_gpushareddataIsDuplicate, // virtual inherited (mem) base (mem)
+ .metadata__Memory.vtable.__memIsDuplicate__ = &memIsDuplicate_IMPL, // virtual override (res) base (rmres)
+ .metadata__Memory.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res)
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &__nvoc_down_thunk_Memory_resIsDuplicate, // virtual
+ .vtable.__gpushareddataGetMapAddrSpace__ = &__nvoc_up_thunk_Memory_gpushareddataGetMapAddrSpace, // virtual inherited (mem) base (mem)
+ .metadata__Memory.vtable.__memGetMapAddrSpace__ = &memGetMapAddrSpace_IMPL, // virtual
+ .vtable.__gpushareddataControl__ = &__nvoc_up_thunk_Memory_gpushareddataControl, // virtual inherited (mem) base (mem)
+ .metadata__Memory.vtable.__memControl__ = &memControl_IMPL, // virtual override (res) base (rmres)
+ .metadata__Memory.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res)
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_Memory_resControl, // virtual
+ .vtable.__gpushareddataMap__ = &__nvoc_up_thunk_Memory_gpushareddataMap, // virtual inherited (mem) base (mem)
+ .metadata__Memory.vtable.__memMap__ = &memMap_IMPL, // virtual override (res) base (rmres)
+ .metadata__Memory.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res)
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_Memory_resMap, // virtual
+ .vtable.__gpushareddataUnmap__ = &__nvoc_up_thunk_Memory_gpushareddataUnmap, // virtual inherited (mem) base (mem)
+ .metadata__Memory.vtable.__memUnmap__ = &memUnmap_IMPL, // virtual override (res) base (rmres)
+ .metadata__Memory.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res)
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_Memory_resUnmap, // virtual
+ .vtable.__gpushareddataGetMemInterMapParams__ = &__nvoc_up_thunk_Memory_gpushareddataGetMemInterMapParams, // virtual inherited (mem) base (mem)
+ .metadata__Memory.vtable.__memGetMemInterMapParams__ = &memGetMemInterMapParams_IMPL, // virtual override (rmres) base (rmres)
+ .metadata__Memory.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &__nvoc_down_thunk_Memory_rmresGetMemInterMapParams, // virtual
+ .vtable.__gpushareddataCheckMemInterUnmap__ = &__nvoc_up_thunk_Memory_gpushareddataCheckMemInterUnmap, // inline virtual inherited (mem) base (mem) body
+ .metadata__Memory.vtable.__memCheckMemInterUnmap__ = &memCheckMemInterUnmap_ac1694, // inline virtual override (rmres) base (rmres) body
+ .metadata__Memory.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &__nvoc_down_thunk_Memory_rmresCheckMemInterUnmap, // virtual
+ .vtable.__gpushareddataGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_Memory_gpushareddataGetMemoryMappingDescriptor, // virtual inherited (mem) base (mem)
+ .metadata__Memory.vtable.__memGetMemoryMappingDescriptor__ = &memGetMemoryMappingDescriptor_IMPL, // virtual override (rmres) base (rmres)
+ .metadata__Memory.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &__nvoc_down_thunk_Memory_rmresGetMemoryMappingDescriptor, // virtual
+ .vtable.__gpushareddataCheckCopyPermissions__ = &__nvoc_up_thunk_Memory_gpushareddataCheckCopyPermissions, // inline virtual inherited (mem) base (mem) body
+ .metadata__Memory.vtable.__memCheckCopyPermissions__ = &memCheckCopyPermissions_ac1694, // inline virtual body
+ .vtable.__gpushareddataIsReady__ = &__nvoc_up_thunk_Memory_gpushareddataIsReady, // virtual inherited (mem) base (mem)
+ .metadata__Memory.vtable.__memIsReady__ = &memIsReady_IMPL, // virtual
+ .vtable.__gpushareddataIsGpuMapAllowed__ = &__nvoc_up_thunk_Memory_gpushareddataIsGpuMapAllowed, // inline virtual inherited (mem) base (mem) body
+ .metadata__Memory.vtable.__memIsGpuMapAllowed__ = &memIsGpuMapAllowed_e661f0, // inline virtual body
+ .vtable.__gpushareddataIsExportAllowed__ = &__nvoc_up_thunk_Memory_gpushareddataIsExportAllowed, // inline virtual inherited (mem) base (mem) body
+ .metadata__Memory.vtable.__memIsExportAllowed__ = &memIsExportAllowed_e661f0, // inline virtual body
+ .vtable.__gpushareddataAccessCallback__ = &__nvoc_up_thunk_RmResource_gpushareddataAccessCallback, // virtual inherited (rmres) base (mem)
+ .metadata__Memory.vtable.__memAccessCallback__ = &__nvoc_up_thunk_RmResource_memAccessCallback, // virtual inherited (rmres) base (rmres)
+ .metadata__Memory.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res)
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual
+ .vtable.__gpushareddataShareCallback__ = &__nvoc_up_thunk_RmResource_gpushareddataShareCallback, // virtual inherited (rmres) base (mem)
+ .metadata__Memory.vtable.__memShareCallback__ = &__nvoc_up_thunk_RmResource_memShareCallback, // virtual inherited (rmres) base (rmres)
+ .metadata__Memory.metadata__RmResource.vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL, // virtual override (res) base (res)
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual
+ .vtable.__gpushareddataControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpushareddataControlSerialization_Prologue, // virtual inherited (rmres) base (mem)
+ .metadata__Memory.vtable.__memControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_memControlSerialization_Prologue, // virtual inherited (rmres) base (rmres)
+ .metadata__Memory.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res)
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual
+ .vtable.__gpushareddataControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpushareddataControlSerialization_Epilogue, // virtual inherited (rmres) base (mem)
+ .metadata__Memory.vtable.__memControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_memControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres)
+ .metadata__Memory.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res)
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual
+ .vtable.__gpushareddataControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpushareddataControl_Prologue, // virtual inherited (rmres) base (mem)
+ .metadata__Memory.vtable.__memControl_Prologue__ = &__nvoc_up_thunk_RmResource_memControl_Prologue, // virtual inherited (rmres) base (rmres)
+ .metadata__Memory.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res)
+ .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual
+ .vtable.__gpushareddataControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpushareddataControl_Epilogue, // virtual inherited (rmres) base (mem)
+ .metadata__Memory.vtable.__memControl_Epilogue__ =
&__nvoc_up_thunk_RmResource_memControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__gpushareddataPreDestruct__ = &__nvoc_up_thunk_RsResource_gpushareddataPreDestruct, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memPreDestruct__ = &__nvoc_up_thunk_RsResource_memPreDestruct, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__gpushareddataControlFilter__ = &__nvoc_up_thunk_RsResource_gpushareddataControlFilter, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memControlFilter__ = &__nvoc_up_thunk_RsResource_memControlFilter, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__gpushareddataIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpushareddataIsPartialUnmapSupported, // inline virtual inherited (res) base (mem) body + .metadata__Memory.vtable.__memIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_memIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__Memory.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__gpushareddataMapTo__ = &__nvoc_up_thunk_RsResource_gpushareddataMapTo, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memMapTo__ = &__nvoc_up_thunk_RsResource_memMapTo, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__gpushareddataUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpushareddataUnmapFrom, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memUnmapFrom__ = &__nvoc_up_thunk_RsResource_memUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__gpushareddataGetRefCount__ = &__nvoc_up_thunk_RsResource_gpushareddataGetRefCount, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memGetRefCount__ = &__nvoc_up_thunk_RsResource_memGetRefCount, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresGetRefCount__ = 
&__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__gpushareddataAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpushareddataAddAdditionalDependants, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_memAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__GpuUserSharedData = { + .numRelatives = 6, + .relatives = { + &__nvoc_metadata__GpuUserSharedData.rtti, // [0]: (gpushareddata) this + &__nvoc_metadata__GpuUserSharedData.metadata__Memory.rtti, // [1]: (mem) super + &__nvoc_metadata__GpuUserSharedData.metadata__Memory.metadata__RmResource.rtti, // [2]: (rmres) super^2 + &__nvoc_metadata__GpuUserSharedData.metadata__Memory.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3 + &__nvoc_metadata__GpuUserSharedData.metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4 + &__nvoc_metadata__GpuUserSharedData.metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3 + } +}; + +// 1 down-thunk(s) defined to bridge methods in GpuUserSharedData from superclasses + +// gpushareddataCanCopy: virtual override (res) base (mem) +NvBool __nvoc_down_thunk_GpuUserSharedData_resCanCopy(struct RsResource *pData) { + return gpushareddataCanCopy((struct GpuUserSharedData *)(((unsigned char *) pData) - NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + + +// 25 up-thunk(s) defined to bridge methods in GpuUserSharedData to superclasses + +// gpushareddataIsDuplicate: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_gpushareddataIsDuplicate(struct GpuUserSharedData *pMemory, NvHandle hMemory, NvBool *pDuplicate) { + return memIsDuplicate((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory)), hMemory, pDuplicate); +} + +// gpushareddataGetMapAddrSpace: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_gpushareddataGetMapAddrSpace(struct GpuUserSharedData *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory)), pCallContext, mapFlags, pAddrSpace); +} + +// gpushareddataControl: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_gpushareddataControl(struct GpuUserSharedData *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory)), pCallContext, pParams); +} + +// gpushareddataMap: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_gpushareddataMap(struct GpuUserSharedData *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) 
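// Each thunk here converts between object views with nothing but pointer
// arithmetic: up-thunks add the embedded base member's offset, and the
// down-thunk (__nvoc_down_thunk_GpuUserSharedData_resCanCopy above) subtracts
// it. A self-contained sketch of that technique, with hypothetical
// Derived/Base types standing in for GpuUserSharedData and its base classes,
// and standard offsetof standing in for NV_OFFSETOF:

#include <assert.h>
#include <stddef.h>

typedef struct Base    { int baseField; } Base;
typedef struct Derived { int ownField; Base base; } Derived; // base NOT at offset 0

// Up-cast (Derived* -> embedded Base*): add the member offset,
// as the __nvoc_up_thunk_* functions do.
static Base *toBase(Derived *d) {
    return (Base *)((unsigned char *)d + offsetof(Derived, base));
}

// Down-cast (Base* -> enclosing Derived*): subtract the same offset,
// as the __nvoc_down_thunk_* functions do.
static Derived *toDerived(Base *b) {
    return (Derived *)((unsigned char *)b - offsetof(Derived, base));
}

int main(void) {
    Derived d = { .ownField = 1, .base = { .baseField = 2 } };
    assert(toDerived(toBase(&d)) == &d); // the round trip is exact
    return 0;
}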
{ + return memMap((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory)), pCallContext, pParams, pCpuMapping); +} + +// gpushareddataUnmap: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_gpushareddataUnmap(struct GpuUserSharedData *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory)), pCallContext, pCpuMapping); +} + +// gpushareddataGetMemInterMapParams: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_gpushareddataGetMemInterMapParams(struct GpuUserSharedData *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory)), pParams); +} + +// gpushareddataCheckMemInterUnmap: inline virtual inherited (mem) base (mem) body +NV_STATUS __nvoc_up_thunk_Memory_gpushareddataCheckMemInterUnmap(struct GpuUserSharedData *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory)), bSubdeviceHandleProvided); +} + +// gpushareddataGetMemoryMappingDescriptor: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_gpushareddataGetMemoryMappingDescriptor(struct GpuUserSharedData *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory)), ppMemDesc); +} + +// gpushareddataCheckCopyPermissions: inline virtual inherited (mem) base (mem) body +NV_STATUS __nvoc_up_thunk_Memory_gpushareddataCheckCopyPermissions(struct GpuUserSharedData *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory)), pDstGpu, pDstDevice); +} + +// gpushareddataIsReady: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_gpushareddataIsReady(struct GpuUserSharedData *pMemory, NvBool bCopyConstructorContext) { + return memIsReady((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory)), bCopyConstructorContext); +} + +// gpushareddataIsGpuMapAllowed: inline virtual inherited (mem) base (mem) body +NvBool __nvoc_up_thunk_Memory_gpushareddataIsGpuMapAllowed(struct GpuUserSharedData *pMemory, struct OBJGPU *pGpu) { + return memIsGpuMapAllowed((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory)), pGpu); +} + +// gpushareddataIsExportAllowed: inline virtual inherited (mem) base (mem) body +NvBool __nvoc_up_thunk_Memory_gpushareddataIsExportAllowed(struct GpuUserSharedData *pMemory) { + return memIsExportAllowed((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory))); +} + +// gpushareddataAccessCallback: virtual inherited (rmres) base (mem) +NvBool __nvoc_up_thunk_RmResource_gpushareddataAccessCallback(struct GpuUserSharedData *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// gpushareddataShareCallback: virtual inherited (rmres) 
base (mem) +NvBool __nvoc_up_thunk_RmResource_gpushareddataShareCallback(struct GpuUserSharedData *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// gpushareddataControlSerialization_Prologue: virtual inherited (rmres) base (mem) +NV_STATUS __nvoc_up_thunk_RmResource_gpushareddataControlSerialization_Prologue(struct GpuUserSharedData *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// gpushareddataControlSerialization_Epilogue: virtual inherited (rmres) base (mem) +void __nvoc_up_thunk_RmResource_gpushareddataControlSerialization_Epilogue(struct GpuUserSharedData *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// gpushareddataControl_Prologue: virtual inherited (rmres) base (mem) +NV_STATUS __nvoc_up_thunk_RmResource_gpushareddataControl_Prologue(struct GpuUserSharedData *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// gpushareddataControl_Epilogue: virtual inherited (rmres) base (mem) +void __nvoc_up_thunk_RmResource_gpushareddataControl_Epilogue(struct GpuUserSharedData *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// gpushareddataPreDestruct: virtual inherited (res) base (mem) +void __nvoc_up_thunk_RsResource_gpushareddataPreDestruct(struct GpuUserSharedData *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// gpushareddataControlFilter: virtual inherited (res) base (mem) +NV_STATUS __nvoc_up_thunk_RsResource_gpushareddataControlFilter(struct GpuUserSharedData *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// gpushareddataIsPartialUnmapSupported: inline virtual inherited (res) base (mem) body +NvBool __nvoc_up_thunk_RsResource_gpushareddataIsPartialUnmapSupported(struct GpuUserSharedData *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// gpushareddataMapTo: virtual inherited (res) base (mem) +NV_STATUS 
__nvoc_up_thunk_RsResource_gpushareddataMapTo(struct GpuUserSharedData *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// gpushareddataUnmapFrom: virtual inherited (res) base (mem) +NV_STATUS __nvoc_up_thunk_RsResource_gpushareddataUnmapFrom(struct GpuUserSharedData *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// gpushareddataGetRefCount: virtual inherited (res) base (mem) +NvU32 __nvoc_up_thunk_RsResource_gpushareddataGetRefCount(struct GpuUserSharedData *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// gpushareddataAddAdditionalDependants: virtual inherited (res) base (mem) +void __nvoc_up_thunk_RsResource_gpushareddataAddAdditionalDependants(struct RsClient *pClient, struct GpuUserSharedData *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(GpuUserSharedData, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__GpuUserSharedData = +{ + /*numEntries=*/ 1, + /*pExportEntries=*/ __nvoc_exported_method_def_GpuUserSharedData +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_GpuUserSharedData(GpuUserSharedData *pThis) { + __nvoc_gpushareddataDestruct(pThis); + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GpuUserSharedData(GpuUserSharedData *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_GpuUserSharedData(GpuUserSharedData *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GpuUserSharedData_fail_Memory; + __nvoc_init_dataField_GpuUserSharedData(pThis); + + status = __nvoc_gpushareddataConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GpuUserSharedData_fail__init; + goto __nvoc_ctor_GpuUserSharedData_exit; // Success + +__nvoc_ctor_GpuUserSharedData_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_GpuUserSharedData_fail_Memory: +__nvoc_ctor_GpuUserSharedData_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_GpuUserSharedData_1(GpuUserSharedData *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + // gpushareddataCtrlCmdRequestDataPoll -- exported (id=0xde0001) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + pThis->__gpushareddataCtrlCmdRequestDataPoll__ = &gpushareddataCtrlCmdRequestDataPoll_IMPL; +#endif +} // End __nvoc_init_funcTable_GpuUserSharedData_1 with approximately 1 basic block(s). + + +// Initialize vtable(s) for 27 virtual method(s). +void __nvoc_init_funcTable_GpuUserSharedData(GpuUserSharedData *pThis) { + + // Initialize vtable(s) with 1 per-object function pointer(s). 
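// __nvoc_ctor_GpuUserSharedData above uses the classic C goto-unwind shape:
// construct bases and members in order, and on failure fall through labels
// that tear down, in reverse order, only what was already built. A minimal
// sketch of the idiom under hypothetical names (initPartA/initPartB), not the
// RM API:

#include <stdlib.h>

typedef int Status;
enum { OK = 0, ERR = 1 };

static Status initPartA(void **a) { *a = malloc(16); return *a ? OK : ERR; }
static Status initPartB(void **b) { *b = malloc(16); return *b ? OK : ERR; }

// Construct in order; on failure, unwind in reverse via fall-through labels.
static Status ctorWidget(void **a, void **b) {
    Status status;
    status = initPartA(a);
    if (status != OK) goto fail_a;
    status = initPartB(b);
    if (status != OK) goto fail_b;
    return OK;

fail_b:
    free(*a); // undo part A only; part B never succeeded
fail_a:
    return status;
}

int main(void) {
    void *a = NULL, *b = NULL;
    if (ctorWidget(&a, &b) == OK) { free(b); free(a); }
    return 0;
}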
+ __nvoc_init_funcTable_GpuUserSharedData_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__GpuUserSharedData(GpuUserSharedData *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; // (rmres) super^2 + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; // (mem) super + pThis->__nvoc_pbase_GpuUserSharedData = pThis; // (gpushareddata) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Memory(&pThis->__nvoc_base_Memory); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__GpuUserSharedData.metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4 + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__GpuUserSharedData.metadata__Memory.metadata__RmResource.metadata__RsResource; // (res) super^3 + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__GpuUserSharedData.metadata__Memory.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__GpuUserSharedData.metadata__Memory.metadata__RmResource; // (rmres) super^2 + pThis->__nvoc_base_Memory.__nvoc_metadata_ptr = &__nvoc_metadata__GpuUserSharedData.metadata__Memory; // (mem) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__GpuUserSharedData; // (gpushareddata) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_GpuUserSharedData(pThis); +} + +NV_STATUS __nvoc_objCreate_GpuUserSharedData(GpuUserSharedData **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + GpuUserSharedData *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(GpuUserSharedData), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(GpuUserSharedData)); + + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. 
+ if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__GpuUserSharedData(pThis); + status = __nvoc_ctor_GpuUserSharedData(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_GpuUserSharedData_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_GpuUserSharedData_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(GpuUserSharedData)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_GpuUserSharedData(GpuUserSharedData **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_GpuUserSharedData(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_user_shared_data_nvoc.h b/src/nvidia/generated/g_gpu_user_shared_data_nvoc.h new file mode 100644 index 0000000..0079849 --- /dev/null +++ b/src/nvidia/generated/g_gpu_user_shared_data_nvoc.h @@ -0,0 +1,354 @@ + +#ifndef _G_GPU_USER_SHARED_DATA_NVOC_H_ +#define _G_GPU_USER_SHARED_DATA_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once +#include "g_gpu_user_shared_data_nvoc.h" + +#ifndef GPU_USER_SHARED_DATA_H +#define GPU_USER_SHARED_DATA_H + +#include "core/core.h" +#include "mem_mgr/mem.h" +#include "gpu/gpu.h" +#include "nvoc/utility.h" +#include "ctrl/ctrl00de.h" + +// **************************************************************************** +// Type definitions +// **************************************************************************** + +/*! + * RM internal class representing RM_USER_SHARED_DATA + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_GPU_USER_SHARED_DATA_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__GpuUserSharedData; +struct NVOC_METADATA__Memory; +struct NVOC_VTABLE__GpuUserSharedData; + + +struct GpuUserSharedData { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__GpuUserSharedData *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct Memory __nvoc_base_Memory; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^4 + struct RsResource *__nvoc_pbase_RsResource; // res super^3 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^2 + struct Memory *__nvoc_pbase_Memory; // mem super + struct GpuUserSharedData *__nvoc_pbase_GpuUserSharedData; // gpushareddata + + // Vtable with 1 per-object function pointer + NV_STATUS (*__gpushareddataCtrlCmdRequestDataPoll__)(struct GpuUserSharedData * /*this*/, NV00DE_CTRL_REQUEST_DATA_POLL_PARAMS *); // exported (id=0xde0001) + + // Data members + NvU64 polledDataMask; +}; + + +// Vtable with 26 per-class function pointers +struct NVOC_VTABLE__GpuUserSharedData { + NvBool (*__gpushareddataCanCopy__)(struct GpuUserSharedData * /*this*/); // virtual override (res) base (mem) + NV_STATUS (*__gpushareddataIsDuplicate__)(struct GpuUserSharedData * /*this*/, NvHandle, NvBool *); // virtual inherited (mem) base (mem) + NV_STATUS (*__gpushareddataGetMapAddrSpace__)(struct GpuUserSharedData * /*this*/, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (mem) base (mem) + NV_STATUS (*__gpushareddataControl__)(struct GpuUserSharedData * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (mem) base (mem) + NV_STATUS (*__gpushareddataMap__)(struct GpuUserSharedData * /*this*/, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (mem) base (mem) + NV_STATUS (*__gpushareddataUnmap__)(struct GpuUserSharedData * /*this*/, CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (mem) base (mem) + NV_STATUS (*__gpushareddataGetMemInterMapParams__)(struct GpuUserSharedData * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (mem) base (mem) + NV_STATUS (*__gpushareddataCheckMemInterUnmap__)(struct GpuUserSharedData * /*this*/, NvBool); // inline virtual inherited (mem) base (mem) body + NV_STATUS (*__gpushareddataGetMemoryMappingDescriptor__)(struct GpuUserSharedData * /*this*/, MEMORY_DESCRIPTOR **); // virtual inherited (mem) base (mem) + NV_STATUS (*__gpushareddataCheckCopyPermissions__)(struct 
GpuUserSharedData * /*this*/, struct OBJGPU *, struct Device *); // inline virtual inherited (mem) base (mem) body + NV_STATUS (*__gpushareddataIsReady__)(struct GpuUserSharedData * /*this*/, NvBool); // virtual inherited (mem) base (mem) + NvBool (*__gpushareddataIsGpuMapAllowed__)(struct GpuUserSharedData * /*this*/, struct OBJGPU *); // inline virtual inherited (mem) base (mem) body + NvBool (*__gpushareddataIsExportAllowed__)(struct GpuUserSharedData * /*this*/); // inline virtual inherited (mem) base (mem) body + NvBool (*__gpushareddataAccessCallback__)(struct GpuUserSharedData * /*this*/, RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (mem) + NvBool (*__gpushareddataShareCallback__)(struct GpuUserSharedData * /*this*/, RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (rmres) base (mem) + NV_STATUS (*__gpushareddataControlSerialization_Prologue__)(struct GpuUserSharedData * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + void (*__gpushareddataControlSerialization_Epilogue__)(struct GpuUserSharedData * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + NV_STATUS (*__gpushareddataControl_Prologue__)(struct GpuUserSharedData * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + void (*__gpushareddataControl_Epilogue__)(struct GpuUserSharedData * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + void (*__gpushareddataPreDestruct__)(struct GpuUserSharedData * /*this*/); // virtual inherited (res) base (mem) + NV_STATUS (*__gpushareddataControlFilter__)(struct GpuUserSharedData * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (mem) + NvBool (*__gpushareddataIsPartialUnmapSupported__)(struct GpuUserSharedData * /*this*/); // inline virtual inherited (res) base (mem) body + NV_STATUS (*__gpushareddataMapTo__)(struct GpuUserSharedData * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (mem) + NV_STATUS (*__gpushareddataUnmapFrom__)(struct GpuUserSharedData * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (mem) + NvU32 (*__gpushareddataGetRefCount__)(struct GpuUserSharedData * /*this*/); // virtual inherited (res) base (mem) + void (*__gpushareddataAddAdditionalDependants__)(struct RsClient *, struct GpuUserSharedData * /*this*/, RsResourceRef *); // virtual inherited (res) base (mem) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__GpuUserSharedData { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Memory metadata__Memory; + const struct NVOC_VTABLE__GpuUserSharedData vtable; +}; + +#ifndef __NVOC_CLASS_GpuUserSharedData_TYPEDEF__ +#define __NVOC_CLASS_GpuUserSharedData_TYPEDEF__ +typedef struct GpuUserSharedData GpuUserSharedData; +#endif /* __NVOC_CLASS_GpuUserSharedData_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuUserSharedData +#define __nvoc_class_id_GpuUserSharedData 0x5e7d1f +#endif /* __nvoc_class_id_GpuUserSharedData */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuUserSharedData; + +#define __staticCast_GpuUserSharedData(pThis) \ + ((pThis)->__nvoc_pbase_GpuUserSharedData) + +#ifdef __nvoc_gpu_user_shared_data_h_disabled +#define __dynamicCast_GpuUserSharedData(pThis) ((GpuUserSharedData*) NULL) +#else 
//__nvoc_gpu_user_shared_data_h_disabled +#define __dynamicCast_GpuUserSharedData(pThis) \ + ((GpuUserSharedData*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuUserSharedData))) +#endif //__nvoc_gpu_user_shared_data_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_GpuUserSharedData(GpuUserSharedData**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GpuUserSharedData(GpuUserSharedData**, Dynamic*, NvU32, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_GpuUserSharedData(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_GpuUserSharedData((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define gpushareddataCanCopy_FNPTR(pData) pData->__nvoc_metadata_ptr->vtable.__gpushareddataCanCopy__ +#define gpushareddataCanCopy(pData) gpushareddataCanCopy_DISPATCH(pData) +#define gpushareddataCtrlCmdRequestDataPoll_FNPTR(pData) pData->__gpushareddataCtrlCmdRequestDataPoll__ +#define gpushareddataCtrlCmdRequestDataPoll(pData, pParams) gpushareddataCtrlCmdRequestDataPoll_DISPATCH(pData, pParams) +#define gpushareddataIsDuplicate_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsDuplicate__ +#define gpushareddataIsDuplicate(pMemory, hMemory, pDuplicate) gpushareddataIsDuplicate_DISPATCH(pMemory, hMemory, pDuplicate) +#define gpushareddataGetMapAddrSpace_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMapAddrSpace__ +#define gpushareddataGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) gpushareddataGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define gpushareddataControl_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memControl__ +#define gpushareddataControl(pMemory, pCallContext, pParams) gpushareddataControl_DISPATCH(pMemory, pCallContext, pParams) +#define gpushareddataMap_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memMap__ +#define gpushareddataMap(pMemory, pCallContext, pParams, pCpuMapping) gpushareddataMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define gpushareddataUnmap_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memUnmap__ +#define gpushareddataUnmap(pMemory, pCallContext, pCpuMapping) gpushareddataUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define gpushareddataGetMemInterMapParams_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMemInterMapParams__ +#define gpushareddataGetMemInterMapParams(pMemory, pParams) gpushareddataGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define gpushareddataCheckMemInterUnmap_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memCheckMemInterUnmap__ +#define gpushareddataCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) gpushareddataCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define gpushareddataGetMemoryMappingDescriptor_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMemoryMappingDescriptor__ +#define gpushareddataGetMemoryMappingDescriptor(pMemory, ppMemDesc) gpushareddataGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define gpushareddataCheckCopyPermissions_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memCheckCopyPermissions__ +#define gpushareddataCheckCopyPermissions(pMemory, pDstGpu, pDstDevice) 
gpushareddataCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, pDstDevice) +#define gpushareddataIsReady_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsReady__ +#define gpushareddataIsReady(pMemory, bCopyConstructorContext) gpushareddataIsReady_DISPATCH(pMemory, bCopyConstructorContext) +#define gpushareddataIsGpuMapAllowed_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsGpuMapAllowed__ +#define gpushareddataIsGpuMapAllowed(pMemory, pGpu) gpushareddataIsGpuMapAllowed_DISPATCH(pMemory, pGpu) +#define gpushareddataIsExportAllowed_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsExportAllowed__ +#define gpushareddataIsExportAllowed(pMemory) gpushareddataIsExportAllowed_DISPATCH(pMemory) +#define gpushareddataAccessCallback_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define gpushareddataAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) gpushareddataAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define gpushareddataShareCallback_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define gpushareddataShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) gpushareddataShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define gpushareddataControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define gpushareddataControlSerialization_Prologue(pResource, pCallContext, pParams) gpushareddataControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define gpushareddataControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define gpushareddataControlSerialization_Epilogue(pResource, pCallContext, pParams) gpushareddataControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define gpushareddataControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define gpushareddataControl_Prologue(pResource, pCallContext, pParams) gpushareddataControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define gpushareddataControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define gpushareddataControl_Epilogue(pResource, pCallContext, pParams) gpushareddataControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define gpushareddataPreDestruct_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define gpushareddataPreDestruct(pResource) gpushareddataPreDestruct_DISPATCH(pResource) +#define gpushareddataControlFilter_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define gpushareddataControlFilter(pResource, pCallContext, pParams) gpushareddataControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define gpushareddataIsPartialUnmapSupported_FNPTR(pResource) 
pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define gpushareddataIsPartialUnmapSupported(pResource) gpushareddataIsPartialUnmapSupported_DISPATCH(pResource) +#define gpushareddataMapTo_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define gpushareddataMapTo(pResource, pParams) gpushareddataMapTo_DISPATCH(pResource, pParams) +#define gpushareddataUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define gpushareddataUnmapFrom(pResource, pParams) gpushareddataUnmapFrom_DISPATCH(pResource, pParams) +#define gpushareddataGetRefCount_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define gpushareddataGetRefCount(pResource) gpushareddataGetRefCount_DISPATCH(pResource) +#define gpushareddataAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define gpushareddataAddAdditionalDependants(pClient, pResource, pReference) gpushareddataAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NvBool gpushareddataCanCopy_DISPATCH(struct GpuUserSharedData *pData) { + return pData->__nvoc_metadata_ptr->vtable.__gpushareddataCanCopy__(pData); +} + +static inline NV_STATUS gpushareddataCtrlCmdRequestDataPoll_DISPATCH(struct GpuUserSharedData *pData, NV00DE_CTRL_REQUEST_DATA_POLL_PARAMS *pParams) { + return pData->__gpushareddataCtrlCmdRequestDataPoll__(pData, pParams); +} + +static inline NV_STATUS gpushareddataIsDuplicate_DISPATCH(struct GpuUserSharedData *pMemory, NvHandle hMemory, NvBool *pDuplicate) { + return pMemory->__nvoc_metadata_ptr->vtable.__gpushareddataIsDuplicate__(pMemory, hMemory, pDuplicate); +} + +static inline NV_STATUS gpushareddataGetMapAddrSpace_DISPATCH(struct GpuUserSharedData *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__nvoc_metadata_ptr->vtable.__gpushareddataGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS gpushareddataControl_DISPATCH(struct GpuUserSharedData *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__nvoc_metadata_ptr->vtable.__gpushareddataControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS gpushareddataMap_DISPATCH(struct GpuUserSharedData *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__nvoc_metadata_ptr->vtable.__gpushareddataMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS gpushareddataUnmap_DISPATCH(struct GpuUserSharedData *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__nvoc_metadata_ptr->vtable.__gpushareddataUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS gpushareddataGetMemInterMapParams_DISPATCH(struct GpuUserSharedData *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__nvoc_metadata_ptr->vtable.__gpushareddataGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS gpushareddataCheckMemInterUnmap_DISPATCH(struct GpuUserSharedData *pMemory, 
NvBool bSubdeviceHandleProvided) { + return pMemory->__nvoc_metadata_ptr->vtable.__gpushareddataCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS gpushareddataGetMemoryMappingDescriptor_DISPATCH(struct GpuUserSharedData *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__nvoc_metadata_ptr->vtable.__gpushareddataGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS gpushareddataCheckCopyPermissions_DISPATCH(struct GpuUserSharedData *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice) { + return pMemory->__nvoc_metadata_ptr->vtable.__gpushareddataCheckCopyPermissions__(pMemory, pDstGpu, pDstDevice); +} + +static inline NV_STATUS gpushareddataIsReady_DISPATCH(struct GpuUserSharedData *pMemory, NvBool bCopyConstructorContext) { + return pMemory->__nvoc_metadata_ptr->vtable.__gpushareddataIsReady__(pMemory, bCopyConstructorContext); +} + +static inline NvBool gpushareddataIsGpuMapAllowed_DISPATCH(struct GpuUserSharedData *pMemory, struct OBJGPU *pGpu) { + return pMemory->__nvoc_metadata_ptr->vtable.__gpushareddataIsGpuMapAllowed__(pMemory, pGpu); +} + +static inline NvBool gpushareddataIsExportAllowed_DISPATCH(struct GpuUserSharedData *pMemory) { + return pMemory->__nvoc_metadata_ptr->vtable.__gpushareddataIsExportAllowed__(pMemory); +} + +static inline NvBool gpushareddataAccessCallback_DISPATCH(struct GpuUserSharedData *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__gpushareddataAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool gpushareddataShareCallback_DISPATCH(struct GpuUserSharedData *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__gpushareddataShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS gpushareddataControlSerialization_Prologue_DISPATCH(struct GpuUserSharedData *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__gpushareddataControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void gpushareddataControlSerialization_Epilogue_DISPATCH(struct GpuUserSharedData *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__gpushareddataControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS gpushareddataControl_Prologue_DISPATCH(struct GpuUserSharedData *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__gpushareddataControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void gpushareddataControl_Epilogue_DISPATCH(struct GpuUserSharedData *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__gpushareddataControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline void gpushareddataPreDestruct_DISPATCH(struct GpuUserSharedData *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__gpushareddataPreDestruct__(pResource); +} + +static inline NV_STATUS gpushareddataControlFilter_DISPATCH(struct GpuUserSharedData *pResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__gpushareddataControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool gpushareddataIsPartialUnmapSupported_DISPATCH(struct GpuUserSharedData *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__gpushareddataIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS gpushareddataMapTo_DISPATCH(struct GpuUserSharedData *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__gpushareddataMapTo__(pResource, pParams); +} + +static inline NV_STATUS gpushareddataUnmapFrom_DISPATCH(struct GpuUserSharedData *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__gpushareddataUnmapFrom__(pResource, pParams); +} + +static inline NvU32 gpushareddataGetRefCount_DISPATCH(struct GpuUserSharedData *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__gpushareddataGetRefCount__(pResource); +} + +static inline void gpushareddataAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GpuUserSharedData *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__gpushareddataAddAdditionalDependants__(pClient, pResource, pReference); +} + +NvBool gpushareddataCanCopy_IMPL(struct GpuUserSharedData *pData); + +NV_STATUS gpushareddataCtrlCmdRequestDataPoll_IMPL(struct GpuUserSharedData *pData, NV00DE_CTRL_REQUEST_DATA_POLL_PARAMS *pParams); + +NV_STATUS gpushareddataConstruct_IMPL(struct GpuUserSharedData *arg_pData, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_gpushareddataConstruct(arg_pData, arg_pCallContext, arg_pParams) gpushareddataConstruct_IMPL(arg_pData, arg_pCallContext, arg_pParams) +void gpushareddataDestruct_IMPL(struct GpuUserSharedData *pData); + +#define __nvoc_gpushareddataDestruct(pData) gpushareddataDestruct_IMPL(pData) +#undef PRIVATE_FIELD + + +#endif // GPU_USER_SHARED_DATA_H + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_GPU_USER_SHARED_DATA_NVOC_H_ diff --git a/src/nvidia/generated/g_hal.h b/src/nvidia/generated/g_hal.h new file mode 100644 index 0000000..45547ae --- /dev/null +++ b/src/nvidia/generated/g_hal.h @@ -0,0 +1,161 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! 
+//
+// HAL support for use in HAL setup
+//
+// Profile: devel-soc-disp-dce-client
+// Template: templates/gt_hal.h
+//
+
+#ifndef _G_RMCFG_HAL_H_
+#define _G_RMCFG_HAL_H_
+
+
+typedef struct DISP_HAL_IFACES *PDISP_HAL_IFACES;
+typedef struct DPU_HAL_IFACES *PDPU_HAL_IFACES;
+typedef struct RPC_HAL_IFACES *PRPC_HAL_IFACES;
+typedef struct RPCSTRUCTURECOPY_HAL_IFACES *PRPCSTRUCTURECOPY_HAL_IFACES;
+
+
+
+//
+// per-GPU list of function ptrs to setup iface for each engine
+//
+
+typedef struct {
+
+
+} HAL_IFACE_SETUP, *PHAL_IFACE_SETUP;
+
+
+
+//
+// IP_VERSIONS support
+//
+
+typedef struct IGRP_IP_VERSIONS_TABLE_INFO IGRP_IP_VERSIONS_TABLE_INFO;
+
+// generic form of Head_iGrp_ipVersions_getInfo typedef
+
+typedef void IGrp_ipVersions_install(IGRP_IP_VERSIONS_TABLE_INFO *);
+typedef NV_STATUS IGrp_ipVersions_wrapup(IGRP_IP_VERSIONS_TABLE_INFO *);
+
+// a single inclusive version range
+typedef struct {
+    NvU32 v0;
+    NvU32 v1;
+} IGRP_IP_VERSION_RANGE;
+
+
+typedef struct {
+    const IGRP_IP_VERSION_RANGE *pRanges;
+    NvU32 numRanges;
+    IGrp_ipVersions_install *ifacesInstallFn;
+} IGRP_IP_VERSIONS_ENTRY;
+
+
+struct IGRP_IP_VERSIONS_TABLE_INFO {
+    POBJGPU pGpu;
+    Dynamic *pDynamic; // eg: pBiff
+
+    const IGRP_IP_VERSIONS_ENTRY *pTable;
+    NvU32 numEntries;
+    IGrp_ipVersions_wrapup *ifacesWrapupFn; // overrides and asserts
+};
+
+// HAL_IMPLEMENTATION enum
+typedef enum
+{
+    HAL_IMPL_GF100,
+    HAL_IMPL_GF100B,
+    HAL_IMPL_GF104,
+    HAL_IMPL_GF104B,
+    HAL_IMPL_GF106,
+    HAL_IMPL_GF106B,
+    HAL_IMPL_GF108,
+    HAL_IMPL_GF110D,
+    HAL_IMPL_GF110,
+    HAL_IMPL_GF117,
+    HAL_IMPL_GF118,
+    HAL_IMPL_GF119,
+    HAL_IMPL_GF110F,
+    HAL_IMPL_GF110F2,
+    HAL_IMPL_GF110F3,
+    HAL_IMPL_GK104,
+    HAL_IMPL_GK106,
+    HAL_IMPL_GK107,
+    HAL_IMPL_GK20A,
+    HAL_IMPL_GK110,
+    HAL_IMPL_GK110B,
+    HAL_IMPL_GK110C,
+    HAL_IMPL_GK208,
+    HAL_IMPL_GK208S,
+    HAL_IMPL_GM107,
+    HAL_IMPL_GM108,
+    HAL_IMPL_GM200,
+    HAL_IMPL_GM204,
+    HAL_IMPL_GM206,
+    HAL_IMPL_GP100,
+    HAL_IMPL_GP102,
+    HAL_IMPL_GP104,
+    HAL_IMPL_GP106,
+    HAL_IMPL_GP107,
+    HAL_IMPL_GP108,
+    HAL_IMPL_GV100,
+    HAL_IMPL_GV11B,
+    HAL_IMPL_TU102,
+    HAL_IMPL_TU104,
+    HAL_IMPL_TU106,
+    HAL_IMPL_TU116,
+    HAL_IMPL_TU117,
+    HAL_IMPL_GA100,
+    HAL_IMPL_GA102,
+    HAL_IMPL_GA103,
+    HAL_IMPL_GA104,
+    HAL_IMPL_GA106,
+    HAL_IMPL_GA107,
+    HAL_IMPL_GA10B,
+    HAL_IMPL_GA102F,
+    HAL_IMPL_AD102,
+    HAL_IMPL_AD103,
+    HAL_IMPL_AD104,
+    HAL_IMPL_AD106,
+    HAL_IMPL_AD107,
+    HAL_IMPL_GH100,
+    HAL_IMPL_GB100,
+    HAL_IMPL_GB102,
+    HAL_IMPL_GB110,
+    HAL_IMPL_GB112,
+    HAL_IMPL_GB202,
+    HAL_IMPL_GB203,
+    HAL_IMPL_GB205,
+    HAL_IMPL_GB206,
+    HAL_IMPL_GB207,
+    HAL_IMPL_T001_FERMI_NOT_EXIST,
+    HAL_IMPL_T124,
+    HAL_IMPL_T132,
+    HAL_IMPL_T210,
+    HAL_IMPL_T186,
+    HAL_IMPL_T194,
+    HAL_IMPL_T002_TURING_NOT_EXIST,
+    HAL_IMPL_T234,
+    HAL_IMPL_T003_ADA_NOT_EXIST,
+    HAL_IMPL_T004_HOPPER_NOT_EXIST,
+    HAL_IMPL_T234D,
+    HAL_IMPL_T264D,
+    HAL_IMPL_T256D,
+    HAL_IMPL_AMODEL,
+
+    HAL_IMPL_MAXIMUM, // NOTE: this symbol must be at the end of the enum list.
+                      // It is used to allocate arrays and control loop iterations.
+} HAL_IMPLEMENTATION;
+
+//
+// HAL implementation names for debug & logging use
+//
+#define HAL_IMPL_NAME_LIST \
+    { HAL_IMPL_T234D, "T234D" }, \
+    { HAL_IMPL_T264D, "T264D" }, \
+    { HAL_IMPL_T256D, "T256D" }
+
+
+#endif // _G_RMCFG_HAL_H_
diff --git a/src/nvidia/generated/g_hal_archimpl.h b/src/nvidia/generated/g_hal_archimpl.h
new file mode 100644
index 0000000..f0a65f1
--- /dev/null
+++ b/src/nvidia/generated/g_hal_archimpl.h
@@ -0,0 +1,113 @@
+// This file is automatically generated by rmconfig - DO NOT EDIT!
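// HAL_IMPL_MAXIMUM above is a sentinel: because it sits last, it equals the
// number of enumerators, so parallel arrays such as chipID[] just below in
// g_hal_archimpl.h can be sized and iterated without a separate count, and
// HAL_IMPL_NAME_LIST can seed a name-lookup table. A standalone sketch of the
// sentinel-plus-name-table pattern, with made-up IMPL_* names:

#include <stddef.h>
#include <stdio.h>

typedef enum { IMPL_T234D, IMPL_T264D, IMPL_T256D, IMPL_MAXIMUM } Impl;

// Name table seeded from a list macro, mirroring HAL_IMPL_NAME_LIST.
#define IMPL_NAME_LIST \
    { IMPL_T234D, "T234D" }, \
    { IMPL_T264D, "T264D" }, \
    { IMPL_T256D, "T256D" }

static const struct { Impl impl; const char *name; } implNames[] = { IMPL_NAME_LIST };

static const char *implName(Impl impl) {
    for (size_t i = 0; i < sizeof(implNames) / sizeof(implNames[0]); i++)
        if (implNames[i].impl == impl)
            return implNames[i].name;
    return "unknown";
}

int main(void) {
    // The sentinel keeps loop bounds and array sizes in step with the enum.
    printf("%d impls; first is %s\n", (int)IMPL_MAXIMUM, implName(IMPL_T234D));
    return 0;
}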
+// +// Hal registration entry points. +// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_hal_archimpl.h +// +// Chips: T234D, T26XD, T25XD +// + +#ifndef _G_RMCFG_HAL_ARCHIMPL_H_ +#define _G_RMCFG_HAL_ARCHIMPL_H_ + +#include "g_hal.h" + +// OpenRM for Tegra build uses different include path +// The following lines refer to the same file. +// TODO: merge them +#include "nv_ref.h" + +// +// CHIPID array Implementation +// +const struct ChipID +{ + NvU32 arch; + NvU32 impl; + NvU32 hidrev; +} chipID[] = { + { 0x0, 0x0, 0x0 } , // GF100 (disabled) + { 0x0, 0x0, 0x0 } , // GF100B (disabled) + { 0x0, 0x0, 0x0 } , // GF104 (disabled) + { 0x0, 0x0, 0x0 } , // GF104B (disabled) + { 0x0, 0x0, 0x0 } , // GF106 (disabled) + { 0x0, 0x0, 0x0 } , // GF106B (disabled) + { 0x0, 0x0, 0x0 } , // GF108 (disabled) + { 0x0, 0x0, 0x0 } , // GF110D (disabled) + { 0x0, 0x0, 0x0 } , // GF110 (disabled) + { 0x0, 0x0, 0x0 } , // GF117 (disabled) + { 0x0, 0x0, 0x0 } , // GF118 (disabled) + { 0x0, 0x0, 0x0 } , // GF119 (disabled) + { 0x0, 0x0, 0x0 } , // GF110F (disabled) + { 0x0, 0x0, 0x0 } , // GF110F2 (disabled) + { 0x0, 0x0, 0x0 } , // GF110F3 (disabled) + { 0x0, 0x0, 0x0 } , // GK104 (disabled) + { 0x0, 0x0, 0x0 } , // GK106 (disabled) + { 0x0, 0x0, 0x0 } , // GK107 (disabled) + { 0x0, 0x0, 0x0 } , // GK20A (disabled) + { 0x0, 0x0, 0x0 } , // GK110 (disabled) + { 0x0, 0x0, 0x0 } , // GK110B (disabled) + { 0x0, 0x0, 0x0 } , // GK110C (disabled) + { 0x0, 0x0, 0x0 } , // GK208 (disabled) + { 0x0, 0x0, 0x0 } , // GK208S (disabled) + { 0x0, 0x0, 0x0 } , // GM107 (disabled) + { 0x0, 0x0, 0x0 } , // GM108 (disabled) + { 0x0, 0x0, 0x0 } , // GM200 (disabled) + { 0x0, 0x0, 0x0 } , // GM204 (disabled) + { 0x0, 0x0, 0x0 } , // GM206 (disabled) + { 0x0, 0x0, 0x0 } , // GP100 (disabled) + { 0x0, 0x0, 0x0 } , // GP102 (disabled) + { 0x0, 0x0, 0x0 } , // GP104 (disabled) + { 0x0, 0x0, 0x0 } , // GP106 (disabled) + { 0x0, 0x0, 0x0 } , // GP107 (disabled) + { 0x0, 0x0, 0x0 } , // GP108 (disabled) + { 0x0, 0x0, 0x0 } , // GV100 (disabled) + { 0x0, 0x0, 0x0 } , // GV11B (disabled) + { 0x0, 0x0, 0x0 } , // TU102 (disabled) + { 0x0, 0x0, 0x0 } , // TU104 (disabled) + { 0x0, 0x0, 0x0 } , // TU106 (disabled) + { 0x0, 0x0, 0x0 } , // TU116 (disabled) + { 0x0, 0x0, 0x0 } , // TU117 (disabled) + { 0x0, 0x0, 0x0 } , // GA100 (disabled) + { 0x0, 0x0, 0x0 } , // GA102 (disabled) + { 0x0, 0x0, 0x0 } , // GA103 (disabled) + { 0x0, 0x0, 0x0 } , // GA104 (disabled) + { 0x0, 0x0, 0x0 } , // GA106 (disabled) + { 0x0, 0x0, 0x0 } , // GA107 (disabled) + { 0x0, 0x0, 0x0 } , // GA10B (disabled) + { 0x0, 0x0, 0x0 } , // GA102F (disabled) + { 0x0, 0x0, 0x0 } , // AD102 (disabled) + { 0x0, 0x0, 0x0 } , // AD103 (disabled) + { 0x0, 0x0, 0x0 } , // AD104 (disabled) + { 0x0, 0x0, 0x0 } , // AD106 (disabled) + { 0x0, 0x0, 0x0 } , // AD107 (disabled) + { 0x0, 0x0, 0x0 } , // GH100 (disabled) + { 0x0, 0x0, 0x0 } , // GB100 (disabled) + { 0x0, 0x0, 0x0 } , // GB102 (disabled) + { 0x0, 0x0, 0x0 } , // GB110 (disabled) + { 0x0, 0x0, 0x0 } , // GB112 (disabled) + { 0x0, 0x0, 0x0 } , // GB202 (disabled) + { 0x0, 0x0, 0x0 } , // GB203 (disabled) + { 0x0, 0x0, 0x0 } , // GB205 (disabled) + { 0x0, 0x0, 0x0 } , // GB206 (disabled) + { 0x0, 0x0, 0x0 } , // GB207 (disabled) + { 0x0, 0x0, 0x0 } , // T001_FERMI_NOT_EXIST (disabled) + { 0x0, 0x0, 0x0 } , // T124 (disabled) + { 0x0, 0x0, 0x0 } , // T132 (disabled) + { 0x0, 0x0, 0x0 } , // T210 (disabled) + { 0x0, 0x0, 0x0 } , // T186 (disabled) + { 0x0, 0x0, 0x0 } , // T194 (disabled) + { 0x0, 0x0, 0x0 } , 
// T002_TURING_NOT_EXIST (disabled) + { 0x0, 0x0, 0x0 } , // T234 (disabled) + { 0x0, 0x0, 0x0 } , // T003_ADA_NOT_EXIST (disabled) + { 0x0, 0x0, 0x0 } , // T004_HOPPER_NOT_EXIST (disabled) + { 0x0, 0x0, 0x235 } , // T234D + { 0x0, 0x0, 0x265 } , // T264D + { 0x0, 0x0, 0x257 } , // T256D + { 0x0, 0x0, 0x0 } , // AMODEL (disabled) + +}; + +#endif // _G_RMCFG_HAL_ARCHIMPL_H_ + diff --git a/src/nvidia/generated/g_hal_mgr_nvoc.c b/src/nvidia/generated/g_hal_mgr_nvoc.c new file mode 100644 index 0000000..93601d4 --- /dev/null +++ b/src/nvidia/generated/g_hal_mgr_nvoc.c @@ -0,0 +1,204 @@ +#define NVOC_HAL_MGR_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_hal_mgr_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xbf26de = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHALMGR; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +// Forward declarations for OBJHALMGR +void __nvoc_init__Object(Object*); +void __nvoc_init__OBJHALMGR(OBJHALMGR*); +void __nvoc_init_funcTable_OBJHALMGR(OBJHALMGR*); +NV_STATUS __nvoc_ctor_OBJHALMGR(OBJHALMGR*); +void __nvoc_init_dataField_OBJHALMGR(OBJHALMGR*); +void __nvoc_dtor_OBJHALMGR(OBJHALMGR*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__OBJHALMGR; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJHALMGR; + +// Down-thunk(s) to bridge OBJHALMGR methods from ancestors (if any) + +// Up-thunk(s) to bridge OBJHALMGR methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHALMGR = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJHALMGR), + /*classId=*/ classId(OBJHALMGR), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJHALMGR", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJHALMGR, + /*pCastInfo=*/ &__nvoc_castinfo__OBJHALMGR, + /*pExportInfo=*/ &__nvoc_export_info__OBJHALMGR +}; + + +// Metadata with per-class RTTI with ancestor(s) +static const struct NVOC_METADATA__OBJHALMGR __nvoc_metadata__OBJHALMGR = { + .rtti.pClassDef = &__nvoc_class_def_OBJHALMGR, // (halmgr) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJHALMGR, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(OBJHALMGR, __nvoc_base_Object), +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__OBJHALMGR = { + .numRelatives = 2, + .relatives = { + &__nvoc_metadata__OBJHALMGR.rtti, // [0]: (halmgr) this + &__nvoc_metadata__OBJHALMGR.metadata__Object.rtti, // [1]: (obj) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJHALMGR = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJHALMGR(OBJHALMGR *pThis) { + __nvoc_halmgrDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJHALMGR(OBJHALMGR *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJHALMGR(OBJHALMGR 
*pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJHALMGR_fail_Object; + __nvoc_init_dataField_OBJHALMGR(pThis); + + status = __nvoc_halmgrConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_OBJHALMGR_fail__init; + goto __nvoc_ctor_OBJHALMGR_exit; // Success + +__nvoc_ctor_OBJHALMGR_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJHALMGR_fail_Object: +__nvoc_ctor_OBJHALMGR_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_OBJHALMGR_1(OBJHALMGR *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_OBJHALMGR_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_OBJHALMGR(OBJHALMGR *pThis) { + __nvoc_init_funcTable_OBJHALMGR_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__OBJHALMGR(OBJHALMGR *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_OBJHALMGR = pThis; // (halmgr) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Object(&pThis->__nvoc_base_Object); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__OBJHALMGR.metadata__Object; // (obj) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__OBJHALMGR; // (halmgr) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_OBJHALMGR(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJHALMGR(OBJHALMGR **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + OBJHALMGR *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(OBJHALMGR), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(OBJHALMGR)); + + pThis->__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__OBJHALMGR(pThis); + status = __nvoc_ctor_OBJHALMGR(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJHALMGR_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_OBJHALMGR_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. 
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(OBJHALMGR)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJHALMGR(OBJHALMGR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJHALMGR(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_hal_mgr_nvoc.h b/src/nvidia/generated/g_hal_mgr_nvoc.h new file mode 100644 index 0000000..da93130 --- /dev/null +++ b/src/nvidia/generated/g_hal_mgr_nvoc.h @@ -0,0 +1,175 @@ + +#ifndef _G_HAL_MGR_NVOC_H_ +#define _G_HAL_MGR_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_hal_mgr_nvoc.h" + +#ifndef _HAL_MGR_H_ +#define _HAL_MGR_H_ + +#include "core/core.h" +#include "core/hal.h" +#include "nvoc/object.h" + +#define HALMGR_GET_HAL(p, halid) halmgrGetHal((p), halid) + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_HAL_MGR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJHALMGR; +struct NVOC_METADATA__Object; + + +struct OBJHALMGR { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__OBJHALMGR *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Object __nvoc_base_Object; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct OBJHALMGR *__nvoc_pbase_OBJHALMGR; // halmgr + + // Data members + struct OBJHAL *pHalList[79]; +}; + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJHALMGR { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; +}; + +#ifndef __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +typedef struct OBJHALMGR OBJHALMGR; +#endif /* __NVOC_CLASS_OBJHALMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHALMGR +#define __nvoc_class_id_OBJHALMGR 0xbf26de +#endif /* __nvoc_class_id_OBJHALMGR */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHALMGR; + +#define __staticCast_OBJHALMGR(pThis) \ + ((pThis)->__nvoc_pbase_OBJHALMGR) + +#ifdef __nvoc_hal_mgr_h_disabled +#define __dynamicCast_OBJHALMGR(pThis) ((OBJHALMGR*) NULL) +#else //__nvoc_hal_mgr_h_disabled +#define __dynamicCast_OBJHALMGR(pThis) \ + ((OBJHALMGR*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJHALMGR))) +#endif //__nvoc_hal_mgr_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_OBJHALMGR(OBJHALMGR**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJHALMGR(OBJHALMGR**, Dynamic*, NvU32); +#define __objCreate_OBJHALMGR(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJHALMGR((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros + +// Dispatch functions +NV_STATUS halmgrConstruct_IMPL(struct OBJHALMGR *arg_); + +#define __nvoc_halmgrConstruct(arg_) halmgrConstruct_IMPL(arg_) +void halmgrDestruct_IMPL(struct OBJHALMGR *arg1); + +#define __nvoc_halmgrDestruct(arg1) halmgrDestruct_IMPL(arg1) +NV_STATUS halmgrCreateHal_IMPL(struct OBJHALMGR *arg1, NvU32 arg2); + +#ifdef __nvoc_hal_mgr_h_disabled +static inline NV_STATUS halmgrCreateHal(struct OBJHALMGR *arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJHALMGR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_hal_mgr_h_disabled +#define halmgrCreateHal(arg1, arg2) halmgrCreateHal_IMPL(arg1, arg2) +#endif //__nvoc_hal_mgr_h_disabled + +NV_STATUS halmgrGetHalForGpu_IMPL(struct OBJHALMGR *arg1, NvU32 arg2, NvU32 arg3, NvU32 *arg4); + +#ifdef __nvoc_hal_mgr_h_disabled +static inline NV_STATUS halmgrGetHalForGpu(struct OBJHALMGR *arg1, NvU32 arg2, NvU32 arg3, NvU32 *arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJHALMGR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_hal_mgr_h_disabled +#define halmgrGetHalForGpu(arg1, arg2, arg3, arg4) halmgrGetHalForGpu_IMPL(arg1, arg2, arg3, arg4) +#endif //__nvoc_hal_mgr_h_disabled + +struct OBJHAL *halmgrGetHal_IMPL(struct OBJHALMGR *arg1, NvU32 arg2); + +#ifdef __nvoc_hal_mgr_h_disabled +static inline struct OBJHAL *halmgrGetHal(struct OBJHALMGR *arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJHALMGR was disabled!"); + return NULL; +} +#else //__nvoc_hal_mgr_h_disabled +#define halmgrGetHal(arg1, arg2) halmgrGetHal_IMPL(arg1, arg2) +#endif //__nvoc_hal_mgr_h_disabled + +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_HAL_MGR_NVOC_H_ diff --git a/src/nvidia/generated/g_hal_nvoc.c b/src/nvidia/generated/g_hal_nvoc.c new file mode 100644 index 0000000..b8fd9a6 --- /dev/null +++ b/src/nvidia/generated/g_hal_nvoc.c @@ -0,0 +1,198 @@ +#define NVOC_HAL_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef 
NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_hal_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xe803b6 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHAL; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +// Forward declarations for OBJHAL +void __nvoc_init__Object(Object*); +void __nvoc_init__OBJHAL(OBJHAL*); +void __nvoc_init_funcTable_OBJHAL(OBJHAL*); +NV_STATUS __nvoc_ctor_OBJHAL(OBJHAL*); +void __nvoc_init_dataField_OBJHAL(OBJHAL*); +void __nvoc_dtor_OBJHAL(OBJHAL*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__OBJHAL; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJHAL; + +// Down-thunk(s) to bridge OBJHAL methods from ancestors (if any) + +// Up-thunk(s) to bridge OBJHAL methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHAL = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJHAL), + /*classId=*/ classId(OBJHAL), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJHAL", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJHAL, + /*pCastInfo=*/ &__nvoc_castinfo__OBJHAL, + /*pExportInfo=*/ &__nvoc_export_info__OBJHAL +}; + + +// Metadata with per-class RTTI with ancestor(s) +static const struct NVOC_METADATA__OBJHAL __nvoc_metadata__OBJHAL = { + .rtti.pClassDef = &__nvoc_class_def_OBJHAL, // (objhal) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJHAL, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(OBJHAL, __nvoc_base_Object), +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__OBJHAL = { + .numRelatives = 2, + .relatives = { + &__nvoc_metadata__OBJHAL.rtti, // [0]: (objhal) this + &__nvoc_metadata__OBJHAL.metadata__Object.rtti, // [1]: (obj) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJHAL = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJHAL(OBJHAL *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJHAL(OBJHAL *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJHAL(OBJHAL *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJHAL_fail_Object; + __nvoc_init_dataField_OBJHAL(pThis); + goto __nvoc_ctor_OBJHAL_exit; // Success + +__nvoc_ctor_OBJHAL_fail_Object: +__nvoc_ctor_OBJHAL_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_OBJHAL_1(OBJHAL *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_OBJHAL_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_OBJHAL(OBJHAL *pThis) { + __nvoc_init_funcTable_OBJHAL_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__OBJHAL(OBJHAL *pThis) { + + // Initialize pointers to inherited data. 
+ pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_OBJHAL = pThis; // (objhal) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Object(&pThis->__nvoc_base_Object); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__OBJHAL.metadata__Object; // (obj) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__OBJHAL; // (objhal) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_OBJHAL(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJHAL(OBJHAL **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + OBJHAL *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(OBJHAL), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(OBJHAL)); + + pThis->__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__OBJHAL(pThis); + status = __nvoc_ctor_OBJHAL(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJHAL_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_OBJHAL_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(OBJHAL)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJHAL(OBJHAL **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJHAL(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_hal_nvoc.h b/src/nvidia/generated/g_hal_nvoc.h new file mode 100644 index 0000000..65e98a7 --- /dev/null +++ b/src/nvidia/generated/g_hal_nvoc.h @@ -0,0 +1,185 @@ + +#ifndef _G_HAL_NVOC_H_ +#define _G_HAL_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_hal_nvoc.h" + +#ifndef _OBJHAL_H_ +#define _OBJHAL_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: hal.h * +* Defines and structures used for the HAL Object. * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "nvoc/object.h" + +// +// HAL Info Block Id: +// +// 31 7 0 +// .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | 24 bits | 8 bits | +// .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// Info ID # Impl +// +// Impl: The hal implementation +// Info ID number: unique id for a particular info type +// +#define MKHALINFOID(impl,infoId) (((infoId & 0xffffff) << 8) | (impl & 0xff)) + +typedef struct MODULEDESCRIPTOR MODULEDESCRIPTOR, *PMODULEDESCRIPTOR; + +struct MODULEDESCRIPTOR { + + // (rmconfig) per-obj function ptr to init hal interfaces + const HAL_IFACE_SETUP *pHalSetIfaces; +}; + +typedef struct OBJHAL *POBJHAL; + +#ifndef __NVOC_CLASS_OBJHAL_TYPEDEF__ +#define __NVOC_CLASS_OBJHAL_TYPEDEF__ +typedef struct OBJHAL OBJHAL; +#endif /* __NVOC_CLASS_OBJHAL_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHAL +#define __nvoc_class_id_OBJHAL 0xe803b6 +#endif /* __nvoc_class_id_OBJHAL */ + + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_HAL_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJHAL; +struct NVOC_METADATA__Object; + + +struct OBJHAL { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__OBJHAL *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Object __nvoc_base_Object; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct OBJHAL *__nvoc_pbase_OBJHAL; // objhal + + // Data members + struct MODULEDESCRIPTOR moduleDescriptor; +}; + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJHAL { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; +}; + +#ifndef __NVOC_CLASS_OBJHAL_TYPEDEF__ +#define __NVOC_CLASS_OBJHAL_TYPEDEF__ +typedef struct OBJHAL OBJHAL; +#endif /* __NVOC_CLASS_OBJHAL_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHAL +#define __nvoc_class_id_OBJHAL 0xe803b6 +#endif /* __nvoc_class_id_OBJHAL */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHAL; + +#define __staticCast_OBJHAL(pThis) \ + ((pThis)->__nvoc_pbase_OBJHAL) + +#ifdef __nvoc_hal_h_disabled +#define __dynamicCast_OBJHAL(pThis) ((OBJHAL*) NULL) +#else //__nvoc_hal_h_disabled +#define __dynamicCast_OBJHAL(pThis) \ + ((OBJHAL*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJHAL))) +#endif //__nvoc_hal_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_OBJHAL(OBJHAL**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJHAL(OBJHAL**, Dynamic*, NvU32); +#define __objCreate_OBJHAL(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJHAL((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros + +// Dispatch functions +PMODULEDESCRIPTOR objhalGetModuleDescriptor_IMPL(struct OBJHAL *pHal); + +#ifdef __nvoc_hal_h_disabled +static inline PMODULEDESCRIPTOR objhalGetModuleDescriptor(struct OBJHAL *pHal) { + NV_ASSERT_FAILED_PRECOMP("OBJHAL was disabled!"); + return NULL; +} +#else //__nvoc_hal_h_disabled +#define objhalGetModuleDescriptor(pHal) objhalGetModuleDescriptor_IMPL(pHal) +#endif //__nvoc_hal_h_disabled + +#undef PRIVATE_FIELD + + +#endif // _OBJHAL_H_ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_HAL_NVOC_H_ diff --git a/src/nvidia/generated/g_hal_private.h b/src/nvidia/generated/g_hal_private.h new file mode 100644 index 0000000..4c8595b --- /dev/null +++ b/src/nvidia/generated/g_hal_private.h @@ -0,0 +1,104 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Private HAL support for halgen. +// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_hal_private.h +// +// Chips: T234D, T26XD, T25XD +// + +// +// This file is included in several .c files for chips hal register and engines +// hal function assignment. The macros RMCFG_ENGINE_SETUP and RMCFG_HAL_SETUP_xxx +// are used to provide different content for those .c files. +// + +#ifndef _G_RMCFG_HAL_PRIVATE_H_ +#define _G_RMCFG_HAL_PRIVATE_H_ + +#include "g_hal.h" + +// establish the per-chip RMCFG_HAL_SETUP_chip #defines as needed. 
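As the comments above spell out, g_hal_private.h is a multiple-inclusion header: a chip-setup .c file defines RMCFG_ENGINE_SETUP plus either RMCFG_HAL_SETUP_ALL or a per-family RMCFG_HAL_SETUP_* selector before including it, and the header then expands to the matching halIface_* tables and registerHalModule_*() entry points for that translation unit. A minimal sketch of such a consumer, assuming a hypothetical file name (the macro names, HAL_IFACE_SETUP, and registerHalModule_*() come from this header; everything else is illustrative):

// hal_setup_tegra.c -- hypothetical translation unit consuming this header.
// Defining RMCFG_ENGINE_SETUP and RMCFG_HAL_SETUP_ALL before the include
// makes g_hal_private.h emit halIface_T234D/T264D/T256D and their
// registerHalModule_*() wrappers into this file.
#define RMCFG_ENGINE_SETUP   1
#define RMCFG_HAL_SETUP_ALL  1
#include "g_hal_private.h"

With RMCFG_HAL_SETUP_ALL defined, the block below turns on all three family selectors (T23XD, T26XD, T25XD), each of which in turn enables its per-chip T234D/T264D/T256D section further down.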
+#if defined(RMCFG_ENGINE_SETUP) + +// setup all enabled chip families +#if defined(RMCFG_HAL_SETUP_ALL) +# define RMCFG_HAL_SETUP_T23XD 1 +# define RMCFG_HAL_SETUP_T26XD 1 +# define RMCFG_HAL_SETUP_T25XD 1 +#endif // RMCFG_HAL_SETUP_ALL + +// +// setup all enabled chips in each enabled family +// + +#if defined(RMCFG_HAL_SETUP_T23XD) +# define RMCFG_HAL_SETUP_T234D 1 +#endif // T23XD + +#if defined(RMCFG_HAL_SETUP_T26XD) +# define RMCFG_HAL_SETUP_T264D 1 +#endif // T26XD + +#if defined(RMCFG_HAL_SETUP_T25XD) +# define RMCFG_HAL_SETUP_T256D 1 +#endif // T25XD + +#endif // RMCFG_ENGINE_SETUP + +// pull in private headers for each engine + + +// +// per-GPU structure with an interface init function for each engine +// + +// registerHalModule function declaration +NV_STATUS registerHalModule(NvU32, const HAL_IFACE_SETUP *); + +#if defined(RMCFG_HAL_SETUP_T234D) + +static const HAL_IFACE_SETUP halIface_T234D = { + + +}; + +NV_STATUS registerHalModule_T234D(void) +{ + return registerHalModule(HAL_IMPL_T234D, &halIface_T234D); +} + +#endif // T23XD or T234D + +#if defined(RMCFG_HAL_SETUP_T264D) + +static const HAL_IFACE_SETUP halIface_T264D = { + + +}; + +NV_STATUS registerHalModule_T264D(void) +{ + return registerHalModule(HAL_IMPL_T264D, &halIface_T264D); +} + +#endif // T26XD or T264D + +#if defined(RMCFG_HAL_SETUP_T256D) + +static const HAL_IFACE_SETUP halIface_T256D = { + + +}; + +NV_STATUS registerHalModule_T256D(void) +{ + return registerHalModule(HAL_IMPL_T256D, &halIface_T256D); +} + +#endif // T25XD or T256D + + + +#endif // _G_RMCFG_HAL_PRIVATE_H_ diff --git a/src/nvidia/generated/g_hal_register.h b/src/nvidia/generated/g_hal_register.h new file mode 100644 index 0000000..85d0404 --- /dev/null +++ b/src/nvidia/generated/g_hal_register.h @@ -0,0 +1,89 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Hal registration entry points. +// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_hal_register.h +// +// Chips: T234D, T26XD, T25XD +// + +#ifndef _G_RMCFG_HAL_REGISTER_H_ +#define _G_RMCFG_HAL_REGISTER_H_ + +// +// per-family HAL registration entry points +// + + +NV_STATUS registerHalModule_T234D(void); + +static NV_STATUS NV_INLINE REGISTER_T23XD_HALS(void) +{ + NV_STATUS rmStatus; + + rmStatus = registerHalModule_T234D(); + if (rmStatus != NV_OK) + return rmStatus; + + return NV_OK; +} + +NV_STATUS registerHalModule_T264D(void); + +static NV_STATUS NV_INLINE REGISTER_T26XD_HALS(void) +{ + NV_STATUS rmStatus; + + rmStatus = registerHalModule_T264D(); + if (rmStatus != NV_OK) + return rmStatus; + + return NV_OK; +} + +NV_STATUS registerHalModule_T256D(void); + +static NV_STATUS NV_INLINE REGISTER_T25XD_HALS(void) +{ + NV_STATUS rmStatus; + + rmStatus = registerHalModule_T256D(); + if (rmStatus != NV_OK) + return rmStatus; + + return NV_OK; +} + +// +// This routine can be used by platform dependent code to +// enable all HAL modules. 
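For instance, a platform's bring-up path might call it once before any per-chip HAL lookups; a brief sketch under stated assumptions (rmPlatformRegisterHals() is an illustrative name, not part of this patch; REGISTER_ALL_HALS() is defined just below):

// Hypothetical platform-dependent init step: register the HAL modules for
// every family this profile was generated with (T23XD/T26XD/T25XD).
static NV_STATUS rmPlatformRegisterHals(void)
{
    // REGISTER_ALL_HALS() stops at and returns the first failing
    // registerHalModule_*() status, so the result can be passed up as-is.
    return REGISTER_ALL_HALS();
}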
+// +static NV_STATUS NV_INLINE REGISTER_ALL_HALS(void) +{ + NV_STATUS rmStatus; + + rmStatus = REGISTER_T23XD_HALS(); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + rmStatus = REGISTER_T26XD_HALS(); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + rmStatus = REGISTER_T25XD_HALS(); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + return NV_OK; +} + + + +#endif // _G_RMCFG_HAL_REGISTER_H_ diff --git a/src/nvidia/generated/g_hda_codec_api_nvoc.c b/src/nvidia/generated/g_hda_codec_api_nvoc.c new file mode 100644 index 0000000..f156415 --- /dev/null +++ b/src/nvidia/generated/g_hda_codec_api_nvoc.c @@ -0,0 +1,515 @@ +#define NVOC_HDA_CODEC_API_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_hda_codec_api_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xf59a20 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Hdacodec; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +// Forward declarations for Hdacodec +void __nvoc_init__GpuResource(GpuResource*); +void __nvoc_init__Hdacodec(Hdacodec*); +void __nvoc_init_funcTable_Hdacodec(Hdacodec*); +NV_STATUS __nvoc_ctor_Hdacodec(Hdacodec*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_Hdacodec(Hdacodec*); +void __nvoc_dtor_Hdacodec(Hdacodec*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__Hdacodec; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__Hdacodec; + +// Down-thunk(s) to bridge Hdacodec methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct 
RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super + +// Up-thunk(s) to bridge Hdacodec methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2 +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2 +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2 +NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super 
+NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource); // super +void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference); // super +NV_STATUS __nvoc_up_thunk_GpuResource_hdacodecControl(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_GpuResource_hdacodecMap(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_GpuResource_hdacodecUnmap(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_GpuResource_hdacodecShareCallback(struct Hdacodec *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_GpuResource_hdacodecGetRegBaseOffsetAndSize(struct Hdacodec *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // this +NV_STATUS __nvoc_up_thunk_GpuResource_hdacodecGetMapAddrSpace(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this +NV_STATUS __nvoc_up_thunk_GpuResource_hdacodecInternalControlForward(struct Hdacodec *pGpuResource, NvU32 command, void *pParams, NvU32 size); // this +NvHandle __nvoc_up_thunk_GpuResource_hdacodecGetInternalObjectHandle(struct Hdacodec *pGpuResource); // this +NvBool __nvoc_up_thunk_RmResource_hdacodecAccessCallback(struct Hdacodec *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NV_STATUS __nvoc_up_thunk_RmResource_hdacodecGetMemInterMapParams(struct Hdacodec *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_hdacodecCheckMemInterUnmap(struct Hdacodec *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_hdacodecGetMemoryMappingDescriptor(struct Hdacodec *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_hdacodecControlSerialization_Prologue(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_hdacodecControlSerialization_Epilogue(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_hdacodecControl_Prologue(struct Hdacodec *pResource, struct CALL_CONTEXT 
*pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_hdacodecControl_Epilogue(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_hdacodecCanCopy(struct Hdacodec *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_hdacodecIsDuplicate(struct Hdacodec *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_hdacodecPreDestruct(struct Hdacodec *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_hdacodecControlFilter(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_hdacodecIsPartialUnmapSupported(struct Hdacodec *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_hdacodecMapTo(struct Hdacodec *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_hdacodecUnmapFrom(struct Hdacodec *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_hdacodecGetRefCount(struct Hdacodec *pResource); // this +void __nvoc_up_thunk_RsResource_hdacodecAddAdditionalDependants(struct RsClient *pClient, struct Hdacodec *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_Hdacodec = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Hdacodec), + /*classId=*/ classId(Hdacodec), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Hdacodec", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Hdacodec, + /*pCastInfo=*/ &__nvoc_castinfo__Hdacodec, + /*pExportInfo=*/ &__nvoc_export_info__Hdacodec +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__Hdacodec __nvoc_metadata__Hdacodec = { + .rtti.pClassDef = &__nvoc_class_def_Hdacodec, // (hdacodec) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Hdacodec, + .rtti.offset = 0, + .metadata__GpuResource.rtti.pClassDef = &__nvoc_class_def_GpuResource, // (gpures) super + .metadata__GpuResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.rtti.offset = NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource), + .metadata__GpuResource.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2 + .metadata__GpuResource.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.rtti.offset = NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource), + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3 + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4 + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + 
.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3 + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + + .vtable.__hdacodecControl__ = &__nvoc_up_thunk_GpuResource_hdacodecControl, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl__ = &gpuresControl_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_GpuResource_resControl, // virtual + .vtable.__hdacodecMap__ = &__nvoc_up_thunk_GpuResource_hdacodecMap, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresMap__ = &gpuresMap_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_GpuResource_resMap, // virtual + .vtable.__hdacodecUnmap__ = &__nvoc_up_thunk_GpuResource_hdacodecUnmap, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresUnmap__ = &gpuresUnmap_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_GpuResource_resUnmap, // virtual + .vtable.__hdacodecShareCallback__ = &__nvoc_up_thunk_GpuResource_hdacodecShareCallback, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresShareCallback__ = &gpuresShareCallback_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresShareCallback__ = &__nvoc_down_thunk_GpuResource_rmresShareCallback, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__hdacodecGetRegBaseOffsetAndSize__ = &__nvoc_up_thunk_GpuResource_hdacodecGetRegBaseOffsetAndSize, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetRegBaseOffsetAndSize__ = &gpuresGetRegBaseOffsetAndSize_IMPL, // virtual + .vtable.__hdacodecGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_hdacodecGetMapAddrSpace, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL, // virtual + .vtable.__hdacodecInternalControlForward__ = &__nvoc_up_thunk_GpuResource_hdacodecInternalControlForward, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL, // virtual + .vtable.__hdacodecGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_hdacodecGetInternalObjectHandle, // virtual inherited (gpures) base (gpures) + 
.metadata__GpuResource.vtable.__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL, // virtual + .vtable.__hdacodecAccessCallback__ = &__nvoc_up_thunk_RmResource_hdacodecAccessCallback, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresAccessCallback__ = &__nvoc_up_thunk_RmResource_gpuresAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__hdacodecGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_hdacodecGetMemInterMapParams, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + .vtable.__hdacodecCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_hdacodecCheckMemInterUnmap, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual + .vtable.__hdacodecGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_hdacodecGetMemoryMappingDescriptor, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual + .vtable.__hdacodecControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_hdacodecControlSerialization_Prologue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__hdacodecControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_hdacodecControlSerialization_Epilogue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__hdacodecControl_Prologue__ = &__nvoc_up_thunk_RmResource_hdacodecControl_Prologue, // 
virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__hdacodecControl_Epilogue__ = &__nvoc_up_thunk_RmResource_hdacodecControl_Epilogue, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__hdacodecCanCopy__ = &__nvoc_up_thunk_RsResource_hdacodecCanCopy, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresCanCopy__ = &__nvoc_up_thunk_RsResource_gpuresCanCopy, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__hdacodecIsDuplicate__ = &__nvoc_up_thunk_RsResource_hdacodecIsDuplicate, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpuresIsDuplicate, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__hdacodecPreDestruct__ = &__nvoc_up_thunk_RsResource_hdacodecPreDestruct, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresPreDestruct__ = &__nvoc_up_thunk_RsResource_gpuresPreDestruct, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__hdacodecControlFilter__ = &__nvoc_up_thunk_RsResource_hdacodecControlFilter, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresControlFilter__ = &__nvoc_up_thunk_RsResource_gpuresControlFilter, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__hdacodecIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_hdacodecIsPartialUnmapSupported, // inline virtual inherited (res) base (gpures) body + .metadata__GpuResource.vtable.__gpuresIsPartialUnmapSupported__ = 
&__nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__hdacodecMapTo__ = &__nvoc_up_thunk_RsResource_hdacodecMapTo, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresMapTo__ = &__nvoc_up_thunk_RsResource_gpuresMapTo, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__hdacodecUnmapFrom__ = &__nvoc_up_thunk_RsResource_hdacodecUnmapFrom, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpuresUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__hdacodecGetRefCount__ = &__nvoc_up_thunk_RsResource_hdacodecGetRefCount, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetRefCount__ = &__nvoc_up_thunk_RsResource_gpuresGetRefCount, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__hdacodecAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_hdacodecAddAdditionalDependants, // virtual inherited (res) base (gpures) + .metadata__GpuResource.vtable.__gpuresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__Hdacodec = { + .numRelatives = 6, + .relatives = { + &__nvoc_metadata__Hdacodec.rtti, // [0]: (hdacodec) this + &__nvoc_metadata__Hdacodec.metadata__GpuResource.rtti, // [1]: (gpures) super + &__nvoc_metadata__Hdacodec.metadata__GpuResource.metadata__RmResource.rtti, // [2]: (rmres) super^2 + &__nvoc_metadata__Hdacodec.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3 + &__nvoc_metadata__Hdacodec.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4 + &__nvoc_metadata__Hdacodec.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3 + } +}; + +// 25 up-thunk(s) defined to bridge 
methods in Hdacodec to superclasses + +// hdacodecControl: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_hdacodecControl(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource)), pCallContext, pParams); +} + +// hdacodecMap: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_hdacodecMap(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource)), pCallContext, pParams, pCpuMapping); +} + +// hdacodecUnmap: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_hdacodecUnmap(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource)), pCallContext, pCpuMapping); +} + +// hdacodecShareCallback: virtual inherited (gpures) base (gpures) +NvBool __nvoc_up_thunk_GpuResource_hdacodecShareCallback(struct Hdacodec *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// hdacodecGetRegBaseOffsetAndSize: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_hdacodecGetRegBaseOffsetAndSize(struct Hdacodec *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource)), pGpu, pOffset, pSize); +} + +// hdacodecGetMapAddrSpace: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_hdacodecGetMapAddrSpace(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource)), pCallContext, mapFlags, pAddrSpace); +} + +// hdacodecInternalControlForward: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_hdacodecInternalControlForward(struct Hdacodec *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource)), command, pParams, size); +} + +// hdacodecGetInternalObjectHandle: virtual inherited (gpures) base (gpures) +NvHandle __nvoc_up_thunk_GpuResource_hdacodecGetInternalObjectHandle(struct Hdacodec *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource))); +} + +// hdacodecAccessCallback: virtual inherited (rmres) base (gpures) +NvBool __nvoc_up_thunk_RmResource_hdacodecAccessCallback(struct Hdacodec *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + 
NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// hdacodecGetMemInterMapParams: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_hdacodecGetMemInterMapParams(struct Hdacodec *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pParams); +} + +// hdacodecCheckMemInterUnmap: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_hdacodecCheckMemInterUnmap(struct Hdacodec *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource)), bSubdeviceHandleProvided); +} + +// hdacodecGetMemoryMappingDescriptor: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_hdacodecGetMemoryMappingDescriptor(struct Hdacodec *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource)), ppMemDesc); +} + +// hdacodecControlSerialization_Prologue: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_hdacodecControlSerialization_Prologue(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// hdacodecControlSerialization_Epilogue: virtual inherited (rmres) base (gpures) +void __nvoc_up_thunk_RmResource_hdacodecControlSerialization_Epilogue(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// hdacodecControl_Prologue: virtual inherited (rmres) base (gpures) +NV_STATUS __nvoc_up_thunk_RmResource_hdacodecControl_Prologue(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// hdacodecControl_Epilogue: virtual inherited (rmres) base (gpures) +void __nvoc_up_thunk_RmResource_hdacodecControl_Epilogue(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// hdacodecCanCopy: virtual inherited (res) base (gpures) +NvBool __nvoc_up_thunk_RsResource_hdacodecCanCopy(struct Hdacodec *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// hdacodecIsDuplicate: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_hdacodecIsDuplicate(struct Hdacodec 
*pResource, NvHandle hMemory, NvBool *pDuplicate) { + return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate); +} + +// hdacodecPreDestruct: virtual inherited (res) base (gpures) +void __nvoc_up_thunk_RsResource_hdacodecPreDestruct(struct Hdacodec *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// hdacodecControlFilter: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_hdacodecControlFilter(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// hdacodecIsPartialUnmapSupported: inline virtual inherited (res) base (gpures) body +NvBool __nvoc_up_thunk_RsResource_hdacodecIsPartialUnmapSupported(struct Hdacodec *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// hdacodecMapTo: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_hdacodecMapTo(struct Hdacodec *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// hdacodecUnmapFrom: virtual inherited (res) base (gpures) +NV_STATUS __nvoc_up_thunk_RsResource_hdacodecUnmapFrom(struct Hdacodec *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// hdacodecGetRefCount: virtual inherited (res) base (gpures) +NvU32 __nvoc_up_thunk_RsResource_hdacodecGetRefCount(struct Hdacodec *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// hdacodecAddAdditionalDependants: virtual inherited (res) base (gpures) +void __nvoc_up_thunk_RsResource_hdacodecAddAdditionalDependants(struct RsClient *pClient, struct Hdacodec *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__Hdacodec = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Hdacodec(Hdacodec *pThis) { + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Hdacodec(Hdacodec *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Hdacodec(Hdacodec *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + 
status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Hdacodec_fail_GpuResource; + __nvoc_init_dataField_Hdacodec(pThis); + + status = __nvoc_hdacodecConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Hdacodec_fail__init; + goto __nvoc_ctor_Hdacodec_exit; // Success + +__nvoc_ctor_Hdacodec_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_Hdacodec_fail_GpuResource: +__nvoc_ctor_Hdacodec_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_Hdacodec_1(Hdacodec *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_Hdacodec_1 + + +// Initialize vtable(s) for 25 virtual method(s). +void __nvoc_init_funcTable_Hdacodec(Hdacodec *pThis) { + __nvoc_init_funcTable_Hdacodec_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__Hdacodec(Hdacodec *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; // (rmres) super^2 + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; // (gpures) super + pThis->__nvoc_pbase_Hdacodec = pThis; // (hdacodec) this + + // Recurse to superclass initialization function(s). + __nvoc_init__GpuResource(&pThis->__nvoc_base_GpuResource); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__Hdacodec.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4 + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__Hdacodec.metadata__GpuResource.metadata__RmResource.metadata__RsResource; // (res) super^3 + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__Hdacodec.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__Hdacodec.metadata__GpuResource.metadata__RmResource; // (rmres) super^2 + pThis->__nvoc_base_GpuResource.__nvoc_metadata_ptr = &__nvoc_metadata__Hdacodec.metadata__GpuResource; // (gpures) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__Hdacodec; // (hdacodec) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_Hdacodec(pThis); +} + +NV_STATUS __nvoc_objCreate_Hdacodec(Hdacodec **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + Hdacodec *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(Hdacodec), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. 
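+ // (Illustrative note, not generated output: the portMemSet below is what
+ // backs that guarantee -- every data member, each __nvoc_pbase_* pointer,
+ // and __nvoc_metadata_ptr start out zero/NULL until __nvoc_init__Hdacodec
+ // and the constructor chain fill in the non-zero state.)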
+ portMemSet(pThis, 0, sizeof(Hdacodec)); + + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__Hdacodec(pThis); + status = __nvoc_ctor_Hdacodec(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_Hdacodec_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_Hdacodec_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(Hdacodec)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Hdacodec(Hdacodec **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_Hdacodec(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_hda_codec_api_nvoc.h b/src/nvidia/generated/g_hda_codec_api_nvoc.h new file mode 100644 index 0000000..065e724 --- /dev/null +++ b/src/nvidia/generated/g_hda_codec_api_nvoc.h @@ -0,0 +1,321 @@ + +#ifndef _G_HDA_CODEC_API_NVOC_H_ +#define _G_HDA_CODEC_API_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_hda_codec_api_nvoc.h" + +#ifndef HDA_CODEC_API_H +#define HDA_CODEC_API_H + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "resserv/rs_resource.h" +#include "ctrl/ctrl90ec.h" +#include "gpu/gpu_resource.h" + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_HDA_CODEC_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__Hdacodec; +struct NVOC_METADATA__GpuResource; +struct NVOC_VTABLE__Hdacodec; + + +struct Hdacodec { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__Hdacodec *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct GpuResource __nvoc_base_GpuResource; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^4 + struct RsResource *__nvoc_pbase_RsResource; // res super^3 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^2 + struct GpuResource *__nvoc_pbase_GpuResource; // gpures super + struct Hdacodec *__nvoc_pbase_Hdacodec; // hdacodec +}; + + +// Vtable with 25 per-class function pointers +struct NVOC_VTABLE__Hdacodec { + NV_STATUS (*__hdacodecControl__)(struct Hdacodec * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__hdacodecMap__)(struct Hdacodec * /*this*/, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__hdacodecUnmap__)(struct Hdacodec * /*this*/, struct CALL_CONTEXT *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures) + NvBool (*__hdacodecShareCallback__)(struct Hdacodec * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__hdacodecGetRegBaseOffsetAndSize__)(struct Hdacodec * /*this*/, struct OBJGPU *, NvU32 *, NvU32 *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__hdacodecGetMapAddrSpace__)(struct Hdacodec * /*this*/, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__hdacodecInternalControlForward__)(struct Hdacodec * /*this*/, NvU32, void *, NvU32); // virtual inherited (gpures) base (gpures) + NvHandle (*__hdacodecGetInternalObjectHandle__)(struct Hdacodec * /*this*/); // virtual inherited (gpures) base (gpures) + NvBool (*__hdacodecAccessCallback__)(struct Hdacodec * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__hdacodecGetMemInterMapParams__)(struct Hdacodec * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__hdacodecCheckMemInterUnmap__)(struct Hdacodec * /*this*/, NvBool); // virtual inherited (rmres) base (gpures) + NV_STATUS 
(*__hdacodecGetMemoryMappingDescriptor__)(struct Hdacodec * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__hdacodecControlSerialization_Prologue__)(struct Hdacodec * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + void (*__hdacodecControlSerialization_Epilogue__)(struct Hdacodec * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__hdacodecControl_Prologue__)(struct Hdacodec * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + void (*__hdacodecControl_Epilogue__)(struct Hdacodec * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + NvBool (*__hdacodecCanCopy__)(struct Hdacodec * /*this*/); // virtual inherited (res) base (gpures) + NV_STATUS (*__hdacodecIsDuplicate__)(struct Hdacodec * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (gpures) + void (*__hdacodecPreDestruct__)(struct Hdacodec * /*this*/); // virtual inherited (res) base (gpures) + NV_STATUS (*__hdacodecControlFilter__)(struct Hdacodec * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (gpures) + NvBool (*__hdacodecIsPartialUnmapSupported__)(struct Hdacodec * /*this*/); // inline virtual inherited (res) base (gpures) body + NV_STATUS (*__hdacodecMapTo__)(struct Hdacodec * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (gpures) + NV_STATUS (*__hdacodecUnmapFrom__)(struct Hdacodec * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (gpures) + NvU32 (*__hdacodecGetRefCount__)(struct Hdacodec * /*this*/); // virtual inherited (res) base (gpures) + void (*__hdacodecAddAdditionalDependants__)(struct RsClient *, struct Hdacodec * /*this*/, RsResourceRef *); // virtual inherited (res) base (gpures) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__Hdacodec { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__GpuResource metadata__GpuResource; + const struct NVOC_VTABLE__Hdacodec vtable; +}; + +#ifndef __NVOC_CLASS_Hdacodec_TYPEDEF__ +#define __NVOC_CLASS_Hdacodec_TYPEDEF__ +typedef struct Hdacodec Hdacodec; +#endif /* __NVOC_CLASS_Hdacodec_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Hdacodec +#define __nvoc_class_id_Hdacodec 0xf59a20 +#endif /* __nvoc_class_id_Hdacodec */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Hdacodec; + +#define __staticCast_Hdacodec(pThis) \ + ((pThis)->__nvoc_pbase_Hdacodec) + +#ifdef __nvoc_hda_codec_api_h_disabled +#define __dynamicCast_Hdacodec(pThis) ((Hdacodec*) NULL) +#else //__nvoc_hda_codec_api_h_disabled +#define __dynamicCast_Hdacodec(pThis) \ + ((Hdacodec*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Hdacodec))) +#endif //__nvoc_hda_codec_api_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_Hdacodec(Hdacodec**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Hdacodec(Hdacodec**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_Hdacodec(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_Hdacodec((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define 
hdacodecControl_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresControl__ +#define hdacodecControl(pGpuResource, pCallContext, pParams) hdacodecControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define hdacodecMap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresMap__ +#define hdacodecMap(pGpuResource, pCallContext, pParams, pCpuMapping) hdacodecMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define hdacodecUnmap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresUnmap__ +#define hdacodecUnmap(pGpuResource, pCallContext, pCpuMapping) hdacodecUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define hdacodecShareCallback_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresShareCallback__ +#define hdacodecShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) hdacodecShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define hdacodecGetRegBaseOffsetAndSize_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetRegBaseOffsetAndSize__ +#define hdacodecGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) hdacodecGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define hdacodecGetMapAddrSpace_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetMapAddrSpace__ +#define hdacodecGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) hdacodecGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define hdacodecInternalControlForward_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresInternalControlForward__ +#define hdacodecInternalControlForward(pGpuResource, command, pParams, size) hdacodecInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define hdacodecGetInternalObjectHandle_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__ +#define hdacodecGetInternalObjectHandle(pGpuResource) hdacodecGetInternalObjectHandle_DISPATCH(pGpuResource) +#define hdacodecAccessCallback_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define hdacodecAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) hdacodecAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define hdacodecGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define hdacodecGetMemInterMapParams(pRmResource, pParams) hdacodecGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define hdacodecCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define hdacodecCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) hdacodecCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define hdacodecGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define hdacodecGetMemoryMappingDescriptor(pRmResource, ppMemDesc) hdacodecGetMemoryMappingDescriptor_DISPATCH(pRmResource, 
ppMemDesc) +#define hdacodecControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define hdacodecControlSerialization_Prologue(pResource, pCallContext, pParams) hdacodecControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define hdacodecControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define hdacodecControlSerialization_Epilogue(pResource, pCallContext, pParams) hdacodecControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define hdacodecControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define hdacodecControl_Prologue(pResource, pCallContext, pParams) hdacodecControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define hdacodecControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define hdacodecControl_Epilogue(pResource, pCallContext, pParams) hdacodecControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define hdacodecCanCopy_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define hdacodecCanCopy(pResource) hdacodecCanCopy_DISPATCH(pResource) +#define hdacodecIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define hdacodecIsDuplicate(pResource, hMemory, pDuplicate) hdacodecIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define hdacodecPreDestruct_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define hdacodecPreDestruct(pResource) hdacodecPreDestruct_DISPATCH(pResource) +#define hdacodecControlFilter_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define hdacodecControlFilter(pResource, pCallContext, pParams) hdacodecControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define hdacodecIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define hdacodecIsPartialUnmapSupported(pResource) hdacodecIsPartialUnmapSupported_DISPATCH(pResource) +#define hdacodecMapTo_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define hdacodecMapTo(pResource, pParams) hdacodecMapTo_DISPATCH(pResource, pParams) +#define hdacodecUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define hdacodecUnmapFrom(pResource, pParams) hdacodecUnmapFrom_DISPATCH(pResource, pParams) +#define hdacodecGetRefCount_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define hdacodecGetRefCount(pResource) hdacodecGetRefCount_DISPATCH(pResource) +#define hdacodecAddAdditionalDependants_FNPTR(pResource) 
pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define hdacodecAddAdditionalDependants(pClient, pResource, pReference) hdacodecAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NV_STATUS hdacodecControl_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__hdacodecControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS hdacodecMap_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__hdacodecMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS hdacodecUnmap_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__hdacodecUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NvBool hdacodecShareCallback_DISPATCH(struct Hdacodec *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__hdacodecShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS hdacodecGetRegBaseOffsetAndSize_DISPATCH(struct Hdacodec *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__hdacodecGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS hdacodecGetMapAddrSpace_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__hdacodecGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS hdacodecInternalControlForward_DISPATCH(struct Hdacodec *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__hdacodecInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NvHandle hdacodecGetInternalObjectHandle_DISPATCH(struct Hdacodec *pGpuResource) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__hdacodecGetInternalObjectHandle__(pGpuResource); +} + +static inline NvBool hdacodecAccessCallback_DISPATCH(struct Hdacodec *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__hdacodecAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS hdacodecGetMemInterMapParams_DISPATCH(struct Hdacodec *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__hdacodecGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS hdacodecCheckMemInterUnmap_DISPATCH(struct Hdacodec *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__hdacodecCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS hdacodecGetMemoryMappingDescriptor_DISPATCH(struct Hdacodec *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return 
pRmResource->__nvoc_metadata_ptr->vtable.__hdacodecGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS hdacodecControlSerialization_Prologue_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__hdacodecControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void hdacodecControlSerialization_Epilogue_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__hdacodecControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS hdacodecControl_Prologue_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__hdacodecControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void hdacodecControl_Epilogue_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__hdacodecControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool hdacodecCanCopy_DISPATCH(struct Hdacodec *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__hdacodecCanCopy__(pResource); +} + +static inline NV_STATUS hdacodecIsDuplicate_DISPATCH(struct Hdacodec *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__hdacodecIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void hdacodecPreDestruct_DISPATCH(struct Hdacodec *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__hdacodecPreDestruct__(pResource); +} + +static inline NV_STATUS hdacodecControlFilter_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__hdacodecControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool hdacodecIsPartialUnmapSupported_DISPATCH(struct Hdacodec *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__hdacodecIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS hdacodecMapTo_DISPATCH(struct Hdacodec *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__hdacodecMapTo__(pResource, pParams); +} + +static inline NV_STATUS hdacodecUnmapFrom_DISPATCH(struct Hdacodec *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__hdacodecUnmapFrom__(pResource, pParams); +} + +static inline NvU32 hdacodecGetRefCount_DISPATCH(struct Hdacodec *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__hdacodecGetRefCount__(pResource); +} + +static inline void hdacodecAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Hdacodec *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__hdacodecAddAdditionalDependants__(pClient, pResource, pReference); +} + +NV_STATUS hdacodecConstruct_IMPL(struct Hdacodec *arg_pHdacodecApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_hdacodecConstruct(arg_pHdacodecApi, arg_pCallContext, arg_pParams) hdacodecConstruct_IMPL(arg_pHdacodecApi, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern 
"C" +#endif + +#endif // _G_HDA_CODEC_API_NVOC_H_ diff --git a/src/nvidia/generated/g_hypervisor_nvoc.h b/src/nvidia/generated/g_hypervisor_nvoc.h new file mode 100644 index 0000000..94329b3 --- /dev/null +++ b/src/nvidia/generated/g_hypervisor_nvoc.h @@ -0,0 +1,231 @@ + +#ifndef _G_HYPERVISOR_NVOC_H_ +#define _G_HYPERVISOR_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once +#include "g_hypervisor_nvoc.h" + +#ifndef HYPERVISOR_H +#define HYPERVISOR_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: hypervisor.h * +* Defines and structures used for the hypervisor object. * +\***************************************************************************/ + +#include "core/core.h" +#include "nvoc/utility.h" +#include "nv-hypervisor.h" +#include "mem_mgr/mem.h" + +/* ------------------------ Forward Declarations ---------------------------- */ + +struct OBJOS; + +#ifndef __NVOC_CLASS_OBJOS_TYPEDEF__ +#define __NVOC_CLASS_OBJOS_TYPEDEF__ +typedef struct OBJOS OBJOS; +#endif /* __NVOC_CLASS_OBJOS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOS +#define __nvoc_class_id_OBJOS 0xaa1d70 +#endif /* __nvoc_class_id_OBJOS */ + + + +typedef struct HOST_VGPU_DEVICE HOST_VGPU_DEVICE; + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_HYPERVISOR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJHYPERVISOR; +struct NVOC_METADATA__Object; + + +struct OBJHYPERVISOR { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__OBJHYPERVISOR *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Object __nvoc_base_Object; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct OBJHYPERVISOR *__nvoc_pbase_OBJHYPERVISOR; // hypervisor + + // 1 PDB property + NvBool PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED; + + // Data members + NvBool bDetected; + NvBool bIsHVMGuest; + HYPERVISOR_TYPE type; + NvBool bIsHypervHost; + NvBool bIsHypervVgpuSupported; +}; + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJHYPERVISOR { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; +}; + +#ifndef __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +#define __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +typedef struct OBJHYPERVISOR OBJHYPERVISOR; +#endif /* __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHYPERVISOR +#define __nvoc_class_id_OBJHYPERVISOR 0x33c1ba +#endif /* __nvoc_class_id_OBJHYPERVISOR */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHYPERVISOR; + +#define __staticCast_OBJHYPERVISOR(pThis) \ + ((pThis)->__nvoc_pbase_OBJHYPERVISOR) + +#ifdef __nvoc_hypervisor_h_disabled +#define __dynamicCast_OBJHYPERVISOR(pThis) ((OBJHYPERVISOR*) NULL) +#else //__nvoc_hypervisor_h_disabled +#define __dynamicCast_OBJHYPERVISOR(pThis) \ + ((OBJHYPERVISOR*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJHYPERVISOR))) +#endif //__nvoc_hypervisor_h_disabled + +// Property macros +#define PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED_BASE_CAST +#define PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED_BASE_NAME PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED + +NV_STATUS __nvoc_objCreateDynamic_OBJHYPERVISOR(OBJHYPERVISOR**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJHYPERVISOR(OBJHYPERVISOR**, Dynamic*, NvU32); +#define __objCreate_OBJHYPERVISOR(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJHYPERVISOR((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros + +// Dispatch functions +NvBool hypervisorIsVgxHyper_IMPL(void); + + +#define hypervisorIsVgxHyper() hypervisorIsVgxHyper_IMPL() +#define hypervisorIsVgxHyper_HAL() hypervisorIsVgxHyper() + +NV_STATUS hypervisorInjectInterrupt_IMPL(struct OBJHYPERVISOR *arg1, VGPU_NS_INTR *arg2); + + +#ifdef __nvoc_hypervisor_h_disabled +static inline NV_STATUS hypervisorInjectInterrupt(struct OBJHYPERVISOR *arg1, VGPU_NS_INTR *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJHYPERVISOR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_hypervisor_h_disabled +#define hypervisorInjectInterrupt(arg1, arg2) hypervisorInjectInterrupt_IMPL(arg1, arg2) +#endif //__nvoc_hypervisor_h_disabled + +#define hypervisorInjectInterrupt_HAL(arg1, arg2) hypervisorInjectInterrupt(arg1, arg2) + +void hypervisorSetHypervVgpuSupported_IMPL(struct OBJHYPERVISOR *arg1); + + +#ifdef __nvoc_hypervisor_h_disabled +static inline void hypervisorSetHypervVgpuSupported(struct OBJHYPERVISOR *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJHYPERVISOR was disabled!"); +} +#else //__nvoc_hypervisor_h_disabled +#define hypervisorSetHypervVgpuSupported(arg1) hypervisorSetHypervVgpuSupported_IMPL(arg1) +#endif //__nvoc_hypervisor_h_disabled + +#define hypervisorSetHypervVgpuSupported_HAL(arg1) hypervisorSetHypervVgpuSupported(arg1) + +static inline NvBool hypervisorCheckForObjectAccess(NvHandle hClient) { + return NV_FALSE; +} + +static inline NvBool hypervisorIsType(HYPERVISOR_TYPE hyperType) { + return NV_FALSE; +} + +static inline NV_STATUS hypervisorDetection(struct OBJHYPERVISOR 
*arg1, struct OBJOS *arg2) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NvBool hypervisorPcieP2pDetection(struct OBJHYPERVISOR *arg1, NvU32 arg2) { + return NV_FALSE; +} + +static inline HYPERVISOR_TYPE hypervisorGetHypervisorType(struct OBJHYPERVISOR *arg1) { + return OS_HYPERVISOR_UNKNOWN; +} + +static inline void hypervisorSetHypervisorType(struct OBJHYPERVISOR *pHypervisor, HYPERVISOR_TYPE type) { + return; +} + +NV_STATUS hypervisorConstruct_IMPL(struct OBJHYPERVISOR *arg_); + +#define __nvoc_hypervisorConstruct(arg_) hypervisorConstruct_IMPL(arg_) +void hypervisorDestruct_IMPL(struct OBJHYPERVISOR *arg1); + +#define __nvoc_hypervisorDestruct(arg1) hypervisorDestruct_IMPL(arg1) +#undef PRIVATE_FIELD + + +#endif // HYPERVISOR_H + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_HYPERVISOR_NVOC_H_ diff --git a/src/nvidia/generated/g_io_vaspace_nvoc.c b/src/nvidia/generated/g_io_vaspace_nvoc.c new file mode 100644 index 0000000..9fe3d64 --- /dev/null +++ b/src/nvidia/generated/g_io_vaspace_nvoc.c @@ -0,0 +1,285 @@ +#define NVOC_IO_VASPACE_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_io_vaspace_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x28ed9c = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJIOVASPACE; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE; + +// Forward declarations for OBJIOVASPACE +void __nvoc_init__OBJVASPACE(OBJVASPACE*); +void __nvoc_init__OBJIOVASPACE(OBJIOVASPACE*); +void __nvoc_init_funcTable_OBJIOVASPACE(OBJIOVASPACE*); +NV_STATUS __nvoc_ctor_OBJIOVASPACE(OBJIOVASPACE*); +void __nvoc_init_dataField_OBJIOVASPACE(OBJIOVASPACE*); +void __nvoc_dtor_OBJIOVASPACE(OBJIOVASPACE*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__OBJIOVASPACE; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJIOVASPACE; + +// Down-thunk(s) to bridge OBJIOVASPACE methods from ancestors (if any) +NV_STATUS __nvoc_down_thunk_OBJIOVASPACE_vaspaceConstruct_(struct OBJVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags); // this +NV_STATUS __nvoc_down_thunk_OBJIOVASPACE_vaspaceAlloc(struct OBJVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr); // this +NV_STATUS __nvoc_down_thunk_OBJIOVASPACE_vaspaceFree(struct OBJVASPACE *pVAS, NvU64 vAddr); // this +NV_STATUS __nvoc_down_thunk_OBJIOVASPACE_vaspaceApplyDefaultAlignment(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask); // this +NV_STATUS __nvoc_down_thunk_OBJIOVASPACE_vaspaceIncAllocRefCnt(struct OBJVASPACE *pVAS, NvU64 vAddr); // this +NvU64 __nvoc_down_thunk_OBJIOVASPACE_vaspaceGetVaStart(struct OBJVASPACE *pVAS); // this +NvU64 __nvoc_down_thunk_OBJIOVASPACE_vaspaceGetVaLimit(struct OBJVASPACE *pVAS); // this +NV_STATUS __nvoc_down_thunk_OBJIOVASPACE_vaspaceGetVasInfo(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams); // this + +// Up-thunk(s) to bridge OBJIOVASPACE 
methods to ancestors (if any) +NvU32 __nvoc_up_thunk_OBJVASPACE_iovaspaceGetFlags(struct OBJIOVASPACE *pVAS); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJIOVASPACE = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJIOVASPACE), + /*classId=*/ classId(OBJIOVASPACE), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJIOVASPACE", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJIOVASPACE, + /*pCastInfo=*/ &__nvoc_castinfo__OBJIOVASPACE, + /*pExportInfo=*/ &__nvoc_export_info__OBJIOVASPACE +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__OBJIOVASPACE __nvoc_metadata__OBJIOVASPACE = { + .rtti.pClassDef = &__nvoc_class_def_OBJIOVASPACE, // (iovaspace) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJIOVASPACE, + .rtti.offset = 0, + .metadata__OBJVASPACE.rtti.pClassDef = &__nvoc_class_def_OBJVASPACE, // (vaspace) super + .metadata__OBJVASPACE.rtti.dtor = &__nvoc_destructFromBase, + .metadata__OBJVASPACE.rtti.offset = NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE), + .metadata__OBJVASPACE.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^2 + .metadata__OBJVASPACE.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__OBJVASPACE.metadata__Object.rtti.offset = NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE.__nvoc_base_Object), + + .vtable.__iovaspaceConstruct___ = &iovaspaceConstruct__IMPL, // virtual override (vaspace) base (vaspace) + .metadata__OBJVASPACE.vtable.__vaspaceConstruct___ = &__nvoc_down_thunk_OBJIOVASPACE_vaspaceConstruct_, // pure virtual + .vtable.__iovaspaceAlloc__ = &iovaspaceAlloc_IMPL, // virtual override (vaspace) base (vaspace) + .metadata__OBJVASPACE.vtable.__vaspaceAlloc__ = &__nvoc_down_thunk_OBJIOVASPACE_vaspaceAlloc, // pure virtual + .vtable.__iovaspaceFree__ = &iovaspaceFree_IMPL, // virtual override (vaspace) base (vaspace) + .metadata__OBJVASPACE.vtable.__vaspaceFree__ = &__nvoc_down_thunk_OBJIOVASPACE_vaspaceFree, // pure virtual + .vtable.__iovaspaceApplyDefaultAlignment__ = &iovaspaceApplyDefaultAlignment_IMPL, // virtual override (vaspace) base (vaspace) + .metadata__OBJVASPACE.vtable.__vaspaceApplyDefaultAlignment__ = &__nvoc_down_thunk_OBJIOVASPACE_vaspaceApplyDefaultAlignment, // pure virtual + .vtable.__iovaspaceIncAllocRefCnt__ = &iovaspaceIncAllocRefCnt_IMPL, // virtual override (vaspace) base (vaspace) + .metadata__OBJVASPACE.vtable.__vaspaceIncAllocRefCnt__ = &__nvoc_down_thunk_OBJIOVASPACE_vaspaceIncAllocRefCnt, // inline virtual body + .vtable.__iovaspaceGetVaStart__ = &iovaspaceGetVaStart_IMPL, // virtual override (vaspace) base (vaspace) + .metadata__OBJVASPACE.vtable.__vaspaceGetVaStart__ = &__nvoc_down_thunk_OBJIOVASPACE_vaspaceGetVaStart, // virtual + .vtable.__iovaspaceGetVaLimit__ = &iovaspaceGetVaLimit_IMPL, // virtual override (vaspace) base (vaspace) + .metadata__OBJVASPACE.vtable.__vaspaceGetVaLimit__ = &__nvoc_down_thunk_OBJIOVASPACE_vaspaceGetVaLimit, // virtual + .vtable.__iovaspaceGetVasInfo__ = &iovaspaceGetVasInfo_IMPL, // virtual override (vaspace) base (vaspace) + .metadata__OBJVASPACE.vtable.__vaspaceGetVasInfo__ = &__nvoc_down_thunk_OBJIOVASPACE_vaspaceGetVasInfo, // pure virtual + .vtable.__iovaspaceGetFlags__ = &__nvoc_up_thunk_OBJVASPACE_iovaspaceGetFlags, // inline virtual inherited (vaspace) base (vaspace) body + .metadata__OBJVASPACE.vtable.__vaspaceGetFlags__ = &vaspaceGetFlags_edd98b, // inline virtual body +}; + + +// Dynamic down-casting 
information +const struct NVOC_CASTINFO __nvoc_castinfo__OBJIOVASPACE = { + .numRelatives = 3, + .relatives = { + &__nvoc_metadata__OBJIOVASPACE.rtti, // [0]: (iovaspace) this + &__nvoc_metadata__OBJIOVASPACE.metadata__OBJVASPACE.rtti, // [1]: (vaspace) super + &__nvoc_metadata__OBJIOVASPACE.metadata__OBJVASPACE.metadata__Object.rtti, // [2]: (obj) super^2 + } +}; + +// 8 down-thunk(s) defined to bridge methods in OBJIOVASPACE from superclasses + +// iovaspaceConstruct_: virtual override (vaspace) base (vaspace) +NV_STATUS __nvoc_down_thunk_OBJIOVASPACE_vaspaceConstruct_(struct OBJVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) { + return iovaspaceConstruct_((struct OBJIOVASPACE *)(((unsigned char *) pVAS) - NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE)), classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags); +} + +// iovaspaceAlloc: virtual override (vaspace) base (vaspace) +NV_STATUS __nvoc_down_thunk_OBJIOVASPACE_vaspaceAlloc(struct OBJVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) { + return iovaspaceAlloc((struct OBJIOVASPACE *)(((unsigned char *) pVAS) - NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE)), size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr); +} + +// iovaspaceFree: virtual override (vaspace) base (vaspace) +NV_STATUS __nvoc_down_thunk_OBJIOVASPACE_vaspaceFree(struct OBJVASPACE *pVAS, NvU64 vAddr) { + return iovaspaceFree((struct OBJIOVASPACE *)(((unsigned char *) pVAS) - NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE)), vAddr); +} + +// iovaspaceApplyDefaultAlignment: virtual override (vaspace) base (vaspace) +NV_STATUS __nvoc_down_thunk_OBJIOVASPACE_vaspaceApplyDefaultAlignment(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) { + return iovaspaceApplyDefaultAlignment((struct OBJIOVASPACE *)(((unsigned char *) pVAS) - NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE)), pAllocInfo, pAlign, pSize, pPageSizeLockMask); +} + +// iovaspaceIncAllocRefCnt: virtual override (vaspace) base (vaspace) +NV_STATUS __nvoc_down_thunk_OBJIOVASPACE_vaspaceIncAllocRefCnt(struct OBJVASPACE *pVAS, NvU64 vAddr) { + return iovaspaceIncAllocRefCnt((struct OBJIOVASPACE *)(((unsigned char *) pVAS) - NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE)), vAddr); +} + +// iovaspaceGetVaStart: virtual override (vaspace) base (vaspace) +NvU64 __nvoc_down_thunk_OBJIOVASPACE_vaspaceGetVaStart(struct OBJVASPACE *pVAS) { + return iovaspaceGetVaStart((struct OBJIOVASPACE *)(((unsigned char *) pVAS) - NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE))); +} + +// iovaspaceGetVaLimit: virtual override (vaspace) base (vaspace) +NvU64 __nvoc_down_thunk_OBJIOVASPACE_vaspaceGetVaLimit(struct OBJVASPACE *pVAS) { + return iovaspaceGetVaLimit((struct OBJIOVASPACE *)(((unsigned char *) pVAS) - NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE))); +} + +// iovaspaceGetVasInfo: virtual override (vaspace) base (vaspace) +NV_STATUS __nvoc_down_thunk_OBJIOVASPACE_vaspaceGetVasInfo(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) { + return iovaspaceGetVasInfo((struct OBJIOVASPACE *)(((unsigned char *) pVAS) - NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE)), pParams); +} + + +// 1 up-thunk(s) defined to bridge methods in OBJIOVASPACE to superclasses + +// iovaspaceGetFlags: inline virtual inherited (vaspace) base 
(vaspace) body +NvU32 __nvoc_up_thunk_OBJVASPACE_iovaspaceGetFlags(struct OBJIOVASPACE *pVAS) { + return vaspaceGetFlags((struct OBJVASPACE *)(((unsigned char *) pVAS) + NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE))); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJIOVASPACE = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJVASPACE(OBJVASPACE*); +void __nvoc_dtor_OBJIOVASPACE(OBJIOVASPACE *pThis) { + __nvoc_iovaspaceDestruct(pThis); + __nvoc_dtor_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJIOVASPACE(OBJIOVASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_OBJVASPACE(OBJVASPACE* ); +NV_STATUS __nvoc_ctor_OBJIOVASPACE(OBJIOVASPACE *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE); + if (status != NV_OK) goto __nvoc_ctor_OBJIOVASPACE_fail_OBJVASPACE; + __nvoc_init_dataField_OBJIOVASPACE(pThis); + goto __nvoc_ctor_OBJIOVASPACE_exit; // Success + +__nvoc_ctor_OBJIOVASPACE_fail_OBJVASPACE: +__nvoc_ctor_OBJIOVASPACE_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_OBJIOVASPACE_1(OBJIOVASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_OBJIOVASPACE_1 + + +// Initialize vtable(s) for 9 virtual method(s). +void __nvoc_init_funcTable_OBJIOVASPACE(OBJIOVASPACE *pThis) { + __nvoc_init_funcTable_OBJIOVASPACE_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__OBJIOVASPACE(OBJIOVASPACE *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object; // (obj) super^2 + pThis->__nvoc_pbase_OBJVASPACE = &pThis->__nvoc_base_OBJVASPACE; // (vaspace) super + pThis->__nvoc_pbase_OBJIOVASPACE = pThis; // (iovaspace) this + + // Recurse to superclass initialization function(s). + __nvoc_init__OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__OBJIOVASPACE.metadata__OBJVASPACE.metadata__Object; // (obj) super^2 + pThis->__nvoc_base_OBJVASPACE.__nvoc_metadata_ptr = &__nvoc_metadata__OBJIOVASPACE.metadata__OBJVASPACE; // (vaspace) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__OBJIOVASPACE; // (iovaspace) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_OBJIOVASPACE(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJIOVASPACE(OBJIOVASPACE **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + OBJIOVASPACE *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(OBJIOVASPACE), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(OBJIOVASPACE)); + + pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. 
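+ // (Illustrative note, not generated output: when
+ // NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY is set, the parent is
+ // presumably consulted only to resolve HAL configuration, so the
+ // else-branch leaves __nvoc_base_Object.pParent NULL and there is no
+ // objAddChild() edge for the cleanup path below to undo.)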
+ if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__OBJIOVASPACE(pThis); + status = __nvoc_ctor_OBJIOVASPACE(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJIOVASPACE_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_OBJIOVASPACE_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(OBJIOVASPACE)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJIOVASPACE(OBJIOVASPACE **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJIOVASPACE(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_io_vaspace_nvoc.h b/src/nvidia/generated/g_io_vaspace_nvoc.h new file mode 100644 index 0000000..d4f3e26 --- /dev/null +++ b/src/nvidia/generated/g_io_vaspace_nvoc.h @@ -0,0 +1,347 @@ + +#ifndef _G_IO_VASPACE_NVOC_H_ +#define _G_IO_VASPACE_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_io_vaspace_nvoc.h" + +#ifndef _IOVASPACE_H_ +#define _IOVASPACE_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: IOVASPACE.H * +* Defines and structures used for IOMMU Virtual Address Space Object. 
* +\***************************************************************************/ + +#include "mem_mgr/vaspace.h" // base class object header + +#define NV_IOVA_DOMAIN_NONE (~(NvU32)0) + +typedef struct IOVAMAPPING IOVAMAPPING; +typedef struct IOVAMAPPING *PIOVAMAPPING; + +// Opaque pointer for the OS layer to use +typedef struct OS_IOVA_MAPPING_DATA *POS_IOVA_MAPPING_DATA; + +struct IOVAMAPPING +{ + NvU32 iovaspaceId; + + // + // Refcount of the mapping. + // + // Each iovaspaceAcquireMapping() call increments the refcount, and each + // iovaspaceReleaseMapping() call decrements it. Additionally, submappings + // increment the refcount of their root mapping on creation and only + // decrement it when they are destroyed. + // + // Mappings are destroyed when their refcount reaches 0. + // + // Notably a mapping can be destroyed regardless of its refcount with + // iovaspaceDestroyMapping(). Destroying a root mapping destroys all of its + // submappings as well. + // + NvU32 refcount; + + PMEMORY_DESCRIPTOR pPhysMemDesc; + + // + // Maintain a hierarchy of IOVA mappings. The "root" mapping will generally + // be tied to the root memory descriptor. That mapping can have submappings + // within the same IOVA space that correspond to submemory descriptors of + // the root memory descriptor. + // + // Also, the root memory descriptor may have multiple IOVA mappings (up to + // one per IOVA space), so those need to be tracked in association directly + // with the root memory descriptor. + // + // The memory descriptor (root or submemory) always points to a single IOVA + // mapping. For root memory descriptors, that mapping is the head of a list + // in which each mapping covers a unique IOVA space. For submemory + // descriptors, there can only be one IOVA mapping, corresponding to the + // IOVA space of the pGpu associated with the submemory descriptor. + // + union + { + struct IOVAMAPPING *pParent; + struct IOVAMAPPING *pChildren; + } link; + + // + // For root mappings, this points to the next root mapping for the same + // parent physical memory descriptor (e.g., a root mapping for a different + // IOVA space). + // + // For submappings, this instead points to the next submapping of the + // parent root mapping, since a submemory descriptor may only have a single + // IOVA mapping (which is a submapping of an IOVA mapping on the root + // memory descriptor). + // + struct IOVAMAPPING *pNext; + + // OS data associated with this mapping. Core RM doesn't touch this. + POS_IOVA_MAPPING_DATA pOsData; + + // + // If the memory is contiguous, this array consists of one element. + // If the memory is discontiguous, this array is actually larger and has + // one entry for each physical page in pPhysMemDesc. As a result, this + // structure must be allocated from the heap. + // + RmPhysAddr iovaArray[1]; + // WARNING: DO NOT place anything behind the IOVA array! +}; + +/*! + * Virtual address space for a system's IOMMU translation. + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. 
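+// For example, PRIVATE_FIELD(foo) expands to plain foo when
+// NVOC_IO_VASPACE_H_PRIVATE_ACCESS_ALLOWED is defined, and otherwise to
+// NVOC_PRIVATE_FIELD(foo), presumably a poisoned identifier; the conditional
+// below implements exactly this.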
+#ifdef NVOC_IO_VASPACE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__OBJIOVASPACE; +struct NVOC_METADATA__OBJVASPACE; +struct NVOC_VTABLE__OBJIOVASPACE; + + +struct OBJIOVASPACE { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__OBJIOVASPACE *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct OBJVASPACE __nvoc_base_OBJVASPACE; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^2 + struct OBJVASPACE *__nvoc_pbase_OBJVASPACE; // vaspace super + struct OBJIOVASPACE *__nvoc_pbase_OBJIOVASPACE; // iovaspace + + // Data members + NvU64 mappingCount; +}; + + +// Vtable with 9 per-class function pointers +struct NVOC_VTABLE__OBJIOVASPACE { + NV_STATUS (*__iovaspaceConstruct___)(struct OBJIOVASPACE * /*this*/, NvU32, NvU32, NvU64, NvU64, NvU64, NvU64, NvU32); // virtual override (vaspace) base (vaspace) + NV_STATUS (*__iovaspaceAlloc__)(struct OBJIOVASPACE * /*this*/, NvU64, NvU64, NvU64, NvU64, NvU64, VAS_ALLOC_FLAGS, NvU64 *); // virtual override (vaspace) base (vaspace) + NV_STATUS (*__iovaspaceFree__)(struct OBJIOVASPACE * /*this*/, NvU64); // virtual override (vaspace) base (vaspace) + NV_STATUS (*__iovaspaceApplyDefaultAlignment__)(struct OBJIOVASPACE * /*this*/, const FB_ALLOC_INFO *, NvU64 *, NvU64 *, NvU64 *); // virtual override (vaspace) base (vaspace) + NV_STATUS (*__iovaspaceIncAllocRefCnt__)(struct OBJIOVASPACE * /*this*/, NvU64); // virtual override (vaspace) base (vaspace) + NvU64 (*__iovaspaceGetVaStart__)(struct OBJIOVASPACE * /*this*/); // virtual override (vaspace) base (vaspace) + NvU64 (*__iovaspaceGetVaLimit__)(struct OBJIOVASPACE * /*this*/); // virtual override (vaspace) base (vaspace) + NV_STATUS (*__iovaspaceGetVasInfo__)(struct OBJIOVASPACE * /*this*/, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *); // virtual override (vaspace) base (vaspace) + NvU32 (*__iovaspaceGetFlags__)(struct OBJIOVASPACE * /*this*/); // inline virtual inherited (vaspace) base (vaspace) body +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__OBJIOVASPACE { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__OBJVASPACE metadata__OBJVASPACE; + const struct NVOC_VTABLE__OBJIOVASPACE vtable; +}; + +#ifndef __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ +#define __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ +typedef struct OBJIOVASPACE OBJIOVASPACE; +#endif /* __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJIOVASPACE +#define __nvoc_class_id_OBJIOVASPACE 0x28ed9c +#endif /* __nvoc_class_id_OBJIOVASPACE */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJIOVASPACE; + +#define __staticCast_OBJIOVASPACE(pThis) \ + ((pThis)->__nvoc_pbase_OBJIOVASPACE) + +#ifdef __nvoc_io_vaspace_h_disabled +#define __dynamicCast_OBJIOVASPACE(pThis) ((OBJIOVASPACE*) NULL) +#else //__nvoc_io_vaspace_h_disabled +#define __dynamicCast_OBJIOVASPACE(pThis) \ + ((OBJIOVASPACE*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJIOVASPACE))) +#endif //__nvoc_io_vaspace_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_OBJIOVASPACE(OBJIOVASPACE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJIOVASPACE(OBJIOVASPACE**, Dynamic*, NvU32); +#define __objCreate_OBJIOVASPACE(ppNewObj, pParent, createFlags) \ + 
__nvoc_objCreate_OBJIOVASPACE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros +#define iovaspaceConstruct__FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceConstruct___ +#define iovaspaceConstruct_(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) iovaspaceConstruct__DISPATCH(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) +#define iovaspaceAlloc_FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceAlloc__ +#define iovaspaceAlloc(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) iovaspaceAlloc_DISPATCH(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) +#define iovaspaceFree_FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceFree__ +#define iovaspaceFree(pVAS, vAddr) iovaspaceFree_DISPATCH(pVAS, vAddr) +#define iovaspaceApplyDefaultAlignment_FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceApplyDefaultAlignment__ +#define iovaspaceApplyDefaultAlignment(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) iovaspaceApplyDefaultAlignment_DISPATCH(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) +#define iovaspaceIncAllocRefCnt_FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceIncAllocRefCnt__ +#define iovaspaceIncAllocRefCnt(pVAS, vAddr) iovaspaceIncAllocRefCnt_DISPATCH(pVAS, vAddr) +#define iovaspaceGetVaStart_FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceGetVaStart__ +#define iovaspaceGetVaStart(pVAS) iovaspaceGetVaStart_DISPATCH(pVAS) +#define iovaspaceGetVaLimit_FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceGetVaLimit__ +#define iovaspaceGetVaLimit(pVAS) iovaspaceGetVaLimit_DISPATCH(pVAS) +#define iovaspaceGetVasInfo_FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceGetVasInfo__ +#define iovaspaceGetVasInfo(pVAS, pParams) iovaspaceGetVasInfo_DISPATCH(pVAS, pParams) +#define iovaspaceGetFlags_FNPTR(pVAS) pVAS->__nvoc_base_OBJVASPACE.__nvoc_metadata_ptr->vtable.__vaspaceGetFlags__ +#define iovaspaceGetFlags(pVAS) iovaspaceGetFlags_DISPATCH(pVAS) + +// Dispatch functions +static inline NV_STATUS iovaspaceConstruct__DISPATCH(struct OBJIOVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) { + return pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceConstruct___(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags); +} + +static inline NV_STATUS iovaspaceAlloc_DISPATCH(struct OBJIOVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) { + return pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceAlloc__(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr); +} + +static inline NV_STATUS iovaspaceFree_DISPATCH(struct OBJIOVASPACE *pVAS, NvU64 vAddr) { + return pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceFree__(pVAS, vAddr); +} + +static inline NV_STATUS iovaspaceApplyDefaultAlignment_DISPATCH(struct OBJIOVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) { + return pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceApplyDefaultAlignment__(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask); +} + +static inline NV_STATUS iovaspaceIncAllocRefCnt_DISPATCH(struct OBJIOVASPACE *pVAS, NvU64 vAddr) { + return pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceIncAllocRefCnt__(pVAS, vAddr); +} + +static inline NvU64 iovaspaceGetVaStart_DISPATCH(struct OBJIOVASPACE 
*pVAS) { + return pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceGetVaStart__(pVAS); +} + +static inline NvU64 iovaspaceGetVaLimit_DISPATCH(struct OBJIOVASPACE *pVAS) { + return pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceGetVaLimit__(pVAS); +} + +static inline NV_STATUS iovaspaceGetVasInfo_DISPATCH(struct OBJIOVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) { + return pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceGetVasInfo__(pVAS, pParams); +} + +static inline NvU32 iovaspaceGetFlags_DISPATCH(struct OBJIOVASPACE *pVAS) { + return pVAS->__nvoc_metadata_ptr->vtable.__iovaspaceGetFlags__(pVAS); +} + +NV_STATUS iovaspaceConstruct__IMPL(struct OBJIOVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags); + +NV_STATUS iovaspaceAlloc_IMPL(struct OBJIOVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr); + +NV_STATUS iovaspaceFree_IMPL(struct OBJIOVASPACE *pVAS, NvU64 vAddr); + +NV_STATUS iovaspaceApplyDefaultAlignment_IMPL(struct OBJIOVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask); + +NV_STATUS iovaspaceIncAllocRefCnt_IMPL(struct OBJIOVASPACE *pVAS, NvU64 vAddr); + +NvU64 iovaspaceGetVaStart_IMPL(struct OBJIOVASPACE *pVAS); + +NvU64 iovaspaceGetVaLimit_IMPL(struct OBJIOVASPACE *pVAS); + +NV_STATUS iovaspaceGetVasInfo_IMPL(struct OBJIOVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams); + +void iovaspaceDestruct_IMPL(struct OBJIOVASPACE *pIOVAS); + +#define __nvoc_iovaspaceDestruct(pIOVAS) iovaspaceDestruct_IMPL(pIOVAS) +NV_STATUS iovaspaceAcquireMapping_IMPL(struct OBJIOVASPACE *pIOVAS, PMEMORY_DESCRIPTOR pIovaMapping); + +#ifdef __nvoc_io_vaspace_h_disabled +static inline NV_STATUS iovaspaceAcquireMapping(struct OBJIOVASPACE *pIOVAS, PMEMORY_DESCRIPTOR pIovaMapping) { + NV_ASSERT_FAILED_PRECOMP("OBJIOVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_io_vaspace_h_disabled +#define iovaspaceAcquireMapping(pIOVAS, pIovaMapping) iovaspaceAcquireMapping_IMPL(pIOVAS, pIovaMapping) +#endif //__nvoc_io_vaspace_h_disabled + +void iovaspaceReleaseMapping_IMPL(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING pIovaMapping); + +#ifdef __nvoc_io_vaspace_h_disabled +static inline void iovaspaceReleaseMapping(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING pIovaMapping) { + NV_ASSERT_FAILED_PRECOMP("OBJIOVASPACE was disabled!"); +} +#else //__nvoc_io_vaspace_h_disabled +#define iovaspaceReleaseMapping(pIOVAS, pIovaMapping) iovaspaceReleaseMapping_IMPL(pIOVAS, pIovaMapping) +#endif //__nvoc_io_vaspace_h_disabled + +void iovaspaceDestroyMapping_IMPL(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING pIovaMapping); + +#ifdef __nvoc_io_vaspace_h_disabled +static inline void iovaspaceDestroyMapping(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING pIovaMapping) { + NV_ASSERT_FAILED_PRECOMP("OBJIOVASPACE was disabled!"); +} +#else //__nvoc_io_vaspace_h_disabled +#define iovaspaceDestroyMapping(pIOVAS, pIovaMapping) iovaspaceDestroyMapping_IMPL(pIOVAS, pIovaMapping) +#endif //__nvoc_io_vaspace_h_disabled + +#undef PRIVATE_FIELD + + +struct OBJIOVASPACE* iovaspaceFromId(NvU32 iovaspaceId); +struct OBJIOVASPACE* iovaspaceFromMapping(PIOVAMAPPING pIovaMapping); + +// +// Helper that looks up the IOVAS from the mapping and then calls +// iovaspaceDestroyMapping(). 
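+// A minimal lifecycle sketch, assuming pIOVAS, pMemDesc, and pMapping are in
+// scope (hypothetical call sites, following the refcount contract documented
+// on IOVAMAPPING above):
+//
+//     NV_STATUS status = iovaspaceAcquireMapping(pIOVAS, pMemDesc); // refcount++
+//     ...
+//     iovaspaceReleaseMapping(pIOVAS, pMapping);  // refcount--, destroyed at 0
+//
+// or, to tear a mapping down regardless of its refcount:
+//
+//     iovaMappingDestroy(pMapping);  // resolves the IOVAS, then destroys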
+// +void iovaMappingDestroy(PIOVAMAPPING pIovaMapping); + +#endif // _IOVASPACE_H_ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_IO_VASPACE_NVOC_H_ diff --git a/src/nvidia/generated/g_ioaccess_nvoc.c b/src/nvidia/generated/g_ioaccess_nvoc.c new file mode 100644 index 0000000..707e93a --- /dev/null +++ b/src/nvidia/generated/g_ioaccess_nvoc.c @@ -0,0 +1,137 @@ +#define NVOC_IOACCESS_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_ioaccess_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xfcaf2e = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RegisterAperture; + +// Forward declarations for RegisterAperture +void __nvoc_init__RegisterAperture(RegisterAperture*); +void __nvoc_init_funcTable_RegisterAperture(RegisterAperture*); +NV_STATUS __nvoc_ctor_RegisterAperture(RegisterAperture*); +void __nvoc_init_dataField_RegisterAperture(RegisterAperture*); +void __nvoc_dtor_RegisterAperture(RegisterAperture*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__RegisterAperture; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__RegisterAperture; + +// Down-thunk(s) to bridge RegisterAperture methods from ancestors (if any) + +// Up-thunk(s) to bridge RegisterAperture methods to ancestors (if any) + +// Not instantiable because it's not derived from class "Object" +// Not instantiable because it's an abstract class with following pure virtual functions: +// regaprtReadReg08 +// regaprtReadReg16 +// regaprtReadReg32 +// regaprtWriteReg08 +// regaprtWriteReg16 +// regaprtWriteReg32 +// regaprtWriteReg32Uc +// regaprtIsRegValid +const struct NVOC_CLASS_DEF __nvoc_class_def_RegisterAperture = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RegisterAperture), + /*classId=*/ classId(RegisterAperture), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RegisterAperture", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo__RegisterAperture, + /*pExportInfo=*/ &__nvoc_export_info__RegisterAperture +}; + + +// Metadata with per-class RTTI and vtable +static const struct NVOC_METADATA__RegisterAperture __nvoc_metadata__RegisterAperture = { + .rtti.pClassDef = &__nvoc_class_def_RegisterAperture, // (regaprt) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RegisterAperture, + .rtti.offset = 0, + + .vtable.__regaprtReadReg08__ = NULL, // pure virtual + .vtable.__regaprtReadReg16__ = NULL, // pure virtual + .vtable.__regaprtReadReg32__ = NULL, // pure virtual + .vtable.__regaprtWriteReg08__ = NULL, // pure virtual + .vtable.__regaprtWriteReg16__ = NULL, // pure virtual + .vtable.__regaprtWriteReg32__ = NULL, // pure virtual + .vtable.__regaprtWriteReg32Uc__ = NULL, // pure virtual + .vtable.__regaprtIsRegValid__ = NULL, // pure virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__RegisterAperture = { + .numRelatives = 1, + .relatives = { + &__nvoc_metadata__RegisterAperture.rtti, // [0]: (regaprt) this + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__RegisterAperture = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void 
__nvoc_dtor_RegisterAperture(RegisterAperture *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RegisterAperture(RegisterAperture *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RegisterAperture(RegisterAperture *pThis) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_RegisterAperture(pThis); + goto __nvoc_ctor_RegisterAperture_exit; // Success + +__nvoc_ctor_RegisterAperture_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_RegisterAperture_1(RegisterAperture *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_RegisterAperture_1 + + +// Initialize vtable(s) for 8 virtual method(s). +void __nvoc_init_funcTable_RegisterAperture(RegisterAperture *pThis) { + __nvoc_init_funcTable_RegisterAperture_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__RegisterAperture(RegisterAperture *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_RegisterAperture = pThis; // (regaprt) this + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__RegisterAperture; // (regaprt) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_RegisterAperture(pThis); +} + diff --git a/src/nvidia/generated/g_ioaccess_nvoc.h b/src/nvidia/generated/g_ioaccess_nvoc.h new file mode 100644 index 0000000..6c7bc7f --- /dev/null +++ b/src/nvidia/generated/g_ioaccess_nvoc.h @@ -0,0 +1,288 @@ + +#ifndef _G_IOACCESS_NVOC_H_ +#define _G_IOACCESS_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+#if (defined(NVRM) || defined(RMCFG_FEATURE_PLATFORM_GSP)) && !defined(NVWATCH)
+#pragma once
+#include "g_ioaccess_nvoc.h"
+#endif
+
+#ifndef _IO_ACCESS_H_
+#define _IO_ACCESS_H_
+
+#include "nvtypes.h"
+#include "nvstatus.h"
+#if (defined(NVRM) || defined(RMCFG_FEATURE_PLATFORM_GSP)) && !defined(NVWATCH)
+#include "nvoc/prelude.h"
+#endif
+
+#define REG_DRF_SHIFT(drf) ((0?drf) % 32)
+#define REG_DRF_MASK(drf) (0xFFFFFFFF>>(31-((1?drf) % 32)+((0?drf) % 32)))
+#define REG_DRF_DEF(d,r,f,c) ((NV ## d ## r ## f ## c)<<REG_DRF_SHIFT(NV ## d ## r ## f))
+#define REG_DRF_NUM(d,r,f,n) (((n)&REG_DRF_MASK(NV ## d ## r ## f))<<REG_DRF_SHIFT(NV ## d ## r ## f))
+#define REG_DRF_VAL(d,r,f,v) (((v)>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+#define REG_DRF_SHIFTMASK(drf) (REG_DRF_MASK(drf)<<(REG_DRF_SHIFT(drf)))
+#define REG_DRF_WIDTH(drf) ((1?drf) - (0?drf) + 1)
+
+#if (defined(NVRM) || defined(RMCFG_FEATURE_PLATFORM_GSP)) && !defined(NVWATCH)
+#define REG_RD08(ap, addr) regaprtReadReg08 (staticCast(ap, RegisterAperture), addr)
+#define REG_RD16(ap, addr) regaprtReadReg16 (staticCast(ap, RegisterAperture), addr)
+#define REG_RD32(ap, addr) regaprtReadReg32 (staticCast(ap, RegisterAperture), addr)
+#define REG_WR08(ap, addr, val) regaprtWriteReg08 (staticCast(ap, RegisterAperture), addr, val)
+#define REG_WR16(ap, addr, val) regaprtWriteReg16 (staticCast(ap, RegisterAperture), addr, val)
+#define REG_WR32(ap, addr, val) regaprtWriteReg32 (staticCast(ap, RegisterAperture), addr, val)
+#define REG_WR32_UC(ap, addr, val) regaprtWriteReg32Uc(staticCast(ap, RegisterAperture), addr, val)
+#define REG_VALID(ap, addr) regaprtIsRegValid (staticCast(ap, RegisterAperture), addr)
+
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_IOACCESS_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+
+
+// Metadata with per-class RTTI and vtable
+struct NVOC_METADATA__RegisterAperture;
+struct NVOC_VTABLE__RegisterAperture;
+
+
+struct RegisterAperture {
+
+    // Metadata starts with RTTI structure.
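+    // The union below relies on NVOC_RTTI being the first member of the
+    // NVOC_METADATA__* structure, so __nvoc_metadata_ptr and __nvoc_rtti
+    // alias the same address and older RTTI-based readers still work.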
+ union { + const struct NVOC_METADATA__RegisterAperture *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Ancestor object pointers for `staticCast` feature + struct RegisterAperture *__nvoc_pbase_RegisterAperture; // regaprt +}; + + +// Vtable with 8 per-class function pointers +struct NVOC_VTABLE__RegisterAperture { + NvU8 (*__regaprtReadReg08__)(struct RegisterAperture * /*this*/, NvU32); // pure virtual + NvU16 (*__regaprtReadReg16__)(struct RegisterAperture * /*this*/, NvU32); // pure virtual + NvU32 (*__regaprtReadReg32__)(struct RegisterAperture * /*this*/, NvU32); // pure virtual + void (*__regaprtWriteReg08__)(struct RegisterAperture * /*this*/, NvU32, NvV8); // pure virtual + void (*__regaprtWriteReg16__)(struct RegisterAperture * /*this*/, NvU32, NvV16); // pure virtual + void (*__regaprtWriteReg32__)(struct RegisterAperture * /*this*/, NvU32, NvV32); // pure virtual + void (*__regaprtWriteReg32Uc__)(struct RegisterAperture * /*this*/, NvU32, NvV32); // pure virtual + NvBool (*__regaprtIsRegValid__)(struct RegisterAperture * /*this*/, NvU32); // pure virtual +}; + +// Metadata with per-class RTTI and vtable +struct NVOC_METADATA__RegisterAperture { + const struct NVOC_RTTI rtti; + const struct NVOC_VTABLE__RegisterAperture vtable; +}; + +#ifndef __NVOC_CLASS_RegisterAperture_TYPEDEF__ +#define __NVOC_CLASS_RegisterAperture_TYPEDEF__ +typedef struct RegisterAperture RegisterAperture; +#endif /* __NVOC_CLASS_RegisterAperture_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RegisterAperture +#define __nvoc_class_id_RegisterAperture 0xfcaf2e +#endif /* __nvoc_class_id_RegisterAperture */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RegisterAperture; + +#define __staticCast_RegisterAperture(pThis) \ + ((pThis)->__nvoc_pbase_RegisterAperture) + +#ifdef __nvoc_ioaccess_h_disabled +#define __dynamicCast_RegisterAperture(pThis) ((RegisterAperture*) NULL) +#else //__nvoc_ioaccess_h_disabled +#define __dynamicCast_RegisterAperture(pThis) \ + ((RegisterAperture*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RegisterAperture))) +#endif //__nvoc_ioaccess_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_RegisterAperture(RegisterAperture**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RegisterAperture(RegisterAperture**, Dynamic*, NvU32); +#define __objCreate_RegisterAperture(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_RegisterAperture((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros +#define regaprtReadReg08_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__regaprtReadReg08__ +#define regaprtReadReg08(pAperture, addr) regaprtReadReg08_DISPATCH(pAperture, addr) +#define regaprtReadReg16_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__regaprtReadReg16__ +#define regaprtReadReg16(pAperture, addr) regaprtReadReg16_DISPATCH(pAperture, addr) +#define regaprtReadReg32_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__regaprtReadReg32__ +#define regaprtReadReg32(pAperture, addr) regaprtReadReg32_DISPATCH(pAperture, addr) +#define regaprtWriteReg08_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__regaprtWriteReg08__ +#define regaprtWriteReg08(pAperture, addr, value) regaprtWriteReg08_DISPATCH(pAperture, addr, value) +#define regaprtWriteReg16_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__regaprtWriteReg16__ +#define regaprtWriteReg16(pAperture, addr, value) regaprtWriteReg16_DISPATCH(pAperture, addr, value) +#define 
regaprtWriteReg32_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__regaprtWriteReg32__ +#define regaprtWriteReg32(pAperture, addr, value) regaprtWriteReg32_DISPATCH(pAperture, addr, value) +#define regaprtWriteReg32Uc_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__regaprtWriteReg32Uc__ +#define regaprtWriteReg32Uc(pAperture, addr, value) regaprtWriteReg32Uc_DISPATCH(pAperture, addr, value) +#define regaprtIsRegValid_FNPTR(pAperture) pAperture->__nvoc_metadata_ptr->vtable.__regaprtIsRegValid__ +#define regaprtIsRegValid(pAperture, addr) regaprtIsRegValid_DISPATCH(pAperture, addr) + +// Dispatch functions +static inline NvU8 regaprtReadReg08_DISPATCH(struct RegisterAperture *pAperture, NvU32 addr) { + return pAperture->__nvoc_metadata_ptr->vtable.__regaprtReadReg08__(pAperture, addr); +} + +static inline NvU16 regaprtReadReg16_DISPATCH(struct RegisterAperture *pAperture, NvU32 addr) { + return pAperture->__nvoc_metadata_ptr->vtable.__regaprtReadReg16__(pAperture, addr); +} + +static inline NvU32 regaprtReadReg32_DISPATCH(struct RegisterAperture *pAperture, NvU32 addr) { + return pAperture->__nvoc_metadata_ptr->vtable.__regaprtReadReg32__(pAperture, addr); +} + +static inline void regaprtWriteReg08_DISPATCH(struct RegisterAperture *pAperture, NvU32 addr, NvV8 value) { + pAperture->__nvoc_metadata_ptr->vtable.__regaprtWriteReg08__(pAperture, addr, value); +} + +static inline void regaprtWriteReg16_DISPATCH(struct RegisterAperture *pAperture, NvU32 addr, NvV16 value) { + pAperture->__nvoc_metadata_ptr->vtable.__regaprtWriteReg16__(pAperture, addr, value); +} + +static inline void regaprtWriteReg32_DISPATCH(struct RegisterAperture *pAperture, NvU32 addr, NvV32 value) { + pAperture->__nvoc_metadata_ptr->vtable.__regaprtWriteReg32__(pAperture, addr, value); +} + +static inline void regaprtWriteReg32Uc_DISPATCH(struct RegisterAperture *pAperture, NvU32 addr, NvV32 value) { + pAperture->__nvoc_metadata_ptr->vtable.__regaprtWriteReg32Uc__(pAperture, addr, value); +} + +static inline NvBool regaprtIsRegValid_DISPATCH(struct RegisterAperture *pAperture, NvU32 addr) { + return pAperture->__nvoc_metadata_ptr->vtable.__regaprtIsRegValid__(pAperture, addr); +} + +#undef PRIVATE_FIELD + + +// +// TODO: Remove the wrapper structure WAR once NVOC supports in-place object construction +// The proxy structure was introduced to avoid major refactoring until the feature is implemented +// Also fix IoAperture getters +// Use the interface class in NVWATCH once NVOC is enabled there +// +#else // (defined(NVRM) || defined(RMCFG_FEATURE_PLATFORM_GSP)) && !defined(NVWATCH) +typedef struct IO_DEVICE IO_DEVICE; +typedef struct IO_APERTURE IO_APERTURE; + +typedef NvU8 ReadReg008Fn(IO_APERTURE *a, NvU32 addr); +typedef NvU16 ReadReg016Fn(IO_APERTURE *a, NvU32 addr); +typedef NvU32 ReadReg032Fn(IO_APERTURE *a, NvU32 addr); +typedef void WriteReg008Fn(IO_APERTURE *a, NvU32 addr, NvV8 value); +typedef void WriteReg016Fn(IO_APERTURE *a, NvU32 addr, NvV16 value); +typedef void WriteReg032Fn(IO_APERTURE *a, NvU32 addr, NvV32 value); +typedef NvBool ValidRegFn(IO_APERTURE *a, NvU32 addr); + +#define REG_RD08(ap, addr) (ap)->pDevice->pReadReg008Fn((ap), (addr)) +#define REG_RD16(ap, addr) (ap)->pDevice->pReadReg016Fn((ap), (addr)) +#define REG_RD32(ap, addr) (ap)->pDevice->pReadReg032Fn((ap), (addr)) +#define REG_WR08(ap, addr, val) (ap)->pDevice->pWriteReg008Fn((ap), (addr), (val)) +#define REG_WR16(ap, addr, val) (ap)->pDevice->pWriteReg016Fn((ap), (addr), (val)) +#define REG_WR32(ap, addr, val) 
(ap)->pDevice->pWriteReg032Fn((ap), (addr), (val))
+#define REG_WR32_UC(ap, addr, val) (ap)->pDevice->pWriteReg032UcFn((ap), (addr), (val))
+#define REG_VALID(ap, addr) (ap)->pDevice->pValidRegFn((ap), (addr))
+
+// Get the address of a register given the Aperture and offset.
+#define REG_GET_ADDR(ap, offset) ((ap)->baseAddress + (offset))
+
+struct IO_DEVICE
+{
+    ReadReg008Fn *pReadReg008Fn;
+    ReadReg016Fn *pReadReg016Fn;
+    ReadReg032Fn *pReadReg032Fn;
+    WriteReg008Fn *pWriteReg008Fn;
+    WriteReg016Fn *pWriteReg016Fn;
+    WriteReg032Fn *pWriteReg032Fn;
+    WriteReg032Fn *pWriteReg032UcFn;
+    ValidRegFn *pValidRegFn;
+};
+
+struct IO_APERTURE
+{
+    IO_DEVICE *pDevice;  // Pointer to module specific IO_DEVICE
+    NvU32 baseAddress;   // register base address
+    NvU32 length;        // length of aperture
+};
+
+NV_STATUS ioaccessInitIOAperture
+(
+    IO_APERTURE *pAperture,
+    IO_APERTURE *pParentAperture,
+    IO_DEVICE *pDevice,
+    NvU32 offset,
+    NvU32 length
+);
+#endif // (defined(NVRM) || defined(RMCFG_FEATURE_PLATFORM_GSP)) && !defined(NVWATCH)
+
+
+//
+// Macros for register I/O
+//
+
+#define REG_FLD_WR_DRF_NUM(ap,d,r,f,n) REG_WR32(ap,NV##d##r,(REG_RD32(ap,NV##d##r)&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_NUM(d,r,f,n))
+#define REG_FLD_WR_DRF_DEF(ap,d,r,f,c) REG_WR32(ap,NV##d##r,(REG_RD32(ap,NV##d##r)&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_DEF(d,r,f,c))
+#define REG_RD_DRF(ap,d,r,f) (((REG_RD32(ap,NV ## d ## r))>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+#define REG_FLD_TEST_DRF_DEF(ap,d,r,f,c) (REG_RD_DRF(ap,d, r, f) == NV##d##r##f##c)
+#define REG_FLD_TEST_DRF_NUM(ap,d,r,f,n) (REG_RD_DRF(ap,d, r, f) == n)
+#define REG_FLD_IDX_TEST_DRF_DEF(ap,d,r,f,c,i) (REG_IDX_RD_DRF(ap, d, r, i, f) == NV##d##r##f##c)
+
+// Read/write a field or entire register of which there are several copies each accessed via an index
+#define REG_IDX_WR_DRF_NUM(ap,d,r,i,f,n) REG_WR32(ap,NV ## d ## r(i), REG_DRF_NUM(d,r,f,n))
+#define REG_IDX_WR_DRF_DEF(ap,d,r,i,f,c) REG_WR32(ap,NV ## d ## r(i), REG_DRF_DEF(d,r,f,c))
+#define REG_FLD_IDX_WR_DRF_NUM(ap,d,r,i,f,n) REG_WR32(ap,NV##d##r(i),(REG_RD32(ap,NV##d##r(i))&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_NUM(d,r,f,n))
+#define REG_FLD_IDX_WR_DRF_DEF(ap,d,r,i,f,c) REG_WR32(ap,NV##d##r(i),(REG_RD32(ap,NV##d##r(i))&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_DEF(d,r,f,c))
+#define REG_IDX_RD_DRF(ap,d,r,i,f) (((REG_RD32(ap,NV ## d ## r(i)))>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+#define REG_RD_DRF_IDX(ap,d,r,f,i) (((REG_RD32(ap,NV ## d ## r))>>REG_DRF_SHIFT(NV ## d ## r ## f(i)))&REG_DRF_MASK(NV ## d ## r ## f(i)))
+#define REG_IDX_OFFSET_RD_DRF(ap,d,r,i,o,f) (((REG_RD32(ap,NV ## d ## r(i,o)))>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+
+#endif // _IO_ACCESS_H_
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _G_IOACCESS_NVOC_H_
diff --git a/src/nvidia/generated/g_journal_nvoc.h b/src/nvidia/generated/g_journal_nvoc.h
new file mode 100644
index 0000000..7c6d18e
--- /dev/null
+++ b/src/nvidia/generated/g_journal_nvoc.h
@@ -0,0 +1,58 @@
+
+#ifndef _G_JOURNAL_NVOC_H_
+#define _G_JOURNAL_NVOC_H_
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once +#include "g_journal_nvoc.h" + +#ifndef _JOURNAL_H_ +#define _JOURNAL_H_ + +// +// Journal object defines and Structures +// + +#include "kernel/core/core.h" + +#endif // _JOURNAL_H_ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_JOURNAL_NVOC_H_ diff --git a/src/nvidia/generated/g_kern_disp_nvoc.c b/src/nvidia/generated/g_kern_disp_nvoc.c new file mode 100644 index 0000000..4f694a2 --- /dev/null +++ b/src/nvidia/generated/g_kern_disp_nvoc.c @@ -0,0 +1,420 @@ +#define NVOC_KERN_DISP_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kern_disp_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x55952e = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelDisplay; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +// Forward declarations for KernelDisplay +void __nvoc_init__OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init__KernelDisplay(KernelDisplay*, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner); +void __nvoc_init_funcTable_KernelDisplay(KernelDisplay*, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner); +NV_STATUS __nvoc_ctor_KernelDisplay(KernelDisplay*, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner); +void __nvoc_init_dataField_KernelDisplay(KernelDisplay*, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner); +void __nvoc_dtor_KernelDisplay(KernelDisplay*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__KernelDisplay; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__KernelDisplay; + +// Down-thunk(s) to bridge KernelDisplay methods from ancestors (if any) +NV_STATUS __nvoc_down_thunk_KernelDisplay_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, ENGDESCRIPTOR engDesc); // this +NV_STATUS __nvoc_down_thunk_KernelDisplay_engstateStatePreInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay); // this +NV_STATUS 
__nvoc_down_thunk_KernelDisplay_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay); // this +void __nvoc_down_thunk_KernelDisplay_engstateStateDestroy(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay); // this +NV_STATUS __nvoc_down_thunk_KernelDisplay_engstateStateLoad(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, NvU32 flags); // this +NV_STATUS __nvoc_down_thunk_KernelDisplay_engstateStateUnload(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, NvU32 flags); // this + +// Up-thunk(s) to bridge KernelDisplay methods to ancestors (if any) +void __nvoc_up_thunk_OBJENGSTATE_kdispInitMissing(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_kdispStatePreInitUnlocked(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_kdispStateInitUnlocked(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_kdispStatePreLoad(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate, NvU32 arg3); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_kdispStatePostLoad(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate, NvU32 arg3); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_kdispStatePreUnload(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate, NvU32 arg3); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_kdispStatePostUnload(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate, NvU32 arg3); // this +NvBool __nvoc_up_thunk_OBJENGSTATE_kdispIsPresent(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelDisplay = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelDisplay), + /*classId=*/ classId(KernelDisplay), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelDisplay", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelDisplay, + /*pCastInfo=*/ &__nvoc_castinfo__KernelDisplay, + /*pExportInfo=*/ &__nvoc_export_info__KernelDisplay +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__KernelDisplay __nvoc_metadata__KernelDisplay = { + .rtti.pClassDef = &__nvoc_class_def_KernelDisplay, // (kdisp) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelDisplay, + .rtti.offset = 0, + .metadata__OBJENGSTATE.rtti.pClassDef = &__nvoc_class_def_OBJENGSTATE, // (engstate) super + .metadata__OBJENGSTATE.rtti.dtor = &__nvoc_destructFromBase, + .metadata__OBJENGSTATE.rtti.offset = NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE), + .metadata__OBJENGSTATE.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^2 + .metadata__OBJENGSTATE.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__OBJENGSTATE.metadata__Object.rtti.offset = NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), + + .vtable.__kdispConstructEngine__ = &kdispConstructEngine_IMPL, // virtual override (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateConstructEngine__ = &__nvoc_down_thunk_KernelDisplay_engstateConstructEngine, // virtual + .vtable.__kdispStatePreInitLocked__ = &kdispStatePreInitLocked_IMPL, // virtual override (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePreInitLocked__ = &__nvoc_down_thunk_KernelDisplay_engstateStatePreInitLocked, // virtual + .vtable.__kdispStateInitLocked__ = &kdispStateInitLocked_IMPL, // virtual override (engstate) base (engstate) 
+ .metadata__OBJENGSTATE.vtable.__engstateStateInitLocked__ = &__nvoc_down_thunk_KernelDisplay_engstateStateInitLocked, // virtual + .vtable.__kdispStateDestroy__ = &kdispStateDestroy_IMPL, // virtual override (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateDestroy__ = &__nvoc_down_thunk_KernelDisplay_engstateStateDestroy, // virtual + .vtable.__kdispStateLoad__ = &kdispStateLoad_IMPL, // virtual override (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateLoad__ = &__nvoc_down_thunk_KernelDisplay_engstateStateLoad, // virtual + .vtable.__kdispStateUnload__ = &kdispStateUnload_IMPL, // virtual override (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateUnload__ = &__nvoc_down_thunk_KernelDisplay_engstateStateUnload, // virtual + .vtable.__kdispInitMissing__ = &__nvoc_up_thunk_OBJENGSTATE_kdispInitMissing, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateInitMissing__ = &engstateInitMissing_IMPL, // virtual + .vtable.__kdispStatePreInitUnlocked__ = &__nvoc_up_thunk_OBJENGSTATE_kdispStatePreInitUnlocked, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePreInitUnlocked__ = &engstateStatePreInitUnlocked_IMPL, // virtual + .vtable.__kdispStateInitUnlocked__ = &__nvoc_up_thunk_OBJENGSTATE_kdispStateInitUnlocked, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateInitUnlocked__ = &engstateStateInitUnlocked_IMPL, // virtual + .vtable.__kdispStatePreLoad__ = &__nvoc_up_thunk_OBJENGSTATE_kdispStatePreLoad, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePreLoad__ = &engstateStatePreLoad_IMPL, // virtual + .vtable.__kdispStatePostLoad__ = &__nvoc_up_thunk_OBJENGSTATE_kdispStatePostLoad, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePostLoad__ = &engstateStatePostLoad_IMPL, // virtual + .vtable.__kdispStatePreUnload__ = &__nvoc_up_thunk_OBJENGSTATE_kdispStatePreUnload, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePreUnload__ = &engstateStatePreUnload_IMPL, // virtual + .vtable.__kdispStatePostUnload__ = &__nvoc_up_thunk_OBJENGSTATE_kdispStatePostUnload, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePostUnload__ = &engstateStatePostUnload_IMPL, // virtual + .vtable.__kdispIsPresent__ = &__nvoc_up_thunk_OBJENGSTATE_kdispIsPresent, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateIsPresent__ = &engstateIsPresent_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__KernelDisplay = { + .numRelatives = 3, + .relatives = { + &__nvoc_metadata__KernelDisplay.rtti, // [0]: (kdisp) this + &__nvoc_metadata__KernelDisplay.metadata__OBJENGSTATE.rtti, // [1]: (engstate) super + &__nvoc_metadata__KernelDisplay.metadata__OBJENGSTATE.metadata__Object.rtti, // [2]: (obj) super^2 + } +}; + +// 6 down-thunk(s) defined to bridge methods in KernelDisplay from superclasses + +// kdispConstructEngine: virtual override (engstate) base (engstate) +NV_STATUS __nvoc_down_thunk_KernelDisplay_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, ENGDESCRIPTOR engDesc) { + return kdispConstructEngine(pGpu, (struct KernelDisplay *)(((unsigned char *) pKernelDisplay) - NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE)), 
engDesc); +} + +// kdispStatePreInitLocked: virtual override (engstate) base (engstate) +NV_STATUS __nvoc_down_thunk_KernelDisplay_engstateStatePreInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay) { + return kdispStatePreInitLocked(pGpu, (struct KernelDisplay *)(((unsigned char *) pKernelDisplay) - NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE))); +} + +// kdispStateInitLocked: virtual override (engstate) base (engstate) +NV_STATUS __nvoc_down_thunk_KernelDisplay_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay) { + return kdispStateInitLocked(pGpu, (struct KernelDisplay *)(((unsigned char *) pKernelDisplay) - NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE))); +} + +// kdispStateDestroy: virtual override (engstate) base (engstate) +void __nvoc_down_thunk_KernelDisplay_engstateStateDestroy(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay) { + kdispStateDestroy(pGpu, (struct KernelDisplay *)(((unsigned char *) pKernelDisplay) - NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE))); +} + +// kdispStateLoad: virtual override (engstate) base (engstate) +NV_STATUS __nvoc_down_thunk_KernelDisplay_engstateStateLoad(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, NvU32 flags) { + return kdispStateLoad(pGpu, (struct KernelDisplay *)(((unsigned char *) pKernelDisplay) - NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE)), flags); +} + +// kdispStateUnload: virtual override (engstate) base (engstate) +NV_STATUS __nvoc_down_thunk_KernelDisplay_engstateStateUnload(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, NvU32 flags) { + return kdispStateUnload(pGpu, (struct KernelDisplay *)(((unsigned char *) pKernelDisplay) - NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE)), flags); +} + + +// 8 up-thunk(s) defined to bridge methods in KernelDisplay to superclasses + +// kdispInitMissing: virtual inherited (engstate) base (engstate) +void __nvoc_up_thunk_OBJENGSTATE_kdispInitMissing(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE))); +} + +// kdispStatePreInitUnlocked: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_kdispStatePreInitUnlocked(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE))); +} + +// kdispStateInitUnlocked: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_kdispStateInitUnlocked(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE))); +} + +// kdispStatePreLoad: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_kdispStatePreLoad(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate, NvU32 arg3) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE)), arg3); +} + +// kdispStatePostLoad: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_kdispStatePostLoad(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate, NvU32 arg3) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(KernelDisplay, 
__nvoc_base_OBJENGSTATE)), arg3); +} + +// kdispStatePreUnload: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_kdispStatePreUnload(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate, NvU32 arg3) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE)), arg3); +} + +// kdispStatePostUnload: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_kdispStatePostUnload(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate, NvU32 arg3) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE)), arg3); +} + +// kdispIsPresent: virtual inherited (engstate) base (engstate) +NvBool __nvoc_up_thunk_OBJENGSTATE_kdispIsPresent(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE))); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__KernelDisplay = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelDisplay(KernelDisplay *pThis) { + __nvoc_kdispDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pGpuhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal; + const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(pGpuhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(dispIpHal); + PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx); + + // NVOC Property Hal field -- PDB_PROP_KDISP_IS_MISSING + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->setProperty(pThis, PDB_PROP_KDISP_IS_MISSING, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_KDISP_IMP_ALLOC_BW_IN_KERNEL_RM_DEF + if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00005000UL) )) /* ChipHal: T234D | T264D */ + { + pThis->setProperty(pThis, PDB_PROP_KDISP_IMP_ALLOC_BW_IN_KERNEL_RM_DEF, NV_TRUE); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KDISP_IMP_ALLOC_BW_IN_KERNEL_RM_DEF, NV_FALSE); + } + pThis->setProperty(pThis, PDB_PROP_KDISP_FEATURE_STRETCH_VBLANK_CAPABLE, (0)); + + // NVOC Property Hal field -- PDB_PROP_KDISP_HAS_SEPARATE_LOW_LATENCY_LINE + // default + { + pThis->setProperty(pThis, PDB_PROP_KDISP_HAS_SEPARATE_LOW_LATENCY_LINE, NV_FALSE); + } + pThis->setProperty(pThis, PDB_PROP_KDISP_ENABLE_INLINE_INTR_SERVICE, NV_TRUE); + + pThis->pStaticInfo = ((void *)0); + + pThis->bWarPurgeSatellitesOnCoreFree = NV_FALSE; + + 
pThis->bExtdevIntrSupported = NV_FALSE; + + pThis->bIsPanelReplayEnabled = NV_FALSE; +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelDisplay_fail_OBJENGSTATE; + __nvoc_init_dataField_KernelDisplay(pThis, pRmhalspecowner, pGpuhalspecowner); + goto __nvoc_ctor_KernelDisplay_exit; // Success + +__nvoc_ctor_KernelDisplay_fail_OBJENGSTATE: +__nvoc_ctor_KernelDisplay_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_KernelDisplay_1(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pGpuhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal; + const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(pGpuhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(dispIpHal); + PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx); + + // kdispGetPBTargetAperture -- halified (2 hals) body + if (((( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00001000UL) )) /* ChipHal: T234D */ && (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00001000UL) )) /* DispIpHal: DISPv0402 */ )) + { + pThis->__kdispGetPBTargetAperture__ = &kdispGetPBTargetAperture_v03_00; + } + else + { + pThis->__kdispGetPBTargetAperture__ = &kdispGetPBTargetAperture_v05_01; + } + + // kdispComputeDpModeSettings -- halified (2 hals) body + if (((( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00001000UL) )) /* ChipHal: T234D */ && (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00001000UL) )) /* DispIpHal: DISPv0402 */ )) + { + pThis->__kdispComputeDpModeSettings__ = &kdispComputeDpModeSettings_v02_04; + } + else + { + pThis->__kdispComputeDpModeSettings__ = &kdispComputeDpModeSettings_v05_01; + } +} // End __nvoc_init_funcTable_KernelDisplay_1 with approximately 4 basic block(s). + + +// Initialize vtable(s) for 16 virtual method(s). +void __nvoc_init_funcTable_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner) { + + // Initialize vtable(s) with 2 per-object function pointer(s). + __nvoc_init_funcTable_KernelDisplay_1(pThis, pRmhalspecowner, pGpuhalspecowner); +} + +// Initialize newly constructed object. +void __nvoc_init__KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner) { + + // Initialize pointers to inherited data. 
+ pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; // (obj) super^2 + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; // (engstate) super + pThis->__nvoc_pbase_KernelDisplay = pThis; // (kdisp) this + + // Recurse to superclass initialization function(s). + __nvoc_init__OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__KernelDisplay.metadata__OBJENGSTATE.metadata__Object; // (obj) super^2 + pThis->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr = &__nvoc_metadata__KernelDisplay.metadata__OBJENGSTATE; // (engstate) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__KernelDisplay; // (kdisp) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_KernelDisplay(pThis, pRmhalspecowner, pGpuhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelDisplay(KernelDisplay **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + KernelDisplay *pThis; + RmHalspecOwner *pRmhalspecowner; + GpuHalspecOwner *pGpuhalspecowner; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(KernelDisplay), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(KernelDisplay)); + + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.createFlags = createFlags; + + // pParent must be a valid object that derives from a halspec owner class. + NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_ARGUMENT); + + // Link the child into the parent unless flagged not to do so. + if (!(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + if ((pGpuhalspecowner = dynamicCast(pParent, GpuHalspecOwner)) == NULL) + pGpuhalspecowner = objFindAncestorOfType(GpuHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pGpuhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init__KernelDisplay(pThis, pRmhalspecowner, pGpuhalspecowner); + status = __nvoc_ctor_KernelDisplay(pThis, pRmhalspecowner, pGpuhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelDisplay_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_KernelDisplay_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. 
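+    // For in-place construction the storage belongs to the caller, so it is
+    // only scrubbed; otherwise the allocation is freed and *ppThis cleared.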
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(KernelDisplay)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelDisplay(KernelDisplay **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelDisplay(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kern_disp_nvoc.h b/src/nvidia/generated/g_kern_disp_nvoc.h new file mode 100644 index 0000000..23fb043 --- /dev/null +++ b/src/nvidia/generated/g_kern_disp_nvoc.h @@ -0,0 +1,1261 @@ + +#ifndef _G_KERN_DISP_NVOC_H_ +#define _G_KERN_DISP_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_kern_disp_nvoc.h" + +#ifndef KERN_DISP_H +#define KERN_DISP_H + +/****************************************************************************** +* +* Kernel Display module header +* This file contains functions managing display on CPU RM +* +******************************************************************************/ + +#include "gpu/eng_state.h" +#include "gpu/gpu_halspec.h" +#include "gpu/disp/kern_disp_type.h" +#include "gpu/disp/kern_disp_max.h" +#include "gpu/mem_mgr/context_dma.h" +#include "gpu/disp/vblank_callback/vblank.h" +#include "gpu/disp/head/kernel_head.h" + + +#include "utils/nvbitvector.h" +TYPEDEF_BITVECTOR(MC_ENGINE_BITVECTOR); + +#include "ctrl/ctrl2080/ctrl2080internal.h" + +typedef NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS KernelDisplayStaticInfo; + +typedef struct +{ + NvU32 kHeadVblankCount[OBJ_MAX_HEADS]; +} KernelDisplaySharedMem; + +// From DISP_v0501 NvDisplay HW IP has 4 interrupt vectors: high latency, low latency, GSP, PMU. 
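+// In the enum below the vectors are numbered from 0 in declaration order:
+// high latency, low latency, PMU, then GSP.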
+typedef enum +{ + DISP_INTERRUPT_VECTOR_HIGH_LATENCY = 0, + DISP_INTERRUPT_VECTOR_LOW_LATENCY, + DISP_INTERRUPT_VECTOR_PMU, + DISP_INTERRUPT_VECTOR_GSP, +} DISP_INTERRUPT_VECTOR; + +typedef struct +{ +// Link configuration + NvU32 linkBw; // Link Rate (270M unit) + NvU32 dp2LinkBw; // Link Rate using DP2.x convention (10M unit) + NvU32 laneCount; // Lane Count + NvBool bDP2xChannelCoding; // If is using 128b/132b channel coding. + NvBool bEnhancedFraming; // Enhanced Framing is enabled or not + NvBool bMultiStream; // Multistream is enabled or not + NvBool bFecEnable; // FEC is enabled or not + NvBool bDisableEffBppSST8b10b; // Disable effective bpp 8b10b coding or not + +// DSC Information + NvBool bDscEnable; // DSC is enabled or not + NvU32 sliceCount; + NvU32 sliceWidth; + NvU32 sliceHeight; + NvU32 dscVersionMajor; + NvU32 dscVersionMinor; + +// Mode information. + NvU64 PClkFreqHz; // Pixel clock in Hz + NvU32 bpp; // Output bits per pixel + NvU32 SetRasterSizeWidth; // Raster Size width + NvU32 SetRasterBlankStartX; // Raster Blank Start X + NvU32 SetRasterBlankEndX; // Raster Blank End X + NvU32 twoChannelAudioHz; + NvU32 eightChannelAudioHz; + NvU32 colorFormat; // Same as DP_COLORFORMAT +} DPMODESETDATA; + +// bpp in PPS is multiplied by 16 when DSC is enabled +#define DSC_BPP_FACTOR 16 + +typedef struct +{ + struct RsClient *pClient; + NvHandle hChannel; + NvU32 channelNum; + NvBool bInUse; +} KernelDisplayClientChannelMap; + +typedef void (*OSVBLANKCALLBACKPROC)(NvP64 pParm1, NvP64 pParm2); + +typedef struct _osvblankcallback { + OSVBLANKCALLBACKPROC pProc; + void * pParm1; + void * pParm2; + void * pCallback; + void * pParm3; +} OSVBLANKCALLBACK, * POSVBLANKCALLBACK; + +#define DISP_INTR_REG(reg) NV_PDISP_FE_RM_INTR_##reg +#define DISP_INTR_REG_IDX(reg,i) NV_PDISP_FE_RM_INTR_##reg(i) + + +struct DispChannel; + +#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__ +#define __NVOC_CLASS_DispChannel_TYPEDEF__ +typedef struct DispChannel DispChannel; +#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannel +#define __nvoc_class_id_DispChannel 0xbd2ff3 +#endif /* __nvoc_class_id_DispChannel */ + + + +struct RgLineCallback; + +#ifndef __NVOC_CLASS_RgLineCallback_TYPEDEF__ +#define __NVOC_CLASS_RgLineCallback_TYPEDEF__ +typedef struct RgLineCallback RgLineCallback; +#endif /* __NVOC_CLASS_RgLineCallback_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RgLineCallback +#define __nvoc_class_id_RgLineCallback 0xa3ff1c +#endif /* __nvoc_class_id_RgLineCallback */ + + + +#define KDISP_GET_HEAD(pKernelDisplay, headID) (RMCFG_MODULE_KERNEL_HEAD ? kdispGetHead(pKernelDisplay, headID) : NULL) + +/*! + * KernelDisp is a logical abstraction of the GPU Display Engine. The + * Public API of the Display Engine is exposed through this object, and any + * interfaces which do not manage the underlying Display hardware can be + * managed by this object. + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_KERN_DISP_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__KernelDisplay; +struct NVOC_METADATA__OBJENGSTATE; +struct NVOC_VTABLE__KernelDisplay; + + +struct KernelDisplay { + + // Metadata starts with RTTI structure. 
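+    // The union aliases one pointer: NVOC_METADATA__KernelDisplay begins with
+    // its NVOC_RTTI member (see its definition below), so the same address
+    // reads either as the versioned metadata or as the legacy bare-RTTI view.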
+ union { + const struct NVOC_METADATA__KernelDisplay *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^2 + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; // engstate super + struct KernelDisplay *__nvoc_pbase_KernelDisplay; // kdisp + + // Vtable with 2 per-object function pointers + NvU32 (*__kdispGetPBTargetAperture__)(struct OBJGPU *, struct KernelDisplay * /*this*/, NvU32, NvU32); // halified (2 hals) body + NV_STATUS (*__kdispComputeDpModeSettings__)(struct OBJGPU *, struct KernelDisplay * /*this*/, NvU32, DPMODESETDATA *, DPIMPINFO *); // halified (2 hals) body + + // 8 PDB properties + NvBool PDB_PROP_KDISP_IMP_ALLOC_BW_IN_KERNEL_RM_DEF; + NvBool PDB_PROP_KDISP_FEATURE_STRETCH_VBLANK_CAPABLE; + NvBool PDB_PROP_KDISP_IN_AWAKEN_INTR; + NvBool PDB_PROP_KDISP_HAS_SEPARATE_LOW_LATENCY_LINE; + NvBool PDB_PROP_KDISP_INTERNAL_PANEL_DISCONNECTED; + NvBool PDB_PROP_KDISP_ENABLE_INLINE_INTR_SERVICE; + NvBool PDB_PROP_KDISP_AGGRESSIVE_VBLANK_HANDLING; + + // Data members + struct DisplayInstanceMemory *pInst; + struct KernelHead *pKernelHead[8]; + const KernelDisplayStaticInfo *pStaticInfo; + NvBool bWarPurgeSatellitesOnCoreFree; + struct RgLineCallback *rgLineCallbackPerHead[8][2]; + NvU32 isrVblankHeads; + NvBool bExtdevIntrSupported; + NvU32 numHeads; + NvU32 deferredVblankHeadMask; + NvHandle hInternalClient; + NvHandle hInternalDevice; + NvHandle hInternalSubdevice; + NvHandle hDispCommonHandle; + MEMORY_DESCRIPTOR *pSharedMemDesc; + KernelDisplaySharedMem *pSharedData; + NvBool bFeatureStretchVblankCapable; + volatile NvS32 lowLatencyLock; + NvU32 vblankCallbackHeadMask; + POSVBLANKCALLBACK pOsVblankCallback; + NvU32 numDispChannels; + KernelDisplayClientChannelMap *pClientChannelTable; + NvBool bIsPanelReplayEnabled; + void *pRgVblankCb; +}; + + +// Vtable with 14 per-class function pointers +struct NVOC_VTABLE__KernelDisplay { + NV_STATUS (*__kdispConstructEngine__)(struct OBJGPU *, struct KernelDisplay * /*this*/, ENGDESCRIPTOR); // virtual override (engstate) base (engstate) + NV_STATUS (*__kdispStatePreInitLocked__)(struct OBJGPU *, struct KernelDisplay * /*this*/); // virtual override (engstate) base (engstate) + NV_STATUS (*__kdispStateInitLocked__)(struct OBJGPU *, struct KernelDisplay * /*this*/); // virtual override (engstate) base (engstate) + void (*__kdispStateDestroy__)(struct OBJGPU *, struct KernelDisplay * /*this*/); // virtual override (engstate) base (engstate) + NV_STATUS (*__kdispStateLoad__)(struct OBJGPU *, struct KernelDisplay * /*this*/, NvU32); // virtual override (engstate) base (engstate) + NV_STATUS (*__kdispStateUnload__)(struct OBJGPU *, struct KernelDisplay * /*this*/, NvU32); // virtual override (engstate) base (engstate) + void (*__kdispInitMissing__)(struct OBJGPU *, struct KernelDisplay * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__kdispStatePreInitUnlocked__)(struct OBJGPU *, struct KernelDisplay * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__kdispStateInitUnlocked__)(struct OBJGPU *, struct KernelDisplay * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__kdispStatePreLoad__)(struct OBJGPU *, struct KernelDisplay * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__kdispStatePostLoad__)(struct OBJGPU *, struct 
KernelDisplay * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__kdispStatePreUnload__)(struct OBJGPU *, struct KernelDisplay * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__kdispStatePostUnload__)(struct OBJGPU *, struct KernelDisplay * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NvBool (*__kdispIsPresent__)(struct OBJGPU *, struct KernelDisplay * /*this*/); // virtual inherited (engstate) base (engstate) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__KernelDisplay { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__OBJENGSTATE metadata__OBJENGSTATE; + const struct NVOC_VTABLE__KernelDisplay vtable; +}; + +#ifndef __NVOC_CLASS_KernelDisplay_TYPEDEF__ +#define __NVOC_CLASS_KernelDisplay_TYPEDEF__ +typedef struct KernelDisplay KernelDisplay; +#endif /* __NVOC_CLASS_KernelDisplay_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelDisplay +#define __nvoc_class_id_KernelDisplay 0x55952e +#endif /* __nvoc_class_id_KernelDisplay */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelDisplay; + +#define __staticCast_KernelDisplay(pThis) \ + ((pThis)->__nvoc_pbase_KernelDisplay) + +#ifdef __nvoc_kern_disp_h_disabled +#define __dynamicCast_KernelDisplay(pThis) ((KernelDisplay*) NULL) +#else //__nvoc_kern_disp_h_disabled +#define __dynamicCast_KernelDisplay(pThis) \ + ((KernelDisplay*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelDisplay))) +#endif //__nvoc_kern_disp_h_disabled + +// Property macros +#define PDB_PROP_KDISP_ENABLE_INLINE_INTR_SERVICE_BASE_CAST +#define PDB_PROP_KDISP_ENABLE_INLINE_INTR_SERVICE_BASE_NAME PDB_PROP_KDISP_ENABLE_INLINE_INTR_SERVICE +#define PDB_PROP_KDISP_AGGRESSIVE_VBLANK_HANDLING_BASE_CAST +#define PDB_PROP_KDISP_AGGRESSIVE_VBLANK_HANDLING_BASE_NAME PDB_PROP_KDISP_AGGRESSIVE_VBLANK_HANDLING +#define PDB_PROP_KDISP_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
+#define PDB_PROP_KDISP_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING +#define PDB_PROP_KDISP_HAS_SEPARATE_LOW_LATENCY_LINE_BASE_CAST +#define PDB_PROP_KDISP_HAS_SEPARATE_LOW_LATENCY_LINE_BASE_NAME PDB_PROP_KDISP_HAS_SEPARATE_LOW_LATENCY_LINE +#define PDB_PROP_KDISP_IN_AWAKEN_INTR_BASE_CAST +#define PDB_PROP_KDISP_IN_AWAKEN_INTR_BASE_NAME PDB_PROP_KDISP_IN_AWAKEN_INTR +#define PDB_PROP_KDISP_IMP_ALLOC_BW_IN_KERNEL_RM_DEF_BASE_CAST +#define PDB_PROP_KDISP_IMP_ALLOC_BW_IN_KERNEL_RM_DEF_BASE_NAME PDB_PROP_KDISP_IMP_ALLOC_BW_IN_KERNEL_RM_DEF +#define PDB_PROP_KDISP_FEATURE_STRETCH_VBLANK_CAPABLE_BASE_CAST +#define PDB_PROP_KDISP_FEATURE_STRETCH_VBLANK_CAPABLE_BASE_NAME PDB_PROP_KDISP_FEATURE_STRETCH_VBLANK_CAPABLE +#define PDB_PROP_KDISP_INTERNAL_PANEL_DISCONNECTED_BASE_CAST +#define PDB_PROP_KDISP_INTERNAL_PANEL_DISCONNECTED_BASE_NAME PDB_PROP_KDISP_INTERNAL_PANEL_DISCONNECTED + +NV_STATUS __nvoc_objCreateDynamic_KernelDisplay(KernelDisplay**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelDisplay(KernelDisplay**, Dynamic*, NvU32); +#define __objCreate_KernelDisplay(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelDisplay((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros +#define kdispConstructEngine_FNPTR(pKernelDisplay) pKernelDisplay->__nvoc_metadata_ptr->vtable.__kdispConstructEngine__ +#define kdispConstructEngine(pGpu, pKernelDisplay, engDesc) kdispConstructEngine_DISPATCH(pGpu, pKernelDisplay, engDesc) +#define kdispStatePreInitLocked_FNPTR(pKernelDisplay) pKernelDisplay->__nvoc_metadata_ptr->vtable.__kdispStatePreInitLocked__ +#define kdispStatePreInitLocked(pGpu, pKernelDisplay) kdispStatePreInitLocked_DISPATCH(pGpu, pKernelDisplay) +#define kdispStateInitLocked_FNPTR(pKernelDisplay) pKernelDisplay->__nvoc_metadata_ptr->vtable.__kdispStateInitLocked__ +#define kdispStateInitLocked(pGpu, pKernelDisplay) kdispStateInitLocked_DISPATCH(pGpu, pKernelDisplay) +#define kdispStateDestroy_FNPTR(pKernelDisplay) pKernelDisplay->__nvoc_metadata_ptr->vtable.__kdispStateDestroy__ +#define kdispStateDestroy(pGpu, pKernelDisplay) kdispStateDestroy_DISPATCH(pGpu, pKernelDisplay) +#define kdispStateLoad_FNPTR(pKernelDisplay) pKernelDisplay->__nvoc_metadata_ptr->vtable.__kdispStateLoad__ +#define kdispStateLoad(pGpu, pKernelDisplay, flags) kdispStateLoad_DISPATCH(pGpu, pKernelDisplay, flags) +#define kdispStateUnload_FNPTR(pKernelDisplay) pKernelDisplay->__nvoc_metadata_ptr->vtable.__kdispStateUnload__ +#define kdispStateUnload(pGpu, pKernelDisplay, flags) kdispStateUnload_DISPATCH(pGpu, pKernelDisplay, flags) +#define kdispGetPBTargetAperture_FNPTR(pKernelDisplay) pKernelDisplay->__kdispGetPBTargetAperture__ +#define kdispGetPBTargetAperture(pGpu, pKernelDisplay, memAddrSpace, cacheSnoop) kdispGetPBTargetAperture_DISPATCH(pGpu, pKernelDisplay, memAddrSpace, cacheSnoop) +#define kdispGetPBTargetAperture_HAL(pGpu, pKernelDisplay, memAddrSpace, cacheSnoop) kdispGetPBTargetAperture_DISPATCH(pGpu, pKernelDisplay, memAddrSpace, cacheSnoop) +#define kdispComputeDpModeSettings_FNPTR(pKernelDisplay) pKernelDisplay->__kdispComputeDpModeSettings__ +#define kdispComputeDpModeSettings(pGpu, pKernelDisplay, headIndex, pDpModesetData, dpInfo) kdispComputeDpModeSettings_DISPATCH(pGpu, pKernelDisplay, headIndex, pDpModesetData, dpInfo) +#define kdispComputeDpModeSettings_HAL(pGpu, pKernelDisplay, headIndex, pDpModesetData, dpInfo) kdispComputeDpModeSettings_DISPATCH(pGpu, pKernelDisplay, headIndex, pDpModesetData, dpInfo) +#define 
kdispInitMissing_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateInitMissing__ +#define kdispInitMissing(pGpu, pEngstate) kdispInitMissing_DISPATCH(pGpu, pEngstate) +#define kdispStatePreInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreInitUnlocked__ +#define kdispStatePreInitUnlocked(pGpu, pEngstate) kdispStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kdispStateInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStateInitUnlocked__ +#define kdispStateInitUnlocked(pGpu, pEngstate) kdispStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kdispStatePreLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreLoad__ +#define kdispStatePreLoad(pGpu, pEngstate, arg3) kdispStatePreLoad_DISPATCH(pGpu, pEngstate, arg3) +#define kdispStatePostLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePostLoad__ +#define kdispStatePostLoad(pGpu, pEngstate, arg3) kdispStatePostLoad_DISPATCH(pGpu, pEngstate, arg3) +#define kdispStatePreUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreUnload__ +#define kdispStatePreUnload(pGpu, pEngstate, arg3) kdispStatePreUnload_DISPATCH(pGpu, pEngstate, arg3) +#define kdispStatePostUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePostUnload__ +#define kdispStatePostUnload(pGpu, pEngstate, arg3) kdispStatePostUnload_DISPATCH(pGpu, pEngstate, arg3) +#define kdispIsPresent_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateIsPresent__ +#define kdispIsPresent(pGpu, pEngstate) kdispIsPresent_DISPATCH(pGpu, pEngstate) + +// Dispatch functions +static inline NV_STATUS kdispConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, ENGDESCRIPTOR engDesc) { + return pKernelDisplay->__nvoc_metadata_ptr->vtable.__kdispConstructEngine__(pGpu, pKernelDisplay, engDesc); +} + +static inline NV_STATUS kdispStatePreInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + return pKernelDisplay->__nvoc_metadata_ptr->vtable.__kdispStatePreInitLocked__(pGpu, pKernelDisplay); +} + +static inline NV_STATUS kdispStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + return pKernelDisplay->__nvoc_metadata_ptr->vtable.__kdispStateInitLocked__(pGpu, pKernelDisplay); +} + +static inline void kdispStateDestroy_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + pKernelDisplay->__nvoc_metadata_ptr->vtable.__kdispStateDestroy__(pGpu, pKernelDisplay); +} + +static inline NV_STATUS kdispStateLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags) { + return pKernelDisplay->__nvoc_metadata_ptr->vtable.__kdispStateLoad__(pGpu, pKernelDisplay, flags); +} + +static inline NV_STATUS kdispStateUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags) { + return pKernelDisplay->__nvoc_metadata_ptr->vtable.__kdispStateUnload__(pGpu, pKernelDisplay, flags); +} + +static inline NvU32 kdispGetPBTargetAperture_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 memAddrSpace, NvU32 cacheSnoop) { + return pKernelDisplay->__kdispGetPBTargetAperture__(pGpu, pKernelDisplay, memAddrSpace, cacheSnoop); +} + +static inline NV_STATUS 
kdispComputeDpModeSettings_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 headIndex, DPMODESETDATA *pDpModesetData, DPIMPINFO *dpInfo) { + return pKernelDisplay->__kdispComputeDpModeSettings__(pGpu, pKernelDisplay, headIndex, pDpModesetData, dpInfo); +} + +static inline void kdispInitMissing_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate) { + pEngstate->__nvoc_metadata_ptr->vtable.__kdispInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kdispStatePreInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__kdispStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kdispStateInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__kdispStateInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kdispStatePreLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__kdispStatePreLoad__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS kdispStatePostLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__kdispStatePostLoad__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS kdispStatePreUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__kdispStatePreUnload__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS kdispStatePostUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__kdispStatePostUnload__(pGpu, pEngstate, arg3); +} + +static inline NvBool kdispIsPresent_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__kdispIsPresent__(pGpu, pEngstate); +} + +void kdispServiceLowLatencyIntrs_KERNEL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 arg3, NvU32 arg4, THREAD_STATE_NODE *arg5, NvU32 *pIntrServicedHeadMask, MC_ENGINE_BITVECTOR *pIntrPending); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispServiceLowLatencyIntrs(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 arg3, NvU32 arg4, THREAD_STATE_NODE *arg5, NvU32 *pIntrServicedHeadMask, MC_ENGINE_BITVECTOR *pIntrPending) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispServiceLowLatencyIntrs(pGpu, pKernelDisplay, arg3, arg4, arg5, pIntrServicedHeadMask, pIntrPending) kdispServiceLowLatencyIntrs_KERNEL(pGpu, pKernelDisplay, arg3, arg4, arg5, pIntrServicedHeadMask, pIntrPending) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispServiceLowLatencyIntrs_HAL(pGpu, pKernelDisplay, arg3, arg4, arg5, pIntrServicedHeadMask, pIntrPending) kdispServiceLowLatencyIntrs(pGpu, pKernelDisplay, arg3, arg4, arg5, pIntrServicedHeadMask, pIntrPending) + +NV_STATUS kdispConstructInstMem_IMPL(struct KernelDisplay *pKernelDisplay); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispConstructInstMem(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispConstructInstMem(pKernelDisplay) kdispConstructInstMem_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define 
kdispConstructInstMem_HAL(pKernelDisplay) kdispConstructInstMem(pKernelDisplay) + +void kdispDestructInstMem_IMPL(struct KernelDisplay *pKernelDisplay); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispDestructInstMem(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispDestructInstMem(pKernelDisplay) kdispDestructInstMem_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispDestructInstMem_HAL(pKernelDisplay) kdispDestructInstMem(pKernelDisplay) + +NV_STATUS kdispSelectClass_v03_00_KERNEL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 swClass); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispSelectClass(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 swClass) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispSelectClass(pGpu, pKernelDisplay, swClass) kdispSelectClass_v03_00_KERNEL(pGpu, pKernelDisplay, swClass) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispSelectClass_HAL(pGpu, pKernelDisplay, swClass) kdispSelectClass(pGpu, pKernelDisplay, swClass) + +NvS32 kdispGetBaseOffset_v04_02(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NvS32 kdispGetBaseOffset(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return 0; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetBaseOffset(pGpu, pKernelDisplay) kdispGetBaseOffset_v04_02(pGpu, pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispGetBaseOffset_HAL(pGpu, pKernelDisplay) kdispGetBaseOffset(pGpu, pKernelDisplay) + +NV_STATUS kdispGetChannelNum_v03_00(struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pChannelNum); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispGetChannelNum(struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pChannelNum) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetChannelNum(pKernelDisplay, channelClass, channelInstance, pChannelNum) kdispGetChannelNum_v03_00(pKernelDisplay, channelClass, channelInstance, pChannelNum) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispGetChannelNum_HAL(pKernelDisplay, channelClass, channelInstance, pChannelNum) kdispGetChannelNum(pKernelDisplay, channelClass, channelInstance, pChannelNum) + +void kdispGetDisplayCapsBaseAndSize_v03_00(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispGetDisplayCapsBaseAndSize(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetDisplayCapsBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplayCapsBaseAndSize_v03_00(pGpu, pKernelDisplay, pOffset, pSize) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispGetDisplayCapsBaseAndSize_HAL(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplayCapsBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize) + +void kdispGetDisplaySfUserBaseAndSize_v03_00(struct OBJGPU *pGpu, struct KernelDisplay 
*pKernelDisplay, NvU32 *pOffset, NvU32 *pSize); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispGetDisplaySfUserBaseAndSize(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetDisplaySfUserBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplaySfUserBaseAndSize_v03_00(pGpu, pKernelDisplay, pOffset, pSize) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispGetDisplaySfUserBaseAndSize_HAL(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplaySfUserBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize) + +NV_STATUS kdispGetDisplayChannelUserBaseAndSize_v03_00(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pOffset, NvU32 *pSize); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispGetDisplayChannelUserBaseAndSize(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pOffset, NvU32 *pSize) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetDisplayChannelUserBaseAndSize(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) kdispGetDisplayChannelUserBaseAndSize_v03_00(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispGetDisplayChannelUserBaseAndSize_HAL(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) kdispGetDisplayChannelUserBaseAndSize(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) + +NV_STATUS kdispImportImpData_IMPL(struct KernelDisplay *pKernelDisplay); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispImportImpData(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispImportImpData(pKernelDisplay) kdispImportImpData_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispImportImpData_HAL(pKernelDisplay) kdispImportImpData(pKernelDisplay) + +NV_STATUS kdispArbAndAllocDisplayBandwidth_v04_02(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, enum DISPLAY_ICC_BW_CLIENT iccBwClient, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispArbAndAllocDisplayBandwidth(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, enum DISPLAY_ICC_BW_CLIENT iccBwClient, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispArbAndAllocDisplayBandwidth(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispArbAndAllocDisplayBandwidth_v04_02(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispArbAndAllocDisplayBandwidth_HAL(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispArbAndAllocDisplayBandwidth(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) + +NV_STATUS 
kdispSetPushBufferParamsToPhysical_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvHandle hObjectBuffer, struct ContextDma *pBufferContextDma, NvU32 hClass, NvU32 channelInstance, DISPCHNCLASS internalDispChnClass, ChannelPBSize channelPBSize, NvU32 subDeviceId); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispSetPushBufferParamsToPhysical(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvHandle hObjectBuffer, struct ContextDma *pBufferContextDma, NvU32 hClass, NvU32 channelInstance, DISPCHNCLASS internalDispChnClass, ChannelPBSize channelPBSize, NvU32 subDeviceId) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispSetPushBufferParamsToPhysical(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass, channelPBSize, subDeviceId) kdispSetPushBufferParamsToPhysical_IMPL(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass, channelPBSize, subDeviceId) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispSetPushBufferParamsToPhysical_HAL(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass, channelPBSize, subDeviceId) kdispSetPushBufferParamsToPhysical(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass, channelPBSize, subDeviceId) + +static inline NV_STATUS kdispAcquireDispChannelHw_56cd7a(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvU32 channelInstance, NvHandle hObjectBuffer, NvU32 initialGetPutOffset, NvBool allowGrabWithinSameClient, NvBool connectPbAtGrab) { + return NV_OK; +} + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispAcquireDispChannelHw(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvU32 channelInstance, NvHandle hObjectBuffer, NvU32 initialGetPutOffset, NvBool allowGrabWithinSameClient, NvBool connectPbAtGrab) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispAcquireDispChannelHw(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) kdispAcquireDispChannelHw_56cd7a(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispAcquireDispChannelHw_HAL(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) kdispAcquireDispChannelHw(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) + +static inline NV_STATUS kdispReleaseDispChannelHw_56cd7a(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) { + return NV_OK; +} + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispReleaseDispChannelHw(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispReleaseDispChannelHw(pKernelDisplay, pDispChannel) 
kdispReleaseDispChannelHw_56cd7a(pKernelDisplay, pDispChannel) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispReleaseDispChannelHw_HAL(pKernelDisplay, pDispChannel) kdispReleaseDispChannelHw(pKernelDisplay, pDispChannel) + +NV_STATUS kdispMapDispChannel_IMPL(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispMapDispChannel(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispMapDispChannel(pKernelDisplay, pDispChannel) kdispMapDispChannel_IMPL(pKernelDisplay, pDispChannel) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispMapDispChannel_HAL(pKernelDisplay, pDispChannel) kdispMapDispChannel(pKernelDisplay, pDispChannel) + +void kdispUnbindUnmapDispChannel_IMPL(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispUnbindUnmapDispChannel(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispUnbindUnmapDispChannel(pKernelDisplay, pDispChannel) kdispUnbindUnmapDispChannel_IMPL(pKernelDisplay, pDispChannel) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispUnbindUnmapDispChannel_HAL(pKernelDisplay, pDispChannel) kdispUnbindUnmapDispChannel(pKernelDisplay, pDispChannel) + +NV_STATUS kdispRegisterRgLineCallback_IMPL(struct KernelDisplay *pKernelDisplay, struct RgLineCallback *pRgLineCallback, NvU32 head, NvU32 rgIntrLine, NvBool bEnable); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispRegisterRgLineCallback(struct KernelDisplay *pKernelDisplay, struct RgLineCallback *pRgLineCallback, NvU32 head, NvU32 rgIntrLine, NvBool bEnable) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispRegisterRgLineCallback(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) kdispRegisterRgLineCallback_IMPL(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispRegisterRgLineCallback_HAL(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) kdispRegisterRgLineCallback(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) + +void kdispInvokeRgLineCallback_KERNEL(struct KernelDisplay *pKernelDisplay, NvU32 head, NvU32 rgIntrLine, NvBool bIsIrqlIsr); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispInvokeRgLineCallback(struct KernelDisplay *pKernelDisplay, NvU32 head, NvU32 rgIntrLine, NvBool bIsIrqlIsr) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispInvokeRgLineCallback(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) kdispInvokeRgLineCallback_KERNEL(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispInvokeRgLineCallback_HAL(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) kdispInvokeRgLineCallback(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) + +NvU32 kdispReadPendingVblank_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *arg3); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NvU32 kdispReadPendingVblank(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE 
*arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return 0; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispReadPendingVblank(pGpu, pKernelDisplay, arg3) kdispReadPendingVblank_IMPL(pGpu, pKernelDisplay, arg3) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, arg3) kdispReadPendingVblank(pGpu, pKernelDisplay, arg3) + +static inline NvBool kdispGetVgaWorkspaceBase_72a2e1(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU64 *pOffset) { + NV_ASSERT_PRECOMP(0); + return NV_FALSE; +} + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NvBool kdispGetVgaWorkspaceBase(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU64 *pOffset) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetVgaWorkspaceBase(pGpu, pKernelDisplay, pOffset) kdispGetVgaWorkspaceBase_72a2e1(pGpu, pKernelDisplay, pOffset) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispGetVgaWorkspaceBase_HAL(pGpu, pKernelDisplay, pOffset) kdispGetVgaWorkspaceBase(pGpu, pKernelDisplay, pOffset) + +void kdispInvokeDisplayModesetCallback_KERNEL(struct KernelDisplay *pKernelDisplay, NvBool bModesetStart, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS); + +void kdispInvokeDisplayModesetCallback_PHYSICAL(struct KernelDisplay *pKernelDisplay, NvBool bModesetStart, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispInvokeDisplayModesetCallback(struct KernelDisplay *pKernelDisplay, NvBool bModesetStart, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispInvokeDisplayModesetCallback(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispInvokeDisplayModesetCallback_KERNEL(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispInvokeDisplayModesetCallback_HAL(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispInvokeDisplayModesetCallback(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) + +NV_STATUS kdispReadRgLineCountAndFrameCount_v03_00_PHYSICAL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 head, NvU32 *pLineCount, NvU32 *pFrameCount); + +NV_STATUS kdispReadRgLineCountAndFrameCount_v03_00_KERNEL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 head, NvU32 *pLineCount, NvU32 *pFrameCount); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispReadRgLineCountAndFrameCount(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 head, NvU32 *pLineCount, NvU32 *pFrameCount) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispReadRgLineCountAndFrameCount(pGpu, pKernelDisplay, head, pLineCount, pFrameCount) kdispReadRgLineCountAndFrameCount_v03_00_KERNEL(pGpu, pKernelDisplay, head, pLineCount, pFrameCount) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispReadRgLineCountAndFrameCount_HAL(pGpu, pKernelDisplay, head, pLineCount, pFrameCount) kdispReadRgLineCountAndFrameCount(pGpu, pKernelDisplay, head, pLineCount, pFrameCount) + +static inline NV_STATUS 
kdispDsmMxmMxcbExecuteAcpi_92bfc3(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, void *pInOutData, NvU16 *outDataSize) { + NV_ASSERT_PRECOMP(0); + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispDsmMxmMxcbExecuteAcpi(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, void *pInOutData, NvU16 *outDataSize) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispDsmMxmMxcbExecuteAcpi(pGpu, pKernelDisplay, pInOutData, outDataSize) kdispDsmMxmMxcbExecuteAcpi_92bfc3(pGpu, pKernelDisplay, pInOutData, outDataSize) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispDsmMxmMxcbExecuteAcpi_HAL(pGpu, pKernelDisplay, pInOutData, outDataSize) kdispDsmMxmMxcbExecuteAcpi(pGpu, pKernelDisplay, pInOutData, outDataSize) + +static inline NV_STATUS kdispInitBrightcStateLoad_56cd7a(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + return NV_OK; +} + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispInitBrightcStateLoad(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispInitBrightcStateLoad(pGpu, pKernelDisplay) kdispInitBrightcStateLoad_56cd7a(pGpu, pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispInitBrightcStateLoad_HAL(pGpu, pKernelDisplay) kdispInitBrightcStateLoad(pGpu, pKernelDisplay) + +static inline NV_STATUS kdispSetupAcpiEdid_56cd7a(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + return NV_OK; +} + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispSetupAcpiEdid(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispSetupAcpiEdid(pGpu, pKernelDisplay) kdispSetupAcpiEdid_56cd7a(pGpu, pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispSetupAcpiEdid_HAL(pGpu, pKernelDisplay) kdispSetupAcpiEdid(pGpu, pKernelDisplay) + +NV_STATUS kdispGetRgScanLock_v02_01(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 head0, struct OBJGPU *pPeerGpu, NvU32 head1, NvBool *pMasterScanLock, NvU32 *pMasterScanLockPin, NvBool *pSlaveScanLock, NvU32 *pSlaveScanLockPin); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispGetRgScanLock(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 head0, struct OBJGPU *pPeerGpu, NvU32 head1, NvBool *pMasterScanLock, NvU32 *pMasterScanLockPin, NvBool *pSlaveScanLock, NvU32 *pSlaveScanLockPin) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetRgScanLock(pGpu, pKernelDisplay, head0, pPeerGpu, head1, pMasterScanLock, pMasterScanLockPin, pSlaveScanLock, pSlaveScanLockPin) kdispGetRgScanLock_v02_01(pGpu, pKernelDisplay, head0, pPeerGpu, head1, pMasterScanLock, pMasterScanLockPin, pSlaveScanLock, pSlaveScanLockPin) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispGetRgScanLock_HAL(pGpu, pKernelDisplay, head0, pPeerGpu, head1, pMasterScanLock, pMasterScanLockPin, pSlaveScanLock, pSlaveScanLockPin) kdispGetRgScanLock(pGpu, pKernelDisplay, head0, pPeerGpu, head1, pMasterScanLock, pMasterScanLockPin, pSlaveScanLock, pSlaveScanLockPin) + +NV_STATUS kdispDetectSliLink_v04_00(struct 
KernelDisplay *pKernelDisplay, struct OBJGPU *pParentGpu, struct OBJGPU *pChildGpu, NvU32 ParentDrPort, NvU32 ChildDrPort); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispDetectSliLink(struct KernelDisplay *pKernelDisplay, struct OBJGPU *pParentGpu, struct OBJGPU *pChildGpu, NvU32 ParentDrPort, NvU32 ChildDrPort) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispDetectSliLink(pKernelDisplay, pParentGpu, pChildGpu, ParentDrPort, ChildDrPort) kdispDetectSliLink_v04_00(pKernelDisplay, pParentGpu, pChildGpu, ParentDrPort, ChildDrPort) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispDetectSliLink_HAL(pKernelDisplay, pParentGpu, pChildGpu, ParentDrPort, ChildDrPort) kdispDetectSliLink(pKernelDisplay, pParentGpu, pChildGpu, ParentDrPort, ChildDrPort) + +NV_STATUS kdispReadAwakenChannelNumMask_v03_00(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *arg3, DISPCHNCLASS arg4, THREAD_STATE_NODE *arg5); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispReadAwakenChannelNumMask(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *arg3, DISPCHNCLASS arg4, THREAD_STATE_NODE *arg5) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispReadAwakenChannelNumMask(pGpu, pKernelDisplay, arg3, arg4, arg5) kdispReadAwakenChannelNumMask_v03_00(pGpu, pKernelDisplay, arg3, arg4, arg5) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispReadAwakenChannelNumMask_HAL(pGpu, pKernelDisplay, arg3, arg4, arg5) kdispReadAwakenChannelNumMask(pGpu, pKernelDisplay, arg3, arg4, arg5) + +NV_STATUS kdispAllocateCommonHandle_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispAllocateCommonHandle(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispAllocateCommonHandle(pGpu, pKernelDisplay) kdispAllocateCommonHandle_IMPL(pGpu, pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispAllocateCommonHandle_HAL(pGpu, pKernelDisplay) kdispAllocateCommonHandle(pGpu, pKernelDisplay) + +void kdispDestroyCommonHandle_IMPL(struct KernelDisplay *pKernelDisplay); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispDestroyCommonHandle(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispDestroyCommonHandle(pKernelDisplay) kdispDestroyCommonHandle_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispDestroyCommonHandle_HAL(pKernelDisplay) kdispDestroyCommonHandle(pKernelDisplay) + +static inline NV_STATUS kdispAllocateSharedMem_46f6a7(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispAllocateSharedMem(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispAllocateSharedMem(pGpu, pKernelDisplay) kdispAllocateSharedMem_46f6a7(pGpu, pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispAllocateSharedMem_HAL(pGpu, pKernelDisplay) 
kdispAllocateSharedMem(pGpu, pKernelDisplay) + +void kdispFreeSharedMem_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispFreeSharedMem(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispFreeSharedMem(pGpu, pKernelDisplay) kdispFreeSharedMem_IMPL(pGpu, pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispFreeSharedMem_HAL(pGpu, pKernelDisplay) kdispFreeSharedMem(pGpu, pKernelDisplay) + +NvBool kdispIsDisplayConnected_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NvBool kdispIsDisplayConnected(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispIsDisplayConnected(pGpu, pKernelDisplay) kdispIsDisplayConnected_IMPL(pGpu, pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispIsDisplayConnected_HAL(pGpu, pKernelDisplay) kdispIsDisplayConnected(pGpu, pKernelDisplay) + +NvU32 kdispGetSupportedDisplayMask_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NvU32 kdispGetSupportedDisplayMask(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return 0; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetSupportedDisplayMask(pGpu, pKernelDisplay) kdispGetSupportedDisplayMask_IMPL(pGpu, pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispGetSupportedDisplayMask_HAL(pGpu, pKernelDisplay) kdispGetSupportedDisplayMask(pGpu, pKernelDisplay) + +static inline void kdispUpdatePdbAfterIpHalInit_b3696a(struct KernelDisplay *pKernelDisplay) { + return; +} + + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispUpdatePdbAfterIpHalInit(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispUpdatePdbAfterIpHalInit(pKernelDisplay) kdispUpdatePdbAfterIpHalInit_b3696a(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispUpdatePdbAfterIpHalInit_HAL(pKernelDisplay) kdispUpdatePdbAfterIpHalInit(pKernelDisplay) + +NvBool kdispReadPendingWinSemIntr_v04_01(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *arg3, NvU32 *arg4); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NvBool kdispReadPendingWinSemIntr(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *arg3, NvU32 *arg4) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispReadPendingWinSemIntr(pGpu, pKernelDisplay, arg3, arg4) kdispReadPendingWinSemIntr_v04_01(pGpu, pKernelDisplay, arg3, arg4) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispReadPendingWinSemIntr_HAL(pGpu, pKernelDisplay, arg3, arg4) kdispReadPendingWinSemIntr(pGpu, pKernelDisplay, arg3, arg4) + +void kdispHandleWinSemEvt_v04_01(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *arg3); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispHandleWinSemEvt(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); 
+} +#else //__nvoc_kern_disp_h_disabled +#define kdispHandleWinSemEvt(pGpu, pKernelDisplay, arg3) kdispHandleWinSemEvt_v04_01(pGpu, pKernelDisplay, arg3) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispHandleWinSemEvt_HAL(pGpu, pKernelDisplay, arg3) kdispHandleWinSemEvt(pGpu, pKernelDisplay, arg3) + +static inline void kdispIntrRetrigger_b3696a(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 index, THREAD_STATE_NODE *arg4) { + return; +} + + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispIntrRetrigger(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 index, THREAD_STATE_NODE *arg4) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispIntrRetrigger(pGpu, pKernelDisplay, index, arg4) kdispIntrRetrigger_b3696a(pGpu, pKernelDisplay, index, arg4) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispIntrRetrigger_HAL(pGpu, pKernelDisplay, index, arg4) kdispIntrRetrigger(pGpu, pKernelDisplay, index, arg4) + +NvU32 kdispServiceAwakenIntr_v03_00(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *pThreadState); + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NvU32 kdispServiceAwakenIntr(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return 0; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispServiceAwakenIntr(pGpu, pKernelDisplay, pThreadState) kdispServiceAwakenIntr_v03_00(pGpu, pKernelDisplay, pThreadState) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispServiceAwakenIntr_HAL(pGpu, pKernelDisplay, pThreadState) kdispServiceAwakenIntr(pGpu, pKernelDisplay, pThreadState) + +static inline void kdispSetChannelTrashAndAbortAccel_b3696a(struct OBJGPU *arg1, struct KernelDisplay *arg2, NvU32 arg3, NvU32 arg4, NvBool arg5) { + return; +} + + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispSetChannelTrashAndAbortAccel(struct OBJGPU *arg1, struct KernelDisplay *arg2, NvU32 arg3, NvU32 arg4, NvBool arg5) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispSetChannelTrashAndAbortAccel(arg1, arg2, arg3, arg4, arg5) kdispSetChannelTrashAndAbortAccel_b3696a(arg1, arg2, arg3, arg4, arg5) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispSetChannelTrashAndAbortAccel_HAL(arg1, arg2, arg3, arg4, arg5) kdispSetChannelTrashAndAbortAccel(arg1, arg2, arg3, arg4, arg5) + +static inline NvBool kdispIsChannelIdle_3dd2c9(struct OBJGPU *arg1, struct KernelDisplay *arg2, NvU32 arg3, NvU32 arg4) { + return NV_FALSE; +} + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NvBool kdispIsChannelIdle(struct OBJGPU *arg1, struct KernelDisplay *arg2, NvU32 arg3, NvU32 arg4) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispIsChannelIdle(arg1, arg2, arg3, arg4) kdispIsChannelIdle_3dd2c9(arg1, arg2, arg3, arg4) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispIsChannelIdle_HAL(arg1, arg2, arg3, arg4) kdispIsChannelIdle(arg1, arg2, arg3, arg4) + +static inline void kdispApplyChannelConnectDisconnect_b3696a(struct OBJGPU *arg1, struct KernelDisplay *arg2, NvU32 arg3, NvU32 arg4, NvU32 arg5) { + return; +} + + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispApplyChannelConnectDisconnect(struct OBJGPU *arg1, struct KernelDisplay *arg2, NvU32 arg3, NvU32 arg4, NvU32 arg5) { + 
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispApplyChannelConnectDisconnect(arg1, arg2, arg3, arg4, arg5) kdispApplyChannelConnectDisconnect_b3696a(arg1, arg2, arg3, arg4, arg5) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispApplyChannelConnectDisconnect_HAL(arg1, arg2, arg3, arg4, arg5) kdispApplyChannelConnectDisconnect(arg1, arg2, arg3, arg4, arg5) + +static inline NvBool kdispIsChannelAllocatedHw_3dd2c9(struct OBJGPU *arg1, struct KernelDisplay *arg2, NvU32 arg3, NvU32 arg4) { + return NV_FALSE; +} + + +#ifdef __nvoc_kern_disp_h_disabled +static inline NvBool kdispIsChannelAllocatedHw(struct OBJGPU *arg1, struct KernelDisplay *arg2, NvU32 arg3, NvU32 arg4) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispIsChannelAllocatedHw(arg1, arg2, arg3, arg4) kdispIsChannelAllocatedHw_3dd2c9(arg1, arg2, arg3, arg4) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispIsChannelAllocatedHw_HAL(arg1, arg2, arg3, arg4) kdispIsChannelAllocatedHw(arg1, arg2, arg3, arg4) + +NV_STATUS kdispConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, ENGDESCRIPTOR engDesc); + +NV_STATUS kdispStatePreInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + +NV_STATUS kdispStateInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + +void kdispStateDestroy_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + +NV_STATUS kdispStateLoad_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags); + +NV_STATUS kdispStateUnload_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags); + +NvU32 kdispGetPBTargetAperture_v03_00(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 memAddrSpace, NvU32 cacheSnoop); + +NvU32 kdispGetPBTargetAperture_v05_01(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 memAddrSpace, NvU32 cacheSnoop); + +NV_STATUS kdispComputeDpModeSettings_v02_04(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 headIndex, DPMODESETDATA *pDpModesetData, DPIMPINFO *dpInfo); + +NV_STATUS kdispComputeDpModeSettings_v05_01(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 headIndex, DPMODESETDATA *pDpModesetData, DPIMPINFO *dpInfo); + +void kdispDestruct_IMPL(struct KernelDisplay *pKernelDisplay); + +#define __nvoc_kdispDestruct(pKernelDisplay) kdispDestruct_IMPL(pKernelDisplay) +NV_STATUS kdispConstructKhead_IMPL(struct KernelDisplay *pKernelDisplay); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispConstructKhead(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispConstructKhead(pKernelDisplay) kdispConstructKhead_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +void kdispDestructKhead_IMPL(struct KernelDisplay *pKernelDisplay); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispDestructKhead(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispDestructKhead(pKernelDisplay) kdispDestructKhead_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +NV_STATUS kdispGetIntChnClsForHwCls_IMPL(struct KernelDisplay *pKernelDisplay, NvU32 hwClass, DISPCHNCLASS *pDispChnClass); + +#ifdef 
__nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispGetIntChnClsForHwCls(struct KernelDisplay *pKernelDisplay, NvU32 hwClass, DISPCHNCLASS *pDispChnClass) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetIntChnClsForHwCls(pKernelDisplay, hwClass, pDispChnClass) kdispGetIntChnClsForHwCls_IMPL(pKernelDisplay, hwClass, pDispChnClass) +#endif //__nvoc_kern_disp_h_disabled + +void kdispNotifyCommonEvent_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 notifyIndex, void *pNotifyParams); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispNotifyCommonEvent(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 notifyIndex, void *pNotifyParams) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispNotifyCommonEvent(pGpu, pKernelDisplay, notifyIndex, pNotifyParams) kdispNotifyCommonEvent_IMPL(pGpu, pKernelDisplay, notifyIndex, pNotifyParams) +#endif //__nvoc_kern_disp_h_disabled + +void kdispNotifyEvent_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispNotifyEvent(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispNotifyEvent(pGpu, pKernelDisplay, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) kdispNotifyEvent_IMPL(pGpu, pKernelDisplay, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) +#endif //__nvoc_kern_disp_h_disabled + +NV_STATUS kdispOptimizePerFrameOsCallbacks_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvBool clearIntr, THREAD_STATE_NODE *pThreadState, NvU32 *intrServicedHeadMask, MC_ENGINE_BITVECTOR *intrPending); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispOptimizePerFrameOsCallbacks(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvBool clearIntr, THREAD_STATE_NODE *pThreadState, NvU32 *intrServicedHeadMask, MC_ENGINE_BITVECTOR *intrPending) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispOptimizePerFrameOsCallbacks(pGpu, pKernelDisplay, clearIntr, pThreadState, intrServicedHeadMask, intrPending) kdispOptimizePerFrameOsCallbacks_IMPL(pGpu, pKernelDisplay, clearIntr, pThreadState, intrServicedHeadMask, intrPending) +#endif //__nvoc_kern_disp_h_disabled + +NV_STATUS kdispSetupVBlank_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, void *pProc, void *pParm1, void *pParm2, NvU32 Head, void *pParm3); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispSetupVBlank(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, void *pProc, void *pParm1, void *pParm2, NvU32 Head, void *pParm3) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispSetupVBlank(pGpu, pKernelDisplay, pProc, pParm1, pParm2, Head, pParm3) kdispSetupVBlank_IMPL(pGpu, pKernelDisplay, pProc, pParm1, pParm2, Head, pParm3) +#endif //__nvoc_kern_disp_h_disabled + +void kdispDestroyVBlank_IMPL(struct KernelDisplay *pKernelDisplay); + +#ifdef 
__nvoc_kern_disp_h_disabled +static inline void kdispDestroyVBlank(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispDestroyVBlank(pKernelDisplay) kdispDestroyVBlank_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +void kdispHandleAggressiveVblank_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *arg3, MC_ENGINE_BITVECTOR *arg4); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispHandleAggressiveVblank(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *arg3, MC_ENGINE_BITVECTOR *arg4) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispHandleAggressiveVblank(pGpu, pKernelDisplay, arg3, arg4) kdispHandleAggressiveVblank_IMPL(pGpu, pKernelDisplay, arg3, arg4) +#endif //__nvoc_kern_disp_h_disabled + +void kdispApplyAggressiveVblankHandlingWar_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispApplyAggressiveVblankHandlingWar(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispApplyAggressiveVblankHandlingWar(pGpu, pKernelDisplay) kdispApplyAggressiveVblankHandlingWar_IMPL(pGpu, pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +void kdispSetWarPurgeSatellitesOnCoreFree_IMPL(struct KernelDisplay *pKernelDisplay, NvBool value); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispSetWarPurgeSatellitesOnCoreFree(struct KernelDisplay *pKernelDisplay, NvBool value) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispSetWarPurgeSatellitesOnCoreFree(pKernelDisplay, value) kdispSetWarPurgeSatellitesOnCoreFree_IMPL(pKernelDisplay, value) +#endif //__nvoc_kern_disp_h_disabled + +#undef PRIVATE_FIELD + + +/*! + * Custom roll a conditional acquire spinlock. + * To be replaced when nvport supports drop-in replacement + */ +void kdispAcquireLowLatencyLock(volatile NvS32 *pLowLatencyLock); +NvBool kdispAcquireLowLatencyLockConditional(volatile NvS32 *pLowLatencyLock); +void kdispReleaseLowLatencyLock(volatile NvS32 *pLowLatencyLock); + +void +dispdeviceFillVgaSavedDisplayState( struct OBJGPU *pGpu, + NvU64 vgaAddr, + NvU8 vgaMemType, + NvBool vgaValid, + NvU64 workspaceAddr, + NvU8 workspaceMemType, + NvBool workspaceValid, + NvBool baseValid, + NvBool workspaceBaseValid +); + +/*! 
PushBuffer Target Aperture Types */ +typedef enum +{ + IOVA, + PHYS_NVM, + PHYS_PCI, + PHYS_PCI_COHERENT +} PBTARGETAPERTURE; + +static NV_INLINE struct KernelHead* +kdispGetHead +( + struct KernelDisplay *pKernelDisplay, + NvU32 head +) +{ + if (head >= OBJ_MAX_HEADS) + { + return NULL; + } + + return pKernelDisplay->pKernelHead[head]; +} + +static NV_INLINE NvU32 +kdispGetNumHeads(struct KernelDisplay *pKernelDisplay) +{ + NV_ASSERT(pKernelDisplay != NULL); + return pKernelDisplay->numHeads; +} + +static NV_INLINE NvU32 +kdispGetDeferredVblankHeadMask(struct KernelDisplay *pKernelDisplay) +{ + return pKernelDisplay->deferredVblankHeadMask; +} + +static NV_INLINE void +kdispSetDeferredVblankHeadMask(struct KernelDisplay *pKernelDisplay, NvU32 vblankHeadMask) +{ + pKernelDisplay->deferredVblankHeadMask = vblankHeadMask; +} + +static NV_INLINE NvHandle +kdispGetInternalClientHandle(struct KernelDisplay *pKernelDisplay) +{ + return pKernelDisplay->hInternalClient; +} + +static NV_INLINE NvHandle +kdispGetDispCommonHandle(struct KernelDisplay *pKernelDisplay) +{ + return pKernelDisplay->hDispCommonHandle; +} + +#endif // KERN_DISP_H + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_KERN_DISP_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_head_nvoc.c b/src/nvidia/generated/g_kernel_head_nvoc.c new file mode 100644 index 0000000..9292ba3 --- /dev/null +++ b/src/nvidia/generated/g_kernel_head_nvoc.c @@ -0,0 +1,277 @@ +#define NVOC_KERNEL_HEAD_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_head_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x0145e6 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHead; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +// Forward declarations for KernelHead +void __nvoc_init__Object(Object*); +void __nvoc_init__KernelHead(KernelHead*, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner); +void __nvoc_init_funcTable_KernelHead(KernelHead*, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner); +NV_STATUS __nvoc_ctor_KernelHead(KernelHead*, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner); +void __nvoc_init_dataField_KernelHead(KernelHead*, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner); +void __nvoc_dtor_KernelHead(KernelHead*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__KernelHead; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__KernelHead; + +// Down-thunk(s) to bridge KernelHead methods from ancestors (if any) + +// Up-thunk(s) to bridge KernelHead methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHead = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelHead), + /*classId=*/ classId(KernelHead), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelHead", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelHead, + /*pCastInfo=*/ &__nvoc_castinfo__KernelHead, + /*pExportInfo=*/ &__nvoc_export_info__KernelHead +}; + + +// Metadata with per-class RTTI with ancestor(s) +static const struct NVOC_METADATA__KernelHead 
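+/*
+ * The metadata below is what backs dynamicCast(): each RTTI entry records a
+ * class definition plus the byte offset of that base inside KernelHead, so
+ * a successful cast reduces to an RTTI match and an offset adjustment.
+ * Typical use (sketch; pObj is assumed to be some Dynamic-derived pointer):
+ *
+ *   KernelHead *pKernelHead = dynamicCast(pObj, KernelHead);
+ *   if (pKernelHead == NULL)
+ *       return NV_ERR_INVALID_OBJECT;
+ */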
__nvoc_metadata__KernelHead = { + .rtti.pClassDef = &__nvoc_class_def_KernelHead, // (khead) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelHead, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(KernelHead, __nvoc_base_Object), +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__KernelHead = { + .numRelatives = 2, + .relatives = { + &__nvoc_metadata__KernelHead.rtti, // [0]: (khead) this + &__nvoc_metadata__KernelHead.metadata__Object.rtti, // [1]: (obj) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__KernelHead = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_KernelHead(KernelHead *pThis) { + __nvoc_kheadDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal; + const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pGpuhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(pGpuhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(dispIpHal); + PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_KernelHead_fail_Object; + __nvoc_init_dataField_KernelHead(pThis, pRmhalspecowner, pGpuhalspecowner); + + status = __nvoc_kheadConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_KernelHead_fail__init; + goto __nvoc_ctor_KernelHead_exit; // Success + +__nvoc_ctor_KernelHead_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_KernelHead_fail_Object: +__nvoc_ctor_KernelHead_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_KernelHead_1(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal; + const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pGpuhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(pGpuhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + 
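+    /*
+     * The assignments below pick one concrete body per halified method
+     * using a two-level bit test: (HalVarIdx >> 5) selects a 32-bit mask
+     * word and (1UL << (HalVarIdx & 0x1f)) a bit within it, so a mask of
+     * 0x00001000UL in word 0 matches variant index 12 (DISPv0402 per the
+     * generator's annotations). An equivalent helper, written out only for
+     * illustration:
+     *
+     *   static NvBool halVarInWord0Mask(unsigned long halVarIdx, NvU32 mask)
+     *   {
+     *       return ((halVarIdx >> 5) == 0UL) &&
+     *              (((1UL << (halVarIdx & 0x1f)) & mask) != 0UL);
+     *   }
+     */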
PORT_UNREFERENCED_VARIABLE(dispIpHal); + PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + // kheadGetLoadVCounter -- halified (2 hals) body + if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00001000UL) )) /* DispIpHal: DISPv0402 */ + { + pThis->__kheadGetLoadVCounter__ = &kheadGetLoadVCounter_v03_00; + } + else + { + pThis->__kheadGetLoadVCounter__ = &kheadGetLoadVCounter_v05_01; + } + + // kheadGetCrashLockCounterV -- halified (2 hals) body + if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00048000UL) )) /* DispIpHal: DISPv0501 | DISPv0504 */ + { + pThis->__kheadGetCrashLockCounterV__ = &kheadGetCrashLockCounterV_v05_01; + } + // default + else + { + pThis->__kheadGetCrashLockCounterV__ = &kheadGetCrashLockCounterV_4a4dee; + } + + // kheadVsyncNotificationOverRgVblankIntr -- halified (2 hals) body + if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00001000UL) )) /* DispIpHal: DISPv0402 */ + { + pThis->__kheadVsyncNotificationOverRgVblankIntr__ = &kheadVsyncNotificationOverRgVblankIntr_b3696a; + } + else + { + pThis->__kheadVsyncNotificationOverRgVblankIntr__ = &kheadVsyncNotificationOverRgVblankIntr_v04_04; + } +} // End __nvoc_init_funcTable_KernelHead_1 with approximately 6 basic block(s). + + +// Initialize vtable(s) for 3 virtual method(s). +void __nvoc_init_funcTable_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner) { + + // Initialize vtable(s) with 3 per-object function pointer(s). + __nvoc_init_funcTable_KernelHead_1(pThis, pRmhalspecowner, pGpuhalspecowner); +} + +// Initialize newly constructed object. +void __nvoc_init__KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner, GpuHalspecOwner *pGpuhalspecowner) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_KernelHead = pThis; // (khead) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Object(&pThis->__nvoc_base_Object); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__KernelHead.metadata__Object; // (obj) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__KernelHead; // (khead) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_KernelHead(pThis, pRmhalspecowner, pGpuhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelHead(KernelHead **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + KernelHead *pThis; + RmHalspecOwner *pRmhalspecowner; + GpuHalspecOwner *pGpuhalspecowner; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(KernelHead), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(KernelHead)); + + pThis->__nvoc_base_Object.createFlags = createFlags; + + // pParent must be a valid object that derives from a halspec owner class. + NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_ARGUMENT); + + // Link the child into the parent unless flagged not to do so. 
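+    /*
+     * Caller's-eye view of this constructor (sketch, assuming the parent
+     * is an OBJGPU-derived object that satisfies both the RmHalspecOwner
+     * and GpuHalspecOwner lookups performed below):
+     *
+     *   KernelHead *pKernelHead = NULL;
+     *   NV_ASSERT_OK_OR_RETURN(
+     *       __objCreate_KernelHead(&pKernelHead, pGpu, 0));
+     *
+     * When NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY is passed instead of
+     * 0, pParent is consulted only for those halspec lookups and the new
+     * object is left unparented.
+     */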
+ if (!(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + if ((pGpuhalspecowner = dynamicCast(pParent, GpuHalspecOwner)) == NULL) + pGpuhalspecowner = objFindAncestorOfType(GpuHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pGpuhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init__KernelHead(pThis, pRmhalspecowner, pGpuhalspecowner); + status = __nvoc_ctor_KernelHead(pThis, pRmhalspecowner, pGpuhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelHead_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_KernelHead_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(KernelHead)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelHead(KernelHead **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelHead(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_head_nvoc.h b/src/nvidia/generated/g_kernel_head_nvoc.h new file mode 100644 index 0000000..ffde139 --- /dev/null +++ b/src/nvidia/generated/g_kernel_head_nvoc.h @@ -0,0 +1,581 @@ + +#ifndef _G_KERNEL_HEAD_NVOC_H_ +#define _G_KERNEL_HEAD_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/**************************** Kernelhead Routines **************************\ +* * +* Kernel head object function Definitions. * +* * +\***************************************************************************/ + +#pragma once +#include "g_kernel_head_nvoc.h" + +#ifndef KERNEL_HEAD_H +#define KERNEL_HEAD_H + +/* ------------------------ Includes --------------------------------------- */ +#include "gpu/eng_state.h" +#include "gpu/disp/vblank_callback/vblank.h" +#include "gpu/gpu_halspec.h" + +/* ------------------------ Types definitions ------------------------------ */ +typedef enum +{ + headIntr_None = 0, + headIntr_Vblank = NVBIT(0), + headIntr_RgUnderflow = NVBIT(1), + headIntr_SdBucketWalkDone = NVBIT(2), + headIntr_RgVblank = NVBIT(3), + headIntr_VactiveSpaceVblank = NVBIT(4), + headIntr_RgVactiveSpaceVblank = NVBIT(5), + headIntr_RgStall = NVBIT(6), + headIntr_LoadV = NVBIT(7), + headIntr_LastData = NVBIT(8), + headIntr_RgLineA = NVBIT(9), + headIntr_RgLineB = NVBIT(10), + headIntr_CrcMismatch = NVBIT(11), + headIntr_SecPolicy = NVBIT(12), + headIntr_DmiLine = NVBIT(13), + headIntr_SfDpOverflow = NVBIT(14), + headIntr_RgSem0 = NVBIT(15), + headIntr_RgSem1 = NVBIT(16), + headIntr_RgSem2 = NVBIT(17), + headIntr_RgSem3 = NVBIT(18), + headIntr_RgSem4 = NVBIT(19), + headIntr_RgSem5 = NVBIT(20), +} HEADINTR, HEADINTRMASK; + +typedef struct +{ + NvU32 tuSize; + NvU32 waterMark; + NvU32 hBlankSym; + NvU32 vBlankSym; + NvU32 minHBlank; + NvU32 twoChannelAudioSymbols; + NvU32 eightChannelAudioSymbols; + NvU64 linkTotalDataRate; + NvBool bEnhancedFraming; + NvU64 effectiveBppxScaler; +} DPIMPINFO; + +#define headIntr_RgSem(i) (headIntr_RgSem0 << i) +#define headIntr_RgSem__SIZE_1 6 + +/* ------------------------ Macros & Defines ------------------------------- */ + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_KERNEL_HEAD_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__KernelHead; +struct NVOC_METADATA__Object; + +struct __nvoc_inner_struc_KernelHead_1__ { + struct { + NvU32 Total; + NvU32 LowLatency; + NvU32 NormLatency; + } Counters; + struct { + VBLANKCALLBACK *pListLL; + VBLANKCALLBACK *pListNL; + VBLANKCALLBACK CheckVblankCount; + } Callback; + NvU32 VblankCountTimeout; + NvU32 IntrState; + PORT_SPINLOCK *pSpinlock; +}; + + + +struct KernelHead { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__KernelHead *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Object __nvoc_base_Object; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct KernelHead *__nvoc_pbase_KernelHead; // khead + + // Vtable with 3 per-object function pointers + NvU32 (*__kheadGetLoadVCounter__)(struct OBJGPU *, struct KernelHead * /*this*/); // halified (2 hals) body + NvU32 (*__kheadGetCrashLockCounterV__)(struct OBJGPU *, struct KernelHead * /*this*/); // halified (2 hals) body + void (*__kheadVsyncNotificationOverRgVblankIntr__)(struct OBJGPU *, struct KernelHead * /*this*/); // halified (2 hals) body + + // Data members + struct __nvoc_inner_struc_KernelHead_1__ Vblank; + NvU32 PublicId; +}; + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__KernelHead { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; +}; + +#ifndef __NVOC_CLASS_KernelHead_TYPEDEF__ +#define __NVOC_CLASS_KernelHead_TYPEDEF__ +typedef struct KernelHead KernelHead; +#endif /* __NVOC_CLASS_KernelHead_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelHead +#define __nvoc_class_id_KernelHead 0x0145e6 +#endif /* __nvoc_class_id_KernelHead */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHead; + +#define __staticCast_KernelHead(pThis) \ + ((pThis)->__nvoc_pbase_KernelHead) + +#ifdef __nvoc_kernel_head_h_disabled +#define __dynamicCast_KernelHead(pThis) ((KernelHead*) NULL) +#else //__nvoc_kernel_head_h_disabled +#define __dynamicCast_KernelHead(pThis) \ + ((KernelHead*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelHead))) +#endif //__nvoc_kernel_head_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_KernelHead(KernelHead**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelHead(KernelHead**, Dynamic*, NvU32); +#define __objCreate_KernelHead(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelHead((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros +#define kheadGetLoadVCounter_FNPTR(pKernelHead) pKernelHead->__kheadGetLoadVCounter__ +#define kheadGetLoadVCounter(pGpu, pKernelHead) kheadGetLoadVCounter_DISPATCH(pGpu, pKernelHead) +#define kheadGetLoadVCounter_HAL(pGpu, pKernelHead) kheadGetLoadVCounter_DISPATCH(pGpu, pKernelHead) +#define kheadGetCrashLockCounterV_FNPTR(pKernelHead) pKernelHead->__kheadGetCrashLockCounterV__ +#define kheadGetCrashLockCounterV(pGpu, pKernelHead) kheadGetCrashLockCounterV_DISPATCH(pGpu, pKernelHead) +#define kheadGetCrashLockCounterV_HAL(pGpu, pKernelHead) kheadGetCrashLockCounterV_DISPATCH(pGpu, pKernelHead) +#define kheadVsyncNotificationOverRgVblankIntr_FNPTR(pKernelHead) pKernelHead->__kheadVsyncNotificationOverRgVblankIntr__ +#define kheadVsyncNotificationOverRgVblankIntr(pGpu, pKernelHead) kheadVsyncNotificationOverRgVblankIntr_DISPATCH(pGpu, pKernelHead) +#define kheadVsyncNotificationOverRgVblankIntr_HAL(pGpu, pKernelHead) kheadVsyncNotificationOverRgVblankIntr_DISPATCH(pGpu, pKernelHead) + +// Dispatch functions +static inline NvU32 kheadGetLoadVCounter_DISPATCH(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + return pKernelHead->__kheadGetLoadVCounter__(pGpu, pKernelHead); +} + +static inline NvU32 kheadGetCrashLockCounterV_DISPATCH(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + return pKernelHead->__kheadGetCrashLockCounterV__(pGpu, pKernelHead); +} + +static inline void kheadVsyncNotificationOverRgVblankIntr_DISPATCH(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + 
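+    /*
+     * As with the two dispatchers above, both the plain and the _HAL
+     * wrapper macros for each virtual expand to its _DISPATCH helper,
+     * which simply jumps through the per-object pointer installed by
+     * __nvoc_init_funcTable_KernelHead_1(). Illustrative call:
+     *
+     *   NvU32 loadv = kheadGetLoadVCounter(pGpu, pKernelHead);
+     *   (expands to pKernelHead->__kheadGetLoadVCounter__(pGpu, pKernelHead))
+     */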
pKernelHead->__kheadVsyncNotificationOverRgVblankIntr__(pGpu, pKernelHead); +} + +NvU32 kheadGetVblankTotalCounter_IMPL(struct KernelHead *pKernelHead); + + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadGetVblankTotalCounter(struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadGetVblankTotalCounter(pKernelHead) kheadGetVblankTotalCounter_IMPL(pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadGetVblankTotalCounter_HAL(pKernelHead) kheadGetVblankTotalCounter(pKernelHead) + +void kheadSetVblankTotalCounter_IMPL(struct KernelHead *pKernelHead, NvU32 arg2); + + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadSetVblankTotalCounter(struct KernelHead *pKernelHead, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadSetVblankTotalCounter(pKernelHead, arg2) kheadSetVblankTotalCounter_IMPL(pKernelHead, arg2) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadSetVblankTotalCounter_HAL(pKernelHead, arg2) kheadSetVblankTotalCounter(pKernelHead, arg2) + +NvU32 kheadGetVblankLowLatencyCounter_IMPL(struct KernelHead *pKernelHead); + + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadGetVblankLowLatencyCounter(struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadGetVblankLowLatencyCounter(pKernelHead) kheadGetVblankLowLatencyCounter_IMPL(pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadGetVblankLowLatencyCounter_HAL(pKernelHead) kheadGetVblankLowLatencyCounter(pKernelHead) + +void kheadSetVblankLowLatencyCounter_IMPL(struct KernelHead *pKernelHead, NvU32 arg2); + + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadSetVblankLowLatencyCounter(struct KernelHead *pKernelHead, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadSetVblankLowLatencyCounter(pKernelHead, arg2) kheadSetVblankLowLatencyCounter_IMPL(pKernelHead, arg2) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadSetVblankLowLatencyCounter_HAL(pKernelHead, arg2) kheadSetVblankLowLatencyCounter(pKernelHead, arg2) + +static inline NvU32 kheadGetVblankNormLatencyCounter_46f6a7(struct KernelHead *pKernelHead) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadGetVblankNormLatencyCounter(struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadGetVblankNormLatencyCounter(pKernelHead) kheadGetVblankNormLatencyCounter_46f6a7(pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadGetVblankNormLatencyCounter_HAL(pKernelHead) kheadGetVblankNormLatencyCounter(pKernelHead) + +static inline void kheadSetVblankNormLatencyCounter_b3696a(struct KernelHead *pKernelHead, NvU32 arg2) { + return; +} + + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadSetVblankNormLatencyCounter(struct KernelHead *pKernelHead, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadSetVblankNormLatencyCounter(pKernelHead, arg2) kheadSetVblankNormLatencyCounter_b3696a(pKernelHead, arg2) +#endif //__nvoc_kernel_head_h_disabled + +#define 
kheadSetVblankNormLatencyCounter_HAL(pKernelHead, arg2) kheadSetVblankNormLatencyCounter(pKernelHead, arg2) + +void kheadResetPendingLastData_v03_00(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, THREAD_STATE_NODE *pThreadState); + + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadResetPendingLastData(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadResetPendingLastData(pGpu, pKernelHead, pThreadState) kheadResetPendingLastData_v03_00(pGpu, pKernelHead, pThreadState) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadResetPendingLastData_HAL(pGpu, pKernelHead, pThreadState) kheadResetPendingLastData(pGpu, pKernelHead, pThreadState) + +static inline NvBool kheadReadVblankIntrEnable_86b752(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_FALSE); +} + +NvBool kheadReadVblankIntrEnable_v03_00(struct OBJGPU *pGpu, struct KernelHead *pKernelHead); + + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvBool kheadReadVblankIntrEnable(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadReadVblankIntrEnable(pGpu, pKernelHead) kheadReadVblankIntrEnable_86b752(pGpu, pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadReadVblankIntrEnable_HAL(pGpu, pKernelHead) kheadReadVblankIntrEnable(pGpu, pKernelHead) + +static inline NvBool kheadGetDisplayInitialized_86b752(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_FALSE); +} + + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvBool kheadGetDisplayInitialized(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadGetDisplayInitialized(pGpu, pKernelHead) kheadGetDisplayInitialized_86b752(pGpu, pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadGetDisplayInitialized_HAL(pGpu, pKernelHead) kheadGetDisplayInitialized(pGpu, pKernelHead) + +static inline void kheadWriteVblankIntrEnable_e426af(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvBool arg3) { + NV_ASSERT_PRECOMP(0); + return; +} + +void kheadWriteVblankIntrEnable_v03_00(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvBool arg3); + + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadWriteVblankIntrEnable(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvBool arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadWriteVblankIntrEnable(pGpu, pKernelHead, arg3) kheadWriteVblankIntrEnable_e426af(pGpu, pKernelHead, arg3) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadWriteVblankIntrEnable_HAL(pGpu, pKernelHead, arg3) kheadWriteVblankIntrEnable(pGpu, pKernelHead, arg3) + +static inline void kheadProcessVblankCallbacks_e426af(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg3) { + NV_ASSERT_PRECOMP(0); + return; +} + + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadProcessVblankCallbacks(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadProcessVblankCallbacks(pGpu, pKernelHead, arg3) 
kheadProcessVblankCallbacks_e426af(pGpu, pKernelHead, arg3) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadProcessVblankCallbacks_HAL(pGpu, pKernelHead, arg3) kheadProcessVblankCallbacks(pGpu, pKernelHead, arg3) + +static inline void kheadResetPendingVblank_e426af(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_PRECOMP(0); + return; +} + +void kheadResetPendingVblank_v04_01(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, THREAD_STATE_NODE *pThreadState); + + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadResetPendingVblank(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadResetPendingVblank(pGpu, pKernelHead, pThreadState) kheadResetPendingVblank_e426af(pGpu, pKernelHead, pThreadState) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadResetPendingVblank_HAL(pGpu, pKernelHead, pThreadState) kheadResetPendingVblank(pGpu, pKernelHead, pThreadState) + +NvBool kheadReadPendingVblank_v03_00(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 *pCachedIntr, THREAD_STATE_NODE *pThreadState); + + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvBool kheadReadPendingVblank(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 *pCachedIntr, THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadReadPendingVblank(pGpu, pKernelHead, pCachedIntr, pThreadState) kheadReadPendingVblank_v03_00(pGpu, pKernelHead, pCachedIntr, pThreadState) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadReadPendingVblank_HAL(pGpu, pKernelHead, pCachedIntr, pThreadState) kheadReadPendingVblank(pGpu, pKernelHead, pCachedIntr, pThreadState) + +NvU32 kheadReadPendingRgLineIntr_v03_00(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, THREAD_STATE_NODE *pThreadState); + + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadReadPendingRgLineIntr(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadReadPendingRgLineIntr(pGpu, pKernelHead, pThreadState) kheadReadPendingRgLineIntr_v03_00(pGpu, pKernelHead, pThreadState) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadReadPendingRgLineIntr_HAL(pGpu, pKernelHead, pThreadState) kheadReadPendingRgLineIntr(pGpu, pKernelHead, pThreadState) + +void kheadResetRgLineIntrMask_v03_00(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 headIntrMask, THREAD_STATE_NODE *pThreadState); + + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadResetRgLineIntrMask(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 headIntrMask, THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadResetRgLineIntrMask(pGpu, pKernelHead, headIntrMask, pThreadState) kheadResetRgLineIntrMask_v03_00(pGpu, pKernelHead, headIntrMask, pThreadState) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadResetRgLineIntrMask_HAL(pGpu, pKernelHead, headIntrMask, pThreadState) kheadResetRgLineIntrMask(pGpu, pKernelHead, headIntrMask, pThreadState) + +static inline void kheadProcessRgLineCallbacks_ca557d(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 head, 
NvU32 *headIntrMask, NvU32 *clearIntrMask, NvBool isIsr) { + NV_ASSERT_OR_RETURN_VOID_PRECOMP(0); +} + + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadProcessRgLineCallbacks(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 head, NvU32 *headIntrMask, NvU32 *clearIntrMask, NvBool isIsr) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadProcessRgLineCallbacks(pGpu, pKernelHead, head, headIntrMask, clearIntrMask, isIsr) kheadProcessRgLineCallbacks_ca557d(pGpu, pKernelHead, head, headIntrMask, clearIntrMask, isIsr) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadProcessRgLineCallbacks_HAL(pGpu, pKernelHead, head, headIntrMask, clearIntrMask, isIsr) kheadProcessRgLineCallbacks(pGpu, pKernelHead, head, headIntrMask, clearIntrMask, isIsr) + +void kheadReadPendingRgSemIntr_v04_01(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, HEADINTRMASK *pHeadIntrMask, THREAD_STATE_NODE *pThreadState, NvU32 *pCachedIntr); + + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadReadPendingRgSemIntr(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, HEADINTRMASK *pHeadIntrMask, THREAD_STATE_NODE *pThreadState, NvU32 *pCachedIntr) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadReadPendingRgSemIntr(pGpu, pKernelHead, pHeadIntrMask, pThreadState, pCachedIntr) kheadReadPendingRgSemIntr_v04_01(pGpu, pKernelHead, pHeadIntrMask, pThreadState, pCachedIntr) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadReadPendingRgSemIntr_HAL(pGpu, pKernelHead, pHeadIntrMask, pThreadState, pCachedIntr) kheadReadPendingRgSemIntr(pGpu, pKernelHead, pHeadIntrMask, pThreadState, pCachedIntr) + +void kheadHandleRgSemIntr_v04_01(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, HEADINTRMASK *pHeadIntrMask, THREAD_STATE_NODE *pThreadState); + + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadHandleRgSemIntr(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, HEADINTRMASK *pHeadIntrMask, THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadHandleRgSemIntr(pGpu, pKernelHead, pHeadIntrMask, pThreadState) kheadHandleRgSemIntr_v04_01(pGpu, pKernelHead, pHeadIntrMask, pThreadState) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadHandleRgSemIntr_HAL(pGpu, pKernelHead, pHeadIntrMask, pThreadState) kheadHandleRgSemIntr(pGpu, pKernelHead, pHeadIntrMask, pThreadState) + +NvU32 kheadGetLoadVCounter_v03_00(struct OBJGPU *pGpu, struct KernelHead *pKernelHead); + +NvU32 kheadGetLoadVCounter_v05_01(struct OBJGPU *pGpu, struct KernelHead *pKernelHead); + +NvU32 kheadGetCrashLockCounterV_v05_01(struct OBJGPU *pGpu, struct KernelHead *pKernelHead); + +static inline NvU32 kheadGetCrashLockCounterV_4a4dee(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + return 0; +} + +void kheadVsyncNotificationOverRgVblankIntr_v04_04(struct OBJGPU *pGpu, struct KernelHead *pKernelHead); + +static inline void kheadVsyncNotificationOverRgVblankIntr_b3696a(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + return; +} + +NV_STATUS kheadConstruct_IMPL(struct KernelHead *arg_pKernelHead); + +#define __nvoc_kheadConstruct(arg_pKernelHead) kheadConstruct_IMPL(arg_pKernelHead) +void kheadDestruct_IMPL(struct KernelHead *pKernelHead); + +#define __nvoc_kheadDestruct(pKernelHead) kheadDestruct_IMPL(pKernelHead) +void kheadAddVblankCallback_IMPL(struct OBJGPU *pGpu, 
struct KernelHead *pKernelHead, VBLANKCALLBACK *arg3); + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadAddVblankCallback(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadAddVblankCallback(pGpu, pKernelHead, arg3) kheadAddVblankCallback_IMPL(pGpu, pKernelHead, arg3) +#endif //__nvoc_kernel_head_h_disabled + +void kheadDeleteVblankCallback_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg3); + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadDeleteVblankCallback(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadDeleteVblankCallback(pGpu, pKernelHead, arg3) kheadDeleteVblankCallback_IMPL(pGpu, pKernelHead, arg3) +#endif //__nvoc_kernel_head_h_disabled + +void kheadPauseVblankCbNotifications_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg3); + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadPauseVblankCbNotifications(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadPauseVblankCbNotifications(pGpu, pKernelHead, arg3) kheadPauseVblankCbNotifications_IMPL(pGpu, pKernelHead, arg3) +#endif //__nvoc_kernel_head_h_disabled + +NvU32 kheadCheckVblankCallbacksQueued_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg3, NvU32 *arg4); + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadCheckVblankCallbacksQueued(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg3, NvU32 *arg4) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, arg3, arg4) kheadCheckVblankCallbacksQueued_IMPL(pGpu, pKernelHead, arg3, arg4) +#endif //__nvoc_kernel_head_h_disabled + +NvU32 kheadReadVblankIntrState_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead); + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadReadVblankIntrState(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadReadVblankIntrState(pGpu, pKernelHead) kheadReadVblankIntrState_IMPL(pGpu, pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +void kheadWriteVblankIntrState_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg3); + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadWriteVblankIntrState(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadWriteVblankIntrState(pGpu, pKernelHead, arg3) kheadWriteVblankIntrState_IMPL(pGpu, pKernelHead, arg3) +#endif //__nvoc_kernel_head_h_disabled + +#undef PRIVATE_FIELD + + +#endif // KERNEL_HEAD_H + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_KERNEL_HEAD_NVOC_H_ diff --git a/src/nvidia/generated/g_lock_stress_nvoc.c b/src/nvidia/generated/g_lock_stress_nvoc.c new file mode 100644 index 0000000..855ca0c --- /dev/null +++ b/src/nvidia/generated/g_lock_stress_nvoc.c @@ -0,0 +1,727 @@ +#define NVOC_LOCK_STRESS_H_PRIVATE_ACCESS_ALLOWED + +// 
Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_lock_stress_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xecce10 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_LockStressObject; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +// Forward declarations for LockStressObject +void __nvoc_init__GpuResource(GpuResource*); +void __nvoc_init__LockStressObject(LockStressObject*); +void __nvoc_init_funcTable_LockStressObject(LockStressObject*); +NV_STATUS __nvoc_ctor_LockStressObject(LockStressObject*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_LockStressObject(LockStressObject*); +void __nvoc_dtor_LockStressObject(LockStressObject*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__LockStressObject; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__LockStressObject; + +// Down-thunk(s) to bridge LockStressObject methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super + +// Up-thunk(s) to bridge LockStressObject methods to ancestors (if any) +NvBool 
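+/*
+ * Naming note: a down-thunk (above) lets an ancestor vtable slot invoke a
+ * derived override by rebasing the object pointer from the embedded base
+ * back to the derived class; an up-thunk (below) publishes an ancestor's
+ * implementation under this class's method names by rebasing the other
+ * way. Shape of an up-thunk (simplified sketch, not the generated code):
+ *
+ *   NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pRes)
+ *   {
+ *       return resCanCopy(&pRes->__nvoc_base_RsResource);
+ *   }
+ */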
__nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2 +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2 +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2 +NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct 
GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource); // super +void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference); // super +NV_STATUS __nvoc_up_thunk_GpuResource_lockStressObjControl(struct LockStressObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_GpuResource_lockStressObjMap(struct LockStressObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_GpuResource_lockStressObjUnmap(struct LockStressObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_GpuResource_lockStressObjShareCallback(struct LockStressObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_GpuResource_lockStressObjGetRegBaseOffsetAndSize(struct LockStressObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // this +NV_STATUS __nvoc_up_thunk_GpuResource_lockStressObjGetMapAddrSpace(struct LockStressObject *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this +NV_STATUS __nvoc_up_thunk_GpuResource_lockStressObjInternalControlForward(struct LockStressObject *pGpuResource, NvU32 command, void *pParams, NvU32 size); // this +NvHandle __nvoc_up_thunk_GpuResource_lockStressObjGetInternalObjectHandle(struct LockStressObject *pGpuResource); // this +NvBool __nvoc_up_thunk_RmResource_lockStressObjAccessCallback(struct LockStressObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NV_STATUS __nvoc_up_thunk_RmResource_lockStressObjGetMemInterMapParams(struct LockStressObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_lockStressObjCheckMemInterUnmap(struct LockStressObject *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_lockStressObjGetMemoryMappingDescriptor(struct LockStressObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_lockStressObjControlSerialization_Prologue(struct LockStressObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_lockStressObjControlSerialization_Epilogue(struct LockStressObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_lockStressObjControl_Prologue(struct LockStressObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_lockStressObjControl_Epilogue(struct LockStressObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_lockStressObjCanCopy(struct LockStressObject *pResource); // this +NV_STATUS 
__nvoc_up_thunk_RsResource_lockStressObjIsDuplicate(struct LockStressObject *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_lockStressObjPreDestruct(struct LockStressObject *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_lockStressObjControlFilter(struct LockStressObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_lockStressObjIsPartialUnmapSupported(struct LockStressObject *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_lockStressObjMapTo(struct LockStressObject *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_lockStressObjUnmapFrom(struct LockStressObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_lockStressObjGetRefCount(struct LockStressObject *pResource); // this +void __nvoc_up_thunk_RsResource_lockStressObjAddAdditionalDependants(struct RsClient *pClient, struct LockStressObject *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_LockStressObject = +{ + /*classInfo=*/ { + /*size=*/ sizeof(LockStressObject), + /*classId=*/ classId(LockStressObject), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "LockStressObject", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_LockStressObject, + /*pCastInfo=*/ &__nvoc_castinfo__LockStressObject, + /*pExportInfo=*/ &__nvoc_export_info__LockStressObject +}; + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_LockStressObject[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) lockStressObjCtrlCmdResetLockStressState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x1000101u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_LockStressObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "lockStressObjCtrlCmdResetLockStressState" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) lockStressObjCtrlCmdPerformLockStressAllRmLocks_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x1000102u, + /*paramSize=*/ sizeof(NV0100_CTRL_PERFORM_LOCK_STRESS_ALL_RM_LOCKS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_LockStressObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "lockStressObjCtrlCmdPerformLockStressAllRmLocks" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) lockStressObjCtrlCmdPerformLockStressNoGpusLock_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x1000103u, + /*paramSize=*/ sizeof(NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_LockStressObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "lockStressObjCtrlCmdPerformLockStressNoGpusLock" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void 
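+/*
+ * Export-table entry layout, as used throughout this array: methodId is the
+ * NV0100_CTRL_CMD_* value the resource-server control dispatcher matches
+ * on, paramSize is validated against the caller's payload, and the flags
+ * word tracks the lock behavior each control requests (note 0x9 versus 0x8
+ * on the NoGpusLock variants, and the extra 0x100 bit on the
+ * ApiLockReadMode variants).
+ */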
(*)(void)) lockStressObjCtrlCmdPerformLockStressApiLockReadMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u) + /*flags=*/ 0x108u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x1000104u, + /*paramSize=*/ sizeof(NV0100_CTRL_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_LockStressObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "lockStressObjCtrlCmdPerformLockStressApiLockReadMode" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x1000105u, + /*paramSize=*/ sizeof(NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_LockStressObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x1000106u, + /*paramSize=*/ sizeof(NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_ALL_RM_LOCKS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_LockStressObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x1000107u, + /*paramSize=*/ sizeof(NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_LockStressObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u) + /*flags=*/ 0x108u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x1000108u, + /*paramSize=*/ sizeof(NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_API_LOCK_READ_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_LockStressObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x1000109u, + /*paramSize=*/ sizeof(NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_LockStressObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode" +#endif + }, + { /* [9] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) lockStressObjCtrlCmdGetLockStressCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x100010au, + /*paramSize=*/ sizeof(NV0100_CTRL_GET_LOCK_STRESS_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_LockStressObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "lockStressObjCtrlCmdGetLockStressCounters" +#endif + }, + +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__LockStressObject __nvoc_metadata__LockStressObject = { + .rtti.pClassDef = &__nvoc_class_def_LockStressObject, // (lockStressObj) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_LockStressObject, + .rtti.offset = 0, + .metadata__GpuResource.rtti.pClassDef = &__nvoc_class_def_GpuResource, // (gpures) super + .metadata__GpuResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.rtti.offset = NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource), + .metadata__GpuResource.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2 + .metadata__GpuResource.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.rtti.offset = NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource), + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3 + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4 + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3 + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + + .vtable.__lockStressObjControl__ = &__nvoc_up_thunk_GpuResource_lockStressObjControl, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresControl__ = &gpuresControl_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_GpuResource_resControl, // virtual + .vtable.__lockStressObjMap__ = &__nvoc_up_thunk_GpuResource_lockStressObjMap, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresMap__ = &gpuresMap_IMPL, // virtual override (res) 
base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_GpuResource_resMap, // virtual + .vtable.__lockStressObjUnmap__ = &__nvoc_up_thunk_GpuResource_lockStressObjUnmap, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresUnmap__ = &gpuresUnmap_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_GpuResource_resUnmap, // virtual + .vtable.__lockStressObjShareCallback__ = &__nvoc_up_thunk_GpuResource_lockStressObjShareCallback, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresShareCallback__ = &gpuresShareCallback_IMPL, // virtual override (res) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresShareCallback__ = &__nvoc_down_thunk_GpuResource_rmresShareCallback, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__lockStressObjGetRegBaseOffsetAndSize__ = &__nvoc_up_thunk_GpuResource_lockStressObjGetRegBaseOffsetAndSize, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetRegBaseOffsetAndSize__ = &gpuresGetRegBaseOffsetAndSize_IMPL, // virtual + .vtable.__lockStressObjGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_lockStressObjGetMapAddrSpace, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL, // virtual + .vtable.__lockStressObjInternalControlForward__ = &__nvoc_up_thunk_GpuResource_lockStressObjInternalControlForward, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL, // virtual + .vtable.__lockStressObjGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_lockStressObjGetInternalObjectHandle, // virtual inherited (gpures) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL, // virtual + .vtable.__lockStressObjAccessCallback__ = &__nvoc_up_thunk_RmResource_lockStressObjAccessCallback, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresAccessCallback__ = &__nvoc_up_thunk_RmResource_gpuresAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__lockStressObjGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_lockStressObjGetMemInterMapParams, // virtual inherited (rmres) base (gpures) + .metadata__GpuResource.vtable.__gpuresGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams, // virtual inherited (rmres) base (rmres) + .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + 
+    .vtable.__lockStressObjCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_lockStressObjCheckMemInterUnmap, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual
+    .vtable.__lockStressObjGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_lockStressObjGetMemoryMappingDescriptor, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual
+    .vtable.__lockStressObjControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_lockStressObjControlSerialization_Prologue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual
+    .vtable.__lockStressObjControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_lockStressObjControlSerialization_Epilogue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual
+    .vtable.__lockStressObjControl_Prologue__ = &__nvoc_up_thunk_RmResource_lockStressObjControl_Prologue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Prologue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual
+    .vtable.__lockStressObjControl_Epilogue__ = &__nvoc_up_thunk_RmResource_lockStressObjControl_Epilogue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Epilogue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual
+    .vtable.__lockStressObjCanCopy__ = &__nvoc_up_thunk_RsResource_lockStressObjCanCopy, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresCanCopy__ = &__nvoc_up_thunk_RsResource_gpuresCanCopy, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual
+    .vtable.__lockStressObjIsDuplicate__ = &__nvoc_up_thunk_RsResource_lockStressObjIsDuplicate, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpuresIsDuplicate, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual
+    .vtable.__lockStressObjPreDestruct__ = &__nvoc_up_thunk_RsResource_lockStressObjPreDestruct, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresPreDestruct__ = &__nvoc_up_thunk_RsResource_gpuresPreDestruct, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual
+    .vtable.__lockStressObjControlFilter__ = &__nvoc_up_thunk_RsResource_lockStressObjControlFilter, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControlFilter__ = &__nvoc_up_thunk_RsResource_gpuresControlFilter, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual
+    .vtable.__lockStressObjIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_lockStressObjIsPartialUnmapSupported, // inline virtual inherited (res) base (gpures) body
+    .metadata__GpuResource.vtable.__gpuresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body
+    .vtable.__lockStressObjMapTo__ = &__nvoc_up_thunk_RsResource_lockStressObjMapTo, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresMapTo__ = &__nvoc_up_thunk_RsResource_gpuresMapTo, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual
+    .vtable.__lockStressObjUnmapFrom__ = &__nvoc_up_thunk_RsResource_lockStressObjUnmapFrom, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpuresUnmapFrom, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual
+    .vtable.__lockStressObjGetRefCount__ = &__nvoc_up_thunk_RsResource_lockStressObjGetRefCount, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetRefCount__ = &__nvoc_up_thunk_RsResource_gpuresGetRefCount, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual
+    .vtable.__lockStressObjAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_lockStressObjAddAdditionalDependants, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual
+};
+
+
+// Dynamic down-casting information
+const struct NVOC_CASTINFO __nvoc_castinfo__LockStressObject = {
+    .numRelatives = 6,
+    .relatives = {
+        &__nvoc_metadata__LockStressObject.rtti, // [0]: (lockStressObj) this
+        &__nvoc_metadata__LockStressObject.metadata__GpuResource.rtti, // [1]: (gpures) super
+        &__nvoc_metadata__LockStressObject.metadata__GpuResource.metadata__RmResource.rtti, // [2]: (rmres) super^2
+        &__nvoc_metadata__LockStressObject.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3
+        &__nvoc_metadata__LockStressObject.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4
+        &__nvoc_metadata__LockStressObject.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3
+    }
+};
+
+// 25 up-thunk(s) defined to bridge methods in LockStressObject to superclasses
+
+// lockStressObjControl: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_lockStressObjControl(struct LockStressObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return gpuresControl((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource)), pCallContext, pParams);
+}
+
+// lockStressObjMap: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_lockStressObjMap(struct LockStressObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
+    return gpuresMap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource)), pCallContext, pParams, pCpuMapping);
+}
+
+// lockStressObjUnmap: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_lockStressObjUnmap(struct LockStressObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
+    return gpuresUnmap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource)), pCallContext, pCpuMapping);
+}
+
+// lockStressObjShareCallback: virtual inherited (gpures) base (gpures)
+NvBool __nvoc_up_thunk_GpuResource_lockStressObjShareCallback(struct LockStressObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return gpuresShareCallback((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource)), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+// lockStressObjGetRegBaseOffsetAndSize: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_lockStressObjGetRegBaseOffsetAndSize(struct LockStressObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
+    return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource)), pGpu, pOffset, pSize);
+}
+
+// lockStressObjGetMapAddrSpace: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_lockStressObjGetMapAddrSpace(struct LockStressObject *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
+    return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource)), pCallContext, mapFlags, pAddrSpace);
+}
+
+// lockStressObjInternalControlForward: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_lockStressObjInternalControlForward(struct LockStressObject *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
+    return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource)), command, pParams, size);
+}
+
+// lockStressObjGetInternalObjectHandle: virtual inherited (gpures) base (gpures)
+NvHandle __nvoc_up_thunk_GpuResource_lockStressObjGetInternalObjectHandle(struct LockStressObject *pGpuResource) {
+    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource)));
+}
+
+// lockStressObjAccessCallback: virtual inherited (rmres) base (gpures)
+NvBool __nvoc_up_thunk_RmResource_lockStressObjAccessCallback(struct LockStressObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight);
+}
+
+// lockStressObjGetMemInterMapParams: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_lockStressObjGetMemInterMapParams(struct LockStressObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pParams);
+}
+
+// lockStressObjCheckMemInterUnmap: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_lockStressObjCheckMemInterUnmap(struct LockStressObject *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), bSubdeviceHandleProvided);
+}
+
+// lockStressObjGetMemoryMappingDescriptor: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_lockStressObjGetMemoryMappingDescriptor(struct LockStressObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), ppMemDesc);
+}
+
+// lockStressObjControlSerialization_Prologue: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_lockStressObjControlSerialization_Prologue(struct LockStressObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// lockStressObjControlSerialization_Epilogue: virtual inherited (rmres) base (gpures)
+void __nvoc_up_thunk_RmResource_lockStressObjControlSerialization_Epilogue(struct LockStressObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// lockStressObjControl_Prologue: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_lockStressObjControl_Prologue(struct LockStressObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// lockStressObjControl_Epilogue: virtual inherited (rmres) base (gpures)
+void __nvoc_up_thunk_RmResource_lockStressObjControl_Epilogue(struct LockStressObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// lockStressObjCanCopy: virtual inherited (res) base (gpures)
+NvBool __nvoc_up_thunk_RsResource_lockStressObjCanCopy(struct LockStressObject *pResource) {
+    return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// lockStressObjIsDuplicate: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_lockStressObjIsDuplicate(struct LockStressObject *pResource, NvHandle hMemory, NvBool *pDuplicate) {
+    return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate);
+}
+
+// lockStressObjPreDestruct: virtual inherited (res) base (gpures)
+void __nvoc_up_thunk_RsResource_lockStressObjPreDestruct(struct LockStressObject *pResource) {
+    resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// lockStressObjControlFilter: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_lockStressObjControlFilter(struct LockStressObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// lockStressObjIsPartialUnmapSupported: inline virtual inherited (res) base (gpures) body
+NvBool __nvoc_up_thunk_RsResource_lockStressObjIsPartialUnmapSupported(struct LockStressObject *pResource) {
+    return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// lockStressObjMapTo: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_lockStressObjMapTo(struct LockStressObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// lockStressObjUnmapFrom: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_lockStressObjUnmapFrom(struct LockStressObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// lockStressObjGetRefCount: virtual inherited (res) base (gpures)
+NvU32 __nvoc_up_thunk_RsResource_lockStressObjGetRefCount(struct LockStressObject *pResource) {
+    return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// lockStressObjAddAdditionalDependants: virtual inherited (res) base (gpures)
+void __nvoc_up_thunk_RsResource_lockStressObjAddAdditionalDependants(struct RsClient *pClient, struct LockStressObject *pResource, RsResourceRef *pReference) {
+    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockStressObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference);
+}
+
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__LockStressObject =
+{
+    /*numEntries=*/ 10,
+    /*pExportEntries=*/ __nvoc_exported_method_def_LockStressObject
+};
+
+void __nvoc_dtor_GpuResource(GpuResource*);
+void __nvoc_dtor_LockStressObject(LockStressObject *pThis) {
+    __nvoc_lockStressObjDestruct(pThis);
+    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_LockStressObject(LockStressObject *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_LockStressObject(LockStressObject *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_LockStressObject_fail_GpuResource;
+    __nvoc_init_dataField_LockStressObject(pThis);
+
+    status = __nvoc_lockStressObjConstruct(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_LockStressObject_fail__init;
+    goto __nvoc_ctor_LockStressObject_exit; // Success
+
+__nvoc_ctor_LockStressObject_fail__init:
+    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+__nvoc_ctor_LockStressObject_fail_GpuResource:
+__nvoc_ctor_LockStressObject_exit:
+
+    return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_LockStressObject_1(LockStressObject *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+
+    // lockStressObjCtrlCmdResetLockStressState -- exported (id=0x1000101)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__lockStressObjCtrlCmdResetLockStressState__ = &lockStressObjCtrlCmdResetLockStressState_IMPL;
+#endif
+
+    // lockStressObjCtrlCmdPerformLockStressAllRmLocks -- exported (id=0x1000102)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__lockStressObjCtrlCmdPerformLockStressAllRmLocks__ = &lockStressObjCtrlCmdPerformLockStressAllRmLocks_IMPL;
+#endif
+
+    // lockStressObjCtrlCmdPerformLockStressNoGpusLock -- exported (id=0x1000103)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__lockStressObjCtrlCmdPerformLockStressNoGpusLock__ = &lockStressObjCtrlCmdPerformLockStressNoGpusLock_IMPL;
+#endif
+
+    // lockStressObjCtrlCmdPerformLockStressApiLockReadMode -- exported (id=0x1000104)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u)
+    pThis->__lockStressObjCtrlCmdPerformLockStressApiLockReadMode__ = &lockStressObjCtrlCmdPerformLockStressApiLockReadMode_IMPL;
+#endif
+
+    // lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode -- exported (id=0x1000105)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode__ = &lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode_IMPL;
+#endif
+
+    // lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks -- exported (id=0x1000106)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks__ = &lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks_IMPL;
+#endif
+
+    // lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock -- exported (id=0x1000107)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock__ = &lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock_IMPL;
+#endif
+
+    // lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode -- exported (id=0x1000108)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u)
+    pThis->__lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode__ = &lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode_IMPL;
+#endif
+
+    // lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode -- exported (id=0x1000109)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode__ = &lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode_IMPL;
+#endif
+
+    // lockStressObjCtrlCmdGetLockStressCounters -- exported (id=0x100010a)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__lockStressObjCtrlCmdGetLockStressCounters__ = &lockStressObjCtrlCmdGetLockStressCounters_IMPL;
+#endif
+} // End __nvoc_init_funcTable_LockStressObject_1 with approximately 10 basic block(s).
+
+
+// Initialize vtable(s) for 35 virtual method(s).
+void __nvoc_init_funcTable_LockStressObject(LockStressObject *pThis) {
+
+    // Initialize vtable(s) with 10 per-object function pointer(s).
+    __nvoc_init_funcTable_LockStressObject_1(pThis);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__LockStressObject(LockStressObject *pThis) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4
+    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3
+    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3
+    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; // (rmres) super^2
+    pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; // (gpures) super
+    pThis->__nvoc_pbase_LockStressObject = pThis; // (lockStressObj) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__GpuResource(&pThis->__nvoc_base_GpuResource);
+
+    // Pointer(s) to metadata structures(s)
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__LockStressObject.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__LockStressObject.metadata__GpuResource.metadata__RmResource.metadata__RsResource; // (res) super^3
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__LockStressObject.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__LockStressObject.metadata__GpuResource.metadata__RmResource; // (rmres) super^2
+    pThis->__nvoc_base_GpuResource.__nvoc_metadata_ptr = &__nvoc_metadata__LockStressObject.metadata__GpuResource; // (gpures) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__LockStressObject; // (lockStressObj) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_LockStressObject(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_LockStressObject(LockStressObject **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    LockStressObject *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(LockStressObject), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(LockStressObject));
+
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent if there is one unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__LockStressObject(pThis);
+    status = __nvoc_ctor_LockStressObject(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_objCreate_LockStressObject_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_LockStressObject_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
+    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+        portMemSet(pThis, 0, sizeof(LockStressObject));
+    else
+    {
+        portMemFree(pThis);
+        *ppThis = NULL;
+    }
+
+    // coverity[leaked_storage:FALSE]
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_LockStressObject(LockStressObject **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+    status = __nvoc_objCreate_LockStressObject(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+    return status;
+}
+
diff --git a/src/nvidia/generated/g_lock_stress_nvoc.h b/src/nvidia/generated/g_lock_stress_nvoc.h
new file mode 100644
index 0000000..7b1138c
--- /dev/null
+++ b/src/nvidia/generated/g_lock_stress_nvoc.h
@@ -0,0 +1,423 @@
+
+#ifndef _G_LOCK_STRESS_NVOC_H_
+#define _G_LOCK_STRESS_NVOC_H_
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+#include "g_lock_stress_nvoc.h"
+
+#ifndef LOCK_STRESS_H
+#define LOCK_STRESS_H
+
+#include "gpu/gpu_resource.h"
+#include "nvoc/prelude.h"
+#include "nvstatus.h"
+#include "resserv/resserv.h"
+
+#include "ctrl/ctrl0100.h"
+
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_LOCK_STRESS_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__LockStressObject;
+struct NVOC_METADATA__GpuResource;
+struct NVOC_VTABLE__LockStressObject;
+
+
+struct LockStressObject {
+
+    // Metadata starts with RTTI structure.
+    union {
+        const struct NVOC_METADATA__LockStressObject *__nvoc_metadata_ptr;
+        const struct NVOC_RTTI *__nvoc_rtti;
+    };
+
+    // Parent (i.e. superclass or base class) objects
+    struct GpuResource __nvoc_base_GpuResource;
+
+    // Ancestor object pointers for `staticCast` feature
+    struct Object *__nvoc_pbase_Object; // obj super^4
+    struct RsResource *__nvoc_pbase_RsResource; // res super^3
+    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3
+    struct RmResource *__nvoc_pbase_RmResource; // rmres super^2
+    struct GpuResource *__nvoc_pbase_GpuResource; // gpures super
+    struct LockStressObject *__nvoc_pbase_LockStressObject; // lockStressObj
+
+    // Vtable with 10 per-object function pointers
+    NV_STATUS (*__lockStressObjCtrlCmdResetLockStressState__)(struct LockStressObject * /*this*/); // exported (id=0x1000101)
+    NV_STATUS (*__lockStressObjCtrlCmdPerformLockStressAllRmLocks__)(struct LockStressObject * /*this*/, NV0100_CTRL_PERFORM_LOCK_STRESS_ALL_RM_LOCKS_PARAMS *); // exported (id=0x1000102)
+    NV_STATUS (*__lockStressObjCtrlCmdPerformLockStressNoGpusLock__)(struct LockStressObject * /*this*/, NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_PARAMS *); // exported (id=0x1000103)
+    NV_STATUS (*__lockStressObjCtrlCmdPerformLockStressApiLockReadMode__)(struct LockStressObject * /*this*/, NV0100_CTRL_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE_PARAMS *); // exported (id=0x1000104)
+    NV_STATUS (*__lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode__)(struct LockStressObject * /*this*/, NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS *); // exported (id=0x1000105)
+    NV_STATUS (*__lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks__)(struct LockStressObject * /*this*/, NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_ALL_RM_LOCKS_PARAMS *); // exported (id=0x1000106)
+    NV_STATUS (*__lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock__)(struct LockStressObject * /*this*/, NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_PARAMS *); // exported (id=0x1000107)
+    NV_STATUS (*__lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode__)(struct LockStressObject * /*this*/, NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_API_LOCK_READ_MODE_PARAMS *); // exported (id=0x1000108)
+    NV_STATUS (*__lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode__)(struct LockStressObject * /*this*/, NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS *); // exported (id=0x1000109)
+    NV_STATUS (*__lockStressObjCtrlCmdGetLockStressCounters__)(struct LockStressObject * /*this*/, NV0100_CTRL_GET_LOCK_STRESS_COUNTERS_PARAMS *); // exported (id=0x100010a)
+
+    // Data members
+    NvHandle PRIVATE_FIELD(hInternalClient);
+    NvHandle PRIVATE_FIELD(hInternalDevice);
+    NvHandle PRIVATE_FIELD(hInternalSubdevice);
+    NvHandle PRIVATE_FIELD(hInternalLockStressObject);
+};
+
+
+// Vtable with 25 per-class function pointers
+struct NVOC_VTABLE__LockStressObject {
+    NV_STATUS (*__lockStressObjControl__)(struct LockStressObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (gpures) base (gpures)
+    NV_STATUS (*__lockStressObjMap__)(struct LockStressObject * /*this*/, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures)
+    NV_STATUS (*__lockStressObjUnmap__)(struct LockStressObject * /*this*/, struct CALL_CONTEXT *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures)
+    NvBool (*__lockStressObjShareCallback__)(struct LockStressObject * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (gpures) base (gpures)
+    NV_STATUS (*__lockStressObjGetRegBaseOffsetAndSize__)(struct LockStressObject * /*this*/, struct OBJGPU *, NvU32 *, NvU32 *); // virtual inherited (gpures) base (gpures)
+    NV_STATUS (*__lockStressObjGetMapAddrSpace__)(struct LockStressObject * /*this*/, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (gpures) base (gpures)
+    NV_STATUS (*__lockStressObjInternalControlForward__)(struct LockStressObject * /*this*/, NvU32, void *, NvU32); // virtual inherited (gpures) base (gpures)
+    NvHandle (*__lockStressObjGetInternalObjectHandle__)(struct LockStressObject * /*this*/); // virtual inherited (gpures) base (gpures)
+    NvBool (*__lockStressObjAccessCallback__)(struct LockStressObject * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (gpures)
+    NV_STATUS (*__lockStressObjGetMemInterMapParams__)(struct LockStressObject * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (gpures)
+    NV_STATUS (*__lockStressObjCheckMemInterUnmap__)(struct LockStressObject * /*this*/, NvBool); // virtual inherited (rmres) base (gpures)
+    NV_STATUS (*__lockStressObjGetMemoryMappingDescriptor__)(struct LockStressObject * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (gpures)
+    NV_STATUS (*__lockStressObjControlSerialization_Prologue__)(struct LockStressObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures)
+    void (*__lockStressObjControlSerialization_Epilogue__)(struct LockStressObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures)
+    NV_STATUS (*__lockStressObjControl_Prologue__)(struct LockStressObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures)
+    void (*__lockStressObjControl_Epilogue__)(struct LockStressObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures)
+    NvBool (*__lockStressObjCanCopy__)(struct LockStressObject * /*this*/); // virtual inherited (res) base (gpures)
+    NV_STATUS (*__lockStressObjIsDuplicate__)(struct LockStressObject * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (gpures)
+    void (*__lockStressObjPreDestruct__)(struct LockStressObject * /*this*/); // virtual inherited (res) base (gpures)
+    NV_STATUS (*__lockStressObjControlFilter__)(struct LockStressObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (gpures)
+    NvBool (*__lockStressObjIsPartialUnmapSupported__)(struct LockStressObject * /*this*/); // inline virtual inherited (res) base (gpures) body
+    NV_STATUS (*__lockStressObjMapTo__)(struct LockStressObject * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (gpures)
+    NV_STATUS (*__lockStressObjUnmapFrom__)(struct LockStressObject * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (gpures)
+    NvU32 (*__lockStressObjGetRefCount__)(struct LockStressObject * /*this*/); // virtual inherited (res) base (gpures)
+    void (*__lockStressObjAddAdditionalDependants__)(struct RsClient *, struct LockStressObject * /*this*/, RsResourceRef *); // virtual inherited (res) base (gpures)
+};
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__LockStressObject {
+    const struct NVOC_RTTI rtti;
+    const struct NVOC_METADATA__GpuResource metadata__GpuResource;
+    const struct NVOC_VTABLE__LockStressObject vtable;
+};
+
+#ifndef __NVOC_CLASS_LockStressObject_TYPEDEF__
+#define __NVOC_CLASS_LockStressObject_TYPEDEF__
+typedef struct LockStressObject LockStressObject;
+#endif /* __NVOC_CLASS_LockStressObject_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_LockStressObject
+#define __nvoc_class_id_LockStressObject 0xecce10
+#endif /* __nvoc_class_id_LockStressObject */
+
+// Casting support
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_LockStressObject;
+
+#define __staticCast_LockStressObject(pThis) \
+    ((pThis)->__nvoc_pbase_LockStressObject)
+
+#ifdef __nvoc_lock_stress_h_disabled
+#define __dynamicCast_LockStressObject(pThis) ((LockStressObject*) NULL)
+#else //__nvoc_lock_stress_h_disabled
+#define __dynamicCast_LockStressObject(pThis) \
+    ((LockStressObject*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(LockStressObject)))
+#endif //__nvoc_lock_stress_h_disabled
+
+NV_STATUS __nvoc_objCreateDynamic_LockStressObject(LockStressObject**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_LockStressObject(LockStressObject**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+#define __objCreate_LockStressObject(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
+    __nvoc_objCreate_LockStressObject((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
+
+
+// Wrapper macros
+#define lockStressObjCtrlCmdResetLockStressState_FNPTR(pResource) pResource->__lockStressObjCtrlCmdResetLockStressState__
+#define lockStressObjCtrlCmdResetLockStressState(pResource) lockStressObjCtrlCmdResetLockStressState_DISPATCH(pResource)
+#define lockStressObjCtrlCmdPerformLockStressAllRmLocks_FNPTR(pResource) pResource->__lockStressObjCtrlCmdPerformLockStressAllRmLocks__
+#define lockStressObjCtrlCmdPerformLockStressAllRmLocks(pResource, pParams) lockStressObjCtrlCmdPerformLockStressAllRmLocks_DISPATCH(pResource, pParams)
+#define lockStressObjCtrlCmdPerformLockStressNoGpusLock_FNPTR(pResource) pResource->__lockStressObjCtrlCmdPerformLockStressNoGpusLock__
+#define lockStressObjCtrlCmdPerformLockStressNoGpusLock(pResource, pParams) lockStressObjCtrlCmdPerformLockStressNoGpusLock_DISPATCH(pResource, pParams)
+#define lockStressObjCtrlCmdPerformLockStressApiLockReadMode_FNPTR(pResource) pResource->__lockStressObjCtrlCmdPerformLockStressApiLockReadMode__
+#define lockStressObjCtrlCmdPerformLockStressApiLockReadMode(pResource, pParams) lockStressObjCtrlCmdPerformLockStressApiLockReadMode_DISPATCH(pResource, pParams)
+#define lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode_FNPTR(pResource) pResource->__lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode__
+#define lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode(pResource, pParams) lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode_DISPATCH(pResource, pParams)
+#define lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks_FNPTR(pResource) pResource->__lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks__
+#define lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks(pResource, pParams) lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks_DISPATCH(pResource, pParams)
+#define lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock_FNPTR(pResource) pResource->__lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock__
#define lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock(pResource, pParams) lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock_DISPATCH(pResource, pParams)
+#define lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode_FNPTR(pResource) pResource->__lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode__
+#define lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode(pResource, pParams) lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode_DISPATCH(pResource, pParams)
+#define lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode_FNPTR(pResource) pResource->__lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode__
+#define lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode(pResource, pParams) lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode_DISPATCH(pResource, pParams)
+#define lockStressObjCtrlCmdGetLockStressCounters_FNPTR(pResource) pResource->__lockStressObjCtrlCmdGetLockStressCounters__
+#define lockStressObjCtrlCmdGetLockStressCounters(pResource, pParams) lockStressObjCtrlCmdGetLockStressCounters_DISPATCH(pResource, pParams)
+#define lockStressObjControl_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresControl__
+#define lockStressObjControl(pGpuResource, pCallContext, pParams) lockStressObjControl_DISPATCH(pGpuResource, pCallContext, pParams)
+#define lockStressObjMap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresMap__
+#define lockStressObjMap(pGpuResource, pCallContext, pParams, pCpuMapping) lockStressObjMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
+#define lockStressObjUnmap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresUnmap__
+#define lockStressObjUnmap(pGpuResource, pCallContext, pCpuMapping) lockStressObjUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
+#define lockStressObjShareCallback_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresShareCallback__
+#define lockStressObjShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) lockStressObjShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
+#define lockStressObjGetRegBaseOffsetAndSize_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetRegBaseOffsetAndSize__
+#define lockStressObjGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) lockStressObjGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
+#define lockStressObjGetMapAddrSpace_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetMapAddrSpace__ +#define lockStressObjGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) lockStressObjGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define lockStressObjInternalControlForward_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresInternalControlForward__ +#define lockStressObjInternalControlForward(pGpuResource, command, pParams, size) lockStressObjInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define lockStressObjGetInternalObjectHandle_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__ +#define lockStressObjGetInternalObjectHandle(pGpuResource) lockStressObjGetInternalObjectHandle_DISPATCH(pGpuResource) +#define lockStressObjAccessCallback_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define lockStressObjAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) lockStressObjAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define lockStressObjGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define lockStressObjGetMemInterMapParams(pRmResource, pParams) lockStressObjGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define lockStressObjCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define lockStressObjCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) lockStressObjCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define lockStressObjGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define lockStressObjGetMemoryMappingDescriptor(pRmResource, ppMemDesc) lockStressObjGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define lockStressObjControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define lockStressObjControlSerialization_Prologue(pResource, pCallContext, pParams) lockStressObjControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define lockStressObjControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define lockStressObjControlSerialization_Epilogue(pResource, pCallContext, pParams) lockStressObjControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define lockStressObjControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define lockStressObjControl_Prologue(pResource, pCallContext, pParams) lockStressObjControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define lockStressObjControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define lockStressObjControl_Epilogue(pResource, pCallContext, 
+#define lockStressObjCanCopy_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__
+#define lockStressObjCanCopy(pResource) lockStressObjCanCopy_DISPATCH(pResource)
+#define lockStressObjIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__
+#define lockStressObjIsDuplicate(pResource, hMemory, pDuplicate) lockStressObjIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate)
+#define lockStressObjPreDestruct_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__
+#define lockStressObjPreDestruct(pResource) lockStressObjPreDestruct_DISPATCH(pResource)
+#define lockStressObjControlFilter_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__
+#define lockStressObjControlFilter(pResource, pCallContext, pParams) lockStressObjControlFilter_DISPATCH(pResource, pCallContext, pParams)
+#define lockStressObjIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__
+#define lockStressObjIsPartialUnmapSupported(pResource) lockStressObjIsPartialUnmapSupported_DISPATCH(pResource)
+#define lockStressObjMapTo_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__
+#define lockStressObjMapTo(pResource, pParams) lockStressObjMapTo_DISPATCH(pResource, pParams)
+#define lockStressObjUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__
+#define lockStressObjUnmapFrom(pResource, pParams) lockStressObjUnmapFrom_DISPATCH(pResource, pParams)
+#define lockStressObjGetRefCount_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__
+#define lockStressObjGetRefCount(pResource) lockStressObjGetRefCount_DISPATCH(pResource)
+#define lockStressObjAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__
+#define lockStressObjAddAdditionalDependants(pClient, pResource, pReference) lockStressObjAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
+
+// Dispatch functions
+static inline NV_STATUS lockStressObjCtrlCmdResetLockStressState_DISPATCH(struct LockStressObject *pResource) {
+    return pResource->__lockStressObjCtrlCmdResetLockStressState__(pResource);
+}
+
+static inline NV_STATUS lockStressObjCtrlCmdPerformLockStressAllRmLocks_DISPATCH(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_ALL_RM_LOCKS_PARAMS *pParams) {
+    return pResource->__lockStressObjCtrlCmdPerformLockStressAllRmLocks__(pResource, pParams);
+}
+
+static inline NV_STATUS lockStressObjCtrlCmdPerformLockStressNoGpusLock_DISPATCH(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_PARAMS *pParams) {
+    return pResource->__lockStressObjCtrlCmdPerformLockStressNoGpusLock__(pResource, pParams);
+}
+
+static inline NV_STATUS lockStressObjCtrlCmdPerformLockStressApiLockReadMode_DISPATCH(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE_PARAMS *pParams) {
+    return pResource->__lockStressObjCtrlCmdPerformLockStressApiLockReadMode__(pResource, pParams);
+}
+
+static inline NV_STATUS lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode_DISPATCH(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS *pParams) {
+    return pResource->__lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode__(pResource, pParams);
+}
+
+static inline NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks_DISPATCH(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_ALL_RM_LOCKS_PARAMS *pParams) {
+    return pResource->__lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks__(pResource, pParams);
+}
+
+static inline NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock_DISPATCH(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_PARAMS *pParams) {
+    return pResource->__lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock__(pResource, pParams);
+}
+
+static inline NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode_DISPATCH(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_API_LOCK_READ_MODE_PARAMS *pParams) {
+    return pResource->__lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode__(pResource, pParams);
+}
+
+static inline NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode_DISPATCH(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS *pParams) {
+    return pResource->__lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode__(pResource, pParams);
+}
+
+static inline NV_STATUS lockStressObjCtrlCmdGetLockStressCounters_DISPATCH(struct LockStressObject *pResource, NV0100_CTRL_GET_LOCK_STRESS_COUNTERS_PARAMS *pParams) {
+    return pResource->__lockStressObjCtrlCmdGetLockStressCounters__(pResource, pParams);
+}
+
+static inline NV_STATUS lockStressObjControl_DISPATCH(struct LockStressObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pGpuResource->__nvoc_metadata_ptr->vtable.__lockStressObjControl__(pGpuResource, pCallContext, pParams);
+}
+
+static inline NV_STATUS lockStressObjMap_DISPATCH(struct LockStressObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
+    return pGpuResource->__nvoc_metadata_ptr->vtable.__lockStressObjMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
+}
+
+static inline NV_STATUS lockStressObjUnmap_DISPATCH(struct LockStressObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
+    return pGpuResource->__nvoc_metadata_ptr->vtable.__lockStressObjUnmap__(pGpuResource, pCallContext, pCpuMapping);
+}
+
+static inline NvBool lockStressObjShareCallback_DISPATCH(struct LockStressObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return pGpuResource->__nvoc_metadata_ptr->vtable.__lockStressObjShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
+}
+
+static inline NV_STATUS lockStressObjGetRegBaseOffsetAndSize_DISPATCH(struct LockStressObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
+    return pGpuResource->__nvoc_metadata_ptr->vtable.__lockStressObjGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize);
+}
+
+static inline NV_STATUS lockStressObjGetMapAddrSpace_DISPATCH(struct LockStressObject *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
+    return pGpuResource->__nvoc_metadata_ptr->vtable.__lockStressObjGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
+}
+
+static inline NV_STATUS lockStressObjInternalControlForward_DISPATCH(struct LockStressObject *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
+    return pGpuResource->__nvoc_metadata_ptr->vtable.__lockStressObjInternalControlForward__(pGpuResource, command, pParams, size);
+}
+
+static inline NvHandle lockStressObjGetInternalObjectHandle_DISPATCH(struct LockStressObject *pGpuResource) {
+    return pGpuResource->__nvoc_metadata_ptr->vtable.__lockStressObjGetInternalObjectHandle__(pGpuResource);
+}
+
+static inline NvBool lockStressObjAccessCallback_DISPATCH(struct LockStressObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return pResource->__nvoc_metadata_ptr->vtable.__lockStressObjAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
+}
+
+static inline NV_STATUS lockStressObjGetMemInterMapParams_DISPATCH(struct LockStressObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return pRmResource->__nvoc_metadata_ptr->vtable.__lockStressObjGetMemInterMapParams__(pRmResource, pParams);
+}
+
+static inline NV_STATUS lockStressObjCheckMemInterUnmap_DISPATCH(struct LockStressObject *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return pRmResource->__nvoc_metadata_ptr->vtable.__lockStressObjCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
+}
+
+static inline NV_STATUS lockStressObjGetMemoryMappingDescriptor_DISPATCH(struct LockStressObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return pRmResource->__nvoc_metadata_ptr->vtable.__lockStressObjGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
+}
+
+static inline NV_STATUS lockStressObjControlSerialization_Prologue_DISPATCH(struct LockStressObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__nvoc_metadata_ptr->vtable.__lockStressObjControlSerialization_Prologue__(pResource, pCallContext, pParams);
+}
+
+static inline void lockStressObjControlSerialization_Epilogue_DISPATCH(struct LockStressObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    pResource->__nvoc_metadata_ptr->vtable.__lockStressObjControlSerialization_Epilogue__(pResource, pCallContext, pParams);
+}
+
+static inline NV_STATUS lockStressObjControl_Prologue_DISPATCH(struct LockStressObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__nvoc_metadata_ptr->vtable.__lockStressObjControl_Prologue__(pResource, pCallContext, pParams);
+}
+
+static inline void lockStressObjControl_Epilogue_DISPATCH(struct LockStressObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    pResource->__nvoc_metadata_ptr->vtable.__lockStressObjControl_Epilogue__(pResource, pCallContext, pParams);
+}
+
+static inline NvBool lockStressObjCanCopy_DISPATCH(struct LockStressObject *pResource) {
+    return pResource->__nvoc_metadata_ptr->vtable.__lockStressObjCanCopy__(pResource);
+}
+
+static inline NV_STATUS lockStressObjIsDuplicate_DISPATCH(struct LockStressObject *pResource, NvHandle hMemory, NvBool *pDuplicate) {
+    return pResource->__nvoc_metadata_ptr->vtable.__lockStressObjIsDuplicate__(pResource, hMemory, pDuplicate);
+}
+
+static inline void lockStressObjPreDestruct_DISPATCH(struct LockStressObject *pResource) {
+    pResource->__nvoc_metadata_ptr->vtable.__lockStressObjPreDestruct__(pResource);
+}
+
+static inline NV_STATUS lockStressObjControlFilter_DISPATCH(struct LockStressObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__nvoc_metadata_ptr->vtable.__lockStressObjControlFilter__(pResource, pCallContext, pParams);
+}
+
+static inline NvBool lockStressObjIsPartialUnmapSupported_DISPATCH(struct LockStressObject *pResource) {
+    return pResource->__nvoc_metadata_ptr->vtable.__lockStressObjIsPartialUnmapSupported__(pResource);
+}
+
+static inline NV_STATUS lockStressObjMapTo_DISPATCH(struct LockStressObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return pResource->__nvoc_metadata_ptr->vtable.__lockStressObjMapTo__(pResource, pParams);
+}
+
+static inline NV_STATUS lockStressObjUnmapFrom_DISPATCH(struct LockStressObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return pResource->__nvoc_metadata_ptr->vtable.__lockStressObjUnmapFrom__(pResource, pParams);
+}
+
+static inline NvU32 lockStressObjGetRefCount_DISPATCH(struct LockStressObject *pResource) {
+    return pResource->__nvoc_metadata_ptr->vtable.__lockStressObjGetRefCount__(pResource);
+}
+
+static inline void lockStressObjAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct LockStressObject *pResource, RsResourceRef *pReference) {
+    pResource->__nvoc_metadata_ptr->vtable.__lockStressObjAddAdditionalDependants__(pClient, pResource, pReference);
+}
+
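The inherited virtuals above take the second dispatch route: they go through `__nvoc_metadata_ptr->vtable.*`, a single const vtable shared by every instance of the class, rather than one pointer per method per object. A rough sketch of that layout, again with stand-in names rather than the real NVOC types:

struct Gadget;

/* One read-only vtable per class, not per object. */
struct GadgetVtable {
    int (*__gadgetControl__)(struct Gadget *pGadget, int cmd);
};

struct GadgetMetadata {
    struct GadgetVtable vtable;
};

struct Gadget {
    /* The only per-object dispatch cost is this single pointer. */
    const struct GadgetMetadata *__nvoc_metadata_ptr;
};

static inline int gadgetControl_DISPATCH(struct Gadget *pGadget, int cmd) {
    return pGadget->__nvoc_metadata_ptr->vtable.__gadgetControl__(pGadget, cmd);
}

static int gadgetControl_IMPL(struct Gadget *pGadget, int cmd) {
    (void)pGadget;
    return (cmd == 0) ? 0 : -1;
}

static const struct GadgetMetadata g_gadgetMetadata = {
    .vtable.__gadgetControl__ = &gadgetControl_IMPL,
};

Keeping the table shared and const keeps instances small and lets the dispatch data live in read-only memory.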
+NV_STATUS lockStressObjCtrlCmdResetLockStressState_IMPL(struct LockStressObject *pResource);
+
+NV_STATUS lockStressObjCtrlCmdPerformLockStressAllRmLocks_IMPL(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_ALL_RM_LOCKS_PARAMS *pParams);
+
+NV_STATUS lockStressObjCtrlCmdPerformLockStressNoGpusLock_IMPL(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_PARAMS *pParams);
+
+NV_STATUS lockStressObjCtrlCmdPerformLockStressApiLockReadMode_IMPL(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE_PARAMS *pParams);
+
+NV_STATUS lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode_IMPL(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS *pParams);
+
+NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks_IMPL(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_ALL_RM_LOCKS_PARAMS *pParams);
+
+NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock_IMPL(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_PARAMS *pParams);
+
+NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode_IMPL(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_API_LOCK_READ_MODE_PARAMS *pParams);
+
+NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode_IMPL(struct LockStressObject *pResource, NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS *pParams);
+
+NV_STATUS lockStressObjCtrlCmdGetLockStressCounters_IMPL(struct LockStressObject *pResource, NV0100_CTRL_GET_LOCK_STRESS_COUNTERS_PARAMS *pParams);
+
+NV_STATUS lockStressObjConstruct_IMPL(struct LockStressObject *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+
+#define __nvoc_lockStressObjConstruct(arg_pResource, arg_pCallContext, arg_pParams) lockStressObjConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams)
+void lockStressObjDestruct_IMPL(struct LockStressObject *pResource);
+
+#define __nvoc_lockStressObjDestruct(pResource) lockStressObjDestruct_IMPL(pResource)
+#undef PRIVATE_FIELD
+
+
+#endif // LOCK_STRESS_H
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _G_LOCK_STRESS_NVOC_H_
diff --git a/src/nvidia/generated/g_lock_test_nvoc.c b/src/nvidia/generated/g_lock_test_nvoc.c
new file mode 100644
index 0000000..85702b3
--- /dev/null
+++ b/src/nvidia/generated/g_lock_test_nvoc.c
@@ -0,0 +1,519 @@
+#define NVOC_LOCK_TEST_H_PRIVATE_ACCESS_ALLOWED
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+#include "nvtypes.h"
+#include "nvport/nvport.h"
+#include "nvport/inline/util_valist.h"
+#include "utils/nvassert.h"
+#include "g_lock_test_nvoc.h"
+
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check__0x19e861 = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_LockTestRelaxedDupObject;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
+
+// Forward declarations for LockTestRelaxedDupObject
+void __nvoc_init__GpuResource(GpuResource*);
+void __nvoc_init__LockTestRelaxedDupObject(LockTestRelaxedDupObject*);
+void __nvoc_init_funcTable_LockTestRelaxedDupObject(LockTestRelaxedDupObject*);
+NV_STATUS __nvoc_ctor_LockTestRelaxedDupObject(LockTestRelaxedDupObject*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+void __nvoc_init_dataField_LockTestRelaxedDupObject(LockTestRelaxedDupObject*);
+void __nvoc_dtor_LockTestRelaxedDupObject(LockTestRelaxedDupObject*);
+
+// Structures used within RTTI (run-time type information)
+extern const struct NVOC_CASTINFO __nvoc_castinfo__LockTestRelaxedDupObject;
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info__LockTestRelaxedDupObject;
+
+// Down-thunk(s) to bridge LockTestRelaxedDupObject methods from ancestors (if any)
+NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2
+NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2
+NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super
+NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super
+NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super
+NvBool __nvoc_down_thunk_LockTestRelaxedDupObject_resCanCopy(struct RsResource *pResource); // this
+
+// Up-thunk(s) to bridge LockTestRelaxedDupObject methods to ancestors (if any)
+NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2
+void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2
+NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2
+NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2
+void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2
+NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided); // super
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super
+void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct GpuResource *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super
+NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource); // super
+void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjControl(struct LockTestRelaxedDupObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjMap(struct LockTestRelaxedDupObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjUnmap(struct LockTestRelaxedDupObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // this
+NvBool __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjShareCallback(struct LockTestRelaxedDupObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjGetRegBaseOffsetAndSize(struct LockTestRelaxedDupObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjGetMapAddrSpace(struct LockTestRelaxedDupObject *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjInternalControlForward(struct LockTestRelaxedDupObject *pGpuResource, NvU32 command, void *pParams, NvU32 size); // this
+NvHandle __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjGetInternalObjectHandle(struct LockTestRelaxedDupObject *pGpuResource); // this
+NvBool __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjAccessCallback(struct LockTestRelaxedDupObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this
+NV_STATUS __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjGetMemInterMapParams(struct LockTestRelaxedDupObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this
+NV_STATUS __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjCheckMemInterUnmap(struct LockTestRelaxedDupObject *pRmResource, NvBool bSubdeviceHandleProvided); // this
+NV_STATUS __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjGetMemoryMappingDescriptor(struct LockTestRelaxedDupObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this
+NV_STATUS __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjControlSerialization_Prologue(struct LockTestRelaxedDupObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+void __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjControlSerialization_Epilogue(struct LockTestRelaxedDupObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NV_STATUS __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjControl_Prologue(struct LockTestRelaxedDupObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+void __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjControl_Epilogue(struct LockTestRelaxedDupObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NV_STATUS __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjIsDuplicate(struct LockTestRelaxedDupObject *pResource, NvHandle hMemory, NvBool *pDuplicate); // this
+void __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjPreDestruct(struct LockTestRelaxedDupObject *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjControlFilter(struct LockTestRelaxedDupObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NvBool __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjIsPartialUnmapSupported(struct LockTestRelaxedDupObject *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjMapTo(struct LockTestRelaxedDupObject *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this
+NV_STATUS __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjUnmapFrom(struct LockTestRelaxedDupObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this
+NvU32 __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjGetRefCount(struct LockTestRelaxedDupObject *pResource); // this
+void __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjAddAdditionalDependants(struct RsClient *pClient, struct LockTestRelaxedDupObject *pResource, RsResourceRef *pReference); // this
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_LockTestRelaxedDupObject =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(LockTestRelaxedDupObject),
+        /*classId=*/ classId(LockTestRelaxedDupObject),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "LockTestRelaxedDupObject",
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_LockTestRelaxedDupObject,
+    /*pCastInfo=*/ &__nvoc_castinfo__LockTestRelaxedDupObject,
+    /*pExportInfo=*/ &__nvoc_export_info__LockTestRelaxedDupObject
+};
+
+
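Both the thunk declarations above and the metadata offsets initialized below rest on NVOC's C-style inheritance: every base class is embedded as a named member (`__nvoc_base_GpuResource`, which itself embeds `__nvoc_base_RmResource`, and so on down to `Object`), so converting between derived and base pointers is plain offset arithmetic with `NV_OFFSETOF`. A toy illustration of the two casts, assuming simplified stand-in types:

#include <stddef.h>

struct ToyBase {
    int baseData;
};

struct ToyDerived {
    struct ToyBase __nvoc_base_ToyBase; /* embedded base, like __nvoc_base_GpuResource */
    int derivedData;
};

/* Up-cast: add the member offset to reach the embedded base. */
static struct ToyBase *toyToBase(struct ToyDerived *pDerived) {
    return (struct ToyBase *)((unsigned char *)pDerived +
                              offsetof(struct ToyDerived, __nvoc_base_ToyBase));
}

/* Down-cast: subtract the same offset to recover the containing object. */
static struct ToyDerived *toyToDerived(struct ToyBase *pBase) {
    return (struct ToyDerived *)((unsigned char *)pBase -
                                 offsetof(struct ToyDerived, __nvoc_base_ToyBase));
}

The generated down- and up-thunks further down are exactly these two casts wrapped around a concrete method call.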
+// Metadata with per-class RTTI and vtable with ancestor(s)
+static const struct NVOC_METADATA__LockTestRelaxedDupObject __nvoc_metadata__LockTestRelaxedDupObject = {
+    .rtti.pClassDef = &__nvoc_class_def_LockTestRelaxedDupObject, // (lockTestRelaxedDupObj) this
+    .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_LockTestRelaxedDupObject,
+    .rtti.offset = 0,
+    .metadata__GpuResource.rtti.pClassDef = &__nvoc_class_def_GpuResource, // (gpures) super
+    .metadata__GpuResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__GpuResource.rtti.offset = NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource),
+    .metadata__GpuResource.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2
+    .metadata__GpuResource.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__GpuResource.metadata__RmResource.rtti.offset = NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource),
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
+    .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3
+    .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
+
+    .vtable.__lockTestRelaxedDupObjCanCopy__ = &lockTestRelaxedDupObjCanCopy_e661f0, // inline virtual override (res) base (gpures) body
+    .metadata__GpuResource.vtable.__gpuresCanCopy__ = &__nvoc_up_thunk_RsResource_gpuresCanCopy, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &__nvoc_down_thunk_LockTestRelaxedDupObject_resCanCopy, // virtual
+    .vtable.__lockTestRelaxedDupObjControl__ = &__nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjControl, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControl__ = &gpuresControl_IMPL, // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_GpuResource_resControl, // virtual
+    .vtable.__lockTestRelaxedDupObjMap__ = &__nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjMap, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresMap__ = &gpuresMap_IMPL, // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_GpuResource_resMap, // virtual
+    .vtable.__lockTestRelaxedDupObjUnmap__ = &__nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjUnmap, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresUnmap__ = &gpuresUnmap_IMPL, // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_GpuResource_resUnmap, // virtual
+    .vtable.__lockTestRelaxedDupObjShareCallback__ = &__nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjShareCallback, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresShareCallback__ = &gpuresShareCallback_IMPL, // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresShareCallback__ = &__nvoc_down_thunk_GpuResource_rmresShareCallback, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual
+    .vtable.__lockTestRelaxedDupObjGetRegBaseOffsetAndSize__ = &__nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjGetRegBaseOffsetAndSize, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetRegBaseOffsetAndSize__ = &gpuresGetRegBaseOffsetAndSize_IMPL, // virtual
+    .vtable.__lockTestRelaxedDupObjGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjGetMapAddrSpace, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL, // virtual
+    .vtable.__lockTestRelaxedDupObjInternalControlForward__ = &__nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjInternalControlForward, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL, // virtual
+    .vtable.__lockTestRelaxedDupObjGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjGetInternalObjectHandle, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL, // virtual
+    .vtable.__lockTestRelaxedDupObjAccessCallback__ = &__nvoc_up_thunk_RmResource_lockTestRelaxedDupObjAccessCallback, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresAccessCallback__ = &__nvoc_up_thunk_RmResource_gpuresAccessCallback, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual
+    .vtable.__lockTestRelaxedDupObjGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_lockTestRelaxedDupObjGetMemInterMapParams, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual
+    .vtable.__lockTestRelaxedDupObjCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_lockTestRelaxedDupObjCheckMemInterUnmap, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual
+    .vtable.__lockTestRelaxedDupObjGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_lockTestRelaxedDupObjGetMemoryMappingDescriptor, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual
+    .vtable.__lockTestRelaxedDupObjControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_lockTestRelaxedDupObjControlSerialization_Prologue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual
+    .vtable.__lockTestRelaxedDupObjControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_lockTestRelaxedDupObjControlSerialization_Epilogue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual
+    .vtable.__lockTestRelaxedDupObjControl_Prologue__ = &__nvoc_up_thunk_RmResource_lockTestRelaxedDupObjControl_Prologue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Prologue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual
+    .vtable.__lockTestRelaxedDupObjControl_Epilogue__ = &__nvoc_up_thunk_RmResource_lockTestRelaxedDupObjControl_Epilogue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Epilogue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual
+    .vtable.__lockTestRelaxedDupObjIsDuplicate__ = &__nvoc_up_thunk_RsResource_lockTestRelaxedDupObjIsDuplicate, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpuresIsDuplicate, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual
+    .vtable.__lockTestRelaxedDupObjPreDestruct__ = &__nvoc_up_thunk_RsResource_lockTestRelaxedDupObjPreDestruct, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresPreDestruct__ = &__nvoc_up_thunk_RsResource_gpuresPreDestruct, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual
+    .vtable.__lockTestRelaxedDupObjControlFilter__ = &__nvoc_up_thunk_RsResource_lockTestRelaxedDupObjControlFilter, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControlFilter__ = &__nvoc_up_thunk_RsResource_gpuresControlFilter, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual
+    .vtable.__lockTestRelaxedDupObjIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_lockTestRelaxedDupObjIsPartialUnmapSupported, // inline virtual inherited (res) base (gpures) body
+    .metadata__GpuResource.vtable.__gpuresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body
+    .vtable.__lockTestRelaxedDupObjMapTo__ = &__nvoc_up_thunk_RsResource_lockTestRelaxedDupObjMapTo, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresMapTo__ = &__nvoc_up_thunk_RsResource_gpuresMapTo, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual
+    .vtable.__lockTestRelaxedDupObjUnmapFrom__ = &__nvoc_up_thunk_RsResource_lockTestRelaxedDupObjUnmapFrom, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpuresUnmapFrom, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual
+    .vtable.__lockTestRelaxedDupObjGetRefCount__ = &__nvoc_up_thunk_RsResource_lockTestRelaxedDupObjGetRefCount, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetRefCount__ = &__nvoc_up_thunk_RsResource_gpuresGetRefCount, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual
+    .vtable.__lockTestRelaxedDupObjAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_lockTestRelaxedDupObjAddAdditionalDependants, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual
+};
+
+
+// Dynamic down-casting information
+const struct NVOC_CASTINFO __nvoc_castinfo__LockTestRelaxedDupObject = {
+    .numRelatives = 6,
+    .relatives = {
+        &__nvoc_metadata__LockTestRelaxedDupObject.rtti, // [0]: (lockTestRelaxedDupObj) this
+        &__nvoc_metadata__LockTestRelaxedDupObject.metadata__GpuResource.rtti, // [1]: (gpures) super
+        &__nvoc_metadata__LockTestRelaxedDupObject.metadata__GpuResource.metadata__RmResource.rtti, // [2]: (rmres) super^2
+        &__nvoc_metadata__LockTestRelaxedDupObject.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3
+        &__nvoc_metadata__LockTestRelaxedDupObject.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4
+        &__nvoc_metadata__LockTestRelaxedDupObject.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3
+    }
+};
+
+// 1 down-thunk(s) defined to bridge methods in LockTestRelaxedDupObject from superclasses
+
+// lockTestRelaxedDupObjCanCopy: inline virtual override (res) base (gpures) body
+NvBool __nvoc_down_thunk_LockTestRelaxedDupObject_resCanCopy(struct RsResource *pResource) {
+    return lockTestRelaxedDupObjCanCopy((struct LockTestRelaxedDupObject *)(((unsigned char *) pResource) - NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+
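The cast table just defined is what lets `dynamicCast()` work without compiler RTTI: it enumerates every class this object can legally be viewed as, and each entry's `rtti` carries the byte offset of that base within the full object. A simplified sketch of an offset-walking cast over such a table; the structure names are stand-ins, not the actual NVOC runtime types:

#include <stddef.h>

struct SketchClassDef {
    const char *name;
};

struct SketchRtti {
    const struct SketchClassDef *pClassDef;
    size_t offset; /* where this base lives inside the derived object */
};

struct SketchCastInfo {
    size_t numRelatives;
    const struct SketchRtti *relatives[8];
};

/* Walk the relatives; on a match, return the offset-adjusted pointer. */
static void *sketchDynamicCast(void *pObj,
                               const struct SketchCastInfo *pCastInfo,
                               const struct SketchClassDef *pTarget)
{
    size_t i;
    for (i = 0; i < pCastInfo->numRelatives; i++) {
        if (pCastInfo->relatives[i]->pClassDef == pTarget)
            return (unsigned char *)pObj + pCastInfo->relatives[i]->offset;
    }
    return NULL; /* not related to the requested class */
}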
+// 24 up-thunk(s) defined to bridge methods in LockTestRelaxedDupObject to superclasses
+
+// lockTestRelaxedDupObjControl: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjControl(struct LockTestRelaxedDupObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return gpuresControl((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource)), pCallContext, pParams);
+}
+
+// lockTestRelaxedDupObjMap: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjMap(struct LockTestRelaxedDupObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
+    return gpuresMap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource)), pCallContext, pParams, pCpuMapping);
+}
+
+// lockTestRelaxedDupObjUnmap: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjUnmap(struct LockTestRelaxedDupObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
+    return gpuresUnmap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource)), pCallContext, pCpuMapping);
+}
+
+// lockTestRelaxedDupObjShareCallback: virtual inherited (gpures) base (gpures)
+NvBool __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjShareCallback(struct LockTestRelaxedDupObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return gpuresShareCallback((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource)), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+// lockTestRelaxedDupObjGetRegBaseOffsetAndSize: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjGetRegBaseOffsetAndSize(struct LockTestRelaxedDupObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
+    return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource)), pGpu, pOffset, pSize);
+}
+
+// lockTestRelaxedDupObjGetMapAddrSpace: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjGetMapAddrSpace(struct LockTestRelaxedDupObject *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
+    return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource)), pCallContext, mapFlags, pAddrSpace);
+}
+
+// lockTestRelaxedDupObjInternalControlForward: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjInternalControlForward(struct LockTestRelaxedDupObject *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
+    return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource)), command, pParams, size);
+}
+
+// lockTestRelaxedDupObjGetInternalObjectHandle: virtual inherited (gpures) base (gpures)
+NvHandle __nvoc_up_thunk_GpuResource_lockTestRelaxedDupObjGetInternalObjectHandle(struct LockTestRelaxedDupObject *pGpuResource) {
+    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource)));
+}
+
+// lockTestRelaxedDupObjAccessCallback: virtual inherited (rmres) base (gpures)
+NvBool __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjAccessCallback(struct LockTestRelaxedDupObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight);
+}
+
+// lockTestRelaxedDupObjGetMemInterMapParams: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjGetMemInterMapParams(struct LockTestRelaxedDupObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pParams);
+}
+
+// lockTestRelaxedDupObjCheckMemInterUnmap: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjCheckMemInterUnmap(struct LockTestRelaxedDupObject *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), bSubdeviceHandleProvided);
+}
+
+// lockTestRelaxedDupObjGetMemoryMappingDescriptor: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjGetMemoryMappingDescriptor(struct LockTestRelaxedDupObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), ppMemDesc);
+}
+
+// lockTestRelaxedDupObjControlSerialization_Prologue: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjControlSerialization_Prologue(struct LockTestRelaxedDupObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// lockTestRelaxedDupObjControlSerialization_Epilogue: virtual inherited (rmres) base (gpures)
+void __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjControlSerialization_Epilogue(struct LockTestRelaxedDupObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// lockTestRelaxedDupObjControl_Prologue: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjControl_Prologue(struct LockTestRelaxedDupObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// lockTestRelaxedDupObjControl_Epilogue: virtual inherited (rmres) base (gpures)
+void __nvoc_up_thunk_RmResource_lockTestRelaxedDupObjControl_Epilogue(struct LockTestRelaxedDupObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// lockTestRelaxedDupObjIsDuplicate: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjIsDuplicate(struct LockTestRelaxedDupObject *pResource, NvHandle hMemory, NvBool *pDuplicate) {
+    return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate);
+}
+
+// lockTestRelaxedDupObjPreDestruct: virtual inherited (res) base (gpures)
+void __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjPreDestruct(struct LockTestRelaxedDupObject *pResource) {
+    resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// lockTestRelaxedDupObjControlFilter: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjControlFilter(struct LockTestRelaxedDupObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// lockTestRelaxedDupObjIsPartialUnmapSupported: inline virtual inherited (res) base (gpures) body
+NvBool __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjIsPartialUnmapSupported(struct LockTestRelaxedDupObject *pResource) {
+    return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// lockTestRelaxedDupObjMapTo: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjMapTo(struct LockTestRelaxedDupObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// lockTestRelaxedDupObjUnmapFrom: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjUnmapFrom(struct LockTestRelaxedDupObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// lockTestRelaxedDupObjGetRefCount: virtual inherited (res) base (gpures)
+NvU32 __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjGetRefCount(struct LockTestRelaxedDupObject *pResource) {
+    return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// lockTestRelaxedDupObjAddAdditionalDependants: virtual inherited (res) base (gpures)
+void __nvoc_up_thunk_RsResource_lockTestRelaxedDupObjAddAdditionalDependants(struct RsClient *pClient, struct LockTestRelaxedDupObject *pResource, RsResourceRef *pReference) {
+    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(LockTestRelaxedDupObject, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference);
+}
+
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__LockTestRelaxedDupObject =
+{
+    /*numEntries=*/ 0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_GpuResource(GpuResource*);
+void __nvoc_dtor_LockTestRelaxedDupObject(LockTestRelaxedDupObject *pThis) {
+    __nvoc_lockTestRelaxedDupObjDestruct(pThis);
+    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_LockTestRelaxedDupObject(LockTestRelaxedDupObject *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_LockTestRelaxedDupObject(LockTestRelaxedDupObject *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_LockTestRelaxedDupObject_fail_GpuResource;
+    __nvoc_init_dataField_LockTestRelaxedDupObject(pThis);
+
+    status = __nvoc_lockTestRelaxedDupObjConstruct(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_LockTestRelaxedDupObject_fail__init;
+    goto __nvoc_ctor_LockTestRelaxedDupObject_exit; // Success
+
+__nvoc_ctor_LockTestRelaxedDupObject_fail__init:
+    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+__nvoc_ctor_LockTestRelaxedDupObject_fail_GpuResource:
+__nvoc_ctor_LockTestRelaxedDupObject_exit:
+
+    return status;
+}
+
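The generated constructor above follows NVOC's standard unwind ladder: construct the base, then this class's own state, and on failure fall through labels that tear down only the steps that already succeeded. The same shape in isolation, with placeholder construct and teardown steps:

typedef int SKETCH_STATUS;
#define SKETCH_OK   0
#define SKETCH_FAIL 1

static SKETCH_STATUS ctorBasePart(int *pState) { *pState = 1; return SKETCH_OK; }
static void          dtorBasePart(int *pState) { *pState = 0; }
static SKETCH_STATUS ctorSelfPart(int *pState) { return (*pState != 0) ? SKETCH_OK : SKETCH_FAIL; }

static SKETCH_STATUS ctorLadder(int *pState)
{
    SKETCH_STATUS status;

    status = ctorBasePart(pState);
    if (status != SKETCH_OK) goto fail_base;

    status = ctorSelfPart(pState);
    if (status != SKETCH_OK) goto fail_self;

    return SKETCH_OK; /* fully constructed */

fail_self:
    dtorBasePart(pState); /* undo only what succeeded */
fail_base:
    return status;
}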
+    __nvoc_init__GpuResource(&pThis->__nvoc_base_GpuResource);
+
+    // Pointer(s) to metadata structure(s)
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__LockTestRelaxedDupObject.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object;    // (obj) super^4
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__LockTestRelaxedDupObject.metadata__GpuResource.metadata__RmResource.metadata__RsResource;    // (res) super^3
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__LockTestRelaxedDupObject.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon;    // (rmrescmn) super^3
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__LockTestRelaxedDupObject.metadata__GpuResource.metadata__RmResource;    // (rmres) super^2
+    pThis->__nvoc_base_GpuResource.__nvoc_metadata_ptr = &__nvoc_metadata__LockTestRelaxedDupObject.metadata__GpuResource;    // (gpures) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__LockTestRelaxedDupObject;    // (lockTestRelaxedDupObj) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_LockTestRelaxedDupObject(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_LockTestRelaxedDupObject(LockTestRelaxedDupObject **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    LockTestRelaxedDupObject *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(LockTestRelaxedDupObject), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(LockTestRelaxedDupObject));
+
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent, if there is one, unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__LockTestRelaxedDupObject(pThis);
+    status = __nvoc_ctor_LockTestRelaxedDupObject(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_objCreate_LockTestRelaxedDupObject_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_LockTestRelaxedDupObject_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
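// Illustrative usage sketch (editorial, not generated code; pParent,
// pCallContext and pParams are assumed to come from the resource-server
// allocation path):
//
//     LockTestRelaxedDupObject *pObj = NULL;
//     NV_STATUS st = __objCreate_LockTestRelaxedDupObject(&pObj, pParent,
//                        0 /* createFlags */, pCallContext, pParams);
//     if (st != NV_OK)
//         return st;  // on failure *ppThis is NULL and the allocation is released below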
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(LockTestRelaxedDupObject)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_LockTestRelaxedDupObject(LockTestRelaxedDupObject **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_LockTestRelaxedDupObject(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_lock_test_nvoc.h b/src/nvidia/generated/g_lock_test_nvoc.h new file mode 100644 index 0000000..47835d8 --- /dev/null +++ b/src/nvidia/generated/g_lock_test_nvoc.h @@ -0,0 +1,327 @@ + +#ifndef _G_LOCK_TEST_NVOC_H_ +#define _G_LOCK_TEST_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_lock_test_nvoc.h" + +#ifndef LOCK_TEST_H +#define LOCK_TEST_H + +#include "gpu/gpu_resource.h" +#include "nvoc/prelude.h" +#include "nvstatus.h" +#include "resserv/resserv.h" + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_LOCK_TEST_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__LockTestRelaxedDupObject; +struct NVOC_METADATA__GpuResource; +struct NVOC_VTABLE__LockTestRelaxedDupObject; + + +struct LockTestRelaxedDupObject { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__LockTestRelaxedDupObject *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct GpuResource __nvoc_base_GpuResource; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^4 + struct RsResource *__nvoc_pbase_RsResource; // res super^3 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^2 + struct GpuResource *__nvoc_pbase_GpuResource; // gpures super + struct LockTestRelaxedDupObject *__nvoc_pbase_LockTestRelaxedDupObject; // lockTestRelaxedDupObj +}; + + +// Vtable with 25 per-class function pointers +struct NVOC_VTABLE__LockTestRelaxedDupObject { + NvBool (*__lockTestRelaxedDupObjCanCopy__)(struct LockTestRelaxedDupObject * /*this*/); // inline virtual override (res) base (gpures) body + NV_STATUS (*__lockTestRelaxedDupObjControl__)(struct LockTestRelaxedDupObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__lockTestRelaxedDupObjMap__)(struct LockTestRelaxedDupObject * /*this*/, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__lockTestRelaxedDupObjUnmap__)(struct LockTestRelaxedDupObject * /*this*/, struct CALL_CONTEXT *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures) + NvBool (*__lockTestRelaxedDupObjShareCallback__)(struct LockTestRelaxedDupObject * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__lockTestRelaxedDupObjGetRegBaseOffsetAndSize__)(struct LockTestRelaxedDupObject * /*this*/, struct OBJGPU *, NvU32 *, NvU32 *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__lockTestRelaxedDupObjGetMapAddrSpace__)(struct LockTestRelaxedDupObject * /*this*/, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__lockTestRelaxedDupObjInternalControlForward__)(struct LockTestRelaxedDupObject * /*this*/, NvU32, void *, NvU32); // virtual inherited (gpures) base (gpures) + NvHandle (*__lockTestRelaxedDupObjGetInternalObjectHandle__)(struct LockTestRelaxedDupObject * /*this*/); // virtual inherited (gpures) base (gpures) + NvBool (*__lockTestRelaxedDupObjAccessCallback__)(struct LockTestRelaxedDupObject * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__lockTestRelaxedDupObjGetMemInterMapParams__)(struct LockTestRelaxedDupObject * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__lockTestRelaxedDupObjCheckMemInterUnmap__)(struct LockTestRelaxedDupObject * /*this*/, NvBool); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__lockTestRelaxedDupObjGetMemoryMappingDescriptor__)(struct LockTestRelaxedDupObject * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__lockTestRelaxedDupObjControlSerialization_Prologue__)(struct LockTestRelaxedDupObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + void (*__lockTestRelaxedDupObjControlSerialization_Epilogue__)(struct LockTestRelaxedDupObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__lockTestRelaxedDupObjControl_Prologue__)(struct LockTestRelaxedDupObject * /*this*/, struct 
CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + void (*__lockTestRelaxedDupObjControl_Epilogue__)(struct LockTestRelaxedDupObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__lockTestRelaxedDupObjIsDuplicate__)(struct LockTestRelaxedDupObject * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (gpures) + void (*__lockTestRelaxedDupObjPreDestruct__)(struct LockTestRelaxedDupObject * /*this*/); // virtual inherited (res) base (gpures) + NV_STATUS (*__lockTestRelaxedDupObjControlFilter__)(struct LockTestRelaxedDupObject * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (gpures) + NvBool (*__lockTestRelaxedDupObjIsPartialUnmapSupported__)(struct LockTestRelaxedDupObject * /*this*/); // inline virtual inherited (res) base (gpures) body + NV_STATUS (*__lockTestRelaxedDupObjMapTo__)(struct LockTestRelaxedDupObject * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (gpures) + NV_STATUS (*__lockTestRelaxedDupObjUnmapFrom__)(struct LockTestRelaxedDupObject * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (gpures) + NvU32 (*__lockTestRelaxedDupObjGetRefCount__)(struct LockTestRelaxedDupObject * /*this*/); // virtual inherited (res) base (gpures) + void (*__lockTestRelaxedDupObjAddAdditionalDependants__)(struct RsClient *, struct LockTestRelaxedDupObject * /*this*/, RsResourceRef *); // virtual inherited (res) base (gpures) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__LockTestRelaxedDupObject { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__GpuResource metadata__GpuResource; + const struct NVOC_VTABLE__LockTestRelaxedDupObject vtable; +}; + +#ifndef __NVOC_CLASS_LockTestRelaxedDupObject_TYPEDEF__ +#define __NVOC_CLASS_LockTestRelaxedDupObject_TYPEDEF__ +typedef struct LockTestRelaxedDupObject LockTestRelaxedDupObject; +#endif /* __NVOC_CLASS_LockTestRelaxedDupObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_LockTestRelaxedDupObject +#define __nvoc_class_id_LockTestRelaxedDupObject 0x19e861 +#endif /* __nvoc_class_id_LockTestRelaxedDupObject */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_LockTestRelaxedDupObject; + +#define __staticCast_LockTestRelaxedDupObject(pThis) \ + ((pThis)->__nvoc_pbase_LockTestRelaxedDupObject) + +#ifdef __nvoc_lock_test_h_disabled +#define __dynamicCast_LockTestRelaxedDupObject(pThis) ((LockTestRelaxedDupObject*) NULL) +#else //__nvoc_lock_test_h_disabled +#define __dynamicCast_LockTestRelaxedDupObject(pThis) \ + ((LockTestRelaxedDupObject*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(LockTestRelaxedDupObject))) +#endif //__nvoc_lock_test_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_LockTestRelaxedDupObject(LockTestRelaxedDupObject**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_LockTestRelaxedDupObject(LockTestRelaxedDupObject**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_LockTestRelaxedDupObject(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_LockTestRelaxedDupObject((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define lockTestRelaxedDupObjCanCopy_FNPTR(pResource) 
pResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjCanCopy__ +#define lockTestRelaxedDupObjCanCopy(pResource) lockTestRelaxedDupObjCanCopy_DISPATCH(pResource) +#define lockTestRelaxedDupObjControl_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresControl__ +#define lockTestRelaxedDupObjControl(pGpuResource, pCallContext, pParams) lockTestRelaxedDupObjControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define lockTestRelaxedDupObjMap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresMap__ +#define lockTestRelaxedDupObjMap(pGpuResource, pCallContext, pParams, pCpuMapping) lockTestRelaxedDupObjMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define lockTestRelaxedDupObjUnmap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresUnmap__ +#define lockTestRelaxedDupObjUnmap(pGpuResource, pCallContext, pCpuMapping) lockTestRelaxedDupObjUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define lockTestRelaxedDupObjShareCallback_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresShareCallback__ +#define lockTestRelaxedDupObjShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) lockTestRelaxedDupObjShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define lockTestRelaxedDupObjGetRegBaseOffsetAndSize_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetRegBaseOffsetAndSize__ +#define lockTestRelaxedDupObjGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) lockTestRelaxedDupObjGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define lockTestRelaxedDupObjGetMapAddrSpace_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetMapAddrSpace__ +#define lockTestRelaxedDupObjGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) lockTestRelaxedDupObjGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define lockTestRelaxedDupObjInternalControlForward_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresInternalControlForward__ +#define lockTestRelaxedDupObjInternalControlForward(pGpuResource, command, pParams, size) lockTestRelaxedDupObjInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define lockTestRelaxedDupObjGetInternalObjectHandle_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__ +#define lockTestRelaxedDupObjGetInternalObjectHandle(pGpuResource) lockTestRelaxedDupObjGetInternalObjectHandle_DISPATCH(pGpuResource) +#define lockTestRelaxedDupObjAccessCallback_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define lockTestRelaxedDupObjAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) lockTestRelaxedDupObjAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define lockTestRelaxedDupObjGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define lockTestRelaxedDupObjGetMemInterMapParams(pRmResource, pParams) lockTestRelaxedDupObjGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define lockTestRelaxedDupObjCheckMemInterUnmap_FNPTR(pRmResource) 
pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define lockTestRelaxedDupObjCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) lockTestRelaxedDupObjCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define lockTestRelaxedDupObjGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define lockTestRelaxedDupObjGetMemoryMappingDescriptor(pRmResource, ppMemDesc) lockTestRelaxedDupObjGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define lockTestRelaxedDupObjControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define lockTestRelaxedDupObjControlSerialization_Prologue(pResource, pCallContext, pParams) lockTestRelaxedDupObjControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define lockTestRelaxedDupObjControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define lockTestRelaxedDupObjControlSerialization_Epilogue(pResource, pCallContext, pParams) lockTestRelaxedDupObjControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define lockTestRelaxedDupObjControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define lockTestRelaxedDupObjControl_Prologue(pResource, pCallContext, pParams) lockTestRelaxedDupObjControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define lockTestRelaxedDupObjControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define lockTestRelaxedDupObjControl_Epilogue(pResource, pCallContext, pParams) lockTestRelaxedDupObjControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define lockTestRelaxedDupObjIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define lockTestRelaxedDupObjIsDuplicate(pResource, hMemory, pDuplicate) lockTestRelaxedDupObjIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define lockTestRelaxedDupObjPreDestruct_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define lockTestRelaxedDupObjPreDestruct(pResource) lockTestRelaxedDupObjPreDestruct_DISPATCH(pResource) +#define lockTestRelaxedDupObjControlFilter_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define lockTestRelaxedDupObjControlFilter(pResource, pCallContext, pParams) lockTestRelaxedDupObjControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define lockTestRelaxedDupObjIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define lockTestRelaxedDupObjIsPartialUnmapSupported(pResource) lockTestRelaxedDupObjIsPartialUnmapSupported_DISPATCH(pResource) +#define lockTestRelaxedDupObjMapTo_FNPTR(pResource) 
pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define lockTestRelaxedDupObjMapTo(pResource, pParams) lockTestRelaxedDupObjMapTo_DISPATCH(pResource, pParams) +#define lockTestRelaxedDupObjUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define lockTestRelaxedDupObjUnmapFrom(pResource, pParams) lockTestRelaxedDupObjUnmapFrom_DISPATCH(pResource, pParams) +#define lockTestRelaxedDupObjGetRefCount_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define lockTestRelaxedDupObjGetRefCount(pResource) lockTestRelaxedDupObjGetRefCount_DISPATCH(pResource) +#define lockTestRelaxedDupObjAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define lockTestRelaxedDupObjAddAdditionalDependants(pClient, pResource, pReference) lockTestRelaxedDupObjAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NvBool lockTestRelaxedDupObjCanCopy_DISPATCH(struct LockTestRelaxedDupObject *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjCanCopy__(pResource); +} + +static inline NV_STATUS lockTestRelaxedDupObjControl_DISPATCH(struct LockTestRelaxedDupObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS lockTestRelaxedDupObjMap_DISPATCH(struct LockTestRelaxedDupObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS lockTestRelaxedDupObjUnmap_DISPATCH(struct LockTestRelaxedDupObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NvBool lockTestRelaxedDupObjShareCallback_DISPATCH(struct LockTestRelaxedDupObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS lockTestRelaxedDupObjGetRegBaseOffsetAndSize_DISPATCH(struct LockTestRelaxedDupObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS lockTestRelaxedDupObjGetMapAddrSpace_DISPATCH(struct LockTestRelaxedDupObject *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS lockTestRelaxedDupObjInternalControlForward_DISPATCH(struct 
LockTestRelaxedDupObject *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NvHandle lockTestRelaxedDupObjGetInternalObjectHandle_DISPATCH(struct LockTestRelaxedDupObject *pGpuResource) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjGetInternalObjectHandle__(pGpuResource); +} + +static inline NvBool lockTestRelaxedDupObjAccessCallback_DISPATCH(struct LockTestRelaxedDupObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS lockTestRelaxedDupObjGetMemInterMapParams_DISPATCH(struct LockTestRelaxedDupObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS lockTestRelaxedDupObjCheckMemInterUnmap_DISPATCH(struct LockTestRelaxedDupObject *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS lockTestRelaxedDupObjGetMemoryMappingDescriptor_DISPATCH(struct LockTestRelaxedDupObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS lockTestRelaxedDupObjControlSerialization_Prologue_DISPATCH(struct LockTestRelaxedDupObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void lockTestRelaxedDupObjControlSerialization_Epilogue_DISPATCH(struct LockTestRelaxedDupObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS lockTestRelaxedDupObjControl_Prologue_DISPATCH(struct LockTestRelaxedDupObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void lockTestRelaxedDupObjControl_Epilogue_DISPATCH(struct LockTestRelaxedDupObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS lockTestRelaxedDupObjIsDuplicate_DISPATCH(struct LockTestRelaxedDupObject *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void lockTestRelaxedDupObjPreDestruct_DISPATCH(struct LockTestRelaxedDupObject *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjPreDestruct__(pResource); +} + +static inline NV_STATUS 
lockTestRelaxedDupObjControlFilter_DISPATCH(struct LockTestRelaxedDupObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool lockTestRelaxedDupObjIsPartialUnmapSupported_DISPATCH(struct LockTestRelaxedDupObject *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS lockTestRelaxedDupObjMapTo_DISPATCH(struct LockTestRelaxedDupObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjMapTo__(pResource, pParams); +} + +static inline NV_STATUS lockTestRelaxedDupObjUnmapFrom_DISPATCH(struct LockTestRelaxedDupObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjUnmapFrom__(pResource, pParams); +} + +static inline NvU32 lockTestRelaxedDupObjGetRefCount_DISPATCH(struct LockTestRelaxedDupObject *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjGetRefCount__(pResource); +} + +static inline void lockTestRelaxedDupObjAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct LockTestRelaxedDupObject *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__lockTestRelaxedDupObjAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvBool lockTestRelaxedDupObjCanCopy_e661f0(struct LockTestRelaxedDupObject *pResource) { + return NV_TRUE; +} + +NV_STATUS lockTestRelaxedDupObjConstruct_IMPL(struct LockTestRelaxedDupObject *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_lockTestRelaxedDupObjConstruct(arg_pResource, arg_pCallContext, arg_pParams) lockTestRelaxedDupObjConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +void lockTestRelaxedDupObjDestruct_IMPL(struct LockTestRelaxedDupObject *pResource); + +#define __nvoc_lockTestRelaxedDupObjDestruct(pResource) lockTestRelaxedDupObjDestruct_IMPL(pResource) +#undef PRIVATE_FIELD + + +#endif // LOCK_TEST_H + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_LOCK_TEST_NVOC_H_ diff --git a/src/nvidia/generated/g_mem_desc_nvoc.h b/src/nvidia/generated/g_mem_desc_nvoc.h new file mode 100644 index 0000000..e8c037d --- /dev/null +++ b/src/nvidia/generated/g_mem_desc_nvoc.h @@ -0,0 +1,1593 @@ + +#ifndef _G_MEM_DESC_NVOC_H_ +#define _G_MEM_DESC_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_mem_desc_nvoc.h" + +#ifndef _MEMDESC_H_ +#define _MEMDESC_H_ + +#include "core/prelude.h" +#include "poolalloc.h" + + + +struct OBJVASPACE; + +#ifndef __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +#define __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +typedef struct OBJVASPACE OBJVASPACE; +#endif /* __NVOC_CLASS_OBJVASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVASPACE +#define __nvoc_class_id_OBJVASPACE 0x6c347f +#endif /* __nvoc_class_id_OBJVASPACE */ + + + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + + +struct Heap; + +#ifndef __NVOC_CLASS_Heap_TYPEDEF__ +#define __NVOC_CLASS_Heap_TYPEDEF__ +typedef struct Heap Heap; +#endif /* __NVOC_CLASS_Heap_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Heap +#define __nvoc_class_id_Heap 0x556e9a +#endif /* __nvoc_class_id_Heap */ + + + +struct RsClient; + +#ifndef __NVOC_CLASS_RsClient_TYPEDEF__ +#define __NVOC_CLASS_RsClient_TYPEDEF__ +typedef struct RsClient RsClient; +#endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsClient +#define __nvoc_class_id_RsClient 0x8f87e5 +#endif /* __nvoc_class_id_RsClient */ + + +struct MEMORY_DESCRIPTOR; + +typedef struct CTX_BUF_POOL_INFO CTX_BUF_POOL_INFO; +typedef struct COMPR_INFO COMPR_INFO; + +// +// Address space identifiers. +// Note: This should match the NV2080_CTRL_GR_CTX_BUFFER_INFO_APERTURE_* defines +// in ctrl2080gr.h +// +typedef NvU32 NV_ADDRESS_SPACE; +#define ADDR_UNKNOWN 0 // Address space is unknown +#define ADDR_SYSMEM 1 // System memory (PCI) +#define ADDR_FBMEM 2 // Frame buffer memory space +#define ADDR_REGMEM 3 // NV register memory space +#define ADDR_VIRTUAL 4 // Virtual address space only +#define ADDR_FABRIC_V2 6 // Fabric address space for the FLA based addressing. Will replace ADDR_FABRIC. +#define ADDR_EGM 7 // Extended GPU Memory (EGM) +#define ADDR_FABRIC_MC 8 // Multicast fabric address space (MCFLA) + +typedef NvU32 MEMDESC_CUSTOM_HEAP; +#define MEMDESC_CUSTOM_HEAP_NONE 0 +#define MEMDESC_CUSTOM_HEAP_ACR 1 + +// +// Address translation identifiers: +// +// Memory descriptors are used to describe physical block(s) of memory. 
+// That memory can be described at various levels of address translation +// using the address translation (AT) enumerates. The levels of translation +// supported is illustrated below. +// +// The diagram is drawn for system memory with SR-IOV but the translations +// are similar for video memory (replace IOMMU with VMMU). VGPU pre-SR-IOV +// is also different. +// +// +-------------------+ +-------------------+ +// | CPU | | GPU Engine | +// +-------------------+ +-------------------+ +// | | +// | | GPU VA +// | V +// | +-------------------+ +// | CPU VA | GMMU | +// | +-------------------+ +// | | +// | | GPU GPA (AT_GPU) +// v v +// +-------------------+ +-------------------+ +// | MMU (1st level)| | | IOMMU (1st level) | +// +-------------------+ +-------------------+ +// | | +// | CPU GPA (AT_CPU) | <---- AT_PA for VGPU guest +// v v +// +-------------------+ +-------------------+ +// | MMU (2nd level) | | IOMMU (2nd level) | +// +-------------------+ +-------------------+ +// | | +// | SPA | SPA <---- AT_PA for bare metal +// v v or VGPU host +// +---------------------------------------------------+ +// | System Memory | +// +---------------------------------------------------+ +// +// +// Descriptions for *physical* address translation levels: +// +// AT_CPU - CPU physical address or guest physical address (GPA) +// AT_GPU - GPU physical address or guest physical address (GPA) +// AT_PA - When running in host RM or bare metal this is the system physical address. When +// running inside a VGPU guest environment, this is the last level of translation +// visible to the OS context that RM is running in. +// +// AT_CPU should typically == AT_PA, but there might be cases such as IBM P9 where vidmem +// might be 0-based on GPU but exposed elsewhere in the CPU address space. +// +// Descriptions for *virtual* address translation levels: +// +// AT_GPU_VA - Memory descriptors can also describe virtual memory allocations. AT_GPU_VA +// represents a GMMU virtual address. +// +#define AT_CPU AT_VARIANT(0) +#define AT_GPU AT_VARIANT(1) +#define AT_PA AT_VARIANT(2) + +#define AT_GPU_VA AT_VARIANT(3) + +// +// TODO - switch to using numeric values for AT_XYZ. Using pointers for +// typesafety after initial split from using class IDs/mmuContext +// +typedef struct ADDRESS_TRANSLATION_ *ADDRESS_TRANSLATION; +#define AT_VARIANT(x) ((struct ADDRESS_TRANSLATION_ *)x) +#define AT_VALUE(x) ((NvU64)(NvUPtr)(x)) + +// +// RM defined Memdesc surface names. The names are sent to Mods to enable feature verification. 
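// Illustrative note on the AT_* encoding above (editorial, not part of the
// header): because ADDRESS_TRANSLATION is a pointer to an incomplete struct
// type, accidentally passing a plain integer where a translation level is
// expected fails to compile, which is the type safety the TODO above refers to.
//
//     ADDRESS_TRANSLATION at = AT_GPU;   // same as AT_VARIANT(1)
//     NvU64 raw = AT_VALUE(at);          // raw == 1
//     // at = 1;                         // would not compile: int -> struct pointer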
+// +#define NV_RM_SURF_NAME_INSTANCE_BLOCK "rm_instance_block_surface" +#define NV_RM_SURF_NAME_PAGE_TABLE "rm_page_table_surface" +#define NV_RM_SURF_NAME_NONREPLAYABLE_FAULT_BUFFER "rm_non_replayable_fault_buffer_surface" +#define NV_RM_SURF_NAME_REPLAYABLE_FAULT_BUFFER "rm_replayable_fault_buffer_surface" +#define NV_RM_SURF_NAME_CE_FAULT_METHOD_BUFFER "rm_ce_fault_method_buffer_surface" +#define NV_RM_SURF_NAME_ACCESS_COUNTER_BUFFER "rm_access_counter_buffer_surface" +#define NV_RM_SURF_NAME_VAB "rm_vab_surface" +#define NV_RM_SURF_NAME_GR_CIRCULAR_BUFFER "rm_gr_ctx_circular_buffer_surface" + +// +// Tagging wrapper macro for memdescAlloc +// +#define memdescTagAlloc(stat, tag, pMemdesc) {(pMemdesc)->allocTag = tag; stat = memdescAlloc(pMemdesc);} +#define memdescTagAllocList(stat, tag, pMemdesc, pList) {(pMemdesc)->allocTag = tag; stat = memdescAllocList(pMemdesc, pList);} + +// +// Defines for commonly used transformations for page size +// +#define GET_PAGE_SHIFT(val) (BIT_IDX_32(val)) +#define GET_PAGE_MASK(val) ((1ULL << GET_PAGE_SHIFT(val)) - 1) +#define GET_SIZE_FROM_PAGE_AND_COUNT(pageCount, pageSize) (((NvU64) pageCount) << (GET_PAGE_SHIFT(pageSize))) + +// Invalid PTE value +#define MEMDESC_INVALID_PTE (~0ULL) + +// +// External flags: +// ALLOC_PER_SUBDEVICE Allocate independent system memory for each GPU +// LOST_ON_SUSPEND PM code will skip this allocation during S/R +// LOCKLESS_SYSMEM_ALLOC System memory should be allocated unprotected by +// the RM lock +// GPU_PRIVILEGED This memory will be marked as privileged in the GPU +// page tables. When set only GPU requestors who are +// "privileged" are allowed to access this memory. +// This can be used for mapping sensitive memory into +// a user's GPU address space (like context buffers). +// Note support for this in our GPUs is limited, so +// only use it if you know the HW accessing the memory +// makes privileged requests. +// +// Internal flags: +// SET_KIND Whether or not the kind was set a different value +// than default. +// PRE_ALLOCATED Caller provided memory descriptor memory +// FIXED_ADDRESS_ALLOCATE Allocate from the heap with a fixed address +// ALLOCATED Has the memory been allocated yet? +// GUEST_ALLOCATED Is the memory allocated by a guest VM? +// We make aliased memory descriptors to guest +// allocated memory and mark it so, so that we know +// how to deal with it in memdescMap() etc. +// KERNEL_MODE Is the memory for a user or kernel context? +// XXX This is lame, and it would be best if we could +// get rid of it. Memory *storage* isn't either user +// or kernel -- only mappings are user or kernel. +// Unfortunately, osAllocPages requires that we +// provide this information. +// PHYSICALLY_CONTIGUOUS Are the underlying physical pages of this memory +// allocation contiguous? +// ENCRYPTED TurboCipher allocations need a bit in the PTE to +// indicate encrypted +// UNICAST Memory descriptor was created via UC path +// PAGED_SYSMEM Allocate the memory from paged system memory. When +// this flag is used, memdescLock() should be called +// to lock the memory in physical pages before we +// access this memory descriptor. +// CPU_ONLY Allocate memory only accessed by CPU. 
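// Worked examples for the helpers above (editorial, not part of the header):
//
// For a 4 KiB page size: GET_PAGE_SHIFT(0x1000) == 12, GET_PAGE_MASK(0x1000)
// == 0xFFF, and GET_SIZE_FROM_PAGE_AND_COUNT(4, 0x1000) == 0x4000.
//
// The tagging wrappers replace a bare memdescAlloc() so the allocation carries
// an owner tag (pMemDesc assumed to be an already created descriptor):
//
//     NV_STATUS status;
//     memdescTagAlloc(status, NV_FB_ALLOC_RM_INTERNAL_OWNER_CONTEXT_BUFFER, pMemDesc);
//     if (status != NV_OK)
//         return status;
//
// The MEMDESC_FLAGS_* values that follow are NVBIT64() masks, so they combine
// with bitwise OR, e.g. MEMDESC_FLAGS_KERNEL_MODE | MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS.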
+//
+#define MEMDESC_FLAGS_NONE                          ((NvU64)0x0)
+#define MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE           NVBIT64(0)
+#define MEMDESC_FLAGS_SET_KIND                      NVBIT64(1)
+#define MEMDESC_FLAGS_LOST_ON_SUSPEND               NVBIT64(2)
+#define MEMDESC_FLAGS_PRE_ALLOCATED                 NVBIT64(3)
+#define MEMDESC_FLAGS_FIXED_ADDRESS_ALLOCATE        NVBIT64(4)
+#define MEMDESC_FLAGS_LOCKLESS_SYSMEM_ALLOC         NVBIT64(5)
+#define MEMDESC_FLAGS_GPU_IN_RESET                  NVBIT64(6)
+#define MEMDESC_ALLOC_FLAGS_PROTECTED               NVBIT64(7)
+#define MEMDESC_FLAGS_GUEST_ALLOCATED               NVBIT64(8)
+#define MEMDESC_FLAGS_KERNEL_MODE                   NVBIT64(9)
+#define MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS         NVBIT64(10)
+#define MEMDESC_FLAGS_ENCRYPTED                     NVBIT64(11)
+#define MEMDESC_FLAGS_PAGED_SYSMEM                  NVBIT64(12)
+#define MEMDESC_FLAGS_GPU_PRIVILEGED                NVBIT64(13)
+#define MEMDESC_FLAGS_PRESERVE_CONTENT_ON_SUSPEND   NVBIT64(14)
+#define MEMDESC_FLAGS_DUMMY_TOPLEVEL                NVBIT64(15)
+
+// Don't use the two flags below; they are for memdesc internal use only.
+// These flags will be removed when memory allocation is refactored in RM.
+#define MEMDESC_FLAGS_PROVIDE_IOMMU_MAP             NVBIT64(16)
+#define MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE         NVBIT64(17)
+
+#define MEMDESC_FLAGS_CUSTOM_HEAP_ACR               NVBIT64(18)
+
+// Allocate in "fast" or "slow" memory if there are multiple grades of memory (like mixed density)
+#define MEMDESC_FLAGS_HIGH_PRIORITY                 NVBIT64(19)
+#define MEMDESC_FLAGS_LOW_PRIORITY                  NVBIT64(20)
+
+// Flag to specify whether the requested size should be rounded to the page size
+#define MEMDESC_FLAGS_PAGE_SIZE_ALIGN_IGNORE        NVBIT64(21)
+
+#define MEMDESC_FLAGS_CPU_ONLY                      NVBIT64(22)
+
+// This flag is used for a special SYSMEM descriptor that points to a memory
+// region allocated externally (e.g. malloc, kmalloc, etc.)
+#define MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM            NVBIT64(23)
+
+// Owned by the Physical Memory Allocator (PMA).
+#define MEMDESC_FLAGS_ALLOC_PMA_OWNED               NVBIT64(24)
+
+// This flag is part of the Sub-Allocator feature meant to be used by VGPU clients.
+// Once VGPU clients allocate a large block of memory for their use, they carve out a small
+// portion of it to be used for RM internal allocations originating from a given client. Each
+// allocation can choose to use this carved-out memory owned by the client or be part of the global heap.
+// This flag must be used in an RM internal allocation only when that allocation is tied to
+// the lifetime of this client and will be freed before the client is destroyed.
+#define MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE       NVBIT64(25)
+
+// This flag specifies that the pages are pinned by another kernel module or API.
+// Currently, this flag is used for vGPU on KVM, where RM calls vfio APIs to pin and unpin pages
+// instead of using os_lock_user_pages() and os_unlock_user_pages().
+#define MEMDESC_FLAGS_FOREIGN_PAGE                  NVBIT64(26)
+
+// These flags are used for SYSMEM descriptors that point to a physical BAR
+// range and do not take the usual memory mapping paths. Currently, these are used for vGPU.
+#define MEMDESC_FLAGS_BAR0_REFLECT                  NVBIT64(27)
+#define MEMDESC_FLAGS_BAR1_REFLECT                  NVBIT64(28)
+
+// This flag is used to create shared memory required for vGPU operation.
+// During RPC and all other shared memory allocations, VF RM will set this flag to instruct the mods
+// layer to create shared memory between the VF process and the PF process.
+#define MEMDESC_FLAGS_MODS_SHARED_MEM               NVBIT64(29)
+
+// This flag is set in memdescs that describe client (currently MODS) managed VPR allocations.
+#define MEMDESC_FLAGS_VPR_REGION_CLIENT_MANAGED     NVBIT64(30)
+
+// This flag is used for a special SYSMEM descriptor that points to the physical BAR
+// range of a third-party device.
+#define MEMDESC_FLAGS_PEER_IO_MEM                   NVBIT64(31)
+
+// If the flag is set, the RM will only allow read-only CPU user-mappings
+// to the descriptor.
+#define MEMDESC_FLAGS_USER_READ_ONLY                NVBIT64(32)
+
+// If the flag is set, the RM will only allow read-only DMA mappings
+// to the descriptor.
+#define MEMDESC_FLAGS_DEVICE_READ_ONLY              NVBIT64(33)
+
+// This flag denotes a memory descriptor that is part of a larger memory descriptor,
+// created using NV01_MEMORY_LIST_SYSTEM, NV01_MEMORY_LIST_FBMEM or NV01_MEMORY_LIST_OBJECT.
+#define MEMDESC_FLAGS_LIST_MEMORY                   NVBIT64(34)
+
+// This flag is used to configure the memory descriptor as SKED reflected for SYSMEM address spaces.
+// Memory accesses to these pages will be routed to SKED. Note that the memory aperture needs to be
+// non-coherent to enable the feature.
+#define MEMDESC_FLAGS_ALLOC_SKED_REFLECTED          NVBIT64(35)
+
+// This flag denotes that this memdesc is allocated from
+// a context buffer pool. When this flag is set, we expect a pointer
+// to this context buffer pool to be cached in the memdesc.
+#define MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL         NVBIT64(36)
+
+//
+// This flag is used to skip privilege checks for the ADDR_REGMEM mapping type.
+// This flag is useful for cases like UserModeApi where we want to use this memory type
+// in a non-privileged user context.
+#define MEMDESC_FLAGS_SKIP_REGMEM_PRIV_CHECK        NVBIT64(37)
+
+// This flag denotes a memory descriptor of type Display non-ISO
+#define MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO      NVBIT64(38)
+
+// This flag is used to force mapping of coherent sysmem through
+// the GMMU over BAR1. This is useful when we need some form
+// of special translation of the SYSMEM_COH aperture by the GMMU.
+#define MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1          NVBIT64(39)
+
+// This flag is used to override the system memory limit so the allocation
+// stays within the override address width.
+#define MEMDESC_FLAGS_OVERRIDE_SYSTEM_ADDRESS_LIMIT NVBIT64(40)
+
+//
+// If this flag is set, Linux RM will ensure that the allocated memory is
+// 32-bit addressable.
+#define MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE       NVBIT64(41)
+
+// unused                                           NVBIT64(42)
+
+//
+// If this flag is set, it indicates that the memory associated with
+// this descriptor was allocated from local EGM.
+//
+#define MEMDESC_FLAGS_ALLOC_FROM_EGM                NVBIT64(43)
+
+//
+// Indicates that this memdesc is tracking a client sysmem allocation, as
+// opposed to an RM internal sysmem allocation.
+//
+#define MEMDESC_FLAGS_SYSMEM_OWNED_BY_CLIENT        NVBIT64(44)
+
+//
+// The following is a special use case for sharing memory between
+// the GPU and a WSL client. There is no IOMMU-compliant support
+// currently for this, so a WAR is required for r515. The intent
+// is to remove this by r525.
+//
+#define MEMDESC_FLAGS_WSL_SHARED_MEMORY             NVBIT64(46)
+
+//
+// Skip IOMMU mapping creation during alloc for sysmem.
+// A mapping might be requested later with custom parameters.
+//
+#define MEMDESC_FLAGS_SKIP_IOMMU_MAPPING            NVBIT64(47)
+
+//
+// Special case: allocate the runlists for guests from their GPA.
+// In MODS, a VM's GPA is allocated from the subheap, so this flag is used to
+// force the memdesc to be allocated from the subheap.
+//
+#define MEMDESC_FLAGS_FORCE_ALLOC_FROM_SUBHEAP      NVBIT64(48)
+
+//
+// Indicates whether the memdesc is allocated as localized memory or not.
+// +#define MEMDESC_FLAGS_ALLOC_AS_LOCALIZED NVBIT64(50) + +#define MEMDESC_FLAGS_ALLOC_FROM_SCANOUT_CARVEOUT NVBIT64(51) + +// Force-compress pte kind when mapping with virtual pte kind +#define MEMDESC_FLAGS_MAP_FORCE_COMPRESSED_MAP NVBIT64(52) + +// +// RM will allow to map the ext sysmem memory into user space (cpu mapping). +// ClassId == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR is treated as +// ext sysmem memory. +// +#define MEMDESC_FLAGS_ALLOW_EXT_SYSMEM_USER_CPU_MAPPING NVBIT64(53) + +// Indicate if memdesc is allocated for non IO-coherent memory. +#define MEMDESC_FLAGS_NON_IO_COHERENT NVBIT64(54) + +// +// RM internal allocations owner tags +// Total 200 tags are introduced, out of which some are already +// replaced with known verbose strings +// +typedef enum +{ + NV_FB_ALLOC_RM_INTERNAL_OWNER__MIN = 10U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_COMPBIT_STORE = 11U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_CONTEXT_BUFFER = 12U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_ATTR_BUFFER = 13U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_PMU_SURFACE = 14U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_CIRCULAR_BUFFER = 15U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_PAGE_POOL = 16U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_ACCESS_MAP = 17U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_WPR_METADATA = 18U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_LIBOS_ARGS = 19U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_BOOTLOADER_ARGS = 20U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_SR_METADATA = 21U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_ACR_SETUP = 22U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_ACR_SHADOW = 23U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_ACR_BACKUP = 24U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_ACR_BINARY = 25U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_VBIOS_FRTS = 26U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_USERD_BUFFER = 27U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_RUNLIST_ENTRIES = 28U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_PAGE_PTE = 29U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_MMU_FAULT_BUFFER = 30U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_FAULT_METHOD = 31U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_WAR_PT = 32U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_WAR_PD = 33U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_1 = 34U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_2 = 35U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_3 = 36U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_4 = 37U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_5 = 38U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_6 = 39U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_7 = 40U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_8 = 41U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_9 = 42U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_10 = 43U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_11 = 44U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_12 = 45U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_13 = 46U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_14 = 47U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_15 = 48U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_16 = 49U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_17 = 50U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_18 = 51U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_19 = 52U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_20 = 53U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_21 = 54U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_22 = 55U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_23 = 56U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_24 = 57U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_25 = 58U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_26 = 59U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_27 = 60U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_28 = 61U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_29 = 62U, 
+ NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_30 = 63U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_31 = 64U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_32 = 65U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_33 = 66U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_34 = 67U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_RUSD_BUFFER = 68U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_36 = 69U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_37 = 70U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_38 = 71U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_39 = 72U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_40 = 73U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_41 = 74U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_42 = 75U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_43 = 76U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_44 = 77U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_45 = 78U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_46 = 79U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_47 = 80U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_48 = 81U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_49 = 82U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_50 = 83U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_51 = 84U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_52 = 85U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_53 = 86U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_54 = 87U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_55 = 88U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_56 = 89U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_57 = 90U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_58 = 91U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_59 = 92U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_60 = 93U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_61 = 94U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_62 = 95U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_63 = 96U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_64 = 97U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_65 = 98U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_66 = 99U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_67 = 100U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_68 = 101U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_69 = 102U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_70 = 103U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_71 = 104U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_72 = 105U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_73 = 106U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_74 = 107U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_75 = 108U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_76 = 109U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_77 = 110U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_78 = 111U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_79 = 112U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_80 = 113U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_81 = 114U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_82 = 115U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_83 = 116U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_84 = 117U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_85 = 118U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_86 = 119U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_87 = 120U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_88 = 121U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_89 = 122U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_90 = 123U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_91 = 124U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_92 = 125U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_93 = 126U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_94 = 127U, + 
NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_95 = 128U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_96 = 129U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_97 = 130U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_98 = 131U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_99 = 132U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_100 = 133U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_101 = 134U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_102 = 135U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_103 = 136U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_104 = 137U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_105 = 138U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_106 = 139U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_107 = 140U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_108 = 141U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_109 = 142U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_110 = 143U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_111 = 144U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_112 = 145U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_113 = 146U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_114 = 147U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_115 = 148U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_116 = 149U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_117 = 150U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_118 = 151U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_119 = 152U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_120 = 153U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_121 = 154U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_122 = 155U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_123 = 156U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_124 = 157U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_125 = 158U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_126 = 159U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_127 = 160U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_128 = 161U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_FBSR_CE_TEST_BUFFER = 162U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_130 = 163U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_131 = 164U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_132 = 165U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_133 = 166U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_134 = 167U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_135 = 168U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_136 = 169U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_137 = 170U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_138 = 171U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_139 = 172U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_140 = 173U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_141 = 174U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_142 = 175U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_143 = 176U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_GSP_NOTIFY_OP_SURFACE = 177U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_FAKE_WPR_RSVD = 178U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_GR_SCRUB_CHANNEL = 179U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_147 = 180U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_148 = 181U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_PMU_ACR_SHADOW_COPY = 182U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_FLCN_BACKING_STORE = 183U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_151 = 184U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_152 = 185U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_153 = 186U, + + // + // Unused tags from here, for any new use-case it's required + // to replace the below tags with known verbose strings + // + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_154 = 187U, + NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_155 = 
188U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_156 = 189U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_157 = 190U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_158 = 191U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_159 = 192U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_160 = 193U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_161 = 194U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_162 = 195U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_163 = 196U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_164 = 197U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_165 = 198U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_166 = 199U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_167 = 200U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_168 = 201U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_169 = 202U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_170 = 203U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_171 = 204U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_172 = 205U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_173 = 206U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_174 = 207U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_175 = 208U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_176 = 209U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER_COV_TASK_DESCRIPTOR = 210U,
+    NV_FB_ALLOC_RM_INTERNAL_OWNER__MAX = 211U,
+} NV_FB_ALLOC_RM_INTERNAL_OWNER;
+
+// Enum defining CPU/GPU snooping behavior
+typedef enum {
+    MEMDESC_CACHE_SNOOP_DEFER_TO_MAP = 0, // Choice is deferred to map time
+    MEMDESC_CACHE_SNOOP_DISABLE = 1,      // No GPU cache snooping takes place (SYS_NCOH)
+    MEMDESC_CACHE_SNOOP_ENABLE = 2        // GPU cache is snooped (SYS_COH)
+} MEMDESC_CACHE_SNOOP;
+
+//
+// Overrides address translation in SR-IOV enabled use cases
+//
+// In SR-IOV systems, an access from the guest has to go through the following
+// translations:
+//
+// GVA -> GPA -> SPA
+//
+// Given that the host manages channel/memory management for the guest, certain
+// code paths expect VA -> GPA translations and some may need GPA -> SPA
+// translations. We use the address translation argument to differentiate
+// between these cases.
+//
+// We use AT_PA to force GPA -> SPA translation for vidmem. On non-SR-IOV
+// systems, using IO_VASPACE_A will fall back to FERMI_VASPACE_A or the default
+// context.
+//
+#define FORCE_VMMU_TRANSLATION(pMemDesc, curAddressTranslation) \
+    ((memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) ? AT_PA : curAddressTranslation)
+
+typedef struct _memdescDestroyCallback MEM_DESC_DESTROY_CALLBACK;
+
+typedef void (MEM_DATA_RELEASE_CALL_BACK)(struct MEMORY_DESCRIPTOR *);
+
+//
+// A memory descriptor is an object that describes and can be used to manipulate
+// a block of memory. The memory can be video or system memory; it can be
+// contiguous or noncontiguous; it can be tiled, block linear, etc. However,
+// regardless of what type of memory it is, clients can use a standard set of
+// APIs to manipulate it.
+//
+DECLARE_INTRUSIVE_LIST(MEMORY_DESCRIPTOR_LIST);
+
+typedef struct MEMORY_DESCRIPTOR
+{
+    // The GPU that this memory belongs to
+    OBJGPU *pGpu;
+
+    // Flags field for optional behavior
+    NvU64 _flags;
+
+    // Size of the mapping used for this allocation. Multiple mappings on Fermi
+    // must always use the same page size.
+    NvU64 _pageSize;
+
+    // Size of the memory allocation in pages
+    NvU64 PageCount;
+
+    // Total size of the page array. Used for overflow checks.
+    NvU64 pageArraySize;
+
+    // Alignment of the memory allocation as a size in bytes
+    // XXX: would 32b work here?
+ NvU64 Alignment; + + // Size of the memory allocation requested in bytes + NvU64 Size; + + // Actual size of memory allocated to satisfy alignment. + // We report the requested size, not the actual size. A number of callers + // depend on this. + NvU64 ActualSize; + + // The information returned from osAllocPages + NvP64 _address; + void *_pMemData; + MEM_DATA_RELEASE_CALL_BACK *_pMemDataReleaseCallback; + + // When memory is allocated by a guest Virtual Machine (VM) + // it is aliased by the host RM. We store a unique guest ID + // for each piece of aliased memory to facilitate host RM mappings + // to these pages (only in case of system memory). + // XXX: would 32b work here? + NvU64 _guestId; + + // To keep track of the offset from parent memdesc + NvU64 subMemOffset; + + // + // The byte offset at which the memory allocation begins within the first + // PTE. To locate the physical address of the byte at offset i in the memory + // allocation, use the following logic: + // i += PteAdjust; + // if (PhysicallyContiguous) + // PhysAddr = PteArray[0] + i; + // else + // PhysAddr = PteArray[i >> RM_PAGE_SHIFT] + (i & RM_PAGE_MASK); + // + NvU32 PteAdjust; + + // Has the memory been allocated yet? + NvBool Allocated; + + // + // Marks that a request to deallocate memory has been called on this memdesc while it had multiple references + // NV_TRUE denotes that memFree will be called when refcount reaches 0. + // + NvBool bDeferredFree; + + // Does this use SUBALLOCATOR? + NvBool bUsingSuballocator; + + // Where does the memory live? Video, system, other + NV_ADDRESS_SPACE _addressSpace; + + // Attributes reflecting GPU caching of this memory. + NvU32 _gpuCacheAttrib; + + // Peer vid mem cacheability + NvU32 _gpuP2PCacheAttrib; + + // One of NV_MEMORY_CACHED, NV_MEMORY_UNCACHED, NV_MEMORY_WRITECOMBINED + NvU32 _cpuCacheAttrib; + + // + // This field is used on fully coherent platforms (like Blackwell+ Tegra) to decide + // whether memory should be mapped as COH/NCOH. + // + // For fully coherent platforms, the aperture fields have been repurposed + // to specify whether the GPU cache will be snooped by the CPU or other IO devices. + // + // Setting MEMDESC_CACHE_SNOOP_DEFER_TO_MAP defers making the choice to map time. + // + MEMDESC_CACHE_SNOOP gpuCacheSnoop; + + // + // This field is used on non-fully coherent platforms (like dGPU and preBlackwell Tegra) to decide + // whether memory should be mapped as COH/NCOH. + // + // For non-fully coherent platforms, these settings specify whether the CPU cache will be + // snooped by the GPU. + // + // Setting MEMDESC_CACHE_SNOOP_DEFER_TO_MAP defers making the choice to map time. + // + MEMDESC_CACHE_SNOOP cpuCacheSnoop; + + // The page kind of this memory + NvU32 _pteKind; + NvU32 _pteKindCompressed; + + // + // Scale memory allocation by this value + // + NvU32 _subDeviceAllocCount; + + // + // Reference count for the object. + // + NvU32 RefCount; + + // Reference count for duplication of memory object via RmDupObject. + NvU32 DupCount; + + // + // The HwResId is used by the device dependent HAL to keep track of + // resources attached to the memory (e.g.: compression tags, zcull). 
+    //
+    NvU32 _hwResId;
+
+    //
+    // Alloc tag for tracking internal allocations @ref NV_FB_ALLOC_RM_INTERNAL_OWNER
+    //
+    NV_FB_ALLOC_RM_INTERNAL_OWNER allocTag;
+
+    //
+    // Keep track of which heap is actually used for this allocation
+    //
+    struct Heap *pHeap;
+
+    //
+    // GFID that this memory allocation belongs to
+    //
+    NvU32 gfid;
+
+    //
+    // Keep track of the PMA_ALLOC_INFO data.
+    //
+    struct PMA_ALLOC_INFO *pPmaAllocInfo;
+
+    // Serves as the head node in a list of page handles
+    PoolPageHandleList *pPageHandleList;
+
+    //
+    // List of callbacks to call when destroying the memory descriptor
+    //
+    MEM_DESC_DESTROY_CALLBACK *_pMemDestroyCallbackList;
+
+    // Pointer to the descriptor which was used to subset the current descriptor
+    struct MEMORY_DESCRIPTOR *_pParentDescriptor;
+
+    // Count used for sanity checks
+    NvU32 childDescriptorCnt;
+
+    // Next memory descriptor in the subdevice list
+    struct MEMORY_DESCRIPTOR *_pNext;
+
+    // Pointer to the system memory descriptor which is used to back some FB
+    // content across S3/S4.
+    struct MEMORY_DESCRIPTOR *_pStandbyBuffer;
+
+    // Serves as a head node in a list of submemdescs
+    MEMORY_DESCRIPTOR_LIST *pSubMemDescList;
+
+    // Reserved for RM exclusive use
+    NvBool bRmExclusiveUse;
+
+    // Node used if strung in an intrusive linked list
+    ListNode node;
+
+    //
+    // Pointer to IOVA mappings used to back the IOMMU VAs for different IOVA spaces.
+    // Submemory descriptors only have one mapping, but the root descriptor will have
+    // one per IOVA space that the memory is mapped into.
+    //
+    struct IOVAMAPPING *_pIommuMappings;
+
+    // Kernel mapping of the memory
+    NvP64 _kernelMapping;
+    NvP64 _kernelMappingPriv;
+
+    // Internal mapping
+    void *_pInternalMapping;
+    void *_pInternalMappingPriv;
+    NvU32 _internalMappingRefCount;
+
+    // Static BAR1 mapping
+    NvU32 staticBar1MappingRefCount;
+    NvU32 staticBar1MappingKind;
+    NvU32 staticBar1DmaFlags;
+
+    // Array to hold SPA addresses when the memdesc is allocated from GPA.
+    // Valid only for SR-IOV cases.
+    RmPhysAddr *pPteSpaMappings;
+
+    //
+    // Context buffer pool from which this memdesc is to be allocated.
+    // This is controlled by PDB_PROP_GPU_MOVE_RM_BUFFERS_TO_PMA, which is
+    // enabled only for SMC today.
+    //
+    CTX_BUF_POOL_INFO *pCtxBufPool;
+
+    // Max physical address width to be overridden
+    NvU32 _overridenAddressWidth;
+
+    // We verified that the memdesc is safe to be mapped as large pages
+    NvBool bForceHugePages;
+
+    //
+    // If MEMDESC_FLAGS_ALLOC_AS_LOCALIZED, OR the physical address against this to
+    // get the address to be programmed into HW.
+    //
+    NvU64 localizedMask;
+
+    // Indicates the granularity of the mapping. Will be used to implement
+    // dynamic page sizes.
+    NvU32 pageArrayGranularity;
+
+    // NUMA node ID from which memory should be allocated
+    NvS32 numaNode;
+
+    // Array to hold EGM addresses when EGM is enabled
+    RmPhysAddr *pPteEgmMappings;
+
+    //
+    // If PhysicallyContiguous is NV_TRUE, this array consists of one element.
+    // If PhysicallyContiguous is NV_FALSE, this array is actually larger and has
+    // one entry for each physical page in the memory allocation. As a result,
+    // this structure must be allocated from the heap.
+    // If the AddressSpace is ADDR_FBMEM, each entry is an FB offset.
+    // Otherwise, each entry is a physical address on the system bus.
+    // TBD: for now, the array will be sized at one entry for every 4KB, but
+    // we probably want to optimize this later to support 64KB pages.
+    //
+    RmPhysAddr _pteArray[1];
+    //!!! Place nothing behind PteArray!!!
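+    // (The warning above matters because the descriptor is over-allocated so
+    // that _pteArray can extend past its single declared entry: a
+    // noncontiguous descriptor occupies roughly sizeof(MEMORY_DESCRIPTOR) +
+    // (PageCount - 1) * sizeof(RmPhysAddr) bytes, and any member placed after
+    // the array would be overlaid by the extra PTE entries.)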
+} MEMORY_DESCRIPTOR, *PMEMORY_DESCRIPTOR;
+
+MAKE_INTRUSIVE_LIST(MEMORY_DESCRIPTOR_LIST, MEMORY_DESCRIPTOR, node);
+
+//
+// Common address space lists
+//
+extern const NV_ADDRESS_SPACE ADDRLIST_FBMEM_PREFERRED[];
+extern const NV_ADDRESS_SPACE ADDRLIST_SYSMEM_PREFERRED[];
+extern const NV_ADDRESS_SPACE ADDRLIST_FBMEM_ONLY[];
+extern const NV_ADDRESS_SPACE ADDRLIST_SYSMEM_ONLY[];
+
+NvU32 memdescAddrSpaceListToU32(const NV_ADDRESS_SPACE *addrlist);
+const NV_ADDRESS_SPACE *memdescU32ToAddrSpaceList(NvU32 index);
+
+NV_STATUS _memdescUpdateSpaArray(PMEMORY_DESCRIPTOR pMemDesc);
+
+// Create a memory descriptor data structure (without allocating any physical
+// storage).
+NV_STATUS memdescCreate(MEMORY_DESCRIPTOR **ppMemDesc, OBJGPU *pGpu, NvU64 Size,
+                        NvU64 alignment, NvBool PhysicallyContiguous,
+                        NV_ADDRESS_SPACE AddressSpace, NvU32 CpuCacheAttrib, NvU64 Flags);
+
+// Note: pGpu is checked for NULL before gpumgrGetBcEnabledStatus() dereferences it.
+#define MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE_FB_BC_ONLY(pGpu, addressSpace) \
+    (((pGpu != NULL) && gpumgrGetBcEnabledStatus(pGpu) && (addressSpace == ADDR_FBMEM)) ? MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE : MEMDESC_FLAGS_NONE)
+
+// Initialize a caller-supplied memory descriptor for use with memdescDescribe()
+void memdescCreateExisting(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu, NvU64 Size,
+                           NV_ADDRESS_SPACE AddressSpace,
+                           NvU32 CpuCacheAttrib, NvU64 Flags);
+
+// Increment the reference count
+void memdescAddRef(MEMORY_DESCRIPTOR *pMemDesc);
+
+// Decrement the reference count
+void memdescRemoveRef(MEMORY_DESCRIPTOR *pMemDesc);
+
+// Decrement the reference count and reclaim any resources when possible
+void memdescDestroy(MEMORY_DESCRIPTOR *pMemDesc);
+
+//
+// The destroy callback is called when the memory descriptor is
+// destroyed with memdescDestroy().
+//
+// The caller is responsible for managing the memory containing the
+// callback structure.
+//
+typedef void (MemDescDestroyCallBack)(OBJGPU *, void *pObject, MEMORY_DESCRIPTOR *);
+struct _memdescDestroyCallback
+{
+    MemDescDestroyCallBack *destroyCallback;
+    void *pObject;
+    MEM_DESC_DESTROY_CALLBACK *pNext;
+};
+void memdescAddDestroyCallback(MEMORY_DESCRIPTOR *pMemDesc, MEM_DESC_DESTROY_CALLBACK *);
+void memdescRemoveDestroyCallback(MEMORY_DESCRIPTOR *pMemDesc, MEM_DESC_DESTROY_CALLBACK *);
+
+// Allocate physical storage for a memory descriptor and fill in its PteArray
+NV_STATUS memdescAlloc(MEMORY_DESCRIPTOR *pMemDesc);
+
+// Allocate memory from one of the possible locations specified in pList.
+NV_STATUS memdescAllocList(MEMORY_DESCRIPTOR *pMemDesc, const NV_ADDRESS_SPACE *pList);
+
+// Free physical storage for a memory descriptor
+void memdescFree(MEMORY_DESCRIPTOR *pMemDesc);
+
+// Lock the paged virtual memory
+NV_STATUS memdescLock(MEMORY_DESCRIPTOR *pMemDesc);
+
+// Unlock the paged virtual memory
+NV_STATUS memdescUnlock(MEMORY_DESCRIPTOR *pMemDesc);
+
+// Allocate a CPU mapping of an arbitrary subrange of the memory.
+// 64-bit clean (a Mac can have a 32-bit kernel pointer and 64-bit client pointers)
+NV_STATUS memdescMap(MEMORY_DESCRIPTOR *pMemDesc, NvU64 Offset, NvU64 Size,
+                     NvBool Kernel, NvU32 Protect, NvP64 *pAddress, NvP64 *pPriv);
+
+// Free a CPU mapping of an arbitrary subrange of the memory.
+void memdescUnmap(MEMORY_DESCRIPTOR *pMemDesc, NvBool Kernel,
+                  NvP64 Address, NvP64 Priv);
+
+// Allocate a CPU mapping of an arbitrary subrange of the memory.
+// Fails unless Kernel == NV_TRUE.
+NV_STATUS memdescMapOld(MEMORY_DESCRIPTOR *pMemDesc, NvU64 Offset, NvU64 Size,
+                        NvBool Kernel, NvU32 Protect, void **pAddress, void **pPriv);
+
+// Free a CPU mapping of an arbitrary subrange of the memory.
+void memdescUnmapOld(MEMORY_DESCRIPTOR *pMemDesc, NvBool Kernel,
+                     void *Address, void *Priv);
+
+// Fill in a MEMORY_DESCRIPTOR with a description of a preexisting contiguous
+// memory allocation. It should already be initialized with
+// memdescCreate*().
+void memdescDescribe(MEMORY_DESCRIPTOR *pMemDesc,
+                     NV_ADDRESS_SPACE AddressSpace,
+                     RmPhysAddr Base, NvU64 Size);
+
+// Fill in a MEMORY_DESCRIPTOR with the physical page addresses returned by PMA.
+// It should already be initialized with memdescCreate*().
+void memdescFillPages(MEMORY_DESCRIPTOR *pMemDesc, NvU32 offset,
+                      NvU64 *pPages, NvU32 pageCount, NvU64 pageSize);
+
+// Create a MEMORY_DESCRIPTOR for a subset of an existing memory allocation.
+// The new MEMORY_DESCRIPTOR must be freed with memdescDestroy.
+NV_STATUS memdescCreateSubMem(MEMORY_DESCRIPTOR **ppMemDescNew,
+                              MEMORY_DESCRIPTOR *pMemDesc,
+                              OBJGPU *pGpu, NvU64 Offset, NvU64 Size);
+
+// Compute the physical address of a byte within a MEMORY_DESCRIPTOR
+RmPhysAddr memdescGetPhysAddr(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU64 offset);
+
+// Compute the physical address of a byte within a MEMORY_DESCRIPTOR for a PTE or HW
+RmPhysAddr memdescGetPtePhysAddr(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU64 offset);
+
+// Compute count physical addresses within a MEMORY_DESCRIPTOR, starting at the
+// given offset and advancing by stride for each consecutive address.
+void memdescGetPhysAddrs(MEMORY_DESCRIPTOR *pMemDesc,
+                         ADDRESS_TRANSLATION addressTranslation,
+                         NvU64 offset,
+                         NvU64 stride,
+                         NvU64 count,
+                         RmPhysAddr *pAddresses);
+
+// Compute count physical addresses for a PTE or HW within a MEMORY_DESCRIPTOR,
+// starting at the given offset and advancing by stride for each consecutive
+// address.
+void memdescGetPtePhysAddrs(MEMORY_DESCRIPTOR *pMemDesc,
+                            ADDRESS_TRANSLATION addressTranslation,
+                            NvU64 offset,
+                            NvU64 stride,
+                            NvU64 count,
+                            RmPhysAddr *pAddresses);
+
+// Compute count physical addresses within a MEMORY_DESCRIPTOR for a specific
+// GPU, starting at the given offset and advancing by stride for each
+// consecutive address.
+void memdescGetPhysAddrsForGpu(MEMORY_DESCRIPTOR *pMemDesc,
+                               OBJGPU *pGpu,
+                               ADDRESS_TRANSLATION addressTranslation,
+                               NvU64 offset,
+                               NvU64 stride,
+                               NvU64 count,
+                               RmPhysAddr *pAddresses);
+
+// Compute count physical addresses to be encoded into PTEs within a
+// MEMORY_DESCRIPTOR for a specific GPU, starting at the given offset and
+// advancing by stride for each consecutive address.
+void memdescGetPtePhysAddrsForGpu(MEMORY_DESCRIPTOR *pMemDesc,
+                                  OBJGPU *pGpu,
+                                  ADDRESS_TRANSLATION addressTranslation,
+                                  NvU64 offset,
+                                  NvU64 stride,
+                                  NvU64 count,
+                                  RmPhysAddr *pAddresses);
+
+// Obtains one of the PTEs from the MEMORY_DESCRIPTOR. Assumes 4KB pages,
+// and works for either contiguous or noncontiguous descriptors.
+RmPhysAddr memdescGetPte(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU32 PteIndex);
+
+void memdescSetPte(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU32 PteIndex, RmPhysAddr PhysAddr);
+
+// Obtains the PteArray from the MEMORY_DESCRIPTOR for the specified GPU.
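+// (In broadcast configurations a subdevice descriptor can carry different
+// backing pages per GPU, hence this per-GPU accessor; the memdescGetPteArray()
+// wrapper below simply passes pMemDesc->pGpu.)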
+RmPhysAddr * memdescGetPteArrayForGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu, ADDRESS_TRANSLATION addressTranslation); + +/*! + * @brief Obtains the PteArray from the MEMORY_DESCRIPTOR. + * + * @param[in] pMemDesc Memory descriptor to use + * @param[in] addressTranslation Address translation identifier + * + * @returns PageArray + */ +static inline RmPhysAddr * +memdescGetPteArray(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation) +{ + return memdescGetPteArrayForGpu(pMemDesc, pMemDesc->pGpu, addressTranslation); +} + +// Obtains the PteArray size from the MEMORY_DESCRIPTOR based on the mmuContext. +NvU32 memdescGetPteArraySize(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation); + +// Return the aperture of the NV_ADDRESS_SPACE as a null terminated string. +// Useful for print statements. +const char* memdescGetApertureString(NV_ADDRESS_SPACE addrSpace); + +// Return true if two MEMORY_DESCRIPTOR are equal +NvBool memdescDescIsEqual(MEMORY_DESCRIPTOR *pMemDescOne, MEMORY_DESCRIPTOR *pMemDescTwo); + +// Retrieve the per-GPU memory descriptor for a subdevice +MEMORY_DESCRIPTOR *memdescGetMemDescFromSubDeviceInst(MEMORY_DESCRIPTOR *pMemDesc, NvU32 subDeviceInst); + +// Retrieve the per-GPU memory descriptor for a GPU +MEMORY_DESCRIPTOR *memdescGetMemDescFromGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu); + +// Retrieve the per-GPU memory descriptor at an index +MEMORY_DESCRIPTOR *memdescGetMemDescFromIndex(MEMORY_DESCRIPTOR *pMemDesc, NvU32 index); + +// Print information on memory descriptor +void memdescPrintMemdesc(MEMORY_DESCRIPTOR *pMemDesc, NvBool bPrintIndividualPages, const char *pPrefixMessage); + +// Get the page offset for an arbitrary power of two page size +NvU64 memdescGetPageOffset(MEMORY_DESCRIPTOR *pMemDesc, NvU64 pageSize); + +// +// Internal APIs for the IOVASPACE to manage IOMMU mappings in a memdesc. +// +// Note that the external APIs are memdescMapIommu(), +// memdescUnmapIommu() and memdescGetIommuMap(). +// +NV_STATUS memdescAddIommuMap(PMEMORY_DESCRIPTOR pMemDesc, struct IOVAMAPPING *pIommuMap); +void memdescRemoveIommuMap(PMEMORY_DESCRIPTOR pMemDesc, struct IOVAMAPPING *pIommuMap); + +// +// Map and unmap IOMMU for the specified VA space +// +// Each memdescUnmapIommu() call has to be paired with a previous successful +// memdescMapIommu() call for the same VA space. The calls are refcounted for +// each VA space and only the last Unmap will remove the mappings. +// +// The caller has to guarantee that before the VA space is destroyed, either the +// mapping is explicitly unmapped with memdescUnmapIommu() or the memdesc is +// freed (or destroyed for memdescs that are not memdescFree()d). +// +NV_STATUS memdescMapIommu(PMEMORY_DESCRIPTOR pMemDesc, NvU32 vaspaceId); +void memdescUnmapIommu(PMEMORY_DESCRIPTOR pMemDesc, NvU32 vaspaceId); + +// Returns the IOVA mapping created by memdescMapIommu(). 
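+//
+// Typical pairing, as an illustrative sketch only (assumes a valid pMemDesc
+// and an IOVA space id vaspaceId; real callers add error handling):
+//
+//     if (memdescMapIommu(pMemDesc, vaspaceId) == NV_OK)
+//     {
+//         struct IOVAMAPPING *pIovaMap = memdescGetIommuMap(pMemDesc, vaspaceId);
+//         // ... program the device with IOVAs taken from pIovaMap ...
+//         memdescUnmapIommu(pMemDesc, vaspaceId);
+//     }
+//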
+struct IOVAMAPPING *memdescGetIommuMap(PMEMORY_DESCRIPTOR pMemDesc, NvU32 vaspaceId); + +// +// Check subdevice consistency functions +// +void memdescCheckSubDevicePageSizeConsistency(OBJGPU *pGpu, PMEMORY_DESCRIPTOR pMemDesc, struct OBJVASPACE *pVAS, + NvU64 pageSize, NvU64 pageOffset); +void memdescCheckSubDeviceMemContiguityConsistency(OBJGPU *pGpu, PMEMORY_DESCRIPTOR pMemDesc, struct OBJVASPACE *pVAS, + NvBool bIsMemContiguous); +NV_STATUS memdescCheckSubDeviceKindComprConsistency(OBJGPU *pGpu, PMEMORY_DESCRIPTOR pMemDesc, struct OBJVASPACE *pVAS, + NvU32 kind, COMPR_INFO *pComprInfo); + +// +// Accessor functions +// +void memdescSetHeapOffset(MEMORY_DESCRIPTOR *pMemDesc, RmPhysAddr fbOffset); +void memdescSetCpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc, NvU32 cpuCacheAttrib); +void memdescSetGpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc, NvU32 GpuCacheAttrib); +NvU32 memdescGetGpuP2PCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetGpuP2PCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc, NvU32 GpuCacheAttrib); +NvU32 memdescGetPteKindForGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu); +void memdescSetPteKindForGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu, NvU32 pteKind); +NvU32 memdescGetPteKindCompressed(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetPteKindCompressed(MEMORY_DESCRIPTOR *pMemDesc, NvU32 pteKindCmpr); +NvP64 memdescGetKernelMapping(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetKernelMapping(MEMORY_DESCRIPTOR *pMemDesc, NvP64 kernelMapping); +NvP64 memdescGetKernelMappingPriv(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetKernelMappingPriv(MEMORY_DESCRIPTOR *pMemDesc, NvP64 kernelMappingPriv); +MEMORY_DESCRIPTOR *memdescGetStandbyBuffer(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetStandbyBuffer(MEMORY_DESCRIPTOR *pMemDesc, MEMORY_DESCRIPTOR *pStandbyBuffer); +void memdescSetDestroyCallbackList(MEMORY_DESCRIPTOR *pMemDesc, MEM_DESC_DESTROY_CALLBACK *pCb); +NvU64 memdescGetGuestId(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetGuestId(MEMORY_DESCRIPTOR *pMemDesc, NvU64 guestId); +NvBool memdescGetFlag(MEMORY_DESCRIPTOR *pMemDesc, NvU64 flag); +void memdescSetFlag(MEMORY_DESCRIPTOR *pMemDesc, NvU64 flag, NvBool bValue); +NvP64 memdescGetAddress(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetAddress(MEMORY_DESCRIPTOR *pMemDesc, NvP64 pAddress); +void *memdescGetMemData(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetMemData(MEMORY_DESCRIPTOR *pMemDesc, void *pMemData, MEM_DATA_RELEASE_CALL_BACK *pMemDataReleaseCallback); +NvBool memdescGetVolatility(MEMORY_DESCRIPTOR *pMemDesc); +NvBool memdescGetContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation); +void memdescSetContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvBool isContiguous); +NvBool memdescCheckContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation); +NV_ADDRESS_SPACE memdescGetAddressSpace(PMEMORY_DESCRIPTOR pMemDesc); +NvU64 memdescGetPageSize(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation); +void memdescSetPageSize(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU64 pageSize); +PMEMORY_DESCRIPTOR memdescGetRootMemDesc(PMEMORY_DESCRIPTOR pMemDesc, NvU64 *pRootOffset); +void memdescSetCustomHeap(PMEMORY_DESCRIPTOR, MEMDESC_CUSTOM_HEAP heap); +MEMDESC_CUSTOM_HEAP memdescGetCustomHeap(PMEMORY_DESCRIPTOR); +NV_STATUS memdescSetPageArrayGranularity(MEMORY_DESCRIPTOR *pMemDesc, NvU64 pageArrayGranularity); +NvBool memdescAcquireRmExclusiveUse(MEMORY_DESCRIPTOR *pMemDesc); +NV_STATUS 
memdescFillMemdescForPhysAttr(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation,
+                              NvU64 *pOffset, NvU32 *pMemAperture, NvU32 *pMemKind,
+                              NvU32 *pGpuCacheAttr, NvU32 *pGpuP2PCacheAttr, NvU64 *contigSegmentSize);
+NvBool memdescIsEgm(MEMORY_DESCRIPTOR *pMemDesc);
+NvU64 memdescGetAdjustedPageSize(MEMORY_DESCRIPTOR *pMemDesc);
+
+/*!
+ * @brief Get PTE kind
+ *
+ * @param[in]  pMemDesc  Memory descriptor pointer
+ *
+ * @returns Current PTE kind value.
+ */
+static inline NvU32
+memdescGetPteKind(PMEMORY_DESCRIPTOR pMemDesc)
+{
+    return memdescGetPteKindForGpu(pMemDesc, pMemDesc->pGpu);
+}
+
+/*!
+ * @brief Set PTE kind.
+ *
+ * @param[in]  pMemDesc  Memory descriptor pointer
+ * @param[in]  pteKind   New PTE kind
+ *
+ * @returns nothing
+ */
+static inline void
+memdescSetPteKind(PMEMORY_DESCRIPTOR pMemDesc, NvU32 pteKind)
+{
+    memdescSetPteKindForGpu(pMemDesc, pMemDesc->pGpu, pteKind);
+}
+
+/*!
+ * @brief Get HW resource identifier (HwResId)
+ *
+ * TODO: Need to ensure this is checked per subdevice only.
+ *
+ * @param[in]  pMemDesc  Memory descriptor pointer
+ *
+ * @returns Current HW resource identifier
+ */
+static inline NvU32
+memdescGetHwResId(PMEMORY_DESCRIPTOR pMemDesc)
+{
+    return pMemDesc->_hwResId;
+}
+
+/*!
+ * @brief Set HW resource identifier (HwResId)
+ *
+ * @param[in]  pMemDesc  Memory descriptor pointer
+ * @param[in]  hwResId   New HW resource identifier
+ *
+ * @returns nothing
+ */
+static inline void
+memdescSetHwResId(PMEMORY_DESCRIPTOR pMemDesc, NvU32 hwResId)
+{
+    pMemDesc->_hwResId = hwResId;
+}
+
+/*!
+ * @brief Get the mem destroy callback list pointer
+ *
+ * @param[in]  pMemDesc  Memory descriptor pointer
+ *
+ * @returns Pointer to the mem destroy callback list
+ */
+static inline MEM_DESC_DESTROY_CALLBACK *
+memdescGetDestroyCallbackList(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return pMemDesc->_pMemDestroyCallbackList;
+}
+
+/*!
+ * @brief Get the byte offset relative to the root memory descriptor.
+ *
+ * The root memory descriptor is the top-level memory descriptor with no
+ * parent, from which this memory descriptor was derived.
+ *
+ * @param[in]  pMemDesc  Pointer to the memory descriptor.
+ *
+ * @returns the byte offset relative to the root memory descriptor.
+ */
+static inline NvU64
+memdescGetRootOffset(PMEMORY_DESCRIPTOR pMemDesc)
+{
+    NvU64 rootOffset = 0;
+    (void)memdescGetRootMemDesc(pMemDesc, &rootOffset);
+    return rootOffset;
+}
+
+/*!
+ * @brief Get CPU cache attributes
+ *
+ * @param[in]  pMemDesc  Memory descriptor pointer
+ *
+ * @returns Current CPU cache attributes
+ */
+static inline NvU32
+memdescGetCpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return pMemDesc->_cpuCacheAttrib;
+}
+
+/*!
+ * @brief Get GPU cache attributes
+ *
+ * @param[in]  pMemDesc  Memory descriptor pointer
+ *
+ * @returns Current GPU cache attributes
+ */
+static inline NvU32
+memdescGetGpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return pMemDesc->_gpuCacheAttrib;
+}
+
+/*!
+ * @brief Return the pte adjust
+ *
+ * PteAdjust is zero whenever the memory is allocated by RM, as allocations are
+ * always page-size aligned. However, memory descriptors can be created on
+ * pre-allocated addresses + offset that aren't page aligned; PteAdjust is
+ * non-zero in such cases. We do not allow the memdescDescribe operation (i.e.
+ * memory descriptors created on a pre-allocated address) for subdevice
+ * memdescs, and hence the top-level memdesc is always used to access the pte
+ * adjust.
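+ *
+ * As a worked example with hypothetical values: describing a pre-allocated
+ * region that starts at physical address 0x10000200 records the page-aligned
+ * address 0x10000000 in PteArray[0] and a PteAdjust of 0x200, per the
+ * addressing rule documented on the PteAdjust field above.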
+ *
+ * @param[in]  pMemDesc  Memory descriptor to use
+ *
+ * @returns PteAdjust
+ */
+static inline NvU32
+memdescGetPteAdjust(PMEMORY_DESCRIPTOR pMemDesc)
+{
+    return pMemDesc->PteAdjust;
+}
+
+/*!
+ * @brief Get the subdevice allocation count.
+ *
+ * @param[in]  pMemDesc  Memory descriptor pointer
+ *
+ * @returns Current subdevice allocation count value.
+ */
+static inline NvU32
+memdescGetSubDeviceAllocCount(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return pMemDesc->_subDeviceAllocCount;
+}
+
+/*!
+ * @brief Get the memory descriptor of the parent
+ *
+ * @param[in]  pMemDesc  Memory descriptor pointer
+ *
+ * @returns Memory descriptor of the parent
+ */
+static inline MEMORY_DESCRIPTOR *
+memdescGetParentDescriptor(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return pMemDesc->_pParentDescriptor;
+}
+
+/*!
+ * @brief Set the address space of the memory descriptor
+ *
+ * @param[in]  pMemDesc      Memory descriptor used
+ * @param[in]  addressSpace  Address space
+ *
+ * @returns nothing
+ */
+static inline void
+memdescSetAddressSpace(PMEMORY_DESCRIPTOR pMemDesc, NV_ADDRESS_SPACE addressSpace)
+{
+    pMemDesc->_addressSpace = addressSpace;
+}
+
+/*!
+ * @brief Return the size
+ *
+ * @param[in]  pMemDesc  Memory descriptor to use
+ *
+ * @returns Size
+ */
+static inline NvU64
+memdescGetSize(PMEMORY_DESCRIPTOR pMemDesc)
+{
+    return pMemDesc->Size;
+}
+
+/*!
+ * @brief Set the CPU NUMA node to allocate memory from
+ *
+ * @param[in]  pMemDesc  Memory descriptor to use
+ * @param[in]  numaNode  NUMA node to allocate memory from
+ */
+static NV_INLINE void
+memdescSetNumaNode(MEMORY_DESCRIPTOR *pMemDesc, NvS32 numaNode)
+{
+    pMemDesc->numaNode = numaNode;
+}
+
+/*!
+ * @brief Get the CPU NUMA node to allocate memory from
+ *
+ * @param[in]  pMemDesc  Memory descriptor to use
+ *
+ * @returns NUMA node to allocate memory from
+ */
+static NV_INLINE NvS32
+memdescGetNumaNode(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return pMemDesc->numaNode;
+}
+
+/*!
+ * @brief Checks if subdevice memory descriptors are present
+ *
+ * See memdescGetMemDescFromSubDeviceInst for an explanation of subdevice
+ * memory descriptors.
+ *
+ * @param[in]  pMemDesc  Memory descriptor to query
+ *
+ * @returns NV_TRUE if subdevice memory descriptors exist
+ */
+static NV_INLINE NvBool
+memdescHasSubDeviceMemDescs(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return (pMemDesc->_subDeviceAllocCount > 1);
+}
+
+/*!
+ * @brief Checks if the memory descriptor describes memory that is a submemory
+ *
+ * @param[in]  pMemDesc  Memory descriptor to query
+ *
+ * @returns NV_TRUE if it is a submemory desc, NV_FALSE otherwise.
+ */
+static NV_INLINE NvBool
+memdescIsSubMemoryMemDesc(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return pMemDesc->_pParentDescriptor != NULL ? NV_TRUE : NV_FALSE;
+}
+
+/*!
+ * @brief Override the registry INST_LOC two-bit enum to an aperture (list)
+ * plus a CPU mapping attribute.
+ *
+ * The loc parameter uses the NV_REG_STR_RM_INST_LOC defines.
+ * The caller must set the initial default values.
+ */
+void memdescOverrideInstLoc(NvU32 loc, const char *name, NV_ADDRESS_SPACE *pAddrSpace, NvU32 *pCpuMappingAttr);
+void memdescOverrideInstLocList(NvU32 loc, const char *name, const NV_ADDRESS_SPACE **ppAllocList, NvU32 *pCpuMappingAttr);
+
+/*!
+ * @brief Override the physical system address limit.
+ */
+void memdescOverridePhysicalAddressWidthWindowsWAR(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 addressWidth);
+
+/*!
+ * @brief Send a memory descriptor from CPU-RM to GSP
+ *
+ * This function creates a MemoryList object with the MEMORY_DESCRIPTOR
+ * information on CPU-RM. It then uses the memRegisterWithGsp API to create a
+ * corresponding MemoryList object on GSP-RM with the same handle as that on
+ * CPU-RM.
+ *
+ * This MemoryList object has the same MEMORY_DESCRIPTOR info as the input
+ * pMemDesc. The CPU-RM handle can be sent to GSP-RM and then used on the GSP
+ * end to retrieve the MemoryList object and, from it, the corresponding
+ * MEMORY_DESCRIPTOR.
+ *
+ * @param[in]  pGpu      OBJGPU pointer
+ * @param[in]  pMemDesc  MemDesc pointer
+ * @param[out] pHandle   Pointer to the handle of the MemoryList object
+ *
+ * @returns NV_STATUS
+ */
+NV_STATUS memdescSendMemDescToGSP(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvHandle *pHandle);
+
+// Cache maintenance functions
+void memdescFlushCpuCaches(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc);
+
+// Map a memory descriptor for RM internal access
+void* memdescMapInternal(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags);
+void memdescUnmapInternal(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags);
+
+/*!
+ * @brief Set the name of the surface.
+ *
+ * @param[in]  pGpu      OBJGPU pointer.
+ * @param[in]  pMemDesc  MEMORY_DESCRIPTOR pointer that the name is to be set for.
+ * @param[in]  name      const char pointer to the name to be set.
+ * @param[in]  suffix    const char pointer to the name suffix to be set.
+ */
+void memdescSetName(OBJGPU*, MEMORY_DESCRIPTOR *pMemDesc, const char *name, const char *suffix);
+
+/*!
+ * @brief Sets the pageCount, granularity, and actual size described by the memdesc
+ *
+ * @param[in]  pMemDesc     MEMORY_DESCRIPTOR pointer of the memdesc being populated.
+ * @param[in]  actualSize   Size of the allocation after accounting for tracking granularity.
+ * @param[in]  granularity  Tracking granularity of the memory.
+ *
+ * @returns NV_OK on success, NV_ERR_BUFFER_TOO_SMALL if page array overflow is detected.
+ */
+static NV_INLINE NV_STATUS
+memdescSetAllocSizeFields(MEMORY_DESCRIPTOR *pMemDesc, NvU64 actualSize, NvU32 granularity)
+{
+    NvU64 pageCount = actualSize >> GET_PAGE_SHIFT(granularity);
+
+    if (!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) &&
+        pageCount > pMemDesc->pageArraySize)
+    {
+        return NV_ERR_BUFFER_TOO_SMALL;
+    }
+
+    pMemDesc->PageCount = pageCount;
+    pMemDesc->ActualSize = actualSize;
+    pMemDesc->pageArrayGranularity = granularity;
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Get the GPU cache snoop setting
+ *
+ * @param[in]  pMemDesc  Memory descriptor pointer
+ *
+ * @returns Current GPU cache snoop setting
+ */
+static NV_INLINE MEMDESC_CACHE_SNOOP
+memdescGetGpuCacheSnoop(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return pMemDesc->gpuCacheSnoop;
+}
+
+/*!
+ * @brief Set the GPU cache snoop setting
+ *
+ * @param[in]  pMemDesc       Memory descriptor pointer
+ * @param[in]  gpuCacheSnoop  New GPU cache snoop setting
+ *
+ * @returns nothing
+ */
+static NV_INLINE void
+memdescSetGpuCacheSnoop(MEMORY_DESCRIPTOR *pMemDesc, MEMDESC_CACHE_SNOOP gpuCacheSnoop)
+{
+    pMemDesc->gpuCacheSnoop = gpuCacheSnoop;
+}
+
+/*!
+ * @brief Get the CPU cache snoop setting
+ *
+ * @param[in]  pMemDesc  Memory descriptor pointer
+ *
+ * @returns Current CPU cache snoop setting
+ */
+static NV_INLINE MEMDESC_CACHE_SNOOP
+memdescGetCpuCacheSnoop(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return pMemDesc->cpuCacheSnoop;
+}
+
+/*!
+ * @brief Set CPU cache snoop setting + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] cpuCacheSnoop New CPU cache snoop setting + * + * @returns nothing + */ +static NV_INLINE void +memdescSetCpuCacheSnoop(MEMORY_DESCRIPTOR *pMemDesc, MEMDESC_CACHE_SNOOP cpuCacheSnoop) +{ + pMemDesc->cpuCacheSnoop = cpuCacheSnoop; +} + +#endif // _MEMDESC_H_ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_MEM_DESC_NVOC_H_ diff --git a/src/nvidia/generated/g_mem_list_nvoc.h b/src/nvidia/generated/g_mem_list_nvoc.h new file mode 100644 index 0000000..2f373d9 --- /dev/null +++ b/src/nvidia/generated/g_mem_list_nvoc.h @@ -0,0 +1,330 @@ + +#ifndef _G_MEM_LIST_NVOC_H_ +#define _G_MEM_LIST_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_mem_list_nvoc.h" + +#ifndef _MEMORY_LIST_H_ +#define _MEMORY_LIST_H_ + +#include "mem_mgr/mem.h" + +/*! + * These classes are used by the vGPU support to create memory objects for memory + * assigned to a guest VM. + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_MEM_LIST_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__MemoryList; +struct NVOC_METADATA__Memory; +struct NVOC_VTABLE__MemoryList; + + +struct MemoryList { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__MemoryList *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Memory __nvoc_base_Memory; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^4 + struct RsResource *__nvoc_pbase_RsResource; // res super^3 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^2 + struct Memory *__nvoc_pbase_Memory; // mem super + struct MemoryList *__nvoc_pbase_MemoryList; // memlist +}; + + +// Vtable with 26 per-class function pointers +struct NVOC_VTABLE__MemoryList { + NvBool (*__memlistCanCopy__)(struct MemoryList * /*this*/); // virtual override (res) base (mem) + NV_STATUS (*__memlistIsDuplicate__)(struct MemoryList * /*this*/, NvHandle, NvBool *); // virtual inherited (mem) base (mem) + NV_STATUS (*__memlistGetMapAddrSpace__)(struct MemoryList * /*this*/, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (mem) base (mem) + NV_STATUS (*__memlistControl__)(struct MemoryList * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (mem) base (mem) + NV_STATUS (*__memlistMap__)(struct MemoryList * /*this*/, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (mem) base (mem) + NV_STATUS (*__memlistUnmap__)(struct MemoryList * /*this*/, CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (mem) base (mem) + NV_STATUS (*__memlistGetMemInterMapParams__)(struct MemoryList * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (mem) base (mem) + NV_STATUS (*__memlistCheckMemInterUnmap__)(struct MemoryList * /*this*/, NvBool); // inline virtual inherited (mem) base (mem) body + NV_STATUS (*__memlistGetMemoryMappingDescriptor__)(struct MemoryList * /*this*/, MEMORY_DESCRIPTOR **); // virtual inherited (mem) base (mem) + NV_STATUS (*__memlistCheckCopyPermissions__)(struct MemoryList * /*this*/, struct OBJGPU *, struct Device *); // inline virtual inherited (mem) base (mem) body + NV_STATUS (*__memlistIsReady__)(struct MemoryList * /*this*/, NvBool); // virtual inherited (mem) base (mem) + NvBool (*__memlistIsGpuMapAllowed__)(struct MemoryList * /*this*/, struct OBJGPU *); // inline virtual inherited (mem) base (mem) body + NvBool (*__memlistIsExportAllowed__)(struct MemoryList * /*this*/); // inline virtual inherited (mem) base (mem) body + NvBool (*__memlistAccessCallback__)(struct MemoryList * /*this*/, RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (mem) + NvBool (*__memlistShareCallback__)(struct MemoryList * /*this*/, RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (rmres) base (mem) + NV_STATUS (*__memlistControlSerialization_Prologue__)(struct MemoryList * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + void (*__memlistControlSerialization_Epilogue__)(struct MemoryList * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + NV_STATUS (*__memlistControl_Prologue__)(struct MemoryList * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + void (*__memlistControl_Epilogue__)(struct MemoryList * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + void (*__memlistPreDestruct__)(struct MemoryList * /*this*/); // virtual inherited (res) base (mem) + NV_STATUS (*__memlistControlFilter__)(struct MemoryList * /*this*/, struct 
CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (mem) + NvBool (*__memlistIsPartialUnmapSupported__)(struct MemoryList * /*this*/); // inline virtual inherited (res) base (mem) body + NV_STATUS (*__memlistMapTo__)(struct MemoryList * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (mem) + NV_STATUS (*__memlistUnmapFrom__)(struct MemoryList * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (mem) + NvU32 (*__memlistGetRefCount__)(struct MemoryList * /*this*/); // virtual inherited (res) base (mem) + void (*__memlistAddAdditionalDependants__)(struct RsClient *, struct MemoryList * /*this*/, RsResourceRef *); // virtual inherited (res) base (mem) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__MemoryList { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Memory metadata__Memory; + const struct NVOC_VTABLE__MemoryList vtable; +}; + +#ifndef __NVOC_CLASS_MemoryList_TYPEDEF__ +#define __NVOC_CLASS_MemoryList_TYPEDEF__ +typedef struct MemoryList MemoryList; +#endif /* __NVOC_CLASS_MemoryList_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryList +#define __nvoc_class_id_MemoryList 0x298f78 +#endif /* __nvoc_class_id_MemoryList */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryList; + +#define __staticCast_MemoryList(pThis) \ + ((pThis)->__nvoc_pbase_MemoryList) + +#ifdef __nvoc_mem_list_h_disabled +#define __dynamicCast_MemoryList(pThis) ((MemoryList*) NULL) +#else //__nvoc_mem_list_h_disabled +#define __dynamicCast_MemoryList(pThis) \ + ((MemoryList*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(MemoryList))) +#endif //__nvoc_mem_list_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_MemoryList(MemoryList**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_MemoryList(MemoryList**, Dynamic*, NvU32, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_MemoryList(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_MemoryList((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define memlistCanCopy_FNPTR(pMemoryList) pMemoryList->__nvoc_metadata_ptr->vtable.__memlistCanCopy__ +#define memlistCanCopy(pMemoryList) memlistCanCopy_DISPATCH(pMemoryList) +#define memlistIsDuplicate_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsDuplicate__ +#define memlistIsDuplicate(pMemory, hMemory, pDuplicate) memlistIsDuplicate_DISPATCH(pMemory, hMemory, pDuplicate) +#define memlistGetMapAddrSpace_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMapAddrSpace__ +#define memlistGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) memlistGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define memlistControl_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memControl__ +#define memlistControl(pMemory, pCallContext, pParams) memlistControl_DISPATCH(pMemory, pCallContext, pParams) +#define memlistMap_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memMap__ +#define memlistMap(pMemory, pCallContext, pParams, pCpuMapping) memlistMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define memlistUnmap_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memUnmap__ +#define memlistUnmap(pMemory, pCallContext, pCpuMapping) 
memlistUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define memlistGetMemInterMapParams_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMemInterMapParams__ +#define memlistGetMemInterMapParams(pMemory, pParams) memlistGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define memlistCheckMemInterUnmap_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memCheckMemInterUnmap__ +#define memlistCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) memlistCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define memlistGetMemoryMappingDescriptor_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMemoryMappingDescriptor__ +#define memlistGetMemoryMappingDescriptor(pMemory, ppMemDesc) memlistGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define memlistCheckCopyPermissions_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memCheckCopyPermissions__ +#define memlistCheckCopyPermissions(pMemory, pDstGpu, pDstDevice) memlistCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, pDstDevice) +#define memlistIsReady_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsReady__ +#define memlistIsReady(pMemory, bCopyConstructorContext) memlistIsReady_DISPATCH(pMemory, bCopyConstructorContext) +#define memlistIsGpuMapAllowed_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsGpuMapAllowed__ +#define memlistIsGpuMapAllowed(pMemory, pGpu) memlistIsGpuMapAllowed_DISPATCH(pMemory, pGpu) +#define memlistIsExportAllowed_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsExportAllowed__ +#define memlistIsExportAllowed(pMemory) memlistIsExportAllowed_DISPATCH(pMemory) +#define memlistAccessCallback_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define memlistAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) memlistAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define memlistShareCallback_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define memlistShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) memlistShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define memlistControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define memlistControlSerialization_Prologue(pResource, pCallContext, pParams) memlistControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define memlistControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define memlistControlSerialization_Epilogue(pResource, pCallContext, pParams) memlistControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define memlistControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define memlistControl_Prologue(pResource, pCallContext, pParams) memlistControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define memlistControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ 
+#define memlistControl_Epilogue(pResource, pCallContext, pParams) memlistControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define memlistPreDestruct_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define memlistPreDestruct(pResource) memlistPreDestruct_DISPATCH(pResource) +#define memlistControlFilter_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define memlistControlFilter(pResource, pCallContext, pParams) memlistControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define memlistIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define memlistIsPartialUnmapSupported(pResource) memlistIsPartialUnmapSupported_DISPATCH(pResource) +#define memlistMapTo_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define memlistMapTo(pResource, pParams) memlistMapTo_DISPATCH(pResource, pParams) +#define memlistUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define memlistUnmapFrom(pResource, pParams) memlistUnmapFrom_DISPATCH(pResource, pParams) +#define memlistGetRefCount_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define memlistGetRefCount(pResource) memlistGetRefCount_DISPATCH(pResource) +#define memlistAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define memlistAddAdditionalDependants(pClient, pResource, pReference) memlistAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NvBool memlistCanCopy_DISPATCH(struct MemoryList *pMemoryList) { + return pMemoryList->__nvoc_metadata_ptr->vtable.__memlistCanCopy__(pMemoryList); +} + +static inline NV_STATUS memlistIsDuplicate_DISPATCH(struct MemoryList *pMemory, NvHandle hMemory, NvBool *pDuplicate) { + return pMemory->__nvoc_metadata_ptr->vtable.__memlistIsDuplicate__(pMemory, hMemory, pDuplicate); +} + +static inline NV_STATUS memlistGetMapAddrSpace_DISPATCH(struct MemoryList *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__nvoc_metadata_ptr->vtable.__memlistGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS memlistControl_DISPATCH(struct MemoryList *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__nvoc_metadata_ptr->vtable.__memlistControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS memlistMap_DISPATCH(struct MemoryList *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__nvoc_metadata_ptr->vtable.__memlistMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS memlistUnmap_DISPATCH(struct MemoryList *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__nvoc_metadata_ptr->vtable.__memlistUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS 
memlistGetMemInterMapParams_DISPATCH(struct MemoryList *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__nvoc_metadata_ptr->vtable.__memlistGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS memlistCheckMemInterUnmap_DISPATCH(struct MemoryList *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__nvoc_metadata_ptr->vtable.__memlistCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS memlistGetMemoryMappingDescriptor_DISPATCH(struct MemoryList *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__nvoc_metadata_ptr->vtable.__memlistGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS memlistCheckCopyPermissions_DISPATCH(struct MemoryList *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice) { + return pMemory->__nvoc_metadata_ptr->vtable.__memlistCheckCopyPermissions__(pMemory, pDstGpu, pDstDevice); +} + +static inline NV_STATUS memlistIsReady_DISPATCH(struct MemoryList *pMemory, NvBool bCopyConstructorContext) { + return pMemory->__nvoc_metadata_ptr->vtable.__memlistIsReady__(pMemory, bCopyConstructorContext); +} + +static inline NvBool memlistIsGpuMapAllowed_DISPATCH(struct MemoryList *pMemory, struct OBJGPU *pGpu) { + return pMemory->__nvoc_metadata_ptr->vtable.__memlistIsGpuMapAllowed__(pMemory, pGpu); +} + +static inline NvBool memlistIsExportAllowed_DISPATCH(struct MemoryList *pMemory) { + return pMemory->__nvoc_metadata_ptr->vtable.__memlistIsExportAllowed__(pMemory); +} + +static inline NvBool memlistAccessCallback_DISPATCH(struct MemoryList *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__memlistAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool memlistShareCallback_DISPATCH(struct MemoryList *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__memlistShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS memlistControlSerialization_Prologue_DISPATCH(struct MemoryList *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__memlistControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void memlistControlSerialization_Epilogue_DISPATCH(struct MemoryList *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__memlistControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS memlistControl_Prologue_DISPATCH(struct MemoryList *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__memlistControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void memlistControl_Epilogue_DISPATCH(struct MemoryList *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__memlistControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline void memlistPreDestruct_DISPATCH(struct MemoryList *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__memlistPreDestruct__(pResource); +} + +static inline NV_STATUS memlistControlFilter_DISPATCH(struct MemoryList *pResource, struct CALL_CONTEXT 
*pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__memlistControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool memlistIsPartialUnmapSupported_DISPATCH(struct MemoryList *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__memlistIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS memlistMapTo_DISPATCH(struct MemoryList *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__memlistMapTo__(pResource, pParams); +} + +static inline NV_STATUS memlistUnmapFrom_DISPATCH(struct MemoryList *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__memlistUnmapFrom__(pResource, pParams); +} + +static inline NvU32 memlistGetRefCount_DISPATCH(struct MemoryList *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__memlistGetRefCount__(pResource); +} + +static inline void memlistAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct MemoryList *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__memlistAddAdditionalDependants__(pClient, pResource, pReference); +} + +NvBool memlistCanCopy_IMPL(struct MemoryList *pMemoryList); + +NV_STATUS memlistConstruct_IMPL(struct MemoryList *arg_pMemoryList, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_memlistConstruct(arg_pMemoryList, arg_pCallContext, arg_pParams) memlistConstruct_IMPL(arg_pMemoryList, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_MEM_LIST_NVOC_H_ diff --git a/src/nvidia/generated/g_mem_mgr_nvoc.c b/src/nvidia/generated/g_mem_mgr_nvoc.c new file mode 100644 index 0000000..230acd1 --- /dev/null +++ b/src/nvidia/generated/g_mem_mgr_nvoc.c @@ -0,0 +1,481 @@ +#define NVOC_MEM_MGR_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_mem_mgr_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x22ad47 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryManager; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +// Forward declarations for MemoryManager +void __nvoc_init__OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init__MemoryManager(MemoryManager*, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner); +void __nvoc_init_funcTable_MemoryManager(MemoryManager*, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner); +NV_STATUS __nvoc_ctor_MemoryManager(MemoryManager*, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner); +void __nvoc_init_dataField_MemoryManager(MemoryManager*, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner); +void __nvoc_dtor_MemoryManager(MemoryManager*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__MemoryManager; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__MemoryManager; + +// Down-thunk(s) to bridge MemoryManager methods from ancestors (if any) + +// Up-thunk(s) to bridge MemoryManager methods to ancestors 
(if any) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrConstructEngine(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, ENGDESCRIPTOR arg3); // this +void __nvoc_up_thunk_OBJENGSTATE_memmgrInitMissing(struct OBJGPU *pGpu, struct MemoryManager *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStatePreInitLocked(struct OBJGPU *pGpu, struct MemoryManager *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStatePreInitUnlocked(struct OBJGPU *pGpu, struct MemoryManager *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStateInitLocked(struct OBJGPU *pGpu, struct MemoryManager *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStateInitUnlocked(struct OBJGPU *pGpu, struct MemoryManager *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStatePreLoad(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStateLoad(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStatePostLoad(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStatePreUnload(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStateUnload(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStatePostUnload(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3); // this +void __nvoc_up_thunk_OBJENGSTATE_memmgrStateDestroy(struct OBJGPU *pGpu, struct MemoryManager *pEngstate); // this +NvBool __nvoc_up_thunk_OBJENGSTATE_memmgrIsPresent(struct OBJGPU *pGpu, struct MemoryManager *pEngstate); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryManager = +{ + /*classInfo=*/ { + /*size=*/ sizeof(MemoryManager), + /*classId=*/ classId(MemoryManager), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "MemoryManager", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_MemoryManager, + /*pCastInfo=*/ &__nvoc_castinfo__MemoryManager, + /*pExportInfo=*/ &__nvoc_export_info__MemoryManager +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__MemoryManager __nvoc_metadata__MemoryManager = { + .rtti.pClassDef = &__nvoc_class_def_MemoryManager, // (memmgr) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_MemoryManager, + .rtti.offset = 0, + .metadata__OBJENGSTATE.rtti.pClassDef = &__nvoc_class_def_OBJENGSTATE, // (engstate) super + .metadata__OBJENGSTATE.rtti.dtor = &__nvoc_destructFromBase, + .metadata__OBJENGSTATE.rtti.offset = NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE), + .metadata__OBJENGSTATE.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^2 + .metadata__OBJENGSTATE.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__OBJENGSTATE.metadata__Object.rtti.offset = NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), + + .vtable.__memmgrConstructEngine__ = &__nvoc_up_thunk_OBJENGSTATE_memmgrConstructEngine, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateConstructEngine__ = &engstateConstructEngine_IMPL, // virtual + .vtable.__memmgrInitMissing__ = &__nvoc_up_thunk_OBJENGSTATE_memmgrInitMissing, // virtual inherited (engstate) base (engstate) + 
.metadata__OBJENGSTATE.vtable.__engstateInitMissing__ = &engstateInitMissing_IMPL, // virtual + .vtable.__memmgrStatePreInitLocked__ = &__nvoc_up_thunk_OBJENGSTATE_memmgrStatePreInitLocked, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePreInitLocked__ = &engstateStatePreInitLocked_IMPL, // virtual + .vtable.__memmgrStatePreInitUnlocked__ = &__nvoc_up_thunk_OBJENGSTATE_memmgrStatePreInitUnlocked, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePreInitUnlocked__ = &engstateStatePreInitUnlocked_IMPL, // virtual + .vtable.__memmgrStateInitLocked__ = &__nvoc_up_thunk_OBJENGSTATE_memmgrStateInitLocked, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateInitLocked__ = &engstateStateInitLocked_IMPL, // virtual + .vtable.__memmgrStateInitUnlocked__ = &__nvoc_up_thunk_OBJENGSTATE_memmgrStateInitUnlocked, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateInitUnlocked__ = &engstateStateInitUnlocked_IMPL, // virtual + .vtable.__memmgrStatePreLoad__ = &__nvoc_up_thunk_OBJENGSTATE_memmgrStatePreLoad, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePreLoad__ = &engstateStatePreLoad_IMPL, // virtual + .vtable.__memmgrStateLoad__ = &__nvoc_up_thunk_OBJENGSTATE_memmgrStateLoad, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateLoad__ = &engstateStateLoad_IMPL, // virtual + .vtable.__memmgrStatePostLoad__ = &__nvoc_up_thunk_OBJENGSTATE_memmgrStatePostLoad, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePostLoad__ = &engstateStatePostLoad_IMPL, // virtual + .vtable.__memmgrStatePreUnload__ = &__nvoc_up_thunk_OBJENGSTATE_memmgrStatePreUnload, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePreUnload__ = &engstateStatePreUnload_IMPL, // virtual + .vtable.__memmgrStateUnload__ = &__nvoc_up_thunk_OBJENGSTATE_memmgrStateUnload, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateUnload__ = &engstateStateUnload_IMPL, // virtual + .vtable.__memmgrStatePostUnload__ = &__nvoc_up_thunk_OBJENGSTATE_memmgrStatePostUnload, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePostUnload__ = &engstateStatePostUnload_IMPL, // virtual + .vtable.__memmgrStateDestroy__ = &__nvoc_up_thunk_OBJENGSTATE_memmgrStateDestroy, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateDestroy__ = &engstateStateDestroy_IMPL, // virtual + .vtable.__memmgrIsPresent__ = &__nvoc_up_thunk_OBJENGSTATE_memmgrIsPresent, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateIsPresent__ = &engstateIsPresent_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__MemoryManager = { + .numRelatives = 3, + .relatives = { + &__nvoc_metadata__MemoryManager.rtti, // [0]: (memmgr) this + &__nvoc_metadata__MemoryManager.metadata__OBJENGSTATE.rtti, // [1]: (engstate) super + &__nvoc_metadata__MemoryManager.metadata__OBJENGSTATE.metadata__Object.rtti, // [2]: (obj) super^2 + } +}; + +// 14 up-thunk(s) defined to bridge methods in MemoryManager to superclasses + +// memmgrConstructEngine: virtual inherited (engstate) base (engstate) +NV_STATUS 
__nvoc_up_thunk_OBJENGSTATE_memmgrConstructEngine(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, ENGDESCRIPTOR arg3) { + return engstateConstructEngine(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE)), arg3); +} + +// memmgrInitMissing: virtual inherited (engstate) base (engstate) +void __nvoc_up_thunk_OBJENGSTATE_memmgrInitMissing(struct OBJGPU *pGpu, struct MemoryManager *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE))); +} + +// memmgrStatePreInitLocked: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStatePreInitLocked(struct OBJGPU *pGpu, struct MemoryManager *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE))); +} + +// memmgrStatePreInitUnlocked: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStatePreInitUnlocked(struct OBJGPU *pGpu, struct MemoryManager *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE))); +} + +// memmgrStateInitLocked: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStateInitLocked(struct OBJGPU *pGpu, struct MemoryManager *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE))); +} + +// memmgrStateInitUnlocked: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStateInitUnlocked(struct OBJGPU *pGpu, struct MemoryManager *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE))); +} + +// memmgrStatePreLoad: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStatePreLoad(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE)), arg3); +} + +// memmgrStateLoad: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStateLoad(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE)), arg3); +} + +// memmgrStatePostLoad: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStatePostLoad(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE)), arg3); +} + +// memmgrStatePreUnload: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStatePreUnload(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE)), arg3); +} + +// memmgrStateUnload: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStateUnload(struct OBJGPU 
*pGpu, struct MemoryManager *pEngstate, NvU32 arg3) {
+ return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE)), arg3);
+}
+
+// memmgrStatePostUnload: virtual inherited (engstate) base (engstate)
+NV_STATUS __nvoc_up_thunk_OBJENGSTATE_memmgrStatePostUnload(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3) {
+ return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE)), arg3);
+}
+
+// memmgrStateDestroy: virtual inherited (engstate) base (engstate)
+void __nvoc_up_thunk_OBJENGSTATE_memmgrStateDestroy(struct OBJGPU *pGpu, struct MemoryManager *pEngstate) {
+ engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE)));
+}
+
+// memmgrIsPresent: virtual inherited (engstate) base (engstate)
+NvBool __nvoc_up_thunk_OBJENGSTATE_memmgrIsPresent(struct OBJGPU *pGpu, struct MemoryManager *pEngstate) {
+ return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE)));
+}
+
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__MemoryManager =
+{
+ /*numEntries=*/ 0,
+ /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*);
+void __nvoc_dtor_MemoryManager(MemoryManager *pThis) {
+ __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
+ PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_MemoryManager(MemoryManager *pThis, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner) {
+ ChipHal *chipHal = &pGpuhalspecowner->chipHal;
+ const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
+ RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
+ const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
+ PORT_UNREFERENCED_VARIABLE(pThis);
+ PORT_UNREFERENCED_VARIABLE(pGpuhalspecowner);
+ PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
+ PORT_UNREFERENCED_VARIABLE(chipHal);
+ PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
+ PORT_UNREFERENCED_VARIABLE(rmVariantHal);
+ PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
+
+ // Hal field -- bFbRegionsSupported
+ // default
+ {
+ pThis->bFbRegionsSupported = NV_FALSE;
+ }
+
+ // Hal field -- bPmaEnabled
+ // default
+ {
+ pThis->bPmaEnabled = NV_FALSE;
+ }
+
+ // Hal field -- bClientPageTablesPmaManaged
+ if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */
+ {
+ pThis->bClientPageTablesPmaManaged = NV_TRUE;
+ }
+
+ // Hal field -- bScanoutSysmem
+ if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */
+ {
+ pThis->bScanoutSysmem = NV_TRUE;
+ }
+
+ // Hal field -- bDisallowSplitLowerMemory
+ if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */
+ {
+ pThis->bDisallowSplitLowerMemory = NV_TRUE;
+ }
+
+ // Hal field -- bSmallPageCompression
+ // default
+ {
+ pThis->bSmallPageCompression = NV_FALSE;
+ }
+
+ // Hal field -- bSysmemCompressionSupportDef
+ if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */
+ {
+ pThis->bSysmemCompressionSupportDef = NV_TRUE;
+ }
+
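+ // (How the ChipHal guards above and below decode: chipHal_HalVarIdx >> 5
+ // selects a 32-entry group of HAL variants, and 1UL << (chipHal_HalVarIdx & 0x1f)
+ // is the variant's bit inside that group, tested against a literal mask.
+ // With group 3 and mask 0x0000d000UL (bits 12, 14 and 15 set), the guard
+ // matches variant indices 96+12=108, 96+14=110 and 96+15=111, which the
+ // generated comments label T234D | T264D | T256D.)
+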
+ // Hal field -- bBug2301372IncreaseRmReserveMemoryWar
+ // default
+ {
+ pThis->bBug2301372IncreaseRmReserveMemoryWar = NV_FALSE;
+ }
+
+ pThis->bEnableDynamicPageOfflining = NV_FALSE;
+
+ // Hal field -- bVgpuPmaSupport
+ // default
+ {
+ pThis->bVgpuPmaSupport = NV_FALSE;
+ }
+
+ pThis->bScrubChannelSetupInProgress = NV_FALSE;
+
+ // Hal field -- bBug3922001DisableCtxBufOnSim
+ // default
+ {
+ pThis->bBug3922001DisableCtxBufOnSim = NV_FALSE;
+ }
+
+ // Hal field -- bPlatformFullyCoherent
+ // default
+ {
+ pThis->bPlatformFullyCoherent = NV_FALSE;
+ }
+
+ pThis->bEnableDynamicGranularityPageArrays = NV_FALSE;
+
+ // Hal field -- bAllowNoncontiguousAllocation
+ if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */
+ {
+ pThis->bAllowNoncontiguousAllocation = NV_TRUE;
+ }
+
+ // Hal field -- bLocalEgmSupported
+ // default
+ {
+ pThis->bLocalEgmSupported = NV_FALSE;
+ }
+
+ // Hal field -- bScrubOnFreeEnabled
+ // default
+ {
+ pThis->bScrubOnFreeEnabled = NV_FALSE;
+ }
+
+ // Hal field -- bFastScrubberEnabled
+ // default
+ {
+ pThis->bFastScrubberEnabled = NV_FALSE;
+ }
+
+ // Hal field -- bSysmemPageSizeDefaultAllowLargePages
+ // default
+ {
+ pThis->bSysmemPageSizeDefaultAllowLargePages = NV_FALSE;
+ }
+
+ // Hal field -- bMonitoredFenceSupported
+ if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */
+ {
+ pThis->bMonitoredFenceSupported = NV_FALSE;
+ }
+
+ // Hal field -- b64BitSemaphoresSupported
+ if (( ((chipHal_HalVarIdx >> 5) == 3UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000d000UL) )) /* ChipHal: T234D | T264D | T256D */
+ {
+ pThis->b64BitSemaphoresSupported = NV_FALSE;
+ }
+
+ // Hal field -- bGenericKindSupport
+ // default
+ {
+ pThis->bGenericKindSupport = NV_FALSE;
+ }
+
+ pThis->bSkipCompressionCheck = NV_FALSE;
+
+ // Hal field -- bUseVirtualCopyOnSuspend
+ if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
+ {
+ pThis->bUseVirtualCopyOnSuspend = NV_TRUE;
+ }
+}
+
+NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* );
+NV_STATUS __nvoc_ctor_MemoryManager(MemoryManager *pThis, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner) {
+ NV_STATUS status = NV_OK;
+ status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
+ if (status != NV_OK) goto __nvoc_ctor_MemoryManager_fail_OBJENGSTATE;
+ __nvoc_init_dataField_MemoryManager(pThis, pGpuhalspecowner, pRmhalspecowner);
+ goto __nvoc_ctor_MemoryManager_exit; // Success
+
+__nvoc_ctor_MemoryManager_fail_OBJENGSTATE:
+__nvoc_ctor_MemoryManager_exit:
+
+ return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_MemoryManager_1(MemoryManager *pThis, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner) {
+ ChipHal *chipHal = &pGpuhalspecowner->chipHal;
+ const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
+ RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
+ const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
+ PORT_UNREFERENCED_VARIABLE(pThis);
+ PORT_UNREFERENCED_VARIABLE(pGpuhalspecowner);
+ PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
+ PORT_UNREFERENCED_VARIABLE(chipHal);
+ PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
+ PORT_UNREFERENCED_VARIABLE(rmVariantHal);
+ PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
+} // End __nvoc_init_funcTable_MemoryManager_1
+
+
+// Initialize vtable(s) for 14 virtual method(s).
+void __nvoc_init_funcTable_MemoryManager(MemoryManager *pThis, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner) {
+ __nvoc_init_funcTable_MemoryManager_1(pThis, pGpuhalspecowner, pRmhalspecowner);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__MemoryManager(MemoryManager *pThis, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner) {
+
+ // Initialize pointers to inherited data.
+ pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; // (obj) super^2
+ pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; // (engstate) super
+ pThis->__nvoc_pbase_MemoryManager = pThis; // (memmgr) this
+
+ // Recurse to superclass initialization function(s).
+ __nvoc_init__OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
+
+ // Pointer(s) to metadata structure(s)
+ pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__MemoryManager.metadata__OBJENGSTATE.metadata__Object; // (obj) super^2
+ pThis->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr = &__nvoc_metadata__MemoryManager.metadata__OBJENGSTATE; // (engstate) super
+ pThis->__nvoc_metadata_ptr = &__nvoc_metadata__MemoryManager; // (memmgr) this
+
+ // Initialize per-object vtables.
+ __nvoc_init_funcTable_MemoryManager(pThis, pGpuhalspecowner, pRmhalspecowner);
+}
+
+NV_STATUS __nvoc_objCreate_MemoryManager(MemoryManager **ppThis, Dynamic *pParent, NvU32 createFlags)
+{
+ NV_STATUS status;
+ Object *pParentObj = NULL;
+ MemoryManager *pThis;
+ GpuHalspecOwner *pGpuhalspecowner;
+ RmHalspecOwner *pRmhalspecowner;
+
+ // Assign `pThis`, allocating memory unless suppressed by flag.
+ status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(MemoryManager), (void**)&pThis, (void**)ppThis);
+ if (status != NV_OK)
+ return status;
+
+ // Zero is the initial value for everything.
+ portMemSet(pThis, 0, sizeof(MemoryManager));
+
+ pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.createFlags = createFlags;
+
+ // pParent must be a valid object that derives from a halspec owner class.
+ NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_ARGUMENT);
+
+ // Link the child into the parent unless flagged not to do so.
+ if (!(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+ {
+ pParentObj = dynamicCast(pParent, Object);
+ objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object);
+ }
+ else
+ {
+ pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL;
+ }
+
+ if ((pGpuhalspecowner = dynamicCast(pParent, GpuHalspecOwner)) == NULL)
+ pGpuhalspecowner = objFindAncestorOfType(GpuHalspecOwner, pParent);
+ NV_ASSERT_OR_RETURN(pGpuhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT);
+ if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL)
+ pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent);
+ NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT);
+
+ __nvoc_init__MemoryManager(pThis, pGpuhalspecowner, pRmhalspecowner);
+ status = __nvoc_ctor_MemoryManager(pThis, pGpuhalspecowner, pRmhalspecowner);
+ if (status != NV_OK) goto __nvoc_objCreate_MemoryManager_cleanup;
+
+ // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+ *ppThis = pThis;
+
+ return NV_OK;
+
+__nvoc_objCreate_MemoryManager_cleanup:
+
+ // Unlink the child from the parent if it was linked above.
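+ // (pParentObj is non-NULL only when NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY
+ // was clear and objAddChild() ran above, so this undoes exactly that link.)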
+ if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(MemoryManager)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_MemoryManager(MemoryManager **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_MemoryManager(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_mem_mgr_nvoc.h b/src/nvidia/generated/g_mem_mgr_nvoc.h new file mode 100644 index 0000000..00b8c07 --- /dev/null +++ b/src/nvidia/generated/g_mem_mgr_nvoc.h @@ -0,0 +1,2930 @@ + +#ifndef _G_MEM_MGR_NVOC_H_ +#define _G_MEM_MGR_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#pragma once
+#include "g_mem_mgr_nvoc.h"
+
+#ifndef MEM_MGR_H
+#define MEM_MGR_H
+
+#include "core/core.h"
+#include "gpu/eng_state.h"
+
+#include "gpu/gpu.h"
+
+#include "mem_mgr/mem.h"
+
+#include "gpu/mem_mgr/virt_mem_allocator_common.h"
+#include "containers/map.h"
+#include "gpu/mem_mgr/heap_base.h"
+#include "mem_mgr/vaspace.h"
+
+struct _PMA;
+typedef struct _PMA PMA;
+
+#include "ctrl/ctrl2080/ctrl2080fb.h" // NV2080_CTRL_FB_GET_CARVEOUT_REGION_INFO_PARAMS
+
+
+struct CeUtils;
+
+#ifndef __NVOC_CLASS_CeUtils_TYPEDEF__
+#define __NVOC_CLASS_CeUtils_TYPEDEF__
+typedef struct CeUtils CeUtils;
+#endif /* __NVOC_CLASS_CeUtils_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_CeUtils
+#define __nvoc_class_id_CeUtils 0x8b8bae
+#endif /* __nvoc_class_id_CeUtils */
+
+
+
+typedef volatile struct _cl906f_tag1 Nv906fControl;
+typedef struct KERNEL_MIG_GPU_INSTANCE KERNEL_MIG_GPU_INSTANCE;
+
+typedef struct
+{
+ MEMORY_DESCRIPTOR *pMemDesc;
+ NvU64 offset;
+
+ //
+ // Private, should only be used by MemUtils layer
+ // Expected to be 0 when map is called
+ // Become 0 when unmapped
+ //
+ void *pMapping;
+ void *pMappingPriv;
+} TRANSFER_SURFACE;
+
+// Memory transfer engine types.
+typedef enum
+{
+ TRANSFER_TYPE_PROCESSOR = 0, // CPU/GSP/DPU depending on execution context
+ TRANSFER_TYPE_GSP_DMA, // Dma engine internal to GSP
+ TRANSFER_TYPE_CE, // Copy Engine using CeUtils channel
+ TRANSFER_TYPE_CE_PRI, // Copy Engine using PRIs
+ TRANSFER_TYPE_BAR0, // Copy using BAR0 PRAMIN
+} TRANSFER_TYPE;
+
+#define TRANSFER_FLAGS_NONE 0
+#define TRANSFER_FLAGS_DEFER_FLUSH NVBIT32(0) // Applicable only for write operations
+#define TRANSFER_FLAGS_SHADOW_ALLOC NVBIT32(1) // Applicable only for non-PROCESSOR transfers
+#define TRANSFER_FLAGS_SHADOW_INIT_MEM NVBIT32(2) // Applicable only for non-PROCESSOR transfers
+#define TRANSFER_FLAGS_PERSISTENT_CPU_MAPPING NVBIT32(3) // Require long lived PROCESSOR mapping
+#define TRANSFER_FLAGS_DESTROY_MAPPING NVBIT32(4) // Destroy any cached mappings when complete
+#define TRANSFER_FLAGS_USE_BAR1 NVBIT32(5) // Use only BAR1 for PROCESSOR transfers
+#define TRANSFER_FLAGS_PREFER_CE NVBIT32(6) // Use CE if possible (BAR0 on simulation for perf)
+#define TRANSFER_FLAGS_CE_PRI_DEFER_FLUSH NVBIT32(7) // Defer CE flush; only affects PRI CE operations
+
+// Protection flags: at most 1 may be set, none means READ_WRITE by default
+#define TRANSFER_FLAGS_MAP_PROTECT_READABLE NVBIT32(8) // Transfer is only reading data
+#define TRANSFER_FLAGS_MAP_PROTECT_WRITEABLE NVBIT32(9) // Transfer is only writing data
+
+#define TRANSFER_FLAGS_PREFER_PROCESSOR NVBIT32(10) // Use BAR1/2 if possible
+#define TRANSFER_FLAGS_ALLOW_MAPPING_REUSE NVBIT32(11) // Prefer existing full-allocation mapping
+ // (see memdescGetKernelMapping())
+ // Only affects BeginTransfer/EndTransfer
+ // Mapping lifetime controlled by original mapper
+ // Intended for short uses,
+ // where it can't be unmapped by the owner
+
+#define TRANSFER_FLAGS_FLUSH_CPU_CACHE_WAR_BUG4686457 NVBIT32(12) // Flush the data from CPU cache
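+
+// Usage sketch (illustrative only, not part of the generated header): the
+// TRANSFER_FLAGS_* values above are independent NVBIT32 bits combined with
+// bitwise OR, with at most one protection bit set, e.g. a write-only
+// transfer that batches its flush:
+//
+//     NvU32 flags = TRANSFER_FLAGS_DEFER_FLUSH |
+//                   TRANSFER_FLAGS_MAP_PROTECT_WRITEABLE;
+//
+// and a TRANSFER_SURFACE pairing the target allocation with a byte offset
+// (pMemDesc stands for any valid MEMORY_DESCRIPTOR pointer):
+//
+//     TRANSFER_SURFACE surf = { .pMemDesc = pMemDesc, .offset = 0 };
+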
+typedef struct
+{
+ NvU32 bar1Size;
+ NvU32 bar1AvailSize;
+ NvU32 bankSwizzleAlignment;
+ NvU32 bar1MaxContigAvailSize;
+} GETBAR1INFO, *PGETBAR1INFO;
+
+//
+// RM Default PTE kind
+// Bug #2242255, introducing the RM Default kind to allow sharing memory between
+// different architectures especially between Turing+ and Pre Turing chips
+//
+#define RM_DEFAULT_PTE_KIND 0x100
+
+typedef enum
+{
+ FB_IS_KIND_Z, // Kind is a Z buffer
+ FB_IS_KIND_ZBC, // Zero bandwidth clears
+ FB_IS_KIND_ZBC_ALLOWS_1, // ZBC with 1 bit of tag
+ FB_IS_KIND_ZBC_ALLOWS_2, // ZBC with 2 bits of tag
+ FB_IS_KIND_ZBC_ALLOWS_4, // ZBC with 4 bits of tag
+ FB_IS_KIND_COMPRESSIBLE, // Any compressible kind
+ FB_IS_KIND_COMPRESSIBLE_1, // Compressible with 1 comp tag bit
+ FB_IS_KIND_COMPRESSIBLE_2, // Compressible with 2 comp tag bits
+ FB_IS_KIND_COMPRESSIBLE_4, // Compressible with 4 comp tag bits
+ FB_IS_KIND_SUPPORTED, // Kind is supported
+ FB_IS_KIND_DISALLOW_PLC, // Kind Disallows PLC
+ FB_IS_KIND_SWIZZLED, // Kind is swizzled (not pitch or GMK)
+} FB_IS_KIND_OP;
+
+// Surface compression parameters
+typedef struct COMPR_INFO
+{
+ // Surface kind; if not compressed, following parameters are ignored
+ NvU32 kind;
+
+ // Compression page shift; 0 if kind is uncompressed
+ NvU32 compPageShift;
+
+ //
+ // Are comptags determined per-page by PA?
+ // If set, following parameters are ignored
+ //
+ NvBool bPhysBasedComptags;
+
+ // see GMMU_COMPR_INFO
+ NvU32 compPageIndexLo;
+ NvU32 compPageIndexHi;
+ NvU32 compTagLineMin;
+ NvU32 compTagLineMultiplier;
+} COMPR_INFO;
+
+//
+// Individual entry for logging Fb reserved use-cases
+//
+typedef struct NV_FB_RSVD_BLOCK_LOG_ENTRY
+{
+ // Owner tag associated with reservation block
+ NvU32 ownerId;
+
+ // Size of the memory reserved
+ NvU64 rsvdSize;
+} NV_FB_RSVD_BLOCK_LOG_ENTRY;
+
+// Total number of FB internal reservation entries
+#define NV_FB_RSVD_BLOCK_LOG_ENTRY_MAX 10U
+
+//
+// Structure for logging Fb reserved use-cases
+//
+typedef struct NV_FB_RSVD_BLOCK_LOG_INFO
+{
+ // Counter for logging entries
+ NvU32 counter;
+
+ // List of all reserved entries
+ NV_FB_RSVD_BLOCK_LOG_ENTRY rsvdBlockList[NV_FB_RSVD_BLOCK_LOG_ENTRY_MAX];
+} NV_FB_RSVD_BLOCK_LOG_INFO;
+
+//
+// Macro for initializing reserved block log data
+//
+#define NV_FB_RSVD_BLOCK_LOG_INIT(pMem) \
+ { \
+ ((pMem)->rsvdBlockInfo).counter = 0; \
+ for (NvU32 i = 0; i < NV_FB_RSVD_BLOCK_LOG_ENTRY_MAX; i++) \
+ { \
+ ((pMem)->rsvdBlockInfo).rsvdBlockList[i].ownerId = 0; \
+ ((pMem)->rsvdBlockInfo).rsvdBlockList[i].rsvdSize = 0; \
+ } \
+ }
+
+//
+// Macro for adding new reserved block entry to the list
+// If unable to log, marks the status as NV_ERR_NO_MEMORY otherwise keeps it unchanged
+//
+#define NV_FB_RSVD_BLOCK_LOG_ENTRY_ADD(status, pMem, tag, size) \
+ { \
+ if(((pMem)->rsvdBlockInfo).counter < NV_FB_RSVD_BLOCK_LOG_ENTRY_MAX) \
+ { \
+ ((pMem)->rsvdBlockInfo).rsvdBlockList[((pMem)->rsvdBlockInfo).counter].ownerId = (tag); \
+ ((pMem)->rsvdBlockInfo).rsvdBlockList[((pMem)->rsvdBlockInfo).counter].rsvdSize = (size); \
+ (((pMem)->rsvdBlockInfo).counter)++; \
+ } \
+ else \
+ { \
+ status = NV_ERR_NO_MEMORY; \
+ } \
+ }
+
+//
+// Fixed Channel Properties for Memutils Object
+//
+
+typedef NV_STATUS FbScrubCallback(OBJGPU *);
+
+#define BLOCK_INDEX_FROM_ADDR(addr,size) ((NvU32)((addr) >> size))
+#define BLOCK_ADDR_FROM_INDEX(idx,size) (((NvU64)(idx)) << size)
+
+#define MEMUTILS_SIZE_PER_BLOCK_INBYTES (0x68)
+#define MEMUTILS_TOTAL_SIZE_PER_BLOCK_INBYTES (0x60) //(COPY + PB SEMA)
+#define MEMUTILS_TD_BLOCKS_PER_CHUNK 0x40
+
+#define BLOCK_INDEX_FROM_ADDR(addr,size) ((NvU32)((addr) >> size))
+#define BLOCK_ADDR_FROM_INDEX(idx,size) (((NvU64)(idx)) << size)
+
+#define MEMUTILS_NUM_PAYLOAD_SEMAPHORES (2)
+#define MEMUTILS_NUM_GPFIFIO_ENTRIES (32)
+// PB size should be a multiple of chunk size
+#define MEMUTILS_CHANNEL_PB_SIZE (0x10 * MEMUTILS_SIZE_PER_BLOCK_INBYTES * \
+ MEMUTILS_TD_BLOCKS_PER_CHUNK)
+#define MEMUTILS_CHANNEL_SEMAPHORE_SIZE (4 *
MEMUTILS_NUM_PAYLOAD_SEMAPHORES) +#define MEMUTILS_CHANNEL_NOTIFIER_SIZE (sizeof(NvNotification) * 1) + +// offset and line length should be a multiple of 4KB +#define MEMUTIL_SCRUB_OFFSET_ALIGNMENT (4 * 1024) +#define MEMUTIL_SCRUB_LINE_LENGTH_ALIGNMENT (4 * 1024) + +typedef enum { + CE_SCRUBBER_CHANNEL, + FAST_SCRUBBER_CHANNEL, + COPY_CHANNEL, + SWL_SCRUBBER_CHANNEL, + MAX_CHANNEL_TYPE +} CHANNEL_KIND; + +// This will be moved to a channel object next +typedef struct OBJCHANNEL +{ + NvHandle deviceId; // Device Handle + NvHandle physMemId; // Memory Handle + NvHandle channelId; // Channel Handle + NvHandle subdeviceId; // Subdevice Handle + NvHandle errNotifierIdVirt; + NvHandle errNotifierIdPhys; + NvHandle engineObjectId; + NvHandle eventId; + NvHandle pushBufferId; + NvHandle bitMapSemPhysId; + NvHandle bitMapSemVirtId; + NvHandle hVASpaceId; // VASpace handle, when scrubber in virtual mode + NvHandle hFbAlias; // Used only for virtual channels + NvHandle hFbAliasVA; + // to be moved later + + NvU32 channelSize; + NvU32 channelNumGpFifioEntries; + NvU32 channelPbSize; + NvU32 channelNotifierSize; + NvU32 methodSizePerBlock; + NvU32 semaOffset; + NvU32 finishPayloadOffset; + NvU32 authTagBufSemaOffset; + NvU32 finishPayload; + NvBool isChannelSynchronized; + NvBool isProgressChecked; +// +// RM internal channels are created as privileged channels (physical address access) by default +// For MMU Bug: 2739505, we need to switch to use channels in non-privileged mode. +// + NvBool bUseVasForCeCopy; // set to NV_TRUE, when scrubber operates in virtual address + struct RsClient *pRsClient; + struct OBJVASPACE *pVAS; + NvU32 engineType; + NvU64 startFbOffset; + NvU64 fbSize; + NvU64 fbAliasVA; + NvU64 vaStartOffset; + // to be moved to a separate object later + + NvU32 *pBlockPendingState; + NvU32 *pBlockDoneState; + NvU32 blockCount; + NvHandle hClient; + NvBool bClientAllocated; + NvU64 pbGpuVA; + NvU64 pbGpuBitMapVA; + NvU64 pbGpuNotifierVA; + MEMORY_DESCRIPTOR *pUserdMemdesc; + MEMORY_DESCRIPTOR *pChannelBufferMemdesc; + MEMORY_DESCRIPTOR *pErrNotifierMemdesc; + NvU8 *pbCpuVA; + NvU8 *pbBitMapVA; + Nv906fControl *pControlGPFifo; + NvU32 classEngineID; + NVOS10_EVENT_KERNEL_CALLBACK_EX callback; + NvU32 state; + NvU32 hTdCopyClass; + NvU32 sec2Class; + NvU32 minBlockSize; + NvU32 maxBlockSize; + NvU32 channelPutOffset; + NvU8 blockShift; + NvU32 lastPayloadPushed; + NvBool isChannelActive; + NvU32 workSubmitToken; + // + // Work submit token read from notifier memory. 
+ // + NvNotification *pTokenFromNotifier; + NvU32 lastSubmittedEntry; + NvHandle lastAllocatedHandle; + CHANNEL_KIND type; + + // Used for Volta+ + NvHandle doorbellRegionHandle; + NvU8 *pDoorbellRegion; + NvU32 *pDoorbellRegisterOffset; + NvBool bUseDoorbellRegister; + NvHandle hUserD; + NvBool bClientUserd; + + OBJGPU *pGpu; + NvU32 ceId; + + // Used by Partition Scrubber + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance; + NvHandle hPartitionRef; + + NvBool bUseBar1; + +} OBJCHANNEL; + +#define NV_METHOD(SubCh, Method, Num) \ + (DRF_DEF(906F, _DMA_INCR, _OPCODE, _VALUE) | \ + DRF_NUM(906F, _DMA_INCR, _COUNT, Num) | \ + DRF_NUM(906F, _DMA_INCR, _SUBCHANNEL, SubCh) | \ + DRF_NUM(906F, _DMA_INCR, _ADDRESS, (Method) >> 2)) + +#define PUSH_DATA(Data) MEM_WR32(ptr++, (Data)) + +#define PUSH_PAIR(SubCh, Method, Data) \ + do \ + { \ + PUSH_DATA(NV_METHOD(SubCh, (Method), 1)); \ + PUSH_DATA((Data)); \ + } while (0) + +//----------------------------------------------------------------------------- + +typedef struct +{ + NvU32 lastSubmittedBlock; + NvBool isTopDownScrubber; + NvBool isActive; + NvU32 scrubberState; + NvU32 currentFbRegion; + NvU32 startBlock; + NvU32 endBlock; + NvU32 *pPendingBitMap; + NvU32 *pDoneBitMap; + NvU32 blockCount; + struct OBJCE *pCe; + NvBool bCeInUse; + OBJCHANNEL tdHeapState; + OBJCHANNEL allocationScrubberState; +} OBJSCRUB; + +typedef struct +{ + NvU64 base; // Base/start address of the region + NvU64 limit; // Last/end address of region + NvU64 rsvdSize; // Memory RM may be required to allocate in this region + NvBool bRsvdRegion; // Reserved region -- not publicly usable + NvU32 performance; // Relative performance. Higher is faster + NvBool bSupportCompressed; // Support compressed kinds + NvBool bSupportISO; // Support ISO (display, cursor, video) surfaces + NvBool bProtected; // Represents a protected region of memory. + NvBool bInternalHeap; // PMA:Used for internal RM allocations + NvBool bLostOnSuspend; // Not required to be Saved during S/R. + NvBool bPreserveOnSuspend; // Required to be Saved during S/R. +} FB_REGION_DESCRIPTOR, *PFB_REGION_DESCRIPTOR; + +#define MAX_FB_REGIONS 16 + +// Maximum number of contexts created for WHQL test WDDM Max Contexts +#define WHQL_TEST_MAX_CONTEXTS 100 + +// Object 'get' macros for FB relative object retrievals. +#define MEMORY_MANAGER_GET_HEAP(p) ((p)->pHeap) + +typedef struct _def_fb_mem_node +{ + struct _def_fb_mem_node *pNext; + + NvBool bFreeDescriptor; + PMEMORY_DESCRIPTOR pMemDesc; + +} FB_MEM_NODE, *PFB_MEM_NODE; + +// defines for MemoryManager::fbsrReservedRanges +#define MAX_FBSR_RESERVED_REGIONS 5 // Max. Memory descriptors for RM Instance memory +#define FBSR_RESERVED_INST_MEMORY_BEFORE_BAR2PTE 0 // Described on Kernel-RM and Physical-RM (Monolithic / GSP offload) +#define FBSR_RESERVED_INST_MEMORY_AFTER_BAR2PTE 1 // Described on Kernel-RM and Physical-RM (Monolithic / GSP offload) +#define FBSR_RESERVED_INST_MEMORY_GSP_HEAP 2 // Allocated on Kernel-RM and sent to Physical-RM (GSP offload) +#define FBSR_RESERVED_INST_MEMORY_GSP_NON_WPR 3 // Described on Physical-RM (GSP offload) +#define FBSR_RESERVED_INST_MEMORY_VGA_WORKSPACE 4 // Described on Physical-RM (GSP offload) + +/*! + * MemoryManager provides the root memory management of GPU video memory. + * External entities might provide suballocators on top of MemoryManager. 
+ * + * MemoryManager can have static information on the memory system (e.g.: list of + * kinds, etc), however MemoryManager does not have direct access to the GPU + * memory system (e.g.: BAR0 registers). It relies on KernelMemorySystem for + * operations on the memory system. + * + * MemoryManager is instantiated in VGPU guest/GSP Client as well as the VGPU + * host/GSP-RM. + */ + +#define MEM_MGR_STUB_ORIN(...) { return __VA_ARGS__; } + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_MEM_MGR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__MemoryManager; +struct NVOC_METADATA__OBJENGSTATE; +struct NVOC_VTABLE__MemoryManager; + +struct RM_POOL_ALLOC_MEM_RESERVE_INFO; + +struct __nvoc_inner_struc_MemoryManager_1__ { + NvBool bEnabled; + NvU32 peerId; +}; + +struct MIG_MEMORY_PARTITIONING_INFO { + struct NV_RANGE partitionableMemoryRange; + struct NV_RANGE partitionableBar1Range; + NvHandle hClient; + NvHandle hDevice; + NvHandle hSubdevice; +}; + + + +struct MemoryManager { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__MemoryManager *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^2 + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; // engstate super + struct MemoryManager *__nvoc_pbase_MemoryManager; // memmgr + + // Data members + NvBool bFbsrWddmModeEnabled; + NvBool bFbRegionsSupported; + NvBool bPmaSupportedOnPlatform; + NvBool bPmaEnabled; + NvBool bPmaInitialized; + NvBool bPmaForcePersistence; + NvBool bClientPageTablesPmaManaged; + NvBool bScanoutSysmem; + NvBool bMixedDensityFbp; + NvBool bPreferSlowRegion; + NvBool bPersistentStandbyBuffer; + NvBool bEnableFbsrPagedDma; + NvBool bDisallowSplitLowerMemory; + NvBool bIgnoreUpperMemory; + NvBool bSmallPageCompression; + NvBool bSysmemCompressionSupportDef; + NvBool bBug1698088IncreaseRmReserveMemoryWar; + NvBool bBug2301372IncreaseRmReserveMemoryWar; + NvBool bEnableFbsrFileMode; + NvBool bEnableDynamicPageOfflining; + NvBool bVgpuPmaSupport; + NvBool bScrubChannelSetupInProgress; + NvBool bBug3922001DisableCtxBufOnSim; + NvBool bPlatformFullyCoherent; + NvBool bEnableDynamicGranularityPageArrays; + NvBool bAllowNoncontiguousAllocation; + NvBool bLocalEgmSupported; + struct __nvoc_inner_struc_MemoryManager_1__ localEgmOverride; + NvBool bLocalEgmEnabled; + NvU32 localEgmPeerId; + NvS32 localEgmNodeId; + NvU64 localEgmBasePhysAddr; + NvU64 localEgmSize; + NvBool bForceEnableFlaSysmem; + NvBool bEccInterleavedVidmemScrub; + NvBool bScrubberInitialized; + NvBool bAllowSysmemHugePages; + NvBool bEccScrubOverride; + NvU64 sysmemPageSize; + struct Heap *pHeap; + NvBool bScrubOnFreeEnabled; + NvBool bFastScrubberEnabled; + NvBool bSysmemPageSizeDefaultAllowLargePages; + NvBool bDisableAsyncScrubforMods; + NvBool bUseVasForCeMemoryOps; + NvBool bCePhysicalVidmemAccessNotSupported; + NvBool bRmExecutingEccScrub; + NvBool bBug1441072EccScrubWar; + NvU64 heapStartOffset; + NvU64 rsvdMemoryBase; + NvU64 rsvdMemorySize; + struct CeUtils *pCeUtils; + NvBool bDisableGlobalCeUtils; 
+ struct RM_POOL_ALLOC_MEM_RESERVE_INFO *pPageLevelReserve; + struct MIG_MEMORY_PARTITIONING_INFO MIGMemoryPartitioningInfo; + NV_FB_RSVD_BLOCK_LOG_INFO rsvdBlockInfo; + NvHandle hClient; + NvHandle hDevice; + NvHandle hSubdevice; + NvBool bReservedMemAtBottom; + NvU64 bug4146226ReserveOffset; + NvBool bBug4146226ReserveWar; + NvBool bMonitoredFenceSupported; + NvBool b64BitSemaphoresSupported; + NvBool bGenericKindSupport; + NvBool bSkipCompressionCheck; + NvBool bUseVirtualCopyOnSuspend; + NvBool bLocalizedMemorySupported; + NvU64 localizedMask; +}; + + +// Vtable with 14 per-class function pointers +struct NVOC_VTABLE__MemoryManager { + NV_STATUS (*__memmgrConstructEngine__)(struct OBJGPU *, struct MemoryManager * /*this*/, ENGDESCRIPTOR); // virtual inherited (engstate) base (engstate) + void (*__memmgrInitMissing__)(struct OBJGPU *, struct MemoryManager * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__memmgrStatePreInitLocked__)(struct OBJGPU *, struct MemoryManager * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__memmgrStatePreInitUnlocked__)(struct OBJGPU *, struct MemoryManager * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__memmgrStateInitLocked__)(struct OBJGPU *, struct MemoryManager * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__memmgrStateInitUnlocked__)(struct OBJGPU *, struct MemoryManager * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__memmgrStatePreLoad__)(struct OBJGPU *, struct MemoryManager * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__memmgrStateLoad__)(struct OBJGPU *, struct MemoryManager * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__memmgrStatePostLoad__)(struct OBJGPU *, struct MemoryManager * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__memmgrStatePreUnload__)(struct OBJGPU *, struct MemoryManager * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__memmgrStateUnload__)(struct OBJGPU *, struct MemoryManager * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__memmgrStatePostUnload__)(struct OBJGPU *, struct MemoryManager * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + void (*__memmgrStateDestroy__)(struct OBJGPU *, struct MemoryManager * /*this*/); // virtual inherited (engstate) base (engstate) + NvBool (*__memmgrIsPresent__)(struct OBJGPU *, struct MemoryManager * /*this*/); // virtual inherited (engstate) base (engstate) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__MemoryManager { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__OBJENGSTATE metadata__OBJENGSTATE; + const struct NVOC_VTABLE__MemoryManager vtable; +}; + +#ifndef __NVOC_CLASS_MemoryManager_TYPEDEF__ +#define __NVOC_CLASS_MemoryManager_TYPEDEF__ +typedef struct MemoryManager MemoryManager; +#endif /* __NVOC_CLASS_MemoryManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryManager +#define __nvoc_class_id_MemoryManager 0x22ad47 +#endif /* __nvoc_class_id_MemoryManager */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryManager; + +#define __staticCast_MemoryManager(pThis) \ + ((pThis)->__nvoc_pbase_MemoryManager) + +#ifdef __nvoc_mem_mgr_h_disabled +#define __dynamicCast_MemoryManager(pThis) ((MemoryManager*) NULL) +#else //__nvoc_mem_mgr_h_disabled +#define 
__dynamicCast_MemoryManager(pThis) \ + ((MemoryManager*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(MemoryManager))) +#endif //__nvoc_mem_mgr_h_disabled + +// Property macros +#define PDB_PROP_MEMMGR_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_MEMMGR_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_MemoryManager(MemoryManager**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_MemoryManager(MemoryManager**, Dynamic*, NvU32); +#define __objCreate_MemoryManager(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_MemoryManager((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros +#define memmgrConstructEngine_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateConstructEngine__ +#define memmgrConstructEngine(pGpu, pEngstate, arg3) memmgrConstructEngine_DISPATCH(pGpu, pEngstate, arg3) +#define memmgrInitMissing_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateInitMissing__ +#define memmgrInitMissing(pGpu, pEngstate) memmgrInitMissing_DISPATCH(pGpu, pEngstate) +#define memmgrStatePreInitLocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreInitLocked__ +#define memmgrStatePreInitLocked(pGpu, pEngstate) memmgrStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define memmgrStatePreInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreInitUnlocked__ +#define memmgrStatePreInitUnlocked(pGpu, pEngstate) memmgrStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define memmgrStateInitLocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStateInitLocked__ +#define memmgrStateInitLocked(pGpu, pEngstate) memmgrStateInitLocked_DISPATCH(pGpu, pEngstate) +#define memmgrStateInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStateInitUnlocked__ +#define memmgrStateInitUnlocked(pGpu, pEngstate) memmgrStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define memmgrStatePreLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreLoad__ +#define memmgrStatePreLoad(pGpu, pEngstate, arg3) memmgrStatePreLoad_DISPATCH(pGpu, pEngstate, arg3) +#define memmgrStateLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStateLoad__ +#define memmgrStateLoad(pGpu, pEngstate, arg3) memmgrStateLoad_DISPATCH(pGpu, pEngstate, arg3) +#define memmgrStatePostLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePostLoad__ +#define memmgrStatePostLoad(pGpu, pEngstate, arg3) memmgrStatePostLoad_DISPATCH(pGpu, pEngstate, arg3) +#define memmgrStatePreUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreUnload__ +#define memmgrStatePreUnload(pGpu, pEngstate, arg3) memmgrStatePreUnload_DISPATCH(pGpu, pEngstate, arg3) +#define memmgrStateUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStateUnload__ +#define memmgrStateUnload(pGpu, pEngstate, arg3) memmgrStateUnload_DISPATCH(pGpu, pEngstate, arg3) +#define memmgrStatePostUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePostUnload__ +#define memmgrStatePostUnload(pGpu, pEngstate, arg3) memmgrStatePostUnload_DISPATCH(pGpu, pEngstate, 
arg3) +#define memmgrStateDestroy_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStateDestroy__ +#define memmgrStateDestroy(pGpu, pEngstate) memmgrStateDestroy_DISPATCH(pGpu, pEngstate) +#define memmgrIsPresent_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateIsPresent__ +#define memmgrIsPresent(pGpu, pEngstate) memmgrIsPresent_DISPATCH(pGpu, pEngstate) + +// Dispatch functions +static inline NV_STATUS memmgrConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, ENGDESCRIPTOR arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__memmgrConstructEngine__(pGpu, pEngstate, arg3); +} + +static inline void memmgrInitMissing_DISPATCH(struct OBJGPU *pGpu, struct MemoryManager *pEngstate) { + pEngstate->__nvoc_metadata_ptr->vtable.__memmgrInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS memmgrStatePreInitLocked_DISPATCH(struct OBJGPU *pGpu, struct MemoryManager *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__memmgrStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS memmgrStatePreInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct MemoryManager *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__memmgrStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS memmgrStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct MemoryManager *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__memmgrStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS memmgrStateInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct MemoryManager *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__memmgrStateInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS memmgrStatePreLoad_DISPATCH(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__memmgrStatePreLoad__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS memmgrStateLoad_DISPATCH(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__memmgrStateLoad__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS memmgrStatePostLoad_DISPATCH(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__memmgrStatePostLoad__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS memmgrStatePreUnload_DISPATCH(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__memmgrStatePreUnload__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS memmgrStateUnload_DISPATCH(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__memmgrStateUnload__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS memmgrStatePostUnload_DISPATCH(struct OBJGPU *pGpu, struct MemoryManager *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__memmgrStatePostUnload__(pGpu, pEngstate, arg3); +} + +static inline void memmgrStateDestroy_DISPATCH(struct OBJGPU *pGpu, struct MemoryManager *pEngstate) { + pEngstate->__nvoc_metadata_ptr->vtable.__memmgrStateDestroy__(pGpu, pEngstate); +} + +static inline NvBool memmgrIsPresent_DISPATCH(struct OBJGPU *pGpu, struct MemoryManager *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__memmgrIsPresent__(pGpu, pEngstate); +} + +static inline NvU64 memmgrDeterminePageSize_b845ef(struct MemoryManager *pMemoryManager, 
NvHandle hClient, NvU64 memSize, NvU32 memFormat, NvU32 pageFormatFlags, NvU32 *pRetAttr, NvU32 *pRetAttr2) { + return 4096; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU64 memmgrDeterminePageSize(struct MemoryManager *pMemoryManager, NvHandle hClient, NvU64 memSize, NvU32 memFormat, NvU32 pageFormatFlags, NvU32 *pRetAttr, NvU32 *pRetAttr2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrDeterminePageSize(pMemoryManager, hClient, memSize, memFormat, pageFormatFlags, pRetAttr, pRetAttr2) memmgrDeterminePageSize_b845ef(pMemoryManager, hClient, memSize, memFormat, pageFormatFlags, pRetAttr, pRetAttr2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrDeterminePageSize_HAL(pMemoryManager, hClient, memSize, memFormat, pageFormatFlags, pRetAttr, pRetAttr2) memmgrDeterminePageSize(pMemoryManager, hClient, memSize, memFormat, pageFormatFlags, pRetAttr, pRetAttr2) + +static inline NV_STATUS memmgrFreeHwResources_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg3) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrFreeHwResources(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFreeHwResources(pGpu, pMemoryManager, arg3) memmgrFreeHwResources_46f6a7(pGpu, pMemoryManager, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrFreeHwResources_HAL(pGpu, pMemoryManager, arg3) memmgrFreeHwResources(pGpu, pMemoryManager, arg3) + +static inline NV_STATUS memmgrScrubInit_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrScrubInit(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubInit(pGpu, pMemoryManager) memmgrScrubInit_56cd7a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubInit_HAL(pGpu, pMemoryManager) memmgrScrubInit(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrScrubHandlePostSchedulingEnable_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrScrubHandlePostSchedulingEnable(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubHandlePostSchedulingEnable(pGpu, pMemoryManager) memmgrScrubHandlePostSchedulingEnable_56cd7a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubHandlePostSchedulingEnable_HAL(pGpu, pMemoryManager) memmgrScrubHandlePostSchedulingEnable(pGpu, pMemoryManager) + +static inline void memmgrGetScrubState_f2d351(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg3, NvU64 *arg4, NvBool *arg5) { + NV_ASSERT_PRECOMP(0); +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrGetScrubState(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg3, NvU64 *arg4, NvBool *arg5) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetScrubState(pGpu, pMemoryManager, arg3, arg4, arg5) 
memmgrGetScrubState_f2d351(pGpu, pMemoryManager, arg3, arg4, arg5) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetScrubState_HAL(pGpu, pMemoryManager, arg3, arg4, arg5) memmgrGetScrubState(pGpu, pMemoryManager, arg3, arg4, arg5) + +static inline void memmgrScrubInternalRegions_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrScrubInternalRegions(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubInternalRegions(pGpu, pMemoryManager) memmgrScrubInternalRegions_b3696a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubInternalRegions_HAL(pGpu, pMemoryManager) memmgrScrubInternalRegions(pGpu, pMemoryManager) + +static inline NvBool memmgrEccScrubInProgress_3dd2c9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_FALSE; +} + +static inline NvBool memmgrEccScrubInProgress_88bc07(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_TRUE; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrEccScrubInProgress(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrEccScrubInProgress(pGpu, pMemoryManager) memmgrEccScrubInProgress_3dd2c9(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrEccScrubInProgress_HAL(pGpu, pMemoryManager) memmgrEccScrubInProgress(pGpu, pMemoryManager) + +static inline void memmgrAsyncScrubRegion_f2d351(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 arg3, NvU64 arg4) { + NV_ASSERT_PRECOMP(0); +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrAsyncScrubRegion(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 arg3, NvU64 arg4) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAsyncScrubRegion(pGpu, pMemoryManager, arg3, arg4) memmgrAsyncScrubRegion_f2d351(pGpu, pMemoryManager, arg3, arg4) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrAsyncScrubRegion_HAL(pGpu, pMemoryManager, arg3, arg4) memmgrAsyncScrubRegion(pGpu, pMemoryManager, arg3, arg4) + +static inline NV_STATUS memmgrScrubHandlePreSchedulingDisable_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrScrubHandlePreSchedulingDisable(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubHandlePreSchedulingDisable(pGpu, pMemoryManager) memmgrScrubHandlePreSchedulingDisable_56cd7a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubHandlePreSchedulingDisable_HAL(pGpu, pMemoryManager) memmgrScrubHandlePreSchedulingDisable(pGpu, pMemoryManager) + +static inline void memmgrScrubDestroy_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrScrubDestroy(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubDestroy(pGpu, pMemoryManager) memmgrScrubDestroy_b3696a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + 
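+// (Reading the stub pattern repeated throughout this header: each routine
+// has a suffix-tagged inline default body -- _56cd7a returns NV_OK, _46f6a7
+// returns NV_ERR_NOT_SUPPORTED, _b3696a is an empty no-op, _3dd2c9/_88bc07
+// return NV_FALSE/NV_TRUE, and _f2d351/_92bfc3 assert -- plus an
+// __nvoc_mem_mgr_h_disabled branch whose stub fires NV_ASSERT_FAILED_PRECOMP
+// when the class is compiled out, and a _HAL alias, as below, that routes
+// HAL call sites to the same entry point.)
+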
+#define memmgrScrubDestroy_HAL(pGpu, pMemoryManager) memmgrScrubDestroy(pGpu, pMemoryManager) + +static inline void memmgrScrubMemory_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, RmPhysAddr arg3, NvU64 arg4) { + return; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrScrubMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, RmPhysAddr arg3, NvU64 arg4) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubMemory(pGpu, pMemoryManager, arg3, arg4) memmgrScrubMemory_b3696a(pGpu, pMemoryManager, arg3, arg4) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubMemory_HAL(pGpu, pMemoryManager, arg3, arg4) memmgrScrubMemory(pGpu, pMemoryManager, arg3, arg4) + +static inline NV_STATUS memmgrMemUtilsMemSetBlocking_92bfc3(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3, RmPhysAddr arg4, NvU64 arg5) { + NV_ASSERT_PRECOMP(0); + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsMemSetBlocking(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3, RmPhysAddr arg4, NvU64 arg5) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsMemSetBlocking(pGpu, pMemoryManager, arg3, arg4, arg5) memmgrMemUtilsMemSetBlocking_92bfc3(pGpu, pMemoryManager, arg3, arg4, arg5) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsMemSetBlocking_HAL(pGpu, pMemoryManager, arg3, arg4, arg5) memmgrMemUtilsMemSetBlocking(pGpu, pMemoryManager, arg3, arg4, arg5) + +static inline NV_STATUS memmgrMemUtilsMemSet_92bfc3(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3, RmPhysAddr arg4, NvU64 arg5, NvU32 arg6, NvU32 *arg7) { + NV_ASSERT_PRECOMP(0); + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsMemSet(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3, RmPhysAddr arg4, NvU64 arg5, NvU32 arg6, NvU32 *arg7) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsMemSet(pGpu, pMemoryManager, arg3, arg4, arg5, arg6, arg7) memmgrMemUtilsMemSet_92bfc3(pGpu, pMemoryManager, arg3, arg4, arg5, arg6, arg7) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsMemSet_HAL(pGpu, pMemoryManager, arg3, arg4, arg5, arg6, arg7) memmgrMemUtilsMemSet(pGpu, pMemoryManager, arg3, arg4, arg5, arg6, arg7) + +static inline NV_STATUS memmgrMemUtilsAllocateEccScrubber_92bfc3(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3) { + NV_ASSERT_PRECOMP(0); + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsAllocateEccScrubber(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsAllocateEccScrubber(pGpu, pMemoryManager, arg3) memmgrMemUtilsAllocateEccScrubber_92bfc3(pGpu, pMemoryManager, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsAllocateEccScrubber_HAL(pGpu, pMemoryManager, arg3) memmgrMemUtilsAllocateEccScrubber(pGpu, pMemoryManager, arg3) + +static inline NV_STATUS memmgrMemUtilsAllocateEccAllocScrubber_92bfc3(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3) { + 
NV_ASSERT_PRECOMP(0); + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsAllocateEccAllocScrubber(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsAllocateEccAllocScrubber(pGpu, pMemoryManager, arg3) memmgrMemUtilsAllocateEccAllocScrubber_92bfc3(pGpu, pMemoryManager, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsAllocateEccAllocScrubber_HAL(pGpu, pMemoryManager, arg3) memmgrMemUtilsAllocateEccAllocScrubber(pGpu, pMemoryManager, arg3) + +static inline NV_STATUS memmgrMemUtilsChannelInitialize_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsChannelInitialize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsChannelInitialize(pGpu, pMemoryManager, arg3) memmgrMemUtilsChannelInitialize_56cd7a(pGpu, pMemoryManager, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsChannelInitialize_HAL(pGpu, pMemoryManager, arg3) memmgrMemUtilsChannelInitialize(pGpu, pMemoryManager, arg3) + +static inline NV_STATUS memmgrMemUtilsCopyEngineInitialize_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsCopyEngineInitialize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsCopyEngineInitialize(pGpu, pMemoryManager, arg3) memmgrMemUtilsCopyEngineInitialize_56cd7a(pGpu, pMemoryManager, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsCopyEngineInitialize_HAL(pGpu, pMemoryManager, arg3) memmgrMemUtilsCopyEngineInitialize(pGpu, pMemoryManager, arg3) + +static inline NV_STATUS memmgrMemUtilsSec2CtxInit_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsSec2CtxInit(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsSec2CtxInit(pGpu, pMemoryManager, arg3) memmgrMemUtilsSec2CtxInit_46f6a7(pGpu, pMemoryManager, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsSec2CtxInit_HAL(pGpu, pMemoryManager, arg3) memmgrMemUtilsSec2CtxInit(pGpu, pMemoryManager, arg3) + +static inline NV_STATUS memmgrMemUtilsGetCopyEngineClass_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *pClass) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsGetCopyEngineClass(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *pClass) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsGetCopyEngineClass(pGpu, pMemoryManager, pClass) memmgrMemUtilsGetCopyEngineClass_56cd7a(pGpu, pMemoryManager, 
pClass) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsGetCopyEngineClass_HAL(pGpu, pMemoryManager, pClass) memmgrMemUtilsGetCopyEngineClass(pGpu, pMemoryManager, pClass) + +static inline NV_STATUS memmgrMemUtilsCreateMemoryAlias_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsCreateMemoryAlias(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsCreateMemoryAlias(pGpu, pMemoryManager, arg3) memmgrMemUtilsCreateMemoryAlias_56cd7a(pGpu, pMemoryManager, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsCreateMemoryAlias_HAL(pGpu, pMemoryManager, arg3) memmgrMemUtilsCreateMemoryAlias(pGpu, pMemoryManager, arg3) + +static inline NvBool memmgrMemUtilsCheckMemoryFastScrubEnable_3dd2c9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg3, NvBool arg4, RmPhysAddr arg5, NvU32 arg6, NV_ADDRESS_SPACE arg7) { + return NV_FALSE; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrMemUtilsCheckMemoryFastScrubEnable(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg3, NvBool arg4, RmPhysAddr arg5, NvU32 arg6, NV_ADDRESS_SPACE arg7) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsCheckMemoryFastScrubEnable(pGpu, pMemoryManager, arg3, arg4, arg5, arg6, arg7) memmgrMemUtilsCheckMemoryFastScrubEnable_3dd2c9(pGpu, pMemoryManager, arg3, arg4, arg5, arg6, arg7) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsCheckMemoryFastScrubEnable_HAL(pGpu, pMemoryManager, arg3, arg4, arg5, arg6, arg7) memmgrMemUtilsCheckMemoryFastScrubEnable(pGpu, pMemoryManager, arg3, arg4, arg5, arg6, arg7) + +static inline NV_STATUS memmgrAllocHal_92bfc3(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo) { + NV_ASSERT_PRECOMP(0); + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocHal(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocHal(pGpu, pMemoryManager, pFbAllocInfo) memmgrAllocHal_92bfc3(pGpu, pMemoryManager, pFbAllocInfo) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrAllocHal_HAL(pGpu, pMemoryManager, pFbAllocInfo) memmgrAllocHal(pGpu, pMemoryManager, pFbAllocInfo) + +static inline NV_STATUS memmgrFreeHal_92bfc3(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo, PRMTIMEOUT pTimeout) { + NV_ASSERT_PRECOMP(0); + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrFreeHal(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo, PRMTIMEOUT pTimeout) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFreeHal(pGpu, pMemoryManager, pFbAllocInfo, pTimeout) memmgrFreeHal_92bfc3(pGpu, pMemoryManager, pFbAllocInfo, pTimeout) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrFreeHal_HAL(pGpu, pMemoryManager, pFbAllocInfo, pTimeout) memmgrFreeHal(pGpu, pMemoryManager, pFbAllocInfo, pTimeout) + +static inline 
NV_STATUS memmgrUpdateSurfaceCompression_5baef9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, Memory *arg3, NvBool arg4) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrUpdateSurfaceCompression(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, Memory *arg3, NvBool arg4) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrUpdateSurfaceCompression(pGpu, pMemoryManager, arg3, arg4) memmgrUpdateSurfaceCompression_5baef9(pGpu, pMemoryManager, arg3, arg4) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrUpdateSurfaceCompression_HAL(pGpu, pMemoryManager, arg3, arg4) memmgrUpdateSurfaceCompression(pGpu, pMemoryManager, arg3, arg4) + +static inline NV_STATUS memmgrGetBankPlacementData_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *pBankPlacementLowData) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetBankPlacementData(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *pBankPlacementLowData) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetBankPlacementData(pGpu, pMemoryManager, pBankPlacementLowData) memmgrGetBankPlacementData_46f6a7(pGpu, pMemoryManager, pBankPlacementLowData) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetBankPlacementData_HAL(pGpu, pMemoryManager, pBankPlacementLowData) memmgrGetBankPlacementData(pGpu, pMemoryManager, pBankPlacementLowData) + +static inline void memmgrDirtyForPmTest_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvBool partialDirty) { + return; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrDirtyForPmTest(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvBool partialDirty) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrDirtyForPmTest(pGpu, pMemoryManager, partialDirty) memmgrDirtyForPmTest_b3696a(pGpu, pMemoryManager, partialDirty) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrDirtyForPmTest_HAL(pGpu, pMemoryManager, partialDirty) memmgrDirtyForPmTest(pGpu, pMemoryManager, partialDirty) + +static inline NvU64 memmgrGetReservedHeapSizeMb_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU64 memmgrGetReservedHeapSizeMb(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetReservedHeapSizeMb(pGpu, pMemoryManager) memmgrGetReservedHeapSizeMb_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetReservedHeapSizeMb_HAL(pGpu, pMemoryManager) memmgrGetReservedHeapSizeMb(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrAllocDetermineAlignment_5baef9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pMemSize, NvU64 *pAlign, NvU64 alignPad, NvU32 allocFlags, NvU32 retAttr, NvU32 retAttr2, NvU64 hwAlignment) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocDetermineAlignment(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pMemSize, NvU64 *pAlign, NvU64 alignPad, NvU32 allocFlags, NvU32 retAttr, NvU32 retAttr2, NvU64 hwAlignment) { + 
NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocDetermineAlignment(pGpu, pMemoryManager, pMemSize, pAlign, alignPad, allocFlags, retAttr, retAttr2, hwAlignment) memmgrAllocDetermineAlignment_5baef9(pGpu, pMemoryManager, pMemSize, pAlign, alignPad, allocFlags, retAttr, retAttr2, hwAlignment) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrAllocDetermineAlignment_HAL(pGpu, pMemoryManager, pMemSize, pAlign, alignPad, allocFlags, retAttr, retAttr2, hwAlignment) memmgrAllocDetermineAlignment(pGpu, pMemoryManager, pMemSize, pAlign, alignPad, allocFlags, retAttr, retAttr2, hwAlignment) + +static inline NV_STATUS memmgrInitFbRegionsHal_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrInitFbRegionsHal(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrInitFbRegionsHal(pGpu, pMemoryManager) memmgrInitFbRegionsHal_56cd7a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrInitFbRegionsHal_HAL(pGpu, pMemoryManager) memmgrInitFbRegionsHal(pGpu, pMemoryManager) + +static inline NvU64 memmgrGetMaxContextSize_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU64 memmgrGetMaxContextSize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetMaxContextSize(pGpu, pMemoryManager) memmgrGetMaxContextSize_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetMaxContextSize_HAL(pGpu, pMemoryManager) memmgrGetMaxContextSize(pGpu, pMemoryManager) + +static inline void memmgrHandleSizeOverrides_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrHandleSizeOverrides(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrHandleSizeOverrides(pGpu, pMemoryManager) memmgrHandleSizeOverrides_b3696a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrHandleSizeOverrides_HAL(pGpu, pMemoryManager) memmgrHandleSizeOverrides(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrFinishHandleSizeOverrides_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrFinishHandleSizeOverrides(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFinishHandleSizeOverrides(pGpu, pMemoryManager) memmgrFinishHandleSizeOverrides_56cd7a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrFinishHandleSizeOverrides_HAL(pGpu, pMemoryManager) memmgrFinishHandleSizeOverrides(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrGetBAR1InfoForDevice_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Device *pDevice, PGETBAR1INFO bar1Info) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS 
memmgrGetBAR1InfoForDevice(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Device *pDevice, PGETBAR1INFO bar1Info) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetBAR1InfoForDevice(pGpu, pMemoryManager, pDevice, bar1Info) memmgrGetBAR1InfoForDevice_46f6a7(pGpu, pMemoryManager, pDevice, bar1Info) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetBAR1InfoForDevice_HAL(pGpu, pMemoryManager, pDevice, bar1Info) memmgrGetBAR1InfoForDevice(pGpu, pMemoryManager, pDevice, bar1Info) + +static inline NvU64 memmgrGetFbTaxSize_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU64 memmgrGetFbTaxSize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetFbTaxSize(pGpu, pMemoryManager) memmgrGetFbTaxSize_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetFbTaxSize_HAL(pGpu, pMemoryManager) memmgrGetFbTaxSize(pGpu, pMemoryManager) + +NvU64 memmgrGetVgpuHostRmReservedFb_KERNEL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 vgpuTypeId); + +static inline NvU64 memmgrGetVgpuHostRmReservedFb_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 vgpuTypeId) { + return 0; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU64 memmgrGetVgpuHostRmReservedFb(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 vgpuTypeId) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetVgpuHostRmReservedFb(pGpu, pMemoryManager, vgpuTypeId) memmgrGetVgpuHostRmReservedFb_KERNEL(pGpu, pMemoryManager, vgpuTypeId) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetVgpuHostRmReservedFb_HAL(pGpu, pMemoryManager, vgpuTypeId) memmgrGetVgpuHostRmReservedFb(pGpu, pMemoryManager, vgpuTypeId) + +static inline void memmgrScrubRegistryOverrides_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrScrubRegistryOverrides(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubRegistryOverrides(pGpu, pMemoryManager) memmgrScrubRegistryOverrides_b3696a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubRegistryOverrides_HAL(pGpu, pMemoryManager) memmgrScrubRegistryOverrides(pGpu, pMemoryManager) + +static inline NvU64 memmgrGetRsvdSizeForSr_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU64 memmgrGetRsvdSizeForSr(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetRsvdSizeForSr(pGpu, pMemoryManager) memmgrGetRsvdSizeForSr_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetRsvdSizeForSr_HAL(pGpu, pMemoryManager) memmgrGetRsvdSizeForSr(pGpu, pMemoryManager) + +static inline NvBool memmgrVerifyDepthSurfaceAttrs_88bc07(struct MemoryManager *pMemoryManager, NvU32 arg2, NvU32 arg3) { + return NV_TRUE; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrVerifyDepthSurfaceAttrs(struct MemoryManager 
*pMemoryManager, NvU32 arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrVerifyDepthSurfaceAttrs(pMemoryManager, arg2, arg3) memmgrVerifyDepthSurfaceAttrs_88bc07(pMemoryManager, arg2, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrVerifyDepthSurfaceAttrs_HAL(pMemoryManager, arg2, arg3) memmgrVerifyDepthSurfaceAttrs(pMemoryManager, arg2, arg3) + +static inline NV_STATUS memmgrAllocMemToSaveVgaWorkspace_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR **arg3, MEMORY_DESCRIPTOR **arg4) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS memmgrAllocMemToSaveVgaWorkspace_5baef9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR **arg3, MEMORY_DESCRIPTOR **arg4) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocMemToSaveVgaWorkspace(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR **arg3, MEMORY_DESCRIPTOR **arg4) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocMemToSaveVgaWorkspace(pGpu, pMemoryManager, arg3, arg4) memmgrAllocMemToSaveVgaWorkspace_5baef9(pGpu, pMemoryManager, arg3, arg4) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrAllocMemToSaveVgaWorkspace_HAL(pGpu, pMemoryManager, arg3, arg4) memmgrAllocMemToSaveVgaWorkspace(pGpu, pMemoryManager, arg3, arg4) + +static inline NvBool memmgrComparePhysicalAddresses_86b752(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg3, NvU64 arg4, NvU32 arg5, NvU64 arg6) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_FALSE); +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrComparePhysicalAddresses(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg3, NvU64 arg4, NvU32 arg5, NvU64 arg6) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrComparePhysicalAddresses(pGpu, pMemoryManager, arg3, arg4, arg5, arg6) memmgrComparePhysicalAddresses_86b752(pGpu, pMemoryManager, arg3, arg4, arg5, arg6) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrComparePhysicalAddresses_HAL(pGpu, pMemoryManager, arg3, arg4, arg5, arg6) memmgrComparePhysicalAddresses(pGpu, pMemoryManager, arg3, arg4, arg5, arg6) + +static inline RmPhysAddr memmgrGetInvalidOffset_c732fb(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 4294967295U; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline RmPhysAddr memmgrGetInvalidOffset(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + RmPhysAddr ret; + portMemSet(&ret, 0, sizeof(RmPhysAddr)); + return ret; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetInvalidOffset(pGpu, pMemoryManager) memmgrGetInvalidOffset_c732fb(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetInvalidOffset_HAL(pGpu, pMemoryManager) memmgrGetInvalidOffset(pGpu, pMemoryManager) + +static inline NvU64 memmgrGetAddrSpaceSizeMB_474d46(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU64 memmgrGetAddrSpaceSizeMB(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled 
+#define memmgrGetAddrSpaceSizeMB(pGpu, pMemoryManager) memmgrGetAddrSpaceSizeMB_474d46(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetAddrSpaceSizeMB_HAL(pGpu, pMemoryManager) memmgrGetAddrSpaceSizeMB(pGpu, pMemoryManager) + +static inline NvU64 memmgrGetUsableMemSizeMB_13cd8d(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_PRECOMP(0); + return 0; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU64 memmgrGetUsableMemSizeMB(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetUsableMemSizeMB(pGpu, pMemoryManager) memmgrGetUsableMemSizeMB_13cd8d(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetUsableMemSizeMB_HAL(pGpu, pMemoryManager) memmgrGetUsableMemSizeMB(pGpu, pMemoryManager) + +static inline NvBool memmgrVerifyComprAttrs_88bc07(struct MemoryManager *pMemoryManager, NvU32 arg2, NvU32 arg3, NvU32 arg4) { + return NV_TRUE; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrVerifyComprAttrs(struct MemoryManager *pMemoryManager, NvU32 arg2, NvU32 arg3, NvU32 arg4) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrVerifyComprAttrs(pMemoryManager, arg2, arg3, arg4) memmgrVerifyComprAttrs_88bc07(pMemoryManager, arg2, arg3, arg4) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrVerifyComprAttrs_HAL(pMemoryManager, arg2, arg3, arg4) memmgrVerifyComprAttrs(pMemoryManager, arg2, arg3, arg4) + +static inline NvBool memmgrIsKindCompressible_3dd2c9(struct MemoryManager *pMemoryManager, NvU32 arg2) { + return NV_FALSE; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrIsKindCompressible(struct MemoryManager *pMemoryManager, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrIsKindCompressible(pMemoryManager, arg2) memmgrIsKindCompressible_3dd2c9(pMemoryManager, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrIsKindCompressible_HAL(pMemoryManager, arg2) memmgrIsKindCompressible(pMemoryManager, arg2) + +static inline NvBool memmgrIsKindBlocklinear_3dd2c9(struct MemoryManager *pMemoryManager, NvU32 arg2) { + return NV_FALSE; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrIsKindBlocklinear(struct MemoryManager *pMemoryManager, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrIsKindBlocklinear(pMemoryManager, arg2) memmgrIsKindBlocklinear_3dd2c9(pMemoryManager, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrIsKindBlocklinear_HAL(pMemoryManager, arg2) memmgrIsKindBlocklinear(pMemoryManager, arg2) + +static inline NvU32 memmgrGetPteKindBl_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetPteKindBl(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetPteKindBl(pGpu, pMemoryManager) memmgrGetPteKindBl_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetPteKindBl_HAL(pGpu, pMemoryManager) memmgrGetPteKindBl(pGpu, pMemoryManager) + +static inline NvU32 memmgrGetPteKindPitch_4a4dee(OBJGPU *pGpu, 
struct MemoryManager *pMemoryManager) { + return 0; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetPteKindPitch(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetPteKindPitch(pGpu, pMemoryManager) memmgrGetPteKindPitch_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetPteKindPitch_HAL(pGpu, pMemoryManager) memmgrGetPteKindPitch(pGpu, pMemoryManager) + +static inline NvU32 memmgrChooseKindZ_474d46(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg3) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrChooseKindZ(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChooseKindZ(pGpu, pMemoryManager, arg3) memmgrChooseKindZ_474d46(pGpu, pMemoryManager, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChooseKindZ_HAL(pGpu, pMemoryManager, arg3) memmgrChooseKindZ(pGpu, pMemoryManager, arg3) + +static inline NvU32 memmgrChooseKindCompressZ_474d46(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg3) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrChooseKindCompressZ(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChooseKindCompressZ(pGpu, pMemoryManager, arg3) memmgrChooseKindCompressZ_474d46(pGpu, pMemoryManager, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChooseKindCompressZ_HAL(pGpu, pMemoryManager, arg3) memmgrChooseKindCompressZ(pGpu, pMemoryManager, arg3) + +static inline NvU32 memmgrChooseKindCompressC_474d46(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg3) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrChooseKindCompressC(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChooseKindCompressC(pGpu, pMemoryManager, arg3) memmgrChooseKindCompressC_474d46(pGpu, pMemoryManager, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChooseKindCompressC_HAL(pGpu, pMemoryManager, arg3) memmgrChooseKindCompressC(pGpu, pMemoryManager, arg3) + +static inline NvU32 memmgrChooseKindCompressCForMS2_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg3) { + return 0; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrChooseKindCompressCForMS2(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChooseKindCompressCForMS2(pGpu, pMemoryManager, arg3) memmgrChooseKindCompressCForMS2_4a4dee(pGpu, pMemoryManager, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChooseKindCompressCForMS2_HAL(pGpu, pMemoryManager, arg3) memmgrChooseKindCompressCForMS2(pGpu, pMemoryManager, arg3) + +static inline NvU32 memmgrGetPteKindGenericMemoryCompressible_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + 
return 0; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetPteKindGenericMemoryCompressible(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetPteKindGenericMemoryCompressible(pGpu, pMemoryManager) memmgrGetPteKindGenericMemoryCompressible_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetPteKindGenericMemoryCompressible_HAL(pGpu, pMemoryManager) memmgrGetPteKindGenericMemoryCompressible(pGpu, pMemoryManager) + +static inline NvU32 memmgrGetUncompressedKind_474d46(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 kind, NvBool releaseReacquire) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetUncompressedKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 kind, NvBool releaseReacquire) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetUncompressedKind(pGpu, pMemoryManager, kind, releaseReacquire) memmgrGetUncompressedKind_474d46(pGpu, pMemoryManager, kind, releaseReacquire) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetUncompressedKind_HAL(pGpu, pMemoryManager, kind, releaseReacquire) memmgrGetUncompressedKind(pGpu, pMemoryManager, kind, releaseReacquire) + +static inline NV_STATUS memmgrGetUncompressedKindForMS2_5baef9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg3, NvU32 *arg4) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetUncompressedKindForMS2(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg3, NvU32 *arg4) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetUncompressedKindForMS2(pGpu, pMemoryManager, arg3, arg4) memmgrGetUncompressedKindForMS2_5baef9(pGpu, pMemoryManager, arg3, arg4) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetUncompressedKindForMS2_HAL(pGpu, pMemoryManager, arg3, arg4) memmgrGetUncompressedKindForMS2(pGpu, pMemoryManager, arg3, arg4) + +static inline NvU32 memmgrGetCompressedKind_d1515c(struct MemoryManager *pMemoryManager, NvU32 kind, NvBool bDisablePlc) { + NV_ASSERT_OR_RETURN_PRECOMP(0, kind); +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetCompressedKind(struct MemoryManager *pMemoryManager, NvU32 kind, NvBool bDisablePlc) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetCompressedKind(pMemoryManager, kind, bDisablePlc) memmgrGetCompressedKind_d1515c(pMemoryManager, kind, bDisablePlc) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetCompressedKind_HAL(pMemoryManager, kind, bDisablePlc) memmgrGetCompressedKind(pMemoryManager, kind, bDisablePlc) + +static inline NV_STATUS memmgrChooseKind_474d46(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg3, NvU32 arg4, NvU32 *arg5) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrChooseKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg3, NvU32 arg4, NvU32 *arg5) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChooseKind(pGpu, 
pMemoryManager, arg3, arg4, arg5) memmgrChooseKind_474d46(pGpu, pMemoryManager, arg3, arg4, arg5) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChooseKind_HAL(pGpu, pMemoryManager, arg3, arg4, arg5) memmgrChooseKind(pGpu, pMemoryManager, arg3, arg4, arg5) + +NvBool memmgrIsKind_TU102(struct MemoryManager *pMemoryManager, FB_IS_KIND_OP arg2, NvU32 arg3); + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrIsKind(struct MemoryManager *pMemoryManager, FB_IS_KIND_OP arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrIsKind(pMemoryManager, arg2, arg3) memmgrIsKind_TU102(pMemoryManager, arg2, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrIsKind_HAL(pMemoryManager, arg2, arg3) memmgrIsKind(pMemoryManager, arg2, arg3) + +static inline NvU32 memmgrGetMessageKind_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetMessageKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetMessageKind(pGpu, pMemoryManager) memmgrGetMessageKind_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetMessageKind_HAL(pGpu, pMemoryManager) memmgrGetMessageKind(pGpu, pMemoryManager) + +static inline NvU32 memmgrGetDefaultPteKindForNoHandle_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetDefaultPteKindForNoHandle(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetDefaultPteKindForNoHandle(pGpu, pMemoryManager) memmgrGetDefaultPteKindForNoHandle_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetDefaultPteKindForNoHandle_HAL(pGpu, pMemoryManager) memmgrGetDefaultPteKindForNoHandle(pGpu, pMemoryManager) + +NvBool memmgrIsSurfaceBlockLinear_TU102(struct MemoryManager *pMemoryManager, Memory *arg2, NvU32 arg3); + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrIsSurfaceBlockLinear(struct MemoryManager *pMemoryManager, Memory *arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrIsSurfaceBlockLinear(pMemoryManager, arg2, arg3) memmgrIsSurfaceBlockLinear_TU102(pMemoryManager, arg2, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrIsSurfaceBlockLinear_HAL(pMemoryManager, arg2, arg3) memmgrIsSurfaceBlockLinear(pMemoryManager, arg2, arg3) + +static inline NV_STATUS memmgrGetFlaKind_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *arg3) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetFlaKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetFlaKind(pGpu, pMemoryManager, arg3) memmgrGetFlaKind_46f6a7(pGpu, pMemoryManager, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetFlaKind_HAL(pGpu, pMemoryManager, arg3) memmgrGetFlaKind(pGpu, pMemoryManager, arg3) + +static inline NvBool memmgrIsMemDescSupportedByFla_46f6a7(OBJGPU *pGpu, 
struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc) {
+    /* NvBool stub: an NV_STATUS code here would silently read as NV_TRUE */
+    return NV_FALSE;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NvBool memmgrIsMemDescSupportedByFla(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrIsMemDescSupportedByFla(pGpu, pMemoryManager, pMemDesc) memmgrIsMemDescSupportedByFla_46f6a7(pGpu, pMemoryManager, pMemDesc)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrIsMemDescSupportedByFla_HAL(pGpu, pMemoryManager, pMemDesc) memmgrIsMemDescSupportedByFla(pGpu, pMemoryManager, pMemDesc)
+
+static inline NvBool memmgrIsValidFlaPageSize_3dd2c9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 pageSize, NvBool bIsMulticast) {
+    return NV_FALSE;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NvBool memmgrIsValidFlaPageSize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 pageSize, NvBool bIsMulticast) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrIsValidFlaPageSize(pGpu, pMemoryManager, pageSize, bIsMulticast) memmgrIsValidFlaPageSize_3dd2c9(pGpu, pMemoryManager, pageSize, bIsMulticast)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrIsValidFlaPageSize_HAL(pGpu, pMemoryManager, pageSize, bIsMulticast) memmgrIsValidFlaPageSize(pGpu, pMemoryManager, pageSize, bIsMulticast)
+
+static inline NvU32 memmgrGetHwPteKindFromSwPteKind_6a0a80(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 pteKind) {
+    return pteKind;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NvU32 memmgrGetHwPteKindFromSwPteKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 pteKind) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return 0;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetHwPteKindFromSwPteKind(pGpu, pMemoryManager, pteKind) memmgrGetHwPteKindFromSwPteKind_6a0a80(pGpu, pMemoryManager, pteKind)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrGetHwPteKindFromSwPteKind_HAL(pGpu, pMemoryManager, pteKind) memmgrGetHwPteKindFromSwPteKind(pGpu, pMemoryManager, pteKind)
+
+static inline NvU32 memmgrGetSwPteKindFromHwPteKind_6a0a80(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 pteKind) {
+    return pteKind;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NvU32 memmgrGetSwPteKindFromHwPteKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 pteKind) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return 0;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetSwPteKindFromHwPteKind(pGpu, pMemoryManager, pteKind) memmgrGetSwPteKindFromHwPteKind_6a0a80(pGpu, pMemoryManager, pteKind)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrGetSwPteKindFromHwPteKind_HAL(pGpu, pMemoryManager, pteKind) memmgrGetSwPteKindFromHwPteKind(pGpu, pMemoryManager, pteKind)
+
+static inline void memmgrGetPteKindForScrubber_f2d351(struct MemoryManager *pMemoryManager, NvU32 *arg2) {
+    NV_ASSERT_PRECOMP(0);
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline void memmgrGetPteKindForScrubber(struct MemoryManager *pMemoryManager, NvU32 *arg2) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetPteKindForScrubber(pMemoryManager, arg2) memmgrGetPteKindForScrubber_f2d351(pMemoryManager, arg2)
+#endif //__nvoc_mem_mgr_h_disabled
+
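+/*
+ * How a _HAL call resolves here, traced for the entry point defined just
+ * below (illustrative, assuming the usual build where
+ * __nvoc_mem_mgr_h_disabled is NOT defined):
+ *
+ *     NvU32 kind;
+ *     memmgrGetPteKindForScrubber_HAL(pMemoryManager, &kind);
+ *     // -> memmgrGetPteKindForScrubber(pMemoryManager, &kind)
+ *     // -> memmgrGetPteKindForScrubber_f2d351(pMemoryManager, &kind),
+ *     //    i.e. an NV_ASSERT_PRECOMP(0) stub on configurations with no
+ *     //    scrubber PTE kind.
+ */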
+#define memmgrGetPteKindForScrubber_HAL(pMemoryManager, arg2) memmgrGetPteKindForScrubber(pMemoryManager, arg2)
+
+static inline NvU32 memmgrGetCtagOffsetFromParams_1a0c2b(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg3) {
+    return -1;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NvU32 memmgrGetCtagOffsetFromParams(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg3) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return 0;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetCtagOffsetFromParams(pGpu, pMemoryManager, arg3) memmgrGetCtagOffsetFromParams_1a0c2b(pGpu, pMemoryManager, arg3)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrGetCtagOffsetFromParams_HAL(pGpu, pMemoryManager, arg3) memmgrGetCtagOffsetFromParams(pGpu, pMemoryManager, arg3)
+
+static inline void memmgrSetCtagOffsetInParams_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg3, NvU32 arg4) {
+    return;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline void memmgrSetCtagOffsetInParams(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg3, NvU32 arg4) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrSetCtagOffsetInParams(pGpu, pMemoryManager, arg3, arg4) memmgrSetCtagOffsetInParams_b3696a(pGpu, pMemoryManager, arg3, arg4)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrSetCtagOffsetInParams_HAL(pGpu, pMemoryManager, arg3, arg4) memmgrSetCtagOffsetInParams(pGpu, pMemoryManager, arg3, arg4)
+
+static inline NvU32 memmgrDetermineComptag_13cd8d(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, RmPhysAddr arg3) {
+    NV_ASSERT_PRECOMP(0);
+    return 0;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NvU32 memmgrDetermineComptag(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, RmPhysAddr arg3) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return 0;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrDetermineComptag(pGpu, pMemoryManager, arg3) memmgrDetermineComptag_13cd8d(pGpu, pMemoryManager, arg3)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrDetermineComptag_HAL(pGpu, pMemoryManager, arg3) memmgrDetermineComptag(pGpu, pMemoryManager, arg3)
+
+static inline void memmgrChannelPushSemaphoreMethodsBlock_f2d351(struct MemoryManager *pMemoryManager, NvU32 arg2, NvU64 arg3, NvU32 arg4, NvU32 **arg5) {
+    NV_ASSERT_PRECOMP(0);
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline void memmgrChannelPushSemaphoreMethodsBlock(struct MemoryManager *pMemoryManager, NvU32 arg2, NvU64 arg3, NvU32 arg4, NvU32 **arg5) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrChannelPushSemaphoreMethodsBlock(pMemoryManager, arg2, arg3, arg4, arg5) memmgrChannelPushSemaphoreMethodsBlock_f2d351(pMemoryManager, arg2, arg3, arg4, arg5)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrChannelPushSemaphoreMethodsBlock_HAL(pMemoryManager, arg2, arg3, arg4, arg5) memmgrChannelPushSemaphoreMethodsBlock(pMemoryManager, arg2, arg3, arg4, arg5)
+
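+/*
+ * For the two memmgrChannelPush*MethodsBlock helpers (semaphore methods
+ * above, address methods below), the trailing NvU32 ** parameter appears to
+ * be an in/out pushbuffer cursor: on configurations that bind a real body
+ * rather than the _f2d351 assert stub, the callee writes method/data words
+ * through the pointer and advances it. A hedged sketch (field and variable
+ * names are illustrative only):
+ *
+ *     NvU32 *ptr = (NvU32 *)pChannel->pbCpuVA;  // assumed channel mapping
+ *     memmgrChannelPushSemaphoreMethodsBlock_HAL(pMemoryManager,
+ *         subCh, semaGpuVA, payload, &ptr);     // ptr now past the block
+ */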
disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChannelPushAddressMethodsBlock(pMemoryManager, arg2, arg3, arg4, arg5) memmgrChannelPushAddressMethodsBlock_f2d351(pMemoryManager, arg2, arg3, arg4, arg5) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChannelPushAddressMethodsBlock_HAL(pMemoryManager, arg2, arg3, arg4, arg5) memmgrChannelPushAddressMethodsBlock(pMemoryManager, arg2, arg3, arg4, arg5) + +static inline NV_STATUS memmgrScrubMapDoorbellRegion_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrScrubMapDoorbellRegion(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubMapDoorbellRegion(pGpu, pMemoryManager, arg3) memmgrScrubMapDoorbellRegion_56cd7a(pGpu, pMemoryManager, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubMapDoorbellRegion_HAL(pGpu, pMemoryManager, arg3) memmgrScrubMapDoorbellRegion(pGpu, pMemoryManager, arg3) + +static inline NV_STATUS memmgrSetAllocParameters_dffb6f(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo) { + NV_ASSERT_PRECOMP(0); + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrSetAllocParameters(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetAllocParameters(pGpu, pMemoryManager, pFbAllocInfo) memmgrSetAllocParameters_dffb6f(pGpu, pMemoryManager, pFbAllocInfo) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrSetAllocParameters_HAL(pGpu, pMemoryManager, pFbAllocInfo) memmgrSetAllocParameters(pGpu, pMemoryManager, pFbAllocInfo) + +static inline void memmgrCalcReservedFbSpaceForUVM_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg3) { + return; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrCalcReservedFbSpaceForUVM(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrCalcReservedFbSpaceForUVM(pGpu, pMemoryManager, arg3) memmgrCalcReservedFbSpaceForUVM_b3696a(pGpu, pMemoryManager, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrCalcReservedFbSpaceForUVM_HAL(pGpu, pMemoryManager, arg3) memmgrCalcReservedFbSpaceForUVM(pGpu, pMemoryManager, arg3) + +static inline void memmgrCalcReservedFbSpaceHal_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg3, NvU64 *arg4, NvU64 *arg5) { + return; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrCalcReservedFbSpaceHal(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg3, NvU64 *arg4, NvU64 *arg5) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrCalcReservedFbSpaceHal(pGpu, pMemoryManager, arg3, arg4, arg5) memmgrCalcReservedFbSpaceHal_b3696a(pGpu, pMemoryManager, arg3, arg4, arg5) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrCalcReservedFbSpaceHal_HAL(pGpu, pMemoryManager, arg3, arg4, arg5) memmgrCalcReservedFbSpaceHal(pGpu, pMemoryManager, arg3, arg4, arg5) + +static inline NvU32 memmgrGetGrHeapReservationSize_4a4dee(OBJGPU *pGpu, struct MemoryManager 
*pMemoryManager) { + return 0; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetGrHeapReservationSize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetGrHeapReservationSize(pGpu, pMemoryManager) memmgrGetGrHeapReservationSize_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetGrHeapReservationSize_HAL(pGpu, pMemoryManager) memmgrGetGrHeapReservationSize(pGpu, pMemoryManager) + +static inline NvU32 memmgrGetRunlistEntriesReservedFbSpace_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetRunlistEntriesReservedFbSpace(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetRunlistEntriesReservedFbSpace(pGpu, pMemoryManager) memmgrGetRunlistEntriesReservedFbSpace_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetRunlistEntriesReservedFbSpace_HAL(pGpu, pMemoryManager) memmgrGetRunlistEntriesReservedFbSpace(pGpu, pMemoryManager) + +static inline NvU32 memmgrGetUserdReservedFbSpace_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetUserdReservedFbSpace(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetUserdReservedFbSpace(pGpu, pMemoryManager) memmgrGetUserdReservedFbSpace_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetUserdReservedFbSpace_HAL(pGpu, pMemoryManager) memmgrGetUserdReservedFbSpace(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrCheckReservedMemorySize_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrCheckReservedMemorySize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrCheckReservedMemorySize(pGpu, pMemoryManager) memmgrCheckReservedMemorySize_56cd7a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrCheckReservedMemorySize_HAL(pGpu, pMemoryManager) memmgrCheckReservedMemorySize(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrInitReservedMemory_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 arg3) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrInitReservedMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrInitReservedMemory(pGpu, pMemoryManager, arg3) memmgrInitReservedMemory_56cd7a(pGpu, pMemoryManager, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrInitReservedMemory_HAL(pGpu, pMemoryManager, arg3) memmgrInitReservedMemory(pGpu, pMemoryManager, arg3) + +static inline NV_STATUS memmgrPreInitReservedMemory_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS 
memmgrPreInitReservedMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrPreInitReservedMemory(pGpu, pMemoryManager) memmgrPreInitReservedMemory_56cd7a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrPreInitReservedMemory_HAL(pGpu, pMemoryManager) memmgrPreInitReservedMemory(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrReadMmuLock_ccda6f(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvBool *pbIsValid, NvU64 *pMmuLockLo, NvU64 *pMmuLockHi) { + *pbIsValid = NV_FALSE; + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrReadMmuLock(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvBool *pbIsValid, NvU64 *pMmuLockLo, NvU64 *pMmuLockHi) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrReadMmuLock(pGpu, pMemoryManager, pbIsValid, pMmuLockLo, pMmuLockHi) memmgrReadMmuLock_ccda6f(pGpu, pMemoryManager, pbIsValid, pMmuLockLo, pMmuLockHi) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrReadMmuLock_HAL(pGpu, pMemoryManager, pbIsValid, pMmuLockLo, pMmuLockHi) memmgrReadMmuLock(pGpu, pMemoryManager, pbIsValid, pMmuLockLo, pMmuLockHi) + +static inline NV_STATUS memmgrBlockMemLockedMemory_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrBlockMemLockedMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrBlockMemLockedMemory(pGpu, pMemoryManager) memmgrBlockMemLockedMemory_56cd7a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrBlockMemLockedMemory_HAL(pGpu, pMemoryManager) memmgrBlockMemLockedMemory(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrInsertUnprotectedRegionAtBottomOfFb_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pSize) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrInsertUnprotectedRegionAtBottomOfFb(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pSize) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrInsertUnprotectedRegionAtBottomOfFb(pGpu, pMemoryManager, pSize) memmgrInsertUnprotectedRegionAtBottomOfFb_56cd7a(pGpu, pMemoryManager, pSize) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrInsertUnprotectedRegionAtBottomOfFb_HAL(pGpu, pMemoryManager, pSize) memmgrInsertUnprotectedRegionAtBottomOfFb(pGpu, pMemoryManager, pSize) + +NV_STATUS memmgrInitBaseFbRegions_FWCLIENT(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +static inline NV_STATUS memmgrInitBaseFbRegions_5baef9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrInitBaseFbRegions(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrInitBaseFbRegions(pGpu, pMemoryManager) memmgrInitBaseFbRegions_FWCLIENT(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define 
memmgrInitBaseFbRegions_HAL(pGpu, pMemoryManager) memmgrInitBaseFbRegions(pGpu, pMemoryManager) + +static inline void memmgrGetDisablePlcKind_b3696a(struct MemoryManager *pMemoryManager, NvU32 *pteKind) { + return; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrGetDisablePlcKind(struct MemoryManager *pMemoryManager, NvU32 *pteKind) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetDisablePlcKind(pMemoryManager, pteKind) memmgrGetDisablePlcKind_b3696a(pMemoryManager, pteKind) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetDisablePlcKind_HAL(pMemoryManager, pteKind) memmgrGetDisablePlcKind(pMemoryManager, pteKind) + +static inline void memmgrEnableDynamicPageOfflining_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrEnableDynamicPageOfflining(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrEnableDynamicPageOfflining(pGpu, pMemoryManager) memmgrEnableDynamicPageOfflining_b3696a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrEnableDynamicPageOfflining_HAL(pGpu, pMemoryManager) memmgrEnableDynamicPageOfflining(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrSetMemDescPageSize_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, PMEMORY_DESCRIPTOR arg3, ADDRESS_TRANSLATION arg4, RM_ATTR_PAGE_SIZE arg5) { + return NV_OK; +} + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrSetMemDescPageSize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, PMEMORY_DESCRIPTOR arg3, ADDRESS_TRANSLATION arg4, RM_ATTR_PAGE_SIZE arg5) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetMemDescPageSize(pGpu, pMemoryManager, arg3, arg4, arg5) memmgrSetMemDescPageSize_56cd7a(pGpu, pMemoryManager, arg3, arg4, arg5) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, arg3, arg4, arg5) memmgrSetMemDescPageSize(pGpu, pMemoryManager, arg3, arg4, arg5) + +NV_STATUS memmgrSetPartitionableMem_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrSetPartitionableMem(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetPartitionableMem(pGpu, pMemoryManager) memmgrSetPartitionableMem_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrSetPartitionableMem_HAL(pGpu, pMemoryManager) memmgrSetPartitionableMem(pGpu, pMemoryManager) + +NV_STATUS memmgrAllocMIGGPUInstanceMemory_PF(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 swizzId, NvHandle *phMemory, struct NV_RANGE *pAddrRange, struct Heap **ppMemoryPartitionHeap); + + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocMIGGPUInstanceMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 swizzId, NvHandle *phMemory, struct NV_RANGE *pAddrRange, struct Heap **ppMemoryPartitionHeap) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocMIGGPUInstanceMemory(pGpu, pMemoryManager, swizzId, phMemory, pAddrRange, 
+NV_STATUS memmgrAllocMIGGPUInstanceMemory_PF(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 swizzId, NvHandle *phMemory, struct NV_RANGE *pAddrRange, struct Heap **ppMemoryPartitionHeap);
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrAllocMIGGPUInstanceMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 swizzId, NvHandle *phMemory, struct NV_RANGE *pAddrRange, struct Heap **ppMemoryPartitionHeap) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrAllocMIGGPUInstanceMemory(pGpu, pMemoryManager, swizzId, phMemory, pAddrRange, ppMemoryPartitionHeap) memmgrAllocMIGGPUInstanceMemory_PF(pGpu, pMemoryManager, swizzId, phMemory, pAddrRange, ppMemoryPartitionHeap)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrAllocMIGGPUInstanceMemory_HAL(pGpu, pMemoryManager, swizzId, phMemory, pAddrRange, ppMemoryPartitionHeap) memmgrAllocMIGGPUInstanceMemory(pGpu, pMemoryManager, swizzId, phMemory, pAddrRange, ppMemoryPartitionHeap)
+
+static inline NV_STATUS memmgrGetBlackListPagesForHeap_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Heap *pHeap) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrGetBlackListPagesForHeap(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Heap *pHeap) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetBlackListPagesForHeap(pGpu, pMemoryManager, pHeap) memmgrGetBlackListPagesForHeap_46f6a7(pGpu, pMemoryManager, pHeap)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrGetBlackListPagesForHeap_HAL(pGpu, pMemoryManager, pHeap) memmgrGetBlackListPagesForHeap(pGpu, pMemoryManager, pHeap)
+
+static inline NV_STATUS memmgrGetBlackListPages_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, BLACKLIST_ADDRESS *pBlAddrs, NvU32 *pCount) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrGetBlackListPages(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, BLACKLIST_ADDRESS *pBlAddrs, NvU32 *pCount) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetBlackListPages(pGpu, pMemoryManager, pBlAddrs, pCount) memmgrGetBlackListPages_46f6a7(pGpu, pMemoryManager, pBlAddrs, pCount)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrGetBlackListPages_HAL(pGpu, pMemoryManager, pBlAddrs, pCount) memmgrGetBlackListPages(pGpu, pMemoryManager, pBlAddrs, pCount)
+
+static inline NV_STATUS memmgrDiscoverMIGPartitionableMemoryRange_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct NV_RANGE *pMemoryRange) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrDiscoverMIGPartitionableMemoryRange(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct NV_RANGE *pMemoryRange) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrDiscoverMIGPartitionableMemoryRange(pGpu, pMemoryManager, pMemoryRange) memmgrDiscoverMIGPartitionableMemoryRange_46f6a7(pGpu, pMemoryManager, pMemoryRange)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrDiscoverMIGPartitionableMemoryRange_HAL(pGpu, pMemoryManager, pMemoryRange) memmgrDiscoverMIGPartitionableMemoryRange(pGpu, pMemoryManager, pMemoryRange)
+
+NvU32 memmgrGetFBEndReserveSizeEstimate_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager);
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NvU32 memmgrGetFBEndReserveSizeEstimate(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return 0;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetFBEndReserveSizeEstimate(pGpu, pMemoryManager) memmgrGetFBEndReserveSizeEstimate_GM107(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrGetFBEndReserveSizeEstimate_HAL(pGpu, pMemoryManager) memmgrGetFBEndReserveSizeEstimate(pGpu, pMemoryManager)
+
+static inline NV_STATUS memmgrValidateFBEndReservation_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrValidateFBEndReservation(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrValidateFBEndReservation(pGpu, pMemoryManager) memmgrValidateFBEndReservation_56cd7a(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrValidateFBEndReservation_HAL(pGpu, pMemoryManager) memmgrValidateFBEndReservation(pGpu, pMemoryManager)
+
+static inline NV_STATUS memmgrReserveMemoryForFakeWPR_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrReserveMemoryForFakeWPR(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrReserveMemoryForFakeWPR(pGpu, pMemoryManager) memmgrReserveMemoryForFakeWPR_56cd7a(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrReserveMemoryForFakeWPR_HAL(pGpu, pMemoryManager) memmgrReserveMemoryForFakeWPR(pGpu, pMemoryManager)
+
+static inline NV_STATUS memmgrReserveMemoryForPmu_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrReserveMemoryForPmu(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrReserveMemoryForPmu(pGpu, pMemoryManager) memmgrReserveMemoryForPmu_56cd7a(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrReserveMemoryForPmu_HAL(pGpu, pMemoryManager) memmgrReserveMemoryForPmu(pGpu, pMemoryManager)
+
+static inline NV_STATUS memmgrReserveVgaWorkspaceMemDescForFbsr_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrReserveVgaWorkspaceMemDescForFbsr(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrReserveVgaWorkspaceMemDescForFbsr(pGpu, pMemoryManager) memmgrReserveVgaWorkspaceMemDescForFbsr_46f6a7(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrReserveVgaWorkspaceMemDescForFbsr_HAL(pGpu, pMemoryManager) memmgrReserveVgaWorkspaceMemDescForFbsr(pGpu, pMemoryManager)
+
+static inline NV_STATUS memmgrCalculateHeapOffsetWithGSP_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *offset) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrCalculateHeapOffsetWithGSP(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *offset) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrCalculateHeapOffsetWithGSP(pGpu, pMemoryManager, offset) memmgrCalculateHeapOffsetWithGSP_46f6a7(pGpu, pMemoryManager, offset)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrCalculateHeapOffsetWithGSP_HAL(pGpu, pMemoryManager, offset) memmgrCalculateHeapOffsetWithGSP(pGpu, pMemoryManager, offset)
+
+static inline NV_STATUS memmgrGetCarveoutRegionInfo_56cd7a(POBJGPU pGpu, struct MemoryManager *pMemoryManager, NV2080_CTRL_FB_GET_CARVEOUT_REGION_INFO_PARAMS *pParams) {
+    return NV_OK;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrGetCarveoutRegionInfo(POBJGPU pGpu, struct MemoryManager *pMemoryManager, NV2080_CTRL_FB_GET_CARVEOUT_REGION_INFO_PARAMS *pParams) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetCarveoutRegionInfo(pGpu, pMemoryManager, pParams) memmgrGetCarveoutRegionInfo_56cd7a(pGpu, pMemoryManager, pParams)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrGetCarveoutRegionInfo_HAL(pGpu, pMemoryManager, pParams) memmgrGetCarveoutRegionInfo(pGpu, pMemoryManager, pParams)
+
+static inline NvBool memmgrIsMemoryIoCoherent_88bc07(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NV_MEMORY_ALLOCATION_PARAMS *pAllocData) {
+    return NV_TRUE;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NvBool memmgrIsMemoryIoCoherent(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NV_MEMORY_ALLOCATION_PARAMS *pAllocData) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrIsMemoryIoCoherent(pGpu, pMemoryManager, pAllocData) memmgrIsMemoryIoCoherent_88bc07(pGpu, pMemoryManager, pAllocData)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrIsMemoryIoCoherent_HAL(pGpu, pMemoryManager, pAllocData) memmgrIsMemoryIoCoherent(pGpu, pMemoryManager, pAllocData)
+
+static inline NV_STATUS memmgrSc7SrInitGsp_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrSc7SrInitGsp(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrSc7SrInitGsp(pGpu, pMemoryManager) memmgrSc7SrInitGsp_46f6a7(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrSc7SrInitGsp_HAL(pGpu, pMemoryManager) memmgrSc7SrInitGsp(pGpu, pMemoryManager)
+
+static inline NvU8 memmgrGetLocalizedOffset_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return 0;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NvU8 memmgrGetLocalizedOffset(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return 0;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetLocalizedOffset(pGpu, pMemoryManager) memmgrGetLocalizedOffset_4a4dee(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrGetLocalizedOffset_HAL(pGpu, pMemoryManager) memmgrGetLocalizedOffset(pGpu, pMemoryManager)
+
+static inline NvBool memmgrIsFlaSysmemSupported_3dd2c9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return NV_FALSE;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NvBool memmgrIsFlaSysmemSupported(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrIsFlaSysmemSupported(pGpu, pMemoryManager) memmgrIsFlaSysmemSupported_3dd2c9(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrIsFlaSysmemSupported_HAL(pGpu, pMemoryManager) memmgrIsFlaSysmemSupported(pGpu, pMemoryManager)
+
+static inline NvBool memmgrGetLocalizedMemorySupported_3dd2c9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return NV_FALSE;
+}
+
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NvBool memmgrGetLocalizedMemorySupported(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetLocalizedMemorySupported(pGpu, pMemoryManager) memmgrGetLocalizedMemorySupported_3dd2c9(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrGetLocalizedMemorySupported_HAL(pGpu, pMemoryManager) memmgrGetLocalizedMemorySupported(pGpu, pMemoryManager)
+
+static inline NV_STATUS memmgrSavePowerMgmtState(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return NV_OK;
+}
+
+static inline NV_STATUS memmgrRestorePowerMgmtState(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return NV_OK;
+}
+
+static inline NV_STATUS memmgrFree(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Heap *arg3, NvHandle arg4, NvHandle arg5, NvHandle arg6, NvU32 arg7, MEMORY_DESCRIPTOR *arg8) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+static inline struct Heap *memmgrGetDeviceSuballocator(struct MemoryManager *pMemoryManager, NvBool bForceSubheap) {
+    return ((void *)0);
+}
+
+static inline NV_ADDRESS_SPACE memmgrAllocGetAddrSpace(struct MemoryManager *pMemoryManager, NvU32 flags, NvU32 attr) {
+    return 2;
+}
+
+static inline void memmgrFreeFbsrMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return;
+}
+
+static inline NvBool memmgrIsLocalEgmSupported(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->bLocalEgmSupported;
+}
+
+static inline NvBool memmgrIsLocalEgmEnabled(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->bLocalEgmEnabled;
+}
+
+static inline NvU32 memmgrLocalEgmPeerId(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->localEgmPeerId;
+}
+
+static inline NvU64 memmgrLocalEgmBaseAddress(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->localEgmBasePhysAddr;
+}
+
+static inline NvBool memmgrIsScrubOnFreeEnabled(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->bScrubOnFreeEnabled;
+}
+
+static inline NvBool memmgrIsFastScrubberEnabled(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->bFastScrubberEnabled;
+}
+
+static inline NvBool memmgrUseVasForCeMemoryOps(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->bUseVasForCeMemoryOps;
+}
+
+static inline NvBool memmgrRmExecutingEccScrub(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->bRmExecutingEccScrub;
+}
+
+static inline NvBool memmgrBug1441072EccScrubWar(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->bBug1441072EccScrubWar;
+}
+
+static inline NvBool memmgrIsPmaInitialized(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->bPmaInitialized;
+}
+
+static inline void memmgrSetPmaInitialized(struct MemoryManager *pMemoryManager, NvBool val) {
+    pMemoryManager->bPmaInitialized = val;
+}
+
+static inline NvBool memmgrAreFbRegionsSupported(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->bFbRegionsSupported;
+}
+
+static inline NvBool memmgrIsPmaSupportedOnPlatform(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->bPmaSupportedOnPlatform;
+}
+
+static inline NvBool memmgrIsPmaEnabled(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->bPmaEnabled;
+}
+
+static inline NvBool memmgrIsPmaForcePersistence(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->bPmaForcePersistence;
+}
+
+static inline void memmgrSetPmaForcePersistence(struct MemoryManager *pMemoryManager, NvBool val) {
+    pMemoryManager->bPmaForcePersistence = val;
+}
+
+static inline NvBool memmgrAreClientPageTablesPmaManaged(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->bClientPageTablesPmaManaged;
+}
+
+static inline void memmgrSetClientPageTablesPmaManaged(struct MemoryManager *pMemoryManager, NvBool val) {
+    pMemoryManager->bClientPageTablesPmaManaged = val;
+}
+
+static inline NvU64 memmgrGetRsvdMemoryBase(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->rsvdMemoryBase;
+}
+
+static inline NvU64 memmgrGetRsvdMemorySize(struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->rsvdMemorySize;
+}
+
+static inline NvBool memmgrBug3922001DisableCtxBufOnSim(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return pMemoryManager->bBug3922001DisableCtxBufOnSim;
+}
+
+NV_STATUS memmgrAllocResources_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_ALLOCATION_REQUEST *pAllocRequest, FB_ALLOC_INFO *pFbAllocInfo);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrAllocResources(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_ALLOCATION_REQUEST *pAllocRequest, FB_ALLOC_INFO *pFbAllocInfo) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrAllocResources(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo) memmgrAllocResources_IMPL(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrAddMemNode_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvBool bFreeDescriptor);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrAddMemNode(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvBool bFreeDescriptor) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrAddMemNode(pGpu, pMemoryManager, pMemDesc, bFreeDescriptor) memmgrAddMemNode_IMPL(pGpu, pMemoryManager, pMemDesc, bFreeDescriptor)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrAddMemNodes_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvBool bSaveAllRmAllocations);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrAddMemNodes(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvBool bSaveAllRmAllocations) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrAddMemNodes(pGpu, pMemoryManager, bSaveAllRmAllocations) memmgrAddMemNodes_IMPL(pGpu, pMemoryManager, bSaveAllRmAllocations)
+#endif //__nvoc_mem_mgr_h_disabled
+
+void memmgrRemoveMemNodes_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline void memmgrRemoveMemNodes(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrRemoveMemNodes(pGpu, pMemoryManager) memmgrRemoveMemNodes_IMPL(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrMemCopy_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, TRANSFER_SURFACE *pSrc, NvU32 size, NvU32 flags);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrMemCopy(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, TRANSFER_SURFACE *pSrc, NvU32 size, NvU32 flags) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrMemCopy(pMemoryManager, pDst, pSrc, size, flags) memmgrMemCopy_IMPL(pMemoryManager, pDst, pSrc, size, flags)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrMemsetInBlocks_IMPL(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 value, NvU64 baseOffset, NvU64 size, NvU32 flags, NvU32 blockSize);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrMemsetInBlocks(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 value, NvU64 baseOffset, NvU64 size, NvU32 flags, NvU32 blockSize) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrMemsetInBlocks(pMemoryManager, pMemDesc, value, baseOffset, size, flags, blockSize) memmgrMemsetInBlocks_IMPL(pMemoryManager, pMemDesc, value, baseOffset, size, flags, blockSize)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrMemSet_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, NvU32 value, NvU32 size, NvU32 flags);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrMemSet(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, NvU32 value, NvU32 size, NvU32 flags) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrMemSet(pMemoryManager, pDst, value, size, flags) memmgrMemSet_IMPL(pMemoryManager, pDst, value, size, flags)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrMemWrite_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, void *pBuf, NvU64 size, NvU32 flags);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrMemWrite(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, void *pBuf, NvU64 size, NvU32 flags) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrMemWrite(pMemoryManager, pDst, pBuf, size, flags) memmgrMemWrite_IMPL(pMemoryManager, pDst, pBuf, size, flags)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrMemRead_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pSrc, void *pBuf, NvU64 size, NvU32 flags);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrMemRead(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pSrc, void *pBuf, NvU64 size, NvU32 flags) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrMemRead(pMemoryManager, pSrc, pBuf, size, flags) memmgrMemRead_IMPL(pMemoryManager, pSrc, pBuf, size, flags)
+#endif //__nvoc_mem_mgr_h_disabled
+
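/* The memmgrMemCopy/MemSet/MemWrite/MemRead wrappers above all address memory
 * through a TRANSFER_SURFACE. A hedged usage sketch follows; the surface field
 * names (pMemDesc, offset) and TRANSFER_FLAGS_NONE are assumptions for
 * illustration and are not taken from this diff. */

    static NV_STATUS writeReadbackExample(struct MemoryManager *pMemoryManager,
                                          MEMORY_DESCRIPTOR *pMemDesc)
    {
        NvU32 pattern  = 0xdeadbeef;
        NvU32 readback = 0;
        /* Assumed TRANSFER_SURFACE layout: target memdesc plus byte offset. */
        TRANSFER_SURFACE surf = { .pMemDesc = pMemDesc, .offset = 0 };

        NV_ASSERT_OK_OR_RETURN(
            memmgrMemWrite(pMemoryManager, &surf, &pattern, sizeof(pattern),
                           TRANSFER_FLAGS_NONE));
        NV_ASSERT_OK_OR_RETURN(
            memmgrMemRead(pMemoryManager, &surf, &readback, sizeof(readback),
                          TRANSFER_FLAGS_NONE));
        return (readback == pattern) ? NV_OK : NV_ERR_INVALID_STATE;
    }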
+NvU8 *memmgrMemBeginTransfer_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pTransferInfo, NvU64 shadowBufSize, NvU32 flags);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NvU8 *memmgrMemBeginTransfer(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pTransferInfo, NvU64 shadowBufSize, NvU32 flags) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NULL;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrMemBeginTransfer(pMemoryManager, pTransferInfo, shadowBufSize, flags) memmgrMemBeginTransfer_IMPL(pMemoryManager, pTransferInfo, shadowBufSize, flags)
+#endif //__nvoc_mem_mgr_h_disabled
+
+void memmgrMemEndTransfer_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pTransferInfo, NvU64 shadowBufSize, NvU32 flags);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline void memmgrMemEndTransfer(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pTransferInfo, NvU64 shadowBufSize, NvU32 flags) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrMemEndTransfer(pMemoryManager, pTransferInfo, shadowBufSize, flags) memmgrMemEndTransfer_IMPL(pMemoryManager, pTransferInfo, shadowBufSize, flags)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NvU8 *memmgrMemDescBeginTransfer_IMPL(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NvU8 *memmgrMemDescBeginTransfer(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NULL;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrMemDescBeginTransfer(pMemoryManager, pMemDesc, flags) memmgrMemDescBeginTransfer_IMPL(pMemoryManager, pMemDesc, flags)
+#endif //__nvoc_mem_mgr_h_disabled
+
+void memmgrMemDescEndTransfer_IMPL(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline void memmgrMemDescEndTransfer(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrMemDescEndTransfer(pMemoryManager, pMemDesc, flags) memmgrMemDescEndTransfer_IMPL(pMemoryManager, pMemDesc, flags)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrMemDescMemSet_IMPL(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 value, NvU32 flags);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrMemDescMemSet(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 value, NvU32 flags) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrMemDescMemSet(pMemoryManager, pMemDesc, value, flags) memmgrMemDescMemSet_IMPL(pMemoryManager, pMemDesc, value, flags)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrInitInternalChannels_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrInitInternalChannels(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrInitInternalChannels(pGpu, pMemoryManager) memmgrInitInternalChannels_IMPL(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrDestroyInternalChannels_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrDestroyInternalChannels(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrDestroyInternalChannels(pGpu, pMemoryManager) memmgrDestroyInternalChannels_IMPL(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrInitCeUtils_IMPL(struct MemoryManager *pMemoryManager, NvBool bFifoLite, NvBool bVirtualMode);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrInitCeUtils(struct MemoryManager *pMemoryManager, NvBool bFifoLite, NvBool bVirtualMode) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrInitCeUtils(pMemoryManager, bFifoLite, bVirtualMode) memmgrInitCeUtils_IMPL(pMemoryManager, bFifoLite, bVirtualMode)
+#endif //__nvoc_mem_mgr_h_disabled
+
+void memmgrDestroyCeUtils_IMPL(struct MemoryManager *pMemoryManager);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline void memmgrDestroyCeUtils(struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrDestroyCeUtils(pMemoryManager) memmgrDestroyCeUtils_IMPL(pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrGetInternalClientHandles_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Device *arg3, NvHandle *arg4, NvHandle *arg5, NvHandle *arg6);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrGetInternalClientHandles(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Device *arg3, NvHandle *arg4, NvHandle *arg5, NvHandle *arg6) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetInternalClientHandles(pGpu, pMemoryManager, arg3, arg4, arg5, arg6) memmgrGetInternalClientHandles_IMPL(pGpu, pMemoryManager, arg3, arg4, arg5, arg6)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrSetMIGPartitionableBAR1Range_IMPL(OBJGPU *arg1, struct MemoryManager *arg2);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrSetMIGPartitionableBAR1Range(OBJGPU *arg1, struct MemoryManager *arg2) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrSetMIGPartitionableBAR1Range(arg1, arg2) memmgrSetMIGPartitionableBAR1Range_IMPL(arg1, arg2)
+#endif //__nvoc_mem_mgr_h_disabled
+
+struct NV_RANGE memmgrGetMIGPartitionableBAR1Range_IMPL(OBJGPU *arg1, struct MemoryManager *arg2);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline struct NV_RANGE memmgrGetMIGPartitionableBAR1Range(OBJGPU *arg1, struct MemoryManager *arg2) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    struct NV_RANGE ret;
+    portMemSet(&ret, 0, sizeof(struct NV_RANGE));
+    return ret;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetMIGPartitionableBAR1Range(arg1, arg2) memmgrGetMIGPartitionableBAR1Range_IMPL(arg1, arg2)
+#endif //__nvoc_mem_mgr_h_disabled
+
+void memmgrSetMIGPartitionableMemoryRange_IMPL(OBJGPU *arg1, struct MemoryManager *arg2, struct NV_RANGE arg3);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline void memmgrSetMIGPartitionableMemoryRange(OBJGPU *arg1, struct MemoryManager *arg2, struct NV_RANGE arg3) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrSetMIGPartitionableMemoryRange(arg1, arg2, arg3) memmgrSetMIGPartitionableMemoryRange_IMPL(arg1, arg2, arg3)
+#endif //__nvoc_mem_mgr_h_disabled
+
+struct NV_RANGE memmgrGetMIGPartitionableMemoryRange_IMPL(OBJGPU *arg1, struct MemoryManager *arg2);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline struct NV_RANGE memmgrGetMIGPartitionableMemoryRange(OBJGPU *arg1, struct MemoryManager *arg2) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    struct NV_RANGE ret;
+    portMemSet(&ret, 0, sizeof(struct NV_RANGE));
+    return ret;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetMIGPartitionableMemoryRange(arg1, arg2) memmgrGetMIGPartitionableMemoryRange_IMPL(arg1, arg2)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrFreeMIGGPUInstanceMemory_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 swizzId, NvHandle hMemory, struct Heap **ppMemoryPartitionHeap);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrFreeMIGGPUInstanceMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 swizzId, NvHandle hMemory, struct Heap **ppMemoryPartitionHeap) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrFreeMIGGPUInstanceMemory(pGpu, pMemoryManager, swizzId, hMemory, ppMemoryPartitionHeap) memmgrFreeMIGGPUInstanceMemory_IMPL(pGpu, pMemoryManager, swizzId, hMemory, ppMemoryPartitionHeap)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrPageLevelPoolsCreate_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrPageLevelPoolsCreate(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrPageLevelPoolsCreate(pGpu, pMemoryManager) memmgrPageLevelPoolsCreate_IMPL(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+void memmgrPageLevelPoolsDestroy_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline void memmgrPageLevelPoolsDestroy(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrPageLevelPoolsDestroy(pGpu, pMemoryManager) memmgrPageLevelPoolsDestroy_IMPL(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrPageLevelPoolsGetInfo_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Device *pDevice, struct RM_POOL_ALLOC_MEM_RESERVE_INFO **arg4);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrPageLevelPoolsGetInfo(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Device *pDevice, struct RM_POOL_ALLOC_MEM_RESERVE_INFO **arg4) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrPageLevelPoolsGetInfo(pGpu, pMemoryManager, pDevice, arg4) memmgrPageLevelPoolsGetInfo_IMPL(pGpu, pMemoryManager, pDevice, arg4)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrAllocMIGMemoryAllocationInternalHandles_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrAllocMIGMemoryAllocationInternalHandles(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrAllocMIGMemoryAllocationInternalHandles(pGpu, pMemoryManager) memmgrAllocMIGMemoryAllocationInternalHandles_IMPL(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+void memmgrFreeMIGMemoryAllocationInternalHandles_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline void memmgrFreeMIGMemoryAllocationInternalHandles(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrFreeMIGMemoryAllocationInternalHandles(pGpu, pMemoryManager) memmgrFreeMIGMemoryAllocationInternalHandles_IMPL(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+void memmgrGetFreeMemoryForAllMIGGPUInstances_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pBytes);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline void memmgrGetFreeMemoryForAllMIGGPUInstances(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pBytes) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetFreeMemoryForAllMIGGPUInstances(pGpu, pMemoryManager, pBytes) memmgrGetFreeMemoryForAllMIGGPUInstances_IMPL(pGpu, pMemoryManager, pBytes)
+#endif //__nvoc_mem_mgr_h_disabled
+
+void memmgrGetTotalMemoryForAllMIGGPUInstances_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pBytes);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline void memmgrGetTotalMemoryForAllMIGGPUInstances(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pBytes) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetTotalMemoryForAllMIGGPUInstances(pGpu, pMemoryManager, pBytes) memmgrGetTotalMemoryForAllMIGGPUInstances_IMPL(pGpu, pMemoryManager, pBytes)
+#endif //__nvoc_mem_mgr_h_disabled
+
+void memmgrGetTopLevelScrubberStatus_IMPL(OBJGPU *arg1, struct MemoryManager *arg2, NvBool *pbTopLevelScrubberEnabled, NvBool *pbTopLevelScrubberConstructed);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline void memmgrGetTopLevelScrubberStatus(OBJGPU *arg1, struct MemoryManager *arg2, NvBool *pbTopLevelScrubberEnabled, NvBool *pbTopLevelScrubberConstructed) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetTopLevelScrubberStatus(arg1, arg2, pbTopLevelScrubberEnabled, pbTopLevelScrubberConstructed) memmgrGetTopLevelScrubberStatus_IMPL(arg1, arg2, pbTopLevelScrubberEnabled, pbTopLevelScrubberConstructed)
+#endif //__nvoc_mem_mgr_h_disabled
+
+MEMORY_DESCRIPTOR *memmgrMemUtilsGetMemDescFromHandle_IMPL(struct MemoryManager *pMemoryManager, NvHandle hClient, NvHandle hMemory);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline MEMORY_DESCRIPTOR *memmgrMemUtilsGetMemDescFromHandle(struct MemoryManager *pMemoryManager, NvHandle hClient, NvHandle hMemory) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NULL;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrMemUtilsGetMemDescFromHandle(pMemoryManager, hClient, hMemory) memmgrMemUtilsGetMemDescFromHandle_IMPL(pMemoryManager, hClient, hMemory)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrVerifyGspDmaOps_IMPL(OBJGPU *arg1, struct MemoryManager *arg2);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrVerifyGspDmaOps(OBJGPU *arg1, struct MemoryManager *arg2) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrVerifyGspDmaOps(arg1, arg2) memmgrVerifyGspDmaOps_IMPL(arg1, arg2)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrAllocReservedFBRegionMemdesc_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR **ppMemdesc, NvU64 rangeStart, NvU64 allocSize, NvU64 memdescFlags, NV_FB_ALLOC_RM_INTERNAL_OWNER allocTag);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrAllocReservedFBRegionMemdesc(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR **ppMemdesc, NvU64 rangeStart, NvU64 allocSize, NvU64 memdescFlags, NV_FB_ALLOC_RM_INTERNAL_OWNER allocTag) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrAllocReservedFBRegionMemdesc(pGpu, pMemoryManager, ppMemdesc, rangeStart, allocSize, memdescFlags, allocTag) memmgrAllocReservedFBRegionMemdesc_IMPL(pGpu, pMemoryManager, ppMemdesc, rangeStart, allocSize, memdescFlags, allocTag)
+#endif //__nvoc_mem_mgr_h_disabled
+
+NV_STATUS memmgrReserveMemoryForFsp_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager);
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrReserveMemoryForFsp(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrReserveMemoryForFsp(pGpu, pMemoryManager) memmgrReserveMemoryForFsp_IMPL(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#undef PRIVATE_FIELD
+
+
+#endif // MEM_MGR_H
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _G_MEM_MGR_NVOC_H_
diff --git a/src/nvidia/generated/g_mem_nvoc.c b/src/nvidia/generated/g_mem_nvoc.c
new file mode 100644
index 0000000..4b98142
--- /dev/null
+++ b/src/nvidia/generated/g_mem_nvoc.c
@@ -0,0 +1,534 @@
+#define NVOC_MEM_H_PRIVATE_ACCESS_ALLOWED
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+#include "nvtypes.h"
+#include "nvport/nvport.h"
+#include "nvport/inline/util_valist.h"
+#include "utils/nvassert.h"
+#include "g_mem_nvoc.h"
+
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check__0x4789f2 = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
+
+// Forward declarations for Memory
+void __nvoc_init__RmResource(RmResource*);
+void __nvoc_init__Memory(Memory*);
+void __nvoc_init_funcTable_Memory(Memory*);
+NV_STATUS __nvoc_ctor_Memory(Memory*, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+void __nvoc_init_dataField_Memory(Memory*);
+void __nvoc_dtor_Memory(Memory*);
+
+// Structures used within RTTI (run-time type information)
+extern const struct NVOC_CASTINFO __nvoc_castinfo__Memory;
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info__Memory;
+
+// Down-thunk(s) to bridge Memory methods from ancestors (if any)
+NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight);    // super
+NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy);    // super
+NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);    // super
+void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);    // super
+NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);    // super
+void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);    // super
+NV_STATUS __nvoc_down_thunk_Memory_resIsDuplicate(struct RsResource *pMemory, NvHandle hMemory, NvBool *pDuplicate);    // this
+NV_STATUS __nvoc_down_thunk_Memory_resControl(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);    // this
+NV_STATUS __nvoc_down_thunk_Memory_resMap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping);    // this
+NV_STATUS __nvoc_down_thunk_Memory_resUnmap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping);    // this
+NV_STATUS __nvoc_down_thunk_Memory_rmresGetMemInterMapParams(struct RmResource *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams);    // this
+NV_STATUS __nvoc_down_thunk_Memory_rmresCheckMemInterUnmap(struct RmResource *pMemory, NvBool bSubdeviceHandleProvided);    // this
+NV_STATUS __nvoc_down_thunk_Memory_rmresGetMemoryMappingDescriptor(struct RmResource *pMemory, MEMORY_DESCRIPTOR **ppMemDesc);    // this
+
+// Up-thunk(s) to bridge Memory methods to ancestors (if any)
+NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource);    // super
+NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate);    // super
+void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource);    // super
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);    // super
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);    // super
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping);    // super
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping);    // super
+NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource);    // super
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams);    // super
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams);    // super
+NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource);    // super
+void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference);    // super
+NvBool __nvoc_up_thunk_RmResource_memAccessCallback(struct Memory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight);    // this
+NvBool __nvoc_up_thunk_RmResource_memShareCallback(struct Memory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy);    // this
+NV_STATUS __nvoc_up_thunk_RmResource_memControlSerialization_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);    // this
+void __nvoc_up_thunk_RmResource_memControlSerialization_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);    // this
+NV_STATUS __nvoc_up_thunk_RmResource_memControl_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);    // this
+void __nvoc_up_thunk_RmResource_memControl_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);    // this
+NvBool __nvoc_up_thunk_RsResource_memCanCopy(struct Memory *pResource);    // this
+void __nvoc_up_thunk_RsResource_memPreDestruct(struct Memory *pResource);    // this
+NV_STATUS __nvoc_up_thunk_RsResource_memControlFilter(struct Memory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);    // this
+NvBool __nvoc_up_thunk_RsResource_memIsPartialUnmapSupported(struct Memory *pResource);    // this
+NV_STATUS __nvoc_up_thunk_RsResource_memMapTo(struct Memory *pResource, RS_RES_MAP_TO_PARAMS *pParams);    // this
+NV_STATUS __nvoc_up_thunk_RsResource_memUnmapFrom(struct Memory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams);    // this
+NvU32 __nvoc_up_thunk_RsResource_memGetRefCount(struct Memory *pResource);    // this
+void __nvoc_up_thunk_RsResource_memAddAdditionalDependants(struct RsClient *pClient, struct Memory *pResource, RsResourceRef *pReference);    // this
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_Memory =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(Memory),
+        /*classId=*/ classId(Memory),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "Memory",
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Memory,
+    /*pCastInfo=*/ &__nvoc_castinfo__Memory,
+    /*pExportInfo=*/ &__nvoc_export_info__Memory
+};
+
+#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG)
+#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0)
+#endif
+
+static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Memory[] =
+{
+    { /* [0] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
+        /*pFunc=*/      (void (*)(void)) NULL,
+#else
+        /*pFunc=*/      (void (*)(void)) memCtrlCmdGetSurfacePhysAttrLvm_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
+        /*flags=*/      0x4u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/   0x410103u,
+        /*paramSize=*/  sizeof(NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_Memory.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/       "memCtrlCmdGetSurfacePhysAttrLvm"
+#endif
+    },
+    { /* [1] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+        /*pFunc=*/      (void (*)(void)) NULL,
+#else
+        /*pFunc=*/      (void (*)(void)) memCtrlCmdGetSurfaceInfoLvm_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+        /*flags=*/      0x8u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/   0x410110u,
+        /*paramSize=*/  sizeof(NV0041_CTRL_GET_SURFACE_INFO_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_Memory.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/       "memCtrlCmdGetSurfaceInfoLvm"
+#endif
+    },
+    { /* [2] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
+        /*pFunc=*/      (void (*)(void)) NULL,
+#else
+        /*pFunc=*/      (void (*)(void)) memCtrlCmdMapMemoryForGpuAccess_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
+        /*flags=*/      0x0u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/   0x410122u,
+        /*paramSize=*/  sizeof(NV0041_CTRL_MAP_MEMORY_FOR_GPU_ACCESS_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_Memory.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/       "memCtrlCmdMapMemoryForGpuAccess"
+#endif
+    },
+    { /* [3] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
+        /*pFunc=*/      (void (*)(void)) NULL,
+#else
+        /*pFunc=*/      (void (*)(void)) memCtrlCmdUnmapMemoryForGpuAccess_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
+        /*flags=*/      0x0u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/   0x410153u,
+        /*paramSize=*/  sizeof(NV0041_CTRL_UNMAP_MEMORY_FOR_GPU_ACCESS_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_Memory.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/       "memCtrlCmdUnmapMemoryForGpuAccess"
+#endif
+    },
+
+};
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+static const struct NVOC_METADATA__Memory __nvoc_metadata__Memory = {
+    .rtti.pClassDef = &__nvoc_class_def_Memory,    // (mem) this
+    .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Memory,
+    .rtti.offset = 0,
+    .metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource,    // (rmres) super
+    .metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__RmResource.rtti.offset = NV_OFFSETOF(Memory, __nvoc_base_RmResource),
+    .metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource,    // (res) super^2
+    .metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource),
+    .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object,    // (obj) super^3
+    .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
+    .metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon,    // (rmrescmn) super^2
+    .metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
+
+    .vtable.__memIsDuplicate__ = &memIsDuplicate_IMPL,    // virtual override (res) base (rmres)
+    .metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate,    // virtual inherited (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &__nvoc_down_thunk_Memory_resIsDuplicate,    // virtual
+    .vtable.__memGetMapAddrSpace__ = &memGetMapAddrSpace_IMPL,    // virtual
+    .vtable.__memControl__ = &memControl_IMPL,    // virtual override (res) base (rmres)
+    .metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl,    // virtual inherited (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_Memory_resControl,    // virtual
+    .vtable.__memMap__ = &memMap_IMPL,    // virtual override (res) base (rmres)
+    .metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap,    // virtual inherited (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_Memory_resMap,    // virtual
+    .vtable.__memUnmap__ = &memUnmap_IMPL,    // virtual override (res) base (rmres)
+    .metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap,    // virtual inherited (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_Memory_resUnmap,    // virtual
+    .vtable.__memGetMemInterMapParams__ = &memGetMemInterMapParams_IMPL,    // virtual override (rmres) base (rmres)
+    .metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &__nvoc_down_thunk_Memory_rmresGetMemInterMapParams,    // virtual
+    .vtable.__memCheckMemInterUnmap__ = &memCheckMemInterUnmap_ac1694,    // inline virtual override (rmres) base (rmres) body
+    .metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &__nvoc_down_thunk_Memory_rmresCheckMemInterUnmap,    // virtual
+    .vtable.__memGetMemoryMappingDescriptor__ = &memGetMemoryMappingDescriptor_IMPL,    // virtual override (rmres) base (rmres)
+    .metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &__nvoc_down_thunk_Memory_rmresGetMemoryMappingDescriptor,    // virtual
+    .vtable.__memCheckCopyPermissions__ = &memCheckCopyPermissions_ac1694,    // inline virtual body
+    .vtable.__memIsReady__ = &memIsReady_IMPL,    // virtual
+    .vtable.__memIsGpuMapAllowed__ = &memIsGpuMapAllowed_e661f0,    // inline virtual body
+    .vtable.__memIsExportAllowed__ = &memIsExportAllowed_e661f0,    // inline virtual body
+    .vtable.__memAccessCallback__ = &__nvoc_up_thunk_RmResource_memAccessCallback,    // virtual inherited (rmres) base (rmres)
+    .metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL,    // virtual override (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback,    // virtual
+    .vtable.__memShareCallback__ = &__nvoc_up_thunk_RmResource_memShareCallback,    // virtual inherited (rmres) base (rmres)
+    .metadata__RmResource.vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL,    // virtual override (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback,    // virtual
+    .vtable.__memControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_memControlSerialization_Prologue,    // virtual inherited (rmres) base (rmres)
+    .metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL,    // virtual override (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue,    // virtual
+    .vtable.__memControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_memControlSerialization_Epilogue,    // virtual inherited (rmres) base (rmres)
+    .metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL,    // virtual override (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue,    // virtual
+    .vtable.__memControl_Prologue__ = &__nvoc_up_thunk_RmResource_memControl_Prologue,    // virtual inherited (rmres) base (rmres)
+    .metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL,    // virtual override (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue,    // virtual
+    .vtable.__memControl_Epilogue__ = &__nvoc_up_thunk_RmResource_memControl_Epilogue,    // virtual inherited (rmres) base (rmres)
+    .metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL,    // virtual override (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue,    // virtual
+    .vtable.__memCanCopy__ = &__nvoc_up_thunk_RsResource_memCanCopy,    // virtual inherited (res) base (rmres)
+    .metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy,    // virtual inherited (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL,    // virtual
+    .vtable.__memPreDestruct__ = &__nvoc_up_thunk_RsResource_memPreDestruct,    // virtual inherited (res) base (rmres)
+    .metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct,    // virtual inherited (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL,    // virtual
+    .vtable.__memControlFilter__ = &__nvoc_up_thunk_RsResource_memControlFilter,    // virtual inherited (res) base (rmres)
+    .metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter,    // virtual inherited (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL,    // virtual
+    .vtable.__memIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_memIsPartialUnmapSupported,    // inline virtual inherited (res) base (rmres) body
+    .metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported,    // inline virtual inherited (res) base (res) body
+    .metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453,    // inline virtual body
+    .vtable.__memMapTo__ = &__nvoc_up_thunk_RsResource_memMapTo,    // virtual inherited (res) base (rmres)
+    .metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo,    // virtual inherited (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL,    // virtual
+    .vtable.__memUnmapFrom__ = &__nvoc_up_thunk_RsResource_memUnmapFrom,    // virtual inherited (res) base (rmres)
+    .metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom,    // virtual inherited (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL,    // virtual
+    .vtable.__memGetRefCount__ = &__nvoc_up_thunk_RsResource_memGetRefCount,    // virtual inherited (res) base (rmres)
+    .metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount,    // virtual inherited (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL,    // virtual
+    .vtable.__memAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_memAddAdditionalDependants,    // virtual inherited (res) base (rmres)
+    .metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants,    // virtual inherited (res) base (res)
+    .metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL,    // virtual
+};
+
+
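/* The methodId values in __nvoc_exported_method_def_Memory (0x410103,
 * 0x410110, 0x410122, 0x410153) pack the NV0041 class into the upper bits of
 * the control command. A hedged sketch of a table lookup follows; only the
 * NVOC_EXPORTED_METHOD_DEF layout comes from the generated code above, the
 * findExportedMethod helper itself is hypothetical. */

    static const struct NVOC_EXPORTED_METHOD_DEF *
    findExportedMethod(const struct NVOC_EXPORTED_METHOD_DEF *pTable,
                       NvU32 numEntries, NvU32 cmd)
    {
        for (NvU32 i = 0; i < numEntries; i++)
        {
            if (pTable[i].methodId == cmd)
            {
                // pFunc is NULL when the method was disabled by flag.
                return (pTable[i].pFunc != NULL) ? &pTable[i] : NULL;
            }
        }
        return NULL;
    }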
+// Dynamic down-casting information
+const struct NVOC_CASTINFO __nvoc_castinfo__Memory = {
+    .numRelatives = 5,
+    .relatives = {
+        &__nvoc_metadata__Memory.rtti,    // [0]: (mem) this
+        &__nvoc_metadata__Memory.metadata__RmResource.rtti,    // [1]: (rmres) super
+        &__nvoc_metadata__Memory.metadata__RmResource.metadata__RsResource.rtti,    // [2]: (res) super^2
+        &__nvoc_metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti,    // [3]: (obj) super^3
+        &__nvoc_metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti,    // [4]: (rmrescmn) super^2
+    }
+};
+
+// 7 down-thunk(s) defined to bridge methods in Memory from superclasses
+
+// memIsDuplicate: virtual override (res) base (rmres)
+NV_STATUS __nvoc_down_thunk_Memory_resIsDuplicate(struct RsResource *pMemory, NvHandle hMemory, NvBool *pDuplicate) {
+    return memIsDuplicate((struct Memory *)(((unsigned char *) pMemory) - NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate);
+}
+
+// memControl: virtual override (res) base (rmres)
+NV_STATUS __nvoc_down_thunk_Memory_resControl(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return memControl((struct Memory *)(((unsigned char *) pMemory) - NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// memMap: virtual override (res) base (rmres)
+NV_STATUS __nvoc_down_thunk_Memory_resMap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
+    return memMap((struct Memory *)(((unsigned char *) pMemory) - NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams, pCpuMapping);
+}
+
+// memUnmap: virtual override (res) base (rmres)
+NV_STATUS __nvoc_down_thunk_Memory_resUnmap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
+    return memUnmap((struct Memory *)(((unsigned char *) pMemory) - NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pCpuMapping);
+}
+
+// memGetMemInterMapParams: virtual override (rmres) base (rmres)
+NV_STATUS __nvoc_down_thunk_Memory_rmresGetMemInterMapParams(struct RmResource *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return memGetMemInterMapParams((struct Memory *)(((unsigned char *) pMemory) - NV_OFFSETOF(Memory, __nvoc_base_RmResource)), pParams);
+}
+
+// memCheckMemInterUnmap: inline virtual override (rmres) base (rmres) body
+NV_STATUS __nvoc_down_thunk_Memory_rmresCheckMemInterUnmap(struct RmResource *pMemory, NvBool bSubdeviceHandleProvided) {
+    return memCheckMemInterUnmap((struct Memory *)(((unsigned char *) pMemory) - NV_OFFSETOF(Memory, __nvoc_base_RmResource)), bSubdeviceHandleProvided);
+}
+
+// memGetMemoryMappingDescriptor: virtual override (rmres) base (rmres)
+NV_STATUS __nvoc_down_thunk_Memory_rmresGetMemoryMappingDescriptor(struct RmResource *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) {
+    return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *) pMemory) - NV_OFFSETOF(Memory, __nvoc_base_RmResource)), ppMemDesc);
+}
+
+
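/* Each down-thunk above recovers the Memory object from an embedded
 * base-class pointer by subtracting the base's offset: the classic
 * container_of idiom, with NV_OFFSETOF supplying the embed offset. A reduced
 * sketch with hypothetical Base/Derived types: */

    struct Base    { int x; };
    struct Derived { int tag; struct Base base; };

    static struct Derived *derivedFromBase(struct Base *pBase)
    {
        /* Same pointer adjustment the generated thunks perform. */
        return (struct Derived *)((unsigned char *)pBase -
                                  NV_OFFSETOF(struct Derived, base));
    }

/* The up-thunks below do the inverse: they add the offset to move from the
 * derived object to its embedded base before calling the base implementation. */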
(rmres) +NV_STATUS __nvoc_up_thunk_RmResource_memControlSerialization_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Memory, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// memControlSerialization_Epilogue: virtual inherited (rmres) base (rmres) +void __nvoc_up_thunk_RmResource_memControlSerialization_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Memory, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// memControl_Prologue: virtual inherited (rmres) base (rmres) +NV_STATUS __nvoc_up_thunk_RmResource_memControl_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Memory, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// memControl_Epilogue: virtual inherited (rmres) base (rmres) +void __nvoc_up_thunk_RmResource_memControl_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Memory, __nvoc_base_RmResource)), pCallContext, pParams); +} + +// memCanCopy: virtual inherited (res) base (rmres) +NvBool __nvoc_up_thunk_RsResource_memCanCopy(struct Memory *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// memPreDestruct: virtual inherited (res) base (rmres) +void __nvoc_up_thunk_RsResource_memPreDestruct(struct Memory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// memControlFilter: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_memControlFilter(struct Memory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// memIsPartialUnmapSupported: inline virtual inherited (res) base (rmres) body +NvBool __nvoc_up_thunk_RsResource_memIsPartialUnmapSupported(struct Memory *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// memMapTo: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_memMapTo(struct Memory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// memUnmapFrom: virtual inherited (res) base (rmres) +NV_STATUS __nvoc_up_thunk_RsResource_memUnmapFrom(struct Memory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// memGetRefCount: virtual inherited (res) base (rmres) +NvU32 
__nvoc_up_thunk_RsResource_memGetRefCount(struct Memory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// memAddAdditionalDependants: virtual inherited (res) base (rmres) +void __nvoc_up_thunk_RsResource_memAddAdditionalDependants(struct RsClient *pClient, struct Memory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__Memory = +{ + /*numEntries=*/ 4, + /*pExportEntries=*/ __nvoc_exported_method_def_Memory +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_Memory(Memory *pThis) { + __nvoc_memDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Memory(Memory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Memory(Memory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Memory_fail_RmResource; + __nvoc_init_dataField_Memory(pThis); + + status = __nvoc_memConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Memory_fail__init; + goto __nvoc_ctor_Memory_exit; // Success + +__nvoc_ctor_Memory_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_Memory_fail_RmResource: +__nvoc_ctor_Memory_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_Memory_1(Memory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + // memCtrlCmdGetSurfacePhysAttrLvm -- exported (id=0x410103) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__memCtrlCmdGetSurfacePhysAttrLvm__ = &memCtrlCmdGetSurfacePhysAttrLvm_IMPL; +#endif + + // memCtrlCmdGetSurfaceInfoLvm -- exported (id=0x410110) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + pThis->__memCtrlCmdGetSurfaceInfoLvm__ = &memCtrlCmdGetSurfaceInfoLvm_IMPL; +#endif + + // memCtrlCmdMapMemoryForGpuAccess -- exported (id=0x410122) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__memCtrlCmdMapMemoryForGpuAccess__ = &memCtrlCmdMapMemoryForGpuAccess_IMPL; +#endif + + // memCtrlCmdUnmapMemoryForGpuAccess -- exported (id=0x410153) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__memCtrlCmdUnmapMemoryForGpuAccess__ = &memCtrlCmdUnmapMemoryForGpuAccess_IMPL; +#endif +} // End __nvoc_init_funcTable_Memory_1 with approximately 4 basic block(s). + + +// Initialize vtable(s) for 30 virtual method(s). +void __nvoc_init_funcTable_Memory(Memory *pThis) { + + // Initialize vtable(s) with 4 per-object function pointer(s). + __nvoc_init_funcTable_Memory_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__Memory(Memory *pThis) { + + // Initialize pointers to inherited data. 
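+    // (Illustrative sketch, assuming the generic staticCast() helper
+    // resolves through these cached pointers the same way the
+    // __staticCast_Memory() macro in g_mem_nvoc.h below does:
+    //     struct RsResource *pRes = staticCast(pThis, RsResource);
+    // then reduces to a single field read, with no pointer arithmetic
+    // at the call site.)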
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;    // (obj) super^3
+    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource;    // (res) super^2
+    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;    // (rmrescmn) super^2
+    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource;    // (rmres) super
+    pThis->__nvoc_pbase_Memory = pThis;    // (mem) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__RmResource(&pThis->__nvoc_base_RmResource);
+
+    // Pointer(s) to metadata structure(s)
+    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object;    // (obj) super^3
+    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__Memory.metadata__RmResource.metadata__RsResource;    // (res) super^2
+    pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__Memory.metadata__RmResource.metadata__RmResourceCommon;    // (rmrescmn) super^2
+    pThis->__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__Memory.metadata__RmResource;    // (rmres) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__Memory;    // (mem) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_Memory(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_Memory(Memory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    Memory *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(Memory), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(Memory));
+
+    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent, if there is one, unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__Memory(pThis);
+    status = __nvoc_ctor_Memory(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_objCreate_Memory_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_Memory_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
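+    // (Inferred from __nvoc_ctor_Memory above: its failure path has already
+    // unwound any base-class constructors that succeeded, so only the raw
+    // allocation and the parent link remain to be undone here.)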
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(Memory)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Memory(Memory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_Memory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_mem_nvoc.h b/src/nvidia/generated/g_mem_nvoc.h new file mode 100644 index 0000000..1bde083 --- /dev/null +++ b/src/nvidia/generated/g_mem_nvoc.h @@ -0,0 +1,594 @@ + +#ifndef _G_MEM_NVOC_H_ +#define _G_MEM_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#pragma once
+#include "g_mem_nvoc.h"
+
+#ifndef _MEMORY_API_H_
+#define _MEMORY_API_H_
+
+#include "core/core.h"
+#include "resserv/rs_resource.h"
+#include "rmapi/rmapi.h"
+#include "rmapi/resource.h"
+
+#include "containers/btree.h"
+
+#include "ctrl/ctrl0041.h"
+
+
+struct Device;
+
+#ifndef __NVOC_CLASS_Device_TYPEDEF__
+#define __NVOC_CLASS_Device_TYPEDEF__
+typedef struct Device Device;
+#endif /* __NVOC_CLASS_Device_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Device
+#define __nvoc_class_id_Device 0xe0ac20
+#endif /* __nvoc_class_id_Device */
+
+
+
+struct Subdevice;
+
+#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__
+#define __NVOC_CLASS_Subdevice_TYPEDEF__
+typedef struct Subdevice Subdevice;
+#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Subdevice
+#define __nvoc_class_id_Subdevice 0x4b01b3
+#endif /* __nvoc_class_id_Subdevice */
+
+
+
+struct RsClient;
+
+#ifndef __NVOC_CLASS_RsClient_TYPEDEF__
+#define __NVOC_CLASS_RsClient_TYPEDEF__
+typedef struct RsClient RsClient;
+#endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_RsClient
+#define __nvoc_class_id_RsClient 0x8f87e5
+#endif /* __nvoc_class_id_RsClient */
+
+
+
+struct Heap;
+
+#ifndef __NVOC_CLASS_Heap_TYPEDEF__
+#define __NVOC_CLASS_Heap_TYPEDEF__
+typedef struct Heap Heap;
+#endif /* __NVOC_CLASS_Heap_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Heap
+#define __nvoc_class_id_Heap 0x556e9a
+#endif /* __nvoc_class_id_Heap */
+
+
+
+struct OBJGPU;
+
+#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__
+#define __NVOC_CLASS_OBJGPU_TYPEDEF__
+typedef struct OBJGPU OBJGPU;
+#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJGPU
+#define __nvoc_class_id_OBJGPU 0x7ef3cb
+#endif /* __nvoc_class_id_OBJGPU */
+
+
+typedef struct MEMORY_DESCRIPTOR MEMORY_DESCRIPTOR;
+typedef struct PmuMapping PmuMapping;
+typedef struct HWRESOURCE_INFO HWRESOURCE_INFO;
+
+//
+// vGPU non-stall interrupt info
+//
+typedef struct _def_client_vgpu_ns_intr
+{
+    NvU32  nsSemValue;     // Non-stall interrupt semaphore value
+    NvU32  nsSemOffset;    // Non-stall interrupt semaphore offset. Currently it is always 0.
+    NvBool isSemaMemValidationEnabled; // Enables checking the non-stall interrupt semaphore
+                                       // value for a change when generating the event
+    NvU64  guestDomainId;  // Guest ID used to inject the interrupt
+    NvU64  guestMSIAddr;   // MSI address allocated by the guest OS
+    NvU32  guestMSIData;   // MSI data value set by the guest OS
+    void  *pEventDpc;      // DPC event used to deliver the interrupt
+} VGPU_NS_INTR;
+
+typedef struct
+{
+    struct Memory *pNext;
+    struct Memory *pPrev;
+} memCircularListItem;
+
+/*!
+ * RM internal class representing NV01_MEMORY_XXX
+ *
+ * @note Memory cannot be a GpuResource because the NoDeviceMemory
+ *       subclass is not allocated under a device.
+ */
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_MEM_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__Memory;
+struct NVOC_METADATA__RmResource;
+struct NVOC_VTABLE__Memory;
+
+
+struct Memory {
+
+    // Metadata starts with RTTI structure.
+    union {
+        const struct NVOC_METADATA__Memory *__nvoc_metadata_ptr;
+        const struct NVOC_RTTI *__nvoc_rtti;
+    };
+
+    // Parent (i.e.
superclass or base class) objects + struct RmResource __nvoc_base_RmResource; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^3 + struct RsResource *__nvoc_pbase_RsResource; // res super^2 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^2 + struct RmResource *__nvoc_pbase_RmResource; // rmres super + struct Memory *__nvoc_pbase_Memory; // mem + + // Vtable with 4 per-object function pointers + NV_STATUS (*__memCtrlCmdGetSurfacePhysAttrLvm__)(struct Memory * /*this*/, NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS *); // exported (id=0x410103) + NV_STATUS (*__memCtrlCmdGetSurfaceInfoLvm__)(struct Memory * /*this*/, NV0041_CTRL_GET_SURFACE_INFO_PARAMS *); // exported (id=0x410110) + NV_STATUS (*__memCtrlCmdMapMemoryForGpuAccess__)(struct Memory * /*this*/, NV0041_CTRL_MAP_MEMORY_FOR_GPU_ACCESS_PARAMS *); // exported (id=0x410122) + NV_STATUS (*__memCtrlCmdUnmapMemoryForGpuAccess__)(struct Memory * /*this*/, NV0041_CTRL_UNMAP_MEMORY_FOR_GPU_ACCESS_PARAMS *); // exported (id=0x410153) + + // Data members + NvBool bConstructed; + struct Device *pDevice; + struct Subdevice *pSubDevice; + struct OBJGPU *pGpu; + NvBool bBcResource; + NvU32 categoryClassId; + NvU64 Length; + NvU32 HeapOwner; + NvU32 RefCount; + struct Heap *pHeap; + MEMORY_DESCRIPTOR *pMemDesc; + NvBool isMemDescOwner; + memCircularListItem dupListItem; + NvP64 KernelVAddr; + NvP64 KernelMapPriv; + PmuMapping *pPmuMappingList; + NODE Node; + NvU32 Attr; + NvU32 Attr2; + NvU32 Pitch; + NvU32 Type; + NvU32 Flags; + NvU32 tag; + NvU64 osDeviceHandle; + HWRESOURCE_INFO *pHwResource; + NvBool bRpcAlloc; + NvBool bRegisteredWithGsp; + VGPU_NS_INTR vgpuNsIntr; +}; + + +// Vtable with 26 per-class function pointers +struct NVOC_VTABLE__Memory { + NV_STATUS (*__memIsDuplicate__)(struct Memory * /*this*/, NvHandle, NvBool *); // virtual override (res) base (rmres) + NV_STATUS (*__memGetMapAddrSpace__)(struct Memory * /*this*/, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual + NV_STATUS (*__memControl__)(struct Memory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual override (res) base (rmres) + NV_STATUS (*__memMap__)(struct Memory * /*this*/, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual override (res) base (rmres) + NV_STATUS (*__memUnmap__)(struct Memory * /*this*/, CALL_CONTEXT *, RsCpuMapping *); // virtual override (res) base (rmres) + NV_STATUS (*__memGetMemInterMapParams__)(struct Memory * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual override (rmres) base (rmres) + NV_STATUS (*__memCheckMemInterUnmap__)(struct Memory * /*this*/, NvBool); // inline virtual override (rmres) base (rmres) body + NV_STATUS (*__memGetMemoryMappingDescriptor__)(struct Memory * /*this*/, MEMORY_DESCRIPTOR **); // virtual override (rmres) base (rmres) + NV_STATUS (*__memCheckCopyPermissions__)(struct Memory * /*this*/, struct OBJGPU *, struct Device *); // inline virtual body + NV_STATUS (*__memIsReady__)(struct Memory * /*this*/, NvBool); // virtual + NvBool (*__memIsGpuMapAllowed__)(struct Memory * /*this*/, struct OBJGPU *); // inline virtual body + NvBool (*__memIsExportAllowed__)(struct Memory * /*this*/); // inline virtual body + NvBool (*__memAccessCallback__)(struct Memory * /*this*/, RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (rmres) + NvBool (*__memShareCallback__)(struct Memory * /*this*/, RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited 
(rmres) base (rmres) + NV_STATUS (*__memControlSerialization_Prologue__)(struct Memory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + void (*__memControlSerialization_Epilogue__)(struct Memory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + NV_STATUS (*__memControl_Prologue__)(struct Memory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + void (*__memControl_Epilogue__)(struct Memory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (rmres) + NvBool (*__memCanCopy__)(struct Memory * /*this*/); // virtual inherited (res) base (rmres) + void (*__memPreDestruct__)(struct Memory * /*this*/); // virtual inherited (res) base (rmres) + NV_STATUS (*__memControlFilter__)(struct Memory * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (rmres) + NvBool (*__memIsPartialUnmapSupported__)(struct Memory * /*this*/); // inline virtual inherited (res) base (rmres) body + NV_STATUS (*__memMapTo__)(struct Memory * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (rmres) + NV_STATUS (*__memUnmapFrom__)(struct Memory * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (rmres) + NvU32 (*__memGetRefCount__)(struct Memory * /*this*/); // virtual inherited (res) base (rmres) + void (*__memAddAdditionalDependants__)(struct RsClient *, struct Memory * /*this*/, RsResourceRef *); // virtual inherited (res) base (rmres) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__Memory { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__RmResource metadata__RmResource; + const struct NVOC_VTABLE__Memory vtable; +}; + +#ifndef __NVOC_CLASS_Memory_TYPEDEF__ +#define __NVOC_CLASS_Memory_TYPEDEF__ +typedef struct Memory Memory; +#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Memory +#define __nvoc_class_id_Memory 0x4789f2 +#endif /* __nvoc_class_id_Memory */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +#define __staticCast_Memory(pThis) \ + ((pThis)->__nvoc_pbase_Memory) + +#ifdef __nvoc_mem_h_disabled +#define __dynamicCast_Memory(pThis) ((Memory*) NULL) +#else //__nvoc_mem_h_disabled +#define __dynamicCast_Memory(pThis) \ + ((Memory*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Memory))) +#endif //__nvoc_mem_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_Memory(Memory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Memory(Memory**, Dynamic*, NvU32, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_Memory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_Memory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define memIsDuplicate_FNPTR(pMemory) pMemory->__nvoc_metadata_ptr->vtable.__memIsDuplicate__ +#define memIsDuplicate(pMemory, hMemory, pDuplicate) memIsDuplicate_DISPATCH(pMemory, hMemory, pDuplicate) +#define memGetMapAddrSpace_FNPTR(pMemory) pMemory->__nvoc_metadata_ptr->vtable.__memGetMapAddrSpace__ +#define memGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) memGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define memControl_FNPTR(pMemory) 
pMemory->__nvoc_metadata_ptr->vtable.__memControl__ +#define memControl(pMemory, pCallContext, pParams) memControl_DISPATCH(pMemory, pCallContext, pParams) +#define memMap_FNPTR(pMemory) pMemory->__nvoc_metadata_ptr->vtable.__memMap__ +#define memMap(pMemory, pCallContext, pParams, pCpuMapping) memMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define memUnmap_FNPTR(pMemory) pMemory->__nvoc_metadata_ptr->vtable.__memUnmap__ +#define memUnmap(pMemory, pCallContext, pCpuMapping) memUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define memGetMemInterMapParams_FNPTR(pMemory) pMemory->__nvoc_metadata_ptr->vtable.__memGetMemInterMapParams__ +#define memGetMemInterMapParams(pMemory, pParams) memGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define memCheckMemInterUnmap_FNPTR(pMemory) pMemory->__nvoc_metadata_ptr->vtable.__memCheckMemInterUnmap__ +#define memCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) memCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define memGetMemoryMappingDescriptor_FNPTR(pMemory) pMemory->__nvoc_metadata_ptr->vtable.__memGetMemoryMappingDescriptor__ +#define memGetMemoryMappingDescriptor(pMemory, ppMemDesc) memGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define memCheckCopyPermissions_FNPTR(pMemory) pMemory->__nvoc_metadata_ptr->vtable.__memCheckCopyPermissions__ +#define memCheckCopyPermissions(pMemory, pDstGpu, pDstDevice) memCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, pDstDevice) +#define memIsReady_FNPTR(pMemory) pMemory->__nvoc_metadata_ptr->vtable.__memIsReady__ +#define memIsReady(pMemory, bCopyConstructorContext) memIsReady_DISPATCH(pMemory, bCopyConstructorContext) +#define memIsGpuMapAllowed_FNPTR(pMemory) pMemory->__nvoc_metadata_ptr->vtable.__memIsGpuMapAllowed__ +#define memIsGpuMapAllowed(pMemory, pGpu) memIsGpuMapAllowed_DISPATCH(pMemory, pGpu) +#define memIsExportAllowed_FNPTR(pMemory) pMemory->__nvoc_metadata_ptr->vtable.__memIsExportAllowed__ +#define memIsExportAllowed(pMemory) memIsExportAllowed_DISPATCH(pMemory) +#define memCtrlCmdGetSurfacePhysAttrLvm_FNPTR(pMemory) pMemory->__memCtrlCmdGetSurfacePhysAttrLvm__ +#define memCtrlCmdGetSurfacePhysAttrLvm(pMemory, pGPAP) memCtrlCmdGetSurfacePhysAttrLvm_DISPATCH(pMemory, pGPAP) +#define memCtrlCmdGetSurfaceInfoLvm_FNPTR(pMemory) pMemory->__memCtrlCmdGetSurfaceInfoLvm__ +#define memCtrlCmdGetSurfaceInfoLvm(pMemory, pSurfaceInfoParams) memCtrlCmdGetSurfaceInfoLvm_DISPATCH(pMemory, pSurfaceInfoParams) +#define memCtrlCmdMapMemoryForGpuAccess_FNPTR(pMemory) pMemory->__memCtrlCmdMapMemoryForGpuAccess__ +#define memCtrlCmdMapMemoryForGpuAccess(pMemory, pParams) memCtrlCmdMapMemoryForGpuAccess_DISPATCH(pMemory, pParams) +#define memCtrlCmdUnmapMemoryForGpuAccess_FNPTR(pMemory) pMemory->__memCtrlCmdUnmapMemoryForGpuAccess__ +#define memCtrlCmdUnmapMemoryForGpuAccess(pMemory, pParams) memCtrlCmdUnmapMemoryForGpuAccess_DISPATCH(pMemory, pParams) +#define memAccessCallback_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define memAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) memAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define memShareCallback_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define memShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) memShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define 
memControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define memControlSerialization_Prologue(pResource, pCallContext, pParams) memControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define memControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define memControlSerialization_Epilogue(pResource, pCallContext, pParams) memControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define memControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define memControl_Prologue(pResource, pCallContext, pParams) memControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define memControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define memControl_Epilogue(pResource, pCallContext, pParams) memControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define memCanCopy_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define memCanCopy(pResource) memCanCopy_DISPATCH(pResource) +#define memPreDestruct_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define memPreDestruct(pResource) memPreDestruct_DISPATCH(pResource) +#define memControlFilter_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define memControlFilter(pResource, pCallContext, pParams) memControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define memIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define memIsPartialUnmapSupported(pResource) memIsPartialUnmapSupported_DISPATCH(pResource) +#define memMapTo_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define memMapTo(pResource, pParams) memMapTo_DISPATCH(pResource, pParams) +#define memUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define memUnmapFrom(pResource, pParams) memUnmapFrom_DISPATCH(pResource, pParams) +#define memGetRefCount_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define memGetRefCount(pResource) memGetRefCount_DISPATCH(pResource) +#define memAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define memAddAdditionalDependants(pClient, pResource, pReference) memAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NV_STATUS memIsDuplicate_DISPATCH(struct Memory *pMemory, NvHandle hMemory, NvBool *pDuplicate) { + return pMemory->__nvoc_metadata_ptr->vtable.__memIsDuplicate__(pMemory, hMemory, pDuplicate); +} + +static inline NV_STATUS memGetMapAddrSpace_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__nvoc_metadata_ptr->vtable.__memGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static 
inline NV_STATUS memControl_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__nvoc_metadata_ptr->vtable.__memControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS memMap_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__nvoc_metadata_ptr->vtable.__memMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS memUnmap_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__nvoc_metadata_ptr->vtable.__memUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS memGetMemInterMapParams_DISPATCH(struct Memory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__nvoc_metadata_ptr->vtable.__memGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS memCheckMemInterUnmap_DISPATCH(struct Memory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__nvoc_metadata_ptr->vtable.__memCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS memGetMemoryMappingDescriptor_DISPATCH(struct Memory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__nvoc_metadata_ptr->vtable.__memGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS memCheckCopyPermissions_DISPATCH(struct Memory *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice) { + return pMemory->__nvoc_metadata_ptr->vtable.__memCheckCopyPermissions__(pMemory, pDstGpu, pDstDevice); +} + +static inline NV_STATUS memIsReady_DISPATCH(struct Memory *pMemory, NvBool bCopyConstructorContext) { + return pMemory->__nvoc_metadata_ptr->vtable.__memIsReady__(pMemory, bCopyConstructorContext); +} + +static inline NvBool memIsGpuMapAllowed_DISPATCH(struct Memory *pMemory, struct OBJGPU *pGpu) { + return pMemory->__nvoc_metadata_ptr->vtable.__memIsGpuMapAllowed__(pMemory, pGpu); +} + +static inline NvBool memIsExportAllowed_DISPATCH(struct Memory *pMemory) { + return pMemory->__nvoc_metadata_ptr->vtable.__memIsExportAllowed__(pMemory); +} + +static inline NV_STATUS memCtrlCmdGetSurfacePhysAttrLvm_DISPATCH(struct Memory *pMemory, NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS *pGPAP) { + return pMemory->__memCtrlCmdGetSurfacePhysAttrLvm__(pMemory, pGPAP); +} + +static inline NV_STATUS memCtrlCmdGetSurfaceInfoLvm_DISPATCH(struct Memory *pMemory, NV0041_CTRL_GET_SURFACE_INFO_PARAMS *pSurfaceInfoParams) { + return pMemory->__memCtrlCmdGetSurfaceInfoLvm__(pMemory, pSurfaceInfoParams); +} + +static inline NV_STATUS memCtrlCmdMapMemoryForGpuAccess_DISPATCH(struct Memory *pMemory, NV0041_CTRL_MAP_MEMORY_FOR_GPU_ACCESS_PARAMS *pParams) { + return pMemory->__memCtrlCmdMapMemoryForGpuAccess__(pMemory, pParams); +} + +static inline NV_STATUS memCtrlCmdUnmapMemoryForGpuAccess_DISPATCH(struct Memory *pMemory, NV0041_CTRL_UNMAP_MEMORY_FOR_GPU_ACCESS_PARAMS *pParams) { + return pMemory->__memCtrlCmdUnmapMemoryForGpuAccess__(pMemory, pParams); +} + +static inline NvBool memAccessCallback_DISPATCH(struct Memory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__memAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool memShareCallback_DISPATCH(struct Memory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + 
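+    // (Note: as with every *_DISPATCH helper in this header, the virtual
+    // call indirects through the class-wide vtable reached via
+    // __nvoc_metadata_ptr; only the four exported control methods use
+    // per-object function pointers.)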
return pResource->__nvoc_metadata_ptr->vtable.__memShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS memControlSerialization_Prologue_DISPATCH(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__memControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void memControlSerialization_Epilogue_DISPATCH(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__memControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS memControl_Prologue_DISPATCH(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__memControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void memControl_Epilogue_DISPATCH(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__memControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool memCanCopy_DISPATCH(struct Memory *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__memCanCopy__(pResource); +} + +static inline void memPreDestruct_DISPATCH(struct Memory *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__memPreDestruct__(pResource); +} + +static inline NV_STATUS memControlFilter_DISPATCH(struct Memory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__memControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool memIsPartialUnmapSupported_DISPATCH(struct Memory *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__memIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS memMapTo_DISPATCH(struct Memory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__memMapTo__(pResource, pParams); +} + +static inline NV_STATUS memUnmapFrom_DISPATCH(struct Memory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__memUnmapFrom__(pResource, pParams); +} + +static inline NvU32 memGetRefCount_DISPATCH(struct Memory *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__memGetRefCount__(pResource); +} + +static inline void memAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Memory *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__memAddAdditionalDependants__(pClient, pResource, pReference); +} + +NV_STATUS memIsDuplicate_IMPL(struct Memory *pMemory, NvHandle hMemory, NvBool *pDuplicate); + +NV_STATUS memGetMapAddrSpace_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); + +NV_STATUS memControl_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +NV_STATUS memMap_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); + +NV_STATUS memUnmap_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); + +NV_STATUS memGetMemInterMapParams_IMPL(struct Memory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams); + +static inline NV_STATUS memCheckMemInterUnmap_ac1694(struct 
Memory *pMemory, NvBool bSubdeviceHandleProvided) { + return NV_OK; +} + +NV_STATUS memGetMemoryMappingDescriptor_IMPL(struct Memory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc); + +static inline NV_STATUS memCheckCopyPermissions_ac1694(struct Memory *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice) { + return NV_OK; +} + +NV_STATUS memIsReady_IMPL(struct Memory *pMemory, NvBool bCopyConstructorContext); + +static inline NvBool memIsGpuMapAllowed_e661f0(struct Memory *pMemory, struct OBJGPU *pGpu) { + return NV_TRUE; +} + +static inline NvBool memIsExportAllowed_e661f0(struct Memory *pMemory) { + return NV_TRUE; +} + +NV_STATUS memCtrlCmdGetSurfacePhysAttrLvm_IMPL(struct Memory *pMemory, NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS *pGPAP); + +NV_STATUS memCtrlCmdGetSurfaceInfoLvm_IMPL(struct Memory *pMemory, NV0041_CTRL_GET_SURFACE_INFO_PARAMS *pSurfaceInfoParams); + +NV_STATUS memCtrlCmdMapMemoryForGpuAccess_IMPL(struct Memory *pMemory, NV0041_CTRL_MAP_MEMORY_FOR_GPU_ACCESS_PARAMS *pParams); + +NV_STATUS memCtrlCmdUnmapMemoryForGpuAccess_IMPL(struct Memory *pMemory, NV0041_CTRL_UNMAP_MEMORY_FOR_GPU_ACCESS_PARAMS *pParams); + +NV_STATUS memConstruct_IMPL(struct Memory *arg_pMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_memConstruct(arg_pMemory, arg_pCallContext, arg_pParams) memConstruct_IMPL(arg_pMemory, arg_pCallContext, arg_pParams) +NV_STATUS memCopyConstruct_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +#ifdef __nvoc_mem_h_disabled +static inline NV_STATUS memCopyConstruct(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("Memory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_h_disabled +#define memCopyConstruct(pMemory, pCallContext, pParams) memCopyConstruct_IMPL(pMemory, pCallContext, pParams) +#endif //__nvoc_mem_h_disabled + +void memDestruct_IMPL(struct Memory *pMemory); + +#define __nvoc_memDestruct(pMemory) memDestruct_IMPL(pMemory) +NV_STATUS memConstructCommon_IMPL(struct Memory *pMemory, NvU32 categoryClassId, NvU32 flags, MEMORY_DESCRIPTOR *pMemDesc, NvU32 heapOwner, struct Heap *pHeap, NvU32 attr, NvU32 attr2, NvU32 Pitch, NvU32 type, NvU32 tag, HWRESOURCE_INFO *pHwResource); + +#ifdef __nvoc_mem_h_disabled +static inline NV_STATUS memConstructCommon(struct Memory *pMemory, NvU32 categoryClassId, NvU32 flags, MEMORY_DESCRIPTOR *pMemDesc, NvU32 heapOwner, struct Heap *pHeap, NvU32 attr, NvU32 attr2, NvU32 Pitch, NvU32 type, NvU32 tag, HWRESOURCE_INFO *pHwResource) { + NV_ASSERT_FAILED_PRECOMP("Memory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_h_disabled +#define memConstructCommon(pMemory, categoryClassId, flags, pMemDesc, heapOwner, pHeap, attr, attr2, Pitch, type, tag, pHwResource) memConstructCommon_IMPL(pMemory, categoryClassId, flags, pMemDesc, heapOwner, pHeap, attr, attr2, Pitch, type, tag, pHwResource) +#endif //__nvoc_mem_h_disabled + +void memDestructCommon_IMPL(struct Memory *pMemory); + +#ifdef __nvoc_mem_h_disabled +static inline void memDestructCommon(struct Memory *pMemory) { + NV_ASSERT_FAILED_PRECOMP("Memory was disabled!"); +} +#else //__nvoc_mem_h_disabled +#define memDestructCommon(pMemory) memDestructCommon_IMPL(pMemory) +#endif //__nvoc_mem_h_disabled + +NV_STATUS memCreateMemDesc_IMPL(struct OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, NV_ADDRESS_SPACE addrSpace, NvU64 FBOffset, NvU64 length, NvU32 
attr, NvU32 attr2); + +#define memCreateMemDesc(pGpu, ppMemDesc, addrSpace, FBOffset, length, attr, attr2) memCreateMemDesc_IMPL(pGpu, ppMemDesc, addrSpace, FBOffset, length, attr, attr2) +NV_STATUS memCreateKernelMapping_IMPL(struct Memory *pMemory, NvU32 Protect, NvBool bClear); + +#ifdef __nvoc_mem_h_disabled +static inline NV_STATUS memCreateKernelMapping(struct Memory *pMemory, NvU32 Protect, NvBool bClear) { + NV_ASSERT_FAILED_PRECOMP("Memory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_h_disabled +#define memCreateKernelMapping(pMemory, Protect, bClear) memCreateKernelMapping_IMPL(pMemory, Protect, bClear) +#endif //__nvoc_mem_h_disabled + +NV_STATUS memGetByHandle_IMPL(struct RsClient *pClient, NvHandle hMemory, struct Memory **ppMemory); + +#define memGetByHandle(pClient, hMemory, ppMemory) memGetByHandle_IMPL(pClient, hMemory, ppMemory) +NV_STATUS memGetByHandleAndDevice_IMPL(struct RsClient *pClient, NvHandle hMemory, NvHandle hDevice, struct Memory **ppMemory); + +#define memGetByHandleAndDevice(pClient, hMemory, hDevice, ppMemory) memGetByHandleAndDevice_IMPL(pClient, hMemory, hDevice, ppMemory) +NV_STATUS memGetByHandleAndGroupedGpu_IMPL(struct RsClient *pClient, NvHandle hMemory, struct OBJGPU *pGpu, struct Memory **ppMemory); + +#define memGetByHandleAndGroupedGpu(pClient, hMemory, pGpu, ppMemory) memGetByHandleAndGroupedGpu_IMPL(pClient, hMemory, pGpu, ppMemory) +NV_STATUS memRegisterWithGsp_IMPL(struct OBJGPU *pGpu, struct RsClient *pClient, NvHandle hParent, NvHandle hMemory); + +#define memRegisterWithGsp(pGpu, pClient, hParent, hMemory) memRegisterWithGsp_IMPL(pGpu, pClient, hParent, hMemory) +void memSetSysmemCacheAttrib_IMPL(struct OBJGPU *pGpu, NV_MEMORY_ALLOCATION_PARAMS *pAllocData, MEMORY_DESCRIPTOR *pMemDesc); + +#define memSetSysmemCacheAttrib(pGpu, pAllocData, pMemDesc) memSetSysmemCacheAttrib_IMPL(pGpu, pAllocData, pMemDesc) +NV_STATUS memSetGpuCacheSnoop_IMPL(struct OBJGPU *pGpu, NvU32 attr, MEMORY_DESCRIPTOR *pMemDesc); + +#define memSetGpuCacheSnoop(pGpu, attr, pMemDesc) memSetGpuCacheSnoop_IMPL(pGpu, attr, pMemDesc) +#undef PRIVATE_FIELD + + +#endif + + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_nv_debug_dump_nvoc.h b/src/nvidia/generated/g_nv_debug_dump_nvoc.h new file mode 100644 index 0000000..7b77a36 --- /dev/null +++ b/src/nvidia/generated/g_nv_debug_dump_nvoc.h @@ -0,0 +1,441 @@ + +#ifndef _G_NV_DEBUG_DUMP_NVOC_H_ +#define _G_NV_DEBUG_DUMP_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+#include "g_nv_debug_dump_nvoc.h"
+
+#ifndef _NV_DEBUG_DUMP_H_
+#define _NV_DEBUG_DUMP_H_
+
+#include "gpu/eng_state.h"
+#include "gpu/mem_mgr/mem_desc.h"
+
+#include "lib/protobuf/prb.h"
+#include "rmapi/control.h"
+#include "gpu/gpu.h"
+
+// OS-independent error types
+typedef enum
+{
+    NVD_SKIP_ZERO,
+    NVD_GPU_HUNG,
+    NVD_FAILURE_TO_RECOVER,
+    NVD_MACHINE_CHECK,
+    NVD_POWERUP_FAILURE,
+    NVD_CPU_EXCEPTION,
+    NVD_EXTERNALLY_GENERATED,
+    NVD_GPU_GENERATED,
+} NVD_ERROR_TYPE;
+
+#define NV_NVD_ERROR_CODE_MAJOR 31:16
+#define NV_NVD_ERROR_CODE_MINOR 15:0
+
+#define NVD_ERROR_CODE(Major, Minor)             \
+    (DRF_NUM(_NVD, _ERROR_CODE, _MAJOR, Major) | \
+     DRF_NUM(_NVD, _ERROR_CODE, _MINOR, Minor))
+
+
+#define NVD_ENGINE_FLAGS_PRIORITY          1:0
+#define NVD_ENGINE_FLAGS_PRIORITY_LOW      0x00000000
+#define NVD_ENGINE_FLAGS_PRIORITY_MED      0x00000001
+#define NVD_ENGINE_FLAGS_PRIORITY_HIGH     0x00000002
+#define NVD_ENGINE_FLAGS_PRIORITY_CRITICAL 0x00000003
+
+/*
+ * NVD_ENGINE_FLAGS_SOURCE
+ *
+ * CPU  - Always run on CPU, even if running as GSP-RM client.
+ * GSP  - Run on GSP for GSP-RM client, otherwise run on CPU.
+ * BOTH - Engine dump is split between GSP-RM and CPU. Run both.
+ */
+#define NVD_ENGINE_FLAGS_SOURCE      3:2
+#define NVD_ENGINE_FLAGS_SOURCE_CPU  0x00000001
+#define NVD_ENGINE_FLAGS_SOURCE_GSP  0x00000002
+#define NVD_ENGINE_FLAGS_SOURCE_BOTH 0x00000003
+
+
+#define NV_NVD_ENGINE_STEP_MAJOR 31:16
+#define NV_NVD_ENGINE_STEP_MINOR 15:0
+
+#define NVD_ENGINE_STEP(Major, Minor)             \
+    (DRF_NUM(_NVD, _ENGINE_STEP, _MAJOR, Major) | \
+     DRF_NUM(_NVD, _ENGINE_STEP, _MINOR, Minor))
+
+typedef enum
+{
+    NVD_FIRST_ENGINE = 0,
+    NVD_LAST_ENGINE = 0xFF,
+} NVD_WHICH_ENGINE;
+
+typedef struct _def_nvd_debug_buffer {
+    NvU32 tag;
+    MEMORY_DESCRIPTOR *pMemDesc;
+    struct _def_nvd_debug_buffer *pNext;
+} NVD_DEBUG_BUFFER;
+
+// Enumeration of dump types (Journal entry, OCA dump, or API-requested dump)
+typedef enum
+{
+    NVD_DUMP_TYPE_JOURNAL,    // Very small records only. The total for the
+                              // whole Journal is 4K (including overhead); the
+                              // actual amount of raw data stored is less.
+    NVD_DUMP_TYPE_OCA,        // Assume 8K - 512K total
+    NVD_DUMP_TYPE_API,        // Mini Dump >512K
+} NVD_DUMP_TYPE;
+
+// Enumeration of sizes returned by nvDumpGetDumpBufferSizeEnum
+typedef enum
+{
+    NVD_DUMP_SIZE_JOURNAL_WRITE,  // Very small records only.
+    NVD_DUMP_SIZE_SMALL,          // Assume 8K - 512K total
+    NVD_DUMP_SIZE_MEDIUM,         // Mini Dump >512K
+    NVD_DUMP_SIZE_LARGE           // Megs of space
+} NVD_DUMP_SIZE;
+
+//
+// NV Dump State
+//
+// State passed into all dump routines.
+//
+typedef struct _def_nvd_state NVD_STATE;
+
+struct _def_nvd_state
+{
+    NvBool        bDumpInProcess;    // Currently creating dump.
+    NvBool        bRMLock;           // Acquired the RM lock.
+    NvBool        bGpuAccessible;    // OK to read priv registers on GPU.
+    NvU32         bugCheckCode;      // Raw OS bugcheck code.
+    NvU32         internalCode;      // OS-independent error code.
+    NvU32         initialbufferSize; // Size of buffer passed in.
+    NVD_DUMP_TYPE nvDumpType;        // Type of dump.
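+    // (Note: one NVD_STATE is threaded through every NvdDumpEngineFunc
+    // callback during a dump; see the typedef below.)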
+}; + + +NVD_DUMP_SIZE nvDumpGetDumpBufferSizeEnum( NVD_STATE *pNvDumpState ); + +typedef NV_STATUS NvdDumpEngineFunc(struct OBJGPU *pGpu, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState, void *pvData); + +typedef struct _def_nvd_engine_callback { + NvdDumpEngineFunc *pDumpEngineFunc; // Callback function. + NvU32 engDesc; // Indicates which engine this is. + NvU32 flags; // See NVD_ENGINE_FLAGS above. + void *pvData; // Opaque pointer to data passed to callback function. + struct _def_nvd_engine_callback *pNext; // Next Engine +} NVD_ENGINE_CALLBACK; + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_NV_DEBUG_DUMP_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__NvDebugDump; +struct NVOC_METADATA__OBJENGSTATE; +struct NVOC_VTABLE__NvDebugDump; + + +struct NvDebugDump { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__NvDebugDump *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^2 + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; // engstate super + struct NvDebugDump *__nvoc_pbase_NvDebugDump; // nvd + + // 1 PDB property + + // Data members + NVD_DEBUG_BUFFER *pHeadDebugBuffer; + NVD_ENGINE_CALLBACK *pCallbacks; +}; + + +// Vtable with 14 per-class function pointers +struct NVOC_VTABLE__NvDebugDump { + NV_STATUS (*__nvdConstructEngine__)(struct OBJGPU *, struct NvDebugDump * /*this*/, ENGDESCRIPTOR); // virtual override (engstate) base (engstate) + NV_STATUS (*__nvdStateInitLocked__)(struct OBJGPU *, struct NvDebugDump * /*this*/); // virtual override (engstate) base (engstate) + void (*__nvdInitMissing__)(struct OBJGPU *, struct NvDebugDump * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__nvdStatePreInitLocked__)(struct OBJGPU *, struct NvDebugDump * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__nvdStatePreInitUnlocked__)(struct OBJGPU *, struct NvDebugDump * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__nvdStateInitUnlocked__)(struct OBJGPU *, struct NvDebugDump * /*this*/); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__nvdStatePreLoad__)(struct OBJGPU *, struct NvDebugDump * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__nvdStateLoad__)(struct OBJGPU *, struct NvDebugDump * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__nvdStatePostLoad__)(struct OBJGPU *, struct NvDebugDump * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__nvdStatePreUnload__)(struct OBJGPU *, struct NvDebugDump * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__nvdStateUnload__)(struct OBJGPU *, struct NvDebugDump * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + NV_STATUS (*__nvdStatePostUnload__)(struct OBJGPU *, struct NvDebugDump * /*this*/, NvU32); // virtual inherited (engstate) base (engstate) + void (*__nvdStateDestroy__)(struct OBJGPU *, struct NvDebugDump * /*this*/); // virtual inherited 
(engstate) base (engstate)
+    NvBool (*__nvdIsPresent__)(struct OBJGPU *, struct NvDebugDump * /*this*/);  // virtual inherited (engstate) base (engstate)
+};
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__NvDebugDump {
+    const struct NVOC_RTTI rtti;
+    const struct NVOC_METADATA__OBJENGSTATE metadata__OBJENGSTATE;
+    const struct NVOC_VTABLE__NvDebugDump vtable;
+};
+
+#ifndef __NVOC_CLASS_NvDebugDump_TYPEDEF__
+#define __NVOC_CLASS_NvDebugDump_TYPEDEF__
+typedef struct NvDebugDump NvDebugDump;
+#endif /* __NVOC_CLASS_NvDebugDump_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_NvDebugDump
+#define __nvoc_class_id_NvDebugDump 0x7e80a2
+#endif /* __nvoc_class_id_NvDebugDump */
+
+// Casting support
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_NvDebugDump;
+
+#define __staticCast_NvDebugDump(pThis) \
+    ((pThis)->__nvoc_pbase_NvDebugDump)
+
+#ifdef __nvoc_nv_debug_dump_h_disabled
+#define __dynamicCast_NvDebugDump(pThis) ((NvDebugDump*) NULL)
+#else //__nvoc_nv_debug_dump_h_disabled
+#define __dynamicCast_NvDebugDump(pThis) \
+    ((NvDebugDump*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(NvDebugDump)))
+#endif //__nvoc_nv_debug_dump_h_disabled
+
+// Property macros
+#define PDB_PROP_NVD_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
+#define PDB_PROP_NVD_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
+
+NV_STATUS __nvoc_objCreateDynamic_NvDebugDump(NvDebugDump**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_NvDebugDump(NvDebugDump**, Dynamic*, NvU32);
+#define __objCreate_NvDebugDump(ppNewObj, pParent, createFlags) \
+    __nvoc_objCreate_NvDebugDump((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
+
+
+// Wrapper macros
+#define nvdConstructEngine_FNPTR(pNvd) pNvd->__nvoc_metadata_ptr->vtable.__nvdConstructEngine__
+#define nvdConstructEngine(pGpu, pNvd, arg3) nvdConstructEngine_DISPATCH(pGpu, pNvd, arg3)
+#define nvdStateInitLocked_FNPTR(pNvd) pNvd->__nvoc_metadata_ptr->vtable.__nvdStateInitLocked__
+#define nvdStateInitLocked(pGpu, pNvd) nvdStateInitLocked_DISPATCH(pGpu, pNvd)
+#define nvdInitMissing_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateInitMissing__
+#define nvdInitMissing(pGpu, pEngstate) nvdInitMissing_DISPATCH(pGpu, pEngstate)
+#define nvdStatePreInitLocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreInitLocked__
+#define nvdStatePreInitLocked(pGpu, pEngstate) nvdStatePreInitLocked_DISPATCH(pGpu, pEngstate)
+#define nvdStatePreInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreInitUnlocked__
+#define nvdStatePreInitUnlocked(pGpu, pEngstate) nvdStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
+#define nvdStateInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStateInitUnlocked__
+#define nvdStateInitUnlocked(pGpu, pEngstate) nvdStateInitUnlocked_DISPATCH(pGpu, pEngstate)
+#define nvdStatePreLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreLoad__
+#define nvdStatePreLoad(pGpu, pEngstate, arg3) nvdStatePreLoad_DISPATCH(pGpu, pEngstate, arg3)
+#define nvdStateLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStateLoad__
+#define nvdStateLoad(pGpu, pEngstate, arg3) nvdStateLoad_DISPATCH(pGpu, pEngstate, arg3)
+#define nvdStatePostLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePostLoad__
+#define nvdStatePostLoad(pGpu, pEngstate, arg3) nvdStatePostLoad_DISPATCH(pGpu, pEngstate, arg3)
+#define nvdStatePreUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreUnload__
+#define nvdStatePreUnload(pGpu, pEngstate, arg3) nvdStatePreUnload_DISPATCH(pGpu, pEngstate, arg3)
+#define nvdStateUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStateUnload__
+#define nvdStateUnload(pGpu, pEngstate, arg3) nvdStateUnload_DISPATCH(pGpu, pEngstate, arg3)
+#define nvdStatePostUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePostUnload__
+#define nvdStatePostUnload(pGpu, pEngstate, arg3) nvdStatePostUnload_DISPATCH(pGpu, pEngstate, arg3)
+#define nvdStateDestroy_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStateDestroy__
+#define nvdStateDestroy(pGpu, pEngstate) nvdStateDestroy_DISPATCH(pGpu, pEngstate)
+#define nvdIsPresent_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateIsPresent__
+#define nvdIsPresent(pGpu, pEngstate) nvdIsPresent_DISPATCH(pGpu, pEngstate)
+
+// Dispatch functions
+static inline NV_STATUS nvdConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, ENGDESCRIPTOR arg3) {
+    return pNvd->__nvoc_metadata_ptr->vtable.__nvdConstructEngine__(pGpu, pNvd, arg3);
+}
+
+static inline NV_STATUS nvdStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pNvd) {
+    return pNvd->__nvoc_metadata_ptr->vtable.__nvdStateInitLocked__(pGpu, pNvd);
+}
+
+static inline void nvdInitMissing_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pEngstate) {
+    pEngstate->__nvoc_metadata_ptr->vtable.__nvdInitMissing__(pGpu, pEngstate);
+}
+
+static inline NV_STATUS nvdStatePreInitLocked_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pEngstate) {
+    return pEngstate->__nvoc_metadata_ptr->vtable.__nvdStatePreInitLocked__(pGpu, pEngstate);
+}
+
+static inline NV_STATUS nvdStatePreInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pEngstate) {
+    return pEngstate->__nvoc_metadata_ptr->vtable.__nvdStatePreInitUnlocked__(pGpu, pEngstate);
+}
+
+static inline NV_STATUS nvdStateInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pEngstate) {
+    return pEngstate->__nvoc_metadata_ptr->vtable.__nvdStateInitUnlocked__(pGpu, pEngstate);
+}
+
+static inline NV_STATUS nvdStatePreLoad_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pEngstate, NvU32 arg3) {
+    return pEngstate->__nvoc_metadata_ptr->vtable.__nvdStatePreLoad__(pGpu, pEngstate, arg3);
+}
+
+static inline NV_STATUS nvdStateLoad_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pEngstate, NvU32 arg3) {
+    return pEngstate->__nvoc_metadata_ptr->vtable.__nvdStateLoad__(pGpu, pEngstate, arg3);
+}
+
+static inline NV_STATUS nvdStatePostLoad_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pEngstate, NvU32 arg3) {
+    return pEngstate->__nvoc_metadata_ptr->vtable.__nvdStatePostLoad__(pGpu, pEngstate, arg3);
+}
+
+static inline NV_STATUS nvdStatePreUnload_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pEngstate, NvU32 arg3) {
+    return pEngstate->__nvoc_metadata_ptr->vtable.__nvdStatePreUnload__(pGpu, pEngstate, arg3);
+}
+
+static inline NV_STATUS nvdStateUnload_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pEngstate, NvU32 arg3) {
+    return pEngstate->__nvoc_metadata_ptr->vtable.__nvdStateUnload__(pGpu, pEngstate, arg3);
+}
+
+static inline NV_STATUS nvdStatePostUnload_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pEngstate, NvU32 arg3) {
+    return pEngstate->__nvoc_metadata_ptr->vtable.__nvdStatePostUnload__(pGpu, pEngstate, arg3);
+}
+
+static inline void nvdStateDestroy_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pEngstate) {
+    pEngstate->__nvoc_metadata_ptr->vtable.__nvdStateDestroy__(pGpu, pEngstate);
+}
+
+static inline NvBool nvdIsPresent_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pEngstate) {
+    return pEngstate->__nvoc_metadata_ptr->vtable.__nvdIsPresent__(pGpu, pEngstate);
+}
+
+NV_STATUS nvdConstructEngine_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, ENGDESCRIPTOR arg3);
+
+NV_STATUS nvdStateInitLocked_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd);
+
+void nvdDestruct_IMPL(struct NvDebugDump *pNvd);
+
+#define __nvoc_nvdDestruct(pNvd) nvdDestruct_IMPL(pNvd)
+NV_STATUS nvdAllocDebugBuffer_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvU32 arg3, NvU32 *arg4, MEMORY_DESCRIPTOR **arg5);
+
+#ifdef __nvoc_nv_debug_dump_h_disabled
+static inline NV_STATUS nvdAllocDebugBuffer(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvU32 arg3, NvU32 *arg4, MEMORY_DESCRIPTOR **arg5) {
+    NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_nv_debug_dump_h_disabled
+#define nvdAllocDebugBuffer(pGpu, pNvd, arg3, arg4, arg5) nvdAllocDebugBuffer_IMPL(pGpu, pNvd, arg3, arg4, arg5)
+#endif //__nvoc_nv_debug_dump_h_disabled
+
+NV_STATUS nvdFreeDebugBuffer_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, MEMORY_DESCRIPTOR *arg3);
+
+#ifdef __nvoc_nv_debug_dump_h_disabled
+static inline NV_STATUS nvdFreeDebugBuffer(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, MEMORY_DESCRIPTOR *arg3) {
+    NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_nv_debug_dump_h_disabled
+#define nvdFreeDebugBuffer(pGpu, pNvd, arg3) nvdFreeDebugBuffer_IMPL(pGpu, pNvd, arg3)
+#endif //__nvoc_nv_debug_dump_h_disabled
+
+NV_STATUS nvdEngineSignUp_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvdDumpEngineFunc *arg3, NvU32 engDesc, NvU32 flags, void *arg6);
+
+#ifdef __nvoc_nv_debug_dump_h_disabled
+static inline NV_STATUS nvdEngineSignUp(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvdDumpEngineFunc *arg3, NvU32 engDesc, NvU32 flags, void *arg6) {
+    NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_nv_debug_dump_h_disabled
+#define nvdEngineSignUp(pGpu, pNvd, arg3, engDesc, flags, arg6) nvdEngineSignUp_IMPL(pGpu, pNvd, arg3, engDesc, flags, arg6)
+#endif //__nvoc_nv_debug_dump_h_disabled
+
+NV_STATUS nvdEngineRelease_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd);
+
+#ifdef __nvoc_nv_debug_dump_h_disabled
+static inline NV_STATUS nvdEngineRelease(struct OBJGPU *pGpu, struct NvDebugDump *pNvd) {
+    NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_nv_debug_dump_h_disabled
+#define nvdEngineRelease(pGpu, pNvd) nvdEngineRelease_IMPL(pGpu, pNvd)
+#endif //__nvoc_nv_debug_dump_h_disabled
+
+NV_STATUS nvdDoEngineDump_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState, NvU32 arg5);
+
+#ifdef __nvoc_nv_debug_dump_h_disabled
+static inline NV_STATUS nvdDoEngineDump(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState, NvU32 arg5) {
+    NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_nv_debug_dump_h_disabled
+#define nvdDoEngineDump(pGpu, pNvd, pPrbEnc, pNvDumpState, arg5) nvdDoEngineDump_IMPL(pGpu, pNvd, pPrbEnc, pNvDumpState, arg5)
+#endif //__nvoc_nv_debug_dump_h_disabled
+
+NV_STATUS nvdDumpAllEngines_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState);
+
+#ifdef __nvoc_nv_debug_dump_h_disabled
+static inline NV_STATUS nvdDumpAllEngines(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState) {
+    NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_nv_debug_dump_h_disabled
+#define nvdDumpAllEngines(pGpu, pNvd, pPrbEnc, pNvDumpState) nvdDumpAllEngines_IMPL(pGpu, pNvd, pPrbEnc, pNvDumpState)
+#endif //__nvoc_nv_debug_dump_h_disabled
+
+NV_STATUS nvdFindEngine_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvU32 engDesc, NVD_ENGINE_CALLBACK **ppEngineCallback);
+
+#ifdef __nvoc_nv_debug_dump_h_disabled
+static inline NV_STATUS nvdFindEngine(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvU32 engDesc, NVD_ENGINE_CALLBACK **ppEngineCallback) {
+    NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_nv_debug_dump_h_disabled
+#define nvdFindEngine(pGpu, pNvd, engDesc, ppEngineCallback) nvdFindEngine_IMPL(pGpu, pNvd, engDesc, ppEngineCallback)
+#endif //__nvoc_nv_debug_dump_h_disabled
+
+#undef PRIVATE_FIELD
+
+
+#endif // _NV_DEBUG_DUMP_H_
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _G_NV_DEBUG_DUMP_NVOC_H_
diff --git a/src/nvidia/generated/g_nv_name_released.h b/src/nvidia/generated/g_nv_name_released.h
new file mode 100644
index 0000000..5b1d807
--- /dev/null
+++ b/src/nvidia/generated/g_nv_name_released.h
@@ -0,0 +1,6710 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef G_NV_NAME_RELEASED_H
+#define G_NV_NAME_RELEASED_H
+
+typedef struct _CHIPS_RELEASED {
+    unsigned short devID;
+    unsigned short subSystemID;
+    unsigned short subSystemVendorID;
+    const char *name;
+} CHIPS_RELEASED;
+
+static const CHIPS_RELEASED sChipsReleased[] = {
+    { 0x0040, 0x0205, 0x10de, "(null)" },
+    { 0x0091, 0x02c2, 0x10de, "(null)" },
+    { 0x00C1, 0x0335, 0x1043, "(null)" },
+    { 0x00F9, 0x0334, 0x1043, "(null)" },
+    { 0x00F9, 0x0338, 0x1043, "(null)" },
+    { 0x0141, 0x0330, 0x1043, "(null)" },
+    { 0x0141, 0x0331, 0x1043, "(null)" },
+    { 0x0141, 0x9812, 0x1462, "(null)" },
+    { 0x0141, 0x9814, 0x1462, "(null)" },
+    { 0x0141, 0x3466, 0x1642, "(null)" },
+    { 0x0161, 0x02b3, 0x10de, "(null)" },
+    { 0x0161, 0x0013, 0x1462, "(null)" },
+    { 0x0161, 0x0014, 0x1462, "(null)" },
+    { 0x0161, 0x9915, 0x1462, "(null)" },
+    { 0x0161, 0x9916, 0x1462, "(null)" },
+    { 0x0161, 0x3493, 0x1642, "(null)" },
+    { 0x0162, 0x0345, 0x1043, "(null)" },
+    { 0x0165, 0x029d, 0x10de, "(null)" },
+    { 0x0165, 0x0334, 0x10de, "(null)" },
+    { 0x016A, 0x3432, 0x1458, "(null)" },
+    { 0x016A, 0x034e, 0x1462, "(null)" },
+    { 0x016A, 0x3598, 0x1642, "(null)" },
+    { 0x0191, 0x039c, 0x10de, "(null)" },
+    { 0x0193, 0x0420, 0x10de, "(null)" },
+    { 0x0193, 0x0421, 0x10de, "(null)" },
+    { 0x01D1, 0x0394, 0x10de, "(null)" },
+    { 0x01D1, 0x342c, 0x1458, "(null)" },
+    { 0x01D1, 0x3436, 0x1458, "(null)" },
+    { 0x01D1, 0x0344, 0x1462, "(null)" },
+    { 0x01D1, 0x0345, 0x1462, "(null)" },
+    { 0x01D1, 0x0850, 0x1462, "(null)" },
+    { 0x01D1, 0x3598, 0x1642, "(null)" },
+    { 0x01D1, 0x3680, 0x1642, "(null)" },
+    { 0x01D3, 0x034c, 0x1462, "(null)" },
+    { 0x01D3, 0x034d, 0x1462, "(null)" },
+    { 0x01D3, 0x1010, 0x174b, "(null)" },
+    { 0x01D3, 0x1110, 0x174b, "(null)" },
+    { 0x01D3, 0x1120, 0x174b, "(null)" },
+    { 0x01D3, 0x1210, 0x174b, "(null)" },
+    { 0x0242, 0x1014, 0x17aa, "(null)" },
+    { 0x0242, 0x1017, 0x17aa, "(null)" },
+    { 0x0242, 0x3013, 0x17aa, "(null)" },
+    { 0x0242, 0x1019, 0x1b46, "(null)" },
+    { 0x0290, 0x0343, 0x10de, "(null)" },
+    { 0x0294, 0x035b, 0x10de, "(null)" },
+    { 0x0391, 0x033d, 0x10de, "(null)" },
+    { 0x0391, 0x0403, 0x10de, "(null)" },
+    { 0x0392, 0x034c, 0x1043, "(null)" },
+    { 0x0392, 0x034a, 0x10de, "(null)" },
+    { 0x0400, 0x0438, 0x10de, "(null)" },
+    { 0x0402, 0x1713, 0x1019, "(null)" },
+    { 0x0402, 0x034d, 0x1043, "(null)" },
+    { 0x0402, 0x034e, 0x1043, "(null)" },
+    { 0x0402, 0x0439, 0x10de, "(null)" },
+    { 0x0402, 0x0505, 0x10de, "(null)" },
+    { 0x0402, 0x806b, 0x144d, "(null)" },
+    { 0x0402, 0x806c, 0x144d, "(null)" },
+    { 0x0405, 0x011d, 0x1025, "(null)" },
+    { 0x0405, 0x011e, 0x1025, "(null)" },
+    { 0x0405, 0x0121, 0x1025, "(null)" },
+    { 0x0405, 0x0125, 0x1025, "(null)" },
+    { 0x0405, 0x0126, 0x1025, "(null)" },
+    { 0x0405, 0x0127, 0x1025, "(null)" },
+    { 0x0405, 0x0129, 0x1025, "(null)" },
+    { 0x0405, 0x012b, 0x1025, "(null)" },
+    { 0x0405, 0x0136, 0x1025, "(null)" },
+    { 0x0405, 0x013d, 0x1025, "(null)" },
+    { 0x0405, 0x013f, 0x1025, "(null)" },
+    { 0x0405, 0x0142, 0x1025, "(null)" },
+    { 0x0405, 0x0143, 0x1025, "(null)" },
+    { 0x0405, 0x0145, 0x1025, "(null)" },
+    { 0x0405, 0x0146, 0x1025, "(null)" },
+    { 0x0405, 0x015e, 0x1025, "(null)" },
+    { 0x0405, 0x15d2, 0x1043, "(null)" },
+    { 0x0405, 0x1634, 0x1043, "(null)" },
+    { 0x0405, 0x8227, 0x1071, "(null)" },
+    { 0x0405, 0x8230, 0x1071, "(null)" },
+    { 0x0405, 0x6740, 0x1462, "(null)" },
+    { 0x0406, 0x30f4, 0x103c, "(null)" },
+    { 0x0406, 0x3603, 0x103c, "(null)" },
+    { 0x0407, 0x22d4, 0x1019, "(null)" },
+    { 0x0407, 0x011d, 0x1025,
"(null)" }, + { 0x0407, 0x011e, 0x1025, "(null)" }, + { 0x0407, 0x0121, 0x1025, "(null)" }, + { 0x0407, 0x0125, 0x1025, "(null)" }, + { 0x0407, 0x0126, 0x1025, "(null)" }, + { 0x0407, 0x0127, 0x1025, "(null)" }, + { 0x0407, 0x0129, 0x1025, "(null)" }, + { 0x0407, 0x012b, 0x1025, "(null)" }, + { 0x0407, 0x0136, 0x1025, "(null)" }, + { 0x0407, 0x013d, 0x1025, "(null)" }, + { 0x0407, 0x013f, 0x1025, "(null)" }, + { 0x0407, 0x0142, 0x1025, "(null)" }, + { 0x0407, 0x0143, 0x1025, "(null)" }, + { 0x0407, 0x0145, 0x1025, "(null)" }, + { 0x0407, 0x0146, 0x1025, "(null)" }, + { 0x0407, 0x015e, 0x1025, "(null)" }, + { 0x0407, 0x019c, 0x1028, "(null)" }, + { 0x0407, 0x01f1, 0x1028, "(null)" }, + { 0x0407, 0x01f2, 0x1028, "(null)" }, + { 0x0407, 0x0228, 0x1028, "(null)" }, + { 0x0407, 0x0229, 0x1028, "(null)" }, + { 0x0407, 0x022e, 0x1028, "(null)" }, + { 0x0407, 0x1515, 0x1043, "(null)" }, + { 0x0407, 0x1588, 0x1043, "(null)" }, + { 0x0407, 0x1618, 0x1043, "(null)" }, + { 0x0407, 0x1632, 0x1043, "(null)" }, + { 0x0407, 0x17a2, 0x1043, "(null)" }, + { 0x0407, 0x9005, 0x104d, "(null)" }, + { 0x0407, 0x9016, 0x104d, "(null)" }, + { 0x0407, 0x9018, 0x104d, "(null)" }, + { 0x0407, 0x00a0, 0x106b, "(null)" }, + { 0x0407, 0x00a3, 0x106b, "(null)" }, + { 0x0407, 0x00a4, 0x106b, "(null)" }, + { 0x0407, 0x8049, 0x107b, "(null)" }, + { 0x0407, 0x3c04, 0x109f, "(null)" }, + { 0x0407, 0x0040, 0x1170, "(null)" }, + { 0x0407, 0x0001, 0x1179, "(null)" }, + { 0x0407, 0xff00, 0x1179, "(null)" }, + { 0x0407, 0xff10, 0x1179, "(null)" }, + { 0x0407, 0xff13, 0x1179, "(null)" }, + { 0x0407, 0xc02f, 0x144d, "(null)" }, + { 0x0407, 0xc030, 0x144d, "(null)" }, + { 0x0407, 0xc031, 0x144d, "(null)" }, + { 0x0407, 0xc519, 0x144d, "(null)" }, + { 0x0407, 0x3fad, 0x1462, "(null)" }, + { 0x0407, 0x3fbb, 0x1462, "(null)" }, + { 0x0407, 0x3fe9, 0x1462, "(null)" }, + { 0x0407, 0x401b, 0x1462, "(null)" }, + { 0x0407, 0x4327, 0x1462, "(null)" }, + { 0x0407, 0x63f2, 0x1462, "(null)" }, + { 0x0407, 0x0023, 0x14c0, "(null)" }, + { 0x0407, 0x0025, 0x14c0, "(null)" }, + { 0x0407, 0x0770, 0x152d, "(null)" }, + { 0x0407, 0x0573, 0x1558, "(null)" }, + { 0x0407, 0x2200, 0x1558, "(null)" }, + { 0x0407, 0x2201, 0x1558, "(null)" }, + { 0x0407, 0x22d4, 0x1584, "(null)" }, + { 0x0407, 0xc20d, 0x1631, "(null)" }, + { 0x0407, 0xc20e, 0x1631, "(null)" }, + { 0x0407, 0x3d7b, 0x17aa, "(null)" }, + { 0x0407, 0x2089, 0x17c0, "(null)" }, + { 0x0407, 0x208a, 0x17c0, "(null)" }, + { 0x0407, 0x0801, 0x17ff, "(null)" }, + { 0x0407, 0x0091, 0x1854, "(null)" }, + { 0x0407, 0x0115, 0x1854, "(null)" }, + { 0x0407, 0x0116, 0x1854, "(null)" }, + { 0x0407, 0x0117, 0x1854, "(null)" }, + { 0x0407, 0x0118, 0x1854, "(null)" }, + { 0x0407, 0x0119, 0x1854, "(null)" }, + { 0x0407, 0x011a, 0x1854, "(null)" }, + { 0x0407, 0x1402, 0x1a46, "(null)" }, + { 0x0407, 0x0284, 0x1a92, "(null)" }, + { 0x0408, 0x0145, 0x1025, "(null)" }, + { 0x0408, 0x30f4, 0x103c, "(null)" }, + { 0x0408, 0x1902, 0x1043, "(null)" }, + { 0x0409, 0x0145, 0x1025, "(null)" }, + { 0x0409, 0x0146, 0x1025, "(null)" }, + { 0x0409, 0x019b, 0x1028, "(null)" }, + { 0x0409, 0x019c, 0x1028, "(null)" }, + { 0x0409, 0x01f1, 0x1028, "(null)" }, + { 0x0409, 0x1584, 0x1043, "(null)" }, + { 0x0409, 0x1619, 0x1043, "(null)" }, + { 0x0409, 0x16d2, 0x1043, "(null)" }, + { 0x0409, 0x1832, 0x1043, "(null)" }, + { 0x0409, 0xff00, 0x1179, "(null)" }, + { 0x0409, 0x0770, 0x152d, "(null)" }, + { 0x0409, 0x0573, 0x1558, "(null)" }, + { 0x0409, 0x0860, 0x1558, "(null)" }, + { 0x0409, 0x0902, 0x1558, "(null)" }, + { 0x0409, 0x2052, 
0x161f, "(null)" }, + { 0x040B, 0x30c3, 0x103c, "(null)" }, + { 0x040C, 0x30c5, 0x103c, "(null)" }, + { 0x040C, 0x1423, 0x10cf, "(null)" }, + { 0x040C, 0x20d9, 0x17aa, "(null)" }, + { 0x040D, 0x019b, 0x1028, "(null)" }, + { 0x040D, 0x30c3, 0x103c, "(null)" }, + { 0x040D, 0x0770, 0x152d, "(null)" }, + { 0x040D, 0x0481, 0x1558, "(null)" }, + { 0x040D, 0x0573, 0x1558, "(null)" }, + { 0x040D, 0x0860, 0x1558, "(null)" }, + { 0x040D, 0x0902, 0x1558, "(null)" }, + { 0x040D, 0x2052, 0x161f, "(null)" }, + { 0x0410, 0x3058, 0x174b, "(null)" }, + { 0x0410, 0x9076, 0x1b0a, "(null)" }, + { 0x0421, 0x8069, 0x144d, "(null)" }, + { 0x0421, 0x806a, 0x144d, "(null)" }, + { 0x0421, 0x8072, 0x144d, "(null)" }, + { 0x0421, 0x0198, 0x1620, "(null)" }, + { 0x0422, 0x8873, 0x1033, "(null)" }, + { 0x0422, 0x88b7, 0x1033, "(null)" }, + { 0x0423, 0x806f, 0x144d, "(null)" }, + { 0x0423, 0x8070, 0x144d, "(null)" }, + { 0x0423, 0x8071, 0x144d, "(null)" }, + { 0x0423, 0x0199, 0x1620, "(null)" }, + { 0x0425, 0x22d0, 0x1019, "(null)" }, + { 0x0425, 0x011d, 0x1025, "(null)" }, + { 0x0425, 0x011e, 0x1025, "(null)" }, + { 0x0425, 0x011f, 0x1025, "(null)" }, + { 0x0425, 0x0121, 0x1025, "(null)" }, + { 0x0425, 0x0125, 0x1025, "(null)" }, + { 0x0425, 0x0126, 0x1025, "(null)" }, + { 0x0425, 0x0127, 0x1025, "(null)" }, + { 0x0425, 0x0129, 0x1025, "(null)" }, + { 0x0425, 0x012b, 0x1025, "(null)" }, + { 0x0425, 0x0136, 0x1025, "(null)" }, + { 0x0425, 0x013d, 0x1025, "(null)" }, + { 0x0425, 0x0142, 0x1025, "(null)" }, + { 0x0425, 0x0143, 0x1025, "(null)" }, + { 0x0425, 0x0145, 0x1025, "(null)" }, + { 0x0425, 0x0146, 0x1025, "(null)" }, + { 0x0425, 0x0275, 0x1028, "(null)" }, + { 0x0425, 0x8875, 0x1033, "(null)" }, + { 0x0425, 0x30cc, 0x103c, "(null)" }, + { 0x0425, 0x1514, 0x1043, "(null)" }, + { 0x0425, 0x1612, 0x1043, "(null)" }, + { 0x0425, 0x1617, 0x1043, "(null)" }, + { 0x0425, 0x826c, 0x1043, "(null)" }, + { 0x0425, 0x9005, 0x104d, "(null)" }, + { 0x0425, 0x8227, 0x1071, "(null)" }, + { 0x0425, 0x8253, 0x1071, "(null)" }, + { 0x0425, 0x0040, 0x1170, "(null)" }, + { 0x0425, 0xc02f, 0x144d, "(null)" }, + { 0x0425, 0xc030, 0x144d, "(null)" }, + { 0x0425, 0xc031, 0x144d, "(null)" }, + { 0x0425, 0xc509, 0x144d, "(null)" }, + { 0x0425, 0xc519, 0x144d, "(null)" }, + { 0x0425, 0x3fad, 0x1462, "(null)" }, + { 0x0425, 0x3fbb, 0x1462, "(null)" }, + { 0x0425, 0x3fe9, 0x1462, "(null)" }, + { 0x0425, 0x401b, 0x1462, "(null)" }, + { 0x0425, 0x4327, 0x1462, "(null)" }, + { 0x0425, 0x2f03, 0x1509, "(null)" }, + { 0x0425, 0x2f04, 0x1509, "(null)" }, + { 0x0425, 0x2f05, 0x1509, "(null)" }, + { 0x0425, 0xc104, 0x1631, "(null)" }, + { 0x0425, 0xc105, 0x1631, "(null)" }, + { 0x0425, 0x1107, 0x1734, "(null)" }, + { 0x0425, 0x110b, 0x1734, "(null)" }, + { 0x0425, 0x110c, 0x1734, "(null)" }, + { 0x0425, 0x2089, 0x17c0, "(null)" }, + { 0x0425, 0x0590, 0x17ff, "(null)" }, + { 0x0425, 0x0090, 0x1854, "(null)" }, + { 0x0425, 0x0094, 0x1854, "(null)" }, + { 0x0425, 0x0098, 0x1854, "(null)" }, + { 0x0425, 0x009f, 0x1854, "(null)" }, + { 0x0425, 0x0106, 0x1854, "(null)" }, + { 0x0425, 0x0109, 0x1854, "(null)" }, + { 0x0425, 0x0115, 0x1854, "(null)" }, + { 0x0425, 0x0116, 0x1854, "(null)" }, + { 0x0425, 0x0117, 0x1854, "(null)" }, + { 0x0425, 0x0118, 0x1854, "(null)" }, + { 0x0425, 0x0119, 0x1854, "(null)" }, + { 0x0425, 0x011a, 0x1854, "(null)" }, + { 0x0425, 0x0120, 0x1854, "(null)" }, + { 0x0426, 0x8897, 0x1033, "(null)" }, + { 0x0426, 0x9005, 0x104d, "(null)" }, + { 0x0426, 0x9016, 0x104d, "(null)" }, + { 0x0426, 0x9017, 0x104d, "(null)" }, + { 0x0426, 
0x9018, 0x104d, "(null)" }, + { 0x0426, 0x902d, 0x104d, "(null)" }, + { 0x0426, 0x9030, 0x104d, "(null)" }, + { 0x0426, 0x5584, 0x1991, "(null)" }, + { 0x0427, 0x011d, 0x1025, "(null)" }, + { 0x0427, 0x011e, 0x1025, "(null)" }, + { 0x0427, 0x011f, 0x1025, "(null)" }, + { 0x0427, 0x0121, 0x1025, "(null)" }, + { 0x0427, 0x0125, 0x1025, "(null)" }, + { 0x0427, 0x0126, 0x1025, "(null)" }, + { 0x0427, 0x0127, 0x1025, "(null)" }, + { 0x0427, 0x0129, 0x1025, "(null)" }, + { 0x0427, 0x012b, 0x1025, "(null)" }, + { 0x0427, 0x0136, 0x1025, "(null)" }, + { 0x0427, 0x013d, 0x1025, "(null)" }, + { 0x0427, 0x0142, 0x1025, "(null)" }, + { 0x0427, 0x0143, 0x1025, "(null)" }, + { 0x0427, 0x01f1, 0x1028, "(null)" }, + { 0x0427, 0x01f2, 0x1028, "(null)" }, + { 0x0427, 0x01f3, 0x1028, "(null)" }, + { 0x0427, 0x0209, 0x1028, "(null)" }, + { 0x0427, 0x020a, 0x1028, "(null)" }, + { 0x0427, 0x0227, 0x1028, "(null)" }, + { 0x0427, 0x0228, 0x1028, "(null)" }, + { 0x0427, 0x0229, 0x1028, "(null)" }, + { 0x0427, 0x022e, 0x1028, "(null)" }, + { 0x0427, 0x026f, 0x1028, "(null)" }, + { 0x0427, 0x0273, 0x1028, "(null)" }, + { 0x0427, 0x0286, 0x1028, "(null)" }, + { 0x0427, 0x02b5, 0x1028, "(null)" }, + { 0x0427, 0x30cc, 0x103c, "(null)" }, + { 0x0427, 0x30cd, 0x103c, "(null)" }, + { 0x0427, 0x30cf, 0x103c, "(null)" }, + { 0x0427, 0x30de, 0x103c, "(null)" }, + { 0x0427, 0x17c2, 0x1043, "(null)" }, + { 0x0427, 0x9008, 0x104d, "(null)" }, + { 0x0427, 0x3c03, 0x109f, "(null)" }, + { 0x0427, 0xff10, 0x1179, "(null)" }, + { 0x0427, 0xff13, 0x1179, "(null)" }, + { 0x0427, 0x8068, 0x144d, "(null)" }, + { 0x0427, 0xb04b, 0x144d, "(null)" }, + { 0x0427, 0xc02f, 0x144d, "(null)" }, + { 0x0427, 0xc030, 0x144d, "(null)" }, + { 0x0427, 0xc031, 0x144d, "(null)" }, + { 0x0427, 0xc509, 0x144d, "(null)" }, + { 0x0427, 0xc510, 0x144d, "(null)" }, + { 0x0427, 0xc519, 0x144d, "(null)" }, + { 0x0427, 0x3fe9, 0x1462, "(null)" }, + { 0x0427, 0x401b, 0x1462, "(null)" }, + { 0x0427, 0x4327, 0x1462, "(null)" }, + { 0x0427, 0x0023, 0x14c0, "(null)" }, + { 0x0427, 0x0025, 0x14c0, "(null)" }, + { 0x0427, 0xa00e, 0x14ff, "(null)" }, + { 0x0427, 0x2f04, 0x1509, "(null)" }, + { 0x0427, 0x0763, 0x152d, "(null)" }, + { 0x0427, 0x110b, 0x1734, "(null)" }, + { 0x0427, 0x3862, 0x17aa, "(null)" }, + { 0x0427, 0x39f5, 0x17aa, "(null)" }, + { 0x0427, 0x3d7a, 0x17aa, "(null)" }, + { 0x0427, 0x400c, 0x17aa, "(null)" }, + { 0x0427, 0x208a, 0x17c0, "(null)" }, + { 0x0427, 0x0092, 0x1854, "(null)" }, + { 0x0427, 0x0093, 0x1854, "(null)" }, + { 0x0427, 0x0097, 0x1854, "(null)" }, + { 0x0427, 0x0100, 0x1854, "(null)" }, + { 0x0427, 0x0109, 0x1854, "(null)" }, + { 0x0427, 0x011f, 0x1854, "(null)" }, + { 0x0427, 0x0125, 0x1854, "(null)" }, + { 0x0427, 0x0129, 0x1854, "(null)" }, + { 0x0427, 0x012a, 0x1854, "(null)" }, + { 0x0427, 0x012d, 0x1854, "(null)" }, + { 0x0427, 0x012e, 0x1854, "(null)" }, + { 0x0427, 0xa002, 0x1940, "(null)" }, + { 0x0427, 0x4605, 0x1961, "(null)" }, + { 0x0427, 0x5584, 0x1991, "(null)" }, + { 0x0427, 0x1402, 0x1a46, "(null)" }, + { 0x0428, 0x011d, 0x1025, "(null)" }, + { 0x0428, 0x011e, 0x1025, "(null)" }, + { 0x0428, 0x011f, 0x1025, "(null)" }, + { 0x0428, 0x0121, 0x1025, "(null)" }, + { 0x0428, 0x0125, 0x1025, "(null)" }, + { 0x0428, 0x0126, 0x1025, "(null)" }, + { 0x0428, 0x0127, 0x1025, "(null)" }, + { 0x0428, 0x0129, 0x1025, "(null)" }, + { 0x0428, 0x012b, 0x1025, "(null)" }, + { 0x0428, 0x0136, 0x1025, "(null)" }, + { 0x0428, 0x013d, 0x1025, "(null)" }, + { 0x0428, 0x0142, 0x1025, "(null)" }, + { 0x0428, 0x0143, 0x1025, "(null)" }, + { 
0x0428, 0x019b, 0x1028, "(null)" }, + { 0x0428, 0x01f1, 0x1028, "(null)" }, + { 0x0428, 0x01f2, 0x1028, "(null)" }, + { 0x0428, 0x020a, 0x1028, "(null)" }, + { 0x0428, 0x1513, 0x1043, "(null)" }, + { 0x0428, 0x1616, 0x1043, "(null)" }, + { 0x0428, 0x8265, 0x1043, "(null)" }, + { 0x0428, 0xa015, 0x1071, "(null)" }, + { 0x0428, 0x1422, 0x10cf, "(null)" }, + { 0x0428, 0x0001, 0x1179, "(null)" }, + { 0x0428, 0x0002, 0x1179, "(null)" }, + { 0x0428, 0xc510, 0x144d, "(null)" }, + { 0x0428, 0x3fad, 0x1462, "(null)" }, + { 0x0428, 0x3fe9, 0x1462, "(null)" }, + { 0x0428, 0x401b, 0x1462, "(null)" }, + { 0x0428, 0x4327, 0x1462, "(null)" }, + { 0x0428, 0x2f04, 0x1509, "(null)" }, + { 0x0428, 0x0663, 0x1558, "(null)" }, + { 0x0428, 0x0668, 0x1558, "(null)" }, + { 0x0428, 0xc103, 0x1631, "(null)" }, + { 0x0428, 0xc20b, 0x1631, "(null)" }, + { 0x0428, 0x110b, 0x1734, "(null)" }, + { 0x0428, 0x3869, 0x17aa, "(null)" }, + { 0x0428, 0x0593, 0x17ff, "(null)" }, + { 0x0428, 0x0800, 0x17ff, "(null)" }, + { 0x0428, 0x009e, 0x1854, "(null)" }, + { 0x0428, 0x010e, 0x1854, "(null)" }, + { 0x0428, 0x010f, 0x1854, "(null)" }, + { 0x0428, 0x0110, 0x1854, "(null)" }, + { 0x0428, 0x0111, 0x1854, "(null)" }, + { 0x0428, 0x0112, 0x1854, "(null)" }, + { 0x0428, 0x0113, 0x1854, "(null)" }, + { 0x0428, 0x0129, 0x1854, "(null)" }, + { 0x0428, 0x012a, 0x1854, "(null)" }, + { 0x0428, 0x012d, 0x1854, "(null)" }, + { 0x0428, 0x012e, 0x1854, "(null)" }, + { 0x0428, 0x4605, 0x1961, "(null)" }, + { 0x0428, 0x5584, 0x1991, "(null)" }, + { 0x0429, 0x01fe, 0x1028, "(null)" }, + { 0x0429, 0x20d8, 0x17aa, "(null)" }, + { 0x042A, 0x0001, 0x1179, "(null)" }, + { 0x042A, 0x0002, 0x1179, "(null)" }, + { 0x042B, 0x019c, 0x1028, "(null)" }, + { 0x042B, 0x01f9, 0x1028, "(null)" }, + { 0x042B, 0x01fe, 0x1028, "(null)" }, + { 0x042D, 0x019b, 0x1028, "(null)" }, + { 0x042D, 0x01ff, 0x1028, "(null)" }, + { 0x042D, 0x024a, 0x1028, "(null)" }, + { 0x042D, 0x024b, 0x1028, "(null)" }, + { 0x042E, 0x011d, 0x1025, "(null)" }, + { 0x042E, 0x011e, 0x1025, "(null)" }, + { 0x042E, 0x011f, 0x1025, "(null)" }, + { 0x042E, 0x0121, 0x1025, "(null)" }, + { 0x042E, 0x0125, 0x1025, "(null)" }, + { 0x042E, 0x0126, 0x1025, "(null)" }, + { 0x042E, 0x0127, 0x1025, "(null)" }, + { 0x042E, 0x0129, 0x1025, "(null)" }, + { 0x042E, 0x012b, 0x1025, "(null)" }, + { 0x042E, 0x0136, 0x1025, "(null)" }, + { 0x042E, 0x013d, 0x1025, "(null)" }, + { 0x042E, 0x013f, 0x1025, "(null)" }, + { 0x042E, 0x0142, 0x1025, "(null)" }, + { 0x042E, 0x0143, 0x1025, "(null)" }, + { 0x042E, 0x0145, 0x1025, "(null)" }, + { 0x042E, 0x0146, 0x1025, "(null)" }, + { 0x042E, 0x17c2, 0x1043, "(null)" }, + { 0x042E, 0x3871, 0x17aa, "(null)" }, + { 0x042E, 0x3872, 0x17aa, "(null)" }, + { 0x042E, 0x3875, 0x17aa, "(null)" }, + { 0x042E, 0x208a, 0x17c0, "(null)" }, + { 0x042F, 0x0492, 0x10de, "(null)" }, + { 0x05E2, 0x0617, 0x10de, "(null)" }, + { 0x05E3, 0x0000, 0x106b, "(null)" }, + { 0x05E7, 0x0595, 0x10de, "(null)" }, + { 0x05E7, 0x066a, 0x10de, "(null)" }, + { 0x05E7, 0x068f, 0x10de, "(null)" }, + { 0x05E7, 0x0697, 0x10de, "(null)" }, + { 0x05E7, 0x0714, 0x10de, "(null)" }, + { 0x05E7, 0x0743, 0x10de, "(null)" }, + { 0x05FE, 0x0000, 0x106b, "(null)" }, + { 0x0601, 0x9008, 0x1043, "(null)" }, + { 0x0601, 0x9017, 0x1b0a, "(null)" }, + { 0x0603, 0x9044, 0x1b0a, "(null)" }, + { 0x0605, 0x0612, 0x10de, "(null)" }, + { 0x0605, 0x062d, 0x10de, "(null)" }, + { 0x0607, 0x0736, 0x10de, "(null)" }, + { 0x0608, 0x019c, 0x1028, "(null)" }, + { 0x0608, 0xff01, 0x1179, "(null)" }, + { 0x0608, 0x0481, 0x1558, "(null)" 
}, + { 0x0608, 0x207a, 0x161f, "(null)" }, + { 0x0609, 0x019b, 0x1028, "(null)" }, + { 0x0609, 0x30d4, 0x103c, "(null)" }, + { 0x0609, 0x00a7, 0x106b, "(null)" }, + { 0x0609, 0x0690, 0x107b, "(null)" }, + { 0x0609, 0x0121, 0x1170, "(null)" }, + { 0x0609, 0x0770, 0x152d, "(null)" }, + { 0x060A, 0x0161, 0x1025, "(null)" }, + { 0x060A, 0x0235, 0x1025, "(null)" }, + { 0x060A, 0x02a1, 0x1028, "(null)" }, + { 0x060A, 0x2027, 0x1043, "(null)" }, + { 0x060A, 0x0481, 0x1558, "(null)" }, + { 0x060A, 0x0577, 0x1558, "(null)" }, + { 0x060A, 0x0860, 0x1558, "(null)" }, + { 0x060A, 0x0902, 0x1558, "(null)" }, + { 0x060A, 0x0903, 0x1558, "(null)" }, + { 0x060A, 0x7200, 0x1558, "(null)" }, + { 0x060A, 0x8100, 0x1558, "(null)" }, + { 0x060A, 0x8687, 0x1558, "(null)" }, + { 0x060A, 0x8689, 0x1558, "(null)" }, + { 0x060A, 0x9800, 0x1558, "(null)" }, + { 0x060B, 0x019c, 0x1028, "(null)" }, + { 0x060B, 0x1222, 0x1043, "(null)" }, + { 0x060B, 0x0770, 0x152d, "(null)" }, + { 0x060B, 0x0481, 0x1558, "(null)" }, + { 0x060B, 0x0573, 0x1558, "(null)" }, + { 0x060B, 0x0860, 0x1558, "(null)" }, + { 0x060B, 0x0902, 0x1558, "(null)" }, + { 0x060B, 0x207a, 0x161f, "(null)" }, + { 0x060C, 0x0145, 0x1025, "(null)" }, + { 0x060C, 0x0146, 0x1025, "(null)" }, + { 0x060C, 0x019c, 0x1028, "(null)" }, + { 0x060C, 0x1619, 0x1043, "(null)" }, + { 0x060C, 0x1620, 0x1043, "(null)" }, + { 0x060C, 0x16d6, 0x1043, "(null)" }, + { 0x060C, 0x0121, 0x1170, "(null)" }, + { 0x060C, 0x0770, 0x152d, "(null)" }, + { 0x060C, 0x0481, 0x1558, "(null)" }, + { 0x060C, 0x0573, 0x1558, "(null)" }, + { 0x060C, 0x0860, 0x1558, "(null)" }, + { 0x060C, 0x0902, 0x1558, "(null)" }, + { 0x060F, 0x02a1, 0x1028, "(null)" }, + { 0x060F, 0x043a, 0x1028, "(null)" }, + { 0x060F, 0x070a, 0x1462, "(null)" }, + { 0x060F, 0x1062, 0x1462, "(null)" }, + { 0x060F, 0x0053, 0x14c0, "(null)" }, + { 0x060F, 0x0511, 0x1558, "(null)" }, + { 0x060F, 0x0512, 0x1558, "(null)" }, + { 0x060F, 0x0903, 0x1558, "(null)" }, + { 0x060F, 0x7200, 0x1558, "(null)" }, + { 0x060F, 0x8100, 0x1558, "(null)" }, + { 0x060F, 0x8687, 0x1558, "(null)" }, + { 0x060F, 0x8689, 0x1558, "(null)" }, + { 0x0611, 0x053c, 0x10de, "(null)" }, + { 0x0615, 0x1543, 0x1462, "(null)" }, + { 0x0615, 0x2103, 0x19da, "(null)" }, + { 0x0615, 0x9038, 0x1b0a, "(null)" }, + { 0x0617, 0x019c, 0x1028, "(null)" }, + { 0x0617, 0x1982, 0x1043, "(null)" }, + { 0x0617, 0x2003, 0x1043, "(null)" }, + { 0x0617, 0xff01, 0x1179, "(null)" }, + { 0x0617, 0x0770, 0x152d, "(null)" }, + { 0x0617, 0x0481, 0x1558, "(null)" }, + { 0x0617, 0x0902, 0x1558, "(null)" }, + { 0x0617, 0x207a, 0x161f, "(null)" }, + { 0x0618, 0x028e, 0x1025, "(null)" }, + { 0x0618, 0x02a1, 0x1028, "(null)" }, + { 0x0618, 0x02a2, 0x1028, "(null)" }, + { 0x0618, 0x1a52, 0x1043, "(null)" }, + { 0x0618, 0x2028, 0x1043, "(null)" }, + { 0x0618, 0x202b, 0x1043, "(null)" }, + { 0x0618, 0x2033, 0x1043, "(null)" }, + { 0x0618, 0x0481, 0x1558, "(null)" }, + { 0x0618, 0x0577, 0x1558, "(null)" }, + { 0x0618, 0x0860, 0x1558, "(null)" }, + { 0x0618, 0x0902, 0x1558, "(null)" }, + { 0x0618, 0x0903, 0x1558, "(null)" }, + { 0x0618, 0x8100, 0x1558, "(null)" }, + { 0x0618, 0x8687, 0x1558, "(null)" }, + { 0x0618, 0x8689, 0x1558, "(null)" }, + { 0x0618, 0x9800, 0x1558, "(null)" }, + { 0x061C, 0x019b, 0x1028, "(null)" }, + { 0x061C, 0x16d9, 0x1043, "(null)" }, + { 0x061C, 0x0518, 0x10de, "(null)" }, + { 0x061C, 0x0770, 0x152d, "(null)" }, + { 0x061C, 0x211a, 0x17aa, "(null)" }, + { 0x061D, 0x02ef, 0x1028, "(null)" }, + { 0x061D, 0x1520, 0x103c, "(null)" }, + { 0x061D, 0x0903, 0x1558, 
"(null)" }, + { 0x061D, 0x5102, 0x1558, "(null)" }, + { 0x061D, 0x7100, 0x1558, "(null)" }, + { 0x061D, 0x7200, 0x1558, "(null)" }, + { 0x061D, 0x8100, 0x1558, "(null)" }, + { 0x061D, 0x8687, 0x1558, "(null)" }, + { 0x061D, 0x8689, 0x1558, "(null)" }, + { 0x061D, 0x214f, 0x17aa, "(null)" }, + { 0x061D, 0x219f, 0x17aa, "(null)" }, + { 0x061E, 0x019c, 0x1028, "(null)" }, + { 0x061E, 0x0251, 0x1028, "(null)" }, + { 0x061E, 0x02ef, 0x1028, "(null)" }, + { 0x061E, 0x0539, 0x103c, "(null)" }, + { 0x061E, 0x0481, 0x1558, "(null)" }, + { 0x061E, 0x0573, 0x1558, "(null)" }, + { 0x061E, 0x0860, 0x1558, "(null)" }, + { 0x061E, 0x0902, 0x1558, "(null)" }, + { 0x061E, 0x2118, 0x17aa, "(null)" }, + { 0x061E, 0x2125, 0x17aa, "(null)" }, + { 0x061F, 0x02ef, 0x1028, "(null)" }, + { 0x061F, 0x1520, 0x103c, "(null)" }, + { 0x061F, 0x0903, 0x1558, "(null)" }, + { 0x061F, 0x7200, 0x1558, "(null)" }, + { 0x061F, 0x8100, 0x1558, "(null)" }, + { 0x061F, 0x8687, 0x1558, "(null)" }, + { 0x061F, 0x8689, 0x1558, "(null)" }, + { 0x061F, 0x214e, 0x17aa, "(null)" }, + { 0x061F, 0x214f, 0x17aa, "(null)" }, + { 0x061F, 0x219f, 0x17aa, "(null)" }, + { 0x061F, 0x21a0, 0x17aa, "(null)" }, + { 0x0622, 0x807a, 0x144d, "(null)" }, + { 0x0622, 0x807b, 0x144d, "(null)" }, + { 0x0622, 0x807c, 0x144d, "(null)" }, + { 0x0622, 0x807d, 0x144d, "(null)" }, + { 0x0622, 0x1275, 0x1462, "(null)" }, + { 0x0622, 0x1043, 0x19da, "(null)" }, + { 0x0623, 0x9015, 0x1b0a, "(null)" }, + { 0x0626, 0x9015, 0x1b0a, "(null)" }, + { 0x0628, 0x19e2, 0x1043, "(null)" }, + { 0x0628, 0x2004, 0x1043, "(null)" }, + { 0x0628, 0x2012, 0x1043, "(null)" }, + { 0x0628, 0x0696, 0x107b, "(null)" }, + { 0x0628, 0xff00, 0x1179, "(null)" }, + { 0x0628, 0x0481, 0x1558, "(null)" }, + { 0x0628, 0x0573, 0x1558, "(null)" }, + { 0x0628, 0x0860, 0x1558, "(null)" }, + { 0x0628, 0x0902, 0x1558, "(null)" }, + { 0x0628, 0x0188, 0x1631, "(null)" }, + { 0x062A, 0x19d2, 0x1043, "(null)" }, + { 0x062A, 0x2002, 0x1043, "(null)" }, + { 0x062A, 0x0696, 0x107b, "(null)" }, + { 0x062A, 0x0001, 0x1179, "(null)" }, + { 0x062A, 0xff00, 0x1179, "(null)" }, + { 0x062A, 0xff01, 0x1179, "(null)" }, + { 0x062A, 0x0481, 0x1558, "(null)" }, + { 0x062B, 0x1992, 0x1043, "(null)" }, + { 0x062B, 0x2014, 0x1043, "(null)" }, + { 0x062B, 0x2016, 0x1043, "(null)" }, + { 0x062B, 0x2019, 0x1043, "(null)" }, + { 0x062B, 0x2020, 0x1043, "(null)" }, + { 0x062B, 0x0696, 0x107b, "(null)" }, + { 0x062B, 0x7220, 0x1462, "(null)" }, + { 0x062B, 0x0481, 0x1558, "(null)" }, + { 0x062B, 0x0573, 0x1558, "(null)" }, + { 0x062B, 0x0860, 0x1558, "(null)" }, + { 0x062B, 0x0902, 0x1558, "(null)" }, + { 0x062C, 0x22d7, 0x1019, "(null)" }, + { 0x062C, 0x2015, 0x1043, "(null)" }, + { 0x062C, 0x2017, 0x1043, "(null)" }, + { 0x062C, 0x0696, 0x107b, "(null)" }, + { 0x062C, 0xff01, 0x1179, "(null)" }, + { 0x062C, 0x0481, 0x1558, "(null)" }, + { 0x062C, 0x0573, 0x1558, "(null)" }, + { 0x062C, 0x0860, 0x1558, "(null)" }, + { 0x062C, 0x0902, 0x1558, "(null)" }, + { 0x062C, 0x0188, 0x1631, "(null)" }, + { 0x062C, 0x1138, 0x1734, "(null)" }, + { 0x062E, 0x0605, 0x106b, "(null)" }, + { 0x0631, 0x1014, 0x1462, "(null)" }, + { 0x0631, 0x1024, 0x1462, "(null)" }, + { 0x0631, 0x0481, 0x1558, "(null)" }, + { 0x0631, 0x0577, 0x1558, "(null)" }, + { 0x0631, 0x0860, 0x1558, "(null)" }, + { 0x0631, 0x0902, 0x1558, "(null)" }, + { 0x0631, 0x0903, 0x1558, "(null)" }, + { 0x0631, 0x9800, 0x1558, "(null)" }, + { 0x0631, 0x2150, 0x17aa, "(null)" }, + { 0x063A, 0x019b, 0x1028, "(null)" }, + { 0x063A, 0x0251, 0x1028, "(null)" }, + { 0x063A, 0x02ef, 
0x1028, "(null)" }, + { 0x063A, 0x30ec, 0x103c, "(null)" }, + { 0x063A, 0x0481, 0x1558, "(null)" }, + { 0x063A, 0x0860, 0x1558, "(null)" }, + { 0x063A, 0x0902, 0x1558, "(null)" }, + { 0x063A, 0x2119, 0x17aa, "(null)" }, + { 0x063A, 0x2124, 0x17aa, "(null)" }, + { 0x0640, 0x061b, 0x106b, "(null)" }, + { 0x0640, 0x3796, 0x1642, "(null)" }, + { 0x0640, 0x7045, 0x19da, "(null)" }, + { 0x0644, 0x8077, 0x144d, "(null)" }, + { 0x0644, 0x8078, 0x144d, "(null)" }, + { 0x0644, 0x8079, 0x144d, "(null)" }, + { 0x0644, 0x807e, 0x144d, "(null)" }, + { 0x0644, 0x807f, 0x144d, "(null)" }, + { 0x0644, 0x8080, 0x144d, "(null)" }, + { 0x0644, 0x8081, 0x144d, "(null)" }, + { 0x0644, 0x1330, 0x1462, "(null)" }, + { 0x0644, 0x2045, 0x19da, "(null)" }, + { 0x0645, 0x900c, 0x1b0a, "(null)" }, + { 0x0646, 0x88fc, 0x1033, "(null)" }, + { 0x0646, 0x1330, 0x1462, "(null)" }, + { 0x0646, 0xa330, 0x1462, "(null)" }, + { 0x0646, 0x900c, 0x1b0a, "(null)" }, + { 0x0647, 0x00a9, 0x106b, "(null)" }, + { 0x0647, 0x00b3, 0x106b, "(null)" }, + { 0x0647, 0x00bc, 0x106b, "(null)" }, + { 0x0648, 0x011e, 0x1025, "(null)" }, + { 0x0648, 0x0121, 0x1025, "(null)" }, + { 0x0648, 0x0126, 0x1025, "(null)" }, + { 0x0648, 0x013b, 0x1025, "(null)" }, + { 0x0648, 0x013c, 0x1025, "(null)" }, + { 0x0648, 0x013d, 0x1025, "(null)" }, + { 0x0648, 0x013e, 0x1025, "(null)" }, + { 0x0648, 0x013f, 0x1025, "(null)" }, + { 0x0648, 0x0142, 0x1025, "(null)" }, + { 0x0648, 0x0143, 0x1025, "(null)" }, + { 0x0648, 0x0145, 0x1025, "(null)" }, + { 0x0648, 0x0146, 0x1025, "(null)" }, + { 0x0648, 0x0149, 0x1025, "(null)" }, + { 0x0648, 0x014d, 0x1025, "(null)" }, + { 0x0648, 0x015e, 0x1025, "(null)" }, + { 0x0648, 0x0175, 0x1025, "(null)" }, + { 0x0648, 0x02c0, 0x1028, "(null)" }, + { 0x0648, 0x88d4, 0x1033, "(null)" }, + { 0x0648, 0x88d8, 0x1033, "(null)" }, + { 0x0648, 0x1892, 0x1043, "(null)" }, + { 0x0648, 0x2010, 0x1043, "(null)" }, + { 0x0648, 0x2011, 0x1043, "(null)" }, + { 0x0648, 0x900f, 0x1043, "(null)" }, + { 0x0648, 0x9070, 0x1071, "(null)" }, + { 0x0648, 0x0900, 0x107b, "(null)" }, + { 0x0648, 0x0001, 0x1179, "(null)" }, + { 0x0648, 0xff00, 0x1179, "(null)" }, + { 0x0648, 0xb051, 0x144d, "(null)" }, + { 0x0648, 0xb058, 0x144d, "(null)" }, + { 0x0648, 0xc03f, 0x144d, "(null)" }, + { 0x0648, 0xc044, 0x144d, "(null)" }, + { 0x0648, 0xc520, 0x144d, "(null)" }, + { 0x0648, 0x0773, 0x152d, "(null)" }, + { 0x0648, 0x0776, 0x152d, "(null)" }, + { 0x0648, 0x0481, 0x1558, "(null)" }, + { 0x0648, 0x0860, 0x1558, "(null)" }, + { 0x0648, 0x3877, 0x17aa, "(null)" }, + { 0x0648, 0x3879, 0x17aa, "(null)" }, + { 0x0648, 0x208b, 0x17c0, "(null)" }, + { 0x0648, 0x208c, 0x17c0, "(null)" }, + { 0x0648, 0x1773, 0x1854, "(null)" }, + { 0x0648, 0x1776, 0x1854, "(null)" }, + { 0x0648, 0x5584, 0x1991, "(null)" }, + { 0x0648, 0x001f, 0x1b0a, "(null)" }, + { 0x0649, 0x22d5, 0x1019, "(null)" }, + { 0x0649, 0x011e, 0x1025, "(null)" }, + { 0x0649, 0x0121, 0x1025, "(null)" }, + { 0x0649, 0x0126, 0x1025, "(null)" }, + { 0x0649, 0x013b, 0x1025, "(null)" }, + { 0x0649, 0x013c, 0x1025, "(null)" }, + { 0x0649, 0x013d, 0x1025, "(null)" }, + { 0x0649, 0x013e, 0x1025, "(null)" }, + { 0x0649, 0x013f, 0x1025, "(null)" }, + { 0x0649, 0x0142, 0x1025, "(null)" }, + { 0x0649, 0x0143, 0x1025, "(null)" }, + { 0x0649, 0x0145, 0x1025, "(null)" }, + { 0x0649, 0x0146, 0x1025, "(null)" }, + { 0x0649, 0x0149, 0x1025, "(null)" }, + { 0x0649, 0x014d, 0x1025, "(null)" }, + { 0x0649, 0x015e, 0x1025, "(null)" }, + { 0x0649, 0x0175, 0x1025, "(null)" }, + { 0x0649, 0x017e, 0x1025, "(null)" }, + { 0x0649, 
0x0250, 0x1028, "(null)" }, + { 0x0649, 0x0272, 0x1028, "(null)" }, + { 0x0649, 0x30f4, 0x103c, "(null)" }, + { 0x0649, 0x3603, 0x103c, "(null)" }, + { 0x0649, 0x3610, 0x103c, "(null)" }, + { 0x0649, 0x361b, 0x103c, "(null)" }, + { 0x0649, 0x3621, 0x103c, "(null)" }, + { 0x0649, 0x19f2, 0x1043, "(null)" }, + { 0x0649, 0x2001, 0x1043, "(null)" }, + { 0x0649, 0x2005, 0x1043, "(null)" }, + { 0x0649, 0x2006, 0x1043, "(null)" }, + { 0x0649, 0x202d, 0x1043, "(null)" }, + { 0x0649, 0x9013, 0x1043, "(null)" }, + { 0x0649, 0x9040, 0x104d, "(null)" }, + { 0x0649, 0x0800, 0x107b, "(null)" }, + { 0x0649, 0x0900, 0x107b, "(null)" }, + { 0x0649, 0x3c06, 0x109f, "(null)" }, + { 0x0649, 0x0001, 0x1179, "(null)" }, + { 0x0649, 0xff00, 0x1179, "(null)" }, + { 0x0649, 0xff01, 0x1179, "(null)" }, + { 0x0649, 0xc03f, 0x144d, "(null)" }, + { 0x0649, 0xc044, 0x144d, "(null)" }, + { 0x0649, 0xc520, 0x144d, "(null)" }, + { 0x0649, 0x4327, 0x1462, "(null)" }, + { 0x0649, 0x4350, 0x1462, "(null)" }, + { 0x0649, 0x63f2, 0x1462, "(null)" }, + { 0x0649, 0x6510, 0x1462, "(null)" }, + { 0x0649, 0x6520, 0x1462, "(null)" }, + { 0x0649, 0x6710, 0x1462, "(null)" }, + { 0x0649, 0x719a, 0x1462, "(null)" }, + { 0x0649, 0x719b, 0x1462, "(null)" }, + { 0x0649, 0x7220, 0x1462, "(null)" }, + { 0x0649, 0x0031, 0x14c0, "(null)" }, + { 0x0649, 0x3008, 0x1509, "(null)" }, + { 0x0649, 0x0481, 0x1558, "(null)" }, + { 0x0649, 0x0573, 0x1558, "(null)" }, + { 0x0649, 0x0860, 0x1558, "(null)" }, + { 0x0649, 0x1135, 0x1734, "(null)" }, + { 0x0649, 0x115e, 0x1734, "(null)" }, + { 0x0649, 0x0805, 0x17ff, "(null)" }, + { 0x0649, 0x012f, 0x1854, "(null)" }, + { 0x0649, 0x0130, 0x1854, "(null)" }, + { 0x0649, 0x0131, 0x1854, "(null)" }, + { 0x0649, 0x0132, 0x1854, "(null)" }, + { 0x0649, 0x013c, 0x1854, "(null)" }, + { 0x0649, 0x1402, 0x1a46, "(null)" }, + { 0x0649, 0x1405, 0x1a46, "(null)" }, + { 0x0649, 0x001f, 0x1b0a, "(null)" }, + { 0x0649, 0x9024, 0x1b0a, "(null)" }, + { 0x064A, 0x0145, 0x1025, "(null)" }, + { 0x064A, 0x0146, 0x1025, "(null)" }, + { 0x064A, 0x19a2, 0x1043, "(null)" }, + { 0x064A, 0x1a02, 0x1043, "(null)" }, + { 0x064A, 0x2009, 0x1043, "(null)" }, + { 0x064B, 0x3874, 0x17aa, "(null)" }, + { 0x064B, 0x3a27, 0x17aa, "(null)" }, + { 0x064C, 0x1912, 0x1043, "(null)" }, + { 0x064C, 0x2013, 0x1043, "(null)" }, + { 0x064C, 0x2018, 0x1043, "(null)" }, + { 0x064C, 0x2022, 0x1043, "(null)" }, + { 0x064C, 0x1405, 0x1a46, "(null)" }, + { 0x0651, 0xc520, 0x144d, "(null)" }, + { 0x0651, 0x100c, 0x1462, "(null)" }, + { 0x0651, 0x3877, 0x17aa, "(null)" }, + { 0x0651, 0x387b, 0x17aa, "(null)" }, + { 0x0652, 0x0201, 0x1025, "(null)" }, + { 0x0652, 0x0208, 0x1025, "(null)" }, + { 0x0652, 0x020e, 0x1025, "(null)" }, + { 0x0652, 0x021e, 0x1025, "(null)" }, + { 0x0652, 0x0259, 0x1025, "(null)" }, + { 0x0652, 0x0261, 0x1025, "(null)" }, + { 0x0652, 0x0299, 0x1025, "(null)" }, + { 0x0652, 0x3610, 0x103c, "(null)" }, + { 0x0652, 0x361b, 0x103c, "(null)" }, + { 0x0652, 0x1a12, 0x1043, "(null)" }, + { 0x0652, 0x1a42, 0x1043, "(null)" }, + { 0x0652, 0x2021, 0x1043, "(null)" }, + { 0x0652, 0x2023, 0x1043, "(null)" }, + { 0x0652, 0x2024, 0x1043, "(null)" }, + { 0x0652, 0x2025, 0x1043, "(null)" }, + { 0x0652, 0x202c, 0x1043, "(null)" }, + { 0x0652, 0xc03f, 0x144d, "(null)" }, + { 0x0652, 0xc520, 0x144d, "(null)" }, + { 0x0652, 0x100b, 0x1462, "(null)" }, + { 0x0652, 0x100c, 0x1462, "(null)" }, + { 0x0652, 0x1023, 0x1462, "(null)" }, + { 0x0652, 0x1650, 0x1462, "(null)" }, + { 0x0652, 0x4350, 0x1462, "(null)" }, + { 0x0652, 0x4570, 0x1462, "(null)" }, + { 
0x0652, 0x003d, 0x14c0, "(null)" }, + { 0x0652, 0x300c, 0x1509, "(null)" }, + { 0x0652, 0x0787, 0x152d, "(null)" }, + { 0x0652, 0x0788, 0x152d, "(null)" }, + { 0x0652, 0x0789, 0x152d, "(null)" }, + { 0x0652, 0x0823, 0x152d, "(null)" }, + { 0x0652, 0x0850, 0x152d, "(null)" }, + { 0x0652, 0x1172, 0x1734, "(null)" }, + { 0x0652, 0x387a, 0x17aa, "(null)" }, + { 0x0652, 0x3882, 0x17aa, "(null)" }, + { 0x0652, 0x0132, 0x1854, "(null)" }, + { 0x0652, 0x0140, 0x1854, "(null)" }, + { 0x0652, 0x0142, 0x1854, "(null)" }, + { 0x0652, 0x1784, 0x1854, "(null)" }, + { 0x0652, 0x1785, 0x1854, "(null)" }, + { 0x0652, 0x1786, 0x1854, "(null)" }, + { 0x0652, 0x1787, 0x1854, "(null)" }, + { 0x0652, 0x1789, 0x1854, "(null)" }, + { 0x0652, 0x1405, 0x1a46, "(null)" }, + { 0x0653, 0x1402, 0x1043, "(null)" }, + { 0x0653, 0x1a42, 0x1043, "(null)" }, + { 0x0653, 0x202d, 0x1043, "(null)" }, + { 0x0654, 0x14a2, 0x1043, "(null)" }, + { 0x0654, 0x14d2, 0x1043, "(null)" }, + { 0x0654, 0x1ad2, 0x1043, "(null)" }, + { 0x0654, 0x2035, 0x1043, "(null)" }, + { 0x0654, 0x2039, 0x1043, "(null)" }, + { 0x0654, 0x203b, 0x1043, "(null)" }, + { 0x0654, 0x10ca, 0x17c0, "(null)" }, + { 0x0655, 0x0633, 0x106b, "(null)" }, + { 0x0656, 0x0693, 0x106b, "(null)" }, + { 0x065A, 0x0250, 0x1028, "(null)" }, + { 0x065C, 0x0250, 0x1028, "(null)" }, + { 0x065C, 0x30e7, 0x103c, "(null)" }, + { 0x065C, 0x058b, 0x10de, "(null)" }, + { 0x065C, 0x1147, 0x1734, "(null)" }, + { 0x06C0, 0x075f, 0x10de, "(null)" }, + { 0x06C4, 0xc000, 0x1458, "(null)" }, + { 0x06CA, 0x048f, 0x1028, "(null)" }, + { 0x06CA, 0x0490, 0x1028, "(null)" }, + { 0x06CA, 0x0053, 0x14c0, "(null)" }, + { 0x06CA, 0x0903, 0x1558, "(null)" }, + { 0x06CA, 0x7200, 0x1558, "(null)" }, + { 0x06CA, 0x8100, 0x1558, "(null)" }, + { 0x06CA, 0x8687, 0x1558, "(null)" }, + { 0x06CA, 0x8689, 0x1558, "(null)" }, + { 0x06CD, 0x079f, 0x10de, "(null)" }, + { 0x06CD, 0xc000, 0x1458, "(null)" }, + { 0x06D1, 0x0771, 0x103c, "(null)" }, + { 0x06D1, 0x0771, 0x10de, "(null)" }, + { 0x06D1, 0x0772, 0x10de, "(null)" }, + { 0x06D2, 0x0774, 0x10de, "(null)" }, + { 0x06D2, 0x0830, 0x10de, "(null)" }, + { 0x06D2, 0x0842, 0x10de, "(null)" }, + { 0x06D2, 0x084d, 0x10de, "(null)" }, + { 0x06D2, 0x088f, 0x10de, "(null)" }, + { 0x06D2, 0x0908, 0x10de, "(null)" }, + { 0x06D8, 0x076f, 0x103c, "(null)" }, + { 0x06D8, 0x076f, 0x10de, "(null)" }, + { 0x06D9, 0x0770, 0x103c, "(null)" }, + { 0x06D9, 0x0770, 0x10de, "(null)" }, + { 0x06DA, 0x081a, 0x1028, "(null)" }, + { 0x06DA, 0x1520, 0x103c, "(null)" }, + { 0x06DD, 0x0780, 0x103c, "(null)" }, + { 0x06DD, 0x0000, 0x106b, "(null)" }, + { 0x06DD, 0x0780, 0x10de, "(null)" }, + { 0x06DE, 0x0773, 0x10de, "(null)" }, + { 0x06DE, 0x082f, 0x10de, "(null)" }, + { 0x06DE, 0x0840, 0x10de, "(null)" }, + { 0x06DE, 0x0842, 0x10de, "(null)" }, + { 0x06DE, 0x0846, 0x10de, "(null)" }, + { 0x06DE, 0x0866, 0x10de, "(null)" }, + { 0x06DE, 0x0907, 0x10de, "(null)" }, + { 0x06DE, 0x091e, 0x10de, "(null)" }, + { 0x06DF, 0x084d, 0x10de, "(null)" }, + { 0x06DF, 0x087f, 0x10de, "(null)" }, + { 0x06E0, 0x3483, 0x1458, "(null)" }, + { 0x06E0, 0x3484, 0x1458, "(null)" }, + { 0x06E0, 0x9004, 0x1b0a, "(null)" }, + { 0x06E0, 0x9008, 0x1b0a, "(null)" }, + { 0x06E1, 0x8073, 0x144d, "(null)" }, + { 0x06E1, 0x8074, 0x144d, "(null)" }, + { 0x06E1, 0x8075, 0x144d, "(null)" }, + { 0x06E1, 0x8076, 0x144d, "(null)" }, + { 0x06E1, 0x3483, 0x1458, "(null)" }, + { 0x06E1, 0x3484, 0x1458, "(null)" }, + { 0x06E1, 0x116b, 0x1462, "(null)" }, + { 0x06E1, 0x0203, 0x1620, "(null)" }, + { 0x06E1, 0x3776, 0x1642, "(null)" 
}, + { 0x06E2, 0x050f, 0x1043, "(null)" }, + { 0x06E2, 0x0510, 0x1043, "(null)" }, + { 0x06E2, 0x3467, 0x1458, "(null)" }, + { 0x06E2, 0x3478, 0x1458, "(null)" }, + { 0x06E2, 0x3776, 0x1642, "(null)" }, + { 0x06E5, 0x9025, 0x104d, "(null)" }, + { 0x06E6, 0x3483, 0x1458, "(null)" }, + { 0x06E6, 0x3484, 0x1458, "(null)" }, + { 0x06E6, 0x3776, 0x1642, "(null)" }, + { 0x06E6, 0x9004, 0x1b0a, "(null)" }, + { 0x06E6, 0x9008, 0x1b0a, "(null)" }, + { 0x06E6, 0x903e, 0x1b0a, "(null)" }, + { 0x06E8, 0x0262, 0x1028, "(null)" }, + { 0x06E8, 0x0271, 0x1028, "(null)" }, + { 0x06E8, 0x0272, 0x1028, "(null)" }, + { 0x06E8, 0x30f4, 0x103c, "(null)" }, + { 0x06E8, 0x30f7, 0x103c, "(null)" }, + { 0x06E8, 0x3603, 0x103c, "(null)" }, + { 0x06E8, 0x360b, 0x103c, "(null)" }, + { 0x06E8, 0x3621, 0x103c, "(null)" }, + { 0x06E8, 0x3629, 0x103c, "(null)" }, + { 0x06E8, 0x2008, 0x1043, "(null)" }, + { 0x06E8, 0x0900, 0x107b, "(null)" }, + { 0x06E8, 0x0001, 0x1179, "(null)" }, + { 0x06E8, 0xff00, 0x1179, "(null)" }, + { 0x06E8, 0xc041, 0x144d, "(null)" }, + { 0x06E8, 0xc042, 0x144d, "(null)" }, + { 0x06E8, 0xc048, 0x144d, "(null)" }, + { 0x06E8, 0xc04a, 0x144d, "(null)" }, + { 0x06E8, 0xc521, 0x144d, "(null)" }, + { 0x06E8, 0xc524, 0x144d, "(null)" }, + { 0x06E8, 0x0772, 0x152d, "(null)" }, + { 0x06E8, 0x0773, 0x152d, "(null)" }, + { 0x06E8, 0x0774, 0x152d, "(null)" }, + { 0x06E8, 0x0775, 0x152d, "(null)" }, + { 0x06E8, 0x1146, 0x1734, "(null)" }, + { 0x06E8, 0x1772, 0x1854, "(null)" }, + { 0x06E8, 0x1773, 0x1854, "(null)" }, + { 0x06E8, 0x1774, 0x1854, "(null)" }, + { 0x06E8, 0x1775, 0x1854, "(null)" }, + { 0x06E8, 0x4605, 0x1961, "(null)" }, + { 0x06E8, 0x5584, 0x1991, "(null)" }, + { 0x06E8, 0x000e, 0x1b0a, "(null)" }, + { 0x06E9, 0x22d5, 0x1019, "(null)" }, + { 0x06E9, 0x011e, 0x1025, "(null)" }, + { 0x06E9, 0x0121, 0x1025, "(null)" }, + { 0x06E9, 0x0126, 0x1025, "(null)" }, + { 0x06E9, 0x0128, 0x1025, "(null)" }, + { 0x06E9, 0x013b, 0x1025, "(null)" }, + { 0x06E9, 0x013c, 0x1025, "(null)" }, + { 0x06E9, 0x013d, 0x1025, "(null)" }, + { 0x06E9, 0x013e, 0x1025, "(null)" }, + { 0x06E9, 0x013f, 0x1025, "(null)" }, + { 0x06E9, 0x0142, 0x1025, "(null)" }, + { 0x06E9, 0x0143, 0x1025, "(null)" }, + { 0x06E9, 0x0145, 0x1025, "(null)" }, + { 0x06E9, 0x0146, 0x1025, "(null)" }, + { 0x06E9, 0x0149, 0x1025, "(null)" }, + { 0x06E9, 0x014d, 0x1025, "(null)" }, + { 0x06E9, 0x015e, 0x1025, "(null)" }, + { 0x06E9, 0x0167, 0x1025, "(null)" }, + { 0x06E9, 0x0175, 0x1025, "(null)" }, + { 0x06E9, 0x017e, 0x1025, "(null)" }, + { 0x06E9, 0x024b, 0x1025, "(null)" }, + { 0x06E9, 0x02bb, 0x1028, "(null)" }, + { 0x06E9, 0x02bc, 0x1028, "(null)" }, + { 0x06E9, 0x02bd, 0x1028, "(null)" }, + { 0x06E9, 0x02d9, 0x1028, "(null)" }, + { 0x06E9, 0x02de, 0x1028, "(null)" }, + { 0x06E9, 0x1505, 0x103c, "(null)" }, + { 0x06E9, 0x0510, 0x1043, "(null)" }, + { 0x06E9, 0x1972, 0x1043, "(null)" }, + { 0x06E9, 0x19b2, 0x1043, "(null)" }, + { 0x06E9, 0x19c2, 0x1043, "(null)" }, + { 0x06E9, 0x1a62, 0x1043, "(null)" }, + { 0x06E9, 0x2007, 0x1043, "(null)" }, + { 0x06E9, 0x8329, 0x1043, "(null)" }, + { 0x06E9, 0x903f, 0x104d, "(null)" }, + { 0x06E9, 0x9040, 0x104d, "(null)" }, + { 0x06E9, 0x0900, 0x107b, "(null)" }, + { 0x06E9, 0x3c05, 0x109f, "(null)" }, + { 0x06E9, 0x14a7, 0x10cf, "(null)" }, + { 0x06E9, 0x14cf, 0x10cf, "(null)" }, + { 0x06E9, 0xff00, 0x1179, "(null)" }, + { 0x06E9, 0xb051, 0x144d, "(null)" }, + { 0x06E9, 0xc03f, 0x144d, "(null)" }, + { 0x06E9, 0xc041, 0x144d, "(null)" }, + { 0x06E9, 0xc042, 0x144d, "(null)" }, + { 0x06E9, 0xc045, 0x144d, 
"(null)" }, + { 0x06E9, 0xc048, 0x144d, "(null)" }, + { 0x06E9, 0xc520, 0x144d, "(null)" }, + { 0x06E9, 0x4350, 0x1462, "(null)" }, + { 0x06E9, 0x6510, 0x1462, "(null)" }, + { 0x06E9, 0x6520, 0x1462, "(null)" }, + { 0x06E9, 0x6530, 0x1462, "(null)" }, + { 0x06E9, 0x6710, 0x1462, "(null)" }, + { 0x06E9, 0x7220, 0x1462, "(null)" }, + { 0x06E9, 0x7230, 0x1462, "(null)" }, + { 0x06E9, 0x0033, 0x14c0, "(null)" }, + { 0x06E9, 0xa016, 0x14ff, "(null)" }, + { 0x06E9, 0x0774, 0x152d, "(null)" }, + { 0x06E9, 0x0775, 0x152d, "(null)" }, + { 0x06E9, 0x0802, 0x1558, "(null)" }, + { 0x06E9, 0x0805, 0x1558, "(null)" }, + { 0x06E9, 0x0806, 0x1558, "(null)" }, + { 0x06E9, 0x1137, 0x1734, "(null)" }, + { 0x06E9, 0x2107, 0x17aa, "(null)" }, + { 0x06E9, 0x3873, 0x17aa, "(null)" }, + { 0x06E9, 0x3876, 0x17aa, "(null)" }, + { 0x06E9, 0x3a24, 0x17aa, "(null)" }, + { 0x06E9, 0x208b, 0x17c0, "(null)" }, + { 0x06E9, 0x0802, 0x17ff, "(null)" }, + { 0x06E9, 0x0131, 0x1854, "(null)" }, + { 0x06E9, 0x0132, 0x1854, "(null)" }, + { 0x06E9, 0x013a, 0x1854, "(null)" }, + { 0x06E9, 0x013b, 0x1854, "(null)" }, + { 0x06E9, 0x013d, 0x1854, "(null)" }, + { 0x06E9, 0x013e, 0x1854, "(null)" }, + { 0x06E9, 0x1774, 0x1854, "(null)" }, + { 0x06E9, 0x1775, 0x1854, "(null)" }, + { 0x06E9, 0x0001, 0x1895, "(null)" }, + { 0x06E9, 0x5584, 0x1991, "(null)" }, + { 0x06E9, 0x1402, 0x1a46, "(null)" }, + { 0x06E9, 0x1405, 0x1a46, "(null)" }, + { 0x06E9, 0x000e, 0x1b0a, "(null)" }, + { 0x06E9, 0x006a, 0x1b0a, "(null)" }, + { 0x06E9, 0x200f, 0x1b0a, "(null)" }, + { 0x06E9, 0x9003, 0x1b0a, "(null)" }, + { 0x06E9, 0x9010, 0x1b0a, "(null)" }, + { 0x06E9, 0x9023, 0x1b0a, "(null)" }, + { 0x06E9, 0x8986, 0x4352, "(null)" }, + { 0x06EA, 0x0001, 0x1179, "(null)" }, + { 0x06EA, 0x0002, 0x1179, "(null)" }, + { 0x06EB, 0x0233, 0x1028, "(null)" }, + { 0x06EB, 0x024f, 0x1028, "(null)" }, + { 0x06EB, 0x0001, 0x1179, "(null)" }, + { 0x06EB, 0x3a25, 0x17aa, "(null)" }, + { 0x06EC, 0x0128, 0x1025, "(null)" }, + { 0x06EC, 0x0167, 0x1025, "(null)" }, + { 0x06EC, 0x0200, 0x1025, "(null)" }, + { 0x06EC, 0x0205, 0x1025, "(null)" }, + { 0x06EC, 0x0208, 0x1025, "(null)" }, + { 0x06EC, 0x020a, 0x1025, "(null)" }, + { 0x06EC, 0x020e, 0x1025, "(null)" }, + { 0x06EC, 0x0218, 0x1025, "(null)" }, + { 0x06EC, 0x0219, 0x1025, "(null)" }, + { 0x06EC, 0x021c, 0x1025, "(null)" }, + { 0x06EC, 0x0253, 0x1025, "(null)" }, + { 0x06EC, 0x0260, 0x1025, "(null)" }, + { 0x06EC, 0x306d, 0x103c, "(null)" }, + { 0x06EC, 0x30f4, 0x103c, "(null)" }, + { 0x06EC, 0x30f7, 0x103c, "(null)" }, + { 0x06EC, 0x7010, 0x103c, "(null)" }, + { 0x06EC, 0x1a22, 0x1043, "(null)" }, + { 0x06EC, 0x1a32, 0x1043, "(null)" }, + { 0x06EC, 0x1a72, 0x1043, "(null)" }, + { 0x06EC, 0x2026, 0x1043, "(null)" }, + { 0x06EC, 0xc04f, 0x144d, "(null)" }, + { 0x06EC, 0xc050, 0x144d, "(null)" }, + { 0x06EC, 0xc051, 0x144d, "(null)" }, + { 0x06EC, 0xc520, 0x144d, "(null)" }, + { 0x06EC, 0xc540, 0x144d, "(null)" }, + { 0x06EC, 0x0784, 0x152d, "(null)" }, + { 0x06EC, 0x0785, 0x152d, "(null)" }, + { 0x06EC, 0x0786, 0x152d, "(null)" }, + { 0x06EC, 0x0817, 0x152d, "(null)" }, + { 0x06EC, 0x0802, 0x1558, "(null)" }, + { 0x06EC, 0x0806, 0x1558, "(null)" }, + { 0x06EC, 0x2128, 0x17aa, "(null)" }, + { 0x06EC, 0x3883, 0x17aa, "(null)" }, + { 0x06EC, 0x3884, 0x17aa, "(null)" }, + { 0x06EC, 0x38a1, 0x17aa, "(null)" }, + { 0x06EC, 0x208d, 0x17c0, "(null)" }, + { 0x06EC, 0x0141, 0x1854, "(null)" }, + { 0x06EC, 0x0143, 0x1854, "(null)" }, + { 0x06EC, 0x1781, 0x1854, "(null)" }, + { 0x06EC, 0x1782, 0x1854, "(null)" }, + { 0x06EC, 0x1783, 
0x1854, "(null)" }, + { 0x06EC, 0x1784, 0x1854, "(null)" }, + { 0x06EC, 0x1785, 0x1854, "(null)" }, + { 0x06EC, 0x1786, 0x1854, "(null)" }, + { 0x06EC, 0x1787, 0x1854, "(null)" }, + { 0x06EC, 0x1788, 0x1854, "(null)" }, + { 0x06EF, 0x306a, 0x103c, "(null)" }, + { 0x06EF, 0x306c, 0x103c, "(null)" }, + { 0x06EF, 0x7010, 0x103c, "(null)" }, + { 0x06F1, 0x1a82, 0x1043, "(null)" }, + { 0x06F1, 0x1a92, 0x1043, "(null)" }, + { 0x06F1, 0x1ab2, 0x1043, "(null)" }, + { 0x06F1, 0x2032, 0x1043, "(null)" }, + { 0x06F1, 0x1032, 0x1462, "(null)" }, + { 0x06F9, 0x060d, 0x10de, "(null)" }, + { 0x06FB, 0x0234, 0x1028, "(null)" }, + { 0x06FD, 0x062e, 0x10de, "(null)" }, + { 0x06FF, 0x0711, 0x10de, "(null)" }, + { 0x07E1, 0xb04d, 0x144d, "(null)" }, + { 0x07E1, 0xb04e, 0x144d, "(null)" }, + { 0x07E1, 0xb065, 0x144d, "(null)" }, + { 0x07E2, 0xb04b, 0x144d, "(null)" }, + { 0x0844, 0x014a, 0x1025, "(null)" }, + { 0x0844, 0x014d, 0x1025, "(null)" }, + { 0x0844, 0x88cc, 0x1033, "(null)" }, + { 0x0844, 0x0173, 0x107b, "(null)" }, + { 0x0844, 0x6520, 0x1462, "(null)" }, + { 0x0844, 0x6710, 0x1462, "(null)" }, + { 0x0844, 0x6720, 0x1462, "(null)" }, + { 0x0844, 0x208c, 0x17c0, "(null)" }, + { 0x0844, 0x208f, 0x17c0, "(null)" }, + { 0x0844, 0x408f, 0x17c0, "(null)" }, + { 0x0845, 0x360a, 0x103c, "(null)" }, + { 0x0845, 0x6720, 0x1462, "(null)" }, + { 0x084B, 0x0227, 0x1025, "(null)" }, + { 0x084B, 0xe03b, 0x1631, "(null)" }, + { 0x0860, 0x2a8d, 0x103c, "(null)" }, + { 0x0860, 0x2aa1, 0x103c, "(null)" }, + { 0x0861, 0x00ae, 0x106b, "(null)" }, + { 0x0862, 0x0271, 0x1028, "(null)" }, + { 0x0862, 0x02a1, 0x1028, "(null)" }, + { 0x0862, 0x16c2, 0x1043, "(null)" }, + { 0x0862, 0xff00, 0x1179, "(null)" }, + { 0x0862, 0x9800, 0x1558, "(null)" }, + { 0x0863, 0x00aa, 0x106b, "(null)" }, + { 0x0863, 0x00ac, 0x106b, "(null)" }, + { 0x0863, 0x00b0, 0x106b, "(null)" }, + { 0x0863, 0x00b9, 0x106b, "(null)" }, + { 0x0863, 0x00ba, 0x106b, "(null)" }, + { 0x0863, 0x00bb, 0x106b, "(null)" }, + { 0x0863, 0x00bd, 0x106b, "(null)" }, + { 0x0864, 0x2a7c, 0x103c, "(null)" }, + { 0x0866, 0x0160, 0x1025, "(null)" }, + { 0x0866, 0x0271, 0x1028, "(null)" }, + { 0x0866, 0x02ba, 0x1028, "(null)" }, + { 0x0866, 0x1962, 0x1043, "(null)" }, + { 0x0866, 0x8403, 0x1043, "(null)" }, + { 0x0866, 0x00b1, 0x106b, "(null)" }, + { 0x0866, 0x9070, 0x1071, "(null)" }, + { 0x0866, 0x4570, 0x1462, "(null)" }, + { 0x0866, 0x1402, 0x1849, "(null)" }, + { 0x0866, 0x2033, 0x1b0a, "(null)" }, + { 0x0866, 0x2009, 0x2854, "(null)" }, + { 0x0867, 0x00ad, 0x106b, "(null)" }, + { 0x0868, 0x2a8b, 0x103c, "(null)" }, + { 0x0869, 0x00b4, 0x106b, "(null)" }, + { 0x086D, 0xb058, 0x144d, "(null)" }, + { 0x086E, 0x16e2, 0x1043, "(null)" }, + { 0x086E, 0x9070, 0x1071, "(null)" }, + { 0x086E, 0x9072, 0x1071, "(null)" }, + { 0x086E, 0xff00, 0x1179, "(null)" }, + { 0x086E, 0xb051, 0x144d, "(null)" }, + { 0x086E, 0x1012, 0x1462, "(null)" }, + { 0x086E, 0x1019, 0x1462, "(null)" }, + { 0x086E, 0x7621, 0x1462, "(null)" }, + { 0x086E, 0x0577, 0x1558, "(null)" }, + { 0x086E, 0x4006, 0x1b0a, "(null)" }, + { 0x086F, 0x16b2, 0x1043, "(null)" }, + { 0x086F, 0x9515, 0x1071, "(null)" }, + { 0x086F, 0xcb79, 0x10de, "(null)" }, + { 0x086F, 0xb051, 0x144d, "(null)" }, + { 0x086F, 0x1012, 0x1462, "(null)" }, + { 0x086F, 0x1019, 0x1462, "(null)" }, + { 0x086F, 0x101a, 0x1462, "(null)" }, + { 0x086F, 0x71f0, 0x1462, "(null)" }, + { 0x086F, 0xc217, 0x1631, "(null)" }, + { 0x086F, 0x1151, 0x1734, "(null)" }, + { 0x086F, 0x0136, 0x1854, "(null)" }, + { 0x086F, 0x0137, 0x1854, "(null)" }, + { 0x086F, 
0x2008, 0x1854, "(null)" }, + { 0x0870, 0x00ab, 0x106b, "(null)" }, + { 0x0872, 0x19b4, 0x1043, "(null)" }, + { 0x0872, 0x1aa2, 0x1043, "(null)" }, + { 0x0872, 0x1c02, 0x1043, "(null)" }, + { 0x0872, 0x1c42, 0x1043, "(null)" }, + { 0x0873, 0x19b4, 0x1043, "(null)" }, + { 0x0873, 0x1c12, 0x1043, "(null)" }, + { 0x0873, 0x1c52, 0x1043, "(null)" }, + { 0x0874, 0x4570, 0x1462, "(null)" }, + { 0x0876, 0x3651, 0x103c, "(null)" }, + { 0x0876, 0x8402, 0x1043, "(null)" }, + { 0x0876, 0xc056, 0x144d, "(null)" }, + { 0x0876, 0x1202, 0x1849, "(null)" }, + { 0x0876, 0x0148, 0x1854, "(null)" }, + { 0x0876, 0x0149, 0x1854, "(null)" }, + { 0x087D, 0x301d, 0x17aa, "(null)" }, + { 0x087F, 0x8434, 0x1043, "(null)" }, + { 0x087F, 0xc056, 0x144d, "(null)" }, + { 0x08A0, 0x00c2, 0x106b, "(null)" }, + { 0x08A0, 0x00ce, 0x106b, "(null)" }, + { 0x08A2, 0x00d4, 0x106b, "(null)" }, + { 0x08A3, 0x00d3, 0x106b, "(null)" }, + { 0x08A4, 0x00c0, 0x106b, "(null)" }, + { 0x0A20, 0x8084, 0x144d, "(null)" }, + { 0x0A20, 0x1910, 0x1462, "(null)" }, + { 0x0A20, 0x1911, 0x1462, "(null)" }, + { 0x0A20, 0x1912, 0x1462, "(null)" }, + { 0x0A20, 0x1118, 0x19da, "(null)" }, + { 0x0A22, 0x8327, 0x1043, "(null)" }, + { 0x0A22, 0x3918, 0x1642, "(null)" }, + { 0x0A22, 0x906c, 0x1b0a, "(null)" }, + { 0x0A22, 0x906d, 0x1b0a, "(null)" }, + { 0x0A23, 0x3918, 0x1642, "(null)" }, + { 0x0A27, 0x1010, 0x1019, "(null)" }, + { 0x0A27, 0x5141, 0x174b, "(null)" }, + { 0x0A28, 0x8897, 0x1033, "(null)" }, + { 0x0A28, 0x363c, 0x103c, "(null)" }, + { 0x0A28, 0x363e, 0x103c, "(null)" }, + { 0x0A28, 0x3659, 0x103c, "(null)" }, + { 0x0A28, 0x365c, 0x103c, "(null)" }, + { 0x0A28, 0x7001, 0x103c, "(null)" }, + { 0x0A28, 0x2031, 0x1043, "(null)" }, + { 0x0A28, 0x9072, 0x1071, "(null)" }, + { 0x0A28, 0xff00, 0x1179, "(null)" }, + { 0x0A28, 0xff15, 0x1179, "(null)" }, + { 0x0A28, 0xff16, 0x1179, "(null)" }, + { 0x0A28, 0xff50, 0x1179, "(null)" }, + { 0x0A28, 0xc064, 0x144d, "(null)" }, + { 0x0A28, 0x0815, 0x152d, "(null)" }, + { 0x0A28, 0x0807, 0x1854, "(null)" }, + { 0x0A28, 0x903b, 0x1b0a, "(null)" }, + { 0x0A29, 0x0318, 0x1025, "(null)" }, + { 0x0A29, 0x035a, 0x1025, "(null)" }, + { 0x0A29, 0x036c, 0x1025, "(null)" }, + { 0x0A29, 0x036d, 0x1025, "(null)" }, + { 0x0A29, 0x0370, 0x1025, "(null)" }, + { 0x0A29, 0x0374, 0x1025, "(null)" }, + { 0x0A29, 0x037c, 0x1025, "(null)" }, + { 0x0A29, 0x040a, 0x1025, "(null)" }, + { 0x0A29, 0x0413, 0x1025, "(null)" }, + { 0x0A29, 0x0415, 0x1025, "(null)" }, + { 0x0A29, 0x0417, 0x1025, "(null)" }, + { 0x0A29, 0x041e, 0x1025, "(null)" }, + { 0x0A29, 0x0424, 0x1025, "(null)" }, + { 0x0A29, 0x0434, 0x1025, "(null)" }, + { 0x0A29, 0x0450, 0x1025, "(null)" }, + { 0x0A29, 0x0464, 0x1025, "(null)" }, + { 0x0A29, 0x0442, 0x1028, "(null)" }, + { 0x0A29, 0x0467, 0x1028, "(null)" }, + { 0x0A29, 0x0468, 0x1028, "(null)" }, + { 0x0A29, 0x046d, 0x1028, "(null)" }, + { 0x0A29, 0x046e, 0x1028, "(null)" }, + { 0x0A29, 0x9067, 0x104d, "(null)" }, + { 0x0A29, 0x00c7, 0x106b, "(null)" }, + { 0x0A29, 0x00c8, 0x106b, "(null)" }, + { 0x0A29, 0x0644, 0x1071, "(null)" }, + { 0x0A29, 0x1583, 0x10cf, "(null)" }, + { 0x0A29, 0x1587, 0x10cf, "(null)" }, + { 0x0A29, 0x0001, 0x1179, "(null)" }, + { 0x0A29, 0xfd22, 0x1179, "(null)" }, + { 0x0A29, 0xfd30, 0x1179, "(null)" }, + { 0x0A29, 0xfd31, 0x1179, "(null)" }, + { 0x0A29, 0xff00, 0x1179, "(null)" }, + { 0x0A29, 0xff16, 0x1179, "(null)" }, + { 0x0A29, 0xff50, 0x1179, "(null)" }, + { 0x0A29, 0xb06d, 0x144d, "(null)" }, + { 0x0A29, 0xb071, 0x144d, "(null)" }, + { 0x0A29, 0xc06a, 0x144d, "(null)" }, + { 
0x0A29, 0xc06d, 0x144d, "(null)" }, + { 0x0A29, 0xc075, 0x144d, "(null)" }, + { 0x0A29, 0xc078, 0x144d, "(null)" }, + { 0x0A29, 0xc079, 0x144d, "(null)" }, + { 0x0A29, 0xc08e, 0x144d, "(null)" }, + { 0x0A29, 0xc094, 0x144d, "(null)" }, + { 0x0A29, 0xc551, 0x144d, "(null)" }, + { 0x0A29, 0xc562, 0x144d, "(null)" }, + { 0x0A29, 0x0055, 0x14c0, "(null)" }, + { 0x0A29, 0x4101, 0x1558, "(null)" }, + { 0x0A29, 0x7110, 0x1558, "(null)" }, + { 0x0A29, 0x396c, 0x17aa, "(null)" }, + { 0x0A29, 0x396e, 0x17aa, "(null)" }, + { 0x0A29, 0x010c, 0x17c0, "(null)" }, + { 0x0A29, 0x10d0, 0x17c0, "(null)" }, + { 0x0A29, 0x10d2, 0x17c0, "(null)" }, + { 0x0A29, 0x10d3, 0x17c0, "(null)" }, + { 0x0A29, 0x10d9, 0x17c0, "(null)" }, + { 0x0A29, 0x00b9, 0x1b0a, "(null)" }, + { 0x0A2A, 0x905e, 0x104d, "(null)" }, + { 0x0A2A, 0x0001, 0x1179, "(null)" }, + { 0x0A2A, 0x10ca, 0x17c0, "(null)" }, + { 0x0A2B, 0x905a, 0x104d, "(null)" }, + { 0x0A2B, 0x9072, 0x104d, "(null)" }, + { 0x0A2B, 0x4011, 0x1071, "(null)" }, + { 0x0A2B, 0x500a, 0x1071, "(null)" }, + { 0x0A2B, 0xa038, 0x1071, "(null)" }, + { 0x0A2B, 0x0001, 0x1179, "(null)" }, + { 0x0A2B, 0xfd80, 0x1179, "(null)" }, + { 0x0A2B, 0x0052, 0x14c0, "(null)" }, + { 0x0A2B, 0x00ab, 0x1b0a, "(null)" }, + { 0x0A2C, 0x1521, 0x103c, "(null)" }, + { 0x0A2C, 0x2144, 0x17aa, "(null)" }, + { 0x0A2D, 0x036d, 0x1025, "(null)" }, + { 0x0A2D, 0x0370, 0x1025, "(null)" }, + { 0x0A2D, 0x0374, 0x1025, "(null)" }, + { 0x0A2D, 0x037c, 0x1025, "(null)" }, + { 0x0A2D, 0x0417, 0x1025, "(null)" }, + { 0x0A2D, 0x0487, 0x1025, "(null)" }, + { 0x0A2D, 0x3659, 0x103c, "(null)" }, + { 0x0A2D, 0x365c, 0x103c, "(null)" }, + { 0x0A2D, 0x1312, 0x1043, "(null)" }, + { 0x0A2D, 0x9072, 0x104d, "(null)" }, + { 0x0A2D, 0xc560, 0x144d, "(null)" }, + { 0x0A2D, 0x209b, 0x1b0a, "(null)" }, + { 0x0A34, 0x0201, 0x1025, "(null)" }, + { 0x0A34, 0x020e, 0x1025, "(null)" }, + { 0x0A34, 0x0219, 0x1025, "(null)" }, + { 0x0A34, 0x021e, 0x1025, "(null)" }, + { 0x0A34, 0x0252, 0x1025, "(null)" }, + { 0x0A34, 0x0259, 0x1025, "(null)" }, + { 0x0A34, 0x026b, 0x1025, "(null)" }, + { 0x0A34, 0x0299, 0x1025, "(null)" }, + { 0x0A34, 0x02a2, 0x1028, "(null)" }, + { 0x0A34, 0x1ae2, 0x1043, "(null)" }, + { 0x0A34, 0x202a, 0x1043, "(null)" }, + { 0x0A34, 0x2031, 0x1043, "(null)" }, + { 0x0A34, 0x2034, 0x1043, "(null)" }, + { 0x0A34, 0x2036, 0x1043, "(null)" }, + { 0x0A34, 0x203a, 0x1043, "(null)" }, + { 0x0A34, 0x2040, 0x1043, "(null)" }, + { 0x0A34, 0x9072, 0x1071, "(null)" }, + { 0x0A34, 0x1013, 0x1462, "(null)" }, + { 0x0A34, 0x102e, 0x1462, "(null)" }, + { 0x0A34, 0x1031, 0x1462, "(null)" }, + { 0x0A34, 0x4570, 0x1462, "(null)" }, + { 0x0A34, 0x0828, 0x152d, "(null)" }, + { 0x0A34, 0x3928, 0x1642, "(null)" }, + { 0x0A34, 0x118d, 0x1734, "(null)" }, + { 0x0A34, 0x118e, 0x1734, "(null)" }, + { 0x0A34, 0x38cd, 0x17aa, "(null)" }, + { 0x0A34, 0x38fd, 0x17aa, "(null)" }, + { 0x0A34, 0x38ff, 0x17aa, "(null)" }, + { 0x0A34, 0x10d0, 0x17c0, "(null)" }, + { 0x0A34, 0x208d, 0x17c0, "(null)" }, + { 0x0A35, 0x1242, 0x1043, "(null)" }, + { 0x0A35, 0x1252, 0x1043, "(null)" }, + { 0x0A35, 0x1272, 0x1043, "(null)" }, + { 0x0A35, 0x1292, 0x1043, "(null)" }, + { 0x0A35, 0x14f2, 0x1043, "(null)" }, + { 0x0A35, 0x1f17, 0x1043, "(null)" }, + { 0x0A35, 0x1063, 0x1462, "(null)" }, + { 0x0A35, 0x106d, 0x1462, "(null)" }, + { 0x0A35, 0x1075, 0x1462, "(null)" }, + { 0x0A35, 0x2052, 0x1b0a, "(null)" }, + { 0x0A38, 0x0893, 0x10de, "(null)" }, + { 0x0A3C, 0x040c, 0x1028, "(null)" }, + { 0x0A3C, 0x1521, 0x103c, "(null)" }, + { 0x0A3C, 0x1584, 0x10cf, "(null)" 
+ { 0x0A3C, 0x0706, 0x10de, "(null)" },
+ { 0x0A3C, 0x0511, 0x1558, "(null)" },
+ { 0x0A3C, 0x0512, 0x1558, "(null)" },
+ { 0x0A3C, 0x2145, 0x17aa, "(null)" },
+ { 0x0A60, 0x8082, 0x144d, "(null)" },
+ { 0x0A60, 0x2014, 0x1462, "(null)" },
+ { 0x0A60, 0x2180, 0x174b, "(null)" },
+ { 0x0A60, 0x9045, 0x1b0a, "(null)" },
+ { 0x0A60, 0x9057, 0x1b0a, "(null)" },
+ { 0x0A62, 0x1833, 0x1462, "(null)" },
+ { 0x0A62, 0x2050, 0x174b, "(null)" },
+ { 0x0A62, 0x2052, 0x174b, "(null)" },
+ { 0x0A62, 0x5122, 0x174b, "(null)" },
+ { 0x0A64, 0x039d, 0x1025, "(null)" },
+ { 0x0A64, 0x045e, 0x1025, "(null)" },
+ { 0x0A64, 0x063c, 0x1025, "(null)" },
+ { 0x0A64, 0x841f, 0x1043, "(null)" },
+ { 0x0A64, 0x842f, 0x1043, "(null)" },
+ { 0x0A64, 0x8455, 0x1043, "(null)" },
+ { 0x0A64, 0x845b, 0x1043, "(null)" },
+ { 0x0A64, 0x845e, 0x1043, "(null)" },
+ { 0x0A64, 0xac41, 0x1462, "(null)" },
+ { 0x0A64, 0x0a64, 0x1849, "(null)" },
+ { 0x0A64, 0x00ce, 0x1b0a, "(null)" },
+ { 0x0A66, 0x8356, 0x1043, "(null)" },
+ { 0x0A66, 0x8368, 0x1043, "(null)" },
+ { 0x0A66, 0x1837, 0x1462, "(null)" },
+ { 0x0A66, 0x1838, 0x1462, "(null)" },
+ { 0x0A66, 0x1839, 0x1462, "(null)" },
+ { 0x0A66, 0x3958, 0x1642, "(null)" },
+ { 0x0A66, 0x3998, 0x1642, "(null)" },
+ { 0x0A66, 0x1122, 0x19da, "(null)" },
+ { 0x0A67, 0x9075, 0x1b0a, "(null)" },
+ { 0x0A68, 0x3659, 0x103c, "(null)" },
+ { 0x0A68, 0x365c, 0x103c, "(null)" },
+ { 0x0A68, 0xa036, 0x1071, "(null)" },
+ { 0x0A68, 0xc059, 0x144d, "(null)" },
+ { 0x0A68, 0xc064, 0x144d, "(null)" },
+ { 0x0A68, 0x1032, 0x1462, "(null)" },
+ { 0x0A68, 0x1034, 0x1462, "(null)" },
+ { 0x0A68, 0x5584, 0x1991, "(null)" },
+ { 0x0A69, 0x0418, 0x1028, "(null)" },
+ { 0x0A69, 0x3650, 0x103c, "(null)" },
+ { 0x0A69, 0xc525, 0x144d, "(null)" },
+ { 0x0A69, 0xc544, 0x144d, "(null)" },
+ { 0x0A69, 0x3905, 0x17aa, "(null)" },
+ { 0x0A6A, 0x0001, 0x1179, "(null)" },
+ { 0x0A6A, 0x0002, 0x1179, "(null)" },
+ { 0x0A6C, 0x040a, 0x1028, "(null)" },
+ { 0x0A6C, 0x040b, 0x1028, "(null)" },
+ { 0x0A6C, 0x172b, 0x103c, "(null)" },
+ { 0x0A6C, 0x2142, 0x17aa, "(null)" },
+ { 0x0A6C, 0x215c, 0x17aa, "(null)" },
+ { 0x0A6C, 0x21c0, 0x17aa, "(null)" },
+ { 0x0A6C, 0x21cc, 0x17aa, "(null)" },
+ { 0x0A6C, 0x21cd, 0x17aa, "(null)" },
+ { 0x0A6C, 0x21d4, 0x17aa, "(null)" },
+ { 0x0A6C, 0x21d5, 0x17aa, "(null)" },
+ { 0x0A6C, 0x21d7, 0x17aa, "(null)" },
+ { 0x0A6C, 0x21d8, 0x17aa, "(null)" },
+ { 0x0A6C, 0x38a4, 0x17aa, "(null)" },
+ { 0x0A6C, 0x391a, 0x17aa, "(null)" },
+ { 0x0A6E, 0x0d58, 0x105b, "(null)" },
+ { 0x0A6E, 0xff16, 0x1179, "(null)" },
+ { 0x0A6E, 0xff17, 0x1179, "(null)" },
+ { 0x0A6E, 0x3607, 0x17aa, "(null)" },
+ { 0x0A6E, 0x395a, 0x17aa, "(null)" },
+ { 0x0A6E, 0x3963, 0x17aa, "(null)" },
+ { 0x0A6E, 0x396d, 0x17aa, "(null)" },
+ { 0x0A6E, 0x10cf, 0x17c0, "(null)" },
+ { 0x0A6F, 0x0470, 0x1025, "(null)" },
+ { 0x0A6F, 0x846f, 0x1043, "(null)" },
+ { 0x0A6F, 0x8470, 0x1043, "(null)" },
+ { 0x0A6F, 0x4003, 0x1297, "(null)" },
+ { 0x0A6F, 0x9001, 0x159e, "(null)" },
+ { 0x0A70, 0x0370, 0x1025, "(null)" },
+ { 0x0A70, 0x0374, 0x1025, "(null)" },
+ { 0x0A70, 0x037c, 0x1025, "(null)" },
+ { 0x0A70, 0x040a, 0x1025, "(null)" },
+ { 0x0A70, 0x0413, 0x1025, "(null)" },
+ { 0x0A70, 0x0415, 0x1025, "(null)" },
+ { 0x0A70, 0x0417, 0x1025, "(null)" },
+ { 0x0A70, 0x041e, 0x1025, "(null)" },
+ { 0x0A70, 0x0424, 0x1025, "(null)" },
+ { 0x0A70, 0x0452, 0x1025, "(null)" },
+ { 0x0A70, 0x0453, 0x1025, "(null)" },
+ { 0x0A70, 0x047e, 0x1025, "(null)" },
+ { 0x0A70, 0x047d, 0x1028, "(null)" },
+ { 0x0A70, 0x0483, 0x1028, "(null)" },
"(null)" }, + { 0x0A70, 0x357f, 0x103c, "(null)" }, + { 0x0A70, 0x12d2, 0x1043, "(null)" }, + { 0x0A70, 0x1352, 0x1043, "(null)" }, + { 0x0A70, 0x1362, 0x1043, "(null)" }, + { 0x0A70, 0x1372, 0x1043, "(null)" }, + { 0x0A70, 0x13f2, 0x1043, "(null)" }, + { 0x0A70, 0x1432, 0x1043, "(null)" }, + { 0x0A70, 0x14c2, 0x1043, "(null)" }, + { 0x0A70, 0x1542, 0x1043, "(null)" }, + { 0x0A70, 0x8458, 0x1043, "(null)" }, + { 0x0A70, 0x8459, 0x1043, "(null)" }, + { 0x0A70, 0x907e, 0x104d, "(null)" }, + { 0x0A70, 0x0cdd, 0x105b, "(null)" }, + { 0x0A70, 0x4000, 0x1071, "(null)" }, + { 0x0A70, 0x4001, 0x1071, "(null)" }, + { 0x0A70, 0xb301, 0x1071, "(null)" }, + { 0x0A70, 0xb302, 0x1071, "(null)" }, + { 0x0A70, 0x15b3, 0x10cf, "(null)" }, + { 0x0A70, 0xfd22, 0x1179, "(null)" }, + { 0x0A70, 0xfd30, 0x1179, "(null)" }, + { 0x0A70, 0xfd31, 0x1179, "(null)" }, + { 0x0A70, 0xfd80, 0x1179, "(null)" }, + { 0x0A70, 0xff20, 0x1179, "(null)" }, + { 0x0A70, 0xb06d, 0x144d, "(null)" }, + { 0x0A70, 0xb071, 0x144d, "(null)" }, + { 0x0A70, 0xc078, 0x144d, "(null)" }, + { 0x0A70, 0xc079, 0x144d, "(null)" }, + { 0x0A70, 0xc088, 0x144d, "(null)" }, + { 0x0A70, 0xc08b, 0x144d, "(null)" }, + { 0x0A70, 0xc094, 0x144d, "(null)" }, + { 0x0A70, 0xc557, 0x144d, "(null)" }, + { 0x0A70, 0xc562, 0x144d, "(null)" }, + { 0x0A70, 0xc568, 0x144d, "(null)" }, + { 0x0A70, 0x1100, 0x1458, "(null)" }, + { 0x0A70, 0x1063, 0x1462, "(null)" }, + { 0x0A70, 0x1068, 0x1462, "(null)" }, + { 0x0A70, 0x106d, 0x1462, "(null)" }, + { 0x0A70, 0x1070, 0x1462, "(null)" }, + { 0x0A70, 0x0052, 0x14c0, "(null)" }, + { 0x0A70, 0x3605, 0x17aa, "(null)" }, + { 0x0A70, 0x3617, 0x17aa, "(null)" }, + { 0x0A70, 0x3955, 0x17aa, "(null)" }, + { 0x0A70, 0x3966, 0x17aa, "(null)" }, + { 0x0A70, 0x3968, 0x17aa, "(null)" }, + { 0x0A70, 0x396d, 0x17aa, "(null)" }, + { 0x0A70, 0x3970, 0x17aa, "(null)" }, + { 0x0A70, 0x3971, 0x17aa, "(null)" }, + { 0x0A70, 0x010c, 0x17c0, "(null)" }, + { 0x0A70, 0x0845, 0x1854, "(null)" }, + { 0x0A70, 0x0847, 0x1854, "(null)" }, + { 0x0A70, 0x0849, 0x1854, "(null)" }, + { 0x0A70, 0x2011, 0x1854, "(null)" }, + { 0x0A70, 0x940a, 0x1afa, "(null)" }, + { 0x0A70, 0x00d8, 0x1b0a, "(null)" }, + { 0x0A70, 0x00ec, 0x1b0a, "(null)" }, + { 0x0A70, 0x2065, 0x1b0a, "(null)" }, + { 0x0A70, 0x206f, 0x1b0a, "(null)" }, + { 0x0A70, 0x2090, 0x1b0a, "(null)" }, + { 0x0A70, 0x1001, 0x1bab, "(null)" }, + { 0x0A71, 0x1100, 0x1458, "(null)" }, + { 0x0A71, 0x395a, 0x17aa, "(null)" }, + { 0x0A71, 0x3963, 0x17aa, "(null)" }, + { 0x0A72, 0x041c, 0x1025, "(null)" }, + { 0x0A72, 0x041e, 0x1025, "(null)" }, + { 0x0A72, 0x0434, 0x1025, "(null)" }, + { 0x0A72, 0x0450, 0x1025, "(null)" }, + { 0x0A72, 0x1332, 0x1043, "(null)" }, + { 0x0A72, 0x1372, 0x1043, "(null)" }, + { 0x0A72, 0x0d58, 0x105b, "(null)" }, + { 0x0A72, 0x4000, 0x1071, "(null)" }, + { 0x0A72, 0xb301, 0x1071, "(null)" }, + { 0x0A72, 0xfd31, 0x1179, "(null)" }, + { 0x0A72, 0xfd80, 0x1179, "(null)" }, + { 0x0A72, 0xc078, 0x144d, "(null)" }, + { 0x0A72, 0x4101, 0x1558, "(null)" }, + { 0x0A72, 0x3a02, 0x17aa, "(null)" }, + { 0x0A72, 0x10d2, 0x17c0, "(null)" }, + { 0x0A72, 0x2051, 0x1b0a, "(null)" }, + { 0x0A72, 0x2001, 0x1bfd, "(null)" }, + { 0x0A73, 0x0d58, 0x105b, "(null)" }, + { 0x0A73, 0x0d5e, 0x105b, "(null)" }, + { 0x0A73, 0x3607, 0x17aa, "(null)" }, + { 0x0A73, 0x3610, 0x17aa, "(null)" }, + { 0x0A73, 0x205c, 0x1b0a, "(null)" }, + { 0x0A74, 0x0201, 0x1025, "(null)" }, + { 0x0A74, 0x020e, 0x1025, "(null)" }, + { 0x0A74, 0x0219, 0x1025, "(null)" }, + { 0x0A74, 0x021e, 0x1025, "(null)" }, + { 0x0A74, 0x0252, 
0x1025, "(null)" }, + { 0x0A74, 0x0259, 0x1025, "(null)" }, + { 0x0A74, 0x0296, 0x1025, "(null)" }, + { 0x0A74, 0x0299, 0x1025, "(null)" }, + { 0x0A74, 0x0271, 0x1028, "(null)" }, + { 0x0A74, 0x0490, 0x1028, "(null)" }, + { 0x0A74, 0x12a2, 0x1043, "(null)" }, + { 0x0A74, 0x1ac2, 0x1043, "(null)" }, + { 0x0A74, 0x1af2, 0x1043, "(null)" }, + { 0x0A74, 0x1bc2, 0x1043, "(null)" }, + { 0x0A74, 0x2037, 0x1043, "(null)" }, + { 0x0A74, 0x2038, 0x1043, "(null)" }, + { 0x0A74, 0x905e, 0x104d, "(null)" }, + { 0x0A74, 0x0001, 0x1179, "(null)" }, + { 0x0A74, 0x0002, 0x1179, "(null)" }, + { 0x0A74, 0xff00, 0x1179, "(null)" }, + { 0x0A74, 0xff15, 0x1179, "(null)" }, + { 0x0A74, 0xff16, 0x1179, "(null)" }, + { 0x0A74, 0xff40, 0x1179, "(null)" }, + { 0x0A74, 0xff50, 0x1179, "(null)" }, + { 0x0A74, 0xc540, 0x144d, "(null)" }, + { 0x0A74, 0x1015, 0x1462, "(null)" }, + { 0x0A74, 0x1023, 0x1462, "(null)" }, + { 0x0A74, 0x1049, 0x1462, "(null)" }, + { 0x0A74, 0x1051, 0x1462, "(null)" }, + { 0x0A74, 0x4570, 0x1462, "(null)" }, + { 0x0A74, 0x0048, 0x14c0, "(null)" }, + { 0x0A74, 0x0814, 0x152d, "(null)" }, + { 0x0A74, 0x0808, 0x1558, "(null)" }, + { 0x0A74, 0x3940, 0x1642, "(null)" }, + { 0x0A74, 0x2142, 0x17aa, "(null)" }, + { 0x0A74, 0x389f, 0x17aa, "(null)" }, + { 0x0A74, 0x38ce, 0x17aa, "(null)" }, + { 0x0A74, 0x38fe, 0x17aa, "(null)" }, + { 0x0A74, 0x3900, 0x17aa, "(null)" }, + { 0x0A74, 0x10d0, 0x17c0, "(null)" }, + { 0x0A74, 0x208d, 0x17c0, "(null)" }, + { 0x0A74, 0x0800, 0x1854, "(null)" }, + { 0x0A74, 0x0805, 0x1854, "(null)" }, + { 0x0A74, 0x903a, 0x1b0a, "(null)" }, + { 0x0A75, 0x0318, 0x1025, "(null)" }, + { 0x0A75, 0x035a, 0x1025, "(null)" }, + { 0x0A75, 0x0370, 0x1025, "(null)" }, + { 0x0A75, 0x0374, 0x1025, "(null)" }, + { 0x0A75, 0x037c, 0x1025, "(null)" }, + { 0x0A75, 0x040a, 0x1025, "(null)" }, + { 0x0A75, 0x0413, 0x1025, "(null)" }, + { 0x0A75, 0x0415, 0x1025, "(null)" }, + { 0x0A75, 0x0417, 0x1025, "(null)" }, + { 0x0A75, 0x0424, 0x1025, "(null)" }, + { 0x0A75, 0x0452, 0x1025, "(null)" }, + { 0x0A75, 0x0453, 0x1025, "(null)" }, + { 0x0A75, 0x0464, 0x1025, "(null)" }, + { 0x0A75, 0x043f, 0x1028, "(null)" }, + { 0x0A75, 0x0440, 0x1028, "(null)" }, + { 0x0A75, 0x0441, 0x1028, "(null)" }, + { 0x0A75, 0x1c22, 0x1043, "(null)" }, + { 0x0A75, 0x842f, 0x1043, "(null)" }, + { 0x0A75, 0x9067, 0x104d, "(null)" }, + { 0x0A75, 0x9069, 0x104d, "(null)" }, + { 0x0A75, 0x9072, 0x104d, "(null)" }, + { 0x0A75, 0x907a, 0x104d, "(null)" }, + { 0x0A75, 0x0d58, 0x105b, "(null)" }, + { 0x0A75, 0x4000, 0x1071, "(null)" }, + { 0x0A75, 0xb301, 0x1071, "(null)" }, + { 0x0A75, 0x1581, 0x10cf, "(null)" }, + { 0x0A75, 0x1582, 0x10cf, "(null)" }, + { 0x0A75, 0x1586, 0x10cf, "(null)" }, + { 0x0A75, 0x0798, 0x10de, "(null)" }, + { 0x0A75, 0xfd30, 0x1179, "(null)" }, + { 0x0A75, 0xfd80, 0x1179, "(null)" }, + { 0x0A75, 0xff00, 0x1179, "(null)" }, + { 0x0A75, 0xff16, 0x1179, "(null)" }, + { 0x0A75, 0xff17, 0x1179, "(null)" }, + { 0x0A75, 0xff40, 0x1179, "(null)" }, + { 0x0A75, 0xff50, 0x1179, "(null)" }, + { 0x0A75, 0xb06b, 0x144d, "(null)" }, + { 0x0A75, 0xc06a, 0x144d, "(null)" }, + { 0x0A75, 0xc06d, 0x144d, "(null)" }, + { 0x0A75, 0xc075, 0x144d, "(null)" }, + { 0x0A75, 0xc078, 0x144d, "(null)" }, + { 0x0A75, 0xc551, 0x144d, "(null)" }, + { 0x0A75, 0xc552, 0x144d, "(null)" }, + { 0x0A75, 0x0810, 0x152d, "(null)" }, + { 0x0A75, 0x0814, 0x152d, "(null)" }, + { 0x0A75, 0x0822, 0x152d, "(null)" }, + { 0x0A75, 0x0808, 0x1558, "(null)" }, + { 0x0A75, 0x3605, 0x17aa, "(null)" }, + { 0x0A75, 0x392d, 0x17aa, "(null)" }, + { 0x0A75, 
+ { 0x0A75, 0x3957, 0x17aa, "(null)" },
+ { 0x0A75, 0x3967, 0x17aa, "(null)" },
+ { 0x0A75, 0x5958, 0x17aa, "(null)" },
+ { 0x0A75, 0x10d0, 0x17c0, "(null)" },
+ { 0x0A75, 0x10d2, 0x17c0, "(null)" },
+ { 0x0A75, 0x0805, 0x1854, "(null)" },
+ { 0x0A75, 0x0821, 0x1854, "(null)" },
+ { 0x0A75, 0x5584, 0x1991, "(null)" },
+ { 0x0A75, 0x840a, 0x1afa, "(null)" },
+ { 0x0A75, 0x9a30, 0x1afa, "(null)" },
+ { 0x0A75, 0x00b9, 0x1b0a, "(null)" },
+ { 0x0A75, 0x00d8, 0x1b0a, "(null)" },
+ { 0x0A75, 0x00ec, 0x1b0a, "(null)" },
+ { 0x0A75, 0x2036, 0x1b0a, "(null)" },
+ { 0x0A75, 0x2040, 0x1b0a, "(null)" },
+ { 0x0A76, 0x8446, 0x1043, "(null)" },
+ { 0x0A76, 0x8447, 0x1043, "(null)" },
+ { 0x0A76, 0x4003, 0x1297, "(null)" },
+ { 0x0A78, 0x0746, 0x10de, "(null)" },
+ { 0x0A7A, 0x907e, 0x104d, "(null)" },
+ { 0x0A7A, 0xfc50, 0x1179, "(null)" },
+ { 0x0A7A, 0xfc61, 0x1179, "(null)" },
+ { 0x0A7A, 0xfc71, 0x1179, "(null)" },
+ { 0x0A7A, 0xfc90, 0x1179, "(null)" },
+ { 0x0A7A, 0xfcc0, 0x1179, "(null)" },
+ { 0x0A7A, 0xfcd0, 0x1179, "(null)" },
+ { 0x0A7A, 0xfce2, 0x1179, "(null)" },
+ { 0x0A7A, 0xfcf2, 0x1179, "(null)" },
+ { 0x0A7A, 0xfd16, 0x1179, "(null)" },
+ { 0x0A7A, 0xfd40, 0x1179, "(null)" },
+ { 0x0A7A, 0xfd50, 0x1179, "(null)" },
+ { 0x0A7A, 0xfd52, 0x1179, "(null)" },
+ { 0x0A7A, 0xfd61, 0x1179, "(null)" },
+ { 0x0A7A, 0xfd71, 0x1179, "(null)" },
+ { 0x0A7A, 0xfd92, 0x1179, "(null)" },
+ { 0x0A7A, 0xfd96, 0x1179, "(null)" },
+ { 0x0A7A, 0xfdd0, 0x1179, "(null)" },
+ { 0x0A7A, 0xfdd2, 0x1179, "(null)" },
+ { 0x0A7A, 0xfdfe, 0x1179, "(null)" },
+ { 0x0A7A, 0xc0a2, 0x144d, "(null)" },
+ { 0x0A7A, 0xc0b2, 0x144d, "(null)" },
+ { 0x0A7A, 0xc581, 0x144d, "(null)" },
+ { 0x0A7A, 0xc587, 0x144d, "(null)" },
+ { 0x0A7A, 0xc588, 0x144d, "(null)" },
+ { 0x0A7A, 0xc597, 0x144d, "(null)" },
+ { 0x0A7A, 0xc606, 0x144d, "(null)" },
+ { 0x0A7A, 0xaa51, 0x1462, "(null)" },
+ { 0x0A7A, 0xaa58, 0x1462, "(null)" },
+ { 0x0A7A, 0xac71, 0x1462, "(null)" },
+ { 0x0A7A, 0xac81, 0x1462, "(null)" },
+ { 0x0A7A, 0xac82, 0x1462, "(null)" },
+ { 0x0A7A, 0xae33, 0x1462, "(null)" },
+ { 0x0A7A, 0x3980, 0x1642, "(null)" },
+ { 0x0A7A, 0x3950, 0x17aa, "(null)" },
+ { 0x0A7A, 0x397d, 0x17aa, "(null)" },
+ { 0x0A7A, 0x2091, 0x1b0a, "(null)" },
+ { 0x0A7A, 0x90b4, 0x1b0a, "(null)" },
+ { 0x0A7A, 0x0003, 0x1bfd, "(null)" },
+ { 0x0A7A, 0x8006, 0x1bfd, "(null)" },
+ { 0x0A7A, 0x8007, 0x1bfd, "(null)" },
+ { 0x0A7B, 0x183d, 0x1462, "(null)" },
+ { 0x0A7C, 0x172b, 0x103c, "(null)" },
+ { 0x0CA0, 0x3926, 0x1642, "(null)" },
+ { 0x0CA2, 0x1915, 0x10de, "(null)" },
+ { 0x0CA2, 0x1914, 0x1462, "(null)" },
+ { 0x0CA2, 0x1917, 0x1462, "(null)" },
+ { 0x0CA2, 0x1918, 0x1462, "(null)" },
+ { 0x0CA3, 0x069d, 0x10de, "(null)" },
+ { 0x0CA3, 0x3926, 0x1642, "(null)" },
+ { 0x0CA9, 0x026b, 0x1025, "(null)" },
+ { 0x0CA9, 0x2033, 0x1043, "(null)" },
+ { 0x0CA9, 0xff50, 0x1179, "(null)" },
+ { 0x0CA9, 0x1024, 0x1462, "(null)" },
+ { 0x0CA9, 0x102f, 0x1462, "(null)" },
+ { 0x0CA9, 0x1035, 0x1462, "(null)" },
+ { 0x0CA9, 0x3942, 0x1642, "(null)" },
+ { 0x0CAF, 0x0463, 0x1025, "(null)" },
+ { 0x0CAF, 0x0443, 0x1028, "(null)" },
+ { 0x0CAF, 0x0465, 0x1028, "(null)" },
+ { 0x0CAF, 0x1242, 0x1043, "(null)" },
+ { 0x0CAF, 0x1282, 0x1043, "(null)" },
+ { 0x0CAF, 0x12b2, 0x1043, "(null)" },
+ { 0x0CAF, 0x1342, 0x1043, "(null)" },
+ { 0x0CAF, 0x13d2, 0x1043, "(null)" },
+ { 0x0CAF, 0x1422, 0x1043, "(null)" },
+ { 0x0CAF, 0x1462, 0x1043, "(null)" },
+ { 0x0CAF, 0x1482, 0x1043, "(null)" },
+ { 0x0CAF, 0x14b2, 0x1043, "(null)" },
+ { 0x0CAF, 0x1502, 0x1043, "(null)" },
+ { 0x0CAF, 0x1fb2, 0x1043, "(null)" },
+ { 0x0CAF, 0x0cdd, 0x105b, "(null)" },
+ { 0x0CAF, 0x1585, 0x10cf, "(null)" },
+ { 0x0CAF, 0x0782, 0x10de, "(null)" },
+ { 0x0CAF, 0xff50, 0x1179, "(null)" },
+ { 0x0CAF, 0x0813, 0x152d, "(null)" },
+ { 0x0CAF, 0x0831, 0x152d, "(null)" },
+ { 0x0CAF, 0x10d0, 0x17c0, "(null)" },
+ { 0x0CAF, 0x10d3, 0x17c0, "(null)" },
+ { 0x0CAF, 0x0804, 0x1854, "(null)" },
+ { 0x0CAF, 0x0832, 0x1854, "(null)" },
+ { 0x0CB0, 0x080d, 0x10de, "(null)" },
+ { 0x0CB0, 0xfd30, 0x1179, "(null)" },
+ { 0x0CB0, 0xff50, 0x1179, "(null)" },
+ { 0x0CB0, 0x8687, 0x1558, "(null)" },
+ { 0x0CB0, 0x8689, 0x1558, "(null)" },
+ { 0x0CB1, 0x203c, 0x1043, "(null)" },
+ { 0x0CB1, 0xff50, 0x1179, "(null)" },
+ { 0x0CB1, 0x8687, 0x1558, "(null)" },
+ { 0x0CB1, 0x8689, 0x1558, "(null)" },
+ { 0x0CBC, 0x040c, 0x1028, "(null)" },
+ { 0x0CBC, 0x1521, 0x103c, "(null)" },
+ { 0x0DC0, 0x1005, 0x1019, "(null)" },
+ { 0x0DC0, 0x082d, 0x10de, "(null)" },
+ { 0x0DC0, 0x2310, 0x1462, "(null)" },
+ { 0x0DC0, 0x2311, 0x1462, "(null)" },
+ { 0x0DC0, 0x2312, 0x1462, "(null)" },
+ { 0x0DC0, 0x3a28, 0x1642, "(null)" },
+ { 0x0DC0, 0x1178, 0x174b, "(null)" },
+ { 0x0DC0, 0x2178, 0x174b, "(null)" },
+ { 0x0DC4, 0x837a, 0x1043, "(null)" },
+ { 0x0DC4, 0xc000, 0x1458, "(null)" },
+ { 0x0DC5, 0x1007, 0x1019, "(null)" },
+ { 0x0DC5, 0x1011, 0x1019, "(null)" },
+ { 0x0DC5, 0x085b, 0x10de, "(null)" },
+ { 0x0DC5, 0x1184, 0x174b, "(null)" },
+ { 0x0DC6, 0x2362, 0x1462, "(null)" },
+ { 0x0DCD, 0x0491, 0x1028, "(null)" },
+ { 0x0DCD, 0x04b7, 0x1028, "(null)" },
+ { 0x0DCD, 0x04b8, 0x1028, "(null)" },
+ { 0x0DCD, 0x1525, 0x1458, "(null)" },
+ { 0x0DCD, 0x1532, 0x1458, "(null)" },
+ { 0x0DCD, 0x10a2, 0x1462, "(null)" },
+ { 0x0DCE, 0x0564, 0x1025, "(null)" },
+ { 0x0DCE, 0x0565, 0x1025, "(null)" },
+ { 0x0DCE, 0x0566, 0x1025, "(null)" },
+ { 0x0DCE, 0x204c, 0x1043, "(null)" },
+ { 0x0DCE, 0x204e, 0x1043, "(null)" },
+ { 0x0DCE, 0x2051, 0x1043, "(null)" },
+ { 0x0DCE, 0x1525, 0x1458, "(null)" },
+ { 0x0DCE, 0x1532, 0x1458, "(null)" },
+ { 0x0DCE, 0x0875, 0x152d, "(null)" },
+ { 0x0DCE, 0x1500, 0x1558, "(null)" },
+ { 0x0DCE, 0x1700, 0x1558, "(null)" },
+ { 0x0DCE, 0x20a6, 0x1b0a, "(null)" },
+ { 0x0DD1, 0x02a2, 0x1028, "(null)" },
+ { 0x0DD1, 0x048f, 0x1028, "(null)" },
+ { 0x0DD1, 0x0490, 0x1028, "(null)" },
+ { 0x0DD1, 0x0491, 0x1028, "(null)" },
+ { 0x0DD1, 0x04b9, 0x1028, "(null)" },
+ { 0x0DD1, 0x04ba, 0x1028, "(null)" },
+ { 0x0DD1, 0x203d, 0x1043, "(null)" },
+ { 0x0DD1, 0x2040, 0x1043, "(null)" },
+ { 0x0DD1, 0x2041, 0x1043, "(null)" },
+ { 0x0DD1, 0x2042, 0x1043, "(null)" },
+ { 0x0DD1, 0x2043, 0x1043, "(null)" },
+ { 0x0DD1, 0x2044, 0x1043, "(null)" },
+ { 0x0DD1, 0x2045, 0x1043, "(null)" },
+ { 0x0DD1, 0x2046, 0x1043, "(null)" },
+ { 0x0DD1, 0x2047, 0x1043, "(null)" },
+ { 0x0DD1, 0x2048, 0x1043, "(null)" },
+ { 0x0DD1, 0x204a, 0x1043, "(null)" },
+ { 0x0DD1, 0x204b, 0x1043, "(null)" },
+ { 0x0DD1, 0x8465, 0x1043, "(null)" },
+ { 0x0DD1, 0xfcb0, 0x1179, "(null)" },
+ { 0x0DD1, 0xff50, 0x1179, "(null)" },
+ { 0x0DD1, 0x1083, 0x1462, "(null)" },
+ { 0x0DD1, 0x5102, 0x1558, "(null)" },
+ { 0x0DD1, 0x7100, 0x1558, "(null)" },
+ { 0x0DD1, 0x7200, 0x1558, "(null)" },
+ { 0x0DD1, 0x8100, 0x1558, "(null)" },
+ { 0x0DD1, 0x8687, 0x1558, "(null)" },
+ { 0x0DD1, 0x3620, 0x17aa, "(null)" },
+ { 0x0DD1, 0x10ea, 0x17c0, "(null)" },
+ { 0x0DD2, 0x046c, 0x1028, "(null)" },
+ { 0x0DD2, 0x0491, 0x1028, "(null)" },
+ { 0x0DD2, 0x0854, 0x152d, "(null)" },
+ { 0x0DD3, 0x046c, 0x1028, "(null)" },
+ { 0x0DD6, 0x04b7, 0x1028, "(null)" },
+ { 0x0DD6, 0x04b8, 0x1028, "(null)" },
+ { 0x0DD8, 0x084a, 0x103c, "(null)" },
+ { 0x0DD8, 0x0914, 0x103c, "(null)" },
+ { 0x0DD8, 0x084a, 0x10de, "(null)" },
+ { 0x0DD8, 0x0914, 0x10de, "(null)" },
+ { 0x0DDA, 0x04a3, 0x1028, "(null)" },
+ { 0x0DDA, 0x14a3, 0x1028, "(null)" },
+ { 0x0DDA, 0x1631, 0x103c, "(null)" },
+ { 0x0DDA, 0x21cf, 0x17aa, "(null)" },
+ { 0x0DDA, 0x21d1, 0x17aa, "(null)" },
+ { 0x0DE0, 0xc000, 0x1458, "(null)" },
+ { 0x0DE0, 0xaa73, 0x1462, "(null)" },
+ { 0x0DE1, 0x836d, 0x1043, "(null)" },
+ { 0x0DE1, 0xc000, 0x1458, "(null)" },
+ { 0x0DE1, 0x2302, 0x1462, "(null)" },
+ { 0x0DE1, 0x2303, 0x1462, "(null)" },
+ { 0x0DE1, 0x2305, 0x1462, "(null)" },
+ { 0x0DE1, 0x3a26, 0x1642, "(null)" },
+ { 0x0DE1, 0x3162, 0x19da, "(null)" },
+ { 0x0DE2, 0x1004, 0x1019, "(null)" },
+ { 0x0DE2, 0x835f, 0x1043, "(null)" },
+ { 0x0DE2, 0x2301, 0x1462, "(null)" },
+ { 0x0DE2, 0x2302, 0x1462, "(null)" },
+ { 0x0DE2, 0x3a26, 0x1642, "(null)" },
+ { 0x0DE2, 0x1162, 0x174b, "(null)" },
+ { 0x0DE2, 0x2162, 0x174b, "(null)" },
+ { 0x0DE2, 0x9083, 0x1b0a, "(null)" },
+ { 0x0DE3, 0x9995, 0x1019, "(null)" },
+ { 0x0DE3, 0x181b, 0x103c, "(null)" },
+ { 0x0DE3, 0x181d, 0x103c, "(null)" },
+ { 0x0DE3, 0x189b, 0x103c, "(null)" },
+ { 0x0DE3, 0x100d, 0x1043, "(null)" },
+ { 0x0DE3, 0x10ac, 0x1043, "(null)" },
+ { 0x0DE3, 0x10bc, 0x1043, "(null)" },
+ { 0x0DE3, 0x10cc, 0x1043, "(null)" },
+ { 0x0DE3, 0x112d, 0x1043, "(null)" },
+ { 0x0DE3, 0x1447, 0x1043, "(null)" },
+ { 0x0DE3, 0x1477, 0x1043, "(null)" },
+ { 0x0DE3, 0x1547, 0x1043, "(null)" },
+ { 0x0DE3, 0x1587, 0x1043, "(null)" },
+ { 0x0DE3, 0x2137, 0x1043, "(null)" },
+ { 0x0DE3, 0x2139, 0x1043, "(null)" },
+ { 0x0DE3, 0x21da, 0x1043, "(null)" },
+ { 0x0DE3, 0x10b8, 0x1462, "(null)" },
+ { 0x0DE3, 0x0950, 0x152d, "(null)" },
+ { 0x0DE3, 0x0989, 0x152d, "(null)" },
+ { 0x0DE3, 0x0992, 0x152d, "(null)" },
+ { 0x0DE3, 0x1006, 0x152d, "(null)" },
+ { 0x0DE3, 0x1022, 0x152d, "(null)" },
+ { 0x0DE3, 0x2706, 0x1558, "(null)" },
+ { 0x0DE3, 0x3901, 0x17aa, "(null)" },
+ { 0x0DE3, 0x3902, 0x17aa, "(null)" },
+ { 0x0DE3, 0x3904, 0x17aa, "(null)" },
+ { 0x0DE3, 0x5001, 0x17aa, "(null)" },
+ { 0x0DE3, 0x5003, 0x17aa, "(null)" },
+ { 0x0DE3, 0x5007, 0x17aa, "(null)" },
+ { 0x0DE3, 0x5012, 0x17aa, "(null)" },
+ { 0x0DE3, 0x20dc, 0x1b0a, "(null)" },
+ { 0x0DE3, 0x20dd, 0x1b0a, "(null)" },
+ { 0x0DE3, 0x20df, 0x1b0a, "(null)" },
+ { 0x0DE3, 0x222a, 0x1b0a, "(null)" },
+ { 0x0DE5, 0x839a, 0x1043, "(null)" },
+ { 0x0DE5, 0x839b, 0x1043, "(null)" },
+ { 0x0DE5, 0x2300, 0x1462, "(null)" },
+ { 0x0DE5, 0x2309, 0x1462, "(null)" },
+ { 0x0DE5, 0x230a, 0x1462, "(null)" },
+ { 0x0DE5, 0x230c, 0x1462, "(null)" },
+ { 0x0DE5, 0x230d, 0x1462, "(null)" },
+ { 0x0DE5, 0x3a26, 0x1642, "(null)" },
+ { 0x0DE5, 0x7162, 0x174b, "(null)" },
+ { 0x0DE5, 0x90a1, 0x1b0a, "(null)" },
+ { 0x0DE5, 0x90a3, 0x1b0a, "(null)" },
+ { 0x0DE8, 0x9097, 0x104d, "(null)" },
+ { 0x0DE8, 0x175c, 0x10cf, "(null)" },
+ { 0x0DE8, 0x1763, 0x10cf, "(null)" },
+ { 0x0DE8, 0x1765, 0x10cf, "(null)" },
+ { 0x0DE8, 0x1767, 0x10cf, "(null)" },
+ { 0x0DE8, 0x1769, 0x10cf, "(null)" },
+ { 0x0DE8, 0xc652, 0x144d, "(null)" },
+ { 0x0DE8, 0x006a, 0x14c0, "(null)" },
+ { 0x0DE8, 0x006b, 0x14c0, "(null)" },
+ { 0x0DE8, 0x006d, 0x14c0, "(null)" },
+ { 0x0DE8, 0x0924, 0x152d, "(null)" },
+ { 0x0DE8, 0x0970, 0x152d, "(null)" },
+ { 0x0DE8, 0x0973, 0x152d, "(null)" },
+ { 0x0DE8, 0x0975, 0x152d, "(null)" },
+ { 0x0DE8, 0x0977, 0x152d, "(null)" },
+ { 0x0DE8, 0x21fc, 0x17aa, "(null)" },
"(null)" }, + { 0x0DE9, 0x22db, 0x1019, "(null)" }, + { 0x0DE9, 0x9995, 0x1019, "(null)" }, + { 0x0DE9, 0x999d, 0x1019, "(null)" }, + { 0x0DE9, 0x0487, 0x1025, "(null)" }, + { 0x0DE9, 0x0488, 0x1025, "(null)" }, + { 0x0DE9, 0x0505, 0x1025, "(null)" }, + { 0x0DE9, 0x0507, 0x1025, "(null)" }, + { 0x0DE9, 0x0512, 0x1025, "(null)" }, + { 0x0DE9, 0x0573, 0x1025, "(null)" }, + { 0x0DE9, 0x0574, 0x1025, "(null)" }, + { 0x0DE9, 0x0575, 0x1025, "(null)" }, + { 0x0DE9, 0x0646, 0x1025, "(null)" }, + { 0x0DE9, 0x0648, 0x1025, "(null)" }, + { 0x0DE9, 0x064a, 0x1025, "(null)" }, + { 0x0DE9, 0x064c, 0x1025, "(null)" }, + { 0x0DE9, 0x0679, 0x1025, "(null)" }, + { 0x0DE9, 0x067a, 0x1025, "(null)" }, + { 0x0DE9, 0x0680, 0x1025, "(null)" }, + { 0x0DE9, 0x0686, 0x1025, "(null)" }, + { 0x0DE9, 0x0689, 0x1025, "(null)" }, + { 0x0DE9, 0x068b, 0x1025, "(null)" }, + { 0x0DE9, 0x068d, 0x1025, "(null)" }, + { 0x0DE9, 0x068f, 0x1025, "(null)" }, + { 0x0DE9, 0x0692, 0x1025, "(null)" }, + { 0x0DE9, 0x069b, 0x1025, "(null)" }, + { 0x0DE9, 0x069e, 0x1025, "(null)" }, + { 0x0DE9, 0x0702, 0x1025, "(null)" }, + { 0x0DE9, 0x0719, 0x1025, "(null)" }, + { 0x0DE9, 0x0721, 0x1025, "(null)" }, + { 0x0DE9, 0x0722, 0x1025, "(null)" }, + { 0x0DE9, 0x0723, 0x1025, "(null)" }, + { 0x0DE9, 0x0725, 0x1025, "(null)" }, + { 0x0DE9, 0x0728, 0x1025, "(null)" }, + { 0x0DE9, 0x072b, 0x1025, "(null)" }, + { 0x0DE9, 0x072e, 0x1025, "(null)" }, + { 0x0DE9, 0x0732, 0x1025, "(null)" }, + { 0x0DE9, 0x073f, 0x1025, "(null)" }, + { 0x0DE9, 0x0753, 0x1025, "(null)" }, + { 0x0DE9, 0x0754, 0x1025, "(null)" }, + { 0x0DE9, 0x055e, 0x1028, "(null)" }, + { 0x0DE9, 0x0563, 0x1028, "(null)" }, + { 0x0DE9, 0x181a, 0x103c, "(null)" }, + { 0x0DE9, 0x181b, 0x103c, "(null)" }, + { 0x0DE9, 0x181d, 0x103c, "(null)" }, + { 0x0DE9, 0x1837, 0x103c, "(null)" }, + { 0x0DE9, 0x100d, 0x1043, "(null)" }, + { 0x0DE9, 0x10ac, 0x1043, "(null)" }, + { 0x0DE9, 0x10bc, 0x1043, "(null)" }, + { 0x0DE9, 0x10cc, 0x1043, "(null)" }, + { 0x0DE9, 0x1447, 0x1043, "(null)" }, + { 0x0DE9, 0x1477, 0x1043, "(null)" }, + { 0x0DE9, 0x1497, 0x1043, "(null)" }, + { 0x0DE9, 0x1547, 0x1043, "(null)" }, + { 0x0DE9, 0x1587, 0x1043, "(null)" }, + { 0x0DE9, 0x2104, 0x1043, "(null)" }, + { 0x0DE9, 0x2106, 0x1043, "(null)" }, + { 0x0DE9, 0x2110, 0x1043, "(null)" }, + { 0x0DE9, 0x2113, 0x1043, "(null)" }, + { 0x0DE9, 0x2114, 0x1043, "(null)" }, + { 0x0DE9, 0x2128, 0x1043, "(null)" }, + { 0x0DE9, 0x212e, 0x1043, "(null)" }, + { 0x0DE9, 0x212f, 0x1043, "(null)" }, + { 0x0DE9, 0x2131, 0x1043, "(null)" }, + { 0x0DE9, 0x2137, 0x1043, "(null)" }, + { 0x0DE9, 0x8518, 0x1043, "(null)" }, + { 0x0DE9, 0x8523, 0x1043, "(null)" }, + { 0x0DE9, 0xfb01, 0x1179, "(null)" }, + { 0x0DE9, 0xfb11, 0x1179, "(null)" }, + { 0x0DE9, 0xfb12, 0x1179, "(null)" }, + { 0x0DE9, 0xfb62, 0x1179, "(null)" }, + { 0x0DE9, 0xfb69, 0x1179, "(null)" }, + { 0x0DE9, 0xfb6d, 0x1179, "(null)" }, + { 0x0DE9, 0xfb70, 0x1179, "(null)" }, + { 0x0DE9, 0xc0d1, 0x144d, "(null)" }, + { 0x0DE9, 0xc634, 0x144d, "(null)" }, + { 0x0DE9, 0xc650, 0x144d, "(null)" }, + { 0x0DE9, 0xc652, 0x144d, "(null)" }, + { 0x0DE9, 0x109c, 0x1462, "(null)" }, + { 0x0DE9, 0x10a4, 0x1462, "(null)" }, + { 0x0DE9, 0x10aa, 0x1462, "(null)" }, + { 0x0DE9, 0x10b8, 0x1462, "(null)" }, + { 0x0DE9, 0x10ba, 0x1462, "(null)" }, + { 0x0DE9, 0x10ca, 0x1462, "(null)" }, + { 0x0DE9, 0xa962, 0x1462, "(null)" }, + { 0x0DE9, 0xaa32, 0x1462, "(null)" }, + { 0x0DE9, 0xaa59, 0x1462, "(null)" }, + { 0x0DE9, 0xaa73, 0x1462, "(null)" }, + { 0x0DE9, 0xac77, 0x1462, "(null)" }, + { 0x0DE9, 0xac7c, 
0x1462, "(null)" }, + { 0x0DE9, 0xac91, 0x1462, "(null)" }, + { 0x0DE9, 0xad31, 0x1462, "(null)" }, + { 0x0DE9, 0xae32, 0x1462, "(null)" }, + { 0x0DE9, 0xae72, 0x1462, "(null)" }, + { 0x0DE9, 0xaf11, 0x1462, "(null)" }, + { 0x0DE9, 0xaf13, 0x1462, "(null)" }, + { 0x0DE9, 0x0065, 0x14c0, "(null)" }, + { 0x0DE9, 0x0066, 0x14c0, "(null)" }, + { 0x0DE9, 0x0969, 0x152d, "(null)" }, + { 0x0DE9, 0x0972, 0x152d, "(null)" }, + { 0x0DE9, 0x0974, 0x152d, "(null)" }, + { 0x0DE9, 0x0976, 0x152d, "(null)" }, + { 0x0DE9, 0x2702, 0x1558, "(null)" }, + { 0x0DE9, 0x2703, 0x1558, "(null)" }, + { 0x0DE9, 0x362b, 0x17aa, "(null)" }, + { 0x0DE9, 0x3901, 0x17aa, "(null)" }, + { 0x0DE9, 0x3902, 0x17aa, "(null)" }, + { 0x0DE9, 0x3903, 0x17aa, "(null)" }, + { 0x0DE9, 0x3904, 0x17aa, "(null)" }, + { 0x0DE9, 0x3977, 0x17aa, "(null)" }, + { 0x0DE9, 0x397d, 0x17aa, "(null)" }, + { 0x0DE9, 0x397f, 0x17aa, "(null)" }, + { 0x0DE9, 0x3983, 0x17aa, "(null)" }, + { 0x0DE9, 0x5001, 0x17aa, "(null)" }, + { 0x0DE9, 0x5003, 0x17aa, "(null)" }, + { 0x0DE9, 0x5007, 0x17aa, "(null)" }, + { 0x0DE9, 0x500f, 0x17aa, "(null)" }, + { 0x0DE9, 0x5012, 0x17aa, "(null)" }, + { 0x0DE9, 0x10e7, 0x17c0, "(null)" }, + { 0x0DE9, 0x10f5, 0x17c0, "(null)" }, + { 0x0DE9, 0x3012, 0x1854, "(null)" }, + { 0x0DE9, 0x3014, 0x1854, "(null)" }, + { 0x0DE9, 0x5584, 0x1991, "(null)" }, + { 0x0DE9, 0x20c6, 0x1b0a, "(null)" }, + { 0x0DE9, 0x20c8, 0x1b0a, "(null)" }, + { 0x0DE9, 0x20dc, 0x1b0a, "(null)" }, + { 0x0DE9, 0x20dd, 0x1b0a, "(null)" }, + { 0x0DE9, 0x20df, 0x1b0a, "(null)" }, + { 0x0DE9, 0x20e4, 0x1b0a, "(null)" }, + { 0x0DE9, 0x20ec, 0x1b0a, "(null)" }, + { 0x0DE9, 0x2210, 0x1b0a, "(null)" }, + { 0x0DE9, 0x2228, 0x1b0a, "(null)" }, + { 0x0DE9, 0x2229, 0x1b0a, "(null)" }, + { 0x0DE9, 0x222a, 0x1b0a, "(null)" }, + { 0x0DE9, 0x90b8, 0x1b0a, "(null)" }, + { 0x0DEA, 0x22db, 0x1019, "(null)" }, + { 0x0DEA, 0x0488, 0x1025, "(null)" }, + { 0x0DEA, 0x0505, 0x1025, "(null)" }, + { 0x0DEA, 0x0507, 0x1025, "(null)" }, + { 0x0DEA, 0x0509, 0x1025, "(null)" }, + { 0x0DEA, 0x0512, 0x1025, "(null)" }, + { 0x0DEA, 0x055a, 0x1025, "(null)" }, + { 0x0DEA, 0x0611, 0x1025, "(null)" }, + { 0x0DEA, 0x0d88, 0x105b, "(null)" }, + { 0x0DEA, 0x0d9a, 0x105b, "(null)" }, + { 0x0DEA, 0xff00, 0x105b, "(null)" }, + { 0x0DEA, 0x0925, 0x152d, "(null)" }, + { 0x0DEA, 0x0932, 0x152d, "(null)" }, + { 0x0DEA, 0x0938, 0x152d, "(null)" }, + { 0x0DEA, 0x0945, 0x152d, "(null)" }, + { 0x0DEA, 0x0948, 0x152d, "(null)" }, + { 0x0DEA, 0x0954, 0x152d, "(null)" }, + { 0x0DEA, 0x0956, 0x152d, "(null)" }, + { 0x0DEA, 0x0962, 0x152d, "(null)" }, + { 0x0DEA, 0x0965, 0x152d, "(null)" }, + { 0x0DEA, 0x2512, 0x1558, "(null)" }, + { 0x0DEA, 0x365a, 0x17aa, "(null)" }, + { 0x0DEA, 0x365b, 0x17aa, "(null)" }, + { 0x0DEA, 0x365e, 0x17aa, "(null)" }, + { 0x0DEA, 0x3660, 0x17aa, "(null)" }, + { 0x0DEA, 0x366c, 0x17aa, "(null)" }, + { 0x0DEA, 0x20df, 0x1b0a, "(null)" }, + { 0x0DEA, 0x0011, 0x1bfd, "(null)" }, + { 0x0DEB, 0x3620, 0x17aa, "(null)" }, + { 0x0DEB, 0x3980, 0x17aa, "(null)" }, + { 0x0DEB, 0x3981, 0x17aa, "(null)" }, + { 0x0DEC, 0x4011, 0x1071, "(null)" }, + { 0x0DEC, 0xfc50, 0x1179, "(null)" }, + { 0x0DEC, 0xfc61, 0x1179, "(null)" }, + { 0x0DEC, 0xfc71, 0x1179, "(null)" }, + { 0x0DEC, 0xfc81, 0x1179, "(null)" }, + { 0x0DEC, 0xfcd0, 0x1179, "(null)" }, + { 0x0DEC, 0xfd16, 0x1179, "(null)" }, + { 0x0DEC, 0xfd31, 0x1179, "(null)" }, + { 0x0DEC, 0xfd50, 0x1179, "(null)" }, + { 0x0DEC, 0xfd61, 0x1179, "(null)" }, + { 0x0DEC, 0xfd71, 0x1179, "(null)" }, + { 0x0DEC, 0xfdd0, 0x1179, "(null)" }, + { 0x0DEC, 
+ { 0x0DEC, 0xc0a5, 0x144d, "(null)" },
+ { 0x0DEC, 0xc0ae, 0x144d, "(null)" },
+ { 0x0DEC, 0xc0b7, 0x144d, "(null)" },
+ { 0x0DEC, 0xc0c1, 0x144d, "(null)" },
+ { 0x0DED, 0x105c, 0x1043, "(null)" },
+ { 0x0DED, 0x0dbd, 0x105b, "(null)" },
+ { 0x0DED, 0xfcd0, 0x1179, "(null)" },
+ { 0x0DED, 0xfd52, 0x1179, "(null)" },
+ { 0x0DED, 0xfdd2, 0x1179, "(null)" },
+ { 0x0DED, 0x109c, 0x1462, "(null)" },
+ { 0x0DED, 0x10aa, 0x1462, "(null)" },
+ { 0x0DED, 0x0056, 0x14c0, "(null)" },
+ { 0x0DED, 0x0059, 0x14c0, "(null)" },
+ { 0x0DED, 0x005a, 0x14c0, "(null)" },
+ { 0x0DED, 0x0873, 0x152d, "(null)" },
+ { 0x0DED, 0x0878, 0x152d, "(null)" },
+ { 0x0DED, 0x0880, 0x152d, "(null)" },
+ { 0x0DED, 0x2500, 0x1558, "(null)" },
+ { 0x0DED, 0x5584, 0x1991, "(null)" },
+ { 0x0DED, 0x5686, 0x1991, "(null)" },
+ { 0x0DED, 0x209f, 0x1b0a, "(null)" },
+ { 0x0DED, 0x2002, 0x1bab, "(null)" },
+ { 0x0DEE, 0x0370, 0x1025, "(null)" },
+ { 0x0DEE, 0x0371, 0x1025, "(null)" },
+ { 0x0DEE, 0x0374, 0x1025, "(null)" },
+ { 0x0DEE, 0x0375, 0x1025, "(null)" },
+ { 0x0DEE, 0x037c, 0x1025, "(null)" },
+ { 0x0DEE, 0x037d, 0x1025, "(null)" },
+ { 0x0DEE, 0x0417, 0x1025, "(null)" },
+ { 0x0DEE, 0x0423, 0x1025, "(null)" },
+ { 0x0DEE, 0x055a, 0x1025, "(null)" },
+ { 0x0DEE, 0x1552, 0x1043, "(null)" },
+ { 0x0DEE, 0x1562, 0x1043, "(null)" },
+ { 0x0DEE, 0x1582, 0x1043, "(null)" },
+ { 0x0DEE, 0x15a2, 0x1043, "(null)" },
+ { 0x0DEF, 0x21f3, 0x17aa, "(null)" },
+ { 0x0DEF, 0x21f4, 0x17aa, "(null)" },
+ { 0x0DEF, 0x21f5, 0x17aa, "(null)" },
+ { 0x0DEF, 0x21f6, 0x17aa, "(null)" },
+ { 0x0DEF, 0x21f8, 0x17aa, "(null)" },
+ { 0x0DEF, 0x362b, 0x17aa, "(null)" },
+ { 0x0DEF, 0x365b, 0x17aa, "(null)" },
+ { 0x0DEF, 0x5005, 0x17aa, "(null)" },
+ { 0x0DF0, 0x0463, 0x1025, "(null)" },
+ { 0x0DF0, 0x0468, 0x1028, "(null)" },
+ { 0x0DF0, 0x1482, 0x1043, "(null)" },
+ { 0x0DF0, 0x1502, 0x1043, "(null)" },
+ { 0x0DF0, 0x1512, 0x1043, "(null)" },
+ { 0x0DF0, 0x1522, 0x1043, "(null)" },
+ { 0x0DF0, 0x1532, 0x1043, "(null)" },
+ { 0x0DF0, 0x1552, 0x1043, "(null)" },
+ { 0x0DF0, 0x1582, 0x1043, "(null)" },
+ { 0x0DF0, 0x15f2, 0x1043, "(null)" },
+ { 0x0DF0, 0x907a, 0x104d, "(null)" },
+ { 0x0DF0, 0x907e, 0x104d, "(null)" },
+ { 0x0DF0, 0x9280, 0x1071, "(null)" },
+ { 0x0DF0, 0x9081, 0x109f, "(null)" },
+ { 0x0DF0, 0xc08e, 0x144d, "(null)" },
+ { 0x0DF0, 0x1075, 0x1462, "(null)" },
+ { 0x0DF0, 0x1082, 0x1462, "(null)" },
+ { 0x0DF0, 0x0053, 0x14c0, "(null)" },
+ { 0x0DF0, 0x0852, 0x152d, "(null)" },
+ { 0x0DF0, 0x0853, 0x152d, "(null)" },
+ { 0x0DF0, 0x0855, 0x152d, "(null)" },
+ { 0x0DF0, 0x5130, 0x1558, "(null)" },
+ { 0x0DF0, 0x7130, 0x1558, "(null)" },
+ { 0x0DF0, 0x396c, 0x17aa, "(null)" },
+ { 0x0DF0, 0x10e2, 0x17c0, "(null)" },
+ { 0x0DF0, 0x0844, 0x1854, "(null)" },
+ { 0x0DF0, 0x207c, 0x1b0a, "(null)" },
+ { 0x0DF0, 0x9077, 0x1b0a, "(null)" },
+ { 0x0DF0, 0x907c, 0x1b0a, "(null)" },
+ { 0x0DF0, 0x909a, 0x1b0a, "(null)" },
+ { 0x0DF0, 0x5609, 0x1b50, "(null)" },
+ { 0x0DF0, 0xa01b, 0x1bcf, "(null)" },
+ { 0x0DF1, 0x035a, 0x1025, "(null)" },
+ { 0x0DF1, 0x036c, 0x1025, "(null)" },
+ { 0x0DF1, 0x036d, 0x1025, "(null)" },
+ { 0x0DF1, 0x0370, 0x1025, "(null)" },
+ { 0x0DF1, 0x0371, 0x1025, "(null)" },
+ { 0x0DF1, 0x0374, 0x1025, "(null)" },
+ { 0x0DF1, 0x0375, 0x1025, "(null)" },
+ { 0x0DF1, 0x037c, 0x1025, "(null)" },
+ { 0x0DF1, 0x037d, 0x1025, "(null)" },
+ { 0x0DF1, 0x0417, 0x1025, "(null)" },
+ { 0x0DF1, 0x041e, 0x1025, "(null)" },
+ { 0x0DF1, 0x0423, 0x1025, "(null)" },
+ { 0x0DF1, 0x0434, 0x1025, "(null)" },
+ { 0x0DF1, 0x0464, 0x1025, "(null)" },
+ { 0x0DF1, 0x0487, 0x1025, "(null)" },
+ { 0x0DF1, 0x0488, 0x1025, "(null)" },
+ { 0x0DF1, 0x0499, 0x1025, "(null)" },
+ { 0x0DF1, 0x049a, 0x1025, "(null)" },
+ { 0x0DF1, 0x0468, 0x1028, "(null)" },
+ { 0x0DF1, 0x046e, 0x1028, "(null)" },
+ { 0x0DF1, 0xc08e, 0x144d, "(null)" },
+ { 0x0DF1, 0x0853, 0x152d, "(null)" },
+ { 0x0DF1, 0x2036, 0x1b0a, "(null)" },
+ { 0x0DF1, 0x207a, 0x1b0a, "(null)" },
+ { 0x0DF1, 0x2003, 0x1bfd, "(null)" },
+ { 0x0DF2, 0x046e, 0x1028, "(null)" },
+ { 0x0DF2, 0x15b2, 0x1043, "(null)" },
+ { 0x0DF2, 0x15c2, 0x1043, "(null)" },
+ { 0x0DF2, 0x1525, 0x1458, "(null)" },
+ { 0x0DF2, 0x1526, 0x1458, "(null)" },
+ { 0x0DF2, 0x5162, 0x174b, "(null)" },
+ { 0x0DF2, 0xa00a, 0x1bcf, "(null)" },
+ { 0x0DF2, 0xa022, 0x1bcf, "(null)" },
+ { 0x0DF3, 0xc08b, 0x144d, "(null)" },
+ { 0x0DF4, 0x036c, 0x1025, "(null)" },
+ { 0x0DF4, 0x036d, 0x1025, "(null)" },
+ { 0x0DF4, 0x0371, 0x1025, "(null)" },
+ { 0x0DF4, 0x037d, 0x1025, "(null)" },
+ { 0x0DF4, 0x041e, 0x1025, "(null)" },
+ { 0x0DF4, 0x0434, 0x1025, "(null)" },
+ { 0x0DF4, 0x0487, 0x1025, "(null)" },
+ { 0x0DF4, 0x0488, 0x1025, "(null)" },
+ { 0x0DF4, 0x0500, 0x1025, "(null)" },
+ { 0x0DF4, 0x0501, 0x1025, "(null)" },
+ { 0x0DF4, 0x0502, 0x1025, "(null)" },
+ { 0x0DF4, 0x0503, 0x1025, "(null)" },
+ { 0x0DF4, 0x0504, 0x1025, "(null)" },
+ { 0x0DF4, 0x0505, 0x1025, "(null)" },
+ { 0x0DF4, 0x0506, 0x1025, "(null)" },
+ { 0x0DF4, 0x0507, 0x1025, "(null)" },
+ { 0x0DF4, 0x0508, 0x1025, "(null)" },
+ { 0x0DF4, 0x0509, 0x1025, "(null)" },
+ { 0x0DF4, 0x0511, 0x1025, "(null)" },
+ { 0x0DF4, 0x0512, 0x1025, "(null)" },
+ { 0x0DF4, 0x054e, 0x1025, "(null)" },
+ { 0x0DF4, 0x0550, 0x1025, "(null)" },
+ { 0x0DF4, 0x055a, 0x1025, "(null)" },
+ { 0x0DF4, 0x055c, 0x1025, "(null)" },
+ { 0x0DF4, 0x0564, 0x1025, "(null)" },
+ { 0x0DF4, 0x0565, 0x1025, "(null)" },
+ { 0x0DF4, 0x0566, 0x1025, "(null)" },
+ { 0x0DF4, 0x0568, 0x1025, "(null)" },
+ { 0x0DF4, 0x056b, 0x1025, "(null)" },
+ { 0x0DF4, 0x0570, 0x1025, "(null)" },
+ { 0x0DF4, 0x0572, 0x1025, "(null)" },
+ { 0x0DF4, 0x0573, 0x1025, "(null)" },
+ { 0x0DF4, 0x0574, 0x1025, "(null)" },
+ { 0x0DF4, 0x0575, 0x1025, "(null)" },
+ { 0x0DF4, 0x0576, 0x1025, "(null)" },
+ { 0x0DF4, 0x0578, 0x1025, "(null)" },
+ { 0x0DF4, 0x0579, 0x1025, "(null)" },
+ { 0x0DF4, 0x057a, 0x1025, "(null)" },
+ { 0x0DF4, 0x057b, 0x1025, "(null)" },
+ { 0x0DF4, 0x0580, 0x1025, "(null)" },
+ { 0x0DF4, 0x0581, 0x1025, "(null)" },
+ { 0x0DF4, 0x058b, 0x1025, "(null)" },
+ { 0x0DF4, 0x058c, 0x1025, "(null)" },
+ { 0x0DF4, 0x04c8, 0x1028, "(null)" },
+ { 0x0DF4, 0x050e, 0x1028, "(null)" },
+ { 0x0DF4, 0x105c, 0x1043, "(null)" },
+ { 0x0DF4, 0x15e2, 0x1043, "(null)" },
+ { 0x0DF4, 0x15f2, 0x1043, "(null)" },
+ { 0x0DF4, 0x1642, 0x1043, "(null)" },
+ { 0x0DF4, 0x1662, 0x1043, "(null)" },
+ { 0x0DF4, 0x1672, 0x1043, "(null)" },
+ { 0x0DF4, 0x849e, 0x1043, "(null)" },
+ { 0x0DF4, 0x84ee, 0x1043, "(null)" },
+ { 0x0DF4, 0x907e, 0x104d, "(null)" },
+ { 0x0DF4, 0x9086, 0x104d, "(null)" },
+ { 0x0DF4, 0x9089, 0x104d, "(null)" },
+ { 0x0DF4, 0x908e, 0x104d, "(null)" },
+ { 0x0DF4, 0x0001, 0x1179, "(null)" },
+ { 0x0DF4, 0x0010, 0x1179, "(null)" },
+ { 0x0DF4, 0xfc00, 0x1179, "(null)" },
+ { 0x0DF4, 0xfc01, 0x1179, "(null)" },
+ { 0x0DF4, 0xfc05, 0x1179, "(null)" },
+ { 0x0DF4, 0xfc2a, 0x1179, "(null)" },
+ { 0x0DF4, 0xfc31, 0x1179, "(null)" },
+ { 0x0DF4, 0xfc35, 0x1179, "(null)" },
+ { 0x0DF4, 0xfc50, 0x1179, "(null)" },
+ { 0x0DF4, 0xfcd0, 0x1179, "(null)" },
+ { 0x0DF4, 0xc0a5, 0x144d, "(null)" },
+ { 0x0DF4, 0xc0c0, 0x144d, "(null)" },
+ { 0x0DF4, 0xc0c1, 0x144d, "(null)" },
+ { 0x0DF4, 0x108d, 0x1462, "(null)" },
+ { 0x0DF4, 0x109c, 0x1462, "(null)" },
+ { 0x0DF4, 0x10a4, 0x1462, "(null)" },
+ { 0x0DF4, 0x10aa, 0x1462, "(null)" },
+ { 0x0DF4, 0xaa32, 0x1462, "(null)" },
+ { 0x0DF4, 0xaa52, 0x1462, "(null)" },
+ { 0x0DF4, 0xaa59, 0x1462, "(null)" },
+ { 0x0DF4, 0xac71, 0x1462, "(null)" },
+ { 0x0DF4, 0xac72, 0x1462, "(null)" },
+ { 0x0DF4, 0xac75, 0x1462, "(null)" },
+ { 0x0DF4, 0xae32, 0x1462, "(null)" },
+ { 0x0DF4, 0xae33, 0x1462, "(null)" },
+ { 0x0DF4, 0x0059, 0x14c0, "(null)" },
+ { 0x0DF4, 0x0873, 0x152d, "(null)" },
+ { 0x0DF4, 0x0877, 0x152d, "(null)" },
+ { 0x0DF4, 0x0952, 0x152d, "(null)" },
+ { 0x0DF4, 0x0953, 0x152d, "(null)" },
+ { 0x0DF4, 0x2550, 0x1558, "(null)" },
+ { 0x0DF4, 0x5140, 0x1558, "(null)" },
+ { 0x0DF4, 0x7140, 0x1558, "(null)" },
+ { 0x0DF4, 0x397d, 0x17aa, "(null)" },
+ { 0x0DF4, 0x397f, 0x17aa, "(null)" },
+ { 0x0DF4, 0x3981, 0x17aa, "(null)" },
+ { 0x0DF4, 0x10e2, 0x17c0, "(null)" },
+ { 0x0DF4, 0x10e5, 0x17c0, "(null)" },
+ { 0x0DF4, 0x10e7, 0x17c0, "(null)" },
+ { 0x0DF4, 0x10e8, 0x17c0, "(null)" },
+ { 0x0DF4, 0x10ec, 0x17c0, "(null)" },
+ { 0x0DF4, 0x10ed, 0x17c0, "(null)" },
+ { 0x0DF4, 0x0df4, 0x1849, "(null)" },
+ { 0x0DF4, 0x0863, 0x1854, "(null)" },
+ { 0x0DF4, 0x3012, 0x1854, "(null)" },
+ { 0x0DF4, 0x5687, 0x1991, "(null)" },
+ { 0x0DF4, 0x208f, 0x1b0a, "(null)" },
+ { 0x0DF4, 0x209d, 0x1b0a, "(null)" },
+ { 0x0DF4, 0x90ad, 0x1b0a, "(null)" },
+ { 0x0DF4, 0x2005, 0x1b61, "(null)" },
+ { 0x0DF4, 0x9020, 0x1c06, "(null)" },
+ { 0x0DF5, 0x0446, 0x1028, "(null)" },
+ { 0x0DF5, 0x04b6, 0x1028, "(null)" },
+ { 0x0DF5, 0x04c4, 0x1028, "(null)" },
+ { 0x0DF5, 0x04c6, 0x1028, "(null)" },
+ { 0x0DF5, 0x04ca, 0x1028, "(null)" },
+ { 0x0DF5, 0x0511, 0x1028, "(null)" },
+ { 0x0DF5, 0x0512, 0x1028, "(null)" },
+ { 0x0DF5, 0x1649, 0x10cf, "(null)" },
+ { 0x0DF5, 0x164a, 0x10cf, "(null)" },
+ { 0x0DF5, 0xc0ae, 0x144d, "(null)" },
+ { 0x0DF5, 0x0059, 0x14c0, "(null)" },
+ { 0x0DF5, 0x005a, 0x14c0, "(null)" },
+ { 0x0DF5, 0x0062, 0x14c0, "(null)" },
+ { 0x0DF5, 0x0067, 0x14c0, "(null)" },
+ { 0x0DF5, 0x0068, 0x14c0, "(null)" },
+ { 0x0DF5, 0x397d, 0x17aa, "(null)" },
+ { 0x0DF5, 0x397f, 0x17aa, "(null)" },
+ { 0x0DF5, 0x10e5, 0x17c0, "(null)" },
+ { 0x0DF5, 0x5584, 0x1991, "(null)" },
+ { 0x0DF5, 0x5685, 0x1991, "(null)" },
+ { 0x0DF5, 0x5686, 0x1991, "(null)" },
+ { 0x0DF5, 0x00e6, 0x1b0a, "(null)" },
+ { 0x0DF5, 0x209c, 0x1b0a, "(null)" },
+ { 0x0DF5, 0x2002, 0x1bab, "(null)" },
+ { 0x0DF5, 0x2004, 0x1bfd, "(null)" },
+ { 0x0DF5, 0x8001, 0x1bfd, "(null)" },
+ { 0x0DF5, 0x8003, 0x1bfd, "(null)" },
+ { 0x0DF5, 0x9020, 0x1c06, "(null)" },
+ { 0x0DF6, 0x1712, 0x1043, "(null)" },
+ { 0x0DF6, 0x2525, 0x1458, "(null)" },
+ { 0x0DF6, 0x2532, 0x1458, "(null)" },
+ { 0x0DF6, 0x0059, 0x14c0, "(null)" },
+ { 0x0DF6, 0x005e, 0x14c0, "(null)" },
+ { 0x0DF6, 0x3981, 0x17aa, "(null)" },
+ { 0x0DF6, 0x20a5, 0x1b0a, "(null)" },
+ { 0x0DF6, 0x2002, 0x1bab, "(null)" },
+ { 0x0DF7, 0x0488, 0x1025, "(null)" },
+ { 0x0DF7, 0x0505, 0x1025, "(null)" },
+ { 0x0DF7, 0x0507, 0x1025, "(null)" },
+ { 0x0DF7, 0x0509, 0x1025, "(null)" },
+ { 0x0DF7, 0x0512, 0x1025, "(null)" },
+ { 0x0DF7, 0x053a, 0x1025, "(null)" },
+ { 0x0DF7, 0x055a, 0x1025, "(null)" },
+ { 0x0DF7, 0x060d, 0x1025, "(null)" },
+ { 0x0DF7, 0x0611, 0x1025, "(null)" },
+ { 0x0DF7, 0x108c, 0x1462, "(null)" },
+ { 0x0DF7, 0x1094, 0x1462, "(null)" },
+ { 0x0DF7, 0x005a, 0x14c0, "(null)" },
+ { 0x0DF7, 0x397d, 0x17aa, "(null)" },
"(null)" }, + { 0x0DF7, 0x397f, 0x17aa, "(null)" }, + { 0x0DF8, 0x0835, 0x103c, "(null)" }, + { 0x0DF8, 0x0835, 0x10de, "(null)" }, + { 0x0DF9, 0x092b, 0x103c, "(null)" }, + { 0x0DFA, 0x04a3, 0x1028, "(null)" }, + { 0x0DFA, 0x14a3, 0x1028, "(null)" }, + { 0x0DFA, 0x0854, 0x103c, "(null)" }, + { 0x0DFA, 0x1631, 0x103c, "(null)" }, + { 0x0DFA, 0x1632, 0x10cf, "(null)" }, + { 0x0DFA, 0x21cf, 0x17aa, "(null)" }, + { 0x0DFA, 0x21d1, 0x17aa, "(null)" }, + { 0x0DFC, 0x0534, 0x1028, "(null)" }, + { 0x0DFC, 0x0535, 0x1028, "(null)" }, + { 0x0DFC, 0x1534, 0x1028, "(null)" }, + { 0x0DFC, 0x1535, 0x1028, "(null)" }, + { 0x0E22, 0x835d, 0x1043, "(null)" }, + { 0x0E22, 0x8386, 0x1043, "(null)" }, + { 0x0E22, 0xc000, 0x1458, "(null)" }, + { 0x0E23, 0xc000, 0x1458, "(null)" }, + { 0x0E24, 0x1006, 0x1019, "(null)" }, + { 0x0E24, 0x8361, 0x1043, "(null)" }, + { 0x0E24, 0x083d, 0x10de, "(null)" }, + { 0x0E24, 0x2320, 0x1462, "(null)" }, + { 0x0E24, 0x2324, 0x1462, "(null)" }, + { 0x0E24, 0x4600, 0x174b, "(null)" }, + { 0x0E24, 0x4602, 0x174b, "(null)" }, + { 0x0E24, 0x907f, 0x1b0a, "(null)" }, + { 0x0E24, 0x9086, 0x1b0a, "(null)" }, + { 0x0E30, 0x7100, 0x1558, "(null)" }, + { 0x0E30, 0x7200, 0x1558, "(null)" }, + { 0x0E30, 0x8687, 0x1558, "(null)" }, + { 0x0E31, 0x5102, 0x1558, "(null)" }, + { 0x0E31, 0x7100, 0x1558, "(null)" }, + { 0x0E31, 0x7200, 0x1558, "(null)" }, + { 0x0E3A, 0x04a4, 0x1028, "(null)" }, + { 0x0E3A, 0x14a4, 0x1028, "(null)" }, + { 0x0E3A, 0x0851, 0x103c, "(null)" }, + { 0x0E3A, 0x1630, 0x103c, "(null)" }, + { 0x0E3A, 0x1633, 0x10cf, "(null)" }, + { 0x0E3B, 0x04a4, 0x1028, "(null)" }, + { 0x0E3B, 0x14a4, 0x1028, "(null)" }, + { 0x0E3B, 0x0852, 0x103c, "(null)" }, + { 0x0E3B, 0x1630, 0x103c, "(null)" }, + { 0x0E3B, 0x1634, 0x10cf, "(null)" }, + { 0x0F00, 0x6025, 0x17aa, "(null)" }, + { 0x0F03, 0x36e8, 0x1458, "(null)" }, + { 0x0FC0, 0x1015, 0x1019, "(null)" }, + { 0x0FC0, 0x8597, 0x1043, "(null)" }, + { 0x0FC0, 0x093d, 0x10de, "(null)" }, + { 0x0FC0, 0x100f, 0x10de, "(null)" }, + { 0x0FC0, 0x275b, 0x1462, "(null)" }, + { 0x0FC0, 0x3b86, 0x1642, "(null)" }, + { 0x0FC0, 0x0640, 0x174b, "(null)" }, + { 0x0FC0, 0xa642, 0x174b, "(null)" }, + { 0x0FC0, 0xa644, 0x174b, "(null)" }, + { 0x0FC0, 0x90c5, 0x1b0a, "(null)" }, + { 0x0FC2, 0x0936, 0x103c, "(null)" }, + { 0x0FC2, 0x83e2, 0x1043, "(null)" }, + { 0x0FC2, 0x8598, 0x1043, "(null)" }, + { 0x0FC2, 0x809e, 0x144d, "(null)" }, + { 0x0FC2, 0x809f, 0x144d, "(null)" }, + { 0x0FC2, 0x2750, 0x1462, "(null)" }, + { 0x0FC2, 0x2751, 0x1462, "(null)" }, + { 0x0FC2, 0x2752, 0x1462, "(null)" }, + { 0x0FC2, 0x2754, 0x1462, "(null)" }, + { 0x0FC2, 0x2755, 0x1462, "(null)" }, + { 0x0FC2, 0x275a, 0x1462, "(null)" }, + { 0x0FC2, 0x275c, 0x1462, "(null)" }, + { 0x0FC2, 0x3b86, 0x1642, "(null)" }, + { 0x0FC2, 0x3b98, 0x1642, "(null)" }, + { 0x0FC2, 0x0630, 0x174b, "(null)" }, + { 0x0FC2, 0xa632, 0x174b, "(null)" }, + { 0x0FC2, 0x6024, 0x17aa, "(null)" }, + { 0x0FC2, 0x6025, 0x17aa, "(null)" }, + { 0x0FC2, 0x1462, 0x275c, "(null)" }, + { 0x0FC6, 0x1017, 0x1019, "(null)" }, + { 0x0FC6, 0x068b, 0x1028, "(null)" }, + { 0x0FC6, 0x0790, 0x1028, "(null)" }, + { 0x0FC6, 0x079a, 0x1028, "(null)" }, + { 0x0FC6, 0x2802, 0x1462, "(null)" }, + { 0x0FC6, 0x2809, 0x1462, "(null)" }, + { 0x0FC6, 0x3c78, 0x1642, "(null)" }, + { 0x0FC8, 0x864e, 0x1043, "(null)" }, + { 0x0FCB, 0x4112, 0x1775, "(null)" }, + { 0x0FCD, 0x1024, 0x152d, "(null)" }, + { 0x0FCD, 0x3800, 0x17aa, "(null)" }, + { 0x0FCD, 0x3801, 0x17aa, "(null)" }, + { 0x0FD1, 0x0686, 0x1025, "(null)" }, + { 0x0FD1, 0x068b, 
0x1025, "(null)" }, + { 0x0FD1, 0x0552, 0x1028, "(null)" }, + { 0x0FD1, 0x0566, 0x1028, "(null)" }, + { 0x0FD1, 0x0578, 0x1028, "(null)" }, + { 0x0FD1, 0x181a, 0x103c, "(null)" }, + { 0x0FD1, 0x181b, 0x103c, "(null)" }, + { 0x0FD1, 0x181d, 0x103c, "(null)" }, + { 0x0FD1, 0x189a, 0x103c, "(null)" }, + { 0x0FD1, 0x1597, 0x1043, "(null)" }, + { 0x0FD1, 0x15a7, 0x1043, "(null)" }, + { 0x0FD1, 0x2103, 0x1043, "(null)" }, + { 0x0FD1, 0x2105, 0x1043, "(null)" }, + { 0x0FD1, 0x2141, 0x1043, "(null)" }, + { 0x0FD1, 0x0718, 0x1071, "(null)" }, + { 0x0FD1, 0xc0cc, 0x144d, "(null)" }, + { 0x0FD1, 0xc0d1, 0x144d, "(null)" }, + { 0x0FD1, 0xc634, 0x144d, "(null)" }, + { 0x0FD1, 0xc650, 0x144d, "(null)" }, + { 0x0FD1, 0x2442, 0x1458, "(null)" }, + { 0x0FD1, 0x2542, 0x1458, "(null)" }, + { 0x0FD1, 0x10b8, 0x1462, "(null)" }, + { 0x0FD1, 0x10c7, 0x1462, "(null)" }, + { 0x0FD1, 0x10cd, 0x1462, "(null)" }, + { 0x0FD1, 0xad31, 0x1462, "(null)" }, + { 0x0FD1, 0xaf12, 0x1462, "(null)" }, + { 0x0FD1, 0x0065, 0x14c0, "(null)" }, + { 0x0FD1, 0x0066, 0x14c0, "(null)" }, + { 0x0FD1, 0x0934, 0x152d, "(null)" }, + { 0x0FD1, 0x0943, 0x152d, "(null)" }, + { 0x0FD1, 0x1150, 0x1558, "(null)" }, + { 0x0FD1, 0x1550, 0x1558, "(null)" }, + { 0x0FD1, 0x365b, 0x17aa, "(null)" }, + { 0x0FD1, 0x3970, 0x17aa, "(null)" }, + { 0x0FD1, 0x3971, 0x17aa, "(null)" }, + { 0x0FD1, 0x3972, 0x17aa, "(null)" }, + { 0x0FD1, 0x3977, 0x17aa, "(null)" }, + { 0x0FD1, 0x20dd, 0x1b0a, "(null)" }, + { 0x0FD2, 0x22db, 0x1019, "(null)" }, + { 0x0FD2, 0x0593, 0x1025, "(null)" }, + { 0x0FD2, 0x0646, 0x1025, "(null)" }, + { 0x0FD2, 0x0648, 0x1025, "(null)" }, + { 0x0FD2, 0x066c, 0x1025, "(null)" }, + { 0x0FD2, 0x0679, 0x1025, "(null)" }, + { 0x0FD2, 0x067d, 0x1025, "(null)" }, + { 0x0FD2, 0x0681, 0x1025, "(null)" }, + { 0x0FD2, 0x0682, 0x1025, "(null)" }, + { 0x0FD2, 0x0683, 0x1025, "(null)" }, + { 0x0FD2, 0x0684, 0x1025, "(null)" }, + { 0x0FD2, 0x0686, 0x1025, "(null)" }, + { 0x0FD2, 0x068b, 0x1025, "(null)" }, + { 0x0FD2, 0x069e, 0x1025, "(null)" }, + { 0x0FD2, 0x071b, 0x1025, "(null)" }, + { 0x0FD2, 0x071d, 0x1025, "(null)" }, + { 0x0FD2, 0x0733, 0x1025, "(null)" }, + { 0x0FD2, 0x0734, 0x1025, "(null)" }, + { 0x0FD2, 0x0735, 0x1025, "(null)" }, + { 0x0FD2, 0x0736, 0x1025, "(null)" }, + { 0x0FD2, 0x054b, 0x1028, "(null)" }, + { 0x0FD2, 0x054f, 0x1028, "(null)" }, + { 0x0FD2, 0x055f, 0x1028, "(null)" }, + { 0x0FD2, 0x0595, 0x1028, "(null)" }, + { 0x0FD2, 0x05b2, 0x1028, "(null)" }, + { 0x0FD2, 0x100d, 0x1043, "(null)" }, + { 0x0FD2, 0x10ac, 0x1043, "(null)" }, + { 0x0FD2, 0x10bc, 0x1043, "(null)" }, + { 0x0FD2, 0x10cc, 0x1043, "(null)" }, + { 0x0FD2, 0x1447, 0x1043, "(null)" }, + { 0x0FD2, 0x15a7, 0x1043, "(null)" }, + { 0x0FD2, 0x8533, 0x1043, "(null)" }, + { 0x0FD2, 0x9097, 0x104d, "(null)" }, + { 0x0FD2, 0x0d9a, 0x105b, "(null)" }, + { 0x0FD2, 0xfb01, 0x1179, "(null)" }, + { 0x0FD2, 0xfb05, 0x1179, "(null)" }, + { 0x0FD2, 0xfb12, 0x1179, "(null)" }, + { 0x0FD2, 0xc0d5, 0x144d, "(null)" }, + { 0x0FD2, 0x1542, 0x1458, "(null)" }, + { 0x0FD2, 0x2442, 0x1458, "(null)" }, + { 0x0FD2, 0x10b7, 0x1462, "(null)" }, + { 0x0FD2, 0x10b8, 0x1462, "(null)" }, + { 0x0FD2, 0x10ba, 0x1462, "(null)" }, + { 0x0FD2, 0x10d3, 0x1462, "(null)" }, + { 0x0FD2, 0xad31, 0x1462, "(null)" }, + { 0x0FD2, 0x0066, 0x14c0, "(null)" }, + { 0x0FD2, 0x0926, 0x152d, "(null)" }, + { 0x0FD2, 0x0933, 0x152d, "(null)" }, + { 0x0FD2, 0x0939, 0x152d, "(null)" }, + { 0x0FD2, 0x0942, 0x152d, "(null)" }, + { 0x0FD2, 0x0946, 0x152d, "(null)" }, + { 0x0FD2, 0x0949, 0x152d, "(null)" }, + { 0x0FD2, 
+ { 0x0FD2, 0x0957, 0x152d, "(null)" },
+ { 0x0FD2, 0x0963, 0x152d, "(null)" },
+ { 0x0FD2, 0x0966, 0x152d, "(null)" },
+ { 0x0FD2, 0x2701, 0x1558, "(null)" },
+ { 0x0FD2, 0x3902, 0x17aa, "(null)" },
+ { 0x0FD2, 0x3904, 0x17aa, "(null)" },
+ { 0x0FD2, 0x3977, 0x17aa, "(null)" },
+ { 0x0FD2, 0x5003, 0x17aa, "(null)" },
+ { 0x0FD2, 0x5012, 0x17aa, "(null)" },
+ { 0x0FD2, 0x0168, 0x1854, "(null)" },
+ { 0x0FD2, 0x0185, 0x1854, "(null)" },
+ { 0x0FD2, 0x1808, 0x1854, "(null)" },
+ { 0x0FD2, 0x20c6, 0x1b0a, "(null)" },
+ { 0x0FD2, 0x20dc, 0x1b0a, "(null)" },
+ { 0x0FD2, 0x20dd, 0x1b0a, "(null)" },
+ { 0x0FD2, 0x20df, 0x1b0a, "(null)" },
+ { 0x0FD2, 0x20e4, 0x1b0a, "(null)" },
+ { 0x0FD2, 0x20e8, 0x1b0a, "(null)" },
+ { 0x0FD2, 0x20ec, 0x1b0a, "(null)" },
+ { 0x0FD2, 0x2228, 0x1b0a, "(null)" },
+ { 0x0FD2, 0x222a, 0x1b0a, "(null)" },
+ { 0x0FD2, 0x222f, 0x1b0a, "(null)" },
+ { 0x0FD2, 0x0010, 0x1bfd, "(null)" },
+ { 0x0FD3, 0x0713, 0x1025, "(null)" },
+ { 0x0FD3, 0x0717, 0x1025, "(null)" },
+ { 0x0FD3, 0x104d, 0x1043, "(null)" },
+ { 0x0FD3, 0x909a, 0x104d, "(null)" },
+ { 0x0FD3, 0x909c, 0x104d, "(null)" },
+ { 0x0FD3, 0x175d, 0x10cf, "(null)" },
+ { 0x0FD3, 0x1764, 0x10cf, "(null)" },
+ { 0x0FD3, 0x1766, 0x10cf, "(null)" },
+ { 0x0FD3, 0x1768, 0x10cf, "(null)" },
+ { 0x0FD3, 0x0065, 0x14c0, "(null)" },
+ { 0x0FD3, 0x0066, 0x14c0, "(null)" },
+ { 0x0FD3, 0x006d, 0x14c0, "(null)" },
+ { 0x0FD3, 0x0923, 0x152d, "(null)" },
+ { 0x0FD3, 0x0924, 0x152d, "(null)" },
+ { 0x0FD3, 0x6747, 0x1a58, "(null)" },
+ { 0x0FD4, 0x0550, 0x1028, "(null)" },
+ { 0x0FD4, 0x0551, 0x1028, "(null)" },
+ { 0x0FD4, 0x057b, 0x1028, "(null)" },
+ { 0x0FD4, 0x0580, 0x1028, "(null)" },
+ { 0x0FD4, 0x05ab, 0x1028, "(null)" },
+ { 0x0FD4, 0x102d, 0x1043, "(null)" },
+ { 0x0FD4, 0x2115, 0x1043, "(null)" },
+ { 0x0FD4, 0x2116, 0x1043, "(null)" },
+ { 0x0FD4, 0x2117, 0x1043, "(null)" },
+ { 0x0FD4, 0x2118, 0x1043, "(null)" },
+ { 0x0FD4, 0x2542, 0x1458, "(null)" },
+ { 0x0FD4, 0x6745, 0x1458, "(null)" },
+ { 0x0FD4, 0x6746, 0x1458, "(null)" },
+ { 0x0FD4, 0x10d7, 0x1462, "(null)" },
+ { 0x0FD4, 0x10d8, 0x1462, "(null)" },
+ { 0x0FD4, 0x3700, 0x1558, "(null)" },
+ { 0x0FD4, 0x5105, 0x1558, "(null)" },
+ { 0x0FD4, 0x7102, 0x1558, "(null)" },
+ { 0x0FD4, 0x3977, 0x17aa, "(null)" },
+ { 0x0FD5, 0x00f2, 0x106b, "(null)" },
+ { 0x0FD5, 0x00fc, 0x106b, "(null)" },
+ { 0x0FD5, 0x010a, 0x106b, "(null)" },
+ { 0x0FD8, 0x0109, 0x106b, "(null)" },
+ { 0x0FD9, 0x104d, 0x1043, "(null)" },
+ { 0x0FD9, 0x10ac, 0x1043, "(null)" },
+ { 0x0FD9, 0x10bd, 0x1043, "(null)" },
+ { 0x0FD9, 0x10b8, 0x1462, "(null)" },
+ { 0x0FD9, 0x0990, 0x152d, "(null)" },
+ { 0x0FD9, 0x0993, 0x152d, "(null)" },
+ { 0x0FD9, 0x0997, 0x152d, "(null)" },
+ { 0x0FD9, 0x1001, 0x152d, "(null)" },
+ { 0x0FD9, 0x1015, 0x152d, "(null)" },
+ { 0x0FD9, 0x2707, 0x1558, "(null)" },
+ { 0x0FD9, 0x3904, 0x17aa, "(null)" },
+ { 0x0FD9, 0x5003, 0x17aa, "(null)" },
+ { 0x0FD9, 0x5014, 0x17aa, "(null)" },
+ { 0x0FD9, 0x20dc, 0x1b0a, "(null)" },
+ { 0x0FD9, 0x20e4, 0x1b0a, "(null)" },
+ { 0x0FDF, 0x0686, 0x1025, "(null)" },
+ { 0x0FDF, 0x068b, 0x1025, "(null)" },
+ { 0x0FDF, 0x100d, 0x1043, "(null)" },
+ { 0x0FDF, 0x10cc, 0x1043, "(null)" },
+ { 0x0FDF, 0x1477, 0x1043, "(null)" },
+ { 0x0FDF, 0x1587, 0x1043, "(null)" },
+ { 0x0FDF, 0x2139, 0x1043, "(null)" },
+ { 0x0FDF, 0x21ca, 0x1043, "(null)" },
+ { 0x0FE0, 0x010b, 0x106b, "(null)" },
+ { 0x0FE1, 0x0646, 0x1025, "(null)" },
+ { 0x0FE1, 0x0648, 0x1025, "(null)" },
+ { 0x0FE1, 0x0679, 0x1025, "(null)" },
+ { 0x0FE1, 0x067d, 0x1025, "(null)" },
+ { 0x0FE1, 0x0686, 0x1025, "(null)" },
+ { 0x0FE1, 0x068b, 0x1025, "(null)" },
+ { 0x0FE1, 0x069e, 0x1025, "(null)" },
+ { 0x0FE1, 0x0713, 0x1025, "(null)" },
+ { 0x0FE1, 0x0717, 0x1025, "(null)" },
+ { 0x0FE1, 0x071b, 0x1025, "(null)" },
+ { 0x0FE1, 0x071d, 0x1025, "(null)" },
+ { 0x0FE1, 0x05b2, 0x1028, "(null)" },
+ { 0x0FE1, 0x8585, 0x1043, "(null)" },
+ { 0x0FE1, 0x85d3, 0x1043, "(null)" },
+ { 0x0FE1, 0x2442, 0x1458, "(null)" },
+ { 0x0FE1, 0xa442, 0x1458, "(null)" },
+ { 0x0FE1, 0x10b8, 0x1462, "(null)" },
+ { 0x0FE1, 0x1013, 0x152d, "(null)" },
+ { 0x0FE1, 0x1056, 0x152d, "(null)" },
+ { 0x0FE1, 0x20dc, 0x1b0a, "(null)" },
+ { 0x0FE1, 0x20e4, 0x1b0a, "(null)" },
+ { 0x0FE2, 0x4002, 0x1071, "(null)" },
+ { 0x0FE2, 0x4033, 0x1071, "(null)" },
+ { 0x0FE2, 0x4037, 0x1071, "(null)" },
+ { 0x0FE2, 0x4038, 0x1071, "(null)" },
+ { 0x0FE2, 0x4039, 0x1071, "(null)" },
+ { 0x0FE2, 0x403d, 0x1071, "(null)" },
+ { 0x0FE2, 0x8338, 0x10f7, "(null)" },
+ { 0x0FE2, 0x0081, 0x14c0, "(null)" },
+ { 0x0FE2, 0x1043, 0x152d, "(null)" },
+ { 0x0FE2, 0x1076, 0x152d, "(null)" },
+ { 0x0FE2, 0x8400, 0x1558, "(null)" },
+ { 0x0FE2, 0x1704, 0x172f, "(null)" },
+ { 0x0FE2, 0x1705, 0x172f, "(null)" },
+ { 0x0FE3, 0x999b, 0x1019, "(null)" },
+ { 0x0FE3, 0x99a2, 0x1019, "(null)" },
+ { 0x0FE3, 0x05d6, 0x1028, "(null)" },
+ { 0x0FE3, 0x2b16, 0x103c, "(null)" },
+ { 0x0FE3, 0x11ed, 0x1043, "(null)" },
+ { 0x0FE3, 0x11fd, 0x1043, "(null)" },
+ { 0x0FE3, 0x12dd, 0x1043, "(null)" },
+ { 0x0FE3, 0x21ea, 0x1043, "(null)" },
+ { 0x0FE3, 0x221a, 0x1043, "(null)" },
+ { 0x0FE3, 0x222a, 0x1043, "(null)" },
+ { 0x0FE3, 0xfa11, 0x1179, "(null)" },
+ { 0x0FE3, 0xfa13, 0x1179, "(null)" },
+ { 0x0FE3, 0xfa18, 0x1179, "(null)" },
+ { 0x0FE3, 0xfa19, 0x1179, "(null)" },
+ { 0x0FE3, 0xfa32, 0x1179, "(null)" },
+ { 0x0FE3, 0xfa33, 0x1179, "(null)" },
+ { 0x0FE3, 0xfa36, 0x1179, "(null)" },
+ { 0x0FE3, 0xfa38, 0x1179, "(null)" },
+ { 0x0FE3, 0xfa42, 0x1179, "(null)" },
+ { 0x0FE3, 0xfa43, 0x1179, "(null)" },
+ { 0x0FE3, 0xfa45, 0x1179, "(null)" },
+ { 0x0FE3, 0xfa47, 0x1179, "(null)" },
+ { 0x0FE3, 0xfa49, 0x1179, "(null)" },
+ { 0x0FE3, 0xfa78, 0x1179, "(null)" },
+ { 0x0FE3, 0xfa79, 0x1179, "(null)" },
+ { 0x0FE3, 0x2442, 0x1458, "(null)" },
+ { 0x0FE3, 0x1002, 0x152d, "(null)" },
+ { 0x0FE3, 0x1009, 0x152d, "(null)" },
+ { 0x0FE3, 0x3675, 0x17aa, "(null)" },
+ { 0x0FE3, 0x3805, 0x17aa, "(null)" },
+ { 0x0FE3, 0x3809, 0x17aa, "(null)" },
+ { 0x0FE3, 0x5003, 0x17aa, "(null)" },
+ { 0x0FE3, 0x501a, 0x17aa, "(null)" },
+ { 0x0FE3, 0x0193, 0x1854, "(null)" },
+ { 0x0FE3, 0x1051, 0x1854, "(null)" },
+ { 0x0FE3, 0x5589, 0x1991, "(null)" },
+ { 0x0FE3, 0x20e4, 0x1b0a, "(null)" },
+ { 0x0FE3, 0x2106, 0x1b0a, "(null)" },
+ { 0x0FE3, 0x2109, 0x1b0a, "(null)" },
+ { 0x0FE3, 0x210e, 0x1b0a, "(null)" },
+ { 0x0FE3, 0x2202, 0x1b0a, "(null)" },
+ { 0x0FE3, 0x2222, 0x1b0a, "(null)" },
+ { 0x0FE4, 0x0781, 0x1025, "(null)" },
+ { 0x0FE4, 0x0798, 0x1025, "(null)" },
+ { 0x0FE4, 0x0799, 0x1025, "(null)" },
+ { 0x0FE4, 0x079b, 0x1025, "(null)" },
+ { 0x0FE4, 0x079c, 0x1025, "(null)" },
+ { 0x0FE4, 0x079d, 0x1025, "(null)" },
+ { 0x0FE4, 0x0814, 0x1025, "(null)" },
+ { 0x0FE4, 0x0816, 0x1025, "(null)" },
+ { 0x0FE4, 0x0830, 0x1025, "(null)" },
+ { 0x0FE4, 0x05a9, 0x1028, "(null)" },
+ { 0x0FE4, 0x05ac, 0x1028, "(null)" },
+ { 0x0FE4, 0x05b0, 0x1028, "(null)" },
+ { 0x0FE4, 0x05f6, 0x1028, "(null)" },
+ { 0x0FE4, 0x05fa, 0x1028, "(null)" },
+ { 0x0FE4, 0x05fc, 0x1028, "(null)" },
+ { 0x0FE4, 0x05fe, 0x1028, "(null)" },
+ { 0x0FE4, 0x1967, 0x103c, "(null)" },
+ { 0x0FE4, 0x1968, 0x103c, "(null)" },
+ { 0x0FE4, 0x219b, 0x103c, "(null)" },
+ { 0x0FE4, 0x219d, 0x103c, "(null)" },
+ { 0x0FE4, 0x11cd, 0x1043, "(null)" },
+ { 0x0FE4, 0x127d, 0x1043, "(null)" },
+ { 0x0FE4, 0x129d, 0x1043, "(null)" },
+ { 0x0FE4, 0x1477, 0x1043, "(null)" },
+ { 0x0FE4, 0x0720, 0x1071, "(null)" },
+ { 0x0FE4, 0xc740, 0x144d, "(null)" },
+ { 0x0FE4, 0x1552, 0x1458, "(null)" },
+ { 0x0FE4, 0x2452, 0x1458, "(null)" },
+ { 0x0FE4, 0x3552, 0x1458, "(null)" },
+ { 0x0FE4, 0x10e3, 0x1462, "(null)" },
+ { 0x0FE4, 0x10e6, 0x1462, "(null)" },
+ { 0x0FE4, 0x10e7, 0x1462, "(null)" },
+ { 0x0FE4, 0x1003, 0x152d, "(null)" },
+ { 0x0FE4, 0x1010, 0x152d, "(null)" },
+ { 0x0FE4, 0x1029, 0x152d, "(null)" },
+ { 0x0FE4, 0x0650, 0x1558, "(null)" },
+ { 0x0FE4, 0x0681, 0x1558, "(null)" },
+ { 0x0FE4, 0x5182, 0x1558, "(null)" },
+ { 0x0FE4, 0x1703, 0x172f, "(null)" },
+ { 0x0FE4, 0x05fa, 0x17aa, "(null)" },
+ { 0x0FE4, 0x3683, 0x17aa, "(null)" },
+ { 0x0FE4, 0x3800, 0x17aa, "(null)" },
+ { 0x0FE4, 0x3801, 0x17aa, "(null)" },
+ { 0x0FE4, 0x3802, 0x17aa, "(null)" },
+ { 0x0FE9, 0x011e, 0x106b, "(null)" },
+ { 0x0FE9, 0x0130, 0x106b, "(null)" },
+ { 0x0FEA, 0x011f, 0x106b, "(null)" },
+ { 0x0FEC, 0x1802, 0x172f, "(null)" },
+ { 0x0FED, 0x5530, 0x1b50, "(null)" },
+ { 0x0FEE, 0x10e0, 0x1043, "(null)" },
+ { 0x0FEE, 0x11be, 0x1043, "(null)" },
+ { 0x0FEE, 0x11de, 0x1043, "(null)" },
+ { 0x0FEE, 0x133e, 0x1043, "(null)" },
+ { 0x0FEE, 0x137e, 0x1043, "(null)" },
+ { 0x0FEE, 0x146e, 0x1043, "(null)" },
+ { 0x0FF2, 0x099d, 0x10de, "(null)" },
+ { 0x0FF2, 0x1012, 0x10de, "(null)" },
+ { 0x0FF3, 0x1106, 0x103c, "(null)" },
+ { 0x0FF3, 0x1162, 0x103c, "(null)" },
+ { 0x0FF3, 0x1106, 0x10de, "(null)" },
+ { 0x0FF3, 0x1162, 0x10de, "(null)" },
+ { 0x0FF3, 0x1162, 0x15c3, "(null)" },
+ { 0x0FF6, 0x05cc, 0x1028, "(null)" },
+ { 0x0FF6, 0x060d, 0x1028, "(null)" },
+ { 0x0FF6, 0x15cc, 0x1028, "(null)" },
+ { 0x0FF6, 0x1909, 0x103c, "(null)" },
+ { 0x0FF6, 0x190a, 0x103c, "(null)" },
+ { 0x0FF6, 0x197a, 0x103c, "(null)" },
+ { 0x0FF6, 0x197b, 0x103c, "(null)" },
+ { 0x0FF6, 0x2253, 0x103c, "(null)" },
+ { 0x0FF6, 0x2254, 0x103c, "(null)" },
+ { 0x0FF6, 0x2255, 0x103c, "(null)" },
+ { 0x0FF6, 0x2256, 0x103c, "(null)" },
+ { 0x0FF6, 0x8104, 0x103c, "(null)" },
+ { 0x0FF6, 0x22fa, 0x1043, "(null)" },
+ { 0x0FF6, 0x17ee, 0x10cf, "(null)" },
+ { 0x0FF6, 0x5106, 0x1558, "(null)" },
+ { 0x0FF6, 0x5281, 0x1558, "(null)" },
+ { 0x0FF6, 0x7104, 0x1558, "(null)" },
+ { 0x0FF6, 0x7481, 0x1558, "(null)" },
+ { 0x0FF6, 0x7500, 0x1558, "(null)" },
+ { 0x0FF6, 0x7700, 0x1558, "(null)" },
+ { 0x0FF6, 0x2210, 0x17aa, "(null)" },
+ { 0x0FF6, 0x2211, 0x17aa, "(null)" },
+ { 0x0FF6, 0x221a, 0x17aa, "(null)" },
+ { 0x0FF6, 0x221e, 0x17aa, "(null)" },
+ { 0x0FF8, 0x176a, 0x10cf, "(null)" },
+ { 0x0FF9, 0x096f, 0x10de, "(null)" },
+ { 0x0FFA, 0x094b, 0x103c, "(null)" },
+ { 0x0FFA, 0x094b, 0x10de, "(null)" },
+ { 0x0FFB, 0x053e, 0x1028, "(null)" },
+ { 0x0FFB, 0x153e, 0x1028, "(null)" },
+ { 0x0FFB, 0x176b, 0x103c, "(null)" },
+ { 0x0FFB, 0x175e, 0x10cf, "(null)" },
+ { 0x0FFB, 0x175f, 0x10cf, "(null)" },
+ { 0x0FFB, 0x10db, 0x1462, "(null)" },
+ { 0x0FFB, 0x21f5, 0x17aa, "(null)" },
+ { 0x0FFB, 0x21f6, 0x17aa, "(null)" },
+ { 0x0FFC, 0x053e, 0x1028, "(null)" },
+ { 0x0FFC, 0x153e, 0x1028, "(null)" },
+ { 0x0FFC, 0x176b, 0x103c, "(null)" },
+ { 0x0FFC, 0x175e, 0x10cf, "(null)" },
+ { 0x0FFC, 0x8338, 0x10f7, "(null)" },
+ { 0x0FFC, 0x10db, 0x1462, "(null)" },
+ { 0x0FFC, 0x5106, 0x1558, "(null)" },
"(null)" }, + { 0x0FFC, 0x7104, 0x1558, "(null)" }, + { 0x0FFC, 0x21f5, 0x17aa, "(null)" }, + { 0x0FFC, 0x21f6, 0x17aa, "(null)" }, + { 0x0FFD, 0x0967, 0x103c, "(null)" }, + { 0x0FFD, 0x0967, 0x10de, "(null)" }, + { 0x0FFE, 0x094c, 0x103c, "(null)" }, + { 0x0FFE, 0x094c, 0x10de, "(null)" }, + { 0x0FFF, 0x094a, 0x103c, "(null)" }, + { 0x0FFF, 0x094a, 0x10de, "(null)" }, + { 0x1001, 0x1001, 0x10de, "(null)" }, + { 0x1001, 0x1078, 0x10de, "(null)" }, + { 0x1004, 0x068b, 0x1028, "(null)" }, + { 0x1004, 0x0790, 0x1028, "(null)" }, + { 0x1004, 0x079a, 0x1028, "(null)" }, + { 0x1004, 0x104b, 0x10de, "(null)" }, + { 0x1004, 0x105a, 0x10de, "(null)" }, + { 0x1005, 0x068b, 0x1028, "(null)" }, + { 0x1005, 0x0790, 0x1028, "(null)" }, + { 0x1005, 0x079a, 0x1028, "(null)" }, + { 0x1007, 0x068b, 0x1028, "(null)" }, + { 0x1007, 0x0790, 0x1028, "(null)" }, + { 0x1007, 0x079a, 0x1028, "(null)" }, + { 0x1008, 0x068b, 0x1028, "(null)" }, + { 0x1008, 0x0790, 0x1028, "(null)" }, + { 0x1008, 0x079a, 0x1028, "(null)" }, + { 0x100A, 0x068b, 0x1028, "(null)" }, + { 0x100A, 0x0790, 0x1028, "(null)" }, + { 0x100A, 0x079a, 0x1028, "(null)" }, + { 0x100C, 0x068b, 0x1028, "(null)" }, + { 0x100C, 0x0790, 0x1028, "(null)" }, + { 0x100C, 0x079a, 0x1028, "(null)" }, + { 0x1021, 0x097d, 0x10de, "(null)" }, + { 0x1022, 0x0982, 0x103c, "(null)" }, + { 0x1022, 0x0982, 0x10de, "(null)" }, + { 0x1023, 0x097e, 0x10de, "(null)" }, + { 0x1024, 0x0983, 0x103c, "(null)" }, + { 0x1024, 0x0983, 0x10de, "(null)" }, + { 0x1028, 0x1015, 0x10de, "(null)" }, + { 0x102D, 0x106c, 0x10de, "(null)" }, + { 0x103A, 0x1036, 0x103c, "(null)" }, + { 0x103A, 0x1036, 0x10de, "(null)" }, + { 0x103C, 0x1095, 0x103c, "(null)" }, + { 0x103C, 0x1095, 0x10de, "(null)" }, + { 0x1040, 0x83a0, 0x1043, "(null)" }, + { 0x1040, 0x83bd, 0x1043, "(null)" }, + { 0x1040, 0x83c1, 0x1043, "(null)" }, + { 0x1040, 0x0915, 0x10de, "(null)" }, + { 0x1040, 0xc000, 0x1458, "(null)" }, + { 0x1040, 0x2592, 0x1462, "(null)" }, + { 0x1040, 0x2593, 0x1462, "(null)" }, + { 0x1040, 0x259a, 0x1462, "(null)" }, + { 0x1040, 0x3a98, 0x1642, "(null)" }, + { 0x1040, 0x3b42, 0x1642, "(null)" }, + { 0x1040, 0x3214, 0x174b, "(null)" }, + { 0x1040, 0x90aa, 0x1b0a, "(null)" }, + { 0x1042, 0x2594, 0x1462, "(null)" }, + { 0x1042, 0x2595, 0x1462, "(null)" }, + { 0x1042, 0x2596, 0x1462, "(null)" }, + { 0x1042, 0x3a98, 0x1642, "(null)" }, + { 0x1042, 0x3b42, 0x1642, "(null)" }, + { 0x1042, 0x1214, 0x174b, "(null)" }, + { 0x1042, 0x1222, 0x174b, "(null)" }, + { 0x1048, 0x1014, 0x1019, "(null)" }, + { 0x1048, 0x5afd, 0x107d, "(null)" }, + { 0x1048, 0x2597, 0x1462, "(null)" }, + { 0x1048, 0x2598, 0x1462, "(null)" }, + { 0x1048, 0x2599, 0x1462, "(null)" }, + { 0x1048, 0x3a98, 0x1642, "(null)" }, + { 0x1048, 0x3b42, 0x1642, "(null)" }, + { 0x1048, 0x3c26, 0x1642, "(null)" }, + { 0x1048, 0x0605, 0x174b, "(null)" }, + { 0x1048, 0xa601, 0x174b, "(null)" }, + { 0x1049, 0x1016, 0x1019, "(null)" }, + { 0x1049, 0x83e6, 0x1043, "(null)" }, + { 0x1049, 0x83e7, 0x1043, "(null)" }, + { 0x1049, 0x83ea, 0x1043, "(null)" }, + { 0x1049, 0x83eb, 0x1043, "(null)" }, + { 0x1049, 0x83ec, 0x1043, "(null)" }, + { 0x1049, 0x8473, 0x1043, "(null)" }, + { 0x1049, 0x0977, 0x10de, "(null)" }, + { 0x1049, 0x809c, 0x144d, "(null)" }, + { 0x1049, 0x809d, 0x144d, "(null)" }, + { 0x1049, 0x259d, 0x1462, "(null)" }, + { 0x1049, 0x259e, 0x1462, "(null)" }, + { 0x1049, 0xb591, 0x1462, "(null)" }, + { 0x1049, 0x3a98, 0x1642, "(null)" }, + { 0x1049, 0x3b42, 0x1642, "(null)" }, + { 0x1049, 0x0620, 0x174b, "(null)" }, + { 0x1049, 0xa621, 
0x174b, "(null)" }, + { 0x1049, 0xa622, 0x174b, "(null)" }, + { 0x1049, 0x90b7, 0x1b0a, "(null)" }, + { 0x1049, 0x90be, 0x1b0a, "(null)" }, + { 0x1049, 0x1462, 0x259d, "(null)" }, + { 0x1049, 0x1462, 0x259e, "(null)" }, + { 0x104B, 0x1021, 0x1019, "(null)" }, + { 0x104B, 0x0790, 0x1028, "(null)" }, + { 0x104B, 0x844c, 0x1043, "(null)" }, + { 0x104B, 0x846b, 0x1043, "(null)" }, + { 0x104B, 0x102a, 0x10de, "(null)" }, + { 0x104B, 0x80a1, 0x144d, "(null)" }, + { 0x104B, 0x80a3, 0x144d, "(null)" }, + { 0x104B, 0xb590, 0x1462, "(null)" }, + { 0x104B, 0xb592, 0x1462, "(null)" }, + { 0x104B, 0x0625, 0x174b, "(null)" }, + { 0x104B, 0xa625, 0x174b, "(null)" }, + { 0x104C, 0x1027, 0x1019, "(null)" }, + { 0x104C, 0x0790, 0x1028, "(null)" }, + { 0x104C, 0x109b, 0x1028, "(null)" }, + { 0x104C, 0x1086, 0x10de, "(null)" }, + { 0x104C, 0x80a5, 0x144d, "(null)" }, + { 0x104C, 0x80a7, 0x144d, "(null)" }, + { 0x104C, 0x2700, 0x1462, "(null)" }, + { 0x104C, 0x2701, 0x1462, "(null)" }, + { 0x104C, 0x2702, 0x1462, "(null)" }, + { 0x104C, 0xb593, 0x1462, "(null)" }, + { 0x104C, 0xb594, 0x1462, "(null)" }, + { 0x104C, 0xb595, 0x1462, "(null)" }, + { 0x104C, 0xb596, 0x1462, "(null)" }, + { 0x104C, 0xb597, 0x1462, "(null)" }, + { 0x104C, 0xb598, 0x1462, "(null)" }, + { 0x104C, 0xb599, 0x1462, "(null)" }, + { 0x104C, 0x3a98, 0x1642, "(null)" }, + { 0x104C, 0x3b42, 0x1642, "(null)" }, + { 0x104C, 0x214a, 0x174b, "(null)" }, + { 0x104C, 0xa705, 0x174b, "(null)" }, + { 0x1050, 0x0487, 0x1025, "(null)" }, + { 0x1050, 0x0488, 0x1025, "(null)" }, + { 0x1050, 0x0501, 0x1025, "(null)" }, + { 0x1050, 0x0503, 0x1025, "(null)" }, + { 0x1050, 0x0505, 0x1025, "(null)" }, + { 0x1050, 0x0507, 0x1025, "(null)" }, + { 0x1050, 0x0509, 0x1025, "(null)" }, + { 0x1050, 0x0511, 0x1025, "(null)" }, + { 0x1050, 0x0512, 0x1025, "(null)" }, + { 0x1050, 0x053a, 0x1025, "(null)" }, + { 0x1050, 0x054e, 0x1025, "(null)" }, + { 0x1050, 0x0550, 0x1025, "(null)" }, + { 0x1050, 0x055a, 0x1025, "(null)" }, + { 0x1050, 0x055c, 0x1025, "(null)" }, + { 0x1050, 0x0568, 0x1025, "(null)" }, + { 0x1050, 0x056a, 0x1025, "(null)" }, + { 0x1050, 0x056b, 0x1025, "(null)" }, + { 0x1050, 0x056c, 0x1025, "(null)" }, + { 0x1050, 0x0570, 0x1025, "(null)" }, + { 0x1050, 0x0572, 0x1025, "(null)" }, + { 0x1050, 0x0573, 0x1025, "(null)" }, + { 0x1050, 0x0574, 0x1025, "(null)" }, + { 0x1050, 0x0575, 0x1025, "(null)" }, + { 0x1050, 0x0576, 0x1025, "(null)" }, + { 0x1050, 0x0578, 0x1025, "(null)" }, + { 0x1050, 0x0579, 0x1025, "(null)" }, + { 0x1050, 0x057a, 0x1025, "(null)" }, + { 0x1050, 0x057b, 0x1025, "(null)" }, + { 0x1050, 0x0580, 0x1025, "(null)" }, + { 0x1050, 0x0581, 0x1025, "(null)" }, + { 0x1050, 0x058b, 0x1025, "(null)" }, + { 0x1050, 0x058c, 0x1025, "(null)" }, + { 0x1050, 0x0593, 0x1025, "(null)" }, + { 0x1050, 0x060d, 0x1025, "(null)" }, + { 0x1050, 0x060f, 0x1025, "(null)" }, + { 0x1050, 0x0611, 0x1025, "(null)" }, + { 0x1050, 0x1050, 0x1025, "(null)" }, + { 0x1050, 0x0522, 0x1028, "(null)" }, + { 0x1050, 0x0579, 0x1028, "(null)" }, + { 0x1050, 0x184d, 0x103c, "(null)" }, + { 0x1050, 0x338a, 0x103c, "(null)" }, + { 0x1050, 0x338b, 0x103c, "(null)" }, + { 0x1050, 0x338c, 0x103c, "(null)" }, + { 0x1050, 0x1622, 0x1043, "(null)" }, + { 0x1050, 0x1662, 0x1043, "(null)" }, + { 0x1050, 0x1682, 0x1043, "(null)" }, + { 0x1050, 0x16f2, 0x1043, "(null)" }, + { 0x1050, 0x1722, 0x1043, "(null)" }, + { 0x1050, 0x1742, 0x1043, "(null)" }, + { 0x1050, 0x84cf, 0x1043, "(null)" }, + { 0x1050, 0x9089, 0x104d, "(null)" }, + { 0x1050, 0x908a, 0x104d, "(null)" }, + { 0x1050, 
0x908b, 0x104d, "(null)" }, + { 0x1050, 0x1635, 0x10cf, "(null)" }, + { 0x1050, 0x1655, 0x10cf, "(null)" }, + { 0x1050, 0xfc01, 0x1179, "(null)" }, + { 0x1050, 0xfc31, 0x1179, "(null)" }, + { 0x1050, 0xfc50, 0x1179, "(null)" }, + { 0x1050, 0xfc61, 0x1179, "(null)" }, + { 0x1050, 0xfc71, 0x1179, "(null)" }, + { 0x1050, 0xfc90, 0x1179, "(null)" }, + { 0x1050, 0xfcc0, 0x1179, "(null)" }, + { 0x1050, 0xfcd0, 0x1179, "(null)" }, + { 0x1050, 0xfce2, 0x1179, "(null)" }, + { 0x1050, 0xfcf2, 0x1179, "(null)" }, + { 0x1050, 0xfd16, 0x1179, "(null)" }, + { 0x1050, 0xfd40, 0x1179, "(null)" }, + { 0x1050, 0xfd50, 0x1179, "(null)" }, + { 0x1050, 0xfd52, 0x1179, "(null)" }, + { 0x1050, 0xfd61, 0x1179, "(null)" }, + { 0x1050, 0xfd71, 0x1179, "(null)" }, + { 0x1050, 0xfdd0, 0x1179, "(null)" }, + { 0x1050, 0xfdd2, 0x1179, "(null)" }, + { 0x1050, 0xc0a0, 0x144d, "(null)" }, + { 0x1050, 0xc0b2, 0x144d, "(null)" }, + { 0x1050, 0xc0b6, 0x144d, "(null)" }, + { 0x1050, 0xc597, 0x144d, "(null)" }, + { 0x1050, 0x1132, 0x1458, "(null)" }, + { 0x1050, 0x108c, 0x1462, "(null)" }, + { 0x1050, 0x1094, 0x1462, "(null)" }, + { 0x1050, 0x0a91, 0x16f3, "(null)" }, + { 0x1050, 0x0c9f, 0x16f3, "(null)" }, + { 0x1050, 0x1050, 0x16f3, "(null)" }, + { 0x1050, 0x3652, 0x17aa, "(null)" }, + { 0x1050, 0x397d, 0x17aa, "(null)" }, + { 0x1050, 0x397f, 0x17aa, "(null)" }, + { 0x1050, 0x10e5, 0x17c0, "(null)" }, + { 0x1050, 0x10ec, 0x17c0, "(null)" }, + { 0x1050, 0x10f3, 0x17c0, "(null)" }, + { 0x1050, 0x0865, 0x1854, "(null)" }, + { 0x1050, 0x0871, 0x1854, "(null)" }, + { 0x1050, 0x1791, 0x1854, "(null)" }, + { 0x1050, 0x3001, 0x1854, "(null)" }, + { 0x1050, 0x3004, 0x1854, "(null)" }, + { 0x1050, 0x5584, 0x1991, "(null)" }, + { 0x1050, 0x2002, 0x1bab, "(null)" }, + { 0x1050, 0x8005, 0x1bfd, "(null)" }, + { 0x1050, 0x1a86, 0x4352, "(null)" }, + { 0x1050, 0x4352, 0x8986, "(null)" }, + { 0x1051, 0x1762, 0x1043, "(null)" }, + { 0x1051, 0xc0b6, 0x144d, "(null)" }, + { 0x1051, 0xc0b7, 0x144d, "(null)" }, + { 0x1051, 0xc606, 0x144d, "(null)" }, + { 0x1051, 0x1094, 0x1462, "(null)" }, + { 0x1051, 0x0892, 0x1854, "(null)" }, + { 0x1051, 0x1797, 0x1854, "(null)" }, + { 0x1051, 0x3006, 0x1854, "(null)" }, + { 0x1051, 0x300c, 0x1854, "(null)" }, + { 0x1051, 0x3011, 0x1854, "(null)" }, + { 0x1052, 0x109c, 0x1462, "(null)" }, + { 0x1052, 0x10aa, 0x1462, "(null)" }, + { 0x1054, 0x0511, 0x1028, "(null)" }, + { 0x1054, 0x1656, 0x10cf, "(null)" }, + { 0x1054, 0x1657, 0x10cf, "(null)" }, + { 0x1054, 0xfcc0, 0x1179, "(null)" }, + { 0x1054, 0x1100, 0x1458, "(null)" }, + { 0x1054, 0x1125, 0x1458, "(null)" }, + { 0x1054, 0x1094, 0x1462, "(null)" }, + { 0x1054, 0x109c, 0x1462, "(null)" }, + { 0x1054, 0x10aa, 0x1462, "(null)" }, + { 0x1054, 0x397d, 0x17aa, "(null)" }, + { 0x1054, 0x397f, 0x17aa, "(null)" }, + { 0x1054, 0x2013, 0x1854, "(null)" }, + { 0x1055, 0x908a, 0x104d, "(null)" }, + { 0x1055, 0x908b, 0x104d, "(null)" }, + { 0x1056, 0x0493, 0x1028, "(null)" }, + { 0x1056, 0x0494, 0x1028, "(null)" }, + { 0x1056, 0x04e4, 0x1028, "(null)" }, + { 0x1056, 0x1493, 0x1028, "(null)" }, + { 0x1056, 0x1494, 0x1028, "(null)" }, + { 0x1056, 0x14e4, 0x1028, "(null)" }, + { 0x1056, 0x1636, 0x10cf, "(null)" }, + { 0x1056, 0x21d2, 0x17aa, "(null)" }, + { 0x1056, 0x21d3, 0x17aa, "(null)" }, + { 0x1057, 0xc0a6, 0x144d, "(null)" }, + { 0x1057, 0xc0af, 0x144d, "(null)" }, + { 0x1057, 0xc0b0, 0x144d, "(null)" }, + { 0x1057, 0x21ce, 0x17aa, "(null)" }, + { 0x1057, 0x21cf, 0x17aa, "(null)" }, + { 0x1057, 0x21d0, 0x17aa, "(null)" }, + { 0x1057, 0x21d1, 0x17aa, "(null)" }, + { 
0x1058, 0x2aed, 0x103c, "(null)" }, + { 0x1058, 0x2af1, 0x103c, "(null)" }, + { 0x1058, 0x10ac, 0x1043, "(null)" }, + { 0x1058, 0x10bc, 0x1043, "(null)" }, + { 0x1058, 0x112d, 0x1043, "(null)" }, + { 0x1058, 0x1457, 0x1043, "(null)" }, + { 0x1058, 0x1537, 0x1043, "(null)" }, + { 0x1058, 0x1652, 0x1043, "(null)" }, + { 0x1058, 0x2130, 0x1043, "(null)" }, + { 0x1058, 0x2133, 0x1043, "(null)" }, + { 0x1058, 0x8536, 0x1043, "(null)" }, + { 0x1058, 0xc652, 0x144d, "(null)" }, + { 0x1058, 0xaa5c, 0x1462, "(null)" }, + { 0x1058, 0xaa62, 0x1462, "(null)" }, + { 0x1058, 0xaa63, 0x1462, "(null)" }, + { 0x1058, 0xaa83, 0x1462, "(null)" }, + { 0x1058, 0xac71, 0x1462, "(null)" }, + { 0x1058, 0xac79, 0x1462, "(null)" }, + { 0x1058, 0xac7a, 0x1462, "(null)" }, + { 0x1058, 0xac93, 0x1462, "(null)" }, + { 0x1058, 0x0c9b, 0x16f3, "(null)" }, + { 0x1058, 0x0c9c, 0x16f3, "(null)" }, + { 0x1058, 0x367a, 0x17aa, "(null)" }, + { 0x1058, 0x3682, 0x17aa, "(null)" }, + { 0x1058, 0x3687, 0x17aa, "(null)" }, + { 0x1058, 0x3692, 0x17aa, "(null)" }, + { 0x1058, 0x3695, 0x17aa, "(null)" }, + { 0x1058, 0x36a8, 0x17aa, "(null)" }, + { 0x1058, 0x36ac, 0x17aa, "(null)" }, + { 0x1058, 0x36ad, 0x17aa, "(null)" }, + { 0x1058, 0x3901, 0x17aa, "(null)" }, + { 0x1058, 0x3902, 0x17aa, "(null)" }, + { 0x1058, 0x3903, 0x17aa, "(null)" }, + { 0x1058, 0x3977, 0x17aa, "(null)" }, + { 0x1058, 0x397d, 0x17aa, "(null)" }, + { 0x1058, 0x3983, 0x17aa, "(null)" }, + { 0x1058, 0x5001, 0x17aa, "(null)" }, + { 0x1058, 0x5003, 0x17aa, "(null)" }, + { 0x1058, 0x5005, 0x17aa, "(null)" }, + { 0x1058, 0x5007, 0x17aa, "(null)" }, + { 0x1058, 0x500f, 0x17aa, "(null)" }, + { 0x1058, 0x5012, 0x17aa, "(null)" }, + { 0x1058, 0x705a, 0x17aa, "(null)" }, + { 0x1058, 0x90c2, 0x1b0a, "(null)" }, + { 0x1058, 0x90c7, 0x1b0a, "(null)" }, + { 0x1058, 0x0009, 0x1bfd, "(null)" }, + { 0x1058, 0x3682, 0x705a, "(null)" }, + { 0x1059, 0x300c, 0x1854, "(null)" }, + { 0x1059, 0x3011, 0x1854, "(null)" }, + { 0x1059, 0x3017, 0x1854, "(null)" }, + { 0x105A, 0x0488, 0x1025, "(null)" }, + { 0x105A, 0x0505, 0x1025, "(null)" }, + { 0x105A, 0x0507, 0x1025, "(null)" }, + { 0x105A, 0x0512, 0x1025, "(null)" }, + { 0x105A, 0x055a, 0x1025, "(null)" }, + { 0x105A, 0x0611, 0x1025, "(null)" }, + { 0x105A, 0x0664, 0x1025, "(null)" }, + { 0x105A, 0x0579, 0x1028, "(null)" }, + { 0x105A, 0x1898, 0x103c, "(null)" }, + { 0x105A, 0x2aed, 0x103c, "(null)" }, + { 0x105A, 0x2af1, 0x103c, "(null)" }, + { 0x105A, 0x2111, 0x1043, "(null)" }, + { 0x105A, 0x2112, 0x1043, "(null)" }, + { 0x105A, 0x2129, 0x1043, "(null)" }, + { 0x105A, 0x851d, 0x1043, "(null)" }, + { 0x105A, 0x0930, 0x10de, "(null)" }, + { 0x105A, 0x2661, 0x1462, "(null)" }, + { 0x105B, 0x2afb, 0x103c, "(null)" }, + { 0x105B, 0xaa63, 0x1462, "(null)" }, + { 0x105B, 0xaa83, 0x1462, "(null)" }, + { 0x105B, 0xaa86, 0x1462, "(null)" }, + { 0x105B, 0xac13, 0x1462, "(null)" }, + { 0x105B, 0xac79, 0x1462, "(null)" }, + { 0x105B, 0xac7e, 0x1462, "(null)" }, + { 0x105B, 0xac93, 0x1462, "(null)" }, + { 0x105B, 0xac96, 0x1462, "(null)" }, + { 0x105B, 0x309d, 0x17aa, "(null)" }, + { 0x105B, 0x30b1, 0x17aa, "(null)" }, + { 0x105B, 0x30f3, 0x17aa, "(null)" }, + { 0x105B, 0x36a1, 0x17aa, "(null)" }, + { 0x105B, 0x90c8, 0x1b0a, "(null)" }, + { 0x105B, 0x90cd, 0x1b0a, "(null)" }, + { 0x105B, 0x90ce, 0x1b0a, "(null)" }, + { 0x105B, 0x90d0, 0x1b0a, "(null)" }, + { 0x107C, 0x102f, 0x103c, "(null)" }, + { 0x107C, 0x102f, 0x107c, "(null)" }, + { 0x107C, 0x102f, 0x10de, "(null)" }, + { 0x107D, 0x094e, 0x103c, "(null)" }, + { 0x107D, 0x1154, 0x103c, "(null)" 
}, + { 0x107D, 0x094e, 0x10de, "(null)" }, + { 0x107D, 0x1154, 0x10de, "(null)" }, + { 0x1080, 0x086a, 0x10de, "(null)" }, + { 0x1080, 0x0947, 0x10de, "(null)" }, + { 0x1081, 0x087e, 0x10de, "(null)" }, + { 0x1082, 0x0873, 0x10de, "(null)" }, + { 0x1082, 0x5207, 0x174b, "(null)" }, + { 0x1084, 0x2570, 0x1462, "(null)" }, + { 0x1084, 0x2571, 0x1462, "(null)" }, + { 0x1084, 0x2572, 0x1462, "(null)" }, + { 0x1084, 0x3a96, 0x1642, "(null)" }, + { 0x1086, 0x8387, 0x1043, "(null)" }, + { 0x1086, 0x2573, 0x1462, "(null)" }, + { 0x1086, 0x1207, 0x174b, "(null)" }, + { 0x1088, 0x0868, 0x10de, "(null)" }, + { 0x1091, 0x084d, 0x10de, "(null)" }, + { 0x1091, 0x0887, 0x10de, "(null)" }, + { 0x1091, 0x088e, 0x10de, "(null)" }, + { 0x1091, 0x0891, 0x10de, "(null)" }, + { 0x1091, 0x0974, 0x10de, "(null)" }, + { 0x1091, 0x098d, 0x10de, "(null)" }, + { 0x1094, 0x084d, 0x10de, "(null)" }, + { 0x1094, 0x0888, 0x10de, "(null)" }, + { 0x1096, 0x0910, 0x103c, "(null)" }, + { 0x1096, 0x0910, 0x10de, "(null)" }, + { 0x1096, 0x0911, 0x10de, "(null)" }, + { 0x109A, 0x04a4, 0x1028, "(null)" }, + { 0x109A, 0x14a4, 0x1028, "(null)" }, + { 0x109A, 0x1630, 0x103c, "(null)" }, + { 0x109A, 0x1754, 0x10cf, "(null)" }, + { 0x109A, 0x0270, 0x1558, "(null)" }, + { 0x109A, 0x5102, 0x1558, "(null)" }, + { 0x109A, 0x5105, 0x1558, "(null)" }, + { 0x109A, 0x7100, 0x1558, "(null)" }, + { 0x109A, 0x7102, 0x1558, "(null)" }, + { 0x109A, 0x7200, 0x1558, "(null)" }, + { 0x109B, 0x0918, 0x10de, "(null)" }, + { 0x10C5, 0x1009, 0x1019, "(null)" }, + { 0x10C5, 0x838d, 0x1043, "(null)" }, + { 0x10C5, 0x839c, 0x1043, "(null)" }, + { 0x10C5, 0x808a, 0x144d, "(null)" }, + { 0x10C5, 0x1834, 0x1462, "(null)" }, + { 0x10C5, 0x1835, 0x1462, "(null)" }, + { 0x10C5, 0x1837, 0x1462, "(null)" }, + { 0x10C5, 0x183b, 0x1462, "(null)" }, + { 0x10C5, 0x3899, 0x1642, "(null)" }, + { 0x10C5, 0x3958, 0x1642, "(null)" }, + { 0x10C5, 0x3150, 0x174b, "(null)" }, + { 0x10C5, 0x908e, 0x1b0a, "(null)" }, + { 0x10C5, 0x90a9, 0x1b0a, "(null)" }, + { 0x10C5, 0x90af, 0x1b0a, "(null)" }, + { 0x10D8, 0x0862, 0x103c, "(null)" }, + { 0x10D8, 0x0862, 0x10de, "(null)" }, + { 0x1140, 0x0799, 0x1019, "(null)" }, + { 0x1140, 0x999f, 0x1019, "(null)" }, + { 0x1140, 0x0600, 0x1025, "(null)" }, + { 0x1140, 0x0606, 0x1025, "(null)" }, + { 0x1140, 0x064a, 0x1025, "(null)" }, + { 0x1140, 0x064c, 0x1025, "(null)" }, + { 0x1140, 0x067a, 0x1025, "(null)" }, + { 0x1140, 0x0680, 0x1025, "(null)" }, + { 0x1140, 0x0686, 0x1025, "(null)" }, + { 0x1140, 0x0689, 0x1025, "(null)" }, + { 0x1140, 0x068b, 0x1025, "(null)" }, + { 0x1140, 0x068d, 0x1025, "(null)" }, + { 0x1140, 0x068e, 0x1025, "(null)" }, + { 0x1140, 0x0691, 0x1025, "(null)" }, + { 0x1140, 0x0692, 0x1025, "(null)" }, + { 0x1140, 0x0694, 0x1025, "(null)" }, + { 0x1140, 0x0702, 0x1025, "(null)" }, + { 0x1140, 0x0719, 0x1025, "(null)" }, + { 0x1140, 0x0725, 0x1025, "(null)" }, + { 0x1140, 0x0728, 0x1025, "(null)" }, + { 0x1140, 0x072b, 0x1025, "(null)" }, + { 0x1140, 0x072e, 0x1025, "(null)" }, + { 0x1140, 0x0732, 0x1025, "(null)" }, + { 0x1140, 0x0763, 0x1025, "(null)" }, + { 0x1140, 0x0773, 0x1025, "(null)" }, + { 0x1140, 0x0774, 0x1025, "(null)" }, + { 0x1140, 0x0776, 0x1025, "(null)" }, + { 0x1140, 0x077a, 0x1025, "(null)" }, + { 0x1140, 0x077b, 0x1025, "(null)" }, + { 0x1140, 0x077c, 0x1025, "(null)" }, + { 0x1140, 0x077d, 0x1025, "(null)" }, + { 0x1140, 0x077e, 0x1025, "(null)" }, + { 0x1140, 0x077f, 0x1025, "(null)" }, + { 0x1140, 0x0781, 0x1025, "(null)" }, + { 0x1140, 0x0798, 0x1025, "(null)" }, + { 0x1140, 0x0799, 0x1025, 
"(null)" }, + { 0x1140, 0x079b, 0x1025, "(null)" }, + { 0x1140, 0x079c, 0x1025, "(null)" }, + { 0x1140, 0x0807, 0x1025, "(null)" }, + { 0x1140, 0x0821, 0x1025, "(null)" }, + { 0x1140, 0x0823, 0x1025, "(null)" }, + { 0x1140, 0x0830, 0x1025, "(null)" }, + { 0x1140, 0x0833, 0x1025, "(null)" }, + { 0x1140, 0x0837, 0x1025, "(null)" }, + { 0x1140, 0x083e, 0x1025, "(null)" }, + { 0x1140, 0x0841, 0x1025, "(null)" }, + { 0x1140, 0x0853, 0x1025, "(null)" }, + { 0x1140, 0x0854, 0x1025, "(null)" }, + { 0x1140, 0x0855, 0x1025, "(null)" }, + { 0x1140, 0x0856, 0x1025, "(null)" }, + { 0x1140, 0x0857, 0x1025, "(null)" }, + { 0x1140, 0x0858, 0x1025, "(null)" }, + { 0x1140, 0x0863, 0x1025, "(null)" }, + { 0x1140, 0x0868, 0x1025, "(null)" }, + { 0x1140, 0x0869, 0x1025, "(null)" }, + { 0x1140, 0x0873, 0x1025, "(null)" }, + { 0x1140, 0x0878, 0x1025, "(null)" }, + { 0x1140, 0x087b, 0x1025, "(null)" }, + { 0x1140, 0x087f, 0x1025, "(null)" }, + { 0x1140, 0x0881, 0x1025, "(null)" }, + { 0x1140, 0x0885, 0x1025, "(null)" }, + { 0x1140, 0x088a, 0x1025, "(null)" }, + { 0x1140, 0x089b, 0x1025, "(null)" }, + { 0x1140, 0x0921, 0x1025, "(null)" }, + { 0x1140, 0x092e, 0x1025, "(null)" }, + { 0x1140, 0x092f, 0x1025, "(null)" }, + { 0x1140, 0x0932, 0x1025, "(null)" }, + { 0x1140, 0x093a, 0x1025, "(null)" }, + { 0x1140, 0x093c, 0x1025, "(null)" }, + { 0x1140, 0x093f, 0x1025, "(null)" }, + { 0x1140, 0x0941, 0x1025, "(null)" }, + { 0x1140, 0x0945, 0x1025, "(null)" }, + { 0x1140, 0x0954, 0x1025, "(null)" }, + { 0x1140, 0x0965, 0x1025, "(null)" }, + { 0x1140, 0x054d, 0x1028, "(null)" }, + { 0x1140, 0x054e, 0x1028, "(null)" }, + { 0x1140, 0x0554, 0x1028, "(null)" }, + { 0x1140, 0x0557, 0x1028, "(null)" }, + { 0x1140, 0x0562, 0x1028, "(null)" }, + { 0x1140, 0x0565, 0x1028, "(null)" }, + { 0x1140, 0x0568, 0x1028, "(null)" }, + { 0x1140, 0x0590, 0x1028, "(null)" }, + { 0x1140, 0x0592, 0x1028, "(null)" }, + { 0x1140, 0x0594, 0x1028, "(null)" }, + { 0x1140, 0x0595, 0x1028, "(null)" }, + { 0x1140, 0x05a2, 0x1028, "(null)" }, + { 0x1140, 0x05b1, 0x1028, "(null)" }, + { 0x1140, 0x05b3, 0x1028, "(null)" }, + { 0x1140, 0x05da, 0x1028, "(null)" }, + { 0x1140, 0x05de, 0x1028, "(null)" }, + { 0x1140, 0x05e0, 0x1028, "(null)" }, + { 0x1140, 0x05e8, 0x1028, "(null)" }, + { 0x1140, 0x05f4, 0x1028, "(null)" }, + { 0x1140, 0x060f, 0x1028, "(null)" }, + { 0x1140, 0x062f, 0x1028, "(null)" }, + { 0x1140, 0x064e, 0x1028, "(null)" }, + { 0x1140, 0x0652, 0x1028, "(null)" }, + { 0x1140, 0x0653, 0x1028, "(null)" }, + { 0x1140, 0x0655, 0x1028, "(null)" }, + { 0x1140, 0x065e, 0x1028, "(null)" }, + { 0x1140, 0x0662, 0x1028, "(null)" }, + { 0x1140, 0x068d, 0x1028, "(null)" }, + { 0x1140, 0x06ad, 0x1028, "(null)" }, + { 0x1140, 0x06ae, 0x1028, "(null)" }, + { 0x1140, 0x06af, 0x1028, "(null)" }, + { 0x1140, 0x06b0, 0x1028, "(null)" }, + { 0x1140, 0x06c0, 0x1028, "(null)" }, + { 0x1140, 0x06c1, 0x1028, "(null)" }, + { 0x1140, 0x18ef, 0x103c, "(null)" }, + { 0x1140, 0x18f9, 0x103c, "(null)" }, + { 0x1140, 0x18fb, 0x103c, "(null)" }, + { 0x1140, 0x18fd, 0x103c, "(null)" }, + { 0x1140, 0x18ff, 0x103c, "(null)" }, + { 0x1140, 0x218a, 0x103c, "(null)" }, + { 0x1140, 0x21bb, 0x103c, "(null)" }, + { 0x1140, 0x21bc, 0x103c, "(null)" }, + { 0x1140, 0x220e, 0x103c, "(null)" }, + { 0x1140, 0x2210, 0x103c, "(null)" }, + { 0x1140, 0x2212, 0x103c, "(null)" }, + { 0x1140, 0x2214, 0x103c, "(null)" }, + { 0x1140, 0x2218, 0x103c, "(null)" }, + { 0x1140, 0x225b, 0x103c, "(null)" }, + { 0x1140, 0x225d, 0x103c, "(null)" }, + { 0x1140, 0x226d, 0x103c, "(null)" }, + { 0x1140, 0x226f, 
0x103c, "(null)" }, + { 0x1140, 0x22d2, 0x103c, "(null)" }, + { 0x1140, 0x22d9, 0x103c, "(null)" }, + { 0x1140, 0x2335, 0x103c, "(null)" }, + { 0x1140, 0x2337, 0x103c, "(null)" }, + { 0x1140, 0x2aef, 0x103c, "(null)" }, + { 0x1140, 0x2af9, 0x103c, "(null)" }, + { 0x1140, 0x10dd, 0x1043, "(null)" }, + { 0x1140, 0x10ed, 0x1043, "(null)" }, + { 0x1140, 0x11fd, 0x1043, "(null)" }, + { 0x1140, 0x124d, 0x1043, "(null)" }, + { 0x1140, 0x126d, 0x1043, "(null)" }, + { 0x1140, 0x131d, 0x1043, "(null)" }, + { 0x1140, 0x13fd, 0x1043, "(null)" }, + { 0x1140, 0x14c7, 0x1043, "(null)" }, + { 0x1140, 0x1507, 0x1043, "(null)" }, + { 0x1140, 0x15ad, 0x1043, "(null)" }, + { 0x1140, 0x15ed, 0x1043, "(null)" }, + { 0x1140, 0x160d, 0x1043, "(null)" }, + { 0x1140, 0x163d, 0x1043, "(null)" }, + { 0x1140, 0x165d, 0x1043, "(null)" }, + { 0x1140, 0x166d, 0x1043, "(null)" }, + { 0x1140, 0x16cd, 0x1043, "(null)" }, + { 0x1140, 0x16dd, 0x1043, "(null)" }, + { 0x1140, 0x170d, 0x1043, "(null)" }, + { 0x1140, 0x176d, 0x1043, "(null)" }, + { 0x1140, 0x178d, 0x1043, "(null)" }, + { 0x1140, 0x179d, 0x1043, "(null)" }, + { 0x1140, 0x2132, 0x1043, "(null)" }, + { 0x1140, 0x2136, 0x1043, "(null)" }, + { 0x1140, 0x21ba, 0x1043, "(null)" }, + { 0x1140, 0x21fa, 0x1043, "(null)" }, + { 0x1140, 0x220a, 0x1043, "(null)" }, + { 0x1140, 0x221a, 0x1043, "(null)" }, + { 0x1140, 0x223a, 0x1043, "(null)" }, + { 0x1140, 0x224a, 0x1043, "(null)" }, + { 0x1140, 0x227a, 0x1043, "(null)" }, + { 0x1140, 0x228a, 0x1043, "(null)" }, + { 0x1140, 0x22fa, 0x1043, "(null)" }, + { 0x1140, 0x232a, 0x1043, "(null)" }, + { 0x1140, 0x233a, 0x1043, "(null)" }, + { 0x1140, 0x235a, 0x1043, "(null)" }, + { 0x1140, 0x236a, 0x1043, "(null)" }, + { 0x1140, 0x238a, 0x1043, "(null)" }, + { 0x1140, 0x8595, 0x1043, "(null)" }, + { 0x1140, 0x85ea, 0x1043, "(null)" }, + { 0x1140, 0x85eb, 0x1043, "(null)" }, + { 0x1140, 0x85ec, 0x1043, "(null)" }, + { 0x1140, 0x85ee, 0x1043, "(null)" }, + { 0x1140, 0x85f3, 0x1043, "(null)" }, + { 0x1140, 0x860e, 0x1043, "(null)" }, + { 0x1140, 0x861a, 0x1043, "(null)" }, + { 0x1140, 0x861b, 0x1043, "(null)" }, + { 0x1140, 0x8628, 0x1043, "(null)" }, + { 0x1140, 0x8643, 0x1043, "(null)" }, + { 0x1140, 0x864c, 0x1043, "(null)" }, + { 0x1140, 0x8652, 0x1043, "(null)" }, + { 0x1140, 0x8660, 0x1043, "(null)" }, + { 0x1140, 0x8661, 0x1043, "(null)" }, + { 0x1140, 0x0dac, 0x105b, "(null)" }, + { 0x1140, 0x0dad, 0x105b, "(null)" }, + { 0x1140, 0x0ef3, 0x105b, "(null)" }, + { 0x1140, 0x17f5, 0x10cf, "(null)" }, + { 0x1140, 0xfa01, 0x1179, "(null)" }, + { 0x1140, 0xfa02, 0x1179, "(null)" }, + { 0x1140, 0xfa03, 0x1179, "(null)" }, + { 0x1140, 0xfa05, 0x1179, "(null)" }, + { 0x1140, 0xfa11, 0x1179, "(null)" }, + { 0x1140, 0xfa13, 0x1179, "(null)" }, + { 0x1140, 0xfa18, 0x1179, "(null)" }, + { 0x1140, 0xfa19, 0x1179, "(null)" }, + { 0x1140, 0xfa21, 0x1179, "(null)" }, + { 0x1140, 0xfa23, 0x1179, "(null)" }, + { 0x1140, 0xfa2a, 0x1179, "(null)" }, + { 0x1140, 0xfa32, 0x1179, "(null)" }, + { 0x1140, 0xfa33, 0x1179, "(null)" }, + { 0x1140, 0xfa36, 0x1179, "(null)" }, + { 0x1140, 0xfa38, 0x1179, "(null)" }, + { 0x1140, 0xfa42, 0x1179, "(null)" }, + { 0x1140, 0xfa43, 0x1179, "(null)" }, + { 0x1140, 0xfa45, 0x1179, "(null)" }, + { 0x1140, 0xfa47, 0x1179, "(null)" }, + { 0x1140, 0xfa49, 0x1179, "(null)" }, + { 0x1140, 0xfa58, 0x1179, "(null)" }, + { 0x1140, 0xfa59, 0x1179, "(null)" }, + { 0x1140, 0xfa88, 0x1179, "(null)" }, + { 0x1140, 0xfa89, 0x1179, "(null)" }, + { 0x1140, 0xb092, 0x144d, "(null)" }, + { 0x1140, 0xb098, 0x144d, "(null)" }, + { 0x1140, 
0xc0d5, 0x144d, "(null)" }, + { 0x1140, 0xc0d7, 0x144d, "(null)" }, + { 0x1140, 0xc0e2, 0x144d, "(null)" }, + { 0x1140, 0xc0e3, 0x144d, "(null)" }, + { 0x1140, 0xc0e4, 0x144d, "(null)" }, + { 0x1140, 0xc10d, 0x144d, "(null)" }, + { 0x1140, 0xc652, 0x144d, "(null)" }, + { 0x1140, 0xc709, 0x144d, "(null)" }, + { 0x1140, 0xc711, 0x144d, "(null)" }, + { 0x1140, 0xc736, 0x144d, "(null)" }, + { 0x1140, 0xc737, 0x144d, "(null)" }, + { 0x1140, 0xc745, 0x144d, "(null)" }, + { 0x1140, 0xc750, 0x144d, "(null)" }, + { 0x1140, 0x10b8, 0x1462, "(null)" }, + { 0x1140, 0x10e9, 0x1462, "(null)" }, + { 0x1140, 0x1116, 0x1462, "(null)" }, + { 0x1140, 0xaa33, 0x1462, "(null)" }, + { 0x1140, 0xaaa2, 0x1462, "(null)" }, + { 0x1140, 0xaaa3, 0x1462, "(null)" }, + { 0x1140, 0xacb2, 0x1462, "(null)" }, + { 0x1140, 0xacc1, 0x1462, "(null)" }, + { 0x1140, 0xae61, 0x1462, "(null)" }, + { 0x1140, 0xae65, 0x1462, "(null)" }, + { 0x1140, 0xae6a, 0x1462, "(null)" }, + { 0x1140, 0xae71, 0x1462, "(null)" }, + { 0x1140, 0x0083, 0x14c0, "(null)" }, + { 0x1140, 0x0926, 0x152d, "(null)" }, + { 0x1140, 0x0982, 0x152d, "(null)" }, + { 0x1140, 0x0983, 0x152d, "(null)" }, + { 0x1140, 0x1005, 0x152d, "(null)" }, + { 0x1140, 0x1012, 0x152d, "(null)" }, + { 0x1140, 0x1019, 0x152d, "(null)" }, + { 0x1140, 0x1030, 0x152d, "(null)" }, + { 0x1140, 0x1055, 0x152d, "(null)" }, + { 0x1140, 0x1067, 0x152d, "(null)" }, + { 0x1140, 0x1092, 0x152d, "(null)" }, + { 0x1140, 0x2200, 0x17aa, "(null)" }, + { 0x1140, 0x2213, 0x17aa, "(null)" }, + { 0x1140, 0x2220, 0x17aa, "(null)" }, + { 0x1140, 0x309c, 0x17aa, "(null)" }, + { 0x1140, 0x30b4, 0x17aa, "(null)" }, + { 0x1140, 0x30b7, 0x17aa, "(null)" }, + { 0x1140, 0x30e4, 0x17aa, "(null)" }, + { 0x1140, 0x361b, 0x17aa, "(null)" }, + { 0x1140, 0x361c, 0x17aa, "(null)" }, + { 0x1140, 0x361d, 0x17aa, "(null)" }, + { 0x1140, 0x3656, 0x17aa, "(null)" }, + { 0x1140, 0x365a, 0x17aa, "(null)" }, + { 0x1140, 0x365e, 0x17aa, "(null)" }, + { 0x1140, 0x3661, 0x17aa, "(null)" }, + { 0x1140, 0x366c, 0x17aa, "(null)" }, + { 0x1140, 0x3685, 0x17aa, "(null)" }, + { 0x1140, 0x3686, 0x17aa, "(null)" }, + { 0x1140, 0x3687, 0x17aa, "(null)" }, + { 0x1140, 0x3696, 0x17aa, "(null)" }, + { 0x1140, 0x369b, 0x17aa, "(null)" }, + { 0x1140, 0x369c, 0x17aa, "(null)" }, + { 0x1140, 0x369d, 0x17aa, "(null)" }, + { 0x1140, 0x369e, 0x17aa, "(null)" }, + { 0x1140, 0x36a6, 0x17aa, "(null)" }, + { 0x1140, 0x36a7, 0x17aa, "(null)" }, + { 0x1140, 0x36a9, 0x17aa, "(null)" }, + { 0x1140, 0x36af, 0x17aa, "(null)" }, + { 0x1140, 0x36b0, 0x17aa, "(null)" }, + { 0x1140, 0x36b6, 0x17aa, "(null)" }, + { 0x1140, 0x3800, 0x17aa, "(null)" }, + { 0x1140, 0x3801, 0x17aa, "(null)" }, + { 0x1140, 0x3802, 0x17aa, "(null)" }, + { 0x1140, 0x3803, 0x17aa, "(null)" }, + { 0x1140, 0x3804, 0x17aa, "(null)" }, + { 0x1140, 0x3806, 0x17aa, "(null)" }, + { 0x1140, 0x3808, 0x17aa, "(null)" }, + { 0x1140, 0x380d, 0x17aa, "(null)" }, + { 0x1140, 0x380e, 0x17aa, "(null)" }, + { 0x1140, 0x380f, 0x17aa, "(null)" }, + { 0x1140, 0x3811, 0x17aa, "(null)" }, + { 0x1140, 0x3812, 0x17aa, "(null)" }, + { 0x1140, 0x3813, 0x17aa, "(null)" }, + { 0x1140, 0x3816, 0x17aa, "(null)" }, + { 0x1140, 0x3817, 0x17aa, "(null)" }, + { 0x1140, 0x3818, 0x17aa, "(null)" }, + { 0x1140, 0x381a, 0x17aa, "(null)" }, + { 0x1140, 0x381c, 0x17aa, "(null)" }, + { 0x1140, 0x381d, 0x17aa, "(null)" }, + { 0x1140, 0x3901, 0x17aa, "(null)" }, + { 0x1140, 0x3902, 0x17aa, "(null)" }, + { 0x1140, 0x3903, 0x17aa, "(null)" }, + { 0x1140, 0x3904, 0x17aa, "(null)" }, + { 0x1140, 0x3905, 0x17aa, "(null)" }, + { 
0x1140, 0x3907, 0x17aa, "(null)" }, + { 0x1140, 0x3910, 0x17aa, "(null)" }, + { 0x1140, 0x3912, 0x17aa, "(null)" }, + { 0x1140, 0x3913, 0x17aa, "(null)" }, + { 0x1140, 0x3915, 0x17aa, "(null)" }, + { 0x1140, 0x3983, 0x17aa, "(null)" }, + { 0x1140, 0x5001, 0x17aa, "(null)" }, + { 0x1140, 0x5003, 0x17aa, "(null)" }, + { 0x1140, 0x5005, 0x17aa, "(null)" }, + { 0x1140, 0x500d, 0x17aa, "(null)" }, + { 0x1140, 0x5014, 0x17aa, "(null)" }, + { 0x1140, 0x5017, 0x17aa, "(null)" }, + { 0x1140, 0x5019, 0x17aa, "(null)" }, + { 0x1140, 0x501a, 0x17aa, "(null)" }, + { 0x1140, 0x501f, 0x17aa, "(null)" }, + { 0x1140, 0x5025, 0x17aa, "(null)" }, + { 0x1140, 0x5027, 0x17aa, "(null)" }, + { 0x1140, 0x502a, 0x17aa, "(null)" }, + { 0x1140, 0x502b, 0x17aa, "(null)" }, + { 0x1140, 0x502d, 0x17aa, "(null)" }, + { 0x1140, 0x502e, 0x17aa, "(null)" }, + { 0x1140, 0x502f, 0x17aa, "(null)" }, + { 0x1140, 0x5030, 0x17aa, "(null)" }, + { 0x1140, 0x5031, 0x17aa, "(null)" }, + { 0x1140, 0x5032, 0x17aa, "(null)" }, + { 0x1140, 0x5033, 0x17aa, "(null)" }, + { 0x1140, 0x503e, 0x17aa, "(null)" }, + { 0x1140, 0x503f, 0x17aa, "(null)" }, + { 0x1140, 0x5040, 0x17aa, "(null)" }, + { 0x1140, 0x0177, 0x1854, "(null)" }, + { 0x1140, 0x0180, 0x1854, "(null)" }, + { 0x1140, 0x0190, 0x1854, "(null)" }, + { 0x1140, 0x0192, 0x1854, "(null)" }, + { 0x1140, 0x0224, 0x1854, "(null)" }, + { 0x1140, 0x01c0, 0x1b0a, "(null)" }, + { 0x1140, 0x20dd, 0x1b0a, "(null)" }, + { 0x1140, 0x20df, 0x1b0a, "(null)" }, + { 0x1140, 0x210e, 0x1b0a, "(null)" }, + { 0x1140, 0x2202, 0x1b0a, "(null)" }, + { 0x1140, 0x90d7, 0x1b0a, "(null)" }, + { 0x1140, 0x90dd, 0x1b0a, "(null)" }, + { 0x1140, 0x5530, 0x1b50, "(null)" }, + { 0x1140, 0x5031, 0x1b6c, "(null)" }, + { 0x1140, 0x0106, 0x1bab, "(null)" }, + { 0x1140, 0x1013, 0x1d05, "(null)" }, + { 0x1180, 0x068b, 0x1028, "(null)" }, + { 0x1180, 0x0790, 0x1028, "(null)" }, + { 0x1180, 0x079a, 0x1028, "(null)" }, + { 0x1180, 0x0969, 0x10de, "(null)" }, + { 0x1180, 0x0999, 0x10de, "(null)" }, + { 0x1180, 0x100c, 0x10de, "(null)" }, + { 0x1180, 0x1255, 0x174b, "(null)" }, + { 0x1183, 0x068b, 0x1028, "(null)" }, + { 0x1183, 0x0790, 0x1028, "(null)" }, + { 0x1183, 0x079a, 0x1028, "(null)" }, + { 0x1184, 0x068b, 0x1028, "(null)" }, + { 0x1184, 0x0790, 0x1028, "(null)" }, + { 0x1184, 0x079a, 0x1028, "(null)" }, + { 0x1184, 0x1033, 0x10de, "(null)" }, + { 0x1184, 0x1058, 0x10de, "(null)" }, + { 0x1184, 0x4255, 0x174b, "(null)" }, + { 0x1185, 0x068b, 0x1028, "(null)" }, + { 0x1185, 0x0790, 0x1028, "(null)" }, + { 0x1185, 0x079a, 0x1028, "(null)" }, + { 0x1185, 0x6889, 0x103c, "(null)" }, + { 0x1185, 0x098a, 0x10de, "(null)" }, + { 0x1185, 0x106f, 0x10de, "(null)" }, + { 0x1185, 0x2841, 0x1462, "(null)" }, + { 0x1185, 0x3c28, 0x1642, "(null)" }, + { 0x1185, 0x2260, 0x174b, "(null)" }, + { 0x1187, 0x068b, 0x1028, "(null)" }, + { 0x1187, 0x0790, 0x1028, "(null)" }, + { 0x1187, 0x079a, 0x1028, "(null)" }, + { 0x1187, 0x85e9, 0x1043, "(null)" }, + { 0x1187, 0x2849, 0x1462, "(null)" }, + { 0x1187, 0x3d34, 0x1642, "(null)" }, + { 0x1187, 0x3265, 0x174b, "(null)" }, + { 0x1188, 0x068b, 0x1028, "(null)" }, + { 0x1188, 0x0790, 0x1028, "(null)" }, + { 0x1188, 0x079a, 0x1028, "(null)" }, + { 0x1188, 0x095b, 0x10de, "(null)" }, + { 0x1189, 0x064c, 0x1028, "(null)" }, + { 0x1189, 0x068b, 0x1028, "(null)" }, + { 0x1189, 0x0790, 0x1028, "(null)" }, + { 0x1189, 0x079a, 0x1028, "(null)" }, + { 0x1189, 0x097a, 0x10de, "(null)" }, + { 0x1189, 0x1074, 0x10de, "(null)" }, + { 0x1189, 0x1260, 0x174b, "(null)" }, + { 0x118E, 0x064c, 0x1028, "(null)" 
}, + { 0x118E, 0x068b, 0x1028, "(null)" }, + { 0x118E, 0x0790, 0x1028, "(null)" }, + { 0x118E, 0x079a, 0x1028, "(null)" }, + { 0x118E, 0x6892, 0x103c, "(null)" }, + { 0x118E, 0x8477, 0x1043, "(null)" }, + { 0x118E, 0x8650, 0x1043, "(null)" }, + { 0x118E, 0x098a, 0x10de, "(null)" }, + { 0x118E, 0x106f, 0x10de, "(null)" }, + { 0x118E, 0x284b, 0x1462, "(null)" }, + { 0x118E, 0x6265, 0x174b, "(null)" }, + { 0x118F, 0x0970, 0x10de, "(null)" }, + { 0x118F, 0x097f, 0x10de, "(null)" }, + { 0x1193, 0x068b, 0x1028, "(null)" }, + { 0x1193, 0x0790, 0x1028, "(null)" }, + { 0x1193, 0x079a, 0x1028, "(null)" }, + { 0x1194, 0x1085, 0x10de, "(null)" }, + { 0x1195, 0x068b, 0x1028, "(null)" }, + { 0x1195, 0x0790, 0x1028, "(null)" }, + { 0x1195, 0x079a, 0x1028, "(null)" }, + { 0x1198, 0x05aa, 0x1028, "(null)" }, + { 0x1198, 0x05ab, 0x1028, "(null)" }, + { 0x1198, 0x05ad, 0x1028, "(null)" }, + { 0x1198, 0x05ae, 0x1028, "(null)" }, + { 0x1198, 0x157d, 0x1043, "(null)" }, + { 0x1198, 0xa552, 0x1458, "(null)" }, + { 0x1198, 0xd001, 0x1458, "(null)" }, + { 0x1198, 0x1105, 0x1462, "(null)" }, + { 0x1198, 0x110f, 0x1462, "(null)" }, + { 0x1198, 0x1119, 0x1462, "(null)" }, + { 0x1198, 0x119a, 0x1462, "(null)" }, + { 0x1198, 0xaf18, 0x1462, "(null)" }, + { 0x1198, 0x0270, 0x1558, "(null)" }, + { 0x1198, 0x0376, 0x1558, "(null)" }, + { 0x1198, 0x0377, 0x1558, "(null)" }, + { 0x1198, 0x5281, 0x1558, "(null)" }, + { 0x1198, 0x7481, 0x1558, "(null)" }, + { 0x1199, 0x157d, 0x1043, "(null)" }, + { 0x1199, 0x1456, 0x1458, "(null)" }, + { 0x1199, 0xa552, 0x1458, "(null)" }, + { 0x1199, 0xb556, 0x1458, "(null)" }, + { 0x1199, 0xd001, 0x1458, "(null)" }, + { 0x1199, 0x10fd, 0x1462, "(null)" }, + { 0x1199, 0x1103, 0x1462, "(null)" }, + { 0x1199, 0x1106, 0x1462, "(null)" }, + { 0x1199, 0x1110, 0x1462, "(null)" }, + { 0x1199, 0x111a, 0x1462, "(null)" }, + { 0x1199, 0xaf18, 0x1462, "(null)" }, + { 0x1199, 0x1097, 0x152d, "(null)" }, + { 0x1199, 0x0376, 0x1558, "(null)" }, + { 0x1199, 0x5281, 0x1558, "(null)" }, + { 0x1199, 0x7481, 0x1558, "(null)" }, + { 0x1199, 0x6749, 0x1a58, "(null)" }, + { 0x119A, 0x05aa, 0x1028, "(null)" }, + { 0x119A, 0x05ab, 0x1028, "(null)" }, + { 0x119A, 0x05ad, 0x1028, "(null)" }, + { 0x119A, 0x05ae, 0x1028, "(null)" }, + { 0x119A, 0x1756, 0x1458, "(null)" }, + { 0x119A, 0xa552, 0x1458, "(null)" }, + { 0x119A, 0xb556, 0x1458, "(null)" }, + { 0x119A, 0xd001, 0x1458, "(null)" }, + { 0x119A, 0x1102, 0x1462, "(null)" }, + { 0x119A, 0x1104, 0x1462, "(null)" }, + { 0x119A, 0x0376, 0x1558, "(null)" }, + { 0x119A, 0x5281, 0x1558, "(null)" }, + { 0x119A, 0x7481, 0x1558, "(null)" }, + { 0x119D, 0x0120, 0x106b, "(null)" }, + { 0x119E, 0x0121, 0x106b, "(null)" }, + { 0x119F, 0x05aa, 0x1028, "(null)" }, + { 0x119F, 0x05ab, 0x1028, "(null)" }, + { 0x119F, 0x05ad, 0x1028, "(null)" }, + { 0x119F, 0x05ae, 0x1028, "(null)" }, + { 0x119F, 0x213e, 0x1043, "(null)" }, + { 0x119F, 0x21bb, 0x1043, "(null)" }, + { 0x119F, 0x1756, 0x1458, "(null)" }, + { 0x119F, 0x10ea, 0x1462, "(null)" }, + { 0x119F, 0x0270, 0x1558, "(null)" }, + { 0x119F, 0x0271, 0x1558, "(null)" }, + { 0x119F, 0x0376, 0x1558, "(null)" }, + { 0x119F, 0x0377, 0x1558, "(null)" }, + { 0x119F, 0x5106, 0x1558, "(null)" }, + { 0x119F, 0x7104, 0x1558, "(null)" }, + { 0x11A0, 0x0550, 0x1028, "(null)" }, + { 0x11A0, 0x0551, 0x1028, "(null)" }, + { 0x11A0, 0x057b, 0x1028, "(null)" }, + { 0x11A0, 0x0580, 0x1028, "(null)" }, + { 0x11A0, 0x10bc, 0x1462, "(null)" }, + { 0x11A0, 0x10be, 0x1462, "(null)" }, + { 0x11A0, 0xaf15, 0x1462, "(null)" }, + { 0x11A0, 0x0270, 0x1558, 
"(null)" }, + { 0x11A0, 0x0271, 0x1558, "(null)" }, + { 0x11A0, 0x0371, 0x1558, "(null)" }, + { 0x11A0, 0x0372, 0x1558, "(null)" }, + { 0x11A0, 0x5105, 0x1558, "(null)" }, + { 0x11A0, 0x7102, 0x1558, "(null)" }, + { 0x11A0, 0x0964, 0x196e, "(null)" }, + { 0x11A1, 0x10ad, 0x1043, "(null)" }, + { 0x11A1, 0x21ab, 0x1043, "(null)" }, + { 0x11A1, 0x10d9, 0x1462, "(null)" }, + { 0x11A1, 0xaf15, 0x1462, "(null)" }, + { 0x11A1, 0x0270, 0x1558, "(null)" }, + { 0x11A1, 0x0271, 0x1558, "(null)" }, + { 0x11A1, 0x0371, 0x1558, "(null)" }, + { 0x11A1, 0x5105, 0x1558, "(null)" }, + { 0x11A1, 0x7102, 0x1558, "(null)" }, + { 0x11A2, 0x010c, 0x106b, "(null)" }, + { 0x11A3, 0x010d, 0x106b, "(null)" }, + { 0x11A7, 0x10d9, 0x1462, "(null)" }, + { 0x11A7, 0xaf15, 0x1462, "(null)" }, + { 0x11A7, 0x5105, 0x1558, "(null)" }, + { 0x11A7, 0x7102, 0x1558, "(null)" }, + { 0x11B4, 0x1096, 0x103c, "(null)" }, + { 0x11B4, 0x1096, 0x10de, "(null)" }, + { 0x11B6, 0x05cd, 0x1028, "(null)" }, + { 0x11B6, 0x15cd, 0x1028, "(null)" }, + { 0x11B6, 0x1022, 0x103c, "(null)" }, + { 0x11B6, 0x190a, 0x103c, "(null)" }, + { 0x11B6, 0x197a, 0x103c, "(null)" }, + { 0x11B6, 0x2255, 0x103c, "(null)" }, + { 0x11B6, 0x2256, 0x103c, "(null)" }, + { 0x11B6, 0x10fc, 0x1462, "(null)" }, + { 0x11B6, 0x10fd, 0x1462, "(null)" }, + { 0x11B6, 0x1152, 0x1462, "(null)" }, + { 0x11B6, 0x118c, 0x1462, "(null)" }, + { 0x11B6, 0x5106, 0x1558, "(null)" }, + { 0x11B6, 0x5281, 0x1558, "(null)" }, + { 0x11B6, 0x7104, 0x1558, "(null)" }, + { 0x11B6, 0x7481, 0x1558, "(null)" }, + { 0x11B6, 0x7500, 0x1558, "(null)" }, + { 0x11B6, 0x7700, 0x1558, "(null)" }, + { 0x11B7, 0x05cd, 0x1028, "(null)" }, + { 0x11B7, 0x15cd, 0x1028, "(null)" }, + { 0x11B7, 0x1023, 0x103c, "(null)" }, + { 0x11B7, 0x190a, 0x103c, "(null)" }, + { 0x11B7, 0x197a, 0x103c, "(null)" }, + { 0x11B7, 0x2255, 0x103c, "(null)" }, + { 0x11B7, 0x2256, 0x103c, "(null)" }, + { 0x11B7, 0x1100, 0x1462, "(null)" }, + { 0x11B7, 0x1153, 0x1462, "(null)" }, + { 0x11B7, 0x118d, 0x1462, "(null)" }, + { 0x11B8, 0x05cd, 0x1028, "(null)" }, + { 0x11B8, 0x15cd, 0x1028, "(null)" }, + { 0x11B8, 0x190a, 0x103c, "(null)" }, + { 0x11B8, 0x197a, 0x103c, "(null)" }, + { 0x11B8, 0x2255, 0x103c, "(null)" }, + { 0x11B8, 0x2256, 0x103c, "(null)" }, + { 0x11B8, 0x0270, 0x1558, "(null)" }, + { 0x11B8, 0x0376, 0x1558, "(null)" }, + { 0x11B8, 0x7500, 0x1558, "(null)" }, + { 0x11B8, 0x7700, 0x1558, "(null)" }, + { 0x11BA, 0x0965, 0x103c, "(null)" }, + { 0x11BA, 0x0965, 0x10de, "(null)" }, + { 0x11BC, 0x053f, 0x1028, "(null)" }, + { 0x11BC, 0x153f, 0x1028, "(null)" }, + { 0x11BC, 0x176c, 0x103c, "(null)" }, + { 0x11BC, 0x1762, 0x10cf, "(null)" }, + { 0x11BC, 0x0270, 0x1558, "(null)" }, + { 0x11BC, 0x0371, 0x1558, "(null)" }, + { 0x11BC, 0x0376, 0x1558, "(null)" }, + { 0x11BD, 0x053f, 0x1028, "(null)" }, + { 0x11BD, 0x153f, 0x1028, "(null)" }, + { 0x11BD, 0x0951, 0x103c, "(null)" }, + { 0x11BD, 0x176c, 0x103c, "(null)" }, + { 0x11BD, 0x1761, 0x10cf, "(null)" }, + { 0x11BD, 0x10db, 0x1462, "(null)" }, + { 0x11BE, 0x053f, 0x1028, "(null)" }, + { 0x11BE, 0x153f, 0x1028, "(null)" }, + { 0x11BE, 0x0950, 0x103c, "(null)" }, + { 0x11BE, 0x176c, 0x103c, "(null)" }, + { 0x11BE, 0x1760, 0x10cf, "(null)" }, + { 0x11BE, 0x10db, 0x1462, "(null)" }, + { 0x11BE, 0x10eb, 0x1462, "(null)" }, + { 0x11BE, 0x5105, 0x1558, "(null)" }, + { 0x11BE, 0x5106, 0x1558, "(null)" }, + { 0x11BE, 0x7102, 0x1558, "(null)" }, + { 0x11BE, 0x7104, 0x1558, "(null)" }, + { 0x11BF, 0x100a, 0x10de, "(null)" }, + { 0x11BF, 0x100d, 0x10de, "(null)" }, + { 0x11C0, 0x068b, 
0x1028, "(null)" }, + { 0x11C0, 0x0790, 0x1028, "(null)" }, + { 0x11C0, 0x079a, 0x1028, "(null)" }, + { 0x11C2, 0x068b, 0x1028, "(null)" }, + { 0x11C2, 0x0790, 0x1028, "(null)" }, + { 0x11C2, 0x079a, 0x1028, "(null)" }, + { 0x11C3, 0x068b, 0x1028, "(null)" }, + { 0x11C3, 0x0790, 0x1028, "(null)" }, + { 0x11C3, 0x079a, 0x1028, "(null)" }, + { 0x11C4, 0x1029, 0x10de, "(null)" }, + { 0x11C4, 0x1645, 0x174b, "(null)" }, + { 0x11C4, 0x90cc, 0x1b0a, "(null)" }, + { 0x11C6, 0x068b, 0x1028, "(null)" }, + { 0x11C6, 0x0790, 0x1028, "(null)" }, + { 0x11C6, 0x079a, 0x1028, "(null)" }, + { 0x11C6, 0x1016, 0x10de, "(null)" }, + { 0x11C6, 0x0995, 0x144d, "(null)" }, + { 0x11C8, 0x068b, 0x1028, "(null)" }, + { 0x11C8, 0x0790, 0x1028, "(null)" }, + { 0x11C8, 0x079a, 0x1028, "(null)" }, + { 0x11E0, 0x05aa, 0x1028, "(null)" }, + { 0x11E0, 0x05ab, 0x1028, "(null)" }, + { 0x11E0, 0x05ad, 0x1028, "(null)" }, + { 0x11E0, 0x05ae, 0x1028, "(null)" }, + { 0x11E0, 0x213e, 0x1043, "(null)" }, + { 0x11E0, 0x21bb, 0x1043, "(null)" }, + { 0x11E0, 0xfa76, 0x1179, "(null)" }, + { 0x11E0, 0xfa77, 0x1179, "(null)" }, + { 0x11E0, 0x8801, 0x13fe, "(null)" }, + { 0x11E0, 0x2552, 0x1458, "(null)" }, + { 0x11E0, 0xa552, 0x1458, "(null)" }, + { 0x11E0, 0x10e8, 0x1462, "(null)" }, + { 0x11E0, 0x10ed, 0x1462, "(null)" }, + { 0x11E0, 0x0270, 0x1558, "(null)" }, + { 0x11E0, 0x0271, 0x1558, "(null)" }, + { 0x11E0, 0x0376, 0x1558, "(null)" }, + { 0x11E0, 0x5106, 0x1558, "(null)" }, + { 0x11E0, 0x7104, 0x1558, "(null)" }, + { 0x11E0, 0x1702, 0x172f, "(null)" }, + { 0x11E1, 0x05a9, 0x1028, "(null)" }, + { 0x11E1, 0x05aa, 0x1028, "(null)" }, + { 0x11E1, 0x05ab, 0x1028, "(null)" }, + { 0x11E1, 0x05ac, 0x1028, "(null)" }, + { 0x11E1, 0x05ad, 0x1028, "(null)" }, + { 0x11E1, 0x05ae, 0x1028, "(null)" }, + { 0x11E1, 0x1756, 0x1458, "(null)" }, + { 0x11E1, 0x2552, 0x1458, "(null)" }, + { 0x11E1, 0xa552, 0x1458, "(null)" }, + { 0x11E1, 0x10ee, 0x1462, "(null)" }, + { 0x11E1, 0x0376, 0x1558, "(null)" }, + { 0x11E1, 0x5106, 0x1558, "(null)" }, + { 0x11E1, 0x7104, 0x1558, "(null)" }, + { 0x11E1, 0x1701, 0x172f, "(null)" }, + { 0x11E2, 0x0787, 0x1025, "(null)" }, + { 0x11E2, 0x05a9, 0x1028, "(null)" }, + { 0x11E2, 0x05ac, 0x1028, "(null)" }, + { 0x11E2, 0x119d, 0x1043, "(null)" }, + { 0x11E2, 0x1552, 0x1458, "(null)" }, + { 0x11E2, 0x2452, 0x1458, "(null)" }, + { 0x11E2, 0x3456, 0x1458, "(null)" }, + { 0x11E2, 0xa456, 0x1458, "(null)" }, + { 0x11E2, 0xb552, 0x1458, "(null)" }, + { 0x11E2, 0x10e0, 0x1462, "(null)" }, + { 0x11E2, 0x10e1, 0x1462, "(null)" }, + { 0x11E2, 0x0230, 0x1558, "(null)" }, + { 0x11E2, 0x3537, 0x1558, "(null)" }, + { 0x11E2, 0xb215, 0x19da, "(null)" }, + { 0x11E2, 0x6746, 0x1a58, "(null)" }, + { 0x11E2, 0x6748, 0x1a58, "(null)" }, + { 0x11E2, 0xa748, 0x1a58, "(null)" }, + { 0x11E2, 0x2007, 0x1b61, "(null)" }, + { 0x11E3, 0x073f, 0x1025, "(null)" }, + { 0x11E3, 0x0781, 0x1025, "(null)" }, + { 0x11E3, 0x0787, 0x1025, "(null)" }, + { 0x11E3, 0x082e, 0x1025, "(null)" }, + { 0x11E3, 0x14ad, 0x1043, "(null)" }, + { 0x11E3, 0x22ba, 0x1043, "(null)" }, + { 0x11E3, 0x22ca, 0x1043, "(null)" }, + { 0x11E3, 0x3456, 0x1458, "(null)" }, + { 0x11E3, 0xa456, 0x1458, "(null)" }, + { 0x11E3, 0x10e3, 0x1462, "(null)" }, + { 0x11E3, 0x10fe, 0x1462, "(null)" }, + { 0x11E3, 0x10ff, 0x1462, "(null)" }, + { 0x11E3, 0x1059, 0x152d, "(null)" }, + { 0x11E3, 0x3505, 0x1558, "(null)" }, + { 0x11E3, 0x3683, 0x17aa, "(null)" }, + { 0x11FA, 0x079c, 0x103c, "(null)" }, + { 0x11FA, 0x097c, 0x103c, "(null)" }, + { 0x11FA, 0x097c, 0x10de, "(null)" }, + { 0x11FC, 
0x05cc, 0x1028, "(null)" }, + { 0x11FC, 0x15cc, 0x1028, "(null)" }, + { 0x11FC, 0x1028, 0x103c, "(null)" }, + { 0x11FC, 0x1909, 0x103c, "(null)" }, + { 0x11FC, 0x197b, 0x103c, "(null)" }, + { 0x11FC, 0x2253, 0x103c, "(null)" }, + { 0x11FC, 0x2254, 0x103c, "(null)" }, + { 0x11FC, 0x17ef, 0x10cf, "(null)" }, + { 0x11FC, 0x0001, 0x1179, "(null)" }, + { 0x11FC, 0x10fb, 0x1462, "(null)" }, + { 0x11FC, 0x111d, 0x1462, "(null)" }, + { 0x11FC, 0x111e, 0x1462, "(null)" }, + { 0x11FC, 0x1708, 0x172f, "(null)" }, + { 0x11FC, 0x2210, 0x17aa, "(null)" }, + { 0x11FC, 0x2211, 0x17aa, "(null)" }, + { 0x11FC, 0x221a, 0x17aa, "(null)" }, + { 0x11FC, 0x221e, 0x17aa, "(null)" }, + { 0x1200, 0x838b, 0x1043, "(null)" }, + { 0x1200, 0x83ac, 0x1043, "(null)" }, + { 0x1200, 0xc000, 0x1458, "(null)" }, + { 0x1201, 0xc000, 0x1458, "(null)" }, + { 0x1205, 0xc000, 0x1458, "(null)" }, + { 0x1206, 0x0958, 0x10de, "(null)" }, + { 0x1207, 0x0645, 0x174b, "(null)" }, + { 0x1210, 0x10bd, 0x1462, "(null)" }, + { 0x1211, 0x048f, 0x1028, "(null)" }, + { 0x1211, 0x0490, 0x1028, "(null)" }, + { 0x1211, 0x04ba, 0x1028, "(null)" }, + { 0x1211, 0x10a9, 0x1462, "(null)" }, + { 0x1211, 0x0270, 0x1558, "(null)" }, + { 0x1211, 0x0271, 0x1558, "(null)" }, + { 0x1211, 0x5102, 0x1558, "(null)" }, + { 0x1211, 0x7100, 0x1558, "(null)" }, + { 0x1211, 0x7101, 0x1558, "(null)" }, + { 0x1211, 0x7200, 0x1558, "(null)" }, + { 0x1212, 0x0550, 0x1028, "(null)" }, + { 0x1212, 0x0551, 0x1028, "(null)" }, + { 0x1212, 0x057b, 0x1028, "(null)" }, + { 0x1212, 0x0580, 0x1028, "(null)" }, + { 0x1212, 0xc0d0, 0x144d, "(null)" }, + { 0x1212, 0x10cb, 0x1462, "(null)" }, + { 0x1212, 0x0270, 0x1558, "(null)" }, + { 0x1212, 0x0271, 0x1558, "(null)" }, + { 0x1212, 0x5105, 0x1558, "(null)" }, + { 0x1212, 0x7102, 0x1558, "(null)" }, + { 0x1213, 0x2119, 0x1043, "(null)" }, + { 0x1213, 0x2120, 0x1043, "(null)" }, + { 0x1213, 0xfb12, 0x1179, "(null)" }, + { 0x1213, 0xfb18, 0x1179, "(null)" }, + { 0x1213, 0xfb1a, 0x1179, "(null)" }, + { 0x1213, 0x10cb, 0x1462, "(null)" }, + { 0x1213, 0xaf15, 0x1462, "(null)" }, + { 0x1213, 0x0371, 0x1558, "(null)" }, + { 0x1213, 0x5105, 0x1558, "(null)" }, + { 0x1213, 0x7102, 0x1558, "(null)" }, + { 0x1213, 0x8000, 0x1558, "(null)" }, + { 0x1241, 0x091d, 0x10de, "(null)" }, + { 0x1243, 0x1012, 0x1019, "(null)" }, + { 0x1243, 0x8508, 0x1043, "(null)" }, + { 0x1243, 0x2315, 0x1462, "(null)" }, + { 0x1243, 0x2316, 0x1462, "(null)" }, + { 0x1243, 0x2317, 0x1462, "(null)" }, + { 0x1243, 0x2318, 0x1462, "(null)" }, + { 0x1243, 0x3a28, 0x1642, "(null)" }, + { 0x1243, 0x5178, 0x174b, "(null)" }, + { 0x1243, 0x6178, 0x174b, "(null)" }, + { 0x1244, 0x838c, 0x1043, "(null)" }, + { 0x1244, 0xc000, 0x1458, "(null)" }, + { 0x1244, 0x2612, 0x1462, "(null)" }, + { 0x1244, 0x5194, 0x174b, "(null)" }, + { 0x1244, 0x90a2, 0x1b0a, "(null)" }, + { 0x1245, 0xc000, 0x1458, "(null)" }, + { 0x1246, 0x0570, 0x1028, "(null)" }, + { 0x1246, 0x0571, 0x1028, "(null)" }, + { 0x1247, 0x1752, 0x1043, "(null)" }, + { 0x1247, 0x2050, 0x1043, "(null)" }, + { 0x1247, 0x2051, 0x1043, "(null)" }, + { 0x1247, 0x212a, 0x1043, "(null)" }, + { 0x1247, 0x212b, 0x1043, "(null)" }, + { 0x1247, 0x212c, 0x1043, "(null)" }, + { 0x1247, 0x1532, 0x1458, "(null)" }, + { 0x1247, 0x6744, 0x1458, "(null)" }, + { 0x1248, 0x10e7, 0x17c0, "(null)" }, + { 0x1248, 0x10e8, 0x17c0, "(null)" }, + { 0x1248, 0x10ea, 0x17c0, "(null)" }, + { 0x1248, 0x0890, 0x1854, "(null)" }, + { 0x1248, 0x0891, 0x1854, "(null)" }, + { 0x1248, 0x1795, 0x1854, "(null)" }, + { 0x1248, 0x1796, 0x1854, "(null)" }, + { 
0x1248, 0x3005, 0x1854, "(null)" }, + { 0x124B, 0x8540, 0x1043, "(null)" }, + { 0x124B, 0x2319, 0x1462, "(null)" }, + { 0x124B, 0x231a, 0x1462, "(null)" }, + { 0x124B, 0x231b, 0x1462, "(null)" }, + { 0x124B, 0x0640, 0x174b, "(null)" }, + { 0x124D, 0x0491, 0x1028, "(null)" }, + { 0x124D, 0x0570, 0x1028, "(null)" }, + { 0x124D, 0x0571, 0x1028, "(null)" }, + { 0x124D, 0x108d, 0x1462, "(null)" }, + { 0x124D, 0x10cc, 0x1462, "(null)" }, + { 0x124D, 0x1248, 0x1462, "(null)" }, + { 0x1251, 0x048f, 0x1028, "(null)" }, + { 0x1251, 0x0490, 0x1028, "(null)" }, + { 0x1251, 0x04ba, 0x1028, "(null)" }, + { 0x1251, 0x13b7, 0x1043, "(null)" }, + { 0x1251, 0x204a, 0x1043, "(null)" }, + { 0x1251, 0x204b, 0x1043, "(null)" }, + { 0x1251, 0x2100, 0x1043, "(null)" }, + { 0x1251, 0x84ba, 0x1043, "(null)" }, + { 0x1251, 0xfc00, 0x1179, "(null)" }, + { 0x1251, 0xfc01, 0x1179, "(null)" }, + { 0x1251, 0xfc05, 0x1179, "(null)" }, + { 0x1251, 0x10a9, 0x1462, "(null)" }, + { 0x1251, 0x5102, 0x1558, "(null)" }, + { 0x1251, 0x7100, 0x1558, "(null)" }, + { 0x1251, 0x7101, 0x1558, "(null)" }, + { 0x1251, 0x7200, 0x1558, "(null)" }, + { 0x1251, 0x8000, 0x1558, "(null)" }, + { 0x1251, 0x1043, 0x84ba, "(null)" }, + { 0x1280, 0x1018, 0x1019, "(null)" }, + { 0x1280, 0x1019, 0x1019, "(null)" }, + { 0x1280, 0x1020, 0x1019, "(null)" }, + { 0x1280, 0x1891, 0x10cf, "(null)" }, + { 0x1280, 0x1032, 0x10de, "(null)" }, + { 0x1280, 0x80a2, 0x144d, "(null)" }, + { 0x1280, 0x80a4, 0x144d, "(null)" }, + { 0x1280, 0x2902, 0x1462, "(null)" }, + { 0x1280, 0x2920, 0x1462, "(null)" }, + { 0x1280, 0x2922, 0x1462, "(null)" }, + { 0x1280, 0x2924, 0x1462, "(null)" }, + { 0x1280, 0x3d06, 0x1642, "(null)" }, + { 0x1281, 0x2929, 0x1462, "(null)" }, + { 0x1281, 0x3261, 0x174b, "(null)" }, + { 0x1281, 0x326a, 0x174b, "(null)" }, + { 0x1286, 0x1025, 0x1019, "(null)" }, + { 0x1286, 0x8641, 0x1043, "(null)" }, + { 0x1286, 0x1087, 0x10de, "(null)" }, + { 0x1286, 0x80a6, 0x144d, "(null)" }, + { 0x1286, 0x80a8, 0x144d, "(null)" }, + { 0x1286, 0x2927, 0x1462, "(null)" }, + { 0x1286, 0x2928, 0x1462, "(null)" }, + { 0x1286, 0x308a, 0x174b, "(null)" }, + { 0x1287, 0x1034, 0x1019, "(null)" }, + { 0x1287, 0x1083, 0x1028, "(null)" }, + { 0x1287, 0x108b, 0x1028, "(null)" }, + { 0x1287, 0x3382, 0x1028, "(null)" }, + { 0x1287, 0x1083, 0x103c, "(null)" }, + { 0x1287, 0x2b4e, 0x103c, "(null)" }, + { 0x1287, 0x82e9, 0x103c, "(null)" }, + { 0x1287, 0x0862, 0x1043, "(null)" }, + { 0x1287, 0x884b, 0x1043, "(null)" }, + { 0x1287, 0x1a01, 0x109f, "(null)" }, + { 0x1287, 0x1a02, 0x109f, "(null)" }, + { 0x1287, 0x80aa, 0x144d, "(null)" }, + { 0x1287, 0x80ac, 0x144d, "(null)" }, + { 0x1287, 0x292a, 0x1462, "(null)" }, + { 0x1287, 0x292b, 0x1462, "(null)" }, + { 0x1287, 0x3131, 0x1462, "(null)" }, + { 0x1287, 0x3380, 0x1462, "(null)" }, + { 0x1287, 0x3383, 0x1462, "(null)" }, + { 0x1287, 0x8a9f, 0x1462, "(null)" }, + { 0x1287, 0x1586, 0x1642, "(null)" }, + { 0x1287, 0x1706, 0x1642, "(null)" }, + { 0x1287, 0x1868, 0x1642, "(null)" }, + { 0x1287, 0x3e56, 0x1642, "(null)" }, + { 0x1287, 0x3f86, 0x1642, "(null)" }, + { 0x1287, 0x324a, 0x174b, "(null)" }, + { 0x1287, 0x326c, 0x174b, "(null)" }, + { 0x1287, 0xa324, 0x174b, "(null)" }, + { 0x1287, 0xb324, 0x174b, "(null)" }, + { 0x1287, 0xffa2, 0x19da, "(null)" }, + { 0x1287, 0x90f5, 0x1b0a, "(null)" }, + { 0x1287, 0x90f6, 0x1b0a, "(null)" }, + { 0x1288, 0x207b, 0x1043, "(null)" }, + { 0x1288, 0x8670, 0x1043, "(null)" }, + { 0x1288, 0x3130, 0x1462, "(null)" }, + { 0x1288, 0x3133, 0x1462, "(null)" }, + { 0x1288, 0x3381, 0x1462, "(null)" 
}, + { 0x1288, 0x1610, 0x1642, "(null)" }, + { 0x1288, 0x1706, 0x1642, "(null)" }, + { 0x1288, 0x3e56, 0x1642, "(null)" }, + { 0x1288, 0x326b, 0x174b, "(null)" }, + { 0x1288, 0x5326, 0x174b, "(null)" }, + { 0x1288, 0x6326, 0x174b, "(null)" }, + { 0x1288, 0xb324, 0x174b, "(null)" }, + { 0x1289, 0x206b, 0x1043, "(null)" }, + { 0x1289, 0x866f, 0x1043, "(null)" }, + { 0x1289, 0x80a9, 0x144d, "(null)" }, + { 0x128B, 0x0790, 0x1028, "(null)" }, + { 0x128B, 0x118b, 0x1028, "(null)" }, + { 0x128B, 0x8576, 0x1043, "(null)" }, + { 0x128B, 0x85f7, 0x1043, "(null)" }, + { 0x128B, 0x95e7, 0x1043, "(null)" }, + { 0x128B, 0x95f7, 0x1043, "(null)" }, + { 0x1290, 0x193e, 0x103c, "(null)" }, + { 0x1290, 0x2149, 0x103c, "(null)" }, + { 0x1290, 0x2afa, 0x103c, "(null)" }, + { 0x1290, 0x13ad, 0x1043, "(null)" }, + { 0x1290, 0x13cd, 0x1043, "(null)" }, + { 0x1290, 0x403b, 0x1071, "(null)" }, + { 0x1290, 0x403c, 0x1071, "(null)" }, + { 0x1290, 0x0011, 0x1179, "(null)" }, + { 0x1290, 0x0012, 0x1179, "(null)" }, + { 0x1290, 0x0013, 0x1179, "(null)" }, + { 0x1290, 0x10b8, 0x1462, "(null)" }, + { 0x1290, 0x0076, 0x14c0, "(null)" }, + { 0x1290, 0x0077, 0x14c0, "(null)" }, + { 0x1290, 0x0084, 0x14c0, "(null)" }, + { 0x1290, 0xc917, 0x16f8, "(null)" }, + { 0x1290, 0x1707, 0x172f, "(null)" }, + { 0x1290, 0x1709, 0x172f, "(null)" }, + { 0x1290, 0x220d, 0x17aa, "(null)" }, + { 0x1290, 0x220f, 0x17aa, "(null)" }, + { 0x1290, 0x2211, 0x17aa, "(null)" }, + { 0x1290, 0x221d, 0x17aa, "(null)" }, + { 0x1290, 0x221e, 0x17aa, "(null)" }, + { 0x1290, 0x221f, 0x17aa, "(null)" }, + { 0x1290, 0x3800, 0x17aa, "(null)" }, + { 0x1290, 0x5014, 0x17aa, "(null)" }, + { 0x1290, 0x5026, 0x17aa, "(null)" }, + { 0x1290, 0x5027, 0x17aa, "(null)" }, + { 0x1290, 0x5028, 0x17aa, "(null)" }, + { 0x1290, 0xa210, 0x1b72, "(null)" }, + { 0x1291, 0x081d, 0x1025, "(null)" }, + { 0x1291, 0x90b7, 0x104d, "(null)" }, + { 0x1291, 0x90c3, 0x104d, "(null)" }, + { 0x1291, 0x3356, 0x17aa, "(null)" }, + { 0x1292, 0x999b, 0x1019, "(null)" }, + { 0x1292, 0x999c, 0x1019, "(null)" }, + { 0x1292, 0x999f, 0x1019, "(null)" }, + { 0x1292, 0x99a2, 0x1019, "(null)" }, + { 0x1292, 0x99a3, 0x1019, "(null)" }, + { 0x1292, 0x073f, 0x1025, "(null)" }, + { 0x1292, 0x0776, 0x1025, "(null)" }, + { 0x1292, 0x0781, 0x1025, "(null)" }, + { 0x1292, 0x0798, 0x1025, "(null)" }, + { 0x1292, 0x0799, 0x1025, "(null)" }, + { 0x1292, 0x079b, 0x1025, "(null)" }, + { 0x1292, 0x079c, 0x1025, "(null)" }, + { 0x1292, 0x0833, 0x1025, "(null)" }, + { 0x1292, 0x0836, 0x1025, "(null)" }, + { 0x1292, 0x0842, 0x1025, "(null)" }, + { 0x1292, 0x05f5, 0x1028, "(null)" }, + { 0x1292, 0x0607, 0x1028, "(null)" }, + { 0x1292, 0x0616, 0x1028, "(null)" }, + { 0x1292, 0x0638, 0x1028, "(null)" }, + { 0x1292, 0x1961, 0x103c, "(null)" }, + { 0x1292, 0x1963, 0x103c, "(null)" }, + { 0x1292, 0x1964, 0x103c, "(null)" }, + { 0x1292, 0x1966, 0x103c, "(null)" }, + { 0x1292, 0x198c, 0x103c, "(null)" }, + { 0x1292, 0x198d, 0x103c, "(null)" }, + { 0x1292, 0x2165, 0x103c, "(null)" }, + { 0x1292, 0x2166, 0x103c, "(null)" }, + { 0x1292, 0x219a, 0x103c, "(null)" }, + { 0x1292, 0x219c, 0x103c, "(null)" }, + { 0x1292, 0x21b0, 0x103c, "(null)" }, + { 0x1292, 0x21b1, 0x103c, "(null)" }, + { 0x1292, 0x21d9, 0x103c, "(null)" }, + { 0x1292, 0x21da, 0x103c, "(null)" }, + { 0x1292, 0x10cc, 0x1043, "(null)" }, + { 0x1292, 0x122d, 0x1043, "(null)" }, + { 0x1292, 0x123d, 0x1043, "(null)" }, + { 0x1292, 0x124d, 0x1043, "(null)" }, + { 0x1292, 0x126d, 0x1043, "(null)" }, + { 0x1292, 0x130d, 0x1043, "(null)" }, + { 0x1292, 0x131d, 0x1043, 
"(null)" }, + { 0x1292, 0x139d, 0x1043, "(null)" }, + { 0x1292, 0x13fd, 0x1043, "(null)" }, + { 0x1292, 0x14fd, 0x1043, "(null)" }, + { 0x1292, 0x2130, 0x1043, "(null)" }, + { 0x1292, 0x21ba, 0x1043, "(null)" }, + { 0x1292, 0x21fa, 0x1043, "(null)" }, + { 0x1292, 0x220a, 0x1043, "(null)" }, + { 0x1292, 0x8579, 0x1043, "(null)" }, + { 0x1292, 0x85a1, 0x1043, "(null)" }, + { 0x1292, 0x90b8, 0x104d, "(null)" }, + { 0x1292, 0x90be, 0x104d, "(null)" }, + { 0x1292, 0xfa01, 0x1179, "(null)" }, + { 0x1292, 0xfa02, 0x1179, "(null)" }, + { 0x1292, 0xfa03, 0x1179, "(null)" }, + { 0x1292, 0xfa05, 0x1179, "(null)" }, + { 0x1292, 0xfa11, 0x1179, "(null)" }, + { 0x1292, 0xfa13, 0x1179, "(null)" }, + { 0x1292, 0xfa18, 0x1179, "(null)" }, + { 0x1292, 0xfa19, 0x1179, "(null)" }, + { 0x1292, 0xfa21, 0x1179, "(null)" }, + { 0x1292, 0xfa23, 0x1179, "(null)" }, + { 0x1292, 0xfa2a, 0x1179, "(null)" }, + { 0x1292, 0xfa32, 0x1179, "(null)" }, + { 0x1292, 0xfa33, 0x1179, "(null)" }, + { 0x1292, 0xfa36, 0x1179, "(null)" }, + { 0x1292, 0xfa38, 0x1179, "(null)" }, + { 0x1292, 0xfa42, 0x1179, "(null)" }, + { 0x1292, 0xfa43, 0x1179, "(null)" }, + { 0x1292, 0xfa45, 0x1179, "(null)" }, + { 0x1292, 0xfa47, 0x1179, "(null)" }, + { 0x1292, 0xfa49, 0x1179, "(null)" }, + { 0x1292, 0xfa58, 0x1179, "(null)" }, + { 0x1292, 0xfa59, 0x1179, "(null)" }, + { 0x1292, 0xfa64, 0x1179, "(null)" }, + { 0x1292, 0xfa78, 0x1179, "(null)" }, + { 0x1292, 0xfa79, 0x1179, "(null)" }, + { 0x1292, 0xfa88, 0x1179, "(null)" }, + { 0x1292, 0xfa89, 0x1179, "(null)" }, + { 0x1292, 0xfac1, 0x1179, "(null)" }, + { 0x1292, 0xfad1, 0x1179, "(null)" }, + { 0x1292, 0xc709, 0x144d, "(null)" }, + { 0x1292, 0x3552, 0x1458, "(null)" }, + { 0x1292, 0x10e9, 0x1462, "(null)" }, + { 0x1292, 0xaa33, 0x1462, "(null)" }, + { 0x1292, 0xaa73, 0x1462, "(null)" }, + { 0x1292, 0xaa89, 0x1462, "(null)" }, + { 0x1292, 0xaa8a, 0x1462, "(null)" }, + { 0x1292, 0xaaa1, 0x1462, "(null)" }, + { 0x1292, 0xaaa4, 0x1462, "(null)" }, + { 0x1292, 0xac11, 0x1462, "(null)" }, + { 0x1292, 0xac14, 0x1462, "(null)" }, + { 0x1292, 0xac7c, 0x1462, "(null)" }, + { 0x1292, 0xac91, 0x1462, "(null)" }, + { 0x1292, 0xac94, 0x1462, "(null)" }, + { 0x1292, 0xac95, 0x1462, "(null)" }, + { 0x1292, 0xacb1, 0x1462, "(null)" }, + { 0x1292, 0xae61, 0x1462, "(null)" }, + { 0x1292, 0xae62, 0x1462, "(null)" }, + { 0x1292, 0xae63, 0x1462, "(null)" }, + { 0x1292, 0xae64, 0x1462, "(null)" }, + { 0x1292, 0xaf11, 0x1462, "(null)" }, + { 0x1292, 0xaf13, 0x1462, "(null)" }, + { 0x1292, 0xaf14, 0x1462, "(null)" }, + { 0x1292, 0xaf16, 0x1462, "(null)" }, + { 0x1292, 0xaf17, 0x1462, "(null)" }, + { 0x1292, 0x0063, 0x14c0, "(null)" }, + { 0x1292, 0x0078, 0x14c0, "(null)" }, + { 0x1292, 0x1004, 0x152d, "(null)" }, + { 0x1292, 0x1008, 0x152d, "(null)" }, + { 0x1292, 0x1054, 0x152d, "(null)" }, + { 0x1292, 0x1060, 0x152d, "(null)" }, + { 0x1292, 0x1061, 0x152d, "(null)" }, + { 0x1292, 0x1077, 0x152d, "(null)" }, + { 0x1292, 0x5082, 0x1558, "(null)" }, + { 0x1292, 0x6500, 0x1558, "(null)" }, + { 0x1292, 0x6565, 0x1558, "(null)" }, + { 0x1292, 0x3675, 0x17aa, "(null)" }, + { 0x1292, 0x367c, 0x17aa, "(null)" }, + { 0x1292, 0x3684, 0x17aa, "(null)" }, + { 0x1292, 0x3801, 0x17aa, "(null)" }, + { 0x1292, 0x3802, 0x17aa, "(null)" }, + { 0x1292, 0x3805, 0x17aa, "(null)" }, + { 0x1292, 0x3806, 0x17aa, "(null)" }, + { 0x1292, 0x3809, 0x17aa, "(null)" }, + { 0x1292, 0x380a, 0x17aa, "(null)" }, + { 0x1292, 0x3904, 0x17aa, "(null)" }, + { 0x1292, 0x3912, 0x17aa, "(null)" }, + { 0x1292, 0x5003, 0x17aa, "(null)" }, + { 0x1292, 0x5007, 
0x17aa, "(null)" },
+ { 0x1292, 0x5014, 0x17aa, "(null)" },
+ { 0x1292, 0x5019, 0x17aa, "(null)" },
+ { 0x1292, 0x501a, 0x17aa, "(null)" },
+ { 0x1292, 0x5023, 0x17aa, "(null)" },
+ { 0x1292, 0x502a, 0x17aa, "(null)" },
+ { 0x1292, 0x502f, 0x17aa, "(null)" },
+ { 0x1292, 0x503e, 0x17aa, "(null)" },
+ { 0x1292, 0x740a, 0x17aa, "(null)" },
+ { 0x1292, 0x10f5, 0x17c0, "(null)" },
+ { 0x1292, 0x0193, 0x1854, "(null)" },
+ { 0x1292, 0x0208, 0x1854, "(null)" },
+ { 0x1292, 0x1050, 0x1854, "(null)" },
+ { 0x1292, 0x20dc, 0x1b0a, "(null)" },
+ { 0x1292, 0x20dd, 0x1b0a, "(null)" },
+ { 0x1292, 0x2106, 0x1b0a, "(null)" },
+ { 0x1292, 0x2109, 0x1b0a, "(null)" },
+ { 0x1292, 0x210e, 0x1b0a, "(null)" },
+ { 0x1292, 0x2202, 0x1b0a, "(null)" },
+ { 0x1292, 0x2222, 0x1b0a, "(null)" },
+ { 0x1293, 0x1000, 0x152d, "(null)" },
+ { 0x1293, 0x5540, 0x1b50, "(null)" },
+ { 0x1295, 0x2b0d, 0x103c, "(null)" },
+ { 0x1295, 0x2b0f, 0x103c, "(null)" },
+ { 0x1295, 0x2b20, 0x103c, "(null)" },
+ { 0x1295, 0x2b21, 0x103c, "(null)" },
+ { 0x1295, 0x367a, 0x17aa, "(null)" },
+ { 0x1295, 0x367c, 0x17aa, "(null)" },
+ { 0x1296, 0xc737, 0x144d, "(null)" },
+ { 0x1296, 0x2109, 0x1b0a, "(null)" },
+ { 0x1296, 0x2222, 0x1b0a, "(null)" },
+ { 0x1298, 0x124d, 0x1043, "(null)" },
+ { 0x1298, 0x21fa, 0x1043, "(null)" },
+ { 0x1299, 0x093c, 0x1025, "(null)" },
+ { 0x1299, 0x0945, 0x1025, "(null)" },
+ { 0x1299, 0x096b, 0x1025, "(null)" },
+ { 0x1299, 0x0971, 0x1025, "(null)" },
+ { 0x1299, 0x0974, 0x1025, "(null)" },
+ { 0x1299, 0x0977, 0x1025, "(null)" },
+ { 0x1299, 0x0985, 0x1025, "(null)" },
+ { 0x1299, 0x0988, 0x1025, "(null)" },
+ { 0x1299, 0x098b, 0x1025, "(null)" },
+ { 0x1299, 0x098f, 0x1025, "(null)" },
+ { 0x1299, 0x0992, 0x1025, "(null)" },
+ { 0x1299, 0x099a, 0x1025, "(null)" },
+ { 0x1299, 0x099f, 0x1025, "(null)" },
+ { 0x1299, 0x100d, 0x1025, "(null)" },
+ { 0x1299, 0x101a, 0x1025, "(null)" },
+ { 0x1299, 0x1025, 0x1025, "(null)" },
+ { 0x1299, 0x1035, 0x1025, "(null)" },
+ { 0x1299, 0x103e, 0x1025, "(null)" },
+ { 0x1299, 0x1040, 0x1025, "(null)" },
+ { 0x1299, 0x1042, 0x1025, "(null)" },
+ { 0x1299, 0x1045, 0x1025, "(null)" },
+ { 0x1299, 0x107f, 0x1025, "(null)" },
+ { 0x1299, 0x0652, 0x1028, "(null)" },
+ { 0x1299, 0x0653, 0x1028, "(null)" },
+ { 0x1299, 0x0655, 0x1028, "(null)" },
+ { 0x1299, 0x0656, 0x1028, "(null)" },
+ { 0x1299, 0x065e, 0x1028, "(null)" },
+ { 0x1299, 0x0662, 0x1028, "(null)" },
+ { 0x1299, 0x06ad, 0x1028, "(null)" },
+ { 0x1299, 0x06ae, 0x1028, "(null)" },
+ { 0x1299, 0x06af, 0x1028, "(null)" },
+ { 0x1299, 0x06b0, 0x1028, "(null)" },
+ { 0x1299, 0x06c0, 0x1028, "(null)" },
+ { 0x1299, 0x06c1, 0x1028, "(null)" },
+ { 0x1299, 0x06f0, 0x1028, "(null)" },
+ { 0x1299, 0x06f1, 0x1028, "(null)" },
+ { 0x1299, 0x06f2, 0x1028, "(null)" },
+ { 0x1299, 0x06f3, 0x1028, "(null)" },
+ { 0x1299, 0x100e, 0x1043, "(null)" },
+ { 0x1299, 0x1010, 0x1043, "(null)" },
+ { 0x1299, 0x102e, 0x1043, "(null)" },
+ { 0x1299, 0x103e, 0x1043, "(null)" },
+ { 0x1299, 0x104e, 0x1043, "(null)" },
+ { 0x1299, 0x105e, 0x1043, "(null)" },
+ { 0x1299, 0x16dd, 0x1043, "(null)" },
+ { 0x1299, 0x176d, 0x1043, "(null)" },
+ { 0x1299, 0x179d, 0x1043, "(null)" },
+ { 0x1299, 0x18d0, 0x1043, "(null)" },
+ { 0x1299, 0x1a6d, 0x1043, "(null)" },
+ { 0x1299, 0x1a7d, 0x1043, "(null)" },
+ { 0x1299, 0x1add, 0x1043, "(null)" },
+ { 0x1299, 0x1aed, 0x1043, "(null)" },
+ { 0x1299, 0x1bad, 0x1043, "(null)" },
+ { 0x1299, 0x232a, 0x1043, "(null)" },
+ { 0x1299, 0x23aa, 0x1043, "(null)" },
+ { 0x1299, 0x23da, 0x1043, "(null)" },
+ { 0x1299, 0x23ea, 0x1043, "(null)" },
+ { 0x1299, 0x23fa, 0x1043, "(null)" },
+ { 0x1299, 0x241a, 0x1043, "(null)" },
+ { 0x1299, 0x242a, 0x1043, "(null)" },
+ { 0x1299, 0x244a, 0x1043, "(null)" },
+ { 0x1299, 0x245a, 0x1043, "(null)" },
+ { 0x1299, 0x246a, 0x1043, "(null)" },
+ { 0x1299, 0x24aa, 0x1043, "(null)" },
+ { 0x1299, 0xf841, 0x1179, "(null)" },
+ { 0x1299, 0xf842, 0x1179, "(null)" },
+ { 0x1299, 0xf843, 0x1179, "(null)" },
+ { 0x1299, 0xc757, 0x144d, "(null)" },
+ { 0x1299, 0xc770, 0x144d, "(null)" },
+ { 0x1299, 0x1116, 0x1462, "(null)" },
+ { 0x1299, 0x1159, 0x1462, "(null)" },
+ { 0x1299, 0x1108, 0x152d, "(null)" },
+ { 0x1299, 0x1113, 0x152d, "(null)" },
+ { 0x1299, 0x1123, 0x152d, "(null)" },
+ { 0x1299, 0x1126, 0x152d, "(null)" },
+ { 0x1299, 0x30bb, 0x17aa, "(null)" },
+ { 0x1299, 0x30da, 0x17aa, "(null)" },
+ { 0x1299, 0x30dc, 0x17aa, "(null)" },
+ { 0x1299, 0x30dd, 0x17aa, "(null)" },
+ { 0x1299, 0x30df, 0x17aa, "(null)" },
+ { 0x1299, 0x3117, 0x17aa, "(null)" },
+ { 0x1299, 0x361b, 0x17aa, "(null)" },
+ { 0x1299, 0x362d, 0x17aa, "(null)" },
+ { 0x1299, 0x362e, 0x17aa, "(null)" },
+ { 0x1299, 0x3630, 0x17aa, "(null)" },
+ { 0x1299, 0x3637, 0x17aa, "(null)" },
+ { 0x1299, 0x369b, 0x17aa, "(null)" },
+ { 0x1299, 0x36a7, 0x17aa, "(null)" },
+ { 0x1299, 0x36af, 0x17aa, "(null)" },
+ { 0x1299, 0x36f0, 0x17aa, "(null)" },
+ { 0x1299, 0x3801, 0x17aa, "(null)" },
+ { 0x1299, 0x380d, 0x17aa, "(null)" },
+ { 0x1299, 0x381c, 0x17aa, "(null)" },
+ { 0x1299, 0x381d, 0x17aa, "(null)" },
+ { 0x1299, 0x381f, 0x17aa, "(null)" },
+ { 0x1299, 0x3821, 0x17aa, "(null)" },
+ { 0x1299, 0x3822, 0x17aa, "(null)" },
+ { 0x1299, 0x3824, 0x17aa, "(null)" },
+ { 0x1299, 0x3829, 0x17aa, "(null)" },
+ { 0x1299, 0x382b, 0x17aa, "(null)" },
+ { 0x1299, 0x39f1, 0x17aa, "(null)" },
+ { 0x1299, 0x0260, 0x1854, "(null)" },
+ { 0x1299, 0x01c6, 0x1b0a, "(null)" },
+ { 0x1299, 0x225c, 0x1b0a, "(null)" },
+ { 0x1299, 0x2004, 0x1d05, "(null)" },
+ { 0x129A, 0x099e, 0x1025, "(null)" },
+ { 0x129A, 0x1014, 0x1025, "(null)" },
+ { 0x129A, 0x1015, 0x1025, "(null)" },
+ { 0x129A, 0xc782, 0x144d, "(null)" },
+ { 0x12B9, 0x1026, 0x103c, "(null)" },
+ { 0x12B9, 0x1909, 0x103c, "(null)" },
+ { 0x12B9, 0x190a, 0x103c, "(null)" },
+ { 0x12B9, 0x197a, 0x103c, "(null)" },
+ { 0x12B9, 0x197b, 0x103c, "(null)" },
+ { 0x12B9, 0x2253, 0x103c, "(null)" },
+ { 0x12B9, 0x2254, 0x103c, "(null)" },
+ { 0x12BA, 0x17ed, 0x10cf, "(null)" },
+ { 0x1340, 0x0000, 0x0000, "NVIDIA GeForce 830M" },
+ { 0x1340, 0x2b2b, 0x103c, "NVIDIA GeForce 830A" },
+ { 0x1341, 0x0000, 0x0000, "NVIDIA GeForce 840M" },
+ { 0x1341, 0x3697, 0x17aa, "NVIDIA GeForce 840A" },
+ { 0x1341, 0x3699, 0x17aa, "NVIDIA GeForce 840A" },
+ { 0x1341, 0x369c, 0x17aa, "NVIDIA GeForce 840A" },
+ { 0x1341, 0x36af, 0x17aa, "NVIDIA GeForce 840A" },
+ { 0x1344, 0x0000, 0x0000, "NVIDIA GeForce 845M" },
+ { 0x1346, 0x0000, 0x0000, "NVIDIA GeForce 930M" },
+ { 0x1346, 0x30ba, 0x17aa, "NVIDIA GeForce 930A" },
+ { 0x1346, 0x362c, 0x17aa, "NVIDIA GeForce 930A" },
+ { 0x1346, 0x362f, 0x17aa, "NVIDIA GeForce 930A" },
+ { 0x1346, 0x3636, 0x17aa, "NVIDIA GeForce 930A" },
+ { 0x1347, 0x0000, 0x0000, "NVIDIA GeForce 940M" },
+ { 0x1347, 0x36b9, 0x17aa, "NVIDIA GeForce 940A" },
+ { 0x1347, 0x36ba, 0x17aa, "NVIDIA GeForce 940A" },
+ { 0x1348, 0x0000, 0x0000, "NVIDIA GeForce 945M" },
+ { 0x1348, 0x2b5c, 0x103c, "NVIDIA GeForce 945A" },
+ { 0x1349, 0x0000, 0x0000, "NVIDIA GeForce 930M" },
+ { 0x1349, 0x3124, 0x17aa, "NVIDIA GeForce 930A" },
+ { 0x1349, 0x364b, 0x17aa, "NVIDIA GeForce 930A" },
+ { 0x1349, 0x36c3, 0x17aa, "NVIDIA GeForce 930A" },
+ { 0x1349, 0x36d1, 0x17aa, "NVIDIA GeForce 930A" },
+ { 0x1349, 0x36d8, 0x17aa, "NVIDIA GeForce 930A" },
+ { 0x134B, 0x0000, 0x0000, "NVIDIA GeForce 940MX" },
+ { 0x134D, 0x0000, 0x0000, "NVIDIA GeForce 940MX" },
+ { 0x134E, 0x0000, 0x0000, "NVIDIA GeForce 930MX" },
+ { 0x134F, 0x0000, 0x0000, "NVIDIA GeForce 920MX" },
+ { 0x137A, 0x0000, 0x0000, "NVIDIA N15M-Q3" },
+ { 0x137A, 0x2225, 0x17aa, "Quadro K620M" },
+ { 0x137A, 0x2232, 0x17aa, "Quadro M500M" },
+ { 0x137A, 0x505a, 0x17aa, "Quadro M500M" },
+ { 0x137B, 0x0000, 0x0000, "Quadro M520" },
+ { 0x1380, 0x0000, 0x0000, "NVIDIA GeForce GTX 750 Ti" },
+ { 0x1381, 0x0000, 0x0000, "NVIDIA GeForce GTX 750" },
+ { 0x1382, 0x0000, 0x0000, "NVIDIA GeForce GTX 745" },
+ { 0x1390, 0x0000, 0x0000, "NVIDIA GeForce 845M" },
+ { 0x1391, 0x0000, 0x0000, "NVIDIA GeForce GTX 850M" },
+ { 0x1391, 0x3697, 0x17aa, "NVIDIA GeForce GTX 850A" },
+ { 0x1392, 0x0000, 0x0000, "NVIDIA GeForce GTX 860M" },
+ { 0x1392, 0x066a, 0x1028, "NVIDIA GeForce GPU" },
+ { 0x1392, 0x861e, 0x1043, "NVIDIA GeForce GTX 750 Ti" },
+ { 0x1392, 0x86d9, 0x1043, "NVIDIA GeForce GTX 750 Ti" },
+ { 0x1393, 0x0000, 0x0000, "NVIDIA GeForce 840M" },
+ { 0x1398, 0x0000, 0x0000, "NVIDIA GeForce 845M" },
+ { 0x1399, 0x0000, 0x0000, "NVIDIA GeForce 945M" },
+ { 0x139A, 0x0000, 0x0000, "NVIDIA GeForce GTX 950M" },
+ { 0x139A, 0x362c, 0x17aa, "NVIDIA GeForce GTX 950A" },
+ { 0x139A, 0x362f, 0x17aa, "NVIDIA GeForce GTX 950A" },
+ { 0x139A, 0x363f, 0x17aa, "NVIDIA GeForce GTX 950A" },
+ { 0x139A, 0x3640, 0x17aa, "NVIDIA GeForce GTX 950A" },
+ { 0x139A, 0x3647, 0x17aa, "NVIDIA GeForce GTX 950A" },
+ { 0x139A, 0x36b9, 0x17aa, "NVIDIA GeForce GTX 950A" },
+ { 0x139B, 0x0000, 0x0000, "NVIDIA GeForce GTX 960M" },
+ { 0x139B, 0x107a, 0x1025, "NVIDIA GeForce GTX 750 Ti" },
+ { 0x139B, 0x06a3, 0x1028, "NVIDIA GeForce GTX 860M" },
+ { 0x139B, 0x2b4c, 0x103c, "NVIDIA GeForce GTX 960A" },
+ { 0x139B, 0x3649, 0x17aa, "NVIDIA GeForce GTX 750Ti" },
+ { 0x139B, 0x36bf, 0x17aa, "NVIDIA GeForce GTX 960A" },
+ { 0x139B, 0xc248, 0x19da, "NVIDIA GeForce GTX 750 Ti" },
+ { 0x139B, 0x8a75, 0x1afa, "NVIDIA GeForce GTX 750Ti" },
+ { 0x139C, 0x0000, 0x0000, "NVIDIA GeForce 940M" },
+ { 0x139D, 0x0000, 0x0000, "NVIDIA GeForce GTX 750 Ti" },
+ { 0x13B0, 0x0000, 0x0000, "Quadro M2000M" },
+ { 0x13B1, 0x0000, 0x0000, "Quadro M1000M" },
+ { 0x13B2, 0x0000, 0x0000, "Quadro M600M" },
+ { 0x13B3, 0x0000, 0x0000, "Quadro K2200M" },
+ { 0x13B4, 0x0000, 0x0000, "Quadro M620" },
+ { 0x13B6, 0x0000, 0x0000, "Quadro M1200" },
+ { 0x13B9, 0x0000, 0x0000, "NVS 810" },
+ { 0x13BA, 0x0000, 0x0000, "Quadro K2200" },
+ { 0x13BB, 0x0000, 0x0000, "Quadro K620" },
+ { 0x13BC, 0x0000, 0x0000, "Quadro K1200" },
+ { 0x13BC, 0x1140, 0x15c3, "EIZO Quadro MED-XN50LP" },
+ { 0x13C0, 0x0000, 0x0000, "NVIDIA GeForce GTX 980" },
+ { 0x13C2, 0x0000, 0x0000, "NVIDIA GeForce GTX 970" },
+ { 0x13D7, 0x0000, 0x0000, "NVIDIA GeForce GTX 980M" },
+ { 0x13D8, 0x0000, 0x0000, "NVIDIA GeForce GTX 970M" },
+ { 0x13D8, 0x1198, 0x1462, "NVIDIA GeForce GTX 960" },
+ { 0x13D8, 0x1199, 0x1462, "NVIDIA GeForce GTX 960" },
+ { 0x13D8, 0xb282, 0x19da, "NVIDIA GeForce GTX 960" },
+ { 0x13D8, 0xb284, 0x19da, "NVIDIA GeForce GTX 960" },
+ { 0x13D8, 0xb286, 0x19da, "NVIDIA GeForce GTX 960" },
+ { 0x13D9, 0x0000, 0x0000, "NVIDIA GeForce GTX 965M" },
+ { 0x13DA, 0x0000, 0x0000, "NVIDIA GeForce GTX 980" },
+ { 0x13F0, 0x0000, 0x0000, "Quadro M5000" },
+ { 0x13F1, 0x0000, 0x0000, "Quadro M4000" },
+ { 0x13F1, 0x1153, 0x15c3, "EIZO Quadro MED-XN90" },
+ { 0x13F2, 0x0000, 0x0000, "Tesla M60" },
+ { 0x13F3, 0x0000, 0x0000, "Tesla M6" },
+ { 0x13F8, 0x0000, 0x0000, "Quadro M5000M" },
+ { 0x13F8, 0x11dd, 0x10de, "Quadro M5000 SE" },
+ { 0x13F9, 0x0000, 0x0000, "Quadro M4000M" },
+ { 0x13FA, 0x0000, 0x0000, "Quadro M3000M" },
+ { 0x13FA, 0x11c9, 0x10de, "Quadro M3000 SE" },
+ { 0x13FB, 0x0000, 0x0000, "Quadro M5500" },
+ { 0x1401, 0x0000, 0x0000, "NVIDIA GeForce GTX 960" },
+ { 0x1402, 0x0000, 0x0000, "NVIDIA GeForce GTX 950" },
+ { 0x1406, 0x0000, 0x0000, "NVIDIA GeForce GTX 960" },
+ { 0x1407, 0x0000, 0x0000, "NVIDIA GeForce GTX 750" },
+ { 0x1427, 0x0000, 0x0000, "NVIDIA GeForce GTX 965M" },
+ { 0x1427, 0xd003, 0x1458, "NVIDIA GeForce GTX 950" },
+ { 0x1430, 0x0000, 0x0000, "Quadro M2000" },
+ { 0x1430, 0x1190, 0x15c3, "EIZO Quadro MED-XN70" },
+ { 0x1431, 0x0000, 0x0000, "Tesla M4" },
+ { 0x1436, 0x0000, 0x0000, "Quadro M2200" },
+ { 0x15F0, 0x0000, 0x0000, "Quadro GP100" },
+ { 0x15F7, 0x0000, 0x0000, "Tesla P100-PCIE-12GB" },
+ { 0x15F8, 0x0000, 0x0000, "Tesla P100-PCIE-16GB" },
+ { 0x15F9, 0x0000, 0x0000, "Tesla P100-SXM2-16GB" },
+ { 0x1617, 0x0000, 0x0000, "NVIDIA GeForce GTX 980M" },
+ { 0x1618, 0x0000, 0x0000, "NVIDIA GeForce GTX 970M" },
+ { 0x1619, 0x0000, 0x0000, "NVIDIA GeForce GTX 965M" },
+ { 0x161A, 0x0000, 0x0000, "NVIDIA GeForce GTX 980" },
+ { 0x1667, 0x0000, 0x0000, "NVIDIA GeForce GTX 965M" },
+ { 0x174D, 0x0000, 0x0000, "NVIDIA GeForce MX130" },
+ { 0x174E, 0x0000, 0x0000, "NVIDIA GeForce MX110" },
+ { 0x179C, 0x0000, 0x0000, "NVIDIA GeForce 940MX" },
+ { 0x17C2, 0x0000, 0x0000, "NVIDIA GeForce GTX TITAN X" },
+ { 0x17C8, 0x0000, 0x0000, "NVIDIA GeForce GTX 980 Ti" },
+ { 0x17F0, 0x0000, 0x0000, "Quadro M6000" },
+ { 0x17F1, 0x0000, 0x0000, "Quadro M6000 24GB" },
+ { 0x17FD, 0x0000, 0x0000, "Tesla M40" },
+ { 0x17FD, 0x1173, 0x10de, "Tesla M40 24GB" },
+ { 0x1B00, 0x0000, 0x0000, "NVIDIA TITAN X (Pascal)" },
+ { 0x1B02, 0x0000, 0x0000, "NVIDIA TITAN Xp" },
+ { 0x1B02, 0x123e, 0x10de, "NVIDIA TITAN Xp COLLECTORS EDITION" },
+ { 0x1B02, 0x123f, 0x10de, "NVIDIA TITAN Xp COLLECTORS EDITION" },
+ { 0x1B06, 0x0000, 0x0000, "NVIDIA GeForce GTX 1080 Ti" },
+ { 0x1B30, 0x0000, 0x0000, "Quadro P6000" },
+ { 0x1B38, 0x0000, 0x0000, "Tesla P40" },
+ { 0x1B80, 0x0000, 0x0000, "NVIDIA GeForce GTX 1080" },
+ { 0x1B81, 0x0000, 0x0000, "NVIDIA GeForce GTX 1070" },
+ { 0x1B82, 0x0000, 0x0000, "NVIDIA GeForce GTX 1070 Ti" },
+ { 0x1B83, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 6GB" },
+ { 0x1B84, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 3GB" },
+ { 0x1B87, 0x0000, 0x0000, "NVIDIA P104-100" },
+ { 0x1BA0, 0x0000, 0x0000, "NVIDIA GeForce GTX 1080" },
+ { 0x1BA0, 0x0887, 0x1028, "NVIDIA GeForce GTX 1080 with Max-Q Design" },
+ { 0x1BA1, 0x0000, 0x0000, "NVIDIA GeForce GTX 1070" },
+ { 0x1BA1, 0x08a1, 0x1028, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BA1, 0x08a2, 0x1028, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BA1, 0x1cce, 0x1043, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BA1, 0x1651, 0x1458, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BA1, 0x1653, 0x1458, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BA1, 0x11e8, 0x1462, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BA1, 0x11e9, 0x1462, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BA1, 0x1225, 0x1462, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BA1, 0x1226, 0x1462, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BA1, 0x1227, 0x1462, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BA1, 0x9501, 0x1558, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BA1, 0x95e1, 0x1558, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BA1, 0x2000, 0x1a58, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BA1, 0x1032, 0x1d05, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BA2, 0x0000, 0x0000, "NVIDIA GeForce GTX 1070" },
+ { 0x1BB0, 0x0000, 0x0000, "Quadro P5000" },
+ { 0x1BB1, 0x0000, 0x0000, "Quadro P4000" },
+ { 0x1BB1, 0x11a3, 0x15c3, "EIZO Quadro MED-XN91" },
+ { 0x1BB3, 0x11d8, 0x10de, "Tesla P4" },
+ { 0x1BB3, 0x11e0, 0x10de, "Tesla P4" },
+ { 0x1BB4, 0x0000, 0x0000, "Tesla P6" },
+ { 0x1BB5, 0x0000, 0x0000, "Quadro P5200" },
+ { 0x1BB5, 0x2268, 0x17aa, "Quadro P5200 with Max-Q Design" },
+ { 0x1BB5, 0x2269, 0x17aa, "Quadro P5200 with Max-Q Design" },
+ { 0x1BB6, 0x0000, 0x0000, "Quadro P5000" },
+ { 0x1BB7, 0x0000, 0x0000, "Quadro P4000" },
+ { 0x1BB7, 0x11e9, 0x1462, "Quadro P4000 with Max-Q Design" },
+ { 0x1BB7, 0x9501, 0x1558, "Quadro P4000 with Max-Q Design" },
+ { 0x1BB8, 0x0000, 0x0000, "Quadro P3000" },
+ { 0x1BB9, 0x0000, 0x0000, "Quadro P4200" },
+ { 0x1BB9, 0x95e1, 0x1558, "Quadro P4200 with Max-Q Design" },
+ { 0x1BB9, 0x2268, 0x17aa, "Quadro P4200 with Max-Q Design" },
+ { 0x1BB9, 0x2269, 0x17aa, "Quadro P4200 with Max-Q Design" },
+ { 0x1BBB, 0x0000, 0x0000, "Quadro P3200" },
+ { 0x1BBB, 0x225f, 0x17aa, "Quadro P3200 with Max-Q Design" },
+ { 0x1BBB, 0x2262, 0x17aa, "Quadro P3200 with Max-Q Design" },
+ { 0x1BC7, 0x0000, 0x0000, "NVIDIA P104-101" },
+ { 0x1BE0, 0x0000, 0x0000, "NVIDIA GeForce GTX 1080" },
+ { 0x1BE0, 0x1221, 0x1025, "NVIDIA GeForce GTX 1080 with Max-Q Design" },
+ { 0x1BE0, 0x123e, 0x1025, "NVIDIA GeForce GTX 1080 with Max-Q Design" },
+ { 0x1BE0, 0x07c0, 0x1028, "NVIDIA GeForce GTX 1080 with Max-Q Design" },
+ { 0x1BE0, 0x0876, 0x1028, "NVIDIA GeForce GTX 1080 with Max-Q Design" },
+ { 0x1BE0, 0x088b, 0x1028, "NVIDIA GeForce GTX 1080 with Max-Q Design" },
+ { 0x1BE0, 0x1031, 0x1043, "NVIDIA GeForce GTX 1080 with Max-Q Design" },
+ { 0x1BE0, 0x1bf0, 0x1043, "NVIDIA GeForce GTX 1080 with Max-Q Design" },
+ { 0x1BE0, 0x355b, 0x1458, "NVIDIA GeForce GTX 1080 with Max-Q Design" },
+ { 0x1BE1, 0x0000, 0x0000, "NVIDIA GeForce GTX 1070" },
+ { 0x1BE1, 0x84db, 0x103c, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BE1, 0x16f0, 0x1043, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1BE1, 0x2009, 0x3842, "NVIDIA GeForce GTX 1070 with Max-Q Design" },
+ { 0x1C02, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 3GB" },
+ { 0x1C03, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 6GB" },
+ { 0x1C04, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 5GB" },
+ { 0x1C06, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 6GB" },
+ { 0x1C07, 0x0000, 0x0000, "NVIDIA P106-100" },
+ { 0x1C09, 0x0000, 0x0000, "NVIDIA P106-090" },
+ { 0x1C20, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060" },
+ { 0x1C20, 0x0802, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C20, 0x0803, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C20, 0x0825, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C20, 0x0827, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C20, 0x0885, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C20, 0x0886, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C20, 0x8467, 0x103c, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C20, 0x8478, 0x103c, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C20, 0x8581, 0x103c, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C20, 0x1244, 0x1462, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C20, 0x95e5, 0x1558, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C20, 0x39b9, 0x17aa, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C20, 0x2000, 0x1a58, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C20, 0x2001, 0x1a58, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C20, 0x1059, 0x1d05, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C21, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050 Ti" },
+ { 0x1C22, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" },
+ { 0x1C23, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060" },
+ { 0x1C30, 0x0000, 0x0000, "Quadro P2000" },
+ { 0x1C30, 0x11b3, 0x15c3, "EIZO Quadro MED-XN71" },
+ { 0x1C31, 0x0000, 0x0000, "Quadro P2200" },
+ { 0x1C31, 0x131b, 0x15c3, "EIZO Quadro MED-XN72" },
+ { 0x1C60, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060" },
+ { 0x1C60, 0x8390, 0x103c, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C60, 0x8467, 0x103c, "NVIDIA GeForce GTX 1060 with Max-Q Design" },
+ { 0x1C61, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050 Ti" },
+ { 0x1C62, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" },
+ { 0x1C81, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" },
+ { 0x1C82, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050 Ti" },
+ { 0x1C83, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" },
+ { 0x1C8C, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050 Ti" },
+ { 0x1C8C, 0x087c, 0x1028, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" },
+ { 0x1C8C, 0x8519, 0x103c, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" },
+ { 0x1C8C, 0x856a, 0x103c, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" },
+ { 0x1C8C, 0x123c, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" },
+ { 0x1C8C, 0x126c, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" },
+ { 0x1C8C, 0x2266, 0x17aa, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" },
+ { 0x1C8C, 0x2267, 0x17aa, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" },
+ { 0x1C8C, 0x39ff, 0x17aa, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" },
+ { 0x1C8D, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" },
+ { 0x1C8D, 0x84e9, 0x103c, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C8D, 0x84eb, 0x103c, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C8D, 0x856a, 0x103c, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C8D, 0x114f, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C8D, 0x1341, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C8D, 0x1351, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C8D, 0x1481, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C8D, 0x14a1, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C8D, 0x18c1, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C8D, 0x1b5e, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C8D, 0x126c, 0x1462, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C8D, 0x1217, 0x152d, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C8D, 0x1707, 0x1d72, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C8F, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050 Ti" },
+ { 0x1C8F, 0x123c, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" },
+ { 0x1C8F, 0x126c, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" },
+ { 0x1C8F, 0x126d, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" },
+ { 0x1C8F, 0x1284, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" },
+ { 0x1C8F, 0x1297, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" },
+ { 0x1C90, 0x0000, 0x0000, "NVIDIA GeForce MX150" },
+ { 0x1C90, 0x09c1, 0x1028, "NVIDIA GeForce MX250" },
+ { 0x1C91, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" },
+ { 0x1C91, 0x856a, 0x103c, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C91, 0x86e3, 0x103c, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C91, 0x1232, 0x152d, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C92, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" },
+ { 0x1C92, 0x149f, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C92, 0x1b31, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C92, 0x1245, 0x1462, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C92, 0x126c, 0x1462, "NVIDIA GeForce GTX 1050 with Max-Q Design" },
+ { 0x1C94, 0x0000, 0x0000, "NVIDIA GeForce MX350" },
+ { 0x1C96, 0x0000, 0x0000, "NVIDIA GeForce MX350" },
+ { 0x1CB1, 0x0000, 0x0000, "Quadro P1000" },
+ { 0x1CB1, 0x11bc, 0x15c3, "EIZO Quadro MED-XN51LP" },
+ { 0x1CB2, 0x0000, 0x0000, "Quadro P600" },
+ { 0x1CB3, 0x0000, 0x0000, "Quadro P400" },
+ { 0x1CB3, 0x11be, 0x15c3, "EIZO Quadro MED-XN31LP" },
+ { 0x1CB6, 0x0000, 0x0000, "Quadro P620" },
+ { 0x1CBA, 0x0000, 0x0000, "Quadro P2000" },
+ { 0x1CBA, 0x2266, 0x17aa, "Quadro P2000 with Max-Q Design" },
+ { 0x1CBA, 0x2267, 0x17aa, "Quadro P2000 with Max-Q Design" },
+ { 0x1CBB, 0x0000, 0x0000, "Quadro P1000" },
+ { 0x1CBC, 0x0000, 0x0000, "Quadro P600" },
+ { 0x1CBD, 0x0000, 0x0000, "Quadro P620" },
+ { 0x1CFA, 0x0000, 0x0000, "Quadro P2000" },
+ { 0x1CFB, 0x0000, 0x0000, "Quadro P1000" },
+ { 0x1CFB, 0x2600, 0x102b, "Matrox D-Series D1480" },
+ { 0x1CFB, 0x2700, 0x102b, "Matrox D-Series D1450" },
+ { 0x1D01, 0x0000, 0x0000, "NVIDIA GeForce GT 1030" },
+ { 0x1D02, 0x0000, 0x0000, "NVIDIA GeForce GT 1010" },
+ { 0x1D10, 0x0000, 0x0000, "NVIDIA GeForce MX150" },
+ { 0x1D11, 0x0000, 0x0000, "NVIDIA GeForce MX230" },
+ { 0x1D12, 0x0000, 0x0000, "NVIDIA GeForce MX150" },
+ { 0x1D13, 0x0000, 0x0000, "NVIDIA GeForce MX250" },
+ { 0x1D16, 0x0000, 0x0000, "NVIDIA GeForce MX330" },
+ { 0x1D33, 0x0000, 0x0000, "Quadro P500" },
+ { 0x1D34, 0x0000, 0x0000, "Quadro P520" },
+ { 0x1D52, 0x0000, 0x0000, "NVIDIA GeForce MX250" },
+ { 0x1D81, 0x0000, 0x0000, "NVIDIA TITAN V" },
+ { 0x1DB1, 0x0000, 0x0000, "Tesla V100-SXM2-16GB" },
+ { 0x1DB1, 0x1307, 0x10de, "Tesla V100-SXM2-16GB-LS" },
+ { 0x1DB3, 0x0000, 0x0000, "Tesla V100-FHHL-16GB" },
+ { 0x1DB4, 0x0000, 0x0000, "Tesla V100-PCIE-16GB" },
+ { 0x1DB4, 0x1306, 0x10de, "Tesla V100-PCIE-16GB-LS" },
+ { 0x1DB5, 0x0000, 0x0000, "Tesla V100-SXM2-32GB" },
+ { 0x1DB5, 0x1308, 0x10de, "Tesla V100-SXM2-32GB-LS" },
+ { 0x1DB6, 0x0000, 0x0000, "Tesla V100-PCIE-32GB" },
+ { 0x1DB7, 0x0000, 0x0000, "Tesla V100-DGXS-32GB" },
+ { 0x1DB8, 0x0000, 0x0000, "Tesla V100-SXM3-32GB" },
+ { 0x1DB8, 0x131d, 0x10de, "Tesla V100-SXM3-32GB-H" },
+ { 0x1DBA, 0x0000, 0x0000, "Quadro GV100" },
+ { 0x1DBA, 0x12eb, 0x10de, "NVIDIA TITAN V JHH Special Edition" },
+ { 0x1DF0, 0x0000, 0x0000, "Tesla PG500-216" },
+ { 0x1DF2, 0x0000, 0x0000, "Tesla PG503-216" },
+ { 0x1DF6, 0x0000, 0x0000, "Tesla V100S-PCIE-32GB" },
+ { 0x1E02, 0x0000, 0x0000, "NVIDIA TITAN RTX" },
+ { 0x1E04, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080 Ti" },
+ { 0x1E07, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080 Ti" },
+ { 0x1E09, 0x0000, 0x0000, "NVIDIA CMP 50HX" },
+ { 0x1E30, 0x0000, 0x0000, "Quadro RTX 6000" },
+ { 0x1E30, 0x129e, 0x1028, "Quadro RTX 8000" },
+ { 0x1E30, 0x129e, 0x103c, "Quadro RTX 8000" },
+ { 0x1E30, 0x129e, 0x10de, "Quadro RTX 8000" },
+ { 0x1E36, 0x0000, 0x0000, "Quadro RTX 6000" },
+ { 0x1E78, 0x13d8, 0x10de, "Quadro RTX 8000" },
+ { 0x1E78, 0x13d9, 0x10de, "Quadro RTX 6000" },
+ { 0x1E81, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080 SUPER" },
+ { 0x1E82, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080" },
+ { 0x1E84, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070 SUPER" },
+ { 0x1E87, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080" },
+ { 0x1E89, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" },
+ { 0x1E90, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080" },
+ { 0x1E90, 0x1375, 0x1025, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x08a1, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x08a2, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x08ea, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x08eb, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x08ec, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x08ed, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x08ee, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x08ef, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x093b, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x093c, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x8572, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x8573, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x8602, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x8606, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x86c6, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x86c7, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x87a6, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x87a7, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x131f, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x137f, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x141f, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x1751, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x1660, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x1661, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x1662, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x75a6, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x75a7, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x86a6, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x86a7, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x1274, 0x1462, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x1277, 0x1462, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x1220, 0x152d, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x95e1, 0x1558, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x97e1, 0x1558, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x2002, 0x1a58, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x2005, 0x1a58, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x2007, 0x1a58, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x3000, 0x1a58, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x3001, 0x1a58, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E90, 0x1069, 0x1d05, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1E91, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070 Super" },
+ { 0x1E91, 0x8607, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x8736, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x8738, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x8772, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x878a, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x878b, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x1e61, 0x1043, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x1511, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x75b3, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x75b4, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x76b2, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x76b3, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x78a2, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x78a3, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x86b2, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x86b3, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x12ae, 0x1462, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x12b0, 0x1462, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x12c6, 0x1462, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x22c3, 0x17aa, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x22c5, 0x17aa, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x2009, 0x1a58, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x200a, 0x1a58, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x3002, 0x1a58, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E91, 0x3012, 0x8086, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1E93, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080 Super" },
+ { 0x1E93, 0x1401, 0x1025, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x149c, 0x1025, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x09d2, 0x1028, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x8607, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x86c7, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x8736, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x8738, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x8772, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x87a6, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x87a7, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x75b1, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x75b2, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x76b0, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x76b1, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x78a0, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x78a1, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x86b0, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x86b1, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x12ae, 0x1462, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x12b0, 0x1462, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x12b4, 0x1462, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x12c6, 0x1462, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x50d3, 0x1558, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x70d1, 0x1558, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x22c3, 0x17aa, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x22c5, 0x17aa, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x2009, 0x1a58, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x200a, 0x1a58, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x3002, 0x1a58, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1E93, 0x1089, 0x1d05, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1EB0, 0x0000, 0x0000, "Quadro RTX 5000" },
+ { 0x1EB1, 0x0000, 0x0000, "Quadro RTX 4000" },
+ { 0x1EB1, 0x12a0, 0x15c3, "EIZO Quadro MED-XN92" },
+ { 0x1EB5, 0x0000, 0x0000, "Quadro RTX 5000" },
+ { 0x1EB5, 0x1375, 0x1025, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x1401, 0x1025, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x149c, 0x1025, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x09c3, 0x1028, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x8736, 0x103c, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x8738, 0x103c, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x8772, 0x103c, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x8780, 0x103c, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x8782, 0x103c, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x8783, 0x103c, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x8785, 0x103c, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x1dd1, 0x1043, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x1274, 0x1462, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x12b0, 0x1462, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x12c6, 0x1462, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x22b8, 0x17aa, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x22ba, 0x17aa, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x2005, 0x1a58, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x2007, 0x1a58, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x2008, 0x1a58, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB5, 0x200a, 0x1a58, "Quadro RTX 5000 with Max-Q Design" },
+ { 0x1EB6, 0x0000, 0x0000, "Quadro RTX 4000" },
+ { 0x1EB6, 0x09c3, 0x1028, "Quadro RTX 4000 with Max-Q Design" },
+ { 0x1EB6, 0x8736, 0x103c, "Quadro RTX 4000 with Max-Q Design" },
+ { 0x1EB6, 0x8738, 0x103c, "Quadro RTX 4000 with Max-Q Design" },
+ { 0x1EB6, 0x8772, 0x103c, "Quadro RTX 4000 with Max-Q Design" },
+ { 0x1EB6, 0x8780, 0x103c, "Quadro RTX 4000 with Max-Q Design" },
+ { 0x1EB6, 0x8782, 0x103c, "Quadro RTX 4000 with Max-Q Design" },
+ { 0x1EB6, 0x8783, 0x103c, "Quadro RTX 4000 with Max-Q Design" },
+ { 0x1EB6, 0x8785, 0x103c, "Quadro RTX 4000 with Max-Q Design" },
+ { 0x1EB6, 0x1274, 0x1462, "Quadro RTX 4000 with Max-Q Design" },
+ { 0x1EB6, 0x1277, 0x1462, "Quadro RTX 4000 with Max-Q Design" },
+ { 0x1EB6, 0x12b0, 0x1462, "Quadro RTX 4000 with Max-Q Design" },
+ { 0x1EB6, 0x12c6, 0x1462, "Quadro RTX 4000 with Max-Q Design" },
+ { 0x1EB6, 0x22b8, 0x17aa, "Quadro RTX 4000 with Max-Q Design" },
+ { 0x1EB6, 0x22ba, 0x17aa, "Quadro RTX 4000 with Max-Q Design" },
+ { 0x1EB8, 0x12a2, 0x10de, "Tesla T4" },
+ { 0x1EC2, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070 SUPER" },
+ { 0x1EC7, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070 SUPER" },
+ { 0x1ED0, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080" },
+ { 0x1ED0, 0x132d, 0x1025, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1ED0, 0x08ed, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1ED0, 0x08ee, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1ED0, 0x08ef, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1ED0, 0x8572, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1ED0, 0x8573, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1ED0, 0x8600, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1ED0, 0x8605, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1ED0, 0x138f, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1ED0, 0x15c1, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1ED0, 0x3fee, 0x17aa, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1ED0, 0x3ffe, 0x17aa, "NVIDIA GeForce RTX 2080 with Max-Q Design" },
+ { 0x1ED1, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070 Super" },
+ { 0x1ED1, 0x1432, 0x1025, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1ED1, 0x8746, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1ED1, 0x878a, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1ED1, 0x165f, 0x1043, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1ED1, 0xc192, 0x144d, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1ED1, 0x3fce, 0x17aa, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1ED1, 0x3fcf, 0x17aa, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1ED1, 0x3fd0, 0x17aa, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" },
+ { 0x1ED3, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080 Super" },
+ { 0x1ED3, 0x1432, 0x1025, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1ED3, 0x09d1, 0x1028, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1ED3, 0x8746, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1ED3, 0x878a, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1ED3, 0x1d61, 0x1043, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1ED3, 0x1e51, 0x1043, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1ED3, 0x1f01, 0x1043, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1ED3, 0x3fce, 0x17aa, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1ED3, 0x3fcf, 0x17aa, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1ED3, 0x3fd0, 0x17aa, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" },
+ { 0x1EF5, 0x0000, 0x0000, "Quadro RTX 5000" },
+ { 0x1F02, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" },
+ { 0x1F03, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" },
+ { 0x1F06, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060 SUPER" },
+ { 0x1F07, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" },
+ { 0x1F08, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" },
+ { 0x1F0A, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" },
+ { 0x1F0B, 0x0000, 0x0000, "NVIDIA CMP 40HX" },
+ { 0x1F10, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" },
+ { 0x1F10, 0x132d, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x1342, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x08a1, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x08a2, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x08ea, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x08eb, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x08ec, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x08ed, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x08ee, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x08ef, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x093b, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x093c, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x8572, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x8573, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x8602, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x8606, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x132f, 0x1043, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x136f, 0x1043, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x1881, 0x1043, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x1e6e, 0x1043, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x1658, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x1663, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x1664, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x75a4, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x75a5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x86a4, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x86a5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x1274, 0x1462, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x1277, 0x1462, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x95e1, 0x1558, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x97e1, 0x1558, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x2002, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x2005, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x2007, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x3000, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x3001, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x105e, 0x1d05, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x1070, 0x1d05, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x2087, 0x1d05, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F10, 0x2087, 0x8086, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F11, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" },
+ { 0x1F12, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" },
+ { 0x1F12, 0x098f, 0x1028, "NVIDIA GeForce RTX 2060 with Max-Q Design" },
+ { 0x1F12, 0x8741, 0x103c, "NVIDIA GeForce RTX 2060 with Max-Q Design" },
+ { 0x1F12, 0x8744, 0x103c, "NVIDIA GeForce RTX 2060 with Max-Q Design" },
+ { 0x1F12, 0x878e, 0x103c, "NVIDIA GeForce RTX 2060 with Max-Q Design" },
+ { 0x1F12, 0x880e, 0x103c, "NVIDIA GeForce RTX 2060 with Max-Q Design" },
+ { 0x1F12, 0x1e11, 0x1043, "NVIDIA GeForce RTX 2060 with Max-Q Design" },
+ { 0x1F12, 0x1f11, 0x1043, "NVIDIA GeForce RTX 2060 with Max-Q Design" },
+ { 0x1F12, 0x12d9, 0x1462, "NVIDIA GeForce RTX 2060 with Max-Q Design" },
+ { 0x1F12, 0x3801, 0x17aa, "NVIDIA GeForce RTX 2060 with Max-Q Design" },
+ { 0x1F12, 0x3802, 0x17aa, "NVIDIA GeForce RTX 2060 with Max-Q Design" },
+ { 0x1F12, 0x3803, 0x17aa, "NVIDIA GeForce RTX 2060 with Max-Q Design" },
+ { 0x1F14, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" },
+ { 0x1F14, 0x1401, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x1432, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x1442, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x1446, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x147d, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x09e2, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x09f3, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x8607, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x86c6, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x86c7, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x8736, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x8738, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x8746, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x8772, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x878a, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x878b, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x87a6, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x87a7, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x174f, 0x1043, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x1512, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x75b5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x75b6, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x76b4, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x76b5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x78a4, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x78a5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x86b4, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x86b5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x12ae, 0x1462, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x12b0, 0x1462, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x12c6, 0x1462, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x50d3, 0x1558, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x70d1, 0x1558, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x200c, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x2011, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F14, 0x3002, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F15, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" },
+ { 0x1F36, 0x0000, 0x0000, "Quadro RTX 3000" },
+ { 0x1F36, 0x0990, 0x1028, "Quadro RTX 3000 with Max-Q Design" },
+ { 0x1F36, 0x8736, 0x103c, "Quadro RTX 3000 with Max-Q Design" },
+ { 0x1F36, 0x8738, 0x103c, "Quadro RTX 3000 with Max-Q Design" },
+ { 0x1F36, 0x8772, 0x103c, "Quadro RTX 3000 with Max-Q Design" },
+ { 0x1F36, 0x13cf, 0x1043, "Quadro RTX 3000 with Max-Q Design" },
+ { 0x1F36, 0x0032, 0x1414, "Quadro RTX 3000 with Max-Q Design" },
+ { 0x1F42, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060 SUPER" },
+ { 0x1F47, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060 SUPER" },
+ { 0x1F50, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" },
+ { 0x1F50, 0x08ed, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F50, 0x08ee, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F50, 0x08ef, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F50, 0x8572, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F50, 0x8573, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F50, 0x8574, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F50, 0x8600, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F50, 0x8605, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F50, 0x3fee, 0x17aa, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F50, 0x3ffe, 0x17aa, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F51, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" },
+ { 0x1F54, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" },
+ { 0x1F54, 0x878a, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F54, 0x3fce, 0x17aa, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F54, 0x3fcf, 0x17aa, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F54, 0x3fd0, 0x17aa, "NVIDIA GeForce RTX 2070 with Max-Q Design" },
+ { 0x1F55, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" },
+ { 0x1F76, 0x0000, 0x0000, "Quadro RTX 3000" },
+ { 0x1F76, 0x2800, 0x102b, "Matrox D-Series D2450" },
+ { 0x1F76, 0x2900, 0x102b, "Matrox D-Series D2480" },
+ { 0x1F82, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" },
+ { 0x1F83, 0x0000, 0x0000, "NVIDIA GeForce GTX 1630" },
+ { 0x1F91, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" },
+ { 0x1F91, 0x863e, 0x103c, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0x86e7, 0x103c, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0x86e8, 0x103c, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0x12cf, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0x156f, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0x0032, 0x1414, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0xc822, 0x144d, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0x127e, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0x1281, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0x1284, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0x1285, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0x129c, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0x229f, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0x3802, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0x3806, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0x3f1a, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F91, 0x1001, 0x1a58, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F95, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650 Ti" },
+ { 0x1F95, 0x1479, 0x1025, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x147a, 0x1025, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x147b, 0x1025, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x147c, 0x1025, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x86e7, 0x103c, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x86e8, 0x103c, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x8815, 0x103c, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x1dff, 0x1043, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x1e1f, 0x1043, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0xc838, 0x144d, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x12bd, 0x1462, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x12c5, 0x1462, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x12d2, 0x1462, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x22c0, 0x17aa, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x22c1, 0x17aa, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x3837, 0x17aa, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x3f95, 0x17aa, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x1003, 0x1a58, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x1006, 0x1a58, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x1007, 0x1a58, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F95, 0x3e30, 0x1e83, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" },
+ { 0x1F96, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" },
+ { 0x1F96, 0x1297, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F97, 0x0000, 0x0000, "NVIDIA GeForce MX450" },
+ { 0x1F98, 0x0000, 0x0000, "NVIDIA GeForce MX450" },
+ { 0x1F99, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" },
+ { 0x1F99, 0x1479, 0x1025, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F99, 0x147a, 0x1025, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F99, 0x147b, 0x1025, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F99, 0x147c, 0x1025, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F99, 0x8815, 0x103c, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F99, 0x13b2, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F99, 0x1402, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F99, 0x1902, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F99, 0x12bd, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F99, 0x12c5, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F99, 0x12d2, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F99, 0x22da, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F99, 0x3f93, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F99, 0x3e30, 0x1e83, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F9C, 0x0000, 0x0000, "NVIDIA GeForce MX450" },
+ { 0x1F9D, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" },
+ { 0x1F9D, 0x128d, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F9D, 0x130d, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F9D, 0x149c, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F9D, 0x185c, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F9D, 0x189c, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F9D, 0x12f4, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F9D, 0x1302, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F9D, 0x131b, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F9D, 0x1326, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F9D, 0x132a, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F9D, 0x132e, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" },
+ { 0x1F9F, 0x0000, 0x0000, "NVIDIA GeForce MX550" },
+ { 0x1FA0, 0x0000, 0x0000, "NVIDIA GeForce MX550" },
+ { 0x1FB0, 0x12db, 0x1028, "NVIDIA T1000" },
+ { 0x1FB0, 0x12db, 0x103c, "NVIDIA T1000" },
+ { 0x1FB0, 0x8a80, 0x103c, "NVIDIA T1000" },
+ { 0x1FB0, 0x12db, 0x10de, "NVIDIA T1000" },
+ { 0x1FB0, 0x1485, 0x10de, "NVIDIA DGX Display" },
+ { 0x1FB0, 0x12db, 0x17aa, "NVIDIA T1000" },
+ { 0x1FB1, 0x1488, 0x1028, "NVIDIA T600" },
+ { 0x1FB1, 0x1488, 0x103c, "NVIDIA T600" },
+ { 0x1FB1, 0x8a80, 0x103c, "NVIDIA T600" },
+ { 0x1FB1, 0x1488, 0x10de, "NVIDIA T600" },
+ { 0x1FB1, 0x1488, 0x17aa, "NVIDIA T600" },
+ { 0x1FB2, 0x1489, 0x1028, "NVIDIA T400" },
+ { 0x1FB2, 0x1489, 0x103c, "NVIDIA T400" },
+ { 0x1FB2, 0x8a80, 0x103c, "NVIDIA T400" },
+ { 0x1FB2, 0x1489, 0x10de, "NVIDIA T400" },
+ { 0x1FB2, 0x1489, 0x17aa, "NVIDIA T400" },
+ { 0x1FB6, 0x0000, 0x0000, "NVIDIA T600 Laptop GPU" },
+ { 0x1FB7, 0x0000, 0x0000, "NVIDIA T550 Laptop GPU" },
+ { 0x1FB8, 0x0000, 0x0000, "Quadro T2000" },
+ { 0x1FB8, 0x097e, 0x1028, "Quadro T2000 with Max-Q Design" },
+ { 0x1FB8, 0x8736, 0x103c, "Quadro T2000 with Max-Q Design" },
+ { 0x1FB8, 0x8738, 0x103c, "Quadro T2000 with Max-Q Design" },
+ { 0x1FB8, 0x8772, 0x103c, "Quadro T2000 with Max-Q Design" },
+ { 0x1FB8, 0x8780, 0x103c, "Quadro T2000 with Max-Q Design" },
+ { 0x1FB8, 0x8782, 0x103c, "Quadro T2000 with Max-Q Design" },
+ { 0x1FB8, 0x8783, 0x103c, "Quadro T2000 with Max-Q Design" },
+ { 0x1FB8, 0x8785, 0x103c, "Quadro T2000 with Max-Q Design" },
+ { 0x1FB8, 0x87f0, 0x103c, "Quadro T2000 with Max-Q Design" },
+ { 0x1FB8, 0x1281, 0x1462, "Quadro T2000 with Max-Q Design" },
+ { 0x1FB8, 0x12bd, 0x1462, "Quadro T2000 with Max-Q Design" },
+ { 0x1FB8, 0x22c0, 0x17aa, "Quadro T2000 with Max-Q Design" },
+ { 0x1FB8, 0x22c1, 0x17aa, "Quadro T2000 with Max-Q Design" },
+ { 0x1FB9, 0x0000, 0x0000, "Quadro T1000" },
+ { 0x1FB9, 0x1479, 0x1025, "Quadro T1000 with Max-Q Design" },
+ { 0x1FB9, 0x147a, 0x1025, "Quadro T1000 with Max-Q Design" },
+ { 0x1FB9, 0x147b, 0x1025, "Quadro T1000 with Max-Q Design" },
+ { 0x1FB9, 0x147c, 0x1025, "Quadro T1000 with Max-Q Design" },
+ { 0x1FB9, 0x8736, 0x103c, "Quadro T1000 with Max-Q Design" },
+ { 0x1FB9, 0x8738, 0x103c, "Quadro T1000 with Max-Q Design" },
+ { 0x1FB9, 0x8772, 0x103c, "Quadro T1000 with Max-Q Design" },
+ { 0x1FB9, 0x8780, 0x103c, "Quadro T1000 with Max-Q Design" },
+ { 0x1FB9, 0x8782, 0x103c, "Quadro T1000 with Max-Q Design" },
+ { 0x1FB9, 0x8783, 0x103c, "Quadro T1000 with Max-Q Design" },
+ { 0x1FB9, 0x8785, 0x103c, "Quadro T1000 with Max-Q Design" },
+ { 0x1FB9, 0x87f0, 0x103c, "Quadro T1000 with Max-Q Design" },
+ { 0x1FB9, 0x12bd, 0x1462, "Quadro T1000 with Max-Q Design" },
+ { 0x1FB9, 0x22c0, 0x17aa, "Quadro T1000 with Max-Q Design" },
+ { 0x1FB9, 0x22c1, 0x17aa, "Quadro T1000 with Max-Q Design" },
+ { 0x1FBA, 0x0000, 0x0000, "NVIDIA T600 Laptop GPU" },
+ { 0x1FBB, 0x0000, 0x0000, "NVIDIA T500" },
+ { 0x1FBC, 0x0000, 0x0000, "NVIDIA T1200 Laptop GPU" },
+ { 0x1FDD, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" },
+ { 0x1FF0, 0x1612, 0x1028, "NVIDIA T1000 8GB" },
+ { 0x1FF0, 0x1612, 0x103c, "NVIDIA T1000 8GB" },
+ { 0x1FF0, 0x8a80, 0x103c, "NVIDIA T1000 8GB" },
+ { 0x1FF0, 0x1612, 0x10de, "NVIDIA T1000 8GB" },
+ { 0x1FF0, 0x1612, 0x17aa, "NVIDIA T1000 8GB" },
+ { 0x1FF2, 0x1613, 0x1028, "NVIDIA T400 4GB" },
+ { 0x1FF2, 0x1613, 0x103c, "NVIDIA T400 4GB" },
+ { 0x1FF2, 0x18ff, 0x103c, "NVIDIA T400E" },
+ { 0x1FF2, 0x8a80, 0x103c, "NVIDIA T400 4GB" },
+ { 0x1FF2, 0x1613, 0x10de, "NVIDIA T400 4GB" },
+ { 0x1FF2, 0x18ff, 0x10de, "NVIDIA T400E" },
+ { 0x1FF2, 0x1613, 0x17aa, "NVIDIA T400 4GB" },
+ { 0x1FF2, 0x18ff, 0x17aa, "NVIDIA T400E" },
+ { 0x1FF9, 0x0000, 0x0000, "Quadro T1000" },
+ { 0x20B0, 0x0000, 0x0000, "NVIDIA A100-SXM4-40GB" },
+ { 0x20B0, 0x1450, 0x10de, "NVIDIA A100-PG509-200" },
+ { 0x20B2, 0x1463, 0x10de, "NVIDIA A100-SXM4-80GB" },
+ { 0x20B2, 0x147f, 0x10de, "NVIDIA A100-SXM4-80GB" },
+ { 0x20B2, 0x1622, 0x10de, "NVIDIA A100-SXM4-80GB" },
+ { 0x20B2, 0x1623, 0x10de, "NVIDIA A100-SXM4-80GB" },
+ { 0x20B2, 0x1625, 0x10de, "NVIDIA PG509-210" },
+ { 0x20B3, 0x14a7, 0x10de, "NVIDIA A100-SXM-64GB" },
+ { 0x20B3, 0x14a8, 0x10de, "NVIDIA A100-SXM-64GB" },
+ { 0x20B5, 0x1533, 0x10de, "NVIDIA A100 80GB PCIe" },
+ { 0x20B5, 0x1642, 0x10de, "NVIDIA A100 80GB PCIe" },
+ { 0x20B6, 0x1492, 0x10de, "NVIDIA PG506-232" },
+ { 0x20B7, 0x1532, 0x10de, "NVIDIA A30" },
+ { 0x20B7, 0x1804, 0x10de, "NVIDIA A30" },
+ { 0x20B7, 0x1852, 0x10de, "NVIDIA A30" },
+ { 0x20BD, 0x17f4, 0x10de, "NVIDIA A800-SXM4-40GB" },
+ { 0x20F1, 0x145f, 0x10de, "NVIDIA A100-PCIE-40GB" },
+ { 0x20F3, 0x179b, 0x10de, "NVIDIA A800-SXM4-80GB" },
+ { 0x20F3, 0x179c, 0x10de, "NVIDIA A800-SXM4-80GB" },
+ { 0x20F3, 0x179d, 0x10de, "NVIDIA A800-SXM4-80GB" },
+ { 0x20F3, 0x179e, 0x10de, "NVIDIA A800-SXM4-80GB" },
+ { 0x20F3, 0x179f, 0x10de, "NVIDIA A800-SXM4-80GB" },
+ { 0x20F3, 0x17a0, 0x10de, "NVIDIA A800-SXM4-80GB" },
+ { 0x20F3, 0x17a1, 0x10de, "NVIDIA A800-SXM4-80GB" },
+ { 0x20F3, 0x17a2, 0x10de, "NVIDIA A800-SXM4-80GB" },
+ { 0x20F5, 0x1799, 0x10de, "NVIDIA A800 80GB PCIe" },
+ { 0x20F5, 0x179a, 0x10de, "NVIDIA A800 80GB PCIe LC" },
+ { 0x20F6, 0x180a, 0x1028, "NVIDIA A800 40GB Active" },
+ { 0x20F6, 0x180a, 0x103c, "NVIDIA A800 40GB Active" },
+ { 0x20F6, 0x180a, 0x10de, "NVIDIA A800 40GB Active" },
+ { 0x20F6, 0x180a, 0x17aa, "NVIDIA A800 40GB Active" },
+ { 0x20FD, 0x17f8, 0x10de, "NVIDIA AX800" },
+ { 0x2182, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660 Ti" },
+ { 0x2184, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660" },
+ { 0x2187, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650 SUPER" },
+ { 0x2188, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" },
+ { 0x2189, 0x0000, 0x0000, "NVIDIA CMP 30HX" },
+ { 0x2191, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660 Ti" },
+ { 0x2191, 0x0949, 0x1028, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x85fb, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x85fe, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x86d6, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x8741, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x8744, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x878d, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x87af, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x87b3, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x171f, 0x1043, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x17ef, 0x1043, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x18d1, 0x1043, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x0032, 0x1414, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x128a, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x128b, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x12c6, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x12cb, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x12cc, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x12d9, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x380c, 0x17aa, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x381d, 0x17aa, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2191, 0x381e, 0x17aa, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" },
+ { 0x2192, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650 Ti" },
+ { 0x21C4, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660 SUPER" },
+ { 0x21D1, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660 Ti" },
+ { 0x2203, 0x0000, 0x0000, "NVIDIA GeForce RTX 3090 Ti" },
+ { 0x2204, 0x0000, 0x0000, "NVIDIA GeForce RTX 3090" },
+ { 0x2206, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080" },
+ { 0x2207, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Ti" },
+ { 0x2208, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti" },
+ { 0x220A, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080" },
+ { 0x220D, 0x0000, 0x0000, "NVIDIA CMP 90HX" },
+ { 0x2216, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080" },
+ { 0x2230, 0x1459, 0x1028, "NVIDIA RTX A6000" },
+ { 0x2230, 0x1459, 0x103c, "NVIDIA RTX A6000" },
+ { 0x2230, 0x1459, 0x10de, "NVIDIA RTX A6000" },
+ { 0x2230, 0x1459, 0x17aa, "NVIDIA RTX A6000" },
+ { 0x2231, 0x147e, 0x1028, "NVIDIA RTX A5000" },
+ { 0x2231, 0x147e, 0x103c, "NVIDIA RTX A5000" },
+ { 0x2231, 0x147e, 0x10de, "NVIDIA RTX A5000" },
+ { 0x2231, 0x147e, 0x17aa, "NVIDIA RTX A5000" },
+ { 0x2232, 0x163c, 0x1028, "NVIDIA RTX A4500" },
+ { 0x2232, 0x163c, 0x103c, "NVIDIA RTX A4500" },
+ { 0x2232, 0x163c, 0x10de, "NVIDIA RTX A4500" },
+ { 0x2232, 0x163c, 0x17aa, "NVIDIA RTX A4500" },
+ { 0x2233, 0x165a, 0x1028, "NVIDIA RTX A5500" },
+ { 0x2233, 0x165a, 0x103c, "NVIDIA RTX A5500" },
+ { 0x2233, 0x165a, 0x10de, "NVIDIA RTX A5500" },
+ { 0x2233, 0x165a, 0x17aa, "NVIDIA RTX A5500" },
+ { 0x2235, 0x145a, 0x10de, "NVIDIA A40" },
+ { 0x2236, 0x1482, 0x10de, "NVIDIA A10" },
+ { 0x2237, 0x152f, 0x10de, "NVIDIA A10G" },
+ { 0x2238, 0x1677, 0x10de, "NVIDIA A10M" },
+ { 0x2321, 0x1839, 0x10de, "NVIDIA H100 NVL" },
+ { 0x2322, 0x17a4, 0x10de, "NVIDIA H800 PCIe" },
+ { 0x2324, 0x17a6, 0x10de, "NVIDIA H800" },
+ { 0x2324, 0x17a8, 0x10de, "NVIDIA H800" },
+ { 0x2329, 0x198b, 0x10de, "NVIDIA H20" },
+ { 0x2329, 0x198c, 0x10de, "NVIDIA H20" },
+ { 0x232C, 0x2063, 0x10de, "NVIDIA H20-3e" },
+ { 0x2330, 0x16c0, 0x10de, "NVIDIA H100 80GB HBM3" },
+ { 0x2330, 0x16c1, 0x10de, "NVIDIA H100 80GB HBM3" },
+ { 0x2331, 0x1626, 0x10de, "NVIDIA H100 PCIe" },
+ { 0x2335, 0x18be, 0x10de, "NVIDIA H200" },
+ { 0x2335, 0x18bf, 0x10de, "NVIDIA H200" },
+ { 0x2339, 0x17fc, 0x10de, "NVIDIA H100" },
+ { 0x233A, 0x183a, 0x10de, "NVIDIA H800 NVL" },
+ { 0x233B, 0x1996, 0x10de, "NVIDIA H200 NVL" },
+ { 0x2342, 0x16eb, 0x10de, "NVIDIA GH200 120GB" },
+ { 0x2342, 0x1805, 0x10de, "NVIDIA GH200 120GB" },
+ { 0x2342, 0x1809, 0x10de, "NVIDIA GH200 480GB" },
+ { 0x2348, 0x18d2, 0x10de, "NVIDIA GH200 144G HBM3e" },
+ { 0x2414, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" },
+ { 0x2420, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" },
+ { 0x2438, 0x0000, 0x0000, "NVIDIA RTX A5500 Laptop GPU" },
+ { 0x2460, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" },
+ { 0x2482, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Ti" },
+ { 0x2484, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070" },
+ { 0x2486, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" },
+ { 0x2487, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060" },
+ { 0x2488, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070" },
+ { 0x2489, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" },
+ { 0x248A, 0x0000, 0x0000, "NVIDIA CMP 70HX" },
+ { 0x249C, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Laptop GPU" },
+ { 0x249C, 0x1194, 0x1d05, "NVIDIA GeForce RTX 3060 Laptop GPU" },
+ { 0x249D, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Laptop GPU" },
+ { 0x24A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Ti Laptop GPU" },
+ { 0x24A0, 0x1192, 0x1d05, "NVIDIA GeForce RTX 3060 Laptop GPU" },
+ { 0x24B0, 0x14ad, 0x1028, "NVIDIA RTX A4000" },
+ { 0x24B0, 0x14ad, 0x103c, "NVIDIA RTX A4000" },
+ { 0x24B0, 0x14ad, 0x10de, "NVIDIA RTX A4000" },
+ { 0x24B0, 0x14ad, 0x17aa, "NVIDIA RTX A4000" },
+ { 0x24B1, 0x1658, 0x10de, "NVIDIA RTX A4000H" },
+ { 0x24B6, 0x0000, 0x0000, "NVIDIA RTX A5000 Laptop GPU" },
+ { 0x24B7, 0x0000, 0x0000, "NVIDIA RTX A4000 Laptop GPU" },
+ { 0x24B8, 0x0000, 0x0000, "NVIDIA RTX A3000 Laptop GPU" },
+ { 0x24B9, 0x0000, 0x0000, "NVIDIA RTX A3000 12GB Laptop GPU" },
+ { 0x24BA, 0x0000, 0x0000, "NVIDIA RTX A4500 Laptop GPU" },
+ { 0x24BB, 0x0000, 0x0000, "NVIDIA RTX A3000 12GB Laptop GPU" },
+ { 0x24C7, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060" },
+ { 0x24C9, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" },
+ { 0x24DC, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Laptop GPU" },
+ { 0x24DD, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Laptop GPU" },
+ { 0x24E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Ti Laptop GPU" },
+ { 0x24FA, 0x0000, 0x0000, "NVIDIA RTX A4500 Embedded GPU" },
+ { 0x2503, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060" },
+ { 0x2504, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060" },
+ { 0x2507, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050" },
+ { 0x2508, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 OEM" },
+ { 0x2520, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Laptop GPU" },
+ { 0x2521, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Laptop GPU" },
+ { 0x2523, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" },
+ { 0x2531, 0x151d, 0x1028, "NVIDIA RTX A2000" },
+ { 0x2531, 0x151d, 0x103c, "NVIDIA RTX A2000" },
+ { 0x2531, 0x151d, 0x10de, "NVIDIA RTX A2000" },
+ { 0x2531, 0x151d, 0x17aa, "NVIDIA RTX A2000" },
+ { 0x2544, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060" },
+ { 0x2560, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Laptop GPU" },
+ { 0x2563, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" },
+ { 0x2571, 0x1611, 0x1028, "NVIDIA RTX A2000 12GB" },
+ { 0x2571, 0x1611, 0x103c, "NVIDIA RTX A2000 12GB" },
+ { 0x2571, 0x1611, 0x10de, "NVIDIA RTX A2000 12GB" },
+ { 0x2571, 0x1611, 0x17aa, "NVIDIA RTX A2000 12GB" },
+ { 0x2582, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050" },
+ { 0x2584, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050" },
+ { 0x25A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" },
+ { 0x25A0, 0x8928, 0x103c, "NVIDIA GeForce RTX 3050Ti Laptop GPU" },
+ { 0x25A0, 0x89f9, 0x103c, "NVIDIA GeForce RTX 3050Ti Laptop GPU" },
+ { 0x25A0, 0x1196, 0x1d05, "NVIDIA GeForce RTX 3060 Laptop GPU" },
+ { 0x25A2, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" },
+ { 0x25A2, 0x0baf, 0x1028, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" },
+ { 0x25A2, 0x1195, 0x1d05, "NVIDIA GeForce RTX 3060 Laptop GPU" },
+ { 0x25A5, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" },
+ { 0x25A6, 0x0000, 0x0000, "NVIDIA GeForce MX570" },
+ { 0x25A7, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" },
+ { 0x25A9, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" },
+ { 0x25AA, 0x0000, 0x0000, "NVIDIA GeForce MX570 A" },
+ { 0x25AB, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 4GB Laptop GPU" },
+ { 0x25AC, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 6GB Laptop GPU" },
+ { 0x25AD, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" },
+ { 0x25B0, 0x1878, 0x1028, "NVIDIA RTX A1000" },
+ { 0x25B0, 0x1878, 0x103c, "NVIDIA RTX A1000" },
+ { 0x25B0, 0x8d96, 0x103c, "NVIDIA RTX A1000" },
+ { 0x25B0, 0x1878, 0x10de, "NVIDIA RTX A1000" },
+ { 0x25B0, 0x1878, 0x17aa, "NVIDIA RTX A1000" },
+ { 0x25B2, 0x1879, 0x1028, "NVIDIA RTX A400" },
+ { 0x25B2, 0x1879, 0x103c, "NVIDIA RTX A400" },
+ { 0x25B2, 0x8d95, 0x103c, "NVIDIA RTX A400" },
+ { 0x25B2, 0x1879, 0x10de, "NVIDIA RTX A400" },
+ { 0x25B2, 0x1879, 0x17aa, "NVIDIA RTX A400" },
+ { 0x25B6, 0x14a9, 0x10de, "NVIDIA A16" },
+ { 0x25B6, 0x157e, 0x10de, "NVIDIA A2" },
+ { 0x25B8, 0x0000, 0x0000, "NVIDIA RTX A2000 Laptop GPU" },
+ { 0x25B9, 0x0000, 0x0000, "NVIDIA RTX A1000 Laptop GPU" },
+ { 0x25BA, 0x0000, 0x0000, "NVIDIA RTX A2000 8GB Laptop GPU" },
+ { 0x25BB, 0x0000, 0x0000, "NVIDIA RTX A500 Laptop GPU" },
+ { 0x25BC, 0x0000, 0x0000, "NVIDIA RTX A1000 6GB Laptop GPU" },
+ { 0x25BD, 0x0000, 0x0000, "NVIDIA RTX A500 Laptop GPU" },
+ { 0x25E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" },
+ { 0x25E2, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" },
+ { 0x25E5, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" },
+ { 0x25EC, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 6GB Laptop GPU" },
+ { 0x25ED, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" },
+ { 0x25F9, 0x0000, 0x0000, "NVIDIA RTX A1000 Embedded GPU" },
+ { 0x25FA, 0x0000, 0x0000, "NVIDIA RTX A2000 Embedded GPU" },
+ { 0x25FB, 0x0000, 0x0000, "NVIDIA RTX A500 Embedded GPU" },
+ { 0x2684, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090" },
+ { 0x2685, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090 D" },
+ { 0x2689, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Ti SUPER" },
+ { 0x26B1, 0x16a1, 0x1028, "NVIDIA RTX 6000 Ada Generation" },
+ { 0x26B1, 0x16a1, 0x103c, "NVIDIA RTX 6000 Ada Generation" },
+ { 0x26B1, 0x16a1, 0x10de, "NVIDIA RTX 6000 Ada Generation" },
+ { 0x26B1, 0x16a1, 0x17aa, "NVIDIA RTX 6000 Ada Generation" },
+ { 0x26B2, 0x17fa, 0x1028, "NVIDIA RTX 5000 Ada Generation" },
+ { 0x26B2, 0x17fa, 0x103c, "NVIDIA RTX 5000 Ada Generation" },
+ { 0x26B2, 0x17fa, 0x10de, "NVIDIA RTX 5000 Ada Generation" },
+ { 0x26B2, 0x17fa, 0x17aa, "NVIDIA RTX 5000 Ada Generation" },
+ { 0x26B3, 0x1934, 0x1028, "NVIDIA RTX 5880 Ada Generation" },
+ { 0x26B3, 0x1934, 0x103c, "NVIDIA RTX 5880 Ada Generation" },
+ { 0x26B3, 0x1934, 0x10de, "NVIDIA RTX 5880 Ada Generation" },
+ { 0x26B3, 0x1934, 0x17aa, "NVIDIA RTX 5880 Ada Generation" },
+ { 0x26B5, 0x169d, 0x10de, "NVIDIA L40" },
+ { 0x26B5, 0x17da, 0x10de, "NVIDIA L40" },
+ { 0x26B9, 0x1851, 0x10de, "NVIDIA L40S" },
+ { 0x26B9, 0x18cf, 0x10de, "NVIDIA L40S" },
+ { 0x26BA, 0x1957, 0x10de, "NVIDIA L20" },
+ { 0x26BA, 0x1990, 0x10de, "NVIDIA L20" },
+ { 0x2702, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080 SUPER" },
+ { 0x2704, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080" },
+ { 0x2705, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Ti SUPER" },
+ { 0x2709, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070" },
+ { 0x2717, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090 Laptop GPU" },
+ { 0x2730, 0x0000, 0x0000, "NVIDIA RTX 5000 Ada Generation Laptop GPU" },
+ { 0x2757, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090 Laptop GPU" },
+ { 0x2770, 0x0000, 0x0000, "NVIDIA RTX 5000 Ada Generation Embedded GPU" },
+ { 0x2782, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Ti" },
+ { 0x2783, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 SUPER" },
+ { 0x2786, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070" },
+ { 0x2788, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Ti" },
+ { 0x27A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080 Laptop GPU" },
+ { 0x27B0, 0x16fa, 0x1028, "NVIDIA RTX 4000 SFF Ada Generation" },
+ { 0x27B0, 0x16fa, 0x103c, "NVIDIA RTX 4000 SFF Ada Generation" },
+ { 0x27B0, 0x16fa, 0x10de, "NVIDIA RTX 4000 SFF Ada Generation" },
+ { 0x27B0, 0x16fa, 0x17aa, "NVIDIA RTX 4000 SFF Ada Generation" },
+ { 0x27B1, 0x180c, 0x1028, "NVIDIA RTX 4500 Ada Generation" },
+ { 0x27B1, 0x180c,
0x103c, "NVIDIA RTX 4500 Ada Generation" }, + { 0x27B1, 0x180c, 0x10de, "NVIDIA RTX 4500 Ada Generation" }, + { 0x27B1, 0x180c, 0x17aa, "NVIDIA RTX 4500 Ada Generation" }, + { 0x27B2, 0x181b, 0x1028, "NVIDIA RTX 4000 Ada Generation" }, + { 0x27B2, 0x181b, 0x103c, "NVIDIA RTX 4000 Ada Generation" }, + { 0x27B2, 0x181b, 0x10de, "NVIDIA RTX 4000 Ada Generation" }, + { 0x27B2, 0x181b, 0x17aa, "NVIDIA RTX 4000 Ada Generation" }, + { 0x27B6, 0x1933, 0x10de, "NVIDIA L2" }, + { 0x27B8, 0x16ca, 0x10de, "NVIDIA L4" }, + { 0x27B8, 0x16ee, 0x10de, "NVIDIA L4" }, + { 0x27BA, 0x0000, 0x0000, "NVIDIA RTX 4000 Ada Generation Laptop GPU" }, + { 0x27BB, 0x0000, 0x0000, "NVIDIA RTX 3500 Ada Generation Laptop GPU" }, + { 0x27E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080 Laptop GPU" }, + { 0x27FB, 0x0000, 0x0000, "NVIDIA RTX 3500 Ada Generation Embedded GPU" }, + { 0x2803, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Ti" }, + { 0x2805, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Ti" }, + { 0x2808, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060" }, + { 0x2820, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Laptop GPU" }, + { 0x2822, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 A Laptop GPU" }, + { 0x2838, 0x0000, 0x0000, "NVIDIA RTX 3000 Ada Generation Laptop GPU" }, + { 0x2860, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Laptop GPU" }, + { 0x2882, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060" }, + { 0x28A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Laptop GPU" }, + { 0x28A1, 0x0000, 0x0000, "NVIDIA GeForce RTX 4050 Laptop GPU" }, + { 0x28A3, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 A Laptop GPU" }, + { 0x28B0, 0x1870, 0x1028, "NVIDIA RTX 2000 Ada Generation" }, + { 0x28B0, 0x1870, 0x103c, "NVIDIA RTX 2000 Ada Generation" }, + { 0x28B0, 0x1871, 0x103c, "NVIDIA RTX 2000E Ada Generation" }, + { 0x28B0, 0x1870, 0x10de, "NVIDIA RTX 2000 Ada Generation" }, + { 0x28B0, 0x1871, 0x10de, "NVIDIA RTX 2000E Ada Generation" }, + { 0x28B0, 0x1870, 0x17aa, "NVIDIA RTX 2000 Ada Generation" }, + { 0x28B0, 0x1871, 0x17aa, "NVIDIA RTX 2000E Ada Generation" }, + { 0x28B8, 0x0000, 0x0000, "NVIDIA RTX 2000 Ada Generation Laptop GPU" }, + { 0x28B9, 0x0000, 0x0000, "NVIDIA RTX 1000 Ada Generation Laptop GPU" }, + { 0x28BA, 0x0000, 0x0000, "NVIDIA RTX 500 Ada Generation Laptop GPU" }, + { 0x28BB, 0x0000, 0x0000, "NVIDIA RTX 500 Ada Generation Laptop GPU" }, + { 0x28E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Laptop GPU" }, + { 0x28E1, 0x0000, 0x0000, "NVIDIA GeForce RTX 4050 Laptop GPU" }, + { 0x28E3, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 A Laptop GPU" }, + { 0x28F8, 0x0000, 0x0000, "NVIDIA RTX 2000 Ada Generation Embedded GPU" }, + { 0x2901, 0x1999, 0x10de, "NVIDIA B200" }, + { 0x2901, 0x199b, 0x10de, "NVIDIA B200" }, + { 0x2901, 0x20da, 0x10de, "NVIDIA B200" }, + { 0x2941, 0x2046, 0x10de, "NVIDIA GB200" }, + { 0x2941, 0x20ca, 0x10de, "NVIDIA GB200" }, + { 0x2941, 0x20d5, 0x10de, "NVIDIA GB200" }, + { 0x2941, 0x21c9, 0x10de, "NVIDIA GB200" }, + { 0x2941, 0x21ca, 0x10de, "NVIDIA GB200" }, + { 0x2B85, 0x0000, 0x0000, "NVIDIA GeForce RTX 5090" }, + { 0x2B87, 0x0000, 0x0000, "NVIDIA GeForce RTX 5090 D" }, + { 0x2BB1, 0x204b, 0x1028, "NVIDIA RTX PRO 6000 Blackwell Workstation Edition" }, + { 0x2BB1, 0x204b, 0x103c, "NVIDIA RTX PRO 6000 Blackwell Workstation Edition" }, + { 0x2BB1, 0x204b, 0x10de, "NVIDIA RTX PRO 6000 Blackwell Workstation Edition" }, + { 0x2BB1, 0x204b, 0x17aa, "NVIDIA RTX PRO 6000 Blackwell Workstation Edition" }, + { 0x2BB3, 0x204d, 0x1028, "NVIDIA RTX PRO 5000 Blackwell" }, + { 0x2BB3, 0x204d, 0x103c, "NVIDIA RTX PRO 5000 Blackwell" }, + { 
0x2BB3, 0x204d, 0x10de, "NVIDIA RTX PRO 5000 Blackwell" }, + { 0x2BB3, 0x204d, 0x17aa, "NVIDIA RTX PRO 5000 Blackwell" }, + { 0x2BB4, 0x204c, 0x1028, "NVIDIA RTX PRO 6000 Blackwell Max-Q Workstation Edition" }, + { 0x2BB4, 0x204c, 0x103c, "NVIDIA RTX PRO 6000 Blackwell Max-Q Workstation Edition" }, + { 0x2BB4, 0x204c, 0x10de, "NVIDIA RTX PRO 6000 Blackwell Max-Q Workstation Edition" }, + { 0x2BB4, 0x204c, 0x17aa, "NVIDIA RTX PRO 6000 Blackwell Max-Q Workstation Edition" }, + { 0x2BB5, 0x204e, 0x10de, "NVIDIA RTX PRO 6000 Blackwell Server Edition" }, + { 0x2C02, 0x0000, 0x0000, "NVIDIA GeForce RTX 5080" }, + { 0x2C05, 0x0000, 0x0000, "NVIDIA GeForce RTX 5070 Ti" }, + { 0x2C18, 0x0000, 0x0000, "NVIDIA GeForce RTX 5090 Laptop GPU" }, + { 0x2C19, 0x0000, 0x0000, "NVIDIA GeForce RTX 5080 Laptop GPU" }, + { 0x2C38, 0x0000, 0x0000, "NVIDIA RTX PRO 5000 Blackwell Generation Laptop GPU" }, + { 0x2C39, 0x0000, 0x0000, "NVIDIA RTX PRO 4000 Blackwell Generation Laptop GPU" }, + { 0x2C58, 0x0000, 0x0000, "NVIDIA GeForce RTX 5090 Laptop GPU" }, + { 0x2C59, 0x0000, 0x0000, "NVIDIA GeForce RTX 5080 Laptop GPU" }, + { 0x2D04, 0x0000, 0x0000, "NVIDIA GeForce RTX 5060 Ti" }, + { 0x2D05, 0x0000, 0x0000, "NVIDIA GeForce RTX 5060" }, + { 0x2D18, 0x0000, 0x0000, "NVIDIA GeForce RTX 5070 Laptop GPU" }, + { 0x2D19, 0x0000, 0x0000, "NVIDIA GeForce RTX 5060 Laptop GPU" }, + { 0x2D39, 0x0000, 0x0000, "NVIDIA RTX PRO 2000 Blackwell Generation Laptop GPU" }, + { 0x2D58, 0x0000, 0x0000, "NVIDIA GeForce RTX 5070 Laptop GPU" }, + { 0x2D59, 0x0000, 0x0000, "NVIDIA GeForce RTX 5060 Laptop GPU" }, + { 0x2DB8, 0x0000, 0x0000, "NVIDIA RTX PRO 1000 Blackwell Generation Laptop GPU" }, + { 0x2DB9, 0x0000, 0x0000, "NVIDIA RTX PRO 500 Blackwell Generation Laptop GPU" }, + { 0x2F04, 0x0000, 0x0000, "NVIDIA GeForce RTX 5070" }, + { 0x2F18, 0x0000, 0x0000, "NVIDIA GeForce RTX 5070 Ti Laptop GPU" }, + { 0x2F38, 0x0000, 0x0000, "NVIDIA RTX PRO 3000 Blackwell Generation Laptop GPU" }, + { 0x2F58, 0x0000, 0x0000, "NVIDIA GeForce RTX 5070 Ti Laptop GPU" }, + { 0x13BD, 0x11cc, 0x10DE, "GRID M10-0B" }, + { 0x13BD, 0x11cd, 0x10DE, "GRID M10-1B" }, + { 0x13BD, 0x11ce, 0x10DE, "GRID M10-0Q" }, + { 0x13BD, 0x11cf, 0x10DE, "GRID M10-1Q" }, + { 0x13BD, 0x11d0, 0x10DE, "GRID M10-2Q" }, + { 0x13BD, 0x11d1, 0x10DE, "GRID M10-4Q" }, + { 0x13BD, 0x11d2, 0x10DE, "GRID M10-8Q" }, + { 0x13BD, 0x11d3, 0x10DE, "GRID M10-1A" }, + { 0x13BD, 0x11d4, 0x10DE, "GRID M10-2A" }, + { 0x13BD, 0x11d5, 0x10DE, "GRID M10-4A" }, + { 0x13BD, 0x11d6, 0x10DE, "GRID M10-8A" }, + { 0x13BD, 0x1286, 0x10DE, "GRID M10-2B" }, + { 0x13BD, 0x12ee, 0x10DE, "GRID M10-2B4" }, + { 0x13BD, 0x1339, 0x10DE, "GRID M10-1B4" }, + { 0x1DB1, 0x1259, 0x10DE, "GRID V100X-1B" }, + { 0x1DB1, 0x125a, 0x10DE, "GRID V100X-1Q" }, + { 0x1DB1, 0x125b, 0x10DE, "GRID V100X-2Q" }, + { 0x1DB1, 0x125c, 0x10DE, "GRID V100X-4Q" }, + { 0x1DB1, 0x125d, 0x10DE, "GRID V100X-8Q" }, + { 0x1DB1, 0x125e, 0x10DE, "GRID V100X-16Q" }, + { 0x1DB1, 0x125f, 0x10DE, "GRID V100X-1A" }, + { 0x1DB1, 0x1260, 0x10DE, "GRID V100X-2A" }, + { 0x1DB1, 0x1261, 0x10DE, "GRID V100X-4A" }, + { 0x1DB1, 0x1262, 0x10DE, "GRID V100X-8A" }, + { 0x1DB1, 0x1263, 0x10DE, "GRID V100X-16A" }, + { 0x1DB1, 0x128e, 0x10DE, "GRID V100X-2B" }, + { 0x1DB1, 0x12f6, 0x10DE, "GRID V100X-2B4" }, + { 0x1DB1, 0x1341, 0x10DE, "GRID V100X-1B4" }, + { 0x1DB1, 0x1378, 0x10DE, "GRID V100X-16C" }, + { 0x1DB1, 0x138e, 0x10DE, "GRID V100X-4C" }, + { 0x1DB1, 0x138f, 0x10DE, "GRID V100X-8C" }, + { 0x1DB3, 0x1290, 0x10DE, "GRID V100L-1B" }, + { 0x1DB3, 0x1291, 0x10DE, 
"GRID V100L-2B" }, + { 0x1DB3, 0x1292, 0x10DE, "GRID V100L-1Q" }, + { 0x1DB3, 0x1293, 0x10DE, "GRID V100L-2Q" }, + { 0x1DB3, 0x1294, 0x10DE, "GRID V100L-4Q" }, + { 0x1DB3, 0x1295, 0x10DE, "GRID V100L-8Q" }, + { 0x1DB3, 0x1296, 0x10DE, "GRID V100L-16Q" }, + { 0x1DB3, 0x1297, 0x10DE, "GRID V100L-1A" }, + { 0x1DB3, 0x1298, 0x10DE, "GRID V100L-2A" }, + { 0x1DB3, 0x1299, 0x10DE, "GRID V100L-4A" }, + { 0x1DB3, 0x129a, 0x10DE, "GRID V100L-8A" }, + { 0x1DB3, 0x129b, 0x10DE, "GRID V100L-16A" }, + { 0x1DB3, 0x12f9, 0x10DE, "GRID V100L-2B4" }, + { 0x1DB3, 0x1344, 0x10DE, "GRID V100L-1B4" }, + { 0x1DB3, 0x137a, 0x10DE, "GRID V100L-16C" }, + { 0x1DB3, 0x1398, 0x10DE, "GRID V100L-4C" }, + { 0x1DB3, 0x1399, 0x10DE, "GRID V100L-8C" }, + { 0x1DB4, 0x124e, 0x10DE, "GRID V100-1B" }, + { 0x1DB4, 0x124f, 0x10DE, "GRID V100-1Q" }, + { 0x1DB4, 0x1250, 0x10DE, "GRID V100-2Q" }, + { 0x1DB4, 0x1251, 0x10DE, "GRID V100-4Q" }, + { 0x1DB4, 0x1252, 0x10DE, "GRID V100-8Q" }, + { 0x1DB4, 0x1253, 0x10DE, "GRID V100-16Q" }, + { 0x1DB4, 0x1254, 0x10DE, "GRID V100-1A" }, + { 0x1DB4, 0x1255, 0x10DE, "GRID V100-2A" }, + { 0x1DB4, 0x1256, 0x10DE, "GRID V100-4A" }, + { 0x1DB4, 0x1257, 0x10DE, "GRID V100-8A" }, + { 0x1DB4, 0x1258, 0x10DE, "GRID V100-16A" }, + { 0x1DB4, 0x128f, 0x10DE, "GRID V100-2B" }, + { 0x1DB4, 0x12f5, 0x10DE, "GRID V100-2B4" }, + { 0x1DB4, 0x1340, 0x10DE, "GRID V100-1B4" }, + { 0x1DB4, 0x1379, 0x10DE, "GRID V100-16C" }, + { 0x1DB4, 0x1393, 0x10DE, "GRID V100-4C" }, + { 0x1DB4, 0x1394, 0x10DE, "GRID V100-8C" }, + { 0x1DB5, 0x12cb, 0x10DE, "GRID V100DX-1B" }, + { 0x1DB5, 0x12cc, 0x10DE, "GRID V100DX-2B" }, + { 0x1DB5, 0x12cd, 0x10DE, "GRID V100DX-1Q" }, + { 0x1DB5, 0x12ce, 0x10DE, "GRID V100DX-2Q" }, + { 0x1DB5, 0x12cf, 0x10DE, "GRID V100DX-4Q" }, + { 0x1DB5, 0x12d0, 0x10DE, "GRID V100DX-8Q" }, + { 0x1DB5, 0x12d1, 0x10DE, "GRID V100DX-16Q" }, + { 0x1DB5, 0x12d2, 0x10DE, "GRID V100DX-32Q" }, + { 0x1DB5, 0x12d3, 0x10DE, "GRID V100DX-1A" }, + { 0x1DB5, 0x12d4, 0x10DE, "GRID V100DX-2A" }, + { 0x1DB5, 0x12d5, 0x10DE, "GRID V100DX-4A" }, + { 0x1DB5, 0x12d6, 0x10DE, "GRID V100DX-8A" }, + { 0x1DB5, 0x12d7, 0x10DE, "GRID V100DX-16A" }, + { 0x1DB5, 0x12d8, 0x10DE, "GRID V100DX-32A" }, + { 0x1DB5, 0x12f8, 0x10DE, "GRID V100DX-2B4" }, + { 0x1DB5, 0x1343, 0x10DE, "GRID V100DX-1B4" }, + { 0x1DB5, 0x1376, 0x10DE, "GRID V100DX-32C" }, + { 0x1DB5, 0x1390, 0x10DE, "GRID V100DX-4C" }, + { 0x1DB5, 0x1391, 0x10DE, "GRID V100DX-8C" }, + { 0x1DB5, 0x1392, 0x10DE, "GRID V100DX-16C" }, + { 0x1DB6, 0x12bd, 0x10DE, "GRID V100D-1B" }, + { 0x1DB6, 0x12be, 0x10DE, "GRID V100D-2B" }, + { 0x1DB6, 0x12bf, 0x10DE, "GRID V100D-1Q" }, + { 0x1DB6, 0x12c0, 0x10DE, "GRID V100D-2Q" }, + { 0x1DB6, 0x12c1, 0x10DE, "GRID V100D-4Q" }, + { 0x1DB6, 0x12c2, 0x10DE, "GRID V100D-8Q" }, + { 0x1DB6, 0x12c3, 0x10DE, "GRID V100D-16Q" }, + { 0x1DB6, 0x12c4, 0x10DE, "GRID V100D-32Q" }, + { 0x1DB6, 0x12c5, 0x10DE, "GRID V100D-1A" }, + { 0x1DB6, 0x12c6, 0x10DE, "GRID V100D-2A" }, + { 0x1DB6, 0x12c7, 0x10DE, "GRID V100D-4A" }, + { 0x1DB6, 0x12c8, 0x10DE, "GRID V100D-8A" }, + { 0x1DB6, 0x12c9, 0x10DE, "GRID V100D-16A" }, + { 0x1DB6, 0x12ca, 0x10DE, "GRID V100D-32A" }, + { 0x1DB6, 0x12f7, 0x10DE, "GRID V100D-2B4" }, + { 0x1DB6, 0x1342, 0x10DE, "GRID V100D-1B4" }, + { 0x1DB6, 0x1377, 0x10DE, "GRID V100D-32C" }, + { 0x1DB6, 0x1395, 0x10DE, "GRID V100D-4C" }, + { 0x1DB6, 0x1396, 0x10DE, "GRID V100D-8C" }, + { 0x1DB6, 0x1397, 0x10DE, "GRID V100D-16C" }, + { 0x1DB6, 0x13cd, 0x10DE, "GRID GTX V100D-8" }, + { 0x1DB6, 0x13ce, 0x10DE, "GRID GTX V100D-16" }, + { 0x1DB6, 0x13cf, 
0x10DE, "GRID GTX V100D-32" }, + { 0x1DF6, 0x13e1, 0x10DE, "GRID V100S-1B" }, + { 0x1DF6, 0x13e3, 0x10DE, "GRID V100S-2B" }, + { 0x1DF6, 0x13e5, 0x10DE, "GRID V100S-1Q" }, + { 0x1DF6, 0x13e6, 0x10DE, "GRID V100S-2Q" }, + { 0x1DF6, 0x13e7, 0x10DE, "GRID V100S-4Q" }, + { 0x1DF6, 0x13e8, 0x10DE, "GRID V100S-8Q" }, + { 0x1DF6, 0x13e9, 0x10DE, "GRID V100S-16Q" }, + { 0x1DF6, 0x13ea, 0x10DE, "GRID V100S-32Q" }, + { 0x1DF6, 0x13eb, 0x10DE, "GRID V100S-1A" }, + { 0x1DF6, 0x13ec, 0x10DE, "GRID V100S-2A" }, + { 0x1DF6, 0x13ed, 0x10DE, "GRID V100S-4A" }, + { 0x1DF6, 0x13ee, 0x10DE, "GRID V100S-8A" }, + { 0x1DF6, 0x13ef, 0x10DE, "GRID V100S-16A" }, + { 0x1DF6, 0x13f0, 0x10DE, "GRID V100S-32A" }, + { 0x1DF6, 0x13f1, 0x10DE, "GRID V100S-4C" }, + { 0x1DF6, 0x13f2, 0x10DE, "GRID V100S-8C" }, + { 0x1DF6, 0x13f3, 0x10DE, "GRID V100S-16C" }, + { 0x1DF6, 0x13f4, 0x10DE, "GRID V100S-32C" }, + { 0x1E30, 0x1325, 0x10DE, "GRID RTX6000-1Q" }, + { 0x1E30, 0x1326, 0x10DE, "GRID RTX6000-2Q" }, + { 0x1E30, 0x1327, 0x10DE, "GRID RTX6000-3Q" }, + { 0x1E30, 0x1328, 0x10DE, "GRID RTX6000-4Q" }, + { 0x1E30, 0x1329, 0x10DE, "GRID RTX6000-6Q" }, + { 0x1E30, 0x132a, 0x10DE, "GRID RTX6000-8Q" }, + { 0x1E30, 0x132b, 0x10DE, "GRID RTX6000-12Q" }, + { 0x1E30, 0x132c, 0x10DE, "GRID RTX6000-24Q" }, + { 0x1E30, 0x132d, 0x10DE, "GRID RTX8000-1Q" }, + { 0x1E30, 0x132e, 0x10DE, "GRID RTX8000-2Q" }, + { 0x1E30, 0x132f, 0x10DE, "GRID RTX8000-3Q" }, + { 0x1E30, 0x1330, 0x10DE, "GRID RTX8000-4Q" }, + { 0x1E30, 0x1331, 0x10DE, "GRID RTX8000-6Q" }, + { 0x1E30, 0x1332, 0x10DE, "GRID RTX8000-8Q" }, + { 0x1E30, 0x1333, 0x10DE, "GRID RTX8000-12Q" }, + { 0x1E30, 0x1334, 0x10DE, "GRID RTX8000-16Q" }, + { 0x1E30, 0x1335, 0x10DE, "GRID RTX8000-24Q" }, + { 0x1E30, 0x1336, 0x10DE, "GRID RTX8000-48Q" }, + { 0x1E30, 0x13b9, 0x10DE, "GRID RTX6000-6" }, + { 0x1E30, 0x13ba, 0x10DE, "GRID RTX6000-12" }, + { 0x1E30, 0x13bb, 0x10DE, "GRID RTX6000-24" }, + { 0x1E30, 0x13bc, 0x10DE, "GRID RTX8000-12" }, + { 0x1E30, 0x13bd, 0x10DE, "GRID RTX8000-24" }, + { 0x1E30, 0x13be, 0x10DE, "GRID RTX8000-48" }, + { 0x1E30, 0x13bf, 0x10DE, "GRID RTX6000-4C" }, + { 0x1E30, 0x13c0, 0x10DE, "GRID RTX6000-6C" }, + { 0x1E30, 0x13c1, 0x10DE, "GRID RTX6000-8C" }, + { 0x1E30, 0x13c2, 0x10DE, "GRID RTX6000-12C" }, + { 0x1E30, 0x13c3, 0x10DE, "GRID RTX6000-24C" }, + { 0x1E30, 0x13c4, 0x10DE, "GRID RTX8000-4C" }, + { 0x1E30, 0x13c5, 0x10DE, "GRID RTX8000-6C" }, + { 0x1E30, 0x13c6, 0x10DE, "GRID RTX8000-8C" }, + { 0x1E30, 0x13c7, 0x10DE, "GRID RTX8000-12C" }, + { 0x1E30, 0x13c8, 0x10DE, "GRID RTX8000-16C" }, + { 0x1E30, 0x13c9, 0x10DE, "GRID RTX8000-24C" }, + { 0x1E30, 0x13ca, 0x10DE, "GRID RTX8000-48C" }, + { 0x1E30, 0x13cb, 0x10DE, "GRID RTX6000-8" }, + { 0x1E30, 0x13cc, 0x10DE, "GRID RTX8000-16" }, + { 0x1E30, 0x1437, 0x10DE, "GRID RTX6000-1B" }, + { 0x1E30, 0x1438, 0x10DE, "GRID RTX6000-2B" }, + { 0x1E30, 0x1439, 0x10DE, "GRID RTX6000-1A" }, + { 0x1E30, 0x143a, 0x10DE, "GRID RTX6000-2A" }, + { 0x1E30, 0x143b, 0x10DE, "GRID RTX6000-3A" }, + { 0x1E30, 0x143c, 0x10DE, "GRID RTX6000-4A" }, + { 0x1E30, 0x143d, 0x10DE, "GRID RTX6000-6A" }, + { 0x1E30, 0x143e, 0x10DE, "GRID RTX6000-8A" }, + { 0x1E30, 0x143f, 0x10DE, "GRID RTX6000-12A" }, + { 0x1E30, 0x1440, 0x10DE, "GRID RTX6000-24A" }, + { 0x1E30, 0x1441, 0x10DE, "GRID RTX8000-1B" }, + { 0x1E30, 0x1442, 0x10DE, "GRID RTX8000-2B" }, + { 0x1E30, 0x1443, 0x10DE, "GRID RTX8000-1A" }, + { 0x1E30, 0x1444, 0x10DE, "GRID RTX8000-2A" }, + { 0x1E30, 0x1445, 0x10DE, "GRID RTX8000-3A" }, + { 0x1E30, 0x1446, 0x10DE, "GRID RTX8000-4A" }, + { 0x1E30, 
0x1447, 0x10DE, "GRID RTX8000-6A" }, + { 0x1E30, 0x1448, 0x10DE, "GRID RTX8000-8A" }, + { 0x1E30, 0x1449, 0x10DE, "GRID RTX8000-12A" }, + { 0x1E30, 0x144a, 0x10DE, "GRID RTX8000-16A" }, + { 0x1E30, 0x144b, 0x10DE, "GRID RTX8000-24A" }, + { 0x1E30, 0x144c, 0x10DE, "GRID RTX8000-48A" }, + { 0x1E37, 0x1347, 0x10DE, "GeForce RTX T10x-8" }, + { 0x1E37, 0x1348, 0x10DE, "GeForce RTX T10x-4" }, + { 0x1E37, 0x1349, 0x10DE, "GeForce RTX T10x-2" }, + { 0x1E37, 0x136a, 0x10DE, "GRID RTX T10-4" }, + { 0x1E37, 0x136b, 0x10DE, "GRID RTX T10-8" }, + { 0x1E37, 0x136c, 0x10DE, "GRID RTX T10-16" }, + { 0x1E37, 0x13a4, 0x10DE, "GeForce RTX T10-4" }, + { 0x1E37, 0x13a5, 0x10DE, "GeForce RTX T10-8" }, + { 0x1E37, 0x13a6, 0x10DE, "GeForce RTX T10-16" }, + { 0x1E37, 0x13a7, 0x10DE, "GRID RTX T10x-2" }, + { 0x1E37, 0x13a8, 0x10DE, "GRID RTX T10x-4" }, + { 0x1E37, 0x13a9, 0x10DE, "GRID RTX T10x-8" }, + { 0x1E37, 0x180d, 0x10DE, "NVIDIA GeForce GTX 1060" }, + { 0x1E37, 0x1820, 0x10DE, "NVIDIA GeForce RTX 2080" }, + { 0x1E78, 0x13f7, 0x10DE, "GRID RTX6000P-1B" }, + { 0x1E78, 0x13f8, 0x10DE, "GRID RTX6000P-2B" }, + { 0x1E78, 0x13f9, 0x10DE, "GRID RTX6000P-1Q" }, + { 0x1E78, 0x13fa, 0x10DE, "GRID RTX6000P-2Q" }, + { 0x1E78, 0x13fb, 0x10DE, "GRID RTX6000P-3Q" }, + { 0x1E78, 0x13fc, 0x10DE, "GRID RTX6000P-4Q" }, + { 0x1E78, 0x13fd, 0x10DE, "GRID RTX6000P-6Q" }, + { 0x1E78, 0x13fe, 0x10DE, "GRID RTX6000P-8Q" }, + { 0x1E78, 0x13ff, 0x10DE, "GRID RTX6000P-12Q" }, + { 0x1E78, 0x1400, 0x10DE, "GRID RTX6000P-24Q" }, + { 0x1E78, 0x1401, 0x10DE, "GRID RTX6000P-1A" }, + { 0x1E78, 0x1402, 0x10DE, "GRID RTX6000P-2A" }, + { 0x1E78, 0x1403, 0x10DE, "GRID RTX6000P-3A" }, + { 0x1E78, 0x1404, 0x10DE, "GRID RTX6000P-4A" }, + { 0x1E78, 0x1405, 0x10DE, "GRID RTX6000P-6A" }, + { 0x1E78, 0x1406, 0x10DE, "GRID RTX6000P-8A" }, + { 0x1E78, 0x1407, 0x10DE, "GRID RTX6000P-12A" }, + { 0x1E78, 0x1408, 0x10DE, "GRID RTX6000P-24A" }, + { 0x1E78, 0x1409, 0x10DE, "GRID RTX6000P-6" }, + { 0x1E78, 0x140a, 0x10DE, "GRID RTX6000P-8" }, + { 0x1E78, 0x140b, 0x10DE, "GRID RTX6000P-12" }, + { 0x1E78, 0x140c, 0x10DE, "GRID RTX6000P-24" }, + { 0x1E78, 0x140d, 0x10DE, "GRID RTX6000P-4C" }, + { 0x1E78, 0x140e, 0x10DE, "GRID RTX6000P-6C" }, + { 0x1E78, 0x140f, 0x10DE, "GRID RTX6000P-8C" }, + { 0x1E78, 0x1410, 0x10DE, "GRID RTX6000P-12C" }, + { 0x1E78, 0x1411, 0x10DE, "GRID RTX6000P-24C" }, + { 0x1E78, 0x1412, 0x10DE, "GRID RTX8000P-1B" }, + { 0x1E78, 0x1413, 0x10DE, "GRID RTX8000P-2B" }, + { 0x1E78, 0x1414, 0x10DE, "GRID RTX8000P-1Q" }, + { 0x1E78, 0x1415, 0x10DE, "GRID RTX8000P-2Q" }, + { 0x1E78, 0x1416, 0x10DE, "GRID RTX8000P-3Q" }, + { 0x1E78, 0x1417, 0x10DE, "GRID RTX8000P-4Q" }, + { 0x1E78, 0x1418, 0x10DE, "GRID RTX8000P-6Q" }, + { 0x1E78, 0x1419, 0x10DE, "GRID RTX8000P-8Q" }, + { 0x1E78, 0x141a, 0x10DE, "GRID RTX8000P-12Q" }, + { 0x1E78, 0x141b, 0x10DE, "GRID RTX8000P-16Q" }, + { 0x1E78, 0x141c, 0x10DE, "GRID RTX8000P-24Q" }, + { 0x1E78, 0x141d, 0x10DE, "GRID RTX8000P-48Q" }, + { 0x1E78, 0x141e, 0x10DE, "GRID RTX8000P-1A" }, + { 0x1E78, 0x141f, 0x10DE, "GRID RTX8000P-2A" }, + { 0x1E78, 0x1420, 0x10DE, "GRID RTX8000P-3A" }, + { 0x1E78, 0x1421, 0x10DE, "GRID RTX8000P-4A" }, + { 0x1E78, 0x1422, 0x10DE, "GRID RTX8000P-6A" }, + { 0x1E78, 0x1423, 0x10DE, "GRID RTX8000P-8A" }, + { 0x1E78, 0x1424, 0x10DE, "GRID RTX8000P-12A" }, + { 0x1E78, 0x1425, 0x10DE, "GRID RTX8000P-24A" }, + { 0x1E78, 0x1426, 0x10DE, "GRID RTX8000P-48A" }, + { 0x1E78, 0x1427, 0x10DE, "GRID RTX8000P-12" }, + { 0x1E78, 0x1428, 0x10DE, "GRID RTX8000P-16" }, + { 0x1E78, 0x1429, 0x10DE, "GRID 
RTX8000P-24" }, + { 0x1E78, 0x142a, 0x10DE, "GRID RTX8000P-48" }, + { 0x1E78, 0x142b, 0x10DE, "GRID RTX8000P-4C" }, + { 0x1E78, 0x142c, 0x10DE, "GRID RTX8000P-6C" }, + { 0x1E78, 0x142d, 0x10DE, "GRID RTX8000P-8C" }, + { 0x1E78, 0x142e, 0x10DE, "GRID RTX8000P-12C" }, + { 0x1E78, 0x142f, 0x10DE, "GRID RTX8000P-16C" }, + { 0x1E78, 0x1430, 0x10DE, "GRID RTX8000P-24C" }, + { 0x1E78, 0x1431, 0x10DE, "GRID RTX8000P-48C" }, + { 0x1E78, 0x1436, 0x10DE, "GRID RTX8000P-16A" }, + { 0x1EB8, 0x1309, 0x10DE, "GRID T4-1B" }, + { 0x1EB8, 0x130a, 0x10DE, "GRID T4-2B" }, + { 0x1EB8, 0x130b, 0x10DE, "GRID T4-2B4" }, + { 0x1EB8, 0x130c, 0x10DE, "GRID T4-1Q" }, + { 0x1EB8, 0x130d, 0x10DE, "GRID T4-2Q" }, + { 0x1EB8, 0x130e, 0x10DE, "GRID T4-4Q" }, + { 0x1EB8, 0x130f, 0x10DE, "GRID T4-8Q" }, + { 0x1EB8, 0x1310, 0x10DE, "GRID T4-16Q" }, + { 0x1EB8, 0x1311, 0x10DE, "GRID T4-1A" }, + { 0x1EB8, 0x1312, 0x10DE, "GRID T4-2A" }, + { 0x1EB8, 0x1313, 0x10DE, "GRID T4-4A" }, + { 0x1EB8, 0x1314, 0x10DE, "GRID T4-8A" }, + { 0x1EB8, 0x1315, 0x10DE, "GRID T4-16A" }, + { 0x1EB8, 0x1345, 0x10DE, "GRID T4-1B4" }, + { 0x1EB8, 0x1367, 0x10DE, "GRID RTX T4-4" }, + { 0x1EB8, 0x1368, 0x10DE, "GRID RTX T4-8" }, + { 0x1EB8, 0x1369, 0x10DE, "GRID RTX T4-16" }, + { 0x1EB8, 0x1375, 0x10DE, "GRID T4-16C" }, + { 0x1EB8, 0x139a, 0x10DE, "GRID T4-4C" }, + { 0x1EB8, 0x139b, 0x10DE, "GRID T4-8C" }, + { 0x20B0, 0x146f, 0x10DE, "GRID A100X-1-5C" }, + { 0x20B0, 0x1470, 0x10DE, "GRID A100X-2-10C" }, + { 0x20B0, 0x1471, 0x10DE, "GRID A100X-3-20C" }, + { 0x20B0, 0x1472, 0x10DE, "GRID A100X-4-20C" }, + { 0x20B0, 0x1473, 0x10DE, "GRID A100X-7-40C" }, + { 0x20B0, 0x1474, 0x10DE, "GRID A100X-4C" }, + { 0x20B0, 0x1475, 0x10DE, "GRID A100X-5C" }, + { 0x20B0, 0x1476, 0x10DE, "GRID A100X-8C" }, + { 0x20B0, 0x1477, 0x10DE, "GRID A100X-10C" }, + { 0x20B0, 0x1478, 0x10DE, "GRID A100X-20C" }, + { 0x20B0, 0x1479, 0x10DE, "GRID A100X-40C" }, + { 0x20B0, 0x160c, 0x10DE, "GRID A100X-1-5CME" }, + { 0x20B0, 0x1840, 0x10DE, "GRID A100X-1-10C" }, + { 0x20B2, 0x1523, 0x10DE, "GRID A100DX-1-10C" }, + { 0x20B2, 0x1524, 0x10DE, "GRID A100DX-2-20C" }, + { 0x20B2, 0x1525, 0x10DE, "GRID A100DX-3-40C" }, + { 0x20B2, 0x1526, 0x10DE, "GRID A100DX-4-40C" }, + { 0x20B2, 0x1527, 0x10DE, "GRID A100DX-7-80C" }, + { 0x20B2, 0x1528, 0x10DE, "GRID A100DX-4C" }, + { 0x20B2, 0x1529, 0x10DE, "GRID A100DX-8C" }, + { 0x20B2, 0x152a, 0x10DE, "GRID A100DX-10C" }, + { 0x20B2, 0x152b, 0x10DE, "GRID A100DX-16C" }, + { 0x20B2, 0x152c, 0x10DE, "GRID A100DX-20C" }, + { 0x20B2, 0x152d, 0x10DE, "GRID A100DX-40C" }, + { 0x20B2, 0x152e, 0x10DE, "GRID A100DX-80C" }, + { 0x20B2, 0x160d, 0x10DE, "GRID A100DX-1-10CME" }, + { 0x20B2, 0x1841, 0x10DE, "GRID A100DX-1-20C" }, + { 0x20B5, 0x1591, 0x10DE, "GRID A100D-1-10C" }, + { 0x20B5, 0x1592, 0x10DE, "GRID A100D-2-20C" }, + { 0x20B5, 0x1593, 0x10DE, "GRID A100D-3-40C" }, + { 0x20B5, 0x1594, 0x10DE, "GRID A100D-4-40C" }, + { 0x20B5, 0x1595, 0x10DE, "GRID A100D-7-80C" }, + { 0x20B5, 0x1596, 0x10DE, "GRID A100D-4C" }, + { 0x20B5, 0x1597, 0x10DE, "GRID A100D-8C" }, + { 0x20B5, 0x1598, 0x10DE, "GRID A100D-10C" }, + { 0x20B5, 0x1599, 0x10DE, "GRID A100D-16C" }, + { 0x20B5, 0x159a, 0x10DE, "GRID A100D-20C" }, + { 0x20B5, 0x159b, 0x10DE, "GRID A100D-40C" }, + { 0x20B5, 0x159c, 0x10DE, "GRID A100D-80C" }, + { 0x20B5, 0x160f, 0x10DE, "GRID A100D-1-10CME" }, + { 0x20B5, 0x183e, 0x10DE, "GRID A100D-1-20C" }, + { 0x20B7, 0x1589, 0x10DE, "NVIDIA A30-1-6C" }, + { 0x20B7, 0x158a, 0x10DE, "NVIDIA A30-2-12C" }, + { 0x20B7, 0x158b, 0x10DE, "NVIDIA A30-4-24C" }, + { 0x20B7, 
0x158c, 0x10DE, "NVIDIA A30-4C" }, + { 0x20B7, 0x158d, 0x10DE, "NVIDIA A30-6C" }, + { 0x20B7, 0x158e, 0x10DE, "NVIDIA A30-8C" }, + { 0x20B7, 0x158f, 0x10DE, "NVIDIA A30-12C" }, + { 0x20B7, 0x1590, 0x10DE, "NVIDIA A30-24C" }, + { 0x20B7, 0x1610, 0x10DE, "NVIDIA A30-1-6CME" }, + { 0x20B7, 0x183c, 0x10DE, "NVIDIA A30-2-12CME" }, + { 0x20F1, 0x1493, 0x10DE, "GRID A100-1-5C" }, + { 0x20F1, 0x1494, 0x10DE, "GRID A100-2-10C" }, + { 0x20F1, 0x1495, 0x10DE, "GRID A100-3-20C" }, + { 0x20F1, 0x1496, 0x10DE, "GRID A100-4-20C" }, + { 0x20F1, 0x1497, 0x10DE, "GRID A100-7-40C" }, + { 0x20F1, 0x1498, 0x10DE, "GRID A100-4C" }, + { 0x20F1, 0x1499, 0x10DE, "GRID A100-5C" }, + { 0x20F1, 0x149a, 0x10DE, "GRID A100-8C" }, + { 0x20F1, 0x149b, 0x10DE, "GRID A100-10C" }, + { 0x20F1, 0x149c, 0x10DE, "GRID A100-20C" }, + { 0x20F1, 0x149d, 0x10DE, "GRID A100-40C" }, + { 0x20F1, 0x160e, 0x10DE, "GRID A100-1-5CME" }, + { 0x20F1, 0x183d, 0x10DE, "GRID A100-1-10C" }, + { 0x20F3, 0x17b2, 0x10DE, "GRID A800DX-1-10CME" }, + { 0x20F3, 0x17b3, 0x10DE, "GRID A800DX-1-10C" }, + { 0x20F3, 0x17b4, 0x10DE, "GRID A800DX-2-20C" }, + { 0x20F3, 0x17b5, 0x10DE, "GRID A800DX-3-40C" }, + { 0x20F3, 0x17b6, 0x10DE, "GRID A800DX-4-40C" }, + { 0x20F3, 0x17b7, 0x10DE, "GRID A800DX-7-80C" }, + { 0x20F3, 0x17b8, 0x10DE, "GRID A800DX-4C" }, + { 0x20F3, 0x17b9, 0x10DE, "GRID A800DX-8C" }, + { 0x20F3, 0x17ba, 0x10DE, "GRID A800DX-10C" }, + { 0x20F3, 0x17bb, 0x10DE, "GRID A800DX-16C" }, + { 0x20F3, 0x17bc, 0x10DE, "GRID A800DX-20C" }, + { 0x20F3, 0x17bd, 0x10DE, "GRID A800DX-40C" }, + { 0x20F3, 0x17be, 0x10DE, "GRID A800DX-80C" }, + { 0x20F3, 0x1842, 0x10DE, "GRID A800DX-1-20C" }, + { 0x20F5, 0x17bf, 0x10DE, "GRID A800D-1-10CME" }, + { 0x20F5, 0x17c0, 0x10DE, "GRID A800D-1-10C" }, + { 0x20F5, 0x17c1, 0x10DE, "GRID A800D-2-20C" }, + { 0x20F5, 0x17c2, 0x10DE, "GRID A800D-3-40C" }, + { 0x20F5, 0x17c3, 0x10DE, "GRID A800D-4-40C" }, + { 0x20F5, 0x17c4, 0x10DE, "GRID A800D-7-80C" }, + { 0x20F5, 0x17c5, 0x10DE, "GRID A800D-4C" }, + { 0x20F5, 0x17c6, 0x10DE, "GRID A800D-8C" }, + { 0x20F5, 0x17c7, 0x10DE, "GRID A800D-10C" }, + { 0x20F5, 0x17c8, 0x10DE, "GRID A800D-16C" }, + { 0x20F5, 0x17c9, 0x10DE, "GRID A800D-20C" }, + { 0x20F5, 0x17ca, 0x10DE, "GRID A800D-40C" }, + { 0x20F5, 0x17cb, 0x10DE, "GRID A800D-80C" }, + { 0x20F5, 0x183f, 0x10DE, "GRID A800D-1-20C" }, + { 0x20F6, 0x17cc, 0x10DE, "GRID A800-1-5CME" }, + { 0x20F6, 0x17cd, 0x10DE, "GRID A800-1-5C" }, + { 0x20F6, 0x17ce, 0x10DE, "GRID A800-2-10C" }, + { 0x20F6, 0x17cf, 0x10DE, "GRID A800-3-20C" }, + { 0x20F6, 0x17d0, 0x10DE, "GRID A800-4-20C" }, + { 0x20F6, 0x17d1, 0x10DE, "GRID A800-7-40C" }, + { 0x20F6, 0x17d2, 0x10DE, "GRID A800-4C" }, + { 0x20F6, 0x17d3, 0x10DE, "GRID A800-5C" }, + { 0x20F6, 0x17d4, 0x10DE, "GRID A800-8C" }, + { 0x20F6, 0x17d5, 0x10DE, "GRID A800-10C" }, + { 0x20F6, 0x17d6, 0x10DE, "GRID A800-20C" }, + { 0x20F6, 0x17d7, 0x10DE, "GRID A800-40C" }, + { 0x20F6, 0x1843, 0x10DE, "GRID A800-1-10C" }, + { 0x2230, 0x14fa, 0x10DE, "NVIDIA RTXA6000-1B" }, + { 0x2230, 0x14fb, 0x10DE, "NVIDIA RTXA6000-2B" }, + { 0x2230, 0x14fc, 0x10DE, "NVIDIA RTXA6000-1Q" }, + { 0x2230, 0x14fd, 0x10DE, "NVIDIA RTXA6000-2Q" }, + { 0x2230, 0x14fe, 0x10DE, "NVIDIA RTXA6000-3Q" }, + { 0x2230, 0x14ff, 0x10DE, "NVIDIA RTXA6000-4Q" }, + { 0x2230, 0x1500, 0x10DE, "NVIDIA RTXA6000-6Q" }, + { 0x2230, 0x1501, 0x10DE, "NVIDIA RTXA6000-8Q" }, + { 0x2230, 0x1502, 0x10DE, "NVIDIA RTXA6000-12Q" }, + { 0x2230, 0x1503, 0x10DE, "NVIDIA RTXA6000-16Q" }, + { 0x2230, 0x1504, 0x10DE, "NVIDIA RTXA6000-24Q" }, + { 0x2230, 0x1505, 
0x10DE, "NVIDIA RTXA6000-48Q" }, + { 0x2230, 0x1506, 0x10DE, "NVIDIA RTXA6000-1A" }, + { 0x2230, 0x1507, 0x10DE, "NVIDIA RTXA6000-2A" }, + { 0x2230, 0x1508, 0x10DE, "NVIDIA RTXA6000-3A" }, + { 0x2230, 0x1509, 0x10DE, "NVIDIA RTXA6000-4A" }, + { 0x2230, 0x150a, 0x10DE, "NVIDIA RTXA6000-6A" }, + { 0x2230, 0x150b, 0x10DE, "NVIDIA RTXA6000-8A" }, + { 0x2230, 0x150c, 0x10DE, "NVIDIA RTXA6000-12A" }, + { 0x2230, 0x150d, 0x10DE, "NVIDIA RTXA6000-16A" }, + { 0x2230, 0x150e, 0x10DE, "NVIDIA RTXA6000-24A" }, + { 0x2230, 0x150f, 0x10DE, "NVIDIA RTXA6000-48A" }, + { 0x2230, 0x1510, 0x10DE, "NVIDIA RTXA6000-12" }, + { 0x2230, 0x1511, 0x10DE, "NVIDIA RTXA6000-16" }, + { 0x2230, 0x1512, 0x10DE, "NVIDIA RTXA6000-24" }, + { 0x2230, 0x1513, 0x10DE, "NVIDIA RTXA6000-48" }, + { 0x2230, 0x1514, 0x10DE, "NVIDIA RTXA6000-4C" }, + { 0x2230, 0x1515, 0x10DE, "NVIDIA RTXA6000-6C" }, + { 0x2230, 0x1516, 0x10DE, "NVIDIA RTXA6000-8C" }, + { 0x2230, 0x1517, 0x10DE, "NVIDIA RTXA6000-12C" }, + { 0x2230, 0x1518, 0x10DE, "NVIDIA RTXA6000-16C" }, + { 0x2230, 0x1519, 0x10DE, "NVIDIA RTXA6000-24C" }, + { 0x2230, 0x151a, 0x10DE, "NVIDIA RTXA6000-48C" }, + { 0x2231, 0x1562, 0x10DE, "NVIDIA RTXA5000-1B" }, + { 0x2231, 0x1563, 0x10DE, "NVIDIA RTXA5000-2B" }, + { 0x2231, 0x1564, 0x10DE, "NVIDIA RTXA5000-1Q" }, + { 0x2231, 0x1565, 0x10DE, "NVIDIA RTXA5000-2Q" }, + { 0x2231, 0x1566, 0x10DE, "NVIDIA RTXA5000-3Q" }, + { 0x2231, 0x1567, 0x10DE, "NVIDIA RTXA5000-4Q" }, + { 0x2231, 0x1568, 0x10DE, "NVIDIA RTXA5000-6Q" }, + { 0x2231, 0x1569, 0x10DE, "NVIDIA RTXA5000-8Q" }, + { 0x2231, 0x156a, 0x10DE, "NVIDIA RTXA5000-12Q" }, + { 0x2231, 0x156b, 0x10DE, "NVIDIA RTXA5000-24Q" }, + { 0x2231, 0x156c, 0x10DE, "NVIDIA RTXA5000-1A" }, + { 0x2231, 0x156d, 0x10DE, "NVIDIA RTXA5000-2A" }, + { 0x2231, 0x156e, 0x10DE, "NVIDIA RTXA5000-3A" }, + { 0x2231, 0x156f, 0x10DE, "NVIDIA RTXA5000-4A" }, + { 0x2231, 0x1570, 0x10DE, "NVIDIA RTXA5000-6A" }, + { 0x2231, 0x1571, 0x10DE, "NVIDIA RTXA5000-8A" }, + { 0x2231, 0x1572, 0x10DE, "NVIDIA RTXA5000-12A" }, + { 0x2231, 0x1573, 0x10DE, "NVIDIA RTXA5000-24A" }, + { 0x2231, 0x1574, 0x10DE, "NVIDIA RTXA5000-6" }, + { 0x2231, 0x1575, 0x10DE, "NVIDIA RTXA5000-8" }, + { 0x2231, 0x1576, 0x10DE, "NVIDIA RTXA5000-12" }, + { 0x2231, 0x1577, 0x10DE, "NVIDIA RTXA5000-24" }, + { 0x2231, 0x1578, 0x10DE, "NVIDIA RTXA5000-4C" }, + { 0x2231, 0x1579, 0x10DE, "NVIDIA RTXA5000-6C" }, + { 0x2231, 0x157a, 0x10DE, "NVIDIA RTXA5000-8C" }, + { 0x2231, 0x157b, 0x10DE, "NVIDIA RTXA5000-12C" }, + { 0x2231, 0x157c, 0x10DE, "NVIDIA RTXA5000-24C" }, + { 0x2233, 0x165c, 0x10DE, "NVIDIA RTXA5500-1B" }, + { 0x2233, 0x165d, 0x10DE, "NVIDIA RTXA5500-2B" }, + { 0x2233, 0x165e, 0x10DE, "NVIDIA RTXA5500-1Q" }, + { 0x2233, 0x165f, 0x10DE, "NVIDIA RTXA5500-2Q" }, + { 0x2233, 0x1660, 0x10DE, "NVIDIA RTXA5500-3Q" }, + { 0x2233, 0x1661, 0x10DE, "NVIDIA RTXA5500-4Q" }, + { 0x2233, 0x1662, 0x10DE, "NVIDIA RTXA5500-6Q" }, + { 0x2233, 0x1663, 0x10DE, "NVIDIA RTXA5500-8Q" }, + { 0x2233, 0x1664, 0x10DE, "NVIDIA RTXA5500-12Q" }, + { 0x2233, 0x1665, 0x10DE, "NVIDIA RTXA5500-24Q" }, + { 0x2233, 0x1666, 0x10DE, "NVIDIA RTXA5500-1A" }, + { 0x2233, 0x1667, 0x10DE, "NVIDIA RTXA5500-2A" }, + { 0x2233, 0x1668, 0x10DE, "NVIDIA RTXA5500-3A" }, + { 0x2233, 0x1669, 0x10DE, "NVIDIA RTXA5500-4A" }, + { 0x2233, 0x166a, 0x10DE, "NVIDIA RTXA5500-6A" }, + { 0x2233, 0x166b, 0x10DE, "NVIDIA RTXA5500-8A" }, + { 0x2233, 0x166c, 0x10DE, "NVIDIA RTXA5500-12A" }, + { 0x2233, 0x166d, 0x10DE, "NVIDIA RTXA5500-24A" }, + { 0x2233, 0x166e, 0x10DE, "NVIDIA RTXA5500-6" }, + { 0x2233, 0x166f, 
0x10DE, "NVIDIA RTXA5500-8" }, + { 0x2233, 0x1670, 0x10DE, "NVIDIA RTXA5500-12" }, + { 0x2233, 0x1671, 0x10DE, "NVIDIA RTXA5500-24" }, + { 0x2233, 0x1672, 0x10DE, "NVIDIA RTXA5500-4C" }, + { 0x2233, 0x1673, 0x10DE, "NVIDIA RTXA5500-6C" }, + { 0x2233, 0x1674, 0x10DE, "NVIDIA RTXA5500-8C" }, + { 0x2233, 0x1675, 0x10DE, "NVIDIA RTXA5500-12C" }, + { 0x2233, 0x1676, 0x10DE, "NVIDIA RTXA5500-24C" }, + { 0x2235, 0x14d5, 0x10DE, "NVIDIA A40-1B" }, + { 0x2235, 0x14d6, 0x10DE, "NVIDIA A40-2B" }, + { 0x2235, 0x14d7, 0x10DE, "NVIDIA A40-1Q" }, + { 0x2235, 0x14d8, 0x10DE, "NVIDIA A40-2Q" }, + { 0x2235, 0x14d9, 0x10DE, "NVIDIA A40-3Q" }, + { 0x2235, 0x14da, 0x10DE, "NVIDIA A40-4Q" }, + { 0x2235, 0x14db, 0x10DE, "NVIDIA A40-6Q" }, + { 0x2235, 0x14dc, 0x10DE, "NVIDIA A40-8Q" }, + { 0x2235, 0x14dd, 0x10DE, "NVIDIA A40-12Q" }, + { 0x2235, 0x14de, 0x10DE, "NVIDIA A40-16Q" }, + { 0x2235, 0x14df, 0x10DE, "NVIDIA A40-24Q" }, + { 0x2235, 0x14e0, 0x10DE, "NVIDIA A40-48Q" }, + { 0x2235, 0x14e1, 0x10DE, "NVIDIA A40-1A" }, + { 0x2235, 0x14e2, 0x10DE, "NVIDIA A40-2A" }, + { 0x2235, 0x14e3, 0x10DE, "NVIDIA A40-3A" }, + { 0x2235, 0x14e4, 0x10DE, "NVIDIA A40-4A" }, + { 0x2235, 0x14e5, 0x10DE, "NVIDIA A40-6A" }, + { 0x2235, 0x14e6, 0x10DE, "NVIDIA A40-8A" }, + { 0x2235, 0x14e7, 0x10DE, "NVIDIA A40-12A" }, + { 0x2235, 0x14e8, 0x10DE, "NVIDIA A40-16A" }, + { 0x2235, 0x14e9, 0x10DE, "NVIDIA A40-24A" }, + { 0x2235, 0x14ea, 0x10DE, "NVIDIA A40-48A" }, + { 0x2235, 0x14eb, 0x10DE, "NVIDIA A40-12" }, + { 0x2235, 0x14ec, 0x10DE, "NVIDIA A40-16" }, + { 0x2235, 0x14ed, 0x10DE, "NVIDIA A40-24" }, + { 0x2235, 0x14ee, 0x10DE, "NVIDIA A40-48" }, + { 0x2235, 0x14f3, 0x10DE, "NVIDIA A40-4C" }, + { 0x2235, 0x14f4, 0x10DE, "NVIDIA A40-6C" }, + { 0x2235, 0x14f5, 0x10DE, "NVIDIA A40-8C" }, + { 0x2235, 0x14f6, 0x10DE, "NVIDIA A40-12C" }, + { 0x2235, 0x14f7, 0x10DE, "NVIDIA A40-16C" }, + { 0x2235, 0x14f8, 0x10DE, "NVIDIA A40-24C" }, + { 0x2235, 0x14f9, 0x10DE, "NVIDIA A40-48C" }, + { 0x2235, 0x1684, 0x10DE, "NVIDIA A40-2" }, + { 0x2235, 0x1685, 0x10DE, "NVIDIA A40-3" }, + { 0x2235, 0x1686, 0x10DE, "NVIDIA A40-4" }, + { 0x2235, 0x1687, 0x10DE, "NVIDIA A40-6" }, + { 0x2235, 0x1688, 0x10DE, "NVIDIA A40-8" }, + { 0x2235, 0x16e7, 0x10DE, "NVIDIA A40-1" }, + { 0x2236, 0x14b6, 0x10DE, "NVIDIA A10-1B" }, + { 0x2236, 0x14b7, 0x10DE, "NVIDIA A10-2B" }, + { 0x2236, 0x14b8, 0x10DE, "NVIDIA A10-1Q" }, + { 0x2236, 0x14b9, 0x10DE, "NVIDIA A10-2Q" }, + { 0x2236, 0x14ba, 0x10DE, "NVIDIA A10-3Q" }, + { 0x2236, 0x14bb, 0x10DE, "NVIDIA A10-4Q" }, + { 0x2236, 0x14bc, 0x10DE, "NVIDIA A10-6Q" }, + { 0x2236, 0x14bd, 0x10DE, "NVIDIA A10-8Q" }, + { 0x2236, 0x14be, 0x10DE, "NVIDIA A10-12Q" }, + { 0x2236, 0x14bf, 0x10DE, "NVIDIA A10-24Q" }, + { 0x2236, 0x14c0, 0x10DE, "NVIDIA A10-1A" }, + { 0x2236, 0x14c1, 0x10DE, "NVIDIA A10-2A" }, + { 0x2236, 0x14c2, 0x10DE, "NVIDIA A10-3A" }, + { 0x2236, 0x14c3, 0x10DE, "NVIDIA A10-4A" }, + { 0x2236, 0x14c4, 0x10DE, "NVIDIA A10-6A" }, + { 0x2236, 0x14c5, 0x10DE, "NVIDIA A10-8A" }, + { 0x2236, 0x14c6, 0x10DE, "NVIDIA A10-12A" }, + { 0x2236, 0x14c7, 0x10DE, "NVIDIA A10-24A" }, + { 0x2236, 0x14c8, 0x10DE, "NVIDIA A10-6" }, + { 0x2236, 0x14c9, 0x10DE, "NVIDIA A10-8" }, + { 0x2236, 0x14ca, 0x10DE, "NVIDIA A10-12" }, + { 0x2236, 0x14cb, 0x10DE, "NVIDIA A10-24" }, + { 0x2236, 0x14d0, 0x10DE, "NVIDIA A10-4C" }, + { 0x2236, 0x14d1, 0x10DE, "NVIDIA A10-6C" }, + { 0x2236, 0x14d2, 0x10DE, "NVIDIA A10-8C" }, + { 0x2236, 0x14d3, 0x10DE, "NVIDIA A10-12C" }, + { 0x2236, 0x14d4, 0x10DE, "NVIDIA A10-24C" }, + { 0x2236, 0x167e, 0x10DE, "NVIDIA A10-2" }, 
+ { 0x2236, 0x167f, 0x10DE, "NVIDIA A10-3" }, + { 0x2236, 0x1680, 0x10DE, "NVIDIA A10-4" }, + { 0x2236, 0x16e8, 0x10DE, "NVIDIA A10-1" }, + { 0x2237, 0x155a, 0x10DE, "NVIDIA A10G-1" }, + { 0x2237, 0x155b, 0x10DE, "NVIDIA A10G-2" }, + { 0x2237, 0x155c, 0x10DE, "NVIDIA A10G-3" }, + { 0x2237, 0x155d, 0x10DE, "NVIDIA A10G-4" }, + { 0x2237, 0x155e, 0x10DE, "NVIDIA A10G-6" }, + { 0x2237, 0x155f, 0x10DE, "NVIDIA A10G-8" }, + { 0x2237, 0x1560, 0x10DE, "NVIDIA A10G-12" }, + { 0x2237, 0x1561, 0x10DE, "NVIDIA A10G-24" }, + { 0x2237, 0x162a, 0x10DE, "NVIDIA A10G-1B" }, + { 0x2237, 0x162b, 0x10DE, "NVIDIA A10G-2B" }, + { 0x2237, 0x162c, 0x10DE, "NVIDIA A10G-1Q" }, + { 0x2237, 0x162d, 0x10DE, "NVIDIA A10G-2Q" }, + { 0x2237, 0x162e, 0x10DE, "NVIDIA A10G-3Q" }, + { 0x2237, 0x162f, 0x10DE, "NVIDIA A10G-4Q" }, + { 0x2237, 0x1630, 0x10DE, "NVIDIA A10G-6Q" }, + { 0x2237, 0x1631, 0x10DE, "NVIDIA A10G-8Q" }, + { 0x2237, 0x1632, 0x10DE, "NVIDIA A10G-12Q" }, + { 0x2237, 0x1633, 0x10DE, "NVIDIA A10G-24Q" }, + { 0x2237, 0x1634, 0x10DE, "NVIDIA A10G-1A" }, + { 0x2237, 0x1635, 0x10DE, "NVIDIA A10G-2A" }, + { 0x2237, 0x1636, 0x10DE, "NVIDIA A10G-3A" }, + { 0x2237, 0x1637, 0x10DE, "NVIDIA A10G-4A" }, + { 0x2237, 0x1638, 0x10DE, "NVIDIA A10G-6A" }, + { 0x2237, 0x1639, 0x10DE, "NVIDIA A10G-8A" }, + { 0x2237, 0x163a, 0x10DE, "NVIDIA A10G-12A" }, + { 0x2237, 0x163b, 0x10DE, "NVIDIA A10G-24A" }, + { 0x2237, 0x1810, 0x10DE, "NVIDIA GeForce RTX 3050" }, + { 0x2237, 0x1811, 0x10DE, "NVIDIA GeForce RTX 3060" }, + { 0x2238, 0x16a3, 0x10DE, "NVIDIA A10M-1B" }, + { 0x2238, 0x16a4, 0x10DE, "NVIDIA A10M-2B" }, + { 0x2238, 0x16a5, 0x10DE, "NVIDIA A10M-1Q" }, + { 0x2238, 0x16a6, 0x10DE, "NVIDIA A10M-2Q" }, + { 0x2238, 0x16a7, 0x10DE, "NVIDIA A10M-4Q" }, + { 0x2238, 0x16a8, 0x10DE, "NVIDIA A10M-5Q" }, + { 0x2238, 0x16a9, 0x10DE, "NVIDIA A10M-10Q" }, + { 0x2238, 0x16aa, 0x10DE, "NVIDIA A10M-20Q" }, + { 0x2238, 0x16ab, 0x10DE, "NVIDIA A10M-1A" }, + { 0x2238, 0x16ac, 0x10DE, "NVIDIA A10M-2A" }, + { 0x2238, 0x16ad, 0x10DE, "NVIDIA A10M-4A" }, + { 0x2238, 0x16ae, 0x10DE, "NVIDIA A10M-5A" }, + { 0x2238, 0x16af, 0x10DE, "NVIDIA A10M-10A" }, + { 0x2238, 0x16b0, 0x10DE, "NVIDIA A10M-20A" }, + { 0x2238, 0x16b1, 0x10DE, "NVIDIA A10M-2" }, + { 0x2238, 0x16b2, 0x10DE, "NVIDIA A10M-4" }, + { 0x2238, 0x16b3, 0x10DE, "NVIDIA A10M-5" }, + { 0x2238, 0x16b4, 0x10DE, "NVIDIA A10M-10" }, + { 0x2238, 0x16b5, 0x10DE, "NVIDIA A10M-20" }, + { 0x2238, 0x16b6, 0x10DE, "NVIDIA A10M-4C" }, + { 0x2238, 0x16b7, 0x10DE, "NVIDIA A10M-5C" }, + { 0x2238, 0x16b8, 0x10DE, "NVIDIA A10M-10C" }, + { 0x2238, 0x16b9, 0x10DE, "NVIDIA A10M-20C" }, + { 0x2238, 0x16e6, 0x10DE, "NVIDIA A10M-1" }, + { 0x2321, 0x1853, 0x10DE, "NVIDIA H100L-1-12CME" }, + { 0x2321, 0x1854, 0x10DE, "NVIDIA H100L-1-12C" }, + { 0x2321, 0x1855, 0x10DE, "NVIDIA H100L-1-24C" }, + { 0x2321, 0x1856, 0x10DE, "NVIDIA H100L-2-24C" }, + { 0x2321, 0x1857, 0x10DE, "NVIDIA H100L-3-47C" }, + { 0x2321, 0x1858, 0x10DE, "NVIDIA H100L-4-47C" }, + { 0x2321, 0x1859, 0x10DE, "NVIDIA H100L-7-94C" }, + { 0x2321, 0x185a, 0x10DE, "NVIDIA H100L-4C" }, + { 0x2321, 0x185b, 0x10DE, "NVIDIA H100L-6C" }, + { 0x2321, 0x185c, 0x10DE, "NVIDIA H100L-11C" }, + { 0x2321, 0x185d, 0x10DE, "NVIDIA H100L-15C" }, + { 0x2321, 0x185e, 0x10DE, "NVIDIA H100L-23C" }, + { 0x2321, 0x185f, 0x10DE, "NVIDIA H100L-47C" }, + { 0x2321, 0x1860, 0x10DE, "NVIDIA H100L-94C" }, + { 0x2322, 0x17e2, 0x10DE, "NVIDIA H800-1-10CME" }, + { 0x2322, 0x17e3, 0x10DE, "NVIDIA H800-1-10C" }, + { 0x2322, 0x17e4, 0x10DE, "NVIDIA H800-2-20C" }, + { 0x2322, 0x17e5, 0x10DE, 
"NVIDIA H800-3-40C" }, + { 0x2322, 0x17e6, 0x10DE, "NVIDIA H800-4-40C" }, + { 0x2322, 0x17e7, 0x10DE, "NVIDIA H800-7-80C" }, + { 0x2322, 0x17e8, 0x10DE, "NVIDIA H800-4C" }, + { 0x2322, 0x17e9, 0x10DE, "NVIDIA H800-5C" }, + { 0x2322, 0x17ea, 0x10DE, "NVIDIA H800-8C" }, + { 0x2322, 0x17eb, 0x10DE, "NVIDIA H800-10C" }, + { 0x2322, 0x17ec, 0x10DE, "NVIDIA H800-16C" }, + { 0x2322, 0x17ed, 0x10DE, "NVIDIA H800-20C" }, + { 0x2322, 0x17ee, 0x10DE, "NVIDIA H800-40C" }, + { 0x2322, 0x17ef, 0x10DE, "NVIDIA H800-80C" }, + { 0x2322, 0x1845, 0x10DE, "NVIDIA H800-1-20C" }, + { 0x2324, 0x18d5, 0x10DE, "NVIDIA H800XM-1-10CME" }, + { 0x2324, 0x18d6, 0x10DE, "NVIDIA H800XM-1-10C" }, + { 0x2324, 0x18d7, 0x10DE, "NVIDIA H800XM-1-20C" }, + { 0x2324, 0x18d8, 0x10DE, "NVIDIA H800XM-2-20C" }, + { 0x2324, 0x18d9, 0x10DE, "NVIDIA H800XM-3-40C" }, + { 0x2324, 0x18da, 0x10DE, "NVIDIA H800XM-4-40C" }, + { 0x2324, 0x18db, 0x10DE, "NVIDIA H800XM-7-80C" }, + { 0x2324, 0x18dc, 0x10DE, "NVIDIA H800XM-4C" }, + { 0x2324, 0x18dd, 0x10DE, "NVIDIA H800XM-5C" }, + { 0x2324, 0x18de, 0x10DE, "NVIDIA H800XM-8C" }, + { 0x2324, 0x18df, 0x10DE, "NVIDIA H800XM-10C" }, + { 0x2324, 0x18e0, 0x10DE, "NVIDIA H800XM-16C" }, + { 0x2324, 0x18e1, 0x10DE, "NVIDIA H800XM-20C" }, + { 0x2324, 0x18e2, 0x10DE, "NVIDIA H800XM-40C" }, + { 0x2324, 0x18e3, 0x10DE, "NVIDIA H800XM-80C" }, + { 0x2329, 0x2028, 0x10DE, "NVIDIA H20-1-12CME" }, + { 0x2329, 0x2029, 0x10DE, "NVIDIA H20-1-12C" }, + { 0x2329, 0x202a, 0x10DE, "NVIDIA H20-1-24C" }, + { 0x2329, 0x202b, 0x10DE, "NVIDIA H20-2-24C" }, + { 0x2329, 0x202c, 0x10DE, "NVIDIA H20-3-48C" }, + { 0x2329, 0x202d, 0x10DE, "NVIDIA H20-4-48C" }, + { 0x2329, 0x202e, 0x10DE, "NVIDIA H20-7-96C" }, + { 0x2329, 0x202f, 0x10DE, "NVIDIA H20-4C" }, + { 0x2329, 0x2030, 0x10DE, "NVIDIA H20-6C" }, + { 0x2329, 0x2031, 0x10DE, "NVIDIA H20-12C" }, + { 0x2329, 0x2032, 0x10DE, "NVIDIA H20-16C" }, + { 0x2329, 0x2033, 0x10DE, "NVIDIA H20-24C" }, + { 0x2329, 0x2034, 0x10DE, "NVIDIA H20-48C" }, + { 0x2329, 0x2035, 0x10DE, "NVIDIA H20-96C" }, + { 0x2329, 0x2047, 0x10DE, "NVIDIA H20-8C" }, + { 0x2329, 0x2048, 0x10DE, "NVIDIA H20-32C" }, + { 0x232C, 0x2108, 0x10DE, "NVIDIA H20X-1-18CME" }, + { 0x232C, 0x2109, 0x10DE, "NVIDIA H20X-1-18C" }, + { 0x232C, 0x210a, 0x10DE, "NVIDIA H20X-1-35C" }, + { 0x232C, 0x210b, 0x10DE, "NVIDIA H20X-2-35C" }, + { 0x232C, 0x210c, 0x10DE, "NVIDIA H20X-3-71C" }, + { 0x232C, 0x210d, 0x10DE, "NVIDIA H20X-4-71C" }, + { 0x232C, 0x210e, 0x10DE, "NVIDIA H20X-7-141C" }, + { 0x232C, 0x210f, 0x10DE, "NVIDIA H20X-4C" }, + { 0x232C, 0x2110, 0x10DE, "NVIDIA H20X-7C" }, + { 0x232C, 0x2111, 0x10DE, "NVIDIA H20X-8C" }, + { 0x232C, 0x2112, 0x10DE, "NVIDIA H20X-14C" }, + { 0x232C, 0x2113, 0x10DE, "NVIDIA H20X-17C" }, + { 0x232C, 0x2114, 0x10DE, "NVIDIA H20X-28C" }, + { 0x232C, 0x2115, 0x10DE, "NVIDIA H20X-35C" }, + { 0x232C, 0x2116, 0x10DE, "NVIDIA H20X-70C" }, + { 0x232C, 0x2117, 0x10DE, "NVIDIA H20X-141C" }, + { 0x2330, 0x187a, 0x10DE, "NVIDIA H100XM-1-10CME" }, + { 0x2330, 0x187b, 0x10DE, "NVIDIA H100XM-1-10C" }, + { 0x2330, 0x187c, 0x10DE, "NVIDIA H100XM-1-20C" }, + { 0x2330, 0x187d, 0x10DE, "NVIDIA H100XM-2-20C" }, + { 0x2330, 0x187e, 0x10DE, "NVIDIA H100XM-3-40C" }, + { 0x2330, 0x187f, 0x10DE, "NVIDIA H100XM-4-40C" }, + { 0x2330, 0x1880, 0x10DE, "NVIDIA H100XM-7-80C" }, + { 0x2330, 0x1881, 0x10DE, "NVIDIA H100XM-4C" }, + { 0x2330, 0x1882, 0x10DE, "NVIDIA H100XM-5C" }, + { 0x2330, 0x1883, 0x10DE, "NVIDIA H100XM-8C" }, + { 0x2330, 0x1884, 0x10DE, "NVIDIA H100XM-10C" }, + { 0x2330, 0x1885, 0x10DE, "NVIDIA H100XM-16C" }, + { 
0x2330, 0x1886, 0x10DE, "NVIDIA H100XM-20C" }, + { 0x2330, 0x1887, 0x10DE, "NVIDIA H100XM-40C" }, + { 0x2330, 0x1888, 0x10DE, "NVIDIA H100XM-80C" }, + { 0x2331, 0x16d3, 0x10DE, "NVIDIA H100-1-10C" }, + { 0x2331, 0x16d4, 0x10DE, "NVIDIA H100-2-20C" }, + { 0x2331, 0x16d5, 0x10DE, "NVIDIA H100-3-40C" }, + { 0x2331, 0x16d6, 0x10DE, "NVIDIA H100-4-40C" }, + { 0x2331, 0x16d7, 0x10DE, "NVIDIA H100-7-80C" }, + { 0x2331, 0x16d8, 0x10DE, "NVIDIA H100-4C" }, + { 0x2331, 0x16d9, 0x10DE, "NVIDIA H100-8C" }, + { 0x2331, 0x16da, 0x10DE, "NVIDIA H100-10C" }, + { 0x2331, 0x16db, 0x10DE, "NVIDIA H100-16C" }, + { 0x2331, 0x16dc, 0x10DE, "NVIDIA H100-20C" }, + { 0x2331, 0x16dd, 0x10DE, "NVIDIA H100-40C" }, + { 0x2331, 0x16de, 0x10DE, "NVIDIA H100-80C" }, + { 0x2331, 0x1798, 0x10DE, "NVIDIA H100-5C" }, + { 0x2331, 0x17f0, 0x10DE, "NVIDIA H100-1-10CME" }, + { 0x2331, 0x1844, 0x10DE, "NVIDIA H100-1-20C" }, + { 0x2335, 0x206e, 0x10DE, "NVIDIA H200X-1-18CME" }, + { 0x2335, 0x206f, 0x10DE, "NVIDIA H200X-1-18C" }, + { 0x2335, 0x2070, 0x10DE, "NVIDIA H200X-1-35C" }, + { 0x2335, 0x2071, 0x10DE, "NVIDIA H200X-2-35C" }, + { 0x2335, 0x2072, 0x10DE, "NVIDIA H200X-3-71C" }, + { 0x2335, 0x2073, 0x10DE, "NVIDIA H200X-4-71C" }, + { 0x2335, 0x2074, 0x10DE, "NVIDIA H200X-7-141C" }, + { 0x2335, 0x2075, 0x10DE, "NVIDIA H200X-4C" }, + { 0x2335, 0x2076, 0x10DE, "NVIDIA H200X-7C" }, + { 0x2335, 0x2077, 0x10DE, "NVIDIA H200X-8C" }, + { 0x2335, 0x2078, 0x10DE, "NVIDIA H200X-14C" }, + { 0x2335, 0x2079, 0x10DE, "NVIDIA H200X-17C" }, + { 0x2335, 0x207a, 0x10DE, "NVIDIA H200X-28C" }, + { 0x2335, 0x207b, 0x10DE, "NVIDIA H200X-35C" }, + { 0x2335, 0x207e, 0x10DE, "NVIDIA H200X-70C" }, + { 0x2335, 0x207f, 0x10DE, "NVIDIA H200X-141C" }, + { 0x2337, 0x18f2, 0x10DE, "NVIDIA H100XS-1-8CME" }, + { 0x2337, 0x18f3, 0x10DE, "NVIDIA H100XS-1-8C" }, + { 0x2337, 0x18f4, 0x10DE, "NVIDIA H100XS-1-16C" }, + { 0x2337, 0x18f5, 0x10DE, "NVIDIA H100XS-2-16C" }, + { 0x2337, 0x18f6, 0x10DE, "NVIDIA H100XS-3-32C" }, + { 0x2337, 0x18f7, 0x10DE, "NVIDIA H100XS-4-32C" }, + { 0x2337, 0x18f8, 0x10DE, "NVIDIA H100XS-7-64C" }, + { 0x2337, 0x18f9, 0x10DE, "NVIDIA H100XS-4C" }, + { 0x2337, 0x18fa, 0x10DE, "NVIDIA H100XS-8C" }, + { 0x2337, 0x18fb, 0x10DE, "NVIDIA H100XS-16C" }, + { 0x2337, 0x18fc, 0x10DE, "NVIDIA H100XS-32C" }, + { 0x2337, 0x18fd, 0x10DE, "NVIDIA H100XS-64C" }, + { 0x2339, 0x18e4, 0x10DE, "NVIDIA H100XL-1-12CME" }, + { 0x2339, 0x18e5, 0x10DE, "NVIDIA H100XL-1-12C" }, + { 0x2339, 0x18e6, 0x10DE, "NVIDIA H100XL-1-24C" }, + { 0x2339, 0x18e7, 0x10DE, "NVIDIA H100XL-2-24C" }, + { 0x2339, 0x18e8, 0x10DE, "NVIDIA H100XL-3-47C" }, + { 0x2339, 0x18e9, 0x10DE, "NVIDIA H100XL-4-47C" }, + { 0x2339, 0x18ea, 0x10DE, "NVIDIA H100XL-7-94C" }, + { 0x2339, 0x18eb, 0x10DE, "NVIDIA H100XL-4C" }, + { 0x2339, 0x18ec, 0x10DE, "NVIDIA H100XL-6C" }, + { 0x2339, 0x18ed, 0x10DE, "NVIDIA H100XL-11C" }, + { 0x2339, 0x18ee, 0x10DE, "NVIDIA H100XL-15C" }, + { 0x2339, 0x18ef, 0x10DE, "NVIDIA H100XL-23C" }, + { 0x2339, 0x18f0, 0x10DE, "NVIDIA H100XL-47C" }, + { 0x2339, 0x18f1, 0x10DE, "NVIDIA H100XL-94C" }, + { 0x233A, 0x1861, 0x10DE, "NVIDIA H800L-1-12CME" }, + { 0x233A, 0x1862, 0x10DE, "NVIDIA H800L-1-12C" }, + { 0x233A, 0x1863, 0x10DE, "NVIDIA H800L-1-24C" }, + { 0x233A, 0x1864, 0x10DE, "NVIDIA H800L-2-24C" }, + { 0x233A, 0x1865, 0x10DE, "NVIDIA H800L-3-47C" }, + { 0x233A, 0x1866, 0x10DE, "NVIDIA H800L-4-47C" }, + { 0x233A, 0x1867, 0x10DE, "NVIDIA H800L-7-94C" }, + { 0x233A, 0x1868, 0x10DE, "NVIDIA H800L-4C" }, + { 0x233A, 0x1869, 0x10DE, "NVIDIA H800L-6C" }, + { 0x233A, 0x186a, 
0x10DE, "NVIDIA H800L-11C" }, + { 0x233A, 0x186b, 0x10DE, "NVIDIA H800L-15C" }, + { 0x233A, 0x186c, 0x10DE, "NVIDIA H800L-23C" }, + { 0x233A, 0x186d, 0x10DE, "NVIDIA H800L-47C" }, + { 0x233A, 0x186e, 0x10DE, "NVIDIA H800L-94C" }, + { 0x233B, 0x2081, 0x10DE, "NVIDIA H200-1-18CME" }, + { 0x233B, 0x2082, 0x10DE, "NVIDIA H200-1-18C" }, + { 0x233B, 0x2083, 0x10DE, "NVIDIA H200-1-35C" }, + { 0x233B, 0x2084, 0x10DE, "NVIDIA H200-2-35C" }, + { 0x233B, 0x2085, 0x10DE, "NVIDIA H200-3-71C" }, + { 0x233B, 0x2086, 0x10DE, "NVIDIA H200-4-71C" }, + { 0x233B, 0x2087, 0x10DE, "NVIDIA H200-7-141C" }, + { 0x233B, 0x2088, 0x10DE, "NVIDIA H200-4C" }, + { 0x233B, 0x2089, 0x10DE, "NVIDIA H200-7C" }, + { 0x233B, 0x208a, 0x10DE, "NVIDIA H200-8C" }, + { 0x233B, 0x208b, 0x10DE, "NVIDIA H200-14C" }, + { 0x233B, 0x208c, 0x10DE, "NVIDIA H200-17C" }, + { 0x233B, 0x208d, 0x10DE, "NVIDIA H200-28C" }, + { 0x233B, 0x208e, 0x10DE, "NVIDIA H200-35C" }, + { 0x233B, 0x208f, 0x10DE, "NVIDIA H200-70C" }, + { 0x233B, 0x2090, 0x10DE, "NVIDIA H200-141C" }, + { 0x2342, 0x18c2, 0x10DE, "NVIDIA GH200-1-12CME" }, + { 0x2342, 0x18c3, 0x10DE, "NVIDIA GH200-1-12C" }, + { 0x2342, 0x18c4, 0x10DE, "NVIDIA GH200-1-24C" }, + { 0x2342, 0x18c5, 0x10DE, "NVIDIA GH200-2-24C" }, + { 0x2342, 0x18c6, 0x10DE, "NVIDIA GH200-3-48C" }, + { 0x2342, 0x18c7, 0x10DE, "NVIDIA GH200-4-48C" }, + { 0x2342, 0x18c8, 0x10DE, "NVIDIA GH200-7-96C" }, + { 0x2342, 0x18c9, 0x10DE, "NVIDIA GH200-96C" }, + { 0x2348, 0x20c2, 0x10DE, "NVIDIA GH200L-1-18CME" }, + { 0x2348, 0x20c3, 0x10DE, "NVIDIA GH200L-1-18C" }, + { 0x2348, 0x20c4, 0x10DE, "NVIDIA GH200L-1-36C" }, + { 0x2348, 0x20c5, 0x10DE, "NVIDIA GH200L-2-36C" }, + { 0x2348, 0x20c6, 0x10DE, "NVIDIA GH200L-3-72C" }, + { 0x2348, 0x20c7, 0x10DE, "NVIDIA GH200L-4-72C" }, + { 0x2348, 0x20c8, 0x10DE, "NVIDIA GH200L-7-144C" }, + { 0x2348, 0x20c9, 0x10DE, "NVIDIA GH200L-144C" }, + { 0x25B6, 0x159d, 0x10DE, "NVIDIA A16-1B" }, + { 0x25B6, 0x159e, 0x10DE, "NVIDIA A16-2B" }, + { 0x25B6, 0x159f, 0x10DE, "NVIDIA A16-1Q" }, + { 0x25B6, 0x1600, 0x10DE, "NVIDIA A16-2Q" }, + { 0x25B6, 0x1601, 0x10DE, "NVIDIA A16-4Q" }, + { 0x25B6, 0x1602, 0x10DE, "NVIDIA A16-8Q" }, + { 0x25B6, 0x1603, 0x10DE, "NVIDIA A16-16Q" }, + { 0x25B6, 0x1604, 0x10DE, "NVIDIA A16-1A" }, + { 0x25B6, 0x1605, 0x10DE, "NVIDIA A16-2A" }, + { 0x25B6, 0x1606, 0x10DE, "NVIDIA A16-4A" }, + { 0x25B6, 0x1607, 0x10DE, "NVIDIA A16-8A" }, + { 0x25B6, 0x1608, 0x10DE, "NVIDIA A16-16A" }, + { 0x25B6, 0x1609, 0x10DE, "NVIDIA A16-4C" }, + { 0x25B6, 0x160a, 0x10DE, "NVIDIA A16-8C" }, + { 0x25B6, 0x160b, 0x10DE, "NVIDIA A16-16C" }, + { 0x25B6, 0x1646, 0x10DE, "NVIDIA A2-1B" }, + { 0x25B6, 0x1647, 0x10DE, "NVIDIA A2-2B" }, + { 0x25B6, 0x1648, 0x10DE, "NVIDIA A2-1Q" }, + { 0x25B6, 0x1649, 0x10DE, "NVIDIA A2-2Q" }, + { 0x25B6, 0x164a, 0x10DE, "NVIDIA A2-4Q" }, + { 0x25B6, 0x164b, 0x10DE, "NVIDIA A2-8Q" }, + { 0x25B6, 0x164c, 0x10DE, "NVIDIA A2-16Q" }, + { 0x25B6, 0x164d, 0x10DE, "NVIDIA A2-1A" }, + { 0x25B6, 0x164e, 0x10DE, "NVIDIA A2-2A" }, + { 0x25B6, 0x164f, 0x10DE, "NVIDIA A2-4A" }, + { 0x25B6, 0x1650, 0x10DE, "NVIDIA A2-8A" }, + { 0x25B6, 0x1651, 0x10DE, "NVIDIA A2-16A" }, + { 0x25B6, 0x1652, 0x10DE, "NVIDIA A2-4" }, + { 0x25B6, 0x1653, 0x10DE, "NVIDIA A2-8" }, + { 0x25B6, 0x1654, 0x10DE, "NVIDIA A2-16" }, + { 0x25B6, 0x1655, 0x10DE, "NVIDIA A2-4C" }, + { 0x25B6, 0x1656, 0x10DE, "NVIDIA A2-8C" }, + { 0x25B6, 0x1657, 0x10DE, "NVIDIA A2-16C" }, + { 0x26B1, 0x1708, 0x10DE, "NVIDIA RTX6000-Ada-1B" }, + { 0x26B1, 0x1709, 0x10DE, "NVIDIA RTX6000-Ada-2B" }, + { 0x26B1, 0x170a, 0x10DE, "NVIDIA 
RTX6000-Ada-1Q" }, + { 0x26B1, 0x170b, 0x10DE, "NVIDIA RTX6000-Ada-2Q" }, + { 0x26B1, 0x170c, 0x10DE, "NVIDIA RTX6000-Ada-3Q" }, + { 0x26B1, 0x170d, 0x10DE, "NVIDIA RTX6000-Ada-4Q" }, + { 0x26B1, 0x170e, 0x10DE, "NVIDIA RTX6000-Ada-6Q" }, + { 0x26B1, 0x170f, 0x10DE, "NVIDIA RTX6000-Ada-8Q" }, + { 0x26B1, 0x1710, 0x10DE, "NVIDIA RTX6000-Ada-12Q" }, + { 0x26B1, 0x1711, 0x10DE, "NVIDIA RTX6000-Ada-16Q" }, + { 0x26B1, 0x1712, 0x10DE, "NVIDIA RTX6000-Ada-24Q" }, + { 0x26B1, 0x1713, 0x10DE, "NVIDIA RTX6000-Ada-48Q" }, + { 0x26B1, 0x1714, 0x10DE, "NVIDIA RTX6000-Ada-1A" }, + { 0x26B1, 0x1715, 0x10DE, "NVIDIA RTX6000-Ada-2A" }, + { 0x26B1, 0x1716, 0x10DE, "NVIDIA RTX6000-Ada-3A" }, + { 0x26B1, 0x1717, 0x10DE, "NVIDIA RTX6000-Ada-4A" }, + { 0x26B1, 0x1718, 0x10DE, "NVIDIA RTX6000-Ada-6A" }, + { 0x26B1, 0x1719, 0x10DE, "NVIDIA RTX6000-Ada-8A" }, + { 0x26B1, 0x171a, 0x10DE, "NVIDIA RTX6000-Ada-12A" }, + { 0x26B1, 0x171b, 0x10DE, "NVIDIA RTX6000-Ada-16A" }, + { 0x26B1, 0x171c, 0x10DE, "NVIDIA RTX6000-Ada-24A" }, + { 0x26B1, 0x171d, 0x10DE, "NVIDIA RTX6000-Ada-48A" }, + { 0x26B1, 0x171e, 0x10DE, "NVIDIA RTX6000-Ada-1" }, + { 0x26B1, 0x171f, 0x10DE, "NVIDIA RTX6000-Ada-2" }, + { 0x26B1, 0x1720, 0x10DE, "NVIDIA RTX6000-Ada-3" }, + { 0x26B1, 0x1721, 0x10DE, "NVIDIA RTX6000-Ada-4" }, + { 0x26B1, 0x1722, 0x10DE, "NVIDIA RTX6000-Ada-6" }, + { 0x26B1, 0x1723, 0x10DE, "NVIDIA RTX6000-Ada-8" }, + { 0x26B1, 0x1724, 0x10DE, "NVIDIA RTX6000-Ada-12" }, + { 0x26B1, 0x1725, 0x10DE, "NVIDIA RTX6000-Ada-16" }, + { 0x26B1, 0x1726, 0x10DE, "NVIDIA RTX6000-Ada-24" }, + { 0x26B1, 0x1727, 0x10DE, "NVIDIA RTX6000-Ada-48" }, + { 0x26B1, 0x1728, 0x10DE, "NVIDIA RTX6000-Ada-4C" }, + { 0x26B1, 0x1729, 0x10DE, "NVIDIA RTX6000-Ada-6C" }, + { 0x26B1, 0x172a, 0x10DE, "NVIDIA RTX6000-Ada-8C" }, + { 0x26B1, 0x172b, 0x10DE, "NVIDIA RTX6000-Ada-12C" }, + { 0x26B1, 0x172c, 0x10DE, "NVIDIA RTX6000-Ada-16C" }, + { 0x26B1, 0x172d, 0x10DE, "NVIDIA RTX6000-Ada-24C" }, + { 0x26B1, 0x172e, 0x10DE, "NVIDIA RTX6000-Ada-48C" }, + { 0x26B2, 0x1821, 0x10DE, "NVIDIA RTX5000-Ada-1B" }, + { 0x26B2, 0x1822, 0x10DE, "NVIDIA RTX5000-Ada-2B" }, + { 0x26B2, 0x1823, 0x10DE, "NVIDIA RTX5000-Ada-1Q" }, + { 0x26B2, 0x1824, 0x10DE, "NVIDIA RTX5000-Ada-2Q" }, + { 0x26B2, 0x1825, 0x10DE, "NVIDIA RTX5000-Ada-4Q" }, + { 0x26B2, 0x1826, 0x10DE, "NVIDIA RTX5000-Ada-8Q" }, + { 0x26B2, 0x1827, 0x10DE, "NVIDIA RTX5000-Ada-16Q" }, + { 0x26B2, 0x1828, 0x10DE, "NVIDIA RTX5000-Ada-32Q" }, + { 0x26B2, 0x1829, 0x10DE, "NVIDIA RTX5000-Ada-1A" }, + { 0x26B2, 0x182a, 0x10DE, "NVIDIA RTX5000-Ada-2A" }, + { 0x26B2, 0x182b, 0x10DE, "NVIDIA RTX5000-Ada-4A" }, + { 0x26B2, 0x182c, 0x10DE, "NVIDIA RTX5000-Ada-8A" }, + { 0x26B2, 0x182d, 0x10DE, "NVIDIA RTX5000-Ada-16A" }, + { 0x26B2, 0x182e, 0x10DE, "NVIDIA RTX5000-Ada-32A" }, + { 0x26B2, 0x182f, 0x10DE, "NVIDIA RTX5000-Ada-1" }, + { 0x26B2, 0x1830, 0x10DE, "NVIDIA RTX5000-Ada-2" }, + { 0x26B2, 0x1831, 0x10DE, "NVIDIA RTX5000-Ada-4" }, + { 0x26B2, 0x1832, 0x10DE, "NVIDIA RTX5000-Ada-8" }, + { 0x26B2, 0x1833, 0x10DE, "NVIDIA RTX5000-Ada-16" }, + { 0x26B2, 0x1834, 0x10DE, "NVIDIA RTX5000-Ada-32" }, + { 0x26B2, 0x1835, 0x10DE, "NVIDIA RTX5000-Ada-4C" }, + { 0x26B2, 0x1836, 0x10DE, "NVIDIA RTX5000-Ada-8C" }, + { 0x26B2, 0x1837, 0x10DE, "NVIDIA RTX5000-Ada-16C" }, + { 0x26B2, 0x1838, 0x10DE, "NVIDIA RTX5000-Ada-32C" }, + { 0x26B3, 0x1958, 0x10DE, "NVIDIA RTX5880-Ada-1B" }, + { 0x26B3, 0x1959, 0x10DE, "NVIDIA RTX5880-Ada-2B" }, + { 0x26B3, 0x195a, 0x10DE, "NVIDIA RTX5880-Ada-1Q" }, + { 0x26B3, 0x195b, 0x10DE, "NVIDIA RTX5880-Ada-2Q" }, + { 
0x26B3, 0x195c, 0x10DE, "NVIDIA RTX5880-Ada-3Q" }, + { 0x26B3, 0x195d, 0x10DE, "NVIDIA RTX5880-Ada-4Q" }, + { 0x26B3, 0x195e, 0x10DE, "NVIDIA RTX5880-Ada-6Q" }, + { 0x26B3, 0x195f, 0x10DE, "NVIDIA RTX5880-Ada-8Q" }, + { 0x26B3, 0x1960, 0x10DE, "NVIDIA RTX5880-Ada-12Q" }, + { 0x26B3, 0x1961, 0x10DE, "NVIDIA RTX5880-Ada-16Q" }, + { 0x26B3, 0x1962, 0x10DE, "NVIDIA RTX5880-Ada-24Q" }, + { 0x26B3, 0x1963, 0x10DE, "NVIDIA RTX5880-Ada-48Q" }, + { 0x26B3, 0x1964, 0x10DE, "NVIDIA RTX5880-Ada-1A" }, + { 0x26B3, 0x1965, 0x10DE, "NVIDIA RTX5880-Ada-2A" }, + { 0x26B3, 0x1966, 0x10DE, "NVIDIA RTX5880-Ada-3A" }, + { 0x26B3, 0x1967, 0x10DE, "NVIDIA RTX5880-Ada-4A" }, + { 0x26B3, 0x1968, 0x10DE, "NVIDIA RTX5880-Ada-6A" }, + { 0x26B3, 0x1969, 0x10DE, "NVIDIA RTX5880-Ada-8A" }, + { 0x26B3, 0x196a, 0x10DE, "NVIDIA RTX5880-Ada-12A" }, + { 0x26B3, 0x196b, 0x10DE, "NVIDIA RTX5880-Ada-16A" }, + { 0x26B3, 0x196c, 0x10DE, "NVIDIA RTX5880-Ada-24A" }, + { 0x26B3, 0x196d, 0x10DE, "NVIDIA RTX5880-Ada-48A" }, + { 0x26B3, 0x196e, 0x10DE, "NVIDIA RTX5880-Ada-1" }, + { 0x26B3, 0x196f, 0x10DE, "NVIDIA RTX5880-Ada-2" }, + { 0x26B3, 0x1970, 0x10DE, "NVIDIA RTX5880-Ada-3" }, + { 0x26B3, 0x1971, 0x10DE, "NVIDIA RTX5880-Ada-4" }, + { 0x26B3, 0x1972, 0x10DE, "NVIDIA RTX5880-Ada-6" }, + { 0x26B3, 0x1973, 0x10DE, "NVIDIA RTX5880-Ada-8" }, + { 0x26B3, 0x1974, 0x10DE, "NVIDIA RTX5880-Ada-12" }, + { 0x26B3, 0x1975, 0x10DE, "NVIDIA RTX5880-Ada-16" }, + { 0x26B3, 0x1976, 0x10DE, "NVIDIA RTX5880-Ada-24" }, + { 0x26B3, 0x1977, 0x10DE, "NVIDIA RTX5880-Ada-48" }, + { 0x26B3, 0x1978, 0x10DE, "NVIDIA RTX5880-Ada-4C" }, + { 0x26B3, 0x1979, 0x10DE, "NVIDIA RTX5880-Ada-6C" }, + { 0x26B3, 0x197a, 0x10DE, "NVIDIA RTX5880-Ada-8C" }, + { 0x26B3, 0x197b, 0x10DE, "NVIDIA RTX5880-Ada-12C" }, + { 0x26B3, 0x197c, 0x10DE, "NVIDIA RTX5880-Ada-16C" }, + { 0x26B3, 0x197d, 0x10DE, "NVIDIA RTX5880-Ada-24C" }, + { 0x26B3, 0x197e, 0x10DE, "NVIDIA RTX5880-Ada-48C" }, + { 0x26B5, 0x176d, 0x10DE, "NVIDIA L40-1B" }, + { 0x26B5, 0x176e, 0x10DE, "NVIDIA L40-2B" }, + { 0x26B5, 0x176f, 0x10DE, "NVIDIA L40-1Q" }, + { 0x26B5, 0x1770, 0x10DE, "NVIDIA L40-2Q" }, + { 0x26B5, 0x1771, 0x10DE, "NVIDIA L40-3Q" }, + { 0x26B5, 0x1772, 0x10DE, "NVIDIA L40-4Q" }, + { 0x26B5, 0x1773, 0x10DE, "NVIDIA L40-6Q" }, + { 0x26B5, 0x1774, 0x10DE, "NVIDIA L40-8Q" }, + { 0x26B5, 0x1775, 0x10DE, "NVIDIA L40-12Q" }, + { 0x26B5, 0x1776, 0x10DE, "NVIDIA L40-16Q" }, + { 0x26B5, 0x1777, 0x10DE, "NVIDIA L40-24Q" }, + { 0x26B5, 0x1778, 0x10DE, "NVIDIA L40-48Q" }, + { 0x26B5, 0x1779, 0x10DE, "NVIDIA L40-1A" }, + { 0x26B5, 0x177a, 0x10DE, "NVIDIA L40-2A" }, + { 0x26B5, 0x177b, 0x10DE, "NVIDIA L40-3A" }, + { 0x26B5, 0x177c, 0x10DE, "NVIDIA L40-4A" }, + { 0x26B5, 0x177d, 0x10DE, "NVIDIA L40-6A" }, + { 0x26B5, 0x177e, 0x10DE, "NVIDIA L40-8A" }, + { 0x26B5, 0x177f, 0x10DE, "NVIDIA L40-12A" }, + { 0x26B5, 0x1780, 0x10DE, "NVIDIA L40-16A" }, + { 0x26B5, 0x1781, 0x10DE, "NVIDIA L40-24A" }, + { 0x26B5, 0x1782, 0x10DE, "NVIDIA L40-48A" }, + { 0x26B5, 0x1783, 0x10DE, "NVIDIA L40-1" }, + { 0x26B5, 0x1784, 0x10DE, "NVIDIA L40-2" }, + { 0x26B5, 0x1785, 0x10DE, "NVIDIA L40-3" }, + { 0x26B5, 0x1786, 0x10DE, "NVIDIA L40-4" }, + { 0x26B5, 0x1787, 0x10DE, "NVIDIA L40-6" }, + { 0x26B5, 0x1788, 0x10DE, "NVIDIA L40-8" }, + { 0x26B5, 0x1789, 0x10DE, "NVIDIA L40-12" }, + { 0x26B5, 0x178a, 0x10DE, "NVIDIA L40-16" }, + { 0x26B5, 0x178b, 0x10DE, "NVIDIA L40-24" }, + { 0x26B5, 0x178c, 0x10DE, "NVIDIA L40-48" }, + { 0x26B5, 0x178d, 0x10DE, "NVIDIA L40-4C" }, + { 0x26B5, 0x178e, 0x10DE, "NVIDIA L40-6C" }, + { 0x26B5, 0x178f, 0x10DE, 
"NVIDIA L40-8C" }, + { 0x26B5, 0x1790, 0x10DE, "NVIDIA L40-12C" }, + { 0x26B5, 0x1791, 0x10DE, "NVIDIA L40-16C" }, + { 0x26B5, 0x1792, 0x10DE, "NVIDIA L40-24C" }, + { 0x26B5, 0x1793, 0x10DE, "NVIDIA L40-48C" }, + { 0x26B5, 0x1818, 0x10DE, "NVIDIA GeForce RTX 3060" }, + { 0x26B5, 0x181a, 0x10DE, "NVIDIA GeForce RTX 3050" }, + { 0x26B8, 0x174e, 0x10DE, "NVIDIA L40G-1B" }, + { 0x26B8, 0x174f, 0x10DE, "NVIDIA L40G-2B" }, + { 0x26B8, 0x1750, 0x10DE, "NVIDIA L40G-1Q" }, + { 0x26B8, 0x1751, 0x10DE, "NVIDIA L40G-2Q" }, + { 0x26B8, 0x1752, 0x10DE, "NVIDIA L40G-3Q" }, + { 0x26B8, 0x1753, 0x10DE, "NVIDIA L40G-4Q" }, + { 0x26B8, 0x1754, 0x10DE, "NVIDIA L40G-6Q" }, + { 0x26B8, 0x1755, 0x10DE, "NVIDIA L40G-8Q" }, + { 0x26B8, 0x1756, 0x10DE, "NVIDIA L40G-12Q" }, + { 0x26B8, 0x1757, 0x10DE, "NVIDIA L40G-24Q" }, + { 0x26B8, 0x1758, 0x10DE, "NVIDIA L40G-1A" }, + { 0x26B8, 0x1759, 0x10DE, "NVIDIA L40G-2A" }, + { 0x26B8, 0x175a, 0x10DE, "NVIDIA L40G-3A" }, + { 0x26B8, 0x175b, 0x10DE, "NVIDIA L40G-4A" }, + { 0x26B8, 0x175c, 0x10DE, "NVIDIA L40G-6A" }, + { 0x26B8, 0x175d, 0x10DE, "NVIDIA L40G-8A" }, + { 0x26B8, 0x175e, 0x10DE, "NVIDIA L40G-12A" }, + { 0x26B8, 0x175f, 0x10DE, "NVIDIA L40G-24A" }, + { 0x26B8, 0x1760, 0x10DE, "NVIDIA L40G-1" }, + { 0x26B8, 0x1761, 0x10DE, "NVIDIA L40G-2" }, + { 0x26B8, 0x1762, 0x10DE, "NVIDIA L40G-3" }, + { 0x26B8, 0x1763, 0x10DE, "NVIDIA L40G-4" }, + { 0x26B8, 0x1764, 0x10DE, "NVIDIA L40G-6" }, + { 0x26B8, 0x1765, 0x10DE, "NVIDIA L40G-8" }, + { 0x26B8, 0x1766, 0x10DE, "NVIDIA L40G-12" }, + { 0x26B8, 0x1767, 0x10DE, "NVIDIA L40G-24" }, + { 0x26B8, 0x1768, 0x10DE, "NVIDIA L40G-4C" }, + { 0x26B8, 0x1769, 0x10DE, "NVIDIA L40G-6C" }, + { 0x26B8, 0x176a, 0x10DE, "NVIDIA L40G-8C" }, + { 0x26B8, 0x176b, 0x10DE, "NVIDIA L40G-12C" }, + { 0x26B8, 0x176c, 0x10DE, "NVIDIA L40G-24C" }, + { 0x26B8, 0x181c, 0x10DE, "NVIDIA GeForce RTX 3060" }, + { 0x26B8, 0x181e, 0x10DE, "NVIDIA GeForce RTX 3050" }, + { 0x26B9, 0x1889, 0x10DE, "NVIDIA L40S-1B" }, + { 0x26B9, 0x188a, 0x10DE, "NVIDIA L40S-2B" }, + { 0x26B9, 0x188b, 0x10DE, "NVIDIA L40S-1Q" }, + { 0x26B9, 0x188c, 0x10DE, "NVIDIA L40S-2Q" }, + { 0x26B9, 0x188d, 0x10DE, "NVIDIA L40S-3Q" }, + { 0x26B9, 0x188e, 0x10DE, "NVIDIA L40S-4Q" }, + { 0x26B9, 0x188f, 0x10DE, "NVIDIA L40S-6Q" }, + { 0x26B9, 0x1890, 0x10DE, "NVIDIA L40S-8Q" }, + { 0x26B9, 0x1891, 0x10DE, "NVIDIA L40S-12Q" }, + { 0x26B9, 0x1892, 0x10DE, "NVIDIA L40S-16Q" }, + { 0x26B9, 0x1893, 0x10DE, "NVIDIA L40S-24Q" }, + { 0x26B9, 0x1894, 0x10DE, "NVIDIA L40S-48Q" }, + { 0x26B9, 0x1895, 0x10DE, "NVIDIA L40S-1A" }, + { 0x26B9, 0x1896, 0x10DE, "NVIDIA L40S-2A" }, + { 0x26B9, 0x1897, 0x10DE, "NVIDIA L40S-3A" }, + { 0x26B9, 0x1898, 0x10DE, "NVIDIA L40S-4A" }, + { 0x26B9, 0x1899, 0x10DE, "NVIDIA L40S-6A" }, + { 0x26B9, 0x189a, 0x10DE, "NVIDIA L40S-8A" }, + { 0x26B9, 0x189b, 0x10DE, "NVIDIA L40S-12A" }, + { 0x26B9, 0x189c, 0x10DE, "NVIDIA L40S-16A" }, + { 0x26B9, 0x189d, 0x10DE, "NVIDIA L40S-24A" }, + { 0x26B9, 0x189e, 0x10DE, "NVIDIA L40S-48A" }, + { 0x26B9, 0x189f, 0x10DE, "NVIDIA GeForce RTX 3050" }, + { 0x26B9, 0x18a0, 0x10DE, "NVIDIA GeForce RTX 3060" }, + { 0x26B9, 0x18a1, 0x10DE, "NVIDIA L40S-1" }, + { 0x26B9, 0x18a2, 0x10DE, "NVIDIA L40S-2" }, + { 0x26B9, 0x18a3, 0x10DE, "NVIDIA L40S-3" }, + { 0x26B9, 0x18a4, 0x10DE, "NVIDIA L40S-4" }, + { 0x26B9, 0x18a5, 0x10DE, "NVIDIA L40S-6" }, + { 0x26B9, 0x18a6, 0x10DE, "NVIDIA L40S-8" }, + { 0x26B9, 0x18a7, 0x10DE, "NVIDIA L40S-12" }, + { 0x26B9, 0x18a8, 0x10DE, "NVIDIA L40S-16" }, + { 0x26B9, 0x18a9, 0x10DE, "NVIDIA L40S-24" }, + { 0x26B9, 0x18aa, 
0x10DE, "NVIDIA L40S-48" }, + { 0x26B9, 0x18ab, 0x10DE, "NVIDIA L40S-4C" }, + { 0x26B9, 0x18ac, 0x10DE, "NVIDIA L40S-6C" }, + { 0x26B9, 0x18ad, 0x10DE, "NVIDIA L40S-8C" }, + { 0x26B9, 0x18ae, 0x10DE, "NVIDIA L40S-12C" }, + { 0x26B9, 0x18af, 0x10DE, "NVIDIA L40S-16C" }, + { 0x26B9, 0x18b0, 0x10DE, "NVIDIA L40S-24C" }, + { 0x26B9, 0x18b1, 0x10DE, "NVIDIA L40S-48C" }, + { 0x26BA, 0x1909, 0x10DE, "NVIDIA L20-1B" }, + { 0x26BA, 0x190a, 0x10DE, "NVIDIA L20-2B" }, + { 0x26BA, 0x190b, 0x10DE, "NVIDIA L20-1Q" }, + { 0x26BA, 0x190c, 0x10DE, "NVIDIA L20-2Q" }, + { 0x26BA, 0x190d, 0x10DE, "NVIDIA L20-3Q" }, + { 0x26BA, 0x190e, 0x10DE, "NVIDIA L20-4Q" }, + { 0x26BA, 0x190f, 0x10DE, "NVIDIA L20-6Q" }, + { 0x26BA, 0x1910, 0x10DE, "NVIDIA L20-8Q" }, + { 0x26BA, 0x1911, 0x10DE, "NVIDIA L20-12Q" }, + { 0x26BA, 0x1912, 0x10DE, "NVIDIA L20-16Q" }, + { 0x26BA, 0x1913, 0x10DE, "NVIDIA L20-24Q" }, + { 0x26BA, 0x1914, 0x10DE, "NVIDIA L20-48Q" }, + { 0x26BA, 0x1915, 0x10DE, "NVIDIA L20-1A" }, + { 0x26BA, 0x1916, 0x10DE, "NVIDIA L20-2A" }, + { 0x26BA, 0x1917, 0x10DE, "NVIDIA L20-3A" }, + { 0x26BA, 0x1918, 0x10DE, "NVIDIA L20-4A" }, + { 0x26BA, 0x1919, 0x10DE, "NVIDIA L20-6A" }, + { 0x26BA, 0x191a, 0x10DE, "NVIDIA L20-8A" }, + { 0x26BA, 0x191b, 0x10DE, "NVIDIA L20-12A" }, + { 0x26BA, 0x191c, 0x10DE, "NVIDIA L20-16A" }, + { 0x26BA, 0x191d, 0x10DE, "NVIDIA L20-24A" }, + { 0x26BA, 0x191e, 0x10DE, "NVIDIA L20-48A" }, + { 0x26BA, 0x191f, 0x10DE, "NVIDIA GeForce RTX 3050" }, + { 0x26BA, 0x1920, 0x10DE, "NVIDIA GeForce RTX 3060" }, + { 0x26BA, 0x1921, 0x10DE, "NVIDIA L20-1" }, + { 0x26BA, 0x1922, 0x10DE, "NVIDIA L20-2" }, + { 0x26BA, 0x1923, 0x10DE, "NVIDIA L20-3" }, + { 0x26BA, 0x1924, 0x10DE, "NVIDIA L20-4" }, + { 0x26BA, 0x1925, 0x10DE, "NVIDIA L20-6" }, + { 0x26BA, 0x1926, 0x10DE, "NVIDIA L20-8" }, + { 0x26BA, 0x1927, 0x10DE, "NVIDIA L20-12" }, + { 0x26BA, 0x1928, 0x10DE, "NVIDIA L20-16" }, + { 0x26BA, 0x1929, 0x10DE, "NVIDIA L20-24" }, + { 0x26BA, 0x192a, 0x10DE, "NVIDIA L20-48" }, + { 0x26BA, 0x192b, 0x10DE, "NVIDIA L20-4C" }, + { 0x26BA, 0x192c, 0x10DE, "NVIDIA L20-6C" }, + { 0x26BA, 0x192d, 0x10DE, "NVIDIA L20-8C" }, + { 0x26BA, 0x192e, 0x10DE, "NVIDIA L20-12C" }, + { 0x26BA, 0x192f, 0x10DE, "NVIDIA L20-16C" }, + { 0x26BA, 0x1930, 0x10DE, "NVIDIA L20-24C" }, + { 0x26BA, 0x1931, 0x10DE, "NVIDIA L20-48C" }, + { 0x27B6, 0x1938, 0x10DE, "NVIDIA L2-1B" }, + { 0x27B6, 0x1939, 0x10DE, "NVIDIA L2-2B" }, + { 0x27B6, 0x193a, 0x10DE, "NVIDIA L2-1Q" }, + { 0x27B6, 0x193b, 0x10DE, "NVIDIA L2-2Q" }, + { 0x27B6, 0x193c, 0x10DE, "NVIDIA L2-3Q" }, + { 0x27B6, 0x193d, 0x10DE, "NVIDIA L2-4Q" }, + { 0x27B6, 0x193e, 0x10DE, "NVIDIA L2-6Q" }, + { 0x27B6, 0x193f, 0x10DE, "NVIDIA L2-8Q" }, + { 0x27B6, 0x1940, 0x10DE, "NVIDIA L2-12Q" }, + { 0x27B6, 0x1941, 0x10DE, "NVIDIA L2-24Q" }, + { 0x27B6, 0x1942, 0x10DE, "NVIDIA L2-1A" }, + { 0x27B6, 0x1943, 0x10DE, "NVIDIA L2-2A" }, + { 0x27B6, 0x1944, 0x10DE, "NVIDIA L2-3A" }, + { 0x27B6, 0x1945, 0x10DE, "NVIDIA L2-4A" }, + { 0x27B6, 0x1946, 0x10DE, "NVIDIA L2-6A" }, + { 0x27B6, 0x1947, 0x10DE, "NVIDIA L2-8A" }, + { 0x27B6, 0x1948, 0x10DE, "NVIDIA L2-12A" }, + { 0x27B6, 0x1949, 0x10DE, "NVIDIA L2-24A" }, + { 0x27B6, 0x194a, 0x10DE, "NVIDIA L2-1" }, + { 0x27B6, 0x194b, 0x10DE, "NVIDIA L2-2" }, + { 0x27B6, 0x194c, 0x10DE, "NVIDIA L2-3" }, + { 0x27B6, 0x194d, 0x10DE, "NVIDIA L2-4" }, + { 0x27B6, 0x194e, 0x10DE, "NVIDIA L2-6" }, + { 0x27B6, 0x194f, 0x10DE, "NVIDIA L2-8" }, + { 0x27B6, 0x1950, 0x10DE, "NVIDIA L2-12" }, + { 0x27B6, 0x1951, 0x10DE, "NVIDIA L2-24" }, + { 0x27B6, 0x1952, 0x10DE, "NVIDIA 
L2-4C" }, + { 0x27B6, 0x1953, 0x10DE, "NVIDIA L2-6C" }, + { 0x27B6, 0x1954, 0x10DE, "NVIDIA L2-8C" }, + { 0x27B6, 0x1955, 0x10DE, "NVIDIA L2-12C" }, + { 0x27B6, 0x1956, 0x10DE, "NVIDIA L2-24C" }, + { 0x27B8, 0x172f, 0x10DE, "NVIDIA L4-1B" }, + { 0x27B8, 0x1730, 0x10DE, "NVIDIA L4-2B" }, + { 0x27B8, 0x1731, 0x10DE, "NVIDIA L4-1Q" }, + { 0x27B8, 0x1732, 0x10DE, "NVIDIA L4-2Q" }, + { 0x27B8, 0x1733, 0x10DE, "NVIDIA L4-3Q" }, + { 0x27B8, 0x1734, 0x10DE, "NVIDIA L4-4Q" }, + { 0x27B8, 0x1735, 0x10DE, "NVIDIA L4-6Q" }, + { 0x27B8, 0x1736, 0x10DE, "NVIDIA L4-8Q" }, + { 0x27B8, 0x1737, 0x10DE, "NVIDIA L4-12Q" }, + { 0x27B8, 0x1738, 0x10DE, "NVIDIA L4-24Q" }, + { 0x27B8, 0x1739, 0x10DE, "NVIDIA L4-1A" }, + { 0x27B8, 0x173a, 0x10DE, "NVIDIA L4-2A" }, + { 0x27B8, 0x173b, 0x10DE, "NVIDIA L4-3A" }, + { 0x27B8, 0x173c, 0x10DE, "NVIDIA L4-4A" }, + { 0x27B8, 0x173d, 0x10DE, "NVIDIA L4-6A" }, + { 0x27B8, 0x173e, 0x10DE, "NVIDIA L4-8A" }, + { 0x27B8, 0x173f, 0x10DE, "NVIDIA L4-12A" }, + { 0x27B8, 0x1740, 0x10DE, "NVIDIA L4-24A" }, + { 0x27B8, 0x1741, 0x10DE, "NVIDIA L4-1" }, + { 0x27B8, 0x1742, 0x10DE, "NVIDIA L4-2" }, + { 0x27B8, 0x1743, 0x10DE, "NVIDIA L4-3" }, + { 0x27B8, 0x1744, 0x10DE, "NVIDIA L4-4" }, + { 0x27B8, 0x1745, 0x10DE, "NVIDIA L4-6" }, + { 0x27B8, 0x1746, 0x10DE, "NVIDIA L4-8" }, + { 0x27B8, 0x1747, 0x10DE, "NVIDIA L4-12" }, + { 0x27B8, 0x1748, 0x10DE, "NVIDIA L4-24" }, + { 0x27B8, 0x1749, 0x10DE, "NVIDIA L4-4C" }, + { 0x27B8, 0x174a, 0x10DE, "NVIDIA L4-6C" }, + { 0x27B8, 0x174b, 0x10DE, "NVIDIA L4-8C" }, + { 0x27B8, 0x174c, 0x10DE, "NVIDIA L4-12C" }, + { 0x27B8, 0x174d, 0x10DE, "NVIDIA L4-24C" }, + { 0x2901, 0x21d7, 0x10DE, "NVIDIA B200X-1-23CME" }, + { 0x2901, 0x21d8, 0x10DE, "NVIDIA B200X-1-23C" }, + { 0x2901, 0x21d9, 0x10DE, "NVIDIA B200X-1-45C" }, + { 0x2901, 0x21da, 0x10DE, "NVIDIA B200X-2-45C" }, + { 0x2901, 0x21db, 0x10DE, "NVIDIA B200X-3-90C" }, + { 0x2901, 0x21dc, 0x10DE, "NVIDIA B200X-4-90C" }, + { 0x2901, 0x21dd, 0x10DE, "NVIDIA B200X-7-180C" }, + { 0x2901, 0x21ea, 0x10DE, "NVIDIA B200X-180C" }, + { 0x2941, 0x20cb, 0x10DE, "NVIDIA GB200-1-24CME" }, + { 0x2941, 0x20cc, 0x10DE, "NVIDIA GB200-1-24C" }, + { 0x2941, 0x20cd, 0x10DE, "NVIDIA GB200-1-47C" }, + { 0x2941, 0x20ce, 0x10DE, "NVIDIA GB200-2-47C" }, + { 0x2941, 0x20cf, 0x10DE, "NVIDIA GB200-3-95C" }, + { 0x2941, 0x20d0, 0x10DE, "NVIDIA GB200-4-95C" }, + { 0x2941, 0x20d1, 0x10DE, "NVIDIA GB200-7-189C" }, + { 0x2941, 0x20d2, 0x10DE, "NVIDIA GB200-189C" }, + { 0x2BB5, 0x2160, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-2Q" }, + { 0x2BB5, 0x2161, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-2A" }, + { 0x2BB5, 0x2162, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-2B" }, + { 0x2BB5, 0x2163, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-3Q" }, + { 0x2BB5, 0x2164, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-3A" }, + { 0x2BB5, 0x2165, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-4Q" }, + { 0x2BB5, 0x2166, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-4A" }, + { 0x2BB5, 0x2167, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-6Q" }, + { 0x2BB5, 0x2168, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-6A" }, + { 0x2BB5, 0x2169, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-8Q" }, + { 0x2BB5, 0x216a, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-8A" }, + { 0x2BB5, 0x216b, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-8C" }, + { 0x2BB5, 0x216c, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-12Q" }, + { 0x2BB5, 0x216d, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-12A" }, + { 0x2BB5, 0x216e, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-12C" }, + { 0x2BB5, 0x216f, 0x10DE, 
"NVIDIA RTX Pro 6000 Blackwell DC-1-24Q" }, + { 0x2BB5, 0x2170, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-24A" }, + { 0x2BB5, 0x2171, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-24C" }, + { 0x2BB5, 0x2172, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-12Q" }, + { 0x2BB5, 0x2173, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-12A" }, + { 0x2BB5, 0x2174, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-12C" }, + { 0x2BB5, 0x2175, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-16Q" }, + { 0x2BB5, 0x2176, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-16A" }, + { 0x2BB5, 0x2177, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-16C" }, + { 0x2BB5, 0x2178, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-24Q" }, + { 0x2BB5, 0x2179, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-24A" }, + { 0x2BB5, 0x217a, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-24C" }, + { 0x2BB5, 0x217b, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-48Q" }, + { 0x2BB5, 0x217c, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-48A" }, + { 0x2BB5, 0x217d, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-48C" }, + { 0x2BB5, 0x217e, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-24Q" }, + { 0x2BB5, 0x217f, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-24A" }, + { 0x2BB5, 0x2180, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-24C" }, + { 0x2BB5, 0x2181, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-32Q" }, + { 0x2BB5, 0x2182, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-32A" }, + { 0x2BB5, 0x2183, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-32C" }, + { 0x2BB5, 0x2184, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-48Q" }, + { 0x2BB5, 0x2185, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-48A" }, + { 0x2BB5, 0x2186, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-48C" }, + { 0x2BB5, 0x2187, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-96Q" }, + { 0x2BB5, 0x2188, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-96A" }, + { 0x2BB5, 0x2189, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-96C" }, + { 0x2BB5, 0x218a, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2B" }, + { 0x2BB5, 0x218b, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-3Q" }, + { 0x2BB5, 0x218c, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-3A" }, + { 0x2BB5, 0x218d, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4Q" }, + { 0x2BB5, 0x218e, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4A" }, + { 0x2BB5, 0x218f, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-6Q" }, + { 0x2BB5, 0x2190, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-6A" }, + { 0x2BB5, 0x2191, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-8Q" }, + { 0x2BB5, 0x2192, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-8A" }, + { 0x2BB5, 0x2193, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-8C" }, + { 0x2BB5, 0x2194, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-12Q" }, + { 0x2BB5, 0x2195, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-12A" }, + { 0x2BB5, 0x2196, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-12C" }, + { 0x2BB5, 0x2197, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-16Q" }, + { 0x2BB5, 0x2198, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-16A" }, + { 0x2BB5, 0x2199, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-16C" }, + { 0x2BB5, 0x219a, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-24Q" }, + { 0x2BB5, 0x219b, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-24A" }, + { 0x2BB5, 0x219c, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-24C" }, + { 0x2BB5, 0x219d, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-32Q" }, + { 0x2BB5, 0x219e, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-32A" }, + { 0x2BB5, 0x219f, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-32C" }, + { 0x2BB5, 0x21a0, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-48Q" }, + { 0x2BB5, 0x21a1, 0x10DE, "NVIDIA 
RTX Pro 6000 Blackwell DC-48A" }, + { 0x2BB5, 0x21a2, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-48C" }, + { 0x2BB5, 0x21a3, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-96Q" }, + { 0x2BB5, 0x21a4, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-96A" }, + { 0x2BB5, 0x21a5, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-96C" }, + { 0x2BB5, 0x21a6, 0x10DE, "NVIDIA GeForce RTX 3050" }, + { 0x2BB5, 0x21a7, 0x10DE, "NVIDIA GeForce RTX 3060" }, + { 0x2BB5, 0x21ae, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-2" }, + { 0x2BB5, 0x21af, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-3" }, + { 0x2BB5, 0x21b0, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-4" }, + { 0x2BB5, 0x21b1, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-6" }, + { 0x2BB5, 0x21b2, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-8" }, + { 0x2BB5, 0x21b3, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-12" }, + { 0x2BB5, 0x21b4, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-1-24" }, + { 0x2BB5, 0x21b5, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-12" }, + { 0x2BB5, 0x21b6, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-16" }, + { 0x2BB5, 0x21b7, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-24" }, + { 0x2BB5, 0x21b8, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2-48" }, + { 0x2BB5, 0x21b9, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-24" }, + { 0x2BB5, 0x21ba, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-32" }, + { 0x2BB5, 0x21bb, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-48" }, + { 0x2BB5, 0x21bc, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4-96" }, + { 0x2BB5, 0x21bd, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-2" }, + { 0x2BB5, 0x21be, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-3" }, + { 0x2BB5, 0x21bf, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-4" }, + { 0x2BB5, 0x21c0, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-6" }, + { 0x2BB5, 0x21c1, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-8" }, + { 0x2BB5, 0x21c2, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-12" }, + { 0x2BB5, 0x21c3, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-16" }, + { 0x2BB5, 0x21c4, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-24" }, + { 0x2BB5, 0x21c5, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-32" }, + { 0x2BB5, 0x21c6, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-48" }, + { 0x2BB5, 0x21c7, 0x10DE, "NVIDIA RTX Pro 6000 Blackwell DC-96" }, +}; + +#endif // G_NV_NAME_RELEASED_H diff --git a/src/nvidia/generated/g_nvh_state.h b/src/nvidia/generated/g_nvh_state.h new file mode 100644 index 0000000..219042a --- /dev/null +++ b/src/nvidia/generated/g_nvh_state.h @@ -0,0 +1,28 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// NVOC Header State : This file is used to select a different code path when NVH is disabled +// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_nvh_state.h +// + +#ifndef _G_NVH_STATE_H_ +#define _G_NVH_STATE_H_ + +// +// __nvoc_nvh_state_guard +// This macro definition is used to check whether this header is included before +// NVOC headers.
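//
// For reference, each entry in the vGPU released-name table above
// (g_nv_name_released.h) pairs a PCI device ID and subsystem ID with a
// marketing name. A minimal standalone lookup over data of that shape might
// look like the sketch below (hypothetical helper and types, not part of
// this patch):
//
//   #include <stddef.h>
//
//   typedef struct { unsigned devid, subdevid, vendor; const char *name; } NV_RELEASED_NAME;
//
//   static const NV_RELEASED_NAME nameTable[] = {
//       { 0x26B5, 0x1770, 0x10DE, "NVIDIA L40-2Q" },
//       { 0x27B8, 0x1731, 0x10DE, "NVIDIA L4-1Q"  },
//   };
//
//   // Linear scan keyed on (devid, subdevid); returns NULL when no entry matches.
//   static const char *nvNameLookup(unsigned devid, unsigned subdevid)
//   {
//       for (size_t i = 0; i < sizeof(nameTable) / sizeof(nameTable[0]); i++)
//       {
//           if (nameTable[i].devid == devid && nameTable[i].subdevid == subdevid)
//               return nameTable[i].name;
//       }
//       return NULL;
//   }
//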
The usage: +// #ifndef __nvoc_nvh_state_guard +// #error "NVH state guard header is not included prior to this NVOC header" +// #endif +// +#define __nvoc_nvh_state_guard + +// +// List of disabled NVOC headers +// + + + +#endif // _G_NVH_STATE_H_ diff --git a/src/nvidia/generated/g_object_nvoc.c b/src/nvidia/generated/g_object_nvoc.c new file mode 100644 index 0000000..be0169f --- /dev/null +++ b/src/nvidia/generated/g_object_nvoc.c @@ -0,0 +1,181 @@ +#define NVOC_OBJECT_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_object_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x497031 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +// Forward declarations for Object +void __nvoc_init__Object(Object*); +void __nvoc_init_funcTable_Object(Object*); +NV_STATUS __nvoc_ctor_Object(Object*); +void __nvoc_init_dataField_Object(Object*); +void __nvoc_dtor_Object(Object*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__Object; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__Object; + +// Down-thunk(s) to bridge Object methods from ancestors (if any) + +// Up-thunk(s) to bridge Object methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_Object = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Object), + /*classId=*/ classId(Object), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Object", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Object, + /*pCastInfo=*/ &__nvoc_castinfo__Object, + /*pExportInfo=*/ &__nvoc_export_info__Object +}; + + +// Metadata with per-class RTTI +static const struct NVOC_METADATA__Object __nvoc_metadata__Object = { + .rtti.pClassDef = &__nvoc_class_def_Object, // (obj) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Object, + .rtti.offset = 0, +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__Object = { + .numRelatives = 1, + .relatives = { + &__nvoc_metadata__Object.rtti, // [0]: (obj) this + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__Object = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Object(Object *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object *pThis) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_Object(pThis); + goto __nvoc_ctor_Object_exit; // Success + +__nvoc_ctor_Object_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_Object_1(Object *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_Object_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_Object(Object *pThis) { + __nvoc_init_funcTable_Object_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__Object(Object *pThis) { + + // Initialize pointers to inherited data. 
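    // (These cached __nvoc_pbase_* pointers are what make staticCast a plain
    // field read; dynamicCast, by contrast, resolves through the
    // NVOC_CASTINFO relatives list at run time.)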
+ pThis->__nvoc_pbase_Object = pThis; // (obj) this + + // Pointer(s) to metadata structure(s) + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__Object; // (obj) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_Object(pThis); +} + +NV_STATUS __nvoc_objCreate_Object(Object **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + Object *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(Object), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(Object)); + + pThis->createFlags = createFlags; + + // Link the child into the parent, if there is one, unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, pThis); + } + else + { + pThis->pParent = NULL; + } + + __nvoc_init__Object(pThis); + status = __nvoc_ctor_Object(pThis); + if (status != NV_OK) goto __nvoc_objCreate_Object_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_Object_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, pThis); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(Object)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Object(Object **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_Object(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_object_nvoc.h b/src/nvidia/generated/g_object_nvoc.h new file mode 100644 index 0000000..31d31c5 --- /dev/null +++ b/src/nvidia/generated/g_object_nvoc.h @@ -0,0 +1,235 @@ + +#ifndef _G_OBJECT_NVOC_H_ +#define _G_OBJECT_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file is part of the NVOC runtime. + */ + +#pragma once + +#include "nvoc/prelude.h" + +#include "g_object_nvoc.h" + +#ifndef _NVOC_OBJECT_H_ +#define _NVOC_OBJECT_H_ + +#include "nvtypes.h" +#include "nvstatus.h" + + + + +struct Object; + +#ifndef __NVOC_CLASS_Object_TYPEDEF__ +#define __NVOC_CLASS_Object_TYPEDEF__ +typedef struct Object Object; +#endif /* __NVOC_CLASS_Object_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Object +#define __nvoc_class_id_Object 0x497031 +#endif /* __nvoc_class_id_Object */ + + +struct NVOC_CLASS_INFO; + +/*! + * Tracks the head of an object's child list, and the next object in its + * parent's child list. + */ +struct NVOC_CHILD_TREE +{ + struct Object *pChild; + struct Object *pSibling; +}; + +//! The base class of all instantiable NVOC objects. + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_OBJECT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI +struct NVOC_METADATA__Object; + + +struct Object { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__Object *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj + + // Data members + struct Object *pParent; + struct NVOC_CHILD_TREE childTree; + NvU32 ipVersion; + NvU32 createFlags; +}; + + +// Metadata with per-class RTTI +struct NVOC_METADATA__Object { + const struct NVOC_RTTI rtti; +}; + +#ifndef __NVOC_CLASS_Object_TYPEDEF__ +#define __NVOC_CLASS_Object_TYPEDEF__ +typedef struct Object Object; +#endif /* __NVOC_CLASS_Object_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Object +#define __nvoc_class_id_Object 0x497031 +#endif /* __nvoc_class_id_Object */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +#define __staticCast_Object(pThis) \ + ((pThis)->__nvoc_pbase_Object) + +#ifdef __nvoc_object_h_disabled +#define __dynamicCast_Object(pThis) ((Object*) NULL) +#else //__nvoc_object_h_disabled +#define __dynamicCast_Object(pThis) \ + ((Object*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Object))) +#endif //__nvoc_object_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_Object(Object**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Object(Object**, Dynamic*, NvU32); +#define __objCreate_Object(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_Object((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros + +// Dispatch functions +void objAddChild_IMPL(struct Object *pObj, struct Object *pChild); + +#ifdef __nvoc_object_h_disabled +static inline void objAddChild(struct Object *pObj, struct Object *pChild) { + NV_ASSERT_FAILED_PRECOMP("Object was disabled!"); +} +#else //__nvoc_object_h_disabled +#define objAddChild(pObj, pChild) objAddChild_IMPL(pObj, pChild) +#endif //__nvoc_object_h_disabled + +void objRemoveChild_IMPL(struct Object *pObj, struct Object *pChild); + +#ifdef __nvoc_object_h_disabled +static inline void objRemoveChild(struct 
Object *pObj, struct Object *pChild) { + NV_ASSERT_FAILED_PRECOMP("Object was disabled!"); +} +#else //__nvoc_object_h_disabled +#define objRemoveChild(pObj, pChild) objRemoveChild_IMPL(pObj, pChild) +#endif //__nvoc_object_h_disabled + +struct Object *objGetChild_IMPL(struct Object *pObj); + +#ifdef __nvoc_object_h_disabled +static inline struct Object *objGetChild(struct Object *pObj) { + NV_ASSERT_FAILED_PRECOMP("Object was disabled!"); + return NULL; +} +#else //__nvoc_object_h_disabled +#define objGetChild(pObj) objGetChild_IMPL(pObj) +#endif //__nvoc_object_h_disabled + +struct Object *objGetSibling_IMPL(struct Object *pObj); + +#ifdef __nvoc_object_h_disabled +static inline struct Object *objGetSibling(struct Object *pObj) { + NV_ASSERT_FAILED_PRECOMP("Object was disabled!"); + return NULL; +} +#else //__nvoc_object_h_disabled +#define objGetSibling(pObj) objGetSibling_IMPL(pObj) +#endif //__nvoc_object_h_disabled + +struct Object *objGetDirectParent_IMPL(struct Object *pObj); + +#ifdef __nvoc_object_h_disabled +static inline struct Object *objGetDirectParent(struct Object *pObj) { + NV_ASSERT_FAILED_PRECOMP("Object was disabled!"); + return NULL; +} +#else //__nvoc_object_h_disabled +#define objGetDirectParent(pObj) objGetDirectParent_IMPL(pObj) +#endif //__nvoc_object_h_disabled + +#undef PRIVATE_FIELD + + +// +// IP versioning definitions are temporary until NVOC halspec support is +// finished. +// +// IP_VERSION format as defined by the hardware engines. +// A _MAJOR value of 0 means the object has no version number. +// + +#define NV_ODB_IP_VER_DEV 7:0 /* R-IVF */ +#define NV_ODB_IP_VER_ECO 15:8 /* R-IVF */ +#define NV_ODB_IP_VER_MINOR 23:16 /* R-IVF */ +#define NV_ODB_IP_VER_MAJOR 31:24 /* R-IVF */ + +#define IPVersion(pObj) staticCast((pObj), Object)->ipVersion +// v0 .. 
v1 inclusive +#define IsIPVersionInRange(pObj, v0, v1) ((IPVersion(pObj) >= (v0)) && (IPVersion(pObj) <= (v1))) + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_OBJECT_NVOC_H_ diff --git a/src/nvidia/generated/g_objtmr_nvoc.c b/src/nvidia/generated/g_objtmr_nvoc.c new file mode 100644 index 0000000..534424f --- /dev/null +++ b/src/nvidia/generated/g_objtmr_nvoc.c @@ -0,0 +1,389 @@ +#define NVOC_OBJTMR_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_objtmr_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x9ddede = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTMR; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +// Forward declarations for OBJTMR +void __nvoc_init__OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init__OBJTMR(OBJTMR*, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner); +void __nvoc_init_funcTable_OBJTMR(OBJTMR*, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner); +NV_STATUS __nvoc_ctor_OBJTMR(OBJTMR*, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner); +void __nvoc_init_dataField_OBJTMR(OBJTMR*, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner); +void __nvoc_dtor_OBJTMR(OBJTMR*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__OBJTMR; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJTMR; + +// Down-thunk(s) to bridge OBJTMR methods from ancestors (if any) +NV_STATUS __nvoc_down_thunk_OBJTMR_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr, ENGDESCRIPTOR arg3); // this +NV_STATUS __nvoc_down_thunk_OBJTMR_engstateStatePreInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr); // this +NV_STATUS __nvoc_down_thunk_OBJTMR_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr); // this +NV_STATUS __nvoc_down_thunk_OBJTMR_engstateStateLoad(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr, NvU32 arg3); // this +NV_STATUS __nvoc_down_thunk_OBJTMR_engstateStateUnload(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr, NvU32 arg3); // this +void __nvoc_down_thunk_OBJTMR_engstateStateDestroy(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr); // this + +// Up-thunk(s) to bridge OBJTMR methods to ancestors (if any) +void __nvoc_up_thunk_OBJENGSTATE_tmrInitMissing(struct OBJGPU *pGpu, struct OBJTMR *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_tmrStatePreInitUnlocked(struct OBJGPU *pGpu, struct OBJTMR *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_tmrStateInitUnlocked(struct OBJGPU *pGpu, struct OBJTMR *pEngstate); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_tmrStatePreLoad(struct OBJGPU *pGpu, struct OBJTMR *pEngstate, NvU32 arg3); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_tmrStatePostLoad(struct OBJGPU *pGpu, struct OBJTMR *pEngstate, NvU32 arg3); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_tmrStatePreUnload(struct OBJGPU *pGpu, struct OBJTMR *pEngstate, NvU32 arg3); // this +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_tmrStatePostUnload(struct OBJGPU *pGpu, struct OBJTMR *pEngstate, NvU32 arg3); // this +NvBool 
__nvoc_up_thunk_OBJENGSTATE_tmrIsPresent(struct OBJGPU *pGpu, struct OBJTMR *pEngstate); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTMR = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJTMR), + /*classId=*/ classId(OBJTMR), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJTMR", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJTMR, + /*pCastInfo=*/ &__nvoc_castinfo__OBJTMR, + /*pExportInfo=*/ &__nvoc_export_info__OBJTMR +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__OBJTMR __nvoc_metadata__OBJTMR = { + .rtti.pClassDef = &__nvoc_class_def_OBJTMR, // (tmr) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJTMR, + .rtti.offset = 0, + .metadata__OBJENGSTATE.rtti.pClassDef = &__nvoc_class_def_OBJENGSTATE, // (engstate) super + .metadata__OBJENGSTATE.rtti.dtor = &__nvoc_destructFromBase, + .metadata__OBJENGSTATE.rtti.offset = NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE), + .metadata__OBJENGSTATE.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^2 + .metadata__OBJENGSTATE.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__OBJENGSTATE.metadata__Object.rtti.offset = NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), + + .vtable.__tmrConstructEngine__ = &tmrConstructEngine_IMPL, // virtual override (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateConstructEngine__ = &__nvoc_down_thunk_OBJTMR_engstateConstructEngine, // virtual + .vtable.__tmrStatePreInitLocked__ = &tmrStatePreInitLocked_IMPL, // virtual override (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePreInitLocked__ = &__nvoc_down_thunk_OBJTMR_engstateStatePreInitLocked, // virtual + .vtable.__tmrStateInitLocked__ = &tmrStateInitLocked_IMPL, // virtual override (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateInitLocked__ = &__nvoc_down_thunk_OBJTMR_engstateStateInitLocked, // virtual + .vtable.__tmrStateLoad__ = &tmrStateLoad_IMPL, // virtual override (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateLoad__ = &__nvoc_down_thunk_OBJTMR_engstateStateLoad, // virtual + .vtable.__tmrStateUnload__ = &tmrStateUnload_IMPL, // virtual override (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateUnload__ = &__nvoc_down_thunk_OBJTMR_engstateStateUnload, // virtual + .vtable.__tmrStateDestroy__ = &tmrStateDestroy_IMPL, // virtual override (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateDestroy__ = &__nvoc_down_thunk_OBJTMR_engstateStateDestroy, // virtual + .vtable.__tmrInitMissing__ = &__nvoc_up_thunk_OBJENGSTATE_tmrInitMissing, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateInitMissing__ = &engstateInitMissing_IMPL, // virtual + .vtable.__tmrStatePreInitUnlocked__ = &__nvoc_up_thunk_OBJENGSTATE_tmrStatePreInitUnlocked, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePreInitUnlocked__ = &engstateStatePreInitUnlocked_IMPL, // virtual + .vtable.__tmrStateInitUnlocked__ = &__nvoc_up_thunk_OBJENGSTATE_tmrStateInitUnlocked, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStateInitUnlocked__ = &engstateStateInitUnlocked_IMPL, // virtual + .vtable.__tmrStatePreLoad__ = &__nvoc_up_thunk_OBJENGSTATE_tmrStatePreLoad, // virtual inherited (engstate) base (engstate) + 
.metadata__OBJENGSTATE.vtable.__engstateStatePreLoad__ = &engstateStatePreLoad_IMPL, // virtual + .vtable.__tmrStatePostLoad__ = &__nvoc_up_thunk_OBJENGSTATE_tmrStatePostLoad, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePostLoad__ = &engstateStatePostLoad_IMPL, // virtual + .vtable.__tmrStatePreUnload__ = &__nvoc_up_thunk_OBJENGSTATE_tmrStatePreUnload, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePreUnload__ = &engstateStatePreUnload_IMPL, // virtual + .vtable.__tmrStatePostUnload__ = &__nvoc_up_thunk_OBJENGSTATE_tmrStatePostUnload, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateStatePostUnload__ = &engstateStatePostUnload_IMPL, // virtual + .vtable.__tmrIsPresent__ = &__nvoc_up_thunk_OBJENGSTATE_tmrIsPresent, // virtual inherited (engstate) base (engstate) + .metadata__OBJENGSTATE.vtable.__engstateIsPresent__ = &engstateIsPresent_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__OBJTMR = { + .numRelatives = 3, + .relatives = { + &__nvoc_metadata__OBJTMR.rtti, // [0]: (tmr) this + &__nvoc_metadata__OBJTMR.metadata__OBJENGSTATE.rtti, // [1]: (engstate) super + &__nvoc_metadata__OBJTMR.metadata__OBJENGSTATE.metadata__Object.rtti, // [2]: (obj) super^2 + } +}; + +// 6 down-thunk(s) defined to bridge methods in OBJTMR from superclasses + +// tmrConstructEngine: virtual override (engstate) base (engstate) +NV_STATUS __nvoc_down_thunk_OBJTMR_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr, ENGDESCRIPTOR arg3) { + return tmrConstructEngine(pGpu, (struct OBJTMR *)(((unsigned char *) pTmr) - NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE)), arg3); +} + +// tmrStatePreInitLocked: virtual override (engstate) base (engstate) +NV_STATUS __nvoc_down_thunk_OBJTMR_engstateStatePreInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr) { + return tmrStatePreInitLocked(pGpu, (struct OBJTMR *)(((unsigned char *) pTmr) - NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE))); +} + +// tmrStateInitLocked: virtual override (engstate) base (engstate) +NV_STATUS __nvoc_down_thunk_OBJTMR_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr) { + return tmrStateInitLocked(pGpu, (struct OBJTMR *)(((unsigned char *) pTmr) - NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE))); +} + +// tmrStateLoad: virtual override (engstate) base (engstate) +NV_STATUS __nvoc_down_thunk_OBJTMR_engstateStateLoad(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr, NvU32 arg3) { + return tmrStateLoad(pGpu, (struct OBJTMR *)(((unsigned char *) pTmr) - NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE)), arg3); +} + +// tmrStateUnload: virtual override (engstate) base (engstate) +NV_STATUS __nvoc_down_thunk_OBJTMR_engstateStateUnload(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr, NvU32 arg3) { + return tmrStateUnload(pGpu, (struct OBJTMR *)(((unsigned char *) pTmr) - NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE)), arg3); +} + +// tmrStateDestroy: virtual override (engstate) base (engstate) +void __nvoc_down_thunk_OBJTMR_engstateStateDestroy(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr) { + tmrStateDestroy(pGpu, (struct OBJTMR *)(((unsigned char *) pTmr) - NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE))); +} + + +// 8 up-thunk(s) defined to bridge methods in OBJTMR to superclasses + +// tmrInitMissing: virtual inherited (engstate) base (engstate) +void __nvoc_up_thunk_OBJENGSTATE_tmrInitMissing(struct OBJGPU *pGpu, struct OBJTMR 
*pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE))); +} + +// tmrStatePreInitUnlocked: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_tmrStatePreInitUnlocked(struct OBJGPU *pGpu, struct OBJTMR *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE))); +} + +// tmrStateInitUnlocked: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_tmrStateInitUnlocked(struct OBJGPU *pGpu, struct OBJTMR *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE))); +} + +// tmrStatePreLoad: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_tmrStatePreLoad(struct OBJGPU *pGpu, struct OBJTMR *pEngstate, NvU32 arg3) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE)), arg3); +} + +// tmrStatePostLoad: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_tmrStatePostLoad(struct OBJGPU *pGpu, struct OBJTMR *pEngstate, NvU32 arg3) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE)), arg3); +} + +// tmrStatePreUnload: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_tmrStatePreUnload(struct OBJGPU *pGpu, struct OBJTMR *pEngstate, NvU32 arg3) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE)), arg3); +} + +// tmrStatePostUnload: virtual inherited (engstate) base (engstate) +NV_STATUS __nvoc_up_thunk_OBJENGSTATE_tmrStatePostUnload(struct OBJGPU *pGpu, struct OBJTMR *pEngstate, NvU32 arg3) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE)), arg3); +} + +// tmrIsPresent: virtual inherited (engstate) base (engstate) +NvBool __nvoc_up_thunk_OBJENGSTATE_tmrIsPresent(struct OBJGPU *pGpu, struct OBJTMR *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *) pEngstate) + NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE))); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJTMR = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_OBJTMR(OBJTMR *pThis) { + __nvoc_tmrDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJTMR(OBJTMR *pThis, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pGpuhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pGpuhalspecowner); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // NVOC Property Hal 
field -- PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS + // default + { + pThis->setProperty(pThis, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE + // default + { + pThis->setProperty(pThis, PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->setProperty(pThis, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS, NV_TRUE); + } + pThis->setProperty(pThis, PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS, (0)); + pThis->setProperty(pThis, PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS, (0)); + + // NVOC Property Hal field -- PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL + // default + { + pThis->setProperty(pThis, PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL, NV_FALSE); + } + + // NVOC Property Hal field -- PDB_PROP_TMR_WAR_FOR_BUG_4679970_DEF + // default + { + pThis->setProperty(pThis, PDB_PROP_TMR_WAR_FOR_BUG_4679970_DEF, NV_FALSE); + } +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_OBJTMR(OBJTMR *pThis, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_OBJTMR_fail_OBJENGSTATE; + __nvoc_init_dataField_OBJTMR(pThis, pGpuhalspecowner, pRmhalspecowner); + goto __nvoc_ctor_OBJTMR_exit; // Success + +__nvoc_ctor_OBJTMR_fail_OBJENGSTATE: +__nvoc_ctor_OBJTMR_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_OBJTMR_1(OBJTMR *pThis, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pGpuhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pGpuhalspecowner); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} // End __nvoc_init_funcTable_OBJTMR_1 + + +// Initialize vtable(s) for 14 virtual method(s). +void __nvoc_init_funcTable_OBJTMR(OBJTMR *pThis, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_OBJTMR_1(pThis, pGpuhalspecowner, pRmhalspecowner); +} + +// Initialize newly constructed object. +void __nvoc_init__OBJTMR(OBJTMR *pThis, GpuHalspecOwner *pGpuhalspecowner, RmHalspecOwner *pRmhalspecowner) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; // (obj) super^2 + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; // (engstate) super + pThis->__nvoc_pbase_OBJTMR = pThis; // (tmr) this + + // Recurse to superclass initialization function(s). 
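    // (Ordering note: the superclass init call below sets OBJENGSTATE's own
    // metadata pointers first; the assignments that follow repoint them at the
    // copies embedded in __nvoc_metadata__OBJTMR, so dispatch through a
    // base-class pointer reaches the most-derived vtable.)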
+ __nvoc_init__OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + + // Pointer(s) to metadata structure(s) + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__OBJTMR.metadata__OBJENGSTATE.metadata__Object; // (obj) super^2 + pThis->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr = &__nvoc_metadata__OBJTMR.metadata__OBJENGSTATE; // (engstate) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__OBJTMR; // (tmr) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_OBJTMR(pThis, pGpuhalspecowner, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_OBJTMR(OBJTMR **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + OBJTMR *pThis; + GpuHalspecOwner *pGpuhalspecowner; + RmHalspecOwner *pRmhalspecowner; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(OBJTMR), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(OBJTMR)); + + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.createFlags = createFlags; + + // pParent must be a valid object that derives from a halspec owner class. + NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_ARGUMENT); + + // Link the child into the parent unless flagged not to do so. + if (!(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pGpuhalspecowner = dynamicCast(pParent, GpuHalspecOwner)) == NULL) + pGpuhalspecowner = objFindAncestorOfType(GpuHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pGpuhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init__OBJTMR(pThis, pGpuhalspecowner, pRmhalspecowner); + status = __nvoc_ctor_OBJTMR(pThis, pGpuhalspecowner, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_OBJTMR_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_OBJTMR_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them.
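    // (With NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT the caller owns the
    // storage, so the error path below only zeroes the object; otherwise the
    // allocation made earlier is freed and the caller's pointer is cleared.)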
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(OBJTMR)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJTMR(OBJTMR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJTMR(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_objtmr_nvoc.h b/src/nvidia/generated/g_objtmr_nvoc.h new file mode 100644 index 0000000..c1aad09 --- /dev/null +++ b/src/nvidia/generated/g_objtmr_nvoc.h @@ -0,0 +1,1271 @@ + +#ifndef _G_OBJTMR_NVOC_H_ +#define _G_OBJTMR_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once +#include "g_objtmr_nvoc.h" + +#ifndef _OBJTMR_H_ +#define _OBJTMR_H_ + +/*! + * @file + * @brief Defines and structures used for the Tmr Engine Object. + */ + +/* ------------------------ Includes --------------------------------------- */ +#include "core/core.h" +#include "gpu/eng_state.h" +#include "gpu/gpu.h" +#include "gpu/timer/tmr.h" +#include "lib/ref_count.h" +#include "os/os.h" +#include "nvoc/utility.h" + +/* ------------------------ Macros ----------------------------------------- */ +// +// Extent of the timer callback array +// +#define TMR_NUM_CALLBACKS_RM 96 +#define TMR_NUM_CALLBACKS_OS 36 + +// Callback scheduled without any explicit flags set. +#define TMR_FLAGS_NONE 0x00000000 +// +// Automatically reschedule the callback, so that it repeats. +// Otherwise, the callback is scheduled for one-shot execution. +// +#define TMR_FLAG_RECUR NVBIT(0) +// +// Indicate that the implementation of the callback function will/can release +// a GPU semaphore. This allows fifoIdleChannels to query this information, +// and hence not bail out early if channels are blocked on semaphores that +// will in fact be released.
+// !!NOTE: This is OBSOLETE; it should be moved directly to FIFO, where it's needed +// +#define TMR_FLAG_RELEASE_SEMAPHORE NVBIT(1) +#define TMR_FLAG_OS_TIMER_QUEUED NVBIT(2) +// +// Normally, it should not be necessary to use the TMR_FLAG_USE_OS_TIMER flag, +// because the OS timer is selected automatically by the default +// PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS setting. +// Note that the OS timer is not supported in all environments (such as GSP-RM). +// +#define TMR_FLAG_USE_OS_TIMER NVBIT(3) + +#define TMR_GET_GPU(p) ENG_GET_GPU(p) + +/* ------------------------ Function Redefinitions ------------------------- */ +// Schedule an event at a relative time given in seconds, converting it to the nanoseconds expected by tmrEventScheduleRel(). +#define tmrEventScheduleRelSec(pTmr, pEvent, RelTimeSec) tmrEventScheduleRel(pTmr, pEvent, (NvU64)(RelTimeSec) * 1000000000 ) + +/* ------------------------ Datatypes -------------------------------------- */ +TYPEDEF_BITVECTOR(MC_ENGINE_BITVECTOR); + +// +// Forward references for timer related structures +// +typedef struct DAYMSECTIME *PDAYMSECTIME; +typedef struct DAYMSECTIME DAYMSECTIME; + +// +// System time structure +// +struct DAYMSECTIME +{ + NvU32 days; + NvU32 msecs; + NvU32 valid; +}; + +/*! + * Callback wrapper memory type, used when interfacing with all scheduling functions. + * Reveals only a partial representation of the event information. + * User use only; internal code will not change them. + */ +struct TMR_EVENT +{ + TIMEPROC pTimeProc; + // ... (remaining TMR_EVENT members omitted) ... +}; + +// ... (OBJTMR class definition omitted) ... + +#define __staticCast_OBJTMR(pThis) \ + ((pThis)->__nvoc_pbase_OBJTMR) + +#ifdef __nvoc_objtmr_h_disabled +#define __dynamicCast_OBJTMR(pThis) ((OBJTMR*) NULL) +#else //__nvoc_objtmr_h_disabled +#define __dynamicCast_OBJTMR(pThis) \ + ((OBJTMR*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJTMR))) +#endif //__nvoc_objtmr_h_disabled + +// Property macros +#define PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS_BASE_CAST +#define PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS_BASE_NAME PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS +#define PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS_BASE_CAST +#define PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS_BASE_NAME PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS +#define PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS_BASE_CAST +#define PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS_BASE_NAME PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS +#define PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL_BASE_CAST +#define PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL_BASE_NAME PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL +#define PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS_BASE_CAST +#define PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS_BASE_NAME PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS +#define PDB_PROP_TMR_WAR_FOR_BUG_4679970_DEF_BASE_CAST +#define PDB_PROP_TMR_WAR_FOR_BUG_4679970_DEF_BASE_NAME PDB_PROP_TMR_WAR_FOR_BUG_4679970_DEF +#define PDB_PROP_TMR_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
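//
// For reference, every up-/down-thunk generated in g_objtmr_nvoc.c above uses
// the same container_of-style pointer adjustment over NV_OFFSETOF. A minimal
// standalone sketch of that pattern (illustrative names only, not NVOC code):
//
//   #include <stddef.h>
//
//   struct Base    { int baseData; };
//   struct Derived { int derivedData; struct Base base; };
//
//   // "Down-thunk" direction: given a pointer to the embedded Base, subtract
//   // the member offset to recover the enclosing Derived -- the same
//   // adjustment __nvoc_down_thunk_OBJTMR_* performs with
//   // NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE).
//   static struct Derived *derivedFromBase(struct Base *pBase)
//   {
//       return (struct Derived *)((unsigned char *)pBase - offsetof(struct Derived, base));
//   }
//
//   // "Up-thunk" direction: add the offset to go from Derived to its Base.
//   static struct Base *baseFromDerived(struct Derived *pDerived)
//   {
//       return (struct Base *)((unsigned char *)pDerived + offsetof(struct Derived, base));
//   }
//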
+#define PDB_PROP_TMR_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING +#define PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE_BASE_CAST +#define PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE_BASE_NAME PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE + +NV_STATUS __nvoc_objCreateDynamic_OBJTMR(OBJTMR**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJTMR(OBJTMR**, Dynamic*, NvU32); +#define __objCreate_OBJTMR(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJTMR((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros +#define tmrConstructEngine_FNPTR(pTmr) pTmr->__nvoc_metadata_ptr->vtable.__tmrConstructEngine__ +#define tmrConstructEngine(pGpu, pTmr, arg3) tmrConstructEngine_DISPATCH(pGpu, pTmr, arg3) +#define tmrStatePreInitLocked_FNPTR(pTmr) pTmr->__nvoc_metadata_ptr->vtable.__tmrStatePreInitLocked__ +#define tmrStatePreInitLocked(pGpu, pTmr) tmrStatePreInitLocked_DISPATCH(pGpu, pTmr) +#define tmrStateInitLocked_FNPTR(pTmr) pTmr->__nvoc_metadata_ptr->vtable.__tmrStateInitLocked__ +#define tmrStateInitLocked(pGpu, pTmr) tmrStateInitLocked_DISPATCH(pGpu, pTmr) +#define tmrStateLoad_FNPTR(pTmr) pTmr->__nvoc_metadata_ptr->vtable.__tmrStateLoad__ +#define tmrStateLoad(pGpu, pTmr, arg3) tmrStateLoad_DISPATCH(pGpu, pTmr, arg3) +#define tmrStateUnload_FNPTR(pTmr) pTmr->__nvoc_metadata_ptr->vtable.__tmrStateUnload__ +#define tmrStateUnload(pGpu, pTmr, arg3) tmrStateUnload_DISPATCH(pGpu, pTmr, arg3) +#define tmrStateDestroy_FNPTR(pTmr) pTmr->__nvoc_metadata_ptr->vtable.__tmrStateDestroy__ +#define tmrStateDestroy(pGpu, pTmr) tmrStateDestroy_DISPATCH(pGpu, pTmr) +#define tmrInitMissing_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateInitMissing__ +#define tmrInitMissing(pGpu, pEngstate) tmrInitMissing_DISPATCH(pGpu, pEngstate) +#define tmrStatePreInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreInitUnlocked__ +#define tmrStatePreInitUnlocked(pGpu, pEngstate) tmrStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define tmrStateInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStateInitUnlocked__ +#define tmrStateInitUnlocked(pGpu, pEngstate) tmrStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define tmrStatePreLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreLoad__ +#define tmrStatePreLoad(pGpu, pEngstate, arg3) tmrStatePreLoad_DISPATCH(pGpu, pEngstate, arg3) +#define tmrStatePostLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePostLoad__ +#define tmrStatePostLoad(pGpu, pEngstate, arg3) tmrStatePostLoad_DISPATCH(pGpu, pEngstate, arg3) +#define tmrStatePreUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreUnload__ +#define tmrStatePreUnload(pGpu, pEngstate, arg3) tmrStatePreUnload_DISPATCH(pGpu, pEngstate, arg3) +#define tmrStatePostUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePostUnload__ +#define tmrStatePostUnload(pGpu, pEngstate, arg3) tmrStatePostUnload_DISPATCH(pGpu, pEngstate, arg3) +#define tmrIsPresent_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateIsPresent__ +#define tmrIsPresent(pGpu, pEngstate) tmrIsPresent_DISPATCH(pGpu, pEngstate) + +// Dispatch functions +static inline NV_STATUS tmrConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR 
*pTmr, ENGDESCRIPTOR arg3) { + return pTmr->__nvoc_metadata_ptr->vtable.__tmrConstructEngine__(pGpu, pTmr, arg3); +} + +static inline NV_STATUS tmrStatePreInitLocked_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return pTmr->__nvoc_metadata_ptr->vtable.__tmrStatePreInitLocked__(pGpu, pTmr); +} + +static inline NV_STATUS tmrStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return pTmr->__nvoc_metadata_ptr->vtable.__tmrStateInitLocked__(pGpu, pTmr); +} + +static inline NV_STATUS tmrStateLoad_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg3) { + return pTmr->__nvoc_metadata_ptr->vtable.__tmrStateLoad__(pGpu, pTmr, arg3); +} + +static inline NV_STATUS tmrStateUnload_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg3) { + return pTmr->__nvoc_metadata_ptr->vtable.__tmrStateUnload__(pGpu, pTmr, arg3); +} + +static inline void tmrStateDestroy_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + pTmr->__nvoc_metadata_ptr->vtable.__tmrStateDestroy__(pGpu, pTmr); +} + +static inline void tmrInitMissing_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pEngstate) { + pEngstate->__nvoc_metadata_ptr->vtable.__tmrInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS tmrStatePreInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__tmrStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS tmrStateInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__tmrStateInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS tmrStatePreLoad_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__tmrStatePreLoad__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS tmrStatePostLoad_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__tmrStatePostLoad__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS tmrStatePreUnload_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__tmrStatePreUnload__(pGpu, pEngstate, arg3); +} + +static inline NV_STATUS tmrStatePostUnload_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pEngstate, NvU32 arg3) { + return pEngstate->__nvoc_metadata_ptr->vtable.__tmrStatePostUnload__(pGpu, pEngstate, arg3); +} + +static inline NvBool tmrIsPresent_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pEngstate) { + return pEngstate->__nvoc_metadata_ptr->vtable.__tmrIsPresent__(pGpu, pEngstate); +} + +NV_STATUS tmrDelay_OSTIMER(struct OBJTMR *pTmr, NvU32 arg2); + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrDelay(struct OBJTMR *pTmr, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrDelay(pTmr, arg2) tmrDelay_OSTIMER(pTmr, arg2) +#endif //__nvoc_objtmr_h_disabled + +#define tmrDelay_HAL(pTmr, arg2) tmrDelay(pTmr, arg2) + +static inline NV_STATUS tmrSetCurrentTime_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetCurrentTime(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCurrentTime(pGpu, pTmr) tmrSetCurrentTime_46f6a7(pGpu, pTmr) +#endif 
//__nvoc_objtmr_h_disabled + +#define tmrSetCurrentTime_HAL(pGpu, pTmr) tmrSetCurrentTime(pGpu, pTmr) + +static inline NV_STATUS tmrSetAlarmIntrDisable_56cd7a(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return NV_OK; +} + +static inline NV_STATUS tmrSetAlarmIntrDisable_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetAlarmIntrDisable(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetAlarmIntrDisable(pGpu, pTmr) tmrSetAlarmIntrDisable_56cd7a(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetAlarmIntrDisable_HAL(pGpu, pTmr) tmrSetAlarmIntrDisable(pGpu, pTmr) + +static inline NV_STATUS tmrSetAlarmIntrEnable_56cd7a(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return NV_OK; +} + +static inline NV_STATUS tmrSetAlarmIntrEnable_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetAlarmIntrEnable(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetAlarmIntrEnable(pGpu, pTmr) tmrSetAlarmIntrEnable_56cd7a(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetAlarmIntrEnable_HAL(pGpu, pTmr) tmrSetAlarmIntrEnable(pGpu, pTmr) + +static inline NV_STATUS tmrSetAlarmIntrReset_56cd7a(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + return NV_OK; +} + +static inline NV_STATUS tmrSetAlarmIntrReset_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetAlarmIntrReset(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetAlarmIntrReset(pGpu, pTmr, arg3) tmrSetAlarmIntrReset_56cd7a(pGpu, pTmr, arg3) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetAlarmIntrReset_HAL(pGpu, pTmr, arg3) tmrSetAlarmIntrReset(pGpu, pTmr, arg3) + +static inline NV_STATUS tmrGetIntrStatus_cb5ce8(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *pStatus, THREAD_STATE_NODE *arg4) { + *pStatus = 0; + return NV_OK; +} + +NV_STATUS tmrGetIntrStatus_OSTIMER(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *pStatus, THREAD_STATE_NODE *arg4); + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetIntrStatus(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *pStatus, THREAD_STATE_NODE *arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetIntrStatus(pGpu, pTmr, pStatus, arg4) tmrGetIntrStatus_cb5ce8(pGpu, pTmr, pStatus, arg4) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetIntrStatus_HAL(pGpu, pTmr, pStatus, arg4) tmrGetIntrStatus(pGpu, pTmr, pStatus, arg4) + +static inline NV_STATUS tmrGetCurrentTime_70fb36(struct OBJTMR *pTmr, NvU64 *pTime) { + *pTime = osGetTimestamp(); + return NV_OK; +} + +NV_STATUS tmrGetCurrentTime_IMPL(struct OBJTMR *pTmr, NvU64 *pTime); + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetCurrentTime(struct OBJTMR *pTmr, NvU64 *pTime) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else 
//__nvoc_objtmr_h_disabled +#define tmrGetCurrentTime(pTmr, pTime) tmrGetCurrentTime_IMPL(pTmr, pTime) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetCurrentTime_HAL(pTmr, pTime) tmrGetCurrentTime(pTmr, pTime) + +static inline NV_STATUS tmrGetCurrentTimeEx_70fb36(struct OBJTMR *pTmr, NvU64 *pTime, THREAD_STATE_NODE *arg3) { + *pTime = osGetTimestamp(); + return NV_OK; +} + +NV_STATUS tmrGetCurrentTimeEx_IMPL(struct OBJTMR *pTmr, NvU64 *pTime, THREAD_STATE_NODE *arg3); + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetCurrentTimeEx(struct OBJTMR *pTmr, NvU64 *pTime, THREAD_STATE_NODE *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetCurrentTimeEx(pTmr, pTime, arg3) tmrGetCurrentTimeEx_IMPL(pTmr, pTime, arg3) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetCurrentTimeEx_HAL(pTmr, pTime, arg3) tmrGetCurrentTimeEx(pTmr, pTime, arg3) + +static inline NvU32 tmrGetTimeLo_cf0499(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return ((NvU32)(((NvU64)(osGetTimestamp())) & 4294967295U)); +} + +NvU32 tmrGetTimeLo_OSTIMER(struct OBJGPU *pGpu, struct OBJTMR *pTmr); + + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU32 tmrGetTimeLo(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetTimeLo(pGpu, pTmr) tmrGetTimeLo_OSTIMER(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetTimeLo_HAL(pGpu, pTmr) tmrGetTimeLo(pGpu, pTmr) + +static inline NvU64 tmrGetTime_fa6bbe(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return osGetTimestamp(); +} + +NvU64 tmrGetTime_OSTIMER(struct OBJGPU *pGpu, struct OBJTMR *pTmr); + + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU64 tmrGetTime(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetTime(pGpu, pTmr) tmrGetTime_OSTIMER(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetTime_HAL(pGpu, pTmr) tmrGetTime(pGpu, pTmr) + +static inline NvU64 tmrGetTimeEx_fa6bbe(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + return osGetTimestamp(); +} + +NvU64 tmrGetTimeEx_OSTIMER(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3); + + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU64 tmrGetTimeEx(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetTimeEx(pGpu, pTmr, arg3) tmrGetTimeEx_OSTIMER(pGpu, pTmr, arg3) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetTimeEx_HAL(pGpu, pTmr, arg3) tmrGetTimeEx(pGpu, pTmr, arg3) + +NvU32 tmrReadTimeLoReg_OSTIMER(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3); + + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU32 tmrReadTimeLoReg(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrReadTimeLoReg(pGpu, pTmr, arg3) tmrReadTimeLoReg_OSTIMER(pGpu, pTmr, arg3) +#endif //__nvoc_objtmr_h_disabled + +#define tmrReadTimeLoReg_HAL(pGpu, pTmr, arg3) tmrReadTimeLoReg(pGpu, pTmr, arg3) + +NvU32 tmrReadTimeHiReg_OSTIMER(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3); + + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU32 tmrReadTimeHiReg(struct OBJGPU *pGpu, 
struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrReadTimeHiReg(pGpu, pTmr, arg3) tmrReadTimeHiReg_OSTIMER(pGpu, pTmr, arg3) +#endif //__nvoc_objtmr_h_disabled + +#define tmrReadTimeHiReg_HAL(pGpu, pTmr, arg3) tmrReadTimeHiReg(pGpu, pTmr, arg3) + +static inline NV_STATUS tmrGetGpuPtimerOffset_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *arg3, NvU32 *arg4) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetGpuPtimerOffset(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *arg3, NvU32 *arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetGpuPtimerOffset(pGpu, pTmr, arg3, arg4) tmrGetGpuPtimerOffset_46f6a7(pGpu, pTmr, arg3, arg4) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetGpuPtimerOffset_HAL(pGpu, pTmr, arg3, arg4) tmrGetGpuPtimerOffset(pGpu, pTmr, arg3, arg4) + +static inline NvU64 tmrGetPtimerOffsetNs_4a4dee(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return 0; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU64 tmrGetPtimerOffsetNs(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetPtimerOffsetNs(pGpu, pTmr) tmrGetPtimerOffsetNs_4a4dee(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetPtimerOffsetNs_HAL(pGpu, pTmr) tmrGetPtimerOffsetNs(pGpu, pTmr) + +static inline NV_STATUS tmrSetAlarm_56cd7a(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 alarm, THREAD_STATE_NODE *pThreadState) { + return NV_OK; +} + +static inline NV_STATUS tmrSetAlarm_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 alarm, THREAD_STATE_NODE *pThreadState) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetAlarm(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 alarm, THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetAlarm(pGpu, pTmr, alarm, pThreadState) tmrSetAlarm_56cd7a(pGpu, pTmr, alarm, pThreadState) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetAlarm_HAL(pGpu, pTmr, alarm, pThreadState) tmrSetAlarm(pGpu, pTmr, alarm, pThreadState) + +static inline NvBool tmrGetAlarmPending_3dd2c9(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + return NV_FALSE; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrGetAlarmPending(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetAlarmPending(pGpu, pTmr, arg3) tmrGetAlarmPending_3dd2c9(pGpu, pTmr, arg3) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetAlarmPending_HAL(pGpu, pTmr, arg3) tmrGetAlarmPending(pGpu, pTmr, arg3) + +static inline NV_STATUS tmrSetCountdownIntrDisable_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetCountdownIntrDisable(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCountdownIntrDisable(pGpu, pTmr) tmrSetCountdownIntrDisable_46f6a7(pGpu, pTmr) +#endif 
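
A reading aid for the hashed suffixes that recur through this header: they name NVOC's canned inline bodies, and the mapping is consistent with the definitions above — _56cd7a returns NV_OK, _46f6a7 returns NV_ERR_NOT_SUPPORTED, _3dd2c9 returns NV_FALSE, _4a4dee returns 0, _b3696a is an empty return — while _IMPL and _OSTIMER denote real implementations (generic and OS-timer-backed, respectively). Distilled into the one shape every public timer entry point follows, with tmrFoo a hypothetical name rather than anything in the patch:

    NV_STATUS tmrFoo_IMPL(struct OBJTMR *pTmr);                // real body, in a .c file
    static inline NV_STATUS tmrFoo_46f6a7(struct OBJTMR *pTmr)
    {
        return NV_ERR_NOT_SUPPORTED;                           // canned stub, hash-named
    }

    #ifdef __nvoc_objtmr_h_disabled
    static inline NV_STATUS tmrFoo(struct OBJTMR *pTmr)
    {
        NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!");      // engine compiled out
        return NV_ERR_NOT_SUPPORTED;
    }
    #else
    #define tmrFoo(pTmr) tmrFoo_IMPL(pTmr)                     // bind name to one body
    #endif

    #define tmrFoo_HAL(pTmr) tmrFoo(pTmr)                      // per-HAL alias
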
//__nvoc_objtmr_h_disabled + +#define tmrSetCountdownIntrDisable_HAL(pGpu, pTmr) tmrSetCountdownIntrDisable(pGpu, pTmr) + +static inline NV_STATUS tmrSetCountdownIntrEnable_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetCountdownIntrEnable(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCountdownIntrEnable(pGpu, pTmr) tmrSetCountdownIntrEnable_46f6a7(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetCountdownIntrEnable_HAL(pGpu, pTmr) tmrSetCountdownIntrEnable(pGpu, pTmr) + +static inline NV_STATUS tmrSetCountdownIntrReset_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetCountdownIntrReset(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCountdownIntrReset(pGpu, pTmr, arg3) tmrSetCountdownIntrReset_46f6a7(pGpu, pTmr, arg3) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetCountdownIntrReset_HAL(pGpu, pTmr, arg3) tmrSetCountdownIntrReset(pGpu, pTmr, arg3) + +static inline NvBool tmrGetCountdownPending_3dd2c9(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + return NV_FALSE; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrGetCountdownPending(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetCountdownPending(pGpu, pTmr, arg3) tmrGetCountdownPending_3dd2c9(pGpu, pTmr, arg3) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetCountdownPending_HAL(pGpu, pTmr, arg3) tmrGetCountdownPending(pGpu, pTmr, arg3) + +static inline NV_STATUS tmrSetCountdown_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg3, NvU32 arg4, THREAD_STATE_NODE *arg5) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetCountdown(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg3, NvU32 arg4, THREAD_STATE_NODE *arg5) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCountdown(pGpu, pTmr, arg3, arg4, arg5) tmrSetCountdown_46f6a7(pGpu, pTmr, arg3, arg4, arg5) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetCountdown_HAL(pGpu, pTmr, arg3, arg4, arg5) tmrSetCountdown(pGpu, pTmr, arg3, arg4, arg5) + +static inline NV_STATUS tmrGetTimerBar0MapInfo_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *arg3, NvU32 *arg4) { + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetTimerBar0MapInfo(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *arg3, NvU32 *arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetTimerBar0MapInfo(pGpu, pTmr, arg3, arg4) tmrGetTimerBar0MapInfo_46f6a7(pGpu, pTmr, arg3, arg4) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetTimerBar0MapInfo_HAL(pGpu, pTmr, arg3, arg4) tmrGetTimerBar0MapInfo(pGpu, pTmr, arg3, arg4) + +static inline NV_STATUS tmrGrTickFreqChange_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvBool arg3) 
{ + return NV_ERR_NOT_SUPPORTED; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGrTickFreqChange(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvBool arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGrTickFreqChange(pGpu, pTmr, arg3) tmrGrTickFreqChange_46f6a7(pGpu, pTmr, arg3) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGrTickFreqChange_HAL(pGpu, pTmr, arg3) tmrGrTickFreqChange(pGpu, pTmr, arg3) + +static inline NvU32 tmrGetWallClkScaleFactor_4a4dee(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return 0; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU32 tmrGetWallClkScaleFactor(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetWallClkScaleFactor(pGpu, pTmr) tmrGetWallClkScaleFactor_4a4dee(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetWallClkScaleFactor_HAL(pGpu, pTmr) tmrGetWallClkScaleFactor(pGpu, pTmr) + +NV_STATUS tmrGetGpuAndCpuTimestampPair_OSTIMER(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *arg3, NvU64 *arg4); + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetGpuAndCpuTimestampPair(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *arg3, NvU64 *arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetGpuAndCpuTimestampPair(pGpu, pTmr, arg3, arg4) tmrGetGpuAndCpuTimestampPair_OSTIMER(pGpu, pTmr, arg3, arg4) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetGpuAndCpuTimestampPair_HAL(pGpu, pTmr, arg3, arg4) tmrGetGpuAndCpuTimestampPair(pGpu, pTmr, arg3, arg4) + +static inline void tmrResetTimerRegistersForVF_b3696a(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 gfid) { + return; +} + + +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrResetTimerRegistersForVF(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrResetTimerRegistersForVF(pGpu, pTmr, gfid) tmrResetTimerRegistersForVF_b3696a(pGpu, pTmr, gfid) +#endif //__nvoc_objtmr_h_disabled + +#define tmrResetTimerRegistersForVF_HAL(pGpu, pTmr, gfid) tmrResetTimerRegistersForVF(pGpu, pTmr, gfid) + +NV_STATUS tmrEventCreateOSTimer_OSTIMER(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent); + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventCreateOSTimer(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventCreateOSTimer(pTmr, pEvent) tmrEventCreateOSTimer_OSTIMER(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +#define tmrEventCreateOSTimer_HAL(pTmr, pEvent) tmrEventCreateOSTimer(pTmr, pEvent) + +NV_STATUS tmrEventScheduleRelOSTimer_OSTIMER(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent, NvU64 timeRelNs); + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventScheduleRelOSTimer(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent, NvU64 timeRelNs) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventScheduleRelOSTimer(pTmr, pEvent, timeRelNs) tmrEventScheduleRelOSTimer_OSTIMER(pTmr, pEvent, timeRelNs) +#endif //__nvoc_objtmr_h_disabled + +#define tmrEventScheduleRelOSTimer_HAL(pTmr, pEvent, timeRelNs) tmrEventScheduleRelOSTimer(pTmr, 
pEvent, timeRelNs) + +NV_STATUS tmrEventServiceOSTimerCallback_OSTIMER(struct OBJGPU *pGpu, struct OBJTMR *pTmr, struct TMR_EVENT *pEvent); + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventServiceOSTimerCallback(struct OBJGPU *pGpu, struct OBJTMR *pTmr, struct TMR_EVENT *pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventServiceOSTimerCallback(pGpu, pTmr, pEvent) tmrEventServiceOSTimerCallback_OSTIMER(pGpu, pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +#define tmrEventServiceOSTimerCallback_HAL(pGpu, pTmr, pEvent) tmrEventServiceOSTimerCallback(pGpu, pTmr, pEvent) + +NV_STATUS tmrEventCancelOSTimer_OSTIMER(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent); + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventCancelOSTimer(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventCancelOSTimer(pTmr, pEvent) tmrEventCancelOSTimer_OSTIMER(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +#define tmrEventCancelOSTimer_HAL(pTmr, pEvent) tmrEventCancelOSTimer(pTmr, pEvent) + +NV_STATUS tmrEventDestroyOSTimer_OSTIMER(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent); + + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventDestroyOSTimer(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventDestroyOSTimer(pTmr, pEvent) tmrEventDestroyOSTimer_OSTIMER(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +#define tmrEventDestroyOSTimer_HAL(pTmr, pEvent) tmrEventDestroyOSTimer(pTmr, pEvent) + +NV_STATUS tmrConstructEngine_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr, ENGDESCRIPTOR arg3); + +NV_STATUS tmrStatePreInitLocked_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr); + +NV_STATUS tmrStateInitLocked_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr); + +NV_STATUS tmrStateLoad_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg3); + +NV_STATUS tmrStateUnload_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg3); + +void tmrStateDestroy_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr); + +static inline NvBool tmrServiceSwrlCallbacksPmcTree(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + return NV_FALSE; +} + +static inline NvBool tmrClearSwrlCallbacksSemaphore(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + return NV_FALSE; +} + +static inline void tmrServiceSwrlCallbacks(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + return; +} + +static inline NvBool tmrServiceSwrlWrapper(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3) { + return NV_FALSE; +} + +void tmrDestruct_IMPL(struct OBJTMR *pTmr); + +#define __nvoc_tmrDestruct(pTmr) tmrDestruct_IMPL(pTmr) +NV_STATUS tmrEventCreate_IMPL(struct OBJTMR *pTmr, struct TMR_EVENT **ppEvent, TIMEPROC callbackFn, void *pUserData, NvU32 flags); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventCreate(struct OBJTMR *pTmr, struct TMR_EVENT **ppEvent, TIMEPROC callbackFn, void *pUserData, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventCreate(pTmr, ppEvent, callbackFn, pUserData, flags) tmrEventCreate_IMPL(pTmr, ppEvent, callbackFn, pUserData, flags) +#endif 
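
The tmrEvent*OSTimer entry points above, together with tmrEventCreate, form the OS-timer-backed event lifecycle: create, schedule (relative or absolute), service on expiry, cancel, destroy. A sketch of the intended flow, assuming TIMEPROC has the conventional (pGpu, pTmr, pEvent) callback shape, treating myCallback and pMyData as hypothetical caller-supplied names, and using the public tmrEventScheduleRel wrapper declared a bit further down; the timeRelNs parameter name above indicates the relative delay is in nanoseconds:

    static NV_STATUS myCallback(struct OBJGPU *pGpu, struct OBJTMR *pTmr, struct TMR_EVENT *pEvent)
    {
        // Runs when the timer fires; the pUserData supplied at create
        // time travels with the event.
        return NV_OK;
    }

    struct TMR_EVENT *pEvent = NULL;
    NV_STATUS status = tmrEventCreate(pTmr, &pEvent, myCallback, pMyData, 0 /* flags */);
    if (status == NV_OK)
        status = tmrEventScheduleRel(pTmr, pEvent, 1000000ULL); // fire in ~1 ms
    // ... later, during teardown:
    tmrEventCancel(pTmr, pEvent);   // stop it if still pending
    tmrEventDestroy(pTmr, pEvent);  // then release it
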
//__nvoc_objtmr_h_disabled + +void tmrEventCancel_IMPL(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent); + +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrEventCancel(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventCancel(pTmr, pEvent) tmrEventCancel_IMPL(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +void tmrEventDestroy_IMPL(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent); + +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrEventDestroy(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventDestroy(pTmr, pEvent) tmrEventDestroy_IMPL(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +void tmrInitCallbacks_IMPL(struct OBJTMR *pTmr); + +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrInitCallbacks(struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrInitCallbacks(pTmr) tmrInitCallbacks_IMPL(pTmr) +#endif //__nvoc_objtmr_h_disabled + +void tmrSetCountdownCallback_IMPL(struct OBJTMR *pTmr, TIMEPROC_COUNTDOWN arg2); + +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrSetCountdownCallback(struct OBJTMR *pTmr, TIMEPROC_COUNTDOWN arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCountdownCallback(pTmr, arg2) tmrSetCountdownCallback_IMPL(pTmr, arg2) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrCancelCallback_IMPL(struct OBJTMR *pTmr, void *pObject); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrCancelCallback(struct OBJTMR *pTmr, void *pObject) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrCancelCallback(pTmr, pObject) tmrCancelCallback_IMPL(pTmr, pObject) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrGetCurrentDiffTime_IMPL(struct OBJTMR *pTmr, NvU64 arg2, NvU64 *arg3); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetCurrentDiffTime(struct OBJTMR *pTmr, NvU64 arg2, NvU64 *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetCurrentDiffTime(pTmr, arg2, arg3) tmrGetCurrentDiffTime_IMPL(pTmr, arg2, arg3) +#endif //__nvoc_objtmr_h_disabled + +void tmrGetSystemTime_IMPL(struct OBJTMR *pTmr, PDAYMSECTIME pTime); + +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrGetSystemTime(struct OBJTMR *pTmr, PDAYMSECTIME pTime) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetSystemTime(pTmr, pTime) tmrGetSystemTime_IMPL(pTmr, pTime) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrCheckCallbacksReleaseSem_IMPL(struct OBJTMR *pTmr, NvU32 chId); + +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrCheckCallbacksReleaseSem(struct OBJTMR *pTmr, NvU32 chId) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrCheckCallbacksReleaseSem(pTmr, chId) tmrCheckCallbacksReleaseSem_IMPL(pTmr, chId) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrDiffExceedsTime_IMPL(struct OBJTMR *pTmr, PDAYMSECTIME pFutureTime, PDAYMSECTIME pPastTime, NvU32 time); + +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrDiffExceedsTime(struct OBJTMR *pTmr, PDAYMSECTIME pFutureTime, PDAYMSECTIME pPastTime, NvU32 time) { + 
NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrDiffExceedsTime(pTmr, pFutureTime, pPastTime, time) tmrDiffExceedsTime_IMPL(pTmr, pFutureTime, pPastTime, time) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrEventScheduleAbs_IMPL(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent, NvU64 timeAbs); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventScheduleAbs(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent, NvU64 timeAbs) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventScheduleAbs(pTmr, pEvent, timeAbs) tmrEventScheduleAbs_IMPL(pTmr, pEvent, timeAbs) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrScheduleCallbackAbs_IMPL(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg2, void *arg3, NvU64 arg4, NvU32 arg5, NvU32 arg6); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrScheduleCallbackAbs(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg2, void *arg3, NvU64 arg4, NvU32 arg5, NvU32 arg6) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrScheduleCallbackAbs(pTmr, arg2, arg3, arg4, arg5, arg6) tmrScheduleCallbackAbs_IMPL(pTmr, arg2, arg3, arg4, arg5, arg6) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrEventScheduleRel_IMPL(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent, NvU64 timeRel); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventScheduleRel(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent, NvU64 timeRel) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventScheduleRel(pTmr, pEvent, timeRel) tmrEventScheduleRel_IMPL(pTmr, pEvent, timeRel) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrScheduleCallbackRel_IMPL(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg2, void *arg3, NvU64 arg4, NvU32 arg5, NvU32 arg6); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrScheduleCallbackRel(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg2, void *arg3, NvU64 arg4, NvU32 arg5, NvU32 arg6) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrScheduleCallbackRel(pTmr, arg2, arg3, arg4, arg5, arg6) tmrScheduleCallbackRel_IMPL(pTmr, arg2, arg3, arg4, arg5, arg6) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrScheduleCallbackRelSec_IMPL(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg2, void *arg3, NvU32 arg4, NvU32 arg5, NvU32 arg6); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrScheduleCallbackRelSec(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg2, void *arg3, NvU32 arg4, NvU32 arg5, NvU32 arg6) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrScheduleCallbackRelSec(pTmr, arg2, arg3, arg4, arg5, arg6) tmrScheduleCallbackRelSec_IMPL(pTmr, arg2, arg3, arg4, arg5, arg6) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrEventOnList_IMPL(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent); + +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrEventOnList(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventOnList(pTmr, pEvent) tmrEventOnList_IMPL(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrEventServiceTimer_IMPL(struct OBJGPU *pGpu, 
struct OBJTMR *pTmr, struct TMR_EVENT *pEvent); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventServiceTimer(struct OBJGPU *pGpu, struct OBJTMR *pTmr, struct TMR_EVENT *pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventServiceTimer(pGpu, pTmr, pEvent) tmrEventServiceTimer_IMPL(pGpu, pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrCallbackOnList_IMPL(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg2, void *arg3); + +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrCallbackOnList(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg2, void *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrCallbackOnList(pTmr, arg2, arg3) tmrCallbackOnList_IMPL(pTmr, arg2, arg3) +#endif //__nvoc_objtmr_h_disabled + +void tmrRmCallbackIntrEnable_IMPL(struct OBJTMR *pTmr, struct OBJGPU *pGpu); + +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrRmCallbackIntrEnable(struct OBJTMR *pTmr, struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrRmCallbackIntrEnable(pTmr, pGpu) tmrRmCallbackIntrEnable_IMPL(pTmr, pGpu) +#endif //__nvoc_objtmr_h_disabled + +void tmrRmCallbackIntrDisable_IMPL(struct OBJTMR *pTmr, struct OBJGPU *pGpu); + +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrRmCallbackIntrDisable(struct OBJTMR *pTmr, struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrRmCallbackIntrDisable(pTmr, pGpu) tmrRmCallbackIntrDisable_IMPL(pTmr, pGpu) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrTimeUntilNextCallback_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *pTimeUntilCallbackNs); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrTimeUntilNextCallback(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *pTimeUntilCallbackNs) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrTimeUntilNextCallback(pGpu, pTmr, pTimeUntilCallbackNs) tmrTimeUntilNextCallback_IMPL(pGpu, pTmr, pTimeUntilCallbackNs) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrEventTimeUntilNextCallback_IMPL(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent, NvU64 *pTimeUntilCallbackNs); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventTimeUntilNextCallback(struct OBJTMR *pTmr, struct TMR_EVENT *pEvent, NvU64 *pTimeUntilCallbackNs) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventTimeUntilNextCallback(pTmr, pEvent, pTimeUntilCallbackNs) tmrEventTimeUntilNextCallback_IMPL(pTmr, pEvent, pTimeUntilCallbackNs) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrCallExpiredCallbacks_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr); + +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrCallExpiredCallbacks(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrCallExpiredCallbacks(pGpu, pTmr) tmrCallExpiredCallbacks_IMPL(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +void tmrResetCallbackInterrupt_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr); + +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrResetCallbackInterrupt(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + 
NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!");
+}
+#else //__nvoc_objtmr_h_disabled
+#define tmrResetCallbackInterrupt(pGpu, pTmr) tmrResetCallbackInterrupt_IMPL(pGpu, pTmr)
+#endif //__nvoc_objtmr_h_disabled
+
+NvBool tmrGetCallbackInterruptPending_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr);
+
+#ifdef __nvoc_objtmr_h_disabled
+static inline NvBool tmrGetCallbackInterruptPending(struct OBJGPU *pGpu, struct OBJTMR *pTmr) {
+    NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_objtmr_h_disabled
+#define tmrGetCallbackInterruptPending(pGpu, pTmr) tmrGetCallbackInterruptPending_IMPL(pGpu, pTmr)
+#endif //__nvoc_objtmr_h_disabled
+
+#undef PRIVATE_FIELD
+
+#ifndef NVOC_OBJTMR_H_PRIVATE_ACCESS_ALLOWED
+#ifndef __nvoc_objtmr_h_disabled
+#undef tmrReadTimeLoReg
+NvU32 NVOC_PRIVATE_FUNCTION(tmrReadTimeLoReg)(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3);
+#endif //__nvoc_objtmr_h_disabled
+
+#ifndef __nvoc_objtmr_h_disabled
+#undef tmrReadTimeHiReg
+NvU32 NVOC_PRIVATE_FUNCTION(tmrReadTimeHiReg)(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg3);
+#endif //__nvoc_objtmr_h_disabled
+
+#ifndef __nvoc_objtmr_h_disabled
+#undef tmrGetGpuPtimerOffset
+NV_STATUS NVOC_PRIVATE_FUNCTION(tmrGetGpuPtimerOffset)(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *arg3, NvU32 *arg4);
+#endif //__nvoc_objtmr_h_disabled
+
+#ifndef __nvoc_objtmr_h_disabled
+#undef tmrGetPtimerOffsetNs
+NvU64 NVOC_PRIVATE_FUNCTION(tmrGetPtimerOffsetNs)(struct OBJGPU *pGpu, struct OBJTMR *pTmr);
+#endif //__nvoc_objtmr_h_disabled
+
+#endif // NVOC_OBJTMR_H_PRIVATE_ACCESS_ALLOWED
+
+
+NV_STATUS tmrCtrlCmdEventCreate(struct OBJGPU *pGpu, TMR_EVENT_SET_PARAMS *pParams);
+NV_STATUS tmrCtrlCmdEventSchedule(struct OBJGPU *pGpu, TMR_EVENT_SCHEDULE_PARAMS *pParams);
+NV_STATUS tmrCtrlCmdEventCancel(struct OBJGPU *pGpu, TMR_EVENT_GENERAL_PARAMS *pParams);
+NV_STATUS tmrCtrlCmdEventDestroy(struct OBJGPU *pGpu, TMR_EVENT_GENERAL_PARAMS *pParams);
+
+#endif // _OBJTMR_H_
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _G_OBJTMR_NVOC_H_
diff --git a/src/nvidia/generated/g_odb.h b/src/nvidia/generated/g_odb.h
new file mode 100644
index 0000000..dc0f919
--- /dev/null
+++ b/src/nvidia/generated/g_odb.h
@@ -0,0 +1,60 @@
+// This file is automatically generated by rmconfig - DO NOT EDIT!
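
The g_odb.h header opening here is another rmconfig artifact, and its cast macros make the profile's limits visible: staticCast is compile-time pointer selection through an embedded __iom_pbase_* member, while the __dynamicCast_* macros for OBJRPC and OBJRPCSTRUCTURECOPY are hard-wired to NULL, so run-time downcasts to those classes always come back empty in this build. A hedged sketch of what that means for a caller, with pDynamicObj a hypothetical object pointer:

    // Compile-time cast: expands to the embedded base-class pointer.
    OBJRPC *pRpc = staticCast(pDynamicObj, OBJRPC);

    // Run-time cast: compiled to NULL for these classes in this profile,
    // so a NULL check is mandatory.
    OBJRPC *pMaybe = dynamicCast(pDynamicObj, OBJRPC);
    if (pMaybe == NULL)
    {
        // class not present in the devel-soc-disp-dce-client build
    }
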
+//
+// Profile: devel-soc-disp-dce-client
+// Template: templates/gt_odb.h
+//
+
+#ifndef _G_ODB_H_
+#define _G_ODB_H_
+
+#define OBJECT_BASE_DEFINITION(ENG) __##ENG##_OBJECT_BASE_DEFINITION
+
+#ifndef __NVOC_CLASS_OBJRPC_TYPEDEF__
+#define __NVOC_CLASS_OBJRPC_TYPEDEF__
+typedef struct OBJRPC OBJRPC;
+#endif /* __NVOC_CLASS_OBJRPC_TYPEDEF__ */
+typedef struct OBJRPC *POBJRPC;
+
+#ifndef __NVOC_CLASS_OBJRPCSTRUCTURECOPY_TYPEDEF__
+#define __NVOC_CLASS_OBJRPCSTRUCTURECOPY_TYPEDEF__
+typedef struct OBJRPCSTRUCTURECOPY OBJRPCSTRUCTURECOPY;
+#endif /* __NVOC_CLASS_OBJRPCSTRUCTURECOPY_TYPEDEF__ */
+typedef struct OBJRPCSTRUCTURECOPY *POBJRPCSTRUCTURECOPY;
+
+
+
+#if NV_PRINTF_STRINGS_ALLOWED
+#define odbGetClassName(p) (objGetClassInfo((p))->name)
+#endif
+
+// TODO : temporary hack, to delete
+#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__
+#define __NVOC_CLASS_OBJGPU_TYPEDEF__
+typedef struct OBJGPU OBJGPU;
+#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */
+typedef struct OBJGPU *POBJGPU;
+
+#ifndef __NVOC_CLASS_OBJDISP_TYPEDEF__
+#define __NVOC_CLASS_OBJDISP_TYPEDEF__
+typedef struct OBJDISP OBJDISP;
+#endif /* __NVOC_CLASS_OBJDISP_TYPEDEF__ */
+typedef struct OBJDISP *POBJDISP;
+
+//
+// #define staticCast(pObj, TYPE) ((pObj)? __staticCast_##TYPE((pObj)) : NULL)
+//
+#define __staticCast_OBJRPC(pObj) ((pObj)->__iom_pbase_OBJRPC)
+#define __staticCast_OBJRPCSTRUCTURECOPY(pObj) ((pObj)->__iom_pbase_OBJRPCSTRUCTURECOPY)
+
+
+//
+// #define dynamicCast(pObj, TYPE) (__dynamicCast_##TYPE((pObj)))
+//
+#define __dynamicCast_OBJRPC(pObj) NULL
+#define __dynamicCast_OBJRPCSTRUCTURECOPY(pObj) NULL
+
+
+
+
+
+#endif // _G_ODB_H_
diff --git a/src/nvidia/generated/g_os_desc_mem_nvoc.c b/src/nvidia/generated/g_os_desc_mem_nvoc.c
new file mode 100644
index 0000000..a8c6c94
--- /dev/null
+++ b/src/nvidia/generated/g_os_desc_mem_nvoc.c
@@ -0,0 +1,527 @@
+#define NVOC_OS_DESC_MEM_H_PRIVATE_ACCESS_ALLOWED
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+#include "nvtypes.h"
+#include "nvport/nvport.h"
+#include "nvport/inline/util_valist.h"
+#include "utils/nvassert.h"
+#include "g_os_desc_mem_nvoc.h"
+
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check__0xb3dacd = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_OsDescMemory;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory;
+
+// Forward declarations for OsDescMemory
+void __nvoc_init__Memory(Memory*);
+void __nvoc_init__OsDescMemory(OsDescMemory*);
+void __nvoc_init_funcTable_OsDescMemory(OsDescMemory*);
+NV_STATUS __nvoc_ctor_OsDescMemory(OsDescMemory*, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+void __nvoc_init_dataField_OsDescMemory(OsDescMemory*);
+void __nvoc_dtor_OsDescMemory(OsDescMemory*);
+
+// Structures used within RTTI (run-time type information)
+extern const struct NVOC_CASTINFO __nvoc_castinfo__OsDescMemory;
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info__OsDescMemory;
+
+// Down-thunk(s) to bridge OsDescMemory methods from ancestors (if any)
+NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, RsClient *pInvokingClient, void
*pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_Memory_resIsDuplicate(struct RsResource *pMemory, NvHandle hMemory, NvBool *pDuplicate); // super +NV_STATUS __nvoc_down_thunk_Memory_resControl(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_Memory_resMap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_down_thunk_Memory_resUnmap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_down_thunk_Memory_rmresGetMemInterMapParams(struct RmResource *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS __nvoc_down_thunk_Memory_rmresCheckMemInterUnmap(struct RmResource *pMemory, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_down_thunk_Memory_rmresGetMemoryMappingDescriptor(struct RmResource *pMemory, MEMORY_DESCRIPTOR **ppMemDesc); // super +NvBool __nvoc_down_thunk_OsDescMemory_resCanCopy(struct RsResource *pOsDescMemory); // this + +// Up-thunk(s) to bridge OsDescMemory methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2 +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2 +NvU32 
__nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2 +NvBool __nvoc_up_thunk_RmResource_memAccessCallback(struct Memory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NvBool __nvoc_up_thunk_RmResource_memShareCallback(struct Memory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_up_thunk_RmResource_memControlSerialization_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_memControlSerialization_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_memControl_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_memControl_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_memCanCopy(struct Memory *pResource); // super +void __nvoc_up_thunk_RsResource_memPreDestruct(struct Memory *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_memControlFilter(struct Memory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_memIsPartialUnmapSupported(struct Memory *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_memMapTo(struct Memory *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_memUnmapFrom(struct Memory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_memGetRefCount(struct Memory *pResource); // super +void __nvoc_up_thunk_RsResource_memAddAdditionalDependants(struct RsClient *pClient, struct Memory *pResource, RsResourceRef *pReference); // super +NV_STATUS __nvoc_up_thunk_Memory_osdescIsDuplicate(struct OsDescMemory *pMemory, NvHandle hMemory, NvBool *pDuplicate); // this +NV_STATUS __nvoc_up_thunk_Memory_osdescGetMapAddrSpace(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this +NV_STATUS __nvoc_up_thunk_Memory_osdescControl(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_Memory_osdescMap(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_Memory_osdescUnmap(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_Memory_osdescGetMemInterMapParams(struct OsDescMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_Memory_osdescCheckMemInterUnmap(struct OsDescMemory *pMemory, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_Memory_osdescGetMemoryMappingDescriptor(struct OsDescMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_Memory_osdescCheckCopyPermissions(struct OsDescMemory *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice); // this +NV_STATUS 
__nvoc_up_thunk_Memory_osdescIsReady(struct OsDescMemory *pMemory, NvBool bCopyConstructorContext); // this +NvBool __nvoc_up_thunk_Memory_osdescIsGpuMapAllowed(struct OsDescMemory *pMemory, struct OBJGPU *pGpu); // this +NvBool __nvoc_up_thunk_Memory_osdescIsExportAllowed(struct OsDescMemory *pMemory); // this +NvBool __nvoc_up_thunk_RmResource_osdescAccessCallback(struct OsDescMemory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NvBool __nvoc_up_thunk_RmResource_osdescShareCallback(struct OsDescMemory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_RmResource_osdescControlSerialization_Prologue(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_osdescControlSerialization_Epilogue(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_osdescControl_Prologue(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_osdescControl_Epilogue(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RsResource_osdescPreDestruct(struct OsDescMemory *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_osdescControlFilter(struct OsDescMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_osdescIsPartialUnmapSupported(struct OsDescMemory *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_osdescMapTo(struct OsDescMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_osdescUnmapFrom(struct OsDescMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_osdescGetRefCount(struct OsDescMemory *pResource); // this +void __nvoc_up_thunk_RsResource_osdescAddAdditionalDependants(struct RsClient *pClient, struct OsDescMemory *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_OsDescMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OsDescMemory), + /*classId=*/ classId(OsDescMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OsDescMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OsDescMemory, + /*pCastInfo=*/ &__nvoc_castinfo__OsDescMemory, + /*pExportInfo=*/ &__nvoc_export_info__OsDescMemory +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__OsDescMemory __nvoc_metadata__OsDescMemory = { + .rtti.pClassDef = &__nvoc_class_def_OsDescMemory, // (osdesc) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OsDescMemory, + .rtti.offset = 0, + .metadata__Memory.rtti.pClassDef = &__nvoc_class_def_Memory, // (mem) super + .metadata__Memory.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Memory.rtti.offset = NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory), + .metadata__Memory.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2 + .metadata__Memory.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Memory.metadata__RmResource.rtti.offset = NV_OFFSETOF(OsDescMemory, 
__nvoc_base_Memory.__nvoc_base_RmResource), + .metadata__Memory.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3 + .metadata__Memory.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Memory.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4 + .metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3 + .metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + + .vtable.__osdescCanCopy__ = &osdescCanCopy_IMPL, // virtual override (res) base (mem) + .metadata__Memory.vtable.__memCanCopy__ = &__nvoc_up_thunk_RsResource_memCanCopy, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &__nvoc_down_thunk_OsDescMemory_resCanCopy, // virtual + .vtable.__osdescIsDuplicate__ = &__nvoc_up_thunk_Memory_osdescIsDuplicate, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memIsDuplicate__ = &memIsDuplicate_IMPL, // virtual override (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &__nvoc_down_thunk_Memory_resIsDuplicate, // virtual + .vtable.__osdescGetMapAddrSpace__ = &__nvoc_up_thunk_Memory_osdescGetMapAddrSpace, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memGetMapAddrSpace__ = &memGetMapAddrSpace_IMPL, // virtual + .vtable.__osdescControl__ = &__nvoc_up_thunk_Memory_osdescControl, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memControl__ = &memControl_IMPL, // virtual override (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_Memory_resControl, // virtual + .vtable.__osdescMap__ = &__nvoc_up_thunk_Memory_osdescMap, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memMap__ = &memMap_IMPL, // virtual override (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_Memory_resMap, // virtual + .vtable.__osdescUnmap__ = 
&__nvoc_up_thunk_Memory_osdescUnmap, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memUnmap__ = &memUnmap_IMPL, // virtual override (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_Memory_resUnmap, // virtual + .vtable.__osdescGetMemInterMapParams__ = &__nvoc_up_thunk_Memory_osdescGetMemInterMapParams, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memGetMemInterMapParams__ = &memGetMemInterMapParams_IMPL, // virtual override (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &__nvoc_down_thunk_Memory_rmresGetMemInterMapParams, // virtual + .vtable.__osdescCheckMemInterUnmap__ = &__nvoc_up_thunk_Memory_osdescCheckMemInterUnmap, // inline virtual inherited (mem) base (mem) body + .metadata__Memory.vtable.__memCheckMemInterUnmap__ = &memCheckMemInterUnmap_ac1694, // inline virtual override (rmres) base (rmres) body + .metadata__Memory.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &__nvoc_down_thunk_Memory_rmresCheckMemInterUnmap, // virtual + .vtable.__osdescGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_Memory_osdescGetMemoryMappingDescriptor, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memGetMemoryMappingDescriptor__ = &memGetMemoryMappingDescriptor_IMPL, // virtual override (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &__nvoc_down_thunk_Memory_rmresGetMemoryMappingDescriptor, // virtual + .vtable.__osdescCheckCopyPermissions__ = &__nvoc_up_thunk_Memory_osdescCheckCopyPermissions, // inline virtual inherited (mem) base (mem) body + .metadata__Memory.vtable.__memCheckCopyPermissions__ = &memCheckCopyPermissions_ac1694, // inline virtual body + .vtable.__osdescIsReady__ = &__nvoc_up_thunk_Memory_osdescIsReady, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memIsReady__ = &memIsReady_IMPL, // virtual + .vtable.__osdescIsGpuMapAllowed__ = &__nvoc_up_thunk_Memory_osdescIsGpuMapAllowed, // inline virtual inherited (mem) base (mem) body + .metadata__Memory.vtable.__memIsGpuMapAllowed__ = &memIsGpuMapAllowed_e661f0, // inline virtual body + .vtable.__osdescIsExportAllowed__ = &__nvoc_up_thunk_Memory_osdescIsExportAllowed, // inline virtual inherited (mem) base (mem) body + .metadata__Memory.vtable.__memIsExportAllowed__ = &memIsExportAllowed_e661f0, // inline virtual body + .vtable.__osdescAccessCallback__ = &__nvoc_up_thunk_RmResource_osdescAccessCallback, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memAccessCallback__ = &__nvoc_up_thunk_RmResource_memAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__osdescShareCallback__ = &__nvoc_up_thunk_RmResource_osdescShareCallback, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memShareCallback__ = &__nvoc_up_thunk_RmResource_memShareCallback, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL, // 
virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__osdescControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_osdescControlSerialization_Prologue, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_memControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__osdescControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_osdescControlSerialization_Epilogue, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_memControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__osdescControl_Prologue__ = &__nvoc_up_thunk_RmResource_osdescControl_Prologue, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memControl_Prologue__ = &__nvoc_up_thunk_RmResource_memControl_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__osdescControl_Epilogue__ = &__nvoc_up_thunk_RmResource_osdescControl_Epilogue, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memControl_Epilogue__ = &__nvoc_up_thunk_RmResource_memControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__osdescPreDestruct__ = &__nvoc_up_thunk_RsResource_osdescPreDestruct, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memPreDestruct__ = &__nvoc_up_thunk_RsResource_memPreDestruct, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__osdescControlFilter__ = &__nvoc_up_thunk_RsResource_osdescControlFilter, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memControlFilter__ = &__nvoc_up_thunk_RsResource_memControlFilter, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControlFilter__ = 
&__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__osdescIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_osdescIsPartialUnmapSupported, // inline virtual inherited (res) base (mem) body + .metadata__Memory.vtable.__memIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_memIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__Memory.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__osdescMapTo__ = &__nvoc_up_thunk_RsResource_osdescMapTo, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memMapTo__ = &__nvoc_up_thunk_RsResource_memMapTo, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__osdescUnmapFrom__ = &__nvoc_up_thunk_RsResource_osdescUnmapFrom, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memUnmapFrom__ = &__nvoc_up_thunk_RsResource_memUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__osdescGetRefCount__ = &__nvoc_up_thunk_RsResource_osdescGetRefCount, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memGetRefCount__ = &__nvoc_up_thunk_RsResource_memGetRefCount, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__osdescAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_osdescAddAdditionalDependants, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_memAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__OsDescMemory = { + .numRelatives = 6, + .relatives = { + &__nvoc_metadata__OsDescMemory.rtti, // [0]: (osdesc) this + &__nvoc_metadata__OsDescMemory.metadata__Memory.rtti, // [1]: (mem) super + &__nvoc_metadata__OsDescMemory.metadata__Memory.metadata__RmResource.rtti, // [2]: (rmres) super^2 + &__nvoc_metadata__OsDescMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3 + 
&__nvoc_metadata__OsDescMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4 + &__nvoc_metadata__OsDescMemory.metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3 + } +}; + +// 1 down-thunk(s) defined to bridge methods in OsDescMemory from superclasses + +// osdescCanCopy: virtual override (res) base (mem) +NvBool __nvoc_down_thunk_OsDescMemory_resCanCopy(struct RsResource *pOsDescMemory) { + return osdescCanCopy((struct OsDescMemory *)(((unsigned char *) pOsDescMemory) - NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + + +// 25 up-thunk(s) defined to bridge methods in OsDescMemory to superclasses + +// osdescIsDuplicate: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_osdescIsDuplicate(struct OsDescMemory *pMemory, NvHandle hMemory, NvBool *pDuplicate) { + return memIsDuplicate((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory)), hMemory, pDuplicate); +} + +// osdescGetMapAddrSpace: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_osdescGetMapAddrSpace(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory)), pCallContext, mapFlags, pAddrSpace); +} + +// osdescControl: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_osdescControl(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory)), pCallContext, pParams); +} + +// osdescMap: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_osdescMap(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory)), pCallContext, pParams, pCpuMapping); +} + +// osdescUnmap: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_osdescUnmap(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory)), pCallContext, pCpuMapping); +} + +// osdescGetMemInterMapParams: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_osdescGetMemInterMapParams(struct OsDescMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory)), pParams); +} + +// osdescCheckMemInterUnmap: inline virtual inherited (mem) base (mem) body +NV_STATUS __nvoc_up_thunk_Memory_osdescCheckMemInterUnmap(struct OsDescMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory)), bSubdeviceHandleProvided); +} + +// osdescGetMemoryMappingDescriptor: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_osdescGetMemoryMappingDescriptor(struct OsDescMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(OsDescMemory, 
__nvoc_base_Memory)), ppMemDesc); +} + +// osdescCheckCopyPermissions: inline virtual inherited (mem) base (mem) body +NV_STATUS __nvoc_up_thunk_Memory_osdescCheckCopyPermissions(struct OsDescMemory *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory)), pDstGpu, pDstDevice); +} + +// osdescIsReady: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_osdescIsReady(struct OsDescMemory *pMemory, NvBool bCopyConstructorContext) { + return memIsReady((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory)), bCopyConstructorContext); +} + +// osdescIsGpuMapAllowed: inline virtual inherited (mem) base (mem) body +NvBool __nvoc_up_thunk_Memory_osdescIsGpuMapAllowed(struct OsDescMemory *pMemory, struct OBJGPU *pGpu) { + return memIsGpuMapAllowed((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory)), pGpu); +} + +// osdescIsExportAllowed: inline virtual inherited (mem) base (mem) body +NvBool __nvoc_up_thunk_Memory_osdescIsExportAllowed(struct OsDescMemory *pMemory) { + return memIsExportAllowed((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory))); +} + +// osdescAccessCallback: virtual inherited (rmres) base (mem) +NvBool __nvoc_up_thunk_RmResource_osdescAccessCallback(struct OsDescMemory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// osdescShareCallback: virtual inherited (rmres) base (mem) +NvBool __nvoc_up_thunk_RmResource_osdescShareCallback(struct OsDescMemory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// osdescControlSerialization_Prologue: virtual inherited (rmres) base (mem) +NV_STATUS __nvoc_up_thunk_RmResource_osdescControlSerialization_Prologue(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// osdescControlSerialization_Epilogue: virtual inherited (rmres) base (mem) +void __nvoc_up_thunk_RmResource_osdescControlSerialization_Epilogue(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// osdescControl_Prologue: virtual inherited (rmres) base (mem) +NV_STATUS __nvoc_up_thunk_RmResource_osdescControl_Prologue(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// 
osdescControl_Epilogue: virtual inherited (rmres) base (mem) +void __nvoc_up_thunk_RmResource_osdescControl_Epilogue(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// osdescPreDestruct: virtual inherited (res) base (mem) +void __nvoc_up_thunk_RsResource_osdescPreDestruct(struct OsDescMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// osdescControlFilter: virtual inherited (res) base (mem) +NV_STATUS __nvoc_up_thunk_RsResource_osdescControlFilter(struct OsDescMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// osdescIsPartialUnmapSupported: inline virtual inherited (res) base (mem) body +NvBool __nvoc_up_thunk_RsResource_osdescIsPartialUnmapSupported(struct OsDescMemory *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// osdescMapTo: virtual inherited (res) base (mem) +NV_STATUS __nvoc_up_thunk_RsResource_osdescMapTo(struct OsDescMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// osdescUnmapFrom: virtual inherited (res) base (mem) +NV_STATUS __nvoc_up_thunk_RsResource_osdescUnmapFrom(struct OsDescMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// osdescGetRefCount: virtual inherited (res) base (mem) +NvU32 __nvoc_up_thunk_RsResource_osdescGetRefCount(struct OsDescMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// osdescAddAdditionalDependants: virtual inherited (res) base (mem) +void __nvoc_up_thunk_RsResource_osdescAddAdditionalDependants(struct RsClient *pClient, struct OsDescMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__OsDescMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_OsDescMemory(OsDescMemory *pThis) { + __nvoc_osdescDestruct(pThis); + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OsDescMemory(OsDescMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS 
__nvoc_ctor_OsDescMemory(OsDescMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_OsDescMemory_fail_Memory; + __nvoc_init_dataField_OsDescMemory(pThis); + + status = __nvoc_osdescConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_OsDescMemory_fail__init; + goto __nvoc_ctor_OsDescMemory_exit; // Success + +__nvoc_ctor_OsDescMemory_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_OsDescMemory_fail_Memory: +__nvoc_ctor_OsDescMemory_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_OsDescMemory_1(OsDescMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_OsDescMemory_1 + + +// Initialize vtable(s) for 26 virtual method(s). +void __nvoc_init_funcTable_OsDescMemory(OsDescMemory *pThis) { + __nvoc_init_funcTable_OsDescMemory_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__OsDescMemory(OsDescMemory *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; // (rmres) super^2 + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; // (mem) super + pThis->__nvoc_pbase_OsDescMemory = pThis; // (osdesc) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Memory(&pThis->__nvoc_base_Memory); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__OsDescMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4 + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__OsDescMemory.metadata__Memory.metadata__RmResource.metadata__RsResource; // (res) super^3 + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__OsDescMemory.metadata__Memory.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__OsDescMemory.metadata__Memory.metadata__RmResource; // (rmres) super^2 + pThis->__nvoc_base_Memory.__nvoc_metadata_ptr = &__nvoc_metadata__OsDescMemory.metadata__Memory; // (mem) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__OsDescMemory; // (osdesc) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_OsDescMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_OsDescMemory(OsDescMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + OsDescMemory *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. 
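// Note (a sketch of the allocation contract, inferred from this function's
// own code paths below): when NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is
// set, the caller has already supplied storage through *ppThis, so no heap
// allocation occurs and the later `*ppThis = pThis` assignment is a no-op;
// otherwise the object is heap-allocated here and released with portMemFree
// on the failure path.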
+ status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(OsDescMemory), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(OsDescMemory)); + + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__OsDescMemory(pThis); + status = __nvoc_ctor_OsDescMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_OsDescMemory_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_OsDescMemory_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(OsDescMemory)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OsDescMemory(OsDescMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_OsDescMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_os_desc_mem_nvoc.h b/src/nvidia/generated/g_os_desc_mem_nvoc.h new file mode 100644 index 0000000..cb1ac31 --- /dev/null +++ b/src/nvidia/generated/g_os_desc_mem_nvoc.h @@ -0,0 +1,332 @@ + +#ifndef _G_OS_DESC_MEM_NVOC_H_ +#define _G_OS_DESC_MEM_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_os_desc_mem_nvoc.h" + +#ifndef _OS_DESC_MEMORY_H_ +#define _OS_DESC_MEMORY_H_ + +#include "mem_mgr/mem.h" + +/*! + * Bind memory allocated through os descriptor + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_OS_DESC_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__OsDescMemory; +struct NVOC_METADATA__Memory; +struct NVOC_VTABLE__OsDescMemory; + + +struct OsDescMemory { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__OsDescMemory *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct Memory __nvoc_base_Memory; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^4 + struct RsResource *__nvoc_pbase_RsResource; // res super^3 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^2 + struct Memory *__nvoc_pbase_Memory; // mem super + struct OsDescMemory *__nvoc_pbase_OsDescMemory; // osdesc +}; + + +// Vtable with 26 per-class function pointers +struct NVOC_VTABLE__OsDescMemory { + NvBool (*__osdescCanCopy__)(struct OsDescMemory * /*this*/); // virtual override (res) base (mem) + NV_STATUS (*__osdescIsDuplicate__)(struct OsDescMemory * /*this*/, NvHandle, NvBool *); // virtual inherited (mem) base (mem) + NV_STATUS (*__osdescGetMapAddrSpace__)(struct OsDescMemory * /*this*/, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (mem) base (mem) + NV_STATUS (*__osdescControl__)(struct OsDescMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (mem) base (mem) + NV_STATUS (*__osdescMap__)(struct OsDescMemory * /*this*/, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (mem) base (mem) + NV_STATUS (*__osdescUnmap__)(struct OsDescMemory * /*this*/, CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (mem) base (mem) + NV_STATUS (*__osdescGetMemInterMapParams__)(struct OsDescMemory * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (mem) base (mem) + NV_STATUS (*__osdescCheckMemInterUnmap__)(struct OsDescMemory * /*this*/, NvBool); // inline virtual inherited (mem) base (mem) body + NV_STATUS (*__osdescGetMemoryMappingDescriptor__)(struct OsDescMemory * /*this*/, MEMORY_DESCRIPTOR **); // virtual inherited (mem) base (mem) + NV_STATUS (*__osdescCheckCopyPermissions__)(struct OsDescMemory * /*this*/, struct OBJGPU *, struct Device *); // inline virtual inherited (mem) base (mem) body + NV_STATUS (*__osdescIsReady__)(struct OsDescMemory * /*this*/, NvBool); // virtual inherited (mem) base (mem) + NvBool 
(*__osdescIsGpuMapAllowed__)(struct OsDescMemory * /*this*/, struct OBJGPU *); // inline virtual inherited (mem) base (mem) body + NvBool (*__osdescIsExportAllowed__)(struct OsDescMemory * /*this*/); // inline virtual inherited (mem) base (mem) body + NvBool (*__osdescAccessCallback__)(struct OsDescMemory * /*this*/, RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (mem) + NvBool (*__osdescShareCallback__)(struct OsDescMemory * /*this*/, RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (rmres) base (mem) + NV_STATUS (*__osdescControlSerialization_Prologue__)(struct OsDescMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + void (*__osdescControlSerialization_Epilogue__)(struct OsDescMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + NV_STATUS (*__osdescControl_Prologue__)(struct OsDescMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + void (*__osdescControl_Epilogue__)(struct OsDescMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + void (*__osdescPreDestruct__)(struct OsDescMemory * /*this*/); // virtual inherited (res) base (mem) + NV_STATUS (*__osdescControlFilter__)(struct OsDescMemory * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (mem) + NvBool (*__osdescIsPartialUnmapSupported__)(struct OsDescMemory * /*this*/); // inline virtual inherited (res) base (mem) body + NV_STATUS (*__osdescMapTo__)(struct OsDescMemory * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (mem) + NV_STATUS (*__osdescUnmapFrom__)(struct OsDescMemory * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (mem) + NvU32 (*__osdescGetRefCount__)(struct OsDescMemory * /*this*/); // virtual inherited (res) base (mem) + void (*__osdescAddAdditionalDependants__)(struct RsClient *, struct OsDescMemory * /*this*/, RsResourceRef *); // virtual inherited (res) base (mem) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__OsDescMemory { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Memory metadata__Memory; + const struct NVOC_VTABLE__OsDescMemory vtable; +}; + +#ifndef __NVOC_CLASS_OsDescMemory_TYPEDEF__ +#define __NVOC_CLASS_OsDescMemory_TYPEDEF__ +typedef struct OsDescMemory OsDescMemory; +#endif /* __NVOC_CLASS_OsDescMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OsDescMemory +#define __nvoc_class_id_OsDescMemory 0xb3dacd +#endif /* __nvoc_class_id_OsDescMemory */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OsDescMemory; + +#define __staticCast_OsDescMemory(pThis) \ + ((pThis)->__nvoc_pbase_OsDescMemory) + +#ifdef __nvoc_os_desc_mem_h_disabled +#define __dynamicCast_OsDescMemory(pThis) ((OsDescMemory*) NULL) +#else //__nvoc_os_desc_mem_h_disabled +#define __dynamicCast_OsDescMemory(pThis) \ + ((OsDescMemory*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OsDescMemory))) +#endif //__nvoc_os_desc_mem_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_OsDescMemory(OsDescMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OsDescMemory(OsDescMemory**, Dynamic*, NvU32, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_OsDescMemory(ppNewObj, pParent, 
createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_OsDescMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define osdescCanCopy_FNPTR(pOsDescMemory) pOsDescMemory->__nvoc_metadata_ptr->vtable.__osdescCanCopy__ +#define osdescCanCopy(pOsDescMemory) osdescCanCopy_DISPATCH(pOsDescMemory) +#define osdescIsDuplicate_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsDuplicate__ +#define osdescIsDuplicate(pMemory, hMemory, pDuplicate) osdescIsDuplicate_DISPATCH(pMemory, hMemory, pDuplicate) +#define osdescGetMapAddrSpace_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMapAddrSpace__ +#define osdescGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) osdescGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define osdescControl_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memControl__ +#define osdescControl(pMemory, pCallContext, pParams) osdescControl_DISPATCH(pMemory, pCallContext, pParams) +#define osdescMap_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memMap__ +#define osdescMap(pMemory, pCallContext, pParams, pCpuMapping) osdescMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define osdescUnmap_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memUnmap__ +#define osdescUnmap(pMemory, pCallContext, pCpuMapping) osdescUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define osdescGetMemInterMapParams_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMemInterMapParams__ +#define osdescGetMemInterMapParams(pMemory, pParams) osdescGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define osdescCheckMemInterUnmap_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memCheckMemInterUnmap__ +#define osdescCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) osdescCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define osdescGetMemoryMappingDescriptor_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMemoryMappingDescriptor__ +#define osdescGetMemoryMappingDescriptor(pMemory, ppMemDesc) osdescGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define osdescCheckCopyPermissions_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memCheckCopyPermissions__ +#define osdescCheckCopyPermissions(pMemory, pDstGpu, pDstDevice) osdescCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, pDstDevice) +#define osdescIsReady_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsReady__ +#define osdescIsReady(pMemory, bCopyConstructorContext) osdescIsReady_DISPATCH(pMemory, bCopyConstructorContext) +#define osdescIsGpuMapAllowed_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsGpuMapAllowed__ +#define osdescIsGpuMapAllowed(pMemory, pGpu) osdescIsGpuMapAllowed_DISPATCH(pMemory, pGpu) +#define osdescIsExportAllowed_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsExportAllowed__ +#define osdescIsExportAllowed(pMemory) osdescIsExportAllowed_DISPATCH(pMemory) +#define osdescAccessCallback_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define osdescAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) osdescAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, 
accessRight) +#define osdescShareCallback_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define osdescShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) osdescShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define osdescControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define osdescControlSerialization_Prologue(pResource, pCallContext, pParams) osdescControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define osdescControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define osdescControlSerialization_Epilogue(pResource, pCallContext, pParams) osdescControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define osdescControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define osdescControl_Prologue(pResource, pCallContext, pParams) osdescControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define osdescControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define osdescControl_Epilogue(pResource, pCallContext, pParams) osdescControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define osdescPreDestruct_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define osdescPreDestruct(pResource) osdescPreDestruct_DISPATCH(pResource) +#define osdescControlFilter_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define osdescControlFilter(pResource, pCallContext, pParams) osdescControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define osdescIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define osdescIsPartialUnmapSupported(pResource) osdescIsPartialUnmapSupported_DISPATCH(pResource) +#define osdescMapTo_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define osdescMapTo(pResource, pParams) osdescMapTo_DISPATCH(pResource, pParams) +#define osdescUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define osdescUnmapFrom(pResource, pParams) osdescUnmapFrom_DISPATCH(pResource, pParams) +#define osdescGetRefCount_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define osdescGetRefCount(pResource) osdescGetRefCount_DISPATCH(pResource) +#define osdescAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define osdescAddAdditionalDependants(pClient, pResource, pReference) osdescAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NvBool 
osdescCanCopy_DISPATCH(struct OsDescMemory *pOsDescMemory) { + return pOsDescMemory->__nvoc_metadata_ptr->vtable.__osdescCanCopy__(pOsDescMemory); +} + +static inline NV_STATUS osdescIsDuplicate_DISPATCH(struct OsDescMemory *pMemory, NvHandle hMemory, NvBool *pDuplicate) { + return pMemory->__nvoc_metadata_ptr->vtable.__osdescIsDuplicate__(pMemory, hMemory, pDuplicate); +} + +static inline NV_STATUS osdescGetMapAddrSpace_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__nvoc_metadata_ptr->vtable.__osdescGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS osdescControl_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__nvoc_metadata_ptr->vtable.__osdescControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS osdescMap_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__nvoc_metadata_ptr->vtable.__osdescMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS osdescUnmap_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__nvoc_metadata_ptr->vtable.__osdescUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS osdescGetMemInterMapParams_DISPATCH(struct OsDescMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__nvoc_metadata_ptr->vtable.__osdescGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS osdescCheckMemInterUnmap_DISPATCH(struct OsDescMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__nvoc_metadata_ptr->vtable.__osdescCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS osdescGetMemoryMappingDescriptor_DISPATCH(struct OsDescMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__nvoc_metadata_ptr->vtable.__osdescGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS osdescCheckCopyPermissions_DISPATCH(struct OsDescMemory *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice) { + return pMemory->__nvoc_metadata_ptr->vtable.__osdescCheckCopyPermissions__(pMemory, pDstGpu, pDstDevice); +} + +static inline NV_STATUS osdescIsReady_DISPATCH(struct OsDescMemory *pMemory, NvBool bCopyConstructorContext) { + return pMemory->__nvoc_metadata_ptr->vtable.__osdescIsReady__(pMemory, bCopyConstructorContext); +} + +static inline NvBool osdescIsGpuMapAllowed_DISPATCH(struct OsDescMemory *pMemory, struct OBJGPU *pGpu) { + return pMemory->__nvoc_metadata_ptr->vtable.__osdescIsGpuMapAllowed__(pMemory, pGpu); +} + +static inline NvBool osdescIsExportAllowed_DISPATCH(struct OsDescMemory *pMemory) { + return pMemory->__nvoc_metadata_ptr->vtable.__osdescIsExportAllowed__(pMemory); +} + +static inline NvBool osdescAccessCallback_DISPATCH(struct OsDescMemory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__osdescAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool osdescShareCallback_DISPATCH(struct OsDescMemory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__osdescShareCallback__(pResource, 
pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS osdescControlSerialization_Prologue_DISPATCH(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__osdescControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void osdescControlSerialization_Epilogue_DISPATCH(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__osdescControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS osdescControl_Prologue_DISPATCH(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__osdescControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void osdescControl_Epilogue_DISPATCH(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__osdescControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline void osdescPreDestruct_DISPATCH(struct OsDescMemory *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__osdescPreDestruct__(pResource); +} + +static inline NV_STATUS osdescControlFilter_DISPATCH(struct OsDescMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__osdescControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool osdescIsPartialUnmapSupported_DISPATCH(struct OsDescMemory *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__osdescIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS osdescMapTo_DISPATCH(struct OsDescMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__osdescMapTo__(pResource, pParams); +} + +static inline NV_STATUS osdescUnmapFrom_DISPATCH(struct OsDescMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__osdescUnmapFrom__(pResource, pParams); +} + +static inline NvU32 osdescGetRefCount_DISPATCH(struct OsDescMemory *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__osdescGetRefCount__(pResource); +} + +static inline void osdescAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct OsDescMemory *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__osdescAddAdditionalDependants__(pClient, pResource, pReference); +} + +NvBool osdescCanCopy_IMPL(struct OsDescMemory *pOsDescMemory); + +NV_STATUS osdescConstruct_IMPL(struct OsDescMemory *arg_pOsDescMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_osdescConstruct(arg_pOsDescMemory, arg_pCallContext, arg_pParams) osdescConstruct_IMPL(arg_pOsDescMemory, arg_pCallContext, arg_pParams) +void osdescDestruct_IMPL(struct OsDescMemory *pOsDescMemory); + +#define __nvoc_osdescDestruct(pOsDescMemory) osdescDestruct_IMPL(pOsDescMemory) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_OS_DESC_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_os_hal.h b/src/nvidia/generated/g_os_hal.h new file mode 100644 index 0000000..6ec7761 --- /dev/null +++ b/src/nvidia/generated/g_os_hal.h @@ -0,0 +1,10 @@ +// This file is automatically 
generated by rmconfig - DO NOT EDIT! +// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_eng_empty.h +// +// This file is added to smooth the NVOC migration. After converting a module to +// an NVOC class, stale generated headers in the output directory cause failures +// of incremental builds. This file ensures the content of the old header is +// removed. +// diff --git a/src/nvidia/generated/g_os_nvoc.c b/src/nvidia/generated/g_os_nvoc.c new file mode 100644 index 0000000..cee268f --- /dev/null +++ b/src/nvidia/generated/g_os_nvoc.c @@ -0,0 +1,199 @@ +#define NVOC_OS_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_os_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xaa1d70 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJOS; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +// Forward declarations for OBJOS +void __nvoc_init__Object(Object*); +void __nvoc_init__OBJOS(OBJOS*); +void __nvoc_init_funcTable_OBJOS(OBJOS*); +NV_STATUS __nvoc_ctor_OBJOS(OBJOS*); +void __nvoc_init_dataField_OBJOS(OBJOS*); +void __nvoc_dtor_OBJOS(OBJOS*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__OBJOS; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJOS; + +// Down-thunk(s) to bridge OBJOS methods from ancestors (if any) + +// Up-thunk(s) to bridge OBJOS methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJOS = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJOS), + /*classId=*/ classId(OBJOS), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJOS", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJOS, + /*pCastInfo=*/ &__nvoc_castinfo__OBJOS, + /*pExportInfo=*/ &__nvoc_export_info__OBJOS +}; + + +// Metadata with per-class RTTI with ancestor(s) +static const struct NVOC_METADATA__OBJOS __nvoc_metadata__OBJOS = { + .rtti.pClassDef = &__nvoc_class_def_OBJOS, // (os) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJOS, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(OBJOS, __nvoc_base_Object), +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__OBJOS = { + .numRelatives = 2, + .relatives = { + &__nvoc_metadata__OBJOS.rtti, // [0]: (os) this + &__nvoc_metadata__OBJOS.metadata__Object.rtti, // [1]: (obj) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJOS = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJOS(OBJOS *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJOS(OBJOS *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + pThis->setProperty(pThis, PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER, !(1)); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJOS(OBJOS *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto
__nvoc_ctor_OBJOS_fail_Object; + __nvoc_init_dataField_OBJOS(pThis); + goto __nvoc_ctor_OBJOS_exit; // Success + +__nvoc_ctor_OBJOS_fail_Object: +__nvoc_ctor_OBJOS_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_OBJOS_1(OBJOS *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_OBJOS_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_OBJOS(OBJOS *pThis) { + __nvoc_init_funcTable_OBJOS_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__OBJOS(OBJOS *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_OBJOS = pThis; // (os) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Object(&pThis->__nvoc_base_Object); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__OBJOS.metadata__Object; // (obj) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__OBJOS; // (os) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_OBJOS(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJOS(OBJOS **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + OBJOS *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(OBJOS), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(OBJOS)); + + pThis->__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__OBJOS(pThis); + status = __nvoc_ctor_OBJOS(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJOS_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_OBJOS_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(OBJOS)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJOS(OBJOS **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJOS(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_os_nvoc.h b/src/nvidia/generated/g_os_nvoc.h new file mode 100644 index 0000000..0577dd0 --- /dev/null +++ b/src/nvidia/generated/g_os_nvoc.h @@ -0,0 +1,1456 @@ + +#ifndef _G_OS_NVOC_H_ +#define _G_OS_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_os_nvoc.h" + + +#ifndef _OS_H_ +#define _OS_H_ + +/*! + * @file os.h + * @brief Interface for Operating System module + */ + +/* ------------------------ Core & Library Includes ------------------------- */ +#include "core/core.h" +#include "nvoc/object.h" +#include "containers/btree.h" +#include "ctrl/ctrl0073/ctrl0073dfp.h" +#include "kernel/diagnostics/xid_context.h" +#include "utils/nvbitvector.h" +TYPEDEF_BITVECTOR(MC_ENGINE_BITVECTOR); +/* ------------------------ SDK & Interface Includes ------------------------ */ +#include "nvsecurityinfo.h" +#include "nvacpitypes.h" +#include "nvimpshared.h" // TODO - should move from sdk to resman/interface +#include "nvi2c.h" // TODO - should move from sdk to resman/interface + +/* ------------------------ OS Includes ------------------------------------- */ +#include "os/nv_memory_type.h" +#include "os/nv_memory_area.h" +#include "os/capability.h" + +/* ------------------------ Forward Declarations ---------------------------- */ + +struct OBJOS; + +#ifndef __NVOC_CLASS_OBJOS_TYPEDEF__ +#define __NVOC_CLASS_OBJOS_TYPEDEF__ +typedef struct OBJOS OBJOS; +#endif /* __NVOC_CLASS_OBJOS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOS +#define __nvoc_class_id_OBJOS 0xaa1d70 +#endif /* __nvoc_class_id_OBJOS */ + + + +// +// The OS module should NOT depend on RM modules. The only exception is +// core/core.h. +// +// DO NOT ADD INCLUDES TO RM MODULE HEADERS FROM THIS FILE. The OS module should be +// a leaf module. Dependencies on RM headers in this file result in circular +// dependencies, as most modules depend on the OS module. +// +// Ideally, all types used by the OS module's interface are from the SDK, +// resman/interface, or self-contained within the OS module header. For now, +// since the OS module depends on a few RM internal types, we forward declare them to +// avoid the need to pull in headers from across RM.
+// +typedef struct SYS_STATIC_CONFIG SYS_STATIC_CONFIG; +typedef struct MEMORY_DESCRIPTOR MEMORY_DESCRIPTOR; +typedef struct IOVAMAPPING *PIOVAMAPPING; +typedef struct OBJGPUMGR OBJGPUMGR; +typedef struct EVENTNOTIFICATION EVENTNOTIFICATION, *PEVENTNOTIFICATION; +typedef struct DEVICE_MAPPING DEVICE_MAPPING; +typedef void *PUID_TOKEN; +typedef struct OBJTMR OBJTMR; +typedef struct OBJCL OBJCL; +typedef struct _GUID *LPGUID; + +// +// Forward declare OS_GPU_INFO type +// +// TODO - We shouldn't need a special definition per-OS. OS implementations +// should use a consistent type +// +typedef struct nv_state_t OS_GPU_INFO; + +/* ------------------------ OS Interface ------------------------------------ */ + +typedef struct os_wait_queue OS_WAIT_QUEUE; + +// +// Defines and Typedefs used by the OS +// +typedef NvU64 OS_THREAD_HANDLE; + +// +// Forward references for OS1HZTIMERENTRY symbols +// +typedef struct OS1HZTIMERENTRY *POS1HZTIMERENTRY; +typedef struct OS1HZTIMERENTRY OS1HZTIMERENTRY; + +// +// Simple 1 second callback facility. Schedules the given routine to be called with the supplied data +// in approximately 1 second. Might be called from an elevated IRQL. +// Unlike the tmr facilities (tmrScheduleCallbackXXX), this does not rely on the hardware. +// +typedef void (*OS1HZPROC)(OBJGPU *, void *); + +#define NV_OS_1HZ_ONESHOT 0x00000000 +#define NV_OS_1HZ_REPEAT 0x00000001 + +struct OS1HZTIMERENTRY +{ + OS1HZPROC callback; + void* data; + NvU32 flags; + POS1HZTIMERENTRY next; +}; + +typedef struct RM_PAGEABLE_SECTION { + void *osHandle; // handle returned from OS API + void *pDataSection; // pointer to data inside the target data/bss/const segment +} RM_PAGEABLE_SECTION; + + +// OSPollHotkeyState return values +#define NV_OS_HOTKEY_STATE_DISPLAY_CHANGE 0:0 +#define NV_OS_HOTKEY_STATE_DISPLAY_CHANGE_NOT_FOUND 0x00000000 +#define NV_OS_HOTKEY_STATE_DISPLAY_CHANGE_FOUND 0x00000001 +#define NV_OS_HOTKEY_STATE_SCALE_EVENT 1:1 +#define NV_OS_HOTKEY_STATE_SCALE_EVENT_NOT_FOUND 0x00000000 +#define NV_OS_HOTKEY_STATE_SCALE_EVENT_FOUND 0x00000001 +#define NV_OS_HOTKEY_STATE_LID_EVENT 2:2 +#define NV_OS_HOTKEY_STATE_LID_EVENT_NOT_FOUND 0x00000000 +#define NV_OS_HOTKEY_STATE_LID_EVENT_FOUND 0x00000001 +#define NV_OS_HOTKEY_STATE_POWER_EVENT 3:3 +#define NV_OS_HOTKEY_STATE_POWER_EVENT_NOT_FOUND 0x00000000 +#define NV_OS_HOTKEY_STATE_POWER_EVENT_FOUND 0x00000001 +#define NV_OS_HOTKEY_STATE_DOCK_EVENT 4:4 +#define NV_OS_HOTKEY_STATE_DOCK_EVENT_NOT_FOUND 0x00000000 +#define NV_OS_HOTKEY_STATE_DOCK_EVENT_FOUND 0x00000001 + +#define MAX_BRIGHTNESS_BCL_ELEMENTS 103 + +// ACPI _DOD Bit defines +// These bits are defined in the Hybrid SAS +#define NV_ACPI_DOD_DISPLAY_OWNER 20:18 +#define NV_ACPI_DOD_DISPLAY_OWNER_ALL 0x00000000 +#define NV_ACPI_DOD_DISPLAY_OWNER_MGPU 0x00000001 +#define NV_ACPI_DOD_DISPLAY_OWNER_DGPU1 0x00000002 + +// ACPI 3.0a definitions for requested data length +#define NV_ACPI_DDC_REQUESTED_DATA_LENGTH_128B 0x00000001 +#define NV_ACPI_DDC_REQUESTED_DATA_LENGTH_256B 0x00000002 +#define NV_ACPI_DDC_REQUESTED_DATA_LENGTH_384B 0x00000003 +#define NV_ACPI_DDC_REQUESTED_DATA_LENGTH_512B 0x00000004 +#define NV_ACPI_DDC_REQUESTED_DATA_LENGTH_DEFAULT 0x00000001 + +// osBugCheck bugcode defines +#define OS_BUG_CHECK_BUGCODE_UNKNOWN (0) +#define OS_BUG_CHECK_BUGCODE_INTERNAL_TEST (1) +#define OS_BUG_CHECK_BUGCODE_BUS (2) +#define OS_BUG_CHECK_BUGCODE_RESERVED_3 (3) // previously ECC_DBE +#define OS_BUG_CHECK_BUGCODE_RESERVED_4 (4) // previously NVLINK_TL_ERR +#define
OS_BUG_CHECK_BUGCODE_PAGED_SEGMENT (5) +#define OS_BUG_CHECK_BUGCODE_BSOD_ON_ASSERT (6) +#define OS_BUG_CHECK_BUGCODE_DISPLAY_UNDERFLOW (7) +#define OS_BUG_CHECK_BUGCODE_LAST OS_BUG_CHECK_BUGCODE_DISPLAY_UNDERFLOW + +#define OS_BUG_CHECK_BUGCODE_STR \ + { \ + "Unknown Error", \ + "Nv Internal Testing", \ + "Bus Error", \ + "Reserved", \ + "Reserved", \ + "Invalid Bindata Access", \ + "BSOD on Assert or Breakpoint", \ + "Display Underflow" \ + } + +// Flags needed by OSAllocPagesNode +#define OS_ALLOC_PAGES_NODE_NONE 0x0 +#define OS_ALLOC_PAGES_NODE_SKIP_RECLAIM 0x1 + +// +// Structures for osPackageRegistry and osUnpackageRegistry +// +typedef struct PACKED_REGISTRY_ENTRY +{ + NvU32 nameOffset; + NvU8 type; + NvU32 data; + NvU32 length; +} PACKED_REGISTRY_ENTRY; + +typedef struct PACKED_REGISTRY_TABLE +{ + NvU32 size; + NvU32 numEntries; + PACKED_REGISTRY_ENTRY entries[0]; +} PACKED_REGISTRY_TABLE; + +// TODO: Merge with NV_REGISTRY_ENTRY_TYPE +// +// Values for PACKED_REGISTRY_ENTRY::type +// +#define REGISTRY_TABLE_ENTRY_TYPE_UNKNOWN 0 +#define REGISTRY_TABLE_ENTRY_TYPE_DWORD 1 +#define REGISTRY_TABLE_ENTRY_TYPE_BINARY 2 +#define REGISTRY_TABLE_ENTRY_TYPE_STRING 3 + +typedef enum +{ + NV_REGISTRY_ENTRY_TYPE_UNKNOWN = 0, + NV_REGISTRY_ENTRY_TYPE_DWORD, + NV_REGISTRY_ENTRY_TYPE_BINARY, + NV_REGISTRY_ENTRY_TYPE_STRING +} nv_reg_type_t; + +/* + * nv_reg_entry_t + * + * regParmStr/regName + * Name of key + * type + * One of nv_reg_type_t enum + * data + * Integer data of key. Only used with DWORD type + * pdata + * Pointer to data of key. Only used with BINARY or STRING type + * len + * Length of pdata buffer. Only used with BINARY or STRING type + * next + * Next entry in linked list + */ +typedef struct nv_reg_entry_s +{ + char *regParmStr; + NvU32 type; + NvU32 data; + NvU8 *pdata; + NvU32 len; + struct nv_reg_entry_s *next; +} nv_reg_entry_t; + +/* + * OS_DRIVER_BLOCK + * + * driverStart + * CPU VA of where the driver is loaded + * unique_id + * Debug GUID of the Driver. 
Used to match with Pdb + * age + * Additional GUID information + * offset + * Offset from VA to start of text + */ +typedef struct { + NvP64 driverStart NV_ALIGN_BYTES(8); + NvU8 unique_id[16]; + NvU32 age; + NvU32 offset; +} OS_DRIVER_BLOCK; + +// Basic OS interface functions +typedef NvU32 OSSetEvent(OBJGPU *, NvP64); +typedef NV_STATUS OSEventNotification(OBJGPU *, PEVENTNOTIFICATION, NvU32, void *, NvU32); +typedef NV_STATUS OSEventNotificationWithInfo(OBJGPU *, PEVENTNOTIFICATION, NvU32, NvU32, NvU16, void *, NvU32); +typedef NV_STATUS OSObjectEventNotification(NvHandle, NvHandle, NvU32, PEVENTNOTIFICATION, NvU32, void *, NvU32); +typedef NV_STATUS NV_FORCERESULTCHECK OSAllocPages(MEMORY_DESCRIPTOR *); +typedef NV_STATUS NV_FORCERESULTCHECK OSAllocPagesInternal(MEMORY_DESCRIPTOR *); +typedef void OSFreePages(MEMORY_DESCRIPTOR *); +typedef void OSFreePagesInternal(MEMORY_DESCRIPTOR *); +typedef NV_STATUS NV_FORCERESULTCHECK OSLockMem(MEMORY_DESCRIPTOR *); +typedef NV_STATUS OSUnlockMem(MEMORY_DESCRIPTOR *); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapGPU(OBJGPU *, RS_PRIV_LEVEL, NvU64, NvU64, NvU32, NvP64 *, NvP64 *); +typedef void OSUnmapGPU(OS_GPU_INFO *, RS_PRIV_LEVEL, NvP64, NvU64, NvP64); +typedef NV_STATUS NV_FORCERESULTCHECK OSNotifyEvent(OBJGPU *, PEVENTNOTIFICATION, NvU32, NvU32, NV_STATUS); +typedef NV_STATUS OSReadRegistryString(OBJGPU *, const char *, NvU8 *, NvU32 *); +typedef NV_STATUS OSWriteRegistryBinary(OBJGPU *, const char *, NvU8 *, NvU32); +typedef NV_STATUS OSWriteRegistryVolatile(OBJGPU *, const char *, NvU8 *, NvU32); +typedef NV_STATUS OSReadRegistryVolatile(OBJGPU *, const char *, NvU8 *, NvU32); +typedef NV_STATUS OSReadRegistryVolatileSize(OBJGPU *, const char *, NvU32 *); +typedef NV_STATUS OSReadRegistryBinary(OBJGPU *, const char *, NvU8 *, NvU32 *); +typedef NV_STATUS OSWriteRegistryDword(OBJGPU *, const char *, NvU32); +typedef NV_STATUS OSReadRegistryDword(OBJGPU *, const char *, NvU32 *); +typedef NV_STATUS OSReadRegistryDwordBase(OBJGPU *, const char *, NvU32 *); +typedef NV_STATUS OSReadRegistryStringBase(OBJGPU *, const char *, NvU8 *, NvU32 *); +typedef NV_STATUS OSPackageRegistry(OBJGPU *, PACKED_REGISTRY_TABLE *, NvU32 *); +typedef NV_STATUS OSUnpackageRegistry(PACKED_REGISTRY_TABLE *); +typedef NvBool OSQueueDpc(OBJGPU *); +typedef void OSFlushCpuWriteCombineBuffer(void); +typedef NV_STATUS OSNumaMemblockSize(NvU64 *); +typedef NvBool OSNumaOnliningEnabled(OS_GPU_INFO *); +typedef NV_STATUS OSAllocPagesNode(NvS32, NvLength, NvU32, NvU64 *); +typedef void OSAllocAcquirePage(NvU64, NvU32); +typedef void OSAllocReleasePage(NvU64, NvU32); +typedef NvU32 OSGetPageRefcount(NvU64); +typedef NvU32 OSCountTailPages(NvU64); +typedef NvU64 OSGetPageSize(void); +typedef NvU64 OSGetSupportedSysmemPageSizeMask(void); +typedef NvU8 OSGetPageShift(void); + +typedef NV_STATUS NV_FORCERESULTCHECK OSAcquireRmSema(void *); +typedef NvBool NV_FORCERESULTCHECK OSIsRmSemaOwner(void *); +typedef NV_STATUS NV_FORCERESULTCHECK OSCondAcquireRmSema(void *); +typedef NvU32 OSReleaseRmSema(void *, OBJGPU *); + +#define DPC_RELEASE_ALL_GPU_LOCKS (1) +#define DPC_RELEASE_SINGLE_GPU_LOCK (2) + +typedef NV_STATUS OSGpuLocksQueueRelease(OBJGPU *pGpu, NvU32 dpcGpuLockRelease); +typedef NvU32 OSApiLockAcquireConfigureFlags(NvU32 flags); + +typedef NvU32 OSGetCpuCount(void); +typedef NvU32 OSGetMaximumCoreCount(void); +typedef NvU32 OSGetCurrentProcessorNumber(void); +typedef NV_STATUS OSDelay(NvU32); +typedef NV_STATUS OSDelayUs(NvU32); +typedef NV_STATUS OSDelayNs(NvU32); 
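[Editor's note: the packed-registry structures above define a simple serialized format: a fixed-size PACKED_REGISTRY_TABLE header followed by numEntries descriptors, with names and any payloads packed into the same buffer. The consumer-side sketch below is not part of the patch; it assumes, for illustration only, that nameOffset (and, for BINARY/STRING entries, data) are byte offsets from the start of the table and that length gives the payload size in bytes.]

static void walkPackedRegistry(const PACKED_REGISTRY_TABLE *pTable)
{
    const NvU8 *pBase = (const NvU8 *)pTable;
    NvU32 i;

    for (i = 0; i < pTable->numEntries; i++)
    {
        const PACKED_REGISTRY_ENTRY *pEntry = &pTable->entries[i];

        // Assumed layout: entry names live in the same buffer, at
        // nameOffset bytes from the start of the table.
        const char *pName = (const char *)(pBase + pEntry->nameOffset);

        switch (pEntry->type)
        {
            case REGISTRY_TABLE_ENTRY_TYPE_DWORD:
                // For DWORD entries, 'data' holds the 32-bit value itself.
                break;
            case REGISTRY_TABLE_ENTRY_TYPE_BINARY:
            case REGISTRY_TABLE_ENTRY_TYPE_STRING:
                // Assumed: 'data' is the payload offset, 'length' its size.
                break;
            default:
                // REGISTRY_TABLE_ENTRY_TYPE_UNKNOWN or a newer type; skip.
                break;
        }
        (void)pName; // name lookup shown for illustration only
    }
}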
+typedef void OSSpinLoop(void); +typedef NvU64 OSGetMaxUserVa(void); +typedef NvU32 OSGetCpuVaAddrShift(void); +typedef NvU32 OSGetCurrentProcess(void); +typedef void OSGetCurrentProcessName(char *, NvU32); +typedef NvU32 OSGetCurrentPasid(void); +typedef NV_STATUS OSGetCurrentThread(OS_THREAD_HANDLE *); +typedef NV_STATUS OSAttachToProcess(void **, NvU32); +typedef void OSDetachFromProcess(void*); +typedef NV_STATUS OSVirtualToPhysicalAddr(MEMORY_DESCRIPTOR *, NvP64, RmPhysAddr *); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapPciMemoryUser(OS_GPU_INFO *, RmPhysAddr, NvU64, NvU32, NvP64 *, NvP64 *, NvU32); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapPciMemoryAreaUser(OS_GPU_INFO *, MemoryArea, NvU32, NvU32, NvP64 *, NvP64 *); +typedef void OSUnmapPciMemoryUser(OS_GPU_INFO *, NvP64, NvU64, NvP64); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapPciMemoryKernelOld(OBJGPU *, RmPhysAddr, NvU64, NvU32, void **, NvU32); +typedef void OSUnmapPciMemoryKernelOld(OBJGPU *, void *); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapPciMemoryKernel64(OBJGPU *, RmPhysAddr, NvU64, NvU32, NvP64 *, NvU32); +typedef void OSUnmapPciMemoryKernel64(OBJGPU *, NvP64); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapSystemMemory(MEMORY_DESCRIPTOR *, NvU64, NvU64, NvBool, NvU32, NvP64*, NvP64*); +typedef void OSUnmapSystemMemory(MEMORY_DESCRIPTOR *, NvBool, NvP64, NvP64); +typedef NvBool OSLockShouldToggleInterrupts(OBJGPU *); +typedef NV_STATUS OSGetPerformanceCounter(NvU64 *); +NvBool osDbgBreakpointEnabled(void); +typedef NV_STATUS OSAttachGpu(OBJGPU *, void *); +typedef NV_STATUS OSDpcAttachGpu(OBJGPU *, void *); +typedef void OSDpcDetachGpu(OBJGPU *); +typedef NV_STATUS OSHandleGpuLost(OBJGPU *); +typedef void OSHandleGpuSurpriseRemoval(OBJGPU *); +typedef void OSInitScalabilityOptions(OBJGPU *, void *); +typedef void OSHandleDeferredRecovery(OBJGPU *); +typedef NvBool OSIsSwPreInitOnly(OS_GPU_INFO *); + +typedef void OSGetTimeoutParams(OBJGPU *, NvU32 *, NvU32 *, NvU32 *); +typedef NvBool OSIsRaisedIRQL(void); +typedef NvBool OSIsISR(void); +typedef NV_STATUS OSGetDriverBlock(OS_GPU_INFO *, OS_DRIVER_BLOCK *); +typedef NvBool OSIsEqualGUID(void *, void *); + +#define OS_QUEUE_WORKITEM_FLAGS_NONE 0x00000000 +#define OS_QUEUE_WORKITEM_FLAGS_DONT_FREE_PARAMS NVBIT(0) +#define OS_QUEUE_WORKITEM_FLAGS_FALLBACK_TO_DPC NVBIT(1) +// +// Lock flags: +// Only one of the LOCK_GPU flags should be provided. If multiple are, +// the priority ordering should be GPUS > GROUP_DEVICE > GROUP_SUBDEVICE +// +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA NVBIT(8) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW NVBIT(9) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO NVBIT(10) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS NVBIT(11) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE NVBIT(12) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE NVBIT(13) +// +// Perform a GPU full power sanity after getting GPU locks. +// One of the above LOCK_GPU flags must be provided when using this flag. 
+// +#define OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY NVBIT(14) +#define OS_QUEUE_WORKITEM_FLAGS_FOR_PM_RESUME NVBIT(15) + +#define OS_QUEUE_WORKITEM_FLAGS_DROP_ON_UNLOAD_QUEUE_FLUSH NVBIT(16) +typedef void OSWorkItemFunction(NvU32 gpuInstance, void *); +typedef void OSSystemWorkItemFunction(void *); +NV_STATUS osQueueWorkItem(OBJGPU *pGpu, OSWorkItemFunction pFunction, void *pParams, NvU32 flags); + +NV_STATUS osQueueSystemWorkItem(OSSystemWorkItemFunction, void *); + +// MXM ACPI calls +NV_STATUS osCallACPI_MXMX(OBJGPU *, NvU32, NvU8 *); +NV_STATUS osCallACPI_DDC(OBJGPU *, NvU32, NvU8*,NvU32*, NvBool); +NV_STATUS osCallACPI_BCL(OBJGPU *, NvU32, NvU32 *, NvU16 *); + +// Display MUX ACPI calls +NV_STATUS osCallACPI_MXDS(OBJGPU *, NvU32, NvU32 *); +NV_STATUS osCallACPI_MXDM(OBJGPU *, NvU32, NvU32 *); +NV_STATUS osCallACPI_MXID(OBJGPU *, NvU32, NvU32 *); +NV_STATUS osCallACPI_LRST(OBJGPU *, NvU32, NvU32 *); + +// Hybrid GPU ACPI calls +NV_STATUS osCallACPI_NVHG_GPUON(OBJGPU *, NvU32 *); +NV_STATUS osCallACPI_NVHG_GPUOFF(OBJGPU *, NvU32 *); +NV_STATUS osCallACPI_NVHG_GPUSTA(OBJGPU *, NvU32 *); +NV_STATUS osCallACPI_NVHG_MXDS(OBJGPU *, NvU32, NvU32 *); +NV_STATUS osCallACPI_NVHG_MXMX(OBJGPU *, NvU32, NvU32 *); +NV_STATUS osCallACPI_NVHG_DOS(OBJGPU *, NvU32, NvU32 *); +NV_STATUS osCallACPI_NVHG_ROM(OBJGPU *, NvU32 *, NvU32 *); +NV_STATUS osCallACPI_NVHG_DCS(OBJGPU *, NvU32, NvU32 *); +NV_STATUS osCallACPI_DOD(OBJGPU *, NvU32 *, NvU32 *); + +// Optimus WMI ACPI calls +NV_STATUS osCallACPI_OPTM_GPUON(OBJGPU *); + +// Generic ACPI _DSM call +NV_STATUS osCallACPI_DSM(OBJGPU *pGpu, ACPI_DSM_FUNCTION acpiDSMFunction, + NvU32 NVHGDSMSubfunction, NvU32 *pInOut, NvU16 *size); + +// UEFI variable calls +NV_STATUS osGetUefiVariable(const char *, LPGUID, NvU8 *, NvU32 *); + +// The following functions are also implemented in WinNT +void osQADbgRegistryInit(void); +typedef NV_STATUS OSGetVersionDump(void *); +// End of WinNT + +NvU32 osNv_rdcr4(void); +NvU64 osNv_rdxcr0(void); +int osNv_cpuid(int, int, NvU32 *, NvU32 *, NvU32 *, NvU32 *); + +// NOTE: The following functions are also implemented in MODS +NV_STATUS osSimEscapeWrite(OBJGPU *, const char *path, NvU32 Index, NvU32 Size, NvU32 Value); +NV_STATUS osSimEscapeWriteBuffer(OBJGPU *, const char *path, NvU32 Index, NvU32 Size, void* pBuffer); +NV_STATUS osSimEscapeRead(OBJGPU *, const char *path, NvU32 Index, NvU32 Size, NvU32 *Value); +NV_STATUS osSimEscapeReadBuffer(OBJGPU *, const char *path, NvU32 Index, NvU32 Size, void* pBuffer); +NvU32 osGetSimulationMode(void); +typedef void OSLogString(const char*, ...); +typedef void OSFlushLog(void); +typedef void OSSetSurfaceName(void *pDescriptor, char *name); + +// End of MODS functions + +//Vista Specific Functions + +NV_STATUS osSetupVBlank(OBJGPU *pGpu, void * pProc, + void * pParm1, void * pParm2, NvU32 Head, void * pParm3); + +// Heap reserve tracking functions +void osInternalReserveAllocCallback(NvU64 offset, NvU64 size, NvU32 gpuId); +void osInternalReserveFreeCallback(NvU64 offset, NvU32 gpuId); + +// +// OS Functions typically only implemented for MODS +// Note: See comments above for other functions that +// are also implemented on MODS as well as other +// OS's. 
+// + +NV_STATUS osRmInitRm(void); + +typedef NvU32 OSPollHotkeyState(OBJGPU *); + +typedef void OSSyncWithRmDestroy(void); +typedef void OSSyncWithGpuDestroy(NvBool); + +typedef void OSModifyGpuSwStatePersistence(OS_GPU_INFO *, NvBool); + +typedef NV_STATUS OSGetCarveoutInfo(NvU64*, NvU64*); +typedef NV_STATUS OSGetVPRInfo(NvU64*, NvU64*); +typedef NV_STATUS OSAllocInVPR(MEMORY_DESCRIPTOR*); +typedef NV_STATUS OSGetGenCarveout(NvU64*, NvU64 *, NvU32, NvU64); +typedef NV_STATUS OSGetSysmemInfo(OBJGPU *, NvU64*, NvU64*); + +typedef NV_STATUS OSI2CClosePorts(OS_GPU_INFO *, NvU32); +typedef NV_STATUS OSWriteI2CBufferDirect(OBJGPU *, NvU32, NvU8, void *, NvU32, void *, NvU32); +typedef NV_STATUS OSReadI2CBufferDirect(OBJGPU *, NvU32, NvU8, void *, NvU32, void *, NvU32); +typedef NV_STATUS OSI2CTransfer(OBJGPU *, NvU32, NvU8, nv_i2c_msg_t *, NvU32); +typedef NV_STATUS OSSetGpuRailVoltage(OBJGPU *, NvU32, NvU32*); +typedef NV_STATUS OSGetGpuRailVoltage(OBJGPU *, NvU32*); +typedef NV_STATUS OSGetGpuRailVoltageInfo(OBJGPU *, NvU32 *, NvU32 *, NvU32 *); + +typedef NV_STATUS OSGC6PowerControl(OBJGPU *, NvU32, NvU32 *); + +RmPhysAddr osPageArrayGetPhysAddr(OS_GPU_INFO *pOsGpuInfo, void* pPageData, NvU32 pageIndex); +typedef NV_STATUS OSGetChipInfo(OBJGPU *, NvU32*, NvU32*, NvU32*, NvU32*); + +typedef enum +{ + RC_CALLBACK_IGNORE, + RC_CALLBACK_ISOLATE, + RC_CALLBACK_ISOLATE_NO_RESET, +} RC_CALLBACK_STATUS; +RC_CALLBACK_STATUS osRCCallback(OBJGPU *, NvHandle, NvHandle, NvHandle, NvHandle, NvU32, NvU32, NvU32 *, void *); +NvBool osCheckCallback(OBJGPU *); +RC_CALLBACK_STATUS osRCCallback_v2(OBJGPU *, NvHandle, NvHandle, NvHandle, NvHandle, NvU32, NvU32, NvBool, NvU32 *, void *); +NvBool osCheckCallback_v2(OBJGPU *); +typedef NV_STATUS OSReadPFPciConfigInVF(NvU32, NvU32*); + +// Actual definition of the OBJOS structure + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_OS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJOS; +struct NVOC_METADATA__Object; + + +struct OBJOS { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__OBJOS *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Object __nvoc_base_Object; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct OBJOS *__nvoc_pbase_OBJOS; // os + + // 12 PDB properties + NvBool PDB_PROP_OS_PAT_UNSUPPORTED; + NvBool PDB_PROP_OS_SLI_ALLOWED; + NvBool PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED; + NvBool PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT; + NvBool PDB_PROP_OS_WAIT_FOR_ACPI_SUBSYSTEM; + NvBool PDB_PROP_OS_UNCACHED_MEMORY_MAPPINGS_NOT_SUPPORTED; + NvBool PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE; + NvBool PDB_PROP_OS_LIMIT_GPU_RESET; + NvBool PDB_PROP_OS_SUPPORTS_TDR; + NvBool PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER; + NvBool PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS; + NvBool PDB_PROP_OS_NO_PAGED_SEGMENT_ACCESS; + + // Data members + NvBool bIsSimMods; +}; + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJOS { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; +}; + +#ifndef __NVOC_CLASS_OBJOS_TYPEDEF__ +#define __NVOC_CLASS_OBJOS_TYPEDEF__ +typedef struct OBJOS OBJOS; +#endif /* __NVOC_CLASS_OBJOS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOS +#define __nvoc_class_id_OBJOS 0xaa1d70 +#endif /* __nvoc_class_id_OBJOS */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJOS; + +#define __staticCast_OBJOS(pThis) \ + ((pThis)->__nvoc_pbase_OBJOS) + +#ifdef __nvoc_os_h_disabled +#define __dynamicCast_OBJOS(pThis) ((OBJOS*) NULL) +#else //__nvoc_os_h_disabled +#define __dynamicCast_OBJOS(pThis) \ + ((OBJOS*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJOS))) +#endif //__nvoc_os_h_disabled + +// Property macros +#define PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT_BASE_CAST +#define PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT_BASE_NAME PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT +#define PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER_BASE_CAST +#define PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER_BASE_NAME PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER +#define PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS_BASE_CAST +#define PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS_BASE_NAME PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS +#define PDB_PROP_OS_NO_PAGED_SEGMENT_ACCESS_BASE_CAST +#define PDB_PROP_OS_NO_PAGED_SEGMENT_ACCESS_BASE_NAME PDB_PROP_OS_NO_PAGED_SEGMENT_ACCESS +#define PDB_PROP_OS_WAIT_FOR_ACPI_SUBSYSTEM_BASE_CAST +#define PDB_PROP_OS_WAIT_FOR_ACPI_SUBSYSTEM_BASE_NAME PDB_PROP_OS_WAIT_FOR_ACPI_SUBSYSTEM +#define PDB_PROP_OS_UNCACHED_MEMORY_MAPPINGS_NOT_SUPPORTED_BASE_CAST +#define PDB_PROP_OS_UNCACHED_MEMORY_MAPPINGS_NOT_SUPPORTED_BASE_NAME PDB_PROP_OS_UNCACHED_MEMORY_MAPPINGS_NOT_SUPPORTED +#define PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE_BASE_CAST +#define PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE_BASE_NAME PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE +#define PDB_PROP_OS_SUPPORTS_TDR_BASE_CAST +#define PDB_PROP_OS_SUPPORTS_TDR_BASE_NAME PDB_PROP_OS_SUPPORTS_TDR +#define PDB_PROP_OS_LIMIT_GPU_RESET_BASE_CAST +#define PDB_PROP_OS_LIMIT_GPU_RESET_BASE_NAME PDB_PROP_OS_LIMIT_GPU_RESET +#define PDB_PROP_OS_PAT_UNSUPPORTED_BASE_CAST +#define PDB_PROP_OS_PAT_UNSUPPORTED_BASE_NAME PDB_PROP_OS_PAT_UNSUPPORTED +#define PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED_BASE_CAST +#define PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED_BASE_NAME PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED +#define PDB_PROP_OS_SLI_ALLOWED_BASE_CAST +#define PDB_PROP_OS_SLI_ALLOWED_BASE_NAME PDB_PROP_OS_SLI_ALLOWED + +NV_STATUS 
__nvoc_objCreateDynamic_OBJOS(OBJOS**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJOS(OBJOS**, Dynamic*, NvU32); +#define __objCreate_OBJOS(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJOS((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros + +// Dispatch functions +#undef PRIVATE_FIELD + + +NV_STATUS addProbe(OBJGPU *, NvU32); + + +typedef NV_STATUS OSFlushCpuCache(void); + +typedef void OSAddRecordForCrashLog(void *, NvU32); +typedef void OSDeleteRecordForCrashLog(void *); + +OSFlushCpuCache osFlushCpuCache; +OSAddRecordForCrashLog osAddRecordForCrashLog; +OSDeleteRecordForCrashLog osDeleteRecordForCrashLog; + +NV_STATUS osTegraSocPowerManagement(OS_GPU_INFO *pOsGpuInfo, + NvBool bInPMTransition, + NvU32 newPMLevel); + +// +// This file should only contain the most common OS functions that provide +// direct call. Ex. osDelay, osIsAdministrator +// +NV_STATUS osTegraSocPmPowergate(OBJGPU *pGpu); +NV_STATUS osTegraSocPmUnpowergate(OBJGPU *pGpu); +NV_STATUS osTegraSocDeviceReset(OS_GPU_INFO *pOsGpuInfo); +NV_STATUS osTegraSocBpmpSendMrq(OBJGPU *pGpu, + NvU32 mrq, + const void *pRequestData, + NvU32 requestDataSize, + void *pResponseData, + NvU32 responseDataSize, + NvS32 *pRet, + NvS32 *pApiRet); +NV_STATUS osMapGsc(NvU64 gsc_base, NvU64 *va); +NV_STATUS osTegraSocGetImpImportData(OBJGPU *pGpu, TEGRA_IMP_IMPORT_DATA *pTegraImpImportData); +NV_STATUS osTegraSocEnableDisableRfl(OS_GPU_INFO *pOsGpuInfo, NvBool bEnable); +NV_STATUS osTegraAllocateDisplayBandwidth(OS_GPU_INFO *pOsGpuInfo, + NvU32 averageBandwidthKBPS, + NvU32 floorBandwidthKBPS); + +NV_STATUS osGetCurrentProcessGfid(NvU32 *pGfid); +NvBool osIsAdministrator(void); +NvBool osCheckAccess(RsAccessRight accessRight); +NV_STATUS osGetSystemTime(NvU32 *pSec,NvU32 *puSec); +NvU64 osGetMonotonicTimeNs(void); +NvU64 osGetMonotonicTickResolutionNs(void); +NvU64 osGetTimestamp(void); +NvU64 osGetTimestampFreq(void); + +NV_STATUS osDeferredIsr(OBJGPU *pGpu); + +void osEnableInterrupts(OBJGPU *pGpu); + +void osDisableInterrupts(OBJGPU *pGpu, + NvBool bIsr); + +void osBugCheck(NvU32 bugCode); +void osAssertFailed(void); + +// OS PCI R/W functions +void *osPciInitHandle(NvU32 domain, NvU8 bus, NvU8 slot, NvU8 function, + NvU16 *pVendor, NvU16 *pDevice); +NvU32 osPciReadDword(void *pHandle, NvU32 offset); +NvU16 osPciReadWord(void *pHandle, NvU32 offset); +NvU8 osPciReadByte(void *pHandle, NvU32 offset); +void osPciWriteDword(void *pHandle, NvU32 offset, NvU32 value); +void osPciWriteWord(void *pHandle, NvU32 offset, NvU16 value); +void osPciWriteByte(void *pHandle, NvU32 offset, NvU8 value); + +// OS RM capabilities calls + +void osRmCapInitDescriptor(NvU64 *pCapDescriptor); +NV_STATUS osRmCapAcquire(OS_RM_CAPS *pOsRmCaps, NvU32 rmCap, + NvU64 capDescriptor, + NvU64 *dupedCapDescriptor); +void osRmCapRelease(NvU64 dupedCapDescriptor); +NV_STATUS osRmCapRegisterGpu(OS_GPU_INFO *pOsGpuInfo, OS_RM_CAPS **ppOsRmCaps); +void osRmCapUnregister(OS_RM_CAPS **ppOsRmCaps); +NV_STATUS osRmCapRegisterSmcPartition(OS_RM_CAPS *pGpuOsRmCaps, + OS_RM_CAPS **ppPartitionOsRmCaps, + NvU32 partitionId); +NV_STATUS osRmCapRegisterSmcExecutionPartition( + OS_RM_CAPS *pPartitionOsRmCaps, + OS_RM_CAPS **ppExecPartitionOsRmCaps, + NvU32 execPartitionId); +NV_STATUS osRmCapRegisterSys(OS_RM_CAPS **ppOsRmCaps); + +NvBool osImexChannelIsSupported(void); +NvS32 osImexChannelGet(NvU64 descriptor); +NvS32 osImexChannelCount(void); + +NV_STATUS osGetRandomBytes(NvU8 *pBytes, NvU16 numBytes); + +NV_STATUS 
osAllocWaitQueue(OS_WAIT_QUEUE **ppWq); +void osFreeWaitQueue(OS_WAIT_QUEUE *pWq); +void osWaitUninterruptible(OS_WAIT_QUEUE *pWq); +void osWaitInterruptible(OS_WAIT_QUEUE *pWq); +void osWakeUp(OS_WAIT_QUEUE *pWq); + +NvU32 osGetDynamicPowerSupportMask(void); + +void osUnrefGpuAccessNeeded(OS_GPU_INFO *pOsGpuInfo); +NV_STATUS osRefGpuAccessNeeded(OS_GPU_INFO *pOsGpuInfo); + +NvU32 osGetGridCspSupport(void); + +NV_STATUS osIovaMap(PIOVAMAPPING pIovaMapping); +void osIovaUnmap(PIOVAMAPPING pIovaMapping); +NV_STATUS osGetAtsTargetAddressRange(OBJGPU *pGpu, + NvU64 *pAddr, + NvU32 *pAddrWidth, + NvU32 *pMask, + NvU32 *pMaskWidth, + NvBool bIsPeer, + NvU32 peerIndex); +NV_STATUS osGetFbNumaInfo(OBJGPU *pGpu, + NvU64 *pAddrPhys, + NvU64 *pAddrRsvdPhys, + NvS32 *pNodeId); +NV_STATUS osGetEgmInfo(OBJGPU *pGpu, + NvU64 *pPhysAddr, + NvU64 *pSize, + NvS32 *pNodeId); +NV_STATUS osGetForcedNVLinkConnection(OBJGPU *pGpu, + NvU32 maxLinks, + NvU32 *pLinkConnection); +NV_STATUS osGetForcedC2CConnection(OBJGPU *pGpu, + NvU32 maxLinks, + NvU32 *pLinkConnection); +NV_STATUS osGetPlatformNvlinkLinerate(OBJGPU *pGpu,NvU32 *lineRate); +const struct nvlink_link_handlers* osGetNvlinkLinkCallbacks(void); + +void osRemoveGpu(NvU32 domain, NvU8 bus, NvU8 device); +NvBool osRemoveGpuSupported(void); + +void initVGXSpecificRegistry(OBJGPU *); + +NV_STATUS nv_vgpu_rm_get_bar_info(OBJGPU *pGpu, const NvU8 *pVgpuDevName, NvU64 *barSizes, + NvU64 *sparseOffsets, NvU64 *sparseSizes, + NvU32 *sparseCount, NvBool *isBar064bit, + NvU8 *configParams); +NV_STATUS osIsVgpuVfioPresent(void); +NV_STATUS osIsVfioPciCorePresent(void); +NV_STATUS osIsVgpuDeviceVmPresent(void); +void osWakeRemoveVgpu(NvU32, NvU32); +NV_STATUS osLockPageableDataSection(RM_PAGEABLE_SECTION *pSection); +NV_STATUS osUnlockPageableDataSection(RM_PAGEABLE_SECTION *pSection); + +void osFlushGpuCoherentCpuCacheRange(OS_GPU_INFO *pOsGpuInfo, + NvU64 cpuVirtual, + NvU64 size); +NvBool osUidTokensEqual(PUID_TOKEN arg1, PUID_TOKEN arg2); + +NV_STATUS osValidateClientTokens(PSECURITY_TOKEN arg1, + PSECURITY_TOKEN arg2); +PUID_TOKEN osGetCurrentUidToken(void); +PSECURITY_TOKEN osGetSecurityToken(void); + +NV_STATUS osIsKernelBuffer(void *pArg1, NvU32 arg2); + +NV_STATUS osMapViewToSection(OS_GPU_INFO *pArg1, + void *pSectionHandle, + void **ppAddress, + NvU64 actualSize, + NvU64 sectionOffset, + NvBool bIommuEnabled); +NV_STATUS osUnmapViewFromSection(OS_GPU_INFO *pArg1, + void *pAddress, + NvBool bIommuEnabled); + +NV_STATUS osOpenTemporaryFile(void **ppFile); +void osCloseFile(void *pFile); +NV_STATUS osWriteToFile(void *pFile, NvU8 *buffer, + NvU64 size, NvU64 offset); +NV_STATUS osReadFromFile(void *pFile, NvU8 *buffer, + NvU64 size, NvU64 offset); + +NV_STATUS osSrPinSysmem(OS_GPU_INFO *pArg1, + NvU64 commitSize, + void *pMdl); +NV_STATUS osSrUnpinSysmem(OS_GPU_INFO *pArg1); + +void osPagedSegmentAccessCheck(void); + +NV_STATUS osCreateMemFromOsDescriptorInternal(OBJGPU *pGpu, void *pAddress, + NvU32 flags, NvU64 size, + MEMORY_DESCRIPTOR **ppMemDesc, + NvBool bCachedKernel, + RS_PRIV_LEVEL privilegeLevel); + +NV_STATUS osReserveCpuAddressSpaceUpperBound(void **ppSectionHandle, + NvU64 maxSectionSize); +void osReleaseCpuAddressSpaceUpperBound(void *pSectionHandle); + +void* osGetPidInfo(void); +void osPutPidInfo(void *pOsPidInfo); +NV_STATUS osFindNsPid(void *pOsPidInfo, NvU32 *pNsPid); +NvBool osIsInitNs(void); + +// OS Tegra IPC functions +NV_STATUS osTegraDceRegisterIpcClient(NvU32 interfaceType, void *usrCtx, + NvU32 *clientId); +NV_STATUS 
osTegraDceClientIpcSendRecv(NvU32 clientId, void *msg, + NvU32 msgLength); +NV_STATUS osTegraDceUnregisterIpcClient(NvU32 clientId); + +// +// Define OS-layer specific type instead of #include "clk_domains.h" for +// CLKWHICH, avoids upwards dependency from OS interface on higher level +// RM modules +// +typedef NvU32 OS_CLKWHICH; + +NV_STATUS osTegraSocEnableClk(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM); +NV_STATUS osTegraSocDisableClk(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM); +NV_STATUS osTegraSocGetCurrFreqKHz(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM, NvU32 *pCurrFreqKHz); +NV_STATUS osTegraSocGetMaxFreqKHz(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM, NvU32 *pMaxFreqKHz); +NV_STATUS osTegraSocGetMinFreqKHz(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM, NvU32 *pMinFreqKHz); +NV_STATUS osTegraSocSetFreqKHz(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM, NvU32 reqFreqKHz); +NV_STATUS osTegraSocSetParent(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRMsource, OS_CLKWHICH whichClkRMparent); +NV_STATUS osTegraSocGetParent(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRMsource, OS_CLKWHICH *pWhichClkRMparent); + +NV_STATUS osTegraSocDeviceReset(OS_GPU_INFO *pOsGpuInfo); +NV_STATUS osTegraSocPmPowergate(OBJGPU *pGpu); +NV_STATUS osTegraSocPmUnpowergate(OBJGPU *pGpu); +NV_STATUS osGetSyncpointAperture(OS_GPU_INFO *pOsGpuInfo, + NvU32 syncpointId, + NvU64 *physAddr, + NvU64 *limit, + NvU32 *offset); +NV_STATUS osTegraI2CGetBusState(OS_GPU_INFO *pOsGpuInfo, NvU32 port, NvS32 *scl, NvS32 *sda); +NV_STATUS osTegraSocParseFixedModeTimings(OS_GPU_INFO *pOsGpuInfo, + NvU32 dcbIndex, + NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS *pTimingsPerStream, + NvU8 *pNumTimings); + +NV_STATUS osTegraiGpuPerfBoost(OBJGPU *pGpu, NvBool enable, NvU32 duration); + +NV_STATUS osGetVersion(NvU32 *pMajorVer, + NvU32 *pMinorVer, + NvU32 *pBuildNum, + NvU16 *pServicePackMaj, + NvU16 *pProductType); + +NV_STATUS osGetIsOpenRM(NvBool *bOpenRm); + +NvBool osGrService(OS_GPU_INFO *pOsGpuInfo, NvU32 grIdx, NvU32 intr, NvU32 nstatus, NvU32 addr, NvU32 dataLo); + +NvBool osDispService(NvU32 Intr0, NvU32 Intr1); + +NV_STATUS osReferenceObjectCount(void *pEvent); + +NV_STATUS osDereferenceObjectCount(void *pEvent); + +// +// Perform OS-specific error logging. +// Like libc's vsnprintf(), osErrorLogV() invalidates its va_list argument. The va_list argument +// may not be reused after osErrorLogV() returns. If the va_list is needed after the +// osErrorLogV() call, create a copy of the va_list using va_copy(). +// The caller controls the lifetime of the va_list argument, and should free it using va_end. 
+// +void osErrorLogV(OBJGPU *pGpu, XidContext context, const char * pFormat, va_list arglist); +void osErrorLog(OBJGPU *pGpu, NvU32 num, const char* pFormat, ...); + +NV_STATUS osNvifInitialize(OBJGPU *pGpu); + +NV_STATUS osNvifMethod(OBJGPU *pGpu, NvU32 func, + NvU32 subFunc, void *pInParam, + NvU16 inParamSize, NvU32 *pOutStatus, + void *pOutData, NvU16 *pOutDataSize); + +NV_STATUS osCreateMemFromOsDescriptor(OBJGPU *pGpu, NvP64 pDescriptor, + NvHandle hClient, NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + NvU32 descriptorType, + RS_PRIV_LEVEL privilegeLevel); + +void* osMapKernelSpace(RmPhysAddr Start, + NvU64 Size, + NvU32 Mode, + NvU32 Protect); + +void osUnmapKernelSpace(void *addr, NvU64 size); + +NvBool osTestPcieExtendedConfigAccess(void *handle, NvU32 offset); + +NvU32 osGetCpuFrequency(void); + +void osIoWriteByte(NvU32 Address, NvU8 Value); + +NvU8 osIoReadByte(NvU32 Address); + +void osIoWriteWord(NvU32 Address, NvU16 Value); + +NvU16 osIoReadWord(NvU32 Address); + +void osIoWriteDword(NvU32 port, NvU32 data); + +NvU32 osIoReadDword(NvU32 port); + +// OS functions to get memory pages + +NV_STATUS osGetNumMemoryPages (MEMORY_DESCRIPTOR *pMemDesc, NvU32 *pNumPages); +NV_STATUS osGetMemoryPages (MEMORY_DESCRIPTOR *pMemDesc, void *pPages, NvU32 *pNumPages); + +NV_STATUS osGetAcpiTable(NvU32 tableSignature, + void **ppTable, + NvU32 tableSize, + NvU32 *retSize); + +NV_STATUS osInitGetAcpiTable(void); + +// Read NvGlobal regkey +NV_STATUS osGetNvGlobalRegistryDword(OBJGPU *, const char *pRegParmStr, NvU32 *pData); + +NV_STATUS osGetAcpiRsdpFromUefi(NvU32 *pRsdpAddr); + +NV_STATUS osCreateNanoTimer(OS_GPU_INFO *pArg1, + void *tmrEvent, + void **tmrUserData); + +NV_STATUS osStartNanoTimer(OS_GPU_INFO *pArg1, + void *pTimer, + NvU64 timeNs); + +NV_STATUS osCancelNanoTimer(OS_GPU_INFO *pArg1, + void *pArg2); + +NV_STATUS osDestroyNanoTimer(OS_GPU_INFO *pArg1, + void *pArg2); + +NV_STATUS osGetValidWindowHeadMask(OS_GPU_INFO *pArg1, + NvU64 *pWindowHeadMask); + +NV_STATUS osSchedule(void); + +void osDmaSetAddressSize(OS_GPU_INFO *pArg1, + NvU32 bits); + +void osClientGcoffDisallowRefcount(OS_GPU_INFO *pArg1, + NvBool arg2); + +NV_STATUS osTegraSocGpioGetPinState(OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 *pArg3); + +void osTegraSocGpioSetPinState(OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3); + +NV_STATUS osTegraSocGpioSetPinDirection(OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3); + +NV_STATUS osTegraSocGpioGetPinDirection(OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 *pArg3); + +NV_STATUS osTegraSocGpioGetPinNumber(OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 *pArg3); + +NV_STATUS osTegraSocGpioGetPinInterruptStatus(OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3, + NvBool *pArg4); + +NV_STATUS osTegraSocGpioSetPinInterrupt(OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3); + +NV_STATUS osTegraSocDsiParsePanelProps(OS_GPU_INFO *pArg1, + void *pArg2); + +NvBool osTegraSocIsDsiPanelConnected(OS_GPU_INFO *pArg1); + +NV_STATUS osTegraSocDsiPanelEnable(OS_GPU_INFO *pArg1, + void *pArg2); + +NV_STATUS osTegraSocDsiPanelReset(OS_GPU_INFO *pArg1, + void *pArg2); + +void osTegraSocDsiPanelDisable(OS_GPU_INFO *pArg1, + void *pArg2); + +void osTegraSocDsiPanelCleanup(OS_GPU_INFO *pArg1, + void *pArg2); + +NV_STATUS osTegraSocResetMipiCal(OS_GPU_INFO *pArg1); + +NV_STATUS osGetTegraNumDpAuxInstances(OS_GPU_INFO *pArg1, + NvU32 *pArg2); + +NvU32 osTegraSocFuseRegRead(OBJGPU *pGpu, NvU32 addr); +NV_STATUS osTegraKfuseReadReg(OBJGPU *pGpu, NvU64 base, NvU32 offset, NvU32 size, NvU32 
*val); + +typedef void (*osTegraTsecCbFunc)(void*, void*); + +NvU32 osTegraSocTsecSendCmd(OBJGPU *pGpu, void* cmd, osTegraTsecCbFunc cbFunc, void* cbContext); + +NvU32 osTegraSocTsecEventRegister(OBJGPU *pGpu, osTegraTsecCbFunc cbFunc, void* cbContext, NvBool isInitEvent); + +NvU32 osTegraSocTsecEventUnRegister(OBJGPU *pGpu, NvBool isInitEvent); + +void* osTegraSocTsecAllocMemDesc(OBJGPU *pGpu, NvU32 numBytes, NvU32 *flcnAddr); + +void osTegraSocTsecFreeMemDesc(OBJGPU *pGpu, void *memDesc); + +NV_STATUS osTegraSocHspSemaphoreAcquire(OBJGPU *pGpu, NvU32 ownerId, NvBool bAcquire, NvU64 timeout); + +NV_STATUS osTegraSocDpUphyPllInit(OS_GPU_INFO *pArg1, NvU32, NvU32); + +NV_STATUS osTegraSocDpUphyPllDeInit(OS_GPU_INFO *pArg1); + +NV_STATUS osGetCurrentIrqPrivData(OS_GPU_INFO *pArg1, + NvU32 *pArg2); + +NV_STATUS osGetTegraBrightnessLevel(OS_GPU_INFO *pArg1, + NvU32 *pArg2); + +NV_STATUS osSetTegraBrightnessLevel(OS_GPU_INFO *pArg1, + NvU32 arg2); + +NvBool osTegraSocGetHdcpEnabled(OS_GPU_INFO *pOsGpuInfo); + +void osTegraGetDispSMMUStreamIds( + OS_GPU_INFO *pOsGpuInfo, + NvU32 *dispIsoStreamId, + NvU32 *dispNisoStreamId +); + +NvBool osIsVga(OS_GPU_INFO *pArg1, + NvBool bIsGpuPrimaryDevice); + +void osInitOSHwInfo(OBJGPU *pGpu); + +void osDestroyOSHwInfo(OBJGPU *pGpu); + +NV_STATUS osUserHandleToKernelPtr(NvU32 hClient, + NvP64 Handle, + NvP64 *pHandle); + +NV_STATUS osGetSmbiosTable(void **pBaseVAddr, NvU64 *pLength, + NvU64 *pNumSubTypes, NvU32 *pVersion); + +void osPutSmbiosTable(void *pBaseVAddr, NvU64 length); + +NvBool osIsNvswitchPresent(void); + +void osQueueMMUFaultHandler(OBJGPU *); + +NV_STATUS osQueueDrainP2PHandler(NvU8 *); +void osQueueResumeP2PHandler(NvU8 *); + +NvBool osIsGpuAccessible(OBJGPU *pGpu); +NvBool osIsGpuShutdown(OBJGPU *pGpu); + +NvBool osMatchGpuOsInfo(OBJGPU *pGpu, void *pOsInfo); + +void osReleaseGpuOsInfo(void *pOsInfo); + +void osGpuWriteReg008(OBJGPU *pGpu, + NvU32 thisAddress, + NvV8 thisValue); + +void osDevWriteReg008(OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress, + NvV8 thisValue); + +NvU8 osGpuReadReg008(OBJGPU *pGpu, + NvU32 thisAddress); + +NvU8 osDevReadReg008(OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress); + +void osGpuWriteReg016(OBJGPU *pGpu, + NvU32 thisAddress, + NvV16 thisValue); + +void osDevWriteReg016(OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress, + NvV16 thisValue); + +NvU16 osGpuReadReg016(OBJGPU *pGpu, + NvU32 thisAddress); + +NvU16 osDevReadReg016(OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress); + +void osGpuWriteReg032(OBJGPU *pGpu, + NvU32 thisAddress, + NvV32 thisValue); + +void osDevWriteReg032(OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress, + NvV32 thisValue); + +NvU32 osGpuReadReg032(OBJGPU *pGpu, + NvU32 thisAddress); + +NvU32 osDevReadReg032(OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress); + +NV_STATUS osIsr(OBJGPU *pGpu); + +NV_STATUS osSanityTestIsr(OBJGPU *pGpu); + +NV_STATUS osInitMapping(OBJGPU *pGpu); + +NV_STATUS osVerifySystemEnvironment(OBJGPU *pGpu); + +NV_STATUS osSanityTestIsr(OBJGPU *pGpu); + +void osAllocatedRmClient(void* pOSInfo); + +NV_STATUS osConfigurePcieReqAtomics(OS_GPU_INFO *pOsGpuInfo, NvU32 *pMask); +NV_STATUS osGetPcieCplAtomicsCaps(OS_GPU_INFO *pOsGpuInfo, NvU32 *pMask); + +NvBool osDmabufIsSupported(void); + +static NV_INLINE NV_STATUS isrWrapper(NvBool testIntr, OBJGPU *pGpu) +{ + // + // If pGpu->testIntr is not true then use original osIsr function. 
+    // On VMware ESXi 6.0, both the RM ISR and DPC handlers are called from the ESXi 6.0
+    // DPC handler. Because of this, when multiple GPUs are present in the system,
+    // we may get a call to the rm_isr routine for a HW interrupt corresponding to a
+    // previously initialized GPU. In that case we need to call the original osIsr
+    // function.
+    //
+
+    NV_STATUS status = NV_OK;
+
+    if (testIntr)
+    {
+        status = osSanityTestIsr(pGpu);
+    }
+    else
+    {
+        status = osIsr(pGpu);
+    }
+
+    return status;
+}
+
+#define OS_PCIE_CAP_MASK_REQ_ATOMICS_32 NVBIT(0)
+#define OS_PCIE_CAP_MASK_REQ_ATOMICS_64 NVBIT(1)
+#define OS_PCIE_CAP_MASK_REQ_ATOMICS_128 NVBIT(2)
+
+void osGetNumaMemoryUsage(NvS32 numaId, NvU64 *free_memory_bytes, NvU64 *total_memory_bytes);
+
+NV_STATUS osNumaAddGpuMemory(OS_GPU_INFO *pOsGpuInfo, NvU64 offset,
+                             NvU64 size, NvU32 *pNumaNodeId);
+void osNumaRemoveGpuMemory(OS_GPU_INFO *pOsGpuInfo, NvU64 offset,
+                           NvU64 size, NvU32 numaNodeId);
+
+NV_STATUS osOfflinePageAtAddress(NvU64 address);
+
+NvBool osGpuSupportsAts(OBJGPU *pGpu);
+
+//
+// OS 1Hz timer callback functions
+//
+// 1 second is the median and mean time between two callback runs, but the worst
+// case can be anywhere between 0 (back-to-back) and (1s+RMTIMEOUT).
+// N callbacks are at least (N-2) seconds apart.
+//
+// Callbacks can run at either DISPATCH_LEVEL or PASSIVE_LEVEL
+//
+NV_STATUS osInit1HzCallbacks(OBJTMR *pTmr);
+NV_STATUS osDestroy1HzCallbacks(OBJTMR *pTmr);
+NV_STATUS osSchedule1HzCallback(OBJGPU *pGpu, OS1HZPROC callback, void *pData, NvU32 flags);
+void osRemove1HzCallback(OBJGPU *pGpu, OS1HZPROC callback, void *pData);
+NvBool osRun1HzCallbacksNow(OBJGPU *pGpu);
+void osRunQueued1HzCallbacksUnderLock(OBJGPU *pGpu);
+
+NV_STATUS osDoFunctionLevelReset(OBJGPU *pGpu);
+
+void osDisableConsoleManagement(OBJGPU *pGpu);
+
+void vgpuDevWriteReg032(
+    OBJGPU *pGpu,
+    NvU32 thisAddress,
+    NvV32 thisValue,
+    NvBool *vgpuHandled
+);
+
+NvU32 vgpuDevReadReg032(
+    OBJGPU *pGpu,
+    NvU32 thisAddress,
+    NvBool *vgpuHandled
+);
+
+void osInitSystemStaticConfig(SYS_STATIC_CONFIG *);
+
+void osDbgBugCheckOnAssert(void);
+
+NvBool osBugCheckOnTimeoutEnabled(void);
+
+//
+// TODO: clean up the rest of the list
+//
+OSAttachGpu osAttachGpu;
+OSDpcAttachGpu osDpcAttachGpu;
+OSDpcDetachGpu osDpcDetachGpu;
+OSHandleGpuLost osHandleGpuLost;
+OSHandleGpuSurpriseRemoval osHandleGpuSurpriseRemoval;
+OSInitScalabilityOptions osInitScalabilityOptions;
+OSQueueDpc osQueueDpc;
+OSSetEvent osSetEvent;
+OSEventNotification osEventNotification;
+OSEventNotificationWithInfo osEventNotificationWithInfo;
+OSObjectEventNotification osObjectEventNotification;
+OSNotifyEvent osNotifyEvent;
+OSFlushCpuWriteCombineBuffer osFlushCpuWriteCombineBuffer;
+OSDelay osDelay;
+OSSpinLoop osSpinLoop;
+OSDelayUs osDelayUs;
+OSDelayNs osDelayNs;
+OSGetCpuCount osGetCpuCount;
+OSGetMaximumCoreCount osGetMaximumCoreCount;
+OSGetCurrentProcessorNumber osGetCurrentProcessorNumber;
+OSGetVersionDump osGetVersionDump;
+
+OSGetMaxUserVa osGetMaxUserVa;
+OSGetCpuVaAddrShift osGetCpuVaAddrShift;
+
+OSAllocPagesInternal osAllocPagesInternal;
+OSFreePagesInternal osFreePagesInternal;
+
+OSGetPageSize osGetPageSize;
+OSGetSupportedSysmemPageSizeMask osGetSupportedSysmemPageSizeMask;
+OSGetPageShift osGetPageShift;
+OSNumaMemblockSize osNumaMemblockSize;
+OSNumaOnliningEnabled osNumaOnliningEnabled;
+OSAllocPagesNode osAllocPagesNode;
+OSAllocAcquirePage osAllocAcquirePage;
+OSAllocReleasePage osAllocReleasePage;
+OSGetPageRefcount osGetPageRefcount;
+OSCountTailPages 
osCountTailPages; +OSVirtualToPhysicalAddr osKernVirtualToPhysicalAddr; +OSLockMem osLockMem; +OSUnlockMem osUnlockMem; +OSMapSystemMemory osMapSystemMemory; +OSUnmapSystemMemory osUnmapSystemMemory; +OSWriteRegistryDword osWriteRegistryDword; +OSReadRegistryDword osReadRegistryDword; +OSReadRegistryString osReadRegistryString; +OSWriteRegistryBinary osWriteRegistryBinary; +OSWriteRegistryVolatile osWriteRegistryVolatile; +OSReadRegistryVolatile osReadRegistryVolatile; +OSReadRegistryVolatileSize osReadRegistryVolatileSize; +OSReadRegistryBinary osReadRegistryBinary; +OSReadRegistryDwordBase osReadRegistryDwordBase; +OSReadRegistryStringBase osReadRegistryStringBase; +OSPackageRegistry osPackageRegistry; +OSUnpackageRegistry osUnpackageRegistry; +NV_STATUS osDestroyRegistry(void); +nv_reg_entry_t* osGetRegistryList(void); +NV_STATUS osSetRegistryList(nv_reg_entry_t *pRegList); +OSMapPciMemoryUser osMapPciMemoryUser; +OSMapPciMemoryAreaUser osMapPciMemoryAreaUser; +OSUnmapPciMemoryUser osUnmapPciMemoryUser; +OSMapPciMemoryKernelOld osMapPciMemoryKernelOld; +OSMapPciMemoryKernel64 osMapPciMemoryKernel64; +OSUnmapPciMemoryKernelOld osUnmapPciMemoryKernelOld; +OSUnmapPciMemoryKernel64 osUnmapPciMemoryKernel64; +OSMapGPU osMapGPU; +OSUnmapGPU osUnmapGPU; +OSLockShouldToggleInterrupts osLockShouldToggleInterrupts; + +OSGetPerformanceCounter osGetPerformanceCounter; + +OSI2CClosePorts osI2CClosePorts; +OSWriteI2CBufferDirect osWriteI2CBufferDirect; +OSReadI2CBufferDirect osReadI2CBufferDirect; +OSI2CTransfer osI2CTransfer; +OSSetGpuRailVoltage osSetGpuRailVoltage; +OSGetGpuRailVoltage osGetGpuRailVoltage; +OSGetChipInfo osGetChipInfo; +OSGetGpuRailVoltageInfo osGetGpuRailVoltageInfo; + +OSGetCurrentProcess osGetCurrentProcess; +OSGetCurrentProcessName osGetCurrentProcessName; +OSGetCurrentThread osGetCurrentThread; +OSAttachToProcess osAttachToProcess; +OSDetachFromProcess osDetachFromProcess; +OSPollHotkeyState osPollHotkeyState; + +OSIsRaisedIRQL osIsRaisedIRQL; +OSIsISR osIsISR; +OSGetDriverBlock osGetDriverBlock; + +OSSyncWithRmDestroy osSyncWithRmDestroy; +OSSyncWithGpuDestroy osSyncWithGpuDestroy; + +OSModifyGpuSwStatePersistence osModifyGpuSwStatePersistence; + +OSHandleDeferredRecovery osHandleDeferredRecovery; +OSIsSwPreInitOnly osIsSwPreInitOnly; +OSGetCarveoutInfo osGetCarveoutInfo; +OSGetVPRInfo osGetVPRInfo; +OSAllocInVPR osAllocInVPR; +OSGetGenCarveout osGetGenCarveout; +OSGetSysmemInfo osGetSysmemInfo; +OSGC6PowerControl osGC6PowerControl; +OSReadPFPciConfigInVF osReadPFPciConfigInVF; + +OSAcquireRmSema osAcquireRmSema; +OSAcquireRmSema osAcquireRmSemaForced; +OSCondAcquireRmSema osCondAcquireRmSema; +OSReleaseRmSema osReleaseRmSema; + +// +// When the new basic lock model is enabled then the following legacy RM +// system semaphore routines are stubbed. +// +#define osAllocRmSema(s) (NV_OK) +#define osFreeRmSema(s) +#define osIsAcquiredRmSema(s) (NV_TRUE) +#define osIsRmSemaOwner(s) (NV_TRUE) +#define osCondReleaseRmSema(s) (NV_TRUE) +#define osAcquireRmSemaForced(s) osAcquireRmSema(s) +#define osGpuLockSetOwner(s,t) (NV_OK) + +OSApiLockAcquireConfigureFlags osApiLockAcquireConfigureFlags; +OSGpuLocksQueueRelease osGpuLocksQueueRelease; + +OSFlushLog osFlushLog; +OSSetSurfaceName osSetSurfaceName; + +#define MODS_ARCH_ERROR_PRINTF(format, ...) +#define MODS_ARCH_INFO_PRINTF(format, ...) +#define MODS_ARCH_REPORT(event, format, ...) 
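[Editor's note: as a usage illustration for the 1 Hz callback facility declared earlier in this header (OS1HZPROC, NV_OS_1HZ_ONESHOT/REPEAT, osSchedule1HzCallback), the sketch below registers a repeating once-per-second callback. The function names and the empty callback body are hypothetical, and the sketch assumes it runs after osInit1HzCallbacks() has set the facility up.]

static void hypotheticalPollOncePerSecond(OBJGPU *pGpu, void *pData)
{
    // Periodic work goes here; per the facility's comments, this may run at
    // an elevated IRQL, so it must not block.
    (void)pGpu;
    (void)pData;
}

static NV_STATUS hypotheticalSchedulePoll(OBJGPU *pGpu)
{
    // NV_OS_1HZ_REPEAT re-arms the callback after each run; NV_OS_1HZ_ONESHOT
    // would fire it once, approximately one second from now. A later
    // osRemove1HzCallback(pGpu, hypotheticalPollOncePerSecond, NULL) with the
    // same (callback, pData) pair cancels it.
    return osSchedule1HzCallback(pGpu, hypotheticalPollOncePerSecond,
                                 NULL, NV_OS_1HZ_REPEAT);
}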
+ + +#define osAllocPages(a) osAllocPagesInternal(a) +#define osFreePages(a) osFreePagesInternal(a) + +extern NV_STATUS constructObjOS(struct OBJOS *); +extern void osInitObjOS(struct OBJOS *); + +extern OSGetTimeoutParams osGetTimeoutParams; + +// +// NV OS simulation mode defines +// Keep in sync with gpu.h SIM MODE defines until osGetSimulationMode is deprecated. +// +#ifndef NV_SIM_MODE_DEFS +#define NV_SIM_MODE_DEFS +#define NV_SIM_MODE_HARDWARE 0U +#define NV_SIM_MODE_RTL 1U +#define NV_SIM_MODE_CMODEL 2U +#define NV_SIM_MODE_MODS_AMODEL 3U +#define NV_SIM_MODE_TEGRA_FPGA 4U +#define NV_SIM_MODE_INVALID (~0x0U) +#endif + +// +// NV Heap control defines +// +#define NV_HEAP_CONTROL_INTERNAL 0 +#define NV_HEAP_CONTROL_EXTERNAL 1 + +// osDelayUs flags +#define OSDELAYUS_FLAGS_USE_TMR_DELAY NVBIT(0) + +// osEventNotification notifyIndex all value +#define OS_EVENT_NOTIFICATION_INDEX_ALL (0xffffffff) + +// tells osEventNotification to only issue notifies/events on this subdev +#define OS_EVENT_NOTIFICATION_INDEX_MATCH_SUBDEV (0x10000000) + +// Notify callback action +#define NV_OS_WRITE_THEN_AWAKEN 0x00000001 + +// +// Include per-OS definitions +// +// #ifdef out for nvoctrans, this hides include to system headers which +// breaks the tool. +// +// TODO - we should delete the per-OS os_custom.h files exposed to +// OS-agnostic code. Cross-OS code shouldn't pull in per-OS headers or +// per-OS definitions. +// +#pragma once +#include "os_custom.h" + +#define NV_SEMA_RELEASE_SUCCEED 0 // lock released, no waiting thread to notify +#define NV_SEMA_RELEASE_FAILED 1 // failed to lock release +#define NV_SEMA_RELEASE_NOTIFIED 2 // lock released, notify waiting thread +#define NV_SEMA_RELEASE_DPC_QUEUED 3 // lock released, queue DPC to notify waiting thread +#define NV_SEMA_RELEASE_DPC_FAILED 4 // lock released, but failed to queue a DPC to notify waiting thread + + #define ADD_PROBE(pGpu, probeId) + +#define IS_SIM_MODS(pOS) (pOS->bIsSimMods) + +#endif // _OS_H_ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_OS_NVOC_H_ diff --git a/src/nvidia/generated/g_prereq_tracker_nvoc.c b/src/nvidia/generated/g_prereq_tracker_nvoc.c new file mode 100644 index 0000000..340b092 --- /dev/null +++ b/src/nvidia/generated/g_prereq_tracker_nvoc.c @@ -0,0 +1,205 @@ +#define NVOC_PREREQ_TRACKER_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_prereq_tracker_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x0e171b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_PrereqTracker; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +// Forward declarations for PrereqTracker +void __nvoc_init__Object(Object*); +void __nvoc_init__PrereqTracker(PrereqTracker*); +void __nvoc_init_funcTable_PrereqTracker(PrereqTracker*); +NV_STATUS __nvoc_ctor_PrereqTracker(PrereqTracker*, struct OBJGPU *arg_pParent); +void __nvoc_init_dataField_PrereqTracker(PrereqTracker*); +void __nvoc_dtor_PrereqTracker(PrereqTracker*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__PrereqTracker; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__PrereqTracker; + +// Down-thunk(s) to bridge PrereqTracker methods 
from ancestors (if any) + +// Up-thunk(s) to bridge PrereqTracker methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_PrereqTracker = +{ + /*classInfo=*/ { + /*size=*/ sizeof(PrereqTracker), + /*classId=*/ classId(PrereqTracker), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "PrereqTracker", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_PrereqTracker, + /*pCastInfo=*/ &__nvoc_castinfo__PrereqTracker, + /*pExportInfo=*/ &__nvoc_export_info__PrereqTracker +}; + + +// Metadata with per-class RTTI with ancestor(s) +static const struct NVOC_METADATA__PrereqTracker __nvoc_metadata__PrereqTracker = { + .rtti.pClassDef = &__nvoc_class_def_PrereqTracker, // (prereq) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_PrereqTracker, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(PrereqTracker, __nvoc_base_Object), +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__PrereqTracker = { + .numRelatives = 2, + .relatives = { + &__nvoc_metadata__PrereqTracker.rtti, // [0]: (prereq) this + &__nvoc_metadata__PrereqTracker.metadata__Object.rtti, // [1]: (obj) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__PrereqTracker = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_PrereqTracker(PrereqTracker *pThis) { + __nvoc_prereqDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_PrereqTracker(PrereqTracker *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_PrereqTracker(PrereqTracker *pThis, struct OBJGPU * arg_pParent) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_PrereqTracker_fail_Object; + __nvoc_init_dataField_PrereqTracker(pThis); + + status = __nvoc_prereqConstruct(pThis, arg_pParent); + if (status != NV_OK) goto __nvoc_ctor_PrereqTracker_fail__init; + goto __nvoc_ctor_PrereqTracker_exit; // Success + +__nvoc_ctor_PrereqTracker_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_PrereqTracker_fail_Object: +__nvoc_ctor_PrereqTracker_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_PrereqTracker_1(PrereqTracker *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_PrereqTracker_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_PrereqTracker(PrereqTracker *pThis) { + __nvoc_init_funcTable_PrereqTracker_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__PrereqTracker(PrereqTracker *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_PrereqTracker = pThis; // (prereq) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Object(&pThis->__nvoc_base_Object); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__PrereqTracker.metadata__Object; // (obj) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__PrereqTracker; // (prereq) this + + // Initialize per-object vtables. 
+ __nvoc_init_funcTable_PrereqTracker(pThis); +} + +NV_STATUS __nvoc_objCreate_PrereqTracker(PrereqTracker **ppThis, Dynamic *pParent, NvU32 createFlags, struct OBJGPU * arg_pParent) +{ + NV_STATUS status; + Object *pParentObj = NULL; + PrereqTracker *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(PrereqTracker), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(PrereqTracker)); + + pThis->__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__PrereqTracker(pThis); + status = __nvoc_ctor_PrereqTracker(pThis, arg_pParent); + if (status != NV_OK) goto __nvoc_objCreate_PrereqTracker_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_PrereqTracker_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(PrereqTracker)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_PrereqTracker(PrereqTracker **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct OBJGPU * arg_pParent = va_arg(args, struct OBJGPU *); + + status = __nvoc_objCreate_PrereqTracker(ppThis, pParent, createFlags, arg_pParent); + + return status; +} + diff --git a/src/nvidia/generated/g_prereq_tracker_nvoc.h b/src/nvidia/generated/g_prereq_tracker_nvoc.h new file mode 100644 index 0000000..4689d30 --- /dev/null +++ b/src/nvidia/generated/g_prereq_tracker_nvoc.h @@ -0,0 +1,305 @@ + +#ifndef _G_PREREQ_TRACKER_NVOC_H_ +#define _G_PREREQ_TRACKER_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*!
+ * @file prereq_tracker.h
+ * @brief Holds interfaces and data structures required by the prerequisite
+ *        tracking feature/code.
+ *
+ * Code depending on multiple other features should use prereqComposeEntry() to create
+ * a prerequisite tracking structure with a provided bitVector of all necessary
+ * dependencies, which will arm the prereq to start watching those dependencies.
+ * Once those dependencies are fulfilled, they should issue prereqSatisfy() (one-by-one).
+ * This common code should broadcast those to all prerequisite tracking structures
+ * and, once all respective dependencies are satisfied, will issue the
+ * registered callback.
+ * Similarly, dependencies should issue prereqRetract() before they change
+ * their state, and common code will broadcast that to all tracking structures
+ * and issue callbacks again with bSatisfied=false, if all dependencies
+ * for that prereq were previously satisfied.
+ *
+ * @note The feature is designed to prevent creating new prerequisites once
+ *       dependencies start issuing Satisfy()/Retract() notifications.
+ *       Therefore, ComposeEntry all prerequisites during
+ *       stateInit() and allow code to issue Satisfy()/Retract() only in
+ *       stateLoad() or later.
+ */
+
+#pragma once
+#include "g_prereq_tracker_nvoc.h"
+
+#ifndef __PREREQUISITE_TRACKER_H__
+#define __PREREQUISITE_TRACKER_H__
+
+/* ------------------------ Includes ---------------------------------------- */
+#include "containers/list.h"
+#include "utils/nvbitvector.h"
+
+#include "nvoc/object.h"
+
+/* ------------------------ Macros ------------------------------------------ */
+
+#define PREREQ_ID_VECTOR_SIZE 64
+
+/*!
+ * Checks whether all dependencies of the given prerequisite tracking
+ * structure have been satisfied.
+ *
+ * @param[in] _pPrereq PREREQ_ENTRY pointer
+ *
+ * @return boolean indicating whether the prerequisite has been satisfied.
+ */
+#define PREREQ_IS_SATISFIED(_pPrereq) \
+    ((_pPrereq)->countRequested == (_pPrereq)->countSatisfied)
+
+/* ------------------------ Datatypes --------------------------------------- */
+
+
+struct OBJGPU;
+
+#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__
+#define __NVOC_CLASS_OBJGPU_TYPEDEF__
+typedef struct OBJGPU OBJGPU;
+#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJGPU
+#define __nvoc_class_id_OBJGPU 0x7ef3cb
+#endif /* __nvoc_class_id_OBJGPU */
+
+
+
+/*!
+ * @brief Callback prototype.
+ *
+ * @param[in] pGpu OBJGPU pointer
+ * @param[in] bSatisfied
+ *     Indicates if dependencies were just satisfied or about to be retracted.
+ *
+ * @return NV_OK if callback successfully executed
+ * @return status failure specific error code
+ */
+typedef NV_STATUS GpuPrereqCallback(struct OBJGPU *pGpu, NvBool bSatisfied);
+
+typedef NvU16 PREREQ_ID;
+
+/*!
+ * Bitvector for storing prereq IDs required for another prereq struct
+ * Limited to size defined above, set to largest required by users
+ */
+MAKE_BITVECTOR(PREREQ_ID_BIT_VECTOR, PREREQ_ID_VECTOR_SIZE);
+
+/*!
+ * An individual prerequisite tracking entry structure.
+ */
+typedef struct
+{
+    /*!
+     * Mask of the dependencies (prerequisites that have to be satisfied before
+     * the callback can be issued).
+     */
+    PREREQ_ID_BIT_VECTOR requested;
+
+    /*!
+     * Counter of all dependencies (prerequisites) tracked by this structure.
+     */
+    NvS32 countRequested;
+    /*!
+     * Counter of currently satisfied dependencies (prerequisites) tracked by
+     * this structure. Once equal to @ref countRequested, callback can be issued.
+     */
+    NvS32 countSatisfied;
+
+    /*!
+     * Boolean indicating that the given PREREQ_ENTRY is armed and ready to fire @ref
+     * callback whenever all PREREQ_IDs specified in @ref requested are satisfied.
+     *
+     * This bit is set during @ref prereqComposeEntry_IMPL(), which will also do an
+     * initial satisfaction check of all @ref requested PREREQ_IDs
+     * and fire the @ref callback if necessary.
+     */
+    NvBool bArmed;
+
+    /*!
+     * @copydoc GpuPrereqCallback
+     */
+    GpuPrereqCallback *callback;
+} PREREQ_ENTRY;
+MAKE_LIST(PrereqList, PREREQ_ENTRY);
+
+/*!
+ * Holds common prerequisite tracking information.
+ */
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_PREREQ_TRACKER_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+
+
+// Metadata with per-class RTTI with ancestor(s)
+struct NVOC_METADATA__PrereqTracker;
+struct NVOC_METADATA__Object;
+
+
+struct PrereqTracker {
+
+    // Metadata starts with RTTI structure.
+    union {
+        const struct NVOC_METADATA__PrereqTracker *__nvoc_metadata_ptr;
+        const struct NVOC_RTTI *__nvoc_rtti;
+    };
+
+    // Parent (i.e. superclass or base class) objects
+    struct Object __nvoc_base_Object;
+
+    // Ancestor object pointers for `staticCast` feature
+    struct Object *__nvoc_pbase_Object;    // obj super
+    struct PrereqTracker *__nvoc_pbase_PrereqTracker;    // prereq
+
+    // Data members
+    union PREREQ_ID_BIT_VECTOR satisfied;
+    NvBool bInitialized;
+    PrereqList prereqList;
+    struct OBJGPU *pParent;
+};
+
+
+// Metadata with per-class RTTI with ancestor(s)
+struct NVOC_METADATA__PrereqTracker {
+    const struct NVOC_RTTI rtti;
+    const struct NVOC_METADATA__Object metadata__Object;
+};
+
+#ifndef __NVOC_CLASS_PrereqTracker_TYPEDEF__
+#define __NVOC_CLASS_PrereqTracker_TYPEDEF__
+typedef struct PrereqTracker PrereqTracker;
+#endif /* __NVOC_CLASS_PrereqTracker_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_PrereqTracker
+#define __nvoc_class_id_PrereqTracker 0x0e171b
+#endif /* __nvoc_class_id_PrereqTracker */
+
+// Casting support
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_PrereqTracker;
+
+#define __staticCast_PrereqTracker(pThis) \
+    ((pThis)->__nvoc_pbase_PrereqTracker)
+
+#ifdef __nvoc_prereq_tracker_h_disabled
+#define __dynamicCast_PrereqTracker(pThis) ((PrereqTracker*) NULL)
+#else //__nvoc_prereq_tracker_h_disabled
+#define __dynamicCast_PrereqTracker(pThis) \
+    ((PrereqTracker*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(PrereqTracker)))
+#endif //__nvoc_prereq_tracker_h_disabled
+
+NV_STATUS __nvoc_objCreateDynamic_PrereqTracker(PrereqTracker**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_PrereqTracker(PrereqTracker**, Dynamic*, NvU32, struct OBJGPU *arg_pParent);
+#define __objCreate_PrereqTracker(ppNewObj, pParent, createFlags, arg_pParent) \
+    __nvoc_objCreate_PrereqTracker((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pParent)
+
+
+// Wrapper macros
+
+// 
Dispatch functions +NV_STATUS prereqConstruct_IMPL(struct PrereqTracker *arg_pTracker, struct OBJGPU *arg_pParent); + +#define __nvoc_prereqConstruct(arg_pTracker, arg_pParent) prereqConstruct_IMPL(arg_pTracker, arg_pParent) +void prereqDestruct_IMPL(struct PrereqTracker *pTracker); + +#define __nvoc_prereqDestruct(pTracker) prereqDestruct_IMPL(pTracker) +NV_STATUS prereqSatisfy_IMPL(struct PrereqTracker *pTracker, PREREQ_ID prereqId); + +#ifdef __nvoc_prereq_tracker_h_disabled +static inline NV_STATUS prereqSatisfy(struct PrereqTracker *pTracker, PREREQ_ID prereqId) { + NV_ASSERT_FAILED_PRECOMP("PrereqTracker was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_prereq_tracker_h_disabled +#define prereqSatisfy(pTracker, prereqId) prereqSatisfy_IMPL(pTracker, prereqId) +#endif //__nvoc_prereq_tracker_h_disabled + +NV_STATUS prereqRetract_IMPL(struct PrereqTracker *pTracker, PREREQ_ID prereqId); + +#ifdef __nvoc_prereq_tracker_h_disabled +static inline NV_STATUS prereqRetract(struct PrereqTracker *pTracker, PREREQ_ID prereqId) { + NV_ASSERT_FAILED_PRECOMP("PrereqTracker was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_prereq_tracker_h_disabled +#define prereqRetract(pTracker, prereqId) prereqRetract_IMPL(pTracker, prereqId) +#endif //__nvoc_prereq_tracker_h_disabled + +NvBool prereqIdIsSatisfied_IMPL(struct PrereqTracker *pTracker, PREREQ_ID prereqId); + +#ifdef __nvoc_prereq_tracker_h_disabled +static inline NvBool prereqIdIsSatisfied(struct PrereqTracker *pTracker, PREREQ_ID prereqId) { + NV_ASSERT_FAILED_PRECOMP("PrereqTracker was disabled!"); + return NV_FALSE; +} +#else //__nvoc_prereq_tracker_h_disabled +#define prereqIdIsSatisfied(pTracker, prereqId) prereqIdIsSatisfied_IMPL(pTracker, prereqId) +#endif //__nvoc_prereq_tracker_h_disabled + +NV_STATUS prereqComposeEntry_IMPL(struct PrereqTracker *pTracker, GpuPrereqCallback *callback, union PREREQ_ID_BIT_VECTOR *pDepends, PREREQ_ENTRY **ppPrereq); + +#ifdef __nvoc_prereq_tracker_h_disabled +static inline NV_STATUS prereqComposeEntry(struct PrereqTracker *pTracker, GpuPrereqCallback *callback, union PREREQ_ID_BIT_VECTOR *pDepends, PREREQ_ENTRY **ppPrereq) { + NV_ASSERT_FAILED_PRECOMP("PrereqTracker was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_prereq_tracker_h_disabled +#define prereqComposeEntry(pTracker, callback, pDepends, ppPrereq) prereqComposeEntry_IMPL(pTracker, callback, pDepends, ppPrereq) +#endif //__nvoc_prereq_tracker_h_disabled + +#undef PRIVATE_FIELD + + +#endif // __PREREQUISITE_TRACKER_H__ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_PREREQ_TRACKER_NVOC_H_ diff --git a/src/nvidia/generated/g_ref_count_nvoc.h b/src/nvidia/generated/g_ref_count_nvoc.h new file mode 100644 index 0000000..764e73e --- /dev/null +++ b/src/nvidia/generated/g_ref_count_nvoc.h @@ -0,0 +1,237 @@ + +#ifndef _G_REF_COUNT_NVOC_H_ +#define _G_REF_COUNT_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_ref_count_nvoc.h" + +#ifndef REF_COUNT_H +#define REF_COUNT_H + +/****************** Resource Manager Defines and Structures *****************\ +* * +* Defines and structures used for the Reference-Counting Object. * +* * +\****************************************************************************/ + +#include "containers/map.h" +#include "nvoc/object.h" + +#define NV_REQUESTER_INIT NV_U64_MIN +#define NV_REQUESTER_RM NV_U64_MAX +#define NV_REQUESTER_CLIENT_OBJECT(c,o) (((NvU64)(c) << 32) | o) + +typedef enum +{ + REFCNT_STATE_DEFAULT = 0, + REFCNT_STATE_ENABLED, + REFCNT_STATE_DISABLED, + REFCNT_STATE_ERROR, +} REFCNT_STATE; + +typedef struct +{ + NvU32 numReferences; +} REFCNT_REQUESTER_ENTRY, *PREFCNT_REQUESTER_ENTRY; + +MAKE_MAP(REFCNT_REQUESTER_ENTRY_MAP, REFCNT_REQUESTER_ENTRY); + + +struct OBJREFCNT; + +#ifndef __NVOC_CLASS_OBJREFCNT_TYPEDEF__ +#define __NVOC_CLASS_OBJREFCNT_TYPEDEF__ +typedef struct OBJREFCNT OBJREFCNT; +#endif /* __NVOC_CLASS_OBJREFCNT_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJREFCNT +#define __nvoc_class_id_OBJREFCNT 0xf89281 +#endif /* __nvoc_class_id_OBJREFCNT */ + + + +// +// XXX-IOM: +// These callback types are good candidates to be replaced with IOM +// functionality, where small derived classes can be created on a 'callback' +// base interface, should that become more practical (currently, adding any +// kind of class still requires a non-trivial amount of boilerplate to wire +// up). +// +typedef NV_STATUS RefcntStateChangeCallback(struct OBJREFCNT *, Dynamic *, + REFCNT_STATE, REFCNT_STATE); + +typedef void RefcntResetCallback(struct OBJREFCNT *, Dynamic *, NvU64); + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_REF_COUNT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJREFCNT; +struct NVOC_METADATA__Object; + + +struct OBJREFCNT { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__OBJREFCNT *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Object __nvoc_base_Object; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct OBJREFCNT *__nvoc_pbase_OBJREFCNT; // refcnt + + // 1 PDB property + NvBool PDB_PROP_REFCNT_ALLOW_RECURSIVE_REQUESTS; + + // Data members + Dynamic *pParent; + NvU32 tag; + REFCNT_REQUESTER_ENTRY_MAP requesterTree; + REFCNT_STATE state; + NvU32 count; + RefcntStateChangeCallback *refcntStateChangeCallback; + RefcntResetCallback *refcntResetCallback; +}; + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJREFCNT { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; +}; + +#ifndef __NVOC_CLASS_OBJREFCNT_TYPEDEF__ +#define __NVOC_CLASS_OBJREFCNT_TYPEDEF__ +typedef struct OBJREFCNT OBJREFCNT; +#endif /* __NVOC_CLASS_OBJREFCNT_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJREFCNT +#define __nvoc_class_id_OBJREFCNT 0xf89281 +#endif /* __nvoc_class_id_OBJREFCNT */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJREFCNT; + +#define __staticCast_OBJREFCNT(pThis) \ + ((pThis)->__nvoc_pbase_OBJREFCNT) + +#ifdef __nvoc_ref_count_h_disabled +#define __dynamicCast_OBJREFCNT(pThis) ((OBJREFCNT*) NULL) +#else //__nvoc_ref_count_h_disabled +#define __dynamicCast_OBJREFCNT(pThis) \ + ((OBJREFCNT*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJREFCNT))) +#endif //__nvoc_ref_count_h_disabled + +// Property macros +#define PDB_PROP_REFCNT_ALLOW_RECURSIVE_REQUESTS_BASE_CAST +#define PDB_PROP_REFCNT_ALLOW_RECURSIVE_REQUESTS_BASE_NAME PDB_PROP_REFCNT_ALLOW_RECURSIVE_REQUESTS + +NV_STATUS __nvoc_objCreateDynamic_OBJREFCNT(OBJREFCNT**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJREFCNT(OBJREFCNT**, Dynamic*, NvU32, Dynamic *arg_pParent, NvU32 arg_tag, RefcntStateChangeCallback *arg_pStateChangeCallback, RefcntResetCallback *arg_pResetCallback); +#define __objCreate_OBJREFCNT(ppNewObj, pParent, createFlags, arg_pParent, arg_tag, arg_pStateChangeCallback, arg_pResetCallback) \ + __nvoc_objCreate_OBJREFCNT((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pParent, arg_tag, arg_pStateChangeCallback, arg_pResetCallback) + + +// Wrapper macros + +// Dispatch functions +NV_STATUS refcntConstruct_IMPL(struct OBJREFCNT *arg_pRefcnt, Dynamic *arg_pParent, NvU32 arg_tag, RefcntStateChangeCallback *arg_pStateChangeCallback, RefcntResetCallback *arg_pResetCallback); + +#define __nvoc_refcntConstruct(arg_pRefcnt, arg_pParent, arg_tag, arg_pStateChangeCallback, arg_pResetCallback) refcntConstruct_IMPL(arg_pRefcnt, arg_pParent, arg_tag, arg_pStateChangeCallback, arg_pResetCallback) +void refcntDestruct_IMPL(struct OBJREFCNT *pRefcnt); + +#define __nvoc_refcntDestruct(pRefcnt) refcntDestruct_IMPL(pRefcnt) +NV_STATUS refcntRequestReference_IMPL(struct OBJREFCNT *pRefcnt, NvU64 arg2, NvU32 arg3, NvBool arg4); + +#ifdef __nvoc_ref_count_h_disabled +static inline NV_STATUS refcntRequestReference(struct OBJREFCNT *pRefcnt, NvU64 arg2, NvU32 arg3, NvBool arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJREFCNT was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_ref_count_h_disabled +#define refcntRequestReference(pRefcnt, arg2, arg3, arg4) refcntRequestReference_IMPL(pRefcnt, arg2, arg3, arg4) +#endif //__nvoc_ref_count_h_disabled + +NV_STATUS refcntReleaseReferences_IMPL(struct OBJREFCNT *pRefcnt, NvU64 arg2, NvBool arg3); + +#ifdef __nvoc_ref_count_h_disabled +static inline NV_STATUS refcntReleaseReferences(struct 
OBJREFCNT *pRefcnt, NvU64 arg2, NvBool arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJREFCNT was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_ref_count_h_disabled +#define refcntReleaseReferences(pRefcnt, arg2, arg3) refcntReleaseReferences_IMPL(pRefcnt, arg2, arg3) +#endif //__nvoc_ref_count_h_disabled + +NV_STATUS refcntReset_IMPL(struct OBJREFCNT *pRefcnt, NvBool arg2); + +#ifdef __nvoc_ref_count_h_disabled +static inline NV_STATUS refcntReset(struct OBJREFCNT *pRefcnt, NvBool arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJREFCNT was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_ref_count_h_disabled +#define refcntReset(pRefcnt, arg2) refcntReset_IMPL(pRefcnt, arg2) +#endif //__nvoc_ref_count_h_disabled + +#undef PRIVATE_FIELD + + +#endif // REF_COUNT_H + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_REF_COUNT_NVOC_H_ diff --git a/src/nvidia/generated/g_resource_fwd_decls_nvoc.h b/src/nvidia/generated/g_resource_fwd_decls_nvoc.h new file mode 100644 index 0000000..b33b70c --- /dev/null +++ b/src/nvidia/generated/g_resource_fwd_decls_nvoc.h @@ -0,0 +1,616 @@ + +#ifndef _G_RESOURCE_FWD_DECLS_NVOC_H_ +#define _G_RESOURCE_FWD_DECLS_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once +#include "g_resource_fwd_decls_nvoc.h" + +#ifndef RESOURCE_FWD_DECLS_H +#define RESOURCE_FWD_DECLS_H + +#include "nvtypes.h" +#include "nvoc/prelude.h" +#include "nvoc/object.h" +#include "rmconfig.h" + +// Base classes + +struct ChannelDescendant; + +#ifndef __NVOC_CLASS_ChannelDescendant_TYPEDEF__ +#define __NVOC_CLASS_ChannelDescendant_TYPEDEF__ +typedef struct ChannelDescendant ChannelDescendant; +#endif /* __NVOC_CLASS_ChannelDescendant_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ChannelDescendant +#define __nvoc_class_id_ChannelDescendant 0x43d7c4 +#endif /* __nvoc_class_id_ChannelDescendant */ + + + +struct DispChannel; + +#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__ +#define __NVOC_CLASS_DispChannel_TYPEDEF__ +typedef struct DispChannel DispChannel; +#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannel +#define __nvoc_class_id_DispChannel 0xbd2ff3 +#endif /* __nvoc_class_id_DispChannel */ + + + +struct GpuResource; + +#ifndef __NVOC_CLASS_GpuResource_TYPEDEF__ +#define __NVOC_CLASS_GpuResource_TYPEDEF__ +typedef struct GpuResource GpuResource; +#endif /* __NVOC_CLASS_GpuResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuResource +#define __nvoc_class_id_GpuResource 0x5d5d9f +#endif /* __nvoc_class_id_GpuResource */ + + + +struct INotifier; + +#ifndef __NVOC_CLASS_INotifier_TYPEDEF__ +#define __NVOC_CLASS_INotifier_TYPEDEF__ +typedef struct INotifier INotifier; +#endif /* __NVOC_CLASS_INotifier_TYPEDEF__ */ + +#ifndef __nvoc_class_id_INotifier +#define __nvoc_class_id_INotifier 0xf8f965 +#endif /* __nvoc_class_id_INotifier */ + + + +struct Memory; + +#ifndef __NVOC_CLASS_Memory_TYPEDEF__ +#define __NVOC_CLASS_Memory_TYPEDEF__ +typedef struct Memory Memory; +#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Memory +#define __nvoc_class_id_Memory 0x4789f2 +#endif /* __nvoc_class_id_Memory */ + + + +struct Notifier; + +#ifndef __NVOC_CLASS_Notifier_TYPEDEF__ +#define __NVOC_CLASS_Notifier_TYPEDEF__ +typedef struct Notifier Notifier; +#endif /* __NVOC_CLASS_Notifier_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Notifier +#define __nvoc_class_id_Notifier 0xa8683b +#endif /* __nvoc_class_id_Notifier */ + + + +struct NotifShare; + +#ifndef __NVOC_CLASS_NotifShare_TYPEDEF__ +#define __NVOC_CLASS_NotifShare_TYPEDEF__ +typedef struct NotifShare NotifShare; +#endif /* __NVOC_CLASS_NotifShare_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NotifShare +#define __nvoc_class_id_NotifShare 0xd5f150 +#endif /* __nvoc_class_id_NotifShare */ + + + +struct Resource; + +#ifndef __NVOC_CLASS_Resource_TYPEDEF__ +#define __NVOC_CLASS_Resource_TYPEDEF__ +typedef struct Resource Resource; +#endif /* __NVOC_CLASS_Resource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Resource +#define __nvoc_class_id_Resource 0xbe8545 +#endif /* __nvoc_class_id_Resource */ + + + +struct RmResource; + +#ifndef __NVOC_CLASS_RmResource_TYPEDEF__ +#define __NVOC_CLASS_RmResource_TYPEDEF__ +typedef struct RmResource RmResource; +#endif /* __NVOC_CLASS_RmResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmResource +#define __nvoc_class_id_RmResource 0x03610d +#endif /* __nvoc_class_id_RmResource */ + + + +struct RmResourceCommon; + +#ifndef __NVOC_CLASS_RmResourceCommon_TYPEDEF__ +#define __NVOC_CLASS_RmResourceCommon_TYPEDEF__ +typedef struct RmResourceCommon RmResourceCommon; +#endif /* __NVOC_CLASS_RmResourceCommon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmResourceCommon +#define __nvoc_class_id_RmResourceCommon 0x8ef259 +#endif /* __nvoc_class_id_RmResourceCommon */ + + + 
+struct RsResource;
+
+#ifndef __NVOC_CLASS_RsResource_TYPEDEF__
+#define __NVOC_CLASS_RsResource_TYPEDEF__
+typedef struct RsResource RsResource;
+#endif /* __NVOC_CLASS_RsResource_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_RsResource
+#define __nvoc_class_id_RsResource 0xd551cb
+#endif /* __nvoc_class_id_RsResource */
+
+
+
+struct RsShared;
+
+#ifndef __NVOC_CLASS_RsShared_TYPEDEF__
+#define __NVOC_CLASS_RsShared_TYPEDEF__
+typedef struct RsShared RsShared;
+#endif /* __NVOC_CLASS_RsShared_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_RsShared
+#define __nvoc_class_id_RsShared 0x830542
+#endif /* __nvoc_class_id_RsShared */
+
+
+
+// Classes disabled in Orin but requiring forward declarations to build.
+
+struct HostVgpuDeviceApi;
+
+#ifndef __NVOC_CLASS_HostVgpuDeviceApi_TYPEDEF__
+#define __NVOC_CLASS_HostVgpuDeviceApi_TYPEDEF__
+typedef struct HostVgpuDeviceApi HostVgpuDeviceApi;
+#endif /* __NVOC_CLASS_HostVgpuDeviceApi_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_HostVgpuDeviceApi
+#define __nvoc_class_id_HostVgpuDeviceApi 0x4c4173
+#endif /* __nvoc_class_id_HostVgpuDeviceApi */
+
+ // also used by open rm
+
+struct MpsApi;
+
+#ifndef __NVOC_CLASS_MpsApi_TYPEDEF__
+#define __NVOC_CLASS_MpsApi_TYPEDEF__
+typedef struct MpsApi MpsApi;
+#endif /* __NVOC_CLASS_MpsApi_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_MpsApi
+#define __nvoc_class_id_MpsApi 0x22ce42
+#endif /* __nvoc_class_id_MpsApi */
+
+
+
+struct MIGConfigSession;
+
+#ifndef __NVOC_CLASS_MIGConfigSession_TYPEDEF__
+#define __NVOC_CLASS_MIGConfigSession_TYPEDEF__
+typedef struct MIGConfigSession MIGConfigSession;
+#endif /* __NVOC_CLASS_MIGConfigSession_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_MIGConfigSession
+#define __nvoc_class_id_MIGConfigSession 0x36a941
+#endif /* __nvoc_class_id_MIGConfigSession */
+
+
+
+struct FmSessionApi;
+
+#ifndef __NVOC_CLASS_FmSessionApi_TYPEDEF__
+#define __NVOC_CLASS_FmSessionApi_TYPEDEF__
+typedef struct FmSessionApi FmSessionApi;
+#endif /* __NVOC_CLASS_FmSessionApi_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_FmSessionApi
+#define __nvoc_class_id_FmSessionApi 0xdfbd08
+#endif /* __nvoc_class_id_FmSessionApi */
+
+
+
+struct MIGMonitorSession;
+
+#ifndef __NVOC_CLASS_MIGMonitorSession_TYPEDEF__
+#define __NVOC_CLASS_MIGMonitorSession_TYPEDEF__
+typedef struct MIGMonitorSession MIGMonitorSession;
+#endif /* __NVOC_CLASS_MIGMonitorSession_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_MIGMonitorSession
+#define __nvoc_class_id_MIGMonitorSession 0x29e15c
+#endif /* __nvoc_class_id_MIGMonitorSession */
+
+
+
+struct TimerApi;
+
+#ifndef __NVOC_CLASS_TimerApi_TYPEDEF__
+#define __NVOC_CLASS_TimerApi_TYPEDEF__
+typedef struct TimerApi TimerApi;
+#endif /* __NVOC_CLASS_TimerApi_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_TimerApi
+#define __nvoc_class_id_TimerApi 0xb13ac4
+#endif /* __nvoc_class_id_TimerApi */
+
+
+
+struct KernelSMDebuggerSession;
+
+#ifndef __NVOC_CLASS_KernelSMDebuggerSession_TYPEDEF__
+#define __NVOC_CLASS_KernelSMDebuggerSession_TYPEDEF__
+typedef struct KernelSMDebuggerSession KernelSMDebuggerSession;
+#endif /* __NVOC_CLASS_KernelSMDebuggerSession_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_KernelSMDebuggerSession
+#define __nvoc_class_id_KernelSMDebuggerSession 0x4adc81
+#endif /* __nvoc_class_id_KernelSMDebuggerSession */
+
+
+
+// NVOC only expands macros inside a class; use the stub class below.
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
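+//
+// Editor's note, an illustrative sketch of the mechanism (inferred from the
+// guards in this patch, e.g. g_resource_nvoc.c defining
+// NVOC_RESOURCE_H_PRIVATE_ACCESS_ALLOWED before its includes): the owning
+// source file opts in with
+//
+//     #define NVOC_RESOURCE_FWD_DECLS_H_PRIVATE_ACCESS_ALLOWED
+//     #include "g_resource_fwd_decls_nvoc.h"   // PRIVATE_FIELD(x) -> x
+//
+// while every other translation unit gets NVOC_PRIVATE_FIELD(x), which is
+// assumed to rename the field so out-of-class references fail to compile.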
+#ifdef NVOC_RESOURCE_FWD_DECLS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI +struct NVOC_METADATA__NVOCFwdDeclHack; + +struct RmClientResource; + +#ifndef __NVOC_CLASS_RmClientResource_TYPEDEF__ +#define __NVOC_CLASS_RmClientResource_TYPEDEF__ +typedef struct RmClientResource RmClientResource; +#endif /* __NVOC_CLASS_RmClientResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmClientResource +#define __nvoc_class_id_RmClientResource 0x37a701 +#endif /* __nvoc_class_id_RmClientResource */ + +struct Device; + +#ifndef __NVOC_CLASS_Device_TYPEDEF__ +#define __NVOC_CLASS_Device_TYPEDEF__ +typedef struct Device Device; +#endif /* __NVOC_CLASS_Device_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Device +#define __nvoc_class_id_Device 0xe0ac20 +#endif /* __nvoc_class_id_Device */ + +struct Hdacodec; + +#ifndef __NVOC_CLASS_Hdacodec_TYPEDEF__ +#define __NVOC_CLASS_Hdacodec_TYPEDEF__ +typedef struct Hdacodec Hdacodec; +#endif /* __NVOC_CLASS_Hdacodec_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Hdacodec +#define __nvoc_class_id_Hdacodec 0xf59a20 +#endif /* __nvoc_class_id_Hdacodec */ + +struct Subdevice; + +#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__ +#define __NVOC_CLASS_Subdevice_TYPEDEF__ +typedef struct Subdevice Subdevice; +#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Subdevice +#define __nvoc_class_id_Subdevice 0x4b01b3 +#endif /* __nvoc_class_id_Subdevice */ + +struct SystemMemory; + +#ifndef __NVOC_CLASS_SystemMemory_TYPEDEF__ +#define __NVOC_CLASS_SystemMemory_TYPEDEF__ +typedef struct SystemMemory SystemMemory; +#endif /* __NVOC_CLASS_SystemMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SystemMemory +#define __nvoc_class_id_SystemMemory 0x007a98 +#endif /* __nvoc_class_id_SystemMemory */ + +struct OsDescMemory; + +#ifndef __NVOC_CLASS_OsDescMemory_TYPEDEF__ +#define __NVOC_CLASS_OsDescMemory_TYPEDEF__ +typedef struct OsDescMemory OsDescMemory; +#endif /* __NVOC_CLASS_OsDescMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OsDescMemory +#define __nvoc_class_id_OsDescMemory 0xb3dacd +#endif /* __nvoc_class_id_OsDescMemory */ + +struct SyncpointMemory; + +#ifndef __NVOC_CLASS_SyncpointMemory_TYPEDEF__ +#define __NVOC_CLASS_SyncpointMemory_TYPEDEF__ +typedef struct SyncpointMemory SyncpointMemory; +#endif /* __NVOC_CLASS_SyncpointMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SyncpointMemory +#define __nvoc_class_id_SyncpointMemory 0x529def +#endif /* __nvoc_class_id_SyncpointMemory */ + +struct DispSfUser; + +#ifndef __NVOC_CLASS_DispSfUser_TYPEDEF__ +#define __NVOC_CLASS_DispSfUser_TYPEDEF__ +typedef struct DispSfUser DispSfUser; +#endif /* __NVOC_CLASS_DispSfUser_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSfUser +#define __nvoc_class_id_DispSfUser 0xba7439 +#endif /* __nvoc_class_id_DispSfUser */ + +struct NvDispApi; + +#ifndef __NVOC_CLASS_NvDispApi_TYPEDEF__ +#define __NVOC_CLASS_NvDispApi_TYPEDEF__ +typedef struct NvDispApi NvDispApi; +#endif /* __NVOC_CLASS_NvDispApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvDispApi +#define __nvoc_class_id_NvDispApi 0x36aa0b +#endif /* __nvoc_class_id_NvDispApi */ + +struct DispSwObj; + +#ifndef __NVOC_CLASS_DispSwObj_TYPEDEF__ +#define __NVOC_CLASS_DispSwObj_TYPEDEF__ +typedef struct DispSwObj DispSwObj; +#endif /* __NVOC_CLASS_DispSwObj_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSwObj +#define __nvoc_class_id_DispSwObj 0x6aa5e2 +#endif /* __nvoc_class_id_DispSwObj */ + +struct DispCommon; + +#ifndef 
__NVOC_CLASS_DispCommon_TYPEDEF__ +#define __NVOC_CLASS_DispCommon_TYPEDEF__ +typedef struct DispCommon DispCommon; +#endif /* __NVOC_CLASS_DispCommon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispCommon +#define __nvoc_class_id_DispCommon 0x41f4f2 +#endif /* __nvoc_class_id_DispCommon */ + +struct DispChannelPio; + +#ifndef __NVOC_CLASS_DispChannelPio_TYPEDEF__ +#define __NVOC_CLASS_DispChannelPio_TYPEDEF__ +typedef struct DispChannelPio DispChannelPio; +#endif /* __NVOC_CLASS_DispChannelPio_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannelPio +#define __nvoc_class_id_DispChannelPio 0x10dec3 +#endif /* __nvoc_class_id_DispChannelPio */ + +struct DispChannelDma; + +#ifndef __NVOC_CLASS_DispChannelDma_TYPEDEF__ +#define __NVOC_CLASS_DispChannelDma_TYPEDEF__ +typedef struct DispChannelDma DispChannelDma; +#endif /* __NVOC_CLASS_DispChannelDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannelDma +#define __nvoc_class_id_DispChannelDma 0xfe3d2e +#endif /* __nvoc_class_id_DispChannelDma */ + +struct DispCapabilities; + +#ifndef __NVOC_CLASS_DispCapabilities_TYPEDEF__ +#define __NVOC_CLASS_DispCapabilities_TYPEDEF__ +typedef struct DispCapabilities DispCapabilities; +#endif /* __NVOC_CLASS_DispCapabilities_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispCapabilities +#define __nvoc_class_id_DispCapabilities 0x99db3e +#endif /* __nvoc_class_id_DispCapabilities */ + +struct ContextDma; + +#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__ +#define __NVOC_CLASS_ContextDma_TYPEDEF__ +typedef struct ContextDma ContextDma; +#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ContextDma +#define __nvoc_class_id_ContextDma 0x88441b +#endif /* __nvoc_class_id_ContextDma */ + +struct Event; + +#ifndef __NVOC_CLASS_Event_TYPEDEF__ +#define __NVOC_CLASS_Event_TYPEDEF__ +typedef struct Event Event; +#endif /* __NVOC_CLASS_Event_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Event +#define __nvoc_class_id_Event 0xa4ecfc +#endif /* __nvoc_class_id_Event */ + +struct LockStressObject; + +#ifndef __NVOC_CLASS_LockStressObject_TYPEDEF__ +#define __NVOC_CLASS_LockStressObject_TYPEDEF__ +typedef struct LockStressObject LockStressObject; +#endif /* __NVOC_CLASS_LockStressObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_LockStressObject +#define __nvoc_class_id_LockStressObject 0xecce10 +#endif /* __nvoc_class_id_LockStressObject */ + +struct LockTestRelaxedDupObject; + +#ifndef __NVOC_CLASS_LockTestRelaxedDupObject_TYPEDEF__ +#define __NVOC_CLASS_LockTestRelaxedDupObject_TYPEDEF__ +typedef struct LockTestRelaxedDupObject LockTestRelaxedDupObject; +#endif /* __NVOC_CLASS_LockTestRelaxedDupObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_LockTestRelaxedDupObject +#define __nvoc_class_id_LockTestRelaxedDupObject 0x19e861 +#endif /* __nvoc_class_id_LockTestRelaxedDupObject */ + + + +struct NVOCFwdDeclHack { + + // Metadata starts with RTTI structure. 
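+    // Editor's note: every NVOC_METADATA__* structure in this patch begins
+    // with an NVOC_RTTI member, so the two union views below locate the same
+    // RTTI data; legacy readers of __nvoc_rtti and newer readers of
+    // __nvoc_metadata_ptr stay compatible.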
+ union { + const struct NVOC_METADATA__NVOCFwdDeclHack *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Ancestor object pointers for `staticCast` feature + struct NVOCFwdDeclHack *__nvoc_pbase_NVOCFwdDeclHack; // nvocfwddeclhack + + // Data members + struct RmClientResource *PRIVATE_FIELD(RmClientResource_NV01_ROOT); + struct RmClientResource *PRIVATE_FIELD(RmClientResource_NV01_ROOT_NON_PRIV); + struct RmClientResource *PRIVATE_FIELD(RmClientResource_NV01_ROOT_CLIENT); + struct Device *PRIVATE_FIELD(Device_NV01_DEVICE_0); + struct Hdacodec *PRIVATE_FIELD(Hdacodec_GF100_HDACODEC); + struct Subdevice *PRIVATE_FIELD(Subdevice_NV20_SUBDEVICE_0); + struct SystemMemory *PRIVATE_FIELD(SystemMemory_NV01_MEMORY_SYSTEM); + struct OsDescMemory *PRIVATE_FIELD(OsDescMemory_NV01_MEMORY_SYSTEM_OS_DESCRIPTOR); + struct SyncpointMemory *PRIVATE_FIELD(SyncpointMemory_NV01_MEMORY_SYNCPOINT); + struct DispSfUser *PRIVATE_FIELD(DispSfUser_NVC671_DISP_SF_USER); + struct DispSfUser *PRIVATE_FIELD(DispSfUser_NVC971_DISP_SF_USER); + struct DispSfUser *PRIVATE_FIELD(DispSfUser_NVCC71_DISP_SF_USER); + struct NvDispApi *PRIVATE_FIELD(NvDispApi_NVC670_DISPLAY); + struct NvDispApi *PRIVATE_FIELD(NvDispApi_NVC970_DISPLAY); + struct NvDispApi *PRIVATE_FIELD(NvDispApi_NVCC70_DISPLAY); + struct DispSwObj *PRIVATE_FIELD(DispSwObj_NVC372_DISPLAY_SW); + struct DispCommon *PRIVATE_FIELD(DispCommon_NV04_DISPLAY_COMMON); + struct DispChannelPio *PRIVATE_FIELD(DispChannelPio_NVC67A_CURSOR_IMM_CHANNEL_PIO); + struct DispChannelPio *PRIVATE_FIELD(DispChannelPio_NVC97A_CURSOR_IMM_CHANNEL_PIO); + struct DispChannelPio *PRIVATE_FIELD(DispChannelPio_NVCC7A_CURSOR_IMM_CHANNEL_PIO); + struct DispChannelDma *PRIVATE_FIELD(DispChannelDma_NVC67B_WINDOW_IMM_CHANNEL_DMA); + struct DispChannelDma *PRIVATE_FIELD(DispChannelDma_NVC67D_CORE_CHANNEL_DMA); + struct DispChannelDma *PRIVATE_FIELD(DispChannelDma_NVC77F_ANY_CHANNEL_DMA); + struct DispChannelDma *PRIVATE_FIELD(DispChannelDma_NVC67E_WINDOW_CHANNEL_DMA); + struct DispCapabilities *PRIVATE_FIELD(DispCapabilities_NVC673_DISP_CAPABILITIES); + struct DispChannelDma *PRIVATE_FIELD(DispChannelDma_NVC97B_WINDOW_IMM_CHANNEL_DMA); + struct DispChannelDma *PRIVATE_FIELD(DispChannelDma_NVC97D_CORE_CHANNEL_DMA); + struct DispChannelDma *PRIVATE_FIELD(DispChannelDma_NVC97E_WINDOW_CHANNEL_DMA); + struct DispCapabilities *PRIVATE_FIELD(DispCapabilities_NVC973_DISP_CAPABILITIES); + struct DispCapabilities *PRIVATE_FIELD(DispCapabilities_NVCC73_DISP_CAPABILITIES); + struct DispChannelDma *PRIVATE_FIELD(DispChannelDma_NVCC7B_WINDOW_IMM_CHANNEL_DMA); + struct DispChannelDma *PRIVATE_FIELD(DispChannelDma_NVCC7D_CORE_CHANNEL_DMA); + struct DispChannelDma *PRIVATE_FIELD(DispChannelDma_NVCC7E_WINDOW_CHANNEL_DMA); + struct ContextDma *PRIVATE_FIELD(ContextDma_NV01_CONTEXT_DMA); + struct Event *PRIVATE_FIELD(Event_NV01_EVENT); + struct Event *PRIVATE_FIELD(Event_NV01_EVENT_OS_EVENT); + struct Event *PRIVATE_FIELD(Event_NV01_EVENT_KERNEL_CALLBACK); + struct Event *PRIVATE_FIELD(Event_NV01_EVENT_KERNEL_CALLBACK_EX); + struct LockStressObject *PRIVATE_FIELD(LockStressObject_LOCK_STRESS_OBJECT); + struct LockTestRelaxedDupObject *PRIVATE_FIELD(LockTestRelaxedDupObject_LOCK_TEST_RELAXED_DUP_OBJECT); +}; + + +// Metadata with per-class RTTI +struct NVOC_METADATA__NVOCFwdDeclHack { + const struct NVOC_RTTI rtti; +}; + +#ifndef __NVOC_CLASS_NVOCFwdDeclHack_TYPEDEF__ +#define __NVOC_CLASS_NVOCFwdDeclHack_TYPEDEF__ +typedef struct NVOCFwdDeclHack NVOCFwdDeclHack; +#endif /* 
__NVOC_CLASS_NVOCFwdDeclHack_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NVOCFwdDeclHack +#define __nvoc_class_id_NVOCFwdDeclHack 0x0d01f5 +#endif /* __nvoc_class_id_NVOCFwdDeclHack */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NVOCFwdDeclHack; + +#define __staticCast_NVOCFwdDeclHack(pThis) \ + ((pThis)->__nvoc_pbase_NVOCFwdDeclHack) + +#ifdef __nvoc_resource_fwd_decls_h_disabled +#define __dynamicCast_NVOCFwdDeclHack(pThis) ((NVOCFwdDeclHack*) NULL) +#else //__nvoc_resource_fwd_decls_h_disabled +#define __dynamicCast_NVOCFwdDeclHack(pThis) \ + ((NVOCFwdDeclHack*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(NVOCFwdDeclHack))) +#endif //__nvoc_resource_fwd_decls_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_NVOCFwdDeclHack(NVOCFwdDeclHack**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_NVOCFwdDeclHack(NVOCFwdDeclHack**, Dynamic*, NvU32); +#define __objCreate_NVOCFwdDeclHack(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_NVOCFwdDeclHack((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#undef PRIVATE_FIELD + + +#endif // RESOURCE_FWD_DECLS_H + + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_RESOURCE_FWD_DECLS_NVOC_H_ diff --git a/src/nvidia/generated/g_resource_nvoc.c b/src/nvidia/generated/g_resource_nvoc.c new file mode 100644 index 0000000..be3dc90 --- /dev/null +++ b/src/nvidia/generated/g_resource_nvoc.c @@ -0,0 +1,489 @@ +#define NVOC_RESOURCE_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_resource_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x8ef259 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +// Forward declarations for RmResourceCommon +void __nvoc_init__RmResourceCommon(RmResourceCommon*); +void __nvoc_init_funcTable_RmResourceCommon(RmResourceCommon*); +NV_STATUS __nvoc_ctor_RmResourceCommon(RmResourceCommon*); +void __nvoc_init_dataField_RmResourceCommon(RmResourceCommon*); +void __nvoc_dtor_RmResourceCommon(RmResourceCommon*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__RmResourceCommon; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__RmResourceCommon; + +// Down-thunk(s) to bridge RmResourceCommon methods from ancestors (if any) + +// Up-thunk(s) to bridge RmResourceCommon methods to ancestors (if any) + +// Not instantiable because it's not derived from class "Object" +const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmResourceCommon), + /*classId=*/ classId(RmResourceCommon), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmResourceCommon", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo__RmResourceCommon, + /*pExportInfo=*/ &__nvoc_export_info__RmResourceCommon +}; + + +// Metadata with per-class RTTI +static const struct NVOC_METADATA__RmResourceCommon __nvoc_metadata__RmResourceCommon = { + .rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmResourceCommon, + .rtti.offset = 0, +}; + + +// Dynamic down-casting information 
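+// Editor's note (assumed semantics, not confirmed by this patch alone):
+// __nvoc_dynamicCast() presumably matches the requested class against each
+// entry in relatives[] and applies that entry's rtti.offset. A class with no
+// ancestors, such as RmResourceCommon here, therefore lists only itself,
+// while RmResource further down lists itself plus three ancestors.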
+const struct NVOC_CASTINFO __nvoc_castinfo__RmResourceCommon = { + .numRelatives = 1, + .relatives = { + &__nvoc_metadata__RmResourceCommon.rtti, // [0]: (rmrescmn) this + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__RmResourceCommon = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmResourceCommon(RmResourceCommon *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RmResourceCommon(RmResourceCommon *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResourceCommon(RmResourceCommon *pThis) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_RmResourceCommon(pThis); + + status = __nvoc_rmrescmnConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_RmResourceCommon_fail__init; + goto __nvoc_ctor_RmResourceCommon_exit; // Success + +__nvoc_ctor_RmResourceCommon_fail__init: +__nvoc_ctor_RmResourceCommon_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_RmResourceCommon_1(RmResourceCommon *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_RmResourceCommon_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_RmResourceCommon(RmResourceCommon *pThis) { + __nvoc_init_funcTable_RmResourceCommon_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__RmResourceCommon(RmResourceCommon *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_RmResourceCommon = pThis; // (rmrescmn) this + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__RmResourceCommon; // (rmrescmn) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_RmResourceCommon(pThis); +} + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x03610d = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +// Forward declarations for RmResource +void __nvoc_init__RsResource(RsResource*); +void __nvoc_init__RmResourceCommon(RmResourceCommon*); +void __nvoc_init__RmResource(RmResource*); +void __nvoc_init_funcTable_RmResource(RmResource*); +NV_STATUS __nvoc_ctor_RmResource(RmResource*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_RmResource(RmResource*); +void __nvoc_dtor_RmResource(RmResource*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__RmResource; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__RmResource; + +// Down-thunk(s) to bridge RmResource methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this + +// Up-thunk(s) to bridge RmResource methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // this +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmResource), + /*classId=*/ classId(RmResource), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmResource", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RmResource, + /*pCastInfo=*/ &__nvoc_castinfo__RmResource, + /*pExportInfo=*/ &__nvoc_export_info__RmResource +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__RmResource __nvoc_metadata__RmResource = { + .rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmResource, + .rtti.offset = 0, + .metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super + .metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RsResource.rtti.offset = NV_OFFSETOF(RmResource, __nvoc_base_RsResource), + .metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^2 + .metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(RmResource, __nvoc_base_RsResource.__nvoc_base_Object), + .metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super + .metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RmResourceCommon.rtti.offset = 
NV_OFFSETOF(RmResource, __nvoc_base_RmResourceCommon), + + .vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL, // virtual override (res) base (res) + .metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual + .vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual + .vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual + .vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resControl__ = &resControl_IMPL, // virtual + .vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resMap__ = &resMap_IMPL, // virtual + .vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resUnmap__ = &resUnmap_IMPL, // virtual + .vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + 
.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__RmResource = { + .numRelatives = 4, + .relatives = { + &__nvoc_metadata__RmResource.rtti, // [0]: (rmres) this + &__nvoc_metadata__RmResource.metadata__RsResource.rtti, // [1]: (res) super + &__nvoc_metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [2]: (obj) super^2 + &__nvoc_metadata__RmResource.metadata__RmResourceCommon.rtti, // [3]: (rmrescmn) super + } +}; + +// 6 down-thunk(s) defined to bridge methods in RmResource from superclasses + +// rmresAccessCallback: virtual override (res) base (res) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) - NV_OFFSETOF(RmResource, __nvoc_base_RsResource)), pInvokingClient, pAllocParams, accessRight); +} + +// rmresShareCallback: virtual override (res) base (res) +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *) pResource) - NV_OFFSETOF(RmResource, __nvoc_base_RsResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// rmresControlSerialization_Prologue: virtual override (res) base (res) +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) - NV_OFFSETOF(RmResource, __nvoc_base_RsResource)), pCallContext, pParams); +} + +// rmresControlSerialization_Epilogue: virtual override (res) base (res) +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) - NV_OFFSETOF(RmResource, __nvoc_base_RsResource)), pCallContext, pParams); +} + +// rmresControl_Prologue: virtual override (res) base (res) +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) - NV_OFFSETOF(RmResource, __nvoc_base_RsResource)), pCallContext, pParams); +} + +// rmresControl_Epilogue: virtual override (res) base (res) +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + 
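+    // Editor's note: pResource points at the embedded RsResource base inside
+    // RmResource; subtracting NV_OFFSETOF(RmResource, __nvoc_base_RsResource)
+    // recovers the containing derived object before forwarding the call. The
+    // up-thunks below add the same offset to convert in the other direction.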
rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) - NV_OFFSETOF(RmResource, __nvoc_base_RsResource)), pCallContext, pParams); +} + + +// 12 up-thunk(s) defined to bridge methods in RmResource to superclasses + +// rmresCanCopy: virtual inherited (res) base (res) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmResource, __nvoc_base_RsResource))); +} + +// rmresIsDuplicate: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmResource, __nvoc_base_RsResource)), hMemory, pDuplicate); +} + +// rmresPreDestruct: virtual inherited (res) base (res) +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmResource, __nvoc_base_RsResource))); +} + +// rmresControl: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmResource, __nvoc_base_RsResource)), pCallContext, pParams); +} + +// rmresControlFilter: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmResource, __nvoc_base_RsResource)), pCallContext, pParams); +} + +// rmresMap: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmResource, __nvoc_base_RsResource)), pCallContext, pParams, pCpuMapping); +} + +// rmresUnmap: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmResource, __nvoc_base_RsResource)), pCallContext, pCpuMapping); +} + +// rmresIsPartialUnmapSupported: inline virtual inherited (res) base (res) body +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmResource, __nvoc_base_RsResource))); +} + +// rmresMapTo: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmResource, __nvoc_base_RsResource)), pParams); +} + +// rmresUnmapFrom: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmResource, __nvoc_base_RsResource)), pParams); +} + +// rmresGetRefCount: 
virtual inherited (res) base (res) +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmResource, __nvoc_base_RsResource))); +} + +// rmresAddAdditionalDependants: virtual inherited (res) base (res) +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RmResource, __nvoc_base_RsResource)), pReference); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__RmResource = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsResource(RsResource*); +void __nvoc_dtor_RmResourceCommon(RmResourceCommon*); +void __nvoc_dtor_RmResource(RmResource *pThis) { + __nvoc_dtor_RsResource(&pThis->__nvoc_base_RsResource); + __nvoc_dtor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RmResource(RmResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsResource(RsResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_RmResourceCommon(RmResourceCommon* ); +NV_STATUS __nvoc_ctor_RmResource(RmResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsResource(&pThis->__nvoc_base_RsResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RmResource_fail_RsResource; + status = __nvoc_ctor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); + if (status != NV_OK) goto __nvoc_ctor_RmResource_fail_RmResourceCommon; + __nvoc_init_dataField_RmResource(pThis); + + status = __nvoc_rmresConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RmResource_fail__init; + goto __nvoc_ctor_RmResource_exit; // Success + +__nvoc_ctor_RmResource_fail__init: + __nvoc_dtor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); +__nvoc_ctor_RmResource_fail_RmResourceCommon: + __nvoc_dtor_RsResource(&pThis->__nvoc_base_RsResource); +__nvoc_ctor_RmResource_fail_RsResource: +__nvoc_ctor_RmResource_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_RmResource_1(RmResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_RmResource_1 + + +// Initialize vtable(s) for 21 virtual method(s). +void __nvoc_init_funcTable_RmResource(RmResource *pThis) { + __nvoc_init_funcTable_RmResource_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__RmResource(RmResource *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^2 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RsResource; // (res) super + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResourceCommon; // (rmrescmn) super + pThis->__nvoc_pbase_RmResource = pThis; // (rmres) this + + // Recurse to superclass initialization function(s). 
+ __nvoc_init__RsResource(&pThis->__nvoc_base_RsResource); + __nvoc_init__RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^2 + pThis->__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__RmResource.metadata__RsResource; // (res) super + pThis->__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__RmResource; // (rmres) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_RmResource(pThis); +} + +NV_STATUS __nvoc_objCreate_RmResource(RmResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + RmResource *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(RmResource), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(RmResource)); + + pThis->__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__RmResource(pThis); + status = __nvoc_ctor_RmResource(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RmResource_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_RmResource_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_RsResource.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. 
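+    // Editor's note: with NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT the caller
+    // owns the storage, so it is only scrubbed back to zero; otherwise the
+    // memory from __nvoc_handleObjCreateMemAlloc() above is freed.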
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(RmResource)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RmResource(RmResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RmResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_resource_nvoc.h b/src/nvidia/generated/g_resource_nvoc.h new file mode 100644 index 0000000..64b1a27 --- /dev/null +++ b/src/nvidia/generated/g_resource_nvoc.h @@ -0,0 +1,480 @@ + +#ifndef _G_RESOURCE_NVOC_H_ +#define _G_RESOURCE_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#pragma once
+#include "g_resource_nvoc.h"
+
+#ifndef _RESOURCE_H_
+#define _RESOURCE_H_
+
+#include "core/core.h"
+#include "resserv/rs_resource.h"
+#include "rmapi/control.h"
+#include "os/nv_memory_area.h"
+
+/* Forward declarations */
+struct MEMORY_DESCRIPTOR;
+struct OBJVASPACE;
+
+struct RMRES_MEM_INTER_MAP_PARAMS
+{
+    /// [in]
+    OBJGPU *pGpu;
+    RsResourceRef *pMemoryRef;
+    NvBool bSubdeviceHandleProvided;
+
+    /// [out]
+    OBJGPU *pSrcGpu;
+    struct MEMORY_DESCRIPTOR *pSrcMemDesc;
+    NvHandle hMemoryDevice;
+    // This flag will be set when this is an FLA mapping
+    NvBool bFlaMapping;
+};
+
+struct RS_RES_MAP_TO_PARAMS
+{
+    OBJGPU *pGpu;                           ///< [in]
+    OBJGPU *pSrcGpu;                        ///< [in]
+    struct MEMORY_DESCRIPTOR *pSrcMemDesc;  ///< [in]
+    struct MEMORY_DESCRIPTOR **ppMemDesc;   ///< [out]
+    RsResourceRef *pMemoryRef;              ///< [in]
+    NvHandle hBroadcastDevice;              ///< [in]
+    NvHandle hMemoryDevice;                 ///< [in]
+    NvU32 gpuMask;                          ///< [in]
+    NvU64 offset;                           ///< [in]
+    NvU64 length;                           ///< [in]
+    NvU32 flags;                            ///< [in]
+    NvU32 flags2;                           ///< [in]
+    NvU32 kindOverride;                     ///< [in]
+    NvU64 *pDmaOffset;                      ///< [inout]
+    NvBool bSubdeviceHandleProvided;        ///< [in]
+    NvBool bFlaMapping;                     ///< [in]
+};
+
+struct RS_RES_UNMAP_FROM_PARAMS
+{
+    OBJGPU *pGpu;                           ///< [in]
+    NvHandle hMemory;                       ///< [in]
+    NvHandle hBroadcastDevice;              ///< [in]
+    NvU32 gpuMask;                          ///< [in]
+    NvU32 flags;                            ///< [in]
+    NvU64 dmaOffset;                        ///< [in]
+    NvU64 size;                             ///< [in]
+    struct MEMORY_DESCRIPTOR *pMemDesc;     ///< [in]
+    NvBool bSubdeviceHandleProvided;        ///< [in]
+};
+
+struct RS_INTER_MAP_PRIVATE
+{
+    OBJGPU *pGpu;
+    OBJGPU *pSrcGpu;
+    struct MEMORY_DESCRIPTOR *pSrcMemDesc;
+    NvHandle hBroadcastDevice;
+    NvHandle hMemoryDevice;
+    NvU32 gpuMask;
+    NvBool bSubdeviceHandleProvided;
+    NvBool bFlaMapping;
+};
+
+struct RS_INTER_UNMAP_PRIVATE
+{
+    OBJGPU *pGpu;
+    NvHandle hBroadcastDevice;
+    NvU32 gpuMask;
+    NvBool bSubdeviceHandleProvided;
+    NvBool bcState;
+    NvBool bAllocated;                      ///< This struct has been allocated and must be freed
+};
+
+struct RS_CPU_MAPPING_PRIVATE
+{
+    MemoryArea memArea;
+    OBJGPU *pGpu;
+    NvP64 pPriv;
+    NvU32 protect;
+    NvBool bKernel;
+    MemoryRange backingRangeStore;
+};
+
+typedef struct RMRES_MEM_INTER_MAP_PARAMS RMRES_MEM_INTER_MAP_PARAMS;
+
+/*!
+ * All RsResource subclasses in RM must inherit from this class
+ */
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_RESOURCE_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+
+
+// Metadata with per-class RTTI
+struct NVOC_METADATA__RmResourceCommon;
+
+
+struct RmResourceCommon {
+
+    // Metadata starts with RTTI structure.
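+    // (The union below lets __nvoc_metadata_ptr and the legacy __nvoc_rtti
+    // pointer share the same storage; both alias the leading RTTI data, which
+    // is presumably how consumers of earlier NVOC_METADATA_VERSION layouts
+    // keep working against this one.)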
+    union {
+        const struct NVOC_METADATA__RmResourceCommon *__nvoc_metadata_ptr;
+        const struct NVOC_RTTI *__nvoc_rtti;
+    };
+
+    // Ancestor object pointers for `staticCast` feature
+    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;    // rmrescmn
+};
+
+
+// Metadata with per-class RTTI
+struct NVOC_METADATA__RmResourceCommon {
+    const struct NVOC_RTTI rtti;
+};
+
+#ifndef __NVOC_CLASS_RmResourceCommon_TYPEDEF__
+#define __NVOC_CLASS_RmResourceCommon_TYPEDEF__
+typedef struct RmResourceCommon RmResourceCommon;
+#endif /* __NVOC_CLASS_RmResourceCommon_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_RmResourceCommon
+#define __nvoc_class_id_RmResourceCommon 0x8ef259
+#endif /* __nvoc_class_id_RmResourceCommon */
+
+// Casting support
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
+
+#define __staticCast_RmResourceCommon(pThis) \
+    ((pThis)->__nvoc_pbase_RmResourceCommon)
+
+#ifdef __nvoc_resource_h_disabled
+#define __dynamicCast_RmResourceCommon(pThis) ((RmResourceCommon*) NULL)
+#else //__nvoc_resource_h_disabled
+#define __dynamicCast_RmResourceCommon(pThis) \
+    ((RmResourceCommon*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmResourceCommon)))
+#endif //__nvoc_resource_h_disabled
+
+NV_STATUS __nvoc_objCreateDynamic_RmResourceCommon(RmResourceCommon**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_RmResourceCommon(RmResourceCommon**, Dynamic*, NvU32);
+#define __objCreate_RmResourceCommon(ppNewObj, pParent, createFlags) \
+    __nvoc_objCreate_RmResourceCommon((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
+
+
+// Wrapper macros
+
+// Dispatch functions
+NV_STATUS rmrescmnConstruct_IMPL(struct RmResourceCommon *arg_pResourceCommon);
+
+#define __nvoc_rmrescmnConstruct(arg_pResourceCommon) rmrescmnConstruct_IMPL(arg_pResourceCommon)
+#undef PRIVATE_FIELD
+
+
+/*!
+ * Utility base class for all RsResource subclasses in RM. It doesn't have to
+ * be used, but if it isn't, RmResourceCommon must be inherited manually.
+ */
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_RESOURCE_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__RmResource;
+struct NVOC_METADATA__RsResource;
+struct NVOC_METADATA__RmResourceCommon;
+struct NVOC_VTABLE__RmResource;
+
+
+struct RmResource {
+
+    // Metadata starts with RTTI structure.
+    union {
+        const struct NVOC_METADATA__RmResource *__nvoc_metadata_ptr;
+        const struct NVOC_RTTI *__nvoc_rtti;
+    };
+
+    // Parent (i.e.
superclass or base class) objects + struct RsResource __nvoc_base_RsResource; + struct RmResourceCommon __nvoc_base_RmResourceCommon; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^2 + struct RsResource *__nvoc_pbase_RsResource; // res super + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super + struct RmResource *__nvoc_pbase_RmResource; // rmres + + // Data members + NvU32 rpcGpuInstance; + NvBool bRpcFree; +}; + + +// Vtable with 21 per-class function pointers +struct NVOC_VTABLE__RmResource { + NvBool (*__rmresAccessCallback__)(struct RmResource * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual override (res) base (res) + NvBool (*__rmresShareCallback__)(struct RmResource * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual override (res) base (res) + NV_STATUS (*__rmresGetMemInterMapParams__)(struct RmResource * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual + NV_STATUS (*__rmresCheckMemInterUnmap__)(struct RmResource * /*this*/, NvBool); // virtual + NV_STATUS (*__rmresGetMemoryMappingDescriptor__)(struct RmResource * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual + NV_STATUS (*__rmresControlSerialization_Prologue__)(struct RmResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual override (res) base (res) + void (*__rmresControlSerialization_Epilogue__)(struct RmResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual override (res) base (res) + NV_STATUS (*__rmresControl_Prologue__)(struct RmResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual override (res) base (res) + void (*__rmresControl_Epilogue__)(struct RmResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual override (res) base (res) + NvBool (*__rmresCanCopy__)(struct RmResource * /*this*/); // virtual inherited (res) base (res) + NV_STATUS (*__rmresIsDuplicate__)(struct RmResource * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (res) + void (*__rmresPreDestruct__)(struct RmResource * /*this*/); // virtual inherited (res) base (res) + NV_STATUS (*__rmresControl__)(struct RmResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (res) + NV_STATUS (*__rmresControlFilter__)(struct RmResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (res) + NV_STATUS (*__rmresMap__)(struct RmResource * /*this*/, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (res) base (res) + NV_STATUS (*__rmresUnmap__)(struct RmResource * /*this*/, struct CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (res) base (res) + NvBool (*__rmresIsPartialUnmapSupported__)(struct RmResource * /*this*/); // inline virtual inherited (res) base (res) body + NV_STATUS (*__rmresMapTo__)(struct RmResource * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (res) + NV_STATUS (*__rmresUnmapFrom__)(struct RmResource * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (res) + NvU32 (*__rmresGetRefCount__)(struct RmResource * /*this*/); // virtual inherited (res) base (res) + void (*__rmresAddAdditionalDependants__)(struct RsClient *, struct RmResource * /*this*/, RsResourceRef *); // virtual inherited (res) base (res) +}; + +// Metadata with per-class 
RTTI and vtable with ancestor(s) +struct NVOC_METADATA__RmResource { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__RsResource metadata__RsResource; + const struct NVOC_METADATA__RmResourceCommon metadata__RmResourceCommon; + const struct NVOC_VTABLE__RmResource vtable; +}; + +#ifndef __NVOC_CLASS_RmResource_TYPEDEF__ +#define __NVOC_CLASS_RmResource_TYPEDEF__ +typedef struct RmResource RmResource; +#endif /* __NVOC_CLASS_RmResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmResource +#define __nvoc_class_id_RmResource 0x03610d +#endif /* __nvoc_class_id_RmResource */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +#define __staticCast_RmResource(pThis) \ + ((pThis)->__nvoc_pbase_RmResource) + +#ifdef __nvoc_resource_h_disabled +#define __dynamicCast_RmResource(pThis) ((RmResource*) NULL) +#else //__nvoc_resource_h_disabled +#define __dynamicCast_RmResource(pThis) \ + ((RmResource*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmResource))) +#endif //__nvoc_resource_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_RmResource(RmResource**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RmResource(RmResource**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_RmResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_RmResource((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define rmresAccessCallback_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define rmresAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) rmresAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define rmresShareCallback_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define rmresShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) rmresShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define rmresGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define rmresGetMemInterMapParams(pRmResource, pParams) rmresGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define rmresCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define rmresCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) rmresCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define rmresGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define rmresGetMemoryMappingDescriptor(pRmResource, ppMemDesc) rmresGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define rmresControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define rmresControlSerialization_Prologue(pResource, pCallContext, pParams) rmresControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define rmresControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define rmresControlSerialization_Epilogue(pResource, pCallContext, pParams) rmresControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define rmresControl_Prologue_FNPTR(pResource) 
pResource->__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define rmresControl_Prologue(pResource, pCallContext, pParams) rmresControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define rmresControl_Epilogue_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define rmresControl_Epilogue(pResource, pCallContext, pParams) rmresControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define rmresCanCopy_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define rmresCanCopy(pResource) rmresCanCopy_DISPATCH(pResource) +#define rmresIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define rmresIsDuplicate(pResource, hMemory, pDuplicate) rmresIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define rmresPreDestruct_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define rmresPreDestruct(pResource) rmresPreDestruct_DISPATCH(pResource) +#define rmresControl_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControl__ +#define rmresControl(pResource, pCallContext, pParams) rmresControl_DISPATCH(pResource, pCallContext, pParams) +#define rmresControlFilter_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define rmresControlFilter(pResource, pCallContext, pParams) rmresControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define rmresMap_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMap__ +#define rmresMap(pResource, pCallContext, pParams, pCpuMapping) rmresMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define rmresUnmap_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmap__ +#define rmresUnmap(pResource, pCallContext, pCpuMapping) rmresUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define rmresIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define rmresIsPartialUnmapSupported(pResource) rmresIsPartialUnmapSupported_DISPATCH(pResource) +#define rmresMapTo_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define rmresMapTo(pResource, pParams) rmresMapTo_DISPATCH(pResource, pParams) +#define rmresUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define rmresUnmapFrom(pResource, pParams) rmresUnmapFrom_DISPATCH(pResource, pParams) +#define rmresGetRefCount_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define rmresGetRefCount(pResource) rmresGetRefCount_DISPATCH(pResource) +#define rmresAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define rmresAddAdditionalDependants(pClient, pResource, pReference) rmresAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NvBool rmresAccessCallback_DISPATCH(struct RmResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__rmresAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool rmresShareCallback_DISPATCH(struct RmResource *pResource, struct RsClient 
*pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__rmresShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS rmresGetMemInterMapParams_DISPATCH(struct RmResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS rmresCheckMemInterUnmap_DISPATCH(struct RmResource *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS rmresGetMemoryMappingDescriptor_DISPATCH(struct RmResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS rmresControlSerialization_Prologue_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void rmresControlSerialization_Epilogue_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS rmresControl_Prologue_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void rmresControl_Epilogue_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool rmresCanCopy_DISPATCH(struct RmResource *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__rmresCanCopy__(pResource); +} + +static inline NV_STATUS rmresIsDuplicate_DISPATCH(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__rmresIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void rmresPreDestruct_DISPATCH(struct RmResource *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__rmresPreDestruct__(pResource); +} + +static inline NV_STATUS rmresControl_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__rmresControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS rmresControlFilter_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__rmresControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS rmresMap_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__rmresMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS rmresUnmap_DISPATCH(struct 
RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__rmresUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool rmresIsPartialUnmapSupported_DISPATCH(struct RmResource *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__rmresIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS rmresMapTo_DISPATCH(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__rmresMapTo__(pResource, pParams); +} + +static inline NV_STATUS rmresUnmapFrom_DISPATCH(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__rmresUnmapFrom__(pResource, pParams); +} + +static inline NvU32 rmresGetRefCount_DISPATCH(struct RmResource *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__rmresGetRefCount__(pResource); +} + +static inline void rmresAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__rmresAddAdditionalDependants__(pClient, pResource, pReference); +} + +NvBool rmresAccessCallback_IMPL(struct RmResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); + +NvBool rmresShareCallback_IMPL(struct RmResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); + +NV_STATUS rmresGetMemInterMapParams_IMPL(struct RmResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); + +NV_STATUS rmresCheckMemInterUnmap_IMPL(struct RmResource *pRmResource, NvBool bSubdeviceHandleProvided); + +NV_STATUS rmresGetMemoryMappingDescriptor_IMPL(struct RmResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); + +NV_STATUS rmresControlSerialization_Prologue_IMPL(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +void rmresControlSerialization_Epilogue_IMPL(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +NV_STATUS rmresControl_Prologue_IMPL(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +void rmresControl_Epilogue_IMPL(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +NV_STATUS rmresConstruct_IMPL(struct RmResource *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_rmresConstruct(arg_pResource, arg_pCallContext, arg_pParams) rmresConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif // _RESOURCE_H_ + + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_RESOURCE_NVOC_H_ diff --git a/src/nvidia/generated/g_resserv_nvoc.h b/src/nvidia/generated/g_resserv_nvoc.h new file mode 100644 index 0000000..120a0d8 --- /dev/null +++ b/src/nvidia/generated/g_resserv_nvoc.h @@ -0,0 +1,457 @@ + +#ifndef _G_RESSERV_NVOC_H_ +#define _G_RESSERV_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+#include "g_resserv_nvoc.h"
+
+#ifndef _RESSERV_H_
+#define _RESSERV_H_
+
+#include "nvoc/object.h"
+
+#include "containers/list.h"
+#include "containers/map.h"
+#include "containers/multimap.h"
+
+#include "nvtypes.h"
+#include "nvstatus.h"
+#include "nvos.h"
+#include "nvsecurityinfo.h"
+#include "rs_access.h"
+
+#if LOCK_VAL_ENABLED
+#include "lockval/lockval.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if (RS_STANDALONE)
+#include <stdio.h>
+
+#ifndef NV_PRINTF
+extern int g_debugLevel;
+#define NV_PRINTF(level, format, ...) if (g_debugLevel) { printf(format, ##__VA_ARGS__); }
+#endif
+#include "utils/nvprintf.h"
+#endif
+
+//
+// Forward declarations
+//
+typedef struct RsServer RsServer;
+typedef struct RsDomain RsDomain;
+typedef struct CLIENT_ENTRY CLIENT_ENTRY;
+typedef struct RsResourceDep RsResourceDep;
+typedef struct RsResourceRef RsResourceRef;
+typedef struct RsInterMapping RsInterMapping;
+typedef struct RsCpuMapping RsCpuMapping;
+
+// RS-TODO INTERNAL and EXTERNAL params should be different structures
+typedef struct RS_CLIENT_FREE_PARAMS_INTERNAL RS_CLIENT_FREE_PARAMS_INTERNAL;
+typedef struct RS_CLIENT_FREE_PARAMS_INTERNAL RS_CLIENT_FREE_PARAMS;
+typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_RES_ALLOC_PARAMS_INTERNAL;
+typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_RES_ALLOC_PARAMS;
+typedef struct RS_RES_DUP_PARAMS_INTERNAL RS_RES_DUP_PARAMS_INTERNAL;
+typedef struct RS_RES_DUP_PARAMS_INTERNAL RS_RES_DUP_PARAMS;
+typedef struct RS_RES_SHARE_PARAMS_INTERNAL RS_RES_SHARE_PARAMS_INTERNAL;
+typedef struct RS_RES_SHARE_PARAMS_INTERNAL RS_RES_SHARE_PARAMS;
+typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_CLIENT_ALLOC_PARAMS_INTERNAL;
+typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_CLIENT_ALLOC_PARAMS;
+typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS_INTERNAL;
+typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS;
+typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_RES_CONTROL_PARAMS_INTERNAL;
+typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_RES_CONTROL_PARAMS;
+typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_LEGACY_CONTROL_PARAMS;
+typedef struct RS_LEGACY_ALLOC_PARAMS RS_LEGACY_ALLOC_PARAMS;
+typedef struct RS_LEGACY_FREE_PARAMS RS_LEGACY_FREE_PARAMS;
+
+typedef struct RS_CPU_MAP_PARAMS RS_CPU_MAP_PARAMS;
+typedef struct RS_CPU_UNMAP_PARAMS RS_CPU_UNMAP_PARAMS;
+typedef struct RS_INTER_MAP_PARAMS RS_INTER_MAP_PARAMS;
+typedef struct RS_INTER_UNMAP_PARAMS RS_INTER_UNMAP_PARAMS; + +// Forward declarations for structs defined by user +typedef struct RS_RES_MAP_TO_PARAMS RS_RES_MAP_TO_PARAMS; +typedef struct RS_RES_UNMAP_FROM_PARAMS RS_RES_UNMAP_FROM_PARAMS; +typedef struct RS_INTER_MAP_PRIVATE RS_INTER_MAP_PRIVATE; +typedef struct RS_INTER_UNMAP_PRIVATE RS_INTER_UNMAP_PRIVATE; +typedef struct RS_CPU_MAPPING_PRIVATE RS_CPU_MAPPING_PRIVATE; + +typedef struct RS_FREE_STACK RS_FREE_STACK; +typedef struct CALL_CONTEXT CALL_CONTEXT; +typedef struct ACCESS_CONTROL ACCESS_CONTROL; +typedef struct RS_ITERATOR RS_ITERATOR; +typedef struct RS_ORDERED_ITERATOR RS_ORDERED_ITERATOR; +typedef struct RS_SHARE_ITERATOR RS_SHARE_ITERATOR; +typedef struct API_STATE API_STATE; +typedef struct RS_LOCK_INFO RS_LOCK_INFO; +typedef struct RS_CONTROL_COOKIE RS_CONTROL_COOKIE; +typedef NV_STATUS RsCtrlFunc(struct RS_RES_CONTROL_PARAMS_INTERNAL*); + + +struct RsClient; + +#ifndef __NVOC_CLASS_RsClient_TYPEDEF__ +#define __NVOC_CLASS_RsClient_TYPEDEF__ +typedef struct RsClient RsClient; +#endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsClient +#define __nvoc_class_id_RsClient 0x8f87e5 +#endif /* __nvoc_class_id_RsClient */ + + + +struct RsResource; + +#ifndef __NVOC_CLASS_RsResource_TYPEDEF__ +#define __NVOC_CLASS_RsResource_TYPEDEF__ +typedef struct RsResource RsResource; +#endif /* __NVOC_CLASS_RsResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsResource +#define __nvoc_class_id_RsResource 0xd551cb +#endif /* __nvoc_class_id_RsResource */ + + + +struct RsShared; + +#ifndef __NVOC_CLASS_RsShared_TYPEDEF__ +#define __NVOC_CLASS_RsShared_TYPEDEF__ +typedef struct RsShared RsShared; +#endif /* __NVOC_CLASS_RsShared_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsShared +#define __nvoc_class_id_RsShared 0x830542 +#endif /* __nvoc_class_id_RsShared */ + + + +MAKE_LIST(RsResourceRefList, RsResourceRef*); +MAKE_LIST(RsResourceList, RsResource*); +MAKE_LIST(RsHandleList, NvHandle); +MAKE_LIST(RsShareList, RS_SHARE_POLICY); +MAKE_MULTIMAP(RsIndex, RsResourceRef*); + +typedef NV_STATUS (*CtrlImpl_t)(struct RsClient*, struct RsResource*, void*); + +typedef void *PUID_TOKEN; + +// +// Defines +// + +/// Domain handles must start at this base value +#define RS_DOMAIN_HANDLE_BASE 0xD0D00000 + +/// Client handles must start at this base value +#define RS_CLIENT_HANDLE_BASE 0xC1D00000 + +/// Internal Client handles start at this base value +#define RS_CLIENT_INTERNAL_HANDLE_BASE 0xC1E00000 + +/// VF Client handles start at this base value +#define RS_CLIENT_VF_HANDLE_BASE 0xE0000000 + +/// Get the VF client handle range for gfid +#define RS_CLIENT_GET_VF_HANDLE_BASE(gfid) (RS_CLIENT_VF_HANDLE_BASE + ((gfid) - 1) * RS_CLIENT_HANDLE_MAX) + +// +// Print a warning if any client's resource count exceeds this +// threshold. Unless this was intentional, this is likely a client bug. 
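+//
+// (Worked example for RS_CLIENT_GET_VF_HANDLE_BASE above, using the
+// RS_CLIENT_HANDLE_MAX value defined below: gfid 1 gets the handle range
+// 0xE0000000..0xE00FFFFF, and gfid 2 starts at 0xE0000000 + 0x100000 =
+// 0xE0100000.)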
+// +#define RS_CLIENT_RESOURCE_WARNING_THRESHOLD 100000 + +#define RS_CLIENT_HANDLE_MAX 0x100000 // Must be power of two +#define RS_CLIENT_HANDLE_BUCKET_COUNT 0x400 // 1024 +#define RS_CLIENT_HANDLE_BUCKET_MASK 0x3FF + + +/// The default maximum number of domains a resource server can allocate +#define RS_MAX_DOMAINS_DEFAULT 4096 + +/// The maximum length of a line of ancestry for resource references +#define RS_MAX_RESOURCE_DEPTH 6 + +/// RS_LOCK_FLAGS +#define RS_LOCK_FLAGS_NO_TOP_LOCK NVBIT(0) +#define RS_LOCK_FLAGS_NO_CLIENT_LOCK NVBIT(1) +#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_1 NVBIT(2) +#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_2 NVBIT(3) +#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_3 NVBIT(4) +#define RS_LOCK_FLAGS_NO_DEPENDANT_SESSION_LOCK NVBIT(5) +#define RS_LOCK_FLAGS_FREE_SESSION_LOCK NVBIT(6) +#define RS_LOCK_FLAGS_LOW_PRIORITY NVBIT(7) + +/// RS_LOCK_STATE +#define RS_LOCK_STATE_TOP_LOCK_ACQUIRED NVBIT(0) +#define RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED NVBIT(1) +#define RS_LOCK_STATE_CUSTOM_LOCK_2_ACQUIRED NVBIT(2) +#define RS_LOCK_STATE_CUSTOM_LOCK_3_ACQUIRED NVBIT(3) +#define RS_LOCK_STATE_ALLOW_RECURSIVE_RES_LOCK NVBIT(6) +#define RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED NVBIT(7) +#define RS_LOCK_STATE_SESSION_LOCK_ACQUIRED NVBIT(8) + +/// RS_LOCK_RELEASE +#define RS_LOCK_RELEASE_TOP_LOCK NVBIT(0) +#define RS_LOCK_RELEASE_CLIENT_LOCK NVBIT(1) +#define RS_LOCK_RELEASE_CUSTOM_LOCK_1 NVBIT(2) +#define RS_LOCK_RELEASE_CUSTOM_LOCK_2 NVBIT(3) +#define RS_LOCK_RELEASE_CUSTOM_LOCK_3 NVBIT(4) +#define RS_LOCK_RELEASE_SESSION_LOCK NVBIT(5) + +/// API enumerations used for locking knobs +typedef enum +{ + RS_LOCK_CLIENT =0, + RS_LOCK_TOP =1, + RS_LOCK_RESOURCE =2, + RS_LOCK_CUSTOM_3 =3, +} RS_LOCK_ENUM; + +typedef enum +{ + RS_API_ALLOC_CLIENT = 0, + RS_API_ALLOC_RESOURCE = 1, + RS_API_FREE_RESOURCE = 2, + RS_API_MAP = 3, + RS_API_UNMAP = 4, + RS_API_INTER_MAP = 5, + RS_API_INTER_UNMAP = 6, + RS_API_COPY = 7, + RS_API_SHARE = 8, + RS_API_CTRL = 9, + RS_API_MAX, +} RS_API_ENUM; + +NV_STATUS indexAdd(RsIndex *pIndex, NvU32 index, RsResourceRef *pResourceRef); +NV_STATUS indexRemove(RsIndex *pIndex, NvU32 index, RsResourceRef *pResourceRef); + +// +// Externs +// +/** + * NVOC wrapper for constructing resources of a given type + * + * @param[in] pAllocator Allocator for the resource object + * @param[in] pCallContext Caller context passed to resource constructor + * @param[inout] pParams Resource allocation parameters + * @param[out] ppResource New resource object + */ +extern NV_STATUS resservResourceFactory(PORT_MEM_ALLOCATOR *pAllocator, CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, struct RsResource **ppResource); + +/** + * NVOC wrapper for constructing an application-specific client. + */ +extern NV_STATUS resservClientFactory(PORT_MEM_ALLOCATOR *pAllocator, RS_RES_ALLOC_PARAMS_INTERNAL *pParams, struct RsClient **ppRsClient); + +/** + * Validate the UID/PID security token of the current user against a client's security token. + * + * This will be obsolete after phase 1. + * + * @param[in] pClientToken + * @param[in] pCurrentToken + * + * @returns NV_OK if the current user's security token matches the client's security token + */ +extern NV_STATUS osValidateClientTokens(PSECURITY_TOKEN pClientToken, PSECURITY_TOKEN pCurrentToken); + +/** + * Get the security token of the current user for the UID/PID security model. + * + * This will be obsolete after phase 1. + */ +extern PSECURITY_TOKEN osGetSecurityToken(void); + +/** + * TLS entry id for call contexts. 
All servers will use the same id.
+ */
+#define TLS_ENTRY_ID_RESSERV_CALL_CONTEXT TLS_ENTRY_ID_RESSERV_1
+
+//
+// Structs
+//
+struct RS_FREE_STACK
+{
+    RS_FREE_STACK *pPrev;
+    RsResourceRef *pResourceRef;
+};
+
+struct CALL_CONTEXT
+{
+    RsServer *pServer;              ///< The resource server instance that owns the client
+    struct RsClient *pClient;       ///< Client that was the target of the call
+    RsResourceRef *pResourceRef;    ///< Reference that was the target of the call
+    RsResourceRef *pContextRef;     ///< Reference that may be used to provide more context [optional]
+    RS_LOCK_INFO *pLockInfo;        ///< Saved locking context information for the call
+    API_SECURITY_INFO secInfo;
+    RS_RES_CONTROL_PARAMS_INTERNAL *pControlParams; ///< Parameters of the call [optional]
+
+    void *pSerializedParams;        ///< Serialized version of the params
+    void *pDeserializedParams;      ///< Deserialized version of the params
+    NvU32 serializedSize;           ///< Serialized size
+    NvU32 deserializedSize;         ///< Deserialized size
+    NvBool bReserialize;            ///< Reserialize before calling into GSP
+    NvBool bLocalSerialization;     ///< Serialized internally
+};
+
+typedef enum {
+    RS_ITERATE_CHILDREN,    ///< Iterate over a RsResourceRef's children
+    RS_ITERATE_DESCENDANTS, ///< Iterate over a RsResourceRef's children, grandchildren, etc. (unspecified order)
+    RS_ITERATE_CACHED,      ///< Iterate over a RsResourceRef's cache
+    RS_ITERATE_DEPENDANTS,  ///< Iterate over a RsResourceRef's dependants
+} RS_ITER_TYPE;
+
+typedef enum
+{
+    LOCK_ACCESS_READ,
+    LOCK_ACCESS_WRITE,
+} LOCK_ACCESS_TYPE;
+
+
+
+/**
+ * Access control information. This information will be filled out by the user
+ * of the Resource Server when allocating a client or resource.
+ */
+struct ACCESS_CONTROL
+{
+    /**
+     * The privilege level of this access control
+     */
+    RS_PRIV_LEVEL privilegeLevel;
+
+    /**
+     * Opaque pointer for storing a security token
+     */
+    PSECURITY_TOKEN pSecurityToken;
+};
+
+//
+// Utility wrappers for locking validator
+//
+#if LOCK_VAL_ENABLED
+#define RS_LOCK_VALIDATOR_INIT(lock, lockClass, inst) \
+    do { NV_ASSERT_OK(lockvalLockInit((lock), (lockClass), (inst))); } while(0)
+
+#define RS_SPINLOCK_ACQUIRE(lock, validator) do \
+{ \
+    NV_ASSERT_OK(lockvalPreAcquire((validator))); \
+    portSyncSpinlockAcquire((lock)); \
+    lockvalPostAcquire((validator), LOCK_VAL_SPINLOCK); \
+} while(0)
+
+#define RS_RWLOCK_ACQUIRE_READ(lock, validator) do \
+{ \
+    NV_ASSERT_OK(lockvalPreAcquire((validator))); \
+    portSyncRwLockAcquireRead((lock)); \
+    lockvalPostAcquire((validator), LOCK_VAL_RLOCK); \
+} while(0)
+
+#define RS_RWLOCK_ACQUIRE_WRITE(lock, validator) do \
+{ \
+    NV_ASSERT_OK(lockvalPreAcquire((validator))); \
+    portSyncRwLockAcquireWrite((lock)); \
+    lockvalPostAcquire((validator), LOCK_VAL_WLOCK); \
+} while(0)
+
+#define RS_SPINLOCK_RELEASE_EXT(lock, validator, bOutOfOrder) do \
+{ \
+    void *pLockValTlsEntry, *pReleasedLockNode; \
+    if (bOutOfOrder) \
+        NV_ASSERT_OK(lockvalReleaseOutOfOrder((validator), LOCK_VAL_SPINLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \
+    else \
+        NV_ASSERT_OK(lockvalRelease((validator), LOCK_VAL_SPINLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \
+    portSyncSpinlockRelease((lock)); \
+    lockvalMemoryRelease(pLockValTlsEntry, pReleasedLockNode); \
+} while(0)
+
+#define RS_RWLOCK_RELEASE_READ_EXT(lock, validator, bOutOfOrder) do \
+{ \
+    void *pLockValTlsEntry, *pReleasedLockNode; \
+    if (bOutOfOrder) \
+        NV_ASSERT_OK(lockvalReleaseOutOfOrder((validator), LOCK_VAL_RLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \
+    else \
+        NV_ASSERT_OK(lockvalRelease((validator),
LOCK_VAL_RLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + portSyncRwLockReleaseRead((lock)); \ + lockvalMemoryRelease(pLockValTlsEntry, pReleasedLockNode); \ +} while(0) + +#define RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, bOutOfOrder) do \ +{ \ + void *pLockValTlsEntry, *pReleasedLockNode; \ + if (bOutOfOrder) \ + NV_ASSERT_OK(lockvalReleaseOutOfOrder((validator), LOCK_VAL_WLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + else \ + NV_ASSERT_OK(lockvalRelease((validator), LOCK_VAL_WLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + portSyncRwLockReleaseWrite((lock)); \ + lockvalMemoryRelease(pLockValTlsEntry, pReleasedLockNode); \ +} while(0) + +#else +#define RS_LOCK_VALIDATOR_INIT(lock, lockClass, inst) +#define RS_SPINLOCK_ACQUIRE(lock, validator) do { portSyncSpinlockAcquire((lock)); } while(0) +#define RS_RWLOCK_ACQUIRE_READ(lock, validator) do { portSyncRwLockAcquireRead((lock)); } while(0) +#define RS_RWLOCK_ACQUIRE_WRITE(lock, validator) do { portSyncRwLockAcquireWrite((lock)); } while(0) +#define RS_SPINLOCK_RELEASE_EXT(lock, validator, bOutOfOrder) do { portSyncSpinlockRelease((lock)); } while(0) +#define RS_RWLOCK_RELEASE_READ_EXT(lock, validator, bOutOfOrder) do { portSyncRwLockReleaseRead((lock)); } while(0) +#define RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, bOutOfOrder) do { portSyncRwLockReleaseWrite((lock)); } while(0) +#endif + +#define RS_SPINLOCK_RELEASE(lock, validator) RS_SPINLOCK_RELEASE_EXT(lock, validator, NV_FALSE) +#define RS_RWLOCK_RELEASE_READ(lock, validator) RS_RWLOCK_RELEASE_READ_EXT(lock, validator, NV_FALSE) +#define RS_RWLOCK_RELEASE_WRITE(lock, validator) RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, NV_FALSE) + + +#ifdef __cplusplus +} +#endif + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_RESSERV_NVOC_H_ diff --git a/src/nvidia/generated/g_rmconfig_private.h b/src/nvidia/generated/g_rmconfig_private.h new file mode 100644 index 0000000..e767c2b --- /dev/null +++ b/src/nvidia/generated/g_rmconfig_private.h @@ -0,0 +1,849 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// private rmconfig generated #defines such as IsG84(), +// RMCFG_FEATURE_ENABLED_STATUS(), etc. +// +// Only for use within resman. +// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_rmconfig_private.h +// +// Chips: T234D, T26XD, T25XD +// + +#ifndef _G_RMCFG_PRIVATE_H_ +#define _G_RMCFG_PRIVATE_H_ + +// +// CHIP identity macros such as IsGK104() +// + +// GF10X +#define IsGF100(pGpu) ((0) && (pGpu)) +#define IsGF100orBetter(pGpu) ((0) && (pGpu)) + +#define IsGF100B(pGpu) ((0) && (pGpu)) +#define IsGF100BorBetter(pGpu) ((0) && (pGpu)) + +#define IsGF104(pGpu) ((0) && (pGpu)) +#define IsGF104orBetter(pGpu) ((0) && (pGpu)) + +#define IsGF104B(pGpu) ((0) && (pGpu)) +#define IsGF104BorBetter(pGpu) ((0) && (pGpu)) + +#define IsGF106(pGpu) ((0) && (pGpu)) +#define IsGF106orBetter(pGpu) ((0) && (pGpu)) + +#define IsGF106B(pGpu) ((0) && (pGpu)) +#define IsGF106BorBetter(pGpu) ((0) && (pGpu)) + +#define IsGF108(pGpu) ((0) && (pGpu)) +#define IsGF108orBetter(pGpu) ((0) && (pGpu)) + +// Any GF10X chip? 
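+// (Throughout this file, stubs of the form (0 && (pGpu)) fold to compile-time
+// false while still referencing pGpu, presumably to avoid unused-parameter
+// warnings in callers; (1 || (pGpu)) is the always-true counterpart.)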
+#define IsGF10X(pGpu) (0 && (pGpu)) +#define IsGF10XorBetter(pGpu) (0 && (pGpu)) + + +// GF11X +#define IsGF110D(pGpu) ((0) && (pGpu)) +#define IsGF110DorBetter(pGpu) ((0) && (pGpu)) + +#define IsGF110(pGpu) ((0) && (pGpu)) +#define IsGF110orBetter(pGpu) ((0) && (pGpu)) + +#define IsGF117(pGpu) ((0) && (pGpu)) +#define IsGF117orBetter(pGpu) ((0) && (pGpu)) + +#define IsGF118(pGpu) ((0) && (pGpu)) +#define IsGF118orBetter(pGpu) ((0) && (pGpu)) + +#define IsGF119(pGpu) ((0) && (pGpu)) +#define IsGF119orBetter(pGpu) ((0) && (pGpu)) + +// Any GF11X chip? +#define IsGF11X(pGpu) (0 && (pGpu)) +#define IsGF11XorBetter(pGpu) (0 && (pGpu)) + + +// GF10XF +#define IsGF110F(pGpu) ((0) && (pGpu)) +#define IsGF110ForBetter(pGpu) ((0) && (pGpu)) + +#define IsGF110F2(pGpu) ((0) && (pGpu)) +#define IsGF110F2orBetter(pGpu) ((0) && (pGpu)) + +#define IsGF110F3(pGpu) ((0) && (pGpu)) +#define IsGF110F3orBetter(pGpu) ((0) && (pGpu)) + +// Any GF10XF chip? +#define IsGF10XF(pGpu) (0 && (pGpu)) +#define IsGF10XForBetter(pGpu) (0 && (pGpu)) + + +// GK10X +#define IsGK104(pGpu) ((0) && (pGpu)) +#define IsGK104orBetter(pGpu) ((0) && (pGpu)) + +#define IsGK106(pGpu) ((0) && (pGpu)) +#define IsGK106orBetter(pGpu) ((0) && (pGpu)) + +#define IsGK107(pGpu) ((0) && (pGpu)) +#define IsGK107orBetter(pGpu) ((0) && (pGpu)) + +#define IsGK20A(pGpu) ((0) && (pGpu)) +#define IsGK20AorBetter(pGpu) ((0) && (pGpu)) + +// Any GK10X chip? +#define IsGK10X(pGpu) (0 && (pGpu)) +#define IsGK10XorBetter(pGpu) (0 && (pGpu)) + + +// GK11X +#define IsGK110(pGpu) ((0) && (pGpu)) +#define IsGK110orBetter(pGpu) ((0) && (pGpu)) + +#define IsGK110B(pGpu) ((0) && (pGpu)) +#define IsGK110BorBetter(pGpu) ((0) && (pGpu)) + +#define IsGK110C(pGpu) ((0) && (pGpu)) +#define IsGK110CorBetter(pGpu) ((0) && (pGpu)) + +// Any GK11X chip? +#define IsGK11X(pGpu) (0 && (pGpu)) +#define IsGK11XorBetter(pGpu) (0 && (pGpu)) + + +// GK20X +#define IsGK208(pGpu) ((0) && (pGpu)) +#define IsGK208orBetter(pGpu) ((0) && (pGpu)) + +#define IsGK208S(pGpu) ((0) && (pGpu)) +#define IsGK208SorBetter(pGpu) ((0) && (pGpu)) + +// Any GK20X chip? +#define IsGK20X(pGpu) (0 && (pGpu)) +#define IsGK20XorBetter(pGpu) (0 && (pGpu)) + + +// GM10X +#define IsGM107(pGpu) ((0) && (pGpu)) +#define IsGM107orBetter(pGpu) ((0) && (pGpu)) + +#define IsGM108(pGpu) ((0) && (pGpu)) +#define IsGM108orBetter(pGpu) ((0) && (pGpu)) + +// Any GM10X chip? +#define IsGM10X(pGpu) (0 && (pGpu)) +#define IsGM10XorBetter(pGpu) (0 && (pGpu)) + + +// GM20X +#define IsGM200(pGpu) ((0) && (pGpu)) +#define IsGM200orBetter(pGpu) ((0) && (pGpu)) + +#define IsGM204(pGpu) ((0) && (pGpu)) +#define IsGM204orBetter(pGpu) ((0) && (pGpu)) + +#define IsGM206(pGpu) ((0) && (pGpu)) +#define IsGM206orBetter(pGpu) ((0) && (pGpu)) + +// Any GM20X chip? +#define IsGM20X(pGpu) (0 && (pGpu)) +#define IsGM20XorBetter(pGpu) (0 && (pGpu)) + + +// GP10X +#define IsGP100(pGpu) ((0) && (pGpu)) +#define IsGP100orBetter(pGpu) ((0) && (pGpu)) + +#define IsGP102(pGpu) ((0) && (pGpu)) +#define IsGP102orBetter(pGpu) ((0) && (pGpu)) + +#define IsGP104(pGpu) ((0) && (pGpu)) +#define IsGP104orBetter(pGpu) ((0) && (pGpu)) + +#define IsGP106(pGpu) ((0) && (pGpu)) +#define IsGP106orBetter(pGpu) ((0) && (pGpu)) + +#define IsGP107(pGpu) ((0) && (pGpu)) +#define IsGP107orBetter(pGpu) ((0) && (pGpu)) + +#define IsGP108(pGpu) ((0) && (pGpu)) +#define IsGP108orBetter(pGpu) ((0) && (pGpu)) + +// Any GP10X chip? 
+#define IsGP10X(pGpu) (0 && (pGpu)) +#define IsGP10XorBetter(pGpu) (0 && (pGpu)) + + +// GV10X +#define IsGV100(pGpu) ((0) && (pGpu)) +#define IsGV100orBetter(pGpu) ((0) && (pGpu)) + +// Any GV10X chip? +#define IsGV10X(pGpu) (0 && (pGpu)) +#define IsGV10XorBetter(pGpu) (0 && (pGpu)) + + +// GV11X +#define IsGV11B(pGpu) ((0) && (pGpu)) +#define IsGV11BorBetter(pGpu) ((0) && (pGpu)) + +// Any GV11X chip? +#define IsGV11X(pGpu) (0 && (pGpu)) +#define IsGV11XorBetter(pGpu) (0 && (pGpu)) + + +// TU10X +#define IsTU102(pGpu) ((0) && (pGpu)) +#define IsTU102orBetter(pGpu) ((0) && (pGpu)) + +#define IsTU104(pGpu) ((0) && (pGpu)) +#define IsTU104orBetter(pGpu) ((0) && (pGpu)) + +#define IsTU106(pGpu) ((0) && (pGpu)) +#define IsTU106orBetter(pGpu) ((0) && (pGpu)) + +#define IsTU116(pGpu) ((0) && (pGpu)) +#define IsTU116orBetter(pGpu) ((0) && (pGpu)) + +#define IsTU117(pGpu) ((0) && (pGpu)) +#define IsTU117orBetter(pGpu) ((0) && (pGpu)) + +// Any TU10X chip? +#define IsTU10X(pGpu) (0 && (pGpu)) +#define IsTU10XorBetter(pGpu) (0 && (pGpu)) + + +// GA10X +#define IsGA100(pGpu) ((0) && (pGpu)) +#define IsGA100orBetter(pGpu) ((0) && (pGpu)) + +#define IsGA102(pGpu) ((0) && (pGpu)) +#define IsGA102orBetter(pGpu) ((0) && (pGpu)) + +#define IsGA103(pGpu) ((0) && (pGpu)) +#define IsGA103orBetter(pGpu) ((0) && (pGpu)) + +#define IsGA104(pGpu) ((0) && (pGpu)) +#define IsGA104orBetter(pGpu) ((0) && (pGpu)) + +#define IsGA106(pGpu) ((0) && (pGpu)) +#define IsGA106orBetter(pGpu) ((0) && (pGpu)) + +#define IsGA107(pGpu) ((0) && (pGpu)) +#define IsGA107orBetter(pGpu) ((0) && (pGpu)) + +#define IsGA10B(pGpu) ((0) && (pGpu)) +#define IsGA10BorBetter(pGpu) ((0) && (pGpu)) + +// Any GA10X chip? +#define IsGA10X(pGpu) (0 && (pGpu)) +#define IsGA10XorBetter(pGpu) (0 && (pGpu)) + + +// GA10XF +#define IsGA102F(pGpu) ((0) && (pGpu)) +#define IsGA102ForBetter(pGpu) ((0) && (pGpu)) + +// Any GA10XF chip? +#define IsGA10XF(pGpu) (0 && (pGpu)) +#define IsGA10XForBetter(pGpu) (0 && (pGpu)) + + +// AD10X +#define IsAD102(pGpu) ((0) && (pGpu)) +#define IsAD102orBetter(pGpu) ((0) && (pGpu)) + +#define IsAD103(pGpu) ((0) && (pGpu)) +#define IsAD103orBetter(pGpu) ((0) && (pGpu)) + +#define IsAD104(pGpu) ((0) && (pGpu)) +#define IsAD104orBetter(pGpu) ((0) && (pGpu)) + +#define IsAD106(pGpu) ((0) && (pGpu)) +#define IsAD106orBetter(pGpu) ((0) && (pGpu)) + +#define IsAD107(pGpu) ((0) && (pGpu)) +#define IsAD107orBetter(pGpu) ((0) && (pGpu)) + +// Any AD10X chip? +#define IsAD10X(pGpu) (0 && (pGpu)) +#define IsAD10XorBetter(pGpu) (0 && (pGpu)) + + +// GH10X +#define IsGH100(pGpu) ((0) && (pGpu)) +#define IsGH100orBetter(pGpu) ((0) && (pGpu)) + +// Any GH10X chip? +#define IsGH10X(pGpu) (0 && (pGpu)) +#define IsGH10XorBetter(pGpu) (0 && (pGpu)) + + +// GB10X +#define IsGB100(pGpu) ((0) && (pGpu)) +#define IsGB100orBetter(pGpu) ((0) && (pGpu)) + +#define IsGB102(pGpu) ((0) && (pGpu)) +#define IsGB102orBetter(pGpu) ((0) && (pGpu)) + +#define IsGB110(pGpu) ((0) && (pGpu)) +#define IsGB110orBetter(pGpu) ((0) && (pGpu)) + +#define IsGB112(pGpu) ((0) && (pGpu)) +#define IsGB112orBetter(pGpu) ((0) && (pGpu)) + +// Any GB10X chip? 
+#define IsGB10X(pGpu) (0 && (pGpu)) +#define IsGB10XorBetter(pGpu) (0 && (pGpu)) + + +// GB20X +#define IsGB202(pGpu) ((0) && (pGpu)) +#define IsGB202orBetter(pGpu) ((0) && (pGpu)) + +#define IsGB203(pGpu) ((0) && (pGpu)) +#define IsGB203orBetter(pGpu) ((0) && (pGpu)) + +#define IsGB205(pGpu) ((0) && (pGpu)) +#define IsGB205orBetter(pGpu) ((0) && (pGpu)) + +#define IsGB206(pGpu) ((0) && (pGpu)) +#define IsGB206orBetter(pGpu) ((0) && (pGpu)) + +#define IsGB207(pGpu) ((0) && (pGpu)) +#define IsGB207orBetter(pGpu) ((0) && (pGpu)) + +// Any GB20X chip? +#define IsGB20X(pGpu) (0 && (pGpu)) +#define IsGB20XorBetter(pGpu) (0 && (pGpu)) + + +// T12X +#define IsT001_FERMI_NOT_EXIST(pGpu) ((0) && (pGpu)) +#define IsT001_FERMI_NOT_EXISTorBetter(pGpu) ((0) && (pGpu)) + +#define IsT124(pGpu) ((0) && (pGpu)) +#define IsT124orBetter(pGpu) ((0) && (pGpu)) + +// Any T12X chip? +#define IsT12X(pGpu) (0 && (pGpu)) +#define IsT12XorBetter(pGpu) (0 && (pGpu)) + + +// T13X +#define IsT132(pGpu) ((0) && (pGpu)) +#define IsT132orBetter(pGpu) ((0) && (pGpu)) + +// Any T13X chip? +#define IsT13X(pGpu) (0 && (pGpu)) +#define IsT13XorBetter(pGpu) (0 && (pGpu)) + + +// T21X +#define IsT210(pGpu) ((0) && (pGpu)) +#define IsT210orBetter(pGpu) ((0) && (pGpu)) + +// Any T21X chip? +#define IsT21X(pGpu) (0 && (pGpu)) +#define IsT21XorBetter(pGpu) (0 && (pGpu)) + + +// T18X +#define IsT186(pGpu) ((0) && (pGpu)) +#define IsT186orBetter(pGpu) ((0) && (pGpu)) + +// Any T18X chip? +#define IsT18X(pGpu) (0 && (pGpu)) +#define IsT18XorBetter(pGpu) (0 && (pGpu)) + + +// T19X +#define IsT194(pGpu) ((0) && (pGpu)) +#define IsT194orBetter(pGpu) ((0) && (pGpu)) + +#define IsT002_TURING_NOT_EXIST(pGpu) ((0) && (pGpu)) +#define IsT002_TURING_NOT_EXISTorBetter(pGpu) ((0) && (pGpu)) + +// Any T19X chip? +#define IsT19X(pGpu) (0 && (pGpu)) +#define IsT19XorBetter(pGpu) (0 && (pGpu)) + + +// T23XG +#define IsT234(pGpu) ((0) && (pGpu)) +#define IsT234orBetter(pGpu) ((0) && (pGpu)) + +#define IsT003_ADA_NOT_EXIST(pGpu) ((0) && (pGpu)) +#define IsT003_ADA_NOT_EXISTorBetter(pGpu) ((0) && (pGpu)) + +#define IsT004_HOPPER_NOT_EXIST(pGpu) ((0) && (pGpu)) +#define IsT004_HOPPER_NOT_EXISTorBetter(pGpu) ((0) && (pGpu)) + +// Any T23XG chip? +#define IsT23XG(pGpu) (0 && (pGpu)) +#define IsT23XGorBetter(pGpu) (0 && (pGpu)) + + +// T23XD +#define IsT234D(pGpu) rmcfg_IsT234D(pGpu) +#define IsT234DorBetter(pGpu) ((1) && (pGpu)) + +// Any T23XD chip? +#define IsT23XD(pGpu) rmcfg_IsT23XD(pGpu) +#define IsT23XDorBetter(pGpu) (1 || (pGpu)) + + +// T26XD +#define IsT264D(pGpu) rmcfg_IsT264D(pGpu) +#define IsT264DorBetter(pGpu) rmcfg_IsT264DorBetter(pGpu) + +// Any T26XD chip? +#define IsT26XD(pGpu) rmcfg_IsT26XD(pGpu) +#define IsT26XDorBetter(pGpu) rmcfg_IsT26XDorBetter(pGpu) + + +// T25XD +#define IsT256D(pGpu) rmcfg_IsT256D(pGpu) +#define IsT256DorBetter(pGpu) rmcfg_IsT256DorBetter(pGpu) + +// Any T25XD chip? +#define IsT25XD(pGpu) rmcfg_IsT25XD(pGpu) +#define IsT25XDorBetter(pGpu) rmcfg_IsT25XDorBetter(pGpu) + + +// SIMS +#define IsAMODEL(pGpu) ((0) && (pGpu)) +#define IsAMODELorBetter(pGpu) ((0) && (pGpu)) + +// Any SIMS chip? +#define IsSIMS(pGpu) (0 && (pGpu)) +#define IsSIMSorBetter(pGpu) (0 && (pGpu)) + + +// Any CLASSIC_GPUS chip? +#define IsCLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsCLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any dFERMI chip? +#define IsdFERMI(pGpu) (0 && (pGpu)) +#define IsdFERMIorBetter(pGpu) (0 && (pGpu)) + + +// Any FERMI chip? 
+#define IsFERMI(pGpu) (IsFERMI_CLASSIC_GPUS(pGpu) || IsFERMI_TEGRA_BIG_GPUS(pGpu)) +#define IsFERMIorBetter(pGpu) (IsFERMI_CLASSIC_GPUSorBetter(pGpu) || IsFERMI_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any FERMI_CLASSIC_GPUS chip? +#define IsFERMI_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsFERMI_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any DISPLAYLESS chip? +#define IsDISPLAYLESS(pGpu) (IsDISPLAYLESS_CLASSIC_GPUS(pGpu) || IsDISPLAYLESS_TEGRA_BIG_GPUS(pGpu)) + + +// Any DISPLAYLESS_CLASSIC_GPUS chip? +#define IsDISPLAYLESS_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsDISPLAYLESS_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any dKEPLER chip? +#define IsdKEPLER(pGpu) (0 && (pGpu)) +#define IsdKEPLERorBetter(pGpu) (0 && (pGpu)) + + +// Any KEPLER chip? +#define IsKEPLER(pGpu) (IsKEPLER_CLASSIC_GPUS(pGpu) || IsKEPLER_TEGRA_BIG_GPUS(pGpu)) +#define IsKEPLERorBetter(pGpu) (IsKEPLER_CLASSIC_GPUSorBetter(pGpu) || IsKEPLER_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any KEPLER_CLASSIC_GPUS chip? +#define IsKEPLER_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsKEPLER_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any dMAXWELL chip? +#define IsdMAXWELL(pGpu) (0 && (pGpu)) +#define IsdMAXWELLorBetter(pGpu) (0 && (pGpu)) + + +// Any MAXWELL chip? +#define IsMAXWELL(pGpu) (IsMAXWELL_CLASSIC_GPUS(pGpu) || IsMAXWELL_TEGRA_BIG_GPUS(pGpu)) +#define IsMAXWELLorBetter(pGpu) (IsMAXWELL_CLASSIC_GPUSorBetter(pGpu) || IsMAXWELL_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any MAXWELL_CLASSIC_GPUS chip? +#define IsMAXWELL_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsMAXWELL_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any dPASCAL chip? +#define IsdPASCAL(pGpu) (0 && (pGpu)) +#define IsdPASCALorBetter(pGpu) (0 && (pGpu)) + + +// Any COMPUTE chip? +#define IsCOMPUTE(pGpu) (0 && (pGpu)) + + +// Any PASCAL chip? +#define IsPASCAL(pGpu) (IsPASCAL_CLASSIC_GPUS(pGpu) || IsPASCAL_TEGRA_BIG_GPUS(pGpu)) +#define IsPASCALorBetter(pGpu) (IsPASCAL_CLASSIC_GPUSorBetter(pGpu) || IsPASCAL_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any PASCAL_CLASSIC_GPUS chip? +#define IsPASCAL_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsPASCAL_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any dVOLTA chip? +#define IsdVOLTA(pGpu) (0 && (pGpu)) +#define IsdVOLTAorBetter(pGpu) (0 && (pGpu)) + + +// Any VOLTA chip? +#define IsVOLTA(pGpu) (IsVOLTA_CLASSIC_GPUS(pGpu) || IsVOLTA_TEGRA_BIG_GPUS(pGpu)) +#define IsVOLTAorBetter(pGpu) (IsVOLTA_CLASSIC_GPUSorBetter(pGpu) || IsVOLTA_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any VOLTA_CLASSIC_GPUS chip? +#define IsVOLTA_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsVOLTA_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any dTURING chip? +#define IsdTURING(pGpu) (0 && (pGpu)) +#define IsdTURINGorBetter(pGpu) (0 && (pGpu)) + + +// Any TURING chip? +#define IsTURING(pGpu) (IsTURING_CLASSIC_GPUS(pGpu) || IsTURING_TEGRA_BIG_GPUS(pGpu)) +#define IsTURINGorBetter(pGpu) (IsTURING_CLASSIC_GPUSorBetter(pGpu) || IsTURING_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any TURING_CLASSIC_GPUS chip? +#define IsTURING_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsTURING_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any dAMPERE chip? +#define IsdAMPERE(pGpu) (0 && (pGpu)) +#define IsdAMPEREorBetter(pGpu) (0 && (pGpu)) + + +// Any AMPERE chip? +#define IsAMPERE(pGpu) (IsAMPERE_CLASSIC_GPUS(pGpu) || IsAMPERE_TEGRA_BIG_GPUS(pGpu)) +#define IsAMPEREorBetter(pGpu) (IsAMPERE_CLASSIC_GPUSorBetter(pGpu) || IsAMPERE_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any AMPERE_CLASSIC_GPUS chip? 
+#define IsAMPERE_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsAMPERE_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any TEGRA_DGPU_AMPERE chip? +#define IsTEGRA_DGPU_AMPERE(pGpu) (0 && (pGpu)) + + +// Any TEGRA_DGPU chip? +#define IsTEGRA_DGPU(pGpu) (0 && (pGpu)) + + +// Any DFPGA chip? +#define IsDFPGA(pGpu) (0 && (pGpu)) + + +// Any dADA chip? +#define IsdADA(pGpu) (0 && (pGpu)) +#define IsdADAorBetter(pGpu) (0 && (pGpu)) + + +// Any ADA chip? +#define IsADA(pGpu) (IsADA_CLASSIC_GPUS(pGpu) || IsADA_TEGRA_BIG_GPUS(pGpu)) +#define IsADAorBetter(pGpu) (IsADA_CLASSIC_GPUSorBetter(pGpu) || IsADA_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any ADA_CLASSIC_GPUS chip? +#define IsADA_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsADA_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any dHOPPER chip? +#define IsdHOPPER(pGpu) (0 && (pGpu)) +#define IsdHOPPERorBetter(pGpu) (0 && (pGpu)) + + +// Any HOPPER chip? +#define IsHOPPER(pGpu) (IsHOPPER_CLASSIC_GPUS(pGpu) || IsHOPPER_TEGRA_BIG_GPUS(pGpu)) +#define IsHOPPERorBetter(pGpu) (IsHOPPER_CLASSIC_GPUSorBetter(pGpu) || IsHOPPER_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any HOPPER_CLASSIC_GPUS chip? +#define IsHOPPER_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsHOPPER_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any dBLACKWELL chip? +#define IsdBLACKWELL(pGpu) (0 && (pGpu)) +#define IsdBLACKWELLorBetter(pGpu) (0 && (pGpu)) + + +// Any BLACKWELL chip? +#define IsBLACKWELL(pGpu) (IsBLACKWELL_CLASSIC_GPUS(pGpu) || IsBLACKWELL_TEGRA_BIG_GPUS(pGpu)) +#define IsBLACKWELLorBetter(pGpu) (IsBLACKWELL_CLASSIC_GPUSorBetter(pGpu) || IsBLACKWELL_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any BLACKWELL_CLASSIC_GPUS chip? +#define IsBLACKWELL_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsBLACKWELL_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any TEGRA_DISP chip? +#define IsTEGRA_DISP(pGpu) (1 || (pGpu)) +#define IsTEGRA_DISPorBetter(pGpu) (1 || (pGpu)) + + +// Any TEGRA_BIG_GPUS chip? +#define IsTEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsTEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any FERMI_TEGRA_BIG_GPUS chip? +#define IsFERMI_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsFERMI_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any TEGRA chip? +#define IsTEGRA(pGpu) (1 || (pGpu)) +#define IsTEGRAorBetter(pGpu) (1 || (pGpu)) + + +// Any TEGRA_TEGRA_BIG_GPUS chip? +#define IsTEGRA_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsTEGRA_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any tKEPLER chip? +#define IstKEPLER(pGpu) (0 && (pGpu)) +#define IstKEPLERorBetter(pGpu) (0 && (pGpu)) + + +// Any KEPLER_TEGRA_BIG_GPUS chip? +#define IsKEPLER_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsKEPLER_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any tMAXWELL chip? +#define IstMAXWELL(pGpu) (0 && (pGpu)) +#define IstMAXWELLorBetter(pGpu) (0 && (pGpu)) + + +// Any MAXWELL_TEGRA_BIG_GPUS chip? +#define IsMAXWELL_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsMAXWELL_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any tPASCAL chip? +#define IstPASCAL(pGpu) (0 && (pGpu)) +#define IstPASCALorBetter(pGpu) (0 && (pGpu)) + + +// Any PASCAL_TEGRA_BIG_GPUS chip? +#define IsPASCAL_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsPASCAL_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any DISPLAYLESS_TEGRA_BIG_GPUS chip? +#define IsDISPLAYLESS_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsDISPLAYLESS_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any tVOLTA chip? 
+#define IstVOLTA(pGpu) (0 && (pGpu)) +#define IstVOLTAorBetter(pGpu) (0 && (pGpu)) + + +// Any VOLTA_TEGRA_BIG_GPUS chip? +#define IsVOLTA_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsVOLTA_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any TURING_TEGRA_BIG_GPUS chip? +#define IsTURING_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsTURING_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any T23X chip? +#define IsT23X(pGpu) (IsT23X_TEGRA_BIG_GPUS(pGpu) || IsT23X_TEGRA_NVDISP_GPUS(pGpu)) +#define IsT23XorBetter(pGpu) (IsT23X_TEGRA_BIG_GPUSorBetter(pGpu) || IsT23X_TEGRA_NVDISP_GPUSorBetter(pGpu)) + + +// Any T23X_TEGRA_BIG_GPUS chip? +#define IsT23X_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsT23X_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any tAMPERE chip? +#define IstAMPERE(pGpu) (0 && (pGpu)) +#define IstAMPEREorBetter(pGpu) (0 && (pGpu)) + + +// Any AMPERE_TEGRA_BIG_GPUS chip? +#define IsAMPERE_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsAMPERE_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any ADA_TEGRA_BIG_GPUS chip? +#define IsADA_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsADA_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any HOPPER_TEGRA_BIG_GPUS chip? +#define IsHOPPER_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsHOPPER_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any TEGRA_NVDISP_GPUS chip? +#define IsTEGRA_NVDISP_GPUS(pGpu) (1 || (pGpu)) +#define IsTEGRA_NVDISP_GPUSorBetter(pGpu) (1 || (pGpu)) + + +// Any T23X_TEGRA_NVDISP_GPUS chip? +#define IsT23X_TEGRA_NVDISP_GPUS(pGpu) rmcfg_IsT23X_TEGRA_NVDISP_GPUS(pGpu) +#define IsT23X_TEGRA_NVDISP_GPUSorBetter(pGpu) (1 || (pGpu)) + + +// Any TEGRA_TEGRA_NVDISP_GPUS chip? +#define IsTEGRA_TEGRA_NVDISP_GPUS(pGpu) (1 || (pGpu)) +#define IsTEGRA_TEGRA_NVDISP_GPUSorBetter(pGpu) (1 || (pGpu)) + + +// Any TEGRA_DISP_TEGRA_NVDISP_GPUS chip? +#define IsTEGRA_DISP_TEGRA_NVDISP_GPUS(pGpu) (1 || (pGpu)) +#define IsTEGRA_DISP_TEGRA_NVDISP_GPUSorBetter(pGpu) (1 || (pGpu)) + + +// Any T26X chip? +#define IsT26X(pGpu) rmcfg_IsT26X(pGpu) +#define IsT26XorBetter(pGpu) rmcfg_IsT26XorBetter(pGpu) + + +// Any T25X chip? +#define IsT25X(pGpu) rmcfg_IsT25X(pGpu) +#define IsT25XorBetter(pGpu) rmcfg_IsT25XorBetter(pGpu) + + +// Any SIMULATION_GPUS chip? +#define IsSIMULATION_GPUS(pGpu) (0 && (pGpu)) +#define IsSIMULATION_GPUSorBetter(pGpu) (0 && (pGpu)) + + + + + +// +// Enable/disable printing of entity names (class, engine, etc.) +// +#define RMCFG_ENTITY_NAME(entity) "" + +// +// Macros to help with enabling or disabling code based on whether +// a feature (or chip or engine or ...) is enabled or not. +// Also have RMCFG_CHIP_), RMCFG_FEATURE_ENABLED(, etc +// from rmconfig.h. +// +// NOTE: these definitions are "flat" (ie they don't use some more general +// RMCFG_ENABLED(CHIP,X) form because the pre-processor would re-evaluate +// the expansion of the item (chip, feature, class, api). For classes, +// at least, this is a problem since we would end up with class number +// instead of its name... + +// hack: MSVC is not C99 compliant + +// CHIP's +#define RMCFG_CHIP_ENABLED_OR_BAIL(W) \ + do { \ + if ( ! RMCFG_CHIP_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "CHIP" RMCFG_ENTITY_NAME(#W) " not enabled, bailing\n"); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + #define RMCFG_CHIP_ENABLED_OR_ASSERT_AND_BAIL(W) \ + do { \ + if ( ! 
RMCFG_CHIP_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "CHIP" RMCFG_ENTITY_NAME(#W) " not enabled, assert and bail\n"); \ + NV_ASSERT_PRECOMP(RMCFG_CHIP_##W); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + +// FEATURE's +#define RMCFG_FEATURE_ENABLED_OR_BAIL(W) \ + do { \ + if ( ! RMCFG_FEATURE_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "FEATURE" RMCFG_ENTITY_NAME(#W) " not enabled, bailing\n"); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) +#define RMCFG_FEATURE_ENABLED_OR_ASSERT_AND_BAIL(W) \ + do { \ + if ( ! RMCFG_FEATURE_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "FEATURE" RMCFG_ENTITY_NAME(#W) " not enabled, assert and bail\n"); \ + NV_ASSERT_PRECOMP(RMCFG_FEATURE_##W); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + +#define RMCFG_FEATURE_PLATFORM(P) (RMCFG_FEATURE_PLATFORM_##P) + +// MODULE's +#define RMCFG_MODULE_ENABLED_OR_BAIL(W) \ + do { \ + if ( ! RMCFG_MODULE_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "MODULE" RMCFG_ENTITY_NAME(#W) " not enabled, bailing\n"); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) +#define RMCFG_MODULE_ENABLED_OR_ASSERT_AND_BAIL(W) \ + do { \ + if ( ! RMCFG_MODULE_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "MODULE" RMCFG_ENTITY_NAME(#W) " not enabled, assert and bail\n"); \ + NV_ASSERT_PRECOMP(RMCFG_MODULE_##W); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + + +// CLASS's +#define RMCFG_CLASS_ENABLED_OR_BAIL(W) \ + do { \ + if ( ! RMCFG_CLASS_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "CLASS" RMCFG_ENTITY_NAME(#W) " not enabled, bailing\n"); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) +#define RMCFG_CLASS_ENABLED_OR_ASSERT_AND_BAIL(W) \ + do { \ + if ( ! RMCFG_CLASS_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "CLASS" RMCFG_ENTITY_NAME(#W) " not enabled, assert and bail\n"); \ + NV_ASSERT_PRECOMP(RMCFG_CLASS_##W); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + + + +#endif // _G_RMCFG_PRIVATE_H_ diff --git a/src/nvidia/generated/g_rmconfig_util.c b/src/nvidia/generated/g_rmconfig_util.c new file mode 100644 index 0000000..5c3cc6c --- /dev/null +++ b/src/nvidia/generated/g_rmconfig_util.c @@ -0,0 +1,106 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// rmconfig runtime support that will be part of "core" resman. +// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_rmconfig_util.c +// +// Chips: T234D, T26XD, T25XD +// + +#include "gpu/gpu.h" + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +// NVOC RTTI provider for IOM objects +const NVOC_RTTI_PROVIDER __iom_rtti_provider = { 0 }; + +// +// helper functions for IsCHIP() et al.
+// These help to reduce code size for runtime IsCHIP() and IsCHIPALIAS() invocations +// + +NvBool rmcfg_IsT234D(POBJGPU pGpu) +{ + return gpuIsImplementation(pGpu, HAL_IMPL_T234D); +} + +NvBool rmcfg_IsT23XD(POBJGPU pGpu) +{ + return IsT234D(pGpu); +} + +NvBool rmcfg_IsT264D(POBJGPU pGpu) +{ + return gpuIsImplementation(pGpu, HAL_IMPL_T264D); +} + +NvBool rmcfg_IsT264DorBetter(POBJGPU pGpu) +{ + return gpuIsImplementationOrBetter(pGpu, HAL_IMPL_T264D); +} + +NvBool rmcfg_IsT26XD(POBJGPU pGpu) +{ + return IsT264D(pGpu); +} + +NvBool rmcfg_IsT26XDorBetter(POBJGPU pGpu) +{ + return IsT264DorBetter(pGpu); +} + +NvBool rmcfg_IsT256D(POBJGPU pGpu) +{ + return gpuIsImplementation(pGpu, HAL_IMPL_T256D); +} + +NvBool rmcfg_IsT256DorBetter(POBJGPU pGpu) +{ + return gpuIsImplementationOrBetter(pGpu, HAL_IMPL_T256D); +} + +NvBool rmcfg_IsT25XD(POBJGPU pGpu) +{ + return IsT256D(pGpu); +} + +NvBool rmcfg_IsT25XDorBetter(POBJGPU pGpu) +{ + return IsT256DorBetter(pGpu); +} + +NvBool rmcfg_IsT23X_TEGRA_NVDISP_GPUS(POBJGPU pGpu) +{ + return IsT234D(pGpu); +} + +NvBool rmcfg_IsT26X(POBJGPU pGpu) +{ + return IsT264D(pGpu); +} + +NvBool rmcfg_IsT26XorBetter(POBJGPU pGpu) +{ + return IsT264DorBetter(pGpu); +} + +NvBool rmcfg_IsT25X(POBJGPU pGpu) +{ + return IsT256D(pGpu); +} + +NvBool rmcfg_IsT25XorBetter(POBJGPU pGpu) +{ + return IsT256DorBetter(pGpu); +} + + + +// NVOC class ID uniqueness checks +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x1ab16a = 1; /* OBJRPC */ +char __nvoc_class_id_uniqueness_check_0xd4dff8 = 1; /* OBJRPCSTRUCTURECOPY */ + +#endif diff --git a/src/nvidia/generated/g_rmconfig_util.h b/src/nvidia/generated/g_rmconfig_util.h new file mode 100644 index 0000000..4088b64 --- /dev/null +++ b/src/nvidia/generated/g_rmconfig_util.h @@ -0,0 +1,38 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Prototypes for rmconfig utility functions such as _IsGK104(), etc. +// +// Only for use within resman. +// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_rmconfig_util.h +// +// Chips: T234D, T26XD, T25XD +// + +#ifndef _G_RMCFG_UTIL_H_ +#define _G_RMCFG_UTIL_H_ + +// +// Any needed prototypes for helper functions for IsCHIP(), eg rmcfg_IsGK104() +// These cannot be put in rmconfig_private.h as they need the OBJ typedefs. +// + +NvBool rmcfg_IsT234D(POBJGPU pGpu); +NvBool rmcfg_IsT23XD(POBJGPU pGpu); +NvBool rmcfg_IsT264D(POBJGPU pGpu); +NvBool rmcfg_IsT264DorBetter(POBJGPU pGpu); +NvBool rmcfg_IsT26XD(POBJGPU pGpu); +NvBool rmcfg_IsT26XDorBetter(POBJGPU pGpu); +NvBool rmcfg_IsT256D(POBJGPU pGpu); +NvBool rmcfg_IsT256DorBetter(POBJGPU pGpu); +NvBool rmcfg_IsT25XD(POBJGPU pGpu); +NvBool rmcfg_IsT25XDorBetter(POBJGPU pGpu); +NvBool rmcfg_IsT23X_TEGRA_NVDISP_GPUS(POBJGPU pGpu); +NvBool rmcfg_IsT26X(POBJGPU pGpu); +NvBool rmcfg_IsT26XorBetter(POBJGPU pGpu); +NvBool rmcfg_IsT25X(POBJGPU pGpu); +NvBool rmcfg_IsT25XorBetter(POBJGPU pGpu); + + +#endif // _G_RMCFG_UTIL_H_ diff --git a/src/nvidia/generated/g_rpc-message-header.h b/src/nvidia/generated/g_rpc-message-header.h new file mode 100644 index 0000000..4d188b9 --- /dev/null +++ b/src/nvidia/generated/g_rpc-message-header.h @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * WARNING: This is an autogenerated file. DO NOT EDIT. + * This file is generated using below files: + * template file: inc/kernel/vgpu/gt_rpc-message.h + * definition file: inc/kernel/vgpu/rpc-message-header.def + */ + + +#ifdef RPC_MESSAGE_STRUCTURES +typedef union rpc_message_rpc_union_field_v03_00 +{ + NvU32 spare; + NvU32 cpuRmGfid; +} rpc_message_rpc_union_field_v03_00; + +typedef rpc_message_rpc_union_field_v03_00 rpc_message_rpc_union_field_v; + +typedef struct rpc_message_header_v03_00 +{ + NvU32 header_version; + NvU32 signature; + NvU32 length; + NvU32 function; + NvU32 rpc_result; + NvU32 rpc_result_private; + NvU32 sequence; + rpc_message_rpc_union_field_v u; + rpc_generic_union rpc_message_data[]; +} rpc_message_header_v03_00; + +typedef rpc_message_header_v03_00 rpc_message_header_v; + + +#endif + +#ifdef RPC_MESSAGE_GENERIC_UNION +// This is a generic union, that will be used for the communication between the vmioplugin & guest RM. +typedef union rpc_message_generic_union { + rpc_message_rpc_union_field_v03_00 rpc_union_field_v03_00; + rpc_message_rpc_union_field_v rpc_union_field_v; + rpc_message_header_v03_00 header_v03_00; + rpc_message_header_v header_v; +} rpc_message_generic_union; + +#endif diff --git a/src/nvidia/generated/g_rpc-structures.h b/src/nvidia/generated/g_rpc-structures.h new file mode 100644 index 0000000..59082e5 --- /dev/null +++ b/src/nvidia/generated/g_rpc-structures.h @@ -0,0 +1,222 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * WARNING: This is an autogenerated file. DO NOT EDIT. + * This file is generated using below files: + * template file: inc/kernel/vgpu/gt_rpc-structures.h + * definition file: inc/kernel/vgpu/rpc-structures.def + */ + + +#ifdef RPC_STRUCTURES +// These structures will be used for the communication between the vmioplugin & guest RM. +#define SDK_STRUCTURES +#include "g_sdk-structures.h" +#undef SDK_STRUCTURES +typedef struct rpc_free_v03_00 +{ + NVOS00_PARAMETERS_v03_00 params; +} rpc_free_v03_00; + +typedef rpc_free_v03_00 rpc_free_v; + +typedef struct rpc_dup_object_v03_00 +{ + NVOS55_PARAMETERS_v03_00 params; +} rpc_dup_object_v03_00; + +typedef rpc_dup_object_v03_00 rpc_dup_object_v; + +typedef struct rpc_gsp_rm_alloc_v03_00 +{ + NvHandle hClient; + NvHandle hParent; + NvHandle hObject; + NvU32 hClass; + NvU32 status; + NvU32 paramsSize; + NvU32 flags; + NvU8 reserved[4]; + NvU8 params[]; +} rpc_gsp_rm_alloc_v03_00; + +typedef rpc_gsp_rm_alloc_v03_00 rpc_gsp_rm_alloc_v; + +typedef struct rpc_gsp_rm_control_v03_00 +{ + NvHandle hClient; + NvHandle hObject; + NvU32 cmd; + NvU32 status; + NvU32 paramsSize; + NvU32 rmapiRpcFlags; + NvU32 rmctrlFlags; + NvU32 rmctrlAccessRight; + NvU64 reserved0 NV_ALIGN_BYTES(8); + NvU8 params[]; +} rpc_gsp_rm_control_v03_00; + +typedef rpc_gsp_rm_control_v03_00 rpc_gsp_rm_control_v; + +typedef struct rpc_post_event_v17_00 +{ + NvHandle hClient; + NvHandle hEvent; + NvU32 notifyIndex; + NvU32 data; + NvU16 info16; + NvU32 status; + NvU32 eventDataSize; + NvBool bNotifyList; + NvU8 eventData[]; +} rpc_post_event_v17_00; + +typedef rpc_post_event_v17_00 rpc_post_event_v; + +typedef struct rpc_rg_line_intr_v17_00 +{ + NvU32 head; + NvU32 rgIntr; +} rpc_rg_line_intr_v17_00; + +typedef rpc_rg_line_intr_v17_00 rpc_rg_line_intr_v; + +typedef struct rpc_display_modeset_v01_00 +{ + NvBool bModesetStart; + NvU32 minRequiredIsoBandwidthKBPS; + NvU32 minRequiredFloorBandwidthKBPS; +} rpc_display_modeset_v01_00; + +typedef rpc_display_modeset_v01_00 rpc_display_modeset_v; + +typedef struct rpc_dce_rm_init_v01_00 +{ + NvBool bInit; + NvU32 hInternalClient; +} rpc_dce_rm_init_v01_00; + +typedef rpc_dce_rm_init_v01_00 rpc_dce_rm_init_v; + + +#endif + +#ifdef RPC_DEBUG_PRINT_FUNCTIONS +// These are definitions for versioned functions. These will be used for RPC logging in the vmioplugin. 
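
[Editor's note -- illustration, not part of the patch] The #ifdef guards in these generated headers act as selectors: a consuming translation unit defines the guard for the expansion it wants, includes the header, then undefines the guard again, the same dance this section performs on g_sdk-structures.h via SDK_DEBUG_PRINT_FUNCTIONS just below. A minimal sketch of a hypothetical consumer:

    /* hypothetical consumer TU (editor's sketch, names illustrative) */
    #define RPC_STRUCTURES
    #include "g_rpc-structures.h"   /* emits the rpc_*_v* struct typedefs */
    #undef RPC_STRUCTURES

    #define RPC_GENERIC_UNION
    #include "g_rpc-structures.h"   /* emits rpc_generic_union over those structs */
    #undef RPC_GENERIC_UNION
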
+#define SDK_DEBUG_PRINT_FUNCTIONS +#include "g_sdk-structures.h" +#undef SDK_DEBUG_PRINT_FUNCTIONS +#ifndef SKIP_PRINT_rpc_free_v03_00 +vmiopd_mdesc_t *rpcdebugFree_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_free_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_dup_object_v03_00 +vmiopd_mdesc_t *rpcdebugDupObject_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_dup_object_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_gsp_rm_alloc_v03_00 +vmiopd_mdesc_t *rpcdebugGspRmAlloc_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_gsp_rm_alloc_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_gsp_rm_control_v03_00 +vmiopd_mdesc_t *rpcdebugGspRmControl_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_gsp_rm_control_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_post_event_v17_00 +vmiopd_mdesc_t *rpcdebugPostEvent_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_post_event_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_rg_line_intr_v17_00 +vmiopd_mdesc_t *rpcdebugRgLineIntr_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_rg_line_intr_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_display_modeset_v01_00 +vmiopd_mdesc_t *rpcdebugDisplayModeset_v01_00(void) +{ + return &vmiopd_mdesc_t_rpc_display_modeset_v01_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_dce_rm_init_v01_00 +vmiopd_mdesc_t *rpcdebugDceRmInit_v01_00(void) +{ + return &vmiopd_mdesc_t_rpc_dce_rm_init_v01_00; +} +#endif + + +#endif + +#ifdef RPC_GENERIC_UNION +// This is a generic union, that will be used for the communication between the vmioplugin & guest RM. +typedef union rpc_generic_union { + rpc_free_v03_00 free_v03_00; + rpc_free_v free_v; + rpc_dup_object_v03_00 dup_object_v03_00; + rpc_dup_object_v dup_object_v; + rpc_gsp_rm_alloc_v03_00 gsp_rm_alloc_v03_00; + rpc_gsp_rm_alloc_v gsp_rm_alloc_v; + rpc_gsp_rm_control_v03_00 gsp_rm_control_v03_00; + rpc_gsp_rm_control_v gsp_rm_control_v; + rpc_post_event_v17_00 post_event_v17_00; + rpc_post_event_v post_event_v; + rpc_rg_line_intr_v17_00 rg_line_intr_v17_00; + rpc_rg_line_intr_v rg_line_intr_v; + rpc_display_modeset_v01_00 display_modeset_v01_00; + rpc_display_modeset_v display_modeset_v; + rpc_dce_rm_init_v01_00 dce_rm_init_v01_00; + rpc_dce_rm_init_v dce_rm_init_v; +} rpc_generic_union; + +#endif + + +#ifdef RPC_ARRAY_LENGTH_FUNCTIONS +#define SDK_ARRAY_LENGTH_FUNCTIONS +#include "g_sdk-structures.h" +#undef SDK_ARRAY_LENGTH_FUNCTIONS + +#endif + +#ifdef AUTOGENERATE_RPC_MIN_SUPPORTED_VERSION_INFORMATION +#define NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL_MAJOR 0x18 +#define NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL_MINOR 0x00 +#endif diff --git a/src/nvidia/generated/g_rs_client_nvoc.c b/src/nvidia/generated/g_rs_client_nvoc.c new file mode 100644 index 0000000..5100532 --- /dev/null +++ b/src/nvidia/generated/g_rs_client_nvoc.c @@ -0,0 +1,564 @@ +#define NVOC_RS_CLIENT_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_rs_client_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x8f87e5 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClient; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +// Forward declarations for RsClient +void __nvoc_init__Object(Object*); +void __nvoc_init__RsClient(RsClient*); +void __nvoc_init_funcTable_RsClient(RsClient*); +NV_STATUS 
__nvoc_ctor_RsClient(RsClient*, struct PORT_MEM_ALLOCATOR *arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_RsClient(RsClient*); +void __nvoc_dtor_RsClient(RsClient*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__RsClient; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__RsClient; + +// Down-thunk(s) to bridge RsClient methods from ancestors (if any) + +// Up-thunk(s) to bridge RsClient methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_RsClient = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RsClient), + /*classId=*/ classId(RsClient), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RsClient", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RsClient, + /*pCastInfo=*/ &__nvoc_castinfo__RsClient, + /*pExportInfo=*/ &__nvoc_export_info__RsClient +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__RsClient __nvoc_metadata__RsClient = { + .rtti.pClassDef = &__nvoc_class_def_RsClient, // (client) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RsClient, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(RsClient, __nvoc_base_Object), + + .vtable.__clientValidate__ = &clientValidate_IMPL, // virtual + .vtable.__clientValidateLocks__ = &clientValidateLocks_IMPL, // virtual + .vtable.__clientGetCachedPrivilege__ = &clientGetCachedPrivilege_IMPL, // virtual + .vtable.__clientIsAdmin__ = &clientIsAdmin_IMPL, // virtual + .vtable.__clientFreeResource__ = &clientFreeResource_IMPL, // virtual + .vtable.__clientDestructResourceRef__ = &clientDestructResourceRef_IMPL, // virtual + .vtable.__clientUnmapMemory__ = &clientUnmapMemory_IMPL, // virtual + .vtable.__clientInterMap__ = &clientInterMap_IMPL, // virtual + .vtable.__clientInterUnmap__ = &clientInterUnmap_IMPL, // virtual + .vtable.__clientValidateNewResourceHandle__ = &clientValidateNewResourceHandle_IMPL, // virtual + .vtable.__clientPostProcessPendingFreeList__ = &clientPostProcessPendingFreeList_IMPL, // virtual + .vtable.__clientShareResource__ = &clientShareResource_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__RsClient = { + .numRelatives = 2, + .relatives = { + &__nvoc_metadata__RsClient.rtti, // [0]: (client) this + &__nvoc_metadata__RsClient.metadata__Object.rtti, // [1]: (obj) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__RsClient = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_RsClient(RsClient *pThis) { + __nvoc_clientDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RsClient(RsClient *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_RsClient(RsClient *pThis, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_RsClient_fail_Object; + __nvoc_init_dataField_RsClient(pThis); + + status = __nvoc_clientConstruct(pThis, arg_pAllocator, arg_pParams); + if (status != 
NV_OK) goto __nvoc_ctor_RsClient_fail__init; + goto __nvoc_ctor_RsClient_exit; // Success + +__nvoc_ctor_RsClient_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_RsClient_fail_Object: +__nvoc_ctor_RsClient_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_RsClient_1(RsClient *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_RsClient_1 + + +// Initialize vtable(s) for 12 virtual method(s). +void __nvoc_init_funcTable_RsClient(RsClient *pThis) { + __nvoc_init_funcTable_RsClient_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__RsClient(RsClient *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_RsClient = pThis; // (client) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Object(&pThis->__nvoc_base_Object); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__RsClient.metadata__Object; // (obj) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__RsClient; // (client) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_RsClient(pThis); +} + +NV_STATUS __nvoc_objCreate_RsClient(RsClient **ppThis, Dynamic *pParent, NvU32 createFlags, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + RsClient *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(RsClient), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(RsClient)); + + pThis->__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__RsClient(pThis); + status = __nvoc_ctor_RsClient(pThis, arg_pAllocator, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RsClient_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_RsClient_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. 
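// Editor's note: the branch below covers the creator's two allocation modes.
// With NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT the caller owns the storage,
// so it is only scrubbed back to zero; otherwise the heap block is freed and
// the caller's out-pointer is cleared.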
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(RsClient)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RsClient(RsClient **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct PORT_MEM_ALLOCATOR * arg_pAllocator = va_arg(args, struct PORT_MEM_ALLOCATOR *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RsClient(ppThis, pParent, createFlags, arg_pAllocator, arg_pParams); + + return status; +} + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x083442 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClientResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +// Forward declarations for RsClientResource +void __nvoc_init__RsResource(RsResource*); +void __nvoc_init__RsClientResource(RsClientResource*); +void __nvoc_init_funcTable_RsClientResource(RsClientResource*); +NV_STATUS __nvoc_ctor_RsClientResource(RsClientResource*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_RsClientResource(RsClientResource*); +void __nvoc_dtor_RsClientResource(RsClientResource*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__RsClientResource; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__RsClientResource; + +// Down-thunk(s) to bridge RsClientResource methods from ancestors (if any) + +// Up-thunk(s) to bridge RsClientResource methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_clientresCanCopy(struct RsClientResource *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_clientresIsDuplicate(struct RsClientResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +void __nvoc_up_thunk_RsResource_clientresPreDestruct(struct RsClientResource *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_clientresControl(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_clientresControlFilter(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_clientresControlSerialization_Prologue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RsResource_clientresControlSerialization_Epilogue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_clientresControl_Prologue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RsResource_clientresControl_Epilogue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_clientresMap(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_RsResource_clientresUnmap(struct RsClientResource 
*pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_RsResource_clientresIsPartialUnmapSupported(struct RsClientResource *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_clientresMapTo(struct RsClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_clientresUnmapFrom(struct RsClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_clientresGetRefCount(struct RsClientResource *pResource); // this +NvBool __nvoc_up_thunk_RsResource_clientresAccessCallback(struct RsClientResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NvBool __nvoc_up_thunk_RsResource_clientresShareCallback(struct RsClientResource *pResource, struct RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +void __nvoc_up_thunk_RsResource_clientresAddAdditionalDependants(struct RsClient *pClient, struct RsClientResource *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_RsClientResource = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RsClientResource), + /*classId=*/ classId(RsClientResource), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RsClientResource", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RsClientResource, + /*pCastInfo=*/ &__nvoc_castinfo__RsClientResource, + /*pExportInfo=*/ &__nvoc_export_info__RsClientResource +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__RsClientResource __nvoc_metadata__RsClientResource = { + .rtti.pClassDef = &__nvoc_class_def_RsClientResource, // (clientres) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RsClientResource, + .rtti.offset = 0, + .metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super + .metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RsResource.rtti.offset = NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource), + .metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^2 + .metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource.__nvoc_base_Object), + + .vtable.__clientresCanCopy__ = &__nvoc_up_thunk_RsResource_clientresCanCopy, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__clientresIsDuplicate__ = &__nvoc_up_thunk_RsResource_clientresIsDuplicate, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__clientresPreDestruct__ = &__nvoc_up_thunk_RsResource_clientresPreDestruct, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__clientresControl__ = &__nvoc_up_thunk_RsResource_clientresControl, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resControl__ = &resControl_IMPL, // virtual + .vtable.__clientresControlFilter__ = &__nvoc_up_thunk_RsResource_clientresControlFilter, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__clientresControlSerialization_Prologue__ = 
&__nvoc_up_thunk_RsResource_clientresControlSerialization_Prologue, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &resControlSerialization_Prologue_IMPL, // virtual + .vtable.__clientresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RsResource_clientresControlSerialization_Epilogue, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &resControlSerialization_Epilogue_IMPL, // virtual + .vtable.__clientresControl_Prologue__ = &__nvoc_up_thunk_RsResource_clientresControl_Prologue, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resControl_Prologue__ = &resControl_Prologue_IMPL, // virtual + .vtable.__clientresControl_Epilogue__ = &__nvoc_up_thunk_RsResource_clientresControl_Epilogue, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resControl_Epilogue__ = &resControl_Epilogue_IMPL, // virtual + .vtable.__clientresMap__ = &__nvoc_up_thunk_RsResource_clientresMap, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resMap__ = &resMap_IMPL, // virtual + .vtable.__clientresUnmap__ = &__nvoc_up_thunk_RsResource_clientresUnmap, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resUnmap__ = &resUnmap_IMPL, // virtual + .vtable.__clientresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_clientresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__clientresMapTo__ = &__nvoc_up_thunk_RsResource_clientresMapTo, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__clientresUnmapFrom__ = &__nvoc_up_thunk_RsResource_clientresUnmapFrom, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__clientresGetRefCount__ = &__nvoc_up_thunk_RsResource_clientresGetRefCount, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__clientresAccessCallback__ = &__nvoc_up_thunk_RsResource_clientresAccessCallback, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resAccessCallback__ = &resAccessCallback_IMPL, // virtual + .vtable.__clientresShareCallback__ = &__nvoc_up_thunk_RsResource_clientresShareCallback, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resShareCallback__ = &resShareCallback_IMPL, // virtual + .vtable.__clientresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_clientresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__RsClientResource = { + .numRelatives = 3, + .relatives = { + &__nvoc_metadata__RsClientResource.rtti, // [0]: (clientres) this + &__nvoc_metadata__RsClientResource.metadata__RsResource.rtti, // [1]: (res) super + &__nvoc_metadata__RsClientResource.metadata__RsResource.metadata__Object.rtti, // [2]: (obj) super^2 + } +}; + +// 18 up-thunk(s) defined to bridge methods in RsClientResource to superclasses + +// clientresCanCopy: virtual inherited (res) base (res) +NvBool __nvoc_up_thunk_RsResource_clientresCanCopy(struct RsClientResource *pResource) 
{ + return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource))); +} + +// clientresIsDuplicate: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_clientresIsDuplicate(struct RsClientResource *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource)), hMemory, pDuplicate); +} + +// clientresPreDestruct: virtual inherited (res) base (res) +void __nvoc_up_thunk_RsResource_clientresPreDestruct(struct RsClientResource *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource))); +} + +// clientresControl: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_clientresControl(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource)), pCallContext, pParams); +} + +// clientresControlFilter: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_clientresControlFilter(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource)), pCallContext, pParams); +} + +// clientresControlSerialization_Prologue: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_clientresControlSerialization_Prologue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlSerialization_Prologue((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource)), pCallContext, pParams); +} + +// clientresControlSerialization_Epilogue: virtual inherited (res) base (res) +void __nvoc_up_thunk_RsResource_clientresControlSerialization_Epilogue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + resControlSerialization_Epilogue((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource)), pCallContext, pParams); +} + +// clientresControl_Prologue: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_clientresControl_Prologue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl_Prologue((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource)), pCallContext, pParams); +} + +// clientresControl_Epilogue: virtual inherited (res) base (res) +void __nvoc_up_thunk_RsResource_clientresControl_Epilogue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + resControl_Epilogue((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource)), pCallContext, pParams); +} + +// clientresMap: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_clientresMap(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return 
resMap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource)), pCallContext, pParams, pCpuMapping); +} + +// clientresUnmap: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_clientresUnmap(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource)), pCallContext, pCpuMapping); +} + +// clientresIsPartialUnmapSupported: inline virtual inherited (res) base (res) body +NvBool __nvoc_up_thunk_RsResource_clientresIsPartialUnmapSupported(struct RsClientResource *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource))); +} + +// clientresMapTo: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_clientresMapTo(struct RsClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource)), pParams); +} + +// clientresUnmapFrom: virtual inherited (res) base (res) +NV_STATUS __nvoc_up_thunk_RsResource_clientresUnmapFrom(struct RsClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource)), pParams); +} + +// clientresGetRefCount: virtual inherited (res) base (res) +NvU32 __nvoc_up_thunk_RsResource_clientresGetRefCount(struct RsClientResource *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource))); +} + +// clientresAccessCallback: virtual inherited (res) base (res) +NvBool __nvoc_up_thunk_RsResource_clientresAccessCallback(struct RsClientResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return resAccessCallback((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource)), pInvokingClient, pAllocParams, accessRight); +} + +// clientresShareCallback: virtual inherited (res) base (res) +NvBool __nvoc_up_thunk_RsResource_clientresShareCallback(struct RsClientResource *pResource, struct RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return resShareCallback((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// clientresAddAdditionalDependants: virtual inherited (res) base (res) +void __nvoc_up_thunk_RsResource_clientresAddAdditionalDependants(struct RsClient *pClient, struct RsClientResource *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource)), pReference); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__RsClientResource = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsResource(RsResource*); +void __nvoc_dtor_RsClientResource(RsClientResource *pThis) { + __nvoc_clientresDestruct(pThis); + __nvoc_dtor_RsResource(&pThis->__nvoc_base_RsResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RsClientResource(RsClientResource *pThis) { + 
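// Editor's note: RsClientResource has no data fields that need non-zero
// initialization in this profile (the zero-fill in the creator covers them),
// so the statement below only keeps the parameter referenced.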
PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsResource(RsResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_RsClientResource(RsClientResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsResource(&pThis->__nvoc_base_RsResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RsClientResource_fail_RsResource; + __nvoc_init_dataField_RsClientResource(pThis); + + status = __nvoc_clientresConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RsClientResource_fail__init; + goto __nvoc_ctor_RsClientResource_exit; // Success + +__nvoc_ctor_RsClientResource_fail__init: + __nvoc_dtor_RsResource(&pThis->__nvoc_base_RsResource); +__nvoc_ctor_RsClientResource_fail_RsResource: +__nvoc_ctor_RsClientResource_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_RsClientResource_1(RsClientResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_RsClientResource_1 + + +// Initialize vtable(s) for 18 virtual method(s). +void __nvoc_init_funcTable_RsClientResource(RsClientResource *pThis) { + __nvoc_init_funcTable_RsClientResource_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__RsClientResource(RsClientResource *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^2 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RsResource; // (res) super + pThis->__nvoc_pbase_RsClientResource = pThis; // (clientres) this + + // Recurse to superclass initialization function(s). + __nvoc_init__RsResource(&pThis->__nvoc_base_RsResource); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__RsClientResource.metadata__RsResource.metadata__Object; // (obj) super^2 + pThis->__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__RsClientResource.metadata__RsResource; // (res) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__RsClientResource; // (clientres) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_RsClientResource(pThis); +} + +NV_STATUS __nvoc_objCreate_RsClientResource(RsClientResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + RsClientResource *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(RsClientResource), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(RsClientResource)); + + pThis->__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. 
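// Editor's note: NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY appears to mark a
// parent supplied only for HAL-spec resolution; when it is set, or when no
// parent is given at all, the object is deliberately left unparented below.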
+ if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__RsClientResource(pThis); + status = __nvoc_ctor_RsClientResource(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RsClientResource_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_RsClientResource_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_RsResource.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(RsClientResource)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RsClientResource(RsClientResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RsClientResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_rs_client_nvoc.h b/src/nvidia/generated/g_rs_client_nvoc.h new file mode 100644 index 0000000..3591ccc --- /dev/null +++ b/src/nvidia/generated/g_rs_client_nvoc.h @@ -0,0 +1,795 @@ + +#ifndef _G_RS_CLIENT_NVOC_H_ +#define _G_RS_CLIENT_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once +#include "g_rs_client_nvoc.h" + +#ifndef _RS_CLIENT_H_ +#define _RS_CLIENT_H_ + + +#include "resserv/resserv.h" +#include "nvport/nvport.h" +#include "resserv/rs_resource.h" +#include "containers/list.h" +#include "utils/nvrange.h" + +#define RS_UNIQUE_HANDLE_BASE (0xcaf00000) +#define RS_UNIQUE_HANDLE_RANGE (0x00080000) + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup RsClient + * @addtogroup RsClient + * @{*/ + +typedef enum { + CLIENT_TYPE_USER, + CLIENT_TYPE_KERNEL +} CLIENT_TYPE; + +typedef struct AccessBackRef +{ + NvHandle hClient; + NvHandle hResource; +} AccessBackRef; + +MAKE_LIST(AccessBackRefList, AccessBackRef); + +/** + * Information about a client + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_RS_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__RsClient; +struct NVOC_METADATA__Object; +struct NVOC_VTABLE__RsClient; + + +struct RsClient { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__RsClient *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct Object __nvoc_base_Object; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct RsClient *__nvoc_pbase_RsClient; // client + + // Data members + NvHandle hClient; + CLIENT_TYPE type; + NvBool bActive; + NvBool bResourceWarning; + NvBool bDisabled; + NvBool bHighPriorityFreeDone; + RsRefMap resourceMap; + AccessBackRefList accessBackRefList; + NvHandle handleRangeStart; + NvHandle handleRangeSize; + struct NV_RANGE handleRestrictRange; + NvHandle handleGenIdx; + RsRefFreeList pendingFreeList; + struct RS_FREE_STACK *pFreeStack; + struct ListNode disabledClientNode; +}; + + +// Vtable with 12 per-class function pointers +struct NVOC_VTABLE__RsClient { + NV_STATUS (*__clientValidate__)(struct RsClient * /*this*/, const API_SECURITY_INFO *); // virtual + NV_STATUS (*__clientValidateLocks__)(struct RsClient * /*this*/, RsServer *, const CLIENT_ENTRY *); // virtual + RS_PRIV_LEVEL (*__clientGetCachedPrivilege__)(struct RsClient * /*this*/); // virtual + NvBool (*__clientIsAdmin__)(struct RsClient * /*this*/, RS_PRIV_LEVEL); // virtual + NV_STATUS (*__clientFreeResource__)(struct RsClient * /*this*/, RsServer *, struct RS_RES_FREE_PARAMS_INTERNAL *); // virtual + NV_STATUS (*__clientDestructResourceRef__)(struct RsClient * /*this*/, RsServer *, struct RsResourceRef *, struct RS_LOCK_INFO *, API_SECURITY_INFO *); // virtual + NV_STATUS (*__clientUnmapMemory__)(struct RsClient * /*this*/, struct RsResourceRef *, struct RS_LOCK_INFO *, struct RsCpuMapping **, API_SECURITY_INFO *); // virtual + NV_STATUS (*__clientInterMap__)(struct RsClient * /*this*/, struct RsResourceRef *, struct RsResourceRef *, struct RS_INTER_MAP_PARAMS *); // virtual + NV_STATUS (*__clientInterUnmap__)(struct RsClient * /*this*/, struct RsResourceRef *, struct RS_INTER_UNMAP_PARAMS *); // virtual + NV_STATUS (*__clientValidateNewResourceHandle__)(struct RsClient * /*this*/, NvHandle, NvBool); // virtual + NV_STATUS (*__clientPostProcessPendingFreeList__)(struct RsClient * /*this*/, struct RsResourceRef **); // virtual + NV_STATUS 
(*__clientShareResource__)(struct RsClient * /*this*/, struct RsResourceRef *, RS_SHARE_POLICY *, struct CALL_CONTEXT *); // virtual +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__RsClient { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; + const struct NVOC_VTABLE__RsClient vtable; +}; + +#ifndef __NVOC_CLASS_RsClient_TYPEDEF__ +#define __NVOC_CLASS_RsClient_TYPEDEF__ +typedef struct RsClient RsClient; +#endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsClient +#define __nvoc_class_id_RsClient 0x8f87e5 +#endif /* __nvoc_class_id_RsClient */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClient; + +#define __staticCast_RsClient(pThis) \ + ((pThis)->__nvoc_pbase_RsClient) + +#ifdef __nvoc_rs_client_h_disabled +#define __dynamicCast_RsClient(pThis) ((RsClient*) NULL) +#else //__nvoc_rs_client_h_disabled +#define __dynamicCast_RsClient(pThis) \ + ((RsClient*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RsClient))) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_RsClient(RsClient**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RsClient(RsClient**, Dynamic*, NvU32, struct PORT_MEM_ALLOCATOR *arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_RsClient(ppNewObj, pParent, createFlags, arg_pAllocator, arg_pParams) \ + __nvoc_objCreate_RsClient((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pAllocator, arg_pParams) + + +// Wrapper macros +#define clientValidate_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__clientValidate__ +#define clientValidate(pClient, pSecInfo) clientValidate_DISPATCH(pClient, pSecInfo) +#define clientValidateLocks_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__clientValidateLocks__ +#define clientValidateLocks(pClient, pServer, pClientEntry) clientValidateLocks_DISPATCH(pClient, pServer, pClientEntry) +#define clientGetCachedPrivilege_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__clientGetCachedPrivilege__ +#define clientGetCachedPrivilege(pClient) clientGetCachedPrivilege_DISPATCH(pClient) +#define clientIsAdmin_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__clientIsAdmin__ +#define clientIsAdmin(pClient, privLevel) clientIsAdmin_DISPATCH(pClient, privLevel) +#define clientFreeResource_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__clientFreeResource__ +#define clientFreeResource(pClient, pServer, pParams) clientFreeResource_DISPATCH(pClient, pServer, pParams) +#define clientDestructResourceRef_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__clientDestructResourceRef__ +#define clientDestructResourceRef(pClient, pServer, pResourceRef, pLockInfo, pSecInfo) clientDestructResourceRef_DISPATCH(pClient, pServer, pResourceRef, pLockInfo, pSecInfo) +#define clientUnmapMemory_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__clientUnmapMemory__ +#define clientUnmapMemory(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo) clientUnmapMemory_DISPATCH(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo) +#define clientInterMap_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__clientInterMap__ +#define clientInterMap(pClient, pMapperRef, pMappableRef, pParams) clientInterMap_DISPATCH(pClient, pMapperRef, pMappableRef, pParams) +#define clientInterUnmap_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__clientInterUnmap__ +#define clientInterUnmap(pClient, pMapperRef, pParams) 
clientInterUnmap_DISPATCH(pClient, pMapperRef, pParams) +#define clientValidateNewResourceHandle_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__clientValidateNewResourceHandle__ +#define clientValidateNewResourceHandle(pClient, hResource, bRestrict) clientValidateNewResourceHandle_DISPATCH(pClient, hResource, bRestrict) +#define clientPostProcessPendingFreeList_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__clientPostProcessPendingFreeList__ +#define clientPostProcessPendingFreeList(pClient, ppFirstLowPriRef) clientPostProcessPendingFreeList_DISPATCH(pClient, ppFirstLowPriRef) +#define clientShareResource_FNPTR(pClient) pClient->__nvoc_metadata_ptr->vtable.__clientShareResource__ +#define clientShareResource(pClient, pResourceRef, pSharePolicy, pCallContext) clientShareResource_DISPATCH(pClient, pResourceRef, pSharePolicy, pCallContext) + +// Dispatch functions +static inline NV_STATUS clientValidate_DISPATCH(struct RsClient *pClient, const API_SECURITY_INFO *pSecInfo) { + return pClient->__nvoc_metadata_ptr->vtable.__clientValidate__(pClient, pSecInfo); +} + +static inline NV_STATUS clientValidateLocks_DISPATCH(struct RsClient *pClient, RsServer *pServer, const CLIENT_ENTRY *pClientEntry) { + return pClient->__nvoc_metadata_ptr->vtable.__clientValidateLocks__(pClient, pServer, pClientEntry); +} + +static inline RS_PRIV_LEVEL clientGetCachedPrivilege_DISPATCH(struct RsClient *pClient) { + return pClient->__nvoc_metadata_ptr->vtable.__clientGetCachedPrivilege__(pClient); +} + +static inline NvBool clientIsAdmin_DISPATCH(struct RsClient *pClient, RS_PRIV_LEVEL privLevel) { + return pClient->__nvoc_metadata_ptr->vtable.__clientIsAdmin__(pClient, privLevel); +} + +static inline NV_STATUS clientFreeResource_DISPATCH(struct RsClient *pClient, RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams) { + return pClient->__nvoc_metadata_ptr->vtable.__clientFreeResource__(pClient, pServer, pParams); +} + +static inline NV_STATUS clientDestructResourceRef_DISPATCH(struct RsClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, API_SECURITY_INFO *pSecInfo) { + return pClient->__nvoc_metadata_ptr->vtable.__clientDestructResourceRef__(pClient, pServer, pResourceRef, pLockInfo, pSecInfo); +} + +static inline NV_STATUS clientUnmapMemory_DISPATCH(struct RsClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo) { + return pClient->__nvoc_metadata_ptr->vtable.__clientUnmapMemory__(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo); +} + +static inline NV_STATUS clientInterMap_DISPATCH(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams) { + return pClient->__nvoc_metadata_ptr->vtable.__clientInterMap__(pClient, pMapperRef, pMappableRef, pParams); +} + +static inline NV_STATUS clientInterUnmap_DISPATCH(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams) { + return pClient->__nvoc_metadata_ptr->vtable.__clientInterUnmap__(pClient, pMapperRef, pParams); +} + +static inline NV_STATUS clientValidateNewResourceHandle_DISPATCH(struct RsClient *pClient, NvHandle hResource, NvBool bRestrict) { + return pClient->__nvoc_metadata_ptr->vtable.__clientValidateNewResourceHandle__(pClient, hResource, bRestrict); +} + +static inline NV_STATUS clientPostProcessPendingFreeList_DISPATCH(struct RsClient *pClient, struct RsResourceRef 
**ppFirstLowPriRef) { + return pClient->__nvoc_metadata_ptr->vtable.__clientPostProcessPendingFreeList__(pClient, ppFirstLowPriRef); +} + +static inline NV_STATUS clientShareResource_DISPATCH(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) { + return pClient->__nvoc_metadata_ptr->vtable.__clientShareResource__(pClient, pResourceRef, pSharePolicy, pCallContext); +} + +NV_STATUS clientValidate_IMPL(struct RsClient *pClient, const API_SECURITY_INFO *pSecInfo); + +NV_STATUS clientValidateLocks_IMPL(struct RsClient *pClient, RsServer *pServer, const CLIENT_ENTRY *pClientEntry); + +RS_PRIV_LEVEL clientGetCachedPrivilege_IMPL(struct RsClient *pClient); + +NvBool clientIsAdmin_IMPL(struct RsClient *pClient, RS_PRIV_LEVEL privLevel); + +NV_STATUS clientFreeResource_IMPL(struct RsClient *pClient, RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams); + +NV_STATUS clientDestructResourceRef_IMPL(struct RsClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, API_SECURITY_INFO *pSecInfo); + +NV_STATUS clientUnmapMemory_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo); + +NV_STATUS clientInterMap_IMPL(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams); + +NV_STATUS clientInterUnmap_IMPL(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams); + +NV_STATUS clientValidateNewResourceHandle_IMPL(struct RsClient *pClient, NvHandle hResource, NvBool bRestrict); + +NV_STATUS clientPostProcessPendingFreeList_IMPL(struct RsClient *pClient, struct RsResourceRef **ppFirstLowPriRef); + +NV_STATUS clientShareResource_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext); + +NV_STATUS clientConstruct_IMPL(struct RsClient *arg_pClient, struct PORT_MEM_ALLOCATOR *arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_clientConstruct(arg_pClient, arg_pAllocator, arg_pParams) clientConstruct_IMPL(arg_pClient, arg_pAllocator, arg_pParams) +void clientDestruct_IMPL(struct RsClient *pClient); + +#define __nvoc_clientDestruct(pClient) clientDestruct_IMPL(pClient) +NV_STATUS clientGetResourceByRef_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef, struct RsResource **ppResource); + +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGetResourceByRef(struct RsClient *pClient, struct RsResourceRef *pResourceRef, struct RsResource **ppResource) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGetResourceByRef(pClient, pResourceRef, ppResource) clientGetResourceByRef_IMPL(pClient, pResourceRef, ppResource) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientGetResource_IMPL(struct RsClient *pClient, NvHandle hResource, NvU32 internalClassId, struct RsResource **ppResource); + +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGetResource(struct RsClient *pClient, NvHandle hResource, NvU32 internalClassId, struct RsResource **ppResource) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGetResource(pClient, hResource, 
internalClassId, ppResource) clientGetResource_IMPL(pClient, hResource, internalClassId, ppResource) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientGetResourceRef_IMPL(struct RsClient *pClient, NvHandle hResource, struct RsResourceRef **ppResourceRef); + +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGetResourceRef(struct RsClient *pClient, NvHandle hResource, struct RsResourceRef **ppResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGetResourceRef(pClient, hResource, ppResourceRef) clientGetResourceRef_IMPL(pClient, hResource, ppResourceRef) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientGetResourceRefWithAccess_IMPL(struct RsClient *pClient, NvHandle hResource, const RS_ACCESS_MASK *pRightsRequired, struct RsResourceRef **ppResourceRef); + +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGetResourceRefWithAccess(struct RsClient *pClient, NvHandle hResource, const RS_ACCESS_MASK *pRightsRequired, struct RsResourceRef **ppResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGetResourceRefWithAccess(pClient, hResource, pRightsRequired, ppResourceRef) clientGetResourceRefWithAccess_IMPL(pClient, hResource, pRightsRequired, ppResourceRef) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientGetResourceRefByType_IMPL(struct RsClient *pClient, NvHandle hResource, NvU32 internalClassId, struct RsResourceRef **ppResourceRef); + +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGetResourceRefByType(struct RsClient *pClient, NvHandle hResource, NvU32 internalClassId, struct RsResourceRef **ppResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGetResourceRefByType(pClient, hResource, internalClassId, ppResourceRef) clientGetResourceRefByType_IMPL(pClient, hResource, internalClassId, ppResourceRef) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientAllocResource_IMPL(struct RsClient *pClient, RsServer *pServer, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientAllocResource(struct RsClient *pClient, RsServer *pServer, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientAllocResource(pClient, pServer, pParams) clientAllocResource_IMPL(pClient, pServer, pParams) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientCopyResource_IMPL(struct RsClient *pClient, RsServer *pServer, struct RS_RES_DUP_PARAMS_INTERNAL *pParams); + +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientCopyResource(struct RsClient *pClient, RsServer *pServer, struct RS_RES_DUP_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientCopyResource(pClient, pServer, pParams) clientCopyResource_IMPL(pClient, pServer, pParams) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientGenResourceHandle_IMPL(struct RsClient *pClient, NvHandle *pHandle); + +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGenResourceHandle(struct RsClient *pClient, NvHandle *pHandle) { + NV_ASSERT_FAILED_PRECOMP("RsClient was 
disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGenResourceHandle(pClient, pHandle) clientGenResourceHandle_IMPL(pClient, pHandle) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientAssignResourceHandle_IMPL(struct RsClient *pClient, NvHandle *phResource); + +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientAssignResourceHandle(struct RsClient *pClient, NvHandle *phResource) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientAssignResourceHandle(pClient, phResource) clientAssignResourceHandle_IMPL(pClient, phResource) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientUpdatePendingFreeList_IMPL(struct RsClient *pClient, struct RsResourceRef *pTarget, struct RsResourceRef *pReference, NvBool bMove); + +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientUpdatePendingFreeList(struct RsClient *pClient, struct RsResourceRef *pTarget, struct RsResourceRef *pReference, NvBool bMove) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientUpdatePendingFreeList(pClient, pTarget, pReference, bMove) clientUpdatePendingFreeList_IMPL(pClient, pTarget, pReference, bMove) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientAddAccessBackRef_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef); + +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientAddAccessBackRef(struct RsClient *pClient, struct RsResourceRef *pResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientAddAccessBackRef(pClient, pResourceRef) clientAddAccessBackRef_IMPL(pClient, pResourceRef) +#endif //__nvoc_rs_client_h_disabled + +void clientFreeAccessBackRefs_IMPL(struct RsClient *pClient, RsServer *pServer); + +#ifdef __nvoc_rs_client_h_disabled +static inline void clientFreeAccessBackRefs(struct RsClient *pClient, RsServer *pServer) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); +} +#else //__nvoc_rs_client_h_disabled +#define clientFreeAccessBackRefs(pClient, pServer) clientFreeAccessBackRefs_IMPL(pClient, pServer) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientSetHandleGenerator_IMPL(struct RsClient *pClient, NvHandle handleRangeStart, NvHandle handleRangeSize); + +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientSetHandleGenerator(struct RsClient *pClient, NvHandle handleRangeStart, NvHandle handleRangeSize) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientSetHandleGenerator(pClient, handleRangeStart, handleRangeSize) clientSetHandleGenerator_IMPL(pClient, handleRangeStart, handleRangeSize) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientCanShareResource_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext); + +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientCanShareResource(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientCanShareResource(pClient, pResourceRef, pSharePolicy, pCallContext) 
clientCanShareResource_IMPL(pClient, pResourceRef, pSharePolicy, pCallContext) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientShareResourceTargetClient_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext); + +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientShareResourceTargetClient(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientShareResourceTargetClient(pClient, pResourceRef, pSharePolicy, pCallContext) clientShareResourceTargetClient_IMPL(pClient, pResourceRef, pSharePolicy, pCallContext) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientSetRestrictedRange_IMPL(struct RsClient *pClient, NvHandle handleRangeStart, NvU32 handleRangeSize); + +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientSetRestrictedRange(struct RsClient *pClient, NvHandle handleRangeStart, NvU32 handleRangeSize) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientSetRestrictedRange(pClient, handleRangeStart, handleRangeSize) clientSetRestrictedRange_IMPL(pClient, handleRangeStart, handleRangeSize) +#endif //__nvoc_rs_client_h_disabled + +#undef PRIVATE_FIELD + +MAKE_INTRUSIVE_LIST(RsDisabledClientList, RsClient, disabledClientNode); + +/** + * Get an iterator to the elements in the client's resource map + * @param[in] pClient + * @param[in] pScopeRef Restrict the iteration based on this reference [optional] + * @param[in] internalClassId Only iterate over resources with this class id [optional] + * @param[in] type RS_ITERATE_CHILDREN, RS_ITERATE_DESCENDANTS, RS_ITERATE_CACHED, RS_ITERATE_DEPENDANTS + * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + * + * @note If type=RS_ITERATE_CHILDREN, pScopeRef will restrict iteration to children of the scope ref + * @note If type=RS_ITERATE_DESCENDANTS, pScopeRef will restrict iteration to descendants of the scope ref + * @note If type=RS_ITERATE_CACHED, pScopeRef will restrict iteration to references cached by the scope ref + */ +RS_ITERATOR clientRefIter(struct RsClient *pClient, RsResourceRef *pScopeRef, NvU32 internalClassId, RS_ITER_TYPE type, NvBool bExactMatch); + +/** + * Get the next iterator to the elements in the client's resource map + * @param[in] pClient + * @param[inout] pIt The iterator + */ +NvBool clientRefIterNext(struct RsClient *pClient, RS_ITERATOR *pIt); + +/** + * Get an iterator to the elements in the client's resource map. + * + * This iterator will visit all descendants in pre-order according to the parent-child + * resource hierarchy. 
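 *
 * A minimal usage sketch: iterate every descendant of the client, assuming
 * internalClassId == 0 means "match any class" and that RS_ORDERED_ITERATOR
 * exposes its current element as pResourceRef, as in resserv:
 *
 *   RS_ORDERED_ITERATOR it = clientRefOrderedIter(pClient, NULL, 0, NV_FALSE);
 *   while (clientRefOrderedIterNext(pClient, &it))
 *   {
 *       RsResourceRef *pRef = it.pResourceRef; // visited in pre-order
 *   }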
+ * + * @param[in] pClient + * @param[in] pScopeRef Restrict the iteration based on this reference [optional] + * @param[in] internalClassId Only iterate over resources with this class id [optional] + * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + */ +RS_ORDERED_ITERATOR clientRefOrderedIter(struct RsClient *pClient, RsResourceRef *pScopeRef, NvU32 internalClassId, NvBool bExactMatch); + +/** + * Get the next ordered iterator to the elements in the client's resource map + * @param[in] pClient + * @param[inout] pIt The iterator + */ +NvBool clientRefOrderedIterNext(struct RsClient *pClient, RS_ORDERED_ITERATOR *pIt); + +/** + * Release all CPU address mappings for a resource + * + * @param[in] pClient Client that owns the resource + * @param[in] pCallContext Caller information (which includes the resource reference whose mappings will be freed) + * @param[in] pLockInfo Information about which locks are already held, for recursive calls + */ +NV_STATUS clientUnmapResourceRefMappings(struct RsClient *pClient, CALL_CONTEXT *pCallContext, RS_LOCK_INFO *pLockInfo); + +/** + * RsResource interface to a RsClient + * + * This allows clients to be interfaced with as-if they were resources (e.g., + * to perform a control call on a client). + * + * An RsClientResource is automatically allocated under a client as a top-level + * object when that client is allocated and cannot be explicitly freed. Only + * one RsClientResource is permitted per-client. + * + * Any resource allocated under a client will be a descendant of the client + * proxy resource. + * + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_RS_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__RsClientResource; +struct NVOC_METADATA__RsResource; +struct NVOC_VTABLE__RsClientResource; + + +struct RsClientResource { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__RsClientResource *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct RsResource __nvoc_base_RsResource; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^2 + struct RsResource *__nvoc_pbase_RsResource; // res super + struct RsClientResource *__nvoc_pbase_RsClientResource; // clientres + + // Data members + struct RsClient *pClient; +}; + + +// Vtable with 18 per-class function pointers +struct NVOC_VTABLE__RsClientResource { + NvBool (*__clientresCanCopy__)(struct RsClientResource * /*this*/); // virtual inherited (res) base (res) + NV_STATUS (*__clientresIsDuplicate__)(struct RsClientResource * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (res) + void (*__clientresPreDestruct__)(struct RsClientResource * /*this*/); // virtual inherited (res) base (res) + NV_STATUS (*__clientresControl__)(struct RsClientResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (res) + NV_STATUS (*__clientresControlFilter__)(struct RsClientResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (res) + NV_STATUS (*__clientresControlSerialization_Prologue__)(struct RsClientResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (res) + void (*__clientresControlSerialization_Epilogue__)(struct RsClientResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (res) + NV_STATUS (*__clientresControl_Prologue__)(struct RsClientResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (res) + void (*__clientresControl_Epilogue__)(struct RsClientResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (res) + NV_STATUS (*__clientresMap__)(struct RsClientResource * /*this*/, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (res) base (res) + NV_STATUS (*__clientresUnmap__)(struct RsClientResource * /*this*/, struct CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (res) base (res) + NvBool (*__clientresIsPartialUnmapSupported__)(struct RsClientResource * /*this*/); // inline virtual inherited (res) base (res) body + NV_STATUS (*__clientresMapTo__)(struct RsClientResource * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (res) + NV_STATUS (*__clientresUnmapFrom__)(struct RsClientResource * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (res) + NvU32 (*__clientresGetRefCount__)(struct RsClientResource * /*this*/); // virtual inherited (res) base (res) + NvBool (*__clientresAccessCallback__)(struct RsClientResource * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (res) base (res) + NvBool (*__clientresShareCallback__)(struct RsClientResource * /*this*/, struct RsClient *, RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (res) base (res) + void (*__clientresAddAdditionalDependants__)(struct RsClient *, struct RsClientResource * /*this*/, RsResourceRef *); // virtual inherited (res) base (res) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__RsClientResource { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__RsResource metadata__RsResource; + const struct NVOC_VTABLE__RsClientResource vtable; +}; + +#ifndef __NVOC_CLASS_RsClientResource_TYPEDEF__ 
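// This typedef guard (together with the class-id guard that follows) lets any
// number of generated headers forward-declare RsClientResource without
// redefinition errors: each header carries an identical, independently
// guarded copy of the typedef.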
+#define __NVOC_CLASS_RsClientResource_TYPEDEF__ +typedef struct RsClientResource RsClientResource; +#endif /* __NVOC_CLASS_RsClientResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsClientResource +#define __nvoc_class_id_RsClientResource 0x083442 +#endif /* __nvoc_class_id_RsClientResource */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClientResource; + +#define __staticCast_RsClientResource(pThis) \ + ((pThis)->__nvoc_pbase_RsClientResource) + +#ifdef __nvoc_rs_client_h_disabled +#define __dynamicCast_RsClientResource(pThis) ((RsClientResource*) NULL) +#else //__nvoc_rs_client_h_disabled +#define __dynamicCast_RsClientResource(pThis) \ + ((RsClientResource*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RsClientResource))) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_RsClientResource(RsClientResource**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RsClientResource(RsClientResource**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_RsClientResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_RsClientResource((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define clientresCanCopy_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define clientresCanCopy(pResource) clientresCanCopy_DISPATCH(pResource) +#define clientresIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define clientresIsDuplicate(pResource, hMemory, pDuplicate) clientresIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define clientresPreDestruct_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define clientresPreDestruct(pResource) clientresPreDestruct_DISPATCH(pResource) +#define clientresControl_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControl__ +#define clientresControl(pResource, pCallContext, pParams) clientresControl_DISPATCH(pResource, pCallContext, pParams) +#define clientresControlFilter_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define clientresControlFilter(pResource, pCallContext, pParams) clientresControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define clientresControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlSerialization_Prologue__ +#define clientresControlSerialization_Prologue(pResource, pCallContext, pParams) clientresControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define clientresControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlSerialization_Epilogue__ +#define clientresControlSerialization_Epilogue(pResource, pCallContext, pParams) clientresControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define clientresControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControl_Prologue__ +#define clientresControl_Prologue(pResource, pCallContext, pParams) clientresControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define clientresControl_Epilogue_FNPTR(pResource) 
pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControl_Epilogue__ +#define clientresControl_Epilogue(pResource, pCallContext, pParams) clientresControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define clientresMap_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMap__ +#define clientresMap(pResource, pCallContext, pParams, pCpuMapping) clientresMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define clientresUnmap_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmap__ +#define clientresUnmap(pResource, pCallContext, pCpuMapping) clientresUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define clientresIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define clientresIsPartialUnmapSupported(pResource) clientresIsPartialUnmapSupported_DISPATCH(pResource) +#define clientresMapTo_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define clientresMapTo(pResource, pParams) clientresMapTo_DISPATCH(pResource, pParams) +#define clientresUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define clientresUnmapFrom(pResource, pParams) clientresUnmapFrom_DISPATCH(pResource, pParams) +#define clientresGetRefCount_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define clientresGetRefCount(pResource) clientresGetRefCount_DISPATCH(pResource) +#define clientresAccessCallback_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAccessCallback__ +#define clientresAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) clientresAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define clientresShareCallback_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resShareCallback__ +#define clientresShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) clientresShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define clientresAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define clientresAddAdditionalDependants(pClient, pResource, pReference) clientresAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NvBool clientresCanCopy_DISPATCH(struct RsClientResource *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__clientresCanCopy__(pResource); +} + +static inline NV_STATUS clientresIsDuplicate_DISPATCH(struct RsClientResource *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__clientresIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void clientresPreDestruct_DISPATCH(struct RsClientResource *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__clientresPreDestruct__(pResource); +} + +static inline NV_STATUS clientresControl_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__clientresControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS clientresControlFilter_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__clientresControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS clientresControlSerialization_Prologue_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__clientresControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void clientresControlSerialization_Epilogue_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__clientresControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS clientresControl_Prologue_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__clientresControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void clientresControl_Epilogue_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__clientresControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS clientresMap_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__clientresMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS clientresUnmap_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__clientresUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool clientresIsPartialUnmapSupported_DISPATCH(struct RsClientResource *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__clientresIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS clientresMapTo_DISPATCH(struct RsClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__clientresMapTo__(pResource, pParams); +} + +static inline NV_STATUS clientresUnmapFrom_DISPATCH(struct RsClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__clientresUnmapFrom__(pResource, pParams); +} + +static inline NvU32 clientresGetRefCount_DISPATCH(struct RsClientResource *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__clientresGetRefCount__(pResource); +} + +static inline NvBool clientresAccessCallback_DISPATCH(struct RsClientResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__clientresAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool clientresShareCallback_DISPATCH(struct RsClientResource *pResource, struct RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__clientresShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline void clientresAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct RsClientResource *pResource, RsResourceRef *pReference) { + 
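    /*
     * As with every _DISPATCH helper in this header, this is NVOC's C
     * emulation of a C++ virtual call: the function pointer is loaded from
     * the per-class vtable reached through __nvoc_metadata_ptr, so a
     * subclass that overrides this slot is invoked automatically.
     */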
pResource->__nvoc_metadata_ptr->vtable.__clientresAddAdditionalDependants__(pClient, pResource, pReference); +} + +NV_STATUS clientresConstruct_IMPL(struct RsClientResource *arg_pClientRes, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_clientresConstruct(arg_pClientRes, arg_pCallContext, arg_pParams) clientresConstruct_IMPL(arg_pClientRes, arg_pCallContext, arg_pParams) +void clientresDestruct_IMPL(struct RsClientResource *pClientRes); + +#define __nvoc_clientresDestruct(pClientRes) clientresDestruct_IMPL(pClientRes) +#undef PRIVATE_FIELD + + +/** + * Client destruction parameters + */ +struct RS_CLIENT_FREE_PARAMS_INTERNAL +{ + NvHandle hDomain; ///< [in] The parent domain + NvHandle hClient; ///< [in] The client handle + NvBool bHiPriOnly; ///< [in] Only free high priority resources + NvBool bDisableOnly; ///< [in] Only disable the listed clients, do not free them yet + NvU32 state; ///< [in] User-defined state + + RS_RES_FREE_PARAMS_INTERNAL *pResFreeParams; ///< [in] Necessary for locking state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * Return an iterator to a resource reference multi-map + * @param[in] pIndex The multi-map to iterate + * @param[in] index Return only the references belonging to this index + */ +RsIndexIter indexRefIter(RsIndex *pIndex, NvU32 index); + +/** + * Return an iterator to all resource references in a multi-map + * @param[in] pIndex The multi-map to iterate + */ +RsIndexIter indexRefIterAll(RsIndex *pIndex); + +/** + * Get the next iterator in a resource reference multi-map + * @param[in] pIt Iterator + */ +NvBool indexRefIterNext(RsIndexIter *pIt); + +/* @} */ + +#ifdef __cplusplus +} +#endif + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_RS_CLIENT_NVOC_H_ diff --git a/src/nvidia/generated/g_rs_resource_nvoc.c b/src/nvidia/generated/g_rs_resource_nvoc.c new file mode 100644 index 0000000..a15dabf --- /dev/null +++ b/src/nvidia/generated/g_rs_resource_nvoc.c @@ -0,0 +1,225 @@ +#define NVOC_RS_RESOURCE_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_rs_resource_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xd551cb = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +// Forward declarations for RsResource +void __nvoc_init__Object(Object*); +void __nvoc_init__RsResource(RsResource*); +void __nvoc_init_funcTable_RsResource(RsResource*); +NV_STATUS __nvoc_ctor_RsResource(RsResource*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_RsResource(RsResource*); +void __nvoc_dtor_RsResource(RsResource*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__RsResource; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__RsResource; + +// Down-thunk(s) to bridge RsResource methods from ancestors (if any) + +// Up-thunk(s) to bridge RsResource methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RsResource), + /*classId=*/ 
classId(RsResource), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RsResource", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RsResource, + /*pCastInfo=*/ &__nvoc_castinfo__RsResource, + /*pExportInfo=*/ &__nvoc_export_info__RsResource +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__RsResource __nvoc_metadata__RsResource = { + .rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RsResource, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(RsResource, __nvoc_base_Object), + + .vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual + .vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual + .vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__resControl__ = &resControl_IMPL, // virtual + .vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__resControlSerialization_Prologue__ = &resControlSerialization_Prologue_IMPL, // virtual + .vtable.__resControlSerialization_Epilogue__ = &resControlSerialization_Epilogue_IMPL, // virtual + .vtable.__resControl_Prologue__ = &resControl_Prologue_IMPL, // virtual + .vtable.__resControl_Epilogue__ = &resControl_Epilogue_IMPL, // virtual + .vtable.__resMap__ = &resMap_IMPL, // virtual + .vtable.__resUnmap__ = &resUnmap_IMPL, // virtual + .vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__resAccessCallback__ = &resAccessCallback_IMPL, // virtual + .vtable.__resShareCallback__ = &resShareCallback_IMPL, // virtual + .vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__RsResource = { + .numRelatives = 2, + .relatives = { + &__nvoc_metadata__RsResource.rtti, // [0]: (res) this + &__nvoc_metadata__RsResource.metadata__Object.rtti, // [1]: (obj) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__RsResource = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_RsResource(RsResource *pThis) { + __nvoc_resDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RsResource(RsResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_RsResource(RsResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_RsResource_fail_Object; + __nvoc_init_dataField_RsResource(pThis); + + status = __nvoc_resConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RsResource_fail__init; + goto __nvoc_ctor_RsResource_exit; // Success + +__nvoc_ctor_RsResource_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_RsResource_fail_Object: +__nvoc_ctor_RsResource_exit: + + return status; +} + +// Vtable 
initialization +static void __nvoc_init_funcTable_RsResource_1(RsResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_RsResource_1 + + +// Initialize vtable(s) for 18 virtual method(s). +void __nvoc_init_funcTable_RsResource(RsResource *pThis) { + __nvoc_init_funcTable_RsResource_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__RsResource(RsResource *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_RsResource = pThis; // (res) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Object(&pThis->__nvoc_base_Object); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__RsResource.metadata__Object; // (obj) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__RsResource; // (res) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_RsResource(pThis); +} + +NV_STATUS __nvoc_objCreate_RsResource(RsResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + RsResource *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(RsResource), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(RsResource)); + + pThis->__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__RsResource(pThis); + status = __nvoc_ctor_RsResource(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RsResource_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_RsResource_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. 
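    // For in-place construction the caller owns the storage, so it is only
    // re-zeroed; otherwise the memory allocated above is freed and *ppThis
    // is cleared.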
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(RsResource)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RsResource(RsResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RsResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_rs_resource_nvoc.h b/src/nvidia/generated/g_rs_resource_nvoc.h new file mode 100644 index 0000000..e7408ef --- /dev/null +++ b/src/nvidia/generated/g_rs_resource_nvoc.h @@ -0,0 +1,979 @@ + +#ifndef _G_RS_RESOURCE_NVOC_H_ +#define _G_RS_RESOURCE_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once +#include "g_rs_resource_nvoc.h" + +#ifndef _RS_RESOURCE_H_ +#define _RS_RESOURCE_H_ + +#include "nvport/nvport.h" +#include "resserv/resserv.h" +#include "nvoc/object.h" +#include "resserv/rs_access_map.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +struct RsSession; + +#ifndef __NVOC_CLASS_RsSession_TYPEDEF__ +#define __NVOC_CLASS_RsSession_TYPEDEF__ +typedef struct RsSession RsSession; +#endif /* __NVOC_CLASS_RsSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsSession +#define __nvoc_class_id_RsSession 0x830d90 +#endif /* __nvoc_class_id_RsSession */ + + + +/** + * @defgroup RsResource + * @addtogroup RsResource + * @{*/ + +#define ALLOC_STATE_INTERNAL_CLIENT_HANDLE NVBIT(5) + +/* + * Locking operations for lock-metering + */ +#define RS_LOCK_TRACE_INVALID 1 +#define RS_LOCK_TRACE_ACQUIRE 1 +#define RS_LOCK_TRACE_RELEASE 2 +#define RS_LOCK_TRACE_ALLOC 3 +#define RS_LOCK_TRACE_FREE 4 +#define RS_LOCK_TRACE_CTRL 5 +#define RS_LOCK_TRACE_MAP 6 +#define RS_LOCK_TRACE_UNMAP 7 + +/** + * Context information for top-level, resource-level, and client-level locking + * operations + */ +struct RS_LOCK_INFO +{ + struct RsClient *pClient; ///< Pointer to client that was locked (if any) + struct RsClient *pSecondClient; ///< Pointer to second client, for dual-client locking + RsResourceRef *pContextRef; ///< User-defined reference + RsResourceRef *pResRefToBackRef; ///< Resource from which to infer indirect GPU dependencies + struct RsSession *pSession; ///< Session object to be locked, if any + NvU32 flags; ///< RS_LOCK_FLAGS_* + NvU32 state; ///< RS_LOCK_STATE_* + NvU32 gpuMask; + NvU8 traceOp; ///< RS_LOCK_TRACE_* operation for lock-metering + NvU32 traceClassId; ///< Class of initial resource that was locked for lock metering +}; + +struct RS_RES_ALLOC_PARAMS_INTERNAL +{ + NvHandle hClient; ///< [in] The handle of the resource's client + NvHandle hParent; ///< [in] The handle of the resource's parent. This may be a client or another resource. + NvHandle hResource; ///< [inout] Server will assign a handle if this is 0, or else try the value provided + NvU32 externalClassId; ///< [in] External class ID of resource + NvHandle hDomain; ///< UNUSED + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + struct RsClient *pClient; ///< [out] Cached client + RsResourceRef *pResourceRef; ///< [out] Cached resource reference + NvU32 allocFlags; ///< [in] Allocation flags + NvU32 allocState; ///< [inout] Allocation state + API_SECURITY_INFO *pSecInfo; + + void *pAllocParams; ///< [in] Copied-in allocation parameters + NvU32 paramsSize; ///< [in] Copied-in allocation parameters size + + // ... Dupe alloc + struct RsClient *pSrcClient; ///< The client that is sharing the resource + RsResourceRef *pSrcRef; ///< Reference to the resource that will be shared + + RS_ACCESS_MASK *pRightsRequested; ///< [in] Access rights requested on the new resource + // Buffer for storing contents of user mask. Do not use directly, use pRightsRequested instead. + RS_ACCESS_MASK rightsRequestedCopy; + + RS_ACCESS_MASK *pRightsRequired; ///< [in] Access rights required to alloc this object type +}; + +struct RS_RES_DUP_PARAMS_INTERNAL +{ + NvHandle hClientSrc; ///< [in] The handle of the source resource's client + NvHandle hResourceSrc; ///< [in] The handle of the source resource. 
+ NvHandle hClientDst; ///< [in] The handle of the destination resource's client (may be different from source client) + NvHandle hParentDst; ///< [in] The handle of the destination resource's parent. + NvHandle hResourceDst; ///< [inout] The handle of the destination resource. Generated if 0. + void *pShareParams; ///< [in] Copied-in sharing parameters + NvU32 flags; ///< [in] Flags to denote special cases ( Bug: 2859347 to track removal) + // Internal use only + struct RsClient *pSrcClient; + struct RsClient *pDstClient; + RsResourceRef *pSrcRef; + RsResourceRef *pDstParentRef; + API_SECURITY_INFO *pSecInfo; ///< [in] Security info + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state +}; + +struct RS_RES_SHARE_PARAMS_INTERNAL +{ + NvHandle hClient; ///< [in] The handle of the owner's client + NvHandle hResource; ///< [in] The handle of the resource. + RS_SHARE_POLICY *pSharePolicy; ///< [in] The policy to share with + + // Internal use only + API_SECURITY_INFO *pSecInfo; ///< [in] Security info + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state +}; + +#define RS_IS_COPY_CTOR(pParams) ((pParams)->pSrcRef != NULL) + +struct RS_RES_FREE_PARAMS_INTERNAL +{ + NvHandle hClient; ///< [in] The handle of the resource's client + NvHandle hResource; ///< [in] The handle of the resource + NvBool bInvalidateOnly; ///< [in] Free the resource, but don't release its handle + NvHandle hDomain; ///< UNUSED + + // Internal use only + NvBool bHiPriOnly; ///< [in] Only free if this is a high priority resources + NvBool bDisableOnly; ///< [in] Disable the target instead of freeing it (only applies to clients) + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + NvU32 freeFlags; ///< [in] Flags for the free operation + NvU32 freeState; ///< [inout] Free state + RsResourceRef *pResourceRef; ///< [inout] Cached RsResourceRef + NV_STATUS status; ///< [out] Status of free operation + API_SECURITY_INFO *pSecInfo; ///< [in] Security info +}; + +struct NVOC_EXPORTED_METHOD_DEF; + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + + +struct OBJGPUGRP; + +#ifndef __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +typedef struct OBJGPUGRP OBJGPUGRP; +#endif /* __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUGRP +#define __nvoc_class_id_OBJGPUGRP 0xe40531 +#endif /* __nvoc_class_id_OBJGPUGRP */ + + + +// +// RS_RES_CONTROL_PARAMS +// +// This structure encapsulates data sent to the cmd-specific rmctrl +// handlers. Along with the arguments supplied by the requesting +// client (hClient, hObject, cmd, pParams, paramSize). 
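//
// A hypothetical internal caller might fill in just the client-supplied
// fields and leave the bookkeeping members to the dispatch path, e.g.:
//
//   RS_RES_CONTROL_PARAMS_INTERNAL params = {0};
//   params.hClient    = hClient;        // NV01_ROOT object handle
//   params.hObject    = hObject;        // target object handle
//   params.cmd        = cmd;            // command #
//   params.pParams    = pCtrlParams;    // params in kernel space
//   params.paramsSize = ctrlParamsSize; // size of pParams in bytes
//
// (hClient, hObject, cmd, pCtrlParams, and ctrlParamsSize are placeholder
// names for the caller's own values.)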
+// +struct RS_RES_CONTROL_PARAMS_INTERNAL +{ + NvHandle hClient; // client-specified NV01_ROOT object handle + NvHandle hObject; // client-specified object handle + NvU32 cmd; // client-specified command # + NvU32 flags; // flags related to control call execution + void *pParams; // client-specified params (in kernel space) + NvU32 paramsSize; // client-specified size of pParams in bytes + + NvHandle hParent; // handle of hObject parent + struct OBJGPU *pGpu; // ptr to OBJGPU struct if applicable + struct OBJGPUGRP *pGpuGrp; // ptr to OBJGPUGRP struct if applicable + RsResourceRef *pResourceRef; // ptr to RsResourceRef if object is managed by + // Resource Server + API_SECURITY_INFO secInfo; // information on privilege level and pointer location (user/kernel) + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + RS_CONTROL_COOKIE *pCookie; + NvBool bInternal; // True if control call was not issued from an external client + NvBool bDeferredApi; // Indicates ctrl is being dispatched via deferred API + + struct RS_RES_CONTROL_PARAMS_INTERNAL *pLegacyParams; // RS-TODO removeme +}; + +struct RS_RES_DTOR_PARAMS +{ + CALL_CONTEXT *pFreeContext; + RS_RES_FREE_PARAMS_INTERNAL *pFreeParams; +}; + +/** + * Base class for all resources. Mostly a pure virtual interface which + * should be overridden to implement resource specific behavior. + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_RS_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__RsResource; +struct NVOC_METADATA__Object; +struct NVOC_VTABLE__RsResource; + + +struct RsResource { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__RsResource *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Object __nvoc_base_Object; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct RsResource *__nvoc_pbase_RsResource; // res + + // Data members + RsResourceRef *pResourceRef; + struct RS_RES_DTOR_PARAMS dtorParams; + NvBool bConstructed; +}; + + +// Vtable with 18 per-class function pointers +struct NVOC_VTABLE__RsResource { + NvBool (*__resCanCopy__)(struct RsResource * /*this*/); // virtual + NV_STATUS (*__resIsDuplicate__)(struct RsResource * /*this*/, NvHandle, NvBool *); // virtual + void (*__resPreDestruct__)(struct RsResource * /*this*/); // virtual + NV_STATUS (*__resControl__)(struct RsResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual + NV_STATUS (*__resControlFilter__)(struct RsResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual + NV_STATUS (*__resControlSerialization_Prologue__)(struct RsResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual + void (*__resControlSerialization_Epilogue__)(struct RsResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual + NV_STATUS (*__resControl_Prologue__)(struct RsResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual + void (*__resControl_Epilogue__)(struct RsResource * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual + NV_STATUS (*__resMap__)(struct RsResource * /*this*/, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual + NV_STATUS (*__resUnmap__)(struct RsResource * /*this*/, struct CALL_CONTEXT *, RsCpuMapping *); // virtual + NvBool (*__resIsPartialUnmapSupported__)(struct RsResource * /*this*/); // inline virtual body + NV_STATUS (*__resMapTo__)(struct RsResource * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual + NV_STATUS (*__resUnmapFrom__)(struct RsResource * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual + NvU32 (*__resGetRefCount__)(struct RsResource * /*this*/); // virtual + NvBool (*__resAccessCallback__)(struct RsResource * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual + NvBool (*__resShareCallback__)(struct RsResource * /*this*/, struct RsClient *, RsResourceRef *, RS_SHARE_POLICY *); // virtual + void (*__resAddAdditionalDependants__)(struct RsClient *, struct RsResource * /*this*/, RsResourceRef *); // virtual +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__RsResource { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; + const struct NVOC_VTABLE__RsResource vtable; +}; + +#ifndef __NVOC_CLASS_RsResource_TYPEDEF__ +#define __NVOC_CLASS_RsResource_TYPEDEF__ +typedef struct RsResource RsResource; +#endif /* __NVOC_CLASS_RsResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsResource +#define __nvoc_class_id_RsResource 0xd551cb +#endif /* __nvoc_class_id_RsResource */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +#define __staticCast_RsResource(pThis) \ + ((pThis)->__nvoc_pbase_RsResource) + +#ifdef __nvoc_rs_resource_h_disabled +#define __dynamicCast_RsResource(pThis) ((RsResource*) NULL) +#else //__nvoc_rs_resource_h_disabled +#define __dynamicCast_RsResource(pThis) \ + ((RsResource*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RsResource))) +#endif //__nvoc_rs_resource_h_disabled + +NV_STATUS 
__nvoc_objCreateDynamic_RsResource(RsResource**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RsResource(RsResource**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_RsResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_RsResource((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define resCanCopy_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define resCanCopy(pResource) resCanCopy_DISPATCH(pResource) +#define resIsDuplicate_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define resIsDuplicate(pResource, hMemory, pDuplicate) resIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define resPreDestruct_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define resPreDestruct(pResource) resPreDestruct_DISPATCH(pResource) +#define resControl_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resControl__ +#define resControl(pResource, pCallContext, pParams) resControl_DISPATCH(pResource, pCallContext, pParams) +#define resControlFilter_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define resControlFilter(pResource, pCallContext, pParams) resControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define resControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resControlSerialization_Prologue__ +#define resControlSerialization_Prologue(pResource, pCallContext, pParams) resControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define resControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resControlSerialization_Epilogue__ +#define resControlSerialization_Epilogue(pResource, pCallContext, pParams) resControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define resControl_Prologue_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resControl_Prologue__ +#define resControl_Prologue(pResource, pCallContext, pParams) resControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define resControl_Epilogue_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resControl_Epilogue__ +#define resControl_Epilogue(pResource, pCallContext, pParams) resControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define resMap_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resMap__ +#define resMap(pResource, pCallContext, pParams, pCpuMapping) resMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define resUnmap_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resUnmap__ +#define resUnmap(pResource, pCallContext, pCpuMapping) resUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define resIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define resIsPartialUnmapSupported(pResource) resIsPartialUnmapSupported_DISPATCH(pResource) +#define resMapTo_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resMapTo__ +#define resMapTo(pResource, pParams) resMapTo_DISPATCH(pResource, pParams) +#define resUnmapFrom_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define resUnmapFrom(pResource, pParams) resUnmapFrom_DISPATCH(pResource, pParams) +#define resGetRefCount_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define 
resGetRefCount(pResource) resGetRefCount_DISPATCH(pResource) +#define resAccessCallback_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resAccessCallback__ +#define resAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) resAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define resShareCallback_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resShareCallback__ +#define resShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) resShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define resAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define resAddAdditionalDependants(pClient, pResource, pReference) resAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NvBool resCanCopy_DISPATCH(struct RsResource *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__resCanCopy__(pResource); +} + +static inline NV_STATUS resIsDuplicate_DISPATCH(struct RsResource *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__resIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void resPreDestruct_DISPATCH(struct RsResource *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__resPreDestruct__(pResource); +} + +static inline NV_STATUS resControl_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__resControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS resControlFilter_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__resControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS resControlSerialization_Prologue_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__resControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void resControlSerialization_Epilogue_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__resControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS resControl_Prologue_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__resControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void resControl_Epilogue_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__resControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS resMap_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__nvoc_metadata_ptr->vtable.__resMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS resUnmap_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return 
pResource->__nvoc_metadata_ptr->vtable.__resUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool resIsPartialUnmapSupported_DISPATCH(struct RsResource *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS resMapTo_DISPATCH(struct RsResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__resMapTo__(pResource, pParams); +} + +static inline NV_STATUS resUnmapFrom_DISPATCH(struct RsResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__resUnmapFrom__(pResource, pParams); +} + +static inline NvU32 resGetRefCount_DISPATCH(struct RsResource *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__resGetRefCount__(pResource); +} + +static inline NvBool resAccessCallback_DISPATCH(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__resAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool resShareCallback_DISPATCH(struct RsResource *pResource, struct RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__resShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline void resAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct RsResource *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__(pClient, pResource, pReference); +} + +NvBool resCanCopy_IMPL(struct RsResource *pResource); + +NV_STATUS resIsDuplicate_IMPL(struct RsResource *pResource, NvHandle hMemory, NvBool *pDuplicate); + +void resPreDestruct_IMPL(struct RsResource *pResource); + +NV_STATUS resControl_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +NV_STATUS resControlFilter_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +NV_STATUS resControlSerialization_Prologue_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +void resControlSerialization_Epilogue_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +NV_STATUS resControl_Prologue_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +void resControl_Epilogue_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +NV_STATUS resMap_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); + +NV_STATUS resUnmap_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); + +static inline NvBool resIsPartialUnmapSupported_d69453(struct RsResource *pResource) { + return NV_FALSE; +} + +NV_STATUS resMapTo_IMPL(struct RsResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); + +NV_STATUS resUnmapFrom_IMPL(struct RsResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); + +NvU32 resGetRefCount_IMPL(struct RsResource *pResource); + +NvBool resAccessCallback_IMPL(struct RsResource *pResource, struct RsClient *pInvokingClient, void 
*pAllocParams, RsAccessRight accessRight); + +NvBool resShareCallback_IMPL(struct RsResource *pResource, struct RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); + +void resAddAdditionalDependants_IMPL(struct RsClient *pClient, struct RsResource *pResource, RsResourceRef *pReference); + +NV_STATUS resConstruct_IMPL(struct RsResource *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_resConstruct(arg_pResource, arg_pCallContext, arg_pParams) resConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +void resDestruct_IMPL(struct RsResource *pResource); + +#define __nvoc_resDestruct(pResource) resDestruct_IMPL(pResource) +NV_STATUS resSetFreeParams_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_FREE_PARAMS_INTERNAL *pParams); + +#ifdef __nvoc_rs_resource_h_disabled +static inline NV_STATUS resSetFreeParams(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_FREE_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("RsResource was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_resource_h_disabled +#define resSetFreeParams(pResource, pCallContext, pParams) resSetFreeParams_IMPL(pResource, pCallContext, pParams) +#endif //__nvoc_rs_resource_h_disabled + +NV_STATUS resGetFreeParams_IMPL(struct RsResource *pResource, struct CALL_CONTEXT **ppCallContext, struct RS_RES_FREE_PARAMS_INTERNAL **ppParams); + +#ifdef __nvoc_rs_resource_h_disabled +static inline NV_STATUS resGetFreeParams(struct RsResource *pResource, struct CALL_CONTEXT **ppCallContext, struct RS_RES_FREE_PARAMS_INTERNAL **ppParams) { + NV_ASSERT_FAILED_PRECOMP("RsResource was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_resource_h_disabled +#define resGetFreeParams(pResource, ppCallContext, ppParams) resGetFreeParams_IMPL(pResource, ppCallContext, ppParams) +#endif //__nvoc_rs_resource_h_disabled + +NV_STATUS resControlLookup_IMPL(struct RsResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry); + +#ifdef __nvoc_rs_resource_h_disabled +static inline NV_STATUS resControlLookup(struct RsResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + NV_ASSERT_FAILED_PRECOMP("RsResource was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_resource_h_disabled +#define resControlLookup(pResource, pParams, ppEntry) resControlLookup_IMPL(pResource, pParams, ppEntry) +#endif //__nvoc_rs_resource_h_disabled + +#undef PRIVATE_FIELD + + +/* @} */ + + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + + +/** + * @defgroup RsCpuMapping + * @addtogroup RsCpuMapping + * @{*/ +struct RsCpuMapping +{ + NvU64 offset; + NvU64 length; + NvU32 flags; + NvP64 pLinearAddress; + RsResourceRef *pContextRef; ///< Context resource that may be needed for the mapping + RsResourceRef *pResourceRef; ///< Resource that is actually getting mapped + ListNode backRefNode; ///< Node to context backreference + void *pContext; ///< Additional context data for the mapping + NvU32 processId; + + RS_CPU_MAPPING_PRIVATE *pPrivate; ///< Opaque struct allocated and freed by resserv on behalf of 
the user +}; +MAKE_LIST(RsCpuMappingList, RsCpuMapping); + +/** + * CPU mapping parameters + */ +struct RS_CPU_MAP_PARAMS +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvU64 offset; ///< [in] Offset into the resource + NvU64 length; ///< [in] Size of the region to map + NvP64 *ppCpuVirtAddr; + NvU32 flags; ///< [in] Resource-specific flags + + // Passed from RM into CpuMapping + NvU32 protect; ///< [in] Protection flags + NvBool bKernel; + + /// [in] hContext Handle of resource that provides a context for the mapping (e.g., subdevice for channel map) + NvHandle hContext; + + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * CPU unmapping params for resource server tests + */ +struct RS_CPU_UNMAP_PARAMS +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvP64 pLinearAddress; ///< [in] Address of mapped memory + NvU32 flags; ///< [in] Resource-specific flags + NvU32 processId; + NvBool bTeardown; ///< [in] Unmap operation is due to client teardown + + /// [in] hContext Handle of resource that provides a context for the mapping (e.g., subdevice for channel map) + NvHandle hContext; + + // RM-only + void *pProcessHandle; + + NvBool (*fnFilter)(RsCpuMapping*); ///< [in] Mapping-filter function + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * CPU mapping back-reference + */ +MAKE_INTRUSIVE_LIST(RsCpuMappingBackRefList, RsCpuMapping, backRefNode); +/* @} */ + +/** + * @defgroup RsInterMapping + * @addtogroup RsInterMapping + * @{*/ +struct RS_INTER_MAP_PARAMS +{ + NvHandle hClient; + NvHandle hMapper; + NvHandle hMappable; + NvHandle hDevice; + NvU64 offset; + NvU64 length; + NvU32 flags; + NvU32 flags2; + NvU32 kindOverride; + NvU64 dmaOffset; ///< [inout] RS-TODO rename this + void *pMemDesc; ///< [out] + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info + + RS_INTER_MAP_PRIVATE *pPrivate; ///< Opaque struct controlled by caller +}; + +struct RS_INTER_UNMAP_PARAMS +{ + NvHandle hClient; + NvHandle hMapper; + NvHandle hDevice; + NvU32 flags; + NvU64 dmaOffset; ///< [in] RS-TODO rename this + NvU64 size; + + // Internal use only + NvHandle hMappable; + void *pMemDesc; ///< MEMORY_DESCRIPTOR * + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info + + RS_INTER_UNMAP_PRIVATE *pPrivate; ///< Opaque struct controlled by caller +}; + +/** + * Inter-mapping information + * Used to keep track of inter-mappings and unmap them on free + */ +struct RsInterMapping +{ + RsResourceRef *pMapperRef; ///< The resource that created and owns this mapping (this resource) + RsResourceRef *pMappableRef; ///< The resource being mapped by the mapper (e.g. hMemory) + RsResourceRef *pContextRef; ///< A resource used to provide additional context for the mapping (e.g. 
hDevice) + ListNode mappableNode; + ListNode contextNode; + NvU32 flags; ///< Flags passed when mapping, same flags also passed when unmapping + NvU32 flags2; ///< Additional flags for the mapping + NvU64 dmaOffset; + NvU64 size; + void *pMemDesc; +}; +MAKE_LIST(RsInterMappingList, RsInterMapping); + +/** + * Inter-mapping back-reference + */ +MAKE_INTRUSIVE_LIST(RsInterMappingBackRefMappableList, RsInterMapping, mappableNode); +MAKE_INTRUSIVE_LIST(RsInterMappingBackRefContextList, RsInterMapping, contextNode); +/* @} */ + +typedef struct RS_RESOURCE_DESC RS_RESOURCE_DESC; +RS_RESOURCE_DESC *RsResInfoByExternalClassId(NvU32); +NvU32 RsResInfoGetInternalClassId(const RS_RESOURCE_DESC *); + +/** + * A reference to a resource that has been allocated in RM. + */ +struct RsResourceRef +{ + struct RsClient *pClient; ///< Pointer to the client that owns the ref + struct RsResource *pResource; ///< Pointer to the actual resource + NvHandle hResource; ///< Resource handle + struct RsResourceRef *pParentRef; ///< Parent resource reference + RsIndex childRefMap; ///< Child reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + + /** + * Cached reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * The resource reference cache is a one-way association between this resource reference and + * any other resource reference. Resource server does not populate the cache so it is up to the + * resource implementation to manage it. clientRefIter can be used to iterate this cache. + */ + RsIndex cachedRefMap; + + /** + * Dependants reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * A map of all resources that strongly depend on this resource. + */ + RsIndex depRefMap; + + /** + * Dependants back-reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * AKA dependencies map + * + * A map of all resources that this resource strongly depends on. + */ + RsIndex depBackRefMap; + + /** + * Policy under which this resource can be shared with other clients + */ + RsShareList sharePolicyList; + NvBool bSharePolicyListModified; + + /** + * A mask of the access rights that the owner client has on this object. 
+ */ + RS_ACCESS_MASK accessMask; + + const RS_RESOURCE_DESC *pResourceDesc; ///< Cached pointer to the resource descriptor + NvU32 internalClassId; ///< Internal resource class id + NvU32 externalClassId; ///< External resource class id + NvU32 depth; ///< The depth of this reference in the resource graph + NvBool bInvalidated; ///< Reference has been freed but not removed yet + + RsCpuMappingList cpuMappings; ///< List of CPU mappings to the resource from this resource reference + RsCpuMappingBackRefList backRefs; ///< List of references that have this reference as a mapping context + + RsInterMappingList interMappings; ///< List of inter-resource mappings created by this resource + RsInterMappingBackRefMappableList interBackRefsMappable; ///< List of inter-resource mappings this resource has been mapped into + RsInterMappingBackRefContextList interBackRefsContext; ///< List of inter-resource mappings this context has been mapped into + + struct RsSession *pSession; ///< If set, this ref depends on a shared session + struct RsSession *pDependantSession; ///< If set, this ref is depended on by a shared session + + ListNode freeNode; ///< Links to the client's pendingFreeList +}; +MAKE_MAP(RsRefMap, RsResourceRef); +MAKE_INTRUSIVE_LIST(RsRefFreeList, RsResourceRef, freeNode); + + +// Iterator data structure to save state while walking through a list +struct RS_ITERATOR +{ + union + { + RsRefMapIter mapIt; ///< Map iterator for all resource references under a client + RsIndexIter idxIt; ///< Index iterator for child references of a resource reference + }; + + struct RsClient *pClient; + RsResourceRef *pScopeRef; ///< Reference to the resource that limits the scope of iteration + NvU32 internalClassId; + RsResourceRef *pResourceRef; ///< Resource ref that is being iterated over + NvU8 type; ///< RS_ITERATE_* + NvBool bExactMatch; ///< If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId +}; + +// Iterator data structure to save state while walking through a resource tree in pre-order +struct RS_ORDERED_ITERATOR +{ + NvS8 depth; ///< Depth of index stack; special value of -1 implies that the scope reference should be iterated over as well + RsIndexIter idxIt[RS_MAX_RESOURCE_DEPTH+1]; ///< Stack of index iterators for child references of a resource reference + + struct RsClient *pClient; + RsResourceRef *pScopeRef; ///< Reference to the resource that limits the scope of iteration + NvU32 internalClassId; + NvBool bExactMatch; ///< If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + + RsResourceRef *pResourceRef; ///< Resource ref that is being iterated over +}; + +/** + * Macro for looking up a reference from a resource + */ +#define RES_GET_REF(pResource) (staticCast((pResource), RsResource)->pResourceRef) + +/** + * Macro for looking up a resource handle from a resource + */ +#define RES_GET_HANDLE(pResource) (RES_GET_REF(pResource)->hResource) + +/** + * Macro for looking up a resource's external class from a resource + */ +#define RES_GET_EXT_CLASS_ID(pResource) (RES_GET_REF(pResource)->externalClassId) + +/** + * Macro for looking up a resource's parent handle from a resource + */ +#define RES_GET_PARENT_HANDLE(pResource) (RES_GET_REF(pResource)->pParentRef->hResource) + +/** + * Macro for looking up a client from a resource + */ +#define RES_GET_CLIENT(pResource) (RES_GET_REF(pResource)->pClient) + +/** + * Macro for looking up a client handle from a resource + */ +#define 
RES_GET_CLIENT_HANDLE(pResource) (RES_GET_REF(pResource)->pClient->hClient) + +/** + * Find a CPU mapping owned by a resource reference + * + * @param[in] pResourceRef + * @param[in] pAddress The CPU virtual address of the mapping to search for + * @param[out] ppMapping The returned mapping + */ +NV_STATUS refFindCpuMapping(RsResourceRef *pResourceRef, NvP64 pAddress, RsCpuMapping **ppMapping); + +/** + * Find a CPU mapping owned by a resource reference + * + * @param[in] pResourceRef + * @param[in] pAddress The CPU virtual address of the mapping to search for + * @param[in] fnFilter A user-provided filtering function that determines which mappings to ignore. + * If fnFilter is provided, then we will only return mappings for which fnFilter(mapping) returns NV_TRUE. + * All mappings will be searched over if fnFilter is NULL. + * @param[out] ppMapping The returned mapping + */ +NV_STATUS refFindCpuMappingWithFilter(RsResourceRef *pResourceRef, NvP64 pAddress, NvBool (*fnFilter)(RsCpuMapping*), RsCpuMapping **ppMapping); + +/** + * Find the first child object of given type + * + * @param[in] pParentRef + * @param[in] internalClassId + * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + * @param[out] ppResourceRef The returned RsResourceRef (Optional) + */ +NV_STATUS refFindChildOfType(RsResourceRef *pParentRef, NvU32 internalClassId, NvBool bExactMatch, RsResourceRef **ppResourceRef); + +/** + * Traverse up the reference parent-child hierarchy to find an ancestor reference of a given type + * + * @param[in] pDescendantRef + * @param[in] internalClassId + * @param[out] ppAncestorRef The returned RsResourceRef (Optional) + */ +NV_STATUS refFindAncestorOfType(RsResourceRef *pDescendantRef, NvU32 internalClassId, RsResourceRef **ppAncestorRef); + +/** + * Traverse up the reference parent-child hierarchy to find if a ref is a descendant of a given ancestor ref + * + * @param[in] pDescendantRef The node to start searching from (not included in the search) + * @param[in] pAncestorRef The node to search for in the parent-child hierarchy + */ +NvBool refHasAncestor(RsResourceRef *pDescendantRef, RsResourceRef *pAncestorRef); + +/** + * Add a new mapping to a reference's mapping list + * @param[in] pResourceRef The reference to add a mapping to + * @param[in] pMapParams The parameters used to initialize the mapping + * @param[in] pContextRef A reference to a resource that provides a context for the mapping + * @param[out] ppMapping Pointer to the allocated mapping [optional] + */ +NV_STATUS refAddMapping(RsResourceRef *pResourceRef, RS_CPU_MAP_PARAMS *pMapParams, + RsResourceRef *pContextRef, RsCpuMapping **ppMapping); + +/** + * Remove an existing mapping from a reference's mapping list and remove back-references to the mapping. + * @param[in] pResourceRef The reference to remove a mapping from + * @param[in] pMapping Pointer to the allocated mapping + */ +void refRemoveMapping(RsResourceRef *pResourceRef, RsCpuMapping *pMapping); + +/** + * Allocate the user-controlled private pointer within the RsCpuMapping struct.
+ * Resserv will call this function to alloc the private struct when the mapping is created + * @param[in] pMapParams The parameters which were used to create the mapping + * @param[inout] pMapping Pointer to the mapping whose private struct should be allocated + */ +NV_STATUS refAllocCpuMappingPrivate(RS_CPU_MAP_PARAMS *pMapParams, RsCpuMapping *pMapping); + +/** + * Free the user-controlled private pointer within the RsCpuMapping struct. + * Resserv will call this function to free the private struct when the mapping is removed + * @param[inout] pMapping Pointer to the mapping whose private struct should be freed + */ +void refFreeCpuMappingPrivate(RsCpuMapping *pMapping); + +/** + * Add a dependency between this resource reference and a dependent reference. + * If this reference is freed, the dependent will be invalidated and torn down. + * + * @note Dependencies are implicit between a parent resource reference and child resource reference + * @note No circular dependency checking is performed + */ +NV_STATUS refAddDependant(RsResourceRef *pResourceRef, RsResourceRef *pDependantRef); + +/** + * Remove the dependency between this resource reference and a dependent resource reference. + */ +void refRemoveDependant(RsResourceRef *pResourceRef, RsResourceRef *pDependantRef); + +/** + * Find, Add, or Remove an inter-mapping between two resources to the Mapper's list of inter-mappings + * Inter-mappings are stored in the Mapper, and are matched by both the MappableRef and offset. + * + * @param[in] pMapperRef The reference which owns the inter-mapping + * @param[in] pMappableRef The reference which was mapped from to create the inter-mapping + * If NULL, will be ignored while matching inter-mappings + * @param[in] dmaOffset The offset value assigned while mapping, used to identify mappings + * @param[in] pContextRef A reference used during mapping and locking for additional context, used to identify mappings + * @param[inout] ppMapping Writes the resulting inter-mapping, if successfully created (Add) or found (Find) + * @param[in] pMapping The inter-mapping to remove (Remove) + */ +NV_STATUS refAddInterMapping(RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RsResourceRef *pContextRef, RsInterMapping **ppMapping); +void refRemoveInterMapping(RsResourceRef *pMapperRef, RsInterMapping *pMapping); + +/** + * Store a resource reference in another reference's cache. 
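 *
 * For illustration, a hedged usage sketch (the refs shown are hypothetical; resserv
 * does not populate this cache itself, so the resource implementation makes this
 * call):
 *
 * @code
 * // Remember a frequently-used child ref so later lookups can skip a handle walk.
 * NV_STATUS status = refCacheRef(pParentRef, pChildRef);
 * if (status != NV_OK)
 *     return status;
 * @endcode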
+ * @param[in] pParentRef The resource reference that owns the cache + * @param[in] pResourceRef The resource reference to store in the cache + */ +NV_STATUS refCacheRef(RsResourceRef *pParentRef, RsResourceRef *pResourceRef); + +/** + * Remove a resource reference from another reference's cache + * @param[in] pParentRef The resource reference that owns the cache + * @param[in] pResourceRef The resource reference to de-index + */ +NV_STATUS refUncacheRef(RsResourceRef *pParentRef, RsResourceRef *pResourceRef); + +/** + * Determine whether a reference is queued for removal + * @param[in] pResourceRef + * @param[in] pClient + */ +NvBool refPendingFree(RsResourceRef *pResourceRef, struct RsClient *pClient); + + +#ifdef __cplusplus +} +#endif + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_RS_RESOURCE_NVOC_H_ diff --git a/src/nvidia/generated/g_rs_server_nvoc.c b/src/nvidia/generated/g_rs_server_nvoc.c new file mode 100644 index 0000000..81972dc --- /dev/null +++ b/src/nvidia/generated/g_rs_server_nvoc.c @@ -0,0 +1,402 @@ +#define NVOC_RS_SERVER_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_rs_server_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x830542 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +// Forward declarations for RsShared +void __nvoc_init__Object(Object*); +void __nvoc_init__RsShared(RsShared*); +void __nvoc_init_funcTable_RsShared(RsShared*); +NV_STATUS __nvoc_ctor_RsShared(RsShared*); +void __nvoc_init_dataField_RsShared(RsShared*); +void __nvoc_dtor_RsShared(RsShared*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__RsShared; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__RsShared; + +// Down-thunk(s) to bridge RsShared methods from ancestors (if any) + +// Up-thunk(s) to bridge RsShared methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RsShared), + /*classId=*/ classId(RsShared), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RsShared", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RsShared, + /*pCastInfo=*/ &__nvoc_castinfo__RsShared, + /*pExportInfo=*/ &__nvoc_export_info__RsShared +}; + + +// Metadata with per-class RTTI with ancestor(s) +static const struct NVOC_METADATA__RsShared __nvoc_metadata__RsShared = { + .rtti.pClassDef = &__nvoc_class_def_RsShared, // (shr) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RsShared, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(RsShared, __nvoc_base_Object), +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__RsShared = { + .numRelatives = 2, + .relatives = { + &__nvoc_metadata__RsShared.rtti, // [0]: (shr) this + &__nvoc_metadata__RsShared.metadata__Object.rtti, // [1]: (obj) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__RsShared = +{ + 
/*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_RsShared(RsShared *pThis) { + __nvoc_shrDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RsShared(RsShared *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_RsShared(RsShared *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_RsShared_fail_Object; + __nvoc_init_dataField_RsShared(pThis); + + status = __nvoc_shrConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_RsShared_fail__init; + goto __nvoc_ctor_RsShared_exit; // Success + +__nvoc_ctor_RsShared_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_RsShared_fail_Object: +__nvoc_ctor_RsShared_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_RsShared_1(RsShared *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_RsShared_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_RsShared(RsShared *pThis) { + __nvoc_init_funcTable_RsShared_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__RsShared(RsShared *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_RsShared = pThis; // (shr) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Object(&pThis->__nvoc_base_Object); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__RsShared.metadata__Object; // (obj) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__RsShared; // (shr) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_RsShared(pThis); +} + +NV_STATUS __nvoc_objCreate_RsShared(RsShared **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + RsShared *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(RsShared), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(RsShared)); + + pThis->__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__RsShared(pThis); + status = __nvoc_ctor_RsShared(pThis); + if (status != NV_OK) goto __nvoc_objCreate_RsShared_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_RsShared_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. 
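    // With NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT the storage is caller-owned, so
    // the failure path below only scrubs it back to zero; otherwise the object was
    // heap-allocated above, so it is freed and the caller's pointer is cleared.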
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(RsShared)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RsShared(RsShared **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_RsShared(ppThis, pParent, createFlags); + + return status; +} + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x830d90 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsSession; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +// Forward declarations for RsSession +void __nvoc_init__RsShared(RsShared*); +void __nvoc_init__RsSession(RsSession*); +void __nvoc_init_funcTable_RsSession(RsSession*); +NV_STATUS __nvoc_ctor_RsSession(RsSession*); +void __nvoc_init_dataField_RsSession(RsSession*); +void __nvoc_dtor_RsSession(RsSession*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__RsSession; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__RsSession; + +// Down-thunk(s) to bridge RsSession methods from ancestors (if any) + +// Up-thunk(s) to bridge RsSession methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_RsSession = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RsSession), + /*classId=*/ classId(RsSession), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RsSession", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RsSession, + /*pCastInfo=*/ &__nvoc_castinfo__RsSession, + /*pExportInfo=*/ &__nvoc_export_info__RsSession +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__RsSession __nvoc_metadata__RsSession = { + .rtti.pClassDef = &__nvoc_class_def_RsSession, // (session) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RsSession, + .rtti.offset = 0, + .metadata__RsShared.rtti.pClassDef = &__nvoc_class_def_RsShared, // (shr) super + .metadata__RsShared.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RsShared.rtti.offset = NV_OFFSETOF(RsSession, __nvoc_base_RsShared), + .metadata__RsShared.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^2 + .metadata__RsShared.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__RsShared.metadata__Object.rtti.offset = NV_OFFSETOF(RsSession, __nvoc_base_RsShared.__nvoc_base_Object), + + .vtable.__sessionRemoveDependant__ = &sessionRemoveDependant_IMPL, // virtual + .vtable.__sessionRemoveDependency__ = &sessionRemoveDependency_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__RsSession = { + .numRelatives = 3, + .relatives = { + &__nvoc_metadata__RsSession.rtti, // [0]: (session) this + &__nvoc_metadata__RsSession.metadata__RsShared.rtti, // [1]: (shr) super + &__nvoc_metadata__RsSession.metadata__RsShared.metadata__Object.rtti, // [2]: (obj) super^2 + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__RsSession = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsShared(RsShared*); +void __nvoc_dtor_RsSession(RsSession *pThis) { + __nvoc_sessionDestruct(pThis); + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RsSession(RsSession *pThis) { + 
PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsShared(RsShared* ); +NV_STATUS __nvoc_ctor_RsSession(RsSession *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared); + if (status != NV_OK) goto __nvoc_ctor_RsSession_fail_RsShared; + __nvoc_init_dataField_RsSession(pThis); + + status = __nvoc_sessionConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_RsSession_fail__init; + goto __nvoc_ctor_RsSession_exit; // Success + +__nvoc_ctor_RsSession_fail__init: + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); +__nvoc_ctor_RsSession_fail_RsShared: +__nvoc_ctor_RsSession_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_RsSession_1(RsSession *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_RsSession_1 + + +// Initialize vtable(s) for 2 virtual method(s). +void __nvoc_init_funcTable_RsSession(RsSession *pThis) { + __nvoc_init_funcTable_RsSession_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__RsSession(RsSession *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object; // (obj) super^2 + pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared; // (shr) super + pThis->__nvoc_pbase_RsSession = pThis; // (session) this + + // Recurse to superclass initialization function(s). + __nvoc_init__RsShared(&pThis->__nvoc_base_RsShared); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_RsShared.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__RsSession.metadata__RsShared.metadata__Object; // (obj) super^2 + pThis->__nvoc_base_RsShared.__nvoc_metadata_ptr = &__nvoc_metadata__RsSession.metadata__RsShared; // (shr) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__RsSession; // (session) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_RsSession(pThis); +} + +NV_STATUS __nvoc_objCreate_RsSession(RsSession **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + RsSession *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(RsSession), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(RsSession)); + + pThis->__nvoc_base_RsShared.__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__RsSession(pThis); + status = __nvoc_ctor_RsSession(pThis); + if (status != NV_OK) goto __nvoc_objCreate_RsSession_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_RsSession_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. 
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(RsSession)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RsSession(RsSession **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_RsSession(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_rs_server_nvoc.h b/src/nvidia/generated/g_rs_server_nvoc.h new file mode 100644 index 0000000..666cba9 --- /dev/null +++ b/src/nvidia/generated/g_rs_server_nvoc.h @@ -0,0 +1,1425 @@ + +#ifndef _G_RS_SERVER_NVOC_H_ +#define _G_RS_SERVER_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_rs_server_nvoc.h" + +#ifndef _RS_SERVER_H_ +#define _RS_SERVER_H_ + +#include "nvport/nvport.h" +#include "resserv/resserv.h" +#include "resserv/rs_client.h" +#include "nvoc/object.h" + +#ifdef __cplusplus +extern "C" { +#endif + +enum CLIENT_LOCK_TYPE +{ + CLIENT_LOCK_SPECIFIC, // For locking specific RM clients encoded in the API + CLIENT_LOCK_ALL // For locking all RM clients currently in use +}; + +/** + * @defgroup RsServer + * @addtogroup RsServer + * @{*/ + +/** + * Book-keeping for individual client locks + */ +struct CLIENT_ENTRY +{ + PORT_RWLOCK *pLock; + struct RsClient *pClient; + NvHandle hClient; + NvU64 lockOwnerTid; ///< Thread id of the write lock owner + volatile NvU32 lockReadOwnerCnt; + NvU32 refCount; + NvBool bPendingFree; + ListNode node; + +#if LOCK_VAL_ENABLED + LOCK_VAL_LOCK lockVal; +#endif +}; + +MAKE_INTRUSIVE_LIST(RsClientList, CLIENT_ENTRY, node); +MAKE_LIST(RsLockedClientList, CLIENT_ENTRY*); + +/** + * Base-class for objects that are shared among multiple + * RsResources (including RsResources from other clients) + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. 
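// For illustration only (an editor's sketch, not generated output): a hypothetical
// header field declared as
//
//     NvS32 PRIVATE_FIELD(refCount);
//
// expands to a plain `refCount` member in the one source file that defines
// NVOC_RS_SERVER_H_PRIVATE_ACCESS_ALLOWED, and to an NVOC_PRIVATE_FIELD-wrapped
// name everywhere else, so accesses from other source files fail to compile.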
+#ifdef NVOC_RS_SERVER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__RsShared; +struct NVOC_METADATA__Object; + + +struct RsShared { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__RsShared *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct Object __nvoc_base_Object; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct RsShared *__nvoc_pbase_RsShared; // shr + + // Data members + NvS32 refCount; + struct MapNode node; +}; + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__RsShared { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; +}; + +#ifndef __NVOC_CLASS_RsShared_TYPEDEF__ +#define __NVOC_CLASS_RsShared_TYPEDEF__ +typedef struct RsShared RsShared; +#endif /* __NVOC_CLASS_RsShared_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsShared +#define __nvoc_class_id_RsShared 0x830542 +#endif /* __nvoc_class_id_RsShared */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +#define __staticCast_RsShared(pThis) \ + ((pThis)->__nvoc_pbase_RsShared) + +#ifdef __nvoc_rs_server_h_disabled +#define __dynamicCast_RsShared(pThis) ((RsShared*) NULL) +#else //__nvoc_rs_server_h_disabled +#define __dynamicCast_RsShared(pThis) \ + ((RsShared*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RsShared))) +#endif //__nvoc_rs_server_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_RsShared(RsShared**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RsShared(RsShared**, Dynamic*, NvU32); +#define __objCreate_RsShared(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_RsShared((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros + +// Dispatch functions +NV_STATUS shrConstruct_IMPL(struct RsShared *arg_pShared); + +#define __nvoc_shrConstruct(arg_pShared) shrConstruct_IMPL(arg_pShared) +void shrDestruct_IMPL(struct RsShared *pShared); + +#define __nvoc_shrDestruct(pShared) shrDestruct_IMPL(pShared) +#undef PRIVATE_FIELD + +MAKE_INTRUSIVE_MAP(RsSharedMap, RsShared, node); + +/** + * Utility class for objects that can reference + * multiple client handle spaces. Frees and control calls + * that occur on objects which reference an RsSession will + * need to acquire pLock first. + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_RS_SERVER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__RsSession; +struct NVOC_METADATA__RsShared; +struct NVOC_VTABLE__RsSession; + + +struct RsSession { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__RsSession *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e.
superclass or base class) objects + struct RsShared __nvoc_base_RsShared; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^2 + struct RsShared *__nvoc_pbase_RsShared; // shr super + struct RsSession *__nvoc_pbase_RsSession; // session + + // Data members + PORT_RWLOCK *pLock; + NvBool bValid; + RsResourceRefList dependencies; + RsResourceRefList dependants; +}; + + +// Vtable with 2 per-class function pointers +struct NVOC_VTABLE__RsSession { + void (*__sessionRemoveDependant__)(struct RsSession * /*this*/, struct RsResourceRef *); // virtual + void (*__sessionRemoveDependency__)(struct RsSession * /*this*/, struct RsResourceRef *); // virtual +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__RsSession { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__RsShared metadata__RsShared; + const struct NVOC_VTABLE__RsSession vtable; +}; + +#ifndef __NVOC_CLASS_RsSession_TYPEDEF__ +#define __NVOC_CLASS_RsSession_TYPEDEF__ +typedef struct RsSession RsSession; +#endif /* __NVOC_CLASS_RsSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsSession +#define __nvoc_class_id_RsSession 0x830d90 +#endif /* __nvoc_class_id_RsSession */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsSession; + +#define __staticCast_RsSession(pThis) \ + ((pThis)->__nvoc_pbase_RsSession) + +#ifdef __nvoc_rs_server_h_disabled +#define __dynamicCast_RsSession(pThis) ((RsSession*) NULL) +#else //__nvoc_rs_server_h_disabled +#define __dynamicCast_RsSession(pThis) \ + ((RsSession*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RsSession))) +#endif //__nvoc_rs_server_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_RsSession(RsSession**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RsSession(RsSession**, Dynamic*, NvU32); +#define __objCreate_RsSession(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_RsSession((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros +#define sessionRemoveDependant_FNPTR(pSession) pSession->__nvoc_metadata_ptr->vtable.__sessionRemoveDependant__ +#define sessionRemoveDependant(pSession, pResourceRef) sessionRemoveDependant_DISPATCH(pSession, pResourceRef) +#define sessionRemoveDependency_FNPTR(pSession) pSession->__nvoc_metadata_ptr->vtable.__sessionRemoveDependency__ +#define sessionRemoveDependency(pSession, pResourceRef) sessionRemoveDependency_DISPATCH(pSession, pResourceRef) + +// Dispatch functions +static inline void sessionRemoveDependant_DISPATCH(struct RsSession *pSession, struct RsResourceRef *pResourceRef) { + pSession->__nvoc_metadata_ptr->vtable.__sessionRemoveDependant__(pSession, pResourceRef); +} + +static inline void sessionRemoveDependency_DISPATCH(struct RsSession *pSession, struct RsResourceRef *pResourceRef) { + pSession->__nvoc_metadata_ptr->vtable.__sessionRemoveDependency__(pSession, pResourceRef); +} + +void sessionRemoveDependant_IMPL(struct RsSession *pSession, struct RsResourceRef *pResourceRef); + +void sessionRemoveDependency_IMPL(struct RsSession *pSession, struct RsResourceRef *pResourceRef); + +NV_STATUS sessionConstruct_IMPL(struct RsSession *arg_pSession); + +#define __nvoc_sessionConstruct(arg_pSession) sessionConstruct_IMPL(arg_pSession) +void sessionDestruct_IMPL(struct RsSession *pSession); + +#define __nvoc_sessionDestruct(pSession) sessionDestruct_IMPL(pSession) +NV_STATUS sessionAddDependant_IMPL(struct RsSession *pSession, struct RsResourceRef *pResourceRef); + +#ifdef 
__nvoc_rs_server_h_disabled +static inline NV_STATUS sessionAddDependant(struct RsSession *pSession, struct RsResourceRef *pResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsSession was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_server_h_disabled +#define sessionAddDependant(pSession, pResourceRef) sessionAddDependant_IMPL(pSession, pResourceRef) +#endif //__nvoc_rs_server_h_disabled + +NV_STATUS sessionAddDependency_IMPL(struct RsSession *pSession, struct RsResourceRef *pResourceRef); + +#ifdef __nvoc_rs_server_h_disabled +static inline NV_STATUS sessionAddDependency(struct RsSession *pSession, struct RsResourceRef *pResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsSession was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_server_h_disabled +#define sessionAddDependency(pSession, pResourceRef) sessionAddDependency_IMPL(pSession, pResourceRef) +#endif //__nvoc_rs_server_h_disabled + +NV_STATUS sessionCheckLocksForAdd_IMPL(struct RsSession *pSession, struct RsResourceRef *pResourceRef); + +#ifdef __nvoc_rs_server_h_disabled +static inline NV_STATUS sessionCheckLocksForAdd(struct RsSession *pSession, struct RsResourceRef *pResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsSession was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_server_h_disabled +#define sessionCheckLocksForAdd(pSession, pResourceRef) sessionCheckLocksForAdd_IMPL(pSession, pResourceRef) +#endif //__nvoc_rs_server_h_disabled + +void sessionCheckLocksForRemove_IMPL(struct RsSession *pSession, struct RsResourceRef *pResourceRef); + +#ifdef __nvoc_rs_server_h_disabled +static inline void sessionCheckLocksForRemove(struct RsSession *pSession, struct RsResourceRef *pResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsSession was disabled!"); +} +#else //__nvoc_rs_server_h_disabled +#define sessionCheckLocksForRemove(pSession, pResourceRef) sessionCheckLocksForRemove_IMPL(pSession, pResourceRef) +#endif //__nvoc_rs_server_h_disabled + +#undef PRIVATE_FIELD + + +// Iterator data structure to save state while walking through a map +struct RS_SHARE_ITERATOR +{ + RsSharedMapIter mapIt; + NvU32 internalClassId; + struct RsShared *pShared; ///< Share that is being iterated over +}; + +/** + * Top-level structure that RMAPI and RM interface with + * + * This class is all that needs to be allocated to use the resource server + * library. + * + * The RsServer interface should be kept as narrow as possible. Map and + * MapTo are added because (1) the unmap variants operate on addresses and not + * handles and (2) having explicit knowledge of map operations in the server is + * helpful when dealing with multiple levels of address spaces (e.g., guest + * user-mode, guest kernel-mode, host kernel-mode). + */ +struct RsServer +{ + /** + * Privilege level determines what objects a server is allowed to allocate, and + * also determines whether additional handle validation needs to be performed.
+ */ + RS_PRIV_LEVEL privilegeLevel; + + RsClientList *pClientSortedList; ///< Buckets of linked lists of clients (and their locks) owned by this server + NvU32 clientCurrentHandleIndex; + + NvBool bConstructed; ///< Determines whether the server is ready to be used + PORT_MEM_ALLOCATOR *pAllocator; ///< Allocator to use for all objects allocated by the server + + PORT_SPINLOCK *pClientListLock; ///< Lock that needs to be taken when accessing the client list + + PORT_SPINLOCK *pShareMapLock; ///< Lock that needs to be taken when accessing the shared resource map + RsSharedMap shareMap; ///< Map of shared resources + +#if (RS_STANDALONE) + NvU64 topLockOwnerTid; ///< Thread id of top-lock owner + PORT_RWLOCK *pTopLock; ///< Top-level resource server lock + PORT_RWLOCK *pResLock; ///< Resource-level resource server lock +#if LOCK_VAL_ENABLED + LOCK_VAL_LOCK topLockVal; + LOCK_VAL_LOCK resLockVal; +#endif +#endif + + /// Print out a list of all resources that will be freed when a free request is made + NvBool bDebugFreeList; + + /// If true, control call param copies will be performed outside the top/api lock + NvBool bUnlockedParamCopy; + + // If true, calls annotated with ROUTE_TO_PHYSICAL will not grab global gpu locks + // (and the read-only API lock). + NvBool bRouteToPhysicalLockBypass; + + /** + * Setting this flag to false disables any attempts to + * automatically acquire access rights or to control access to resources by + * checking for access rights. + */ + NvBool bRsAccessEnabled; + + /** + * Set to thread ID of the thread that locked all clients. + */ + NvU64 allClientLockOwnerTid; + + /** + * Mask of interfaces (RS_API_*) that will use a read-only top lock by default + */ + NvU32 roTopLockApiMask; + + /// Share policies which clients default to when no other policies are used + RsShareList defaultInheritedSharePolicyList; + /// Share policies to apply to all shares, regardless of other policies + RsShareList globalInternalSharePolicyList; + + NvU32 internalHandleBase; + NvU32 clientHandleBase; + + NvU32 activeClientCount; + NvU64 activeResourceCount; + + /// List of clients that are de-activated and pending free + RsDisabledClientList disabledClientList; + struct RsClient *pNextDisabledClient; + PORT_SPINLOCK *pDisabledClientListLock; + + /** + * List of client entries locked by serverLockAllClients + * This list is required for locking all clients in order to avoid races with + * other paths creating/destroying clients in parallel WITHOUT holding the API lock. + * Ideally, there shouldn't be any other such paths but the RTD3/PM path does do + * this. CORERM-6052 tracks investigating that and potentially fixing the locking + * there. + */ + RsLockedClientList lockedClientList; +}; + +/** + * Construct a server instance. This must be performed before any other server + * operation. + * + * @param[in] pServer This server instance + * @param[in] privilegeLevel Privilege level for this resource server instance + * @param[in] maxDomains Maximum number of domains to support, or 0 for the default + */ +NV_STATUS serverConstruct(RsServer *pServer, RS_PRIV_LEVEL privilegeLevel, NvU32 maxDomains); + +/** + * Destroy a server instance. Destructing a server does not guarantee that child domains + * and clients will be appropriately freed. serverFreeDomain should be explicitly called + * on all allocated domains to ensure all clients and resources get cleaned up.
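 *
 * A hedged teardown sketch (domain-handle bookkeeping belongs to the caller;
 * hDomain here is hypothetical):
 *
 * @code
 * // Free each domain first so its clients and resources are torn down,
 * // then destroy the server itself.
 * NV_STATUS status = serverFreeDomain(pServer, hDomain);
 * if (status == NV_OK)
 *     status = serverDestruct(pServer);
 * @endcode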
+ * + * @param[in] pServer This server instance + */ +NV_STATUS serverDestruct(RsServer *pServer); + +/** + * Allocate a domain handle. Domain handles are used to track clients created by a domain. + * + * @param[in] pServer This server instance + * @param[in] hParentDomain + * @param[in] pAccessControl + * @param[out] phDomain + * + */ +NV_STATUS serverAllocDomain(RsServer *pServer, NvU32 hParentDomain, ACCESS_CONTROL *pAccessControl, NvHandle *phDomain); + +/** + * Verify that the calling user is allowed to perform the access. This check only + * applies to calls from RING_USER or RING_KERNEL. No check is performed in + * RING_HOST. + * + * @param[in] pServer This server instance + * @param[in] hDomain + * @param[in] hClient + * + */ +NV_STATUS serverValidate(RsServer *pServer, NvU32 hDomain, NvHandle hClient); + +/** + * Verify that the domain has sufficient permission to allocate the given class. + * @param[in] pServer + * @param[in] hDomain + * @param[in] externalClassId External resource class id + */ +NV_STATUS serverValidateAlloc(RsServer *pServer, NvU32 hDomain, NvU32 externalClassId); + +/** + * Free a domain handle. All clients of this domain will be freed. + * + * @param[in] pServer This server instance + * @param[in] hDomain The handle of the domain to free + */ +NV_STATUS serverFreeDomain(RsServer *pServer, NvHandle hDomain); + +/** + * Allocate a client handle. A client handle is required to allocate resources. + * + * @param[in] pServer This server instance + * @param[inout] pParams Client allocation parameters + */ +NV_STATUS serverAllocClient(RsServer *pServer, RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +/** + * Free a client handle. All resource references owned by the client will be + * freed. + * + * It is invalid to attempt to free a client from a user other than the one + * that allocated it. + * + * @param[in] pServer This server instance + * @param[in] pParams Client free params + */ +NV_STATUS serverFreeClient(RsServer *pServer, RS_CLIENT_FREE_PARAMS* pParams); + +/** + * Mark a list of client handles as disabled. All CPU mappings owned by those + * clients will be unmapped immediately, and the clients will be marked as disabled. + * A call to @ref serverFreeDisabledClients will then free all such clients. + * + * It is invalid to attempt to free a client from a user other than the one + * that allocated it. + * + * @param[in] pServer This server instance + * @param[in] phClientList The list of client handles to disable + * @param[in] numClients The number of clients in the list + * @param[in] freeState User-defined free state + * @param[in] pSecInfo Security Info + * + */ +NV_STATUS serverMarkClientListDisabled(RsServer *pServer, NvHandle *phClientList, NvU32 numClients, NvU32 freeState, API_SECURITY_INFO *pSecInfo); + +/** + * Frees all currently disabled clients. All resource references owned by + * any of the clients will be freed. + * All priority resources will be freed first across all listed clients. + * + * NOTE: may return NV_WARN_MORE_PROCESSING_REQUIRED if not all clients were freed + * + * @param[in] pServer This server instance + * @param[in] freeState User-defined free state + * @param[in] limit Max number of iterations to make before returning; 0 means no limit + * + */ +NV_STATUS serverFreeDisabledClients(RsServer *pServer, NvU32 freeState, NvU32 limit); + +/** + * Allocate a resource. + * + * It is invalid to attempt to allocate a client from a user other than the one + * that allocated it.
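 *
 * A minimal sketch, assuming the caller has already populated the parameter
 * struct (its fields are not repeated here to avoid guessing at them):
 *
 * @code
 * NV_STATUS status = serverAllocResource(pServer, pParams);
 * if (status != NV_OK)
 *     NV_PRINTF(LEVEL_ERROR, "resource alloc failed: 0x%x\n", status);
 * @endcode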
+ * + * @param[in] pServer This server instance + * @param[inout] pParams The allocation parameters + */ +NV_STATUS serverAllocResource(RsServer *pServer, RS_RES_ALLOC_PARAMS *pParams); + +/** + * Allocate a ref-counted resource share. + * + * @param[in] pServer + * @param[in] pClassInfo NVOC class info for the shared class (must derive from RsShared) + * @param[out] ppShare Allocated share + */ +NV_STATUS serverAllocShare(RsServer *pServer, const NVOC_CLASS_INFO* pClassInfo, struct RsShared **ppShare); + +/** + * Allocate a ref-counted resource share with Halspec parent. + * + * @param[in] pServer + * @param[in] pClassInfo NVOC class info for the shared class (must derive from RsShared) + * @param[out] ppShare Allocated share + * @param[in] pHalspecParent Parent object whose Halspec can be used for the shared class object + */ +NV_STATUS serverAllocShareWithHalspecParent(RsServer *pServer, const NVOC_CLASS_INFO* pClassInfo, struct RsShared **ppShare, struct Object *pHalspecParent); + +/** + * Get the ref-count of a resource share. + * + * @param[in] pServer + * @param[in] pShare Resource share + */ +NvS32 serverGetShareRefCount(RsServer *pServer, struct RsShared *pShare); + +/** + * Increment the ref-count of a resource share. + * + * @param[in] pServer + * @param[in] pShare Resource share + */ +NV_STATUS serverRefShare(RsServer *pServer, struct RsShared *pShare); + +/** + * Decrement the ref-count of a resource share. If the ref-count + * has reached zero, the resource share will be freed. + * + * @param[in] pServer + * @param[in] pShare Resource share + */ +NV_STATUS serverFreeShare(RsServer *pServer, struct RsShared *pShare); + +/** + * Get an iterator to the elements in the server's shared object map + * @param[in] pServer + * @param[in] internalClassId If non-zero, only RsShared that are (or can be + * derived from) the specified class will be returned + */ +RS_SHARE_ITERATOR serverShareIter(RsServer *pServer, NvU32 internalClassId); + +/** + * Advance an iterator over the elements in the server's shared object map; + * returns NV_TRUE while another element remains + */ +NvBool serverShareIterNext(RS_SHARE_ITERATOR*); + +/** + * Set fixed client handle base in case clients want to use a different + * base for client allocations + * @param[in] pServer + * @param[in] clientHandleBase + */ +NV_STATUS serverSetClientHandleBase(RsServer *pServer, NvU32 clientHandleBase); + +/** + * Deserialize parameters for servicing command + * + * @param[in] pCallContext + * @param[in] cmd + * @param[in/out] ppParams + * @param[in/out] pParamsSize + * @param[in] flags + */ +NV_STATUS serverDeserializeCtrlDown(CALL_CONTEXT *pCallContext, NvU32 cmd, void **ppParams, NvU32 *pParamsSize, NvU32 *flags); + +/** + * Serialize parameters for servicing command + * + * @param[in] pCallContext + * @param[in] cmd + * @param[in/out] ppParams + * @param[in/out] pParamsSize + * @param[in] flags + */ +NV_STATUS serverSerializeCtrlDown(CALL_CONTEXT *pCallContext, NvU32 cmd, void **ppParams, NvU32 *pParamsSize, NvU32 *flags); + +/** + * Deserialize parameters for returning from command + * + * @param[in] pCallContext + * @param[in] cmd + * @param[out] ppParams + * @param[out] pParamsSize + * @param[in] flags + */ +NV_STATUS serverDeserializeCtrlUp(CALL_CONTEXT *pCallContext, NvU32 cmd, void **ppParams, NvU32 *pParamsSize, NvU32 *flags); + +/** + * Serialize parameters for returning from command + * + * @param[in] pCallContext + * @param[in] cmd + * @param[out] ppParams + * @param[out] pParamsSize + * @param[in] flags + */ +NV_STATUS serverSerializeCtrlUp(CALL_CONTEXT
+ +/** + * Unset the flag for reserializing the control before going to GSP. + * Used if kernel control servicing passes params to GSP without changing them + * + * @param[in] pCallContext + */ +void serverDisableReserializeControl(CALL_CONTEXT *pCallContext); + +/** + * Serialize parameters for allocating + * + * @param[in] pCallContext + * @param[in] classId + * @param[in/out] ppParams + * @param[out] pParamsSize + * @param[in] flags + */ +NV_STATUS serverSerializeAllocDown(CALL_CONTEXT *pCallContext, NvU32 classId, void **ppParams, NvU32 *pParamsSize, NvU32 *flags); + +/** + * Deserialize parameters for allocating + * + * @param[in] pCallContext + * @param[in] classId + * @param[in/out] ppParams + * @param[in/out] pParamsSize + * @param[in] flags + */ +NV_STATUS serverDeserializeAllocDown(CALL_CONTEXT *pCallContext, NvU32 classId, void **ppParams, NvU32 *pParamsSize, NvU32 *flags); + +/** + * Serialize parameters for returning from allocating + * + * @param[in] pCallContext + * @param[in] classId + * @param[out] ppParams + * @param[out] pParamsSize + * @param[in] flags + */ +NV_STATUS serverSerializeAllocUp(CALL_CONTEXT *pCallContext, NvU32 classId, void **ppParams, NvU32 *pParamsSize, NvU32 *flags); + +/** + * Deserialize parameters for returning from allocating + * + * @param[in] pCallContext + * @param[in] classId + * @param[out] ppParams + * @param[out] pParamsSize + * @param[in] flags + */ +NV_STATUS serverDeserializeAllocUp(CALL_CONTEXT *pCallContext, NvU32 classId, void **ppParams, NvU32 *pParamsSize, NvU32 *flags); + +/** + * Free finn structures allocated for serializing/deserializing + * + * @param[in] pCallContext + * @param[in] pParams + */ +void serverFreeSerializeStructures(CALL_CONTEXT *pCallContext, void *pParams); + +/** + * Return an available client handle for new client allocation + * + * @param[in] pServer This server instance + * @param[in] bInternalHandle Client is an RM internal client + * @param[in] pSecInfo Security context of this client allocation + */ +extern NvU32 serverAllocClientHandleBase(RsServer *pServer, NvBool bInternalHandle, API_SECURITY_INFO *pSecInfo); + +/** + * Allocate a resource. Assumes top-level lock has been taken. + * + * It is invalid to attempt to allocate a resource under a client owned by a + * user other than the one that allocated the client. User-implemented. + * + * @param[in] pServer This server instance + * @param[inout] pParams The allocation parameters + */ +extern NV_STATUS serverAllocResourceUnderLock(RsServer *pServer, RS_RES_ALLOC_PARAMS *pAllocParams); + +/** + * Call the Free RPC for the given resource. Assumes top-level lock has been taken. + * + * @param[in] pServer This server instance + * @param[inout] pFreeParams The Free parameters + */ +extern NV_STATUS serverFreeResourceRpcUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pFreeParams); + +/** + * Copy in parameters supplied by the caller, and initialize API state. User-implemented. + * @param[in] pServer + * @param[in] pAllocParams Resource allocation parameters + * @param[out] ppApiState User-defined API_STATE; should be allocated by this function + */ +extern NV_STATUS serverAllocApiCopyIn(RsServer *pServer, RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams, API_STATE **ppApiState);
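+
+/*
+ * Editor's sketch (assumed flow, not generated code): the copy-in/copy-out
+ * pair is expected to bracket the user-implemented allocation path, with
+ * copy-out running on both success and failure so the API state is always
+ * released. status and pAllocParams stand in for caller state.
+ *
+ *     API_STATE *pApiState = NULL;
+ *     status = serverAllocApiCopyIn(pServer, pAllocParams, &pApiState);
+ *     if (status == NV_OK)
+ *     {
+ *         status = serverAllocResourceUnderLock(pServer, pAllocParams);
+ *         status = serverAllocApiCopyOut(pServer, status, pApiState);
+ *     }
+ */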
+ +/** + * Copy out parameters to the caller, and release API state. User-implemented. + * @param[in] pServer + * @param[in] status Status of allocation request + * @param[in] pApiState API_STATE for the allocation + */ +extern NV_STATUS serverAllocApiCopyOut(RsServer *pServer, NV_STATUS status, API_STATE *pApiState); + +/** + * Obtain a second client handle to lock if required for the allocation. + * @param[in] externalClassId External class ID of resource + * @param[in] pAllocParams Class-specific allocation parameters + * @param[out] phSecondClient Second client handle to lock on success + * + * @return NV_OK on success + * NV_ERR_INVALID_STATE if the allocation is incorrectly configured with RS_FLAGS_DUAL_CLIENT_LOCK without having updated this function. + */ +extern NV_STATUS serverAllocLookupSecondClient(NvU32 externalClassId, void *pAllocParams, NvHandle *phSecondClient); + +/** + * Obtain a second client handle to lock if required for the control (DISCOURAGED). + * @param[in] cmd Control call ID + * @param[in] pControlParams Control-specific parameters + * @param[in] pCookie Control call cookie to check flags for + * @param[out] phSecondClient Second client handle to lock on success + * + * @return NV_OK on success + * NV_ERR_INVALID_STATE if the control is incorrectly configured with RMCTRL_FLAGS_DUAL_CLIENT_LOCK without having updated this function. + */ +extern NV_STATUS serverControlLookupSecondClient(NvU32 cmd, void *pControlParams, RS_CONTROL_COOKIE *pCookie, NvHandle *phSecondClient); + +/** + * Acquires a top-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Output flags indicating the locks that need to be released + */ +extern NV_STATUS serverTopLock_Prologue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Releases a top-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern void serverTopLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Acquires a session lock. + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[in] pResourceRef Resource reference to take session locks on + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern NV_STATUS serverSessionLock_Prologue(LOCK_ACCESS_TYPE access, RsResourceRef *pResourceRef, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Releases a session lock. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern void serverSessionLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Acquires a resource-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Output flags indicating the locks that need to be released + * @param[in] gpuMask Bitmask of additional GPUs to lock + */ +extern NV_STATUS serverResLock_Prologue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags, NvU32 gpuMask); + +/** + * Releases a resource-level lock. User-implemented.
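+ *
+ * Editor's sketch of the intended prologue/epilogue pairing (assumed usage,
+ * not generated documentation; status, pLockInfo, and gpuMask stand in for
+ * caller state):
+ * @code
+ * NvU32 releaseFlags = 0;
+ * status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo,
+ *                                 &releaseFlags, gpuMask);
+ * if (status == NV_OK)
+ * {
+ *     // ... operate on the locked resources ...
+ *     serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo,
+ *                            &releaseFlags);
+ * }
+ * @endcode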
+ * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern void serverResLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Acquire the client list lock. The caller is responsible for + * ensuring that lock ordering is not violated (otherwise there can be + * deadlock): the client list lock must always be released without acquiring any + * subsequent locks. + * + * @param[in] pServer This server instance + */ +void serverAcquireClientListLock(RsServer *pServer); + +/** + * Release the client list lock. + * + * @param[in] pServer This server instance + */ +void serverReleaseClientListLock(RsServer *pServer); + +/** + * WAR for additional tasks that must be performed after resource-level locks are released. User-implemented. + * @param[inout] status Allocation status + * @param[in] bClientAlloc Caller is attempting to allocate a client + * @param[inout] pParams Allocation parameters + */ +extern NV_STATUS serverAllocEpilogue_WAR(RsServer *pServer, NV_STATUS status, NvBool bClientAlloc, RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams); + +/** + * Free a resource reference and all of its descendants. This will decrease the + * resource's reference count. The resource itself will only be freed if there + * are no more references to it. + * + * It is invalid to attempt to free a resource from a user other than the one that allocated it. + * + * @param[in] pServer This server instance + * @param[in] pParams Free parameters + */ +NV_STATUS serverFreeResourceTree(RsServer *pServer, RS_RES_FREE_PARAMS *pParams); + +/** + * Same as serverFreeResourceTree except the top-level lock is assumed to have been taken. + * + * @param[in] pServer This server instance + * @param[in] pParams Free parameters + */ +NV_STATUS serverFreeResourceTreeUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pParams); + +/** + * Updates the lock flags in the dup parameters + * + * @param[in] pServer This server instance + * @param[in] pParams Dup parameters + */ +extern NV_STATUS serverUpdateLockFlagsForCopy(RsServer *pServer, RS_RES_DUP_PARAMS *pParams); + +/** + * Updates the lock flags in the free parameters + * + * @param[in] pServer This server instance + * @param[in] pParams Free parameters + */ +extern NV_STATUS serverUpdateLockFlagsForFree(RsServer *pServer, RS_RES_FREE_PARAMS *pParams); + +/** + * Updates the lock flags for automatic inter-unmap during free + * + * @param[in] pServer This server instance + * @param[inout] pParams Unmap params, contained pLockInfo will be modified + */ +extern NV_STATUS serverUpdateLockFlagsForInterAutoUnmap(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pParams); + +/** + * Initialize parameters for a recursive call to serverFreeResourceTree. User-implemented. + * @param[in] hClient + * @param[in] hResource + * @param[inout] pParams + */ +extern NV_STATUS serverInitFreeParams_Recursive(NvHandle hClient, NvHandle hResource, RS_LOCK_INFO *pLockInfo, RS_RES_FREE_PARAMS *pParams); + +/** + * Common operations performed after top locks and client locks are taken, but before + * the control call is executed. This includes validating the control call cookie, + * looking up locking flags, parameter copy-in, and taking resource locks. 
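+ *
+ * Editor's sketch of the intended bracketing around control servicing
+ * (assumed flow, not generated documentation; status and pParams stand in
+ * for caller state):
+ * @code
+ * LOCK_ACCESS_TYPE access = LOCK_ACCESS_READ;
+ * NvU32 releaseFlags = 0;
+ * status = serverControl_Prologue(pServer, pParams, &access, &releaseFlags);
+ * if (status == NV_OK)
+ * {
+ *     // ... dispatch the control call to the resource ...
+ *     status = serverControl_Epilogue(pServer, pParams, access,
+ *                                     &releaseFlags, status);
+ * }
+ * @endcode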
+ * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[in] pAccess Lock access type + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + */ +NV_STATUS serverControl_Prologue(RsServer *pServer, RS_RES_CONTROL_PARAMS_INTERNAL *pParams, LOCK_ACCESS_TYPE *pAccess, NvU32 *pReleaseFlags); + +/** + * Common operations performed after the control call is executed. This + * includes releasing locks and parameter copy-out. + * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[in] pAccess Lock access type + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + * @param[in] status Control call status + */ +NV_STATUS serverControl_Epilogue(RsServer *pServer, RS_RES_CONTROL_PARAMS_INTERNAL *pParams, LOCK_ACCESS_TYPE access, NvU32 *pReleaseFlags, NV_STATUS status); + +/** + * Initialize an NVOC export control call cookie + * + * @param[in] pExportedEntry + * @param[inout] pCookie + */ +extern void serverControl_InitCookie(const struct NVOC_EXPORTED_METHOD_DEF *pExportedEntry, RS_CONTROL_COOKIE *pCookie); + +/** + * Validate an NVOC export control call cookie + * + * @param[in] pParams + * @param[inout] pCookie + */ +extern NV_STATUS serverControl_ValidateCookie(RS_RES_CONTROL_PARAMS_INTERNAL *pParams, RS_CONTROL_COOKIE *pCookie); + +/** + * Copy in control call parameters + * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[inout] pCookie Control call cookie + */ +extern NV_STATUS serverControlApiCopyIn(RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie); + +/** + * Copy out control call parameters + * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[inout] pCookie Control call cookie + */ +extern NV_STATUS serverControlApiCopyOut(RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie, + NV_STATUS rmStatus); + +/** + * Determine whether an API supports a read-only lock for a given lock + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] api RS_API* + */ +NvBool serverSupportsReadOnlyLock(RsServer *pServer, RS_LOCK_ENUM lock, RS_API_ENUM api); + +/** + * Determine whether the current thread has taken the RW API lock + * @param[in] pServer ResServ instance + */ +extern NvBool serverRwApiLockIsOwner(RsServer *pServer); + +/** + * Look up locking flags for a resource alloc + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Allocation parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverAllocResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess); +/** + * + * Look up locking flags for a resource free + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Free parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverFreeResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_FREE_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess, + NvBool *pbSupportForceROLock); + +/** + * Look up locking flags for a resource copy + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Dup parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverCopyResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_DUP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess);
+ +/** + * Look up locking flags for a resource access share + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Share parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverShareResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_SHARE_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Look up locking flags for a control call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Control call parameters + * @param[in] pCookie Control call cookie + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverControlLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Look up client locking flags for a control call + * + * @param[in] pCookie Control call cookie + * @param[out] pClientLockType Client lock type + */ +extern NV_STATUS serverControlLookupClientLockFlags(RS_CONTROL_COOKIE *pCookie, + enum CLIENT_LOCK_TYPE *pClientLockType); + +/** + * + * Look up locking flags for a map call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams CPU map parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverMapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Look up locking flags for an unmap call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams CPU unmap parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverUnmapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * + * Look up locking flags for an inter-resource map call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Inter-resource map parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverInterMapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * + * Look up locking flags for an inter-resource unmap call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Inter-resource unmap parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverInterUnmapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Fill the server's share policy lists with any default or global policies needed + */ +extern NV_STATUS serverInitGlobalSharePolicies(RsServer *pServer); + +/** + * Issue a control command to a resource + * + * @param[in] pServer This server instance + * @param[in] pParams Control parameters + */ +NV_STATUS serverControl(RsServer *pServer, RS_RES_CONTROL_PARAMS *pParams); + +/** + * Copy a resource owned by one client into another client. + * + * The clients must be in the same client handle space. The underlying + * resource is not duplicated, but it is refcounted so the resource will + * not be freed until the reference count hits zero. + * + * Copying a resource will fail if the user making the call does not own + * the source client.
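+ *
+ * Editor's illustration (not generated documentation): after a successful
+ * copy, the source and destination clients each hold a handle to the same
+ * underlying resource, whose reference count is now two; the resource is
+ * destroyed only once both handles have been freed.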
+ * + * @param[in] pServer This server instance + * @param[inout] pParams Resource duplication parameters + */ +NV_STATUS serverCopyResource(RsServer *pServer, RS_RES_DUP_PARAMS *pParams); + +/** + * Share certain access rights to a resource with other clients using the provided share policy + * + * The policy entry passed in will be added to the object's share policy list. + * If bRevoke is true, the policy will be removed instead. + * + * Sharing will fail if the user making the call does not own the source client. + * + * @param[in] pServer This server instance + * @param[in] pParams Resource sharing parameters + */ +NV_STATUS serverShareResourceAccess(RsServer *pServer, RS_RES_SHARE_PARAMS *pParams); + +/** + * Creates a CPU mapping of the resource in the virtual address space of the process. + * + * Not all resources support mapping. + * + * @param[in] pServer This server instance + * @param[in] hClient Client handle of the resource to map + * @param[in] hResource Handle of the resource to map + * @param[inout] pParams CPU mapping parameters + */ +NV_STATUS serverMap(RsServer *pServer, NvHandle hClient, NvHandle hResource, RS_CPU_MAP_PARAMS *pParams); + +/** + * Release a CPU virtual address mapping + * + * @param[in] pServer This server instance + * @param[in] hClient Client handle of the resource to unmap + * @param[in] hResource Handle of the resource to unmap + * @param[in] pParams CPU unmapping parameters + */ +NV_STATUS serverUnmap(RsServer *pServer, NvHandle hClient, NvHandle hResource, RS_CPU_UNMAP_PARAMS *pParams); + +/** + * Pre-map operations. Called with top/client locks acquired + * but not resource locks. + * + * @param[in] pServer + * @param[inout] pParams CPU mapping parameters + */ +NV_STATUS serverMap_Prologue(RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams); + +/** + * Post-map operations. Called with top/client locks acquired + * but not resource locks. + * + * @param[in] pServer + * @param[inout] pParams CPU mapping parameters + */ +void serverMap_Epilogue(RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams); + +/** + * Pre-unmap operations. Called with top/client locks acquired + * but not resource locks. + * + * @param[in] pServer + * @param[inout] pParams CPU unmapping parameters + */ +NV_STATUS serverUnmap_Prologue(RsServer *pServer, RS_CPU_UNMAP_PARAMS *pUnmapParams); + +/** + * Post-unmap operations. Called with top/client locks acquired + * but not resource locks. + * + * @param[in] pServer + * @param[inout] pParams CPU unmapping parameters + */ +void serverUnmap_Epilogue(RsServer *pServer, RS_CPU_UNMAP_PARAMS *pUnmapParams); + +/** + * Creates an inter-mapping between two resources + * + * Not all resources support mapping. + * + * @param[in] pServer This server instance + * @param[inout] pParams mapping parameters + */ +NV_STATUS serverInterMap(RsServer *pServer, RS_INTER_MAP_PARAMS *pParams); + +/** + * Release an inter-mapping between two resources + * + * @param[in] pServer This server instance + * @param[in] pParams unmapping parameters + */ +NV_STATUS serverInterUnmap(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pParams); + +/** + * Pre-inter-map operations. Called with top/client locks acquired. + * This function acquires resource locks.
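+ *
+ * Editor's sketch of the expected pairing (assumed flow, not generated
+ * documentation; status stands in for caller state):
+ * @code
+ * NvU32 releaseFlags = 0;
+ * status = serverInterMap_Prologue(pServer, pMapperRef, pMappableRef,
+ *                                  pMapParams, &releaseFlags);
+ * if (status == NV_OK)
+ * {
+ *     // ... create the mapping ...
+ *     serverInterMap_Epilogue(pServer, pMapParams, &releaseFlags);
+ * }
+ * @endcode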
+ * + * @param[in] pServer + * @param[in] pMapperRef The resource that can be used to create the mapping + * @param[in] pMappableRef The resource that can be mapped + * @param[inout] pMapParams mapping parameters + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + */ +NV_STATUS serverInterMap_Prologue(RsServer *pServer, RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RS_INTER_MAP_PARAMS *pMapParams, NvU32 *pReleaseFlags); + +/** + * Post-inter-map operations. Called with top, client, and resource locks acquired. + * This function releases resource locks. + * + * @param[in] pServer + * @param[inout] pMapParams mapping parameters + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + */ +void serverInterMap_Epilogue(RsServer *pServer, RS_INTER_MAP_PARAMS *pMapParams, NvU32 *pReleaseFlags); + +/** + * Pre-inter-unmap operations. Called with top, client, and resource locks acquired. + * + * @param[in] pServer + * @param[inout] pParams unmapping parameters + */ +NV_STATUS serverInterUnmap_Prologue(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pUnmapParams); + +/** + * Post-inter-unmap operations. Called with top, client, and resource locks acquired. + * + * @param[in] pServer + * @param[inout] pParams unmapping parameters + */ +void serverInterUnmap_Epilogue(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pUnmapParams); + +/** + * Acquire a client pointer from a client handle. The caller is responsible for + * ensuring that lock ordering is not violated (otherwise there can be + * deadlock): clients must be locked in increasing order of client index (not + * handle). + * + * @param[in] pServer This server instance + * @param[in] hClient The client to acquire + * @param[in] lockAccess LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[out] ppClientEntry Pointer to the CLIENT_ENTRY + */ +NV_STATUS serverAcquireClient(RsServer *pServer, NvHandle hClient, LOCK_ACCESS_TYPE lockAccess, CLIENT_ENTRY **ppClientEntry); + +/** + * Release a client pointer + * + * @param[in] pServer This server instance + * @param[in] lockAccess LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[in] pClientEntry Pointer to the CLIENT_ENTRY + */ +void serverReleaseClient(RsServer *pServer, LOCK_ACCESS_TYPE lockAccess, CLIENT_ENTRY *pClientEntry); + +/** + * Test whether a client handle is currently locked for LOCK_ACCESS_WRITE. + * + * @param[in] pServer This server instance + * @param[in] hClient The client handle to test + */ +NvBool serverIsClientLocked(RsServer *pServer, NvHandle hClient); + +/** + * Test if a client handle is internal or not + * + * @param[in] pServer This server instance + * @param[in] hClient The client handle to test + */ +NvBool serverIsClientInternal(RsServer *pServer, NvHandle hClient); + +/** + * Lock all clients currently in use. While this function will lock the client handles + * in the correct order, the caller is responsible for ensuring that lock ordering + * is not violated (otherwise there can be a deadlock) with respect to other types + * of locks. NOTE that this CANNOT be called when already holding one or more client + * locks! + * + * @param[in] pServer This server instance + */ +NV_STATUS serverLockAllClients(RsServer *pServer); + +/** + * Release locks on all clients.
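+ *
+ * Editor's sketch (assumed usage, not generated documentation):
+ * @code
+ * if (serverLockAllClients(pServer) == NV_OK)
+ * {
+ *     // ... walk every client under the all-clients lock ...
+ *     serverUnlockAllClients(pServer);
+ * }
+ * @endcode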
+ * + * @param[in] pServer This server instance + */ +NV_STATUS serverUnlockAllClients(RsServer *pServer); + +/** + * Check whether the current thread holds the all-clients lock + * + * @param[in] pServer This server instance + */ +static NV_INLINE NvBool serverAllClientsLockIsOwner(RsServer *pServer) +{ + return (pServer->allClientLockOwnerTid == portThreadGetCurrentThreadId()); +} + +/** + * Get a client pointer from a client handle without taking any locks. + * + * @param[in] pServer This server instance + * @param[in] hClient The client to acquire + * @param[out] ppClient Pointer to the RsClient + */ +NV_STATUS serverGetClientUnderLock(RsServer *pServer, NvHandle hClient, struct RsClient **ppClient); + +/** + * Get the count of clients allocated under this resource server + * + * @param[in] pServer This server instance + */ +NvU32 serverGetClientCount(RsServer *pServer); + +/** + * Get the count of resources allocated under this resource server + * + * @param[in] pServer This server instance + */ +NvU64 serverGetResourceCount(RsServer *pServer); + +/** + * Swap a TLS call context entry and increment the TLS entry refcount. + * A new TLS entry for call context will be allocated if necessary. + * + * @note This should be paired with a corresponding resservRestoreTlsCallContext call + */ +NV_STATUS resservSwapTlsCallContext(CALL_CONTEXT **ppOldCallContext, CALL_CONTEXT *pNewCallContext); + +/** + * Get the current TLS call context. This will not increment a refcount on the TLS entry. + */ +CALL_CONTEXT *resservGetTlsCallContext(void); + +/** + * Restore a TLS call context entry and decrement the TLS entry refcount. + * @note This should be paired with a corresponding resservSwapTlsCallContext call + */ +NV_STATUS resservRestoreTlsCallContext(CALL_CONTEXT *pOldCallContext); + +/** + * Find a resource reference of a given type from the TLS call context + * @param[in] internalClassId Only return a reference if it matches this type + * @param[in] bSearchAncestors Search parents of the call context resource ref + */ +RsResourceRef *resservGetContextRefByType(NvU32 internalClassId, NvBool bSearchAncestors); + +/** + * Test whether a client handle is currently locked for LOCK_ACCESS_READ. + * The caller must hold the client lock in either mode to acquire an accurate + * result. Callers without the client list lock are subject to race conditions. + * + * @param[in] pClientEntry Pointer to the CLIENT_ENTRY + */ +NvBool serverIsClientLockedForRead(CLIENT_ENTRY* pClientEntry); + +#ifdef __cplusplus +} +#endif + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_RS_SERVER_NVOC_H_ diff --git a/src/nvidia/generated/g_sdk-structures.h b/src/nvidia/generated/g_sdk-structures.h new file mode 100644 index 0000000..ce04f64 --- /dev/null +++ b/src/nvidia/generated/g_sdk-structures.h @@ -0,0 +1,62 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * WARNING: This is an autogenerated file. DO NOT EDIT. + * This file is generated using below files: + * template file: inc/kernel/vgpu/gt_sdk-structures.h + * definition file: inc/kernel/vgpu/sdk-structures.def + */ + + +#ifdef SDK_STRUCTURES +// These are copies of sdk structures that will be used for communication between the vmioplugin & guest RM. +typedef struct NVOS00_PARAMETERS_v03_00 +{ + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectOld; + NvV32 status; +} NVOS00_PARAMETERS_v03_00; + +typedef NVOS00_PARAMETERS_v03_00 NVOS00_PARAMETERS_v; + +typedef struct NVOS55_PARAMETERS_v03_00 +{ + NvHandle hClient; + NvHandle hParent; + NvHandle hObject; + NvHandle hClientSrc; + NvHandle hObjectSrc; + NvU32 flags; + NvU32 status; +} NVOS55_PARAMETERS_v03_00; + +typedef NVOS55_PARAMETERS_v03_00 NVOS55_PARAMETERS_v; + + +#endif + +#ifdef SDK_ARRAY_LENGTH_FUNCTIONS + +#endif + diff --git a/src/nvidia/generated/g_standard_mem_nvoc.c b/src/nvidia/generated/g_standard_mem_nvoc.c new file mode 100644 index 0000000..4c8941e --- /dev/null +++ b/src/nvidia/generated/g_standard_mem_nvoc.c @@ -0,0 +1,526 @@ +#define NVOC_STANDARD_MEM_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_standard_mem_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x897bf7 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_StandardMemory; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +// Forward declarations for StandardMemory +void __nvoc_init__Memory(Memory*); +void __nvoc_init__StandardMemory(StandardMemory*); +void __nvoc_init_funcTable_StandardMemory(StandardMemory*); +NV_STATUS __nvoc_ctor_StandardMemory(StandardMemory*, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_StandardMemory(StandardMemory*); +void
__nvoc_dtor_StandardMemory(StandardMemory*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__StandardMemory; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__StandardMemory; + +// Down-thunk(s) to bridge StandardMemory methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_Memory_resIsDuplicate(struct RsResource *pMemory, NvHandle hMemory, NvBool *pDuplicate); // super +NV_STATUS __nvoc_down_thunk_Memory_resControl(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_Memory_resMap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_down_thunk_Memory_resUnmap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_down_thunk_Memory_rmresGetMemInterMapParams(struct RmResource *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS __nvoc_down_thunk_Memory_rmresCheckMemInterUnmap(struct RmResource *pMemory, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_down_thunk_Memory_rmresGetMemoryMappingDescriptor(struct RmResource *pMemory, MEMORY_DESCRIPTOR **ppMemDesc); // super +NvBool __nvoc_down_thunk_StandardMemory_resCanCopy(struct RsResource *pStandardMemory); // this + +// Up-thunk(s) to bridge StandardMemory methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2 +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, 
RsCpuMapping *pCpuMapping); // super^2 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2 +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2 +NvBool __nvoc_up_thunk_RmResource_memAccessCallback(struct Memory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NvBool __nvoc_up_thunk_RmResource_memShareCallback(struct Memory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_up_thunk_RmResource_memControlSerialization_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_memControlSerialization_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_memControl_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_memControl_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_memCanCopy(struct Memory *pResource); // super +void __nvoc_up_thunk_RsResource_memPreDestruct(struct Memory *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_memControlFilter(struct Memory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_memIsPartialUnmapSupported(struct Memory *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_memMapTo(struct Memory *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_memUnmapFrom(struct Memory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_memGetRefCount(struct Memory *pResource); // super +void __nvoc_up_thunk_RsResource_memAddAdditionalDependants(struct RsClient *pClient, struct Memory *pResource, RsResourceRef *pReference); // super +NV_STATUS __nvoc_up_thunk_Memory_stdmemIsDuplicate(struct StandardMemory *pMemory, NvHandle hMemory, NvBool *pDuplicate); // this +NV_STATUS __nvoc_up_thunk_Memory_stdmemGetMapAddrSpace(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this +NV_STATUS __nvoc_up_thunk_Memory_stdmemControl(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_Memory_stdmemMap(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_Memory_stdmemUnmap(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_Memory_stdmemGetMemInterMapParams(struct StandardMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS 
__nvoc_up_thunk_Memory_stdmemCheckMemInterUnmap(struct StandardMemory *pMemory, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_Memory_stdmemGetMemoryMappingDescriptor(struct StandardMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_Memory_stdmemCheckCopyPermissions(struct StandardMemory *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice); // this +NV_STATUS __nvoc_up_thunk_Memory_stdmemIsReady(struct StandardMemory *pMemory, NvBool bCopyConstructorContext); // this +NvBool __nvoc_up_thunk_Memory_stdmemIsGpuMapAllowed(struct StandardMemory *pMemory, struct OBJGPU *pGpu); // this +NvBool __nvoc_up_thunk_Memory_stdmemIsExportAllowed(struct StandardMemory *pMemory); // this +NvBool __nvoc_up_thunk_RmResource_stdmemAccessCallback(struct StandardMemory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NvBool __nvoc_up_thunk_RmResource_stdmemShareCallback(struct StandardMemory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_RmResource_stdmemControlSerialization_Prologue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_stdmemControlSerialization_Epilogue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_stdmemControl_Prologue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_stdmemControl_Epilogue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RsResource_stdmemPreDestruct(struct StandardMemory *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_stdmemControlFilter(struct StandardMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_stdmemIsPartialUnmapSupported(struct StandardMemory *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_stdmemMapTo(struct StandardMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_stdmemUnmapFrom(struct StandardMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_stdmemGetRefCount(struct StandardMemory *pResource); // this +void __nvoc_up_thunk_RsResource_stdmemAddAdditionalDependants(struct RsClient *pClient, struct StandardMemory *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_StandardMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(StandardMemory), + /*classId=*/ classId(StandardMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "StandardMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_StandardMemory, + /*pCastInfo=*/ &__nvoc_castinfo__StandardMemory, + /*pExportInfo=*/ &__nvoc_export_info__StandardMemory +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__StandardMemory __nvoc_metadata__StandardMemory = { + .rtti.pClassDef = &__nvoc_class_def_StandardMemory, // (stdmem) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_StandardMemory, + .rtti.offset = 0, + .metadata__Memory.rtti.pClassDef 
= &__nvoc_class_def_Memory, // (mem) super + .metadata__Memory.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Memory.rtti.offset = NV_OFFSETOF(StandardMemory, __nvoc_base_Memory), + .metadata__Memory.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2 + .metadata__Memory.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Memory.metadata__RmResource.rtti.offset = NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource), + .metadata__Memory.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3 + .metadata__Memory.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Memory.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4 + .metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3 + .metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + + .vtable.__stdmemCanCopy__ = &stdmemCanCopy_IMPL, // virtual override (res) base (mem) + .metadata__Memory.vtable.__memCanCopy__ = &__nvoc_up_thunk_RsResource_memCanCopy, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &__nvoc_down_thunk_StandardMemory_resCanCopy, // virtual + .vtable.__stdmemIsDuplicate__ = &__nvoc_up_thunk_Memory_stdmemIsDuplicate, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memIsDuplicate__ = &memIsDuplicate_IMPL, // virtual override (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &__nvoc_down_thunk_Memory_resIsDuplicate, // virtual + .vtable.__stdmemGetMapAddrSpace__ = &__nvoc_up_thunk_Memory_stdmemGetMapAddrSpace, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memGetMapAddrSpace__ = &memGetMapAddrSpace_IMPL, // virtual + .vtable.__stdmemControl__ = &__nvoc_up_thunk_Memory_stdmemControl, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memControl__ = &memControl_IMPL, // virtual override (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_Memory_resControl, // virtual + .vtable.__stdmemMap__ = 
&__nvoc_up_thunk_Memory_stdmemMap, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memMap__ = &memMap_IMPL, // virtual override (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_Memory_resMap, // virtual + .vtable.__stdmemUnmap__ = &__nvoc_up_thunk_Memory_stdmemUnmap, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memUnmap__ = &memUnmap_IMPL, // virtual override (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_Memory_resUnmap, // virtual + .vtable.__stdmemGetMemInterMapParams__ = &__nvoc_up_thunk_Memory_stdmemGetMemInterMapParams, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memGetMemInterMapParams__ = &memGetMemInterMapParams_IMPL, // virtual override (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &__nvoc_down_thunk_Memory_rmresGetMemInterMapParams, // virtual + .vtable.__stdmemCheckMemInterUnmap__ = &__nvoc_up_thunk_Memory_stdmemCheckMemInterUnmap, // inline virtual inherited (mem) base (mem) body + .metadata__Memory.vtable.__memCheckMemInterUnmap__ = &memCheckMemInterUnmap_ac1694, // inline virtual override (rmres) base (rmres) body + .metadata__Memory.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &__nvoc_down_thunk_Memory_rmresCheckMemInterUnmap, // virtual + .vtable.__stdmemGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_Memory_stdmemGetMemoryMappingDescriptor, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memGetMemoryMappingDescriptor__ = &memGetMemoryMappingDescriptor_IMPL, // virtual override (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &__nvoc_down_thunk_Memory_rmresGetMemoryMappingDescriptor, // virtual + .vtable.__stdmemCheckCopyPermissions__ = &__nvoc_up_thunk_Memory_stdmemCheckCopyPermissions, // inline virtual inherited (mem) base (mem) body + .metadata__Memory.vtable.__memCheckCopyPermissions__ = &memCheckCopyPermissions_ac1694, // inline virtual body + .vtable.__stdmemIsReady__ = &__nvoc_up_thunk_Memory_stdmemIsReady, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memIsReady__ = &memIsReady_IMPL, // virtual + .vtable.__stdmemIsGpuMapAllowed__ = &__nvoc_up_thunk_Memory_stdmemIsGpuMapAllowed, // inline virtual inherited (mem) base (mem) body + .metadata__Memory.vtable.__memIsGpuMapAllowed__ = &memIsGpuMapAllowed_e661f0, // inline virtual body + .vtable.__stdmemIsExportAllowed__ = &__nvoc_up_thunk_Memory_stdmemIsExportAllowed, // inline virtual inherited (mem) base (mem) body + .metadata__Memory.vtable.__memIsExportAllowed__ = &memIsExportAllowed_e661f0, // inline virtual body + .vtable.__stdmemAccessCallback__ = &__nvoc_up_thunk_RmResource_stdmemAccessCallback, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memAccessCallback__ = &__nvoc_up_thunk_RmResource_memAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + 
.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__stdmemShareCallback__ = &__nvoc_up_thunk_RmResource_stdmemShareCallback, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memShareCallback__ = &__nvoc_up_thunk_RmResource_memShareCallback, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__stdmemControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_stdmemControlSerialization_Prologue, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_memControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__stdmemControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_stdmemControlSerialization_Epilogue, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_memControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__stdmemControl_Prologue__ = &__nvoc_up_thunk_RmResource_stdmemControl_Prologue, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memControl_Prologue__ = &__nvoc_up_thunk_RmResource_memControl_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__stdmemControl_Epilogue__ = &__nvoc_up_thunk_RmResource_stdmemControl_Epilogue, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memControl_Epilogue__ = &__nvoc_up_thunk_RmResource_memControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__stdmemPreDestruct__ = &__nvoc_up_thunk_RsResource_stdmemPreDestruct, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memPreDestruct__ = &__nvoc_up_thunk_RsResource_memPreDestruct, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresPreDestruct__ = 
&__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__stdmemControlFilter__ = &__nvoc_up_thunk_RsResource_stdmemControlFilter, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memControlFilter__ = &__nvoc_up_thunk_RsResource_memControlFilter, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__stdmemIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_stdmemIsPartialUnmapSupported, // inline virtual inherited (res) base (mem) body + .metadata__Memory.vtable.__memIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_memIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__Memory.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__stdmemMapTo__ = &__nvoc_up_thunk_RsResource_stdmemMapTo, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memMapTo__ = &__nvoc_up_thunk_RsResource_memMapTo, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__stdmemUnmapFrom__ = &__nvoc_up_thunk_RsResource_stdmemUnmapFrom, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memUnmapFrom__ = &__nvoc_up_thunk_RsResource_memUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__stdmemGetRefCount__ = &__nvoc_up_thunk_RsResource_stdmemGetRefCount, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memGetRefCount__ = &__nvoc_up_thunk_RsResource_memGetRefCount, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__stdmemAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_stdmemAddAdditionalDependants, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_memAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = 
&resAddAdditionalDependants_IMPL, // virtual
+};
+
+
+// Dynamic down-casting information
+const struct NVOC_CASTINFO __nvoc_castinfo__StandardMemory = {
+    .numRelatives = 6,
+    .relatives = {
+        &__nvoc_metadata__StandardMemory.rtti, // [0]: (stdmem) this
+        &__nvoc_metadata__StandardMemory.metadata__Memory.rtti, // [1]: (mem) super
+        &__nvoc_metadata__StandardMemory.metadata__Memory.metadata__RmResource.rtti, // [2]: (rmres) super^2
+        &__nvoc_metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3
+        &__nvoc_metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4
+        &__nvoc_metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3
+    }
+};
+
+// 1 down-thunk(s) defined to bridge methods in StandardMemory from superclasses
+
+// stdmemCanCopy: virtual override (res) base (mem)
+NvBool __nvoc_down_thunk_StandardMemory_resCanCopy(struct RsResource *pStandardMemory) {
+    return stdmemCanCopy((struct StandardMemory *)(((unsigned char *) pStandardMemory) - NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+
+// 25 up-thunk(s) defined to bridge methods in StandardMemory to superclasses
+
+// stdmemIsDuplicate: virtual inherited (mem) base (mem)
+NV_STATUS __nvoc_up_thunk_Memory_stdmemIsDuplicate(struct StandardMemory *pMemory, NvHandle hMemory, NvBool *pDuplicate) {
+    return memIsDuplicate((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory)), hMemory, pDuplicate);
+}
+
+// stdmemGetMapAddrSpace: virtual inherited (mem) base (mem)
+NV_STATUS __nvoc_up_thunk_Memory_stdmemGetMapAddrSpace(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
+    return memGetMapAddrSpace((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory)), pCallContext, mapFlags, pAddrSpace);
+}
+
+// stdmemControl: virtual inherited (mem) base (mem)
+NV_STATUS __nvoc_up_thunk_Memory_stdmemControl(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return memControl((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory)), pCallContext, pParams);
+}
+
+// stdmemMap: virtual inherited (mem) base (mem)
+NV_STATUS __nvoc_up_thunk_Memory_stdmemMap(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
+    return memMap((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory)), pCallContext, pParams, pCpuMapping);
+}
+
+// stdmemUnmap: virtual inherited (mem) base (mem)
+NV_STATUS __nvoc_up_thunk_Memory_stdmemUnmap(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
+    return memUnmap((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory)), pCallContext, pCpuMapping);
+}
+
+// stdmemGetMemInterMapParams: virtual inherited (mem) base (mem)
+NV_STATUS __nvoc_up_thunk_Memory_stdmemGetMemInterMapParams(struct StandardMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return memGetMemInterMapParams((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory)), pParams);
+}
+
+// stdmemCheckMemInterUnmap: inline virtual inherited (mem) base (mem) body
+NV_STATUS __nvoc_up_thunk_Memory_stdmemCheckMemInterUnmap(struct StandardMemory *pMemory, NvBool bSubdeviceHandleProvided) {
+    return memCheckMemInterUnmap((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory)), bSubdeviceHandleProvided);
+}
+
+// stdmemGetMemoryMappingDescriptor: virtual inherited (mem) base (mem)
+NV_STATUS __nvoc_up_thunk_Memory_stdmemGetMemoryMappingDescriptor(struct StandardMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) {
+    return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory)), ppMemDesc);
+}
+
+// stdmemCheckCopyPermissions: inline virtual inherited (mem) base (mem) body
+NV_STATUS __nvoc_up_thunk_Memory_stdmemCheckCopyPermissions(struct StandardMemory *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice) {
+    return memCheckCopyPermissions((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory)), pDstGpu, pDstDevice);
+}
+
+// stdmemIsReady: virtual inherited (mem) base (mem)
+NV_STATUS __nvoc_up_thunk_Memory_stdmemIsReady(struct StandardMemory *pMemory, NvBool bCopyConstructorContext) {
+    return memIsReady((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory)), bCopyConstructorContext);
+}
+
+// stdmemIsGpuMapAllowed: inline virtual inherited (mem) base (mem) body
+NvBool __nvoc_up_thunk_Memory_stdmemIsGpuMapAllowed(struct StandardMemory *pMemory, struct OBJGPU *pGpu) {
+    return memIsGpuMapAllowed((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory)), pGpu);
+}
+
+// stdmemIsExportAllowed: inline virtual inherited (mem) base (mem) body
+NvBool __nvoc_up_thunk_Memory_stdmemIsExportAllowed(struct StandardMemory *pMemory) {
+    return memIsExportAllowed((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory)));
+}
+
+// stdmemAccessCallback: virtual inherited (rmres) base (mem)
+NvBool __nvoc_up_thunk_RmResource_stdmemAccessCallback(struct StandardMemory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight);
+}
+
+// stdmemShareCallback: virtual inherited (rmres) base (mem)
+NvBool __nvoc_up_thunk_RmResource_stdmemShareCallback(struct StandardMemory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return rmresShareCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+// stdmemControlSerialization_Prologue: virtual inherited (rmres) base (mem)
+NV_STATUS __nvoc_up_thunk_RmResource_stdmemControlSerialization_Prologue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// stdmemControlSerialization_Epilogue: virtual inherited (rmres) base (mem)
+void __nvoc_up_thunk_RmResource_stdmemControlSerialization_Epilogue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// stdmemControl_Prologue: virtual inherited (rmres) base (mem)
+NV_STATUS __nvoc_up_thunk_RmResource_stdmemControl_Prologue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// stdmemControl_Epilogue: virtual inherited (rmres) base (mem)
+void __nvoc_up_thunk_RmResource_stdmemControl_Epilogue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// stdmemPreDestruct: virtual inherited (res) base (mem)
+void __nvoc_up_thunk_RsResource_stdmemPreDestruct(struct StandardMemory *pResource) {
+    resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// stdmemControlFilter: virtual inherited (res) base (mem)
+NV_STATUS __nvoc_up_thunk_RsResource_stdmemControlFilter(struct StandardMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// stdmemIsPartialUnmapSupported: inline virtual inherited (res) base (mem) body
+NvBool __nvoc_up_thunk_RsResource_stdmemIsPartialUnmapSupported(struct StandardMemory *pResource) {
+    return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// stdmemMapTo: virtual inherited (res) base (mem)
+NV_STATUS __nvoc_up_thunk_RsResource_stdmemMapTo(struct StandardMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// stdmemUnmapFrom: virtual inherited (res) base (mem)
+NV_STATUS __nvoc_up_thunk_RsResource_stdmemUnmapFrom(struct StandardMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// stdmemGetRefCount: virtual inherited (res) base (mem)
+NvU32 __nvoc_up_thunk_RsResource_stdmemGetRefCount(struct StandardMemory *pResource) {
+    return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// stdmemAddAdditionalDependants: virtual inherited (res) base (mem)
+void __nvoc_up_thunk_RsResource_stdmemAddAdditionalDependants(struct RsClient *pClient, struct StandardMemory *pResource, RsResourceRef *pReference) {
+    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference);
+}
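
All of the thunks above are the same move: NVOC lays a subclass out with its superclass embedded by value, so an up-thunk adds the member's byte offset to reach the base subobject, and a down-thunk subtracts it to recover the enclosing object. A minimal standalone sketch of that offset arithmetic, using hypothetical Base/Derived types (not the real RM classes) and the standard offsetof in place of NV_OFFSETOF:

#include <stddef.h>
#include <stdio.h>

typedef struct Base    { int baseField; } Base;
typedef struct Derived { int tag; Base base; } Derived; /* base embedded by value */

/* Up-thunk direction: derived pointer -> embedded base, add the offset. */
static Base *derivedToBase(Derived *d) {
    return (Base *)((unsigned char *)d + offsetof(Derived, base));
}

/* Down-thunk direction: base pointer -> enclosing derived, subtract it. */
static Derived *baseToDerived(Base *b) {
    return (Derived *)((unsigned char *)b - offsetof(Derived, base));
}

int main(void) {
    Derived d = { .tag = 7, .base = { .baseField = 42 } };
    Base *b = derivedToBase(&d);
    printf("base=%d derived=%d\n", b->baseField, baseToDerived(b)->tag); /* base=42 derived=7 */
    return 0;
}

The subtraction case is the same trick the Linux kernel's container_of macro performs; the generated code simply spells it out once per bridged method.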
+
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__StandardMemory =
+{
+    /*numEntries=*/ 0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_Memory(Memory*);
+void __nvoc_dtor_StandardMemory(StandardMemory *pThis) {
+    __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_StandardMemory(StandardMemory *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_StandardMemory(StandardMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_StandardMemory_fail_Memory;
+    __nvoc_init_dataField_StandardMemory(pThis);
+
+    status = __nvoc_stdmemConstruct(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_StandardMemory_fail__init;
+    goto __nvoc_ctor_StandardMemory_exit; // Success
+
+__nvoc_ctor_StandardMemory_fail__init:
+    __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory);
+__nvoc_ctor_StandardMemory_fail_Memory:
+__nvoc_ctor_StandardMemory_exit:
+
+    return status;
+}
+
+// Vtable initialization
+static void __nvoc_init_funcTable_StandardMemory_1(StandardMemory *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+} // End __nvoc_init_funcTable_StandardMemory_1
+
+
+// Initialize vtable(s) for 26 virtual method(s).
+void __nvoc_init_funcTable_StandardMemory(StandardMemory *pThis) {
+    __nvoc_init_funcTable_StandardMemory_1(pThis);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__StandardMemory(StandardMemory *pThis) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4
+    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3
+    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3
+    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; // (rmres) super^2
+    pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; // (mem) super
+    pThis->__nvoc_pbase_StandardMemory = pThis; // (stdmem) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__Memory(&pThis->__nvoc_base_Memory);
+
+    // Pointer(s) to metadata structure(s)
+    pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4
+    pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource; // (res) super^3
+    pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3
+    pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__StandardMemory.metadata__Memory.metadata__RmResource; // (rmres) super^2
+    pThis->__nvoc_base_Memory.__nvoc_metadata_ptr = &__nvoc_metadata__StandardMemory.metadata__Memory; // (mem) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__StandardMemory; // (stdmem) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_StandardMemory(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_StandardMemory(StandardMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    StandardMemory *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(StandardMemory), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(StandardMemory));
+
+    pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent, if there is one, unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__StandardMemory(pThis);
+    status = __nvoc_ctor_StandardMemory(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_objCreate_StandardMemory_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_StandardMemory_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+    // Do not call destructors here; the failing constructor already ran them.
+    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+        portMemSet(pThis, 0, sizeof(StandardMemory));
+    else
+    {
+        portMemFree(pThis);
+        *ppThis = NULL;
+    }
+
+    // coverity[leaked_storage:FALSE]
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_StandardMemory(StandardMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+    CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *);
+    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+    status = __nvoc_objCreate_StandardMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+    return status;
+}
+
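
__nvoc_objCreate_StandardMemory above follows a fixed sequence: obtain storage (heap or in-place), zero it, link the new Object into its parent, wire the metadata and vtable pointers via __nvoc_init__StandardMemory, and only then run the constructor chain, unlinking and freeing on failure. The constructor itself uses the classic C goto-unwind idiom: construct the base first, and on a later failure fall through labels that tear down exactly the stages that completed. A self-contained miniature of that idiom, with hypothetical initBase/initSelf stages standing in for __nvoc_ctor_Memory and __nvoc_stdmemConstruct:

#include <stdio.h>

typedef int STATUS;          /* stand-in for NV_STATUS */
#define STATUS_OK 0

static STATUS initBase(void)     { return STATUS_OK; }
static void   teardownBase(void) { puts("teardownBase"); }
static STATUS initSelf(void)     { return -1; /* simulate a failure */ }

static STATUS ctorObject(void)
{
    STATUS status;

    status = initBase();              /* superclass part first */
    if (status != STATUS_OK) goto fail_base;

    status = initSelf();              /* then the subclass part */
    if (status != STATUS_OK) goto fail_self;

    return STATUS_OK;                 /* success path skips the unwind */

fail_self:
    teardownBase();                   /* undo completed stages in reverse order */
fail_base:
    return status;
}

int main(void)
{
    printf("ctorObject -> %d\n", ctorObject());
    return 0;
}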
diff --git a/src/nvidia/generated/g_standard_mem_nvoc.h b/src/nvidia/generated/g_standard_mem_nvoc.h
new file mode 100644
index 0000000..e4e70d9
--- /dev/null
+++ b/src/nvidia/generated/g_standard_mem_nvoc.h
@@ -0,0 +1,384 @@
+
+#ifndef _G_STANDARD_MEM_NVOC_H_
+#define _G_STANDARD_MEM_NVOC_H_
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+#include "g_standard_mem_nvoc.h"
+
+#ifndef _STANDARD_MEMORY_H_
+#define _STANDARD_MEMORY_H_
+
+#include "mem_mgr/mem.h"
+
+#include "ctrl/ctrl003e.h"
+
+typedef struct MEMORY_ALLOCATION_REQUEST MEMORY_ALLOCATION_REQUEST;
+
+struct MemoryManager;
+
+#ifndef __NVOC_CLASS_MemoryManager_TYPEDEF__
+#define __NVOC_CLASS_MemoryManager_TYPEDEF__
+typedef struct MemoryManager MemoryManager;
+#endif /* __NVOC_CLASS_MemoryManager_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_MemoryManager
+#define __nvoc_class_id_MemoryManager 0x22ad47
+#endif /* __nvoc_class_id_MemoryManager */
+
+
+struct RmClient;
+
+#ifndef __NVOC_CLASS_RmClient_TYPEDEF__
+#define __NVOC_CLASS_RmClient_TYPEDEF__
+typedef struct RmClient RmClient;
+#endif /* __NVOC_CLASS_RmClient_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_RmClient
+#define __nvoc_class_id_RmClient 0xb23d83
+#endif /* __nvoc_class_id_RmClient */
+
+
+
+/*!
+ * Allocator for normal virtual, video and system memory
+ */
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_STANDARD_MEM_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__StandardMemory;
+struct NVOC_METADATA__Memory;
+struct NVOC_VTABLE__StandardMemory;
+
+
+struct StandardMemory {
+
+    // Metadata starts with RTTI structure.
+    union {
+         const struct NVOC_METADATA__StandardMemory *__nvoc_metadata_ptr;
+         const struct NVOC_RTTI *__nvoc_rtti;
+    };
+
+    // Parent (i.e. superclass or base class) objects
+    struct Memory __nvoc_base_Memory;
+
+    // Ancestor object pointers for `staticCast` feature
+    struct Object *__nvoc_pbase_Object; // obj super^4
+    struct RsResource *__nvoc_pbase_RsResource; // res super^3
+    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3
+    struct RmResource *__nvoc_pbase_RmResource; // rmres super^2
+    struct Memory *__nvoc_pbase_Memory; // mem super
+    struct StandardMemory *__nvoc_pbase_StandardMemory; // stdmem
+};
+
+
+// Vtable with 26 per-class function pointers
+struct NVOC_VTABLE__StandardMemory {
+    NvBool (*__stdmemCanCopy__)(struct StandardMemory * /*this*/); // virtual override (res) base (mem)
+    NV_STATUS (*__stdmemIsDuplicate__)(struct StandardMemory * /*this*/, NvHandle, NvBool *); // virtual inherited (mem) base (mem)
+    NV_STATUS (*__stdmemGetMapAddrSpace__)(struct StandardMemory * /*this*/, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (mem) base (mem)
+    NV_STATUS (*__stdmemControl__)(struct StandardMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (mem) base (mem)
+    NV_STATUS (*__stdmemMap__)(struct StandardMemory * /*this*/, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (mem) base (mem)
+    NV_STATUS (*__stdmemUnmap__)(struct StandardMemory * /*this*/, CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (mem) base (mem)
+    NV_STATUS (*__stdmemGetMemInterMapParams__)(struct StandardMemory * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (mem) base (mem)
+    NV_STATUS (*__stdmemCheckMemInterUnmap__)(struct StandardMemory * /*this*/, NvBool); // inline virtual inherited (mem) base (mem) body
+    NV_STATUS (*__stdmemGetMemoryMappingDescriptor__)(struct StandardMemory * /*this*/, MEMORY_DESCRIPTOR **); // virtual inherited (mem) base (mem)
+    NV_STATUS (*__stdmemCheckCopyPermissions__)(struct StandardMemory * /*this*/, struct OBJGPU *, struct Device *); // inline virtual inherited (mem) base (mem) body
+    NV_STATUS (*__stdmemIsReady__)(struct StandardMemory * /*this*/, NvBool); // virtual inherited (mem) base (mem)
+    NvBool (*__stdmemIsGpuMapAllowed__)(struct StandardMemory * /*this*/, struct OBJGPU *); // inline virtual inherited (mem) base (mem) body
+    NvBool (*__stdmemIsExportAllowed__)(struct StandardMemory * /*this*/); // inline virtual inherited (mem) base (mem) body
+    NvBool (*__stdmemAccessCallback__)(struct StandardMemory * /*this*/, RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (mem)
+    NvBool (*__stdmemShareCallback__)(struct StandardMemory * /*this*/, RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (rmres) base (mem)
+    NV_STATUS
(*__stdmemControlSerialization_Prologue__)(struct StandardMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + void (*__stdmemControlSerialization_Epilogue__)(struct StandardMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + NV_STATUS (*__stdmemControl_Prologue__)(struct StandardMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + void (*__stdmemControl_Epilogue__)(struct StandardMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + void (*__stdmemPreDestruct__)(struct StandardMemory * /*this*/); // virtual inherited (res) base (mem) + NV_STATUS (*__stdmemControlFilter__)(struct StandardMemory * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (mem) + NvBool (*__stdmemIsPartialUnmapSupported__)(struct StandardMemory * /*this*/); // inline virtual inherited (res) base (mem) body + NV_STATUS (*__stdmemMapTo__)(struct StandardMemory * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (mem) + NV_STATUS (*__stdmemUnmapFrom__)(struct StandardMemory * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (mem) + NvU32 (*__stdmemGetRefCount__)(struct StandardMemory * /*this*/); // virtual inherited (res) base (mem) + void (*__stdmemAddAdditionalDependants__)(struct RsClient *, struct StandardMemory * /*this*/, RsResourceRef *); // virtual inherited (res) base (mem) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__StandardMemory { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Memory metadata__Memory; + const struct NVOC_VTABLE__StandardMemory vtable; +}; + +#ifndef __NVOC_CLASS_StandardMemory_TYPEDEF__ +#define __NVOC_CLASS_StandardMemory_TYPEDEF__ +typedef struct StandardMemory StandardMemory; +#endif /* __NVOC_CLASS_StandardMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_StandardMemory +#define __nvoc_class_id_StandardMemory 0x897bf7 +#endif /* __nvoc_class_id_StandardMemory */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_StandardMemory; + +#define __staticCast_StandardMemory(pThis) \ + ((pThis)->__nvoc_pbase_StandardMemory) + +#ifdef __nvoc_standard_mem_h_disabled +#define __dynamicCast_StandardMemory(pThis) ((StandardMemory*) NULL) +#else //__nvoc_standard_mem_h_disabled +#define __dynamicCast_StandardMemory(pThis) \ + ((StandardMemory*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(StandardMemory))) +#endif //__nvoc_standard_mem_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_StandardMemory(StandardMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_StandardMemory(StandardMemory**, Dynamic*, NvU32, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_StandardMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_StandardMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define stdmemCanCopy_FNPTR(pStandardMemory) pStandardMemory->__nvoc_metadata_ptr->vtable.__stdmemCanCopy__ +#define stdmemCanCopy(pStandardMemory) stdmemCanCopy_DISPATCH(pStandardMemory) +#define stdmemIsDuplicate_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsDuplicate__ +#define 
stdmemIsDuplicate(pMemory, hMemory, pDuplicate) stdmemIsDuplicate_DISPATCH(pMemory, hMemory, pDuplicate) +#define stdmemGetMapAddrSpace_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMapAddrSpace__ +#define stdmemGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) stdmemGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define stdmemControl_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memControl__ +#define stdmemControl(pMemory, pCallContext, pParams) stdmemControl_DISPATCH(pMemory, pCallContext, pParams) +#define stdmemMap_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memMap__ +#define stdmemMap(pMemory, pCallContext, pParams, pCpuMapping) stdmemMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define stdmemUnmap_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memUnmap__ +#define stdmemUnmap(pMemory, pCallContext, pCpuMapping) stdmemUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define stdmemGetMemInterMapParams_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMemInterMapParams__ +#define stdmemGetMemInterMapParams(pMemory, pParams) stdmemGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define stdmemCheckMemInterUnmap_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memCheckMemInterUnmap__ +#define stdmemCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) stdmemCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define stdmemGetMemoryMappingDescriptor_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMemoryMappingDescriptor__ +#define stdmemGetMemoryMappingDescriptor(pMemory, ppMemDesc) stdmemGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define stdmemCheckCopyPermissions_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memCheckCopyPermissions__ +#define stdmemCheckCopyPermissions(pMemory, pDstGpu, pDstDevice) stdmemCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, pDstDevice) +#define stdmemIsReady_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsReady__ +#define stdmemIsReady(pMemory, bCopyConstructorContext) stdmemIsReady_DISPATCH(pMemory, bCopyConstructorContext) +#define stdmemIsGpuMapAllowed_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsGpuMapAllowed__ +#define stdmemIsGpuMapAllowed(pMemory, pGpu) stdmemIsGpuMapAllowed_DISPATCH(pMemory, pGpu) +#define stdmemIsExportAllowed_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsExportAllowed__ +#define stdmemIsExportAllowed(pMemory) stdmemIsExportAllowed_DISPATCH(pMemory) +#define stdmemAccessCallback_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define stdmemAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) stdmemAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define stdmemShareCallback_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define stdmemShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) stdmemShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define stdmemControlSerialization_Prologue_FNPTR(pResource) 
pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define stdmemControlSerialization_Prologue(pResource, pCallContext, pParams) stdmemControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define stdmemControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define stdmemControlSerialization_Epilogue(pResource, pCallContext, pParams) stdmemControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define stdmemControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define stdmemControl_Prologue(pResource, pCallContext, pParams) stdmemControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define stdmemControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define stdmemControl_Epilogue(pResource, pCallContext, pParams) stdmemControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define stdmemPreDestruct_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define stdmemPreDestruct(pResource) stdmemPreDestruct_DISPATCH(pResource) +#define stdmemControlFilter_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define stdmemControlFilter(pResource, pCallContext, pParams) stdmemControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define stdmemIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define stdmemIsPartialUnmapSupported(pResource) stdmemIsPartialUnmapSupported_DISPATCH(pResource) +#define stdmemMapTo_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define stdmemMapTo(pResource, pParams) stdmemMapTo_DISPATCH(pResource, pParams) +#define stdmemUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define stdmemUnmapFrom(pResource, pParams) stdmemUnmapFrom_DISPATCH(pResource, pParams) +#define stdmemGetRefCount_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define stdmemGetRefCount(pResource) stdmemGetRefCount_DISPATCH(pResource) +#define stdmemAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define stdmemAddAdditionalDependants(pClient, pResource, pReference) stdmemAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NvBool stdmemCanCopy_DISPATCH(struct StandardMemory *pStandardMemory) { + return pStandardMemory->__nvoc_metadata_ptr->vtable.__stdmemCanCopy__(pStandardMemory); +} + +static inline NV_STATUS stdmemIsDuplicate_DISPATCH(struct StandardMemory *pMemory, NvHandle hMemory, NvBool *pDuplicate) { + return pMemory->__nvoc_metadata_ptr->vtable.__stdmemIsDuplicate__(pMemory, hMemory, pDuplicate); +} + +static inline NV_STATUS 
stdmemGetMapAddrSpace_DISPATCH(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__nvoc_metadata_ptr->vtable.__stdmemGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS stdmemControl_DISPATCH(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__nvoc_metadata_ptr->vtable.__stdmemControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS stdmemMap_DISPATCH(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__nvoc_metadata_ptr->vtable.__stdmemMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS stdmemUnmap_DISPATCH(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__nvoc_metadata_ptr->vtable.__stdmemUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS stdmemGetMemInterMapParams_DISPATCH(struct StandardMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__nvoc_metadata_ptr->vtable.__stdmemGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS stdmemCheckMemInterUnmap_DISPATCH(struct StandardMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__nvoc_metadata_ptr->vtable.__stdmemCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS stdmemGetMemoryMappingDescriptor_DISPATCH(struct StandardMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__nvoc_metadata_ptr->vtable.__stdmemGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS stdmemCheckCopyPermissions_DISPATCH(struct StandardMemory *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice) { + return pMemory->__nvoc_metadata_ptr->vtable.__stdmemCheckCopyPermissions__(pMemory, pDstGpu, pDstDevice); +} + +static inline NV_STATUS stdmemIsReady_DISPATCH(struct StandardMemory *pMemory, NvBool bCopyConstructorContext) { + return pMemory->__nvoc_metadata_ptr->vtable.__stdmemIsReady__(pMemory, bCopyConstructorContext); +} + +static inline NvBool stdmemIsGpuMapAllowed_DISPATCH(struct StandardMemory *pMemory, struct OBJGPU *pGpu) { + return pMemory->__nvoc_metadata_ptr->vtable.__stdmemIsGpuMapAllowed__(pMemory, pGpu); +} + +static inline NvBool stdmemIsExportAllowed_DISPATCH(struct StandardMemory *pMemory) { + return pMemory->__nvoc_metadata_ptr->vtable.__stdmemIsExportAllowed__(pMemory); +} + +static inline NvBool stdmemAccessCallback_DISPATCH(struct StandardMemory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__stdmemAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool stdmemShareCallback_DISPATCH(struct StandardMemory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__stdmemShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS stdmemControlSerialization_Prologue_DISPATCH(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__stdmemControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void 
stdmemControlSerialization_Epilogue_DISPATCH(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__stdmemControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS stdmemControl_Prologue_DISPATCH(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__stdmemControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void stdmemControl_Epilogue_DISPATCH(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__stdmemControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline void stdmemPreDestruct_DISPATCH(struct StandardMemory *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__stdmemPreDestruct__(pResource); +} + +static inline NV_STATUS stdmemControlFilter_DISPATCH(struct StandardMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__stdmemControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool stdmemIsPartialUnmapSupported_DISPATCH(struct StandardMemory *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__stdmemIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS stdmemMapTo_DISPATCH(struct StandardMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__stdmemMapTo__(pResource, pParams); +} + +static inline NV_STATUS stdmemUnmapFrom_DISPATCH(struct StandardMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__stdmemUnmapFrom__(pResource, pParams); +} + +static inline NvU32 stdmemGetRefCount_DISPATCH(struct StandardMemory *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__stdmemGetRefCount__(pResource); +} + +static inline void stdmemAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct StandardMemory *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__stdmemAddAdditionalDependants__(pClient, pResource, pReference); +} + +NvU64 stdmemGetSysmemPageSize_IMPL(struct OBJGPU *pGpu, struct StandardMemory *pMemory); + + +#ifdef __nvoc_standard_mem_h_disabled +static inline NvU64 stdmemGetSysmemPageSize(struct OBJGPU *pGpu, struct StandardMemory *pMemory) { + NV_ASSERT_FAILED_PRECOMP("StandardMemory was disabled!"); + return 0; +} +#else //__nvoc_standard_mem_h_disabled +#define stdmemGetSysmemPageSize(pGpu, pMemory) stdmemGetSysmemPageSize_IMPL(pGpu, pMemory) +#endif //__nvoc_standard_mem_h_disabled + +#define stdmemGetSysmemPageSize_HAL(pGpu, pMemory) stdmemGetSysmemPageSize(pGpu, pMemory) + +NvBool stdmemCanCopy_IMPL(struct StandardMemory *pStandardMemory); + +NV_STATUS stdmemConstruct_IMPL(struct StandardMemory *arg_pStandardMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_stdmemConstruct(arg_pStandardMemory, arg_pCallContext, arg_pParams) stdmemConstruct_IMPL(arg_pStandardMemory, arg_pCallContext, arg_pParams) +NV_STATUS stdmemValidateParams_IMPL(struct OBJGPU *pGpu, struct RmClient *pRmClient, NV_MEMORY_ALLOCATION_PARAMS *pAllocData); + +#define stdmemValidateParams(pGpu, pRmClient, pAllocData) stdmemValidateParams_IMPL(pGpu, pRmClient, pAllocData) +void 
stdmemDumpInputAllocParams_IMPL(NV_MEMORY_ALLOCATION_PARAMS *pAllocData, CALL_CONTEXT *pCallContext); + +#define stdmemDumpInputAllocParams(pAllocData, pCallContext) stdmemDumpInputAllocParams_IMPL(pAllocData, pCallContext) +void stdmemDumpOutputAllocParams_IMPL(NV_MEMORY_ALLOCATION_PARAMS *pAllocData); + +#define stdmemDumpOutputAllocParams(pAllocData) stdmemDumpOutputAllocParams_IMPL(pAllocData) +NvU64 stdmemQueryPageSize_IMPL(struct MemoryManager *pMemoryManager, NvHandle hClient, NV_MEMORY_ALLOCATION_PARAMS *pAllocData); + +#define stdmemQueryPageSize(pMemoryManager, hClient, pAllocData) stdmemQueryPageSize_IMPL(pMemoryManager, hClient, pAllocData) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_STANDARD_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_subdevice_nvoc.c b/src/nvidia/generated/g_subdevice_nvoc.c new file mode 100644 index 0000000..c224050 --- /dev/null +++ b/src/nvidia/generated/g_subdevice_nvoc.c @@ -0,0 +1,2759 @@ +#define NVOC_SUBDEVICE_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_subdevice_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x4b01b3 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Subdevice; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +// Forward declarations for Subdevice +void __nvoc_init__GpuResource(GpuResource*); +void __nvoc_init__Notifier(Notifier*); +void __nvoc_init__Subdevice(Subdevice*, RmHalspecOwner *pRmhalspecowner); +void __nvoc_init_funcTable_Subdevice(Subdevice*, RmHalspecOwner *pRmhalspecowner); +NV_STATUS __nvoc_ctor_Subdevice(Subdevice*, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_Subdevice(Subdevice*, RmHalspecOwner *pRmhalspecowner); +void __nvoc_dtor_Subdevice(Subdevice*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__Subdevice; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__Subdevice; + +// Down-thunk(s) to bridge Subdevice methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +PEVENTNOTIFICATION * __nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier); // super +struct NotifShare * __nvoc_down_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier); // super +void __nvoc_down_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare); // super +NV_STATUS __nvoc_down_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super +NV_STATUS __nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super +void __nvoc_down_thunk_Subdevice_resPreDestruct(struct RsResource *pResource); // this +NV_STATUS __nvoc_down_thunk_Subdevice_gpuresInternalControlForward(struct GpuResource *pSubdevice, NvU32 command, void *pParams, NvU32 size); // this + +// Up-thunk(s) to bridge Subdevice methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2 +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS 
*pParams); // super^2 +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2 +NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super +void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct GpuResource *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource); // super +void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference); // super +NV_STATUS __nvoc_up_thunk_GpuResource_subdeviceControl(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_GpuResource_subdeviceMap(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_GpuResource_subdeviceUnmap(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // this +NvBool __nvoc_up_thunk_GpuResource_subdeviceShareCallback(struct Subdevice *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS 
__nvoc_up_thunk_GpuResource_subdeviceGetRegBaseOffsetAndSize(struct Subdevice *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // this +NV_STATUS __nvoc_up_thunk_GpuResource_subdeviceGetMapAddrSpace(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this +NvHandle __nvoc_up_thunk_GpuResource_subdeviceGetInternalObjectHandle(struct Subdevice *pGpuResource); // this +NvBool __nvoc_up_thunk_RmResource_subdeviceAccessCallback(struct Subdevice *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NV_STATUS __nvoc_up_thunk_RmResource_subdeviceGetMemInterMapParams(struct Subdevice *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_subdeviceCheckMemInterUnmap(struct Subdevice *pRmResource, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_RmResource_subdeviceGetMemoryMappingDescriptor(struct Subdevice *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_RmResource_subdeviceControlSerialization_Prologue(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_subdeviceControlSerialization_Epilogue(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_subdeviceControl_Prologue(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_subdeviceControl_Epilogue(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_subdeviceCanCopy(struct Subdevice *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_subdeviceIsDuplicate(struct Subdevice *pResource, NvHandle hMemory, NvBool *pDuplicate); // this +NV_STATUS __nvoc_up_thunk_RsResource_subdeviceControlFilter(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_subdeviceIsPartialUnmapSupported(struct Subdevice *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_subdeviceMapTo(struct Subdevice *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_subdeviceUnmapFrom(struct Subdevice *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_subdeviceGetRefCount(struct Subdevice *pResource); // this +void __nvoc_up_thunk_RsResource_subdeviceAddAdditionalDependants(struct RsClient *pClient, struct Subdevice *pResource, RsResourceRef *pReference); // this +PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_subdeviceGetNotificationListPtr(struct Subdevice *pNotifier); // this +struct NotifShare * __nvoc_up_thunk_Notifier_subdeviceGetNotificationShare(struct Subdevice *pNotifier); // this +void __nvoc_up_thunk_Notifier_subdeviceSetNotificationShare(struct Subdevice *pNotifier, struct NotifShare *pNotifShare); // this +NV_STATUS __nvoc_up_thunk_Notifier_subdeviceUnregisterEvent(struct Subdevice *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // this +NV_STATUS __nvoc_up_thunk_Notifier_subdeviceGetOrAllocNotifShare(struct Subdevice *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct 
NotifShare **ppNotifShare); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_Subdevice = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Subdevice), + /*classId=*/ classId(Subdevice), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Subdevice", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Subdevice, + /*pCastInfo=*/ &__nvoc_castinfo__Subdevice, + /*pExportInfo=*/ &__nvoc_export_info__Subdevice +}; + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevice[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x30118u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetInfoV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x30118u) + /*flags=*/ 0x30118u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800102u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetInfoV2" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010au) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetNameString_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010au) + /*flags=*/ 0x2010au, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800110u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetNameString" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50au) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetShortNameString_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50au) + /*flags=*/ 0x50au, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800111u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetShortNameString" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetSdm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800118u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_SDM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetSdm" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50bu) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetSimulationInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50bu) + /*flags=*/ 0x50bu, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800119u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetSimulationInfo" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEngines_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + 
/*accessRight=*/0x0u, + /*methodId=*/ 0x20800123u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEngines" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEngineClasslist_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u) + /*flags=*/ 0x109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800124u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEngineClasslist" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuQueryMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800128u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_QUERY_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuQueryMode" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10244u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuPromoteCtx_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10244u) + /*flags=*/ 0x10244u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080012bu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuPromoteCtx" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c240u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuEvictCtx_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c240u) + /*flags=*/ 0x1c240u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080012cu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_EVICT_CTX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuEvictCtx" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x14244u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuInitializeCtx_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x14244u) + /*flags=*/ 0x14244u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080012du, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuInitializeCtx" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x448u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetOEMBoardInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x448u) + /*flags=*/ 0x448u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080013fu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetOEMBoardInfo" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10au) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
subdeviceCtrlCmdGpuGetId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10au) + /*flags=*/ 0x10au, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800142u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetId" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEnginePartnerList_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u) + /*flags=*/ 0x118u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800147u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEnginePartnerList" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10118u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetGidInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10118u) + /*flags=*/ 0x10118u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080014au, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_GID_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetGidInfo" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuSetOptimusInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080014cu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuSetOptimusInfo" +#endif + }, + { /* [16] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetIpVersion_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080014du, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetIpVersion" +#endif + }, + { /* [17] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuHandleGpuSR_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u) + /*flags=*/ 0x3u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800167u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuHandleGpuSR" +#endif + }, + { /* [18] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x448u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetOEMInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x448u) + /*flags=*/ 0x448u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800169u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetOEMInfo" +#endif + }, + { /* [19] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u) + /*pFunc=*/ (void (*)(void)) NULL, 
+#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEnginesV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u) + /*flags=*/ 0x10109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800170u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEnginesV2" +#endif + }, + { /* [20] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuQueryFunctionStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800173u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuQueryFunctionStatus" +#endif + }, + { /* [21] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10448u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetVmmuSegmentSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10448u) + /*flags=*/ 0x10448u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080017eu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetVmmuSegmentSize" +#endif + }, + { /* [22] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetCachedInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*flags=*/ 0x10bu, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800182u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetCachedInfo" +#endif + }, + { /* [23] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10018u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetMaxSupportedPageSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10018u) + /*flags=*/ 0x10018u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800188u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetMaxSupportedPageSize" +#endif + }, + { /* [24] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetPids_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080018du, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_PIDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetPids" +#endif + }, + { /* [25] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetPidInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080018eu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_PID_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + 
/*func=*/ "subdeviceCtrlCmdGpuGetPidInfo" +#endif + }, + { /* [26] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10248u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuHandleVfPriFault_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10248u) + /*flags=*/ 0x10248u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800192u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuHandleVfPriFault" +#endif + }, + { /* [27] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetGfid_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800196u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_GFID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetGfid" +#endif + }, + { /* [28] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdUpdateGfidP2pCapability_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800197u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdUpdateGfidP2pCapability" +#endif + }, + { /* [29] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x110u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdValidateMemMapRequest_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x110u) + /*flags=*/ 0x110u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800198u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdValidateMemMapRequest" +#endif + }, + { /* [30] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xau) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEngineLoadTimes_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xau) + /*flags=*/ 0xau, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080019bu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEngineLoadTimes" +#endif + }, + { /* [31] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40448u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetChipDetails_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40448u) + /*flags=*/ 0x40448u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001a4u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_CHIP_DETAILS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetChipDetails" +#endif + }, + { /* [32] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100048u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuMarkDeviceForReset_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100048u) + /*flags=*/ 0x100048u, + /*accessRight=*/0x0u, + 
/*methodId=*/ 0x208001a9u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuMarkDeviceForReset" +#endif + }, + { /* [33] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100048u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuUnmarkDeviceForReset_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100048u) + /*flags=*/ 0x100048u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001aau, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuUnmarkDeviceForReset" +#endif + }, + { /* [34] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x158u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetResetStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x158u) + /*flags=*/ 0x158u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001abu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_RESET_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetResetStatus" +#endif + }, + { /* [35] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100048u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuMarkDeviceForDrainAndReset_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100048u) + /*flags=*/ 0x100048u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001acu, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuMarkDeviceForDrainAndReset" +#endif + }, + { /* [36] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100048u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuUnmarkDeviceForDrainAndReset_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100048u) + /*flags=*/ 0x100048u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001adu, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuUnmarkDeviceForDrainAndReset" +#endif + }, + { /* [37] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetDrainAndResetStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001aeu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_DRAIN_AND_RESET_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetDrainAndResetStatus" +#endif + }, + { /* [38] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10048u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetConstructedFalconInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10048u) + /*flags=*/ 0x10048u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001b0u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetConstructedFalconInfo" +#endif + }, + { /* [39] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetVfCaps_IMPL, 
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u) + /*flags=*/ 0x10109u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001b1u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_VF_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetVfCaps" +#endif + }, + { /* [40] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetRecoveryAction_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u) + /*flags=*/ 0x108u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001b2u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_RECOVERY_ACTION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetRecoveryAction" +#endif + }, + { /* [41] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x102d0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalControlGspTrace_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x102d0u) + /*flags=*/ 0x102d0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001e3u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalControlGspTrace" +#endif + }, + { /* [42] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlGpuGetFipsStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001e4u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_FIPS_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlGpuGetFipsStatus" +#endif + }, + { /* [43] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10448u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetFirstAsyncCEIdx_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10448u) + /*flags=*/ 0x10448u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001e6u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_FIRST_ASYNC_CE_IDX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetFirstAsyncCEIdx" +#endif + }, + { /* [44] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100108u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuRpcGspTest_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100108u) + /*flags=*/ 0x100108u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001e8u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuRpcGspTest" +#endif + }, + { /* [45] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100108u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuRpcGspQuerySizes_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100108u) + /*flags=*/ 0x100108u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001e9u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_RPC_GSP_QUERY_SIZES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuRpcGspQuerySizes" 
+#endif + }, + { /* [46] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdRusdGetSupportedFeatures_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu) + /*flags=*/ 0x10bu, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001eau, + /*paramSize=*/ sizeof(NV2080_CTRL_RUSD_GET_SUPPORTED_FEATURES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdRusdGetSupportedFeatures" +#endif + }, + { /* [47] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x14u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdRusdSetFeatures_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x14u) + /*flags=*/ 0x14u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208001ebu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_RUSD_SET_FEATURES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdRusdSetFeatures" +#endif + }, + { /* [48] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10118u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetNotification_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10118u) + /*flags=*/ 0x10118u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800301u, + /*paramSize=*/ sizeof(NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetNotification" +#endif + }, + { /* [49] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetTrigger_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800302u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetTrigger" +#endif + }, + { /* [50] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10008u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetMemoryNotifies_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10008u) + /*flags=*/ 0x10008u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800303u, + /*paramSize=*/ sizeof(NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetMemoryNotifies" +#endif + }, + { /* [51] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetSemaphoreMemory_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800304u, + /*paramSize=*/ sizeof(NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetSemaphoreMemory" +#endif + }, + { /* [52] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetSemaMemValidation_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800306u, + /*paramSize=*/ 
sizeof(NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetSemaMemValidation" +#endif + }, + { /* [53] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetTriggerFifo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800308u, + /*paramSize=*/ sizeof(NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetTriggerFifo" +#endif + }, + { /* [54] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventGspTraceRatsBindEvtbuf_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080030au, + /*paramSize=*/ sizeof(NV2080_CTRL_EVENT_RATS_GSP_TRACE_BIND_EVTBUF_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventGspTraceRatsBindEvtbuf" +#endif + }, + { /* [55] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerSchedule_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800401u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdTimerSchedule" +#endif + }, + { /* [56] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerCancel_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u) + /*flags=*/ 0x8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800402u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdTimerCancel" +#endif + }, + { /* [57] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10118u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerGetTime_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10118u) + /*flags=*/ 0x10118u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800403u, + /*paramSize=*/ sizeof(NV2080_CTRL_TIMER_GET_TIME_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdTimerGetTime" +#endif + }, + { /* [58] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerGetRegisterOffset_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800404u, + /*paramSize=*/ sizeof(NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdTimerGetRegisterOffset" +#endif + }, + { /* [59] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_IMPL, +#endif // 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u) + /*flags=*/ 0x108u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800406u, + /*paramSize=*/ sizeof(NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo" +#endif + }, + { /* [60] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplayGetStaticInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a01u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplayGetStaticInfo" +#endif + }, + { /* [61] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x404c0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetChipInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x404c0u) + /*flags=*/ 0x404c0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a36u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGetChipInfo" +#endif + }, + { /* [62] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c4c0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetDeviceInfoTable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c4c0u) + /*flags=*/ 0x1c4c0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a40u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGetDeviceInfoTable" +#endif + }, + { /* [63] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4c0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetUserRegisterAccessMap_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4c0u) + /*flags=*/ 0x4c0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a41u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGetUserRegisterAccessMap" +#endif + }, + { /* [64] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplayWriteInstMem_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a49u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplayWriteInstMem" +#endif + }, + { /* [65] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalRecoverAllComputeContexts_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a4au, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if 
NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalRecoverAllComputeContexts" +#endif + }, + { /* [66] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplayGetIpVersion_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a4bu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplayGetIpVersion" +#endif + }, + { /* [67] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetSmcMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a4cu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGetSmcMode" +#endif + }, + { /* [68] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplaySetupRgLineIntr_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a4du, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplaySetupRgLineIntr" +#endif + }, + { /* [69] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplaySetImportedImpData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a54u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplaySetImportedImpData" +#endif + }, + { /* [70] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdIsEgpuBridge_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a55u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GET_EGPU_BRIDGE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdIsEgpuBridge" +#endif + }, + { /* [71] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalLogOobXid_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a56u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_LOG_OOB_XID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalLogOobXid" +#endif + }, + { /* [72] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalVmmuGetSpaForGpaEntries_IMPL, +#endif // 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a57u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalVmmuGetSpaForGpaEntries" +#endif + }, + { /* [73] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplaySetChannelPushbuffer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a58u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplaySetChannelPushbuffer" +#endif + }, + { /* [74] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplayGetDisplayMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a5du, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplayGetDisplayMask" +#endif + }, + { /* [75] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalSetP2pCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800ab5u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_SET_P2P_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalSetP2pCaps" +#endif + }, + { /* [76] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalRemoveP2pCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800ab6u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_REMOVE_P2P_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalRemoveP2pCaps" +#endif + }, + { /* [77] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetPcieP2pCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800ab8u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGetPcieP2pCaps" +#endif + }, + { /* [78] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalPostInitBrightcStateLoad_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800ac6u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if 
NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalPostInitBrightcStateLoad" +#endif + }, + { /* [79] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplayPinsetsToLockpins_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800adcu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISP_PINSETS_TO_LOCKPINS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplayPinsetsToLockpins" +#endif + }, + { /* [80] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalDetectHsVideoBridge_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800addu, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalDetectHsVideoBridge" +#endif + }, + { /* [81] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplaySetSliLinkGpioSwControl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800adeu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISP_SET_SLI_LINK_GPIO_SW_CONTROL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplaySetSliLinkGpioSwControl" +#endif + }, + { /* [82] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalSetStaticEdidData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800adfu, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalSetStaticEdidData" +#endif + }, + { /* [83] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc8u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGpuClientLowPowerModeEnter_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc8u) + /*flags=*/ 0xc8u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800ae9u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GPU_CLIENT_LOW_POWER_MODE_ENTER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGpuClientLowPowerModeEnter" +#endif + }, + { /* [84] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGpuGetGspRmFreeHeap_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800aebu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GPU_GET_GSP_RM_FREE_HEAP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGpuGetGspRmFreeHeap" +#endif + }, + { /* [85] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
subdeviceCtrlCmdInternalDisplayAcpiSubsytemActivated_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800af0u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalDisplayAcpiSubsytemActivated" +#endif + }, + { /* [86] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalDisplayPreModeSet_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800af1u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalDisplayPreModeSet" +#endif + }, + { /* [87] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalDisplayPostModeSet_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800af2u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalDisplayPostModeSet" +#endif + }, + { /* [88] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalInitUserSharedData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800afeu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalInitUserSharedData" +#endif + }, + { /* [89] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalUserSharedDataSetDataPoll_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800affu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalUserSharedDataSetDataPoll" +#endif + }, + { /* [90] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGpioProgramDirection_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802300u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GPIO_PROGRAM_DIRECTION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGpioProgramDirection" +#endif + }, + { /* [91] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGpioProgramOutput_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802301u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GPIO_PROGRAM_OUTPUT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ 
"subdeviceCtrlCmdInternalGpioProgramOutput" +#endif + }, + { /* [92] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGpioReadInput_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802302u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GPIO_READ_INPUT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGpioReadInput" +#endif + }, + { /* [93] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGpioActivateHwFunction_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802303u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GPIO_ACTIVATE_HW_FUNCTION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGpioActivateHwFunction" +#endif + }, + { /* [94] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEccGetClientExposedCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803400u, + /*paramSize=*/ sizeof(NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEccGetClientExposedCounters" +#endif + }, + { /* [95] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEccGetVolatileCounts_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803401u, + /*paramSize=*/ sizeof(NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEccGetVolatileCounts" +#endif + }, + { /* [96] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEccInjectError_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803403u, + /*paramSize=*/ sizeof(NV2080_CTRL_ECC_INJECT_ERROR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEccInjectError" +#endif + }, + { /* [97] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEccGetRepairStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803404u, + /*paramSize=*/ sizeof(NV2080_CTRL_ECC_GET_REPAIR_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEccGetRepairStatus" +#endif + }, + { /* [98] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEccInjectionSupported_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + 
/*accessRight=*/0x0u, + /*methodId=*/ 0x20803405u, + /*paramSize=*/ sizeof(NV2080_CTRL_ECC_INJECTION_SUPPORTED_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEccInjectionSupported" +#endif + }, + { /* [99] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40549u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGspGetFeatures_DISPATCH, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40549u) + /*flags=*/ 0x40549u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803601u, + /*paramSize=*/ sizeof(NV2080_CTRL_GSP_GET_FEATURES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGspGetFeatures" +#endif + }, + { /* [100] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGspGetRmHeapStats_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u) + /*flags=*/ 0x48u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803602u, + /*paramSize=*/ sizeof(NV2080_CTRL_GSP_GET_RM_HEAP_STATS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGspGetRmHeapStats" +#endif + }, + { /* [101] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetVgpuHeapStats_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803603u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetVgpuHeapStats" +#endif + }, + { /* [102] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdLibosGetHeapStats_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u) + /*flags=*/ 0x248u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803604u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdLibosGetHeapStats" +#endif + }, + { /* [103] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u) + /*flags=*/ 0x3u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803d01u, + /*paramSize=*/ sizeof(NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdOsUnixGc6BlockerRefCnt" +#endif + }, + { /* [104] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u) + /*flags=*/ 0x9u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803d02u, + /*paramSize=*/ sizeof(NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdOsUnixAllowDisallowGcoff" +#endif + }, + { /* [105] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + 
/*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + /*flags=*/ 0x1u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803d03u, + /*paramSize=*/ sizeof(NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdOsUnixAudioDynamicPower" +#endif + }, + { /* [106] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGcxEntryPrerequisite_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u) + /*flags=*/ 0xc0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080a7d7u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GCX_ENTRY_PREREQUISITE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGcxEntryPrerequisite" +#endif + }, + +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__Subdevice __nvoc_metadata__Subdevice = { + .rtti.pClassDef = &__nvoc_class_def_Subdevice, // (subdevice) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Subdevice, + .rtti.offset = 0, + .metadata__GpuResource.rtti.pClassDef = &__nvoc_class_def_GpuResource, // (gpures) super + .metadata__GpuResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.rtti.offset = NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource), + .metadata__GpuResource.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2 + .metadata__GpuResource.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.rtti.offset = NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource), + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3 + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4 + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3 + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + .metadata__Notifier.rtti.pClassDef = &__nvoc_class_def_Notifier, // (notify) super + .metadata__Notifier.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Notifier.rtti.offset = NV_OFFSETOF(Subdevice, __nvoc_base_Notifier), + .metadata__Notifier.metadata__INotifier.rtti.pClassDef = &__nvoc_class_def_INotifier, // 
(inotify) super^2
+    .metadata__Notifier.metadata__INotifier.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__Notifier.metadata__INotifier.rtti.offset = NV_OFFSETOF(Subdevice, __nvoc_base_Notifier.__nvoc_base_INotifier),
+
+    .vtable.__subdevicePreDestruct__ = &subdevicePreDestruct_IMPL,    // virtual override (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresPreDestruct__ = &__nvoc_up_thunk_RsResource_gpuresPreDestruct,    // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct,    // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &__nvoc_down_thunk_Subdevice_resPreDestruct,    // virtual
+    .vtable.__subdeviceInternalControlForward__ = &subdeviceInternalControlForward_IMPL,    // virtual override (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresInternalControlForward__ = &__nvoc_down_thunk_Subdevice_gpuresInternalControlForward,    // virtual
+    .vtable.__subdeviceControl__ = &__nvoc_up_thunk_GpuResource_subdeviceControl,    // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControl__ = &gpuresControl_IMPL,    // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl,    // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_GpuResource_resControl,    // virtual
+    .vtable.__subdeviceMap__ = &__nvoc_up_thunk_GpuResource_subdeviceMap,    // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresMap__ = &gpuresMap_IMPL,    // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap,    // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_GpuResource_resMap,    // virtual
+    .vtable.__subdeviceUnmap__ = &__nvoc_up_thunk_GpuResource_subdeviceUnmap,    // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresUnmap__ = &gpuresUnmap_IMPL,    // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap,    // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_GpuResource_resUnmap,    // virtual
+    .vtable.__subdeviceShareCallback__ = &__nvoc_up_thunk_GpuResource_subdeviceShareCallback,    // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresShareCallback__ = &gpuresShareCallback_IMPL,    // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresShareCallback__ = &__nvoc_down_thunk_GpuResource_rmresShareCallback,    // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback,    // virtual
+    .vtable.__subdeviceGetRegBaseOffsetAndSize__ = &__nvoc_up_thunk_GpuResource_subdeviceGetRegBaseOffsetAndSize,    // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetRegBaseOffsetAndSize__ = &gpuresGetRegBaseOffsetAndSize_IMPL,    // virtual
+    .vtable.__subdeviceGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_subdeviceGetMapAddrSpace,    // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL,    // virtual
+    .vtable.__subdeviceGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_subdeviceGetInternalObjectHandle,    // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL,    // virtual
+    .vtable.__subdeviceAccessCallback__ = &__nvoc_up_thunk_RmResource_subdeviceAccessCallback,    // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresAccessCallback__ = &__nvoc_up_thunk_RmResource_gpuresAccessCallback,    // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL,    // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback,    // virtual
+    .vtable.__subdeviceGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_subdeviceGetMemInterMapParams,    // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams,    // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL,    // virtual
+    .vtable.__subdeviceCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_subdeviceCheckMemInterUnmap,    // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap,    // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL,    // virtual
+    .vtable.__subdeviceGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_subdeviceGetMemoryMappingDescriptor,    // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor,    // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL,    // virtual
+    .vtable.__subdeviceControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_subdeviceControlSerialization_Prologue,    // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue,    // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL,    // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue,    // virtual
+    .vtable.__subdeviceControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_subdeviceControlSerialization_Epilogue,    // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue,    // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL,    // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue,    // virtual
+    .vtable.__subdeviceControl_Prologue__ = &__nvoc_up_thunk_RmResource_subdeviceControl_Prologue,    // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Prologue,    // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL,    // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue,    // virtual
+    .vtable.__subdeviceControl_Epilogue__ = &__nvoc_up_thunk_RmResource_subdeviceControl_Epilogue,    // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Epilogue,    // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL,    // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue,    // virtual
+    .vtable.__subdeviceCanCopy__ = &__nvoc_up_thunk_RsResource_subdeviceCanCopy,    // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresCanCopy__ = &__nvoc_up_thunk_RsResource_gpuresCanCopy,    // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy,    // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL,    // virtual
+    .vtable.__subdeviceIsDuplicate__ = &__nvoc_up_thunk_RsResource_subdeviceIsDuplicate,    // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpuresIsDuplicate,    // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate,    // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL,    // virtual
+    .vtable.__subdeviceControlFilter__ = &__nvoc_up_thunk_RsResource_subdeviceControlFilter,    // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControlFilter__ = &__nvoc_up_thunk_RsResource_gpuresControlFilter,    // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter,    // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL,    // virtual
+    .vtable.__subdeviceIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_subdeviceIsPartialUnmapSupported,    // inline virtual inherited (res) base (gpures) body
+    .metadata__GpuResource.vtable.__gpuresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported,    // inline virtual inherited (res) base (rmres) body
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported,    // inline virtual inherited (res) base (res) body
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453,    // inline virtual body
+    .vtable.__subdeviceMapTo__ = &__nvoc_up_thunk_RsResource_subdeviceMapTo,    // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresMapTo__ = &__nvoc_up_thunk_RsResource_gpuresMapTo,    // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo,    // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL,    // virtual
+    .vtable.__subdeviceUnmapFrom__ = &__nvoc_up_thunk_RsResource_subdeviceUnmapFrom,    // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpuresUnmapFrom,    // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom,    // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL,    // virtual
+    .vtable.__subdeviceGetRefCount__ = &__nvoc_up_thunk_RsResource_subdeviceGetRefCount,    // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetRefCount__ = &__nvoc_up_thunk_RsResource_gpuresGetRefCount,    // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount,    // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL,    // virtual
+    .vtable.__subdeviceAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_subdeviceAddAdditionalDependants,    // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants,    // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants,    // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL,    // virtual
+    .vtable.__subdeviceGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_subdeviceGetNotificationListPtr,    // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyGetNotificationListPtr__ = &notifyGetNotificationListPtr_IMPL,    // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationListPtr__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr,    // pure virtual
+    .vtable.__subdeviceGetNotificationShare__ = &__nvoc_up_thunk_Notifier_subdeviceGetNotificationShare,    // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyGetNotificationShare__ = &notifyGetNotificationShare_IMPL,    // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationShare,    // pure virtual
+    .vtable.__subdeviceSetNotificationShare__ = &__nvoc_up_thunk_Notifier_subdeviceSetNotificationShare,    // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifySetNotificationShare__ = &notifySetNotificationShare_IMPL,    // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifySetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifySetNotificationShare,    // pure virtual
+    .vtable.__subdeviceUnregisterEvent__ = &__nvoc_up_thunk_Notifier_subdeviceUnregisterEvent,    // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyUnregisterEvent__ = &notifyUnregisterEvent_IMPL,    // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyUnregisterEvent__ = &__nvoc_down_thunk_Notifier_inotifyUnregisterEvent,    // pure virtual
+    .vtable.__subdeviceGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_subdeviceGetOrAllocNotifShare,    // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyGetOrAllocNotifShare__ = &notifyGetOrAllocNotifShare_IMPL,    // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetOrAllocNotifShare__ = &__nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare,    // pure virtual
+};
+
+
+// Dynamic down-casting information
+const struct NVOC_CASTINFO __nvoc_castinfo__Subdevice = {
+    .numRelatives = 8,
+    .relatives = {
+        &__nvoc_metadata__Subdevice.rtti,    // [0]: (subdevice) this
+        &__nvoc_metadata__Subdevice.metadata__GpuResource.rtti,    // [1]: (gpures) super
+        &__nvoc_metadata__Subdevice.metadata__GpuResource.metadata__RmResource.rtti,    // [2]: (rmres) super^2
+        &__nvoc_metadata__Subdevice.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti,    // [3]: (res) super^3
+        &__nvoc_metadata__Subdevice.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti,    // [4]: (obj) super^4
+        &__nvoc_metadata__Subdevice.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti,    // [5]: (rmrescmn) super^3
+        &__nvoc_metadata__Subdevice.metadata__Notifier.rtti,    // [6]: (notify) super
+        &__nvoc_metadata__Subdevice.metadata__Notifier.metadata__INotifier.rtti,    // [7]: (inotify) super^2
+    }
+};
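For orientation: the castinfo table above is the data behind NVOC's dynamicCast() — a lookup scans the relatives for a matching class and rebases the pointer by the offset recorded in that relative's RTTI. A self-contained sketch of that walk, with deliberately simplified stand-in types (the real ones live in nvoc/rtti.h and nvoc/runtime.h and differ in detail):

// Simplified sketch of the dynamicCast lookup; SketchRtti/SketchCastInfo are
// assumptions standing in for NVOC_RTTI/NVOC_CASTINFO.
struct SketchRtti     { unsigned classId; unsigned offset; };
struct SketchCastInfo { unsigned numRelatives; const struct SketchRtti *relatives[8]; };

static void *sketchDynamicCast(void *pObj, const struct SketchCastInfo *pInfo,
                               unsigned targetClassId)
{
    unsigned i;
    for (i = 0; i < pInfo->numRelatives; i++)
    {
        if (pInfo->relatives[i]->classId == targetClassId)
        {
            // Rebase from the object to the requested ancestor subobject.
            return (unsigned char *)pObj + pInfo->relatives[i]->offset;
        }
    }
    return 0; // unrelated class: the cast fails
}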
+
+// 2 down-thunk(s) defined to bridge methods in Subdevice from superclasses
+
+// subdevicePreDestruct: virtual override (res) base (gpures)
+void __nvoc_down_thunk_Subdevice_resPreDestruct(struct RsResource *pResource) {
+    subdevicePreDestruct((struct Subdevice *)(((unsigned char *) pResource) - NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// subdeviceInternalControlForward: virtual override (gpures) base (gpures)
+NV_STATUS __nvoc_down_thunk_Subdevice_gpuresInternalControlForward(struct GpuResource *pSubdevice, NvU32 command, void *pParams, NvU32 size) {
+    return subdeviceInternalControlForward((struct Subdevice *)(((unsigned char *) pSubdevice) - NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource)), command, pParams, size);
+}
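These thunks are plain pointer arithmetic: a down-thunk subtracts the embedded base subobject's offset to recover the derived object, and the up-thunks that follow add it back. The same container_of-style idiom in isolation, with hypothetical types (Base/Derived are not NVOC classes):

// Sketch of the thunk idiom: the base is embedded at a fixed offset inside
// the derived struct, so conversions are +/- offsetof.
#include <stddef.h>

struct Base    { int baseState; };
struct Derived { int derivedState; struct Base base; };

static struct Derived *derivedFromBase(struct Base *pBase)
{
    // Down-cast: subtract the embedded subobject's offset.
    return (struct Derived *)((unsigned char *)pBase - offsetof(struct Derived, base));
}

static struct Base *baseFromDerived(struct Derived *pDerived)
{
    // Up-cast: just take the embedded subobject's address.
    return &pDerived->base;
}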
+
+
+// 28 up-thunk(s) defined to bridge methods in Subdevice to superclasses
+
+// subdeviceControl: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_subdeviceControl(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return gpuresControl((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource)), pCallContext, pParams);
+}
+
+// subdeviceMap: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_subdeviceMap(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
+    return gpuresMap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource)), pCallContext, pParams, pCpuMapping);
+}
+
+// subdeviceUnmap: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_subdeviceUnmap(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
+    return gpuresUnmap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource)), pCallContext, pCpuMapping);
+}
+
+// subdeviceShareCallback: virtual inherited (gpures) base (gpures)
+NvBool __nvoc_up_thunk_GpuResource_subdeviceShareCallback(struct Subdevice *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return gpuresShareCallback((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource)), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+// subdeviceGetRegBaseOffsetAndSize: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_subdeviceGetRegBaseOffsetAndSize(struct Subdevice *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
+    return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource)), pGpu, pOffset, pSize);
+}
+
+// subdeviceGetMapAddrSpace: virtual inherited (gpures) base (gpures)
+NV_STATUS __nvoc_up_thunk_GpuResource_subdeviceGetMapAddrSpace(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
+    return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource)), pCallContext, mapFlags, pAddrSpace);
+}
+
+// subdeviceGetInternalObjectHandle: virtual inherited (gpures) base (gpures)
+NvHandle __nvoc_up_thunk_GpuResource_subdeviceGetInternalObjectHandle(struct Subdevice *pGpuResource) {
+    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource)));
+}
+
+// subdeviceAccessCallback: virtual inherited (rmres) base (gpures)
+NvBool __nvoc_up_thunk_RmResource_subdeviceAccessCallback(struct Subdevice *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight);
+}
+
+// subdeviceGetMemInterMapParams: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_subdeviceGetMemInterMapParams(struct Subdevice *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pParams);
+}
+
+// subdeviceCheckMemInterUnmap: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_subdeviceCheckMemInterUnmap(struct Subdevice *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource)), bSubdeviceHandleProvided);
+}
+
+// subdeviceGetMemoryMappingDescriptor: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_subdeviceGetMemoryMappingDescriptor(struct Subdevice *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource)), ppMemDesc);
+}
+
+// subdeviceControlSerialization_Prologue: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_subdeviceControlSerialization_Prologue(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// subdeviceControlSerialization_Epilogue: virtual inherited (rmres) base (gpures)
+void __nvoc_up_thunk_RmResource_subdeviceControlSerialization_Epilogue(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// subdeviceControl_Prologue: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_subdeviceControl_Prologue(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// subdeviceControl_Epilogue: virtual inherited (rmres) base (gpures)
+void __nvoc_up_thunk_RmResource_subdeviceControl_Epilogue(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// subdeviceCanCopy: virtual inherited (res) base (gpures)
+NvBool __nvoc_up_thunk_RsResource_subdeviceCanCopy(struct Subdevice *pResource) {
+    return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// subdeviceIsDuplicate: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_subdeviceIsDuplicate(struct Subdevice *pResource, NvHandle hMemory, NvBool *pDuplicate) {
+    return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate);
+}
+
+// subdeviceControlFilter: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_subdeviceControlFilter(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// subdeviceIsPartialUnmapSupported: inline virtual inherited (res) base (gpures) body
+NvBool __nvoc_up_thunk_RsResource_subdeviceIsPartialUnmapSupported(struct Subdevice *pResource) {
+    return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// subdeviceMapTo: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_subdeviceMapTo(struct Subdevice *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// subdeviceUnmapFrom: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_subdeviceUnmapFrom(struct Subdevice *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// subdeviceGetRefCount: virtual inherited (res) base (gpures)
+NvU32 __nvoc_up_thunk_RsResource_subdeviceGetRefCount(struct Subdevice *pResource) {
+    return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// subdeviceAddAdditionalDependants: virtual inherited (res) base (gpures)
+void __nvoc_up_thunk_RsResource_subdeviceAddAdditionalDependants(struct RsClient *pClient, struct Subdevice *pResource, RsResourceRef *pReference) {
+    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference);
+}
+
+// subdeviceGetNotificationListPtr: virtual inherited (notify) base (notify)
+PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_subdeviceGetNotificationListPtr(struct Subdevice *pNotifier) {
+    return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(Subdevice, __nvoc_base_Notifier)));
+}
+
+// subdeviceGetNotificationShare: virtual inherited (notify) base (notify)
+struct NotifShare * __nvoc_up_thunk_Notifier_subdeviceGetNotificationShare(struct Subdevice *pNotifier) {
+    return notifyGetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(Subdevice, __nvoc_base_Notifier)));
+}
+
+// subdeviceSetNotificationShare: virtual inherited (notify) base (notify)
+void __nvoc_up_thunk_Notifier_subdeviceSetNotificationShare(struct Subdevice *pNotifier, struct NotifShare *pNotifShare) {
+    notifySetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(Subdevice, __nvoc_base_Notifier)), pNotifShare);
+}
+
+// subdeviceUnregisterEvent: virtual inherited (notify) base (notify)
+NV_STATUS __nvoc_up_thunk_Notifier_subdeviceUnregisterEvent(struct Subdevice *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
+    return notifyUnregisterEvent((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(Subdevice, __nvoc_base_Notifier)), hNotifierClient, hNotifierResource, hEventClient, hEvent);
+}
+
+// subdeviceGetOrAllocNotifShare: virtual inherited (notify) base (notify)
+NV_STATUS __nvoc_up_thunk_Notifier_subdeviceGetOrAllocNotifShare(struct Subdevice *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
+    return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(Subdevice, __nvoc_base_Notifier)), hNotifierClient, hNotifierResource, ppNotifShare);
+}
+
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__Subdevice =
+{
+    /*numEntries=*/ 107,
+    /*pExportEntries=*/ __nvoc_exported_method_def_Subdevice
+};
+
+void __nvoc_dtor_GpuResource(GpuResource*);
+void __nvoc_dtor_Notifier(Notifier*);
+void __nvoc_dtor_Subdevice(Subdevice *pThis) {
+    __nvoc_subdeviceDestruct(pThis);
+    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+    __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_Subdevice(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) {
+    RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
+    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
+    PORT_UNREFERENCED_VARIABLE(pThis);
+    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
+    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
+    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
+}
+
+NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *);
+NV_STATUS __nvoc_ctor_Subdevice(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_Subdevice_fail_GpuResource;
+    status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext);
+    if (status != NV_OK) goto __nvoc_ctor_Subdevice_fail_Notifier;
+    __nvoc_init_dataField_Subdevice(pThis, pRmhalspecowner);
+
+    status = __nvoc_subdeviceConstruct(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_Subdevice_fail__init;
+    goto __nvoc_ctor_Subdevice_exit; // Success
+
+__nvoc_ctor_Subdevice_fail__init:
+    __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
+__nvoc_ctor_Subdevice_fail_Notifier:
+    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+__nvoc_ctor_Subdevice_fail_GpuResource:
+__nvoc_ctor_Subdevice_exit:
+
+    return status;
+}
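The generated constructor above uses the classic C goto-unwind pattern: each base constructed so far gets exactly one matching teardown label on the failure path, executed in reverse construction order. The same shape in a standalone sketch, with hypothetical init/teardown functions (not RM code):

// Standalone sketch of the goto-unwind shape used by __nvoc_ctor_Subdevice.
static int initA(void)    { return 0; }
static int initB(void)    { return 0; }
static int initSelf(void) { return 0; }
static void teardownA(void) {}
static void teardownB(void) {}

static int constructLikeNvoc(void)
{
    int status;
    if ((status = initA()) != 0) goto fail_A;        // nothing built yet
    if ((status = initB()) != 0) goto fail_B;        // must unwind A
    if ((status = initSelf()) != 0) goto fail_self;  // must unwind B, then A
    return 0;

fail_self:
    teardownB();
fail_B:
    teardownA();
fail_A:
    return status;
}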
+
+// Vtable initialization
+static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) {
+    RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
+    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
+    PORT_UNREFERENCED_VARIABLE(pThis);
+    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
+    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
+    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
+
+    // subdeviceCtrlCmdGpuGetCachedInfo -- exported (id=0x20800182)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu)
+    pThis->__subdeviceCtrlCmdGpuGetCachedInfo__ = &subdeviceCtrlCmdGpuGetCachedInfo_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetInfoV2 -- exported (id=0x20800102)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x30118u)
+    pThis->__subdeviceCtrlCmdGpuGetInfoV2__ = &subdeviceCtrlCmdGpuGetInfoV2_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetIpVersion -- exported (id=0x2080014d)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+    pThis->__subdeviceCtrlCmdGpuGetIpVersion__ = &subdeviceCtrlCmdGpuGetIpVersion_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuSetOptimusInfo -- exported (id=0x2080014c)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__subdeviceCtrlCmdGpuSetOptimusInfo__ = &subdeviceCtrlCmdGpuSetOptimusInfo_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetNameString -- exported (id=0x20800110)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010au)
+    pThis->__subdeviceCtrlCmdGpuGetNameString__ = &subdeviceCtrlCmdGpuGetNameString_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetShortNameString -- exported (id=0x20800111)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50au)
+    pThis->__subdeviceCtrlCmdGpuGetShortNameString__ = &subdeviceCtrlCmdGpuGetShortNameString_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetSdm -- exported (id=0x20800118)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__subdeviceCtrlCmdGpuGetSdm__ = &subdeviceCtrlCmdGpuGetSdm_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetSimulationInfo -- exported (id=0x20800119)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50bu)
+    pThis->__subdeviceCtrlCmdGpuGetSimulationInfo__ = &subdeviceCtrlCmdGpuGetSimulationInfo_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetEngines -- exported (id=0x20800123)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__subdeviceCtrlCmdGpuGetEngines__ = &subdeviceCtrlCmdGpuGetEngines_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetEnginesV2 -- exported (id=0x20800170)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u)
+    pThis->__subdeviceCtrlCmdGpuGetEnginesV2__ = &subdeviceCtrlCmdGpuGetEnginesV2_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetEngineClasslist -- exported (id=0x20800124)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x109u)
+    pThis->__subdeviceCtrlCmdGpuGetEngineClasslist__ = &subdeviceCtrlCmdGpuGetEngineClasslist_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetEnginePartnerList -- exported (id=0x20800147)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x118u)
+    pThis->__subdeviceCtrlCmdGpuGetEnginePartnerList__ = &subdeviceCtrlCmdGpuGetEnginePartnerList_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuQueryMode -- exported (id=0x20800128)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__subdeviceCtrlCmdGpuQueryMode__ = &subdeviceCtrlCmdGpuQueryMode_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetChipDetails -- exported (id=0x208001a4)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x40448u)
+    pThis->__subdeviceCtrlCmdGpuGetChipDetails__ = &subdeviceCtrlCmdGpuGetChipDetails_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetOEMBoardInfo -- exported (id=0x2080013f)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x448u)
+    pThis->__subdeviceCtrlCmdGpuGetOEMBoardInfo__ = &subdeviceCtrlCmdGpuGetOEMBoardInfo_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetOEMInfo -- exported (id=0x20800169)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x448u)
+    pThis->__subdeviceCtrlCmdGpuGetOEMInfo__ = &subdeviceCtrlCmdGpuGetOEMInfo_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuHandleGpuSR -- exported (id=0x20800167)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u)
+    pThis->__subdeviceCtrlCmdGpuHandleGpuSR__ = &subdeviceCtrlCmdGpuHandleGpuSR_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuInitializeCtx -- exported (id=0x2080012d)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x14244u)
+    pThis->__subdeviceCtrlCmdGpuInitializeCtx__ = &subdeviceCtrlCmdGpuInitializeCtx_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuPromoteCtx -- exported (id=0x2080012b)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10244u)
+    pThis->__subdeviceCtrlCmdGpuPromoteCtx__ = &subdeviceCtrlCmdGpuPromoteCtx_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuEvictCtx -- exported (id=0x2080012c)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c240u)
+    pThis->__subdeviceCtrlCmdGpuEvictCtx__ = &subdeviceCtrlCmdGpuEvictCtx_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetId -- exported (id=0x20800142)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10au)
+    pThis->__subdeviceCtrlCmdGpuGetId__ = &subdeviceCtrlCmdGpuGetId_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetGidInfo -- exported (id=0x2080014a)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10118u)
+    pThis->__subdeviceCtrlCmdGpuGetGidInfo__ = &subdeviceCtrlCmdGpuGetGidInfo_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetPids -- exported (id=0x2080018d)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__subdeviceCtrlCmdGpuGetPids__ = &subdeviceCtrlCmdGpuGetPids_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetPidInfo -- exported (id=0x2080018e)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__subdeviceCtrlCmdGpuGetPidInfo__ = &subdeviceCtrlCmdGpuGetPidInfo_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuQueryFunctionStatus -- exported (id=0x20800173)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+    pThis->__subdeviceCtrlCmdGpuQueryFunctionStatus__ = &subdeviceCtrlCmdGpuQueryFunctionStatus_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetFirstAsyncCEIdx -- exported (id=0x208001e6)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10448u)
+    pThis->__subdeviceCtrlCmdGpuGetFirstAsyncCEIdx__ = &subdeviceCtrlCmdGpuGetFirstAsyncCEIdx_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetVmmuSegmentSize -- exported (id=0x2080017e)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10448u)
+    pThis->__subdeviceCtrlCmdGpuGetVmmuSegmentSize__ = &subdeviceCtrlCmdGpuGetVmmuSegmentSize_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetMaxSupportedPageSize -- exported (id=0x20800188)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10018u)
+    pThis->__subdeviceCtrlCmdGpuGetMaxSupportedPageSize__ = &subdeviceCtrlCmdGpuGetMaxSupportedPageSize_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuHandleVfPriFault -- exported (id=0x20800192)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10248u)
+    pThis->__subdeviceCtrlCmdGpuHandleVfPriFault__ = &subdeviceCtrlCmdGpuHandleVfPriFault_IMPL;
+#endif
+
+    // subdeviceCtrlCmdValidateMemMapRequest -- exported (id=0x20800198)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x110u)
+    pThis->__subdeviceCtrlCmdValidateMemMapRequest__ = &subdeviceCtrlCmdValidateMemMapRequest_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetGfid -- exported (id=0x20800196)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__subdeviceCtrlCmdGpuGetGfid__ = &subdeviceCtrlCmdGpuGetGfid_IMPL;
+#endif
+
+    // subdeviceCtrlCmdUpdateGfidP2pCapability -- exported (id=0x20800197)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
+    pThis->__subdeviceCtrlCmdUpdateGfidP2pCapability__ = &subdeviceCtrlCmdUpdateGfidP2pCapability_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetEngineLoadTimes -- exported (id=0x2080019b)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xau)
+    pThis->__subdeviceCtrlCmdGpuGetEngineLoadTimes__ = &subdeviceCtrlCmdGpuGetEngineLoadTimes_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuMarkDeviceForReset -- exported (id=0x208001a9)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100048u)
+    pThis->__subdeviceCtrlCmdGpuMarkDeviceForReset__ = &subdeviceCtrlCmdGpuMarkDeviceForReset_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuUnmarkDeviceForReset -- exported (id=0x208001aa)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100048u)
+    pThis->__subdeviceCtrlCmdGpuUnmarkDeviceForReset__ = &subdeviceCtrlCmdGpuUnmarkDeviceForReset_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuMarkDeviceForDrainAndReset -- exported (id=0x208001ac)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100048u)
+    pThis->__subdeviceCtrlCmdGpuMarkDeviceForDrainAndReset__ = &subdeviceCtrlCmdGpuMarkDeviceForDrainAndReset_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuUnmarkDeviceForDrainAndReset -- exported (id=0x208001ad)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100048u)
+    pThis->__subdeviceCtrlCmdGpuUnmarkDeviceForDrainAndReset__ = &subdeviceCtrlCmdGpuUnmarkDeviceForDrainAndReset_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetResetStatus -- exported (id=0x208001ab)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x158u)
+    pThis->__subdeviceCtrlCmdGpuGetResetStatus__ = &subdeviceCtrlCmdGpuGetResetStatus_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetDrainAndResetStatus -- exported (id=0x208001ae)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+    pThis->__subdeviceCtrlCmdGpuGetDrainAndResetStatus__ = &subdeviceCtrlCmdGpuGetDrainAndResetStatus_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetConstructedFalconInfo -- exported (id=0x208001b0)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10048u)
+    pThis->__subdeviceCtrlCmdGpuGetConstructedFalconInfo__ = &subdeviceCtrlCmdGpuGetConstructedFalconInfo_IMPL;
+#endif
+
+    // subdeviceCtrlGpuGetFipsStatus -- exported (id=0x208001e4)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__subdeviceCtrlGpuGetFipsStatus__ = &subdeviceCtrlGpuGetFipsStatus_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetVfCaps -- exported (id=0x208001b1)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10109u)
+    pThis->__subdeviceCtrlCmdGpuGetVfCaps__ = &subdeviceCtrlCmdGpuGetVfCaps_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetRecoveryAction -- exported (id=0x208001b2)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u)
+    pThis->__subdeviceCtrlCmdGpuGetRecoveryAction__ = &subdeviceCtrlCmdGpuGetRecoveryAction_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuRpcGspTest -- exported (id=0x208001e8)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100108u)
+    pThis->__subdeviceCtrlCmdGpuRpcGspTest__ = &subdeviceCtrlCmdGpuRpcGspTest_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuRpcGspQuerySizes -- exported (id=0x208001e9)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100108u)
+    pThis->__subdeviceCtrlCmdGpuRpcGspQuerySizes__ = &subdeviceCtrlCmdGpuRpcGspQuerySizes_IMPL;
+#endif
+
+    // subdeviceCtrlCmdRusdGetSupportedFeatures -- exported (id=0x208001ea)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10bu)
+    pThis->__subdeviceCtrlCmdRusdGetSupportedFeatures__ = &subdeviceCtrlCmdRusdGetSupportedFeatures_IMPL;
+#endif
+
+    // subdeviceCtrlCmdRusdSetFeatures -- exported (id=0x208001eb)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x14u)
+    pThis->__subdeviceCtrlCmdRusdSetFeatures__ = &subdeviceCtrlCmdRusdSetFeatures_IMPL;
+#endif
+
+    // subdeviceCtrlCmdEventSetTrigger -- exported (id=0x20800302)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__subdeviceCtrlCmdEventSetTrigger__ = &subdeviceCtrlCmdEventSetTrigger_IMPL;
+#endif
+
+    // subdeviceCtrlCmdEventSetTriggerFifo -- exported (id=0x20800308)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__subdeviceCtrlCmdEventSetTriggerFifo__ = &subdeviceCtrlCmdEventSetTriggerFifo_IMPL;
+#endif
+
+    // subdeviceCtrlCmdEventSetNotification -- exported (id=0x20800301)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10118u)
+    pThis->__subdeviceCtrlCmdEventSetNotification__ = &subdeviceCtrlCmdEventSetNotification_IMPL;
+#endif
+
+    // subdeviceCtrlCmdEventSetMemoryNotifies -- exported (id=0x20800303)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10008u)
+    pThis->__subdeviceCtrlCmdEventSetMemoryNotifies__ = &subdeviceCtrlCmdEventSetMemoryNotifies_IMPL;
+#endif
+
+    // subdeviceCtrlCmdEventSetSemaphoreMemory -- exported (id=0x20800304)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__subdeviceCtrlCmdEventSetSemaphoreMemory__ = &subdeviceCtrlCmdEventSetSemaphoreMemory_IMPL;
+#endif
+
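Each exported entry is guarded by !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(...), so whole control commands can be compiled out by a build-time mask. The macro's definition is not part of this patch; a plausible shape, offered purely as an assumption for illustration:

// Hypothetical sketch only -- the real macro lives elsewhere in the NVOC
// headers. The idea: each method carries a flag bitmask, and a build-time
// disable mask knocks out any method whose flags intersect it.
#ifndef SKETCH_DISABLED_EXPORT_FLAGS
#define SKETCH_DISABLED_EXPORT_FLAGS 0u
#endif

#define SKETCH_EXPORTED_METHOD_DISABLED_BY_FLAG(methodFlags) \
    (((methodFlags) & SKETCH_DISABLED_EXPORT_FLAGS) != 0u)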
+    // subdeviceCtrlCmdEventSetSemaMemValidation -- exported (id=0x20800306)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__subdeviceCtrlCmdEventSetSemaMemValidation__ = &subdeviceCtrlCmdEventSetSemaMemValidation_IMPL;
+#endif
+
+    // subdeviceCtrlCmdEventGspTraceRatsBindEvtbuf -- exported (id=0x2080030a)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
+    pThis->__subdeviceCtrlCmdEventGspTraceRatsBindEvtbuf__ = &subdeviceCtrlCmdEventGspTraceRatsBindEvtbuf_IMPL;
+#endif
+
+    // subdeviceCtrlCmdTimerCancel -- exported (id=0x20800402)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__subdeviceCtrlCmdTimerCancel__ = &subdeviceCtrlCmdTimerCancel_IMPL;
+#endif
+
+    // subdeviceCtrlCmdTimerSchedule -- exported (id=0x20800401)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__subdeviceCtrlCmdTimerSchedule__ = &subdeviceCtrlCmdTimerSchedule_IMPL;
+#endif
+
+    // subdeviceCtrlCmdTimerGetTime -- exported (id=0x20800403)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10118u)
+    pThis->__subdeviceCtrlCmdTimerGetTime__ = &subdeviceCtrlCmdTimerGetTime_IMPL;
+#endif
+
+    // subdeviceCtrlCmdTimerGetRegisterOffset -- exported (id=0x20800404)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__subdeviceCtrlCmdTimerGetRegisterOffset__ = &subdeviceCtrlCmdTimerGetRegisterOffset_IMPL;
+#endif
+
+    // subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo -- exported (id=0x20800406)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x108u)
+    pThis->__subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo__ = &subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_IMPL;
+#endif
+
+    // subdeviceCtrlCmdEccGetClientExposedCounters -- exported (id=0x20803400)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+    pThis->__subdeviceCtrlCmdEccGetClientExposedCounters__ = &subdeviceCtrlCmdEccGetClientExposedCounters_IMPL;
+#endif
+
+    // subdeviceCtrlCmdEccGetVolatileCounts -- exported (id=0x20803401)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+    pThis->__subdeviceCtrlCmdEccGetVolatileCounts__ = &subdeviceCtrlCmdEccGetVolatileCounts_IMPL;
+#endif
+
+    // subdeviceCtrlCmdEccInjectError -- exported (id=0x20803403)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__subdeviceCtrlCmdEccInjectError__ = &subdeviceCtrlCmdEccInjectError_IMPL;
+#endif
+
+    // subdeviceCtrlCmdEccGetRepairStatus -- exported (id=0x20803404)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+    pThis->__subdeviceCtrlCmdEccGetRepairStatus__ = &subdeviceCtrlCmdEccGetRepairStatus_IMPL;
+#endif
+
+    // subdeviceCtrlCmdEccInjectionSupported -- exported (id=0x20803405)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u)
+    pThis->__subdeviceCtrlCmdEccInjectionSupported__ = &subdeviceCtrlCmdEccInjectionSupported_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGspGetFeatures -- halified (singleton optimized) exported (id=0x20803601) body
+    pThis->__subdeviceCtrlCmdGspGetFeatures__ = &subdeviceCtrlCmdGspGetFeatures_92bfc3;
+
+    // subdeviceCtrlCmdGspGetRmHeapStats -- exported (id=0x20803602)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x48u)
+    pThis->__subdeviceCtrlCmdGspGetRmHeapStats__ = &subdeviceCtrlCmdGspGetRmHeapStats_IMPL;
+#endif
+
+    // subdeviceCtrlCmdGpuGetVgpuHeapStats -- exported (id=0x20803603)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
+    pThis->__subdeviceCtrlCmdGpuGetVgpuHeapStats__ = &subdeviceCtrlCmdGpuGetVgpuHeapStats_IMPL;
+#endif
+
+    // subdeviceCtrlCmdLibosGetHeapStats -- exported (id=0x20803604)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x248u)
+    pThis->__subdeviceCtrlCmdLibosGetHeapStats__ = &subdeviceCtrlCmdLibosGetHeapStats_IMPL;
+#endif
+
+    // subdeviceCtrlCmdOsUnixGc6BlockerRefCnt -- exported (id=0x20803d01)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u)
+    pThis->__subdeviceCtrlCmdOsUnixGc6BlockerRefCnt__ = &subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL;
+#endif
+
+    // subdeviceCtrlCmdOsUnixAllowDisallowGcoff -- exported (id=0x20803d02)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x9u)
+    pThis->__subdeviceCtrlCmdOsUnixAllowDisallowGcoff__ = &subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL;
+#endif
+
+    // subdeviceCtrlCmdOsUnixAudioDynamicPower -- exported (id=0x20803d03)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
+    pThis->__subdeviceCtrlCmdOsUnixAudioDynamicPower__ = &subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL;
+#endif
+
+    // subdeviceCtrlCmdDisplayGetIpVersion -- exported (id=0x20800a4b)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdDisplayGetIpVersion__ = &subdeviceCtrlCmdDisplayGetIpVersion_IMPL;
+#endif
+
+    // subdeviceCtrlCmdDisplayGetStaticInfo -- exported (id=0x20800a01)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdDisplayGetStaticInfo__ = &subdeviceCtrlCmdDisplayGetStaticInfo_IMPL;
+#endif
+
+    // subdeviceCtrlCmdDisplaySetChannelPushbuffer -- exported (id=0x20800a58)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdDisplaySetChannelPushbuffer__ = &subdeviceCtrlCmdDisplaySetChannelPushbuffer_IMPL;
+#endif
+
+    // subdeviceCtrlCmdDisplayWriteInstMem -- exported (id=0x20800a49)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdDisplayWriteInstMem__ = &subdeviceCtrlCmdDisplayWriteInstMem_IMPL;
+#endif
+
+    // subdeviceCtrlCmdDisplaySetupRgLineIntr -- exported (id=0x20800a4d)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdDisplaySetupRgLineIntr__ = &subdeviceCtrlCmdDisplaySetupRgLineIntr_IMPL;
+#endif
+
+    // subdeviceCtrlCmdDisplaySetImportedImpData -- exported (id=0x20800a54)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdDisplaySetImportedImpData__ = &subdeviceCtrlCmdDisplaySetImportedImpData_IMPL;
+#endif
+
+    // subdeviceCtrlCmdDisplayGetDisplayMask -- exported (id=0x20800a5d)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdDisplayGetDisplayMask__ = &subdeviceCtrlCmdDisplayGetDisplayMask_IMPL;
+#endif
+
+    // subdeviceCtrlCmdDisplayPinsetsToLockpins -- exported (id=0x20800adc)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdDisplayPinsetsToLockpins__ = &subdeviceCtrlCmdDisplayPinsetsToLockpins_IMPL;
+#endif
+
+    // subdeviceCtrlCmdDisplaySetSliLinkGpioSwControl -- exported (id=0x20800ade)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdDisplaySetSliLinkGpioSwControl__ = &subdeviceCtrlCmdDisplaySetSliLinkGpioSwControl_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalGpioProgramDirection -- exported (id=0x20802300)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalGpioProgramDirection__ = &subdeviceCtrlCmdInternalGpioProgramDirection_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalGpioProgramOutput -- exported (id=0x20802301)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalGpioProgramOutput__ = &subdeviceCtrlCmdInternalGpioProgramOutput_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalGpioReadInput -- exported (id=0x20802302)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalGpioReadInput__ = &subdeviceCtrlCmdInternalGpioReadInput_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalGpioActivateHwFunction -- exported (id=0x20802303)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalGpioActivateHwFunction__ = &subdeviceCtrlCmdInternalGpioActivateHwFunction_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalDisplayAcpiSubsytemActivated -- exported (id=0x20800af0)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalDisplayAcpiSubsytemActivated__ = &subdeviceCtrlCmdInternalDisplayAcpiSubsytemActivated_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalDisplayPreModeSet -- exported (id=0x20800af1)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalDisplayPreModeSet__ = &subdeviceCtrlCmdInternalDisplayPreModeSet_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalDisplayPostModeSet -- exported (id=0x20800af2)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalDisplayPostModeSet__ = &subdeviceCtrlCmdInternalDisplayPostModeSet_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalGetChipInfo -- exported (id=0x20800a36)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x404c0u)
+    pThis->__subdeviceCtrlCmdInternalGetChipInfo__ = &subdeviceCtrlCmdInternalGetChipInfo_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalGetUserRegisterAccessMap -- exported (id=0x20800a41)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4c0u)
+    pThis->__subdeviceCtrlCmdInternalGetUserRegisterAccessMap__ = &subdeviceCtrlCmdInternalGetUserRegisterAccessMap_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalGetDeviceInfoTable -- exported (id=0x20800a40)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c4c0u)
+    pThis->__subdeviceCtrlCmdInternalGetDeviceInfoTable__ = &subdeviceCtrlCmdInternalGetDeviceInfoTable_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalRecoverAllComputeContexts -- exported (id=0x20800a4a)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalRecoverAllComputeContexts__ = &subdeviceCtrlCmdInternalRecoverAllComputeContexts_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalGetSmcMode -- exported (id=0x20800a4c)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalGetSmcMode__ = &subdeviceCtrlCmdInternalGetSmcMode_IMPL;
+#endif
+
+    // subdeviceCtrlCmdIsEgpuBridge -- exported (id=0x20800a55)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdIsEgpuBridge__ = &subdeviceCtrlCmdIsEgpuBridge_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalGpuGetGspRmFreeHeap -- exported (id=0x20800aeb)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalGpuGetGspRmFreeHeap__ = &subdeviceCtrlCmdInternalGpuGetGspRmFreeHeap_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalVmmuGetSpaForGpaEntries -- exported (id=0x20800a57)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalVmmuGetSpaForGpaEntries__ = &subdeviceCtrlCmdInternalVmmuGetSpaForGpaEntries_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalGcxEntryPrerequisite -- exported (id=0x2080a7d7)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalGcxEntryPrerequisite__ = &subdeviceCtrlCmdInternalGcxEntryPrerequisite_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalSetP2pCaps -- exported (id=0x20800ab5)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalSetP2pCaps__ = &subdeviceCtrlCmdInternalSetP2pCaps_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalRemoveP2pCaps -- exported (id=0x20800ab6)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalRemoveP2pCaps__ = &subdeviceCtrlCmdInternalRemoveP2pCaps_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalGetPcieP2pCaps -- exported (id=0x20800ab8)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalGetPcieP2pCaps__ = &subdeviceCtrlCmdInternalGetPcieP2pCaps_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalPostInitBrightcStateLoad -- exported (id=0x20800ac6)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalPostInitBrightcStateLoad__ = &subdeviceCtrlCmdInternalPostInitBrightcStateLoad_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalSetStaticEdidData -- exported (id=0x20800adf)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalSetStaticEdidData__ = &subdeviceCtrlCmdInternalSetStaticEdidData_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalDetectHsVideoBridge -- exported (id=0x20800add)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalDetectHsVideoBridge__ = &subdeviceCtrlCmdInternalDetectHsVideoBridge_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalInitUserSharedData -- exported (id=0x20800afe)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalInitUserSharedData__ = &subdeviceCtrlCmdInternalInitUserSharedData_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalUserSharedDataSetDataPoll -- exported (id=0x20800aff)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalUserSharedDataSetDataPoll__ = &subdeviceCtrlCmdInternalUserSharedDataSetDataPoll_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalControlGspTrace -- exported (id=0x208001e3)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x102d0u)
+    pThis->__subdeviceCtrlCmdInternalControlGspTrace__ = &subdeviceCtrlCmdInternalControlGspTrace_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalGpuClientLowPowerModeEnter -- exported (id=0x20800ae9)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc8u)
+    pThis->__subdeviceCtrlCmdInternalGpuClientLowPowerModeEnter__ = &subdeviceCtrlCmdInternalGpuClientLowPowerModeEnter_IMPL;
+#endif
+
+    // subdeviceCtrlCmdInternalLogOobXid -- exported (id=0x20800a56)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xc0u)
+    pThis->__subdeviceCtrlCmdInternalLogOobXid__ = &subdeviceCtrlCmdInternalLogOobXid_IMPL;
+#endif
+} // End __nvoc_init_funcTable_Subdevice_1 with approximately 107 basic block(s).
+
+
+// Initialize vtable(s) for 137 virtual method(s).
+void __nvoc_init_funcTable_Subdevice(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) {
+
+    // Initialize vtable(s) with 107 per-object function pointer(s).
+    __nvoc_init_funcTable_Subdevice_1(pThis, pRmhalspecowner);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__Subdevice(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;    // (obj) super^4
+    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;    // (res) super^3
+    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;    // (rmrescmn) super^3
+    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;    // (rmres) super^2
+    pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;    // (gpures) super
+    pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier;    // (inotify) super^2
+    pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier;    // (notify) super
+    pThis->__nvoc_pbase_Subdevice = pThis;    // (subdevice) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__GpuResource(&pThis->__nvoc_base_GpuResource);
+    __nvoc_init__Notifier(&pThis->__nvoc_base_Notifier);
+
+    // Pointer(s) to metadata structure(s)
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__Subdevice.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object;    // (obj) super^4
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__Subdevice.metadata__GpuResource.metadata__RmResource.metadata__RsResource;    // (res) super^3
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__Subdevice.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon;    // (rmrescmn) super^3
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__Subdevice.metadata__GpuResource.metadata__RmResource;    // (rmres) super^2
+    pThis->__nvoc_base_GpuResource.__nvoc_metadata_ptr = &__nvoc_metadata__Subdevice.metadata__GpuResource;    // (gpures) super
+    pThis->__nvoc_base_Notifier.__nvoc_base_INotifier.__nvoc_metadata_ptr = &__nvoc_metadata__Subdevice.metadata__Notifier.metadata__INotifier;    // (inotify) super^2
+    pThis->__nvoc_base_Notifier.__nvoc_metadata_ptr = &__nvoc_metadata__Subdevice.metadata__Notifier;    // (notify) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__Subdevice;    // (subdevice) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_Subdevice(pThis, pRmhalspecowner);
+}
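The cached __nvoc_pbase_* pointers set up above make ancestor casts O(1) field reads rather than table walks; each one simply records the address of the embedded base subobject. In sketch form, reusing the field names from the generated code above:

// Sketch: after __nvoc_init__Subdevice() runs, both expressions below denote
// the same address (NVOC's staticCast expands to such a __nvoc_pbase_* read).
static struct RmResource *sketchStaticCastToRmResource(Subdevice *pThis)
{
    struct RmResource *pCached = pThis->__nvoc_pbase_RmResource;
    struct RmResource *pDirect = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
    return (pCached == pDirect) ? pCached : NULL; // always equal after init
}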
+
+NV_STATUS __nvoc_objCreate_Subdevice(Subdevice **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    Subdevice *pThis;
+    RmHalspecOwner *pRmhalspecowner;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(Subdevice), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(Subdevice));
+
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+    // pParent must be a valid object that derives from a halspec owner class.
+    NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    // Link the child into the parent unless flagged not to do so.
+    if (!(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+    }
+
+    if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL)
+        pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent);
+    NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    __nvoc_init__Subdevice(pThis, pRmhalspecowner);
+    status = __nvoc_ctor_Subdevice(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_objCreate_Subdevice_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_Subdevice_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
+    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+        portMemSet(pThis, 0, sizeof(Subdevice));
+    else
+    {
+        portMemFree(pThis);
+        *ppThis = NULL;
+    }
+
+    // coverity[leaked_storage:FALSE]
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_Subdevice(Subdevice **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+    status = __nvoc_objCreate_Subdevice(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+    return status;
+}
+
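__nvoc_objCreateDynamic_Subdevice is the varargs shim that lets a generic factory construct any class: it pops the per-class constructor arguments off a va_list in declaration order and forwards them to the typed creator. A sketch of the calling side, where objCreateDynamicGeneric is hypothetical and shown only to illustrate the va_list hand-off:

// Hypothetical caller for the varargs creator above; argument order in the
// variadic call must match the typed signature exactly.
#include <stdarg.h>

typedef NV_STATUS (*NvocDynCtor)(Subdevice **, Dynamic *, NvU32, va_list);

static NV_STATUS objCreateDynamicGeneric(Subdevice **ppThis, Dynamic *pParent,
                                         NvU32 createFlags, NvocDynCtor ctor, ...)
{
    va_list args;
    NV_STATUS status;
    va_start(args, ctor);
    status = ctor(ppThis, pParent, createFlags, args);  // consumes the va_list
    va_end(args);
    return status;
}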
diff --git a/src/nvidia/generated/g_subdevice_nvoc.h b/src/nvidia/generated/g_subdevice_nvoc.h
new file mode 100644
index 0000000..592b56c
--- /dev/null
+++ b/src/nvidia/generated/g_subdevice_nvoc.h
@@ -0,0 +1,1497 @@
+
+#ifndef _G_SUBDEVICE_NVOC_H_
+#define _G_SUBDEVICE_NVOC_H_
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+#include "g_subdevice_nvoc.h"
+
+#ifndef _SUBDEVICE_H_
+#define _SUBDEVICE_H_
+
+#include "resserv/resserv.h"
+#include "nvoc/prelude.h"
+#include "resserv/rs_resource.h"
+#include "gpu/gpu_resource.h"
+#include "rmapi/event.h"
+#include "containers/btree.h"
+#include "nvoc/utility.h"
+#include "gpu/gpu_halspec.h"
+
+#include "class/cl2080.h"
+#include "ctrl/ctrl2080.h" // rmcontrol parameters
+
+typedef struct TMR_EVENT TMR_EVENT;
+
+
+struct Device;
+
+#ifndef __NVOC_CLASS_Device_TYPEDEF__
+#define __NVOC_CLASS_Device_TYPEDEF__
+typedef struct Device Device;
+#endif /* __NVOC_CLASS_Device_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Device
+#define __nvoc_class_id_Device 0xe0ac20
+#endif /* __nvoc_class_id_Device */
+
+
+
+struct OBJGPU;
+
+#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__
+#define __NVOC_CLASS_OBJGPU_TYPEDEF__
+typedef struct OBJGPU OBJGPU;
+#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJGPU
+#define __nvoc_class_id_OBJGPU 0x7ef3cb
+#endif /* __nvoc_class_id_OBJGPU */
+
+
+
+struct Memory;
+
+#ifndef __NVOC_CLASS_Memory_TYPEDEF__
+#define __NVOC_CLASS_Memory_TYPEDEF__
+typedef struct Memory Memory;
+#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Memory
+#define __nvoc_class_id_Memory 0x4789f2
+#endif /* __nvoc_class_id_Memory */
+
+
+
+struct P2PApi;
+
+#ifndef __NVOC_CLASS_P2PApi_TYPEDEF__
+#define __NVOC_CLASS_P2PApi_TYPEDEF__
+typedef struct P2PApi P2PApi;
+#endif /* __NVOC_CLASS_P2PApi_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_P2PApi
+#define __nvoc_class_id_P2PApi 0x3982b7
+#endif /* __nvoc_class_id_P2PApi */
+
+
+
+/**
+ * A subdevice represents a single GPU within a device. Subdevices provide
+ * unicast semantics; that is, operations involving a subdevice are applied to
+ * the associated GPU only.
+ */
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_SUBDEVICE_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__Subdevice;
+struct NVOC_METADATA__GpuResource;
+struct NVOC_METADATA__Notifier;
+struct NVOC_VTABLE__Subdevice;
+
+
+struct Subdevice {
+
+    // Metadata starts with RTTI structure.
+    union {
+         const struct NVOC_METADATA__Subdevice *__nvoc_metadata_ptr;
+         const struct NVOC_RTTI *__nvoc_rtti;
+    };
+
+    // Parent (i.e. superclass or base class) objects
+    struct GpuResource __nvoc_base_GpuResource;
+    struct Notifier __nvoc_base_Notifier;
+
+    // Ancestor object pointers for `staticCast` feature
+    struct Object *__nvoc_pbase_Object;    // obj super^4
+    struct RsResource *__nvoc_pbase_RsResource;    // res super^3
+    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;    // rmrescmn super^3
+    struct RmResource *__nvoc_pbase_RmResource;    // rmres super^2
+    struct GpuResource *__nvoc_pbase_GpuResource;    // gpures super
+    struct INotifier *__nvoc_pbase_INotifier;    // inotify super^2
+    struct Notifier *__nvoc_pbase_Notifier;    // notify super
+    struct Subdevice *__nvoc_pbase_Subdevice;    // subdevice
+
+    // Vtable with 107 per-object function pointers
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetCachedInfo__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *);  // exported (id=0x20800182)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetInfoV2__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *);  // exported (id=0x20800102)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetIpVersion__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS *);  // exported (id=0x2080014d)
+    NV_STATUS (*__subdeviceCtrlCmdGpuSetOptimusInfo__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS *);  // exported (id=0x2080014c)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetNameString__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS *);  // exported (id=0x20800110)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetShortNameString__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS *);  // exported (id=0x20800111)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetSdm__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_SDM_PARAMS *);  // exported (id=0x20800118)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetSimulationInfo__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS *);  // exported (id=0x20800119)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetEngines__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_ENGINES_PARAMS *);  // exported (id=0x20800123)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetEnginesV2__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *);  // exported (id=0x20800170)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetEngineClasslist__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *);  // exported (id=0x20800124)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetEnginePartnerList__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *);  // exported (id=0x20800147)
+    NV_STATUS (*__subdeviceCtrlCmdGpuQueryMode__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_QUERY_MODE_PARAMS *);  // exported (id=0x20800128)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetChipDetails__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_CHIP_DETAILS_PARAMS *);  // exported (id=0x208001a4)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetOEMBoardInfo__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *);  // exported (id=0x2080013f)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetOEMInfo__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS *);  // exported (id=0x20800169)
+    NV_STATUS (*__subdeviceCtrlCmdGpuHandleGpuSR__)(struct Subdevice * /*this*/);  // exported (id=0x20800167)
+    NV_STATUS (*__subdeviceCtrlCmdGpuInitializeCtx__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *);  // exported (id=0x2080012d)
+    NV_STATUS (*__subdeviceCtrlCmdGpuPromoteCtx__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *);  // exported (id=0x2080012b)
+    NV_STATUS (*__subdeviceCtrlCmdGpuEvictCtx__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_EVICT_CTX_PARAMS *);  // exported (id=0x2080012c)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetId__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_ID_PARAMS *);  // exported (id=0x20800142)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetGidInfo__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_GID_INFO_PARAMS *);  // exported (id=0x2080014a)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetPids__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_PIDS_PARAMS *);  // exported (id=0x2080018d)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetPidInfo__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_PID_INFO_PARAMS *);  // exported (id=0x2080018e)
+    NV_STATUS (*__subdeviceCtrlCmdGpuQueryFunctionStatus__)(struct Subdevice * /*this*/, NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS *);  // exported (id=0x20800173)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetFirstAsyncCEIdx__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_FIRST_ASYNC_CE_IDX_PARAMS *);  // exported (id=0x208001e6)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetVmmuSegmentSize__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS *);  // exported (id=0x2080017e)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetMaxSupportedPageSize__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS *);  // exported (id=0x20800188)
+    NV_STATUS (*__subdeviceCtrlCmdGpuHandleVfPriFault__)(struct Subdevice * /*this*/, NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS *);  // exported (id=0x20800192)
+    NV_STATUS (*__subdeviceCtrlCmdValidateMemMapRequest__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS *);  // exported (id=0x20800198)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetGfid__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_GFID_PARAMS *);  // exported (id=0x20800196)
+    NV_STATUS (*__subdeviceCtrlCmdUpdateGfidP2pCapability__)(struct Subdevice * /*this*/, NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY_PARAMS *);  // exported (id=0x20800197)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetEngineLoadTimes__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS *);  // exported (id=0x2080019b)
+    NV_STATUS (*__subdeviceCtrlCmdGpuMarkDeviceForReset__)(struct Subdevice * /*this*/);  // exported (id=0x208001a9)
+    NV_STATUS (*__subdeviceCtrlCmdGpuUnmarkDeviceForReset__)(struct Subdevice * /*this*/);  // exported (id=0x208001aa)
+    NV_STATUS (*__subdeviceCtrlCmdGpuMarkDeviceForDrainAndReset__)(struct Subdevice * /*this*/);  // exported (id=0x208001ac)
+    NV_STATUS (*__subdeviceCtrlCmdGpuUnmarkDeviceForDrainAndReset__)(struct Subdevice * /*this*/);  // exported (id=0x208001ad)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetResetStatus__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_RESET_STATUS_PARAMS *);  // exported (id=0x208001ab)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetDrainAndResetStatus__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_DRAIN_AND_RESET_STATUS_PARAMS *);  // exported (id=0x208001ae)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetConstructedFalconInfo__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS *);  // exported (id=0x208001b0)
+    NV_STATUS (*__subdeviceCtrlGpuGetFipsStatus__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_FIPS_STATUS_PARAMS *);  // exported (id=0x208001e4)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetVfCaps__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_GET_VF_CAPS_PARAMS *);  // exported (id=0x208001b1)
+    NV_STATUS (*__subdeviceCtrlCmdGpuGetRecoveryAction__)(struct Subdevice *
/*this*/, NV2080_CTRL_GPU_GET_RECOVERY_ACTION_PARAMS *); // exported (id=0x208001b2) + NV_STATUS (*__subdeviceCtrlCmdGpuRpcGspTest__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS *); // exported (id=0x208001e8) + NV_STATUS (*__subdeviceCtrlCmdGpuRpcGspQuerySizes__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_RPC_GSP_QUERY_SIZES_PARAMS *); // exported (id=0x208001e9) + NV_STATUS (*__subdeviceCtrlCmdRusdGetSupportedFeatures__)(struct Subdevice * /*this*/, NV2080_CTRL_RUSD_GET_SUPPORTED_FEATURES_PARAMS *); // exported (id=0x208001ea) + NV_STATUS (*__subdeviceCtrlCmdRusdSetFeatures__)(struct Subdevice * /*this*/, NV2080_CTRL_GPU_RUSD_SET_FEATURES_PARAMS *); // exported (id=0x208001eb) + NV_STATUS (*__subdeviceCtrlCmdEventSetTrigger__)(struct Subdevice * /*this*/); // exported (id=0x20800302) + NV_STATUS (*__subdeviceCtrlCmdEventSetTriggerFifo__)(struct Subdevice * /*this*/, NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS *); // exported (id=0x20800308) + NV_STATUS (*__subdeviceCtrlCmdEventSetNotification__)(struct Subdevice * /*this*/, NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *); // exported (id=0x20800301) + NV_STATUS (*__subdeviceCtrlCmdEventSetMemoryNotifies__)(struct Subdevice * /*this*/, NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *); // exported (id=0x20800303) + NV_STATUS (*__subdeviceCtrlCmdEventSetSemaphoreMemory__)(struct Subdevice * /*this*/, NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS *); // exported (id=0x20800304) + NV_STATUS (*__subdeviceCtrlCmdEventSetSemaMemValidation__)(struct Subdevice * /*this*/, NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS *); // exported (id=0x20800306) + NV_STATUS (*__subdeviceCtrlCmdEventGspTraceRatsBindEvtbuf__)(struct Subdevice * /*this*/, NV2080_CTRL_EVENT_RATS_GSP_TRACE_BIND_EVTBUF_PARAMS *); // exported (id=0x2080030a) + NV_STATUS (*__subdeviceCtrlCmdTimerCancel__)(struct Subdevice * /*this*/); // exported (id=0x20800402) + NV_STATUS (*__subdeviceCtrlCmdTimerSchedule__)(struct Subdevice * /*this*/, NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS *); // exported (id=0x20800401) + NV_STATUS (*__subdeviceCtrlCmdTimerGetTime__)(struct Subdevice * /*this*/, NV2080_CTRL_TIMER_GET_TIME_PARAMS *); // exported (id=0x20800403) + NV_STATUS (*__subdeviceCtrlCmdTimerGetRegisterOffset__)(struct Subdevice * /*this*/, NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS *); // exported (id=0x20800404) + NV_STATUS (*__subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo__)(struct Subdevice * /*this*/, NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS *); // exported (id=0x20800406) + NV_STATUS (*__subdeviceCtrlCmdEccGetClientExposedCounters__)(struct Subdevice * /*this*/, NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS *); // exported (id=0x20803400) + NV_STATUS (*__subdeviceCtrlCmdEccGetVolatileCounts__)(struct Subdevice * /*this*/, NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS *); // exported (id=0x20803401) + NV_STATUS (*__subdeviceCtrlCmdEccInjectError__)(struct Subdevice * /*this*/, NV2080_CTRL_ECC_INJECT_ERROR_PARAMS *); // exported (id=0x20803403) + NV_STATUS (*__subdeviceCtrlCmdEccGetRepairStatus__)(struct Subdevice * /*this*/, NV2080_CTRL_ECC_GET_REPAIR_STATUS_PARAMS *); // exported (id=0x20803404) + NV_STATUS (*__subdeviceCtrlCmdEccInjectionSupported__)(struct Subdevice * /*this*/, NV2080_CTRL_ECC_INJECTION_SUPPORTED_PARAMS *); // exported (id=0x20803405) + NV_STATUS (*__subdeviceCtrlCmdGspGetFeatures__)(struct Subdevice * /*this*/, NV2080_CTRL_GSP_GET_FEATURES_PARAMS *); // halified (singleton optimized) exported (id=0x20803601) body + 
NV_STATUS (*__subdeviceCtrlCmdGspGetRmHeapStats__)(struct Subdevice * /*this*/, NV2080_CTRL_GSP_GET_RM_HEAP_STATS_PARAMS *); // exported (id=0x20803602) + NV_STATUS (*__subdeviceCtrlCmdGpuGetVgpuHeapStats__)(struct Subdevice * /*this*/, NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS *); // exported (id=0x20803603) + NV_STATUS (*__subdeviceCtrlCmdLibosGetHeapStats__)(struct Subdevice * /*this*/, NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS *); // exported (id=0x20803604) + NV_STATUS (*__subdeviceCtrlCmdOsUnixGc6BlockerRefCnt__)(struct Subdevice * /*this*/, NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *); // exported (id=0x20803d01) + NV_STATUS (*__subdeviceCtrlCmdOsUnixAllowDisallowGcoff__)(struct Subdevice * /*this*/, NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *); // exported (id=0x20803d02) + NV_STATUS (*__subdeviceCtrlCmdOsUnixAudioDynamicPower__)(struct Subdevice * /*this*/, NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *); // exported (id=0x20803d03) + NV_STATUS (*__subdeviceCtrlCmdDisplayGetIpVersion__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS *); // exported (id=0x20800a4b) + NV_STATUS (*__subdeviceCtrlCmdDisplayGetStaticInfo__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *); // exported (id=0x20800a01) + NV_STATUS (*__subdeviceCtrlCmdDisplaySetChannelPushbuffer__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *); // exported (id=0x20800a58) + NV_STATUS (*__subdeviceCtrlCmdDisplayWriteInstMem__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *); // exported (id=0x20800a49) + NV_STATUS (*__subdeviceCtrlCmdDisplaySetupRgLineIntr__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS *); // exported (id=0x20800a4d) + NV_STATUS (*__subdeviceCtrlCmdDisplaySetImportedImpData__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS *); // exported (id=0x20800a54) + NV_STATUS (*__subdeviceCtrlCmdDisplayGetDisplayMask__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS *); // exported (id=0x20800a5d) + NV_STATUS (*__subdeviceCtrlCmdDisplayPinsetsToLockpins__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_DISP_PINSETS_TO_LOCKPINS_PARAMS *); // exported (id=0x20800adc) + NV_STATUS (*__subdeviceCtrlCmdDisplaySetSliLinkGpioSwControl__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_DISP_SET_SLI_LINK_GPIO_SW_CONTROL_PARAMS *); // exported (id=0x20800ade) + NV_STATUS (*__subdeviceCtrlCmdInternalGpioProgramDirection__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_GPIO_PROGRAM_DIRECTION_PARAMS *); // exported (id=0x20802300) + NV_STATUS (*__subdeviceCtrlCmdInternalGpioProgramOutput__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_GPIO_PROGRAM_OUTPUT_PARAMS *); // exported (id=0x20802301) + NV_STATUS (*__subdeviceCtrlCmdInternalGpioReadInput__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_GPIO_READ_INPUT_PARAMS *); // exported (id=0x20802302) + NV_STATUS (*__subdeviceCtrlCmdInternalGpioActivateHwFunction__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_GPIO_ACTIVATE_HW_FUNCTION_PARAMS *); // exported (id=0x20802303) + NV_STATUS (*__subdeviceCtrlCmdInternalDisplayAcpiSubsytemActivated__)(struct Subdevice * /*this*/); // exported (id=0x20800af0) + NV_STATUS (*__subdeviceCtrlCmdInternalDisplayPreModeSet__)(struct Subdevice * /*this*/); // exported (id=0x20800af1) + NV_STATUS 
(*__subdeviceCtrlCmdInternalDisplayPostModeSet__)(struct Subdevice * /*this*/); // exported (id=0x20800af2) + NV_STATUS (*__subdeviceCtrlCmdInternalGetChipInfo__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *); // exported (id=0x20800a36) + NV_STATUS (*__subdeviceCtrlCmdInternalGetUserRegisterAccessMap__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS *); // exported (id=0x20800a41) + NV_STATUS (*__subdeviceCtrlCmdInternalGetDeviceInfoTable__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS *); // exported (id=0x20800a40) + NV_STATUS (*__subdeviceCtrlCmdInternalRecoverAllComputeContexts__)(struct Subdevice * /*this*/); // exported (id=0x20800a4a) + NV_STATUS (*__subdeviceCtrlCmdInternalGetSmcMode__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS *); // exported (id=0x20800a4c) + NV_STATUS (*__subdeviceCtrlCmdIsEgpuBridge__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_GET_EGPU_BRIDGE_INFO_PARAMS *); // exported (id=0x20800a55) + NV_STATUS (*__subdeviceCtrlCmdInternalGpuGetGspRmFreeHeap__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_GPU_GET_GSP_RM_FREE_HEAP_PARAMS *); // exported (id=0x20800aeb) + NV_STATUS (*__subdeviceCtrlCmdInternalVmmuGetSpaForGpaEntries__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS *); // exported (id=0x20800a57) + NV_STATUS (*__subdeviceCtrlCmdInternalGcxEntryPrerequisite__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_GCX_ENTRY_PREREQUISITE_PARAMS *); // exported (id=0x2080a7d7) + NV_STATUS (*__subdeviceCtrlCmdInternalSetP2pCaps__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_SET_P2P_CAPS_PARAMS *); // exported (id=0x20800ab5) + NV_STATUS (*__subdeviceCtrlCmdInternalRemoveP2pCaps__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_REMOVE_P2P_CAPS_PARAMS *); // exported (id=0x20800ab6) + NV_STATUS (*__subdeviceCtrlCmdInternalGetPcieP2pCaps__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS *); // exported (id=0x20800ab8) + NV_STATUS (*__subdeviceCtrlCmdInternalPostInitBrightcStateLoad__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *); // exported (id=0x20800ac6) + NV_STATUS (*__subdeviceCtrlCmdInternalSetStaticEdidData__)(struct Subdevice * /*this*/, NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA_PARAMS *); // exported (id=0x20800adf) + NV_STATUS (*__subdeviceCtrlCmdInternalDetectHsVideoBridge__)(struct Subdevice * /*this*/); // exported (id=0x20800add) + NV_STATUS (*__subdeviceCtrlCmdInternalInitUserSharedData__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS *); // exported (id=0x20800afe) + NV_STATUS (*__subdeviceCtrlCmdInternalUserSharedDataSetDataPoll__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL_PARAMS *); // exported (id=0x20800aff) + NV_STATUS (*__subdeviceCtrlCmdInternalControlGspTrace__)(struct Subdevice * /*this*/, NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE_PARAMS *); // exported (id=0x208001e3) + NV_STATUS (*__subdeviceCtrlCmdInternalGpuClientLowPowerModeEnter__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_GPU_CLIENT_LOW_POWER_MODE_ENTER_PARAMS *); // exported (id=0x20800ae9) + NV_STATUS (*__subdeviceCtrlCmdInternalLogOobXid__)(struct Subdevice * /*this*/, NV2080_CTRL_INTERNAL_LOG_OOB_XID_PARAMS *); // exported (id=0x20800a56) + + // Data members + NvU32 deviceInst; + NvU32 subDeviceInst; + struct Device *pDevice; + NvBool 
bMaxGrTickFreqRequested;
+    NvU64 P2PfbMappedBytes;
+    NvU32 notifyActions[198];
+    NvHandle hNotifierMemory;
+    struct Memory *pNotifierMemory;
+    NvHandle hSemMemory;
+    NvU32 videoStream4KCount;
+    NvU32 videoStreamHDCount;
+    NvU32 videoStreamSDCount;
+    NvU32 videoStreamLinearCount;
+    NvU32 ofaCount;
+    NvBool bGpuDebugModeEnabled;
+    NvBool bRcWatchdogEnableRequested;
+    NvBool bRcWatchdogDisableRequested;
+    NvBool bRcWatchdogSoftDisableRequested;
+    NvBool bReservePerfMon;
+    NvU32 perfBoostIndex;
+    NvU32 perfBoostHighRefCount;
+    NvU32 perfBoostLowRefCount;
+    NvBool perfBoostEntryExists;
+    NvBool bLockedClockModeRequested;
+    NvU32 bNvlinkErrorInjectionModeRequested;
+    NvBool bSchedPolicySet;
+    NvBool bGcoffDisallowed;
+    NvBool bUpdateTGP;
+    TMR_EVENT *pTimerEvent;
+};
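[Editor's note] The precomputed __nvoc_pbase_* ancestor pointers in the instance layout above are what make NVOC casts cheap at run time: upcasts are plain field reads, while downcasts go through __nvoc_dynamicCast via the RTTI reachable from __nvoc_metadata_ptr, as the casting-support macros further below show. A brief sketch; the helper name is hypothetical, the field names are the ones declared above.

/* Editor's sketch: using the precomputed ancestor pointers (hypothetical helper). */
static void subdeviceCastExample(Subdevice *pSubdevice)
{
    // Upcasts: direct reads, no runtime lookup needed.
    struct GpuResource *pGpuRes = pSubdevice->__nvoc_pbase_GpuResource;
    struct Notifier *pNotifier = pSubdevice->__nvoc_pbase_Notifier;

    // A downcast from a base pointer would use dynamicCast(), which consults
    // the RTTI and returns NULL when the object is not actually a Subdevice.
    (void)pGpuRes;
    (void)pNotifier;
}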
+
+
+// Vtable with 30 per-class function pointers
+struct NVOC_VTABLE__Subdevice {
+    void (*__subdevicePreDestruct__)(struct Subdevice * /*this*/);  // virtual override (res) base (gpures)
+    NV_STATUS (*__subdeviceInternalControlForward__)(struct Subdevice * /*this*/, NvU32, void *, NvU32);  // virtual override (gpures) base (gpures)
+    NV_STATUS (*__subdeviceControl__)(struct Subdevice * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);  // virtual inherited (gpures) base (gpures)
+    NV_STATUS (*__subdeviceMap__)(struct Subdevice * /*this*/, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);  // virtual inherited (gpures) base (gpures)
+    NV_STATUS (*__subdeviceUnmap__)(struct Subdevice * /*this*/, struct CALL_CONTEXT *, struct RsCpuMapping *);  // virtual inherited (gpures) base (gpures)
+    NvBool (*__subdeviceShareCallback__)(struct Subdevice * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);  // virtual inherited (gpures) base (gpures)
+    NV_STATUS (*__subdeviceGetRegBaseOffsetAndSize__)(struct Subdevice * /*this*/, struct OBJGPU *, NvU32 *, NvU32 *);  // virtual inherited (gpures) base (gpures)
+    NV_STATUS (*__subdeviceGetMapAddrSpace__)(struct Subdevice * /*this*/, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);  // virtual inherited (gpures) base (gpures)
+    NvHandle (*__subdeviceGetInternalObjectHandle__)(struct Subdevice * /*this*/);  // virtual inherited (gpures) base (gpures)
+    NvBool (*__subdeviceAccessCallback__)(struct Subdevice * /*this*/, struct RsClient *, void *, RsAccessRight);  // virtual inherited (rmres) base (gpures)
+    NV_STATUS (*__subdeviceGetMemInterMapParams__)(struct Subdevice * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *);  // virtual inherited (rmres) base (gpures)
+    NV_STATUS (*__subdeviceCheckMemInterUnmap__)(struct Subdevice * /*this*/, NvBool);  // virtual inherited (rmres) base (gpures)
+    NV_STATUS (*__subdeviceGetMemoryMappingDescriptor__)(struct Subdevice * /*this*/, struct MEMORY_DESCRIPTOR **);  // virtual inherited (rmres) base (gpures)
+    NV_STATUS (*__subdeviceControlSerialization_Prologue__)(struct Subdevice * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);  // virtual inherited (rmres) base (gpures)
+    void (*__subdeviceControlSerialization_Epilogue__)(struct Subdevice * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);  // virtual inherited (rmres) base (gpures)
+    NV_STATUS (*__subdeviceControl_Prologue__)(struct Subdevice * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);  // virtual inherited (rmres) base (gpures)
+    void (*__subdeviceControl_Epilogue__)(struct Subdevice * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);  // virtual inherited (rmres) base (gpures)
+    NvBool (*__subdeviceCanCopy__)(struct Subdevice * /*this*/);  // virtual inherited (res) base (gpures)
+    NV_STATUS (*__subdeviceIsDuplicate__)(struct Subdevice * /*this*/, NvHandle, NvBool *);  // virtual inherited (res) base (gpures)
+    NV_STATUS (*__subdeviceControlFilter__)(struct Subdevice * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);  // virtual inherited (res) base (gpures)
+    NvBool (*__subdeviceIsPartialUnmapSupported__)(struct Subdevice * /*this*/);  // inline virtual inherited (res) base (gpures) body
+    NV_STATUS (*__subdeviceMapTo__)(struct Subdevice * /*this*/, RS_RES_MAP_TO_PARAMS *);  // virtual inherited (res) base (gpures)
+    NV_STATUS (*__subdeviceUnmapFrom__)(struct Subdevice * /*this*/, RS_RES_UNMAP_FROM_PARAMS *);  // virtual inherited (res) base (gpures)
+    NvU32 (*__subdeviceGetRefCount__)(struct Subdevice * /*this*/);  // virtual inherited (res) base (gpures)
+    void (*__subdeviceAddAdditionalDependants__)(struct RsClient *, struct Subdevice * /*this*/, RsResourceRef *);  // virtual inherited (res) base (gpures)
+    PEVENTNOTIFICATION * (*__subdeviceGetNotificationListPtr__)(struct Subdevice * /*this*/);  // virtual inherited (notify) base (notify)
+    struct NotifShare * (*__subdeviceGetNotificationShare__)(struct Subdevice * /*this*/);  // virtual inherited (notify) base (notify)
+    void (*__subdeviceSetNotificationShare__)(struct Subdevice * /*this*/, struct NotifShare *);  // virtual inherited (notify) base (notify)
+    NV_STATUS (*__subdeviceUnregisterEvent__)(struct Subdevice * /*this*/, NvHandle, NvHandle, NvHandle, NvHandle);  // virtual inherited (notify) base (notify)
+    NV_STATUS (*__subdeviceGetOrAllocNotifShare__)(struct Subdevice * /*this*/, NvHandle, NvHandle, struct NotifShare **);  // virtual inherited (notify) base (notify)
+};
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__Subdevice {
+    const struct NVOC_RTTI rtti;
+    const struct NVOC_METADATA__GpuResource metadata__GpuResource;
+    const struct NVOC_METADATA__Notifier metadata__Notifier;
+    const struct NVOC_VTABLE__Subdevice vtable;
+};
+
+#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__
+#define __NVOC_CLASS_Subdevice_TYPEDEF__
+typedef struct Subdevice Subdevice;
+#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Subdevice
+#define __nvoc_class_id_Subdevice 0x4b01b3
+#endif /* __nvoc_class_id_Subdevice */
+
+// Casting support
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Subdevice;
+
+#define __staticCast_Subdevice(pThis) \
+    ((pThis)->__nvoc_pbase_Subdevice)
+
+#ifdef __nvoc_subdevice_h_disabled
+#define __dynamicCast_Subdevice(pThis) ((Subdevice*) NULL)
+#else //__nvoc_subdevice_h_disabled
+#define __dynamicCast_Subdevice(pThis) \
+    ((Subdevice*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Subdevice)))
+#endif //__nvoc_subdevice_h_disabled
+
+NV_STATUS __nvoc_objCreateDynamic_Subdevice(Subdevice**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+#define __objCreate_Subdevice(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
+    __nvoc_objCreate_Subdevice((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
+
+
+// Wrapper macros
+#define subdevicePreDestruct_FNPTR(pResource) pResource->__nvoc_metadata_ptr->vtable.__subdevicePreDestruct__
+#define subdevicePreDestruct(pResource)
subdevicePreDestruct_DISPATCH(pResource) +#define subdeviceInternalControlForward_FNPTR(pSubdevice) pSubdevice->__nvoc_metadata_ptr->vtable.__subdeviceInternalControlForward__ +#define subdeviceInternalControlForward(pSubdevice, command, pParams, size) subdeviceInternalControlForward_DISPATCH(pSubdevice, command, pParams, size) +#define subdeviceCtrlCmdGpuGetCachedInfo_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetCachedInfo__ +#define subdeviceCtrlCmdGpuGetCachedInfo(pSubdevice, pGpuInfoParams) subdeviceCtrlCmdGpuGetCachedInfo_DISPATCH(pSubdevice, pGpuInfoParams) +#define subdeviceCtrlCmdGpuGetInfoV2_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetInfoV2__ +#define subdeviceCtrlCmdGpuGetInfoV2(pSubdevice, pGpuInfoParams) subdeviceCtrlCmdGpuGetInfoV2_DISPATCH(pSubdevice, pGpuInfoParams) +#define subdeviceCtrlCmdGpuGetIpVersion_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetIpVersion__ +#define subdeviceCtrlCmdGpuGetIpVersion(pSubdevice, pGpuIpVersionParams) subdeviceCtrlCmdGpuGetIpVersion_DISPATCH(pSubdevice, pGpuIpVersionParams) +#define subdeviceCtrlCmdGpuSetOptimusInfo_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuSetOptimusInfo__ +#define subdeviceCtrlCmdGpuSetOptimusInfo(pSubdevice, pGpuOptimusInfoParams) subdeviceCtrlCmdGpuSetOptimusInfo_DISPATCH(pSubdevice, pGpuOptimusInfoParams) +#define subdeviceCtrlCmdGpuGetNameString_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetNameString__ +#define subdeviceCtrlCmdGpuGetNameString(pSubdevice, pNameStringParams) subdeviceCtrlCmdGpuGetNameString_DISPATCH(pSubdevice, pNameStringParams) +#define subdeviceCtrlCmdGpuGetShortNameString_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetShortNameString__ +#define subdeviceCtrlCmdGpuGetShortNameString(pSubdevice, pShortNameStringParams) subdeviceCtrlCmdGpuGetShortNameString_DISPATCH(pSubdevice, pShortNameStringParams) +#define subdeviceCtrlCmdGpuGetSdm_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetSdm__ +#define subdeviceCtrlCmdGpuGetSdm(pSubdevice, pSdmParams) subdeviceCtrlCmdGpuGetSdm_DISPATCH(pSubdevice, pSdmParams) +#define subdeviceCtrlCmdGpuGetSimulationInfo_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetSimulationInfo__ +#define subdeviceCtrlCmdGpuGetSimulationInfo(pSubdevice, pGpuSimulationInfoParams) subdeviceCtrlCmdGpuGetSimulationInfo_DISPATCH(pSubdevice, pGpuSimulationInfoParams) +#define subdeviceCtrlCmdGpuGetEngines_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetEngines__ +#define subdeviceCtrlCmdGpuGetEngines(pSubdevice, pParams) subdeviceCtrlCmdGpuGetEngines_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetEnginesV2_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetEnginesV2__ +#define subdeviceCtrlCmdGpuGetEnginesV2(pSubdevice, pEngineParams) subdeviceCtrlCmdGpuGetEnginesV2_DISPATCH(pSubdevice, pEngineParams) +#define subdeviceCtrlCmdGpuGetEngineClasslist_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetEngineClasslist__ +#define subdeviceCtrlCmdGpuGetEngineClasslist(pSubdevice, pClassParams) subdeviceCtrlCmdGpuGetEngineClasslist_DISPATCH(pSubdevice, pClassParams) +#define subdeviceCtrlCmdGpuGetEnginePartnerList_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetEnginePartnerList__ +#define subdeviceCtrlCmdGpuGetEnginePartnerList(pSubdevice, pPartnerListParams) subdeviceCtrlCmdGpuGetEnginePartnerList_DISPATCH(pSubdevice, pPartnerListParams) +#define subdeviceCtrlCmdGpuQueryMode_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuQueryMode__ +#define subdeviceCtrlCmdGpuQueryMode(pSubdevice, pQueryMode) 
subdeviceCtrlCmdGpuQueryMode_DISPATCH(pSubdevice, pQueryMode) +#define subdeviceCtrlCmdGpuGetChipDetails_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetChipDetails__ +#define subdeviceCtrlCmdGpuGetChipDetails(pSubdevice, pParams) subdeviceCtrlCmdGpuGetChipDetails_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetOEMBoardInfo_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetOEMBoardInfo__ +#define subdeviceCtrlCmdGpuGetOEMBoardInfo(pSubdevice, pBoardInfo) subdeviceCtrlCmdGpuGetOEMBoardInfo_DISPATCH(pSubdevice, pBoardInfo) +#define subdeviceCtrlCmdGpuGetOEMInfo_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetOEMInfo__ +#define subdeviceCtrlCmdGpuGetOEMInfo(pSubdevice, pOemInfo) subdeviceCtrlCmdGpuGetOEMInfo_DISPATCH(pSubdevice, pOemInfo) +#define subdeviceCtrlCmdGpuHandleGpuSR_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuHandleGpuSR__ +#define subdeviceCtrlCmdGpuHandleGpuSR(pSubdevice) subdeviceCtrlCmdGpuHandleGpuSR_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdGpuInitializeCtx_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuInitializeCtx__ +#define subdeviceCtrlCmdGpuInitializeCtx(pSubdevice, pInitializeCtxParams) subdeviceCtrlCmdGpuInitializeCtx_DISPATCH(pSubdevice, pInitializeCtxParams) +#define subdeviceCtrlCmdGpuPromoteCtx_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuPromoteCtx__ +#define subdeviceCtrlCmdGpuPromoteCtx(pSubdevice, pPromoteCtxParams) subdeviceCtrlCmdGpuPromoteCtx_DISPATCH(pSubdevice, pPromoteCtxParams) +#define subdeviceCtrlCmdGpuEvictCtx_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuEvictCtx__ +#define subdeviceCtrlCmdGpuEvictCtx(pSubdevice, pEvictCtxParams) subdeviceCtrlCmdGpuEvictCtx_DISPATCH(pSubdevice, pEvictCtxParams) +#define subdeviceCtrlCmdGpuGetId_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetId__ +#define subdeviceCtrlCmdGpuGetId(pSubdevice, pIdParams) subdeviceCtrlCmdGpuGetId_DISPATCH(pSubdevice, pIdParams) +#define subdeviceCtrlCmdGpuGetGidInfo_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetGidInfo__ +#define subdeviceCtrlCmdGpuGetGidInfo(pSubdevice, pGidInfoParams) subdeviceCtrlCmdGpuGetGidInfo_DISPATCH(pSubdevice, pGidInfoParams) +#define subdeviceCtrlCmdGpuGetPids_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetPids__ +#define subdeviceCtrlCmdGpuGetPids(pSubdevice, pGetPidsParams) subdeviceCtrlCmdGpuGetPids_DISPATCH(pSubdevice, pGetPidsParams) +#define subdeviceCtrlCmdGpuGetPidInfo_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetPidInfo__ +#define subdeviceCtrlCmdGpuGetPidInfo(pSubdevice, pGetPidInfoParams) subdeviceCtrlCmdGpuGetPidInfo_DISPATCH(pSubdevice, pGetPidInfoParams) +#define subdeviceCtrlCmdGpuQueryFunctionStatus_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuQueryFunctionStatus__ +#define subdeviceCtrlCmdGpuQueryFunctionStatus(pSubdevice, pParams) subdeviceCtrlCmdGpuQueryFunctionStatus_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetFirstAsyncCEIdx_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetFirstAsyncCEIdx__ +#define subdeviceCtrlCmdGpuGetFirstAsyncCEIdx(pSubdevice, pParams) subdeviceCtrlCmdGpuGetFirstAsyncCEIdx_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetVmmuSegmentSize_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetVmmuSegmentSize__ +#define subdeviceCtrlCmdGpuGetVmmuSegmentSize(pSubdevice, pParams) subdeviceCtrlCmdGpuGetVmmuSegmentSize_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetMaxSupportedPageSize_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetMaxSupportedPageSize__ +#define 
subdeviceCtrlCmdGpuGetMaxSupportedPageSize(pSubdevice, pParams) subdeviceCtrlCmdGpuGetMaxSupportedPageSize_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuHandleVfPriFault_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuHandleVfPriFault__ +#define subdeviceCtrlCmdGpuHandleVfPriFault(pSubdevice, pParams) subdeviceCtrlCmdGpuHandleVfPriFault_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdValidateMemMapRequest_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdValidateMemMapRequest__ +#define subdeviceCtrlCmdValidateMemMapRequest(pSubdevice, pParams) subdeviceCtrlCmdValidateMemMapRequest_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetGfid_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetGfid__ +#define subdeviceCtrlCmdGpuGetGfid(pSubdevice, pParams) subdeviceCtrlCmdGpuGetGfid_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdUpdateGfidP2pCapability_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdUpdateGfidP2pCapability__ +#define subdeviceCtrlCmdUpdateGfidP2pCapability(pSubdevice, pParams) subdeviceCtrlCmdUpdateGfidP2pCapability_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetEngineLoadTimes_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetEngineLoadTimes__ +#define subdeviceCtrlCmdGpuGetEngineLoadTimes(pSubdevice, pParams) subdeviceCtrlCmdGpuGetEngineLoadTimes_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuMarkDeviceForReset_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuMarkDeviceForReset__ +#define subdeviceCtrlCmdGpuMarkDeviceForReset(pSubdevice) subdeviceCtrlCmdGpuMarkDeviceForReset_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdGpuUnmarkDeviceForReset_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuUnmarkDeviceForReset__ +#define subdeviceCtrlCmdGpuUnmarkDeviceForReset(pSubdevice) subdeviceCtrlCmdGpuUnmarkDeviceForReset_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdGpuMarkDeviceForDrainAndReset_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuMarkDeviceForDrainAndReset__ +#define subdeviceCtrlCmdGpuMarkDeviceForDrainAndReset(pSubdevice) subdeviceCtrlCmdGpuMarkDeviceForDrainAndReset_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdGpuUnmarkDeviceForDrainAndReset_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuUnmarkDeviceForDrainAndReset__ +#define subdeviceCtrlCmdGpuUnmarkDeviceForDrainAndReset(pSubdevice) subdeviceCtrlCmdGpuUnmarkDeviceForDrainAndReset_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdGpuGetResetStatus_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetResetStatus__ +#define subdeviceCtrlCmdGpuGetResetStatus(pSubdevice, pParams) subdeviceCtrlCmdGpuGetResetStatus_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetDrainAndResetStatus_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetDrainAndResetStatus__ +#define subdeviceCtrlCmdGpuGetDrainAndResetStatus(pSubdevice, pParams) subdeviceCtrlCmdGpuGetDrainAndResetStatus_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetConstructedFalconInfo_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetConstructedFalconInfo__ +#define subdeviceCtrlCmdGpuGetConstructedFalconInfo(pSubdevice, pParams) subdeviceCtrlCmdGpuGetConstructedFalconInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlGpuGetFipsStatus_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlGpuGetFipsStatus__ +#define subdeviceCtrlGpuGetFipsStatus(pSubdevice, pParams) subdeviceCtrlGpuGetFipsStatus_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetVfCaps_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetVfCaps__ +#define 
subdeviceCtrlCmdGpuGetVfCaps(pSubdevice, pParams) subdeviceCtrlCmdGpuGetVfCaps_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetRecoveryAction_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetRecoveryAction__ +#define subdeviceCtrlCmdGpuGetRecoveryAction(pSubdevice, pParams) subdeviceCtrlCmdGpuGetRecoveryAction_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuRpcGspTest_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuRpcGspTest__ +#define subdeviceCtrlCmdGpuRpcGspTest(pSubdevice, pParams) subdeviceCtrlCmdGpuRpcGspTest_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuRpcGspQuerySizes_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuRpcGspQuerySizes__ +#define subdeviceCtrlCmdGpuRpcGspQuerySizes(pSubdevice, pParams) subdeviceCtrlCmdGpuRpcGspQuerySizes_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdRusdGetSupportedFeatures_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdRusdGetSupportedFeatures__ +#define subdeviceCtrlCmdRusdGetSupportedFeatures(pSubdevice, pParams) subdeviceCtrlCmdRusdGetSupportedFeatures_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdRusdSetFeatures_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdRusdSetFeatures__ +#define subdeviceCtrlCmdRusdSetFeatures(pSubdevice, pParams) subdeviceCtrlCmdRusdSetFeatures_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdEventSetTrigger_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdEventSetTrigger__ +#define subdeviceCtrlCmdEventSetTrigger(pSubdevice) subdeviceCtrlCmdEventSetTrigger_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdEventSetTriggerFifo_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdEventSetTriggerFifo__ +#define subdeviceCtrlCmdEventSetTriggerFifo(pSubdevice, pTriggerFifoParams) subdeviceCtrlCmdEventSetTriggerFifo_DISPATCH(pSubdevice, pTriggerFifoParams) +#define subdeviceCtrlCmdEventSetNotification_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdEventSetNotification__ +#define subdeviceCtrlCmdEventSetNotification(pSubdevice, pSetEventParams) subdeviceCtrlCmdEventSetNotification_DISPATCH(pSubdevice, pSetEventParams) +#define subdeviceCtrlCmdEventSetMemoryNotifies_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdEventSetMemoryNotifies__ +#define subdeviceCtrlCmdEventSetMemoryNotifies(pSubdevice, pSetMemoryNotifiesParams) subdeviceCtrlCmdEventSetMemoryNotifies_DISPATCH(pSubdevice, pSetMemoryNotifiesParams) +#define subdeviceCtrlCmdEventSetSemaphoreMemory_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdEventSetSemaphoreMemory__ +#define subdeviceCtrlCmdEventSetSemaphoreMemory(pSubdevice, pSetSemMemoryParams) subdeviceCtrlCmdEventSetSemaphoreMemory_DISPATCH(pSubdevice, pSetSemMemoryParams) +#define subdeviceCtrlCmdEventSetSemaMemValidation_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdEventSetSemaMemValidation__ +#define subdeviceCtrlCmdEventSetSemaMemValidation(pSubdevice, pSetSemaMemValidationParams) subdeviceCtrlCmdEventSetSemaMemValidation_DISPATCH(pSubdevice, pSetSemaMemValidationParams) +#define subdeviceCtrlCmdEventGspTraceRatsBindEvtbuf_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdEventGspTraceRatsBindEvtbuf__ +#define subdeviceCtrlCmdEventGspTraceRatsBindEvtbuf(pSubdevice, pBindParams) subdeviceCtrlCmdEventGspTraceRatsBindEvtbuf_DISPATCH(pSubdevice, pBindParams) +#define subdeviceCtrlCmdTimerCancel_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdTimerCancel__ +#define subdeviceCtrlCmdTimerCancel(pSubdevice) subdeviceCtrlCmdTimerCancel_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdTimerSchedule_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdTimerSchedule__ 
+#define subdeviceCtrlCmdTimerSchedule(pSubdevice, pParams) subdeviceCtrlCmdTimerSchedule_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdTimerGetTime_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdTimerGetTime__ +#define subdeviceCtrlCmdTimerGetTime(pSubdevice, pParams) subdeviceCtrlCmdTimerGetTime_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdTimerGetRegisterOffset_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdTimerGetRegisterOffset__ +#define subdeviceCtrlCmdTimerGetRegisterOffset(pSubdevice, pTimerRegOffsetParams) subdeviceCtrlCmdTimerGetRegisterOffset_DISPATCH(pSubdevice, pTimerRegOffsetParams) +#define subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo__ +#define subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo(pSubdevice, pParams) subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdEccGetClientExposedCounters_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdEccGetClientExposedCounters__ +#define subdeviceCtrlCmdEccGetClientExposedCounters(pSubdevice, pParams) subdeviceCtrlCmdEccGetClientExposedCounters_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdEccGetVolatileCounts_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdEccGetVolatileCounts__ +#define subdeviceCtrlCmdEccGetVolatileCounts(pSubdevice, pParams) subdeviceCtrlCmdEccGetVolatileCounts_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdEccInjectError_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdEccInjectError__ +#define subdeviceCtrlCmdEccInjectError(pSubdevice, pParams) subdeviceCtrlCmdEccInjectError_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdEccGetRepairStatus_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdEccGetRepairStatus__ +#define subdeviceCtrlCmdEccGetRepairStatus(pSubdevice, pParams) subdeviceCtrlCmdEccGetRepairStatus_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdEccInjectionSupported_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdEccInjectionSupported__ +#define subdeviceCtrlCmdEccInjectionSupported(pSubdevice, pParams) subdeviceCtrlCmdEccInjectionSupported_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGspGetFeatures_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGspGetFeatures__ +#define subdeviceCtrlCmdGspGetFeatures(pSubdevice, pGspFeaturesParams) subdeviceCtrlCmdGspGetFeatures_DISPATCH(pSubdevice, pGspFeaturesParams) +#define subdeviceCtrlCmdGspGetFeatures_HAL(pSubdevice, pGspFeaturesParams) subdeviceCtrlCmdGspGetFeatures_DISPATCH(pSubdevice, pGspFeaturesParams) +#define subdeviceCtrlCmdGspGetRmHeapStats_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGspGetRmHeapStats__ +#define subdeviceCtrlCmdGspGetRmHeapStats(pSubdevice, pGspRmHeapStatsParams) subdeviceCtrlCmdGspGetRmHeapStats_DISPATCH(pSubdevice, pGspRmHeapStatsParams) +#define subdeviceCtrlCmdGpuGetVgpuHeapStats_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdGpuGetVgpuHeapStats__ +#define subdeviceCtrlCmdGpuGetVgpuHeapStats(pSubdevice, pParams) subdeviceCtrlCmdGpuGetVgpuHeapStats_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdLibosGetHeapStats_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdLibosGetHeapStats__ +#define subdeviceCtrlCmdLibosGetHeapStats(pSubdevice, pGspLibosHeapStatsParams) subdeviceCtrlCmdLibosGetHeapStats_DISPATCH(pSubdevice, pGspLibosHeapStatsParams) +#define subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdOsUnixGc6BlockerRefCnt__ +#define subdeviceCtrlCmdOsUnixGc6BlockerRefCnt(pSubdevice, 
pParams) subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdOsUnixAllowDisallowGcoff_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdOsUnixAllowDisallowGcoff__ +#define subdeviceCtrlCmdOsUnixAllowDisallowGcoff(pSubdevice, pParams) subdeviceCtrlCmdOsUnixAllowDisallowGcoff_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdOsUnixAudioDynamicPower_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdOsUnixAudioDynamicPower__ +#define subdeviceCtrlCmdOsUnixAudioDynamicPower(pSubdevice, pParams) subdeviceCtrlCmdOsUnixAudioDynamicPower_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplayGetIpVersion_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdDisplayGetIpVersion__ +#define subdeviceCtrlCmdDisplayGetIpVersion(pSubdevice, pParams) subdeviceCtrlCmdDisplayGetIpVersion_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplayGetStaticInfo_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdDisplayGetStaticInfo__ +#define subdeviceCtrlCmdDisplayGetStaticInfo(pSubdevice, pParams) subdeviceCtrlCmdDisplayGetStaticInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplaySetChannelPushbuffer_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdDisplaySetChannelPushbuffer__ +#define subdeviceCtrlCmdDisplaySetChannelPushbuffer(pSubdevice, pParams) subdeviceCtrlCmdDisplaySetChannelPushbuffer_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplayWriteInstMem_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdDisplayWriteInstMem__ +#define subdeviceCtrlCmdDisplayWriteInstMem(pSubdevice, pParams) subdeviceCtrlCmdDisplayWriteInstMem_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplaySetupRgLineIntr_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdDisplaySetupRgLineIntr__ +#define subdeviceCtrlCmdDisplaySetupRgLineIntr(pSubdevice, pParams) subdeviceCtrlCmdDisplaySetupRgLineIntr_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplaySetImportedImpData_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdDisplaySetImportedImpData__ +#define subdeviceCtrlCmdDisplaySetImportedImpData(pSubdevice, pParams) subdeviceCtrlCmdDisplaySetImportedImpData_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplayGetDisplayMask_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdDisplayGetDisplayMask__ +#define subdeviceCtrlCmdDisplayGetDisplayMask(pSubdevice, pParams) subdeviceCtrlCmdDisplayGetDisplayMask_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplayPinsetsToLockpins_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdDisplayPinsetsToLockpins__ +#define subdeviceCtrlCmdDisplayPinsetsToLockpins(pSubdevice, pParams) subdeviceCtrlCmdDisplayPinsetsToLockpins_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplaySetSliLinkGpioSwControl_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdDisplaySetSliLinkGpioSwControl__ +#define subdeviceCtrlCmdDisplaySetSliLinkGpioSwControl(pSubdevice, pParams) subdeviceCtrlCmdDisplaySetSliLinkGpioSwControl_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGpioProgramDirection_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalGpioProgramDirection__ +#define subdeviceCtrlCmdInternalGpioProgramDirection(pSubdevice, pParams) subdeviceCtrlCmdInternalGpioProgramDirection_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGpioProgramOutput_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalGpioProgramOutput__ +#define subdeviceCtrlCmdInternalGpioProgramOutput(pSubdevice, pParams) subdeviceCtrlCmdInternalGpioProgramOutput_DISPATCH(pSubdevice, pParams) +#define 
subdeviceCtrlCmdInternalGpioReadInput_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalGpioReadInput__ +#define subdeviceCtrlCmdInternalGpioReadInput(pSubdevice, pParams) subdeviceCtrlCmdInternalGpioReadInput_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGpioActivateHwFunction_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalGpioActivateHwFunction__ +#define subdeviceCtrlCmdInternalGpioActivateHwFunction(pSubdevice, pParams) subdeviceCtrlCmdInternalGpioActivateHwFunction_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalDisplayAcpiSubsytemActivated_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalDisplayAcpiSubsytemActivated__ +#define subdeviceCtrlCmdInternalDisplayAcpiSubsytemActivated(pSubdevice) subdeviceCtrlCmdInternalDisplayAcpiSubsytemActivated_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdInternalDisplayPreModeSet_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalDisplayPreModeSet__ +#define subdeviceCtrlCmdInternalDisplayPreModeSet(pSubdevice) subdeviceCtrlCmdInternalDisplayPreModeSet_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdInternalDisplayPostModeSet_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalDisplayPostModeSet__ +#define subdeviceCtrlCmdInternalDisplayPostModeSet(pSubdevice) subdeviceCtrlCmdInternalDisplayPostModeSet_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdInternalGetChipInfo_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalGetChipInfo__ +#define subdeviceCtrlCmdInternalGetChipInfo(pSubdevice, pParams) subdeviceCtrlCmdInternalGetChipInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGetUserRegisterAccessMap_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalGetUserRegisterAccessMap__ +#define subdeviceCtrlCmdInternalGetUserRegisterAccessMap(pSubdevice, pParams) subdeviceCtrlCmdInternalGetUserRegisterAccessMap_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGetDeviceInfoTable_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalGetDeviceInfoTable__ +#define subdeviceCtrlCmdInternalGetDeviceInfoTable(pSubdevice, pParams) subdeviceCtrlCmdInternalGetDeviceInfoTable_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalRecoverAllComputeContexts_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalRecoverAllComputeContexts__ +#define subdeviceCtrlCmdInternalRecoverAllComputeContexts(pSubdevice) subdeviceCtrlCmdInternalRecoverAllComputeContexts_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdInternalGetSmcMode_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalGetSmcMode__ +#define subdeviceCtrlCmdInternalGetSmcMode(pSubdevice, pParams) subdeviceCtrlCmdInternalGetSmcMode_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdIsEgpuBridge_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdIsEgpuBridge__ +#define subdeviceCtrlCmdIsEgpuBridge(pSubdevice, pParams) subdeviceCtrlCmdIsEgpuBridge_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGpuGetGspRmFreeHeap_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalGpuGetGspRmFreeHeap__ +#define subdeviceCtrlCmdInternalGpuGetGspRmFreeHeap(pSubdevice, pParams) subdeviceCtrlCmdInternalGpuGetGspRmFreeHeap_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalVmmuGetSpaForGpaEntries_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalVmmuGetSpaForGpaEntries__ +#define subdeviceCtrlCmdInternalVmmuGetSpaForGpaEntries(pSubdevice, pParams) subdeviceCtrlCmdInternalVmmuGetSpaForGpaEntries_DISPATCH(pSubdevice, pParams) +#define 
subdeviceCtrlCmdInternalGcxEntryPrerequisite_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalGcxEntryPrerequisite__ +#define subdeviceCtrlCmdInternalGcxEntryPrerequisite(pSubdevice, pGcxEntryPrerequisite) subdeviceCtrlCmdInternalGcxEntryPrerequisite_DISPATCH(pSubdevice, pGcxEntryPrerequisite) +#define subdeviceCtrlCmdInternalSetP2pCaps_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalSetP2pCaps__ +#define subdeviceCtrlCmdInternalSetP2pCaps(pSubdevice, pParams) subdeviceCtrlCmdInternalSetP2pCaps_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalRemoveP2pCaps_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalRemoveP2pCaps__ +#define subdeviceCtrlCmdInternalRemoveP2pCaps(pSubdevice, pParams) subdeviceCtrlCmdInternalRemoveP2pCaps_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGetPcieP2pCaps_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalGetPcieP2pCaps__ +#define subdeviceCtrlCmdInternalGetPcieP2pCaps(pSubdevice, pParams) subdeviceCtrlCmdInternalGetPcieP2pCaps_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalPostInitBrightcStateLoad_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalPostInitBrightcStateLoad__ +#define subdeviceCtrlCmdInternalPostInitBrightcStateLoad(pSubdevice, pParams) subdeviceCtrlCmdInternalPostInitBrightcStateLoad_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalSetStaticEdidData_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalSetStaticEdidData__ +#define subdeviceCtrlCmdInternalSetStaticEdidData(pSubdevice, pParams) subdeviceCtrlCmdInternalSetStaticEdidData_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalDetectHsVideoBridge_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalDetectHsVideoBridge__ +#define subdeviceCtrlCmdInternalDetectHsVideoBridge(pSubdevice) subdeviceCtrlCmdInternalDetectHsVideoBridge_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdInternalInitUserSharedData_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalInitUserSharedData__ +#define subdeviceCtrlCmdInternalInitUserSharedData(pSubdevice, pParams) subdeviceCtrlCmdInternalInitUserSharedData_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalUserSharedDataSetDataPoll_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalUserSharedDataSetDataPoll__ +#define subdeviceCtrlCmdInternalUserSharedDataSetDataPoll(pSubdevice, pParams) subdeviceCtrlCmdInternalUserSharedDataSetDataPoll_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalControlGspTrace_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalControlGspTrace__ +#define subdeviceCtrlCmdInternalControlGspTrace(pSubdevice, pParams) subdeviceCtrlCmdInternalControlGspTrace_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGpuClientLowPowerModeEnter_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalGpuClientLowPowerModeEnter__ +#define subdeviceCtrlCmdInternalGpuClientLowPowerModeEnter(pSubdevice, pParams) subdeviceCtrlCmdInternalGpuClientLowPowerModeEnter_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalLogOobXid_FNPTR(pSubdevice) pSubdevice->__subdeviceCtrlCmdInternalLogOobXid__ +#define subdeviceCtrlCmdInternalLogOobXid(pSubdevice, pParams) subdeviceCtrlCmdInternalLogOobXid_DISPATCH(pSubdevice, pParams) +#define subdeviceControl_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresControl__ +#define subdeviceControl(pGpuResource, pCallContext, pParams) subdeviceControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define 
subdeviceMap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresMap__ +#define subdeviceMap(pGpuResource, pCallContext, pParams, pCpuMapping) subdeviceMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define subdeviceUnmap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresUnmap__ +#define subdeviceUnmap(pGpuResource, pCallContext, pCpuMapping) subdeviceUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define subdeviceShareCallback_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresShareCallback__ +#define subdeviceShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) subdeviceShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define subdeviceGetRegBaseOffsetAndSize_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetRegBaseOffsetAndSize__ +#define subdeviceGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) subdeviceGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define subdeviceGetMapAddrSpace_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetMapAddrSpace__ +#define subdeviceGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) subdeviceGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define subdeviceGetInternalObjectHandle_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__ +#define subdeviceGetInternalObjectHandle(pGpuResource) subdeviceGetInternalObjectHandle_DISPATCH(pGpuResource) +#define subdeviceAccessCallback_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define subdeviceAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) subdeviceAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define subdeviceGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define subdeviceGetMemInterMapParams(pRmResource, pParams) subdeviceGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define subdeviceCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define subdeviceCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) subdeviceCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define subdeviceGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define subdeviceGetMemoryMappingDescriptor(pRmResource, ppMemDesc) subdeviceGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define subdeviceControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define subdeviceControlSerialization_Prologue(pResource, pCallContext, pParams) subdeviceControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define subdeviceControlSerialization_Epilogue_FNPTR(pResource) 
pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define subdeviceControlSerialization_Epilogue(pResource, pCallContext, pParams) subdeviceControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define subdeviceControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define subdeviceControl_Prologue(pResource, pCallContext, pParams) subdeviceControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define subdeviceControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define subdeviceControl_Epilogue(pResource, pCallContext, pParams) subdeviceControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define subdeviceCanCopy_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define subdeviceCanCopy(pResource) subdeviceCanCopy_DISPATCH(pResource) +#define subdeviceIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define subdeviceIsDuplicate(pResource, hMemory, pDuplicate) subdeviceIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define subdeviceControlFilter_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define subdeviceControlFilter(pResource, pCallContext, pParams) subdeviceControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define subdeviceIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define subdeviceIsPartialUnmapSupported(pResource) subdeviceIsPartialUnmapSupported_DISPATCH(pResource) +#define subdeviceMapTo_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define subdeviceMapTo(pResource, pParams) subdeviceMapTo_DISPATCH(pResource, pParams) +#define subdeviceUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define subdeviceUnmapFrom(pResource, pParams) subdeviceUnmapFrom_DISPATCH(pResource, pParams) +#define subdeviceGetRefCount_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define subdeviceGetRefCount(pResource) subdeviceGetRefCount_DISPATCH(pResource) +#define subdeviceAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define subdeviceAddAdditionalDependants(pClient, pResource, pReference) subdeviceAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define subdeviceGetNotificationListPtr_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationListPtr__ +#define subdeviceGetNotificationListPtr(pNotifier) subdeviceGetNotificationListPtr_DISPATCH(pNotifier) +#define subdeviceGetNotificationShare_FNPTR(pNotifier) 
pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationShare__ +#define subdeviceGetNotificationShare(pNotifier) subdeviceGetNotificationShare_DISPATCH(pNotifier) +#define subdeviceSetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifySetNotificationShare__ +#define subdeviceSetNotificationShare(pNotifier, pNotifShare) subdeviceSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define subdeviceUnregisterEvent_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyUnregisterEvent__ +#define subdeviceUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) subdeviceUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define subdeviceGetOrAllocNotifShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetOrAllocNotifShare__ +#define subdeviceGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) subdeviceGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) + +// Dispatch functions +static inline void subdevicePreDestruct_DISPATCH(struct Subdevice *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__subdevicePreDestruct__(pResource); +} + +static inline NV_STATUS subdeviceInternalControlForward_DISPATCH(struct Subdevice *pSubdevice, NvU32 command, void *pParams, NvU32 size) { + return pSubdevice->__nvoc_metadata_ptr->vtable.__subdeviceInternalControlForward__(pSubdevice, command, pParams, size); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetCachedInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetCachedInfo__(pSubdevice, pGpuInfoParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetInfoV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetInfoV2__(pSubdevice, pGpuInfoParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetIpVersion_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS *pGpuIpVersionParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetIpVersion__(pSubdevice, pGpuIpVersionParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuSetOptimusInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS *pGpuOptimusInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuSetOptimusInfo__(pSubdevice, pGpuOptimusInfoParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetNameString_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS *pNameStringParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetNameString__(pSubdevice, pNameStringParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetShortNameString_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS *pShortNameStringParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetShortNameString__(pSubdevice, pShortNameStringParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetSdm_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SDM_PARAMS *pSdmParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetSdm__(pSubdevice, pSdmParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetSimulationInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS *pGpuSimulationInfoParams) { + return 
pSubdevice->__subdeviceCtrlCmdGpuGetSimulationInfo__(pSubdevice, pGpuSimulationInfoParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEngines_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEngines__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEnginesV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *pEngineParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEnginesV2__(pSubdevice, pEngineParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEngineClasslist_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *pClassParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEngineClasslist__(pSubdevice, pClassParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEnginePartnerList_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pPartnerListParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEnginePartnerList__(pSubdevice, pPartnerListParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuQueryMode_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_MODE_PARAMS *pQueryMode) { + return pSubdevice->__subdeviceCtrlCmdGpuQueryMode__(pSubdevice, pQueryMode); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetChipDetails_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_CHIP_DETAILS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetChipDetails__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetOEMBoardInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *pBoardInfo) { + return pSubdevice->__subdeviceCtrlCmdGpuGetOEMBoardInfo__(pSubdevice, pBoardInfo); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetOEMInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS *pOemInfo) { + return pSubdevice->__subdeviceCtrlCmdGpuGetOEMInfo__(pSubdevice, pOemInfo); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuHandleGpuSR_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdGpuHandleGpuSR__(pSubdevice); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuInitializeCtx_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *pInitializeCtxParams) { + return pSubdevice->__subdeviceCtrlCmdGpuInitializeCtx__(pSubdevice, pInitializeCtxParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuPromoteCtx_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *pPromoteCtxParams) { + return pSubdevice->__subdeviceCtrlCmdGpuPromoteCtx__(pSubdevice, pPromoteCtxParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuEvictCtx_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_EVICT_CTX_PARAMS *pEvictCtxParams) { + return pSubdevice->__subdeviceCtrlCmdGpuEvictCtx__(pSubdevice, pEvictCtxParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetId_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ID_PARAMS *pIdParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetId__(pSubdevice, pIdParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetGidInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_GID_INFO_PARAMS *pGidInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetGidInfo__(pSubdevice, pGidInfoParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetPids_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PIDS_PARAMS *pGetPidsParams) { + return 
pSubdevice->__subdeviceCtrlCmdGpuGetPids__(pSubdevice, pGetPidsParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetPidInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PID_INFO_PARAMS *pGetPidInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetPidInfo__(pSubdevice, pGetPidInfoParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuQueryFunctionStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuQueryFunctionStatus__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetFirstAsyncCEIdx_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_FIRST_ASYNC_CE_IDX_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetFirstAsyncCEIdx__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetVmmuSegmentSize_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetVmmuSegmentSize__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetMaxSupportedPageSize_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetMaxSupportedPageSize__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuHandleVfPriFault_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuHandleVfPriFault__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdValidateMemMapRequest_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdValidateMemMapRequest__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetGfid_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_GFID_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetGfid__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdUpdateGfidP2pCapability_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdUpdateGfidP2pCapability__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEngineLoadTimes_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEngineLoadTimes__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuMarkDeviceForReset_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdGpuMarkDeviceForReset__(pSubdevice); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuUnmarkDeviceForReset_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdGpuUnmarkDeviceForReset__(pSubdevice); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuMarkDeviceForDrainAndReset_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdGpuMarkDeviceForDrainAndReset__(pSubdevice); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuUnmarkDeviceForDrainAndReset_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdGpuUnmarkDeviceForDrainAndReset__(pSubdevice); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetResetStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_RESET_STATUS_PARAMS *pParams) { + return 
pSubdevice->__subdeviceCtrlCmdGpuGetResetStatus__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetDrainAndResetStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_DRAIN_AND_RESET_STATUS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetDrainAndResetStatus__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetConstructedFalconInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetConstructedFalconInfo__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlGpuGetFipsStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_FIPS_STATUS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlGpuGetFipsStatus__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetVfCaps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_VF_CAPS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetVfCaps__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetRecoveryAction_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_RECOVERY_ACTION_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetRecoveryAction__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuRpcGspTest_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuRpcGspTest__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuRpcGspQuerySizes_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_RPC_GSP_QUERY_SIZES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuRpcGspQuerySizes__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdRusdGetSupportedFeatures_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_RUSD_GET_SUPPORTED_FEATURES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdRusdGetSupportedFeatures__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdRusdSetFeatures_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_RUSD_SET_FEATURES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdRusdSetFeatures__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdEventSetTrigger_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdEventSetTrigger__(pSubdevice); +} + +static inline NV_STATUS subdeviceCtrlCmdEventSetTriggerFifo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS *pTriggerFifoParams) { + return pSubdevice->__subdeviceCtrlCmdEventSetTriggerFifo__(pSubdevice, pTriggerFifoParams); +} + +static inline NV_STATUS subdeviceCtrlCmdEventSetNotification_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams) { + return pSubdevice->__subdeviceCtrlCmdEventSetNotification__(pSubdevice, pSetEventParams); +} + +static inline NV_STATUS subdeviceCtrlCmdEventSetMemoryNotifies_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *pSetMemoryNotifiesParams) { + return pSubdevice->__subdeviceCtrlCmdEventSetMemoryNotifies__(pSubdevice, pSetMemoryNotifiesParams); +} + +static inline NV_STATUS subdeviceCtrlCmdEventSetSemaphoreMemory_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS *pSetSemMemoryParams) { + return pSubdevice->__subdeviceCtrlCmdEventSetSemaphoreMemory__(pSubdevice, pSetSemMemoryParams); +} + +static inline NV_STATUS 
subdeviceCtrlCmdEventSetSemaMemValidation_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS *pSetSemaMemValidationParams) { + return pSubdevice->__subdeviceCtrlCmdEventSetSemaMemValidation__(pSubdevice, pSetSemaMemValidationParams); +} + +static inline NV_STATUS subdeviceCtrlCmdEventGspTraceRatsBindEvtbuf_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_RATS_GSP_TRACE_BIND_EVTBUF_PARAMS *pBindParams) { + return pSubdevice->__subdeviceCtrlCmdEventGspTraceRatsBindEvtbuf__(pSubdevice, pBindParams); +} + +static inline NV_STATUS subdeviceCtrlCmdTimerCancel_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdTimerCancel__(pSubdevice); +} + +static inline NV_STATUS subdeviceCtrlCmdTimerSchedule_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdTimerSchedule__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdTimerGetTime_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_TIME_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdTimerGetTime__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdTimerGetRegisterOffset_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS *pTimerRegOffsetParams) { + return pSubdevice->__subdeviceCtrlCmdTimerGetRegisterOffset__(pSubdevice, pTimerRegOffsetParams); +} + +static inline NV_STATUS subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdEccGetClientExposedCounters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdEccGetClientExposedCounters__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdEccGetVolatileCounts_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdEccGetVolatileCounts__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdEccInjectError_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_INJECT_ERROR_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdEccInjectError__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdEccGetRepairStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_GET_REPAIR_STATUS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdEccGetRepairStatus__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdEccInjectionSupported_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_INJECTION_SUPPORTED_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdEccInjectionSupported__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGspGetFeatures_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GSP_GET_FEATURES_PARAMS *pGspFeaturesParams) { + return pSubdevice->__subdeviceCtrlCmdGspGetFeatures__(pSubdevice, pGspFeaturesParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGspGetRmHeapStats_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GSP_GET_RM_HEAP_STATS_PARAMS *pGspRmHeapStatsParams) { + return pSubdevice->__subdeviceCtrlCmdGspGetRmHeapStats__(pSubdevice, pGspRmHeapStatsParams); +} + +static inline NV_STATUS subdeviceCtrlCmdGpuGetVgpuHeapStats_DISPATCH(struct Subdevice 
*pSubdevice, NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetVgpuHeapStats__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdLibosGetHeapStats_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS *pGspLibosHeapStatsParams) { + return pSubdevice->__subdeviceCtrlCmdLibosGetHeapStats__(pSubdevice, pGspLibosHeapStatsParams); +} + +static inline NV_STATUS subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdOsUnixGc6BlockerRefCnt__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdOsUnixAllowDisallowGcoff_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdOsUnixAllowDisallowGcoff__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdOsUnixAudioDynamicPower_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdOsUnixAudioDynamicPower__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdDisplayGetIpVersion_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplayGetIpVersion__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdDisplayGetStaticInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplayGetStaticInfo__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdDisplaySetChannelPushbuffer_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplaySetChannelPushbuffer__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdDisplayWriteInstMem_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplayWriteInstMem__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdDisplaySetupRgLineIntr_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplaySetupRgLineIntr__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdDisplaySetImportedImpData_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplaySetImportedImpData__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdDisplayGetDisplayMask_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplayGetDisplayMask__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdDisplayPinsetsToLockpins_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISP_PINSETS_TO_LOCKPINS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplayPinsetsToLockpins__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdDisplaySetSliLinkGpioSwControl_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISP_SET_SLI_LINK_GPIO_SW_CONTROL_PARAMS *pParams) { + return 
pSubdevice->__subdeviceCtrlCmdDisplaySetSliLinkGpioSwControl__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalGpioProgramDirection_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPIO_PROGRAM_DIRECTION_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGpioProgramDirection__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalGpioProgramOutput_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPIO_PROGRAM_OUTPUT_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGpioProgramOutput__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalGpioReadInput_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPIO_READ_INPUT_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGpioReadInput__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalGpioActivateHwFunction_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPIO_ACTIVATE_HW_FUNCTION_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGpioActivateHwFunction__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalDisplayAcpiSubsytemActivated_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalDisplayAcpiSubsytemActivated__(pSubdevice); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalDisplayPreModeSet_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalDisplayPreModeSet__(pSubdevice); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalDisplayPostModeSet_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalDisplayPostModeSet__(pSubdevice); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalGetChipInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetChipInfo__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalGetUserRegisterAccessMap_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetUserRegisterAccessMap__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalGetDeviceInfoTable_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetDeviceInfoTable__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalRecoverAllComputeContexts_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalRecoverAllComputeContexts__(pSubdevice); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalGetSmcMode_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetSmcMode__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdIsEgpuBridge_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_EGPU_BRIDGE_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdIsEgpuBridge__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalGpuGetGspRmFreeHeap_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_GSP_RM_FREE_HEAP_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGpuGetGspRmFreeHeap__(pSubdevice, pParams); +} + +static inline NV_STATUS 
subdeviceCtrlCmdInternalVmmuGetSpaForGpaEntries_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalVmmuGetSpaForGpaEntries__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalGcxEntryPrerequisite_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GCX_ENTRY_PREREQUISITE_PARAMS *pGcxEntryPrerequisite) { + return pSubdevice->__subdeviceCtrlCmdInternalGcxEntryPrerequisite__(pSubdevice, pGcxEntryPrerequisite); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalSetP2pCaps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_SET_P2P_CAPS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalSetP2pCaps__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalRemoveP2pCaps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_REMOVE_P2P_CAPS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalRemoveP2pCaps__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalGetPcieP2pCaps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetPcieP2pCaps__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalPostInitBrightcStateLoad_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalPostInitBrightcStateLoad__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalSetStaticEdidData_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalSetStaticEdidData__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalDetectHsVideoBridge_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalDetectHsVideoBridge__(pSubdevice); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalInitUserSharedData_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalInitUserSharedData__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalUserSharedDataSetDataPoll_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalUserSharedDataSetDataPoll__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalControlGspTrace_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalControlGspTrace__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalGpuClientLowPowerModeEnter_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_CLIENT_LOW_POWER_MODE_ENTER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGpuClientLowPowerModeEnter__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdInternalLogOobXid_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_LOG_OOB_XID_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalLogOobXid__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceControl_DISPATCH(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return 
pGpuResource->__nvoc_metadata_ptr->vtable.__subdeviceControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS subdeviceMap_DISPATCH(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__subdeviceMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS subdeviceUnmap_DISPATCH(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__subdeviceUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NvBool subdeviceShareCallback_DISPATCH(struct Subdevice *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__subdeviceShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS subdeviceGetRegBaseOffsetAndSize_DISPATCH(struct Subdevice *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__subdeviceGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS subdeviceGetMapAddrSpace_DISPATCH(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__subdeviceGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle subdeviceGetInternalObjectHandle_DISPATCH(struct Subdevice *pGpuResource) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__subdeviceGetInternalObjectHandle__(pGpuResource); +} + +static inline NvBool subdeviceAccessCallback_DISPATCH(struct Subdevice *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__subdeviceAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS subdeviceGetMemInterMapParams_DISPATCH(struct Subdevice *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__subdeviceGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS subdeviceCheckMemInterUnmap_DISPATCH(struct Subdevice *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__subdeviceCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS subdeviceGetMemoryMappingDescriptor_DISPATCH(struct Subdevice *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__subdeviceGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS subdeviceControlSerialization_Prologue_DISPATCH(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__subdeviceControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void subdeviceControlSerialization_Epilogue_DISPATCH(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__subdeviceControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS 
subdeviceControl_Prologue_DISPATCH(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__subdeviceControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void subdeviceControl_Epilogue_DISPATCH(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__subdeviceControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool subdeviceCanCopy_DISPATCH(struct Subdevice *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__subdeviceCanCopy__(pResource); +} + +static inline NV_STATUS subdeviceIsDuplicate_DISPATCH(struct Subdevice *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__subdeviceIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline NV_STATUS subdeviceControlFilter_DISPATCH(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__subdeviceControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool subdeviceIsPartialUnmapSupported_DISPATCH(struct Subdevice *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__subdeviceIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS subdeviceMapTo_DISPATCH(struct Subdevice *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__subdeviceMapTo__(pResource, pParams); +} + +static inline NV_STATUS subdeviceUnmapFrom_DISPATCH(struct Subdevice *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__subdeviceUnmapFrom__(pResource, pParams); +} + +static inline NvU32 subdeviceGetRefCount_DISPATCH(struct Subdevice *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__subdeviceGetRefCount__(pResource); +} + +static inline void subdeviceAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Subdevice *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__subdeviceAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline PEVENTNOTIFICATION * subdeviceGetNotificationListPtr_DISPATCH(struct Subdevice *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__subdeviceGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare * subdeviceGetNotificationShare_DISPATCH(struct Subdevice *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__subdeviceGetNotificationShare__(pNotifier); +} + +static inline void subdeviceSetNotificationShare_DISPATCH(struct Subdevice *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvoc_metadata_ptr->vtable.__subdeviceSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS subdeviceUnregisterEvent_DISPATCH(struct Subdevice *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvoc_metadata_ptr->vtable.__subdeviceUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS subdeviceGetOrAllocNotifShare_DISPATCH(struct Subdevice *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__nvoc_metadata_ptr->vtable.__subdeviceGetOrAllocNotifShare__(pNotifier, hNotifierClient, 
hNotifierResource, ppNotifShare); +} + +void subdevicePreDestruct_IMPL(struct Subdevice *pResource); + +NV_STATUS subdeviceInternalControlForward_IMPL(struct Subdevice *pSubdevice, NvU32 command, void *pParams, NvU32 size); + +NV_STATUS subdeviceCtrlCmdGpuGetCachedInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams); + +NV_STATUS subdeviceCtrlCmdGpuGetInfoV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams); + +NV_STATUS subdeviceCtrlCmdGpuGetIpVersion_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS *pGpuIpVersionParams); + +NV_STATUS subdeviceCtrlCmdGpuSetOptimusInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS *pGpuOptimusInfoParams); + +NV_STATUS subdeviceCtrlCmdGpuGetNameString_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS *pNameStringParams); + +NV_STATUS subdeviceCtrlCmdGpuGetShortNameString_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS *pShortNameStringParams); + +NV_STATUS subdeviceCtrlCmdGpuGetSdm_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SDM_PARAMS *pSdmParams); + +NV_STATUS subdeviceCtrlCmdGpuGetSimulationInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS *pGpuSimulationInfoParams); + +NV_STATUS subdeviceCtrlCmdGpuGetEngines_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINES_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdGpuGetEnginesV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *pEngineParams); + +NV_STATUS subdeviceCtrlCmdGpuGetEngineClasslist_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *pClassParams); + +NV_STATUS subdeviceCtrlCmdGpuGetEnginePartnerList_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pPartnerListParams); + +NV_STATUS subdeviceCtrlCmdGpuQueryMode_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_MODE_PARAMS *pQueryMode); + +NV_STATUS subdeviceCtrlCmdGpuGetChipDetails_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_CHIP_DETAILS_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdGpuGetOEMBoardInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *pBoardInfo); + +NV_STATUS subdeviceCtrlCmdGpuGetOEMInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS *pOemInfo); + +NV_STATUS subdeviceCtrlCmdGpuHandleGpuSR_IMPL(struct Subdevice *pSubdevice); + +NV_STATUS subdeviceCtrlCmdGpuInitializeCtx_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *pInitializeCtxParams); + +NV_STATUS subdeviceCtrlCmdGpuPromoteCtx_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *pPromoteCtxParams); + +NV_STATUS subdeviceCtrlCmdGpuEvictCtx_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_EVICT_CTX_PARAMS *pEvictCtxParams); + +NV_STATUS subdeviceCtrlCmdGpuGetId_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ID_PARAMS *pIdParams); + +NV_STATUS subdeviceCtrlCmdGpuGetGidInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_GID_INFO_PARAMS *pGidInfoParams); + +NV_STATUS subdeviceCtrlCmdGpuGetPids_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PIDS_PARAMS *pGetPidsParams); + +NV_STATUS subdeviceCtrlCmdGpuGetPidInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PID_INFO_PARAMS *pGetPidInfoParams); + +NV_STATUS subdeviceCtrlCmdGpuQueryFunctionStatus_IMPL(struct Subdevice *pSubdevice, 
NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdGpuGetFirstAsyncCEIdx_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_FIRST_ASYNC_CE_IDX_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdGpuGetVmmuSegmentSize_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdGpuGetMaxSupportedPageSize_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdGpuHandleVfPriFault_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdValidateMemMapRequest_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdGpuGetGfid_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_GFID_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdUpdateGfidP2pCapability_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdGpuGetEngineLoadTimes_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdGpuMarkDeviceForReset_IMPL(struct Subdevice *pSubdevice); + +NV_STATUS subdeviceCtrlCmdGpuUnmarkDeviceForReset_IMPL(struct Subdevice *pSubdevice); + +NV_STATUS subdeviceCtrlCmdGpuMarkDeviceForDrainAndReset_IMPL(struct Subdevice *pSubdevice); + +NV_STATUS subdeviceCtrlCmdGpuUnmarkDeviceForDrainAndReset_IMPL(struct Subdevice *pSubdevice); + +NV_STATUS subdeviceCtrlCmdGpuGetResetStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_RESET_STATUS_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdGpuGetDrainAndResetStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_DRAIN_AND_RESET_STATUS_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdGpuGetConstructedFalconInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS *pParams); + +NV_STATUS subdeviceCtrlGpuGetFipsStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_FIPS_STATUS_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdGpuGetVfCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_VF_CAPS_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdGpuGetRecoveryAction_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_RECOVERY_ACTION_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdGpuRpcGspTest_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdGpuRpcGspQuerySizes_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_RPC_GSP_QUERY_SIZES_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdRusdGetSupportedFeatures_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_RUSD_GET_SUPPORTED_FEATURES_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdRusdSetFeatures_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_RUSD_SET_FEATURES_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdEventSetTrigger_IMPL(struct Subdevice *pSubdevice); + +NV_STATUS subdeviceCtrlCmdEventSetTriggerFifo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS *pTriggerFifoParams); + +NV_STATUS subdeviceCtrlCmdEventSetNotification_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams); + +NV_STATUS subdeviceCtrlCmdEventSetMemoryNotifies_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *pSetMemoryNotifiesParams); + +NV_STATUS subdeviceCtrlCmdEventSetSemaphoreMemory_IMPL(struct Subdevice 
*pSubdevice, NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS *pSetSemMemoryParams); + +NV_STATUS subdeviceCtrlCmdEventSetSemaMemValidation_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS *pSetSemaMemValidationParams); + +NV_STATUS subdeviceCtrlCmdEventGspTraceRatsBindEvtbuf_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_RATS_GSP_TRACE_BIND_EVTBUF_PARAMS *pBindParams); + +NV_STATUS subdeviceCtrlCmdTimerCancel_IMPL(struct Subdevice *pSubdevice); + +NV_STATUS subdeviceCtrlCmdTimerSchedule_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdTimerGetTime_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_TIME_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdTimerGetRegisterOffset_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS *pTimerRegOffsetParams); + +NV_STATUS subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdEccGetClientExposedCounters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdEccGetVolatileCounts_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdEccInjectError_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_INJECT_ERROR_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdEccGetRepairStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_GET_REPAIR_STATUS_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdEccInjectionSupported_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_INJECTION_SUPPORTED_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGspGetFeatures_92bfc3(struct Subdevice *pSubdevice, NV2080_CTRL_GSP_GET_FEATURES_PARAMS *pGspFeaturesParams) { + NV_ASSERT_PRECOMP(0); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS subdeviceCtrlCmdGspGetRmHeapStats_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GSP_GET_RM_HEAP_STATS_PARAMS *pGspRmHeapStatsParams); + +NV_STATUS subdeviceCtrlCmdGpuGetVgpuHeapStats_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdLibosGetHeapStats_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS *pGspLibosHeapStatsParams); + +NV_STATUS subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdDisplayGetIpVersion_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdDisplayGetStaticInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdDisplaySetChannelPushbuffer_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdDisplayWriteInstMem_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdDisplaySetupRgLineIntr_IMPL(struct Subdevice *pSubdevice, 
NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdDisplaySetImportedImpData_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdDisplayGetDisplayMask_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdDisplayPinsetsToLockpins_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISP_PINSETS_TO_LOCKPINS_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdDisplaySetSliLinkGpioSwControl_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISP_SET_SLI_LINK_GPIO_SW_CONTROL_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalGpioProgramDirection_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPIO_PROGRAM_DIRECTION_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalGpioProgramOutput_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPIO_PROGRAM_OUTPUT_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalGpioReadInput_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPIO_READ_INPUT_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalGpioActivateHwFunction_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPIO_ACTIVATE_HW_FUNCTION_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalDisplayAcpiSubsytemActivated_IMPL(struct Subdevice *pSubdevice); + +NV_STATUS subdeviceCtrlCmdInternalDisplayPreModeSet_IMPL(struct Subdevice *pSubdevice); + +NV_STATUS subdeviceCtrlCmdInternalDisplayPostModeSet_IMPL(struct Subdevice *pSubdevice); + +NV_STATUS subdeviceCtrlCmdInternalGetChipInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalGetUserRegisterAccessMap_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalGetDeviceInfoTable_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalRecoverAllComputeContexts_IMPL(struct Subdevice *pSubdevice); + +NV_STATUS subdeviceCtrlCmdInternalGetSmcMode_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdIsEgpuBridge_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_EGPU_BRIDGE_INFO_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalGpuGetGspRmFreeHeap_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_GSP_RM_FREE_HEAP_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalVmmuGetSpaForGpaEntries_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalGcxEntryPrerequisite_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GCX_ENTRY_PREREQUISITE_PARAMS *pGcxEntryPrerequisite); + +NV_STATUS subdeviceCtrlCmdInternalSetP2pCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_SET_P2P_CAPS_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalRemoveP2pCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_REMOVE_P2P_CAPS_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalGetPcieP2pCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalPostInitBrightcStateLoad_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *pParams); + +NV_STATUS 
subdeviceCtrlCmdInternalSetStaticEdidData_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalDetectHsVideoBridge_IMPL(struct Subdevice *pSubdevice); + +NV_STATUS subdeviceCtrlCmdInternalInitUserSharedData_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalUserSharedDataSetDataPoll_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalControlGspTrace_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalGpuClientLowPowerModeEnter_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_CLIENT_LOW_POWER_MODE_ENTER_PARAMS *pParams); + +NV_STATUS subdeviceCtrlCmdInternalLogOobXid_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_LOG_OOB_XID_PARAMS *pParams); + +static inline NV_STATUS subdeviceSetPerfmonReservation(struct Subdevice *pSubdevice, NvBool bReservation, NvBool bClientHandlesGrGating, NvBool bRmHandlesIdleSlow) { + return NV_OK; +} + +static inline NV_STATUS subdeviceUnsetDynamicBoostLimit(struct Subdevice *pSubdevice) { + return NV_OK; +} + +static inline NV_STATUS subdeviceReleaseVideoStreams(struct Subdevice *pSubdevice) { + return NV_OK; +} + +static inline void subdeviceRestoreLockedClock(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext) { + return; +} + +static inline void subdeviceRestoreVF(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext) { + return; +} + +static inline void subdeviceReleaseNvlinkErrorInjectionMode(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext) { + return; +} + +static inline void subdeviceRestoreGrTickFreq(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext) { + return; +} + +static inline void subdeviceRestoreWatchdog(struct Subdevice *pSubdevice) { + return; +} + +NV_STATUS subdeviceConstruct_IMPL(struct Subdevice *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_subdeviceConstruct(arg_pResource, arg_pCallContext, arg_pParams) subdeviceConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +void subdeviceDestruct_IMPL(struct Subdevice *pResource); + +#define __nvoc_subdeviceDestruct(pResource) subdeviceDestruct_IMPL(pResource) +void subdeviceUnsetGpuDebugMode_IMPL(struct Subdevice *pSubdevice); + +#ifdef __nvoc_subdevice_h_disabled +static inline void subdeviceUnsetGpuDebugMode(struct Subdevice *pSubdevice) { + NV_ASSERT_FAILED_PRECOMP("Subdevice was disabled!"); +} +#else //__nvoc_subdevice_h_disabled +#define subdeviceUnsetGpuDebugMode(pSubdevice) subdeviceUnsetGpuDebugMode_IMPL(pSubdevice) +#endif //__nvoc_subdevice_h_disabled + +void subdeviceReleaseComputeModeReservation_IMPL(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext); + +#ifdef __nvoc_subdevice_h_disabled +static inline void subdeviceReleaseComputeModeReservation(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext) { + NV_ASSERT_FAILED_PRECOMP("Subdevice was disabled!"); +} +#else //__nvoc_subdevice_h_disabled +#define subdeviceReleaseComputeModeReservation(pSubdevice, pCallContext) subdeviceReleaseComputeModeReservation_IMPL(pSubdevice, pCallContext) +#endif //__nvoc_subdevice_h_disabled + +NV_STATUS subdeviceGetByHandle_IMPL(struct RsClient *pClient, NvHandle hSubdevice, struct Subdevice 
**ppSubdevice); + +#define subdeviceGetByHandle(pClient, hSubdevice, ppSubdevice) subdeviceGetByHandle_IMPL(pClient, hSubdevice, ppSubdevice) +NV_STATUS subdeviceGetByGpu_IMPL(struct RsClient *pClient, struct OBJGPU *pGpu, struct Subdevice **ppSubdevice); + +#define subdeviceGetByGpu(pClient, pGpu, ppSubdevice) subdeviceGetByGpu_IMPL(pClient, pGpu, ppSubdevice) +NV_STATUS subdeviceGetByDeviceAndGpu_IMPL(struct RsClient *pClient, struct Device *pDevice, struct OBJGPU *pGpu, struct Subdevice **ppSubdevice); + +#define subdeviceGetByDeviceAndGpu(pClient, pDevice, pGpu, ppSubdevice) subdeviceGetByDeviceAndGpu_IMPL(pClient, pDevice, pGpu, ppSubdevice) +NV_STATUS subdeviceGetByInstance_IMPL(struct RsClient *pClient, NvHandle hDevice, NvU32 subDeviceInst, struct Subdevice **ppSubdevice); + +#define subdeviceGetByInstance(pClient, hDevice, subDeviceInst, ppSubdevice) subdeviceGetByInstance_IMPL(pClient, hDevice, subDeviceInst, ppSubdevice) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_SUBDEVICE_NVOC_H_ diff --git a/src/nvidia/generated/g_syncpoint_mem_nvoc.c b/src/nvidia/generated/g_syncpoint_mem_nvoc.c new file mode 100644 index 0000000..99ad71a --- /dev/null +++ b/src/nvidia/generated/g_syncpoint_mem_nvoc.c @@ -0,0 +1,526 @@ +#define NVOC_SYNCPOINT_MEM_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_syncpoint_mem_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x529def = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SyncpointMemory; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +// Forward declarations for SyncpointMemory +void __nvoc_init__Memory(Memory*); +void __nvoc_init__SyncpointMemory(SyncpointMemory*); +void __nvoc_init_funcTable_SyncpointMemory(SyncpointMemory*); +NV_STATUS __nvoc_ctor_SyncpointMemory(SyncpointMemory*, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_SyncpointMemory(SyncpointMemory*); +void __nvoc_dtor_SyncpointMemory(SyncpointMemory*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__SyncpointMemory; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__SyncpointMemory; + +// Down-thunk(s) to bridge SyncpointMemory methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource 
*pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_Memory_resIsDuplicate(struct RsResource *pMemory, NvHandle hMemory, NvBool *pDuplicate); // super +NV_STATUS __nvoc_down_thunk_Memory_resControl(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_Memory_resMap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_down_thunk_Memory_resUnmap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_down_thunk_Memory_rmresGetMemInterMapParams(struct RmResource *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS __nvoc_down_thunk_Memory_rmresCheckMemInterUnmap(struct RmResource *pMemory, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_down_thunk_Memory_rmresGetMemoryMappingDescriptor(struct RmResource *pMemory, MEMORY_DESCRIPTOR **ppMemDesc); // super +NvBool __nvoc_down_thunk_SyncpointMemory_resCanCopy(struct RsResource *pSyncpointMemory); // this + +// Up-thunk(s) to bridge SyncpointMemory methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2 +void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2 +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2 +NvBool __nvoc_up_thunk_RmResource_memAccessCallback(struct Memory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NvBool __nvoc_up_thunk_RmResource_memShareCallback(struct Memory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, 
RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_up_thunk_RmResource_memControlSerialization_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_memControlSerialization_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_memControl_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_memControl_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_memCanCopy(struct Memory *pResource); // super +void __nvoc_up_thunk_RsResource_memPreDestruct(struct Memory *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_memControlFilter(struct Memory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_memIsPartialUnmapSupported(struct Memory *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_memMapTo(struct Memory *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_memUnmapFrom(struct Memory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_memGetRefCount(struct Memory *pResource); // super +void __nvoc_up_thunk_RsResource_memAddAdditionalDependants(struct RsClient *pClient, struct Memory *pResource, RsResourceRef *pReference); // super +NV_STATUS __nvoc_up_thunk_Memory_syncpointIsDuplicate(struct SyncpointMemory *pMemory, NvHandle hMemory, NvBool *pDuplicate); // this +NV_STATUS __nvoc_up_thunk_Memory_syncpointGetMapAddrSpace(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this +NV_STATUS __nvoc_up_thunk_Memory_syncpointControl(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_Memory_syncpointMap(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_Memory_syncpointUnmap(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_Memory_syncpointGetMemInterMapParams(struct SyncpointMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_Memory_syncpointCheckMemInterUnmap(struct SyncpointMemory *pMemory, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_Memory_syncpointGetMemoryMappingDescriptor(struct SyncpointMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_Memory_syncpointCheckCopyPermissions(struct SyncpointMemory *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice); // this +NV_STATUS __nvoc_up_thunk_Memory_syncpointIsReady(struct SyncpointMemory *pMemory, NvBool bCopyConstructorContext); // this +NvBool __nvoc_up_thunk_Memory_syncpointIsGpuMapAllowed(struct SyncpointMemory *pMemory, struct OBJGPU *pGpu); // this +NvBool __nvoc_up_thunk_Memory_syncpointIsExportAllowed(struct SyncpointMemory *pMemory); // this +NvBool __nvoc_up_thunk_RmResource_syncpointAccessCallback(struct SyncpointMemory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this 
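/*
 * A minimal sketch (stand-in types SketchBase/SketchDerived, not part of the
 * generated code) of the pointer arithmetic behind the thunks declared above:
 * an NVOC subclass physically embeds its base object, so converting `this`
 * between the two views is a fixed byte offset. Up-thunks add the offset
 * (container -> embedded base); down-thunks subtract it (embedded base ->
 * enclosing container). NV_OFFSETOF is the same offset macro the generated
 * thunks themselves use.
 */
typedef struct { NvU32 dummy; } SketchBase;
typedef struct { NvU32 tag; SketchBase base; } SketchDerived;

static SketchBase *sketchUpcast(SketchDerived *pThis)
{
    /* Equivalent to &pThis->base; written as byte arithmetic to mirror the
     * generated up-thunks. */
    return (SketchBase *)((unsigned char *)pThis + NV_OFFSETOF(SketchDerived, base));
}

static SketchDerived *sketchDowncast(SketchBase *pBase)
{
    /* The reverse direction has no direct-member equivalent, which is why the
     * generated code spells every thunk with the offset form. */
    return (SketchDerived *)((unsigned char *)pBase - NV_OFFSETOF(SketchDerived, base));
}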
+NvBool __nvoc_up_thunk_RmResource_syncpointShareCallback(struct SyncpointMemory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_RmResource_syncpointControlSerialization_Prologue(struct SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_syncpointControlSerialization_Epilogue(struct SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_syncpointControl_Prologue(struct SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_syncpointControl_Epilogue(struct SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RsResource_syncpointPreDestruct(struct SyncpointMemory *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_syncpointControlFilter(struct SyncpointMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_syncpointIsPartialUnmapSupported(struct SyncpointMemory *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_syncpointMapTo(struct SyncpointMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_syncpointUnmapFrom(struct SyncpointMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_syncpointGetRefCount(struct SyncpointMemory *pResource); // this +void __nvoc_up_thunk_RsResource_syncpointAddAdditionalDependants(struct RsClient *pClient, struct SyncpointMemory *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_SyncpointMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(SyncpointMemory), + /*classId=*/ classId(SyncpointMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "SyncpointMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_SyncpointMemory, + /*pCastInfo=*/ &__nvoc_castinfo__SyncpointMemory, + /*pExportInfo=*/ &__nvoc_export_info__SyncpointMemory +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__SyncpointMemory __nvoc_metadata__SyncpointMemory = { + .rtti.pClassDef = &__nvoc_class_def_SyncpointMemory, // (syncpoint) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_SyncpointMemory, + .rtti.offset = 0, + .metadata__Memory.rtti.pClassDef = &__nvoc_class_def_Memory, // (mem) super + .metadata__Memory.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Memory.rtti.offset = NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory), + .metadata__Memory.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2 + .metadata__Memory.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Memory.metadata__RmResource.rtti.offset = NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource), + .metadata__Memory.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3 + .metadata__Memory.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Memory.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(SyncpointMemory, 
__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4 + .metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3 + .metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + + .vtable.__syncpointCanCopy__ = &syncpointCanCopy_IMPL, // virtual override (res) base (mem) + .metadata__Memory.vtable.__memCanCopy__ = &__nvoc_up_thunk_RsResource_memCanCopy, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &__nvoc_down_thunk_SyncpointMemory_resCanCopy, // virtual + .vtable.__syncpointIsDuplicate__ = &__nvoc_up_thunk_Memory_syncpointIsDuplicate, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memIsDuplicate__ = &memIsDuplicate_IMPL, // virtual override (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &__nvoc_down_thunk_Memory_resIsDuplicate, // virtual + .vtable.__syncpointGetMapAddrSpace__ = &__nvoc_up_thunk_Memory_syncpointGetMapAddrSpace, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memGetMapAddrSpace__ = &memGetMapAddrSpace_IMPL, // virtual + .vtable.__syncpointControl__ = &__nvoc_up_thunk_Memory_syncpointControl, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memControl__ = &memControl_IMPL, // virtual override (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_Memory_resControl, // virtual + .vtable.__syncpointMap__ = &__nvoc_up_thunk_Memory_syncpointMap, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memMap__ = &memMap_IMPL, // virtual override (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_Memory_resMap, // virtual + .vtable.__syncpointUnmap__ = &__nvoc_up_thunk_Memory_syncpointUnmap, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memUnmap__ = &memUnmap_IMPL, // virtual override (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + 
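/*
 * Pattern for every slot in this initializer: a method this class overrides
 * points at its own _IMPL (e.g. __syncpointCanCopy__ above); an inherited
 * method points at an up-thunk that forwards to the nearest ancestor
 * implementation; and the mirrored slots inside metadata__Memory,
 * metadata__RmResource and metadata__RsResource populate each ancestor's view
 * of the same object, with down-thunks routing base-class calls back into the
 * override. The header's syncpoint*() wrapper macros reach these slots through
 * the *_DISPATCH inlines defined further below.
 */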
.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_Memory_resUnmap, // virtual + .vtable.__syncpointGetMemInterMapParams__ = &__nvoc_up_thunk_Memory_syncpointGetMemInterMapParams, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memGetMemInterMapParams__ = &memGetMemInterMapParams_IMPL, // virtual override (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &__nvoc_down_thunk_Memory_rmresGetMemInterMapParams, // virtual + .vtable.__syncpointCheckMemInterUnmap__ = &__nvoc_up_thunk_Memory_syncpointCheckMemInterUnmap, // inline virtual inherited (mem) base (mem) body + .metadata__Memory.vtable.__memCheckMemInterUnmap__ = &memCheckMemInterUnmap_ac1694, // inline virtual override (rmres) base (rmres) body + .metadata__Memory.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &__nvoc_down_thunk_Memory_rmresCheckMemInterUnmap, // virtual + .vtable.__syncpointGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_Memory_syncpointGetMemoryMappingDescriptor, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memGetMemoryMappingDescriptor__ = &memGetMemoryMappingDescriptor_IMPL, // virtual override (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &__nvoc_down_thunk_Memory_rmresGetMemoryMappingDescriptor, // virtual + .vtable.__syncpointCheckCopyPermissions__ = &__nvoc_up_thunk_Memory_syncpointCheckCopyPermissions, // inline virtual inherited (mem) base (mem) body + .metadata__Memory.vtable.__memCheckCopyPermissions__ = &memCheckCopyPermissions_ac1694, // inline virtual body + .vtable.__syncpointIsReady__ = &__nvoc_up_thunk_Memory_syncpointIsReady, // virtual inherited (mem) base (mem) + .metadata__Memory.vtable.__memIsReady__ = &memIsReady_IMPL, // virtual + .vtable.__syncpointIsGpuMapAllowed__ = &__nvoc_up_thunk_Memory_syncpointIsGpuMapAllowed, // inline virtual inherited (mem) base (mem) body + .metadata__Memory.vtable.__memIsGpuMapAllowed__ = &memIsGpuMapAllowed_e661f0, // inline virtual body + .vtable.__syncpointIsExportAllowed__ = &__nvoc_up_thunk_Memory_syncpointIsExportAllowed, // inline virtual inherited (mem) base (mem) body + .metadata__Memory.vtable.__memIsExportAllowed__ = &memIsExportAllowed_e661f0, // inline virtual body + .vtable.__syncpointAccessCallback__ = &__nvoc_up_thunk_RmResource_syncpointAccessCallback, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memAccessCallback__ = &__nvoc_up_thunk_RmResource_memAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__syncpointShareCallback__ = &__nvoc_up_thunk_RmResource_syncpointShareCallback, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memShareCallback__ = &__nvoc_up_thunk_RmResource_memShareCallback, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__syncpointControlSerialization_Prologue__ = 
&__nvoc_up_thunk_RmResource_syncpointControlSerialization_Prologue, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_memControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__syncpointControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_syncpointControlSerialization_Epilogue, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_memControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__syncpointControl_Prologue__ = &__nvoc_up_thunk_RmResource_syncpointControl_Prologue, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memControl_Prologue__ = &__nvoc_up_thunk_RmResource_memControl_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__syncpointControl_Epilogue__ = &__nvoc_up_thunk_RmResource_syncpointControl_Epilogue, // virtual inherited (rmres) base (mem) + .metadata__Memory.vtable.__memControl_Epilogue__ = &__nvoc_up_thunk_RmResource_memControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__syncpointPreDestruct__ = &__nvoc_up_thunk_RsResource_syncpointPreDestruct, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memPreDestruct__ = &__nvoc_up_thunk_RsResource_memPreDestruct, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__syncpointControlFilter__ = &__nvoc_up_thunk_RsResource_syncpointControlFilter, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memControlFilter__ = &__nvoc_up_thunk_RsResource_memControlFilter, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // 
virtual + .vtable.__syncpointIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_syncpointIsPartialUnmapSupported, // inline virtual inherited (res) base (mem) body + .metadata__Memory.vtable.__memIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_memIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__Memory.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__syncpointMapTo__ = &__nvoc_up_thunk_RsResource_syncpointMapTo, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memMapTo__ = &__nvoc_up_thunk_RsResource_memMapTo, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__syncpointUnmapFrom__ = &__nvoc_up_thunk_RsResource_syncpointUnmapFrom, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memUnmapFrom__ = &__nvoc_up_thunk_RsResource_memUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__syncpointGetRefCount__ = &__nvoc_up_thunk_RsResource_syncpointGetRefCount, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memGetRefCount__ = &__nvoc_up_thunk_RsResource_memGetRefCount, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__syncpointAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_syncpointAddAdditionalDependants, // virtual inherited (res) base (mem) + .metadata__Memory.vtable.__memAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_memAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__Memory.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__SyncpointMemory = { + .numRelatives = 6, + .relatives = { + &__nvoc_metadata__SyncpointMemory.rtti, // [0]: (syncpoint) this + &__nvoc_metadata__SyncpointMemory.metadata__Memory.rtti, // [1]: (mem) super + &__nvoc_metadata__SyncpointMemory.metadata__Memory.metadata__RmResource.rtti, // [2]: (rmres) super^2 + &__nvoc_metadata__SyncpointMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3 + &__nvoc_metadata__SyncpointMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4 + 
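/* (dynamicCast support: each entry pairs a relative's RTTI, including its
 * pointer offset, with the class it represents, so a cast from a
 * SyncpointMemory can resolve to any of the six classes listed in this
 * table.) */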
&__nvoc_metadata__SyncpointMemory.metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3 + } +}; + +// 1 down-thunk(s) defined to bridge methods in SyncpointMemory from superclasses + +// syncpointCanCopy: virtual override (res) base (mem) +NvBool __nvoc_down_thunk_SyncpointMemory_resCanCopy(struct RsResource *pSyncpointMemory) { + return syncpointCanCopy((struct SyncpointMemory *)(((unsigned char *) pSyncpointMemory) - NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + + +// 25 up-thunk(s) defined to bridge methods in SyncpointMemory to superclasses + +// syncpointIsDuplicate: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_syncpointIsDuplicate(struct SyncpointMemory *pMemory, NvHandle hMemory, NvBool *pDuplicate) { + return memIsDuplicate((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory)), hMemory, pDuplicate); +} + +// syncpointGetMapAddrSpace: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_syncpointGetMapAddrSpace(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory)), pCallContext, mapFlags, pAddrSpace); +} + +// syncpointControl: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_syncpointControl(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory)), pCallContext, pParams); +} + +// syncpointMap: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_syncpointMap(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory)), pCallContext, pParams, pCpuMapping); +} + +// syncpointUnmap: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_syncpointUnmap(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory)), pCallContext, pCpuMapping); +} + +// syncpointGetMemInterMapParams: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_syncpointGetMemInterMapParams(struct SyncpointMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory)), pParams); +} + +// syncpointCheckMemInterUnmap: inline virtual inherited (mem) base (mem) body +NV_STATUS __nvoc_up_thunk_Memory_syncpointCheckMemInterUnmap(struct SyncpointMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory)), bSubdeviceHandleProvided); +} + +// syncpointGetMemoryMappingDescriptor: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_syncpointGetMemoryMappingDescriptor(struct SyncpointMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SyncpointMemory, 
__nvoc_base_Memory)), ppMemDesc); +} + +// syncpointCheckCopyPermissions: inline virtual inherited (mem) base (mem) body +NV_STATUS __nvoc_up_thunk_Memory_syncpointCheckCopyPermissions(struct SyncpointMemory *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory)), pDstGpu, pDstDevice); +} + +// syncpointIsReady: virtual inherited (mem) base (mem) +NV_STATUS __nvoc_up_thunk_Memory_syncpointIsReady(struct SyncpointMemory *pMemory, NvBool bCopyConstructorContext) { + return memIsReady((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory)), bCopyConstructorContext); +} + +// syncpointIsGpuMapAllowed: inline virtual inherited (mem) base (mem) body +NvBool __nvoc_up_thunk_Memory_syncpointIsGpuMapAllowed(struct SyncpointMemory *pMemory, struct OBJGPU *pGpu) { + return memIsGpuMapAllowed((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory)), pGpu); +} + +// syncpointIsExportAllowed: inline virtual inherited (mem) base (mem) body +NvBool __nvoc_up_thunk_Memory_syncpointIsExportAllowed(struct SyncpointMemory *pMemory) { + return memIsExportAllowed((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory))); +} + +// syncpointAccessCallback: virtual inherited (rmres) base (mem) +NvBool __nvoc_up_thunk_RmResource_syncpointAccessCallback(struct SyncpointMemory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// syncpointShareCallback: virtual inherited (rmres) base (mem) +NvBool __nvoc_up_thunk_RmResource_syncpointShareCallback(struct SyncpointMemory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// syncpointControlSerialization_Prologue: virtual inherited (rmres) base (mem) +NV_STATUS __nvoc_up_thunk_RmResource_syncpointControlSerialization_Prologue(struct SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// syncpointControlSerialization_Epilogue: virtual inherited (rmres) base (mem) +void __nvoc_up_thunk_RmResource_syncpointControlSerialization_Epilogue(struct SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// syncpointControl_Prologue: virtual inherited (rmres) base (mem) +NV_STATUS __nvoc_up_thunk_RmResource_syncpointControl_Prologue(struct SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + 
NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// syncpointControl_Epilogue: virtual inherited (rmres) base (mem) +void __nvoc_up_thunk_RmResource_syncpointControl_Epilogue(struct SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// syncpointPreDestruct: virtual inherited (res) base (mem) +void __nvoc_up_thunk_RsResource_syncpointPreDestruct(struct SyncpointMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// syncpointControlFilter: virtual inherited (res) base (mem) +NV_STATUS __nvoc_up_thunk_RsResource_syncpointControlFilter(struct SyncpointMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// syncpointIsPartialUnmapSupported: inline virtual inherited (res) base (mem) body +NvBool __nvoc_up_thunk_RsResource_syncpointIsPartialUnmapSupported(struct SyncpointMemory *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// syncpointMapTo: virtual inherited (res) base (mem) +NV_STATUS __nvoc_up_thunk_RsResource_syncpointMapTo(struct SyncpointMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// syncpointUnmapFrom: virtual inherited (res) base (mem) +NV_STATUS __nvoc_up_thunk_RsResource_syncpointUnmapFrom(struct SyncpointMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// syncpointGetRefCount: virtual inherited (res) base (mem) +NvU32 __nvoc_up_thunk_RsResource_syncpointGetRefCount(struct SyncpointMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// syncpointAddAdditionalDependants: virtual inherited (res) base (mem) +void __nvoc_up_thunk_RsResource_syncpointAddAdditionalDependants(struct RsClient *pClient, struct SyncpointMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__SyncpointMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_SyncpointMemory(SyncpointMemory *pThis) { + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void 
__nvoc_init_dataField_SyncpointMemory(SyncpointMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_SyncpointMemory(SyncpointMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_SyncpointMemory_fail_Memory; + __nvoc_init_dataField_SyncpointMemory(pThis); + + status = __nvoc_syncpointConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_SyncpointMemory_fail__init; + goto __nvoc_ctor_SyncpointMemory_exit; // Success + +__nvoc_ctor_SyncpointMemory_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_SyncpointMemory_fail_Memory: +__nvoc_ctor_SyncpointMemory_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_SyncpointMemory_1(SyncpointMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_SyncpointMemory_1 + + +// Initialize vtable(s) for 26 virtual method(s). +void __nvoc_init_funcTable_SyncpointMemory(SyncpointMemory *pThis) { + __nvoc_init_funcTable_SyncpointMemory_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__SyncpointMemory(SyncpointMemory *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; // (rmres) super^2 + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; // (mem) super + pThis->__nvoc_pbase_SyncpointMemory = pThis; // (syncpoint) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Memory(&pThis->__nvoc_base_Memory); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__SyncpointMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4 + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__SyncpointMemory.metadata__Memory.metadata__RmResource.metadata__RsResource; // (res) super^3 + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__SyncpointMemory.metadata__Memory.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3 + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__SyncpointMemory.metadata__Memory.metadata__RmResource; // (rmres) super^2 + pThis->__nvoc_base_Memory.__nvoc_metadata_ptr = &__nvoc_metadata__SyncpointMemory.metadata__Memory; // (mem) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__SyncpointMemory; // (syncpoint) this + + // Initialize per-object vtables. 
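/* With NVOC_METADATA_VERSION 2 the virtual methods live in the shared static
 * metadata wired up just above, so this per-object step is an empty
 * compatibility stub (see __nvoc_init_funcTable_SyncpointMemory_1 earlier in
 * this file, whose body only marks pThis as unreferenced). */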
+ __nvoc_init_funcTable_SyncpointMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_SyncpointMemory(SyncpointMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + SyncpointMemory *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(SyncpointMemory), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(SyncpointMemory)); + + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__SyncpointMemory(pThis); + status = __nvoc_ctor_SyncpointMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_SyncpointMemory_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_SyncpointMemory_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(SyncpointMemory)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_SyncpointMemory(SyncpointMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_SyncpointMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_syncpoint_mem_nvoc.h b/src/nvidia/generated/g_syncpoint_mem_nvoc.h new file mode 100644 index 0000000..af3cb27 --- /dev/null +++ b/src/nvidia/generated/g_syncpoint_mem_nvoc.h @@ -0,0 +1,329 @@ + +#ifndef _G_SYNCPOINT_MEM_NVOC_H_ +#define _G_SYNCPOINT_MEM_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_syncpoint_mem_nvoc.h" + +#ifndef _SYNCPOINT_MEMORY_H_ +#define _SYNCPOINT_MEMORY_H_ + +#include "mem_mgr/mem.h" + +/*! + * Bind memory allocated through os descriptor + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_SYNCPOINT_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__SyncpointMemory; +struct NVOC_METADATA__Memory; +struct NVOC_VTABLE__SyncpointMemory; + + +struct SyncpointMemory { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__SyncpointMemory *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Memory __nvoc_base_Memory; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^4 + struct RsResource *__nvoc_pbase_RsResource; // res super^3 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^2 + struct Memory *__nvoc_pbase_Memory; // mem super + struct SyncpointMemory *__nvoc_pbase_SyncpointMemory; // syncpoint +}; + + +// Vtable with 26 per-class function pointers +struct NVOC_VTABLE__SyncpointMemory { + NvBool (*__syncpointCanCopy__)(struct SyncpointMemory * /*this*/); // virtual override (res) base (mem) + NV_STATUS (*__syncpointIsDuplicate__)(struct SyncpointMemory * /*this*/, NvHandle, NvBool *); // virtual inherited (mem) base (mem) + NV_STATUS (*__syncpointGetMapAddrSpace__)(struct SyncpointMemory * /*this*/, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (mem) base (mem) + NV_STATUS (*__syncpointControl__)(struct SyncpointMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (mem) base (mem) + NV_STATUS (*__syncpointMap__)(struct SyncpointMemory * /*this*/, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (mem) base (mem) + NV_STATUS (*__syncpointUnmap__)(struct SyncpointMemory * /*this*/, CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (mem) base (mem) + NV_STATUS (*__syncpointGetMemInterMapParams__)(struct SyncpointMemory * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (mem) base (mem) + NV_STATUS (*__syncpointCheckMemInterUnmap__)(struct SyncpointMemory * /*this*/, NvBool); // inline virtual inherited (mem) base (mem) body + NV_STATUS (*__syncpointGetMemoryMappingDescriptor__)(struct SyncpointMemory * /*this*/, MEMORY_DESCRIPTOR **); // virtual inherited (mem) base (mem) + NV_STATUS (*__syncpointCheckCopyPermissions__)(struct SyncpointMemory * /*this*/, struct OBJGPU *, struct Device *); // inline virtual inherited (mem) base (mem) body + NV_STATUS (*__syncpointIsReady__)(struct SyncpointMemory * /*this*/, NvBool); // virtual inherited (mem) base (mem) + NvBool (*__syncpointIsGpuMapAllowed__)(struct SyncpointMemory * /*this*/, struct OBJGPU *); // inline virtual inherited (mem) base (mem) body + NvBool (*__syncpointIsExportAllowed__)(struct SyncpointMemory * /*this*/); // inline virtual inherited (mem) base (mem) body + NvBool (*__syncpointAccessCallback__)(struct SyncpointMemory * /*this*/, RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (mem) + NvBool (*__syncpointShareCallback__)(struct SyncpointMemory * /*this*/, RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (rmres) base (mem) + NV_STATUS (*__syncpointControlSerialization_Prologue__)(struct SyncpointMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + void (*__syncpointControlSerialization_Epilogue__)(struct SyncpointMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + NV_STATUS (*__syncpointControl_Prologue__)(struct SyncpointMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + void (*__syncpointControl_Epilogue__)(struct SyncpointMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (mem) + void 
(*__syncpointPreDestruct__)(struct SyncpointMemory * /*this*/); // virtual inherited (res) base (mem) + NV_STATUS (*__syncpointControlFilter__)(struct SyncpointMemory * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (mem) + NvBool (*__syncpointIsPartialUnmapSupported__)(struct SyncpointMemory * /*this*/); // inline virtual inherited (res) base (mem) body + NV_STATUS (*__syncpointMapTo__)(struct SyncpointMemory * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (mem) + NV_STATUS (*__syncpointUnmapFrom__)(struct SyncpointMemory * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (mem) + NvU32 (*__syncpointGetRefCount__)(struct SyncpointMemory * /*this*/); // virtual inherited (res) base (mem) + void (*__syncpointAddAdditionalDependants__)(struct RsClient *, struct SyncpointMemory * /*this*/, RsResourceRef *); // virtual inherited (res) base (mem) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__SyncpointMemory { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Memory metadata__Memory; + const struct NVOC_VTABLE__SyncpointMemory vtable; +}; + +#ifndef __NVOC_CLASS_SyncpointMemory_TYPEDEF__ +#define __NVOC_CLASS_SyncpointMemory_TYPEDEF__ +typedef struct SyncpointMemory SyncpointMemory; +#endif /* __NVOC_CLASS_SyncpointMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SyncpointMemory +#define __nvoc_class_id_SyncpointMemory 0x529def +#endif /* __nvoc_class_id_SyncpointMemory */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SyncpointMemory; + +#define __staticCast_SyncpointMemory(pThis) \ + ((pThis)->__nvoc_pbase_SyncpointMemory) + +#ifdef __nvoc_syncpoint_mem_h_disabled +#define __dynamicCast_SyncpointMemory(pThis) ((SyncpointMemory*) NULL) +#else //__nvoc_syncpoint_mem_h_disabled +#define __dynamicCast_SyncpointMemory(pThis) \ + ((SyncpointMemory*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(SyncpointMemory))) +#endif //__nvoc_syncpoint_mem_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_SyncpointMemory(SyncpointMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_SyncpointMemory(SyncpointMemory**, Dynamic*, NvU32, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_SyncpointMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_SyncpointMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define syncpointCanCopy_FNPTR(pSyncpointMemory) pSyncpointMemory->__nvoc_metadata_ptr->vtable.__syncpointCanCopy__ +#define syncpointCanCopy(pSyncpointMemory) syncpointCanCopy_DISPATCH(pSyncpointMemory) +#define syncpointIsDuplicate_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsDuplicate__ +#define syncpointIsDuplicate(pMemory, hMemory, pDuplicate) syncpointIsDuplicate_DISPATCH(pMemory, hMemory, pDuplicate) +#define syncpointGetMapAddrSpace_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMapAddrSpace__ +#define syncpointGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) syncpointGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define syncpointControl_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memControl__ +#define syncpointControl(pMemory, pCallContext, pParams) syncpointControl_DISPATCH(pMemory, pCallContext, pParams) +#define 
syncpointMap_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memMap__ +#define syncpointMap(pMemory, pCallContext, pParams, pCpuMapping) syncpointMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define syncpointUnmap_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memUnmap__ +#define syncpointUnmap(pMemory, pCallContext, pCpuMapping) syncpointUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define syncpointGetMemInterMapParams_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMemInterMapParams__ +#define syncpointGetMemInterMapParams(pMemory, pParams) syncpointGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define syncpointCheckMemInterUnmap_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memCheckMemInterUnmap__ +#define syncpointCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) syncpointCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define syncpointGetMemoryMappingDescriptor_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMemoryMappingDescriptor__ +#define syncpointGetMemoryMappingDescriptor(pMemory, ppMemDesc) syncpointGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define syncpointCheckCopyPermissions_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memCheckCopyPermissions__ +#define syncpointCheckCopyPermissions(pMemory, pDstGpu, pDstDevice) syncpointCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, pDstDevice) +#define syncpointIsReady_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsReady__ +#define syncpointIsReady(pMemory, bCopyConstructorContext) syncpointIsReady_DISPATCH(pMemory, bCopyConstructorContext) +#define syncpointIsGpuMapAllowed_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsGpuMapAllowed__ +#define syncpointIsGpuMapAllowed(pMemory, pGpu) syncpointIsGpuMapAllowed_DISPATCH(pMemory, pGpu) +#define syncpointIsExportAllowed_FNPTR(pMemory) pMemory->__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsExportAllowed__ +#define syncpointIsExportAllowed(pMemory) syncpointIsExportAllowed_DISPATCH(pMemory) +#define syncpointAccessCallback_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define syncpointAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) syncpointAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define syncpointShareCallback_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define syncpointShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) syncpointShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define syncpointControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define syncpointControlSerialization_Prologue(pResource, pCallContext, pParams) syncpointControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define syncpointControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define syncpointControlSerialization_Epilogue(pResource, pCallContext, pParams) syncpointControlSerialization_Epilogue_DISPATCH(pResource, 
pCallContext, pParams) +#define syncpointControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define syncpointControl_Prologue(pResource, pCallContext, pParams) syncpointControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define syncpointControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define syncpointControl_Epilogue(pResource, pCallContext, pParams) syncpointControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define syncpointPreDestruct_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define syncpointPreDestruct(pResource) syncpointPreDestruct_DISPATCH(pResource) +#define syncpointControlFilter_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define syncpointControlFilter(pResource, pCallContext, pParams) syncpointControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define syncpointIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define syncpointIsPartialUnmapSupported(pResource) syncpointIsPartialUnmapSupported_DISPATCH(pResource) +#define syncpointMapTo_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define syncpointMapTo(pResource, pParams) syncpointMapTo_DISPATCH(pResource, pParams) +#define syncpointUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define syncpointUnmapFrom(pResource, pParams) syncpointUnmapFrom_DISPATCH(pResource, pParams) +#define syncpointGetRefCount_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define syncpointGetRefCount(pResource) syncpointGetRefCount_DISPATCH(pResource) +#define syncpointAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define syncpointAddAdditionalDependants(pClient, pResource, pReference) syncpointAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NvBool syncpointCanCopy_DISPATCH(struct SyncpointMemory *pSyncpointMemory) { + return pSyncpointMemory->__nvoc_metadata_ptr->vtable.__syncpointCanCopy__(pSyncpointMemory); +} + +static inline NV_STATUS syncpointIsDuplicate_DISPATCH(struct SyncpointMemory *pMemory, NvHandle hMemory, NvBool *pDuplicate) { + return pMemory->__nvoc_metadata_ptr->vtable.__syncpointIsDuplicate__(pMemory, hMemory, pDuplicate); +} + +static inline NV_STATUS syncpointGetMapAddrSpace_DISPATCH(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__nvoc_metadata_ptr->vtable.__syncpointGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS syncpointControl_DISPATCH(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return 
pMemory->__nvoc_metadata_ptr->vtable.__syncpointControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS syncpointMap_DISPATCH(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__nvoc_metadata_ptr->vtable.__syncpointMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS syncpointUnmap_DISPATCH(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__nvoc_metadata_ptr->vtable.__syncpointUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS syncpointGetMemInterMapParams_DISPATCH(struct SyncpointMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__nvoc_metadata_ptr->vtable.__syncpointGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS syncpointCheckMemInterUnmap_DISPATCH(struct SyncpointMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__nvoc_metadata_ptr->vtable.__syncpointCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS syncpointGetMemoryMappingDescriptor_DISPATCH(struct SyncpointMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__nvoc_metadata_ptr->vtable.__syncpointGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS syncpointCheckCopyPermissions_DISPATCH(struct SyncpointMemory *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice) { + return pMemory->__nvoc_metadata_ptr->vtable.__syncpointCheckCopyPermissions__(pMemory, pDstGpu, pDstDevice); +} + +static inline NV_STATUS syncpointIsReady_DISPATCH(struct SyncpointMemory *pMemory, NvBool bCopyConstructorContext) { + return pMemory->__nvoc_metadata_ptr->vtable.__syncpointIsReady__(pMemory, bCopyConstructorContext); +} + +static inline NvBool syncpointIsGpuMapAllowed_DISPATCH(struct SyncpointMemory *pMemory, struct OBJGPU *pGpu) { + return pMemory->__nvoc_metadata_ptr->vtable.__syncpointIsGpuMapAllowed__(pMemory, pGpu); +} + +static inline NvBool syncpointIsExportAllowed_DISPATCH(struct SyncpointMemory *pMemory) { + return pMemory->__nvoc_metadata_ptr->vtable.__syncpointIsExportAllowed__(pMemory); +} + +static inline NvBool syncpointAccessCallback_DISPATCH(struct SyncpointMemory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__syncpointAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool syncpointShareCallback_DISPATCH(struct SyncpointMemory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__syncpointShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS syncpointControlSerialization_Prologue_DISPATCH(struct SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__syncpointControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void syncpointControlSerialization_Epilogue_DISPATCH(struct SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__syncpointControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS syncpointControl_Prologue_DISPATCH(struct 
SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__syncpointControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void syncpointControl_Epilogue_DISPATCH(struct SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__syncpointControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline void syncpointPreDestruct_DISPATCH(struct SyncpointMemory *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__syncpointPreDestruct__(pResource); +} + +static inline NV_STATUS syncpointControlFilter_DISPATCH(struct SyncpointMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__syncpointControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool syncpointIsPartialUnmapSupported_DISPATCH(struct SyncpointMemory *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__syncpointIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS syncpointMapTo_DISPATCH(struct SyncpointMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__syncpointMapTo__(pResource, pParams); +} + +static inline NV_STATUS syncpointUnmapFrom_DISPATCH(struct SyncpointMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__syncpointUnmapFrom__(pResource, pParams); +} + +static inline NvU32 syncpointGetRefCount_DISPATCH(struct SyncpointMemory *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__syncpointGetRefCount__(pResource); +} + +static inline void syncpointAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct SyncpointMemory *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__syncpointAddAdditionalDependants__(pClient, pResource, pReference); +} + +NvBool syncpointCanCopy_IMPL(struct SyncpointMemory *pSyncpointMemory); + +NV_STATUS syncpointConstruct_IMPL(struct SyncpointMemory *arg_pSyncpointMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_syncpointConstruct(arg_pSyncpointMemory, arg_pCallContext, arg_pParams) syncpointConstruct_IMPL(arg_pSyncpointMemory, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_SYNCPOINT_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_system_mem_nvoc.c b/src/nvidia/generated/g_system_mem_nvoc.c new file mode 100644 index 0000000..e9c3010 --- /dev/null +++ b/src/nvidia/generated/g_system_mem_nvoc.c @@ -0,0 +1,652 @@ +#define NVOC_SYSTEM_MEM_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_system_mem_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x007a98 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SystemMemory; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const 
struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_StandardMemory; + +// Forward declarations for SystemMemory +void __nvoc_init__StandardMemory(StandardMemory*); +void __nvoc_init__SystemMemory(SystemMemory*, GpuHalspecOwner *pGpuhalspecowner); +void __nvoc_init_funcTable_SystemMemory(SystemMemory*, GpuHalspecOwner *pGpuhalspecowner); +NV_STATUS __nvoc_ctor_SystemMemory(SystemMemory*, GpuHalspecOwner *pGpuhalspecowner, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_SystemMemory(SystemMemory*, GpuHalspecOwner *pGpuhalspecowner); +void __nvoc_dtor_SystemMemory(SystemMemory*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__SystemMemory; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__SystemMemory; + +// Down-thunk(s) to bridge SystemMemory methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^3 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^3 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3 +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3 +NV_STATUS __nvoc_down_thunk_Memory_resIsDuplicate(struct RsResource *pMemory, NvHandle hMemory, NvBool *pDuplicate); // super^2 +NV_STATUS __nvoc_down_thunk_Memory_resControl(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_Memory_resMap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_down_thunk_Memory_resUnmap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2 +NV_STATUS __nvoc_down_thunk_Memory_rmresGetMemInterMapParams(struct RmResource *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_Memory_rmresCheckMemInterUnmap(struct RmResource *pMemory, NvBool bSubdeviceHandleProvided); // super^2 +NV_STATUS __nvoc_down_thunk_Memory_rmresGetMemoryMappingDescriptor(struct RmResource *pMemory, MEMORY_DESCRIPTOR **ppMemDesc); // super^2 +NvBool __nvoc_down_thunk_StandardMemory_resCanCopy(struct RsResource *pStandardMemory); // super + +// Up-thunk(s) to bridge SystemMemory methods to ancestors (if any) +NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^3 +NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^3 +void 
__nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^3 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3 +NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^3 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^3 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^3 +NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^3 +NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^3 +NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^3 +NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^3 +void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^3 +NvBool __nvoc_up_thunk_RmResource_memAccessCallback(struct Memory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_up_thunk_RmResource_memShareCallback(struct Memory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_up_thunk_RmResource_memControlSerialization_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_up_thunk_RmResource_memControlSerialization_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RmResource_memControl_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_up_thunk_RmResource_memControl_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NvBool __nvoc_up_thunk_RsResource_memCanCopy(struct Memory *pResource); // super^2 +void __nvoc_up_thunk_RsResource_memPreDestruct(struct Memory *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_memControlFilter(struct Memory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NvBool __nvoc_up_thunk_RsResource_memIsPartialUnmapSupported(struct Memory *pResource); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_memMapTo(struct Memory *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2 +NV_STATUS __nvoc_up_thunk_RsResource_memUnmapFrom(struct Memory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2 +NvU32 __nvoc_up_thunk_RsResource_memGetRefCount(struct Memory *pResource); // super^2 +void __nvoc_up_thunk_RsResource_memAddAdditionalDependants(struct RsClient *pClient, struct Memory *pResource, RsResourceRef *pReference); // super^2 +NV_STATUS __nvoc_up_thunk_Memory_stdmemIsDuplicate(struct StandardMemory *pMemory, NvHandle hMemory, NvBool *pDuplicate); // super +NV_STATUS __nvoc_up_thunk_Memory_stdmemGetMapAddrSpace(struct 
StandardMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // super +NV_STATUS __nvoc_up_thunk_Memory_stdmemControl(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_Memory_stdmemMap(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_up_thunk_Memory_stdmemUnmap(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_up_thunk_Memory_stdmemGetMemInterMapParams(struct StandardMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_Memory_stdmemCheckMemInterUnmap(struct StandardMemory *pMemory, NvBool bSubdeviceHandleProvided); // super +NV_STATUS __nvoc_up_thunk_Memory_stdmemGetMemoryMappingDescriptor(struct StandardMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc); // super +NV_STATUS __nvoc_up_thunk_Memory_stdmemCheckCopyPermissions(struct StandardMemory *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice); // super +NV_STATUS __nvoc_up_thunk_Memory_stdmemIsReady(struct StandardMemory *pMemory, NvBool bCopyConstructorContext); // super +NvBool __nvoc_up_thunk_Memory_stdmemIsGpuMapAllowed(struct StandardMemory *pMemory, struct OBJGPU *pGpu); // super +NvBool __nvoc_up_thunk_Memory_stdmemIsExportAllowed(struct StandardMemory *pMemory); // super +NvBool __nvoc_up_thunk_RmResource_stdmemAccessCallback(struct StandardMemory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super +NvBool __nvoc_up_thunk_RmResource_stdmemShareCallback(struct StandardMemory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +NV_STATUS __nvoc_up_thunk_RmResource_stdmemControlSerialization_Prologue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_stdmemControlSerialization_Epilogue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_up_thunk_RmResource_stdmemControl_Prologue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RmResource_stdmemControl_Epilogue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +void __nvoc_up_thunk_RsResource_stdmemPreDestruct(struct StandardMemory *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_stdmemControlFilter(struct StandardMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NvBool __nvoc_up_thunk_RsResource_stdmemIsPartialUnmapSupported(struct StandardMemory *pResource); // super +NV_STATUS __nvoc_up_thunk_RsResource_stdmemMapTo(struct StandardMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super +NV_STATUS __nvoc_up_thunk_RsResource_stdmemUnmapFrom(struct StandardMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super +NvU32 __nvoc_up_thunk_RsResource_stdmemGetRefCount(struct StandardMemory *pResource); // super +void __nvoc_up_thunk_RsResource_stdmemAddAdditionalDependants(struct RsClient *pClient, struct StandardMemory *pResource, RsResourceRef *pReference); // super +NvBool 
__nvoc_up_thunk_StandardMemory_sysmemCanCopy(struct SystemMemory *pStandardMemory); // this +NV_STATUS __nvoc_up_thunk_Memory_sysmemIsDuplicate(struct SystemMemory *pMemory, NvHandle hMemory, NvBool *pDuplicate); // this +NV_STATUS __nvoc_up_thunk_Memory_sysmemGetMapAddrSpace(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this +NV_STATUS __nvoc_up_thunk_Memory_sysmemControl(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_Memory_sysmemMap(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_Memory_sysmemUnmap(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // this +NV_STATUS __nvoc_up_thunk_Memory_sysmemGetMemInterMapParams(struct SystemMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_Memory_sysmemCheckMemInterUnmap(struct SystemMemory *pMemory, NvBool bSubdeviceHandleProvided); // this +NV_STATUS __nvoc_up_thunk_Memory_sysmemGetMemoryMappingDescriptor(struct SystemMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc); // this +NV_STATUS __nvoc_up_thunk_Memory_sysmemCheckCopyPermissions(struct SystemMemory *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice); // this +NV_STATUS __nvoc_up_thunk_Memory_sysmemIsReady(struct SystemMemory *pMemory, NvBool bCopyConstructorContext); // this +NvBool __nvoc_up_thunk_Memory_sysmemIsGpuMapAllowed(struct SystemMemory *pMemory, struct OBJGPU *pGpu); // this +NvBool __nvoc_up_thunk_Memory_sysmemIsExportAllowed(struct SystemMemory *pMemory); // this +NvBool __nvoc_up_thunk_RmResource_sysmemAccessCallback(struct SystemMemory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this +NvBool __nvoc_up_thunk_RmResource_sysmemShareCallback(struct SystemMemory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this +NV_STATUS __nvoc_up_thunk_RmResource_sysmemControlSerialization_Prologue(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_sysmemControlSerialization_Epilogue(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NV_STATUS __nvoc_up_thunk_RmResource_sysmemControl_Prologue(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RmResource_sysmemControl_Epilogue(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +void __nvoc_up_thunk_RsResource_sysmemPreDestruct(struct SystemMemory *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_sysmemControlFilter(struct SystemMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this +NvBool __nvoc_up_thunk_RsResource_sysmemIsPartialUnmapSupported(struct SystemMemory *pResource); // this +NV_STATUS __nvoc_up_thunk_RsResource_sysmemMapTo(struct SystemMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this +NV_STATUS __nvoc_up_thunk_RsResource_sysmemUnmapFrom(struct SystemMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this +NvU32 __nvoc_up_thunk_RsResource_sysmemGetRefCount(struct SystemMemory *pResource); // 
this +void __nvoc_up_thunk_RsResource_sysmemAddAdditionalDependants(struct RsClient *pClient, struct SystemMemory *pResource, RsResourceRef *pReference); // this + +const struct NVOC_CLASS_DEF __nvoc_class_def_SystemMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(SystemMemory), + /*classId=*/ classId(SystemMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "SystemMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_SystemMemory, + /*pCastInfo=*/ &__nvoc_castinfo__SystemMemory, + /*pExportInfo=*/ &__nvoc_export_info__SystemMemory +}; + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_SystemMemory[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x101u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) sysmemCtrlCmdGetSurfaceNumPhysPages_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x101u) + /*flags=*/ 0x101u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3e0102u, + /*paramSize=*/ sizeof(NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_SystemMemory.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "sysmemCtrlCmdGetSurfaceNumPhysPages" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x101u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) sysmemCtrlCmdGetSurfacePhysPages_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x101u) + /*flags=*/ 0x101u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3e0103u, + /*paramSize=*/ sizeof(NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_SystemMemory.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "sysmemCtrlCmdGetSurfacePhysPages" +#endif + }, + +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__SystemMemory __nvoc_metadata__SystemMemory = { + .rtti.pClassDef = &__nvoc_class_def_SystemMemory, // (sysmem) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_SystemMemory, + .rtti.offset = 0, + .metadata__StandardMemory.rtti.pClassDef = &__nvoc_class_def_StandardMemory, // (stdmem) super + .metadata__StandardMemory.rtti.dtor = &__nvoc_destructFromBase, + .metadata__StandardMemory.rtti.offset = NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory), + .metadata__StandardMemory.metadata__Memory.rtti.pClassDef = &__nvoc_class_def_Memory, // (mem) super^2 + .metadata__StandardMemory.metadata__Memory.rtti.dtor = &__nvoc_destructFromBase, + .metadata__StandardMemory.metadata__Memory.rtti.offset = NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory), + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^3 + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase, + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.rtti.offset = NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource), + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^4 + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase, + 
.metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^5 + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^4 + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase, + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), + + .vtable.__sysmemCanCopy__ = &__nvoc_up_thunk_StandardMemory_sysmemCanCopy, // virtual inherited (stdmem) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemCanCopy__ = &stdmemCanCopy_IMPL, // virtual override (res) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memCanCopy__ = &__nvoc_up_thunk_RsResource_memCanCopy, // virtual inherited (res) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &__nvoc_down_thunk_StandardMemory_resCanCopy, // virtual + .vtable.__sysmemIsDuplicate__ = &__nvoc_up_thunk_Memory_sysmemIsDuplicate, // virtual inherited (mem) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemIsDuplicate__ = &__nvoc_up_thunk_Memory_stdmemIsDuplicate, // virtual inherited (mem) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memIsDuplicate__ = &memIsDuplicate_IMPL, // virtual override (res) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &__nvoc_down_thunk_Memory_resIsDuplicate, // virtual + .vtable.__sysmemGetMapAddrSpace__ = &__nvoc_up_thunk_Memory_sysmemGetMapAddrSpace, // virtual inherited (mem) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemGetMapAddrSpace__ = &__nvoc_up_thunk_Memory_stdmemGetMapAddrSpace, // virtual inherited (mem) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memGetMapAddrSpace__ = &memGetMapAddrSpace_IMPL, // virtual + .vtable.__sysmemControl__ = &__nvoc_up_thunk_Memory_sysmemControl, // virtual inherited (mem) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemControl__ = &__nvoc_up_thunk_Memory_stdmemControl, // virtual inherited (mem) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memControl__ = &memControl_IMPL, // virtual override (res) base (rmres) + 
.metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_Memory_resControl, // virtual + .vtable.__sysmemMap__ = &__nvoc_up_thunk_Memory_sysmemMap, // virtual inherited (mem) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemMap__ = &__nvoc_up_thunk_Memory_stdmemMap, // virtual inherited (mem) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memMap__ = &memMap_IMPL, // virtual override (res) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_Memory_resMap, // virtual + .vtable.__sysmemUnmap__ = &__nvoc_up_thunk_Memory_sysmemUnmap, // virtual inherited (mem) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemUnmap__ = &__nvoc_up_thunk_Memory_stdmemUnmap, // virtual inherited (mem) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memUnmap__ = &memUnmap_IMPL, // virtual override (res) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_Memory_resUnmap, // virtual + .vtable.__sysmemGetMemInterMapParams__ = &__nvoc_up_thunk_Memory_sysmemGetMemInterMapParams, // virtual inherited (mem) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemGetMemInterMapParams__ = &__nvoc_up_thunk_Memory_stdmemGetMemInterMapParams, // virtual inherited (mem) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memGetMemInterMapParams__ = &memGetMemInterMapParams_IMPL, // virtual override (rmres) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &__nvoc_down_thunk_Memory_rmresGetMemInterMapParams, // virtual + .vtable.__sysmemCheckMemInterUnmap__ = &__nvoc_up_thunk_Memory_sysmemCheckMemInterUnmap, // inline virtual inherited (mem) base (stdmem) body + .metadata__StandardMemory.vtable.__stdmemCheckMemInterUnmap__ = &__nvoc_up_thunk_Memory_stdmemCheckMemInterUnmap, // inline virtual inherited (mem) base (mem) body + .metadata__StandardMemory.metadata__Memory.vtable.__memCheckMemInterUnmap__ = &memCheckMemInterUnmap_ac1694, // inline virtual override (rmres) base (rmres) body + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &__nvoc_down_thunk_Memory_rmresCheckMemInterUnmap, // virtual + .vtable.__sysmemGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_Memory_sysmemGetMemoryMappingDescriptor, // virtual inherited (mem) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_Memory_stdmemGetMemoryMappingDescriptor, // virtual inherited (mem) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memGetMemoryMappingDescriptor__ = &memGetMemoryMappingDescriptor_IMPL, // virtual override (rmres) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = 
&__nvoc_down_thunk_Memory_rmresGetMemoryMappingDescriptor, // virtual + .vtable.__sysmemCheckCopyPermissions__ = &__nvoc_up_thunk_Memory_sysmemCheckCopyPermissions, // inline virtual inherited (mem) base (stdmem) body + .metadata__StandardMemory.vtable.__stdmemCheckCopyPermissions__ = &__nvoc_up_thunk_Memory_stdmemCheckCopyPermissions, // inline virtual inherited (mem) base (mem) body + .metadata__StandardMemory.metadata__Memory.vtable.__memCheckCopyPermissions__ = &memCheckCopyPermissions_ac1694, // inline virtual body + .vtable.__sysmemIsReady__ = &__nvoc_up_thunk_Memory_sysmemIsReady, // virtual inherited (mem) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemIsReady__ = &__nvoc_up_thunk_Memory_stdmemIsReady, // virtual inherited (mem) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memIsReady__ = &memIsReady_IMPL, // virtual + .vtable.__sysmemIsGpuMapAllowed__ = &__nvoc_up_thunk_Memory_sysmemIsGpuMapAllowed, // inline virtual inherited (mem) base (stdmem) body + .metadata__StandardMemory.vtable.__stdmemIsGpuMapAllowed__ = &__nvoc_up_thunk_Memory_stdmemIsGpuMapAllowed, // inline virtual inherited (mem) base (mem) body + .metadata__StandardMemory.metadata__Memory.vtable.__memIsGpuMapAllowed__ = &memIsGpuMapAllowed_e661f0, // inline virtual body + .vtable.__sysmemIsExportAllowed__ = &__nvoc_up_thunk_Memory_sysmemIsExportAllowed, // inline virtual inherited (mem) base (stdmem) body + .metadata__StandardMemory.vtable.__stdmemIsExportAllowed__ = &__nvoc_up_thunk_Memory_stdmemIsExportAllowed, // inline virtual inherited (mem) base (mem) body + .metadata__StandardMemory.metadata__Memory.vtable.__memIsExportAllowed__ = &memIsExportAllowed_e661f0, // inline virtual body + .vtable.__sysmemAccessCallback__ = &__nvoc_up_thunk_RmResource_sysmemAccessCallback, // virtual inherited (rmres) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemAccessCallback__ = &__nvoc_up_thunk_RmResource_stdmemAccessCallback, // virtual inherited (rmres) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memAccessCallback__ = &__nvoc_up_thunk_RmResource_memAccessCallback, // virtual inherited (rmres) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual + .vtable.__sysmemShareCallback__ = &__nvoc_up_thunk_RmResource_sysmemShareCallback, // virtual inherited (rmres) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemShareCallback__ = &__nvoc_up_thunk_RmResource_stdmemShareCallback, // virtual inherited (rmres) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memShareCallback__ = &__nvoc_up_thunk_RmResource_memShareCallback, // virtual inherited (rmres) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresShareCallback__ = &rmresShareCallback_IMPL, // virtual override (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual + .vtable.__sysmemControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_sysmemControlSerialization_Prologue, // virtual inherited (rmres) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemControlSerialization_Prologue__ = 
&__nvoc_up_thunk_RmResource_stdmemControlSerialization_Prologue, // virtual inherited (rmres) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_memControlSerialization_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual + .vtable.__sysmemControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_sysmemControlSerialization_Epilogue, // virtual inherited (rmres) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_stdmemControlSerialization_Epilogue, // virtual inherited (rmres) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_memControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual + .vtable.__sysmemControl_Prologue__ = &__nvoc_up_thunk_RmResource_sysmemControl_Prologue, // virtual inherited (rmres) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemControl_Prologue__ = &__nvoc_up_thunk_RmResource_stdmemControl_Prologue, // virtual inherited (rmres) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memControl_Prologue__ = &__nvoc_up_thunk_RmResource_memControl_Prologue, // virtual inherited (rmres) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual + .vtable.__sysmemControl_Epilogue__ = &__nvoc_up_thunk_RmResource_sysmemControl_Epilogue, // virtual inherited (rmres) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemControl_Epilogue__ = &__nvoc_up_thunk_RmResource_stdmemControl_Epilogue, // virtual inherited (rmres) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memControl_Epilogue__ = &__nvoc_up_thunk_RmResource_memControl_Epilogue, // virtual inherited (rmres) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual + .vtable.__sysmemPreDestruct__ = &__nvoc_up_thunk_RsResource_sysmemPreDestruct, // virtual inherited (res) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemPreDestruct__ = &__nvoc_up_thunk_RsResource_stdmemPreDestruct, // virtual inherited (res) base (mem) + 
.metadata__StandardMemory.metadata__Memory.vtable.__memPreDestruct__ = &__nvoc_up_thunk_RsResource_memPreDestruct, // virtual inherited (res) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual + .vtable.__sysmemControlFilter__ = &__nvoc_up_thunk_RsResource_sysmemControlFilter, // virtual inherited (res) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemControlFilter__ = &__nvoc_up_thunk_RsResource_stdmemControlFilter, // virtual inherited (res) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memControlFilter__ = &__nvoc_up_thunk_RsResource_memControlFilter, // virtual inherited (res) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual + .vtable.__sysmemIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_sysmemIsPartialUnmapSupported, // inline virtual inherited (res) base (stdmem) body + .metadata__StandardMemory.vtable.__stdmemIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_stdmemIsPartialUnmapSupported, // inline virtual inherited (res) base (mem) body + .metadata__StandardMemory.metadata__Memory.vtable.__memIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_memIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body + .vtable.__sysmemMapTo__ = &__nvoc_up_thunk_RsResource_sysmemMapTo, // virtual inherited (res) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemMapTo__ = &__nvoc_up_thunk_RsResource_stdmemMapTo, // virtual inherited (res) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memMapTo__ = &__nvoc_up_thunk_RsResource_memMapTo, // virtual inherited (res) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual + .vtable.__sysmemUnmapFrom__ = &__nvoc_up_thunk_RsResource_sysmemUnmapFrom, // virtual inherited (res) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemUnmapFrom__ = &__nvoc_up_thunk_RsResource_stdmemUnmapFrom, // virtual inherited (res) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memUnmapFrom__ = &__nvoc_up_thunk_RsResource_memUnmapFrom, // virtual inherited (res) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res) + 
.metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual + .vtable.__sysmemGetRefCount__ = &__nvoc_up_thunk_RsResource_sysmemGetRefCount, // virtual inherited (res) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemGetRefCount__ = &__nvoc_up_thunk_RsResource_stdmemGetRefCount, // virtual inherited (res) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memGetRefCount__ = &__nvoc_up_thunk_RsResource_memGetRefCount, // virtual inherited (res) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual + .vtable.__sysmemAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_sysmemAddAdditionalDependants, // virtual inherited (res) base (stdmem) + .metadata__StandardMemory.vtable.__stdmemAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_stdmemAddAdditionalDependants, // virtual inherited (res) base (mem) + .metadata__StandardMemory.metadata__Memory.vtable.__memAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_memAddAdditionalDependants, // virtual inherited (res) base (rmres) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res) + .metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__SystemMemory = { + .numRelatives = 7, + .relatives = { + &__nvoc_metadata__SystemMemory.rtti, // [0]: (sysmem) this + &__nvoc_metadata__SystemMemory.metadata__StandardMemory.rtti, // [1]: (stdmem) super + &__nvoc_metadata__SystemMemory.metadata__StandardMemory.metadata__Memory.rtti, // [2]: (mem) super^2 + &__nvoc_metadata__SystemMemory.metadata__StandardMemory.metadata__Memory.metadata__RmResource.rtti, // [3]: (rmres) super^3 + &__nvoc_metadata__SystemMemory.metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.rtti, // [4]: (res) super^4 + &__nvoc_metadata__SystemMemory.metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [5]: (obj) super^5 + &__nvoc_metadata__SystemMemory.metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RmResourceCommon.rtti, // [6]: (rmrescmn) super^4 + } +}; + +// 26 up-thunk(s) defined to bridge methods in SystemMemory to superclasses + +// sysmemCanCopy: virtual inherited (stdmem) base (stdmem) +NvBool __nvoc_up_thunk_StandardMemory_sysmemCanCopy(struct SystemMemory *pStandardMemory) { + return stdmemCanCopy((struct StandardMemory *)(((unsigned char *) pStandardMemory) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory))); +} + +// sysmemIsDuplicate: virtual inherited (mem) base (stdmem) +NV_STATUS __nvoc_up_thunk_Memory_sysmemIsDuplicate(struct SystemMemory *pMemory, NvHandle hMemory, NvBool *pDuplicate) { + return memIsDuplicate((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory)), hMemory, pDuplicate); +} + +// sysmemGetMapAddrSpace: virtual inherited (mem) base 
(stdmem) +NV_STATUS __nvoc_up_thunk_Memory_sysmemGetMapAddrSpace(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory)), pCallContext, mapFlags, pAddrSpace); +} + +// sysmemControl: virtual inherited (mem) base (stdmem) +NV_STATUS __nvoc_up_thunk_Memory_sysmemControl(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory)), pCallContext, pParams); +} + +// sysmemMap: virtual inherited (mem) base (stdmem) +NV_STATUS __nvoc_up_thunk_Memory_sysmemMap(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory)), pCallContext, pParams, pCpuMapping); +} + +// sysmemUnmap: virtual inherited (mem) base (stdmem) +NV_STATUS __nvoc_up_thunk_Memory_sysmemUnmap(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory)), pCallContext, pCpuMapping); +} + +// sysmemGetMemInterMapParams: virtual inherited (mem) base (stdmem) +NV_STATUS __nvoc_up_thunk_Memory_sysmemGetMemInterMapParams(struct SystemMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory)), pParams); +} + +// sysmemCheckMemInterUnmap: inline virtual inherited (mem) base (stdmem) body +NV_STATUS __nvoc_up_thunk_Memory_sysmemCheckMemInterUnmap(struct SystemMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory)), bSubdeviceHandleProvided); +} + +// sysmemGetMemoryMappingDescriptor: virtual inherited (mem) base (stdmem) +NV_STATUS __nvoc_up_thunk_Memory_sysmemGetMemoryMappingDescriptor(struct SystemMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory)), ppMemDesc); +} + +// sysmemCheckCopyPermissions: inline virtual inherited (mem) base (stdmem) body +NV_STATUS __nvoc_up_thunk_Memory_sysmemCheckCopyPermissions(struct SystemMemory *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory)), pDstGpu, pDstDevice); +} + +// sysmemIsReady: virtual inherited (mem) base (stdmem) +NV_STATUS __nvoc_up_thunk_Memory_sysmemIsReady(struct SystemMemory *pMemory, NvBool bCopyConstructorContext) { + return memIsReady((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory)), bCopyConstructorContext); +} + +// sysmemIsGpuMapAllowed: inline virtual inherited (mem) base (stdmem) body +NvBool __nvoc_up_thunk_Memory_sysmemIsGpuMapAllowed(struct 
SystemMemory *pMemory, struct OBJGPU *pGpu) { + return memIsGpuMapAllowed((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory)), pGpu); +} + +// sysmemIsExportAllowed: inline virtual inherited (mem) base (stdmem) body +NvBool __nvoc_up_thunk_Memory_sysmemIsExportAllowed(struct SystemMemory *pMemory) { + return memIsExportAllowed((struct Memory *)(((unsigned char *) pMemory) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory))); +} + +// sysmemAccessCallback: virtual inherited (rmres) base (stdmem) +NvBool __nvoc_up_thunk_RmResource_sysmemAccessCallback(struct SystemMemory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight); +} + +// sysmemShareCallback: virtual inherited (rmres) base (stdmem) +NvBool __nvoc_up_thunk_RmResource_sysmemShareCallback(struct SystemMemory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// sysmemControlSerialization_Prologue: virtual inherited (rmres) base (stdmem) +NV_STATUS __nvoc_up_thunk_RmResource_sysmemControlSerialization_Prologue(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// sysmemControlSerialization_Epilogue: virtual inherited (rmres) base (stdmem) +void __nvoc_up_thunk_RmResource_sysmemControlSerialization_Epilogue(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// sysmemControl_Prologue: virtual inherited (rmres) base (stdmem) +NV_STATUS __nvoc_up_thunk_RmResource_sysmemControl_Prologue(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// sysmemControl_Epilogue: virtual inherited (rmres) base (stdmem) +void __nvoc_up_thunk_RmResource_sysmemControl_Epilogue(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource)), pCallContext, pParams); +} + +// sysmemPreDestruct: virtual inherited (res) base (stdmem) +void __nvoc_up_thunk_RsResource_sysmemPreDestruct(struct SystemMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + 
NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// sysmemControlFilter: virtual inherited (res) base (stdmem) +NV_STATUS __nvoc_up_thunk_RsResource_sysmemControlFilter(struct SystemMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams); +} + +// sysmemIsPartialUnmapSupported: inline virtual inherited (res) base (stdmem) body +NvBool __nvoc_up_thunk_RsResource_sysmemIsPartialUnmapSupported(struct SystemMemory *pResource) { + return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// sysmemMapTo: virtual inherited (res) base (stdmem) +NV_STATUS __nvoc_up_thunk_RsResource_sysmemMapTo(struct SystemMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// sysmemUnmapFrom: virtual inherited (res) base (stdmem) +NV_STATUS __nvoc_up_thunk_RsResource_sysmemUnmapFrom(struct SystemMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams); +} + +// sysmemGetRefCount: virtual inherited (res) base (stdmem) +NvU32 __nvoc_up_thunk_RsResource_sysmemGetRefCount(struct SystemMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource))); +} + +// sysmemAddAdditionalDependants: virtual inherited (res) base (stdmem) +void __nvoc_up_thunk_RsResource_sysmemAddAdditionalDependants(struct RsClient *pClient, struct SystemMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference); +} + + +const struct NVOC_EXPORT_INFO __nvoc_export_info__SystemMemory = +{ + /*numEntries=*/ 2, + /*pExportEntries=*/ __nvoc_exported_method_def_SystemMemory +}; + +void __nvoc_dtor_StandardMemory(StandardMemory*); +void __nvoc_dtor_SystemMemory(SystemMemory *pThis) { + __nvoc_sysmemDestruct(pThis); + __nvoc_dtor_StandardMemory(&pThis->__nvoc_base_StandardMemory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_SystemMemory(SystemMemory *pThis, GpuHalspecOwner *pGpuhalspecowner) { + ChipHal *chipHal = &pGpuhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pGpuhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_StandardMemory(StandardMemory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_SystemMemory(SystemMemory 
*pThis, GpuHalspecOwner *pGpuhalspecowner, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_StandardMemory(&pThis->__nvoc_base_StandardMemory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_SystemMemory_fail_StandardMemory; + __nvoc_init_dataField_SystemMemory(pThis, pGpuhalspecowner); + + status = __nvoc_sysmemConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_SystemMemory_fail__init; + goto __nvoc_ctor_SystemMemory_exit; // Success + +__nvoc_ctor_SystemMemory_fail__init: + __nvoc_dtor_StandardMemory(&pThis->__nvoc_base_StandardMemory); +__nvoc_ctor_SystemMemory_fail_StandardMemory: +__nvoc_ctor_SystemMemory_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_SystemMemory_1(SystemMemory *pThis, GpuHalspecOwner *pGpuhalspecowner) { + ChipHal *chipHal = &pGpuhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pGpuhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + // sysmemCtrlCmdGetSurfaceNumPhysPages -- exported (id=0x3e0102) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x101u) + pThis->__sysmemCtrlCmdGetSurfaceNumPhysPages__ = &sysmemCtrlCmdGetSurfaceNumPhysPages_IMPL; +#endif + + // sysmemCtrlCmdGetSurfacePhysPages -- exported (id=0x3e0103) +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x101u) + pThis->__sysmemCtrlCmdGetSurfacePhysPages__ = &sysmemCtrlCmdGetSurfacePhysPages_IMPL; +#endif +} // End __nvoc_init_funcTable_SystemMemory_1 with approximately 2 basic block(s). + + +// Initialize vtable(s) for 28 virtual method(s). +void __nvoc_init_funcTable_SystemMemory(SystemMemory *pThis, GpuHalspecOwner *pGpuhalspecowner) { + + // Initialize vtable(s) with 2 per-object function pointer(s). + __nvoc_init_funcTable_SystemMemory_1(pThis, pGpuhalspecowner); +} + +// Initialize newly constructed object. +void __nvoc_init__SystemMemory(SystemMemory *pThis, GpuHalspecOwner *pGpuhalspecowner) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^5 + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^4 + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^4 + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource; // (rmres) super^3 + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory; // (mem) super^2 + pThis->__nvoc_pbase_StandardMemory = &pThis->__nvoc_base_StandardMemory; // (stdmem) super + pThis->__nvoc_pbase_SystemMemory = pThis; // (sysmem) this + + // Recurse to superclass initialization function(s). 
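+ // (The recursive call below initializes the StandardMemory view of the
+ // object; the metadata assignments that follow it then re-point every
+ // ancestor's __nvoc_metadata_ptr into __nvoc_metadata__SystemMemory, so
+ // the object dispatches through the most-derived vtable regardless of
+ // which base pointer it is accessed through.)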
+ __nvoc_init__StandardMemory(&pThis->__nvoc_base_StandardMemory); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__SystemMemory.metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^5 + pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__SystemMemory.metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RsResource; // (res) super^4 + pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__SystemMemory.metadata__StandardMemory.metadata__Memory.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^4 + pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__SystemMemory.metadata__StandardMemory.metadata__Memory.metadata__RmResource; // (rmres) super^3 + pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_metadata_ptr = &__nvoc_metadata__SystemMemory.metadata__StandardMemory.metadata__Memory; // (mem) super^2 + pThis->__nvoc_base_StandardMemory.__nvoc_metadata_ptr = &__nvoc_metadata__SystemMemory.metadata__StandardMemory; // (stdmem) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__SystemMemory; // (sysmem) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_SystemMemory(pThis, pGpuhalspecowner); +} + +NV_STATUS __nvoc_objCreate_SystemMemory(SystemMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) +{ + NV_STATUS status; + Object *pParentObj = NULL; + SystemMemory *pThis; + GpuHalspecOwner *pGpuhalspecowner; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(SystemMemory), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(SystemMemory)); + + pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags; + + // pParent must be a valid object that derives from a halspec owner class. + NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_ARGUMENT); + + // Link the child into the parent unless flagged not to do so. + if (!(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pGpuhalspecowner = dynamicCast(pParent, GpuHalspecOwner)) == NULL) + pGpuhalspecowner = objFindAncestorOfType(GpuHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pGpuhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init__SystemMemory(pThis, pGpuhalspecowner); + status = __nvoc_ctor_SystemMemory(pThis, pGpuhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_SystemMemory_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. 
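+ // (With NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT set,
+ // __nvoc_handleObjCreateMemAlloc skips the allocation and takes pThis
+ // from the caller-supplied *ppThis, so both already name the same
+ // storage.)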
+ *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_SystemMemory_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(SystemMemory)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_SystemMemory(SystemMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_SystemMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_system_mem_nvoc.h b/src/nvidia/generated/g_system_mem_nvoc.h new file mode 100644 index 0000000..e1f2581 --- /dev/null +++ b/src/nvidia/generated/g_system_mem_nvoc.h @@ -0,0 +1,367 @@ + +#ifndef _G_SYSTEM_MEM_NVOC_H_ +#define _G_SYSTEM_MEM_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_system_mem_nvoc.h" + +#ifndef _SYSTEM_MEMORY_H_ +#define _SYSTEM_MEMORY_H_ + +#include "mem_mgr/standard_mem.h" +#include "gpu/mem_mgr/heap_base.h" +#include "gpu/gpu_halspec.h" + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. 
+#ifdef NVOC_SYSTEM_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__SystemMemory; +struct NVOC_METADATA__StandardMemory; +struct NVOC_VTABLE__SystemMemory; + + +struct SystemMemory { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__SystemMemory *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct StandardMemory __nvoc_base_StandardMemory; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^5 + struct RsResource *__nvoc_pbase_RsResource; // res super^4 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^4 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^3 + struct Memory *__nvoc_pbase_Memory; // mem super^2 + struct StandardMemory *__nvoc_pbase_StandardMemory; // stdmem super + struct SystemMemory *__nvoc_pbase_SystemMemory; // sysmem + + // Vtable with 2 per-object function pointers + NV_STATUS (*__sysmemCtrlCmdGetSurfaceNumPhysPages__)(struct SystemMemory * /*this*/, NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS *); // exported (id=0x3e0102) + NV_STATUS (*__sysmemCtrlCmdGetSurfacePhysPages__)(struct SystemMemory * /*this*/, NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS *); // exported (id=0x3e0103) +}; + + +// Vtable with 26 per-class function pointers +struct NVOC_VTABLE__SystemMemory { + NvBool (*__sysmemCanCopy__)(struct SystemMemory * /*this*/); // virtual inherited (stdmem) base (stdmem) + NV_STATUS (*__sysmemIsDuplicate__)(struct SystemMemory * /*this*/, NvHandle, NvBool *); // virtual inherited (mem) base (stdmem) + NV_STATUS (*__sysmemGetMapAddrSpace__)(struct SystemMemory * /*this*/, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (mem) base (stdmem) + NV_STATUS (*__sysmemControl__)(struct SystemMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (mem) base (stdmem) + NV_STATUS (*__sysmemMap__)(struct SystemMemory * /*this*/, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); // virtual inherited (mem) base (stdmem) + NV_STATUS (*__sysmemUnmap__)(struct SystemMemory * /*this*/, CALL_CONTEXT *, RsCpuMapping *); // virtual inherited (mem) base (stdmem) + NV_STATUS (*__sysmemGetMemInterMapParams__)(struct SystemMemory * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (mem) base (stdmem) + NV_STATUS (*__sysmemCheckMemInterUnmap__)(struct SystemMemory * /*this*/, NvBool); // inline virtual inherited (mem) base (stdmem) body + NV_STATUS (*__sysmemGetMemoryMappingDescriptor__)(struct SystemMemory * /*this*/, MEMORY_DESCRIPTOR **); // virtual inherited (mem) base (stdmem) + NV_STATUS (*__sysmemCheckCopyPermissions__)(struct SystemMemory * /*this*/, struct OBJGPU *, struct Device *); // inline virtual inherited (mem) base (stdmem) body + NV_STATUS (*__sysmemIsReady__)(struct SystemMemory * /*this*/, NvBool); // virtual inherited (mem) base (stdmem) + NvBool (*__sysmemIsGpuMapAllowed__)(struct SystemMemory * /*this*/, struct OBJGPU *); // inline virtual inherited (mem) base (stdmem) body + NvBool (*__sysmemIsExportAllowed__)(struct SystemMemory * /*this*/); // inline virtual inherited (mem) base (stdmem) body + NvBool (*__sysmemAccessCallback__)(struct SystemMemory * /*this*/, RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base 
(stdmem) + NvBool (*__sysmemShareCallback__)(struct SystemMemory * /*this*/, RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (rmres) base (stdmem) + NV_STATUS (*__sysmemControlSerialization_Prologue__)(struct SystemMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (stdmem) + void (*__sysmemControlSerialization_Epilogue__)(struct SystemMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (stdmem) + NV_STATUS (*__sysmemControl_Prologue__)(struct SystemMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (stdmem) + void (*__sysmemControl_Epilogue__)(struct SystemMemory * /*this*/, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (stdmem) + void (*__sysmemPreDestruct__)(struct SystemMemory * /*this*/); // virtual inherited (res) base (stdmem) + NV_STATUS (*__sysmemControlFilter__)(struct SystemMemory * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (stdmem) + NvBool (*__sysmemIsPartialUnmapSupported__)(struct SystemMemory * /*this*/); // inline virtual inherited (res) base (stdmem) body + NV_STATUS (*__sysmemMapTo__)(struct SystemMemory * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (stdmem) + NV_STATUS (*__sysmemUnmapFrom__)(struct SystemMemory * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (stdmem) + NvU32 (*__sysmemGetRefCount__)(struct SystemMemory * /*this*/); // virtual inherited (res) base (stdmem) + void (*__sysmemAddAdditionalDependants__)(struct RsClient *, struct SystemMemory * /*this*/, RsResourceRef *); // virtual inherited (res) base (stdmem) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__SystemMemory { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__StandardMemory metadata__StandardMemory; + const struct NVOC_VTABLE__SystemMemory vtable; +}; + +#ifndef __NVOC_CLASS_SystemMemory_TYPEDEF__ +#define __NVOC_CLASS_SystemMemory_TYPEDEF__ +typedef struct SystemMemory SystemMemory; +#endif /* __NVOC_CLASS_SystemMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SystemMemory +#define __nvoc_class_id_SystemMemory 0x007a98 +#endif /* __nvoc_class_id_SystemMemory */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SystemMemory; + +#define __staticCast_SystemMemory(pThis) \ + ((pThis)->__nvoc_pbase_SystemMemory) + +#ifdef __nvoc_system_mem_h_disabled +#define __dynamicCast_SystemMemory(pThis) ((SystemMemory*) NULL) +#else //__nvoc_system_mem_h_disabled +#define __dynamicCast_SystemMemory(pThis) \ + ((SystemMemory*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(SystemMemory))) +#endif //__nvoc_system_mem_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_SystemMemory(SystemMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_SystemMemory(SystemMemory**, Dynamic*, NvU32, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_SystemMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_SystemMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define sysmemCtrlCmdGetSurfaceNumPhysPages_FNPTR(pStandardMemory) pStandardMemory->__sysmemCtrlCmdGetSurfaceNumPhysPages__ +#define 
sysmemCtrlCmdGetSurfaceNumPhysPages(pStandardMemory, pParams) sysmemCtrlCmdGetSurfaceNumPhysPages_DISPATCH(pStandardMemory, pParams) +#define sysmemCtrlCmdGetSurfacePhysPages_FNPTR(pStandardMemory) pStandardMemory->__sysmemCtrlCmdGetSurfacePhysPages__ +#define sysmemCtrlCmdGetSurfacePhysPages(pStandardMemory, pParams) sysmemCtrlCmdGetSurfacePhysPages_DISPATCH(pStandardMemory, pParams) +#define sysmemCanCopy_FNPTR(pStandardMemory) pStandardMemory->__nvoc_base_StandardMemory.__nvoc_metadata_ptr->vtable.__stdmemCanCopy__ +#define sysmemCanCopy(pStandardMemory) sysmemCanCopy_DISPATCH(pStandardMemory) +#define sysmemIsDuplicate_FNPTR(pMemory) pMemory->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsDuplicate__ +#define sysmemIsDuplicate(pMemory, hMemory, pDuplicate) sysmemIsDuplicate_DISPATCH(pMemory, hMemory, pDuplicate) +#define sysmemGetMapAddrSpace_FNPTR(pMemory) pMemory->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMapAddrSpace__ +#define sysmemGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) sysmemGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define sysmemControl_FNPTR(pMemory) pMemory->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memControl__ +#define sysmemControl(pMemory, pCallContext, pParams) sysmemControl_DISPATCH(pMemory, pCallContext, pParams) +#define sysmemMap_FNPTR(pMemory) pMemory->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memMap__ +#define sysmemMap(pMemory, pCallContext, pParams, pCpuMapping) sysmemMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define sysmemUnmap_FNPTR(pMemory) pMemory->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memUnmap__ +#define sysmemUnmap(pMemory, pCallContext, pCpuMapping) sysmemUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define sysmemGetMemInterMapParams_FNPTR(pMemory) pMemory->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMemInterMapParams__ +#define sysmemGetMemInterMapParams(pMemory, pParams) sysmemGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define sysmemCheckMemInterUnmap_FNPTR(pMemory) pMemory->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memCheckMemInterUnmap__ +#define sysmemCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) sysmemCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define sysmemGetMemoryMappingDescriptor_FNPTR(pMemory) pMemory->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memGetMemoryMappingDescriptor__ +#define sysmemGetMemoryMappingDescriptor(pMemory, ppMemDesc) sysmemGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define sysmemCheckCopyPermissions_FNPTR(pMemory) pMemory->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memCheckCopyPermissions__ +#define sysmemCheckCopyPermissions(pMemory, pDstGpu, pDstDevice) sysmemCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, pDstDevice) +#define sysmemIsReady_FNPTR(pMemory) pMemory->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsReady__ +#define sysmemIsReady(pMemory, bCopyConstructorContext) sysmemIsReady_DISPATCH(pMemory, bCopyConstructorContext) +#define sysmemIsGpuMapAllowed_FNPTR(pMemory) pMemory->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsGpuMapAllowed__ +#define sysmemIsGpuMapAllowed(pMemory, pGpu) 
sysmemIsGpuMapAllowed_DISPATCH(pMemory, pGpu) +#define sysmemIsExportAllowed_FNPTR(pMemory) pMemory->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_metadata_ptr->vtable.__memIsExportAllowed__ +#define sysmemIsExportAllowed(pMemory) sysmemIsExportAllowed_DISPATCH(pMemory) +#define sysmemAccessCallback_FNPTR(pResource) pResource->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define sysmemAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) sysmemAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define sysmemShareCallback_FNPTR(pResource) pResource->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresShareCallback__ +#define sysmemShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) sysmemShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define sysmemControlSerialization_Prologue_FNPTR(pResource) pResource->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define sysmemControlSerialization_Prologue(pResource, pCallContext, pParams) sysmemControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define sysmemControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define sysmemControlSerialization_Epilogue(pResource, pCallContext, pParams) sysmemControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define sysmemControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define sysmemControl_Prologue(pResource, pCallContext, pParams) sysmemControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define sysmemControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define sysmemControl_Epilogue(pResource, pCallContext, pParams) sysmemControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define sysmemPreDestruct_FNPTR(pResource) pResource->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define sysmemPreDestruct(pResource) sysmemPreDestruct_DISPATCH(pResource) +#define sysmemControlFilter_FNPTR(pResource) pResource->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define sysmemControlFilter(pResource, pCallContext, pParams) sysmemControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define sysmemIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define sysmemIsPartialUnmapSupported(pResource) sysmemIsPartialUnmapSupported_DISPATCH(pResource) +#define sysmemMapTo_FNPTR(pResource) pResource->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define sysmemMapTo(pResource, pParams) sysmemMapTo_DISPATCH(pResource, pParams) +#define sysmemUnmapFrom_FNPTR(pResource) 
pResource->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define sysmemUnmapFrom(pResource, pParams) sysmemUnmapFrom_DISPATCH(pResource, pParams) +#define sysmemGetRefCount_FNPTR(pResource) pResource->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define sysmemGetRefCount(pResource) sysmemGetRefCount_DISPATCH(pResource) +#define sysmemAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define sysmemAddAdditionalDependants(pClient, pResource, pReference) sysmemAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) + +// Dispatch functions +static inline NV_STATUS sysmemCtrlCmdGetSurfaceNumPhysPages_DISPATCH(struct SystemMemory *pStandardMemory, NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS *pParams) { + return pStandardMemory->__sysmemCtrlCmdGetSurfaceNumPhysPages__(pStandardMemory, pParams); +} + +static inline NV_STATUS sysmemCtrlCmdGetSurfacePhysPages_DISPATCH(struct SystemMemory *pStandardMemory, NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS *pParams) { + return pStandardMemory->__sysmemCtrlCmdGetSurfacePhysPages__(pStandardMemory, pParams); +} + +static inline NvBool sysmemCanCopy_DISPATCH(struct SystemMemory *pStandardMemory) { + return pStandardMemory->__nvoc_metadata_ptr->vtable.__sysmemCanCopy__(pStandardMemory); +} + +static inline NV_STATUS sysmemIsDuplicate_DISPATCH(struct SystemMemory *pMemory, NvHandle hMemory, NvBool *pDuplicate) { + return pMemory->__nvoc_metadata_ptr->vtable.__sysmemIsDuplicate__(pMemory, hMemory, pDuplicate); +} + +static inline NV_STATUS sysmemGetMapAddrSpace_DISPATCH(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__nvoc_metadata_ptr->vtable.__sysmemGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS sysmemControl_DISPATCH(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__nvoc_metadata_ptr->vtable.__sysmemControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS sysmemMap_DISPATCH(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__nvoc_metadata_ptr->vtable.__sysmemMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS sysmemUnmap_DISPATCH(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__nvoc_metadata_ptr->vtable.__sysmemUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS sysmemGetMemInterMapParams_DISPATCH(struct SystemMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__nvoc_metadata_ptr->vtable.__sysmemGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS sysmemCheckMemInterUnmap_DISPATCH(struct SystemMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__nvoc_metadata_ptr->vtable.__sysmemCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS sysmemGetMemoryMappingDescriptor_DISPATCH(struct SystemMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return 
pMemory->__nvoc_metadata_ptr->vtable.__sysmemGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS sysmemCheckCopyPermissions_DISPATCH(struct SystemMemory *pMemory, struct OBJGPU *pDstGpu, struct Device *pDstDevice) { + return pMemory->__nvoc_metadata_ptr->vtable.__sysmemCheckCopyPermissions__(pMemory, pDstGpu, pDstDevice); +} + +static inline NV_STATUS sysmemIsReady_DISPATCH(struct SystemMemory *pMemory, NvBool bCopyConstructorContext) { + return pMemory->__nvoc_metadata_ptr->vtable.__sysmemIsReady__(pMemory, bCopyConstructorContext); +} + +static inline NvBool sysmemIsGpuMapAllowed_DISPATCH(struct SystemMemory *pMemory, struct OBJGPU *pGpu) { + return pMemory->__nvoc_metadata_ptr->vtable.__sysmemIsGpuMapAllowed__(pMemory, pGpu); +} + +static inline NvBool sysmemIsExportAllowed_DISPATCH(struct SystemMemory *pMemory) { + return pMemory->__nvoc_metadata_ptr->vtable.__sysmemIsExportAllowed__(pMemory); +} + +static inline NvBool sysmemAccessCallback_DISPATCH(struct SystemMemory *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__sysmemAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool sysmemShareCallback_DISPATCH(struct SystemMemory *pResource, RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvoc_metadata_ptr->vtable.__sysmemShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS sysmemControlSerialization_Prologue_DISPATCH(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__sysmemControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void sysmemControlSerialization_Epilogue_DISPATCH(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__sysmemControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS sysmemControl_Prologue_DISPATCH(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__sysmemControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void sysmemControl_Epilogue_DISPATCH(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__sysmemControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline void sysmemPreDestruct_DISPATCH(struct SystemMemory *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__sysmemPreDestruct__(pResource); +} + +static inline NV_STATUS sysmemControlFilter_DISPATCH(struct SystemMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__sysmemControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool sysmemIsPartialUnmapSupported_DISPATCH(struct SystemMemory *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__sysmemIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS sysmemMapTo_DISPATCH(struct SystemMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__sysmemMapTo__(pResource, pParams); +} + +static 
inline NV_STATUS sysmemUnmapFrom_DISPATCH(struct SystemMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__sysmemUnmapFrom__(pResource, pParams); +} + +static inline NvU32 sysmemGetRefCount_DISPATCH(struct SystemMemory *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__sysmemGetRefCount__(pResource); +} + +static inline void sysmemAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct SystemMemory *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__sysmemAddAdditionalDependants__(pClient, pResource, pReference); +} + +NV_STATUS sysmemInitAllocRequest_SOC(struct OBJGPU *pGpu, struct SystemMemory *pSystemMemory, MEMORY_ALLOCATION_REQUEST *pAllocRequest); + + +#ifdef __nvoc_system_mem_h_disabled +static inline NV_STATUS sysmemInitAllocRequest(struct OBJGPU *pGpu, struct SystemMemory *pSystemMemory, MEMORY_ALLOCATION_REQUEST *pAllocRequest) { + NV_ASSERT_FAILED_PRECOMP("SystemMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_system_mem_h_disabled +#define sysmemInitAllocRequest(pGpu, pSystemMemory, pAllocRequest) sysmemInitAllocRequest_SOC(pGpu, pSystemMemory, pAllocRequest) +#endif //__nvoc_system_mem_h_disabled + +#define sysmemInitAllocRequest_HAL(pGpu, pSystemMemory, pAllocRequest) sysmemInitAllocRequest(pGpu, pSystemMemory, pAllocRequest) + +NV_STATUS sysmemCtrlCmdGetSurfaceNumPhysPages_IMPL(struct SystemMemory *pStandardMemory, NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS *pParams); + +NV_STATUS sysmemCtrlCmdGetSurfacePhysPages_IMPL(struct SystemMemory *pStandardMemory, NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS *pParams); + +NV_STATUS sysmemConstruct_IMPL(struct SystemMemory *arg_pStandardMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_sysmemConstruct(arg_pStandardMemory, arg_pCallContext, arg_pParams) sysmemConstruct_IMPL(arg_pStandardMemory, arg_pCallContext, arg_pParams) +void sysmemDestruct_IMPL(struct SystemMemory *pSystemMemory); + +#define __nvoc_sysmemDestruct(pSystemMemory) sysmemDestruct_IMPL(pSystemMemory) +#undef PRIVATE_FIELD + + +NV_STATUS sysmemAllocResources(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, FB_ALLOC_INFO *pFbAllocInfo, + struct SystemMemory *pSystemMemory); +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_SYSTEM_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_system_nvoc.c b/src/nvidia/generated/g_system_nvoc.c new file mode 100644 index 0000000..0442bbb --- /dev/null +++ b/src/nvidia/generated/g_system_nvoc.c @@ -0,0 +1,238 @@ +#define NVOC_SYSTEM_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_system_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x40e2c8 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJSYS; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE; + +// Forward declarations for OBJSYS +void __nvoc_init__Object(Object*); +void __nvoc_init__OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_init__OBJSYS(OBJSYS*); +void __nvoc_init_funcTable_OBJSYS(OBJSYS*); +NV_STATUS 
__nvoc_ctor_OBJSYS(OBJSYS*); +void __nvoc_init_dataField_OBJSYS(OBJSYS*); +void __nvoc_dtor_OBJSYS(OBJSYS*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__OBJSYS; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJSYS; + +// Down-thunk(s) to bridge OBJSYS methods from ancestors (if any) + +// Up-thunk(s) to bridge OBJSYS methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJSYS = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJSYS), + /*classId=*/ classId(OBJSYS), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJSYS", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJSYS, + /*pCastInfo=*/ &__nvoc_castinfo__OBJSYS, + /*pExportInfo=*/ &__nvoc_export_info__OBJSYS +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__OBJSYS __nvoc_metadata__OBJSYS = { + .rtti.pClassDef = &__nvoc_class_def_OBJSYS, // (sys) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJSYS, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(OBJSYS, __nvoc_base_Object), + .metadata__OBJTRACEABLE.rtti.pClassDef = &__nvoc_class_def_OBJTRACEABLE, // (traceable) super + .metadata__OBJTRACEABLE.rtti.dtor = &__nvoc_destructFromBase, + .metadata__OBJTRACEABLE.rtti.offset = NV_OFFSETOF(OBJSYS, __nvoc_base_OBJTRACEABLE), + + .vtable.__sysCaptureState__ = &sysCaptureState_IMPL, // virtual +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__OBJSYS = { + .numRelatives = 3, + .relatives = { + &__nvoc_metadata__OBJSYS.rtti, // [0]: (sys) this + &__nvoc_metadata__OBJSYS.metadata__Object.rtti, // [1]: (obj) super + &__nvoc_metadata__OBJSYS.metadata__OBJTRACEABLE.rtti, // [2]: (traceable) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJSYS = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_dtor_OBJSYS(OBJSYS *pThis) { + __nvoc_sysDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJSYS(OBJSYS *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + pThis->setProperty(pThis, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE, ((0) || (1) || (0))); + pThis->setProperty(pThis, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT, ((1) && !0)); + pThis->setProperty(pThis, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS, (0)); + pThis->setProperty(pThis, PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED, ((0) || (0))); + pThis->setProperty(pThis, PDB_PROP_SYS_IS_AGGRESSIVE_GC6_ENABLED, (0)); + pThis->setProperty(pThis, PDB_PROP_SYS_PRIORITY_BOOST, (0)); + pThis->setProperty(pThis, PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US, 16 * 1000); + pThis->setProperty(pThis, PDB_PROP_SYS_RM_LOCK_TIME_COLLECT, NV_FALSE); + pThis->setProperty(pThis, PDB_PROP_SYS_ENABLE_RM_TEST_ONLY_CODE, NV_FALSE); + pThis->setProperty(pThis, PDB_PROP_SYS_ROUTE_TO_PHYSICAL_LOCK_BYPASS, NV_TRUE); + pThis->setProperty(pThis, PDB_PROP_SYS_ENABLE_FORCE_SHARED_LOCK, NV_TRUE); + + pThis->bUseDeferredClientListFree = NV_FALSE; + + pThis->clientListDeferredFreeLimit = 0; + pThis->setProperty(pThis, PDB_PROP_SYS_RECOVERY_REBOOT_REQUIRED, NV_FALSE); +} + 
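+// Illustrative sketch (not NVOC output): the setProperty() calls above are
+// how NVOC bakes per-build defaults into the PDB properties; literals such
+// as ((0) || (1) || (0)) are RMCFG feature expressions the generator has
+// already evaluated for this build. A typical consumer reads one back via
+// the matching accessor, e.g. (assuming the usual getProperty() accessor
+// from the NVOC prelude, and SYS_GET_INSTANCE() as defined in system.h):
+//
+//     OBJSYS *pSys = SYS_GET_INSTANCE();
+//     if (pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE))
+//     {
+//         // validate client handles before dispatching the RM API call
+//     }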
+NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJTRACEABLE(OBJTRACEABLE* ); +NV_STATUS __nvoc_ctor_OBJSYS(OBJSYS *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJSYS_fail_Object; + status = __nvoc_ctor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + if (status != NV_OK) goto __nvoc_ctor_OBJSYS_fail_OBJTRACEABLE; + __nvoc_init_dataField_OBJSYS(pThis); + + status = __nvoc_sysConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_OBJSYS_fail__init; + goto __nvoc_ctor_OBJSYS_exit; // Success + +__nvoc_ctor_OBJSYS_fail__init: + __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); +__nvoc_ctor_OBJSYS_fail_OBJTRACEABLE: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJSYS_fail_Object: +__nvoc_ctor_OBJSYS_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_OBJSYS_1(OBJSYS *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_OBJSYS_1 + + +// Initialize vtable(s) for 1 virtual method(s). +void __nvoc_init_funcTable_OBJSYS(OBJSYS *pThis) { + __nvoc_init_funcTable_OBJSYS_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__OBJSYS(OBJSYS *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_OBJTRACEABLE = &pThis->__nvoc_base_OBJTRACEABLE; // (traceable) super + pThis->__nvoc_pbase_OBJSYS = pThis; // (sys) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Object(&pThis->__nvoc_base_Object); + __nvoc_init__OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__OBJSYS.metadata__Object; // (obj) super + pThis->__nvoc_base_OBJTRACEABLE.__nvoc_metadata_ptr = &__nvoc_metadata__OBJSYS.metadata__OBJTRACEABLE; // (traceable) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__OBJSYS; // (sys) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_OBJSYS(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJSYS(OBJSYS **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + OBJSYS *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(OBJSYS), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(OBJSYS)); + + pThis->__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__OBJSYS(pThis); + status = __nvoc_ctor_OBJSYS(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJSYS_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. + *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_OBJSYS_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. 
+ if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(OBJSYS)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJSYS(OBJSYS **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJSYS(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_system_nvoc.h b/src/nvidia/generated/g_system_nvoc.h new file mode 100644 index 0000000..f8777f2 --- /dev/null +++ b/src/nvidia/generated/g_system_nvoc.h @@ -0,0 +1,725 @@ + +#ifndef _G_SYSTEM_NVOC_H_ +#define _G_SYSTEM_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once +#include "g_system_nvoc.h" + +#ifndef SYSTEM_H +#define SYSTEM_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Defines and structures used for the System Object. * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "nvoc/object.h" +#include "nvlimits.h" // NV_MAX_DEVICES +#include "ctrl/ctrl2080/ctrl2080gpu.h" +#include "diagnostics/traceable.h" +#include "nvCpuUuid.h" +#include "os/capability.h" +#include "containers/btree.h" + +#define SYS_GET_INSTANCE() (g_pSys) +#define SYS_GET_GPUMGR(p) ((p)->pGpuMgr) +#define SYS_GET_GSYNCMGR(p) ((p)->pGsyncMgr) +#define SYS_GET_VGPUMGR(p) ((p)->pVgpuMgr) +#define SYS_GET_KERNEL_VGPUMGR(p) (RMCFG_FEATURE_KERNEL_RM ? (p)->pKernelVgpuMgr : NULL) +#define SYS_GET_OS(p) sysGetOs((p)) +#define SYS_GET_PFM(p) ((p)->pPfm) +#define SYS_GET_CL(p) ((p)->pCl) +#define SYS_GET_SWINSTR(p) ((p)->pSwInstr) +#define SYS_GET_GPUACCT(p) ((p)->pGpuAcct) +#define SYS_GET_PFM_REQ_HNDLR(p) ((p)->pPlatformRequestHandler) +#define SYS_GET_RCDB(p) ((p)->pRcDB) +#define SYS_GET_VMM(p) (RMCFG_MODULE_VMM ? 
(p)->pVmm : NULL) +#define SYS_GET_HYPERVISOR(p) ((p)->pHypervisor) +#define SYS_GET_VRRMGR(p) ((p)->pVrrMgr) +#define SYS_GET_GPUBOOSTMGR(p) ((p)->pGpuBoostMgr) +#define SYS_GET_DISPMGR(p) ((p)->pDispMgr) +#define SYS_GET_FABRIC(p) ((p)->pFabric) +#define SYS_GET_GPUDB(p) ((p)->pGpuDb) +#define SYS_GET_HALMGR(p) ((p)->pHalMgr) + +#if RMCFG_FEATURE_GSPRM_BULLSEYE || defined(GSPRM_BULLSEYE_ENABLE) +#define SYS_GET_CODE_COV_MGR(p) ((p)->pCodeCovMgr) +#else +#define SYS_GET_CODE_COV_MGR(p) (NULL) +#endif + +#define IsMobile(p) 0 + +// Child class forward declarations. + +struct OBJPFM; + +#ifndef __NVOC_CLASS_OBJPFM_TYPEDEF__ +#define __NVOC_CLASS_OBJPFM_TYPEDEF__ +typedef struct OBJPFM OBJPFM; +#endif /* __NVOC_CLASS_OBJPFM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJPFM +#define __nvoc_class_id_OBJPFM 0xb543ae +#endif /* __nvoc_class_id_OBJPFM */ + + + +struct OBJVMM; + +#ifndef __NVOC_CLASS_OBJVMM_TYPEDEF__ +#define __NVOC_CLASS_OBJVMM_TYPEDEF__ +typedef struct OBJVMM OBJVMM; +#endif /* __NVOC_CLASS_OBJVMM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVMM +#define __nvoc_class_id_OBJVMM 0xa030ab +#endif /* __nvoc_class_id_OBJVMM */ + + + +struct OBJHYPERVISOR; + +#ifndef __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +#define __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +typedef struct OBJHYPERVISOR OBJHYPERVISOR; +#endif /* __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHYPERVISOR +#define __nvoc_class_id_OBJHYPERVISOR 0x33c1ba +#endif /* __nvoc_class_id_OBJHYPERVISOR */ + + + +struct OBJGPUMGR; + +#ifndef __NVOC_CLASS_OBJGPUMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUMGR_TYPEDEF__ +typedef struct OBJGPUMGR OBJGPUMGR; +#endif /* __NVOC_CLASS_OBJGPUMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUMGR +#define __nvoc_class_id_OBJGPUMGR 0xcf1b25 +#endif /* __nvoc_class_id_OBJGPUMGR */ + + + +struct OBJDISPMGR; + +#ifndef __NVOC_CLASS_OBJDISPMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJDISPMGR_TYPEDEF__ +typedef struct OBJDISPMGR OBJDISPMGR; +#endif /* __NVOC_CLASS_OBJDISPMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDISPMGR +#define __nvoc_class_id_OBJDISPMGR 0x69ad03 +#endif /* __nvoc_class_id_OBJDISPMGR */ + + + +struct PlatformRequestHandler; + +#ifndef __NVOC_CLASS_PlatformRequestHandler_TYPEDEF__ +#define __NVOC_CLASS_PlatformRequestHandler_TYPEDEF__ +typedef struct PlatformRequestHandler PlatformRequestHandler; +#endif /* __NVOC_CLASS_PlatformRequestHandler_TYPEDEF__ */ + +#ifndef __nvoc_class_id_PlatformRequestHandler +#define __nvoc_class_id_PlatformRequestHandler 0x641a7f +#endif /* __nvoc_class_id_PlatformRequestHandler */ + + + +struct GpuAccounting; + +#ifndef __NVOC_CLASS_GpuAccounting_TYPEDEF__ +#define __NVOC_CLASS_GpuAccounting_TYPEDEF__ +typedef struct GpuAccounting GpuAccounting; +#endif /* __NVOC_CLASS_GpuAccounting_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuAccounting +#define __nvoc_class_id_GpuAccounting 0x0f1350 +#endif /* __nvoc_class_id_GpuAccounting */ + + + +struct OBJHALMGR; + +#ifndef __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +typedef struct OBJHALMGR OBJHALMGR; +#endif /* __NVOC_CLASS_OBJHALMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHALMGR +#define __nvoc_class_id_OBJHALMGR 0xbf26de +#endif /* __nvoc_class_id_OBJHALMGR */ + + + +struct Fabric; + +#ifndef __NVOC_CLASS_Fabric_TYPEDEF__ +#define __NVOC_CLASS_Fabric_TYPEDEF__ +typedef struct Fabric Fabric; +#endif /* __NVOC_CLASS_Fabric_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Fabric +#define __nvoc_class_id_Fabric 0x0ac791 +#endif /* __nvoc_class_id_Fabric */ + + + +struct GpuDb; + 
+#ifndef __NVOC_CLASS_GpuDb_TYPEDEF__ +#define __NVOC_CLASS_GpuDb_TYPEDEF__ +typedef struct GpuDb GpuDb; +#endif /* __NVOC_CLASS_GpuDb_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuDb +#define __nvoc_class_id_GpuDb 0xcdd250 +#endif /* __nvoc_class_id_GpuDb */ + + + +struct OBJCL; + +#ifndef __NVOC_CLASS_OBJCL_TYPEDEF__ +#define __NVOC_CLASS_OBJCL_TYPEDEF__ +typedef struct OBJCL OBJCL; +#endif /* __NVOC_CLASS_OBJCL_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJCL +#define __nvoc_class_id_OBJCL 0x547dbb +#endif /* __nvoc_class_id_OBJCL */ + + + +struct KernelVgpuMgr; + +#ifndef __NVOC_CLASS_KernelVgpuMgr_TYPEDEF__ +#define __NVOC_CLASS_KernelVgpuMgr_TYPEDEF__ +typedef struct KernelVgpuMgr KernelVgpuMgr; +#endif /* __NVOC_CLASS_KernelVgpuMgr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelVgpuMgr +#define __nvoc_class_id_KernelVgpuMgr 0xa793dd +#endif /* __nvoc_class_id_KernelVgpuMgr */ + + + +struct OBJVRRMGR; + +#ifndef __NVOC_CLASS_OBJVRRMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJVRRMGR_TYPEDEF__ +typedef struct OBJVRRMGR OBJVRRMGR; +#endif /* __NVOC_CLASS_OBJVRRMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVRRMGR +#define __nvoc_class_id_OBJVRRMGR 0x442804 +#endif /* __nvoc_class_id_OBJVRRMGR */ + + + +struct OBJGPUBOOSTMGR; + +#ifndef __NVOC_CLASS_OBJGPUBOOSTMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUBOOSTMGR_TYPEDEF__ +typedef struct OBJGPUBOOSTMGR OBJGPUBOOSTMGR; +#endif /* __NVOC_CLASS_OBJGPUBOOSTMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUBOOSTMGR +#define __nvoc_class_id_OBJGPUBOOSTMGR 0x9f6bbf +#endif /* __nvoc_class_id_OBJGPUBOOSTMGR */ + + + +struct OBJGSYNCMGR; + +#ifndef __NVOC_CLASS_OBJGSYNCMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJGSYNCMGR_TYPEDEF__ +typedef struct OBJGSYNCMGR OBJGSYNCMGR; +#endif /* __NVOC_CLASS_OBJGSYNCMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGSYNCMGR +#define __nvoc_class_id_OBJGSYNCMGR 0xd07fd0 +#endif /* __nvoc_class_id_OBJGSYNCMGR */ + + + +struct OBJVGPUMGR; + +#ifndef __NVOC_CLASS_OBJVGPUMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJVGPUMGR_TYPEDEF__ +typedef struct OBJVGPUMGR OBJVGPUMGR; +#endif /* __NVOC_CLASS_OBJVGPUMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVGPUMGR +#define __nvoc_class_id_OBJVGPUMGR 0x0e9beb +#endif /* __nvoc_class_id_OBJVGPUMGR */ + + + +struct OBJOS; + +#ifndef __NVOC_CLASS_OBJOS_TYPEDEF__ +#define __NVOC_CLASS_OBJOS_TYPEDEF__ +typedef struct OBJOS OBJOS; +#endif /* __NVOC_CLASS_OBJOS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOS +#define __nvoc_class_id_OBJOS 0xaa1d70 +#endif /* __nvoc_class_id_OBJOS */ + + + +struct CodeCoverageManager; + +#ifndef __NVOC_CLASS_CodeCoverageManager_TYPEDEF__ +#define __NVOC_CLASS_CodeCoverageManager_TYPEDEF__ +typedef struct CodeCoverageManager CodeCoverageManager; +#endif /* __NVOC_CLASS_CodeCoverageManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_CodeCoverageManager +#define __nvoc_class_id_CodeCoverageManager 0x62cbfb +#endif /* __nvoc_class_id_CodeCoverageManager */ + + + +typedef struct OBJRCDB Journal; + +/*! + * This structure contains static system configuration data. This structure + * will become a typesafe structure that can be exchanged with code + * running on GSP. + */ +typedef struct SYS_STATIC_CONFIG +{ + /*! Indicates if the GPU is in a notebook or not. */ + NvBool bIsNotebook; + + /*! Initial SLI configuration flags */ + NvU32 initialSliFlags; + + /*! Indicates confidential compute OS support is enabled or not */ + NvBool bOsCCEnabled; + + /*! Indicates SEV-SNP confidential compute OS support is enabled or not */ + NvBool bOsCCSevSnpEnabled; + + /*! 
Indicates Memory Encryption OS support is enabled or not */ + NvBool bOsCCSmeEnabled; + + /*! Indicates SEV-SNP vTOM confidential compute OS support is enabled or not */ + NvBool bOsCCSnpVtomEnabled; + + /*! Indicates Intel TDX confidential compute OS support is enabled or not */ + NvBool bOsCCTdxEnabled; +} SYS_STATIC_CONFIG; + +typedef struct +{ + NvBool bInitialized; // Set to true once we id the CPU + NvU32 type; // NV0000_CTRL_SYSTEM_CPU_TYPE value + NvU32 caps; // NV0000_CTRL_SYSTEM_CPU_CAP value + NvU32 brandId; // CPU Brand ID + NvU32 clock; + NvU32 l1DataCacheSize; // L1 data (or unified) cache size (KB) + NvU32 l2DataCacheSize; // L2 data (or unified) cache size (KB) + NvU32 dataCacheLineSize; // Bytes per line in the L1 data cache + NvU64 hostPageSize; // Native host os page size (4k/64k/etc) + NvU32 numPhysicalCpus; // Number of physical cpus + NvU32 numLogicalCpus; // Total number of logical cpus + NvU32 maxLogicalCpus; // Max Number of Cores on the System + char name[52]; // Embedded processor name; only filled + // filled in if CPU has embedded name + NvU32 family; // Vendor defined Family/extended Family + NvU32 model; // Vendor defined Model/extended Model + NvU32 coresOnDie; // # of cores on the die (0 if unknown) + NvU32 platformID; // Chip package type + NvU8 stepping; // Silicon stepping + NvBool bSEVCapable; // Is capable of SEV (Secure Encrypted Virtualization) + NvU32 maxEncryptedGuests; // Max # of encrypted guests supported +} SYS_CPU_INFO; + +typedef struct +{ + NvU32 strapUser; + NvU32 genRegsVse2VidsysEn; + NvU32 genRegsMiscIoAdr; +} SYS_VGA_POST_STATE; + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_SYSTEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__OBJSYS; +struct NVOC_METADATA__Object; +struct NVOC_METADATA__OBJTRACEABLE; +struct NVOC_VTABLE__OBJSYS; + + +struct OBJSYS { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__OBJSYS *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. 
superclass or base class) objects + struct Object __nvoc_base_Object; + struct OBJTRACEABLE __nvoc_base_OBJTRACEABLE; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct OBJTRACEABLE *__nvoc_pbase_OBJTRACEABLE; // traceable super + struct OBJSYS *__nvoc_pbase_OBJSYS; // sys + + // 35 PDB properties + NvBool PDB_PROP_SYS_SBIOS_NVIF_POWERMIZER_LIMIT; + NvBool PDB_PROP_SYS_MXM_THERMAL_CONTROL_PRESENT; + NvBool PDB_PROP_SYS_POWER_BATTERY; + NvBool PDB_PROP_SYS_NVIF_INIT_DONE; + NvBool PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED; + NvBool PDB_PROP_SYS_PRIMARY_VBIOS_STATE_SAVED; + NvBool PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS; + NvBool PDB_PROP_SYS_ENABLE_STREAM_MEMOPS; + NvBool PDB_PROP_SYS_IS_UEFI; + NvBool PDB_PROP_SYS_WIN_PRIMARY_DEVICE_MARKED; + NvBool PDB_PROP_SYS_IS_GSYNC_ENABLED; + NvBool PDB_PROP_SYS_NVSWITCH_IS_PRESENT; + NvBool PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED; + NvBool PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED; + NvBool PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED; + NvBool PDB_PROP_SYS_HASWELL_CPU_C0_STEPPING; + NvBool PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE; + NvBool PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT; + NvBool PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS; + NvBool PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED; + NvBool PDB_PROP_SYS_IS_AGGRESSIVE_GC6_ENABLED; + NvBool PDB_PROP_SYS_IN_OCA_DATA_COLLECTION; + NvBool PDB_PROP_SYS_DEBUGGER_DISABLED; + NvBool PDB_PROP_SYS_PRIORITY_BOOST; + NvU32 PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US; + NvBool PDB_PROP_SYS_BUGCHECK_ON_TIMEOUT; + NvBool PDB_PROP_SYS_RM_LOCK_TIME_COLLECT; + NvBool PDB_PROP_SYS_ENABLE_RM_TEST_ONLY_CODE; + NvBool PDB_PROP_SYS_ROUTE_TO_PHYSICAL_LOCK_BYPASS; + NvBool PDB_PROP_SYS_IS_QSYNC_FW_REVISION_CHECK_DISABLED; + NvBool PDB_PROP_SYS_GPU_LOCK_MIDPATH_ENABLED; + NvBool PDB_PROP_SYS_ENABLE_FORCE_SHARED_LOCK; + NvBool PDB_PROP_SYS_DESTRUCTING; + NvBool PDB_PROP_SYS_ALLOW_UNKNOWN_4PART_IDS; + NvBool PDB_PROP_SYS_RECOVERY_REBOOT_REQUIRED; + + // Data members + NvU32 apiLockMask; + NvU32 apiLockModuleMask; + NvU32 gpuLockModuleMask; + NvU32 pwrTransitionTimeoutOverride; + SYS_STATIC_CONFIG staticConfig; + NvU32 debugFlags; + SYS_CPU_INFO cpuInfo; + SYS_VGA_POST_STATE vgaPostState; + NvBool gpuHotPlugPollingActive[32]; + NvU32 gridSwPkg; + void *pSema; + NvU32 binMask; + NvU64 rmInstanceId; + NvU32 currentChannelUniqueId; + NvU32 currentVasUniqueId; + NvBool bUseDeferredClientListFree; + NvU32 clientListDeferredFreeLimit; + OS_RM_CAPS *pOsRmCaps; + struct OBJGPUMGR *pGpuMgr; + struct OBJGSYNCMGR *pGsyncMgr; + struct OBJVGPUMGR *pVgpuMgr; + struct KernelVgpuMgr *pKernelVgpuMgr; + struct OBJOS *pOS; + struct OBJCL *pCl; + struct OBJPFM *pPfm; + struct GpuAccounting *pGpuAcct; + struct PlatformRequestHandler *pPlatformRequestHandler; + Journal *pRcDB; + struct OBJVMM *pVmm; + struct OBJHYPERVISOR *pHypervisor; + struct OBJVRRMGR *pVrrMgr; + struct OBJGPUBOOSTMGR *pGpuBoostMgr; + struct OBJDISPMGR *pDispMgr; + struct OBJHALMGR *pHalMgr; + struct Fabric *pFabric; + struct GpuDb *pGpuDb; + struct CodeCoverageManager *pCodeCovMgr; + NvBool bIsGridBuild; +}; + + +// Vtable with 1 per-class function pointer +struct NVOC_VTABLE__OBJSYS { + NV_STATUS (*__sysCaptureState__)(struct OBJSYS * /*this*/); // virtual +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__OBJSYS { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; + const struct NVOC_METADATA__OBJTRACEABLE metadata__OBJTRACEABLE; + 
const struct NVOC_VTABLE__OBJSYS vtable; +}; + +#ifndef __NVOC_CLASS_OBJSYS_TYPEDEF__ +#define __NVOC_CLASS_OBJSYS_TYPEDEF__ +typedef struct OBJSYS OBJSYS; +#endif /* __NVOC_CLASS_OBJSYS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSYS +#define __nvoc_class_id_OBJSYS 0x40e2c8 +#endif /* __nvoc_class_id_OBJSYS */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJSYS; + +#define __staticCast_OBJSYS(pThis) \ + ((pThis)->__nvoc_pbase_OBJSYS) + +#ifdef __nvoc_system_h_disabled +#define __dynamicCast_OBJSYS(pThis) ((OBJSYS*) NULL) +#else //__nvoc_system_h_disabled +#define __dynamicCast_OBJSYS(pThis) \ + ((OBJSYS*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJSYS))) +#endif //__nvoc_system_h_disabled + +// Property macros +#define PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED_BASE_CAST +#define PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED_BASE_NAME PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED +#define PDB_PROP_SYS_GPU_LOCK_MIDPATH_ENABLED_BASE_CAST +#define PDB_PROP_SYS_GPU_LOCK_MIDPATH_ENABLED_BASE_NAME PDB_PROP_SYS_GPU_LOCK_MIDPATH_ENABLED +#define PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS_BASE_CAST +#define PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS_BASE_NAME PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS +#define PDB_PROP_SYS_POWER_BATTERY_BASE_CAST +#define PDB_PROP_SYS_POWER_BATTERY_BASE_NAME PDB_PROP_SYS_POWER_BATTERY +#define PDB_PROP_SYS_IS_QSYNC_FW_REVISION_CHECK_DISABLED_BASE_CAST +#define PDB_PROP_SYS_IS_QSYNC_FW_REVISION_CHECK_DISABLED_BASE_NAME PDB_PROP_SYS_IS_QSYNC_FW_REVISION_CHECK_DISABLED +#define PDB_PROP_SYS_ROUTE_TO_PHYSICAL_LOCK_BYPASS_BASE_CAST +#define PDB_PROP_SYS_ROUTE_TO_PHYSICAL_LOCK_BYPASS_BASE_NAME PDB_PROP_SYS_ROUTE_TO_PHYSICAL_LOCK_BYPASS +#define PDB_PROP_SYS_NVIF_INIT_DONE_BASE_CAST +#define PDB_PROP_SYS_NVIF_INIT_DONE_BASE_NAME PDB_PROP_SYS_NVIF_INIT_DONE +#define PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT_BASE_CAST +#define PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT_BASE_NAME PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT +#define PDB_PROP_SYS_DESTRUCTING_BASE_CAST +#define PDB_PROP_SYS_DESTRUCTING_BASE_NAME PDB_PROP_SYS_DESTRUCTING +#define PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS_BASE_CAST +#define PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS_BASE_NAME PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS +#define PDB_PROP_SYS_PRIMARY_VBIOS_STATE_SAVED_BASE_CAST +#define PDB_PROP_SYS_PRIMARY_VBIOS_STATE_SAVED_BASE_NAME PDB_PROP_SYS_PRIMARY_VBIOS_STATE_SAVED +#define PDB_PROP_SYS_BUGCHECK_ON_TIMEOUT_BASE_CAST +#define PDB_PROP_SYS_BUGCHECK_ON_TIMEOUT_BASE_NAME PDB_PROP_SYS_BUGCHECK_ON_TIMEOUT +#define PDB_PROP_SYS_ENABLE_RM_TEST_ONLY_CODE_BASE_CAST +#define PDB_PROP_SYS_ENABLE_RM_TEST_ONLY_CODE_BASE_NAME PDB_PROP_SYS_ENABLE_RM_TEST_ONLY_CODE +#define PDB_PROP_SYS_ENABLE_STREAM_MEMOPS_BASE_CAST +#define PDB_PROP_SYS_ENABLE_STREAM_MEMOPS_BASE_NAME PDB_PROP_SYS_ENABLE_STREAM_MEMOPS +#define PDB_PROP_SYS_SBIOS_NVIF_POWERMIZER_LIMIT_BASE_CAST +#define PDB_PROP_SYS_SBIOS_NVIF_POWERMIZER_LIMIT_BASE_NAME PDB_PROP_SYS_SBIOS_NVIF_POWERMIZER_LIMIT +#define PDB_PROP_SYS_IS_UEFI_BASE_CAST +#define PDB_PROP_SYS_IS_UEFI_BASE_NAME PDB_PROP_SYS_IS_UEFI +#define PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED_BASE_CAST +#define PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED_BASE_NAME PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED +#define PDB_PROP_SYS_IS_GSYNC_ENABLED_BASE_CAST +#define PDB_PROP_SYS_IS_GSYNC_ENABLED_BASE_NAME PDB_PROP_SYS_IS_GSYNC_ENABLED +#define PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED_BASE_CAST +#define 
PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED_BASE_NAME PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED +#define PDB_PROP_SYS_PRIORITY_BOOST_BASE_CAST +#define PDB_PROP_SYS_PRIORITY_BOOST_BASE_NAME PDB_PROP_SYS_PRIORITY_BOOST +#define PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US_BASE_CAST +#define PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US_BASE_NAME PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US +#define PDB_PROP_SYS_IN_OCA_DATA_COLLECTION_BASE_CAST +#define PDB_PROP_SYS_IN_OCA_DATA_COLLECTION_BASE_NAME PDB_PROP_SYS_IN_OCA_DATA_COLLECTION +#define PDB_PROP_SYS_NVSWITCH_IS_PRESENT_BASE_CAST +#define PDB_PROP_SYS_NVSWITCH_IS_PRESENT_BASE_NAME PDB_PROP_SYS_NVSWITCH_IS_PRESENT +#define PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED_BASE_CAST +#define PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED_BASE_NAME PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED +#define PDB_PROP_SYS_WIN_PRIMARY_DEVICE_MARKED_BASE_CAST +#define PDB_PROP_SYS_WIN_PRIMARY_DEVICE_MARKED_BASE_NAME PDB_PROP_SYS_WIN_PRIMARY_DEVICE_MARKED +#define PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_BASE_CAST +#define PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_BASE_NAME PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE +#define PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED_BASE_CAST +#define PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED_BASE_NAME PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED +#define PDB_PROP_SYS_ENABLE_FORCE_SHARED_LOCK_BASE_CAST +#define PDB_PROP_SYS_ENABLE_FORCE_SHARED_LOCK_BASE_NAME PDB_PROP_SYS_ENABLE_FORCE_SHARED_LOCK +#define PDB_PROP_SYS_IS_AGGRESSIVE_GC6_ENABLED_BASE_CAST +#define PDB_PROP_SYS_IS_AGGRESSIVE_GC6_ENABLED_BASE_NAME PDB_PROP_SYS_IS_AGGRESSIVE_GC6_ENABLED +#define PDB_PROP_SYS_HASWELL_CPU_C0_STEPPING_BASE_CAST +#define PDB_PROP_SYS_HASWELL_CPU_C0_STEPPING_BASE_NAME PDB_PROP_SYS_HASWELL_CPU_C0_STEPPING +#define PDB_PROP_SYS_RECOVERY_REBOOT_REQUIRED_BASE_CAST +#define PDB_PROP_SYS_RECOVERY_REBOOT_REQUIRED_BASE_NAME PDB_PROP_SYS_RECOVERY_REBOOT_REQUIRED +#define PDB_PROP_SYS_RM_LOCK_TIME_COLLECT_BASE_CAST +#define PDB_PROP_SYS_RM_LOCK_TIME_COLLECT_BASE_NAME PDB_PROP_SYS_RM_LOCK_TIME_COLLECT +#define PDB_PROP_SYS_DEBUGGER_DISABLED_BASE_CAST +#define PDB_PROP_SYS_DEBUGGER_DISABLED_BASE_NAME PDB_PROP_SYS_DEBUGGER_DISABLED +#define PDB_PROP_SYS_ALLOW_UNKNOWN_4PART_IDS_BASE_CAST +#define PDB_PROP_SYS_ALLOW_UNKNOWN_4PART_IDS_BASE_NAME PDB_PROP_SYS_ALLOW_UNKNOWN_4PART_IDS +#define PDB_PROP_SYS_MXM_THERMAL_CONTROL_PRESENT_BASE_CAST +#define PDB_PROP_SYS_MXM_THERMAL_CONTROL_PRESENT_BASE_NAME PDB_PROP_SYS_MXM_THERMAL_CONTROL_PRESENT + +NV_STATUS __nvoc_objCreateDynamic_OBJSYS(OBJSYS**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJSYS(OBJSYS**, Dynamic*, NvU32); +#define __objCreate_OBJSYS(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJSYS((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros +#define sysCaptureState_FNPTR(arg_this) arg_this->__nvoc_metadata_ptr->vtable.__sysCaptureState__ +#define sysCaptureState(arg_this) sysCaptureState_DISPATCH(arg_this) + +// Dispatch functions +static inline NV_STATUS sysCaptureState_DISPATCH(struct OBJSYS *arg_this) { + return arg_this->__nvoc_metadata_ptr->vtable.__sysCaptureState__(arg_this); +} + +NV_STATUS sysCaptureState_IMPL(struct OBJSYS *arg1); + +static inline NvU32 sysGetPwrTransitionTimeout(struct OBJSYS *pSys) { + return pSys->pwrTransitionTimeoutOverride; +} + +static inline const SYS_STATIC_CONFIG *sysGetStaticConfig(struct OBJSYS *pSys) { + return &pSys->staticConfig; +} + +NV_STATUS sysConstruct_IMPL(struct OBJSYS *arg_); + +#define __nvoc_sysConstruct(arg_) sysConstruct_IMPL(arg_) 
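+// Illustrative note (assumes the property accessors are token-pasting macros
+// in the NVOC prelude, roughly):
+//
+//     #define setProperty(pObj, prop, val) prop##_BASE_CAST prop##_BASE_NAME = (val)
+//     #define getProperty(pObj, prop)      prop##_BASE_CAST prop##_BASE_NAME
+//
+// This is what the *_BASE_CAST / *_BASE_NAME pairs above are for: a call
+// like pSys->setProperty(pSys, PDB_PROP_SYS_PRIORITY_BOOST, NV_TRUE) pastes
+// into pSys->PDB_PROP_SYS_PRIORITY_BOOST = NV_TRUE, since _BASE_CAST is
+// empty for a property declared directly on OBJSYS; a property inherited
+// from a base class would paste in the __nvoc_base_*. member path instead.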
+void sysDestruct_IMPL(struct OBJSYS *arg1);
+
+#define __nvoc_sysDestruct(arg1) sysDestruct_IMPL(arg1)
+void sysInitRegistryOverrides_IMPL(struct OBJSYS *arg1);
+
+#ifdef __nvoc_system_h_disabled
+static inline void sysInitRegistryOverrides(struct OBJSYS *arg1) {
+    NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!");
+}
+#else //__nvoc_system_h_disabled
+#define sysInitRegistryOverrides(arg1) sysInitRegistryOverrides_IMPL(arg1)
+#endif //__nvoc_system_h_disabled
+
+void sysApplyLockingPolicy_IMPL(struct OBJSYS *arg1);
+
+#ifdef __nvoc_system_h_disabled
+static inline void sysApplyLockingPolicy(struct OBJSYS *arg1) {
+    NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!");
+}
+#else //__nvoc_system_h_disabled
+#define sysApplyLockingPolicy(arg1) sysApplyLockingPolicy_IMPL(arg1)
+#endif //__nvoc_system_h_disabled
+
+struct OBJOS *sysGetOs_IMPL(struct OBJSYS *arg1);
+
+#ifdef __nvoc_system_h_disabled
+static inline struct OBJOS *sysGetOs(struct OBJSYS *arg1) {
+    NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!");
+    return NULL;
+}
+#else //__nvoc_system_h_disabled
+#define sysGetOs(arg1) sysGetOs_IMPL(arg1)
+#endif //__nvoc_system_h_disabled
+
+void sysEnableExternalFabricMgmt_IMPL(struct OBJSYS *arg1);
+
+#ifdef __nvoc_system_h_disabled
+static inline void sysEnableExternalFabricMgmt(struct OBJSYS *arg1) {
+    NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!");
+}
+#else //__nvoc_system_h_disabled
+#define sysEnableExternalFabricMgmt(arg1) sysEnableExternalFabricMgmt_IMPL(arg1)
+#endif //__nvoc_system_h_disabled
+
+void sysForceInitFabricManagerState_IMPL(struct OBJSYS *arg1);
+
+#ifdef __nvoc_system_h_disabled
+static inline void sysForceInitFabricManagerState(struct OBJSYS *arg1) {
+    NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!");
+}
+#else //__nvoc_system_h_disabled
+#define sysForceInitFabricManagerState(arg1) sysForceInitFabricManagerState_IMPL(arg1)
+#endif //__nvoc_system_h_disabled
+
+NV_STATUS sysSyncExternalFabricMgmtWAR_IMPL(struct OBJSYS *arg1, OBJGPU *arg2);
+
+#ifdef __nvoc_system_h_disabled
+static inline NV_STATUS sysSyncExternalFabricMgmtWAR(struct OBJSYS *arg1, OBJGPU *arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_system_h_disabled
+#define sysSyncExternalFabricMgmtWAR(arg1, arg2) sysSyncExternalFabricMgmtWAR_IMPL(arg1, arg2)
+#endif //__nvoc_system_h_disabled
+
+void sysSetRecoveryRebootRequired_IMPL(struct OBJSYS *pSys, NvBool bRebootRequired);
+
+#ifdef __nvoc_system_h_disabled
+static inline void sysSetRecoveryRebootRequired(struct OBJSYS *pSys, NvBool bRebootRequired) {
+    NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!");
+}
+#else //__nvoc_system_h_disabled
+#define sysSetRecoveryRebootRequired(pSys, bRebootRequired) sysSetRecoveryRebootRequired_IMPL(pSys, bRebootRequired)
+#endif //__nvoc_system_h_disabled
+
+#undef PRIVATE_FIELD
+
+
+extern struct OBJSYS *g_pSys;
+
+#endif // SYSTEM_H
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _G_SYSTEM_NVOC_H_
diff --git a/src/nvidia/generated/g_tmr_nvoc.c b/src/nvidia/generated/g_tmr_nvoc.c
new file mode 100644
index 0000000..b83740a
--- /dev/null
+++ b/src/nvidia/generated/g_tmr_nvoc.c
@@ -0,0 +1,623 @@
+#define NVOC_TMR_H_PRIVATE_ACCESS_ALLOWED
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+#include "nvtypes.h"
+#include "nvport/nvport.h"
+#include "nvport/inline/util_valist.h"
+#include "utils/nvassert.h"
+#include "g_tmr_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xb13ac4 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_TimerApi; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +// Forward declarations for TimerApi +void __nvoc_init__GpuResource(GpuResource*); +void __nvoc_init__Notifier(Notifier*); +void __nvoc_init__TimerApi(TimerApi*); +void __nvoc_init_funcTable_TimerApi(TimerApi*); +NV_STATUS __nvoc_ctor_TimerApi(TimerApi*, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +void __nvoc_init_dataField_TimerApi(TimerApi*); +void __nvoc_dtor_TimerApi(TimerApi*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__TimerApi; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__TimerApi; + +// Down-thunk(s) to bridge TimerApi methods from ancestors (if any) +NvBool __nvoc_down_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super^2 +NvBool __nvoc_down_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControlSerialization_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControlSerialization_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +void __nvoc_down_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2 +NV_STATUS __nvoc_down_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // super +NV_STATUS __nvoc_down_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // super +NvBool __nvoc_down_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // super +PEVENTNOTIFICATION * __nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier); // super +struct NotifShare * __nvoc_down_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier); // super +void __nvoc_down_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare); // super 
+NV_STATUS __nvoc_down_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // super
+NV_STATUS __nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // super
+NV_STATUS __nvoc_down_thunk_TimerApi_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pTimerApi, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); // this
+
+// Up-thunk(s) to bridge TimerApi methods to ancestors (if any)
+NvBool __nvoc_up_thunk_RsResource_rmresCanCopy(struct RmResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresIsDuplicate(struct RmResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super^2
+void __nvoc_up_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); // super^2
+NvBool __nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported(struct RmResource *pResource); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super^2
+NV_STATUS __nvoc_up_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super^2
+NvU32 __nvoc_up_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource); // super^2
+void __nvoc_up_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference); // super^2
+NvBool __nvoc_up_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // super
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // super
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided); // super
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // super
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+void __nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NV_STATUS __nvoc_up_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+void __nvoc_up_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NvBool __nvoc_up_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresIsDuplicate(struct GpuResource *pResource, NvHandle hMemory, NvBool *pDuplicate); // super
+void __nvoc_up_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // super
+NvBool __nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported(struct GpuResource *pResource); // super
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); // super
+NV_STATUS __nvoc_up_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // super
+NvU32 __nvoc_up_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource); // super
+void __nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference); // super
+NV_STATUS __nvoc_up_thunk_GpuResource_tmrapiControl(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_tmrapiMap(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_tmrapiUnmap(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); // this
+NvBool __nvoc_up_thunk_GpuResource_tmrapiShareCallback(struct TimerApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_tmrapiGetMapAddrSpace(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); // this
+NV_STATUS __nvoc_up_thunk_GpuResource_tmrapiInternalControlForward(struct TimerApi *pGpuResource, NvU32 command, void *pParams, NvU32 size); // this
+NvHandle __nvoc_up_thunk_GpuResource_tmrapiGetInternalObjectHandle(struct TimerApi *pGpuResource); // this
+NvBool __nvoc_up_thunk_RmResource_tmrapiAccessCallback(struct TimerApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); // this
+NV_STATUS __nvoc_up_thunk_RmResource_tmrapiGetMemInterMapParams(struct TimerApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); // this
+NV_STATUS __nvoc_up_thunk_RmResource_tmrapiCheckMemInterUnmap(struct TimerApi *pRmResource, NvBool bSubdeviceHandleProvided); // this
+NV_STATUS __nvoc_up_thunk_RmResource_tmrapiGetMemoryMappingDescriptor(struct TimerApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); // this
+NV_STATUS __nvoc_up_thunk_RmResource_tmrapiControlSerialization_Prologue(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+void __nvoc_up_thunk_RmResource_tmrapiControlSerialization_Epilogue(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NV_STATUS __nvoc_up_thunk_RmResource_tmrapiControl_Prologue(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+void __nvoc_up_thunk_RmResource_tmrapiControl_Epilogue(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NvBool __nvoc_up_thunk_RsResource_tmrapiCanCopy(struct TimerApi *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_tmrapiIsDuplicate(struct TimerApi *pResource, NvHandle hMemory, NvBool *pDuplicate); // this
+void __nvoc_up_thunk_RsResource_tmrapiPreDestruct(struct TimerApi *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_tmrapiControlFilter(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); // this
+NvBool __nvoc_up_thunk_RsResource_tmrapiIsPartialUnmapSupported(struct TimerApi *pResource); // this
+NV_STATUS __nvoc_up_thunk_RsResource_tmrapiMapTo(struct TimerApi *pResource, RS_RES_MAP_TO_PARAMS *pParams); // this
+NV_STATUS __nvoc_up_thunk_RsResource_tmrapiUnmapFrom(struct TimerApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); // this
+NvU32 __nvoc_up_thunk_RsResource_tmrapiGetRefCount(struct TimerApi *pResource); // this
+void __nvoc_up_thunk_RsResource_tmrapiAddAdditionalDependants(struct RsClient *pClient, struct TimerApi *pResource, RsResourceRef *pReference); // this
+PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_tmrapiGetNotificationListPtr(struct TimerApi *pNotifier); // this
+struct NotifShare * __nvoc_up_thunk_Notifier_tmrapiGetNotificationShare(struct TimerApi *pNotifier); // this
+void __nvoc_up_thunk_Notifier_tmrapiSetNotificationShare(struct TimerApi *pNotifier, struct NotifShare *pNotifShare); // this
+NV_STATUS __nvoc_up_thunk_Notifier_tmrapiUnregisterEvent(struct TimerApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); // this
+NV_STATUS __nvoc_up_thunk_Notifier_tmrapiGetOrAllocNotifShare(struct TimerApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); // this
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_TimerApi =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(TimerApi),
+        /*classId=*/ classId(TimerApi),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "TimerApi",
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_TimerApi,
+    /*pCastInfo=*/ &__nvoc_castinfo__TimerApi,
+    /*pExportInfo=*/ &__nvoc_export_info__TimerApi
+};
+
+#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG)
+#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0)
+#endif
+
+static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_TimerApi[] =
+{
+    { /* [0] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) tmrapiCtrlCmdTmrSetAlarmNotify_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+        /*flags=*/ 0x8u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x40110u,
+        /*paramSize=*/ sizeof(NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_TimerApi.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "tmrapiCtrlCmdTmrSetAlarmNotify"
+#endif
+    },
+
+};
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+static const struct NVOC_METADATA__TimerApi __nvoc_metadata__TimerApi = {
+    .rtti.pClassDef = &__nvoc_class_def_TimerApi, // (tmrapi) this
+    .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_TimerApi,
+    .rtti.offset = 0,
+    .metadata__GpuResource.rtti.pClassDef = &__nvoc_class_def_GpuResource, // (gpures) super
+    .metadata__GpuResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__GpuResource.rtti.offset = NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource),
+    .metadata__GpuResource.metadata__RmResource.rtti.pClassDef = &__nvoc_class_def_RmResource, // (rmres) super^2
+    .metadata__GpuResource.metadata__RmResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__GpuResource.metadata__RmResource.rtti.offset = NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource),
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.pClassDef = &__nvoc_class_def_RsResource, // (res) super^3
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti.offset = NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super^4
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti.offset = NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
+    .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.pClassDef = &__nvoc_class_def_RmResourceCommon, // (rmrescmn) super^3
+    .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti.offset = NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
+    .metadata__Notifier.rtti.pClassDef = &__nvoc_class_def_Notifier, // (notify) super
+    .metadata__Notifier.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__Notifier.rtti.offset = NV_OFFSETOF(TimerApi, __nvoc_base_Notifier),
+    .metadata__Notifier.metadata__INotifier.rtti.pClassDef = &__nvoc_class_def_INotifier, // (inotify) super^2
+    .metadata__Notifier.metadata__INotifier.rtti.dtor = &__nvoc_destructFromBase,
+    .metadata__Notifier.metadata__INotifier.rtti.offset = NV_OFFSETOF(TimerApi, __nvoc_base_Notifier.__nvoc_base_INotifier),
+
+    .vtable.__tmrapiGetRegBaseOffsetAndSize__ = &tmrapiGetRegBaseOffsetAndSize_IMPL, // virtual override (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_down_thunk_TimerApi_gpuresGetRegBaseOffsetAndSize, // virtual
+    .vtable.__tmrapiControl__ = &__nvoc_up_thunk_GpuResource_tmrapiControl, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControl__ = &gpuresControl_IMPL, // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl__ = &__nvoc_up_thunk_RsResource_rmresControl, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl__ = &__nvoc_down_thunk_GpuResource_resControl, // virtual
+    .vtable.__tmrapiMap__ = &__nvoc_up_thunk_GpuResource_tmrapiMap, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresMap__ = &gpuresMap_IMPL, // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresMap__ = &__nvoc_up_thunk_RsResource_rmresMap, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMap__ = &__nvoc_down_thunk_GpuResource_resMap, // virtual
+    .vtable.__tmrapiUnmap__ = &__nvoc_up_thunk_GpuResource_tmrapiUnmap, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresUnmap__ = &gpuresUnmap_IMPL, // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmap__ = &__nvoc_up_thunk_RsResource_rmresUnmap, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmap__ = &__nvoc_down_thunk_GpuResource_resUnmap, // virtual
+    .vtable.__tmrapiShareCallback__ = &__nvoc_up_thunk_GpuResource_tmrapiShareCallback, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresShareCallback__ = &gpuresShareCallback_IMPL, // virtual override (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresShareCallback__ = &__nvoc_down_thunk_GpuResource_rmresShareCallback, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resShareCallback__ = &__nvoc_down_thunk_RmResource_resShareCallback, // virtual
+    .vtable.__tmrapiGetMapAddrSpace__ = &__nvoc_up_thunk_GpuResource_tmrapiGetMapAddrSpace, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL, // virtual
+    .vtable.__tmrapiInternalControlForward__ = &__nvoc_up_thunk_GpuResource_tmrapiInternalControlForward, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL, // virtual
+    .vtable.__tmrapiGetInternalObjectHandle__ = &__nvoc_up_thunk_GpuResource_tmrapiGetInternalObjectHandle, // virtual inherited (gpures) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL, // virtual
+    .vtable.__tmrapiAccessCallback__ = &__nvoc_up_thunk_RmResource_tmrapiAccessCallback, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresAccessCallback__ = &__nvoc_up_thunk_RmResource_gpuresAccessCallback, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresAccessCallback__ = &rmresAccessCallback_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAccessCallback__ = &__nvoc_down_thunk_RmResource_resAccessCallback, // virtual
+    .vtable.__tmrapiGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_tmrapiGetMemInterMapParams, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetMemInterMapParams__ = &__nvoc_up_thunk_RmResource_gpuresGetMemInterMapParams, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL, // virtual
+    .vtable.__tmrapiCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_tmrapiCheckMemInterUnmap, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresCheckMemInterUnmap__ = &__nvoc_up_thunk_RmResource_gpuresCheckMemInterUnmap, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL, // virtual
+    .vtable.__tmrapiGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_tmrapiGetMemoryMappingDescriptor, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetMemoryMappingDescriptor__ = &__nvoc_up_thunk_RmResource_gpuresGetMemoryMappingDescriptor, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL, // virtual
+    .vtable.__tmrapiControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_tmrapiControlSerialization_Prologue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControlSerialization_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Prologue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Prologue__ = &rmresControlSerialization_Prologue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Prologue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Prologue, // virtual
+    .vtable.__tmrapiControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_tmrapiControlSerialization_Epilogue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControlSerialization_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControlSerialization_Epilogue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlSerialization_Epilogue__ = &rmresControlSerialization_Epilogue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlSerialization_Epilogue__ = &__nvoc_down_thunk_RmResource_resControlSerialization_Epilogue, // virtual
+    .vtable.__tmrapiControl_Prologue__ = &__nvoc_up_thunk_RmResource_tmrapiControl_Prologue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControl_Prologue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Prologue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Prologue__ = &__nvoc_down_thunk_RmResource_resControl_Prologue, // virtual
+    .vtable.__tmrapiControl_Epilogue__ = &__nvoc_up_thunk_RmResource_tmrapiControl_Epilogue, // virtual inherited (rmres) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControl_Epilogue__ = &__nvoc_up_thunk_RmResource_gpuresControl_Epilogue, // virtual inherited (rmres) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL, // virtual override (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControl_Epilogue__ = &__nvoc_down_thunk_RmResource_resControl_Epilogue, // virtual
+    .vtable.__tmrapiCanCopy__ = &__nvoc_up_thunk_RsResource_tmrapiCanCopy, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresCanCopy__ = &__nvoc_up_thunk_RsResource_gpuresCanCopy, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresCanCopy__ = &__nvoc_up_thunk_RsResource_rmresCanCopy, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resCanCopy__ = &resCanCopy_IMPL, // virtual
+    .vtable.__tmrapiIsDuplicate__ = &__nvoc_up_thunk_RsResource_tmrapiIsDuplicate, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresIsDuplicate__ = &__nvoc_up_thunk_RsResource_gpuresIsDuplicate, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsDuplicate__ = &__nvoc_up_thunk_RsResource_rmresIsDuplicate, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsDuplicate__ = &resIsDuplicate_IMPL, // virtual
+    .vtable.__tmrapiPreDestruct__ = &__nvoc_up_thunk_RsResource_tmrapiPreDestruct, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresPreDestruct__ = &__nvoc_up_thunk_RsResource_gpuresPreDestruct, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresPreDestruct__ = &__nvoc_up_thunk_RsResource_rmresPreDestruct, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resPreDestruct__ = &resPreDestruct_IMPL, // virtual
+    .vtable.__tmrapiControlFilter__ = &__nvoc_up_thunk_RsResource_tmrapiControlFilter, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresControlFilter__ = &__nvoc_up_thunk_RsResource_gpuresControlFilter, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresControlFilter__ = &__nvoc_up_thunk_RsResource_rmresControlFilter, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resControlFilter__ = &resControlFilter_IMPL, // virtual
+    .vtable.__tmrapiIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_tmrapiIsPartialUnmapSupported, // inline virtual inherited (res) base (gpures) body
+    .metadata__GpuResource.vtable.__gpuresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_gpuresIsPartialUnmapSupported, // inline virtual inherited (res) base (rmres) body
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresIsPartialUnmapSupported__ = &__nvoc_up_thunk_RsResource_rmresIsPartialUnmapSupported, // inline virtual inherited (res) base (res) body
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resIsPartialUnmapSupported__ = &resIsPartialUnmapSupported_d69453, // inline virtual body
+    .vtable.__tmrapiMapTo__ = &__nvoc_up_thunk_RsResource_tmrapiMapTo, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresMapTo__ = &__nvoc_up_thunk_RsResource_gpuresMapTo, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresMapTo__ = &__nvoc_up_thunk_RsResource_rmresMapTo, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resMapTo__ = &resMapTo_IMPL, // virtual
+    .vtable.__tmrapiUnmapFrom__ = &__nvoc_up_thunk_RsResource_tmrapiUnmapFrom, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresUnmapFrom__ = &__nvoc_up_thunk_RsResource_gpuresUnmapFrom, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresUnmapFrom__ = &__nvoc_up_thunk_RsResource_rmresUnmapFrom, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resUnmapFrom__ = &resUnmapFrom_IMPL, // virtual
+    .vtable.__tmrapiGetRefCount__ = &__nvoc_up_thunk_RsResource_tmrapiGetRefCount, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresGetRefCount__ = &__nvoc_up_thunk_RsResource_gpuresGetRefCount, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresGetRefCount__ = &__nvoc_up_thunk_RsResource_rmresGetRefCount, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resGetRefCount__ = &resGetRefCount_IMPL, // virtual
+    .vtable.__tmrapiAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_tmrapiAddAdditionalDependants, // virtual inherited (res) base (gpures)
+    .metadata__GpuResource.vtable.__gpuresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_gpuresAddAdditionalDependants, // virtual inherited (res) base (rmres)
+    .metadata__GpuResource.metadata__RmResource.vtable.__rmresAddAdditionalDependants__ = &__nvoc_up_thunk_RsResource_rmresAddAdditionalDependants, // virtual inherited (res) base (res)
+    .metadata__GpuResource.metadata__RmResource.metadata__RsResource.vtable.__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL, // virtual
+    .vtable.__tmrapiGetNotificationListPtr__ = &__nvoc_up_thunk_Notifier_tmrapiGetNotificationListPtr, // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyGetNotificationListPtr__ = &notifyGetNotificationListPtr_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationListPtr__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationListPtr, // pure virtual
+    .vtable.__tmrapiGetNotificationShare__ = &__nvoc_up_thunk_Notifier_tmrapiGetNotificationShare, // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyGetNotificationShare__ = &notifyGetNotificationShare_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifyGetNotificationShare, // pure virtual
+    .vtable.__tmrapiSetNotificationShare__ = &__nvoc_up_thunk_Notifier_tmrapiSetNotificationShare, // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifySetNotificationShare__ = &notifySetNotificationShare_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifySetNotificationShare__ = &__nvoc_down_thunk_Notifier_inotifySetNotificationShare, // pure virtual
+    .vtable.__tmrapiUnregisterEvent__ = &__nvoc_up_thunk_Notifier_tmrapiUnregisterEvent, // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyUnregisterEvent__ = &notifyUnregisterEvent_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyUnregisterEvent__ = &__nvoc_down_thunk_Notifier_inotifyUnregisterEvent, // pure virtual
+    .vtable.__tmrapiGetOrAllocNotifShare__ = &__nvoc_up_thunk_Notifier_tmrapiGetOrAllocNotifShare, // virtual inherited (notify) base (notify)
+    .metadata__Notifier.vtable.__notifyGetOrAllocNotifShare__ = &notifyGetOrAllocNotifShare_IMPL, // virtual override (inotify) base (inotify)
+    .metadata__Notifier.metadata__INotifier.vtable.__inotifyGetOrAllocNotifShare__ = &__nvoc_down_thunk_Notifier_inotifyGetOrAllocNotifShare, // pure virtual
+};
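// Illustrative sketch (editor's example, not part of the generated source):
// the castinfo table that follows lists the RTTI record of every ancestor so
// the NVOC runtime can resolve casts at run time. Given a Dynamic* that
// actually refers to a TimerApi, dynamicCast() (also used later in
// __nvoc_objCreate_TimerApi) consults this information:
//
//     TimerApi    *pTimerApi = dynamicCast(pDynamic, TimerApi);    // exact class
//     GpuResource *pGpuRes   = dynamicCast(pDynamic, GpuResource); // any ancestor
//     if (pTimerApi == NULL) { /* pDynamic was not a TimerApi */ }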
&__nvoc_metadata__TimerApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.rtti, // [3]: (res) super^3 + &__nvoc_metadata__TimerApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object.rtti, // [4]: (obj) super^4 + &__nvoc_metadata__TimerApi.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon.rtti, // [5]: (rmrescmn) super^3 + &__nvoc_metadata__TimerApi.metadata__Notifier.rtti, // [6]: (notify) super + &__nvoc_metadata__TimerApi.metadata__Notifier.metadata__INotifier.rtti, // [7]: (inotify) super^2 + } +}; + +// 1 down-thunk(s) defined to bridge methods in TimerApi from superclasses + +// tmrapiGetRegBaseOffsetAndSize: virtual override (gpures) base (gpures) +NV_STATUS __nvoc_down_thunk_TimerApi_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pTimerApi, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return tmrapiGetRegBaseOffsetAndSize((struct TimerApi *)(((unsigned char *) pTimerApi) - NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource)), pGpu, pOffset, pSize); +} + + +// 29 up-thunk(s) defined to bridge methods in TimerApi to superclasses + +// tmrapiControl: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_tmrapiControl(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource)), pCallContext, pParams); +} + +// tmrapiMap: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_tmrapiMap(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource)), pCallContext, pParams, pCpuMapping); +} + +// tmrapiUnmap: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_tmrapiUnmap(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource)), pCallContext, pCpuMapping); +} + +// tmrapiShareCallback: virtual inherited (gpures) base (gpures) +NvBool __nvoc_up_thunk_GpuResource_tmrapiShareCallback(struct TimerApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource)), pInvokingClient, pParentRef, pSharePolicy); +} + +// tmrapiGetMapAddrSpace: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_tmrapiGetMapAddrSpace(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource)), pCallContext, mapFlags, pAddrSpace); +} + +// tmrapiInternalControlForward: virtual inherited (gpures) base (gpures) +NV_STATUS __nvoc_up_thunk_GpuResource_tmrapiInternalControlForward(struct TimerApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource)), command, pParams, size); +} + +// 
+
+// tmrapiGetInternalObjectHandle: virtual inherited (gpures) base (gpures)
+NvHandle __nvoc_up_thunk_GpuResource_tmrapiGetInternalObjectHandle(struct TimerApi *pGpuResource) {
+    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *) pGpuResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource)));
+}
+
+// tmrapiAccessCallback: virtual inherited (rmres) base (gpures)
+NvBool __nvoc_up_thunk_RmResource_tmrapiAccessCallback(struct TimerApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return rmresAccessCallback((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pInvokingClient, pAllocParams, accessRight);
+}
+
+// tmrapiGetMemInterMapParams: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_tmrapiGetMemInterMapParams(struct TimerApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pParams);
+}
+
+// tmrapiCheckMemInterUnmap: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_tmrapiCheckMemInterUnmap(struct TimerApi *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), bSubdeviceHandleProvided);
+}
+
+// tmrapiGetMemoryMappingDescriptor: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_tmrapiGetMemoryMappingDescriptor(struct TimerApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *) pRmResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), ppMemDesc);
+}
+
+// tmrapiControlSerialization_Prologue: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_tmrapiControlSerialization_Prologue(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControlSerialization_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// tmrapiControlSerialization_Epilogue: virtual inherited (rmres) base (gpures)
+void __nvoc_up_thunk_RmResource_tmrapiControlSerialization_Epilogue(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControlSerialization_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// tmrapiControl_Prologue: virtual inherited (rmres) base (gpures)
+NV_STATUS __nvoc_up_thunk_RmResource_tmrapiControl_Prologue(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControl_Prologue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// tmrapiControl_Epilogue: virtual inherited (rmres) base (gpures)
+void __nvoc_up_thunk_RmResource_tmrapiControl_Epilogue(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControl_Epilogue((struct RmResource *)(((unsigned char *) pResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource)), pCallContext, pParams);
+}
+
+// tmrapiCanCopy: virtual inherited (res) base (gpures)
+NvBool __nvoc_up_thunk_RsResource_tmrapiCanCopy(struct TimerApi *pResource) {
+    return resCanCopy((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// tmrapiIsDuplicate: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_tmrapiIsDuplicate(struct TimerApi *pResource, NvHandle hMemory, NvBool *pDuplicate) {
+    return resIsDuplicate((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), hMemory, pDuplicate);
+}
+
+// tmrapiPreDestruct: virtual inherited (res) base (gpures)
+void __nvoc_up_thunk_RsResource_tmrapiPreDestruct(struct TimerApi *pResource) {
+    resPreDestruct((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// tmrapiControlFilter: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_tmrapiControlFilter(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControlFilter((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pCallContext, pParams);
+}
+
+// tmrapiIsPartialUnmapSupported: inline virtual inherited (res) base (gpures) body
+NvBool __nvoc_up_thunk_RsResource_tmrapiIsPartialUnmapSupported(struct TimerApi *pResource) {
+    return resIsPartialUnmapSupported((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// tmrapiMapTo: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_tmrapiMapTo(struct TimerApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return resMapTo((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// tmrapiUnmapFrom: virtual inherited (res) base (gpures)
+NV_STATUS __nvoc_up_thunk_RsResource_tmrapiUnmapFrom(struct TimerApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return resUnmapFrom((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pParams);
+}
+
+// tmrapiGetRefCount: virtual inherited (res) base (gpures)
+NvU32 __nvoc_up_thunk_RsResource_tmrapiGetRefCount(struct TimerApi *pResource) {
+    return resGetRefCount((struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)));
+}
+
+// tmrapiAddAdditionalDependants: virtual inherited (res) base (gpures)
+void __nvoc_up_thunk_RsResource_tmrapiAddAdditionalDependants(struct RsClient *pClient, struct TimerApi *pResource, RsResourceRef *pReference) {
+    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *) pResource) + NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource)), pReference);
+}
+
+// tmrapiGetNotificationListPtr: virtual inherited (notify) base (notify)
+PEVENTNOTIFICATION * __nvoc_up_thunk_Notifier_tmrapiGetNotificationListPtr(struct TimerApi *pNotifier) {
+    return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(TimerApi, __nvoc_base_Notifier)));
+}
+
+// tmrapiGetNotificationShare: virtual inherited (notify) base (notify)
+struct NotifShare * __nvoc_up_thunk_Notifier_tmrapiGetNotificationShare(struct TimerApi *pNotifier) {
+    return notifyGetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(TimerApi, __nvoc_base_Notifier)));
+}
+
+// tmrapiSetNotificationShare: virtual inherited (notify) base (notify)
+void __nvoc_up_thunk_Notifier_tmrapiSetNotificationShare(struct TimerApi *pNotifier, struct NotifShare *pNotifShare) {
+    notifySetNotificationShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(TimerApi, __nvoc_base_Notifier)), pNotifShare);
+}
+
+// tmrapiUnregisterEvent: virtual inherited (notify) base (notify)
+NV_STATUS __nvoc_up_thunk_Notifier_tmrapiUnregisterEvent(struct TimerApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
+    return notifyUnregisterEvent((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(TimerApi, __nvoc_base_Notifier)), hNotifierClient, hNotifierResource, hEventClient, hEvent);
+}
+
+// tmrapiGetOrAllocNotifShare: virtual inherited (notify) base (notify)
+NV_STATUS __nvoc_up_thunk_Notifier_tmrapiGetOrAllocNotifShare(struct TimerApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
+    return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *) pNotifier) + NV_OFFSETOF(TimerApi, __nvoc_base_Notifier)), hNotifierClient, hNotifierResource, ppNotifShare);
+}
+
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info__TimerApi =
+{
+    /*numEntries=*/ 1,
+    /*pExportEntries=*/ __nvoc_exported_method_def_TimerApi
+};
+
+void __nvoc_dtor_GpuResource(GpuResource*);
+void __nvoc_dtor_Notifier(Notifier*);
+void __nvoc_dtor_TimerApi(TimerApi *pThis) {
+    __nvoc_tmrapiDestruct(pThis);
+    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+    __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_TimerApi(TimerApi *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *);
+NV_STATUS __nvoc_ctor_TimerApi(TimerApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_TimerApi_fail_GpuResource;
+    status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext);
+    if (status != NV_OK) goto __nvoc_ctor_TimerApi_fail_Notifier;
+    __nvoc_init_dataField_TimerApi(pThis);
+
+    status = __nvoc_tmrapiConstruct(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_TimerApi_fail__init;
+    goto __nvoc_ctor_TimerApi_exit; // Success
+
+__nvoc_ctor_TimerApi_fail__init:
+    __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
+__nvoc_ctor_TimerApi_fail_Notifier:
+    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+__nvoc_ctor_TimerApi_fail_GpuResource:
+__nvoc_ctor_TimerApi_exit:
+
+    return status;
+}
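// Illustrative sketch (editor's example, not part of the generated source):
// __nvoc_ctor_TimerApi above follows the usual NVOC goto-unwind shape,
// constructing base classes in order and tearing down only what succeeded,
// in reverse. Reduced to hypothetical names:
//
//     status = ctor_BaseA(...);
//     if (status != NV_OK) goto fail_BaseA;   // nothing to undo yet
//     status = ctor_BaseB(...);
//     if (status != NV_OK) goto fail_BaseB;   // undo BaseA only
//     status = ctor_Self(...);
//     if (status != NV_OK) goto fail_Self;    // undo BaseB, then BaseA
//     return NV_OK;
// fail_Self:  dtor_BaseB(...);
// fail_BaseB: dtor_BaseA(...);
// fail_BaseA: return status;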
+
+// Vtable initialization
+static void __nvoc_init_funcTable_TimerApi_1(TimerApi *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+
+    // tmrapiCtrlCmdTmrSetAlarmNotify -- exported (id=0x40110)
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8u)
+    pThis->__tmrapiCtrlCmdTmrSetAlarmNotify__ = &tmrapiCtrlCmdTmrSetAlarmNotify_IMPL;
+#endif
+} // End __nvoc_init_funcTable_TimerApi_1 with approximately 1 basic block(s).
+
+
+// Initialize vtable(s) for 31 virtual method(s).
+void __nvoc_init_funcTable_TimerApi(TimerApi *pThis) {
+
+    // Initialize vtable(s) with 1 per-object function pointer(s).
+    __nvoc_init_funcTable_TimerApi_1(pThis);
+}
+
+// Initialize newly constructed object.
+void __nvoc_init__TimerApi(TimerApi *pThis) {
+
+    // Initialize pointers to inherited data.
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; // (obj) super^4
+    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; // (res) super^3
+    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; // (rmrescmn) super^3
+    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; // (rmres) super^2
+    pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; // (gpures) super
+    pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; // (inotify) super^2
+    pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; // (notify) super
+    pThis->__nvoc_pbase_TimerApi = pThis; // (tmrapi) this
+
+    // Recurse to superclass initialization function(s).
+    __nvoc_init__GpuResource(&pThis->__nvoc_base_GpuResource);
+    __nvoc_init__Notifier(&pThis->__nvoc_base_Notifier);
+
+    // Pointer(s) to metadata structure(s)
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__TimerApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource.metadata__Object; // (obj) super^4
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr = &__nvoc_metadata__TimerApi.metadata__GpuResource.metadata__RmResource.metadata__RsResource; // (res) super^3
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon.__nvoc_metadata_ptr = &__nvoc_metadata__TimerApi.metadata__GpuResource.metadata__RmResource.metadata__RmResourceCommon; // (rmrescmn) super^3
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr = &__nvoc_metadata__TimerApi.metadata__GpuResource.metadata__RmResource; // (rmres) super^2
+    pThis->__nvoc_base_GpuResource.__nvoc_metadata_ptr = &__nvoc_metadata__TimerApi.metadata__GpuResource; // (gpures) super
+    pThis->__nvoc_base_Notifier.__nvoc_base_INotifier.__nvoc_metadata_ptr = &__nvoc_metadata__TimerApi.metadata__Notifier.metadata__INotifier; // (inotify) super^2
+    pThis->__nvoc_base_Notifier.__nvoc_metadata_ptr = &__nvoc_metadata__TimerApi.metadata__Notifier; // (notify) super
+    pThis->__nvoc_metadata_ptr = &__nvoc_metadata__TimerApi; // (tmrapi) this
+
+    // Initialize per-object vtables.
+    __nvoc_init_funcTable_TimerApi(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_TimerApi(TimerApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams)
+{
+    NV_STATUS status;
+    Object *pParentObj = NULL;
+    TimerApi *pThis;
+
+    // Assign `pThis`, allocating memory unless suppressed by flag.
+    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(TimerApi), (void**)&pThis, (void**)ppThis);
+    if (status != NV_OK)
+        return status;
+
+    // Zero is the initial value for everything.
+    portMemSet(pThis, 0, sizeof(TimerApi));
+
+    pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.createFlags = createFlags;
+
+    // Link the child into the parent if there is one unless flagged not to do so.
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init__TimerApi(pThis);
+    status = __nvoc_ctor_TimerApi(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_objCreate_TimerApi_cleanup;
+
+    // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set.
+    *ppThis = pThis;
+
+    return NV_OK;
+
+__nvoc_objCreate_TimerApi_cleanup:
+
+    // Unlink the child from the parent if it was linked above.
+    if (pParentObj != NULL)
+        objRemoveChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+
+    // Do not call destructors here since the constructor already called them.
+    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
+        portMemSet(pThis, 0, sizeof(TimerApi));
+    else
+    {
+        portMemFree(pThis);
+        *ppThis = NULL;
+    }
+
+    // coverity[leaked_storage:FALSE]
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_TimerApi(TimerApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+    status = __nvoc_objCreate_TimerApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+    return status;
+}
+
diff --git a/src/nvidia/generated/g_tmr_nvoc.h b/src/nvidia/generated/g_tmr_nvoc.h
new file mode 100644
index 0000000..1002766
--- /dev/null
+++ b/src/nvidia/generated/g_tmr_nvoc.h
@@ -0,0 +1,432 @@
+
+#ifndef _G_TMR_NVOC_H_
+#define _G_TMR_NVOC_H_
+
+// Version of generated metadata structures
+#ifdef NVOC_METADATA_VERSION
+#undef NVOC_METADATA_VERSION
+#endif
+#define NVOC_METADATA_VERSION 2
+
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+#include "g_tmr_nvoc.h"
+
+#ifndef _TMR_H_
+#define _TMR_H_
+
+/****************************** Timer Module *******************************\
+*                                                                           *
+* Module: TMR.H                                                             *
+*         Timer functions.                                                  *
+*                                                                           *
+****************************************************************************/
+
+#include "core/core.h"
+#include "gpu/gpu_resource.h"
+#include "rmapi/event.h"
+
+#include "ctrl/ctrl0004.h"
+
+typedef struct OBJTMR *POBJTMR;
+
+#ifndef __NVOC_CLASS_OBJTMR_TYPEDEF__
+#define __NVOC_CLASS_OBJTMR_TYPEDEF__
+typedef struct OBJTMR OBJTMR;
+#endif /* __NVOC_CLASS_OBJTMR_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJTMR
+#define __nvoc_class_id_OBJTMR 0x9ddede
+#endif /* __nvoc_class_id_OBJTMR */
+
+
+
+//---------------------------------------------------------------------------
+//
+//  Time objects.
+//
+//---------------------------------------------------------------------------
+
+#define TIMER_STATE_IDLE 0
+#define TIMER_STATE_BUSY 1
+
+typedef NV_STATUS (*TIMEPROC)(OBJGPU *, struct OBJTMR *, TMR_EVENT *);
+typedef NV_STATUS (*TIMEPROC_OBSOLETE)(OBJGPU *, struct OBJTMR *, void *);
+typedef NV_STATUS (*TIMEPROC_COUNTDOWN)(OBJGPU *, THREAD_STATE_NODE *);
+
+/*!
+ * RM internal class representing NV01_TIMER (child of SubDevice)
+ */
+
+// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
+// the matching C source file, but causes diagnostics to be issued if another
+// source file references the field.
+#ifdef NVOC_TMR_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+
+
+// Metadata with per-class RTTI and vtable with ancestor(s)
+struct NVOC_METADATA__TimerApi;
+struct NVOC_METADATA__GpuResource;
+struct NVOC_METADATA__Notifier;
+struct NVOC_VTABLE__TimerApi;
+
+
+struct TimerApi {
+
+    // Metadata starts with RTTI structure.
+    union {
+        const struct NVOC_METADATA__TimerApi *__nvoc_metadata_ptr;
+        const struct NVOC_RTTI *__nvoc_rtti;
+    };
+
superclass or base class) objects + struct GpuResource __nvoc_base_GpuResource; + struct Notifier __nvoc_base_Notifier; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super^4 + struct RsResource *__nvoc_pbase_RsResource; // res super^3 + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; // rmrescmn super^3 + struct RmResource *__nvoc_pbase_RmResource; // rmres super^2 + struct GpuResource *__nvoc_pbase_GpuResource; // gpures super + struct INotifier *__nvoc_pbase_INotifier; // inotify super^2 + struct Notifier *__nvoc_pbase_Notifier; // notify super + struct TimerApi *__nvoc_pbase_TimerApi; // tmrapi + + // Vtable with 1 per-object function pointer + NV_STATUS (*__tmrapiCtrlCmdTmrSetAlarmNotify__)(struct TimerApi * /*this*/, NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS *); // exported (id=0x40110) +}; + + +// Vtable with 30 per-class function pointers +struct NVOC_VTABLE__TimerApi { + NV_STATUS (*__tmrapiGetRegBaseOffsetAndSize__)(struct TimerApi * /*this*/, struct OBJGPU *, NvU32 *, NvU32 *); // virtual override (gpures) base (gpures) + NV_STATUS (*__tmrapiControl__)(struct TimerApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__tmrapiMap__)(struct TimerApi * /*this*/, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__tmrapiUnmap__)(struct TimerApi * /*this*/, struct CALL_CONTEXT *, struct RsCpuMapping *); // virtual inherited (gpures) base (gpures) + NvBool (*__tmrapiShareCallback__)(struct TimerApi * /*this*/, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__tmrapiGetMapAddrSpace__)(struct TimerApi * /*this*/, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); // virtual inherited (gpures) base (gpures) + NV_STATUS (*__tmrapiInternalControlForward__)(struct TimerApi * /*this*/, NvU32, void *, NvU32); // virtual inherited (gpures) base (gpures) + NvHandle (*__tmrapiGetInternalObjectHandle__)(struct TimerApi * /*this*/); // virtual inherited (gpures) base (gpures) + NvBool (*__tmrapiAccessCallback__)(struct TimerApi * /*this*/, struct RsClient *, void *, RsAccessRight); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__tmrapiGetMemInterMapParams__)(struct TimerApi * /*this*/, RMRES_MEM_INTER_MAP_PARAMS *); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__tmrapiCheckMemInterUnmap__)(struct TimerApi * /*this*/, NvBool); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__tmrapiGetMemoryMappingDescriptor__)(struct TimerApi * /*this*/, struct MEMORY_DESCRIPTOR **); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__tmrapiControlSerialization_Prologue__)(struct TimerApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + void (*__tmrapiControlSerialization_Epilogue__)(struct TimerApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + NV_STATUS (*__tmrapiControl_Prologue__)(struct TimerApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + void (*__tmrapiControl_Epilogue__)(struct TimerApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (rmres) base (gpures) + NvBool (*__tmrapiCanCopy__)(struct TimerApi * 
/*this*/); // virtual inherited (res) base (gpures) + NV_STATUS (*__tmrapiIsDuplicate__)(struct TimerApi * /*this*/, NvHandle, NvBool *); // virtual inherited (res) base (gpures) + void (*__tmrapiPreDestruct__)(struct TimerApi * /*this*/); // virtual inherited (res) base (gpures) + NV_STATUS (*__tmrapiControlFilter__)(struct TimerApi * /*this*/, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); // virtual inherited (res) base (gpures) + NvBool (*__tmrapiIsPartialUnmapSupported__)(struct TimerApi * /*this*/); // inline virtual inherited (res) base (gpures) body + NV_STATUS (*__tmrapiMapTo__)(struct TimerApi * /*this*/, RS_RES_MAP_TO_PARAMS *); // virtual inherited (res) base (gpures) + NV_STATUS (*__tmrapiUnmapFrom__)(struct TimerApi * /*this*/, RS_RES_UNMAP_FROM_PARAMS *); // virtual inherited (res) base (gpures) + NvU32 (*__tmrapiGetRefCount__)(struct TimerApi * /*this*/); // virtual inherited (res) base (gpures) + void (*__tmrapiAddAdditionalDependants__)(struct RsClient *, struct TimerApi * /*this*/, RsResourceRef *); // virtual inherited (res) base (gpures) + PEVENTNOTIFICATION * (*__tmrapiGetNotificationListPtr__)(struct TimerApi * /*this*/); // virtual inherited (notify) base (notify) + struct NotifShare * (*__tmrapiGetNotificationShare__)(struct TimerApi * /*this*/); // virtual inherited (notify) base (notify) + void (*__tmrapiSetNotificationShare__)(struct TimerApi * /*this*/, struct NotifShare *); // virtual inherited (notify) base (notify) + NV_STATUS (*__tmrapiUnregisterEvent__)(struct TimerApi * /*this*/, NvHandle, NvHandle, NvHandle, NvHandle); // virtual inherited (notify) base (notify) + NV_STATUS (*__tmrapiGetOrAllocNotifShare__)(struct TimerApi * /*this*/, NvHandle, NvHandle, struct NotifShare **); // virtual inherited (notify) base (notify) +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__TimerApi { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__GpuResource metadata__GpuResource; + const struct NVOC_METADATA__Notifier metadata__Notifier; + const struct NVOC_VTABLE__TimerApi vtable; +}; + +#ifndef __NVOC_CLASS_TimerApi_TYPEDEF__ +#define __NVOC_CLASS_TimerApi_TYPEDEF__ +typedef struct TimerApi TimerApi; +#endif /* __NVOC_CLASS_TimerApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_TimerApi +#define __nvoc_class_id_TimerApi 0xb13ac4 +#endif /* __nvoc_class_id_TimerApi */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_TimerApi; + +#define __staticCast_TimerApi(pThis) \ + ((pThis)->__nvoc_pbase_TimerApi) + +#ifdef __nvoc_tmr_h_disabled +#define __dynamicCast_TimerApi(pThis) ((TimerApi*) NULL) +#else //__nvoc_tmr_h_disabled +#define __dynamicCast_TimerApi(pThis) \ + ((TimerApi*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(TimerApi))) +#endif //__nvoc_tmr_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_TimerApi(TimerApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_TimerApi(TimerApi**, Dynamic*, NvU32, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __objCreate_TimerApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_TimerApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + + +// Wrapper macros +#define tmrapiGetRegBaseOffsetAndSize_FNPTR(pTimerApi) pTimerApi->__nvoc_metadata_ptr->vtable.__tmrapiGetRegBaseOffsetAndSize__ +#define tmrapiGetRegBaseOffsetAndSize(pTimerApi, pGpu, pOffset, pSize) 
tmrapiGetRegBaseOffsetAndSize_DISPATCH(pTimerApi, pGpu, pOffset, pSize) +#define tmrapiCtrlCmdTmrSetAlarmNotify_FNPTR(pTimerApi) pTimerApi->__tmrapiCtrlCmdTmrSetAlarmNotify__ +#define tmrapiCtrlCmdTmrSetAlarmNotify(pTimerApi, pParams) tmrapiCtrlCmdTmrSetAlarmNotify_DISPATCH(pTimerApi, pParams) +#define tmrapiControl_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresControl__ +#define tmrapiControl(pGpuResource, pCallContext, pParams) tmrapiControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define tmrapiMap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresMap__ +#define tmrapiMap(pGpuResource, pCallContext, pParams, pCpuMapping) tmrapiMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define tmrapiUnmap_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresUnmap__ +#define tmrapiUnmap(pGpuResource, pCallContext, pCpuMapping) tmrapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define tmrapiShareCallback_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresShareCallback__ +#define tmrapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) tmrapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define tmrapiGetMapAddrSpace_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetMapAddrSpace__ +#define tmrapiGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) tmrapiGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define tmrapiInternalControlForward_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresInternalControlForward__ +#define tmrapiInternalControlForward(pGpuResource, command, pParams, size) tmrapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define tmrapiGetInternalObjectHandle_FNPTR(pGpuResource) pGpuResource->__nvoc_base_GpuResource.__nvoc_metadata_ptr->vtable.__gpuresGetInternalObjectHandle__ +#define tmrapiGetInternalObjectHandle(pGpuResource) tmrapiGetInternalObjectHandle_DISPATCH(pGpuResource) +#define tmrapiAccessCallback_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresAccessCallback__ +#define tmrapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) tmrapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define tmrapiGetMemInterMapParams_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemInterMapParams__ +#define tmrapiGetMemInterMapParams(pRmResource, pParams) tmrapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define tmrapiCheckMemInterUnmap_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresCheckMemInterUnmap__ +#define tmrapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) tmrapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define tmrapiGetMemoryMappingDescriptor_FNPTR(pRmResource) pRmResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresGetMemoryMappingDescriptor__ +#define tmrapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) tmrapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define tmrapiControlSerialization_Prologue_FNPTR(pResource) 
pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Prologue__ +#define tmrapiControlSerialization_Prologue(pResource, pCallContext, pParams) tmrapiControlSerialization_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define tmrapiControlSerialization_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControlSerialization_Epilogue__ +#define tmrapiControlSerialization_Epilogue(pResource, pCallContext, pParams) tmrapiControlSerialization_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define tmrapiControl_Prologue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Prologue__ +#define tmrapiControl_Prologue(pResource, pCallContext, pParams) tmrapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define tmrapiControl_Epilogue_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_metadata_ptr->vtable.__rmresControl_Epilogue__ +#define tmrapiControl_Epilogue(pResource, pCallContext, pParams) tmrapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define tmrapiCanCopy_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resCanCopy__ +#define tmrapiCanCopy(pResource) tmrapiCanCopy_DISPATCH(pResource) +#define tmrapiIsDuplicate_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsDuplicate__ +#define tmrapiIsDuplicate(pResource, hMemory, pDuplicate) tmrapiIsDuplicate_DISPATCH(pResource, hMemory, pDuplicate) +#define tmrapiPreDestruct_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resPreDestruct__ +#define tmrapiPreDestruct(pResource) tmrapiPreDestruct_DISPATCH(pResource) +#define tmrapiControlFilter_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resControlFilter__ +#define tmrapiControlFilter(pResource, pCallContext, pParams) tmrapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define tmrapiIsPartialUnmapSupported_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resIsPartialUnmapSupported__ +#define tmrapiIsPartialUnmapSupported(pResource) tmrapiIsPartialUnmapSupported_DISPATCH(pResource) +#define tmrapiMapTo_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resMapTo__ +#define tmrapiMapTo(pResource, pParams) tmrapiMapTo_DISPATCH(pResource, pParams) +#define tmrapiUnmapFrom_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resUnmapFrom__ +#define tmrapiUnmapFrom(pResource, pParams) tmrapiUnmapFrom_DISPATCH(pResource, pParams) +#define tmrapiGetRefCount_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resGetRefCount__ +#define tmrapiGetRefCount(pResource) tmrapiGetRefCount_DISPATCH(pResource) +#define tmrapiAddAdditionalDependants_FNPTR(pResource) pResource->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_metadata_ptr->vtable.__resAddAdditionalDependants__ +#define 
tmrapiAddAdditionalDependants(pClient, pResource, pReference) tmrapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define tmrapiGetNotificationListPtr_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationListPtr__ +#define tmrapiGetNotificationListPtr(pNotifier) tmrapiGetNotificationListPtr_DISPATCH(pNotifier) +#define tmrapiGetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetNotificationShare__ +#define tmrapiGetNotificationShare(pNotifier) tmrapiGetNotificationShare_DISPATCH(pNotifier) +#define tmrapiSetNotificationShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifySetNotificationShare__ +#define tmrapiSetNotificationShare(pNotifier, pNotifShare) tmrapiSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define tmrapiUnregisterEvent_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyUnregisterEvent__ +#define tmrapiUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) tmrapiUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define tmrapiGetOrAllocNotifShare_FNPTR(pNotifier) pNotifier->__nvoc_base_Notifier.__nvoc_metadata_ptr->vtable.__notifyGetOrAllocNotifShare__ +#define tmrapiGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) tmrapiGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) + +// Dispatch functions +static inline NV_STATUS tmrapiGetRegBaseOffsetAndSize_DISPATCH(struct TimerApi *pTimerApi, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pTimerApi->__nvoc_metadata_ptr->vtable.__tmrapiGetRegBaseOffsetAndSize__(pTimerApi, pGpu, pOffset, pSize); +} + +static inline NV_STATUS tmrapiCtrlCmdTmrSetAlarmNotify_DISPATCH(struct TimerApi *pTimerApi, NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS *pParams) { + return pTimerApi->__tmrapiCtrlCmdTmrSetAlarmNotify__(pTimerApi, pParams); +} + +static inline NV_STATUS tmrapiControl_DISPATCH(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__tmrapiControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS tmrapiMap_DISPATCH(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__tmrapiMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS tmrapiUnmap_DISPATCH(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__tmrapiUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NvBool tmrapiShareCallback_DISPATCH(struct TimerApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__tmrapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS tmrapiGetMapAddrSpace_DISPATCH(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__tmrapiGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline 
NV_STATUS tmrapiInternalControlForward_DISPATCH(struct TimerApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__tmrapiInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NvHandle tmrapiGetInternalObjectHandle_DISPATCH(struct TimerApi *pGpuResource) { + return pGpuResource->__nvoc_metadata_ptr->vtable.__tmrapiGetInternalObjectHandle__(pGpuResource); +} + +static inline NvBool tmrapiAccessCallback_DISPATCH(struct TimerApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvoc_metadata_ptr->vtable.__tmrapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS tmrapiGetMemInterMapParams_DISPATCH(struct TimerApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvoc_metadata_ptr->vtable.__tmrapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS tmrapiCheckMemInterUnmap_DISPATCH(struct TimerApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvoc_metadata_ptr->vtable.__tmrapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS tmrapiGetMemoryMappingDescriptor_DISPATCH(struct TimerApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvoc_metadata_ptr->vtable.__tmrapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS tmrapiControlSerialization_Prologue_DISPATCH(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__tmrapiControlSerialization_Prologue__(pResource, pCallContext, pParams); +} + +static inline void tmrapiControlSerialization_Epilogue_DISPATCH(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__tmrapiControlSerialization_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS tmrapiControl_Prologue_DISPATCH(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__tmrapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void tmrapiControl_Epilogue_DISPATCH(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvoc_metadata_ptr->vtable.__tmrapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NvBool tmrapiCanCopy_DISPATCH(struct TimerApi *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__tmrapiCanCopy__(pResource); +} + +static inline NV_STATUS tmrapiIsDuplicate_DISPATCH(struct TimerApi *pResource, NvHandle hMemory, NvBool *pDuplicate) { + return pResource->__nvoc_metadata_ptr->vtable.__tmrapiIsDuplicate__(pResource, hMemory, pDuplicate); +} + +static inline void tmrapiPreDestruct_DISPATCH(struct TimerApi *pResource) { + pResource->__nvoc_metadata_ptr->vtable.__tmrapiPreDestruct__(pResource); +} + +static inline NV_STATUS tmrapiControlFilter_DISPATCH(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__tmrapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvBool tmrapiIsPartialUnmapSupported_DISPATCH(struct 
TimerApi *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__tmrapiIsPartialUnmapSupported__(pResource); +} + +static inline NV_STATUS tmrapiMapTo_DISPATCH(struct TimerApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__tmrapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS tmrapiUnmapFrom_DISPATCH(struct TimerApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvoc_metadata_ptr->vtable.__tmrapiUnmapFrom__(pResource, pParams); +} + +static inline NvU32 tmrapiGetRefCount_DISPATCH(struct TimerApi *pResource) { + return pResource->__nvoc_metadata_ptr->vtable.__tmrapiGetRefCount__(pResource); +} + +static inline void tmrapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct TimerApi *pResource, RsResourceRef *pReference) { + pResource->__nvoc_metadata_ptr->vtable.__tmrapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline PEVENTNOTIFICATION * tmrapiGetNotificationListPtr_DISPATCH(struct TimerApi *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__tmrapiGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare * tmrapiGetNotificationShare_DISPATCH(struct TimerApi *pNotifier) { + return pNotifier->__nvoc_metadata_ptr->vtable.__tmrapiGetNotificationShare__(pNotifier); +} + +static inline void tmrapiSetNotificationShare_DISPATCH(struct TimerApi *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvoc_metadata_ptr->vtable.__tmrapiSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS tmrapiUnregisterEvent_DISPATCH(struct TimerApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvoc_metadata_ptr->vtable.__tmrapiUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS tmrapiGetOrAllocNotifShare_DISPATCH(struct TimerApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__nvoc_metadata_ptr->vtable.__tmrapiGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS tmrapiGetRegBaseOffsetAndSize_IMPL(struct TimerApi *pTimerApi, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); + +NV_STATUS tmrapiCtrlCmdTmrSetAlarmNotify_IMPL(struct TimerApi *pTimerApi, NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS *pParams); + +NV_STATUS tmrapiConstruct_IMPL(struct TimerApi *arg_pTimerApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); + +#define __nvoc_tmrapiConstruct(arg_pTimerApi, arg_pCallContext, arg_pParams) tmrapiConstruct_IMPL(arg_pTimerApi, arg_pCallContext, arg_pParams) +void tmrapiDestruct_IMPL(struct TimerApi *pTimerApi); + +#define __nvoc_tmrapiDestruct(pTimerApi) tmrapiDestruct_IMPL(pTimerApi) +void tmrapiDeregisterEvents_IMPL(struct TimerApi *pTimerApi); + +#ifdef __nvoc_tmr_h_disabled +static inline void tmrapiDeregisterEvents(struct TimerApi *pTimerApi) { + NV_ASSERT_FAILED_PRECOMP("TimerApi was disabled!"); +} +#else //__nvoc_tmr_h_disabled +#define tmrapiDeregisterEvents(pTimerApi) tmrapiDeregisterEvents_IMPL(pTimerApi) +#endif //__nvoc_tmr_h_disabled + +#undef PRIVATE_FIELD + + + +//--------------------------------------------------------------------------- +// +// Function prototypes. 
+// +//--------------------------------------------------------------------------- + + +#endif // _TMR_H_ + + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_TMR_NVOC_H_ diff --git a/src/nvidia/generated/g_traceable_nvoc.c b/src/nvidia/generated/g_traceable_nvoc.c new file mode 100644 index 0000000..a9f552a --- /dev/null +++ b/src/nvidia/generated/g_traceable_nvoc.c @@ -0,0 +1,119 @@ +#define NVOC_TRACEABLE_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_traceable_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x6305d2 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE; + +// Forward declarations for OBJTRACEABLE +void __nvoc_init__OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_init_funcTable_OBJTRACEABLE(OBJTRACEABLE*); +NV_STATUS __nvoc_ctor_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_init_dataField_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_dtor_OBJTRACEABLE(OBJTRACEABLE*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__OBJTRACEABLE; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJTRACEABLE; + +// Down-thunk(s) to bridge OBJTRACEABLE methods from ancestors (if any) + +// Up-thunk(s) to bridge OBJTRACEABLE methods to ancestors (if any) + +// Not instantiable because it's not derived from class "Object" +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJTRACEABLE), + /*classId=*/ classId(OBJTRACEABLE), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJTRACEABLE", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo__OBJTRACEABLE, + /*pExportInfo=*/ &__nvoc_export_info__OBJTRACEABLE +}; + + +// Metadata with per-class RTTI +static const struct NVOC_METADATA__OBJTRACEABLE __nvoc_metadata__OBJTRACEABLE = { + .rtti.pClassDef = &__nvoc_class_def_OBJTRACEABLE, // (traceable) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJTRACEABLE, + .rtti.offset = 0, +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__OBJTRACEABLE = { + .numRelatives = 1, + .relatives = { + &__nvoc_metadata__OBJTRACEABLE.rtti, // [0]: (traceable) this + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJTRACEABLE = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJTRACEABLE(OBJTRACEABLE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJTRACEABLE(OBJTRACEABLE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_OBJTRACEABLE(OBJTRACEABLE *pThis) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_OBJTRACEABLE(pThis); + goto __nvoc_ctor_OBJTRACEABLE_exit; // Success + +__nvoc_ctor_OBJTRACEABLE_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_OBJTRACEABLE_1(OBJTRACEABLE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_OBJTRACEABLE_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_OBJTRACEABLE(OBJTRACEABLE *pThis) { + __nvoc_init_funcTable_OBJTRACEABLE_1(pThis); +} + +// Initialize newly constructed object. 
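+//
+// The init entry point below runs before any constructor: it wires up the
+// ancestor pointers used by staticCast and publishes the per-class metadata
+// pointer. As a minimal hand-written sketch of the same pattern, for a
+// hypothetical NVOC class FOO derived directly from Object (FOO and its
+// members are illustrative only, not part of this patch):
+//
+//     void __nvoc_init__FOO(FOO *pThis)
+//     {
+//         pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;  // super
+//         pThis->__nvoc_pbase_FOO    = pThis;                       // this
+//         __nvoc_init__Object(&pThis->__nvoc_base_Object);          // recurse
+//         pThis->__nvoc_metadata_ptr = &__nvoc_metadata__FOO;       // RTTI
+//     }
+//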
+void __nvoc_init__OBJTRACEABLE(OBJTRACEABLE *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_OBJTRACEABLE = pThis; // (traceable) this + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__OBJTRACEABLE; // (traceable) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_OBJTRACEABLE(pThis); +} + diff --git a/src/nvidia/generated/g_traceable_nvoc.h b/src/nvidia/generated/g_traceable_nvoc.h new file mode 100644 index 0000000..78e5ab0 --- /dev/null +++ b/src/nvidia/generated/g_traceable_nvoc.h @@ -0,0 +1,122 @@ + +#ifndef _G_TRACEABLE_NVOC_H_ +#define _G_TRACEABLE_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once +#include "g_traceable_nvoc.h" + +#ifndef __ANCI_TRACEABLE_H__ +#define __ANCI_TRACEABLE_H__ + +#include "core/core.h" +#include "nvoc/object.h" + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_TRACEABLE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI +struct NVOC_METADATA__OBJTRACEABLE; + + +struct OBJTRACEABLE { + + // Metadata starts with RTTI structure. 
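+    //
+    // Both union members below alias the same storage: every
+    // NVOC_METADATA__* structure begins with its NVOC_RTTI member, so the
+    // RTTI can be read through either name. An illustrative compile-time
+    // check (not emitted by the generator) would be:
+    //
+    //     ct_assert(NV_OFFSETOF(struct NVOC_METADATA__OBJTRACEABLE, rtti) == 0);
+    //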
+ union { + const struct NVOC_METADATA__OBJTRACEABLE *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Ancestor object pointers for `staticCast` feature + struct OBJTRACEABLE *__nvoc_pbase_OBJTRACEABLE; // traceable +}; + + +// Metadata with per-class RTTI +struct NVOC_METADATA__OBJTRACEABLE { + const struct NVOC_RTTI rtti; +}; + +#ifndef __NVOC_CLASS_OBJTRACEABLE_TYPEDEF__ +#define __NVOC_CLASS_OBJTRACEABLE_TYPEDEF__ +typedef struct OBJTRACEABLE OBJTRACEABLE; +#endif /* __NVOC_CLASS_OBJTRACEABLE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJTRACEABLE +#define __nvoc_class_id_OBJTRACEABLE 0x6305d2 +#endif /* __nvoc_class_id_OBJTRACEABLE */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE; + +#define __staticCast_OBJTRACEABLE(pThis) \ + ((pThis)->__nvoc_pbase_OBJTRACEABLE) + +#ifdef __nvoc_traceable_h_disabled +#define __dynamicCast_OBJTRACEABLE(pThis) ((OBJTRACEABLE*) NULL) +#else //__nvoc_traceable_h_disabled +#define __dynamicCast_OBJTRACEABLE(pThis) \ + ((OBJTRACEABLE*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJTRACEABLE))) +#endif //__nvoc_traceable_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_OBJTRACEABLE(OBJTRACEABLE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJTRACEABLE(OBJTRACEABLE**, Dynamic*, NvU32); +#define __objCreate_OBJTRACEABLE(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJTRACEABLE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#undef PRIVATE_FIELD + + +void objTraverseCaptureState_IMPL(struct Object *pObj); +#define objTraverseCaptureState(p) objTraverseCaptureState_IMPL(staticCast((p), Object)) + +#endif // __ANCI_TRACEABLE_H__ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_TRACEABLE_NVOC_H_ diff --git a/src/nvidia/generated/g_vaspace_nvoc.c b/src/nvidia/generated/g_vaspace_nvoc.c new file mode 100644 index 0000000..b2e56d8 --- /dev/null +++ b/src/nvidia/generated/g_vaspace_nvoc.c @@ -0,0 +1,151 @@ +#define NVOC_VASPACE_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_vaspace_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0x6c347f = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +// Forward declarations for OBJVASPACE +void __nvoc_init__Object(Object*); +void __nvoc_init__OBJVASPACE(OBJVASPACE*); +void __nvoc_init_funcTable_OBJVASPACE(OBJVASPACE*); +NV_STATUS __nvoc_ctor_OBJVASPACE(OBJVASPACE*); +void __nvoc_init_dataField_OBJVASPACE(OBJVASPACE*); +void __nvoc_dtor_OBJVASPACE(OBJVASPACE*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__OBJVASPACE; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJVASPACE; + +// Down-thunk(s) to bridge OBJVASPACE methods from ancestors (if any) + +// Up-thunk(s) to bridge OBJVASPACE methods to ancestors (if any) + +// Not instantiable because it's an abstract class with following pure virtual functions: +// vaspaceConstruct_ +// vaspaceAlloc +// vaspaceFree +// vaspaceApplyDefaultAlignment +// vaspaceGetVasInfo +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE = +{ + /*classInfo=*/ { + /*size=*/ 
sizeof(OBJVASPACE), + /*classId=*/ classId(OBJVASPACE), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJVASPACE", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo__OBJVASPACE, + /*pExportInfo=*/ &__nvoc_export_info__OBJVASPACE +}; + + +// Metadata with per-class RTTI and vtable with ancestor(s) +static const struct NVOC_METADATA__OBJVASPACE __nvoc_metadata__OBJVASPACE = { + .rtti.pClassDef = &__nvoc_class_def_OBJVASPACE, // (vaspace) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJVASPACE, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(OBJVASPACE, __nvoc_base_Object), + + .vtable.__vaspaceConstruct___ = NULL, // pure virtual + .vtable.__vaspaceAlloc__ = NULL, // pure virtual + .vtable.__vaspaceFree__ = NULL, // pure virtual + .vtable.__vaspaceApplyDefaultAlignment__ = NULL, // pure virtual + .vtable.__vaspaceIncAllocRefCnt__ = &vaspaceIncAllocRefCnt_14ee5e, // inline virtual body + .vtable.__vaspaceGetVaStart__ = &vaspaceGetVaStart_IMPL, // virtual + .vtable.__vaspaceGetVaLimit__ = &vaspaceGetVaLimit_IMPL, // virtual + .vtable.__vaspaceGetVasInfo__ = NULL, // pure virtual + .vtable.__vaspaceGetFlags__ = &vaspaceGetFlags_edd98b, // inline virtual body +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__OBJVASPACE = { + .numRelatives = 2, + .relatives = { + &__nvoc_metadata__OBJVASPACE.rtti, // [0]: (vaspace) this + &__nvoc_metadata__OBJVASPACE.metadata__Object.rtti, // [1]: (obj) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJVASPACE = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJVASPACE(OBJVASPACE *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJVASPACE(OBJVASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJVASPACE(OBJVASPACE *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJVASPACE_fail_Object; + __nvoc_init_dataField_OBJVASPACE(pThis); + goto __nvoc_ctor_OBJVASPACE_exit; // Success + +__nvoc_ctor_OBJVASPACE_fail_Object: +__nvoc_ctor_OBJVASPACE_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_OBJVASPACE_1(OBJVASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_OBJVASPACE_1 + + +// Initialize vtable(s) for 9 virtual method(s). +void __nvoc_init_funcTable_OBJVASPACE(OBJVASPACE *pThis) { + __nvoc_init_funcTable_OBJVASPACE_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__OBJVASPACE(OBJVASPACE *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_OBJVASPACE = pThis; // (vaspace) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Object(&pThis->__nvoc_base_Object); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__OBJVASPACE.metadata__Object; // (obj) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__OBJVASPACE; // (vaspace) this + + // Initialize per-object vtables. 
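+    //
+    // For OBJVASPACE, every virtual method lives in the shared, read-only
+    // per-class vtable inside __nvoc_metadata__OBJVASPACE, so the call below
+    // has no per-object pointers left to fill in. A dispatch such as
+    // vaspaceGetFlags(pVAS) therefore resolves at call time as:
+    //
+    //     pVAS->__nvoc_metadata_ptr->vtable.__vaspaceGetFlags__(pVAS);
+    //
+    // for any constructed OBJVASPACE-derived object pVAS.
+    //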
+ __nvoc_init_funcTable_OBJVASPACE(pThis); +} + diff --git a/src/nvidia/generated/g_vaspace_nvoc.h b/src/nvidia/generated/g_vaspace_nvoc.h new file mode 100644 index 0000000..d439202 --- /dev/null +++ b/src/nvidia/generated/g_vaspace_nvoc.h @@ -0,0 +1,456 @@ + +#ifndef _G_VASPACE_NVOC_H_ +#define _G_VASPACE_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once +#include "g_vaspace_nvoc.h" + +#ifndef _VASPACE_H_ +#define _VASPACE_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: VASPACE.H * +* Defines and structures used for Virtual Address Space Object. 
*
+\***************************************************************************/
+
+#include "ctrl/ctrl0080/ctrl0080dma.h"
+
+#include "core/core.h"
+#include "resserv/rs_client.h"
+#include "containers/btree.h"
+#include "gpu/mem_mgr/heap_base.h"
+#include "gpu/mem_mgr/mem_desc.h"
+
+typedef struct OBJEHEAP OBJEHEAP;
+typedef struct EMEMBLOCK EMEMBLOCK;
+typedef struct OBJVASPACE *POBJVASPACE;
+typedef struct VASPACE VASPACE, *PVASPACE;
+
+struct VirtMemAllocator;
+
+#ifndef __NVOC_CLASS_VirtMemAllocator_TYPEDEF__
+#define __NVOC_CLASS_VirtMemAllocator_TYPEDEF__
+typedef struct VirtMemAllocator VirtMemAllocator;
+#endif /* __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_VirtMemAllocator
+#define __nvoc_class_id_VirtMemAllocator 0x899e48
+#endif /* __nvoc_class_id_VirtMemAllocator */
+
+
+typedef struct MMU_MAP_TARGET MMU_MAP_TARGET;
+typedef struct NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS;
+
+struct Device;
+
+#ifndef __NVOC_CLASS_Device_TYPEDEF__
+#define __NVOC_CLASS_Device_TYPEDEF__
+typedef struct Device Device;
+#endif /* __NVOC_CLASS_Device_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Device
+#define __nvoc_class_id_Device 0xe0ac20
+#endif /* __nvoc_class_id_Device */
+
+
+
+typedef struct
+{
+    NvBool bReverse : 1;
+    NvBool bPreferSysmemPageTables : 1;
+    NvBool bExternallyManaged : 1;
+    NvBool bLazy : 1;
+    NvBool bSparse : 1;
+    NvBool bPrivileged : 1;
+    NvBool bClientAllocation : 1;
+    NvBool bFixedAddressRange : 1;
+    NvBool bFixedAddressAllocate : 1;
+    NvBool bForceContig : 1;
+    NvBool bForceNonContig : 1;
+
+    //
+    // Using this flag may have security implications. So, use it only when
+    // you are sure about its usage.
+    //
+    NvBool bSkipTlbInvalidateOnFree : 1;
+} VAS_ALLOC_FLAGS;
+
+#define VAS_EHEAP_OWNER_NVRM NvU32_BUILD('n','v','r','m')
+#define VAS_EHEAP_OWNER_RSVD NvU32_BUILD('r','s','v','d')
+
+typedef struct
+{
+    NvBool bRemap : 1;
+} VAS_MAP_FLAGS;
+
+//
+// Flags for VA space creation (VASPACE_FLAGS_*).
+//
+// SET_MIRRORED       This flag will create a privileged PDB as part of this
+//                    vaspace. This new PDB will mirror all of the allocations
+//                    made in the original PDB. The first PDE is considered
+//                    privileged for this address space.
+// SHARED_MANAGEMENT  Enables mode where only a portion of the VAS is managed
+//                    and the page directory may be allocated/set externally.
+// ALLOW_ZERO_ADDRESS Explicitly allows the base VAS address to start at 0.
+//                    Normally 0 is reserved to distinguish NULL pointers.
+//
+// BIG_PAGE_SIZE      Field that specifies the big page size to be used.
+//                    DEFAULT is used up to GM10X; GM20X and later use a
+//                    custom value for the big page size.
+//    SIZE_DEFAULT    Lets RM pick the default value.
+//    SIZE_64K        Uses 64K as the big page size for this VA space.
+//    SIZE_128K       Uses 128K as the big page size for this VA space.
+//
+// MMU_FMT_VA_BITS    Selects the MMU format of the VA space by the number
+//                    of VA bits supported.
+//    DEFAULT         RM picks the default for the underlying MMU HW.
+//    40              Fermi+ 40-bit (2-level) format.
+//    49              Pascal+ 49-bit (5-level) format.
+//
+// ENABLE_VMM         Temp flag to enable the new VMM code path on select
+//                    VA spaces (e.g. client but not BAR1/PMU VAS).
+//
+// ZERO_OLD_STRUCT    Deprecated.
+//
+// ENABLE_FAULTING    This address space is participating in UVM.
+//                    RM will enable page faulting for all channels that will
+//                    be associated with this address space.
+//
+// IS_UVM_MANAGED     This flag will replace the SET_MIRRORED flag. It is used
+//                    to denote that this VASpace is participating in UVM.
+//
+// ENABLE_ATS         This address space has ATS enabled.
+// +// +// ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR This flag when set will allow page table allocations +// to be routed to suballocator of the current process +// requesting mapping. If no suballocator, allocations +// will fallback to global heap. +// +// VASPACE_FLAGS_INVALIDATE_SCOPE_NVLINK_TLB This flag must be used by the VASs which use +// the NVLink MMU. +// +#define VASPACE_FLAGS_NONE 0 +#define VASPACE_FLAGS_BAR NVBIT(0) +#define VASPACE_FLAGS_SCRATCH_INVAL NVBIT(1) +#define VASPACE_FLAGS_ENABLE_ATS NVBIT(2) +#define VASPACE_FLAGS_RESTRICTED_RM_INTERNAL_VALIMITS NVBIT(3) +#define VASPACE_FLAGS_MINIMIZE_PTETABLE_SIZE NVBIT(4) +#define VASPACE_FLAGS_RETRY_PTE_ALLOC_IN_SYS NVBIT(5) +#define VASPACE_FLAGS_REQUIRE_FIXED_OFFSET NVBIT(6) +#define VASPACE_FLAGS_BAR_BAR1 NVBIT(7) +#define VASPACE_FLAGS_BAR_BAR2 NVBIT(8) +#define VASPACE_FLAGS_BAR_IFB NVBIT(9) +#define VASPACE_FLAGS_PERFMON NVBIT(10) +#define VASPACE_FLAGS_PMU NVBIT(11) +#define VASPACE_FLAGS_PTETABLE_PMA_MANAGED NVBIT(14) +#define VASPACE_FLAGS_INVALIDATE_SCOPE_NVLINK_TLB NVBIT(15) +#define VASPACE_FLAGS_DISABLE_SPLIT_VAS NVBIT(16) +#define VASPACE_FLAGS_SET_MIRRORED NVBIT(17) +#define VASPACE_FLAGS_SHARED_MANAGEMENT NVBIT(18) +#define VASPACE_FLAGS_ALLOW_ZERO_ADDRESS NVBIT(19) +#define VASPACE_FLAGS_SKIP_SCRUB_MEMPOOL NVBIT(20) +#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE 22:21 +#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE_DEFAULT 0x00000000 +#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE_64K 0x00000001 +#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE_128K 0x00000002 +#define VASPACE_FLAGS_HDA NVBIT(23) +#define VASPACE_FLAGS_FLA NVBIT(24) // Soon to be deprecated and removed. + // Used by legacy FLA implementation. +#define VASPACE_FLAGS_HWPM NVBIT(25) +#define VASPACE_FLAGS_ENABLE_VMM NVBIT(26) +#define VASPACE_FLAGS_OPTIMIZE_PTETABLE_MEMPOOL_USAGE NVBIT(27) +#define VASPACE_FLAGS_REVERSE NVBIT(28) +#define VASPACE_FLAGS_ENABLE_FAULTING NVBIT(29) +#define VASPACE_FLAGS_IS_EXTERNALLY_OWNED NVBIT(30) +#define VASPACE_FLAGS_ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR NVBIT(31) + +/*! + * Flags for page table memory pools. + * + * VASPACE_RESERVE_FLAGS_ALLOC_UPTO_TARGET_LEVEL_ONLY + * Only allocate levels from the top to the specified level only. + * Anything below the specified level is not allocated. + */ +#define VASPACE_RESERVE_FLAGS_NONE (0) +#define VASPACE_RESERVE_FLAGS_ALLOC_UPTO_TARGET_LEVEL_ONLY NVBIT32(0) + +/*! + * Level of RM-management for a given VA range. + * + * FULL + * RM manages everything (e.g. PDEs, PTEs). + * PDES_ONLY + * RM only manages PDEs (through non-buffer version of UpdatePde2). + * Buffer versions of FillPteMem and UpdatePde2 are still allowed. + * NONE + * RM does not manage anything. + * Buffer versions of FillPteMem and UpdatePde2 are still allowed. + */ +typedef enum +{ + VA_MANAGEMENT_FULL = 0, + VA_MANAGEMENT_PDES_ONLY, + VA_MANAGEMENT_NONE, +} VA_MANAGEMENT; + +/*! + * Abstract base class of an RM-managed virtual address space. + */ + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. +#ifdef NVOC_VASPACE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__OBJVASPACE; +struct NVOC_METADATA__Object; +struct NVOC_VTABLE__OBJVASPACE; + + +struct OBJVASPACE { + + // Metadata starts with RTTI structure. 
+ union { + const struct NVOC_METADATA__OBJVASPACE *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct Object __nvoc_base_Object; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct OBJVASPACE *__nvoc_pbase_OBJVASPACE; // vaspace + + // Data members + NvU32 gpuMask; + ADDRESS_TRANSLATION addressTranslation; + NvU32 refCnt; + NvU32 vaspaceId; + NvU64 vasStart; + NvU64 vasLimit; + NvU32 vasUniqueId; +}; + + +// Vtable with 9 per-class function pointers +struct NVOC_VTABLE__OBJVASPACE { + NV_STATUS (*__vaspaceConstruct___)(struct OBJVASPACE * /*this*/, NvU32, NvU32, NvU64, NvU64, NvU64, NvU64, NvU32); // pure virtual + NV_STATUS (*__vaspaceAlloc__)(struct OBJVASPACE * /*this*/, NvU64, NvU64, NvU64, NvU64, NvU64, VAS_ALLOC_FLAGS, NvU64 *); // pure virtual + NV_STATUS (*__vaspaceFree__)(struct OBJVASPACE * /*this*/, NvU64); // pure virtual + NV_STATUS (*__vaspaceApplyDefaultAlignment__)(struct OBJVASPACE * /*this*/, const FB_ALLOC_INFO *, NvU64 *, NvU64 *, NvU64 *); // pure virtual + NV_STATUS (*__vaspaceIncAllocRefCnt__)(struct OBJVASPACE * /*this*/, NvU64); // inline virtual body + NvU64 (*__vaspaceGetVaStart__)(struct OBJVASPACE * /*this*/); // virtual + NvU64 (*__vaspaceGetVaLimit__)(struct OBJVASPACE * /*this*/); // virtual + NV_STATUS (*__vaspaceGetVasInfo__)(struct OBJVASPACE * /*this*/, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *); // pure virtual + NvU32 (*__vaspaceGetFlags__)(struct OBJVASPACE * /*this*/); // inline virtual body +}; + +// Metadata with per-class RTTI and vtable with ancestor(s) +struct NVOC_METADATA__OBJVASPACE { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; + const struct NVOC_VTABLE__OBJVASPACE vtable; +}; + +#ifndef __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +#define __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +typedef struct OBJVASPACE OBJVASPACE; +#endif /* __NVOC_CLASS_OBJVASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVASPACE +#define __nvoc_class_id_OBJVASPACE 0x6c347f +#endif /* __nvoc_class_id_OBJVASPACE */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE; + +#define __staticCast_OBJVASPACE(pThis) \ + ((pThis)->__nvoc_pbase_OBJVASPACE) + +#ifdef __nvoc_vaspace_h_disabled +#define __dynamicCast_OBJVASPACE(pThis) ((OBJVASPACE*) NULL) +#else //__nvoc_vaspace_h_disabled +#define __dynamicCast_OBJVASPACE(pThis) \ + ((OBJVASPACE*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJVASPACE))) +#endif //__nvoc_vaspace_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_OBJVASPACE(OBJVASPACE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJVASPACE(OBJVASPACE**, Dynamic*, NvU32); +#define __objCreate_OBJVASPACE(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJVASPACE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros +#define vaspaceConstruct__FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__vaspaceConstruct___ +#define vaspaceConstruct_(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) vaspaceConstruct__DISPATCH(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) +#define vaspaceAlloc_FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__vaspaceAlloc__ +#define vaspaceAlloc(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) vaspaceAlloc_DISPATCH(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) +#define 
vaspaceFree_FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__vaspaceFree__ +#define vaspaceFree(pVAS, vAddr) vaspaceFree_DISPATCH(pVAS, vAddr) +#define vaspaceApplyDefaultAlignment_FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__vaspaceApplyDefaultAlignment__ +#define vaspaceApplyDefaultAlignment(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) vaspaceApplyDefaultAlignment_DISPATCH(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) +#define vaspaceIncAllocRefCnt_FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__vaspaceIncAllocRefCnt__ +#define vaspaceIncAllocRefCnt(pVAS, vAddr) vaspaceIncAllocRefCnt_DISPATCH(pVAS, vAddr) +#define vaspaceGetVaStart_FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__vaspaceGetVaStart__ +#define vaspaceGetVaStart(pVAS) vaspaceGetVaStart_DISPATCH(pVAS) +#define vaspaceGetVaLimit_FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__vaspaceGetVaLimit__ +#define vaspaceGetVaLimit(pVAS) vaspaceGetVaLimit_DISPATCH(pVAS) +#define vaspaceGetVasInfo_FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__vaspaceGetVasInfo__ +#define vaspaceGetVasInfo(pVAS, pParams) vaspaceGetVasInfo_DISPATCH(pVAS, pParams) +#define vaspaceGetFlags_FNPTR(pVAS) pVAS->__nvoc_metadata_ptr->vtable.__vaspaceGetFlags__ +#define vaspaceGetFlags(pVAS) vaspaceGetFlags_DISPATCH(pVAS) + +// Dispatch functions +static inline NV_STATUS vaspaceConstruct__DISPATCH(struct OBJVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) { + return pVAS->__nvoc_metadata_ptr->vtable.__vaspaceConstruct___(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags); +} + +static inline NV_STATUS vaspaceAlloc_DISPATCH(struct OBJVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) { + return pVAS->__nvoc_metadata_ptr->vtable.__vaspaceAlloc__(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr); +} + +static inline NV_STATUS vaspaceFree_DISPATCH(struct OBJVASPACE *pVAS, NvU64 vAddr) { + return pVAS->__nvoc_metadata_ptr->vtable.__vaspaceFree__(pVAS, vAddr); +} + +static inline NV_STATUS vaspaceApplyDefaultAlignment_DISPATCH(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) { + return pVAS->__nvoc_metadata_ptr->vtable.__vaspaceApplyDefaultAlignment__(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask); +} + +static inline NV_STATUS vaspaceIncAllocRefCnt_DISPATCH(struct OBJVASPACE *pVAS, NvU64 vAddr) { + return pVAS->__nvoc_metadata_ptr->vtable.__vaspaceIncAllocRefCnt__(pVAS, vAddr); +} + +static inline NvU64 vaspaceGetVaStart_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__nvoc_metadata_ptr->vtable.__vaspaceGetVaStart__(pVAS); +} + +static inline NvU64 vaspaceGetVaLimit_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__nvoc_metadata_ptr->vtable.__vaspaceGetVaLimit__(pVAS); +} + +static inline NV_STATUS vaspaceGetVasInfo_DISPATCH(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) { + return pVAS->__nvoc_metadata_ptr->vtable.__vaspaceGetVasInfo__(pVAS, pParams); +} + +static inline NvU32 vaspaceGetFlags_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__nvoc_metadata_ptr->vtable.__vaspaceGetFlags__(pVAS); +} + +static inline NV_STATUS vaspaceIncAllocRefCnt_14ee5e(struct OBJVASPACE *pVAS, NvU64 vAddr) { + NV_ASSERT_PRECOMP(NV_FALSE); + return NV_ERR_NOT_SUPPORTED; +} + +NvU64 vaspaceGetVaStart_IMPL(struct OBJVASPACE *pVAS); + +NvU64 
vaspaceGetVaLimit_IMPL(struct OBJVASPACE *pVAS); + +static inline NvU32 vaspaceGetFlags_edd98b(struct OBJVASPACE *pVAS) { + return 0U; +} + +void vaspaceIncRefCnt_IMPL(struct OBJVASPACE *pVAS); + +#ifdef __nvoc_vaspace_h_disabled +static inline void vaspaceIncRefCnt(struct OBJVASPACE *pVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJVASPACE was disabled!"); +} +#else //__nvoc_vaspace_h_disabled +#define vaspaceIncRefCnt(pVAS) vaspaceIncRefCnt_IMPL(pVAS) +#endif //__nvoc_vaspace_h_disabled + +void vaspaceDecRefCnt_IMPL(struct OBJVASPACE *pVAS); + +#ifdef __nvoc_vaspace_h_disabled +static inline void vaspaceDecRefCnt(struct OBJVASPACE *pVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJVASPACE was disabled!"); +} +#else //__nvoc_vaspace_h_disabled +#define vaspaceDecRefCnt(pVAS) vaspaceDecRefCnt_IMPL(pVAS) +#endif //__nvoc_vaspace_h_disabled + +NV_STATUS vaspaceGetByHandleOrDeviceDefault_IMPL(struct RsClient *pClient, NvHandle hDeviceOrSubDevice, NvHandle hVASpace, struct OBJVASPACE **ppVAS); + +#define vaspaceGetByHandleOrDeviceDefault(pClient, hDeviceOrSubDevice, hVASpace, ppVAS) vaspaceGetByHandleOrDeviceDefault_IMPL(pClient, hDeviceOrSubDevice, hVASpace, ppVAS) +#undef PRIVATE_FIELD + + +// Ideally all non-static base class method declaration should be in the _private.h file +NvU64 vaspaceGetVaStart_IMPL(struct OBJVASPACE *pVAS); + +// For getting the address translation after the MMU (i.e.: after VA->PA translation) +#define VAS_ADDRESS_TRANSLATION(pVASpace) ((pVASpace)->addressTranslation) + +#endif // _VASPACE_H_ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_VASPACE_NVOC_H_ diff --git a/src/nvidia/generated/g_virt_mem_mgr_nvoc.c b/src/nvidia/generated/g_virt_mem_mgr_nvoc.c new file mode 100644 index 0000000..9faadf1 --- /dev/null +++ b/src/nvidia/generated/g_virt_mem_mgr_nvoc.c @@ -0,0 +1,198 @@ +#define NVOC_VIRT_MEM_MGR_H_PRIVATE_ACCESS_ALLOWED + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_virt_mem_mgr_nvoc.h" + + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check__0xa030ab = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVMM; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +// Forward declarations for OBJVMM +void __nvoc_init__Object(Object*); +void __nvoc_init__OBJVMM(OBJVMM*); +void __nvoc_init_funcTable_OBJVMM(OBJVMM*); +NV_STATUS __nvoc_ctor_OBJVMM(OBJVMM*); +void __nvoc_init_dataField_OBJVMM(OBJVMM*); +void __nvoc_dtor_OBJVMM(OBJVMM*); + +// Structures used within RTTI (run-time type information) +extern const struct NVOC_CASTINFO __nvoc_castinfo__OBJVMM; +extern const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJVMM; + +// Down-thunk(s) to bridge OBJVMM methods from ancestors (if any) + +// Up-thunk(s) to bridge OBJVMM methods to ancestors (if any) + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVMM = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJVMM), + /*classId=*/ classId(OBJVMM), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJVMM", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJVMM, + /*pCastInfo=*/ &__nvoc_castinfo__OBJVMM, + /*pExportInfo=*/ &__nvoc_export_info__OBJVMM +}; + + +// Metadata with per-class RTTI with ancestor(s) +static const struct NVOC_METADATA__OBJVMM 
__nvoc_metadata__OBJVMM = { + .rtti.pClassDef = &__nvoc_class_def_OBJVMM, // (vmm) this + .rtti.dtor = (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJVMM, + .rtti.offset = 0, + .metadata__Object.rtti.pClassDef = &__nvoc_class_def_Object, // (obj) super + .metadata__Object.rtti.dtor = &__nvoc_destructFromBase, + .metadata__Object.rtti.offset = NV_OFFSETOF(OBJVMM, __nvoc_base_Object), +}; + + +// Dynamic down-casting information +const struct NVOC_CASTINFO __nvoc_castinfo__OBJVMM = { + .numRelatives = 2, + .relatives = { + &__nvoc_metadata__OBJVMM.rtti, // [0]: (vmm) this + &__nvoc_metadata__OBJVMM.metadata__Object.rtti, // [1]: (obj) super + } +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info__OBJVMM = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJVMM(OBJVMM *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJVMM(OBJVMM *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJVMM(OBJVMM *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJVMM_fail_Object; + __nvoc_init_dataField_OBJVMM(pThis); + goto __nvoc_ctor_OBJVMM_exit; // Success + +__nvoc_ctor_OBJVMM_fail_Object: +__nvoc_ctor_OBJVMM_exit: + + return status; +} + +// Vtable initialization +static void __nvoc_init_funcTable_OBJVMM_1(OBJVMM *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} // End __nvoc_init_funcTable_OBJVMM_1 + + +// Initialize vtable(s): Nothing to do for empty vtables +void __nvoc_init_funcTable_OBJVMM(OBJVMM *pThis) { + __nvoc_init_funcTable_OBJVMM_1(pThis); +} + +// Initialize newly constructed object. +void __nvoc_init__OBJVMM(OBJVMM *pThis) { + + // Initialize pointers to inherited data. + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; // (obj) super + pThis->__nvoc_pbase_OBJVMM = pThis; // (vmm) this + + // Recurse to superclass initialization function(s). + __nvoc_init__Object(&pThis->__nvoc_base_Object); + + // Pointer(s) to metadata structures(s) + pThis->__nvoc_base_Object.__nvoc_metadata_ptr = &__nvoc_metadata__OBJVMM.metadata__Object; // (obj) super + pThis->__nvoc_metadata_ptr = &__nvoc_metadata__OBJVMM; // (vmm) this + + // Initialize per-object vtables. + __nvoc_init_funcTable_OBJVMM(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJVMM(OBJVMM **ppThis, Dynamic *pParent, NvU32 createFlags) +{ + NV_STATUS status; + Object *pParentObj = NULL; + OBJVMM *pThis; + + // Assign `pThis`, allocating memory unless suppressed by flag. + status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(OBJVMM), (void**)&pThis, (void**)ppThis); + if (status != NV_OK) + return status; + + // Zero is the initial value for everything. + portMemSet(pThis, 0, sizeof(OBJVMM)); + + pThis->__nvoc_base_Object.createFlags = createFlags; + + // Link the child into the parent if there is one unless flagged not to do so. + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init__OBJVMM(pThis); + status = __nvoc_ctor_OBJVMM(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJVMM_cleanup; + + // Assignment has no effect if NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT is set. 
+ *ppThis = pThis; + + return NV_OK; + +__nvoc_objCreate_OBJVMM_cleanup: + + // Unlink the child from the parent if it was linked above. + if (pParentObj != NULL) + objRemoveChild(pParentObj, &pThis->__nvoc_base_Object); + + // Do not call destructors here since the constructor already called them. + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + portMemSet(pThis, 0, sizeof(OBJVMM)); + else + { + portMemFree(pThis); + *ppThis = NULL; + } + + // coverity[leaked_storage:FALSE] + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJVMM(OBJVMM **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJVMM(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_virt_mem_mgr_nvoc.h b/src/nvidia/generated/g_virt_mem_mgr_nvoc.h new file mode 100644 index 0000000..d2e2f83 --- /dev/null +++ b/src/nvidia/generated/g_virt_mem_mgr_nvoc.h @@ -0,0 +1,178 @@ + +#ifndef _G_VIRT_MEM_MGR_NVOC_H_ +#define _G_VIRT_MEM_MGR_NVOC_H_ + +// Version of generated metadata structures +#ifdef NVOC_METADATA_VERSION +#undef NVOC_METADATA_VERSION +#endif +#define NVOC_METADATA_VERSION 2 + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_virt_mem_mgr_nvoc.h" + +#ifndef VIRT_MEM_MGR_H +#define VIRT_MEM_MGR_H + +/**************** Resource Manager Defines and Structures ******************\ +* Defines and structures used for Virtual Memory Management Object. * +\***************************************************************************/ + +#include "mem_mgr/vaspace.h" + +typedef struct OBJVMM *POBJVMM; + +#ifndef __NVOC_CLASS_OBJVMM_TYPEDEF__ +#define __NVOC_CLASS_OBJVMM_TYPEDEF__ +typedef struct OBJVMM OBJVMM; +#endif /* __NVOC_CLASS_OBJVMM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVMM +#define __nvoc_class_id_OBJVMM 0xa030ab +#endif /* __nvoc_class_id_OBJVMM */ + + + + +// Private field names are wrapped in PRIVATE_FIELD, which does nothing for +// the matching C source file, but causes diagnostics to be issued if another +// source file references the field. 
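+// +// A minimal sketch of how this plays out, assuming a hypothetical class Foo +// (the guard name below follows the same convention but is illustrative): +// +// // foo.c -- the matching source file +// #define NVOC_FOO_H_PRIVATE_ACCESS_ALLOWED +// #include "g_foo_nvoc.h" +// void fooRef(Foo *pFoo) { pFoo->refCount++; } // OK: PRIVATE_FIELD(refCount) expands to refCount +// +// // bar.c -- any other source file +// #include "g_foo_nvoc.h" +// void barRef(Foo *pFoo) { pFoo->refCount++; } // diagnostic: the field name was mangled by NVOC_PRIVATE_FIELD() +//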
+#ifdef NVOC_VIRT_MEM_MGR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJVMM; +struct NVOC_METADATA__Object; + + +struct OBJVMM { + + // Metadata starts with RTTI structure. + union { + const struct NVOC_METADATA__OBJVMM *__nvoc_metadata_ptr; + const struct NVOC_RTTI *__nvoc_rtti; + }; + + // Parent (i.e. superclass or base class) objects + struct Object __nvoc_base_Object; + + // Ancestor object pointers for `staticCast` feature + struct Object *__nvoc_pbase_Object; // obj super + struct OBJVMM *__nvoc_pbase_OBJVMM; // vmm +}; + + +// Metadata with per-class RTTI with ancestor(s) +struct NVOC_METADATA__OBJVMM { + const struct NVOC_RTTI rtti; + const struct NVOC_METADATA__Object metadata__Object; +}; + +#ifndef __NVOC_CLASS_OBJVMM_TYPEDEF__ +#define __NVOC_CLASS_OBJVMM_TYPEDEF__ +typedef struct OBJVMM OBJVMM; +#endif /* __NVOC_CLASS_OBJVMM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVMM +#define __nvoc_class_id_OBJVMM 0xa030ab +#endif /* __nvoc_class_id_OBJVMM */ + +// Casting support +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVMM; + +#define __staticCast_OBJVMM(pThis) \ + ((pThis)->__nvoc_pbase_OBJVMM) + +#ifdef __nvoc_virt_mem_mgr_h_disabled +#define __dynamicCast_OBJVMM(pThis) ((OBJVMM*) NULL) +#else //__nvoc_virt_mem_mgr_h_disabled +#define __dynamicCast_OBJVMM(pThis) \ + ((OBJVMM*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJVMM))) +#endif //__nvoc_virt_mem_mgr_h_disabled + +NV_STATUS __nvoc_objCreateDynamic_OBJVMM(OBJVMM**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJVMM(OBJVMM**, Dynamic*, NvU32); +#define __objCreate_OBJVMM(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJVMM((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + + +// Wrapper macros + +// Dispatch functions +NV_STATUS vmmCreateVaspace_IMPL(struct OBJVMM *pVmm, NvU32 _class, NvU32 vaspaceId, NvU32 gpuMask, NvU64 vaStart, NvU64 vaLimit, NvU64 vaInternalStart, NvU64 vaInternalEnd, struct OBJVASPACE *pPteSpaceMap, NvU32 flags, struct OBJVASPACE **ppVAS); + +#ifdef __nvoc_virt_mem_mgr_h_disabled +static inline NV_STATUS vmmCreateVaspace(struct OBJVMM *pVmm, NvU32 _class, NvU32 vaspaceId, NvU32 gpuMask, NvU64 vaStart, NvU64 vaLimit, NvU64 vaInternalStart, NvU64 vaInternalEnd, struct OBJVASPACE *pPteSpaceMap, NvU32 flags, struct OBJVASPACE **ppVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJVMM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_mgr_h_disabled +#define vmmCreateVaspace(pVmm, _class, vaspaceId, gpuMask, vaStart, vaLimit, vaInternalStart, vaInternalEnd, pPteSpaceMap, flags, ppVAS) vmmCreateVaspace_IMPL(pVmm, _class, vaspaceId, gpuMask, vaStart, vaLimit, vaInternalStart, vaInternalEnd, pPteSpaceMap, flags, ppVAS) +#endif //__nvoc_virt_mem_mgr_h_disabled + +void vmmDestroyVaspace_IMPL(struct OBJVMM *pVmm, struct OBJVASPACE *pVAS); + +#ifdef __nvoc_virt_mem_mgr_h_disabled +static inline void vmmDestroyVaspace(struct OBJVMM *pVmm, struct OBJVASPACE *pVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJVMM was disabled!"); +} +#else //__nvoc_virt_mem_mgr_h_disabled +#define vmmDestroyVaspace(pVmm, pVAS) vmmDestroyVaspace_IMPL(pVmm, pVAS) +#endif //__nvoc_virt_mem_mgr_h_disabled + +NV_STATUS vmmGetVaspaceFromId_IMPL(struct OBJVMM *pVmm, NvU32 vaspaceId, NvU32 classId, struct OBJVASPACE **ppVAS); + +#ifdef __nvoc_virt_mem_mgr_h_disabled +static inline NV_STATUS vmmGetVaspaceFromId(struct OBJVMM *pVmm, NvU32 
vaspaceId, NvU32 classId, struct OBJVASPACE **ppVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJVMM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_mgr_h_disabled +#define vmmGetVaspaceFromId(pVmm, vaspaceId, classId, ppVAS) vmmGetVaspaceFromId_IMPL(pVmm, vaspaceId, classId, ppVAS) +#endif //__nvoc_virt_mem_mgr_h_disabled + +#undef PRIVATE_FIELD + + +#endif // VIRT_MEM_MGR_H + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _G_VIRT_MEM_MGR_NVOC_H_ diff --git a/src/nvidia/generated/rmconfig.h b/src/nvidia/generated/rmconfig.h new file mode 100644 index 0000000..2b3fbe4 --- /dev/null +++ b/src/nvidia/generated/rmconfig.h @@ -0,0 +1,810 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// defines to indicate enabled/disabled for all chips, features, classes, engines, and apis. +// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_rmconfig.h +// +// Chips: T234D, T26XD, T25XD +// + +#ifndef _RMCFG_H_ +#define _RMCFG_H_ + + +// +// CHIP families - enabled or disabled +// +#define RMCFG_CHIP_GF10X 0 +#define RMCFG_CHIP_GF11X 0 +#define RMCFG_CHIP_GF10XF 0 +#define RMCFG_CHIP_GK10X 0 +#define RMCFG_CHIP_GK11X 0 +#define RMCFG_CHIP_GK20X 0 +#define RMCFG_CHIP_GM10X 0 +#define RMCFG_CHIP_GM20X 0 +#define RMCFG_CHIP_GP10X 0 +#define RMCFG_CHIP_GV10X 0 +#define RMCFG_CHIP_GV11X 0 +#define RMCFG_CHIP_TU10X 0 +#define RMCFG_CHIP_GA10X 0 +#define RMCFG_CHIP_GA10XF 0 +#define RMCFG_CHIP_AD10X 0 +#define RMCFG_CHIP_GH10X 0 +#define RMCFG_CHIP_GB10X 0 +#define RMCFG_CHIP_GB20X 0 +#define RMCFG_CHIP_T12X 0 +#define RMCFG_CHIP_T13X 0 +#define RMCFG_CHIP_T21X 0 +#define RMCFG_CHIP_T18X 0 +#define RMCFG_CHIP_T19X 0 +#define RMCFG_CHIP_T23XG 0 +#define RMCFG_CHIP_T23XD 1 +#define RMCFG_CHIP_T26XD 1 +#define RMCFG_CHIP_T25XD 1 +#define RMCFG_CHIP_SIMS 0 + + +// +// CHIPS - enabled or disabled +// +#define RMCFG_CHIP_GM107 0 +#define RMCFG_CHIP_GM108 0 + +#define RMCFG_CHIP_GM200 0 +#define RMCFG_CHIP_GM204 0 +#define RMCFG_CHIP_GM206 0 + +#define RMCFG_CHIP_GP100 0 +#define RMCFG_CHIP_GP102 0 +#define RMCFG_CHIP_GP104 0 +#define RMCFG_CHIP_GP106 0 +#define RMCFG_CHIP_GP107 0 +#define RMCFG_CHIP_GP108 0 + +#define RMCFG_CHIP_GV100 0 + +#define RMCFG_CHIP_GV11B 0 + +#define RMCFG_CHIP_TU102 0 +#define RMCFG_CHIP_TU104 0 +#define RMCFG_CHIP_TU106 0 +#define RMCFG_CHIP_TU116 0 +#define RMCFG_CHIP_TU117 0 + +#define RMCFG_CHIP_GA100 0 +#define RMCFG_CHIP_GA102 0 +#define RMCFG_CHIP_GA103 0 +#define RMCFG_CHIP_GA104 0 +#define RMCFG_CHIP_GA106 0 +#define RMCFG_CHIP_GA107 0 +#define RMCFG_CHIP_GA10B 0 + +#define RMCFG_CHIP_GA102F 0 + +#define RMCFG_CHIP_AD102 0 +#define RMCFG_CHIP_AD103 0 +#define RMCFG_CHIP_AD104 0 +#define RMCFG_CHIP_AD106 0 +#define RMCFG_CHIP_AD107 0 + +#define RMCFG_CHIP_GH100 0 + +#define RMCFG_CHIP_GB100 0 +#define RMCFG_CHIP_GB102 0 +#define RMCFG_CHIP_GB110 0 +#define RMCFG_CHIP_GB112 0 + +#define RMCFG_CHIP_GB202 0 +#define RMCFG_CHIP_GB203 0 +#define RMCFG_CHIP_GB205 0 +#define RMCFG_CHIP_GB206 0 +#define RMCFG_CHIP_GB207 0 + +#define RMCFG_CHIP_T194 0 + +#define RMCFG_CHIP_T234 0 + +#define RMCFG_CHIP_T234D 1 + +#define RMCFG_CHIP_T264D 1 + +#define RMCFG_CHIP_T256D 1 + +#define RMCFG_CHIP_AMODEL 0 + +// +// Obsolete CHIPS +// +#define RMCFG_CHIP_GF100 0 +#define RMCFG_CHIP_GF100B 0 +#define RMCFG_CHIP_GF104 0 +#define RMCFG_CHIP_GF104B 0 +#define RMCFG_CHIP_GF106 0 +#define RMCFG_CHIP_GF106B 0 +#define RMCFG_CHIP_GF108 0 +#define RMCFG_CHIP_GF110D 0 +#define RMCFG_CHIP_GF110 0 +#define RMCFG_CHIP_GF117 0 +#define 
RMCFG_CHIP_GF118 0 +#define RMCFG_CHIP_GF119 0 +#define RMCFG_CHIP_GF110F 0 +#define RMCFG_CHIP_GF110F2 0 +#define RMCFG_CHIP_GF110F3 0 +#define RMCFG_CHIP_GK104 0 +#define RMCFG_CHIP_GK106 0 +#define RMCFG_CHIP_GK107 0 +#define RMCFG_CHIP_GK20A 0 +#define RMCFG_CHIP_GK110 0 +#define RMCFG_CHIP_GK110B 0 +#define RMCFG_CHIP_GK110C 0 +#define RMCFG_CHIP_GK208 0 +#define RMCFG_CHIP_GK208S 0 +#define RMCFG_CHIP_T001_FERMI_NOT_EXIST 0 +#define RMCFG_CHIP_T124 0 +#define RMCFG_CHIP_T132 0 +#define RMCFG_CHIP_T210 0 +#define RMCFG_CHIP_T186 0 +#define RMCFG_CHIP_T002_TURING_NOT_EXIST 0 +#define RMCFG_CHIP_T003_ADA_NOT_EXIST 0 +#define RMCFG_CHIP_T004_HOPPER_NOT_EXIST 0 + + +// +// CHIP aliases +// +#define RMCFG_CHIP_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dFERMI 0 +#define RMCFG_CHIP_DFERMI 0 +#define RMCFG_CHIP_FERMI 0 +#define RMCFG_CHIP_FERMI_CLASSIC_GPUS 0 +#define RMCFG_CHIP_ALL 1 +#define RMCFG_CHIP_ALL_CLASSIC_GPUS 0 +#define RMCFG_CHIP_ALL_CHIPS 1 +#define RMCFG_CHIP_ALL_CHIPS_CLASSIC_GPUS 0 +#define RMCFG_CHIP_DISPLAYLESS 0 +#define RMCFG_CHIP_DISPLAYLESS_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dKEPLER 0 +#define RMCFG_CHIP_DKEPLER 0 +#define RMCFG_CHIP_KEPLER 0 +#define RMCFG_CHIP_KEPLER_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dMAXWELL 0 +#define RMCFG_CHIP_DMAXWELL 0 +#define RMCFG_CHIP_MAXWELL 0 +#define RMCFG_CHIP_MAXWELL_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dPASCAL 0 +#define RMCFG_CHIP_DPASCAL 0 +#define RMCFG_CHIP_COMPUTE 0 +#define RMCFG_CHIP_PASCAL 0 +#define RMCFG_CHIP_PASCAL_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dVOLTA 0 +#define RMCFG_CHIP_DVOLTA 0 +#define RMCFG_CHIP_VOLTA 0 +#define RMCFG_CHIP_VOLTA_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dTURING 0 +#define RMCFG_CHIP_DTURING 0 +#define RMCFG_CHIP_TURING 0 +#define RMCFG_CHIP_TURING_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dAMPERE 0 +#define RMCFG_CHIP_DAMPERE 0 +#define RMCFG_CHIP_AMPERE 0 +#define RMCFG_CHIP_AMPERE_CLASSIC_GPUS 0 +#define RMCFG_CHIP_TEGRA_DGPU_AMPERE 0 +#define RMCFG_CHIP_TEGRA_DGPU 0 +#define RMCFG_CHIP_DFPGA 0 +#define RMCFG_CHIP_dADA 0 +#define RMCFG_CHIP_DADA 0 +#define RMCFG_CHIP_ADA 0 +#define RMCFG_CHIP_ADA_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dHOPPER 0 +#define RMCFG_CHIP_DHOPPER 0 +#define RMCFG_CHIP_HOPPER 0 +#define RMCFG_CHIP_HOPPER_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dBLACKWELL 0 +#define RMCFG_CHIP_DBLACKWELL 0 +#define RMCFG_CHIP_BLACKWELL 0 +#define RMCFG_CHIP_BLACKWELL_CLASSIC_GPUS 0 +#define RMCFG_CHIP_TEGRA_DISP 1 +#define RMCFG_CHIP_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_FERMI_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_TEGRA 1 +#define RMCFG_CHIP_TEGRA_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_ALL_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_ALL_CHIPS_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_tKEPLER 0 +#define RMCFG_CHIP_TKEPLER 0 +#define RMCFG_CHIP_KEPLER_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_tMAXWELL 0 +#define RMCFG_CHIP_TMAXWELL 0 +#define RMCFG_CHIP_MAXWELL_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_tPASCAL 0 +#define RMCFG_CHIP_TPASCAL 0 +#define RMCFG_CHIP_PASCAL_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_DISPLAYLESS_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_tVOLTA 0 +#define RMCFG_CHIP_TVOLTA 0 +#define RMCFG_CHIP_VOLTA_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_TURING_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_T23X 1 +#define RMCFG_CHIP_T23X_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_tAMPERE 0 +#define RMCFG_CHIP_TAMPERE 0 +#define RMCFG_CHIP_AMPERE_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_ADA_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_HOPPER_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_TEGRA_NVDISP_GPUS 1 +#define RMCFG_CHIP_T23X_TEGRA_NVDISP_GPUS 1 +#define 
RMCFG_CHIP_TEGRA_TEGRA_NVDISP_GPUS 1 +#define RMCFG_CHIP_TEGRA_DISP_TEGRA_NVDISP_GPUS 1 +#define RMCFG_CHIP_ALL_TEGRA_NVDISP_GPUS 1 +#define RMCFG_CHIP_ALL_CHIPS_TEGRA_NVDISP_GPUS 1 +#define RMCFG_CHIP_T26X 1 +#define RMCFG_CHIP_T25X 1 +#define RMCFG_CHIP_SIMULATION_GPUS 0 +#define RMCFG_CHIP_ALL_SIMULATION_GPUS 0 +#define RMCFG_CHIP_ALL_CHIPS_SIMULATION_GPUS 0 + + +// +// Features - enabled or disabled +// +#define RMCFG_FEATURE_PLATFORM_UNKNOWN 0 // Running on an unknown platform +#define RMCFG_FEATURE_PLATFORM_WINDOWS 0 // Running on Windows +#define RMCFG_FEATURE_PLATFORM_UNIX 1 // Running on Unix +#define RMCFG_FEATURE_PLATFORM_DCE 0 // Running on Display Control Engine (DCE, an ARM Cortex R5 on Tegra) +#define RMCFG_FEATURE_PLATFORM_SIM 0 // Running on Simulator +#define RMCFG_FEATURE_PLATFORM_MODS 0 // Running as part of MODS +#define RMCFG_FEATURE_PLATFORM_GSP 0 // Running as part of GSP Firmware +#define RMCFG_FEATURE_PLATFORM_MODS_WINDOWS 0 // Running as part of MODS on Windows +#define RMCFG_FEATURE_PLATFORM_MODS_UNIX 0 // Running as part of MODS on UNIX +#define RMCFG_FEATURE_PLATFORM_UNIX_VMWARE 0 // Running on VMware +#define RMCFG_FEATURE_ARCH_UNKNOWN 0 // unknown arch +#define RMCFG_FEATURE_ARCH_X86 0 // Intel x86, 32bit +#define RMCFG_FEATURE_ARCH_X64 0 // Intel 64bit +#define RMCFG_FEATURE_ARCH_RISCV64 0 // RISCV, 64bit +#define RMCFG_FEATURE_ARCH_AMD64 0 // AMD, 64bit +#define RMCFG_FEATURE_ARCH_PPC 0 // Power PC +#define RMCFG_FEATURE_ARCH_PPC64LE 0 // 64-bit PPC little-endian +#define RMCFG_FEATURE_ARCH_ARM 0 // ARM +#define RMCFG_FEATURE_ARCH_ARM_V7 0 // ARM v7 +#define RMCFG_FEATURE_ARCH_AARCH64 1 // AArch64 +#define RMCFG_FEATURE_RMCORE_BASE 1 // RMCORE Base +#define RMCFG_FEATURE_KERNEL_RM 1 // Kernel layer of RM +#define RMCFG_FEATURE_ORIN_PHYSICAL_RM 0 // Physical layer of RM, disabled only on Orin +#define RMCFG_FEATURE_GSPRM_BULLSEYE 0 // Bullseye GSP-RM +#define RMCFG_FEATURE_GSP_MNOC_STOPGAP 1 // Stopgap for MNOC support using the baremetal driver +#define RMCFG_FEATURE_GSP_TASK_RM_DMEM_STACK 1 // Utilize DMEM stack for GSP task RM if available +#define RMCFG_FEATURE_SLINEXT 0 +#define RMCFG_FEATURE_NOTEBOOK 0 // Notebook support +#define RMCFG_FEATURE_MXM 0 // MXM Module Support (all versions) +#define RMCFG_FEATURE_DCB_0X 1 // Fallback DCB routines +#define RMCFG_FEATURE_DCB_4X 1 // DCB4x (used on G8x and later) +#define RMCFG_FEATURE_HOTPLUG_POLLING 0 // HotPlug polling +#define RMCFG_FEATURE_RM_BASIC_LOCK_MODEL 1 // Support for Basic Lock Model in RM +#define RMCFG_FEATURE_VIRTUALIZATION_LEGACY 0 // Virtualization legacy feature (pre-vGPU-GSP) +#define RMCFG_FEATURE_PRESILICON 0 // For builds that can run on simulated or emulated GPU +#define RMCFG_FEATURE_GSP_CLIENT_RM 0 // GSP client RM +#define RMCFG_FEATURE_DCE_CLIENT_RM 1 // DCE client RM +#define RMCFG_FEATURE_PROTOBUF 0 // Protobuf data encoding for OCA data dumps +#define RMCFG_FEATURE_RELEASE_BUILD 1 // Release Build +#define RMCFG_FEATURE_RM_NEW_TRACER_ETW 0 // New Event Tracing for Windows (ETW) in RM +#define RMCFG_FEATURE_VERIF_ONLY_CONTROLS 0 // Allow verify only control cmds to be used on verif builds (determined by this feature) +#define RMCFG_FEATURE_DEVINIT_SCRIPT 0 // VBIOS scripting engine for sharing register sequences +#define RMCFG_FEATURE_VBIOS_IMAGE 1 // GPU uses a VBIOS image for data +#define RMCFG_FEATURE_UNIX_CONSOLE_STATE 0 // Unix console state management and display programming +#define RMCFG_FEATURE_DSI_INFO 0 // DSI information structures support +#define 
RMCFG_FEATURE_CAMERA 1 // Platform and chip specific feature +#define RMCFG_FEATURE_SPARSE_TEXTURE 0 // Enables optimization and defaults for sparse texture +#define RMCFG_FEATURE_NVSR_ON_NVDISPLAY 1 // NVSR on Nvdisplay +#define RMCFG_FEATURE_MODS_FEATURES 0 // Flag for enabling MODS required features in RM +#define RMCFG_FEATURE_MANUAL_TRIGGER_BA_DMA_MODE 0 // Support for manually actuated BA DMA mode data collection. +#define RMCFG_FEATURE_RM_DRIVEN_BA_DMA_MODE 0 // Support for RM-driven BA DMA mode data collection. +#define RMCFG_FEATURE_VBLANK_CALLBACK 1 // Vblank callback functionality within RM +#define RMCFG_FEATURE_TEGRA_SOC_NVDISPLAY 1 // Tegra SOC NvDisplay Driver +#define RMCFG_FEATURE_TEGRA_SOC_NVDISPLAY_MINIMAL 1 // Enable only those parts of display code which are needed for Tegra SOC NvDisplay Driver +#define RMCFG_FEATURE_HEAD_REGIONAL_CRC 1 // Display Head Regional CRC support +#define RMCFG_FEATURE_FEATURE_GH180 1 // RMconfig to encapsulate GH180 features +#define RMCFG_FEATURE_MIG_GFX 1 // RMconfig to encapsulate MIG With GFX support (SMG) +#define RMCFG_FEATURE_MULTICAST_FABRIC 1 // Support for MULTICAST_FABRIC +#define RMCFG_FEATURE_NVLINK_ERROR_THRESHOLD 1 // Support for NVLINK_ERROR_THRESHOLD +#define RMCFG_FEATURE_LOCALIZED_MEMORY 1 // Support for Localized memory feature +#define RMCFG_FEATURE_LTS_REPAIR 1 // Support for LTS swapping in-field +#define RMCFG_FEATURE_TPC_REPAIR 1 // Support for TPC swapping in-field +#define RMCFG_FEATURE_FABRIC_LINEAR_ADDRESSING 0 // Unicast fabric memory management +#define RMCFG_FEATURE_GSP_RM_TRACE_RATS 1 +#define RMCFG_FEATURE_DP_TUNNELING 1 // Support for DP over USB-C feature, DP tunneling. +#define RMCFG_FEATURE_TEGRA_BPMP 1 // Tegra BPMP integration +#define RMCFG_FEATURE_BINDATA_IMAGE 0 // Extract bindata from RM binary during compilation and save it in bindata_image.bin +#define RMCFG_FEATURE_BINDATA_EXTRACTION 0 // Load bindata from disk.(Needs BINDATA_IMAGE to be enabled) + + + +// +// Classes - enabled or disabled +// +#define RMCFG_CLASS_NV01_ROOT 1 +#define RMCFG_CLASS_NV1_ROOT 1 // aka NV01_ROOT +#define RMCFG_CLASS_NV01_NULL_OBJECT 1 // aka NV01_ROOT +#define RMCFG_CLASS_NV1_NULL_OBJECT 1 // aka NV01_ROOT +#define RMCFG_CLASS_NV01_ROOT_NON_PRIV 1 +#define RMCFG_CLASS_NV1_ROOT_NON_PRIV 1 // aka NV01_ROOT_NON_PRIV +#define RMCFG_CLASS_NV01_ROOT_CLIENT 1 +#define RMCFG_CLASS_FABRIC_MANAGER_SESSION 0 +#define RMCFG_CLASS_NV0020_GPU_MANAGEMENT 0 +#define RMCFG_CLASS_NV01_DEVICE_0 1 +#define RMCFG_CLASS_NV20_SUBDEVICE_0 1 +#define RMCFG_CLASS_NV2081_BINAPI 0 +#define RMCFG_CLASS_NV2082_BINAPI_PRIVILEGED 0 +#define RMCFG_CLASS_NV20_SUBDEVICE_DIAG 0 +#define RMCFG_CLASS_NV01_CONTEXT_DMA 1 +#define RMCFG_CLASS_NV01_MEMORY_SYSTEM 1 +#define RMCFG_CLASS_NV1_MEMORY_SYSTEM 1 // aka NV01_MEMORY_SYSTEM +#define RMCFG_CLASS_NV01_MEMORY_LOCAL_PRIVILEGED 0 +#define RMCFG_CLASS_NV1_MEMORY_LOCAL_PRIVILEGED 0 // aka NV01_MEMORY_LOCAL_PRIVILEGED +#define RMCFG_CLASS_NV01_MEMORY_PRIVILEGED 0 // aka NV01_MEMORY_LOCAL_PRIVILEGED +#define RMCFG_CLASS_NV1_MEMORY_PRIVILEGED 0 // aka NV01_MEMORY_LOCAL_PRIVILEGED +#define RMCFG_CLASS_NV01_MEMORY_LOCAL_USER 0 +#define RMCFG_CLASS_NV1_MEMORY_LOCAL_USER 0 // aka NV01_MEMORY_LOCAL_USER +#define RMCFG_CLASS_NV01_MEMORY_USER 0 // aka NV01_MEMORY_LOCAL_USER +#define RMCFG_CLASS_NV1_MEMORY_USER 0 // aka NV01_MEMORY_LOCAL_USER +#define RMCFG_CLASS_NV01_MEMORY_VIRTUAL 0 +#define RMCFG_CLASS_NV01_MEMORY_SYSTEM_DYNAMIC 0 // aka NV01_MEMORY_VIRTUAL +#define RMCFG_CLASS_NV1_MEMORY_SYSTEM_DYNAMIC 0 // aka 
NV01_MEMORY_VIRTUAL +#define RMCFG_CLASS_NV_MEMORY_MAPPER 0 +#define RMCFG_CLASS_NV01_MEMORY_LOCAL_PHYSICAL 0 +#define RMCFG_CLASS_NV01_MEMORY_SYNCPOINT 1 +#define RMCFG_CLASS_NV01_MEMORY_SYSTEM_OS_DESCRIPTOR 1 +#define RMCFG_CLASS_NV01_MEMORY_DEVICELESS 0 +#define RMCFG_CLASS_NV01_MEMORY_FRAMEBUFFER_CONSOLE 0 +#define RMCFG_CLASS_NV01_MEMORY_HW_RESOURCES 0 +#define RMCFG_CLASS_NV01_MEMORY_LIST_SYSTEM 0 +#define RMCFG_CLASS_NV01_MEMORY_LIST_FBMEM 0 +#define RMCFG_CLASS_NV01_MEMORY_LIST_OBJECT 0 +#define RMCFG_CLASS_NV_IMEX_SESSION 0 +#define RMCFG_CLASS_NV_MEMORY_EXPORT 0 +#define RMCFG_CLASS_NV_CE_UTILS 0 +#define RMCFG_CLASS_NV_MEMORY_FABRIC 0 +#define RMCFG_CLASS_NV_MEMORY_FABRIC_IMPORT_V2 0 +#define RMCFG_CLASS_NV_MEMORY_FABRIC_IMPORTED_REF 0 +#define RMCFG_CLASS_FABRIC_VASPACE_A 0 +#define RMCFG_CLASS_NV_MEMORY_MULTICAST_FABRIC 0 +#define RMCFG_CLASS_NV_FB_SEGMENT 0 +#define RMCFG_CLASS_IO_VASPACE_A 1 +#define RMCFG_CLASS_NV01_NULL 0 +#define RMCFG_CLASS_NV1_NULL 0 // aka NV01_NULL +#define RMCFG_CLASS_NV01_EVENT 1 +#define RMCFG_CLASS_NV1_EVENT 1 // aka NV01_EVENT +#define RMCFG_CLASS_NV01_EVENT_KERNEL_CALLBACK 1 +#define RMCFG_CLASS_NV1_EVENT_KERNEL_CALLBACK 1 // aka NV01_EVENT_KERNEL_CALLBACK +#define RMCFG_CLASS_NV01_EVENT_OS_EVENT 1 +#define RMCFG_CLASS_NV1_EVENT_OS_EVENT 1 // aka NV01_EVENT_OS_EVENT +#define RMCFG_CLASS_NV01_EVENT_WIN32_EVENT 1 // aka NV01_EVENT_OS_EVENT +#define RMCFG_CLASS_NV1_EVENT_WIN32_EVENT 1 // aka NV01_EVENT_OS_EVENT +#define RMCFG_CLASS_NV01_EVENT_KERNEL_CALLBACK_EX 1 +#define RMCFG_CLASS_NV1_EVENT_KERNEL_CALLBACK_EX 1 // aka NV01_EVENT_KERNEL_CALLBACK_EX +#define RMCFG_CLASS_NV01_TIMER 0 +#define RMCFG_CLASS_NV1_TIMER 0 // aka NV01_TIMER +#define RMCFG_CLASS_KERNEL_GRAPHICS_CONTEXT 0 // Graphics Context in Kernel side +#define RMCFG_CLASS_KERNEL_WATCHDOG 0 // RC watchdog on CPU +#define RMCFG_CLASS_LOCK_STRESS_OBJECT 1 // Lock Stress Testing Object +#define RMCFG_CLASS_LOCK_TEST_RELAXED_DUP_OBJECT 1 // Relaxed Dup Lock Testing Object +#define RMCFG_CLASS_NV50_CHANNEL_GPFIFO 0 +#define RMCFG_CLASS_GF100_CHANNEL_GPFIFO 0 +#define RMCFG_CLASS_KEPLER_CHANNEL_GPFIFO_A 0 +#define RMCFG_CLASS_UVM_CHANNEL_RETAINER 0 +#define RMCFG_CLASS_PHYSICAL_CHANNEL_GPFIFO 0 +#define RMCFG_CLASS_KEPLER_CHANNEL_GPFIFO_B 0 +#define RMCFG_CLASS_KEPLER_CHANNEL_GPFIFO_C 0 +#define RMCFG_CLASS_MAXWELL_CHANNEL_GPFIFO_A 0 +#define RMCFG_CLASS_PASCAL_CHANNEL_GPFIFO_A 0 +#define RMCFG_CLASS_VOLTA_CHANNEL_GPFIFO_A 0 +#define RMCFG_CLASS_TURING_CHANNEL_GPFIFO_A 0 +#define RMCFG_CLASS_AMPERE_CHANNEL_GPFIFO_A 0 +#define RMCFG_CLASS_HOPPER_CHANNEL_GPFIFO_A 0 +#define RMCFG_CLASS_NV04_SOFTWARE_TEST 0 +#define RMCFG_CLASS_NV4_SOFTWARE_TEST 0 // aka NV04_SOFTWARE_TEST +#define RMCFG_CLASS_NV30_GSYNC 0 +#define RMCFG_CLASS_NV50_DISPLAY 0 +#define RMCFG_CLASS_NV9470_DISPLAY 0 +#define RMCFG_CLASS_NV9471_DISP_SF_USER 0 +#define RMCFG_CLASS_NV9570_DISPLAY 0 +#define RMCFG_CLASS_NV9571_DISP_SF_USER 0 +#define RMCFG_CLASS_NV9770_DISPLAY 0 +#define RMCFG_CLASS_NV9870_DISPLAY 0 +#define RMCFG_CLASS_VOLTA_USERMODE_A 0 +#define RMCFG_CLASS_TURING_USERMODE_A 0 +#define RMCFG_CLASS_AMPERE_USERMODE_A 0 +#define RMCFG_CLASS_HOPPER_USERMODE_A 0 +#define RMCFG_CLASS_BLACKWELL_USERMODE_A 0 +#define RMCFG_CLASS_NVC370_DISPLAY 0 +#define RMCFG_CLASS_NVC371_DISP_SF_USER 0 +#define RMCFG_CLASS_NVC372_DISPLAY_SW 1 +#define RMCFG_CLASS_NVC373_DISP_CAPABILITIES 0 +#define RMCFG_CLASS_NVC573_DISP_CAPABILITIES 0 +#define RMCFG_CLASS_NVC673_DISP_CAPABILITIES 1 +#define RMCFG_CLASS_NVC773_DISP_CAPABILITIES 0 +#define 
RMCFG_CLASS_NVC973_DISP_CAPABILITIES 1 +#define RMCFG_CLASS_NVCC73_DISP_CAPABILITIES 1 +#define RMCFG_CLASS_NV04_DISPLAY_COMMON 1 +#define RMCFG_CLASS_NV50_DEFERRED_API_CLASS 0 +#define RMCFG_CLASS_MPS_COMPUTE 0 +#define RMCFG_CLASS_NV917A_CURSOR_CHANNEL_PIO 0 +#define RMCFG_CLASS_NV917B_OVERLAY_IMM_CHANNEL_PIO 0 +#define RMCFG_CLASS_NV917E_OVERLAY_CHANNEL_DMA 0 +#define RMCFG_CLASS_NV927C_BASE_CHANNEL_DMA 0 +#define RMCFG_CLASS_NV947D_CORE_CHANNEL_DMA 0 +#define RMCFG_CLASS_NV957D_CORE_CHANNEL_DMA 0 +#define RMCFG_CLASS_NV977D_CORE_CHANNEL_DMA 0 +#define RMCFG_CLASS_NV987D_CORE_CHANNEL_DMA 0 +#define RMCFG_CLASS_NVC37A_CURSOR_IMM_CHANNEL_PIO 0 +#define RMCFG_CLASS_NVC37B_WINDOW_IMM_CHANNEL_DMA 0 +#define RMCFG_CLASS_NVC37D_CORE_CHANNEL_DMA 0 +#define RMCFG_CLASS_NVC37E_WINDOW_CHANNEL_DMA 0 +#define RMCFG_CLASS_NVC570_DISPLAY 0 +#define RMCFG_CLASS_NVC57A_CURSOR_IMM_CHANNEL_PIO 0 +#define RMCFG_CLASS_NVC57B_WINDOW_IMM_CHANNEL_DMA 0 +#define RMCFG_CLASS_NVC57D_CORE_CHANNEL_DMA 0 +#define RMCFG_CLASS_NVC57E_WINDOW_CHANNEL_DMA 0 +#define RMCFG_CLASS_NVC670_DISPLAY 1 +#define RMCFG_CLASS_NVC671_DISP_SF_USER 1 +#define RMCFG_CLASS_NVC67A_CURSOR_IMM_CHANNEL_PIO 1 +#define RMCFG_CLASS_NVC67B_WINDOW_IMM_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVC67D_CORE_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVC67E_WINDOW_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVC77F_ANY_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVC770_DISPLAY 0 +#define RMCFG_CLASS_NVC771_DISP_SF_USER 0 +#define RMCFG_CLASS_NVC77D_CORE_CHANNEL_DMA 0 +#define RMCFG_CLASS_NVC970_DISPLAY 1 +#define RMCFG_CLASS_NVC971_DISP_SF_USER 1 +#define RMCFG_CLASS_NVC97A_CURSOR_IMM_CHANNEL_PIO 1 +#define RMCFG_CLASS_NVC97B_WINDOW_IMM_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVC97D_CORE_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVC97E_WINDOW_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVCC70_DISPLAY 1 +#define RMCFG_CLASS_NVCC71_DISP_SF_USER 1 +#define RMCFG_CLASS_NVCC7A_CURSOR_IMM_CHANNEL_PIO 1 +#define RMCFG_CLASS_NVCC7B_WINDOW_IMM_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVCC7D_CORE_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVCC7E_WINDOW_CHANNEL_DMA 1 +#define RMCFG_CLASS_NV9010_VBLANK_CALLBACK 0 +#define RMCFG_CLASS_GF100_PROFILER 0 // Profiler Client Support +#define RMCFG_CLASS_MAXWELL_PROFILER 0 // Base Profiler Class +#define RMCFG_CLASS_MAXWELL_PROFILER_CONTEXT 0 // Context level Profiler Client Support +#define RMCFG_CLASS_MAXWELL_PROFILER_DEVICE 0 // Device level Profiler Client Support +#define RMCFG_CLASS_GF100_SUBDEVICE_MASTER 0 +#define RMCFG_CLASS_TURING_VMMU_A 0 +#define RMCFG_CLASS_GF100_ZBC_CLEAR 0 +#define RMCFG_CLASS_GF100_DISP_SW 0 +#define RMCFG_CLASS_GF100_TIMED_SEMAPHORE_SW 0 +#define RMCFG_CLASS_G84_PERFBUFFER 0 +#define RMCFG_CLASS_NV50_MEMORY_VIRTUAL 0 +#define RMCFG_CLASS_NV50_P2P 0 +#define RMCFG_CLASS_NV50_THIRD_PARTY_P2P 0 +#define RMCFG_CLASS_FERMI_TWOD_A 0 // FERMI Graphics 2D +#define RMCFG_CLASS_FERMI_VASPACE_A 0 // FERMI virtual address space +#define RMCFG_CLASS_GF100_HDACODEC 1 +#define RMCFG_CLASS_NVA0B0_VIDEO_DECODER 0 // Combined MSVLD, MSPPP, MSPDEC (Maxwell) +#define RMCFG_CLASS_NVB0B0_VIDEO_DECODER 0 // Decoder Class for GM20X +#define RMCFG_CLASS_NVB6B0_VIDEO_DECODER 0 // Decoder Class for GM206 +#define RMCFG_CLASS_NVB8B0_VIDEO_DECODER 0 // Decoder Class for Hopper +#define RMCFG_CLASS_NVC1B0_VIDEO_DECODER 0 // Decoder Class for Pascal +#define RMCFG_CLASS_NVC2B0_VIDEO_DECODER 0 // Decoder Class for Pascal +#define RMCFG_CLASS_NVC3B0_VIDEO_DECODER 0 // Decoder Class for Volta +#define RMCFG_CLASS_NVC4B0_VIDEO_DECODER 0 // Decoder Class for Turing +#define 
RMCFG_CLASS_NVC6B0_VIDEO_DECODER 0 // Decoder Class for Ampere +#define RMCFG_CLASS_NVC7B0_VIDEO_DECODER 0 // Decoder Class for Ampere +#define RMCFG_CLASS_NVC9B0_VIDEO_DECODER 0 // Decoder Class for Ada +#define RMCFG_CLASS_NVC0B7_VIDEO_ENCODER 0 +#define RMCFG_CLASS_NVD0B7_VIDEO_ENCODER 0 +#define RMCFG_CLASS_NVC1B7_VIDEO_ENCODER 0 +#define RMCFG_CLASS_NVC2B7_VIDEO_ENCODER 0 +#define RMCFG_CLASS_NVC3B7_VIDEO_ENCODER 0 +#define RMCFG_CLASS_NVC4B7_VIDEO_ENCODER 0 +#define RMCFG_CLASS_NVB4B7_VIDEO_ENCODER 0 +#define RMCFG_CLASS_NVC7B7_VIDEO_ENCODER 0 +#define RMCFG_CLASS_NVC9B7_VIDEO_ENCODER 0 +#define RMCFG_CLASS_NVB8D1_VIDEO_NVJPG 0 +#define RMCFG_CLASS_NVC4D1_VIDEO_NVJPG 0 +#define RMCFG_CLASS_NVC9D1_VIDEO_NVJPG 0 +#define RMCFG_CLASS_NVB8FA_VIDEO_OFA 0 +#define RMCFG_CLASS_NVC6FA_VIDEO_OFA 0 +#define RMCFG_CLASS_NVC7FA_VIDEO_OFA 0 +#define RMCFG_CLASS_NVC9FA_VIDEO_OFA 0 +#define RMCFG_CLASS_KEPLER_INLINE_TO_MEMORY_B 0 // Kepler inline to memory +#define RMCFG_CLASS_FERMI_CONTEXT_SHARE_A 0 // Context Share class +#define RMCFG_CLASS_KEPLER_CHANNEL_GROUP_A 0 // Channel Group Class +#define RMCFG_CLASS_MAXWELL_A 0 // Maxwell Graphics +#define RMCFG_CLASS_MAXWELL_COMPUTE_A 0 // Maxwell Graphics Compute +#define RMCFG_CLASS_MAXWELL_B 0 // Maxwell Graphics second generation +#define RMCFG_CLASS_MAXWELL_COMPUTE_B 0 // Maxwell Graphics Compute +#define RMCFG_CLASS_PASCAL_A 0 // Pascal Graphics +#define RMCFG_CLASS_PASCAL_COMPUTE_A 0 // Pascal Graphics Compute +#define RMCFG_CLASS_PASCAL_DMA_COPY_A 0 +#define RMCFG_CLASS_PASCAL_DMA_COPY_B 0 +#define RMCFG_CLASS_VOLTA_DMA_COPY_A 0 +#define RMCFG_CLASS_TURING_DMA_COPY_A 0 +#define RMCFG_CLASS_AMPERE_DMA_COPY_A 0 +#define RMCFG_CLASS_AMPERE_DMA_COPY_B 0 +#define RMCFG_CLASS_HOPPER_DMA_COPY_A 0 +#define RMCFG_CLASS_MAXWELL_DMA_COPY_A 0 +#define RMCFG_CLASS_MAXWELL_FAULT_BUFFER_A 0 // Maxwell Fault Buffer for Gr +#define RMCFG_CLASS_ACCESS_COUNTER_NOTIFY_BUFFER 0 // Access Cntr Buffer for Gr +#define RMCFG_CLASS_MMU_FAULT_BUFFER 0 // Volta Fault Buffer for Gr +#define RMCFG_CLASS_MMU_VIDMEM_ACCESS_BIT_BUFFER 0 // Ampere Vidmem Access Bit Buffer +#define RMCFG_CLASS_PASCAL_B 0 // Pascal Graphics 2nd Gen +#define RMCFG_CLASS_PASCAL_COMPUTE_B 0 // Pascal Graphics Compute 2nd Gen +#define RMCFG_CLASS_VOLTA_A 0 // Volta Graphics +#define RMCFG_CLASS_VOLTA_COMPUTE_A 0 // Volta Graphics Compute +#define RMCFG_CLASS_TURING_A 0 // Turing Graphics +#define RMCFG_CLASS_TURING_COMPUTE_A 0 // Turing Graphics Compute +#define RMCFG_CLASS_AMPERE_A 0 // AmpereA (Graphics) +#define RMCFG_CLASS_AMPERE_COMPUTE_A 0 // AmpereComputeA (Graphics Compute) +#define RMCFG_CLASS_AMPERE_B 0 // AmpereB (Graphics) +#define RMCFG_CLASS_AMPERE_COMPUTE_B 0 // AmpereComputeB (Graphics Compute) +#define RMCFG_CLASS_ADA_A 0 // AdaA (Graphics) +#define RMCFG_CLASS_ADA_COMPUTE_A 0 // AdaComputeA (Graphics Compute) +#define RMCFG_CLASS_AMPERE_SMC_PARTITION_REF 0 // Ampere SMC Partition Subscription +#define RMCFG_CLASS_AMPERE_SMC_EXEC_PARTITION_REF 0 // Ampere SMC Execution Partition Subscription +#define RMCFG_CLASS_AMPERE_SMC_CONFIG_SESSION 0 // Ampere SMC config session subscription +#define RMCFG_CLASS_NV0092_RG_LINE_CALLBACK 0 // RG line callback functions +#define RMCFG_CLASS_AMPERE_SMC_MONITOR_SESSION 0 // Ampere SMC monitor session subscription +#define RMCFG_CLASS_HOPPER_A 0 // HopperA (Graphics) +#define RMCFG_CLASS_HOPPER_COMPUTE_A 0 // HopperComputeA (Graphics Compute) +#define RMCFG_CLASS_NV40_DEBUG_BUFFER 0 +#define RMCFG_CLASS_RM_USER_SHARED_DATA 0 +#define 
RMCFG_CLASS_GT200_DEBUGGER 0 // CUDA Debugger support +#define RMCFG_CLASS_NV40_I2C 0 // I2C operations +#define RMCFG_CLASS_NV_E3_THREED 0 // Tegra 3D class +#define RMCFG_CLASS_KEPLER_DEVICE_VGPU 0 // KEPLER virtual gpu +#define RMCFG_CLASS_NVA081_VGPU_CONFIG 0 // virtual gpu configuration +#define RMCFG_CLASS_NVA084_KERNEL_HOST_VGPU_DEVICE 0 // Kernel component of the host virtual gpu device +#define RMCFG_CLASS_NV0060_SYNC_GPU_BOOST 0 // Synchronized GPU Boost Class. Defines a set of GPUs for Synchronized Boost +#define RMCFG_CLASS_GP100_UVM_SW 0 // UVM SW class to support SW methods for fault cancel +#define RMCFG_CLASS_NVENC_SW_SESSION 0 // GPU NVENC Software Session +#define RMCFG_CLASS_NV_EVENT_BUFFER 0 // Event buffer class used to share event data with UMD +#define RMCFG_CLASS_NVFBC_SW_SESSION 0 // GPU NVFBC Software Session +#define RMCFG_CLASS_NV_COUNTER_COLLECTION_UNIT 0 // Counter Collection Unit Class +#define RMCFG_CLASS_NV_SEMAPHORE_SURFACE 0 // GPU Semaphore encapsulation class +#define RMCFG_CLASS_NV_SCHEDULER 0 // GPU Scheduler class +#define RMCFG_CLASS_CMC_API 0 // CMC API class +#define RMCFG_CLASS_CMC_UMD_API 0 // CMC UMD Api class + + + +// +// MODULES - enabled or disabled +// +#define RMCFG_MODULE_Object 1 // Base class for NVOC objects +#define RMCFG_MODULE_OBJECT 1 // aka Object +#define RMCFG_MODULE_TRACEABLE 0 // Interface for CaptureState +#define RMCFG_MODULE_ENGSTATE 1 // Base class for engines with generic constructors, StateLoad, etc. +#define RMCFG_MODULE_HOSTENG 0 // Base class for host engines +#define RMCFG_MODULE_FLCNABLE 0 // Base class for engines requiring falcon +#define RMCFG_MODULE_PMUCLIENT 0 // Base class for implementations of behavior to interact with the PMU engine +#define RMCFG_MODULE_PMU_CLIENT_IMPLEMENTER 0 // Base class for engines that use PMU engine +#define RMCFG_MODULE_INTRABLE 0 // Base class to generate and service top-level interrupts +#define RMCFG_MODULE_MUTEXABLE 0 // Base class for engines that implements mutex +#define RMCFG_MODULE_GpuMutexMgr 0 // GPU Mutex Manager +#define RMCFG_MODULE_GPUMUTEXMGR 0 // aka GpuMutexMgr +#define RMCFG_MODULE_PXUC 0 // PXUC Interface +#define RMCFG_MODULE_BIF 0 // Bus Interface +#define RMCFG_MODULE_KERNEL_BIF 0 // Bus Interface on Kernel(CPU) RM +#define RMCFG_MODULE_BUS 0 // Bus +#define RMCFG_MODULE_KERNEL_BUS 0 // Bus on Kernel(CPU) RM +#define RMCFG_MODULE_ClockManager 0 // Clock Manager +#define RMCFG_MODULE_CLOCKMANAGER 0 // aka ClockManager +#define RMCFG_MODULE_KERNEL_ClockManager 0 // Kernel controls for Clock Manager +#define RMCFG_MODULE_KERNEL_CLOCKMANAGER 0 // aka KERNEL_ClockManager +#define RMCFG_MODULE_DAC 0 // DAC Resource +#define RMCFG_MODULE_KERNEL_DISPLAY 1 // Display module on Kernel(CPU) RM +#define RMCFG_MODULE_DISP 0 // Display +#define RMCFG_MODULE_VIRT_MEM_ALLOCATOR 0 +#define RMCFG_MODULE_DPAUX 0 +#define RMCFG_MODULE_MEMORY_SYSTEM 0 // Memory System +#define RMCFG_MODULE_KERNEL_MEMORY_SYSTEM 0 // Kernel Memory System +#define RMCFG_MODULE_MEMORY_MANAGER 1 // Memory Manager +#define RMCFG_MODULE_FBFLCN 0 // FB falcon +#define RMCFG_MODULE_FBSR 0 // Frame Buffer Save/Restore +#define RMCFG_MODULE_KERNEL_FIFO 0 // Fifo Module on Kernel(CPU) RM +#define RMCFG_MODULE_FIFO 0 // aka. HOST +#define RMCFG_MODULE_SCHED 0 // Scheduler for runlist +#define RMCFG_MODULE_FLCN 0 // Falcon-derived engines +#define RMCFG_MODULE_KERNEL_FALCON 0 // Falcon on Kernel(CPU) RM. Used for booting Falcon cores. +#define RMCFG_MODULE_KERNEL_VIDEO_ENGINE 0 // Kernel Video Engine. 
Object to manage video engines (NVJPG, encoders, decoders, etc.) on Kernel RM +#define RMCFG_MODULE_GR 0 // Graphics +#define RMCFG_MODULE_GR0 0 // aka GR +#define RMCFG_MODULE_KERNEL_GRAPHICS 0 // Graphics on Kernel(CPU) RM +#define RMCFG_MODULE_GRMGR 0 // Graphics manager. Used for maintaining Gr partitioning policies +#define RMCFG_MODULE_MIG_MANAGER 0 // MIG manager on Physical (GSP) RM. Used for maintaining device partitioning policies +#define RMCFG_MODULE_KERNEL_MIG_MANAGER 0 // MIG manager on Kernel (CPU) RM. Used for maintaining device partitioning policies +#define RMCFG_MODULE_KERNEL_GRAPHICS_MANAGER 0 // Graphics manager on Kernel (CPU) RM. Used for maintaining Gr partitioning policies +#define RMCFG_MODULE_HAL 1 // Hardware Abstraction Layer +#define RMCFG_MODULE_HEAD 1 // Display component: Head +#define RMCFG_MODULE_SF 1 // Display component: Serial Formatter, output protocol formatting +#define RMCFG_MODULE_DISPLAY_INSTANCE_MEMORY 1 +#define RMCFG_MODULE_KERNEL_HEAD 1 +#define RMCFG_MODULE_INTR 0 +#define RMCFG_MODULE_MC 0 +#define RMCFG_MODULE_KERNEL_MC 0 // Master Control-related code needed in Kernel RM +#define RMCFG_MODULE_PRIV_RING 0 +#define RMCFG_MODULE_KERNEL_PERF 0 // Performance module on Kernel(CPU) RM +#define RMCFG_MODULE_PERF 0 // Performance Monitor +#define RMCFG_MODULE_STEREO 0 // Stereo Viewing +#define RMCFG_MODULE_TMR 1 +#define RMCFG_MODULE_SEQ 0 // Sequencer for backlight and LVDS control +#define RMCFG_MODULE_VGA 0 // Video Graphics Array +#define RMCFG_MODULE_VBIOS 0 +#define RMCFG_MODULE_KERNEL_RC 0 // Robust Channels and Watchdog Kernel API +#define RMCFG_MODULE_RC 0 // Robust Channels +#define RMCFG_MODULE_NV_DEBUG_DUMP 0 // NV Debug +#define RMCFG_MODULE_SWENG 0 // Software Engine for all SW classes +#define RMCFG_MODULE_GPU 1 // GPU Control Object +#define RMCFG_MODULE_I2C 0 // I2C Serial Interface +#define RMCFG_MODULE_KERNEL_I2C 0 // Kernel controls for I2C +#define RMCFG_MODULE_SPI 0 // SPI Interface +#define RMCFG_MODULE_OOB 0 // Out-of-band Interface +#define RMCFG_MODULE_GPIO 0 // General Purpose I/O Pins +#define RMCFG_MODULE_KERNEL_GPIO 0 // Kernel controls for GPIO +#define RMCFG_MODULE_FAN 0 // Fan control +#define RMCFG_MODULE_KERNEL_FAN 0 // Kernel controls for FAN +#define RMCFG_MODULE_FUSE 0 +#define RMCFG_MODULE_VOLT 0 +#define RMCFG_MODULE_KERNEL_VOLT 0 // Kernel controls for VOLT +#define RMCFG_MODULE_THERM 0 // Thermal Monitoring +#define RMCFG_MODULE_KERNEL_THERM 0 // Kernel controls for Thermal Monitoring +#define RMCFG_MODULE_OR 1 // Display component: Output Resource +#define RMCFG_MODULE_PIOR 0 // Display component: Parallel Input Output Resource +#define RMCFG_MODULE_SOR 1 // Display component: Serial Output Resource +#define RMCFG_MODULE_DSI 1 // Display Serial Interface +#define RMCFG_MODULE_HDCP 1 // High-bandwidth Digital Content Protection +#define RMCFG_MODULE_HDMI 1 // High-Definition Multimedia Interface +#define RMCFG_MODULE_ISOHUB 1 // Display's memory read interface +#define RMCFG_MODULE_BSP 0 // Bit Stream Processor/NVDEC +#define RMCFG_MODULE_NVDEC 0 // aka BSP +#define RMCFG_MODULE_KERNEL_NVDEC 0 +#define RMCFG_MODULE_CIPHER 0 +#define RMCFG_MODULE_CE 0 // Copy Engine +#define RMCFG_MODULE_KERNEL_CE 0 // Kernel Copy Engine +#define RMCFG_MODULE_PMU 0 // PMU peregrine core +#define RMCFG_MODULE_KERNEL_PMU 0 // PMU peregrine core on Kernel(CPU) RM +#define RMCFG_MODULE_PLATFORM_REQUEST_HANDLER 0 // Platform Request Handler on Kernel(CPU) RM +#define RMCFG_MODULE_MSENC 0 // Video Encoder (MSENC) Engine
+#define RMCFG_MODULE_KERNEL_NVENC 0 +#define RMCFG_MODULE_HDA 0 // High Definition Audio (HDA) Engine +#define RMCFG_MODULE_HDACODEC 0 // High Definition Audio (HDA) Codec Engine +#define RMCFG_MODULE_INFOROM 0 // InfoROM Engine +#define RMCFG_MODULE_KERNEL_INFOROM 0 // Kernel controls for InfoROM Engine +#define RMCFG_MODULE_LPWR 0 // Low Power Object. This object manages all power saving features. +#define RMCFG_MODULE_KERNEL_LPWR 0 // Low Power Object. This object manages all power saving features. +#define RMCFG_MODULE_PGCTRL 0 // Power Gating Controller (PGCTRL) Engine +#define RMCFG_MODULE_LPWRFSM 1 // LPWR FSM Object Engine + +#define RMCFG_MODULE_PGISLAND 0 // Power Gating Island (PGISLAND) +#define RMCFG_MODULE_AP 0 // Adaptive Power Object (AP) Engine +#define RMCFG_MODULE_PSI 0 // Phase State Indicator Engine. HW folks call it the Power Saving Interface. +#define RMCFG_MODULE_CG 0 // Clock Gating Object Engine. +#define RMCFG_MODULE_RPPG 0 // RAM Periphery Power Gating Object Engine. +#define RMCFG_MODULE_EI 0 // Engine Idle Framework Object Engine. +#define RMCFG_MODULE_LPWR_SEQ 0 // LPWR Unified Sequencer Feature for Power Gating +#define RMCFG_MODULE_LPWR_MON 0 // LPWR Mon Feature to gather statistics. +#define RMCFG_MODULE_LPWR_CSD 0 // LPWR_CSD Feature Object. +#define RMCFG_MODULE_DPU 0 // Display Falcon +#define RMCFG_MODULE_PMGR 0 // PCB Manager engine +#define RMCFG_MODULE_KERNEL_PMGR 0 // Kernel controls for Pmgr +#define RMCFG_MODULE_SYS 1 // System +#define RMCFG_MODULE_OS 1 // OS Layer +#define RMCFG_MODULE_GPUMGR 1 // GPU Manager object +#define RMCFG_MODULE_HEAP 0 // Heap Engine Object +#define RMCFG_MODULE_BRIGHTC 1 // Backlight brightness control module +#define RMCFG_MODULE_OD 1 // Display component: Output Device +#define RMCFG_MODULE_DFP 1 // Display component: Display Flat Panel +#define RMCFG_MODULE_CRT 0 // Display component: Cathode ray tube +#define RMCFG_MODULE_DisplayPort 1 // Display component: DisplayPort +#define RMCFG_MODULE_DISPLAYPORT 1 // aka DisplayPort +#define RMCFG_MODULE_TMDS 1 // Display component: Transition Minimized Differential Signaling +#define RMCFG_MODULE_CL 0 // Core Logic +#define RMCFG_MODULE_RCDB 0 // RC Journal log DB +#define RMCFG_MODULE_NVLOG 1 // NVLOG infrastructure for storing buffers of logs in various binary formats +#define RMCFG_MODULE_GPUACCT 0 // GPU Accounting +#define RMCFG_MODULE_GRDBG 0 // Debugger Engine Object +#define RMCFG_MODULE_PSR 1 // Panel Self Refresh +#define RMCFG_MODULE_UVM 0 // Unified Virtual Memory - provides interface to separate UVM and verification support +#define RMCFG_MODULE_VGPUMGR 0 // Virtual GPU management +#define RMCFG_MODULE_KERNEL_VGPUMGR 0 // Virtual GPU management on Kernel(CPU) RM +#define RMCFG_MODULE_SEC2 0 // New secure falcon +#define RMCFG_MODULE_KERNEL_SEC2 0 // SEC2 on Kernel(CPU) RM. Used for booting Falcon cores.
+#define RMCFG_MODULE_PMS 0 // PMU ModeSet object +#define RMCFG_MODULE_GCX 0 // Idle power states of GPU +#define RMCFG_MODULE_LSFM 0 // Light Secure Falcon Manager object +#define RMCFG_MODULE_ACR 0 // Programs MMU to protect the region +#define RMCFG_MODULE_REFCNT 0 // Reference Counting +#define RMCFG_MODULE_GPULOG 0 // Logger for logging GPU related data +#define RMCFG_MODULE_FECS 0 // Front-end context switch +#define RMCFG_MODULE_HYPERVISOR 0 // Hypervisor object to support its native API +#define RMCFG_MODULE_VRRMGR 0 // VRR Management object +#define RMCFG_MODULE_GPCCS 0 // GPC context switch +#define RMCFG_MODULE_MISSING 0 // MISSING (placeholder) Engine +#define RMCFG_MODULE_VMM 1 // virtual memory manager +#define RMCFG_MODULE_VASPACE 1 // virtual address space +#define RMCFG_MODULE_GVASPACE 0 // GPU virtual address space +#define RMCFG_MODULE_AVASPACE 0 // AMODEL virtual address space +#define RMCFG_MODULE_IOVASPACE 1 // IOMMU virtual address space +#define RMCFG_MODULE_FABRICVASPACE 0 // FABRIC virtual address space +#define RMCFG_MODULE_MMU 0 // Memory Management Unit - HW interface +#define RMCFG_MODULE_GMMU 0 // GPU Memory Management Unit +#define RMCFG_MODULE_KERNEL_GMMU 0 // GPU Memory Management Unit on Kernel(CPU) RM +#define RMCFG_MODULE_VMMU 0 // Virtual Memory Management Unit (for vGPU) +#define RMCFG_MODULE_GPUGRP 1 // Group of GPU(s) that may or may not be in SLI +#define RMCFG_MODULE_KERNEL_HWPM 0 // Hardware Performance Monitor on Kernel(CPU) RM +#define RMCFG_MODULE_HWPM 0 // Hardware Performance Monitor +#define RMCFG_MODULE_NVLINK 0 // NVLINK High-speed GPU interconnect +#define RMCFG_MODULE_KERNEL_NVLINK 0 // Nvlink on Kernel(CPU) RM +#define RMCFG_MODULE_IOCTRL 0 // NVLINK Ioctrl +#define RMCFG_MODULE_HSHUB 0 // High Speed Hub +#define RMCFG_MODULE_HSHUBMANAGER 0 // High Speed Hub Manager +#define RMCFG_MODULE_KERNEL_HSHUB 0 // High Speed Hub on Kernel(CPU) RM +#define RMCFG_MODULE_GPUMON 0 // GPU Monitoring +#define RMCFG_MODULE_GPUBOOSTMGR 0 // Sync Gpu Boost Manager +#define RMCFG_MODULE_GRIDDISPLAYLESS 0 // GRID Displayless +#define RMCFG_MODULE_WINDOW 1 // NvDisplay WINDOW channel +#define RMCFG_MODULE_RPC 0 // RPC Engine for VGPU +#define RMCFG_MODULE_RPCSTRUCTURECOPY 0 // RPC structure copying for VGPU +#define RMCFG_MODULE_NVJPG 0 // Video JPEG (NVJPG) Engine +#define RMCFG_MODULE_KERNEL_NVJPG 0 +#define RMCFG_MODULE_FSP 0 // Firmware security processor +#define RMCFG_MODULE_KERNEL_FSP 0 // FSP on Kernel(CPU) RM +#define RMCFG_MODULE_GSP 0 // GPU system processor +#define RMCFG_MODULE_KERNEL_GSP 0 // GSP on Kernel(CPU) RM. Used for booting RM on GSP. +#define RMCFG_MODULE_OFA 0 // Optical Flow Accelerator +#define RMCFG_MODULE_KERNEL_OFA 0 +#define RMCFG_MODULE_HOT_PLUG 0 // Display component: hot plug +#define RMCFG_MODULE_FABRIC 0 // NVLink Fabric +#define RMCFG_MODULE_GPUDB 1 // GPU DATABASE +#define RMCFG_MODULE_NNE 0 // Neural Net Engine (NNE) +#define RMCFG_MODULE_DCECLIENTRM 1 // DCE Client RM +#define RMCFG_MODULE_DCB 0 // Display Control Block for all display related data in VBIOS/DCB Image +#define RMCFG_MODULE_DISPMACRO 0 // DispMacro RM infrastructure for IED removal from VBIOS +#define RMCFG_MODULE_CONF_COMPUTE 0 // Confidential Compute +#define RMCFG_MODULE_GSPLITE 0 // GSPLITE Engines +#define RMCFG_MODULE_KERNEL_GSPLITE 0 // Required for interfacing with GSPLITE engine from Kernel-RM.
+#define RMCFG_MODULE_DISP_MGR 0 // Lid- and dock-related disp code for NOTEBOOK +#define RMCFG_MODULE_PLATFORM 0 // Object for platform related features +#define RMCFG_MODULE_KERNEL_CCU 0 // Counter Collection Unit Kernel(CPU) RM +#define RMCFG_MODULE_LIBSPDM 0 // Secure Protocol and Data Management (SPDM) library on Kernel(CPU) RM +#define RMCFG_MODULE_SPDM 0 // Secure Protocol and Data Management (SPDM) support Kernel(CPU)-Physical(GSP) RM + + + + +// Disable misspelling detection +#define __RMCFG_vet_enabled 0 + + + + + + + +// Make sure the specified feature is defined and not a misspelling +// by checking the "_def" forms above, which are all set to '1' for +// each defined chip, feature, etc., irrespective of its enable/disable +// state. +#define _RMCFG_vet(x) 0 +#if __RMCFG_vet_enabled && defined(__GNUC__) // broken on MSVC +# undef _RMCFG_vet +# define _RMCFG_vet(x) ((__def_RMCFG ## x) ? 0 : (0 * (1/0))) +#endif + +// +// Compile-time constant macros to help with enabling or disabling code based +// on whether a feature (or chip or class or engine or ...) is enabled. +// May be used by both C code ('if') and C-preprocessor directives ('#if') +// + +#define RMCFG_CHIP_ENABLED(_chip) (RMCFG_CHIP_##_chip + _RMCFG_vet(_CHIP_ ## _chip)) +#define RMCFG_FEATURE_ENABLED(_feature) (RMCFG_FEATURE_##_feature + _RMCFG_vet(_FEATURE_ ## _feature)) +#define RMCFG_MODULE_ENABLED(_module) (RMCFG_MODULE_##_module + _RMCFG_vet(_MODULE_ ## _module)) +#define RMCFG_CLASS_ENABLED(_clss) (RMCFG_CLASS_##_clss + _RMCFG_vet(_CLASS_ ## _clss)) + +#endif // _RMCFG_H_ diff --git a/src/nvidia/inc/kernel/core/core.h b/src/nvidia/inc/kernel/core/core.h new file mode 100644 index 0000000..edf41ba --- /dev/null +++ b/src/nvidia/inc/kernel/core/core.h @@ -0,0 +1,50 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __CORE_H__ +#define __CORE_H__ + +#include "core/prelude.h" + +/** + * @brief Global RM initialization + * + * The single entrypoint into the RM for all platforms. This will initialize + * cross-platform RM subsystems and call into OS-specific init as needed.
+ * + * Must be called once and only once before any RM internal functions can be + * called. + * + * @return NV_OK if successful, error otherwise + */ +NV_STATUS coreInitializeRm(void); + +/** + * @brief Global RM shutdown + * + * Must be called once and only once when a driver is shutting down and no more + * RM internal functions will be called. + * + */ +void coreShutdownRm(void); + +#endif /* __CORE_H__ */ diff --git a/src/nvidia/inc/kernel/core/hal.h b/src/nvidia/inc/kernel/core/hal.h new file mode 100644 index 0000000..eaa931b --- /dev/null +++ b/src/nvidia/inc/kernel/core/hal.h @@ -0,0 +1,3 @@ + +#include "g_hal_nvoc.h" + diff --git a/src/nvidia/inc/kernel/core/hal_mgr.h b/src/nvidia/inc/kernel/core/hal_mgr.h new file mode 100644 index 0000000..b15d9cd --- /dev/null +++ b/src/nvidia/inc/kernel/core/hal_mgr.h @@ -0,0 +1,3 @@ + +#include "g_hal_mgr_nvoc.h" + diff --git a/src/nvidia/inc/kernel/core/info_block.h b/src/nvidia/inc/kernel/core/info_block.h new file mode 100644 index 0000000..d32f98b --- /dev/null +++ b/src/nvidia/inc/kernel/core/info_block.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _INFO_BLOCK_H_ +#define _INFO_BLOCK_H_ + +#include "nvtypes.h" + +// +// HAL private data management. +// +typedef struct ENG_INFO_LINK_NODE *PENG_INFO_LINK_NODE; +typedef struct ENG_INFO_LINK_NODE ENG_INFO_LINK_NODE; + + +struct ENG_INFO_LINK_NODE +{ + NvU32 dataId; + void *infoBlock; + PENG_INFO_LINK_NODE next; +}; + +void* getInfoPtr(PENG_INFO_LINK_NODE pHead, NvU32 dataId); +void* addInfoPtr(PENG_INFO_LINK_NODE *ppHead, NvU32 dataId, NvU32 size); +void deleteInfoPtr(PENG_INFO_LINK_NODE *ppHead, NvU32 dataId); +NvBool testInfoPtr(PENG_INFO_LINK_NODE, NvU32 dataId); + +#endif // _INFO_BLOCK_H_ diff --git a/src/nvidia/inc/kernel/core/locks.h b/src/nvidia/inc/kernel/core/locks.h new file mode 100644 index 0000000..1dfc02f --- /dev/null +++ b/src/nvidia/inc/kernel/core/locks.h @@ -0,0 +1,195 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef LOCKS_H +#define LOCKS_H + +#include "core/core.h" +#include "os/os.h" + +// Forward declarations +typedef struct OBJSYS OBJSYS; +typedef struct NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS; + +typedef enum +{ + GPU_LOCK_GRP_SUBDEVICE, // locks will be taken for subdevice only + GPU_LOCK_GRP_DEVICE, // locks will be taken for device only + GPU_LOCK_GRP_MASK, // locks will be taken for devices specified by the mask + GPU_LOCK_GRP_ALL // locks will be taken for all devices +} GPU_LOCK_GRP_ID; +typedef NvU32 GPU_MASK; + +// +// This structure is used to trace lock acquire/release activity. +// The calling IP is stored in a circular array. +// +#define MAX_TRACE_LOCK_CALLS 32 + +typedef enum +{ + lockTraceEmpty, + lockTraceAcquire, + lockTraceRelease, + lockTraceAlloc, + lockTraceFree +} LOCK_TRACE_TYPE; + +typedef struct +{ + LOCK_TRACE_TYPE type; + union { + GPU_MASK gpuMask; // For GPU locks + NvU32 lockModule; // For API lock + NvU32 value; + } data32; + union { + NvU16 gpuInst; // For GPU locks + NvU16 lockFlags; // For API lock + NvU16 value; + } data16; + NvBool bHighIrql; + NvU8 priority; + NvU64 callerRA; + NvU64 threadId; + NvU64 timestamp; +} LOCK_TRACE_ENTRY; + +typedef struct +{ + LOCK_TRACE_ENTRY entries[MAX_TRACE_LOCK_CALLS]; + NvU32 index; +} LOCK_TRACE_INFO; + +#define INSERT_LOCK_TRACE(plti, ra, t, d16, d32, ti, irql, pr, ts) \ +{ \ + (plti)->entries[(plti)->index].callerRA = (NvUPtr)ra; \ + (plti)->entries[(plti)->index].type = t; \ + (plti)->entries[(plti)->index].data16.value = d16; \ + (plti)->entries[(plti)->index].data32.value = d32; \ + (plti)->entries[(plti)->index].threadId = ti; \ + (plti)->entries[(plti)->index].timestamp = ts; \ + (plti)->entries[(plti)->index].bHighIrql = irql; \ + (plti)->entries[(plti)->index].priority = pr; \ + (plti)->index = ((plti)->index + 1) % MAX_TRACE_LOCK_CALLS; \ +} + +// +// Callers specify this value when they want to lock all possible GPUs. +// +#define GPUS_LOCK_ALL (0xFFFFFFFF) + +// +// Flags for rmGpusLock[Acquire,Release] operations.
+// + +// default no flags +#define GPUS_LOCK_FLAGS_NONE (0x00000000) +// conditional acquire; if lock is already held then return error +#define GPU_LOCK_FLAGS_COND_ACQUIRE NVBIT(0) +// Attempt acquire even if it potentially violates the locking order +// But do not block in a way that could cause a deadlock +#define GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE NVBIT(1) +// Additionally acquire the GPU alloc lock (implied if locking all GPUs) +// to prevent the set of lockable GPUs from changing +#define GPU_LOCK_FLAGS_LOCK_ALLOC NVBIT(2) + +// +// RM Lock Related Functions +// +NV_STATUS rmLocksAlloc(OBJSYS *); +void rmLocksFree(OBJSYS *); + +NV_STATUS rmLocksAcquireAll(NvU32 module); +void rmLocksReleaseAll(void); + +NV_STATUS workItemLocksAcquire(NvU32 gpuInstance, NvU32 flags, NvU32 *pReleaseLocks, NvU32 *pGpuMask); +void workItemLocksRelease(NvU32 releaseLocks, NvU32 gpuMask); + +// +// Thread priority boosting and throttling: +// Used to temporarily increase the priority of a thread on Windows platforms +// in order to prevent starvation from the scheduler. +// +void threadPriorityStateAlloc(void); +void threadPriorityStateFree(void); + +//! Temporarily boost the priority of the current thread +void threadPriorityBoost(NvU64* pBoostPriority, NvU64 *pOriginalPriority); + +//! Gradually lower the priority of the current thread if it is boosted and sufficient time has elapsed +void threadPriorityThrottle(void); + +//! Restore the original priority of the current thread if it is boosted +void threadPriorityRestore(void); + +NV_STATUS rmGpuGroupLockGetMask(NvU32 gpuInst, GPU_LOCK_GRP_ID gpuGrpId, GPU_MASK* pGpuMask); + +// +// Defines for rmGpuLockSetOwner operation. +// +#define GPUS_LOCK_OWNER_PENDING_DPC_REFRESH (OS_THREAD_HANDLE)(-1) + +NV_STATUS rmGpuLockInfoInit(void); +void rmGpuLockInfoDestroy(void); +NV_STATUS rmGpuLockAlloc(NvU32); +void rmGpuLockFree(NvU32); +NV_STATUS rmGpuLocksAcquire(NvU32, NvU32); +NvU32 rmGpuLocksRelease(NvU32, OBJGPU *); +void rmGpuLocksFreeze(GPU_MASK); +void rmGpuLocksUnfreeze(GPU_MASK); +NV_STATUS rmGpuLockHide(NvU32); +void rmGpuLockShow(NvU32); +NvBool rmGpuLockIsOwner(void); +NvU32 rmGpuLocksGetOwnedMask(void); +NvBool rmGpuLockIsHidden(OBJGPU *); +NV_STATUS rmGpuLockSetOwner(OS_THREAD_HANDLE); +void rmGpuLockGetTimes(NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS *); +NV_STATUS rmGpuGroupLockAcquire(NvU32, GPU_LOCK_GRP_ID, NvU32, NvU32, GPU_MASK *); +void rmGpuGroupLockRelease(GPU_MASK, NvU32); +NvBool rmGpuGroupLockIsOwner(NvU32, GPU_LOCK_GRP_ID, GPU_MASK*); + +NvBool rmDeviceGpuLockIsOwner(NvU32); +NV_STATUS rmDeviceGpuLockSetOwner(OBJGPU *, OS_THREAD_HANDLE); +NV_STATUS rmDeviceGpuLocksAcquire(OBJGPU *, NvU32, NvU32); +NvU32 rmDeviceGpuLocksRelease(OBJGPU *, NvU32, OBJGPU *); +NvU32 rmDeviceGpuLocksReleaseAndThreadStateFreeDeferredIntHandlerOptimized(OBJGPU *, NvU32, OBJGPU *); + + +NV_STATUS rmIntrMaskLockAlloc(NvU32 gpuInst); +void rmIntrMaskLockFree(NvU32 gpuInst); +/// @note The return value is always zero, not the actual IRQL +NvU64 rmIntrMaskLockAcquire(OBJGPU *pGpu); +void rmIntrMaskLockRelease(OBJGPU *pGpu, NvU64 oldIrql); + +#define LOCK_METER_OP(f,l,t,d0,d1,d2) +#define LOCK_METER_DATA(t,d0,d1,d2) + +#define rmInitLockMetering() +#define rmDestroyLockMetering() + +#include "rmapi/rmapi.h" + +#define API_LOCK_FLAGS_NONE RMAPI_LOCK_FLAGS_NONE +#define API_LOCK_FLAGS_COND_ACQUIRE RMAPI_LOCK_FLAGS_COND_ACQUIRE + +#endif // LOCKS_H diff --git a/src/nvidia/inc/kernel/core/prelude.h b/src/nvidia/inc/kernel/core/prelude.h new file mode 100644 index 
0000000..4365410
--- /dev/null
+++ b/src/nvidia/inc/kernel/core/prelude.h
@@ -0,0 +1,116 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __PRELUDE_H__
+#define __PRELUDE_H__
+
+/* ------------------------ C library --------------------------------------- */
+#include <stddef.h> // NULL
+
+/* ------------------------ SDK includes ------------------------------------ */
+
+#include "nvtypes.h"
+#include "nvrangetypes.h"
+#include "nvstatus.h"
+#include "nvmisc.h"
+#include "nvlimits.h"
+#include "nvos.h"
+
+#include "nvctassert.h"
+
+/* ------------------------ RM library and utils ---------------------------- */
+#include "nvport/nvport.h"
+#include "nvoc/object.h"
+#include "core/printf.h"
+#include "core/strict.h"
+#include "utils/nvassert.h"
+
+/* ------------------------ Code-generation --------------------------------- */
+#include "rmconfig.h"           // RMCONFIG header generated by config/rmconfig.pl
+#include "g_rmconfig_private.h" // resman-private hal setup such as: IsGK104(), etc.
+#include "g_nvh_state.h"        // pass enable/disable state to NVOC headers
+#include "g_odb.h"
+#include "g_hal.h"
+
+/* ------------------------ Common types ------------------------------------ */
+typedef NvU64 RmPhysAddr; // A physical address should be 64 bits
+
+typedef struct THREAD_STATE_NODE THREAD_STATE_NODE; // FW declare thread state
+
+/* ------------------------ Utility Macros ---------------------------------- */
+
+//
+// Power of 2 alignment.
+// (Will give unexpected results if 'gran' is not a power of 2.)
+// (v - v + gran) ensures that gran is upcasted to match v before
+// the ~ operation, without explicitly having to typecast it.
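+//
+// Worked example (illustrative values, not part of the original comment):
+//   RM_ALIGN_DOWN(0x1234, 0x1000) == 0x1000
+//   RM_ALIGN_UP(0x1234, 0x1000)   == 0x2000
+//   RM_IS_ALIGNED(0x2000, 0x1000) is true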
+// +#define RM_ALIGN_DOWN(v, gran) ((v) & ~(((v) - (v) + (gran)) - 1)) +#define RM_ALIGN_UP(v, gran) (((v) + ((gran) - 1)) & ~(((v) - (v) + (gran))-1)) +#define RM_IS_ALIGNED(v, gran) ((((gran) - 1) & (v)) == 0) + +#define RM_ALIGN_PTR_DOWN(p, gran) ((void *) RM_ALIGN_DOWN(((NvUPtr)p), (gran))) +#define RM_ALIGN_PTR_UP(p, gran) ((void *) RM_ALIGN_UP(((NvUPtr)p), (gran))) + +#define RM_PAGE_ALIGN_DOWN(value) RM_ALIGN_DOWN((value), RM_PAGE_SIZE) +#define RM_PAGE_ALIGN_UP(value) RM_ALIGN_UP((value), RM_PAGE_SIZE) + +#define NV_DELTA(a, b) (NV_MAX((a), (b)) - NV_MIN((a), (b))) // Okay for unsigned or signed + +#define NV_ROUNDUP(a,b) ((NV_CEIL(a,b))*(b)) +#define NV_ROUND_TO_QUANTA(a, quanta) (((quanta) == 0) ? (a): ((((a) + ((quanta) >> 1)) / (quanta)) * (quanta))) +#define NV_FLOOR_TO_QUANTA(a, quanta) (((a) / (quanta)) * (quanta)) +#define NV_BYTESWAP16(a) ((((a) & 0xff00)>>8) | \ + (((a) & 0x00ff)<<8)) +#define NV_BYTESWAP32(a) ((((a) & 0xff000000)>>24) | \ + (((a) & 0x00ff0000)>>8) | \ + (((a) & 0x0000ff00)<<8) | \ + (((a) & 0x000000ff)<<24)) +#define NV_TO_LOWER(c) (((c)>='A'&&(c)<='Z')?(c)+('a'-'A'):(c)) +#define NV_TO_UPPER(c) (((c)>='a'&&(c)<='z')?((c)-'a'+'A'):(c)) + +/*! + * Creates a byte mask for a word at given offset. + * offset = 0 0xffffff00 + * offset = 1 0xffff00ff + * offset = 2 0xff00ffff + * offset = 3 0x00ffffff + * + * @param[in] offset Offset for the mask. + */ +#define NV_BYTE_MASK(offset) (~(0xff << ((offset)<<3))) + +// +// note: the following trick fails if (z-1) * y > max_int +// +// since the calculation contains (x % z) * y, +// and the maximum value of (x % z) is (z-1). +// +// selecting the smaller of x and y to be y reduces the chances +// of problems, but for big enough z, the problem will return... +// +#define OVERFLOW_CAREFUL_MUL_DIV(x, y, z) \ + ((x) > (y)) ? (((x) / (z)) * (y) + (((x) % (z)) * (y)) / (z)) : (((y) / (z)) * (x) + (((y) % (z)) * (x)) / (z)) + +#define MASK_BITS(n) (~(0xFFFFFFFF << (n))) + +#endif /* __PRELUDE_H__ */ diff --git a/src/nvidia/inc/kernel/core/printf.h b/src/nvidia/inc/kernel/core/printf.h new file mode 100644 index 0000000..d5ca423 --- /dev/null +++ b/src/nvidia/inc/kernel/core/printf.h @@ -0,0 +1,263 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _PRINTF_H_ +#define _PRINTF_H_ + +/* + * RM PRINTF definitions. 
+ * + * Provides RM internal definitions built on the generic nvprintf utilities + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvport/nvport.h" +#include "utils/nvprintf.h" +#include "nvlog/nvlog.h" + +// TODO Bug 5078337: Move these away from kernel/core +#include "kernel/diagnostics/xid_context.h" + +#define DBG_FILE_LINE_FUNCTION NV_FILE_STR, __LINE__, NV_FUNCTION_STR + +/** + * @todo bug 1583359 - Move to NvPort compiler specifics + */ +#if defined(__GNUC__) || defined(__clang__) +#define NV_RETURN_ADDRESS() __builtin_return_address(0) +#else +#define NV_RETURN_ADDRESS() _ReturnAddress() +#endif + + +//****************************************************************************** +// BREAKPOINTS +//****************************************************************************** + +// NV_DBG_BREAKPOINT_ALLOWED can be overridden through CFLAGS +#if !defined(NV_DBG_BREAKPOINT_ALLOWED) +#if defined(DEBUG) || defined(ASSERT_BUILD) || defined(NV_MODS) || defined(QA_BUILD) || (defined(NVRM) && NVOS_IS_LIBOS) +#define NV_DBG_BREAKPOINT_ALLOWED 1 +#else +#define NV_DBG_BREAKPOINT_ALLOWED 0 +#endif +#endif // !defined(NV_DBG_BREAKPOINT_ALLOWED) + +#define NV_DEBUG_BREAK_FLAGS_RC 0:0 +#define NV_DEBUG_BREAK_FLAGS_RC_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_RC_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_ASSERT 1:1 +#define NV_DEBUG_BREAK_FLAGS_ASSERT_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_ASSERT_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_DBG_BREAK 2:2 +#define NV_DEBUG_BREAK_FLAGS_DBG_BREAK_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_DBG_BREAK_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_GPU_TIMEOUT 3:3 +#define NV_DEBUG_BREAK_FLAGS_GPU_TIMEOUT_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_GPU_TIMEOUT_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_POOL_TAGS 4:4 +#define NV_DEBUG_BREAK_FLAGS_POOL_TAGS_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_POOL_TAGS_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_POWER_ON 5:5 +#define NV_DEBUG_BREAK_FLAGS_POWER_ON_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_POWER_ON_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_SMU_ERROR 6:6 +#define NV_DEBUG_BREAK_FLAGS_SMU_ERROR_DISABLE (0x0) +#define NV_DEBUG_BREAK_FLAGS_SMU_ERROR_ENABLE (0x1) +#define NV_DEBUG_BREAK_FLAGS_CRASH 7:7 +#define NV_DEBUG_BREAK_FLAGS_CRASH_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_CRASH_ENABLE (0x00000001) + +#define NV_DEBUG_BREAK_ATTRIBUTES 7:0 +#define NV_DEBUG_BREAK_ATTRIBUTES_NONE (0x00000000) +#define NV_DEBUG_BREAK_ATTRIBUTES_RC (0x00000001) +#define NV_DEBUG_BREAK_ATTRIBUTES_ASSERT (0x00000002) +#define NV_DEBUG_BREAK_ATTRIBUTES_DBG_BREAK (0x00000004) +#define NV_DEBUG_BREAK_ATTRIBUTES_GPU_TIMEOUT (0x00000008) +#define NV_DEBUG_BREAK_ATTRIBUTES_POOL_TAGS (0x00000010) +#define NV_DEBUG_BREAK_ATTRIBUTES_POWER_ON (0x00000020) +#define NV_DEBUG_BREAK_ATTRIBUTES_SMU_ERROR (0x00000040) +#define NV_DEBUG_BREAK_ATTRIBUTES_CRASH (0x00000080) + +// Checks RMINFO and OS config to see if triggering a breakpoint is ever allowed +NvBool nvDbgBreakpointEnabled(void); +// Flushes the logs before a breakpoint, so we can see all the prints. 
+void osFlushLog(void); + +#define DBG_ROUTINE() \ + do \ + { \ + if (nvDbgBreakpointEnabled()) \ + PORT_BREAKPOINT_ALWAYS(); \ + } while (0) + +#define REL_DBG_BREAKPOINT() \ + REL_DBG_BREAKPOINT_MSG("NVRM-RC: Nvidia GPU Error Detected\n") + +#if NV_DBG_BREAKPOINT_ALLOWED + +#if !NVOS_IS_LIBOS + +#define DBG_BREAKPOINT_EX(PGPU, LEVEL) \ + do \ + { \ + NV_PRINTF(LEVEL_ERROR, "bp @ " NV_FILE_FMT ":%d\n", NV_FILE, __LINE__); \ + osFlushLog(); \ + DBG_ROUTINE(); \ + } while (0) + +#else // !NVOS_IS_LIBOS + +#define DBG_BREAKPOINT_EX(PGPU, LEVEL) \ + do \ + { \ + NV_ASSERT_FAILED("DBG_BREAKPOINT"); \ + } while (0) + +#endif // !NVOS_IS_LIBOS + +#define DBG_BREAKPOINT() DBG_BREAKPOINT_EX(NULL, 0) + +#define REL_DBG_BREAKPOINT_MSG(msg) \ + do \ + { \ + PORT_DBG_PRINT_STRING_LITERAL(msg); \ + DBG_BREAKPOINT(); \ + } while (0) + +#else // !NV_DBG_BREAKPOINT_ALLOWED + +#define DBG_BREAKPOINT() +#define DBG_BREAKPOINT_EX(PGPU, LEVEL) + +#define REL_DBG_BREAKPOINT_MSG(msg) \ + do \ + { \ + PORT_DBG_PRINT_STRING_LITERAL(msg); \ + DBG_ROUTINE(); \ + } while (0) + + +#endif // NV_DBG_BREAKPOINT_ALLOWED + +#define DBG_BREAKPOINT_REASON(reason) DBG_BREAKPOINT() + +#define DBG_BREAKPOINT_ERROR_INFO(errorCategory, errorInfo) DBG_BREAKPOINT() + +//****************************************************************************** +// PRINTS +//****************************************************************************** + +#include "utils/nvprintf.h" + +#define MAX_ERROR_STRING 512 +#ifndef NVPORT_CHECK_PRINTF_ARGUMENTS +#define NVPORT_CHECK_PRINTF_ARGUMENTS(x,c) +#endif +// +// Prototypes +// +NvBool nvDbgInit(void); +void nvDbgDestroy(void); +void nvDbg_Printf (const char *file, int line, const char *function, int debuglevel, const char *s, ...) NVPORT_CHECK_PRINTF_ARGUMENTS(5, 6); + +// +// Like libc's vsnprintf(), nvDbg_vPrintf() invalidates its va_list argument. The va_list argument +// may not be reused after nvDbg_vPrintf() returns. If the va_list is needed after the +// nvDbg_vPrintf() call, create a copy of the va_list using va_copy(). +// The caller controls the lifetime of the va_list argument, and should free it using va_end. 
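+//
+// Illustrative sketch (not part of the original header; 'buf', 'bufSize',
+// 'fmt', and the surrounding arguments are hypothetical):
+//
+//     va_list argsCopy;
+//     va_copy(argsCopy, args);
+//     nvDbg_vPrintf(file, line, func, level, fmt, args); // 'args' is consumed here
+//     nvDbgVsnprintf(buf, bufSize, fmt, argsCopy);       // reuse the arguments via the copy
+//     va_end(argsCopy);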
+//
+void nvDbg_vPrintf (const char *file, int line, const char *function, int debuglevel, const char *s, va_list args);
+void nvDbg_PrintBuf(const char *file, int line, const char *function, int debuglevel, NvU8 buffer[], NvU32 bufsize);
+
+int nvDbgVsnprintf(char *dest, NvU32 destSize, const char *fmt, va_list args);
+int nvDbgSnprintf (char *dest, NvU32 destSize, const char *fmt, ...);
+
+struct OBJGPU;
+void nvDbgInitRmMsg(struct OBJGPU *);
+// RmMsgPrefix return value
+#define NVRM_MSG_PREFIX_NVRM        NVBIT(0)
+#define NVRM_MSG_PREFIX_FILE        NVBIT(1)
+#define NVRM_MSG_PREFIX_FUNCTION    NVBIT(2)
+#define NVRM_MSG_PREFIX_LINE        NVBIT(3)
+#define NVRM_MSG_PREFIX_OSTIMESTAMP NVBIT(4)
+NvU32 RmMsgPrefix(NvU32 prefix, const char *filename, NvU32 linenumber, const char *function, char *str, NvU32 len);
+// nvDbgRmMsgCheck return code
+#define NVRM_MSG_NORMAL 0 // Use normal message handling (warnings/errors)
+#define NVRM_MSG_HIDE   1 // Skip this message
+#define NVRM_MSG_PRINT  2 // Force printing of this message
+NvU32 nvDbgRmMsgCheck(const char *filename, NvU32 linenumber, const char *function, NvU32 level, const char *format, NvU32 *pPrefix);
+void nvDbgDumpBufferBytes(void *pBuffer, NvU32 length);
+
+
+#if NV_PRINTF_STRINGS_ALLOWED
+#define DBG_STRING(str) str
+#define DBG_INIT() nvDbgInit()
+#define DBG_DESTROY() nvDbgDestroy()
+#define DBG_VSNPRINTF(ptr_size_format_and_stuff) nvDbgVsnprintf ptr_size_format_and_stuff
+#define DBG_PRINTBUF(dbglevel, buffer, bufsize) nvDbg_PrintBuf(DBG_FILE_LINE_FUNCTION, dbglevel, buffer, bufsize)
+#define DBG_RMMSG_CHECK(level) (nvDbgRmMsgCheck(DBG_FILE_LINE_FUNCTION, level, NULL, NULL) == NVRM_MSG_PRINT)
+#else // ! NV_PRINTF_STRINGS_ALLOWED -- debug printf strings not enabled
+#define DBG_STRING(str) ""
+#define DBG_INIT() (NV_TRUE)
+#define DBG_DESTROY()
+#define DBG_VSNPRINTF(ptr_size_format_and_stuff)
+#define DBG_PRINTBUF(dbglevel, buffer, bufsize)
+#define DBG_RMMSG_CHECK(level) (0)
+#endif // NV_PRINTF_STRINGS_ALLOWED
+
+#if defined(PORT_IS_FUNC_SUPPORTED)
+#if PORT_IS_FUNC_SUPPORTED(portMemExValidate)
+#define DBG_VAL_PTR(p) portMemExValidate(p, NV_TRUE)
+#endif
+#endif
+#ifndef DBG_VAL_PTR
+#define DBG_VAL_PTR(p)
+#endif
+
+//
+// TODO Bug 5078337: Move these away from kernel/core and rename to indicate
+// that they emit XIDs
+//
+#define NV_ERROR_LOG(pGpu, num, fmt, ...)                           \
+    nvErrorLog_va((void*)pGpu, num, fmt, ##__VA_ARGS__);            \
+    NVLOG_PRINTF(NV_PRINTF_MODULE, NVLOG_ROUTE_RM, LEVEL_ERROR,     \
+                 NV_PRINTF_ADD_PREFIX("Xid %d: " fmt), num, ##__VA_ARGS__)
+
+#define NV_ERROR_LOG_DATA(pGpu, num, fmt, ...)                      \
+    portDbgPrintf(NV_PRINTF_ADD_PREFIX(fmt), ##__VA_ARGS__);        \
+    NVLOG_PRINTF(NV_PRINTF_MODULE, NVLOG_ROUTE_RM, LEVEL_ERROR,     \
+                 NV_PRINTF_ADD_PREFIX(fmt), ##__VA_ARGS__)
+
+void nvErrorLog(void *pVoid, XidContext context, const char *pFormat, va_list arglist);
+void nvErrorLog_va(void * pGpu, NvU32 num, const char * pFormat, ...);
+void nvErrorLog2_va(void * pGpu, XidContext context, NvBool oobLogging, const char * pFormat, ...);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // _PRINTF_H_
diff --git a/src/nvidia/inc/kernel/core/strict.h b/src/nvidia/inc/kernel/core/strict.h
new file mode 100644
index 0000000..d102e45
--- /dev/null
+++ b/src/nvidia/inc/kernel/core/strict.h
@@ -0,0 +1,99 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __STRICT_H__
+#define __STRICT_H__
+
+//
+// RM_STRICT_SUPPRESS_DEPRECATED_DEFINITIONS_VER_XYZ should be set
+// before including any RM internal headers when disabling deprecated
+// definitions is desired.
+//
+// For transition during refactoring, we might introduce new types and
+// interfaces and use macros/wrappers to forward the old interface to
+// the new one.
+//
+// Once a callsite is migrated to use the new interface it can use RM
+// strict to disable the deprecated definitions to prevent changes from
+// reintroducing calls to a deprecated interface within a cleansed
+// module.
+//
+// Controlling disablement of deprecated definitions is versioned. This
+// enables us to introduce new deprecated interfaces incrementally.
+// For example, ModuleA might scrub to versionX (removal of OBJFB defns) but
+// not versionY (removal of legacy CLI types).
+//
+// Flags to turn off deprecated definitions are intended to be
+// temporary; once all modules stop referencing them, the deprecated
+// definitions and the knobs in this header should be deleted.
+//
+#ifdef RM_STRICT_SUPPRESS_DEPRECATED_DEFINITIONS_VER_JAN_21_2020
+#define RM_STRICT_CONFIG_EMIT_DEPRECATED_OBJFB_DEFINITIONS 0
+#define RM_STRICT_CONFIG_EMIT_DEPRECATED_CONTEXT_DMA_DEFINITIONS 0
+#endif
+
+//
+// RM_STRICT_SUPPRESS_PHYSICAL_DEFINITIONS_VER_XYZ should be set before
+// including any RM internal headers when disabling "physical" definitions is
+// desired.
+//
+// Physical definitions refer to interfaces/types that are only used by GSP-RM
+// and VGPU-host, i.e.: not to be used by VGPU Client or GSP Client
+//
+#ifdef RM_STRICT_SUPPRESS_PHYSICAL_DEFINITIONS_VER_JAN_21_2020
+#define RM_STRICT_CONFIG_EMIT_MEMORY_SYSTEM_DEFINITIONS 0
+#endif
+
+//
+// Default deprecated and "physical engine" definitions on unless specified
+//
+#ifndef RM_STRICT_CONFIG_EMIT_DEPRECATED_OBJFB_DEFINITIONS
+#define RM_STRICT_CONFIG_EMIT_DEPRECATED_OBJFB_DEFINITIONS 1
+#endif
+
+#ifndef RM_STRICT_CONFIG_EMIT_DEPRECATED_CONTEXT_DMA_DEFINITIONS
+#define RM_STRICT_CONFIG_EMIT_DEPRECATED_CONTEXT_DMA_DEFINITIONS 1
+#endif
+
+#ifndef RM_STRICT_CONFIG_EMIT_MEMORY_SYSTEM_DEFINITIONS
+#define RM_STRICT_CONFIG_EMIT_MEMORY_SYSTEM_DEFINITIONS 1
+#endif
+
+//
+// "Physical engine" definitions not yet included in any version, but available
+// for T234X.
Should be defined to 0 before including any RM internal headers +// when disabling OBJDISP (and related) definitions is desired. +// +#ifndef RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 1 +#endif + +// +// Generate OBJGPU engine accessors (GPU_GET_FOO(pGpu)) for disabled engines. +// These will always return NULL, but will allow the code that references them +// to compile. +// +#ifndef RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS +#define RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS 1 +#endif + +#endif /* __STRICT_H__ */ diff --git a/src/nvidia/inc/kernel/core/system.h b/src/nvidia/inc/kernel/core/system.h new file mode 100644 index 0000000..4e95886 --- /dev/null +++ b/src/nvidia/inc/kernel/core/system.h @@ -0,0 +1,3 @@ + +#include "g_system_nvoc.h" + diff --git a/src/nvidia/inc/kernel/core/thread_state.h b/src/nvidia/inc/kernel/core/thread_state.h new file mode 100644 index 0000000..a619a6c --- /dev/null +++ b/src/nvidia/inc/kernel/core/thread_state.h @@ -0,0 +1,227 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef THREAD_STATE_H +#define THREAD_STATE_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Defines and structures used for Thread State management * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "os/os.h" +#include "tls/tls.h" +#include "containers/map.h" +#include "containers/list.h" + +typedef struct OBJGPU OBJGPU; + +// +// Thread State Tracking structures and defines +// +typedef struct THREAD_TIMEOUT_STATE +{ + NvU64 enterTime; + NvU64 nonComputeTime; + NvU64 computeTime; + NvU64 nextCpuYieldTime; + NvU64 overrideTimeoutMsecs; + +} THREAD_TIMEOUT_STATE, *PTHREAD_TIMEOUT_STATE; + +typedef struct THREAD_STATE_FREE_CALLBACK +{ + void *pCbData; + void (*pCb)(void *pCbData); +} THREAD_STATE_FREE_CALLBACK; + +MAKE_LIST(THREAD_STATE_FREE_CB_LIST, THREAD_STATE_FREE_CALLBACK); + +typedef struct THREAD_STATE_NODE THREAD_STATE_NODE; + +struct THREAD_STATE_NODE +{ + OS_THREAD_HANDLE threadId; + /*! + * Thread sequencer id. This is a unique identifier for a given thread + * entry into the RM. This is separate from @ref threadId, as the threadId + * is really the OS's thread handle/pointer. 
In cases where the same
+     * physical thread is re-used (e.g. WORK_ITEMs are scheduled from a
+     * pre-allocated pool of worker threads), different RM threads will have the
+     * same threadId.
+     *
+     * This value is set by @ref threadStateInitXYZ() based off the global @ref
+     * THREAD_STATE_DB::threadSeqCntr.
+     */
+    NvU32 threadSeqId;
+    NvBool bValid;
+    THREAD_TIMEOUT_STATE timeout;
+    NvU32 cpuNum;
+    NvU32 flags;
+    MapNode node;
+
+    /*!
+     * If a callback is installed, threadStateFree() may block on it.
+     *
+     * The installed callbacks will be processed in FIFO order only.
+     *
+     * Only supported on non-ISR CPU RM paths.
+     */
+    THREAD_STATE_FREE_CB_LIST cbList;
+};
+
+MAKE_INTRUSIVE_MAP(ThreadStateNodeMap, THREAD_STATE_NODE, node);
+
+typedef struct THREAD_STATE_DB_TIMEOUT
+{
+    NvU64 nonComputeTimeoutMsecs;
+    NvU64 computeTimeoutMsecs;
+    NvU32 computeGpuMask;
+    NvU32 flags;
+
+} THREAD_STATE_DB_TIMEOUT, *PTHREAD_STATE_DB_TIMEOUT;
+
+#define THREAD_STATE_TRACE_MAX_ENTRIES 8
+
+typedef struct THREAD_STATE_TRACE_ENTRY
+{
+    NvU64 callerRA;
+    NvU32 flags;
+
+} THREAD_STATE_TRACE_ENTRY;
+
+typedef struct THREAD_STATE_TRACE_INFO
+{
+    NvU32 index;
+    THREAD_STATE_TRACE_ENTRY entries[THREAD_STATE_TRACE_MAX_ENTRIES];
+
+} THREAD_STATE_TRACE_INFO;
+
+typedef struct THREAD_STATE_ISR_LOCKLESS
+{
+    THREAD_STATE_NODE **ppIsrThreadStateGpu;
+} THREAD_STATE_ISR_LOCKLESS, *PTHREAD_STATE_ISR_LOCKLESS, **PPTHREAD_STATE_ISR_LOCKLESS;
+
+typedef struct THREAD_STATE_DB
+{
+    NvU32 setupFlags;
+    NvU32 maxCPUs;
+    /*!
+     * Thread state sequencer id counter. The last allocated thread state
+     * sequencer id via @ref threadStateInitXYZ().
+     */
+    NvU32 threadSeqCntr;
+    /*!
+     * Thread state sequencer id counter for only GSP task_interrupt.
+     */
+    NvU32 gspIsrThreadSeqCntr;
+    PORT_SPINLOCK *spinlock;
+    ThreadStateNodeMap dbRoot;
+    THREAD_STATE_NODE **ppISRDeferredIntHandlerThreadNode;
+    PTHREAD_STATE_ISR_LOCKLESS pIsrlocklessThreadNode;
+    THREAD_STATE_DB_TIMEOUT timeout;
+    THREAD_STATE_TRACE_INFO traceInfo;
+} THREAD_STATE_DB, *PTHREAD_STATE_DB;
+
+//
+// This is the same for all OSes. This value was chosen because it is
+// the minimum found on any OS at the time of this writing (May, 2008).
+//
+#define TIMEOUT_DEFAULT_OS_RESCHEDULE_INTERVAL_SECS 2
+
+//
+// The normal power transition requirement for Windows is 4 seconds.
+// Use a longer time to let the OS fire its timeout and request recovery.
+// +#define TIMEOUT_WDDM_POWER_TRANSITION_INTERVAL_MS 9800 + +// +// Thread state timeout for DPC or ISR handling +// +#define TIMEOUT_DPC_ISR_INTERVAL_MS 500 + +// +// Thread State flags used for threadStateInitSetupFlags +// +#define THREAD_STATE_SETUP_FLAGS_NONE 0 +#define THREAD_STATE_SETUP_FLAGS_ENABLED NVBIT(0) +#define THREAD_STATE_SETUP_FLAGS_TIMEOUT_ENABLED NVBIT(1) +#define THREAD_STATE_SETUP_FLAGS_SLI_LOGIC_ENABLED NVBIT(2) +#define THREAD_STATE_SETUP_FLAGS_CHECK_TIMEOUT_AT_FREE_ENABLED NVBIT(3) +#define THREAD_STATE_SETUP_FLAGS_ASSERT_ON_TIMEOUT_ENABLED NVBIT(4) +#define THREAD_STATE_SETUP_FLAGS_ASSERT_ON_FAILED_LOOKUP_ENABLED NVBIT(5) +#define THREAD_STATE_SETUP_FLAGS_RESET_ON_TIMEOUT_ENABLED NVBIT(6) +#define THREAD_STATE_SETUP_FLAGS_DO_NOT_INCLUDE_SLEEP_TIME_ENABLED NVBIT(7) +#define THREAD_STATE_SETUP_FLAGS_PRINT_INFO_ENABLED NVBIT(31) + +// +// Thread State flags used for threadState[Init,Free] +// +#define THREAD_STATE_FLAGS_NONE 0 +#define THREAD_STATE_FLAGS_IS_ISR NVBIT(0) +#define THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER NVBIT(1) +#define THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER NVBIT(2) +#define THREAD_STATE_FLAGS_IS_ISR_LOCKLESS NVBIT(3) +#define THREAD_STATE_FLAGS_TIMEOUT_INITED NVBIT(5) +#define THREAD_STATE_FLAGS_DEVICE_INIT NVBIT(7) +#define THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED NVBIT(8) + +// These Threads run exclusively between a conditional acquire +#define THREAD_STATE_FLAGS_EXCLUSIVE_RUNNING (THREAD_STATE_FLAGS_IS_ISR | \ + THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER | \ + THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER) + +#define THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING (THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER | \ + THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER) + +NV_STATUS threadStateGlobalAlloc(void); +void threadStateGlobalFree(void); +void threadStateInitRegistryOverrides(OBJGPU *pGpu); +void threadStateInitSetupFlags(NvU32 flags); +NvU32 threadStateGetSetupFlags(void); + +void threadStateInitISRLockless(THREAD_STATE_NODE *, OBJGPU*, NvU32); +void threadStateFreeISRLockless(THREAD_STATE_NODE *, OBJGPU*, NvU32); +void threadStateInitISRAndDeferredIntHandler(THREAD_STATE_NODE *, OBJGPU*, NvU32); +void threadStateOnlyProcessWorkISRAndDeferredIntHandler(THREAD_STATE_NODE *, OBJGPU*, NvU32); +void threadStateOnlyFreeISRAndDeferredIntHandler(THREAD_STATE_NODE *, OBJGPU*, NvU32); +void threadStateFreeISRAndDeferredIntHandler(THREAD_STATE_NODE *, OBJGPU*, NvU32); +void threadStateInit(THREAD_STATE_NODE *pThreadNode, NvU32 flags); +void threadStateFree(THREAD_STATE_NODE *pThreadNode, NvU32 flags); + +NV_STATUS threadStateGetCurrent(THREAD_STATE_NODE **ppThreadNode, OBJGPU *pGpu); +NV_STATUS threadStateGetCurrentUnchecked(THREAD_STATE_NODE **ppThreadNode, OBJGPU *pGpu); +NV_STATUS threadStateInitTimeout(OBJGPU *pGpu, NvU32 timeoutUs, NvU32 flags); +NV_STATUS threadStateCheckTimeout(OBJGPU *pGpu, NvU64 *pElapsedTimeUs); +NV_STATUS threadStateResetTimeout(OBJGPU *pGpu); +void threadStateLogTimeout(OBJGPU *pGpu, NvU64 funcAddr, NvU32 lineNum); +void threadStateYieldCpuIfNecessary(OBJGPU *pGpu, NvBool bQuiet); +void threadStateSetTimeoutOverride(THREAD_STATE_NODE *, NvU64); +void threadStateSetTimeoutSingleOverride(THREAD_STATE_NODE *, NvU64); + +NV_STATUS threadStateEnqueueCallbackOnFree(THREAD_STATE_NODE *pThreadNode, + THREAD_STATE_FREE_CALLBACK *pCallback); +void threadStateRemoveCallbackOnFree(THREAD_STATE_NODE *pThreadNode, + THREAD_STATE_FREE_CALLBACK *pCallback); +#endif // THREAD_STATE_H diff --git 
a/src/nvidia/inc/kernel/diagnostics/code_coverage_mgr.h b/src/nvidia/inc/kernel/diagnostics/code_coverage_mgr.h new file mode 100644 index 0000000..1b3fd36 --- /dev/null +++ b/src/nvidia/inc/kernel/diagnostics/code_coverage_mgr.h @@ -0,0 +1,3 @@ + +#include "g_code_coverage_mgr_nvoc.h" + diff --git a/src/nvidia/inc/kernel/diagnostics/journal.h b/src/nvidia/inc/kernel/diagnostics/journal.h new file mode 100644 index 0000000..d19db84 --- /dev/null +++ b/src/nvidia/inc/kernel/diagnostics/journal.h @@ -0,0 +1,3 @@ + +#include "g_journal_nvoc.h" + diff --git a/src/nvidia/inc/kernel/diagnostics/journal_structs.h b/src/nvidia/inc/kernel/diagnostics/journal_structs.h new file mode 100644 index 0000000..c435c8c --- /dev/null +++ b/src/nvidia/inc/kernel/diagnostics/journal_structs.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef JOURNAL_STRUCTS_H +#define JOURNAL_STRUCTS_H 1 + +#include "nvcd.h" +#include "rmcd.h" + +// Meta Data to Describe an error block +typedef struct RMCD_ERROR_BLOCK { + NvU8 * pBlock; + NvU32 blockSize; + struct RMCD_ERROR_BLOCK * pNext; +} RMCD_ERROR_BLOCK; + +typedef struct RMERRORHEADER { + struct RMFIFOERRORELEMENT_V3 *pNextError; + RMCD_ERROR_BLOCK *pErrorBlock; + NvU32 GPUTag; + NvU32 ErrorNumber; +} RMERRORHEADER; + +typedef struct { + RMERRORHEADER ErrorHeader; + RmPrbInfo_RECORD_V2 RmPrbErrorData; +} RMPRBERRORELEMENT_V2; + +typedef struct RMFIFOERRORELEMENT_V3 { + RMERRORHEADER ErrorHeader; +} RMFIFOERRORELEMENT_V3; + +#endif /* ifndef JOURNAL_STRUCTS_H */ diff --git a/src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h b/src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h new file mode 100644 index 0000000..1a09575 --- /dev/null +++ b/src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h @@ -0,0 +1,3 @@ + +#include "g_nv_debug_dump_nvoc.h" + diff --git a/src/nvidia/inc/kernel/diagnostics/profiler.h b/src/nvidia/inc/kernel/diagnostics/profiler.h new file mode 100644 index 0000000..26def56 --- /dev/null +++ b/src/nvidia/inc/kernel/diagnostics/profiler.h @@ -0,0 +1,119 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _PROFILER_H_
+#define _PROFILER_H_
+
+/*!
+ * @file profiler.h
+ * @brief Simple API to measure elapsed times in RM for profiling and statistics
+ *
+ * The primary goals of this API are to:
+ *   1. Be lightweight and have little-to-no setup required (built into release drivers)
+ *   2. Defer as much analysis as possible to the user of the data (keep it simple)
+ *   3. Provide sub-millisecond resolution if possible (medium-high granularity)
+ *
+ * This is intended mainly for coarse measurements of time-critical software
+ * sequences, such as GC6. For example, the measurements could be used to catch
+ * major latency regressions in a particular timing module.
+ *
+ * For more sophisticated profiling (e.g. for prospective analysis), use of an
+ * external profiling tool (e.g. xperf with ETW) is recommended instead.
+ */
+
+#include "core/core.h"
+
+/*!
+ * Record containing the statistics of a single time module to be profiled
+ * periodically.
+ *
+ * This tracks the min/max elapsed time over all the measurement
+ * cycles, as well as the total elapsed time and number of cycles.
+ * To calculate the average elapsed time per cycle, divide total_ns by count.
+ *
+ * 64-bit precision integers are used to hold nanosecond resolution
+ * over long periods of time (e.g. greater than 4 seconds).
+ */
+typedef struct
+{
+    NvU64 count;    // Number of measurement cycles
+    NvU64 total_ns; // Total elapsed time over all cycles, in nanoseconds
+    NvU64 min_ns;   // Minimum elapsed time of a single cycle
+    NvU64 max_ns;   // Maximum elapsed time of a single cycle
+} RM_PROF_STATS;

[...remainder of profiler.h and the preamble of the tracer header are not recoverable from this hunk; its RMTRACE_MARKER_PROBE usage example resumes below...]

+// void AAA()
+// {
+//     RMTRACE_MARKER_PROBE("AAA Function", pGpu->gpuId, data, RMTRACE_FUNC_PROG_ENTER);
+// ...
+//     RMTRACE_MARKER_PROBE("About To Enter XXX", pGpu->gpuId, data, RMTRACE_FUNC_PROG_STEP);
+//     XXX();
+// ...
+//     BBB();
+// ...
+//     RMTRACE_MARKER_PROBE("AAA Function", pGpu->gpuId, data, RMTRACE_FUNC_PROG_EXIT);
+// }
+//
+//
+// void BBB()
+// {
+//     RMTRACE_MARKER_PROBE("BBB Function", pGpu->gpuId, data, RMTRACE_FUNC_PROG_ENTER);
+// ...
+//     CCC();
+// ...
+//     RMTRACE_MARKER_PROBE("BBB Function", pGpu->gpuId, data, RMTRACE_FUNC_PROG_EXIT);
+// }
+//
+// With a tool (like EtwTool), we can generate the messages below automatically:
+//
+// AAA Function (Enter)
+// (0.1234ms)
+// About to Enter XXX
+// (0.0012ms)
+// BBB Function (Enter)
+// BBB Function (Leave) - 0.23ms
+// AAA Function (Leave) -- 0.4111ms
+//
+
+#define RMTRACE_FUNC_PROG_ENTER 0x0000
+#define RMTRACE_FUNC_PROG_EXIT 0x00FF
+#define RMTRACE_FUNC_PROG_STEP 0x007F
+#define RMTRACE_UNKNOWN_GPUID 0xFFFFFFFF
+#define RMTRACE_UNUSED_PARAM 0
+#define RMTRACE_MAX_PRINT_BUFFER_SIZE 128
+
+//
+// Empty macros
+//
+
+#define RMTRACE_INIT_NEW()
+#define RMTRACE_DESTROY_NEW()
+#define RMTRACE_SET_PTIMER_LOG(enable)
+#define RMTRACE_IS_PTIMER_LOG_ENABLED() \
+    NV_FALSE
+#define RMTRACE_RMAPI(id, cmd)
+#define RMTRACE_RMLOCK(id)
+#define RMTRACE_DISP1(id, gpuId, param1)
+#define RMTRACE_DISP2(id, gpuId, param1, param2)
+#define RMTRACE_DISP3(id, gpuId, param1, param2, param3)
+#define RMTRACE_DISP4(id, gpuId, param1, param2, param3, param4)
+#define RMTRACE_DISP5(id, gpuId, param1, param2, param3, param4, param5)
+#define RMTRACE_DISP6(id, gpuId, param1, param2, param3, param4, param5, param6)
+#define RMTRACE_DISP_EDID(gpuId, publicId, connectedId, data, size)
+#define RMTRACE_DISP_BRIGHTNESS_ENTRY(dispId, flags, blType, pwmInfoProvider, pwmInfoEntries, SBEnable, lmnProvider, lmnEntryCount, blPwmInfoSize, blPwmInfo)
+#define RMTRACE_DISP_ERROR(id, gpuId, param1, param2, status)
+#define RMTRACE_DISP_EXCEPTION(gpuId, param1, param2, param3, param4, param5)
+#define RMTRACE_GPIO(id, _function, _state, _gpioPin, param)
+#define RMTRACE_GPIO_LIST(id, count, list)
+#define RMTRACE_I2C(id, gpuId, portId, address, indexSize, pIndex, dataSize, pData, status)
+#define RMTRACE_I2C_SET_ACQUIRED(gpuId, portId, acquirer, status, curTime)
+#define RMTRACE_I2C_ENUM_PORTS(gpuId, count, ports)
+#define RMTRACE_GPU(id, gpuId, param1, param2, param3, param4, param5, param6, param7)
+#define RMTRACE_RMJOURNAL(id, gpuId, type, group, key, count, firstTime, lastTime)
+#define RMTRACE_POWER(id, gpuId, state, head, forcePerf, fastBootPowerState)
+#define RMTRACE_PERF(id, gpuId, param1, param2, param3, param4, param5, param6, param7)
+#define RMTRACE_THERM2(id, gpuId, param1, param2)
+#define RMTRACE_THERM3(id, gpuId, param1, param2, param3)
+#define RMTRACE_THERM6(id, gpuId, param1, param2, param3, param4, param5, param6)
+#define RMTRACE_TIMEOUT(id, gpuId)
+#define RMTRACE_VBIOS(id, gpuId, param1, param2, param3, param4, param5, param6, param7)
+#define RMTRACE_VBIOS_ERROR(id, gpuId, param1, param2, param3, param4, param5, param6, param7)
+#define RMTRACE_NVLOG(id, pData, dataSize)
+#define RMTRACE_SBIOS(id, gpuId, param1, param2, param3, param4, param5, param6, param7)
+#define RMTRACE_USBC0(id, gpuId)
+#define RMTRACE_USBC1(id, gpuId, param1)
+#define RMTRACE_USBC2(id, gpuId, param1, param2)
+#define RMTRACE_USBC7(id, gpuId, param1, param2, param3, param4, param5, param6, param7)
+#define RMTRACE_RMGENERAL(id, param1, param2, param3)
+#define RMTRACE_NVTELEMETRY(id, gpuId, param1, param2, param3)
+#define RMTRACE_NOCAT(id, gpuId, type, group, key, count, timeStamp)
+#define RMTRACE_PRINT
+
+
+#ifndef RMTRACE_FLAG_ENABLED
+#define RMTRACE_FLAG_ENABLED (0)
+#endif
+
+//
+// Empty macros
+//
+#define RMTRACE_INIT()
+#define RMTRACE_DESTROY()
+#define RMTRACE_ENABLE(eventEventMask)
+#define RMTRACE_PROBE(module, event)
+
+#define RMTRACE_PROBE1(module, event, dataType, data, dataSize)
+
+#define RMTRACE_PROBE2(module,
event, dataType1, data1, dataSize1, dataType2, data2, dataSize2) + +#define RMTRACE_PROBE3(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3) + +#define RMTRACE_PROBE4(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3, dataType4, data4, dataSize4) + +#define RMTRACE_PROBE5(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3, dataType4, data4, dataSize4, \ + dataType5, data5, dataSize5) + +#define RMTRACE_PROBE6(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3, dataType4, data4, dataSize4, \ + dataType5, data5, dataSize5, dataType6, data6, dataSize6) + +#define RMTRACE_PROBE7(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3, dataType4, data4, dataSize4, \ + dataType5, data5, dataSize5, dataType6, data6, dataSize6, \ + dataType7, data7, dataSize7) +#define RMTRACE_PROBE10(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3, dataType4, data4, dataSize4, \ + dataType5, data5, dataSize5, dataType6, data6, dataSize6, \ + dataType7, data7, dataSize7, dataType8, data8, dataSize8, \ + dataType9, data9, dataSize9, dataType10, data10, dataSize10) +#define RMTRACE_PROBE2_PRIMTYPE(module, event, type0, val0, type1, val1) +#define RMTRACE_PROBE3_PRIMTYPE(module, event, type0, val0, type1, val1, type2, val2) +#define RMTRACE_PROBE4_PRIMTYPE(module, event, type0, val0, type1, val1, type2, val2, type3, val3) +#define RMTRACE_PROBE5_PRIMTYPE(module, event, type0, val0, type1, val1, type2, val2, type3, val3, \ + type4, val4) +#define RMTRACE_PROBE7_PRIMTYPE(module, event, type0, val0, type1, val1, type2, val2, type3, val3, \ + type4, val4, type5, val5, type6, val6) +#define RMTRACE_PROBE10_PRIMTYPE(module, event, type0, val0, type1, val1, type2, val2, type3, val3, \ + type4, val4, type5, val5, type6, val6, type7, val7, type8, val8, \ + type9, val9) +#define RMTRACE_MARKER_PROBE(name, gpuId, payload, id) + + +#endif /* TRACER_H */ diff --git a/src/nvidia/inc/kernel/diagnostics/xid_context.h b/src/nvidia/inc/kernel/diagnostics/xid_context.h new file mode 100644 index 0000000..cdc77dd --- /dev/null +++ b/src/nvidia/inc/kernel/diagnostics/xid_context.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef XID_CONTEXT_H
+#define XID_CONTEXT_H 1
+
+#include "nvtypes.h"
+
+//!
+//! Root cause information to print in specific cases.
+//!
+//! Some Xid strings must be kept "stable", so this information is
+//! only printed in certain cases where we can break the stability, or where
+//! that particular Xid string was not stable.
+//!
+//! It will always be okay to pass an all-zero struct { 0 }
+//!
+typedef struct
+{
+    //!
+    //! If nonzero, print this as a root cause of the current
+    //! ROBUST_CHANNEL_PREEMPTIVE_REMOVAL
+    //!
+    NvU32 preemptiveRemovalPreviousXid;
+} RcRootCause;
+
+
+//!
+//! Xid and context information about an Xid passed to KernelRM.
+//!
+//! This is NOT stable. The Xid printing mechanism must take care that Xid
+//! strings which must remain stable continue to do so even if some information
+//! changes here.
+//!
+typedef struct
+{
+    //! Xid number.
+    NvU32 xid;
+
+    //! Additional root cause information valid only for certain Xids.
+    RcRootCause rootCause;
+} XidContext;
+
+#endif // XID_CONTEXT_H
diff --git a/src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h b/src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h
new file mode 100644
index 0000000..30d9aaf
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h
@@ -0,0 +1,3 @@
+
+#include "g_hda_codec_api_nvoc.h"
+
diff --git a/src/nvidia/inc/kernel/gpu/dce_client/dce_client.h b/src/nvidia/inc/kernel/gpu/dce_client/dce_client.h
new file mode 100644
index 0000000..ad600f3
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/dce_client/dce_client.h
@@ -0,0 +1,3 @@
+
+#include "g_dce_client_nvoc.h"
+
diff --git a/src/nvidia/inc/kernel/gpu/device/device.h b/src/nvidia/inc/kernel/gpu/device/device.h
new file mode 100644
index 0000000..76faf3c
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/device/device.h
@@ -0,0 +1,3 @@
+
+#include "g_device_nvoc.h"
+
diff --git a/src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h b/src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h
new file mode 100644
index 0000000..eee436c
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h
@@ -0,0 +1,3 @@
+
+#include "g_disp_capabilities_nvoc.h"
+
diff --git a/src/nvidia/inc/kernel/gpu/disp/disp_channel.h b/src/nvidia/inc/kernel/gpu/disp/disp_channel.h
new file mode 100644
index 0000000..146dfbb
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/disp/disp_channel.h
@@ -0,0 +1,3 @@
+
+#include "g_disp_channel_nvoc.h"
+
diff --git a/src/nvidia/inc/kernel/gpu/disp/disp_objs.h b/src/nvidia/inc/kernel/gpu/disp/disp_objs.h
new file mode 100644
index 0000000..6dde057
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/disp/disp_objs.h
@@ -0,0 +1,3 @@
+
+#include "g_disp_objs_nvoc.h"
+
diff --git a/src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h b/src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h
new file mode 100644
index 0000000..5addedc
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h
@@ -0,0 +1,3 @@
+
+#include "g_disp_sf_user_nvoc.h"
+
diff --git a/src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h b/src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h
new file mode 100644
index 0000000..60fbcb0
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h
@@ -0,0 +1,3 @@
+
+#include "g_kernel_head_nvoc.h"
+
diff --git
a/src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h b/src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h new file mode 100644 index 0000000..94c9909 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h @@ -0,0 +1,3 @@ + +#include "g_disp_inst_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/disp/kern_disp.h b/src/nvidia/inc/kernel/gpu/disp/kern_disp.h new file mode 100644 index 0000000..25be380 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/kern_disp.h @@ -0,0 +1,3 @@ + +#include "g_kern_disp_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h b/src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h new file mode 100644 index 0000000..65eedad --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef KERN_DISP_MAX_H +#define KERN_DISP_MAX_H + +/*! + * @brief Defines max values used for the KernelDisplay Engine Object, + * including values shared by OBJDISP code. + */ +#define OBJ_MAX_HEADS 8 +#define MAX_RG_LINE_CALLBACKS_PER_HEAD 2 +#define OBJ_MAX_DFPS 31 + +#endif // KERN_DISP_MAX_H diff --git a/src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h b/src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h new file mode 100644 index 0000000..929ada5 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef KERN_DISP_TYPE_H +#define KERN_DISP_TYPE_H + +/****************************************************************************** +* +* Defines display type enums that can be used in the KernelDisplay object. +* +******************************************************************************/ +#define NV_PDISP_CHN_NUM_ANY 0x7F + +typedef enum +{ + dispChnClass_Curs, + dispChnClass_Ovim, + dispChnClass_Base, + dispChnClass_Core, + dispChnClass_Ovly, + dispChnClass_Winim, + dispChnClass_Win, + dispChnClass_Any, + dispChnClass_Supported +} DISPCHNCLASS; + +enum DISPLAY_ICC_BW_CLIENT +{ + DISPLAY_ICC_BW_CLIENT_RM, + DISPLAY_ICC_BW_CLIENT_EXT, // DD or MODS + NUM_DISPLAY_ICC_BW_CLIENTS +}; + +typedef enum +{ + dispMemoryTarget_physNVM, + dispMemoryTarget_physPCI, + dispMemoryTarget_physPCICoherent +} DISPMEMORYTARGET; + +typedef struct +{ + NvU64 addr; + DISPMEMORYTARGET memTarget; + NvBool valid; +} VGAADDRDESC; + +#endif // #ifndef KERN_DISP_TYPE_H diff --git a/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h b/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h new file mode 100644 index 0000000..cea03ec --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h @@ -0,0 +1,111 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef VBLANK_H +#define VBLANK_H + +#include "gpu/gpu.h" +/* ------------------------ Types definitions ------------------------------ */ +/*! + * Callback function prototype + */ +typedef NV_STATUS (*VBLANKCALLBACKPROC)(OBJGPU*, void *, NvU32, NvU32, NV_STATUS); + +typedef struct VBLANKCALLBACK +{ + VBLANKCALLBACKPROC Proc; + void *pObject; + NvBool bObjectIsChannelDescendant; + NvU32 Param1; + NvU32 Param2; + NvU32 VBlankCount; + NvU32 VBlankOffset; + NvU64 TimeStamp; + NvU32 MC_CallbackFlag; + NvU32 Flags; + NV_STATUS Status; + struct VBLANKCALLBACK *Next; + NvBool bImmediateCallback; + NvBool bIsVblankNotifyEnable; +}VBLANKCALLBACK; + +/* ------------------------ Macros & Defines ------------------------------- */ + +/*! 
+ * Callback function registration flags
+ */
+#define VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_COUNT     0x00000001
+#define VBLANK_CALLBACK_FLAG_COMPLETE_ON_OBJECT_CLEANUP 0x00000002
+#define VBLANK_CALLBACK_FLAG_PERSISTENT                 0x00000004
+#define VBLANK_CALLBACK_FLAG_SPECIFIED_TIMESTAMP        0x00000010
+#define VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_NEXT      0x00000020 // Explicit request for the next vblank.
+#define VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_OFFSET    0x00000040 // Explicit request for the vblank offset from the current one
+#define VBLANK_CALLBACK_FLAG_PROMOTE_TO_FRONT           0x00000080 // Promotes to being 'first', while still honoring VBlankCount
+#define VBLANK_CALLBACK_FLAG_RELEASES_SEMAPHORE         0x00000100 // A flag for deadlock detection to check if this callback could release a semaphore
+#define VBLANK_CALLBACK_FLAG_GUARANTEE_SAFETY           0x00000200 // This callback absolutely needs to run during vertical blank, even if it runs late as a consequence.
+#define VBLANK_CALLBACK_FLAG_LOW_LATENCY                0x10000000 // This now means ASAP, which could be ISR or DPC, depending on which happens first
+#define VBLANK_CALLBACK_FLAG_MC_EXECUTE_ONCE            0x40000000 // A special flag for MultiChip configurations to have the callback execute only once
+#define VBLANK_CALLBACK_FLAG_USER                       0x80000000
+
+/*!
+ * A little helper macro for the CALLBACK_FLAG_MC_EXECUTE_ONCE flag above
+ */
+#define VBLANK_CALLBACK_EXECUTE_ONCE(x) (x & VBLANK_CALLBACK_FLAG_MC_EXECUTE_ONCE)
+
+/*!
+ * VBlank Service info gathering keep-alive in seconds. This value is the number of seconds the vblank service will run after a client requests vblank info.
+ */
+#define VBLANK_INFO_GATHER_KEEPALIVE_SECONDS (5)
+
+/*!
+ * VBLANK SERVICE RELATED
+ * VBlank Service callback processing flags
+ * These two flags describe when to process the queues
+ */
+
+#define VBLANK_STATE_PROCESS_NORMAL             (0x00000000) // Process the requested queues if associated vblank interrupt is pending
+#define VBLANK_STATE_PROCESS_IMMEDIATE          (0x00000001) // Process the requested queues now, regardless of any vblank interrupt pending state
+
+/*!
+ * These three flags describe which queues to process
+ */
+#define VBLANK_STATE_PROCESS_LOW_LATENCY        (0x00000002) // Process the low-latency vblank callback queue
+#define VBLANK_STATE_PROCESS_NORMAL_LATENCY     (0x00000004) // Process the normal-latency vblank callback queue
+
+#define VBLANK_STATE_PROCESS_ALL_CALLBACKS      (VBLANK_STATE_PROCESS_LOW_LATENCY|VBLANK_STATE_PROCESS_NORMAL_LATENCY) // Process all callback (high and low latency) queues
+
+#define VBLANK_STATE_PROCESS_CALLBACKS_ONLY     (0x00000008) // Process only the callback queue(s) and nothing else
+
+/*!
+ * set when called from an ISR; if VBlank() is in an ISR and there is
+ * more work to do, then VBlank() will not clear the pending bit
+ */
+#define VBLANK_STATE_PROCESS_CALLED_FROM_ISR    (0x00000010)
+#define VBLANK_STATE_PROCESS_CALLED_FROM_DPC    (0x00000020)
+
+/*!
Vblank Interrupt state */ +#define NV_HEAD_VBLANK_INTR_UNAVAILABLE (0x00000000) +#define NV_HEAD_VBLANK_INTR_AVAILABLE (0x00000001) +#define NV_HEAD_VBLANK_INTR_ENABLED (0x00000002) + +#endif // VBLANK_H diff --git a/src/nvidia/inc/kernel/gpu/eng_desc.h b/src/nvidia/inc/kernel/gpu/eng_desc.h new file mode 100644 index 0000000..bce623d --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/eng_desc.h @@ -0,0 +1,3 @@ + +#include "g_eng_desc_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/eng_state.h b/src/nvidia/inc/kernel/gpu/eng_state.h new file mode 100644 index 0000000..9f732f6 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/eng_state.h @@ -0,0 +1,3 @@ + +#include "g_eng_state_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/error_cont.h b/src/nvidia/inc/kernel/gpu/error_cont.h new file mode 100644 index 0000000..c9a664c --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/error_cont.h @@ -0,0 +1,189 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file error_cont.h + * @brief Holds data structures, defines and policy table required by the + * Ampere Error Containment feature / code. + */ + +#ifndef _ERROR_CONT_H_ +#define _ERROR_CONT_H_ + +/* ------------------------ Includes ---------------------------------------- */ +#include "core/core.h" +#include "kernel/gpu/gpu_engine_type.h" +#include "nverror.h" + +/* ------------------------ Forward Definitions ----------------------------- */ + +typedef struct Device Device; + +/* ------------------------ Datatypes --------------------------------------- */ + +/*! 
+ * Error Containment error ID enum
+ */
+typedef enum _NV_ERROR_CONT_ERR_ID
+{
+    NV_ERROR_CONT_ERR_ID_E01_FB_ECC_DED = 0, // FD Error ID: E01: FB ECC DED
+    NV_ERROR_CONT_ERR_ID_E02_FB_ECC_DED_IN_CBC_STORE = 1, // FD Error ID: E02: FB ECC DED in CBC
+    NV_ERROR_CONT_ERR_ID_E05_LTC_ECC_DSTG = 2, // FD Error ID: E05: LTC ECC in data region
+    NV_ERROR_CONT_ERR_ID_E06_LTC_UNSUPPORTED_CLIENT_POISON = 3, // FD Error ID: E06: LTC unsupported client poison
+    NV_ERROR_CONT_ERR_ID_E07_LTC_ECC_TSTG = 4, // FD Error ID: E07: LTC Tag Parity error
+    NV_ERROR_CONT_ERR_ID_E08_LTC_ECC_RSTG = 5, // FD Error ID: E08: LTC CBC Parity error
+    NV_ERROR_CONT_ERR_ID_E09_FBHUB_POISON = 6, // FD Error ID: E09: FBHUB poison error
+    NV_ERROR_CONT_ERR_ID_E10_SM_POISON = 7, // FD Error ID: E10: SM poison error
+    NV_ERROR_CONT_ERR_ID_E12A_CE_POISON_IN_USER_CHANNEL = 8, // FD Error ID: E12: CE poison error in user channel
+    NV_ERROR_CONT_ERR_ID_E12B_CE_POISON_IN_KERNEL_CHANNEL = 9, // FD Error ID: E12: CE poison error in kernel channel
+    NV_ERROR_CONT_ERR_ID_E13_MMU_POISON = 10, // FD Error ID: E13: MMU poison error
+    NV_ERROR_CONT_ERR_ID_E16_GCC_POISON = 11, // FD Error ID: E16: GCC poison error
+    NV_ERROR_CONT_ERR_ID_E17_CTXSW_POISON = 12, // FD Error ID: E17: FECS/GPCCS/TPCCS poison error
+    NV_ERROR_CONT_ERR_ID_E20_XALEP_EGRESS_POISON = 13, // FD Error ID: E20: XALEP EGRESS poison error
+    NV_ERROR_CONT_ERR_ID_E21A_XALEP_INGRESS_CONTAINED_POISON = 14, // FD Error ID: E21: XALEP INGRESS contained poison error
+    NV_ERROR_CONT_ERR_ID_E21B_XALEP_INGRESS_UNCONTAINED_POISON = 15, // FD Error ID: E21: XALEP INGRESS uncontained poison error
+    NV_ERROR_CONT_ERR_ID_E22_PMU_POISON = 16, // FD Error ID: E22: PMU poison error
+    NV_ERROR_CONT_ERR_ID_E23_SEC2_POISON = 17, // FD Error ID: E23: SEC2 poison error
+    NV_ERROR_CONT_ERR_ID_E24_GSP_POISON = 18, // FD Error ID: E24: GSP poison error
+    NV_ERROR_CONT_ERR_ID_E25_FBFALCON_POISON = 19, // FD Error ID: E25: FBFalcon poison error
+    NV_ERROR_CONT_ERR_ID_E26_NVDEC_POISON = 20, // FD Error ID: E26: NVDEC poison error
+    NV_ERROR_CONT_ERR_ID_E27_NVJPG_POISON = 21, // FD Error ID: E27: NVJPG poison error
+    NV_ERROR_CONT_ERR_ID_E28_OFA_POISON = 22 // FD Error ID: E28: OFA poison error
+} NV_ERROR_CONT_ERR_ID;
+
+/*!
+ * Error Containment settings per error ID, with SMC memory partitioning disabled or enabled.
+ */
+typedef struct _NV_ERROR_CONT_SMC_DIS_EN_SETTING
+{
+    NvU32 rcErrorCode;
+    NvBool bGpuResetReqd;
+    NvBool bGpuDrainAndResetReqd;
+    NvBool bPrintSmcPartitionInfo;
+    NvU32 nv2080Notifier;
+} NV_ERROR_CONT_SMC_DIS_EN_SETTING;
+
+/*!
+ * Error Containment state table tracking policy settings for each error ID
+ */
+typedef struct _NV_ERROR_CONT_STATE_TABLE
+{
+    NV_ERROR_CONT_ERR_ID errorCode;
+    NV_ERROR_CONT_SMC_DIS_EN_SETTING smcDisEnSetting[2]; // 0: SMC memory partitioning disabled,
+                                                         // 1: SMC memory partitioning enabled
+} NV_ERROR_CONT_STATE_TABLE;
+
+/*!
+ * Struct for LTC location
+ */
+typedef struct _NV_ERROR_CONT_LOCATION_LTC
+{
+    NvU32 partition;
+    NvU32 slice;
+} NV_ERROR_CONT_LOCATION_LTC;
+
+/*!
+ * Struct for DRAM location
+ */
+typedef struct _NV_ERROR_CONT_LOCATION_DRAM
+{
+    NvU32 partition;
+    NvU32 subPartition;
+    NvU64 physicalAddress;
+} NV_ERROR_CONT_LOCATION_DRAM;
+
+/*!
+ * Struct for Engine ID
+ */
+typedef struct _NV_ERROR_CONT_LOCATION_ENG_ID
+{
+    RM_ENGINE_TYPE rmEngineId;
+    Device *pDevice;
+} NV_ERROR_CONT_LOCATION_ENG_ID;
+
+/*!
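+ * The following is an illustrative, non-normative sketch of a policy lookup
+ * against a state table built from the types above; the table and variable
+ * names are hypothetical.
+ */
+#if 0 // usage sketch only, not built
+    NvU32 smcIdx = bSmcMemPartitioningEnabled ? 1 : 0;
+    const NV_ERROR_CONT_SMC_DIS_EN_SETTING *pSetting =
+        &hypotheticalTable[errId].smcDisEnSetting[smcIdx];
+    if (pSetting->bGpuResetReqd)
+    {
+        // Schedule a full-GPU reset per the containment policy.
+    }
+#endif
+
+/*!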
+ * Error Containment location type + */ +typedef enum _NV_ERROR_CONT_LOCATION_TYPE +{ + NV_ERROR_CONT_LOCATION_TYPE_NONE = 0, // No location information available + NV_ERROR_CONT_LOCATION_TYPE_DRAM = 1, // DRAM location + NV_ERROR_CONT_LOCATION_TYPE_LTC = 2, // LTC location + NV_ERROR_CONT_LOCATION_TYPE_ENGINE = 3, // Engine location + NV_ERROR_CONT_LOCATION_TYPE_VF = 4 // VF location +} NV_ERROR_CONT_LOCATION_TYPE; + +/*! + * Union for Error Containment location information + */ +typedef union _NV_ERROR_CONT_LOCATION_INFO +{ + NV_ERROR_CONT_LOCATION_DRAM dramLoc; // DRAM location + NV_ERROR_CONT_LOCATION_LTC ltcLoc; // LTC location + NV_ERROR_CONT_LOCATION_ENG_ID engineLoc; // Engine location + NvU32 vfGfid; // VF location +} NV_ERROR_CONT_LOCATION_INFO; + +typedef struct _NV_ERROR_CONT_LOCATION +{ + NV_ERROR_CONT_LOCATION_TYPE locType; + NV_ERROR_CONT_LOCATION_INFO locInfo; +} NV_ERROR_CONT_LOCATION; + +/* ------------------------ Macros ------------------------------------------ */ + +#define ROBUST_CHANNEL_CONTAINED_ERROR_STR "Contained" +#define ROBUST_CHANNEL_UNCONTAINED_ERROR_STR "Uncontained" +#define NO_XID NV_U32_MAX +#define NO_NV2080_NOTIFIER NV2080_NOTIFIERS_MAXCOUNT +#define NV_ERR_CONT_LOCATION_STRING_SIZE_MAX 64 + +/*! + * Error Containment error types string. + * The order of this list must match the NV_ERROR_CONT_ERR_ID enums. + */ +#define NV_ERROR_CONT_ERR_ID_STRING_PUBLIC {"FB DED", \ + "DED CBC", \ + "LTC Data", \ + "LTC GPC", \ + "LTC TAG", \ + "LTC CBC", \ + "FBHUB", \ + "SM", \ + "CE User Channel", \ + "CE Kernel Channel", \ + "MMU", \ + "GCC", \ + "CTXSW", \ + "PCIE", \ + "PCIE", \ + "PCIE", \ + "PMU", \ + "SEC2", \ + "GSP", \ + "FB Falcon", \ + "NVDEC", \ + "NVJPG", \ + "OFA"} + +#endif // _ERROR_CONT_H_ diff --git a/src/nvidia/inc/kernel/gpu/gpu.h b/src/nvidia/inc/kernel/gpu/gpu.h new file mode 100644 index 0000000..29fdb18 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu.h @@ -0,0 +1,3 @@ + +#include "g_gpu_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gpu_access.h b/src/nvidia/inc/kernel/gpu/gpu_access.h new file mode 100644 index 0000000..c11c3e4 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_access.h @@ -0,0 +1,3 @@ + +#include "g_gpu_access_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gpu_acpi_data.h b/src/nvidia/inc/kernel/gpu/gpu_acpi_data.h new file mode 100644 index 0000000..3922dcd --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_acpi_data.h @@ -0,0 +1,112 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _GPU_ACPI_DATA_H_ +#define _GPU_ACPI_DATA_H_ + +#include "ctrl/ctrl0073/ctrl0073system.h" + +#include "nvctassert.h" +#include "acpigenfuncs.h" +#include "nvstatus.h" +#include "gpu/gpu_halspec.h" + +#define MAX_DSM_SUPPORTED_FUNCS_RTN_LEN 8 // # bytes to store supported functions + +/* Indicates the current state of mux */ +typedef enum +{ + dispMuxState_None = 0, + dispMuxState_IntegratedGPU, + dispMuxState_DiscreteGPU, +} DISPMUXSTATE; + +typedef struct { + // supported function status and cache + NvU32 suppFuncStatus; + NvU8 suppFuncs[MAX_DSM_SUPPORTED_FUNCS_RTN_LEN]; + NvU32 suppFuncsLen; + NvBool bArg3isInteger; + // callback status and cache + NvU32 callbackStatus; + NvU32 callback; +} ACPI_DSM_CACHE; + +typedef struct { + + ACPI_DSM_CACHE dsm[ACPI_DSM_FUNCTION_COUNT]; + ACPI_DSM_FUNCTION dispStatusHotplugFunc; + ACPI_DSM_FUNCTION dispStatusConfigFunc; + ACPI_DSM_FUNCTION perfPostPowerStateFunc; + ACPI_DSM_FUNCTION stereo3dStateActiveFunc; + NvU32 dsmPlatCapsCache[ACPI_DSM_FUNCTION_COUNT]; + NvU32 MDTLFeatureSupport; + +} ACPI_DATA; + +typedef struct DOD_METHOD_DATA +{ + NV_STATUS status; + NvU32 acpiIdListLen; + NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; +} DOD_METHOD_DATA; + +typedef struct JT_METHOD_DATA +{ + NV_STATUS status; + NvU32 jtCaps; + NvU16 jtRevId; + NvBool bSBIOSCaps; +} JT_METHOD_DATA; + +typedef struct MUX_METHOD_DATA_ELEMENT +{ + NvU32 acpiId; + NvU32 mode; + NV_STATUS status; +} MUX_METHOD_DATA_ELEMENT; + +typedef struct MUX_METHOD_DATA +{ + NvU32 tableLen; + MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; + MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; + MUX_METHOD_DATA_ELEMENT acpiIdMuxStateTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; +} MUX_METHOD_DATA; + +typedef struct CAPS_METHOD_DATA +{ + NV_STATUS status; + NvU32 optimusCaps; +} CAPS_METHOD_DATA; + +typedef struct ACPI_METHOD_DATA +{ + NvBool bValid; + DOD_METHOD_DATA dodMethodData; + JT_METHOD_DATA jtMethodData; + MUX_METHOD_DATA muxMethodData; + CAPS_METHOD_DATA capsMethodData; +} ACPI_METHOD_DATA; + +#endif // _GPU_ACPI_DATA_H_ diff --git a/src/nvidia/inc/kernel/gpu/gpu_arch.h b/src/nvidia/inc/kernel/gpu/gpu_arch.h new file mode 100644 index 0000000..6a1b4d5 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_arch.h @@ -0,0 +1,3 @@ + +#include "g_gpu_arch_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gpu_child_class_defs.h b/src/nvidia/inc/kernel/gpu/gpu_child_class_defs.h new file mode 100644 index 0000000..aa069ea --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_child_class_defs.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef GPU_CHILD_CLASS_DEFS_H +#define GPU_CHILD_CLASS_DEFS_H + +/*! + * @file + * @details Provides the class definitions for every GPU child class without the + * need to include every individual header. + */ + +#include "core/prelude.h" + +#define GPU_CHILD_CLASS_DEFS_GPU_CHILD(className, accessorName, numInstances, bConstructEarly, gpuField) \ + extern const struct NVOC_CLASS_DEF NV_CONCATENATE(__nvoc_class_def_, className); + +#define GPU_CHILD \ + GPU_CHILD_CLASS_DEFS_GPU_CHILD +#include "gpu/gpu_child_list.h" + +// Sub-classes of GPU children +// Pmu sub-classes +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Pmu10; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Pmu20; + +#endif // GPU_CHILD_CLASS_DEFS_H diff --git a/src/nvidia/inc/kernel/gpu/gpu_child_list.h b/src/nvidia/inc/kernel/gpu/gpu_child_list.h new file mode 100644 index 0000000..4a8df2b --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_child_list.h @@ -0,0 +1,313 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
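+ */
+
+/*
+ * The following is an illustrative, non-normative sketch of the X-macro
+ * pattern this file implements (see the explanatory comment below): a
+ * consumer defines GPU_CHILD and includes the list to stamp out one line
+ * per child. The struct and field names here are hypothetical.
+ */
+#if 0 // usage sketch only, not built
+#define GPU_CHILD(className, accessorName, numInstances, bConstructEarly, gpuField) \
+    className *gpuField[numInstances];
+
+struct HypotheticalGpuChildStorage
+{
+    #include "gpu/gpu_child_list.h"
+};
+#endif
+
+/*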
+ */
+
+//
+// No include guards - this file is included multiple times, each time with a
+// different definition for GPU_CHILD_SINGLE_INST and GPU_CHILD_MULTI_INST.
+//
+// Callers that will use the same definition for single- and multi-instance
+// entries can define GPU_CHILD, which will be used for both.
+//
+#if defined(GPU_CHILD)
+#if !defined(GPU_CHILD_SINGLE_INST) && !defined(GPU_CHILD_MULTI_INST)
+#define GPU_CHILD_SINGLE_INST GPU_CHILD
+#define GPU_CHILD_MULTI_INST GPU_CHILD
+#else
+#error "Must not define GPU_CHILD_{SINGLE,MULTI}_INST and GPU_CHILD at the same time"
+#endif
+#endif
+
+//
+// GPU child list. All objects must inherit from OBJENGSTATE. Objects are
+// constructed in the listed order and destructed in reverse order. Storage in
+// OBJGPU and accessor macros (i.e.: GET_GPU_XXX) are generated from this list.
+//
+
+//
+// Temporarily needed to generate stubs for disabled modules.
+// To be removed when the references to these modules are gone.
+//
+#if defined(GPU_CHILD_LIST_DISABLED_ONLY)
+#define GPU_CHILD_MODULE(_rmcfgModule) !RMCFG_MODULE_ENABLED(_rmcfgModule)
+#else
+#define GPU_CHILD_MODULE(_rmcfgModule) RMCFG_MODULE_ENABLED(_rmcfgModule)
+#endif
+
+    /* Class Name            Accessor Name            Max Instances   bConstructEarly   OBJGPU Field */
+#if GPU_CHILD_MODULE(FUSE)
+    GPU_CHILD_SINGLE_INST( OBJFUSE, GPU_GET_FUSE, 1, NV_TRUE, pFuse )
+#endif
+#if GPU_CHILD_MODULE(BIF)
+    GPU_CHILD_SINGLE_INST( OBJBIF, GPU_GET_BIF, 1, NV_TRUE, pBif )
+#endif
+#if GPU_CHILD_MODULE(KERNEL_BIF)
+    GPU_CHILD_SINGLE_INST( KernelBif, GPU_GET_KERNEL_BIF, 1, NV_TRUE, pKernelBif )
+#endif
+#if GPU_CHILD_MODULE(NNE)
+    GPU_CHILD_SINGLE_INST( Nne, GPU_GET_NNE, 1, NV_TRUE, pNne )
+#endif
+#if GPU_CHILD_MODULE(MC)
+    GPU_CHILD_SINGLE_INST( OBJMC, GPU_GET_MC, 1, NV_FALSE, pMc )
+#endif
+#if GPU_CHILD_MODULE(KERNEL_MC)
+    GPU_CHILD_SINGLE_INST( KernelMc, GPU_GET_KERNEL_MC, 1, NV_FALSE, pKernelMc )
+#endif
+#if GPU_CHILD_MODULE(PRIV_RING)
+    GPU_CHILD_SINGLE_INST( PrivRing, GPU_GET_PRIV_RING, 1, NV_FALSE, pPrivRing )
+#endif
+#if GPU_CHILD_MODULE(INTR)
+    GPU_CHILD_SINGLE_INST( SwIntr, GPU_GET_SW_INTR, 1, NV_FALSE, pSwIntr )
+#endif
+#if GPU_CHILD_MODULE(MEMORY_SYSTEM)
+    GPU_CHILD_SINGLE_INST( MemorySystem, GPU_GET_MEMORY_SYSTEM, 1, NV_FALSE, pMemorySystem )
+#endif
+#if GPU_CHILD_MODULE(KERNEL_MEMORY_SYSTEM)
+    GPU_CHILD_SINGLE_INST( KernelMemorySystem, GPU_GET_KERNEL_MEMORY_SYSTEM, 1, NV_FALSE, pKernelMemorySystem )
+#endif
+#if GPU_CHILD_MODULE(MEMORY_MANAGER)
+    GPU_CHILD_SINGLE_INST( MemoryManager, GPU_GET_MEMORY_MANAGER, 1, NV_FALSE, pMemoryManager )
+#endif
+#if GPU_CHILD_MODULE(FBFLCN)
+    GPU_CHILD_SINGLE_INST( OBJFBFLCN, GPU_GET_FBFLCN, 1, NV_FALSE, pFbflcn )
+#endif
+#if GPU_CHILD_MODULE(HSHUBMANAGER)
+    GPU_CHILD_SINGLE_INST( OBJHSHUBMANAGER, GPU_GET_HSHUBMANAGER, 1, NV_FALSE, pHshMgr )
+#endif
+#if GPU_CHILD_MODULE(HSHUB)
+    GPU_CHILD_MULTI_INST ( Hshub, GPU_GET_HSHUB, GPU_MAX_HSHUBS, NV_FALSE, pHshub )
+#endif
+#if GPU_CHILD_MODULE(SEQ)
+    GPU_CHILD_SINGLE_INST( OBJSEQ, GPU_GET_SEQ, 1, NV_FALSE, pSeq )
+#endif
+#if GPU_CHILD_MODULE(GpuMutexMgr)
+    GPU_CHILD_SINGLE_INST( GpuMutexMgr, GPU_GET_MUTEX_MGR, 1, NV_FALSE, pMutexMgr )
+#endif
+#if GPU_CHILD_MODULE(KERNEL_DISPLAY)
+    GPU_CHILD_SINGLE_INST( KernelDisplay, GPU_GET_KERNEL_DISPLAY, 1, NV_FALSE, pKernelDisplay )
+#endif
+#if GPU_CHILD_MODULE(TMR)
+    GPU_CHILD_SINGLE_INST( OBJTMR, GPU_GET_TIMER, 1, NV_TRUE, pTmr )
+#endif
+#if GPU_CHILD_MODULE(DISP)
+    GPU_CHILD_SINGLE_INST( OBJDISP, GPU_GET_DISP, 1, NV_FALSE, pDisp )
+#endif
+#if GPU_CHILD_MODULE(BUS)
+
GPU_CHILD_SINGLE_INST( OBJBUS, GPU_GET_BUS, 1, NV_FALSE, pBus ) +#endif +#if GPU_CHILD_MODULE(KERNEL_BUS) + GPU_CHILD_SINGLE_INST( KernelBus, GPU_GET_KERNEL_BUS, 1, NV_FALSE, pKernelBus ) +#endif +#if GPU_CHILD_MODULE(GMMU) + GPU_CHILD_SINGLE_INST( OBJGMMU, GPU_GET_GMMU, 1, NV_FALSE, pGmmu ) +#endif +#if GPU_CHILD_MODULE(KERNEL_GMMU) + GPU_CHILD_SINGLE_INST( KernelGmmu, GPU_GET_KERNEL_GMMU, 1, NV_FALSE, pKernelGmmu ) +#endif +#if GPU_CHILD_MODULE(KERNEL_SEC2) + GPU_CHILD_SINGLE_INST( KernelSec2, GPU_GET_KERNEL_SEC2, 1, NV_FALSE, pKernelSec2 ) +#endif +#if GPU_CHILD_MODULE(KERNEL_GSP) + GPU_CHILD_SINGLE_INST( KernelGsp, GPU_GET_KERNEL_GSP, 1, NV_FALSE, pKernelGsp ) +#endif +#if GPU_CHILD_MODULE(DCECLIENTRM) + GPU_CHILD_SINGLE_INST( OBJDCECLIENTRM, GPU_GET_DCECLIENTRM, 1, NV_FALSE, pDceclientrm ) +#endif +#if GPU_CHILD_MODULE(VIRT_MEM_ALLOCATOR) + GPU_CHILD_SINGLE_INST( VirtMemAllocator, GPU_GET_DMA, 1, NV_FALSE, pDma ) +#endif +#if GPU_CHILD_MODULE(GRMGR) + GPU_CHILD_SINGLE_INST( GraphicsManager, GPU_GET_GRMGR, 1, NV_FALSE, pGrMgr ) +#endif +#if GPU_CHILD_MODULE(MIG_MANAGER) + GPU_CHILD_SINGLE_INST( MIGManager, GPU_GET_MIG_MANAGER, 1, NV_FALSE, pMIGManager ) +#endif +#if GPU_CHILD_MODULE(KERNEL_MIG_MANAGER) + GPU_CHILD_SINGLE_INST( KernelMIGManager, GPU_GET_KERNEL_MIG_MANAGER, 1, NV_FALSE, pKernelMIGManager ) +#endif +#if GPU_CHILD_MODULE(KERNEL_GRAPHICS_MANAGER) + GPU_CHILD_SINGLE_INST( KernelGraphicsManager, GPU_GET_KERNEL_GRAPHICS_MANAGER, 1, NV_FALSE, pKernelGraphicsManager ) +#endif +#if GPU_CHILD_MODULE(GR) + GPU_CHILD_MULTI_INST ( Graphics, GPU_GET_GR_UNSAFE, GPU_MAX_GRS, NV_FALSE, pGr ) +#endif +#if GPU_CHILD_MODULE(KERNEL_GRAPHICS) + GPU_CHILD_MULTI_INST ( KernelGraphics, GPU_GET_KERNEL_GRAPHICS, GPU_MAX_GRS, NV_FALSE, pKernelGraphics ) +#endif +#if GPU_CHILD_MODULE(ClockManager) + GPU_CHILD_SINGLE_INST( ClockManager, GPU_GET_CLK_MGR, 1, NV_FALSE, pClk ) +#endif +#if GPU_CHILD_MODULE(FAN) + GPU_CHILD_SINGLE_INST( Fan, GPU_GET_FAN, 1, NV_FALSE, pFan ) +#endif +#if GPU_CHILD_MODULE(PERF) + GPU_CHILD_SINGLE_INST( Perf, GPU_GET_PERF, 1, NV_FALSE, pPerf ) +#endif +#if GPU_CHILD_MODULE(KERNEL_PERF) + GPU_CHILD_SINGLE_INST( KernelPerf, GPU_GET_KERNEL_PERF, 1, NV_FALSE, pKernelPerf ) +#endif +#if GPU_CHILD_MODULE(THERM) + GPU_CHILD_SINGLE_INST( Therm, GPU_GET_THERM, 1, NV_FALSE, pTherm ) +#endif +#if GPU_CHILD_MODULE(BSP) + GPU_CHILD_MULTI_INST ( OBJBSP, GPU_GET_BSP, GPU_MAX_NVDECS, NV_FALSE, pBsp ) +#endif +#if GPU_CHILD_MODULE(CIPHER) + GPU_CHILD_SINGLE_INST( OBJCIPHER, GPU_GET_CIPHER, 1, NV_FALSE, pCipher ) +#endif +#if GPU_CHILD_MODULE(VBIOS) + GPU_CHILD_SINGLE_INST( OBJVBIOS, GPU_GET_VBIOS, 1, NV_FALSE, pVbios ) +#endif +#if GPU_CHILD_MODULE(DCB) + GPU_CHILD_SINGLE_INST( OBJDCB, GPU_GET_DCB, 1, NV_FALSE, pDcb ) +#endif +#if GPU_CHILD_MODULE(GPIO) + GPU_CHILD_SINGLE_INST( OBJGPIO, GPU_GET_GPIO, 1, NV_FALSE, pGpio ) +#endif +#if GPU_CHILD_MODULE(VOLT) + GPU_CHILD_SINGLE_INST( OBJVOLT, GPU_GET_VOLT, 1, NV_FALSE, pVolt ) +#endif +#if GPU_CHILD_MODULE(I2C) + GPU_CHILD_SINGLE_INST( I2c, GPU_GET_I2C, 1, NV_FALSE, pI2c ) +#endif +#if GPU_CHILD_MODULE(SPI) + GPU_CHILD_SINGLE_INST( Spi, GPU_GET_SPI, 1, NV_FALSE, pSpi ) +#endif +#if GPU_CHILD_MODULE(KERNEL_RC) + GPU_CHILD_SINGLE_INST( KernelRc, GPU_GET_KERNEL_RC, 1, NV_FALSE, pKernelRc ) +#endif +#if GPU_CHILD_MODULE(RC) + GPU_CHILD_SINGLE_INST( OBJRC, GPU_GET_RC, 1, NV_FALSE, pRC ) +#endif +#if GPU_CHILD_MODULE(STEREO) + GPU_CHILD_SINGLE_INST( Stereo, GPU_GET_STEREO, 1, NV_FALSE, pStereo ) +#endif +#if GPU_CHILD_MODULE(INTR) + 
GPU_CHILD_SINGLE_INST( Intr, GPU_GET_INTR, 1, NV_FALSE, pIntr ) +#endif +#if GPU_CHILD_MODULE(DPAUX) + GPU_CHILD_SINGLE_INST( OBJDPAUX, GPU_GET_DPAUX, 1, NV_FALSE, pDpAux ) +#endif +#if GPU_CHILD_MODULE(PMU) + GPU_CHILD_SINGLE_INST( Pmu, GPU_GET_PMU, 1, NV_FALSE, pPmu ) +#endif +#if GPU_CHILD_MODULE(KERNEL_PMU) + GPU_CHILD_SINGLE_INST( KernelPmu, GPU_GET_KERNEL_PMU, 1, NV_FALSE, pKernelPmu ) +#endif +#if GPU_CHILD_MODULE(CE) + GPU_CHILD_MULTI_INST ( OBJCE, GPU_GET_CE, GPU_MAX_CES, NV_FALSE, pCe ) +#endif +#if GPU_CHILD_MODULE(KERNEL_CE) + GPU_CHILD_MULTI_INST ( KernelCE, GPU_GET_KCE, GPU_MAX_CES, NV_FALSE, pKCe ) +#endif +#if GPU_CHILD_MODULE(MSENC) + GPU_CHILD_MULTI_INST ( OBJMSENC, GPU_GET_MSENC, GPU_MAX_MSENCS, NV_FALSE, pMsenc ) +#endif +#if GPU_CHILD_MODULE(HDA) + GPU_CHILD_SINGLE_INST( OBJHDA, GPU_GET_HDA, 1, NV_FALSE, pHda ) +#endif +#if GPU_CHILD_MODULE(HDACODEC) + GPU_CHILD_SINGLE_INST( OBJHDACODEC, GPU_GET_HDACODEC, 1, NV_FALSE, pHdacodec ) +#endif +#if GPU_CHILD_MODULE(GCX) + GPU_CHILD_SINGLE_INST( GCX, GPU_GET_GCX, 1, NV_FALSE, pGcx ) +#endif +#if GPU_CHILD_MODULE(LPWR) + GPU_CHILD_SINGLE_INST( Lpwr, GPU_GET_LPWR, 1, NV_FALSE, pLpwr ) +#endif +#if GPU_CHILD_MODULE(KERNEL_FIFO) + GPU_CHILD_SINGLE_INST( KernelFifo, GPU_GET_KERNEL_FIFO_UC, 1, NV_FALSE, pKernelFifo ) +#endif +#if GPU_CHILD_MODULE(FIFO) + GPU_CHILD_SINGLE_INST( OBJFIFO, GPU_GET_FIFO_UC, 1, NV_FALSE, pFifo ) +#endif +#if GPU_CHILD_MODULE(INFOROM) + GPU_CHILD_SINGLE_INST( OBJINFOROM, GPU_GET_INFOROM, 1, NV_FALSE, pInforom ) +#endif +#if GPU_CHILD_MODULE(PMGR) + GPU_CHILD_SINGLE_INST( Pmgr, GPU_GET_PMGR, 1, NV_FALSE, pPmgr ) +#endif +#if GPU_CHILD_MODULE(UVM) + GPU_CHILD_SINGLE_INST( OBJUVM, GPU_GET_UVM, 1, NV_FALSE, pUvm ) +#endif +#if GPU_CHILD_MODULE(NV_DEBUG_DUMP) + GPU_CHILD_SINGLE_INST( NvDebugDump, GPU_GET_NVD, 1, NV_FALSE, pNvd ) +#endif +#if GPU_CHILD_MODULE(GRDBG) + GPU_CHILD_SINGLE_INST( SMDebugger, GPU_GET_GRDBG, 1, NV_FALSE, pGrdbg ) +#endif +#if GPU_CHILD_MODULE(SEC2) + GPU_CHILD_SINGLE_INST( OBJSEC2, GPU_GET_SEC2, 1, NV_FALSE, pSec2 ) +#endif +#if GPU_CHILD_MODULE(LSFM) + GPU_CHILD_SINGLE_INST( OBJLSFM, GPU_GET_LSFM, 1, NV_FALSE, pLsfm ) +#endif +#if GPU_CHILD_MODULE(ACR) + GPU_CHILD_SINGLE_INST( OBJACR, GPU_GET_ACR, 1, NV_FALSE, pAcr ) +#endif +#if GPU_CHILD_MODULE(KERNEL_NVLINK) + GPU_CHILD_SINGLE_INST( KernelNvlink, GPU_GET_KERNEL_NVLINK, 1, NV_FALSE, pKernelNvlink ) +#endif +#if GPU_CHILD_MODULE(NVLINK) + GPU_CHILD_SINGLE_INST( Nvlink, GPU_GET_NVLINK, 1, NV_FALSE, pNvLink ) +#endif +#if GPU_CHILD_MODULE(GPULOG) + GPU_CHILD_SINGLE_INST( OBJGPULOG, GPU_GET_GPULOG, 1, NV_FALSE, pGpuLog ) +#endif +#if GPU_CHILD_MODULE(GPUMON) + GPU_CHILD_SINGLE_INST( OBJGPUMON, GPU_GET_GPUMON, 1, NV_FALSE, pGpuMon ) +#endif +#if GPU_CHILD_MODULE(HWPM) + GPU_CHILD_SINGLE_INST( OBJHWPM, GPU_GET_HWPM, 1, NV_FALSE, pHwpm ) +#endif +#if GPU_CHILD_MODULE(KERNEL_HWPM) + GPU_CHILD_SINGLE_INST( KernelHwpm, GPU_GET_KERNEL_HWPM, 1, NV_FALSE, pKernelHwpm ) +#endif +#if GPU_CHILD_MODULE(GRIDDISPLAYLESS) + GPU_CHILD_SINGLE_INST( OBJGRIDDISPLAYLESS, GPU_GET_GRIDDISPLAYLESS, 1, NV_FALSE, pGridDisplayless ) +#endif +#if GPU_CHILD_MODULE(SWENG) + GPU_CHILD_SINGLE_INST( OBJSWENG, GPU_GET_SWENG, 1, NV_FALSE, pSwEng ) +#endif +#if GPU_CHILD_MODULE(VMMU) + GPU_CHILD_SINGLE_INST( OBJVMMU, GPU_GET_VMMU, 1, NV_FALSE, pVmmu ) +#endif +#if GPU_CHILD_MODULE(NVJPG) + GPU_CHILD_MULTI_INST( OBJNVJPG, GPU_GET_NVJPG, GPU_MAX_NVJPGS, NV_FALSE, pNvjpg ) +#endif +#if GPU_CHILD_MODULE(KERNEL_FSP) + GPU_CHILD_SINGLE_INST( KernelFsp, GPU_GET_KERNEL_FSP, 1, 
NV_TRUE, pKernelFsp ) +#endif + +#if GPU_CHILD_MODULE(GSP) + GPU_CHILD_SINGLE_INST( Gsp, GPU_GET_GSP, 1, NV_FALSE, pGsp ) +#endif +#if GPU_CHILD_MODULE(OFA) + GPU_CHILD_MULTI_INST( OBJOFA, GPU_GET_OFA, GPU_MAX_OFAS, NV_FALSE, pOfa ) +#endif + +// Undefine the entry macros to simplify call sites +#undef GPU_CHILD +#undef GPU_CHILD_SINGLE_INST +#undef GPU_CHILD_MULTI_INST +#undef GPU_CHILD_MODULE +#undef GPU_CHILD_LIST_DISABLED_ONLY diff --git a/src/nvidia/inc/kernel/gpu/gpu_device_mapping.h b/src/nvidia/inc/kernel/gpu/gpu_device_mapping.h new file mode 100644 index 0000000..2bdbf85 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_device_mapping.h @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _GPU_DEVICE_MAPPING_H_ +#define _GPU_DEVICE_MAPPING_H_ + + +// Defines the enum type DEVICE_INDEX used for identifying the device type being accessed +typedef enum +{ + DEVICE_INDEX_GPU = 0, + DEVICE_INDEX_HOST1X, + DEVICE_INDEX_DISPLAY, + DEVICE_INDEX_DPAUX, + DEVICE_INDEX_MC, + DEVICE_INDEX_CLKRST, + DEVICE_INDEX_MSS_NVLINK, + DEVICE_INDEX_HDACODEC, + DEVICE_INDEX_EMC, + DEVICE_INDEX_FUSE, + DEVICE_INDEX_KFUSE, + DEVICE_INDEX_MIPICAL, + DEVICE_INDEX_HFRP, + DEVICE_INDEX_MAX //Should always be the last entry +} DEVICE_INDEX; + +typedef enum +{ + SOC_DEV_MAPPING_DISP = 0, + SOC_DEV_MAPPING_DPAUX0, + SOC_DEV_MAPPING_DPAUX1, + SOC_DEV_MAPPING_DPAUX2, + SOC_DEV_MAPPING_DPAUX3, // Update NV_MAX_SOC_DPAUX_NUM_DEVICES if adding new DPAUX mappings + SOC_DEV_MAPPING_HDACODEC, + SOC_DEV_MAPPING_MIPICAL, + SOC_DEV_MAPPING_HFRP0, + SOC_DEV_MAPPING_HFRP1, + SOC_DEV_MAPPING_MAX +} SOC_DEV_MAPPING; + +#define GPU_MAX_DEVICE_MAPPINGS (60) + +typedef struct +{ + DEVICE_INDEX deviceIndex; // DEVICE_INDEX_* + NvU32 devId; // NV_DEVID_* +} DEVICE_ID_MAPPING; + +#endif // _GPU_DEVICE_MAPPING_H_ diff --git a/src/nvidia/inc/kernel/gpu/gpu_ecc.h b/src/nvidia/inc/kernel/gpu/gpu_ecc.h new file mode 100644 index 0000000..b1539a0 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_ecc.h @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _GPU_ECC_H_
+#define _GPU_ECC_H_
+
+typedef struct ECC_INFO
+{
+    NvU32 address;
+    NvU32 addressExt;
+    NvU32 locationId; // Holds GPC#, Partition#, etc. depending on unit
+    NvU32 subLocationId; // Holds TPC#, sub-partition#, slice#, etc. depending on unit
+    NvU64 corTotCnt;
+    NvU64 corUniCnt;
+    NvU64 uncTotCnt;
+    NvU64 uncUniCnt;
+    NvBool bCorError;
+    NvBool bCorTotOverflow;
+    NvBool bCorUniOverflow;
+    NvBool bUncError;
+    NvBool bUncTotOverflow;
+    NvBool bUncUniOverflow;
+    NvBool bPermanentCorError;
+} ECC_INFO;
+
+typedef struct ECC_COUNTERS
+{
+    NvU64 corCntTotCached;
+    NvU64 corCntTotVolatile;
+    NvU64 corCntUniCached;
+    NvU64 corCntUniVolatile;
+    NvU64 uncCntTotCached;
+    NvU64 uncCntUniCached;
+    NvU32 corTotOverflowCount;
+    NvU32 corUniOverflowCount;
+    NvU32 uncTotOverflowCount;
+    NvU32 uncUniOverflowCount;
+} ECC_COUNTERS;
+
+#endif // _GPU_ECC_H_
diff --git a/src/nvidia/inc/kernel/gpu/gpu_engine_type.h b/src/nvidia/inc/kernel/gpu/gpu_engine_type.h
new file mode 100644
index 0000000..1e172bc
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/gpu_engine_type.h
@@ -0,0 +1,181 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
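+ */
+
+/*
+ * The following is an illustrative, non-normative sketch of the
+ * indexed-engine helper macros declared further below in this header;
+ * the variable names are hypothetical.
+ */
+#if 0 // usage sketch only, not built
+    if (RM_ENGINE_TYPE_IS_COPY(rmEngineType))
+    {
+        // Convert the global engine type into a zero-based CE index.
+        NvU32 ceIdx = RM_ENGINE_TYPE_COPY_IDX(rmEngineType);
+    }
+#endif
+
+/*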
+ */ + +#ifndef _GPU_ENGINE_TYPE_H_ +#define _GPU_ENGINE_TYPE_H_ + +#include "class/cl2080.h" +#include "nvrangetypes.h" +#include "utils/nvbitvector.h" +#include "rmconfig.h" + +typedef enum +{ + RM_ENGINE_TYPE_NULL = (0x00000000), + RM_ENGINE_TYPE_GR0 = (0x00000001), + RM_ENGINE_TYPE_GR1 = (0x00000002), + RM_ENGINE_TYPE_GR2 = (0x00000003), + RM_ENGINE_TYPE_GR3 = (0x00000004), + RM_ENGINE_TYPE_GR4 = (0x00000005), + RM_ENGINE_TYPE_GR5 = (0x00000006), + RM_ENGINE_TYPE_GR6 = (0x00000007), + RM_ENGINE_TYPE_GR7 = (0x00000008), + RM_ENGINE_TYPE_COPY0 = (0x00000009), + RM_ENGINE_TYPE_COPY1 = (0x0000000a), + RM_ENGINE_TYPE_COPY2 = (0x0000000b), + RM_ENGINE_TYPE_COPY3 = (0x0000000c), + RM_ENGINE_TYPE_COPY4 = (0x0000000d), + RM_ENGINE_TYPE_COPY5 = (0x0000000e), + RM_ENGINE_TYPE_COPY6 = (0x0000000f), + RM_ENGINE_TYPE_COPY7 = (0x00000010), + RM_ENGINE_TYPE_COPY8 = (0x00000011), + RM_ENGINE_TYPE_COPY9 = (0x00000012), + RM_ENGINE_TYPE_COPY10 = (0x00000013), + RM_ENGINE_TYPE_COPY11 = (0x00000014), + RM_ENGINE_TYPE_COPY12 = (0x00000015), + RM_ENGINE_TYPE_COPY13 = (0x00000016), + RM_ENGINE_TYPE_COPY14 = (0x00000017), + RM_ENGINE_TYPE_COPY15 = (0x00000018), + RM_ENGINE_TYPE_COPY16 = (0x00000019), + RM_ENGINE_TYPE_COPY17 = (0x0000001a), + RM_ENGINE_TYPE_COPY18 = (0x0000001b), + RM_ENGINE_TYPE_COPY19 = (0x0000001c), + RM_ENGINE_TYPE_NVDEC0 = (0x0000001d), + RM_ENGINE_TYPE_NVDEC1 = (0x0000001e), + RM_ENGINE_TYPE_NVDEC2 = (0x0000001f), + RM_ENGINE_TYPE_NVDEC3 = (0x00000020), + RM_ENGINE_TYPE_NVDEC4 = (0x00000021), + RM_ENGINE_TYPE_NVDEC5 = (0x00000022), + RM_ENGINE_TYPE_NVDEC6 = (0x00000023), + RM_ENGINE_TYPE_NVDEC7 = (0x00000024), + RM_ENGINE_TYPE_NVENC0 = (0x00000025), + RM_ENGINE_TYPE_NVENC1 = (0x00000026), + RM_ENGINE_TYPE_NVENC2 = (0x00000027), + RM_ENGINE_TYPE_NVENC3 = (0x00000028), + RM_ENGINE_TYPE_VP = (0x00000029), + RM_ENGINE_TYPE_ME = (0x0000002a), + RM_ENGINE_TYPE_PPP = (0x0000002b), + RM_ENGINE_TYPE_MPEG = (0x0000002c), + RM_ENGINE_TYPE_SW = (0x0000002d), + RM_ENGINE_TYPE_TSEC = (0x0000002e), + RM_ENGINE_TYPE_VIC = (0x0000002f), + RM_ENGINE_TYPE_MP = (0x00000030), + RM_ENGINE_TYPE_SEC2 = (0x00000031), + RM_ENGINE_TYPE_HOST = (0x00000032), + RM_ENGINE_TYPE_DPU = (0x00000033), + RM_ENGINE_TYPE_PMU = (0x00000034), + RM_ENGINE_TYPE_FBFLCN = (0x00000035), + RM_ENGINE_TYPE_NVJPEG0 = (0x00000036), + RM_ENGINE_TYPE_NVJPEG1 = (0x00000037), + RM_ENGINE_TYPE_NVJPEG2 = (0x00000038), + RM_ENGINE_TYPE_NVJPEG3 = (0x00000039), + RM_ENGINE_TYPE_NVJPEG4 = (0x0000003a), + RM_ENGINE_TYPE_NVJPEG5 = (0x0000003b), + RM_ENGINE_TYPE_NVJPEG6 = (0x0000003c), + RM_ENGINE_TYPE_NVJPEG7 = (0x0000003d), + RM_ENGINE_TYPE_OFA0 = (0x0000003e), + RM_ENGINE_TYPE_OFA1 = (0x0000003f), + RM_ENGINE_TYPE_RESERVED40 = (0x00000040), + RM_ENGINE_TYPE_RESERVED41 = (0x00000041), + RM_ENGINE_TYPE_RESERVED42 = (0x00000042), + RM_ENGINE_TYPE_RESERVED43 = (0x00000043), + RM_ENGINE_TYPE_RESERVED44 = (0x00000044), + RM_ENGINE_TYPE_RESERVED45 = (0x00000045), + RM_ENGINE_TYPE_RESERVED46 = (0x00000046), + RM_ENGINE_TYPE_RESERVED47 = (0x00000047), + RM_ENGINE_TYPE_RESERVED48 = (0x00000048), + RM_ENGINE_TYPE_RESERVED49 = (0x00000049), + RM_ENGINE_TYPE_RESERVED4a = (0x0000004a), + RM_ENGINE_TYPE_RESERVED4b = (0x0000004b), + RM_ENGINE_TYPE_RESERVED4c = (0x0000004c), + RM_ENGINE_TYPE_RESERVED4d = (0x0000004d), + RM_ENGINE_TYPE_RESERVED4e = (0x0000004e), + RM_ENGINE_TYPE_RESERVED4f = (0x0000004f), + RM_ENGINE_TYPE_RESERVED50 = (0x00000050), + RM_ENGINE_TYPE_RESERVED51 = (0x00000051), + RM_ENGINE_TYPE_RESERVED52 = (0x00000052), + RM_ENGINE_TYPE_RESERVED53 = 
(0x00000053), + RM_ENGINE_TYPE_LAST = (0x00000054), +} RM_ENGINE_TYPE; + +// +// The duplicates in the RM_ENGINE_TYPE. Using define instead of putting them +// in the enum to make sure that each item in the enum has a unique number. +// +#define RM_ENGINE_TYPE_GRAPHICS RM_ENGINE_TYPE_GR0 +#define RM_ENGINE_TYPE_BSP RM_ENGINE_TYPE_NVDEC0 +#define RM_ENGINE_TYPE_MSENC RM_ENGINE_TYPE_NVENC0 +#define RM_ENGINE_TYPE_CIPHER RM_ENGINE_TYPE_TSEC +#define RM_ENGINE_TYPE_NVJPG RM_ENGINE_TYPE_NVJPEG0 + +#define RM_ENGINE_TYPE_COPY_SIZE 20 +#define RM_ENGINE_TYPE_NVENC_SIZE 4 +#define RM_ENGINE_TYPE_NVJPEG_SIZE 8 +#define RM_ENGINE_TYPE_NVDEC_SIZE 8 +#define RM_ENGINE_TYPE_OFA_SIZE 2 +#define RM_ENGINE_TYPE_GR_SIZE 8 + +// Indexed engines +#define RM_ENGINE_TYPE_COPY(i) (RM_ENGINE_TYPE_COPY0+(i)) +#define RM_ENGINE_TYPE_IS_COPY(i) (((i) >= RM_ENGINE_TYPE_COPY0) && ((i) < RM_ENGINE_TYPE_COPY(RM_ENGINE_TYPE_COPY_SIZE))) +#define RM_ENGINE_TYPE_COPY_IDX(i) ((i) - RM_ENGINE_TYPE_COPY0) + +#define RM_ENGINE_TYPE_NVENC(i) (RM_ENGINE_TYPE_NVENC0+(i)) +#define RM_ENGINE_TYPE_IS_NVENC(i) (((i) >= RM_ENGINE_TYPE_NVENC0) && ((i) < RM_ENGINE_TYPE_NVENC(RM_ENGINE_TYPE_NVENC_SIZE))) +#define RM_ENGINE_TYPE_NVENC_IDX(i) ((i) - RM_ENGINE_TYPE_NVENC0) + +#define RM_ENGINE_TYPE_NVDEC(i) (RM_ENGINE_TYPE_NVDEC0+(i)) +#define RM_ENGINE_TYPE_IS_NVDEC(i) (((i) >= RM_ENGINE_TYPE_NVDEC0) && ((i) < RM_ENGINE_TYPE_NVDEC(RM_ENGINE_TYPE_NVDEC_SIZE))) +#define RM_ENGINE_TYPE_NVDEC_IDX(i) ((i) - RM_ENGINE_TYPE_NVDEC0) + +#define RM_ENGINE_TYPE_NVJPEG(i) (RM_ENGINE_TYPE_NVJPEG0+(i)) +#define RM_ENGINE_TYPE_IS_NVJPEG(i) (((i) >= RM_ENGINE_TYPE_NVJPEG0) && ((i) < RM_ENGINE_TYPE_NVJPEG(RM_ENGINE_TYPE_NVJPEG_SIZE))) +#define RM_ENGINE_TYPE_NVJPEG_IDX(i) ((i) - RM_ENGINE_TYPE_NVJPEG0) + +#define RM_ENGINE_TYPE_OFA(i) (RM_ENGINE_TYPE_OFA0+(i)) +#define RM_ENGINE_TYPE_IS_OFA(i) (((i) >= RM_ENGINE_TYPE_OFA0) && ((i) < RM_ENGINE_TYPE_OFA(RM_ENGINE_TYPE_OFA_SIZE))) +#define RM_ENGINE_TYPE_OFA_IDX(i) ((i) - RM_ENGINE_TYPE_OFA0) + +#define RM_ENGINE_TYPE_IS_VIDEO(i) (RM_ENGINE_TYPE_IS_NVENC(i) | \ + RM_ENGINE_TYPE_IS_NVDEC(i) | \ + RM_ENGINE_TYPE_IS_NVJPEG(i) | \ + RM_ENGINE_TYPE_IS_OFA(i)) + +#define RM_ENGINE_TYPE_GR(i) (RM_ENGINE_TYPE_GR0 + (i)) +#define RM_ENGINE_TYPE_IS_GR(i) (((i) >= RM_ENGINE_TYPE_GR0) && ((i) < RM_ENGINE_TYPE_GR(RM_ENGINE_TYPE_GR_SIZE))) +#define RM_ENGINE_TYPE_GR_IDX(i) ((i) - RM_ENGINE_TYPE_GR0) + +#define RM_ENGINE_TYPE_IS_VALID(i) (((i) > (RM_ENGINE_TYPE_NULL)) && ((i) < (RM_ENGINE_TYPE_LAST))) + +// Engine Range defines +#define RM_ENGINE_RANGE_GR() rangeMake(RM_ENGINE_TYPE_GR(0), RM_ENGINE_TYPE_GR(RM_ENGINE_TYPE_GR_SIZE - 1)) +#define RM_ENGINE_RANGE_COPY() rangeMake(RM_ENGINE_TYPE_COPY(0), RM_ENGINE_TYPE_COPY(RM_ENGINE_TYPE_COPY_SIZE - 1)) +#define RM_ENGINE_RANGE_NVDEC() rangeMake(RM_ENGINE_TYPE_NVDEC(0), RM_ENGINE_TYPE_NVDEC(RM_ENGINE_TYPE_NVDEC_SIZE - 1)) +#define RM_ENGINE_RANGE_NVENC() rangeMake(RM_ENGINE_TYPE_NVENC(0), RM_ENGINE_TYPE_NVENC(RM_ENGINE_TYPE_NVENC_SIZE - 1)) +#define RM_ENGINE_RANGE_NVJPEG() rangeMake(RM_ENGINE_TYPE_NVJPEG(0), RM_ENGINE_TYPE_NVJPEG(RM_ENGINE_TYPE_NVJPEG_SIZE - 1)) +#define RM_ENGINE_RANGE_OFA() rangeMake(RM_ENGINE_TYPE_OFA(0), RM_ENGINE_TYPE_OFA(RM_ENGINE_TYPE_OFA_SIZE - 1)) + +// Bit Vectors +MAKE_BITVECTOR(ENGTYPE_BIT_VECTOR, RM_ENGINE_TYPE_LAST); + +#endif //_GPU_ENGINE_TYPE_H_ diff --git a/src/nvidia/inc/kernel/gpu/gpu_halspec.h b/src/nvidia/inc/kernel/gpu/gpu_halspec.h new file mode 100644 index 0000000..3287743 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_halspec.h 
@@ -0,0 +1,3 @@ + +#include "g_gpu_halspec_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gpu_resource.h b/src/nvidia/inc/kernel/gpu/gpu_resource.h new file mode 100644 index 0000000..4f25bcb --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_resource.h @@ -0,0 +1,3 @@ + +#include "g_gpu_resource_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gpu_resource_desc.h b/src/nvidia/inc/kernel/gpu/gpu_resource_desc.h new file mode 100644 index 0000000..9d8df25 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_resource_desc.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _GPU_RESOURCE_DESC_H_ +#define _GPU_RESOURCE_DESC_H_ + +#include "gpu/eng_desc.h" + +typedef struct GPU_RESOURCE_DESC +{ + NvU32 externalClassId; + ENGDESCRIPTOR engDesc; +} GPU_RESOURCE_DESC; + +// CLASSDESCRIPTOR is deprecated, please use GPU_RESOURCE_DESC +typedef struct GPU_RESOURCE_DESC CLASSDESCRIPTOR; + +#endif // _GPU_RESOURCE_DESC_H_ diff --git a/src/nvidia/inc/kernel/gpu/gpu_shared_data_map.h b/src/nvidia/inc/kernel/gpu/gpu_shared_data_map.h new file mode 100644 index 0000000..9b14027 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_shared_data_map.h @@ -0,0 +1,85 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef GPU_SHARED_DATA_MAP_H +#define GPU_SHARED_DATA_MAP_H + +#include "core/core.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "class/cl00de.h" + +#include "gpu/timer/tmr.h" + +// **************************************************************************** +// Type definitions +// **************************************************************************** +typedef struct RusdQueryCache RUSD_QUERY_CACHE; + +typedef struct GpuSharedDataMap { + MEMORY_DESCRIPTOR *pMemDesc; + NvP64 pMapBuffer; + NvU64 lastPolledDataMask; + NvU32 processId; + NvU32 pollingRegistryOverride; + NvU32 pollingFrequencyMs; + NvBool bPollFrequencyOverridden; + + TMR_EVENT *pRusdRefreshTmrEvent; + + NV00DE_SHARED_DATA data; + + // Private data to assist metrics query + RUSD_QUERY_CACHE *pRusdQueryCache; + NvU8 curGroup; + NvBool bWorkItemPending; +} GpuSharedDataMap; + +/** + * Start data write, updates seq to indicate write in progress and returns data struct to write into + * + * After updating data in the returned NV00DE_SHARED_DATA struct, + * call gpushareddataWriteFinish to mark data as valid. + */ +NV00DE_SHARED_DATA * gpushareddataWriteStart_INTERNAL(OBJGPU *pGpu, NvU64 offset); + +#define gpushareddataWriteStart(pGpu, field) \ + &(gpushareddataWriteStart_INTERNAL(pGpu, NV_OFFSETOF(NV00DE_SHARED_DATA, field))->field) + +/** + * Finish data write, updates seq to indicate write is finished and data is valid. + */ +void gpushareddataWriteFinish_INTERNAL(OBJGPU *pGpu, NvU64 offset); + +#define gpushareddataWriteFinish(pGpu, field) \ + gpushareddataWriteFinish_INTERNAL(pGpu, NV_OFFSETOF(NV00DE_SHARED_DATA, field)) + +/*! + * @brief RUSD settings data + */ +typedef struct +{ + NvU64 permanentPolledDataMask; +} GPU_DB_RUSD_SETTINGS; + +#endif /* GPU_SHARED_DATA_MAP_H */ + diff --git a/src/nvidia/inc/kernel/gpu/gpu_timeout.h b/src/nvidia/inc/kernel/gpu/gpu_timeout.h new file mode 100644 index 0000000..4e94479 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_timeout.h @@ -0,0 +1,155 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
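+ */
+
+/*
+ * The following is an illustrative, non-normative sketch of a poll loop
+ * built on the timeout API declared below; conditionMet() is hypothetical.
+ */
+#if 0 // usage sketch only, not built
+    RMTIMEOUT timeout;
+    timeoutSet(&pGpu->timeoutData, &timeout, GPU_TIMEOUT_DEFAULT,
+               GPU_TIMEOUT_FLAGS_OSTIMER);
+    while (!conditionMet(pGpu))
+    {
+        if (timeoutCheck(&pGpu->timeoutData, &timeout, __LINE__) == NV_ERR_TIMEOUT)
+            break;
+    }
+#endif
+
+/*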
+ */ +#ifndef _GPU_TIMEOUT_H_ +#define _GPU_TIMEOUT_H_ + +/* ------------------------ Includes ---------------------------------------- */ +#include "core/core.h" + + +/* ------------------------ Forward Definitions ----------------------------- */ +struct OBJGPU; + +/* ------------------------ Macros ------------------------------------------ */ +/*! + * @note GPU_TIMEOUT_DEFAULT is different per platform and can range anywhere + * from 2 to 30 secs depending on the GPU Mode and Platform. + * By default if GPU_TIMEOUT_DEFAULT is specified, we use the ThreadState + * unless explicitly told not to via GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE. + */ +#define GPU_TIMEOUT_DEFAULT 0 + +/*! + * gpuSetTimeout Flags - saved in pTimeout->flags + */ +#define GPU_TIMEOUT_FLAGS_DEFAULT NVBIT(0) //!< default timeout mechanism as set by platform +#define GPU_TIMEOUT_FLAGS_USE_THREAD_STATE NVBIT(1) //!< default timeout time used - use the ThreadState +#define GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE NVBIT(2) //!< even if default time was used - skip the ThreadState +#define GPU_TIMEOUT_FLAGS_OSTIMER NVBIT(3) //!< osGetSystemTime() +#define GPU_TIMEOUT_FLAGS_OSDELAY NVBIT(4) //!< osDelay() +#define GPU_TIMEOUT_FLAGS_TMR NVBIT(5) //!< tmrGetCurrentTime() +#define GPU_TIMEOUT_FLAGS_BYPASS_JOURNAL_LOG NVBIT(6) //!< bypass timeout logging in the RM journal +#define GPU_TIMEOUT_FLAGS_TMRDELAY NVBIT(7) //!< tmrDelay() +#define GPU_TIMEOUT_FLAGS_BYPASS_CPU_YIELD NVBIT(8) //!< don't explicitly let other threads run first +/*! + * gpuCheckTimeout Flags set in pTimeout->flags upon NV_ERR_TIMEOUT + */ +#define GPU_TIMEOUT_FLAGS_STATUS_LOCAL_TIMEOUT NVBIT(30) +#define GPU_TIMEOUT_FLAGS_STATUS_THREAD_STATE_TIMEOUT NVBIT(31) + +/* ------------------------ Datatypes --------------------------------------- */ +/*! + * Timeout support. + */ +typedef struct +{ + NvU64 timeout; + NvU32 flags; + OBJGPU *pTmrGpu; //!< The GPU whose timer is used in SLI mode + // Defined only if flags is set to _TMR or _TMRDELAY +} RMTIMEOUT, +*PRMTIMEOUT; + +/*! + * @brief GPU timeout related data. + */ +typedef struct +{ + volatile NvBool bDefaultOverridden; + volatile NvBool bScaled; + volatile NvU32 defaultus; //!< Default timeout in us + volatile NvU32 defaultResetus; //!< Default timeout reset value in us + // + // Default timeout reset value in Us for the reset FSM state transitions + // between ASSERT -> ASSERTED and DEASSERT -> DEASSERTED. + // This is for presilicon test only. + // + volatile NvBool bDefaultResetFSMStateTransitionOverridden; + volatile NvU32 defaultResetFSMStateTransitionUs; + NvU32 defaultFlags; //!< Default timeout mode + NvU32 scale; //!< Emulation/Simulation multiplier + OBJGPU *pGpu; +} TIMEOUT_DATA; + +/*! + * @brief A prototype of the condition evaluation function required by the + * @ref gpuTimeoutCondWait_IMPL interface. + * + * @note Function is responsible for evaluation of the encapsulated condition + * as well as for triggering of required prerequisites (if any). + * For example if condition depends on a PMU issued message function + * should assure proper servicing of the PMU interrupts. + * + * @param[in] pGpu OBJGPU pointer for this conditional function + * @param[in] pVoid + * Void parameter pointer which can be used to pass in the + * pCondData from @ref gpuTimeoutCondWait_IMPL(). + * + * @return NV_TRUE + * Waited condition has happened and @ref + * gpuTimeoutCondWait_IMPL() may return to caller. 
+ * @return NV_FALSE + * Waited condition has not happened and @ref + * gpuTimeoutCondWait_IMPL() should continue to wait until this + * interface returns NV_TRUE or timeout occurs (whichever occurs + * first). + */ +typedef NvBool GpuWaitConditionFunc(OBJGPU *pGpu, void *pVoid); + +/* ------------------------ Function Prototypes ----------------------------- */ + +void timeoutInitializeGpuDefault(TIMEOUT_DATA *pTD, OBJGPU *pGpu); + +void timeoutRegistryOverride(TIMEOUT_DATA *pTD, OBJGPU *pGpu); + +void timeoutOverride(TIMEOUT_DATA *pTD, NvBool bOverride, NvU32 timeoutMs); + +/*! Initialize the RMTIMEOUT structure with the selected timeout scheme. */ +void timeoutSet(TIMEOUT_DATA *, RMTIMEOUT *, NvU32 timeoutUs, NvU32 flags); + +/*! Check if the passed in RMTIMEOUT struct has expired. */ +NV_STATUS timeoutCheck(TIMEOUT_DATA *, RMTIMEOUT *, NvU32); + +/*! Wait for the condition to become satisfied while checking for the timeout */ +NV_STATUS timeoutCondWait(TIMEOUT_DATA *, RMTIMEOUT *, GpuWaitConditionFunc *, void *pCondData, NvU32); + +/*! Scales timeout values depending on the environment we are running in. */ +static NV_INLINE NvU32 timeoutApplyScale(TIMEOUT_DATA *pTD, NvU32 timeout) +{ + return timeout * pTD->scale; +} + + +// Deprecated macros +#define gpuSetTimeout(g,a,t,c) timeoutSet(&(g)->timeoutData, t, a, c) +#define gpuCheckTimeout(g,t) timeoutCheck(&(g)->timeoutData, t, __LINE__) +#define gpuScaleTimeout(g,a) timeoutApplyScale(&(g)->timeoutData, a) +#define gpuTimeoutCondWait(g,a,b,t) timeoutCondWait(&(g)->timeoutData, t, a, b, __LINE__) + +// +// In SCSIM simulation platform, both CPU and GPU are simulated and the reg write/read itself +// takes more time. This helper macro handles it with increased timeout value. +// +#define GPU_ENG_RESET_TIMEOUT_VALUE(g, t) ((gpuIsSelfHosted(g) && IS_SIMULATION(g)) ? 1000 : (t)) + +#endif // _GPU_TIMEOUT_H_ diff --git a/src/nvidia/inc/kernel/gpu/gpu_user_shared_data.h b/src/nvidia/inc/kernel/gpu/gpu_user_shared_data.h new file mode 100644 index 0000000..c4e4763 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_user_shared_data.h @@ -0,0 +1,3 @@ + +#include "g_gpu_user_shared_data_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gpu_uuid.h b/src/nvidia/inc/kernel/gpu/gpu_uuid.h new file mode 100644 index 0000000..6886b81 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_uuid.h @@ -0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _GPUUUID_H_ +#define _GPUUUID_H_ + +#include "core/core.h" +#include "nvCpuUuid.h" + +// +// GPU unique ID sizes. RM_SHA1_GID_SIZE uses the first 16 bytes of +// the SHA-1 digest (this is consistent with the way canonical UUIDs are +// constructed) +// +#define RM_SHA1_GID_SIZE 16 + +// UUID conversion routine: +NV_STATUS transformGidToUserFriendlyString(const NvU8 *pGidData, NvU32 gidSize, NvU8 **ppGidString, + NvU32 *pGidStrlen, NvU32 gidFlags, NvU8 prefix); + +NV_STATUS nvGenerateGpuUuid(NvU16 chipId, NvU64 pdi, NvUuid *pUuid); + +NV_STATUS nvGenerateSmcUuid(NvU16 chipId, NvU64 pdi, + NvU32 swizzId, NvU32 syspipeId, NvUuid *pUuid); + +// 'G' 'P' 'U' '-'(x5), '\0x0', extra = 9 +#define NV_UUID_STR_LEN ((NV_UUID_LEN << 1) + 9) + +#define RM_UUID_PREFIX_GPU 0U +#define RM_UUID_PREFIX_MIG 1U +#define RM_UUID_PREFIX_DLA 2U + +void nvGetUuidString(const NvUuid *pUuid, NvU8 prefix, char *pUuidStr); + +#endif // _GPUUUID_H_ diff --git a/src/nvidia/inc/kernel/gpu/gsp/gsp_trace_rats_macro.h b/src/nvidia/inc/kernel/gpu/gsp/gsp_trace_rats_macro.h new file mode 100644 index 0000000..8000ef2 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gsp/gsp_trace_rats_macro.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file defines macros to place tracepoints for RATS (RM All-around Trace + * System). The names of the functions and variables associated with this are + * temporary as we begin to unify all RM tracing tools under one system. 
+ */ + +#ifndef GSP_TRACE_RATS_MACRO_H +#define GSP_TRACE_RATS_MACRO_H + +#include "core/core.h" + +#define GSP_TRACING_RATS_ENABLED 0 +#define GSP_TRACE_RATS_ADD_RECORD(recordIdentifier, pGpu, info) (void) 0 + +#define KERNEL_GSP_TRACING_RATS_ENABLED 0 + +#endif // GSP_TRACE_RATS_MACRO_H diff --git a/src/nvidia/inc/kernel/gpu/gsp/kernel_gsp_trace_rats.h b/src/nvidia/inc/kernel/gpu/gsp/kernel_gsp_trace_rats.h new file mode 100644 index 0000000..cd1dcea --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gsp/kernel_gsp_trace_rats.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef KERNEL_RATS_GSP_TRACE_H +#define KERNEL_RATS_GSP_TRACE_H + +#include "core/core.h" +#include "containers/multimap.h" +#include "class/cl90cdtrace.h" +#include "rmapi/event_buffer.h" +#include "rmapi/rmapi.h" + +typedef struct +{ + EventBuffer *pEventBuffer; + NvHandle hClient; + NvHandle hNotifier; + NvHandle hEventBuffer; + NvU64 pUserInfo; + NvU32 *message_buffer; + MEMORY_DESCRIPTOR *pMemDesc; +} NV_EVENT_BUFFER_BIND_POINT_GSP_TRACE; + +MAKE_MULTIMAP(GspTraceEventBufferBindMultiMap, NV_EVENT_BUFFER_BIND_POINT_GSP_TRACE); + +void gspTraceNotifyAllConsumers(OBJGPU *pGpu, void *pArgs); + +void gspTraceEventBufferLogRecord(OBJGPU *pGpu, NV_RATS_GSP_TRACE_RECORD *intrTraceRecord); + +void gspTraceServiceVgpuEventTracing(OBJGPU *pGpu); + +NV_STATUS gspTraceAddBindpoint(OBJGPU *pGpu, + RsClient *pClient, + RsResourceRef *pEventBufferRef, + NvHandle hNotifier, + NvU64 tracepointMask, + NvU32 gspLoggingBufferSize, + NvU32 gspLoggingBufferWatermark); + +void gspTraceRemoveBindpoint(OBJGPU *pGpu, NvU64 uid, NV_EVENT_BUFFER_BIND_POINT_GSP_TRACE *pBind); + +void gspTraceRemoveAllBindpoints(EventBuffer *pEventBuffer); + +#endif diff --git a/src/nvidia/inc/kernel/gpu/gsp/message_queue.h b/src/nvidia/inc/kernel/gpu/gsp/message_queue.h new file mode 100644 index 0000000..66c828c --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gsp/message_queue.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * GSP MESSAGE QUEUE + */ + +#ifndef _MESSAGE_QUEUE_H_ +#define _MESSAGE_QUEUE_H_ + +// Used for indexing into the MESSAGE_QUEUE_COLLECTION array. +#define RPC_TASK_RM_QUEUE_IDX 0 +#define RPC_QUEUE_COUNT 1 + +#endif // _MESSAGE_QUEUE_H_ diff --git a/src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_commands_responses.h b/src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_commands_responses.h new file mode 100644 index 0000000..a809902 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_commands_responses.h @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + + +#ifndef _KERN_HFRP_COMMANDS_RESPONSES_H_ +#define _KERN_HFRP_COMMANDS_RESPONSES_H_ + +#include "nvtypes.h" + +// default parameter values +#define HFRP_DEFAULT_CLIENT_VERSION 0U +#define HFRP_DEFAULT_SERVER_VERSION 0U + + + + + +/*! + * CMD_SOC_SET_DEVICE_POWER_STATE + * + * This command sets device power state for Nvidia IPs. + * It is expected that HFRP will follow device power state handling sequence specific to each device. + * + * Command Params: + * deviceId + * Specifies the device ID whose power state needs to be changed. + * 0 - iGPU (This includes iGPU and Display) + * 1 - DLA + * 2 - HDA + * powerState + * 0 - D0 i.e. 
Power up
+ *     1 - D3 i.e. Power down
+ *
+ */
+#define HFRP_CMD_SOC_SET_DEVICE_POWER_STATE 303U
+
+#pragma pack(1)
+typedef struct
+{
+    NvU8 deviceId;
+    NvU8 powerState;
+} CMD_SOC_SET_DEVICE_POWER_STATE_PARAMS;
+#pragma pack()
+
+#define NV_CMD_SOC_SET_DEVICE_POWER_STATE_PARAM_DEVICE_ID_IGPU 0U
+#define NV_CMD_SOC_SET_DEVICE_POWER_STATE_PARAM_DEVICE_ID_DLA  1U
+#define NV_CMD_SOC_SET_DEVICE_POWER_STATE_PARAM_DEVICE_ID_HDA  2U
+
+#define NV_CMD_SOC_SET_DEVICE_POWER_STATE_PARAM_POWER_STATE_D0 0U
+#define NV_CMD_SOC_SET_DEVICE_POWER_STATE_PARAM_POWER_STATE_D3 1U
+
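+//
+// Example: powering down the iGPU over HFRP (illustrative sketch; the packed
+// params struct travels as the command payload, khfrpPostCommandBlocking is
+// declared in kernel_hfrp.h, and error handling is elided here):
+//
+//     CMD_SOC_SET_DEVICE_POWER_STATE_PARAMS params;
+//     NvU16     responseStatus;
+//     NV_STATUS status;
+//
+//     params.deviceId   = NV_CMD_SOC_SET_DEVICE_POWER_STATE_PARAM_DEVICE_ID_IGPU;
+//     params.powerState = NV_CMD_SOC_SET_DEVICE_POWER_STATE_PARAM_POWER_STATE_D3;
+//
+//     status = khfrpPostCommandBlocking(pHfrp, HFRP_CMD_SOC_SET_DEVICE_POWER_STATE,
+//                                       &params, sizeof(params),
+//                                       &responseStatus, NULL, NULL, NULL);
+//
+#endif // _KERN_HFRP_COMMANDS_RESPONSES_H_
diff --git a/src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_common.h b/src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_common.h
new file mode 100644
index 0000000..e7efc0d
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_common.h
@@ -0,0 +1,130 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.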
+ */
+
+#ifndef _KERN_HFRP_COMMON_H_
+#define _KERN_HFRP_COMMON_H_
+
+#include "nvtypes.h"
+
+typedef enum
+{
+    HFRP_COMMAND_MAILBOX_INDEX_PMU = 0,
+    HFRP_RESPONSE_MAILBOX_INDEX_PMU = 1,
+    HFRP_COMMAND_MAILBOX_INDEX_SHIM = 2,
+    HFRP_RESPONSE_MAILBOX_INDEX_SHIM = 2,
+    HFRP_COMMAND_MAILBOX_INDEX_DISPLAY = 3,
+    HFRP_RESPONSE_MAILBOX_INDEX_DISPLAY = 4
+} HFRP_MAILBOX_INDEX;
+
+// Size of the register range for which the aperture is created
+#define HFRP_MAILBOX_ACCESS_RANGE 0x200
+
+// Mailbox Layout Address Offsets, 1.0 version
+#define HFRP_COMMAND_BUFFER_HEAD_PTR_ADDR_ONE_MAILBOX_INTERFACE  0x110
+#define HFRP_COMMAND_BUFFER_TAIL_PTR_ADDR_ONE_MAILBOX_INTERFACE  0x189
+#define HFRP_COMMAND_BUFFER_START_ADDR_ONE_MAILBOX_INTERFACE     0x114
+#define HFRP_COMMAND_BUFFER_END_ADDR_ONE_MAILBOX_INTERFACE       0x187
+#define HFRP_RESPONSE_BUFFER_HEAD_PTR_ADDR_ONE_MAILBOX_INTERFACE 0x188
+#define HFRP_RESPONSE_BUFFER_TAIL_PTR_ADDR_ONE_MAILBOX_INTERFACE 0x111
+#define HFRP_RESPONSE_BUFFER_START_ADDR_ONE_MAILBOX_INTERFACE    0x18C
+#define HFRP_RESPONSE_BUFFER_END_ADDR_ONE_MAILBOX_INTERFACE      0x1FF
+
+#define HFRP_COMMAND_BUFFER_HEAD_PTR_ADDR_TWO_MAILBOX_INTERFACE  0x110
+#define HFRP_COMMAND_BUFFER_TAIL_PTR_ADDR_TWO_MAILBOX_INTERFACE  (0x111 + HFRP_MAILBOX_ACCESS_RANGE)
+#define HFRP_COMMAND_BUFFER_START_ADDR_TWO_MAILBOX_INTERFACE     0x114
+#define HFRP_COMMAND_BUFFER_END_ADDR_TWO_MAILBOX_INTERFACE       0x1FF
+#define HFRP_RESPONSE_BUFFER_HEAD_PTR_ADDR_TWO_MAILBOX_INTERFACE (0x110 + HFRP_MAILBOX_ACCESS_RANGE)
+#define HFRP_RESPONSE_BUFFER_TAIL_PTR_ADDR_TWO_MAILBOX_INTERFACE 0x111
+#define HFRP_RESPONSE_BUFFER_START_ADDR_TWO_MAILBOX_INTERFACE    (0x114 + HFRP_MAILBOX_ACCESS_RANGE)
+#define HFRP_RESPONSE_BUFFER_END_ADDR_TWO_MAILBOX_INTERFACE      (0x1FF + HFRP_MAILBOX_ACCESS_RANGE)
+
+#define HFRP_IRQ_IN_SET_ADDR  0x100
+#define HFRP_IRQ_OUT_SET_ADDR 0x104
+#define HFRP_IRQ_IN_CLR_ADDR  0x108
+#define HFRP_IRQ_OUT_CLR_ADDR 0x10C
+
+// Size of message (command or response) header in bytes
+#define HFRP_MESSAGE_HEADER_BYTE_SIZE 4U
+
+#define HFRP_MESSAGE_FIELD_SIZE            7U : 0U
+#define HFRP_MESSAGE_FIELD_SEQUENCE_ID     17U : 8U
+#define HFRP_MESSAGE_FIELD_INDEX_OR_STATUS 27U : 18U
+
+//
+// Maximum values of Sequence Id index and Sequence Id Array index (each
+// Sequence Id array element has 32 bits that represent 32 Sequence Ids)
+//
+#define HFRP_NUMBER_OF_SEQUENCEID_INDEX 0x400
+#define HFRP_NUMBER_OF_SEQUENCEID_ARRAY_INDEX (HFRP_NUMBER_OF_SEQUENCEID_INDEX / 32U)
+#define HFRP_ASYNC_NOTIFICATION_SEQUENCEID_INDEX 0x3FF
+
+// HFRP IRQ Reset and Doorbell bit indices
+#define HFRP_IRQ_RESET_BIT_INDEX    0U
+#define HFRP_IRQ_DOORBELL_BIT_INDEX 1U
+
+// Mailbox Interface types
+#define HFRP_ONE_MAILBOX_INTERFACE 0U
+#define HFRP_TWO_MAILBOX_INTERFACE 1U
+
+// Mailbox flags
+#define HFRP_COMMAND_MAILBOX_FLAG  0U
+#define HFRP_RESPONSE_MAILBOX_FLAG 1U
+
+// macros for supporting DRF operations
+#define NV_HFRP_BYTE_FIELD(x) (8U * (x) + 7U) : (8U * (x))
+
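+//
+// For example, a 32-bit message header can be packed and unpacked with the
+// REF_NUM / REF_VAL helpers from nvmisc.h (illustrative sketch; the variable
+// names are placeholders):
+//
+//     NvU32 header = REF_NUM(HFRP_MESSAGE_FIELD_SIZE, payloadSize) |
+//                    REF_NUM(HFRP_MESSAGE_FIELD_SEQUENCE_ID, sequenceId) |
+//                    REF_NUM(HFRP_MESSAGE_FIELD_INDEX_OR_STATUS, commandIndex);
+//     NvU32 decodedSeqId = REF_VAL(HFRP_MESSAGE_FIELD_SEQUENCE_ID, header);
+//
+typedef struct
+{
+    NvU32 hfrpCommandBufferHeadPtrAddr;
+    NvU32 hfrpCommandBufferTailPtrAddr;
+    NvU32 hfrpCommandBufferStartAddr;
+    NvU32 hfrpCommandBufferEndAddr;
+    NvU32 hfrpResponseBufferHeadPtrAddr;
+    NvU32 hfrpResponseBufferTailPtrAddr;
+    NvU32 hfrpResponseBufferStartAddr;
+    NvU32 hfrpResponseBufferEndAddr;
+    NvU32 hfrpIrqInSetAddr;
+    NvU32 hfrpIrqOutSetAddr;
+    NvU32 hfrpIrqInClrAddr;
+    NvU32 hfrpIrqOutClrAddr;
+} HFRP_MAILBOX_IO_INFO;
+
+typedef struct
+{
+    NvU32 sequenceIdState[HFRP_NUMBER_OF_SEQUENCEID_ARRAY_INDEX];
+    NvU8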
*pResponsePayloadArray[HFRP_NUMBER_OF_SEQUENCEID_INDEX]; + NvU16 *pResponseStatusArray[HFRP_NUMBER_OF_SEQUENCEID_INDEX]; + NvU32 *pResponsePayloadSizeArray[HFRP_NUMBER_OF_SEQUENCEID_INDEX]; + NV_STATUS *pStatusArray[HFRP_NUMBER_OF_SEQUENCEID_INDEX]; + NvU8 sequenceIdArrayIndex; +} HFRP_SEQUENCEID_INFO; + +typedef struct +{ + HFRP_MAILBOX_IO_INFO mailboxIoInfo; + HFRP_SEQUENCEID_INFO sequenceIdInfo; +} HFRP_INFO; + +#endif // _KERN_HFRP_COMMON_H_ diff --git a/src/nvidia/inc/kernel/gpu/hfrp/kernel_hfrp.h b/src/nvidia/inc/kernel/gpu/hfrp/kernel_hfrp.h new file mode 100644 index 0000000..59573b0 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/hfrp/kernel_hfrp.h @@ -0,0 +1,118 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kernel_hfrp_nvoc.h" + +#ifndef _KERNELHFRP_H_ +#define _KERNELHFRP_H_ + +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvmisc.h" +#include "utils/nvprintf.h" + +#include "os/os.h" +#include "gpu/eng_state.h" +#include "gpu/gpu.h" +#include "gpu/hfrp/kern_hfrp_common.h" + +// Total number of HFRP Mailboxes available for the interface +#define HFRP_NUMBER_OF_MAILBOXES 2U + +// Maximum Payload size for a message +#define HFRP_MAX_PAYLOAD_SIZE 50U + +#define HFRP_COMMAND_MAILBOX_INDEX HFRP_COMMAND_MAILBOX_INDEX_DISPLAY +#define HFRP_RESPONSE_MAILBOX_INDEX HFRP_RESPONSE_MAILBOX_INDEX_DISPLAY + +// +// Maximum values of Sequence Id index and Sequence Id Array index (each +// Sequence Id array element has 32 bits that represent 32 Sequence Ids) +// +#define HFRP_NUMBER_OF_SEQUENCEID_INDEX 0x400 +#define HFRP_NUMBER_OF_SEQUENCEID_ARRAY_INDEX (HFRP_NUMBER_OF_SEQUENCEID_INDEX / 32U) +#define HFRP_ASYNC_NOTIFICATION_SEQUENCEID_INDEX 0x3FF + +NVOC_PREFIX(khfrp) class KernelHFRP: OBJENGSTATE +{ +public: + /*! HFRP Create Object */ + virtual NV_STATUS khfrpStatePreInitLocked(OBJGPU *pGpu, KernelHFRP *pHfrp); + + virtual NV_STATUS khfrpConstructEngine(OBJGPU *pGpu, KernelHFRP *pHfrp, ENGDESCRIPTOR engDesc); + + /*! 
HFRP Destructor */
+    void khfrpDestruct(KernelHFRP *pHfrp);
+
+    void khfrpCommonConstruct(KernelHFRP *pHfrp);
+
+    NV_STATUS khfrpIoApertureConstruct(OBJGPU *pGpu, KernelHFRP *pHfrp);
+
+    void khfrpIoApertureDestruct(KernelHFRP *pHfrp, NvU32 index);
+
+    NvU32 khfrpReadBit(KernelHFRP *pHfrp, NvU32 virtualAddr, NvU32 bitIndex);
+
+    void khfrpWriteBit(KernelHFRP *pHfrp, NvU32 virtualAddr, NvU32 bitIndex, NvU32 data);
+
+    NV_STATUS khfrpMailboxQueueMessage(KernelHFRP *pHfrp, NvU32 messageHeader, NvU8 *pPayloadArray,
+                                       NvU32 payloadSize, NvU32 mailboxFlag);
+
+    void khfrpServiceEvent(KernelHFRP *pHfrp);
+
+    NvU32 khfrpAllocateSequenceId(KernelHFRP *pHfrp, NvU16 *pResponseStatus, void *pResponsePayload,
+                                  NvU32 *pResponsePayloadSize, NV_STATUS *pStatus, NvU32 *pSequenceId);
+
+    void khfrpFreeSequenceId(KernelHFRP *pHfrp, NvU32 index);
+
+    NvBool khfrpIsSequenceIdFree(KernelHFRP *pHfrp, NvU32 index);
+
+    NV_STATUS khfrpPollOnIrqWrapper(KernelHFRP *pHfrp, NvU32 irqRegAddr, NvU32 bitIndex, NvBool bData);
+
+    NV_STATUS khfrpPollOnIrqRm(KernelHFRP *pHfrp, NvU32 irqRegAddr, NvU32 bitIndex, NvBool bData);
+
+    NV_STATUS khfrpPostCommandBlocking(KernelHFRP *pHfrp, NvU16 commandIndex, void *pCommandPayload, NvU32 commandPayloadSize,
+                                       NvU16 *pResponseStatus, void *pResponsePayload, NvU32 *pResponsePayloadSize, NV_STATUS *pStatus);
+
+    NV_STATUS khfrpInterfaceReset(KernelHFRP *pHfrp);
+
+    NVOC_PROPERTY NvBool PDB_PROP_KHFRP_IS_ENABLED;
+    NVOC_PROPERTY NvBool PDB_PROP_KHFRP_HDA_IS_ENABLED;
+
+    NvU32 khfrpPrivBase[5];
+    NvU32 khfrpIntrCtrlReg[5];
+    IoAperture *pAperture[HFRP_NUMBER_OF_MAILBOXES];
+    HFRP_INFO khfrpInfo;
+};
+
+#define HFRP_REG_RD32(pKernelHfrp, virtualAddr)                               \
+    REG_RD32(pKernelHfrp->pAperture[virtualAddr / HFRP_MAILBOX_ACCESS_RANGE], \
+             virtualAddr % HFRP_MAILBOX_ACCESS_RANGE)
+
+#define HFRP_REG_WR32(pKernelHfrp, virtualAddr, data32)                       \
+    REG_WR32(pKernelHfrp->pAperture[virtualAddr / HFRP_MAILBOX_ACCESS_RANGE], \
+             virtualAddr % HFRP_MAILBOX_ACCESS_RANGE, data32)
+
+#define HFRP_POLL_ON_IRQ(pKernelHfrp, irqRegAddr, bitIndex, bData) \
+    khfrpPollOnIrqRm(pKernelHfrp, irqRegAddr, bitIndex, bData)
+
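+//
+// A "virtual address" here is a byte offset into the combined mailbox register
+// space; the macros above split it into an aperture index and an offset within
+// that aperture. For example (illustrative sketch):
+//
+//     NvU32 tail = HFRP_REG_RD32(pKernelHfrp,
+//                                HFRP_COMMAND_BUFFER_TAIL_PTR_ADDR_TWO_MAILBOX_INTERFACE);
+//
+// reads offset 0x111 within pAperture[1], because 0x111 plus
+// HFRP_MAILBOX_ACCESS_RANGE (0x200) falls in the second aperture's range.
+//
+#endif // _KERNELHFRP_H_
diff --git a/src/nvidia/inc/kernel/gpu/intr/intr_common.h b/src/nvidia/inc/kernel/gpu/intr/intr_common.h
new file mode 100644
index 0000000..a1c2039
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/intr/intr_common.h
@@ -0,0 +1,49 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.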
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef INTR_COMMON_H +#define INTR_COMMON_H 1 +/*! Common defines used by both Intr and OBJINTRABLE */ + + +/*! Kinds of interrupts that a unit can have. + * + * Different interrupt vectors can route to the same MC_ENGINE_IDX_* (and its + * associated #IntrService). + * This enum is used to disambiguate which handler function within an + * #IntrService should be called depending on the actual interrupt vector. + */ +typedef enum { + /*! + * Legacy concept of "stalling" interrupts. + * + * These may have a RETRIGGER mechanism. + */ + INTR_KIND_INTERRUPT, + /*! Notification "non-stalling" interrupts. */ + INTR_KIND_NOTIFICATION, + INTR_KIND_COUNT +} INTR_KIND; + + +#endif /* ifndef INTR_COMMON_H */ diff --git a/src/nvidia/inc/kernel/gpu/kern_gpu_power.h b/src/nvidia/inc/kernel/gpu/kern_gpu_power.h new file mode 100644 index 0000000..138ec7d --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/kern_gpu_power.h @@ -0,0 +1,73 @@ + /* + * SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef KERN_GPU_POWER_H +#define KERN_GPU_POWER_H + +#include "ctrl/ctrl2080/ctrl2080power.h" // NV2080_CTRL_GC6_FLAVOR_ID_MAX +#include "diagnostics/profiler.h" + +typedef enum +{ + GPU_GC6_STATE_POWERED_ON = 0 , + GPU_GC6_STATE_EXITED = GPU_GC6_STATE_POWERED_ON , + GPU_GC6_STATE_ENTERING , + GPU_GC6_STATE_ENTERING_FAILED , + GPU_GC6_STATE_ENTERED , + GPU_GC6_STATE_EXITING , + GPU_GC6_STATE_EXITING_FAILED , +} GPU_GC6_STATE; + +// TODO-SC use mask for the bool variables +typedef struct +{ + NvU32 refCount; + NvU16 GC6PerstDelay; // waiting time for Upstream Port of GPU, + // before asserting perst# signal, + // during RTD3/GC6 Entry. + NvU16 GC6TotalBoardPower; // Power required by GPU to sustain RTD3/GC6. 
+    GPU_GC6_STATE currentState;
+    NvU32 executedStepMask;                        // step mask executed during entry sequence
+    NvU32 stepMask[NV2080_CTRL_GC6_FLAVOR_ID_MAX]; // step mask cache
+} _GPU_GC6_STATE;
+
+// Macros for GPU_GC6_STATE
+#define IS_GPU_GC6_STATE_POWERED_ON(obj) (obj->gc6State.currentState == GPU_GC6_STATE_POWERED_ON)
+#define IS_GPU_GC6_STATE_EXITED(obj)     (obj->gc6State.currentState == GPU_GC6_STATE_EXITED)
+#define IS_GPU_GC6_STATE_ENTERING(obj)   (obj->gc6State.currentState == GPU_GC6_STATE_ENTERING)
+#define IS_GPU_GC6_STATE_ENTERED(obj)    (obj->gc6State.currentState == GPU_GC6_STATE_ENTERED)
+#define IS_GPU_GC6_STATE_EXITING(obj)    (obj->gc6State.currentState == GPU_GC6_STATE_EXITING)
+
+#define SET_GPU_GC6_STATE(obj, state) (obj->gc6State.currentState = state)
+#define SET_GPU_GC6_STATE_AND_LOG(obj, state)                         \
+    do {                                                              \
+        SET_GPU_GC6_STATE(obj, state);                                \
+        RMTRACE_GPU(_GC6_STATE, obj->gpuId, state, 0, 0, 0, 0, 0, 0); \
+    } while(0)
+
+// Macro to check if a given GC6 step ID is set
+#define GPU_IS_GC6_STEP_ID_SET(stepId, stepMask) \
+    ((NVBIT(NV2080_CTRL_GC6_STEP_ID_##stepId) & (stepMask)) != 0)
+
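+//
+// For example, GC6 exit code can check whether the GPU-off step actually ran
+// during entry (illustrative sketch; GPU_OFF stands in for one of the
+// NV2080_CTRL_GC6_STEP_ID_* step IDs):
+//
+//     if (GPU_IS_GC6_STEP_ID_SET(GPU_OFF, pGpu->gc6State.executedStepMask))
+//     {
+//         // the GPU-off step was executed during the entry sequence
+//     }
+//
+#endif // KERN_GPU_POWER_H
diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h b/src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h
new file mode 100644
index 0000000..4225666
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h
@@ -0,0 +1,3 @@
+
+#include "g_context_dma_nvoc.h"
+
diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h b/src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h
new file mode 100644
index 0000000..a977c7d
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h
@@ -0,0 +1,161 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.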
+ */ + +#ifndef _HEAP_BASE_H_ +#define _HEAP_BASE_H_ + +#include "nvtypes.h" +#include "core/prelude.h" +#include "gpu/mem_mgr/mem_desc.h" + +// Contains the minimal set of resources used to compute a PTE kind +typedef struct _def_fb_alloc_page_format +{ + NvU32 attr; + NvU32 attr2; + NvU32 flags; + NvU32 kind; + NvU32 type; +} FB_ALLOC_PAGE_FORMAT; + +// +// FB allocation resources structure +// Need to be allocated from heap +// +typedef struct _def_fb_alloc_info +{ + NvU32 owner; + NvU32 hwResId; + NvU32 height; + NvU32 width; + NvU32 pitch; + NvU64 size; + NvU64 align; + NvU64 alignPad; + NvU64 pad; + NvU64 offset; + NvU32 internalflags; + NvU32 retAttr; + NvU32 retAttr2; + NvU32 format; + NvU32 comprCovg; + NvU32 zcullCovg; + NvU32 uncompressedKind; + NvU32 compPageShift; + NvU32 compressedKind; + NvU32 compTagLineMin; + NvU32 compPageIndexLo; + NvU32 compPageIndexHi; + NvU32 compTagLineMultiplier; + NvU32 startCovg; + NvU64 origSize; + NvU64 adjustedSize; + NvU64 desiredOffset; + + FB_ALLOC_PAGE_FORMAT * pageFormat; + + // Tracking client for VGPU + NvHandle hClient; + NvHandle hDevice; + + // These are only used in Vista + // no need yet for possAttr2 + NvU32 possAttr; // AllocHint, BindCompr + NvU32 ctagOffset; + + // Special flag for kernel allocations + NvBool bIsKernelAlloc; + + // + // Number of 4KB pages in the PTE array + // For contiguous allocation, this will be set to '1' + // + NvU64 pageCount4k; + + // denote that underlying physical allocation is contiguous or not + NvBool bContig; + + // + // Store the PTE Array to be used for allocating comptaglines + // If the NVOS32_ATTR_PHYSICALITY_CONTIGUOUS is set, it will only have + // one entry, otherwise it will have dynamically allocated memory + // This will track the pages in 4KB granularity + // + RmPhysAddr pteArray[1]; +} FB_ALLOC_INFO; + +// +// Contains information on the various hw resources (compr, etc...) that +// can be associated with a memory allocation. +// +typedef struct HWRESOURCE_INFO +{ + NvU32 attr; // NVOS32_ATTR_* + NvU32 attr2; // NVOS32_ATTR2_* + NvU32 comprCovg; // compression coverage + NvU32 ctagOffset; // comptag offset + NvU32 hwResId; + NvU32 refCount; + NvBool isVgpuHostAllocated; // used in vGPU guest RM to indicate if this HW resource is allocated by host RM or not. Used in Windows guest. +} HWRESOURCE_INFO; + + +typedef struct PMA_ALLOC_INFO +{ + NvBool bContig; + NvU32 pageCount; + NvU64 pageSize; + NvU32 refCount; + NvU64 allocSize; + NvU32 flags; + // + // If bContig is TRUE, this array consists of one element. + // If bContig is FALSE, this array is actually larger and + // has one entry for each physical page in the allocation. + // As a result, this structure must be allocated from heap. + // + NvU64 pageArray[1]; + //!!! Place nothing behind pageArray!!! +} PMA_ALLOC_INFO; + +typedef struct MEMORY_ALLOCATION_REQUEST +{ + NV_MEMORY_ALLOCATION_PARAMS *pUserParams; + OBJGPU *pGpu; + NvHandle hMemory; // in: can be NULL (translates to 0) + NvU32 internalflags; // Extended flags ?! flags seem exhausted. + HWRESOURCE_INFO *pHwResource; // out: data copied in if non-NULL + MEMORY_DESCRIPTOR *pMemDesc; // in/out: allocate memdesc if NULL + PMA_ALLOC_INFO *pPmaAllocInfo[NV_MAX_SUBDEVICES]; // out: tracks the pre-allocated memory per GPU. 
+ NvU32 classNum; + NvHandle hClient; + NvHandle hParent; +} MEMORY_ALLOCATION_REQUEST; + +typedef struct +{ + NvU64 address; + NvU32 type; +} BLACKLIST_ADDRESS; + +#endif //_HEAP_BASE_H_ diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h b/src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h new file mode 100644 index 0000000..6050d43 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h @@ -0,0 +1,3 @@ + +#include "g_mem_desc_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h b/src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h new file mode 100644 index 0000000..811d902 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h @@ -0,0 +1,3 @@ + +#include "g_mem_mgr_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h b/src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h new file mode 100644 index 0000000..78ebf48 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _MEM_UTILS_H_ +#define _MEM_UTILS_H_ + +#include "core/prelude.h" + +#define CLEAR_HAL_ATTR(a) \ + a = (a &~(DRF_NUM(OS32, _ATTR, _COMPR, 0x3) | \ + DRF_NUM(OS32, _ATTR, _ZCULL, 0x3))); + +#define CLEAR_HAL_ATTR2(a) \ + a = (a & ~(DRF_SHIFTMASK(NVOS32_ATTR2_ZBC) | \ + DRF_SHIFTMASK(NVOS32_ATTR2_GPU_CACHEABLE))); + +NvU64 memUtilsLeastCommonAlignment(NvU64 align1, NvU64 align2); + +void memUtilsInitFBAllocInfo(NV_MEMORY_ALLOCATION_PARAMS *pAllocParams, FB_ALLOC_INFO *pFbAllocInfo, + NvHandle hClient, NvHandle hDevice); + +NV_STATUS memUtilsAllocMemDesc(OBJGPU *pGpu, MEMORY_ALLOCATION_REQUEST *pAllocRequest, FB_ALLOC_INFO *pFbAllocInfo, + MEMORY_DESCRIPTOR **ppMemDesc, Heap *pHeap, NV_ADDRESS_SPACE addrSpace, + NvBool bContig, NvBool *bAllocedMemDesc); + +#endif //_MEM_UTILS_H_ diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/rm_page_size.h b/src/nvidia/inc/kernel/gpu/mem_mgr/rm_page_size.h new file mode 100644 index 0000000..f25795e --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/rm_page_size.h @@ -0,0 +1,87 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef RM_PAGE_SIZE_H
+#define RM_PAGE_SIZE_H
+
+//---------------------------------------------------------------------------
+//
+// Memory page defines.
+//
+// These correspond to the granularity understood by the hardware
+// for address mapping; the system page size can be larger.
+//
+//---------------------------------------------------------------------------
+#define RM_PAGE_SIZE_INVALID 0
+#define RM_PAGE_SIZE         4096
+#define RM_PAGE_SIZE_64K     (64 * 1024)
+#define RM_PAGE_SIZE_128K    (128 * 1024)
+#define RM_PAGE_MASK         0x0FFF
+#define RM_PAGE_SHIFT        12
+#define RM_PAGE_SHIFT_64K    16
+#define RM_PAGE_SHIFT_128K   17
+#define RM_PAGE_SHIFT_2M     21
+#define RM_PAGE_SIZE_2M      (1 << RM_PAGE_SHIFT_2M)
+
+// Huge page size is 2 MB
+#define RM_PAGE_SHIFT_HUGE RM_PAGE_SHIFT_2M
+#define RM_PAGE_SIZE_HUGE  (1ULL << RM_PAGE_SHIFT_HUGE)
+#define RM_PAGE_MASK_HUGE  ((1ULL << RM_PAGE_SHIFT_HUGE) - 1)
+
+// 512MB page size
+#define RM_PAGE_SHIFT_512M 29
+#define RM_PAGE_SIZE_512M  (1ULL << RM_PAGE_SHIFT_512M)
+#define RM_PAGE_MASK_512M  (RM_PAGE_SIZE_512M - 1)
+
+// 256GB page size
+#define RM_PAGE_SHIFT_256G 38
+#define RM_PAGE_SIZE_256G  (1ULL << RM_PAGE_SHIFT_256G)
+#define RM_PAGE_MASK_256G  (RM_PAGE_SIZE_256G - 1)
+
+//---------------------------------------------------------------------------
+//
+// Memory page attributes.
+//
+// These attributes are used by software for page size mapping:
+// Big pages can be 64KB or 128KB [Fermi/Kepler/Pascal]
+// Huge page is 2MB [Pascal+]
+// 512MB page is for Ampere+
+// 256GB page is for Blackwell+
+// The default page attribute lets the driver decide the optimal page size
+//
+//---------------------------------------------------------------------------
+typedef enum
+{
+    RM_ATTR_PAGE_SIZE_DEFAULT,
+    RM_ATTR_PAGE_SIZE_4KB,
+    RM_ATTR_PAGE_SIZE_BIG,
+    RM_ATTR_PAGE_SIZE_HUGE,
+    RM_ATTR_PAGE_SIZE_512MB,
+    RM_ATTR_PAGE_SIZE_256GB,
+    RM_ATTR_PAGE_SIZE_INVALID
+}
+RM_ATTR_PAGE_SIZE;
+
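+// For example, rounding an allocation up to whole small pages with the
+// defines above (illustrative sketch):
+//
+//     NvU64 alignedSize = (size + RM_PAGE_MASK) & ~((NvU64)RM_PAGE_MASK);
+//     NvU64 pageCount   = alignedSize >> RM_PAGE_SHIFT;
+//
+#endif // RM_PAGE_SIZE_H
diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h b/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h
new file mode 100644
index 0000000..d693319
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h
@@ -0,0 +1,108 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.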
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef VIRT_MEM_ALLOCATOR_COMMON_H +#define VIRT_MEM_ALLOCATOR_COMMON_H + +/********************************* DMA Manager *****************************\ +* * +* DMA object/engine management. * +* * +****************************************************************************/ + +#include "nvtypes.h" +#include "nvgputypes.h" +#include "nvstatus.h" +#include "resserv/rs_client.h" +#include "gpu/mem_mgr/rm_page_size.h" + +typedef struct OBJGPU OBJGPU; +typedef struct ChannelDescendant ChannelDescendant; +typedef struct ContextDma ContextDma; +typedef struct Memory Memory; +typedef struct EVENTNOTIFICATION EVENTNOTIFICATION; +typedef struct Device Device; + +//--------------------------------------------------------------------------- +// +// Notification buffer structure. +// +//--------------------------------------------------------------------------- +typedef union _def_info_status_buffer +{ + struct + { + NvV16 OtherInfo16; + NvV16 Status; + } Info16Status_16; + + NvU32 Info16Status_32; + +} INFO16_STATUS; + +typedef struct _def_notification_buffer +{ + NvU32 TimeLo; + NvU32 TimeHi; + NvV32 OtherInfo32; + INFO16_STATUS Info16Status; +} NOTIFICATION, *PNOTIFICATION; + + +//--------------------------------------------------------------------------- +// +// Function prototypes. 
+// +//--------------------------------------------------------------------------- +void notifyMethodComplete(OBJGPU*, ChannelDescendant *, NvU32, NvV32, NvU32, NvU16, NV_STATUS); + +NV_STATUS notifyFillNotifier (OBJGPU*, ContextDma *, NvV32, NvV16, NV_STATUS); +NV_STATUS notifyFillNotifierOffset (OBJGPU*, ContextDma *, NvV32, NvV16, NV_STATUS, NvU64); +NV_STATUS notifyFillNotifierOffsetTimestamp(OBJGPU*, ContextDma *, NvV32, NvV16, NV_STATUS, NvU64, NvU64); +NV_STATUS notifyFillNotifierArray (OBJGPU*, ContextDma *, NvV32, NvV16, NV_STATUS, NvU32); +NV_STATUS notifyFillNotifierArrayTimestamp (OBJGPU*, ContextDma *, NvV32, NvV16, NV_STATUS, NvU32, NvU64); +void notifyFillNOTIFICATION(OBJGPU *pGpu, + NOTIFICATION *pNotifyBuffer, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvBool TimeSupplied, + NvU64 Time); +NV_STATUS notifyFillNotifierGPUVA (OBJGPU*, Device*, NvHandle, NvU64, NvV32, NvV16, NV_STATUS, NvU32); +NV_STATUS notifyFillNotifierGPUVATimestamp (OBJGPU*, Device*, NvHandle, NvU64, NvV32, NvV16, NV_STATUS, NvU32, NvU64); +NV_STATUS notifyFillNotifierMemory (OBJGPU*, Memory *, NvV32, NvV16, NV_STATUS, NvU32); +NV_STATUS notifyFillNotifierMemoryTimestamp(OBJGPU*, Memory *, NvV32, NvV16, NV_STATUS, NvU32, NvU64); +void notifyFillNvNotification(OBJGPU *pGpu, + NvNotification *pNotification, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvBool TimeSupplied, + NvU64 Time); + +NV_STATUS semaphoreFillGPUVA (OBJGPU*, Device*, NvHandle, NvU64, NvV32, NvV32, NvBool); +NV_STATUS semaphoreFillGPUVATimestamp(OBJGPU*, Device*, NvHandle, NvU64, NvV32, NvV32, NvBool, NvU64); + +RM_ATTR_PAGE_SIZE dmaNvos32ToPageSizeAttr(NvU32 attr, NvU32 attr2); + +#endif // VIRT_MEM_ALLOCATOR_COMMON_H diff --git a/src/nvidia/inc/kernel/gpu/nvbitmask.h b/src/nvidia/inc/kernel/gpu/nvbitmask.h new file mode 100644 index 0000000..501dd15 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/nvbitmask.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef _NVBITMASK_H_
+#define _NVBITMASK_H_
+
+#include "kernel/gpu/gpu_engine_type.h"
+
+//
+// Engine type capability mask bit-array helper macros, to support a growing
+// number of engine types. The caps array is defined as
+//      NvU32 caps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX]
+//
+#define NVGPU_ENGINE_CAPS_MASK_BITS      32
+#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
+#define NVGPU_GET_ENGINE_CAPS_MASK(caps, id) (caps[(id)/NVGPU_ENGINE_CAPS_MASK_BITS] & NVBIT((id) % NVGPU_ENGINE_CAPS_MASK_BITS))
+#define NVGPU_SET_ENGINE_CAPS_MASK(caps, id) (caps[(id)/NVGPU_ENGINE_CAPS_MASK_BITS] |= NVBIT((id) % NVGPU_ENGINE_CAPS_MASK_BITS))
+
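+//
+// Usage sketch (illustrative; RM_ENGINE_TYPE_COPY0 is just one example
+// engine ID):
+//
+//     NvU32 caps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX] = { 0 };
+//
+//     NVGPU_SET_ENGINE_CAPS_MASK(caps, RM_ENGINE_TYPE_COPY0);
+//     if (NVGPU_GET_ENGINE_CAPS_MASK(caps, RM_ENGINE_TYPE_COPY0) != 0)
+//     {
+//         // the engine is marked as capable
+//     }
+//
+#endif //_NVBITMASK_H_
diff --git a/src/nvidia/inc/kernel/gpu/rpc/objrpc.h b/src/nvidia/inc/kernel/gpu/rpc/objrpc.h
new file mode 100644
index 0000000..690f7e6
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/rpc/objrpc.h
@@ -0,0 +1,131 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.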
+ */ + +#pragma once +// #ifndef NVOC +// #include "g_objrpc_nvoc.h" +// #endif + +#ifndef _OBJRPC_H_ +#define _OBJRPC_H_ + +#include "vgpu/rpc_headers.h" +#include "diagnostics/nv_debug_dump.h" +#include "ctrl/ctrl2080/ctrl2080event.h" // rmcontrol params (from hal) +#include "ctrl/ctrl2080/ctrl2080gpu.h" // rmcontrol params (from hal) +#include "ctrl/ctrl2080/ctrl2080rc.h" // rmcontrol params (from hal) +#include "ctrl/ctrl2080/ctrl2080perf.h" // rmcontrol params (from hal) +#include "ctrl/ctrl0080/ctrl0080fb.h" // rmcontrol params (from hal) +#include "ctrl/ctrl0080/ctrl0080dma.h" // rmcontrol params (from hal) +#include "gpu/gsp/message_queue.h" +#include "libraries/utils/nvbitvector.h" + + +#include "vgpu/rpc_hal_stubs.h" + +#define RPC_TIMEOUT_GPU_RESET_THRESHOLD 3 // Reset GPU after 3 back to back GSP RPC timeout +#define RPC_TIMEOUT_PRINT_RATE_SKIP 29 // skip 29 of 30 prints + +#define RPC_HISTORY_DEPTH 128 + +typedef struct RpcHistoryEntry +{ + NvU32 function; + NvU64 data[2]; + NvU64 ts_start; + NvU64 ts_end; +} RpcHistoryEntry; + +struct OBJRPC{ + OBJECT_BASE_DEFINITION(RPC); + + struct { + NvU32 ipVersion; + }__nvoc_pbase_Object[1]; // This nested structure mechanism is to bypass NVOC + + // Message buffer fields + NvU32 *message_buffer; + NvU32 *message_buffer_priv; + MEMORY_DESCRIPTOR *pMemDesc_mesg; + NvU32 maxRpcSize; + NvU32 largeRpcSize; + + // UVM Message buffer fields + NvU32 *message_buffer_uvm; + NvU32 *message_buffer_priv_uvm; + MEMORY_DESCRIPTOR *pMemDesc_mesg_uvm; + + /* Message Queue */ + NvU32 timeoutCount; + NvBool bQuietPrints; + +}; + +// +// Utility macros for composing RPC messages. +// See for message formats. +// A message has a fixed-format header and optionally a variable length +// parameter after the header. +// + +#define vgpu_rpc_message_header_v ((rpc_message_header_v*)(pRpc->message_buffer)) +#define rpc_message (vgpu_rpc_message_header_v->rpc_message_data) + +static inline void _objrpcAssignIpVersion(struct OBJRPC* pRpc, NvU32 ipVersion) +{ + pRpc->__nvoc_pbase_Object->ipVersion = ipVersion; +} + +OBJRPC *initRpcObject(OBJGPU *pGpu); +void rpcSetIpVersion(OBJGPU *pGpu, OBJRPC *pRpc, NvU32 ipVersion); +void rpcObjIfacesSetup(OBJRPC *pRpc); +NV_STATUS rpcWriteCommonHeader(OBJGPU *pGpu, OBJRPC *pRpc, NvU32 func, NvU32 paramLength); +NV_STATUS rpcWriteCommonHeaderSim(OBJGPU *pGpu); +NV_STATUS vgpuGspSetupBuffers(OBJGPU *pGpu); +void vgpuGspTeardownBuffers(OBJGPU *pGpu); +NV_STATUS vgpuReinitializeRpcInfraOnStateLoad(OBJGPU *pGpu); + +// Initialize and free RPC infrastructure +NV_STATUS initRpcInfrastructure_VGPU(OBJGPU *pGpu); +NV_STATUS freeRpcInfrastructure_VGPU(OBJGPU *pGpu); + +NV_STATUS _allocRpcMemDesc(OBJGPU *pGpu, NvU64 size, NvBool bContig, NV_ADDRESS_SPACE addrSpace, NvU32 memFlags, + MEMORY_DESCRIPTOR **ppMemDesc, void **ppMemBuffer, void **ppMemBufferPriv); +void _freeRpcMemDesc(OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, void **ppMemBuffer, void **ppMemBufferPriv); + +// +// OBJGPU RPC member accessors. +// Historically, they have been defined inline by the following macros. +// These definitions were migrated to gpu.c in order to avoid having to include object headers in +// this file. 
+// + +OBJRPC *gpuGetGspClientRpc(OBJGPU*); +OBJRPC *gpuGetVgpuRpc(OBJGPU*); +OBJRPC *gpuGetRpc(OBJGPU*); + +#define GPU_GET_GSPCLIENT_RPC(u) gpuGetGspClientRpc(u) +#define GPU_GET_VGPU_RPC(u) gpuGetVgpuRpc(u) +#define GPU_GET_RPC(u) gpuGetRpc(u) + +#endif // _OBJRPC_H_ diff --git a/src/nvidia/inc/kernel/gpu/rpc/objrpcstructurecopy.h b/src/nvidia/inc/kernel/gpu/rpc/objrpcstructurecopy.h new file mode 100644 index 0000000..d68a304 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/rpc/objrpcstructurecopy.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _OBJRPCSTRUCTURECOPY_H_ +#define _OBJRPCSTRUCTURECOPY_H_ + +#include "vgpu/sdk-structures.h" +#include "ctrl/ctrl0080/ctrl0080bsp.h" // rmcontrol params (from hal) +#include "ctrl/ctrl2080/ctrl2080gr.h" // rmcontrol params (from hal) +#include "ctrl/ctrl2080/ctrl2080clk.h" // rmcontrol params (from hal) +#include "g_rpcstructurecopy_hal.h" // For RPCSTRUCTURECOPY_HAL_IFACES +#include "g_rpcstructurecopy_odb.h" // For RPCSTRUCTURECOPY_HAL_IFACES + +struct OBJRPCSTRUCTURECOPY{ + OBJECT_BASE_DEFINITION(RPCSTRUCTURECOPY); + + struct { + NvU32 ipVersion; + }__nvoc_pbase_Object[1]; // This nested structure mechanism is to bypass NVOC +}; + +#endif //_OBJRPCSTRUCTURECOPY_H_ diff --git a/src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h b/src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h new file mode 100644 index 0000000..c1a8628 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h @@ -0,0 +1,3 @@ + +#include "g_generic_engine_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/subdevice/subdevice.h b/src/nvidia/inc/kernel/gpu/subdevice/subdevice.h new file mode 100644 index 0000000..a9b688e --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/subdevice/subdevice.h @@ -0,0 +1,3 @@ + +#include "g_subdevice_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/timer/objtmr.h b/src/nvidia/inc/kernel/gpu/timer/objtmr.h new file mode 100644 index 0000000..4b47b50 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/timer/objtmr.h @@ -0,0 +1,3 @@ + +#include "g_objtmr_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/timer/tmr.h b/src/nvidia/inc/kernel/gpu/timer/tmr.h new file mode 100644 index 0000000..fd73d0e --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/timer/tmr.h @@ -0,0 +1,3 @@ + +#include "g_tmr_nvoc.h" + diff --git 
a/src/nvidia/inc/kernel/gpu_mgr/gpu_db.h b/src/nvidia/inc/kernel/gpu_mgr/gpu_db.h new file mode 100644 index 0000000..0a4dc41 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu_mgr/gpu_db.h @@ -0,0 +1,3 @@ + +#include "g_gpu_db_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu_mgr/gpu_group.h b/src/nvidia/inc/kernel/gpu_mgr/gpu_group.h new file mode 100644 index 0000000..425a303 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu_mgr/gpu_group.h @@ -0,0 +1,3 @@ + +#include "g_gpu_group_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h b/src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h new file mode 100644 index 0000000..f3acd0c --- /dev/null +++ b/src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h @@ -0,0 +1,3 @@ + +#include "g_gpu_mgmt_api_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h b/src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h new file mode 100644 index 0000000..068c748 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h @@ -0,0 +1,3 @@ + +#include "g_gpu_mgr_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/io_vaspace.h b/src/nvidia/inc/kernel/mem_mgr/io_vaspace.h new file mode 100644 index 0000000..2f07507 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/io_vaspace.h @@ -0,0 +1,3 @@ + +#include "g_io_vaspace_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/mem.h b/src/nvidia/inc/kernel/mem_mgr/mem.h new file mode 100644 index 0000000..f2f8ce0 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/mem.h @@ -0,0 +1,3 @@ + +#include "g_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h b/src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h new file mode 100644 index 0000000..8b9685b --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h @@ -0,0 +1,3 @@ + +#include "g_os_desc_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/standard_mem.h b/src/nvidia/inc/kernel/mem_mgr/standard_mem.h new file mode 100644 index 0000000..f4b5ecb --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/standard_mem.h @@ -0,0 +1,3 @@ + +#include "g_standard_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h b/src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h new file mode 100644 index 0000000..4894657 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h @@ -0,0 +1,3 @@ + +#include "g_syncpoint_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/system_mem.h b/src/nvidia/inc/kernel/mem_mgr/system_mem.h new file mode 100644 index 0000000..9ff0b13 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/system_mem.h @@ -0,0 +1,3 @@ + +#include "g_system_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/vaspace.h b/src/nvidia/inc/kernel/mem_mgr/vaspace.h new file mode 100644 index 0000000..6910058 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/vaspace.h @@ -0,0 +1,3 @@ + +#include "g_vaspace_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h b/src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h new file mode 100644 index 0000000..ab20731 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h @@ -0,0 +1,3 @@ + +#include "g_virt_mem_mgr_nvoc.h" + diff --git a/src/nvidia/inc/kernel/os/capability.h b/src/nvidia/inc/kernel/os/capability.h new file mode 100644 index 0000000..e2fa861 --- /dev/null +++ b/src/nvidia/inc/kernel/os/capability.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _OS_CAPABILITY_H_ +#define _OS_CAPABILITY_H_ + +// OS specific RM capabilities structure +typedef struct OS_RM_CAPS OS_RM_CAPS; + +// RM capabilities +#define NV_RM_CAP_SYS_BASE 0x0 +#define NV_RM_CAP_SYS_PROFILER_CONTEXT (NV_RM_CAP_SYS_BASE + 0) +#define NV_RM_CAP_SYS_PROFILER_DEVICE (NV_RM_CAP_SYS_BASE + 1) +#define NV_RM_CAP_SYS_SMC_CONFIG (NV_RM_CAP_SYS_BASE + 2) +#define NV_RM_CAP_SYS_SMC_MONITOR (NV_RM_CAP_SYS_BASE + 3) + +#define NV_RM_CAP_SMC_PARTITION_BASE 0x100 +#define NV_RM_CAP_SMC_PARTITION_ACCESS (NV_RM_CAP_SMC_PARTITION_BASE + 0) + +#define NV_RM_CAP_EXT_BASE 0x200 +#define NV_RM_CAP_EXT_FABRIC_MGMT (NV_RM_CAP_EXT_BASE + 0) + +#define NV_RM_CAP_SMC_EXEC_PARTITION_BASE 0x300 +#define NV_RM_CAP_SMC_EXEC_PARTITION_ACCESS (NV_RM_CAP_SMC_EXEC_PARTITION_BASE + 0) + +#endif diff --git a/src/nvidia/inc/kernel/os/nv_memory_area.h b/src/nvidia/inc/kernel/os/nv_memory_area.h new file mode 100644 index 0000000..55d7fb0 --- /dev/null +++ b/src/nvidia/inc/kernel/os/nv_memory_area.h @@ -0,0 +1,104 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef NV_MEMORY_AREA_H +#define NV_MEMORY_AREA_H + +typedef struct MemoryRange +{ + NvU64 start; + NvU64 size; +} MemoryRange; + +typedef struct MemoryArea +{ + MemoryRange *pRanges; + NvU64 numRanges; +} MemoryArea; + +static inline NvU64 memareaSize(MemoryArea memArea) +{ + NvU64 size = 0; + NvU64 idx = 0; + for (idx = 0; idx < memArea.numRanges; idx++) + { + size += memArea.pRanges[idx].size; + } + return size; +} + +static inline MemoryRange +mrangeMake +( + NvU64 start, + NvU64 size +) +{ + MemoryRange range; + range.start = start; + range.size = size; + return range; +} + +static inline NvU64 +mrangeLimit +( + MemoryRange a +) +{ + return a.start + a.size; +} + +static inline NvBool +mrangeIntersects +( + MemoryRange a, + MemoryRange b +) +{ + return ((a.start >= b.start) && (a.start < mrangeLimit(b))) || + ((b.start >= a.start) && (b.start < mrangeLimit(a))); +} + +static inline NvBool +mrangeContains +( + MemoryRange outer, + MemoryRange inner +) +{ + return (inner.start >= outer.start) && (mrangeLimit(inner) <= mrangeLimit(outer)); +} + +static inline MemoryRange +mrangeOffset +( + MemoryRange range, + NvU64 amt +) +{ + range.start += amt; + return range; +} + +#endif /* NV_MEMORY_AREA_H */ diff --git a/src/nvidia/inc/kernel/os/nv_memory_type.h b/src/nvidia/inc/kernel/os/nv_memory_type.h new file mode 100644 index 0000000..34255c7 --- /dev/null +++ b/src/nvidia/inc/kernel/os/nv_memory_type.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef NV_MEMORY_TYPE_H +#define NV_MEMORY_TYPE_H + +#define NV_MEMORY_NONCONTIGUOUS 0 +#define NV_MEMORY_CONTIGUOUS 1 + +#define NV_MEMORY_CACHED 0 +#define NV_MEMORY_UNCACHED 1 +#define NV_MEMORY_WRITECOMBINED 2 +#define NV_MEMORY_WRITEBACK 5 +#define NV_MEMORY_DEFAULT 6 +#define NV_MEMORY_UNCACHED_WEAK 7 + +#define NV_PROTECT_READABLE 1 +#define NV_PROTECT_WRITEABLE 2 +#define NV_PROTECT_READ_WRITE (NV_PROTECT_READABLE | NV_PROTECT_WRITEABLE) + +#endif /* NV_MEMORY_TYPE_H */ diff --git a/src/nvidia/inc/kernel/os/os.h b/src/nvidia/inc/kernel/os/os.h new file mode 100644 index 0000000..c58aa0c --- /dev/null +++ b/src/nvidia/inc/kernel/os/os.h @@ -0,0 +1,3 @@ + +#include "g_os_nvoc.h" + diff --git a/src/nvidia/inc/kernel/os/os_stub.h b/src/nvidia/inc/kernel/os/os_stub.h new file mode 100644 index 0000000..cc8442e --- /dev/null +++ b/src/nvidia/inc/kernel/os/os_stub.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef OS_STUB_H +#define OS_STUB_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Extern definitions of all public stub function interfaces * +* * +\***************************************************************************/ + +#include "os/os.h" + +// +// Each of these stub functions returns a different type. Used to +// stub out function pointers in OBJOS. +// +OSSpinLoop stubOsSpinLoop; +OSSetSurfaceName stubOsSetSurfaceName; + +#endif // OS_STUB_H diff --git a/src/nvidia/inc/kernel/platform/acpi_common.h b/src/nvidia/inc/kernel/platform/acpi_common.h new file mode 100644 index 0000000..2b46703 --- /dev/null +++ b/src/nvidia/inc/kernel/platform/acpi_common.h @@ -0,0 +1,113 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _ACPICOMMON_H_
+#define _ACPICOMMON_H_
+
+#include "acpigenfuncs.h"
+#include "core/core.h"
+#include "rmconfig.h"
+
+NV_STATUS testIfDsmFuncSupported(OBJGPU *, ACPI_DSM_FUNCTION);
+NV_STATUS testIfDsmSubFunctionEnabled(OBJGPU *, ACPI_DSM_FUNCTION, NvU32);
+NV_STATUS remapDsmFunctionAndSubFunction(OBJGPU *, ACPI_DSM_FUNCTION *, NvU32 *);
+NV_STATUS getDsmGetObjectSubfunction(OBJGPU *, ACPI_DSM_FUNCTION *, NvU32 *, NvU32*);
+void cacheDsmSupportedFunction(OBJGPU *, ACPI_DSM_FUNCTION, NvU32, NvU32 *, NvU32);
+NV_STATUS checkDsmCall(OBJGPU *, ACPI_DSM_FUNCTION *, NvU32 *, NvU32 *, NvU16 *);
+void acpiDsmInit(OBJGPU *);
+NV_STATUS getLicenseKey(OBJGPU *, NvU32, NvU32 *, NvU16 *);
+void uncacheDsmFuncStatus(OBJGPU *, ACPI_DSM_FUNCTION, NvU32);
+
+// useful macros
+#if NV_PRINTF_ENABLED
+#define DSMFuncStr(func) (func <= ACPI_DSM_FUNCTION_CURRENT ? DSMCalls[func] : DSMCalls[ACPI_DSM_FUNCTION_COUNT])
+#endif
+
+#define isDsmGetSuppFuncListCached(pGpu, acpiDsmFunction) (pGpu->acpi.dsm[acpiDsmFunction].suppFuncStatus != DSM_FUNC_STATUS_UNKNOWN)
+#define isDsmGetSuppFuncListFailed(pGpu, acpiDsmFunction) (pGpu->acpi.dsm[acpiDsmFunction].suppFuncStatus == DSM_FUNC_STATUS_FAILED)
+#define isGenericDsmFunction(acpiDsmFunction) (acpiDsmFunction >= ACPI_DSM_FUNCTION_COUNT)
+#define isGenericDsmSubFunction(acpiDsmSubFunction) (acpiDsmSubFunction >= NV_ACPI_GENERIC_FUNC_START)
+
+
+#define NV_ACPI_ALL_FUNC_SUPPORT 0x00000000 // Common "get supported subfunctions" subfunction.
+#define NV_ACPI_ALL_FUNC_SUPPORTED NVBIT(NV_ACPI_ALL_FUNC_SUPPORT) // Is the common subfunction supported?
+#define NV_ACPI_ALL_SUBFUNC_UNKNOWN 0xFFFFFFFF // Common define for unknown ACPI sub-function
+
+// All the callbacks (MXM, NBCI, NVHG) use the same bits. These are common.
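+//
+// A hedged decode sketch (using the DRF helpers from nvmisc.h; the input
+// values and surrounding call site are hypothetical) showing how bit-range
+// defines such as 7:4 below are meant to be consumed:
+//
+//     NvU32 powerState = DRF_VAL(_ACPI, _CALLBACKS, _ARG_CURRENTPOWERSTATE, arg);
+//     if (FLD_TEST_DRF(_ACPI, _CALLBACKS, _RET_HOTPLUG, _NOTIFY, ret))
+//     {
+//         // hotplug event notification was requested
+//     }
+//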
+#define NV_ACPI_CALLBACKS_ARG_POSTPOWERSTATE 2:2 +#define NV_ACPI_CALLBACKS_ARG_POSTPOWERSTATE_NOTIFY 0x00000001 +#define NV_ACPI_CALLBACKS_ARG_CURRENTPOWERSTATE 7:4 +#define NV_ACPI_CALLBACKS_ARG_3DSTEREOSTATE_ACTIVE 8:8 +#define NV_ACPI_CALLBACKS_ARG_3DSTEREOSTATE_ACTIVE_NO 0x00000000 +#define NV_ACPI_CALLBACKS_ARG_3DSTEREOSTATE_ACTIVE_YES 0x00000001 + + +#define NV_ACPI_CALLBACKS_RET_POSTPOWERSTATE 2:2 +#define NV_ACPI_CALLBACKS_RET_POSTPOWERSTATE_NOTIFY 0x00000001 +#define NV_ACPI_CALLBACKS_RET_HOTPLUG 9:9 +#define NV_ACPI_CALLBACKS_RET_HOTPLUG_NOTIFY 0x00000001 +#define NV_ACPI_CALLBACKS_RET_CONFIG 10:10 +#define NV_ACPI_CALLBACKS_RET_CONFIG_NOTIFY 0x00000001 +#define NV_ACPI_CALLBACKS_RET_3DSTEREOSTATE_ACTIVE 12:12 +#define NV_ACPI_CALLBACKS_RET_3DSTEREOSTATE_ACTIVE_NOTIFY 0x00000001 + +#define ACPI_NOTIFY_DOCK_EVENT 0x77 +#define ACPI_NOTIFY_PANEL_SWITCH 0x80 +#define ACPI_NOTIFY_DEVICE_HOTPLUG 0x81 +#define ACPI_NOTIFY_CYCLE_DISPLAY_HOTKEY 0x82 +#define ACPI_NOTIFY_NEXT_DISPLAY_HOTKEY 0x83 +#define ACPI_NOTIFY_PREV_DISPLAY_HOTKEY 0x84 +#define ACPI_NOTIFY_CYCLE_BRIGHTNESS_HOTKEY 0x85 +#define ACPI_NOTIFY_INC_BRIGHTNESS_HOTKEY 0x86 +#define ACPI_NOTIFY_DEC_BRIGHTNESS_HOTKEY 0x87 +#define ACPI_NOTIFY_ZERO_BRIGHTNESS_HOTKEY 0x88 +#define ACPI_NOTIFY_VIDEO_WAKEUP 0x90 + +#define ACPI_NOTIFY_GPS_STATUS_CHANGE 0xC0 + +#define ACPI_NOTIFY_BACKLIGHT_OFF 0xC1 +#define ACPI_NOTIFY_BACKLIGHT_ON 0xC2 + +#define ACPI_NOTIFY_POWER_LEVEL_D1 0xD1 +#define ACPI_NOTIFY_POWER_LEVEL_D2 0xD2 +#define ACPI_NOTIFY_POWER_LEVEL_D3 0xD3 +#define ACPI_NOTIFY_POWER_LEVEL_D4 0xD4 +#define ACPI_NOTIFY_POWER_LEVEL_D5 0xD5 + +#define ACPI_VIDEO_NOTIFY_PROBE 0x81 + +#define NV_ACPI_DSM_READ_SIZE (4*1024) // 4K as per spec + +// **************************************************** +// For _ROM Get ROM Data Method +// **************************************************** +#define ROM_METHOD_MAX_RETURN_BUFFER_SIZE 0x1000 + +// these are debug strings for printing which DSM subfunction didn't work. +// These map directly to the ACPI_DSM_FUNCTION enum in gpu/gpu.h. +#if NV_PRINTF_ENABLED +extern const char * const DSMCalls[]; +#endif + +#endif // _ACPICOMMON_H_ + diff --git a/src/nvidia/inc/kernel/platform/nvpcf.h b/src/nvidia/inc/kernel/platform/nvpcf.h new file mode 100644 index 0000000..02a9a9f --- /dev/null +++ b/src/nvidia/inc/kernel/platform/nvpcf.h @@ -0,0 +1,617 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVPCF_H
+#define NVPCF_H
+
+#include "ctrl/ctrl0000/ctrl0000system.h"
+
+/*
+ * Definitions for the static params table.
+ */
+
+/*!
+ * Layout of SysDev 2x data used for static config
+ */
+#define NVPCF_SYSDEV_STATIC_TABLE_VERSION_2X (0x20)
+#define NVPCF_SYSDEV_STATIC_TABLE_HEADER_2X_SIZE_03 (0x03U)
+#define NVPCF_SYSDEV_STATIC_TABLE_HEADER_2X_FMT_SIZE_03 ("3b")
+#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_SIZE_01 (0x01U)
+#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_FMT_SIZE_01 ("1b")
+
+/*!
+ * Static system dev header table, unpacked
+ */
+typedef struct
+{
+    /*
+     * System device Table Version.
+     */
+    NvU32 version;
+
+    /*
+     * Size of device Table Header in bytes.
+     */
+    NvU32 headerSize;
+
+    /*
+     * Size of common entry in bytes.
+     */
+    NvU32 commonSize;
+} SYSDEV_STATIC_TABLE_HEADER_2X;
+
+/*!
+ * Static system dev common entry
+ */
+typedef struct
+{
+    NvU32 param0;
+} SYSDEV_STATIC_TABLE_COMMON_2X;
+
+#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_CPU_TYPE 3:0
+#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_CPU_TYPE_INTEL (0x00000000)
+#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_CPU_TYPE_AMD (0x00000001)
+#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_CPU_TYPE_NVIDIA (0x00000002)
+#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_GPU_TYPE 7:4
+#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_GPU_TYPE_NVIDIA (0x00000000)
+
+/*!
+ * Layout of Controller 2x data used for static config
+ */
+#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_20 (0x20)
+#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_21 (0x21)
+#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_22 (0x22)
+#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_23 (0x23)
+#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_24 (0x24)
+#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_25 (0x25)
+#define NVPCF_CONTROLLER_STATIC_TABLE_MAX_ENTRIES (8)
+
+// format for 2.0 and 2.1
+#define NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V20_SIZE_05 (0x05U)
+#define NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V20_FMT_SIZE_05 ("5b")
+#define NVPCF_CONTROLLER_STATIC_TABLE_COMMON_V20_SIZE_02 (0x02U)
+#define NVPCF_CONTROLLER_STATIC_TABLE_COMMON_V20_FMT_SIZE_02 ("1w")
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_SIZE_0F (0x0FU)
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FMT_SIZE_0F ("1b1w3d")
+
+/*!
+ * Static system controller header table v2.0/2.1, unpacked
+ */
+typedef struct
+{
+    /*
+     * System controller Table Version.
+     */
+    NvU32 version;
+
+    /*
+     * Size of controller Table Header in bytes.
+     */
+    NvU32 headerSize;
+
+    /*
+     * Size of controller Table Common/Global Entry in bytes.
+     */
+    NvU32 commonSize;
+
+    /*
+     * Size of controller Table Entry in bytes.
+     */
+    NvU32 entrySize;
+
+    /*
+     * Number of controller Entries
+     */
+    NvU32 entryCount;
+} CONTROLLER_STATIC_TABLE_HEADER_V20;
+
+/*!
+ * Static system controller common/global entry v2.0/2.1, unpacked
+ */
+typedef struct
+{
+    /*
+     * Base sampling period in ms
+     */
+    NvU32 samplingPeriodms;
+} CONTROLLER_STATIC_TABLE_COMMON_V20;
+
+/*!
+ * Static system controller entry v2.0/2.1, unpacked
+ */
+typedef struct
+{
+    /*
+     * System controller entry type specific flag (Flags0).
+     */
+    NvU32 flags0;
+
+    /*
+     * Sampling Multiplier.
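+     * (Hedged reading: the effective sampling period for the controller is
+     * presumably the common entry's base samplingPeriodms scaled by this
+     * multiplier.)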
+     */
+    NvU32 samplingMulti;
+
+    /*
+     * System controller entry filter parameters.
+     */
+    NvU32 filterParams;
+
+    /*
+     * System controller entry Usage-Specific Parameter (Param0).
+     */
+    NvU32 param0;
+
+    /*
+     * System controller entry Usage-Specific Parameter (Param1).
+     */
+    NvU32 param1;
+
+} CONTROLLER_STATIC_TABLE_ENTRY_V20;
+
+// FLAGS0
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS 3:0
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS_DISABLED (0x00000000)
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS_PPAB (0x00000001)
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS_CTGP (0x00000002)
+
+// Filter
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FILTER_TYPE 7:0
+
+// filterType = EWMA
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FILTERPARAM_EWMA_WEIGHT 15:8
+// filterType = MAX, others
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FILTERPARAM_WINDOW_SIZE 15:8
+
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FILTER_RESERVED 31:16
+
+// Param0
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_PARAM0_QBOOST_INCREASE_GAIN 15:0
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_PARAM0_QBOOST_DECREASE_GAIN 31:16
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V25_PARAM0_CPU_TDP_TYPE 4:1
+
+// Param1
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_PARAM1_QBOOST_DC_SUPPORT 0:0
+
+// format for 2.2
+#define NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V22_SIZE_04 (0x04U)
+#define NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V22_FMT_SIZE_04 ("4b")
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_SIZE_05 (0x05U)
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FMT_SIZE_05 ("1b1d")
+
+// Param0
+#define NVPCF_CONTROLLER_SBIOS_TABLE_CPU_TDP_CONTROL_DC_ONLY (0x00)
+#define NVPCF_CONTROLLER_SBIOS_TABLE_CPU_TDP_CONTROL_DC_AC (0x01)
+
+/*!
+ * Static system controller header table v2.2, unpacked
+ */
+typedef struct
+{
+    /*
+     * System controller Table Version.
+     */
+    NvU32 version;
+
+    /*
+     * Size of controller Table Header in bytes.
+     */
+    NvU32 headerSize;
+
+    /*
+     * Size of controller Table Entry in bytes.
+     */
+    NvU32 entrySize;
+
+    /*
+     * Number of controller Entries
+     */
+    NvU32 entryCount;
+} CONTROLLER_STATIC_TABLE_HEADER_V22;
+
+/*!
+ * Static system controller entry v2.2, unpacked
+ */
+typedef struct
+{
+    /*
+     * System controller entry type specific flag (Flags0).
+     */
+    NvU32 flags0;
+
+    /*
+     * System controller entry Usage-Specific Parameter (Param0).
+     */
+    NvU32 param0;
+
+} CONTROLLER_STATIC_TABLE_ENTRY_V22;
+
+// FLAGS0
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS 3:0
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS_DISABLED (0x00000000)
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS_PPAB (0x00000001)
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS_CTGP (0x00000002)
+
+// Param0
+#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_PARAM0_QBOOST_DC_SUPPORT 0:0
+
+/*
+ * Definitions for the dynamic params table.
+ */
+#define NVPCF0100_CTRL_DYNAMIC_TABLE_1X_VERSION (0x10)
+#define NVPCF0100_CTRL_DYNAMIC_TABLE_1X_ENTRY_SIZE (4U)
+//
+// This is set to 32UL in the Windows NVPCF driver. Set it to 2UL, which is
+// good enough for now, to save space.
+//
+#define NVPCF0100_CTRL_DYNAMIC_TABLE_1X_ENTRY_MAX (2UL)
+#define NVPCF0100_CTRL_DYNAMIC_TABLE_1X_INPUT_CMD_GET_TPP (0x04)
+
+/*
+ * Dynamic Params Table Header, v1.x.
+ */
+typedef struct
+{
+    NvU8 version;
+
+    NvU8 size;
+
+    //
+    // Number of entries in the entire table.
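+    // (Hedged note: the ACPI blob appears to be this header followed by
+    // entryCnt entries of NVPCF0100_CTRL_DYNAMIC_TABLE_1X_ENTRY_SIZE bytes
+    // each; see the CONTROLLER_DYNAMIC_TABLE_1X_ACPI wrapper below.)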
+ // + NvU8 entryCnt; + + NvU8 reserved; + +} NVPCF0100_CTRL_DYNAMIC_TABLE_1X_HEADER, +*PNVPCF0100_CTRL_DYNAMIC_TABLE_1X_HEADER; + +/* + * Define the dynamic params table header and entries used by the ACPI call. + */ +typedef struct +{ + NVPCF0100_CTRL_DYNAMIC_TABLE_1X_HEADER header; + NvU32 entries[NVPCF0100_CTRL_DYNAMIC_TABLE_1X_ENTRY_MAX]; +} CONTROLLER_DYNAMIC_TABLE_1X_ACPI, +*PCONTROLLER_DYNAMIC_TABLE_1X_ACPI; + +/*! + * Config DSM NVPCF 2x version specific defines + */ + +/* + * Definitions for the dynamic params table. + */ +#define NVPCF_DYNAMIC_PARAMS_20_VERSION (0x20) +#define NVPCF_DYNAMIC_PARAMS_21_VERSION (0x21) +#define NVPCF_DYNAMIC_PARAMS_22_VERSION (0x22) +#define NVPCF_DYNAMIC_PARAMS_23_VERSION (0x23) +#define NVPCF_DYNAMIC_PARAMS_2X_HEADER_SIZE_05 (0x05U) +#define NVPCF_DYNAMIC_PARAMS_2X_HEADER_FMT_SIZE_05 ("5b") +#define NVPCF_DYNAMIC_PARAMS_2X_COMMON_SIZE_10 (0x10U) +#define NVPCF_DYNAMIC_PARAMS_2X_COMMON_FMT_SIZE_10 ("4d") +#define NVPCF_DYNAMIC_PARAMS_2X_ENTRY_SIZE_1C (0x1CU) +#define NVPCF_DYNAMIC_PARAMS_2X_ENTRY_FMT_SIZE_1C ("7d") +#define NVPCF_DYNAMIC_PARAMS_2X_ENTRY_MAX (8) + +// Power unit used, 125 milli-watts +#define NVPCF_DYNAMIC_PARAMS_2X_POWER_UNIT_MW (125) + +/*! + * Dynamic params header, unpacked. + */ +typedef struct +{ + /* + * Dynamic params table Version. + */ + NvU32 version; + + /* + * Size of dynamic params table header in bytes. + */ + NvU32 headerSize; + + /* + * Size of global/common entry in bytes. + */ + NvU32 commonSize; + + /* + * Size of each controller entry in bytes. + */ + NvU32 entrySize; + + /* + * Number of controller entries. + */ + NvU32 entryCount; +} DYNAMIC_PARAMS_HEADER_2X; + +/*! + * Dynamic params table global/common, unpacked. + */ +typedef struct +{ + NvU32 param0; + NvU32 param1; + NvU32 param2; + NvU32 param3; +} DYNAMIC_PARAMS_COMMON_2X; + +/*! + * Dynamic params table controller entry, unpacked. + */ +typedef struct +{ + NvU32 param0; + NvU32 param1; + NvU32 param2; + NvU32 param3; + NvU32 param4; + NvU32 param5; + NvU32 param6; +} DYNAMIC_PARAMS_ENTRY_2X; + +/*! + * Dynamic params table header, packed. + */ +typedef struct +{ + NvU8 version; + NvU8 headerSize; + NvU8 commonSize; + NvU8 entrySize; + NvU8 entryCount; +} DYNAMIC_PARAMS_HEADER_2X_PACKED; + +/*! + * Dynamic params table global/common, packed. + */ +typedef struct +{ + NvU32 param0; + NvU32 param1; + NvU32 param2; + NvU32 param3; +} DYNAMIC_PARAMS_COMMON_2X_PACKED; + +/*! + * Dynamic params table controller entry, packed. 
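+ * (Hedged note: the _FMT_SIZE strings above appear to describe the raw byte
+ * layout that the packed structs mirror -- "b"/"w"/"d" plausibly meaning
+ * byte/word/dword -- so "7d" is seven dwords = 0x1C bytes, matching
+ * NVPCF_DYNAMIC_PARAMS_2X_ENTRY_SIZE_1C, and "5b" is five bytes, matching
+ * NVPCF_DYNAMIC_PARAMS_2X_HEADER_SIZE_05 and the NvU8 fields of the packed
+ * header.)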
+ */ +typedef struct +{ + NvU32 param0; + NvU32 param1; + NvU32 param2; + NvU32 param3; + NvU32 param4; + NvU32 param5; + NvU32 param6; +} DYNAMIC_PARAMS_ENTRY_2X_PACKED; + +// Input Commands (Input Param0) +#define NVPCF_DYNAMIC_PARAMS_COMMON_2X_INPUT_PARAM0_CMD 1:0 +#define NVPCF_DYNAMIC_PARAMS_COMMON_2X_INPUT_PARAM0_CMD_GET (0) +#define NVPCF_DYNAMIC_PARAMS_COMMON_2X_INPUT_PARAM0_CMD_SET (1) + +// +// Input Command 0 (Get Controller Parameters) +// +// Global/Common Entry, Output Param0 +#define NVPCF_DYNAMIC_PARAMS_COMMON_2X_OUTPUT_PARAM0_CMD0_CTGP_AC_OFFSET 15:0 +#define NVPCF_DYNAMIC_PARAMS_COMMON_2X_OUTPUT_PARAM0_CMD0_CTGP_DC_OFFSET 31:16 +// Controller Entry, Output Param0 +#define NVPCF_DYNAMIC_PARAMS_ENTRY_2X_OUTPUT_PARAM0_CMD0_IDX 7:0 +#define NVPCF_DYNAMIC_PARAMS_ENTRY_2X_OUTPUT_PARAM0_CMD0_DISABLE_AC 8:8 +#define NVPCF_DYNAMIC_PARAMS_ENTRY_2X_OUTPUT_PARAM0_CMD0_DISABLE_DC 9:9 +// Controller Entry, Output Params1 +#define NVPCF_DYNAMIC_PARAMS_ENTRY_2X_OUTPUT_PARAM1_CMD0_SIGNED0 15:0 +#define NVPCF_DYNAMIC_PARAMS_ENTRY_2X_OUTPUT_PARAM1_CMD0_SIGNED1 31:16 +// Controller Entry, Output Params2 +#define NVPCF_DYNAMIC_PARAMS_ENTRY_2X_OUTPUT_PARAM2_CMD0_SIGNED0 15:0 +#define NVPCF_DYNAMIC_PARAMS_ENTRY_2X_OUTPUT_PARAM2_CMD0_SIGNED1 31:16 +// Controller Entry, Output Params3 +#define NVPCF_DYNAMIC_PARAMS_ENTRY_2X_OUTPUT_PARAM3_CMD0_SIGNED0 15:0 +#define NVPCF_DYNAMIC_PARAMS_ENTRY_2X_OUTPUT_PARAM3_CMD0_SIGNED1 31:16 +// Controller Entry, Output Params4 +#define NVPCF_DYNAMIC_PARAMS_ENTRY_2X_OUTPUT_PARAM4_CMD0_UNSIGNED 31:0 +// Controller Entry, Output Params5 +#define NVPCF_DYNAMIC_PARAMS_ENTRY_2X_OUTPUT_PARAM5_CMD0_UNSIGNED 31:0 +// Controller Entry, Output Params6 +#define NVPCF_DYNAMIC_PARAMS_ENTRY_2X_OUTPUT_PARAM6_CMD0_UNSIGNED 31:0 + +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_VERSION_10 0x10 +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_1X_HEADER_SIZE_02 0x02 +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_1X_HEADER_FMT_SIZE_02 "2b" +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_1X_HEADER_SIZE_04 0x04 +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_1X_HEADER_FMT_SIZE_04 "4b" +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_1X_ENTRY_SIZE_11 0x11 +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_1X_ENTRY_FMT_SIZE_11 "1b4d" + +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_VERSION_20 0x20 +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_2X_HEADER_SIZE_02 0x02 +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_2X_HEADER_FMT_SIZE_02 "2b" +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_2X_HEADER_SIZE_04 0x04 +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_2X_HEADER_FMT_SIZE_04 "4b" + +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_VERSION_20_MIN_ENTRIES 0x01 +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_VERSION_20_MAX_ENTRIES 0x08 + +typedef struct +{ + /*! + * Table version. + */ + NvU32 version; + + /*! + * Size of header in bytes. + */ + NvU32 headerSize; + + /*! + * Size of each table entry in bytes. + */ + NvU32 entrySize; + + /*! + * Number of entries. + */ + NvU32 entryCount; +} DC_SYSTEM_POWER_LIMITS_HEADER_1X; + +typedef struct +{ + /*! + * Table version. + */ + NvU32 version; + + /*! + * Size of header in bytes. + */ + NvU32 headerSize; + + /*! + * Size of each table entry in bytes. + */ + NvU32 entrySize; + + /*! + * Number of entries. 
+ */ + NvU32 entryCount; +} DC_SYSTEM_POWER_LIMITS_HEADER_2X; + +typedef struct +{ + /* + * Params0 - Battery state of charge threshold (percent 0-100) + */ + NvU32 batteryStateOfChargePercent; + + /* + * Params1 - Battery current limit (milliamps) + */ + NvU32 batteryCurrentLimitmA; + + /* + * Params2 - Rest of system reserved power (milliwatts) + */ + NvU32 restOfSytemReservedPowermW; + + /* + * Params3 - Min CPU TDP (milliwatts) + */ + NvU32 minCpuTdpmW; + + /* + * Params4 - Max CPU TDP (milliwatts) + */ + NvU32 maxCpuTdpmW; +} DC_SYSTEM_POWER_LIMITS_ENTRY_1X; + +typedef struct +{ + /* + * Params0 - Battery state of charge threshold (percent 0-100) + */ + NvU8 batteryStateOfChargePercent; + + /* + * Params1 - Long timescale battery current limit (milliamps) + */ + NvU32 longTimescaleBatteryCurrentLimitmA; + + /* + * Params2 - Short timescale battery current limit (milliamps) + */ + NvU32 shortTimescaleBatteryCurrentLimitmA; +} DC_SYSTEM_POWER_LIMITS_ENTRY_2X; + +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_2X_ENTRY_SIZE_09 0x09 +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_2X_ENTRY_FMT_SIZE_09 "1b2d" + +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_VERSION_10_MIN_ENTRIES 0x01 +#define NVPCF_DC_SYSTEM_POWER_LIMITS_TABLE_VERSION_10_MAX_ENTRIES 0x08 + +/* + * Layout of CPU TDP Limit Control 1x data used for static config and driver-sbios + * inter-communication + */ + +#define NVPCF_CPU_TDP_LIMIT_CONTROL_TABLE_1X_VERSION (0x10) +#define NVPCF_CPU_TDP_LIMIT_CONTROL_TABLE_1X_HEADER_SIZE_03 (0x03U) +#define NVPCF_CPU_TDP_LIMIT_CONTROL_TABLE_1X_HEADER_FMT_SIZE_03 ("3b") +#define NVPCF_CPU_TDP_LIMIT_CONTROL_TABLE_1X_BODY_SIZE_04 (0x04U) +#define NVPCF_CPU_TDP_LIMIT_CONTROL_TABLE_1X_BODY_FMT_SIZE_04 ("1d") + +/*! + * CPU TDP Limit Control Header, unpacked + */ +typedef struct +{ + /* + * CPU TDP Limit Control Table Version. + */ + NvU8 version; + + /* + * Size of CPU TDP Limit Control Table Header in bytes. + */ + NvU8 headerSize; + + /* + * Size of body entry in bytes. + */ + NvU8 bodySize; +} CPU_TDP_LIMIT_CONTROL_HEADER_1X; + +/*! + * CPU TDP Limit Control Table Common Entry, unpacked + */ +typedef struct +{ + NvU32 param0; +} CPU_TDP_LIMIT_CONTROL_BODY_1X; + +/*! + * CPU TDP Limit Control Header, packed + */ +typedef struct +{ + NvU8 version; + NvU8 headerSize; + NvU8 bodySize; +} CPU_TDP_LIMIT_CONTROL_HEADER_1X_PACKED; + +/*! + * CPU TDP Limit Control Table Output, packed + */ +typedef struct +{ + NvU32 param0; +} CPU_TDP_LIMIT_CONTROL_BODY_1X_PACKED; +#endif // NVPCF_H + diff --git a/src/nvidia/inc/kernel/platform/sli/sli.h b/src/nvidia/inc/kernel/platform/sli/sli.h new file mode 100644 index 0000000..01c2338 --- /dev/null +++ b/src/nvidia/inc/kernel/platform/sli/sli.h @@ -0,0 +1,66 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2000-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef RMSLI_H
+#define RMSLI_H
+
+/**************** Resource Manager Defines and Structures ******************\
+*                                                                           *
+* Private SLI-related defines and structures.                               *
+*                                                                           *
\***************************************************************************/
+
+#include "core/core.h"
+#include "nvlimits.h"
+#include "nvmisc.h"
+
+#define IsDeviceDestroyed(p) (gpuGetDeviceInstance(p) == NV_MAX_DEVICES)
+
+// Unlinked SLI is implemented in RM clients
+#define IsUnlinkedSLIEnabled(p) ((p)->getProperty((p), PDB_PROP_GPU_RM_UNLINKED_SLI))
+
+void RmInitScalability(OBJGPU *pGpu);
+
+#define IsSLIEnabled(p) 0
+#define NumSubDevices(p) 0
+
+#define SLI_LOOP_START(sliLoopFlags) { NvU32 loopIndex = 0; do {
+#define SLI_LOOP_END } while (loopIndex); }
+#define SLI_LOOP_BREAK break
+#define SLI_LOOP_CONTINUE continue
+#define SLI_LOOP_GOTO(loc) { goto loc; }
+#define SLI_LOOP_RETURN(SLi_ret) { return(SLi_ret); }
+#define SLI_LOOP_RETURN_VOID { return; }
+
+// macro to use when declaring array vars that'll be used within SLI_LOOPs
+#define SLI_LOOP_ARRAY_SIZE (NV_MAX_SUBDEVICES+1)
+
+// macro to verify that arrays are properly sized
+#define VERIFY_SLI_LOOP_ARRAY_SIZE(arr) \
+do { \
+    if (sizeof(arr) > sizeof(void *)) \
+    { \
+        NV_ASSERT(SLI_LOOP_ARRAY_SIZE == NV_ARRAY_ELEMENTS(arr)); \
+    } \
+} while (0)
+
+#endif // RMSLI_H
diff --git a/src/nvidia/inc/kernel/rmapi/alloc_size.h b/src/nvidia/inc/kernel/rmapi/alloc_size.h
new file mode 100644
index 0000000..55734f2
--- /dev/null
+++ b/src/nvidia/inc/kernel/rmapi/alloc_size.h
@@ -0,0 +1,38 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2008-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _ALLOC_SIZE_H_
+#define _ALLOC_SIZE_H_
+
+#include "nvstatus.h"
+
+/*
+ * rmapiGetClassAllocParamSize()
+ *
+ * Writes the allocation parameter size for the given class, in bytes, to
+ * *pAllocParamSizeBytes; writes zero if the specified class has no optional
+ * allocation parameters.
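+ *
+ * A hedged caller-side sketch (handle and parameter names hypothetical):
+ *
+ *     NvU32  allocParamSize;
+ *     NvBool bAllowNull;
+ *     NV_STATUS status = rmapiGetClassAllocParamSize(&allocParamSize,
+ *                                                    pUserParams,
+ *                                                    &bAllowNull,
+ *                                                    hClass);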
+ */
+NV_STATUS rmapiGetClassAllocParamSize(NvU32 *pAllocParamSizeBytes, NvP64 pUserParams, NvBool *pBAllowNull, NvU32 hClass);
+
+#endif // _ALLOC_SIZE_H_
+
diff --git a/src/nvidia/inc/kernel/rmapi/binary_api.h b/src/nvidia/inc/kernel/rmapi/binary_api.h
new file mode 100644
index 0000000..cfae46e
--- /dev/null
+++ b/src/nvidia/inc/kernel/rmapi/binary_api.h
@@ -0,0 +1,3 @@
+
+#include "g_binary_api_nvoc.h"
+
diff --git a/src/nvidia/inc/kernel/rmapi/client.h b/src/nvidia/inc/kernel/rmapi/client.h
new file mode 100644
index 0000000..bf1a434
--- /dev/null
+++ b/src/nvidia/inc/kernel/rmapi/client.h
@@ -0,0 +1,3 @@
+
+#include "g_client_nvoc.h"
+
diff --git a/src/nvidia/inc/kernel/rmapi/client_resource.h b/src/nvidia/inc/kernel/rmapi/client_resource.h
new file mode 100644
index 0000000..405ee3a
--- /dev/null
+++ b/src/nvidia/inc/kernel/rmapi/client_resource.h
@@ -0,0 +1,3 @@
+
+#include "g_client_resource_nvoc.h"
+
diff --git a/src/nvidia/inc/kernel/rmapi/control.h b/src/nvidia/inc/kernel/rmapi/control.h
new file mode 100644
index 0000000..d522d20
--- /dev/null
+++ b/src/nvidia/inc/kernel/rmapi/control.h
@@ -0,0 +1,369 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _CONTROL_H_
+#define _CONTROL_H_
+
+#include "core/core.h"
+
+#include "nvsecurityinfo.h"
+#include "resserv/rs_resource.h"
+#include "resserv/resserv.h"
+
+#include "utils/nvmacro.h"
+#include "rmapi/param_copy.h"
+
+struct NVOC_EXPORTED_METHOD_DEF;
+typedef RS_RES_CONTROL_PARAMS_INTERNAL RmCtrlParams;
+
+// RMCTRL_API_COPY_FLAGS is used to specify control API copy behavior.
+#define RMCTRL_API_COPY_FLAGS_NONE 0x00000000
+
+// skip the memory copy during API copy-in and zero the buffer
+#define RMCTRL_API_COPY_FLAGS_SKIP_COPYIN_ZERO_BUFFER NVBIT(0)
+
+// set the control cache on API copy-out
+#define RMCTRL_API_COPY_FLAGS_SET_CONTROL_CACHE NVBIT(1)
+
+// skip copy-out even for controls with RMCTRL_FLAGS_COPYOUT_ON_ERROR
+#define RMCTRL_API_COPY_FLAGS_FORCE_SKIP_COPYOUT_ON_ERROR NVBIT(2)
+
+//
+// RmCtrlExecuteCookie
+//
+// This typedef describes the data used by the rmctrl cmd execution
+// path. The data is filled at the beginning of rmControlCmdExecute()
+// and used as necessary in the other stages.
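+// (Hedged note: bFreeParamCopy / bFreeEmbeddedCopy below appear to record
+// whether paramCopy / embeddedParamCopies[] were populated during parameter
+// copy-in, so that the copy-out stage knows to release them.)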
+//
+struct RS_CONTROL_COOKIE
+{
+    // Rmctrl Command ID
+    NvU32 cmd;
+
+    // Rmctrl Flags
+    NvU32 ctrlFlags;
+
+    // API Copy Flags
+    NvU32 apiCopyFlags;
+
+    // Required Access Rights for this command
+    const RS_ACCESS_MASK rightsRequired;
+
+    NvBool bFreeParamCopy;    ///< Indicates that param copies should be cleaned up
+    NvBool bFreeEmbeddedCopy; ///< Indicates embedded param copies should be cleaned up
+
+    RMAPI_PARAM_COPY paramCopy;
+    RMAPI_PARAM_COPY embeddedParamCopies[4]; // Up to 4 embedded pointers per RmControl
+};
+typedef RS_CONTROL_COOKIE RmCtrlExecuteCookie;
+
+// values for RmCtrlDeferredCmd.pending
+#define RMCTRL_DEFERRED_FREE 0 // buffer is free
+#define RMCTRL_DEFERRED_ACQUIRED 1 // buffer is acquired to fill in data
+#define RMCTRL_DEFERRED_READY 2 // buffer is acquired and data has been copied.
+
+#define RMCTRL_DEFERRED_MAX_PARAM_SIZE 128 // 128-byte internal buffer for rmctrl params
+
+typedef struct
+{
+    NvS32 volatile pending;
+    NvU32 cpuInst;
+    RmCtrlParams rmCtrlDeferredParams;
+    NvU8 paramBuffer[RMCTRL_DEFERRED_MAX_PARAM_SIZE]; // buffer to hold rmCtrlDeferredParams.pParams
+} RmCtrlDeferredCmd;
+
+// catch commands misdirected to non-existent engines
+#define VERIFY_OBJ_PTR(p) if (p == NULL) return NV_ERR_INVALID_ARGUMENT
+
+// macros to get/set/clear cap bits
+// (Each cap##field define expands to a byteIndex:bitMask pair, so (1?cap##field)
+// selects the byte index and (0?cap##field) the bit mask; the 0/0 forces a
+// compile-time error when the byte index is out of range.)
+#define RMCTRL_GET_CAP(tbl,cap,field) (((NvU8)tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field))
+#define RMCTRL_SET_CAP(tbl,cap,field) ((tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) |= (0?cap##field))
+#define RMCTRL_CLEAR_CAP(tbl,cap,field) ((tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) &= ~(0?cap##field))
+
+// macros to AND/OR caps between two tables
+#define RMCTRL_AND_CAP(finaltbl,tmptbl,tmp,cap,field) \
+    tmp = ((finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] & tmptbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field)); \
+    finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] &= ~(0?cap##field); \
+    finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] |= tmp;
+
+#define RMCTRL_OR_CAP(finaltbl,tmptbl,tmp,cap,field) \
+    tmp = ((finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] | tmptbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field)); \
+    finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] &= ~(0?cap##field); \
+    finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] |= tmp;
+
+// Is the command ID a NULL command?
+// We allow NVXXXX_CTRL_CMD_NULL (0x00000000) as well as the
+// per-class NULL cmd ( _CATEGORY==0x00 and _INDEX==0x00 )
+#define RMCTRL_IS_NULL_CMD(cmd) ((cmd == NVXXXX_CTRL_CMD_NULL) || \
+                                 (FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _CATEGORY, 0x00, cmd) && \
+                                  FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _INDEX, 0x00, cmd)))
+
+// top-level internal RM Control interface
+NV_STATUS rmControl_Deferred(RmCtrlDeferredCmd *pRmCtrlDeferredCmd);
+
+//
+// Validate whether the client has privilege to execute the specified cmd.
+// Does not handle the vGPU CPU plugin case.
+//
+NV_STATUS rmControlValidateClientPrivilegeAccess(NvHandle hClient, NvHandle hObject, NvU32 cmd, NvU32 ctrlFlags, API_SECURITY_INFO *pSecInfo);
+
+// Helper functions for handling embedded parameter copies
+NV_STATUS embeddedParamCopyIn(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmCtrlParams);
+NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmCtrlParams);
+
+#define RM_CLIENT_PTR_ACCESS_CHECK_READ NVBIT(0)
+#define RM_CLIENT_PTR_ACCESS_CHECK_WRITE NVBIT(1)
+
+//
+// For NVOC Exported functions
+//
+// RMCTRL_FLAGS(A, B, C) is expanded to
+//   0 | RMCTRL_FLAGS_A | RMCTRL_FLAGS_B | RMCTRL_FLAGS_C
+//
+// ACCESS_RIGHTS(A, B, C) is expanded to
+//   0 | NVBIT(RS_ACCESS_A) | NVBIT(RS_ACCESS_B) | NVBIT(RS_ACCESS_C)
+//
+#define RMCTRL_EXPORT(cmdId, ...) [[nvoc::export(cmdId, __VA_ARGS__)]]
+#define _RMCTRL_PREP_FLAG_ARG(x) | NV_CONCATENATE(RMCTRL_FLAGS_, x)
+#define RMCTRL_FLAGS(...) (0 NV_FOREACH_ARG_NOCOMMA(_RMCTRL_PREP_FLAG_ARG, __VA_ARGS__))
+#define _RMCTRL_PREP_ACCESS_ARG(x) | NVBIT(NV_CONCATENATE(RS_ACCESS_, x))
+#define ACCESS_RIGHTS(...) (0 NV_FOREACH_ARG_NOCOMMA(_RMCTRL_PREP_ACCESS_ARG, __VA_ARGS__))
+
+// This define is currently unused.
+// In the future it will be used by NVOC to validate control flags.
+// 1. PHYSICAL_IMPLEMENTED_ON_VGPU_GUEST should be set only if ROUTE_TO_PHYSICAL is set
+// 2. PHYSICAL_IMPLEMENTED_ON_VGPU_GUEST and ROUTE_TO_VGPU_HOST shouldn't be set at the same time
+#define NVOC_EXPORTED_METHOD_FLAGS_VALID(ctrlFlags) \
+    ((ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL) || !(ctrlFlags & RMCTRL_FLAGS_PHYSICAL_IMPLEMENTED_ON_VGPU_GUEST)) && \
+    (!(ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST) || !(ctrlFlags & RMCTRL_FLAGS_PHYSICAL_IMPLEMENTED_ON_VGPU_GUEST))
+
+#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(ctrlFlags) \
+    ((ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL) && \
+     (!(ctrlFlags & RMCTRL_FLAGS_PHYSICAL_IMPLEMENTED_ON_VGPU_GUEST)))
+
+//
+// 'FLAGS' Attribute
+// -----------------
+//
+// RMCTRL_FLAGS is used to specify per-command state.
+//
+
+#define RMCTRL_FLAGS_NONE 0x000000000
+
+//
+// If the KERNEL_PRIVILEGED flag is specified, the call will only be allowed
+// for kernel mode callers (such as other kernel drivers) using a privileged
+// kernel RM client (CliCheckIsKernelClient() returning true). Otherwise,
+// NV_ERR_INSUFFICIENT_PERMISSIONS is returned.
+//
+#define RMCTRL_FLAGS_KERNEL_PRIVILEGED 0x000000000
+
+//
+// The resman rmcontrol handler will not grab the "gpus lock"
+// before executing the implementing function.
+//
+// Please be sure you know what you're doing before using this!
+//
+#define RMCTRL_FLAGS_NO_GPUS_LOCK 0x000000001
+
+//
+// Indicate to resman that this rmcontrol does not access any gpu
+// resources and can therefore run even when the gpu is powered down.
+//
+// Please be sure you know what you're doing before using this!
+//
+#define RMCTRL_FLAGS_NO_GPUS_ACCESS 0x000000002
+
+//
+// If the PRIVILEGED flag is specified, the call will only be allowed for
+// a) user contexts with admin privileges (osIsAdministrator() returning true), or
+// b) kernel mode callers, such as other kernel drivers.
+// Otherwise, NV_ERR_INSUFFICIENT_PERMISSIONS is returned.
+//
+#define RMCTRL_FLAGS_PRIVILEGED 0x000000004
+
+//
+// If the NON_PRIVILEGED flag is specified, the call will be allowed from any
+// client.
+//
+#define RMCTRL_FLAGS_NON_PRIVILEGED 0x000000008
+
+//
+// The resman rmcontrol handler will grab the per-device lock instead
+// of the "gpus lock" before executing the implementing function.
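+// (A hedged rationale: holding only the per-device lock lets such controls
+// run concurrently with controls that target other GPUs.)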
+//
+// Please be sure you know what you're doing before using this!
+//
+#define RMCTRL_FLAGS_GPU_LOCK_DEVICE_ONLY 0x000000010
+
+//
+// This flag is equivalent to PRIVILEGED when the RM access rights
+// implementation is disabled. Otherwise, it has no effect.
+//
+// The purpose of this flag is to aid in the transition to the access rights
+// system, so that access rights can be used for control calls that were
+// previously PRIVILEGED. Once access rights are enabled, this flag will no
+// longer be necessary.
+//
+#define RMCTRL_FLAGS_PRIVILEGED_IF_RS_ACCESS_DISABLED 0x000000020 // for Resserv Access Rights migration
+
+//
+// This flag specifies that the control shall be directly forwarded to the
+// physical object if called on the CPU-RM kernel.
+//
+#define RMCTRL_FLAGS_ROUTE_TO_PHYSICAL 0x000000040
+
+//
+// If the INTERNAL flag is specified, the call will only be allowed
+// to be issued from RM itself. Otherwise, NV_ERR_NOT_SUPPORTED is returned.
+//
+#define RMCTRL_FLAGS_INTERNAL 0x000000080
+
+//
+// If the API_LOCK_READONLY flag is specified, the call will acquire the
+// read-only API lock and may run concurrently with other operations that have
+// also taken the read-only API lock. This flag is ignored if read-only API
+// locking is disabled in RM.
+//
+#define RMCTRL_FLAGS_API_LOCK_READONLY 0x000000100
+
+//
+// This flag specifies that the control shall be directly forwarded to
+// the VGPU host if called from a guest (where IS_VIRTUAL() is true).
+//
+#define RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST 0x000000200
+
+//
+// This flag specifies that the control output does not depend on the input
+// parameters and can be cached on the receiving end.
+// The cache is transparent and may not exist on all platforms.
+//
+#define RMCTRL_FLAGS_CACHEABLE 0x000000400
+
+//
+// This flag specifies that the control parameters will be
+// copied out back to the caller even if the control call fails.
+//
+#define RMCTRL_FLAGS_COPYOUT_ON_ERROR 0x000000800
+
+// ??
+#define RMCTRL_FLAGS_ALLOW_WITHOUT_SYSMEM_ACCESS 0x000001000
+
+//
+// This flag specifies that the control can be run by an admin privileged
+// client running in a full SRIOV, vGPU-GSP-DISABLED hypervisor environment.
+// Overrides regular privilege level flags.
+//
+#define RMCTRL_FLAGS_CPU_PLUGIN_FOR_SRIOV 0x000004000
+
+//
+// This flag specifies that the control can be run by an admin privileged
+// client running in a non-SRIOV or SRIOV-Heavy hypervisor environment.
+// Overrides regular privilege level flags.
+//
+#define RMCTRL_FLAGS_CPU_PLUGIN_FOR_LEGACY 0x000008000
+
+//
+// This flag specifies that the control can be run by an unprivileged
+// client running in GSP-RM when SRIOV and vGPU-GSP are ENABLED.
+// Overrides regular privilege level flags.
+//
+#define RMCTRL_FLAGS_GSP_PLUGIN_FOR_VGPU_GSP 0x000010000
+
+//
+// This flag specifies that the control output depends on the input
+// parameters but can be cached on the receiving end. Since the control
+// result depends on the input, and the input varies across controls,
+// the cache should be handled on a per-control basis.
+//
+#define RMCTRL_FLAGS_CACHEABLE_BY_INPUT 0x000020000
+
+
+//
+// This flag specifies that a ROUTE_TO_PHYSICAL control is implemented on vGPU Guest RM.
+// If a ROUTE_TO_PHYSICAL control is supported within vGPU Guest RM,
+// it should either have this flag set (indicating the implementation in the vGPU Guest RM) or
+// the ROUTE_TO_VGPU_HOST flag set (indicating the implementation in vGPU Host RM).
+// Without either of these flags set, the control will return NV_ERR_NOT_SUPPORTED.
+//
+#define RMCTRL_FLAGS_PHYSICAL_IMPLEMENTED_ON_VGPU_GUEST 0x000040000
+
+// The combination of cacheable flags
+#define RMCTRL_FLAGS_CACHEABLE_ANY (RMCTRL_FLAGS_CACHEABLE | RMCTRL_FLAGS_CACHEABLE_BY_INPUT)
+
+//
+// This flag specifies that two client handles need to be locked.
+// An entry in serverControlLookupSecondClient is required for any control
+// call that sets this flag, or Resource Server will NV_ASSERT(0).
+//
+#define RMCTRL_FLAGS_DUAL_CLIENT_LOCK 0x000080000
+
+//
+// This flag specifies that the control call is for RM test-only code.
+//
+#define RMCTRL_FLAGS_RM_TEST_ONLY_CODE 0x000100000
+
+//
+// This flag specifies that all client handles in RM need to be locked.
+// This flag should almost never be used; the only cases where it is required
+// are cases where an RM API loops over several/arbitrary clients in RM using
+// something like serverutilGetFirstClientUnderLock. The RW API lock is required
+// to use this flag.
+//
+#define RMCTRL_FLAGS_ALL_CLIENT_LOCK 0x000200000
+
+//
+// This flag specifies that the API lock should not be acquired for this
+// RM Control. DO NOT use this flag without consulting Locking/Resource Server
+// experts first, and please consider other alternatives as much as possible
+// before resorting to using this flag!
+//
+#define RMCTRL_FLAGS_NO_API_LOCK 0x000400000
+
+//
+// This flag specifies that the control call persists in the RMCTRL cache across
+// OBJGPU StateLoad/Unload. This flag may be set in addition to RMCTRL_FLAGS_CACHEABLE
+// or RMCTRL_FLAGS_CACHEABLE_BY_INPUT.
+//
+#define RMCTRL_FLAGS_PERSISTENT_CACHEABLE 0x000800000
+
+//
+// 'ACCESS_RIGHTS' Attribute
+// ------------------------
+//
+// Used to specify a set of access rights that the client must hold on the
+// target resource to execute this control call. Note that this can only check
+// access rights on the target object; for other objects, such as those
+// specified by handles in parameter structs, checks must be done manually.
+//
+// The definition of each access right and its meaning is provided in the
+// README located at drivers/common/shared/accessrights/README. The prefix
+// "RS_ACCESS" is prepended to each entry in the control call definition;
+// for example, :NICE -> RS_ACCESS_NICE.
+//
+// This attribute only has an effect when the RM access rights implementation
+// is enabled; see g_bRsAccessEnabled.
+//
+
+#endif // _CONTROL_H_
+
+
diff --git a/src/nvidia/inc/kernel/rmapi/event.h b/src/nvidia/inc/kernel/rmapi/event.h
new file mode 100644
index 0000000..3282443
--- /dev/null
+++ b/src/nvidia/inc/kernel/rmapi/event.h
@@ -0,0 +1,3 @@
+
+#include "g_event_nvoc.h"
+
diff --git a/src/nvidia/inc/kernel/rmapi/event_buffer.h b/src/nvidia/inc/kernel/rmapi/event_buffer.h
new file mode 100644
index 0000000..2867ac3
--- /dev/null
+++ b/src/nvidia/inc/kernel/rmapi/event_buffer.h
@@ -0,0 +1,3 @@
+
+#include "g_event_buffer_nvoc.h"
+
diff --git a/src/nvidia/inc/kernel/rmapi/exports.h b/src/nvidia/inc/kernel/rmapi/exports.h
new file mode 100644
index 0000000..2c7f066
--- /dev/null
+++ b/src/nvidia/inc/kernel/rmapi/exports.h
@@ -0,0 +1,127 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _EXPORTS_H
+#define _EXPORTS_H
+
+#include "core/core.h"
+
+//
+// !! Deprecated. Do not use these exported API functions. Instead use the
+// User or Kernel ones below, depending on whether they are called from kernel
+// or user space.
+//
+// A User export is to be used for code paths originating from user space and
+// MUST pass only user client handles and user-mode pointers. On most OSes, RM
+// will sanity-check the use of handles and pointers against incorrect or
+// malicious use.
+//
+// A Kernel export is to be used for code paths originating from kernel space
+// and MUST pass only kernel client handles and kernel-mode pointers. By default
+// RM will skip any validation checks when a Kernel export is called. The onus
+// is on the caller to ensure that only valid handles and pointers are passed.
+// TBD. RM may enable the checks on debug builds or when a regkey is set.
+//
+// For more information refer to the Kernel_Client_Data_Validation wiki page
+//
+// WARNING!! RM has validation checks for handles and pointers. Incorrect use
+// of an export can cause RM to fail the API call.
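+//
+// A minimal caller-side sketch (hypothetical handles and control command;
+// NVOS54_PARAMETERS as declared in nvos.h):
+//
+//     NVOS54_PARAMETERS p = {0};
+//     p.hClient    = hClient;                    // kernel client handle
+//     p.hObject    = hSubdevice;                 // object being controlled
+//     p.cmd        = someCtrlCmd;                // an NVXXXX_CTRL_CMD_* value
+//     p.params     = NV_PTR_TO_NvP64(&ctrlParams);
+//     p.paramsSize = sizeof(ctrlParams);
+//     Nv04ControlKernel(&p);                     // result returned in p.status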
+// +void Nv01AllocMemory (NVOS02_PARAMETERS*); +void Nv01AllocObject (NVOS05_PARAMETERS*); +void Nv04Alloc (NVOS21_PARAMETERS*); +void Nv04AllocWithAccess (NVOS64_PARAMETERS*); +void Nv01Free (NVOS00_PARAMETERS*); +void Nv04Control (NVOS54_PARAMETERS*); +void Nv04VidHeapControl (NVOS32_PARAMETERS*); +void Nv04IdleChannels (NVOS30_PARAMETERS*); +void Nv04MapMemory (NVOS33_PARAMETERS*); +void Nv04UnmapMemory (NVOS34_PARAMETERS*); +void Nv04I2CAccess (NVOS_I2C_ACCESS_PARAMS*); +void Nv04AllocContextDma (NVOS39_PARAMETERS*); +void Nv04BindContextDma (NVOS49_PARAMETERS*); +void Nv04MapMemoryDma (NVOS46_PARAMETERS*); +void Nv04UnmapMemoryDma (NVOS47_PARAMETERS*); +void Nv04DupObject (NVOS55_PARAMETERS*); +void Nv04Share (NVOS57_PARAMETERS*); +void Nv04AddVblankCallback (NVOS61_PARAMETERS*); + +// exported "User" API functions +void Nv01AllocMemoryUser (NVOS02_PARAMETERS*); +void Nv01AllocObjectUser (NVOS05_PARAMETERS*); +void Nv04AllocUser (NVOS21_PARAMETERS*); +void Nv04AllocWithAccessUser (NVOS64_PARAMETERS*); +void Nv01FreeUser (NVOS00_PARAMETERS*); +void Nv04ControlUser (NVOS54_PARAMETERS*); +void Nv04VidHeapControlUser (NVOS32_PARAMETERS*); +void Nv04IdleChannelsUser (NVOS30_PARAMETERS*); +void Nv04MapMemoryUser (NVOS33_PARAMETERS*); +void Nv04UnmapMemoryUser (NVOS34_PARAMETERS*); +void Nv04I2CAccessUser (NVOS_I2C_ACCESS_PARAMS*); +void Nv04AllocContextDmaUser (NVOS39_PARAMETERS*); +void Nv04BindContextDmaUser (NVOS49_PARAMETERS*); +void Nv04MapMemoryDmaUser (NVOS46_PARAMETERS*); +void Nv04UnmapMemoryDmaUser (NVOS47_PARAMETERS*); +void Nv04DupObjectUser (NVOS55_PARAMETERS*); +void Nv04ShareUser (NVOS57_PARAMETERS*); +void Nv04AddVblankCallbackUser (NVOS61_PARAMETERS*); + +// exported "Kernel" API functions +void Nv01AllocMemoryKernel (NVOS02_PARAMETERS*); +void Nv01AllocObjectKernel (NVOS05_PARAMETERS*); +void Nv04AllocKernel (NVOS21_PARAMETERS*); +void Nv04AllocWithAccessKernel (NVOS64_PARAMETERS*); +void Nv01FreeKernel (NVOS00_PARAMETERS*); +void Nv04ControlKernel (NVOS54_PARAMETERS*); +void Nv04VidHeapControlKernel (NVOS32_PARAMETERS*); +void Nv04IdleChannelsKernel (NVOS30_PARAMETERS*); +void Nv04MapMemoryKernel (NVOS33_PARAMETERS*); +void Nv04UnmapMemoryKernel (NVOS34_PARAMETERS*); +void Nv04I2CAccessKernel (NVOS_I2C_ACCESS_PARAMS*); +void Nv04AllocContextDmaKernel (NVOS39_PARAMETERS*); +void Nv04BindContextDmaKernel (NVOS49_PARAMETERS*); +void Nv04MapMemoryDmaKernel (NVOS46_PARAMETERS*); +void Nv04UnmapMemoryDmaKernel (NVOS47_PARAMETERS*); +void Nv04DupObjectKernel (NVOS55_PARAMETERS*); +void Nv04ShareKernel (NVOS57_PARAMETERS*); +void Nv04AddVblankCallbackKernel (NVOS61_PARAMETERS*); + +// exported "WithSecInfo" API functions +void Nv01AllocMemoryWithSecInfo (NVOS02_PARAMETERS*, API_SECURITY_INFO); +void Nv01AllocObjectWithSecInfo (NVOS05_PARAMETERS*, API_SECURITY_INFO); +void Nv04AllocWithSecInfo (NVOS21_PARAMETERS*, API_SECURITY_INFO); +void Nv04AllocWithAccessSecInfo (NVOS64_PARAMETERS*, API_SECURITY_INFO); +void Nv01FreeWithSecInfo (NVOS00_PARAMETERS*, API_SECURITY_INFO); +void Nv04ControlWithSecInfo (NVOS54_PARAMETERS*, API_SECURITY_INFO); +void Nv04VidHeapControlWithSecInfo (NVOS32_PARAMETERS*, API_SECURITY_INFO); +void Nv04IdleChannelsWithSecInfo (NVOS30_PARAMETERS*, API_SECURITY_INFO); +void Nv04MapMemoryWithSecInfo (NVOS33_PARAMETERS*, API_SECURITY_INFO); +void Nv04UnmapMemoryWithSecInfo (NVOS34_PARAMETERS*, API_SECURITY_INFO); +void Nv04I2CAccessWithSecInfo (NVOS_I2C_ACCESS_PARAMS*, API_SECURITY_INFO); +void Nv04AllocContextDmaWithSecInfo (NVOS39_PARAMETERS*, 
API_SECURITY_INFO); +void Nv04BindContextDmaWithSecInfo (NVOS49_PARAMETERS*, API_SECURITY_INFO); +void Nv04MapMemoryDmaWithSecInfo (NVOS46_PARAMETERS*, API_SECURITY_INFO); +void Nv04UnmapMemoryDmaWithSecInfo (NVOS47_PARAMETERS*, API_SECURITY_INFO); +void Nv04DupObjectWithSecInfo (NVOS55_PARAMETERS*, API_SECURITY_INFO); +void Nv04ShareWithSecInfo (NVOS57_PARAMETERS*, API_SECURITY_INFO); + +#endif // _EXPORTS_H diff --git a/src/nvidia/inc/kernel/rmapi/lock_stress.h b/src/nvidia/inc/kernel/rmapi/lock_stress.h new file mode 100644 index 0000000..c2bf0ce --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/lock_stress.h @@ -0,0 +1,108 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once +#include "g_lock_stress_nvoc.h" + +#ifndef LOCK_STRESS_H +#define LOCK_STRESS_H + +#include "gpu/gpu_resource.h" +#include "nvoc/prelude.h" +#include "nvstatus.h" +#include "resserv/resserv.h" + +#include "ctrl/ctrl0100.h" + +NVOC_PREFIX(lockStressObj) class LockStressObject : GpuResource +{ +public: + NV_STATUS lockStressObjConstruct(LockStressObject *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams) : + GpuResource(pCallContext, pParams); + + void lockStressObjDestruct(LockStressObject *pResource); + + // + // RMCTRL Exported methods -- Category: LOCK_STRESS + // + RMCTRL_EXPORT(NV0100_CTRL_CMD_RESET_LOCK_STRESS_STATE, + RMCTRL_FLAGS(NON_PRIVILEGED)) + NV_STATUS lockStressObjCtrlCmdResetLockStressState(LockStressObject *pResource); + + RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_ALL_RM_LOCKS, + RMCTRL_FLAGS(NON_PRIVILEGED)) + NV_STATUS lockStressObjCtrlCmdPerformLockStressAllRmLocks(LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_ALL_RM_LOCKS_PARAMS *pParams); + + RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK, + RMCTRL_FLAGS(NON_PRIVILEGED, NO_GPUS_LOCK)) + NV_STATUS lockStressObjCtrlCmdPerformLockStressNoGpusLock(LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_PARAMS *pParams); + + RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE, + RMCTRL_FLAGS(NON_PRIVILEGED, API_LOCK_READONLY)) + NV_STATUS lockStressObjCtrlCmdPerformLockStressApiLockReadMode(LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE_PARAMS *pParams); + + RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE, + RMCTRL_FLAGS(NON_PRIVILEGED, NO_GPUS_LOCK, API_LOCK_READONLY)) + NV_STATUS lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode(LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS *pParams); + + RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_ALL_RM_LOCKS, + RMCTRL_FLAGS(NON_PRIVILEGED)) + NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks(LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_ALL_RM_LOCKS_PARAMS *pParams); + + RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK, + RMCTRL_FLAGS(NON_PRIVILEGED, NO_GPUS_LOCK)) + NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock(LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_PARAMS *pParams); + + RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_API_LOCK_READ_MODE, + RMCTRL_FLAGS(NON_PRIVILEGED, API_LOCK_READONLY)) + NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode(LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_API_LOCK_READ_MODE_PARAMS *pParams); + + RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_API_LOCK_READ_MODE, + RMCTRL_FLAGS(NON_PRIVILEGED, NO_GPUS_LOCK, API_LOCK_READONLY)) + NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode(LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS *pParams); + + RMCTRL_EXPORT(NV0100_CTRL_CMD_GET_LOCK_STRESS_COUNTERS, + RMCTRL_FLAGS(NON_PRIVILEGED)) + NV_STATUS lockStressObjCtrlCmdGetLockStressCounters(LockStressObject *pResource, + NV0100_CTRL_GET_LOCK_STRESS_COUNTERS_PARAMS *pParams); + +private: + + // Internal RM objects for internal RM API invocation + NvHandle hInternalClient; + NvHandle 
hInternalDevice; + NvHandle hInternalSubdevice; + NvHandle hInternalLockStressObject; +}; + +#endif // LOCK_STRESS_H diff --git a/src/nvidia/inc/kernel/rmapi/lock_test.h b/src/nvidia/inc/kernel/rmapi/lock_test.h new file mode 100644 index 0000000..ed6ab86 --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/lock_test.h @@ -0,0 +1,48 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#include "g_lock_test_nvoc.h" + +#ifndef LOCK_TEST_H +#define LOCK_TEST_H + +#include "gpu/gpu_resource.h" +#include "nvoc/prelude.h" +#include "nvstatus.h" +#include "resserv/resserv.h" + +NVOC_PREFIX(lockTestRelaxedDupObj) class LockTestRelaxedDupObject : GpuResource +{ +public: + NV_STATUS lockTestRelaxedDupObjConstruct(LockTestRelaxedDupObject *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams) : + GpuResource(pCallContext, pParams); + + void lockTestRelaxedDupObjDestruct(LockTestRelaxedDupObject *pResource); + virtual NvBool lockTestRelaxedDupObjCanCopy(LockTestRelaxedDupObject *pResource) {return NV_TRUE;}; + +}; + +#endif // LOCK_TEST_H diff --git a/src/nvidia/inc/kernel/rmapi/mapping_list.h b/src/nvidia/inc/kernel/rmapi/mapping_list.h new file mode 100644 index 0000000..cd53eca --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/mapping_list.h @@ -0,0 +1,167 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _MAPPING_LIST_H_ +#define _MAPPING_LIST_H_ + +#include +#include "containers/btree.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "os/os.h" +#include "rmapi/resource.h" +#include "mmu/gmmu_fmt.h" // GMMU_APERTURE + +typedef struct VirtualMemory VirtualMemory; +typedef struct Memory Memory; + +// **************************************************************************** +// Type definitions +// **************************************************************************** + +// dma information definitions +typedef struct _def_client_dma_mapping_info CLI_DMA_MAPPING_INFO, *PCLI_DMA_MAPPING_INFO; +typedef struct _def_client_dma_mapping_info_iterator CLI_DMA_MAPPING_INFO_ITERATOR, *PCLI_DMA_MAPPING_INFO_ITERATOR; + +// mapping information definitions +typedef struct _def_client_dma_alloc_map_info CLI_DMA_ALLOC_MAP_INFO; + +// +// DMA memory mapping XXX keep around since needed by mapping.c +// We need to figure out what to do with this +// RS-TODO gradually remove this with inter-mapping cleanup +// +struct _def_client_dma_mapping_info +{ + NvU64 DmaOffset; + void* KernelVAddr[NV_MAX_SUBDEVICES]; // Kernel's virtual address, if required + void* KernelPriv; // Token required to unmap the kernel mapping + NvU64 FbAperture[NV_MAX_SUBDEVICES]; // GPU aperture addresses, if required + NvU64 FbApertureLen[NV_MAX_SUBDEVICES]; // GPU aperture mapped lengths + MEMORY_DESCRIPTOR *pMemDesc; // Subregion to be mapped + NvU32 Flags; + NvU32 Flags2; + NvBool bP2P; + NvU32 gpuMask; + NvU64 mapPageSize; // Page size at which the memory is mapped. 
+ GMMU_APERTURE aperture; + NvBool bNeedL2InvalidateAtUnmap; + ADDRESS_TRANSLATION addressTranslation; + MEMORY_DESCRIPTOR *pBar1P2PVirtMemDesc; // The peer GPU mapped BAR1 region + MEMORY_DESCRIPTOR *pBar1P2PPhysMemDesc; // The peer GPU vidmem sub region + CLI_DMA_MAPPING_INFO *pNext; +}; + +// +// iterator object to enum CLI_DMA_MAPPING_INFO from 'pDmaMappingList' +// +struct _def_client_dma_mapping_info_iterator +{ + PNODE pDmaMappingList; // list of pDmaMappings + PNODE pNextDmaMapping; // next pDmaMapping while iterating over the DmaOffsets +}; + +// +// DMA allocMapping +// +struct _def_client_dma_alloc_map_info +{ + CLI_DMA_MAPPING_INFO *pDmaMappingInfo; + struct VirtualMemory *pVirtualMemory; + struct Memory *pMemory; +}; + +// **************************************************************************** +// Function definitions +// **************************************************************************** + +// Client Memory Mappings +// +// CliUpdateMemoryMappingInfo - Fill in RsCpuMapping fields for system memory mappings +// +static inline NV_STATUS +CliUpdateMemoryMappingInfo +( + RsCpuMapping *pCpuMapping, + NvBool bKernel, + NvP64 cpuAddress, + NvP64 priv, + NvU64 cpuMapLength, + NvU32 flags +) +{ + if (pCpuMapping == NULL) + return NV_ERR_INVALID_ARGUMENT; + + pCpuMapping->pPrivate->bKernel = bKernel; + pCpuMapping->length = cpuMapLength; + pCpuMapping->flags = flags; + pCpuMapping->processId = osGetCurrentProcess(); + pCpuMapping->pLinearAddress = cpuAddress; + pCpuMapping->pPrivate->pPriv = priv; + portMemSet(&pCpuMapping->pPrivate->memArea, 0, sizeof(MemoryArea)); + + return NV_OK; +} + +// **************************************************************************** +// Device Memory Mappings +// **************************************************************************** + +// +// CliUpdateDeviceMemoryMapping - Fill in RsCpuMapping fields for device memory mappings +// +static inline NV_STATUS +CliUpdateDeviceMemoryMapping +( + RsCpuMapping *pCpuMapping, + NvBool bKernel, + NvP64 priv, + NvP64 cpuAddress, + NvU64 cpuMapLength, + NvU64 gpuAddress, + NvU64 gpuMapLength, + NvU32 flags +) +{ + if (pCpuMapping == NULL) + return NV_ERR_INVALID_ARGUMENT; + + pCpuMapping->pPrivate->bKernel = bKernel; + pCpuMapping->length = cpuMapLength; + pCpuMapping->flags = flags; + pCpuMapping->processId = osGetCurrentProcess(); + pCpuMapping->pLinearAddress = cpuAddress; + pCpuMapping->pPrivate->pPriv = priv; + if (gpuMapLength == 0 || gpuAddress == 0) + { + portMemSet(&pCpuMapping->pPrivate->memArea, 0, sizeof(MemoryArea)); + return NV_OK; + } + pCpuMapping->pPrivate->memArea.numRanges = 1; + pCpuMapping->pPrivate->memArea.pRanges = &pCpuMapping->pPrivate->backingRangeStore; + pCpuMapping->pPrivate->backingRangeStore.start = gpuAddress; + pCpuMapping->pPrivate->backingRangeStore.size = gpuMapLength; + + return NV_OK; +} + +#endif diff --git a/src/nvidia/inc/kernel/rmapi/param_copy.h b/src/nvidia/inc/kernel/rmapi/param_copy.h new file mode 100644 index 0000000..27115aa --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/param_copy.h @@ -0,0 +1,99 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _PARAM_COPY_H_ +#define _PARAM_COPY_H_ + +// +// RMAPI_PARAM_COPY - a mechanism for getting user params in and out of resman. +// +// The struct RMAPI_PARAM_COPY keeps track of current API params for eventual +// copyout and free as needed. +// + +#include + +struct API_STATE +{ + NvP64 pUserParams; // ptr to params in client's addr space + void **ppKernelParams; // ptr to current 'pKernelParams' + NvU32 paramsSize; // # bytes + NvU32 flags; + NvBool bSizeValid; + const char *msgTag; +}; +typedef struct API_STATE RMAPI_PARAM_COPY; + +#define RMAPI_PARAM_COPY_FLAGS_NONE 0x00000000 +#define RMAPI_PARAM_COPY_FLAGS_IS_DIRECT_USAGE NVBIT(0) +#define RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN NVBIT(1) +#define RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT NVBIT(2) +#define RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER NVBIT(3) +// +// Only set this if the paramsSize member of RMAPI_PARAM_COPY has been validated for +// correctness before calling rmapiParamsAcquire(). There is a default cap on the +// largest size allowed in order to avoid huge memory allocations triggering +// out of memory scenarios if the user passes in a bogus size. +// +#define RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK NVBIT(4) +// +// 1MB is the largest size allowed for an embedded pointer accessed through +// rmapiParamsAcquire() unless RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK is specified +// and the size is validated before calling rmapiParamsAcquire(). +// +#define RMAPI_PARAM_COPY_MAX_PARAMS_SIZE (1*1024*1024) + +#if NV_PRINTF_STRINGS_ALLOWED +#define RMAPI_PARAM_COPY_MSG_TAG(x) x +#define RMAPI_PARAM_COPY_SET_MSG_TAG(paramCopy, theMsgTag) (paramCopy).msgTag = theMsgTag +#else +#define RMAPI_PARAM_COPY_MSG_TAG(x) ((const char *) 0) +#define RMAPI_PARAM_COPY_SET_MSG_TAG(paramCopy, theMsgTag) (paramCopy).msgTag = ((const char *) 0) +#endif + +// +// Initializes the RMAPI_PARAM_COPY structure. Sets bSizeValid to NV_FALSE if calculating +// the size caused an overflow. This makes the rmapiParamsAcquire() call fail with +// NV_ERR_INVALID_ARGUMENT. Since rmapiParamsAcquire() always directly follows +// this initialization, there is no need to make it return a status and +// duplicate error checking.
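+// +// Editor's sketch of the intended call sequence (illustrative only; the NvBool +// argument to rmapiParamsAcquire() is assumed here to select user-mode copyin +// semantics, and pUserList/count stand in for caller-supplied values): +// +// NV_STATUS status; +// RMAPI_PARAM_COPY paramCopy; +// NvU32 *pKernelList = NULL; +// RMAPI_PARAM_COPY_INIT(paramCopy, pKernelList, pUserList, count, sizeof(NvU32)); +// if (rmapiParamsAcquire(&paramCopy, NV_TRUE) == NV_OK) // alloc + copyin +// { +// // ... use pKernelList safely in kernel space ... +// status = rmapiParamsRelease(&paramCopy); // copyout + free +// }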
+// +#define RMAPI_PARAM_COPY_INIT(paramCopy, pKernelParams, theUserParams, numElems, sizeOfElem) \ + do { \ + RMAPI_PARAM_COPY_SET_MSG_TAG((paramCopy), __FUNCTION__); \ + (paramCopy).ppKernelParams = (void **) &(pKernelParams); \ + (paramCopy).pUserParams = (theUserParams); \ + (paramCopy).flags = RMAPI_PARAM_COPY_FLAGS_NONE; \ + (paramCopy).bSizeValid = portSafeMulU32((numElems), (sizeOfElem), &(paramCopy).paramsSize); \ + } while(0) + +// Routines for alloc/copyin/copyout/free sequences +NV_STATUS rmapiParamsAcquire(RMAPI_PARAM_COPY *, NvBool); +NV_STATUS rmapiParamsRelease(RMAPI_PARAM_COPY *); + +NV_STATUS rmapiParamsCopyOut(const char *msgTag, void *pKernelParams, NvP64 pUserParams, NvU32 paramsSize, NvBool); +NV_STATUS rmapiParamsCopyIn(const char *msgTag, void *pKernelParams, NvP64 pUserParams, NvU32 paramsSize, NvBool); + +// Initialize the param-copy structure for a given class +NV_STATUS rmapiParamsCopyInit(RMAPI_PARAM_COPY *, NvU32 hClass); + +#endif // _PARAM_COPY_H_ diff --git a/src/nvidia/inc/kernel/rmapi/resource.h b/src/nvidia/inc/kernel/rmapi/resource.h new file mode 100644 index 0000000..054e13a --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/resource.h @@ -0,0 +1,3 @@ + +#include "g_resource_nvoc.h" + diff --git a/src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h b/src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h new file mode 100644 index 0000000..cdbabe7 --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h @@ -0,0 +1,3 @@ + +#include "g_resource_fwd_decls_nvoc.h" + diff --git a/src/nvidia/inc/kernel/rmapi/rmapi.h b/src/nvidia/inc/kernel/rmapi/rmapi.h new file mode 100644 index 0000000..c0da455 --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/rmapi.h @@ -0,0 +1,465 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ +#ifndef _RMAPI_H_ +#define _RMAPI_H_ + +#include "core/core.h" +#include "nvsecurityinfo.h" + +// +// Forward declarations +// +typedef struct _RM_API RM_API; +typedef struct RsServer RsServer; +typedef struct OBJGPU OBJGPU; +typedef struct RsClient RsClient; +typedef struct RsResource RsResource; +typedef struct RsCpuMapping RsCpuMapping; +typedef struct CALL_CONTEXT CALL_CONTEXT; +typedef struct MEMORY_DESCRIPTOR MEMORY_DESCRIPTOR; +typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS_INTERNAL; +typedef struct RS_LOCK_INFO RS_LOCK_INFO; +typedef struct NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS; +typedef NvU32 NV_ADDRESS_SPACE; + +extern RsServer g_resServ; + +/** + * Initialize RMAPI module. + * + * Must be called once and only once before any RMAPI functions can be called. + */ +NV_STATUS rmapiInitialize(void); + +/** + * Shut down RMAPI module. + * + * Must be called once and only once when a driver is shutting down and no more + * RMAPI functions will be called. + */ +void rmapiShutdown(void); + +// Flags for rmapiLockAcquire +#define RMAPI_LOCK_FLAGS_NONE (0x00000000) // default no flags +#define RMAPI_LOCK_FLAGS_COND_ACQUIRE NVBIT(0) // conditional acquire; if lock is + // already held then return error +#define RMAPI_LOCK_FLAGS_READ NVBIT(1) // Acquire API lock for READ +#define RMAPI_LOCK_FLAGS_WRITE (0x00000000) // Acquire API lock for WRITE - Default +#define RMAPI_LOCK_FLAGS_LOW_PRIORITY NVBIT(2) // Deprioritize lock acquire + +/** + * Acquire API lock for READ, even if NV_REG_STR_RM_READONLY_API_LOCK_MODULE is + * not set for the module. + * + * This allows opting into the RO-locking behavior for specific paths while + * global enablement is pending. + */ +#define RMAPI_LOCK_FLAGS_READ_FORCE NVBIT(3) + +/** + * Acquire the RM API Lock + * + * The API lock is a sleeping mutex that is used to serialize access to RM APIs + * by (passive-level) RM clients. + * + * The API lock is not used to protect state accessed by DPCs and ISRs; for DPCs + * and ISRs, the GPU lock is used instead. For state controlled by clients, this + * often requires taking both API and GPU locks in API paths. + * + * @param[in] flags RMAPI_LOCK_FLAGS_* + * @param[in] module RM_LOCK_MODULES_* + */ +NV_STATUS rmapiLockAcquire(NvU32 flags, NvU32 module); + +/** + * Release RM API Lock + */ +void rmapiLockRelease(void); + +/** + * Check if the current thread owns the API lock + */ +NvBool rmapiLockIsOwner(void); + +/** + * Check if the current thread owns the RW API lock + */ +NvBool rmapiLockIsWriteOwner(void); + +/** + * Retrieve total RM API lock wait and hold times + */ +void rmapiLockGetTimes(NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS *); + +/** + * Indicates the current thread is in the RTD3 PM path (rm_transition_dynamic_power), which + * means that certain locking asserts/checks must be skipped due to the inability to acquire + * the API lock in this path. + */ +void rmapiEnterRtd3PmPath(void); + +/** + * Signifies that the current thread is leaving the RTD3 PM path, restoring lock + * asserting/checking behavior to normal. + */ +void rmapiLeaveRtd3PmPath(void); + +/** + * Checks if the current thread is currently running in the RTD3 PM path.
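+ * + * Editor's note (illustrative guard pattern, not from the original source): + * lock-ownership asserts are typically conditioned on this check, e.g. + * + * if (!rmapiInRtd3PmPath()) + * NV_ASSERT(rmapiLockIsOwner());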
+ */ +NvBool rmapiInRtd3PmPath(void); + +/** + * Type of RM API client interface + */ +typedef enum +{ + RMAPI_EXTERNAL, // For clients external from RM TLS, locks, etc -- no default security attributes + RMAPI_EXTERNAL_KERNEL, // For clients external from TLS and locks but which still need default security attributes + RMAPI_MODS_LOCK_BYPASS, // Hack for MODS - skip RM locks but initialize TLS (bug 1808386) + RMAPI_API_LOCK_INTERNAL, // For clients that already have the TLS & API lock held -- security is RM internal + RMAPI_GPU_LOCK_INTERNAL, // For clients that have TLS, API lock, and GPU lock -- security is RM internal + RMAPI_STUBS, // All functions just return NV_ERR_NOT_SUPPORTED + RMAPI_TYPE_MAX +} RMAPI_TYPE; + +/** + * Query interface that can be used to perform operations through the + * client-level RM API + */ +RM_API *rmapiGetInterface(RMAPI_TYPE rmapiType); + +// Flags for RM_API::Alloc +#define RMAPI_ALLOC_FLAGS_NONE 0 +#define RMAPI_ALLOC_FLAGS_SKIP_RPC NVBIT(0) +#define RMAPI_ALLOC_FLAGS_SERIALIZED NVBIT(1) + +// Flags for RM_API::Free +#define RMAPI_FREE_FLAGS_NONE 0 + +// Flags for RM_API RPC's +#define RMAPI_RPC_FLAGS_NONE 0 +#define RMAPI_RPC_FLAGS_COPYOUT_ON_ERROR NVBIT(0) +#define RMAPI_RPC_FLAGS_SERIALIZED NVBIT(1) + +/** + * Interface for performing operations through the RM API exposed to client + * drivers. The interface provides a consistent view of the RM API while + * abstracting individual callers from security attributes and locking needs. + * For example, this interface can be used either before or after the API or + * GPU locks are taken. + */ +struct _RM_API +{ + // Allocate a resource with default security attributes and local pointers (no NvP64) + NV_STATUS (*Alloc)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, void *pAllocParams, NvU32 paramsSize); + + // Allocate a resource with default security attributes and local pointers (no NvP64) + // and client assigned handle + NV_STATUS (*AllocWithHandle)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle hObject, NvU32 hClass, void *pAllocParams, NvU32 paramsSize); + + // Allocate a resource + NV_STATUS (*AllocWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, NvP64 pAllocParams, NvU32 paramsSize, + NvU32 flags, NvP64 pRightsRequested, API_SECURITY_INFO *pSecInfo); + + // Free a resource with default security attributes + NV_STATUS (*Free)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject); + + // Free a resource + NV_STATUS (*FreeWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject, + NvU32 flags, API_SECURITY_INFO *pSecInfo); + + // Disables all clients in the list, with default security attributes + NV_STATUS (*DisableClients)(struct _RM_API *pRmApi, NvHandle *phClientList, NvU32 numClients); + + // Disables all clients in the list + NV_STATUS (*DisableClientsWithSecInfo)(struct _RM_API *pRmApi, NvHandle *phClientList, + NvU32 numClients, API_SECURITY_INFO *pSecInfo); + + // Invoke a control with default security attributes and local pointers (no NvP64) + NV_STATUS (*Control)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject, NvU32 cmd, + void *pParams, NvU32 paramsSize); + + // Invoke a control + NV_STATUS (*ControlWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject, NvU32 cmd, + NvP64 pParams, NvU32 paramsSize, NvU32 flags, API_SECURITY_INFO *pSecInfo); + + // Prefetch control call parameters into the control call
cache (0000, 0080 and 2080 classes only) + NV_STATUS (*ControlPrefetch)(struct _RM_API *pRmApi, NvU32 cmd); + + // Dup an object with default security attributes + NV_STATUS (*DupObject)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent, NvHandle *phObject, + NvHandle hClientSrc, NvHandle hObjectSrc, NvU32 flags); + + // Dup an object + NV_STATUS (*DupObjectWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvHandle hClientSrc, NvHandle hObjectSrc, NvU32 flags, + API_SECURITY_INFO *pSecInfo); + + // Share an object with default security attributes + NV_STATUS (*Share)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy); + + // Share an object + NV_STATUS (*ShareWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, API_SECURITY_INFO *pSecInfo); + + // Map memory with default security attributes and local pointers (no NvP64). Provides + // RM internal implementation for NvRmMapMemory(). + NV_STATUS (*MapToCpu)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvU64 offset, NvU64 length, void **ppCpuVirtAddr, NvU32 flags); + + // Map memory. Provides RM internal implementation for NvRmMapMemory(). + NV_STATUS (*MapToCpuWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvU64 offset, NvU64 length, NvP64 *ppCpuVirtAddr, NvU32 flags, API_SECURITY_INFO *pSecInfo); + + // Map memory v2. Pass in flags as a pointer for in/out access + NV_STATUS (*MapToCpuWithSecInfoV2)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvU64 offset, NvU64 length, NvP64 *ppCpuVirtAddr, NvU32 *flags, API_SECURITY_INFO *pSecInfo); + + // Unmap memory with default security attributes and local pointers (no NvP64) + NV_STATUS (*UnmapFromCpu)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, void *pLinearAddress, + NvU32 flags, NvU32 ProcessId); + + // Unmap memory + NV_STATUS (*UnmapFromCpuWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvP64 pLinearAddress, NvU32 flags, NvU32 ProcessId, API_SECURITY_INFO *pSecInfo); + + // Map dma memory with default security attributes. Provides RM internal implementation for NvRmMapMemoryDma(). + NV_STATUS (*Map)(struct _RM_API *pRmApi, NVOS46_PARAMETERS *pParms); + + // Map dma memory. Provides RM internal implementation for NvRmMapMemoryDma(). 
+ NV_STATUS (*MapWithSecInfo)(struct _RM_API *pRmApi, NVOS46_PARAMETERS *pParms, API_SECURITY_INFO *pSecInfo); + + // Unmap dma memory with default security attributes + NV_STATUS (*Unmap)(struct _RM_API *pRmApi, NVOS47_PARAMETERS *pParms); + + // Unmap dma memory + NV_STATUS (*UnmapWithSecInfo)(struct _RM_API *pRmApi, NVOS47_PARAMETERS *pParms, API_SECURITY_INFO *pSecInfo); + + + API_SECURITY_INFO defaultSecInfo; + NvBool bHasDefaultSecInfo; + NvBool bTlsInternal; + NvBool bApiLockInternal; + NvBool bRmSemaInternal; + NvBool bGpuLockInternal; + void *pPrivateContext; +}; + +// Called before any RM resource is freed +NV_STATUS rmapiFreeResourcePrologue(RS_RES_FREE_PARAMS_INTERNAL *pRmFreeParams); + +// Mark client resources for deletion, given a GPU mask +void rmapiSetDelPendingClientResourcesFromGpuMask(NvU32 gpuMask); + +// Delete the marked client resources +void rmapiDelPendingClients(void); +void rmapiDelPendingDevices(NvU32 gpuMask); +void rmapiReportLeakedDevices(NvU32 gpuMask); + +// +// Given a pOSInfo value, retrieves an array of client handles for the clients +// with matching pOSInfo fields. The array is allocated dynamically and is +// expected to be freed by the caller. +// +NV_STATUS rmapiGetClientHandlesFromOSInfo(void*, NvHandle**, NvU32*); + +// +// Base mapping routines for use by RsResource subclasses +// +NV_STATUS rmapiMapGpuCommon(RsResource *, CALL_CONTEXT *, RsCpuMapping *, OBJGPU *, NvU32, NvU32); +NV_STATUS rmapiValidateKernelMapping(RS_PRIV_LEVEL privLevel, NvU32 flags, NvBool *pbKernel); +NV_STATUS rmapiGetEffectiveAddrSpace(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags, NV_ADDRESS_SPACE *pAddrSpace); + +/** + * Deprecated RM API interfaces. Use RM_API instead. + */ +NV_STATUS RmUnmapMemoryDma(NvHandle, NvHandle, NvHandle, NvHandle, MEMORY_DESCRIPTOR*, NvU32, NvU64); +NV_STATUS RmConfigGetEx (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvBool); +NV_STATUS RmConfigSetEx (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvBool); + +/** + * Control cache API.
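+ * + * Editor's sketch of the implied lookup flow (illustrative; the dispatch + * helper shown is hypothetical and the parameter wiring is assumed, not taken + * from this patch): + * + * if (rmapiCmdIsCacheable(cmd, NV_FALSE) && + * rmapiControlCacheGet(hClient, hObject, cmd, pParams, size, &secInfo) == NV_OK) + * return NV_OK; // served from cache + * status = issueControlCall(...); // hypothetical dispatch into the normal path + * if (status == NV_OK) + * rmapiControlCacheSet(hClient, hObject, cmd, pParams, size);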
+ */ +NV_STATUS rmapiControlCacheInit(void); +NvBool rmapiControlIsCacheable(NvU32 flags, NvU32 accessRight, NvBool bAllowInternal); +NvBool rmapiCmdIsCacheable(NvU32 cmd, NvBool bAllowInternal); +NV_STATUS rmapiControlCacheGet(NvHandle hClient, NvHandle hObject, NvU32 cmd, + void* params, NvU32 paramsSize, API_SECURITY_INFO *pSecInfo); +NV_STATUS rmapiControlCacheGetUnchecked(NvHandle hClient, NvHandle hObject, NvU32 cmd, + void* params, NvU32 paramsSize, API_SECURITY_INFO *pSecInfo); + +NV_STATUS rmapiControlCacheSet(NvHandle hClient, NvHandle hObject, NvU32 cmd, + void* params, NvU32 paramsSize); +NV_STATUS rmapiControlCacheSetUnchecked(NvHandle hClient, NvHandle hObject, NvU32 cmd, + void* params, NvU32 paramsSize, NvU32 rmctrlFlags); + +NV_STATUS rmapiControlCacheSetGpuAttrForObject(NvHandle hClient, NvHandle hObject, OBJGPU *pGpu); +void rmapiControlCacheFreeAllCacheForGpu(NvU32 gpuInst); +void rmapiControlCacheFreeNonPersistentCacheForGpu(NvU32 gpuInst); +void rmapiControlCacheSetMode(NvU32 mode); +NvU32 rmapiControlCacheGetMode(void); +void rmapiControlCacheFree(void); +NV_STATUS rmapiControlCacheFreeForControl(NvU32 gpuInstance, NvU32 cmd); +void rmapiControlCacheFreeClientEntry(NvHandle hClient); +void rmapiControlCacheFreeObjectEntry(NvHandle hClient, NvHandle hObject); + +typedef struct _RM_API_CONTEXT { + NvU32 gpuMask; +} RM_API_CONTEXT; + +// +// Handler for work that is required before invoking an RM API +// +NV_STATUS +rmapiPrologue +( + RM_API *pRmApi, + RM_API_CONTEXT *pContext +); + +// +// Handler for work that is required after invoking an RM API +// +void +rmapiEpilogue +( + RM_API *pRmApi, + RM_API_CONTEXT *pContext +); + +NV_STATUS +rmapiInitLockInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hSecondClient, + RS_LOCK_INFO *pLockInfo +); + +// +// RM locking modules: 24-bit group bitmask, 8-bit subgroup id +// +// Lock acquires are tagged with an RM_LOCK_MODULE_* in order to partition +// the acquires into groups, which allows read-only locks to be +// enabled / disabled on a per-group basis (via apiLockMask and gpuLockMask +// in OBJSYS). +// +// The groups are further partitioned into subgroups, which +// are used for lock profiling data collection.
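+// +// Worked example (editor's note): RM_LOCK_MODULES_EVENT below is +// RM_LOCK_MODULE_VAL(0x000010, 0x02) = ((0x000010 & 0xffffff) << 8) | (0x02 & 0xff) +// = 0x00001002, and RM_LOCK_MODULE_GRP(0x00001002) recovers the group, 0x000010.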
+// +#define RM_LOCK_MODULE_VAL(grp, subgrp) ((((grp) & 0xffffff) << 8) | ((subgrp) & 0xff)) +#define RM_LOCK_MODULE_GRP(val) (((val) >> 8) & 0xffffff) +// Grp SubGrp +#define RM_LOCK_MODULES_NONE RM_LOCK_MODULE_VAL(0x000000, 0x00) + +#define RM_LOCK_MODULES_WORKITEM RM_LOCK_MODULE_VAL(0x000001, 0x00) + +#define RM_LOCK_MODULES_CLIENT RM_LOCK_MODULE_VAL(0x000002, 0x00) + +#define RM_LOCK_MODULES_GPU_OPS RM_LOCK_MODULE_VAL(0x000004, 0x00) + +#define RM_LOCK_MODULES_OSAPI RM_LOCK_MODULE_VAL(0x000010, 0x00) +#define RM_LOCK_MODULES_STATE_CONFIG RM_LOCK_MODULE_VAL(0x000010, 0x01) +#define RM_LOCK_MODULES_EVENT RM_LOCK_MODULE_VAL(0x000010, 0x02) +#define RM_LOCK_MODULES_VBIOS RM_LOCK_MODULE_VAL(0x000010, 0x03) + +#define RM_LOCK_MODULES_MEM RM_LOCK_MODULE_VAL(0x000020, 0x00) +#define RM_LOCK_MODULES_MEM_FLA RM_LOCK_MODULE_VAL(0x000020, 0x01) +#define RM_LOCK_MODULES_MEM_PMA RM_LOCK_MODULE_VAL(0x000020, 0x02) + +#define RM_LOCK_MODULES_POWER RM_LOCK_MODULE_VAL(0x000040, 0x00) +#define RM_LOCK_MODULES_ACPI RM_LOCK_MODULE_VAL(0x000040, 0x01) +#define RM_LOCK_MODULES_DYN_POWER RM_LOCK_MODULE_VAL(0x000040, 0x02) + +#define RM_LOCK_MODULES_HYPERVISOR RM_LOCK_MODULE_VAL(0x000080, 0x00) +#define RM_LOCK_MODULES_VGPU RM_LOCK_MODULE_VAL(0x000080, 0x01) +#define RM_LOCK_MODULES_RPC RM_LOCK_MODULE_VAL(0x000080, 0x02) + +#define RM_LOCK_MODULES_DIAG RM_LOCK_MODULE_VAL(0x000100, 0x00) +#define RM_LOCK_MODULES_RC RM_LOCK_MODULE_VAL(0x000100, 0x01) + +#define RM_LOCK_MODULES_SLI RM_LOCK_MODULE_VAL(0x000200, 0x00) +#define RM_LOCK_MODULES_P2P RM_LOCK_MODULE_VAL(0x000200, 0x01) +#define RM_LOCK_MODULES_NVLINK RM_LOCK_MODULE_VAL(0x000200, 0x02) + +#define RM_LOCK_MODULES_HOTPLUG RM_LOCK_MODULE_VAL(0x000400, 0x00) +#define RM_LOCK_MODULES_DISP RM_LOCK_MODULE_VAL(0x000400, 0x01) +#define RM_LOCK_MODULES_KERNEL_RM_EVENTS RM_LOCK_MODULE_VAL(0x000400, 0x02) + +#define RM_LOCK_MODULES_GPU RM_LOCK_MODULE_VAL(0x000800, 0x00) +#define RM_LOCK_MODULES_GR RM_LOCK_MODULE_VAL(0x000800, 0x01) +#define RM_LOCK_MODULES_FB RM_LOCK_MODULE_VAL(0x000800, 0x02) +#define RM_LOCK_MODULES_FIFO RM_LOCK_MODULE_VAL(0x000800, 0x03) +#define RM_LOCK_MODULES_TMR RM_LOCK_MODULE_VAL(0x000800, 0x04) + +#define RM_LOCK_MODULES_I2C RM_LOCK_MODULE_VAL(0x001000, 0x00) +#define RM_LOCK_MODULES_PFM_REQ_HNDLR RM_LOCK_MODULE_VAL(0x001000, 0x01) +#define RM_LOCK_MODULES_SEC2 RM_LOCK_MODULE_VAL(0x001000, 0x02) +#define RM_LOCK_MODULES_THERM RM_LOCK_MODULE_VAL(0x001000, 0x03) +#define RM_LOCK_MODULES_INFOROM RM_LOCK_MODULE_VAL(0x001000, 0x04) + +#define RM_LOCK_MODULES_ISR RM_LOCK_MODULE_VAL(0x002000, 0x00) +#define RM_LOCK_MODULES_DPC RM_LOCK_MODULE_VAL(0x002000, 0x01) + +#define RM_LOCK_MODULES_INIT RM_LOCK_MODULE_VAL(0x004000, 0x00) +#define RM_LOCK_MODULES_STATE_LOAD RM_LOCK_MODULE_VAL(0x004000, 0x01) + +#define RM_LOCK_MODULES_STATE_UNLOAD RM_LOCK_MODULE_VAL(0x008000, 0x00) +#define RM_LOCK_MODULES_DESTROY RM_LOCK_MODULE_VAL(0x008000, 0x01) + +// +// ResServ lock flag translation +// +#define RM_LOCK_FLAGS_NONE 0 +#define RM_LOCK_FLAGS_NO_API_LOCK RS_LOCK_FLAGS_NO_TOP_LOCK +#define RM_LOCK_FLAGS_NO_CLIENT_LOCK RS_LOCK_FLAGS_NO_CLIENT_LOCK +#define RM_LOCK_FLAGS_NO_GPUS_LOCK RS_LOCK_FLAGS_NO_CUSTOM_LOCK_1 +#define RM_LOCK_FLAGS_GPU_GROUP_LOCK RS_LOCK_FLAGS_NO_CUSTOM_LOCK_2 +#define RM_LOCK_FLAGS_RM_SEMA RS_LOCK_FLAGS_NO_CUSTOM_LOCK_3 + +// +// ResServ lock state translation +// +#define RM_LOCK_STATES_NONE 0 +#define RM_LOCK_STATES_API_LOCK_ACQUIRED RS_LOCK_STATE_TOP_LOCK_ACQUIRED +#define RM_LOCK_STATES_GPUS_LOCK_ACQUIRED 
RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED +#define RM_LOCK_STATES_GPU_GROUP_LOCK_ACQUIRED RS_LOCK_STATE_CUSTOM_LOCK_2_ACQUIRED +#define RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS RS_LOCK_STATE_ALLOW_RECURSIVE_RES_LOCK +#define RM_LOCK_STATES_CLIENT_LOCK_ACQUIRED RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED +#define RM_LOCK_STATES_RM_SEMA_ACQUIRED RS_LOCK_STATE_CUSTOM_LOCK_3_ACQUIRED + +// +// ResServ lock release translation +// +#define RM_LOCK_RELEASE_API_LOCK RS_LOCK_RELEASE_TOP_LOCK +#define RM_LOCK_RELEASE_CLIENT_LOCK RS_LOCK_RELEASE_CLIENT_LOCK +#define RM_LOCK_RELEASE_GPUS_LOCK RS_LOCK_RELEASE_CUSTOM_LOCK_1 +#define RM_LOCK_RELEASE_GPU_GROUP_LOCK RS_LOCK_RELEASE_CUSTOM_LOCK_2 +#define RM_LOCK_RELEASE_RM_SEMA RS_LOCK_RELEASE_CUSTOM_LOCK_3 + +#endif // _RMAPI_H_ diff --git a/src/nvidia/inc/kernel/rmapi/rmapi_cache_handlers.h b/src/nvidia/inc/kernel/rmapi/rmapi_cache_handlers.h new file mode 100644 index 0000000..b2eb164 --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/rmapi_cache_handlers.h @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _RMAPI_CACHE_HANDLERS_H_ +#define _RMAPI_CACHE_HANDLERS_H_ + +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvlimits.h" +#include "ctrl/ctrl0073.h" +#include "rmconfig.h" +#include "core/prelude.h" + +typedef NV_STATUS (*RmapiCacheGetByInputHandler)(void *cachedEntry, void* pParams, NvBool bSet); + +// +// Rm Cmd Cache Handler structure definitions and function declarations. +// + +typedef struct DispSystemGetSupportedCacheEntry +{ + NvBool valid; + NvU32 displayMask; + NvU32 displayMaskDDC; +} DispSystemGetSupportedCacheEntry; + +NV_STATUS _dispSystemGetSupportedCacheHandler(void *cachedEntry, void* pParams, NvBool bSet); + +typedef struct DispSystemGetInternalDisplaysCacheEntry +{ + NvBool valid; + NvU32 internalDisplaysMask; + NvU32 availableInternalDisplaysMask; +} DispSystemGetInternalDisplaysCacheEntry; + +NV_STATUS _dispSystemGetInternalDisplaysCacheHandler(void *cachedEntry, void* pParams, NvBool bSet); + +typedef struct DispDpGetCapsCacheTable +{ + // Indexed by sorIndex parameter. 
+ struct + { + NvBool valid; + NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS params; + } cachedEntries[NV_MAX_DEVICES]; +} DispDpGetCapsCacheTable; + +NV_STATUS _dispDpGetCapsCacheHandler(void *cachedEntry, void *pProvidedParams, NvBool bSet); + +typedef struct DispSpecificGetTypeCacheTable +{ + struct + { + NvBool valid; + NvU32 displayType; + } cachedEntries[NV_MAX_DEVICES]; +} DispSpecificGetTypeCacheTable; + +NV_STATUS _dispSpecificGetTypeCacheHandler(void *cachedEntry, void *pProvidedParams, NvBool bSet); + +#endif diff --git a/src/nvidia/inc/kernel/rmapi/rmapi_specific.h b/src/nvidia/inc/kernel/rmapi/rmapi_specific.h new file mode 100644 index 0000000..a3fe6f8 --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/rmapi_specific.h @@ -0,0 +1,63 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _RMAPI_SPECIFIC_H +#define _RMAPI_SPECIFIC_H + +#include "nvstatus.h" +#include "nvtypes.h" + +#include "resserv/rs_resource.h" + +/** + * Make custom modifications to class-specific allocation params as needed. + * + * @param[inout] ppResDesc Resource descriptor used for the allocation, may change if + * the class's ID changes as a result of this function. + * @param[in] pRmAllocParams Allocation params + * + * @return NV_ERR_INVALID_CLASS if an invalid class transition happens, NV_OK otherwise. + */ +NV_STATUS rmapiFixupAllocParams(RS_RESOURCE_DESC **ppResDesc, RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams); + +/** + * Returns NV_TRUE if the control call specified by cmd can be invoked at an increased + * IRQL level, NV_FALSE otherwise. + * + * @param[in] cmd Control call ID + * + * @return NV_TRUE if control can be RAISED_IRQL, NV_FALSE otherwise. + */ +NvBool rmapiRmControlCanBeRaisedIrql(NvU32 cmd); + +/** + * Returns NV_TRUE if the control call specified by cmd can bypass acquiring locks, + * NV_FALSE otherwise. + * + * @param[in] cmd Control call ID + * + * @return NV_TRUE if control can be BYPASS_LOCK, NV_FALSE otherwise. + */ +NvBool rmapiRmControlCanBeBypassLock(NvU32 cmd); + +#endif // _RMAPI_SPECIFIC_H diff --git a/src/nvidia/inc/kernel/rmapi/rmapi_utils.h b/src/nvidia/inc/kernel/rmapi/rmapi_utils.h new file mode 100644 index 0000000..1273840 --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/rmapi_utils.h @@ -0,0 +1,67 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES.
All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef RMAPI_UTILS_H +#define RMAPI_UTILS_H + +#include "rmapi/rmapi.h" + + +// +// Alloc a client, device and subdevice handle for a gpu +// +NV_STATUS +rmapiutilAllocClientAndDeviceHandles +( + RM_API *pRmApi, + OBJGPU *pGpu, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubDevice +); + +// +// Free client, device and subdevice handles +// +void +rmapiutilFreeClientAndDeviceHandles +( + RM_API *pRmApi, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubDevice +); + +// +// Return NV_TRUE if the given external class ID is an INTERNAL_ONLY class +// +NvBool rmapiutilIsExternalClassIdInternalOnly(NvU32 externalClassId); + +// +// Return the flags and access right associated with this RM control command +// +NV_STATUS rmapiutilGetControlInfo(NvU32 cmd, NvU32 *pFlags, + NvU32 *pAccessRight, NvU32 *pParamsSize); + +NvBool rmapiutilSkipErrorMessageForUnsupportedVgpuGuestControl(OBJGPU *pGpu, NvU32 cmd); + +#endif /* RMAPI_UTILS_H */ diff --git a/src/nvidia/inc/kernel/rmapi/rs_utils.h b/src/nvidia/inc/kernel/rmapi/rs_utils.h new file mode 100644 index 0000000..542046a --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/rs_utils.h @@ -0,0 +1,188 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _RS_UTILS_H_ +#define _RS_UTILS_H_ + +/** + * @defgroup RsUtilities + * + * Provides convenience utilities for resserv. Utility functions provide + * abstractions that take handles as inputs -- helpful for legacy code that + * passes hClient or hResource handles and not underlying objects. The intent + * is for new code to use pClient and RsResourceRef types instead of passing + * handles around, so that this utility module can be phased out. + * + * @{ + */ + +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" + +#include "rmapi/client.h" + +#include "containers/list.h" + +MAKE_LIST(ClientHandlesList, NvHandle); + +#define serverutilGetDerived(pRmClient, hResource, ppBaseRes, type) \ + (clientGetResource(staticCast((pRmClient), RsClient), \ + (hResource), \ + classId(type), \ + (ppBaseRes)) != NV_OK) \ + ? NULL \ + : dynamicCast(*(ppBaseRes), type) + +/** + * Get the reference to a resource + * @param[in] hClient Client handle + * @param[in] hObject The resource to look up + * @param[out] ppResourceRef The reference to the resource + */ +NV_STATUS serverutilGetResourceRef(NvHandle hClient, NvHandle hObject, + RsResourceRef **ppResourceRef); + +/** + * Get the reference to a resource (with a type check) + * @param[in] hClient Client handle + * @param[in] hObject The resource to look up + * @param[out] ppResourceRef The reference to the resource + */ +NV_STATUS serverutilGetResourceRefWithType(NvHandle hClient, NvHandle hObject, + NvU32 internalClassId, RsResourceRef **ppResourceRef); + +/** + * Get the reference to a resource (with a type and parent check) + * @param[in] hClient Client handle + * @param[in] hObject The resource to look up + * @param[out] ppResourceRef The reference to the resource + */ +NV_STATUS serverutilGetResourceRefWithParent(NvHandle hClient, NvHandle hParent, NvHandle hObject, + NvU32 internalClassId, RsResourceRef **ppResourceRef); + +/** + * Find the first child object of given type + */ +RsResourceRef *serverutilFindChildRefByType(NvHandle hClient, NvHandle hParent, NvU32 internalClassId, NvBool bExactMatch); + + +/** + * Get an iterator to the elements in the client's resource map + * + * See clientRefIter for documentation on hScopedObject and iterType + */ +RS_ITERATOR serverutilRefIter(NvHandle hClient, NvHandle hScopedObject, NvU32 internalClassId, RS_ITER_TYPE iterType, NvBool bExactMatch); + +/** + * Get an iterator to the elements in the server's shared object map + */ +RS_SHARE_ITERATOR serverutilShareIter(NvU32 internalClassId); + +/** + * Advance an iterator over the elements in the server's shared object map + */ +NvBool serverutilShareIterNext(RS_SHARE_ITERATOR* pIt); + +/** + * Validate that a given resource handle is well-formed and does not already + * exist under a given client. + */ +NvBool serverutilValidateNewResourceHandle(NvHandle, NvHandle); + +/** + * Generate an unused handle for a resource. The handle will be generated in the white-listed range that was + * specified when the client was allocated. + */ +NV_STATUS serverutilGenResourceHandle(NvHandle, NvHandle*); + +/** + * Get a client pointer from a client handle without taking any locks.
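+ * + * Illustrative use (editor's sketch, not from the original source): this is + * only safe when the caller already guarantees the client cannot be freed, + * e.g. by holding the API lock: + * + * NV_ASSERT(rmapiLockIsOwner()); + * RmClient *pClient = serverutilGetClientUnderLock(hClient);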
+ * + * @param[in] hClient The client to acquire + */ +RmClient *serverutilGetClientUnderLock(NvHandle hClient); + +/** + * Get a client pointer from a client handle and lock it. + * + * @param[in] hClient The client to acquire + * @param[in] access LOCK_ACCESS_* + * @param[out] ppClientEntry Pointer to the CLIENT_ENTRY + * @param[out] ppClient Pointer to the RmClient + */ +NV_STATUS serverutilAcquireClient(NvHandle hClient, LOCK_ACCESS_TYPE access, CLIENT_ENTRY **ppClientEntry, RmClient **ppClient); + +/** + * Unlock a client + * + * @param[in] access LOCK_ACCESS_* + * @param[in] pClientEntry Pointer to the CLIENT_ENTRY + */ +void serverutilReleaseClient(LOCK_ACCESS_TYPE access, CLIENT_ENTRY *pClientEntry); + +/** + * Get the first valid client pointer in resource server without taking any locks. + */ +RmClient **serverutilGetFirstClientUnderLock(void); + +/** + * Get the next valid client pointer in resource server without taking any locks. + * + * @param[in] pClient Pointer returned by a previous call to + * serverutilGetFirstClientUnderLock or + * serverutilGetNextClientUnderLock + */ +RmClient **serverutilGetNextClientUnderLock(RmClient **pClient); + +/*! + * @brief Retrieve all hClients allocated for the given (ProcID, SubProcessID) + * + * This function iterates through all the clients in the resource server and finds + * hClients allocated for the given (ProcID, SubProcessID) and returns them to + * the caller. + * + * @param[in] procID Process ID + * @param[in] subProcessID SubProcess ID + * @param[out] pClientList List in which the client handles are returned + * + * @return NV_STATUS + */ +NV_STATUS serverutilGetClientHandlesFromPid(NvU32 procID, NvU32 subProcessID, ClientHandlesList *pClientList); + +/** + * This is a filtering function intended to be used with refFindCpuMappingWithFilter. + * This filter will only match user mappings belonging to the current process. + * + * @param[in] ppMapping The mapping that is being filtered + */ +NvBool serverutilMappingFilterCurrentUserProc(RsCpuMapping *ppMapping); + +/** + * This is a filtering function intended to be used with refFindCpuMappingWithFilter. + * This filter will only match kernel mappings. + * + * @param[in] ppMapping The mapping that is being filtered + */ +NvBool serverutilMappingFilterKernel(RsCpuMapping *ppMapping); + +#endif diff --git a/src/nvidia/inc/kernel/vgpu/dev_vgpu.h b/src/nvidia/inc/kernel/vgpu/dev_vgpu.h new file mode 100644 index 0000000..410c908 --- /dev/null +++ b/src/nvidia/inc/kernel/vgpu/dev_vgpu.h @@ -0,0 +1,349 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __vgpu_dev_nv_vgpu_h__ +#define __vgpu_dev_nv_vgpu_h__ + +#include "nvtypes.h" +#include "nvmisc.h" + +#include "nvctassert.h" + +/******************************************************************************/ +/* EMULATED REGISTERS - START */ +/******************************************************************************/ + +#define NV_VGPU_GUEST_OS_TYPE_ARCH 2:0 /* -W-4F */ +#define NV_VGPU_GUEST_OS_TYPE_ARCH_UNKNOWN 0 /* -W--V */ +#define NV_VGPU_GUEST_OS_TYPE_ARCH_AARCH64 1 /* -W--V */ +#define NV_VGPU_GUEST_OS_TYPE_ARCH_X86_64 2 /* -W--V */ +#define NV_VGPU_GUEST_OS_TYPE_ARCH_INVALID 3 /* -W--V */ +#define NV_VGPU_GUEST_OS_TYPE_OS 7:3 /* -W-4F */ +#define NV_VGPU_GUEST_OS_TYPE_OS_UNKNOWN 0 /* -W--V */ +#define NV_VGPU_GUEST_OS_TYPE_OS_LINUX 1 /* -W--V */ +#define NV_VGPU_GUEST_OS_TYPE_OS_WINDOWS7 2 /* -W--V */ +#define NV_VGPU_GUEST_OS_TYPE_OS_WINDOWS10 3 /* -W--V */ +#define NV_VGPU_GUEST_OS_TYPE_OS_INVALID 4 /* -W--V */ +// All remaining values for NV_VGPU_GUEST_OS_TYPE_OS are reserved/not supported. +#define NV_VGPU_GUEST_OS_TYPE_PAGE_SIZE 15:8 /* -W-4F */ + +#define NV_VGPU_SHARED_MEMORY__SIZE_1 4 /* */ +#define NV_VGPU_SHARED_MEMORY_TARGET 1:0 /* RWIVF */ +#define NV_VGPU_SHARED_MEMORY_TARGET_PHYS_NVM 0x00000001 /* RW--V */ +#define NV_VGPU_SHARED_MEMORY_TARGET_PHYS_PCI_COHERENT 0x00000003 /* RW--V */ +#define NV_VGPU_SHARED_MEMORY_STATUS 3:3 /* RWIVF */ +#define NV_VGPU_SHARED_MEMORY_STATUS_INVALID 0x00000000 /* RW--V */ +#define NV_VGPU_SHARED_MEMORY_STATUS_VALID 0x00000001 /* RW--V */ +#define NV_VGPU_SHARED_MEMORY_SIZE 5:4 /* RWIVF */ +#define NV_VGPU_SHARED_MEMORY_SIZE_4KB 0x00000000 /* RW--V */ +#define NV_VGPU_SHARED_MEMORY_ADDR_LO 31:12 /* RWIVF */ + +#define NV_VGPU_SHARED_MEMORY_HI_ADDR 19:0 /* RWIVF */ + +/******************************************************************************/ +/* EMULATED REGISTERS - END */ +/******************************************************************************/ + +/******************************************************************************/ +/* SHARED MEMORY - START */ +/******************************************************************************/ + +/* vGPU Current Pstate */ +#define NV_VGPU_SHARED_MEMORY_POINTER_CURRENT_PSTATE 0x00000090 +#define NV_VGPU_SHARED_MEMORY_POINTER_CURRENT_PSTATE_VALUE 31:0 + +/* vGPU ECC errors */ +#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_TYPE 0x00000094 +#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_TYPE_VALUE 31:0 +#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_UNIT 0x00000098 +#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_UNIT_VALUE 31:0 +#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_ERROR_COUNT 0x0000009c +#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_ERROR_COUNT_VALUE 31:0 + +/* vGPU backdoor VNC state */ +#define NV_VGPU_SHARED_MEMORY_POINTER_VNC 0x000000a0 +#define NV_VGPU_SHARED_MEMORY_POINTER_VNC_STATE 31:0 +#define NV_VGPU_SHARED_MEMORY_POINTER_VNC_STATE_ENABLED 0x00000001 +#define NV_VGPU_SHARED_MEMORY_POINTER_VNC_STATE_DISABLED 0x00000000 + +/* vGPU backdoor VNC support */ +#define NV_VGPU_SHARED_MEMORY_POINTER_VNC_SUPPORT 0x000000a4 +#define NV_VGPU_SHARED_MEMORY_POINTER_VNC_SUPPORT_VALUE 31:0 +#define NV_VGPU_SHARED_MEMORY_POINTER_VNC_SUPPORT_ENABLED 0x0001 +#define NV_VGPU_SHARED_MEMORY_POINTER_VNC_SUPPORT_DISABLED 
0x0000 + +/* ecc fatal poison error */ +#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_POISON_ERROR 0x000000a8 +#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_POISON_ERROR_VALUE 31:0 + +/* NvEnc Stats Reporting State */ +#define NV_VGPU_SHARED_MEMORY_POINTER_NVENC_STATS_REPORTING_STATE 0x000000ac +#define NV_VGPU_SHARED_MEMORY_POINTER_NVENC_STATS_REPORTING_STATE_VALUE 31:0 +#define NV_VGPU_SHARED_MEMORY_POINTER_NVENC_STATS_REPORTING_STATE_DISABLED 0x00000000 +#define NV_VGPU_SHARED_MEMORY_POINTER_NVENC_STATS_REPORTING_STATE_ENABLED 0x00000001 +#define NV_VGPU_SHARED_MEMORY_POINTER_NVENC_STATS_REPORTING_STATE_NOT_SUPPORTED 0x00000002 + +/* Nvlink inband message response available */ +#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE 0x000000b0 +#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE_NONE 0x00000000 +#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE_PROBE 0:0 +#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE_PROBE_CLEAR 0x00000000 +#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE_PROBE_PENDING 0x00000001 +#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE_MC_SETUP 1:1 +#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE_MC_SETUP_CLEAR 0x00000000 +#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE_MC_SETUP_PENDING 0x00000001 + +/******************************************************************************/ +/* SHARED MEMORY - END */ +/******************************************************************************/ + +/******************************************************************************/ +/* EVENT MEMORY - START */ +/******************************************************************************/ + +/* Event ring entry (10 words) */ +#define NV_VGPU_EV__SIZE_1 0x0000000a /* */ +#define NV_VGPU_EV_HCLIENT 0x00000000 /* RW-4R */ +#define NV_VGPU_EV_HOBJECT 0x00000004 /* RW-4R */ +#define NV_VGPU_EV_NOTIFY_INDEX 0x00000008 /* RW-4R */ +#define NV_VGPU_EV_FLAGS 0x0000000c /* RW-4R */ +#define NV_VGPU_EV_FLAGS_ALLOCATED 3:0 /* RW-4R */ +#define NV_VGPU_EV_FLAGS_ALLOCATED_GUEST 0x00000000 /* RW--V */ +#define NV_VGPU_EV_FLAGS_ALLOCATED_PLUGIN 0x00000001 /* RW--V */ +#define NV_VGPU_EV_FLAGS_HAS_NOTIFIER_DATA 4:4 /* RW-4R */ +#define NV_VGPU_EV_FLAGS_HAS_NOTIFIER_DATA_FALSE 0x00000000 /* RW--V */ +#define NV_VGPU_EV_FLAGS_HAS_NOTIFIER_DATA_TRUE 0x00000001 /* RW--V */ +#define NV_VGPU_EV_FLAGS_TYPE 31:16 /* RW-4R */ +#define NV_VGPU_EV_FLAGS_TYPE_ROBUST_CHANNEL_ERROR 0x00000000 /* RW--V */ +#define NV_VGPU_EV_FLAGS_TYPE_EVENT_INTR_MTHD 0x00000001 /* RW--V */ +#define NV_VGPU_EV_FLAGS_TYPE_VBLANK_INTR 0x00000002 /* RW--V */ +#define NV_VGPU_EV_FLAGS_TYPE_VNC 0x00000003 /* RW--V */ +#define NV_VGPU_EV_FLAGS_TYPE_PSTATE 0x00000004 /* RW--V */ +#define NV_VGPU_EV_FLAGS_TYPE_ECC 0x00000005 /* RW--V */ +#define NV_VGPU_EV_FLAGS_TYPE_NVENC_REPORTING_STATE 0x00000006 /* RW--V */ +#define NV_VGPU_EV_FLAGS_TYPE_INBAND_RESPONSE 0x00000007 /* RW--V */ +#define NV_VGPU_EV_FLAGS_TYPE_TRACING 0x00000008 /* RW--V */ +#define NV_VGPU_EV_NOTIFIER_TIMESTAMP 0x00000010 /* RW-4R */ +#define NV_VGPU_EV_NOTIFIER_TIMESTAMP_HI 0x00000014 /* RW-4R */ +#define NV_VGPU_EV_NOTIFIER_INFO32 0x00000018 /* RW-4R */ +#define NV_VGPU_EV_NOTIFIER_INFO16 0x0000001c /* RW-4R */ +#define NV_VGPU_EV_NOTIFIER_INFO16_VALUE 15:0 /* RW-4R */ +#define NV_VGPU_EV_NOTIFIER_STATUS 0x00000020 /* RW-4R */ +#define NV_VGPU_EV_ROBUST_CHANNEL_ERROR_CHID 0x00000024 /* RW-4R */ + +typedef struct { + volatile NvU32 hClient; + volatile NvU32 hObject; + volatile
NvU32 notifyIndex; + volatile NvU32 flags; + volatile NvU32 timestampLo; + volatile NvU32 timestampHi; + volatile NvU32 info32; + volatile NvU32 info16; + volatile NvU32 status; + volatile NvU32 rcChid; +} VGPU_EVENT_BUF_ENTRY; + +#define VGPU_EVENT_BUF_ENTRY_SIZE (sizeof(VGPU_EVENT_BUF_ENTRY)) +#define VGPU_EVENT_BUF_ENTRY_COUNT (RM_PAGE_SIZE / VGPU_EVENT_BUF_ENTRY_SIZE) + +ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, hClient ) == NV_VGPU_EV_HCLIENT); +ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, hObject ) == NV_VGPU_EV_HOBJECT); +ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, notifyIndex) == NV_VGPU_EV_NOTIFY_INDEX); +ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, flags ) == NV_VGPU_EV_FLAGS); +ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, timestampLo) == NV_VGPU_EV_NOTIFIER_TIMESTAMP); +ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, timestampHi) == NV_VGPU_EV_NOTIFIER_TIMESTAMP_HI); +ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, info32 ) == NV_VGPU_EV_NOTIFIER_INFO32); +ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, info16 ) == NV_VGPU_EV_NOTIFIER_INFO16); +ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, status ) == NV_VGPU_EV_NOTIFIER_STATUS); +ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, rcChid ) == NV_VGPU_EV_ROBUST_CHANNEL_ERROR_CHID); +ct_assert(VGPU_EVENT_BUF_ENTRY_SIZE == (NV_VGPU_EV__SIZE_1 * sizeof (NvU32))); + +/******************************************************************************/ +/* EVENT MEMORY - END */ +/******************************************************************************/ + +/* virtual GPU */ +#ifndef NV_XVE_ID_DEVICE_CHIP_VGPU +#define NV_XVE_ID_DEVICE_CHIP_VGPU 0x00000f00 /* R---V */ +#endif + +/******************************************************************************/ +/* GSP Control buffer shared between Guest RM and GSP Plugin - START */ +/******************************************************************************/ +/******************************************************************************/ +/* GSP Control buffer format - Version 1 - START */ +/******************************************************************************/ + +#define VGPU_GSP_BUF_ADDR_V1_VALIDITY 0:0 +#define VGPU_GSP_BUF_ADDR_V1_VALIDITY_INVALID 0x00000000 +#define VGPU_GSP_BUF_ADDR_V1_VALIDITY_VALID 0x00000001 +#define VGPU_GSP_BUF_ADDR_V1_APERTURE 2:1 +#define VGPU_GSP_BUF_ADDR_V1_APERTURE_INVALID 0x00000000 +#define VGPU_GSP_BUF_ADDR_V1_APERTURE_SYSMEM 0x00000001 +#define VGPU_GSP_BUF_ADDR_V1_APERTURE_FBMEM 0x00000002 +#define VGPU_GSP_BUF_ADDR_V2_SIZE 4:3 +#define VGPU_GSP_BUF_ADDR_V2_SIZE_4K 0x00000000 +#define VGPU_GSP_BUF_ADDR_V2_SIZE_128K 0x00000001 +#define VGPU_GSP_BUF_ADDR_V2_SIZE_2M 0x00000002 +#define VGPU_GSP_BUF_ADDR_V1_PFN 63:12 + +#define VGPU_GSP_CTRL_BUF_V1_VERSION 1 +#define VGPU_GSP_CTRL_BUF_V2_VERSION 2 + +/****** Control buffer: written by guest RM and read by GSP vGPU plugin *******/ + +#define VGPU_GSP_CTRL_BUF_SIZE_V1 4096 + +typedef struct { + NvU64 addr; + NvU64 bar2Offset; +} VGPU_GSP_BUF_INFO; + +typedef union { + struct { + volatile NvU32 version; // Version of control buffer format + volatile NvU32 requestId; // Request sequence number + volatile VGPU_GSP_BUF_INFO responseBuf; // Response buffer address + volatile VGPU_GSP_BUF_INFO msgBuf; // RPC message buffer address + volatile VGPU_GSP_BUF_INFO sharedMem; // Shared memory buffer + volatile VGPU_GSP_BUF_INFO eventBuf; // Event buffer address + volatile NvU32 getEventBuf; // GET index in circular event buffer + volatile NvU32 guestEccStatus; // guest ecc status + volatile NvU64 sysmemBitMapTablePfn; // Root 
node's pfn value of dirty sysmem tracking table
+        volatile NvU32             guestOsType;                // Guest OS type
+        volatile NvU32             requestedGspCaps;           // requested GSP caps
+        volatile VGPU_GSP_BUF_INFO debugBuf;                   // Debug buffer address
+        volatile NvU32             getSaveHibernateBuf;        // GET index in circular hibernation buffer during SAVE
+        volatile NvU32             putRestoreHibernateBuf;     // PUT index in circular hibernation buffer during RESTORE
+        volatile NvU32             IsMoreHibernateDataRestore; // If data is available to restore during hibernation
+    };
+    volatile NvU8 buf[VGPU_GSP_CTRL_BUF_SIZE_V1];
+} VGPU_GSP_CTRL_BUF_V1;
+
+// check size
+ct_assert(sizeof(VGPU_GSP_CTRL_BUF_V1) == VGPU_GSP_CTRL_BUF_SIZE_V1);
+
+// check field offset
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, version                    ) == 0x000);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, requestId                  ) == 0x004);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, responseBuf                ) == 0x008);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, msgBuf                     ) == 0x018);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, sharedMem                  ) == 0x028);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, eventBuf                   ) == 0x038);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, getEventBuf                ) == 0x048);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, guestEccStatus             ) == 0x04C);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, sysmemBitMapTablePfn       ) == 0x050);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, guestOsType                ) == 0x058);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, requestedGspCaps           ) == 0x05C);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, debugBuf                   ) == 0x060);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, getSaveHibernateBuf        ) == 0x070);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, putRestoreHibernateBuf     ) == 0x074);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, IsMoreHibernateDataRestore ) == 0x078);
+
+/****** Response buffer: written by GSP vGPU plugin and read by guest RM ******/
+
+#define VGPU_GSP_RESPONSE_BUF_SIZE_V1 4096
+
+typedef union {
+    struct {
+        volatile NvU32 responseId;              // Response sequence number
+        volatile NvU32 putEventBuf;             // PUT index in circular event buffer
+        volatile NvU32 hostEccStatus;           // host ecc status
+        volatile NvU32 usmType;                 // Host USM Type
+        volatile NvU32 enabledGspCaps;          // Enabled GSP caps
+        volatile NvU32 putSaveHibernateBuf;     // PUT index in circular hibernate shared buffer during save
+        volatile NvU32 getRestoreHibernateBuf;  // GET index in circular hibernate shared buffer during restore
+        volatile NvU32 IsMoreHibernateDataSave; // Indicates if data is available to save during hibernation
+    };
+    volatile NvU8 buf[VGPU_GSP_RESPONSE_BUF_SIZE_V1];
+} VGPU_GSP_RESPONSE_BUF_V1;
+
+// check size
+ct_assert(sizeof(VGPU_GSP_RESPONSE_BUF_V1) == VGPU_GSP_RESPONSE_BUF_SIZE_V1);
+
+// check field offset
+ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, responseId              ) == 0x000);
+ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, putEventBuf             ) == 0x004);
+ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, hostEccStatus           ) == 0x008);
+ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, usmType                 ) == 0x00C);
+ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, enabledGspCaps          ) == 0x010);
+ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, putSaveHibernateBuf     ) == 0x014);
+ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, getRestoreHibernateBuf  ) == 0x018);
+ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, IsMoreHibernateDataSave ) == 0x01C);
+
+/******************************************************************************/
+/* GSP Control buffer format - Version 1 - END */
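+/******************************************************************************/
+
+/*
+ * A minimal illustration (not part of the shared ABI) of how an address is
+ * packed into the VGPU_GSP_BUF_ADDR_V1 layout defined above: validity in
+ * bit 0, aperture in bits 2:1, PFN in bits 63:12. The helper name and the
+ * open-coded shifts are assumptions for illustration only; driver code would
+ * normally go through its DRF field macros instead.
+ */
+static inline NvU64 vgpuGspBufAddrV1Pack_example(NvU64 physAddr, NvU64 aperture)
+{
+    NvU64 addr = 0;
+    addr |= (physAddr >> 12) << 12;              // PFN: bits 63:12 (low 12 bits cleared)
+    addr |= (aperture & 0x3) << 1;               // e.g. VGPU_GSP_BUF_ADDR_V1_APERTURE_SYSMEM
+    addr |= VGPU_GSP_BUF_ADDR_V1_VALIDITY_VALID; // bit 0: mark the address valid
+    return addr;
+}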
+/******************************************************************************/ + +// Control buffer format for Guest RM +typedef union { + volatile VGPU_GSP_CTRL_BUF_V1 v1; +} VGPU_GSP_CTRL_BUF_RM; + +// Control buffer format for GSP vGPU Plugin +typedef union { + volatile const VGPU_GSP_CTRL_BUF_V1 v1; +} VGPU_GSP_CTRL_BUF_PLUGIN; + +// Response buffer format for Guest RM +typedef union { + volatile const VGPU_GSP_RESPONSE_BUF_V1 v1; +} VGPU_GSP_RESPONSE_BUF_RM; + +// Response buffer format for GSP vGPU Plugin +typedef union { + volatile VGPU_GSP_RESPONSE_BUF_V1 v1; +} VGPU_GSP_RESPONSE_BUF_PLUGIN; + +/******************************************************************************/ +/* GSP Control buffer shared between Guest RM and GSP Plugin - END */ +/******************************************************************************/ + +// VGPU GSP dirty sysmem tracking pfn format +#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_VALIDITY 0:0 +#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_VALIDITY_INVALID 0x00000000 +#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_VALIDITY_VALID 0x00000001 +#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_APERTURE 2:1 +#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_APERTURE_INVALID 0x00000000 +#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_APERTURE_SYSMEM 0x00000001 +#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_APERTURE_FBMEM 0x00000002 +#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_PFN 63:12 + +#define MAX_PFNS_PER_4K_PAGE 512 + +// VGPU GSP dirty sysmem tracking root node format +typedef struct { + NvU16 nodeCount; // count of allocated bitmap nodes + NvU16 padding1; + NvU32 padding2; + NvU64 nodePfns[MAX_PFNS_PER_4K_PAGE - 1]; +} VGPU_GSP_SYSMEM_BITMAP_ROOT_NODE; + +#define NV_VGPU_DEBUG_BUFF_DRIVER_SIZE 0x7FF +#define NV_VGPU_GSP_CAPS_DEBUG_BUFF_SUPPORTED 0:0 +#define NV_VGPU_GSP_CAPS_DEBUG_BUFF_SUPPORTED_TRUE 0x00000001 +#define NV_VGPU_GSP_CAPS_DEBUG_BUFF_SUPPORTED_FALSE 0x00000000 + +ct_assert(sizeof(VGPU_GSP_SYSMEM_BITMAP_ROOT_NODE) == 0x1000); +#endif // __vgpu_dev_nv_vgpu_h__ diff --git a/src/nvidia/inc/kernel/vgpu/rm_plugin_shared_code.h b/src/nvidia/inc/kernel/vgpu/rm_plugin_shared_code.h new file mode 100644 index 0000000..a74c5be --- /dev/null +++ b/src/nvidia/inc/kernel/vgpu/rm_plugin_shared_code.h @@ -0,0 +1,8581 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/*
+ * IMPORTANT NOTE:
+ * This file contains C functions shared between resman and plugin.
+ * Make sure that this file is included only once in resman and only once in plugin code.
+ * Otherwise, make sure that all functions in this file are static.
+ */
+
+/*
+ * The structures and functions are used in the following sequence:
+ *
+ * A call is made from guest RM into the RPC framework in RM.
+ * Step 1. Guest RM : copies the parameters from guest SDK structures to RPC structures (serialization)
+ * Step 2. Plugin   : copies the parameters from RPC structures to host SDK structures (de-serialization)
+ *                    (Host RM side operations, such as RM control, alloc, free, etc.)
+ * Step 3. Plugin   : copies the parameters from host SDK structures to RPC structures (serialization)
+ * Step 4. Guest RM : copies the parameters from RPC structures to guest SDK structures (de-serialization)
+ * The data is returned to the caller.
+ */
+
+/*
+ * Copy DMA Control Parameters structure element by element.
+ * Depending upon the cmd, copy parameters from pParams to params_in.
+ */
+
+#include "ctrl/ctrl208f/ctrl208fdma.h" // NV208F_CTRL_DMA_GET_VAS_BLOCK_DETAILS_PARAMS
+#include "ctrl/ctrla06f.h"             // NVA06F_CTRL_GET_CLASS_ENGINEID
+#include "ctrl/ctrl90e6.h"             // NV90E6_CTRL_CMD_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK
+#include "ctrl/ctrl90f1.h"
+#include "ctrl/ctrl90cd.h"
+#include "ctrl/ctrl0080.h"
+#include "nvctassert.h"
+
+// Copy elements from RPC structures to SDK structures (Step 2 or Step 4 listed above)
+
+typedef NV_STATUS return_t;
+#define serialize_deserialize(u) deserialize_##u##_HAL
+#define getIpVersion() pObjRpcStructureCopy->__nvoc_pbase_Object->ipVersion
+#define SUCCESS_T NV_OK
+#define FAILURE_T NV_ERR_INVALID_ARGUMENT
+#define COPY_OUTPUT_PARAMETERS
+
+#define NV_ADDR_UNKNOWN 0 // Address space is unknown
+#define NV_ADDR_SYSMEM  1 // System memory (PCI)
+#define NV_ADDR_FBMEM   2 // Frame buffer memory space
+#define NV_ADDR_REGMEM  3 // NV register memory space
+#define NV_ADDR_VIRTUAL 4 // Virtual address space only
+#define NV_ADDR_FABRIC  5 // Multi-node fabric address space
+
+#define NV2080_NOTIFIERS_MAXCOUNT_R525 178
+#define NV2080_NOTIFIERS_MAXCOUNT_R470 162
+
+#define NV_CHECK_AND_ALIGN_OFFSET(offset, bAlign) {          \
+        if (bAlign) {                                        \
+            offset = NV_ALIGN_UP(offset, sizeof(NvU64));     \
+        }                                                    \
+    }
+
+#ifdef BUILD_COMMON_RPCS
+
+static
+return_t deserialize_NVA080_CTRL_SET_FB_USAGE_PARAMS_v07_02(NVA080_CTRL_SET_FB_USAGE_PARAMS *pParams,
+                                                            NvU8 *buffer,
+                                                            NvU32 bufferSize,
+                                                            NvU32 *offset)
+{
+#ifdef COPY_INPUT_PARAMETERS
+    NVA080_CTRL_SET_FB_USAGE_PARAMS_v07_02 *src = (void*)(buffer);
+    NVA080_CTRL_SET_FB_USAGE_PARAMS *dest = pParams;
+
+    if (src && dest)
+    {
+        dest->fbUsed = src->fbUsed;
+    }
+    else
+        return FAILURE_T;
+#endif
+    return SUCCESS_T;
+}
+
+static
+return_t deserialize_NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS_v06_01(NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS *pParams,
+                                                                            NvU8 *buffer,
+                                                                            NvU32 bufferSize,
+                                                                            NvU32 *offset)
+{
+#ifdef COPY_INPUT_PARAMETERS
+    NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS_v06_01 *src = (void*)(buffer);
+    NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS *dest = pParams;
+
+    if (dest && src) {
+        dest->hResolution          = src->hResolution;
+        dest->vResolution          = src->vResolution;
+        dest->averageEncodeLatency = src->averageEncodeLatency;
+        dest->averageEncodeFps     = src->averageEncodeFps;
+        dest->timestampBufferSize  = 0;
+    }
+    else
+        return FAILURE_T;
+#endif
+    return SUCCESS_T;
+}
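+
+/*
+ * How the COPY_* direction macros drive the deserializers below (a sketch of
+ * the build pattern, not a definitive description of every build): each
+ * function keeps its guest-to-host copies under #ifdef COPY_INPUT_PARAMETERS
+ * (Step 2 above) and its host-to-guest copies under #ifdef
+ * COPY_OUTPUT_PARAMETERS (Step 4 above). This inclusion defines
+ * COPY_OUTPUT_PARAMETERS, so only the output-copy halves are compiled here;
+ * a build that defines COPY_INPUT_PARAMETERS instead gets the mirror-image
+ * input copies from the same source.
+ */
+
+static
+return_t 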
deserialize_NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS_v10_01(NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS_v10_01 *src = (void*)(buffer); + NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS *dest = pParams; + + if (src && dest) { + dest->engineID = src->engineID; + dest->subdeviceInstance = src->subdeviceInstance; + dest->resetReason = src->resetReason; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t deserialize_NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS_v03_00(NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS_v03_00 *src = (void*)(buffer); + NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS *dest = pParams; + + if (src && dest) { + dest->exceptType = src->exceptType; + dest->engineID = src->engineID; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS_v18_09(NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS_v18_09 *src = (void*)(buffer); + NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS *dest = pParams; + + if (src && dest) { + dest->faultType = src->faultType; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_PERF_BOOST_PARAMS_v03_00(NV2080_CTRL_PERF_BOOST_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_PERF_BOOST_PARAMS_v03_00 *src = (void*)(buffer); + NV2080_CTRL_PERF_BOOST_PARAMS *dest = pParams; + + if (src && dest) { + dest->flags = src->flags; + dest->duration = src->duration; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS_v04_00(NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS_v04_00 *src = (void*)(buffer); + NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS *dest = pParams; + + if (src && dest) { + NvU32 i; + for (i = 0; i < NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE; ++i) { + dest->value.colorFB[i] = src->value.colorFB[i]; + dest->value.colorDS[i] = src->value.colorDS[i]; + } + dest->value.depth = src->value.depth; + dest->value.stencil = src->value.stencil; // Changed in v04_00 + dest->indexSize = src->indexSize; + dest->indexUsed = src->indexUsed; + dest->format = src->format; + dest->valType = src->valType; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS_v03_00(NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS_v03_00 *src = (void*)(buffer); + NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS *dest = pParams; + + if (src && dest) { + NvU32 i; + for (i = 0; i < NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE; ++i) { + dest->colorFB[i] = src->colorFB[i]; + dest->colorDS[i] = src->colorDS[i]; + } + dest->format = src->format; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS_v03_00(NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + 
NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS_v03_00 *src = (void*)(buffer);
+    NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS *dest = pParams;
+
+    if (src && dest) {
+        dest->depth = src->depth;
+        dest->format = src->format;
+    }
+    else
+        return FAILURE_T;
+
+    return SUCCESS_T;
+}
+
+static
+return_t deserialize_NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS_v27_06(NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS *pParams,
+                                                                     NvU8 *buffer,
+                                                                     NvU32 bufferSize,
+                                                                     NvU32 *offset)
+{
+    NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS_v27_06 *src = (void*)(buffer);
+    NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS *dest = pParams;
+
+    if (src && dest) {
+        dest->stencil = src->stencil;
+        dest->format = src->format;
+        dest->bSkipL2Table = src->bSkipL2Table;
+    }
+    else
+        return FAILURE_T;
+
+    return SUCCESS_T;
+}
+
+static
+return_t deserialize_NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_v03_00(NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *pParams,
+                                                               NvU8 *buffer,
+                                                               NvU32 bufferSize,
+                                                               NvU32 *offset)
+{
+    NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_v03_00 *src = (void*)(buffer);
+    NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *dest = pParams;
+
+    if (src && dest) {
+        dest->bEnable = src->bEnable;
+    }
+    else
+        return FAILURE_T;
+
+    return SUCCESS_T;
+}
+
+static
+return_t deserialize_NVA06C_CTRL_TIMESLICE_PARAMS_v06_00(NVA06C_CTRL_TIMESLICE_PARAMS *pParams,
+                                                         NvU8 *buffer,
+                                                         NvU32 bufferSize,
+                                                         NvU32 *offset)
+{
+    NVA06C_CTRL_TIMESLICE_PARAMS_v06_00 *src = (void*)(buffer);
+    NVA06C_CTRL_TIMESLICE_PARAMS *dest = pParams;
+
+    if (src && dest) {
+        dest->timesliceUs = src->timesliceUs;
+    }
+    else
+        return FAILURE_T;
+
+    return SUCCESS_T;
+}
+
+static
+return_t deserialize_NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS_v06_00(NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS *pParams,
+                                                                     NvU8 *buffer,
+                                                                     NvU32 bufferSize,
+                                                                     NvU32 *offset)
+{
+#ifdef COPY_INPUT_PARAMETERS
+    NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS_v06_00 *src = (void*)(buffer);
+    NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS *dest = pParams;
+    NvU32 i;
+
+    if (src && dest) {
+        if (src->numChannels > NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES) {
+            return FAILURE_T;
+        }
+
+        dest->bDisable               = src->bDisable;
+        dest->numChannels            = src->numChannels;
+        dest->bOnlyDisableScheduling = src->bOnlyDisableScheduling;
+        dest->bRewindGpPut           = src->bRewindGpPut;
+        dest->pRunlistPreemptEvent   = NULL; // vGPU does not support guest kernel handles
+
+        for (i = 0; i < src->numChannels; i++)
+        {
+            dest->hClientList[i] = src->hClientList[i];
+            dest->hChannelList[i] = src->hChannelList[i];
+        }
+    }
+    else
+        return FAILURE_T;
+#endif
+    return SUCCESS_T;
+}
+
+static
+return_t deserialize_NVA06C_CTRL_PREEMPT_PARAMS_v09_0A(NVA06C_CTRL_PREEMPT_PARAMS *pParams,
+                                                       NvU8 *buffer,
+                                                       NvU32 bufferSize,
+                                                       NvU32 *offset)
+{
+#ifdef COPY_INPUT_PARAMETERS
+    NVA06C_CTRL_PREEMPT_PARAMS_v09_0A *src = (void*)(buffer);
+    NVA06C_CTRL_PREEMPT_PARAMS *dest = pParams;
+
+    if (src && dest) {
+        dest->bWait          = src->bWait;
+        dest->bManualTimeout = src->bManualTimeout;
+        dest->timeoutUs      = src->timeoutUs;
+    }
+    else
+        return FAILURE_T;
+#endif
+    return SUCCESS_T;
+}
+
+static
+return_t deserialize_NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02(NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams,
+                                                                NvU8 *buffer,
+                                                                NvU32 bufferSize,
+                                                                NvU32 *offset)
+{
+#ifdef COPY_INPUT_PARAMETERS
+    NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02 *src = (void*)(buffer);
+    NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS *dest = pParams;
+
+    if (src && dest) {
+        dest->tsgInterleaveLevel = src->tsgInterleaveLevel;
+    }
+    else
+        return FAILURE_T;
+#endif
+    return SUCCESS_T;
+}
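+
+/*
+ * The TSG-level (NVA06C) interleave deserializer above and the channel-level
+ * (NVA06F) one below are deliberately parallel; only the copied field
+ * (tsgInterleaveLevel vs. channelInterleaveLevel) differs.
+ */
+static
+return_t 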
deserialize_NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02(NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02 *src = (void*)(buffer); + NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS *dest = pParams; + + if (src && dest) { + dest->channelInterleaveLevel = src->channelInterleaveLevel; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v12_01(NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v12_01 *src = (void*)(buffer); + NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS *dest = pParams; + + if (src && dest) { + NvU32 i; + + dest->flags = src->flags; + dest->hClient = src->hClient; + dest->hChannel = src->hChannel; + for (i = 0; i < NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END_v03_00; ++i) + dest->vMemPtrs[i] = src->vMemPtrs[i]; + dest->gfxpPreemptMode = src->gfxpPreemptMode; + dest->cilpPreemptMode = src->cilpPreemptMode; + dest->grRouteInfo.flags = src->grRouteInfo.flags; + dest->grRouteInfo.route = src->grRouteInfo.route; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v28_07(NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS* pParams, + NvU8* buffer, + NvU32 bufferSize, + NvU32* offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v28_07* src = (void*)(buffer); + NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS* dest = pParams; + + if (src && dest) { + NvU32 i; + + dest->flags = src->flags; + dest->hClient = src->hClient; + dest->hChannel = src->hChannel; + for (i = 0; i < NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END_v28_07; ++i) + dest->vMemPtrs[i] = src->vMemPtrs[i]; + dest->gfxpPreemptMode = src->gfxpPreemptMode; + dest->cilpPreemptMode = src->cilpPreemptMode; + dest->grRouteInfo.flags = src->grRouteInfo.flags; + dest->grRouteInfo.route = src->grRouteInfo.route; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS_v12_01(NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS_v12_01 *src = (void*)(buffer); + NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS *dest = pParams; + + if (src && dest) { + dest->flags = src->flags; + dest->hChannel = src->hChannel; + dest->gfxpPreemptMode = src->gfxpPreemptMode; + dest->cilpPreemptMode = src->cilpPreemptMode; + dest->grRouteInfo.flags = src->grRouteInfo.flags; + dest->grRouteInfo.route = src->grRouteInfo.route; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS_v03_00(NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS_v03_00 *src = (void*)(buffer); + NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS *dest = pParams; + + if (src && dest) { + dest->hClient = src->hClient; + dest->hChannel = src->hChannel; + dest->vMemPtr = src->vMemPtr; + dest->zcullMode = src->zcullMode; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t 
deserialize_NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS_v03_00(NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS_v03_00 *src = (void*)(buffer); + NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *dest = pParams; + + if (src && dest) { + dest->engineType = src->engineType; + dest->hClient = src->hClient; + dest->ChID = src->ChID; + dest->hChanClient = src->hChanClient; + dest->hObject = src->hObject; + dest->hVirtMemory = src->hVirtMemory; + dest->physAddress = src->physAddress; + dest->physAttr = src->physAttr; + dest->hDmaHandle = src->hDmaHandle; + dest->index = src->index; + dest->size = src->size; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t deserialize_NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_v1E_04(NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_v1E_04 *src = (void*)(buffer); + NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *dest = pParams; + + if (src && dest) { + NvU32 i; + + if (src->numLevelsToCopy > GMMU_FMT_MAX_LEVELS_v1A_12) { + return FAILURE_T; + } + + dest->hSubDevice = src->hSubDevice; + dest->subDeviceId = src->subDeviceId; + dest->pageSize = src->pageSize; + dest->virtAddrLo = src->virtAddrLo; + dest->virtAddrHi = src->virtAddrHi; + dest->numLevelsToCopy = src->numLevelsToCopy; + + for (i = 0; i < dest->numLevelsToCopy; i++) + { + dest->levels[i].physAddress = src->levels[i].physAddress; + dest->levels[i].aperture = src->levels[i].aperture; + dest->levels[i].size = src->levels[i].size; + dest->levels[i].pageShift = src->levels[i].pageShift; + } + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS + +#if (defined(GSP_PLUGIN_BUILD) || defined(RESMAN_BUILD)) && !defined(UMED_BUILD) +static +return_t deserialize_GET_BRAND_CAPS_v25_12(NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_OUTPUT_PARAMETERS + rpc_get_brand_caps_v25_12 *src = (void*)(buffer); + NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *dest = pParams; + + if (src && dest) { + dest->brands = src->brands; + } + else + return FAILURE_T; +#endif + + return SUCCESS_T; +} +#endif // (defined(GSP_PLUGIN_BUILD) || defined(RESMAN_BUILD)) && !defined(UMED_BUILD) + +static +return_t deserialize_NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS_v15_01(NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_OUTPUT_PARAMETERS + NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS_v15_01 *src = (void*)(buffer); + NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS *dest = pParams; + + if (src && dest) { + dest->engines = src->engines; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t deserialize_NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS_v03_00(NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS_v03_00 *src = (void*)(buffer); + NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS *dest = pParams; + + if (src && dest) { + dest->hTargetChannel = src->hTargetChannel; + dest->numSMsToClear = src->numSMsToClear; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t 
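+/*
+ * Unlike most deserializers in this file, the READ_ALL_SM_ERROR_STATES
+ * handler below takes an extra startingSMOffset argument: the control call
+ * is split across multiple RPCs of at most
+ * VGPU_RPC_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PER_RPC_v21_06 SMs each, and
+ * every chunk is deposited at smIdDest = startingSMOffset + i in the
+ * caller's array.
+ */
+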
deserialize_NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS_v21_06(NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset, + NvU32 startingSMOffset) +{ + NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS_v21_06 *src = (void*)(buffer); + NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS *dest = pParams; + + if (src && dest) { +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i, smIdDest; +#endif + + if (src->numSMsToRead > VGPU_RPC_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PER_RPC_v21_06) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->hTargetChannel = src->hTargetChannel; + dest->startingSM = src->startingSM; + dest->numSMsToRead = src->numSMsToRead; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + for (i = 0; i < src->numSMsToRead; ++i) + { + smIdDest = startingSMOffset + i; + dest->smErrorStateArray[smIdDest].hwwGlobalEsr = src->smErrorStateArray[i].hwwGlobalEsr; + dest->smErrorStateArray[smIdDest].hwwWarpEsr = src->smErrorStateArray[i].hwwWarpEsr; + dest->smErrorStateArray[smIdDest].hwwWarpEsrPc = src->smErrorStateArray[i].hwwWarpEsrPc; + dest->smErrorStateArray[smIdDest].hwwGlobalEsrReportMask = src->smErrorStateArray[i].hwwGlobalEsrReportMask; + dest->smErrorStateArray[smIdDest].hwwWarpEsrReportMask = src->smErrorStateArray[i].hwwWarpEsrReportMask; + dest->smErrorStateArray[smIdDest].hwwEsrAddr = src->smErrorStateArray[i].hwwEsrAddr; + dest->smErrorStateArray[smIdDest].hwwWarpEsrPc64 = src->smErrorStateArray[i].hwwWarpEsrPc64; + /* New fields added in version v21_06 */ + dest->smErrorStateArray[smIdDest].hwwCgaEsr = src->smErrorStateArray[i].hwwCgaEsr; + dest->smErrorStateArray[smIdDest].hwwCgaEsrReportMask = src->smErrorStateArray[i].hwwCgaEsrReportMask; + } + dest->mmuFault.valid = src->mmuFault.valid; + dest->mmuFault.faultInfo = src->mmuFault.faultInfo; + dest->mmuFaultInfo = src->mmuFault.faultInfo; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS_v03_00(NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS_v03_00 *src = (void*)(buffer); + NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS *dest = pParams; + + if (src && dest) { + dest->exceptionMask = src->exceptionMask; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_v1A_20(NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_v1A_20 *src = (void*)(buffer); + NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *dest = pParams; + NvU32 i; + + if (src && dest) + { + dest->engineType = src->engineType; + dest->hClient = src->hClient; + dest->ChID = src->ChID; + dest->hChanClient = src->hChanClient; + dest->hObject = src->hObject; + dest->hVirtMemory = src->hVirtMemory; + dest->virtAddress = src->virtAddress; + dest->size = src->size; + dest->entryCount = src->entryCount; + + if (dest->entryCount > NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES) { + return FAILURE_T; + } + + for (i = 0; i < dest->entryCount; i++) { + dest->promoteEntry[i].gpuPhysAddr = src->promoteEntry[i].gpuPhysAddr; + dest->promoteEntry[i].gpuVirtAddr = src->promoteEntry[i].gpuVirtAddr; + dest->promoteEntry[i].size = src->promoteEntry[i].size; + dest->promoteEntry[i].physAttr = src->promoteEntry[i].physAttr; + dest->promoteEntry[i].bufferId = 
src->promoteEntry[i].bufferId; + dest->promoteEntry[i].bInitialize = src->promoteEntry[i].bInitialize; + dest->promoteEntry[i].bNonmapped = src->promoteEntry[i].bNonmapped; + } + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t deserialize_NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS_v1A_06(NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS_v1A_06 *src = (void*)(buffer); + NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS *dest = pParams; + + if (src && dest) { +#ifdef COPY_OUTPUT_PARAMETERS + dest->waitForEvent = src->waitForEvent; + dest->hResidentChannel = src->hResidentChannel; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS_v1A_06(NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS_v1A_06 *src = (void*)(buffer); + NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS *dest = pParams; + + if (src && dest) { + NvU32 idx = 0; +#ifdef COPY_INPUT_PARAMETERS + if (src->regOpCount > NV83DE_CTRL_GPU_EXEC_REG_OPS_MAX_OPS) { + return FAILURE_T; + } + + dest->bNonTransactional = src->bNonTransactional; + dest->regOpCount = src->regOpCount; + + for (idx = 0; idx < src->regOpCount; idx++) + { + dest->regOps[idx].regOp = src->regOps[idx].regOp; + dest->regOps[idx].regType = src->regOps[idx].regType; + dest->regOps[idx].regQuad = src->regOps[idx].regQuad; + dest->regOps[idx].regGroupMask = src->regOps[idx].regGroupMask; + dest->regOps[idx].regSubGroupMask = src->regOps[idx].regSubGroupMask; + dest->regOps[idx].regOffset = src->regOps[idx].regOffset; + dest->regOps[idx].regAndNMaskLo = src->regOps[idx].regAndNMaskLo; + dest->regOps[idx].regAndNMaskHi = src->regOps[idx].regAndNMaskHi; + dest->regOps[idx].regValueLo = src->regOps[idx].regValueLo; + dest->regOps[idx].regValueHi = src->regOps[idx].regValueHi; + } +#endif +#ifdef COPY_OUTPUT_PARAMETERS + for (idx = 0; idx < src->regOpCount; idx++) + { + dest->regOps[idx].regStatus = src->regOps[idx].regStatus; + dest->regOps[idx].regValueLo = src->regOps[idx].regValueLo; + dest->regOps[idx].regValueHi = src->regOps[idx].regValueHi; + } +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS_v2A_05(NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS_v2A_05 *src = (void*)(buffer); + NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS *dest = pParams; + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + dest->action = src->action; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + + +static +return_t deserialize_NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS_v1A_06(NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS_v1A_06 *src = (void*)(buffer); + NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS *dest = pParams; + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + dest->action = src->action; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS_v21_06(NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + 
NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS_v21_06 *src = (void*)(buffer); + NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS *dest = pParams; + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + dest->hTargetChannel = src->hTargetChannel; + dest->smID = src->smID; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->smErrorState.hwwGlobalEsr = src->smErrorState.hwwGlobalEsr; + dest->smErrorState.hwwWarpEsr = src->smErrorState.hwwWarpEsr; + dest->smErrorState.hwwWarpEsrPc = src->smErrorState.hwwWarpEsrPc; + dest->smErrorState.hwwGlobalEsrReportMask = src->smErrorState.hwwGlobalEsrReportMask; + dest->smErrorState.hwwWarpEsrReportMask = src->smErrorState.hwwWarpEsrReportMask; + dest->smErrorState.hwwEsrAddr = src->smErrorState.hwwEsrAddr; + dest->smErrorState.hwwWarpEsrPc64 = src->smErrorState.hwwWarpEsrPc64; + /* New fields added in version v21_06 */ + dest->smErrorState.hwwCgaEsr = src->smErrorState.hwwCgaEsr; + dest->smErrorState.hwwCgaEsrReportMask = src->smErrorState.hwwCgaEsrReportMask; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS_v1A_06(NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS_v1A_06 *src = (void*)(buffer); + NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS *dest = pParams; + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + dest->hTargetChannel = src->hTargetChannel; + dest->smID = src->smID; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS_v1A_06(NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS_v1A_06 *src = (void*)(buffer); + NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS *dest = pParams; + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + dest->action = src->action; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS_v1A_06(NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS_v1A_06 *src = (void*)(buffer); + NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS *dest = pParams; + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + dest->stopTriggerType = src->stopTriggerType; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS_v03_00(NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS_v03_00 *src = (void*)(buffer); + NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS *dest = pParams; + + if (src && dest) { + dest->hVASpace = src->hVASpace; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS_v1A_07(NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS_v1A_07 *src = (void*)(buffer); + NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS *dest = pParams; + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + dest->ceEngineType = src->ceEngineType; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + 
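+    // Output direction: only the PCE mask that host RM computed for the requested CE engine flows back.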
dest->pceMask = src->pceMask; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS_v1A_07(NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS_v1A_07 *src = (void*)(buffer); + NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS *dest = pParams; + + if (src && dest) { + ct_assert(NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE == 4); + +#ifdef COPY_INPUT_PARAMETERS + dest->index = src->index; + dest->tableType = src->tableType; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i; + for (i = 0; i < NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE; ++i) { + dest->value.colorFB[i] = src->value.colorFB[i]; + dest->value.colorDS[i] = src->value.colorDS[i]; + } + dest->value.depth = src->value.depth; + dest->value.stencil = src->value.stencil; + dest->format = src->format; + dest->index = src->index; + dest->bIndexValid = src->bIndexValid; + dest->tableType = src->tableType; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v23_04(NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v23_04 *src = (void*)(buffer); + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *dest = pParams; + + if (src && dest) { +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i; + + dest->enabledLinkMask = src->enabledLinkMask; + + FOR_EACH_INDEX_IN_MASK(32, i, src->enabledLinkMask) + { + NV2080_CTRL_NVLINK_DEVICE_INFO *deviceInfo_d = NULL; + NV2080_CTRL_NVLINK_DEVICE_INFO_v15_02 *deviceInfo_s = NULL; + + if (i >= NV2080_CTRL_NVLINK_MAX_LINKS_v23_04) + break; + + dest->linkInfo[i].capsTbl = src->linkInfo[i].capsTbl; + dest->linkInfo[i].phyType = src->linkInfo[i].phyType; + dest->linkInfo[i].subLinkWidth = src->linkInfo[i].subLinkWidth; + dest->linkInfo[i].linkState = src->linkInfo[i].linkState; + dest->linkInfo[i].rxSublinkStatus = src->linkInfo[i].rxSublinkStatus; + dest->linkInfo[i].txSublinkStatus = src->linkInfo[i].txSublinkStatus; + dest->linkInfo[i].nvlinkVersion = src->linkInfo[i].nvlinkVersion; + dest->linkInfo[i].nciVersion = src->linkInfo[i].nciVersion; + dest->linkInfo[i].phyVersion = src->linkInfo[i].phyVersion; + dest->linkInfo[i].nvlinkLinkClockKHz = src->linkInfo[i].nvlinkLinkClockKHz; + dest->linkInfo[i].nvlinkLineRateMbps = src->linkInfo[i].nvlinkLineRateMbps; + dest->linkInfo[i].connected = src->linkInfo[i].connected; + dest->linkInfo[i].remoteDeviceLinkNumber = src->linkInfo[i].remoteDeviceLinkNumber; + dest->linkInfo[i].localDeviceLinkNumber = src->linkInfo[i].localDeviceLinkNumber; + + deviceInfo_d = &dest->linkInfo[i].remoteDeviceInfo; + deviceInfo_s = &src->linkInfo[i].remoteDeviceInfo; + + deviceInfo_d->deviceIdFlags = deviceInfo_s->deviceIdFlags; + deviceInfo_d->domain = deviceInfo_s->domain; + deviceInfo_d->bus = deviceInfo_s->bus; + deviceInfo_d->device = deviceInfo_s->device; + deviceInfo_d->function = deviceInfo_s->function; + deviceInfo_d->pciDeviceId = deviceInfo_s->pciDeviceId; + deviceInfo_d->deviceType = deviceInfo_s->deviceType; + portMemCopy(deviceInfo_d->deviceUUID, + sizeof(deviceInfo_d->deviceUUID), + deviceInfo_s->deviceUUID, + sizeof(deviceInfo_s->deviceUUID)); + + deviceInfo_d = &dest->linkInfo[i].localDeviceInfo; + deviceInfo_s = &src->linkInfo[i].localDeviceInfo; + + deviceInfo_d->deviceIdFlags = 
deviceInfo_s->deviceIdFlags; + deviceInfo_d->domain = deviceInfo_s->domain; + deviceInfo_d->bus = deviceInfo_s->bus; + deviceInfo_d->device = deviceInfo_s->device; + deviceInfo_d->function = deviceInfo_s->function; + deviceInfo_d->pciDeviceId = deviceInfo_s->pciDeviceId; + deviceInfo_d->deviceType = deviceInfo_s->deviceType; + portMemCopy(deviceInfo_d->deviceUUID, + sizeof(deviceInfo_d->deviceUUID), + deviceInfo_s->deviceUUID, + sizeof(deviceInfo_s->deviceUUID)); + } + FOR_EACH_INDEX_IN_MASK_END; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v28_09(NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v28_09 *src = (void*)(buffer); + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *dest = pParams; + + if (src && dest) { +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i; + + dest->enabledLinkMask = src->enabledLinkMask; + + FOR_EACH_INDEX_IN_MASK(32, i, src->enabledLinkMask) + { + NV2080_CTRL_NVLINK_DEVICE_INFO *deviceInfo_d = NULL; + NV2080_CTRL_NVLINK_DEVICE_INFO_v28_09 *deviceInfo_s = NULL; + + if (i >= NV2080_CTRL_NVLINK_MAX_LINKS_v23_04) + break; + + dest->linkInfo[i].capsTbl = src->linkInfo[i].capsTbl; + dest->linkInfo[i].phyType = src->linkInfo[i].phyType; + dest->linkInfo[i].subLinkWidth = src->linkInfo[i].subLinkWidth; + dest->linkInfo[i].linkState = src->linkInfo[i].linkState; + dest->linkInfo[i].rxSublinkStatus = src->linkInfo[i].rxSublinkStatus; + dest->linkInfo[i].txSublinkStatus = src->linkInfo[i].txSublinkStatus; + dest->linkInfo[i].nvlinkVersion = src->linkInfo[i].nvlinkVersion; + dest->linkInfo[i].nciVersion = src->linkInfo[i].nciVersion; + dest->linkInfo[i].phyVersion = src->linkInfo[i].phyVersion; + dest->linkInfo[i].nvlinkLinkClockKHz = src->linkInfo[i].nvlinkLinkClockKHz; + dest->linkInfo[i].nvlinkLineRateMbps = src->linkInfo[i].nvlinkLineRateMbps; + dest->linkInfo[i].connected = src->linkInfo[i].connected; + dest->linkInfo[i].remoteDeviceLinkNumber = src->linkInfo[i].remoteDeviceLinkNumber; + dest->linkInfo[i].localDeviceLinkNumber = src->linkInfo[i].localDeviceLinkNumber; + + deviceInfo_d = &dest->linkInfo[i].remoteDeviceInfo; + deviceInfo_s = &src->linkInfo[i].remoteDeviceInfo; + + deviceInfo_d->deviceIdFlags = deviceInfo_s->deviceIdFlags; + deviceInfo_d->domain = deviceInfo_s->domain; + deviceInfo_d->bus = deviceInfo_s->bus; + deviceInfo_d->device = deviceInfo_s->device; + deviceInfo_d->function = deviceInfo_s->function; + deviceInfo_d->pciDeviceId = deviceInfo_s->pciDeviceId; + deviceInfo_d->deviceType = deviceInfo_s->deviceType; + portMemCopy(deviceInfo_d->deviceUUID, + sizeof(deviceInfo_d->deviceUUID), + deviceInfo_s->deviceUUID, + sizeof(deviceInfo_s->deviceUUID)); + deviceInfo_d->fabricRecoveryStatusMask = deviceInfo_s->fabricRecoveryStatusMask; + + deviceInfo_d = &dest->linkInfo[i].localDeviceInfo; + deviceInfo_s = &src->linkInfo[i].localDeviceInfo; + + deviceInfo_d->deviceIdFlags = deviceInfo_s->deviceIdFlags; + deviceInfo_d->domain = deviceInfo_s->domain; + deviceInfo_d->bus = deviceInfo_s->bus; + deviceInfo_d->device = deviceInfo_s->device; + deviceInfo_d->function = deviceInfo_s->function; + deviceInfo_d->pciDeviceId = deviceInfo_s->pciDeviceId; + deviceInfo_d->deviceType = deviceInfo_s->deviceType; + portMemCopy(deviceInfo_d->deviceUUID, + sizeof(deviceInfo_d->deviceUUID), + deviceInfo_s->deviceUUID, + sizeof(deviceInfo_s->deviceUUID)); + 
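+            // fabricRecoveryStatusMask (copied for the remote device above and the local device below)
+            // is carried only by the v28_09 layout; the v23_04 deserializer above has no such field.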
deviceInfo_d->fabricRecoveryStatusMask = deviceInfo_s->fabricRecoveryStatusMask; + } + FOR_EACH_INDEX_IN_MASK_END; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS_v1F_0D(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS_v1F_0D *src = (void*)(buffer); + NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS *dest = pParams; + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + memcpy(dest->gpuIds, src->gpuIds, (sizeof(NvU32) * NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS)); + dest->gpuCount = src->gpuCount; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->p2pCaps = src->p2pCaps; + dest->p2pOptimalReadCEs = src->p2pOptimalReadCEs; + dest->p2pOptimalWriteCEs = src->p2pOptimalWriteCEs; + portMemCopy(dest->p2pCapsStatus, NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE_v1F_0D, + src->p2pCapsStatus, NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE_v1F_0D); +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS_v18_0A(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS_v18_0A *src = (void*)(buffer); + NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS *dest = pParams; + + if (src && dest) { + if (src->grpACount == 0 || + src->grpACount > NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS || + src->grpBCount > NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + NvU32 idx = 0; + + dest->grpACount = src->grpACount; + dest->grpBCount = src->grpBCount; + + for (idx = 0; idx < NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS; idx++) { + dest->gpuIdGrpA[idx] = src->gpuIdGrpA[idx]; + dest->gpuIdGrpB[idx] = src->gpuIdGrpB[idx]; + } +#endif +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 grpAIdx = 0, grpBIdx= 0; + NvBool bReflexive = NV_FALSE; + + // Check for the reflexive case + if (src->grpBCount == 0) { + bReflexive = NV_TRUE; + } + + for (grpAIdx = 0; grpAIdx < src->grpACount; grpAIdx++) { + for (grpBIdx = 0; bReflexive ? 
grpBIdx <= grpAIdx : grpBIdx < src->grpBCount; grpBIdx++) { + dest->p2pCaps[grpAIdx][grpBIdx] = src->p2pCaps[grpAIdx].array[grpBIdx]; + dest->a2bOptimalReadCes[grpAIdx][grpBIdx] = src->a2bOptimalReadCes[grpAIdx].array[grpBIdx]; + dest->a2bOptimalWriteCes[grpAIdx][grpBIdx] = src->a2bOptimalWriteCes[grpAIdx].array[grpBIdx]; + dest->b2aOptimalReadCes[grpAIdx][grpBIdx] = src->b2aOptimalReadCes[grpAIdx].array[grpBIdx]; + dest->b2aOptimalWriteCes[grpAIdx][grpBIdx] = src->b2aOptimalWriteCes[grpAIdx].array[grpBIdx]; + } + } +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_GET_P2P_CAPS_PARAMS_v21_02(NV2080_CTRL_GET_P2P_CAPS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GET_P2P_CAPS_PARAMS_v21_02 *src = (void*)(buffer); + NV2080_CTRL_GET_P2P_CAPS_PARAMS *dest = pParams; + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + if (!src->bAllCaps && (src->peerGpuCount > NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS)) + return FAILURE_T; + + dest->bAllCaps = src->bAllCaps; + dest->bUseUuid = src->bUseUuid; + + if (!src->bAllCaps) + { + NvU32 i; + dest->peerGpuCount = src->peerGpuCount; + + for (i = 0; i < src->peerGpuCount; ++i) + { + portMemCopy(dest->peerGpuCaps[i].gpuUuid, + VM_UUID_SIZE_v21_02, + src->peerGpuCaps[i].gpuUuid, + VM_UUID_SIZE_v21_02); + } + } +#endif +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i; + + if (src->peerGpuCount > NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS) + return FAILURE_T; + + if (src->bAllCaps) + { + dest->peerGpuCount = src->peerGpuCount; + } + + for (i = 0; i < src->peerGpuCount; ++i) + { + if (src->bAllCaps) + { + portMemCopy(dest->peerGpuCaps[i].gpuUuid, + VM_UUID_SIZE_v21_02, + src->peerGpuCaps[i].gpuUuid, + VM_UUID_SIZE_v21_02); + } + + dest->peerGpuCaps[i].p2pCaps = src->peerGpuCaps[i].p2pCaps; + dest->peerGpuCaps[i].p2pOptimalReadCEs = src->peerGpuCaps[i].p2pOptimalReadCEs; + dest->peerGpuCaps[i].p2pOptimalWriteCEs = src->peerGpuCaps[i].p2pOptimalWriteCEs; + portMemCopy(dest->peerGpuCaps[i].p2pCapsStatus, + sizeof(dest->peerGpuCaps[i].p2pCapsStatus), + src->peerGpuCaps[i].p2pCapsStatus, + sizeof(src->peerGpuCaps[i].p2pCapsStatus)); + dest->peerGpuCaps[i].busPeerId = src->peerGpuCaps[i].busPeerId; + } +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS_v1A_0F(NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS_v1A_0F *src = (void*)(buffer); + NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS *dest = pParams; + + if (src && dest) + { + dest->ctxsw = src->ctxsw; + } + else + return FAILURE_T; +#endif + + return SUCCESS_T; +} + +static +return_t deserialize_NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS_v1A_0F(NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS_v1A_0F *src = (void*)(buffer); + NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS *dest = pParams; + + if (src && dest) + { + dest->ctxsw = src->ctxsw; + } + else + return FAILURE_T; +#endif + + return SUCCESS_T; +} + +static +return_t deserialize_NVB0CC_CTRL_EXEC_REG_OPS_PARAMS_v1A_0F(NVB0CC_CTRL_EXEC_REG_OPS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_EXEC_REG_OPS_PARAMS_v1A_0F *src = (void*)(buffer); + NVB0CC_CTRL_EXEC_REG_OPS_PARAMS *dest = pParams; + NvU32 idx = 0; + + if (src && dest) 
+ { + if (src->regOpCount > NVB0CC_REGOPS_MAX_COUNT) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->regOpCount = src->regOpCount; + dest->mode = src->mode; + for (idx = 0; idx < src->regOpCount; idx++) + { + dest->regOps[idx].regOp = src->regOps[idx].regOp; + dest->regOps[idx].regType = src->regOps[idx].regType; + dest->regOps[idx].regQuad = src->regOps[idx].regQuad; + dest->regOps[idx].regGroupMask = src->regOps[idx].regGroupMask; + dest->regOps[idx].regSubGroupMask = src->regOps[idx].regSubGroupMask; + dest->regOps[idx].regOffset = src->regOps[idx].regOffset; + dest->regOps[idx].regValueLo = src->regOps[idx].regValueLo; + dest->regOps[idx].regValueHi = src->regOps[idx].regValueHi; + dest->regOps[idx].regAndNMaskLo = src->regOps[idx].regAndNMaskLo; + dest->regOps[idx].regAndNMaskHi = src->regOps[idx].regAndNMaskHi; + } +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->bPassed = src->bPassed; + dest->bDirect = src->bDirect; + for (idx = 0; idx < src->regOpCount; idx++) + { + dest->regOps[idx].regStatus = src->regOps[idx].regStatus; + dest->regOps[idx].regValueLo = src->regOps[idx].regValueLo; + dest->regOps[idx].regValueHi = src->regOps[idx].regValueHi; + } +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS_v1A_14(NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS_v1A_14 *src = (void*)(buffer); + NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->hMemPmaBuffer = src->hMemPmaBuffer; + dest->pmaBufferOffset = src->pmaBufferOffset; + dest->pmaBufferSize = src->pmaBufferSize; + dest->hMemPmaBytesAvailable = src->hMemPmaBytesAvailable; + dest->pmaBytesAvailableOffset = src->pmaBytesAvailableOffset; + dest->ctxsw = src->ctxsw; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->pmaChannelIdx = src->pmaChannelIdx; + dest->pmaBufferVA = src->pmaBufferVA; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v1A_14(NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v1A_14 *src = (void*)(buffer); + NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->bytesConsumed = src->bytesConsumed; + dest->bUpdateAvailableBytes = src->bUpdateAvailableBytes; + dest->bWait = src->bWait; + dest->bReturnPut = src->bReturnPut; + dest->pmaChannelIdx = src->pmaChannelIdx; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->bytesAvailable = src->bytesAvailable; + dest->putPtr = src->putPtr; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v29_0B(NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v29_0B *src = (void*)(buffer); + NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->bytesConsumed = src->bytesConsumed; + dest->bUpdateAvailableBytes = src->bUpdateAvailableBytes; + dest->bWait = src->bWait; + dest->bReturnPut = src->bReturnPut; + dest->pmaChannelIdx = src->pmaChannelIdx; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->bytesAvailable = 
src->bytesAvailable; + dest->putPtr = src->putPtr; + dest->bOverflowStatus = src->bOverflowStatus; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_GPU_MIGRATABLE_OPS_PARAMS_v21_07(NV2080_CTRL_GPU_MIGRATABLE_OPS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GPU_MIGRATABLE_OPS_PARAMS_v21_07 *src = (void*)(buffer); + NV2080_CTRL_GPU_MIGRATABLE_OPS_PARAMS *dest = pParams; + + if (src && dest) { + NvU32 idx = 0; +#ifdef COPY_INPUT_PARAMETERS + if (src->regOpCount > NV2080_CTRL_MIGRATABLE_OPS_ARRAY_MAX_v21_07) { + return FAILURE_T; + } + + dest->bNonTransactional = src->bNonTransactional; + dest->regOpCount = src->regOpCount; + dest->hClientTarget = src->hClientTarget; + dest->hChannelTarget = src->hChannelTarget; + + for (idx = 0; idx < src->regOpCount; idx++) + { + dest->regOps[idx].regOp = src->regOps[idx].regOp; + dest->regOps[idx].regType = src->regOps[idx].regType; + dest->regOps[idx].regQuad = src->regOps[idx].regQuad; + dest->regOps[idx].regGroupMask = src->regOps[idx].regGroupMask; + dest->regOps[idx].regSubGroupMask = src->regOps[idx].regSubGroupMask; + dest->regOps[idx].regOffset = src->regOps[idx].regOffset; + dest->regOps[idx].regAndNMaskLo = src->regOps[idx].regAndNMaskLo; + dest->regOps[idx].regAndNMaskHi = src->regOps[idx].regAndNMaskHi; + dest->regOps[idx].regValueLo = src->regOps[idx].regValueLo; + dest->regOps[idx].regValueHi = src->regOps[idx].regValueHi; + dest->smIds[idx] = src->smIds[idx]; + } +#endif +#ifdef COPY_OUTPUT_PARAMETERS + for (idx = 0; idx < src->regOpCount; idx++) + { + dest->regOps[idx].regStatus = src->regOps[idx].regStatus; + dest->regOps[idx].regValueLo = src->regOps[idx].regValueLo; + dest->regOps[idx].regValueHi = src->regOps[idx].regValueHi; + } +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +static +return_t deserialize_NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS_v03_00(NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS_v03_00 *src = (void*)(buffer); + NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS *dest = pParams; + + if (src && dest) + { + dest->hChannel = src->hChannel; + dest->property = src->property; + dest->value = src->value; + } + else + return FAILURE_T; + + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +static +return_t deserialize_NV2080_CTRL_GPU_EVICT_CTX_PARAMS_v1A_1C( + NV2080_CTRL_GPU_EVICT_CTX_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GPU_EVICT_CTX_PARAMS_v03_00 *src = (void*)(buffer); + NV2080_CTRL_GPU_EVICT_CTX_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->engineType = src->engineType; + dest->hClient = src->hClient; + dest->ChID = src->ChID; + dest->hChanClient = src->hChanClient; + dest->hObject = src->hObject; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_v1A_1D(NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_v1A_1D *src = (void*)(buffer); + NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS *dest = pParams; + NvU32 idx = 0; + + if (src && dest) + { + if (src->numQueries > NV2080_CTRL_GRMGR_GR_FS_INFO_MAX_QUERIES_v1A_1D) { + return FAILURE_T; + } +#ifdef COPY_INPUT_PARAMETERS + dest->numQueries = 
src->numQueries;
+#endif
+        for (idx = 0; idx < dest->numQueries; idx++) {
+#ifdef COPY_INPUT_PARAMETERS
+            dest->queries[idx].queryType = src->queries[idx].queryType;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+            dest->queries[idx].status = src->queries[idx].status;
+#endif
+            switch(dest->queries[idx].queryType)
+            {
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_GPC_COUNT: {
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.gpcCountData.gpcCount = src->queries[idx].queryData.gpcCountData.gpcCount;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_GPC_MAP: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.chipletGpcMapData.gpcId = src->queries[idx].queryData.chipletGpcMapData.gpcId;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.chipletGpcMapData.chipletGpcMap = src->queries[idx].queryData.chipletGpcMapData.chipletGpcMap;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_TPC_MASK: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.tpcMaskData.gpcId = src->queries[idx].queryData.tpcMaskData.gpcId;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.tpcMaskData.tpcMask = src->queries[idx].queryData.tpcMaskData.tpcMask;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PPC_MASK: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.ppcMaskData.gpcId = src->queries[idx].queryData.ppcMaskData.gpcId;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.ppcMaskData.ppcMask = src->queries[idx].queryData.ppcMaskData.ppcMask;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_CHIPLET_GPC_MAP: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.partitionGpcMapData.swizzId = src->queries[idx].queryData.partitionGpcMapData.swizzId;
+                    dest->queries[idx].queryData.partitionGpcMapData.gpcId = src->queries[idx].queryData.partitionGpcMapData.gpcId;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.partitionGpcMapData.chipletGpcMap = src->queries[idx].queryData.partitionGpcMapData.chipletGpcMap;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_SYSPIPE_MASK: {
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.syspipeMaskData.chipletSyspipeMask = src->queries[idx].queryData.syspipeMaskData.chipletSyspipeMask;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_CHIPLET_SYSPIPE_IDS: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.partitionChipletSyspipeData.swizzId = src->queries[idx].queryData.partitionChipletSyspipeData.swizzId;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+                    NvU32 sIdx; // dedicated index into physSyspipeId[]; the outer query loop's idx must not be reused here
+                    dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount = src->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount;
+
+                    if (dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount > NV2080_CTRL_GRMGR_MAX_SMC_IDS_v1A_1D) {
+                        return FAILURE_T;
+                    }
+
+                    for (sIdx = 0; sIdx < dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount; sIdx++)
+                        dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeId[sIdx] = src->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeId[sIdx];
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PROFILER_MON_GPC_MASK: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.dmGpcMaskData.swizzId = src->queries[idx].queryData.dmGpcMaskData.swizzId;
+                    dest->queries[idx].queryData.dmGpcMaskData.grIdx = src->queries[idx].queryData.dmGpcMaskData.grIdx;
+#endif
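+                    // Return path: only the GPC enable mask computed for the requested swizzId/grIdx flows back to the guest.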
+#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.dmGpcMaskData.gpcEnMask = src->queries[idx].queryData.dmGpcMaskData.gpcEnMask; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_SYSPIPE_ID: { +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.partitionSyspipeIdData.syspipeId = src->queries[idx].queryData.partitionSyspipeIdData.syspipeId; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_ROP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryData.ropMaskData.gpcId = src->queries[idx].queryData.ropMaskData.gpcId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.ropMaskData.ropMask = src->queries[idx].queryData.ropMaskData.ropMask; +#endif + break; + } + default: + { + // Unknown query + return FAILURE_T; + } + } + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_v2B_09(NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_v2B_09 *src = (void*)(buffer); + NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS *dest = pParams; + NvU32 idx = 0; + + if (src && dest) + { + if (src->numQueries > NV2080_CTRL_GRMGR_GR_FS_INFO_MAX_QUERIES_v1A_1D) { + return FAILURE_T; + } +#ifdef COPY_INPUT_PARAMETERS + dest->numQueries = src->numQueries; +#endif + for (idx = 0; idx < dest->numQueries; idx++) { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryType = src->queries[idx].queryType; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].status = src->queries[idx].status; +#endif + switch(dest->queries[idx].queryType) + { + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_GPC_COUNT: { +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.gpcCountData.gpcCount = src->queries[idx].queryData.gpcCountData.gpcCount; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_GPC_MAP: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryData.chipletGpcMapData.gpcId = src->queries[idx].queryData.chipletGpcMapData.gpcId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.chipletGpcMapData.chipletGpcMap = src->queries[idx].queryData.chipletGpcMapData.chipletGpcMap; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_TPC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryData.tpcMaskData.gpcId = src->queries[idx].queryData.tpcMaskData.gpcId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.tpcMaskData.tpcMask = src->queries[idx].queryData.tpcMaskData.tpcMask; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PPC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryData.ppcMaskData.gpcId = src->queries[idx].queryData.ppcMaskData.gpcId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.ppcMaskData.ppcMask = src->queries[idx].queryData.ppcMaskData.ppcMask; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_CHIPLET_GPC_MAP: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryData.partitionGpcMapData.swizzId = src->queries[idx].queryData.partitionGpcMapData.swizzId; + dest->queries[idx].queryData.partitionGpcMapData.gpcId = src->queries[idx].queryData.partitionGpcMapData.gpcId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.partitionGpcMapData.chipletGpcMap = src->queries[idx].queryData.partitionGpcMapData.chipletGpcMap; +#endif + break; + } + case 
NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_SYSPIPE_MASK: { +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.syspipeMaskData.chipletSyspipeMask = src->queries[idx].queryData.syspipeMaskData.chipletSyspipeMask; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_CHIPLET_SYSPIPE_IDS: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryData.partitionChipletSyspipeData.swizzId = src->queries[idx].queryData.partitionChipletSyspipeData.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i = 0; + dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount = src->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount; + + if (dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount > NV2080_CTRL_GRMGR_MAX_SMC_IDS_v1A_1D) { + return FAILURE_T; + } + + // Use a separate index here; reusing idx would clobber the outer query loop's counter. + for (i = 0; i < dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount; i++) + dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeId[i] = src->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeId[i]; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PROFILER_MON_GPC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryData.dmGpcMaskData.swizzId = src->queries[idx].queryData.dmGpcMaskData.swizzId; + dest->queries[idx].queryData.dmGpcMaskData.grIdx = src->queries[idx].queryData.dmGpcMaskData.grIdx; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.dmGpcMaskData.gpcEnMask = src->queries[idx].queryData.dmGpcMaskData.gpcEnMask; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_SYSPIPE_ID: { +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.partitionSyspipeIdData.syspipeId = src->queries[idx].queryData.partitionSyspipeIdData.syspipeId; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_ROP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryData.ropMaskData.gpcId = src->queries[idx].queryData.ropMaskData.gpcId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.ropMaskData.ropMask = src->queries[idx].queryData.ropMaskData.ropMask; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_GRAPHICS_SYSPIPE_MASK: { +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.gfxSyspipeMaskData.chipletSyspipeMask = src->queries[idx].queryData.gfxSyspipeMaskData.chipletSyspipeMask; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_GFX_CAPABLE_GPC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryData.gfxGpcMaskData.swizzId = src->queries[idx].queryData.gfxGpcMaskData.swizzId; + dest->queries[idx].queryData.gfxGpcMaskData.grIdx = src->queries[idx].queryData.gfxGpcMaskData.grIdx; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.gfxGpcMaskData.gpcEnMask = src->queries[idx].queryData.gfxGpcMaskData.gpcEnMask; +#endif + break; + } + default: + { + // Unknown query + return FAILURE_T; + } + } + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v24_00(NV2080_CTRL_FB_GET_FS_INFO_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v24_00 *src = (void*)(buffer); + NV2080_CTRL_FB_GET_FS_INFO_PARAMS *dest = pParams; + NvU32 idx = 0; + + if (src && dest) + { + if (src->numQueries > NV2080_CTRL_FB_FS_INFO_MAX_QUERIES_v24_00) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->numQueries = src->numQueries; +#endif +
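/* numQueries originates in the RPC buffer, so the range check against the versioned maximum above must run before it drives the per-query copy loop below. */ +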
for (idx = 0; idx < dest->numQueries; idx++) { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryType = src->queries[idx].queryType; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].status = src->queries[idx].status; +#endif + switch(dest->queries[idx].queryType) + { + case NV2080_CTRL_FB_FS_INFO_INVALID_QUERY: { +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i = 0; + for (i = 0; i < NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE_v1A_1D; i++) { + dest->queries[idx].queryParams.inv.data[i] = src->queries[idx].queryParams.inv.data[i]; + } +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbp.swizzId = src->queries[idx].queryParams.fbp.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbp.fbpEnMask = src->queries[idx].queryParams.fbp.fbpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.ltc.fbpIndex = src->queries[idx].queryParams.ltc.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.ltc.ltcEnMask = src->queries[idx].queryParams.ltc.ltcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_LTS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.lts.fbpIndex = src->queries[idx].queryParams.lts.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.lts.ltsEnMask = src->queries[idx].queryParams.lts.ltsEnMask; +#endif + + break; + } + case NV2080_CTRL_FB_FS_INFO_FBPA_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpa.fbpIndex = src->queries[idx].queryParams.fbpa.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpa.fbpaEnMask = src->queries[idx].queryParams.fbpa.fbpaEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_ROP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.rop.fbpIndex = src->queries[idx].queryParams.rop.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.rop.ropEnMask = src->queries[idx].queryParams.rop.ropEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLtc.fbpIndex = src->queries[idx].queryParams.dmLtc.fbpIndex; + dest->queries[idx].queryParams.dmLtc.swizzId = src->queries[idx].queryParams.dmLtc.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLtc.ltcEnMask = src->queries[idx].queryParams.dmLtc.ltcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLts.fbpIndex = src->queries[idx].queryParams.dmLts.fbpIndex; + dest->queries[idx].queryParams.dmLts.swizzId = src->queries[idx].queryParams.dmLts.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLts.ltsEnMask = src->queries[idx].queryParams.dmLts.ltsEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpa.fbpIndex = src->queries[idx].queryParams.dmFbpa.fbpIndex; + dest->queries[idx].queryParams.dmFbpa.swizzId = src->queries[idx].queryParams.dmFbpa.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpa.fbpaEnMask = src->queries[idx].queryParams.dmFbpa.fbpaEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK: { +#ifdef COPY_INPUT_PARAMETERS 
+ dest->queries[idx].queryParams.dmRop.fbpIndex = src->queries[idx].queryParams.dmRop.fbpIndex; + dest->queries[idx].queryParams.dmRop.swizzId = src->queries[idx].queryParams.dmRop.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmRop.ropEnMask = src->queries[idx].queryParams.dmRop.ropEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpaSubp.fbpIndex = src->queries[idx].queryParams.dmFbpaSubp.fbpIndex; + dest->queries[idx].queryParams.dmFbpaSubp.swizzId = src->queries[idx].queryParams.dmFbpaSubp.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpaSubp.fbpaSubpEnMask = src->queries[idx].queryParams.dmFbpaSubp.fbpaSubpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpaSubp.fbpIndex = src->queries[idx].queryParams.fbpaSubp.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpaSubp.fbpaSubpEnMask = src->queries[idx].queryParams.fbpaSubp.fbpaSubpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpLogicalMap.fbpIndex = src->queries[idx].queryParams.fbpLogicalMap.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpLogicalMap.fbpLogicalIndex = src->queries[idx].queryParams.fbpLogicalMap.fbpLogicalIndex; +#endif + break; + } + default: + { + // Unknown query + return FAILURE_T; + } + } + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v26_04(NV2080_CTRL_FB_GET_FS_INFO_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v26_04 *src = (void*)(buffer); + NV2080_CTRL_FB_GET_FS_INFO_PARAMS *dest = pParams; + NvU32 idx = 0; + + if (src && dest) + { + if (src->numQueries > NV2080_CTRL_FB_FS_INFO_MAX_QUERIES_v24_00) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->numQueries = src->numQueries; +#endif + for (idx = 0; idx < dest->numQueries; idx++) { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryType = src->queries[idx].queryType; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].status = src->queries[idx].status; +#endif + switch(dest->queries[idx].queryType) + { + case NV2080_CTRL_FB_FS_INFO_INVALID_QUERY: { +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i = 0; + for (i = 0; i < NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE_v1A_1D; i++) { + dest->queries[idx].queryParams.inv.data[i] = src->queries[idx].queryParams.inv.data[i]; + } +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbp.swizzId = src->queries[idx].queryParams.fbp.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbp.fbpEnMask = src->queries[idx].queryParams.fbp.fbpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.ltc.fbpIndex = src->queries[idx].queryParams.ltc.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.ltc.ltcEnMask = src->queries[idx].queryParams.ltc.ltcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_LTS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.lts.fbpIndex = src->queries[idx].queryParams.lts.fbpIndex; 
+#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.lts.ltsEnMask = src->queries[idx].queryParams.lts.ltsEnMask; +#endif + + break; + } + case NV2080_CTRL_FB_FS_INFO_FBPA_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpa.fbpIndex = src->queries[idx].queryParams.fbpa.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpa.fbpaEnMask = src->queries[idx].queryParams.fbpa.fbpaEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_ROP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.rop.fbpIndex = src->queries[idx].queryParams.rop.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.rop.ropEnMask = src->queries[idx].queryParams.rop.ropEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLtc.fbpIndex = src->queries[idx].queryParams.dmLtc.fbpIndex; + dest->queries[idx].queryParams.dmLtc.swizzId = src->queries[idx].queryParams.dmLtc.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLtc.ltcEnMask = src->queries[idx].queryParams.dmLtc.ltcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLts.fbpIndex = src->queries[idx].queryParams.dmLts.fbpIndex; + dest->queries[idx].queryParams.dmLts.swizzId = src->queries[idx].queryParams.dmLts.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLts.ltsEnMask = src->queries[idx].queryParams.dmLts.ltsEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpa.fbpIndex = src->queries[idx].queryParams.dmFbpa.fbpIndex; + dest->queries[idx].queryParams.dmFbpa.swizzId = src->queries[idx].queryParams.dmFbpa.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpa.fbpaEnMask = src->queries[idx].queryParams.dmFbpa.fbpaEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmRop.fbpIndex = src->queries[idx].queryParams.dmRop.fbpIndex; + dest->queries[idx].queryParams.dmRop.swizzId = src->queries[idx].queryParams.dmRop.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmRop.ropEnMask = src->queries[idx].queryParams.dmRop.ropEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpaSubp.fbpIndex = src->queries[idx].queryParams.dmFbpaSubp.fbpIndex; + dest->queries[idx].queryParams.dmFbpaSubp.swizzId = src->queries[idx].queryParams.dmFbpaSubp.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpaSubp.fbpaSubpEnMask = src->queries[idx].queryParams.dmFbpaSubp.fbpaSubpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpaSubp.fbpIndex = src->queries[idx].queryParams.fbpaSubp.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpaSubp.fbpaSubpEnMask = src->queries[idx].queryParams.fbpaSubp.fbpaSubpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpLogicalMap.fbpIndex = 
src->queries[idx].queryParams.fbpLogicalMap.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpLogicalMap.fbpLogicalIndex = src->queries[idx].queryParams.fbpLogicalMap.fbpLogicalIndex; +#endif + break; + } + case NV2080_CTRL_SYSL2_FS_INFO_SYSLTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.sysl2Ltc.sysIdx = src->queries[idx].queryParams.sysl2Ltc.sysIdx; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.sysl2Ltc.sysl2LtcEnMask = src->queries[idx].queryParams.sysl2Ltc.sysl2LtcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PAC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.pac.fbpIndex = src->queries[idx].queryParams.pac.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.pac.pacEnMask = src->queries[idx].queryParams.pac.pacEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_LOGICAL_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.logicalLtc.fbpIndex = src->queries[idx].queryParams.logicalLtc.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.logicalLtc.logicalLtcEnMask = src->queries[idx].queryParams.logicalLtc.logicalLtcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LOGICAL_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLogicalLtc.fbpIndex = src->queries[idx].queryParams.dmLogicalLtc.fbpIndex; + dest->queries[idx].queryParams.dmLogicalLtc.swizzId = src->queries[idx].queryParams.dmLogicalLtc.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLogicalLtc.logicalLtcEnMask = src->queries[idx].queryParams.dmLogicalLtc.logicalLtcEnMask; +#endif + break; + } + default: + { + // Unknown query + return FAILURE_T; + } + } + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v2B_07(NV2080_CTRL_FB_GET_FS_INFO_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v2B_07 *src = (void*)(buffer); + NV2080_CTRL_FB_GET_FS_INFO_PARAMS *dest = pParams; + NvU32 idx = 0; + + if (src && dest) + { + if (src->numQueries > NV2080_CTRL_FB_FS_INFO_MAX_QUERIES_v24_00) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->numQueries = src->numQueries; +#endif + for (idx = 0; idx < dest->numQueries; idx++) { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryType = src->queries[idx].queryType; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].status = src->queries[idx].status; +#endif + switch(dest->queries[idx].queryType) + { + case NV2080_CTRL_FB_FS_INFO_INVALID_QUERY: { +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i = 0; + for (i = 0; i < NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE_v1A_1D; i++) { + dest->queries[idx].queryParams.inv.data[i] = src->queries[idx].queryParams.inv.data[i]; + } +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbp.swizzId = src->queries[idx].queryParams.fbp.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbp.fbpEnMask = src->queries[idx].queryParams.fbp.fbpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.ltc.fbpIndex = src->queries[idx].queryParams.ltc.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.ltc.ltcEnMask = 
src->queries[idx].queryParams.ltc.ltcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_LTS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.lts.fbpIndex = src->queries[idx].queryParams.lts.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.lts.ltsEnMask = src->queries[idx].queryParams.lts.ltsEnMask; +#endif + + break; + } + case NV2080_CTRL_FB_FS_INFO_FBPA_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpa.fbpIndex = src->queries[idx].queryParams.fbpa.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpa.fbpaEnMask = src->queries[idx].queryParams.fbpa.fbpaEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_ROP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.rop.fbpIndex = src->queries[idx].queryParams.rop.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.rop.ropEnMask = src->queries[idx].queryParams.rop.ropEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLtc.fbpIndex = src->queries[idx].queryParams.dmLtc.fbpIndex; + dest->queries[idx].queryParams.dmLtc.swizzId = src->queries[idx].queryParams.dmLtc.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLtc.ltcEnMask = src->queries[idx].queryParams.dmLtc.ltcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLts.fbpIndex = src->queries[idx].queryParams.dmLts.fbpIndex; + dest->queries[idx].queryParams.dmLts.swizzId = src->queries[idx].queryParams.dmLts.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLts.ltsEnMask = src->queries[idx].queryParams.dmLts.ltsEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpa.fbpIndex = src->queries[idx].queryParams.dmFbpa.fbpIndex; + dest->queries[idx].queryParams.dmFbpa.swizzId = src->queries[idx].queryParams.dmFbpa.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpa.fbpaEnMask = src->queries[idx].queryParams.dmFbpa.fbpaEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmRop.fbpIndex = src->queries[idx].queryParams.dmRop.fbpIndex; + dest->queries[idx].queryParams.dmRop.swizzId = src->queries[idx].queryParams.dmRop.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmRop.ropEnMask = src->queries[idx].queryParams.dmRop.ropEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpaSubp.fbpIndex = src->queries[idx].queryParams.dmFbpaSubp.fbpIndex; + dest->queries[idx].queryParams.dmFbpaSubp.swizzId = src->queries[idx].queryParams.dmFbpaSubp.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpaSubp.fbpaSubpEnMask = src->queries[idx].queryParams.dmFbpaSubp.fbpaSubpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpaSubp.fbpIndex = src->queries[idx].queryParams.fbpaSubp.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpaSubp.fbpaSubpEnMask = 
src->queries[idx].queryParams.fbpaSubp.fbpaSubpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpLogicalMap.fbpIndex = src->queries[idx].queryParams.fbpLogicalMap.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpLogicalMap.fbpLogicalIndex = src->queries[idx].queryParams.fbpLogicalMap.fbpLogicalIndex; +#endif + break; + } + case NV2080_CTRL_SYSL2_FS_INFO_SYSLTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.sysl2Ltc.sysIdx = src->queries[idx].queryParams.sysl2Ltc.sysIdx; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.sysl2Ltc.sysl2LtcEnMask = src->queries[idx].queryParams.sysl2Ltc.sysl2LtcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PAC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.pac.fbpIndex = src->queries[idx].queryParams.pac.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.pac.pacEnMask = src->queries[idx].queryParams.pac.pacEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_LOGICAL_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.logicalLtc.fbpIndex = src->queries[idx].queryParams.logicalLtc.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.logicalLtc.logicalLtcEnMask = src->queries[idx].queryParams.logicalLtc.logicalLtcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LOGICAL_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLogicalLtc.fbpIndex = src->queries[idx].queryParams.dmLogicalLtc.fbpIndex; + dest->queries[idx].queryParams.dmLogicalLtc.swizzId = src->queries[idx].queryParams.dmLogicalLtc.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLogicalLtc.logicalLtcEnMask = src->queries[idx].queryParams.dmLogicalLtc.logicalLtcEnMask; +#endif + break; + } + case NV2080_CTRL_SYSL2_FS_INFO_SYSLTS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.sysl2Lts.sysIdx = src->queries[idx].queryParams.sysl2Lts.sysIdx; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.sysl2Lts.sysl2LtsEnMask = src->queries[idx].queryParams.sysl2Lts.sysl2LtsEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_SYS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.sys.swizzId = src->queries[idx].queryParams.sys.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.sys.sysEnMask = src->queries[idx].queryParams.sys.sysEnMask; +#endif + break; + } + default: + { + // Unknown query + return FAILURE_T; + } + } + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t deserialize_NVA06F_CTRL_STOP_CHANNEL_PARAMS_v1A_1E( + NVA06F_CTRL_STOP_CHANNEL_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVA06F_CTRL_STOP_CHANNEL_PARAMS_v1A_1E *src = (void*)(buffer); + NVA06F_CTRL_STOP_CHANNEL_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->bImmediate = src->bImmediate; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t deserialize_NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS_v1A_1F(NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS_v1A_1F *src = (void*)(buffer); + NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS *dest = pParams; + + if (src && dest) + { 
+ dest->pmaChannelIdx = src->pmaChannelIdx; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS_v1A_1F(NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS_v1A_1F *src = (void*)(buffer); + NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS *dest = pParams; + + if (src && dest) + { + dest->hChannel = src->hChannel; + dest->samplingMode = src->samplingMode; + dest->grRouteInfo.flags = src->grRouteInfo.flags; + dest->grRouteInfo.route = src->grRouteInfo.route; + + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS_v1A_1F(NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS_v1A_1F *src = (void*)(buffer); + NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS *dest = pParams; + + if (src && dest) + { + dest->bSetMaxFreq = src->bSetMaxFreq; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS_v1A_1F(NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_OUTPUT_PARAMETERS + NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS_v1A_1F *src = (void*)(buffer); + NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS *dest = pParams; + NvU32 i = 0; + + if (src && dest) + { + dest->rm.clientActiveMask = src->rm.clientActiveMask; + dest->rm.bRegkeyLimitRatedTdp = src->rm.bRegkeyLimitRatedTdp; + dest->output = src->output; + + for (i = 0; i < NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS_v1A_1F; i++) + { + dest->inputs[i] = src->inputs[i]; + } + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS_v1A_1F(NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS_v1A_1F *src = (void*)(buffer); + NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS *dest = pParams; + + if (src && dest) + { + dest->client = src->client; + dest->input = src->input; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS_v1A_23( + NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS_v1A_23 *src = (void*)(buffer); + NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS *dest = pParams; + + if (src && dest) + { + dest->base = src->base; + dest->size = src->size; + dest->addressSpace = src->addressSpace; + dest->cacheAttrib = src->cacheAttrib; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +return_t deserialize_NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS_v1C_02(NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS_v1C_02 *src = (void*)(buffer); + NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS *dest = pParams; + + if (src && dest) + { + dest->smID = src->smID; + dest->bSingleStep = src->bSingleStep; + } + else 
+ return FAILURE_T; + +#endif + return SUCCESS_T; +} + +return_t deserialize_NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS_v1C_04(NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS_v1C_04 *src = (void*)(buffer); + NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS *dest = pParams; + + if (src && dest) + { + dest->hChannelGroup = src->hChannelGroup; + dest->mode = src->mode; + dest->bEnableAllTpcs = src->bEnableAllTpcs; + dest->grRouteInfo.flags = src->grRouteInfo.flags; + dest->grRouteInfo.route = src->grRouteInfo.route; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +return_t +deserialize_NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS_v1E_07( + NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS_v1E_07 *src = + (void*)(buffer); + NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS *dest = pParams; + + if (src && dest) + { + NvU32 i; + + if (src->numValidEntries > + NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_MAX_RUNQUEUES_v1E_07) + { + return FAILURE_T; + } + + dest->numValidEntries = src->numValidEntries; + + for (i = 0; i < src->numValidEntries; ++i) + { + dest->bar2Addr[i] = src->bar2Addr[i]; + dest->methodBufferMemdesc[i].base = + src->methodBufferMemdesc[i].base; + dest->methodBufferMemdesc[i].size = + src->methodBufferMemdesc[i].size; + dest->methodBufferMemdesc[i].alignment = + src->methodBufferMemdesc[i].alignment; + dest->methodBufferMemdesc[i].addressSpace = + src->methodBufferMemdesc[i].addressSpace; + dest->methodBufferMemdesc[i].cpuCacheAttrib = + src->methodBufferMemdesc[i].cpuCacheAttrib; + } + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +return_t deserialize_NVB0CC_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL_PARAMS_v1C_08(NVB0CC_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVB0CC_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL_PARAMS_v1C_08 *src = (void*)(buffer); + NVB0CC_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL_PARAMS *dest = pParams; + + if (src && dest) + { + dest->pmaChannelIdx = src->pmaChannelIdx; + dest->bMembytesPollingRequired = src->bMembytesPollingRequired; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +return_t deserialize_NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS_v1E_06(NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_OUTPUT_PARAMETERS + NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS_v1E_06 *src = (void*)(buffer); + NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS *dest = pParams; + + if (src && dest) + { + dest->bMode = src->bMode; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +return_t deserialize_NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS_v2A_00( + NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV0080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_v2A_00 *src = (void*)(buffer); + NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *dest = pParams; + + if (src && dest) + { + dest->bZbcSurfacesExist = src->bZbcSurfacesExist; + } + else + return FAILURE_T; +#endif + + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +return_t 
deserialize_NV2080_CTRL_PERF_GET_LEVEL_INFO_V2_PARAMS_v2B_0D( + NV2080_CTRL_PERF_GET_LEVEL_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_PERF_GET_LEVEL_INFO_V2_PARAMS_v2B_0D *src = (void*)(buffer); + NV2080_CTRL_PERF_GET_LEVEL_INFO_V2_PARAMS *dest = pParams; + NvU32 i; + if (src && dest) + { + if (src->perfGetClkInfoListSize > + NV2080_CTRL_PERF_CLK_MAX_DOMAINS_v2B_0D) + { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->level = src->level; + dest->perfGetClkInfoListSize = src->perfGetClkInfoListSize; +#endif + +#ifdef COPY_OUTPUT_PARAMETERS + dest->flags = src->flags; +#endif + for (i = 0; i < src->perfGetClkInfoListSize ; i++) + { +#ifdef COPY_INPUT_PARAMETERS + dest->perfGetClkInfoList[i].domain = src->perfGetClkInfoList[i].domain; + dest->perfGetClkInfoList[i].flags = src->perfGetClkInfoList[i].flags; +#endif + +#ifdef COPY_OUTPUT_PARAMETERS + dest->perfGetClkInfoList[i].currentFreq = src->perfGetClkInfoList[i].currentFreq; + dest->perfGetClkInfoList[i].defaultFreq = src->perfGetClkInfoList[i].defaultFreq; + dest->perfGetClkInfoList[i].minFreq = src->perfGetClkInfoList[i].minFreq; + dest->perfGetClkInfoList[i].maxFreq = src->perfGetClkInfoList[i].maxFreq; +#endif + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +return_t deserialize_NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS_v2B_0E( + NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV0080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_v2B_0E *src = (void*)(buffer); + NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *dest = pParams; + + if (src && dest) + { + dest->subdevInstance = src->subdevInstance; + dest->bZbcSurfacesExist = src->bZbcSurfacesExist; + } + else + return FAILURE_T; +#endif + + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +return_t deserialize_NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS_v1F_05( + NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_v1F_05 *src = (void*)(buffer); + NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *dest = pParams; + + if (src && dest) + { + dest->bZbcSurfacesExist = src->bZbcSurfacesExist; + } + else + return FAILURE_T; +#endif + + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +static +return_t deserialize_NV00F8_CTRL_DESCRIBE_PARAMS_v1E_0C( + NV00F8_CTRL_DESCRIBE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV00F8_CTRL_DESCRIBE_PARAMS_v1E_0C *src = (void*)(buffer); + NV00F8_CTRL_DESCRIBE_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->offset = src->offset; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i; + + portMemSet(dest, 0, sizeof(*dest)); + + dest->totalPfns = src->totalPfns; + for (i = 0; i < NV00F8_CTRL_DESCRIBE_PFN_ARRAY_SIZE; i++) + { + dest->pfnArray[i] = src->pfnArray[i]; + } + dest->numPfns = src->numPfns; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS_v1E_0C( + NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS_v1E_0C *src = (void*)(buffer); + NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS *dest = pParams; 
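+ /* Common shape for these deserializers: src overlays the versioned wire layout directly on the raw RPC buffer, dest is the driver's current SDK struct, and only the direction-appropriate fields are copied between them. */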
+ + if (src && dest) + { +#ifdef COPY_OUTPUT_PARAMETERS + dest->totalSize = src->totalSize; + dest->freeSize = src->freeSize; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t deserialize_NV90E6_CTRL_CMD_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_v1F_0D(NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS_v18_0B *src = (void *) buffer; + NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_OUTPUT_PARAMETERS + dest->eccMask = src->eccMask; + dest->nvlinkMask = src->nvlinkMask; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +#endif + +#ifdef BUILD_COMMON_RPCS +return_t deserialize_NVB0CC_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM_PARAMS_v1C_0C(NVB0CC_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVB0CC_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM_PARAMS_v1C_0C *src = (void*)(buffer); + NVB0CC_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM_PARAMS *dest = pParams; + + if (src && dest) + { + dest->pmaChannelIdx = src->pmaChannelIdx; + dest->pmaBufferVA = src->pmaBufferVA; + dest->pmaBufferSize = src->pmaBufferSize; + dest->membytesVA = src->membytesVA; + dest->hwpmIBPA = src->hwpmIBPA; + dest->hwpmIBAperture = src->hwpmIBAperture; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +return_t deserialize_NVC637_CTRL_CMD_EXEC_PARTITIONS_CREATE_v24_05(NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS_v24_05 *src = (void *) buffer; + NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS *dest = pParams; + NvU32 i; + + if (src && dest) + { + if (src->execPartCount > NVC637_CTRL_MAX_EXEC_PARTITIONS_v18_05) { + return FAILURE_T; + } +#ifdef COPY_INPUT_PARAMETERS + dest->bQuery = src->bQuery; + dest->execPartCount = src->execPartCount; + + for (i = 0; i < dest->execPartCount; i++) { + dest->execPartInfo[i].gpcCount = src->execPartInfo[i].gpcCount; + + //Added in version v24_05 + dest->execPartInfo[i].gfxGpcCount = src->execPartInfo[i].gfxGpcCount; + + dest->execPartInfo[i].veidCount = src->execPartInfo[i].veidCount; + dest->execPartInfo[i].ceCount = src->execPartInfo[i].ceCount; + dest->execPartInfo[i].nvEncCount = src->execPartInfo[i].nvEncCount; + dest->execPartInfo[i].nvDecCount = src->execPartInfo[i].nvDecCount; + dest->execPartInfo[i].nvJpgCount = src->execPartInfo[i].nvJpgCount; + dest->execPartInfo[i].ofaCount = src->execPartInfo[i].ofaCount; + dest->execPartInfo[i].sharedEngFlag = src->execPartInfo[i].sharedEngFlag; + dest->execPartInfo[i].smCount = src->execPartInfo[i].smCount; + dest->execPartInfo[i].spanStart = src->execPartInfo[i].spanStart; + dest->execPartInfo[i].computeSize = src->execPartInfo[i].computeSize; + } +#endif + +#ifdef COPY_OUTPUT_PARAMETERS + for (i = 0; i < src->execPartCount; i++) { + dest->execPartId[i] = src->execPartId[i]; + dest->execPartInfo[i].computeSize = src->execPartInfo[i].computeSize; + } +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t deserialize_NVC637_CTRL_CMD_EXEC_PARTITIONS_DELETE_v1F_0A(NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS_v18_05 *src = 
(void *) buffer; + NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + NvU32 i; + + if (src->execPartCount > NVC637_CTRL_MAX_EXEC_PARTITIONS_v18_05) { + return FAILURE_T; + } + + dest->execPartCount = src->execPartCount; + for (i = 0; i < dest->execPartCount; i++) { + dest->execPartId[i] = src->execPartId[i]; + } +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t deserialize_NVC637_CTRL_CMD_EXEC_PARTITIONS_EXPORT_v29_0C(NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS_v29_0C *src = (void *) buffer; + NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->id = src->id; + dest->bCreateCap = src->bCreateCap; +#endif + +#ifdef COPY_OUTPUT_PARAMETERS + for (int i = 0; i < NVC637_CTRL_EXEC_PARTITIONS_EXPORT_MAX_ENGINES_MASK_SIZE; i++) { + dest->info.enginesMask[i] = src->info.enginesMask[i]; + } + dest->info.sharedEngFlags = src->info.sharedEngFlags; + dest->info.gpcMask = src->info.gpcMask; + dest->info.gfxGpcCount = src->info.gfxGpcCount; + dest->info.veidOffset = src->info.veidOffset; + dest->info.veidCount = src->info.veidCount; + dest->info.smCount = src->info.smCount; + dest->info.spanStart = src->info.spanStart; + dest->info.computeSize = src->info.computeSize; + +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t deserialize_NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_v1F_0A(NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS_v08_00 *src = (void *) buffer; + NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_OUTPUT_PARAMETERS + dest->workSubmitToken = src->workSubmitToken; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t deserialize_NVC36F_CTRL_CMD_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_v1F_0A(NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS_v16_04 *src = (void *) buffer; + NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->index = src->index; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t deserialize_NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v21_03(NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v21_03 *src = (void*)(buffer); + NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS *dest = pParams; + + if (src && dest) + { + dest->connectionType = src->connectionType; + dest->peerId = src->peerId; + dest->bSpaAccessOnly = src->bSpaAccessOnly; + dest->bUseUuid = src->bUseUuid; + + portMemCopy(dest->remoteGpuUuid, + VM_UUID_SIZE_v21_02, + src->remoteGpuUuid, + VM_UUID_SIZE_v21_02); + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +return_t deserialize_NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v29_08(NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v29_08 *src = (void*)(buffer); + 
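/* Relative to the v21_03 handler above, the v29_08 wire layout adds bEgmPeer and remoteGpuId. */ +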
NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS *dest = pParams; + + if (src && dest) + { + dest->connectionType = src->connectionType; + dest->peerId = src->peerId; + dest->bEgmPeer = src->bEgmPeer; + dest->bSpaAccessOnly = src->bSpaAccessOnly; + dest->bUseUuid = src->bUseUuid; + dest->remoteGpuId = src->remoteGpuId; + + portMemCopy(dest->remoteGpuUuid, + VM_UUID_SIZE_v21_02, + src->remoteGpuUuid, + VM_UUID_SIZE_v21_02); + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +return_t deserialize_NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS_v21_03(NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS_v21_03 *src = (void*)(buffer); + NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS *dest = pParams; + + if (src && dest) + { + dest->connectionType = src->connectionType; + dest->peerId = src->peerId; + dest->bUseUuid = src->bUseUuid; + + portMemCopy(dest->remoteGpuUuid, + VM_UUID_SIZE_v21_02, + src->remoteGpuUuid, + VM_UUID_SIZE_v21_02); + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +#ifndef UMED_BUILD +return_t deserialize_NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v25_11(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v25_11 *src = (void*)(buffer); + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *dest = pParams; + if (src && dest) + { + NvU32 i; + if (src->gpuInfoListSize > NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v25_11) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoListSize = src->gpuInfoListSize; +#endif + + for (i = 0; i < NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v25_11; i++) { +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoList[i].index = src->gpuInfoList[i].index; +#endif + dest->gpuInfoList[i].data = src->gpuInfoList[i].data; + } + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t deserialize_NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2A_04(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2A_04 *src = (void*)(buffer); + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *dest = pParams; + if (src && dest) + { + NvU32 i; + if (src->gpuInfoListSize > NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2A_04) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoListSize = src->gpuInfoListSize; +#endif + + for (i = 0; i < NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2A_04; i++) { +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoList[i].index = src->gpuInfoList[i].index; +#endif + dest->gpuInfoList[i].data = src->gpuInfoList[i].data; + } + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t deserialize_NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2B_03(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2B_03 *src = (void*)(buffer); + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *dest = pParams; + if (src && dest) + { + NvU32 i; + if (src->gpuInfoListSize > NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2B_03) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoListSize = src->gpuInfoListSize; +#endif + + for (i = 0; i < NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2B_03; i++) { +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoList[i].index = src->gpuInfoList[i].index; +#endif + dest->gpuInfoList[i].data = src->gpuInfoList[i].data; + } + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t 
deserialize_NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2B_05(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2B_05 *src = (void*)(buffer); + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *dest = pParams; + if (src && dest) + { + NvU32 i; + if (src->gpuInfoListSize > NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2B_05) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoListSize = src->gpuInfoListSize; +#endif + + for (i = 0; i < NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2B_05; i++) { +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoList[i].index = src->gpuInfoList[i].index; +#endif + dest->gpuInfoList[i].data = src->gpuInfoList[i].data; + } + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t deserialize_NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2B_0C(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2B_0C *src = (void*)(buffer); + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *dest = pParams; + if (src && dest) + { + NvU32 i; + if (src->gpuInfoListSize > NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2B_0C) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoListSize = src->gpuInfoListSize; +#endif + + for (i = 0; i < NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2B_0C; i++) { +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoList[i].index = src->gpuInfoList[i].index; +#endif + dest->gpuInfoList[i].data = src->gpuInfoList[i].data; + } + } + else + return FAILURE_T; + return SUCCESS_T; +} +#endif // UMED_BUILD + +return_t deserialize_NV2080_CTRL_CMD_FLA_SETUP_INSTANCE_MEM_BLOCK_v21_05( + NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS_v13_04 *src = (void*)(buffer); + NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS *dest = pParams; + + if (src && dest) + { + dest->imbPhysAddr = src->imbPhysAddr; + dest->addrSpace = src->addrSpace; + dest->flaAction = src->flaAction; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +return_t deserialize_NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS_v21_08(NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS_v21_08 *src = (void*)(buffer); + NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_OUTPUT_PARAMETERS + dest->numCredits = src->numCredits; +#endif + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t deserialize_NVB0CC_CTRL_GET_HS_CREDITS_PARAMS_v21_08(NVB0CC_CTRL_GET_HS_CREDITS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_GET_HS_CREDITS_PARAMS_v21_08 *src = (void*)(buffer); + NVB0CC_CTRL_GET_HS_CREDITS_PARAMS *dest = pParams; + + if (src && dest) + { + NvU32 i; + if (src->numEntries > NVB0CC_MAX_CREDIT_INFO_ENTRIES_v21_08) { + return FAILURE_T; + } +#ifdef COPY_OUTPUT_PARAMETERS + dest->statusInfo.status = src->statusInfo.status; + dest->statusInfo.entryIndex = src->statusInfo.entryIndex; + for (i = 0; i < NVB0CC_MAX_CREDIT_INFO_ENTRIES_v21_08; i++) { + dest->creditInfo[i].numCredits = src->creditInfo[i].numCredits; + } + +#endif +#ifdef COPY_INPUT_PARAMETERS + dest->pmaChannelIdx = src->pmaChannelIdx; + dest->numEntries = src->numEntries; + + for (i = 0; i < NVB0CC_MAX_CREDIT_INFO_ENTRIES_v21_08; i++) { + dest->creditInfo[i].chipletType = 
src->creditInfo[i].chipletType; + dest->creditInfo[i].chipletIndex = src->creditInfo[i].chipletIndex; + } +#endif + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t deserialize_NVB0CC_CTRL_SET_HS_CREDITS_PARAMS_v21_08(NVB0CC_CTRL_SET_HS_CREDITS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_SET_HS_CREDITS_PARAMS_v21_08 *src = (void*)(buffer); + NVB0CC_CTRL_SET_HS_CREDITS_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_OUTPUT_PARAMETERS + dest->statusInfo.status = src->statusInfo.status; + dest->statusInfo.entryIndex = src->statusInfo.entryIndex; +#endif +#ifdef COPY_INPUT_PARAMETERS + NvU32 i; + + // Reject an out-of-range numEntries before writing anything into the live params. + if (src->numEntries > NVB0CC_MAX_CREDIT_INFO_ENTRIES_v21_08) { + return FAILURE_T; + } + + dest->pmaChannelIdx = src->pmaChannelIdx; + dest->numEntries = src->numEntries; + + for (i = 0; i < NVB0CC_MAX_CREDIT_INFO_ENTRIES_v21_08; i++) { + dest->creditInfo[i].chipletType = src->creditInfo[i].chipletType; + dest->creditInfo[i].chipletIndex = src->creditInfo[i].chipletIndex; + dest->creditInfo[i].numCredits = src->creditInfo[i].numCredits; + } +#endif + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t deserialize_NVB0CC_CTRL_RESERVE_HES_PARAMS_v29_07(NVB0CC_CTRL_RESERVE_HES_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_RESERVE_HES_PARAMS_v29_07 *src = (void*)(buffer); + NVB0CC_CTRL_RESERVE_HES_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->type = src->type; + dest->reserveParams.cwd.ctxsw = src->reserveParams.cwd.ctxsw; +#endif + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t deserialize_NVB0CC_CTRL_RELEASE_HES_PARAMS_v29_07(NVB0CC_CTRL_RELEASE_HES_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_RELEASE_HES_PARAMS_v29_07 *src = (void*)(buffer); + NVB0CC_CTRL_RELEASE_HES_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->type = src->type; +#endif + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t deserialize_NVB0CC_CTRL_RESERVE_CCUPROF_PARAMS_v29_07(NVB0CC_CTRL_RESERVE_CCUPROF_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_RESERVE_CCUPROF_PARAMS_v29_07 *src = (void*)(buffer); + NVB0CC_CTRL_RESERVE_CCUPROF_PARAMS *dest = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->ctxsw = src->ctxsw; +#endif + } + else + return FAILURE_T; + return SUCCESS_T; +} + +#ifndef UMED_BUILD +return_t deserialize_NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS_v2A_05(NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS_v2A_05 *src = (void*)(buffer); + NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS *dest = pParams; + + if (src && dest) { + dest->value = src->value; + } + else + return FAILURE_T; + + return SUCCESS_T; +} +#endif + + +#ifndef UMED_BUILD +return_t deserialize_NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS_v25_04(NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS_v25_04 *src = (void*)(buffer); + NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS *dest = pParams; + + if (src && dest) { + dest->value = src->value; + } + else + return FAILURE_T; + + return SUCCESS_T; +} +#endif + +#endif + +#undef COPY_INPUT_PARAMETERS +#undef COPY_OUTPUT_PARAMETERS + +// Copy elements from SDK
structures to RPC structures (Step 1 or step 3 listed above) + +#define COPY_INPUT_PARAMETERS + +#ifdef BUILD_COMMON_RPCS + +static +return_t serialize_NVA080_CTRL_SET_FB_USAGE_PARAMS_v07_02(NVA080_CTRL_SET_FB_USAGE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVA080_CTRL_SET_FB_USAGE_PARAMS *src = pParams; + NVA080_CTRL_SET_FB_USAGE_PARAMS_v07_02 *dest = (void*)(buffer); + + if (src && dest) + { + dest->fbUsed = src->fbUsed; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t serialize_NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS_v06_01(NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS *src = pParams; + NVA0BC_CTRL_NVENC_SW_SESSION_UPDATE_INFO_PARAMS_v06_01 *dest = (void*)(buffer); + + if (src && dest) { + dest->hResolution = src->hResolution; + dest->vResolution = src->vResolution; + dest->averageEncodeLatency = src->averageEncodeLatency; + dest->averageEncodeFps = src->averageEncodeFps; + dest->timestampBufferSize = 0; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t serialize_NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS_v10_01(NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS *src = pParams; + NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS_v10_01 *dest = (void*)(buffer); + + if (src && dest) { + dest->engineID = src->engineID; + dest->subdeviceInstance = src->subdeviceInstance; + dest->resetReason = src->resetReason; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t serialize_NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS_v03_00(NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS *src = pParams; + NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS_v03_00 *dest = (void*)(buffer); + + if (src && dest) { + dest->exceptType = src->exceptType; + dest->engineID = src->engineID; + } + else + return FAILURE_T; + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS_v18_09(NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS *src = pParams; + NV2080_CTRL_CMD_GPU_HANDLE_VF_PRI_FAULT_PARAMS_v18_09 *dest = (void*)(buffer); + + if (src && dest) { + dest->faultType = src->faultType; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_PERF_BOOST_PARAMS_v03_00(NV2080_CTRL_PERF_BOOST_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_PERF_BOOST_PARAMS *src = pParams; + NV2080_CTRL_PERF_BOOST_PARAMS_v03_00 *dest = (void*)(buffer); + + if (src && dest) { + dest->flags = src->flags; + dest->duration = src->duration; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS_v04_00(NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS *src = pParams; + NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS_v04_00 *dest = (void*)(buffer); + + if (src && dest) { + NvU32 i; + for (i = 0; i < 
NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE; ++i) { + dest->value.colorFB[i] = src->value.colorFB[i]; + dest->value.colorDS[i] = src->value.colorDS[i]; + } + dest->value.depth = src->value.depth; + dest->value.stencil = src->value.stencil; // Changed in v04_00 + dest->indexSize = src->indexSize; + dest->indexUsed = src->indexUsed; + dest->format = src->format; + dest->valType = src->valType; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS_v03_00(NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS *src = pParams; + NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS_v03_00 *dest = (void*)(buffer); + + if (src && dest) { + NvU32 i; + for (i = 0; i < NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE; ++i) { + dest->colorFB[i] = src->colorFB[i]; + dest->colorDS[i] = src->colorDS[i]; + } + dest->format = src->format; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS_v03_00(NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS *src = pParams; + NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS_v03_00 *dest = (void*)(buffer); + + if (src && dest) { + dest->depth = src->depth; + dest->format = src->format; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS_v27_06(NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS *src = pParams; + NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS_v27_06 *dest = (void*)(buffer); + + if (src && dest) { + dest->stencil = src->stencil; + dest->format = src->format; + dest->bSkipL2Table = src->bSkipL2Table; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_v03_00(NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *src = pParams; + NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_v03_00 *dest = (void*)(buffer); + + if (src && dest) { + dest->bEnable = src->bEnable; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NVA06C_CTRL_TIMESLICE_PARAMS_v06_00(NVA06C_CTRL_TIMESLICE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVA06C_CTRL_TIMESLICE_PARAMS *src = pParams; + NVA06C_CTRL_TIMESLICE_PARAMS_v06_00 *dest = (void*)(buffer); + + if (src && dest) { + dest->timesliceUs = src->timesliceUs; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS_v06_00(NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS *src = pParams; + NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS_v06_00 *dest = (void*)(buffer); + NvU32 i; + + if (src && dest) { + if (src->numChannels > NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES) { + return FAILURE_T; + } + dest->bDisable = src->bDisable; + dest->numChannels = src->numChannels; + dest->bOnlyDisableScheduling = src->bOnlyDisableScheduling; + dest->bRewindGpPut = src->bRewindGpPut; + dest->pRunlistPreemptEvent = 0; // vGPU do not support guest kernel handles + + for (i = 0; i < src->numChannels ; 
i++) + { + dest->hClientList[i] = src->hClientList[i]; + dest->hChannelList[i] = src->hChannelList[i]; + } + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t serialize_NVA06C_CTRL_PREEMPT_PARAMS_v09_0A(NVA06C_CTRL_PREEMPT_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVA06C_CTRL_PREEMPT_PARAMS *src = pParams; + NVA06C_CTRL_PREEMPT_PARAMS_v09_0A *dest = (void*)(buffer); + + if (src && dest) { + dest->bWait = src->bWait; + dest->bManualTimeout = src->bManualTimeout; + dest->timeoutUs = src->timeoutUs; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t serialize_NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02(NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS *src = pParams; + NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02 *dest = (void*)(buffer); + + if (src && dest) { + dest->tsgInterleaveLevel = src->tsgInterleaveLevel; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t serialize_NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02(NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS *src = pParams; + NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS_v17_02 *dest = (void*)(buffer); + + if (src && dest) { + dest->channelInterleaveLevel = src->channelInterleaveLevel; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v12_01(NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS *src = pParams; + NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v12_01 *dest = (void*)(buffer); + + if (src && dest) { + NvU32 i; + + dest->flags = src->flags; + dest->hClient = src->hClient; + dest->hChannel = src->hChannel; + for (i = 0; i < NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END_v03_00; ++i) + dest->vMemPtrs[i] = src->vMemPtrs[i]; + dest->gfxpPreemptMode = src->gfxpPreemptMode; + dest->cilpPreemptMode = src->cilpPreemptMode; + dest->grRouteInfo.flags = src->grRouteInfo.flags; + dest->grRouteInfo.route = src->grRouteInfo.route; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v28_07(NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS* pParams, + NvU8* buffer, + NvU32 bufferSize, + NvU32* offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS* src = pParams; + NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS_v28_07* dest = (void*)(buffer); + + if (src && dest) { + NvU32 i; + + dest->flags = src->flags; + dest->hClient = src->hClient; + dest->hChannel = src->hChannel; + for (i = 0; i < NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END_v28_07; ++i) + dest->vMemPtrs[i] = src->vMemPtrs[i]; + dest->gfxpPreemptMode = src->gfxpPreemptMode; + dest->cilpPreemptMode = src->cilpPreemptMode; + dest->grRouteInfo.flags = src->grRouteInfo.flags; + dest->grRouteInfo.route = src->grRouteInfo.route; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS_v12_01(NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ 
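+    /*
+     * The serializers in this table are compiled once per copy direction:
+     * with COPY_INPUT_PARAMETERS defined the guest arguments are packed into
+     * the RPC message, and with COPY_OUTPUT_PARAMETERS defined the results
+     * are copied back out.  This control is set-only, so its whole body is
+     * input-guarded and the output pass reduces to "return SUCCESS_T;".
+     */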
+#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS *src = pParams; + NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS_v12_01 *dest = (void*)(buffer); + + if (src && dest) { + dest->flags = src->flags; + dest->hChannel = src->hChannel; + dest->gfxpPreemptMode = src->gfxpPreemptMode; + dest->cilpPreemptMode = src->cilpPreemptMode; + dest->grRouteInfo.flags = src->grRouteInfo.flags; + dest->grRouteInfo.route = src->grRouteInfo.route; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS_v03_00(NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS *src = pParams; + NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS_v03_00 *dest = (void*)(buffer); + + if (src && dest) { + dest->hClient = src->hClient; + dest->hChannel = src->hChannel; + dest->vMemPtr = src->vMemPtr; + dest->zcullMode = src->zcullMode; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS_v03_00(NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *src = pParams; + NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS_v03_00 *dest = (void*)(buffer); + + if (src && dest) { + dest->engineType = src->engineType; + dest->hClient = src->hClient; + dest->ChID = src->ChID; + dest->hChanClient = src->hChanClient; + dest->hObject = src->hObject; + dest->hVirtMemory = src->hVirtMemory; + dest->physAddress = src->physAddress; + dest->physAttr = src->physAttr; + dest->hDmaHandle = src->hDmaHandle; + dest->index = src->index; + dest->size = src->size; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t serialize_NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_v1E_04(NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *src = pParams; + NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_v1E_04 *dest = (void*)(buffer); + + if (src && dest) { + NvU32 i; + + if (src->numLevelsToCopy > GMMU_FMT_MAX_LEVELS_v1A_12) { + return FAILURE_T; + } + + dest->hSubDevice = src->hSubDevice; + dest->subDeviceId = src->subDeviceId; + dest->pageSize = src->pageSize; + dest->virtAddrLo = src->virtAddrLo; + dest->virtAddrHi = src->virtAddrHi; + dest->numLevelsToCopy = src->numLevelsToCopy; + + for (i = 0; i < dest->numLevelsToCopy; i++) + { + dest->levels[i].physAddress = src->levels[i].physAddress; + dest->levels[i].aperture = src->levels[i].aperture; + dest->levels[i].size = src->levels[i].size; + dest->levels[i].pageShift = src->levels[i].pageShift; + } + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS + +#if (defined(GSP_PLUGIN_BUILD) || defined(RESMAN_BUILD)) && !defined(UMED_BUILD) +static +return_t serialize_GET_BRAND_CAPS_v25_12(NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_OUTPUT_PARAMETERS + NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *src = pParams; + rpc_get_brand_caps_v25_12 *dest = (void*)(buffer); + + if (src && dest) { + dest->brands = src->brands; + } + else + return FAILURE_T; +#endif + + return SUCCESS_T; +} +#endif // (defined(GSP_PLUGIN_BUILD) || defined(RESMAN_BUILD)) && !defined(UMED_BUILD) + 
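+/*
+ * All of the serializers below share one shape: null-check both structs,
+ * bounds-check any guest-controlled element count against the versioned
+ * _MAX_ constant before copying arrays, and guard each copy direction with
+ * COPY_INPUT_PARAMETERS / COPY_OUTPUT_PARAMETERS.  A minimal sketch of that
+ * shape, using a hypothetical FOO control (the FOO_* names are invented for
+ * illustration and are not part of the SDK):
+ *
+ *     static
+ *     return_t serialize_FOO_PARAMS_vXX_YY(FOO_PARAMS *pParams,
+ *                                          NvU8 *buffer,
+ *                                          NvU32 bufferSize,
+ *                                          NvU32 *offset)
+ *     {
+ *         FOO_PARAMS        *src  = pParams;
+ *         FOO_PARAMS_vXX_YY *dest = (void*)(buffer);
+ *
+ *         if (src == NULL || dest == NULL)
+ *             return FAILURE_T;
+ *
+ *         // Reject a bad count before any array copy.
+ *         if (src->entryCount > FOO_MAX_ENTRIES_vXX_YY)
+ *             return FAILURE_T;
+ *
+ *     #ifdef COPY_INPUT_PARAMETERS   // guest arguments -> RPC message
+ *         NvU32 i;
+ *         dest->entryCount = src->entryCount;
+ *         for (i = 0; i < src->entryCount; i++)
+ *             dest->entries[i] = src->entries[i];
+ *     #endif
+ *     #ifdef COPY_OUTPUT_PARAMETERS  // RPC message results -> guest
+ *         dest->status = src->status;
+ *     #endif
+ *         return SUCCESS_T;
+ *     }
+ */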
+static +return_t serialize_NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS_v15_01(NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_OUTPUT_PARAMETERS + NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS *src = pParams; + NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS_v15_01 *dest = (void*)(buffer); + + if (src && dest) { + dest->engines = src->engines; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t serialize_NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS_v03_00(NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS *src = pParams; + NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS_v03_00 *dest = (void*)(buffer); + + if (src && dest) { + dest->hTargetChannel = src->hTargetChannel; + dest->numSMsToClear = src->numSMsToClear; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS_v21_06(NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS *src = pParams; + NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS_v21_06 *dest = (void*)(buffer); + + if (src && dest) { +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i; +#endif + +#ifdef COPY_INPUT_PARAMETERS + dest->hTargetChannel = src->hTargetChannel; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + if (src->numSMsToRead > VGPU_RPC_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PER_RPC_v21_06) { + return FAILURE_T; + } + + for (i = 0; i < src->numSMsToRead; ++i) + { + dest->smErrorStateArray[i].hwwGlobalEsr = src->smErrorStateArray[i].hwwGlobalEsr; + dest->smErrorStateArray[i].hwwWarpEsr = src->smErrorStateArray[i].hwwWarpEsr; + dest->smErrorStateArray[i].hwwWarpEsrPc = src->smErrorStateArray[i].hwwWarpEsrPc; + dest->smErrorStateArray[i].hwwGlobalEsrReportMask = src->smErrorStateArray[i].hwwGlobalEsrReportMask; + dest->smErrorStateArray[i].hwwWarpEsrReportMask = src->smErrorStateArray[i].hwwWarpEsrReportMask; + dest->smErrorStateArray[i].hwwEsrAddr = src->smErrorStateArray[i].hwwEsrAddr; + dest->smErrorStateArray[i].hwwWarpEsrPc64 = src->smErrorStateArray[i].hwwWarpEsrPc64; + /* New fields added in version v21_06 */ + dest->smErrorStateArray[i].hwwCgaEsr = src->smErrorStateArray[i].hwwCgaEsr; + dest->smErrorStateArray[i].hwwCgaEsrReportMask = src->smErrorStateArray[i].hwwCgaEsrReportMask; + } + dest->mmuFault.valid = src->mmuFault.valid; + dest->mmuFault.faultInfo = src->mmuFault.faultInfo; + dest->mmuFaultInfo = src->mmuFault.faultInfo; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS_v03_00(NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS *src = pParams; + NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS_v03_00 *dest = (void*)(buffer); + + if (src && dest) { + dest->exceptionMask = src->exceptionMask; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_v1A_20(NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *src = pParams; + NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_v1A_20 *dest = (void*)(buffer); + NvU32 i; + + if 
(src && dest) + { + dest->engineType = src->engineType; + dest->hClient = src->hClient; + dest->ChID = src->ChID; + dest->hChanClient = src->hChanClient; + dest->hObject = src->hObject; + dest->hVirtMemory = src->hVirtMemory; + dest->virtAddress = src->virtAddress; + dest->size = src->size; + dest->entryCount = src->entryCount; + + if (dest->entryCount > NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES) { + return FAILURE_T; + } + + for (i = 0; i < dest->entryCount; i++) { + dest->promoteEntry[i].gpuPhysAddr = src->promoteEntry[i].gpuPhysAddr; + dest->promoteEntry[i].gpuVirtAddr = src->promoteEntry[i].gpuVirtAddr; + dest->promoteEntry[i].size = src->promoteEntry[i].size; + dest->promoteEntry[i].physAttr = src->promoteEntry[i].physAttr; + dest->promoteEntry[i].bufferId = src->promoteEntry[i].bufferId; + dest->promoteEntry[i].bInitialize = src->promoteEntry[i].bInitialize; + dest->promoteEntry[i].bNonmapped = src->promoteEntry[i].bNonmapped; + } + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +static +return_t serialize_NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS_v1A_06(NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS *src = pParams; + NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS_v1A_06 *dest = (void*)(buffer); + + if (src && dest) { +#ifdef COPY_OUTPUT_PARAMETERS + dest->waitForEvent = src->waitForEvent; + dest->hResidentChannel = src->hResidentChannel; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS_v1A_06(NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS *src = pParams; + NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS_v1A_06 *dest = (void*)(buffer); + + if (src && dest) { + NvU32 idx = 0; +#ifdef COPY_INPUT_PARAMETERS + if (src->regOpCount > NV83DE_CTRL_GPU_EXEC_REG_OPS_MAX_OPS) { + return FAILURE_T; + } + + dest->bNonTransactional = src->bNonTransactional; + dest->regOpCount = src->regOpCount; + + for (idx = 0; idx < src->regOpCount; idx++) + { + dest->regOps[idx].regOp = src->regOps[idx].regOp; + dest->regOps[idx].regType = src->regOps[idx].regType; + dest->regOps[idx].regQuad = src->regOps[idx].regQuad; + dest->regOps[idx].regGroupMask = src->regOps[idx].regGroupMask; + dest->regOps[idx].regSubGroupMask = src->regOps[idx].regSubGroupMask; + dest->regOps[idx].regOffset = src->regOps[idx].regOffset; + dest->regOps[idx].regAndNMaskLo = src->regOps[idx].regAndNMaskLo; + dest->regOps[idx].regAndNMaskHi = src->regOps[idx].regAndNMaskHi; + dest->regOps[idx].regValueLo = src->regOps[idx].regValueLo; + dest->regOps[idx].regValueHi = src->regOps[idx].regValueHi; + } +#endif +#ifdef COPY_OUTPUT_PARAMETERS + for (idx = 0; idx < src->regOpCount; idx++) + { + dest->regOps[idx].regStatus = src->regOps[idx].regStatus; + dest->regOps[idx].regValueLo = src->regOps[idx].regValueLo; + dest->regOps[idx].regValueHi = src->regOps[idx].regValueHi; + } +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS_v2A_05(NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS *src = pParams; + NV83DE_CTRL_DEBUG_SET_MODE_MMU_GCC_DEBUG_PARAMS_v2A_05 *dest = (void*)(buffer); + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + dest->action = 
src->action; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS_v1A_06(NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS *src = pParams; + NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS_v1A_06 *dest = (void*)(buffer); + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + dest->action = src->action; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS_v21_06(NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS *src = pParams; + NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS_v21_06 *dest = (void*)(buffer); + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + dest->hTargetChannel = src->hTargetChannel; + dest->smID = src->smID; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->smErrorState.hwwGlobalEsr = src->smErrorState.hwwGlobalEsr; + dest->smErrorState.hwwWarpEsr = src->smErrorState.hwwWarpEsr; + dest->smErrorState.hwwWarpEsrPc = src->smErrorState.hwwWarpEsrPc; + dest->smErrorState.hwwGlobalEsrReportMask = src->smErrorState.hwwGlobalEsrReportMask; + dest->smErrorState.hwwWarpEsrReportMask = src->smErrorState.hwwWarpEsrReportMask; + dest->smErrorState.hwwEsrAddr = src->smErrorState.hwwEsrAddr; + dest->smErrorState.hwwWarpEsrPc64 = src->smErrorState.hwwWarpEsrPc64; + /* New fields added in version v21_06 */ + dest->smErrorState.hwwCgaEsr = src->smErrorState.hwwCgaEsr; + dest->smErrorState.hwwCgaEsrReportMask = src->smErrorState.hwwCgaEsrReportMask; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS_v1A_06(NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS *src = pParams; + NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS_v1A_06 *dest = (void*)(buffer); + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + dest->hTargetChannel = src->hTargetChannel; + dest->smID = src->smID; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS_v1A_06(NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS *src = pParams; + NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS_v1A_06 *dest = (void*)(buffer); + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + dest->action = src->action; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS_v1A_06(NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS *src = pParams; + NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS_v1A_06 *dest = (void*)(buffer); + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + dest->stopTriggerType = src->stopTriggerType; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS_v03_00(NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS *pParams, + NvU8 
*buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS *src = pParams; + NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS_v03_00 *dest = (void*)(buffer); + + if (src && dest) { + dest->hVASpace = src->hVASpace; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS_v1A_07(NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS *src = pParams; + NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS_v1A_07 *dest = (void*)(buffer); + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + dest->ceEngineType = src->ceEngineType; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->pceMask = src->pceMask; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS_v1A_07(NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS *src = pParams; + NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS_v1A_07 *dest = (void*)(buffer); + + if (src && dest) { + ct_assert(NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE == 4); + +#ifdef COPY_INPUT_PARAMETERS + dest->index = src->index; + dest->tableType = src->tableType; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i; + for (i = 0; i < NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE; ++i) { + dest->value.colorFB[i] = src->value.colorFB[i]; + dest->value.colorDS[i] = src->value.colorDS[i]; + } + dest->value.depth = src->value.depth; + dest->value.stencil = src->value.stencil; + dest->format = src->format; + dest->index = src->index; + dest->bIndexValid = src->bIndexValid; + dest->tableType = src->tableType; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v23_04(NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *src = pParams; + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v23_04 *dest = (void*)(buffer); + + if (src && dest) { +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i; + + dest->enabledLinkMask = src->enabledLinkMask; + + FOR_EACH_INDEX_IN_MASK(32, i, src->enabledLinkMask) + { + NV2080_CTRL_NVLINK_DEVICE_INFO *deviceInfo_s = NULL; + NV2080_CTRL_NVLINK_DEVICE_INFO_v15_02 *deviceInfo_d = NULL; + + if (i >= NV2080_CTRL_NVLINK_MAX_LINKS_v23_04) + break; + + dest->linkInfo[i].capsTbl = src->linkInfo[i].capsTbl; + dest->linkInfo[i].phyType = src->linkInfo[i].phyType; + dest->linkInfo[i].subLinkWidth = src->linkInfo[i].subLinkWidth; + dest->linkInfo[i].linkState = src->linkInfo[i].linkState; + dest->linkInfo[i].rxSublinkStatus = src->linkInfo[i].rxSublinkStatus; + dest->linkInfo[i].txSublinkStatus = src->linkInfo[i].txSublinkStatus; + dest->linkInfo[i].nvlinkVersion = src->linkInfo[i].nvlinkVersion; + dest->linkInfo[i].nciVersion = src->linkInfo[i].nciVersion; + dest->linkInfo[i].phyVersion = src->linkInfo[i].phyVersion; + dest->linkInfo[i].nvlinkLinkClockKHz = src->linkInfo[i].nvlinkLinkClockKHz; + dest->linkInfo[i].nvlinkLineRateMbps = src->linkInfo[i].nvlinkLineRateMbps; + dest->linkInfo[i].connected = src->linkInfo[i].connected; + dest->linkInfo[i].remoteDeviceLinkNumber = src->linkInfo[i].remoteDeviceLinkNumber; + dest->linkInfo[i].localDeviceLinkNumber = src->linkInfo[i].localDeviceLinkNumber; + + deviceInfo_d = 
&dest->linkInfo[i].localDeviceInfo; + deviceInfo_s = &src->linkInfo[i].localDeviceInfo; + + deviceInfo_d->deviceIdFlags = deviceInfo_s->deviceIdFlags; + deviceInfo_d->domain = deviceInfo_s->domain; + deviceInfo_d->bus = deviceInfo_s->bus; + deviceInfo_d->device = deviceInfo_s->device; + deviceInfo_d->function = deviceInfo_s->function; + deviceInfo_d->pciDeviceId = deviceInfo_s->pciDeviceId; + deviceInfo_d->deviceType = deviceInfo_s->deviceType; + portMemCopy(deviceInfo_d->deviceUUID, + sizeof(deviceInfo_d->deviceUUID), + deviceInfo_s->deviceUUID, + sizeof(deviceInfo_s->deviceUUID)); + + deviceInfo_d = &dest->linkInfo[i].remoteDeviceInfo; + deviceInfo_s = &src->linkInfo[i].remoteDeviceInfo; + + deviceInfo_d->deviceIdFlags = deviceInfo_s->deviceIdFlags; + deviceInfo_d->domain = deviceInfo_s->domain; + deviceInfo_d->bus = deviceInfo_s->bus; + deviceInfo_d->device = deviceInfo_s->device; + deviceInfo_d->function = deviceInfo_s->function; + deviceInfo_d->pciDeviceId = deviceInfo_s->pciDeviceId; + deviceInfo_d->deviceType = deviceInfo_s->deviceType; + portMemCopy(deviceInfo_d->deviceUUID, + sizeof(deviceInfo_d->deviceUUID), + deviceInfo_s->deviceUUID, + sizeof(deviceInfo_s->deviceUUID)); + } + FOR_EACH_INDEX_IN_MASK_END; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v28_09(NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *src = pParams; + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_v28_09 *dest = (void*)(buffer); + + if (src && dest) { +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i; + + dest->enabledLinkMask = src->enabledLinkMask; + + FOR_EACH_INDEX_IN_MASK(32, i, src->enabledLinkMask) + { + NV2080_CTRL_NVLINK_DEVICE_INFO *deviceInfo_s = NULL; + NV2080_CTRL_NVLINK_DEVICE_INFO_v28_09 *deviceInfo_d = NULL; + + if (i >= NV2080_CTRL_NVLINK_MAX_LINKS_v23_04) + break; + + dest->linkInfo[i].capsTbl = src->linkInfo[i].capsTbl; + dest->linkInfo[i].phyType = src->linkInfo[i].phyType; + dest->linkInfo[i].subLinkWidth = src->linkInfo[i].subLinkWidth; + dest->linkInfo[i].linkState = src->linkInfo[i].linkState; + dest->linkInfo[i].rxSublinkStatus = src->linkInfo[i].rxSublinkStatus; + dest->linkInfo[i].txSublinkStatus = src->linkInfo[i].txSublinkStatus; + dest->linkInfo[i].nvlinkVersion = src->linkInfo[i].nvlinkVersion; + dest->linkInfo[i].nciVersion = src->linkInfo[i].nciVersion; + dest->linkInfo[i].phyVersion = src->linkInfo[i].phyVersion; + dest->linkInfo[i].nvlinkLinkClockKHz = src->linkInfo[i].nvlinkLinkClockKHz; + dest->linkInfo[i].nvlinkLineRateMbps = src->linkInfo[i].nvlinkLineRateMbps; + dest->linkInfo[i].connected = src->linkInfo[i].connected; + dest->linkInfo[i].remoteDeviceLinkNumber = src->linkInfo[i].remoteDeviceLinkNumber; + dest->linkInfo[i].localDeviceLinkNumber = src->linkInfo[i].localDeviceLinkNumber; + + deviceInfo_d = &dest->linkInfo[i].localDeviceInfo; + deviceInfo_s = &src->linkInfo[i].localDeviceInfo; + + deviceInfo_d->deviceIdFlags = deviceInfo_s->deviceIdFlags; + deviceInfo_d->domain = deviceInfo_s->domain; + deviceInfo_d->bus = deviceInfo_s->bus; + deviceInfo_d->device = deviceInfo_s->device; + deviceInfo_d->function = deviceInfo_s->function; + deviceInfo_d->pciDeviceId = deviceInfo_s->pciDeviceId; + deviceInfo_d->deviceType = deviceInfo_s->deviceType; + portMemCopy(deviceInfo_d->deviceUUID, + sizeof(deviceInfo_d->deviceUUID), + deviceInfo_s->deviceUUID, + 
sizeof(deviceInfo_s->deviceUUID)); + deviceInfo_d->fabricRecoveryStatusMask = deviceInfo_s->fabricRecoveryStatusMask; + + deviceInfo_d = &dest->linkInfo[i].remoteDeviceInfo; + deviceInfo_s = &src->linkInfo[i].remoteDeviceInfo; + + deviceInfo_d->deviceIdFlags = deviceInfo_s->deviceIdFlags; + deviceInfo_d->domain = deviceInfo_s->domain; + deviceInfo_d->bus = deviceInfo_s->bus; + deviceInfo_d->device = deviceInfo_s->device; + deviceInfo_d->function = deviceInfo_s->function; + deviceInfo_d->pciDeviceId = deviceInfo_s->pciDeviceId; + deviceInfo_d->deviceType = deviceInfo_s->deviceType; + portMemCopy(deviceInfo_d->deviceUUID, + sizeof(deviceInfo_d->deviceUUID), + deviceInfo_s->deviceUUID, + sizeof(deviceInfo_s->deviceUUID)); + deviceInfo_d->fabricRecoveryStatusMask = deviceInfo_s->fabricRecoveryStatusMask; + } + FOR_EACH_INDEX_IN_MASK_END; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + + +static +return_t serialize_NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS_v1F_0D(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS *src = pParams; + NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS_v1F_0D *dest = (void*)(buffer); + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + portMemCopy(dest->gpuIds, (sizeof(NvU32) * NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS), + src->gpuIds, (sizeof(NvU32) * NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS)); + dest->gpuCount = src->gpuCount; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->p2pCaps = src->p2pCaps; + dest->p2pOptimalReadCEs = src->p2pOptimalReadCEs; + dest->p2pOptimalWriteCEs = src->p2pOptimalWriteCEs; + memcpy(dest->p2pCapsStatus, src->p2pCapsStatus, NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE_v1F_0D); +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS_v18_0A(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS *src = pParams; + NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS_v18_0A *dest = (void*)(buffer); + + if (src && dest) { + if (src->grpACount == 0 || + src->grpACount > NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS || + src->grpBCount > NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + NvU32 idx = 0; + + dest->grpACount = src->grpACount; + dest->grpBCount = src->grpBCount; + + for (idx = 0; idx < NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS; idx++) { + dest->gpuIdGrpA[idx] = src->gpuIdGrpA[idx]; + dest->gpuIdGrpB[idx] = src->gpuIdGrpB[idx]; + } +#endif +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 grpAIdx = 0, grpBIdx= 0; + + NvBool bReflexive = NV_FALSE; + + // Check for the reflexive case + if (src->grpBCount == 0) { + bReflexive = NV_TRUE; + } + + for (grpAIdx = 0; grpAIdx < src->grpACount; grpAIdx++) { + for (grpBIdx = 0; bReflexive ? 
grpBIdx <= grpAIdx : grpBIdx < src->grpBCount; grpBIdx++) { + dest->p2pCaps[grpAIdx].array[grpBIdx] = src->p2pCaps[grpAIdx][grpBIdx]; + dest->a2bOptimalReadCes[grpAIdx].array[grpBIdx] = src->a2bOptimalReadCes[grpAIdx][grpBIdx]; + dest->a2bOptimalWriteCes[grpAIdx].array[grpBIdx] = src->a2bOptimalWriteCes[grpAIdx][grpBIdx]; + dest->b2aOptimalReadCes[grpAIdx].array[grpBIdx] = src->b2aOptimalReadCes[grpAIdx][grpBIdx]; + dest->b2aOptimalWriteCes[grpAIdx].array[grpBIdx] = src->b2aOptimalWriteCes[grpAIdx][grpBIdx]; + } + } +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_GET_P2P_CAPS_PARAMS_v21_02(NV2080_CTRL_GET_P2P_CAPS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GET_P2P_CAPS_PARAMS *src = pParams; + NV2080_CTRL_GET_P2P_CAPS_PARAMS_v21_02 *dest = (void*)(buffer); + + if (src && dest) { +#ifdef COPY_INPUT_PARAMETERS + if (!src->bAllCaps && (src->peerGpuCount > NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS)) + return FAILURE_T; + + dest->bAllCaps = src->bAllCaps; + dest->bUseUuid = src->bUseUuid; + + if (!src->bAllCaps) + { + NvU32 i; + dest->peerGpuCount = src->peerGpuCount; + + for (i = 0; i < src->peerGpuCount; ++i) + { + portMemCopy(dest->peerGpuCaps[i].gpuUuid, + VM_UUID_SIZE_v21_02, + src->peerGpuCaps[i].gpuUuid, + VM_UUID_SIZE_v21_02); + } + } +#endif +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i; + + if (src->peerGpuCount > NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS) + return FAILURE_T; + + if (src->bAllCaps) + { + dest->peerGpuCount = src->peerGpuCount; + } + + for (i = 0; i < src->peerGpuCount; ++i) + { + if (src->bAllCaps) + { + portMemCopy(dest->peerGpuCaps[i].gpuUuid, + VM_UUID_SIZE_v21_02, + src->peerGpuCaps[i].gpuUuid, + VM_UUID_SIZE_v21_02); + } + + dest->peerGpuCaps[i].p2pCaps = src->peerGpuCaps[i].p2pCaps; + dest->peerGpuCaps[i].p2pOptimalReadCEs = src->peerGpuCaps[i].p2pOptimalReadCEs; + dest->peerGpuCaps[i].p2pOptimalWriteCEs = src->peerGpuCaps[i].p2pOptimalWriteCEs; + portMemCopy(dest->peerGpuCaps[i].p2pCapsStatus, + sizeof(dest->peerGpuCaps[i].p2pCapsStatus), + src->peerGpuCaps[i].p2pCapsStatus, + sizeof(src->peerGpuCaps[i].p2pCapsStatus)); + dest->peerGpuCaps[i].busPeerId = src->peerGpuCaps[i].busPeerId; + } +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS_v1A_0F(NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS_v1A_0F *dest = (void*)(buffer); + NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS *src = pParams; + + if (src && dest) + { + dest->ctxsw = src->ctxsw; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +static +return_t serialize_NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS_v1A_0F(NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS_v1A_0F *dest = (void*)(buffer); + NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS *src = pParams; + if (src && dest) + { + dest->ctxsw = src->ctxsw; + } + else + return FAILURE_T; +#endif + + return SUCCESS_T; +} + +static +return_t serialize_NVB0CC_CTRL_EXEC_REG_OPS_PARAMS_v1A_0F(NVB0CC_CTRL_EXEC_REG_OPS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_EXEC_REG_OPS_PARAMS_v1A_0F *dest = (void*)(buffer); + NVB0CC_CTRL_EXEC_REG_OPS_PARAMS *src = pParams; + NvU32 idx = 0; + + if (src && dest) + { + if 
(src->regOpCount > NVB0CC_REGOPS_MAX_COUNT) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->regOpCount = src->regOpCount; + dest->mode = src->mode; + for (idx = 0; idx < src->regOpCount; idx++) + { + dest->regOps[idx].regOp = src->regOps[idx].regOp; + dest->regOps[idx].regType = src->regOps[idx].regType; + dest->regOps[idx].regQuad = src->regOps[idx].regQuad; + dest->regOps[idx].regGroupMask = src->regOps[idx].regGroupMask; + dest->regOps[idx].regSubGroupMask = src->regOps[idx].regSubGroupMask; + dest->regOps[idx].regOffset = src->regOps[idx].regOffset; + dest->regOps[idx].regValueLo = src->regOps[idx].regValueLo; + dest->regOps[idx].regValueHi = src->regOps[idx].regValueHi; + dest->regOps[idx].regAndNMaskLo = src->regOps[idx].regAndNMaskLo; + dest->regOps[idx].regAndNMaskHi = src->regOps[idx].regAndNMaskHi; + } +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->bPassed = src->bPassed; + dest->bDirect = src->bDirect; + for (idx = 0; idx < src->regOpCount; idx++) + { + dest->regOps[idx].regStatus = src->regOps[idx].regStatus; + dest->regOps[idx].regValueLo = src->regOps[idx].regValueLo; + dest->regOps[idx].regValueHi = src->regOps[idx].regValueHi; + } +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS_v1A_14(NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS_v1A_14 *dest = (void*)(buffer); + NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS *src = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->hMemPmaBuffer = src->hMemPmaBuffer; + dest->pmaBufferOffset = src->pmaBufferOffset; + dest->pmaBufferSize = src->pmaBufferSize; + dest->hMemPmaBytesAvailable = src->hMemPmaBytesAvailable; + dest->pmaBytesAvailableOffset = src->pmaBytesAvailableOffset; + dest->ctxsw = src->ctxsw; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->pmaChannelIdx = src->pmaChannelIdx; + dest->pmaBufferVA = src->pmaBufferVA; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v1A_14(NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v1A_14 *dest = (void*)(buffer); + NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *src = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->bytesConsumed = src->bytesConsumed; + dest->bUpdateAvailableBytes = src->bUpdateAvailableBytes; + dest->bWait = src->bWait; + dest->bReturnPut = src->bReturnPut; + dest->pmaChannelIdx = src->pmaChannelIdx; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->bytesAvailable = src->bytesAvailable; + dest->putPtr = src->putPtr; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v29_0B(NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_v29_0B *dest = (void*)(buffer); + NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *src = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->bytesConsumed = src->bytesConsumed; + dest->bUpdateAvailableBytes = src->bUpdateAvailableBytes; + dest->bWait = src->bWait; + dest->bReturnPut = src->bReturnPut; + dest->pmaChannelIdx = src->pmaChannelIdx; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->bytesAvailable = src->bytesAvailable; + 
dest->putPtr = src->putPtr; + dest->bOverflowStatus = src->bOverflowStatus; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_GPU_MIGRATABLE_OPS_PARAMS_v21_07(NV2080_CTRL_GPU_MIGRATABLE_OPS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset){ + NV2080_CTRL_GPU_MIGRATABLE_OPS_PARAMS *src = pParams; + NV2080_CTRL_GPU_MIGRATABLE_OPS_PARAMS_v21_07 *dest = (void*)(buffer); + + if (src && dest) { + NvU32 idx = 0; +#ifdef COPY_INPUT_PARAMETERS + if (src->regOpCount > NV2080_CTRL_MIGRATABLE_OPS_ARRAY_MAX_v21_07) { + return FAILURE_T; + } + + dest->bNonTransactional = src->bNonTransactional; + dest->regOpCount = src->regOpCount; + dest->hClientTarget = src->hClientTarget; + dest->hChannelTarget = src->hChannelTarget; + + for (idx = 0; idx < src->regOpCount; idx++) + { + dest->regOps[idx].regOp = src->regOps[idx].regOp; + dest->regOps[idx].regType = src->regOps[idx].regType; + dest->regOps[idx].regQuad = src->regOps[idx].regQuad; + dest->regOps[idx].regGroupMask = src->regOps[idx].regGroupMask; + dest->regOps[idx].regSubGroupMask = src->regOps[idx].regSubGroupMask; + dest->regOps[idx].regOffset = src->regOps[idx].regOffset; + dest->regOps[idx].regAndNMaskLo = src->regOps[idx].regAndNMaskLo; + dest->regOps[idx].regAndNMaskHi = src->regOps[idx].regAndNMaskHi; + dest->regOps[idx].regValueLo = src->regOps[idx].regValueLo; + dest->regOps[idx].regValueHi = src->regOps[idx].regValueHi; + dest->smIds[idx] = src->smIds[idx]; + } +#endif +#ifdef COPY_OUTPUT_PARAMETERS + for (idx = 0; idx < src->regOpCount; idx++) + { + dest->regOps[idx].regStatus = src->regOps[idx].regStatus; + dest->regOps[idx].regValueLo = src->regOps[idx].regValueLo; + dest->regOps[idx].regValueHi = src->regOps[idx].regValueHi; + } +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +static +return_t serialize_NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS_v03_00(NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS *src = pParams; + NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS_v03_00 *dest = (void*)(buffer); + + if (src && dest) + { + dest->hChannel = src->hChannel; + dest->property = src->property; + dest->value = src->value; + } + else + return FAILURE_T; + + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +static +return_t serialize_NV2080_CTRL_GPU_EVICT_CTX_PARAMS_v1A_1C( + NV2080_CTRL_GPU_EVICT_CTX_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GPU_EVICT_CTX_PARAMS *src = pParams; + NV2080_CTRL_GPU_EVICT_CTX_PARAMS_v03_00 *dest = (void*)(buffer); + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->engineType = src->engineType; + dest->hClient = src->hClient; + dest->ChID = src->ChID; + dest->hChanClient = src->hChanClient; + dest->hObject = src->hObject; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_v1A_1D(NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_v1A_1D *dest = (void*)(buffer); + NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS *src = pParams; + NvU32 idx = 0; + + if (src && dest) + { + if (src->numQueries > NV2080_CTRL_GRMGR_GR_FS_INFO_MAX_QUERIES_v1A_1D) { + return FAILURE_T; + } +#ifdef COPY_INPUT_PARAMETERS + dest->numQueries = src->numQueries; +#endif + for 
(idx = 0; idx < dest->numQueries; idx++) {
+#ifdef COPY_INPUT_PARAMETERS
+            dest->queries[idx].queryType = src->queries[idx].queryType;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+            dest->queries[idx].status = src->queries[idx].status;
+#endif
+            switch(dest->queries[idx].queryType)
+            {
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_GPC_COUNT: {
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.gpcCountData.gpcCount = src->queries[idx].queryData.gpcCountData.gpcCount;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_GPC_MAP: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.chipletGpcMapData.gpcId = src->queries[idx].queryData.chipletGpcMapData.gpcId;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.chipletGpcMapData.chipletGpcMap = src->queries[idx].queryData.chipletGpcMapData.chipletGpcMap;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_TPC_MASK: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.tpcMaskData.gpcId = src->queries[idx].queryData.tpcMaskData.gpcId;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.tpcMaskData.tpcMask = src->queries[idx].queryData.tpcMaskData.tpcMask;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PPC_MASK: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.ppcMaskData.gpcId = src->queries[idx].queryData.ppcMaskData.gpcId;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.ppcMaskData.ppcMask = src->queries[idx].queryData.ppcMaskData.ppcMask;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_CHIPLET_GPC_MAP: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.partitionGpcMapData.swizzId = src->queries[idx].queryData.partitionGpcMapData.swizzId;
+                    dest->queries[idx].queryData.partitionGpcMapData.gpcId = src->queries[idx].queryData.partitionGpcMapData.gpcId;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.partitionGpcMapData.chipletGpcMap = src->queries[idx].queryData.partitionGpcMapData.chipletGpcMap;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_SYSPIPE_MASK: {
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.syspipeMaskData.chipletSyspipeMask = src->queries[idx].queryData.syspipeMaskData.chipletSyspipeMask;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_CHIPLET_SYSPIPE_IDS: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.partitionChipletSyspipeData.swizzId = src->queries[idx].queryData.partitionChipletSyspipeData.swizzId;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+                    NvU32 i;
+
+                    dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount = src->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount;
+
+                    if (dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount > NV2080_CTRL_GRMGR_MAX_SMC_IDS_v1A_1D) {
+                        return FAILURE_T;
+                    }
+
+                    // Use a dedicated counter here; reusing the outer 'idx' would clobber the query loop index and misindex queries[].
+                    for (i = 0; i < dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount; i++)
+                        dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeId[i] = src->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeId[i];
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PROFILER_MON_GPC_MASK: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.dmGpcMaskData.swizzId = src->queries[idx].queryData.dmGpcMaskData.swizzId;
+                    dest->queries[idx].queryData.dmGpcMaskData.grIdx = src->queries[idx].queryData.dmGpcMaskData.grIdx;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
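+                    /* Output pass: copy back the GPC enable mask computed for the swizzId/grIdx requested above. */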
+ dest->queries[idx].queryData.dmGpcMaskData.gpcEnMask = src->queries[idx].queryData.dmGpcMaskData.gpcEnMask; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_SYSPIPE_ID: { +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.partitionSyspipeIdData.syspipeId = src->queries[idx].queryData.partitionSyspipeIdData.syspipeId; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_ROP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryData.ropMaskData.gpcId = src->queries[idx].queryData.ropMaskData.gpcId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.ropMaskData.ropMask = src->queries[idx].queryData.ropMaskData.ropMask; +#endif + break; + } + default: + { + // Unknown query + return FAILURE_T; + } + } + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_v2B_09(NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_v2B_09 *dest = (void*)(buffer); + NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS *src = pParams; + NvU32 idx = 0; + + if (src && dest) + { + if (src->numQueries > NV2080_CTRL_GRMGR_GR_FS_INFO_MAX_QUERIES_v1A_1D) { + return FAILURE_T; + } +#ifdef COPY_INPUT_PARAMETERS + dest->numQueries = src->numQueries; +#endif + for (idx = 0; idx < dest->numQueries; idx++) { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryType = src->queries[idx].queryType; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].status = src->queries[idx].status; +#endif + switch(dest->queries[idx].queryType) + { + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_GPC_COUNT: { +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.gpcCountData.gpcCount = src->queries[idx].queryData.gpcCountData.gpcCount; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_GPC_MAP: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryData.chipletGpcMapData.gpcId = src->queries[idx].queryData.chipletGpcMapData.gpcId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.chipletGpcMapData.chipletGpcMap = src->queries[idx].queryData.chipletGpcMapData.chipletGpcMap; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_TPC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryData.tpcMaskData.gpcId = src->queries[idx].queryData.tpcMaskData.gpcId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.tpcMaskData.tpcMask = src->queries[idx].queryData.tpcMaskData.tpcMask; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PPC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryData.ppcMaskData.gpcId = src->queries[idx].queryData.ppcMaskData.gpcId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.ppcMaskData.ppcMask = src->queries[idx].queryData.ppcMaskData.ppcMask; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_CHIPLET_GPC_MAP: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryData.partitionGpcMapData.swizzId = src->queries[idx].queryData.partitionGpcMapData.swizzId; + dest->queries[idx].queryData.partitionGpcMapData.gpcId = src->queries[idx].queryData.partitionGpcMapData.gpcId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryData.partitionGpcMapData.chipletGpcMap = src->queries[idx].queryData.partitionGpcMapData.chipletGpcMap; +#endif + break; + } + case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_SYSPIPE_MASK: { +#ifdef 
COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.syspipeMaskData.chipletSyspipeMask = src->queries[idx].queryData.syspipeMaskData.chipletSyspipeMask;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_CHIPLET_SYSPIPE_IDS: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.partitionChipletSyspipeData.swizzId = src->queries[idx].queryData.partitionChipletSyspipeData.swizzId;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+                    NvU32 i;
+
+                    dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount = src->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount;
+
+                    if (dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount > NV2080_CTRL_GRMGR_MAX_SMC_IDS_v1A_1D) {
+                        return FAILURE_T;
+                    }
+
+                    // Use a dedicated counter here; reusing the outer 'idx' would clobber the query loop index and misindex queries[].
+                    for (i = 0; i < dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeIdCount; i++)
+                        dest->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeId[i] = src->queries[idx].queryData.partitionChipletSyspipeData.physSyspipeId[i];
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PROFILER_MON_GPC_MASK: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.dmGpcMaskData.swizzId = src->queries[idx].queryData.dmGpcMaskData.swizzId;
+                    dest->queries[idx].queryData.dmGpcMaskData.grIdx = src->queries[idx].queryData.dmGpcMaskData.grIdx;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.dmGpcMaskData.gpcEnMask = src->queries[idx].queryData.dmGpcMaskData.gpcEnMask;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_SYSPIPE_ID: {
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.partitionSyspipeIdData.syspipeId = src->queries[idx].queryData.partitionSyspipeIdData.syspipeId;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_ROP_MASK: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.ropMaskData.gpcId = src->queries[idx].queryData.ropMaskData.gpcId;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.ropMaskData.ropMask = src->queries[idx].queryData.ropMaskData.ropMask;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_GRAPHICS_SYSPIPE_MASK: {
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.gfxSyspipeMaskData.chipletSyspipeMask = src->queries[idx].queryData.gfxSyspipeMaskData.chipletSyspipeMask;
+#endif
+                    break;
+                }
+                case NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_GFX_CAPABLE_GPC_MASK: {
+#ifdef COPY_INPUT_PARAMETERS
+                    dest->queries[idx].queryData.gfxGpcMaskData.swizzId = src->queries[idx].queryData.gfxGpcMaskData.swizzId;
+                    dest->queries[idx].queryData.gfxGpcMaskData.grIdx = src->queries[idx].queryData.gfxGpcMaskData.grIdx;
+#endif
+#ifdef COPY_OUTPUT_PARAMETERS
+                    dest->queries[idx].queryData.gfxGpcMaskData.gpcEnMask = src->queries[idx].queryData.gfxGpcMaskData.gpcEnMask;
+#endif
+                    break;
+                }
+                default:
+                {
+                    // Unknown query
+                    return FAILURE_T;
+                }
+            }
+        }
+    }
+    else
+        return FAILURE_T;
+
+    return SUCCESS_T;
+}
+
+static
+return_t serialize_NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v24_00(NV2080_CTRL_FB_GET_FS_INFO_PARAMS *pParams,
+                                                            NvU8 *buffer,
+                                                            NvU32 bufferSize,
+                                                            NvU32 *offset)
+{
+    NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v24_00 *dest = (void*)(buffer);
+    NV2080_CTRL_FB_GET_FS_INFO_PARAMS *src = pParams;
+    NvU32 idx = 0;
+
+    if (src && dest)
+    {
+        if (src->numQueries > NV2080_CTRL_FB_FS_INFO_MAX_QUERIES_v24_00) {
+            return FAILURE_T;
+        }
+
+#ifdef COPY_INPUT_PARAMETERS
+        dest->numQueries = src->numQueries;
+#endif
+        for (idx = 0; idx < dest->numQueries; idx++) {
+#ifdef
COPY_INPUT_PARAMETERS + dest->queries[idx].queryType = src->queries[idx].queryType; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].status = src->queries[idx].status; +#endif + switch(dest->queries[idx].queryType) + { + case NV2080_CTRL_FB_FS_INFO_INVALID_QUERY: { +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i = 0; + for (i = 0; i < NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE_v1A_1D; i++) { + dest->queries[idx].queryParams.inv.data[i] = src->queries[idx].queryParams.inv.data[i]; + } +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbp.swizzId = src->queries[idx].queryParams.fbp.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbp.fbpEnMask = src->queries[idx].queryParams.fbp.fbpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.ltc.fbpIndex = src->queries[idx].queryParams.ltc.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.ltc.ltcEnMask = src->queries[idx].queryParams.ltc.ltcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_LTS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.lts.fbpIndex = src->queries[idx].queryParams.lts.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.lts.ltsEnMask = src->queries[idx].queryParams.lts.ltsEnMask; +#endif + + break; + } + case NV2080_CTRL_FB_FS_INFO_FBPA_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpa.fbpIndex = src->queries[idx].queryParams.fbpa.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpa.fbpaEnMask = src->queries[idx].queryParams.fbpa.fbpaEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_ROP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.rop.fbpIndex = src->queries[idx].queryParams.rop.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.rop.ropEnMask = src->queries[idx].queryParams.rop.ropEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLtc.fbpIndex = src->queries[idx].queryParams.dmLtc.fbpIndex; + dest->queries[idx].queryParams.dmLtc.swizzId = src->queries[idx].queryParams.dmLtc.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLtc.ltcEnMask = src->queries[idx].queryParams.dmLtc.ltcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLts.fbpIndex = src->queries[idx].queryParams.dmLts.fbpIndex; + dest->queries[idx].queryParams.dmLts.swizzId = src->queries[idx].queryParams.dmLts.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLts.ltsEnMask = src->queries[idx].queryParams.dmLts.ltsEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpa.fbpIndex = src->queries[idx].queryParams.dmFbpa.fbpIndex; + dest->queries[idx].queryParams.dmFbpa.swizzId = src->queries[idx].queryParams.dmFbpa.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpa.fbpaEnMask = src->queries[idx].queryParams.dmFbpa.fbpaEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmRop.fbpIndex = 
src->queries[idx].queryParams.dmRop.fbpIndex; + dest->queries[idx].queryParams.dmRop.swizzId = src->queries[idx].queryParams.dmRop.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmRop.ropEnMask = src->queries[idx].queryParams.dmRop.ropEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpaSubp.fbpIndex = src->queries[idx].queryParams.dmFbpaSubp.fbpIndex; + dest->queries[idx].queryParams.dmFbpaSubp.swizzId = src->queries[idx].queryParams.dmFbpaSubp.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpaSubp.fbpaSubpEnMask = src->queries[idx].queryParams.dmFbpaSubp.fbpaSubpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpaSubp.fbpIndex = src->queries[idx].queryParams.fbpaSubp.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpaSubp.fbpaSubpEnMask = src->queries[idx].queryParams.fbpaSubp.fbpaSubpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpLogicalMap.fbpIndex = src->queries[idx].queryParams.fbpLogicalMap.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpLogicalMap.fbpLogicalIndex = src->queries[idx].queryParams.fbpLogicalMap.fbpLogicalIndex; +#endif + break; + } + default: + { + // Unknown query + return FAILURE_T; + } + } + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v26_04(NV2080_CTRL_FB_GET_FS_INFO_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v26_04 *dest = (void*)(buffer); + NV2080_CTRL_FB_GET_FS_INFO_PARAMS *src = pParams; + NvU32 idx = 0; + + if (src && dest) + { + if (src->numQueries > NV2080_CTRL_FB_FS_INFO_MAX_QUERIES_v24_00) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->numQueries = src->numQueries; +#endif + for (idx = 0; idx < dest->numQueries; idx++) { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryType = src->queries[idx].queryType; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].status = src->queries[idx].status; +#endif + switch(dest->queries[idx].queryType) + { + case NV2080_CTRL_FB_FS_INFO_INVALID_QUERY: { +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i = 0; + for (i = 0; i < NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE_v1A_1D; i++) { + dest->queries[idx].queryParams.inv.data[i] = src->queries[idx].queryParams.inv.data[i]; + } +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbp.swizzId = src->queries[idx].queryParams.fbp.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbp.fbpEnMask = src->queries[idx].queryParams.fbp.fbpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.ltc.fbpIndex = src->queries[idx].queryParams.ltc.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.ltc.ltcEnMask = src->queries[idx].queryParams.ltc.ltcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_LTS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.lts.fbpIndex = src->queries[idx].queryParams.lts.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + 
dest->queries[idx].queryParams.lts.ltsEnMask = src->queries[idx].queryParams.lts.ltsEnMask; +#endif + + break; + } + case NV2080_CTRL_FB_FS_INFO_FBPA_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpa.fbpIndex = src->queries[idx].queryParams.fbpa.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpa.fbpaEnMask = src->queries[idx].queryParams.fbpa.fbpaEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_ROP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.rop.fbpIndex = src->queries[idx].queryParams.rop.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.rop.ropEnMask = src->queries[idx].queryParams.rop.ropEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLtc.fbpIndex = src->queries[idx].queryParams.dmLtc.fbpIndex; + dest->queries[idx].queryParams.dmLtc.swizzId = src->queries[idx].queryParams.dmLtc.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLtc.ltcEnMask = src->queries[idx].queryParams.dmLtc.ltcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLts.fbpIndex = src->queries[idx].queryParams.dmLts.fbpIndex; + dest->queries[idx].queryParams.dmLts.swizzId = src->queries[idx].queryParams.dmLts.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLts.ltsEnMask = src->queries[idx].queryParams.dmLts.ltsEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpa.fbpIndex = src->queries[idx].queryParams.dmFbpa.fbpIndex; + dest->queries[idx].queryParams.dmFbpa.swizzId = src->queries[idx].queryParams.dmFbpa.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpa.fbpaEnMask = src->queries[idx].queryParams.dmFbpa.fbpaEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmRop.fbpIndex = src->queries[idx].queryParams.dmRop.fbpIndex; + dest->queries[idx].queryParams.dmRop.swizzId = src->queries[idx].queryParams.dmRop.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmRop.ropEnMask = src->queries[idx].queryParams.dmRop.ropEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpaSubp.fbpIndex = src->queries[idx].queryParams.dmFbpaSubp.fbpIndex; + dest->queries[idx].queryParams.dmFbpaSubp.swizzId = src->queries[idx].queryParams.dmFbpaSubp.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpaSubp.fbpaSubpEnMask = src->queries[idx].queryParams.dmFbpaSubp.fbpaSubpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpaSubp.fbpIndex = src->queries[idx].queryParams.fbpaSubp.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpaSubp.fbpaSubpEnMask = src->queries[idx].queryParams.fbpaSubp.fbpaSubpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpLogicalMap.fbpIndex = src->queries[idx].queryParams.fbpLogicalMap.fbpIndex; +#endif +#ifdef 
COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpLogicalMap.fbpLogicalIndex = src->queries[idx].queryParams.fbpLogicalMap.fbpLogicalIndex; +#endif + break; + } + case NV2080_CTRL_SYSL2_FS_INFO_SYSLTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.sysl2Ltc.sysIdx = src->queries[idx].queryParams.sysl2Ltc.sysIdx; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.sysl2Ltc.sysl2LtcEnMask = src->queries[idx].queryParams.sysl2Ltc.sysl2LtcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PAC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.pac.fbpIndex = src->queries[idx].queryParams.pac.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.pac.pacEnMask = src->queries[idx].queryParams.pac.pacEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_LOGICAL_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.logicalLtc.fbpIndex = src->queries[idx].queryParams.logicalLtc.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.logicalLtc.logicalLtcEnMask = src->queries[idx].queryParams.logicalLtc.logicalLtcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LOGICAL_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLogicalLtc.fbpIndex = src->queries[idx].queryParams.dmLogicalLtc.fbpIndex; + dest->queries[idx].queryParams.dmLogicalLtc.swizzId = src->queries[idx].queryParams.dmLogicalLtc.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLogicalLtc.logicalLtcEnMask = src->queries[idx].queryParams.dmLogicalLtc.logicalLtcEnMask; +#endif + break; + } + default: + { + // Unknown query + return FAILURE_T; + } + } + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v2B_07(NV2080_CTRL_FB_GET_FS_INFO_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_FB_GET_FS_INFO_PARAMS_v2B_07 *dest = (void*)(buffer); + NV2080_CTRL_FB_GET_FS_INFO_PARAMS *src = pParams; + NvU32 idx = 0; + + if (src && dest) + { + if (src->numQueries > NV2080_CTRL_FB_FS_INFO_MAX_QUERIES_v24_00) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->numQueries = src->numQueries; +#endif + for (idx = 0; idx < dest->numQueries; idx++) { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryType = src->queries[idx].queryType; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].status = src->queries[idx].status; +#endif + switch(dest->queries[idx].queryType) + { + case NV2080_CTRL_FB_FS_INFO_INVALID_QUERY: { +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i = 0; + for (i = 0; i < NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE_v1A_1D; i++) { + dest->queries[idx].queryParams.inv.data[i] = src->queries[idx].queryParams.inv.data[i]; + } +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbp.swizzId = src->queries[idx].queryParams.fbp.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbp.fbpEnMask = src->queries[idx].queryParams.fbp.fbpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.ltc.fbpIndex = src->queries[idx].queryParams.ltc.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.ltc.ltcEnMask = src->queries[idx].queryParams.ltc.ltcEnMask; +#endif + break; + } + case 
NV2080_CTRL_FB_FS_INFO_LTS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.lts.fbpIndex = src->queries[idx].queryParams.lts.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.lts.ltsEnMask = src->queries[idx].queryParams.lts.ltsEnMask; +#endif + + break; + } + case NV2080_CTRL_FB_FS_INFO_FBPA_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpa.fbpIndex = src->queries[idx].queryParams.fbpa.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpa.fbpaEnMask = src->queries[idx].queryParams.fbpa.fbpaEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_ROP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.rop.fbpIndex = src->queries[idx].queryParams.rop.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.rop.ropEnMask = src->queries[idx].queryParams.rop.ropEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLtc.fbpIndex = src->queries[idx].queryParams.dmLtc.fbpIndex; + dest->queries[idx].queryParams.dmLtc.swizzId = src->queries[idx].queryParams.dmLtc.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLtc.ltcEnMask = src->queries[idx].queryParams.dmLtc.ltcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLts.fbpIndex = src->queries[idx].queryParams.dmLts.fbpIndex; + dest->queries[idx].queryParams.dmLts.swizzId = src->queries[idx].queryParams.dmLts.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLts.ltsEnMask = src->queries[idx].queryParams.dmLts.ltsEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpa.fbpIndex = src->queries[idx].queryParams.dmFbpa.fbpIndex; + dest->queries[idx].queryParams.dmFbpa.swizzId = src->queries[idx].queryParams.dmFbpa.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpa.fbpaEnMask = src->queries[idx].queryParams.dmFbpa.fbpaEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmRop.fbpIndex = src->queries[idx].queryParams.dmRop.fbpIndex; + dest->queries[idx].queryParams.dmRop.swizzId = src->queries[idx].queryParams.dmRop.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmRop.ropEnMask = src->queries[idx].queryParams.dmRop.ropEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpaSubp.fbpIndex = src->queries[idx].queryParams.dmFbpaSubp.fbpIndex; + dest->queries[idx].queryParams.dmFbpaSubp.swizzId = src->queries[idx].queryParams.dmFbpaSubp.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmFbpaSubp.fbpaSubpEnMask = src->queries[idx].queryParams.dmFbpaSubp.fbpaSubpEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpaSubp.fbpIndex = src->queries[idx].queryParams.fbpaSubp.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpaSubp.fbpaSubpEnMask = src->queries[idx].queryParams.fbpaSubp.fbpaSubpEnMask; +#endif + break; + } + case 
NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.fbpLogicalMap.fbpIndex = src->queries[idx].queryParams.fbpLogicalMap.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.fbpLogicalMap.fbpLogicalIndex = src->queries[idx].queryParams.fbpLogicalMap.fbpLogicalIndex; +#endif + break; + } + case NV2080_CTRL_SYSL2_FS_INFO_SYSLTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.sysl2Ltc.sysIdx = src->queries[idx].queryParams.sysl2Ltc.sysIdx; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.sysl2Ltc.sysl2LtcEnMask = src->queries[idx].queryParams.sysl2Ltc.sysl2LtcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PAC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.pac.fbpIndex = src->queries[idx].queryParams.pac.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.pac.pacEnMask = src->queries[idx].queryParams.pac.pacEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_LOGICAL_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.logicalLtc.fbpIndex = src->queries[idx].queryParams.logicalLtc.fbpIndex; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.logicalLtc.logicalLtcEnMask = src->queries[idx].queryParams.logicalLtc.logicalLtcEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LOGICAL_LTC_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.dmLogicalLtc.fbpIndex = src->queries[idx].queryParams.dmLogicalLtc.fbpIndex; + dest->queries[idx].queryParams.dmLogicalLtc.swizzId = src->queries[idx].queryParams.dmLogicalLtc.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.dmLogicalLtc.logicalLtcEnMask = src->queries[idx].queryParams.dmLogicalLtc.logicalLtcEnMask; +#endif + break; + } + case NV2080_CTRL_SYSL2_FS_INFO_SYSLTS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.sysl2Lts.sysIdx = src->queries[idx].queryParams.sysl2Lts.sysIdx; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.sysl2Lts.sysl2LtsEnMask = src->queries[idx].queryParams.sysl2Lts.sysl2LtsEnMask; +#endif + break; + } + case NV2080_CTRL_FB_FS_INFO_SYS_MASK: { +#ifdef COPY_INPUT_PARAMETERS + dest->queries[idx].queryParams.sys.swizzId = src->queries[idx].queryParams.sys.swizzId; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + dest->queries[idx].queryParams.sys.sysEnMask = src->queries[idx].queryParams.sys.sysEnMask; +#endif + break; + } + default: + { + // Unknown query + return FAILURE_T; + } + } + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t serialize_NVA06F_CTRL_STOP_CHANNEL_PARAMS_v1A_1E( + NVA06F_CTRL_STOP_CHANNEL_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVA06F_CTRL_STOP_CHANNEL_PARAMS *src = pParams; + NVA06F_CTRL_STOP_CHANNEL_PARAMS_v1A_1E *dest = (void*)(buffer); + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->bImmediate = src->bImmediate; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t serialize_NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS_v1A_1F(NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS_v1A_1F *dest = (void*)(buffer); + NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS *src = pParams; + + if (src && dest) + { + dest->pmaChannelIdx = src->pmaChannelIdx; + } + else + return FAILURE_T; + +#endif + 
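+    // Input-only control: in the COPY_OUTPUT_PARAMETERS pass the body above
+    // compiles away entirely and the copy is a trivial success.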
return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS_v1A_1F(NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS_v1A_1F *dest = (void*)(buffer); + NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS *src = pParams; + + if (src && dest) + { + dest->hChannel = src->hChannel; + dest->samplingMode = src->samplingMode; + dest->grRouteInfo.flags = src->grRouteInfo.flags; + dest->grRouteInfo.route = src->grRouteInfo.route; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS_v1A_1F(NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS_v1A_1F *dest = (void*)(buffer); + NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS *src = pParams; + + if (src && dest) + { + dest->bSetMaxFreq = src->bSetMaxFreq; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS_v1A_1F(NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_OUTPUT_PARAMETERS + NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS_v1A_1F *dest = (void*)(buffer); + NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS *src = pParams; + NvU32 i = 0; + + if (src && dest) + { + dest->rm.clientActiveMask = src->rm.clientActiveMask; + dest->rm.bRegkeyLimitRatedTdp = src->rm.bRegkeyLimitRatedTdp; + dest->output = src->output; + + for (i = 0; i < NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS_v1A_1F; i++) + { + dest->inputs[i] = src->inputs[i]; + } + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS_v1A_1F(NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS_v1A_1F *dest = (void*)(buffer); + NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS *src = pParams; + + if (src && dest) + { + dest->client = src->client; + dest->input = src->input; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS_v1A_23( + NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS_v1A_23 *dest = (void*)(buffer); + NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS *src = pParams; + + if (src && dest) + { + dest->base = src->base; + dest->size = src->size; + dest->addressSpace = src->addressSpace; + dest->cacheAttrib = src->cacheAttrib; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +return_t serialize_NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS_v1C_02(NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS_v1C_02 *dest = (void*)(buffer); + NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS *src = pParams; + + if (src && dest) + { + dest->smID = src->smID; + dest->bSingleStep = src->bSingleStep; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +return_t 
serialize_NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS_v1C_04(NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS_v1C_04 *dest = (void*)(buffer); + NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS *src = pParams; + + if (src && dest) + { + dest->hChannelGroup = src->hChannelGroup; + dest->mode = src->mode; + dest->bEnableAllTpcs = src->bEnableAllTpcs; + dest->grRouteInfo.flags = src->grRouteInfo.flags; + dest->grRouteInfo.route = src->grRouteInfo.route; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +return_t +serialize_NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS_v1E_07( + NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS_v1E_07 *dest = + (void*)(buffer); + NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS *src = pParams; + + if (src && dest) + { + NvU32 i; + if (src->numValidEntries > + NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_MAX_RUNQUEUES_v1E_07) + { + return FAILURE_T; + } + + dest->numValidEntries = src->numValidEntries; + + for (i = 0; i < src->numValidEntries; ++i) + { + dest->bar2Addr[i] = src->bar2Addr[i]; + dest->methodBufferMemdesc[i].base = + src->methodBufferMemdesc[i].base; + dest->methodBufferMemdesc[i].size = + src->methodBufferMemdesc[i].size; + dest->methodBufferMemdesc[i].alignment = + src->methodBufferMemdesc[i].alignment; + dest->methodBufferMemdesc[i].addressSpace = + src->methodBufferMemdesc[i].addressSpace; + dest->methodBufferMemdesc[i].cpuCacheAttrib = + src->methodBufferMemdesc[i].cpuCacheAttrib; + } + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +return_t serialize_NVB0CC_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL_PARAMS_v1C_08(NVB0CC_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVB0CC_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL_PARAMS_v1C_08 *dest = (void*)(buffer); + NVB0CC_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL_PARAMS *src = pParams; + + if (src && dest) + { + dest->pmaChannelIdx = src->pmaChannelIdx; + dest->bMembytesPollingRequired = src->bMembytesPollingRequired; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} + +return_t serialize_NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS_v1E_06(NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_OUTPUT_PARAMETERS + NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS_v1E_06 *dest = (void*)(buffer); + NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS *src = pParams; + + if (src && dest) + { + dest->bMode = src->bMode; + } + else + return FAILURE_T; + +#endif + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +static +return_t serialize_NV00F8_CTRL_DESCRIBE_PARAMS_v1E_0C( + NV00F8_CTRL_DESCRIBE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV00F8_CTRL_DESCRIBE_PARAMS_v1E_0C *dest = (void*)(buffer); + NV00F8_CTRL_DESCRIBE_PARAMS *src = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->offset = src->offset; +#endif +#ifdef COPY_OUTPUT_PARAMETERS + NvU32 i; + dest->totalPfns = src->totalPfns; + for (i = 0; i < NV00F8_CTRL_DESCRIBE_PFN_ARRAY_SIZE_v1E_0C; i++) { + dest->pfnArray[i] = src->pfnArray[i]; + } + dest->numPfns = src->numPfns; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t 
serialize_NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS_v1E_0C( + NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS_v1E_0C *dest = (void*)(buffer); + NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS *src = pParams; + + if (src && dest) + { +#ifdef COPY_OUTPUT_PARAMETERS + dest->totalSize = src->totalSize; + dest->freeSize = src->freeSize; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t serialize_NVC637_CTRL_CMD_EXEC_PARTITIONS_CREATE_v24_05(NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS_v24_05 *dest = (void *) buffer; + NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS *src = pParams; + NvU32 i; + + if (src && dest) + { + if (src->execPartCount > NVC637_CTRL_MAX_EXEC_PARTITIONS_v18_05) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->bQuery = src->bQuery; + dest->execPartCount = src->execPartCount; + + for (i = 0; i < dest->execPartCount; i++) { + dest->execPartInfo[i].gpcCount = src->execPartInfo[i].gpcCount; + + // Added in version v24_05 + dest->execPartInfo[i].gfxGpcCount = src->execPartInfo[i].gfxGpcCount; + + dest->execPartInfo[i].veidCount = src->execPartInfo[i].veidCount; + dest->execPartInfo[i].ceCount = src->execPartInfo[i].ceCount; + dest->execPartInfo[i].nvEncCount = src->execPartInfo[i].nvEncCount; + dest->execPartInfo[i].nvDecCount = src->execPartInfo[i].nvDecCount; + dest->execPartInfo[i].nvJpgCount = src->execPartInfo[i].nvJpgCount; + dest->execPartInfo[i].ofaCount = src->execPartInfo[i].ofaCount; + dest->execPartInfo[i].sharedEngFlag = src->execPartInfo[i].sharedEngFlag; + dest->execPartInfo[i].smCount = src->execPartInfo[i].smCount; + dest->execPartInfo[i].spanStart = src->execPartInfo[i].spanStart; + dest->execPartInfo[i].computeSize = src->execPartInfo[i].computeSize; + } +#endif + +#ifdef COPY_OUTPUT_PARAMETERS + for (i = 0; i < src->execPartCount; i++) { + dest->execPartId[i] = src->execPartId[i]; + dest->execPartInfo[i].computeSize = src->execPartInfo[i].computeSize; + } +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t serialize_NVC637_CTRL_CMD_EXEC_PARTITIONS_DELETE_v1F_0A(NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS_v18_05 *dest = (void *) buffer; + NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS *src = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + NvU32 i; + + if (src->execPartCount > NVC637_CTRL_MAX_EXEC_PARTITIONS_v18_05) { + return FAILURE_T; + } + + dest->execPartCount = src->execPartCount; + for (i = 0; i < dest->execPartCount; i++) { + dest->execPartId[i] = src->execPartId[i]; + } +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t serialize_NVC637_CTRL_CMD_EXEC_PARTITIONS_EXPORT_v29_0C(NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS_v29_0C *dest = (void *) buffer; + NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS *src = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->id = src->id; + dest->bCreateCap = src->bCreateCap; +#endif + +#ifdef COPY_OUTPUT_PARAMETERS + for (int i = 0; i < NVC637_CTRL_EXEC_PARTITIONS_EXPORT_MAX_ENGINES_MASK_SIZE; i++) { + dest->info.enginesMask[i] = src->info.enginesMask[i]; + } + 
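+        // enginesMask is a fixed-size bitmap, so every word up to
+        // NVC637_CTRL_EXEC_PARTITIONS_EXPORT_MAX_ENGINES_MASK_SIZE is copied
+        // regardless of how many engines the partition actually uses.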
dest->info.sharedEngFlags = src->info.sharedEngFlags; + dest->info.gpcMask = src->info.gpcMask; + dest->info.gfxGpcCount = src->info.gfxGpcCount; + dest->info.veidOffset = src->info.veidOffset; + dest->info.veidCount = src->info.veidCount; + dest->info.smCount = src->info.smCount; + dest->info.spanStart = src->info.spanStart; + dest->info.computeSize = src->info.computeSize; + + // UUID will not be copied as Guest will choose uuid by itself. +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + + +return_t serialize_NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_v1F_0A(NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS_v08_00 *dest = (void *) buffer; + NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS *src = pParams; + + if (src && dest) + { +#ifdef COPY_OUTPUT_PARAMETERS + dest->workSubmitToken = src->workSubmitToken; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t serialize_NVC36F_CTRL_CMD_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_v1F_0A(NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + + NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS_v16_04 *dest = (void *) buffer; + NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS *src = pParams; + + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->index = src->index; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t serialize_NV90E6_CTRL_CMD_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_v1F_0D(NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS_v18_0B *dest = (void *) buffer; + NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS *src = pParams; + + if (src && dest) + { +#ifdef COPY_OUTPUT_PARAMETERS + dest->eccMask = src->eccMask; + dest->nvlinkMask = src->nvlinkMask; +#endif + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +#endif + +#ifdef BUILD_COMMON_RPCS +return_t serialize_NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS_v2A_00( + NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV0080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_v2A_00 *dest = (void*)(buffer); + NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *src = pParams; + + if (src && dest) + { + dest->bZbcSurfacesExist = src->bZbcSurfacesExist; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +return_t serialize_NV2080_CTRL_PERF_GET_LEVEL_INFO_V2_PARAMS_v2B_0D( + NV2080_CTRL_PERF_GET_LEVEL_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_PERF_GET_LEVEL_INFO_V2_PARAMS_v2B_0D *dest = (void*)(buffer); + NV2080_CTRL_PERF_GET_LEVEL_INFO_V2_PARAMS *src = pParams; + NvU32 i; + if (src && dest) + { + if (src->perfGetClkInfoListSize > + NV2080_CTRL_PERF_CLK_MAX_DOMAINS_v2B_0D) + { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->level = src->level; + dest->perfGetClkInfoListSize = src->perfGetClkInfoListSize; +#endif + +#ifdef COPY_OUTPUT_PARAMETERS + dest->flags = src->flags; +#endif + for (i = 0; i < src->perfGetClkInfoListSize ; i++) + { +#ifdef COPY_INPUT_PARAMETERS + dest->perfGetClkInfoList[i].domain = 
src->perfGetClkInfoList[i].domain; + dest->perfGetClkInfoList[i].flags = src->perfGetClkInfoList[i].flags; +#endif + +#ifdef COPY_OUTPUT_PARAMETERS + dest->perfGetClkInfoList[i].currentFreq = src->perfGetClkInfoList[i].currentFreq; + dest->perfGetClkInfoList[i].defaultFreq = src->perfGetClkInfoList[i].defaultFreq; + dest->perfGetClkInfoList[i].minFreq = src->perfGetClkInfoList[i].minFreq; + dest->perfGetClkInfoList[i].maxFreq = src->perfGetClkInfoList[i].maxFreq; +#endif + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +return_t serialize_NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS_v2B_0E( + NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV0080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_v2B_0E *dest = (void*)(buffer); + NV0080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *src = pParams; + + if (src && dest) + { + dest->subdevInstance = src->subdevInstance; + dest->bZbcSurfacesExist = src->bZbcSurfacesExist; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +return_t serialize_NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS_v1F_05( + NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_v1F_05 *dest = (void*)(buffer); + NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *src = pParams; + + if (src && dest) + { + dest->bZbcSurfacesExist = src->bZbcSurfacesExist; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +return_t serialize_NVB0CC_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM_PARAMS_v1C_0C(NVB0CC_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NVB0CC_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM_PARAMS_v1C_0C *dest = (void*)(buffer); + NVB0CC_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM_PARAMS *src = pParams; + + if (src && dest) + { + dest->pmaChannelIdx = src->pmaChannelIdx; + dest->pmaBufferVA = src->pmaBufferVA; + dest->pmaBufferSize = src->pmaBufferSize; + dest->membytesVA = src->membytesVA; + dest->hwpmIBPA = src->hwpmIBPA; + dest->hwpmIBAperture = src->hwpmIBAperture; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +#ifdef BUILD_COMMON_RPCS +return_t serialize_NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v21_03(NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v21_03 *dest = (void*)(buffer); + NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS *src = pParams; + + if (src && dest) + { + dest->connectionType = src->connectionType; + dest->peerId = src->peerId; + dest->bSpaAccessOnly = src->bSpaAccessOnly; + dest->bUseUuid = src->bUseUuid; + + portMemCopy(dest->remoteGpuUuid, + VM_UUID_SIZE_v21_02, + src->remoteGpuUuid, + VM_UUID_SIZE_v21_02); + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +return_t serialize_NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v29_08(NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS_v29_08 *dest = (void*)(buffer); + NV2080_CTRL_BUS_SET_P2P_MAPPING_PARAMS *src = pParams; + + if 
(src && dest) + { + dest->connectionType = src->connectionType; + dest->peerId = src->peerId; + dest->bEgmPeer = src->bEgmPeer; + dest->bSpaAccessOnly = src->bSpaAccessOnly; + dest->bUseUuid = src->bUseUuid; + dest->remoteGpuId = src->remoteGpuId; + + portMemCopy(dest->remoteGpuUuid, + VM_UUID_SIZE_v21_02, + src->remoteGpuUuid, + VM_UUID_SIZE_v21_02); + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} +#endif + +#ifdef BUILD_COMMON_RPCS +return_t serialize_NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS_v21_03(NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS_v21_03 *dest = (void*)(buffer); + NV2080_CTRL_BUS_UNSET_P2P_MAPPING_PARAMS *src = pParams; + + if (src && dest) + { + dest->connectionType = src->connectionType; + dest->peerId = src->peerId; + dest->bUseUuid = src->bUseUuid; + + portMemCopy(dest->remoteGpuUuid, + VM_UUID_SIZE_v21_02, + src->remoteGpuUuid, + VM_UUID_SIZE_v21_02); + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +#ifndef UMED_BUILD +return_t serialize_NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v25_11(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v25_11 *dest = (void*)(buffer); + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *src = pParams; + + if (src && dest) + { + NvU32 i; + if (src->gpuInfoListSize > NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v25_11) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoListSize = src->gpuInfoListSize; +#endif + + for (i = 0; i < NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v25_11; i++) { +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoList[i].index = src->gpuInfoList[i].index; +#endif + dest->gpuInfoList[i].data = src->gpuInfoList[i].data; + } + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t serialize_NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2A_04(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2A_04 *dest = (void*)(buffer); + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *src = pParams; + + if (src && dest) + { + NvU32 i; + if (src->gpuInfoListSize > NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2A_04) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoListSize = src->gpuInfoListSize; +#endif + + for (i = 0; i < NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2A_04; i++) { +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoList[i].index = src->gpuInfoList[i].index; +#endif + dest->gpuInfoList[i].data = src->gpuInfoList[i].data; + } + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t serialize_NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2B_03(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2B_03 *dest = (void*)(buffer); + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *src = pParams; + + if (src && dest) + { + NvU32 i; + if (src->gpuInfoListSize > NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2B_03) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoListSize = src->gpuInfoListSize; +#endif + + for (i = 0; i < NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2B_03; i++) { +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoList[i].index = src->gpuInfoList[i].index; +#endif + dest->gpuInfoList[i].data = src->gpuInfoList[i].data; + } + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t 
serialize_NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2B_05(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2B_05 *dest = (void*)(buffer); + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *src = pParams; + + if (src && dest) + { + NvU32 i; + if (src->gpuInfoListSize > NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2B_05) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoListSize = src->gpuInfoListSize; +#endif + + for (i = 0; i < NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2B_05; i++) { +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoList[i].index = src->gpuInfoList[i].index; +#endif + dest->gpuInfoList[i].data = src->gpuInfoList[i].data; + } + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t serialize_NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2B_0C(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS_v2B_0C *dest = (void*)(buffer); + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *src = pParams; + + if (src && dest) + { + NvU32 i; + if (src->gpuInfoListSize > NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2B_0C) { + return FAILURE_T; + } + +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoListSize = src->gpuInfoListSize; +#endif + + for (i = 0; i < NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v2B_0C; i++) { +#ifdef COPY_INPUT_PARAMETERS + dest->gpuInfoList[i].index = src->gpuInfoList[i].index; +#endif + dest->gpuInfoList[i].data = src->gpuInfoList[i].data; + } + } + else + return FAILURE_T; + return SUCCESS_T; +} +#endif // UMED_BUILD + +return_t serialize_NV2080_CTRL_CMD_FLA_SETUP_INSTANCE_MEM_BLOCK_v21_05( + NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ +#ifdef COPY_INPUT_PARAMETERS + NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS_v13_04 *dest = (void*)(buffer); + NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS *src = pParams; + + if (src && dest) + { + dest->imbPhysAddr = src->imbPhysAddr; + dest->addrSpace = src->addrSpace; + dest->flaAction = src->flaAction; + } + else + return FAILURE_T; +#endif + return SUCCESS_T; +} + +return_t serialize_NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS_v21_08(NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS_v21_08 *dest = (void*)(buffer); + NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS *src = pParams; + if (src && dest) + { +#ifdef COPY_OUTPUT_PARAMETERS + dest->numCredits = src->numCredits; +#endif + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t serialize_NVB0CC_CTRL_GET_HS_CREDITS_PARAMS_v21_08(NVB0CC_CTRL_GET_HS_CREDITS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_GET_HS_CREDITS_PARAMS_v21_08 *dest = (void*)(buffer); + NVB0CC_CTRL_GET_HS_CREDITS_PARAMS *src = pParams; + if (src && dest) + { + NvU32 i; + if (src->numEntries > NVB0CC_MAX_CREDIT_INFO_ENTRIES_v21_08) { + return FAILURE_T; + } +#ifdef COPY_OUTPUT_PARAMETERS + dest->statusInfo.status = src->statusInfo.status; + dest->statusInfo.entryIndex = src->statusInfo.entryIndex; + for (i = 0; i < NVB0CC_MAX_CREDIT_INFO_ENTRIES_v21_08; i++) { + dest->creditInfo[i].numCredits = src->creditInfo[i].numCredits; + } +#endif +#ifdef COPY_INPUT_PARAMETERS + dest->pmaChannelIdx = src->pmaChannelIdx; + dest->numEntries = src->numEntries; + + for (i = 0; i < NVB0CC_MAX_CREDIT_INFO_ENTRIES_v21_08; i++) { + dest->creditInfo[i].chipletType = src->creditInfo[i].chipletType; 
+ dest->creditInfo[i].chipletIndex = src->creditInfo[i].chipletIndex; + dest->creditInfo[i].numCredits = src->creditInfo[i].numCredits; + } +#endif + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t serialize_NVB0CC_CTRL_SET_HS_CREDITS_PARAMS_v21_08(NVB0CC_CTRL_SET_HS_CREDITS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_SET_HS_CREDITS_PARAMS_v21_08 *dest = (void*)(buffer); + NVB0CC_CTRL_SET_HS_CREDITS_PARAMS *src = pParams; + + if (src && dest) + { +#ifdef COPY_OUTPUT_PARAMETERS + dest->statusInfo.status = src->statusInfo.status; + dest->statusInfo.entryIndex = src->statusInfo.entryIndex; +#endif +#ifdef COPY_INPUT_PARAMETERS + NvU32 i; + dest->pmaChannelIdx = src->pmaChannelIdx; + dest->numEntries = src->numEntries; + + if (src->numEntries > NVB0CC_MAX_CREDIT_INFO_ENTRIES_v21_08) { + return FAILURE_T; + } + + for (i = 0; i < NVB0CC_MAX_CREDIT_INFO_ENTRIES_v21_08; i++) { + dest->creditInfo[i].chipletType = src->creditInfo[i].chipletType; + dest->creditInfo[i].chipletIndex = src->creditInfo[i].chipletIndex; + dest->creditInfo[i].numCredits = src->creditInfo[i].numCredits; + } +#endif + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t serialize_NVB0CC_CTRL_RESERVE_HES_PARAMS_v29_07(NVB0CC_CTRL_RESERVE_HES_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_RESERVE_HES_PARAMS_v29_07 *dest = (void*)(buffer); + NVB0CC_CTRL_RESERVE_HES_PARAMS *src = pParams; + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->type = src->type; + dest->reserveParams.cwd.ctxsw = src->reserveParams.cwd.ctxsw; +#endif + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t serialize_NVB0CC_CTRL_RELEASE_HES_PARAMS_v29_07(NVB0CC_CTRL_RELEASE_HES_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_RELEASE_HES_PARAMS_v29_07 *dest = (void*)(buffer); + NVB0CC_CTRL_RELEASE_HES_PARAMS *src = pParams; + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->type = src->type; +#endif + } + else + return FAILURE_T; + return SUCCESS_T; +} + +return_t serialize_NVB0CC_CTRL_RESERVE_CCUPROF_PARAMS_v29_07(NVB0CC_CTRL_RESERVE_CCUPROF_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_RESERVE_CCUPROF_PARAMS_v29_07 *dest = (void*)(buffer); + NVB0CC_CTRL_RESERVE_CCUPROF_PARAMS *src = pParams; + if (src && dest) + { +#ifdef COPY_INPUT_PARAMETERS + dest->ctxsw = src->ctxsw; +#endif + } + else + return FAILURE_T; + return SUCCESS_T; +} + +#ifndef UMED_BUILD +return_t serialize_NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS_v2A_05(NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS *src = pParams; + NV83DE_CTRL_DEBUG_GET_MODE_MMU_GCC_DEBUG_PARAMS_v2A_05 *dest = (void*)(buffer); + + if (src && dest) { + dest->value = src->value; + } + else + return FAILURE_T; + + return SUCCESS_T; +} +#endif + +#ifndef UMED_BUILD +return_t serialize_NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS_v25_04(NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS *src = pParams; + NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS_v25_04 *dest = (void*)(buffer); + + if (src && dest) { + dest->value = src->value; + } + else + return FAILURE_T; + + return SUCCESS_T; +} +#endif + +#endif +#endif + +#if defined(BUILD_COMMON_RPCS) +static +return_t 
serialize_NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v25_0A(NV2080_CTRL_FB_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_FB_GET_INFO_V2_PARAMS *src = pParams; + NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v25_0A *dest = (void*)(buffer); + + if (src && dest) + { + NvU32 i; + + if ((src->fbInfoListSize == 0) || + (src->fbInfoListSize > NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_24_0A)) { + return FAILURE_T; + } + + dest->fbInfoListSize = src->fbInfoListSize; + + for (i = 0; i < src->fbInfoListSize; i++) { + dest->fbInfoList[i].index = src->fbInfoList[i].index; + dest->fbInfoList[i].data = src->fbInfoList[i].data; + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v25_0A(NV2080_CTRL_FB_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v25_0A *src = (void*)(buffer); + NV2080_CTRL_FB_GET_INFO_V2_PARAMS *dest = pParams; + + if (src && dest) + { + NvU32 i; + + if ((src->fbInfoListSize == 0) || + (src->fbInfoListSize > NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_24_0A)) { + return FAILURE_T; + } + + dest->fbInfoListSize = src->fbInfoListSize; + + for (i = 0; i < src->fbInfoListSize; i++) { + dest->fbInfoList[i].index = src->fbInfoList[i].index; + dest->fbInfoList[i].data = src->fbInfoList[i].data; + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v27_00(NV2080_CTRL_FB_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_FB_GET_INFO_V2_PARAMS *src = pParams; + NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v27_00 *dest = (void*)(buffer); + + if (src && dest) + { + NvU32 i; + + if ((src->fbInfoListSize == 0) || + (src->fbInfoListSize > NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_27_00)) { + return FAILURE_T; + } + + dest->fbInfoListSize = src->fbInfoListSize; + + for (i = 0; i < src->fbInfoListSize; i++) { + dest->fbInfoList[i].index = src->fbInfoList[i].index; + dest->fbInfoList[i].data = src->fbInfoList[i].data; + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v27_00(NV2080_CTRL_FB_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v27_00 *src = (void*)(buffer); + NV2080_CTRL_FB_GET_INFO_V2_PARAMS *dest = pParams; + + if (src && dest) + { + NvU32 i; + + if ((src->fbInfoListSize == 0) || + (src->fbInfoListSize > NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_27_00)) { + return FAILURE_T; + } + + dest->fbInfoListSize = src->fbInfoListSize; + + for (i = 0; i < src->fbInfoListSize; i++) { + dest->fbInfoList[i].index = src->fbInfoList[i].index; + dest->fbInfoList[i].data = src->fbInfoList[i].data; + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v2B_00(NV2080_CTRL_FB_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_FB_GET_INFO_V2_PARAMS *src = pParams; + NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v2B_00 *dest = (void*)(buffer); + + if (src && dest) + { + NvU32 i; + + if ((src->fbInfoListSize == 0) || + (src->fbInfoListSize > NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_2B_00)) { + return FAILURE_T; + } + + dest->fbInfoListSize = src->fbInfoListSize; + + for (i = 0; i < src->fbInfoListSize; i++) { + dest->fbInfoList[i].index = src->fbInfoList[i].index; + dest->fbInfoList[i].data = src->fbInfoList[i].data; + } + } + 
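+    // A NULL source or destination pointer fails the copy outright; no
+    // partial fbInfo list is ever serialized.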
else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v2B_00(NV2080_CTRL_FB_GET_INFO_V2_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_FB_GET_INFO_V2_PARAMS_v2B_00 *src = (void*)(buffer); + NV2080_CTRL_FB_GET_INFO_V2_PARAMS *dest = pParams; + + if (src && dest) + { + NvU32 i; + + if ((src->fbInfoListSize == 0) || + (src->fbInfoListSize > NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_2B_00)) { + return FAILURE_T; + } + + dest->fbInfoListSize = src->fbInfoListSize; + + for (i = 0; i < src->fbInfoListSize; i++) { + dest->fbInfoList[i].index = src->fbInfoList[i].index; + dest->fbInfoList[i].data = src->fbInfoList[i].data; + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} +#endif // defined(BUILD_COMMON_RPCS) + +#if defined(BUILD_COMMON_RPCS) && !defined(UMED_BUILD) +static +return_t serialize_NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_03(NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS *src = pParams; + NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_03 *dest = (void*)(buffer); + + if (src && dest) + { + dest->allocatedSize = src->allocatedSize; + dest->peakAllocatedSize = src->peakAllocatedSize; + dest->managedSize = src->managedSize; + dest->allocationCount = src->allocationCount; + dest->peakAllocationCount = src->peakAllocationCount; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_03(NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_03 *src = (void*)(buffer); + NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS *dest = pParams; + + if (src && dest) + { + dest->allocatedSize = src->allocatedSize; + dest->peakAllocatedSize = src->peakAllocatedSize; + dest->managedSize = src->managedSize; + dest->allocationCount = src->allocationCount; + dest->peakAllocationCount = src->peakAllocationCount; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_06(NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS *src = pParams; + NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_06 *dest = (void*)(buffer); + + if (src && dest) + { + dest->allocatedSize = src->allocatedSize; + dest->peakAllocatedSize = src->peakAllocatedSize; + dest->managedSize = src->managedSize; + dest->allocationCount = src->allocationCount; + dest->peakAllocationCount = src->peakAllocationCount; + dest->largestFreeChunkSize = src->largestFreeChunkSize; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_06(NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS_v28_06 *src = (void*)(buffer); + NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS *dest = pParams; + + if (src && dest) + { + dest->allocatedSize = src->allocatedSize; + dest->peakAllocatedSize = src->peakAllocatedSize; + dest->managedSize = src->managedSize; + dest->allocationCount = src->allocationCount; + dest->peakAllocationCount = src->peakAllocationCount; + 
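+        // largestFreeChunkSize is the field added in v28_06; the v28_03
+        // copy routines above are otherwise identical but omit it.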
dest->largestFreeChunkSize = src->largestFreeChunkSize; + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +return_t serialize_NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS_v29_02(NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS *src = pParams; + NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS_v29_02 *dest = (void*)(buffer); + NvU8 i; + if (src && dest && (src->poolCount <= NV2080_CTRL_GSP_LIBOS_POOL_COUNT_MAX_v29_02)) + { + dest->poolCount = src->poolCount; + dest->totalHeapSize= src->totalHeapSize; + for (i = 0; i < dest->poolCount; ++i) + { + portMemCopy(&dest->poolStats[i], sizeof(dest->poolStats[i]), &src->poolStats[i], sizeof(src->poolStats[i])); + } + } + else + return FAILURE_T; + return SUCCESS_T; +} +static +return_t deserialize_NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS_v29_02(NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS_v29_02 *src = (void*)(buffer); + NV2080_CTRL_CMD_GSP_GET_LIBOS_HEAP_STATS_PARAMS *dest = pParams; + NvU8 i; + if (src && dest && (src->poolCount <= NV2080_CTRL_GSP_LIBOS_POOL_COUNT_MAX_v29_02)) + { + dest->poolCount = src->poolCount; + dest->totalHeapSize= src->totalHeapSize; + for (i = 0; i < dest->poolCount; ++i) + { + portMemCopy(&dest->poolStats[i], sizeof(dest->poolStats[i]), &src->poolStats[i], sizeof(src->poolStats[i])); + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NVB0CC_CTRL_GET_CHIPLET_HS_CREDIT_POOL_v29_0A(NVB0CC_CTRL_GET_CHIPLET_HS_CREDIT_POOL *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_GET_CHIPLET_HS_CREDIT_POOL *src = pParams; + NVB0CC_CTRL_GET_CHIPLET_HS_CREDIT_POOL_v29_0A *dest = (void*)(buffer); + + if (src && dest && (src->poolInfosCount <= NVB0CC_CREDIT_POOL_MAX_COUNT_v29_0A)) + { + NvU32 i; + dest->poolInfosCount = src->poolInfosCount; + for (i = 0; i < dest->poolInfosCount; ++i) + { + dest->poolInfos[i].numCredits = src->poolInfos[i].numCredits; + dest->poolInfos[i].poolIndex = src->poolInfos[i].poolIndex; + dest->poolInfos[i].chipletType = src->poolInfos[i].chipletType; + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NVB0CC_CTRL_GET_CHIPLET_HS_CREDIT_POOL_v29_0A(NVB0CC_CTRL_GET_CHIPLET_HS_CREDIT_POOL *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_GET_CHIPLET_HS_CREDIT_POOL_v29_0A *src = (void*)(buffer); + NVB0CC_CTRL_GET_CHIPLET_HS_CREDIT_POOL *dest = pParams; + + if (src && dest && (src->poolInfosCount <= NVB0CC_CREDIT_POOL_MAX_COUNT_v29_0A)) + { + NvU32 i; + dest->poolInfosCount = src->poolInfosCount; + for (i = 0; i < dest->poolInfosCount; ++i) + { + dest->poolInfos[i].numCredits = src->poolInfos[i].numCredits; + dest->poolInfos[i].poolIndex = src->poolInfos[i].poolIndex; + dest->poolInfos[i].chipletType = src->poolInfos[i].chipletType; + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t serialize_NVB0CC_CTRL_GET_HS_CREDITS_POOL_MAPPING_PARAMS_v29_0A(NVB0CC_CTRL_GET_HS_CREDITS_POOL_MAPPING_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_GET_HS_CREDITS_POOL_MAPPING_PARAMS *src = pParams; + NVB0CC_CTRL_GET_HS_CREDITS_POOL_MAPPING_PARAMS_v29_0A *dest = (void*)(buffer); + NvU16 i; + + if (src && dest && (src->numQueries <= NVB0CC_MAX_CREDIT_INFO_ENTRIES_v21_08)) + { + 
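+        // numQueries was bounds-checked against
+        // NVB0CC_MAX_CREDIT_INFO_ENTRIES_v21_08 in the guard above, so the
+        // per-query loop below cannot overrun the fixed-size queries[] array.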
dest->numQueries = src->numQueries; + dest->statusInfo.status = src->statusInfo.status; + dest->statusInfo.entryIndex = src->statusInfo.entryIndex; + + for (i = 0; i < dest->numQueries; ++i) + { + dest->queries[i].chipletType = src->queries[i].chipletType; + dest->queries[i].chipletIndex = src->queries[i].chipletIndex; + dest->queries[i].poolIndex = src->queries[i].poolIndex; + } + } + else + return FAILURE_T; + + return SUCCESS_T; +} + +static +return_t deserialize_NVB0CC_CTRL_GET_HS_CREDITS_POOL_MAPPING_PARAMS_v29_0A(NVB0CC_CTRL_GET_HS_CREDITS_POOL_MAPPING_PARAMS *pParams, + NvU8 *buffer, + NvU32 bufferSize, + NvU32 *offset) +{ + NVB0CC_CTRL_GET_HS_CREDITS_POOL_MAPPING_PARAMS_v29_0A *src = (void*)(buffer); + NVB0CC_CTRL_GET_HS_CREDITS_POOL_MAPPING_PARAMS *dest = pParams; + NvU16 i; + + if (src && dest && (src->numQueries <= NVB0CC_MAX_CREDIT_INFO_ENTRIES_v21_08)) + { + dest->numQueries = src->numQueries; + dest->statusInfo.status = src->statusInfo.status; + dest->statusInfo.entryIndex = src->statusInfo.entryIndex; + + for (i = 0; i < dest->numQueries; ++i) + { + dest->queries[i].chipletType = src->queries[i].chipletType; + dest->queries[i].chipletIndex = src->queries[i].chipletIndex; + dest->queries[i].poolIndex = src->queries[i].poolIndex; + } + } + else + return FAILURE_T; + return SUCCESS_T; +} +#endif +#ifdef BUILD_COMMON_RPCS + +static NV_STATUS static_data_copy(OBJRPCSTRUCTURECOPY *pObjRpcStructureCopy, + VGPU_STATIC_INFO *pVSI, + NvU8 *buffer, NvU32 bufferSize, NvU32 *offset, + NvBool bAlignOffset) +{ + NV_STATUS status = NVOS_STATUS_SUCCESS; + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS)(pObjRpcStructureCopy, + (NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS *)&pVSI->execPartitionInfo, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(VGPU_STATIC_DATA)(pObjRpcStructureCopy, + pVSI, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS)(pObjRpcStructureCopy, + &pVSI->eccStatus, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS)(pObjRpcStructureCopy, + &pVSI->vgxSystemInfo, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_GPU_GET_GID_INFO_PARAMS)(pObjRpcStructureCopy, + &pVSI->gidInfo, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS)(pObjRpcStructureCopy, + &pVSI->SKUInfo, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS)(pObjRpcStructureCopy, + &pVSI->fbRegionInfoParams, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = 
serialize_deserialize(NV2080_CTRL_GPU_GET_COMPUTE_PROFILES_PARAMS)(pObjRpcStructureCopy, + &pVSI->ciProfiles, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV9096_CTRL_GET_ZBC_CLEAR_TABLE_SIZE_PARAMS)(pObjRpcStructureCopy, + &pVSI->zbcTableSizes[0], + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS)(pObjRpcStructureCopy, + NULL, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_PARAMS)(pObjRpcStructureCopy, + &pVSI->mcEngineNotificationIntrVectors, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + // Unused + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS)(pObjRpcStructureCopy, + &pVSI->grZcullInfo, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(VGPU_STATIC_PROPERTIES)(pObjRpcStructureCopy, + &pVSI->vgpuStaticProperties, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(VGPU_FIFO_GET_DEVICE_INFO_TABLE)(pObjRpcStructureCopy, + &pVSI->fifoDeviceInfoTable, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(VGPU_FB_GET_DYNAMIC_BLACKLISTED_PAGES)(pObjRpcStructureCopy, + &pVSI->fbDynamicBlacklistedPages, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(VGPU_BSP_GET_CAPS)(pObjRpcStructureCopy, + &pVSI->vgpuBspCaps, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(VGPU_GET_LATENCY_BUFFER_SIZE)(pObjRpcStructureCopy, + &pVSI->fifoLatencyBufferSize, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(VGPU_CE_GET_CAPS_V2)(pObjRpcStructureCopy, + &pVSI->ceCaps, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS)(pObjRpcStructureCopy, + &pVSI->nvlinkCaps, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(VGPU_FB_GET_LTC_INFO_FOR_FBP)(pObjRpcStructureCopy, + &pVSI->fbLtcInfoForFbp, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_BUS_GET_INFO_V2_PARAMS)(pObjRpcStructureCopy, + 
&pVSI->busGetInfoV2, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_FLA_GET_RANGE_PARAMS)(pObjRpcStructureCopy, + &pVSI->flaInfo, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NVA080_CTRL_VGPU_GET_CONFIG_PARAMS)(pObjRpcStructureCopy, + &pVSI->vgpuConfig, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + // Unused + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS)(pObjRpcStructureCopy, + &pVSI->grSmIssueRateModifier, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + // Unused + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_V2_PARAMS)(pObjRpcStructureCopy, + &pVSI->grSmIssueRateModifierV2, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_PARAMS)(pObjRpcStructureCopy, + &pVSI->mcStaticIntrTable, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS_PARAMS)(pObjRpcStructureCopy, + &pVSI->pcieSupportedGpuAtomics, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS)(pObjRpcStructureCopy, + &pVSI->ceGetAllCaps, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_CMD_BUS_GET_C2C_INFO_PARAMS)(pObjRpcStructureCopy, + &pVSI->c2cInfo, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV0080_CTRL_MSENC_GET_CAPS_V2_PARAMS)(pObjRpcStructureCopy, + &pVSI->nvencCaps, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS)(pObjRpcStructureCopy, + &pVSI->constructedFalconInfo, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(VGPU_P2P_CAPABILITY_PARAMS)(pObjRpcStructureCopy, + &pVSI->p2pCaps, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS)(pObjRpcStructureCopy, + &pVSI->deviceInfoTable, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = 
serialize_deserialize(NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS)(pObjRpcStructureCopy, + &pVSI->memsysStaticConfig, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS)(pObjRpcStructureCopy, + &pVSI->busGetPcieReqAtomicsCaps, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(GPU_PARTITION_INFO)(pObjRpcStructureCopy, + &pVSI->gpuPartitionInfo, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + if (getIpVersion() < 0x25130000) { + goto end; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS)(pObjRpcStructureCopy, + &pVSI->masterGetVfErrCntIntMsk, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + if (getIpVersion() < 0x26010000) { + goto end; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(GPU_EXEC_SYSPIPE_INFO)(pObjRpcStructureCopy, + &pVSI->execSyspipeInfo, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + if (getIpVersion() < 0x29050000) { + goto end; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_INTERNAL_CCU_SAMPLE_INFO_PARAMS)(pObjRpcStructureCopy, + &pVSI->ccuSampleInfo, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + + if (getIpVersion() < 0x2B0A0000) { + goto end; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_MC_GET_INTR_CATEGORY_SUBTREE_MAP_PARAMS)( + pObjRpcStructureCopy, + &pVSI->intrCategorySubtreeMapParams, + buffer, + bufferSize, + offset); + + if (status != NVOS_STATUS_SUCCESS) { + return status; + } + +end: + return status; +} + +static NV_STATUS consolidated_gr_static_info_copy(OBJRPCSTRUCTURECOPY *pObjRpcStructureCopy, + VGPU_STATIC_INFO *pVSI, + NvU8 *buffer, NvU32 bufferSize, NvU32 *offset, + NvBool bAlignOffset) +{ + NV_STATUS status = NVOS_STATUS_SUCCESS; + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_INTERNAL_STATIC_GR_GET_INFO_PARAMS)(pObjRpcStructureCopy, + &pVSI->grInfoParams, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + goto end; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_INTERNAL_STATIC_GR_GET_GLOBAL_SM_ORDER_PARAMS)(pObjRpcStructureCopy, + &pVSI->globalSmOrder, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + goto end; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_INTERNAL_STATIC_GR_GET_ROP_INFO_PARAMS)(pObjRpcStructureCopy, + &pVSI->ropInfoParams, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + goto end; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_INTERNAL_STATIC_GR_GET_PPC_MASKS_PARAMS)(pObjRpcStructureCopy, + &pVSI->ppcMaskParams, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + goto end; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = 
serialize_deserialize(NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS)(pObjRpcStructureCopy, + &pVSI->ctxBuffInfo, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + goto end; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS)(pObjRpcStructureCopy, + &pVSI->smIssueRateModifier, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + goto end; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_V2_PARAMS)(pObjRpcStructureCopy, + &pVSI->smIssueRateModifierV2, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + goto end; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_INTERNAL_STATIC_GR_GET_FLOORSWEEPING_MASKS_PARAMS)(pObjRpcStructureCopy, + &pVSI->floorsweepMaskParams, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + goto end; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_INTERNAL_STATIC_GR_GET_ZCULL_INFO_PARAMS)(pObjRpcStructureCopy, + &pVSI->zcullInfoParams, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + goto end; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS)(pObjRpcStructureCopy, + &pVSI->fecsRecordSize, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + goto end; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS)(pObjRpcStructureCopy, + &pVSI->fecsTraceDefines, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + goto end; + } + + NV_CHECK_AND_ALIGN_OFFSET(*offset, bAlignOffset) + status = serialize_deserialize(NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS)(pObjRpcStructureCopy, + &pVSI->pdbTableParams, + buffer, + bufferSize, + offset); + if (status != NVOS_STATUS_SUCCESS) { + goto end; + } + +end: + return status; +} +#endif + +#if defined(CROSS_BRANCH_CONVERT) + +typedef struct { + NvU32 outType; +} GUEST_HOST_MAPPING; + +/* + * guest_to_host_enginetype_v1A_00 is used to convert engine types from r460 to + * r470. Refer to sdk/nvidia/inc/class/cl2080.h for the definitions.
+ */ +const GUEST_HOST_MAPPING guest_to_host_enginetype_v1A_00[] = +{ +// Out Engine Type In Engine Type + { NV2080_ENGINE_TYPE_NULL }, // 0x0 + { NV2080_ENGINE_TYPE_GR0 }, // 0x1 + { NV2080_ENGINE_TYPE_GR1 }, // 0x2 + { NV2080_ENGINE_TYPE_GR2 }, // 0x3 + { NV2080_ENGINE_TYPE_GR3 }, // 0x4 + { NV2080_ENGINE_TYPE_GR4 }, // 0x5 + { NV2080_ENGINE_TYPE_GR5 }, // 0x6 + { NV2080_ENGINE_TYPE_GR6 }, // 0x7 + { NV2080_ENGINE_TYPE_GR7 }, // 0x8 + { NV2080_ENGINE_TYPE_COPY0 }, // 0x9 + { NV2080_ENGINE_TYPE_COPY1 }, // 0xa + { NV2080_ENGINE_TYPE_COPY2 }, // 0xb + { NV2080_ENGINE_TYPE_COPY3 }, // 0xc + { NV2080_ENGINE_TYPE_COPY4 }, // 0xd + { NV2080_ENGINE_TYPE_COPY5 }, // 0xe + { NV2080_ENGINE_TYPE_COPY6 }, // 0xf + { NV2080_ENGINE_TYPE_COPY7 }, // 0x10 + { NV2080_ENGINE_TYPE_COPY8 }, // 0x11 + { NV2080_ENGINE_TYPE_COPY9 }, // 0x12 + { NV2080_ENGINE_TYPE_NVDEC0 }, // 0x13 + { NV2080_ENGINE_TYPE_NVDEC1 }, // 0x14 + { NV2080_ENGINE_TYPE_NVDEC2 }, // 0x15 + { NV2080_ENGINE_TYPE_NVDEC3 }, // 0x16 + { NV2080_ENGINE_TYPE_NVDEC4 }, // 0x17 + { NV2080_ENGINE_TYPE_NVENC0 }, // 0x18 + { NV2080_ENGINE_TYPE_NVENC1 }, // 0x19 + { NV2080_ENGINE_TYPE_NVENC2 }, // 0x1a + { NV2080_ENGINE_TYPE_VP }, // 0x1b + { NV2080_ENGINE_TYPE_ME }, // 0x1c + { NV2080_ENGINE_TYPE_PPP }, // 0x1d + { NV2080_ENGINE_TYPE_MPEG }, // 0x1e + { NV2080_ENGINE_TYPE_SW }, // 0x1f + { NV2080_ENGINE_TYPE_CIPHER }, // 0x20 + { NV2080_ENGINE_TYPE_VIC }, // 0x21 + { NV2080_ENGINE_TYPE_MP }, // 0x22 + { NV2080_ENGINE_TYPE_SEC2 }, // 0x23 + { NV2080_ENGINE_TYPE_HOST }, // 0x24 + { NV2080_ENGINE_TYPE_DPU }, // 0x25 + { NV2080_ENGINE_TYPE_PMU }, // 0x26 + { NV2080_ENGINE_TYPE_FBFLCN }, // 0x27 + { NV2080_ENGINE_TYPE_NVJPG }, // 0x28 + { NV2080_ENGINE_TYPE_OFA0 }, // 0x29 + { NV2080_ENGINE_TYPE_LAST_v1A_00 }, // 0x2a +}; + +// Convert a guest engineType to a host engineType. +NvU32 deserialize_engineType(NvU32 inEngineType) +{ + + if (g_vmiopd.vgx_internal_version_curr.major_number >= 0x1B) { + return inEngineType; + } + if (inEngineType >= 0x2a) { + return NV2080_ENGINE_TYPE_NULL; + } + return guest_to_host_enginetype_v1A_00[inEngineType].outType; +} + +/* + * host_to_guest_enginetype_v1A_00 is used to convert engine types from r470 to + * r460. Refer to sdk/nvidia/inc/class/cl2080.h for the definitions.
+ * unsupported/invalid engines are returned with NV2080_ENGINE_TYPE_NULL (this + * is considered invalid in RM) + */ +const GUEST_HOST_MAPPING host_to_guest_enginetype_v1A_00[] = +{ +// Out Engine Type In Engine Type + { 0x0 }, // NV2080_ENGINE_TYPE_NULL + { 0x1 }, // NV2080_ENGINE_TYPE_GR0 + { 0x2 }, // NV2080_ENGINE_TYPE_GR1 + { 0x3 }, // NV2080_ENGINE_TYPE_GR2 + { 0x4 }, // NV2080_ENGINE_TYPE_GR3 + { 0x5 }, // NV2080_ENGINE_TYPE_GR4 + { 0x6 }, // NV2080_ENGINE_TYPE_GR5 + { 0x7 }, // NV2080_ENGINE_TYPE_GR6 + { 0x8 }, // NV2080_ENGINE_TYPE_GR7 + { 0x9 }, // NV2080_ENGINE_TYPE_COPY0 + { 0xa }, // NV2080_ENGINE_TYPE_COPY1 + { 0xb }, // NV2080_ENGINE_TYPE_COPY2 + { 0xc }, // NV2080_ENGINE_TYPE_COPY3 + { 0xd }, // NV2080_ENGINE_TYPE_COPY4 + { 0xe }, // NV2080_ENGINE_TYPE_COPY5 + { 0xf }, // NV2080_ENGINE_TYPE_COPY6 + { 0x10 }, // NV2080_ENGINE_TYPE_COPY7 + { 0x11 }, // NV2080_ENGINE_TYPE_COPY8 + { 0x12 }, // NV2080_ENGINE_TYPE_COPY9 + { 0x13 }, // NV2080_ENGINE_TYPE_NVDEC0 + { 0x14 }, // NV2080_ENGINE_TYPE_NVDEC1 + { 0x15 }, // NV2080_ENGINE_TYPE_NVDEC2 + { 0x16 }, // NV2080_ENGINE_TYPE_NVDEC3 + { 0x17 }, // NV2080_ENGINE_TYPE_NVDEC4 + { NV2080_ENGINE_TYPE_NULL }, // NV2080_ENGINE_TYPE_NVDEC5 + { NV2080_ENGINE_TYPE_NULL }, // NV2080_ENGINE_TYPE_NVDEC6 + { NV2080_ENGINE_TYPE_NULL }, // NV2080_ENGINE_TYPE_NVDEC7 + { 0x18 }, // NV2080_ENGINE_TYPE_NVENC0 + { 0x19 }, // NV2080_ENGINE_TYPE_NVENC1 + { 0x1a }, // NV2080_ENGINE_TYPE_NVENC2 + { 0x1b }, // NV2080_ENGINE_TYPE_VP + { 0x1c }, // NV2080_ENGINE_TYPE_ME + { 0x1d }, // NV2080_ENGINE_TYPE_PPP + { 0x1e }, // NV2080_ENGINE_TYPE_MPEG + { 0x1f }, // NV2080_ENGINE_TYPE_SW + { 0x20 }, // NV2080_ENGINE_TYPE_CIPHER + { 0x21 }, // NV2080_ENGINE_TYPE_VIC + { 0x22 }, // NV2080_ENGINE_TYPE_MP + { 0x23 }, // NV2080_ENGINE_TYPE_SEC2 + { 0x24 }, // NV2080_ENGINE_TYPE_HOST + { 0x25 }, // NV2080_ENGINE_TYPE_DPU + { 0x26 }, // NV2080_ENGINE_TYPE_PMU + { 0x27 }, // NV2080_ENGINE_TYPE_FBFLCN + { 0x28 }, // NV2080_ENGINE_TYPE_NVJPEG0 + { NV2080_ENGINE_TYPE_NULL }, // NV2080_ENGINE_TYPE_NVJPEG1 + { NV2080_ENGINE_TYPE_NULL }, // NV2080_ENGINE_TYPE_NVJPEG2 + { NV2080_ENGINE_TYPE_NULL }, // NV2080_ENGINE_TYPE_NVJPEG3 + { NV2080_ENGINE_TYPE_NULL }, // NV2080_ENGINE_TYPE_NVJPEG4 + { NV2080_ENGINE_TYPE_NULL }, // NV2080_ENGINE_TYPE_NVJPEG5 + { NV2080_ENGINE_TYPE_NULL }, // NV2080_ENGINE_TYPE_NVJPEG6 + { NV2080_ENGINE_TYPE_NULL }, // NV2080_ENGINE_TYPE_NVJPEG7 + { 0x29 }, // NV2080_ENGINE_TYPE_OFA0 + { 0x2a }, // NV2080_ENGINE_TYPE_LAST +}; + +// Convert a host engineType to a guest engineType. +NvU32 serialize_engineType(NvU32 inEngineType) +{ + + if (g_vmiopd.vgx_internal_version_curr.major_number >= 0x1B) { + return inEngineType; + } + if (inEngineType >= 0x34) { + return NV2080_ENGINE_TYPE_NULL; + } + return host_to_guest_enginetype_v1A_00[inEngineType].outType; +} + +// Convert a host notifier index to a guest notifier index. +NvU32 serialize_notifier(NvU32 inNotifier) +{ + if (g_vmiopd.vgx_internal_version_curr.major_number > 0x21) { + return inNotifier; + } + + if (g_vmiopd.vgx_internal_version_curr.major_number == 0x21 && + (REF_VAL(NV0005_NOTIFY_INDEX_INDEX, inNotifier) >= NV2080_NOTIFIERS_MAXCOUNT_R525)) { + return NV2080_NOTIFIERS_MAXCOUNT_R525; + } + + if (g_vmiopd.vgx_internal_version_curr.major_number == 0x1C && + (REF_VAL(NV0005_NOTIFY_INDEX_INDEX, inNotifier) >= NV2080_NOTIFIERS_MAXCOUNT_R470)) { + return NV2080_NOTIFIERS_MAXCOUNT_R470; + } + + return inNotifier; +} + +// Convert a guest notifier index to a host notifier index. 
+NvU32 deserialize_notifier(NvU32 inNotifier) +{ + if (g_vmiopd.vgx_internal_version_curr.major_number > 0x21) { + return inNotifier; + } + + if (g_vmiopd.vgx_internal_version_curr.major_number == 0x21 && + (REF_VAL(NV0005_NOTIFY_INDEX_INDEX, inNotifier) >= NV2080_NOTIFIERS_MAXCOUNT_R525)) { + return NV2080_NOTIFIERS_MAXCOUNT; + } + + if (g_vmiopd.vgx_internal_version_curr.major_number == 0x1C && + (REF_VAL(NV0005_NOTIFY_INDEX_INDEX, inNotifier) >= NV2080_NOTIFIERS_MAXCOUNT_R470)) { + return NV2080_NOTIFIERS_MAXCOUNT; + } + + return inNotifier; +} + +/* + * The rcmask table is used to convert ROBUST_CHANNEL_* macros from r470 to r460. + * E.g., ROBUST_CHANNEL_COPY0_ENGINE is defined as 22 in r470 and 19 in r460. + */ +const GUEST_HOST_MAPPING host_to_guest_rcmask_v1A_00[] = +{ +// Out rcmask // In rcmask + { 0 }, // 0 + { 1 }, // 1 + { 2 }, // 2 + { 3 }, // 3 + { 4 }, // 4 + { 5 }, // 5 + { 6 }, // 6 + { 7 }, // 7 + { 8 }, // 8 + { 9 }, // 9 + { 10 }, // 10 + { 11 }, // 11 + { 12 }, // 12 + { 13 }, // 13 + { 14 }, // 14 + { 15 }, // 15 + { 16 }, // 16 + { 17 }, // 17 + { 18 }, // 18 + { 0 }, // 19 + { 0 }, // 20 + { 0 }, // 21 + { 19 }, // 22 + { 20 }, // 23 + { 21 }, // 24 + { 22 }, // 25 + { 23 }, // 26 + { 24 }, // 27 + { 25 }, // 28 + { 26 }, // 29 + { 27 }, // 30 + { 28 }, // 31 + { 29 }, // 32 + { 0 }, // 33 + { 0 }, // 34 + { 0 }, // 35 + { 0 }, // 36 + { 0 }, // 37 + { 0 }, // 38 + { 0 }, // 39 + { 30 }, // 40 + { 31 }, // 41 + { 32 }, // 42 + { 33 }, // 43 + { 34 }, // 44 + { 35 }, // 45 + { 36 }, // 46 + { 37 }, // 47 + { 38 }, // 48 + { 39 }, // 49 +}; + +// Convert a host rcmask to a guest rcmask. +NvU32 serialize_rcmask(NvU32 inType) +{ + + if (g_vmiopd.vgx_internal_version_curr.major_number >= 0x1B) { + return inType; + } + if (inType > 49) { + return 0; + } + return host_to_guest_rcmask_v1A_00[inType].outType; +} + +typedef struct { + NvU32 inType; + NvU32 outType; +} GUEST_HOST_MAPPING_TUPLE; + +/* + * The mcbit table is used to convert MC_ENGINE_IDX_* macros from r470 to r460. + * Please refer to engine_idx.h for the definitions. + * E.g., MC_ENGINE_IDX_BSP is defined as 56 in r470 and 49 in r460.
+ */ +const GUEST_HOST_MAPPING_TUPLE host_to_guest_mcbit_v1A_00[] = +{ +// Host mcbit Guest mcbit + { 0, 0 }, + { 1, 1 }, + { 2, 2 }, + { 3, 3 }, + { 4, 4 }, + { 5, 5 }, + { 6, 6 }, + { 7, 7 }, + { 8, 8 }, + { 9, 9 }, + { 10, 10 }, + { 11, 11 }, + { 12, 12 }, + { 13, 13 }, + { 14, 14 }, + { 15, 15 }, + { 16, 16 }, + { 17, 17 }, + { 18, 18 }, + { 19, 19 }, + { 20, 20 }, + { 21, 21 }, + { 22, 22 }, + { 23, 23 }, + { 24, 24 }, + { 25, 25 }, + { 26, 26 }, + { 27, 27 }, + { 28, 28 }, + { 29, 30 }, + { 30, 31 }, + { 31, 32 }, + { 32, 0 }, + { 33, 0 }, + { 34, 34 }, + { 35, 35 }, + { 36, 36 }, + { 37, 37 }, + { 38, 38 }, + { 39, 0 }, + { 40, 40 }, + { 41, 41 }, + { 42, 42 }, + { 43, 0 }, + { 44, 0 }, + { 45, 0 }, + { 46, 0 }, + { 47, 0 }, + { 48, 0 }, + { 49, 0 }, + { 50, 43 }, + { 51, 44 }, + { 52, 45 }, + { 53, 46 }, + { 54, 47 }, + { 55, 48 }, + { 56, 49 }, + { 57, 50 }, + { 58, 51 }, + { 59, 52 }, + { 60, 53 }, + { 61, 0 }, + { 62, 0 }, + { 63, 0 }, + { 64, 54 }, + { 65, 55 }, + { 66, 56 }, + { 67, 57 }, + { 68, 58 }, + { 69, 59 }, + { 70, 60 }, + { 71, 61 }, + { 72, 62 }, + { 73, 63 }, + { 74, 64 }, + { 75, 65 }, + { 76, 66 }, + { 77, 67 }, + { 78, 68 }, + { 79, 69 }, + { 80, 70 }, + { 81, 71 }, + { 82, 72 }, + { 146, 104 }, + { 147, 105 }, + { 148, 106 }, + { 149, 107 }, + { 150, 108 }, + { 151, 109 }, + { 152, 110 }, + { 153, 111 }, + { 154, 112 }, + { 155, 113 }, +}; + +// Binary-search host_to_guest_mcbit_v1A_00 for inType; returns index 0 +// (the identity entry) when inType is not present in the table. +NvU32 get_index(NvU32 inType) { + NvU32 index = 0, l = 0; + NvU32 r = (sizeof(host_to_guest_mcbit_v1A_00) / (sizeof(NvU32) * 2)) - 1; + + do { + index = (l + r) / 2; + if (inType == host_to_guest_mcbit_v1A_00[index].inType) { + break; + } + if (r <= l) { + index = 0; + break; + } + if (inType > host_to_guest_mcbit_v1A_00[index].inType) { + l = index + 1; + continue; + } + if (inType < host_to_guest_mcbit_v1A_00[index].inType) { + r = index - 1; + continue; + } + } while(1); + + return index; +} + +// Convert a host mcbit to a guest mcbit. +NvU32 serialize_mcbit(NvU32 inType) +{ + + if (g_vmiopd.vgx_internal_version_curr.major_number >= 0x1B) { + return inType; + } + if (inType > 155) { + return 0; + } + return host_to_guest_mcbit_v1A_00[get_index(inType)].outType; +} + +#endif +#undef COPY_INPUT_PARAMETERS +#undef COPY_OUTPUT_PARAMETERS diff --git a/src/nvidia/inc/kernel/vgpu/rpc.h b/src/nvidia/inc/kernel/vgpu/rpc.h new file mode 100644 index 0000000..815d139 --- /dev/null +++ b/src/nvidia/inc/kernel/vgpu/rpc.h @@ -0,0 +1,555 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +//****************************************************************************** +// +// Declarations for the RPC module. +// +// Description: +// This module declares the RPC interface functions/macros. +// +//****************************************************************************** + +#ifndef __vgpu_dev_nv_rpc_h__ +#define __vgpu_dev_nv_rpc_h__ + +#include "class/cl84a0.h" +#include "vgpu/rpc_headers.h" +#include "gpu/dce_client/dce_client.h" +#include "gpu/rpc/objrpc.h" +#include "rpc_vgpu.h" + +#define KERNEL_PID (0xFFFFFFFFULL) + +typedef struct ContextDma ContextDma; + +#define NV_RM_RPC_ALLOC_SHARE_DEVICE_FWCLIENT(pGpu, hclient, hdevice, hclientshare, htargetclient, htargetdevice, hclass, \ + allocflags, vasize, vamode, bFirstDevice, status) \ + do \ + { \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + NV0000_ALLOC_PARAMETERS root_alloc_params = {0}; \ + \ + root_alloc_params.hClient = hclient; \ + \ + if (!IsT234DorBetter(pGpu)) \ + { \ + RmClient *pClient = serverutilGetClientUnderLock(hclient); \ + \ + /* Get process ID from the client database */ \ + if (pClient != NULL) \ + { \ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); \ + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); \ + \ + if (RMCFG_FEATURE_PLATFORM_UNIX && \ + (pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL)) \ + { \ + root_alloc_params.processID = KERNEL_PID; \ + } \ + else \ + { \ + root_alloc_params.processID = pClient->ProcID; \ + NV_ASSERT(root_alloc_params.processID == osGetCurrentProcess()); \ + root_alloc_params.pOsPidInfo = (NvP64)(pClient->pOsPidInfo); \ + } \ + } \ + else \ + NV_ASSERT(0); \ + } \ + \ + if (bFirstDevice) \ + { \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, NV01_NULL_OBJECT, \ + NV01_NULL_OBJECT, NV01_ROOT, \ + &root_alloc_params, sizeof(root_alloc_params)); \ + } \ + else \ + { \ + status = NV_OK; \ + } \ + \ + if (status == NV_OK) \ + { \ + NV0080_ALLOC_PARAMETERS device_alloc_params = {0}; \ + \ + device_alloc_params.hClientShare = hclientshare; \ + device_alloc_params.hTargetClient = htargetclient; \ + device_alloc_params.hTargetDevice = htargetdevice; \ + device_alloc_params.flags = allocflags; \ + device_alloc_params.vaSpaceSize = vasize; \ + \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, hclient, hdevice, \ + hclass, &device_alloc_params, \ + sizeof(device_alloc_params)); \ + } \ + else \ + NV_ASSERT(0); \ + } \ + while (0) + +#define NV_RM_RPC_ALLOC_MEMORY(pGpu, hclient, hdevice, hmemory, hclass, \ + flags, pmemdesc, status) \ + do \ + { \ + OBJRPC *pRpc; \ + pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL) \ + && (!(IS_VIRTUAL_WITH_SRIOV(pGpu) && \ + !gpuIsWarBug200577889SriovHeavyEnabled(pGpu) && \ + !NV_IS_MODS))) { \ + if (IS_DCE_CLIENT(pGpu)) \ + { \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + NV_MEMORY_LIST_ALLOCATION_PARAMS listAllocParams = {0}; \ + listAllocParams.pteAdjust = pmemdesc->PteAdjust; \ + listAllocParams.format = memdescGetPteKind(pmemdesc); \ + listAllocParams.size = pmemdesc->Size; \ + listAllocParams.pageCount = pmemdesc->PageCount; \ + listAllocParams.pageNumberList = memdescGetPteArray(pmemdesc, AT_GPU); \ + listAllocParams.hClient = NV01_NULL_OBJECT; \ + 
listAllocParams.hParent = NV01_NULL_OBJECT; \ + listAllocParams.hObject = NV01_NULL_OBJECT; \ + listAllocParams.limit = pmemdesc->Size - 1; \ + listAllocParams.flagsOs02 = (DRF_DEF(OS02,_FLAGS,_MAPPING,_NO_MAP) | \ + DRF_DEF(OS02,_FLAGS,_PHYSICALITY,_NONCONTIGUOUS) | \ + (flags & DRF_SHIFTMASK(NVOS02_FLAGS_COHERENCY))); \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, hdevice, \ + hmemory, NV01_MEMORY_LIST_SYSTEM, &listAllocParams, \ + sizeof(listAllocParams)); \ + } \ + else \ + { \ + status = rpcAllocMemory_HAL(pGpu, pRpc, hclient, hdevice, hmemory, \ + hclass, flags, pmemdesc); \ + } \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +#define NV_RM_RPC_MAP_MEMORY_DMA(pGpu, hclient, hdevice, hdma, hmemory, offset, length, flags, \ + dmaoffset, status) \ + do \ + { \ + OBJRPC *pRpc; \ + pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL) && \ + !gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) \ + status = rpcMapMemoryDma_HAL(pGpu, pRpc, hclient, hdevice, hdma, hmemory, offset, \ + length, flags, dmaoffset); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + + +#define NV_RM_RPC_UNMAP_MEMORY_DMA(pGpu, hclient, hdevice, hdma, hmemory, flags, dmaoffset, \ + status) \ + do \ + { \ + OBJRPC *pRpc; \ + pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL) && \ + !gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) \ + status = rpcUnmapMemoryDma_HAL(pGpu, pRpc, hclient, hdevice, hdma, hmemory, \ + flags, dmaoffset); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_IDLE_CHANNELS(pGpu, phclients, phdevices, phchannels, \ + nentries, flags, timeout, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcIdleChannels_HAL(pGpu, pRpc, phclients, phdevices, \ + phchannels, nentries, flags, timeout); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_ALLOC_SHARE_DEVICE(pGpu, hclient, hdevice, hclientshare, htargetclient, htargetdevice, hclass, \ + allocflags, vasize, vamode, bFirstDevice, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + if (IS_FW_CLIENT(pGpu)) \ + { \ + NV_RM_RPC_ALLOC_SHARE_DEVICE_FWCLIENT(pGpu, hclient, hdevice, hclientshare, htargetclient, htargetdevice, hclass, \ + allocflags, vasize, vamode, bFirstDevice, status); \ + } \ + else \ + status = rpcAllocShareDevice_HAL(pGpu, pRpc, hclient, hdevice, hclientshare, \ + htargetclient, htargetdevice, hclass, \ + allocflags, vasize, vamode); \ + } else if (pRpc == NULL) \ + return NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +/* + * Control RPC macro + */ +#define NV_RM_RPC_CONTROL(pGpu, hClient, hObject, cmd, pParams, paramSize, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + if (IS_FW_CLIENT(pGpu)) \ + { \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = pRmApi->Control(pRmApi, hClient, hObject, cmd, \ + pParams, paramSize); \ + } \ + else \ + { \ + status = rpcDmaControl_wrapper(pGpu, pRpc, hClient, hObject, \ + cmd, pParams, paramSize); \ + } \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define 
NV_RM_RPC_ALLOC_CHANNEL(pGpu, hclient, hparent, hchannel, hclass, \ + pGpfifoAllocParams, pchid, status) \ + do \ + { \ + OBJRPC *pRpc; \ + pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + if (IS_FW_CLIENT(pGpu)) \ + { \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, hparent, hchannel, \ + hclass, pGpfifoAllocParams, \ + sizeof(*pGpfifoAllocParams)); \ + } \ + else \ + { \ + status = rpcAllocChannelDma_HAL(pGpu, pRpc, hclient, hparent, hchannel, \ + hclass, pGpfifoAllocParams, pchid); \ + } \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +#define NV_RM_RPC_ALLOC_OBJECT(pGpu, hclient, hchannel, hobject, hclass, params, paramsSize, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + if (IS_FW_CLIENT(pGpu)) \ + { \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, hchannel, hobject, \ + hclass, params, paramsSize); \ + } \ + else \ + { \ + status = rpcAllocObject_HAL(pGpu, pRpc, \ + hclient, hchannel, hobject, hclass, params);\ + } \ + } \ + } while (0) + +/* + * free RPC macros + */ +#define NV_RM_RPC_FREE(pGpu, hclient, hparent, hobject, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + if (IS_FW_CLIENT(pGpu)) \ + { \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = pRmApi->Free(pRmApi, hclient, hobject); \ + } \ + else \ + { \ + status = rpcFree_HAL(pGpu, pRpc, hclient, hparent, hobject); \ + } \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_FREE_ON_ERROR(pGpu, hclient, hparent, hobject) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + /* used in failure cases, macro doesn't overwrite rmStatus */ \ + if (pRpc != NULL) \ + { \ + if (IS_FW_CLIENT(pGpu)) \ + { \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + pRmApi->Free(pRmApi, hclient, hobject); \ + } \ + else \ + { \ + rpcFree_HAL(pGpu, pRpc, hclient, hparent, hobject); \ + } \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +#define NV_RM_RPC_ALLOC_EVENT(pGpu, hclient, hparentclient, hchannel, hobject, \ + hevent, hclass, idx, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + if (IS_FW_CLIENT(pGpu)) \ + { \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + NV0005_ALLOC_PARAMETERS allocParams = {0}; \ + allocParams.hParentClient = hparentclient; \ + allocParams.hClass = hclass; \ + allocParams.notifyIndex = idx | NV01_EVENT_CLIENT_RM; \ + allocParams.data = 0; \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, \ + hobject, hevent, \ + hclass, &allocParams, \ + sizeof(allocParams)); \ + } \ + else \ + { \ + status = rpcAllocEvent_HAL(pGpu, pRpc, hclient, hparentclient, \ + hchannel, hobject, hevent, hclass, idx);\ + } \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_ALLOC_SUBDEVICE(pGpu, hclient, hdevice, hsubdevice, \ + hclass, subDeviceInst, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + if (IS_FW_CLIENT(pGpu)) \ + { \ + RM_API *pRmApi = 
GPU_GET_PHYSICAL_RMAPI(pGpu); \ + NV2080_ALLOC_PARAMETERS alloc_params = {0}; \ + \ + alloc_params.subDeviceId = subDeviceInst; \ + \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, hdevice, hsubdevice, \ + hclass, &alloc_params, \ + sizeof(alloc_params)); \ + } \ + else \ + status = rpcAllocSubdevice_HAL(pGpu, pRpc, hclient, hdevice, \ + hsubdevice, hclass, subDeviceInst); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_DUP_OBJECT(pGpu, hclient, hparent, hobject, hclient_src, \ + hobject_src, flags, bAutoFreeRpc, pDstRef, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + if (IS_FW_CLIENT(pGpu)) \ + { \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = pRmApi->DupObject(pRmApi, hclient, hparent, \ + &hobject, hclient_src, \ + hobject_src, flags); \ + } \ + else \ + status = rpcDupObject_HAL(pGpu, pRpc, hclient, hparent, \ + hobject, hclient_src, \ + hobject_src, flags); \ + if ((bAutoFreeRpc) && (pDstRef != NULL) && (status == NV_OK)) \ + { \ + RmResource *pRmResource; \ + pRmResource = dynamicCast(((RsResourceRef*)pDstRef)->pResource, RmResource); \ + pRmResource->bRpcFree = NV_TRUE; \ + } \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +static inline void NV_RM_RPC_MANAGE_HW_RESOURCE_ALLOC(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_MANAGE_HW_RESOURCE_FREE(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SIM_UPDATE_DISP_CONTEXT_DMA(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SIM_DELETE_DISP_CONTEXT_DMA(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SIM_UPDATE_DISP_CHANNEL_INFO(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SIM_FREE_INFRA(OBJGPU *pGpu, ...) 
{ return; } + +#define NV_RM_RPC_SET_GUEST_SYSTEM_INFO(pGpu, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = RmRpcSetGuestSystemInfo(pGpu, pRpc); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_UNLOADING_GUEST_DRIVER(pGpu, status, bInPMTransition, bGc6Entering, newPMLevel) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcUnloadingGuestDriver_HAL(pGpu, pRpc, bInPMTransition, bGc6Entering, newPMLevel); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +#define NV_RM_RPC_GPU_EXEC_REG_OPS(pGpu, hClient, hObject, pParams, pRegOps, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcGpuExecRegOps_HAL(pGpu, pRpc, hClient, hObject, pParams, pRegOps); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +#define NV_RM_RPC_REGISTER_VIRTUAL_EVENT_BUFFER(pGpu, hClient, hSubdevice, hEventBuffer, hBufferHeader, hRecordBuffer, recordSize, recordCount, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcRegisterVirtualEventBuffer_HAL(pGpu, pRpc, hClient, hSubdevice, hEventBuffer, hBufferHeader, hRecordBuffer, recordSize, recordCount); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_UPDATE_BAR_PDE(pGpu, barType, entryValue, entryLevelShift, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcUpdateBarPde_HAL(pGpu, pRpc, barType, entryValue, entryLevelShift); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +static inline void NV_RM_RPC_PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION(OBJGPU *pGpu, ...) { return; } + +#define NV_RM_RPC_INVALIDATE_TLB(pGpu, status, pdbAddress, regVal) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcInvalidateTlb_HAL(pGpu, pRpc, pdbAddress, regVal); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +// +// DCE_CLIENT_RM specific RPCs +// + +#define NV_RM_RPC_DCE_RM_INIT(pGpu, bInit, status) \ + do \ + { \ + OBJRPC* pRpc = GPU_GET_RPC(pGpu); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = rpcDceRmInit_dce(pRmApi, bInit); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +// +// GSP_CLIENT_RM specific RPCs +// + +#define NV_RM_RPC_GET_GSP_STATIC_INFO(pGpu, status) do {} while (0) +#define NV_RM_RPC_GSP_SET_SYSTEM_INFO(pGpu, status) do {} while (0) +#define NV_RM_RPC_SET_REGISTRY(pGpu, status) do {} while (0) +#define NV_RM_RPC_DUMP_PROTOBUF_COMPONENT(pGpu, status, pPrbEnc, pNvDumpState, component) do {} while (0) +#define NV_RM_RPC_ECC_NOTIFIER_WRITE_ACK(pGpu, status) do {} while (0) + +NV_STATUS RmRpcPerfGetCurrentPstate(OBJGPU *pGpu, NV2080_CTRL_PERF_GET_CURRENT_PSTATE_PARAMS *pParamStructPtr); + +static inline NV_STATUS RmRpcSimFreeInfra(OBJGPU *pGpu, ...) 
{ return NV_OK; } +static inline NV_STATUS RmRpcSimUpdateDisplayContextDma(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcSimDeleteDisplayContextDma(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcSimUpdateDispChannelInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcHwResourceAlloc(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcHwResourceFree(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcPerfGetPstateInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcPerfGetVirtualPstateInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } + +static inline NV_STATUS RmRpcSimEscapeRead(OBJGPU *pGpu, const char *path, NvU32 index, + NvU32 count, NvU32 *data) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcSimEscapeWrite(OBJGPU *pGpu, const char *path, NvU32 index, + NvU32 count, NvU32 data) { return NV_ERR_NOT_SUPPORTED; } + +static NV_INLINE NV_STATUS RmRpcSetGuestSystemInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } + +/*! + * Defines the size of the GSP sim access buffer. + */ +#define GSP_SIM_ACCESS_BUFFER_SIZE 0x4000 + +/*! + * Defines the structure used to pass SimRead data from Kernel to Physical RM. + */ +typedef struct SimAccessBuffer +{ + volatile NvU32 data[GSP_SIM_ACCESS_BUFFER_SIZE]; + volatile NvU32 seq; +} SimAccessBuffer; + +#endif // __vgpu_dev_nv_rpc_h__ diff --git a/src/nvidia/inc/kernel/vgpu/rpc_global_enums.h b/src/nvidia/inc/kernel/vgpu/rpc_global_enums.h new file mode 100644 index 0000000..5b62569 --- /dev/null +++ b/src/nvidia/inc/kernel/vgpu/rpc_global_enums.h @@ -0,0 +1,292 @@ +#ifndef _RPC_GLOBAL_ENUMS_H_ +#define _RPC_GLOBAL_ENUMS_H_ + +// Deprecated RPC's numbers cannot be reused in order to not break compatibility +#ifndef X +# define X(UNIT, RPC, VAL) NV_VGPU_MSG_FUNCTION_##RPC = VAL, +# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +enum { +#endif + X(RM, NOP, 0) + X(RM, SET_GUEST_SYSTEM_INFO, 1) + X(RM, ALLOC_ROOT, 2) + X(RM, ALLOC_DEVICE, 3) // deprecated + X(RM, ALLOC_MEMORY, 4) + X(RM, ALLOC_CTX_DMA, 5) + X(RM, ALLOC_CHANNEL_DMA, 6) + X(RM, MAP_MEMORY, 7) + X(RM, BIND_CTX_DMA, 8) // deprecated + X(RM, ALLOC_OBJECT, 9) + X(RM, FREE, 10) + X(RM, LOG, 11) + X(RM, ALLOC_VIDMEM, 12) + X(RM, UNMAP_MEMORY, 13) + X(RM, MAP_MEMORY_DMA, 14) + X(RM, UNMAP_MEMORY_DMA, 15) + X(RM, GET_EDID, 16) // deprecated + X(RM, ALLOC_DISP_CHANNEL, 17) + X(RM, ALLOC_DISP_OBJECT, 18) + X(RM, ALLOC_SUBDEVICE, 19) + X(RM, ALLOC_DYNAMIC_MEMORY, 20) + X(RM, DUP_OBJECT, 21) + X(RM, IDLE_CHANNELS, 22) + X(RM, ALLOC_EVENT, 23) + X(RM, SEND_EVENT, 24) // deprecated + X(RM, REMAPPER_CONTROL, 25) // deprecated + X(RM, DMA_CONTROL, 26) // deprecated + X(RM, DMA_FILL_PTE_MEM, 27) + X(RM, MANAGE_HW_RESOURCE, 28) + X(RM, BIND_ARBITRARY_CTX_DMA, 29) // deprecated + X(RM, CREATE_FB_SEGMENT, 30) + X(RM, DESTROY_FB_SEGMENT, 31) + X(RM, ALLOC_SHARE_DEVICE, 32) + X(RM, DEFERRED_API_CONTROL, 33) + X(RM, REMOVE_DEFERRED_API, 34) + X(RM, SIM_ESCAPE_READ, 35) + X(RM, SIM_ESCAPE_WRITE, 36) + X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA, 37) + X(RM, FREE_VIDMEM_VIRT, 38) + X(RM, PERF_GET_PSTATE_INFO, 39) // deprecated + X(RM, PERF_GET_PERFMON_SAMPLE, 40) + X(RM, PERF_GET_VIRTUAL_PSTATE_INFO, 41) // deprecated + X(RM, PERF_GET_LEVEL_INFO, 42) + X(RM, MAP_SEMA_MEMORY, 43) + X(RM, UNMAP_SEMA_MEMORY, 44) + X(RM, SET_SURFACE_PROPERTIES, 45) + X(RM, CLEANUP_SURFACE, 46) + X(RM, UNLOADING_GUEST_DRIVER, 47) + 
X(RM, TDR_SET_TIMEOUT_STATE, 48) + X(RM, SWITCH_TO_VGA, 49) + X(RM, GPU_EXEC_REG_OPS, 50) + X(RM, GET_STATIC_INFO, 51) + X(RM, ALLOC_VIRTMEM, 52) + X(RM, UPDATE_PDE_2, 53) + X(RM, SET_PAGE_DIRECTORY, 54) + X(RM, GET_STATIC_PSTATE_INFO, 55) + X(RM, TRANSLATE_GUEST_GPU_PTES, 56) + X(RM, RESERVED_57, 57) + X(RM, RESET_CURRENT_GR_CONTEXT, 58) + X(RM, SET_SEMA_MEM_VALIDATION_STATE, 59) + X(RM, GET_ENGINE_UTILIZATION, 60) + X(RM, UPDATE_GPU_PDES, 61) + X(RM, GET_ENCODER_CAPACITY, 62) + X(RM, VGPU_PF_REG_READ32, 63) // deprecated + X(RM, SET_GUEST_SYSTEM_INFO_EXT, 64) + X(GSP, GET_GSP_STATIC_INFO, 65) + X(RM, RMFS_INIT, 66) // deprecated + X(RM, RMFS_CLOSE_QUEUE, 67) // deprecated + X(RM, RMFS_CLEANUP, 68) // deprecated + X(RM, RMFS_TEST, 69) // deprecated + X(RM, UPDATE_BAR_PDE, 70) + X(RM, CONTINUATION_RECORD, 71) + X(RM, GSP_SET_SYSTEM_INFO, 72) + X(RM, SET_REGISTRY, 73) + X(GSP, GSP_INIT_POST_OBJGPU, 74) // deprecated + X(RM, SUBDEV_EVENT_SET_NOTIFICATION, 75) // deprecated + X(GSP, GSP_RM_CONTROL, 76) + X(RM, GET_STATIC_INFO2, 77) + X(RM, DUMP_PROTOBUF_COMPONENT, 78) + X(RM, UNSET_PAGE_DIRECTORY, 79) + X(RM, GET_CONSOLIDATED_STATIC_INFO, 80) // deprecated + X(RM, GMMU_REGISTER_FAULT_BUFFER, 81) // deprecated + X(RM, GMMU_UNREGISTER_FAULT_BUFFER, 82) // deprecated + X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER, 83) // deprecated + X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER, 84) // deprecated + X(RM, CTRL_SET_VGPU_FB_USAGE, 85) + X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO, 86) + X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO, 87) + X(RM, CTRL_RESET_CHANNEL, 88) + X(RM, CTRL_RESET_ISOLATED_CHANNEL, 89) + X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT, 90) + X(RM, CTRL_CLK_GET_EXTENDED_INFO, 91) + X(RM, CTRL_PERF_BOOST, 92) + X(RM, CTRL_PERF_VPSTATES_GET_CONTROL, 93) + X(RM, CTRL_GET_ZBC_CLEAR_TABLE, 94) + X(RM, CTRL_SET_ZBC_COLOR_CLEAR, 95) + X(RM, CTRL_SET_ZBC_DEPTH_CLEAR, 96) + X(RM, CTRL_GPFIFO_SCHEDULE, 97) + X(RM, CTRL_SET_TIMESLICE, 98) + X(RM, CTRL_PREEMPT, 99) + X(RM, CTRL_FIFO_DISABLE_CHANNELS, 100) + X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL, 101) + X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL, 102) + X(GSP, GSP_RM_ALLOC, 103) + X(RM, CTRL_GET_P2P_CAPS_V2, 104) + X(RM, CTRL_CIPHER_AES_ENCRYPT, 105) + X(RM, CTRL_CIPHER_SESSION_KEY, 106) + X(RM, CTRL_CIPHER_SESSION_KEY_STATUS, 107) + X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES, 108) + X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES, 109) + X(RM, CTRL_DBG_SET_EXCEPTION_MASK, 110) + X(RM, CTRL_GPU_PROMOTE_CTX, 111) + X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND, 112) + X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE, 113) + X(RM, CTRL_GR_CTXSW_ZCULL_BIND, 114) + X(RM, CTRL_GPU_INITIALIZE_CTX, 115) + X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES, 116) + X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT, 117) + X(RM, CTRL_GET_LATEST_ECC_ADDRESSES, 118) + X(RM, CTRL_MC_SERVICE_INTERRUPTS, 119) + X(RM, CTRL_DMA_SET_DEFAULT_VASPACE, 120) + X(RM, CTRL_GET_CE_PCE_MASK, 121) + X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY, 122) + X(RM, CTRL_GET_NVLINK_PEER_ID_MASK, 123) // deprecated + X(RM, CTRL_GET_NVLINK_STATUS, 124) + X(RM, CTRL_GET_P2P_CAPS, 125) + X(RM, CTRL_GET_P2P_CAPS_MATRIX, 126) + X(RM, RESERVED_0, 127) + X(RM, CTRL_RESERVE_PM_AREA_SMPC, 128) + X(RM, CTRL_RESERVE_HWPM_LEGACY, 129) + X(RM, CTRL_B0CC_EXEC_REG_OPS, 130) + X(RM, CTRL_BIND_PM_RESOURCES, 131) + X(RM, CTRL_DBG_SUSPEND_CONTEXT, 132) + X(RM, CTRL_DBG_RESUME_CONTEXT, 133) + X(RM, CTRL_DBG_EXEC_REG_OPS, 134) + X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG, 135) + X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE, 136) + X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE, 137) + X(RM, 
CTRL_DBG_SET_MODE_ERRBAR_DEBUG, 138) + X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE, 139) + X(RM, CTRL_ALLOC_PMA_STREAM, 140) + X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT, 141) + X(RM, CTRL_FB_GET_INFO_V2, 142) + X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES, 143) + X(RM, CTRL_GR_GET_CTX_BUFFER_INFO, 144) + X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES, 145) + X(RM, CTRL_GPU_EVICT_CTX, 146) + X(RM, CTRL_FB_GET_FS_INFO, 147) + X(RM, CTRL_GRMGR_GET_GR_FS_INFO, 148) + X(RM, CTRL_STOP_CHANNEL, 149) + X(RM, CTRL_GR_PC_SAMPLING_MODE, 150) + X(RM, CTRL_PERF_RATED_TDP_GET_STATUS, 151) + X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL, 152) + X(RM, CTRL_FREE_PMA_STREAM, 153) + X(RM, CTRL_TIMER_SET_GR_TICK_FREQ, 154) + X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB, 155) + X(RM, GET_CONSOLIDATED_GR_STATIC_INFO, 156) + X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP, 157) + X(RM, CTRL_GR_GET_TPC_PARTITION_MODE, 158) + X(RM, CTRL_GR_SET_TPC_PARTITION_MODE, 159) + X(UVM, UVM_PAGING_CHANNEL_ALLOCATE, 160) + X(UVM, UVM_PAGING_CHANNEL_DESTROY, 161) + X(UVM, UVM_PAGING_CHANNEL_MAP, 162) + X(UVM, UVM_PAGING_CHANNEL_UNMAP, 163) + X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM, 164) + X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES, 165) + X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION, 166) + X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL, 167) + X(RM, DCE_RM_INIT, 168) + X(RM, REGISTER_VIRTUAL_EVENT_BUFFER, 169) + X(RM, CTRL_EVENT_BUFFER_UPDATE_GET, 170) + X(RM, GET_PLCABLE_ADDRESS_KIND, 171) + X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2, 172) + X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM, 173) + X(RM, CTRL_GET_MMU_DEBUG_MODE, 174) + X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS, 175) + X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE, 176) + X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO, 177) + X(RM, DISABLE_CHANNELS, 178) + X(RM, CTRL_FABRIC_MEMORY_DESCRIBE, 179) + X(RM, CTRL_FABRIC_MEM_STATS, 180) + X(RM, SAVE_HIBERNATION_DATA, 181) + X(RM, RESTORE_HIBERNATION_DATA, 182) + X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED, 183) + X(RM, CTRL_EXEC_PARTITIONS_CREATE, 184) + X(RM, CTRL_EXEC_PARTITIONS_DELETE, 185) + X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN, 186) + X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX, 187) + X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION, 188) + X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK, 189) + X(RM, RESERVED_190, 190) + X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS, 191) + X(RM, CTRL_BUS_SET_P2P_MAPPING, 192) + X(RM, CTRL_BUS_UNSET_P2P_MAPPING, 193) + X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK, 194) + X(RM, CTRL_GPU_MIGRATABLE_OPS, 195) + X(RM, CTRL_GET_TOTAL_HS_CREDITS, 196) + X(RM, CTRL_GET_HS_CREDITS, 197) + X(RM, CTRL_SET_HS_CREDITS, 198) + X(RM, CTRL_PM_AREA_PC_SAMPLER, 199) + X(RM, INVALIDATE_TLB, 200) + X(RM, CTRL_GPU_QUERY_ECC_STATUS, 201) // deprecated + X(RM, ECC_NOTIFIER_WRITE_ACK, 202) + X(RM, CTRL_DBG_GET_MODE_MMU_DEBUG, 203) + X(RM, RM_API_CONTROL, 204) + X(RM, CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE, 205) + X(RM, CTRL_NVLINK_GET_INBAND_RECEIVED_DATA, 206) + X(RM, GET_STATIC_DATA, 207) + X(RM, RESERVED_208, 208) + X(RM, CTRL_GPU_GET_INFO_V2, 209) + X(RM, GET_BRAND_CAPS, 210) + X(RM, CTRL_CMD_NVLINK_INBAND_SEND_DATA, 211) + X(RM, UPDATE_GPM_GUEST_BUFFER_INFO, 212) + X(RM, CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE, 213) + X(RM, CTRL_SET_ZBC_STENCIL_CLEAR, 214) + X(RM, CTRL_SUBDEVICE_GET_VGPU_HEAP_STATS, 215) + X(RM, CTRL_SUBDEVICE_GET_LIBOS_HEAP_STATS, 216) + X(RM, CTRL_DBG_SET_MODE_MMU_GCC_DEBUG, 217) + X(RM, CTRL_DBG_GET_MODE_MMU_GCC_DEBUG, 218) + X(RM, CTRL_RESERVE_HES, 219) + X(RM, CTRL_RELEASE_HES, 220) + X(RM, CTRL_RESERVE_CCU_PROF, 221) + X(RM, 
CTRL_RELEASE_CCU_PROF, 222) + X(RM, SETUP_HIBERNATION_BUFFER, 223) + X(RM, CTRL_CMD_GET_CHIPLET_HS_CREDIT_POOL, 224) + X(RM, CTRL_CMD_GET_HS_CREDITS_MAPPING, 225) + X(RM, CTRL_EXEC_PARTITIONS_EXPORT, 226) + X(RM, NUM_FUNCTIONS, 227) +#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +}; +# undef X +# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +#endif + +// RPC Events. Used by GSP-RM. +#ifndef E +# define E(RPC, VAL) NV_VGPU_MSG_EVENT_##RPC = VAL, +# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +enum { +#endif + E(FIRST_EVENT, 0x1000) + E(GSP_INIT_DONE, 0x1001) + E(GSP_RUN_CPU_SEQUENCER, 0x1002) + E(POST_EVENT, 0x1003) + E(RC_TRIGGERED, 0x1004) + E(MMU_FAULT_QUEUED, 0x1005) + E(OS_ERROR_LOG, 0x1006) + E(RG_LINE_INTR, 0x1007) + E(GPUACCT_PERFMON_UTIL_SAMPLES, 0x1008) + E(SIM_READ, 0x1009) + E(SIM_WRITE, 0x100a) + E(SEMAPHORE_SCHEDULE_CALLBACK, 0x100b) + E(UCODE_LIBOS_PRINT, 0x100c) + E(VGPU_GSP_PLUGIN_TRIGGERED, 0x100d) + E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK, 0x100e) + E(PERF_BRIDGELESS_INFO_UPDATE, 0x100f) + E(VGPU_CONFIG, 0x1010) + E(DISPLAY_MODESET, 0x1011) + E(EXTDEV_INTR_SERVICE, 0x1012) + E(NVLINK_INBAND_RECEIVED_DATA_256, 0x1013) + E(NVLINK_INBAND_RECEIVED_DATA_512, 0x1014) + E(NVLINK_INBAND_RECEIVED_DATA_1024, 0x1015) + E(NVLINK_INBAND_RECEIVED_DATA_2048, 0x1016) + E(NVLINK_INBAND_RECEIVED_DATA_4096, 0x1017) + E(TIMED_SEMAPHORE_RELEASE, 0x1018) + E(NVLINK_IS_GPU_DEGRADED, 0x1019) + E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK, 0x101a) + E(NVLINK_FAULT_UP, 0x101b) + E(GSP_LOCKDOWN_NOTICE, 0x101c) + E(MIG_CI_CONFIG_UPDATE, 0x101d) + E(UPDATE_GSP_TRACE, 0x101e) + E(NVLINK_FATAL_ERROR_RECOVERY, 0x101f) + E(GSP_POST_NOCAT_RECORD, 0x1020) + E(FECS_ERROR, 0x1021) + E(RECOVERY_ACTION, 0x1022) + E(NUM_EVENTS, 0x1023) +#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +}; +# undef E +# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +#endif + +#endif /*_RPC_GLOBAL_ENUMS_H_*/ diff --git a/src/nvidia/inc/kernel/vgpu/rpc_hal_stubs.h b/src/nvidia/inc/kernel/vgpu/rpc_hal_stubs.h new file mode 100644 index 0000000..04ce65c --- /dev/null +++ b/src/nvidia/inc/kernel/vgpu/rpc_hal_stubs.h @@ -0,0 +1,76 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _RPC_HAL_STUBS_H_ +#define _RPC_HAL_STUBS_H_ + +// This file replaces g_rpc_hal.h, providing no-op stubs for the RPC HAL +// functions when the Rmconfig RPC module is disabled and the BASE_DEFINITION +// for the RPC object is not needed. +#define __RPC_OBJECT_BASE_DEFINITION + +static inline NV_STATUS rpcAllocShareDevice_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcAllocMemory_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcAllocCtxDma_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcAllocChannelDma_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcAllocObject_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcMapMemoryDma_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcUnmapMemoryDma_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcAllocSubdevice_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcDupObject_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcIdleChannels_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcAllocEvent_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcDmaControl_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcDmaControl_wrapper(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcRmApiControl_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcFree_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcPerfGetLevelInfo_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcUnloadingGuestDriver_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcGpuExecRegOps_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcUpdateBarPde_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcSetPageDirectory_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcUnsetPageDirectory_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcUpdateGpuPdes_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcVgpuPfRegRead32_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcGetGspStaticInfo_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcSetMemoryInfo_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcSetRegistry_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcGspInitPostObjgpu_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcDumpProtobufComponent_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcLog_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcSetGuestSystemInfoExt_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcSetSurfaceProperties_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcCleanupSurface_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcSwitchToVga_HAL(OBJGPU *pGpu, ...)
{ return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcGetStaticData_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcGetConsolidatedGrStaticInfo_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcGetEncoderCapacity_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcDisableChannels_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcSaveHibernationData_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcRestoreHibernationData_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcGetEngineUtilizationWrapper_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcGetBrandCaps_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcSetupHibernationBuffer_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } + +#endif // _RPC_HAL_STUBS_H_ + diff --git a/src/nvidia/inc/kernel/vgpu/rpc_headers.h b/src/nvidia/inc/kernel/vgpu/rpc_headers.h new file mode 100644 index 0000000..ecdfa64 --- /dev/null +++ b/src/nvidia/inc/kernel/vgpu/rpc_headers.h @@ -0,0 +1,259 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __vgpu_rpc_nv_headers_h__ +#define __vgpu_rpc_nv_headers_h__ + +#include "ctrl/ctrl0080/ctrl0080perf.h" +#include "ctrl/ctrl2080/ctrl2080perf.h" +#include "ctrl/ctrl2080/ctrl2080internal.h" +#include "nvstatus.h" + +#define MAX_GPC_COUNT 32 + +/* + * Maximum number of RegOps that can be accommodated within one RPC call + * due to RPC message buffer size being limited to 4k + */ +#define VGPU_MAX_REGOPS_PER_RPC 100 + +#define VGPU_RESERVED_HANDLE_BASE 0xCAF3F000 +#define VGPU_RESERVED_HANDLE_RANGE 0x1000 + +#define VGPU_CALC_PARAM_OFFSET(prev_offset, prev_params) (prev_offset + NV_ALIGN_UP(sizeof(prev_params), sizeof(NvU32))) + +/* + * Message header (in buffer addressed by ring entry) + * + * If message is invalid (bad length or signature), signature and length + * are forced to be valid (if in range of descriptor) and result is set to + * NV_VGPU_RESULT_INVALID_MESSAGE_FORMAT. Otherwise, signature, length, and + * function are always unchanged and result is always set. 
+ * + * The function message header, if defined, immediately follows the main message + * header. + */ +#define NV_VGPU_MSG_HEADER_VERSION_MAJOR 31:24 /* R---D */ +#define NV_VGPU_MSG_HEADER_VERSION_MINOR 23:16 /* R---D */ +#define NV_VGPU_MSG_HEADER_VERSION_MAJOR_TOT 0x00000003 /* R---D */ +#define NV_VGPU_MSG_HEADER_VERSION_MINOR_TOT 0x00000000 /* R---D */ +/* signature must equal valid value */ +#define NV_VGPU_MSG_SIGNATURE_VALID 0x43505256 /* RW--V */ + +#include "rpc_global_enums.h" + +/* result code */ +/* codes below 0xFF000000 must match exactly the NV_STATUS codes in nvos.h */ +#define NV_VGPU_MSG_RESULT__RM NV_ERR_GENERIC:0x00000000 /* RW--D */ +#define NV_VGPU_MSG_RESULT_SUCCESS NV_OK +#define NV_VGPU_MSG_RESULT_CARD_NOT_PRESENT NV_ERR_CARD_NOT_PRESENT +#define NV_VGPU_MSG_RESULT_DUAL_LINK_INUSE NV_ERR_DUAL_LINK_INUSE +#define NV_VGPU_MSG_RESULT_GENERIC NV_ERR_GENERIC +#define NV_VGPU_MSG_RESULT_GPU_NOT_FULL_POWER NV_ERR_GPU_NOT_FULL_POWER +#define NV_VGPU_MSG_RESULT_IN_USE NV_ERR_IN_USE +#define NV_VGPU_MSG_RESULT_INSUFFICIENT_RESOURCES NV_ERR_INSUFFICIENT_RESOURCES +#define NV_VGPU_MSG_RESULT_INVALID_ACCESS_TYPE NV_ERR_INVALID_ACCESS_TYPE +#define NV_VGPU_MSG_RESULT_INVALID_ARGUMENT NV_ERR_INVALID_ARGUMENT +#define NV_VGPU_MSG_RESULT_INVALID_BASE NV_ERR_INVALID_BASE +#define NV_VGPU_MSG_RESULT_INVALID_CHANNEL NV_ERR_INVALID_CHANNEL +#define NV_VGPU_MSG_RESULT_INVALID_CLASS NV_ERR_INVALID_CLASS +#define NV_VGPU_MSG_RESULT_INVALID_CLIENT NV_ERR_INVALID_CLIENT +#define NV_VGPU_MSG_RESULT_INVALID_COMMAND NV_ERR_INVALID_COMMAND +#define NV_VGPU_MSG_RESULT_INVALID_DATA NV_ERR_INVALID_DATA +#define NV_VGPU_MSG_RESULT_INVALID_DEVICE NV_ERR_INVALID_DEVICE +#define NV_VGPU_MSG_RESULT_INVALID_DMA_SPECIFIER NV_ERR_INVALID_DMA_SPECIFIER +#define NV_VGPU_MSG_RESULT_INVALID_EVENT NV_ERR_INVALID_EVENT +#define NV_VGPU_MSG_RESULT_INVALID_FLAGS NV_ERR_INVALID_FLAGS +#define NV_VGPU_MSG_RESULT_INVALID_FUNCTION NV_ERR_INVALID_FUNCTION +#define NV_VGPU_MSG_RESULT_INVALID_HEAP NV_ERR_INVALID_HEAP +#define NV_VGPU_MSG_RESULT_INVALID_INDEX NV_ERR_INVALID_INDEX +#define NV_VGPU_MSG_RESULT_INVALID_LIMIT NV_ERR_INVALID_LIMIT +#define NV_VGPU_MSG_RESULT_INVALID_METHOD NV_ERR_INVALID_METHOD +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_BUFFER NV_ERR_INVALID_OBJECT_BUFFER +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_ERROR NV_ERR_INVALID_OBJECT +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_HANDLE NV_ERR_INVALID_OBJECT_HANDLE +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_NEW NV_ERR_INVALID_OBJECT_NEW +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_OLD NV_ERR_INVALID_OBJECT_OLD +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_PARENT NV_ERR_INVALID_OBJECT_PARENT +#define NV_VGPU_MSG_RESULT_INVALID_OFFSET NV_ERR_INVALID_OFFSET +#define NV_VGPU_MSG_RESULT_INVALID_OWNER NV_ERR_INVALID_OWNER +#define NV_VGPU_MSG_RESULT_INVALID_PARAM_STRUCT NV_ERR_INVALID_PARAM_STRUCT +#define NV_VGPU_MSG_RESULT_INVALID_PARAMETER NV_ERR_INVALID_PARAMETER +#define NV_VGPU_MSG_RESULT_INVALID_POINTER NV_ERR_INVALID_POINTER +#define NV_VGPU_MSG_RESULT_INVALID_REGISTRY_KEY NV_ERR_INVALID_REGISTRY_KEY +#define NV_VGPU_MSG_RESULT_INVALID_STATE NV_ERR_INVALID_STATE +#define NV_VGPU_MSG_RESULT_INVALID_STRING_LENGTH NV_ERR_INVALID_STRING_LENGTH +#define NV_VGPU_MSG_RESULT_INVALID_XLATE NV_ERR_INVALID_XLATE +#define NV_VGPU_MSG_RESULT_IRQ_NOT_FIRING NV_ERR_IRQ_NOT_FIRING +#define NV_VGPU_MSG_RESULT_MULTIPLE_MEMORY_TYPES NV_ERR_MULTIPLE_MEMORY_TYPES +#define NV_VGPU_MSG_RESULT_NOT_SUPPORTED NV_ERR_NOT_SUPPORTED +#define NV_VGPU_MSG_RESULT_OPERATING_SYSTEM 
NV_ERR_OPERATING_SYSTEM
+#define NV_VGPU_MSG_RESULT_PROTECTION_FAULT NV_ERR_PROTECTION_FAULT
+#define NV_VGPU_MSG_RESULT_TIMEOUT NV_ERR_TIMEOUT
+#define NV_VGPU_MSG_RESULT_TOO_MANY_PRIMARIES NV_ERR_TOO_MANY_PRIMARIES
+#define NV_VGPU_MSG_RESULT_IRQ_EDGE_TRIGGERED NV_ERR_IRQ_EDGE_TRIGGERED
+#define NV_VGPU_MSG_RESULT_GUEST_HOST_DRIVER_MISMATCH NV_ERR_LIB_RM_VERSION_MISMATCH
+
+/*
+ * codes above 0xFF000000 and below 0xFF100000 must match one-for-one
+ * the vmiop_error_t codes in vmioplugin.h, with 0xFF000000 added.
+ */
+#define NV_VGPU_MSG_RESULT__VMIOP 0xFF00000a:0xFF000000 /* RW--D */
+#define NV_VGPU_MSG_RESULT_VMIOP_INVAL 0xFF000001 /* RW--V */
+#define NV_VGPU_MSG_RESULT_VMIOP_RESOURCE 0xFF000002 /* RW--V */
+#define NV_VGPU_MSG_RESULT_VMIOP_RANGE 0xFF000003 /* RW--V */
+#define NV_VGPU_MSG_RESULT_VMIOP_READ_ONLY 0xFF000004 /* RW--V */
+#define NV_VGPU_MSG_RESULT_VMIOP_NOT_FOUND 0xFF000005 /* RW--V */
+#define NV_VGPU_MSG_RESULT_VMIOP_NO_ADDRESS_SPACE 0xFF000006 /* RW--V */
+#define NV_VGPU_MSG_RESULT_VMIOP_TIMEOUT 0xFF000007 /* RW--V */
+#define NV_VGPU_MSG_RESULT_VMIOP_NOT_ALLOWED_IN_CALLBACK 0xFF000008 /* RW--V */
+#define NV_VGPU_MSG_RESULT_VMIOP_ECC_MISMATCH 0xFF000009 /* RW--V */
+#define NV_VGPU_MSG_RESULT_VMIOP_NOT_SUPPORTED 0xFF00000a /* RW--V */
+/* RPC-specific error codes */
+#define NV_VGPU_MSG_RESULT__RPC 0xFF100009:0xFF100000 /* RW--D */
+#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION 0xFF100001 /* RW--V */
+#define NV_VGPU_MSG_RESULT_RPC_INVALID_MESSAGE_FORMAT 0xFF100002 /* RW--V */
+#define NV_VGPU_MSG_RESULT_RPC_HANDLE_NOT_FOUND 0xFF100003 /* RW--V */
+#define NV_VGPU_MSG_RESULT_RPC_HANDLE_EXISTS 0xFF100004 /* RW--V */
+#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_RM_ERROR 0xFF100005 /* RW--V */
+#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_VMIOP_ERROR 0xFF100006 /* RW--V */
+#define NV_VGPU_MSG_RESULT_RPC_RESERVED_HANDLE 0xFF100007 /* RW--V */
+#define NV_VGPU_MSG_RESULT_RPC_CUDA_PROFILING_DISABLED 0xFF100008 /* RW--V */
+// This error code is used by the plugin to notify the guest that the API
+// control is recognized but not supported. The guest uses it to avoid
+// printing an error message about a failed API control.
+#define NV_VGPU_MSG_RESULT_RPC_API_CONTROL_NOT_SUPPORTED 0xFF100009 /* RW--V */
+/* RPC-specific code in result for incomplete request */
+#define NV_VGPU_MSG_RESULT_RPC_PENDING 0xFFFFFFFF /* RW--V */
+/* shared union field */
+#define NV_VGPU_MSG_UNION_INIT 0x00000000 /* RW--V */
+
+/*
+ * common PTEDESC message defines (used w/ ALLOC_MEMORY, ALLOC_VIDMEM, FILL_PTE_MEM)
+ */
+#define NV_VGPU_PTEDESC_INIT 0x00000000 /* RWI-V */
+#define NV_VGPU_PTEDESC__PROD 0x00000000 /* RW--V */
+#define NV_VGPU_PTEDESC_IDR_NONE 0x00000000 /* RW--V */
+#define NV_VGPU_PTEDESC_IDR_SINGLE 0x00000001 /* RW--V */
+#define NV_VGPU_PTEDESC_IDR_DOUBLE 0x00000002 /* RW--V */
+#define NV_VGPU_PTEDESC_IDR_TRIPLE 0x00000003 /* RW--V */
+
+#define NV_VGPU_PTE_PAGE_SIZE 0x1000 /* R---V */
+#define NV_VGPU_PTE_SIZE 4 /* R---V */
+#define NV_VGPU_PTE_INDEX_SHIFT 10 /* R---V */
+#define NV_VGPU_PTE_INDEX_MASK 0x3FF /* R---V */
+
+#define NV_VGPU_PTE_64_PAGE_SIZE 0x1000 /* R---V */
+#define NV_VGPU_PTE_64_SIZE 8 /* R---V */
+#define NV_VGPU_PTE_64_INDEX_SHIFT 9 /* R---V */
+#define NV_VGPU_PTE_64_INDEX_MASK 0x1FF /* R---V */
+
+/*
+ * LOG message
+ */
+#define NV_VGPU_LOG_LEVEL_FATAL 0x00000000 /* RW--V */
+#define NV_VGPU_LOG_LEVEL_ERROR 0x00000001 /* RW--V */
+#define NV_VGPU_LOG_LEVEL_NOTICE 0x00000002 /* RW--V */
+#define NV_VGPU_LOG_LEVEL_STATUS 0x00000003 /* RW--V */
+#define NV_VGPU_LOG_LEVEL_DEBUG 0x00000004 /* RW--V */
+
+typedef enum
+{
+    RPC_GR_BUFFER_TYPE_GRAPHICS = 0,
+    RPC_GR_BUFFER_TYPE_GRAPHICS_ZCULL = 1,
+    RPC_GR_BUFFER_TYPE_GRAPHICS_GRAPHICS_PM = 2,
+    RPC_GR_BUFFER_TYPE_COMPUTE_PREEMPT = 3,
+    RPC_GR_BUFFER_TYPE_GRAPHICS_PATCH = 4,
+    RPC_GR_BUFFER_TYPE_GRAPHICS_BUNDLE_CB = 5,
+    RPC_GR_BUFFER_TYPE_GRAPHICS_PAGEPOOL_GLOBAL = 6,
+    RPC_GR_BUFFER_TYPE_GRAPHICS_ATTRIBUTE_CB = 7,
+    RPC_GR_BUFFER_TYPE_GRAPHICS_RTV_CB_GLOBAL = 8,
+    RPC_GR_BUFFER_TYPE_GRAPHICS_GFXP_POOL = 9,
+    RPC_GR_BUFFER_TYPE_GRAPHICS_GFXP_CTRL_BLK = 10,
+    RPC_GR_BUFFER_TYPE_GRAPHICS_FECS_EVENT = 11,
+    RPC_GR_BUFFER_TYPE_GRAPHICS_PRIV_ACCESS_MAP = 12,
+    RPC_GR_BUFFER_TYPE_GRAPHICS_MAX = 13,
+} RPC_GR_BUFFER_TYPE;
+
+typedef enum
+{
+    FECS_ERROR_EVENT_TYPE_NONE = 0,
+    FECS_ERROR_EVENT_TYPE_BUFFER_RESET_REQUIRED = 1,
+    FECS_ERROR_EVENT_TYPE_BUFFER_FULL = 2,
+    FECS_ERROR_EVENT_TYPE_MAX = 3,
+} FECS_ERROR_EVENT_TYPE;
+
+/*
+ * Maximum entries that can be sent in a single pass of RPC.
+ */
+#define VGPU_RPC_GET_P2P_CAPS_V2_MAX_GPUS_SQUARED_PER_RPC 512
+
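Multi-struct RPC messages are laid out with VGPU_CALC_PARAM_OFFSET, defined near the top of this header, which advances an offset past the previous parameter struct with NvU32 alignment. A small worked sketch (the two parameter structs are illustrative only; sizes assume typical padding rules):

    // Hypothetical packing of two parameter structs into one message body.
    typedef struct { NvU32 a; NvU8 b; } PARAMS_X;   // sizeof == 8 after padding
    typedef struct { NvU64 c; } PARAMS_Y;           // illustrative second struct

    NvU32 offsetX = 0;
    NvU32 offsetY = VGPU_CALC_PARAM_OFFSET(offsetX, PARAMS_X);
    // offsetY == 0 + NV_ALIGN_UP(sizeof(PARAMS_X), sizeof(NvU32)) == 8
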
+/*
+ * Fetching NV2080_CTRL_GR_MAX_CTX_BUFFER_COUNT buffers in a single RPC
+ * message would overflow the RPC buffer; accommodating that many would
+ * require converting this RPC to multipass. Currently, however, RM
+ * allocates only (3 + GR_GLOBALCTX_BUFFER_COUNT) < 32 buffers, which fit
+ * in a single RPC message. Hence the RPC is kept single-pass and the max
+ * buffer count per RPC is limited to 32.
+ */
+#define GR_MAX_RPC_CTX_BUFFER_COUNT 32
+
+/*
+ * Enum specifying the BAR whose PDE is to be updated
+ */
+typedef enum
+{
+    NV_RPC_UPDATE_PDE_BAR_1,
+    NV_RPC_UPDATE_PDE_BAR_2,
+    NV_RPC_UPDATE_PDE_BAR_INVALID,
+} NV_RPC_UPDATE_PDE_BAR_TYPE;
+
+typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
+{
+    NvU32 headIndex;
+    NvU32 maxHResolution;
+    NvU32 maxVResolution;
+} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
+
+typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
+{
+    NvU32 numHeads;
+    NvU32 maxNumHeads;
+} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
+
+
+/*
+ * Maximum number of SMs that can be read in one RPC call to get error states
+ */
+
+#define VGPU_RPC_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PER_RPC_v21_06 80
+
+typedef enum
+{
+    GPU_RECOVERY_EVENT_TYPE_REFRESH,
+    GPU_RECOVERY_EVENT_TYPE_GPU_DRAIN_P2P,
+    GPU_RECOVERY_EVENT_TYPE_SYS_REBOOT,
+    GPU_RECOVERY_EVENT_TYPE_GPU_REBOOT,
+} GPU_RECOVERY_EVENT_TYPE;
+
+
+#endif // __vgpu_rpc_nv_headers_h__
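The result-code layout above partitions the 32-bit result space: values below 0xFF000000 are plain NV_STATUS codes, 0xFF000000 through 0xFF0FFFFF mirror vmiop_error_t at a fixed offset, and 0xFF100000 and up are RPC-layer errors. A sketch of how a guest might classify a result word under that scheme (the helper itself is illustrative, not part of this patch):

    // Hypothetical classifier for the result field of an RPC message.
    static const char *rpcResultDomain(NvU32 result)
    {
        if (result >= NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION &&
            result <= NV_VGPU_MSG_RESULT_RPC_API_CONTROL_NOT_SUPPORTED)
            return "rpc";      // RPC-layer error
        if (result >= NV_VGPU_MSG_RESULT_VMIOP_INVAL &&
            result <= NV_VGPU_MSG_RESULT_VMIOP_NOT_SUPPORTED)
            return "vmiop";    // vmiop_error_t + 0xFF000000
        if (result == NV_VGPU_MSG_RESULT_RPC_PENDING)
            return "pending";  // request still incomplete
        return "rm";           // plain NV_STATUS from nvos.h
    }
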
diff --git a/src/nvidia/inc/kernel/vgpu/rpc_vgpu.h b/src/nvidia/inc/kernel/vgpu/rpc_vgpu.h
new file mode 100644
index 0000000..b2d0b03
--- /dev/null
+++ b/src/nvidia/inc/kernel/vgpu/rpc_vgpu.h
@@ -0,0 +1,73 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2008-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __vgpu_dev_nv_rpc_vgpu_h__
+#define __vgpu_dev_nv_rpc_vgpu_h__
+
+
+//******************************************************************************
+//
+//   Declarations for the RPC VGPU module.
+//
+//   Description:
+//       This module declares the RPC interface functions/macros for VGPU.
+//
+//******************************************************************************
+
+static NV_INLINE void NV_RM_RPC_ALLOC_LOCAL_USER(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_ALLOC_VIDMEM(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_ALLOC_VIRTMEM(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_MAP_MEMORY(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_UNMAP_MEMORY(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_DMA_FILL_PTE_MEM(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_CREATE_FB_SEGMENT(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_DESTROY_FB_SEGMENT(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_DEFERRED_API_CONTROL(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_REMOVE_DEFERRED_API(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_FREE_VIDMEM_VIRT(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_MAP_SEMA_MEMORY(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_UNMAP_SEMA_MEMORY(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_GET_CONSOLIDATED_STATIC_INFO(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_UPDATE_PDE_2(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_TRANSLATE_GUEST_GPU_PTES(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_SET_SEMA_MEM_VALIDATION_STATE(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_RESET_CURRENT_GR_CONTEXT(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_ALLOC_CONTEXT_DMA(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_GET_PLCABLE_ADDRESS_KIND(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_UPDATE_GPU_PDES(OBJGPU *pGpu, ...) { }
+
+static NV_INLINE void NV_RM_RPC_LOG(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_SET_GUEST_SYSTEM_INFO_EXT(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_GET_ENGINE_UTILIZATION(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_SET_SURFACE_PROPERTIES(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_CLEANUP_SURFACE(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_SWITCH_TO_VGA(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_GET_STATIC_DATA(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_GET_CONSOLIDATED_GR_STATIC_INFO(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_GET_ENCODER_CAPACITY(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_SAVE_HIBERNATION_DATA(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_RESTORE_HIBERNATION_DATA(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_SETUP_HIBERNATION_BUFFER(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_PERF_GET_LEVEL_INFO(OBJGPU *pGpu, ...) { }
+static NV_INLINE void NV_RM_RPC_API_CONTROL(OBJGPU *pGpu, ...) { }
+#endif // __vgpu_dev_nv_rpc_vgpu_h__
diff --git a/src/nvidia/inc/kernel/vgpu/vgpu_util.h b/src/nvidia/inc/kernel/vgpu/vgpu_util.h
new file mode 100644
index 0000000..a0b195d
--- /dev/null
+++ b/src/nvidia/inc/kernel/vgpu/vgpu_util.h
@@ -0,0 +1,53 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+//******************************************************************************
+//
+// Declarations for VGPU util functions.
+// +//****************************************************************************** + +#ifndef __vgpu_util_h__ +#define __vgpu_util_h__ + +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_desc.h" + +#include "vgpu/dev_vgpu.h" + +static inline NV_STATUS vgpuUpdateSysmemPfnBitMap(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvBool bAlloc) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS vgpuUpdateGuestSysmemPfnBitMap(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvBool bAlloc) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static inline NvBool vgpuIsGuestManagedHwAlloc(OBJGPU *pGpu) +{ + return NV_FALSE; +} + +#endif // __vgpu_util_h__ diff --git a/src/nvidia/inc/kernel/vgpu/vgpuapi.h b/src/nvidia/inc/kernel/vgpu/vgpuapi.h new file mode 100644 index 0000000..ae35d1e --- /dev/null +++ b/src/nvidia/inc/kernel/vgpu/vgpuapi.h @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once +#include "g_vgpuapi_nvoc.h" + +#ifndef _VGPUAPI_H_ +#define _VGPUAPI_H_ + +#include "core/core.h" +#include "rmapi/client.h" +#include "gpu/gpu_resource.h" +#include "rmapi/control.h" +#include "ctrl/ctrla080.h" + +NVOC_PREFIX(vgpuapi) class VgpuApi : GpuResource +{ +public: + NV_STATUS vgpuapiConstruct(VgpuApi *pVgpuApi, CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams) + : GpuResource(pCallContext, pParams); + void vgpuapiDestruct(VgpuApi *pVgpuApi); + + NODE node; + NvHandle handle; + NvHandle hDevice; + + // + // RMCTRL Exported methods -- Category: VGPU_DISPLAY + // + RMCTRL_EXPORT(NVA080_CTRL_CMD_VGPU_DISPLAY_SET_SURFACE_PROPERTIES, + RMCTRL_FLAGS(NON_PRIVILEGED)) + NV_STATUS vgpuapiCtrlCmdVgpuDisplaySetSurfaceProperties(VgpuApi *pVgpuApi, + NVA080_CTRL_VGPU_DISPLAY_SET_SURFACE_PROPERTIES *pParams); + + RMCTRL_EXPORT(NVA080_CTRL_CMD_VGPU_DISPLAY_CLEANUP_SURFACE, + RMCTRL_FLAGS(NON_PRIVILEGED)) + NV_STATUS vgpuapiCtrlCmdVgpuDisplayCleanupSurface(VgpuApi *pVgpuApi, + NVA080_CTRL_VGPU_DISPLAY_CLEANUP_SURFACE_PARAMS *pParams); + + // + // RMCTRL Exported methods -- Category: VGPU_OTHERS + // + RMCTRL_EXPORT(NVA080_CTRL_CMD_VGPU_GET_CONFIG, + RMCTRL_FLAGS(NON_PRIVILEGED)) + NV_STATUS vgpuapiCtrlCmdVGpuGetConfig(VgpuApi *pVgpuApi, + NVA080_CTRL_VGPU_GET_CONFIG_PARAMS *pParams); + +}; + +#endif // _VGPUAPI_H_ diff --git a/src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h b/src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h new file mode 100644 index 0000000..2a07abc --- /dev/null +++ b/src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h @@ -0,0 +1,3 @@ + +#include "g_hypervisor_nvoc.h" + diff --git a/src/nvidia/inc/lib/base_utils.h b/src/nvidia/inc/lib/base_utils.h new file mode 100644 index 0000000..44e87b4 --- /dev/null +++ b/src/nvidia/inc/lib/base_utils.h @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef BASE_UTILS_H +#define BASE_UTILS_H + +#include "nvtypes.h" + +/*! + * @file + * @brief Various helper utility functions that have no other home. 
+ */
+
+NvU32 nvLogBase2(NvU64);
+
+// bit field helper functions
+NvU32 nvBitFieldLSZero(NvU32 *, NvU32);
+NvU32 nvBitFieldMSZero(NvU32 *, NvU32);
+NvBool nvBitFieldTest(NvU32 *, NvU32, NvU32);
+void nvBitFieldSet(NvU32 *, NvU32, NvU32, NvBool);
+
+//
+// Sort an array of n elements/structures.
+// Example:
+//     NvBool integerLess(void * a, void * b)
+//     {
+//         return *(int *)a < *(int *)b;
+//     }
+//     int array[1000];
+//     int temp[1000];
+//     ...
+//     nvMergeSort(array, sizeof(array)/sizeof(*array), temp, sizeof(*array), integerLess);
+//
+void nvMergeSort(void * array, NvU32 n, void * tempBuffer, NvU32 elementSize, NvBool (*less)(void *, void *));
+
+//
+#define BASE10 (10)
+#define BASE16 (16)
+
+// Do not conflict with libc naming
+NvS32 nvStrToL(NvU8* pStr, NvU8** pEndStr, NvS32 base, NvU8 stopChar, NvU32 *numFound);
+
+//
+// Returns bit mask of most significant bit of input
+//
+NvU64 nvMsb64(NvU64);
+
+//
+// Converts unsigned long int to string
+//
+char * nvU32ToStr(NvU32 value, char *string, NvU32 radix);
+
+/*! Converts unsigned long long hex int to string */
+char * nvU64ToStr(NvU64 value, char *string, NvU32 targetStrLen);
+
+//
+// Find the string length
+//
+NvU32 nvStringLen(const char * str);
+
+#endif // BASE_UTILS_H
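For the two power-of-two helpers, the comments above imply relationships like the following (values are illustrative; the prototypes give no parameter names, and the exact rounding behavior of nvLogBase2 for non-powers of two is defined by the implementation):

    // nvMsb64 keeps only the most significant set bit of its input;
    // nvLogBase2 of an exact power of two is its bit position.
    NvU64 mask = nvMsb64(0x0000F000ull);   // expected: 0x00008000
    NvU32 log  = nvLogBase2(0x1000ull);    // expected: 12
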
diff --git a/src/nvidia/inc/lib/protobuf/prb.h b/src/nvidia/inc/lib/protobuf/prb.h
new file mode 100644
index 0000000..1cdba45
--- /dev/null
+++ b/src/nvidia/inc/lib/protobuf/prb.h
@@ -0,0 +1,299 @@
+/*
+ * Lightweight protocol buffers.
+ *
+ * Based on code taken from
+ * https://code.google.com/archive/p/lwpb/source/default/source
+ *
+ * The code there is licensed as Apache 2.0. However, NVIDIA has received the
+ * code from the original author under MIT license terms.
+ *
+ *
+ * Copyright 2009 Simon Kallweit
+ * Copyright 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __PRB_H__
+#define __PRB_H__
+
+// Make sure the generated files can see rmconfig.h
+
+#ifndef _RMCFG_H
+#include "rmconfig.h"
+#endif
+
+// Maximum depth of message embedding
+#ifndef PRB_MAX_DEPTH
+#define PRB_MAX_DEPTH 8
+#endif
+
+// Maximum number of required fields in a message
+#ifndef PRB_MAX_REQUIRED_FIELDS
+#define PRB_MAX_REQUIRED_FIELDS 16
+#endif
+
+// Provide enum names as strings
+#ifndef PRB_ENUM_NAMES
+#define PRB_ENUM_NAMES 0
+#endif
+
+#if PRB_ENUM_NAMES
+#define PRB_MAYBE_ENUM_NAME(n) n,
+#else
+#define PRB_MAYBE_ENUM_NAME(n)
+#endif
+
+// Provide field names as strings
+#ifndef PRB_FIELD_NAMES
+#define PRB_FIELD_NAMES 0
+#endif
+
+#if PRB_FIELD_NAMES
+#define PRB_MAYBE_FIELD_NAME(n) n,
+#else
+#define PRB_MAYBE_FIELD_NAME(n)
+#endif
+
+// Provide field default values
+#ifndef PRB_FIELD_DEFAULTS
+#define PRB_FIELD_DEFAULTS 0
+#endif
+
+#if PRB_FIELD_DEFAULTS
+#define PRB_MAYBE_FIELD_DEFAULT_DEF(n) n
+#define PRB_MAYBE_FIELD_DEFAULT(n) n,
+#else
+#define PRB_MAYBE_FIELD_DEFAULT_DEF(n)
+#define PRB_MAYBE_FIELD_DEFAULT(n)
+#endif
+
+// Provide message names as strings
+#ifndef PRB_MESSAGE_NAMES
+#define PRB_MESSAGE_NAMES 0
+#endif
+
+#if PRB_MESSAGE_NAMES
+#define PRB_MAYBE_MESSAGE_NAME(n) n,
+#else
+#define PRB_MAYBE_MESSAGE_NAME(n)
+#endif
+
+// Provide method names as strings
+#ifndef PRB_METHOD_NAMES
+#define PRB_METHOD_NAMES 0
+#endif
+
+#if PRB_METHOD_NAMES
+#define PRB_MAYBE_METHOD_NAME(n) n,
+#else
+#define PRB_MAYBE_METHOD_NAME(n)
+#endif
+
+// Provide service names as strings
+#ifndef PRB_SERVICE_NAMES
+#define PRB_SERVICE_NAMES 0
+#endif
+
+#if PRB_SERVICE_NAMES
+#define PRB_MAYBE_SERVICE_NAME(n) n,
+#else
+#define PRB_MAYBE_SERVICE_NAME(n)
+#endif
+
+// Field labels
+#define PRB_REQUIRED 0
+#define PRB_OPTIONAL 1
+#define PRB_REPEATED 2
+
+// Field value types
+#define PRB_DOUBLE 0
+#define PRB_FLOAT 1
+#define PRB_INT32 2
+#define PRB_INT64 3
+#define PRB_UINT32 4
+#define PRB_UINT64 5
+#define PRB_SINT32 6
+#define PRB_SINT64 7
+#define PRB_FIXED32 8
+#define PRB_FIXED64 9
+#define PRB_SFIXED32 10
+#define PRB_SFIXED64 11
+#define PRB_BOOL 12
+#define PRB_ENUM 13
+#define PRB_STRING 14
+#define PRB_BYTES 15
+#define PRB_MESSAGE 16
+
+// Field flags
+#define PRB_HAS_DEFAULT (1 << 0)
+#define PRB_IS_PACKED (1 << 1)
+#define PRB_IS_DEPRECATED (1 << 2)
+
+typedef struct
+{
+    unsigned int label : 2;
+    unsigned int typ : 6;
+    unsigned int flags : 8;
+} PRB_FIELD_OPTS;
+
+// Protocol buffer wire types
+typedef enum
+{
+    WT_VARINT = 0,
+    WT_64BIT = 1,
+    WT_STRING = 2,
+    WT_32BIT = 5
+} WIRE_TYPE;
+
+// Protocol buffer wire values
+typedef union
+{
+    NvU64 varint;
+    NvU64 int64;
+    struct {
+        NvU64 len;
+        const void *data;
+    } string;
+    NvU32 int32;
+} WIRE_VALUE;
+
+typedef struct
+{
+    const char *str;
+    NvU32 len;
+} PRB_VALUE_STRING;
+
+typedef struct
+{
+    NvU8 *data;
+    NvU32 len;
+} PRB_VALUE_BYTES;
+
+typedef struct
+{
+    void *data;
+    NvU32 len;
+} PRB_VALUE_MESSAGE;
+
+typedef union
+{
+    NvF64 double_;
+    NvF32 float_;
+    NvS32 int32;
+    NvS64 int64;
+    NvU32 uint32;
+    NvU64 uint64;
+    NvBool bool_;
+    PRB_VALUE_STRING string;
+    PRB_VALUE_BYTES bytes;
+    PRB_VALUE_MESSAGE message;
+    int enum_;
+    int null;
+} PRB_VALUE;
+
+typedef struct
+{
+    int value;
+#if PRB_ENUM_NAMES
+    const char *name;
+#endif
+} PRB_ENUM_MAPPING;
+
+typedef struct
+{
+    const PRB_ENUM_MAPPING *mappings;
+    NvU32 count;
+#if PRB_ENUM_NAMES
+    const char *name;
+#endif
+} PRB_ENUM_DESC;
+
+struct PRB_MSG_DESC;
+
+// Protocol buffer field descriptor
+typedef struct PRB_FIELD_DESC
+{
NvU32 number; + PRB_FIELD_OPTS opts; + const struct PRB_MSG_DESC *msg_desc; + const PRB_ENUM_DESC *enum_desc; +#if PRB_FIELD_NAMES + const char *name; +#endif +#if PRB_FIELD_DEFAULTS + const PRB_VALUE *def; +#endif +} PRB_FIELD_DESC; + +//* Protocol buffer message descriptor +typedef struct PRB_MSG_DESC +{ + NvU32 num_fields; + const PRB_FIELD_DESC *fields; +#if PRB_MESSAGE_NAMES + const char *name; +#endif +} PRB_MSG_DESC; + +// Forward declaration +struct PRB_SERVICE_DESC; + +// Protocol buffer method descriptor +struct PRB_METHOD_DESC +{ + const struct PRB_SERVICE_DESC *service; + const PRB_MSG_DESC *req_desc; + const PRB_MSG_DESC *res_desc; +#if PRB_METHOD_NAMES + const char *name; +#endif +}; + +// Protocol buffer service descriptor +typedef struct PRB_SERVICE_DESC +{ + const NvU32 num_methods; + const struct PRB_METHOD_DESC *methods; +#if PRB_SERVICE_NAMES + const char *name; +#endif +} PRB_SERVICE_DESC; + +// Simple memory buffer +typedef struct +{ + NvU8 *base; + NvU8 *pos; + NvU8 *end; +} PRB_BUF; + +// Encoder interface +typedef struct +{ + PRB_BUF buf; + const PRB_FIELD_DESC *field_desc; + const PRB_MSG_DESC *msg_desc; +} PRB_ENCODER_STACK_FRAME; + +typedef NV_STATUS PrbBufferCallback(void *pEncoder, NvBool bBufferFull); + +typedef NvU32 PRB_ENCODER; + +#endif // __PRB_H__ diff --git a/src/nvidia/inc/lib/ref_count.h b/src/nvidia/inc/lib/ref_count.h new file mode 100644 index 0000000..c530d0e --- /dev/null +++ b/src/nvidia/inc/lib/ref_count.h @@ -0,0 +1,3 @@ + +#include "g_ref_count_nvoc.h" + diff --git a/src/nvidia/inc/lib/zlib/inflate.h b/src/nvidia/inc/lib/zlib/inflate.h new file mode 100644 index 0000000..51f8b64 --- /dev/null +++ b/src/nvidia/inc/lib/zlib/inflate.h @@ -0,0 +1,134 @@ +/* + Portions of this file are based on zlib. Subsequent additions by NVIDIA. + + Copyright (c) 2001-2021, NVIDIA CORPORATION. All rights reserved. + + zlib.h -- interface of the 'zlib' general purpose compression library + version 1.1.3, July 9th, 1998 + + Copyright (C) 1995-1998 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + + The data format used by the zlib library is described by RFCs (Request for + Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt + (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format). 
+*/
+#ifndef _INFLATE_H_
+#define _INFLATE_H_
+
+#include "nvos.h"
+
+#define NOMEMCPY 1
+
+typedef NvU8 uch;
+typedef NvU16 ush;
+typedef NvU32 ulg;
+
+#define GZ_SLIDE_WINDOW_SIZE 32768
+
+#define NEXTBYTE() pGzState->inbuf[pGzState->inptr++]
+#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
+#define DUMPBITS(n) {b>>=(n);k-=(n);}
+
+/* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
+#define BMAX 16 /* maximum bit length of any code (16 for explode) */
+#define N_MAX 288 /* maximum number of codes in any set */
+
+/* Huffman code lookup table entry--this entry is four bytes for machines
+   that have 16-bit pointers (e.g. PC's in the small or medium model).
+   Valid extra bits are 0..13. e == 15 is EOB (end of block), e == 16
+   means that v is a literal, 16 < e < 32 means that v is a pointer to
+   the next table, which codes e - 16 bits, and lastly e == 99 indicates
+   an unused code. If a code with e == 99 is looked up, this implies an
+   error in the data. */
+struct huft {
+    uch e;              /* number of extra bits or operation */
+    uch b;              /* number of bits in this code or subcode */
+    union {
+        ush n;          /* literal, length base, or distance base */
+        struct huft *t; /* pointer to next level of table */
+    } v;
+};
+
+/* The inflate algorithm uses a sliding 32K byte window on the uncompressed
+   stream to find repeated byte strings. This is implemented here as a
+   circular buffer. The index is updated simply by incrementing and then
+   and'ing with 0x7fff (32K-1). */
+/* It is left to other modules to supply the 32K area. It is assumed
+   to be usable as if it were declared "uch slide[32768];" or as just
+   "uch *slide;" and then malloc'ed in the latter case. The definition
+   must be in unzip.h, included above. */
+/* unsigned pGzState->wp; current position in slide */
+#define WSIZE GZ_SLIDE_WINDOW_SIZE
+#define flush_output(w) (pGzState->wp=(w),flush_window(pGzState))
+#define Tracecv(A,B)
+#define Tracevv(X)
+
+#define GZ_STATE_ITERATOR_OK 0
+#define GZ_STATE_ITERATOR_ERROR 1
+#define GZ_STATE_ITERATOR_END 2
+
+#define GZ_STATE_HUFT_OK 0
+#define GZ_STATE_HUFT_INCOMP 1
+#define GZ_STATE_HUFT_ERROR 2
+
+typedef struct {
+    unsigned int e;      /* table entry flag/number of extra bits */
+    unsigned int n, d;   /* length and index for copy */
+    unsigned int w;      /* current window position */
+    struct huft *t;      /* pointer to table entry */
+    ulg b;               /* bit buffer */
+    unsigned int k;      /* number of bits in bit buffer */
+    int continue_copy;   /* last flush not finished */
+    unsigned int sn;     /* used by inflate type 0 (stored) block */
+} GZ_INFLATE_CODES_STATE, *PGZ_INFLATE_CODES_STATE;
+
+typedef struct {
+    struct huft *tl;     /* literal/length code table */
+    struct huft *td;     /* distance code table */
+    NvS32 bl;            /* lookup bits for tl */
+    NvS32 bd;            /* lookup bits for td */
+
+    NvU8 *inbuf,*outbuf;
+    NvU32 outBufSize;
+    NvU32 inptr,outptr;
+    NvU32 outLower,outUpper;
+    unsigned int wp;
+    unsigned int wp1;    /* wp1 is index of first unflushed byte in slide window */
+    unsigned int wp2;    /* wp2 is index of last unflushed byte in slide window */
+    uch *window;
+
+    ulg bb;              /* bit buffer */
+    unsigned int bk;     /* bits in bit buffer */
+    int e;               /* last block flag */
+
+    int newblock;        /* start a new decompression block */
+    NvU32 optSize;
+    GZ_INFLATE_CODES_STATE codesState;
+
+} GZ_INFLATE_STATE, *PGZ_INFLATE_STATE;
+
+NV_STATUS utilGzIterator(PGZ_INFLATE_STATE pGzState);
+NV_STATUS utilGzAllocate(const NvU8 *zArray, NvU32 numTotalBytes, PGZ_INFLATE_STATE *ppGzState);
+NvU32 utilGzGetData(PGZ_INFLATE_STATE pGzState, NvU32 offset, NvU32 size, NvU8 * outBuffer);
+NV_STATUS utilGzDestroy(PGZ_INFLATE_STATE pGzState);
+
+#endif
diff --git a/src/nvidia/inc/libraries/containers/btree.h b/src/nvidia/inc/libraries/containers/btree.h
new file mode 100644
index 0000000..a46caa7
--- /dev/null
+++ b/src/nvidia/inc/libraries/containers/btree.h
@@ -0,0 +1,68 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _BTREE_H_
+#define _BTREE_H_
+
+/*********************** Balanced Tree data structure **********************\
+*                                                                           *
+* Module: BTREE.H                                                           *
+*         API to BTREE routines.                                            *
+*                                                                           *
+\***************************************************************************/
+
+//
+// RED BLACK TREE structure.
+//
+#include "nvtypes.h"
+#include "nvstatus.h"
+
+typedef struct NODE
+{
+    // public:
+    void   *Data;
+    NvU64   keyStart;
+    NvU64   keyEnd;
+
+    // private:
+    NvBool  isRed;          // !IsRed == IsBlack
+    struct NODE *parent;    // tree links
+    struct NODE *left;
+    struct NODE *right;
+
+} NODE, *PNODE;
+
+//---------------------------------------------------------------------------
+//
+//  Function prototypes.
+//
+//---------------------------------------------------------------------------
+
+NV_STATUS btreeInsert(PNODE, PNODE *);
+NV_STATUS btreeUnlink(PNODE, PNODE *);
+NV_STATUS btreeSearch(NvU64, PNODE *, PNODE);
+NV_STATUS btreeEnumStart(NvU64, PNODE *, PNODE);
+NV_STATUS btreeEnumNext(PNODE *, PNODE);
+NV_STATUS btreeDestroyData(PNODE);
+NV_STATUS btreeDestroyNodes(PNODE);
+
+#endif // _BTREE_H_
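The NODE structure doubles as the caller's embedding handle: the caller fills in Data and the [keyStart, keyEnd] range, then links the node into a tree rooted at a PNODE. A sketch of the presumed calling convention (the argument order for btreeSearch is inferred from the prototype types and should be treated as an assumption):

    // Illustrative use of the btree API; error handling abbreviated.
    static int payload;
    NODE  node  = {0};
    PNODE pRoot = NULL;

    node.Data     = &payload;   // assumption: caller-owned payload pointer
    node.keyStart = 0x1000;
    node.keyEnd   = 0x1FFF;
    if (btreeInsert(&node, &pRoot) != NV_OK)
        return;

    PNODE pFound = NULL;
    if (btreeSearch(0x1800, &pFound, pRoot) == NV_OK)
    {
        // pFound now points at node, since 0x1800 falls in its key range.
    }
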
diff --git a/src/nvidia/inc/libraries/containers/eheap_old.h b/src/nvidia/inc/libraries/containers/eheap_old.h
new file mode 100644
index 0000000..77ed7c7
--- /dev/null
+++ b/src/nvidia/inc/libraries/containers/eheap_old.h
@@ -0,0 +1,113 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef EHEAP_H
+#define EHEAP_H
+
+/*!
+ * @brief
+ *    EHEAP is a generic extent allocator: an abstract E(xtent) Heap.
+ */
+
+#include "nvtypes.h"
+#include "nvos.h"
+#include "containers/btree.h"
+#include "utils/nvrange.h"
+
+typedef struct OBJEHEAP OBJEHEAP;
+
+typedef struct EMEMBLOCK EMEMBLOCK;
+struct EMEMBLOCK
+{
+    NvU64       begin;
+    NvU64       end;
+    NvU64       align;
+    NvU32       growFlags;
+    NvU32       refCount;
+    NvU32       owner;
+    NODE        node;
+    EMEMBLOCK  *prevFree;
+    EMEMBLOCK  *nextFree;
+    EMEMBLOCK  *prev;
+    EMEMBLOCK  *next;
+    void       *pData;
+};
+
+typedef NvBool EHeapOwnershipComparator(void*, void*);
+
+typedef NV_STATUS (*EHeapDestruct)(OBJEHEAP *);
+typedef NV_STATUS (*EHeapAlloc)(OBJEHEAP *, NvU32, NvU32 *, NvU64 *, NvU64 *, NvU64, NvU64, EMEMBLOCK **, void*, EHeapOwnershipComparator*);
+typedef NV_STATUS (*EHeapFree)(OBJEHEAP *, NvU64);
+typedef void (*EHeapInfo)(OBJEHEAP *, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU32 *, NvU64 *);
+typedef void (*EHeapInfoForRange)(OBJEHEAP *, NV_RANGE, NvU64 *, NvU64 *, NvU32 *, NvU64 *);
+typedef NV_STATUS (*EHeapGetSize)(OBJEHEAP *, NvU64 *);
+typedef NV_STATUS (*EHeapGetFree)(OBJEHEAP *, NvU64 *);
+typedef NV_STATUS (*EHeapGetBase)(OBJEHEAP *, NvU64 *);
+typedef EMEMBLOCK *(*EHeapGetBlock)(OBJEHEAP *, NvU64, NvBool bReturnFreeBlock);
+typedef NV_STATUS (*EHeapSetAllocRange)(OBJEHEAP *, NvU64 rangeLo, NvU64 rangeHi);
+typedef NV_STATUS (*EHeapTraversalFn)(OBJEHEAP *, void *pEnv, EMEMBLOCK *, NvU32 *pContinue, NvU32 *pInvalCursor);
+typedef NV_STATUS (*EHeapTraverse)(OBJEHEAP *, void *pEnv, EHeapTraversalFn, NvS32 direction);
+typedef NvU32 (*EHeapGetNumBlocks)(OBJEHEAP *);
+typedef NV_STATUS (*EHeapSetOwnerIsolation)(OBJEHEAP *, NvBool bEnable, NvU32 granularity);
+
+struct OBJEHEAP
+{
+    // Public heap interface methods
+    EHeapDestruct eheapDestruct;
+    EHeapAlloc eheapAlloc;
+    EHeapFree eheapFree;
+    EHeapInfo eheapInfo;
+    EHeapInfoForRange eheapInfoForRange;
+    EHeapGetSize eheapGetSize;
+    EHeapGetFree eheapGetFree;
+    EHeapGetBase eheapGetBase;
+    EHeapGetBlock eheapGetBlock;
+    EHeapSetAllocRange eheapSetAllocRange;
+    EHeapTraverse eheapTraverse;
+    EHeapGetNumBlocks eheapGetNumBlocks;
+    EHeapSetOwnerIsolation eheapSetOwnerIsolation;
+
+    // private data
+    NvU64 base;
+    NvU64 total;
+    NvU64 free;
+    NvU64 rangeLo;
+    NvU64 rangeHi;
+    NvBool bOwnerIsolation;
+    NvU32 ownerGranularity;
+    EMEMBLOCK *pBlockList;
+    EMEMBLOCK *pFreeBlockList;
+    NvU32 memHandle;
NvU32 numBlocks; + NvU32 sizeofMemBlock; + PNODE pBlockTree; + // user can specify num of EMEMBLOCK structs to + // be allocated at heap construction time so that + // we will not call portMemAllocNonPaged during eheapAlloc. + NvU32 numPreAllocMemStruct; + EMEMBLOCK *pFreeMemStructList; + EMEMBLOCK *pPreAllocAddr; +}; + +extern void constructObjEHeap(OBJEHEAP *, NvU64, NvU64, NvU32, NvU32); + +#endif // EHEAP_H diff --git a/src/nvidia/inc/libraries/containers/list.h b/src/nvidia/inc/libraries/containers/list.h new file mode 100644 index 0000000..629f33c --- /dev/null +++ b/src/nvidia/inc/libraries/containers/list.h @@ -0,0 +1,337 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_CONTAINERS_LIST_H_ +#define _NV_CONTAINERS_LIST_H_ + +// Contains mix of C/C++ declarations. +#include "containers/type_safety.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvmisc.h" +#include "nvport/nvport.h" +#include "utils/nvassert.h" + +/** + * @defgroup NV_CONTAINERS_LIST List + * + * @brief List (sequence) of user-defined values. + * + * @details Order of values is not necessarily increasing or sorted, but order is + * preserved across mutation. Please see + * https://en.wikipedia.org/wiki/Sequence for a formal definition. + * + * The provided interface is abstract, decoupling the user from the underlying + * list implementation. Two options are available with regard to memory + * management, intrusive and non-intrusive. Users can select either one based + * on different situations. Despite the two versions of the list, the following + * implementation constraints are guaranteed. + * + * - Time Complexity: + * * Operations are \b O(1), + * * Unless stated otherwise. + * + * - Memory Usage: + * * \b O(N) memory is required for N values. + * * Intrusive and non-intrusive variants are provided. + * See @ref mem-ownership for further details. + * + * - Synchronization: + * * \b None. The container is not thread-safe. + * * Locking must be handled by the user if required. 
+ */ + +#define MAKE_LIST(listTypeName, dataType) \ + typedef union listTypeName##Iter \ + { \ + dataType *pValue; \ + ListIterBase iter; \ + } listTypeName##Iter; \ + typedef union listTypeName \ + { \ + NonIntrusiveList real; \ + CONT_TAG_TYPE(ListBase, dataType, listTypeName##Iter); \ + CONT_TAG_NON_INTRUSIVE(dataType); \ + } listTypeName + +#define DECLARE_LIST(listTypeName) \ + typedef union listTypeName##Iter listTypeName##Iter; \ + typedef union listTypeName listTypeName + +#define MAKE_INTRUSIVE_LIST(listTypeName, dataType, node) \ + typedef union listTypeName##Iter \ + { \ + dataType *pValue; \ + ListIterBase iter; \ + } listTypeName##Iter; \ + typedef union listTypeName \ + { \ + IntrusiveList real; \ + CONT_TAG_TYPE(ListBase, dataType, listTypeName##Iter); \ + CONT_TAG_INTRUSIVE(dataType, node); \ + } listTypeName \ + +#define DECLARE_INTRUSIVE_LIST(listTypeName) \ + typedef union listTypeName##Iter listTypeName##Iter; \ + typedef union listTypeName listTypeName + +/** +* @brief Internal node structure to embed within intrusive list values. +*/ +typedef struct ListNode ListNode; + +/** + * @brief Base type common to both intrusive and non-intrusive variants. + */ +typedef struct ListBase ListBase; + +/** + * @brief Non-intrusive list (container-managed memory). + */ +typedef struct NonIntrusiveList NonIntrusiveList; + +/** + * @brief Intrusive list (user-managed memory). + */ +typedef struct IntrusiveList IntrusiveList; + +/** + * @brief Iterator over a range of list values. + * + * See @ref iterators for usage details. + */ +typedef struct ListIterBase ListIterBase; + +struct ListNode +{ + /// @privatesection + ListNode *pPrev; + ListNode *pNext; +#if PORT_IS_CHECKED_BUILD + ListBase *pList; +#endif +}; + +struct ListIterBase +{ + void *pValue; + ListBase *pList; + ListNode *pNode; + ListNode *pLast; +#if PORT_IS_CHECKED_BUILD + NvU32 versionNumber; + NvBool bValid; +#endif +}; + +ListIterBase listIterRange_IMPL(ListBase *pList, void *pFirst, void *pLast); +CONT_VTABLE_DECL(ListBase, ListIterBase); + +struct ListBase +{ + CONT_VTABLE_FIELD(ListBase); + ListNode *pHead; + ListNode *pTail; + NvU32 count; + NvS32 nodeOffset; +#if PORT_IS_CHECKED_BUILD + NvU32 versionNumber; +#endif +}; + +struct NonIntrusiveList +{ + ListBase base; + PORT_MEM_ALLOCATOR *pAllocator; + NvU32 valueSize; +}; + +struct IntrusiveList +{ + ListBase base; +}; + +#define listInit(pList, pAllocator) \ + listInit_IMPL(&((pList)->real), pAllocator, sizeof(*(pList)->valueSize)) + +#define listInitIntrusive(pList) \ + listInitIntrusive_IMPL(&((pList)->real), sizeof(*(pList)->nodeOffset)) + +#define listDestroy(pList) \ + CONT_DISPATCH_ON_KIND(pList, \ + listDestroy_IMPL((NonIntrusiveList*)&((pList)->real)), \ + listDestroyIntrusive_IMPL(&((pList)->real.base)), \ + contDispatchVoid_STUB()) + +#define listCount(pList) \ + listCount_IMPL(&((pList)->real).base) + +#define listInsertNew(pList, pNext) \ + CONT_CAST_ELEM(pList, \ + listInsertNew_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pNext)), listIsValid_IMPL) + +#define listAppendNew(pList) \ + CONT_CAST_ELEM(pList, listAppendNew_IMPL(&(pList)->real), listIsValid_IMPL) + +#define listPrependNew(pList) \ + CONT_CAST_ELEM(pList, listPrependNew_IMPL(&(pList)->real), listIsValid_IMPL) + +#define listInsertValue(pList, pNext, pValue) \ + CONT_CAST_ELEM(pList, \ + listInsertValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pNext), \ + CONT_CHECK_ARG(pList, pValue)), listIsValid_IMPL) + +#define listAppendValue(pList, pValue) \ + CONT_CAST_ELEM(pList, \ + 
listAppendValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue)), listIsValid_IMPL) + +#define listPrependValue(pList, pValue) \ + CONT_CAST_ELEM(pList, \ + listPrependValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue)), listIsValid_IMPL) + +#define listInsertExisting(pList, pNext, pValue) \ + listInsertExisting_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pNext), \ + CONT_CHECK_ARG(pList, pValue)) + +#define listAppendExisting(pList, pValue) \ + listAppendExisting_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue)) + +#define listPrependExisting(pList, pValue) \ + listPrependExisting_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue)) + +#define listRemove(pList, pValue) \ + CONT_DISPATCH_ON_KIND(pList, \ + listRemove_IMPL((NonIntrusiveList*)&((pList)->real), \ + CONT_CHECK_ARG(pList, pValue)), \ + listRemoveIntrusive_IMPL(&((pList)->real).base, \ + CONT_CHECK_ARG(pList, pValue)), \ + contDispatchVoid_STUB()) + +#define listRemoveFirstByValue(pList, pValue) \ + listRemoveFirstByValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue)) + +#define listRemoveAllByValue(pList, pValue) \ + listRemoveAllByValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue)) + +#define listClear(pList) \ + listDestroy(pList) + +#define listFindByValue(pList, pValue) \ + CONT_CAST_ELEM(pList, \ + listFindByValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue)), listIsValid_IMPL) + +#define listHead(pList) \ + CONT_CAST_ELEM(pList, listHead_IMPL(&((pList)->real).base), listIsValid_IMPL) + +#define listTail(pList) \ + CONT_CAST_ELEM(pList, listTail_IMPL(&((pList)->real).base), listIsValid_IMPL) + +#define listNext(pList, pValue) \ + CONT_CAST_ELEM(pList, \ + listNext_IMPL(&((pList)->real).base, \ + CONT_CHECK_ARG(pList, pValue)), listIsValid_IMPL) + +#define listPrev(pList, pValue) \ + CONT_CAST_ELEM(pList, \ + listPrev_IMPL(&((pList)->real).base, \ + CONT_CHECK_ARG(pList, pValue)), listIsValid_IMPL) + +#define listIterAll(pList) \ + listIterRange(pList, listHead(pList), listTail(pList)) + +#define listIterRange(pList, pFirst, pLast) \ + CONT_ITER_RANGE(pList, &listIterRange_IMPL, \ + CONT_CHECK_ARG(pList, pFirst), CONT_CHECK_ARG(pList, pLast), listIsValid_IMPL) + +#define listIterNext(pIt) \ + listIterNext_IMPL(&((pIt)->iter)) + +void listInit_IMPL(NonIntrusiveList *pList, PORT_MEM_ALLOCATOR *pAllocator, + NvU32 valueSize); +void listInitIntrusive_IMPL(IntrusiveList *pList, NvS32 nodeOffset); +void listDestroy_IMPL(NonIntrusiveList *pList); +void listDestroyIntrusive_IMPL(ListBase *pList); + +NvU32 listCount_IMPL(ListBase *pList); +void *listInsertNew_IMPL(NonIntrusiveList *pList, void *pNext); +void *listAppendNew_IMPL(NonIntrusiveList *pList); +void *listPrependNew_IMPL(NonIntrusiveList *pList); +void *listInsertValue_IMPL(NonIntrusiveList *pList, + void *pNext, + const void *pValue); +void *listAppendValue_IMPL(NonIntrusiveList *pList, const void *pValue); +void *listPrependValue_IMPL(NonIntrusiveList *pList, const void *pValue); +void listInsertExisting_IMPL(IntrusiveList *pList, void *pNext, void *pValue); +void listAppendExisting_IMPL(IntrusiveList *pList, void *pValue); +void listPrependExisting_IMPL(IntrusiveList *pList, void *pValue); +void listRemove_IMPL(NonIntrusiveList *pList, void *pValue); +void listRemoveIntrusive_IMPL(ListBase *pList, void *pValue); +void listRemoveFirstByValue_IMPL(NonIntrusiveList *pList, void *pValue); +void listRemoveAllByValue_IMPL(NonIntrusiveList *pList, void *pValue); + +void *listFindByValue_IMPL(NonIntrusiveList *pList, void 
*pValue); +void *listHead_IMPL(ListBase *pList); +void *listTail_IMPL(ListBase *pList); +void *listNext_IMPL(ListBase *pList, void *pValue); +void *listPrev_IMPL(ListBase *pList, void *pValue); + +ListIterBase listIterAll_IMPL(ListBase *pList); +ListIterBase listIterRange_IMPL(ListBase *pList, void *pFirst, void *pLast); +NvBool listIterNext_IMPL(ListIterBase *pIt); + +static NV_FORCEINLINE ListNode * +listValueToNode(ListBase *pList, void *pValue) +{ + if (NULL == pList) return NULL; + if (NULL == pValue) return NULL; + return (ListNode*)((NvU8*)pValue + pList->nodeOffset); +} + +static NV_FORCEINLINE void * +listNodeToValue(ListBase *pList, ListNode *pNode) +{ + if (NULL == pList) return NULL; + if (NULL == pNode) return NULL; + return (NvU8*)pNode - pList->nodeOffset; +} + +NvBool listIsValid_IMPL(void *pMap); + +#ifdef __cplusplus +} +#endif + +#endif // _NV_CONTAINERS_LIST_H_ diff --git a/src/nvidia/inc/libraries/containers/map.h b/src/nvidia/inc/libraries/containers/map.h new file mode 100644 index 0000000..861ab1d --- /dev/null +++ b/src/nvidia/inc/libraries/containers/map.h @@ -0,0 +1,303 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_CONTAINERS_MAP_H_ +#define _NV_CONTAINERS_MAP_H_ + +// Contains mix of C/C++ declarations. +#include "containers/type_safety.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvmisc.h" +#include "nvport/nvport.h" +#include "utils/nvassert.h" + +/** + * @defgroup NV_CONTAINERS_MAP Map + * + * @brief Map (ordered) from 64-bit integer keys to user-defined values. + * + * @details The provided interface is abstract, decoupling the user from the + * underlying ordered map implementation. Two options are available with regard + * to memory management, intrusive and non-intrusive. Users can select either + * one based on different situations. Despite the two versions of the map, + * the following implementation constraints are guaranteed. + * + * - Time Complexity: + * * Operations are \b O(log N), + * * Unless stated otherwise, + * * Where N is the number of values in the map. + * + * - Memory Usage: + * * \b O(N) memory is required for N values. + * * Intrusive and non-intrusive variants are provided. + * See @ref mem-ownership for further details. + * + * - Synchronization: + * * \b None. The container is not thread-safe. 
+ * * Locking must be handled by the user if required. + * + */ + +#define MAKE_MAP(mapTypeName, dataType) \ + typedef union mapTypeName##Iter \ + { \ + dataType *pValue; \ + MapIterBase iter; \ + } mapTypeName##Iter; \ + typedef union mapTypeName \ + { \ + NonIntrusiveMap real; \ + CONT_TAG_TYPE(MapBase, dataType, mapTypeName##Iter); \ + CONT_TAG_NON_INTRUSIVE(dataType); \ + } mapTypeName + +#define DECLARE_MAP(mapTypeName) \ + typedef union mapTypeName##Iter mapTypeName##Iter; \ + typedef union mapTypeName mapTypeName + +#define MAKE_INTRUSIVE_MAP(mapTypeName, dataType, node) \ + typedef union mapTypeName##Iter \ + { \ + dataType *pValue; \ + MapIterBase iter; \ + } mapTypeName##Iter; \ + typedef union mapTypeName \ + { \ + IntrusiveMap real; \ + CONT_TAG_TYPE(MapBase, dataType, mapTypeName##Iter); \ + CONT_TAG_INTRUSIVE(dataType, node); \ + } mapTypeName + +#define DECLARE_INTRUSIVE_MAP(mapTypeName) \ + typedef union mapTypeName##Iter mapTypeName##Iter; \ + typedef union mapTypeName mapTypeName + +/** + * @brief Internal node structure to embed within intrusive map values. + */ +typedef struct MapNode MapNode; + +/** + * @brief Base type common to both intrusive and non-intrusive variants. + */ +typedef struct MapBase MapBase; + +/** + * @brief Non-intrusive map (container-managed memory). + */ +typedef struct NonIntrusiveMap NonIntrusiveMap; + +/** + * @brief Intrusive map (user-managed memory). + */ +typedef struct IntrusiveMap IntrusiveMap; + +/** + * @brief Iterator over a range of map values. + * + * See @ref iterators for usage details. + */ +typedef struct MapIterBase MapIterBase; + +struct MapNode +{ + /// @privatesection + NvU64 key; + MapNode *pParent; + MapNode *pLeft; + MapNode *pRight; + NvBool bIsRed; +#if PORT_IS_CHECKED_BUILD + MapBase *pMap; +#endif +}; + +struct MapIterBase +{ + void *pValue; + MapBase *pMap; + MapNode *pNode; + MapNode *pLast; +#if PORT_IS_CHECKED_BUILD + NvU32 versionNumber; + NvBool bValid; +#endif +}; + +MapIterBase mapIterRange_IMPL(MapBase *pMap, void *pFirst, void *pLast); +CONT_VTABLE_DECL(MapBase, MapIterBase); + +struct MapBase +{ + CONT_VTABLE_FIELD(MapBase); + MapNode *pRoot; + NvS32 nodeOffset; + NvU32 count; +#if PORT_IS_CHECKED_BUILD + NvU32 versionNumber; +#endif +}; + +struct NonIntrusiveMap +{ + MapBase base; + PORT_MEM_ALLOCATOR *pAllocator; + NvU32 valueSize; +}; + +struct IntrusiveMap +{ + MapBase base; +}; + +#define mapInit(pMap, pAllocator) \ + mapInit_IMPL(&((pMap)->real), pAllocator, sizeof(*(pMap)->valueSize)) + +#define mapInitIntrusive(pMap) \ + mapInitIntrusive_IMPL(&((pMap)->real), sizeof(*(pMap)->nodeOffset)) + +#define mapDestroy(pMap) \ + CONT_DISPATCH_ON_KIND(pMap, \ + mapDestroy_IMPL((NonIntrusiveMap*)&((pMap)->real)), \ + mapDestroyIntrusive_IMPL(&((pMap)->real.base)), \ + contDispatchVoid_STUB()) + +#define mapCount(pMap) \ + mapCount_IMPL(&((pMap)->real).base) + +#define mapKey(pMap, pValue) \ + mapKey_IMPL(&((pMap)->real).base, pValue) + +#define mapInsertNew(pMap, key) \ + CONT_CAST_ELEM(pMap, mapInsertNew_IMPL(&(pMap)->real, key), mapIsValid_IMPL) + +#define mapInsertValue(pMap, key, pValue) \ + CONT_CAST_ELEM(pMap, \ + mapInsertValue_IMPL(&(pMap)->real, key, \ + CONT_CHECK_ARG(pMap, pValue)), mapIsValid_IMPL) + +#define mapInsertExisting(pMap, key, pValue) \ + mapInsertExisting_IMPL(&(pMap)->real, key, \ + CONT_CHECK_ARG(pMap, pValue)) + +#define mapRemove(pMap, pValue) \ + CONT_DISPATCH_ON_KIND(pMap, \ + mapRemove_IMPL((NonIntrusiveMap*)&((pMap)->real), \ + CONT_CHECK_ARG(pMap, pValue)), \ + 
mapRemoveIntrusive_IMPL(&((pMap)->real).base, \ + CONT_CHECK_ARG(pMap, pValue)), \ + contDispatchVoid_STUB()) + +#define mapClear(pMap) \ + mapDestroy(pMap) + +#define mapRemoveByKey(pMap, key) \ + CONT_DISPATCH_ON_KIND(pMap, \ + mapRemoveByKey_IMPL((NonIntrusiveMap*)&((pMap)->real), key), \ + mapRemoveByKeyIntrusive_IMPL(&((pMap)->real).base, key), \ + contDispatchVoid_STUB()) + +#define mapFind(pMap, key) \ + CONT_CAST_ELEM(pMap, mapFind_IMPL(&((pMap)->real).base, key), mapIsValid_IMPL) + +#define mapFindGEQ(pMap, keyMin) \ + CONT_CAST_ELEM(pMap, \ + mapFindGEQ_IMPL(&((pMap)->real).base, keyMin), mapIsValid_IMPL) + +#define mapFindLEQ(pMap, keyMax) \ + CONT_CAST_ELEM(pMap, \ + mapFindLEQ_IMPL(&((pMap)->real).base, keyMax), mapIsValid_IMPL) + +#define mapNext(pMap, pValue) \ + CONT_CAST_ELEM(pMap, \ + mapNext_IMPL(&((pMap)->real).base, \ + CONT_CHECK_ARG(pMap, pValue)), mapIsValid_IMPL) + +#define mapPrev(pMap, pValue) \ + CONT_CAST_ELEM(pMap, \ + mapPrev_IMPL(&((pMap)->real).base, \ + CONT_CHECK_ARG(pMap, pValue)), mapIsValid_IMPL) + +#define mapIterAll(pMap) \ + mapIterRange(pMap, mapFindGEQ(pMap, 0), mapFindLEQ(pMap, NV_U64_MAX)) + +#define mapIterRange(pMap, pFirst, pLast) \ + CONT_ITER_RANGE(pMap, &mapIterRange_IMPL, \ + CONT_CHECK_ARG(pMap, pFirst), CONT_CHECK_ARG(pMap, pLast), mapIsValid_IMPL) + +#define mapIterNext(pIt) \ + mapIterNext_IMPL(&((pIt)->iter)) + +void mapInit_IMPL(NonIntrusiveMap *pMap, + PORT_MEM_ALLOCATOR *pAllocator, NvU32 valueSize); +void mapInitIntrusive_IMPL(IntrusiveMap *pMap, NvS32 nodeOffset); +void mapDestroy_IMPL(NonIntrusiveMap *pMap); +void mapDestroyIntrusive_IMPL(MapBase *pMap); + +NvU32 mapCount_IMPL(MapBase *pMap); +NvU64 mapKey_IMPL(MapBase *pMap, void *pValue); + +void *mapInsertNew_IMPL(NonIntrusiveMap *pMap, NvU64 key); +void *mapInsertValue_IMPL(NonIntrusiveMap *pMap, NvU64 key, const void *pValue); +NvBool mapInsertExisting_IMPL(IntrusiveMap *pMap, NvU64 key, void *pValue); +void mapRemove_IMPL(NonIntrusiveMap *pMap, void *pValue); +void mapRemoveIntrusive_IMPL(MapBase *pMap, void *pValue); +void mapRemoveByKey_IMPL(NonIntrusiveMap *pMap, NvU64 key); +void mapRemoveByKeyIntrusive_IMPL(MapBase *pMap, NvU64 key); + +void *mapFind_IMPL(MapBase *pMap, NvU64 key); +void *mapFindGEQ_IMPL(MapBase *pMap, NvU64 keyMin); +void *mapFindLEQ_IMPL(MapBase *pMap, NvU64 keyMax); +void *mapNext_IMPL(MapBase *pMap, void *pValue); +void *mapPrev_IMPL(MapBase *pMap, void *pValue); + +MapIterBase mapIterAll_IMPL(MapBase *pMap); +NvBool mapIterNext_IMPL(MapIterBase *pIt); + +static NV_FORCEINLINE MapNode * +mapValueToNode(MapBase *pMap, void *pValue) +{ + if (NULL == pMap) return NULL; + if (NULL == pValue) return NULL; + return (MapNode*)((NvU8*)pValue + pMap->nodeOffset); +} + +static NV_FORCEINLINE void * +mapNodeToValue(MapBase *pMap, MapNode *pNode) +{ + if (NULL == pMap) return NULL; + if (NULL == pNode) return NULL; + return (NvU8*)pNode - pMap->nodeOffset; +} + +NvBool mapIsValid_IMPL(void *pMap); + +#ifdef __cplusplus +} +#endif + +#endif // _NV_CONTAINERS_MAP_H_ diff --git a/src/nvidia/inc/libraries/containers/multimap.h b/src/nvidia/inc/libraries/containers/multimap.h new file mode 100644 index 0000000..c6c961c --- /dev/null +++ b/src/nvidia/inc/libraries/containers/multimap.h @@ -0,0 +1,301 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
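For illustration, a minimal usage sketch of the non-intrusive map API defined in containers/map.h above. `IntItem`, `IntMap`, and the allocator parameter are assumptions made for this example; RM code would typically obtain an allocator from NvPort (e.g. portMemAllocatorGetGlobalNonPaged()), and error handling is elided:

```c
#include "containers/map.h"

typedef struct { NvU32 data; } IntItem;   // hypothetical value type
MAKE_MAP(IntMap, IntItem);                // generates IntMap and IntMapIter

static void mapUsageSketch(PORT_MEM_ALLOCATOR *pAlloc)
{
    IntMap map;
    IntItem *pItem;
    IntMapIter it;

    mapInit(&map, pAlloc);              // container-managed (non-intrusive) storage

    pItem = mapInsertNew(&map, 42);     // O(log N): allocate a value under key 42
    if (pItem != NULL)
        pItem->data = 7;

    pItem = mapFind(&map, 42);          // O(log N) lookup, NULL if absent

    it = mapIterAll(&map);              // iterate in increasing key order
    while (mapIterNext(&it))
    {
        IntItem *pCur = it.pValue;
        (void)pCur;
    }

    mapRemoveByKey(&map, 42);
    mapDestroy(&map);                   // frees all remaining values
}
```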
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_CONTAINERS_MULTIMAP_H_ +#define _NV_CONTAINERS_MULTIMAP_H_ + +// Contains mix of C/C++ declarations. +#include "containers/type_safety.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "containers/map.h" + +/** + * @defgroup NV_CONTAINERS_MULTIMAP Multimap + * + * @brief Two-layer multimap (ordered) from pairs of 64-bit unsigned integer + * keys to user-defined values. + * + * @details The provided interface is abstract, decoupling the user from the + * underlying ordered multimap implementation. Currently, memory management is + * limited to non-intrusive container-managed memory. The following + * implementation constraints are guaranteed. + * + * - Time Complexity: + * * Operations are \b O(log M + log N), + * * Unless stated otherwise, + * * Where M is the number of submaps and N is the total number of values in + * the map. + * + * - Memory Usage: + * * \b O(M + N) memory is required for M submaps and N values. + * * Only a non-intrusive variant is provided. + * See @ref mem-ownership for further details. + * + * - Synchronization: + * * \b None. The container is not thread-safe. + * * Locking must be handled by the user if required. 
+ * + */ + +#define MAKE_MULTIMAP(multimapTypeName, dataType) \ + typedef struct multimapTypeName##Leaf \ + { \ + dataType data; \ + MultimapNode node; \ + } multimapTypeName##Leaf; \ + MAKE_INTRUSIVE_MAP(multimapTypeName##Submap, multimapTypeName##Leaf, \ + node.submapNode); \ + MAKE_MAP(multimapTypeName##Supermap, multimapTypeName##Submap); \ + typedef union multimapTypeName##Iter \ + { \ + dataType *pValue; \ + MultimapIterBase iter; \ + } multimapTypeName##Iter; \ + typedef union multimapTypeName \ + { \ + CONT_TAG_TYPE(MultimapBase, dataType, multimapTypeName##Iter); \ + struct { MultimapBase base; } real; \ + struct \ + { \ + /* This field simply aligns map with the one in MultimapBase */ \ + CONT_VTABLE_FIELD(MultimapBase); \ + multimapTypeName##Supermap map; \ + } type; \ + CONT_TAG_NON_INTRUSIVE(dataType); \ + struct {char _[NV_OFFSETOF(multimapTypeName##Leaf, node)];} *nodeOffset; \ + struct {char _[sizeof(multimapTypeName##Submap)];} *submapSize; \ + } multimapTypeName; + +#define DECLARE_MULTIMAP(multimapTypeName) \ + typedef struct multimapTypeName##Leaf multimapTypeName##Leaf; \ + DECLARE_INTRUSIVE_MAP(multimapTypeName##Submap); \ + DECLARE_MAP(multimapTypeName##Supermap); \ + typedef union multimapTypeName##Iter multimapTypeName##Iter; \ + typedef union multimapTypeName multimapTypeName + +/** + * @brief Internal node structure associated with multimap values. + */ +typedef struct MultimapNode MultimapNode; + +/** + * @brief Base type common to all multimap iterator types. + */ +typedef struct MultimapIterBase MultimapIterBase; + +/** + * @brief Base type common to all multimap types. + */ +typedef struct MultimapBase MultimapBase; + +struct MultimapNode +{ + void *pSubmap; + MapNode submapNode; +}; + +struct MultimapIterBase +{ + void *pValue; + MultimapBase *pMultimap; + void *pNext; + void *pLast; +}; + +CONT_VTABLE_DECL(MultimapBase, MultimapIterBase); + +struct MultimapBase +{ + CONT_VTABLE_FIELD(MultimapBase); + NonIntrusiveMap map; + NvS32 multimapNodeOffset; + NvU32 itemCount; + NvU32 itemSize; +}; + + +#define multimapInit(pMultimap, pAllocator) \ + multimapInit_IMPL(&(pMultimap)->real.base, pAllocator, \ + sizeof(*(pMultimap)->valueSize), \ + sizeof(*(pMultimap)->nodeOffset), \ + sizeof(*(pMultimap)->submapSize)) + +#define multimapDestroy(pMultimap) \ + multimapDestroy_IMPL(&(pMultimap)->real.base) + +#define multimapClear(pMultimap) \ + multimapClear_IMPL(&(pMultimap)->real.base) + +#define multimapCountSubmaps(pMultimap) \ + mapCount(&(pMultimap)->type.map) + +#define multimapCountItems(pMultimap) \ + (pMultimap)->real.base.itemCount + +#define multimapFindSubmap(pMultimap, submapKey) \ + CONT_CAST_ELEM(&(pMultimap)->type.map, \ + multimapFindSubmap_IMPL(&(pMultimap)->real.base, submapKey), multimapIsValid_IMPL) + +#define multimapFindSubmapLEQ(pMultimap, submapKey) \ + CONT_CAST_ELEM(&(pMultimap)->type.map, \ + multimapFindSubmapLEQ_IMPL(&(pMultimap)->real.base, submapKey), multimapIsValid_IMPL) + +#define multimapFindSubmapGEQ(pMultimap, submapKey) \ + CONT_CAST_ELEM(&(pMultimap)->type.map, \ + multimapFindSubmapGEQ_IMPL(&(pMultimap)->real.base, submapKey), multimapIsValid_IMPL) + +#define multimapCountSubmapItems(pMultimap, pSubmap) \ + mapCount(pSubmap) + +#define multimapInsertItemNew(pMultimap, submapKey, itemKey) \ + CONT_CAST_ELEM(pMultimap, \ + multimapInsertItemNew_IMPL(&(pMultimap)->real.base, submapKey, itemKey), multimapIsValid_IMPL) + +#define multimapInsertItemValue(pMultimap, submapKey, itemKey, pValue) \ + CONT_CAST_ELEM(pMultimap, \ + 
multimapInsertItemValue_IMPL(&(pMultimap)->real.base, \ + submapKey, itemKey, pValue), multimapIsValid_IMPL) + +#define multimapInsertSubmap(pMultimap, submapKey) \ + CONT_CAST_ELEM(&(pMultimap)->type.map, \ + multimapInsertSubmap_IMPL(&(pMultimap)->real.base, submapKey), multimapIsValid_IMPL) + +#define multimapFindItem(pMultimap, submapKey, itemKey) \ + CONT_CAST_ELEM(pMultimap, \ + multimapFindItem_IMPL(&(pMultimap)->real.base, submapKey, itemKey), multimapIsValid_IMPL) + +#define multimapRemoveItem(pMultimap, pValue) \ + multimapRemoveItem_IMPL(&(pMultimap)->real.base, pValue) + +#define multimapRemoveSubmap(pMultimap, pSubmap) \ + multimapRemoveSubmap_IMPL(&(pMultimap)->real.base, &(pSubmap)->real.base) + +#define multimapRemoveItemByKey(pMultimap, submapKey, itemKey) \ + multimapRemoveItemByKey_IMPL(&(pMultimap)->real.base, submapKey, itemKey) + +#define multimapNextItem(pMultimap, pValue) \ + CONT_CAST_ELEM(pMultimap, \ + multimapNextItem_IMPL(&(pMultimap)->real.base, pValue), multimapIsValid_IMPL) + +#define multimapPrevItem(pMultimap, pValue) \ + CONT_CAST_ELEM(pMultimap, \ + multimapPrevItem_IMPL(&(pMultimap)->real.base, pValue), multimapIsValid_IMPL) + +#define multimapFirstItem(pMultimap) \ + CONT_CAST_ELEM(pMultimap, multimapFirstItem_IMPL(&(pMultimap)->real.base), multimapIsValid_IMPL) + +#define multimapLastItem(pMultimap) \ + CONT_CAST_ELEM(pMultimap, multimapLastItem_IMPL(&(pMultimap)->real.base), multimapIsValid_IMPL) + +#define multimapItemIterAll(pMultimap) \ + multimapItemIterRange(pMultimap, \ + multimapFirstItem(pMultimap), multimapLastItem(pMultimap)) + +#define multimapItemIterRange(pMultimap, pFirst, pLast) \ + CONT_ITER_RANGE(pMultimap, multimapItemIterRange_IMPL, \ + CONT_CHECK_ARG(pMultimap, pFirst), CONT_CHECK_ARG(pMultimap, pLast), multimapIsValid_IMPL) + +#define multimapSubmapIterItems(pMultimap, pSubmap) \ + multimapItemIterRange(pMultimap, \ + &mapFindGEQ(pSubmap, 0)->data, &mapFindLEQ(pSubmap, NV_U64_MAX)->data) + +#define multimapItemIterNext(pIt) \ + multimapItemIterNext_IMPL(&(pIt)->iter) + +#define multimapSubmapIterAll(pMultimap) \ + mapIterAll(&(pMultimap)->type.map) + +#define multimapSubmapIterRange(pMultimap, pFirst, pLast) \ + mapIterRange(&(pMultimap)->type.map, pFirst, pLast) + +#define multimapSubmapIterNext(pIt) \ + mapIterNext(pIt) + +#define multimapItemKey(pMultimap, pValue) \ + multimapValueToNode(&(pMultimap)->real.base, pValue)->submapNode.key + +#define multimapSubmapKey(pMultimap, pSubmap) \ + mapKey(&(pMultimap)->type.map, pSubmap) + +void multimapInit_IMPL(MultimapBase *pBase, PORT_MEM_ALLOCATOR *pAllocator, + NvU32 valueSize, NvS32 nodeOffset, NvU32 submapSize); +void multimapRemoveSubmap_IMPL(MultimapBase *pMultimap, MapBase *submap); +void multimapDestroy_IMPL(MultimapBase *pBase); +void multimapClear_IMPL(MultimapBase *pBase); + +void *multimapInsertSubmap_IMPL(MultimapBase *pBase, NvU64 submapKey); + +void *multimapFindSubmap_IMPL(MultimapBase *pBase, NvU64 submapKey); +void *multimapFindSubmapLEQ_IMPL(MultimapBase *pBase, NvU64 submapKey); +void *multimapFindSubmapGEQ_IMPL(MultimapBase *pBase, NvU64 submapKey); + +void *multimapInsertItemNew_IMPL(MultimapBase *pBase, NvU64 submapKey, + NvU64 itemKey); +void *multimapInsertItemValue_IMPL(MultimapBase *pBase, + NvU64 submapKey, + NvU64 itemKey, + const void *pValue); + +void *multimapFindItem_IMPL(MultimapBase *pBase, NvU64 submapKey, + NvU64 itemKey); + +void multimapRemoveItem_IMPL(MultimapBase *pBase, void *pLeaf); +void multimapRemoveItemByKey_IMPL(MultimapBase *pBase, 
NvU64 submapKey, + NvU64 itemKey); + +void *multimapNextItem_IMPL(MultimapBase *pBase, void *pValue); +void *multimapPrevItem_IMPL(MultimapBase *pBase, void *pValue); + +void *multimapFirstItem_IMPL(MultimapBase *pBase); +void *multimapLastItem_IMPL(MultimapBase *pBase); + +MultimapIterBase multimapItemIterRange_IMPL(MultimapBase *pBase, + void *pFirst, void *pLast); +NvBool multimapItemIterNext_IMPL(MultimapIterBase *pIt); + +static NV_FORCEINLINE MultimapNode * +multimapValueToNode(MultimapBase *pBase, void *pValue) +{ + if (NULL == pBase || NULL == pValue) return NULL; + + return (MultimapNode *)((NvU8*)pValue + pBase->multimapNodeOffset); +} +static NV_FORCEINLINE void * +multimapNodeToValue(MultimapBase *pBase, MultimapNode *pNode) +{ + if (NULL == pBase || NULL == pNode) return NULL; + + return (NvU8*)pNode - pBase->multimapNodeOffset; +} + +NvBool multimapIsValid_IMPL(void *pMap); + + +#ifdef __cplusplus +} +#endif + +#endif // _NV_CONTAINERS_MULTIMAP_H_ diff --git a/src/nvidia/inc/libraries/containers/queue.h b/src/nvidia/inc/libraries/containers/queue.h new file mode 100644 index 0000000..bd23409 --- /dev/null +++ b/src/nvidia/inc/libraries/containers/queue.h @@ -0,0 +1,146 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef NV_CONTAINERS_QUEUE_H +#define NV_CONTAINERS_QUEUE_H + +#include "containers/type_safety.h" +#include "nvtypes.h" +#include "nvmisc.h" +#include "nvport/nvport.h" +#include "utils/nvassert.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAKE_QUEUE_CIRCULAR(queueTypeName, dataType) \ + typedef struct queueTypeName##Iter_UNUSED \ + { \ + NvLength dummyElem; \ + } queueTypeName##Iter_UNUSED; \ + typedef union queueTypeName \ + { \ + Queue real; \ + CONT_TAG_TYPE(Queue, dataType, queueTypeName##Iter_UNUSED); \ + CONT_TAG_NON_INTRUSIVE(dataType); \ + } queueTypeName + +#define DECLARE_QUEUE_CIRCULAR(queueTypeName) \ + typedef struct queueTypeName##Iter_UNUSED queueTypeName##Iter_UNUSED; \ + typedef union queueTypeName queueTypeName + +struct Queue; +struct QueueContext; + +typedef void QueueCopyData(NvLength msgSize, NvLength opIdx, + struct QueueContext *pCtx, void *pClientData, + NvLength count, NvBool bCopyIn); + +typedef struct QueueContext { + QueueCopyData *pCopyData; // Function performing accesses to queue memory. + void *pData; // Private data. 
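A comparable sketch for the two-layer multimap API completed just above. This is illustrative only; whether a submap must exist before item insertion is an assumption here, so the sketch creates it defensively:

```c
#include "containers/multimap.h"

typedef struct { NvU32 data; } Item;   // hypothetical value type
MAKE_MULTIMAP(ItemMultimap, Item);     // generates supermap, submap, and leaf types

static void multimapUsageSketch(PORT_MEM_ALLOCATOR *pAlloc)
{
    ItemMultimap mm;
    Item *pItem;

    multimapInit(&mm, pAlloc);

    // Values are addressed by a (submapKey, itemKey) pair.
    if (multimapFindSubmap(&mm, 1) == NULL)
        multimapInsertSubmap(&mm, 1);

    pItem = multimapInsertItemNew(&mm, 1, 100);   // O(log M + log N)
    if (pItem != NULL)
        pItem->data = 7;

    pItem = multimapFindItem(&mm, 1, 100);
    if (pItem != NULL)
        multimapRemoveItem(&mm, pItem);

    multimapDestroy(&mm);
}
```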
+} QueueContext; + +typedef struct Queue { + NvLength capacity; // Queue Capacity + PORT_MEM_ALLOCATOR *pAllocator; // Set of functions used for managing queue memory + void *pData; // Queue memory, if managed by pAllocator + NvLength msgSize; // Message size produced by Producer + NvLength getIdx NV_ALIGN_BYTES(64);// GET index modified by Consumer + NvLength putIdx NV_ALIGN_BYTES(64);// PUT index modified by Producer +} Queue; + +//for future use (more possible queues - just an example, currently only CIRCULAR will get implemented) +typedef enum +{ + QUEUE_TYPE_CIRCULAR = 1, + //QUEUE_TYPE_LINEAR = 2, + //QUEUE_TYPE_PRIORITY = 3, +}QUEUE_TYPE; + +#define queueInit(pQueue, pAllocator, capacity) \ + circularQueueInit_IMPL(&((pQueue)->real), pAllocator, \ + capacity, sizeof(*(pQueue)->valueSize)) + +#define queueInitNonManaged(pQueue, capacity) \ + circularQueueInitNonManaged_IMPL(&((pQueue)->real), \ + capacity, sizeof(*(pQueue)->valueSize)) + +#define queueDestroy(pQueue) \ + circularQueueDestroy_IMPL(&((pQueue)->real)) + +#define queueCount(pQueue) \ + circularQueueCount_IMPL(&((pQueue)->real)) + +#define queueCapacity(pQueue) \ + circularQueueCapacity_IMPL(&((pQueue)->real)) + +#define queueIsEmpty(pQueue) \ + circularQueueIsEmpty_IMPL(&((pQueue)->real)) + +#define queuePush(pQueue, pElements, numElements) \ + circularQueuePush_IMPL(&(pQueue)->real, \ + CONT_CHECK_ARG(pQueue, pElements), numElements) + +#define queuePushNonManaged(pQueue, pCtx, pElements, numElements) \ + circularQueuePushNonManaged_IMPL(&(pQueue)->real, pCtx, \ + CONT_CHECK_ARG(pQueue, pElements), numElements) + +#define queuePeek(pQueue) \ + CONT_CAST_ELEM(pQueue, circularQueuePeek_IMPL(&((pQueue)->real)), circularQueueIsValid_IMPL) + +#define queuePop(pQueue) \ + circularQueuePop_IMPL(&((pQueue)->real)) + +#define queuePopAndCopy(pQueue, pCopyTo) \ + circularQueuePopAndCopy_IMPL(&((pQueue)->real), \ + CONT_CHECK_ARG(pQueue, pCopyTo)) + +#define queuePopAndCopyNonManaged(pQueue, pCtx, pCopyTo) \ + circularQueuePopAndCopyNonManaged_IMPL(&((pQueue)->real), pCtx, \ + CONT_CHECK_ARG(pQueue, pCopyTo)) + +NV_STATUS circularQueueInit_IMPL(Queue *pQueue, PORT_MEM_ALLOCATOR *pAllocator, + NvLength capacity, NvLength msgSize); +NV_STATUS circularQueueInitNonManaged_IMPL(Queue *pQueue, NvLength capacity, + NvLength msgSize); +void circularQueueDestroy_IMPL(Queue *pQueue); +NvLength circularQueueCapacity_IMPL(Queue *pQueue); +NvLength circularQueueCount_IMPL(Queue *pQueue); +NvBool circularQueueIsEmpty_IMPL(Queue *pQueue); +NvLength circularQueuePush_IMPL(Queue *pQueue, void* pElements, NvLength numElements); +NvLength circularQueuePushNonManaged_IMPL(Queue *pQueue, QueueContext *pCtx, + void* pElements, NvLength numElements); +void* circularQueuePeek_IMPL(Queue *pQueue); +void circularQueuePop_IMPL(Queue *pQueue); +NvBool circularQueuePopAndCopy_IMPL(Queue *pQueue, void *pCopyTo); +NvBool circularQueuePopAndCopyNonManaged_IMPL(Queue *pQueue, QueueContext *pCtx, + void *pCopyTo); + +NvBool circularQueueIsValid_IMPL(void *pQueue); + +#ifdef __cplusplus +} +#endif + +#endif // NV_CONTAINERS_QUEUE_H diff --git a/src/nvidia/inc/libraries/containers/ringbuf.h b/src/nvidia/inc/libraries/containers/ringbuf.h new file mode 100644 index 0000000..f298370 --- /dev/null +++ b/src/nvidia/inc/libraries/containers/ringbuf.h @@ -0,0 +1,169 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
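For the circular queue above, a minimal single-threaded sketch (the 64-byte alignment of the GET/PUT indices only matters when producer and consumer run concurrently). The message type and capacity are assumptions for the example:

```c
#include "containers/queue.h"

typedef struct { NvU32 payload; } Msg;
MAKE_QUEUE_CIRCULAR(MsgQueue, Msg);

static void queueUsageSketch(PORT_MEM_ALLOCATOR *pAlloc)
{
    MsgQueue q;
    Msg in = { 42 };
    Msg out;

    // Capacity is fixed at init time; push/pop never reallocate.
    if (queueInit(&q, pAlloc, 16) != NV_OK)
        return;

    if (queuePush(&q, &in, 1) == 1)      // returns the number of elements pushed
    {
        if (queuePopAndCopy(&q, &out))   // copy out the head, then advance GET
        {
            // out.payload == 42
        }
    }

    queueDestroy(&q);
}
```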
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef NV_CONTAINERS_RINGBUF_H +#define NV_CONTAINERS_RINGBUF_H + +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvmisc.h" +#include "nvctassert.h" +#include "utils/nvassert.h" +#include "nvport/nvport.h" +#include "containers/type_safety.h" + +typedef struct RingBufBase +{ + NvU64 head; + NvU64 tail; + NvU64 logSz; + NvU8 *arr; +} RingBufBase; + +#define RINGBUF_ARRAY_SIZE(pBuf) (1llu << ((pBuf)->logSz)) + +#define RINGBUF_ARRAY_MASK(pBuf) (RINGBUF_ARRAY_SIZE(pBuf) - 1llu) + +#define MAKE_RINGBUF(containedType, outputType, logSz) \ + typedef struct outputType { \ + union { \ + RingBufBase base; \ + PORT_MEM_ALLOCATOR *pAllocator; \ + }; \ + union { \ + containedType elem[1llu << (logSz)]; \ + NvU8 lgSz[((logSz) < 64) ? (logSz) : -1]; /* Ternary to ensure we get CT error when logSz >= 64 */ \ + }; \ + } outputType + +#define MAKE_RINGBUF_DYNAMIC(containedType, outputType) \ + typedef union outputType { \ + struct { \ + RingBufBase base; \ + PORT_MEM_ALLOCATOR *pAllocator; \ + }; \ + containedType *elem; \ + } outputType + +#if NV_TYPEOF_SUPPORTED + +#define FOR_EACH_IN_RINGBUF(pBuf, idx, ptr) \ + { \ + (idx) = (pBuf)->base.tail; \ + while((idx) != (pBuf)->base.head) \ + { \ + (ptr) = (typeof(&((pBuf)->elem)[0])) &(pBuf)->base.arr[((idx) & RINGBUF_ARRAY_MASK(&((pBuf)->base))) * \ + sizeof(*((pBuf)->elem))]; \ + (idx)++; + +#else + +#define FOR_EACH_IN_RINGBUF(pBuf, idx, ptr) \ + { \ + (idx) = (pBuf)->base.tail; \ + while((idx) != (pBuf)->base.head) \ + { \ + (ptr) = (void *)&(pBuf)->base.arr[((idx) & RINGBUF_ARRAY_MASK(&((pBuf)->base))) * \ + sizeof(*((pBuf)->elem))]; \ + (idx)++; + +#endif // NV_TYPEOF_SUPPORTED + +#define FOR_EACH_END_RINGBUF() \ + } \ + } + + +#define ringbufConstructDynamic(pBuf, logSz, pAlloc) \ + ((pBuf)->pAllocator = pAlloc, ringbufConstructDynamic_IMPL(&((pBuf)->base), logSz, sizeof(*((pBuf)->elem)), pAlloc)) + +#define ringbufConstruct(pBuf) \ + ringbufConstruct_IMPL(&((pBuf)->base), sizeof((pBuf)->lgSz), (void*)((pBuf)->elem)) + +#define ringbufDestruct(pBuf) \ + ringbufDestruct_IMPL(&((pBuf)->base), ((void*)&((pBuf)->pAllocator)) == ((void*)&((pBuf)->base)) ? 
NULL : ((pBuf)->pAllocator) ) + +#if NV_TYPEOF_SUPPORTED + +#define ringbufPopN(pBuf, pMax) \ + (typeof(&((pBuf)->elem)[0])) ringbufPopN_IMPL(&((pBuf)->base), sizeof(*((pBuf)->elem)), pMax) + +#define ringbufPeekN(pBuf, pMax) \ + (typeof(&((pBuf)->elem)[0])) ringbufPeekN_IMPL(&((pBuf)->base), sizeof(*((pBuf)->elem)), pMax) + +#define ringbufAppendN(pBuf, pEles, num, bOver) \ + ringbufAppendN_IMPL(&((pBuf)->base), sizeof(*((pBuf)->elem)), (NvU8*)pEles, num, bOver) + +#define ringbufPop(pBuf) \ + (typeof(&((pBuf)->elem)[0])) ringbufPop_IMPL(&((pBuf)->base), sizeof(*((pBuf)->elem))) + +#define ringbufPeek(pBuf) \ + (typeof(&((pBuf)->elem)[0])) ringbufPeek_IMPL(&((pBuf)->base), sizeof(*((pBuf)->elem))) + +#else + +#define ringbufPopN(pBuf, pMax) \ + (void *)ringbufPopN_IMPL(&((pBuf)->base), sizeof(*((pBuf)->elem)), pMax) + +#define ringbufPeekN(pBuf, pMax) \ + (void *)ringbufPeekN_IMPL(&((pBuf)->base), sizeof(*((pBuf)->elem)), pMax) + +#define ringbufAppendN(pBuf, pEles, num, bOver) \ + ringbufAppendN_IMPL(&((pBuf)->base), sizeof(*((pBuf)->elem)), (NvU8*)pEles, num, bOver) + +#define ringbufPop(pBuf) \ + (void *)ringbufPop_IMPL(&((pBuf)->base), sizeof(*((pBuf)->elem))) + +#define ringbufPeek(pBuf) \ + (void *)ringbufPeek_IMPL(&((pBuf)->base), sizeof(*((pBuf)->elem))) + +#endif // NV_TYPEOF_SUPPORTED + +#define ringbufAppend(pBuf, pEle, bOver) \ + ringbufAppend_IMPL(&((pBuf)->base), sizeof(*((pBuf)->elem)), (NvU8*)pEle, bOver) + +#define ringbufCurrentSize(pBuf) \ + ringbufCurrentSize_IMPL(&((pBuf)->base)) + +NvBool ringbufConstructDynamic_IMPL(RingBufBase *pBase, NvU64 logSz, NvU64 eleSz, PORT_MEM_ALLOCATOR *pAlloc); +NvBool ringbufConstruct_IMPL(RingBufBase *pBase, NvU64 logSz, void *arr); +void ringbufDestruct_IMPL(RingBufBase *pBase, void *alloc); +void *ringbufPopN_IMPL(RingBufBase *pBase, NvU64 eleSz, NvU64 *pMax); +void *ringbufPeekN_IMPL(RingBufBase *pBase, NvU64 eleSz, NvU64 *pMax); +NvBool ringbufAppendN_IMPL(RingBufBase *pBase, NvU64 eleSz, NvU8 *pEle, NvU64 num, NvBool bOverwrite); +NvU64 ringbufCurrentSize_IMPL(RingBufBase *pBase); + +static inline void *ringbufPeek_IMPL(RingBufBase *pBase, NvU64 eleSz) +{ + NvU64 max = 1; + return ringbufPeekN_IMPL(pBase, eleSz, &max); +} +static inline void *ringbufPop_IMPL(RingBufBase *pBase, NvU64 eleSz) +{ + NvU64 max = 1; + return ringbufPopN_IMPL(pBase, eleSz, &max); +} +static inline NvBool ringbufAppend_IMPL(RingBufBase *pBase, NvU64 eleSz, NvU8 *pEle, NvBool bOverwrite) +{ + return ringbufAppendN_IMPL(pBase, eleSz, pEle, 1, bOverwrite); +} +#endif diff --git a/src/nvidia/inc/libraries/containers/type_safety.h b/src/nvidia/inc/libraries/containers/type_safety.h new file mode 100644 index 0000000..90a9afa --- /dev/null +++ b/src/nvidia/inc/libraries/containers/type_safety.h @@ -0,0 +1,283 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
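A usage sketch for the statically sized ring buffer variant above. Note how MAKE_RINGBUF overlays pAllocator on the base in a union, which is how ringbufDestruct distinguishes the static variant (no free) from MAKE_RINGBUF_DYNAMIC; the element type and log2 size here are assumptions:

```c
#include "containers/ringbuf.h"

MAKE_RINGBUF(NvU32, U32Ring, 4);   // static storage for 2^4 = 16 elements

static void ringbufUsageSketch(void)
{
    U32Ring ring;
    NvU32 val = 7;
    NvU32 *pOut;

    if (!ringbufConstruct(&ring))
        return;

    // bOver == NV_FALSE: refuse to overwrite the oldest entry when full.
    if (ringbufAppend(&ring, &val, NV_FALSE))
    {
        pOut = ringbufPop(&ring);   // NULL when empty; points into ring storage
        (void)pOut;
    }

    ringbufDestruct(&ring);
}
```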
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_CONTAINERS_TYPE_SAFETY_H_ +#define _NV_CONTAINERS_TYPE_SAFETY_H_ + +#include "nvtypes.h" +#include "nvport/nvport.h" + +// Check for typeof support. For now restricting to GNUC compilers. +#if defined(__GNUC__) +#define NV_TYPEOF_SUPPORTED 1 +#else +#define NV_TYPEOF_SUPPORTED 0 +#endif + +// +// Validate whether a container iterator is valid. +// In checked builds, a container and its iterator get assigned a versionNumber. pIter gets version +// number once at the time of construction. pCont's versionNumber is initially zero and changes +// when pCont is modified. Any container modification invalidates all iterators. +// +#define CONT_ITER_IS_VALID(pCont, pIter) ((pCont)->versionNumber == (pIter)->versionNumber) + +/** + * Tag a non-intrusive container union with the following info: + * valueSize : size of its element type for non-intrusive malloc + * kind : non-intrusive kind ID for static dispatch + */ +#define CONT_TAG_NON_INTRUSIVE(elemType) \ + struct {char _[sizeof(elemType)];} *valueSize; \ + struct {char _[CONT_KIND_NON_INTRUSIVE];} *kind + +/** + * Tag an intrusive container union with the following info: + * nodeOffset : offset of the data structure node within element type + * kind : intrusive kind ID for static dispatch + */ +// FIXME: Do not use this for any structure members with offset 0! +// The size of a 0 length array is undefined according to the C99 standard +// and we've seen non-zero values of sizeof(*nodeOffset) appear at runtime +// leading to corruption. Filed Bug 2858103 to track work against this. +#define CONT_TAG_INTRUSIVE(elemType, node) \ + struct {char _[NV_OFFSETOF(elemType, node)];} *nodeOffset; \ + struct {char _[CONT_KIND_INTRUSIVE];} *kind + + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Utility identity function for several type-safety mechanisms. + */ +static NV_FORCEINLINE void *contId(void *pValue) +{ + return pValue; +} + +#ifdef __cplusplus +} +#endif + +/** + * @def CONT_TAG_ELEM_TYPE + * Tag a container union with element type info. + */ + +/** + * @def CONT_CHECK_ARG + * Check that a value has a container's element type. + */ + +/** + * @def CONT_CAST_ELEM + * Cast a void pointer to a container's element type. + */ + +// With C++ we can use typedef and templates for 100% type safety. +#if defined(__cplusplus) && !defined(NV_CONTAINERS_NO_TEMPLATES) + +#define CONT_TAG_TYPE(contType, elemType, iterType) \ + CONT_VTABLE_TAG(contType, elemType, iterType); \ + typedef contType ContType; \ + typedef elemType ElemType; \ + typedef iterType IterType + +template <class T> +typename T::ElemType *CONT_CHECK_ARG(T *pCont, typename T::ElemType *pValue) +{ + return pValue; +} + +template <class T> +typename T::ElemType *CONT_CAST_ELEM(T *pCont, void *pValue, ...) +{ + return (typename T::ElemType *)pValue; +} + +template <class T, class It> +typename T::IterType CONT_ITER_RANGE +( + T *pCont, + It (*pFunc)(typename T::ContType *, void *, void *), + void *pFirst, + void *pLast, + ...
+) +{ + typename T::IterType temp; + temp.iter = pFunc(&pCont->real.base, pFirst, pLast); + return temp; +} + +template <class T, class It> +typename T::IterType CONT_ITER_RANGE_INDEX +( + T *pCont, + It (*pFunc)(typename T::ContType *, NvU64, NvU64), + NvU64 first, + NvU64 last, + ... +) +{ + typename T::IterType temp; + temp.iter = pFunc(&pCont->real.base, first, last); + return temp; +} + +// Without C++ we need more creativity. :) +#else + +// Element tag is a pointer to the element type (no mem overhead in union). +#define CONT_TAG_TYPE(contType, elemType, iterType) \ + CONT_VTABLE_TAG(contType, elemType, iterType); \ + elemType *elem; \ + iterType *iter + +// +// sizeof forces this expression to be evaluated at compile-time only. +// +// The ternary will always evaluate true and pass through the original pValue. +// However, the pointer comparison will generate a compile-time warning if +// pValue's type differs from the container's element type. +// +#define CONT_CHECK_ARG(pCont, pValue) \ + (sizeof((pCont)->elem == (pValue)) ? (pValue) : NULL) + +// +// Return checks are more problematic, but typeof is perfect when available. +// Without typeof we resort to a runtime vtable. +// +#if NV_TYPEOF_SUPPORTED + +#define CONT_CAST_ELEM(pCont, ret, validfunc) ((typeof((pCont)->elem))(ret)) + +// +// The dummy contId prevents compilers from warning about incompatible +// function casts. This is safe since we know the two return structures +// are identical (modulo alpha-conversion). +// +#define CONT_ITER_RANGE(pCont, pFunc, pFirst, pLast, validfunc) \ + (((typeof(*(pCont)->iter)(*)(void *, void *, void *))contId(pFunc))( \ + pCont, pFirst, pLast)) + +#define CONT_ITER_RANGE_INDEX(pCont, pFunc, first, last, validfunc) \ + (((typeof(*(pCont)->iter)(*)(void *, NvU64, NvU64))contId(pFunc))( \ + pCont, first, last)) + +#else + +// Actual implementations +#define CONT_CAST_ELEM2(pCont, ret) ((pCont)->vtable->checkRet(ret)) + +#define CONT_ITER_RANGE2(pCont, pFunc, pFirst, pLast) \ + ((pCont)->vtable->iterRange(&(pCont)->real.base, pFirst, pLast)) + +#define CONT_ITER_RANGE_INDEX2(pCont, pFunc, first, last) \ + ((pCont)->vtable->iterRangeIndex(&(pCont)->real.base, first, last)) + +// Calls validfunc() first to initialize vtable +#define CONT_CAST_ELEM(pCont, ret, validfunc) \ + (validfunc((pCont)) ? CONT_CAST_ELEM2(pCont, ret) : CONT_CAST_ELEM2(pCont, NULL)) + +#define CONT_ITER_RANGE(pCont, pFunc, pFirst, pLast, validfunc) \ + (validfunc((pCont)) ? CONT_ITER_RANGE2(pCont, pFunc, pFirst, pLast) : CONT_ITER_RANGE2(pCont, NULL, NULL, NULL)) + +#define CONT_ITER_RANGE_INDEX(pCont, pFunc, first, last, validfunc) \ + (validfunc((pCont)) ?
CONT_ITER_RANGE_INDEX2(pCont, pFunc, first, last) : CONT_ITER_RANGE_INDEX2(pCont, NULL, 0, 0)) + +#endif + +#endif + +#if NV_TYPEOF_SUPPORTED + +#define CONT_VTABLE_DECL(contType, iterType) +#define CONT_VTABLE_DEFN(contType, contIterRange, contIterRangeIndex) +#define CONT_VTABLE_TAG(contType, elemType, iterType) +#define CONT_VTABLE_FIELD(contType) +#define CONT_VTABLE_INIT(contType, pCont) + +#else + +#define CONT_VTABLE_DECL(contType, iterType) \ + typedef struct \ + { \ + void *(*checkRet)(void *pValue); \ + iterType (*iterRange)(contType *pCont, void *pFirst, void *pLast); \ + iterType (*iterRangeIndex)(contType *pCont, NvU64 first, NvU64 last); \ + } contType##_VTABLE; \ + +#define CONT_VTABLE_DEFN(contType, contIterRange, contIterRangeIndex) \ + static const contType##_VTABLE g_##contType##_VTABLE = \ + { \ + contId, \ + contIterRange, \ + contIterRangeIndex, \ + } + +#define CONT_VTABLE_TAG(contType, elemType, iterType) \ + const struct \ + { \ + elemType *(*checkRet)(void *pValue); \ + iterType (*iterRange)(contType *pCont, void *pFirst, void *pLast); \ + iterType (*iterRangeIndex)(contType *pCont, NvU64 first, NvU64 last); \ + } *vtable + +#define CONT_VTABLE_FIELD(contType) const contType##_VTABLE *vtable + +#define CONT_VTABLE_INIT(contType, pCont) \ + ((pCont)->vtable = &g_##contType##_VTABLE) + +#define CONT_VTABLE_VALID(pCont) ((pCont)->vtable != NULL) + +#endif + +enum CONT_KIND +{ + CONT_KIND_NON_INTRUSIVE = 1, + CONT_KIND_INTRUSIVE = 2, +}; + +/** + * Static dispatch uses sizeof with dummy arrays to select a path. + * + * With optimizations enabled the unused paths should be trimmed, so this + * should have zero overhead in release builds. +*/ +#define CONT_DISPATCH_ON_KIND(pCont, ret1, ret2, ret3) \ + ((sizeof(*(pCont)->kind) == CONT_KIND_NON_INTRUSIVE) ? (ret1) : \ + (sizeof(*(pCont)->kind) == CONT_KIND_INTRUSIVE) ? (ret2) : \ + (ret3)) + +/** + * Utility stub useful for the above ret3 argument (unreachable path). + * Add stubs for different return types as needed. + */ +static NV_FORCEINLINE void contDispatchVoid_STUB(void) +{ + PORT_BREAKPOINT(); +} + +#endif // _NV_CONTAINERS_TYPE_SAFETY_H_ diff --git a/src/nvidia/inc/libraries/containers/vector.h b/src/nvidia/inc/libraries/containers/vector.h new file mode 100644 index 0000000..6554e5d --- /dev/null +++ b/src/nvidia/inc/libraries/containers/vector.h @@ -0,0 +1,203 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
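To make the kind-tag mechanism above concrete, here is how a call dispatches for a non-intrusive map, with the expansion written out by hand (names are taken from the headers in this patch):

```c
// MAKE_MAP tags the container union with
//     struct {char _[CONT_KIND_NON_INTRUSIVE];} *kind;   // sizeof(*kind) == 1
// while MAKE_INTRUSIVE_MAP tags it with
//     struct {char _[CONT_KIND_INTRUSIVE];} *kind;       // sizeof(*kind) == 2
//
// A call such as mapDestroy(&myMap) therefore expands through
// CONT_DISPATCH_ON_KIND to a ternary on compile-time constants:
//
//     (sizeof(*(&myMap)->kind) == CONT_KIND_NON_INTRUSIVE)
//         ? mapDestroy_IMPL(...)            // selected for MAKE_MAP types
//     : (sizeof(*(&myMap)->kind) == CONT_KIND_INTRUSIVE)
//         ? mapDestroyIntrusive_IMPL(...)
//         : contDispatchVoid_STUB();        // unreachable third path
//
// The dead branches are constant-folded away in optimized builds.
```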
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef NV_CONTAINERS_VECTOR_H +#define NV_CONTAINERS_VECTOR_H 1 + +#include "containers/type_safety.h" +#include "nvtypes.h" +#include "nvmisc.h" +#include "nvport/nvport.h" +#include "utils/nvassert.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup NV_CONTAINERS_VECTOR Vector + * + * @brief Sequence of user-defined values. + * + * @details Order of values is not necessarily increasing or sorted, but order is + * preserved across mutation. Please see + * https://en.wikipedia.org/wiki/Sequence for a formal definition. + * + * - Time Complexity: + * * Operations are \b O(1), + * * Unless stated otherwise. + * + * - Memory Usage: + * * \b O(N) memory is required for N values. + * * See @ref mem-ownership for further details. + * + * - Synchronization: + * * \b None. The container is not thread-safe. + * * Locking must be handled by the user if required. + * + */ + +#define MAKE_VECTOR(vectTypeName, dataType) \ + typedef union vectTypeName##Iter \ + { \ + dataType *pValue; \ + VectorIterBase iter; \ + } vectTypeName##Iter; \ + typedef union vectTypeName \ + { \ + VectorWrapper real; \ + CONT_TAG_TYPE(Vector, dataType, vectTypeName##Iter); \ + CONT_TAG_NON_INTRUSIVE(dataType); \ + } vectTypeName + +#define DECLARE_VECTOR(vectTypeName) \ + typedef union vectTypeName##Iter vectTypeName##Iter; \ + typedef union vectTypeName vectTypeName + +typedef struct Vector Vector; +typedef struct VectorIterBase VectorIterBase; +typedef struct VectorWrapper VectorWrapper; + +/** + * Note that the vector values are NvU32 and Iterator values are NvS32, + * so in case there is a need for a vector with over ~2 billion entries + * this might not work. 
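For illustration, a minimal sketch of the vector API whose accessors are declared just below; the element type, capacity, and allocator are assumptions for the example:

```c
#include "containers/vector.h"

MAKE_VECTOR(U32Vector, NvU32);

static void vectorUsageSketch(PORT_MEM_ALLOCATOR *pAlloc)
{
    U32Vector vec;
    NvU32 val = 7;
    NvU32 *pAt;

    if (vectInit(&vec, pAlloc, 8) != NV_OK)   // initial capacity of 8
        return;

    if (vectAppend(&vec, &val) != NULL)       // copies val into the vector
    {
        pAt = vectAt(&vec, 0);                // element access by index
        // *pAt == 7
    }

    vectDestroy(&vec);
}
```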
+ */ +struct VectorIterBase +{ + void *pValue; + Vector *pVector; + NvS32 nextIndex; + NvS32 prevIndex; + NvS32 firstIndex; + NvS32 lastIndex; + NvBool bForward; +#if PORT_IS_CHECKED_BUILD + NvU32 versionNumber; + NvBool bValid; +#endif +}; + +VectorIterBase vectIterRange_IMPL(Vector *pVector, void *pFirst, void *pLast); +CONT_VTABLE_DECL(Vector, VectorIterBase); + +struct Vector +{ + CONT_VTABLE_FIELD(Vector); + void *pHead; + PORT_MEM_ALLOCATOR *pAllocator; + NvU32 valueSize; + NvU32 capacity; + NvU32 size; +#if PORT_IS_CHECKED_BUILD + NvU32 versionNumber; +#endif +}; + +struct VectorWrapper +{ + Vector base; +}; + +#define vectInit(pVector, pAllocator, capacity) \ + vectInit_IMPL(&((pVector)->real.base), \ + pAllocator, \ + capacity, \ + sizeof(*(pVector)->valueSize)) +#define vectDestroy(pVector) vectDestroy_IMPL(&((pVector)->real.base)) +#define vectClear(pVector) vectClear_IMPL(&((pVector)->real.base)) +#define vectCount(pVector) vectCount_IMPL(&((pVector)->real.base)) +#define vectCapacity(pVector) vectCapacity_IMPL(&((pVector)->real.base)) +#define vectIsEmpty(pVector) vectIsEmpty_IMPL(&((pVector)->real.base)) +#define vectAt(pVector, index) \ + CONT_CAST_ELEM((pVector), \ + vectAt_IMPL(&((pVector)->real.base), index), \ + vectIsValid_IMPL) +#define vectInsert(pVector, index, pValue) \ + CONT_CAST_ELEM((pVector), \ + vectInsert_IMPL(&(pVector)->real.base, \ + index, \ + CONT_CHECK_ARG(pVector, pValue)), \ + vectIsValid_IMPL) +#define vectRemove(pVector, index) \ + vectRemove_IMPL(&((pVector)->real.base), index) +#define vectAppend(pVector, pValue) \ + CONT_CAST_ELEM((pVector), \ + vectAppend_IMPL(&(pVector)->real.base, \ + CONT_CHECK_ARG(pVector, pValue)), \ + vectIsValid_IMPL) +#define vectPrepend(pVector, pValue) \ + CONT_CAST_ELEM((pVector), \ + vectPrepend_IMPL(&(pVector)->real.base, \ + CONT_CHECK_ARG(pVector, pValue)), \ + vectIsValid_IMPL) +#define vectReserve(pVector, size) \ + vectReserve_IMPL(&((pVector)->real.base), size) +#define vectTrim(pVector, size) vectTrim_IMPL(&((pVector)->real.base), size) + +#define vectIterAll(pVector) \ + vectIterRangeIndex(pVector, 0, vectCount(pVector) - 1) +#define vectIterRangeIndex(pVector, firstIndex, lastIndex) \ + vectIterRange(pVector, \ + vectAt(pVector, firstIndex), \ + vectAt(pVector, lastIndex)) +#define vectIterRange(pVector, pFirst, pLast) \ + CONT_ITER_RANGE(pVector, \ + &vectIterRange_IMPL, \ + CONT_CHECK_ARG(pVector, pFirst), \ + CONT_CHECK_ARG(pVector, pLast), \ + vectIsValid_IMPL) +#define vectIterNext(pIterator) \ + vectIterNext_IMPL(&((pIterator)->iter), (void **)&(pIterator)->pValue) +#define vectIterPrev(pIterator) \ + vectIterPrev_IMPL(&((pIterator)->iter), (void **)&(pIterator)->pValue) + +NV_STATUS vectInit_IMPL(Vector *pVector, + PORT_MEM_ALLOCATOR *pAllocator, + NvU32 capacity, + NvU32 valueSize); +void vectDestroy_IMPL(Vector *pVector); +void vectClear_IMPL(Vector *pVector); +NvU32 vectCount_IMPL(Vector *pVector); +NvU32 vectCapacity_IMPL(Vector *pVector); +NvBool vectIsEmpty_IMPL(Vector *pVector); + +void *vectAt_IMPL(Vector *pVector, NvU32 index); +void *vectInsert_IMPL(Vector *pVector, NvU32 index, const void *pData); +void vectRemove_IMPL(Vector *pVector, NvU32 index); +void *vectAppend_IMPL(Vector *pVector, const void *pData); +void *vectPrepend_IMPL(Vector *pvector, const void *pData); + +NV_STATUS vectReserve_IMPL(Vector *pVector, NvU32 n); +NV_STATUS vectTrim_IMPL(Vector *pvector, NvU32 n); + +VectorIterBase vectIterRange_IMPL(Vector *pVector, void *pFirst, void *pLast); +NvBool 
vectIterNext_IMPL(VectorIterBase *pIter, void **ppValue); +NvBool vectIterPrev_IMPL(VectorIterBase *pIter, void **ppValue); + +NvBool vectIsValid_IMPL(void *pVect); + +#ifdef __cplusplus +} +#endif + +#endif // NV_CONTAINERS_VECTOR_H diff --git a/src/nvidia/inc/libraries/eventbufferproducer.h b/src/nvidia/inc/libraries/eventbufferproducer.h new file mode 100644 index 0000000..3a7412e --- /dev/null +++ b/src/nvidia/inc/libraries/eventbufferproducer.h @@ -0,0 +1,177 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_EVENT_BUFFER_PRODUCER_H_ +#define _NV_EVENT_BUFFER_PRODUCER_H_ +#include "nvtypes.h" +#include "class/cl90cd.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* +* RECORD_BUFFER_INFO +* This structure holds information for the record buffer, which is a circular +* buffer with fixed-size records +* +* pHeader +* This is a shared header between the producer and consumer. +* It holds the get/put pointers and overflow counts. +* +* recordBuffAddr +* This is the producer mapping to the record buffer. +* +* recordSize +* Size of each record in bytes. +* +* totalRecordCount +* Total number of records that this buffer can hold. +* +* bufferSize +* Total size of record buffer in bytes. +* +* notificationThreshold +* This field specifies the number of records that the buffer can +* still hold before it gets full. +* Consumer is notified when this threshold is met. +* +*/ +typedef struct +{ + NV_EVENT_BUFFER_HEADER* pHeader; + NvP64 recordBuffAddr; + NvU32 recordSize; + NvU32 totalRecordCount; + NvU32 bufferSize; + NvU32 notificationThreshold; +} RECORD_BUFFER_INFO; + +/* +* VARDATA_BUFFER_INFO: +* This structure holds information for the variable length data buffer, +* which is a circular buffer with variable-size data records +* +* vardataBuffAddr +* This is the producer mapping to the vardata buffer. +* +* bufferSize +* Total size of vardata buffer in bytes. +* +* notificationThreshold +* This field specifies the number of records that the buffer can +* still hold before it gets full. +* Consumer is notified when this threshold is met. +* +* get/put +* These are the get and put offsets for vardata buffer. +* These are not shared with the consumer. +* +* remainingSize +* Size in bytes remaining in the vardata buffer.
+*/ +typedef struct +{ + NvP64 vardataBuffAddr; + NvU32 bufferSize; + NvU32 notificationThreshold; + NvU32 get; + NvU32 put; + NvU32 remainingSize; +} VARDATA_BUFFER_INFO; + +/* +* EVENT_BUFFER_PRODUCER_INFO: +* +* recordBuffer +* Record buffer information +* +* vardataBuffer +* Vardata buffer information +* +* notificationHandle +* notification handle used to notify the consumer +* +* isEnabled +* Data is added to the event buffer only if this flag is set +* Controlled by Consumer. +* +* isKeepNewest +* This flag is set if keepNewest mode is selected by the consumer. +*/ +typedef struct +{ + RECORD_BUFFER_INFO recordBuffer; + VARDATA_BUFFER_INFO vardataBuffer; + NvP64 notificationHandle; + NvBool isEnabled; + NvBool isKeepNewest; +} EVENT_BUFFER_PRODUCER_INFO; + +/* +* EVENT_BUFFER_PRODUCER_DATA: +* This structure holds data info to add a record in a buffer +* +* pPayload +* Pointer to the payload that needs to be added in the record buffer +* +* payloadSize +* Size of payload in bytes. +* +* pVardata +* Pointer to data that needs to be added in the vardata buffer +* +* vardataSize +* Size of vardata in bytes. +*/ +typedef struct +{ + NvP64 pPayload; + NvU32 payloadSize; + NvP64 pVardata; + NvU32 vardataSize; +} EVENT_BUFFER_PRODUCER_DATA; + +void eventBufferInitRecordBuffer(EVENT_BUFFER_PRODUCER_INFO *info, NV_EVENT_BUFFER_HEADER* pHeader, + NvP64 recordBuffAddr, NvU32 recordSize, NvU32 recordCount, NvU32 bufferSize, NvU32 notificationThreshold); + +void eventBufferInitVardataBuffer(EVENT_BUFFER_PRODUCER_INFO *info, NvP64 vardataBuffAddr, + NvU32 bufferSize, NvU32 notificationThreshold); + +void eventBufferInitNotificationHandle(EVENT_BUFFER_PRODUCER_INFO *info, NvP64 notificationHandle); +void eventBufferSetEnable(EVENT_BUFFER_PRODUCER_INFO *info, NvBool isEnabled); +void eventBufferSetKeepNewest(EVENT_BUFFER_PRODUCER_INFO *info, NvBool isKeepNewest); +void eventBufferUpdateRecordBufferGet(EVENT_BUFFER_PRODUCER_INFO *info, NvU32 get); +void eventBufferUpdateVardataBufferGet(EVENT_BUFFER_PRODUCER_INFO *info, NvU32 get); +NvU32 eventBufferGetRecordBufferCount(EVENT_BUFFER_PRODUCER_INFO *info); +NvU32 eventBufferGetVardataBufferCount(EVENT_BUFFER_PRODUCER_INFO *info); + +void eventBufferProducerAddEvent(EVENT_BUFFER_PRODUCER_INFO* info, NvU16 eventType, NvU16 eventSubtype, + EVENT_BUFFER_PRODUCER_DATA *pData); + +NvBool eventBufferIsNotifyThresholdMet(EVENT_BUFFER_PRODUCER_INFO* info); + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif //_NV_EVENT_BUFFER_PRODUCER_H_ diff --git a/src/nvidia/inc/libraries/field_desc.h b/src/nvidia/inc/libraries/field_desc.h new file mode 100644 index 0000000..47d6892 --- /dev/null +++ b/src/nvidia/inc/libraries/field_desc.h @@ -0,0 +1,450 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
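A producer-side sketch for the event buffer API above. How the shared header and record memory are allocated and mapped is outside the scope of this header, so they arrive as parameters here; the use of the nvtypes.h NV_PTR_TO_NvP64 helper and the zero event type/subtype are assumptions for the example:

```c
#include "eventbufferproducer.h"

static void producerUsageSketch(NV_EVENT_BUFFER_HEADER *pHeader,
                                NvP64 recordBuffAddr,
                                NvU32 recordSize,
                                NvU32 recordCount)
{
    EVENT_BUFFER_PRODUCER_INFO info = {0};
    EVENT_BUFFER_PRODUCER_DATA data = {0};
    NvU32 payload = 0xCAFE;

    eventBufferInitRecordBuffer(&info, pHeader, recordBuffAddr, recordSize,
                                recordCount, recordSize * recordCount,
                                recordCount / 2 /* notification threshold */);
    eventBufferSetEnable(&info, NV_TRUE);   // normally toggled by the consumer

    data.pPayload    = NV_PTR_TO_NvP64(&payload);
    data.payloadSize = sizeof(payload);
    eventBufferProducerAddEvent(&info, 0 /* eventType */, 0 /* eventSubtype */,
                                &data);

    if (eventBufferIsNotifyThresholdMet(&info))
    {
        // signal the consumer via the registered notification handle
    }
}
```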
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef NV_FIELD_DESC_H +#define NV_FIELD_DESC_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * @file + * @brief Simple runtime DRF-macro framework. + * + * Allows HAL definitions at the register/field level to maximize common code. + * Two primitive versions are provided - 32-bit and 64-bit. + */ +#include "nvtypes.h" +#include "nvmisc.h" +#include "compat.h" + +// Forward declarations. +typedef struct NV_FIELD_DESC32 NV_FIELD_DESC32; +typedef struct NV_FIELD_DESC64 NV_FIELD_DESC64; +typedef struct NV_FIELD_ENUM NV_FIELD_ENUM; +typedef struct NV_FIELD_ENUM_ENTRY NV_FIELD_ENUM_ENTRY; +typedef struct NV_FIELD_BOOL NV_FIELD_BOOL; + +/*! + * Descriptor for fields <= 32-bits in length. + * A field is assumed to fit within a 4-byte aligned region. + */ +struct NV_FIELD_DESC32 +{ + /*! + * Positive bitmask of the field, e.g. 0x6ULL for a field 2:1. + */ + NvU32 maskPos; + + /*! + * Negative bitmask of the field, e.g. ~0x6ULL for a field 2:1. + */ + NvU32 maskNeg; + + /*! + * Bit shift, e.g. 4 for a field 31:4. + */ + NvU16 shift; + + /*! + * Offset into the memory in bytes. + * This is needed for regions greater than 4 bytes. + */ + NvU16 offset; +}; + +#define NV_FIELD_DESC64_MAX_DISCONTIG_REGIONS 2 + +/*! + * Descriptor for fields <= 64-bits in length. + * A field is assumed to fit within an 8-byte aligned region of memory. + */ +struct NV_FIELD_DESC64 +{ + NvU32 regionCount; + + struct + { + /*! + * Positive bitmask of the field, e.g. 0x6ULL for a field 2:1. + */ + NvU64 maskPos; + + /*! + * Width of field, e.g. 0x2 for a field 2:1. + */ + NvU32 width; + + /*! + * Bit shift, e.g. 4 for a field 31:4. + */ + NvU32 shift; + + /*! + * Offset into the memory in bytes. + * This is needed for regions greater than 8 bytes. + */ + NvU32 offset; + } regions[NV_FIELD_DESC64_MAX_DISCONTIG_REGIONS]; +}; + +/*! + * Enumeration field value. + */ +struct NV_FIELD_ENUM_ENTRY +{ + NvU8 bValid; //!< Indicates the value is valid (for checking). + NvU8 value; //!< Raw (encoded) value. +}; + +/*! + * Enumeration field descriptor. + */ +struct NV_FIELD_ENUM +{ + NV_FIELD_DESC32 desc; + NvU8 numEntries; + const NV_FIELD_ENUM_ENTRY *entries; +}; + +/*! + * Boolean field descriptor. + */ +struct NV_FIELD_BOOL +{ + NV_FIELD_DESC32 desc; + NvBool bInvert; +}; + +// TODO: Move to nvmisc.h. +#ifndef DRF_OFFSET +#define DRF_OFFSET(drf) (((0?drf) / 32) * 4) +#endif + +#ifndef DRF_OFFSET64 +#define DRF_OFFSET64(drf) (((0?drf) / 64) * 8) +#endif + +// Utility macros to define field formats using HW manuals. 
+#define INIT_FIELD_DESC32(pField, drf) \ + do { \ + (pField)->maskPos = DRF_SHIFTMASK(drf); \ + (pField)->maskNeg = ~DRF_SHIFTMASK(drf); \ + (pField)->shift = DRF_SHIFT(drf); \ + (pField)->offset = DRF_OFFSET(drf); \ + } while (0) + +#define INIT_FIELD_DESC64(pField, drf) \ + do { \ + (pField)->regionCount = 1; \ + (pField)->regions[0].maskPos = DRF_SHIFTMASK64(drf); \ + (pField)->regions[0].width = (1?drf) - (0?drf) + 1; \ + (pField)->regions[0].shift = DRF_SHIFT64(drf); \ + (pField)->regions[0].offset = DRF_OFFSET64(drf); \ + } while (0) + + +/* + @note: + BEGIN_DISCONTIG_FIELD_DESC64(pField) + DRF_DISCONTIG_FIELD_DESC64(pField, NV_MMU_VER2_PTE_COMPTAGLINE) + DRF_DISCONTIG_FIELD_DESC64(pField, NV_MMU_VER2_PTE_ADDRESS_VID) + END_FIELD_DESC64_DISCONTIGUOUS(pField) + +*/ +#define BEGIN_DISCONTIG_FIELD_DESC64(pField) \ + pField->regionCount = 0; + +#define DRF_DISCONTIG_FIELD_DESC64(pField, drf) \ + do { \ + NV_ASSERT_CHECKED_PRECOMP(pField->regionCount < NV_FIELD_DESC64_MAX_DISCONTIG_REGIONS); \ + (pField)->regions[pField->regionCount].maskPos = DRF_SHIFTMASK64(drf); \ + (pField)->regions[pField->regionCount].width = (1?drf) - (0?drf) + 1; \ + (pField)->regions[pField->regionCount].shift = DRF_SHIFT64(drf); \ + (pField)->regions[pField->regionCount].offset = DRF_OFFSET64(drf); \ + pField->regionCount ++; \ + } while(0); + +#define END_FIELD_DESC64_DISCONTIGUOUS(pField) + + +#define INIT_FIELD_ENUM(pEnum, drf, count, pEntries) \ + do { \ + INIT_FIELD_DESC32(&(pEnum)->desc, drf); \ + (pEnum)->numEntries = count; \ + (pEnum)->entries = pEntries; \ + } while(0) + +#define INIT_FIELD_BOOL(pBool, drf) \ + do { \ + INIT_FIELD_DESC32(&(pBool)->desc, drf); \ + (pBool)->bInvert = (NvBool)!(drf##_TRUE); \ + } while(0) + +static NV_FORCEINLINE void +nvFieldEnumEntryInit(NV_FIELD_ENUM_ENTRY *pEntry, const NvU8 value) +{ + pEntry->bValid = NV_TRUE; + pEntry->value = value; +} + +/*! + * Test whether a 32-bit field descriptor is valid. + */ +static NV_FORCEINLINE NvBool +nvFieldIsValid32(const NV_FIELD_DESC32 *pField) +{ + if (0 != pField->maskPos) + { + NV_ASSERT_CHECKED_PRECOMP(pField->maskPos == ~pField->maskNeg); + NV_ASSERT_CHECKED_PRECOMP(0 != (NVBIT64(pField->shift) & pField->maskPos)); + NV_ASSERT_CHECKED_PRECOMP(0 == (pField->offset & 0x3)); + return NV_TRUE; + } + return NV_FALSE; +} + +/*! + * Test whether a 64-bit field descriptor is valid. + */ +static NV_FORCEINLINE NvBool +nvFieldIsValid64(const NV_FIELD_DESC64 *pField) +{ + NvU32 i; + NvU32 aggregate_mask = 0; + for (i = 0; i < pField->regionCount; i++) { + // Forbid empty registers + if (pField->regions[i].maskPos == 0) + return NV_FALSE; + + // Ensure that fields don't overlap + NV_ASSERT_CHECKED_PRECOMP((pField->regions[i].maskPos & aggregate_mask) == 0); + aggregate_mask |= pField->regions[i].maskPos; + + // Ensure that shift is bottom bit of maskPos + NV_ASSERT_CHECKED_PRECOMP(0 != (NVBIT64(pField->regions[i].shift) & pField->regions[i].maskPos)); + + // Ensure offset is quad-word aligned + NV_ASSERT_CHECKED_PRECOMP(0 == (pField->regions[i].offset & 0x7)); + } + return NV_TRUE; +} + +/*! + * Set a 32-bit field based on its descriptor. + * + * @param[in] pField Field format. + * @param[in] value Value to set within the entry. + * @param[in,out] pMem Existing memory to update of at least length (pField->offset + 4). 
+ */ +static NV_FORCEINLINE void +nvFieldSet32 +( + const NV_FIELD_DESC32 *pField, + const NvU32 value, + NvU8 *pMem +) +{ + NvU32 *pValue = (NvU32*)(pMem + pField->offset); + const NvU32 shifted = value << pField->shift; + + NV_ASSERT_CHECKED_PRECOMP(nvFieldIsValid32(pField)); + NV_ASSERT_CHECKED_PRECOMP((shifted >> pField->shift) == value); + NV_ASSERT_CHECKED_PRECOMP((shifted & pField->maskPos) == shifted); + + *pValue = (*pValue & pField->maskNeg) | shifted; +} + +/*! + * Set a 64-bit field based on its descriptor. + * + * @param[in] pField Field format. + * @param[in] value Value to set within the entry. + * @param[in,out] pMem Existing memory to update of at least length (pField->offset + 8). + */ +static NV_FORCEINLINE void +nvFieldSet64 +( + const NV_FIELD_DESC64 *pField, + NvU64 value, + NvU8 *pMem +) +{ + NvU32 i; + NV_ASSERT_CHECKED_PRECOMP(nvFieldIsValid64(pField)); + + for (i = 0; i < pField->regionCount; i++) + { + // Compute location and mask + NvU64 *pValue = (NvU64*)(pMem + pField->regions[i].offset); + const NvU64 shifted = value << pField->regions[i].shift; + + // Store the portion of the value that fits in this field + *pValue = (*pValue & ~pField->regions[i].maskPos) | + (shifted & pField->regions[i].maskPos); + + // Shift off the bits we just stored + value >>= pField->regions[i].width; + } + + // Ensure value doesn't overflow the field + NV_ASSERT_CHECKED_PRECOMP(value == 0); +} + +/*! + * Encode and set an enum value based on its descriptor. + * + * @param[in] pEnum Enum format. + * @param[in] value Un-encoded value to set within the entry. + * @param[in,out] pMem Existing memory to update of at least length (pEnum->desc.offset + 4). + */ +static NV_FORCEINLINE void +nvFieldSetEnum +( + const NV_FIELD_ENUM *pEnum, + const NvU32 value, + NvU8 *pMem +) +{ + NV_ASSERT_CHECKED_PRECOMP(value < pEnum->numEntries); + NV_ASSERT_CHECKED_PRECOMP(pEnum->entries[value].bValid); + nvFieldSet32(&pEnum->desc, pEnum->entries[value].value, pMem); +} + +/*! + * Set a boolean field based on its descriptor. + * + * @param[in] pField Boolean field descriptor. + * @param[in] value Truth value. + * @param[in,out] pMem Existing memory to update of at least length (pField->desc.offset + 4). + */ +static NV_FORCEINLINE void +nvFieldSetBool +( + const NV_FIELD_BOOL *pField, + const NvBool value, + NvU8 *pMem +) +{ + nvFieldSet32(&pField->desc, value ^ pField->bInvert, pMem); +} + +/*! + * Get the value of a 32-bit field based on its descriptor. + * + * @param[in] pField Field format. + * @param[in] pMem Memory of at least length (pField->offset + 4). + * + * @returns the extracted value. + */ +static NV_FORCEINLINE NvU32 +nvFieldGet32 +( + const NV_FIELD_DESC32 *pField, + const NvU8 *pMem +) +{ + NV_ASSERT_CHECKED_PRECOMP(nvFieldIsValid32(pField)); + return (*(const NvU32*)(pMem + pField->offset) & pField->maskPos) >> pField->shift; +} + +/*! + * Get the value of a 64-bit field based on its descriptor. + * + * @param[in] pField Field format. + * @param[in] pMem Memory of at least length (pField->offset + 8). + * + * @returns the extracted value.
+/*!
+ * Get the value of a 64-bit field based on its descriptor.
+ *
+ * @param[in] pField Field format.
+ * @param[in] pMem   Memory of at least length (pField->offset + 8).
+ *
+ * @returns the extracted value.
+ */
+static NV_FORCEINLINE NvU64
+nvFieldGet64
+(
+    const NV_FIELD_DESC64 *pField,
+    const NvU8            *pMem
+)
+{
+    NvU32 i, shift = 0;
+    NvU64 value = 0;
+    NV_ASSERT_CHECKED_PRECOMP(nvFieldIsValid64(pField));
+    for (i = 0; i < pField->regionCount; i++)
+    {
+        NvU64 region_value = (*(const NvU64*)(pMem + pField->regions[i].offset) &
+                              pField->regions[i].maskPos) >> pField->regions[i].shift;
+
+        value |= region_value << shift;
+
+        shift += pField->regions[i].width;
+    }
+    return value;
+}
+
+/*!
+ * Get and decode an enum value based on its descriptor.
+ *
+ * @param[in] pEnum Enum format.
+ * @param[in] pMem  Memory of at least length (pEnum->desc.offset + 4).
+ *
+ * @returns the decoded value, or 0 (after a checked assert) if the encoded
+ *          value does not match any valid entry.
+ */
+static NV_FORCEINLINE NvU32
+nvFieldGetEnum
+(
+    const NV_FIELD_ENUM *pEnum,
+    const NvU8          *pMem
+)
+{
+    const NvU32 encoded = nvFieldGet32(&pEnum->desc, pMem);
+    NvU32       decoded;
+    for (decoded = 0; decoded < pEnum->numEntries; ++decoded)
+    {
+        if (pEnum->entries[decoded].bValid &&
+            (pEnum->entries[decoded].value == encoded))
+        {
+            return decoded;
+        }
+    }
+    NV_ASSERT_CHECKED_PRECOMP(0);
+    return 0;
+}
+
+/*!
+ * Get a boolean field based on its descriptor.
+ *
+ * @param[in] pField Boolean field descriptor.
+ * @param[in] pMem   Memory of at least length (pField->desc.offset + 4).
+ *
+ * @returns the decoded truth value.
+ */
+static NV_FORCEINLINE NvBool
+nvFieldGetBool
+(
+    const NV_FIELD_BOOL *pField,
+    const NvU8          *pMem
+)
+{
+    const NvU32 value = nvFieldGet32(&pField->desc, pMem);
+    NV_ASSERT_CHECKED_PRECOMP(value <= 1);
+    return (NvBool)(value ^ pField->bInvert);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // NV_FIELD_DESC_H
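Rounding out the 64-bit path, a sketch of a two-region discontiguous field built with the macros above (both NV_XYZ_* ranges are hypothetical stand-ins; real formats use ranges like the NV_MMU_VER2_PTE ones shown in the earlier @note):

    NV_FIELD_DESC64 fld;
    NvU8  pte[16] = {0};
    NvU64 addr    = 0x1234567890ULL;

    BEGIN_DISCONTIG_FIELD_DESC64(&fld)
    DRF_DISCONTIG_FIELD_DESC64(&fld, NV_XYZ_ADDR_LO)  /* e.g. bits 11:8  */
    DRF_DISCONTIG_FIELD_DESC64(&fld, NV_XYZ_ADDR_HI)  /* e.g. bits 53:12 */
    END_FIELD_DESC64_DISCONTIGUOUS(&fld)

    nvFieldSet64(&fld, addr, pte);  /* low 4 bits land in 11:8, remainder in 53:12 */
    /* nvFieldGet64(&fld, pte) reassembles and returns addr. */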
diff --git a/src/nvidia/inc/libraries/ioaccess/ioaccess.h b/src/nvidia/inc/libraries/ioaccess/ioaccess.h
new file mode 100644
index 0000000..0a2ca71
--- /dev/null
+++ b/src/nvidia/inc/libraries/ioaccess/ioaccess.h
@@ -0,0 +1,149 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#if (defined(NVRM) || defined(RMCFG_FEATURE_PLATFORM_GSP)) && !defined(NVWATCH)
+#pragma once
+#include "g_ioaccess_nvoc.h"
+#endif
+
+#ifndef _IO_ACCESS_H_
+#define _IO_ACCESS_H_
+
+#include "nvtypes.h"
+#include "nvstatus.h"
+#if (defined(NVRM) || defined(RMCFG_FEATURE_PLATFORM_GSP)) && !defined(NVWATCH)
+#include "nvoc/prelude.h"
+#endif
+
+#define REG_DRF_SHIFT(drf)     ((0?drf) % 32)
+#define REG_DRF_MASK(drf)      (0xFFFFFFFF>>(31-((1?drf) % 32)+((0?drf) % 32)))
+#define REG_DRF_DEF(d,r,f,c)   ((NV ## d ## r ## f ## c)<<REG_DRF_SHIFT(NV ## d ## r ## f))
+#define REG_DRF_NUM(d,r,f,n)   (((n)&REG_DRF_MASK(NV ## d ## r ## f))<<REG_DRF_SHIFT(NV ## d ## r ## f))
+#define REG_DRF_VAL(d,r,f,v)   (((v)>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+#define REG_DRF_SHIFTMASK(drf) (REG_DRF_MASK(drf)<<(REG_DRF_SHIFT(drf)))
+#define REG_DRF_WIDTH(drf)     ((1?drf) - (0?drf) + 1)
+
+#if (defined(NVRM) || defined(RMCFG_FEATURE_PLATFORM_GSP)) && !defined(NVWATCH)
+#define REG_RD08(ap, addr)         regaprtReadReg08   (staticCast(ap, RegisterAperture), addr)
+#define REG_RD16(ap, addr)         regaprtReadReg16   (staticCast(ap, RegisterAperture), addr)
+#define REG_RD32(ap, addr)         regaprtReadReg32   (staticCast(ap, RegisterAperture), addr)
+#define REG_WR08(ap, addr, val)    regaprtWriteReg08  (staticCast(ap, RegisterAperture), addr, val)
+#define REG_WR16(ap, addr, val)    regaprtWriteReg16  (staticCast(ap, RegisterAperture), addr, val)
+#define REG_WR32(ap, addr, val)    regaprtWriteReg32  (staticCast(ap, RegisterAperture), addr, val)
+#define REG_WR32_UC(ap, addr, val) regaprtWriteReg32Uc(staticCast(ap, RegisterAperture), addr, val)
+#define REG_VALID(ap, addr)        regaprtIsRegValid  (staticCast(ap, RegisterAperture), addr)
+
+NVOC_PREFIX(regaprt)
+class RegisterAperture
+{
+public:
+    virtual NvU8   regaprtReadReg08   (RegisterAperture *pAperture, NvU32 addr) = 0;
+    virtual NvU16  regaprtReadReg16   (RegisterAperture *pAperture, NvU32 addr) = 0;
+    virtual NvU32  regaprtReadReg32   (RegisterAperture *pAperture, NvU32 addr) = 0;
+    virtual void   regaprtWriteReg08  (RegisterAperture *pAperture, NvU32 addr, NvV8  value) = 0;
+    virtual void   regaprtWriteReg16  (RegisterAperture *pAperture, NvU32 addr, NvV16 value) = 0;
+    virtual void   regaprtWriteReg32  (RegisterAperture *pAperture, NvU32 addr, NvV32 value) = 0;
+    virtual void   regaprtWriteReg32Uc(RegisterAperture *pAperture, NvU32 addr, NvV32 value) = 0;
+    virtual NvBool regaprtIsRegValid  (RegisterAperture *pAperture, NvU32 addr) = 0;
+};
+
+//
+// TODO: Remove the wrapper structure WAR once NVOC supports in-place object construction
+// The proxy structure was introduced to avoid major refactoring until the feature is implemented
+// Also fix IoAperture getters
+// Use the interface class in NVWATCH once NVOC is enabled there
+//
+#else // (defined(NVRM) || defined(RMCFG_FEATURE_PLATFORM_GSP)) && !defined(NVWATCH)
+typedef struct IO_DEVICE IO_DEVICE;
+typedef struct IO_APERTURE IO_APERTURE;
+
+typedef NvU8   ReadReg008Fn(IO_APERTURE *a, NvU32 addr);
+typedef NvU16  ReadReg016Fn(IO_APERTURE *a, NvU32 addr);
+typedef NvU32  ReadReg032Fn(IO_APERTURE *a, NvU32 addr);
+typedef void   WriteReg008Fn(IO_APERTURE *a, NvU32 addr, NvV8  value);
+typedef void   WriteReg016Fn(IO_APERTURE *a, NvU32 addr, NvV16 value);
+typedef void   WriteReg032Fn(IO_APERTURE *a, NvU32 addr, NvV32 value);
+typedef NvBool ValidRegFn(IO_APERTURE *a, NvU32 addr);
+
+#define REG_RD08(ap, addr)         (ap)->pDevice->pReadReg008Fn((ap), (addr))
+#define REG_RD16(ap, addr)         (ap)->pDevice->pReadReg016Fn((ap), (addr))
+#define REG_RD32(ap, addr)         (ap)->pDevice->pReadReg032Fn((ap), (addr))
+#define REG_WR08(ap, addr, val)    (ap)->pDevice->pWriteReg008Fn((ap), (addr), (val))
+#define REG_WR16(ap, addr, val)    (ap)->pDevice->pWriteReg016Fn((ap), (addr), (val))
+#define REG_WR32(ap, addr, val)    (ap)->pDevice->pWriteReg032Fn((ap), (addr), (val))
+#define REG_WR32_UC(ap, addr, val) (ap)->pDevice->pWriteReg032UcFn((ap), (addr), (val))
+#define REG_VALID(ap, addr)        (ap)->pDevice->pValidRegFn((ap), (addr))
+
+// Get the address of a register given the Aperture and offset.
+#define REG_GET_ADDR(ap, offset)   ((ap)->baseAddress + (offset))
+
+struct IO_DEVICE
+{
+    ReadReg008Fn  *pReadReg008Fn;
+    ReadReg016Fn  *pReadReg016Fn;
+    ReadReg032Fn  *pReadReg032Fn;
+    WriteReg008Fn *pWriteReg008Fn;
+    WriteReg016Fn *pWriteReg016Fn;
+    WriteReg032Fn *pWriteReg032Fn;
+    WriteReg032Fn *pWriteReg032UcFn;
+    ValidRegFn    *pValidRegFn;
+};
+
+struct IO_APERTURE
+{
+    IO_DEVICE *pDevice;     // Pointer to module specific IO_DEVICE
+    NvU32      baseAddress; // register base address
+    NvU32      length;      // length of aperture
+};
+
+NV_STATUS ioaccessInitIOAperture
+(
+    IO_APERTURE *pAperture,
+    IO_APERTURE *pParentAperture,
+    IO_DEVICE   *pDevice,
+    NvU32        offset,
+    NvU32        length
+);
+#endif // (defined(NVRM) || defined(RMCFG_FEATURE_PLATFORM_GSP)) && !defined(NVWATCH)
+
+
+//
+// Macros for register I/O
+//
+
+#define REG_FLD_WR_DRF_NUM(ap,d,r,f,n) REG_WR32(ap,NV##d##r,(REG_RD32(ap,NV##d##r)&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_NUM(d,r,f,n))
+#define REG_FLD_WR_DRF_DEF(ap,d,r,f,c) REG_WR32(ap,NV##d##r,(REG_RD32(ap,NV##d##r)&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_DEF(d,r,f,c))
+#define REG_RD_DRF(ap,d,r,f)           (((REG_RD32(ap,NV ## d ## r))>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+#define REG_FLD_TEST_DRF_DEF(ap,d,r,f,c) (REG_RD_DRF(ap,d, r, f) == NV##d##r##f##c)
+#define REG_FLD_TEST_DRF_NUM(ap,d,r,f,n) (REG_RD_DRF(ap,d, r, f) == n)
+#define REG_FLD_IDX_TEST_DRF_DEF(ap,d,r,f,c,i) (REG_IDX_RD_DRF(ap, d, r, i, f) == NV##d##r##f##c)
+
+// Read/write a field or entire register of which there are several copies each accessed via an index
+#define REG_IDX_WR_DRF_NUM(ap,d,r,i,f,n)     REG_WR32(ap,NV ## d ## r(i), REG_DRF_NUM(d,r,f,n))
+#define REG_IDX_WR_DRF_DEF(ap,d,r,i,f,c)     REG_WR32(ap,NV ## d ## r(i), REG_DRF_DEF(d,r,f,c))
+#define REG_FLD_IDX_WR_DRF_NUM(ap,d,r,i,f,n) REG_WR32(ap,NV##d##r(i),(REG_RD32(ap,NV##d##r(i))&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_NUM(d,r,f,n))
+#define REG_FLD_IDX_WR_DRF_DEF(ap,d,r,i,f,c) REG_WR32(ap,NV##d##r(i),(REG_RD32(ap,NV##d##r(i))&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_DEF(d,r,f,c))
+#define REG_IDX_RD_DRF(ap,d,r,i,f)           (((REG_RD32(ap,NV ## d ## r(i)))>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+#define REG_RD_DRF_IDX(ap,d,r,f,i)           (((REG_RD32(ap,NV ## d ## r))>>REG_DRF_SHIFT(NV ## d ## r ## f(i)))&REG_DRF_MASK(NV ## d ## r ## f(i)))
+#define REG_IDX_OFFSET_RD_DRF(ap,d,r,i,o,f)  (((REG_RD32(ap,NV ## d ## r(i,o)))>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+
+#endif // _IO_ACCESS_H_
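With the field macros restored, call sites read like this sketch (the NV_PFOO register and its fields are hypothetical manual defines; ap is any aperture usable with REG_RD32/REG_WR32):

    /* Update one field with a named value, preserving the other bits: */
    REG_FLD_WR_DRF_DEF(ap, _PFOO, _CTRL, _ENABLE, _TRUE);

    /* Read a single field back out of the register: */
    NvU32 status = REG_RD_DRF(ap, _PFOO, _CTRL, _STATUS);

    /* Compare a field against a named value: */
    if (REG_FLD_TEST_DRF_DEF(ap, _PFOO, _CTRL, _ENABLE, _TRUE))
    {
        /* ... */
    }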
diff --git a/src/nvidia/inc/libraries/mapping_reuse/mapping_reuse.h b/src/nvidia/inc/libraries/mapping_reuse/mapping_reuse.h
new file mode 100644
index 0000000..7c3561b
--- /dev/null
+++ b/src/nvidia/inc/libraries/mapping_reuse/mapping_reuse.h
@@ -0,0 +1,118 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef MAPPING_REUSE_H
+#define MAPPING_REUSE_H
+
+#include "containers/map.h"
+#include "os/nv_memory_area.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Only return a single range.
+#define REUSE_MAPPING_DB_MAP_FLAGS_SINGLE_RANGE NVBIT(0)
+// No reuse, call the map callback directly.
+#define REUSE_MAPPING_DB_MAP_FLAGS_NO_REUSE NVBIT(1)
+
+// Remove this from the defaults when we support multi-range reuse.
+#define REUSE_MAPPING_DB_MAP_FLAGS_DEFAULT REUSE_MAPPING_DB_MAP_FLAGS_SINGLE_RANGE
+
+typedef struct ReuseMappingDbEntry ReuseMappingDbEntry;
+typedef struct ReuseMappingDbEntry {
+    NvU64 size;
+    NvU64 refCount;
+
+    //
+    // trackingInfo is the data used to locate this entry within the reuse database.
+    // It is in a union with newMappingNode, a linked-list node used while we
+    // create new entries, before we actually insert them into the reuse database.
+    //
+    union
+    {
+        struct
+        {
+            void *pAllocCtx;
+            MapNode virtualNode;
+            MapNode physicalNode;
+        } trackingInfo;
+        struct
+        {
+            ReuseMappingDbEntry *pNextEntry;
+            NvU64 virtualOffset;
+            NvU64 physicalOffset;
+        } newMappingNode;
+    };
+} ReuseMappingDbEntry;
+
+MAKE_INTRUSIVE_MAP(ReuseMappingDbPhysicalMap, ReuseMappingDbEntry, trackingInfo.physicalNode);
+MAKE_INTRUSIVE_MAP(ReuseMappingDbVirtualMap, ReuseMappingDbEntry, trackingInfo.virtualNode);
+
+//
+// There are two levels of mapping here: the first maps from a given allocation context to a
+// physical map; the second is an ordered map of physical offsets. All physical offsets are
+// relative to a given allocation context and are not valid on their own (i.e., they are not
+// real/unique memory offsets).
+//
+MAKE_MAP(ReuseMappingDbAllocCtxMap, ReuseMappingDbPhysicalMap);
+
+//
+// This function is called by the ReuseMappingDbMapFunction when it intends to add new mapped ranges.
+// pToken is the same token passed into ReuseMappingDbMapFunction.
+//
+typedef NV_STATUS (*ReuseMappingDbAddMappingCallback)(void *pToken, NvU64 physicalOffset, NvU64 virtualOffset, NvU64 size);
+
+// Map callback invoked when the database doesn't contain cached mappings
+typedef NV_STATUS (*ReuseMappingDbMapFunction)(void *pGlobalCtx, void *pAllocCtx, MemoryRange physicalRange, NvU64 cachingFlags, void *pToken, ReuseMappingDbAddMappingCallback fn);
+// Unmap callback invoked when the refcount of any mapped range reaches 0
+typedef void (*ReuseMappingDbUnnmapFunction)(void *pGlobalCtx, void *pAllocCtx, MemoryRange virtualRange);
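A sketch of client-side callbacks matching these typedefs (platformMapPages/platformUnmapPages are hypothetical OS helpers, and MemoryRange is assumed to carry start/size fields per os/nv_memory_area.h); these would be handed to reusemappingdbInit(), declared below:

    static NV_STATUS myMapCb(void *pGlobalCtx, void *pAllocCtx, MemoryRange physicalRange,
                             NvU64 cachingFlags, void *pToken, ReuseMappingDbAddMappingCallback fn)
    {
        NvU64 va;
        /* Create the actual mapping, then report it so the DB can cache and refcount it. */
        NV_STATUS status = platformMapPages(pAllocCtx, physicalRange, cachingFlags, &va);
        if (status != NV_OK)
            return status;
        return fn(pToken, physicalRange.start, va, physicalRange.size);
    }

    static void myUnmapCb(void *pGlobalCtx, void *pAllocCtx, MemoryRange virtualRange)
    {
        /* Invoked once the refcount of the cached range drops to zero. */
        platformUnmapPages(pAllocCtx, virtualRange);
    }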
+// Callback for when a node is split in two. Performs any tracking or cleanup necessary.
+typedef NV_STATUS (*ReuseMappingDbSplitMappingFunction)(void *pGlobalCtx, void *pAllocCtx, MemoryRange virtualRange, NvU64 boundary);
+
+typedef struct ReuseMappingDb
+{
+    ReuseMappingDbAllocCtxMap allocCtxPhysicalMap;
+    ReuseMappingDbVirtualMap virtualMap;
+    void *pGlobalCtx;
+    PORT_MEM_ALLOCATOR *pAllocator;
+
+    ReuseMappingDbMapFunction pMapCb;
+    ReuseMappingDbUnnmapFunction pUnmapCb;
+    ReuseMappingDbSplitMappingFunction pSplitCb;
+} ReuseMappingDb;
+
+void reusemappingdbInit(ReuseMappingDb *pReuseMappingDb, PORT_MEM_ALLOCATOR *pAllocator,
+    void *pGlobalCtx, ReuseMappingDbMapFunction pMapCb, ReuseMappingDbUnnmapFunction pUnmapCb,
+    ReuseMappingDbSplitMappingFunction pSplitCb);
+
+void reusemappingdbDestruct(ReuseMappingDb *pReuseMappingDb);
+
+NV_STATUS reusemappingdbMap(ReuseMappingDb *pReuseMappingDb, void *pAllocCtx, MemoryRange range,
+    MemoryArea *pMemoryArea, NvU64 cachingFlags);
+
+void reusemappingdbUnmap(ReuseMappingDb *pReuseMappingDb, void *pAllocCtx, MemoryRange range);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // MAPPING_REUSE_H
diff --git a/src/nvidia/inc/libraries/mmu/gmmu_fmt.h b/src/nvidia/inc/libraries/mmu/gmmu_fmt.h
new file mode 100644
index 0000000..1d82fba
--- /dev/null
+++ b/src/nvidia/inc/libraries/mmu/gmmu_fmt.h
@@ -0,0 +1,704 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _NV_GMMU_FMT_H_
+#define _NV_GMMU_FMT_H_
+
+#include
+
+#if (defined(NVRM) || defined(RMCFG_FEATURE_PLATFORM_GSP)) && !defined(NVWATCH)
+#include
+#else
+#if !defined(RMCFG_CHIP_ENABLED)
+#define RMCFG_CHIP_x 0
+#endif
+#endif //(defined(NVRM) || defined(RMCFG_FEATURE_PLATFORM_GSP)) && !defined(NVWATCH)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ * @file gmmu_fmt.h
+ *
+ * @brief Defines a light abstraction over GPU MMU (GMMU) HW formats.
+ *
+ * There are two main components of the abstraction:
+ *   1. General topology of the format provided by mmu_fmt.h.
+ *   2. Description of the fields within PDEs and PTEs described by the
+ *      field_desc.h and GMMU_FMT_P*E structs.
+ *
+ * The GMMU_FMT structure wraps these components together.
+ *
+ * The goals of this abstraction are:
+ *   G1. Allow common page table management code to work across a variety
+ *       of GMMU HW formats.
+ *   G2. Provide as much low-level control as if directly using the HW
+ *       manuals.
+ *   G3.
As close to DRF-macro efficiency as possible for critical paths. + * An example of a critical path is writing PTE values in a tight loop. + * On the other hand, PDE value writes (some which have more complex + * formats) occur orders of magnitude less frequently, and thus can + * afford more generality. + * + * One design consideration is how to distinguish + * MMU fields which are specific to certain architectures. + * + * The current approach is to describe the union of all fields + * across the supported formats. + * HW that does not support a given field must initialize the descriptor to + * zero (invalid) which will assert in the field setter/getter if used. + * + * While this introduces risk of "kitchen sink" syndrome, this approach was + * taken for the following reasons: + * 1. There are few fundamental feature differences between GMMU formats. + * 2. GMMU formats change relatively infrequently (e.g. rarely per-chip). + */ + +#include "nvtypes.h" +#include "field_desc.h" +#include "mmu_fmt.h" + +// +// Defines needed by PCF programming in PTE V3. +// Index bits are used when callers set flags. The below defines are only used +// for the HW <-> SW translation. +// + +// +// Note: The following PCF patterns have not been verified in HW +// and have been currently added to help overcome issues wrt +// PCF patterns tested in rmtest. +// +// SW_MMU_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD +// SW_MMU_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACE +// SW_MMU_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACE +// SW_MMU_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACE +// SW_MMU_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACE +// SW_MMU_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACE +// SW_MMU_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACE +// SW_MMU_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD +// SW_MMU_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD +// SW_MMU_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACE +// + +// Used by PDE +#define SW_MMU_PDE_PCF_INVALID_ATS_ALLOWED 0x00000201 +#define SW_MMU_PDE_PCF_INVALID_ATS_NOT_ALLOWED 0x00000001 +#define SW_MMU_PDE_PCF_SPARSE_ATS_ALLOWED 0x00000204 +#define SW_MMU_PDE_PCF_SPARSE_ATS_NOT_ALLOWED 0x00000004 + +#define SW_MMU_PDE_PCF_VALID_CACHED_ATS_ALLOWED 0x00000200 +#define SW_MMU_PDE_PCF_VALID_UNCACHED_ATS_ALLOWED 0x00000220 +#define SW_MMU_PDE_PCF_VALID_CACHED_ATS_NOT_ALLOWED 0x00000000 +#define SW_MMU_PDE_PCF_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000020 + +// Used by PTEs +#define SW_MMU_PTE_PCF_INVALID 0x00000001 +#define SW_MMU_PTE_PCF_NO_VALID_4KB_PAGE 0x00000002 +#define SW_MMU_PTE_PCF_SPARSE 0x00000004 +#define SW_MMU_PTE_PCF_MAPPING_NOWHERE 0x00000008 + +#define SW_MMU_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACD 0x00000000 +#define SW_MMU_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACE 0x00000010 +#define SW_MMU_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD 0x00000020 +#define SW_MMU_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACE 0x00000030 +#define SW_MMU_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACE 0x00000050 +#define SW_MMU_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACE 0x00000070 +#define SW_MMU_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACE 0x000000B0 +#define SW_MMU_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACE 0x000000D0 +#define SW_MMU_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACE 0x000000F0 + +#define SW_MMU_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACD 0x00000100 +#define SW_MMU_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACE 0x00000110 +#define SW_MMU_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACD 0x00000120 +#define SW_MMU_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACE 0x00000130 + +#define SW_MMU_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACD 0x00000140 +#define 
SW_MMU_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACE 0x00000150 +#define SW_MMU_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACD 0x00000160 +#define SW_MMU_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACE 0x00000170 + +#define SW_MMU_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD 0x00000180 +#define SW_MMU_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACE 0x00000190 +#define SW_MMU_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD 0x000001A0 +#define SW_MMU_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACE 0x000001B0 + +#define SW_MMU_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACD 0x000001C0 +#define SW_MMU_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACE 0x000001D0 +#define SW_MMU_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACD 0x000001E0 +#define SW_MMU_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACE 0x000001F0 + +// +// Defines all toggles in either PTE or PDE PCF space +// Note: please do not change these defines without careful review! +// PTE and PDE defines are allowed to collide as they will not be +// processed in the same code paths anyway. +// +#define SW_MMU_PCF_INVALID_IDX 0 // Used for PDE and PTE +#define SW_MMU_PCF_NV4K_IDX 1 // PTE specific +#define SW_MMU_PCF_SPARSE_IDX 2 // Used for PDE and PTE +#define SW_MMU_PCF_NOMAPPING_IDX 3 // PTE specific + +#define SW_MMU_PCF_ACE_IDX 4 // PTE specific +#define SW_MMU_PCF_UNCACHED_IDX 5 // Used for PDE and PTE +#define SW_MMU_PCF_NOATOMIC_IDX 6 // PTE specific +#define SW_MMU_PCF_RO_IDX 7 // PTE specific + +#define SW_MMU_PCF_REGULAR_IDX 8 // PTE specific +#define SW_MMU_PCF_ATS_ALLOWED_IDX 9 // PDE specific + +// Forward declarations. +typedef union GMMU_ENTRY_VALUE GMMU_ENTRY_VALUE; +typedef struct GMMU_FIELD_APERTURE GMMU_FIELD_APERTURE; +typedef struct GMMU_FIELD_ADDRESS GMMU_FIELD_ADDRESS; +typedef struct GMMU_FMT GMMU_FMT; +typedef struct GMMU_FMT_PDE GMMU_FMT_PDE; +typedef struct GMMU_FMT_PDE_MULTI GMMU_FMT_PDE_MULTI; +typedef struct GMMU_FMT_PTE GMMU_FMT_PTE; +typedef struct GMMU_COMPR_INFO GMMU_COMPR_INFO; + +/*! + * Maximum size in bytes of page directory and table entries across + * the supported formats. + */ +#define GMMU_FMT_MAX_ENTRY_SIZE 16 + +/*! + * Default version specifier for API args to indicate no preference. + * This is not a real version number and not part of the + * enumeration array below. + */ +#define GMMU_FMT_VERSION_DEFAULT 0 + +/*! + * 2-level (40b VA) format supported Fermi through Maxwell. + * Still supported in Pascal HW as fallback. + */ +#define GMMU_FMT_VERSION_1 1 + +/*! + * 5-level (49b VA) format supported on Pascal+. + */ +#define GMMU_FMT_VERSION_2 2 + +/*! + * 6-level (57b VA) format supported on Hopper+. + */ +#define GMMU_FMT_VERSION_3 3 + +/*! + * Maximum number of MMU versions supported. + */ +#define GMMU_FMT_MAX_VERSION_COUNT 3 + +/*! + * Array of format version numbers for enumeration utility. + */ +extern const NvU32 g_gmmuFmtVersions[GMMU_FMT_MAX_VERSION_COUNT]; + +/*! + * Maximum number of big page sizes supported by a single GPU. + */ +#define GMMU_FMT_MAX_BIG_PAGE_SIZES 2 + +/*! + * Array of big page shifts for enumeration utility. + */ +extern const NvU32 g_gmmuFmtBigPageShifts[GMMU_FMT_MAX_BIG_PAGE_SIZES]; + +/*! + * Convenience type for declaring generic temporary GMMU entry values. + */ +union GMMU_ENTRY_VALUE +{ + NvU8 v8[GMMU_FMT_MAX_ENTRY_SIZE / 1]; + NvU32 v32[GMMU_FMT_MAX_ENTRY_SIZE / 4]; + NvU64 v64[GMMU_FMT_MAX_ENTRY_SIZE / 8]; +}; + +/*! + * Top-level structure describing a GPU MMU format. + */ +struct GMMU_FMT +{ + NvU32 version; + + /*! + * Root of the page level topology (e.g. the root page directory). + */ + const MMU_FMT_LEVEL *pRoot; + + /*! 
+     * Description of page directory entry fields common
+     * across page directory levels with a single sub-level.
+     */
+    const GMMU_FMT_PDE *pPde;
+
+    /*!
+     * Description of page directory entry fields common
+     * across page directory levels with two sub-levels.
+     */
+    const GMMU_FMT_PDE_MULTI *pPdeMulti;
+
+    /*!
+     * Description of page table entry fields common
+     * across all page table levels in the topology.
+     */
+    const GMMU_FMT_PTE *pPte;
+
+    /*!
+     * Indicates if the MMU HW supports sparse through the
+     * volatile field of each PDE/PTE.
+     */
+    NvBool bSparseHwSupport;
+};
+
+/*!
+ * Physical apertures for the supported GMMU formats.
+ */
+typedef enum
+{
+    /*!
+     * Indicates an invalid aperture.
+     * @note Only supported for GPU PDEs to distinguish invalid sub-levels.
+     */
+    GMMU_APERTURE_INVALID,
+
+    /*!
+     * GPU-local video memory (a.k.a. FB).
+     * @note Only supported for GPU PDEs and PTEs.
+     */
+    GMMU_APERTURE_VIDEO,
+
+    /*!
+     * GPU-peer video memory.
+     * @note Only supported for GPU PTEs.
+     * @note Peer index must be initialized in the appropriate address field.
+     */
+    GMMU_APERTURE_PEER,
+
+    /*!
+     * Non-coherent system memory.
+     *
+     * (GPU) MMU will NOT maintain coherence with CPU L2 cache.
+     *
+     * Higher-level APIs should only allow this when it is known
+     * the memory is not cacheable by CPU or the coherency is
+     * managed explicitly (e.g. w/ flushes in SW).
+     * Also consider that this path is not necessarily faster.
+     */
+    GMMU_APERTURE_SYS_NONCOH,
+
+    /*!
+     * Coherent system memory.
+     *
+     * (GPU) MMU will snoop CPU L2 cache if possible.
+     * TODO: Wiki link on arch differences.
+     *
+     * This is usually the safer choice over NONCOH since it works
+     * whether the memory is cached by CPU L2 or not.
+     * On some CPU architectures going through CPU L2 may
+     * even be faster than the non-coherent path.
+     */
+    GMMU_APERTURE_SYS_COH,
+
+    // Last value.
+    GMMU_APERTURE__COUNT
+} GMMU_APERTURE;
+
+/*!
+ * Aperture field descriptor.
+ */
+struct GMMU_FIELD_APERTURE
+{
+    NV_FIELD_ENUM _enum;
+};
+
+#define INIT_FIELD_APERTURE(pAper, drf, _entries)                              \
+    do {                                                                       \
+        INIT_FIELD_ENUM(&(pAper)->_enum, drf, GMMU_APERTURE__COUNT, _entries); \
+    } while(0)
+
+/*!
+ * Encode and set a GMMU aperture enum value to a HW aperture field.
+ */
+static NV_FORCEINLINE void
+gmmuFieldSetAperture
+(
+    const GMMU_FIELD_APERTURE *pAperture,
+    const GMMU_APERTURE        value,
+    NvU8                      *pMem
+)
+{
+    nvFieldSetEnum(&pAperture->_enum, value, pMem);
+}
+
+/*!
+ * Get and decode a HW aperture field value to a GMMU aperture enum value.
+ */
+static NV_FORCEINLINE GMMU_APERTURE
+gmmuFieldGetAperture
+(
+    const GMMU_FIELD_APERTURE *pAperture,
+    const NvU8                *pMem
+)
+{
+    return (GMMU_APERTURE)nvFieldGetEnum(&pAperture->_enum, pMem);
+}
+
+/*!
+ * Address field descriptor.
+ */
+struct GMMU_FIELD_ADDRESS
+{
+    NV_FIELD_DESC64 desc;
+    NvU32           shift;
+};
+
+#define INIT_FIELD_ADDRESS(pAddr, drf, _shift)    \
+    do {                                          \
+        INIT_FIELD_DESC64(&(pAddr)->desc, drf);   \
+        (pAddr)->shift = _shift;                  \
+    } while(0)
+
+/*!
+ * Encode (shift) and set a GMMU address field.
+ */
+static NV_FORCEINLINE void
+gmmuFieldSetAddress
+(
+    const GMMU_FIELD_ADDRESS *pField,
+    const NvU64               address,
+    NvU8                     *pMem
+)
+{
+    NV_ASSERT_CHECKED_PRECOMP(0 == (address & (NVBIT64(pField->shift) - 1)));
+    nvFieldSet64(&pField->desc, address >> pField->shift, pMem);
+}
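A sketch of the address helpers (NV_XYZ_PDE_ADDR is a hypothetical 53:8 DRF range; a shift of 12 means the field stores a 4K-aligned address right-shifted by 12):

    GMMU_FIELD_ADDRESS fldAddr;
    NvU8 pde[GMMU_FMT_MAX_ENTRY_SIZE] = {0};

    INIT_FIELD_ADDRESS(&fldAddr, NV_XYZ_PDE_ADDR, 12);
    gmmuFieldSetAddress(&fldAddr, 0x12345000ULL, pde);  /* asserts 4K alignment */
    /* gmmuFieldGetAddress(&fldAddr, pde) == 0x12345000ULL */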
+/*!
+ * Get and decode (shift) a GMMU address field.
+ */
+static NV_FORCEINLINE NvU64
+gmmuFieldGetAddress
+(
+    const GMMU_FIELD_ADDRESS *pField,
+    const NvU8               *pMem
+)
+{
+    return nvFieldGet64(&pField->desc, pMem) << pField->shift;
+}
+
+/*!
+ * Page directory entry (PDE) format.
+ */
+struct GMMU_FMT_PDE
+{
+    /*!
+     * Version information is needed to interpret fields differently.
+     * Should always be the same as the version in GMMU_FMT above.
+     */
+    NvU32 version;
+
+    /*!
+     * Aperture field indicating in which physical address space the sub-level resides.
+     */
+    GMMU_FIELD_APERTURE fldAperture;
+
+    /*!
+     * Physical address field when aperture is system memory.
+     */
+    GMMU_FIELD_ADDRESS fldAddrSysmem;
+
+    /*!
+     * Physical address field when aperture is video memory.
+     */
+    GMMU_FIELD_ADDRESS fldAddrVidmem;
+
+    /*!
+     * Physical address field (used by V3 format only).
+     */
+    GMMU_FIELD_ADDRESS fldAddr;
+
+    /*!
+     * Indicates GPU reads memory on every access to the
+     * next page directory/table level.
+     *
+     * @note This is not the same as caching, and is ignored for some
+     *       apertures on some chips.
+     *       TODO: Wiki link to explain arch differences.
+     */
+    NV_FIELD_BOOL fldVolatile;
+
+    /*!
+     * PDE_PCF field for V3 format.
+     */
+    NV_FIELD_DESC32 fldPdePcf;
+};
+
+/*!
+ * Get the PDE physical address field format for a given aperture.
+ */
+const GMMU_FIELD_ADDRESS *gmmuFmtPdePhysAddrFld(
+    const GMMU_FMT_PDE *pPde,
+    const GMMU_APERTURE aperture);
+
+/*!
+ * Multi (e.g. dual) page directory entry format.
+ */
+struct GMMU_FMT_PDE_MULTI
+{
+    /*!
+     * Reciprocal exponent field for partial sub-level size.
+     * Minimum size of each sub-level is FullLevelSize / (2 ^ sizeRecipExpMax).
+     */
+    NV_FIELD_DESC32 fldSizeRecipExp;
+
+    /*!
+     * Per-sub-level information.
+     */
+    GMMU_FMT_PDE subLevels[MMU_FMT_MAX_SUB_LEVELS];
+};
+
+/*!
+ * Retrieve the PDE format corresponding to a particular level and sub-level.
+ *
+ * @param[in] pFmt     MMU format.
+ * @param[in] pLevel   Level format.
+ * @param[in] subLevel Sub-level index <= MMU_FMT_MAX_SUB_LEVELS.
+ *
+ * @returns Sub-level PDE format or NULL if not a page directory level.
+ */
+const GMMU_FMT_PDE* gmmuFmtGetPde(
+    const GMMU_FMT      *pFmt,
+    const MMU_FMT_LEVEL *pLevel,
+    const NvU32          subLevel);
+
+/*!
+ * Page table entry (PTE) format.
+ */
+struct GMMU_FMT_PTE
+{
+    /*!
+     * Version information is needed to interpret fields differently.
+     * Should always be the same as the version in GMMU_FMT above.
+     */
+    NvU32 version;
+
+    /*!
+     * Field that determines if the PTE is valid.
+     */
+    NV_FIELD_BOOL fldValid;
+
+    /*!
+     * Aperture field indicating in which physical address space the page resides.
+     */
+    GMMU_FIELD_APERTURE fldAperture;
+
+    /*!
+     * Physical address field when aperture is system memory.
+     */
+    GMMU_FIELD_ADDRESS fldAddrSysmem;
+
+    /*!
+     * Physical address field when aperture is video memory.
+     */
+    GMMU_FIELD_ADDRESS fldAddrVidmem;
+
+    /*!
+     * Physical address field when aperture is peer memory.
+     */
+    GMMU_FIELD_ADDRESS fldAddrPeer;
+
+    /*!
+     * Peer index field when aperture is peer memory.
+     */
+    NV_FIELD_DESC32 fldPeerIndex;
+
+    /*!
+     * Indicates GPU reads/writes memory on every access to the page.
+     *
+     * @note This is not the same as caching, and is ignored for some
+     *       apertures on some chips.
+     *       TODO: Wiki link to explain arch differences.
+     */
+    NV_FIELD_BOOL fldVolatile;
+
+    /*!
+     * Indicates to generate a read-only (RO) fault on writes.
+     *
+     * @note This does not affect L1 cache access if
+     *       fldWriteDisable is supported.
+     */
+    NV_FIELD_BOOL fldReadOnly;
+    /*!
+     * Indicates to generate a write-only (WO) fault on L1 reads.
+     * @note Only supported on some GPU architectures.
+     */
+    NV_FIELD_BOOL fldReadDisable;
+
+    /*!
+     * Indicates to generate a read-only (RO) fault on L1 writes.
+     * @note Only supported on some GPU architectures.
+     */
+    NV_FIELD_BOOL fldWriteDisable;
+
+    /*!
+     * Indicates to fault on non-privileged access.
+     */
+    NV_FIELD_BOOL fldPrivilege;
+
+    /*!
+     * See HW manuals.
+     */
+    NV_FIELD_BOOL fldEncrypted;
+
+    /*!
+     * Indicates to lock the PTE in the GPU TLBs, giving precedence over
+     * unlocked PTEs.
+     * TLB invalidate will still evict the PTE.
+     */
+    NV_FIELD_BOOL fldLocked;
+
+    /*!
+     * TODO: TBD
+     */
+    NV_FIELD_BOOL fldAtomicDisable;
+
+    /*!
+     * Kind (storage format) field.
+     */
+    NV_FIELD_DESC32 fldKind;
+
+    /*!
+     * Compression tag field.
+     */
+    NV_FIELD_DESC32 fldCompTagLine;
+
+    /*!
+     * Compression tag sub-index field.
+     */
+    NV_FIELD_DESC32 fldCompTagSubIndex;
+
+    /*!
+     * PTE_PCF field for V3 format.
+     */
+    NV_FIELD_DESC32 fldPtePcf;
+};
+
+/*!
+ * Determine if an entry is a PTE or PDE based either on its static format or
+ * dynamic value.
+ *
+ * @param[in] pFmt   MMU format.
+ * @param[in] pLevel Level format.
+ * @param[in] pEntry Entry value of size pLevel->entrySize.
+ *
+ * @returns true if the entry is a PTE, false if it is a PDE.
+ */
+NvBool gmmuFmtEntryIsPte(
+    const GMMU_FMT      *pFmt,
+    const MMU_FMT_LEVEL *pLevel,
+    const NvU8          *pEntry);
+
+/*!
+ * Get the PTE physical address field format for a given aperture.
+ */
+const GMMU_FIELD_ADDRESS *gmmuFmtPtePhysAddrFld(
+    const GMMU_FMT_PTE *pPte,
+    const GMMU_APERTURE aperture);
+
+/*!
+ * GPU compression attributes for a physical surface.
+ *
+ * This info will be returned by RM from the HW resource alloc API.
+ */
+struct GMMU_COMPR_INFO
+{
+    /*!
+     * log2 of compression page size.
+     */
+    NvU32 compPageShift;
+
+    /*!
+     * Compressed kind.
+     */
+    NvU32 compressedKind;
+
+    /*!
+     * Index of the first compression page relative to the surface.
+     * e.g. if the entire surface is compressed this is 0.
+     */
+    NvU32 compPageIndexLo;
+
+    /*!
+     * Index of the last compression page relative to the surface.
+     * e.g. (compPageIndexHi - compPageIndexLo + 1) is the number of comptag
+     * lines used for the surface.
+     * CompPageIndex is tracked at a (1 << compPageShift) granularity.
+     */
+    NvU32 compPageIndexHi;
+
+    /*!
+     * Starting comptag line to use at compPageIndexLo.
+     * Comptags are used contiguously up to the maximum
+     * compTagLineMin + (compPageIndexHi - compPageIndexLo).
+     */
+    NvU32 compTagLineMin;
+
+    /*!
+     * Granularity of comptagline assignment.
+     * Used for verif only; deprecated from Turing.
+     */
+    NvU32 compTagLineMultiplier;
+};
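Per the field documentation above, the comptag bookkeeping reduces to simple arithmetic; a hedged sketch of the per-page mapping (ignoring compTagLineMultiplier and the sub-index):

    /* Which compression page does this byte offset fall into? */
    NvU64 compPageIdx = surfOffset >> pCompr->compPageShift;
    NV_ASSERT_CHECKED_PRECOMP((compPageIdx >= pCompr->compPageIndexLo) &&
                              (compPageIdx <= pCompr->compPageIndexHi));

    /* Comptag lines are assigned contiguously starting at compTagLineMin. */
    NvU32 compTagLine = pCompr->compTagLineMin +
                        (NvU32)(compPageIdx - pCompr->compPageIndexLo);
    /* compTagLine is the value written through the PTE's fldCompTagLine. */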
+/*!
+ * Update a PTE value's compression fields based
+ * on the legacy compression attributes of the surface being mapped.
+ *
+ * @param[in]     pFmt          MMU format.
+ * @param[in]     pLevel        Level format.
+ * @param[in]     pCompr        Compression info of the physical surface.
+ * @param[in]     surfOffset    Offset in bytes into the physical surface.
+ * @param[in]     startPteIndex Starting PTE index for comptagSubIndex calculation.
+ * @param[in]     numPages      Number of pages (PTEs) to update.
+ * @param[in,out] pEntries      Array of PTE values to update of length
+ *                              numPages * pLevel->entrySize.
+ */
+void gmmuFmtInitPteCompTags(
+    const GMMU_FMT        *pFmt,
+    const MMU_FMT_LEVEL   *pLevel,
+    const GMMU_COMPR_INFO *pCompr,
+    const NvU64            surfOffset,
+    const NvU32            startPteIndex,
+    const NvU32            numPages,
+    NvU8                  *pEntries);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/nvidia/inc/libraries/mmu/mmu_fmt.h b/src/nvidia/inc/libraries/mmu/mmu_fmt.h
new file mode 100644
index 0000000..6b687eb
--- /dev/null
+++ b/src/nvidia/inc/libraries/mmu/mmu_fmt.h
@@ -0,0 +1,237 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _NV_MMU_FMT_H_
+#define _NV_MMU_FMT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ * @file mmu_fmt.h
+ *
+ * @brief Defines an abstraction over general MMU HW formats.
+ *
+ * The main goal is to leverage common page table management
+ * code across a variety of MMU HW formats.
+ */
+#include "nvtypes.h"
+#include "nvmisc.h"
+#include "compat.h"
+
+//
+// Separate files for the types as they're included by CTRL definitions
+//
+#include "mmu_fmt_types.h"
+
+/*!
+ * Get bitmask of page sizes supported under a given MMU level.
+ *
+ * Example: For the root level this returns all the page sizes
+ * supported by the MMU format.
+ *
+ * @returns Bitmask of page sizes (sufficient since page sizes are powers of 2).
+ */
+NvU64 mmuFmtAllPageSizes(const MMU_FMT_LEVEL *pLevel);
+
+/*!
+ * Get bitmask of the VA coverages for each level, starting at a given level.
+ * This is a superset of mmuFmtAllPageSizes, but includes page directory coverage bits.
+ *
+ * Example: For the root level this provides a summary of the VA breakdown.
+ *          Each bit corresponds to the shift of a level in the format and
+ *          the number of bits set is equal to the total number of levels
+ *          (including parallel sub-levels).
+ *
+ * @returns Bitmask of level VA coverages.
+ */
+NvU64 mmuFmtAllLevelCoverages(const MMU_FMT_LEVEL *pLevel);
+
+/*!
+ * Find a level with the given page shift.
+ *
+ * @param[in] pLevel    Level format to start search.
+ * @param[in] pageShift log2(pageSize).
+ *
+ * @returns The level if found or NULL otherwise.
+ */
+const MMU_FMT_LEVEL *mmuFmtFindLevelWithPageShift(
+    const MMU_FMT_LEVEL *pLevel,
+    const NvU64          pageShift);
+
+/*!
+ * Find the parent level of a given level.
+ *
+ * @param[in] pRoot  Root level format.
+ * @param[in] pLevel Child level format.
+ * @param[out] pSubLevel Returns the sub-level of the child within the parent if found. + * Can be NULL if not needed. + * + * @returns Parent level if found or NULL otherwise. + */ +const MMU_FMT_LEVEL *mmuFmtFindLevelParent( + const MMU_FMT_LEVEL *pRoot, + const MMU_FMT_LEVEL *pLevel, + NvU32 *pSubLevel); + +/*! + * Get the next sub-level format in a search for a particular level. + * + * @returns Next level if found or NULL otherwise. + */ +const MMU_FMT_LEVEL *mmuFmtGetNextLevel( + const MMU_FMT_LEVEL *pLevelFmt, + const MMU_FMT_LEVEL *pTargetFmt); + +/*! + * Return a level description from the format. Used for Verif. + * + * @returns const char* string description of the level. + */ +const char* +mmuFmtConvertLevelIdToSuffix +( + const MMU_FMT_LEVEL *pLevelFmt +); + +/*! + * Bitmask of VA covered by a given level. + * e.g. for the root level this is the maximum VAS limit. + */ +static NV_FORCEINLINE NvU64 +mmuFmtLevelVirtAddrMask(const MMU_FMT_LEVEL *pLevel) +{ + return NVBIT64(pLevel->virtAddrBitHi + 1) - 1; +} + +/*! + * Bitmask of VA covered by a single entry within a level. + * e.g. (page size - 1) for PTEs within this level. + */ +static NV_FORCEINLINE NvU64 +mmuFmtEntryVirtAddrMask(const MMU_FMT_LEVEL *pLevel) +{ + return NVBIT64(pLevel->virtAddrBitLo) - 1; +} + +/*! + * Bitmask of VA that contains the entry index of a level. + */ +static NV_FORCEINLINE NvU64 +mmuFmtEntryIndexVirtAddrMask(const MMU_FMT_LEVEL *pLevel) +{ + return mmuFmtLevelVirtAddrMask(pLevel) & ~mmuFmtEntryVirtAddrMask(pLevel); +} + +/*! + * Extract the entry index of a level from a virtual address. + */ +static NV_FORCEINLINE NvU32 +mmuFmtVirtAddrToEntryIndex(const MMU_FMT_LEVEL *pLevel, const NvU64 virtAddr) +{ + return (NvU32)((virtAddr & mmuFmtEntryIndexVirtAddrMask(pLevel)) >> pLevel->virtAddrBitLo); +} + +/*! + * Truncate a virtual address to the base of a level. + */ +static NV_FORCEINLINE NvU64 +mmuFmtLevelVirtAddrLo(const MMU_FMT_LEVEL *pLevel, const NvU64 virtAddr) +{ + return virtAddr & ~mmuFmtLevelVirtAddrMask(pLevel); +} + +/*! + * Round a virtual address up to the limit covered by a level. + */ +static NV_FORCEINLINE NvU64 +mmuFmtLevelVirtAddrHi(const MMU_FMT_LEVEL *pLevel, const NvU64 virtAddr) +{ + return mmuFmtLevelVirtAddrLo(pLevel, virtAddr) + mmuFmtLevelVirtAddrMask(pLevel); +} + +/*! + * Get the virtual address base of an entry index from the base virtual + * address of its level. + */ +static NV_FORCEINLINE NvU64 +mmuFmtEntryIndexVirtAddrLo(const MMU_FMT_LEVEL *pLevel, const NvU64 vaLevelBase, + const NvU32 entryIndex) +{ + NV_ASSERT_CHECKED_PRECOMP(0 == (vaLevelBase & mmuFmtLevelVirtAddrMask(pLevel))); + return vaLevelBase + ((NvU64)entryIndex << pLevel->virtAddrBitLo); +} + +/*! + * Get the virtual address limit of an entry index from the base virtual + * address of its level. + */ +static NV_FORCEINLINE NvU64 +mmuFmtEntryIndexVirtAddrHi(const MMU_FMT_LEVEL *pLevel, const NvU64 vaLevelBase, + const NvU32 entryIndex) +{ + return mmuFmtEntryIndexVirtAddrLo(pLevel, vaLevelBase, entryIndex) + + mmuFmtEntryVirtAddrMask(pLevel); +} + +/*! + * Get the page size for PTEs within a given MMU level. + */ +static NV_FORCEINLINE NvU64 +mmuFmtLevelPageSize(const MMU_FMT_LEVEL *pLevel) +{ + return mmuFmtEntryVirtAddrMask(pLevel) + 1; +} + +/*! + * Extract the page offset of a virtual address based on a given MMU level. + */ +static NV_FORCEINLINE NvU64 +mmuFmtVirtAddrPageOffset(const MMU_FMT_LEVEL *pLevel, const NvU64 virtAddr) +{ + return virtAddr & mmuFmtEntryVirtAddrMask(pLevel); +} + +/*! 
+ * Calculate the maximum number of entries contained by a given MMU level. + */ +static NV_FORCEINLINE NvU32 +mmuFmtLevelEntryCount(const MMU_FMT_LEVEL *pLevel) +{ + return NVBIT32(pLevel->virtAddrBitHi - pLevel->virtAddrBitLo + 1); +} + +/*! + * Calculate the maximum size in bytes of a given MMU level. + */ +static NV_FORCEINLINE NvU32 +mmuFmtLevelSize(const MMU_FMT_LEVEL *pLevel) +{ + return mmuFmtLevelEntryCount(pLevel) * pLevel->entrySize; +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h b/src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h new file mode 100644 index 0000000..8465949 --- /dev/null +++ b/src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h @@ -0,0 +1,154 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2017,2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * Internal macro definitions for NVLOG_PRINTF + * + * Macro magic example: (Assuming nothing gets compiled out) + * 0) NV_PRINTF(LEVEL_ERROR, "Bla %d %d", arg0, arg1) + * 1) NVLOG_PRINTF(GLOBAL, LEVEL_ERROR, "Bla %d %d", arg0, arg1)) + * - This gets picked up by the parser + * 2) _NVLOG_GET_PRINT + * 3) _NVLOG_GET_PRINT1(NVLOG_, NVLOG_FILEID, __LINE__, PRINT_REL, ___please_include_noprecomp_h___) + * 4) _NVLOG_GET_PRINT2(NVLOG_, 0xaaaaaa, 1024, PRINT_REL, ___please_include_noprecomp_h___) + * 5) NVLOG_0xaaaaaa_1024_PRINT_REL + * 6) NVLOG_PRINT(LEVEL_ERROR, 0xaaaaaa, 0x04001100, arg0, arg1) + * 7) NVLOG_PRINT2(LEVEL_ERROR) (0xaaaaaa, 0x04001100, arg0, arg1) + * 8) NVLOG_PRINT_LEVEL_0x4 (0xaaaaaa, 0x04001100, arg0, arg1) + * 9) nvLog_Printf4 (0xaaaaaa, 0x04001100, arg0, arg1) + * + */ + +// Compile time stubbing out output below NVLOG_LEVEL level +#define _NVLOG_NOTHING(...) ((void)0) + +#include "nvport/nvport.h" + +// +// Use __COUNTER__ if available. If not, we can use __LINE__ since it is also +// monotonically rising. If __COUNTER__ is unavailable, we can't have inline +// functions using NvLog. 
+// +#if PORT_COMPILER_HAS_COUNTER +#define _NVLOG_COUNTER __COUNTER__ +#else +#define _NVLOG_COUNTER __LINE__ +#endif + +// +// NVLOG_PARSING is defined if the file is being compiled for the parser run +// +#if defined(NVLOG_PARSING) + +// +// Since the '@' symbol is not found in C code, using it here makes it trivial +// for the parser code to extract the needed info from preprocessed source. +// +#define _NVLOG_PRINTF2(count, file, line, tag, route, level, format, ...) \ + NVLOG@@@count@@@file@@@line@@@level@@@tag@@@route@@@format@@@__VA_ARGS__@@@ + +#define _NVLOG_PRINTF(tag, route, level, format, ...) \ + _NVLOG_PRINTF2(_NVLOG_COUNTER, __FILE__, __LINE__, tag, route, level, format, __VA_ARGS__) + +#elif !NVLOG_ENABLED + +#define _NVLOG_PRINTF _NVLOG_NOTHING + +#else // NVLOG_ENABLED && !defined(NVLOG_PARSING) + +#include "nvlog_inc.h" + +#ifdef NVLOG_STRINGS_ALLOWED +#define NVLOG_STRING(...) __VA_ARGS__ +#else +#define NVLOG_STRING(...) +#endif + +// +// One for every debug level, needed for compile time filtering. +// +#include "nvstatus.h" + +typedef NV_STATUS NVLOG_PRINTF_PROTO(NvU32, NvU32, ...); +NVLOG_PRINTF_PROTO nvlogPrint_printf0; +NVLOG_PRINTF_PROTO nvlogPrint_printf1; +NVLOG_PRINTF_PROTO nvlogPrint_printf2; +NVLOG_PRINTF_PROTO nvlogPrint_printf3; +NVLOG_PRINTF_PROTO nvlogPrint_printf4; +NVLOG_PRINTF_PROTO nvlogPrint_printf5; +NVLOG_PRINTF_PROTO nvlogPrint_printf6; + +// This one is used for unknown debug level - It has an extra argument +NV_STATUS nvlogPrint_printf(NvU32 dbgLevel, NvU32 file, NvU32 line, ...); + + +#if NVLOG_LEVEL <= LEVEL_SILENT +#define NVLOG_PRINT_LEVEL_0x0 nvlogPrint_printf0 +#else +#define NVLOG_PRINT_LEVEL_0x0 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= LEVEL_INFO +#define NVLOG_PRINT_LEVEL_0x1 nvlogPrint_printf1 +#else +#define NVLOG_PRINT_LEVEL_0x1 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= LEVEL_NOTICE +#define NVLOG_PRINT_LEVEL_0x2 nvlogPrint_printf2 +#else +#define NVLOG_PRINT_LEVEL_0x2 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= LEVEL_WARNING +#define NVLOG_PRINT_LEVEL_0x3 nvlogPrint_printf3 +#else +#define NVLOG_PRINT_LEVEL_0x3 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= LEVEL_ERROR +#define NVLOG_PRINT_LEVEL_0x4 nvlogPrint_printf4 +#else +#define NVLOG_PRINT_LEVEL_0x4 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= LEVEL_HW_ERROR +#define NVLOG_PRINT_LEVEL_0x5 nvlogPrint_printf5 +#else +#define NVLOG_PRINT_LEVEL_0x5 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= LEVEL_FATAL +#define NVLOG_PRINT_LEVEL_0x6 nvlogPrint_printf6 +#else +#define NVLOG_PRINT_LEVEL_0x6 _NVLOG_NOTHING +#endif +// For when the level isn't known at compile time +#define NVLOG_PRINT_LEVEL_ NVLOG_PRINT_LEVEL_UNKNOWN +#define NVLOG_PRINT_LEVEL_UNKNOWN nvlogPrint_printf + + +#define NVLOG_PRINT2(dbglvl) NVLOG_PRINT_LEVEL_ ## dbglvl +#define NVLOG_PRINT(level, ...) NVLOG_PRINT2(level)(__VA_ARGS__) + +#define _NVLOG_GET_PRINT2(prefix, x) prefix ##x +#define _NVLOG_GET_PRINT1(prefix, id) _NVLOG_GET_PRINT2(prefix, id) +#define _NVLOG_GET_PRINT _NVLOG_GET_PRINT1(NVLOG_PRINT_ID_, _NVLOG_COUNTER) + +#define _NVLOG_PRINTF(tag, route, level, format, ...) _NVLOG_GET_PRINT + +#endif // NVLOG_ENABLED && !defined(NVLOG_PARSING) diff --git a/src/nvidia/inc/libraries/nvlog/nvlog.h b/src/nvidia/inc/libraries/nvlog/nvlog.h new file mode 100644 index 0000000..42c66b9 --- /dev/null +++ b/src/nvidia/inc/libraries/nvlog/nvlog.h @@ -0,0 +1,400 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _NVLOG_H_
+#define _NVLOG_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvtypes.h"
+#include "nvstatus.h"
+
+/******************* Common Debug & Trace Defines **************************\
+*                                                                           *
+* Module: NVLOG.H                                                           *
+*                                                                           *
+\***************************************************************************/
+
+// Include common NvLog definitions
+#include "nvlog_defs.h"
+
+// Include printf definitions
+#include "nvlog/nvlog_printf.h"
+
+#if defined(NVRM)
+#include "rmconfig.h"
+
+#define NVLOG_MODULE_ENABLED RMCFG_MODULE_NVLOG
+
+#else
+
+#define NVLOG_MODULE_ENABLED 1
+
+#endif
+
+/********************************/
+/*****  Exported functions  *****/
+/********************************/
+
+#if NVLOG_MODULE_ENABLED
+
+extern NVLOG_LOGGER       NvLogLogger;
+extern NVLOG_PRINT_LOGGER NvLogPrintLogger;
+
+/**
+ * @brief Global NvLog initialization function
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogInit(void *pData);
+
+/**
+ * @brief Update the NvLog configuration from the registry
+ */
+void nvlogUpdate(void);
+
+/**
+ * @brief Global NvLog deinitialization function
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogDestroy(void);
+
+/**
+ * @brief Allocate a new NvLog buffer
+ *
+ * @param[in]  size          Size of the buffer to allocate
+ * @param[in]  flags         Buffer flags, uses NVLOG_BUFFER_FLAGS_* DRF's
+ * @param[in]  tag           Tag for the new buffer, to identify it in a dump
+ * @param[out] pBufferHandle Handle of the newly created buffer
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogAllocBuffer(NvU32 size, NvU32 flags, NvU32 tag, NVLOG_BUFFER_HANDLE *pBufferHandle, ...);
+
+/**
+ * @brief Deallocate a buffer with the given handle
+ *
+ * @param[in] hBuffer           Handle of the buffer to deallocate
+ * @param[in] bDeallocPreserved Deallocate preserved buffers
+ */
+void nvlogDeallocBuffer(NVLOG_BUFFER_HANDLE hBuffer, NvBool bDeallocPreserved);
+
+/**
+ * @brief Write to a buffer with the given handle
+ *
+ * @param[in] hBuffer  Handle of the buffer to write to
+ * @param[in] pData    Pointer to the data to be written
+ * @param[in] dataSize Size of the data to be written
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogWriteToBuffer(NVLOG_BUFFER_HANDLE hBuffer, NvU8 *pData, NvU32 dataSize);
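A sketch of the allocate/write/free flow (the flags value and the 'RDBG' tag are placeholders; real callers build flags from the NVLOG_BUFFER_FLAGS_* fields in nvlog_defs.h):

    NVLOG_BUFFER_HANDLE hBuf;
    NvU8 payload[] = { 0xDE, 0xAD, 0xBE, 0xEF };

    if (nvlogAllocBuffer(4096, 0 /* flags */, 0x52444247 /* 'RDBG' */, &hBuf) == NV_OK)
    {
        (void)nvlogWriteToBuffer(hBuf, payload, sizeof(payload));
        nvlogDeallocBuffer(hBuf, NV_FALSE);
    }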
+/**
+ * @brief Extract a chunk of a buffer
+ *
+ * @param[in]     hBuffer    Handle of the buffer to extract
+ * @param[in]     chunkNum   Index (0-based) of the chunk to extract
+ * @param[in,out] pChunkSize In  - Size of the chunk to extract
+ *                           Out - Size that was actually extracted, can be less
+ * @param[out]    pDest      Pointer to the memory the chunk will be copied to
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogExtractBufferChunk(NVLOG_BUFFER_HANDLE hBuffer, NvU32 chunkNum, NvU32 *pChunkSize, NvU8 *pDest);
+
+/**
+ * @brief Get the size of a specified buffer
+ *
+ * @param[in]  hBuffer Handle of the buffer
+ * @param[out] pSize   Buffer size.
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogGetBufferSize(NVLOG_BUFFER_HANDLE hBuffer, NvU32 *pSize);
+
+/**
+ * @brief Get the tag of a specified buffer.
+ *
+ * @param[in]  hBuffer Handle of the buffer
+ * @param[out] pTag    Buffer tag.
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogGetBufferTag(NVLOG_BUFFER_HANDLE hBuffer, NvU32 *pTag);
+
+/**
+ * @brief Get flags for a specified buffer.
+ *        Flag fields are defined as NVLOG_BUFFER_FLAGS_* in nvlog_defs.h
+ *
+ * @param[in]  hBuffer Handle of the buffer
+ * @param[out] pFlags  Buffer flags.
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogGetBufferFlags(NVLOG_BUFFER_HANDLE hBuffer, NvU32 *pFlags);
+
+/**
+ * @brief Pause/resume logging to a specified buffer
+ *
+ * @param[in] hBuffer Handle of the buffer
+ * @param[in] bPause  NV_TRUE - pause, NV_FALSE - resume
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogPauseLoggingToBuffer(NVLOG_BUFFER_HANDLE hBuffer, NvBool bPause);
+
+/**
+ * @brief Pause/resume logging to all buffers
+ *
+ * @param[in] bPause NV_TRUE - pause, NV_FALSE - resume
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogPauseAllLogging(NvBool bPause);
+
+/**
+ * @brief Get the handle of a buffer with the given tag
+ *
+ * @param[in]  tag           Tag of the buffer requested
+ * @param[out] pBufferHandle Handle of the buffer
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogGetBufferHandleFromTag(NvU32 tag, NVLOG_BUFFER_HANDLE *pBufferHandle);
+
+/**
+ * @brief Copy a snapshot of a buffer's current contents into caller memory
+ *
+ * @param[in]  hBuffer  Handle of the buffer
+ * @param[out] pDest    Destination memory for the snapshot
+ * @param[in]  destSize Size of the destination memory
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogGetBufferSnapshot(NVLOG_BUFFER_HANDLE hBuffer, NvU8 *pDest, NvU32 destSize);
+
+
+/**
+ * @brief Dumps all logs into the kernel print log
+ *
+ * @note This will write to the log even if all other prints are disabled,
+ * including external release builds. The output will be base64 encoded and
+ * not decodable without the database, and will pollute the logs. Use with caution.
+ *
+ * The format of the dump will be the same as the OS Crash Log dumps.
+ */
+void nvlogDumpToKernelLog(NvBool bDumpUnchangedBuffersOnlyOnce);
+
+//
+// The values returned by CheckFilter functions contain up to four buffers.
+// These indexes are in the local buffer array (i.e. in NVLOG_PRINT_LOGGER)
+// There can be more than 256 total NvLog buffers, but only 256 per subsystem.
+//
+#define NVLOG_FILTER_BUFFER_NONE 0xFF
+
+//
+// NvLog Print functions
+//
+
+/**
+ * @brief Check the filtering rules for a given DBG_PRINTF
+ *
+ * @param[in] fileId ID (name hash) of the file
+ * @param[in] line   Line number of the print
+ * @param[in] level  Debug level (DBG_LEVEL_*) of the print
+ * @param[in] module Debug module (DBG_MODULE_*) of the print
+ *
+ * @return 32 bits to indicate which of the print buffers to log to.
+ */ +NvU32 nvlogPrintCheckFilter(NvU32 fileId, NvU16 line, NvU32 level, NvU32 module); + +/** + * @brief Global NvLog Print initialization function + * + * @return NV_OK on success + */ +NV_STATUS nvlogPrintInit(void); + +/** + * @brief NvLog Print update function + * + * @return NV_OK on success + */ +NV_STATUS nvlogPrintUpdate(void); + +/** + * @brief Global NvLog Print deinitialization function + * + * @return NV_OK on success + */ +NV_STATUS nvlogPrintDestroy(void); + +// +// NvLog ETW functions +// + +/** + * @brief Global NvLog ETW capture state function + * + * @return NV_OK on success + */ +NV_STATUS nvlogETWCaptureState(void); + +/** + * @brief Pushes all buffer tags to ETW + */ +void nvlogETWPushTags(void); + +/** + * @brief Pushes an nvlog buffer header to ETW + */ +void nvlogETWPushBufferHeader(NVLOG_BUFFER *pBuffer); + +/** + * @brief Pushes an nvlog entry to ETW + * + * @return NV_TRUE on success + */ +NvBool nvlogETWPush(NVLOG_BUFFER *pBuffer, NvU8 *pData, NvU32 dataSize); + +// +// Global initialization macros +// +extern volatile NvU32 nvlogInitCount; +#define NVLOG_INIT(pData) \ + do \ + { \ + if (portAtomicIncrementU32(&nvlogInitCount) == 1) \ + { \ + nvlogInit(pData); \ + } \ + } while (0) + +#define NVLOG_UPDATE() \ + do \ + { \ + if (nvlogInitCount == 1) \ + { \ + nvlogUpdate(); \ + } \ + } while (0) + +#define NVLOG_DESTROY() \ + do \ + { \ + if (portAtomicDecrementU32(&nvlogInitCount) == 0) \ + { \ + nvlogDestroy(); \ + } \ + } while (0) + +/********************************/ +/****** NvLog Filtering *******/ +/********************************/ + +// +// Used both by print and regtrace functions. +// + +/** + * @brief Binary search the range array for a given number + * + * @param[in] ranges Range array to search + * @param[in] numRanges Size of the given array + * @param[in] num Number to search for. + * + * @return Number that is found in the given range. + * If no number is found, returns ~0 (0xFFFFFFFF) + */ +NvU32 nvlogFindInRange16Array(NVLOG_RANGE_16 *ranges, NvU32 numRanges, NvU16 num); +/** + * @brief Binary search the range array for a given number + * + * @param[in] ranges Range array to search + * @param[in] numRanges Size of the given array + * @param[in] num Number to search for. + * + * @return Number that is found in the given range. + * If no number is found, returns ~0 (0xFFFFFFFF) + */ +NvU32 nvlogFindInRange32Array(NVLOG_RANGE_32 *ranges, NvU32 numRanges, NvU32 num); + +// Returns the rules for the given fileId-lineNum pair +/** + * @brief Binary search the range array for a given number + * + * @param[in] pFileLineFilter File:line filter to check + * @param[in] fileId ID of the file to search + * @param[in] lineNum Line number to search in the file entry + * + * @return Number that is found for the given file:line. 
+ * If no number is found, returns ~0 (0xFFFFFFFF) + */ +NvU32 nvlogGetFileLineFilterRules(NVLOG_FILELINE_FILTER *pFileLineFilter, NvU32 fileId, NvU16 lineNum); + + +/** + * @brief Dump nvlog to the kernel log only if enabled (performs regkey and platform checks) + */ +void nvlogDumpToKernelLogIfEnabled(void); + +/** + * @brief Register a callback to be invoked when NvLog buffers need to be flushed + * + * @param[in] pCb Callback function to be called when nvlog buffers need to be flushed + * @param[in] pData Argument to pass to pCb + * + * @return NV_OK on success + */ +NV_STATUS nvlogRegisterFlushCb(void (*pCb)(void*), void *pData); + +/** + * @brief Deregister a previously registered flush callback + * + * @param[in] pCb Callback to be deregistered + * @param[in] pData Argument that pCb was registered with + */ +void nvlogDeregisterFlushCb(void (*pCb)(void*), void *pData); + +// +// Run registered callbacks. +// All callback list accesses are synchronized. +// +void nvlogRunFlushCbs(void); + +#else + +#define NVLOG_INIT(pData) +#define NVLOG_UPDATE() +#define NVLOG_DESTROY() + +#define nvlogDumpToKernelLogIfEnabled() +#define nvlogDumpToKernelLog(bDumpUnchangedBuffersOnlyOnce) + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _NVLOG_H_ diff --git a/src/nvidia/inc/libraries/nvlog/nvlog_printf.h b/src/nvidia/inc/libraries/nvlog/nvlog_printf.h new file mode 100644 index 0000000..fbce4b3 --- /dev/null +++ b/src/nvidia/inc/libraries/nvlog/nvlog_printf.h @@ -0,0 +1,92 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * NvLog call that logs prints. + * + * This is the traditional NvLog component. When enabled, it will also activate + * preprocessing of all source files to detect calls to NVLOG_PRINTF, and + * generate a database to be used for decoding. + * + * This file just defines the macros used by NV_PRINTF and other clients + */ + +#ifndef NVLOG_PRINTF_H +#define NVLOG_PRINTF_H + +#include "utils/nvprintf_level.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef NVLOG_ENABLED +/// @brief If zero, most of NvLog will be compiled out +#define NVLOG_ENABLED 0 +#endif + +#ifndef NVLOG_LEVEL +/// @brief Level below which all prints will be compiled out.
+#define NVLOG_LEVEL LEVEL_NOTICE +#endif + +/// @brief Maximum number of arguments to NVLOG_PRINTF +#define NVLOG_MAX_ARGS 20 + +/** + * @brief Log this printf in NvLog internal binary buffers + * + * These calls are picked up by the NvLog parser, and are replaced with custom + * calls from the generated header. See @page nvlog-parser for details. + * + * @param tag - An identifier to help with offline filtering. Doesn't need to + * be defined anywhere. + * @param route - 8bit mask of buffers the print will be routed to. + * Use NVLOG_BUFFER_XXX constants + * @param level - Level at which to print. Calls with level < NVLOG_LEVEL will + * be compiled out. + * @param format - printf-like format string + * @param ... - printf arguments + */ +#define NVLOG_PRINTF(tag, route, level, format, ...) _NVLOG_PRINTF(tag, route, level, format, __VA_ARGS__) + +#define NVLOG_BUFFER_NULL 0x01 +#define NVLOG_BUFFER_RM 0x02 +#define NVLOG_BUFFER_RM_BOOT 0x04 +#define NVLOG_BUFFER_ETW 0x08 +#define NVLOG_BUFFER_KMD_BOOT 0x10 +#define NVLOG_BUFFER_KMD 0x20 +#define NVLOG_BUFFER_ERROR 0x40 +#define NVLOG_BUFFER_DD 0x80 + +#define NVLOG_ROUTE_RM (NVLOG_BUFFER_RM | NVLOG_BUFFER_RM_BOOT | NVLOG_BUFFER_ETW) +#define NVLOG_ROUTE_KMD (NVLOG_BUFFER_KMD | NVLOG_BUFFER_KMD_BOOT | NVLOG_BUFFER_ETW) +#define NVLOG_ROUTE_DD (NVLOG_BUFFER_DD | NVLOG_BUFFER_KMD_BOOT | NVLOG_BUFFER_ETW) + +#include "nvlog/internal/nvlog_printf_internal.h" + +#ifdef __cplusplus +} //extern "C" +#endif + +#endif // NVLOG_PRINTF_H diff --git a/src/nvidia/inc/libraries/nvoc/object.h b/src/nvidia/inc/libraries/nvoc/object.h new file mode 100644 index 0000000..a4e8181 --- /dev/null +++ b/src/nvidia/inc/libraries/nvoc/object.h @@ -0,0 +1,131 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file is part of the NVOC runtime. + */ + +#pragma once + +#include "nvoc/prelude.h" + +#include "g_object_nvoc.h" + +#ifndef _NVOC_OBJECT_H_ +#define _NVOC_OBJECT_H_ + +#include "nvtypes.h" +#include "nvstatus.h" + + + +class Object; +struct NVOC_CLASS_INFO; + +/*! + * Tracks the head of an object's child list, and the next object in its + * parent's child list. + */ +struct NVOC_CHILD_TREE +{ + Object *pChild; + Object *pSibling; +}; + +//! The base class of all instantiable NVOC objects. +NVOC_PREFIX(obj) class Object +{ +public: + + //! Runtime ownership tree parent node. 
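+    //! Together with childTree below, this forms an intrusive ownership
+    //! tree; a minimal traversal sketch (pRoot is an assumed Object *):
+    //! @code
+    //! Object *pChild;
+    //! for (pChild = objGetChild(pRoot);
+    //!      pChild != NULL;
+    //!      pChild = objGetSibling(pChild))
+    //! {
+    //!     // visit pChild
+    //! }
+    //! @endcode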
+ Object *pParent; + + //! Runtime ownership tree child and sibling links. + struct NVOC_CHILD_TREE childTree; + + //! IP Version value. Temporary until NVOC-style HALs are rolled out. + NvU32 ipVersion; + + //! flags used to create the object. + NvU32 createFlags; + + /*! + * @brief Add pChild as a child of this object. + * + * This method is wrapped by objCreate and typically doesn't need to be + * called directly. + * + * Asserts if pChild is already a child of any object. + */ + void objAddChild(Object *pObj, Object *pChild); + + /*! + * @brief Remove pChild as a child of this object. + * + * This method is wrapped by objDelete and typically doesn't need to be + * called directly. + * + * Asserts if pChild is not a child of this object. + */ + void objRemoveChild(Object *pObj, Object *pChild); + + /*! + * @brief Gets the head of this object's child list from the child tree. + * + * This is a constant-time operation. + */ + Object *objGetChild(Object *pObj); + + /*! + * @brief Gets the next child of this object's parent from the child tree. + * + * This is a constant-time operation. + */ + Object *objGetSibling(Object *pObj); + + /*! + * @brief Gets the direct parent of this object. + * + * This is a constant-time operation. + */ + Object *objGetDirectParent(Object *pObj); +}; + +// +// IP versioning definitions are temporary until NVOC halspec support is +// finished. +// +// IP_VERSION format as defined by the hardware engines. +// A _MAJOR value of 0 means the object has no version number. +// + +#define NV_ODB_IP_VER_DEV 7:0 /* R-IVF */ +#define NV_ODB_IP_VER_ECO 15:8 /* R-IVF */ +#define NV_ODB_IP_VER_MINOR 23:16 /* R-IVF */ +#define NV_ODB_IP_VER_MAJOR 31:24 /* R-IVF */ + +#define IPVersion(pObj) staticCast((pObj), Object)->ipVersion +// v0 .. v1 inclusive +#define IsIPVersionInRange(pObj, v0, v1) ((IPVersion(pObj) >= (v0)) && (IPVersion(pObj) <= (v1))) + +#endif diff --git a/src/nvidia/inc/libraries/nvoc/prelude.h b/src/nvidia/inc/libraries/nvoc/prelude.h new file mode 100644 index 0000000..e1ddedd --- /dev/null +++ b/src/nvidia/inc/libraries/nvoc/prelude.h @@ -0,0 +1,266 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file is part of the NVOC runtime. 
+ */ + +#ifndef _NVOC_PRELUDE_H_ +#define _NVOC_PRELUDE_H_ + +#include "nvtypes.h" +#include "utils/nvmacro.h" + +/* Calls the macro named in the first parameter with the rest of the given arguments. Written + * like this instead of just func(__VA_ARGS__) because some preprocessors treat __VA_ARGS__ + * as a single argument even when it contains commas. */ +#define NVOC_PP_CALL(func, ...) NV_EXPAND(func NV_EXPAND() (__VA_ARGS__)) + +/*! Macro to help specify prefixes on NVOC classes */ +#define NVOC_PREFIX(x) [[nvoc::prefix(x)]] + +/*! Macro to help specify NVOC class attributes */ +#define NVOC_ATTRIBUTE(str) [[nvoc::classAttributes("\""#str"\"")]] + +/*! Macro to help specify properties on NVOC classes */ +#define NVOC_PROPERTY [[nvoc::property]] + +#ifndef NV_PRINTF_STRINGS_ALLOWED +#if defined(DEBUG) || defined(NV_MODS) || defined(QA_BUILD) +#define NV_PRINTF_STRINGS_ALLOWED 1 +#else +#define NV_PRINTF_STRINGS_ALLOWED 0 +#endif +#endif + +/*! + * @brief Gets a pointer to the NVOC_CLASS_INFO for the named NVOC class. + * + * This is similar to C++'s typeid operator. + */ +#define classInfo(x) reinterpretCast((&__nvoc_class_def_##x), const NVOC_CLASS_INFO *) + +/*! + * @brief Gets a unique integer identifier for the named NVOC class. + * + * This is similar to the hash_code of C++'s std::type_info. + */ +#define classId(x) __nvoc_class_id_##x + + +/*! NVOC class IDs will be no wider than NVOC_CLASS_ID_MAX_WIDTH bits. */ +#define NVOC_CLASS_ID_MAX_WIDTH 24 + +/*! + * @brief Statically casts pObj to a TYPE*. Fails to compile if the cast is invalid. + * + * This is similar to C++'s static_cast<TYPE*>(pObj). + */ +#define staticCast(pObj, TYPE) ((pObj)? __staticCast_##TYPE((pObj)) : NULL) + +/*! + * @brief Statically casts pObj to a TYPE*. Fails to compile if the cast is invalid. + * + * This version of staticCast() skips the pointer check as a trade-off for better binary size + * and runtime efficiency. The caller is responsible for ensuring that pObj can never be NULL. + */ +#define staticCastNoPtrCheck(pObj, TYPE) __staticCast_##TYPE((pObj)) + +/*! + * @brief Dynamically casts pObj to a TYPE*. Returns NULL if the cast is invalid. + * + * This is similar to C++'s dynamic_cast<TYPE*>(pObj). + */ +#define dynamicCast(pObj, TYPE) (__dynamicCast_##TYPE((pObj))) + +/*! + * @brief Reinterpret e as if it had type T. + * + * This is similar to C++'s reinterpret_cast<T>(e). + */ +#define reinterpretCast(e, T) ((T)(e)) + +/*! + * NVOC_OBJ_CREATE_FLAGS* are used with objCreateWithFlags()/objCreateDynamicWithFlags(). + * + * NVOC_OBJ_CREATE_FLAGS_NONE + * Default behavior + * NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY + * Use the halspec from the parent without adding the newly created object to the child tree + * NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT + * Skip memory allocation on object create; assume the argument points to memory + * that is already allocated. + */ +#define NVOC_OBJ_CREATE_FLAGS_NONE 0x0000u +#define NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY 0x0001u +#define NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT 0x0002u + +/*! + * @brief Create and construct a new object by class name. + * + * @param[out] ppNewObj A pointer to the new object + * @param[in] pParent A pointer to the object that should be the new object's parent, + * or NULL if the new object has no parent. + * @param[in] NAME The name of the class of the new object. + */ +/* MSVC suppresses trailing commas at the final expansion but not at intermediate expansions, so we + * need to put our trailing comma inside another macro so it will be eaten.
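+ * For instance (illustrative; MyClass is a placeholder NVOC class), a call
+ * with no constructor arguments,
+ * @code
+ * objCreate(&pNewObj, pParent, MyClass);
+ * @endcode
+ * expands with an empty __VA_ARGS__, which would otherwise leave a dangling
+ * comma after the flags argument.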
Normally, one would + * just wrap the trailing comma and __VA_ARGS__ in NV_EXPAND, but Bullseye's preprocessor has + * trouble dealing with that properly, so instead we use an indirect macro caller that Bullseye + * seems to handle properly. This avoids producing a "too many arguments for macro" warning (C4002). */ +#define objCreate(ppNewObj, pParent, NAME, ...) \ + NVOC_PP_CALL(__objCreate_##NAME, (ppNewObj), (pParent), (NVOC_OBJ_CREATE_FLAGS_NONE), ##__VA_ARGS__) +#define objCreateWithFlags(ppNewObj, pParent, NAME, flags, ...) \ + NVOC_PP_CALL(__objCreate_##NAME, (ppNewObj), (pParent), (flags), ##__VA_ARGS__) + +/*! + * @brief Destruct and free an object and all of its children recursively. + * + * In C++, fields are destructed in reverse syntactic order. Similarly, in + * NVOC, runtime children are deleted in the reverse of the order they were + * added (usually reverse creation order). + */ +#define objDelete(pObj) __nvoc_objDelete(staticCast((pObj), Dynamic)) + +/*! + * @brief Get the given object's class ID + */ +#define objGetClassId(pObj) __nvoc_objGetClassId(staticCast((pObj), Dynamic)) + +/*! + * @brief Get the given object's class info + */ +#define objGetClassInfo(pObj) __nvoc_objGetClassInfo(staticCast((pObj), Dynamic)) + +#if NV_PRINTF_STRINGS_ALLOWED +/*! + * Get the given class's name from its class info. + */ +#define objGetClassName(pObj) (objGetClassInfo((pObj))->name) +#endif + +/*! + * @brief Create and construct a new object by class ID. + * + * @param[out] ppNewObj A pointer to the new object + * @param[in] pParent A pointer to the object that should be the new object's parent, + * or NULL if the new object has no parent. + * @param[in] pClassInfo A pointer to the NVOC_CLASS_INFO for the desired class. + */ +#define objCreateDynamic(ppNewObj, pParent, pClassInfo, ...) \ + __nvoc_objCreateDynamic((ppNewObj), staticCast((pParent), Dynamic), \ + (pClassInfo), (NVOC_OBJ_CREATE_FLAGS_NONE), ##__VA_ARGS__) +#define objCreateDynamicWithFlags(ppNewObj, pParent, pClassInfo, flags, ...) \ + __nvoc_objCreateDynamic((ppNewObj), staticCast((pParent), Dynamic), \ + (pClassInfo), (flags), ##__VA_ARGS__) + +/*! + * @brief Cast any object supporting Run-Time Type Information (RTTI) to 'Dynamic'. + * + * The purpose of this more complicated expression is to force a compile-time + * error if `pObj` does not contain Metadata/RTTI information. + * + * Since the `__nvoc_rtti` pointer is always first, `pObj == &(pObj)->__nvoc_rtti`. + * With metadata version 2, `__nvoc_rtti` is unioned with `__nvoc_metadata`, + * which is okay since the RTTI structure is first in the metadata structure. + * + */ +#define __staticCast_Dynamic(pObj) ((Dynamic*) &(pObj)->__nvoc_rtti) + + +/* + * Helper macros for "pObject->getProperty(pObject, prop)" + * + * The NVOC property macros are currently based on IOM's property macros. + * + * Property inheritance for IOM (Improved Object Model) is done by introducing + * 'prop##_BASE_CAST' and 'prop##_BASE_NAME'. For IOM, those are defined in + * generated file g_odb.h. For NVOC, they are defined in each class's generated + * header. 
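+ *
+ * A hedged usage sketch (PDB_PROP_FOO is a placeholder property name):
+ * @code
+ * if (pObject->getProperty(pObject, PDB_PROP_FOO))
+ *     pObject->setProperty(pObject, PDB_PROP_FOO, NV_FALSE);
+ * @endcode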
+ * + * In non-inheritance cases, getProperty/setProperty functions are equal to: + * #define getProperty(pObj, prop) prop // or pdb.prop for IOM + * #define setProperty(pObj, prop, val) prop = val // or pdb.prop = val for IOM + * + * Once the IOM model is phased out, these will become: + * #define getProperty(pObj, prop) pObj->prop + * #define setProperty(pObj, prop, val) pObj->prop = val + */ +#define getProperty(pObj, prop) prop##_BASE_CAST prop##_BASE_NAME +#define setProperty(pObj, prop, val) prop##_BASE_CAST prop##_BASE_NAME = val + +/*! Special NULL pointer for macros that expect to staticCast their parameter */ +#define NVOC_NULL_OBJECT ((Object*) NULL) + + +/*! + * @brief Wrapper of the Run-Time Type Information (RTTI) pointer. + * + * @details In effect, this is the base class (not Object) for all classes + * that support RTTI, because the RTTI pointer is always first in memory. + */ +typedef struct { + const struct NVOC_RTTI *__nvoc_rtti; +} Dynamic; + + +typedef NvU32 NVOC_CLASS_ID; + +typedef struct NVOC_RTTI_PROVIDER { + NvU32 dummy; +} NVOC_RTTI_PROVIDER; + +typedef const NVOC_RTTI_PROVIDER *NVOC_RTTI_PROVIDER_ID; + +//! Public metadata about an NVOC class definition. +typedef struct NVOC_CLASS_INFO +{ + const NvU32 size; + const NVOC_CLASS_ID classId; + const NVOC_RTTI_PROVIDER_ID providerId; +#if NV_PRINTF_STRINGS_ALLOWED + const char *name; +#endif +} NVOC_CLASS_INFO; + + +/*! + * @brief Wrappers for private fields and private functions + */ +#if defined(__clang__) // clang +#define NVOC_PRIVATE_FIELD(x) __attribute__((unavailable(#x " is a private field"))) x +#define NVOC_PRIVATE_FUNCTION(x) __attribute__((unavailable(#x " is a private function"))) x +#elif defined(__INTEL_COMPILER) // icc +#pragma warning(error: 1786) // treat deprecated as error (globally affected) +#define NVOC_PRIVATE_FIELD(x) __attribute__((deprecated(#x " is a private field"))) x +#define NVOC_PRIVATE_FUNCTION(x) __attribute__((deprecated(#x " is a private function"))) x +#elif defined(__GNUC__) || defined(__GNUG__) // gcc +#pragma GCC diagnostic error "-Wdeprecated-declarations" // treat deprecated as error (globally affected) +#define NVOC_PRIVATE_FIELD(x) __attribute__((deprecated(#x " is a private field"))) x +#define NVOC_PRIVATE_FUNCTION(x) __attribute__((error(#x " is a private function"))) x +#else // other +#define NVOC_PRIVATE_FIELD(x) x##_PRIVATE +#define NVOC_PRIVATE_FUNCTION(x) x##_PRIVATE +#endif + +#endif diff --git a/src/nvidia/inc/libraries/nvoc/rtti.h b/src/nvidia/inc/libraries/nvoc/rtti.h new file mode 100644 index 0000000..c0cac58 --- /dev/null +++ b/src/nvidia/inc/libraries/nvoc/rtti.h @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file is part of the NVOC runtime. + */ + +#ifndef _NVOC_RTTI_H_ +#define _NVOC_RTTI_H_ + +#include "nvtypes.h" +#include "nvoc/prelude.h" +#include "nvport/inline/util_valist.h" + +typedef NV_STATUS (*NVOC_DYNAMIC_OBJ_CREATE)(Dynamic**, Dynamic *pParent, NvU32 createFlags, va_list); +typedef void (*NVOC_DYNAMIC_DTOR)(Dynamic*); + + +// struct NVOC_CLASS_METADATA +// { +// // NvBool isMixedMode; +// // NvS32 ring; +// // const struct NVOC_EXPORTS *const pExportedClasses; +// }; + +// MSVC warning C4200 on "NVOC_CASTINFO::relatives": zero-sized array in struct/union +// Ignore the warning on VS2013+ +//! List of valid casts, needed for dynamicCast. +struct NVOC_CASTINFO +{ + const NvU32 numRelatives; + const struct NVOC_RTTI *const relatives[]; +}; + + + +//! Items unique to each NVOC class definition. Used to identify NVOC classes. +struct NVOC_CLASS_DEF { + const NVOC_CLASS_INFO classInfo; // public, defined in runtime.h; contains classId, size, and name + const NVOC_DYNAMIC_OBJ_CREATE objCreatefn; + const struct NVOC_CASTINFO *const pCastInfo; + const struct NVOC_EXPORT_INFO* const pExportInfo; +}; + +//! Items used to identify base class subobjects. +struct NVOC_RTTI // one per derived-ancestor relationship pair (and every derived class is also its own ancestor) +{ + const struct NVOC_CLASS_DEF *const pClassDef; // drives dynamicCast and objCreateDynamic, one per class + const NVOC_DYNAMIC_DTOR dtor; // __nvoc_destructFromBase for base substructures, real destructor for derived + const NvU32 offset; // 0 for derived +}; + +//! Initialize RTTI structure for older NVOC versions. +#ifndef NVOC_METADATA_VERSION +void __nvoc_initRtti(Dynamic *pNewObject, const struct NVOC_CLASS_DEF *pClassDef); +#elif NVOC_METADATA_VERSION < 2 +void __nvoc_initRtti(Dynamic *pNewObject, const struct NVOC_CLASS_DEF *pClassDef); +#endif + +#endif diff --git a/src/nvidia/inc/libraries/nvoc/runtime.h b/src/nvidia/inc/libraries/nvoc/runtime.h new file mode 100644 index 0000000..19bbcee --- /dev/null +++ b/src/nvidia/inc/libraries/nvoc/runtime.h @@ -0,0 +1,118 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file is part of the NVOC runtime. + */ + +#ifndef _NVOC_RUNTIME_H_ +#define _NVOC_RUNTIME_H_ + +#include "nvport/nvport.h" +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvmisc.h" + +#include "nvoc/prelude.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NVOC_CLASS_ID __nvoc_objGetClassId(Dynamic *pObj); +const NVOC_CLASS_INFO *__nvoc_objGetClassInfo(Dynamic *pObj); + +void __nvoc_objDelete(Dynamic *pDynamic); + +NV_STATUS __nvoc_handleObjCreateMemAlloc(NvU32 createFlags, NvU32 allocSize, void **ppLocalPtr, void **ppThis); + +NV_STATUS __nvoc_objCreateDynamic( + Dynamic **pNewObject, + Dynamic *pParent, + const NVOC_CLASS_INFO *pClassInfo, + NvU32 createFlags, + ...); + +Dynamic *__nvoc_dynamicCast(Dynamic *pFromObj, const NVOC_CLASS_INFO *pClassInfo); +Dynamic *__nvoc_dynamicCastById(Dynamic *pFromObj, NVOC_CLASS_ID classId); + +void __nvoc_destructFromBase(Dynamic *pDynamic); + +Dynamic *fullyDeriveWrapper(Dynamic *pDynamic); + +extern const NVOC_RTTI_PROVIDER __nvoc_rtti_provider; + +#define objFindAncestor(pObj, classId) objFindAncestor_IMPL(staticCast((pObj), Dynamic), classId) +#define objDynamicCastById(pObj, classId) objDynamicCastById_IMPL(staticCast((pObj), Dynamic), classId) +#define objFindAncestorOfType(TYPE, pObj) dynamicCast(objFindAncestor((pObj), classId(TYPE)), TYPE) +#define __nvoc_fullyDerive(pObj) __nvoc_fullyDerive_IMPL(staticCast((pObj), Dynamic)) +#define objFullyDerive(pObj) fullyDeriveWrapper(staticCast((pObj), Dynamic)) +#define objGetExportedMethodDef(pObj, methodId) objGetExportedMethodDef_IMPL(pObj, methodId) + +//! Contains data needed to call the exported method from kernel +struct NVOC_EXPORTED_METHOD_DEF +{ + void (*pFunc) (void); // Pointer to the method itself + NvU32 flags; // Export flags used for permission, method attribute verification (eg. NO_LOCK, PRIVILEGED...) + NvU32 accessRight; // Access rights required for this method + NvU32 methodId; // Id of the method in the class. Used for method identification. + NvU32 paramSize; // Size of the parameter structure that the method takes as the argument (0 if it takes no arguments) + const NVOC_CLASS_INFO* pClassInfo; // Class info for the parent class of the method + +#if NV_PRINTF_STRINGS_ALLOWED + const char *func; // Debug info +#endif +}; + +struct NVOC_EXPORT_INFO { + NvU32 numEntries; // Number of entries + const struct NVOC_EXPORTED_METHOD_DEF *pExportEntries; //An array of exported methods +}; + +/*! + * @brief Finds the closest ancestor of this object with the given class ID. + * + * This is a linear-time operation. + */ +Dynamic *objFindAncestor_IMPL(Dynamic *pDynamic, NVOC_CLASS_ID classId); + +/*! + * @brief Finds the exported method with the given method ID. + * + * If the method isn't found in the derived class, we search the ancestors. + * Returns NULL if the search is unsuccessful. + * This is a linear-time operation. + */ +const struct NVOC_EXPORTED_METHOD_DEF* objGetExportedMethodDef_IMPL(Dynamic* pObj, NvU32 methodId); +const struct NVOC_EXPORTED_METHOD_DEF* nvocGetExportedMethodDefFromMethodInfo_IMPL(const struct NVOC_EXPORT_INFO *pExportInfo, NvU32 methodId); + +/*! 
+ * @brief Dynamic cast by class id + */ +Dynamic *objDynamicCastById_IMPL(Dynamic *pFromObj, NVOC_CLASS_ID classId); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif diff --git a/src/nvidia/inc/libraries/nvoc/utility.h b/src/nvidia/inc/libraries/nvoc/utility.h new file mode 100644 index 0000000..9adb2ac --- /dev/null +++ b/src/nvidia/inc/libraries/nvoc/utility.h @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVOC_UTILITY_H_ +#define _NVOC_UTILITY_H_ + + +#endif // _NVOC_UTILITY_H_ diff --git a/src/nvidia/inc/libraries/nvport/atomic.h b/src/nvidia/inc/libraries/nvport/atomic.h new file mode 100644 index 0000000..42a02c5 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/atomic.h @@ -0,0 +1,425 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Atomic module public interface + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. Include nvport.h instead." 
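+/*
+ * The intended usage is to include the umbrella header,
+ *
+ *     #include "nvport/nvport.h"
+ *
+ * which pulls in this module together with the rest of NvPort.
+ */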
+#endif + +#ifndef _NVPORT_ATOMIC_H_ +#define _NVPORT_ATOMIC_H_ +/** + * @defgroup NVPORT_ATOMIC Atomic operations + * + * @brief This module contains atomic operations + * + * @note Mixing atomic and non-atomic modifications to the same memory + * location can have undefined behavior that varies from platform to platform. + * You are better off not trying it. + * + * @note All atomic operations actually impose at least a compiler memory + * barrier - either just on the variable manipulated, or on all globally + * accessible variables. This is just a consequence of the current + * implementations, and should not be relied on. If you need a memory barrier, + * use @ref portAtomicMemoryFenceFull. + * + * @{ + */ + +/** See @ref PORT_UTIL_INLINE */ +#ifndef PORT_ATOMIC_INLINE +#if PORT_COMPILER_HAS_INTRINSIC_ATOMICS && !defined(NV_MODS) +#define PORT_ATOMIC_INLINE PORT_INLINE +#if NVCPU_IS_64_BITS +#define PORT_ATOMIC64_INLINE PORT_INLINE +#else +#define PORT_ATOMIC64_INLINE +#endif +#else +#define PORT_ATOMIC_INLINE +#define PORT_ATOMIC64_INLINE +#endif +#endif + +/** + * @name Core Functions + * @{ + */ + +/** + * @brief Initialization function for port atomics + * + * This function is only needed for Libos + */ +PORT_ATOMIC_INLINE void portAtomicInit(void); + +/** + * @brief Atomic addition on a signed 32b integer + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * *pVal += val; + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + */ +PORT_ATOMIC_INLINE NvS32 portAtomicAddS32(volatile NvS32 *pVal, NvS32 val); +/// @brief Unsigned version of @ref portAtomicAddS32 +PORT_ATOMIC_INLINE NvU32 portAtomicAddU32(volatile NvU32 *pVal, NvU32 val); + +/** + * @brief Atomic subtraction on a signed 32b integer + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * *pVal -= val; + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + */ +PORT_ATOMIC_INLINE NvS32 portAtomicSubS32(volatile NvS32 *pVal, NvS32 val); +/// @brief Unsigned version of @ref portAtomicSubS32 +PORT_ATOMIC_INLINE NvU32 portAtomicSubU32(volatile NvU32 *pVal, NvU32 val); + +/** + * @brief Atomically set a signed 32b integer to the specified value + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * *pVal = val; + * ~~~ + * + * Once complete, `val` will be visible in the location pointed to by `pVal` by all + * threads on all processors. + * + * @note On some platforms this operation is different from other atomic + * operations with respect to memory ordering. The best that can be guaranteed + * for this operation is that it will behave as an acquire barrier. This + * means that operations occurring after this one in program order are + * guaranteed to not occur until the atomic operation is complete. It also + * means that it does not guarantee that previous stores are visible, or that + * previous loads have been satisfied. + * + */ +PORT_ATOMIC_INLINE void portAtomicSetS32(volatile NvS32 *pVal, NvS32 val); +/// @brief Unsigned version of @ref portAtomicSetS32 +PORT_ATOMIC_INLINE void portAtomicSetU32(volatile NvU32 *pVal, NvU32 val); + +/** + * @brief Atomic compare-and-swap on a signed 32b integer. + * + * A compare and swap is an atomic operation that reads a memory location, + * compares it to `oldVal`, and, if they are equal, sets the memory location to + * `newVal`.
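+ *
+ * Compare-and-swap is the usual building block for lock-free
+ * read-modify-write loops; a minimal sketch (illustrative):
+ *
+ * ~~~{.c}
+ * NvS32 old;
+ * do
+ * {
+ *     old = *pVal;
+ * } while (!portAtomicCompareAndSwapS32(pVal, old + 1, old));
+ * ~~~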
+ * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * if (*pVal == oldVal) + * { + * *pVal = newVal; + * return NV_TRUE; + * } + * return NV_FALSE; + * ~~~ + * + * @return NV_TRUE if the operation modified the target of `pVal`, NV_FALSE otherwise + * + */ +PORT_ATOMIC_INLINE NvBool portAtomicCompareAndSwapS32(volatile NvS32 *pVal, NvS32 newVal, NvS32 oldVal); +/// @brief Unsigned version of @ref portAtomicCompareAndSwapS32 +PORT_ATOMIC_INLINE NvBool portAtomicCompareAndSwapU32(volatile NvU32 *pVal, NvU32 newVal, NvU32 oldVal); + +/** + * @brief Atomic increment of a signed 32b integer. + * + * Adds one to the memory location pointed to by the parameter and returns the + * resulting value. + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * ++(*pVal); + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + * + */ +PORT_ATOMIC_INLINE NvS32 portAtomicIncrementS32(volatile NvS32 *pVal); +/// @brief Unsigned version of @ref portAtomicIncrementS32 +PORT_ATOMIC_INLINE NvU32 portAtomicIncrementU32(volatile NvU32 *pVal); + +/** + * @brief Atomic decrement of a signed 32b integer. + * + * Subtracts one from the memory location pointed to by the parameter and returns + * the resulting value. + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * --(*pVal); + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + */ +PORT_ATOMIC_INLINE NvS32 portAtomicDecrementS32(volatile NvS32 *pVal); +/// @brief Unsigned version of @ref portAtomicDecrementS32 +PORT_ATOMIC_INLINE NvU32 portAtomicDecrementU32(volatile NvU32 *pVal); + + +/** + * @brief Atomic bitwise XOR on a signed 32b integer. + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * *pVal ^= val; + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + */ +PORT_ATOMIC_INLINE NvS32 portAtomicXorS32(volatile NvS32 *pVal, NvS32 val); +/// @brief Unsigned version of @ref portAtomicXorS32 +PORT_ATOMIC_INLINE NvU32 portAtomicXorU32(volatile NvU32 *pVal, NvU32 val); + +/** + * @brief Atomic bitwise OR on a signed 32b integer. + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * *pVal |= val; + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + */ +PORT_ATOMIC_INLINE NvS32 portAtomicOrS32(volatile NvS32 *pVal, NvS32 val); +/// @brief Unsigned version of @ref portAtomicOrS32 +PORT_ATOMIC_INLINE NvU32 portAtomicOrU32(volatile NvU32 *pVal, NvU32 val); + +/** + * @brief Atomic bitwise AND on a signed 32b integer. + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * *pVal &= val; + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + */ +PORT_ATOMIC_INLINE NvS32 portAtomicAndS32(volatile NvS32 *pVal, NvS32 val); +/// @brief Unsigned version of @ref portAtomicAndS32 +PORT_ATOMIC_INLINE NvU32 portAtomicAndU32(volatile NvU32 *pVal, NvU32 val); + + + +/** + * @name Memory Barrier functions + * @note Memory fence functions must be marked inline, so the compiler knows + * about the barrier and doesn't reorder instructions around the call. Thus, + * this is PORT_INLINE and not PORT_ATOMIC_INLINE. (Force-inline is not necessary.) + * + * @note A given platform is allowed to implement the load/store barriers as + * full barriers instead, if the former isn't supported. Thus, you should only + * use @ref portAtomicMemoryFenceLoad and @ref portAtomicMemoryFenceStore for + * a possible performance bonus over @ref portAtomicMemoryFenceFull. Don't write + * code that relies on those being load/store-only barriers.
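+ *
+ * A minimal publish pattern (illustrative; `payload` and `ready` are
+ * assumed shared variables):
+ *
+ * ~~~{.c}
+ * payload = value;                  // write the data
+ * portAtomicMemoryFenceStore();     // order the data before the flag
+ * portAtomicSetU32(&ready, 1);      // publish
+ * ~~~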
+ * + * @{ + */ + +/** + * @brief Creates a full HW and compiler memory barrier. + * + * A memory fence (memory barrier) imposes a sequential ordering on access to + * all globally accessible variables. That means that all accesses found before + * the fence will finish before any of those after it. + */ +PORT_INLINE void portAtomicMemoryFenceFull(void); +/** + * @brief Creates a HW and compiler load memory barrier. + * + * A load memory fence (memory barrier) imposes a sequential ordering on all + * loads to globally accessible variables. All loads found before the barrier + * will happen before any loads found after it. A load barrier has no effect on + * store operations. + */ +PORT_INLINE void portAtomicMemoryFenceLoad(void); +/** + * @brief Creates a HW and compiler store memory barrier. + * + * A store memory fence (memory barrier) imposes a sequential ordering on all + * stores to globally accessible variables. All stores found before the barrier + * will happen before any stores found after it. A store barrier has no effect + * on load operations. + */ +PORT_INLINE void portAtomicMemoryFenceStore(void); +/// @} End memory barrier functions + +/// @} End core functions + +/** + * @name Extended Functions + * @{ + */ + +#ifndef PORT_ATOMIC_64_BIT_SUPPORTED +/// @note We support 64bit atomics on all 64bit systems (and some 32bit) +#define PORT_ATOMIC_64_BIT_SUPPORTED NVCPU_IS_64_BITS +#endif + +#define portAtomicExAddS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExSubS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExSetS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExCompareAndSwapS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExIncrementS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExDecrementS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExXorS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExOrS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExAndS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED + +#if PORT_ATOMIC_64_BIT_SUPPORTED +/** + * @brief Like @ref portAtomicAddS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExAddS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExAddS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExAddU64(volatile NvU64 *pVal, NvU64 val); +/** + * @brief Like @ref portAtomicSubS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExSubS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExSubS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExSubU64(volatile NvU64 *pVal, NvU64 val); +/** + * @brief Like @ref portAtomicSetS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE void portAtomicExSetS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExSetS64 +PORT_ATOMIC64_INLINE void portAtomicExSetU64(volatile NvU64 *pVal, NvU64 val); +/** + * @brief Like @ref portAtomicCompareAndSwapS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvBool portAtomicExCompareAndSwapS64(volatile NvS64 *pVal, NvS64 newVal, NvS64 oldVal); +/// @brief Unsigned version of @ref portAtomicExCompareAndSwapS64 +PORT_ATOMIC64_INLINE NvBool portAtomicExCompareAndSwapU64(volatile NvU64 *pVal, NvU64 newVal, NvU64 oldVal); +/** + * @brief Like @ref portAtomicIncrementS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExIncrementS64(volatile NvS64 *pVal); +/// @brief 
Unsigned version of @ref portAtomicExIncrementS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExIncrementU64(volatile NvU64 *pVal); +/** + * @brief Like @ref portAtomicDecrementS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExDecrementS64(volatile NvS64 *pVal); +/// @brief Unsigned version of @ref portAtomicExDecrementS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExDecrementU64(volatile NvU64 *pVal); +/** + * @brief Like @ref portAtomicXorS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExXorS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExXorS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExXorU64(volatile NvU64 *pVal, NvU64 val); +/** + * @brief Like @ref portAtomicOrS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExOrS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExOrS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExOrU64(volatile NvU64 *pVal, NvU64 val); +/** + * @brief Like @ref portAtomicAndS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExAndS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExAndS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExAndU64(volatile NvU64 *pVal, NvU64 val); + +#endif // PORT_ATOMIC_64_BIT_SUPPORTED + +/// @} End extended functions + +/** + * Platform-specific inline implementations + */ +#if NVOS_IS_LIBOS +#include "nvport/inline/atomic_libos.h" +#endif + +#if PORT_COMPILER_IS_GCC +#include "nvport/inline/atomic_gcc.h" +#elif PORT_COMPILER_IS_CLANG +#include "nvport/inline/atomic_clang.h" +#elif PORT_COMPILER_IS_MSVC +#include "nvport/inline/atomic_msvc.h" +#endif + + +/** + * @name Utility Functions + * + * These are utility functions for performing operations on pointer sized + * operands. While the 64bit functions are "extended", they should always be + * present on systems where pointers and NvLength are 64 bits. 
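+ *
+ * For example (illustrative), a pointer-sized counter that compiles to the
+ * 32b functions on 32b systems and the 64b ones elsewhere:
+ *
+ * ~~~{.c}
+ * volatile NvSPtr count = 0;
+ * portAtomicIncrementSize(&count);
+ * portAtomicAddSize(&count, 4);
+ * ~~~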
+ * @{ + */ +#if !NVCPU_IS_64_BITS +#define portAtomicAddSize(a,b) (NvSPtr)portAtomicAddS32((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicSubSize(a,b) (NvSPtr)portAtomicSubS32((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicSetSize(a,b) portAtomicSetS32((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicCompareAndSwapSize(a,b,c) portAtomicCompareAndSwapS32((volatile NvSPtr *)a, (NvSPtr)b, (NvSPtr)c) +#define portAtomicIncrementSize(a) (NvSPtr)portAtomicIncrementS32((volatile NvSPtr *)a) +#define portAtomicDecrementSize(a) (NvSPtr)portAtomicDecrementS32((volatile NvSPtr *)a) +#define portAtomicXorSize(a,b) (NvSPtr)portAtomicXorS32((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicOrSize(a,b) (NvSPtr)portAtomicOrS32((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicAndSize(a,b) (NvSPtr)portAtomicAndS32((volatile NvSPtr *)a, (NvSPtr)b) +#else +#define portAtomicAddSize(a,b) (NvSPtr)portAtomicExAddS64((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicSubSize(a,b) (NvSPtr)portAtomicExSubS64((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicSetSize(a,b) portAtomicExSetS64((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicCompareAndSwapSize(a,b,c) portAtomicExCompareAndSwapS64((volatile NvSPtr *)a, (NvSPtr)b, (NvSPtr)c) +#define portAtomicIncrementSize(a) (NvSPtr)portAtomicExIncrementS64((volatile NvSPtr *)a) +#define portAtomicDecrementSize(a) (NvSPtr)portAtomicExDecrementS64((volatile NvSPtr *)a) +#define portAtomicXorSize(a,b) (NvSPtr)portAtomicExXorS64((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicOrSize(a,b) (NvSPtr)portAtomicExOrS64((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicAndSize(a,b) (NvSPtr)portAtomicExAndS64((volatile NvSPtr *)a, (NvSPtr)b) +#endif +/// @} + +#endif // _NVPORT_ATOMIC_H_ +/// @} diff --git a/src/nvidia/inc/libraries/nvport/core.h b/src/nvidia/inc/libraries/nvport/core.h new file mode 100644 index 0000000..26f8ec1 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/core.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NVPORT_CORE_H_ +#define _NVPORT_CORE_H_ + +/** + * @defgroup NVPORT_CORE Core Functions + * + * @brief These are core NvPort functions present in all configurations. 
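+ *
+ * The expected call pattern (illustrative):
+ *
+ * ~~~{.c}
+ * NV_STATUS status = portInitialize();
+ * if (status != NV_OK)
+ *     return status;
+ * // ... use NvPort ...
+ * portShutdown();
+ * ~~~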
+ * @{ + */ +/** + * @brief Global initialization + * + * Must be called once and only once before any NvPort functions can be called. + * + * If this function returns an error then calling any NvPort function will result + * in undefined behavior. + * + * + * @return NV_OK if successful, error otherwise + */ +NV_STATUS portInitialize(void); + +/** + * @brief Global shutdown + * + * Must be called once and only once when a driver is shutting down and no more + * NvPort functions will be called. + * + */ +void portShutdown(void); + +/** + * @brief Returns whether NvPort is initialized + * + * This function can be called at any time. It returns NV_TRUE if @ref portInitialize + * has been called (and @ref portShutdown has not). + */ +NvBool portIsInitialized(void); + +/// @} + +#endif // _NVPORT_CORE_H_ diff --git a/src/nvidia/inc/libraries/nvport/cpu.h b/src/nvidia/inc/libraries/nvport/cpu.h new file mode 100644 index 0000000..2380fb7 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/cpu.h @@ -0,0 +1,657 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief CPU module public interface. + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. Include nvport.h instead." +#endif + +#ifndef _NVPORT_CPU_H_ +#define _NVPORT_CPU_H_ +/** + * @defgroup NVPORT_CPU CPU-specific operations. + * + * @brief This module contains CPU-specific operations. + * + * @{ + */ +/** + * @brief Initialize global CPU module state. This function is called by + * @ref portInitialize. + */ +void portCpuInitialize(void); + +/** + * @brief Clean up global CPU module state. This function is called by + * @ref portShutdown. + */ +void portCpuShutdown(void); + +/** + * @name Core Functions + * @{ + */ +/** + * @brief Read requested MSR + * + * @param [in] address Address of the MSR + * @param [out] pValue Value of MSR + * + * @return NV_OK If successful. + */ +NV_STATUS portReadMsr(NvU32 address, NvU64 *pValue); + +/** + * @brief Write value to requested MSR + * + * @param [in] address Address of the MSR + * @param [in] value Value to be written + * + * @return NV_OK If successful. + */ +NV_STATUS portWriteMsr(NvU32 address, NvU64 value); + + /// @} End Core functions + + /** + * @name Extended Functions + * @{ + */ + /** + * @name Intel X86 Structures, unions and enums.
+ * @{ + */ + +/** +* @brief Structure representing Intel Processor's general +* features broken down into individual bit fields. +*/ +typedef struct PORT_CPU_INTEL_X86_FEATURES +{ + /// @{ + NvU32 SSE3 : 1; /**< Streaming SIMD Extensions 3.*/ + NvU32 PCLMULQDQ : 1; /**< PCLMULQDQ instruction.*/ + NvU32 DTES64 : 1; /**< 64-bit DS Area.*/ + NvU32 MONITOR : 1; /**< MONITOR/MWAIT.*/ + NvU32 DSCPL : 1; /**< CPL Qualified Debug Store.*/ + NvU32 VMX : 1; /**< Virtual Machine Extensions.*/ + NvU32 SMX : 1; /**< Safer Mode Extensions.*/ + NvU32 EIST : 1; /**< Enhanced Intel SpeedStep(R) technology*/ + NvU32 TM2 : 1; /**< Thermal Monitor 2.*/ + NvU32 SSSE3 : 1; /**< Supplemental Streaming SIMD Extensions 3*/ + NvU32 CNXTID : 1; /**< L1 Context ID*/ + NvU32 SDBG : 1; /**< IA32_DEBUG_INTERFACE MSR for silicon debug.*/ + NvU32 FMA : 1; /**< FMA extensions using YMM state.*/ + NvU32 CMPXCHG16B : 1; /**< CMPXCHG8B/CMPXCHG16B Compare and Exchange Bytes*/ + NvU32 xTPRUpdateControl : 1; /** supports changing + IA32_MISC_ENABLE[bit 23].*/ + NvU32 PDCM : 1; /**< Perfmon and Debug Capability: supports the performance + and debug feature indication MSR IA32_PERF_CAPABILITIES.*/ + NvU32 PCID : 1; /**< Process-context identifiers: Supports PCIDs and that + software may set CR4.PCIDE to 1.*/ + NvU32 DCA : 1; /**< Supports the ability to prefetch data from a memory mapped + device.*/ + NvU32 SSE41 : 1; /**< Supports SSE4.1.*/ + NvU32 SSE42 : 1; /**< Supports SSE4.2.*/ + NvU32 x2APIC : 1; /**< Support x2APIC.*/ + NvU32 MOVBE : 1; /**< Supports MOVBE instruction.*/ + NvU32 POPCNT : 1; /**< Supports the POPCNT instruction.*/ + NvU32 TSCDeadline : 1; /**< The processor's local APIC timer supports + one-shot operation using a TSC deadline value.*/ + NvU32 AES : 1; /**< Supports the AESNI instruction extensions.*/ + NvU32 XSAVE : 1; /**< Supports the XSAVE/XRSTOR processor extended states + feature, the XSETBV/XGETBV instructions, and XCR0.*/ + NvU32 OSXSAVE : 1; /**< the OS has set CR4.OSXSAVE[bit 18] to enable + XSETBV/XGETBV instructions to access XCR0 and to support + processor extended state management using + XSAVE/XRSTOR.*/ + NvU32 AVX : 1; /**< The processor supports the AVX instruction + extensions.*/ + NvU32 F16C : 1; /**< processor supports 16-bit floating-point conversion + instructions.*/ + NvU32 RDEND : 1; /**< Processor supports RDRAND instruction.*/ + NvU32 FPU : 1; /**< Floating Point Unit On-Chip.*/ + NvU32 VME : 1; /**< Virtual 8086 Mode Enhancements.*/ + NvU32 DE : 1; /**< Debugging Extensions.*/ + NvU32 PSE : 1; /**< Page Size Extension.*/ + NvU32 TSC : 1; /**< Time Stamp Counter.*/ + NvU32 MSR : 1; /**< Model Specific Registers RDMSR and WRMSR + Instructions.*/ + NvU32 PAE : 1; /**< Physical Address Extension.*/ + NvU32 MCE : 1; /**< Machine Check Exception.*/ + NvU32 CX8 : 1; /**< CMPXCHG8B Instruction.*/ + NvU32 APIC : 1; /**< APIC On-Chip.*/ + NvU32 SEP : 1; /**< SYSENTER and SYSEXIT Instructions.*/ + NvU32 MTRR : 1; /**< Memory Type Range Registers.*/ + NvU32 PGE : 1; /**< Page Global Bit*/ + NvU32 MCA : 1; /**< Machine Check Architecture.*/ + NvU32 CMOV : 1; /**< Conditional Move Instructions.*/ + NvU32 PAT : 1; /**< Page Attribute Table.*/ + NvU32 PSE36 : 1; /**< 36-Bit Page Size Extension.*/ + NvU32 PSN : 1; /**< 96-Bit Processor Serial Number.*/ + NvU32 CLFSH : 1; /**< CLFLUSH Instruction.*/ + NvU32 DEBUGS : 1; /**< Debug Store.*/ + NvU32 ACPI : 1; /**< Thermal Monitor and Software Controlled Clock + Facilities.*/ + NvU32 MMX : 1; /**< Intel MMX Technology.*/ + NvU32 FXSR : 1; /**< FXSAVE 
and FXRSTOR Instructions.*/ + NvU32 SSE : 1; /**< SSE Extensions.*/ + NvU32 SSE2 : 1; /**< SSE2 extensions.*/ + NvU32 SELFS : 1; /**< Self Snoop.*/ + NvU32 HTT : 1; /**< Max APIC IDs reserved field is Valid.*/ + NvU32 TM : 1; /**< Thermal Monitor.*/ + NvU32 PBE : 1; /**< Pending Break Enable.*/ + /// @} +} PORT_CPU_INTEL_X86_FEATURES; + +/** + * @brief Enum representing Intel processor family information. + * + */ +typedef enum PORT_CPU_INTEL_FAMILY +{ + PORT_CPU_INTEL_FAMILY_6 = 6, + PORT_CPU_INTEL_FAMILY_7 = 7 +} PORT_CPU_INTEL_FAMILY; + +/** + * @brief Enum representing Intel family 6 processor model information. + * + */ +typedef enum PORT_CPU_INTEL_FAMILY_6_MODEL +{ + PORT_CPU_INTEL_FAMLLY_6_MODEL_SANDYBRIDGE = 42, + PORT_CPU_INTEL_FAMLLY_6_MODEL_SANDYBRIDGE_X = 45, + PORT_CPU_INTEL_FAMLLY_6_MODEL_IVYBRIDGE = 58, + PORT_CPU_INTEL_FAMLLY_6_MODEL_IVYBRIDGE_X = 62, + PORT_CPU_INTEL_FAMLLY_6_MODEL_HASWELL = 60, + PORT_CPU_INTEL_FAMLLY_6_MODEL_HASWELL_X = 63, + PORT_CPU_INTEL_FAMLLY_6_MODEL_HASWELL_ULT = 69, + PORT_CPU_INTEL_FAMLLY_6_MODEL_HASWELL_GT3 = 70, + PORT_CPU_INTEL_FAMLLY_6_MODEL_BROADWELL = 61, + PORT_CPU_INTEL_FAMLLY_6_MODEL_BROADWELL_GT3 = 71, + PORT_CPU_INTEL_FAMLLY_6_MODEL_BROADWELL_X = 79, + PORT_CPU_INTEL_FAMLLY_6_MODEL_SKYLAKE = 94, + PORT_CPU_INTEL_FAMLLY_6_MODEL_SKYLAKE_MOBILE = 78, + PORT_CPU_INTEL_FAMLLY_6_MODEL_KABYLAKE = 158, + PORT_CPU_INTEL_FAMLLY_6_MODEL_KABYLAKE_MOBILE = 142, + PORT_CPU_INTEL_FAMLLY_6_MODEL_SKYLAKE_X = 85, + PORT_CPU_INTEL_FAMLLY_6_MODEL_CANNONLAKE_MOBILE = 102, + PORT_CPU_INTEL_FAMILY_6_MODEL_COMETLAKE_MOBILE = 166, + PORT_CPU_INTEL_FAMILY_6_MODEL_COMETLAKE = 165, + PORT_CPU_INTEL_FAMILY_6_MODEL_TIGERLAKE_MOBILE = 140, + PORT_CPU_INTEL_FAMILY_6_MODEL_TIGERLAKE = 141, +} PORT_CPU_INTEL_FAMILY_6_MODEL; + +/** + * @brief Union representing Intel processor family-wise model information. + * + */ +typedef union PORT_CPU_INTEL_MODEL +{ + PORT_CPU_INTEL_FAMILY_6_MODEL family6; +} PORT_CPU_INTEL_MODEL; + +/** + * @brief Enum representing Intel processor type information. + * + */ +typedef enum PORT_CPU_INTEL_PROCESSOR_TYPE +{ + PORT_CPU_INTEL_PROCESSOR_TYPE_ORIGINAL_OEM = 0, + PORT_CPU_INTEL_PROCESSOR_TYPE_OVERDRIVE = 1, + PORT_CPU_INTEL_PROCESSOR_TYPE_DUAL_PROCESSOR = 2, + PORT_CPU_INTEL_PROCESSOR_TYPE_RESERVED = 3 +} PORT_CPU_INTEL_PROCESSOR_TYPE; + +/** + * @brief Structure representing Intel Processor's Thermal & Power Management + * features broken down into individual bit fields. + */ +typedef struct PORT_CPU_INTEL_TPM_FEATURES +{ + /// @{ + NvU32 DTS : 1; /**< Digital Temperature Sensor is supported if set.*/ + NvU32 IntelTurboBoost : 1; /**< Intel Turbo Boost Technology available.*/ + NvU32 ARAT : 1; /**< APIC-Timer-always-running feature is supported + if set.*/ + NvU32 PLN : 1; /**< Power limit notification controls are supported + if set.*/ + NvU32 ECMD : 1; /**< Clock modulation duty cycle extension is supported + if set.*/ + NvU32 PTM : 1; /**< Package thermal management is supported if set.*/ + NvU32 HWP : 1; /**< HWP base registers (IA32_PM_ENABLE[bit 0], + IA32_HWP_CAPABILITIES, IA32_HWP_REQUEST, IA32_HWP_STATUS) + are supported if set.*/ + NvU32 HWPNotification : 1; /**< IA32_HWP_INTERRUPT MSR is supported + if set.*/ + NvU32 HWPActivityWindow : 1; /**< IA32_HWP_REQUEST[bits 41:32] is + supported if set.*/ + NvU32 HWPEPP : 1; /**< HWP_Energy_Performance_Preference. + IA32_HWP_REQUEST[bits 31:24] is supported if set.*/ + NvU32 HWPPLR : 1; /**< HWP_Package_Level_Request.
+                           IA32_HWP_REQUEST_PKG MSR is supported if set.*/
+    NvU32 HDC : 1;  /**< HDC base registers IA32_PKG_HDC_CTL, IA32_PM_CTL1,
+                         IA32_THREAD_STALL MSRs are supported if set.*/
+    NvU32 IntelTurboBoostMaxTech30 : 1; /**< Intel(R) Turbo Boost Max Technology
+                                             3.0 available.*/
+    NvU32 HWPCapabilities : 1; /**< Highest Performance change is supported
+                                    if set.*/
+    NvU32 HWPPECI : 1; /**< HWP PECI override is supported if set.*/
+    NvU32 FLEXHWP : 1; /**< Flexible HWP is supported if set.*/
+    NvU32 FAM : 1;     /**< Fast access mode for the IA32_HWP_REQUEST MSR is
+                            supported if set.*/
+    NvU32 ILPHWPRequest : 1; /**< Ignoring Idle Logical Processor HWP request
+                                  is supported if set.*/
+    NvU32 NoOfInterruptThresholdsInDTS : 4; /**< Number of Interrupt Thresholds
+                                                 in Digital Thermal Sensor.*/
+    NvU32 HCFC : 1; /**< Hardware Coordination Feedback Capability
+                         (presence of IA32_MPERF and IA32_APERF): the capability
+                         to provide a measure of delivered processor performance
+                         (since last reset of the counters), as a percentage of
+                         the expected processor performance when running at the
+                         TSC frequency.*/
+    NvU32 PEBP : 1; /**< The processor supports performance-energy bias
+                         preference if CPUID.06H:ECX.SETBH[bit 3] is set, which
+                         also implies the presence of a new architectural MSR
+                         called IA32_ENERGY_PERF_BIAS (1B0H).*/
+    /// @}
+} PORT_CPU_INTEL_TPM_FEATURES;
+
+/**
+ * @brief Structure representing Intel Processor's Architecture Performance
+ * Monitoring features broken down into individual bit fields.
+ */
+typedef struct PORT_CPU_INTEL_ARCH_PERF_MONITOR
+{
+    /// @{
+    NvU32 versionId; /**< Version ID of architectural performance monitoring.*/
+    NvU32 noOfGPPerfMonitoringCounters; /**< Number of general-purpose
+                                             performance monitoring counters
+                                             per logical processor.*/
+    NvU32 bitWidthOfGPCounters; /**< Bit width of general-purpose performance
+                                     monitoring counters.*/
+    NvU32 coreCycleEvent : 1; /**< Core cycle event available if 1.*/
+    NvU32 instructionRetiredEvent : 1; /**< Instruction retired event
+                                            available if 1.*/
+    NvU32 referenceCycelEvent : 1; /**< Reference cycles event available if 1.*/
+    NvU32 lastLevelCacheRefEvent : 1; /**< Last-level cache reference event
+                                           available if 1.*/
+    NvU32 lastLevelCacheMissEvent : 1; /**< Last-level cache misses event not
+                                            available if 1.*/
+    NvU32 branchInstructionRetiredEvent : 1; /**< Branch instruction retired
+                                                  event not available if 1.*/
+    NvU32 branchMispredictRetiredEvent : 1; /**< Branch mispredict retired event
+                                                 not available if 1.*/
+    NvU32 noOfFixedFuncPerfCounters; /**< Number of fixed-function performance
+                                          counters (if Version ID > 1).*/
+    NvU32 bitWidthOfFixedFuncPerfCounters; /**< Bit width of fixed-function
+                                                performance counters
+                                                (if Version ID > 1).*/
+    /// @}
+} PORT_CPU_INTEL_ARCH_PERF_MONITOR;
+
+/**
+ * @brief Structure representing Intel Processor version and features
+ * broken down into individual fields.
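+ *
+ * A minimal usage sketch, assuming the query below succeeds and reports an
+ * Intel part:
+ * ~~~{.c}
+ * PORT_CPU_INFORMATION info;
+ * if ((portCpuGetInfo(&info) == NV_OK) &&
+ *     (info.type == PORT_CPU_TYPE_INTEL_X86) &&
+ *     info.cpu.intel.features.AVX)
+ * {
+ *     portDbgPrintf("AVX-capable Intel CPU, %u logical cores\n",
+ *                   info.cpu.intel.logicalCoreCount);
+ * }
+ * ~~~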
+ */
+typedef struct PORT_CPU_INTEL
+{
+    /// @{
+    PORT_CPU_INTEL_FAMILY family;                /**< Family of the Processor.*/
+    PORT_CPU_INTEL_MODEL model;                  /**< Model of the Processor.*/
+    PORT_CPU_INTEL_PROCESSOR_TYPE processorType; /**< Processor Type.*/
+    NvU8 steppingId;          /**< Stepping ID of the Processor.*/
+    NvU8 brandIndex;          /**< Numerical Index of Brand String Index Table
+                                   entry.*/
+    NvU8 localApicId;         /**< Local APIC ID of the Processor.*/
+    NvU32 threadCountPerCore; /**< Threads Per Core.*/
+    NvU32 physicalCoreCount;  /**< Physical Cores Per Package.*/
+    NvU32 logicalCoreCount;   /**< Logical Cores Per Package.*/
+    PORT_CPU_INTEL_X86_FEATURES features;    /**< General Features.*/
+    PORT_CPU_INTEL_TPM_FEATURES tpmFeatures; /**< Thermal and Power Management
+                                                  Features.*/
+    PORT_CPU_INTEL_ARCH_PERF_MONITOR archPerfMonitor; /**< Architecture
+                                                           Performance
+                                                           Monitoring
+                                                           Features.*/
+    /// @}
+} PORT_CPU_INTEL;
+
+/// @}
+
+/**
+ * @name AMD X86 Structures, unions and enums.
+ * @{
+ */
+
+/**
+ * @brief Enum representing AMD processor family information.
+ */
+typedef enum PORT_CPU_AMD_FAMILY
+{
+    PORT_CPU_AMD_FAMILY_0    = 0,
+    PORT_CPU_AMD_FAMILY_1    = 1,
+    PORT_CPU_AMD_FAMILY_ZEN3 = 25
+} PORT_CPU_AMD_FAMILY;
+
+/**
+ * @brief Enum representing AMD processor family 0 model information.
+ */
+typedef enum PORT_CPU_AMD_FAMILY_0_MODEL
+{
+    PORT_CPU_AMD_FAMLLY_0_MODEL_X = 0,
+} PORT_CPU_AMD_FAMILY_0_MODEL;
+
+/**
+ * @brief Union representing AMD processor family-wise model information.
+ */
+typedef union PORT_CPU_AMD_MODEL
+{
+    PORT_CPU_AMD_FAMILY_0_MODEL family0;
+} PORT_CPU_AMD_MODEL;
+
+/**
+ * @brief Structure representing AMD Processor's Thermal & Power Management
+ * features broken down into individual bit fields.
+ */
+typedef struct PORT_CPU_AMD_TPM_FEATURES
+{
+    /// @{
+    NvU32 EffFreq : 1; /**< Effective frequency interface is supported if set.*/
+    /// @}
+} PORT_CPU_AMD_TPM_FEATURES;
+
+/**
+ * @brief Structure representing AMD Processor version and features
+ * broken down into individual fields.
+ */
+typedef struct PORT_CPU_AMD
+{
+    /// @{
+    PORT_CPU_AMD_FAMILY family; /**< Family of the Processor.*/
+    PORT_CPU_AMD_MODEL model;   /**< Model of the Processor.*/
+    NvU8 steppingId;          /**< Stepping ID of the Processor.*/
+    NvU8 brandIndex;          /**< Numerical Index of Brand String Index Table
+                                   entry.*/
+    NvU8 localApicId;         /**< Local APIC ID of the Processor.*/
+    NvU32 threadCountPerCore; /**< Threads Per Core.*/
+    NvU32 physicalCoreCount;  /**< Physical Cores Per Package.*/
+    NvU32 logicalCoreCount;   /**< Logical Cores Per Package.*/
+    PORT_CPU_AMD_TPM_FEATURES tpmFeatures; /**< Thermal and Power Management
+                                                Features.*/
+    /// @}
+} PORT_CPU_AMD;
+
+/// @}
+
+/**
+ * @name Generic CPU Information Structures, unions and enums.
+ * @{
+ */
+
+/**
+ * @brief Maximum length of the null-terminated Vendor ID string.
+ */
+#define PORT_CPU_VENDOR_ID_LENGTH 20
+
+/**
+ * @brief Enum representing the Processor Architecture Type.
+ */
+typedef enum PORT_CPU_TYPE
+{
+    /// @{
+    PORT_CPU_TYPE_INTEL_X86 = 0, /**< Intel X86/X86-64 Architecture.*/
+    PORT_CPU_TYPE_AMD_X86   = 1, /**< AMD X86/AMD64 Architecture.*/
+    PORT_CPU_TYPE_ARM       = 2  /**< ARM Architecture.*/
+    /// @}
+} PORT_CPU_TYPE;
+
+/**
+ * @brief Union representing the Abstract Processor data structure.
+ */
+typedef union PORT_CPU
+{
+    PORT_CPU_AMD amd;
+    PORT_CPU_INTEL intel;
+} PORT_CPU;
+
+/**
+ * @brief Structure representing processor information broken down into
+ * individual fields.
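+ *
+ * A minimal usage sketch, assuming the call below succeeds:
+ * ~~~{.c}
+ * PORT_CPU_INFORMATION info;
+ * if (portCpuGetInfo(&info) == NV_OK)
+ * {
+ *     portDbgPrintf("CPU vendor: %s\n", info.vendorId);
+ * }
+ * ~~~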
+ */
+typedef struct PORT_CPU_INFORMATION
+{
+    /// @{
+    PORT_CPU_TYPE type; /**< Type of Architecture/CPU.*/
+    char vendorId[PORT_CPU_VENDOR_ID_LENGTH]; /**< Null-terminated Vendor ID
+                                                   string.*/
+    NvLength vendorIdLength; /**< Actual length of the null-terminated Vendor
+                                  ID string.*/
+    PORT_CPU cpu; /**< CPU-specific information.*/
+    /// @}
+} PORT_CPU_INFORMATION;
+
+/**
+ * @brief Structure representing processor logical topology information broken
+ * down into individual fields.
+ */
+typedef struct PORT_CPU_LOGICAL_TOPOLOGY
+{
+    /// @{
+    NvU64 activeCpuCount;   /**< Active Logical CPUs.*/
+    NvU64 activeGroupCount; /**< Active Logical CPU Group count.*/
+    NvU64 maxCpuCount;      /**< Maximum Logical CPUs the system can support.*/
+    NvU64 maxGroupCount;    /**< Maximum Logical CPU Groups the system can
+                                 support.*/
+    NvU64 maxCpuPerGroup;   /**< Maximum Logical CPUs the system can support
+                                 per group.*/
+    /// @}
+} PORT_CPU_LOGICAL_TOPOLOGY;
+
+/**
+ * @brief Structure representing a BAR descriptor for a PCIe device.
+ */
+typedef struct PORT_CPU_BAR_DESC
+{
+    /// @{
+    void *pBarAddr; /**< Starting virtual address of the BAR space.*/
+    NvU64 physAddr; /**< Starting physical address of the BAR space.*/
+    NvU32 barSize;  /**< Size of the BAR space.*/
+    /// @}
+} PORT_CPU_BAR_DESC;
+
+/// @} End Generic CPU Information Structures, unions and enums.
+
+/**
+ * @brief Get the logical topology of the CPU.
+ * @param[out] pCpuTopology PORT_CPU_LOGICAL_TOPOLOGY pointer.
+ * @return NV_OK if successful, with the CPU logical topology information
+ *         returned in the pCpuTopology structure.
+ */
+NV_STATUS portCpuGetLogicalTopology(PORT_CPU_LOGICAL_TOPOLOGY *pCpuTopology);
+#define portCpuGetLogicalTopology_SUPPORTED (NVOS_IS_WINDOWS)
+
+/**
+ * @brief Get CPU information.
+ * @param[out] pCpuInfo PORT_CPU_INFORMATION pointer.
+ * @return NV_OK if successful, with the CPU information returned in the
+ *         pCpuInfo structure.
+ */
+NV_STATUS portCpuGetInfo(PORT_CPU_INFORMATION* pCpuInfo);
+#define portCpuGetInfo_SUPPORTED (_X86_ || _AMD64_)
+
+/**
+ * @brief Get CPU information using the CPUID instruction (x86-64 specific).
+ * @param[out] pCpuInfo Pointer to an array of four NvU32 that receives the
+ *                      CPUID output:
+ *                      cpuInfo[0] = EAX,
+ *                      cpuInfo[1] = EBX,
+ *                      cpuInfo[2] = ECX,
+ *                      cpuInfo[3] = EDX.
+ * @param[in] functionId Function Id of the CPUID instruction to execute.
+ * @param[in] subfunctionId Sub-Function Id of the CPUID instruction to execute.
+ *            subfunctionId enables you to gather additional information about
+ *            the processor.
+ *
+ * @return NV_OK if successful, otherwise an error code.
+ */
+NV_STATUS portCpuExCpuId(NvU32* pCpuInfo, NvU32 functionId,
+                         NvU32 subfunctionId);
+#define portCpuExCpuId_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_))
+/// @} End extended functions
+
+/**
+ * @brief Retrieve the current value and frequency of the performance counter.
+ *
+ * @param[out] pFreq A pointer to a variable to which this routine writes the
+ *                   performance counter frequency, in ticks per second.
+ *                   This parameter is optional and can be NULL if the caller
+ *                   does not need the counter frequency value.
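+ *
+ * A minimal timing sketch (assumes the multiplication below does not
+ * overflow):
+ * ~~~{.c}
+ * NvS64 freq;
+ * NvS64 begin = portCpuExQueryPerformanceCounter(&freq);
+ * // ... code under measurement ...
+ * NvS64 ticks = portCpuExQueryPerformanceCounter(NULL) - begin;
+ * NvS64 elapsedUs = (ticks * 1000000) / freq;
+ * ~~~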
+ *
+ * @return The performance counter value, in units of ticks.
+ */
+NvS64 portCpuExQueryPerformanceCounter(NvS64 *pFreq);
+#define portCpuExQueryPerformanceCounter_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_ || _ARM64_))
+
+/**
+ * @brief Enable the PMC read feature.
+ */
+void portCpuExEnablePmc(void);
+#define portCpuExEnablePmc_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_))
+
+/**
+ * @brief Read the requested PMC register.
+ *
+ * @param [in]  address Address of the PMC register.
+ * @param [out] pValue  Value of the PMC register.
+ *
+ * @return NV_OK if successful.
+ */
+NV_STATUS portCpuExReadPmc(NvU32 address, NvU64 *pValue);
+#define portCpuExReadPmc_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_))
+
+/**
+ * @brief Fill in the BAR descriptor of the integrated memory controller.
+ *
+ * @param [in] pImcBarDesc Pointer to the BAR descriptor structure.
+ *
+ * @return NV_OK if successful.
+ */
+NV_STATUS portCpuExAllocImcBarDesc(PORT_CPU_BAR_DESC *pImcBarDesc);
+#define portCpuExAllocImcBarDesc_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_))
+
+/**
+ * @brief Free the BAR descriptor of the integrated memory controller.
+ *
+ * @param [in] pImcBarDesc Pointer to the BAR descriptor structure.
+ *
+ * @return NV_OK if successful.
+ */
+NV_STATUS portCpuExFreeImcBarDesc(PORT_CPU_BAR_DESC *pImcBarDesc);
+#define portCpuExFreeImcBarDesc_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_))
+
+/**
+ * @brief Reset the performance monitoring counters.
+ *
+ * @return NV_OK if successful.
+ */
+NV_STATUS portCpuExResetPmu(void);
+#define portCpuExResetPmu_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_))
+
+/**
+ * @brief Program the performance monitoring counters.
+ *
+ * @param [in] numValidEvents Number of valid events in the pPerfEvents array.
+ * @param [in] pPerfEvents    Array of events to be configured into the general
+ *                            purpose performance monitoring counters (PMCs).
+ *
+ * @return NV_OK if successful.
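+ *
+ * A minimal sketch; the event encodings below are hypothetical placeholders,
+ * since this header does not define the event value format:
+ * ~~~{.c}
+ * NvU32 perfEvents[2] = { 0x1234, 0x5678 }; // hypothetical encodings
+ * if ((portCpuExResetPmu() == NV_OK) &&
+ *     (portCpuExProgramPmu(2, perfEvents) == NV_OK))
+ * {
+ *     NvU64 count;
+ *     portCpuExReadPmc(0, &count); // address 0 is a hypothetical example
+ * }
+ * ~~~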
+ */
+NV_STATUS portCpuExProgramPmu(NvU32 numValidEvents, NvU32 *pPerfEvents);
+#define portCpuExProgramPmu_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_))
+
+/**
+ * @brief Get the number of DRAM reads, in bytes.
+ *
+ * @param [out] pNumReads Number of bytes read from DRAM.
+ *
+ * @return NV_OK if successful.
+ */
+NV_STATUS portCpuExGetDRamReads(NvU64 *pNumReads);
+#define portCpuExGetDRamReads_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_))
+
+/**
+ * @brief Get the number of DRAM writes, in bytes.
+ *
+ * @param [out] pNumWrites Number of bytes written to DRAM.
+ *
+ * @return NV_OK if successful.
+ */
+NV_STATUS portCpuExGetDRamWrites(NvU64 *pNumWrites);
+#define portCpuExGetDRamWrites_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_))
+
+/**
+ * @brief Check if the given MSR is supported on the current processor.
+ *
+ * @param [in] address Address of the MSR that needs to be checked.
+ *
+ * @return NV_TRUE if the MSR is supported,
+ *         NV_FALSE if the MSR is not supported.
+ */
+NvBool portCpuExIsMsrSupported(NvU32 address);
+#define portCpuExIsMsrSupported_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_ || _ARM64_))
+
+/**
+ * @brief Check if the current processor supports DRAM read/write request
+ *        counting.
+ *
+ * @return NV_TRUE if supported,
+ *         NV_FALSE if not supported.
+ */
+NvBool portCpuExIsDramRwCountingSupported(void);
+#define portCpuExIsDramRwCountingSupported_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_))
+
+/**
+ * @brief Acquire the CPU counters resource before use.
+ *
+ * @param [out] pResourceHandle Handle returned from the allocation.
+ *
+ * @return NV_OK if successful.
+ */
+NV_STATUS portCpuExAcquireHardwareCounters(NvP64* pResourceHandle);
+#define portCpuExAcquireHardwareCounters_SUPPORTED (NVOS_IS_WINDOWS)
+
+/**
+ * @brief Release the CPU counters resource after use.
+ *
+ * @param [in] resourceHandle Handle used to free the allocation.
+ *
+ * @return NV_OK if successful.
+ */
+NV_STATUS portCpuExReleaseHardwareCounters(NvP64 resourceHandle);
+#define portCpuExReleaseHardwareCounters_SUPPORTED (NVOS_IS_WINDOWS)
+
+#endif // _NVPORT_CPU_H_
+/// @}
diff --git a/src/nvidia/inc/libraries/nvport/crypto.h b/src/nvidia/inc/libraries/nvport/crypto.h
new file mode 100644
index 0000000..adda6c9
--- /dev/null
+++ b/src/nvidia/inc/libraries/nvport/crypto.h
@@ -0,0 +1,346 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief Crypto module public interface
+ */
+
+#ifndef _NVPORT_H_
+#error "This file cannot be included directly. Include nvport.h instead."
+#endif
+
+#ifndef _NVPORT_CRYPTO_H_
+#define _NVPORT_CRYPTO_H_
+/**
+ * @defgroup NVPORT_CRYPTO Cryptography operations
+ *
+ * @brief This module contains cryptographic and PRNG functions
+ *
+ *
+ * A note on terminology:
+ *
+ * Pseudorandom numbers are deterministic and reproducible. When given the same
+ * seed, they will always give the same sequence, across all platforms. They are
+ * not suitable for cryptography or any security sensitive operations.
+ *
+ * True random numbers are generated from hardware, and as such are completely
+ * nondeterministic. There is no support for setting a seed, and you can expect
+ * the output to always be different. Unlike pseudorandom numbers, true random
+ * output will always vary across different platforms.
+ *
+ * True random numbers are suitable for security sensitive and cryptography
+ * operations.
+ *
+ * For kernel-mode code, the entropy pool will contain bits that are not
+ * available to usermode clients. As a consequence, a usermode client cannot
+ * deplete the entropy pool to lower the security of kernel-mode consumers.
+ *
+ * @note Unless ending with the "-Blocking" suffix, all functions are
+ * non-blocking. For True Random numbers, this means that if there are
+ * insufficient bits in the entropy pool, the available bits will be used
+ * to seed a custom PRNG which will provide the final output. A blocking
+ * version of some functions may be available as an extended function.
+ *
+ * @note As a general rule, you should always use the non-blocking version of a
+ * function, unless ALL the following conditions are satisfied:
+ *  - First time booting a clean OS
+ *  - No connection to the network
+ *  - The GPU is not booted yet
+ *  - Dealing with a remote machine (i.e. no direct mouse/keyboard input)
+ *  - No HW random support (older CPUs)
+ *
+ * For additional information, see these links:
+ *  - https://www.2uo.de/myths-about-urandom/
+ *  - https://bugs.ruby-lang.org/issues/9569
+ *  - https://security.stackexchange.com/questions/3936/is-a-rand-from-dev-urandom-secure-for-a-login-key
+ *
+ * @{
+ */
+
+
+/**
+ * @name Core Functions
+ * @{
+ */
+
+/**
+ * @brief Initializes global CRYPTO module state
+ *
+ * This function is called by @ref portInitialize. It is available here in case
+ * the CRYPTO module needs to be initialized without initializing all the
+ * others, e.g. for unit tests.
+ *
+ */
+void portCryptoInitialize(void);
+/**
+ * @brief Destroys global CRYPTO module state
+ *
+ * This function is called by @ref portShutdown. It is available here in case
+ * the CRYPTO module needs to be shut down without shutting down all the
+ * others, e.g. for unit tests.
+ *
+ */
+void portCryptoShutdown(void);
+
+/**
+ * @brief A pseudorandom number generator object
+ */
+typedef struct PORT_CRYPTO_PRNG PORT_CRYPTO_PRNG;
+
+
+/**
+ * @brief Construct a PRNG with the given seed.
+ *
+ * @warning These objects are not cryptographically secure, and thus not
+ * appropriate for any security sensitive operations. Use "True" random instead.
+ *
+ * The same seed will always result in the same sequence returned by
+ * @ref portCryptoPseudoRandomGeneratorGetU32,
+ * @ref portCryptoPseudoRandomGeneratorGetU64 and
+ * @ref portCryptoPseudoRandomGeneratorFillBuffer. This behavior is consistent
+ * across all platforms. The following code will always print the same thing:
+ * ~~~{.c}
+ * PORT_CRYPTO_PRNG *pPrng = portCryptoPseudoRandomGeneratorCreate(0xdeadbeef);
+ * if (pPrng)
+ * {
+ *     NvU32 n = portCryptoPseudoRandomGeneratorGetU32(pPrng);
+ *     portDbgPrintf("%u", n);
+ *     portCryptoPseudoRandomGeneratorDestroy(pPrng);
+ * }
+ * ~~~
+ *
+ * @return NULL if the construction failed, a PRNG object otherwise.
+ *
+ */
+PORT_CRYPTO_PRNG *portCryptoPseudoRandomGeneratorCreate(NvU64 seed);
+/**
+ * @brief Destroys an object created with
+ * @ref portCryptoPseudoRandomGeneratorCreate
+ *
+ */
+void portCryptoPseudoRandomGeneratorDestroy(PORT_CRYPTO_PRNG *pPrng);
+/**
+ * @brief Returns a 32bit pseudorandom number from a given PRNG.
+ *
+ * @warning The numbers generated in this way are not appropriate for security
+ * sensitive operations. Use @ref portCryptoExTrueRandomGetU32 instead.
+ *
+ * @param [in] pPrng - Generator object. If NULL, the default one will be used.
+ *
+ */
+NvU32 portCryptoPseudoRandomGeneratorGetU32(PORT_CRYPTO_PRNG *pPrng);
+/**
+ * @brief Returns a 64bit pseudorandom number from a given PRNG.
+ *
+ * @warning The numbers generated in this way are not appropriate for security
+ * sensitive operations. Use @ref portCryptoExTrueRandomGetU64 instead.
+ *
+ * @param [in] pPrng - Generator object. If NULL, the default one will be used.
+ *
+ */
+NvU64 portCryptoPseudoRandomGeneratorGetU64(PORT_CRYPTO_PRNG *pPrng);
+/**
+ * @brief Fills a user-provided buffer with a pseudorandom sequence from a
+ * given PRNG.
+ *
+ * @warning The numbers generated in this way are not appropriate for security
+ * sensitive operations. Use @ref portCryptoExTrueRandomFillBuffer instead.
+ *
+ * @param [in]  pPrng   - Generator object. If NULL, the default one will be used.
+ * @param [out] pBuffer - Buffer to fill.
+ * @param [in]  bufSize - Number of bytes to write to pBuffer.
+ *
+ * @return NV_OK if successful;
+ *         NV_ERR_INVALID_POINTER if pBuffer is NULL;
+ *
+ */
+NV_STATUS portCryptoPseudoRandomGeneratorFillBuffer(PORT_CRYPTO_PRNG *pPrng, NvU8 *pBuffer, NvLength bufSize);
+
+/**
+ * @brief Sets the PRNG seed of the global generator
+ *
+ * The same seed will always result in the same sequence returned by
+ * @ref portCryptoPseudoRandomGetU32, @ref portCryptoPseudoRandomGetU64 and
+ * @ref portCryptoPseudoRandomFillBuffer. This behavior is consistent across
+ * all platforms. The following code will print the same thing on all platforms:
+ * ~~~{.c}
+ * portCryptoPseudoRandomSetSeed(0xdeadbeef);
+ * NvU32 n = portCryptoPseudoRandomGetU32();
+ * portDbgPrintf("%u", n);
+ * ~~~
+ *
+ */
+void portCryptoPseudoRandomSetSeed(NvU64 seed);
+
+/**
+ * @brief Returns a 32bit pseudorandom number from the global generator
+ *
+ * This is equivalent to calling @ref portCryptoPseudoRandomGeneratorGetU32 with
+ * a NULL generator object.
+ *
+ * @warning The numbers generated in this way are not appropriate for security
+ * sensitive operations. Use @ref portCryptoExTrueRandomGetU32 instead.
+ *
+ */
+NvU32 portCryptoPseudoRandomGetU32(void);
+/**
+ * @brief Returns a 64bit pseudorandom number from the global generator
+ *
+ * This is equivalent to calling @ref portCryptoPseudoRandomGeneratorGetU64 with
+ * a NULL generator object.
+ *
+ * @warning The numbers generated in this way are not appropriate for security
+ * sensitive operations. Use @ref portCryptoExTrueRandomGetU64 instead.
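+ *
+ * A minimal sketch (the modulo reduction is slightly biased, which is
+ * acceptable for non-cryptographic use):
+ * ~~~{.c}
+ * NvU64 dieRoll = (portCryptoPseudoRandomGetU64() % 6) + 1;
+ * ~~~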
+ *
+ */
+NvU64 portCryptoPseudoRandomGetU64(void);
+/**
+ * @brief Fills a user-provided buffer with a pseudorandom sequence.
+ *
+ * This is equivalent to calling @ref portCryptoPseudoRandomGeneratorFillBuffer
+ * with a NULL generator object.
+ *
+ * @warning The numbers generated in this way are not appropriate for security
+ * sensitive operations. Use @ref portCryptoExTrueRandomFillBuffer instead.
+ *
+ * @return NV_OK if successful;
+ *         NV_ERR_INVALID_POINTER if pBuffer is NULL;
+ *
+ */
+NV_STATUS portCryptoPseudoRandomFillBuffer(NvU8 *pBuffer, NvLength bufSize);
+
+/**
+ * @brief Calculate the MD5 hash of a given buffer
+ *
+ * @param [in]  pInBuffer  - Input data. Must not be NULL.
+ * @param [in]  bufSize    - Size of the input buffer, in bytes.
+ * @param [out] pOutBuffer - Output buffer. Must be at least 16 bytes in length.
+ *
+ * @return NV_OK if successful.
+ */
+NV_STATUS portCryptoHashMD5(const NvU8 *pInBuffer, NvLength bufSize, NvU8 pOutBuffer[16]);
+/**
+ * @brief Calculate the first 24 bits of the MD5 hash of a given buffer
+ *
+ * The 24 bits are interpreted as a full hash, and are stored as big endian. So,
+ * if the full hash was d41d8cd98f00b204e9800998ecf8427e, the short 24-bit hash
+ * would be 0x00d41d8c.
+ *
+ * @param [in]  pInBuffer - Input data. Must not be NULL.
+ * @param [in]  bufSize   - Size of the input buffer, in bytes.
+ * @param [out] pOut      - Output location. Only the lowest 24 bits are set.
+ *
+ * @return NV_OK if successful.
+ */
+NV_STATUS portCryptoHashMD5Short(const NvU8 *pInBuffer, NvLength bufSize, NvU32 *pOut);
+/**
+ * @brief Convert a binary representation of the MD5 hash to a 32-digit hex string
+ */
+NV_STATUS portCryptoHashMD5BinaryToHexString(const NvU8 pBinary[16], char pHexStr[33]);
+/**
+ * @brief Convert a 32-digit hex string representation of the MD5 hash to binary
+ */
+NV_STATUS portCryptoHashMD5HexStringToBinary(const char *pHexStr, NvU8 pBinary[16]);
+
+/// @} End core functions
+
+/**
+ * @name Extended Functions
+ * @{
+ */
+#if defined(NV_MODS) || PORT_IS_KERNEL_BUILD
+#define PORT_CRYPTO_TRUE_RANDOM_SUPPORTED 0
+#else
+#define PORT_CRYPTO_TRUE_RANDOM_SUPPORTED 1
+#endif
+/**
+ * @brief Returns a 32bit random number
+ *
+ * @note This function does not block, but rather combines the bits from the
+ * entropy pool with a PRNG to produce a random output of the desired width.
+ * This is considered safe for most cryptographic applications. You can use
+ * @ref portCryptoExTrueRandomGetU32Blocking for a guaranteed high entropy output.
+ */
+NvU32 portCryptoExTrueRandomGetU32(void);
+#define portCryptoExTrueRandomGetU32_SUPPORTED 0
+/**
+ * @brief Returns a 64bit random number
+ *
+ * @note This function does not block, but rather combines the bits from the
+ * entropy pool with a PRNG to produce a random output of the desired width.
+ * This is considered safe for most cryptographic applications. You can use
+ * @ref portCryptoExTrueRandomGetU64Blocking for a guaranteed high entropy output.
+ */
+NvU64 portCryptoExTrueRandomGetU64(void);
+#define portCryptoExTrueRandomGetU64_SUPPORTED 0
+/**
+ * @brief Fills a user-provided buffer with a random sequence.
+ *
+ * @note This function does not block, but rather combines the bits from the
+ * entropy pool with a PRNG to produce a random output of the desired width.
+ * This is considered safe for most cryptographic applications. You can use
+ * @ref portCryptoExTrueRandomFillBufferBlocking for a guaranteed high entropy
+ * output.
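+ *
+ * A minimal sketch of filling a 16-byte key buffer:
+ * ~~~{.c}
+ * NvU8 key[16];
+ * if (portCryptoExTrueRandomFillBuffer(key, sizeof(key)) == NV_OK)
+ * {
+ *     // key now holds 16 bytes of entropy-pool-derived randomness
+ * }
+ * ~~~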
+ *
+ * @return NV_OK if successful;
+ *         NV_ERR_INVALID_POINTER if pBuffer is NULL;
+ */
+NV_STATUS portCryptoExTrueRandomFillBuffer(NvU8 *pBuffer, NvLength bufSize);
+#define portCryptoExTrueRandomFillBuffer_SUPPORTED 0
+
+#define PORT_CRYPTO_TRUE_RANDOM_BLOCKING_SUPPORTED (!PORT_IS_KERNEL_BUILD && !NVOS_IS_WINDOWS)
+
+/**
+ * @brief Returns a 32bit random number, possibly blocking the thread.
+ *
+ * If there are not enough entropy bits available, the function will block until
+ * there are. Use @ref portCryptoExTrueRandomGetU32 unless you really need the
+ * entire result to be exclusively made of true random bits.
+ */
+NvU32 portCryptoExTrueRandomGetU32Blocking(void);
+#define portCryptoExTrueRandomGetU32Blocking_SUPPORTED 0
+/**
+ * @brief Returns a 64bit random number, possibly blocking the thread.
+ *
+ * If there are not enough entropy bits available, the function will block until
+ * there are. Use @ref portCryptoExTrueRandomGetU64 unless you really need the
+ * entire result to be exclusively made of true random bits.
+ */
+NvU64 portCryptoExTrueRandomGetU64Blocking(void);
+#define portCryptoExTrueRandomGetU64Blocking_SUPPORTED 0
+
+/**
+ * @brief Fills a user-provided buffer with a random sequence,
+ * possibly blocking the thread.
+ *
+ * If there are not enough entropy bits available, the function will block until
+ * there are. Use @ref portCryptoExTrueRandomFillBuffer unless you really need
+ * the entire result to be exclusively made of true random bits.
+ */
+NV_STATUS portCryptoExTrueRandomFillBufferBlocking(NvU8 *pBuffer, NvLength bufSize);
+#define portCryptoExTrueRandomFillBufferBlocking_SUPPORTED 0
+
+/// @} End extended functions
+
+/// @}
+
+#endif // _NVPORT_CRYPTO_H_
+/// @}
diff --git a/src/nvidia/inc/libraries/nvport/debug.h b/src/nvidia/inc/libraries/nvport/debug.h
new file mode 100644
index 0000000..c8e5d75
--- /dev/null
+++ b/src/nvidia/inc/libraries/nvport/debug.h
@@ -0,0 +1,318 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+/**
+ * @file
+ * @brief Debug module public interface
+ */
+
+#ifndef _NVPORT_H_
+#error "This file cannot be included directly. Include nvport.h instead."
+#endif
+
+#ifndef _NVPORT_DEBUG_H_
+#define _NVPORT_DEBUG_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup NVPORT_DEBUG Debug Support Routines
+ * @brief This module provides debug support routines like breakpoints and prints.
+ * @{
+ */
+
+/** @brief See @ref PORT_UTIL_INLINE */
+#ifndef PORT_DEBUG_INLINE
+#define PORT_DEBUG_INLINE PORT_INLINE
+#endif
+
+/**
+ * @name Core Functions
+ * @{
+ *
+ * @note The breakpoint macro comes in several flavors:
+ *  - @ref PORT_BREAKPOINT -
+ *      Shouldn't be used directly.
+ *  - @ref PORT_BREAKPOINT_DEBUG -
+ *      Causes a breakpoint in debug builds only; use for all debug purposes.
+ *  - @ref PORT_BREAKPOINT_CHECKED -
+ *      Causes a breakpoint in checked builds only; use when you want the
+ *      @c int3 present in develop and release builds, such as QA builds.
+ *  - @ref PORT_BREAKPOINT_ALWAYS -
+ *      Always breaks; use only if you want to trigger @c int3 even on
+ *      public release builds.
+ */
+
+
+/**
+ * @brief Prints a string to a platform-dependent output stream
+ *
+ * This function will print the string where you would expect it for a given
+ * platform. In user space it will be standard output. In kernel space it will
+ * be the kernel debug log.
+ *
+ * Note that NvPort does not provide advanced logging capabilities, only the
+ * ability to emit a string. For a more robust logging solution see the NvLog
+ * project.
+ *
+ */
+PORT_DEBUG_INLINE void portDbgPrintString(const char *str, NvLength length);
+
+/**
+ * @brief Convenience macro for printing a string literal.
+ */
+#define PORT_DBG_PRINT_STRING_LITERAL(s) portDbgPrintString(s, sizeof(s)-1)
+
+/**
+ * @def PORT_BREAKPOINT_DEBUG_ENABLED
+ * @brief Controls whether @ref PORT_BREAKPOINT_DEBUG is enabled or not
+ */
+#if !defined(PORT_BREAKPOINT_DEBUG_ENABLED)
+#if defined(DEBUG)
+#define PORT_BREAKPOINT_DEBUG_ENABLED 1
+#else
+#define PORT_BREAKPOINT_DEBUG_ENABLED 0
+#endif
+#endif
+
+
+/**
+ * @def PORT_BREAKPOINT_DEBUG
+ * @brief Causes a breakpoint into the debugger only when
+ * @ref PORT_BREAKPOINT_DEBUG_ENABLED is nonzero.
+ *
+ * By default PORT_BREAKPOINT_DEBUG_ENABLED is set based on the value of DEBUG.
+ * However, it is kept as a separate define so you can override it separately
+ * if so desired.
+ */
+#if PORT_BREAKPOINT_DEBUG_ENABLED
+#define PORT_BREAKPOINT_DEBUG PORT_BREAKPOINT
+#else
+#define PORT_BREAKPOINT_DEBUG()
+#endif
+
+#define PORT_FILE_STR __FILE__
+
+/// @cond NVPORT_INTERNAL
+#if !defined(PORT_ASSERT_FAILED_USES_STRINGS)
+#define PORT_ASSERT_FAILED_USES_STRINGS PORT_IS_CHECKED_BUILD
+#endif
+
+#if PORT_ASSERT_FAILED_USES_STRINGS
+#define _PORT_STRINGIFY2(x) #x
+#define _PORT_STRINGIFY(x)  _PORT_STRINGIFY2(x)
+#define _PORT_ASSERT_MESSAGE(cond) "Assertion failed: \"" #cond "\" at " \
+                                   PORT_FILE_STR ":" _PORT_STRINGIFY(__LINE__) "\n"
+#else
+#define _PORT_ASSERT_MESSAGE(cond) "Assertion failed"
+#endif
+/// @endcond
+
+/**
+ * @brief Causes a breakpoint if the condition evaluates to false.
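+ *
+ * A minimal sketch (pBuffer is a hypothetical variable):
+ * ~~~{.c}
+ * PORT_ASSERT(pBuffer != NULL);
+ * ~~~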
+ */
+#define PORT_ASSERT(cond)                                               \
+    do                                                                  \
+    {                                                                   \
+        PORT_COVERAGE_PUSH_OFF();                                       \
+        if (!(cond))                                                    \
+        {                                                               \
+            PORT_DBG_PRINT_STRING_LITERAL(_PORT_ASSERT_MESSAGE(cond));  \
+            PORT_BREAKPOINT();                                          \
+        }                                                               \
+        PORT_COVERAGE_POP();                                            \
+    } while (0)
+
+/*
+ * Checks osDbgBreakpointEnabled and PDB_PROP_SYS_DEBUGGER_DISABLED
+ * to see if breakpoints are allowed.
+ */
+NvBool nvDbgBreakpointEnabled(void);
+
+/**
+ * @def PORT_BREAKPOINT_CHECKED()
+ * @brief Causes a breakpoint in checked builds only
+ */
+/**
+ * @def PORT_ASSERT_CHECKED(x)
+ * @brief Causes an assert in checked builds only
+ */
+#if PORT_IS_CHECKED_BUILD
+
+/*
+ * TODO: defined(NVRM) && PORT_IS_KERNEL_BUILD && defined(NVWATCH) are all true
+ * when NvWatch is included in the Debug Linux AMD64 Mfg Mods build.
+ * This seems wrong...
+ */
+#if defined(NVRM) && PORT_IS_KERNEL_BUILD == 1 && !defined(NVWATCH)
+#define PORT_BREAKPOINT_CHECKED()                                       \
+    do                                                                  \
+    {                                                                   \
+        if (nvDbgBreakpointEnabled())                                   \
+            PORT_BREAKPOINT();                                          \
+    } while (0)
+#else
+#define PORT_BREAKPOINT_CHECKED() PORT_BREAKPOINT()
+#endif
+#define PORT_ASSERT_CHECKED(x) PORT_ASSERT(x)
+#else // PORT_IS_CHECKED_BUILD
+#define PORT_BREAKPOINT_CHECKED()
+#define PORT_ASSERT_CHECKED(x)
+#endif // PORT_IS_CHECKED_BUILD
+
+/**
+ * @brief Causes a breakpoint into the debugger regardless of build configuration.
+ *
+ * Note this is equivalent to just calling @ref PORT_BREAKPOINT. It is only
+ * included to provide an alternative to @ref PORT_BREAKPOINT_DEBUG that is
+ * consistent in look and usage.
+ */
+#define PORT_BREAKPOINT_ALWAYS PORT_BREAKPOINT
+
+/**
+ * @def PORT_COVERAGE_PUSH_OFF()
+ * @brief Saves the current coverage tracking state to a stack and disables it
+ *
+ * This is useful to do around some error checking code (e.g. "default:") so the
+ * Bullseye tool doesn't take those branches into account when checking code
+ * coverage.
+ *
+ *  - See @ref PORT_ASSERT for a usage example.
+ *  - See https://www.bullseye.com/help/build-exclude.html for more details.
+ */
+/**
+ * @def PORT_COVERAGE_PUSH_ON()
+ * @brief Saves the current coverage tracking state to a stack and enables it
+ */
+/**
+ * @def PORT_COVERAGE_POP()
+ * @brief Restores the last saved coverage tracking state
+ *
+ * See @ref PORT_ASSERT for a usage example.
+ */
+#if defined(NV_BULLSEYE)
+#define PORT_COVERAGE_PUSH_OFF() "BullseyeCoverage save off"
+#define PORT_COVERAGE_PUSH_ON()  "BullseyeCoverage save on"
+#define PORT_COVERAGE_POP()      "BullseyeCoverage restore"
+#else
+#define PORT_COVERAGE_PUSH_OFF()
+#define PORT_COVERAGE_PUSH_ON()
+#define PORT_COVERAGE_POP()
+#endif
+
+
+
+/// @} End core functions
+
+/**
+ * @def NVPORT_CHECK_PRINTF_ARGUMENTS(a,b)
+ * @brief Compile-time check that arguments conform to printf rules
+ */
+#if PORT_COMPILER_HAS_ATTRIBUTE_FORMAT
+#define NVPORT_CHECK_PRINTF_ARGUMENTS(a,b) __attribute__((format(printf, a, b)))
+#else
+#define NVPORT_CHECK_PRINTF_ARGUMENTS(a,b)
+#endif
+
+/**
+ * @name Extended Functions
+ * @{
+ */
+
+#if !defined(portDbgPrintf_SUPPORTED)
+#define portDbgPrintf_SUPPORTED 0
+#endif
+#if !defined(portDbgExPrintfLevel_SUPPORTED)
+#define portDbgExPrintfLevel_SUPPORTED 0
+#endif
+
+#if PORT_IS_FUNC_SUPPORTED(portDbgPrintf)
+/**
+ * @brief Prints a formatted string using @ref portDbgPrintString
+ *
+ * The parameters are like those of printf().
+ */
+PORT_DEBUG_INLINE void portDbgPrintf(const char *format, ...)
+    NVPORT_CHECK_PRINTF_ARGUMENTS(1, 2);
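+
+/*
+ * A minimal usage sketch (gpuIndex and status are hypothetical variables):
+ *
+ *     portDbgPrintf("GPU %u: status 0x%08x\n", gpuIndex, status);
+ */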
+#endif
+
+#if PORT_IS_FUNC_SUPPORTED(portDbgExPrintfLevel)
+/**
+ * @brief Similar to @ref portDbgPrintf, except that it passes the level to the
+ * underlying implementation.
+ *
+ * Some platforms (e.g. MODS) have an API where prints are given a level, and
+ * some tools may depend on certain prints being at a certain level. This
+ * function simply passes the level to that API; NvPort does not understand
+ * or filter these levels.
+ *
+ * @param level - An int representing the level at which to print.
+ */
+PORT_DEBUG_INLINE void portDbgExPrintfLevel(NvU32 level, const char *format, ...) NVPORT_CHECK_PRINTF_ARGUMENTS(2, 3);
+#endif
+
+/// @} End extended functions
+
+// Include platform specific inline definitions
+
+#if NVOS_IS_QNX
+#include "nvport/inline/debug_qnx.h"
+#elif NVOS_IS_DCECORE
+#include "nvport/inline/debug_dcecore.h"
+#else
+
+#if PORT_IS_KERNEL_BUILD
+
+#if NVOS_IS_WINDOWS
+#include "nvport/inline/debug_win_kernel.h"
+#elif NVOS_IS_UNIX
+#include "nvport/inline/debug_unix_kernel_os.h"
+#elif NVOS_IS_LIBOS
+#include "nvport/inline/debug_libos.h"
+#else
+#error "Unsupported target OS"
+#endif
+
+#else // Usermode build
+
+#if NVOS_IS_WINDOWS
+#include "nvport/inline/debug_win_user.h"
+#elif NVOS_IS_UNIX
+#include "nvport/inline/debug_unix_user.h"
+#elif NVOS_IS_LIBOS
+#include "nvport/inline/debug_libos.h"
+#else
+#error "Unsupported target OS"
#endif
+
+#endif // PORT_IS_KERNEL_BUILD
+#endif // !NVOS_IS_QNX && !NVOS_IS_DCECORE
+
+#if !defined(PORT_DUMP_STACK)
+#define PORT_DUMP_STACK() do {} while (0)
+#endif
+
+#ifdef __cplusplus
+}
+#endif //__cplusplus
+#endif // _NVPORT_DEBUG_H_
+/// @}
diff --git a/src/nvidia/inc/libraries/nvport/inline/atomic_clang.h b/src/nvidia/inc/libraries/nvport/inline/atomic_clang.h
new file mode 100644
index 0000000..40dd6c2
--- /dev/null
+++ b/src/nvidia/inc/libraries/nvport/inline/atomic_clang.h
@@ -0,0 +1,478 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief Atomic function implementations using clang compiler intrinsics
+ */
+
+#ifndef _NVPORT_ATOMIC_CLANG_H_
+#define _NVPORT_ATOMIC_CLANG_H_
+
+
+#if !(defined(__clang__))
+#error "Unsupported compiler: This file can only be compiled by clang"
+#endif
+
+
+PORT_INLINE void
+portAtomicMemoryFenceLoad(void)
+{
+    __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+PORT_INLINE void
+portAtomicMemoryFenceStore(void)
+{
+    __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+PORT_INLINE void
+portAtomicMemoryFenceFull(void)
+{
+    __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+PORT_INLINE void
+portAtomicTimerBarrier(void)
+{
+#if NVCPU_IS_FAMILY_ARM
+    __asm__ __volatile__ ("ISB" : : : "memory");
+#elif NVCPU_IS_PPC || NVCPU_IS_PPC64LE
+    __asm__ __volatile__ ("isync" : : : "memory");
+#elif NVCPU_IS_X86 || NVCPU_IS_X86_64
+    __asm__ __volatile__ ("lfence" : : : "memory");
+#elif NVCPU_IS_RISCV64
+    __asm__ __volatile__ ("fence.i" : : : "memory");
+#else
+#error "portAtomicTimerBarrier implementation not found"
+#endif
+}
+
+#if PORT_COMPILER_HAS_INTRINSIC_ATOMICS && !defined(NV_MODS) && !NVOS_IS_LIBOS
+
+PORT_ATOMIC_INLINE void
+portAtomicInit(void)
+{
+
+}
+
+PORT_ATOMIC_INLINE NvS32
+portAtomicAddS32
+(
+    volatile NvS32 *pVal,
+    NvS32 val
+)
+{
+    return __c11_atomic_fetch_add((_Atomic NvS32 *)pVal, val, __ATOMIC_SEQ_CST) + val;
+}
+
+PORT_ATOMIC_INLINE NvS32
+portAtomicSubS32
+(
+    volatile NvS32 *pVal,
+    NvS32 val
+)
+{
+    return __c11_atomic_fetch_sub((_Atomic NvS32 *)pVal,
+                                  val, __ATOMIC_SEQ_CST) - val;
+}
+
+PORT_ATOMIC_INLINE void
+portAtomicSetS32
+(
+    volatile NvS32 *pVal,
+    NvS32 val
+)
+{
+    __c11_atomic_store((_Atomic NvS32 *)pVal, val, __ATOMIC_SEQ_CST);
+}
+
+PORT_ATOMIC_INLINE NvBool
+portAtomicCompareAndSwapS32
+(
+    volatile NvS32 *pVal,
+    NvS32 newVal,
+    NvS32 oldVal
+)
+{
+    NvS32 tmp = oldVal; // Needed so the compiler can still inline this function
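+    // On failure, __c11_atomic_compare_exchange_strong writes the value it
+    // observed back into 'tmp', so the local copy keeps 'oldVal' unchanged.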
+ return __c11_atomic_compare_exchange_strong((_Atomic NvU32 *)pVal, + &tmp, + newVal, + __ATOMIC_SEQ_CST, + __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicIncrementU32 +( + volatile NvU32 *pVal +) +{ + return portAtomicAddU32(pVal, 1); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicDecrementU32 +( + volatile NvU32 *pVal +) +{ + return portAtomicSubU32(pVal, 1); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicXorU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __c11_atomic_fetch_xor((_Atomic NvU32 *)pVal, + val, __ATOMIC_SEQ_CST) ^ val; +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicOrU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __c11_atomic_fetch_or((_Atomic NvU32 *)pVal, + val, __ATOMIC_SEQ_CST) | val; +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicAndU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __c11_atomic_fetch_and((_Atomic NvU32 *)pVal, + val, __ATOMIC_SEQ_CST) & val; +} + + +#if NVCPU_IS_64_BITS + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExAddS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __c11_atomic_fetch_add((_Atomic NvS64 *)pVal, + val, __ATOMIC_SEQ_CST) + val; +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExSubS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __c11_atomic_fetch_sub((_Atomic NvS64 *)pVal, + val, __ATOMIC_SEQ_CST) - val; +} + +PORT_ATOMIC64_INLINE void +portAtomicExSetS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + __c11_atomic_store((_Atomic NvS64 *)pVal, val, __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC64_INLINE NvBool +portAtomicExCompareAndSwapS64 +( + volatile NvS64 *pVal, NvS64 newVal, NvS64 oldVal +) +{ + NvS64 tmp = oldVal; // Needed so the compiler can still inline this function + return __c11_atomic_compare_exchange_strong((_Atomic NvS64 *)pVal, + &tmp, + newVal, + __ATOMIC_SEQ_CST, + __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExIncrementS64 +( + volatile NvS64 *pVal +) +{ + return portAtomicExAddS64(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExDecrementS64 +( + volatile NvS64 *pVal +) +{ + return portAtomicExSubS64(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExXorS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __c11_atomic_fetch_xor((_Atomic NvS64 *)pVal, + val, __ATOMIC_SEQ_CST) ^ val; +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExOrS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __c11_atomic_fetch_or((_Atomic NvS64 *)pVal, + val, __ATOMIC_SEQ_CST) | val; +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExAndS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __c11_atomic_fetch_and((_Atomic NvS64 *)pVal, + val, __ATOMIC_SEQ_CST) & val; +} + + + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExAddU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __c11_atomic_fetch_add((_Atomic NvU64 *)pVal, + val, __ATOMIC_SEQ_CST) + val; +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExSubU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __c11_atomic_fetch_sub((_Atomic NvU64 *)pVal, + val, __ATOMIC_SEQ_CST) - val; +} + +PORT_ATOMIC64_INLINE void +portAtomicExSetU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + __c11_atomic_store((_Atomic NvU64 *)pVal, val, __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC64_INLINE NvBool +portAtomicExCompareAndSwapU64 +( + volatile NvU64 *pVal, NvU64 newVal, NvU64 oldVal +) +{ + NvU64 tmp = oldVal; // Needed so the compiler can still inline this function + return __c11_atomic_compare_exchange_strong((_Atomic NvU64 *)pVal, + &tmp, + newVal, + __ATOMIC_SEQ_CST, + __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExIncrementU64 +( + 
volatile NvU64 *pVal +) +{ + return portAtomicExAddU64(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExDecrementU64 +( + volatile NvU64 *pVal +) +{ + return portAtomicExSubU64(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExXorU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __c11_atomic_fetch_xor((_Atomic NvU64 *)pVal, + val, __ATOMIC_SEQ_CST) ^ val; +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExOrU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __c11_atomic_fetch_or((_Atomic NvU64 *)pVal, + val, __ATOMIC_SEQ_CST) | val; +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExAndU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __c11_atomic_fetch_and((_Atomic NvU64 *)pVal, + val, __ATOMIC_SEQ_CST) & val; +} + + +#endif // NVCPU_IS_64_BITS + +#endif // PORT_COMPILER_HAS_INTRINSIC_ATOMICS && !defined(NV_MODS) && !NVOS_IS_LIBOS + +#endif // _NVPORT_ATOMIC_CLANG_H_ diff --git a/src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h b/src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h new file mode 100644 index 0000000..f0f8a5c --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h @@ -0,0 +1,493 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/**
+ * @file
+ * @brief Atomic function implementations using gcc compiler intrinsics
+ */
+
+#ifndef _NVPORT_ATOMIC_GCC_H_
+#define _NVPORT_ATOMIC_GCC_H_
+
+#if !NVOS_IS_LIBOS
+
+PORT_INLINE void
+portAtomicMemoryFenceStore(void)
+{
+#if NVCPU_IS_FAMILY_ARM
+    __asm__ __volatile__ ("DMB ST" : : : "memory");
+#elif NVCPU_IS_PPC || NVCPU_IS_PPC64LE
+    __asm__ __volatile__ ("sync" : : : "memory");
+#elif NVCPU_IS_X86 || NVCPU_IS_X86_64
+    __asm__ __volatile__ ("sfence" : : : "memory");
+#elif NVCPU_IS_RISCV64
+    __asm__ __volatile__ ("fence" : : : "memory");
+#else
+#error "portAtomicMemoryFenceStore implementation not found"
+#endif
+}
+PORT_INLINE void
+portAtomicMemoryFenceLoad(void)
+{
+#if NVCPU_IS_FAMILY_ARM
+    __asm__ __volatile__ ("DMB SY" : : : "memory");
+#elif NVCPU_IS_PPC || NVCPU_IS_PPC64LE
+    __asm__ __volatile__ ("sync" : : : "memory");
+#elif NVCPU_IS_X86 || NVCPU_IS_X86_64
+    __asm__ __volatile__ ("lfence" : : : "memory");
+#elif NVCPU_IS_RISCV64
+    __asm__ __volatile__ ("fence" : : : "memory");
+#else
+#error "portAtomicMemoryFenceLoad implementation not found"
+#endif
+}
+PORT_INLINE void
+portAtomicMemoryFenceFull(void)
+{
+#if NVCPU_IS_FAMILY_ARM
+    __asm__ __volatile__ ("DMB SY" : : : "memory");
+#elif NVCPU_IS_PPC || NVCPU_IS_PPC64LE
+    __asm__ __volatile__ ("sync" : : : "memory");
+#elif NVCPU_IS_X86 || NVCPU_IS_X86_64
+    __asm__ __volatile__ ("mfence" : : : "memory");
+#elif NVCPU_IS_RISCV64
+    __asm__ __volatile__ ("fence" : : : "memory");
+#else
+#error "portAtomicMemoryFenceFull implementation not found"
+#endif
+}
+
+#else
+
+#include "libos_interface.h"
+PORT_INLINE void portAtomicMemoryFenceStore(void)
+{
+    __asm__ __volatile__ ("fence" : : : "memory");
+    libosInterfaceSysopFlush();
+
+}
+
+PORT_INLINE void portAtomicMemoryFenceLoad(void)
+{
+    __asm__ __volatile__ ("fence" : : : "memory");
+    libosInterfaceSysopFlush();
+
+}
+
+PORT_INLINE void portAtomicMemoryFenceFull(void)
+{
+    __asm__ __volatile__ ("fence" : : : "memory");
+    libosInterfaceSysopFlush();
+}
+
+#endif //!NVOS_IS_LIBOS
+
+PORT_INLINE void
+portAtomicTimerBarrier(void)
+{
+#if NVCPU_IS_FAMILY_ARM
+    __asm__ __volatile__ ("ISB" : : : "memory");
+#elif NVCPU_IS_PPC || NVCPU_IS_PPC64LE
+    __asm__ __volatile__ ("isync" : : : "memory");
+#elif NVCPU_IS_X86 || NVCPU_IS_X86_64
+    __asm__ __volatile__ ("lfence" : : : "memory");
+#elif NVCPU_IS_RISCV64
+    __asm__ __volatile__ ("fence.i" : : : "memory");
+#else
+#error "portAtomicTimerBarrier implementation not found"
+#endif
+}
+
+#if PORT_COMPILER_HAS_INTRINSIC_ATOMICS && !defined(NV_MODS) && !NVOS_IS_LIBOS
+
+PORT_ATOMIC_INLINE void
+portAtomicInit(void)
+{
+
+}
+
+PORT_ATOMIC_INLINE NvS32
+portAtomicAddS32
+(
+    volatile NvS32 *pVal,
+    NvS32 val
+)
+{
+    return __sync_add_and_fetch(pVal, val);
+}
+
+PORT_ATOMIC_INLINE NvS32
+portAtomicSubS32
+(
+    volatile NvS32 *pVal,
+    NvS32 val
+)
+{
+    return __sync_sub_and_fetch(pVal, val);
+}
+
+PORT_ATOMIC_INLINE void
+portAtomicSetS32
+(
+    volatile NvS32 *pVal,
+    NvS32 val
+)
+{
+    while (!__sync_bool_compare_and_swap(pVal, *pVal, val));
+}
+
+PORT_ATOMIC_INLINE NvBool
+portAtomicCompareAndSwapS32
+(
+    volatile NvS32 *pVal,
+    NvS32 newVal,
+    NvS32 oldVal
+)
+{
+    return __sync_bool_compare_and_swap(pVal, oldVal, newVal);
+}
+
+PORT_ATOMIC_INLINE NvS32
+portAtomicIncrementS32
+(
+    volatile NvS32 *pVal
+)
+{
+    return __sync_add_and_fetch(pVal, 1);
+}
+
+PORT_ATOMIC_INLINE NvS32
+portAtomicDecrementS32
+(
+    volatile NvS32 *pVal
+)
+{
+    return __sync_sub_and_fetch(pVal, 1);
+}
+
+PORT_ATOMIC_INLINE NvS32
+portAtomicXorS32
+(
+ 
volatile NvS32 *pVal, + NvS32 val +) +{ + return __sync_xor_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicOrS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __sync_or_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicAndS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __sync_and_and_fetch(pVal, val); +} + + +PORT_ATOMIC_INLINE NvU32 +portAtomicAddU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __sync_add_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicSubU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __sync_sub_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE void +portAtomicSetU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + while (!__sync_bool_compare_and_swap(pVal, *pVal, val)); +} + +PORT_ATOMIC_INLINE NvBool +portAtomicCompareAndSwapU32 +( + volatile NvU32 *pVal, + NvU32 newVal, + NvU32 oldVal +) +{ + return __sync_bool_compare_and_swap(pVal, oldVal, newVal); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicIncrementU32 +( + volatile NvU32 *pVal +) +{ + return __sync_add_and_fetch(pVal, 1); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicDecrementU32 +( + volatile NvU32 *pVal +) +{ + return __sync_sub_and_fetch(pVal, 1); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicXorU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __sync_xor_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicOrU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __sync_or_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicAndU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __sync_and_and_fetch(pVal, val); +} + + + +#if defined(NV_64_BITS) + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExAddS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __sync_add_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExSubS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __sync_sub_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE void +portAtomicExSetS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + while (!__sync_bool_compare_and_swap(pVal, *pVal, val)); +} + +PORT_ATOMIC64_INLINE NvBool +portAtomicExCompareAndSwapS64 +( + volatile NvS64 *pVal, + NvS64 newVal, + NvS64 oldVal +) +{ + return __sync_bool_compare_and_swap(pVal, oldVal, newVal); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExIncrementS64 +( + volatile NvS64 *pVal +) +{ + return __sync_add_and_fetch(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExDecrementS64 +( + volatile NvS64 *pVal +) +{ + return __sync_sub_and_fetch(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExXorS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __sync_xor_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExOrS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __sync_or_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExAndS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __sync_and_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExAddU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __sync_add_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExSubU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __sync_sub_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE void +portAtomicExSetU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + while (!__sync_bool_compare_and_swap(pVal, *pVal, val)); +} + +PORT_ATOMIC64_INLINE NvBool +portAtomicExCompareAndSwapU64 +( + volatile NvU64 *pVal, + NvU64 newVal, + NvU64 oldVal +) +{ + return 
__sync_bool_compare_and_swap(pVal, oldVal, newVal); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExIncrementU64 +( + volatile NvU64 *pVal +) +{ + return __sync_add_and_fetch(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExDecrementU64 +( + volatile NvU64 *pVal +) +{ + return __sync_sub_and_fetch(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExXorU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __sync_xor_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExOrU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __sync_or_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExAndU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __sync_and_and_fetch(pVal, val); +} + +#endif // NV_64_BITS + +#endif // PORT_COMPILER_HAS_INTRINSIC_ATOMICS && !defined(NV_MODS) && !NVOS_IS_LIBOS +#endif // _NVPORT_ATOMIC_GCC_H_ diff --git a/src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h b/src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h new file mode 100644 index 0000000..b24f162 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h @@ -0,0 +1,76 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief DEBUG module implementation for Unix kernelspace. + */ +#ifndef _NVPORT_DEBUG_UNIX_KERNEL_OS_H_ +#define _NVPORT_DEBUG_UNIX_KERNEL_OS_H_ +#ifdef __cplusplus +extern "C" { +#endif + +#if !PORT_IS_KERNEL_BUILD +#error "This file can only be compiled as part of the kernel build." +#endif +#if !NVOS_IS_UNIX +#error "This file can only be compiled on Unix." +#endif + +#include "nv-kernel-interface-api.h" +void NV_API_CALL os_dbg_breakpoint(void); +void NV_API_CALL out_string(const char *str); +int NV_API_CALL nv_printf(NvU32 debuglevel, const char *format, ...); +void NV_API_CALL os_dump_stack(void); + +// No init/shutdown needed +#define portDbgInitialize() +#define portDbgShutdown() + + +PORT_DEBUG_INLINE void +portDbgPrintString +( + const char *str, + NvLength length +) +{ + out_string(str); +} + +#define portDbgPrintf(fmt, ...) nv_printf(0xFFFFFFFF, fmt, ##__VA_ARGS__) +#undef portDbgPrintf_SUPPORTED +#define portDbgPrintf_SUPPORTED 1 + +#define portDbgExPrintfLevel(level, fmt, ...) 
nv_printf(level, fmt, ##__VA_ARGS__) +#undef portDbgExPrintfLevel_SUPPORTED +#define portDbgExPrintfLevel_SUPPORTED 1 + +#define PORT_BREAKPOINT() os_dbg_breakpoint() +#define PORT_DUMP_STACK() os_dump_stack() + +#ifdef __cplusplus +} +#endif //__cplusplus +#endif // _NVPORT_DEBUG_UNIX_KERNEL_OS_H_ diff --git a/src/nvidia/inc/libraries/nvport/inline/memory_tracking.h b/src/nvidia/inc/libraries/nvport/inline/memory_tracking.h new file mode 100644 index 0000000..e6d4165 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/memory_tracking.h @@ -0,0 +1,370 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief NvPort MEMORY module extension to track memory allocations + * + * This file is internal to NvPort MEMORY module. 
+ * @cond NVPORT_INTERNAL + */ + +#ifndef _NVPORT_MEMORY_INTERNAL_H_ +#define _NVPORT_MEMORY_INTERNAL_H_ + +#include "nvtypes.h" + +#define portMemExTrackingGetActiveStats_SUPPORTED PORT_MEM_TRACK_USE_COUNTER +#define portMemExTrackingGetTotalStats_SUPPORTED PORT_MEM_TRACK_USE_COUNTER +#define portMemExTrackingGetPeakStats_SUPPORTED PORT_MEM_TRACK_USE_COUNTER +#define portMemExTrackingGetNext_SUPPORTED \ + (PORT_MEM_TRACK_USE_FENCEPOSTS & PORT_MEM_TRACK_USE_ALLOCLIST) +#define portMemExTrackingGetHeapSize_SUPPORTED (NVOS_IS_LIBOS) +#define portMemGetLargestFreeChunkSize_SUPPORTED (NVOS_IS_LIBOS) +#define portMemExValidate_SUPPORTED 0 +#define portMemExValidateAllocations_SUPPORTED 0 +#define portMemExFreeAll_SUPPORTED 0 + +/** @brief Untracked paged memory allocation, platform specific */ +void *_portMemAllocPagedUntracked(NvLength lengthBytes); +/** @brief Untracked nonpaged memory allocation, platform specific */ +void *_portMemAllocNonPagedUntracked(NvLength lengthBytes); +/** @brief Untracked memory free, platform specific */ +void _portMemFreeUntracked(void *pMemory); +/** @brief Wrapper around pAlloc->_portAlloc() that tracks the allocation */ +void *_portMemAllocatorAlloc(PORT_MEM_ALLOCATOR *pAlloc, NvLength length); +/** @brief Wrapper around pAlloc->_portFree() that tracks the allocation */ +void _portMemAllocatorFree(PORT_MEM_ALLOCATOR *pAlloc, void *pMem); + +#if PORT_MEM_TRACK_USE_LIMIT +/** @brief Initialize per VF tracking limit **/ +void portMemInitializeAllocatorTrackingLimit(NvU32 gfid, NvLength limit, NvBool bLimitEnabled); +/** @brief Init per Gfid mem tracking **/ +void portMemGfidTrackingInit(NvU32 gfid); +/** @brief Free per Gfid mem tracking **/ +void portMemGfidTrackingFree(NvU32 gfid); +/** @brief Increment per Gfid LibOS mem tracking **/ +void portMemLibosLimitInc(NvU32 gfid, NvLength size); +/** @brief Decrement per Gfid LibOS mem tracking **/ +void portMemLibosLimitDec(NvU32 gfid, NvLength size); +/** @brief Check if per Gfid LibOS mem limit is exceeded by allocation **/ +NvBool portMemLibosLimitExceeded(NvU32 gfid, NvLength size); +/** @brief Initialize per Gfid LibOS tracking limit **/ +void portMemInitializeAllocatorTrackingLibosLimit(NvU32 gfid, NvLength limit); +#endif + +#if PORT_MEM_TRACK_USE_LIMIT +#define PORT_MEM_LIMIT_MAX_GFID 64 +#define LIBOS_RW_LOCK_SIZE 144 +#endif + +typedef struct PORT_MEM_COUNTER +{ + volatile NvU32 activeAllocs; + volatile NvU32 totalAllocs; + volatile NvU32 peakAllocs; + volatile NvLength activeSize; + volatile NvLength totalSize; + volatile NvLength peakSize; +} PORT_MEM_COUNTER; + +typedef struct PORT_MEM_FENCE_HEAD +{ + PORT_MEM_ALLOCATOR *pAllocator; + NvU32 magic; +} PORT_MEM_FENCE_HEAD; + +typedef struct PORT_MEM_FENCE_TAIL +{ + NvU32 magic; +} PORT_MEM_FENCE_TAIL; + +typedef struct PORT_MEM_LIST +{ + struct PORT_MEM_LIST *pPrev; + struct PORT_MEM_LIST *pNext; +} PORT_MEM_LIST; + +#if PORT_MEM_TRACK_USE_CALLERINFO + +#if PORT_MEM_TRACK_USE_CALLERINFO_IP + +typedef NvUPtr PORT_MEM_CALLERINFO; +#define PORT_MEM_CALLERINFO_MAKE portUtilGetIPAddress() + +#else // PORT_MEM_TRACK_USE_CALLERINFO_IP + +typedef struct PORT_MEM_CALLERINFO +{ + const char *file; + const char *func; + NvU32 line; +} PORT_MEM_CALLERINFO; + +/** @note Needed since not all compilers support automatic struct creation */ +static NV_INLINE PORT_MEM_CALLERINFO +_portMemCallerInfoMake +( + const char *file, + const char *func, + NvU32 line +) +{ + PORT_MEM_CALLERINFO callerInfo; + callerInfo.file = file; + callerInfo.func = func; + callerInfo.line = line; 
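+    // Both strings have static storage duration (__FILE__ and __FUNCTION__
+    // expand to string literals / static arrays), so the returned struct can
+    // be copied and stored without any lifetime management.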
+ return callerInfo; +} + +#define PORT_MEM_CALLERINFO_MAKE \ + _portMemCallerInfoMake(__FILE__, __FUNCTION__, __LINE__) +#endif // PORT_MEM_TRACK_USE_CALLERINFO_IP + +void *portMemAllocPaged_CallerInfo(NvLength, PORT_MEM_CALLERINFO); +void *portMemAllocNonPaged_CallerInfo(NvLength, PORT_MEM_CALLERINFO); +PORT_MEM_ALLOCATOR *portMemAllocatorCreatePaged_CallerInfo(PORT_MEM_CALLERINFO); +PORT_MEM_ALLOCATOR *portMemAllocatorCreateNonPaged_CallerInfo(PORT_MEM_CALLERINFO); +void portMemInitializeAllocatorTracking_CallerInfo(PORT_MEM_ALLOCATOR *, PORT_MEM_ALLOCATOR_TRACKING *, PORT_MEM_CALLERINFO); +void *_portMemAllocatorAlloc_CallerInfo(PORT_MEM_ALLOCATOR*, NvLength, PORT_MEM_CALLERINFO); +PORT_MEM_ALLOCATOR *portMemAllocatorCreateOnExistingBlock_CallerInfo(void *, NvLength, PORT_MEM_CALLERINFO); +#if portMemExAllocatorCreateLockedOnExistingBlock_SUPPORTED +PORT_MEM_ALLOCATOR *portMemExAllocatorCreateLockedOnExistingBlock_CallerInfo(void *, NvLength, void *, PORT_MEM_CALLERINFO); +#endif //portMemExAllocatorCreateLockedOnExistingBlock_SUPPORTED +#undef PORT_ALLOC +#define PORT_ALLOC(pAlloc, length) \ + _portMemAllocatorAlloc_CallerInfo(pAlloc, length, PORT_MEM_CALLERINFO_MAKE) + +#define portMemAllocPaged(size) \ + portMemAllocPaged_CallerInfo((size), PORT_MEM_CALLERINFO_MAKE) +#define portMemAllocNonPaged(size) \ + portMemAllocNonPaged_CallerInfo((size), PORT_MEM_CALLERINFO_MAKE) +#define portMemAllocatorCreatePaged() \ + portMemAllocatorCreatePaged_CallerInfo(PORT_MEM_CALLERINFO_MAKE) +#define portMemAllocatorCreateNonPaged() \ + portMemAllocatorCreateNonPaged_CallerInfo(PORT_MEM_CALLERINFO_MAKE) + +#define portMemInitializeAllocatorTracking(pAlloc, pTrack) \ + portMemInitializeAllocatorTracking_CallerInfo(pAlloc, pTrack, PORT_MEM_CALLERINFO_MAKE) + +#define portMemAllocatorCreateOnExistingBlock(pMem, size) \ + portMemAllocatorCreateOnExistingBlock_CallerInfo(pMem, size, PORT_MEM_CALLERINFO_MAKE) +#if portMemExAllocatorCreateLockedOnExistingBlock_SUPPORTED +#define portMemExAllocatorCreateLockedOnExistingBlock(pMem, size, pLock) \ + portMemExAllocatorCreateLockedOnExistingBlock_CallerInfo(pMem, size, pLock, \ + PORT_MEM_CALLERINFO_MAKE) +#endif //portMemExAllocatorCreateLockedOnExistingBlock_SUPPORTED +#else +#define PORT_MEM_CALLERINFO_MAKE +#endif // CALLERINFO + + +#if PORT_MEM_TRACK_USE_FENCEPOSTS || PORT_MEM_TRACK_USE_ALLOCLIST || PORT_MEM_TRACK_USE_CALLERINFO || PORT_MEM_TRACK_USE_LIMIT + +// +// The blockSize of the allocation is tracked in PORT_MEM_HEADER::blockSize +// when fenceposts or per-GFID limit tracking is enabled. 
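+//
+// For orientation, a tracked allocation is laid out roughly as follows (a
+// sketch only; which header fields exist depends on the PORT_MEM_TRACK_USE_*
+// configuration):
+//
+//   [ PORT_MEM_HEADER | user data | PORT_MEM_FOOTER ]
+//                     ^-- pointer handed back to the caller
+//
+// PORT_MEM_ADD_HEADER_PTR() and PORT_MEM_SUB_HEADER_PTR() below convert
+// between the block pointer and the user pointer.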
+//
+#define PORT_MEM_HEADER_HAS_BLOCK_SIZE \
+    (PORT_MEM_TRACK_USE_FENCEPOSTS || PORT_MEM_TRACK_USE_LIMIT)
+
+typedef struct PORT_MEM_HEADER
+{
+#if PORT_MEM_HEADER_HAS_BLOCK_SIZE
+    NvLength blockSize;
+#endif
+#if PORT_MEM_TRACK_USE_CALLERINFO
+    PORT_MEM_CALLERINFO callerInfo;
+#endif
+#if PORT_MEM_TRACK_USE_ALLOCLIST
+    PORT_MEM_LIST list;
+#endif
+#if PORT_MEM_TRACK_USE_FENCEPOSTS
+    PORT_MEM_FENCE_HEAD fence;
+#endif
+#if PORT_MEM_TRACK_USE_LIMIT
+    NV_DECLARE_ALIGNED(NvU32 gfid, 8);
+#endif
+} PORT_MEM_HEADER;
+
+typedef struct PORT_MEM_FOOTER
+{
+#if PORT_MEM_TRACK_USE_FENCEPOSTS
+    PORT_MEM_FENCE_TAIL fence;
+#endif
+} PORT_MEM_FOOTER;
+
+#define PORT_MEM_ADD_HEADER_PTR(p) ((PORT_MEM_HEADER*)p + 1)
+#define PORT_MEM_SUB_HEADER_PTR(p) ((PORT_MEM_HEADER*)p - 1)
+#define PORT_MEM_STAGING_SIZE (sizeof(PORT_MEM_HEADER)+sizeof(PORT_MEM_FOOTER))
+#else
+#define PORT_MEM_ADD_HEADER_PTR(p) p
+#define PORT_MEM_SUB_HEADER_PTR(p) p
+#define PORT_MEM_STAGING_SIZE 0
+#define PORT_MEM_HEADER_HAS_BLOCK_SIZE 0
+#endif
+
+#define PORT_MEM_TRACK_ALLOC_SIZE \
+    PORT_MEM_TRACK_USE_COUNTER && \
+    PORT_MEM_HEADER_HAS_BLOCK_SIZE
+
+struct PORT_MEM_ALLOCATOR_TRACKING
+{
+    PORT_MEM_ALLOCATOR *pAllocator;
+    struct PORT_MEM_ALLOCATOR_TRACKING *pPrev;
+    struct PORT_MEM_ALLOCATOR_TRACKING *pNext;
+
+#if PORT_MEM_TRACK_USE_COUNTER
+    PORT_MEM_COUNTER counter;
+#endif
+#if PORT_MEM_TRACK_USE_ALLOCLIST
+    PORT_MEM_LIST *pFirstAlloc;
+    void *listLock;
+#endif
+#if PORT_MEM_TRACK_USE_CALLERINFO
+    PORT_MEM_CALLERINFO callerInfo;
+#endif
+#if PORT_MEM_TRACK_USE_LIMIT
+    NvLength limitGfid;
+    NvLength counterGfid;
+    NvU32 gfid;
+    NvLength limitLibosGfid;
+    NvLength counterLibosGfid;
+#endif
+};
+
+/// @brief Actual size of an allocator structure, including internals
+#define PORT_MEM_ALLOCATOR_SIZE \
+    (sizeof(PORT_MEM_ALLOCATOR) + sizeof(PORT_MEM_ALLOCATOR_TRACKING))
+
+#if defined(BIT)
+#define NVIDIA_UNDEF_LEGACY_BIT_MACROS
+#endif
+#include "nvmisc.h"
+
+//
+// Internal bitvector structures for allocators over existing blocks
+//
+#define PORT_MEM_BITVECTOR_CHUNK_SIZE 16U
+typedef NvU8 PORT_MEM_BITVECTOR_CHUNK[PORT_MEM_BITVECTOR_CHUNK_SIZE];
+typedef struct
+{
+    //
+    // Points to a PORT_SPINLOCK that makes this allocator thread safe.
+    // If this is not the thread-safe variant, it is NULL.
+    //
+    void *pSpinlock;
+    // Points to just after the bitvector, aligned to the first chunk.
+    PORT_MEM_BITVECTOR_CHUNK *pChunks;
+    NvU32 numChunks;
+    //
+    // What follows are two bitvectors, one right after the other:
+    //  - The first represents availability of chunks: 0=free, 1=allocated
+    //  - The second represents allocation sizes: 1=last chunk of an allocation
+    // So the total size of this array is 2*numChunks bits.
+    // The second vector continues immediately after the first, no alignment.
+    //
+    // Example: numChunks = 8, 2 allocations of 3 chunks each:
+    //   bits == |11111100| <- 2*3 chunks allocated, 2 free
+    //           |00100100| <- Chunks 2 and 5 are last in allocation
+    //
+    NvU32 bits[NV_ANYSIZE_ARRAY];
+} PORT_MEM_BITVECTOR;
+
+/// @note the following can be used as arguments for static array sizes, so
+/// they must be fully known at compile time - macros, not inline functions
+
+/// @brief Total number of chunks in a preallocated block of given size
+#define PORT_MEM_PREALLOCATED_BLOCK_NUM_CHUNKS(size) \
+    NV_DIV_AND_CEIL(size, PORT_MEM_BITVECTOR_CHUNK_SIZE)
+
+/// @brief Minimal nonaligned bookkeeping size required for a preallocated block
+#define PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_NONALIGNED_EXTRA_SIZE \
+    sizeof(PORT_MEM_ALLOCATOR) + sizeof(PORT_MEM_BITVECTOR)
+
+/// @brief Minimal bookkeeping size required for a preallocated block
+#define PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_EXTRA_SIZE \
+    NV_ALIGN_UP(PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_NONALIGNED_EXTRA_SIZE, \
+                PORT_MEM_BITVECTOR_CHUNK_SIZE)
+
+/// @brief Number of chunks that can be tracked in the minimal bookkeeping size
+#define PORT_MEM_PREALLOCATED_BLOCK_CHUNKS_GRATIS \
+    ((                                                   \
+        PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_EXTRA_SIZE - \
+        sizeof(PORT_MEM_ALLOCATOR) -                     \
+        NV_OFFSETOF(PORT_MEM_BITVECTOR, bits)            \
+    )*4U)
+
+// Although we can never execute the underflow branch, the compiler will complain
+// if any constant expression results in underflow, even in dead code.
+// Note: Skipping (parens) around a and b on purpose here.
+#define _PORT_CEIL_NO_UNDERFLOW(a, b) (NV_DIV_AND_CEIL(b + a, b) - 1)
+
+/// @brief Required additional size for a given number of chunks
+#define PORT_MEM_PREALLOCATED_BLOCK_SIZE_FOR_NONGRATIS_CHUNKS(num_chunks)  \
+    ((num_chunks > PORT_MEM_PREALLOCATED_BLOCK_CHUNKS_GRATIS)              \
+        ? _PORT_CEIL_NO_UNDERFLOW(num_chunks - PORT_MEM_PREALLOCATED_BLOCK_CHUNKS_GRATIS, \
+                                  4*PORT_MEM_BITVECTOR_CHUNK_SIZE)         \
+          * PORT_MEM_BITVECTOR_CHUNK_SIZE                                  \
+        : 0)
+
+/// @brief Total required bookkeeping size for a block of given useful size
+#define PORT_MEM_PREALLOCATED_BLOCK_EXTRA_SIZE(size) \
+    PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_EXTRA_SIZE + \
+    PORT_MEM_PREALLOCATED_BLOCK_SIZE_FOR_NONGRATIS_CHUNKS( \
+        PORT_MEM_PREALLOCATED_BLOCK_NUM_CHUNKS(size))
+
+/**
+ * Macros for defining memory allocation wrappers.
+ *
+ * The function / file / line reference is not useful when portMemAlloc
+ * is called from a generic memory allocator function, such as the memCreate
+ * function in resman.
+ *
+ * These macros can be used to push the function / file / line reference up one
+ * level when defining a memory allocator function. In other words, log who
+ * calls memCreate instead of logging memCreate.
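+ *
+ * A hypothetical wrapper built with them (mySpecialAlloc is illustrative
+ * only, not part of NvPort):
+ *
+ *   void *mySpecialAlloc(NvLength size PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM)
+ *   {
+ *       return PORT_MEM_CALLINFO_FUNC(portMemAllocPaged)(size
+ *                                     PORT_MEM_CALLERINFO_COMMA_PARAM);
+ *   }
+ *
+ * With caller info enabled, the file / function / line recorded is that of
+ * mySpecialAlloc's caller rather than of mySpecialAlloc itself.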
+ *
+ * These macros are also used throughout memory-tracking.c
+ */
+#if PORT_MEM_TRACK_USE_CALLERINFO
+
+#define PORT_MEM_CALLERINFO_PARAM            _portMemCallerInfo
+#define PORT_MEM_CALLERINFO_TYPE_PARAM \
+    PORT_MEM_CALLERINFO PORT_MEM_CALLERINFO_PARAM
+#define PORT_MEM_CALLERINFO_COMMA_PARAM      ,PORT_MEM_CALLERINFO_PARAM
+#define PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM ,PORT_MEM_CALLERINFO_TYPE_PARAM
+#define PORT_MEM_CALLINFO_FUNC(f)            f##_CallerInfo
+
+#else // PORT_MEM_TRACK_USE_CALLERINFO
+
+#define PORT_MEM_CALLERINFO_PARAM
+#define PORT_MEM_CALLERINFO_TYPE_PARAM void
+#define PORT_MEM_CALLERINFO_COMMA_PARAM
+#define PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM
+#define PORT_MEM_CALLINFO_FUNC(f) f
+
+#endif // PORT_MEM_TRACK_USE_CALLERINFO
+
+#endif // _NVPORT_MEMORY_INTERNAL_H_
+/// @endcond
diff --git a/src/nvidia/inc/libraries/nvport/inline/safe_generic.h b/src/nvidia/inc/libraries/nvport/inline/safe_generic.h new file mode 100644 index 0000000..e701c2c --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/safe_generic.h @@ -0,0 +1,311 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+//
+// Disable warnings when constant expressions are always true/false, and some
+// signed/unsigned mismatches. To get a common implementation for all safe
+// functions, we need to rely on these. There is no undefined behavior here.
+//
+#if PORT_COMPILER_IS_MSVC
+#pragma warning( disable : 4296)
+#elif PORT_COMPILER_IS_GCC
+// GCC 4.6+ needed for GCC diagnostic
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC diagnostic push
+// Ignore -Wpragmas first, so that -W flags unknown to this compiler below
+// do not themselves produce warnings.
+#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wtautological-constant-out-of-range-compare" +#pragma GCC diagnostic ignored "-Wsign-compare" +#pragma GCC diagnostic ignored "-Wtype-limits" +#else +// +// On older GCCs we declare this as a system header, which tells the compiler +// to ignore all warnings in it (this has no effect on the primary source file) +// +#pragma GCC system_header +#endif +#elif PORT_COMPILER_IS_CLANG +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wtautological-constant-out-of-range-compare" +#pragma clang diagnostic ignored "-Wsign-compare" +#pragma clang diagnostic ignored "-Wtype-limits" +#endif + +#define PORT_SAFE_OP(a, b, pRes, _op_, _US_) \ + ((sizeof(a) == 1) ? portSafe##_op_##_US_##8 (a, b, pRes) : \ + (sizeof(a) == 2) ? portSafe##_op_##_US_##16(a, b, pRes) : \ + (sizeof(a) == 4) ? portSafe##_op_##_US_##32(a, b, pRes) : \ + (sizeof(a) == 8) ? portSafe##_op_##_US_##64(a, b, pRes) : \ + NV_FALSE) + +#define PORT_SAFE_ADD_U(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Add, U) +#define PORT_SAFE_SUB_U(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Sub, U) +#define PORT_SAFE_MUL_U(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Mul, U) +#define PORT_SAFE_DIV_U(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Div, U) + +#define PORT_SAFE_ADD_S(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Add, S) +#define PORT_SAFE_SUB_S(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Sub, S) +#define PORT_SAFE_MUL_S(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Mul, S) +#define PORT_SAFE_DIV_S(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Div, S) + +#define PORT_SAFE_ADD(a, b, pRes) PORT_SAFE_ADD_U(a, b, pRes) +#define PORT_SAFE_SUB(a, b, pRes) PORT_SAFE_SUB_U(a, b, pRes) +#define PORT_SAFE_MUL(a, b, pRes) PORT_SAFE_MUL_U(a, b, pRes) +#define PORT_SAFE_DIV(a, b, pRes) PORT_SAFE_DIV_U(a, b, pRes) + +//////////////////////////////////////////////////////////////////////////////// + +#define PORT_EXPAND(X) X +#define PORT_SAFE_MAX(t) PORT_EXPAND(NV_##t##_MAX) +#define PORT_SAFE_MIN(t) PORT_EXPAND(NV_##t##_MIN) + +// These constants should really be in nvtypes.h +#if !defined (NV_UPtr_MAX) +#if defined(NV_64_BITS) +#define NV_UPtr_MAX NV_U64_MAX +#define NV_Length_MAX NV_U64_MAX +#else +#define NV_UPtr_MAX NV_U32_MAX +#define NV_Length_MAX NV_U32_MAX +#endif +#define NV_UPtr_MIN 0 +#define NV_Length_MIN 0 +#endif + +#define PORT_WILL_OVERFLOW_UADD(a, b) ((a + b) < a) +#define PORT_WILL_OVERFLOW_USUB(a, b) (b > a) +#define PORT_WILL_OVERFLOW_UMUL(a, b, r) (a != 0 && b != (r/a)) + +/** @note Signed overflow is Undefined Behavior, which means we have to detect + * it before it actually happens. We can't do (a+b) unless we are sure it won't + * overflow. + */ +#define PORT_WILL_OVERFLOW_SADD(a, b, size) \ + ((b < 0) ? (a < (NV_S##size##_MIN - b)) : (a > (NV_S##size##_MAX - b))) + +#define PORT_WILL_OVERFLOW_SSUB(a, b, size) \ + ((b < 0) ? (a > (NV_S##size##_MAX + b)) : (a < (NV_S##size##_MIN + b))) + +#define PORT_MIN_MUL(x, s) ((x < 0) ? (NV_S##s##_MAX / x) : (NV_S##s##_MIN / x)) +#define PORT_MAX_MUL(x, s) ((x < 0) ? (NV_S##s##_MIN / x) : (NV_S##s##_MAX / x)) +#define PORT_WILL_OVERFLOW_SMUL(a, b, size) \ + (a != 0 && b != 0 && (a > PORT_MAX_MUL(b, size) || a < PORT_MIN_MUL(b, size))) + +#define PORT_SAFE_DIV_IMPL(a, b, pRes) \ + ((b == 0) ? NV_FALSE : ((*pRes = a / b), NV_TRUE)) + +#define PORT_SAFE_Add_IMPL_S(a, b, pRes, n) \ + (PORT_WILL_OVERFLOW_SADD(a, b, n) ? NV_FALSE : ((*pRes = a + b), NV_TRUE)) +#define PORT_SAFE_Sub_IMPL_S(a, b, pRes, n) \ + (PORT_WILL_OVERFLOW_SSUB(a, b, n) ? 
NV_FALSE : ((*pRes = a - b), NV_TRUE)) +#define PORT_SAFE_Mul_IMPL_S(a, b, pRes, n) \ + (PORT_WILL_OVERFLOW_SMUL(a, b, n) ? NV_FALSE : ((*pRes = a * b), NV_TRUE)) +#define PORT_SAFE_Div_IMPL_S(a, b, pRes, n) PORT_SAFE_DIV_IMPL(a, b, pRes) + +#define PORT_SAFE_Add_IMPL_U(a, b, pRes, n) \ + ((*pRes = a + b), ((*pRes < a) ? NV_FALSE : NV_TRUE)) +#define PORT_SAFE_Sub_IMPL_U(a, b, pRes, n) \ + ((*pRes = a - b), ((b > a) ? NV_FALSE : NV_TRUE)) +#define PORT_SAFE_Mul_IMPL_U(a, b, pRes, n) \ + ((*pRes = a * b), ((a != 0 && b != *pRes/a) ? NV_FALSE : NV_TRUE)) +#define PORT_SAFE_Div_IMPL_U(a, b, pRes, n) PORT_SAFE_DIV_IMPL(a, b, pRes) + + +#define PORT_SAFE_Add_IMPL_ PORT_SAFE_Add_IMPL_U +#define PORT_SAFE_Sub_IMPL_ PORT_SAFE_Sub_IMPL_U +#define PORT_SAFE_Mul_IMPL_ PORT_SAFE_Mul_IMPL_U +#define PORT_SAFE_Div_IMPL_ PORT_SAFE_Div_IMPL_U + +#define PORT_SAFE_CAST(a, b, t) \ + ((a < PORT_SAFE_MIN(t) || a > PORT_SAFE_MAX(t)) ? \ + NV_FALSE : \ + ((b = (Nv##t) a), NV_TRUE)) + + +#define PORT_SAFE_DEFINE_MATH_FUNC(_op_, _US_, _size_) \ + PORT_SAFE_INLINE NvBool \ + portSafe##_op_##_US_##_size_ \ + ( \ + Nv##_US_##_size_ x, \ + Nv##_US_##_size_ y, \ + Nv##_US_##_size_ *pRes \ + ) \ + { \ + return PORT_EXPAND(PORT_SAFE_##_op_##_IMPL_##_US_)(x, y, pRes, _size_); \ + } + + +#define PORT_SAFE_DEFINE_CAST_FUNC(_type_from_, _type_to_) \ + PORT_SAFE_INLINE NvBool \ + portSafe##_type_from_##To##_type_to_ \ + ( \ + Nv##_type_from_ data, \ + Nv##_type_to_ *pResult \ + ) \ + { \ + if (((data<0) && (PORT_SAFE_MIN(_type_to_) == 0 || \ + PORT_SAFE_MIN(_type_to_) > data)) \ + || data > PORT_SAFE_MAX(_type_to_)) \ + return NV_FALSE; \ + *pResult = (Nv##_type_to_) data; \ + return NV_TRUE; \ + } + + + +PORT_SAFE_DEFINE_MATH_FUNC(Add, S, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, S, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, S, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Div, S, 8) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, S, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, S, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, S, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Div, S, 16) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, S, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, S, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, S, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Div, S, 32) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, S, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, S, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, S, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Div, S, 64) + + +PORT_SAFE_DEFINE_MATH_FUNC(Add, U, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, U, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, U, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Div, U, 8) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, U, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, U, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, U, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Div, U, 16) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, U, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, U, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, U, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Div, U, 32) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, U, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, U, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, U, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Div, U, 64) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, U, Ptr) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, U, Ptr) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, U, Ptr) +PORT_SAFE_DEFINE_MATH_FUNC(Div, U, Ptr) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, , Length) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, , Length) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, , Length) +PORT_SAFE_DEFINE_MATH_FUNC(Div, , Length) + + +PORT_SAFE_DEFINE_CAST_FUNC(S8, U8) +PORT_SAFE_DEFINE_CAST_FUNC(S8, U16) +PORT_SAFE_DEFINE_CAST_FUNC(S8, U32) +PORT_SAFE_DEFINE_CAST_FUNC(S8, U64) +PORT_SAFE_DEFINE_CAST_FUNC(S8, UPtr) +PORT_SAFE_DEFINE_CAST_FUNC(S8, Length) 
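+
+// Usage sketch (illustrative only, not part of the original header): each
+// generated helper reports failure instead of silently wrapping or
+// truncating, e.g.:
+//
+//   NvU32 bytes;
+//   NvU8  byteCount;
+//   if (!portSafeMulU32(count, stride, &bytes))   // count * stride overflows
+//       return NV_FALSE;
+//   if (!portSafeU32ToU8(bytes, &byteCount))      // bytes exceeds NV_U8_MAX
+//       return NV_FALSE;
+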
+ +PORT_SAFE_DEFINE_CAST_FUNC(S16, S8) +PORT_SAFE_DEFINE_CAST_FUNC(S16, U8) +PORT_SAFE_DEFINE_CAST_FUNC(S16, U16) +PORT_SAFE_DEFINE_CAST_FUNC(S16, U32) +PORT_SAFE_DEFINE_CAST_FUNC(S16, U64) +PORT_SAFE_DEFINE_CAST_FUNC(S16, UPtr) +PORT_SAFE_DEFINE_CAST_FUNC(S16, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(S32, S8) +PORT_SAFE_DEFINE_CAST_FUNC(S32, S16) +PORT_SAFE_DEFINE_CAST_FUNC(S32, U8) +PORT_SAFE_DEFINE_CAST_FUNC(S32, U16) +PORT_SAFE_DEFINE_CAST_FUNC(S32, U32) +PORT_SAFE_DEFINE_CAST_FUNC(S32, U64) +PORT_SAFE_DEFINE_CAST_FUNC(S32, UPtr) +PORT_SAFE_DEFINE_CAST_FUNC(S32, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(S64, S8) +PORT_SAFE_DEFINE_CAST_FUNC(S64, S16) +PORT_SAFE_DEFINE_CAST_FUNC(S64, S32) +PORT_SAFE_DEFINE_CAST_FUNC(S64, U8) +PORT_SAFE_DEFINE_CAST_FUNC(S64, U16) +PORT_SAFE_DEFINE_CAST_FUNC(S64, U32) +PORT_SAFE_DEFINE_CAST_FUNC(S64, U64) +PORT_SAFE_DEFINE_CAST_FUNC(S64, UPtr) +PORT_SAFE_DEFINE_CAST_FUNC(S64, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(U8, S8) + +PORT_SAFE_DEFINE_CAST_FUNC(U16, S8) +PORT_SAFE_DEFINE_CAST_FUNC(U16, S16) +PORT_SAFE_DEFINE_CAST_FUNC(U16, U8) + +PORT_SAFE_DEFINE_CAST_FUNC(U32, S8) +PORT_SAFE_DEFINE_CAST_FUNC(U32, S16) +PORT_SAFE_DEFINE_CAST_FUNC(U32, S32) +PORT_SAFE_DEFINE_CAST_FUNC(U32, U8) +PORT_SAFE_DEFINE_CAST_FUNC(U32, U16) + +PORT_SAFE_DEFINE_CAST_FUNC(U64, S8) +PORT_SAFE_DEFINE_CAST_FUNC(U64, S16) +PORT_SAFE_DEFINE_CAST_FUNC(U64, S32) +PORT_SAFE_DEFINE_CAST_FUNC(U64, S64) +PORT_SAFE_DEFINE_CAST_FUNC(U64, U8) +PORT_SAFE_DEFINE_CAST_FUNC(U64, U16) +PORT_SAFE_DEFINE_CAST_FUNC(U64, U32) +PORT_SAFE_DEFINE_CAST_FUNC(U64, UPtr) +PORT_SAFE_DEFINE_CAST_FUNC(U64, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, S8) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, S16) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, S32) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, S64) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, U8) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, U16) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, U32) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, U64) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(Length, S8) +PORT_SAFE_DEFINE_CAST_FUNC(Length, S16) +PORT_SAFE_DEFINE_CAST_FUNC(Length, S32) +PORT_SAFE_DEFINE_CAST_FUNC(Length, S64) +PORT_SAFE_DEFINE_CAST_FUNC(Length, U8) +PORT_SAFE_DEFINE_CAST_FUNC(Length, U16) +PORT_SAFE_DEFINE_CAST_FUNC(Length, U32) +PORT_SAFE_DEFINE_CAST_FUNC(Length, U64) +PORT_SAFE_DEFINE_CAST_FUNC(Length, UPtr) + + +#if PORT_COMPILER_IS_MSVC +#pragma warning( default : 4296) +#elif PORT_COMPILER_IS_GCC && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) +#pragma GCC diagnostic pop +#elif PORT_COMPILER_IS_CLANG +#pragma clang diagnostic pop +#endif diff --git a/src/nvidia/inc/libraries/nvport/inline/sync_tracking.h b/src/nvidia/inc/libraries/nvport/inline/sync_tracking.h new file mode 100644 index 0000000..7c40db7 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/sync_tracking.h @@ -0,0 +1,211 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/** + * @file + * @brief SYNC debugging utilities + * + * If PORT_SYNC_PRINT_DEBUG_INFO is defined, the definitions in this file will + * cause all Sync operations to verbosely print out the actions performed. + */ + +#if defined(PORT_SYNC_PRINT_DEBUG_INFO) + +#if defined(PORT_SYNC_IMPL) + +#undef portSyncInitialize +#undef portSyncShutdown +#undef portSyncSpinlockInitialize +#undef portSyncSpinlockCreate +#undef portSyncSpinlockDestroy +#undef portSyncSpinlockAcquire +#undef portSyncSpinlockRelease +#undef portSyncMutexInitialize +#undef portSyncMutexCreate +#undef portSyncMutexDestroy +#undef portSyncMutexAcquire +#undef portSyncMutexRelease +#undef portSyncMutexAcquireConditional +#undef portSyncSemaphoreInitialize +#undef portSyncSemaphoreCreate +#undef portSyncSemaphoreDestroy +#undef portSyncSemaphoreAcquire +#undef portSyncSemaphoreRelease +#undef portSyncSemaphoreAcquireConditional + +#else + +#define portSyncInitialize() \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncInitialize() ", __FILE__, __LINE__); \ + portSyncInitialize(); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncShutdown() \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncShutdown() ", __FILE__, __LINE__); \ + portSyncShutdown(); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + + + +static NV_INLINE NV_STATUS _syncPrintReturnStatus(NV_STATUS status) +{ + portDbgPrintf("%s\n", nvstatusToString(status)); + return status; +} + +static NV_INLINE void *_syncPrintReturnPtr(void *ptr) +{ + portDbgPrintf("%p\n", ptr); + return ptr; +} + + +#define portSyncSpinlockInitialize(pSpinlock) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncSpinlockInitialize(%p) - ", \ + __FILE__, __LINE__, pSpinlock), \ + _syncPrintReturnStatus(portSyncSpinlockInitialize(pSpinlock))) + +#define portSyncSpinlockCreate(pAllocator) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncSpinlockCreate(%p) - ", \ + __FILE__, __LINE__, pAllocator), \ + _syncPrintReturnPtr(portSyncSpinlockCreate(pAllocator))) + +#define portSyncSpinlockDestroy(pSpinlock) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSpinlockDestroy(%p) ", \ + __FILE__, __LINE__, pSpinlock); \ + portSyncSpinlockDestroy(pSpinlock); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncSpinlockAcquire(pSpinlock) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSpinlockAcquire(%p) ", \ + __FILE__, __LINE__, pSpinlock); \ + 
portSyncSpinlockAcquire(pSpinlock); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncSpinlockRelease(pSpinlock) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSpinlockRelease(%p) ", \ + __FILE__, __LINE__, pSpinlock); \ + portSyncSpinlockRelease(pSpinlock); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + + + + +#define portSyncMutexInitialize(pMutex) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexInitialize(%p) - ", \ + __FILE__, __LINE__, pMutex), \ + _syncPrintReturnStatus(portSyncMutexInitialize(pMutex))) + +#define portSyncMutexCreate(pAllocator) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexCreate(%p) - ", \ + __FILE__, __LINE__, pAllocator), \ + _syncPrintReturnPtr(portSyncMutexCreate(pAllocator))) + +#define portSyncMutexDestroy(pMutex) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexDestroy(%p) ", \ + __FILE__, __LINE__, pMutex); \ + portSyncMutexDestroy(pMutex); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncMutexAcquire(pMutex) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexAcquire(%p) ", \ + __FILE__, __LINE__, pMutex); \ + portSyncMutexAcquire(pMutex); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncMutexRelease(pMutex) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexRelease(%p) ", \ + __FILE__, __LINE__, pMutex); \ + portSyncMutexRelease(pMutex); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncMutexAcquireConditional(pMutex) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexAcquireConditional(%p) - ", \ + __FILE__, __LINE__, pMutex), \ + (portSyncMutexAcquireConditional(pMutex) ? \ + (portDbgPrintf("TRUE\n"),NV_TRUE) : (portDbgPrintf("FALSE\n"),NV_FALSE))) + + + + + +#define portSyncSemaphoreInitialize(pSemaphore, s, l) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreInitialize(%p, %u, %u) - ", \ + __FILE__, __LINE__, pSemaphore, s, l), \ + _syncPrintReturnStatus(portSyncSemaphoreInitialize(pSemaphore, s, l))) + +#define portSyncSemaphoreCreate(pAllocator, s, l) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreCreate(%p, %u, %u) - ", \ + __FILE__, __LINE__, pAllocator, s, l), \ + _syncPrintReturnPtr(portSyncSemaphoreCreate(pAllocator, s, l))) + +#define portSyncSemaphoreDestroy(pSemaphore) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreDestroy(%p) ", \ + __FILE__, __LINE__, pSemaphore); \ + portSyncSemaphoreDestroy(pSemaphore); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncSemaphoreAcquire(pSemaphore) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreAcquire(%p) ", \ + __FILE__, __LINE__, pSemaphore); \ + portSyncSemaphoreAcquire(pSemaphore); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncSemaphoreRelease(pSemaphore) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreRelease(%p) ", \ + __FILE__, __LINE__, pSemaphore); \ + portSyncSemaphoreRelease(pSemaphore); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncSemaphoreAcquireConditional(pSemaphore) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreAcquireConditional(%p) - ", \ + __FILE__, __LINE__, pSemaphore), \ + (portSyncSemaphoreAcquireConditional(pSemaphore) ? 
\ + (portDbgPrintf("TRUE\n"),NV_TRUE) : (portDbgPrintf("FALSE\n"),NV_FALSE))) + + +#endif // PORT_SYNC_IMPL +#endif // PORT_SYNC_PRINT_DEBUG_INFO diff --git a/src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h b/src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h new file mode 100644 index 0000000..d14846b --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h @@ -0,0 +1,188 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Util functions implementations using gcc and clang compiler intrinsics + */ + +#ifndef _NVPORT_UTIL_GCC_CLANG_H_ +#define _NVPORT_UTIL_GCC_CLANG_H_ + +// +// Disabling portUtilExGetStackTrace_SUPPORTED on all clients because the +// implementation is unsafe and generates warnings on new build compilers. +// +// From https://gcc.gnu.org/onlinedocs/gcc/Return-Address.html : +// Calling this function with a nonzero argument can have unpredictable effects, +// including crashing the calling program. As a result, calls that are considered +// unsafe are diagnosed when the -Wframe-address option is in effect. Such calls +// should only be made in debugging situations. +// +// If this feature is desirable, please replace the body of portUtilExGetStackTrace() +// with implementations that tie into native stacktrace reporting infrastructure +// of the platforms nvport runs on. 
+//
+#define portUtilExGetStackTrace_SUPPORTED 0
+#define portUtilExGetStackTrace(_level) ((NvUPtr)0)
+
+#define portUtilGetReturnAddress() (NvUPtr)__builtin_return_address(0)
+
+#if NVCPU_IS_X86 || NVCPU_IS_X86_64
+#define NVPORT_DUMMY_LOOP() \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause"); \
+                                    \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause"); \
+                                    \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause"); \
+                                    \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause"); \
+    __asm__ __volatile__ ("pause");
+#else
+#define NVPORT_DUMMY_LOOP() \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop"); \
+                                  \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop"); \
+                                  \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop"); \
+                                  \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop"); \
+    __asm__ __volatile__ ("nop");
+#endif
+
+#if (__GNUC__ < 4) || (NVCPU_IS_ARM) || (NVCPU_IS_X86 && PORT_IS_KERNEL_BUILD) || (NVCPU_IS_RISCV64)
+#define PORT_UTIL_CLZ_CTX_NOT_DEFINED 1
+#else
+PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros64(NvU64 n)
+{
+    if (n == 0)
+        return 64;
+
+    return __builtin_clzll(n);
+}
+PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros32(NvU32 n)
+{
+    if (n == 0)
+        return 32;
+
+    return __builtin_clz(n);
+}
+
+
+PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros64(NvU64 n)
+{
+    if (n == 0)
+        return 64;
+
+    return __builtin_ctzll(n);
+}
+PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros32(NvU32 n)
+{
+    if (n == 0)
+        return 32;
+
+    return __builtin_ctz(n);
+}
+
+#endif
+
+
+#if NVCPU_IS_FAMILY_X86 && !defined(NV_MODS)
+PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void)
+{
+    NvU32 lo;
+    NvU32 hi;
+    __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+    return (lo | ((NvU64)hi << 32));
+}
+#define portUtilExReadTimestampCounter_SUPPORTED 1
+
+#elif NVCPU_IS_AARCH64 && !defined(NV_MODS)
+PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void)
+{
+    NvU64 ts = 0;
+    __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (ts));
+    return ts;
+}
+#define portUtilExReadTimestampCounter_SUPPORTED 1
+
+#elif NVCPU_IS_PPC64LE && !defined(NV_MODS)
+PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void)
+{
+    NvU64 ts;
+    __asm__ __volatile__ ("mfspr %0,268" : "=r"(ts));
+    return ts;
+}
+#define portUtilExReadTimestampCounter_SUPPORTED 1
+
+#elif NVCPU_IS_PPC && !defined(NV_MODS)
+PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void)
+{
+    NvU32 lo, hi, tmp;
+    __asm__ __volatile__ (
+        "0:\n"
+        "mftbu %0\n"
+        "mftbl %1\n"
+        "mftbu %2\n"
+        "cmpw %0, %2\n"
+        "bne- 0b"
+        : "=r" (hi), "=r" (lo), "=r" (tmp) );
+    // Widen hi before shifting; shifting an NvU32 left by 32 is undefined.
+    return (((NvU64)hi << 32) | lo);
+}
+#define portUtilExReadTimestampCounter_SUPPORTED 1
+
+#else
+#define 
portUtilExReadTimestampCounter_SUPPORTED 0 +#endif + +#endif // _NVPORT_UTIL_GCC_CLANG_H_ diff --git a/src/nvidia/inc/libraries/nvport/inline/util_generic.h b/src/nvidia/inc/libraries/nvport/inline/util_generic.h new file mode 100644 index 0000000..d9fe83a --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/util_generic.h @@ -0,0 +1,267 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /** + * @file + * @brief UTIL module generic crossplatform implementation + */ + +#ifndef _NVPORT_UTIL_GENERIC_H_ +#define _NVPORT_UTIL_GENERIC_H_ + +PORT_UTIL_INLINE NvBool +portUtilCheckOverlap +( + const NvU8 *pData0, + NvLength len0, + const NvU8 *pData1, + NvLength len1 +) +{ + return (pData0 >= pData1 && pData0 < (pData1 + len1)) || + (pData1 >= pData0 && pData1 < (pData0 + len0)); +} + +PORT_UTIL_INLINE NvBool +portUtilCheckAlignment +( + const void *address, + NvU32 align +) +{ + if (!portUtilIsPowerOfTwo(align)) + return NV_FALSE; + + return ((NvUPtr)address & (align-1)) == 0; +} + +PORT_UTIL_INLINE NvBool +portUtilIsPowerOfTwo +( + NvU64 num +) +{ + return (num & (num-1)) == 0; +} + +/* + * This function is designed to be able to make unaligned access + * (but might be slower because of the byte by byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteLittleEndian16 +( + void *pBuf, + NvU16 value +) +{ + *((NvU8*)pBuf + 1) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 0) = (NvU8)(value); +} + +/* + * This function is designed to be able to make unaligned access + * (but might be slower because of the byte by byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteLittleEndian32 +( + void *pBuf, + NvU32 value +) +{ + *((NvU8*)pBuf + 3) = (NvU8)(value >> 24); + *((NvU8*)pBuf + 2) = (NvU8)(value >> 16); + *((NvU8*)pBuf + 1) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 0) = (NvU8)(value); +} + +/* + * This function is designed to be able to make unaligned access + * (but might be slower because of the byte by byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteLittleEndian64 +( + void *pBuf, + NvU64 value +) +{ + *((NvU8*)pBuf + 7) = (NvU8)(value >> 56); + *((NvU8*)pBuf + 6) = (NvU8)(value >> 48); + *((NvU8*)pBuf + 5) = (NvU8)(value >> 40); + *((NvU8*)pBuf + 4) = (NvU8)(value >> 32); + *((NvU8*)pBuf + 3) = (NvU8)(value >> 24); + *((NvU8*)pBuf + 2) = (NvU8)(value >> 16); + *((NvU8*)pBuf + 1) = (NvU8)(value >> 8); + *((NvU8*)pBuf 
+ 0) = (NvU8)(value); +} + +/* + * This function is designed to be able to make unaligned access + * (but might be slower because of the byte by byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteBigEndian16 +( + void *pBuf, + NvU16 value +) +{ + *((NvU8*)pBuf + 0) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 1) = (NvU8)(value); +} + +/* + * This function is designed to be able to make unaligned access + * (but might be slower because of the byte by byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteBigEndian32 +( + void *pBuf, + NvU32 value +) +{ + *((NvU8*)pBuf + 0) = (NvU8)(value >> 24); + *((NvU8*)pBuf + 1) = (NvU8)(value >> 16); + *((NvU8*)pBuf + 2) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 3) = (NvU8)(value); +} + +/* + * This function is designed to be able to make unaligned access + * (but might be slower because of the byte by byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteBigEndian64 +( + void *pBuf, + NvU64 value +) +{ + *((NvU8*)pBuf + 0) = (NvU8)(value >> 56); + *((NvU8*)pBuf + 1) = (NvU8)(value >> 48); + *((NvU8*)pBuf + 2) = (NvU8)(value >> 40); + *((NvU8*)pBuf + 3) = (NvU8)(value >> 32); + *((NvU8*)pBuf + 4) = (NvU8)(value >> 24); + *((NvU8*)pBuf + 5) = (NvU8)(value >> 16); + *((NvU8*)pBuf + 6) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 7) = (NvU8)(value); +} + +#if PORT_COMPILER_IS_GCC || PORT_COMPILER_IS_CLANG +#include "nvport/inline/util_gcc_clang.h" +#elif PORT_COMPILER_IS_MSVC +#include "nvport/inline/util_msvc.h" +#else +#error "Unsupported compiler" +#endif // switch + +#ifdef PORT_UTIL_CLZ_CTX_NOT_DEFINED +PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros64(NvU64 n) +{ + NvU32 y; + + if (n == 0) + return 64; + + for (y = 0; !(n & 0x8000000000000000LL); y++) + n <<= 1; + + return y; +} +PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros32(NvU32 n) +{ + NvU32 y; + + if (n == 0) + return 32; + + for (y = 0; !(n & 0x80000000); y++) + n <<= 1; + + return y; +} + +PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros64(NvU64 n) +{ + NvU32 bz, b5, b4, b3, b2, b1, b0; + NvU64 y; + + y = n & (~n + 1); + bz = y ? 0 : 1; + b5 = (y & 0x00000000FFFFFFFFLL) ? 0 : 32; + b4 = (y & 0x0000FFFF0000FFFFLL) ? 0 : 16; + b3 = (y & 0x00FF00FF00FF00FFLL) ? 0 : 8; + b2 = (y & 0x0F0F0F0F0F0F0F0FLL) ? 0 : 4; + b1 = (y & 0x3333333333333333LL) ? 0 : 2; + b0 = (y & 0x5555555555555555LL) ? 0 : 1; + + return (bz + b5 + b4 + b3 + b2 + b1 + b0); +} +PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros32(NvU32 n) +{ + NvU32 bz, b4, b3, b2, b1, b0; + NvU32 y; + + y = n & (~n + 1); + bz = y ? 0 : 1; + b4 = (y & 0x0000FFFF) ? 0 : 16; + b3 = (y & 0x00FF00FF) ? 0 : 8; + b2 = (y & 0x0F0F0F0F) ? 0 : 4; + b1 = (y & 0x33333333) ? 0 : 2; + b0 = (y & 0x55555555) ? 
0 : 1;
+
+    return (bz + b4 + b3 + b2 + b1 + b0);
+}
+#endif
+
+static NV_FORCEINLINE void
+portUtilSpin(void)
+{
+    NvU32 idx;
+    for (idx = 0; idx < 100; idx++)
+    {
+        NVPORT_DUMMY_LOOP();
+    }
+}
+
+#if NVCPU_IS_FAMILY_X86 && !defined(NV_MODS) && PORT_IS_MODULE_SUPPORTED(atomic)
+static NV_FORCEINLINE NvU64
+portUtilExReadTimestampCounterSerialized(void)
+{
+    NvU64 val;
+
+    portAtomicMemoryFenceLoad();
+    val = portUtilExReadTimestampCounter();
+    portAtomicMemoryFenceLoad();
+
+    return val;
+}
+#define portUtilExReadTimestampCounterSerialized_SUPPORTED 1
+#else
+#define portUtilExReadTimestampCounterSerialized_SUPPORTED 0
+#endif
+
+#endif // _NVPORT_UTIL_GENERIC_H_
diff --git a/src/nvidia/inc/libraries/nvport/inline/util_valist.h b/src/nvidia/inc/libraries/nvport/inline/util_valist.h new file mode 100644 index 0000000..4d293c3 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/util_valist.h @@ -0,0 +1,30 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief va_list declarations for all platforms
+ */
+
+// We used to have custom implementations in here, but now we just take the standard ones
+#include <stdarg.h> // define va_*
diff --git a/src/nvidia/inc/libraries/nvport/memory.h b/src/nvidia/inc/libraries/nvport/memory.h new file mode 100644 index 0000000..828915d --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/memory.h @@ -0,0 +1,1068 @@
+ /*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief Memory module public interface
+ */
+
+#ifndef _NVPORT_H_
+#error "This file cannot be included directly. Include nvport.h instead."
+#endif
+
+#ifndef _NVPORT_MEMORY_H_
+#define _NVPORT_MEMORY_H_
+
+/**
+ * Platform-specific inline implementations
+ */
+#if NVOS_IS_LIBOS
+#include "nvport/inline/memory_libos.h"
+#endif
+
+// Go straight at the memory or hardware.
+#define PORT_MEM_RD08(p) (*(p))
+#define PORT_MEM_RD16(p) (*(p))
+#define PORT_MEM_RD32(p) (*(p))
+#define PORT_MEM_RD64(p) (*(p))
+#define PORT_MEM_WR08(p, v) (*(p) = (v))
+#define PORT_MEM_WR16(p, v) (*(p) = (v))
+#define PORT_MEM_WR32(p, v) (*(p) = (v))
+#define PORT_MEM_WR64(p, v) (*(p) = (v))
+
+/**
+ * @defgroup NVPORT_MEMORY Memory
+ * @brief This module contains memory management related functionality.
+ *
+ * @{
+ */
+
+/**
+ * @brief Single allocation description - forward reference.
+ */
+struct PORT_MEM_TRACK_ALLOC_INFO;
+typedef struct PORT_MEM_TRACK_ALLOC_INFO PORT_MEM_TRACK_ALLOC_INFO;
+
+
+/**
+ * @name Core Functions
+ * @{
+ */
+
+
+/**
+ * @brief Initializes global Memory tracking structures.
+ *
+ * This function is called by @ref portInitialize. It is available here in case
+ * the MEMORY module needs to be initialized without initializing all the
+ * others, e.g. for unit tests.
+ */
+void portMemInitialize(void);
+/**
+ * @brief Destroys global Memory tracking structures, and checks for leaks.
+ *
+ * This function is called by @ref portShutdown. It is available here in case
+ * the MEMORY module needs to be shut down without shutting down all the
+ * others, e.g. for unit tests.
+ *
+ * @param bForceSilent - If NV_TRUE, the leak report is not printed, even if
+ * @ref PORT_MEM_TRACK_PRINT_LEVEL isn't PORT_MEM_TRACK_PRINT_LEVEL_SILENT.
+ */
+void portMemShutdown(NvBool bForceSilent);
+
+
+/**
+ * @brief Allocates pageable virtual memory of given size.
+ *
+ * Will allocate at least lengthBytes bytes and return a pointer to the
+ * allocated virtual memory. The caller will be able to both read and write
+ * the returned memory via standard pointer accesses.
+ *
+ * The memory is not guaranteed to be initialized before being returned to the
+ * caller.
+ *
+ * An allocation request of size 0 will result in a return value of NULL.
+ *
+ * @par Checked builds only:
+ * Requests of size 0 will breakpoint/assert.
+ *
+ * @par Undefined:
+ * It is possible this function will consume more than lengthBytes of virtual
+ * address space. However, behavior is undefined if the caller attempts to read
+ * or write addresses beyond lengthBytes.
+ *
+ * @return Pointer to requested memory, NULL if allocation fails.
+ *
+ * @note Calling this function is identical to calling
+ * @ref PORT_ALLOC ( @ref portMemAllocatorGetGlobalPaged() , lengthBytes)
+ *
+ * @pre Windows: IRQL <= APC_LEVEL
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+NV_FORCERESULTCHECK void *portMemAllocPaged(NvLength lengthBytes);
+
+/**
+ * @brief Allocates non-paged (i.e. pinned) memory.
+ *
+ * This function is essentially the same as @ref portMemAllocPaged except that
+ * the virtual memory, once returned, will always be resident in CPU memory.
+ *
+ * @return Pointer to requested memory, NULL if allocation fails.
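+ *
+ * Typical use, as an illustrative sketch (the error code chosen here is up
+ * to the caller):
+ * @code
+ *     NvU32 *pTable = portMemAllocNonPaged(numEntries * sizeof(*pTable));
+ *     if (pTable == NULL)
+ *         return NV_ERR_NO_MEMORY;
+ *     portMemSet(pTable, 0, numEntries * sizeof(*pTable));
+ *     // ... use pTable ...
+ *     portMemFree(pTable);
+ * @endcode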
+ *
+ * @note Calling this function is identical to calling
+ * @ref PORT_ALLOC ( @ref portMemAllocatorGetGlobalNonPaged() , lengthBytes)
+ *
+ * @pre Windows: IRQL <= DISPATCH_LEVEL
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+NV_FORCERESULTCHECK void *portMemAllocNonPaged(NvLength lengthBytes);
+
+/**
+ * @brief Allocates non-paged (i.e. pinned) memory on the stack or the heap.
+ *
+ * USE ONLY FOR MEMORY THAT IS ALLOCATED AND FREED IN THE SAME FUNCTION!
+ *
+ * This macro allocates memory on the stack on platforms with a large stack.
+ * Otherwise it falls back to @ref portMemAllocNonPaged and @ref portMemFree.
+ */
+#define portMemExAllocStack(lengthBytes) __builtin_alloca(lengthBytes)
+#define portMemExAllocStack_SUPPORTED PORT_COMPILER_IS_GCC
+
+#if portMemExAllocStack_SUPPORTED && NVOS_IS_LIBOS
+#define portMemAllocStackOrHeap(lengthBytes) portMemExAllocStack(lengthBytes)
+#define portMemFreeStackOrHeap(pData)
+#else
+#define portMemAllocStackOrHeap(size) portMemAllocNonPaged(size)
+#define portMemFreeStackOrHeap(pData) portMemFree(pData)
+#endif
+
+/**
+ * @brief Frees memory allocated by @ref portMemAllocPaged or @ref portMemAllocNonPaged.
+ *
+ * Frees either paged or non-paged virtual memory. The pointer passed in must
+ * have been the exact value returned by the allocation routine.
+ *
+ * Calling with NULL has no effect.
+ *
+ * @par Checked builds only:
+ * Will fill the memory with a pattern to help detect use after free.
+ * Will assert/breakpoint if the memory fenceposts have been corrupted.
+ *
+ * @par Undefined:
+ * Freeing the same address multiple times results in undefined behavior.
+ * Accessing memory in the region freed by this function results in undefined
+ * behavior. It may generate a page fault, or, if the memory has been
+ * reallocated (or kept around to optimize subsequent allocation requests),
+ * the access may unexpectedly work.
+ *
+ * @pre Windows: IRQL <= APC_LEVEL (DISPATCH_LEVEL if freeing NonPaged memory)
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+void portMemFree(void *pData);
+
+/**
+ * @brief Copies data from one address to another.
+ *
+ * Copies srcSize bytes from pSource to pDestination, returning pDestination.
+ * pDestination should be at least destSize bytes, pSource at least srcSize.
+ * destSize should be greater than or equal to srcSize.
+ *
+ * If destSize is 0, it is guaranteed not to access either buffer.
+ *
+ * @par Undefined:
+ * Behavior is undefined if the memory regions referred to by pSource and
+ * pDestination overlap.
+ *
+ * @par Checked builds only:
+ * Will assert/breakpoint if the regions overlap.
+ * Will assert/breakpoint if destSize < srcSize.
+ * Will assert/breakpoint if either pointer is NULL.
+ *
+ * @return pDestination on success, NULL if the operation failed.
+ *
+ */
+void *portMemCopy(void *pDestination, NvLength destSize, const void *pSource, NvLength srcSize);
+
+/**
+ * @brief Copies data from one address to another, avoiding alignment faults.
+ *
+ * Copies srcSize bytes from pSource to pDestination, returning pDestination.
+ * pDestination should be at least destSize bytes, pSource at least srcSize.
+ * destSize should be greater than or equal to srcSize.
+ *
+ * This function will also ensure that alignment faults are not generated
+ * when device memory is accessed.
+ *
+ * If destSize is 0, it is guaranteed not to access either buffer.
+ *
+ * @par Undefined:
+ * Behavior is undefined if the memory regions referred to by pSource and
+ * pDestination overlap.
+ *
+ * @par Checked builds only:
+ * Will assert/breakpoint if the regions overlap.
+ * Will assert/breakpoint if destSize < srcSize.
+ * Will assert/breakpoint if either pointer is NULL.
+ *
+ * @return pDestination on success, NULL if the operation failed.
+ *
+ */
+void *portMemCopyAligned(void *pDestination, NvLength destSize, const void *pSource, NvLength srcSize);
+
+/**
+ * @brief Moves data from one address to another.
+ *
+ * Copies memory from pSource to pDestination, returning pDestination.
+ * pDestination should be at least destSize bytes, pSource at least srcSize.
+ * destSize should be greater than or equal to srcSize.
+ *
+ * If destSize is 0, it is guaranteed not to access either buffer.
+ *
+ * Unlike @ref portMemCopy, this function allows the regions to overlap.
+ *
+ * @par Checked builds only:
+ * Will assert/breakpoint if destSize < srcSize.
+ * Will assert/breakpoint if either pointer is NULL.
+ *
+ * @return pDestination on success, NULL if the operation failed.
+ *
+ */
+void *portMemMove(void *pDestination, NvLength destSize, const void *pSource, NvLength srcSize);
+
+/**
+ * @brief Sets the given memory to the specified value.
+ *
+ * Writes lengthBytes bytes of data starting at pData with value.
+ * The buffer is assumed to be at least lengthBytes in size.
+ *
+ * If lengthBytes is 0, it is guaranteed to not access pData.
+ *
+ * @return pData
+ */
+void *portMemSet(void *pData, NvU8 value, NvLength lengthBytes);
+
+/**
+ * @brief Sets the given memory to the specified pattern.
+ *
+ * Fills lengthBytes bytes of pData by repeating the pPattern pattern.
+ * The pData buffer is assumed to be at least lengthBytes in size.
+ * The pPattern buffer is assumed to be at least patternBytes in size.
+ *
+ * If lengthBytes is 0, it is guaranteed to not access pData.
+ * @par Undefined:
+ * Behavior is undefined if patternBytes is zero.
+ * Behavior is undefined if pPattern and pData overlap.
+ *
+ * @return pData
+ */
+void *portMemSetPattern(void *pData, NvLength lengthBytes, const NvU8 *pPattern, NvLength patternBytes);
+
+/**
+ * @brief Compares two memory regions.
+ *
+ * This function does a byte-by-byte comparison of the two memory regions provided.
+ *
+ * It simultaneously scans pData0 and pData1 starting from byte 0 and going
+ * until lengthBytes bytes have been scanned or the bytes in pData0 and pData1
+ * are not equal.
+ *
+ * The return value will be
+ * - 0 if all lengthBytes bytes are equal.
+ * - <0 if pData0 is less than pData1 for the first unequal byte.
+ * - >0 if pData0 is greater than pData1 for the first unequal byte.
+ *
+ * Both buffers are assumed to be at least lengthBytes in size.
+ *
+ * @par Undefined:
+ * Behavior is undefined if memory regions referred to by pData0 and pData1
+ * overlap.
+ * Behavior is undefined if lengthBytes is 0. + * + * @par Checked builds only: + * The function will return 0 and breakpoint/assert if there is overlap.
+ * The function will return 0 and breakpoint/assert if the length is 0.
+ */
+NvS32 portMemCmp(const void *pData0, const void *pData1, NvLength lengthBytes);
+
+
+typedef struct PORT_MEM_ALLOCATOR PORT_MEM_ALLOCATOR;
+
+/**
+ * @brief Function signature for PORT_MEM_ALLOCATOR::alloc.
+ *
+ * Basic behavior is similar to @ref portMemAllocPaged. What type of memory
+ * is returned depends on the type of allocator that was created.
+ *
+ * Must be given the same instance of @ref PORT_MEM_ALLOCATOR as that which
+ * contains the calling function pointer. A different copy returned by the
+ * same function is not sufficient. Behavior is undefined if this is not done.
+ */
+typedef void *PortMemAllocatorAlloc(PORT_MEM_ALLOCATOR *pAlloc, NvLength length);
+
+/**
+ * @brief Function signature for PORT_MEM_ALLOCATOR::free.
+ *
+ * See @ref portMemFree for details.
+ *
+ * Must be given the same instance of @ref PORT_MEM_ALLOCATOR as that which
+ * contains the calling function pointer. A different copy returned by the
+ * same function is not sufficient. Behavior is undefined if this is not done.
+ *
+ * @par Checked builds only:
+ * Will assert if given a different pointer than the one the memory
+ * was allocated with.
+ */
+typedef void PortMemAllocatorFree(PORT_MEM_ALLOCATOR *pAlloc, void *pMemory);
+
+/**
+ * @brief Function signature for PORT_MEM_ALLOCATOR::release.
+ *
+ * This function is called by @ref portMemAllocatorRelease when the allocator is
+ * released. This is only needed when implementing custom allocators, to be able
+ * to clean up as necessary.
+ */
+typedef void PortMemAllocatorRelease(PORT_MEM_ALLOCATOR *pAlloc);
+
+
+/**
+ * @brief Platform specific allocator implementation.
+ */
+typedef struct PORT_MEM_ALLOCATOR_IMPL PORT_MEM_ALLOCATOR_IMPL;
+
+/**
+ * @brief Opaque structure to hold all memory tracking information.
+ */
+typedef struct PORT_MEM_ALLOCATOR_TRACKING PORT_MEM_ALLOCATOR_TRACKING;
+
+/**
+ * @brief Initializes an allocator's tracking structures.
+ *
+ * You only need to call this when creating a custom allocator. The functions
+ * declared in this file call this internally.
+ *
+ * @param pAllocator - Pointer to the allocator being tracked.
+ * @param pTracking  - Pointer to an already allocated tracking structure.
+ */
+void portMemInitializeAllocatorTracking(PORT_MEM_ALLOCATOR *pAllocator, PORT_MEM_ALLOCATOR_TRACKING *pTracking);
+
+/**
+ * @brief A set of functions that can be used to manage a specific type of memory.
+ *
+ * The intent of the allocator paradigm is to allow generic code to be
+ * given an instance of PORT_MEM_ALLOCATOR to allocate memory with, so it does
+ * not have to embed a policy decision in its implementation. It also allows
+ * specialized allocators to be implemented and leveraged through a generic
+ * interface.
+ *
+ * Don't call these functions directly; use @ref PORT_ALLOC and @ref PORT_FREE.
+ * This is done to provide full tracking support for these calls.
+ */
+struct PORT_MEM_ALLOCATOR {
+    /**
+     * @brief See @ref PortMemAllocatorAlloc for documentation.
+     */
+    PortMemAllocatorAlloc *_portAlloc;
+    /**
+     * @brief See @ref PortMemAllocatorFree for documentation.
+     */
+    PortMemAllocatorFree *_portFree;
+    /**
+     * @brief See @ref PortMemAllocatorRelease for documentation.
+     */
+    PortMemAllocatorRelease *_portRelease;
+    /**
+     * @brief Pointer to tracking structure.
+     */
+    PORT_MEM_ALLOCATOR_TRACKING *pTracking;
+    /**
+     * @brief Pointer to the platform specific implementation.
+     */
+    PORT_MEM_ALLOCATOR_IMPL *pImpl;
+};
+
+/**
+ * @brief Macro for calling the alloc method of an allocator object.
+ *
+ * Please use this instead of calling the methods directly, to ensure proper
+ * memory tracking in all cases.
+ *
+ * @pre Windows: IRQL <= APC_LEVEL (DISPATCH_LEVEL if allocating NonPaged memory)
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+#define PORT_ALLOC(pAlloc, length) _portMemAllocatorAlloc(pAlloc, length)
+/**
+ * @brief Macro for calling the free method of an allocator object.
+ *
+ * Please use this instead of calling the methods directly, to ensure proper
+ * memory tracking in all cases.
+ *
+ * @pre Windows: IRQL <= APC_LEVEL (DISPATCH_LEVEL if freeing NonPaged memory)
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+#define PORT_FREE(pAlloc, pMem) _portMemAllocatorFree(pAlloc, pMem)
+
+/**
+ * @brief Creates an allocator for paged memory.
+ *
+ * Returns an allocator instance where @ref PORT_ALLOC will behave
+ * like @ref portMemAllocPaged. Note the memory holding the PORT_MEM_ALLOCATOR
+ * instance may also be paged.
+ *
+ * @return NULL if creation failed.
+ *
+ * @pre Windows: IRQL <= APC_LEVEL
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+NV_FORCERESULTCHECK PORT_MEM_ALLOCATOR *portMemAllocatorCreatePaged(void);
+
+/**
+ * @brief Creates an allocator for non-paged memory.
+ *
+ * Returns an allocator instance where @ref PORT_ALLOC will
+ * behave like @ref portMemAllocNonPaged. Note the memory holding the
+ * PORT_MEM_ALLOCATOR instance will also be non-paged.
+ *
+ * @return NULL if creation failed.
+ *
+ * @pre Windows: IRQL <= DISPATCH_LEVEL
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+NV_FORCERESULTCHECK PORT_MEM_ALLOCATOR *portMemAllocatorCreateNonPaged(void);
+
+/**
+ * @brief Creates an allocator over an existing block of memory.
+ *
+ * Adds allocator bookkeeping information to an existing memory block, so that
+ * it can be used with the standard allocator interface. Some of the space of
+ * the preallocated block will be consumed for bookkeeping, so not all of the
+ * memory will be allocatable.
+ *
+ * Use this to create an allocator object on an ISR stack, so memory allocations
+ * can be done at DIRQL.
+ *
+ * @par Implementation details:
+ * The allocator allocates in chunks of 16 bytes, and uses a 2-bit vector to keep
+ * track of free chunks. Thus, the bookkeeping structures for a block of size N
+ * will take about N/64+sizeof(PORT_MEM_ALLOCATOR) bytes.
+ * Use @ref PORT_MEM_PREALLOCATED_BLOCK if you want to specify the useful
+ * (allocatable) size instead of the total size.
+ *
+ * The allocator is only valid while the memory it was created on is valid.
+ * @ref portMemAllocatorRelease must be called on the allocator before the
+ * memory lifecycle ends.
+ *
+ * @return NULL if creation failed.
+ *
+ * @pre Usable at any IRQL/interrupt context
+ * @note Will not put the thread to sleep.
+ * @note This allocator is not thread safe.
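+ *
+ * For example, a minimal sketch of an allocator over a local block (the sizes
+ * are illustrative only):
+ * ~~~{.c}
+ * NvU8 block[PORT_MEM_PREALLOCATED_BLOCK(256)];
+ * PORT_MEM_ALLOCATOR *pAlloc =
+ *     portMemAllocatorCreateOnExistingBlock(block, sizeof(block));
+ * if (pAlloc != NULL)
+ * {
+ *     void *pData = PORT_ALLOC(pAlloc, 64);
+ *     if (pData != NULL)
+ *         PORT_FREE(pAlloc, pData);
+ *     // Release before the block goes out of scope.
+ *     portMemAllocatorRelease(pAlloc);
+ * }
+ * ~~~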
+ */
+NV_FORCERESULTCHECK PORT_MEM_ALLOCATOR *portMemAllocatorCreateOnExistingBlock(void *pPreallocatedBlock, NvLength blockSizeBytes);
+
+/**
+ * @brief Extends the given size to fit the required bookkeeping information.
+ *
+ * To be used when preallocating blocks that will be used to create an allocator.
+ * Consider these two preallocated memory blocks:
+ * ~~~{.c}
+ * NvU8 xxx[1024];
+ * NvU8 yyy[PORT_MEM_PREALLOCATED_BLOCK(1024)];
+ * ~~~
+ * Block @c xxx has a size of 1024, but only ~950 of that can be allocated.
+ * Block @c yyy has a size of ~1100, and exactly 1024 bytes can be allocated.
+ */
+#define PORT_MEM_PREALLOCATED_BLOCK(size) \
+    (size + PORT_MEM_PREALLOCATED_BLOCK_EXTRA_SIZE(size))
+
+/**
+ * @brief Releases an allocator instance.
+ *
+ * This must be called to release any resources associated with the allocator.
+ *
+ * @par Checked builds only:
+ * Will assert if pAllocator has unfreed allocations.
+ *
+ * @par Undefined:
+ * pAllocator must be an instance of PORT_MEM_ALLOCATOR that was provided by one
+ * of the portMemAllocatorCreate* functions.
+ *
+ * These limitations don't apply to allocators created using @ref portMemAllocatorCreateOnExistingBlock and
+ * @ref portMemExAllocatorCreateLockedOnExistingBlock.
+ *
+ * @pre Windows: IRQL <= APC_LEVEL
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+void portMemAllocatorRelease(PORT_MEM_ALLOCATOR *pAllocator);
+
+/**
+ * @brief Returns the pointer to the global non-paged allocator.
+ *
+ * This allocator is always initialized and does not need to be released.
+ *
+ * Allocations performed using this allocator are identical to the ones done
+ * by @ref portMemAllocNonPaged.
+ */
+PORT_MEM_ALLOCATOR *portMemAllocatorGetGlobalNonPaged(void);
+/**
+ * @brief Returns the pointer to the global paged allocator.
+ *
+ * This allocator is always initialized and does not need to be released.
+ *
+ * Allocations performed using this allocator are identical to the ones done
+ * by @ref portMemAllocPaged.
+ */
+PORT_MEM_ALLOCATOR *portMemAllocatorGetGlobalPaged(void);
+/**
+ * @brief Prints the memory details gathered by whatever tracking mechanism is
+ * enabled. If pTracking is NULL, aggregate tracking information from all
+ * allocators will be printed.
+ *
+ * @note Printing is done using portDbgPrintf, which prints regardless of
+ * build type and debug levels.
+ */
+void portMemPrintTrackingInfo(const PORT_MEM_ALLOCATOR_TRACKING *pTracking);
+/**
+ * @brief Calls @ref portMemPrintTrackingInfo for all current allocator trackers.
+ */
+void portMemPrintAllTrackingInfo(void);
+
+// @} End core functions
+
+
+/**
+ * @name Extended Functions
+ * @{
+ */
+
+/**
+ * @brief Returns true if it is safe to allocate paged memory.
+ */
+NvBool portMemExSafeForPagedAlloc(void);
+#define portMemExSafeForPagedAlloc_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Returns true if it is safe to allocate non-paged memory.
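+ *
+ * For example, a caller that may run in a context where non-paged allocations
+ * are unsafe can guard the allocation (the 64-byte size is illustrative only):
+ * ~~~{.c}
+ * void *pBuffer = NULL;
+ * if (portMemExSafeForNonPagedAlloc())
+ *     pBuffer = portMemAllocNonPaged(64);
+ * ~~~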
+ */
+NvBool portMemExSafeForNonPagedAlloc(void);
+#define portMemExSafeForNonPagedAlloc_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Public allocator tracking information
+ */
+typedef struct PORT_MEM_TRACK_ALLOCATOR_STATS
+{
+    /** @brief Total number of allocations */
+    NvU32 numAllocations;
+    /** @brief Total allocated bytes, including all staging */
+    NvLength allocatedSize;
+    /** @brief Useful size of allocations - What was actually requested */
+    NvLength usefulSize;
+    /** @brief Extra size allocated for tracking/debugging purposes */
+    NvLength metaSize;
+} PORT_MEM_TRACK_ALLOCATOR_STATS;
+
+/**
+ * @brief Returns the statistics of currently active allocations for the given
+ * allocator.
+ *
+ * If pAllocator is NULL, it returns stats for all allocators, as well as the
+ * memory allocated with @ref portMemAllocPaged and @ref portMemAllocNonPaged
+ */
+NV_STATUS portMemExTrackingGetActiveStats(const PORT_MEM_ALLOCATOR *pAllocator, PORT_MEM_TRACK_ALLOCATOR_STATS *pStats);
+
+/**
+ * @brief Returns the statistics of currently active allocations made with the
+ * given gfid.
+ *
+ * If the corresponding pTracking is not found, it returns
+ * NV_ERR_OBJECT_NOT_FOUND
+ */
+NV_STATUS portMemExTrackingGetGfidActiveStats(
+    NvU32 gfid,
+    PORT_MEM_TRACK_ALLOCATOR_STATS *pStats
+);
+
+/**
+ * @brief Returns the statistics of all allocations made with the given
+ * allocator since it was created.
+ *
+ * If pAllocator is NULL, it returns stats for all allocators, as well as the
+ * memory allocated with @ref portMemAllocPaged and @ref portMemAllocNonPaged
+ */
+NV_STATUS portMemExTrackingGetTotalStats(const PORT_MEM_ALLOCATOR *pAllocator, PORT_MEM_TRACK_ALLOCATOR_STATS *pStats);
+
+/**
+ * @brief Returns the statistics of all allocations made with the given
+ * gfid.
+ *
+ * If the corresponding pTracking is not found, it returns
+ * NV_ERR_OBJECT_NOT_FOUND
+ */
+NV_STATUS portMemExTrackingGetGfidTotalStats(
+    NvU32 gfid,
+    PORT_MEM_TRACK_ALLOCATOR_STATS *pStats
+);
+
+/**
+ * @brief Returns the statistics of peak allocations made with the given
+ * allocator since it was created.
+ *
+ * Peak data reports the high-water mark based on the maximum size (the peak
+ * allocation count doesn't report the largest number of allocations; it reports
+ * the number of allocations at the time the peak size was achieved). This is
+ * done so that the other peak stats, which are derived from peak size and
+ * peak allocations, are consistent with each other.
+ *
+ * If pAllocator is NULL, it returns stats for all allocators, as well as the
+ * memory allocated with @ref portMemAllocPaged and @ref portMemAllocNonPaged
+ */
+NV_STATUS portMemExTrackingGetPeakStats(const PORT_MEM_ALLOCATOR *pAllocator, PORT_MEM_TRACK_ALLOCATOR_STATS *pStats);
+
+/**
+ * @brief Returns the statistics of peak allocations made with the given
+ * gfid since it was created.
+ *
+ * Peak data reports the high-water mark based on the maximum size (the peak
+ * allocation count doesn't report the largest number of allocations; it reports
+ * the number of allocations at the time the peak size was achieved). This is
+ * done so that the other peak stats, which are derived from peak size and
+ * peak allocations, are consistent with each other.
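+ *
+ * For example, a sketch that prints the peak usage recorded for a GFID (the
+ * GFID value and message format are illustrative only):
+ * ~~~{.c}
+ * PORT_MEM_TRACK_ALLOCATOR_STATS stats;
+ * if (portMemExTrackingGetGfidPeakStats(1, &stats) == NV_OK)
+ *     portDbgPrintf("GFID 1 peak: %u allocations, %llu bytes\n",
+ *                   stats.numAllocations, (NvU64)stats.allocatedSize);
+ * ~~~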
+ *
+ * If the corresponding pTracking is not found, it returns
+ * NV_ERR_OBJECT_NOT_FOUND
+ */
+NV_STATUS portMemExTrackingGetGfidPeakStats(
+    NvU32 gfid,
+    PORT_MEM_TRACK_ALLOCATOR_STATS *pStats
+);
+
+/**
+ * @brief Cycles through the tracking infos for allocations by pAllocator.
+ * If pAllocator is NULL, it will cycle through all allocations.
+ *
+ * @param [out] pInfo  The info will be written to this buffer.
+ * @param [in, out] pIterator
+ *      Should point to NULL the first time it is called.
+ *      Every next call should pass the value returned by previous.
+ *      To reset the loop, set the iterator to NULL.
+ *      Upon writing the last range, the iterator will be set to NULL.
+ *      The iterator is only valid until the next alloc/free from this allocator.
+ *      There is no need to release the iterator in any way.
+ *
+ * @return NV_ERR_OBJECT_NOT_FOUND if no allocations exist.
+ */
+NV_STATUS portMemExTrackingGetNext(const PORT_MEM_ALLOCATOR *pAllocator, PORT_MEM_TRACK_ALLOC_INFO *pInfo, void **pIterator);
+
+/**
+ * @brief Gets the total size of the underlying heap, in bytes.
+ */
+NvLength portMemExTrackingGetHeapSize(void);
+
+/**
+ * @brief Gets the usable size in bytes (sans metadata/padding) of the given allocation.
+ */
+NvLength portMemExTrackingGetAllocUsableSize(void *pMem);
+
+/**
+ * @brief Copies from user memory to kernel memory.
+ *
+ * When accepting data as input from user space it is necessary to take
+ * additional precautions to access it safely and securely. This means copying
+ * the user data into a kernel buffer and then using that kernel buffer for all
+ * needed accesses.
+ *
+ * The function will fail if pUser is an invalid user space pointer or if the
+ * memory it refers to is less than lengthBytes bytes long. A valid kernel
+ * pointer is interpreted as an invalid user pointer.
+ * @par Checked builds only:
+ * Will trigger a breakpoint if pUser is an invalid userspace pointer.
+ *
+ * The function will fail if pKernel is NULL.
+ *
+ * The function will fail if lengthBytes is 0.
+ *
+ * @return
+ * - NV_OK if successful
+ * - NV_ERR_INVALID_POINTER if pUser is invalid or pKernel is NULL
+ * - NV_ERR_INVALID_ARGUMENT if lengthBytes is 0
+ */
+NV_STATUS portMemExCopyFromUser(const NvP64 pUser, void *pKernel, NvLength lengthBytes);
+#define portMemExCopyFromUser_SUPPORTED PORT_IS_KERNEL_BUILD
+
+
+/**
+ * @brief Copies from kernel memory to user memory.
+ *
+ * This is the reverse of @ref portMemExCopyFromUser. The copy in this case is
+ * from pKernel to pUser.
+ *
+ * See @ref portMemExCopyFromUser for more details.
+ *
+ */
+NV_STATUS portMemExCopyToUser(const void *pKernel, NvP64 pUser, NvLength lengthBytes);
+#define portMemExCopyToUser_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Returns the size (in bytes) of a single memory page.
+ */
+NvLength portMemExGetPageSize(void);
+#define portMemExGetPageSize_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Opaque container holding an allocation of physical system memory.
+ */
+typedef struct PORT_PHYSICAL_MEMDESC PORT_PHYSICAL_MEMDESC;
+
+/**
+ * @brief Creates a handle used to manage and manipulate a physical memory
+ * allocation.
+ *
+ * @param pAllocator the allocator to use to create the allocation's tracking
+ * structures. This allocator is *not* used to allocate physical memory.
+ *
+ * @return NULL if the allocation failed.
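+ *
+ * A typical life cycle, sketched here with @ref portMemExPhysicalDescCreateAndPopulate
+ * and with error handling abbreviated (the 4096-byte size and flags are
+ * illustrative only):
+ * ~~~{.c}
+ * PORT_PHYSICAL_MEMDESC *pPmd;
+ * void *pMapping;
+ * if (portMemExPhysicalDescCreateAndPopulate(pAllocator, &pPmd, 4096, NV_TRUE) == NV_OK)
+ * {
+ *     if (portMemExPhysicalMap(pPmd, &pMapping, 0, 4096,
+ *                              PORT_MEM_PROT_READ_WRITE, PORT_MEM_ANYCACHE) == NV_OK)
+ *     {
+ *         // ... access the mapping ...
+ *         portMemExPhysicalUnmap(pPmd, pMapping);
+ *     }
+ *     portMemExPhysicalFree(pPmd);
+ *     portMemExPhysicalDescFree(pPmd);
+ * }
+ * ~~~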
+ */ +PORT_PHYSICAL_MEMDESC *portMemExPhysicalDescCreate(PORT_MEM_ALLOCATOR *pAllocator); +#define portMemExPhysicalDescCreate_SUPPORTED PORT_IS_KERNEL_BUILD + +/** + * @brief Types of caching for physical memory mappings. + * + * In case a target architecture does not support a specific caching mode, + * the mapping call will fail. + * Specifying PORT_MEM_ANYCACHE lets the implementation pick a caching mode that + * is present on the target architecture. This way the mapping will not fail. + */ +typedef enum +{ + PORT_MEM_UNCACHED, + PORT_MEM_CACHED, + PORT_MEM_WRITECOMBINED, + PORT_MEM_ANYCACHE +} PortMemCacheMode; + +/** + * @brief Types of access protections for physical memory mappings. + */ +typedef enum +{ + PORT_MEM_PROT_NO_ACCESS = 0, + PORT_MEM_PROT_READ = 1, + PORT_MEM_PROT_WRITE = 2, + PORT_MEM_PROT_READ_WRITE = 3, + PORT_MEM_PROT_EXEC = 4, + PORT_MEM_PROT_READ_EXEC = 5, + PORT_MEM_PROT_WRITE_EXEC = 6, + PORT_MEM_PROT_READ_WRITE_EXEC = 7 +} PortMemProtectMode; + +/** + * @brief Populates a physical memory descriptor with backing pages. + * + * Populates a descriptor with physical pages. Pages will be zeroed. + */ +NV_STATUS portMemExPhysicalDescPopulate(PORT_PHYSICAL_MEMDESC *pPmd, NvLength sizeBytes, NvBool bContiguous); +#define portMemExPhysicalDescPopulate_SUPPORTED PORT_IS_KERNEL_BUILD + + +/** + * @brief allocates a PMD and populates it with memory + * + * This is a combination of @ref portMemExPhysicalDescCreate and @ref + * portMemExPhysicalDescPopulate. It should be the preferred method to allocate + * physical memory when it is possible to do it as a single step. Not only + * does the caller require less code and error handling but it allows the + * implementation the option to combine the tracking data into fewer + * allocations since it knows the size up front. + * + * @param [out] ppPmd - Pointer to the allocated PMD. + * @param pAllocator - Allocator to use when allocating the PMD + */ +NV_STATUS portMemExPhysicalDescCreateAndPopulate(PORT_MEM_ALLOCATOR *pAllocator, + PORT_PHYSICAL_MEMDESC **ppPmd, NvLength sizeBytes, NvBool bContiguous); +#define portMemExPhysicalDescCreateAndPopulate_SUPPORTED PORT_IS_KERNEL_BUILD + + +/** + * @brief Adds a contiguous memory range to the physical memory descriptor + * + * To describe a non-contiguous memory range, call this function once for every + * contiguous range. Range order will be determined by function call order, + * not the range addresses. + */ +NV_STATUS portMemExPhysicalDescribeRange(PORT_PHYSICAL_MEMDESC *pPmd, NvU64 start, NvLength length); +#define portMemExPhysicalDescribeRange_SUPPORTED PORT_IS_KERNEL_BUILD + +/** + * @brief Hands back the next contiguous memory range in the memory descriptor + * + * @param [out] pStart - Physical address of the range + * @param [out] pLength - Length of the range + * @param [in, out] pIterator + * Should point to NULL the first time it is called. + * Every next call should pass the value returned by previous. + * To reset the loop, set the iterator to NULL. + * Upon writing the last range, the iterator will be set to NULL. + * The iterator is valid until pPmd is destroyed. + * There is no need to release the iterator in any way. + * + * @return NV_ERR_OBJECT_NOT_FOUND if no ranges exist. + */ +NV_STATUS portMemExPhysicalGetNextRange(PORT_PHYSICAL_MEMDESC *pPmd, + NvU64 *pStart, NvLength *pLength, void **pIterator); +#define portMemExPhysicalGetNextRange_SUPPORTED PORT_IS_KERNEL_BUILD + +/** + * @brief Frees the memory descriptor and all tracking data. 
The descriptor must
+ * have been allocated with @ref portMemExPhysicalDescCreate or
+ * @ref portMemExPhysicalDescCreateAndPopulate.
+ *
+ * Freed memory is not automatically unmapped.
+ *
+ * It is guaranteed that after memory has been freed, the original data can no
+ * longer be read in any way.
+ * @par Undefined:
+ * Accessing a mapping that has been freed results in undefined behavior.
+ */
+void portMemExPhysicalDescFree(PORT_PHYSICAL_MEMDESC *pPmd);
+#define portMemExPhysicalDescFree_SUPPORTED PORT_IS_KERNEL_BUILD
+
+
+/**
+ * @brief Frees physical memory allocated with @ref portMemExPhysicalDescPopulate.
+ */
+void portMemExPhysicalFree(PORT_PHYSICAL_MEMDESC *pPmd);
+#define portMemExPhysicalFree_SUPPORTED PORT_IS_KERNEL_BUILD
+
+
+/**
+ * @brief Maps a region of a @ref PORT_PHYSICAL_MEMDESC.
+ *
+ * @param [out] ppMapping - Virtual address where the physical memory is mapped
+ * @param offset    - Offset of the physical memory where the region starts.
+ *                    The region must start on a page boundary.
+ * @param length    - Length of the physical memory region.
+ *                    Needs to be a multiple of page size.
+ * @param protect   - Mapping protections
+ * @param cacheMode - Mapping cache mode.
+ *                    Only PORT_MEM_ANYCACHE is guaranteed to be supported.
+ *
+ * @return NV_ERR_NOT_SUPPORTED if the specified cache mode is not supported by
+ * the current architecture.
+ */
+NV_STATUS portMemExPhysicalMap(PORT_PHYSICAL_MEMDESC *pPmd,
+    void **ppMapping, NvU64 offset, NvU64 length,
+    PortMemProtectMode protect, PortMemCacheMode cacheMode);
+#define portMemExPhysicalMap_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Unmaps a region created with @ref portMemExPhysicalMap.
+ *
+ * @par Undefined:
+ * Accessing unmapped memory is undefined, but it is guaranteed that the
+ * actual data can't be read/overwritten.
+ */
+NV_STATUS portMemExPhysicalUnmap(PORT_PHYSICAL_MEMDESC *pPmd, void *pMapping);
+#define portMemExPhysicalUnmap_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Creates a thread safe allocator over an existing block of memory.
+ *
+ * @note See @ref portMemAllocatorCreateOnExistingBlock for other limitations.
+ * @note The caller should initialize @p pSpinlock and destroy it after it
+ * has finished using this allocator.
+ */
+PORT_MEM_ALLOCATOR *portMemExAllocatorCreateLockedOnExistingBlock(void *pPreallocatedBlock, NvLength blockSizeBytes, void *pSpinlock);
+#define portMemExAllocatorCreateLockedOnExistingBlock_SUPPORTED \
+    (PORT_IS_MODULE_SUPPORTED(sync))
+
+
+/**
+ * @brief Maps the given physical address range to nonpaged system space.
+ *
+ * @param[in] start     Specifies the starting physical address of the I/O
+ *                      range to be mapped.
+ * @param[in] byteSize  Specifies the number of bytes to be mapped.
+ *
+ * @return The base virtual address that maps the base physical address for
+ * the range.
+ */
+void *portMemExMapIOSpace(NvU64 start, NvU64 byteSize);
+#define portMemExMapIOSpace_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS)
+
+/**
+ * @brief Unmaps a specified range of physical addresses previously mapped by
+ * @ref portMemExMapIOSpace.
+ *
+ * @param[in] addr      Pointer to the base virtual address to which the
+ *                      physical pages were mapped.
+ * @param[in] byteSize  Specifies the number of bytes that were mapped.
+ */
+void portMemExUnmapIOSpace(void *addr, NvU64 byteSize);
+#define portMemExUnmapIOSpace_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS)
+
+// @} End extended functions
+
+
+/**
+ * @note Memory tracking is controlled through the following compile-time flags.
+ * The PORT_MEM_TRACK_USE_* constants should be defined to 0 or 1. + * If nothing is defined, the default values are assigned here. + */ +#if !defined(PORT_MEM_TRACK_USE_COUNTER) +/** + * @brief Use allocations counter for all allocators + * + * Allocation counter is lightweight and can detect if a leak is present. + * Default is always on. + */ +#define PORT_MEM_TRACK_USE_COUNTER 1 +#endif +#if !defined(PORT_MEM_TRACK_USE_FENCEPOSTS) +/** + * @brief Use fenceposts around all allocated blocks + * + * Fenceposts can detect out of bounds writes and improper free calls + * Default is on for checked builds (where it will assert if an error occurs) + */ +#define PORT_MEM_TRACK_USE_FENCEPOSTS PORT_IS_CHECKED_BUILD +#endif +#if !defined(PORT_MEM_TRACK_USE_ALLOCLIST) +/** + * @brief Keep a list of all allocations. + * + * Allocation lists can give more details about detected leaks, and allow + * cycling through all allocations. + * Default is off. + * @todo Perhaps enable for checked builds? + */ +#define PORT_MEM_TRACK_USE_ALLOCLIST 0 +#endif +#if !defined(PORT_MEM_TRACK_USE_CALLERINFO) +/** + * @brief Track file:line information for all allocations + * + * On release builds the filename hash is passed instead of the string. This + * requires NvLog to be enabled. + * Default is off. + */ +#define PORT_MEM_TRACK_USE_CALLERINFO 0 +#endif +/** + * @brief Track instruction pointer instead of function/file/line information + * for all allocations + * + * Has no effect unless PORT_MEM_TRACK_USE_CALLERINFO is also set. + */ +#if !defined(PORT_MEM_TRACK_USE_CALLERINFO_IP) +#if NVOS_IS_LIBOS +#define PORT_MEM_TRACK_USE_CALLERINFO_IP 1 +#else +#define PORT_MEM_TRACK_USE_CALLERINFO_IP 0 +#endif +#endif +#if !defined(PORT_MEM_TRACK_USE_LOGGING) +/** + * @brief Log all alloc and free calls to a binary NvLog buffer + * Requires NvLog to be enabled. + * + * Default is off. + */ +#define PORT_MEM_TRACK_USE_LOGGING 0 +#endif +#if !defined(PORT_MEM_TRACK_USE_LIMIT) +/** + * @brief Track and enforce a heap memory usage limit on processes + * running in GSP-RM. + * + * Default is on in GSP-RM only. + */ +#ifndef GSP_PLUGIN_BUILD +#define PORT_MEM_TRACK_USE_LIMIT (NVOS_IS_LIBOS) +#else +#define PORT_MEM_TRACK_USE_LIMIT 0 +#endif +#endif // !defined(PORT_MEM_TRACK_USE_LIMIT) + +// Memory tracking header can redefine some functions declared here. +#include "nvport/inline/memory_tracking.h" + +/** @brief Nothing is printed unless @ref portMemPrintTrackingInfo is called */ +#define PORT_MEM_TRACK_PRINT_LEVEL_SILENT 0 +/** @brief Print when an error occurs and at shutdown */ +#define PORT_MEM_TRACK_PRINT_LEVEL_BASIC 1 +/** @brief Print at every alloc and free, and at any abnormal situation */ +#define PORT_MEM_TRACK_PRINT_LEVEL_VERBOSE 2 + +#if !defined(PORT_MEM_TRACK_PRINT_LEVEL) +#if PORT_IS_CHECKED_BUILD || PORT_MEM_TRACK_ALLOC_SIZE +#define PORT_MEM_TRACK_PRINT_LEVEL PORT_MEM_TRACK_PRINT_LEVEL_BASIC +#else +#define PORT_MEM_TRACK_PRINT_LEVEL PORT_MEM_TRACK_PRINT_LEVEL_SILENT +#endif // PORT_IS_CHECKED_BUILD +#endif // !defined(PORT_MEM_TRACK_PRINT_LEVEL) + +/** + * @brief Single allocation description. + * + * Must be defined after memory_tracking.h is included for PORT_MEM_CALLERINFO. + */ +struct PORT_MEM_TRACK_ALLOC_INFO +{ +#if PORT_MEM_TRACK_USE_CALLERINFO + /** + * @brief Function / file / line or instruction pointer. + */ + PORT_MEM_CALLERINFO callerInfo; +#endif + /** + * @brief pointer to the allocated memory block. 
+ */ + void *pMemory; + /** + * @brief Size of the allocated memory block + */ + NvLength size; + /** + * @brief Pointer to the allocator that allocated the memory. + * If the memory was allocated globally, this will be NULL + */ + PORT_MEM_ALLOCATOR *pAllocator; + /** + * @brief Timestamp of the allocation. Will be 0 if it wasn't logged. + */ + NvU64 timestamp; +}; + +/** + * @} + */ + +#endif // _NVPORT_MEMORY_H_ diff --git a/src/nvidia/inc/libraries/nvport/nvport.h b/src/nvidia/inc/libraries/nvport/nvport.h new file mode 100644 index 0000000..f9cd603 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/nvport.h @@ -0,0 +1,262 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief NvPort global definitions + */ + +#ifndef _NVPORT_H_ +#define _NVPORT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @note nvport.h may be included through nvrm.h by projects which haven't yet + * configured their makefiles appropriately. These files don't use any NvPort + * features, so it's safe to define away this entire file instead of failing + * the build. This will be removed once NvPort becomes ubiquitous enough. + */ +#if defined(PORT_IS_KERNEL_BUILD) + +#include +#include + +#if !defined(PORT_IS_KERNEL_BUILD) +#error "PORT_IS_KERNEL_BUILD must be defined to 0 or 1 by makefile" +#endif + +#if !defined(PORT_IS_CHECKED_BUILD) +#error "PORT_IS_CHECKED_BUILD must be defined to 0 or 1 by makefile" +#endif + +/** + * @defgroup NVPORT_CORE Core Functions + * @{ + */ + +/** + * @brief Helper macro to test if an extended function is supported + * + * Whether an extended function is supported or not is a compile time decision. + * Every function has an associated define that will look like this: + * + * ~~~{.c} + * #define portSomeFunction_SUPPORTED SOME_EXPRESSION + * ~~~ + * + * That will be evaluated by the preprocessor to either 0 or 1 (not supported + * or supported). If it evaluates to 0 then the symbol will not exist and the + * function cannot be referenced. + */ +#define PORT_IS_FUNC_SUPPORTED(function) function ## _SUPPORTED + +/** + * @brief Helper macro to test if a module is supported. The argument should be + * a lowercase module name, e.g. @c PORT_IS_MODULE_SUPPORTED(memory) + * + * Whether a module is included in the build is decided at compile time. 
+ * Modules can either not support a given platform or be explicitly disabled + * through the Makefile. + * + * This define will be equal to 1 if the module is supported. + * If it evaluates to 0 or is not defined, then none of the module's symbols or + * defines will exist in the build. + */ +#define PORT_IS_MODULE_SUPPORTED(module) PORT_MODULE_ ## module + + +#if defined(__clang__) +#define PORT_COMPILER_IS_CLANG 1 +#define PORT_COMPILER_HAS_INTRINSIC_ATOMICS __has_builtin(__c11_atomic_fetch_add) +#define PORT_COMPILER_HAS_ATTRIBUTE_FORMAT __has_attribute(__format__) +#define PORT_COMPILER_HAS_COUNTER 1 +#else +#define PORT_COMPILER_IS_CLANG 0 +#endif + +#if defined(__GNUC__) && !defined(__clang__) +#define PORT_COMPILER_IS_GCC 1 +#define PORT_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) +#define PORT_COMPILER_HAS_INTRINSIC_ATOMICS (PORT_GCC_VERSION >= 40100) +#define PORT_COMPILER_HAS_ATTRIBUTE_FORMAT (PORT_GCC_VERSION >= 20300) +#define PORT_COMPILER_HAS_COUNTER (PORT_GCC_VERSION >= 40300) +#define PORT_COMPILER_HAS_INTRINSIC_CPUID 1 +#else +#define PORT_COMPILER_IS_GCC 0 +#endif + +#define PORT_COMPILER_IS_MSVC 0 + +#if !(PORT_COMPILER_IS_GCC || PORT_COMPILER_IS_CLANG || PORT_COMPILER_IS_MSVC) +#error "Unsupported compiler" +#endif + +// +// Need to define an IS_MODS macro that expands to 1 or 0 as defined(NV_MODS) +// is not entirely portable when used within a macro expansion. +// e.g. this would not always work: #define PORT_IS_MODS defined(NV_MODS) +// +#define PORT_IS_MODS 0 + +#ifndef PORT_INLINE +/** + * @brief Qualifier for all inline functions declared by NvPort. + * Modules will usually define PORT__INLINE which is either PORT_INLINE + * or nothing, depending whether the functions are being inlined in that module. + */ +#define PORT_INLINE static NV_INLINE +#endif + + +/** + * @def PORT_CHECKED_ONLY(x) + * @brief Evaluates the argument only if it is a checked build + */ +#if PORT_IS_CHECKED_BUILD +#define PORT_CHECKED_ONLY(x) x +#else +#define PORT_CHECKED_ONLY(x) +#endif + +/** + * @def PORT_KERNEL_ONLY(x) + * @brief Evaluates the argument only if it is a kernel build + */ +#if PORT_IS_KERNEL_BUILD +#define PORT_KERNEL_ONLY(x) x +#else +#define PORT_KERNEL_ONLY(x) +#endif + +#ifndef PORT_INCLUDE_NEW_STYLE_ALIASES +/** + * @brief Switch to include aliases for objects and methods that conform to the + * new RM style. + * + * This switch will define type and method aliases for object types in NvPort. + * The current NvPort style object names are PORT_MODULE_OBJECT, while the + * methods are portModuleObjectMethod(). + * The update proposal dictates these to be PortModuleObject and objectMethod. + * + * @todo Currently we just alias the new names to the old ones. Once the coding + * style has been finalized, we should add a deprecation note to the old names, + * and do a mass search and replace. + */ +#define PORT_INCLUDE_NEW_STYLE_ALIASES 1 +#endif // PORT_INCLUDE_NEW_STYLE_ALIASES + +/** + * @brief Suppresses unused variable warnings + * @param x - Variable or argument name + * + * No compilation errors are reported by any compiler when we use + * the following definition. + * + * #define PORT_UNREFERENCED_VARIABLE(x) ((void)sizeof(&(x))) + * + * But Coverity reports BAD_SIZEOF error with this definition. + * Adding a Coverity annotation "coverity[bad_sizeof]" near + * the definition does not work. The preprocessor ignores all + * the comments and the Coverity annotation is also ignored + * as a legal comment. 
As a result, this annotation never ends + * up in the source code where this macro is used. Hence, we use + * two definitions of this macro - one for Coverity and the other + * for the rest of the targets. + * + * Coverity does not report any warnings for unused variables. + * Hence, we do nothing while building for Coverity. + */ +#if !defined(__COVERITY__) +#define PORT_UNREFERENCED_VARIABLE(x) ((void)sizeof(&(x))) +#else +#define PORT_UNREFERENCED_VARIABLE(x) +#endif + +/// @} + +#if PORT_IS_MODULE_SUPPORTED(core) +#include "nvport/core.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(example) +#include "nvport/example.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(atomic) +#include "nvport/atomic.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(debug) +#include "nvport/debug.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(util) +#include "nvport/util.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(memory) +#include "nvport/memory.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(sync) +#include "nvport/sync.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(safe) +#include "nvport/safe.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(mmio) +#include "nvport/mmio.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(thread) +#include "nvport/thread.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(time) +#include "nvport/time.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(crypto) +#include "nvport/crypto.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(string) +#include "nvport/string.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(cpu) +#include "nvport/cpu.h" +#endif + +#endif // defined(PORT_IS_KERNEL_BUILD) + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _NVPORT_H_ diff --git a/src/nvidia/inc/libraries/nvport/safe.h b/src/nvidia/inc/libraries/nvport/safe.h new file mode 100644 index 0000000..2847c71 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/safe.h @@ -0,0 +1,621 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Safe module public interface + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. Include nvport.h instead." 
+#endif + +#ifndef _NVPORT_SAFE_H_ +#define _NVPORT_SAFE_H_ + +#ifndef PORT_SAFE_INLINE +#define PORT_SAFE_INLINE PORT_INLINE +#endif + +/** + * @defgroup NVPORT_SAFE Safe integer operations module + * + * @brief This module contains functions for safe use of integer types, without + * concern for overflow errors + * + * All functions return true if the operation was performed successfully, + * and false if there was an overflow (or division by zero). + * The final value is handed back in both cases, but if the function returned + * false, the value handed back is undefined. + * + * @note These functions should be used: + * - When operating on data passing through the trust boundary (e.g. RM API) + * - When operating on signed data types (where overflows are undefined!) + * - Instead of checking overflows manually + * For internal functions, it is recommended not to clutter the code with this. + * Usually an internal overflow is a bug, and it should be fixed up the stack. + * + * + * @note A couple of artificial examples: + * - GOOD - Data received from user, not to be trusted. + * ~~~{.c} + * NvU32 NV_APIENTRY NvRmBzero(NvU8 *mem, NvLength len) + * { + * NvUPtr uptr = (NvUPtr) mem; + * if (mem == NULL || !portSafeAddUPtr(uptr, len, &uptr)) + * return NV_ERR_INVALID_PARAMETER; + * while ((NvUPtr) mem != uptr) + * *mem++ = 0; + * return NV_OK; + * } + * ~~~ + * - GOOD - Internal RM function, allowed to crash if given invalid params + * ~~~{.c} + * void bzero(NvU8 *mem, NvLength len) + * { + * while (len > 0) + * mem[--len] = 0; + * } + * ~~~ + * - BAD - If you are already checking for overflows manually, use these functions + * ~~~{.c} + * NV_STATUS osAllocMemTracked(void **ppAddress, NvU32 size) + * { + * NvU32 paddedSize = size; + * // allocate three extra dwords to hold the size and some debug tags + * paddedSize += 3 * sizeof(NvU32); + * // check for the overflow after increasing the size + * if (paddedSize < size) + * return NV_ERR_INSUFFICIENT_RESOURCES; + * size = paddedSize; + * ... + * } + * ~~~ + * - GOOD - Use provided functions + * ~~~{.c} + * NV_STATUS osAllocMemTracked(void **ppAddress, NvU32 size) + * { + * if (!portSafeAddU32(size, 3*sizeof(NvU32), &size)) + * return NV_ERR_INSUFFICIENT_RESOURCES; + * ... + * } + * ~~~ + * + * @{ + */ + +/** + * @name Core Functions + * @{ + */ + +/** + * @brief Add two signed 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddS8(NvS8 augend, NvS8 addend, NvS8 *pResult); +/** + * @brief Subtract two signed 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubS8(NvS8 minuend, NvS8 subtrahend, NvS8 *pResult); +/** + * @brief Multiply two signed 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulS8(NvS8 multiplicand, NvS8 multiplier, NvS8 *pResult); +/** + * @brief Divide two signed 8bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivS8(NvS8 dividend, NvS8 divisor, NvS8 *pResult); + + +/** + * @brief Add two signed 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddS16(NvS16 augend, NvS16 addend, NvS16 *pResult); +/** + * @brief Subtract two signed 16bit integers, writing the result to *pResult. 
+ */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubS16(NvS16 minuend, NvS16 subtrahend, NvS16 *pResult); +/** + * @brief Multiply two signed 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulS16(NvS16 multiplicand, NvS16 multiplier, NvS16 *pResult); +/** + * @brief Divide two signed 16bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivS16(NvS16 dividend, NvS16 divisor, NvS16 *pResult); + + +/** + * @brief Add two signed 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddS32(NvS32 augend, NvS32 addend, NvS32 *pResult); +/** + * @brief Subtract two signed 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubS32(NvS32 minuend, NvS32 subtrahend, NvS32 *pResult); +/** + * @brief Multiply two signed 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulS32(NvS32 multiplicand, NvS32 multiplier, NvS32 *pResult); +/** + * @brief Divide two signed 32bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivS32(NvS32 dividend, NvS32 divisor, NvS32 *pResult); + + +/** + * @brief Add two signed 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddS64(NvS64 augend, NvS64 addend, NvS64 *pResult); +/** + * @brief Subtract two signed 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubS64(NvS64 minuend, NvS64 subtrahend, NvS64 *pResult); +/** + * @brief Multiply two signed 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulS64(NvS64 multiplicand, NvS64 multiplier, NvS64 *pResult); +/** + * @brief Divide two signed 64bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivS64(NvS64 dividend, NvS64 divisor, NvS64 *pResult); + + + + +/** + * @brief Add two unsigned 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddU8(NvU8 augend, NvU8 addend, NvU8 *pResult); +/** + * @brief Subtract two unsigned 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubU8(NvU8 minuend, NvU8 subtrahend, NvU8 *pResult); +/** + * @brief Multiply two unsigned 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulU8(NvU8 multiplicand, NvU8 multiplier, NvU8 *pResult); +/** + * @brief Divide two unsigned 8bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivU8(NvU8 dividend, NvU8 divisor, NvU8 *pResult); + + +/** + * @brief Add two unsigned 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddU16(NvU16 augend, NvU16 addend, NvU16 *pResult); +/** + * @brief Subtract two unsigned 16bit integers, writing the result to *pResult. 
+ */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubU16(NvU16 minuend, NvU16 subtrahend, NvU16 *pResult); +/** + * @brief Multiply two unsigned 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulU16(NvU16 multiplicand, NvU16 multiplier, NvU16 *pResult); +/** + * @brief Divide two unsigned 16bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivU16(NvU16 dividend, NvU16 divisor, NvU16 *pResult); + + +/** + * @brief Add two unsigned 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddU32(NvU32 augend, NvU32 addend, NvU32 *pResult); +/** + * @brief Subtract two unsigned 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubU32(NvU32 minuend, NvU32 subtrahend, NvU32 *pResult); +/** + * @brief Multiply two unsigned 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulU32(NvU32 multiplicand, NvU32 multiplier, NvU32 *pResult); +/** + * @brief Divide two unsigned 32bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivU32(NvU32 dividend, NvU32 divisor, NvU32 *pResult); + + +/** + * @brief Add two unsigned 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddU64(NvU64 augend, NvU64 addend, NvU64 *pResult); +/** + * @brief Subtract two unsigned 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubU64(NvU64 minuend, NvU64 subtrahend, NvU64 *pResult); +/** + * @brief Multiply two unsigned 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulU64(NvU64 multiplicand, NvU64 multiplier, NvU64 *pResult); +/** + * @brief Divide two unsigned 64bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivU64(NvU64 dividend, NvU64 divisor, NvU64 *pResult); + + +/** + * @brief Add two pointer-sized integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddUPtr(NvUPtr augend, NvUPtr addend, NvUPtr *pResult); +/** + * @brief Subtract two pointer-sized integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubUPtr(NvUPtr minuend, NvUPtr subtrahend, NvUPtr *pResult); +/** + * @brief Multiply two pointer-sized integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulUPtr(NvUPtr multiplicand, NvUPtr multiplier, NvUPtr *pResult); +/** + * @brief Divide two pointer-sized integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivUPtr(NvUPtr dividend, NvUPtr divisor, NvUPtr *pResult); + + +/** + * @brief Add two length integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddLength(NvLength augend, NvLength addend, NvLength *pResult); +/** + * @brief Subtract two length integers, writing the result to *pResult. 
+ */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubLength(NvLength minuend, NvLength subtrahend, NvLength *pResult); +/** + * @brief Multiply two length integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulLength(NvLength multiplicand, NvLength multiplier, NvLength *pResult); +/** + * @brief Divide two length integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivLength(NvLength dividend, NvLength divisor, NvLength *pResult); + + + + + + +/** + * @brief Convert a 8bit signed integer to a 8bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToU8(NvS8 data, NvU8 *pResult); +/** + * @brief Convert a 8bit signed integer to a 16bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToU16(NvS8 data, NvU16 *pResult); +/** + * @brief Convert a 8bit signed integer to a 32bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToU32(NvS8 data, NvU32 *pResult); +/** + * @brief Convert a 8bit signed integer to a 64bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToU64(NvS8 data, NvU64 *pResult); +/** + * @brief Convert a 8bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToUPtr(NvS8 data, NvUPtr *pResult); +/** + * @brief Convert a 8bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToLength(NvS8 data, NvLength *pResult); + + +/** + * @brief Convert a 16bit signed integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToS8(NvS16 data, NvS8 *pResult); +/** + * @brief Convert a 16bit signed integer to a 8bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToU8(NvS16 data, NvU8 *pResult); +/** + * @brief Convert a 16bit signed integer to a 16bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToU16(NvS16 data, NvU16 *pResult); +/** + * @brief Convert a 16bit signed integer to a 32bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToU32(NvS16 data, NvU32 *pResult); +/** + * @brief Convert a 16bit signed integer to a 64bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToU64(NvS16 data, NvU64 *pResult); +/** + * @brief Convert a 16bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToUPtr(NvS16 data, NvUPtr *pResult); +/** + * @brief Convert a 16bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToLength(NvS16 data, NvLength *pResult); + +/** + * @brief Convert a 32bit signed integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToS8(NvS32 data, NvS8 *pResult); +/** + * @brief Convert a 32bit signed integer to a 16bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToS16(NvS32 data, NvS16 *pResult); +/** + * @brief Convert a 32bit signed integer to a 8bit unsigned integer *pResult. 
+ */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToU8(NvS32 data, NvU8 *pResult); +/** + * @brief Convert a 32bit signed integer to a 16bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToU16(NvS32 data, NvU16 *pResult); +/** + * @brief Convert a 32bit signed integer to a 32bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToU32(NvS32 data, NvU32 *pResult); +/** + * @brief Convert a 32bit signed integer to a 64bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToU64(NvS32 data, NvU64 *pResult); +/** + * @brief Convert a 32bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToUPtr(NvS32 data, NvUPtr *pResult); +/** + * @brief Convert a 32bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToLength(NvS32 data, NvLength *pResult); + + +/** + * @brief Convert a 64bit signed integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToS8(NvS64 data, NvS8 *pResult); +/** + * @brief Convert a 64bit signed integer to a 16bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToS16(NvS64 data, NvS16 *pResult); +/** + * @brief Convert a 64bit signed integer to a 32bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToS32(NvS64 data, NvS32 *pResult); +/** + * @brief Convert a 64bit signed integer to a 8bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToU8(NvS64 data, NvU8 *pResult); +/** + * @brief Convert a 64bit signed integer to a 16bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToU16(NvS64 data, NvU16 *pResult); +/** + * @brief Convert a 64bit signed integer to a 32bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToU32(NvS64 data, NvU32 *pResult); +/** + * @brief Convert a 64bit signed integer to a 64bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToU64(NvS64 data, NvU64 *pResult); +/** + * @brief Convert a 64bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToUPtr(NvS64 data, NvUPtr *pResult); +/** + * @brief Convert a 64bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToLength(NvS64 data, NvLength *pResult); + + + +/** + * @brief Convert a 8bit unsigned integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU8ToS8(NvU8 data, NvS8 *pResult); + +/** + * @brief Convert a 16bit unsigned integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU16ToS8(NvU16 data, NvS8 *pResult); +/** + * @brief Convert a 16bit unsigned integer to a 16bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU16ToS16(NvU16 data, NvS16 *pResult); +/** + * @brief Convert a 16bit unsigned integer to a 8bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU16ToU8(NvU16 data, NvU8 *pResult); + + +/** + * @brief Convert a 32bit unsigned integer to a 8bit signed integer *pResult. 
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU32ToS8(NvU32 data, NvS8 *pResult);
+/**
+ * @brief Convert a 32bit unsigned integer to a 16bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU32ToS16(NvU32 data, NvS16 *pResult);
+/**
+ * @brief Convert a 32bit unsigned integer to a 32bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU32ToS32(NvU32 data, NvS32 *pResult);
+/**
+ * @brief Convert a 32bit unsigned integer to a 8bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU32ToU8(NvU32 data, NvU8 *pResult);
+/**
+ * @brief Convert a 32bit unsigned integer to a 16bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU32ToU16(NvU32 data, NvU16 *pResult);
+
+
+/**
+ * @brief Convert a 64bit unsigned integer to a 8bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToS8(NvU64 data, NvS8 *pResult);
+/**
+ * @brief Convert a 64bit unsigned integer to a 16bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToS16(NvU64 data, NvS16 *pResult);
+/**
+ * @brief Convert a 64bit unsigned integer to a 32bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToS32(NvU64 data, NvS32 *pResult);
+/**
+ * @brief Convert a 64bit unsigned integer to a 64bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToS64(NvU64 data, NvS64 *pResult);
+/**
+ * @brief Convert a 64bit unsigned integer to a 8bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToU8(NvU64 data, NvU8 *pResult);
+/**
+ * @brief Convert a 64bit unsigned integer to a 16bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToU16(NvU64 data, NvU16 *pResult);
+/**
+ * @brief Convert a 64bit unsigned integer to a 32bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToU32(NvU64 data, NvU32 *pResult);
+/**
+ * @brief Convert a 64bit unsigned integer to a pointer-sized unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToUPtr(NvU64 data, NvUPtr *pResult);
+/**
+ * @brief Convert a 64bit unsigned integer to a length-sized unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToLength(NvU64 data, NvLength *pResult);
+
+
+/**
+ * @brief Convert a pointer-sized unsigned integer to a 8bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToS8(NvUPtr data, NvS8 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to a 16bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToS16(NvUPtr data, NvS16 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to a 32bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToS32(NvUPtr data, NvS32 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to a 64bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToS64(NvUPtr data, NvS64 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to a 8bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToU8(NvUPtr data, NvU8 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to a 16bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToU16(NvUPtr data, NvU16 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to a 32-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToU32(NvUPtr data, NvU32 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to a 64-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToU64(NvUPtr data, NvU64 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to a length-sized (NvLength) unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToLength(NvUPtr data, NvLength *pResult);
+
+
+/**
+ * @brief Convert a length-sized unsigned integer to an 8-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToS8(NvLength data, NvS8 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to a 16-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToS16(NvLength data, NvS16 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to a 32-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToS32(NvLength data, NvS32 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to a 64-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToS64(NvLength data, NvS64 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to an 8-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToU8(NvLength data, NvU8 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to a 16-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToU16(NvLength data, NvU16 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to a 32-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToU32(NvLength data, NvU32 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to a 64-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToU64(NvLength data, NvU64 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to a pointer-sized unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToUPtr(NvLength data, NvUPtr *pResult);
+
+
+
+/// @} End core functions
+
+/**
+ * @name Extended Functions
+ * @{
+ */
+
+// Place extended functions here
+
+/// @} End extended functions
+
+
+#if NVOS_IS_WINDOWS
+#include "nvport/inline/safe_win.h"
+#else
+#include "nvport/inline/safe_generic.h"
+#endif
+
+#endif // _NVPORT_SAFE_H_
+/// @}
diff --git a/src/nvidia/inc/libraries/nvport/string.h b/src/nvidia/inc/libraries/nvport/string.h
new file mode 100644
index 0000000..ea0409b
--- /dev/null
+++ b/src/nvidia/inc/libraries/nvport/string.h
@@ -0,0 +1,179 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief String module public interface
+ */
+
+#ifndef _NVPORT_H_
+#error "This file cannot be included directly. Include nvport.h instead."
+#endif
+
+#ifndef _NVPORT_STRING_H_
+#define _NVPORT_STRING_H_
+
+/**
+ * @defgroup NVPORT_STRING String module
+ *
+ * @brief This module contains string functionality used by other modules.
+ *
+ * @{
+ */
+
+/**
+ * @name Core Functions
+ * @{
+ */
+
+/**
+ * @brief Compare two strings, character by character.
+ *
+ * Will compare the first 'length' chars of each string, or until the null
+ * terminator is reached in either string, whichever comes first.
+ *
+ * @returns:
+ *   - 0 if all bytes are equal
+ *   - <0 if str1 is less than str2 for the first unequal byte.
+ *   - >0 if str1 is greater than str2 for the first unequal byte.
+ * @par Undefined:
+ * Behavior is undefined if str1 or str2 is NULL.
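+ *
+ * @par Example (illustrative sketch; name and useDefaults are hypothetical):
+ * ~~~{.c}
+ * if (portStringCompare(name, "default", sizeof("default")) == 0)
+ *     useDefaults();
+ * ~~~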
+ */
+NvS32 portStringCompare(const char *str1, const char *str2, NvLength length);
+/**
+ * @brief Copy a string.
+ *
+ * Will copy at most destSize bytes, stopping early if a null terminator is
+ * found or if srcSize bytes are read from the source.
+ *
+ * A null character is always written at the end of the string.
+ *
+ * @param dest destination buffer, of at least destSize bytes (including null terminator).
+ * @param src  source buffer, of at least srcSize bytes (including null terminator).
+ *
+ * @return The number of bytes successfully copied, including the null
+ * terminator; at most min(destSize, srcSize).
+ *
+ * @par Undefined:
+ * Behavior is undefined if the destination buffer is smaller than destSize.
+ * Behavior is undefined if the destination and source overlap.
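+ *
+ * @par Example (illustrative sketch; src is a hypothetical null-terminated string):
+ * ~~~{.c}
+ * char name[16];
+ * NvLength copied = portStringCopy(name, sizeof(name),
+ *                                  src, portStringLength(src) + 1);
+ * ~~~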
+ */
+NvLength portStringCopy(char *dest, NvLength destSize, const char *src, NvLength srcSize);
+/**
+ * @brief Concatenate two strings
+ *
+ * Will copy the cat string after the end of str, copying only until the str
+ * buffer is filled. str is assumed to point to a buffer of at least strSize
+ * bytes.
+ *
+ * A null character is always written at the end of the string.
+ *
+ * @return str if the concatenation succeeded.
+ *
+ * @par Undefined:
+ * Behavior is undefined if the destination buffer is smaller than strSize.
+ * Behavior is undefined if the destination and source overlap.
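+ *
+ * @par Example (illustrative sketch):
+ * ~~~{.c}
+ * char msg[32];
+ * portStringCopy(msg, sizeof(msg), "GPU", sizeof("GPU"));
+ * portStringCat(msg, sizeof(msg), " ready", sizeof(" ready")); // msg == "GPU ready"
+ * ~~~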
+ */
+char *portStringCat(char *str, NvLength strSize, const char *cat, NvLength catSize);
+
+
+/**
+ * @brief Returns the index of the first null byte in the given string
+ *
+ */
+NvLength portStringLength(const char *str);
+
+
+/**
+ * @brief Returns the index of the first null byte in the given string,
+ * searching at most maxLength chars. If no null byte is found, it returns
+ * maxLength.
+ *
+ */
+NvLength portStringLengthSafe(const char *str, NvLength maxLength);
+
+
+/**
+ * @brief Converts a string from ASCII (8-bit) to UTF-16 (16-bit)
+ *
+ * Can perform the conversion in place if dest == src.
+ *
+ * @returns The number of characters in the destination buffer, without the
+ * null terminator (i.e. strlen(dest))
+ */
+NvLength portStringConvertAsciiToUtf16(NvU16 *dest, NvLength destSize, const char *src, NvLength srcSize);
+
+/**
+ * @brief Writes the hexadecimal string representation of the buffer
+ *
+ * @returns The number of characters in the destination buffer, without the
+ * null terminator (i.e. strlen(str))
+ */
+NvLength portStringBufferToHex(char *str, NvLength strSize, const NvU8 *buf, NvLength bufSize);
+
+/**
+ * @brief Convert a binary buffer into readable groups of hex digits
+ *
+ * @param groupCount - Number of groups
+ * @param groups     - How to structure the groups (in number of hex chars)
+ * @param separator  - Character to separate the groups
+ *
+ * For the traditional display of UUIDs, there would be five groups, {8,4,4,4,12},
+ * with the separator being '-'.
+ *
+ * @note Odd numbers for group sizes are not supported; they will be rounded down.
+ *
+ * @returns The number of characters in the destination buffer, without the
+ * null terminator (i.e. strlen(str))
+ */
+NvLength portStringBufferToHexGroups(char *str, NvLength strSize, const NvU8 *buf, NvLength bufSize, NvLength groupCount, const NvU32 *groups, const char *separator);
+
+/**
+ * @brief Breaks the string into a series of tokens using the delimiter
+ *
+ * @returns A pointer to the next token, or NULL if there are no more tokens
+ */
+char *portStringTok(char *str, const char *delim, char **saveptr);
+
+/**
+ * @brief Returns a pointer to the first occurrence of substr in str
+ *
+ */
+char *portStringStrStr(char *str, char *substr);
+
+/**
+ * @brief Returns a pointer to the first occurrence of the character c in str
+ *
+ */
+const char *portStringStrChar(const char *str, int c);
+
+/// @} End core functions
+
+/**
+ * @name Extended Functions
+ * @{
+ */
+
+// Place extended functions here
+
+/// @} End extended functions
+
+#endif // _NVPORT_STRING_H_
+/// @}
diff --git a/src/nvidia/inc/libraries/nvport/sync.h b/src/nvidia/inc/libraries/nvport/sync.h
new file mode 100644
index 0000000..29e472d
--- /dev/null
+++ b/src/nvidia/inc/libraries/nvport/sync.h
@@ -0,0 +1,829 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief Sync module public interface
+ */
+
+#ifndef _NVPORT_H_
+#error "This file cannot be included directly. Include nvport.h instead."
+#endif
+
+#ifndef _NVPORT_SYNC_H_
+#define _NVPORT_SYNC_H_
+/**
+ * @defgroup NVPORT_SYNC Synchronization
+ * @brief This module includes synchronization primitives.
+ *
+ * @note The module provides two types of constructors:
+ *  - portSyncXXXInitialize initializes the structure in the caller provided
+ *    memory.
+ *  - portSyncXXXCreate takes a @ref PORT_MEM_ALLOCATOR object that is used to
+ *    allocate the memory. This memory is freed when the object is destroyed.
+ * If running in kernel mode, the provided memory (or allocator) must be
+ * non-paged. The functions do not check this, and behavior is undefined if
+ * the object is allocated in paged memory.
+ *
+ * Typical usage of synchronization objects is:
+ * ~~~{.c}
+ * PORT_XXX *pXxx = portSyncXxxCreate(pAllocator);
+ * if (!pXxx)
+ *     return NV_ERR_INSUFFICIENT_RESOURCES;
+ *
+ * portSyncXxxAcquire(pXxx);
+ * doCriticalSection();
+ * portSyncXxxRelease(pXxx);
+ * portSyncXxxDestroy(pXxx);
+ * ~~~
+ *
+ * @par Checked builds only:
+ * The functions will assert the needed IRQL/interrupt requirements. These are
+ * specified for every function in a "Precondition" block.
+ *
+ * @note The IRQL/interrupt context requirements listed in "Precondition" blocks
+ * are only valid for Kernel Mode builds of NvPort. Usermode builds have no such
+ * restrictions.
+ * @{
+ */
+
+#if !PORT_IS_MODULE_SUPPORTED(memory)
+#error "NvPort SYNC module requires MEMORY module to be present."
+#endif
+
+#if LOCK_VAL_ENABLED
+#define PORT_SYNC_RENAME_SUFFIX _REAL
+#include "inline/sync_rename.h"
+#endif
+
+/**
+ * Platform-specific inline implementations
+ */
+#if NVOS_IS_LIBOS
+#include "nvport/inline/sync_libos.h"
+#endif
+
+/**
+ * @name Core Functions
+ * @{
+ */
+
+/**
+ * @brief Initializes global SYNC tracking structures
+ *
+ * This function is called by @ref portInitialize. It is available here in case
+ * the SYNC module needs to be initialized without initializing all the
+ * others, e.g. for unit tests.
+ */
+void portSyncInitialize(void);
+
+/**
+ * @brief Destroys global SYNC tracking structures
+ *
+ * This function is called by @ref portShutdown. It is available here in case
+ * the SYNC module needs to be shut down without shutting down all the
+ * others, e.g. for unit tests.
+ */
+void portSyncShutdown(void);
+
+/**
+ * @brief A spinlock data type.
+ *
+ * For documentation on what a spinlock is and how it behaves, see
+ * https://en.wikipedia.org/wiki/Spinlock
+ *
+ *  - A valid spinlock is any which is non-NULL
+ *  - Spinlocks are not recursive.
+ *  - Spinlocks will not put the thread to sleep.
+ *  - No pageable data or code can be accessed while holding a spinlock (@ref
+ *    portMemAllocPaged).
+ *  - Spinlocks can be used in ISRs.
+ *
+ * @par Undefined:
+ * The behavior is undefined if the spinlock is acquired by one thread and
+ * released by another.
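+ *
+ * @par Example (illustrative sketch; pAllocator and touchSharedState are hypothetical):
+ * ~~~{.c}
+ * PORT_SPINLOCK *pLock = portSyncSpinlockCreate(pAllocator);
+ * if (pLock == NULL)
+ *     return NV_ERR_INSUFFICIENT_RESOURCES;
+ *
+ * portSyncSpinlockAcquire(pLock);
+ * touchSharedState(); // must not sleep or touch pageable memory here
+ * portSyncSpinlockRelease(pLock);
+ * portSyncSpinlockDestroy(pLock);
+ * ~~~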
+ */ +typedef struct PORT_SPINLOCK PORT_SPINLOCK; +/** + * @brief Size (in bytes) of the @ref PORT_SPINLOCK structure + */ +extern NvLength portSyncSpinlockSize; + +/** + * @brief Initializes a spinlock using caller provided memory. + * + * Spinlocks are initialized in the released state. After a spinlock is + * initialized it can only be freed or acquired. + * + * On some platforms the underlying platform code may allocate memory. + * This memory will be freed upon calling @ref portSyncSpinlockDestroy. + * + * @par Undefined: + * Initializing a spinlock multiple times is undefined.
+ * Using a spinlock before it is initialized results in undefined behavior. + * + * @return + * - NV_OK if successful + * - NV_ERR_INVALID_POINTER if pSpinlock is NULL + * - Can return other NV_STATUS values from the OS interface layer. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + */ +NV_STATUS portSyncSpinlockInitialize(PORT_SPINLOCK *pSpinlock); + +/** + * @brief Creates a new spinlock using the provided allocator. The newly created + * spinlock is initialized, as if @ref portSyncSpinlockInitialize was called. + * + * @par Checked builds only: + * Will assert if pAllocator == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NULL on failed allocation / initialization. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->alloc, which may have additional restrictions. + */ +PORT_SPINLOCK *portSyncSpinlockCreate(PORT_MEM_ALLOCATOR *pAllocator); + +/** + * @brief Destroys a spinlock created with @ref portSyncSpinlockInitialize or + * @ref portSyncSpinlockCreate + * + * This frees any internally allocated resources that may be associated with + * the spinlock. If the spinlock was created using @ref portSyncSpinlockCreate, + * the memory will also be freed. + * + * @par Checked builds only: + * Will assert if pSpinlock == NULL
+ * Will assert if the lock is being held
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Behavior is undefined if called on an uninitialized spinlock.
+ * Behavior is undefined if called on a currently acquired spinlock.
+ * Behavior is undefined if any operation is performed on a spinlock that has + * been destroyed. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->free, which may have additional restrictions. + */ +void portSyncSpinlockDestroy(PORT_SPINLOCK *pSpinlock); + +/** + * @brief Acquires a spinlock + * + * Blocks until the spinlock is acquired. + * + * Recursive acquires are not allowed and will result in a deadlock. + * + * @par Checked builds only: + * Will assert if pSpinlock == NULL
+ * Will assert if the lock is held by the current thread + * + * @pre Windows: Any IRQL + * @pre Unix: Interrupt context is OK. + * @note Will not put the thread to sleep. + * @post Will raise the IRQL / mask interrupts + */ +void portSyncSpinlockAcquire(PORT_SPINLOCK *pSpinlock); + +/** + * @brief Releases a spinlock acquired with @ref portSyncSpinlockAcquire. + * + * @par Checked builds only: + * Will assert if pSpinlock == NULL
+ * Will assert if the lock is not held by the current thread + * + * @par Undefined: + * Behavior is undefined if the spinlock has not previously been acquired. + * + * @pre Windows: Any IRQL + * @pre Unix: Interrupt context is OK. + * @post Will restore the IRQL / interrupts + */ +void portSyncSpinlockRelease(PORT_SPINLOCK *pSpinlock); + +/** + * @brief A mutex data type. + * + * A PORT_MUTEX is a classic mutex that follows the following rules. + * - Only a single thread can hold the mutex. + * - The thread that acquires the mutex must be the one to release it. + * - Failure to acquire the mutex may result in the thread blocking and not + * resuming until the mutex is available. + * - Failure of a thread to release a mutex before it exits can result in a + * deadlock if any other threads attempts to acquire it. + * - Mutexes are not recursive. + * - Mutexes may put the thread to sleep. + * + * Mutexes can be used on IRQL <= DISPATCH_LEVEL on Windows, and in + * non-interrupt context on Unix. + */ +typedef struct PORT_MUTEX PORT_MUTEX; + +/** + * @brief Size (in bytes) of the @ref PORT_MUTEX structure + */ +extern NvLength portSyncMutexSize; + +/** + * @brief Creates a new mutex using the provided allocator. The newly created + * mutex is initialized, as if @ref portSyncMutexInitialize was called. + * + * @par Checked builds only: + * Will assert if pAllocator == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NULL on failed allocation / initialization. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->alloc, which may have additional restrictions. + */ +PORT_MUTEX *portSyncMutexCreate(PORT_MEM_ALLOCATOR *pAllocator); +/** + * @brief Initializes a mutex using caller provided memory. + * + * Mutexes are initialized in the released state. After a mutex is + * initialized it can only be freed or acquired. + * + * On some platforms the underlying platform code may allocate memory. + * This memory will be freed upon calling @ref portSyncMutexDestroy. + * + * @par Checked builds only: + * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Initializing a mutex multiple times is undefined.
+ * Using a mutex before it is initialized results in undefined behavior. + * + * @return + * - NV_OK if successful + * - NV_ERR_INVALID_POINTER if pMutex is NULL + * - Can return other NV_STATUS values from the OS interface layer. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + */ +NV_STATUS portSyncMutexInitialize(PORT_MUTEX *pMutex); +/** + * @brief Destroys a mutex created with @ref portSyncMutexInitialize or + * @ref portSyncMutexCreate + * + * This frees any internally allocated resources that may be associated with + * the mutex. If the mutex was created using @ref portSyncMutexCreate, + * the memory will also be freed. + * + * @par Checked builds only: + * Will assert if pMutex == NULL
+ * Will assert if the lock is being held
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Behavior is undefined if called on an uninitialized mutex.
+ * Behavior is undefined if the mutex is currently acquired and it is + * destroyed.
+ * Behavior is undefined if any operation is performed on a mutex that has + * been destroyed. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->free, which may have additional restrictions. + */ +void portSyncMutexDestroy(PORT_MUTEX *pMutex); + +/** + * @brief Acquires a mutex. + * + * If the mutex is already held a call will block and the thread may be put to + * sleep until it is released. + * + * @par Checked builds only: + * Will assert if pMutex == NULL
+ * Will assert if the lock is held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note May put the thread to sleep. + */ +void portSyncMutexAcquire(PORT_MUTEX *pMutex); + +/** + * @brief Attempts to acquire a mutex without blocking. + * + * A call to this function will immediately return NV_TRUE with the mutex + * acquired by the calling thread if the mutex is not held by another thread. + * It will immediately return NV_FALSE if the mutex is held by another thread. + * + * If the mutex is held by the calling thread then this call will always fail. + * + * @par Checked builds only: + * Will assert if pMutex == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +NvBool NV_FORCERESULTCHECK portSyncMutexAcquireConditional(PORT_MUTEX *pMutex); + +/** + * @brief Releases a mutex held by the current thread. + * + * A call to this function releases control of the mutex. Immediately on + * return of this function another thread will be allowed to acquire the mutex. + * + * @par Checked builds only: + * Will assert if pMutex == NULL
+ * Will assert if the lock is not held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Attempting to release a mutex not held by the current thread will result in + * undefined behavior + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +void portSyncMutexRelease(PORT_MUTEX *pMutex); + +PORT_INLINE void portSyncMutexReleaseOutOfOrder(PORT_MUTEX *pMutex) +{ + portSyncMutexRelease(pMutex); +} + +/** + * @brief Represents a semaphore data type. + * + * This behaves as you would expect a classic semaphore to. It follows the + * following rules: + * - A semaphore is initialized with a starting value + * - Acquiring the semaphore decrements the count. If the count is 0 it will + * block until the count is non-zero. + * - Releasing the semaphore increments the count. + * - A semaphore can be acquired or released by any thread and a + * acquire/release pair are not required to be from the same thread. + * - PORT_SEMAPHORE is a 32 bit semaphore. + * - Semaphores may put the thread to sleep. + * + * Semaphores have varying IRQL restrictions on Windows, which is documented for + * every function separately. + * They can only be used in non-interrupt context on Unix. + */ +typedef struct PORT_SEMAPHORE PORT_SEMAPHORE; +/** + * @brief Size (in bytes) of the @ref PORT_SEMAPHORE structure + */ +extern NvLength portSyncSemaphoreSize; + +/** + * @brief Initializes a semaphore using caller provided memory. + * + * Semaphores are initialized with startValue. + * + * On some platforms the underlying platform code may allocate memory. + * This memory will be freed upon calling @ref portSyncSemaphoreDestroy. + * + * @par Checked builds only: + * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Initializing a semaphore multiple times is undefined.
+ * Using a semaphore before it is initialized results in undefined behavior. + * + * @return + * - NV_OK if successful + * - NV_ERR_INVALID_POINTER if pSemaphore is NULL + * - Can return other NV_STATUS values from the OS interface layer. + * + * @pre Windows: IRQL == PASSIVE_LEVEL + * @pre Unix: Non-interrupt context + */ +NV_STATUS portSyncSemaphoreInitialize(PORT_SEMAPHORE *pSemaphore, NvU32 startValue); +/** + * @brief Creates a new semaphore using the provided allocator. The newly + * created semaphore is initialized, as if @ref portSyncSemaphoreInitialize + * was called. + * + * @par Checked builds only: + * Will assert if pAllocator == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NULL on failed allocation / initialization. + * + * @pre Windows: IRQL == PASSIVE_LEVEL + * @pre Unix: Non-interrupt context + */ +PORT_SEMAPHORE *portSyncSemaphoreCreate(PORT_MEM_ALLOCATOR *pAllocator, NvU32 startValue); +/** + * @brief Destroys a semaphore created with @ref portSyncSemaphoreInitialize or + * @ref portSyncSemaphoreCreate + * + * This frees any internally allocated resources that may be associated with + * the semaphore. If the semaphore was created using + * @ref portSyncSemaphoreCreate, the memory will also be freed. + * + * @par Checked builds only: + * Will assert if pSemaphore == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Behavior is undefined if called on an uninitialized semaphore.
+ * Behavior is undefined if the semaphore is currently acquired and it is + * destroyed.
+ * Behavior is undefined if any operation is performed on a semaphore that has + * been destroyed. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->free, which may have additional restrictions. + */ +void portSyncSemaphoreDestroy(PORT_SEMAPHORE *pSemaphore); +/** + * @brief Acquires (decrements) a semaphore. + * + * @par Checked builds only: + * Will assert if pSemaphore == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note May put the thread to sleep. + */ +void portSyncSemaphoreAcquire(PORT_SEMAPHORE *pSemaphore); +/** + * @brief Conditionally acquires a semaphore. + * + * A call to this function will immediately return NV_TRUE and acquire the + * semaphore if it can be done without blocking. + * + * It will immediately return NV_FALSE if acquiring the semaphore would require + * blocking. + * + * @par Checked builds only: + * Will assert if pSemaphore == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +NvBool NV_FORCERESULTCHECK portSyncSemaphoreAcquireConditional(PORT_SEMAPHORE *pSemaphore); +/** + * @brief Releases (increments) a semaphore. + * + * @par Checked builds only: + * Will assert if pSemaphore == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +void portSyncSemaphoreRelease(PORT_SEMAPHORE *pSemaphore); + +PORT_INLINE void portSyncSemaphoreReleaseOutOfOrder(PORT_SEMAPHORE *pSemaphore) +{ + portSyncSemaphoreRelease(pSemaphore); +} + +/** + * @brief Represents a readers-writer lock data type. + * + * AcquireRead and AcquireWrite will do a sleeping wait if the lock isn't + * immediately available. + * + * PORT_RWLOCK prevents starvation of both readers and writers. + * + * @par Undefined: + * Any irregular use will result in undefined behavior. This includes: + * - One thread acquiring both read and write locks + * - Performing operations on an uninitialized/destroyed lock + * - Releasing locks which weren't acquired by the releasing thread + * - Acquiring the same lock twice without releasing (it is not recursive) + * + * @note If you desire to upgrade the lock (shared->exclusive), you must first + * release the shared lock, then acquire the exclusive. + */ +typedef struct PORT_RWLOCK PORT_RWLOCK; +/** + * @brief Size (in bytes) of the @ref PORT_RWLOCK structure + */ +extern NvLength portSyncRwLockSize; + +/** + * @brief Initializes a RWLock using caller provided memory. + * + * On some platforms the underlying platform code may allocate memory. + * This memory will be freed upon calling @ref portSyncRwLockDestroy. + * + * @par Checked builds only: + * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Initializing a lock multiple times is undefined.
+ * Using a lock before it is initialized results in undefined behavior. + * + * @return + * - NV_OK if successful + * - NV_ERR_INVALID_POINTER if pLock is NULL + * - Can return other NV_STATUS values from the OS interface layer. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + */ +NV_STATUS portSyncRwLockInitialize(PORT_RWLOCK *pLock); +/** + * @brief Creates and initializes a new RWLock using the provided allocator. + * + * @par Checked builds only: + * Will assert if pAllocator == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NULL on failed allocation / initialization. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + */ +PORT_RWLOCK *portSyncRwLockCreate(PORT_MEM_ALLOCATOR *pAllocator); +/** + * @brief Acquires the read (shared) lock on the given RW_LOCK + * + * If the lock cannot be immediately acquired, the thread will sleep. + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the lock is held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note May put the thread to sleep. + * @post Windows: Normal APCs will be disabled. + */ +void portSyncRwLockAcquireRead(PORT_RWLOCK *pLock); +/** + * @brief Conditionally acquires the read (shared) lock on the given RW_LOCK + * + * If the lock cannot be immediately acquired, it will return NV_FALSE without + * blocking. + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NV_TRUE if the lock was acquired. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + * @post Windows: Normal APCs will be disabled if the lock was acquired. + */ +NvBool NV_FORCERESULTCHECK portSyncRwLockAcquireReadConditional(PORT_RWLOCK *pLock); +/** + * @brief Acquires the write (exclusive) lock on the given RW_LOCK + * + * If the lock cannot be immediately acquired, the thread will sleep. + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the lock is held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note May put the thread to sleep. + * @post Windows: Normal APCs will be disabled. + */ +void portSyncRwLockAcquireWrite(PORT_RWLOCK *pLock); +/** + * @brief Conditionally acquires the write (exclusive) lock on the given RW_LOCK + * + * If the lock cannot be immediately acquired, it will return NV_FALSE without + * blocking. + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NV_TRUE if the lock was acquired. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + * @post Windows: Normal APCs will be disabled if the lock was acquired. + */ +NvBool NV_FORCERESULTCHECK portSyncRwLockAcquireWriteConditional(PORT_RWLOCK *pLock); +/** + * @brief Releases the read (shared) lock on the given RW_LOCK + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the lock is not held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + * @post Windows: Normal APCs will be re-enabled. + */ +void portSyncRwLockReleaseRead(PORT_RWLOCK *pLock); + +PORT_INLINE void portSyncRwLockReleaseReadOutOfOrder(PORT_RWLOCK *pLock) +{ + portSyncRwLockReleaseRead(pLock); +} + +/** + * @brief Releases the write (exclusive) lock on the given RW_LOCK + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the lock is not held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + * @post Windows: Normal APCs will be re-enabled. + */ +void portSyncRwLockReleaseWrite(PORT_RWLOCK *pLock); + +PORT_INLINE void portSyncRwLockReleaseWriteOutOfOrder(PORT_RWLOCK *pLock) +{ + portSyncRwLockReleaseWrite(pLock); +} + +/** + * @brief Destroys a RWLock created with @ref portSyncRwLockCreate o + * + * This frees any internally allocated resources that may be associated with + * the lock. + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the lock is being held
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Behavior is undefined if called on an uninitialized lock.
+ * Behavior is undefined if the lock is currently acquired and it is destroyed.
+ * Behavior is undefined if any operation is performed on a lock that has + * been destroyed. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->free, which may have additional restrictions. + */ +void portSyncRwLockDestroy(PORT_RWLOCK *pLock); + + +/// @} End core functions + +/** + * @name Extended Functions + * @{ + */ + +/** + * @brief Creates a new fast mutex using the provided allocator. The newly + * created mutex is initialized, as if @ref portSyncExFastMutexInitialize was + * called. + * + * See @ref portSyncExFastMutexInitialize for details on fast mutex objects. + * + * @par Checked builds only: + * Will assert if pAllocator == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NULL on failed allocation / initialization. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + */ +PORT_MUTEX *portSyncExFastMutexCreate(PORT_MEM_ALLOCATOR *pAllocator); +/** + * @brief Initializes a fast mutex using caller provided memory. + * + * A fast mutex is a subtype of the @ref PORT_MUTEX object that is generally + * more performant, but cannot be acquired from DPCs (IRQL == DISPATCH_LEVEL), + * even when using @ref portSyncMutexAcquireConditional. + * + * Code allocating fast mutex objects must ensure that conditional acquire is + * never attempted at DISPATCH_LEVEL. In checked builds, an assert will be + * triggered if this is not satisfied. + * + * Other than the limitation above, fast mutex objects have the same interface + * as regular @ref PORT_MUTEX objects. + * + * @par Checked builds only: + * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Initializing a mutex multiple times is undefined.
+ * Using a mutex before it is initialized results in undefined behavior.
+ *
+ * @return
+ *   - NV_OK if successful
+ *   - NV_ERR_INVALID_POINTER if pMutex is NULL
+ *   - Can return other NV_STATUS values from the OS interface layer.
+ *
+ * @pre Windows: IRQL <= DISPATCH_LEVEL
+ */
+NV_STATUS portSyncExFastMutexInitialize(PORT_MUTEX *pMutex);
+
+// Fast mutexes only make sense on Windows kernel mode
+#define portSyncExFastMutexCreate_SUPPORTED     (PORT_IS_KERNEL_BUILD && NVOS_IS_WINDOWS)
+#define portSyncExFastMutexInitialize_SUPPORTED (PORT_IS_KERNEL_BUILD && NVOS_IS_WINDOWS)
+
+/**
+ * @brief Returns true if it is safe to put the current thread to sleep.
+ *
+ * Safety in this case relates only to the current interrupt level, and does not
+ * take into account any locks held by the thread that may result in deadlocks.
+ */
+NvBool portSyncExSafeToSleep(void);
+#define portSyncExSafeToSleep_SUPPORTED PORT_IS_KERNEL_BUILD
+/**
+ * @brief Returns true if it is safe to wake other threads.
+ *
+ * Safety in this case relates only to the current interrupt level.
+ */
+NvBool portSyncExSafeToWake(void);
+#define portSyncExSafeToWake_SUPPORTED PORT_IS_KERNEL_BUILD
+/**
+ * @brief Returns the platform-specific representation of the interrupt level.
+ *
+ * On platforms that have multiple interrupt levels (e.g. Windows), this will
+ * return the numeric representation that the underlying platform uses.
+ *
+ * If a platform only has a binary distinction, this will return 0 or 1.
+ *
+ * On platforms where the concept of interrupt levels does not exist, it will
+ * return 0.
+ */
+NvU64 portSyncExGetInterruptLevel(void);
+#define portSyncExGetInterruptLevel_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Disable preemption on a given CPU
+ *
+ * After calling this function, the thread will not be scheduled out of the
+ * current CPU until a call to @ref portSyncExRestorePreemption is made.
+ *
+ * The thread may still be paused to service an IRQ on the same CPU, but upon
+ * completion, execution will resume on the same CPU.
+ *
+ * @pre Can be called at any IRQL/interrupt context
+ * @post Blocking calls are prohibited while preemption is disabled.
+ *
+ * @return Returns the previous preemption state, which should be passed on to
+ * @ref portSyncExRestorePreemption
+ */
+NvU64 portSyncExDisablePreemption(void);
+/**
+ * @todo bug 1583359 - Implement for other platforms
+ * Only on Windows for now, needed for bug 1995797
+ */
+#define portSyncExDisablePreemption_SUPPORTED (PORT_IS_KERNEL_BUILD && NVOS_IS_WINDOWS)
+
+/**
+ * @brief Restores the previous preemption state
+ *
+ * See @ref portSyncExDisablePreemption for details
+ */
+void portSyncExRestorePreemption(NvU64 preemptionState);
+#define portSyncExRestorePreemption_SUPPORTED (PORT_IS_KERNEL_BUILD && NVOS_IS_WINDOWS)
+
+
+/// @} End extended functions
+
+#include "nvport/inline/sync_tracking.h"
+
+#endif // _NVPORT_SYNC_H_
+/// @}
diff --git a/src/nvidia/inc/libraries/nvport/thread.h b/src/nvidia/inc/libraries/nvport/thread.h
new file mode 100644
index 0000000..5b9234f
--- /dev/null
+++ b/src/nvidia/inc/libraries/nvport/thread.h
@@ -0,0 +1,318 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief Thread module public interface
+ */
+
+#ifndef _NVPORT_H_
+#error "This file cannot be included directly. Include nvport.h instead."
+#endif
+
+#ifndef _NVPORT_THREAD_H_
+#define _NVPORT_THREAD_H_
+
+/**
+ * Platform-specific inline implementations
+ */
+#if NVOS_IS_LIBOS
+#include "nvport/inline/thread_libos.h"
+#endif
+
+/**
+ * @defgroup NVPORT_THREAD Threading module
+ *
+ * @brief This module contains basic threading functionality.
+ *
+ * @{
+ */
+
+/**
+ * @name Core Functions
+ * @{
+ */
+
+/**
+ * @brief Opaque structure representing a thread.
+ *
+ * Structure is allocated on the stack.
+ */
+struct PORT_THREAD
+{
+    NvU64 threadId;
+};
+
+typedef struct PORT_THREAD PORT_THREAD;
+
+/**
+ * @brief Opaque structure representing a process.
+ *
+ * While this structure is opaque, you can still allocate it on the stack.
+ */
+struct PORT_PROCESS
+{
+    NvU64 pid;
+};
+
+typedef struct PORT_PROCESS PORT_PROCESS;
+
+/// @brief An invalid thread handle. OS-dependent.
+extern const PORT_THREAD PORT_THREAD_INVALID;
+/// @brief An invalid process handle. OS-dependent.
+extern const PORT_PROCESS PORT_PROCESS_INVALID;
+
+/**
+ * @brief Returns true if the given thread handle is valid.
+ */
+NvBool portThreadIsValid(PORT_THREAD thread);
+
+/**
+ * @brief Get the handle of the currently executing thread.
+ *
+ * @note In the case of win-user (Windows user mode) you need to destroy the
+ * returned thread.
+ */
+PORT_THREAD portThreadGetCurrentThread(void);
+
+/**
+ * @brief Get the thread handle by the thread ID.
+ *
+ * This ID translates directly into the underlying platform's thread ID.
+ *
+ * @returns PORT_THREAD_INVALID if the ID is not valid, the thread handle if it is.
+ *
+ * @note In the case of win-user (Windows user mode) you need to destroy the
+ * returned thread.
+ */
+PORT_THREAD portThreadGetThreadById(NvU64 threadId);
+
+/**
+ * @brief Get the id of the currently executing thread.
+ */
+NvU64 portThreadGetCurrentThreadId(void);
+
+/**
+ * @brief Get the process id of the currently executing thread.
+ */
+NvU64 portThreadGetCurrentProcessId(void);
+
+/**
+ * @brief Compare two thread handles
+ *
+ * @returns NV_TRUE if the handles are equal.
+ */
+NvBool portThreadEqual(PORT_THREAD thread1, PORT_THREAD thread2);
+
+/**
+ * @brief A thread's "main" function. The arguments are passed through a single
+ * void*, which the thread can then cast accordingly.
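+ *
+ * @par Example (illustrative sketch; workerMain, WORKER_CTX and doWork are
+ * hypothetical, only the port types and functions are real):
+ * ~~~{.c}
+ * static NvS32 workerMain(void *arg)
+ * {
+ *     WORKER_CTX *pCtx = (WORKER_CTX *)arg; // cast back to the caller's type
+ *     return doWork(pCtx);
+ * }
+ *
+ * // Later, in the creating thread:
+ * PORT_THREAD thread;
+ * NV_STATUS status = portThreadCreate(&thread, workerMain, &ctx);
+ * ~~~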
+ */
+typedef NvS32 (*PORT_THREAD_MAIN)(void *);
+
+/**
+ * @brief A thread constructor
+ *
+ * Creates a thread with the given main function and its argument. The created
+ * thread will immediately start executing. Any synchronization should be done
+ * in the thread body.
+ *
+ * @param [out] pThread    - The new thread's handle
+ * @param [in]  threadMain - The new thread's main() function
+ * @param [in]  argument   - The void* pointer to be passed into the thread's main()
+ *
+ * @return NV_OK on success
+ *
+ * @todo Should we provide a flag to automatically destroy the thread when finished?
+ */
+NV_STATUS portThreadCreate(PORT_THREAD *pThread, PORT_THREAD_MAIN threadMain, void *argument);
+
+/**
+ * @brief A thread destructor
+ *
+ * Destroys the given thread, freeing any resources associated with it. If the
+ * specified thread has not finished its execution, this call will block until
+ * it finishes.
+ *
+ * Will assert if called on a thread that hasn't been created using
+ * @ref portThreadCreate
+ */
+void portThreadDestroy(PORT_THREAD thread);
+
+/**
+ * @brief End execution of the current thread, returning the status.
+ *
+ * This behaves like the C standard exit(int) function: execution is
+ * immediately stopped, without any stack unwinding. No resources allocated in
+ * the thread are freed. The status is returned to the parent thread.
+ *
+ * @par Kernel mode only:
+ * Will assert if called on a thread not created by @ref portThreadCreate.
+ * In user mode, this is acceptable (the equivalent of calling exit() from main()).
+ */
+void portThreadExit(NvS32 status);
+
+/**
+ * @brief Block the current thread until the given thread has finished.
+ *
+ * Sometimes called a thread join operation. The current thread is suspended
+ * until threadToWaitOn has completed execution, either by returning from the
+ * main function, by calling @ref portThreadExit, or by being killed by @ref
+ * portThreadKill.
+ *
+ * The current thread can also be awoken by @ref portThreadWake.
+ *
+ * @param [out, optional] pReturnStatus - The finished thread's return status.
+ */
+NV_STATUS portThreadWaitToComplete(PORT_THREAD threadToWaitOn, NvS32 *pReturnStatus);
+
+/**
+ * @brief Move the current thread to the end of the run queue
+ *
+ * The OS schedules other waiting threads to run before returning to the
+ * current thread.
+ *
+ * This function must not be called in interrupt context or at raised IRQL. It
+ * may not be advisable to call this function while holding various RM locks.
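+ *
+ * @par Example (illustrative polling sketch; bWorkPending is hypothetical):
+ * ~~~{.c}
+ * while (bWorkPending)
+ *     portThreadYield(); // let other runnable threads make progress
+ * ~~~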
+ */
+void portThreadYield(void);
+
+/// @} End core functions
+
+/**
+ * @name Extended Functions
+ * @{
+ */
+
+/**
+ * @brief Get the priority of a given thread as an integer
+ *
+ * The priority values are defined by the OS, but they can be compared with < and >
+ */
+NvU64 portThreadExGetPriority(NvU64 threadId);
+#define portThreadExGetPriority_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS)
+
+/**
+ * @brief Set the priority of a given thread
+ *
+ * The only valid values are those returned by a previous call to
+ * @ref portThreadExGetPriority, though not necessarily on the same thread object
+ */
+void portThreadExSetPriority(NvU64 threadId, NvU64 priority);
+#define portThreadExSetPriority_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS)
+
+#if PORT_IS_FUNC_SUPPORTED(portThreadExGetPriority)
+extern const NvU64 portThreadPriorityMin;
+extern const NvU64 portThreadPriorityDefault;
+extern const NvU64 portThreadPriorityLowRealtime;
+extern const NvU64 portThreadPriorityMax;
+#endif
+
+/**
+ * @brief Structure representing the processor affinity of a thread.
+ *
+ * This structure describes a thread affinity, which is a set of processors on
+ * which a thread is allowed to run. All of the processors in this set belong
+ * to the group that is identified by the cpuGroup member of the structure.
+ * The mask member contains an affinity mask that identifies the processors in
+ * the set (up to 64 processors).
+ */
+typedef struct PORT_THREAD_AFFINITY
+{
+    NvU64 cpuGroup;
+    NvU64 mask;
+} PORT_THREAD_AFFINITY;
+
+/**
+ * @brief Set the affinity of the current thread.
+ * @param [in]  pAffinity    - Pointer to the affinity structure.
+ * @param [out] pPreAffinity - Pointer to the previous affinity structure.
+ * @return NV_OK if successful; otherwise one of the following errors:
+ *         NV_ERR_INVALID_IRQ_LEVEL: IRQL is >= DISPATCH_LEVEL in Windows drivers.
+ *         NV_ERR_INVALID_ARGUMENT:  Either of the passed arguments is NULL.
+ */
+NV_STATUS portThreadExSetAffinity(const PORT_THREAD_AFFINITY *pAffinity,
+                                  PORT_THREAD_AFFINITY *pPreAffinity);
+#define portThreadExSetAffinity_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS)
+
+/**
+ * @brief Set the affinity of the current thread with the input as a logical core index
+ *
+ * @param [in]  coreIndex     Logical core to which affinity needs to be set. For
+ *                            systems with more than one group, clients need to
+ *                            compute the required core index.
+ *
+ * @param [out] pPrevAffinity Pointer to the previous affinity
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS portThreadExSetSystemGroupAffinity(NvU32 coreIndex, PORT_THREAD_AFFINITY* pPrevAffinity);
+#define portThreadExSetSystemGroupAffinity_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS)
+
+/**
+ * @brief Restores the previous affinity of the current thread
+ *
+ * @param [in] pPrevAffinity Specifies the new system affinity of the current thread.
+ *                           Set this parameter to the value that was returned
+ *                           by a previous call to portThreadExSetSystemGroupAffinity.
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS portThreadExRevertToUserGroupAffinity(PORT_THREAD_AFFINITY* pPrevAffinity);
+#define portThreadExRevertToUserGroupAffinity_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS)
+
+typedef enum PORT_THREAD_PROCESS_NOTIFY_EVENT
+{
+    PORT_THREAD_PROCESS_NOTIFY_EVENT_CREATE,
+    PORT_THREAD_PROCESS_NOTIFY_EVENT_EXIT
+} PORT_THREAD_PROCESS_NOTIFY_EVENT;
+
+typedef void (*PORT_THREAD_PROCESS_NOTIFY_ROUTINE)(NvU64 processId,
+    PORT_THREAD_PROCESS_NOTIFY_EVENT notifyEvent, void *pPvtData);
+/**
+ * @brief Register a callback function with the nvport thread module to get
+ *        process create/exit event notifications.
+ * @param [in]  pFunc     Pointer to the event callback function.
+ * @param [in]  pPvtData  Pointer to the event callback function's private data.
+ * @param [out] ppOldFunc Pointer to the old event callback function.
+ * @param [out] ppPvtData Pointer to the old event callback function's private data.
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS portThreadExRegisterProcessNotifyRoutine(PORT_THREAD_PROCESS_NOTIFY_ROUTINE pFunc, void *pPvtData,
+    PORT_THREAD_PROCESS_NOTIFY_ROUTINE *ppOldFunc, void **ppPvtData);
+#define portThreadExRegisterProcessNotifyRoutine_SUPPORTED (NVOS_IS_WINDOWS && PORT_IS_KERNEL_BUILD && !PORT_IS_MODS)
+
+/**
+ * @brief Unregister a callback function from the nvport thread module to stop
+ *        process create/exit event notifications.
+ * @param [in] pOldFunc    Pointer to the old event callback function which was
+ *                         returned by portThreadExRegisterProcessNotifyRoutine.
+ * @param [in] pOldPvtData Pointer to the old event callback function's private
+ *                         data which was returned by portThreadExRegisterProcessNotifyRoutine.
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS portThreadExUnregisterProcessNotifyRoutine(PORT_THREAD_PROCESS_NOTIFY_ROUTINE pOldFunc, void* pOldPvtData);
+#define portThreadExUnregisterProcessNotifyRoutine_SUPPORTED (NVOS_IS_WINDOWS && PORT_IS_KERNEL_BUILD && !PORT_IS_MODS)
+/// @} End extended functions
+
+#endif // _NVPORT_THREAD_H_
+/// @}
+
diff --git a/src/nvidia/inc/libraries/nvport/util.h b/src/nvidia/inc/libraries/nvport/util.h
new file mode 100644
index 0000000..6234d61
--- /dev/null
+++ b/src/nvidia/inc/libraries/nvport/util.h
@@ -0,0 +1,254 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief Util module public interface
+ */
+
+#ifndef _NVPORT_H_
+#error "This file cannot be included directly. Include nvport.h instead."
+#endif
+
+#ifndef _NVPORT_UTIL_H_
+#define _NVPORT_UTIL_H_
+
+/**
+ * @defgroup NVPORT_UTIL Utilities module
+ *
+ * @brief This module contains utility functions used by other modules.
+ *
+ * The generic implementation for all functions is in util-generic.h
+ *
+ * @{
+ */
+
+/**
+ * @def PORT_UTIL_INLINE
+ *
+ * @note There are ways to declare a function without qualifiers, and then
+ * redefine it as static/extern inline, but none work across all compilers that
+ * we use. The easiest solution is to just specify the qualifiers upon function
+ * declaration. We assume all these will be inline, but that can be changed
+ * through the makefile when adding non-inline implementations:
+ *     MODULE_DEFINES += PORT_UTIL_INLINE
+ *     MODULE_SOURCES += util-impl.c
+ */
+#ifndef PORT_UTIL_INLINE
+#define PORT_UTIL_INLINE PORT_INLINE
+#endif
+
+#if NVOS_IS_LIBOS
+#include "nvport/inline/util_libos.h"
+#endif
+
+/**
+ * @name Core Functions
+ * @{
+ */
+
+/**
+ * @brief Returns true if the two buffers overlap.
+ *
+ * The buffer lengths are specified by the len0 and len1 parameters.
+ */
+PORT_UTIL_INLINE NvBool portUtilCheckOverlap(const NvU8 *pData0, NvLength len0, const NvU8 *pData1, NvLength len1);
+
+/**
+ * @brief Returns true if address is aligned to align bytes
+ *
+ * If align is not a power of two, it will return false.
+ */
+PORT_UTIL_INLINE NvBool portUtilCheckAlignment(const void *address, NvU32 align);
+
+/**
+ * @brief Returns true if num is a power of two.
+ */
+PORT_UTIL_INLINE NvBool portUtilIsPowerOfTwo(NvU64 num);
+
+/**
+ * @brief Write the 16-bit number to pBuf in Little Endian
+ */
+PORT_UTIL_INLINE void portUtilWriteLittleEndian16(void *pBuf, NvU16 value);
+
+/**
+ * @brief Write the 32-bit number to pBuf in Little Endian
+ */
+PORT_UTIL_INLINE void portUtilWriteLittleEndian32(void *pBuf, NvU32 value);
+
+/**
+ * @brief Write the 64-bit number to pBuf in Little Endian
+ */
+PORT_UTIL_INLINE void portUtilWriteLittleEndian64(void *pBuf, NvU64 value);
+
+/**
+ * @brief Write the 16-bit number to pBuf in Big Endian
+ */
+PORT_UTIL_INLINE void portUtilWriteBigEndian16(void *pBuf, NvU16 value);
+
+/**
+ * @brief Write the 32-bit number to pBuf in Big Endian
+ */
+PORT_UTIL_INLINE void portUtilWriteBigEndian32(void *pBuf, NvU32 value);
+
+/**
+ * @brief Write the 64-bit number to pBuf in Big Endian
+ */
+PORT_UTIL_INLINE void portUtilWriteBigEndian64(void *pBuf, NvU64 value);
+
+/**
+ * @brief Efficient spinloop body that doesn't waste power.
+ *
+ * This function will spin for a very short time, then return, so it should be
+ * called as:
+ *
+ * ~~~{.c}
+ * while (bShouldSpin)
+ *     portUtilSpin();
+ * ~~~
+ */
+static NV_FORCEINLINE void portUtilSpin(void);
+
+/**
+ * @brief Returns true if the caller is currently in interrupt context.
+ *
+ * Interrupt context here means:
+ *  - Unix:    Interrupts are masked
+ *  - Windows: IRQL > DISPATCH_LEVEL
+ */
+NvBool portUtilIsInterruptContext(void);
+
+/**
+ * @def portUtilGetReturnAddress()
+ * Returns the current function's return address.
+ */
+
+/**
+ * @def portUtilGetIPAddress()
+ * Returns the current instruction pointer (IP) address.
+ */
+NV_NOINLINE NvUPtr portUtilGetIPAddress(void);
+
+/**
+ * @brief Returns the number of leading zeros, starting from the MSB.
+ *
+ * Examples:
+ *   portUtilCountLeadingZeros64(0) == 64
+ *   portUtilCountLeadingZeros64(1) == 63
+ *   portUtilCountLeadingZeros64(2) == 62
+ *   portUtilCountLeadingZeros64(0xFFFFFFFFFFFFFF00) == 0
+ */
+PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros64(NvU64 n);
+
+/**
+ * @brief Like @ref portUtilCountLeadingZeros64 but operating on 32-bit ints
+ */
+PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros32(NvU32 n);
+
+/**
+ * @brief Returns the number of trailing zeros, starting from the LSB.
+ *
+ * Examples:
+ *   portUtilCountTrailingZeros64(0) == 64
+ *   portUtilCountTrailingZeros64(1) == 0
+ *   portUtilCountTrailingZeros64(2) == 1
+ *   portUtilCountTrailingZeros64(0xFFFFFFFFFFFFFF00) == 8
+ */
+PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros64(NvU64 n);
+
+/**
+ * @brief Like @ref portUtilCountTrailingZeros64 but operating on 32-bit ints
+ */
+PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros32(NvU32 n);
+
+/// @} End core functions
+
+#include <stddef.h> /* NULL */
+
+/**
+ * @name Extended Functions
+ * @{
+ */
+
+/**
+ * @brief Returns a return address up the stack of the current function.
+ *
+ * @param level The number of levels up the stack to go.
+ *              level == 0 - Gives the current IP.
+ *              level == 1 - The current function's return address, same as
+ *                           @ref portUtilGetReturnAddress
+ */
+NV_NOINLINE NvUPtr portUtilExGetStackTrace(NvU32 level);
+
+#define portUtilExSpinNs_SUPPORTED  PORT_IS_MODULE_SUPPORTED(time)
+#define portUtilExDelayMs_SUPPORTED PORT_IS_MODULE_SUPPORTED(time)
+
+/**
+ * @brief Spin for the given amount of nanoseconds.
+ *
+ * Utilizes @ref portUtilSpin to spin for the given duration, without putting
+ * the thread to sleep.
+ */
+void portUtilExSpinNs(NvU32 nanoseconds);
+
+/**
+ * @brief Delay the thread execution for the given duration in milliseconds.
+ *
+ * Unlike @ref portUtilExSpinNs, this function may put the thread to sleep.
+ */
+void portUtilExDelayMs(NvU32 milliseconds);
+
+#if (NVCPU_IS_FAMILY_X86 || NVCPU_IS_PPC64LE || NVCPU_IS_PPC || NVCPU_IS_AARCH64) && !defined(NV_MODS)
+/**
+ * @brief Gets the time stamp counter.
+ *
+ * @note This function is not serialized, and can be reordered by the CPU or
+ * compiler.
+ * @note On Intel "pre-Nehalem multi-core" CPUs and all multi-socket CPUs, time
+ * may not be synced across all the cores, and this function may return
+ * timestamps that are not monotonically increasing.
+ * @note On some old Intel CPUs (P3/P4), the timestamp counter was not
+ * incremented at a fixed clock rate, but Intel fixed this with the "invariant
+ * TSC" in late P4+ chips.
+ */
+PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void);
+#endif
+
+#if NVCPU_IS_FAMILY_X86 && !defined(NV_MODS) && PORT_IS_MODULE_SUPPORTED(atomic)
+/**
+ * @brief Gets the time stamp counter.
+ *
+ * Unlike @ref portUtilExReadTimestampCounter, this function serializes
+ * the reading of the time stamp counter to prevent both compiler and CPU
+ * reordering.
+ * @note Other than the serialization issue, this function has the same issues
+ * as @ref portUtilExReadTimestampCounter.
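+ *
+ * @par Example (illustrative sketch of a rough cycle measurement; doMeasuredWork
+ * is hypothetical):
+ * ~~~{.c}
+ * NvU64 start = portUtilExReadTimestampCounterSerialized();
+ * doMeasuredWork();
+ * NvU64 elapsedCycles = portUtilExReadTimestampCounterSerialized() - start;
+ * ~~~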
+ */
+static NV_FORCEINLINE NvU64 portUtilExReadTimestampCounterSerialized(void);
+#endif
+/// @} End extended functions
+
+#include "nvport/inline/util_generic.h"
+#include "nvport/inline/util_valist.h"
+
+#endif // _NVPORT_UTIL_H_
+/// @}
diff --git a/src/nvidia/inc/libraries/poolalloc.h b/src/nvidia/inc/libraries/poolalloc.h
new file mode 100644
index 0000000..bf076d0
--- /dev/null
+++ b/src/nvidia/inc/libraries/poolalloc.h
@@ -0,0 +1,290 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*!
+ * @file poolalloc.h
+ * @brief This file contains the interfaces for the pool allocator, a chained
+ * sub-allocator originally designed to sub-allocate GPU frame buffer
+ * given out by PMA (physical memory allocator).
+ *
+ * The only requirement on a node in the chained allocator is that the ratio
+ * between upstreamPageSize and allocPageSize is less than or equal to 64.
+ *
+ * @bug Make more abstract -- fix up the variable names
+ */
+
+
+#ifndef _NV_POOLALLOC_H_
+#define _NV_POOLALLOC_H_
+
+#include "nvtypes.h"
+#include "nvstatus.h"
+#include "nvport/nvport.h"
+#include "containers/list.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct poolnode POOLNODE;
+
+/*!
+ * Each node corresponds to one page of upstreamPageSize.
+ * The pool allocator sub-allocates from each of these pages.
+ */
+struct poolnode
+{
+    NvU64     pageAddr; // Address of the page to sub-allocate
+    NvU64     bitmap;   // The bit map for this page. Only used if the
+                        // node represents a partially allocated node
+    POOLNODE *pParent;  // The upstream pool node in case this node is
+                        // allocated from the upper pool.
+    ListNode  node;     // For intrusive lists.
+};
+
+MAKE_INTRUSIVE_LIST(PoolNodeList, POOLNODE, node);
+
+/*!
+ * The handle contains a generic metadata field that is needed for fast
+ * access. In the case of a linked list implementation of the pool allocator,
+ * the metadata is the pointer to the node that contains the page it was
+ * sub-allocated from.
+ */
+typedef struct poolallocHandle
+{
+    NvU64 address;   // The base address for this chunk
+    void *pMetadata; // The opaque metadata for storing necessary info
+} POOLALLOC_HANDLE;
+
+
+// non-intrusive list of page handles
+MAKE_LIST(PoolPageHandleList, POOLALLOC_HANDLE);
+
+/*!
+ * @brief Callback function to upstream allocators for allocating new pages
+ *
+ * This function can allocate multiple pages at a time.
+ *
+ * @param[in]  ctxPtr   Provides context to upstream allocator
+ * @param[in]  pageSize Size of page to ask for from upstream
+ * @param[in]  numPages Number of pages to allocate
+ * @param[out] pPage    The output page handle from upstream
+ *
+ * @return NV_OK if the page was successfully allocated
+ *         NV_ERR_NO_MEMORY if allocator cannot allocate enough backing
+ *         NV_ERR_BAD_PARAM if any parameter is invalid or size info is not a
+ *         multiple of SMALLEST_PAGE_SIZE
+ *
+ */
+typedef NV_STATUS (*allocCallback_t)(void *ctxPtr, NvU64 pageSize,
+                                     NvU64 numPages, POOLALLOC_HANDLE *pPage);
+
+/*!
+ * @brief Callback function to upstream allocators for freeing unused pages
+ *
+ * This function only frees one page at a time right now.
+ *
+ * @param[in] ctxPtr   Provides context to upstream allocator
+ * @param[in] pageSize Not really needed; for debugging only
+ * @param[in] pPage    The input page handle to be freed
+ *
+ */
+typedef void (*freeCallback_t)(void *ctxPtr, NvU64 pageSize, POOLALLOC_HANDLE *pPage);
+
+/*!
+ * Structure representing a pool.
+ */
+typedef struct poolalloc
+{
+    PoolNodeList freeList;    // List of nodes representing free pages
+    PoolNodeList fullList;    // List of nodes representing fully allocated pages
+    PoolNodeList partialList; // List of nodes representing partially allocated pages
+
+    PORT_MEM_ALLOCATOR *pAllocator;
+
+    struct
+    {
+        allocCallback_t allocCb;      // Callback to upstream allocator
+        freeCallback_t  freeCb;       // Callback to free pages
+        void           *pUpstreamCtx; // The context to pass to upstream allocator
+    } callBackInfo;
+
+    NvU64 upstreamPageSize; // Page size for upstream allocations
+    NvU64 allocPageSize;    // Page size to give out
+    NvU32 ratio;            // Ratio == upstreamPageSize / allocPageSize
+    NvU32 flags;            // POOLALLOC_FLAGS_*
+} POOLALLOC;
+
+
+/*!
+ * Dump the lists maintained by the pools.
+ */
+void poolAllocPrint(POOLALLOC *pPool);
+
+/*!
+ * If _AUTO_POPULATE is set to _ENABLE, poolAllocate will call the upstream
+ * function to repopulate the pool when it runs out of memory. If set to
+ * _DISABLE, poolAllocate will fail when it runs out of memory.
+ * By default this is disabled for use cases like page tables or context
+ * buffers, since the upstream function can call into PMA with the GPU lock
+ * held, which has a possibility of deadlocking.
+ */
+#define NV_RMPOOL_FLAGS_AUTO_POPULATE         1:0
+#define NV_RMPOOL_FLAGS_AUTO_POPULATE_DEFAULT 0x0
+#define NV_RMPOOL_FLAGS_AUTO_POPULATE_DISABLE 0x0
+#define NV_RMPOOL_FLAGS_AUTO_POPULATE_ENABLE  0x1
+
+/*!
+ * @brief This function initializes a pool allocator object
+ *
+ * This function establishes a link from this allocator to its upstream
+ * allocator by registering a callback function that lazily allocates memory
+ * if needed.
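+ *
+ * A hedged usage sketch (the callback names, context pointers, and the
+ * 64 KiB / 4 KiB page sizes are illustrative assumptions, not part of this
+ * interface; the size ratio must stay <= 64; DRF_DEF is the standard DRF
+ * helper from nvmisc.h):
+ * ~~~{.c}
+ * POOLALLOC *pPool = poolInitialize(
+ *     64 * 1024,                 // upstream page size
+ *     4 * 1024,                  // page size handed out by this pool
+ *     myAllocCb, myFreeCb, pCtx, // upstream callbacks and their context
+ *     pPortAllocator,            // PORT_MEM_ALLOCATOR for internal structures
+ *     DRF_DEF(_RMPOOL, _FLAGS, _AUTO_POPULATE, _DISABLE));
+ * ~~~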
+ *
+ * @param[in] upstreamPageSize The page size granularity managed by
+ *                             the allocator
+ * @param[in] allocPageSize    The page size to hand out
+ * @param[in] allocCb          The allocation callback function
+ * @param[in] freeCb           The free callback function
+ * @param[in] pUpstreamCtxPtr  The context pointer for the upstream
+ *                             allocator, passed back on callback
+ * @param[in] pAllocator       The PORT_MEM_ALLOCATOR used for the pool's
+ *                             internal structures
+ * @param[in] flags            POOLALLOC_FLAGS_*
+ *
+ * @return A pointer to a POOLALLOC structure if the initialization
+ *         succeeded; NULL otherwise
+ *
+ */
+
+POOLALLOC *poolInitialize(NvU64 upstreamPageSize, NvU64 allocPageSize,
+    allocCallback_t allocCb, freeCallback_t freeCb, void *pUpstreamCtxPtr,
+    PORT_MEM_ALLOCATOR *pAllocator, NvU32 flags);
+
+
+/*!
+ * @brief Reserves numPages from the upstream allocator. After the call,
+ * freeListSize will be equal to or greater than numPages.
+ *
+ * Since it will call into the upstream allocator, the page size of those
+ * pages will be the upstream page size.
+ *
+ * @param[in] pPool    The pool allocator
+ * @param[in] numPages Number of pages to reserve
+ *
+ * @return NV_OK if successful
+ *         NV_ERR_NO_MEMORY if allocator cannot allocate enough backing
+ *         NV_ERR_BAD_PARAM if any parameter is invalid
+ *
+ */
+NV_STATUS poolReserve(POOLALLOC *pPool, NvU64 numPages);
+
+
+/*!
+ * @brief This call will give back any free pages. After the call,
+ * freeListSize will be less than or equal to preserveNum.
+ *
+ * If the allocator holds preserveNum or fewer free pages before the call,
+ * this function will simply return.
+ *
+ * @param[in] pPool       The pool allocator to trim from
+ * @param[in] preserveNum The number of pages that we try to preserve
+ */
+void poolTrim(POOLALLOC *pPool, NvU64 preserveNum);
+
+
+/*!
+ * @brief This function allocates memory from the allocator and returns one
+ * page of the fixed allocPageSize as specified in the initialization function
+ *
+ * The implementation does not guarantee the allocated pages are contiguous.
+ * Although there are no potential synchronization issues, if two allocations
+ * happen to lie on upstream page boundaries, the allocations will most likely
+ * be discontiguous.
+ *
+ * This function will also call back to the upstream allocator to get more
+ * pages if it does not have enough pages already reserved.
+ *
+ * @param[in]  pPool       The pool allocator
+ * @param[out] pPageHandle The allocation handle that contains address and
+ *                         metadata for optimization
+ *
+ * @return NV_OK if successful
+ *         NV_ERR_NO_MEMORY if allocator cannot allocate enough backing
+ *         NV_ERR_BAD_PARAM if any parameter is invalid
+ */
+NV_STATUS poolAllocate(POOLALLOC *pPool, POOLALLOC_HANDLE *pPageHandle);
+
+
+/*!
+ * @brief This function allocates memory from the allocator and returns numPages
+ * of the fixed allocPageSize as specified in the initialization function
+ *
+ * These pages are allocated contiguously and the single start address is
+ * returned. Although there are no potential synchronization issues, if two
+ * allocations happen to lie on upstream page boundaries, the allocations will
+ * most likely be discontiguous with respect to each other.
+ *
+ * This function will not call back to the upstream allocator to get more
+ * pages, as it relies on a single chunk of free pages to make contiguous
+ * allocations.
+ * So the maximum number of pages that can be allocated contiguously is the
+ * number of pages that fit in the upstream page size, i.e. the "ratio" of
+ * this pool.
+ *
+ * @param[in]  pPool           The pool allocator
+ * @param[in]  numPages        The number of pages requested to be allocated
+ * @param[out] pPageHandleList The allocation handles that contain addresses
+ *                             and metadata for optimization
+ *
+ * @return NV_OK if successful
+ *         NV_ERR_NO_MEMORY if allocator cannot allocate enough backing
+ *         NV_ERR_BAD_PARAM if any parameter is invalid
+ */
+NV_STATUS poolAllocateContig(POOLALLOC *pPool, NvU32 numPages, PoolPageHandleList *pPageHandleList);
+
+/*!
+ * @brief This function frees the page based on the allocPageSize
+ *
+ * @param[in] pPool       The pool allocator
+ * @param[in] pPageHandle The allocation handle that contains address and
+ *                        metadata for optimization
+ *
+ */
+void poolFree(POOLALLOC *pPool, POOLALLOC_HANDLE *pPageHandle);
+
+
+/*!
+ * @brief Destroys the pool allocator and frees memory
+ */
+void poolDestroy(POOLALLOC *pPool);
+
+/*!
+ * @brief Returns the lengths of a pool's lists
+ */
+void poolGetListLength(POOLALLOC *pPool, NvU32 *pFreeListLength,
+                       NvU32 *pPartialListLength, NvU32 *pFullListLength);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _NV_POOLALLOC_H_ */
diff --git a/src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h b/src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h
new file mode 100644
index 0000000..e574ca3
--- /dev/null
+++ b/src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h
@@ -0,0 +1,3 @@
+
+#include "g_prereq_tracker_nvoc.h"
+
diff --git a/src/nvidia/inc/libraries/resserv/resserv.h b/src/nvidia/inc/libraries/resserv/resserv.h
new file mode 100644
index 0000000..c8e8775
--- /dev/null
+++ b/src/nvidia/inc/libraries/resserv/resserv.h
@@ -0,0 +1,398 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+#include "g_resserv_nvoc.h"
+
+#ifndef _RESSERV_H_
+#define _RESSERV_H_
+
+#include "nvoc/object.h"
+
+#include "containers/list.h"
+#include "containers/map.h"
+#include "containers/multimap.h"
+
+#include "nvtypes.h"
+#include "nvstatus.h"
+#include "nvos.h"
+#include "nvsecurityinfo.h"
+#include "rs_access.h"
+
+#if LOCK_VAL_ENABLED
+#include "lockval/lockval.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if (RS_STANDALONE)
+#include <stdio.h> /* for printf() in the NV_PRINTF fallback below */
+
+#ifndef NV_PRINTF
+extern int g_debugLevel;
+#define NV_PRINTF(level, format, ...) if (g_debugLevel) { printf(format, ##__VA_ARGS__); }
+#endif
+#include "utils/nvprintf.h"
+#endif
+
+//
+// Forward declarations
+//
+typedef struct RsServer RsServer;
+typedef struct RsDomain RsDomain;
+typedef struct CLIENT_ENTRY CLIENT_ENTRY;
+typedef struct RsResourceDep RsResourceDep;
+typedef struct RsResourceRef RsResourceRef;
+typedef struct RsInterMapping RsInterMapping;
+typedef struct RsCpuMapping RsCpuMapping;
+
+// RS-TODO INTERNAL and EXTERNAL params should be different structures
+typedef struct RS_CLIENT_FREE_PARAMS_INTERNAL RS_CLIENT_FREE_PARAMS_INTERNAL;
+typedef struct RS_CLIENT_FREE_PARAMS_INTERNAL RS_CLIENT_FREE_PARAMS;
+typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_RES_ALLOC_PARAMS_INTERNAL;
+typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_RES_ALLOC_PARAMS;
+typedef struct RS_RES_DUP_PARAMS_INTERNAL RS_RES_DUP_PARAMS_INTERNAL;
+typedef struct RS_RES_DUP_PARAMS_INTERNAL RS_RES_DUP_PARAMS;
+typedef struct RS_RES_SHARE_PARAMS_INTERNAL RS_RES_SHARE_PARAMS_INTERNAL;
+typedef struct RS_RES_SHARE_PARAMS_INTERNAL RS_RES_SHARE_PARAMS;
+typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_CLIENT_ALLOC_PARAMS_INTERNAL;
+typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_CLIENT_ALLOC_PARAMS;
+typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS_INTERNAL;
+typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS;
+typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_RES_CONTROL_PARAMS_INTERNAL;
+typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_RES_CONTROL_PARAMS;
+typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_LEGACY_CONTROL_PARAMS;
+typedef struct RS_LEGACY_ALLOC_PARAMS RS_LEGACY_ALLOC_PARAMS;
+typedef struct RS_LEGACY_FREE_PARAMS RS_LEGACY_FREE_PARAMS;
+
+typedef struct RS_CPU_MAP_PARAMS RS_CPU_MAP_PARAMS;
+typedef struct RS_CPU_UNMAP_PARAMS RS_CPU_UNMAP_PARAMS;
+typedef struct RS_INTER_MAP_PARAMS RS_INTER_MAP_PARAMS;
+typedef struct RS_INTER_UNMAP_PARAMS RS_INTER_UNMAP_PARAMS;
+
+// Forward declarations for structs defined by user
+typedef struct RS_RES_MAP_TO_PARAMS RS_RES_MAP_TO_PARAMS;
+typedef struct RS_RES_UNMAP_FROM_PARAMS RS_RES_UNMAP_FROM_PARAMS;
+typedef struct RS_INTER_MAP_PRIVATE RS_INTER_MAP_PRIVATE;
+typedef struct RS_INTER_UNMAP_PRIVATE RS_INTER_UNMAP_PRIVATE;
+typedef struct RS_CPU_MAPPING_PRIVATE RS_CPU_MAPPING_PRIVATE;
+
+typedef struct RS_FREE_STACK RS_FREE_STACK;
+typedef struct CALL_CONTEXT CALL_CONTEXT;
+typedef struct ACCESS_CONTROL ACCESS_CONTROL;
+typedef struct RS_ITERATOR RS_ITERATOR;
+typedef struct RS_ORDERED_ITERATOR RS_ORDERED_ITERATOR;
+typedef struct RS_SHARE_ITERATOR RS_SHARE_ITERATOR;
+typedef struct API_STATE API_STATE;
+typedef struct RS_LOCK_INFO RS_LOCK_INFO;
+typedef struct RS_CONTROL_COOKIE RS_CONTROL_COOKIE;
+typedef NV_STATUS RsCtrlFunc(struct RS_RES_CONTROL_PARAMS_INTERNAL*);
+
+class RsClient;
+class RsResource;
+class RsShared;
+
+MAKE_LIST(RsResourceRefList, RsResourceRef*);
+MAKE_LIST(RsResourceList, RsResource*);
+MAKE_LIST(RsHandleList, NvHandle);
+MAKE_LIST(RsShareList,
RS_SHARE_POLICY); +MAKE_MULTIMAP(RsIndex, RsResourceRef*); + +typedef NV_STATUS (*CtrlImpl_t)(RsClient*, RsResource*, void*); + +typedef void *PUID_TOKEN; + +// +// Defines +// + +/// Domain handles must start at this base value +#define RS_DOMAIN_HANDLE_BASE 0xD0D00000 + +/// Client handles must start at this base value +#define RS_CLIENT_HANDLE_BASE 0xC1D00000 + +/// Internal Client handles start at this base value +#define RS_CLIENT_INTERNAL_HANDLE_BASE 0xC1E00000 + +/// VF Client handles start at this base value +#define RS_CLIENT_VF_HANDLE_BASE 0xE0000000 + +/// Get the VF client handle range for gfid +#define RS_CLIENT_GET_VF_HANDLE_BASE(gfid) (RS_CLIENT_VF_HANDLE_BASE + ((gfid) - 1) * RS_CLIENT_HANDLE_MAX) + +// +// Print a warning if any client's resource count exceeds this +// threshold. Unless this was intentional, this is likely a client bug. +// +#define RS_CLIENT_RESOURCE_WARNING_THRESHOLD 100000 + +#define RS_CLIENT_HANDLE_MAX 0x100000 // Must be power of two +#define RS_CLIENT_HANDLE_BUCKET_COUNT 0x400 // 1024 +#define RS_CLIENT_HANDLE_BUCKET_MASK 0x3FF + + +/// The default maximum number of domains a resource server can allocate +#define RS_MAX_DOMAINS_DEFAULT 4096 + +/// The maximum length of a line of ancestry for resource references +#define RS_MAX_RESOURCE_DEPTH 6 + +/// RS_LOCK_FLAGS +#define RS_LOCK_FLAGS_NO_TOP_LOCK NVBIT(0) +#define RS_LOCK_FLAGS_NO_CLIENT_LOCK NVBIT(1) +#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_1 NVBIT(2) +#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_2 NVBIT(3) +#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_3 NVBIT(4) +#define RS_LOCK_FLAGS_NO_DEPENDANT_SESSION_LOCK NVBIT(5) +#define RS_LOCK_FLAGS_FREE_SESSION_LOCK NVBIT(6) +#define RS_LOCK_FLAGS_LOW_PRIORITY NVBIT(7) + +/// RS_LOCK_STATE +#define RS_LOCK_STATE_TOP_LOCK_ACQUIRED NVBIT(0) +#define RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED NVBIT(1) +#define RS_LOCK_STATE_CUSTOM_LOCK_2_ACQUIRED NVBIT(2) +#define RS_LOCK_STATE_CUSTOM_LOCK_3_ACQUIRED NVBIT(3) +#define RS_LOCK_STATE_ALLOW_RECURSIVE_RES_LOCK NVBIT(6) +#define RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED NVBIT(7) +#define RS_LOCK_STATE_SESSION_LOCK_ACQUIRED NVBIT(8) + +/// RS_LOCK_RELEASE +#define RS_LOCK_RELEASE_TOP_LOCK NVBIT(0) +#define RS_LOCK_RELEASE_CLIENT_LOCK NVBIT(1) +#define RS_LOCK_RELEASE_CUSTOM_LOCK_1 NVBIT(2) +#define RS_LOCK_RELEASE_CUSTOM_LOCK_2 NVBIT(3) +#define RS_LOCK_RELEASE_CUSTOM_LOCK_3 NVBIT(4) +#define RS_LOCK_RELEASE_SESSION_LOCK NVBIT(5) + +/// API enumerations used for locking knobs +typedef enum +{ + RS_LOCK_CLIENT =0, + RS_LOCK_TOP =1, + RS_LOCK_RESOURCE =2, + RS_LOCK_CUSTOM_3 =3, +} RS_LOCK_ENUM; + +typedef enum +{ + RS_API_ALLOC_CLIENT = 0, + RS_API_ALLOC_RESOURCE = 1, + RS_API_FREE_RESOURCE = 2, + RS_API_MAP = 3, + RS_API_UNMAP = 4, + RS_API_INTER_MAP = 5, + RS_API_INTER_UNMAP = 6, + RS_API_COPY = 7, + RS_API_SHARE = 8, + RS_API_CTRL = 9, + RS_API_MAX, +} RS_API_ENUM; + +NV_STATUS indexAdd(RsIndex *pIndex, NvU32 index, RsResourceRef *pResourceRef); +NV_STATUS indexRemove(RsIndex *pIndex, NvU32 index, RsResourceRef *pResourceRef); + +// +// Externs +// +/** + * NVOC wrapper for constructing resources of a given type + * + * @param[in] pAllocator Allocator for the resource object + * @param[in] pCallContext Caller context passed to resource constructor + * @param[inout] pParams Resource allocation parameters + * @param[out] ppResource New resource object + */ +extern NV_STATUS resservResourceFactory(PORT_MEM_ALLOCATOR *pAllocator, CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, RsResource **ppResource); + +/** + * NVOC 
wrapper for constructing an application-specific client. + */ +extern NV_STATUS resservClientFactory(PORT_MEM_ALLOCATOR *pAllocator, RS_RES_ALLOC_PARAMS_INTERNAL *pParams, RsClient **ppRsClient); + +/** + * Validate the UID/PID security token of the current user against a client's security token. + * + * This will be obsolete after phase 1. + * + * @param[in] pClientToken + * @param[in] pCurrentToken + * + * @returns NV_OK if the current user's security token matches the client's security token + */ +extern NV_STATUS osValidateClientTokens(PSECURITY_TOKEN pClientToken, PSECURITY_TOKEN pCurrentToken); + +/** + * Get the security token of the current user for the UID/PID security model. + * + * This will be obsolete after phase 1. + */ +extern PSECURITY_TOKEN osGetSecurityToken(void); + +/** + * TLS entry id for call contexts. All servers will use the same id. + */ +#define TLS_ENTRY_ID_RESSERV_CALL_CONTEXT TLS_ENTRY_ID_RESSERV_1 + +// +// Structs +// +struct RS_FREE_STACK +{ + RS_FREE_STACK *pPrev; + RsResourceRef *pResourceRef; +}; + +struct CALL_CONTEXT +{ + RsServer *pServer; ///< The resource server instance that owns the client + RsClient *pClient; ///< Client that was the target of the call + RsResourceRef *pResourceRef; ///< Reference that was the target of the call + RsResourceRef *pContextRef; ///< Reference that may be used to provide more context [optional] + RS_LOCK_INFO *pLockInfo; ///< Saved locking context information for the call + API_SECURITY_INFO secInfo; + RS_RES_CONTROL_PARAMS_INTERNAL *pControlParams; ///< parameters of the call [optional] + + void *pSerializedParams; ///< Serialized version of the params + void *pDeserializedParams; ///< Deserialized version of the params + NvU32 serializedSize; ///< Serialized size + NvU32 deserializedSize; ///< Deserialized size + NvBool bReserialize; ///< Reserialize before calling into GSP + NvBool bLocalSerialization; ///< Serialized internally +}; + +typedef enum { + RS_ITERATE_CHILDREN, ///< Iterate over a RsResourceRef's children + RS_ITERATE_DESCENDANTS, ///< Iterate over a RsResourceRef's children, grandchildren, etc. (unspecified order) + RS_ITERATE_CACHED, ///< Iterate over a RsResourceRef's cache + RS_ITERATE_DEPENDANTS, ///< Iterate over a RsResourceRef's dependants +} RS_ITER_TYPE; + +typedef enum +{ + LOCK_ACCESS_READ, + LOCK_ACCESS_WRITE, +} LOCK_ACCESS_TYPE; + + + +/** + * Access control information. This information will be filled out by the user + * of the Resource Server when allocating a client or resource. 
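+ *
+ * A hedged sketch of filling one in (the specific privilege level chosen
+ * here is an illustrative assumption):
+ * ~~~{.c}
+ * ACCESS_CONTROL ac;
+ * ac.privilegeLevel  = RS_PRIV_LEVEL_USER;
+ * ac.pSecurityToken  = osGetSecurityToken();
+ * ~~~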
+ */
+struct ACCESS_CONTROL
+{
+    /**
+     * The privilege level of this access control
+     */
+    RS_PRIV_LEVEL privilegeLevel;
+
+    /**
+     * Opaque pointer for storing a security token
+     */
+    PSECURITY_TOKEN pSecurityToken;
+};
+
+//
+// Utility wrappers for locking validator
+//
+#if LOCK_VAL_ENABLED
+#define RS_LOCK_VALIDATOR_INIT(lock, lockClass, inst) \
+    do { NV_ASSERT_OK(lockvalLockInit((lock), (lockClass), (inst))); } while(0)
+
+#define RS_SPINLOCK_ACQUIRE(lock, validator) do \
+{ \
+    NV_ASSERT_OK(lockvalPreAcquire((validator))); \
+    portSyncSpinlockAcquire((lock)); \
+    lockvalPostAcquire((validator), LOCK_VAL_SPINLOCK); \
+} while(0)
+
+#define RS_RWLOCK_ACQUIRE_READ(lock, validator) do \
+{ \
+    NV_ASSERT_OK(lockvalPreAcquire((validator))); \
+    portSyncRwLockAcquireRead((lock)); \
+    lockvalPostAcquire((validator), LOCK_VAL_RLOCK); \
+} while(0)
+
+#define RS_RWLOCK_ACQUIRE_WRITE(lock, validator) do \
+{ \
+    NV_ASSERT_OK(lockvalPreAcquire((validator))); \
+    portSyncRwLockAcquireWrite((lock)); \
+    lockvalPostAcquire((validator), LOCK_VAL_WLOCK); \
+} while(0)
+
+#define RS_SPINLOCK_RELEASE_EXT(lock, validator, bOutOfOrder) do \
+{ \
+    void *pLockValTlsEntry, *pReleasedLockNode; \
+    if (bOutOfOrder) \
+        NV_ASSERT_OK(lockvalReleaseOutOfOrder((validator), LOCK_VAL_SPINLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \
+    else \
+        NV_ASSERT_OK(lockvalRelease((validator), LOCK_VAL_SPINLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \
+    portSyncSpinlockRelease((lock)); \
+    lockvalMemoryRelease(pLockValTlsEntry, pReleasedLockNode); \
+} while(0)
+
+#define RS_RWLOCK_RELEASE_READ_EXT(lock, validator, bOutOfOrder) do \
+{ \
+    void *pLockValTlsEntry, *pReleasedLockNode; \
+    if (bOutOfOrder) \
+        NV_ASSERT_OK(lockvalReleaseOutOfOrder((validator), LOCK_VAL_RLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \
+    else \
+        NV_ASSERT_OK(lockvalRelease((validator), LOCK_VAL_RLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \
+    portSyncRwLockReleaseRead((lock)); \
+    lockvalMemoryRelease(pLockValTlsEntry, pReleasedLockNode); \
+} while(0)
+
+#define RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, bOutOfOrder) do \
+{ \
+    void *pLockValTlsEntry, *pReleasedLockNode; \
+    if (bOutOfOrder) \
+        NV_ASSERT_OK(lockvalReleaseOutOfOrder((validator), LOCK_VAL_WLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \
+    else \
+        NV_ASSERT_OK(lockvalRelease((validator), LOCK_VAL_WLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \
+    portSyncRwLockReleaseWrite((lock)); \
+    lockvalMemoryRelease(pLockValTlsEntry, pReleasedLockNode); \
+} while(0)
+
+#else
+#define RS_LOCK_VALIDATOR_INIT(lock, lockClass, inst)
+#define RS_SPINLOCK_ACQUIRE(lock, validator)                      do { portSyncSpinlockAcquire((lock)); } while(0)
+#define RS_RWLOCK_ACQUIRE_READ(lock, validator)                   do { portSyncRwLockAcquireRead((lock)); } while(0)
+#define RS_RWLOCK_ACQUIRE_WRITE(lock, validator)                  do { portSyncRwLockAcquireWrite((lock)); } while(0)
+#define RS_SPINLOCK_RELEASE_EXT(lock, validator, bOutOfOrder)     do { portSyncSpinlockRelease((lock)); } while(0)
+#define RS_RWLOCK_RELEASE_READ_EXT(lock, validator, bOutOfOrder)  do { portSyncRwLockReleaseRead((lock)); } while(0)
+#define RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, bOutOfOrder) do { portSyncRwLockReleaseWrite((lock)); } while(0)
+#endif
+
+#define RS_SPINLOCK_RELEASE(lock, validator)     RS_SPINLOCK_RELEASE_EXT(lock, validator, NV_FALSE)
+#define RS_RWLOCK_RELEASE_READ(lock, validator)  RS_RWLOCK_RELEASE_READ_EXT(lock, validator, NV_FALSE)
+#define RS_RWLOCK_RELEASE_WRITE(lock, validator) RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, NV_FALSE)
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/nvidia/inc/libraries/resserv/rs_access_map.h b/src/nvidia/inc/libraries/resserv/rs_access_map.h
new file mode 100644
index 0000000..e75da74
--- /dev/null
+++ b/src/nvidia/inc/libraries/resserv/rs_access_map.h
@@ -0,0 +1,234 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef RS_ACCESS_MAP_H
+#define RS_ACCESS_MAP_H
+
+#include "nvstatus.h"
+#include "nvtypes.h"
+
+#include "containers/map.h"
+#include "resserv/resserv.h"
+#include "resserv/rs_access_rights.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ * @brief Returns the client's access mask for the resource, returning NULL if
+ * the resource is not owned by the client
+ *
+ * @param[in] pResourceRef The resource whose access mask is being checked
+ * @param[in] pClient      The client accessing the resource
+ *
+ * @return The resource's access rights mask, or
+ * @return NULL if pClient does not own pResourceRef
+ */
+RS_ACCESS_MASK *rsAccessLookup(RsResourceRef *pResourceRef, RsClient *pClient);
+
+/*!
+ * @brief Fills a mask with all rights available to a client on a resource.
+ * Gets both rights directly available through the access mask,
+ * as well as rights shared by the resource.
+ *
+ * @param[in]  pResourceRef
+ * @param[in]  pClient
+ * @param[out] pAvailableRights The set of access rights available for this client on this resource
+ *
+ * @return none
+ */
+void rsAccessGetAvailableRights(RsResourceRef *pResourceRef, RsClient *pClient,
+                                RS_ACCESS_MASK *pAvailableRights);
+
+/*!
+ * @brief Perform an access rights check on a target resource
+ *
+ * This function should be used to determine whether sufficient access
+ * rights are already present, NOT whether access rights should be granted.
+ * It will not update any state on its own.
+ *
+ * For each of the required access rights, the invoking client must hold
+ * that access right on the target resource.
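+ *
+ * A hedged call sketch (the specific right checked is an illustrative
+ * assumption; the RS_ACCESS_MASK_* helpers come from rs_access.h):
+ * ~~~{.c}
+ * RS_ACCESS_MASK required;
+ * RS_ACCESS_MASK_CLEAR(&required);
+ * RS_ACCESS_MASK_ADD(&required, RS_ACCESS_NICE);
+ * if (rsAccessCheckRights(pResourceRef, pClient, &required) == NV_OK)
+ * {
+ *     // The invoking client already holds the right; no state was changed.
+ * }
+ * ~~~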
+ *
+ * @param[in] pResourceRef    A reference to the target resource for which we are
+ *                            checking access rights
+ * @param[in] pInvokingClient The client that is requesting access rights
+ * @param[in] pRightsRequired The set of access rights that the invoking client
+ *                            should have on the target resource
+ *
+ * @return NV_OK if the invoking client has the required access rights on the
+ *         target resource
+ * @return NV_ERR_INSUFFICIENT_PERMISSIONS if the invoking client does not have
+ *         the required access rights on the target resource
+ */
+NV_STATUS rsAccessCheckRights(RsResourceRef *pResourceRef, RsClient *pInvokingClient,
+                              const RS_ACCESS_MASK *pRightsRequired);
+
+/*!
+ * @brief Update what access rights are currently owned on a target resource
+ * based on the target resource's current privilege.
+ *
+ * This function should be used to update the access rights currently owned
+ * by the target resource. Most access rights are only obtained once and don't
+ * disappear/reappear. However, the RS_ACCESS_FLAG_UNCACHED_CHECK flag can be
+ * used to indicate access rights that are present/not present based on the target
+ * resource's current level of privilege, NOT what the level of privilege was when
+ * the access right was initially requested. This function is useful for updating
+ * which access rights are owned when accounting for uncached access rights.
+ *
+ * @param[in] pResourceRef    A reference to the target resource for which we are
+ *                            checking access rights
+ * @param[in] pInvokingClient The client to check level of access with
+ * @param[in] pRightsToUpdate If non-NULL, only access rights set in this mask
+ *                            will be updated
+ *
+ * @return none
+ */
+void rsAccessUpdateRights(RsResourceRef *pResourceRef, RsClient *pInvokingClient,
+                          const RS_ACCESS_MASK *pRightsToUpdate);
+
+/*!
+ * @brief Searches a resource's share list for an entry equal to the
+ * passed-in share policy, as defined by rsSharePolicyEquals
+ *
+ * @param[in] pShareList   The RsShareList to be searched
+ * @param[in] pSharePolicy The policy to be found
+ *
+ * @return A pointer to the corresponding policy, or
+ * @return NULL if no matching entry is found
+ */
+RS_SHARE_POLICY *rsShareListLookup(RsShareList *pShareList, RS_SHARE_POLICY *pSharePolicy);
+
+/*!
+ * @brief Adds a new share policy to a resource's share list, or merges into
+ * an existing policy, if possible
+ *
+ * @param[in]  pShareList   The RsShareList to be searched
+ * @param[in]  pSharePolicy The policy to be added to the list; may be merged with
+ *                          another policy with a matching pSharePolicy->type and ->target.
+ *                          In this case, ->accessMask for the existing entry and the
+ *                          new pSharePolicy will be merged with a union operation.
+ * @param[out] pAccessMask  The rights now shared for this share policy; may or
+ *                          may not match pSharePolicy->accessMask if merged with an existing policy.
+ *                          User may pass NULL, in which case nothing is written into this.
+ *
+ * @return NV_OK if the operation succeeded,
+ * @return NV_ERR_NO_MEMORY if a new element needed to be added to the list, but
+ *         insufficient memory was present to allocate one
+ */
+NV_STATUS rsShareListInsert(RsShareList *pShareList, RS_SHARE_POLICY *pSharePolicy,
+                            RS_ACCESS_MASK *pAccessMask);
+
+/*!
+ * @brief Removes certain rights from being shared in a share policy entry
+ * from a resource's RsShareList.
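+ *
+ * A hedged sketch of revoking everything a policy entry shares (the share
+ * type value and the zero-initialization of the other fields are
+ * illustrative assumptions about RS_SHARE_POLICY from rs_access.h):
+ * ~~~{.c}
+ * RS_SHARE_POLICY revoke = {0};
+ * revoke.type = RS_SHARE_TYPE_ALL;
+ * RS_ACCESS_MASK_FILL(&revoke.accessMask);
+ * rsShareListRemove(pShareList, &revoke, NULL);
+ * ~~~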
+ *
+ * @param[in]  pShareList   The RsShareList to be searched
+ * @param[in]  pSharePolicy The policy to be removed from the list, matched using
+ *                          pSharePolicy->type and ->target. Only rights specified in
+ *                          pSharePolicy->accessMask are revoked; others will remain.
+ *                          Use RS_ACCESS_MASK_FILL for a full mask to revoke all rights.
+ * @param[out] pAccessMask  The rights still shared for this share policy; may or
+ *                          may not be empty.
+ *                          User may pass NULL, in which case nothing is written into this.
+ *
+ * @return none
+ */
+void rsShareListRemove(RsShareList *pShareList, RS_SHARE_POLICY *pSharePolicy,
+                       RS_ACCESS_MASK *pAccessMask);
+
+/*!
+ * @brief Copy one share list into another.
+ * Note that this does not replace the Dst list if it is not empty;
+ * elements will be appended onto any existing list.
+ *
+ * @param[in] pShareListDst The list to copy into
+ * @param[in] pShareListSrc The list to copy from
+ *
+ * @return NV_ERR_NO_MEMORY, NV_OK
+ */
+NV_STATUS rsShareListCopy(RsShareList *pShareListDst, RsShareList *pShareListSrc);
+
+/*!
+ * @brief Returns the list which should be used for a resource's sharing.
+ * In order, selects either the resource's own list, the client's inherited
+ * list, or the server's global default list.
+ *
+ * @param[in] pResourceRef
+ * @param[in] pServer
+ *
+ * @return A pointer to the relevant share list
+ * @return NULL if no list is available and no pServer was provided
+ */
+RsShareList * rsAccessGetActiveShareList(RsResourceRef *pResourceRef, RsServer *pServer);
+
+/*!
+ * @brief Attempts to grant a set of requested access rights on this resource.
+ *
+ * This function will attempt to grant the rights specified in pRightsRequested
+ * to the client referred to by pInvokingClient. If successful, it will update
+ * the access rights of the target resource referred to by pResourceRef.
+ *
+ * The resAccessCallback method on the target resource will be invoked to
+ * perform checks. This requires that the target resource
+ * pResourceRef->pResource already be initialized.
+ *
+ * If pRightsRequested is non-NULL, then the call will return an error code if
+ * it is unable to grant any of the requested rights.
+ *
+ * If pRightsRequested is NULL, then the call will ignore any failure to
+ * grant, taking a "best-effort" approach to granting access rights. The
+ * rights requested will be determined as follows:
+ *
+ * - If pResourceRef is a client resource, the function will attempt to
+ *   request all possible access rights
+ * - For any other resource, the function will attempt to request the
+ *   same set of access rights held by the invoking client on the parent
+ *   resource
+ *
+ * @param[in] pResourceRef     The target resource reference on which access
+ *                             rights will be granted
+ * @param[in] pCallContext     Information about the call context
+ * @param[in] pInvokingClient  The client requesting the access right
+ * @param[in] pRightsRequested The set of access rights to attempt to grant,
+ *                             or NULL if no access rights were explicitly requested
+ * @param[in] pRightsRequired  Any rights additionally required for the operation;
+ *                             these will be requested if pRightsRequested is not specified.
+ *                             If specified, all rights in this mask must be granted for the call to succeed.
+ * @param[in] pAllocParams     Per-class allocation parameters passed into Alloc,
+ *                             NULL if this is not being called from the Alloc path.
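+ *
+ * A hedged best-effort call sketch (all-NULL masks per the description
+ * above; not from the original header):
+ * ~~~{.c}
+ * NV_STATUS status = rsAccessGrantRights(pResourceRef, pCallContext,
+ *                                        pInvokingClient,
+ *                                        NULL,  // no explicit request: best effort
+ *                                        NULL,  // no additionally required rights
+ *                                        NULL); // not called from the Alloc path
+ * ~~~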
+ *
+ * @return NV_OK if the access right should be granted
+ * @return NV_ERR_INSUFFICIENT_PERMISSIONS if access rights were
+ *         explicitly requested, and the function failed to grant all of the
+ *         requested access rights
+ */
+NV_STATUS rsAccessGrantRights(RsResourceRef *pResourceRef, CALL_CONTEXT *pCallContext,
+                              RsClient *pInvokingClient, const RS_ACCESS_MASK *pRightsRequested,
+                              const RS_ACCESS_MASK *pRightsRequired, void *pAllocParams);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RS_ACCESS_MAP_H */
diff --git a/src/nvidia/inc/libraries/resserv/rs_access_rights.h b/src/nvidia/inc/libraries/resserv/rs_access_rights.h
new file mode 100644
index 0000000..9ff6397
--- /dev/null
+++ b/src/nvidia/inc/libraries/resserv/rs_access_rights.h
@@ -0,0 +1,167 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef RS_ACCESS_RIGHTS_H
+#define RS_ACCESS_RIGHTS_H
+
+#include "nvstatus.h"
+#include "nvtypes.h"
+#include "nvmisc.h"
+
+// Part of this header lives in userspace, at sdk/nvidia/inc/rs_access.h
+#include "rs_access.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/****************************************************************************/
+/*                           Access right flags                             */
+/****************************************************************************/
+
+//
+// The meaning of each access right flag is documented in
+// drivers/resman/docs/rmapi/resource_server/rm_capabilities.adoc
+//
+
+#define RS_ACCESS_FLAG_NONE                    0U
+#define RS_ACCESS_FLAG_ALLOW_KERNEL_PRIVILEGED NVBIT(1)
+#define RS_ACCESS_FLAG_ALLOW_PRIVILEGED        NVBIT(2)
+#define RS_ACCESS_FLAG_UNCACHED_CHECK          NVBIT(3)
+#define RS_ACCESS_FLAG_ALLOW_OWNER             NVBIT(4)
+
+
+/****************************************************************************/
+/*                          Access right metadata                           */
+/****************************************************************************/
+
+/*!
+ * @brief Metadata about each access right
+ *
+ * The ith entry in this array represents access right i.
+ */
+extern const RS_ACCESS_INFO g_rsAccessMetadata[RS_ACCESS_COUNT];
+
+
+/****************************************************************************/
+/*                           Access right macros                            */
+/****************************************************************************/
+
+/*!
+ * @brief Initializer for an access mask. Avoid use if possible.
+ *
+ * To initialize an access mask, if possible, first zero-initialize it then
+ * add specific access rights at runtime. Zero-initialization can be performed
+ * with the RS_ACCESS_MASK_EMPTY static initializer, the RS_ACCESS_MASK_CLEAR() macro,
+ * or a memset.
+ *
+ * Only use this when a static initializer is TRULY needed, and when the code is
+ * generated by a script, not hardcoded. For instance, this is useful when
+ * statically initializing control call table entries.
+ *
+ * The ith argument will directly initialize the ith limb. An access right A
+ * should be placed in the limb SDK_RS_ACCESS_LIMB_INDEX(A). Each limb should be a
+ * mask of flags, where each flag is in the form SDK_RS_ACCESS_OFFSET_MASK(A), or 0
+ * to indicate no flags.
+ *
+ * For example, suppose we have access rights A, B, and C, where
+ *
+ *   SDK_RS_ACCESS_LIMB_INDEX(A) == 0
+ *   SDK_RS_ACCESS_LIMB_INDEX(B) == 2
+ *   SDK_RS_ACCESS_LIMB_INDEX(C) == 2
+ *
+ * In this case, the appropriate way to initialize a mask containing all
+ * three access rights is:
+ *
+ *   RS_ACCESS_MASK mask = RS_ACCESS_MASK_INITIALIZER
+ *   (
+ *       SDK_RS_ACCESS_OFFSET_MASK(A),
+ *       0,
+ *       SDK_RS_ACCESS_OFFSET_MASK(B) | SDK_RS_ACCESS_OFFSET_MASK(C)
+ *   );
+ */
+#define RS_ACCESS_MASK_INITIALIZER(...) { { __VA_ARGS__ } }
+
+/*!
+ * @brief Empty initializer for an access mask.
+ *
+ * An example of usage is as follows:
+ *
+ *   RS_ACCESS_MASK mask = RS_ACCESS_MASK_EMPTY;
+ */
+#define RS_ACCESS_MASK_EMPTY RS_ACCESS_MASK_INITIALIZER(0)
+
+
+/****************************************************************************/
+/*                          Access right functions                          */
+/****************************************************************************/
+
+/*!
+ * @brief Checks if one access rights mask is a subset of another
+ *
+ * @param[in] pRightsPresent  The access rights that are held by some actor
+ * @param[in] pRightsRequired The access rights that must be a subset of
+ *                            the rights in pRightsPresent
+ *
+ * @return NV_TRUE if each of the access rights in pRightsRequired is also
+ *         present in pRightsPresent
+ * @return NV_FALSE otherwise
+ */
+NvBool rsAccessMaskIsSubset(const RS_ACCESS_MASK *pRightsPresent,
+                            const RS_ACCESS_MASK *pRightsRequired);
+
+/*!
+ * @brief Checks if an access right mask is empty
+ *
+ * @param[in] pAccessMask The mask to check for emptiness
+ *
+ * @return NV_TRUE if the mask contains no access rights
+ * @return NV_FALSE otherwise
+ */
+NvBool rsAccessMaskIsEmpty(const RS_ACCESS_MASK *pAccessMask);
+
+
+/*!
+ * @brief Converts an array of access rights into a mask
+ *
+ * This function is useful for processing a statically-initialized array of
+ * access rights, since it is not always desirable to directly statically
+ * initialize an access mask. One example of this use is the definitions used
+ * in resource_list.h.
+ *
+ * @param[out] pAccessMask The newly initialized access mask
+ * @param[in]  pRightsArray An array of access right values
+ * @param[in]  length      The number of elements in pRightsArray
+ *
+ * @return none
+ */
+void rsAccessMaskFromArray(RS_ACCESS_MASK *pAccessMask,
+                           const RsAccessRight *pRightsArray,
+                           NvLength length);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RS_ACCESS_RIGHTS_H */
diff --git a/src/nvidia/inc/libraries/resserv/rs_client.h b/src/nvidia/inc/libraries/resserv/rs_client.h
new file mode 100644
index 0000000..2d90ef7
--- /dev/null
+++ b/src/nvidia/inc/libraries/resserv/rs_client.h
@@ -0,0 +1,557 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES.
All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+#include "g_rs_client_nvoc.h"
+
+#ifndef _RS_CLIENT_H_
+#define _RS_CLIENT_H_
+
+
+#include "resserv/resserv.h"
+#include "nvport/nvport.h"
+#include "resserv/rs_resource.h"
+#include "containers/list.h"
+#include "utils/nvrange.h"
+
+#define RS_UNIQUE_HANDLE_BASE  (0xcaf00000)
+#define RS_UNIQUE_HANDLE_RANGE (0x00080000)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup RsClient
+ * @addtogroup RsClient
+ * @{*/
+
+typedef enum {
+    CLIENT_TYPE_USER,
+    CLIENT_TYPE_KERNEL
+} CLIENT_TYPE;
+
+typedef struct AccessBackRef
+{
+    NvHandle hClient;
+    NvHandle hResource;
+} AccessBackRef;
+
+MAKE_LIST(AccessBackRefList, AccessBackRef);
+
+/**
+ * Information about a client
+ */
+NVOC_PREFIX(client) class RsClient : Object
+{
+public:
+    /**
+     * The handle of this client
+     */
+    NvHandle hClient;
+
+    /**
+     * Kernel or user client
+     */
+    CLIENT_TYPE type;
+
+    /**
+     * Client is in a state where it can allocate new objects
+     */
+    NvBool bActive;
+
+    /**
+     * True if client tripped the resource count warning threshold
+     */
+    NvBool bResourceWarning;
+
+    /**
+     * True if client is disabled, awaiting free
+     */
+    NvBool bDisabled;
+
+    /**
+     * True if client's high priority resources were freed
+     */
+    NvBool bHighPriorityFreeDone;
+
+    /**
+     * Maps resource handle -> RsResourceRef
+     */
+    RsRefMap resourceMap;
+
+    /**
+     * Access right back reference list of (hClient, hResource) pairs
+     *
+     * A map of all hResource's (with hClient to scope the handle) that have
+     * shared access rights with us.
+     */
+    AccessBackRefList accessBackRefList;
+
+    /**
+     * The first generated handle in the generated resource handle space
+     *
+     * It is an error for the handleRangeStart to be 0 because that is a
+     * reserved handle.
+     *
+     * The first generated handle is not necessarily the lowest possible handle
+     * because the handle generator may overflow. The lowest possible resource
+     * handle is 0x1.
+     *
+     * Generated handles will be of the form: handleRangeStart + [0, handleRangeSize)
+     */
+    NvHandle handleRangeStart;
+
+    /**
+     * The size of the generated resource handle space.
+     *
+     * It is an error for the handleRangeSize to be 0.
+     *
+     * Generated handles will be of the form: handleRangeStart + [0, handleRangeSize)
+     */
+    NvHandle handleRangeSize;
+
+    /**
+     * The handles in the restricted resource handle space.
+ */ + NV_RANGE handleRestrictRange; + + /** + * Index used to generate the next handle in the resource handle space + */ + NvHandle handleGenIdx; + + /** + * Ordered list of resources that are to be freed + */ + RsRefFreeList pendingFreeList; + + /** + * Information about recursive resource free calls is stored here + */ + RS_FREE_STACK *pFreeStack; + + /** + * Node for a client's disabled client list + */ + ListNode disabledClientNode; + + /** + * Construct a client instance + * @param[in] pClient This client + * @param[in] pAllocator NvPort memory allocation interface for client memory allocations + * @param[in] pParams The allocation params + */ + NV_STATUS clientConstruct(RsClient *pClient, PORT_MEM_ALLOCATOR *pAllocator, RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + + /** + * Destruct a client instance and free all allocated resources + * @param[in] pClient This client + */ + void clientDestruct(RsClient *pClient); + + /** + * Get a resource pointer from a resource reference. No resource locks are taken. + * @param[in] pClient This client + * @param[in] pResourceRef The reference to the resource + * @param[out] ppResource Pointer to the resource + */ + NV_STATUS clientGetResourceByRef(RsClient *pClient, RsResourceRef *pResourceRef, RsResource **ppResource); + + /** + * Get a resource pointer from a resource handle. No resource locks are taken. + * @param[in] pClient This client + * @param[in] hResource Resource handle + * @param[in] internalClassId Expected internal class ID of object. Must match. + * @param[out] ppResource Pointer to the resource + */ + NV_STATUS clientGetResource(RsClient *pClient, NvHandle hResource, NvU32 internalClassId, RsResource **ppResource); + + /** + * Get the reference to a resource + * @param[in] pClient This client + * @param[in] hResource The resource to lookup + * @param[out] ppResourceRef The reference to the resource + */ + NV_STATUS clientGetResourceRef(RsClient *pClient, NvHandle hResource, RsResourceRef **ppResourceRef); + + /** + * Get the reference to a resource, but only if the passed in access rights are + * possessed by the invoking client. 
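+     *
+     * A hedged example (hObject and the DUP right are illustrative; the
+     * mask helpers are the RS_ACCESS_MASK_* macros from rs_access.h):
+     * ~~~{.c}
+     * RS_ACCESS_MASK mask;
+     * RS_ACCESS_MASK_CLEAR(&mask);
+     * RS_ACCESS_MASK_ADD(&mask, RS_ACCESS_DUP_OBJECT);
+     * RsResourceRef *pRef;
+     * NV_STATUS status = clientGetResourceRefWithAccess(pClient, hObject, &mask, &pRef);
+     * ~~~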
+     *
+     * @param[in]  pClient         This client
+     * @param[in]  hResource       The resource to lookup
+     * @param[in]  pRightsRequired The rights required for success
+     * @param[out] ppResourceRef   The reference to the resource
+     */
+    NV_STATUS clientGetResourceRefWithAccess(RsClient *pClient, NvHandle hResource, const RS_ACCESS_MASK *pRightsRequired, RsResourceRef **ppResourceRef);
+
+    /**
+     * Get the reference to a resource (with a type check)
+     * @param[in]  pClient         This client
+     * @param[in]  hResource       The resource to lookup
+     * @param[in]  internalClassId The internal resource class id
+     * @param[out] ppResourceRef   The reference to the resource
+     */
+    NV_STATUS clientGetResourceRefByType(RsClient *pClient, NvHandle hResource, NvU32 internalClassId, RsResourceRef **ppResourceRef);
+
+    /**
+     * Validate that current process is allowed to use this client
+     * @param[in] pClient  This client
+     * @param[in] pSecInfo Security info of the current API call
+     */
+    virtual NV_STATUS clientValidate(RsClient *pClient, const API_SECURITY_INFO * pSecInfo);
+
+    /**
+     * Validate that current process has the required locks to use this client
+     * @param[in] pClient      This client
+     * @param[in] pServer      Resource Server instance
+     * @param[in] pClientEntry Client entry of the client
+     */
+    virtual NV_STATUS clientValidateLocks(RsClient *pClient, RsServer *pServer, const CLIENT_ENTRY *pClientEntry);
+
+    /**
+     * Stub virtual function
+     * @param[in] pClient This client
+     */
+    virtual RS_PRIV_LEVEL clientGetCachedPrivilege(RsClient *pClient);
+
+    /**
+     * Stub virtual function
+     * @param[in] pClient   This client
+     * @param[in] privLevel The privilege level to check against
+     */
+    virtual NvBool clientIsAdmin(RsClient *pClient, RS_PRIV_LEVEL privLevel);
+
+    /**
+     * Allocate a resource in RM for this client
+     * @param[in]    pClient This client
+     * @param[in]    pServer
+     * @param[inout] pParams Resource allocation parameters
+     */
+    NV_STATUS clientAllocResource(RsClient *pClient, RsServer *pServer, RS_RES_ALLOC_PARAMS_INTERNAL *pParams);
+
+    /**
+     * Duplicate a resource reference into this client
+     * @param[in]    pClient This client
+     * @param[in]    pServer The resource server instance
+     * @param[inout] pParams Resource duplication parameters
+     */
+    NV_STATUS clientCopyResource(RsClient *pClient, RsServer *pServer, RS_RES_DUP_PARAMS_INTERNAL *pParams);
+
+    /**
+     * Free a resource for this client and update resource reference book-keeping.
+     * If the resource has a non-zero reference count, only book-keeping will be updated.
+     * Resources should never be freed in control calls.
+     *
+     * @param[in] pClient This client
+     * @param[in] pServer
+     * @param[in] pParams Resource destruction parameters
+     */
+    virtual NV_STATUS clientFreeResource(RsClient *pClient, RsServer *pServer, RS_RES_FREE_PARAMS_INTERNAL *pParams);
+
+    /**
+     * Remove a resource reference from the client's resource hashmap
+     * @param[in] pClient      This client
+     * @param[in] pResourceRef The reference to free
+     */
+    virtual NV_STATUS clientDestructResourceRef(RsClient *pClient, RsServer *pServer, RsResourceRef *pResourceRef,
+                                                RS_LOCK_INFO *pLockInfo, API_SECURITY_INFO *pSecInfo);
+
+    /**
+     * Unmap a mapping that belongs to a resource reference in this client.
+     * @param[in]    pClient      This client
+     * @param[in]    pResourceRef The reference that owns the mapping
+     * @param[inout] ppCpuMapping The mapping to unmap
+     */
+    virtual NV_STATUS clientUnmapMemory(RsClient *pClient, RsResourceRef *pResourceRef,
+                                        RS_LOCK_INFO *pLockInfo, RsCpuMapping **ppCpuMapping,
+                                        API_SECURITY_INFO *pSecInfo);
+    /**
+     * Create an inter-mapping between two resources owned by this client.
+     * Resserv only implements a stub; users should override this to fill their own MapTo params struct.
+     *
+     * @param[in] pClient      This client
+     * @param[in] pMapperRef   The resource that can be used to create the mapping
+     * @param[in] pMappableRef The resource that can be mapped
+     * @param[in] pParams      parameters describing the mapping
+     */
+    virtual NV_STATUS clientInterMap(RsClient *pClient, RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RS_INTER_MAP_PARAMS *pParams);
+
+    /**
+     * Unmap an inter-mapping between two resources owned by this client.
+     * Resserv only implements a stub; users should override this to fill their own UnmapFrom params struct.
+     *
+     * @param[in] pClient    This client
+     * @param[in] pMapperRef The reference that was used to create the mapping
+     * @param[in] pParams    parameters describing the unmapping
+     */
+    virtual NV_STATUS clientInterUnmap(RsClient *pClient, RsResourceRef *pMapperRef, RS_INTER_UNMAP_PARAMS *pParams);
+
+    /**
+     * Generate an unused handle for a resource. The handle will be generated in the white-listed range that was
+     * specified when the client was allocated.
+     *
+     * The handle generator will wrap around when the number of handles generated is greater than handleRangeSize, and
+     * the generator will start at handle 0x1 if it overflows (0x0 is a reserved handle).
+     *
+     * The handle generator can generate up to 2^32-2 unique handles if handleRangeStart + handleRangeSize overflows
+     * (because 0x0 is a reserved handle). Otherwise, the handle generator can generate up to 2^32-1 unique handles.
+     *
+     * @param[in]  pClient This client
+     * @param[out] pHandle The generated handle
+     *
+     */
+    NV_STATUS clientGenResourceHandle(RsClient *pClient, NvHandle *pHandle);
+
+    /**
+     * Validate that a given resource handle is well-formed and does not already
+     * exist under a given client.
+     *
+     * @param[in] pClient
+     * @param[in] hResource
+     * @param[in] bRestrict If true, fail validation for handles in the client's restricted range
+     */
+    virtual NV_STATUS clientValidateNewResourceHandle(RsClient *pClient, NvHandle hResource, NvBool bRestrict);
+
+    /**
+     * Wrapper that generates a resource handle if a handle of 0 is provided, or otherwise
+     * validates a handle that was provided.
+     *
+     * @param[in]    pClient
+     * @param[inout] phResource
+     */
+    NV_STATUS clientAssignResourceHandle(RsClient *pClient, NvHandle *phResource);
+
+    /**
+     * Recursively generate a client's list of resources to free
+     * @param[in] pClient
+     * @param[in] pTarget    The resource ref currently being processed
+     * @param[in] pReference The resource ref that this function was initially called on
+     * @param[in] bMove      If NV_TRUE: Add/move the target to the front of the list
+     *                       If NV_FALSE: Add the target to the front of the list if it isn't already in the list
+     */
+    NV_STATUS clientUpdatePendingFreeList(RsClient *pClient, RsResourceRef *pTarget,
+                                          RsResourceRef *pReference, NvBool bMove);
+
+    /**
+     * Allow derived client classes to modify the generated list of resources to free
+     * before they are freed.
+     * @param[in]  pClient
+     * @param[out] ppFirstLowPriRef A pointer to the first reference that is low priority
+     */
+    virtual NV_STATUS clientPostProcessPendingFreeList(RsClient *pClient, RsResourceRef **ppFirstLowPriRef);
+
+    /**
+     * Add a back reference to a client/resource pair that shared access with our client
+     * so we can remove that access entry on client destruction.
+     * @param[in] pClient      This client
+     * @param[in] pResourceRef Resource reference that decided to share access with us
+     */
+    NV_STATUS clientAddAccessBackRef(RsClient* pClient, RsResourceRef* pResourceRef);
+
+    /**
+     * Remove all access map entries for all back references we stored so other clients
+     * reusing the same client handle won't get unauthorized access. Intended to be called
+     * during client destruction.
+     * @param[in] pClient This client
+     * @param[in] pServer Resource Server instance
+     */
+    void clientFreeAccessBackRefs(RsClient *pClient, RsServer *pServer);
+
+    /*
+     * Set the start handle and range for this client's handle generator.
+     *
+     * @note Supplying a range and size of 0 will set the generator to the default start handle and range
+     * @note The handle generator can only be set before any handle has been generated
+     *
+     * @param[in] pClient
+     * @param[in] handleRangeStart
+     * @param[in] handleRangeSize
+     */
+    NV_STATUS clientSetHandleGenerator(RsClient *pClient, NvHandle handleRangeStart, NvHandle handleRangeSize);
+
+    /**
+     * Verify whether a client is able to share a resource under a certain share policy
+     *
+     * @param[in] pClient      Client attempting to share the resource
+     * @param[in] pResourceRef The resource being shared
+     * @param[in] pSharePolicy The policy under which the resource is to be shared
+     * @param[in] pCallContext The context of the call intending to perform the share
+     */
+    NV_STATUS clientCanShareResource(RsClient *pClient, RsResourceRef *pResourceRef,
+                                     RS_SHARE_POLICY *pSharePolicy, CALL_CONTEXT *pCallContext);
+
+    /**
+     * Share access to a resource with other clients under the specified share policy.
+     *
+     * @param[in] pClient      This client
+     * @param[in] pResourceRef Resource reference which is sharing access
+     * @param[in] pSharePolicy The policy under which the resource is sharing access
+     */
+    virtual NV_STATUS clientShareResource(RsClient *pClient, RsResourceRef *pResourceRef,
+                                          RS_SHARE_POLICY *pSharePolicy,
+                                          CALL_CONTEXT *pCallContext);
+
+    /**
+     * Share access to a resource with a specific target client under the specified share policy.
+     *
+     * @param[in] pClient      This client
+     * @param[in] pResourceRef Resource reference which is sharing access
+     * @param[in] pSharePolicy The policy under which the resource is sharing access
+     */
+    NV_STATUS clientShareResourceTargetClient(RsClient *pClient, RsResourceRef *pResourceRef,
+                                              RS_SHARE_POLICY *pSharePolicy, CALL_CONTEXT *pCallContext);
+    /*
+     * Set the start handle and range for this client's restricted handle
+     * range. This range of handles cannot be explicitly requested. Any
+     * restricted handles that are in the client's resource handle generator
+     * range can still be generated.
+ * + * @note Supplying a range and size of 0 will disable the restricted range + * @note The handle generator can only be set before any handle has been generated + * + * @param[in] pClient + * @param[in] handleRangeStart + * @param[in] handleRangeSize + */ + NV_STATUS clientSetRestrictedRange(RsClient *pClient, NvHandle handleRangeStart, NvU32 handleRangeSize); +}; +MAKE_INTRUSIVE_LIST(RsDisabledClientList, RsClient, disabledClientNode); + +/** + * Get an iterator to the elements in the client's resource map + * @param[in] pClient + * @param[in] pScopeRef Restrict the iteration based on this reference [optional] + * @param[in] internalClassId Only iterate over resources with this class id [optional] + * @param[in] type RS_ITERATE_CHILDREN, RS_ITERATE_DESCENDANTS, RS_ITERATE_CACHED, RS_ITERATE_DEPENDANTS + * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + * + * @note If type=RS_ITERATE_CHILDREN, pScopeRef will restrict iteration to children of the scope ref + * @note If type=RS_ITERATE_DESCENDANTS, pScopeRef will restrict iteration to descendants of the scope ref + * @note If type=RS_ITERATE_CACHED, pScopeRef will restrict iteration to references cached by the scope ref + */ +RS_ITERATOR clientRefIter(RsClient *pClient, RsResourceRef *pScopeRef, NvU32 internalClassId, RS_ITER_TYPE type, NvBool bExactMatch); + +/** + * Get the next iterator to the elements in the client's resource map + * @param[in] pClient + * @param[inout] pIt The iterator + */ +NvBool clientRefIterNext(RsClient *pClient, RS_ITERATOR *pIt); + +/** + * Get an iterator to the elements in the client's resource map. + * + * This iterator will visit all descendants in pre-order according to the parent-child + * resource hierarchy. + * + * @param[in] pClient + * @param[in] pScopeRef Restrict the iteration based on this reference [optional] + * @param[in] internalClassId Only iterate over resources with this class id [optional] + * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + */ +RS_ORDERED_ITERATOR clientRefOrderedIter(RsClient *pClient, RsResourceRef *pScopeRef, NvU32 internalClassId, NvBool bExactMatch); + +/** + * Get the next ordered iterator to the elements in the client's resource map + * @param[in] pClient + * @param[inout] pIt The iterator + */ +NvBool clientRefOrderedIterNext(RsClient *pClient, RS_ORDERED_ITERATOR *pIt); + +/** + * Release all CPU address mappings for a resource + * + * @param[in] pClient Client that owns the resource + * @param[in] pCallContext Caller information (which includes the resource reference whose mappings will be freed) + * @param[in] pLockInfo Information about which locks are already held, for recursive calls + */ +NV_STATUS clientUnmapResourceRefMappings(RsClient *pClient, CALL_CONTEXT *pCallContext, RS_LOCK_INFO *pLockInfo); + +/** + * RsResource interface to a RsClient + * + * This allows clients to be interfaced with as-if they were resources (e.g., + * to perform a control call on a client). + * + * An RsClientResource is automatically allocated under a client as a top-level + * object when that client is allocated and cannot be explicitly freed. Only + * one RsClientResource is permitted per-client. + * + * Any resource allocated under a client will be a descendant of the client + * proxy resource. 
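As a usage note for the iterator API above, the following is an illustrative editorial sketch, not part of this patch; pScopeRef and internalClassId stand in for whatever scope reference and class id the caller cares about.

/*
 * Illustrative sketch: visit every direct child of pScopeRef whose internal
 * class id matches internalClassId exactly.
 */
static void visitChildren(RsClient *pClient, RsResourceRef *pScopeRef, NvU32 internalClassId)
{
    RS_ITERATOR it = clientRefIter(pClient, pScopeRef, internalClassId,
                                   RS_ITERATE_CHILDREN, NV_TRUE /* bExactMatch */);

    while (clientRefIterNext(pClient, &it))
    {
        RsResourceRef *pRef = it.pResourceRef;
        (void)pRef; /* ... operate on the child reference here ... */
    }
}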
+ * + */ +NVOC_PREFIX(clientres) class RsClientResource : RsResource +{ +public: + NV_STATUS clientresConstruct(RsClientResource* pClientRes, CALL_CONTEXT *pCallContext, RS_RES_ALLOC_PARAMS_INTERNAL *pParams) + : RsResource(pCallContext, pParams); + void clientresDestruct(RsClientResource* pClientRes); + +// private: + RsClient* pClient; +}; + +/** + * Client destruction parameters + */ +struct RS_CLIENT_FREE_PARAMS_INTERNAL +{ + NvHandle hDomain; ///< [in] The parent domain + NvHandle hClient; ///< [in] The client handle + NvBool bHiPriOnly; ///< [in] Only free high priority resources + NvBool bDisableOnly; ///< [in] Only disable the listed clients, do not free them yet + NvU32 state; ///< [in] User-defined state + + RS_RES_FREE_PARAMS_INTERNAL *pResFreeParams; ///< [in] Necessary for locking state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * Return an iterator to a resource reference multi-map + * @param[in] pIndex The multi-map to iterate + * @param[in] index Return only the references belonging to this index + */ +RsIndexIter indexRefIter(RsIndex *pIndex, NvU32 index); + +/** + * Return an iterator to all resource references in a multi-map + * @param[in] pIndex The multi-map to iterate + */ +RsIndexIter indexRefIterAll(RsIndex *pIndex); + +/** + * Get the next iterator in a resource reference multi-map + * @param[in] pIt Iterator + */ +NvBool indexRefIterNext(RsIndexIter *pIt); + +/* @} */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/nvidia/inc/libraries/resserv/rs_domain.h b/src/nvidia/inc/libraries/resserv/rs_domain.h new file mode 100644 index 0000000..856568c --- /dev/null +++ b/src/nvidia/inc/libraries/resserv/rs_domain.h @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _RS_DOMAIN_H_ +#define _RS_DOMAIN_H_ + +#include "resserv/resserv.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup RsDomain + * @addtogroup RsDomain + * @{*/ + +/** + * @brief Domains are being re-worked + */ +struct RsDomain +{ + NvU32 dummy; +}; + +/** + * Construct a domain instance + * @param[in] pDomain This domain + * @param[in] pAllocator + * @param[in] hDomain The handle for this domain + * @param[in] hParentDomain The handle for the parent domain + * @param[in] pAccessControl The privileges of the domain + */ +NV_STATUS +domainConstruct +( + RsDomain *pDomain, + PORT_MEM_ALLOCATOR *pAllocator, + NvHandle hDomain, + NvHandle hParentDomain, + ACCESS_CONTROL *pAccessControl +); + +/** + * Destruct a domain instance + * @param[in] pDomain The domain to destruct + */ +NV_STATUS +domainDestruct +( + RsDomain *pDomain +); + +/* @} */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/nvidia/inc/libraries/resserv/rs_resource.h b/src/nvidia/inc/libraries/resserv/rs_resource.h new file mode 100644 index 0000000..05e9c52 --- /dev/null +++ b/src/nvidia/inc/libraries/resserv/rs_resource.h @@ -0,0 +1,871 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once +#include "g_rs_resource_nvoc.h" + +#ifndef _RS_RESOURCE_H_ +#define _RS_RESOURCE_H_ + +#include "nvport/nvport.h" +#include "resserv/resserv.h" +#include "nvoc/object.h" +#include "resserv/rs_access_map.h" + +#ifdef __cplusplus +extern "C" { +#endif + +class RsSession; + +/** + * @defgroup RsResource + * @addtogroup RsResource + * @{*/ + +#define ALLOC_STATE_INTERNAL_CLIENT_HANDLE NVBIT(5) + +/* + * Locking operations for lock-metering + */ +#define RS_LOCK_TRACE_INVALID 1 +#define RS_LOCK_TRACE_ACQUIRE 1 +#define RS_LOCK_TRACE_RELEASE 2 +#define RS_LOCK_TRACE_ALLOC 3 +#define RS_LOCK_TRACE_FREE 4 +#define RS_LOCK_TRACE_CTRL 5 +#define RS_LOCK_TRACE_MAP 6 +#define RS_LOCK_TRACE_UNMAP 7 + +/** + * Context information for top-level, resource-level, and client-level locking + * operations + */ +struct RS_LOCK_INFO +{ + RsClient *pClient; ///< Pointer to client that was locked (if any) + RsClient *pSecondClient; ///< Pointer to second client, for dual-client locking + RsResourceRef *pContextRef; ///< User-defined reference + RsResourceRef *pResRefToBackRef; ///< Resource from which to infer indirect GPU dependencies + RsSession *pSession; ///< Session object to be locked, if any + NvU32 flags; ///< RS_LOCK_FLAGS_* + NvU32 state; ///< RS_LOCK_STATE_* + NvU32 gpuMask; + NvU8 traceOp; ///< RS_LOCK_TRACE_* operation for lock-metering + NvU32 traceClassId; ///< Class of initial resource that was locked for lock metering +}; + +struct RS_RES_ALLOC_PARAMS_INTERNAL +{ + NvHandle hClient; ///< [in] The handle of the resource's client + NvHandle hParent; ///< [in] The handle of the resource's parent. This may be a client or another resource. + NvHandle hResource; ///< [inout] Server will assign a handle if this is 0, or else try the value provided + NvU32 externalClassId; ///< [in] External class ID of resource + NvHandle hDomain; ///< UNUSED + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + RsClient *pClient; ///< [out] Cached client + RsResourceRef *pResourceRef; ///< [out] Cached resource reference + NvU32 allocFlags; ///< [in] Allocation flags + NvU32 allocState; ///< [inout] Allocation state + API_SECURITY_INFO *pSecInfo; + + void *pAllocParams; ///< [in] Copied-in allocation parameters + NvU32 paramsSize; ///< [in] Copied-in allocation parameters size + + // ... Dupe alloc + RsClient *pSrcClient; ///< The client that is sharing the resource + RsResourceRef *pSrcRef; ///< Reference to the resource that will be shared + + RS_ACCESS_MASK *pRightsRequested; ///< [in] Access rights requested on the new resource + // Buffer for storing contents of user mask. Do not use directly, use pRightsRequested instead. + RS_ACCESS_MASK rightsRequestedCopy; + + RS_ACCESS_MASK *pRightsRequired; ///< [in] Access rights required to alloc this object type +}; + +struct RS_RES_DUP_PARAMS_INTERNAL +{ + NvHandle hClientSrc; ///< [in] The handle of the source resource's client + NvHandle hResourceSrc; ///< [in] The handle of the source resource. + NvHandle hClientDst; ///< [in] The handle of the destination resource's client (may be different from source client) + NvHandle hParentDst; ///< [in] The handle of the destination resource's parent. + NvHandle hResourceDst; ///< [inout] The handle of the destination resource. Generated if 0. 
+ void *pShareParams; ///< [in] Copied-in sharing parameters
+ NvU32 flags; ///< [in] Flags to denote special cases (Bug 2859347 tracks removal)
+ // Internal use only
+ RsClient *pSrcClient;
+ RsClient *pDstClient;
+ RsResourceRef *pSrcRef;
+ RsResourceRef *pDstParentRef;
+ API_SECURITY_INFO *pSecInfo; ///< [in] Security info
+ RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state
+};
+
+struct RS_RES_SHARE_PARAMS_INTERNAL
+{
+ NvHandle hClient; ///< [in] The handle of the owner's client
+ NvHandle hResource; ///< [in] The handle of the resource.
+ RS_SHARE_POLICY *pSharePolicy; ///< [in] The policy to share with
+
+ // Internal use only
+ API_SECURITY_INFO *pSecInfo; ///< [in] Security info
+ RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state
+};
+
+#define RS_IS_COPY_CTOR(pParams) ((pParams)->pSrcRef != NULL)
+
+struct RS_RES_FREE_PARAMS_INTERNAL
+{
+ NvHandle hClient; ///< [in] The handle of the resource's client
+ NvHandle hResource; ///< [in] The handle of the resource
+ NvBool bInvalidateOnly; ///< [in] Free the resource, but don't release its handle
+ NvHandle hDomain; ///< UNUSED
+
+ // Internal use only
+ NvBool bHiPriOnly; ///< [in] Only free if this is a high priority resource
+ NvBool bDisableOnly; ///< [in] Disable the target instead of freeing it (only applies to clients)
+ RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state
+ NvU32 freeFlags; ///< [in] Flags for the free operation
+ NvU32 freeState; ///< [inout] Free state
+ RsResourceRef *pResourceRef; ///< [inout] Cached RsResourceRef
+ NV_STATUS status; ///< [out] Status of free operation
+ API_SECURITY_INFO *pSecInfo; ///< [in] Security info
+};
+
+struct NVOC_EXPORTED_METHOD_DEF;
+class OBJGPU;
+class OBJGPUGRP;
+
+//
+// RS_RES_CONTROL_PARAMS
+//
+// This structure encapsulates data sent to the cmd-specific rmctrl
+// handlers, along with the arguments supplied by the requesting
+// client (hClient, hObject, cmd, pParams, paramsSize).
+//
+struct RS_RES_CONTROL_PARAMS_INTERNAL
+{
+ NvHandle hClient; // client-specified NV01_ROOT object handle
+ NvHandle hObject; // client-specified object handle
+ NvU32 cmd; // client-specified command #
+ NvU32 flags; // flags related to control call execution
+ void *pParams; // client-specified params (in kernel space)
+ NvU32 paramsSize; // client-specified size of pParams in bytes
+
+ NvHandle hParent; // handle of hObject parent
+ OBJGPU *pGpu; // ptr to OBJGPU struct if applicable
+ OBJGPUGRP *pGpuGrp; // ptr to OBJGPUGRP struct if applicable
+ RsResourceRef *pResourceRef; // ptr to RsResourceRef if object is managed by
+ // Resource Server
+ API_SECURITY_INFO secInfo; // information on privilege level and pointer location (user/kernel)
+ RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state
+ RS_CONTROL_COOKIE *pCookie;
+ NvBool bInternal; // True if control call was not issued from an external client
+ NvBool bDeferredApi; // Indicates ctrl is being dispatched via deferred API
+
+ struct RS_RES_CONTROL_PARAMS_INTERNAL *pLegacyParams; // RS-TODO removeme
+};
+
+struct RS_RES_DTOR_PARAMS
+{
+ CALL_CONTEXT *pFreeContext;
+ RS_RES_FREE_PARAMS_INTERNAL *pFreeParams;
+};
+
+/**
+ * Base class for all resources. Mostly a pure virtual interface which
+ * should be overridden to implement resource specific behavior.
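One consequence of the params layout above worth illustrating: a dup request reaches a resource constructor with pSrcRef populated, so RS_IS_COPY_CTOR distinguishes the two paths. The sketch below is editorial, not part of this patch; MyResource and myresConstruct are hypothetical names for a derived class.

/*
 * Illustrative sketch (hypothetical subclass): branch between the
 * copy-constructor (dup) path and the fresh-allocation path.
 */
NV_STATUS myresConstruct(MyResource *pMyResource, CALL_CONTEXT *pCallContext,
                         RS_RES_ALLOC_PARAMS_INTERNAL *pParams)
{
    if (RS_IS_COPY_CTOR(pParams))
    {
        /* Dup path: copy state from the source via pParams->pSrcRef */
        return NV_OK;
    }

    /* Fresh allocation: consume the copied-in pParams->pAllocParams */
    return NV_OK;
}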
+ */
+NVOC_PREFIX(res) class RsResource : Object
+{
+public:
+// private:
+
+ /**
+ * Back-reference to the RsResourceRef that owns this object
+ */
+ RsResourceRef *pResourceRef;
+
+ /**
+ * Params for dtor
+ */
+ RS_RES_DTOR_PARAMS dtorParams;
+
+ /**
+ * Flag that indicates whether the RsResource was constructed. If the params
+ * passed to resConstruct are NULL the Resource ctor and dtor will be skipped.
+ * This is only added for migration where the entire class hierarchy can't be
+ * converted at once.
+ *
+ * RS-TODO: Remove once migrations are finished (added initially for
+ * DmaObject)
+ */
+ NvBool bConstructed;
+
+public:
+
+ /**
+ * Resource initializer
+ * @param[in] pResource Resource object to init
+ * @param[in] pCallContext
+ * @param[in] pParams Resource create parameters
+ */
+ NV_STATUS resConstruct(RsResource *pResource, CALL_CONTEXT *pCallContext, RS_RES_ALLOC_PARAMS_INTERNAL *pParams);
+
+ /**
+ * Returns TRUE if the resource can be copied
+ */
+ virtual NvBool resCanCopy(RsResource *pResource);
+
+ /**
+ * Checks whether this resource is a duplicate of the resource referenced
+ * by hMemory; sets *pDuplicate accordingly
+ */
+ virtual NV_STATUS resIsDuplicate(RsResource *pResource, NvHandle hMemory, NvBool *pDuplicate);
+
+ /**
+ * Resource destructor
+ * @param[in] pResource Resource object to destruct
+ */
+ void resDestruct(RsResource *pResource);
+
+ /**
+ * Resource destructor prologue (occurs before mappings are torn down)
+ * @param[in] pResource Resource object to destruct
+ */
+ virtual void resPreDestruct(RsResource *pResource);
+
+ /**
+ * Resource dtors take no parameters, so set them here.
+ * @param[in] pResource
+ * @param[in] pCallContext
+ * @param[in] pParams Resource destroy parameters
+ */
+ NV_STATUS resSetFreeParams(RsResource *pResource, CALL_CONTEXT *pCallContext, RS_RES_FREE_PARAMS_INTERNAL *pParams);
+
+ /**
+ * Resource dtors take no parameters, so get them here.
+ * @param[in] pResource
+ * @param[out] ppCallContext
+ * @param[out] ppParams Resource destroy parameters
+ */
+ NV_STATUS resGetFreeParams(RsResource *pResource, CALL_CONTEXT **ppCallContext, RS_RES_FREE_PARAMS_INTERNAL **ppParams);
+
+ /**
+ * Look up a control call entry from an NVOC export table
+ *
+ * @param[in] pResource
+ * @param[in] pParams
+ * @param[out] ppEntry
+ */
+ NV_STATUS resControlLookup(RsResource *pResource,
+ RS_RES_CONTROL_PARAMS_INTERNAL *pParams,
+ const struct NVOC_EXPORTED_METHOD_DEF **ppEntry);
+
+ /**
+ * Dispatch resource control call
+ * @param[in] pResource
+ * @param[in] pCallContext
+ * @param[in] pParams
+ */
+ virtual NV_STATUS resControl(RsResource *pResource, CALL_CONTEXT *pCallContext,
+ RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
+
+ /**
+ * Early filter for control calls we don't want to service on a particular platform
+ *
+ * @param[in] pResource
+ * @param[in] pCallContext
+ * @param[in] pParams
+ */
+ virtual NV_STATUS resControlFilter(RsResource *pResource, CALL_CONTEXT *pCallContext,
+ RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
+
+ /**
+ * Serialize the control parameters if they are going to GSP/Host, are not already
+ * serialized, and support serialization.
+ * Otherwise, deserialize the control parameters if necessary and replace the inner
+ * params pointer with the deserialized params.
+ *
+ * @param[in] pResource
+ * @param[in] pCallContext
+ * @param[in] pParams
+ */
+ virtual NV_STATUS resControlSerialization_Prologue(RsResource *pResource, CALL_CONTEXT *pCallContext,
+ RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
+
+ /**
+ * Deserialize the parameters returned from GSP if the client did not pass serialized params.
+ * Otherwise, serialize the control parameters if the client expects it and restore the
+ * original inner params pointer.
+ *
+ * @param[in] pResource
+ * @param[in] pCallContext
+ * @param[in] pParams
+ */
+ virtual void resControlSerialization_Epilogue(RsResource *pResource, CALL_CONTEXT *pCallContext,
+ RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
+
+ /**
+ * Operations performed right before the control call is executed. Stubbed by default.
+ *
+ * @param[in] pResource
+ * @param[in] pCallContext
+ * @param[in] pParams
+ */
+ virtual NV_STATUS resControl_Prologue(RsResource *pResource, CALL_CONTEXT *pCallContext,
+ RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
+
+ /**
+ * Operations performed right after the control call is executed. No return value (void).
+ *
+ * @param[in] pResource
+ * @param[in] pCallContext
+ * @param[in] pParams
+ */
+ virtual void resControl_Epilogue(RsResource *pResource, CALL_CONTEXT *pCallContext,
+ RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
+
+ /**
+ * Creates a mapping of the underlying resource in the physical address space of the requested process.
+ *
+ * The difference between serverResMap and resMap is that resMap provides a locked physical address
+ * and serverResMap creates a virtual mapping to the physical address. For virtualization, the
+ * tandem resource servers should be able to map a host physical address in a guest user space
+ * VA without any resource-specific VA mapping code.
+ *
+ * Not all resources support mapping.
+ *
+ * @param[in] pResource Resource to map
+ * @param[in] pCallContext
+ * @param[in] pParams CPU mapping parameters
+ * @param[inout] pCpuMapping
+ */
+ virtual NV_STATUS resMap(RsResource *pResource,
+ CALL_CONTEXT *pCallContext,
+ RS_CPU_MAP_PARAMS *pParams,
+ RsCpuMapping *pCpuMapping);
+
+ /**
+ * Release a virtual address mapping
+ * @param[in] pResource Resource to unmap
+ * @param[in] pCallContext
+ * @param[in] pCpuMapping
+ */
+ virtual NV_STATUS resUnmap(RsResource *pResource, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping);
+
+ /**
+ * Returns true if partial unmap is supported by the resource.
+ * If true, resUnmapFrom() can be called to unmap a mapping partially.
+ */
+ virtual NvBool resIsPartialUnmapSupported(RsResource *pResource) { return NV_FALSE; }
+
+ /**
+ * Maps to this resource from another resource.
+ * Not all resources can be mapped to; in such a case this returns NV_ERR_INVALID_OBJECT_HANDLE.
+ *
+ * @param[in] pResource
+ * @param[inout] pParams
+ */
+ virtual NV_STATUS resMapTo(RsResource *pResource, RS_RES_MAP_TO_PARAMS *pParams);
+
+ /**
+ * Unmaps a resource mapped to this resource.
+ * Not all resources can be unmapped; in such a case this returns NV_ERR_INVALID_OBJECT_HANDLE.
+ *
+ * @param[in] pResource
+ * @param[in] pParams
+ */
+ virtual NV_STATUS resUnmapFrom(RsResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams);
+
+ /**
+ * Gets a refcount for any underlying shared resource
+ * @returns refcount
+ */
+ virtual NvU32 resGetRefCount(RsResource *pResource);
+
+ /**
+ * Decides whether the invoking client should be granted an access right on this resource.
+ *
+ * The purpose of providing this function is to provide subclassed resources the ability
+ * to set custom policies for granting access rights. These policies can be implemented
+ * based on the ambient privilege of the caller, such as the PID.
+ *
+ * @param[in] pResource The resource for which the access right will be granted
+ * @param[in] pInvokingClient The client requesting the access right
+ * @param[in] pAllocParams The alloc params struct passed into the alloc call,
+ * NULL if called from outside the Alloc path
+ * @param[in] accessRight The access right to be granted
+ * @returns NV_TRUE if the access right should be granted, and NV_FALSE otherwise
+ */
+ virtual NvBool resAccessCallback(RsResource *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight);
+
+ /**
+ * Decides whether rights can be shared with a client under a certain policy.
+ *
+ * The purpose of this function is to provide subclasses the ability to set custom definitions
+ * for how certain policies will share. Certain share types can then be created to work based
+ * on components not stored directly in resserv, such as PID.
+ *
+ * @param[in] pResource The resource being shared
+ * @param[in] pInvokingClient The client being shared with
+ * @param[in] pParentRef dstParent if calling from DupObject, NULL otherwise
+ * @param[in] pSharePolicy The policy under which to share
+ * @returns NV_TRUE if the share policy applies and rights should be shared, NV_FALSE otherwise
+ */
+ virtual NvBool resShareCallback(RsResource *pResource, RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy);
+
+ /**
+ * Adds dependants that aren't in childRefMap or depRefMap to the pending free list.
+ *
+ * Due to RAM constraints, some classes can add more dependants that aren't
+ * represented in childRefMap or depRefMap. They can override this function
+ * to put them in the pending free list while we are updating it.
+ * + * @param[in] pClient + * @param[in] pResource The RsResource with potential additional dependants + * @param[in] pReference The pReference to pass in to + * clientUpdatePendingFreeList() + */ + virtual void resAddAdditionalDependants(RsClient *pClient, RsResource *pResource, RsResourceRef *pReference); +}; + +/* @} */ + +class OBJGPU; + +/** + * @defgroup RsCpuMapping + * @addtogroup RsCpuMapping + * @{*/ +struct RsCpuMapping +{ + NvU64 offset; + NvU64 length; + NvU32 flags; + NvP64 pLinearAddress; + RsResourceRef *pContextRef; ///< Context resource that may be needed for the mapping + RsResourceRef *pResourceRef; ///< Resource that is actually getting mapped + ListNode backRefNode; ///< Node to context backreference + void *pContext; ///< Additional context data for the mapping + NvU32 processId; + + RS_CPU_MAPPING_PRIVATE *pPrivate; ///< Opaque struct allocated and freed by resserv on behalf of the user +}; +MAKE_LIST(RsCpuMappingList, RsCpuMapping); + +/** + * CPU mapping parameters + */ +struct RS_CPU_MAP_PARAMS +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvU64 offset; ///< [in] Offset into the resource + NvU64 length; ///< [in] Size of the region to map + NvP64 *ppCpuVirtAddr; + NvU32 flags; ///< [in] Resource-specific flags + + // Passed from RM into CpuMapping + NvU32 protect; ///< [in] Protection flags + NvBool bKernel; + + /// [in] hContext Handle of resource that provides a context for the mapping (e.g., subdevice for channel map) + NvHandle hContext; + + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * CPU unmapping params for resource server tests + */ +struct RS_CPU_UNMAP_PARAMS +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvP64 pLinearAddress; ///< [in] Address of mapped memory + NvU32 flags; ///< [in] Resource-specific flags + NvU32 processId; + NvBool bTeardown; ///< [in] Unmap operation is due to client teardown + + /// [in] hContext Handle of resource that provides a context for the mapping (e.g., subdevice for channel map) + NvHandle hContext; + + // RM-only + void *pProcessHandle; + + NvBool (*fnFilter)(RsCpuMapping*); ///< [in] Mapping-filter function + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * CPU mapping back-reference + */ +MAKE_INTRUSIVE_LIST(RsCpuMappingBackRefList, RsCpuMapping, backRefNode); +/* @} */ + +/** + * @defgroup RsInterMapping + * @addtogroup RsInterMapping + * @{*/ +struct RS_INTER_MAP_PARAMS +{ + NvHandle hClient; + NvHandle hMapper; + NvHandle hMappable; + NvHandle hDevice; + NvU64 offset; + NvU64 length; + NvU32 flags; + NvU32 flags2; + NvU32 kindOverride; + NvU64 dmaOffset; ///< [inout] RS-TODO rename this + void *pMemDesc; ///< [out] + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info + + RS_INTER_MAP_PRIVATE *pPrivate; ///< Opaque struct controlled by caller +}; + +struct RS_INTER_UNMAP_PARAMS +{ + NvHandle hClient; + NvHandle hMapper; + NvHandle hDevice; + NvU32 flags; + NvU64 dmaOffset; ///< [in] RS-TODO rename this + NvU64 size; + + // Internal use only + NvHandle hMappable; + void *pMemDesc; ///< MEMORY_DESCRIPTOR * + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info + + RS_INTER_UNMAP_PRIVATE *pPrivate; ///< Opaque struct controlled by caller +}; + +/** + * 
Inter-mapping information + * Used to keep track of inter-mappings and unmap them on free + */ +struct RsInterMapping +{ + RsResourceRef *pMapperRef; ///< The resource that created and owns this mapping (this resource) + RsResourceRef *pMappableRef; ///< The resource being mapped by the mapper (e.g. hMemory) + RsResourceRef *pContextRef; ///< A resource used to provide additional context for the mapping (e.g. hDevice) + ListNode mappableNode; + ListNode contextNode; + NvU32 flags; ///< Flags passed when mapping, same flags also passed when unmapping + NvU32 flags2; ///< Additional flags for the mapping + NvU64 dmaOffset; + NvU64 size; + void *pMemDesc; +}; +MAKE_LIST(RsInterMappingList, RsInterMapping); + +/** + * Inter-mapping back-reference + */ +MAKE_INTRUSIVE_LIST(RsInterMappingBackRefMappableList, RsInterMapping, mappableNode); +MAKE_INTRUSIVE_LIST(RsInterMappingBackRefContextList, RsInterMapping, contextNode); +/* @} */ + +typedef struct RS_RESOURCE_DESC RS_RESOURCE_DESC; +RS_RESOURCE_DESC *RsResInfoByExternalClassId(NvU32); +NvU32 RsResInfoGetInternalClassId(const RS_RESOURCE_DESC *); + +/** + * A reference to a resource that has been allocated in RM. + */ +struct RsResourceRef +{ + RsClient *pClient; ///< Pointer to the client that owns the ref + RsResource *pResource; ///< Pointer to the actual resource + NvHandle hResource; ///< Resource handle + struct RsResourceRef *pParentRef; ///< Parent resource reference + RsIndex childRefMap; ///< Child reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + + /** + * Cached reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * The resource reference cache is a one-way association between this resource reference and + * any other resource reference. Resource server does not populate the cache so it is up to the + * resource implementation to manage it. clientRefIter can be used to iterate this cache. + */ + RsIndex cachedRefMap; + + /** + * Dependants reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * A map of all resources that strongly depend on this resource. + */ + RsIndex depRefMap; + + /** + * Dependants back-reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * AKA dependencies map + * + * A map of all resources that this resource strongly depends on. + */ + RsIndex depBackRefMap; + + /** + * Policy under which this resource can be shared with other clients + */ + RsShareList sharePolicyList; + NvBool bSharePolicyListModified; + + /** + * A mask of the access rights that the owner client has on this object. 
+ */ + RS_ACCESS_MASK accessMask; + + const RS_RESOURCE_DESC *pResourceDesc; ///< Cached pointer to the resource descriptor + NvU32 internalClassId; ///< Internal resource class id + NvU32 externalClassId; ///< External resource class id + NvU32 depth; ///< The depth of this reference in the resource graph + NvBool bInvalidated; ///< Reference has been freed but not removed yet + + RsCpuMappingList cpuMappings; ///< List of CPU mappings to the resource from this resource reference + RsCpuMappingBackRefList backRefs; ///< List of references that have this reference as a mapping context + + RsInterMappingList interMappings; ///< List of inter-resource mappings created by this resource + RsInterMappingBackRefMappableList interBackRefsMappable; ///< List of inter-resource mappings this resource has been mapped into + RsInterMappingBackRefContextList interBackRefsContext; ///< List of inter-resource mappings this context has been mapped into + + RsSession *pSession; ///< If set, this ref depends on a shared session + RsSession *pDependantSession; ///< If set, this ref is depended on by a shared session + + ListNode freeNode; ///< Links to the client's pendingFreeList +}; +MAKE_MAP(RsRefMap, RsResourceRef); +MAKE_INTRUSIVE_LIST(RsRefFreeList, RsResourceRef, freeNode); + + +// Iterator data structure to save state while walking through a list +struct RS_ITERATOR +{ + union + { + RsRefMapIter mapIt; ///< Map iterator for all resource references under a client + RsIndexIter idxIt; ///< Index iterator for child references of a resource reference + }; + + RsClient *pClient; + RsResourceRef *pScopeRef; ///< Reference to the resource that limits the scope of iteration + NvU32 internalClassId; + RsResourceRef *pResourceRef; ///< Resource ref that is being iterated over + NvU8 type; ///< RS_ITERATE_* + NvBool bExactMatch; ///< If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId +}; + +// Iterator data structure to save state while walking through a resource tree in pre-order +struct RS_ORDERED_ITERATOR +{ + NvS8 depth; ///< Depth of index stack; special value of -1 implies that the scope reference should be iterated over as well + RsIndexIter idxIt[RS_MAX_RESOURCE_DEPTH+1]; ///< Stack of index iterators for child references of a resource reference + + RsClient *pClient; + RsResourceRef *pScopeRef; ///< Reference to the resource that limits the scope of iteration + NvU32 internalClassId; + NvBool bExactMatch; ///< If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + + RsResourceRef *pResourceRef; ///< Resource ref that is being iterated over +}; + +/** + * Macro for looking up a reference from a resource + */ +#define RES_GET_REF(pResource) (staticCast((pResource), RsResource)->pResourceRef) + +/** + * Macro for looking up a resource handle from a resource + */ +#define RES_GET_HANDLE(pResource) (RES_GET_REF(pResource)->hResource) + +/** + * Macro for looking up a resource's external class from a resource + */ +#define RES_GET_EXT_CLASS_ID(pResource) (RES_GET_REF(pResource)->externalClassId) + +/** + * Macro for looking up a resource's parent handle from a resource + */ +#define RES_GET_PARENT_HANDLE(pResource) (RES_GET_REF(pResource)->pParentRef->hResource) + +/** + * Macro for looking up a client from a resource + */ +#define RES_GET_CLIENT(pResource) (RES_GET_REF(pResource)->pClient) + +/** + * Macro for looking up a client handle from a resource + */ +#define RES_GET_CLIENT_HANDLE(pResource) 
(RES_GET_REF(pResource)->pClient->hClient)
+
+/**
+ * Find a CPU mapping owned by a resource reference
+ *
+ * @param[in] pResourceRef
+ * @param[in] pAddress The CPU virtual address of the mapping to search for
+ * @param[out] ppMapping The returned mapping
+ */
+NV_STATUS refFindCpuMapping(RsResourceRef *pResourceRef, NvP64 pAddress, RsCpuMapping **ppMapping);
+
+/**
+ * Find a CPU mapping owned by a resource reference
+ *
+ * @param[in] pResourceRef
+ * @param[in] pAddress The CPU virtual address of the mapping to search for
+ * @param[in] fnFilter A user-provided filtering function that determines which mappings to ignore.
+ * If fnFilter is provided, then only mappings for which fnFilter(mapping) returns NV_TRUE
+ * will be returned. All mappings will be searched over if fnFilter is NULL.
+ * A sketch of such a filter appears below.
+ * @param[out] ppMapping The returned mapping
+ */
+NV_STATUS refFindCpuMappingWithFilter(RsResourceRef *pResourceRef, NvP64 pAddress, NvBool (*fnFilter)(RsCpuMapping*), RsCpuMapping **ppMapping);
+
+/**
+ * Find the first child object of a given type
+ *
+ * @param[in] pParentRef
+ * @param[in] internalClassId
+ * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId
+ * @param[out] ppResourceRef The returned RsResourceRef (Optional)
+ */
+NV_STATUS refFindChildOfType(RsResourceRef *pParentRef, NvU32 internalClassId, NvBool bExactMatch, RsResourceRef **ppResourceRef);
+
+/**
+ * Traverse up the reference parent-child hierarchy to find an ancestor reference of a given type
+ *
+ * @param[in] pDescendantRef
+ * @param[in] internalClassId
+ * @param[out] ppAncestorRef The returned RsResourceRef (Optional)
+ */
+NV_STATUS refFindAncestorOfType(RsResourceRef *pDescendantRef, NvU32 internalClassId, RsResourceRef **ppAncestorRef);
+
+/**
+ * Traverse up the reference parent-child hierarchy to find if a ref is a descendant of a given ancestor ref
+ *
+ * @param[in] pDescendantRef The node to start searching from (not included in the search)
+ * @param[in] pAncestorRef The node to search for in the parent-child hierarchy
+ */
+NvBool refHasAncestor(RsResourceRef *pDescendantRef, RsResourceRef *pAncestorRef);
+
+/**
+ * Add a new mapping to a reference's mapping list
+ * @param[in] pResourceRef The reference to add a mapping to
+ * @param[in] pMapParams The parameters used to initialize the mapping
+ * @param[in] pContextRef A reference to a resource that provides a context for the mapping
+ * @param[out] ppMapping Pointer to the allocated mapping [optional]
+ */
+NV_STATUS refAddMapping(RsResourceRef *pResourceRef, RS_CPU_MAP_PARAMS *pMapParams,
+ RsResourceRef *pContextRef, RsCpuMapping **ppMapping);
+
+/**
+ * Remove an existing mapping from a reference's mapping list and remove back-references to the mapping.
+ * @param[in] pResourceRef The reference to remove a mapping from
+ * @param[in] pMapping Pointer to the mapping to remove
+ */
+void refRemoveMapping(RsResourceRef *pResourceRef, RsCpuMapping *pMapping);
+
+/**
+ * Allocate the user-controlled private pointer within the RsCpuMapping struct.
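For the filtered lookup above, the filter is just a predicate over RsCpuMapping. The following is an editorial sketch, not code from this patch; filtering on processId against a module-scope pid is an assumed example policy.

/* Illustrative sketch: match only mappings created by a given process. */
static NvU32 g_targetPid; /* hypothetical: set by the caller before the search */

static NvBool mappingMatchesTargetPid(RsCpuMapping *pMapping)
{
    return (pMapping->processId == g_targetPid) ? NV_TRUE : NV_FALSE;
}

/*
 * Usage:
 *   g_targetPid = somePid;
 *   status = refFindCpuMappingWithFilter(pRef, pAddress,
 *                                        mappingMatchesTargetPid, &pMapping);
 */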
+ * Resserv will call this function to alloc the private struct when the mapping is created + * @param[in] pMapParams The parameters which were used to create the mapping + * @param[inout] pMapping Pointer to the mapping whose private struct should be allocated + */ +NV_STATUS refAllocCpuMappingPrivate(RS_CPU_MAP_PARAMS *pMapParams, RsCpuMapping *pMapping); + +/** + * Free the user-controlled private pointer within the RsCpuMapping struct. + * Resserv will call this function to free the private struct when the mapping is removed + * @param[inout] pMapping Pointer to the mapping whose private struct should be freed + */ +void refFreeCpuMappingPrivate(RsCpuMapping *pMapping); + +/** + * Add a dependency between this resource reference and a dependent reference. + * If this reference is freed, the dependent will be invalidated and torn down. + * + * @note Dependencies are implicit between a parent resource reference and child resource reference + * @note No circular dependency checking is performed + */ +NV_STATUS refAddDependant(RsResourceRef *pResourceRef, RsResourceRef *pDependantRef); + +/** + * Remove the dependency between this resource reference and a dependent resource reference. + */ +void refRemoveDependant(RsResourceRef *pResourceRef, RsResourceRef *pDependantRef); + +/** + * Find, Add, or Remove an inter-mapping between two resources to the Mapper's list of inter-mappings + * Inter-mappings are stored in the Mapper, and are matched by both the MappableRef and offset. + * + * @param[in] pMapperRef The reference which owns the inter-mapping + * @param[in] pMappableRef The reference which was mapped from to create the inter-mapping + * If NULL, will be ignored while matching inter-mappings + * @param[in] dmaOffset The offset value assigned while mapping, used to identify mappings + * @param[in] pContextRef A reference used during mapping and locking for additional context, used to identify mappings + * @param[inout] ppMapping Writes the resulting inter-mapping, if successfully created (Add) or found (Find) + * @param[in] pMapping The inter-mapping to remove (Remove) + */ +NV_STATUS refAddInterMapping(RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RsResourceRef *pContextRef, RsInterMapping **ppMapping); +void refRemoveInterMapping(RsResourceRef *pMapperRef, RsInterMapping *pMapping); + +/** + * Store a resource reference in another reference's cache. + * @param[in] pParentRef The resource reference that owns the cache + * @param[in] pResourceRef The resource reference to store in the cache + */ +NV_STATUS refCacheRef(RsResourceRef *pParentRef, RsResourceRef *pResourceRef); + +/** + * Remove a resource reference from another reference's cache + * @param[in] pParentRef The resource reference that owns the cache + * @param[in] pResourceRef The resource reference to de-index + */ +NV_STATUS refUncacheRef(RsResourceRef *pParentRef, RsResourceRef *pResourceRef); + +/** + * Determine whether a reference is queued for removal + * @param[in] pResourceRef + * @param[in] pClient + */ +NvBool refPendingFree(RsResourceRef *pResourceRef, RsClient *pClient); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/nvidia/inc/libraries/resserv/rs_server.h b/src/nvidia/inc/libraries/resserv/rs_server.h new file mode 100644 index 0000000..42adad3 --- /dev/null +++ b/src/nvidia/inc/libraries/resserv/rs_server.h @@ -0,0 +1,1199 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+#include "g_rs_server_nvoc.h"
+
+#ifndef _RS_SERVER_H_
+#define _RS_SERVER_H_
+
+#include "nvport/nvport.h"
+#include "resserv/resserv.h"
+#include "resserv/rs_client.h"
+#include "nvoc/object.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum CLIENT_LOCK_TYPE
+{
+ CLIENT_LOCK_SPECIFIC, // For locking specific RM clients encoded in the API
+ CLIENT_LOCK_ALL // For locking all RM clients currently in use
+};
+
+/**
+ * @defgroup RsServer
+ * @addtogroup RsServer
+ * @{*/
+
+/**
+ * Book-keeping for individual client locks
+ */
+struct CLIENT_ENTRY
+{
+ PORT_RWLOCK *pLock;
+ RsClient *pClient;
+ NvHandle hClient;
+ NvU64 lockOwnerTid; ///< Thread id of the write lock owner
+ volatile NvU32 lockReadOwnerCnt;
+ NvU32 refCount;
+ NvBool bPendingFree;
+ ListNode node;
+
+#if LOCK_VAL_ENABLED
+ LOCK_VAL_LOCK lockVal;
+#endif
+};
+
+MAKE_INTRUSIVE_LIST(RsClientList, CLIENT_ENTRY, node);
+MAKE_LIST(RsLockedClientList, CLIENT_ENTRY*);
+
+/**
+ * Base-class for objects that are shared among multiple
+ * RsResources (including RsResources from other clients)
+ */
+NVOC_PREFIX(shr) class RsShared : Object
+{
+public:
+ NV_STATUS shrConstruct(RsShared *pShared);
+ void shrDestruct(RsShared *pShared);
+
+ NvS32 refCount;
+ MapNode node;
+};
+MAKE_INTRUSIVE_MAP(RsSharedMap, RsShared, node);
+
+/**
+ * Utility class for objects that can reference
+ * multiple client handle spaces. Frees and control calls
+ * that occur on objects which reference an RsSession will
+ * need to acquire pLock first.
+ */
+NVOC_PREFIX(session) class RsSession : RsShared
+{
+public:
+ NV_STATUS sessionConstruct(RsSession *pSession);
+ void sessionDestruct(RsSession *pSession);
+
+ NV_STATUS sessionAddDependant(RsSession *pSession, RsResourceRef *pResourceRef);
+ NV_STATUS sessionAddDependency(RsSession *pSession, RsResourceRef *pResourceRef);
+ virtual void sessionRemoveDependant(RsSession *pSession, RsResourceRef *pResourceRef);
+ virtual void sessionRemoveDependency(RsSession *pSession, RsResourceRef *pResourceRef);
+
+ PORT_RWLOCK *pLock;
+#if LOCK_VAL_ENABLED
+ LOCK_VAL_LOCK lockVal;
+#endif
+
+ NvBool bValid;
+
+ RsResourceRefList dependencies;
+ RsResourceRefList dependants;
+// private:
+ NV_STATUS sessionCheckLocksForAdd(RsSession *pSession, RsResourceRef *pResourceRef);
+ void sessionCheckLocksForRemove(RsSession *pSession, RsResourceRef *pResourceRef);
+};
+
+// Iterator data structure to save state while walking through a map
+struct RS_SHARE_ITERATOR
+{
+ RsSharedMapIter mapIt;
+ NvU32 internalClassId;
+ RsShared *pShared; ///< Share that is being iterated over
+};
+
+/**
+ * Top-level structure that RMAPI and RM interface with
+ *
+ * This class is all that needs to be allocated to use the resource server
+ * library.
+ *
+ * The RsServer interface should be kept as narrow as possible. Map and
+ * MapTo are added because <1> the unmap variants operate on addresses and not
+ * handles and <2> having explicit knowledge of map operations in the server is
+ * helpful when dealing with multiple levels of address spaces (e.g., guest
+ * user-mode, guest kernel-mode, host kernel-mode).
+ */
+struct RsServer
+{
+ /**
+ * Privilege level determines what objects a server is allowed to allocate, and
+ * also determines whether additional handle validation needs to be performed.
+ */
+ RS_PRIV_LEVEL privilegeLevel;
+
+ RsClientList *pClientSortedList; ///< Bucket of linked lists of clients (and their locks) owned by this server
+ NvU32 clientCurrentHandleIndex;
+
+ NvBool bConstructed; ///< Determines whether the server is ready to be used
+ PORT_MEM_ALLOCATOR *pAllocator; ///< Allocator to use for all objects allocated by the server
+
+ PORT_SPINLOCK *pClientListLock; ///< Lock that needs to be taken when accessing the client list
+
+ PORT_SPINLOCK *pShareMapLock; ///< Lock that needs to be taken when accessing the shared resource map
+ RsSharedMap shareMap; ///< Map of shared resources
+
+#if (RS_STANDALONE)
+ NvU64 topLockOwnerTid; ///< Thread id of top-lock owner
+ PORT_RWLOCK *pTopLock; ///< Top-level resource server lock
+ PORT_RWLOCK *pResLock; ///< Resource-level resource server lock
+#if LOCK_VAL_ENABLED
+ LOCK_VAL_LOCK topLockVal;
+ LOCK_VAL_LOCK resLockVal;
+#endif
+#endif
+
+ /// Print out a list of all resources that will be freed when a free request is made
+ NvBool bDebugFreeList;
+
+ /// If true, control call param copies will be performed outside the top/api lock
+ NvBool bUnlockedParamCopy;
+
+ // If true, calls annotated with ROUTE_TO_PHYSICAL will not grab global gpu locks
+ // (and the readonly API lock).
+ NvBool bRouteToPhysicalLockBypass;
+
+ /**
+ * Setting this flag to false disables any attempts to
+ * automatically acquire access rights or to control access to resources by
+ * checking for access rights.
+ */
+ NvBool bRsAccessEnabled;
+
+ /**
+ * Set to thread ID of the thread that locked all clients.
+ */
+ NvU64 allClientLockOwnerTid;
+
+ /**
+ * Mask of interfaces (RS_API_*) that will use a read-only top lock by default
+ */
+ NvU32 roTopLockApiMask;
+
+ /// Share policies which clients default to when no other policies are used
+ RsShareList defaultInheritedSharePolicyList;
+ /// Share policies to apply to all shares, regardless of other policies
+ RsShareList globalInternalSharePolicyList;
+
+ NvU32 internalHandleBase;
+ NvU32 clientHandleBase;
+
+ NvU32 activeClientCount;
+ NvU64 activeResourceCount;
+
+ /// List of clients that are de-activated and pending free
+ RsDisabledClientList disabledClientList;
+ RsClient *pNextDisabledClient;
+ PORT_SPINLOCK *pDisabledClientListLock;
+
+ /**
+ * List of client entries locked by serverLockAllClients
+ * This list is required for locking all clients in order to avoid races with
+ * other paths creating/destroying clients in parallel WITHOUT holding the API lock.
+ * Ideally, there shouldn't be any other such paths but the RTD3/PM path does do
+ * this. CORERM-6052 tracks investigating that and potentially fixing the locking
+ * there.
+ */
+ RsLockedClientList lockedClientList;
+};
+
+/**
+ * Construct a server instance. This must be performed before any other server
+ * operation.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] privilegeLevel Privilege level for this resource server instance
+ * @param[in] maxDomains Maximum number of domains to support, or 0 for the default
+ */
+NV_STATUS serverConstruct(RsServer *pServer, RS_PRIV_LEVEL privilegeLevel, NvU32 maxDomains);
+
+/**
+ * Destroy a server instance. Destructing a server does not guarantee that child domains
+ * and clients will be appropriately freed. serverFreeDomain should be explicitly called
+ * on all allocated domains to ensure all clients and resources get cleaned up.
+ *
+ * @param[in] pServer This server instance
+ */
+NV_STATUS serverDestruct(RsServer *pServer);
+
+/**
+ * Allocate a domain handle. Domain handles are used to track clients created by a domain.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] hParentDomain
+ * @param[in] pAccessControl
+ * @param[out] phDomain
+ */
+NV_STATUS serverAllocDomain(RsServer *pServer, NvU32 hParentDomain, ACCESS_CONTROL *pAccessControl, NvHandle *phDomain);
+
+/**
+ * Verify that the calling user is allowed to perform the access. This check only
+ * applies to calls from RING_USER or RING_KERNEL. No check is performed in
+ * RING_HOST.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] hDomain
+ * @param[in] hClient
+ */
+NV_STATUS serverValidate(RsServer *pServer, NvU32 hDomain, NvHandle hClient);
+
+/**
+ * Verify that the domain has sufficient permission to allocate the given class.
+ * @param[in] pServer
+ * @param[in] hDomain
+ * @param[in] externalClassId External resource class id
+ */
+NV_STATUS serverValidateAlloc(RsServer *pServer, NvU32 hDomain, NvU32 externalClassId);
+
+/**
+ * Free a domain handle. All clients of this domain will be freed.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] hDomain The handle of the domain to free
+ */
+NV_STATUS serverFreeDomain(RsServer *pServer, NvHandle hDomain);
+
+/**
+ * Allocate a client handle. A client handle is required to allocate resources.
+ *
+ * @param[in] pServer This server instance
+ * @param[inout] pParams Client allocation parameters
+ */
+NV_STATUS serverAllocClient(RsServer *pServer, RS_RES_ALLOC_PARAMS_INTERNAL *pParams);
+
+/**
+ * Free a client handle. All resource references owned by the client will be
+ * freed.
+ *
+ * It is invalid to attempt to free a client from a user other than the one
+ * that allocated it.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] pParams Client free params
+ */
+NV_STATUS serverFreeClient(RsServer *pServer, RS_CLIENT_FREE_PARAMS* pParams);
+
+/**
+ * Mark a list of client handles as disabled. All CPU mappings owned by that
+ * client will be unmapped immediately, and the client will be marked as disabled.
+ * A call to @ref serverFreeDisabledClients will then free all such clients.
+ *
+ * It is invalid to attempt to free a client from a user other than the one
+ * that allocated it.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] phClientList The list of client handles to disable
+ * @param[in] numClients The number of clients in the list
+ * @param[in] freeState User-defined free state
+ * @param[in] pSecInfo Security Info
+ */
+NV_STATUS serverMarkClientListDisabled(RsServer *pServer, NvHandle *phClientList, NvU32 numClients, NvU32 freeState, API_SECURITY_INFO *pSecInfo);
+
+/**
+ * Frees all currently disabled clients. All resource references owned by
+ * any of the clients will be freed.
+ * All high priority resources will be freed first across all listed clients.
+ *
+ * NOTE: may return NV_WARN_MORE_PROCESSING_REQUIRED if not all clients were freed
+ *
+ * @param[in] pServer This server instance
+ * @param[in] freeState User-defined free state
+ * @param[in] limit Max number of iterations to make before returning; 0 means no limit
+ */
+NV_STATUS serverFreeDisabledClients(RsServer *pServer, NvU32 freeState, NvU32 limit);
+
+/**
+ * Allocate a resource.
+ *
+ * It is invalid to attempt to allocate a client from a user other than the one
+ * that allocated it.
+ *
+ * @param[in] pServer This server instance
+ * @param[inout] pParams The allocation parameters
+ */
+NV_STATUS serverAllocResource(RsServer *pServer, RS_RES_ALLOC_PARAMS *params);
+
+/**
+ * Allocate a ref-counted resource share (see the usage sketch below).
+ *
+ * @param[in] pServer
+ * @param[in] pClassInfo NVOC class info for the shared class (must derive from RsShared)
+ * @param[out] ppShare Allocated share
+ */
+NV_STATUS serverAllocShare(RsServer *pServer, const NVOC_CLASS_INFO* pClassInfo, RsShared **ppShare);
+
+/**
+ * Allocate a ref-counted resource share with Halspec parent.
+ *
+ * @param[in] pServer
+ * @param[in] pClassInfo NVOC class info for the shared class (must derive from RsShared)
+ * @param[out] ppShare Allocated share
+ * @param[in] pHalspecParent Parent object whose Halspec can be used for the shared class object
+ */
+NV_STATUS serverAllocShareWithHalspecParent(RsServer *pServer, const NVOC_CLASS_INFO* pClassInfo, RsShared **ppShare, Object *pHalspecParent);
+
+/**
+ * Get the ref-count of a resource share.
+ *
+ * @param[in] pServer
+ * @param[in] pShare Resource share
+ */
+NvS32 serverGetShareRefCount(RsServer *pServer, RsShared *pShare);
+
+/**
+ * Increment the ref-count of a resource share.
+ *
+ * @param[in] pServer
+ * @param[in] pShare Resource share
+ */
+NV_STATUS serverRefShare(RsServer *pServer, RsShared *pShare);
+
+/**
+ * Decrement the ref-count of a resource share. If the ref-count
+ * has reached zero, the resource share will be freed.
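The share API above pairs each serverRefShare with a later serverFreeShare; the share is destroyed on the release that drops the count to zero. Below is a minimal editorial sketch, not code from this patch, assuming classInfo() resolves NVOC class info as it does elsewhere in NVOC-based code.

/*
 * Illustrative sketch: allocate a ref-counted share, add a second holder,
 * then release both references; the share is freed on the final release.
 */
static NV_STATUS shareLifetimeExample(RsServer *pServer)
{
    RsShared *pShared = NULL;
    NV_STATUS status = serverAllocShare(pServer, classInfo(RsShared), &pShared);

    if (status != NV_OK)
        return status;

    serverRefShare(pServer, pShared);  /* second holder takes a reference */
    serverFreeShare(pServer, pShared); /* second holder releases */
    serverFreeShare(pServer, pShared); /* original release; share is freed */

    return NV_OK;
}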
+ * + * @param[in] pServer + * @param[in] pShare Resource share + */ +NV_STATUS serverFreeShare(RsServer *pServer, RsShared *pShare); + +/** + * Get an iterator to the elements in the server's shared object map + * @param[in] pServer + * @param[in] internalClassId If non-zero, only RsShared that are (or can be + * derived from) the specified class will be returned + */ +RS_SHARE_ITERATOR serverShareIter(RsServer *pServer, NvU32 internalClassId); + +/** + * Get an iterator to the elements in the server's shared object map + */ +NvBool serverShareIterNext(RS_SHARE_ITERATOR*); + +/** + * Set fixed client handle base in case clients wants to use a different + * base for client allocations + * @param[in] pServer + * @param[in] clientHandleBase + */ +NV_STATUS serverSetClientHandleBase(RsServer *pServer, NvU32 clientHandleBase); + +/** + * Deserialize parameters for servicing command + * + * @param[in] pCallContext + * @param[in] cmd + * @param[in/out] ppParams + * @param[in/out] pParamsSize + * @param[in] flags + */ +NV_STATUS serverDeserializeCtrlDown(CALL_CONTEXT *pCallContext, NvU32 cmd, void **ppParams, NvU32 *pParamsSize, NvU32 *flags); + +/** + * Serialize parameters for servicing command + * + * @param[in] pCallContext + * @param[in] cmd + * @param[in/out] ppParams + * @param[in/out] pParamsSize + * @param[in] flags + */ +NV_STATUS serverSerializeCtrlDown(CALL_CONTEXT *pCallContext, NvU32 cmd, void **ppParams, NvU32 *pParamsSize, NvU32 *flags); + +/** + * Deserialize parameters for returning from command + * + * @param[in] pCallContext + * @param[in] cmd + * @param[out] ppParams + * @param[out] pParamsSize + * @param[in] flags + */ +NV_STATUS serverDeserializeCtrlUp(CALL_CONTEXT *pCallContext, NvU32 cmd, void **ppParams, NvU32 *pParamsSize, NvU32 *flags); + +/** + * Serialize parameters for returning from command + * + * @param[in] pCallContext + * @param[in] cmd + * @param[out] ppParams + * @param[out] pParamsSize + * @param[in] flags + */ +NV_STATUS serverSerializeCtrlUp(CALL_CONTEXT *pCallContext, NvU32 cmd, void **ppParams, NvU32 *pParamsSize, NvU32 *flags); + +/** + * Unset flag for reserializing control before going to GSP + * Used if kernel control servicing passes params to GSP without changing them + * + * @param[in] pCallContext + */ +void serverDisableReserializeControl(CALL_CONTEXT *pCallContext); + +/** + * Serialize parameters for allocating + * + * @param[in] pCallContext + * @param[in] classId + * @param[in/out] ppParams + * @param[out] pParamsSize + * @param[in] flags + */ +NV_STATUS serverSerializeAllocDown(CALL_CONTEXT *pCallContext, NvU32 classId, void **ppParams, NvU32 *pParamsSize, NvU32 *flags); + +/** + * Deserialize parameters for allocating + * + * @param[in] pCallContext + * @param[in] classId + * @param[in/out] ppParams + * @param[in/out] pParamsSize + * @param[in] flags + */ +NV_STATUS serverDeserializeAllocDown(CALL_CONTEXT *pCallContext, NvU32 classId, void **ppParams, NvU32 *pParamsSize, NvU32 *flags); + +/** + * Serialize parameters for returning from allocating + * + * @param[in] pCallContext + * @param[in] classId + * @param[out] ppParams + * @param[out] pParamsSize + * @param[in] flags + */ +NV_STATUS serverSerializeAllocUp(CALL_CONTEXT *pCallContext, NvU32 classId, void **ppParams, NvU32 *pParamsSize, NvU32 *flags); + +/** + * Deserialize parameters for returning from allocating + * + * @param[in] pCallContext + * @param[in] classId + * @param[out] ppParams + * @param[out] pParamsSize + * @param[in] flags + */ +NV_STATUS 
 + +/** + * Free FINN structures allocated for serializing/deserializing + * + * @param[in] pCallContext + * @param[in] pParams + */ +void serverFreeSerializeStructures(CALL_CONTEXT *pCallContext, void *pParams); + +/** + * Return an available client handle for a new client allocation + * + * @param[in] pServer This server instance + * @param[in] bInternalHandle Client is an RM internal client + * @param[in] pSecInfo Security context of this client allocation + */ +extern NvU32 serverAllocClientHandleBase(RsServer *pServer, NvBool bInternalHandle, API_SECURITY_INFO *pSecInfo); + +/** + * Allocate a resource. Assumes the top-level lock has been taken. + * + * It is invalid to attempt to allocate a resource under a client owned by a + * user other than the one that allocated the client. User-implemented. + * + * @param[in] pServer This server instance + * @param[inout] pAllocParams The allocation parameters + */ +extern NV_STATUS serverAllocResourceUnderLock(RsServer *pServer, RS_RES_ALLOC_PARAMS *pAllocParams); + +/** + * Call the Free RPC for the given resource. Assumes the top-level lock has been taken. + * + * @param[in] pServer This server instance + * @param[inout] pFreeParams The free parameters + */ +extern NV_STATUS serverFreeResourceRpcUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pFreeParams); + +/** + * Copy-in parameters supplied by the caller, and initialize API state. User-implemented. + * @param[in] pServer + * @param[in] pAllocParams Resource allocation parameters + * @param[out] ppApiState User-defined API_STATE; should be allocated by this function + */ +extern NV_STATUS serverAllocApiCopyIn(RsServer *pServer, RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams, API_STATE **ppApiState); + +/** + * Copy-out parameters supplied by the caller, and release API state. User-implemented. + * @param[in] pServer + * @param[in] status Status of allocation request + * @param[in] pApiState API_STATE for the allocation + */ +extern NV_STATUS serverAllocApiCopyOut(RsServer *pServer, NV_STATUS status, API_STATE *pApiState); + +/** + * Obtain a second client handle to lock if required for the allocation. + * @param[in] externalClassId External class ID of resource + * @param[in] pAllocParams Class-specific allocation parameters + * @param[out] phSecondClient Second client handle to lock on success + * + * @return NV_OK on success; + * NV_ERR_INVALID_STATE if the allocation is configured with + * RS_FLAGS_DUAL_CLIENT_LOCK but this function has not been updated for it. + */ +extern NV_STATUS serverAllocLookupSecondClient(NvU32 externalClassId, void *pAllocParams, NvHandle *phSecondClient); + +/** + * Obtain a second client handle to lock if required for the control (DISCOURAGED). + * @param[in] cmd Control call ID + * @param[in] pControlParams Control-specific parameters + * @param[in] pCookie Control call cookie to check flags for + * @param[out] phSecondClient Second client handle to lock on success + * + * @return NV_OK on success; + * NV_ERR_INVALID_STATE if the control is configured with + * RMCTRL_FLAGS_DUAL_CLIENT_LOCK but this function has not been updated for it. + */ +extern NV_STATUS serverControlLookupSecondClient(NvU32 cmd, void *pControlParams, RS_CONTROL_COOKIE *pCookie, NvHandle *phSecondClient);
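+
+/**
+ * Illustrative only: a sketch of how the top-level lock prologue/epilogue
+ * declared below are typically paired, with the lock state and release flags
+ * threaded through both calls:
+ * ~~~{.c}
+ * NvU32 releaseFlags = 0;
+ * status = serverTopLock_Prologue(pServer, LOCK_ACCESS_READ, pLockInfo, &releaseFlags);
+ * if (status == NV_OK)
+ * {
+ *     // ... operate while holding the top-level lock ...
+ *     serverTopLock_Epilogue(pServer, LOCK_ACCESS_READ, pLockInfo, &releaseFlags);
+ * }
+ * ~~~
+ */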
/** + * Acquires a top-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Output flags indicating the locks that need to be released + */ +extern NV_STATUS serverTopLock_Prologue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Releases a top-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern void serverTopLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Acquires a session lock. + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[in] pResourceRef Resource reference to take session locks on + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern NV_STATUS serverSessionLock_Prologue(LOCK_ACCESS_TYPE access, RsResourceRef *pResourceRef, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Releases a session lock. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern void serverSessionLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Acquires a resource-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Output flags indicating the locks that need to be released + * @param[in] gpuMask Bitmask of additional GPUs to lock + */ +extern NV_STATUS serverResLock_Prologue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags, NvU32 gpuMask); + +/** + * Releases a resource-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern void serverResLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Acquire the client list lock. The caller is responsible for + * ensuring that lock ordering is not violated (otherwise there can be + * deadlock): the client list lock must always be released without acquiring any + * subsequent locks. + * + * @param[in] pServer This server instance + */ +void serverAcquireClientListLock(RsServer *pServer); + +/** + * Release the client list lock. + * + * @param[in] pServer This server instance + */ +void serverReleaseClientListLock(RsServer *pServer); + +/** + * WAR for additional tasks that must be performed after resource-level locks are released. User-implemented. + * @param[in] pServer + * @param[in] status Allocation status + * @param[in] bClientAlloc Caller is attempting to allocate a client + * @param[inout] pAllocParams Allocation parameters + */ +extern NV_STATUS serverAllocEpilogue_WAR(RsServer *pServer, NV_STATUS status, NvBool bClientAlloc, RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams);
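+
+/**
+ * Illustrative only: the client list lock declared above is a leaf lock, so a
+ * sketch of its intended use keeps the critical section free of any other
+ * lock acquisition:
+ * ~~~{.c}
+ * serverAcquireClientListLock(pServer);
+ * // ... walk the client list; do not acquire any other locks here ...
+ * serverReleaseClientListLock(pServer);
+ * ~~~
+ */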
/** + * Free a resource reference and all of its descendants. This will decrease the + * resource's reference count. The resource itself will only be freed if there + * are no more references to it. + * + * It is invalid to attempt to free a resource from a user other than the one that allocated it. + * + * @param[in] pServer This server instance + * @param[in] pParams Free parameters + */ +NV_STATUS serverFreeResourceTree(RsServer *pServer, RS_RES_FREE_PARAMS *pParams); + +/** + * Same as serverFreeResourceTree except the top-level lock is assumed to have been taken. + * + * @param[in] pServer This server instance + * @param[in] pParams Free parameters + */ +NV_STATUS serverFreeResourceTreeUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pParams); + +/** + * Updates the lock flags in the dup parameters + * + * @param[in] pServer This server instance + * @param[in] pParams Dup parameters + */ +extern NV_STATUS serverUpdateLockFlagsForCopy(RsServer *pServer, RS_RES_DUP_PARAMS *pParams); + +/** + * Updates the lock flags in the free parameters + * + * @param[in] pServer This server instance + * @param[in] pParams Free parameters + */ +extern NV_STATUS serverUpdateLockFlagsForFree(RsServer *pServer, RS_RES_FREE_PARAMS *pParams); + +/** + * Updates the lock flags for automatic inter-unmap during free + * + * @param[in] pServer This server instance + * @param[inout] pParams Unmap params, contained pLockInfo will be modified + */ +extern NV_STATUS serverUpdateLockFlagsForInterAutoUnmap(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pParams); + +/** + * Initialize parameters for a recursive call to serverFreeResourceTree. User-implemented. + * @param[in] hClient + * @param[in] hResource + * @param[inout] pLockInfo + * @param[inout] pParams + */ +extern NV_STATUS serverInitFreeParams_Recursive(NvHandle hClient, NvHandle hResource, RS_LOCK_INFO *pLockInfo, RS_RES_FREE_PARAMS *pParams); + +/** + * Common operations performed after top locks and client locks are taken, but before + * the control call is executed. This includes validating the control call cookie, + * looking up locking flags, parameter copy-in, and taking resource locks. + * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[out] pAccess Lock access type + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + */ +NV_STATUS serverControl_Prologue(RsServer *pServer, RS_RES_CONTROL_PARAMS_INTERNAL *pParams, LOCK_ACCESS_TYPE *pAccess, NvU32 *pReleaseFlags);
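+
+/**
+ * Illustrative only: a sketch of how a control call is typically bracketed by
+ * the prologue above and the epilogue below:
+ * ~~~{.c}
+ * LOCK_ACCESS_TYPE access = LOCK_ACCESS_READ;
+ * NvU32 releaseFlags = 0;
+ * status = serverControl_Prologue(pServer, pParams, &access, &releaseFlags);
+ * if (status == NV_OK)
+ * {
+ *     // ... dispatch the control handler ...
+ *     status = serverControl_Epilogue(pServer, pParams, access, &releaseFlags, status);
+ * }
+ * ~~~
+ */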
/** + * Common operations performed after the control call is executed. This + * includes releasing locks and parameter copy-out. + * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[in] access Lock access type + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + * @param[in] status Control call status + */ +NV_STATUS serverControl_Epilogue(RsServer *pServer, RS_RES_CONTROL_PARAMS_INTERNAL *pParams, LOCK_ACCESS_TYPE access, NvU32 *pReleaseFlags, NV_STATUS status); + +/** + * Initialize an NVOC export control call cookie + * + * @param[in] pExportedEntry + * @param[inout] pCookie + */ +extern void serverControl_InitCookie(const struct NVOC_EXPORTED_METHOD_DEF *pExportedEntry, RS_CONTROL_COOKIE *pCookie); + +/** + * Validate an NVOC export control call cookie + * + * @param[in] pParams + * @param[inout] pCookie + */ +extern NV_STATUS serverControl_ValidateCookie(RS_RES_CONTROL_PARAMS_INTERNAL *pParams, RS_CONTROL_COOKIE *pCookie); + +/** + * Copy-in control call parameters + * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[inout] pCookie Control call cookie + */ +extern NV_STATUS serverControlApiCopyIn(RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie); + +/** + * Copy-out control call parameters + * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[inout] pCookie Control call cookie + * @param[in] rmStatus Control call status + */ +extern NV_STATUS serverControlApiCopyOut(RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie, + NV_STATUS rmStatus); + +/** + * Determine whether an API supports taking a given lock as read-only + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] api RS_API* + */ +NvBool serverSupportsReadOnlyLock(RsServer *pServer, RS_LOCK_ENUM lock, RS_API_ENUM api); + +/** + * Determine whether the current thread has taken the RW API lock + * @param[in] pServer ResServ instance + */ +extern NvBool serverRwApiLockIsOwner(RsServer *pServer); + +/** + * Lookup locking flags for a resource alloc + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Allocation parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverAllocResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess); +/** + * Lookup locking flags for a resource free + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Free parameters + * @param[out] pAccess Computed lock access + * @param[out] pbSupportForceROLock Whether a read-only lock can be forced for this free + */ +extern NV_STATUS serverFreeResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_FREE_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess, + NvBool *pbSupportForceROLock); + +/** + * Lookup locking flags for a resource copy + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Dup parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverCopyResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_DUP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking flags for a resource access share + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Share parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverShareResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_SHARE_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess);
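+
+/**
+ * Illustrative only: serverRwApiLockIsOwner() above is typically used to
+ * guard paths that require the RW API lock, e.g.:
+ * ~~~{.c}
+ * NV_ASSERT_OR_RETURN(serverRwApiLockIsOwner(pServer), NV_ERR_INVALID_LOCK_STATE);
+ * ~~~
+ */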
 + +/** + * Lookup locking flags for a control call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Control call parameters + * @param[in] pCookie Control call cookie + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverControlLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup client locking flags for a control call + * + * @param[in] pCookie Control call cookie + * @param[out] pClientLockType Client lock type + */ +extern NV_STATUS serverControlLookupClientLockFlags(RS_CONTROL_COOKIE *pCookie, + enum CLIENT_LOCK_TYPE *pClientLockType); + +/** + * Lookup locking flags for a map call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams CPU map parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverMapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking flags for an unmap call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams CPU unmap parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverUnmapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking flags for an inter-resource map call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Inter-resource map parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverInterMapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking flags for an inter-resource unmap call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Inter-resource unmap parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverInterUnmapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Fill the server's share policy lists with any default or global policies needed + */ +extern NV_STATUS serverInitGlobalSharePolicies(RsServer *pServer); + +/** + * Issue a control command to a resource + * + * @param[in] pServer This server instance + * @param[in] pParams Control parameters + */ +NV_STATUS serverControl(RsServer *pServer, RS_RES_CONTROL_PARAMS *pParams); + +/** + * Copy a resource owned by one client into another client. + * + * The clients must be in the same client handle space. The underlying + * resource is not duplicated, but it is refcounted so the resource will + * not be freed until the reference count hits zero. + * + * Copying a resource will fail if the user making the call does not own + * the source client. + * + * @param[in] pServer This server instance + * @param[inout] pParams Resource duplication parameters + */ +NV_STATUS serverCopyResource(RsServer *pServer, RS_RES_DUP_PARAMS *pParams); + +/** + * Share certain access rights to a resource with other clients using the provided share policy + * + * The policy entry passed in will be added to the object's share policy list. + * If bRevoke is true, the policy will be removed instead. + * + * Sharing will fail if the user making the call does not own the source client.
 + * + * @param[in] pServer This server instance + * @param[in] pParams Resource sharing parameters + */ +NV_STATUS serverShareResourceAccess(RsServer *pServer, RS_RES_SHARE_PARAMS *pParams); + +/** + * Creates a CPU mapping of the resource in the virtual address space of the process. + * + * Not all resources support mapping. + * + * @param[in] pServer This server instance + * @param[in] hClient Client handle of the resource to map + * @param[in] hResource Handle of the resource to map + * @param[inout] pParams CPU mapping parameters + */ +NV_STATUS serverMap(RsServer *pServer, NvHandle hClient, NvHandle hResource, RS_CPU_MAP_PARAMS *pParams); + +/** + * Release a CPU virtual address mapping + * + * @param[in] pServer This server instance + * @param[in] hClient Client handle of the resource to unmap + * @param[in] hResource Handle of the resource to unmap + * @param[in] pParams CPU unmapping parameters + */ +NV_STATUS serverUnmap(RsServer *pServer, NvHandle hClient, NvHandle hResource, RS_CPU_UNMAP_PARAMS *pParams); + +/** + * Pre-map operations. Called with top/client locks acquired + * but not resource locks. + * + * @param[in] pServer + * @param[inout] pMapParams CPU mapping parameters + */ +NV_STATUS serverMap_Prologue(RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams); + +/** + * Post-map operations. Called with top/client locks acquired + * but not resource locks. + * + * @param[in] pServer + * @param[inout] pMapParams CPU mapping parameters + */ +void serverMap_Epilogue(RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams); + +/** + * Pre-unmap operations. Called with top/client locks acquired + * but not resource locks. + * + * @param[in] pServer + * @param[inout] pUnmapParams CPU unmapping parameters + */ +NV_STATUS serverUnmap_Prologue(RsServer *pServer, RS_CPU_UNMAP_PARAMS *pUnmapParams); + +/** + * Post-unmap operations. Called with top/client locks acquired + * but not resource locks. + * + * @param[in] pServer + * @param[inout] pUnmapParams CPU unmapping parameters + */ +void serverUnmap_Epilogue(RsServer *pServer, RS_CPU_UNMAP_PARAMS *pUnmapParams); + +/** + * Creates an inter-mapping between two resources + * + * Not all resources support mapping. + * + * @param[in] pServer This server instance + * @param[inout] pParams mapping parameters + */ +NV_STATUS serverInterMap(RsServer *pServer, RS_INTER_MAP_PARAMS *pParams); + +/** + * Release an inter-mapping between two resources + * + * @param[in] pServer This server instance + * @param[in] pParams unmapping parameters + */ +NV_STATUS serverInterUnmap(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pParams); + +/** + * Pre-inter-map operations. Called with top/client locks acquired. + * This function acquires resource locks. + * + * @param[in] pServer + * @param[in] pMapperRef The resource that can be used to create the mapping + * @param[in] pMappableRef The resource that can be mapped + * @param[inout] pMapParams mapping parameters + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + */ +NV_STATUS serverInterMap_Prologue(RsServer *pServer, RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RS_INTER_MAP_PARAMS *pMapParams, NvU32 *pReleaseFlags);
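+
+/**
+ * Illustrative only: a sketch of how the CPU mapping prologue/epilogue above
+ * bracket the creation of a mapping:
+ * ~~~{.c}
+ * status = serverMap_Prologue(pServer, pMapParams);
+ * if (status == NV_OK)
+ * {
+ *     // ... create the CPU mapping ...
+ *     serverMap_Epilogue(pServer, pMapParams);
+ * }
+ * ~~~
+ */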
/** + * Post-inter-map operations. Called with top, client, and resource locks acquired. + * This function releases resource locks. + * + * @param[in] pServer + * @param[inout] pMapParams mapping parameters + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + */ +void serverInterMap_Epilogue(RsServer *pServer, RS_INTER_MAP_PARAMS *pMapParams, NvU32 *pReleaseFlags); + +/** + * Pre-inter-unmap operations. Called with top, client, and resource locks acquired. + * + * @param[in] pServer + * @param[inout] pUnmapParams unmapping parameters + */ +NV_STATUS serverInterUnmap_Prologue(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pUnmapParams); + +/** + * Post-inter-unmap operations. Called with top, client, and resource locks acquired. + * + * @param[in] pServer + * @param[inout] pUnmapParams unmapping parameters + */ +void serverInterUnmap_Epilogue(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pUnmapParams); + +/** + * Acquire a client pointer from a client handle. The caller is responsible for + * ensuring that lock ordering is not violated (otherwise there can be + * deadlock): clients must be locked in increasing order of client index (not + * handle). + * + * @param[in] pServer This server instance + * @param[in] hClient The client to acquire + * @param[in] lockAccess LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[out] ppClientEntry Pointer to the CLIENT_ENTRY + */ +NV_STATUS serverAcquireClient(RsServer *pServer, NvHandle hClient, LOCK_ACCESS_TYPE lockAccess, CLIENT_ENTRY **ppClientEntry); + +/** + * Release a client pointer + * + * @param[in] pServer This server instance + * @param[in] lockAccess LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[in] pClientEntry Pointer to the CLIENT_ENTRY + */ +void serverReleaseClient(RsServer *pServer, LOCK_ACCESS_TYPE lockAccess, CLIENT_ENTRY *pClientEntry); + +/** + * Test if a client handle is currently locked for LOCK_ACCESS_WRITE or not. + * + * @param[in] pServer This server instance + * @param[in] hClient The client to test + */ +NvBool serverIsClientLocked(RsServer *pServer, NvHandle hClient); + +/** + * Test if a client handle is internal or not + * + * @param[in] pServer This server instance + * @param[in] hClient The client handle to test + */ +NvBool serverIsClientInternal(RsServer *pServer, NvHandle hClient); + +/** + * Lock all clients currently in use. While this function will lock the client handles + * in the correct order, the caller is responsible for ensuring that lock ordering + * is not violated (otherwise there can be a deadlock) with respect to other types + * of locks. NOTE that this CANNOT be called when already holding one or more client + * locks! + * + * @param[in] pServer This server instance + */ +NV_STATUS serverLockAllClients(RsServer *pServer); + +/** + * Release locks on all clients. + * + * @param[in] pServer This server instance + */ +NV_STATUS serverUnlockAllClients(RsServer *pServer); + +/** + * Check if we locked all clients + * + * @param[in] pServer This server instance + */ +static NV_INLINE NvBool serverAllClientsLockIsOwner(RsServer *pServer) +{ + return (pServer->allClientLockOwnerTid == portThreadGetCurrentThreadId()); +}
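+
+/**
+ * Illustrative only: a sketch of the acquire/release pairing for a single
+ * client, using the functions declared above:
+ * ~~~{.c}
+ * CLIENT_ENTRY *pClientEntry;
+ * status = serverAcquireClient(pServer, hClient, LOCK_ACCESS_READ, &pClientEntry);
+ * if (status == NV_OK)
+ * {
+ *     // ... use the client while holding its lock ...
+ *     serverReleaseClient(pServer, LOCK_ACCESS_READ, pClientEntry);
+ * }
+ * ~~~
+ */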
/** + * Get a client pointer from a client handle without taking any locks. + * + * @param[in] pServer This server instance + * @param[in] hClient The client to acquire + * @param[out] ppClient Pointer to the RsClient + */ +NV_STATUS serverGetClientUnderLock(RsServer *pServer, NvHandle hClient, RsClient **ppClient); + +/** + * Get the count of clients allocated under this resource server + * + * @param[in] pServer This server instance + */ +NvU32 serverGetClientCount(RsServer *pServer); + +/** + * Get the count of resources allocated under this resource server + * + * @param[in] pServer This server instance + */ +NvU64 serverGetResourceCount(RsServer *pServer); + +/** + * Swap a TLS call context entry and increment the TLS entry refcount. + * A new TLS entry for call context will be allocated if necessary. + * + * @note This should be paired with a corresponding resservRestoreTlsCallContext call + */ +NV_STATUS resservSwapTlsCallContext(CALL_CONTEXT **ppOldCallContext, CALL_CONTEXT *pNewCallContext); + +/** + * Get the current TLS call context. This will not increment a refcount on the TLS entry. + */ +CALL_CONTEXT *resservGetTlsCallContext(void); + +/** + * Restore a TLS call context entry and decrement the TLS entry refcount. + * @note This should be paired with a corresponding resservSwapTlsCallContext call + */ +NV_STATUS resservRestoreTlsCallContext(CALL_CONTEXT *pOldCallContext); + +/** + * Find a resource reference of a given type from the TLS call context + * @param[in] internalClassId Only return a reference if it matches this type + * @param[in] bSearchAncestors Search parents of the call context resource ref + */ +RsResourceRef *resservGetContextRefByType(NvU32 internalClassId, NvBool bSearchAncestors); + +/** + * Test if a client handle is currently locked for LOCK_ACCESS_READ or not. + * The caller must hold the client lock in either mode to acquire an accurate + * result. Callers not holding the client lock are subject to race conditions. + * + * @param[in] pClientEntry Pointer to the CLIENT_ENTRY + */ +NvBool serverIsClientLockedForRead(CLIENT_ENTRY* pClientEntry); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/nvidia/inc/libraries/tls/tls.h b/src/nvidia/inc/libraries/tls/tls.h new file mode 100644 index 0000000..8e4d862 --- /dev/null +++ b/src/nvidia/inc/libraries/tls/tls.h @@ -0,0 +1,347 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
 + */ + +/** + * @file + * @brief Thread local storage public interface + */ + +#include "nvport/nvport.h" + +#ifndef _NV_TLS_H_ +#define _NV_TLS_H_ + +/** + * @defgroup Thread local storage operations + * + * @brief This module contains thread local storage functionality used by other + * modules. + * + * @par Module dependencies: + * - NvPort (UTIL, ATOMIC, MEMORY, SYNC and THREAD modules) + * - NvContainers (Map) + * - NvUtils (NV_PRINTF and NV_ASSERT) + * + * @par TLS architecture: + * A base TLS allocation unit is an Entry (@ref TLS_ENTRY). Entries are local + * to a thread and are identified by a 64-bit ID. Entries are lazy-allocated + * and refcounted. All entries for a given thread are organized in one Map - + * i.e. TLS has as many Maps active as there are threads; each map is + * inherently single-threaded. The Map for a given thread ID is obtained by + * searching a map of all threads with thread ID as key. + * The whole TLS system can be thought of as: + * map<threadId, map<entryId, TLS_ENTRY>> + * + * @par Complexity: + * All operations are O(log(numActiveThreads) + log(numEntriesForGivenThread)) + * + * @par A note on ISRs and DPCs + * Interrupt Service Routines (and in some cases Deferred Procedure Calls) do + * not have their own thread IDs - they can have the same ID as a regular + * thread. Because of this, they are kept in a separate map indexed by their + * stack pointer instead of thread ID. Because getting the exact base of the + * ISR stack can be difficult, when searching we use the closest one, in the + * direction of stack growth. This assumes that the given entry always exists, + * so ISR thread entries are preallocated with @ref tlsIsrInit. + * + * An example of how this works: + * ~~~{.c} + * if (is_isr()) + * return isr_map.find(get_approx_sp()); + * else + * return thread_map.find(get_thread_id()); + * ~~~ + * The exact definition of is_isr() varies by platform, but generally means + * "if it does not have a unique thread ID". Threaded IRQs are not ISRs. + * + * @par Locking: + * Currently, TLS has two spinlocks - separate locks for ISR and passive + * thread maps. This will be changed to RW-spinlocks in the future. + * We cannot use RW sleeper locks in passive threads, since they may modify + * their IRQL and thus be unable to acquire them, even conditionally. + * + * In cases where ISRs are not allowed to acquire a spinlock at all, the ISR + * map is implemented in a lockless fashion. This is slower than the locked + * implementation (O(maxIsrs)), but works in cases where all locks are banned. + * + * + * @{ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @note Only returned in cases of irregular order of public API calls. + */ +#define TLS_ERROR_VAL ~0 + +/** + * @brief Global TLS structure initialization. + * + * Must be called before any TLS functions can be called. + * + * If this function returns an error, calling any TLS function will result + * in undefined behavior. + * + * Called on RmInitRm(). + * @return NV_OK if successful; + * @return Error code otherwise. + * + */ +NV_STATUS tlsInitialize(void); + +/** + * @brief Global TLS structure termination. + * + * It frees resources allocated by tlsInitialize. + * Called on RmDestroyRm(). + * + */ +void tlsShutdown(void);
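+
+/**
+ * Illustrative only: the typical lifecycle of a dynamically allocated TLS
+ * entry, combining the calls documented below (MY_THREAD_DATA is a
+ * hypothetical caller-defined structure, as in the snippets that follow):
+ * ~~~{.c}
+ * NvU64 id = tlsEntryAlloc();
+ * MY_THREAD_DATA **ppData = tlsEntryAcquire(id);
+ * if (*ppData == NULL)
+ *     *ppData = portMemAllocNonPaged(sizeof(MY_THREAD_DATA));
+ * // ... use (*ppData) ...
+ * if (tlsEntryRelease(id) == 0)
+ *     portMemFree(*ppData);
+ * ~~~
+ */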
 + +enum { + TLS_ENTRY_ID_THREADSTATE, + TLS_ENTRY_ID_RESSERV_1, + TLS_ENTRY_ID_MAPPING_CONTEXT, + TLS_ENTRY_ID_CURRENT_GPU_INSTANCE, + TLS_ENTRY_ID_PRIORITY, + TLS_ENTRY_ID_GPUMGR_EXPANDED_GPU_VISIBILITY, + TLS_ENTRY_ID_DYNAMIC, // dynamic allocations start here + TLS_ENTRY_ID_TAG_START = 0x100000 // Custom tags start here +}; +/** + * @brief Allocates a new entry spot and returns a unique entry ID. + * + * IDs are unique across all threads. + * + * @return 0 if all IDs are used; + * @return a unique ID otherwise. + * + */ +NvU64 tlsEntryAlloc(void); + +/** + * @brief Get pointer to TLS entry for given @p entryId. + * + * This function increments the refCount of the given entry. + * + * @return NULL if @p entryId is invalid (not returned by @ref tlsEntryAlloc) + * or if there is not enough memory. + * @return Pointer to a void* that the user can use to point to a custom structure. + * + * Example usage: + * ~~~{.c} + * NvU64 id = tlsEntryAlloc(); + * MY_THREAD_DATA **ppData = tlsEntryAcquire(id); + * if (*ppData == NULL) + * *ppData = portMemAllocNonPaged(sizeof(MY_THREAD_DATA)); + * ~~~ + * + * @note On first call for given @p entryId, the dereferenced (user) pointer + * will be set to NULL - (*tlsEntryAcquire(x) == NULL) + * + */ +NvP64 *tlsEntryAcquire(NvU64 entryId); + +/** + * @brief Like @ref tlsEntryAcquire, but memory is allocated using @p pAllocator. + * + * @note Should be used only when allocation performance is important, or when + * a spinlock is held in a non-ISR thread and TLS is needed. + * + * @note @p pAllocator should be thread safe. + */ +NvP64 *tlsEntryAcquireWithAllocator(NvU64 entryId, PORT_MEM_ALLOCATOR *pAllocator); + +/** + * @brief Release the TLS entry for given @p entryId. + * + * This function decrements the refCount of the given entry. + * + * @return refCount after releasing the structure if @p entryId is valid, + * @return TLS_ERROR_VAL if TLS entry for given @p entryId doesn't exist. + * + * ~~~{.c} + * if (tlsEntryRelease(id) == 0) + * portMemFree(*ppData); + * ~~~ + */ +NvU32 tlsEntryRelease(NvU64 entryId); + +/** + * @brief Like @ref tlsEntryRelease, but memory is allocated using @p pAllocator. + * + * @note Should be used only when allocation performance is important, or when + * a spinlock is held in a non-ISR thread and TLS is needed. + * + * @note @p pAllocator should be thread safe. + */ +NvU32 tlsEntryReleaseWithAllocator(NvU64 entryId, PORT_MEM_ALLOCATOR *pAllocator); + +/** + * @brief Get pointer to TLS data for given entryId. + * + * This function will not modify the refCount, and does not return a double + * pointer required to set the entry value. + * + * @return NULL if the entry doesn't exist. + * @return Otherwise, a pointer to the user's custom structure. + * + * Example usage: + * ~~~{.c} + * NvU64 id = tlsEntryAlloc(); + * MY_THREAD_DATA **ppData = tlsEntryAcquire(id); + * if (*ppData == NULL) + * { + * *ppData = portMemAllocNonPaged(sizeof(MY_THREAD_DATA)); + * (*ppData)->myData = 1; + * } + * MY_THREAD_DATA *pData = tlsEntryGet(id); + * if (pData->myData == 1) + * { + * ... + * } + * ~~~ + * + */ +NvP64 tlsEntryGet(NvU64 entryId);
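+
+/**
+ * Illustrative only: a sketch of how an interrupt handler brackets its TLS
+ * usage with the ISR entry points declared below, assuming the NvPort
+ * helpers portMemAllocatorCreateOnExistingBlock() and
+ * portMemAllocatorRelease() for building an allocator on a stack block:
+ * ~~~{.c}
+ * NvU8 stackBuffer[TLS_ISR_ALLOCATOR_SIZE];
+ * PORT_MEM_ALLOCATOR *pIsrAllocator =
+ *     portMemAllocatorCreateOnExistingBlock(stackBuffer, sizeof(stackBuffer));
+ * tlsIsrInit(pIsrAllocator);
+ * // ... service the interrupt; TLS calls are valid here ...
+ * tlsIsrDestroy(pIsrAllocator);
+ * portMemAllocatorRelease(pIsrAllocator);
+ * ~~~
+ */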
 + +/** + * @brief Increment the refCount of given TLS entry. + * + * If an entry with given entryId doesn't exist, this function does nothing. + * + * This is useful when the code requires a call to a function that might call + * @ref tlsEntryRelease, but TLS should not be freed. An example might be when + * calling a function that acquires the GPU lock while already holding the lock. + * Currently, the code will temporarily release the lock, so the nested function + * acquires it again. Since rmGpuLock{Acquire,Release} acquires/releases TLS, + * this release could cause the data to be freed. + * + * @return TLS_ERROR_VAL if the entry doesn't exist. + * @return New TLS entry refCount, after increment. + * + */ +NvU32 tlsEntryReference(NvU64 entryId); + +/** + * @brief Decrement the refCount of given TLS entry. + * + * If an entry with given entryId doesn't exist, this function does nothing. + * See @ref tlsEntryReference for details. + * + * @return TLS_ERROR_VAL if the entry doesn't exist. + * @return New TLS entry refCount, after decrement. + * + */ +NvU32 tlsEntryUnreference(NvU64 entryId); + +/// @brief Size of memory to preallocate on ISR stack for TLS +#if PORT_IS_CHECKED_BUILD +// Checked builds have per-allocation overhead for tracking +#define TLS_ISR_ALLOCATOR_SIZE 512 +#else +#if defined(LOCK_VAL_ENABLED) + #define TLS_ISR_ALLOCATOR_SIZE 512 +#else + #define TLS_ISR_ALLOCATOR_SIZE 256 +#endif +#endif + +/** + * @brief Allocates a thread ID for the current ISR thread. + * + * @note This function should be called at the beginning of the ISR, as early as possible. + * + */ +void tlsIsrInit(PORT_MEM_ALLOCATOR *pIsrAllocator); + +/** + * @brief Destroys the thread ID for the current ISR thread. + * + * @note Should be called at the end of the ISR. Must be NOINLINE because if it gets + * inlined and tlsIsrInit doesn't, SP order can be wrong. + */ +NV_NOINLINE void tlsIsrDestroy(PORT_MEM_ALLOCATOR *pIsrAllocator); + +/** + * @brief Returns an allocator that can be used for memory allocations in ISR + * threads. + * If called outside of an ISR, NULL is returned. + * @note Should be called between tlsIsrInit and tlsIsrDestroy if in an ISR; + * otherwise it will ASSERT and return NULL. + */ +PORT_MEM_ALLOCATOR *tlsIsrAllocatorGet(void); + +/** + * @brief Set if DPCs have a unique thread ID that can be acquired by + * @ref portThreadGetCurrentThreadId. Windows DPCs have the same thread ID + * as the thread they preempted, so they are treated like ISRs. + * + * This isn't used by the TLS implementation, but is needed to decide whether + * the DPCs should call @ref tlsIsrInit. + */ +#if PORT_IS_KERNEL_BUILD && !defined(NV_MODS) && NVOS_IS_WINDOWS +#define TLS_DPC_HAVE_UNIQUE_ID 0 +#else +#define TLS_DPC_HAVE_UNIQUE_ID 1 +#endif + +/** + * @brief Set if threads can modify their own IRQL/interrupt context. + * On such builds, we cannot use @ref portUtilIsInterruptContext to decide + * whether a given thread is an ISR or a passive thread, and instead use a + * per-CPU ISR counter. + */ +#if PORT_IS_KERNEL_BUILD && (defined(NV_MODS) || NVOS_IS_WINDOWS) +#define TLS_THREADS_CAN_RAISE_IRQL 1 +#else +#define TLS_THREADS_CAN_RAISE_IRQL 0 +#endif + +/** + * @brief Set if ISRs are allowed to acquire a spinlock. On VMWare, the top + * level interrupt handler (ACK function) is not allowed to hold the spinlock + * for any amount of time (enforced by validation suite), so it uses a slower + * lockless implementation. + */ +#if PORT_IS_KERNEL_BUILD && NVOS_IS_VMWARE +#define TLS_ISR_CAN_USE_LOCK 0 +#else +#define TLS_ISR_CAN_USE_LOCK 1 +#endif + +/// @brief If set, a copy of THREAD_STATE_NODE pointer will be kept in TLS.
+#ifndef TLS_MIRROR_THREADSTATE +#define TLS_MIRROR_THREADSTATE 0 +#endif + +#ifdef __cplusplus +} +#endif + +///@} + +#endif diff --git a/src/nvidia/inc/libraries/utils/nv_enum.h b/src/nvidia/inc/libraries/utils/nv_enum.h new file mode 100644 index 0000000..5662fea --- /dev/null +++ b/src/nvidia/inc/libraries/utils/nv_enum.h @@ -0,0 +1,684 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file nv_enum.h + * @brief A header providing code-generation tools to define an enumerated type + * from a specification of a potentially-nested enum of limited depth. + * @see confluence page "Proposal for Better Enum Generation (NV_ENUM) Version 1.0" + */ + +/** + * @defgroup NV_UTILS_ENUM Infrastructure for generating better enumerated values. + * + * @brief Generates symbols comprising an enumerated type given a list of + * entries provided via macro argument. + * + * This file exposes macro functions which generate enum types and associated + * metadata from an enum specification consisting of entry names and values, + * with support for nesting enums up to a maximum depth of 2. The associated + * metadata generated from the enum specification allows for conversion of + * contiguous enums (those without holes within their valid value range) to and + * from indices, iteration over enum values (for each loop) and runtime + * determination of whether a given integer is a valid enum value. Additionally, + * macros are provided to "export" an enum such that only the entry names and + * values are defined, and no name is given to the enum. This is useful in + * situations where two different APIs utilize the same enum definition, such as + * in the RM SDK where enums are disallowed in control call parameters, but are + * very much desired inside of the driver. + * @{ + */ + +#ifndef NV_ENUM_H_ +#define NV_ENUM_H_ + +#define NV_ENUM_MIN ((NvS64) 0) +#define NV_ENUM_MAX ((NvS64)0xFFFFFFFF) + +/** @brief Fully expands both arguments, then concatenates them. */ +#define NV_ENUM_CONCATENATE(a, b) NV_ENUM_CONCATENATE2(a, b) +#define NV_ENUM_CONCATENATE2(a, b) _NV_ENUM_CONCATENATE(a, b) +#define _NV_ENUM_CONCATENATE(a, b) a##b + +/** @brief Fully expands the given argument, then stringifies it. */ +#define NV_ENUM_STRINGIFY(s) _NV_ENUM_STRINGIFY(s) +#define _NV_ENUM_STRINGIFY(s) #s + +/** @brief Expands the given argument. 
*/ +#define NV_EXPAND_ONE(x) x + +/** @brief Discards the given argument, expands to nothing. */ +#define NV_DISCARD_ONE(x) + + +/** + * @brief Fully expands both arguments, then attempts to invoke parameter `a` as + * a macro with parameter `b` as its argument. + * + * @param a : Macro Macro to apply + * @param b : Argument List Arguments to pass to macro `a`, surrounded by parentheses + */ +#define NV_ENUM_APPLY(a, b) _NV_ENUM_APPLY(a, b) +#define _NV_ENUM_APPLY(a, b) a b + +/** @brief Expands to the Nth argument */ +#define NV_ENUM_A1(a, b, c, d, e, f) a +#define NV_ENUM_A2(a, b, c, d, e, f) b +#define NV_ENUM_A3(a, b, c, d, e, f) c +#define NV_ENUM_A4(a, b, c, d, e, f) d +#define NV_ENUM_A5(a, b, c, d, e, f) e +#define NV_ENUM_A6(a, b, c, d, e, f) f + +/** + * @brief Expands to an argument list containing 6 elements with argument `b` + * moved to the last place. + */ +#define NV_ENUM_DL_POP(a, b, c, d, e, f) (a, c, d, e, f, b) + +/** + * @brief Expands to argument list `l` with its first element replaced by + * parameter `r` + */ +#define NV_ENUM_NV_ENUM_REPLACE_1(r, l) (r, NV_ENUM_APPLY(NV_ENUM_A2, l), NV_ENUM_APPLY(NV_ENUM_A3, l), NV_ENUM_APPLY(NV_ENUM_A4, l), NV_ENUM_APPLY(NV_ENUM_A5, l), NV_ENUM_APPLY(NV_ENUM_A6, l)) + +/** + * @brief Expands to argument list `l` with its first element replaced by + * parameter `r1`, its fifth argument replaced by parameter `r5`, and its + * sixth argument replaced by parameter `r6` + */ +#define NV_ENUM_REPLACE_3(r1, r5, r6, l) (r1, NV_ENUM_APPLY(NV_ENUM_A2, l), NV_ENUM_APPLY(NV_ENUM_A3, l), NV_ENUM_APPLY(NV_ENUM_A4, l), r5, r6) + +/** + * @brief Expands to argument list `l` with its first element replaced by + * parameter `r1`, its second argument replaced by parameter `r2`, its + * fifth argument replaced by parameter `r5`, and its sixth argument + * replaced by parameter `r6` + */ +#define NV_ENUM_REPLACE_4(r1, r2, r5, r6, l) (r1, r2, NV_ENUM_APPLY(NV_ENUM_A3, l), NV_ENUM_APPLY(NV_ENUM_A4, l), r5, r6) + + +/*! + * @brief Convenience LISP-like wrappers for CAR and CDR + * + * @note For those unfamiliar with LISP, most LISP interpreters allow for + * convenient macros which expand to nested invocations of CAR and CDR, + * formed by specifying 'A' and 'D' in any order between 'C' and 'R'. A + * regular expression which identifies this pattern is: 'C(A|D)+R'. The + * order of operations is performed from right to left, e.g. CAAADR + * applies CDR, then CAR, then CAR, then CAR. These are used to unpack + * data at specific locations within nested lists, which this tool uses + * often. There is no such thing as a meta-macro in the C preprocessor, so + * we have defined the operations which we use frequently here. + * + * @note instead of LISP-style structured lists which are formatted as + * records containing two elements each (e.g. (car, (cdr, ()))), this tool + * uses preprocessor argument lists (e.g. (car, cdr, etc)) because the + * former require proper recursion to deal with, which this tool does not + * have available to it. + * + * @note Because some compilers do not support variadic macros, we cannot use + * the generic versions of CAR and CDR here, so we have replaced them + * with very specific size-restricted versions.
+ */ +#define NV_CAAR(l) NV_ENUM_APPLY(NV_ENUM_A1, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CADAR(l) NV_ENUM_APPLY(NV_ENUM_A2, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CADDAR(l) NV_ENUM_APPLY(NV_ENUM_A3, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CADDDAR(l) NV_ENUM_APPLY(NV_ENUM_A4, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CADDDDAR(l) NV_ENUM_APPLY(NV_ENUM_A5, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CADDDDDAR(l) NV_ENUM_APPLY(NV_ENUM_A6, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CAADR(l) NV_ENUM_APPLY(NV_ENUM_A1, NV_ENUM_APPLY(NV_ENUM_A2, l)) +#define NV_CADADR(l) NV_ENUM_APPLY(NV_ENUM_A2, NV_ENUM_APPLY(NV_ENUM_A2, l)) +#define NV_CADDADR(l) NV_ENUM_APPLY(NV_ENUM_A3, NV_ENUM_APPLY(NV_ENUM_A2, l)) +#define NV_CADDDADR(l) NV_ENUM_APPLY(NV_ENUM_A4, NV_ENUM_APPLY(NV_ENUM_A2, l)) +#define NV_CADDDDADR(l) NV_ENUM_APPLY(NV_ENUM_A5, NV_ENUM_APPLY(NV_ENUM_A2, l)) + +/*! + * @brief Performs per-entry generation function, and either expands or extinguishes it + * + * @param dat__ Data List - Generation data table + * @param name Identifier - Name of enum entry + * @param value Integer Literal - Value for this entry + * + * @return the result of the generation function for this table, + * or nothing if this layer is being filtered (i.e. nested enum) + */ +#define NV_ENUM_ENTRY(dat__, name, value) \ + NV_ENUM_DAT_ENTRY(dat__) (NV_ENUM_DAT_GEN2(dat__) (dat__, name, value)) + +/*! + * @brief Expands enum entries within nested enum specification using an updated + * data list specification + * + * @note the Current Enum Name is concatenated with parameter `name` + * the function table has its first entry popped + * other variables are unchanged. + * + * @param dat__ Data List - Generation data table + * @param name Token - String to append to previous enum name + * @param res_lo Integer Literal - Min value of this nested enum + * @param res_hi Integer Literal - Max value of this nested enum + * @param entries Macro - Nested Enum Specification + */ +#define NV_ENUM_NEST_EXPAND0(dat__, name, res_lo, res_hi, entries) \ + entries(NV_ENUM_APPLY(NV_ENUM_DL_POP, NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), NV_ENUM_APPLY(NV_ENUM_A1, dat__)), dat__))) +#define NV_ENUM_NEST_EXPAND1(dat__, name, res_lo, res_hi, entries) \ + entries(NV_ENUM_APPLY(NV_ENUM_DL_POP, NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), NV_ENUM_APPLY(NV_ENUM_A1, dat__)), dat__))) +#define NV_ENUM_NEST_EXPAND2(dat__, name, res_lo, res_hi, entries) \ + entries(NV_ENUM_APPLY(NV_ENUM_DL_POP, NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), NV_ENUM_APPLY(NV_ENUM_A1, dat__)), dat__))) +#define NV_ENUM_NEST_EXPAND3(dat__, name, res_lo, res_hi, entries) \ + entries(NV_ENUM_APPLY(NV_ENUM_DL_POP, NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), NV_ENUM_APPLY(NV_ENUM_A1, dat__)), dat__))) + +/*! + * @brief Performs all enum generation for the given nested enum specification + * + * @note the Current Enum Name is concatenated with parameter `name` + * the Nested Enum Name is updated to match the Current Enum Name + * Res. Min is updated with parameter `res_lo` + * Res. 
Max is updated with parameter `res_hi` + * the function table has its first entry popped + * other variables are unchanged + * + * @param dat__ Data List - Generation data table + * @param name Token - String to append to previous enum name + * @param res_lo Integer Literal - Min value of this nested enum + * @param res_hi Integer Literal - Max value of this nested enum + * @param entries Macro - Nested Enum Specification + */ +#define NV_ENUM_NEST_GEN(dat__, name, res_lo, res_hi, entries) \ + NV_ENUM_DAT_GEN1(dat__)( \ + NV_ENUM_APPLY( \ + NV_ENUM_DL_POP, \ + NV_ENUM_NV_ENUM_REPLACE_1( \ + NV_ENUM_REPLACE_4( \ + NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), \ + NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), \ + res_lo, \ + res_hi, \ + NV_ENUM_APPLY( \ + NV_ENUM_A1, \ + dat__ \ + ) \ + ), \ + dat__ \ + ) \ + ), \ + entries \ + ) + +/*! + * @note Definition: Data List + * This tool packs information used in each depth of processing within a + * nested list, which is passed to each invocation of NV_ENUM_ENTRY and + * NV_ENUM_NEST. The format of this object is as follows: + * ( + * (<Current Enum Name>, <0-Depth Nested Enum Name>, <Prefix>, <Root Enum Name>, <Reserved Min>, <Reserved Max>) + * (<Entry Fn>, <Nest Fn>, <Per-Enum Gen Fn>, <Per-Entry Gen Fn>) + * (<Entry Fn>, <Nest Fn>, <Per-Enum Gen Fn>, <Per-Entry Gen Fn>) + * (<Entry Fn>, <Nest Fn>, <Per-Enum Gen Fn>, <Per-Entry Gen Fn>) + * ... + * ) + * + * Root Enum Name: Name of enum passed to NV_ENUM_DEF (unaffected by NV_ENUM_NEST) + * 0-Depth Nested Enum Name: Name of the enum at depth 0 (affected by NV_ENUM_NEST) + * Prefix: Text prepended to each entry name (no spaces) + * Current Enum Name: Name of the enum at current depth + * Reserved Min: The minimum allowable enum value at this depth + * Reserved Max: The maximum allowable enum value at this depth + * Entry Fn: macro called once per entry with the entry as an argument + * Nest Fn: Duplicate definition of NV_ENUM_NEST_EXPAND to WAR recursion limits + * Per-Enum Gen Fn: Function to call once per NV_ENUM_DEF or NV_ENUM_NEST + * Per-Entry Gen Fn: Function to call once per NV_ENUM_ENTRY + * + */ + +// +// Data list accessor macros +// + +/*! @brief Given data list, returns Current Enum Name */ +#define NV_ENUM_DAT_CURR_NAME(dat__) NV_CAAR(dat__) +/*! @brief Given data list, returns 0-depth nested enum name */ +#define NV_ENUM_DAT_NEST_NAME(dat__) NV_CADAR(dat__) +/*! @brief Given data list, returns Prefix */ +#define NV_ENUM_DAT_PREFIX(dat__) NV_CADDAR(dat__) +/*! @brief Given data List, returns Root Enum Name */ +#define NV_ENUM_DAT_ROOT_NAME(dat__) NV_CADDDAR(dat__) +/*! @brief Given data list, returns Res. Min at current depth */ +#define NV_ENUM_DAT_MIN(dat__) NV_CADDDDAR(dat__) +/*! @brief Given data list, returns Res. Max at current depth */ +#define NV_ENUM_DAT_MAX(dat__) NV_CADDDDDAR(dat__) +/*! @brief Given data list, returns Entry Fn at current depth */ +#define NV_ENUM_DAT_ENTRY(dat__) NV_CAADR(dat__) +/*! @brief Given data list, returns Nest Fn at current depth */ +#define NV_ENUM_NEST(dat__) NV_CADADR(dat__) +/*! @brief Given data list, returns Per-Enum Gen Fn at current depth */ +#define NV_ENUM_DAT_GEN1(dat__) NV_CADDADR(dat__) +/*! @brief Given data list, returns Per-Entry Gen Fn at current depth */ +#define NV_ENUM_DAT_GEN2(dat__) NV_CADDDADR(dat__) + +/*!
 + * @brief Constructs a data list to be used for generation of the root enum + */ +#define NV_ENUM_DEPTH_0(name, prefix, gen1_fn, gen2_fn) \ + ( (name, name, prefix, name, NV_ENUM_MIN, NV_ENUM_MAX) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND0, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND1, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND2, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND3, gen1_fn, gen2_fn, unused, unused) \ + , (unused, unused, unused, unused, unused, unused) \ + ) + +/*! + * @brief Constructs a data list to be used for generation of enums at depth 1 + */ +#define NV_ENUM_DEPTH_1(name, prefix, gen1_fn, gen2_fn) \ + ( (name, name, prefix, name, NV_ENUM_MIN, NV_ENUM_MAX) \ + , (NV_DISCARD_ONE, NV_ENUM_NEST_GEN, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND0, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND1, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND2, gen1_fn, gen2_fn, unused, unused) \ + , (unused, unused, unused, unused, unused, unused) \ + ) + +/*! + * @brief Constructs a data list to be used for generation of enums at depth 2 + */ +#define NV_ENUM_DEPTH_2(name, prefix, gen1_fn, gen2_fn) \ + ( (name, name, prefix, name, NV_ENUM_MIN, NV_ENUM_MAX) \ + , (NV_DISCARD_ONE, NV_ENUM_NEST_EXPAND0, gen1_fn, gen2_fn, unused, unused) \ + , (NV_DISCARD_ONE, NV_ENUM_NEST_GEN, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND1, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND2, gen1_fn, gen2_fn, unused, unused) \ + , (unused, unused, unused, unused, unused, unused) \ + ) + +/// +/// Generator Functions +/// + + +/*! @brief Generates an enum type given the enum specification in entries */ +#define NV_ENUM_GEN_MAIN(dat__, entries) \ + enum NV_ENUM_DAT_CURR_NAME(dat__) { entries(dat__) }; + +/*! @brief Generates a single enum entry with the given name and value */ +#define NV_ENUM_GEN_MAIN_FN(dat__, entry_name, value) \ + NV_ENUM_CONCATENATE(NV_ENUM_DAT_PREFIX(dat__), entry_name) = value, + + +/*! @brief Generates an enum typedef for the given enum. All nested types receive the same typedef (i.e., that of the root enum). */ +#define NV_ENUM_GEN_TYPEDEF(dat__, entries) \ + typedef enum NV_ENUM_DAT_ROOT_NAME(dat__) NV_ENUM_DAT_CURR_NAME(dat__); + +/*! @brief Does nothing. There is no per-entry generation for typedefs. */ +#define NV_ENUM_GEN_TYPEDEF_FN(dat__, entry_name, value) + + +/*! @brief Generates an enum with an added entry at the end to provide the enum size */ +#define NV_ENUM_GEN_SIZE(dat__, entries) \ + enum { entries(dat__) NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __SIZE) }; + +/*! @brief Generates a single enum entry with __UNUSED appended. These values are not meant to be used. */ +#define NV_ENUM_GEN_SIZE_FN(dat__, entry_name, value) \ + NV_ENUM_CONCATENATE(NV_ENUM_CONCATENATE(NV_ENUM_DAT_NEST_NAME(dat__), entry_name), __UNUSED) = value, + + +/*! @brief Generates a conversion function from an enum value to string representation. */ +#define NV_ENUM_GEN_STRING(dat__, entries) \ + static inline const char * \ + NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), _TO_STRING) \ + ( \ + enum NV_ENUM_DAT_ROOT_NAME(dat__) in \ + ) \ + { \ + switch (in) \ + { \ + entries(dat__) \ + default: \ + break; \ + } \ + return NV_ENUM_STRINGIFY(NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __UNKNOWN)); \ + } + +/*!
@brief Generates a case for the given enum entry, and its string representation. */ +#define NV_ENUM_GEN_STRING_FN(dat__, entry_name, value) \ + case NV_ENUM_CONCATENATE(NV_ENUM_DAT_PREFIX(dat__), entry_name): \ + return NV_ENUM_STRINGIFY(NV_ENUM_CONCATENATE(NV_ENUM_DAT_PREFIX(dat__), entry_name)); + + +/*! @brief Generates a conversion function from NvU32 to enum value. */ +#define NV_ENUM_GEN_FROM(dat__, entries) \ + static inline NV_STATUS \ + NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), _FROM32) \ + ( \ + NvU32 in, \ + enum NV_ENUM_DAT_ROOT_NAME(dat__) *out \ + ) \ + { \ + switch (in) \ + { \ + entries(dat__) \ + if (out != NULL) \ + *out = ((enum NV_ENUM_DAT_ROOT_NAME(dat__))in); \ + return NV_OK; \ + default: \ + break; \ + } \ + return NV_ERR_OUT_OF_RANGE; \ + } + +/*! @brief Generates a case for the given enum entry. */ +#define NV_ENUM_GEN_FROM_FN(dat__, entry_name, value) \ + case NV_ENUM_CONCATENATE(NV_ENUM_DAT_PREFIX(dat__), entry_name): + + +/*! @brief Generates a struct constant containing the smallest value contained within the enum (plus one). */ +#define NV_ENUM_GEN_LO(dat__, entries) \ + typedef struct { char lo[(1 * entries(dat__) 0 + 1)]; } NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __LO) ; + +/*! @brief Builds a portion of the expression calculating the smallest enum value. */ +#define NV_ENUM_GEN_LO_FN(dat__, entry_name, value) \ + (value)) + (0 * + + +/*! @brief Generates a struct constant containing the number of values contained within the enum. */ +#define NV_ENUM_GEN_COUNT(dat__, entries) \ + typedef struct { char count[(0 + entries(dat__) 0)]; } NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __COUNT) ; + +/*! @brief Builds a portion of the expression calculating the number of enum values. */ +#define NV_ENUM_GEN_COUNT_FN(dat__, entry_name, value) \ + 1 + + + +/*! @brief Generates a group of struct constants containing the above generated values. */ +#define NV_ENUM_GEN_META(dat__, entries) \ + typedef struct { char lo[sizeof(((NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __LO) *) NULL)->lo)]; \ + char hi[NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __SIZE)]; \ + char count[sizeof(((NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __COUNT) *) NULL)->count)]; \ + char size[NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __SIZE)]; \ + char bContiguous[(sizeof(((NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __COUNT) *) NULL)->count) == (NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __SIZE) - sizeof(((NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __LO) *) NULL)->lo) + 1)) + 1]; \ + } NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __META) ; + +/*! @brief unused. Nothing needs to be generated per-entry for this generator. */ +#define NV_ENUM_GEN_META_FN(dat__, entry_name, value) + +/*! @brief Generates a compile-time assertion. */ +#define NV_ENUM_GEN_ASSERT_MONOTONIC(dat__, entries) \ + typedef char NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), _assert_monotonic)[ (2 * ((0 > entries(dat__) ((NvU32)-1)) == 0)) - 1 ]; + +/*! + * @brief Builds a portion of the expression asserting that all enum values + * must be declared in strictly monotonically increasing order. + */ +#define NV_ENUM_GEN_ASSERT_MONOTONIC_FN(dat__, entry_name, value) \ + value) + (value > + + +/*! @brief Generates a compile-time assertion. */ +#define NV_ENUM_GEN_ASSERT_IN_RANGE(dat__, entries) \ + typedef char NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), _assert_in_range)[ (2 * ((1 * entries(dat__) 1) == 1)) - 1 ]; + +/*! 
 + * @brief Builds a portion of the expression asserting that all nested enum + * values must be within the reserved range of their parent enum. + */ +#define NV_ENUM_GEN_ASSERT_IN_RANGE_FN(dat__, entry_name, value) \ + (((NvS64)value) >= NV_ENUM_DAT_MIN(dat__)) * (((NvS64)value) <= NV_ENUM_DAT_MAX(dat__)) * + + +/// +/// End of Generator Functions +/// + + +/*! + * @brief Performs code generation for the given generator function pair + * + * @note This function must be updated if supporting deeper nesting in the future + * + * @param fn1 Macro - Per-Enum Gen Fn + * @param fn2 Macro - Per-Entry Gen Fn + * @param enum_name Token - Root Enum Name + * @param prefix Token - Prefix + * @param entries Macro - Enum Specification + */ +#define NV_ENUM_GENERATOR(fn1, fn2, enum_name, prefix, entries) \ + fn1(NV_ENUM_DEPTH_0(enum_name, prefix, fn1, fn2), entries) \ + entries(NV_ENUM_DEPTH_1(enum_name, prefix, fn1, fn2)) \ + entries(NV_ENUM_DEPTH_2(enum_name, prefix, fn1, fn2)) \ + +// +// The Windows preprocessor crashes with "ran out of heap space" errors if the +// preprocessed output from a single macro gets too large, so skip the +// verification sanity asserts when running on Windows to increase the size of +// representable enums +// +#if NVOS_IS_WINDOWS + +/*! + * @brief Generates an enum and associated metadata with the given enum name and prefix + * + * @param prefix Token - Prefix + * @param enum_name Token - Root Enum Name + * @param entries Macro - Enum Specification + */ +#define NV_ENUM_DEF_PREFIX(prefix, enum_name, entries) \ + NV_ENUM_GEN_MAIN(NV_ENUM_DEPTH_0(enum_name, prefix, NV_ENUM_GEN_MAIN, NV_ENUM_GEN_MAIN_FN), entries) \ + NV_ENUM_GENERATOR(NV_ENUM_GEN_TYPEDEF, NV_ENUM_GEN_TYPEDEF_FN, enum_name, prefix, entries) \ + NV_ENUM_GENERATOR(NV_ENUM_GEN_SIZE, NV_ENUM_GEN_SIZE_FN, enum_name, prefix, entries) \ + NV_ENUM_GENERATOR(NV_ENUM_GEN_STRING, NV_ENUM_GEN_STRING_FN, enum_name, prefix, entries) \ + NV_ENUM_GENERATOR(NV_ENUM_GEN_FROM, NV_ENUM_GEN_FROM_FN, enum_name, prefix, entries) \ + NV_ENUM_GENERATOR(NV_ENUM_GEN_LO, NV_ENUM_GEN_LO_FN, enum_name, prefix, entries) \ + NV_ENUM_GENERATOR(NV_ENUM_GEN_COUNT, NV_ENUM_GEN_COUNT_FN, enum_name, prefix, entries) \ + NV_ENUM_GENERATOR(NV_ENUM_GEN_META, NV_ENUM_GEN_META_FN, enum_name, prefix, entries) \ + +#else + +/*!
+ * @brief Generates an enum and associated metadata with the given enum name and prefix
+ *
+ * @param prefix     Token - Prefix
+ * @param enum_name  Token - Root Enum Name
+ * @param entries    Macro - Enum Specification
+ */
+#define NV_ENUM_DEF_PREFIX(prefix, enum_name, entries) \
+    NV_ENUM_GEN_MAIN(NV_ENUM_DEPTH_0(enum_name, prefix, NV_ENUM_GEN_MAIN, NV_ENUM_GEN_MAIN_FN), entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_TYPEDEF, NV_ENUM_GEN_TYPEDEF_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_SIZE, NV_ENUM_GEN_SIZE_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_STRING, NV_ENUM_GEN_STRING_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_FROM, NV_ENUM_GEN_FROM_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_LO, NV_ENUM_GEN_LO_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_COUNT, NV_ENUM_GEN_COUNT_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_META, NV_ENUM_GEN_META_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_ASSERT_MONOTONIC, NV_ENUM_GEN_ASSERT_MONOTONIC_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_ASSERT_IN_RANGE, NV_ENUM_GEN_ASSERT_IN_RANGE_FN, enum_name, prefix, entries)
+
+
+
+#endif // NVOS_IS_WINDOWS
+
+#define NV_ENUM_NOTHING
+
+/*!
+ * @brief Generates an enum and associated metadata with the given enum name
+ *
+ * @param enum_name  Token - Root Enum Name
+ * @param entries    Macro - Enum Specification
+ */
+#define NV_ENUM_DEF(enum_name, entries) \
+    NV_ENUM_DEF_PREFIX(NV_ENUM_NOTHING, enum_name, entries)
+
+/*!
+ * @brief Generates an exported enum with the given prefix
+ *
+ * @param prefix     Token - Prefix
+ * @param entries    Macro - Enum Specification
+ */
+#define NV_ENUM_EXPORT_PREFIX(prefix, entries) \
+    NV_ENUM_GEN_MAIN(NV_ENUM_DEPTH_0(NV_ENUM_NOTHING, prefix, NV_ENUM_GEN_MAIN, NV_ENUM_GEN_MAIN_FN), entries)
+
+/*!
+ * @brief Generates an exported enum
+ *
+ * @param entries    Macro - Enum Specification
+ */
+#define NV_ENUM_EXPORT(entries) \
+    NV_ENUM_EXPORT_PREFIX( , entries)
+
+
+///
+/// Runtime Utility Functions
+///
+
+/*!
+ * @brief Converts an unsigned integer into an enum value, or returns an error.
+ *
+ * @param[in]  type     identifier - Enum type name
+ * @param[in]  value    rvalue - integer value belonging to given enum
+ * @param[out] pResult  pointer - Optional pointer to enum, updated with value on success
+ *
+ * @return NV_OK if the value belongs to the enum
+ *         NV_ERR_OUT_OF_RANGE otherwise
+ */
+#define NV_ENUM_FROM32(type, value, pResult) \
+    (NV_ENUM_CONCATENATE(type, _FROM32)((value), (pResult)))
+
+/*!
+ * @brief Returns a string representation of the name of the given enum value
+ *
+ * @param[in]  type     identifier - Enum type name
+ * @param[in]  value    rvalue - integer value belonging to given enum
+ *
+ * @return a string representing the given value
+ */
+#define NV_ENUM_TO_STRING(type, value) \
+    (NV_ENUM_CONCATENATE(type, _TO_STRING)(value))
+
+/*!
+ * @brief Queries whether the given value belongs to the given enum
+ *
+ * @param[in]  type     identifier - Enum type name
+ * @param[in]  value    rvalue - integer to check
+ *
+ * @return NV_TRUE if the given value is valid
+ *         NV_FALSE otherwise
+ */
+#define NV_ENUM_IS(type, value) \
+    (NV_OK == NV_ENUM_FROM32(type, (value), NULL))
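+
+/*
+ * Editor's illustration (not part of the original interface docs): a sketch
+ * of defining an enum with this machinery and querying it at runtime. It
+ * assumes the NV_ENUM_ENTRY spec macro declared earlier in this header and
+ * the typedef produced by NV_ENUM_GEN_TYPEDEF; COLOR and its entries are
+ * hypothetical names, and FOR_EACH_IN_ENUM is defined just below.
+ * ~~~{.c}
+ * #define COLOR_DEF(x)                 \
+ *     NV_ENUM_ENTRY(x, COLOR_RED,   0) \
+ *     NV_ENUM_ENTRY(x, COLOR_GREEN, 1) \
+ *     NV_ENUM_ENTRY(x, COLOR_BLUE,  2)
+ * NV_ENUM_DEF(COLOR, COLOR_DEF)
+ *
+ * COLOR c;
+ * if (NV_ENUM_FROM32(COLOR, 1, &c) == NV_OK)   // c == COLOR_GREEN
+ * {
+ *     (void)NV_ENUM_TO_STRING(COLOR, c);       // "COLOR_GREEN"
+ * }
+ *
+ * FOR_EACH_IN_ENUM(COLOR, c)
+ * {
+ *     // visits COLOR_RED, COLOR_GREEN, COLOR_BLUE in order
+ * }
+ * FOR_EACH_IN_ENUM_END;
+ * ~~~
+ */
+
+/*!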
+ * @brief Queries the value of the smallest enum entry
+ *
+ * @param[in]  type     identifier - Enum type name
+ */
+#define NV_ENUM_LO(type) \
+    ((type)(sizeof(((NV_ENUM_CONCATENATE(type, __META) *) NULL)->lo) - 1))
+
+/*!
+ * @brief Queries the value of the largest enum entry
+ *
+ * @param[in]  type     identifier - Enum type name
+ */
+#define NV_ENUM_HI(type) \
+    ((type)(sizeof(((NV_ENUM_CONCATENATE(type, __META) *) NULL)->hi) - 1))
+
+/*!
+ * @brief Queries the number of values between the first and last enum entries
+ * @note This value is large enough to use in an array declaration with enum
+ *       entries used as indices into the array.
+ *
+ * @param[in]  type     identifier - Enum type name
+ */
+#define NV_ENUM_SIZE(type) \
+    (sizeof(((NV_ENUM_CONCATENATE(type, __META) *) NULL)->size))
+
+/*!
+ * @brief Queries the number of values defined by the enum
+ *
+ * @param[in]  type     identifier - Enum type name
+ */
+#define NV_ENUM_COUNT(type) \
+    (sizeof(((NV_ENUM_CONCATENATE(type, __META) *) NULL)->count))
+
+/*!
+ * @brief Queries whether or not the enum is defined contiguously (i.e. no holes)
+ *
+ * @param[in]  type     identifier - Enum type name
+ *
+ * @return NV_TRUE if each value between the lo and hi enum values are valid enum values
+ */
+#define NV_ENUM_IS_CONTIGUOUS(type) \
+    ((NvBool)(sizeof(((NV_ENUM_CONCATENATE(type, __META) *) NULL)->bContiguous) - 1))
+
+/*!
+ * @brief Macros providing iteration over each value defined by the enum type
+ * @note Iteration is faster over contiguous enums
+ *
+ * @param[in]  type     identifier - Enum type name
+ * @param[in]  value    lvalue - iterator holding current enum value
+ */
+#define FOR_EACH_IN_ENUM(type, value) \
+{ \
+    NvU32 localValue; \
+    for (localValue = value = NV_ENUM_LO(type); localValue <= NV_ENUM_HI(type); (value) = (type) (++localValue)) \
+    { \
+        if (!NV_ENUM_IS_CONTIGUOUS(type) && !NV_ENUM_IS(type, localValue)) \
+            continue;
+
+#define FOR_EACH_IN_ENUM_END \
+    } \
+}
+
+/*!
+ * @brief Given the Nth enum value defined by the enum type, returns N
+ * @note Only supports contiguous enums
+ *
+ * @param[in]  type     identifier - Enum type name
+ * @param[in]  value    rvalue - integer value belonging to enum type
+ *
+ * @return the index at which the enum value was defined within the enum, or -1
+ */
+#define NV_ENUM_TO_IDX(type, value) \
+    ((NV_ENUM_IS_CONTIGUOUS(type) && NV_ENUM_IS(type, value)) ? ((value) - NV_ENUM_LO(type)) : ((NvU32)-1))
+
+/*!
+ * @brief Returns the Nth enum value defined by the given type
+ * @note Only supports contiguous enums
+ *
+ * @param[in]  type     identifier - Enum type name
+ * @param[in]  idx      rvalue - integer specifying entry index
+ *
+ * @return The Nth enum value defined within the enum, or NV_ENUM_SIZE(type) if non-existent
+ */
+#define NV_ENUM_FROM_IDX(type, idx) \
+    ((type)((NV_ENUM_IS_CONTIGUOUS(type) && idx < NV_ENUM_COUNT(type)) ? (NV_ENUM_LO(type) + (idx)) : NV_ENUM_SIZE(type)))
+
+///
+/// End of Runtime Utility Functions
+///
+
+///@}
+/// NV_UTILS_ENUM
+
+#endif // NV_ENUM_H_
diff --git a/src/nvidia/inc/libraries/utils/nvassert.h b/src/nvidia/inc/libraries/utils/nvassert.h
new file mode 100644
index 0000000..a6613b4
--- /dev/null
+++ b/src/nvidia/inc/libraries/utils/nvassert.h
@@ -0,0 +1,992 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /** + * @file + * @brief Utility assertion macros + * + * @see "NV_ASSERT" confluence page for additional documentation + */ + +#ifndef _NV_UTILS_ASSERT_H_ +#define _NV_UTILS_ASSERT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup NV_UTILS_ASSERT Utility Assertion Macros + * + * @brief Provides a light abstraction layer for common assert macro patterns. + * + * NvPort and NvPrintf are used for debug and logging primitives. + * If an environment cannot use these directly then it can override + * the NV_PORT_HEADER and NV_PRINTF_HEADER defines in its makefile + * to point to appropriate replacements. + * @{ + */ +#include "nvstatus.h" +#include "nvmacro.h" + +// Include portability header, falling back to NvPort if not provided. +#ifndef NV_PORT_HEADER +#define NV_PORT_HEADER "nvport/nvport.h" +#endif +#include NV_PORT_HEADER + +// Include printf header, falling back to NvPrintf if not provided. +#ifndef NV_PRINTF_HEADER +#define NV_PRINTF_HEADER "utils/nvprintf.h" +#endif +#include NV_PRINTF_HEADER + +/* + * Use __builtin_expect to improve branch predictions on the GNU compiler. + * + * Note that these macros convert the parameter to bool. They should + * only be used in 'if' statements. + * + * '!= 0' is used (instead of a cast to NvBool or !!) to avoid 'will always + * evaluate as 'true'' warnings in some gcc versions. + */ +#if defined(__GNUC__) && __GNUC__ >= 3 +#define NV_LIKELY(expr) __builtin_expect(((expr) != 0), 1) +#define NV_UNLIKELY(expr) __builtin_expect(((expr) != 0), 0) +#else +#define NV_LIKELY(expr) ((expr) != 0) +#define NV_UNLIKELY(expr) ((expr) != 0) +#endif + +/* + * Set this to pass expression, function name, file name, and line number + * to the nvAssertFailed functions. + * + * NOTE: NV_PRINTF_STRINGS_ALLOWED defaults to: + * defined(DEBUG) || defined(NV_MODS) || defined(QA_BUILD) + * + * RM_ASSERT used this condition to decide whether to print assert strings: + * defined(DEBUG) || defined(ASSERT_BUILD) || defined(QA_BUILD) + */ +#if !defined(NV_ASSERT_FAILED_USES_STRINGS) +#if (NV_PRINTF_STRINGS_ALLOWED && (defined(DEBUG) || defined(ASSERT_BUILD) || defined(QA_BUILD))) +#define NV_ASSERT_FAILED_USES_STRINGS 1 +#else +#define NV_ASSERT_FAILED_USES_STRINGS 0 +#endif +#endif + +// Hook NV_ASSERT into RCDB. 
+#if !defined(NV_JOURNAL_ASSERT_ENABLE) +#if defined(NVRM) && (NVOS_IS_WINDOWS || NVOS_IS_UNIX || NVOS_IS_LIBOS) && !defined(NVWATCH) && !defined(NV_MODS) +#define NV_JOURNAL_ASSERT_ENABLE 1 +#else +#define NV_JOURNAL_ASSERT_ENABLE 0 +#endif +#endif + +#if !defined(NV_ASSERT_FAILED_BACKTRACE_ENABLE) +#if defined(NVRM) && NVOS_IS_UNIX && !defined(NVWATCH) && defined(DEBUG) +#define NV_ASSERT_FAILED_BACKTRACE_ENABLE 1 +#else +#define NV_ASSERT_FAILED_BACKTRACE_ENABLE 0 +#endif +#endif + +#if !defined(COVERITY_ASSERT_FAIL) +#if defined(__COVERITY__) +void __coverity_panic__(void); +#define COVERITY_ASSERT_FAIL() __coverity_panic__() +#else // defined(__COVERITY__) +#define COVERITY_ASSERT_FAIL() ((void) 0) +#endif // defined(__COVERITY__) +#endif // !defined(COVERITY_ASSERT_FAIL) + +/* + * NV_ASSERT_FAILED, NV_ASSERT_OK_FAILED, NV_CHECK_FAILED, and NV_CHECK_OK_FAILED + * These macros are defined in three flavors: + * + * normal - expr/file/line are concatenated with format string for NVLOG. + * expr/file/line are passed in as parameters to a helper function + * for NV_PRINTF. + * + * normal for GSP-RM - expr/file/line are omitted, since each NV_PRINTF line + * already has them. NVLOG is not used. + * + * _FUNC - expr/file/line are passed in as parameters to a helper function + * for both NVLOG and NV_PRINTF. + * The _FUNC macros are used for pre-compiled headers on most platforms. + */ + +#if NV_ASSERT_FAILED_USES_STRINGS +#define NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr) , exprStr, __FILE__, __LINE__ +#define NV_ASSERT_FAILED_FUNC_PARAM(exprStr) exprStr, __FILE__, __LINE__ +#define NV_ASSERT_FAILED_FUNC_COMMA_TYPE ,const char *pszExpr, const char *pszFileName, NvU32 lineNum +#define NV_ASSERT_FAILED_FUNC_TYPE const char *pszExpr, const char *pszFileName, NvU32 lineNum +#else +#define NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr) , __LINE__ +#define NV_ASSERT_FAILED_FUNC_PARAM(exprStr) __LINE__ +#define NV_ASSERT_FAILED_FUNC_COMMA_TYPE , NvU32 lineNum +#define NV_ASSERT_FAILED_FUNC_TYPE NvU32 lineNum +#endif + +#if defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVOS_IS_LIBOS) + +void nvAssertInit(void); +void nvAssertDestroy(void); + +void nvAssertFailed(NV_ASSERT_FAILED_FUNC_TYPE); +void nvAssertOkFailed(NvU32 status NV_ASSERT_FAILED_FUNC_COMMA_TYPE); + +#define NV_ASSERT_FAILED(exprStr) \ + do { \ + NV_LOG_SPECIAL(LEVEL_ERROR, RM_GSP_LOG_SPECIAL_ASSERT_FAILED, \ + exprStr "\n"); \ + nvAssertFailed(0); \ + COVERITY_ASSERT_FAIL(); \ + PORT_BREAKPOINT(); \ + } while(0) + +#define NV_ASSERT_OK_FAILED(exprStr, status) \ + do { \ + NV_LOG_SPECIAL(LEVEL_ERROR, RM_GSP_LOG_SPECIAL_ASSERT_OK_FAILED, \ + exprStr "\n", status); \ + nvAssertOkFailed(status, 0); \ + COVERITY_ASSERT_FAIL(); \ + PORT_BREAKPOINT(); \ + } while(0) + +#define NV_CHECK_FAILED(level, exprStr) \ + do { \ + NV_LOG_SPECIAL(level, RM_GSP_LOG_SPECIAL_CHECK_FAILED, \ + exprStr "\n"); \ + } while(0) \ + +#define NV_CHECK_OK_FAILED(level, exprStr, status) \ + do { \ + NV_LOG_SPECIAL(level, RM_GSP_LOG_SPECIAL_CHECK_OK_FAILED, \ + exprStr "\n", status); \ + } while (0) + +#else // defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVOS_IS_LIBOS) + +void nvAssertInit(void); +void nvAssertDestroy(void); + +// Helper function prototypes for _FAILED macros below. 
+#if NV_PRINTF_ENABLED || NV_JOURNAL_ASSERT_ENABLE
+void nvAssertFailed(NV_ASSERT_FAILED_FUNC_TYPE);
+void nvAssertOkFailed(NvU32 status NV_ASSERT_FAILED_FUNC_COMMA_TYPE);
+void nvCheckFailed(NvU32 level NV_ASSERT_FAILED_FUNC_COMMA_TYPE);
+void nvCheckOkFailed(NvU32 level, NvU32 status NV_ASSERT_FAILED_FUNC_COMMA_TYPE);
+void nvAssertFailedNoLog(NV_ASSERT_FAILED_FUNC_TYPE);
+void nvAssertOkFailedNoLog(NvU32 status NV_ASSERT_FAILED_FUNC_COMMA_TYPE);
+void nvCheckFailedNoLog(NvU32 level NV_ASSERT_FAILED_FUNC_COMMA_TYPE);
+void nvCheckOkFailedNoLog(NvU32 level, NvU32 status NV_ASSERT_FAILED_FUNC_COMMA_TYPE);
+#else // NV_PRINTF_ENABLED || NV_JOURNAL_ASSERT_ENABLE
+#define nvAssertFailed(...)
+#define nvAssertOkFailed(...)
+#define nvCheckFailed(...)
+#define nvCheckOkFailed(...)
+#define nvAssertFailedNoLog(...)
+#define nvAssertOkFailedNoLog(...)
+#define nvCheckFailedNoLog(...)
+#define nvCheckOkFailedNoLog(...)
+#endif // NV_PRINTF_ENABLED || NV_JOURNAL_ASSERT_ENABLE
+
+#define NV_ASSERT_LOG(level, fmt, ...) \
+    NVLOG_PRINTF(NV_PRINTF_MODULE, NVLOG_ROUTE_RM, level, \
+                 NV_PRINTF_ADD_PREFIX(fmt), ##__VA_ARGS__)
+
+#define NV_ASSERT_FAILED(exprStr) \
+    do { \
+        NV_ASSERT_LOG(LEVEL_ERROR, "Assertion failed: " exprStr); \
+        nvAssertFailedNoLog(NV_ASSERT_FAILED_FUNC_PARAM(exprStr)); \
+        COVERITY_ASSERT_FAIL(); \
+        PORT_BREAKPOINT_CHECKED(); \
+    } while(0)
+
+#define NV_ASSERT_OK_FAILED(exprStr, status) \
+    do { \
+        NV_ASSERT_LOG(LEVEL_ERROR, "Assertion failed: 0x%08X returned from " \
+                      exprStr, status); \
+        nvAssertOkFailedNoLog(status \
+                              NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr)); \
+        COVERITY_ASSERT_FAIL(); \
+        PORT_BREAKPOINT_CHECKED(); \
+    } while(0)
+
+#define NV_CHECK_FAILED(level, exprStr) \
+    do { \
+        NV_ASSERT_LOG(level, "Check failed: " exprStr); \
+        if (NV_PRINTF_LEVEL_ENABLED(level)) \
+        { \
+            nvCheckFailedNoLog(level \
+                               NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr)); \
+        } \
+    } while(0)
+
+#define NV_CHECK_OK_FAILED(level, exprStr, status) \
+    do { \
+        NV_ASSERT_LOG(level, "Check failed: 0x%08X returned from " \
+                      exprStr, status); \
+        if (NV_PRINTF_LEVEL_ENABLED(level)) \
+        { \
+            nvCheckOkFailedNoLog(level, status \
+                                 NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr)); \
+        } \
+    } while(0)
+
+#define NV_ASSERT_FAILED_FUNC(exprStr) \
+    do { \
+        nvAssertFailed(NV_ASSERT_FAILED_FUNC_PARAM(exprStr)); \
+        COVERITY_ASSERT_FAIL(); \
+        PORT_BREAKPOINT_CHECKED(); \
+    } while(0)
+
+#define NV_ASSERT_OK_FAILED_FUNC(exprStr, status) \
+    do { \
+        nvAssertOkFailed(status NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr)); \
+        COVERITY_ASSERT_FAIL(); \
+        PORT_BREAKPOINT_CHECKED(); \
+    } while(0)
+
+#define NV_CHECK_FAILED_FUNC(level, exprStr) \
+    if (NV_PRINTF_LEVEL_ENABLED(level)) \
+    { \
+        nvCheckFailed(level NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr)); \
+    }
+
+#define NV_CHECK_OK_FAILED_FUNC(level, exprStr, status) \
+    if (NV_PRINTF_LEVEL_ENABLED(level)) \
+    { \
+        nvCheckOkFailed(level, status \
+                        NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr)); \
+    }
+
+#endif // defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVOS_IS_LIBOS)
+
+/*
+ * Defines for precompiled headers.
+ *
+ * On platforms other than GSP-RM, the _INLINE macros cannot be used inside
+ * precompiled headers due to conflicting NVLOG_PRINT_IDs.
+ */ +#if defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVOS_IS_LIBOS) +#define NV_ASSERT_FAILED_PRECOMP NV_ASSERT_FAILED +#else +#define NV_ASSERT_FAILED_PRECOMP NV_ASSERT_FAILED_FUNC +#endif + +// ***************************************************************************** +// * NV_ASSERT family of macros * +// ***************************************************************************** +/** + * General notes: + * + * Assert that an expression is true. If not, do the actions defined + * in NV_ASSERT_FAILED as well as an "other action": + * Print an error message in the debug output + * Log an error message in NvLog + * Mark as an error condition for coverity + * Breakpoint + * Log an assert record to the journal + * "Other action" as defined by each macro below. + * + * The actions are enabled or omitted based on platform and build, and the + * implementations are platform dependent. + * + * The expression is always evaluated even if assertion failures are not logged + * in the environment. Use @ref NV_ASSERT_CHECKED if the expression should only + * be evaluated in checked builds. + * + * USE GENEROUSLY FOR any condition that requires immediate action from NVIDIA. + * Expect to be ARBed on bugs when an assert you added shows up internally + * or in the field. + * + * DO NOT USE for normal run-time conditions, such as a user application + * passing a bad parameter. + */ + +/** + * Assert that an expression is true. + * + * @param[in] expr Expression that evaluates to a truth value. + */ +#define NV_ASSERT(expr) \ + NV_ASSERT_OR_ELSE_STR(expr, #expr, /* no other action */) + +/** + * Assert that an expression is true only in checked builds. + * + * @note The expression is only evaluated in checked builds so should + * not contain required side-effects. + * Also to prevent side effects, no "other action" is permitted. + * + * @param[in] expr Expression that evaluates to a truth value. + */ +#if PORT_IS_CHECKED_BUILD +#define NV_ASSERT_CHECKED(expr) \ + NV_ASSERT_OR_ELSE_STR(expr, #expr, /* no other action */) +#define NV_ASSERT_CHECKED_PRECOMP(expr) \ + NV_ASSERT_OR_ELSE_STR_PRECOMP(expr, #expr, /* no other action */) +#else +#define NV_ASSERT_CHECKED(expr) ((void)0) +#define NV_ASSERT_CHECKED_PRECOMP(expr) ((void)0) +#endif + +/** + * Assert that an expression is true or else do something. + * + * This macro can't use NV_ASSERT_OR_ELSE_STR when NV_PRINTF is passed in as + * the elseStmt parameter. + * + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] elseStmt Statement to evaluate if the expression is false. + */ +#define NV_ASSERT_OR_ELSE(expr, elseStmt) \ + if (1) \ + { \ + PORT_COVERAGE_PUSH_OFF(); \ + if (NV_UNLIKELY(!(expr))) \ + { \ + NV_ASSERT_FAILED(#expr); \ + elseStmt; \ + } \ + PORT_COVERAGE_POP(); \ + } else ((void) 0) + +/** + * Assert that an expression is true or else goto a label. + * + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] label Label to jump to when the expression is false. + */ +#define NV_ASSERT_OR_GOTO(expr, label) \ + NV_ASSERT_OR_ELSE_STR(expr, #expr, goto label) + +/** + * Assert that an expression is true or else return a value. + * + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] retval Value to return if the expression is false. + */ +#define NV_ASSERT_OR_RETURN(expr, retval) \ + NV_ASSERT_OR_ELSE_STR(expr, #expr, return (retval)) + +/** + * Assert that an expression is true or else return void. + * + * @param[in] expr Expression that evaluates to a truth value. 
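+ *
+ * Editor's illustration (hypothetical function and type names), a minimal
+ * sketch of typical use:
+ * ~~~{.c}
+ * void widgetDestroy(Widget *pWidget)
+ * {
+ *     NV_ASSERT_OR_RETURN_VOID(pWidget != NULL);
+ *     // ... tear down pWidget ...
+ * }
+ * ~~~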
+ */
+#define NV_ASSERT_OR_RETURN_VOID(expr) \
+    NV_ASSERT_OR_ELSE_STR(expr, #expr, return)
+
+/**
+ * Assert that an expression is true or else do something.
+ *
+ * Although it can be used directly, this second level macro is designed to be
+ * called from other macros. Passing expr through multiple levels of macros
+ * before it is stringified expands it. This is especially bad for DRF macros,
+ * which result in an embedded %, breaking the format string in the
+ * NV_ASSERT_FAILED_INLINE macro defined above. The macros in this header
+ * always pass the stringified expr into the second level macros as a
+ * separate parameter.
+ *
+ * @param[in] expr      Expression that evaluates to a truth value.
+ * @param[in] exprStr   Expression as a string.
+ * @param[in] elseStmt  Statement to evaluate if the expression is false.
+ */
+#define NV_ASSERT_OR_ELSE_STR(expr, exprStr, elseStmt) \
+    do \
+    { \
+        PORT_COVERAGE_PUSH_OFF(); \
+        if (NV_UNLIKELY(!(expr))) \
+        { \
+            NV_ASSERT_FAILED(exprStr); \
+            elseStmt; \
+        } \
+        PORT_COVERAGE_POP(); \
+    } while (0)
+
+// *****************************************************************************
+// *                       NV_ASSERT_OK family of macros                       *
+// *****************************************************************************
+/**
+ * General notes:
+ *
+ * Assert that an expression evaluates to NV_OK. If not, do the actions defined
+ * in NV_ASSERT_OK_FAILED as well as an "other action":
+ *     Print an error message in the debug output, including decoded NV_STATUS.
+ *     Log an error message in NvLog.
+ *     Mark as an error condition for coverity.
+ *     Breakpoint.
+ *     Log an assert record to the journal.
+ *     "Other action" as defined by each macro below.
+ *
+ * The actions are enabled or omitted based on platform and build, and the
+ * implementations are platform dependent.
+ *
+ * The expression is always evaluated even if assertion failures are not logged
+ * in the environment. Use @ref NV_ASSERT_OK_CHECKED if the expression should
+ * only be evaluated in checked builds.
+ *
+ * USE GENEROUSLY FOR any condition that requires immediate action from NVIDIA.
+ * Expect to be ARBed on bugs when an assert you added shows up internally
+ * or in the field.
+ *
+ * DO NOT USE for normal run-time conditions, such as a user application
+ * passing a bad parameter.
+ */
+
+/**
+ * Assert that an expression evaluates to NV_OK.
+ *
+ * @param[in] expr  Expression that evaluates to an NV_STATUS.
+ */
+#define NV_ASSERT_OK(expr) \
+    do \
+    { \
+        NV_STATUS rm_pvt_status; \
+        NV_ASSERT_OK_OR_ELSE_STR(rm_pvt_status, expr, #expr, \
+                                 /* no other action */); \
+    } while(0)
+
+/**
+ * Assert that an expression evaluates to NV_OK only in checked builds.
+ *
+ * @note The expression is only evaluated in checked builds so should
+ *       not contain required side-effects.
+ *       Also to prevent side effects, no "other action" is permitted,
+ *       and the status parameter is omitted.
+ *
+ * @param[in] expr  Expression that evaluates to an NV_STATUS.
+ */
+#if PORT_IS_CHECKED_BUILD
+#define NV_ASSERT_OK_CHECKED(expr) \
+    do \
+    { \
+        NV_STATUS rm_pvt_status; \
+        NV_ASSERT_OK_OR_ELSE_STR(rm_pvt_status, expr, #expr, \
+                                 return rm_pvt_status); \
+    } while(0)
+#else
+#define NV_ASSERT_OK_CHECKED(expr) ((void)0)
+#endif
+
+/*!
+ * Call a function that returns NV_STATUS and assert that the
+ * return value is NV_OK. If this was the first failure, update
+ * the global status @ref status.
+ *
+ * @param[in] status  The NV_STATUS variable to capture the status
+ * @param[in] expr    Expression that evaluates to an NV_STATUS.
+ */
+#define NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, expr) \
+    do \
+    { \
+        NV_STATUS rm_pvt_status; \
+        NV_ASSERT_OK_OR_ELSE_STR(rm_pvt_status, expr, #expr, \
+            if (status == NV_OK) status = rm_pvt_status); \
+    } while (0)
+
+/**
+ * Assert that an expression evaluates to NV_OK or else do something.
+ *
+ * This macro can't use NV_ASSERT_OK_OR_ELSE_STR when NV_PRINTF is passed in as
+ * the elseStmt parameter.
+ *
+ * @param[in] status    The NV_STATUS variable to capture the status
+ * @param[in] expr      Expression that evaluates to an NV_STATUS.
+ * @param[in] elseStmt  Statement to evaluate if the expression is false.
+ */
+#define NV_ASSERT_OK_OR_ELSE(status, expr, elseStmt) \
+    do \
+    { \
+        status = (expr); \
+        PORT_COVERAGE_PUSH_OFF(); \
+        if (NV_UNLIKELY(NV_OK != status)) \
+        { \
+            NV_ASSERT_OK_FAILED(#expr, status); \
+            elseStmt; \
+        } \
+        PORT_COVERAGE_POP(); \
+    } while(0)
+
+/**
+ * Assert that an expression evaluates to NV_OK or else goto a label.
+ *
+ * @param[in] status  The NV_STATUS variable to capture the status
+ * @param[in] expr    Expression that evaluates to an NV_STATUS.
+ * @param[in] label   Label to jump to when the expression is false.
+*/
+#define NV_ASSERT_OK_OR_GOTO(status, expr, label) \
+    NV_ASSERT_OK_OR_ELSE_STR(status, expr, #expr, goto label);
+
+/**
+ * Assert that an expression evaluates to NV_TRUE or else goto a label.
+ *
+ * @param[in] status  The NV_STATUS variable to capture the status
+ * @param[in] cond    Condition that evaluates to either NV_TRUE or NV_FALSE.
+ * @param[in] error   Error to be reflected in @p status when @p cond evaluates
+ *                    to NV_FALSE.
+ * @param[in] label   Label to jump to when @p cond evaluates to NV_FALSE.
+*/
+#define NV_ASSERT_TRUE_OR_GOTO(status, cond, error, label) \
+    NV_ASSERT_OK_OR_ELSE_STR(status, ((cond) ? NV_OK : (error)), \
+                             #cond, goto label);
+
+/**
+ * Assert that an expression evaluates to NV_OK or else return the status.
+ *
+ * @param[in] expr  Expression that evaluates to an NV_STATUS.
+ */
+#define NV_ASSERT_OK_OR_RETURN(expr) \
+    do \
+    { \
+        NV_STATUS rm_pvt_status; \
+        NV_ASSERT_OK_OR_ELSE_STR(rm_pvt_status, expr, #expr, \
+                                 return rm_pvt_status); \
+    } while(0)
+
+/**
+ * Assert that an expression evaluates to NV_OK or else do something.
+ *
+ * Although it can be used directly, this second level macro is designed to be
+ * called from other macros. Passing expr through multiple levels of macros
+ * before it is stringified expands it. This is especially bad for DRF macros,
+ * which result in an embedded %, breaking the format string in the
+ * NV_ASSERT_OK_FAILED_INLINE macro defined above. The macros in this header
+ * always pass the stringified expr into the second level macros as a
+ * separate parameter.
+ *
+ * @param[in] status    The NV_STATUS variable to capture the status
+ * @param[in] expr      Expression that evaluates to an NV_STATUS.
+ * @param[in] exprStr   Expression as a string.
+ * @param[in] elseStmt  Statement to evaluate if the expression is false.
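+ *
+ * Editor's illustration: with the body below, a call such as
+ * NV_ASSERT_OK_OR_GOTO(status, fn(), done) routes through this macro and
+ * expands to roughly the following sketch (coverage push/pop markers
+ * omitted; fn is a hypothetical name):
+ * ~~~{.c}
+ * status = (fn());
+ * if (NV_UNLIKELY(NV_OK != status))
+ * {
+ *     NV_ASSERT_OK_FAILED("fn()", status);
+ *     goto done;
+ * }
+ * ~~~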
+ */ +#define NV_ASSERT_OK_OR_ELSE_STR(status, expr, exprStr, elseStmt) \ + do \ + { \ + status = (expr); \ + PORT_COVERAGE_PUSH_OFF(); \ + if (NV_UNLIKELY(NV_OK != status)) \ + { \ + NV_ASSERT_OK_FAILED(exprStr, status); \ + elseStmt; \ + } \ + PORT_COVERAGE_POP(); \ + } while(0) + +// ***************************************************************************** +// * NV_CHECK family of macros * +// ***************************************************************************** +/** + * General notes: + * + * Check that an expression is true. If not, do the following actions: + * Print a message in the debug output at user specified level. + * Log a message in NvLog at user specified level. + * "Other action" as defined by each macro below. + * + * The actions are enabled or omitted based on platform and build, and the + * implementations are platform dependent. + * + * The expression is always evaluated even if check failures are not logged + * in the environment. Use @ref NV_CHECK_CHECKED if the expression should only + * be evaluated in checked builds. + * + * USE FOR error conditions that DO NOT require immediate action from NVIDIA, + * but may be useful in diagnosing other issues. + */ + +/** + * Check that an expression is true. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + */ +#define NV_CHECK(level, expr) \ + NV_CHECK_OR_ELSE_STR(level, expr, #expr, /* no other action */) + +/** + * Check that an expression is true only in checked builds. + * + * @note The expression is only evaluated in checked builds so should + * not contain required side-effects. + * Also to prevent side effects, no "other action" is permitted. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + */ +#if PORT_IS_CHECKED_BUILD +#define NV_CHECK_CHECKED(level, expr) \ + NV_CHECK_OR_ELSE_STR(level, expr, #expr, /* no other action */) +#else +#define NV_CHECK_CHECKED(level, expr) ((void)0) +#endif + +/** + * Check that an expression is true or else do something. + * + * This macro can't use NV_CHECK_OR_ELSE_STR when NV_PRINTF is passed in as + * the elseStmt parameter. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] elseStmt Statement to evaluate if the expression is false. + */ +#define NV_CHECK_OR_ELSE(level, expr, elseStmt) \ + do \ + { \ + if (NV_UNLIKELY(!(expr))) \ + { \ + NV_CHECK_FAILED(level, #expr); \ + elseStmt; \ + } \ + } while (0) + +/** + * Check that an expression is true or else goto a label. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] label Label to jump to when the expression is false. + */ +#define NV_CHECK_OR_GOTO(level, expr, label) \ + NV_CHECK_OR_ELSE_STR(level, expr, #expr, goto label) + +/** + * Check that an expression is true or else return a value. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] retval Value to return if the expression is false. + */ +#define NV_CHECK_OR_RETURN(level, expr, retval) \ + NV_CHECK_OR_ELSE_STR(level, expr, #expr, return (retval)) + +/** + * Check that an expression is true or else return void. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. 
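+ *
+ * Editor's illustration (hypothetical names), a minimal sketch:
+ * ~~~{.c}
+ * void eventNotify(Event *pEvent)
+ * {
+ *     NV_CHECK_OR_RETURN_VOID(LEVEL_INFO, pEvent != NULL);
+ *     // ... deliver the event ...
+ * }
+ * ~~~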
+ */
+#define NV_CHECK_OR_RETURN_VOID(level, expr) \
+    NV_CHECK_OR_ELSE_STR(level, expr, #expr, return)
+
+/**
+ * Check that an expression is true or else do something.
+ *
+ * Although it can be used directly, this second level macro is designed to be
+ * called from other macros. Passing expr through multiple levels of macros
+ * before it is stringified expands it. This is especially bad for DRF macros,
+ * which result in an embedded %, breaking the format string in the
+ * NV_CHECK_FAILED_INLINE macro defined above. The macros in this header
+ * always pass the stringified expr into the second level macros as a
+ * separate parameter.
+ *
+ * @param[in] level     NV_PRINTF LEVEL to print at
+ * @param[in] expr      Expression that evaluates to a truth value.
+ * @param[in] exprStr   Expression as a string.
+ * @param[in] elseStmt  Statement to evaluate if the expression is false.
+ */
+#define NV_CHECK_OR_ELSE_STR(level, expr, exprStr, elseStmt) \
+    do \
+    { \
+        if (NV_UNLIKELY(!(expr))) \
+        { \
+            NV_CHECK_FAILED(level, exprStr); \
+            elseStmt; \
+        } \
+    } while (0)
+
+
+// *****************************************************************************
+// *                        NV_CHECK_OK family of macros                       *
+// *****************************************************************************
+/**
+ * General notes:
+ *
+ * Check that an expression evaluates to NV_OK. If not, do the following actions:
+ *     Print a message in the debug output at user specified level.
+ *     Log a message in NvLog at user specified level.
+ *     "Other action" as defined by each macro below.
+ *
+ * The actions are enabled or omitted based on platform and build, and the
+ * implementations are platform dependent.
+ *
+ * The expression is always evaluated even if check failures are not logged
+ * in the environment. Use @ref NV_CHECK_OK_CHECKED if the expression should
+ * only be evaluated in checked builds.
+ *
+ * USE FOR error conditions that DO NOT require immediate action from NVIDIA,
+ * but may be useful in diagnosing other issues.
+ */
+
+/**
+ * Check that an expression evaluates to NV_OK.
+ *
+ * @param[in] status  The NV_STATUS variable to capture the status
+ * @param[in] level   NV_PRINTF LEVEL to print at
+ * @param[in] expr    Expression that evaluates to an NV_STATUS.
+ */
+#define NV_CHECK_OK(status, level, expr) \
+    do \
+    { \
+        NV_CHECK_OK_OR_ELSE_STR(status, level, expr, #expr, \
+                                /* no other action */); \
+    } while(0)
+
+/**
+ * Check that an expression evaluates to NV_OK only in checked builds.
+ *
+ * @note The expression is only evaluated in checked builds so should
+ *       not contain required side-effects.
+ *       Also to prevent side effects, no "other action" is permitted,
+ *       and the status parameter is omitted.
+ *
+ * @param[in] level  NV_PRINTF LEVEL to print at
+ * @param[in] expr   Expression that evaluates to an NV_STATUS.
+ */
+#if PORT_IS_CHECKED_BUILD
+#define NV_CHECK_OK_CHECKED(level, expr) \
+    do \
+    { \
+        NV_STATUS rm_pvt_status; \
+        NV_CHECK_OK_OR_ELSE_STR(rm_pvt_status, level, expr, #expr, \
+                                /* no other action */); \
+    } while(0)
+#else
+#define NV_CHECK_OK_CHECKED(level, expr) ((void)0)
+#endif
+
+/*!
+ * Call a function that returns NV_STATUS and check that the return value is
+ * NV_OK. If an error is returned, record the error code. If this was the
+ * first failure, update the global status @ref status.
+ *
+ * @param[in] status  The NV_STATUS variable to capture the status
+ * @param[in] level   NV_PRINTF LEVEL to print at
+ * @param[in] expr    Expression that evaluates to an NV_STATUS.
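+ *
+ * Editor's illustration, a sketch of accumulating the first error across a
+ * teardown path (teardownA/teardownB are hypothetical):
+ * ~~~{.c}
+ * NV_STATUS status = NV_OK;
+ * NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status, LEVEL_ERROR, teardownA());
+ * NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status, LEVEL_ERROR, teardownB());
+ * return status;   // NV_OK only if every step succeeded, else first error
+ * ~~~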
+ */
+#define NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status, level, expr) \
+    do \
+    { \
+        NV_STATUS rm_pvt_status; \
+        NV_CHECK_OK_OR_ELSE_STR(rm_pvt_status, level, expr, #expr, \
+            if (status == NV_OK) status = rm_pvt_status); \
+    } while (0)
+
+/**
+ * Check that an expression evaluates to NV_OK or else do something.
+ *
+ * This macro can't use NV_CHECK_OK_OR_ELSE_STR when NV_PRINTF is passed in as
+ * the elseStmt parameter.
+ *
+ * @param[in] status    The NV_STATUS variable to capture the status
+ * @param[in] level     NV_PRINTF LEVEL to print at
+ * @param[in] expr      Expression that evaluates to an NV_STATUS.
+ * @param[in] elseStmt  Statement to evaluate if the expression returns error.
+ */
+#define NV_CHECK_OK_OR_ELSE(status, level, expr, elseStmt) \
+    do \
+    { \
+        status = (expr); \
+        if (NV_UNLIKELY(NV_OK != status)) \
+        { \
+            NV_CHECK_OK_FAILED(level, #expr, status); \
+            elseStmt; \
+        } \
+    } while (0)
+
+/**
+ * Check that an expression evaluates to NV_OK or else goto a label.
+ *
+ * @param[in] status  The NV_STATUS variable to capture the status
+ * @param[in] level   NV_PRINTF LEVEL to print at
+ * @param[in] expr    Expression that evaluates to an NV_STATUS.
+ * @param[in] label   Label to jump to when the expression returns error.
+ */
+#define NV_CHECK_OK_OR_GOTO(status, level, expr, label) \
+    NV_CHECK_OK_OR_ELSE_STR(status, level, expr, #expr, goto label)
+
+/**
+ * Check that an expression evaluates to NV_TRUE or else goto a label.
+ *
+ * @param[in] status  The NV_STATUS variable to capture the status
+ * @param[in] level   NV_PRINTF LEVEL to print at
+ * @param[in] expr    Expression that evaluates to either NV_TRUE or NV_FALSE.
+ * @param[in] error   Error to be reflected in @p status when @p expr evaluates
+ *                    to NV_FALSE.
+ * @param[in] label   Label to jump to when @p expr evaluates to NV_FALSE.
+*/
+#define NV_CHECK_TRUE_OR_GOTO(status, level, expr, error, label) \
+    NV_CHECK_OK_OR_ELSE_STR(status, level, ((expr) ? NV_OK : (error)), \
+                            #expr, goto label);
+
+/**
+ * Check that an expression evaluates to NV_OK or else return the status.
+ *
+ * @param[in] level  NV_PRINTF LEVEL to print at
+ * @param[in] expr   Expression that evaluates to an NV_STATUS.
+ */
+#define NV_CHECK_OK_OR_RETURN(level, expr) \
+    do \
+    { \
+        NV_STATUS rm_pvt_status; \
+        NV_CHECK_OK_OR_ELSE_STR(rm_pvt_status, level, expr, #expr, \
+                                return rm_pvt_status); \
+    } while(0)
+
+
+/**
+ * Check that an expression evaluates to NV_OK or else record the error code and
+ * do something.
+ *
+ * Although it can be used directly, this second level macro is designed to be
+ * called from other macros. Passing expr through multiple levels of macros
+ * before it is stringified expands it. This is especially bad for DRF macros,
+ * which result in an embedded %, breaking the format string in the
+ * NV_CHECK_OK_FAILED_INLINE macro defined above. The macros in this header
+ * always pass the stringified expr into the second level macros as a
+ * separate parameter.
+ *
+ * @param[in] status    The NV_STATUS variable to capture the status
+ * @param[in] level     NV_PRINTF LEVEL to print at
+ * @param[in] expr      Expression that evaluates to an NV_STATUS.
+ * @param[in] exprStr   Expression as a string.
+ * @param[in] elseStmt  Statement to evaluate if the expression returns error.
+ */
+#define NV_CHECK_OK_OR_ELSE_STR(status, level, expr, exprStr, elseStmt) \
+    do \
+    { \
+        status = (expr); \
+        if (NV_UNLIKELY(NV_OK != status)) \
+        { \
+            NV_CHECK_OK_FAILED(level, exprStr, status); \
+            elseStmt; \
+        } \
+    } while (0)
+
+
+// *****************************************************************************
+// *                     NV_ASSERT_PRECOMP family of macros                    *
+// *****************************************************************************
+/**
+ * General notes:
+ *
+ * Exactly the same as the NV_ASSERT macros, but always safe to use in
+ * precompiled headers.
+ *
+ * On platforms other than GSP-RM, the _INLINE macros cannot be used inside
+ * precompiled headers due to conflicting NVLOG_PRINT_IDs. The PRECOMP macros
+ * work around this issue by calling helper functions for NvLog.
+ *
+ * Hoping for a better solution, only the macro variants that are currently
+ * used in precompiled headers are defined.
+ *
+ * See the NV_ASSERT documentation above for parameters and use cases.
+ */
+#define NV_ASSERT_PRECOMP(expr) \
+    NV_ASSERT_OR_ELSE_STR_PRECOMP(expr, #expr, /* no other action */)
+
+#define NV_ASSERT_OR_RETURN_PRECOMP(expr, retval) \
+    NV_ASSERT_OR_ELSE_STR_PRECOMP(expr, #expr, return (retval))
+
+#define NV_ASSERT_OR_RETURN_VOID_PRECOMP(expr) \
+    NV_ASSERT_OR_ELSE_STR_PRECOMP(expr, #expr, return)
+
+#define NV_ASSERT_OR_ELSE_STR_PRECOMP(expr, exprStr, elseStmt) \
+    do \
+    { \
+        PORT_COVERAGE_PUSH_OFF(); \
+        if (NV_UNLIKELY(!(expr))) \
+        { \
+            NV_ASSERT_FAILED_PRECOMP(exprStr); \
+            elseStmt; \
+        } \
+        PORT_COVERAGE_POP(); \
+    } while (0)
+
+/**
+ * @def NV_CHECKED_ONLY
+ * @brief Compile a piece of code only in checked builds.
+ *
+ * This macro helps avoid #ifdefs to improve readability but should be
+ * used sparingly.
+ *
+ * Cases that make heavy use of this should likely define a wrapper
+ * macro or other abstraction for the build variation.
+ * For example NV_CHECKED_ONLY(NV_PRINTF(...)) is not a good use case.
+ *
+ * A typical use case is declaring and setting a canary value:
+ * ~~~{.c}
+ * typedef struct
+ * {
+ *     NV_CHECKED_ONLY(NvU32 canary);
+ *     ...
+ * } MY_STRUCT;
+ *
+ * void initMyStruct(MY_STRUCT *pStruct)
+ * {
+ *     NV_CHECKED_ONLY(pStruct->canary = 0xDEADBEEF);
+ *     ...
+ * }
+ *
+ * void destroyMyStruct(MY_STRUCT *pStruct)
+ * {
+ *     NV_ASSERT_CHECKED(pStruct->canary == 0xDEADBEEF);
+ *     ...
+ * }
+ * ~~~
+ */
+#if PORT_IS_CHECKED_BUILD
+#define NV_CHECKED_ONLY NV_EXPAND
+#else
+#define NV_CHECKED_ONLY NV_DISCARD
+#endif
+
+// Verify prerequisites are defined.
+#ifndef PORT_IS_CHECKED_BUILD
+#error "NV_PORT_HEADER must define PORT_IS_CHECKED_BUILD"
+#endif
+#ifndef PORT_BREAKPOINT_CHECKED
+#error "NV_PORT_HEADER must define PORT_BREAKPOINT_CHECKED"
+#endif
+#ifndef PORT_COVERAGE_PUSH_OFF
+#error "NV_PORT_HEADER must define PORT_COVERAGE_PUSH_OFF"
+#endif
+#ifndef PORT_COVERAGE_POP
+#error "NV_PORT_HEADER must define PORT_COVERAGE_POP"
+#endif
+#ifndef NV_PRINTF
+#error "NV_PRINTF_HEADER must define NV_PRINTF"
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif //__cplusplus
+/// @}
+#endif // _NV_UTILS_ASSERT_H_
diff --git a/src/nvidia/inc/libraries/utils/nvbitvector.h b/src/nvidia/inc/libraries/utils/nvbitvector.h
new file mode 100644
index 0000000..6b3cd2f
--- /dev/null
+++ b/src/nvidia/inc/libraries/utils/nvbitvector.h
@@ -0,0 +1,605 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _NV_UTILS_NV_BITVECTOR_H_
+#define _NV_UTILS_NV_BITVECTOR_H_
+
+#include "nvport/nvport.h"
+#include "nvtypes.h"
+#include "nvstatus.h"
+#include "nvmisc.h"
+#include "utils/nvassert.h"
+#include "utils/nvrange.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//
+// Note: This will need to be recalculated if the data size changes
+// IDX(i) = ((index & ~MASK(num bits)) >> log2(num bits))
+//
+#define NV_BITVECTOR_IDX(index)       (((index) & ~(0x3F)) >> 6)
+#define NV_BITVECTOR_ARRAY_SIZE(last) (NV_BITVECTOR_IDX((last) - 1) + 1)
+#define NV_BITVECTOR_BYTE_SIZE(last)  (NV_BITVECTOR_ARRAY_SIZE((last)) * sizeof(NvU64))
+#define NV_BITVECTOR_OFFSET(index)    ((index) & ((sizeof(NvU64) * 8) - 1))
+
+/**
+ * \anchor NV_BITVECTOR_1
+ * @defgroup NV_BITVECTOR NV_BITVECTOR
+ *
+ * @brief NV_BITVECTOR is a collection of individual consecutive bit flags
+ *        packed within an array of 64-bit integers. Each derivative of the
+ *        NV_BITVECTOR type may specify the number of queryable flags, and the
+ *        array will be sized according to the minimum number of 64-bit integers
+ *        required to hold the flags.
+ *
+ * @details NV_BITVECTOR is a general purpose data structure utility.
+ *          It consists of a single (real) field, named \b qword.
+ *          Flags within a NV_BITVECTOR are represented beginning with the LSB of
+ *          index 0 of \b qword, and are packed fully within a single qword
+ *          before expanding into a new qword. Derivatives of NV_BITVECTOR must
+ *          provide a type name for the new type, and the first index outside of
+ *          the range of the new type (this value must be greater than 0). A
+ *          bitvector with bits 63 and 64 raised is represented in memory in a
+ *          little-endian system as follows:
+ *
+ *          63        NV_BITVECTOR_OFFSET(i)                              0
+ *           .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-.
+ *         0 |1                                                              |
+ *         1 |                                                              1|
+ *           `-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
+ *
+ *          Thus, in order to conceptually model an NV_BITVECTOR horizontally as
+ *          a continual ordered list of bits, one would have to write the
+ *          bitvector from highest index to lowest, and read from right to left.
+ *
+ * @note The unused bits within a derivative type of NV_BITVECTOR are reserved,
+ *       and must not be depended upon to contain any consistent value.
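+ *
+ * Editor's illustration (a sketch using the MAKE_BITVECTOR and accessor
+ * macros defined below; TEST_VECT is a hypothetical name). A type whose
+ * first out-of-range index is 129 occupies NV_BITVECTOR_ARRAY_SIZE(129) == 3
+ * qwords:
+ * ~~~{.c}
+ * MAKE_BITVECTOR(TEST_VECT, 129);
+ *
+ * TEST_VECT flags;
+ * NvU32     idx;
+ *
+ * bitVectorClrAll(&flags);
+ * bitVectorSet(&flags, 63);
+ * bitVectorSet(&flags, 64);
+ *
+ * FOR_EACH_IN_BITVECTOR(&flags, idx)
+ * {
+ *     // visits idx == 63, then idx == 64
+ * }
+ * FOR_EACH_IN_BITVECTOR_END();
+ * ~~~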
+ * + * @{ + */ +typedef struct NV_BITVECTOR NV_BITVECTOR; +struct NV_BITVECTOR +{ + NvU64 qword; +}; + +#define TYPEDEF_BITVECTOR(bitvectTypeName) \ + union bitvectTypeName; \ + typedef union bitvectTypeName bitvectTypeName; \ + +#define IMPL_BITVECTOR(bitvectTypeName, last_val) \ + union bitvectTypeName \ + { \ + NV_BITVECTOR real; \ + NvU64 qword[NV_BITVECTOR_ARRAY_SIZE(last_val)]; \ + struct \ + { \ + char _[last_val]; \ + char asrt[1 - 2 * !(last_val > 0)]; \ + } *last; \ + } + +#define MAKE_BITVECTOR(bitvectTypeName, last_val) \ + TYPEDEF_BITVECTOR(bitvectTypeName) \ + IMPL_BITVECTOR(bitvectTypeName, last_val) + +#define MAKE_ANON_BITVECTOR(last_val) \ + IMPL_BITVECTOR( , last_val) + +#define NV_BITVECTOR_ARRAY_LENGTH(pBitVector) \ + (NV_ARRAY_ELEMENTS(pBitVector->qword)) + +#define bitVectorSizeOf(pBitVector) \ + bitVectorSizeOf_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorClrAll(pBitVector) \ + bitVectorClrAll_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorClr(pBitVector, idx) \ + bitVectorClr_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), (idx)) + +#define bitVectorClrRange(pBitVector, range) \ + bitVectorClrRange_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), (range)) + +#define bitVectorSetAll(pBitVector) \ + bitVectorSetAll_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorSet(pBitVector, idx) \ + bitVectorSet_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), (idx)) + +#define bitVectorSetRange(pBitVector, range) \ + bitVectorSetRange_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), (range)) + +#define bitVectorFromArrayU16(pBitVector, pArr, sz) \ + bitVectorFromArrayU16_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), \ + (pArr), \ + (sz)) + +#define bitVectorTestAllSet(pBitVector) \ + bitVectorTestAllSet_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorTestAllCleared(pBitVector) \ + bitVectorTestAllCleared_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorTestEqual(pBitVectorA, pBitVectorB) \ + bitVectorTestEqual_IMPL(&((pBitVectorA)->real), \ + sizeof(((pBitVectorA)->last->_)), \ + &((pBitVectorB)->real), \ + sizeof(((pBitVectorB)->last->_))) + +#define bitVectorTestIsSubset(pBitVectorA, pBitVectorB) \ + bitVectorTestIsSubset_IMPL(&((pBitVectorA)->real), \ + sizeof(((pBitVectorA)->last->_)), \ + &((pBitVectorB)->real), \ + sizeof(((pBitVectorB)->last->_))) + +#define bitVectorTest(pBitVector, idx) \ + bitVectorTest_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), \ + (idx)) + +#define bitVectorAnd(pBitVectorDst, pBitVectorA, pBitVectorB) \ + bitVectorAnd_IMPL(&((pBitVectorDst)->real), \ + sizeof(((pBitVectorDst)->last->_)), \ + &((pBitVectorA)->real), \ + sizeof(((pBitVectorA)->last->_)), \ + &((pBitVectorB)->real), \ + sizeof(((pBitVectorB)->last->_))) + +#define bitVectorOr(pBitVectorDst, pBitVectorA, pBitVectorB) \ + bitVectorOr_IMPL(&((pBitVectorDst)->real), \ + sizeof(((pBitVectorDst)->last->_)), \ + &((pBitVectorA)->real), \ + sizeof(((pBitVectorA)->last->_)), \ + &((pBitVectorB)->real), \ + sizeof(((pBitVectorB)->last->_))) + +#define bitVectorXor(pBitVectorDst, pBitVectorA, pBitVectorB) \ + bitVectorXor_IMPL(&((pBitVectorDst)->real), \ + sizeof(((pBitVectorDst)->last->_)), \ + &((pBitVectorA)->real), \ + sizeof(((pBitVectorA)->last->_)), \ + &((pBitVectorB)->real), \ + 
                      sizeof(((pBitVectorB)->last->_)))
+
+#define bitVectorComplement(pBitVectorDst, pBitVectorSrc) \
+    bitVectorComplement_IMPL(&((pBitVectorDst)->real), \
+                             sizeof(((pBitVectorDst)->last->_)), \
+                             &((pBitVectorSrc)->real), \
+                             sizeof(((pBitVectorSrc)->last->_)))
+
+#define bitVectorCopy(pBitVectorDst, pBitVectorSrc) \
+    bitVectorCopy_IMPL(&((pBitVectorDst)->real), \
+                       sizeof(((pBitVectorDst)->last->_)), \
+                       &((pBitVectorSrc)->real), \
+                       sizeof(((pBitVectorSrc)->last->_)))
+
+#define bitVectorCountTrailingZeros(pBitVector) \
+    bitVectorCountTrailingZeros_IMPL(&((pBitVector)->real), \
+                                     sizeof(((pBitVector)->last->_)))
+
+#define bitVectorCountLeadingZeros(pBitVector) \
+    bitVectorCountLeadingZeros_IMPL(&((pBitVector)->real), \
+                                    sizeof(((pBitVector)->last->_)))
+
+#define bitVectorCountSetBits(pBitVector) \
+    bitVectorCountSetBits_IMPL(&((pBitVector)->real), \
+                               sizeof(((pBitVector)->last->_)))
+
+#define bitVectorToRaw(pBitVector, pRawMask, rawMaskSize) \
+    bitVectorToRaw_IMPL(&((pBitVector)->real), \
+                        sizeof(((pBitVector)->last->_)), \
+                        pRawMask, \
+                        rawMaskSize)
+
+#define bitVectorFromRaw(pBitVector, pRawMask, rawMaskSize) \
+    bitVectorFromRaw_IMPL(&((pBitVector)->real), \
+                          sizeof(((pBitVector)->last->_)), \
+                          pRawMask, \
+                          rawMaskSize)
+
+#define bitVectorGetSlice(pBitVector, range, slice) \
+    bitVectorGetSlice_IMPL(&((pBitVector)->real), \
+                           sizeof(((pBitVector)->last->_)), \
+                           range, \
+                           slice)
+
+#define bitVectorGetSliceAtOffset(pBitVector, offset, size, slice) \
+    bitVectorGetSlice_IMPL(&((pBitVector)->real), \
+                           sizeof(((pBitVector)->last->_)), \
+                           rangeMake(offset, offset + size - 1), \
+                           slice)
+
+#define bitVectorLowestNBits(pBitVectorDst, pBitVectorSrc, N) \
+    bitVectorLowestNBits_IMPL(&((pBitVectorDst)->real), \
+                              sizeof(((pBitVectorDst)->last->_)), \
+                              &((pBitVectorSrc)->real), \
+                              sizeof(((pBitVectorSrc)->last->_)), \
+                              N)
+
+#define FOR_EACH_IN_BITVECTOR(pBitVector, index) \
+    { \
+        MAKE_ANON_BITVECTOR(sizeof(((pBitVector)->last->_))) localMask; \
+        bitVectorCopy(&localMask, (pBitVector)); \
+        for ((index) = bitVectorCountTrailingZeros(&localMask); \
+             !bitVectorTestAllCleared(&localMask); \
+             bitVectorClr(&localMask, (index)), \
+             (index) = bitVectorCountTrailingZeros(&localMask)) \
+        {
+
+#define FOR_EACH_IN_BITVECTOR_END() \
+        } \
+    }
+
+#define FOR_EACH_IN_BITVECTOR_PAIR(pBitVectorA, indexA, pBitVectorB, indexB) \
+    { \
+        MAKE_ANON_BITVECTOR(sizeof(((pBitVectorA)->last->_))) localMaskA; \
+        bitVectorCopy(&localMaskA, (pBitVectorA)); \
+        MAKE_ANON_BITVECTOR(sizeof(((pBitVectorB)->last->_))) localMaskB; \
+        bitVectorCopy(&localMaskB, (pBitVectorB)); \
+        for ((indexA) = bitVectorCountTrailingZeros(&localMaskA), \
+             (indexB) = bitVectorCountTrailingZeros(&localMaskB); \
+             !bitVectorTestAllCleared(&localMaskA) && \
+             !bitVectorTestAllCleared(&localMaskB); \
+             bitVectorClr(&localMaskA, (indexA)), \
+             bitVectorClr(&localMaskB, (indexB)), \
+             (indexA) = bitVectorCountTrailingZeros(&localMaskA), \
+             (indexB) = bitVectorCountTrailingZeros(&localMaskB)) \
+        {
+
+#define FOR_EACH_IN_BITVECTOR_PAIR_END() \
+        } \
+    }
+
+/*
+ * @brief NV_BITVECTOR_PRINT prints out an nvbitvector up to
+ *        the first element
+ * @param[in] stmt       the NV_PRINTF statement, using the
+ *                       NV_BITVECTOR_INLINE_FMTX define as the placeholder
+ *                       for the string formatting
+ * @param[in] pBitvector bitvector to be printed
+*/
+#define NV_BITVECTOR_INLINE_FMTX "0x%llx"
+
+#define
NV_BITVECTOR_INLINE_PRINTF_ARG(b) \ + (b)->qword[0] + +#if defined(DEBUG) || defined(DEVELOP) +/*! + * void bitvectorPrint(pBitvector) + * @brief Dump a human-readable formatted string representing bitvector contents. + * @param[in] pBitvector pointer to a NV_BITVECTOR + * @note Defined as a macro such that the dumps are associated with the correct NVLOG module + */ +#define bitVectorPrint(pBitvector) \ +{ \ + NvU32 rdr; \ + NvU32 *s; \ + NvU32 offset = 0; \ + NvU32 i; \ + NvU32 length; \ + NvU32 lengthInBytes = NV_BITVECTOR_ARRAY_LENGTH(pBitvector) * sizeof(NvU64); \ + length = (lengthInBytes) / 4; \ + \ + rdr = length % 4; \ + s = (NvU32 *)(pBitvector); \ + \ + NV_PRINTF(LEVEL_INFO, "--------------------------------------------------\n"); \ + \ + NV_PRINTF(LEVEL_INFO, " 0x0 0x4 0x8 0xc\n"); \ + \ + for (i = 0; i < (length / 4); i++) \ + { \ + NV_PRINTF(LEVEL_INFO, "%08x %08x %08x %08x %08x\n", offset, \ + MEM_RD32(s + 0), \ + MEM_RD32(s + 1), \ + MEM_RD32(s + 2), \ + MEM_RD32(s + 3)); \ + \ + s += 4; \ + offset += 16; \ + \ + if ((offset % 256) == 0) \ + { \ + NV_PRINTF(LEVEL_INFO, "--------------------------------------------------\n"); \ + } \ + } \ + \ + switch (rdr) \ + { \ + case 1: \ + NV_PRINTF(LEVEL_INFO, "%08x %08x ........ ........ ........\n", offset, \ + MEM_RD32(s + 0)); \ + break; \ + case 2: \ + NV_PRINTF(LEVEL_INFO, "%08x %08x %08x ........ ........\n", offset, \ + MEM_RD32(s + 0), \ + MEM_RD32(s + 1)); \ + break; \ + case 3: \ + NV_PRINTF(LEVEL_INFO, "%08x %08x %08x %08x ........\n", offset, \ + MEM_RD32(s + 0), \ + MEM_RD32(s + 1), \ + MEM_RD32(s + 2)); \ + break; \ + default: \ + break; \ + } \ + \ + NV_PRINTF(LEVEL_INFO, "--------------------------------------------------\n"); \ +} \ + +#else +#define bitVectorPrint(pBitvector) +#endif // defined(DEBUG) + +#define NV_BITVECTOR_PRINT(stmt, pBitvector) \ + stmt; \ + bitVectorPrint((pBitvector)); + +NvU32 +bitVectorSizeOf_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NV_STATUS +bitVectorClrAll_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NV_STATUS +bitVectorClr_IMPL +( + NV_BITVECTOR *pBitVector, + NvU32 bitVectorLast, + NvU32 idx +); + +NV_STATUS +bitVectorClrRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +); + +NV_STATUS +bitVectorSetAll_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NV_STATUS +bitVectorSet_IMPL +( + NV_BITVECTOR *pBitVector, + NvU32 bitVectorLast, + NvU32 idx +); + +NV_STATUS +bitVectorSetRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +); + +NV_STATUS +bitVectorInv_IMPL +( + NV_BITVECTOR *pBitVector, + NvU32 bitVectorLast, + NvU32 idx +); + +NV_STATUS +bitVectorInvRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +); + +NV_STATUS +bitVectorFromArrayU16_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 *pIndices, + NvU32 indicesSize +); + +NvBool +bitVectorTestAllSet_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NvBool +bitVectorTestAllCleared_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NvBool +bitVectorTestEqual_IMPL +( + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +); + +NvBool +bitVectorTestIsSubset_IMPL +( + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +); + +NvBool +bitVectorTest_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU32 
       bitVectorLast,
+    NvU32               idx
+);
+
+NV_STATUS
+bitVectorAnd_IMPL
+(
+    NV_BITVECTOR       *pBitVectorDst,
+    NvU16               bitVectorDstLast,
+    const NV_BITVECTOR *pBitVectorA,
+    NvU16               bitVectorALast,
+    const NV_BITVECTOR *pBitVectorB,
+    NvU16               bitVectorBLast
+);
+
+NV_STATUS
+bitVectorOr_IMPL
+(
+    NV_BITVECTOR       *pBitVectorDst,
+    NvU16               bitVectorDstLast,
+    const NV_BITVECTOR *pBitVectorA,
+    NvU16               bitVectorALast,
+    const NV_BITVECTOR *pBitVectorB,
+    NvU16               bitVectorBLast
+);
+
+NV_STATUS
+bitVectorXor_IMPL
+(
+    NV_BITVECTOR       *pBitVectorDst,
+    NvU16               bitVectorDstLast,
+    const NV_BITVECTOR *pBitVectorA,
+    NvU16               bitVectorALast,
+    const NV_BITVECTOR *pBitVectorB,
+    NvU16               bitVectorBLast
+);
+
+NV_STATUS
+bitVectorComplement_IMPL
+(
+    NV_BITVECTOR       *pBitVectorDst,
+    NvU16               bitVectorDstLast,
+    const NV_BITVECTOR *pBitVectorSrc,
+    NvU16               bitVectorSrcLast
+);
+
+NV_STATUS
+bitVectorCopy_IMPL
+(
+    NV_BITVECTOR       *pBitVectorDst,
+    NvU16               bitVectorDstLast,
+    const NV_BITVECTOR *pBitVectorSrc,
+    NvU16               bitVectorSrcLast
+);
+
+NvU32
+bitVectorCountTrailingZeros_IMPL
+(
+    const NV_BITVECTOR *pBitVector,
+    NvU16               bitVectorLast
+);
+
+NvU32
+bitVectorCountLeadingZeros_IMPL
+(
+    const NV_BITVECTOR *pBitVector,
+    NvU16               bitVectorLast
+);
+
+NvU32
+bitVectorCountSetBits_IMPL
+(
+    const NV_BITVECTOR *pBitVector,
+    NvU16               bitVectorLast
+);
+
+NV_STATUS
+bitVectorToRaw_IMPL
+(
+    const NV_BITVECTOR *pBitVector,
+    NvU16               bitVectorLast,
+    void               *pRawMask,
+    NvU32               rawMaskSize
+);
+
+NV_STATUS
+bitVectorFromRaw_IMPL
+(
+    NV_BITVECTOR       *pBitVector,
+    NvU16               bitVectorLast,
+    const void         *pRawMask,
+    NvU32               rawMaskSize
+);
+
+NV_STATUS
+bitVectorGetSlice_IMPL
+(
+    NV_BITVECTOR       *pBitVector,
+    NvU16               bitVectorLast,
+    NV_RANGE            range,
+    NvU64              *slice
+);
+
+NV_STATUS
+bitVectorLowestNBits_IMPL
+(
+    NV_BITVECTOR       *pBitVectorDst,
+    NvU16               bitVectorDstLast,
+    const NV_BITVECTOR *pBitVectorSrc,
+    NvU16               bitVectorSrcLast,
+    NvU16               n
+);
+
+#ifdef __cplusplus
+}
+#endif
+///@}
+/// NV_UTILS_BITVECTOR
+#endif
diff --git a/src/nvidia/inc/libraries/utils/nvmacro.h b/src/nvidia/inc/libraries/utils/nvmacro.h
new file mode 100644
index 0000000..86f670a
--- /dev/null
+++ b/src/nvidia/inc/libraries/utils/nvmacro.h
@@ -0,0 +1,251 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+ /**
+  * @file
+  * @brief Standard utility macros for some more advanced CPP operations
+  */
+
+#ifndef _NV_UTILS_MACRO_H_
+#define _NV_UTILS_MACRO_H_
+
+/**
+ * @defgroup NV_UTILS_MACRO Standard utility Macros
+ *
+ * @brief Implements commonly used macros for advanced CPP operations
+ *
+ * @{
+ */
+
+/**
+ * @brief Expands all arguments
+ */
+#define NV_EXPAND(...) __VA_ARGS__
+/**
+ * @brief Discards all arguments
+ */
+#define NV_DISCARD(...)
+
+/**
+ * @brief Fully expands the given argument, then stringifies it.
+ */
+#define NV_STRINGIFY(s) _NV_STRINGIFY(s)
+/**
+ * @brief Fully expands both arguments, then concatenates them.
+ */
+#define NV_CONCATENATE(a, b) _NV_CONCATENATE(a, b)
+
+/**
+ * @brief Returns a number literal corresponding to the number of arguments.
+ *
+ * NV_NUM_ARGS(x) expands to 1
+ * NV_NUM_ARGS(x,y,z) expands to 3
+ *
+ * @warning Due to differences in standards, it is impossible to make this
+ * consistently return 0 when called without arguments. Thus, the behavior of
+ * NV_NUM_ARGS() is undefined, and shouldn't be counted on.
+ * If you do decide to use it: It usually returns 0, except when -std=c++11.
+ *
+ * @note Works for a maximum of 16 arguments
+ */
+#define NV_NUM_ARGS(...) _NV_NUM_ARGS(unused, ##__VA_ARGS__, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+#define _NV_NUM_ARGS(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, N, ...) N
+
+/**
+ * @brief Performs an operation on each of its arguments, except the first
+ *
+ * @param what - Function or function-like macro that takes exactly one param.
+ *               This will be called for the other args: what(arg1), what(arg2), ...
+ *
+ * NV_FOREACH_ARG(foo, a, b, c) expands to: foo(a), foo(b), foo(c)
+ * #define INC(x) (x+1)
+ * NV_FOREACH_ARG(INC,0,1,2,3,4) expands to: (0+1), (1+1), (2+1), (3+1), (4+1)
+ *
+ * @note Works for a maximum of 16 arguments, not counting 'what' param
+ */
+#define NV_FOREACH_ARG(what, ...) \
+    NV_CONCATENATE(_NV_FOREACH_ARG_, NV_NUM_ARGS(__VA_ARGS__)) (what, __VA_ARGS__)
+
+/**
+ * @brief Similar to NV_FOREACH_ARG, but without commas in the expanded result
+ *
+ * @param what - Function or function-like macro that takes exactly one param.
+ *               This will be called for the other args: what(arg1) what(arg2) ...
+ *
+ * NV_FOREACH_ARG_NOCOMMA(foo, a, b, c) expands to: foo(a) foo(b) foo(c)
+ * #define OR(x) | (x)
+ * #define FLAGS(...) (0 NV_FOREACH_ARG_NOCOMMA(OR, __VA_ARGS__))
+ * FLAGS(flag1, flag2, flag3) expands to: 0 | (flag1) | (flag2) | (flag3)
+ *
+ * @note Works for a maximum of 16 arguments, not counting 'what' param
+ */
+#define NV_FOREACH_ARG_NOCOMMA(what, ...) \
+    NV_CONCATENATE(_NV_FOREACH_ARG_NOCOMMA_, NV_NUM_ARGS(__VA_ARGS__)) (what, __VA_ARGS__)
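+
+/*
+ * A short expansion sketch for the argument-counting helpers above:
+ *
+ *     NV_NUM_ARGS(a, b, c)                        // expands to 3
+ *     #define SQUARE(x) ((x) * (x))
+ *     NV_FOREACH_ARG(SQUARE, 2, 3)                // expands to ((2) * (2)), ((3) * (3))
+ *     #define DECLARE_INT(x) int x;
+ *     NV_FOREACH_ARG_NOCOMMA(DECLARE_INT, a, b)   // expands to int a; int b;
+ */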
+
+
+/**
+ * @brief Compile-time evaluation of a condition
+ *
+ * - If 'cond' evaluates to 1 at compile time, macro expands to 'then'
+ * - If 'cond' evaluates to 0 at compile time, macro expands to nothing
+ * - If 'cond' is undefined or evaluates to anything else, report a build error
+ */
+#define NV_STATIC_IF(cond, then) \
+    NV_EXPAND(NV_CONCATENATE(NV_STATIC_IF_, NV_EXPAND(cond))) (then)
+
+
+/**
+ * @brief Similar to @ref NV_STATIC_IF except condition is reversed
+ *
+ * - If 'cond' evaluates to 0 at compile time, macro expands to 'then'
+ * - If 'cond' evaluates to 1 at compile time, macro expands to nothing
+ * - If 'cond' is undefined or evaluates to anything else, report a build error
+ */
+#define NV_STATIC_IFNOT(cond, then) \
+    NV_EXPAND(NV_CONCATENATE(NV_STATIC_IFNOT_, NV_EXPAND(cond))) (then)
+
+
+/**
+ * @brief Similar to @ref NV_STATIC_IF except with both THEN and ELSE branches
+ *
+ * - If 'cond' evaluates to 1 at compile time, macro expands to 'then'
+ * - If 'cond' evaluates to 0 at compile time, macro expands to 'els'
+ * - If 'cond' is undefined or evaluates to anything else, report a build error
+ */
+#define NV_STATIC_IFELSE(cond, then, els) \
+    NV_STATIC_IF(NV_EXPAND(cond), then) NV_STATIC_IFNOT(NV_EXPAND(cond), els)
+
+/// @}
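+
+/*
+ * A short expansion sketch for the compile-time conditionals above (the
+ * condition must expand to a literal 0 or 1; featureXInit/featureXStub are
+ * placeholder names):
+ *
+ *     #define FEATURE_X_ENABLED 1
+ *     NV_STATIC_IF(FEATURE_X_ENABLED, int featureXState;)     // emits the declaration
+ *     NV_STATIC_IFNOT(FEATURE_X_ENABLED, int fallbackState;)  // emits nothing
+ *     NV_STATIC_IFELSE(FEATURE_X_ENABLED, featureXInit(), featureXStub())
+ */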
+
+/// @cond NV_MACROS_IMPLEMENTATION
+
+#define _NV_STRINGIFY(s) #s
+#define _NV_CONCATENATE(a, b) a##b
+
+#define NV_STATIC_IF_0(then) NV_DISCARD(then)
+#define NV_STATIC_IF_1(then) NV_EXPAND(then)
+
+#define NV_STATIC_IFNOT_0(then) NV_EXPAND(then)
+#define NV_STATIC_IFNOT_1(then) NV_DISCARD(then)
+
+// Iterating over empty list is unsupported. Give a semi-readable error.
+#define _NV_FOREACH_ARG_0(X) NV_FOREACH_ERROR_argument_list_empty
+
+#define _NV_FOREACH_ARG_1(X, _1) \
+    X(_1)
+#define _NV_FOREACH_ARG_2(X, _1, _2) \
+    X(_1), X(_2)
+#define _NV_FOREACH_ARG_3(X, _1, _2, _3) \
+    X(_1), X(_2), X(_3)
+#define _NV_FOREACH_ARG_4(X, _1, _2, _3, _4) \
+    X(_1), X(_2), X(_3), X(_4)
+#define _NV_FOREACH_ARG_5(X, _1, _2, _3, _4, _5) \
+    X(_1), X(_2), X(_3), X(_4), X(_5)
+#define _NV_FOREACH_ARG_6(X, _1, _2, _3, _4, _5, _6) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6)
+#define _NV_FOREACH_ARG_7(X, _1, _2, _3, _4, _5, _6, _7) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7)
+#define _NV_FOREACH_ARG_8(X, _1, _2, _3, _4, _5, _6, _7, _8) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8)
+#define _NV_FOREACH_ARG_9(X, _1, _2, _3, _4, _5, _6, _7, _8, _9) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9)
+#define _NV_FOREACH_ARG_10(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10)
+#define _NV_FOREACH_ARG_11(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11)
+#define _NV_FOREACH_ARG_12(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11), X(_12)
+#define _NV_FOREACH_ARG_13(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11), X(_12), X(_13)
+#define _NV_FOREACH_ARG_14(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11), X(_12), X(_13), X(_14)
+#define _NV_FOREACH_ARG_15(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11), X(_12), X(_13), X(_14), X(_15)
+#define _NV_FOREACH_ARG_16(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11), X(_12), X(_13), X(_14), X(_15), X(_16)
+
+// Iterating over empty list is unsupported. Give a semi-readable error.
+#define _NV_FOREACH_ARG_NOCOMMA_0(X) NV_FOREACH_NOCOMMA_ERROR_argument_list_empty
+
+#define _NV_FOREACH_ARG_NOCOMMA_1(X, _1) \
+    X(_1)
+#define _NV_FOREACH_ARG_NOCOMMA_2(X, _1, _2) \
+    X(_1) X(_2)
+#define _NV_FOREACH_ARG_NOCOMMA_3(X, _1, _2, _3) \
+    X(_1) X(_2) X(_3)
+#define _NV_FOREACH_ARG_NOCOMMA_4(X, _1, _2, _3, _4) \
+    X(_1) X(_2) X(_3) X(_4)
+#define _NV_FOREACH_ARG_NOCOMMA_5(X, _1, _2, _3, _4, _5) \
+    X(_1) X(_2) X(_3) X(_4) X(_5)
+#define _NV_FOREACH_ARG_NOCOMMA_6(X, _1, _2, _3, _4, _5, _6) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6)
+#define _NV_FOREACH_ARG_NOCOMMA_7(X, _1, _2, _3, _4, _5, _6, _7) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7)
+#define _NV_FOREACH_ARG_NOCOMMA_8(X, _1, _2, _3, _4, _5, _6, _7, _8) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8)
+#define _NV_FOREACH_ARG_NOCOMMA_9(X, _1, _2, _3, _4, _5, _6, _7, _8, _9) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9)
+#define _NV_FOREACH_ARG_NOCOMMA_10(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10)
+#define _NV_FOREACH_ARG_NOCOMMA_11(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11)
+#define _NV_FOREACH_ARG_NOCOMMA_12(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) X(_12)
+#define _NV_FOREACH_ARG_NOCOMMA_13(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) X(_12) X(_13)
+#define _NV_FOREACH_ARG_NOCOMMA_14(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) X(_12) X(_13) X(_14)
+#define _NV_FOREACH_ARG_NOCOMMA_15(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) X(_12) X(_13) X(_14) X(_15)
+#define _NV_FOREACH_ARG_NOCOMMA_16(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) X(_12) X(_13) X(_14) X(_15) X(_16)
+
+/// @endcond
+
+/// @cond NV_MACROS_COMPILE_TIME_TESTS
+//
+// What follows are a couple of compile time smoke tests that will let us know
+// if the given compiler does not properly implement these macros.
+// These are disabled by default in the interest of compile time.
+//
+#if defined(NVMACRO_DO_COMPILETIME_TESTS)
+#if NV_NUM_ARGS(a) != 1
+#error "[NvMacros CT Test] NV_NUM_ARGS fails when given 1 args"
+#endif
+#if NV_NUM_ARGS(a,b,c,d) != 4
+#error "[NvMacros CT Test] NV_NUM_ARGS fails when given 4 args"
+#endif
+#if NV_NUM_ARGS(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) != 16
+#error "[NvMacros CT Test] NV_NUM_ARGS fails when given 16 args"
+#endif
+
+#define _NVMACRO_ADD_TYPE(x) int x
+extern void _nvmacro_compiletime_test_func_proto1(NV_FOREACH_ARG(_NVMACRO_ADD_TYPE, aa, bb, cc));
+
+#define _NVMACRO_ADD_TYPES(...) \
NV_FOREACH_ARG(_NVMACRO_ADD_TYPE, __VA_ARGS__) +extern void _nvmacro_compiletime_test_func_proto2(_NVMACRO_ADD_TYPES(a, b, c)); + +#endif // NVMACRO_DO_COMPILETIME_TESTS +/// @endcond + +#endif // _NV_UTILS_MACRO_H_ diff --git a/src/nvidia/inc/libraries/utils/nvprintf.h b/src/nvidia/inc/libraries/utils/nvprintf.h new file mode 100644 index 0000000..72303c5 --- /dev/null +++ b/src/nvidia/inc/libraries/utils/nvprintf.h @@ -0,0 +1,393 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * Standard printf logging interface + */ + +#ifndef NVPRINTF_H +#define NVPRINTF_H + +#include "utils/nvprintf_level.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVOS_IS_LIBOS) + +/** + * GSPRM uses a different system for logging. + * The format strings, filename, line number, etc. are stored in a separate + * data segment that is not loaded on the GSP, but is available to the decoder. + */ + +#include "libos_log.h" + +/** + * Define NV_PRINTF_LEVEL to the minimum level for debug output. This is compared + * to the level for each NV_PRINT to cull them at compile time. + */ +#define NV_PRINTF_LEVEL LEVEL_NOTICE + +#undef NV_PRINTF_ENABLED +#define NV_PRINTF_ENABLED 1 + +#undef NV_PRINTF_STRINGS_ALLOWED +#define NV_PRINTF_STRINGS_ALLOWED 0 + +#define NV_PRINTF_STRING_SECTION LIBOS_SECTION_LOGGING + +#define MAKE_NV_PRINTF_STR MAKE_LIBOS_LOGGING_STR + +// NVLOG is not used on GSP-RM. +#undef NVLOG_ENABLED +#define NVLOG_ENABLED 0 + +// Direct dmesg printing through NV_PRINTF_STRING is a no-op on GSP-RM +#define NV_PRINTF_STRING(module, level, format, ...) + +#define LIBOS_LOG_ENTRY LibosLogEntry + +#define NV_PRINTF(level, format, ...) do { \ + if (NV_PRINTF_LEVEL_ENABLED(level)) \ + { \ + LIBOS_LOG_INTERNAL(LIBOS_LOG_ENTRY, level, \ + format, ##__VA_ARGS__); \ + } \ +} while (0) + +#define NV_PRINTF_EX(module, level, format, ...) do { \ + if (NV_PRINTF_LEVEL_ENABLED(level)) \ + { \ + LIBOS_LOG_INTERNAL(LIBOS_LOG_ENTRY, level, \ + format, ##__VA_ARGS__); \ + } \ +} while (0) + +#define NV_LOG_SPECIAL(level, special, ...) 
do { \ + if (NV_PRINTF_LEVEL_ENABLED(level)) \ + { \ + LIBOS_LOG_INTERNAL_SPECIAL(LIBOS_LOG_ENTRY, level, \ + special, ##__VA_ARGS__); \ + } \ +} while (0) + +#else // defined(NVRM) && NVOS_IS_LIBOS + +/** + * @defgroup NV_PRINTF Utility Printing Macros + * + * @brief Provides a light abstraction layer for printf logging. + * + * NvPort and NvLog are used for portability and logging primitives. + * If an environment cannot use these directly then it can override + * the NV_PORT_HEADER and NV_LOG_HEADER defines in its makefile + * to point to appropriate replacements. + * @{ + */ + +#if defined(NVRM) && !defined(NVWATCH) +#undef NV_PRINTF_PREFIX +#define NV_PRINTF_PREFIX "NVRM" +#undef NV_PRINTF_PREFIX_SEPARATOR +#define NV_PRINTF_PREFIX_SEPARATOR ": " +#endif + +#ifndef NV_PRINTF_PREFIX +/** + * @brief Prefix to prepend to all messages printed by @ref NV_PRINTF. + */ +#define NV_PRINTF_PREFIX "" +#endif + +#ifndef NV_PRINTF_PREFIX_SEPARATOR +/** + * @brief Separator between prefix messages printed by @ref NV_PRINTF. + * + * If defined, it must be a single character followed by an optional space. + */ +#define NV_PRINTF_PREFIX_SEPARATOR "" +#endif + +#ifndef NV_PRINTF_ADD_PREFIX +/** + * @brief Apply the full prefix string to a format string. + * + * This is a function-like macro so it can support inserting arguments after the + * format string. Example: + * #define NV_PRINTF_ADD_PREFIX(fmt) "%s():"fmt, __FUNCTION__ + */ +#define NV_PRINTF_ADD_PREFIX(fmt) NV_PRINTF_PREFIX NV_PRINTF_PREFIX_SEPARATOR fmt +#endif + +// Include portability header, falling back to NvPort if not provided. +#ifndef NV_PORT_HEADER +#define NV_PORT_HEADER "nvport/nvport.h" +#endif +#include NV_PORT_HEADER + +// Include logging header, falling back to NvLog if not provided. +#ifndef NV_LOG_HEADER +#define NV_LOG_HEADER "nvlog/nvlog_printf.h" +#endif +#include NV_LOG_HEADER + +#define NV_PRINTF_STRING_SECTION + +#define MAKE_NV_PRINTF_STR(str) str + +/** + * @def NV_PRINTF(level, format, args...) + * @brief Standard formatted printing/logging interface. + * + * @param level - Debug level to print at. One of @ref NV_PRINTF_LEVELS + * @param format - A standard printf format string. Must be a string literal. + * @param args... - Arguments for the format string literal, like regular printf + * + * The logging header can redefine the behavior, but the basic implementation + * will just print to standard output, like the printf function. + * + * This will print to the @ref NV_PRINTF_MODULE module. If the module is not + * defined, it will default to GLOBAL. Use @ref NV_PRINTF_EX to specify another + * module. + * + * This will prefix the prints with @ref NV_PRINTF_PREFIX string and function + * name. To specify a different (or no) prefix, use @ref NV_PRINTF_EX + * + * @note The format string must be a string literal. The level can be a variable, + * but it may have positive speed and size effects to use the above levels + * directly. + */ +#ifndef NV_PRINTF +#define NV_PRINTF(level, format, ...) \ + NV_PRINTF_EX(NV_PRINTF_MODULE, level, NV_PRINTF_ADD_PREFIX(format), ##__VA_ARGS__) +#endif + +/** + * @def NV_PRINTF_EX(module, level, format, args...) + * @brief Extended version of the standard @ref NV_PRINTF + * + * This interface allows you to explicitly specify the module to print to and + * doesn't perform any automatic prefixing. + * + * The logging header can redefine the behavior, but the basic implementation + * will just print to standard output, like the printf function. 
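+ *
+ * As a worked sketch of how @ref NV_PRINTF routes here (assuming an NVRM
+ * build, where NV_PRINTF_PREFIX is "NVRM" and NV_PRINTF_PREFIX_SEPARATOR
+ * is ": "):
+ *
+ *     NV_PRINTF(LEVEL_ERROR, "alloc failed: 0x%x\n", status);
+ *
+ * expands to
+ *
+ *     NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_ERROR,
+ *                  "NVRM" ": " "alloc failed: 0x%x\n", status);
+ *
+ * and the adjacent string literals concatenate, so the message is logged
+ * as "NVRM: alloc failed: 0x%x\n".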
+ * + * @note The format string must be a string literal. The level can be a variable, + * but it may have positive speed and size effects to use the above levels + * directly. + */ +#ifndef NV_PRINTF_EX +#define NV_PRINTF_EX(module, level, format, ...) \ + do \ + { \ + NVLOG_PRINTF(module, NVLOG_ROUTE_RM, level, format, ##__VA_ARGS__); \ + NV_PRINTF_STRING(module, level, format, ##__VA_ARGS__); \ + } while (0) +#endif + +/** + * @def NV_PRINTF_STRINGS_ALLOWED + * @brief This switch controls whether strings are allowed to appear in the + * final binary. + * + * By default, strings are allowed on DEBUG and QA builds, and all MODS builds + */ +#ifndef NV_PRINTF_STRINGS_ALLOWED +#if defined(DEBUG) || defined(NV_MODS) || defined(QA_BUILD) +#define NV_PRINTF_STRINGS_ALLOWED 1 +#else +#define NV_PRINTF_STRINGS_ALLOWED 0 +#endif +#endif // NV_PRINTF_STRINGS_ALLOWED + +// +// Default values for the compile time switches: +// - Strings are allowed on DEBUG and QA builds, and all MODS builds +// - NV_PRINTF is only available if strings are allowed +// - All levels are available if NV_PRINTF is available. + +// +// Special handling for RM internal prints so we have equivalent functionality +// between NV_PRINTF and DBG_PRINTF. This is needed to seamlessly migrate RM to +// the new interface. The implementations will eventually be fully extracted and +// only depend on other common code, such as NvPort. +// +#if defined(NVRM) && !defined(NVWATCH) + +#if NV_PRINTF_STRINGS_ALLOWED + +// Declare internal RM print function: +// This is utDbg_Printf in unit tests and nvDbg_Printf in regular RM builds +#if defined(RM_UNITTEST) +#define NVRM_PRINTF_FUNCTION utDbg_Printf +#else +#define NVRM_PRINTF_FUNCTION nvDbg_Printf +#endif // defined(RM_UNITTEST) + +void NVRM_PRINTF_FUNCTION(const char *file, + int line, + const char *function, + int debuglevel, + const char *s, + ...) NVPORT_CHECK_PRINTF_ARGUMENTS(5, 6); + +#define NV_PRINTF_STRING(module, level, format, ...) \ + NVRM_PRINTF_FUNCTION(NV_FILE_STR, __LINE__, NV_FUNCTION_STR, level, format, ##__VA_ARGS__) + +#endif // NV_PRINTF_STRINGS_ALLOWED + +// RM always has printf enabled +#define NV_PRINTF_ENABLED 1 + +#endif // defined(NVRM) && !defined(NVWATCH) + +// +// Default definitions if none are specified +// + +/** + * @def NV_PRINTF_ENABLED + * @brief This macro evaluates to 1 if NV_PRINTF is available (either as regular + * debug prints or binary logging) + * + * By default, it is available on all builds that allow strings + */ +#ifndef NV_PRINTF_ENABLED +#define NV_PRINTF_ENABLED NV_PRINTF_STRINGS_ALLOWED +#endif + +#if NV_PRINTF_STRINGS_ALLOWED +#define NV_PRINTF_LEVEL LEVEL_INFO +#else +#define NV_PRINTF_LEVEL LEVEL_NOTICE +#endif + +/** + * @def NV_PRINTF_STRING(module, level, format, ...) + * @brief Prints the string to the given output, if strings are allowed. + */ +#ifndef NV_PRINTF_STRING +#if NV_PRINTF_STRINGS_ALLOWED +#define NV_PRINTF_STRING(module, level, format, ...) \ + portDbgPrintf(format, ##__VA_ARGS__) + +#if !defined(portDbgPrintf) && !PORT_IS_FUNC_SUPPORTED(portDbgPrintf) +#error "NV_PORT_HEADER must implement portDbgPrintf()" +#endif + +#else +#define NV_PRINTF_STRING(module, level, format, ...) +#endif +#endif // NV_PRINTF_STRING + +#ifndef NVLOG_PRINTF +#define NVLOG_PRINTF(...) +#endif + +#endif // defined(NVRM) && NVOS_IS_LIBOS + +/** + * @def NV_PRINTF_COND(condition, leveltrue, levelfalse, format, args...) 
+ * @brief NV_PRINTF with conditional print level + * + * Splits NV_PRINTF calls with a print level based on a variable or ternary + * operation, to be handled by preprocessors to remove INFO-level prints + * + * If condition is true, uses leveltrue, else uses levelfalse + */ +#ifndef NV_PRINTF_COND +#define NV_PRINTF_COND(condition, leveltrue, levelfalse, format, ...) \ + do { \ + if (condition) \ + { \ + NV_PRINTF(leveltrue, format, ##__VA_ARGS__); \ + } \ + else \ + { \ + NV_PRINTF(levelfalse, format, ##__VA_ARGS__); \ + } \ + } while (0) +#endif + +// +// NV_FILE and NV_FUNCTION macros are used to wrap the __FILE__ and __FUNCTION__ +// macros, respectively, to enable passing them as parameters on release builds +// without linking the strings into the object files. Instead, this will use +// NV_LOG and other utilities to pass values in a way that the same information +// can be decoded on retail builds. +// +// On non-release builds, the strings are directly referenced and included in +// the builds (just like their normal references in DBG_PRINTF() and +// DBG_BREAKPOINT()). +// +// In MODS builds, we allow all printfs, but don't automatically include the +// __FILE__ or __FUNCTION__ references. +// +#if NV_PRINTF_STRINGS_ALLOWED && (!defined(NV_MODS) || defined(SIM_BUILD) || defined(DEBUG) || defined(DEVELOP) || defined(NV_MODS_INTERNAL)) +#define NV_FILE_STR __FILE__ +#define NV_FILE __FILE__ +#define NV_FILE_FMT "%s" +#define NV_FILE_TYPE const char * +#define NV_FUNCTION_STR __FUNCTION__ +#define NV_FUNCTION __FUNCTION__ +#define NV_FUNCTION_FMT "%s" +#define NV_FUNCTION_TYPE const char * +#else +#ifndef NV_FILE_STR +#define NV_FILE_STR "" +#endif +#ifdef NVLOG_FILEID +# define NV_FILE NVLOG_FILEID +#else +# define NV_FILE 0 +#endif +#define NV_FILE_FMT "" +#define NV_FILE_TYPE NvU32 +// +// A couple caveats on portUtilExGetStackTrace(): +// +// 1. portUtilExGetStackTrace is not supported on all builds. For example, see +// GCC support in util-gcc-clang.h. +// +// 2. portUtilExGetStackTrace(0) will give us the current IP, which is +// current_function()+offset. Commands such as `ln` in windbg can translate the +// IP into func+offset. But sometimes, due to inlining/optimizations, the +// current function at runtime is not the same as at compile time. In the +// inlining example, if a function using NV_FUNCTION is inlined, the pointer +// printed will be calling_function()+offset. +// +//#define NV_FUNCTION portUtilExGetStackTrace(0) +#define NV_FUNCTION_STR "" +#define NV_FUNCTION 0 +#define NV_FUNCTION_FMT "" +#define NV_FUNCTION_TYPE NvUPtr +#endif + +#ifdef __cplusplus +} +#endif //__cplusplus + +/// @} +#endif // NVPRINTF_H diff --git a/src/nvidia/inc/libraries/utils/nvprintf_level.h b/src/nvidia/inc/libraries/utils/nvprintf_level.h new file mode 100644 index 0000000..2d6f275 --- /dev/null +++ b/src/nvidia/inc/libraries/utils/nvprintf_level.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVPRINTF_LEVEL_H
+#define NVPRINTF_LEVEL_H
+
+/// @defgroup NV_PRINTF_LEVELS Printf verbosity levels
+/// @{
+/// @brief Prints at this level are discarded
+#define LEVEL_SILENT 0x0
+/// @brief Verbose debug logging level (e.g. signaling function entry)
+#define LEVEL_INFO 0x1
+/// @brief Standard debug logging level (e.g. illegal ctrl call)
+#define LEVEL_NOTICE 0x2
+/// @brief Warning logging level (e.g. feature not supported)
+#define LEVEL_WARNING 0x3
+/// @brief Error logging level (e.g. resource allocation failed)
+#define LEVEL_ERROR 0x4
+/// @brief Recoverable HW error (e.g. RC events)
+#define LEVEL_HW_ERROR 0x5
+/// @brief Unrecoverable error (e.g. Bus timeout)
+#define LEVEL_FATAL 0x6
+/// @}
+
+#define NV_LEVEL_MAX LEVEL_FATAL
+
+/**
+ * @def NV_PRINTF_LEVEL_ENABLED(level)
+ * @brief This macro evaluates to 1 if prints of a given level will be compiled.
+ *
+ * By default, a level is enabled if it is at or above NV_PRINTF_LEVEL.
+ */
+#ifndef NV_PRINTF_LEVEL_ENABLED
+#define NV_PRINTF_LEVEL_ENABLED(level) ((level) >= NV_PRINTF_LEVEL)
+#endif
+
+// Values for NV_LOG_SPECIAL to specify how log entries are encoded.
+#define RM_GSP_LOG_SPECIAL_NONE 0x0
+#define RM_GSP_LOG_SPECIAL_ASSERT_FAILED 0x1
+#define RM_GSP_LOG_SPECIAL_ASSERT_OK_FAILED 0x2
+#define RM_GSP_LOG_SPECIAL_CHECK_FAILED 0x3
+#define RM_GSP_LOG_SPECIAL_CHECK_OK_FAILED 0x4
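+
+/*
+ * A short worked example of the gating arithmetic above: with NV_PRINTF_LEVEL
+ * set to LEVEL_NOTICE (0x2), NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) is
+ * (0x1 >= 0x2), i.e. 0, so LEVEL_INFO prints compile out, while
+ * NV_PRINTF_LEVEL_ENABLED(LEVEL_ERROR) is (0x4 >= 0x2), i.e. 1.
+ */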
+#endif // NVPRINTF_LEVEL_H
diff --git a/src/nvidia/inc/libraries/utils/nvrange.h b/src/nvidia/inc/libraries/utils/nvrange.h
new file mode 100644
index 0000000..1bc3642
--- /dev/null
+++ b/src/nvidia/inc/libraries/utils/nvrange.h
@@ -0,0 +1,378 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _NV_UTILS_NV_RANGE_H_
+#define _NV_UTILS_NV_RANGE_H_
+
+#include "nvtypes.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \anchor NV_RANGE_1
+ * @defgroup NV_RANGE NV_RANGE
+ *
+ * @brief Range is a sequence of unsigned 64 bit integers, represented by its
+ * lower and upper bounds, inclusive.
+ *
+ * @details Range is a general purpose data structure utility.
+ * It consists of two fields, the lower and the upper bound.
+ * It is assumed that both lower and upper bounds are \b inclusive.
+ * A range whose lower bound is greater than its upper bound is considered
+ * to be an empty range.
+ *
+ * @note If a range is declared like Range r = {0x0, 0x2}, it consists
+ * of the elements 0x0, 0x1 and 0x2, i.e. Range = [lo, hi]!
+ *
+ * > There are 4 possible options
+ * > -# (lo, hi) lo+1 .. hi-1
+ * > -# [lo, hi) lo .. hi-1
+ * > -# (lo, hi] lo+1 .. hi
+ * > -# [lo, hi] lo .. hi
+ *
+ * Notice that only option 4 is capable of describing a full range.
+ * Full range would be 0x0..NvU64_MAX, where
+ * NvU64_MAX = 0xFFFFFFFFFFFFFFFF.
+ *
+ * @{
+ */
+typedef struct NV_RANGE NV_RANGE;
+struct NV_RANGE
+{
+    /** Lower bound of the range, where range includes the lower bound.*/
+    NvU64 lo;
+    /** Upper bound of the range, where range includes the upper bound.*/
+    NvU64 hi;
+};
+
+static const NV_RANGE NV_RANGE_EMPTY = {1, 0};
+
+/**
+ * @brief Checks if range is empty, i.e. range.lo > range.hi
+ *
+ * @returns NV_TRUE if range is empty, NV_FALSE otherwise.
+ */
+static NV_INLINE NvBool rangeIsEmpty(NV_RANGE range)
+{
+    return range.lo > range.hi;
+}
+
+/**
+ * @brief Calculate the range length, i.e. the number of elements in the range.
+ *
+ * @warning If the range is max, i.e. from 0 to NvU64_MAX, calling this
+ * function would result in overflow since range length is calculated
+ * as hi-lo+1.
+ *
+ * @par Example:
+ * @snippet nv_range-test.cpp rangeLengthExample
+ */
+static NV_INLINE NvU64 rangeLength(NV_RANGE range)
+{
+    return rangeIsEmpty(range) ? 0 : range.hi - range.lo + 1;
+}
+
+/**
+ * @brief Creates a range
+ *
+ * @details This is useful since on some compilers the following code won't
+ * work: `rangeLength({0, 100})`.
+ * However, `rangeLength(rangeMake(0, 100))` will always work.
+ *
+ * @returns Range of elements from and including \a lo to and
+ * including \a hi, i.e. [lo, hi]
+ */
+static NV_INLINE NV_RANGE rangeMake(NvU64 lo, NvU64 hi)
+{
+    NV_RANGE rng = {lo, hi};
+    return rng;
+}
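+
+/*
+ * A minimal usage sketch of the constructors above:
+ *
+ *     NV_RANGE r = rangeMake(0x1000, 0x1FFF);
+ *     NvU64 len = rangeLength(r);            // 0x1000 elements, bounds inclusive
+ *     NvBool empty = rangeIsEmpty(r);        // NV_FALSE
+ *     empty = rangeIsEmpty(NV_RANGE_EMPTY);  // NV_TRUE, since lo (1) > hi (0)
+ */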
+
+/**
+ * @brief Check if the two given ranges are equal.
+ */
+static NV_INLINE NvBool rangeEquals(NV_RANGE range1, NV_RANGE range2)
+{
+    if (rangeIsEmpty(range1) && rangeIsEmpty(range2))
+    {
+        return NV_TRUE;
+    }
+
+    return (range1.lo == range2.lo) && (range1.hi == range2.hi);
+}
+
+/**
+ * @brief Check if \a range1 contains \a range2.
+ *
+ * @param[in] range1 Container.
+ * @param[in] range2 Containee.
+ *
+ * @par Example:
+ * @snippet nv_range-test.cpp rangeContainsExample
+ */
+static NV_INLINE NvBool rangeContains(NV_RANGE range1, NV_RANGE range2)
+{
+    return (range1.lo <= range2.lo) && (range1.hi >= range2.hi);
+}
+
+/**
+ * @brief Checks if intersection of two ranges is not an empty range.
+ *
+ * @par Example:
+ * @snippet nv_range-test.cpp rangeOverlapExample
+ */
+static NV_INLINE NvBool rangeOverlaps(NV_RANGE range1, NV_RANGE range2)
+{
+    return (range1.lo <= range2.lo && range2.lo <= range1.hi)
+           ||
+           (range1.lo <= range2.hi && range2.hi <= range1.hi)
+           ||
+           (range2.lo <= range1.lo && range1.lo <= range2.hi)
+           ||
+           (range2.lo <= range1.hi && range1.hi <= range2.hi);
+}
+
+/**
+ * @brief Returns a range representing an intersection between two given ranges.
+ *
+ * @par Example:
+ * @snippet nv_range-test.cpp rangeOverlapExample
+ */
+static NV_INLINE NV_RANGE rangeIntersection(NV_RANGE range1, NV_RANGE range2)
+{
+    NV_RANGE intersect;
+
+    if (rangeIsEmpty(range1) || rangeIsEmpty(range2))
+    {
+        return NV_RANGE_EMPTY;
+    }
+
+    intersect.lo = range1.lo < range2.lo ? range2.lo : range1.lo;
+    intersect.hi = range1.hi > range2.hi ? range2.hi : range1.hi;
+
+    return intersect;
+}
+
+/**
+ * @brief Compares two ranges.
+ * @returns 0  - \a range1's lower bound is equal to \a range2's lower bound,
+ *          <0 - \a range1's lower bound is less than \a range2's lower bound,
+ *          >0 - \a range1's lower bound is greater than \a range2's lower bound.
+ *
+ * @warning If function returns 0 that does not mean that ranges are equal,
+ * just that their lower bounds are equal!
+ *
+ * @par Example:
+ * @snippet nv_range-test.cpp rangeCompareExample
+ */
+static NV_INLINE NvS32 rangeCompare(NV_RANGE range1, NV_RANGE range2)
+{
+    if (rangeIsEmpty(range1) && rangeIsEmpty(range2))
+    {
+        return 0;
+    }
+
+    return range1.lo >= range2.lo ? (range1.lo == range2.lo ? 0 : 1) : -1;
+}
+
+/**
+ * @brief Merge two ranges into one.
+ *
+ * @returns Merged range. If the two ranges have no intersection
+ * the returned range will be empty.
+ *
+ * @note Empty range is range with lo > hi.
+ *
+ * @par Example:
+ * @snippet nv_range-test.cpp rangeMergeExample
+ */
+static NV_INLINE NV_RANGE rangeMerge(NV_RANGE range1, NV_RANGE range2)
+{
+    NV_RANGE merged = NV_RANGE_EMPTY;
+
+    if (rangeIsEmpty(range1) || rangeIsEmpty(range2) || !rangeOverlaps(range1, range2))
+    {
+        return merged;
+    }
+
+    merged.lo = range1.lo;
+    merged.hi = range1.hi;
+
+    if (range2.lo < merged.lo)
+    {
+        merged.lo = range2.lo;
+    }
+    if (range2.hi > merged.hi)
+    {
+        merged.hi = range2.hi;
+    }
+
+    return merged;
+}
+
+/**
+ * @brief Checks if \a range1 borders with \a range2, i.e. \a range1.lo ==
+ * \a range2.hi+1 or \a range2.lo == \a range1.hi+1
+ *
+ * @note [a,b] borders with [b+1,c] where a < b < c
+ *
+ */
+static NV_INLINE NvBool rangeBorders(NV_RANGE range1, NV_RANGE range2)
+{
+    if (rangeIsEmpty(range1) || rangeIsEmpty(range2))
+    {
+        return NV_FALSE;
+    }
+
+    return (range1.hi + 1 == range2.lo) || (range2.hi + 1 == range1.lo);
+}
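+
+/*
+ * A short sketch tying the set-style helpers above together:
+ *
+ *     NV_RANGE a = rangeMake(0, 10);
+ *     NV_RANGE b = rangeMake(8, 20);
+ *     rangeOverlaps(a, b);                   // NV_TRUE, they share 8..10
+ *     NV_RANGE i = rangeIntersection(a, b);  // [8, 10]
+ *     NV_RANGE m = rangeMerge(a, b);         // [0, 20]
+ *     NV_RANGE c = rangeMake(11, 20);
+ *     rangeMerge(a, c);                      // empty: a and c only border, no overlap
+ */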
+
+/**
+ * @brief Splits \a pBigRange
+ *
+ * @param[in/out] pBigRange             Pointer to starting range.
+ * @param[in]     rangeToSplit          Range to split the first range over.
+ * @param[out]    pSecondPartAfterSplit Second range after split.
+ *
+ * @par Example:
+ * @snippet nv_range-test.cpp rangeSplitExample
+ */
+static NV_INLINE NV_STATUS rangeSplit(NV_RANGE *pBigRange,
+    NV_RANGE rangeToSplit, NV_RANGE *pSecondPartAfterSplit)
+{
+    if (rangeIsEmpty(*pBigRange) || rangeIsEmpty(rangeToSplit) ||
+        !rangeContains(*pBigRange, rangeToSplit))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    pSecondPartAfterSplit->hi = pBigRange->hi;
+
+    // make sure we don't overflow/underflow
+    if (pBigRange->lo == rangeToSplit.lo)
+    {
+        *pBigRange = NV_RANGE_EMPTY;
+    }
+    else
+    {
+        pBigRange->hi = rangeToSplit.lo - 1;
+    }
+
+    if (pSecondPartAfterSplit->hi == rangeToSplit.hi)
+    {
+        *pSecondPartAfterSplit = NV_RANGE_EMPTY;
+    }
+    else
+    {
+        pSecondPartAfterSplit->lo = rangeToSplit.hi + 1;
+    }
+
+    return NV_OK;
+}
+
+/**
+ * @brief Carve out ranges from base ranges
+ *
+ * @param[in/out] baseRanges    array of the base ranges to be operated on
+ * @param[in]     arraySize     the size of the baseRanges array
+ * @param[in/out] numBaseRanges the number of valid ranges in the array
+ * @param[in]     carveouts     array of ranges that need to be carved out
+ * @param[in]     numCarveouts  number of the valid carveout entries
+ *
+ * @note Needs to satisfy: arraySize >= *numBaseRanges + numCarveouts
+ */
+static NV_INLINE NV_STATUS rangesCarveout
+(
+    NV_RANGE *baseRanges,
+    NvU32 arraySize,
+    NvU32 *numBaseRanges,
+    NV_RANGE *carveouts,
+    NvU32 numCarveouts
+)
+{
+    NvU32 i;
+    NvU32 j;
+    NvU32 count = *numBaseRanges;
+    NV_RANGE swap;
+
+    if (count > arraySize)
+        return NV_ERR_INVALID_ARGUMENT;
+    if (numCarveouts > arraySize - count)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    for (i = 0; i < numCarveouts; i++)
+    {
+        if (rangeIsEmpty(carveouts[i]))
+            continue;
+
+        for (j = 0; j < count; j++)
+        {
+            if (rangeContains(baseRanges[j], carveouts[i]))
+            {
+                rangeSplit(&baseRanges[j], carveouts[i], &baseRanges[count]);
+                count += 1;
+                break;
+            }
+        }
+    }
+
+    // remove all empty ranges, and sort results by lo
+    // a rather inefficient in-place sort, chosen for its simplicity; the
+    // number of ranges is expected to be low
+    i = 0;
+    while (i < count)
+    {
+        if (rangeIsEmpty(baseRanges[i]))
+        {
+            baseRanges[i] = baseRanges[count-1];
+            baseRanges[count-1] = NV_RANGE_EMPTY;
+            count -= 1;
+            continue;
+        }
+
+        for (j = i + 1; j < count; j++)
+        {
+            if (!rangeIsEmpty(baseRanges[j]) &&
+                baseRanges[j].lo < baseRanges[i].lo)
+            {
+                swap = baseRanges[i];
+                baseRanges[i] = baseRanges[j];
+                baseRanges[j] = swap;
+            }
+        }
+
+        i++;
+    }
+
+    *numBaseRanges = count;
+
+    return NV_OK;
+}
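+
+/*
+ * A minimal sketch of carving a hole out of one base range (note the array
+ * must leave room for the split-off remainders, per the constraint
+ * arraySize >= *numBaseRanges + numCarveouts):
+ *
+ *     NV_RANGE base[2] = { rangeMake(0, 100), NV_RANGE_EMPTY };
+ *     NvU32 numBase = 1;
+ *     NV_RANGE hole = rangeMake(40, 60);
+ *     rangesCarveout(base, 2, &numBase, &hole, 1);
+ *     // numBase == 2; base[0] == [0, 39], base[1] == [61, 100], sorted by lo
+ */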
+
+#ifdef __cplusplus
+}
+#endif
+///@}
+/// NV_UTILS_RANGE
+#endif
diff --git a/src/nvidia/inc/os/dce_rm_client_ipc.h b/src/nvidia/inc/os/dce_rm_client_ipc.h
new file mode 100644
index 0000000..9b1b5d0
--- /dev/null
+++ b/src/nvidia/inc/os/dce_rm_client_ipc.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _OS_DCE_CLIENT_IPC_H_
+#define _OS_DCE_CLIENT_IPC_H_
+
+// RM IPC Client Types
+
+#define DCE_CLIENT_RM_IPC_TYPE_SYNC  0x0
+#define DCE_CLIENT_RM_IPC_TYPE_EVENT 0x1
+#define DCE_CLIENT_RM_IPC_TYPE_MAX   0x2
+
+void dceclientHandleAsyncRpcCallback(NvU32 handle, NvU32 interfaceType,
+                                     NvU32 msgLength, void *data,
+                                     void *usrCtx);
+#endif
diff --git a/src/nvidia/interface/acpidsmguids.h b/src/nvidia/interface/acpidsmguids.h
new file mode 100644
index 0000000..38b867a
--- /dev/null
+++ b/src/nvidia/interface/acpidsmguids.h
@@ -0,0 +1,90 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2000-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ACPIDSMGUIDS_H
+#define ACPIDSMGUIDS_H
+
+#include "nvcd.h"
+
+//
+// These GUIDs are used on Linux.
+// The same GUID values are defined in a Windows way in guids.c.
+// These revision_ids are also defined in platform/nbsi/nbsi_read.h, nvmxm.h, nvhybridacpi.h, nbci.h.
+// Those files should be changed to use these definitions.
+//
+#define NBSI_DSM_GUID_STR { 0xA6, 0x69, 0x86, 0x99, 0xE9, 0x8B, 0xFB, 0x49, \
+                            0xBD, 0xDB, 0x51, 0xA1, 0xEF, 0xE1, 0x9C, 0x3D }
+#define NBSI_REVISION_ID 0x00000101
+
+#define MXM_DSM_GUID_STR { 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C, \
+                           0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65}
+#define ACPI_MXM_REVISION_ID 0x00000300
+
+#define NVHG_DSM_GUID_STR { 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4d, \
+                            0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4}
+#define NVHG_REVISION_ID 0x00000102
+
+#define NBCI_DSM_GUID_STR { 0x75, 0x0B, 0xA5, 0xD4, 0xC7, 0x65, 0xF7, 0x46, \
+                            0xBF, 0xB7, 0x41, 0x51, 0x4C, 0xEA, 0x02, 0x44}
+#define NBCI_REVISION_ID 0x00000102
+
+#define NVOP_DSM_GUID_STR { 0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47, \
+                            0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0}
+#define NVOP_REVISION_ID 0x00000100
+
+#define PCFG_DSM_GUID_STR { 0x7D, 0x14, 0xC6, 0x81, 0x5F, 0x73, 0xD9, 0x42, \
+                            0x9E, 0x41, 0xB0, 0x02, 0xCB, 0xC6, 0x57, 0x1D}
+#define PCFG_REVISION_ID 0x00000100
+
+#define GPS_2X_REVISION_ID 0x00000200
+
+#define GPS_DSM_GUID_STR { 0x01, 0x2D, 0x13, 0xA3, 0xDA, 0x8C, 0xBA, 0x49, \
+                           0xA5, 0x2E, 0xBC, 0x9D, 0x46, 0xDF, 0x6B, 0x81}
+#define GPS_REVISION_ID 0x00000100
+
+#define JT_DSM_GUID_STR { 0x51, 0xA3, 0xEC, 0xCB, 0x7B, 0x06, 0x24, 0x49, \
+                          0x9C, 0xBD, 0xB4, 0x6B, 0x00, 0xB8, 0x6F, 0x34}
+#define JT_REVISION_ID 0x00000103
+
+// PEX_DSM_GUID {E5C937D0-3553-4D7A-9117-EA4D19C3434D}
+#define PEX_DSM_GUID_STR { 0xD0, 0x37, 0xC9, 0xE5, 0x53, 0x35, 0x7A, 0x4D, \
+                           0x91, 0x17, 0xEA, 0x4D, 0x19, 0xC3, 0x43, 0x4D}
+#define PEX_REVISION_ID 0x00000002
+
+#define NVPCF_ACPI_DSM_REVISION_ID 0x00000100
+
+#define NVPCF_2X_ACPI_DSM_REVISION_ID 0x00000200
+
+extern const GUID NBCI_DSM_GUID;
+extern const GUID NBSI_DSM_GUID;
+extern const GUID NVHG_DSM_GUID;
+extern const GUID NVOP_DSM_GUID;
+extern const GUID SPB_DSM_GUID;
+extern const GUID DSM_MXM_GUID;
+extern const GUID PCFG_DSM_GUID;
+extern const GUID GPS_DSM_GUID;
+extern const GUID PEX_DSM_GUID;
+extern const GUID JT_DSM_GUID;
+extern const GUID NVPCF_ACPI_DSM_GUID;
+
+#endif // ACPIDSMGUIDS_H
diff --git a/src/nvidia/interface/acpigenfuncs.h b/src/nvidia/interface/acpigenfuncs.h
new file mode 100644
index 0000000..0e98fc9
--- /dev/null
+++ b/src/nvidia/interface/acpigenfuncs.h
@@ -0,0 +1,83 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#include "nvacpitypes.h" + +#ifndef _ACPIGENFUNCS_H_ +#define _ACPIGENFUNCS_H_ + +#define NV_ACPI_DSM_READ_SIZE (4*1024) + +#define NV_ACPI_GENERIC_FUNC_START 0x0200 +#define NV_ACPI_GENERIC_FUNC_COUNT 8 + +// Only use these when using the generic function ACPI_DSM_FUNCTION_CURRENT. +#define NV_ACPI_GENERIC_FUNC_DISPLAYSTATUS (NV_ACPI_GENERIC_FUNC_START+0x00) // Get Display & Hot-Key information +#define NV_ACPI_GENERIC_FUNC_MDTL (NV_ACPI_GENERIC_FUNC_START+0x01) // Display Toggle List +#define NV_ACPI_GENERIC_FUNC_GETOBJBYTYPE (NV_ACPI_GENERIC_FUNC_START+0x02) // Get the firmware object +#define NV_ACPI_GENERIC_FUNC_GETALLOBJS (NV_ACPI_GENERIC_FUNC_START+0x03) // Get the directory and all objects +#define NV_ACPI_GENERIC_FUNC_GETEVENTLIST (NV_ACPI_GENERIC_FUNC_START+0x04) // Get the List of required Event Notifiers and their meaning +#define NV_ACPI_GENERIC_FUNC_CALLBACKS (NV_ACPI_GENERIC_FUNC_START+0x05) // Get the list of system-required callbacks +#define NV_ACPI_GENERIC_FUNC_GETBACKLIGHT (NV_ACPI_GENERIC_FUNC_START+0x06) // Get the Backlight +#define NV_ACPI_GENERIC_FUNC_MSTL (NV_ACPI_GENERIC_FUNC_START+0x07) // Get Multiple Stream Topology Toggle info + +// structure used for NV_ACPI_GENERIC_FUNC_CTL_TESTSUBFUNCENABLED and NV_ACPI_GENERIC_FUNC_CTL_REMAPFUNC calls. +typedef struct +{ + ACPI_DSM_FUNCTION acpiDsmFunction; + NvU32 acpiDsmSubFunction; + NvU32 status; +} DSMTESTCTL, *PDSMTESTCTL; + +// when adding new generic functions, change NV_ACPI_GENERIC_FUNC_LAST_SUBFUNCTION to last entry. +#define NV_ACPI_GENERIC_FUNC_LAST_SUBFUNCTION (NV_ACPI_GENERIC_FUNC_MSTL) +ct_assert(NV_ACPI_GENERIC_FUNC_COUNT == ((NV_ACPI_GENERIC_FUNC_LAST_SUBFUNCTION-NV_ACPI_GENERIC_FUNC_START)+1)); + +// These are not DSM functions, but used by clients (such as DD) to choose special ctrl0073 processing related to DSM. +#define NV_ACPI_GENERIC_FUNC_CTL_START 0x0600 +#define NV_ACPI_GENERIC_FUNC_CTL_TESTSUBFUNCENABLED (NV_ACPI_GENERIC_FUNC_CTL_START+0x00) // exec testIfDsmSubFunctionEnabled +#define NV_ACPI_GENERIC_FUNC_CTL_REMAPFUNC (NV_ACPI_GENERIC_FUNC_CTL_START+0x01) // exec remapDsmFunctionAndSubFunction +#define NV_ACPI_GENERIC_FUNC_CTL_GETFUNCSUPPORT (NV_ACPI_GENERIC_FUNC_CTL_START+0x02) // get generic dsm supported functions + // +// when adding new control functions, change NV_ACPI_GENERIC_FUNC_CTL_LAST_SUBFUNCTION to last entry. +#define NV_ACPI_GENERIC_FUNC_CTL_LAST_SUBFUNCTION (NV_ACPI_GENERIC_FUNC_CTL_GETFUNCSUPPORT) +#define NV_ACPI_GENERIC_FUNC_CTL_COUNT ((NV_ACPI_GENERIC_FUNC_CTL_LAST_SUBFUNCTION-NV_ACPI_GENERIC_FUNC_CTL_START)+1) + +#define IS_GENERIC_DSM_FUNC_SUPPORTED(package, subfunc) (((package >> (subfunc-NV_ACPI_GENERIC_FUNC_START)) & NVBIT(0)) ? true : false) + +// status for dsm functions. +#define DSM_FUNC_STATUS_UNKNOWN 0 // untried +#define DSM_FUNC_STATUS_FAILED 1 // tried but failed +#define DSM_FUNC_STATUS_SUCCESS 2 // tried and successful +#define DSM_FUNC_STATUS_DISABLED 3 // disabled via regkey +#define DSM_FUNC_STATUS_OVERRIDE 4 // regkey or code hack override + +// +// common NV definitions used in ACPI dsm calls in particular. +// +#define NV_ACPI_ALL_FUNC_SUPPORT 0x00000000 // Common is supported subfunction. +#define NV_ACPI_ALL_FUNC_SUPPORTED NVBIT(NV_ACPI_ALL_FUNC_SUPPORT) // is common Function supported? 
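+
+/*
+ * A short worked example of the support-mask check above: if the generic
+ * subfunction support package has bit 2 set, then
+ *
+ *     IS_GENERIC_DSM_FUNC_SUPPORTED(pkg, NV_ACPI_GENERIC_FUNC_GETOBJBYTYPE)
+ *
+ * shifts the package right by (0x0202 - 0x0200) = 2 bits and tests bit 0,
+ * reporting that GETOBJBYTYPE is supported.
+ */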
+#define NV_ACPI_ALL_SUBFUNC_UNKNOWN 0xFFFFFFFF // Common define for unknown ACPI sub-function + +#endif // _ACPIGENFUNCS_H_ + diff --git a/src/nvidia/interface/deprecated/rmapi_deprecated.h b/src/nvidia/interface/deprecated/rmapi_deprecated.h new file mode 100644 index 0000000..57ab6b2 --- /dev/null +++ b/src/nvidia/interface/deprecated/rmapi_deprecated.h @@ -0,0 +1,121 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _RMAPI_DEPRECATED_H_ +#define _RMAPI_DEPRECATED_H_ + +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvmisc.h" +#include "nvos.h" +#include "nvsecurityinfo.h" +// +// This file provides implementations for deprecated RM API by building on the +// modern APIs. The implementations support running in either +// user-mode or kernel-mode context and should have no dependencies on RM +// internals. +// + +/*! + * GSS legacy command masks + */ +#define RM_GSS_LEGACY_MASK 0x00008000 +#define RM_GSS_LEGACY_MASK_NON_PRIVILEGED 0x00008000 +#define RM_GSS_LEGACY_MASK_PRIVILEGED 0x0000C000 + + +typedef enum +{ + RMAPI_DEPRECATED_COPYIN, + RMAPI_DEPRECATED_COPYOUT, + RMAPI_DEPRECATED_COPYRELEASE, +} RMAPI_DEPRECATED_COPY_OP; + +typedef enum +{ + RMAPI_DEPRECATED_BUFFER_EMPLACE, // Use buffer passed into CopyUser + RMAPI_DEPRECATED_BUFFER_ALLOCATE // Allocate a new buffer in CopyUser +} RMAPI_DEPRECATED_BUFFER_POLICY; + +/** + * Fields are populated by the deprecated RM API caller. RmAlloc, RmControl, and + * RmFree should be routed to RM. pExtendedContext can hold any domain specific + * state needed by the RmAlloc/etc implementations. AllocMem/FreeMem are routed + * to OS layers for allocation/free-up of system memory. 
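+ *
+ * A minimal call-through sketch (assuming "ctx" has been populated by the
+ * environment, e.g. by kernel glue code; handle values are placeholders):
+ *
+ *     DEPRECATED_CONTEXT *ctx = ...;
+ *     NvHandle hParent;
+ *     NV_STATUS status = RmDeprecatedGetHandleParent(ctx, hClient, hObject,
+ *                                                    &hParent);
+ *
+ * which internally issues NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO through
+ * ctx->RmControl.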
+ */ +typedef struct _DEPRECATED_CONTEXT +{ + NV_STATUS (*RmAlloc)(struct _DEPRECATED_CONTEXT *pContext, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, void *pAllocParams, NvU32 paramsSize); + + NV_STATUS (*RmControl)(struct _DEPRECATED_CONTEXT *pContext, NvHandle hClient, NvHandle hObject, + NvU32 cmd, void *pParams, NvU32 paramsSize); + + NV_STATUS (*RmFree)(struct _DEPRECATED_CONTEXT *pContext, NvHandle hClient, NvHandle hObject); + + NV_STATUS (*RmMapMemory)(struct _DEPRECATED_CONTEXT *pContext, NvHandle hClient, NvHandle hDevice, + NvHandle hMemory, NvU64 offset, NvU64 length, NvP64 *ppCpuVirtAddr, NvU32 flags); + + // Copies data in/out of user-mode address space. + NV_STATUS (*CopyUser)(struct _DEPRECATED_CONTEXT *pContext, RMAPI_DEPRECATED_COPY_OP op, + RMAPI_DEPRECATED_BUFFER_POLICY bufPolicy, NvP64 dataPtr, + NvU32 dataSize, void **ppKernelPtr); + void * (*AllocMem)(NvU32 length); + void (*FreeMem)(void *pAddress); + void *pExtendedContext; +} DEPRECATED_CONTEXT; + +/** + * List of deprecated APIs supported by this library + */ +void RmDeprecatedAllocObject(DEPRECATED_CONTEXT *pContext, NVOS05_PARAMETERS *pArgs); +void RmDeprecatedAddVblankCallback(DEPRECATED_CONTEXT *pContext, NVOS61_PARAMETERS *pArgs); +void RmDeprecatedAllocContextDma(DEPRECATED_CONTEXT *pContext, NVOS39_PARAMETERS *pArgs); +void RmDeprecatedBindContextDma(DEPRECATED_CONTEXT *pContext, NVOS49_PARAMETERS *pArgs); +void RmDeprecatedI2CAccess(DEPRECATED_CONTEXT *pContext, NVOS_I2C_ACCESS_PARAMS *pArgs); +void RmDeprecatedIdleChannels(DEPRECATED_CONTEXT *pContext, NVOS30_PARAMETERS *pArgs); +void RmDeprecatedVidHeapControl(DEPRECATED_CONTEXT *pContext, NVOS32_PARAMETERS *pArgs); +void RmDeprecatedAllocMemory(DEPRECATED_CONTEXT *pContext, NVOS02_PARAMETERS *pArgs); + + +/** + * List of utility functions (used within shims) + */ +typedef NV_STATUS (*RmDeprecatedControlHandler)(API_SECURITY_INFO*,DEPRECATED_CONTEXT*,NVOS54_PARAMETERS*); +RmDeprecatedControlHandler RmDeprecatedGetControlHandler(NVOS54_PARAMETERS *pArgs); +NvBool IsGssLegacyCall(NvU32 cmd); + +NV_STATUS RmDeprecatedGetHandleParent(DEPRECATED_CONTEXT *pContext, NvHandle hClient, + NvHandle hObject, NvHandle *phParent); +NV_STATUS RmDeprecatedGetClassID(DEPRECATED_CONTEXT *pContext, NvHandle hClient, + NvHandle hObject, NvU32 *pClassId); +NV_STATUS RmDeprecatedFindOrCreateSubDeviceHandle(DEPRECATED_CONTEXT *pContext, NvHandle hClient, + NvHandle hDeviceOrSubDevice, NvHandle *pHSubDevice, + NvBool *pBMustFree); +NV_STATUS RmDeprecatedConvertOs32ToOs02Flags(NvU32 attr, NvU32 attr2, NvU32 os32Flags, NvU32 *pOs02Flags); +NV_STATUS RmDeprecatedConvertOs02ToOs32Flags(NvU32 os02Flags, NvU32 *pAttr, NvU32 *pAttr2, NvU32 *pOs32Flags); + +NV_STATUS RmDeprecatedGetOrAllocObject(DEPRECATED_CONTEXT *pContext, NvHandle hClient, NvU32 classId, NvHandle *pHObject); + +NV_STATUS RmCopyUserForDeprecatedApi(RMAPI_DEPRECATED_COPY_OP op,RMAPI_DEPRECATED_BUFFER_POLICY bufPolicy, + NvP64 dataPtr, NvU32 dataSize, void **ppKernelPtr, NvBool bUserModeArgs); +#endif diff --git a/src/nvidia/interface/deprecated/rmapi_deprecated_utils.c b/src/nvidia/interface/deprecated/rmapi_deprecated_utils.c new file mode 100644 index 0000000..0b2f8bb --- /dev/null +++ b/src/nvidia/interface/deprecated/rmapi_deprecated_utils.c @@ -0,0 +1,468 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "deprecated/rmapi_deprecated.h" + +#include "class/cl0080.h" // NV01_DEVICE_0 +#include "class/cl2080.h" // NV20_SUBDEVICE_0 +#include "ctrl/ctrl0000/ctrl0000client.h" // NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE +#include "ctrl/ctrl0080/ctrl0080gpu.h" // NV0080_CTRL_CMD_GPU_FIND_SUBDEVICE_HANDLE +#include "nvos.h" + +#include + +NV_STATUS +RmDeprecatedGetHandleParent +( + DEPRECATED_CONTEXT *pContext, + NvHandle hClient, + NvHandle hObject, + NvHandle *phParent +) +{ + NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS parentParams = {0}; + NV_STATUS status; + + parentParams.hObject = hObject; + parentParams.index = NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_PARENT; + + status = pContext->RmControl(pContext, hClient, hClient, NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO, + &parentParams, sizeof(parentParams)); + + *phParent = parentParams.data.hResult; + + return status; +} + +NV_STATUS +RmDeprecatedGetClassID +( + DEPRECATED_CONTEXT *pContext, + NvHandle hClient, + NvHandle hObject, + NvU32 *pClassId +) +{ + NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS classIdParams = {0}; + NV_STATUS status; + + classIdParams.hObject = hObject; + classIdParams.index = NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_CLASSID; + + status = pContext->RmControl(pContext, hClient, hClient, + NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO, + &classIdParams, + sizeof(classIdParams)); + + *pClassId = NvU64_LO32(classIdParams.data.iResult); + + return status; +} + +NV_STATUS +RmDeprecatedFindOrCreateSubDeviceHandle +( + DEPRECATED_CONTEXT *pContext, + NvHandle hClient, + NvHandle hDeviceOrSubDevice, + NvHandle *pHSubDevice, + NvBool *pBMustFree +) +{ + NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM findParams = {0}; + NV_STATUS status; + NvU32 classId; + + // + // Step 1.) check if we already have a subdevice + // + status = RmDeprecatedGetClassID(pContext, hClient, hDeviceOrSubDevice, &classId); + + if (status != NV_OK) + return status; + + if (classId == NV20_SUBDEVICE_0) + { + *pBMustFree = NV_FALSE; + *pHSubDevice = hDeviceOrSubDevice; + return NV_OK; + } + else if (classId != NV01_DEVICE_0) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // Step 2.) 
check if there is a subdevice allocated under this device + // + findParams.subDeviceInst = 0; + + status = pContext->RmControl(pContext, hClient, hDeviceOrSubDevice, + NV0080_CTRL_CMD_GPU_FIND_SUBDEVICE_HANDLE, + &findParams, + sizeof(findParams)); + + if (status == NV_OK && findParams.hSubDevice) + { + *pBMustFree = NV_FALSE; + *pHSubDevice = findParams.hSubDevice; + return status; + } + + // + // Step 3.) if there is no device, we temporarily allocate a subdevice. + // Subdevice must be freed before we exit out to allow the client to reserve + // it if it chooses to do so later on. + // + *pBMustFree = NV_TRUE; + + *pHSubDevice = 0; + + status = pContext->RmAlloc(pContext, hClient, hDeviceOrSubDevice, pHSubDevice, NV20_SUBDEVICE_0, NULL, 0); + + return status; +} + +NV_STATUS RmDeprecatedGetOrAllocObject +( + DEPRECATED_CONTEXT *pContext, + NvHandle hClient, + NvU32 classId, + NvHandle *pHObject +) +{ + NV_STATUS status; + + NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS params = {0}; + params.hParent = *pHObject; + params.classId = classId; + status = pContext->RmControl(pContext, hClient, hClient, + NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE, + ¶ms, sizeof(params)); + // Object already exists, just return it + if (status == NV_OK && params.hObject != 0) + { + *pHObject = params.hObject; + } + else + { + // + // Object does not exist yet, allocate. + // TODO: Fill alloc params for classes that need them + // + status = pContext->RmAlloc(pContext, hClient, *pHObject, + pHObject, classId, NULL, 0); + } + return status; +} + +NV_STATUS +RmDeprecatedConvertOs32ToOs02Flags +( + NvU32 attr, + NvU32 attr2, + NvU32 os32Flags, + NvU32 *pOs02Flags +) +{ + NvU32 os02Flags = 0; + NV_STATUS rmStatus = NV_OK; + + switch (DRF_VAL(OS32, _ATTR, _PHYSICALITY, attr)) + { + case NVOS32_ATTR_PHYSICALITY_DEFAULT: // NVOS02 defaults to contiguous. 
+ case NVOS32_ATTR_PHYSICALITY_CONTIGUOUS: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, os02Flags); + break; + } + case NVOS32_ATTR_PHYSICALITY_NONCONTIGUOUS: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _PHYSICALITY, _NONCONTIGUOUS, os02Flags); + break; + } + default: + { + rmStatus = NV_ERR_INVALID_FLAGS; + break; + } + } + + switch (DRF_VAL(OS32, _ATTR, _LOCATION, attr)) + { + case NVOS32_ATTR_LOCATION_PCI: + case NVOS32_ATTR_LOCATION_ANY: // NVOS02 defaults to PCI + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _LOCATION, _PCI, os02Flags); + break; + } + case NVOS32_ATTR_LOCATION_VIDMEM: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _LOCATION, _VIDMEM, os02Flags); + break; + } + default: + { + rmStatus = NV_ERR_INVALID_FLAGS; + break; + } + } + + switch (DRF_VAL(OS32, _ATTR, _COHERENCY, attr)) + { + case NVOS32_ATTR_COHERENCY_UNCACHED: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, os02Flags); + break; + } + case NVOS32_ATTR_COHERENCY_CACHED: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _CACHED, os02Flags); + break; + } + case NVOS32_ATTR_COHERENCY_WRITE_COMBINE: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_COMBINE, os02Flags); + break; + } + case NVOS32_ATTR_COHERENCY_WRITE_THROUGH: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_THROUGH, os02Flags); + break; + } + case NVOS32_ATTR_COHERENCY_WRITE_PROTECT: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_PROTECT, os02Flags); + break; + } + case NVOS32_ATTR_COHERENCY_WRITE_BACK: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK, os02Flags); + break; + } + default: + { + rmStatus = NV_ERR_INVALID_FLAGS; + break; + } + } + + switch (DRF_VAL(OS32, _ATTR2, _GPU_CACHEABLE, attr2)) + { + case NVOS32_ATTR2_GPU_CACHEABLE_YES: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, os02Flags); + break; + } + case NVOS32_ATTR2_GPU_CACHEABLE_DEFAULT: // NVOS02 defaults to non-cacheable + case NVOS32_ATTR2_GPU_CACHEABLE_NO: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _NO, os02Flags); + break; + } + default: + { + rmStatus = NV_ERR_INVALID_FLAGS; + break; + } + } + + switch (DRF_VAL(OS32, _ATTR2, _REGISTER_MEMDESC_TO_PHYS_RM, attr2)) + { + case NVOS32_ATTR2_REGISTER_MEMDESC_TO_PHYS_RM_TRUE: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _REGISTER_MEMDESC_TO_PHYS_RM, _TRUE, os02Flags); + break; + } + case NVOS32_ATTR2_REGISTER_MEMDESC_TO_PHYS_RM_FALSE: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _REGISTER_MEMDESC_TO_PHYS_RM, _FALSE, os02Flags); + break; + } + default: + { + rmStatus = NV_ERR_INVALID_FLAGS; + break; + } + } + + // VidHeapControl never creates a mapping + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, os02Flags); + if (os32Flags & NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP) + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _KERNEL_MAPPING, _MAP, os02Flags); + else + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _KERNEL_MAPPING, _NO_MAP, os02Flags); + + if (os32Flags & NVOS32_ALLOC_FLAGS_USER_READ_ONLY) + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, _YES, os02Flags); + + if (os32Flags & NVOS32_ALLOC_FLAGS_DEVICE_READ_ONLY) + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, _YES, os02Flags); + + if (FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, attr2)) + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, _YES, os02Flags); + + if (FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY, attr2)) + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, 
_YES, os02Flags); + + if (FLD_TEST_DRF(OS32, _ATTR2, _NISO_DISPLAY, _YES, attr2)) + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, os02Flags); + else + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _NO, os02Flags); + + if (FLD_TEST_DRF(OS32, _ATTR2, _MEMORY_PROTECTION, _PROTECTED, attr2)) + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _MEMORY_PROTECTION, _PROTECTED, os02Flags); + } + else if (FLD_TEST_DRF(OS32, _ATTR2, _MEMORY_PROTECTION, _UNPROTECTED, attr2)) + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _MEMORY_PROTECTION, _UNPROTECTED, os02Flags); + } + + if (rmStatus == NV_OK) + { + *pOs02Flags = os02Flags; + } + + return rmStatus; +} + +NV_STATUS +RmDeprecatedConvertOs02ToOs32Flags +( + NvU32 os02Flags, + NvU32 *pAttr, + NvU32 *pAttr2, + NvU32 *pOs32Flags +) +{ + NvU32 os32Flags = 0; + NvU32 attr = 0, attr2 = 0; + NV_STATUS rmStatus = NV_OK; + + attr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _DEFAULT, attr); + + if (FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, os02Flags)) + attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, attr); + else + attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, attr); + + switch (DRF_VAL(OS02, _FLAGS, _LOCATION, os02Flags)) + { + case NVOS02_FLAGS_LOCATION_PCI: + { + attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, attr); + break; + } + case NVOS02_FLAGS_LOCATION_VIDMEM: + { + attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, attr); + break; + } + default: + { + rmStatus = NV_ERR_INVALID_FLAGS; + break; + } + } + + switch (DRF_VAL(OS02, _FLAGS, _COHERENCY, os02Flags)) + { + case NVOS02_FLAGS_COHERENCY_UNCACHED: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED, attr); + break; + } + case NVOS02_FLAGS_COHERENCY_CACHED: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _CACHED, attr); + break; + } + case NVOS02_FLAGS_COHERENCY_WRITE_COMBINE: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_COMBINE, attr); + break; + } + case NVOS02_FLAGS_COHERENCY_WRITE_THROUGH: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_THROUGH, attr); + break; + } + case NVOS02_FLAGS_COHERENCY_WRITE_PROTECT: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_PROTECT, attr); + break; + } + case NVOS02_FLAGS_COHERENCY_WRITE_BACK: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, attr); + break; + } + default: + { + rmStatus = NV_ERR_INVALID_FLAGS; + break; + } + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, os02Flags)) + attr2 |= DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _YES); + else + attr2 |= DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO); + + if (FLD_TEST_DRF(OS02, _FLAGS, _KERNEL_MAPPING, _MAP, os02Flags)) + os32Flags |= NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP; + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, os02Flags)) + attr2 |= DRF_DEF(OS32, _ATTR2, _NISO_DISPLAY, _YES); + else + attr2 |= DRF_DEF(OS32, _ATTR2, _NISO_DISPLAY, _NO); + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, _YES, os02Flags)) + attr2 |= DRF_DEF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY); + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, _YES, os02Flags)) + attr2 |= DRF_DEF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY); + + if (FLD_TEST_DRF(OS02, _FLAGS, _REGISTER_MEMDESC_TO_PHYS_RM, _TRUE, os02Flags)) + attr2 |= DRF_DEF(OS32, _ATTR2, _REGISTER_MEMDESC_TO_PHYS_RM, _TRUE); + else + attr2 |= DRF_DEF(OS32, _ATTR2, _REGISTER_MEMDESC_TO_PHYS_RM, _FALSE); + + if (FLD_TEST_DRF(OS02, _FLAGS, _MEMORY_PROTECTION, _PROTECTED, os02Flags)) + { + attr2 |= DRF_DEF(OS32, _ATTR2, 
_MEMORY_PROTECTION, _PROTECTED); + } + else if (FLD_TEST_DRF(OS02, _FLAGS, _MEMORY_PROTECTION, _UNPROTECTED, os02Flags)) + { + attr2 |= DRF_DEF(OS32, _ATTR2, _MEMORY_PROTECTION, _UNPROTECTED); + } + + if (rmStatus == NV_OK) + { + *pOs32Flags = os32Flags; + *pAttr = attr; + *pAttr2 = attr2; + } + + return rmStatus; +} diff --git a/src/nvidia/interface/nv-firmware-registry.h b/src/nvidia/interface/nv-firmware-registry.h new file mode 100644 index 0000000..ab8b405 --- /dev/null +++ b/src/nvidia/interface/nv-firmware-registry.h @@ -0,0 +1,83 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// +// This file holds GPU firmware related registry key definitions that are +// shared between Windows and Unix +// + +#ifndef NV_FIRMWARE_REGISTRY_H +#define NV_FIRMWARE_REGISTRY_H + +// +// Registry key that, when enabled, will enable use of GPU firmware. +// +// Possible mode values: +// 0 - Do not enable GPU firmware +// 1 - Enable GPU firmware +// 2 - (Default) Use the default enablement policy for GPU firmware +// +// Setting this to anything other than 2 will alter driver firmware- +// enablement policies, possibly disabling GPU firmware where it would +// have otherwise been enabled by default. +// +// Policy bits: +// +// POLICY_ALLOW_FALLBACK: +// The normal behavior is to fail GPU initialization if this registry +// entry is set in such a way that results in an invalid configuration. If +// instead the user would like the driver to automatically try to fall back +// to initializing the failing GPU with firmware disabled, then this bit can +// be set (ex: 0x11 means try to enable GPU firmware but fall back if needed). +// Note that this can result in a mixed mode configuration (ex: GPU0 has +// firmware enabled, but GPU1 does not). +// +#define NV_REG_STR_ENABLE_GPU_FIRMWARE "EnableGpuFirmware" + +#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK 0x0000000F +#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DISABLED 0x00000000 +#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED 0x00000001 +#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT 0x00000002 + +#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0 +#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010 + +#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE 0x00000012
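(Annotation, not part of the patch: a minimal decoding sketch for the "EnableGpuFirmware" key above. The helper name decodeEnableGpuFirmware and the variable regVal are hypothetical; RM itself consumes this key through its own registry and DRF helpers.)

#include "nvtypes.h"

// Hedged sketch: split an "EnableGpuFirmware" value into its mode field
// (bits 3:0) and the ALLOW_FALLBACK policy bit (bit 4), using the masks above.
static void decodeEnableGpuFirmware(NvU32 regVal, NvU32 *pMode, NvBool *pAllowFallback)
{
    *pMode          = regVal & NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK;
    *pAllowFallback = (regVal & NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK) != 0;
}
// The documented example 0x11 decodes to mode ENABLED with fallback allowed;
// the default value 0x12 decodes to mode DEFAULT with fallback allowed.

+ +// +// Registry key that, when enabled, will send GPU firmware logs +// to the system log, when possible.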
+// +// Possible values: +// 0 - Do not send GPU firmware logs to the system log +// 1 - Enable sending of GPU firmware logs to the system log +// 2 - (Default) Enable sending of GPU firmware logs to the system log for +// the debug kernel driver build only +// +#define NV_REG_STR_ENABLE_GPU_FIRMWARE_LOGS "EnableGpuFirmwareLogs" + +#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_DISABLE 0x00000000 +#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE 0x00000001 +#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG 0x00000002 + +#endif // NV_FIRMWARE_REGISTRY_H diff --git a/src/nvidia/interface/nv_sriov_defines.h b/src/nvidia/interface/nv_sriov_defines.h new file mode 100644 index 0000000..1f8a2eb --- /dev/null +++ b/src/nvidia/interface/nv_sriov_defines.h @@ -0,0 +1,101 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file nv_sriov_defines.h + * @brief Defines for doorbell tokens and scratch registers index. + * + * Define:- + * - VF scratch registers index used in VGPU-GSP RPC setup + * - VF scratch registers index used in FECS tracing support + * - Doorbell interrupt handles for Guest RM to GSP-Plugin communication + * - Doorbell interrupt handles for Host CPU-Plugin to GSP-Plugin communication + */ + +// Guest interrupts + +#define NV_DOORBELL_NOTIFY_LEAF_VF_RPC_SETUP_REQUEST 0:0 +#define NV_DOORBELL_NOTIFY_LEAF_VF_RPC_SETUP_REQUEST_EN (0x1) +#define NV_DOORBELL_NOTIFY_LEAF_VF_RPC_SETUP_REQUEST_DIS (0x0) +#define NV_DOORBELL_NOTIFY_LEAF_VF_RPC_SETUP_HANDLE (0 ? NV_DOORBELL_NOTIFY_LEAF_VF_RPC_SETUP_REQUEST) + +#define NV_DOORBELL_NOTIFY_LEAF_VF_RPC_MESSAGE_REQUEST 1:1 +#define NV_DOORBELL_NOTIFY_LEAF_VF_RPC_MESSAGE_REQUEST_EN (0x1) +#define NV_DOORBELL_NOTIFY_LEAF_VF_RPC_MESSAGE_REQUEST_DIS (0x0) +#define NV_DOORBELL_NOTIFY_LEAF_VF_RPC_MESSAGE_HANDLE (0 ? NV_DOORBELL_NOTIFY_LEAF_VF_RPC_MESSAGE_REQUEST) + +#define NV_DOORBELL_NOTIFY_LEAF_VF_MASK (NVBIT(NV_DOORBELL_NOTIFY_LEAF_VF_RPC_SETUP_HANDLE) | \ + NVBIT(NV_DOORBELL_NOTIFY_LEAF_VF_RPC_MESSAGE_HANDLE)) + +// Top half serviced interrupts (serviced by both guest and host) + +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_LOCKLESS_OP_REQUEST 2:2 +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_LOCKLESS_OP_REQUEST_EN (0x1) +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_LOCKLESS_OP_REQUEST_DIS (0x0) +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_LOCKLESS_OP_HANDLE (0 ? 
NV_DOORBELL_NOTIFY_LEAF_SERVICE_LOCKLESS_OP_REQUEST) + +#define NV_DOORBELL_NOTIFY_LEAF_TOP_HALF_MASK NVBIT(NV_DOORBELL_NOTIFY_LEAF_SERVICE_LOCKLESS_OP_HANDLE) + +// Host PF interrupts + +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_TMR_REQUEST 3:3 +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_TMR_REQUEST_EN (0x1) +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_TMR_REQUEST_DIS (0x0) +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_TMR_HANDLE (0 ? NV_DOORBELL_NOTIFY_LEAF_SERVICE_TMR_REQUEST) + +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_MMU_INFO_FAULT_REQUEST 15:15 +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_MMU_INFO_FAULT_REQUEST_EN (0x1) +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_MMU_INFO_FAULT_REQUEST_DIS (0x0) +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_MMU_INFO_FAULT_HANDLE (0 ? NV_DOORBELL_NOTIFY_LEAF_SERVICE_MMU_INFO_FAULT_REQUEST) + +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_NON_REPLAYABLE_FAULT_REQUEST 16:16 +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_NON_REPLAYABLE_FAULT_REQUEST_EN (0x1) +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_NON_REPLAYABLE_FAULT_REQUEST_DIS (0x0) +#define NV_DOORBELL_NOTIFY_LEAF_SERVICE_NON_REPLAYABLE_FAULT_HANDLE (0 ? NV_DOORBELL_NOTIFY_LEAF_SERVICE_NON_REPLAYABLE_FAULT_REQUEST) + +#define NV_DOORBELL_NOTIFY_LEAF_HOST_PF_MASK (NVBIT(NV_DOORBELL_NOTIFY_LEAF_SERVICE_NON_REPLAYABLE_FAULT_HANDLE) | \ + NVBIT(NV_DOORBELL_NOTIFY_LEAF_SERVICE_TMR_HANDLE) | \ + NVBIT(NV_DOORBELL_NOTIFY_LEAF_SERVICE_MMU_INFO_FAULT_HANDLE) ) + +// Host VF interrupts + +#define NV_DOORBELL_NOTIFY_LEAF_VF_CPU_PLUGIN_REQUEST 17:17 +#define NV_DOORBELL_NOTIFY_LEAF_VF_CPU_PLUGIN_REQUEST_EN (0x1) +#define NV_DOORBELL_NOTIFY_LEAF_VF_CPU_PLUGIN_REQUEST_DIS (0x0) +#define NV_DOORBELL_NOTIFY_LEAF_VF_CPU_PLUGIN_HANDLE (0 ? NV_DOORBELL_NOTIFY_LEAF_VF_CPU_PLUGIN_REQUEST) + +#define NV_DOORBELL_NOTIFY_LEAF_HOST_VF_MASK NVBIT(NV_DOORBELL_NOTIFY_LEAF_VF_CPU_PLUGIN_HANDLE) + +// current write offset of the FECS trace buffer +#define NV_VF_SCRATCH_REGISTER_FECS_TRACE_WR_RD_OFFSET 0x0 +// current read offset of the FECS trace buffer +#define NV_VF_SCRATCH_REGISTER_FECS_TRACE_RD_RD_OFFSET 0x1 + +// used in VGPU-GSP RPC setup +#define NV_VF_SCRATCH_REGISTER_GUEST_RPC_LO 0x2 +// used in VGPU-GSP RPC setup +#define NV_VF_SCRATCH_REGISTER_GUEST_RPC_HI 0x3 + +#define MAX_PARTITIONS_WITH_GFID (48) +#define MAX_PARTITIONS_WITH_GFID_32VM (32) +#define MAX_PARTITIONS_WITH_GFID_MIG_ENABLED (7) diff --git a/src/nvidia/interface/nvacpitypes.h b/src/nvidia/interface/nvacpitypes.h new file mode 100644 index 0000000..4cb6115 --- /dev/null +++ b/src/nvidia/interface/nvacpitypes.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NVACPITYPES_H_ +#define _NVACPITYPES_H_ + +typedef enum _ACPI_DSM_FUNCTION +{ + ACPI_DSM_FUNCTION_NBSI = 0, + ACPI_DSM_FUNCTION_NVHG, + ACPI_DSM_FUNCTION_MXM, + ACPI_DSM_FUNCTION_NBCI, + ACPI_DSM_FUNCTION_NVOP, + ACPI_DSM_FUNCTION_PCFG, + ACPI_DSM_FUNCTION_GPS_2X, + ACPI_DSM_FUNCTION_JT, + ACPI_DSM_FUNCTION_PEX, + ACPI_DSM_FUNCTION_NVPCF_2X, + ACPI_DSM_FUNCTION_GPS, + ACPI_DSM_FUNCTION_NVPCF, + // insert new DSM Functions here + ACPI_DSM_FUNCTION_COUNT, + ACPI_DSM_FUNCTION_CURRENT, // pseudo option to select currently available GUID which supports the subfunction. + ACPI_DSM_FUNCTION_INVALID = 0xFF +} ACPI_DSM_FUNCTION; + +#endif // _NVACPITYPES_H_ + diff --git a/src/nvidia/interface/nvrm_registry.h b/src/nvidia/interface/nvrm_registry.h new file mode 100644 index 0000000..61e02d6 --- /dev/null +++ b/src/nvidia/interface/nvrm_registry.h @@ -0,0 +1,2967 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1997-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +// +// This file holds NVIDIA Resource Manager registry key definitions that are +// shared between Windows and Unix +// + +#ifndef NVRM_REGISTRY_H +#define NVRM_REGISTRY_H + +#include "nvtypes.h" + +#define NV_REG_STR_PCIPOWERCONTROL_MAX_LENGTH 34 +#define NV_REG_STR_PCIPOWERCONTROL_CHIPSET_LENGTH 34 +#define NV_REG_STR_PCIPOWERCONTROL_CHIPSET_GPU_LENGTH 51 +#define NV_REG_STR_ENABLE_PCIPOWERCONTROL_WITH_SUFFIX "PCIEPowerControl_" +#define NV_REG_STR_ENABLE_PCIPOWERCONTROL_WITHOUT_SUFFIX "PCIEPowerControl" + +#define NVPCIE_POWER_CONTROL_REGKEY_ENABLE 0:0 +#define NVPCIE_POWER_CONTROL_REGKEY_ENABLE_FALSE (0x00000000) +#define NVPCIE_POWER_CONTROL_REGKEY_ENABLE_TRUE (0x00000001) +#define NVPCIE_POWER_CONTROL_REGKEY_OVERRIDE_RM 1:1 +#define NVPCIE_POWER_CONTROL_REGKEY_OVERRIDE_RM_FALSE (0x00000000) +#define NVPCIE_POWER_CONTROL_REGKEY_OVERRIDE_RM_TRUE (0x00000001) +#define NVPCIE_POWER_CONTROL_REGKEY_OVERRIDE_PLATFORM 2:2 +#define NVPCIE_POWER_CONTROL_REGKEY_OVERRIDE_PLATFORM_FALSE (0x00000000) +#define NVPCIE_POWER_CONTROL_REGKEY_OVERRIDE_PLATFORM_TRUE (0x00000001) +#define NVPCIE_POWER_CONTROL_REGKEY_TURNON_COREPOWER 3:3 +#define NVPCIE_POWER_CONTROL_REGKEY_TURNON_COREPOWER_FALSE (0x00000000) +#define NVPCIE_POWER_CONTROL_REGKEY_TURNON_COREPOWER_TRUE (0x00000001) +#define NVPCIE_POWER_CONTROL_REGKEY_ASPM_ENABLE 5:4 +#define NVPCIE_POWER_CONTROL_REGKEY_ASPM_ENABLE_FALSE (0x00000000) +#define NVPCIE_POWER_CONTROL_REGKEY_ASPM_ENABLE_L0S (0x00000001) +#define NVPCIE_POWER_CONTROL_REGKEY_ASPM_ENABLE_L1 (0x00000002) +#define NVPCIE_POWER_CONTROL_REGKEY_ASPM_ENABLE_ALL (0x00000003) + +#define NVPCIE_POWER_CONTROL_REGKEY_NOT_IN_REGISTRY 31:31 +#define NVPCIE_POWER_CONTROL_REGKEY_NOT_IN_REGISTRY_FALSE (0x00000000) +#define NVPCIE_POWER_CONTROL_REGKEY_NOT_IN_REGISTRY_TRUE (0x00000001) + +// Type BINARY +// SBIOS hash key for ASPM enablement on DT +// This has no effect if added in OS registry hive or via MODS +// Encoding: +// Binary Structure: Description +// 0:0 Set to 1 to enable L0s +// 1:1 Set to 1 to enable L1 +// 7:2 Reserved +#define NV_REG_STR_RM_SBIOS_ENABLE_ASPM_DT "RMSbiosEnableASPMDT" +#define NV_REG_STR_RM_SBIOS_ENABLE_ASPM_DT_L0S 0:0 +#define NV_REG_STR_RM_SBIOS_ENABLE_ASPM_DT_L1 1:1 + +// +// Some shared defines with nvReg.h +// +#if defined(NV_UNIX) +#define NV4_REG_GLOBAL_BASE_KEY "" +#define NV4_REG_GLOBAL_BASE_PATH "_NV_" +#else +#define NV4_REG_GLOBAL_BASE_KEY HKEY_LOCAL_MACHINE +#define NV4_REG_GLOBAL_BASE_PATH "SOFTWARE\\NVIDIA Corporation\\Global" +#endif +#define NV4_REG_SUBKEY "NVidia" +#define NV4_REG_DISPLAY_DRIVER_SUBKEY "Display" +#define NV4_REG_RESOURCE_MANAGER_SUBKEY "System" + +// +// Globally overrides the memory type used to store surfaces. +// Used by all parts of the driver and stored in the hardware-specific key. +// Mirrored from nvReg.h +// +#define NV_REG_STR_GLOBAL_SURFACE_OVERRIDE "GlobalSurfaceOverrides" +#define NV_REG_STR_GLOBAL_SURFACE_OVERRIDE_DISABLE (0x00000000) // Do not use global surface overrides +#define NV_REG_STR_GLOBAL_SURFACE_OVERRIDE_ENABLE (0x00000001) +#define NV_REG_STR_GLOBAL_SURFACE_OVERRIDE_RM_VALUE 1:0 +#define NV_REG_STR_GLOBAL_SURFACE_OVERRIDE_RM_ENABLE 3:3 + + +// +// This regkey is experimental and may behave differently on specific platforms. +// DO NOT rely on it being a stable regkey to change all timeouts at once. +// +// Type Dword +// Change all RM internal timeouts to experiment with Bug 5203024. +// +// Some timeouts may still silently clamp to different min/max values and this +// regkey does NOT validate their range. +// +#define NV_REG_STR_RM_BUG5203024_OVERRIDE_TIMEOUT "RmOverrideInternalTimeoutsMs" +// Timeout value to set in milliseconds +#define NV_REG_STR_RM_BUG5203024_OVERRIDE_TIMEOUT_VALUE_MS 23:0 +// Same effect as setting "RmDefaultTimeout" to VALUE_MS +#define NV_REG_STR_RM_BUG5203024_OVERRIDE_TIMEOUT_FLAGS_SET_RM_DEFAULT_TIMEOUT 31:31 +// Same effect as setting "RmWatchDogTimeOut" to VALUE_MS (converted to seconds) +#define NV_REG_STR_RM_BUG5203024_OVERRIDE_TIMEOUT_FLAGS_SET_RC_WATCHDOG_TIMEOUT 30:30 +// Same effect as setting "RmEngineContextSwitchTimeoutUs" to VALUE_MS (converted to usec) +#define NV_REG_STR_RM_BUG5203024_OVERRIDE_TIMEOUT_FLAGS_SET_CTXSW_TIMEOUT 29:29 +// Currently has no effect +#define NV_REG_STR_RM_BUG5203024_OVERRIDE_TIMEOUT_FLAGS_SET_VIDENG_TIMEOUT 28:28 +// Currently has no effect +#define NV_REG_STR_RM_BUG5203024_OVERRIDE_TIMEOUT_FLAGS_SET_PMU_INTERNAL_TIMEOUT 27:27 +// Disables FECS watchdog (timeout value is ignored) +#define NV_REG_STR_RM_BUG5203024_OVERRIDE_TIMEOUT_FLAGS_SET_FECS_WATCHDOG_TIMEOUT 26:26
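(Annotation, not part of the patch: the HIGH:LOW values used throughout this header, e.g. 23:0 and 31:31 above, are bit-range tokens intended for RM's DRF-style macros rather than plain C arithmetic. A hand-rolled equivalent for illustration; extractField and regVal are hypothetical names.)

#include "nvtypes.h"

// Hedged sketch: recover bits high..low of a DWORD registry value.
static NvU32 extractField(NvU32 value, NvU32 high, NvU32 low)
{
    NvU32 width = high - low + 1u;
    NvU32 mask  = (width == 32u) ? 0xFFFFFFFFu : ((1u << width) - 1u);
    return (value >> low) & mask;
}
// e.g. extractField(regVal, 23, 0) recovers the VALUE_MS field of the
// "RmOverrideInternalTimeoutsMs" key above. Note also that a bare H:L token
// is valid inside a conditional expression, so (0 ? H:L) evaluates to L and
// (1 ? H:L) to H; that ternary idiom is how the doorbell *_HANDLE defines in
// nv_sriov_defines.h turn a bit range into the bit index NVBIT() expects.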
+ + +// +// Type Dword +// Override default RM timeout. Measured in milliseconds. +// Not scaled for emulation +// +#define NV_REG_STR_RM_DEFAULT_TIMEOUT_MS "RmDefaultTimeout" + + +// +// Type Dword +// Override default RM timeout flags to either OSDELAY or OSTIMER. +// +#define NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS "RmDefaultTimeoutFlags" +#define NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS_OSTIMER 4 +#define NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS_OSDELAY 8 + + +#define NV_REG_STR_SUPPRESS_CLASS_LIST "SuppressClassList" +// Type String +// A list of comma-separated classes to suppress +// examples: +// 5097 +// 4097, 5097 +// etc + + +// +// Allow instance memory overrides. Some fields are chip specific +// and may not apply to all chips. Since there are many fields, +// this is spread across several DWORD registry keys. +// +// Type DWORD +// Encoding: +// DEFAULT RM determines +// COH Coherent system memory +// NCOH Non-coherent system memory +// VID Local video memory +// +#define NV_REG_STR_RM_INST_LOC "RMInstLoc" +#define NV_REG_STR_RM_INST_LOC_2 "RMInstLoc2" +#define NV_REG_STR_RM_INST_LOC_3 "RMInstLoc3" +#define NV_REG_STR_RM_INST_LOC_4 "RMInstLoc4" + +#define NV_REG_STR_RM_INST_LOC_DEFAULT (0x00000000) +#define NV_REG_STR_RM_INST_LOC_COH (0x00000001) +#define NV_REG_STR_RM_INST_LOC_NCOH (0x00000002) +#define NV_REG_STR_RM_INST_LOC_VID (0x00000003) + +#define NV_REG_STR_RM_INST_LOC_ALL_DEFAULT (0x00000000) +#define NV_REG_STR_RM_INST_LOC_ALL_COH (0x55555555) +#define NV_REG_STR_RM_INST_LOC_ALL_NCOH (0xAAAAAAAA) +#define NV_REG_STR_RM_INST_LOC_ALL_VID (0xFFFFFFFF) + +// +// Allow instance memory overrides. Some fields are chip specific +// and may not apply to all chips. Since there are many fields, +// this is spread across several DWORD registry keys. +// +// The registry keys are defined in nvrm_registry. +// Specific overrides are defined here. +// +// Type DWORD +// Encoding: +// DEFAULT RM determines +// COH Coherent system memory +// NCOH Non-coherent system memory +// VID Local video memory +//
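(Annotation: each override below is a 2-bit selector packed into one of the four RMInstLoc DWORDs, which is why the _ALL_COH/_ALL_NCOH/_ALL_VID values above are simply one 2-bit code replicated across all sixteen fields. A hedged sketch with hypothetical names instLocField and instLoc:)

#include "nvtypes.h"

// Hedged sketch: pull one 2-bit aperture selector out of an RMInstLoc DWORD.
// fieldLow is the low bit of the field, e.g. 6 for the RAMFC field (7:6) below.
static NvU32 instLocField(NvU32 instLoc, NvU32 fieldLow)
{
    return (instLoc >> fieldLow) & 0x3u; // 0=DEFAULT, 1=COH, 2=NCOH, 3=VID
}
// e.g. setting "RMInstLoc" to 0x55555555 (_ALL_COH) makes every field decode
// to NV_REG_STR_RM_INST_LOC_COH.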
+#define NV_REG_STR_RM_INST_LOC_PTE 1:0 // Context PTE +#define NV_REG_STR_RM_INST_LOC_PTE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_PTE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_PTE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_PTE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_BAR_PTE 3:2 // BAR PTE +#define NV_REG_STR_RM_INST_LOC_BAR_PTE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_BAR_PTE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_BAR_PTE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_BAR_PTE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_INSTBLK 5:4 // Instance block +#define NV_REG_STR_RM_INST_LOC_INSTBLK_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_INSTBLK_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_INSTBLK_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_INSTBLK_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_RAMFC 7:6 // RAMFC save area +#define NV_REG_STR_RM_INST_LOC_RAMFC_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_RAMFC_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_RAMFC_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_RAMFC_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_CACHE1 9:8 // CACHE1 save area +#define NV_REG_STR_RM_INST_LOC_CACHE1_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_CACHE1_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_CACHE1_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_CACHE1_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_GRCTX 11:10 // Graphics context +#define NV_REG_STR_RM_INST_LOC_GRCTX_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_GRCTX_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_GRCTX_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_GRCTX_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_RUNLIST 13:12 // Runlist +#define NV_REG_STR_RM_INST_LOC_RUNLIST_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_RUNLIST_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_RUNLIST_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_RUNLIST_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_DISPLAY 15:14 // Display +#define NV_REG_STR_RM_INST_LOC_DISPLAY_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_DISPLAY_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_DISPLAY_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_DISPLAY_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_USERD 17:16 // USERD +#define NV_REG_STR_RM_INST_LOC_USERD_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_USERD_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_USERD_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_USERD_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_EVENTBUFFER 19:18 // EVENTBUFFER +#define NV_REG_STR_RM_INST_LOC_EVENTBUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_EVENTBUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define
NV_REG_STR_RM_INST_LOC_EVENTBUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_EVENTBUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_UNUSED 21:20 // UNUSED +#define NV_REG_STR_RM_INST_LOC_UNUSED_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_CIPHER_XCHG 23:22 // Cipher exchange memory resources +#define NV_REG_STR_RM_INST_LOC_CIPHER_XCHG_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_CIPHER_XCHG_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_CIPHER_XCHG_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_CIPHER_XCHG_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_PDE 25:24 // Context PDE +#define NV_REG_STR_RM_INST_LOC_PDE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_PDE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_PDE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_PDE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_BAR_PDE 27:26 // BAR PDE +#define NV_REG_STR_RM_INST_LOC_BAR_PDE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_BAR_PDE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_BAR_PDE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_BAR_PDE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_PMUINST 29:28 // PMUINST +#define NV_REG_STR_RM_INST_LOC_PMUINST_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_PMUINST_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_PMUINST_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_PMUINST_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_PMUUCODE 31:30 // PMU UCODE +#define NV_REG_STR_RM_INST_LOC_PMUUCODE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_PMUUCODE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_PMUUCODE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_PMUUCODE_VID NV_REG_STR_RM_INST_LOC_VID + +#define NV_REG_STR_RM_INST_LOC_2_COMPTAG_STORE 1:0 // Compbit backing store +#define NV_REG_STR_RM_INST_LOC_2_COMPTAG_STORE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_COMPTAG_STORE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_COMPTAG_STORE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_COMPTAG_STORE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_ATTR_CB 3:2 // Attribute Circular Buffer +#define NV_REG_STR_RM_INST_LOC_2_ATTR_CB_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_ATTR_CB_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_ATTR_CB_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_ATTR_CB_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_BUNDLE_CB 5:4 // Bundle Circular Buffer +#define NV_REG_STR_RM_INST_LOC_2_BUNDLE_CB_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_BUNDLE_CB_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_BUNDLE_CB_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_BUNDLE_CB_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_PAGEPOOL 7:6 // Pagepool Buffer +#define NV_REG_STR_RM_INST_LOC_2_PAGEPOOL_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_PAGEPOOL_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_PAGEPOOL_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define 
NV_REG_STR_RM_INST_LOC_2_PAGEPOOL_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_GOLD_CTX 9:8 // Golden Context Image +#define NV_REG_STR_RM_INST_LOC_2_GOLD_CTX_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_GOLD_CTX_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_GOLD_CTX_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_GOLD_CTX_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_BAR_CTX 11:10 // Bar context aperture +#define NV_REG_STR_RM_INST_LOC_2_BAR_CTX_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_BAR_CTX_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_BAR_CTX_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_BAR_CTX_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_UNUSED 13:12 // Unused +#define NV_REG_STR_RM_INST_LOC_2_CTX_PATCH 15:14 // context patch +#define NV_REG_STR_RM_INST_LOC_2_CTX_PATCH_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_CTX_PATCH_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_CTX_PATCH_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_CTX_PATCH_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_MMU_READ 17:16 // MMU Read +#define NV_REG_STR_RM_INST_LOC_2_MMU_READ_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_MMU_READ_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_MMU_READ_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_MMU_READ_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_MMU_WRITE 19:18 // MMU Write +#define NV_REG_STR_RM_INST_LOC_2_MMU_WRITE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_MMU_WRITE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_MMU_WRITE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_MMU_WRITE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_UNUSED2 21:20 // Unused +#define NV_REG_STR_RM_INST_LOC_2_ZCULLCTX 23:22 // zcull context buffer +#define NV_REG_STR_RM_INST_LOC_2_ZCULLCTX_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_ZCULLCTX_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_ZCULLCTX_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_ZCULLCTX_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_PMCTX 25:24 // PM context buffer +#define NV_REG_STR_RM_INST_LOC_2_PMCTX_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_PMCTX_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_PMCTX_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_PMCTX_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_DPUDBG 27:26 // DPU Debug/Falctrace Buffer +#define NV_REG_STR_RM_INST_LOC_2_DPUDBG_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_DPUDBG_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_DPUDBG_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_DPUDBG_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_PMUPG 29:28 // PMU PG buffer +#define NV_REG_STR_RM_INST_LOC_2_PMUPG_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_PMUPG_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_PMUPG_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_PMUPG_VID NV_REG_STR_RM_INST_LOC_VID +#define 
NV_REG_STR_RM_INST_LOC_2_PMU_LOGGER 31:30 +#define NV_REG_STR_RM_INST_LOC_2_PMU_LOGGER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_PMU_LOGGER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_PMU_LOGGER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_PMU_LOGGER_VID NV_REG_STR_RM_INST_LOC_VID + +#define NV_REG_STR_RM_INST_LOC_3_PG_LOG_SURFACE 1:0 // PG log surface +#define NV_REG_STR_RM_INST_LOC_3_PG_LOG_SURFACE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_PG_LOG_SURFACE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_PG_LOG_SURFACE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_PG_LOG_SURFACE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_PREEMPT_BUFFER 3:2 // Preemption buffer +#define NV_REG_STR_RM_INST_LOC_3_PREEMPT_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_PREEMPT_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_PREEMPT_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_PREEMPT_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_GFXP_BETACB_BUFFER 5:4 // GFXP BetaCB buffer +#define NV_REG_STR_RM_INST_LOC_3_GFXP_BETACB_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_GFXP_BETACB_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_BETACB_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_BETACB_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_GFXP_PAGEPOOL_BUFFER 7:6 // GFXP Pagepool buffer +#define NV_REG_STR_RM_INST_LOC_3_GFXP_PAGEPOOL_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_GFXP_PAGEPOOL_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_PAGEPOOL_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_PAGEPOOL_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_BSI_IMAGE 9:8 // BSI RAM image +#define NV_REG_STR_RM_INST_LOC_3_BSI_IMAGE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_BSI_IMAGE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_BSI_IMAGE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_BSI_IMAGE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_PRIV_ACCESS_MAP 11:10 // Priv whitelist buffer +#define NV_REG_STR_RM_INST_LOC_3_PRIV_ACCESS_MAP_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_PRIV_ACCESS_MAP_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_PRIV_ACCESS_MAP_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_PRIV_ACCESS_MAP_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_SEC2DBG 13:12 // SEC2 Debug/Falctrace Buffer +#define NV_REG_STR_RM_INST_LOC_3_SEC2DBG_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_SEC2DBG_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_SEC2DBG_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_SEC2DBG_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_FECS_UCODE 15:14 // FECS UCODE +#define NV_REG_STR_RM_INST_LOC_3_FECS_UCODE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_FECS_UCODE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_FECS_UCODE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_FECS_UCODE_VID 
NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_GFXP_SPILL_BUFFER 17:16 // GFXP Spill buffer +#define NV_REG_STR_RM_INST_LOC_3_GFXP_SPILL_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_GFXP_SPILL_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_SPILL_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_SPILL_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_UVM_FAULT_BUFFER_NONREPLAYABLE 19:18 // UVM Non-Replayable fault buffer +#define NV_REG_STR_RM_INST_LOC_3_UVM_FAULT_BUFFER_NONREPLAYABLE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_UVM_FAULT_BUFFER_NONREPLAYABLE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_UVM_FAULT_BUFFER_NONREPLAYABLE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_UVM_FAULT_BUFFER_NONREPLAYABLE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_BAR_SCRATCH_PAGE 21:20 // BAR scratch pages +#define NV_REG_STR_RM_INST_LOC_3_BAR_SCRATCH_PAGE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_BAR_SCRATCH_PAGE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_BAR_SCRATCH_PAGE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_BAR_SCRATCH_PAGE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_FLCNINST 23:22 // FLCNINST +#define NV_REG_STR_RM_INST_LOC_3_FLCNINST_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_FLCNINST_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_FLCNINST_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_FLCNINST_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_RTVCB_BUFFER 25:24 // RTVCB buffer +#define NV_REG_STR_RM_INST_LOC_3_RTVCB_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_RTVCB_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_RTVCB_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_RTVCB_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_GFXP_RTVCB_BUFFER 27:26 // GFXP RTVCB buffer +#define NV_REG_STR_RM_INST_LOC_3_GFXP_RTVCB_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_GFXP_RTVCB_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_RTVCB_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_RTVCB_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER 29:28 // Fault method buffer +#define NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_PMU_DPU_DMA 31:30 // PMU/DPU DMA transfers +#define NV_REG_STR_RM_INST_LOC_3_PMU_DPU_DMA_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_PMU_DPU_DMA_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_PMU_DPU_DMA_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_PMU_DPU_DMA_VID NV_REG_STR_RM_INST_LOC_VID + +#define NV_REG_STR_RM_INST_LOC_4_DISP_SC 1:0 // Display state cache buffer +#define NV_REG_STR_RM_INST_LOC_4_DISP_SC_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define
NV_REG_STR_RM_INST_LOC_4_DISP_SC_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_DISP_SC_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_DISP_SC_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER 3:2 // FIFO channel push buffer +#define NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_4_FW_SEC_LIC_COMMAND 5:4 // Firmware security license command +#define NV_REG_STR_RM_INST_LOC_4_FW_SEC_LIC_COMMAND_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_FW_SEC_LIC_COMMAND_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_FW_SEC_LIC_COMMAND_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_FW_SEC_LIC_COMMAND_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_4_VRDS 7:6 // VBIOS runtime data security +#define NV_REG_STR_RM_INST_LOC_4_VRDS_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_VRDS_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_VRDS_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_VRDS_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_4_FLCN_UCODE_BUFFERS 9:8 // Falcon uCode buffers +#define NV_REG_STR_RM_INST_LOC_4_FLCN_UCODE_BUFFERS_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_FLCN_UCODE_BUFFERS_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_FLCN_UCODE_BUFFERS_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_FLCN_UCODE_BUFFERS_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_4_UVM_FAULT_BUFFER_REPLAYABLE 11:10 // UVM Replayable fault buffer +#define NV_REG_STR_RM_INST_LOC_4_UVM_FAULT_BUFFER_REPLAYABLE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_UVM_FAULT_BUFFER_REPLAYABLE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_UVM_FAULT_BUFFER_REPLAYABLE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_UVM_FAULT_BUFFER_REPLAYABLE_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Separately define instance block location of BARs. Default Setting +// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK +// +#define NV_REG_STR_RM_INST_LOC_4_BAR 13:12 // BAR Bind location +#define NV_REG_STR_RM_INST_LOC_4_BAR_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_BAR_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_BAR_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_BAR_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Separately define instance block location of async CEs. Default Setting +// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK +// +#define NV_REG_STR_RM_INST_LOC_4_CE 15:14 // Async CE Bind location +#define NV_REG_STR_RM_INST_LOC_4_CE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_CE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_CE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_CE_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Separately define instance block location of GR/GRCE. 
Default Setting +// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK +// +#define NV_REG_STR_RM_INST_LOC_4_GR 17:16 // GR/GRCE Bind location +#define NV_REG_STR_RM_INST_LOC_4_GR_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_GR_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_GR_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_GR_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Separately define instance block location of VEs. Default Setting +// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK +// +#define NV_REG_STR_RM_INST_LOC_4_FALCON 19:18 // FALCON Bind location +#define NV_REG_STR_RM_INST_LOC_4_FALCON_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_FALCON_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_FALCON_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_FALCON_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Separately define instance block location of HWPM PMA. Default Setting +// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK +// +#define NV_REG_STR_RM_INST_LOC_4_HWPM_PMA 21:20 // HWPM PMA Bind location +#define NV_REG_STR_RM_INST_LOC_4_HWPM_PMA_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_HWPM_PMA_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_HWPM_PMA_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_HWPM_PMA_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Separately define instance block location of the FECS event buffer. Default Setting +// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK +// +#define NV_REG_STR_RM_INST_LOC_4_FECS_EVENT_BUF 23:22 // FECS EVENT buffer location +#define NV_REG_STR_RM_INST_LOC_4_FECS_EVENT_BUF_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_FECS_EVENT_BUF_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_FECS_EVENT_BUF_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_FECS_EVENT_BUF_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Overrides for the GFXP SETUP buffer +// +#define NV_REG_STR_RM_INST_LOC_4_GFXP_SETUP_BUFFER 25:24 // GFXP SETUP buffer +#define NV_REG_STR_RM_INST_LOC_4_GFXP_SETUP_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_GFXP_SETUP_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_GFXP_SETUP_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_GFXP_SETUP_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Overrides for the VIDEO ENGINE BOOTARGS buffer +// +#define NV_REG_STR_RM_INST_LOC_4_VIDEO_ENGINE_BOOTARGS 27:26 // VIDEO_ENGINE BOOTARGS buffer +#define NV_REG_STR_RM_INST_LOC_4_VIDEO_ENGINE_BOOTARGS_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_VIDEO_ENGINE_BOOTARGS_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_VIDEO_ENGINE_BOOTARGS_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_VIDEO_ENGINE_BOOTARGS_VID NV_REG_STR_RM_INST_LOC_VID + +#define NV_REG_STR_RM_GSP_STATUS_QUEUE_SIZE "RmGspStatusQueueSize" +// TYPE DWORD +// Set the GSP status queue size in KB (for GSP to CPU RPC status and event communication) + +#define NV_REG_STR_RM_MSG "RmMsg" +// Type String: Set parameters for RM DBG_PRINTF. Only for builds with printfs enabled. +// Encoding: +// rule = [!][filename|function][:startline][-endline] +// Format = rule[,rule]
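(Annotation: illustrative RmMsg strings under the grammar above; the file and function names are invented for the example.)

// Hypothetical examples of the rule grammar:
//   RmMsg="somefile.c"               enable prints from somefile.c
//   RmMsg="somefile.c:100-200"       only lines 100-200 of somefile.c
//   RmMsg="!someFunction"            suppress prints from someFunction
//   RmMsg="somefile.c,!someFunction" rules are comma-separated and combined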
+ + +#define NV_REG_STR_RM_THREAD_STATE_SETUP_FLAGS "RmThreadStateSetupFlags" +// Type DWORD +// Enables or disables various ThreadState features +// See resman/inc/kernel/core/thread_state.h for +// THREAD_STATE_SETUP_FLAGS values. + + +#define NV_REG_STR_RM_ENABLE_EVENT_TRACER "RMEnableEventTracer" +#define NV_REG_STR_RM_ENABLE_EVENT_TRACER_DISABLE 0 +#define NV_REG_STR_RM_ENABLE_EVENT_TRACER_ENABLE 1 +#define NV_REG_STR_RM_ENABLE_EVENT_TRACER_DEFAULT NV_REG_STR_RM_ENABLE_EVENT_TRACER_DISABLE +// Type DWORD +// Encoding boolean +// Enable/Disable RM event tracing +// 0 - Disable RM event tracing +// 1 - Enable RM event tracing + + +#define NV_REG_STR_RM_COMPUTE_MODE_RULES "RmComputeModeRules" +// Type DWORD +// Saves the last compute mode rule set by the client. +// Encoding: +// Bits 31:0 : Last compute mode rule set by the client + +#define NV_REG_STR_ILLUM_ATTRIB_LOGO_BRIGHTNESS "RmIllumLogoBrightness" +// Type DWORD: +// contains Logo Illumination Brightness in percent to be used on driver load. + +#define NV_REG_STR_ILLUM_ATTRIB_SLI_BRIGHTNESS "RmIllumSLIBrightness" +// Type DWORD: +// contains SLI Illumination Brightness in percent to be used on driver load. + + +#define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_1 "RMNvLogExtraBuffer1" +// #define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_2 "RMNvLogExtraBuffer2" +// #define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_3 "RMNvLogExtraBuffer3" +// #define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_4 "RMNvLogExtraBuffer4" +// #define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_5 "RMNvLogExtraBuffer5" +// #define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_6 "RMNvLogExtraBuffer6" +// Type DWORD +// Used to specify up to 6 additional logging buffers +// Encoding: +// _BUFFER_FLAGS +// x: uses NVLOG_BUFFER_FLAGS fields, for main nvlog buffer +// _BUFFER_SIZE +// n: Size of main buffer, in kilobytes + + +// Type DWORD +// This can be used for dumping NvLog buffers (in /var/log/vmkernel.log ), when +// we hit critical XIDs, e.g. 31/79. +#define NV_REG_STR_RM_DUMP_NVLOG "RMDumpNvLog" +#define NV_REG_STR_RM_DUMP_NVLOG_DEFAULT (0x00000000) +#define NV_REG_STR_RM_DUMP_NVLOG_DISABLE (0x00000000) +#define NV_REG_STR_RM_DUMP_NVLOG_ENABLE (0x00000001) + +// Type: Binary +// Comma-separated list of XID values to suppress from the kernel log +// example: +// RmSuppressXidDump="43,31" +#define NV_REG_SUPPRESS_XID_DUMP "RmSuppressXidDump" +#define MAX_XID_SUPPRESS_KEY_LENGTH 64 + +// +// Type DWORD +// RM external fabric management. +// +// RM currently uses nvlink core driver APIs which internally trigger +// link initialization and training. However, the nvlink core driver now exposes +// a set of APIs for managing the nvlink fabric externally (from user mode). +// +// When the regkey is enabled, RM will skip use of APIs which trigger +// link initialization and training. In that case, link training needs to be +// triggered externally.
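(Annotation: a hedged sketch of how the mode bit defined just below might be tested; isExternalFabricMgmtEnabled and regVal are hypothetical names, and RM itself consumes the field through its DRF helpers.)

#include "nvtypes.h"

// Hedged sketch: "RMExternalFabricMgmt" carries a single mode bit at 0:0.
static NvBool isExternalFabricMgmtEnabled(NvU32 regVal)
{
    return (regVal & 0x1u) == NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT_MODE_ENABLE;
}
// When this returns NV_TRUE, RM skips the nvlink core APIs that trigger link
// initialization and training, leaving training to an external fabric manager.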
+// +#define NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT "RMExternalFabricMgmt" +#define NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT_MODE 0:0 +#define NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT_MODE_ENABLE (0x00000001) +#define NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT_MODE_DISABLE (0x00000000) + + +// +// Type DWORD +// BIT 1:0: All Data validation +// 0 - Default +// 1 - Validate the kernel data - enable all below +// 2 - Do not validate the kernel data - disable all below +// BIT 3:2: Buffer validation +// 0 - Default +// 1 - Validate the kernel buffers +// 2 - Do not validate the kernel buffers +// BIT 5:4: Handle validation +// 0 - Default +// 1 - Validate the handles +// 2 - Do not validate the handles +// BIT 7:6: Strict client validation +// 0 - Default +// 1 - Enable strict client validation +// 2 - Do not enable strict client validation +// +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION "RmValidateClientData" +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_ALL 1:0 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_ALL_DEFAULT 0x00000000 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_ALL_ENABLED 0x00000001 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_ALL_DISABLED 0x00000002 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_KERNEL_BUFFERS 3:2 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_KERNEL_BUFFERS_DEFAULT 0x00000000 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_KERNEL_BUFFERS_ENABLED 0x00000001 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_KERNEL_BUFFERS_DISABLED 0x00000002 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_HANDLE 5:4 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_HANDLE_DEFAULT 0x00000000 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_HANDLE_ENABLED 0x00000001 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_HANDLE_DISABLED 0x00000002 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_STRICT_CLIENT 7:6 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_STRICT_CLIENT_DEFAULT 0x00000000 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_STRICT_CLIENT_ENABLED 0x00000001 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_STRICT_CLIENT_DISABLED 0x00000002 + + +// +// Type: DWORD +// +// This regkey configures thread priority boosting whenever +// the thread is holding a GPU lock. +// +#define NV_REG_STR_RM_PRIORITY_BOOST "RMPriorityBoost" +#define NV_REG_STR_RM_PRIORITY_BOOST_DISABLE 0x00000000 +#define NV_REG_STR_RM_PRIORITY_BOOST_ENABLE 0x00000001 +#define NV_REG_STR_RM_PRIORITY_BOOST_DEFAULT NV_REG_STR_RM_PRIORITY_BOOST_DISABLE + + +// +// Type: DWORD +// +// This regkey configures the delay (us) before a boosted thread is throttled +// down. +// +// Default value: 0 (Disable) +// +#define NV_REG_STR_RM_PRIORITY_THROTTLE_DELAY "RMPriorityThrottleDelay" +#define NV_REG_STR_RM_PRIORITY_THROTTLE_DELAY_DISABLE 0x00000000 + + +// +// Type DWORD +// Enable support for CUDA Stream Memory Operations in user-mode applications. +// +// BIT 0:0 - Feature enablement +// 0 - disable feature (default) +// 1 - enable feature +// +#define NV_REG_STR_RM_STREAM_MEMOPS "RmStreamMemOps" +#define NV_REG_STR_RM_STREAM_MEMOPS_ENABLE 0:0 +#define NV_REG_STR_RM_STREAM_MEMOPS_ENABLE_YES 1 +#define NV_REG_STR_RM_STREAM_MEMOPS_ENABLE_NO 0 + + +// +// Type DWORD: Enable read-only RMAPI locks for select interfaces +// +// Setting an interface to 0 will disable read-only API locks for that interface +// Setting an interface to 1 will enable read-only API locks for that interface, +// however, RM may still choose to take a read-write lock if it needs to. 
+//
+#define NV_REG_STR_RM_READONLY_API_LOCK "RmRoApiLock"
+#define NV_REG_STR_RM_READONLY_API_LOCK_ALLOC_RESOURCE 1:1
+#define NV_REG_STR_RM_READONLY_API_LOCK_ALLOC_RESOURCE_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_ALLOC_RESOURCE_DISABLE (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_ALLOC_RESOURCE_ENABLE (0x00000001)
+#define NV_REG_STR_RM_READONLY_API_LOCK_FREE_RESOURCE 2:2
+#define NV_REG_STR_RM_READONLY_API_LOCK_FREE_RESOURCE_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_FREE_RESOURCE_DISABLE (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_FREE_RESOURCE_ENABLE (0x00000001)
+#define NV_REG_STR_RM_READONLY_API_LOCK_MAP 3:3
+#define NV_REG_STR_RM_READONLY_API_LOCK_MAP_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_MAP_DISABLE (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_MAP_ENABLE (0x00000001)
+#define NV_REG_STR_RM_READONLY_API_LOCK_UNMAP 4:4
+#define NV_REG_STR_RM_READONLY_API_LOCK_UNMAP_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_UNMAP_DISABLE (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_UNMAP_ENABLE (0x00000001)
+#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_MAP 5:5
+#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_MAP_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_MAP_DISABLE (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_MAP_ENABLE (0x00000001)
+#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_UNMAP 6:6
+#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_UNMAP_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_UNMAP_DISABLE (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_UNMAP_ENABLE (0x00000001)
+#define NV_REG_STR_RM_READONLY_API_LOCK_COPY 7:7
+#define NV_REG_STR_RM_READONLY_API_LOCK_COPY_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_COPY_DISABLE (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_COPY_ENABLE (0x00000001)
+#define NV_REG_STR_RM_READONLY_API_LOCK_SHARE 8:8
+#define NV_REG_STR_RM_READONLY_API_LOCK_SHARE_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_SHARE_DISABLE (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_SHARE_ENABLE (0x00000001)
+#define NV_REG_STR_RM_READONLY_API_LOCK_CTRL 9:9
+#define NV_REG_STR_RM_READONLY_API_LOCK_CTRL_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_CTRL_DISABLE (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_CTRL_ENABLE (0x00000001)
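+//
+// Example (illustrative only, not part of the original header): each
+// interface field above is a single bit, so to enable read-only API locks
+// for MAP (bit 3) and CTRL (bit 9) only, set
+// RmRoApiLock = (1 << 3) | (1 << 9) = 0x00000208.
+//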
+
+
+//
+// Type DWORD: Enable read-only RMAPI locks for select modules
+//
+// Setting a module's field to 0 will disable read-only API locks for that module
+// Setting a module's field to 1 will enable read-only API locks for that module,
+// however, RM may still choose to take a read-write lock if it needs to.
+//
+#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE "RmRoApiLockModule"
+#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_GPU_OPS 0:0
+#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_GPU_OPS_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_GPU_OPS_DISABLE (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_GPU_OPS_ENABLE (0x00000001)
+#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_WORKITEM 1:1
+#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_WORKITEM_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_WORKITEM_DISABLE (0x00000000)
+#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_WORKITEM_ENABLE (0x00000001)
+
+
+//
+// Type DWORD: Enable read-only GPU locks for select modules
+//
+// Setting a module's field to 0 will disable read-only GPU locks for that module
+// Setting a module's field to 1 will enable read-only GPU locks for that module,
+// however, RM may still choose to take a read-write lock if it needs to.
+//
+#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE "RmRoGpuLockModule"
+#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_GPU_OPS 0:0
+#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_GPU_OPS_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_GPU_OPS_DISABLE (0x00000000)
+#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_GPU_OPS_ENABLE (0x00000001)
+#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_WORKITEM 1:1
+#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_WORKITEM_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_WORKITEM_DISABLE (0x00000000)
+#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_WORKITEM_ENABLE (0x00000001)
+
+
+// Mode for CACHEABLE rmapi control
+// RMCTRL cache mode defined in ctrl0000system.h
+#define NV_REG_STR_RM_CACHEABLE_CONTROLS "RmEnableCacheableControls"
+
+// Type DWORD
+// For Maxwell+, this regkey forces RM to wait for the FB pull on FB unload
+// before issuing the L2 clean. WAR for bug 1032432
+#define NV_REG_STR_RM_L2_CLEAN_FB_PULL "RmL2CleanFbPull"
+#define NV_REG_STR_RM_L2_CLEAN_FB_PULL_ENABLED (0x00000000)
+#define NV_REG_STR_RM_L2_CLEAN_FB_PULL_DISABLED (0x00000001)
+#define NV_REG_STR_RM_L2_CLEAN_FB_PULL_DEFAULT (0x00000000)
+
+//
+// Type: DWORD
+// This regkey overrides BL8, 16, and 24 kinds to only be of GENERIC_MEMORY or
+// GENERIC_MEMORY_COMPRESSIBLE kinds.
+// 0 - No override
+// > 0 - Override memkind to GMK
+// bit 0: override BL8 type
+// bit 1: override BL16 type
+// bit 2: override BL24 type
+// ex. 0x00000007 means override all types
+#define NV_REG_STR_RM_OVERRIDE_TO_GMK "RMOverrideToGMK"
+#define NV_REG_STR_RM_OVERRIDE_TO_GMK_DISABLED (0x00000000)
+#define NV_REG_STR_RM_OVERRIDE_TO_GMK_BL8 (0x00000001)
+#define NV_REG_STR_RM_OVERRIDE_TO_GMK_BL16 (0x00000002)
+#define NV_REG_STR_RM_OVERRIDE_TO_GMK_BL24 (0x00000004)
+#define NV_REG_STR_RM_OVERRIDE_TO_GMK_ALL (0x00000007)
+
+// Enable backtrace dumping at assertion failure.
+// If physical RM or RCDB is unavailable, then this regkey controls the behaviour of backtrace
+// printing.
+// 0: disable +// 1 (default): only print unique backtraces, identified by instruction pointer of the failed assert +// 2: print all +#define NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE "RmPrintAssertBacktrace" +#define NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_DISABLE 0 +#define NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_UNIQUE 1 +#define NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_ENABLE 2 + + +// +// Type DWORD +// Used to enable no locking on copy +// +#define NV_REG_STR_RM_PARAM_COPY_NO_LOCK "RMParamCopyNoLock" + +// +// Type DWORD +// Used to control RM API lock aging for low priority acquires. +// If 0, low priority acquires (e.g. from cleanup when a process dies) +// are disabled and treated like regular ones. +// Otherwise, they will yield the lock this many times to the higher priority +// threads before proceeding. +// Off by default; 3 would be a good starting value if the feature is desired. +// +#define NV_REG_STR_RM_LOCKING_LOW_PRIORITY_AGING "RMLockingLowPriorityAging" + +// +// Type DWORD +// This regkey restricts profiling capabilities (creation of profiling objects +// and access to profiling-related registers) to admin only. +// 0 - (default - disabled) +// 1 - Enables admin check +// +#define NV_REG_STR_RM_PROFILING_ADMIN_ONLY "RmProfilingAdminOnly" +#define NV_REG_STR_RM_PROFILING_ADMIN_ONLY_FALSE 0x00000000 +#define NV_REG_STR_RM_PROFILING_ADMIN_ONLY_TRUE 0x00000001 + + +#define NV_REG_STR_GPU_BROKEN_FB "nvBrokenFb" +#define NV_REG_STR_GPU_BROKEN_FB_ALL_OKAY 0x00000000 +#define NV_REG_STR_GPU_BROKEN_FB_ALL_BROKEN 0xffffffff +#define NV_REG_STR_GPU_BROKEN_FB_DEFAULT NV_REG_STR_GPU_BROKEN_FB_ALL_OKAY +#define NV_REG_STR_GPU_BROKEN_FB_DEFAULT_GF100_A01 NV_REG_STR_GPU_BROKEN_FB_MEMORY_BROKEN +#define NV_REG_STR_GPU_BROKEN_FB_MEMORY 0:0 +#define NV_REG_STR_GPU_BROKEN_FB_MEMORY_OKAY 0x00000000 +#define NV_REG_STR_GPU_BROKEN_FB_MEMORY_BROKEN 0x00000001 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_CPU 1:1 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_CPU_OKAY 0x00000000 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_CPU_BROKEN 0x00000001 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_PMU 2:2 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_PMU_OKAY 0x00000000 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_PMU_BROKEN 0x00000001 +// Type DWORD +// _ALL_OKAY: FB is not broken. All is okay. +// _ALL_BROKEN: FB is broken and no software will try to use it. +// _MEMORY: Memory itself can/cannot be accessed. (PDB_PROP_GPU_BROKEN_FB property) +// _REG_VIA_CPU: CPU can/cannot access FBPA/FBIO registers. (PDB_PROP_GPU_BROKEN_FB_REG_VIA_CPU property) +// _REG_VIA_PMU: PMU can/cannot access FBPA/FBIO registers. (PDB_PROP_GPU_BROKEN_FB_REG_VIA_PMU property) +// FBPA/FBIO register addresses are defined by gpuIsBrokenFbReg(). +// Note that if the CPU and the PMU can't access registers, then memory isn't going to work either. +// In other words, the only even number that makes sense for this regkey is zero. +// Default depends on the chip and mask revision. + +#define NV_REG_STR_OVERRIDE_FB_SIZE "OverrideFbSize" +// Type Dword +// Encoding Numeric Value +// Size in MB +// Used to reduce FB for testing memory management +// +#define NV_REG_STR_OVERRIDE_FB_SIZE_DEFAULT 0 + +// +// TYPE DWORD +// This regkey helps increase the size of RM reserved region. +// Exposed to clients for bug 2404337. +// Note: In GSP builds this key applies to the kernel (CPU) RM only. 
+//
+#define NV_REG_STR_RM_INCREASE_RSVD_MEMORY_SIZE_MB "RMIncreaseRsvdMemorySizeMB"
+#define NV_REG_STR_RM_INCREASE_RSVD_MEMORY_SIZE_MB_DEFAULT 0x0
+
+// TYPE Dword
+// Determines whether or not RM reserved space should be increased.
+// 1 - Increases RM reserved space
+// 0 - (default) Keeps RM reserved space as it is.
+
+#define NV_REG_STR_RM_DISABLE_SCRUB_ON_FREE "RMDisableScrubOnFree"
+// Type DWORD
+// Encoding 0 (default) - Scrub on free
+// 1 - Disable Scrub on Free
+
+#define NV_REG_STR_RM_DISABLE_FAST_SCRUBBER "RMDisableFastScrubber"
+// Type DWORD
+// Encoding 0 (default) - Enable Fast Scrubber
+// 1 - Disable Fast Scrubber
+
+//
+// Type DWORD
+// Controls enabling PMA memory management in place of the existing legacy
+// RM FB heap manager.
+//
+#define NV_REG_STR_RM_ENABLE_PMA "RMEnablePMA"
+#define NV_REG_STR_RM_ENABLE_PMA_YES (0x00000001)
+#define NV_REG_STR_RM_ENABLE_PMA_NO (0x00000000)
+
+//
+// Type DWORD
+// Controls management of client page tables by PMA on MODS.
+// Enabled by default; MODS uses this regkey to override and disable the feature.
+//
+#define NV_REG_STR_RM_ENABLE_PMA_MANAGED_PTABLES "RMEnablePmaManagedPtables"
+#define NV_REG_STR_RM_ENABLE_PMA_MANAGED_PTABLES_YES (0x00000001)
+#define NV_REG_STR_RM_ENABLE_PMA_MANAGED_PTABLES_NO (0x00000000)
+#define NV_REG_STR_RM_ENABLE_PMA_MANAGED_PTABLES_DEFAULT (0x00000001)
+
+//
+// TYPE DWORD
+// This regkey enables localized memory changes
+// 0 - Disables localized memory changes
+// 1 - Enables localized memory changes
+//
+#define NV_REG_STR_RM_LOCALIZED_MEMORY "RmEnableLocalizedMemory"
+#define NV_REG_STR_RM_LOCALIZED_MEMORY_DISABLE 0x00000000
+#define NV_REG_STR_RM_LOCALIZED_MEMORY_ENABLE 0x00000001
+#define NV_REG_STR_RM_LOCALIZED_MEMORY_DEFAULT NV_REG_STR_RM_LOCALIZED_MEMORY_ENABLE
+
+//
+// Type DWORD
+// Disable global CeUtils instance creation after fifo scheduling enablement
+//
+#define NV_REG_STR_DISABLE_GLOBAL_CE_UTILS "RmDisableGlobalCeUtils"
+#define NV_REG_STR_DISABLE_GLOBAL_CE_UTILS_YES (0x00000001)
+#define NV_REG_STR_DISABLE_GLOBAL_CE_UTILS_NO (0x00000000)
+
+#define NV_REG_STR_RM_SCRUB_BLOCK_SHIFT "RMScrubBlockShift"
+// Type DWORD
+// Encoding Numeric Value
+// A value in the range 12 to 20 represents log2 of maxBlockSize for the heap
+// scrubber. Any other value defaults to 16, i.e. maxBlockSize = 64KB.
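+//
+// Worked example (illustrative only, not part of the original header):
+// RMScrubBlockShift = 20 gives maxBlockSize = 2^20 bytes = 1MB, and
+// RMScrubBlockShift = 12 gives 2^12 bytes = 4KB. An out-of-range value
+// such as 25 falls back to 2^16 bytes = 64KB.
+//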
+
+#define NV_REG_STR_RM_INST_VPR "RMInstVPR"
+// Type DWORD
+// Encoding: takes effect for allocations in VIDEO memory
+// TRUE Make allocation in protected region
+// FALSE Make allocation in non-protected region (default)
+//
+#define NV_REG_STR_RM_INST_VPR_INSTBLK 0:0 // Instance block
+#define NV_REG_STR_RM_INST_VPR_INSTBLK_FALSE (0x00000000)
+#define NV_REG_STR_RM_INST_VPR_INSTBLK_TRUE (0x00000001)
+#define NV_REG_STR_RM_INST_VPR_RAMFC 1:1 // RAMFC save area
+#define NV_REG_STR_RM_INST_VPR_RAMFC_FALSE (0x00000000)
+#define NV_REG_STR_RM_INST_VPR_RAMFC_TRUE (0x00000001)
+#define NV_REG_STR_RM_INST_VPR_RUNLIST 2:2 // Runlist
+#define NV_REG_STR_RM_INST_VPR_RUNLIST_FALSE (0x00000000)
+#define NV_REG_STR_RM_INST_VPR_RUNLIST_TRUE (0x00000001)
+#define NV_REG_STR_RM_INST_VPR_MMU_READ 3:3 // MMU Debug Read
+#define NV_REG_STR_RM_INST_VPR_MMU_READ_FALSE (0x00000000)
+#define NV_REG_STR_RM_INST_VPR_MMU_READ_TRUE (0x00000001)
+#define NV_REG_STR_RM_INST_VPR_MMU_WRITE 4:4 // MMU Debug Write
+#define NV_REG_STR_RM_INST_VPR_MMU_WRITE_FALSE (0x00000000)
+#define NV_REG_STR_RM_INST_VPR_MMU_WRITE_TRUE (0x00000001)
+
+#define NV_REG_STR_RM_GPU_SURPRISE_REMOVAL "RMGpuSurpriseRemoval"
+// Type DWORD
+// Encoding boolean
+// If set, this will cause RM to mark the GPU as lost when it detects 0xFF from
+// register access.
+
+#define NV_REG_STR_RM_BLACKLIST_ADDRESSES "RmBlackListAddresses"
+// Type BINARY:
+// struct
+// {
+// NvU64 addresses[NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES];
+// };
+
+#define NV_REG_STR_RM_NUM_FIFOS "RmNumFifos"
+// Type Dword
+// Override number of fifos (channels) on NV4X
+#define NV_REG_STR_RM_NUM_FIFOS_COMPAT 0x00000020
+#define NV_REG_STR_RM_NUM_FIFOS_EXTENDED 0x00000200
+
+#define NV_REG_STR_RM_SUPPORT_USERD_MAP_DMA "RMSupportUserdMapDma"
+// Type DWORD
+// Encoding: Boolean
+// If set, allow MapMemoryDma calls to be made on channel objects
+
+#define NV_REG_STR_SECONDARY_BUS_RESET_ENABLED "RMSecBusResetEnable"
+// Type DWORD
+// Encoding boolean
+// Default FALSE
+
+#define NV_REG_STR_FORCE_PCIE_CONFIG_SAVE "RMForcePcieConfigSave"
+// Type DWORD
+// Encoding boolean
+// Default FALSE
+
+#define NV_REG_STR_RM_PCIE_FLR_DEVINIT_TIMEOUT_SCALE "RMPcieFlrDevinitTimeout"
+#define NV_REG_STR_RM_PCIE_FLR_DEVINIT_TIMEOUT_SCALE_MIN_ALLOWED 1
+#define NV_REG_STR_RM_PCIE_FLR_DEVINIT_TIMEOUT_SCALE_MAX_ALLOWED 4
+// Type DWORD
+// Regkey to change the FLR devinit timeout value. Increasing the scale value
+// increases the timeout value and vice versa.
+// The scale value has to be greater than 0 since the FLR devinit timeout can't be 0.
+// The scale value is currently limited to 4, which translates to a maximum
+// timeout of 3.6 seconds (900 ms * 4).
+//
+
+#define NV_REG_STR_RM_PCIE_FLR_POLICY "RMPcieFLRPolicy"
+#define NV_REG_STR_RM_PCIE_FLR_POLICY_DEFAULT 0
+#define NV_REG_STR_RM_PCIE_FLR_POLICY_FORCE_DISABLE 1
+// Type DWORD
+// Regkey to force disable Function Level Reset
+// Note that we don't provision for force-enabling FLR: per the current design,
+// FLR is strictly disabled on pre-Turing boards since it's not supported in HW.
+// The default policy may differ from board to board.
+
+// Type DWORD
+// Encoding Numeric Value
+// Overrides chipset-based P2P configurations.
+// Only to be used for testing internal issues
+//
+// P2P reads:
+// 0 - Do not allow P2P reads
+// 1 - Allow P2P reads
+// 2 - Do not override chipset-selected config (default)
+// P2P writes:
+// 0 - Do not allow P2P writes
+// 1 - Allow P2P writes
+// 2 - Do not override chipset-selected config (default)
+// P2P atomics:
+// 0 - Do not allow P2P atomics
+// 1 - Allow P2P atomics
+// 2 - Do not override chipset-selected config (default)
+//
+#define NV_REG_STR_CL_FORCE_P2P "ForceP2P"
+#define NV_REG_STR_CL_FORCE_P2P_READ 1:0
+#define NV_REG_STR_CL_FORCE_P2P_READ_DISABLE 0x00000000
+#define NV_REG_STR_CL_FORCE_P2P_READ_ENABLE 0x00000001
+#define NV_REG_STR_CL_FORCE_P2P_READ_DEFAULT 0x00000002
+#define NV_REG_STR_CL_FORCE_P2P_WRITE 5:4
+#define NV_REG_STR_CL_FORCE_P2P_WRITE_DISABLE 0x00000000
+#define NV_REG_STR_CL_FORCE_P2P_WRITE_ENABLE 0x00000001
+#define NV_REG_STR_CL_FORCE_P2P_WRITE_DEFAULT 0x00000002
+#define NV_REG_STR_CL_FORCE_P2P_ATOMICS 9:8
+#define NV_REG_STR_CL_FORCE_P2P_ATOMICS_DISABLE 0x00000000
+#define NV_REG_STR_CL_FORCE_P2P_ATOMICS_ENABLE 0x00000001
+#define NV_REG_STR_CL_FORCE_P2P_ATOMICS_DEFAULT 0x00000002
+
+//
+// Type DWORD
+// Use this regkey to force RM to pick a P2P type. HW has to support the picked
+// TYPE for it to take effect. E.g., TYPE_BAR1P2P will not work if HW does not
+// support it; a call to create an NV50_P2P object will fail in such a case.
+//
+// TYPE_DEFAULT lets RM choose a P2P type. The priority is:
+// C2C > NVLINK > mailbox P2P > BAR1P2P
+//
+// TYPE_C2C to use C2C P2P if supported
+// TYPE_NVLINK to use NVLINK P2P, including INDIRECT_NVLINK_P2P, if supported
+// TYPE_PCIEP2P to use PCIE P2P if supported. To be used in conjunction with
+// NV_REG_STR_RM_PCIEP2P_TYPE.
+//
+#define NV_REG_STR_RM_FORCE_P2P_TYPE "RMForceP2PType"
+#define NV_REG_STR_RM_FORCE_P2P_TYPE_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_FORCE_P2P_TYPE_PCIEP2P (0x00000001)
+#define NV_REG_STR_RM_FORCE_P2P_TYPE_NVLINK (0x00000002)
+#define NV_REG_STR_RM_FORCE_P2P_TYPE_C2C (0x00000003)
+#define NV_REG_STR_RM_FORCE_P2P_TYPE_MAX NV_REG_STR_RM_FORCE_P2P_TYPE_C2C
+
+//
+// Type: DWORD
+// Allows the choice of which PCIe P2P method to use
+//
+// TYPE_DEFAULT mailbox P2P is to be used by default until UVM supports BAR1 P2P
+//
+// TYPE_MAILBOX forces mailbox P2P to be used if supported
+// TYPE_BAR1 forces BAR1 P2P to be used if supported
+// TYPE_AUTO allows automatic selection of BAR1/mailbox depending on if BAR1P2P is supported
+//
+#define NV_REG_STR_RM_PCIEP2P_TYPE "RMPcieP2PType"
+#define NV_REG_STR_RM_PCIEP2P_TYPE_MAILBOX (0x00000000)
+#define NV_REG_STR_RM_PCIEP2P_TYPE_BAR1 (0x00000001)
+#define NV_REG_STR_RM_PCIEP2P_TYPE_AUTO (0x00000002)
+#define NV_REG_STR_RM_PCIEP2P_TYPE_DEFAULT NV_REG_STR_RM_PCIEP2P_TYPE_MAILBOX
+
+//
+// Type: DWORD
+// Enables/Disables the WAR for bug 1630288 where we disable 3rd-party peer mappings
+// Disabled by default
+//
+#define NV_REG_STR_PEERMAPPING_OVERRIDE "PeerMappingOverride"
+#define NV_REG_STR_PEERMAPPING_OVERRIDE_DEFAULT 0
+
+#define NV_REG_STR_P2P_MAILBOX_CLIENT_ALLOCATED "P2PMailboxClientAllocated"
+#define NV_REG_STR_P2P_MAILBOX_CLIENT_ALLOCATED_FALSE 0
+#define NV_REG_STR_P2P_MAILBOX_CLIENT_ALLOCATED_TRUE 1
+// Type Dword
+// Overrides the P2P Mailbox allocation policy
+// For testing only
+// 0 - P2P Mailbox area is allocated by RM
+// 1 - P2P Mailbox area is not allocated by RM, but by the client.
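+//
+// Worked example (illustrative only) for the ForceP2P key above: to
+// force-enable P2P reads and writes while leaving atomics at the
+// chipset-selected default, set READ (bits 1:0) = 1, WRITE (bits 5:4) = 1,
+// and ATOMICS (bits 9:8) = 2:
+// (0x1 << 0) | (0x1 << 4) | (0x2 << 8) = 0x00000211
+//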
+
+#define NV_REG_STR_RM_MAP_P2P_PEER_ID "RMP2PPeerId"
+// Type DWORD
+// Encoding:
+// Peer ID to use when mapping p2p to peer subdevice in p2p loopback mode
+// Default: RM takes care of assigning peer ID.
+
+#define NV_REG_STR_OVERRIDE_GPU_NUMA_NODE_ID "RMOverrideGpuNumaNodeId"
+// Type DWORD:
+// Encoding -- NvS32
+// Override GPU NUMA Node ID assigned by OS
+
+#define NV_REG_STR_RESTORE_BAR1_SIZE_BUG_3249028_WAR "RMBar1RestoreSize"
+#define NV_REG_STR_RESTORE_BAR1_SIZE_BUG_3249028_TRUE (0x00000001)
+#define NV_REG_STR_RESTORE_BAR1_SIZE_BUG_3249028_FALSE (0x00000000)
+// Type DWORD:
+// Encoding -- Boolean
+// Check if BAR1 size has been restored correctly by SBIOS across power transitions
+// Default: enabled for Ampere and up
+//
+
+//
+// Type DWORD
+// NUMA allocations may skip reclaim when free memory is below a specified
+// occupancy threshold. This regkey tunes that threshold: the value is the
+// percentage of free memory below which the GFP_RECLAIM flag is dropped.
+//
+#define NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE "RmNumaAllocSkipReclaimPercent"
+#define NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_DEFAULT 4
+#define NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_DISABLED 0
+#define NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_MIN 0
+#define NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_MAX 100
+
+//
+// Enable/Disable 64KB BAR1 mappings
+// 0 - Disable 64KB BAR1 mappings
+// 1 - Force/Enable 64KB BAR1 mappings
+//
+#define NV_REG_STR_RM_64KB_BAR1_MAPPINGS "RM64KBBAR1Mappings"
+#define NV_REG_STR_RM_64KB_BAR1_MAPPINGS_ENABLED 0x00000001
+#define NV_REG_STR_RM_64KB_BAR1_MAPPINGS_DISABLED 0x00000000
+
+#define NV_REG_STR_RM_BAR1_APERTURE_SIZE_MB "RMBar1ApertureSizeMB"
+// Type DWORD
+// Encoding Numeric Value
+// Overrides the size of the BAR1 aperture. Used to shrink BAR1. It cannot be
+// greater than the physical size of BAR1.
+
+// Type DWORD
+// Encoding Numeric Value
+// Forces the entire BAR1 to be statically allocated.
+//
+// DISABLE will force disable static BAR1
+// ENABLE will force the static BAR1 to be enabled if there is sufficient BAR1
+// to map all of FB once, or fail initialization otherwise. This mode does
+// not take into account other expected BAR1 mappings and may lead to
+// BAR1 exhaustion later. Use with caution.
+// AUTO will only map static BAR1 if static BAR1 size is calculated to be big enough
+// to map all of FB once plus a calculated amount for other expected BAR1 mappings
+//
+#define NV_REG_STR_RM_FORCE_STATIC_BAR1 "RMForceStaticBar1"
+#define NV_REG_STR_RM_FORCE_STATIC_BAR1_DISABLE 0x00000000
+#define NV_REG_STR_RM_FORCE_STATIC_BAR1_ENABLE 0x00000001
+#define NV_REG_STR_RM_FORCE_STATIC_BAR1_AUTO 0x00000002
+#define NV_REG_STR_RM_FORCE_STATIC_BAR1_MAX 0x00000003
+
+#define NV_REG_STR_RM_BAR2_APERTURE_SIZE_MB "RMBar2ApertureSizeMB"
+// Type DWORD
+// Encoding Numeric Value
+// Overrides the size of the BAR2 aperture. Cannot be greater than the
+// physical size of BAR2 available to RM (which may be less than the total size
+// of BAR2). When this regkey is present we cap the total aperture size to the
+// RM aperture size. This can result in undefined behavior in environments that
+// rely on a virtual BAR2 aperture shared between RM and VBIOS for VESA support.
+
+//
+// This is used to control C2C low power features.
+// +#define NV_REG_STR_RM_LPWR_C2C_STATE "RmLpwrC2CState" +#define NV_REG_STR_RM_LPWR_C2C_STATE_CL3 0:0 +#define NV_REG_STR_RM_LPWR_C2C_STATE_CL3_DISABLE 0 +#define NV_REG_STR_RM_LPWR_C2C_STATE_CL3_ENABLE 1 +#define NV_REG_STR_RM_LPWR_C2C_STATE_CL3_DEFAULT NV_REG_STR_RM_LPWR_C2C_STATE_CL3_DISABLE +#define NV_REG_STR_RM_LPWR_C2C_STATE_CL4 1:1 +#define NV_REG_STR_RM_LPWR_C2C_STATE_CL4_DISABLE 0 +#define NV_REG_STR_RM_LPWR_C2C_STATE_CL4_ENABLE 1 +#define NV_REG_STR_RM_LPWR_C2C_STATE_CL4_DEFAULT NV_REG_STR_RM_LPWR_C2C_STATE_CL4_DISABLE + +#define NV_REG_STR_RM_N1X_GPU_PPS_OVERRIDE "N1xGpuPpsOverride" +#define NV_REG_STR_RM_LPWR_C2C_PPS_OVERRIDE "RmLpwrC2cPpsOverride" +#define NV_REG_STR_RM_LPWR_C2C_PPS_OVERRIDE_SUPPORT 0:0 +#define NV_REG_STR_RM_LPWR_C2C_PPS_OVERRIDE_SUPPORT_ENABLE 1 +#define NV_REG_STR_RM_LPWR_C2C_PPS_OVERRIDE_SUPPORT_DISABLE 0 +#define NV_REG_STR_RM_LPWR_C2C_PPS_OVERRIDE_CL3_PPSINDEX 7:1 +#define NV_REG_STR_RM_LPWR_C2C_PPS_OVERRIDE_CL4_PPSINDEX 15:8 + +// +// Type DWORD: +// This regkey overrides C2C CL3/CL4 idle thresholds set by vbios. +// Encoding - Value of CL3/CL4 idle threshold in microseconds. Zero means that vbios values will be used. +// +#define NV_REG_STR_RM_LPWR_C2C_CL3_IDLE_THRESHOLD_US "RmLpwrC2cCl3IdleThresholdUs" +#define NV_REG_STR_RM_LPWR_C2C_CL4_IDLE_THRESHOLD_US "RmLpwrC2cCl4IdleThresholdUs" + +#if defined(DEVELOP) || defined(DEBUG) || (defined(RMCFG_FEATURE_MODS_FEATURES) && RMCFG_FEATURE_MODS_FEATURES) +// +// TYPE DWORD +// This setting will override the BAR1 Big page size +// This is used for interop testing for MODS +// +#define NV_REG_STR_RM_SET_BAR1_ADDRESS_SPACE_BIG_PAGE_SIZE "RMSetBAR1AddressSpaceBigPageSize" +#define NV_REG_STR_RM_SET_BAR1_ADDRESS_SPACE_BIG_PAGE_SIZE_64k (64 * 1024) +#define NV_REG_STR_RM_SET_BAR1_ADDRESS_SPACE_BIG_PAGE_SIZE_128k (128 * 1024) +#endif //DEVELOP || DEBUG || MODS_FEATURES + +// This regkey is to disable coherent path CPU->Nvlink/C2C->FB and force BAR path. +#define NV_REG_STR_RM_FORCE_BAR_PATH "RMForceBarPath" +// Type DWORD +// Encoding 0 (default) - Enable Coherent C2C/NvLink Path +// 1 - Force BAR Path + +// +// Type: Dword +// Encoding: +// 0 - client RM allocated context buffer feature is disabled +// 1 - client RM allocated context buffer feature is enabled +// +#define NV_REG_STR_RM_CLIENT_RM_ALLOCATED_CTX_BUFFER "RMSetClientRMAllocatedCtxBuffer" +#define NV_REG_STR_RM_CLIENT_RM_ALLOCATED_CTX_BUFFER_DISABLED 0x00000000 +#define NV_REG_STR_RM_CLIENT_RM_ALLOCATED_CTX_BUFFER_ENABLED 0x00000001 + +// +// Type: Dword +// Encoding: +// 0 - Split VA space management between server/client RM is disabled +// 1 - Split VA space management between server/client RM is enabled +// +#define NV_REG_STR_RM_SPLIT_VAS_MGMT_SERVER_CLIENT_RM "RMSplitVasMgmtServerClientRm" +#define NV_REG_STR_RM_SPLIT_VAS_MGMT_SERVER_CLIENT_RM_DISABLED 0x00000000 +#define NV_REG_STR_RM_SPLIT_VAS_MGMT_SERVER_CLIENT_RM_ENABLED 0x00000001 + +// +// Type: Dword +// Encoding: +// 0 - Disable PE FIRO Buffer +// 1 - Enable PE FIRO Buffer +// Default: disabled (0x0) +// +#define NV_REG_STR_RM_ENABLE_PE_FIRO_BUFFER "RMEnablePeFiroBuffer" +#define NV_REG_STR_RM_ENABLE_PE_FIRO_BUFFER_DISABLED 0x00000000 +#define NV_REG_STR_RM_ENABLE_PE_FIRO_BUFFER_ENABLED 0x00000001 + +// +// Restrict the VA range to be <= @ref VASPACE_SIZE_FERMI. +// Used in cases where some engines support 49 bit VA and some don't. 
+// Ignored if NVOS32_ALLOC_FLAGS_USE_BEGIN_END (DMA_ALLOC_VASPACE_USE_RANGE) or
+// NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE (DMA_ALLOC_VASPACE_VA_FIXED) is set.
+// Default: OFF (0x0)
+// Type: DWORD
+//
+#define NV_REG_STR_RM_RESTRICT_VA_RANGE "RMRestrictVARange"
+#define NV_REG_STR_RM_RESTRICT_VA_RANGE_DEFAULT (0x0)
+#define NV_REG_STR_RM_RESTRICT_VA_RANGE_ON (0x1)
+
+#define NV_REG_STR_RESERVE_PTE_SYSMEM_MB "RmReservePteSysmemMB"
+// Type DWORD:
+// Encoding -- Value = 0 -> Do not reserve sysmem for PTEs (default)
+// Value > 0 -> Reserve <Value> MB for PTEs when we run out of video and system memory
+//
+
+// Type DWORD
+// Contains the sysmem page size.
+#define NV_REG_STR_RM_SYSMEM_PAGE_SIZE "RMSysmemPageSize"
+
+//
+// Allows pages that are aligned to large page boundaries to be mapped as large
+// pages.
+//
+#define NV_REG_STR_RM_ALLOW_SYSMEM_LARGE_PAGES "RMAllowSysmemLargePages"
+
+#define NV_REG_STR_FERMI_BIG_PAGE_SIZE "RMFermiBigPageSize"
+#define NV_REG_STR_FERMI_BIG_PAGE_SIZE_64KB (64 * 1024)
+#define NV_REG_STR_FERMI_BIG_PAGE_SIZE_128KB (128 * 1024)
+
+//
+// TYPE DWORD
+// This setting will disable big page size per address space
+//
+#define NV_REG_STR_RM_DISABLE_BIG_PAGE_PER_ADDRESS_SPACE "RmDisableBigPagePerAddressSpace"
+#define NV_REG_STR_RM_DISABLE_BIG_PAGE_PER_ADDRESS_SPACE_FALSE (0x00000000)
+#define NV_REG_STR_RM_DISABLE_BIG_PAGE_PER_ADDRESS_SPACE_TRUE (0x00000001)
+
+#define NV_REG_STR_RM_DISABLE_NONCONTIGUOUS_ALLOCATION "RMDisableNoncontigAlloc"
+#define NV_REG_STR_RM_DISABLE_NONCONTIGUOUS_ALLOCATION_FALSE (0x00000000)
+#define NV_REG_STR_RM_DISABLE_NONCONTIGUOUS_ALLOCATION_TRUE (0x00000001)
+// Type DWORD:
+// Encoding -- Boolean
+// Disable noncontig vidmem allocation
+//
+
+#define NV_REG_STR_RM_FBSR_PAGED_DMA "RmFbsrPagedDMA"
+#define NV_REG_STR_RM_FBSR_PAGED_DMA_ENABLE 1
+#define NV_REG_STR_RM_FBSR_PAGED_DMA_DISABLE 0
+#define NV_REG_STR_RM_FBSR_PAGED_DMA_DEFAULT NV_REG_STR_RM_FBSR_PAGED_DMA_DISABLE
+// Type Dword
+// Encoding Numeric Value
+// Enable the Paged DMA mode for FBSR
+// 0 - Disable (default)
+// 1 - Enable
+
+#define NV_REG_STR_RM_FBSR_FILE_MODE "RmFbsrFileMode"
+#define NV_REG_STR_RM_FBSR_FILE_MODE_ENABLE 1
+#define NV_REG_STR_RM_FBSR_FILE_MODE_DISABLE 0
+#define NV_REG_STR_RM_FBSR_FILE_MODE_DEFAULT NV_REG_STR_RM_FBSR_FILE_MODE_DISABLE
+// Type Dword
+// Encoding Numeric Value
+// Enable the File based power saving mode for Linux
+// 0 - Disable (default)
+// 1 - Enable
+
+#define NV_REG_STR_RM_FBSR_WDDM_MODE "RmFbsrWDDMMode"
+#define NV_REG_STR_RM_FBSR_WDDM_MODE_ENABLE 1
+#define NV_REG_STR_RM_FBSR_WDDM_MODE_DISABLE 0
+#define NV_REG_STR_RM_FBSR_WDDM_MODE_DEFAULT NV_REG_STR_RM_FBSR_WDDM_MODE_DISABLE
+// Type Dword
+// Encoding Numeric Value
+// Enable the WDDM power saving mode for FBSR
+// 0 - Disable (default)
+// 1 - Enable
+
+// Type DWORD: Disables HW fault buffers on Pascal+ chips
+// Encoding : 1 -- TRUE
+// : 0 -- FALSE
+// : Default -- FALSE
+#define NV_REG_STR_RM_DISABLE_HW_FAULT_BUFFER "RmDisableHwFaultBuffer"
+#define NV_REG_STR_RM_DISABLE_HW_FAULT_BUFFER_TRUE 0x00000001
+#define NV_REG_STR_RM_DISABLE_HW_FAULT_BUFFER_FALSE 0x00000000
+#define NV_REG_STR_RM_DISABLE_HW_FAULT_BUFFER_DEFAULT 0x00000000
+
+//
+// Type: DWORD
+// Encoding:
+// 3 - Enable interrupt-based FECS context switch logging with bottom-half/APC fall-back
+// 2 - Enable interrupt-based FECS context switch logging without bottom-half/APC fall-back
+// 1 - Enable periodic FECS context switch logging
+// 0 - Disable FECS context switch logging
+//
+// Note:
Interrupt-based logging and periodic logging are mutually exclusive
+//
+#define NV_REG_STR_RM_CTXSW_LOG "RMCtxswLog"
+#define NV_REG_STR_RM_CTXSW_LOG_DISABLE 0x00000000
+#define NV_REG_STR_RM_CTXSW_LOG_ENABLE 0x00000001
+#define NV_REG_STR_RM_CTXSW_LOG_ENABLE_INTR 0x00000002
+#define NV_REG_STR_RM_CTXSW_LOG_ENABLE_INTR_APC 0x00000003
+#define NV_REG_STR_RM_CTXSW_LOG_DEFAULT NV_REG_STR_RM_CTXSW_LOG_DISABLE
+
+// Type DWORD: Indicates whether to enable video event tracing
+//
+// 0 - Disables video event trace usage (default)
+// > 0 - Enables video event trace and defines sizes for different buffers
+// bit 16 - 30: size of the event buffer in 4K pages
+// bit 31 - 31: Enable always logging:
+// By default, video engines only log video events when there is
+// at least one event buffer bound and enabled. If this flag is set,
+// video engines will always log events even without a consumer. This
+// is helpful for debugging purposes.
+// Example: 0x01000000 means 1MB event buffer.
+#define NV_REG_STR_RM_VIDEO_EVENT_TRACE "RmVideoEventTrace"
+#define NV_REG_STR_RM_VIDEO_EVENT_TRACE_DISABLED (0x00000000)
+#define NV_REG_STR_RM_VIDEO_EVENT_TRACE_EVENT_BUFFER_SIZE_IN_4k 30:16
+#define NV_REG_STR_RM_VIDEO_EVENT_TRACE_ALWAYS_LOG 31:31
+#define NV_REG_STR_RM_VIDEO_EVENT_TRACE_ALWAYS_LOG_DISABLED 0x00000000
+#define NV_REG_STR_RM_VIDEO_EVENT_TRACE_ALWAYS_LOG_ENABLED 0x00000001
+
+//
+// Type: DWORD
+//
+// This regkey configures the maximum number of records that can be
+// processed per DPC when using interrupt-based ctxsw logging
+#define NV_REG_STR_RM_CTXSW_LOG_RECORDS_PER_INTR "RMCtxswLogMaxRecordsPerIntr"
+#define NV_REG_STR_RM_CTXSW_LOG_RECORDS_PER_INTR_DEFAULT 0x30
+
+//
+// Type: DWORD
+// Encoding:
+// 0 - Disable more detailed debug INTR logs
+// 1 - Enable more detailed debug INTR logs
+//
+#define NV_REG_STR_RM_INTR_DETAILED_LOGS "RMIntrDetailedLogs"
+#define NV_REG_STR_RM_INTR_DETAILED_LOGS_DISABLE 0x00000000
+#define NV_REG_STR_RM_INTR_DETAILED_LOGS_ENABLE 0x00000001
+
+#define NV_REG_STR_RM_INTR_LOCKING_MODE "RMIntrLockingMode"
+// Type DWORD
+// Encoding enum
+// Overrides what INTR Locking Mode is in use.
+// Default 0
+#define NV_REG_STR_RM_INTR_LOCKING_MODE_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_INTR_LOCKING_MODE_INTR_MASK (0x00000001)
+
+#define NV_REG_INTERNAL_PANEL_DISCONNECTED "RMInternalPanelDisconnected"
+#define NV_REG_INTERNAL_PANEL_DISCONNECTED_DISABLE 0x00000000
+#define NV_REG_INTERNAL_PANEL_DISCONNECTED_ENABLE 0x00000001
+#define NV_REG_INTERNAL_PANEL_DISCONNECTED_DEFAULT NV_REG_INTERNAL_PANEL_DISCONNECTED_DISABLE
+
+#define NV_REG_STR_RM_ENABLE_AGGRESSIVE_VBLANK "RmEnableAggressiveVblank"
+// Type DWORD
+// Encoding Boolean
+// Enable/Disable Aggressive Vblank Handling
+// 0 -- Don't enable
+// 1 -- Do enable
+#define NV_REG_STR_RM_ENABLE_AGGRESSIVE_VBLANK_DISABLE (0)
+#define NV_REG_STR_RM_ENABLE_AGGRESSIVE_VBLANK_ENABLE (1)
+#define NV_REG_STR_RM_ENABLE_AGGRESSIVE_VBLANK_DEFAULT (NV_REG_STR_RM_ENABLE_AGGRESSIVE_VBLANK_DISABLE)
+
+#define NV_REG_STR_RM_PER_INTR_DPC_QUEUING "RMDisablePerIntrDPCQueueing"
+// Type DWORD
+// This regkey is used to disable per interrupt DPC queuing.
+// 0: Enable Per interrupt DPC Queuing
+// 1: Disable Per interrupt DPC Queuing
+
+#define NV_REG_STR_INTR_STUCK_THRESHOLD "RM654663"
+// Type DWORD
+// Encoding NvU32
+// Number of iterations to see an interrupt in succession before considering it
+// "stuck."
+// Default - See INTR_STUCK_THRESHOLD + + +#define NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR "RMProcessNonStallIntrInLocklessIsr" + +// Type: DWORD +// Enables/Disables processing of non-stall interrupts in lockless ISR for +// Linux only. +// Non-stall interrupts are processed by the function +// intrServiceNonStall_HAL(pIntr,pGpu, TRUE /* bProcess*/); where bProcess is TRUE which +// means that event list will be traversed to notify clients registered for it. +// Disabled by default +// + +#define NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR_DISABLE 0x00000000 +#define NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR_ENABLE 0x00000001 + + +// +// Type: DWORD +// Sets the Initial runlist Context switch timeout value in base 2 microseconds +// (1024 nanosecond timer ticks). +// Default: 0x003fffff base2 usec ~ 4.3 seconds +// +// The lower 31 bits have these limits +// Min: 0x00000002 +// Max: 0x7fffffff +// A value of 0 means CTXSW timeout is disabled entirely. +// +// It is possible for a privileged client to change this value for all engines +// using a ctrl call. +// +// If MSB (bit 31) is set, then the timeout value set will be "locked" and the +// ctrl call to change it will fail. +// +#define NV_REG_STR_RM_CTXSW_TIMEOUT "RmEngineContextSwitchTimeoutUs" +#define NV_REG_STR_RM_CTXSW_TIMEOUT_DEFAULT 0x003fffff +#define NV_REG_STR_RM_CTXSW_TIMEOUT_TIME 30:0 +#define NV_REG_STR_RM_CTXSW_TIMEOUT_TIME_DISABLE 0x00000000 +#define NV_REG_STR_RM_CTXSW_TIMEOUT_LOCK 31:31 +#define NV_REG_STR_RM_CTXSW_TIMEOUT_LOCK_FALSE 0x0 +#define NV_REG_STR_RM_CTXSW_TIMEOUT_LOCK_TRUE 0x1 + + +#define NV_REG_STR_RM_ROBUST_CHANNELS "RmRobustChannels" +#define NV_REG_STR_RM_ROBUST_CHANNELS_ENABLE 0x00000001 +#define NV_REG_STR_RM_ROBUST_CHANNELS_DISABLE 0x00000000 +#define NV_REG_STR_RM_ROBUST_CHANNELS_DEFAULT NV_REG_STR_RM_ROBUST_CHANNELS_DISABLE + +#define NV_REG_STR_RM_RC_WATCHDOG "RmRcWatchdog" +#define NV_REG_STR_RM_RC_WATCHDOG_ENABLE 0x00000001 +#define NV_REG_STR_RM_RC_WATCHDOG_DISABLE 0x00000000 +#define NV_REG_STR_RM_RC_WATCHDOG_DEFAULT NV_REG_STR_RM_RC_WATCHDOG_ENABLE + +#define NV_REG_STR_RM_WATCHDOG_TIMEOUT "RmWatchDogTimeOut" +#define NV_REG_STR_RM_WATCHDOG_TIMEOUT_LOW 5 +#define NV_REG_STR_RM_WATCHDOG_TIMEOUT_HI 60 +#define NV_REG_STR_RM_WATCHDOG_TIMEOUT_DEFAULT 7 + +#define NV_REG_STR_RM_WATCHDOG_INTERVAL "RmWatchDogInterval" +#define NV_REG_STR_RM_WATCHDOG_INTERVAL_LOW 5 +#define NV_REG_STR_RM_WATCHDOG_INTERVAL_HI 30 +#define NV_REG_STR_RM_WATCHDOG_INTERVAL_DEFAULT 7 + +// Enable/Disable watchcat in GSP-RM partition +// Default is Enabled +#define NV_REG_STR_TASK_RM_WATCHCAT "RmEnableRmTaskWatchcat" +#define NV_REG_STR_TASK_RM_WATCHCAT_ENABLE 0x00000001 +#define NV_REG_STR_TASK_RM_WATCHCAT_DISABLE 0x00000000 +#define NV_REG_STR_TASK_RM_WATCHCAT_DEFAULT NV_REG_STR_TASK_RM_WATCHCAT_ENABLE + +// Enable/Disable watchcat in GSP-Plugin for Guest RPC +// Default is Enabled +#define NV_REG_STR_RM_GSP_VGPU_WATCHCAT "RmEnableGspPluginWatchcat" +#define NV_REG_STR_RM_GSP_VGPU_WATCHCAT_ENABLE 0x00000001 +#define NV_REG_STR_RM_GSP_VGPU_WATCHCAT_DISABLE 0x00000000 +#define NV_REG_STR_RM_GSP_VGPU_WATCHCAT_DEFAULT NV_REG_STR_RM_GSP_VGPU_WATCHCAT_ENABLE + +// Set watchcat timeout value in GSP-Plugin for Guest RPC +// Default is 10 seconds +#define NV_REG_STR_RM_GSP_VGPU_WATCHCAT_TIMEOUT "RmGspPluginWatchcatTimeOut" +#define NV_REG_STR_RM_GSP_VGPU_WATCHCAT_TIMEOUT_MIN 0x0000000A +#define NV_REG_STR_RM_GSP_VGPU_WATCHCAT_TIMEOUT_DEFAULT NV_REG_STR_RM_GSP_VGPU_WATCHCAT_TIMEOUT_MIN + +// Set watchdog timeout value for 
the libos user task watchdog +#define NV_REG_STR_RM_GSP_LIBOS_WATCHDOG_TIMEOUT "RmGspLibosWatchdogTimeOut" +#define NV_REG_STR_RM_GSP_LIBOS_WATCHDOG_TIMEOUT_MIN 0x00000000 +#define NV_REG_STR_RM_GSP_LIBOS_WATCHDOG_TIMEOUT_DEFAULT 0x00000005 + +#define NV_REG_STR_RM_DO_LOG_RC_EVENTS "RmLogonRC" +// Type Dword +// Encoding : 0 --> Skip Logging +// 1 --> Do log +// Enable/Disable Event Logging on RC errors +// Default is Disabled +#define NV_REG_STR_RM_DO_LOG_RC_ENABLE 0x00000001 +#define NV_REG_STR_RM_DO_LOG_RC_DISABLE 0x00000000 +#define NV_REG_STR_RM_DO_LOG_RC_DEFAULT NV_REG_STR_RM_DO_LOG_RC_DISABLE + +// Type Dword +// Encoding : 0 --> Skip Breakpoint +// nonzero --> Do Breakpoint +// Enable/Disable breakpoint on DEBUG resource manager on RC errors + +#define NV_REG_STR_RM_BREAK_ON_RC "RmBreakonRC" +#define NV_REG_STR_RM_BREAK_ON_RC_DISABLE 0x00000000 +#define NV_REG_STR_RM_BREAK_ON_RC_ENABLE 0x00000001 + +// Explicitly disable RmBreakOnRC for Retail and +// RMCFG_FEATURE_PLATFORM_GSP builds +#if ((defined(DEBUG) || defined(QA_BUILD)) && \ + (!defined(RMCFG_FEATURE_PLATFORM_GSP) || \ + (defined(RMCFG_FEATURE_PLATFORM_GSP) && !RMCFG_FEATURE_PLATFORM_GSP))) +#define NV_REG_STR_RM_BREAK_ON_RC_DEFAULT NV_REG_STR_RM_BREAK_ON_RC_ENABLE +#else +#define NV_REG_STR_RM_BREAK_ON_RC_DEFAULT NV_REG_STR_RM_BREAK_ON_RC_DISABLE +#endif + +// Volatile registry entries for previous driver version. +// Used to record driver unload/reload for debugging purposes. +#define NV_REG_STR_RM_RC_PREV_DRIVER_VERSION "RmRCPrevDriverVersion" +#define NV_REG_STR_RM_RC_PREV_DRIVER_BRANCH "RmRCPrevDriverBranch" +#define NV_REG_STR_RM_RC_PREV_DRIVER_CHANGELIST "RmRCPrevDriverChangelist" +#define NV_REG_STR_RM_RC_PREV_DRIVER_LOAD_COUNT "RmRCPrevDriverLoadCount" + +#define NV_REG_STR_USE_UNCACHED_PCI_MAPPINGS "UseUncachedPCIMappings" +// Type DWORD +// Encode -- Numeric Value +// Check to see if we are converting PCI mappings + +#define NV_REG_STR_RM_CE_USE_GEN4_MAPPING "RmCeUseGen4Mapping" +#define NV_REG_STR_RM_CE_USE_GEN4_MAPPING_TRUE 0x1 +#define NV_REG_STR_RM_CE_USE_GEN4_MAPPING_FALSE 0x0 +// Type Dword (Boolean) +// Encoding Numeric Value +// Use gen4 mapping that uses a HSHUB CE, if available +// Else, continue using FBHUB PCEs + +// Type Dword +// Enable PCE LCE auto config +#define NV_REG_STR_RM_CE_ENABLE_AUTO_CONFIG "RmCeEnableAutoConfig" +#define NV_REG_STR_RM_CE_ENABLE_AUTO_CONFIG_TRUE 0x1 +#define NV_REG_STR_RM_CE_ENABLE_AUTO_CONFIG_FALSE 0x0 + +// +// Type DWORD +// NVLINK control overrides. +// +// FORCE_DISABLE: Force disable NVLINK when the current default is ON (POR) +// +// TRAIN_AT_LOAD : Force train links during driver load +// +// FORCE_AUTOCONFIG : Force autoconfig training regardless of chiplib forced config links +// +// FORCE_ENABLE: Force enable NVLINK when the current default is OFF (bringup etc.) 
+// +// PARALLEL_TRAINING: Have the GPU endpoint parallelize link training +#define NV_REG_STR_RM_NVLINK_CONTROL "RMNvLinkControl" +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_DISABLE 0:0 +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_DISABLE_NO (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_DISABLE_YES (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_DISABLE_DEFAULT (NV_REG_STR_RM_NVLINK_CONTROL_FORCE_DISABLE_NO) +#define NV_REG_STR_RM_NVLINK_CONTROL_TRAIN_AT_LOAD 1:1 +#define NV_REG_STR_RM_NVLINK_CONTROL_TRAIN_AT_LOAD_NO (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_TRAIN_AT_LOAD_YES (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_TRAIN_AT_LOAD_DEFAULT (NV_REG_STR_RM_NVLINK_CONTROL_TRAIN_AT_LOAD_NO) +#define NV_REG_STR_RM_NVLINK_CONTROL_SKIP_TRAIN 2:2 +#define NV_REG_STR_RM_NVLINK_CONTROL_SKIP_TRAIN_NO (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_SKIP_TRAIN_YES (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_SKIP_TRAIN_DEFAULT (NV_REG_STR_RM_NVLINK_CONTROL_SKIP_TRAIN_NO) +#define NV_REG_STR_RM_NVLINK_CONTROL_RESERVED_0 6:3 +#define NV_REG_STR_RM_NVLINK_CONTROL_LINK_TRAINING_DEBUG_SPEW 7:7 +#define NV_REG_STR_RM_NVLINK_CONTROL_LINK_TRAINING_DEBUG_SPEW_OFF (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_LINK_TRAINING_DEBUG_SPEW_ON (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_AUTOCONFIG 8:8 +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_AUTOCONFIG_NO (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_AUTOCONFIG_YES (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_AUTOCONFIG_DEFAULT (NV_REG_STR_RM_NVLINK_CONTROL_FORCE_AUTOCONFIG_NO) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_ENABLE 31:31 +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_ENABLE_NO (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_ENABLE_YES (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_ENABLE_DEFAULT (NV_REG_STR_RM_NVLINK_CONTROL_FORCE_ENABLE_NO) + +// +// Type DWORD +// Knob to control NVLink MINION +// +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL "RMNvLinkMinionControl" +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ENABLE 3:0 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ENABLE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ENABLE_FORCE_ON (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ENABLE_FORCE_OFF (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_PHY_CONFIG 7:4 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_PHY_CONFIG_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_PHY_CONFIG_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_PHY_CONFIG_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_DL_STATUS 11:8 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_DL_STATUS_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_DL_STATUS_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_DL_STATUS_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITNEGOTIATE 15:12 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITNEGOTIATE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITNEGOTIATE_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITNEGOTIATE_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITOPTIMIZE 19:16 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITOPTIMIZE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITOPTIMIZE_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITOPTIMIZE_DISABLE (0x00000002) +#define 
NV_REG_STR_RM_NVLINK_MINION_CONTROL_CACHE_SEEDS 23:20 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_CACHE_SEEDS_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_CACHE_SEEDS_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_CACHE_SEEDS_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_BOOT_CORE 27:24 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_BOOT_CORE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_BOOT_CORE_RISCV (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_BOOT_CORE_FALCON (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_BOOT_CORE_RISCV_MANIFEST (0x00000003) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_BOOT_CORE_NO_MANIFEST (0x00000004) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ALI_TRAINING 30:28 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ALI_TRAINING_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ALI_TRAINING_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ALI_TRAINING_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_GFW_BOOT_DISABLE 31:31 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_GFW_BOOT_DISABLE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_GFW_BOOT_DISABLE_DISABLE (0x00000001) + +// +// Type DWORD +// Knob to change NVLink link speed +// __LAST is same as highest supported speed +// NOTE: +// NVLINK_SPEED_CONTROL_SPEED_25G is exactly 25.00000Gbps on Pascal +// NVLINK_SPEED_CONTROL_SPEED_25G is exactly 25.78125Gbps on Volta and later +// NVLINK_SPEED_CONTROL_SPEED_2500000G is exactly 25.00000Gbps on all chips +// +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL "RMNvLinkSpeedControl" +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED 4:0 +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_16G (0x00000001) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_19_2G (0x00000002) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_20G (0x00000003) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_21G (0x00000004) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_22G (0x00000005) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_23G (0x00000006) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_24G (0x00000007) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_25G (0x00000008) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_25_78125G (0x00000008) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_25_00000G (0x00000009) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_16_14583G (0x0000000A) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_26_56250G (0x0000000B) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_27_34375G (0x0000000C) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_28_12500G (0x0000000D) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_32G (0x0000000E) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_40G (0x0000000F) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_50_00000G (0x00000010) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_53_12500G (0x00000011) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_100_00000G (0x00000012) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_106_25000G (0x00000013) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_FAULT (0x00000014) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED__LAST (0x00000014) + +// +// Type DWORD +// P2P Loopback over NVLINK will be enabled by default if RM +// detects loopback links. 
For P2P over PCIE, force disable +// P2P loopback over NVLINK using the following regkey +// +#define NV_REG_STR_RM_NVLINK_DISABLE_P2P_LOOPBACK "RMNvLinkDisableP2PLoopback" +#define NV_REG_STR_RM_NVLINK_DISABLE_P2P_LOOPBACK_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_DISABLE_P2P_LOOPBACK_TRUE (0x00000001) +#define NV_REG_STR_RM_NVLINK_DISABLE_P2P_LOOPBACK_FALSE (0x00000000) + +// +// Type DWORD +// Knob to control NVLink Link Power States +// +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL "RMNvLinkControlLinkPM" +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_SINGLE_LANE_MODE 1:0 +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_SINGLE_LANE_MODE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_SINGLE_LANE_MODE_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_SINGLE_LANE_MODE_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_PROD_WRITES 3:2 +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_PROD_WRITES_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_PROD_WRITES_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_PROD_WRITES_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L1_MODE 5:4 +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L1_MODE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L1_MODE_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L1_MODE_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L2_MODE 7:6 +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L2_MODE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L2_MODE_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L2_MODE_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_RESERVED 31:8 + +// +// Type DWORD +// Knob to force lane disable and shutdown during driver unload +// The regkey will also cause a toggle of link reset on driver load +// The regkey should not be used in S/R paths +// +#define NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN "RMNvLinkForceLaneshutdown" +#define NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN_TRUE (0x00000001) +#define NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN_FALSE (0x00000000) +#define NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN_DEFAULT (NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN_FALSE) + +// +// Type DWORD +// For links that are SYSMEM, use this device type for force configs +// Choose the value from NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_* +// +#define NV_REG_STR_RM_NVLINK_SYSMEM_DEVICE_TYPE "RMNvLinkForcedSysmemDeviceType" + +// +// Type DWORD +// NVLink Disable Link Overrides +// The supplied value is ANDed with the set of discovered +// (not necessarily connected) links to remove unwanted links. +// A value of DISABLE_ALL removes/disables all links on this device. +// A value of DISABLE_NONE removes no links. +// If not present, this regkey has no effect. +// +#define NV_REG_STR_RM_NVLINK_DISABLE_LINKS "RMNvLinkDisableLinks" +#define NV_REG_STR_RM_NVLINK_DISABLE_LINKS_DISABLE_ALL (0xFFFFFFFF) +#define NV_REG_STR_RM_NVLINK_DISABLE_LINKS_DISABLE_NONE (0x00000000) + +// +// Type DWORD +// NVLINK Enable Links Overrides +// Note that this control does not force enable links, rather, it should be +// used to disable or mask off SW discovered links supported by the HW. +// +// NOTE: THIS REGKEY HAS BEEN DEPRECATED IN RM, since it does NOT work +// with NVLink auto-configuration. 
Instead, please move to using +// the new regkey NV_REG_STR_RM_NVLINK_DISABLE_LINKS +// +#define NV_REG_STR_RM_NVLINK_ENABLE "RMNvLinkEnable" +#define NV_REG_STR_RM_NVLINK_ENABLE_IDX(i) (i):(i) +#define NV_REG_STR_RM_NVLINK_ENABLE_IDX__SIZE 32 +#define NV_REG_STR_RM_NVLINK_ENABLE_IDX_TRUE (0x00000001) +#define NV_REG_STR_RM_NVLINK_ENABLE_IDX_FALSE (0x00000000) + +// +// Type DWORD +// Knob to control NVLink Verbose Prints +// +#define NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL "RMNvLinkverboseControlMask" +#define NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL_REGINIT 0:0 +#define NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL_REGINIT_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL_REGINIT_ON (0x00000001) +#define NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL_REGINIT_OFF (0x00000000) + +// Type DWORD: +#define NV_REG_STR_RM_PCIE_LINK_SPEED "RMPcieLinkSpeed" +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN2 1:0 +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN2_DEFAULT (0x00000000) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN2_ENABLE (0x00000001) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN2_DISABLE (0x00000002) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN3 3:2 +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN3_DEFAULT (0x00000000) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN3_ENABLE (0x00000001) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN3_DISABLE (0x00000002) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN4 5:4 +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN4_DEFAULT (0x00000000) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN4_ENABLE (0x00000001) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN4_DISABLE (0x00000002) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN5 7:6 +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN5_DEFAULT (0x00000000) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN5_ENABLE (0x00000001) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN5_DISABLE (0x00000002) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN6 9:8 +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN6_DEFAULT (0x00000000) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN6_ENABLE (0x00000001) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN6_DISABLE (0x00000002) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_LOCK_AT_LOAD 31:31 +#define NV_REG_STR_RM_PCIE_LINK_SPEED_LOCK_AT_LOAD_DISABLE (0x00000000) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_LOCK_AT_LOAD_ENABLE (0x00000001) + +// +// Type DWORD +// +// +// This can be used as a per-device regkey or not, in which case the setting +// will apply to all devices. If this key is supplied as both a per-device and +// non-per-device regkey, the non-per-device option will apply first to all +// devices, and then the per-device key settings will apply, overriding the +// settings for the relevant devices. +// +// Encoding : 0 - Disable PCIe Relaxed Ordering TLP header bit setting. This is +// the default option. +// 1 - Try to enable PCIe Relaxed Ordering TLP header bit setting. +// Traverses the PCIe topology and only enables the header bit if +// it is safe to do so, with regard to all devices that could be +// affected. +// 2 - Forcibly enable PCIe Relaxed Ordering TLP header bit setting. +// Explicitly ignores the compatibility of the PCIe topology +// around the device or devices in question. 
+// +#define NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING "RmSetPCIERelaxedOrdering" +#define NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_DEFAULT 0x00000000 +#define NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_DISABLE 0x00000000 +#define NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_ENABLE 0x00000001 +#define NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE 0x00000002 + +// Type DWORD +// This regkey overrides the default use case to optimize the GPU for. +// This regkey should not be used with the RMFermiBigPageSize regkey. +// This regkey should only be set by the RM. +#define NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX "RMOptimizeComputeOrSparseTex" +#define NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_DEFAULT (0x00000000) +#define NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_COMPUTE (0x00000001) +#define NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_SPARSE_TEX (0x00000002) + +#define NV_REG_STR_CL_ASLM_CFG "AslmCfg" +#define NV_REG_STR_CL_ASLM_CFG_NV_LINK_UPGRADE 1:0 +#define NV_REG_STR_CL_ASLM_CFG_NV_LINK_UPGRADE_NO 0x00000000 +#define NV_REG_STR_CL_ASLM_CFG_NV_LINK_UPGRADE_YES 0x00000001 +#define NV_REG_STR_CL_ASLM_CFG_NV_LINK_UPGRADE_DEFAULT 0x00000002 +#define NV_REG_STR_CL_ASLM_CFG_HOT_RESET 5:4 +#define NV_REG_STR_CL_ASLM_CFG_HOT_RESET_NO 0x00000000 +#define NV_REG_STR_CL_ASLM_CFG_HOT_RESET_YES 0x00000001 +#define NV_REG_STR_CL_ASLM_CFG_HOT_RESET_DEFAULT 0x00000002 +#define NV_REG_STR_CL_ASLM_CFG_FAST_UPGRADE 9:8 +#define NV_REG_STR_CL_ASLM_CFG_FAST_UPGRADE_NO 0x00000000 +#define NV_REG_STR_CL_ASLM_CFG_FAST_UPGRADE_YES 0x00000001 +#define NV_REG_STR_CL_ASLM_CFG_FAST_UPGRADE_DEFAULT 0x00000002 +#define NV_REG_STR_CL_ASLM_CFG_GEN2_LINK_UPGRADE 11:10 +#define NV_REG_STR_CL_ASLM_CFG_GEN2_LINK_UPGRADE_NO 0x00000000 +#define NV_REG_STR_CL_ASLM_CFG_GEN2_LINK_UPGRADE_YES 0x00000001 +#define NV_REG_STR_CL_ASLM_CFG_GEN2_LINK_UPGRADE_DEFAULT 0x00000002 +// Type Dword +// Encoding Numeric Value +// Overrides chipset-based ASLM configurations. +// +// NV link upgrade: +// 0 - Do not use NV link upgrade for ASLM +// 1 - Use NV link upgrade for ASLM +// 2 - Do not override chipset-selected config (default) +// Hot reset: +// 0 - Do not use hot reset for ASLM +// 1 - Use hot reset for ASLM +// 2 - Do not override chipset-selected config (default) +// Fast link upgrade: +// 0 - Do not use fast link upgrade for ASLM +// 1 - Use fast link upgrade for ASLM +// 2 - Do not override chipset-selected config (default) +// Gen2 link width upgrade: +// 0 - Do not use Gen2 link upgrade for ASLM +// 1 - Use Gen2 link upgrade for ASLM +// 2 - Do not override chipset-selected config (default) + +#define NV_REG_STR_RM_DISABLE_BR03_FLOW_CONTROL "MB_DisableBr03FlowControl" +// Type DWORD +// Encoding 1 -> Do not program BR03 flow control registers +// 0 -> Setup BR03 flow control registers +// Determine whether we need to program BR03 flow control registers, in objcl.c + +#define NV_REG_STR_RM_FORCE_ENABLE_GEN2 "RmForceEnableGen2" +#define NV_REG_STR_RM_FORCE_ENABLE_GEN2_NO 0 +#define NV_REG_STR_RM_FORCE_ENABLE_GEN2_YES 1 +#define NV_REG_STR_RM_FORCE_ENABLE_GEN2_DEFAULT NV_REG_STR_RM_FORCE_ENABLE_GEN2_NO +// Type DWORD: On some platform, Gen2 is disabled to work around system problems. +// This key is to force enabling Gen2 for testing or other purpose. It is +// ineffective on platforms not Gen2 capable. 
+// Encoding boolean: +// 0 - Do Nothing +// 1 - Force Enable Gen2 (to invalidate PDB_PROP_CL_PCIE_GEN1_GEN2_SWITCH_CHIPSET_DISABLED) +// + +#define NV_REG_STR_RM_D3_FEATURE "RMD3Feature" +// Type DWORD +// This regkey controls D3 related features +#define NV_REG_STR_RM_D3_FEATURE_DRIVER_CFG_SPACE_RESTORE 1:0 +#define NV_REG_STR_RM_D3_FEATURE_DRIVER_CFG_SPACE_RESTORE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_D3_FEATURE_DRIVER_CFG_SPACE_RESTORE_ENABLED (0x00000001) +#define NV_REG_STR_RM_D3_FEATURE_DRIVER_CFG_SPACE_RESTORE_DISABLED (0x00000002) +#define NV_REG_STR_RM_D3_FEATURE_DRIVER_CFG_SPACE_RESTORE_UNUSED (0x00000003) + +#define NV_REG_STR_EMULATED_NBSI_TABLE "RMemNBSItable" +// The emulated NBSI table + +#define NV_REG_STR_RM_DISABLE_FSP "RmDisableFsp" +#define NV_REG_STR_RM_DISABLE_FSP_NO (0x00000000) +#define NV_REG_STR_RM_DISABLE_FSP_YES (0x00000001) + +#define NV_REG_STR_RM_DISABLE_SEC2 "RmDisableSec2" +#define NV_REG_STR_RM_DISABLE_SEC2_NO (0x00000000) +#define NV_REG_STR_RM_DISABLE_SEC2_YES (0x00000001) + +// Type DWORD (Boolean) +// Override any other settings and disable FSP + +#define NV_REG_STR_RM_DISABLE_COT_CMD "RmDisableCotCmd" +#define NV_REG_STR_RM_DISABLE_COT_CMD_FRTS_SYSMEM 1:0 +#define NV_REG_STR_RM_DISABLE_COT_CMD_FRTS_VIDMEM 3:2 +#define NV_REG_STR_RM_DISABLE_COT_CMD_GSPFMC 5:4 +#define NV_REG_STR_RM_DISABLE_COT_CMD_DEFAULT (0x00000000) +#define NV_REG_STR_RM_DISABLE_COT_CMD_YES (0x00000001) +// Type DWORD (Boolean) +// Disable the specified commands as part of Chain-Of-Trust feature + +#define NV_REG_STR_RM_FSP_USE_MNOC "RmFspUseMnoc" +#define NV_REG_STR_RM_FSP_USE_MNOC_DEFAULT (0x00000000) +#define NV_REG_STR_RM_FSP_USE_MNOC_CPU (0x00000001) +#define NV_REG_STR_RM_FSP_USE_MNOC_GSP (0x00000002) +#define NV_REG_STR_RM_FSP_USE_MNOC_BOTH (0x00000003) +// Type DWORD +// Use MNOC (mailbox on CPU / MCTP on GSP) interface to communicate with FSP + +#define NV_REG_STR_PCI_LATENCY_TIMER_CONTROL "PciLatencyTimerControl" +// Type Dword +// Encoding Numeric Value +// Override to control setting/not setting of pci timer latency value. +// Not present suggests default value. A value 0xFFFFFFFF will leave the value unmodified (ie bios value). +// All other values must be multiples of 8 + +#define NV_REG_STR_RM_ENABLE_ROUTE_TO_PHYSICAL_LOCK_BYPASS "RmRouteToPhyiscalLockBypass" +// Type Bool +// Enable optimisation to only take API READ (not WRITE) lock when forwarding ROUTE_TO_PHYSICAL +// control calls to GSP-enabled GPUs. +// This will heavily improve multi-gpu-multi-process control call latency and throughput. +// This optimisation will only work when *all* GPUs in the system are in offload mode (GSP mode). + +#define NV_REG_STR_RM_GPU_FABRIC_PROBE "RmGpuFabricProbe" +#define NV_REG_STR_RM_GPU_FABRIC_PROBE_DELAY 7:0 +#define NV_REG_STR_RM_GPU_FABRIC_PROBE_SLOWDOWN_THRESHOLD 15:8 +#define NV_REG_STR_RM_GPU_FABRIC_PROBE_OVERRIDE 31:31 +// Type DWORD +// Enable GPU fabric probe +// +// When this option is enabled, the GPU will probe its fabric state over the +// NVLink inband channel. The fabric state includes the attributes to allow +// the GPU to participate in P2P over the NVLink fabric. +// +// This option is only honored on NVSwitch based systems. +// +// Encoding: +// _DELAY : Delay between consecutive probe retries (in sec) +// before the slowdown starts. (Default: 5 sec) +// _SLOWDOWN_THRESHOLD : Number of probes retries before the slowdown starts +// (Default: 10). The slowdown doubles the delay +// between every consecutive probe retries until success. 
+//
+
+// Enable plugin logs in the ftrace buffer.
+// 0 - Disabled (default)
+// 1 - Enabled
+#define NV_REG_STR_RM_ENABLE_PLUGIN_IN_FTRACE_BUFFER "RmEnablePluginFtrace"
+#define NV_REG_STR_RM_ENABLE_PLUGIN_IN_FTRACE_BUFFER_ENABLED 0x00000001
+#define NV_REG_STR_RM_ENABLE_PLUGIN_IN_FTRACE_BUFFER_DISABLED 0x00000000
+#define NV_REG_STR_RM_ENABLE_PLUGIN_IN_FTRACE_BUFFER_DEFAULT 0x00000000
+
+// TYPE DWORD
+// Enable vGPU migration on the KVM hypervisor.
+// 1 - (Default) Enable vGPU migration on KVM
+// 0 - Disable vGPU migration on the KVM hypervisor
+//
+#define NV_REG_STR_RM_ENABLE_KVM_VGPU_MIGRATION "RmEnableKvmVgpuMigration"
+#define NV_REG_STR_RM_ENABLE_KVM_VGPU_MIGRATION_TRUE 0x00000001
+#define NV_REG_STR_RM_ENABLE_KVM_VGPU_MIGRATION_FALSE 0x00000000
+#define NV_REG_STR_RM_ENABLE_KVM_VGPU_MIGRATION_DEFAULT 0x00000001
+
+#define NV_REG_STR_RM_QSYNC_FW_REV_CHECK "QuadroSyncFirmwareRevisionCheckDisable"
+#define NV_REG_STR_RM_QSYNC_FW_REV_CHECK_DEFAULT 0x00000000
+#define NV_REG_STR_RM_QSYNC_FW_REV_CHECK_ENABLE 0x00000000
+#define NV_REG_STR_RM_QSYNC_FW_REV_CHECK_DISABLE 0x00000001
+
+// Type DWORD
+// Disable Quadro Sync firmware revision checking, e.g. for testing new versions.
+//
+
+//
+// Type: DWORD
+// Encoding:
+// 1 - SRIOV enabled on a supported GPU
+// 0 - SRIOV disabled on the specific GPU
+//
+#define NV_REG_STR_RM_SET_SRIOV_MODE "RMSetSriovMode"
+#define NV_REG_STR_RM_SET_SRIOV_MODE_DISABLED 0x00000000
+#define NV_REG_STR_RM_SET_SRIOV_MODE_ENABLED 0x00000001
+
+#define NV_REG_STR_RM_SET_VGPU_VERSION_MIN "RMSetVGPUVersionMin"
+//
+// TYPE DWORD
+// Set the minimum vGPU version enforced as supported.
+
+#define NV_REG_STR_RM_SET_VGPU_VERSION_MAX "RMSetVGPUVersionMax"
+//
+// TYPE DWORD
+// Set the maximum vGPU version enforced as supported.
+
+#define NV_REG_STR_TIME_SWAP_RDY_HI_MODIFY_LSR_MIN_TIME "TSwapRdyHiLsrMinTime"
+#define NV_REG_STR_TIME_SWAP_RDY_HI_MODIFY_LSR_MIN_TIME_DEFAULT 250 // 250 microseconds
+// Type: DWORD
+// Encoding:
+// Modifies the LSR_MIN_TIME parameter according to the time period for which
+// the swap lock window remains HIGH for QSYNC III (i.e., P2060) during the
+// swap barrier.
+
+#define NV_REG_STR_TIME_SWAP_RDY_HI_MODIFY_SWAP_LOCKOUT_START "TSwapRdyHiSwapLockoutStart"
+#define NV_REG_STR_TIME_SWAP_RDY_HI_MODIFY_SWAP_LOCKOUT_START_DEFAULT 250 // 250 microseconds
+// Type: DWORD
+// Encoding:
+// Modifies the SWAP_LOCKOUT_START parameter according to the time period for
+// which the swap lock window remains HIGH for QSYNC III.
+//
+
+#define NV_REG_STR_RM_NVLINK_BW "RmNvlinkBandwidth"
+// Type String
+// The option value is a string.
+//
+// Possible string values:
+// OFF: 0% bandwidth
+// MIN: 15%-25% bandwidth depending on the system's NVLink topology
+// HALF: 50% bandwidth
+// 3QUARTER: 75% bandwidth
+// FULL: 100% bandwidth (default)
+//
+// This option is only for Hopper+ GPUs with NVLINK version 4.0.
+
+#define NV_REG_STR_RM_NVLINK_BW_LINK_COUNT "RmNvlinkBandwidthLinkCount"
+// Type: DWORD
+//
+// The requested link count for RBM (Reduced Bandwidth Mode).
+//
+// This option is only for Blackwell+ GPUs with NVLINK version 5.0.
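+
+// Example (illustrative, not part of the original header): the bare "hi:lo"
+// values attached to the multi-field regkeys above are DRF-style bit ranges.
+// Assuming the DRF_VAL() helper from nvmisc.h and that `regVal` holds the
+// DWORD read for RmGpuFabricProbe, the fields unpack as:
+//
+//     NvU32 delaySec = DRF_VAL(_REG_STR, _RM_GPU_FABRIC_PROBE, _DELAY, regVal);
+//     NvU32 slowdown = DRF_VAL(_REG_STR, _RM_GPU_FABRIC_PROBE, _SLOWDOWN_THRESHOLD, regVal);
+//     NvU32 override = DRF_VAL(_REG_STR, _RM_GPU_FABRIC_PROBE, _OVERRIDE, regVal);
+//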
+
+//
+// Type DWORD (Boolean)
+// 1 - Measure API and GPU lock hold/wait times, which can be retrieved with the
+// NV0000_CTRL_CMD_SYSTEM_GET_LOCK_TIMES control call
+// 0 - (Default) Don't measure lock hold/wait times
+//
+#define NV_REG_STR_RM_LOCK_TIME_COLLECT "RmLockTimeCollect"
+
+//
+// Type: DWORD (Boolean)
+//
+// 1 - Only invalidate and free CPU mappings immediately, then collect GPU resources
+// from individual clients under separate lock acquire/release sequences.
+// 0 - (Default) Immediately free all clients' resources when freeing a client list
+#define NV_REG_STR_RM_CLIENT_LIST_DEFERRED_FREE "RMClientListDeferredFree"
+
+//
+// Type: DWORD
+//
+// Number of clients to free in a single chunk before yielding and scheduling
+// a work item to handle the rest.
+//
+// Only valid if NV_REG_STR_RM_CLIENT_LIST_DEFERRED_FREE is set.
+//
+// A value of 0 (default) means there is no limit and all clients will be freed
+// at once before the process terminates.
+//
+#define NV_REG_STR_RM_CLIENT_LIST_DEFERRED_FREE_LIMIT "RMClientListDeferredFreeLimit"
+
+//
+// TYPE DWORD
+// Determines whether or not to emulate the VF MMU TLB invalidation register range.
+// Encoding : 1 - Emulate register range (Default)
+// : 0 - Do not emulate register range.
+//
+#define NV_REG_STR_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE "RMBug3007008EmulateVfMmuTlbInvalidate"
+#define NV_REG_STR_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE_ENABLE 0x00000001
+#define NV_REG_STR_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE_DISABLE 0x00000000
+#define NV_REG_STR_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE_DEFAULT NV_REG_STR_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE_ENABLE
+
+#define NV_REG_STR_RM_POWER_FEATURES "RMPowerFeature"
+
+// Type DWORD
+// This regkey controls InfoROM black box (BBX) data recording. It can be used to
+// restrict access to BBX.
+// 0 - Enable BBX. (Default)
+// COMPLETELY - Enable/Disable BBX access (read/write).
+// WRITE_BY_RM - Enable/Disable writes by RM itself.
+// WRITE_BY_CLIENT - Enable/Disable writes by clients to RM.
+// PERIODIC_FLUSH - Enable/Disable periodic flush to InfoROM (also enables/disables power data collection)
+//
+#define NV_REG_STR_RM_INFOROM_DISABLE_BBX "RmDisableInforomBBX"
+#define NV_REG_STR_RM_INFOROM_DISABLE_BBX_NO (0x00000000)
+#define NV_REG_STR_RM_INFOROM_DISABLE_BBX_YES (0x00000001)
+#define NV_REG_STR_RM_INFOROM_DISABLE_BBX_COMPLETELY 0:0
+#define NV_REG_STR_RM_INFOROM_DISABLE_BBX_COMPLETELY_NO (0x00000000)
+#define NV_REG_STR_RM_INFOROM_DISABLE_BBX_COMPLETELY_YES (0x00000001)
+#define NV_REG_STR_RM_INFOROM_DISABLE_BBX_WRITE_BY_RM 1:1
+#define NV_REG_STR_RM_INFOROM_DISABLE_BBX_WRITE_BY_RM_NO (0x00000000)
+#define NV_REG_STR_RM_INFOROM_DISABLE_BBX_WRITE_BY_RM_YES (0x00000001)
+#define NV_REG_STR_RM_INFOROM_DISABLE_BBX_WRITE_BY_CLIENT 2:2
+#define NV_REG_STR_RM_INFOROM_DISABLE_BBX_WRITE_BY_CLIENT_NO (0x00000000)
+#define NV_REG_STR_RM_INFOROM_DISABLE_BBX_WRITE_BY_CLIENT_YES (0x00000001)
+#define NV_REG_STR_RM_INFOROM_DISABLE_BBX_PERIODIC_FLUSH 3:3
+#define NV_REG_STR_RM_INFOROM_DISABLE_BBX_PERIODIC_FLUSH_YES (0x00000000)
+#define NV_REG_STR_RM_INFOROM_DISABLE_BBX_PERIODIC_FLUSH_NO (0x00000001)
+
+// Type DWORD
+// The period for logging masked ECC corrected errors, in minutes.
+//
+#define NV_REG_STR_RM_INFOROM_ECC_SBE_LOGGING_PERIOD_MIN "RmEccSbeLoggingPeriodMin"
+#define NV_REG_STR_RM_INFOROM_ECC_SBE_LOGGING_PERIOD_MIN_DEFAULT 10
+#define NV_REG_STR_RM_INFOROM_ECC_SBE_LOGGING_PERIOD_MIN_MIN 1
+
+//
+// Type DWORD (Boolean)
+// RmNvlinkEnablePrivErrorRc
+//
+// 0 - (Default) Do not perform RC recovery on PRIV_ERROR
+// 1 - Enable FLA PRIV_ERROR RC recovery
+//
+#define NV_REG_STR_RM_NVLINK_ENABLE_PRIV_ERROR_RC "RmNvlinkEnablePrivErrorRc"
+#define NV_REG_STR_RM_NVLINK_ENABLE_PRIV_ERROR_RC_NO 0
+#define NV_REG_STR_RM_NVLINK_ENABLE_PRIV_ERROR_RC_YES 1
+
+//
+// Add the conditions to exclude these macros from the Orin build, as CONFIDENTIAL_COMPUTE
+// is a guardword. The #if could be removed once the nvRmReg.h file is trimmed from the Orin build.
+//
+// Enable/disable Confidential Compute and control its various modes of operation.
+// 0 - Feature disabled
+// 1 - Feature enabled
+//
+#define NV_REG_STR_RM_CONFIDENTIAL_COMPUTE "RmConfidentialCompute"
+#define NV_REG_STR_RM_CONFIDENTIAL_COMPUTE_ENABLED 0:0
+#define NV_REG_STR_RM_CONFIDENTIAL_COMPUTE_ENABLED_NO 0x00000000
+#define NV_REG_STR_RM_CONFIDENTIAL_COMPUTE_ENABLED_YES 0x00000001
+#define NV_REG_STR_RM_CONFIDENTIAL_COMPUTE_DEV_MODE_ENABLED 1:1
+#define NV_REG_STR_RM_CONFIDENTIAL_COMPUTE_DEV_MODE_ENABLED_NO 0x00000000
+#define NV_REG_STR_RM_CONFIDENTIAL_COMPUTE_DEV_MODE_ENABLED_YES 0x00000001
+#define NV_REG_STR_RM_CONFIDENTIAL_COMPUTE_GPUS_READY_CHECK 2:2
+#define NV_REG_STR_RM_CONFIDENTIAL_COMPUTE_GPUS_READY_CHECK_DISABLED 0x00000000
+#define NV_REG_STR_RM_CONFIDENTIAL_COMPUTE_GPUS_READY_CHECK_ENABLED 0x00000001
+
+//
+// Enable/disable the SPDM feature in Confidential Compute. SPDM-capable profiles
+// may not be loaded by default. This regkey allows us to override the default
+// behavior and force SPDM on or off.
+//
+// 0 - Feature disabled
+// 1 - Feature enabled
+//
+#define NV_REG_STR_RM_CONF_COMPUTE_SPDM_POLICY "RmConfComputeSpdmPolicy"
+#define NV_REG_STR_RM_CONF_COMPUTE_SPDM_POLICY_ENABLED 0:0
+#define NV_REG_STR_RM_CONF_COMPUTE_SPDM_POLICY_ENABLED_NO 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_SPDM_POLICY_ENABLED_YES 0x00000001
+
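+// Example (illustrative, not part of the original header): assuming the
+// FLD_TEST_DRF() helper from nvmisc.h and that `data` holds the DWORD read
+// for RmConfComputeSpdmPolicy, a caller would test the override like this:
+//
+//     if (FLD_TEST_DRF(_REG_STR, _RM_CONF_COMPUTE_SPDM_POLICY, _ENABLED, _YES, data))
+//     {
+//         // Force SPDM on, overriding the profile default.
+//     }
+//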
+//
+// Enable/disable key rotation in Confidential Compute.
+//
+// 0 - Feature disabled
+// 1 - Feature enabled
+//
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION "RmConfComputeKeyRotation"
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_ENABLED 0:0
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_ENABLED_NO 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_ENABLED_YES 0x00000001
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_SEC2_KEYS 1:1
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_SEC2_KEYS_NO 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_SEC2_KEYS_YES 0x00000001
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE2_KEYS 2:2
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE2_KEYS_NO 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE2_KEYS_YES 0x00000001
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE3_KEYS 3:3
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE3_KEYS_NO 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE3_KEYS_YES 0x00000001
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE4_KEYS 4:4
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE4_KEYS_NO 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE4_KEYS_YES 0x00000001
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE5_KEYS 5:5
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE5_KEYS_NO 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE5_KEYS_YES 0x00000001
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE6_KEYS 6:6
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE6_KEYS_NO 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE6_KEYS_YES 0x00000001
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE7_KEYS 7:7
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE7_KEYS_NO 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE7_KEYS_YES 0x00000001
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE8_KEYS 8:8
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE8_KEYS_NO 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE8_KEYS_YES 0x00000001
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE9_KEYS 9:9
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE9_KEYS_NO 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LCE9_KEYS_YES 0x00000001
+
+// Whether all kernel keys should be considered for key rotation
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_KERNEL_KEYS 10:10
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_KERNEL_KEYS_NO 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_KERNEL_KEYS_YES 0x00000001
+
+// Whether all user keys should be considered for key rotation
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_USER_KEYS 11:11
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_USER_KEYS_NO 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_USER_KEYS_YES 0x00000001
+
+// Whether internal RM keys should be considered for key rotation
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_INTERNAL_KEYS 12:12
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_INTERNAL_KEYS_NO 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_INTERNAL_KEYS_YES 0x00000001
+
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_ENABLED_ALL 12:0
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_ENABLED_ALL_NO 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_ENABLED_ALL_YES 0x00001fff
+
+//
+// Set the period for the "keep-alive" heartbeat message sent between the SPDM Requester and Responder.
+// This sends a keep-alive message to the GPU every period. The GPU sets its timeout to 2 * period.
+// If the GPU does not receive a message within 2 * period, it is a fatal error and the GPU will require a reset.
+// The minimum period is 4 seconds, the maximum period is 255 seconds. Setting the period to 0 disables the heartbeat.
+//
+// 0 - Disable feature (no heartbeat sending)
+// x - Period value in seconds
+//
+#define NV_REG_STR_RM_CONF_COMPUTE_HEARTBEAT "RmConfComputeHeartbeatPeriod"
+#define NV_REG_STR_RM_CONF_COMPUTE_HEARTBEAT_PERIOD_SECONDS 31:0
+#define NV_REG_STR_RM_CONF_COMPUTE_HEARTBEAT_PERIOD_SECONDS_DISABLE 0x00000000
+#define NV_REG_STR_RM_CONF_COMPUTE_HEARTBEAT_PERIOD_SECONDS_MIN 0x00000004
+#define NV_REG_STR_RM_CONF_COMPUTE_HEARTBEAT_PERIOD_SECONDS_MAX 0x000000FF
+
+//
+// Set the key rotation timeout value for user-mode clients.
+// This is the amount of time in seconds, after the threshold has been crossed, that user-mode clients
+// have to idle their channels before RM RCs the channels for key rotation.
+// This value must be greater than or equal to 2.
+//
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_TIMEOUT_IN_SEC "RmKeyRotationTimeout"
+
+//
+// Set the difference between the lower and upper thresholds.
+// The value is in units of (amount of data encrypted in units of 16 bytes + number of encryption invocations).
+// If this is set, then NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LOWER_THRESHOLD and
+// NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_UPPER_THRESHOLD are ignored.
+// This value must be greater than 0.
+//
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_THRESHOLD_DELTA "RmKeyRotationThresholdDelta"
+
+//
+// Set the lower threshold for key rotation.
+// The value is in units of (amount of data encrypted in units of 16 bytes + number of encryption invocations).
+// If this is set, then RmKeyRotationUpperThreshold must also be set.
+// This value must be less than RmKeyRotationUpperThreshold.
+// Note that setting the attacker advantage via SMI/NVML will overwrite this value.
+//
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_LOWER_THRESHOLD "RmKeyRotationLowerThreshold"
+
+//
+// Set the upper threshold for key rotation.
+// The value is in units of (amount of data encrypted in units of 16 bytes + number of encryption invocations).
+// If this is set, then RmKeyRotationLowerThreshold must also be set.
+// This value must be greater than RmKeyRotationLowerThreshold.
+// Note that setting the attacker advantage via SMI/NVML will overwrite this value.
+//
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_UPPER_THRESHOLD "RmKeyRotationUpperThreshold"
+
+//
+// Set the threshold for rotation of internal (RM only) keys.
+// The value is in units of (amount of data encrypted in units of 16 bytes + number of encryption invocations).
+// The value must be greater than the minimum of 0x7FFFFFF.
+// This value cannot be changed at runtime, only via this registry key at boot time.
+//
+#define NV_REG_STR_RM_CONF_COMPUTE_KEY_ROTATION_INTERNAL_THRESHOLD "RmKeyRotationInternalThreshold"
+
+//
+// Controls whether GSP-RM profiling is enabled.
+// 0 (default): disabled
+// 1: enabled
+//
+#define NV_REG_STR_RM_GSPRM_PROFILING "RmGspRmProfiling"
+#define NV_REG_STR_RM_GSPRM_PROFILING_DISABLE 0
+#define NV_REG_STR_RM_GSPRM_PROFILING_ENABLE 1
+
+//
+// Controls whether GSP-RM starts with boost clocks.
+// 0 : disabled
+// 1 : enabled (default)
+//
+#define NV_REG_STR_RM_BOOT_GSPRM_WITH_BOOST_CLOCKS "RmBootGspRmWithBoostClocks"
+#define NV_REG_STR_RM_BOOT_GSPRM_WITH_BOOST_CLOCKS_DISABLED 0
+#define NV_REG_STR_RM_BOOT_GSPRM_WITH_BOOST_CLOCKS_ENABLED 1
+
+//
+// Enable Local EGM HW verification using the RM/SW stack.
+// Must be specified with a peerID corresponding to local EGM.
+//
+#define NV_REG_STR_RM_ENABLE_LOCAL_EGM_PEER_ID "RMEnableLocalEgmPeerId"
+
+//
+// Overrides the size of the GSP-RM firmware heap in GPU memory.
+// The GSP-RM firmware heap is reserved for system use and is not available to
+// applications. This regkey can be used to optimize the amount of memory
+// reserved for system use for targeted use cases. The default value for this
+// regkey is determined to support certain worst-case resource allocation
+// patterns, but many use cases do not exhibit such resource allocation patterns
+// and could benefit from reserving less GPU memory. Other use cases may
+// exhibit an even more pathological/stressful resource allocation pattern,
+// which can be enabled (up to a limit) with this regkey.
+//
+// However, NVIDIA does not support setting this registry key, and will require
+// that any bugs observed with it set be reproducible with the default setting
+// as well.
+//
+// The value of this regkey is specified in megabytes. A value of 0 indicates to
+// use the default value. Values less than the minimum or greater than the
+// maximum will be clamped to the nearest limit. The default values are
+// dynamically computed for each GPU prior to booting GSP-RM.
+//
+#define NV_REG_STR_GSP_FIRMWARE_HEAP_SIZE_MB "RmGspFirmwareHeapSizeMB"
+#define NV_REG_STR_GSP_FIRMWARE_HEAP_SIZE_MB_DEFAULT 0
+
+//
+// Sets the size of the sysmem heap for GSP-RM; only for use with 0FB chips.
+// On 0FB chips, a segment of sysmem is allocated for use by GSP-RM in place of
+// where FB would be used.
+//
+// NOTE: This is different from the above "RmGspFirmwareHeapSizeMB", which
+// is used for the WPR heap. The sysmem heap is used for memdescAlloc() calls.
+//
+#define NV_REG_STR_GSP_SYSMEM_HEAP_SIZE_MB "RmGspSysmemHeapSizeMB"
+#define NV_REG_STR_GSP_SYSMEM_HEAP_SIZE_MB_DEFAULT 512
+
+//
+// Type DWORD
+// This regkey can be used to enable GSP-owned fault buffers.
+//
+#define NV_REG_STR_RM_GSP_OWNED_FAULT_BUFFERS_ENABLE "RmGspOwnedFaultBuffersEnable"
+#define NV_REG_STR_RM_GSP_OWNED_FAULT_BUFFERS_ENABLE_NO 0x00000000
+#define NV_REG_STR_RM_GSP_OWNED_FAULT_BUFFERS_ENABLE_YES 0x00000001
+
+//
+// WAR for BlueField3: Bug 4040336
+// BF3's PCI MMIO bus address 0x800000000000 is too high for Ampere to address.
+// Due to this, BF3's bus address is now moved to < 4GB. So, the CPU PA is no longer
+// the same as the bus address, and this regkey adjusts the CPU PA passed in to the
+// correct bus address.
+//
+#define NV_REG_STR_RM_DMA_ADJUST_PEER_MMIO_BF3 "RmDmaAdjustPeerMmioBF3"
+#define NV_REG_STR_RM_DMA_ADJUST_PEER_MMIO_BF3_DISABLE 0
+#define NV_REG_STR_RM_DMA_ADJUST_PEER_MMIO_BF3_ENABLE 1
+
+#define NV_REG_STR_RM_NVLINK_FORCED_LOOPBACK_ON_SWITCH "RMNvLinkForcedLoopbackOnSwitch"
+#define NV_REG_STR_RM_NVLINK_FORCED_LOOPBACK_ON_SWITCH_MODE 0:0
+#define NV_REG_STR_RM_NVLINK_FORCED_LOOPBACK_ON_SWITCH_MODE_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_NVLINK_FORCED_LOOPBACK_ON_SWITCH_MODE_ENABLED (0x00000001)
+
+//
+// Type DWORD
+// This set of MIG regkeys specifies a set of allocation requests to be issued to the GPU on boot.
+// The MIG configuration contained within GPUMGR always supersedes these regkeys, if present and valid.
+// The entire configuration specified by these regkeys is validated before being applied. An error
+// reflected in whole or in part in these regkeys will cause them to be discarded entirely.
+//
+// RmMIGBootConfigurationGI is used to encode a series of GPU instance allocations. These are applied in order.
+// RmMIGBootConfigurationCI is used to encode a series of compute instance (CI) allocations.
+// The GI associated with each CI allocation entry is specified by RmMIGBootConfigurationCIAssignment.
+// It is an error to specify a CI via RmMIGBootConfigurationCI without specifying the associated GPU
+// instance entry via RmMIGBootConfigurationCIAssignment. The values for any CI assignment for a CI
+// entry left unspecified must be 0.
+//
+// RmMIGBootConfigurationGI_N
+// _FLAGS - determines granularity of GPU partitioning. See NV2080_CTRL_CMD_GPU_SET_PARTITIONS
+// _PLACEMENT_LO - Optional placement span to allocate the partition into. Unused if HI ASSERTED and DEASSERT -> DEASSERTED
+// This is for presilicon test only.
+//
+#define NV_REG_STR_RM_RESET_FSM_STATE_TRANSITION_TIMEOUT_US "RmResetFsmStateTimeoutUs"
+
+//
+// Type DWORD
+// Enable GR debug dump for CTXSW timeouts.
+//
+#define NV_REG_STR_RM_CTXSW_TIMEOUT_DEBUG_DUMP "RmCtxswTimeoutDebugDump"
+#define NV_REG_STR_RM_CTXSW_TIMEOUT_DEBUG_DUMP_VAL 0:0
+#define NV_REG_STR_RM_CTXSW_TIMEOUT_DEBUG_DUMP_VAL_ENABLE 0x1
+#define NV_REG_STR_RM_CTXSW_TIMEOUT_DEBUG_DUMP_VAL_DISABLE 0x0
+#define NV_REG_STR_RM_CTXSW_TIMEOUT_DEBUG_DUMP_VAL_DEFAULT NV_REG_STR_RM_CTXSW_TIMEOUT_DEBUG_DUMP_VAL_DISABLE
+
+#endif // NVRM_REGISTRY_H
diff --git a/src/nvidia/interface/rmapi/src/g_finn_rm_api.c b/src/nvidia/interface/rmapi/src/g_finn_rm_api.c
new file mode 100644
index 0000000..dfb6812
--- /dev/null
+++ b/src/nvidia/interface/rmapi/src/g_finn_rm_api.c
@@ -0,0 +1,9042 @@
+#include "g_finn_rm_api.h"
+#include "ctrl/ctrl0000/ctrl0000nvd.h"
+#include "ctrl/ctrl0080/ctrl0080dma.h"
+#include "ctrl/ctrl0080/ctrl0080fb.h"
+#include "ctrl/ctrl0080/ctrl0080fifo.h"
+#include "ctrl/ctrl0080/ctrl0080gpu.h"
+#include "ctrl/ctrl0080/ctrl0080gr.h"
+#include "ctrl/ctrl0080/ctrl0080host.h"
+#include "ctrl/ctrl0080/ctrl0080msenc.h"
+#include "ctrl/ctrl2080/ctrl2080bios.h"
+#include "ctrl/ctrl2080/ctrl2080ce.h"
+#include "ctrl/ctrl2080/ctrl2080gpu.h"
+#include "ctrl/ctrl2080/ctrl2080i2c.h"
+#include "ctrl/ctrl2080/ctrl2080nvd.h"
+#include "ctrl/ctrl2080/ctrl2080perf.h"
+#include "ctrl/ctrl2080/ctrl2080rc.h"
+#include "ctrl/ctrl2080/ctrl2080ucodefuzzer.h"
+#include "ctrl/ctrl208f/ctrl208fgpu.h"
+#include "ctrl/ctrl402c.h"
+#include "ctrl/ctrl83de/ctrl83dedebug.h"
+#include "ctrl/ctrlb06f.h"
+
+#if defined(NVRM) /* Kernel Mode */
+#include <stddef.h>
+#include <stdint.h>
+#include "nvport/nvport.h"
+#elif defined(NV_LIBOS) /* LIBOS */
+#include <stdint.h>
+#include "nvport/nvport.h"
+#elif defined(USE_CUSTOM_MALLOC) /* OpenGL */
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+extern void *rmapi_import_malloc(size_t size);
+extern void rmapi_import_free(void *ptr);
+#else /* Default */
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+//
+
+/*
+ * Serialization helper macros. These make field copying code more readable.
+ * Size is provided explicitly for cross-platform compatibility.
+ */
+
+// Allocate memory.
+#if defined(FINN_MALLOC) /* Use override from Makefile */
+#elif defined(NVRM) /* Kernel Mode */
+#define FINN_MALLOC(size) portMemAllocNonPaged(size)
+
+#elif defined(NV_LIBOS) /* LIBOS */
+#define FINN_MALLOC(size) portMemAllocNonPaged(size)
+
+#elif defined(USE_CUSTOM_MALLOC) /* OpenGL */
+#define FINN_MALLOC(size) rmapi_import_malloc(size)
+
+#else /* Default */
+#define FINN_MALLOC(size) malloc(size)
+#endif
+
+// Free allocated memory.
+#if defined(FINN_FREE) /* Use override from Makefile */ +#elif defined(NVRM) /* Kernel Mode */ +#define FINN_FREE(buf) portMemFree(buf) + +#elif defined(NV_LIBOS) /* LIBOS */ +#define FINN_FREE(buf) portMemFree(buf) + +#elif defined(USE_CUSTOM_MALLOC) /* OpenGL */ +#define FINN_FREE(buf) rmapi_import_free(buf) + +#else /* Default */ +#define FINN_FREE(buf) free(buf) +#endif + +// Set memory region to all zeroes. +#if defined(FINN_MEMZERO) /* Use override from Makefile */ +#elif defined(NVRM) /* Kernel Mode */ +#define FINN_MEMZERO(buf, size) portMemSet(buf, 0, size) + +#elif defined(NV_LIBOS) /* LIBOS */ +#define FINN_MEMZERO(buf, size) portMemSet(buf, 0, size) + +#elif defined(USE_CUSTOM_MALLOC) /* OpenGL */ +#define FINN_MEMZERO(buf, size) memset(buf, 0, size) + +#else /* Default */ +#define FINN_MEMZERO(buf, size) memset(buf, 0, size) +#endif + +// Copy nonoverlapping memory region. +#if defined(FINN_MEMCPY) /* Use override from Makefile */ +#elif defined(NVRM) /* Kernel Mode */ +#define FINN_MEMCPY(dst, src, size) portMemCopy(dst, size, src, size) + +#elif defined(NV_LIBOS) /* LIBOS */ +#define FINN_MEMCPY(dst, src, size) portMemCopy(dst, size, src, size) + +#elif defined(USE_CUSTOM_MALLOC) /* OpenGL */ +#define FINN_MEMCPY(dst, src, size) memcpy(dst, src, size) + +#else /* Default */ +#define FINN_MEMCPY(dst, src, size) memcpy(dst, src, size) +#endif + +// Report an error. +#if defined(FINN_ERROR) /* Use override from Makefile */ +#else /* Default */ +#define FINN_ERROR(err) /* No-op */ +#endif + + +// +// The purpose of the bit pump is to ensure 64-bit aligned access to the +// buffer while enabling arbitrary bits to be read/written. +// + +typedef struct finn_bit_pump_for_read finn_bit_pump_for_read; +struct finn_bit_pump_for_read +{ + uint64_t accumulator; // Bits not yet read from the data buffer + const uint64_t *buffer_position; // Next word within data buffer to be read + const uint64_t *end_of_data; // End of data within buffer + uint8_t remaining_bit_count; // Number of bits remaining in the accumulator +}; + + +// +// Initialize bit pump for reading from the buffer. +// +// WARNING: The buffer start is assumed to be 64-bit aligned for optimal performance. +// `sod` (start of data) and `eod` (end of data) must be multiples of 64 bits +// since this logic is optimized for a 64-bit word size. Caller must check both +// `sod` and `eod`. +// +// `eod` points to the 64-bit word after the data (like most C++ `std` iterators). +// +static inline void finn_open_buffer_for_read(finn_bit_pump_for_read *bp, const uint64_t *sod, const uint64_t *eod) +{ + bp->accumulator = 0U; + bp->buffer_position = sod; + bp->end_of_data = eod; + bp->remaining_bit_count = 0U; +} + + +// +// Read the next several bits. +// +// `bit_size` must be in range of 0 to 64 inclusive; no check is made. +// When `bit_size` is zero, an unsupported use-case, it works as expected by +// returning zero without advancing the pointer. +// +static uint64_t finn_read_buffer(finn_bit_pump_for_read *bp, uint8_t bit_size) +{ + // Value to be deserialized and returned + uint64_t value; + + // Boundary crossing + // Accumulator does not have enough to satisfy the request. + if (bit_size > bp->remaining_bit_count) + { + // Number of bits not yet satisfied + bit_size -= bp->remaining_bit_count; + + // Shift the bits we have into place. + value = bp->accumulator; + + // Return zeroes for unsatisfied bits (if any) at end of data. 
+        if (bp->buffer_position >= bp->end_of_data)
+            bp->accumulator = 0U;
+
+        // Read the next word from the buffer.
+        else
+            bp->accumulator = *(bp->buffer_position++);
+
+        //
+        // This is the special case where we are reading an entire 64-bit word
+        // without crossing a boundary (when the accumulator is empty). The
+        // accumulator remains empty on exit.
+        //
+        // The bitwise operations in the normal flow do not work in this case.
+        // Shifts are not well-defined in C when the right operand exceeds the
+        // size of the left operand. Also, the right operand of the bitwise-and
+        // would exceed the 64-bit capacity. However, the needed logic is simple.
+        //
+        // 64 is the largest legal value for `bit_size`, so `>=` is equivalent to `==`.
+        // Furthermore, if `bit_size == 64`, then `bp->remaining_bit_count` must
+        // have been zero when subtracted above. That's how we know that there is
+        // no boundary crossing.
+        //
+        if (bit_size >= 64)
+        {
+            // The value is the entire word.
+            value = bp->accumulator;
+
+            // Discard the consumed data from the accumulator.
+            bp->accumulator = 0U;
+
+            // Under the assumption that `bit_size` is never larger than 64,
+            // `bit_size == 64` implies `bp->remaining_bit_count == 0` because
+            // of the above `bit_size -= bp->remaining_bit_count`. As such, there
+            // is no need to do `bp->remaining_bit_count = 64U - bit_size`.
+
+            // Done
+            return value;
+        }
+
+        // OR in the bits since this was a boundary crossing.
+        // Shift it over by the number of bits we get from the prior word.
+        value |= (bp->accumulator
+                  & (((uint64_t) 1U << bit_size) - 1U))
+                 << bp->remaining_bit_count;
+
+        // Logic below subtracts off the bits consumed in the accumulator.
+        bp->remaining_bit_count = 64U;
+    }
+
+    else
+    {
+        // The accumulator has enough to satisfy the request.
+        value = bp->accumulator & (((uint64_t) 1U << bit_size) - 1U);
+    }
+
+    // Discard the consumed bits from the accumulator.
+    bp->accumulator >>= bit_size;
+
+    // Keep track of the remaining available bits in the accumulator.
+    bp->remaining_bit_count -= bit_size;
+
+    // Done
+    return value;
+}
+
+
+
+typedef struct finn_bit_pump_for_write finn_bit_pump_for_write;
+struct finn_bit_pump_for_write
+{
+    uint64_t accumulator;           // Bits not yet written to the data buffer
+    uint64_t checksum;              // Checksum of data
+    uint64_t *buffer_position;      // Next word within the data buffer to be written
+    const uint64_t *end_of_buffer;  // End of buffer (which may be after end of data)
+    uint8_t empty_bit_count;        // Number of available bits in the accumulator
+};
+
+
+//
+// Initialize bit pump for writing to the buffer.
+//
+// In the general case for writing to the bit pump:
+//
+// WARNING: The buffer start is assumed to be 64-bit aligned for optimal performance.
+// `sod` (start of data) and `eob` (end of buffer) must be multiples of 64 bits
+// since this logic is optimized for a 64-bit word size. Caller must check both
+// `sod` and `eob`.
+//
+// `eob` points to the 64-bit word after the buffer, an illegal access.
+//
+//
+// Special case to get the serialized size without writing to the buffer:
+// Both `sod` and `eob` are null.
+// When closed, `bp->buffer_position` contains the byte count.
+//
+static inline void finn_open_buffer_for_write(finn_bit_pump_for_write *bp, uint64_t *sod, const uint64_t *eob)
+{
+    bp->accumulator = 0U;
+    bp->buffer_position = sod;
+    bp->end_of_buffer = eob;
+    bp->checksum = 0U;
+    bp->empty_bit_count = 64U;
+}
+
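+//
+// Usage sketch (illustrative, not part of the generated code): the sizing
+// special case described above. With null buffer pointers, writes are
+// suppressed and the pump only counts; the two fields and their bit widths
+// are assumptions for the example.
+//
+//     finn_bit_pump_for_write bp;
+//
+//     finn_open_buffer_for_write(&bp, (uint64_t *) 0, (const uint64_t *) 0);
+//     finn_write_buffer(&bp, field_a, 32);
+//     finn_write_buffer(&bp, flag_b, 1);
+//     finn_close_buffer_for_write(&bp);
+//
+//     // Because buffer_position started at null and advances one 64-bit
+//     // word per flush, its value is now the serialized size in bytes.
+//     serialized_bytes = (uintptr_t) bp.buffer_position;
+//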
+//
+// Write several bits to the buffer.
+//
+// `bit_size` must be in range of 1 to 64 inclusive; no check is made.
+// `value` must not have any 1 bits outside the low-order `bit_size` bits.
+// In other words, bits to the left of `bit_size` must be 0s; no check is made.
+//
+// Return value is nonzero if the end of buffer is reached, an error.
+//
+// `bp->end_of_buffer` is null to disable writing to the buffer.
+//
+static int finn_write_buffer(finn_bit_pump_for_write *bp, uint64_t value, uint8_t bit_size)
+{
+    // Boundary crossing: The accumulator does not have enough room to satisfy the request.
+    if (bit_size >= bp->empty_bit_count)
+    {
+        // Number of bits not yet satisfied
+        bit_size -= bp->empty_bit_count;
+
+        // OR as many bits as will fit into the accumulator.
+        bp->accumulator |= value << (64U - bp->empty_bit_count);
+
+        // Discard these bits by setting them to 0s.
+        // CAUTION: `value` may be unchanged when `bp->empty_bit_count` is 64
+        // depending on the processor/ISA.
+        value >>= bp->empty_bit_count;
+
+        // Write the word to the buffer unless writes are disabled.
+        if (bp->end_of_buffer)
+        {
+            *bp->buffer_position = bp->accumulator;
+        }
+
+        // Advance to the next word in the buffer.
+        bp->buffer_position++;
+
+        // Update the checksum.
+        bp->checksum = ((bp->checksum << 1) ^ (bp->checksum & 1U)) ^ bp->accumulator;
+
+        // Re-initialize the accumulator and the bits filled.
+        bp->accumulator = 0U;
+        bp->empty_bit_count = 64U;
+    }
+
+    // OR the data into the accumulator.
+    // When `bit_size` and `bp->empty_bit_count` are both 64 above, `bit_size`
+    // is assigned zero, but `value` may be unchanged. Check `bit_size` here so
+    // that stale `value` is not ORed into the accumulator again.
+    if (bit_size)
+    {
+        bp->accumulator |= (value << (64U - bp->empty_bit_count));
+    }
+
+    // Advance the bit count
+    bp->empty_bit_count -= bit_size;
+
+    // Return nonzero on buffer overrun.
+    return bp->end_of_buffer && bp->buffer_position >= bp->end_of_buffer && bit_size;
+}
+
+
+//
+// Close the write buffer and compute the checksum.
+//
+// Do NOT call this function if `finn_write_buffer` returned nonzero; no check is made.
+//
+// In the general case for writing to the bit pump:
+//
+// Postcondition: `bp->buffer_position` points to the word after the end of the data,
+// which can be used to calculate the data size in 64-bit words by subtracting from
+// `bp->end_of_buffer`. Buffer data at and after this point is set to zeroes.
+//
+// Special case to get the serialized size without writing to the buffer:
+// Postcondition: `bp->buffer_position` contains the byte count.
+//
+// All cases:
+// Postcondition: `bp->checksum` contains the checksum of words written to the buffer.
+//
+static inline void finn_close_buffer_for_write(finn_bit_pump_for_write *bp)
+{
+    uint64_t *p;
+
+    // The accumulator is not empty.
+    if (bp->empty_bit_count < 64U)
+    {
+        // Update the buffer with the last word.
+        if (bp->end_of_buffer)
+        {
+            *bp->buffer_position = bp->accumulator;
+        }
+
+        // Advance to the next word to get an accurate word count.
+        bp->buffer_position++;
+
+        // Update the checksum.
+        bp->checksum = ((bp->checksum << 1U) ^ (bp->checksum & 1U)) ^ bp->accumulator;
+    }
+
+    // Zero out the rest of the buffer.
+    for (p = bp->buffer_position; p < bp->end_of_buffer; ++p)
+    {
+        *p = 0u;
+    }
+}
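+
+//
+// Usage sketch (illustrative, not part of the generated code): a write/read
+// round trip over a caller-provided 64-bit aligned buffer. `words[]` and the
+// field widths are assumptions for the example; error handling is elided.
+//
+//     uint64_t words[16];
+//     finn_bit_pump_for_write wp;
+//     finn_bit_pump_for_read rp;
+//
+//     finn_open_buffer_for_write(&wp, words, words + 16);
+//     finn_write_buffer(&wp, field_a, 32);
+//     finn_write_buffer(&wp, flag_b, 1);
+//     finn_close_buffer_for_write(&wp);
+//
+//     // wp.buffer_position is one word past the data, i.e. the read pump's
+//     // end-of-data pointer.
+//     finn_open_buffer_for_read(&rp, words, wp.buffer_position);
+//     field_a = finn_read_buffer(&rp, 32);
+//     flag_b  = finn_read_buffer(&rp, 1);
+//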
+
+
+static NV_STATUS finnSerializeRoot_FINN_RM_API(NvU64 interface, NvU64 message, const char *api, finn_bit_pump_for_write *bp, NvBool seri_up);
+static NV_STATUS finnDeserializeRoot_FINN_RM_API(NvU64 interface, NvU64 message, finn_bit_pump_for_read *bp, char *api, NvLength dst_size, NvBool deser_up);
+#if (defined(NVRM))
+NvBool finnBadEnum_NV402C_CTRL_I2C_TRANSACTION_TYPE(NV402C_CTRL_I2C_TRANSACTION_TYPE value);
+#endif // (defined(NVRM))
+
+static NV_STATUS finnSerializeInterface_FINN_GT200_DEBUGGER_DEBUG(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up);
+static NV_STATUS finnDeserializeInterface_FINN_GT200_DEBUGGER_DEBUG(NvU64 message, finn_bit_pump_for_read *bp, FINN_GT200_DEBUGGER_DEBUG *api_intf, NvLength api_size, NvBool deser_up);
+static NvU64 finnUnserializedInterfaceSize_FINN_GT200_DEBUGGER_DEBUG(NvU64 message);
+static NV_STATUS finnSerializeInterface_FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up);
+static NV_STATUS finnDeserializeInterface_FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO(NvU64 message, finn_bit_pump_for_read *bp, FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO *api_intf, NvLength api_size, NvBool deser_up);
+static NvU64 finnUnserializedInterfaceSize_FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO(NvU64 message);
+static NV_STATUS finnSerializeInterface_FINN_NV01_DEVICE_0_DMA(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up);
+static NV_STATUS finnDeserializeInterface_FINN_NV01_DEVICE_0_DMA(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_DEVICE_0_DMA *api_intf, NvLength api_size, NvBool deser_up);
+static NvU64 finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_DMA(NvU64 message);
+static NV_STATUS finnSerializeInterface_FINN_NV01_DEVICE_0_FB(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up);
+static NV_STATUS finnDeserializeInterface_FINN_NV01_DEVICE_0_FB(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_DEVICE_0_FB *api_intf, NvLength api_size, NvBool deser_up);
+static NvU64 finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_FB(NvU64 message);
+static NV_STATUS finnSerializeInterface_FINN_NV01_DEVICE_0_FIFO(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up);
+static NV_STATUS finnDeserializeInterface_FINN_NV01_DEVICE_0_FIFO(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_DEVICE_0_FIFO *api_intf, NvLength api_size, NvBool deser_up);
+static NvU64 finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_FIFO(NvU64 message);
+static NV_STATUS finnSerializeInterface_FINN_NV01_DEVICE_0_GPU(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up);
+static NV_STATUS finnDeserializeInterface_FINN_NV01_DEVICE_0_GPU(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_DEVICE_0_GPU *api_intf, NvLength api_size, NvBool deser_up);
+static NvU64 finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_GPU(NvU64 message);
+static NV_STATUS finnSerializeInterface_FINN_NV01_DEVICE_0_GR(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up);
+static NV_STATUS finnDeserializeInterface_FINN_NV01_DEVICE_0_GR(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_DEVICE_0_GR *api_intf, NvLength api_size, NvBool deser_up);
+static NvU64 finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_GR(NvU64 message);
+static NV_STATUS finnSerializeInterface_FINN_NV01_DEVICE_0_HOST(NvU64 message, const
char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeInterface_FINN_NV01_DEVICE_0_HOST(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_DEVICE_0_HOST *api_intf, NvLength api_size, NvBool deser_up); +static NvU64 finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_HOST(NvU64 message); +static NV_STATUS finnSerializeInterface_FINN_NV01_DEVICE_0_MSENC(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeInterface_FINN_NV01_DEVICE_0_MSENC(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_DEVICE_0_MSENC *api_intf, NvLength api_size, NvBool deser_up); +static NvU64 finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_MSENC(NvU64 message); +static NV_STATUS finnSerializeInterface_FINN_NV01_ROOT_NVD(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeInterface_FINN_NV01_ROOT_NVD(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_ROOT_NVD *api_intf, NvLength api_size, NvBool deser_up); +static NvU64 finnUnserializedInterfaceSize_FINN_NV01_ROOT_NVD(NvU64 message); +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_0_BIOS(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_BIOS(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_0_BIOS *api_intf, NvLength api_size, NvBool deser_up); +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_BIOS(NvU64 message); +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_0_CE(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_CE(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_0_CE *api_intf, NvLength api_size, NvBool deser_up); +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_CE(NvU64 message); +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_0_GPU(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_GPU(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_0_GPU *api_intf, NvLength api_size, NvBool deser_up); +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_GPU(NvU64 message); +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_0_I2C(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_I2C(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_0_I2C *api_intf, NvLength api_size, NvBool deser_up); +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_I2C(NvU64 message); +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_0_NVD(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_NVD(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_0_NVD *api_intf, NvLength api_size, NvBool deser_up); +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_NVD(NvU64 message); +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_0_PERF(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_PERF(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_0_PERF *api_intf, 
NvLength api_size, NvBool deser_up); +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_PERF(NvU64 message); +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_0_RC(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_RC(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_0_RC *api_intf, NvLength api_size, NvBool deser_up); +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_RC(NvU64 message); + +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_DIAG_GPU(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_DIAG_GPU(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_DIAG_GPU *api_intf, NvLength api_size, NvBool deser_up); +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_DIAG_GPU(NvU64 message); +static NV_STATUS finnSerializeInterface_FINN_NV40_I2C_I2C(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeInterface_FINN_NV40_I2C_I2C(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV40_I2C_I2C *api_intf, NvLength api_size, NvBool deser_up); +static NvU64 finnUnserializedInterfaceSize_FINN_NV40_I2C_I2C(NvU64 message); +#if (defined(NVRM)) +static NV_STATUS finnSerializeMessage_NV0000_CTRL_NVD_GET_DUMP_PARAMS(const NV0000_CTRL_NVD_GET_DUMP_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV0000_CTRL_NVD_GET_DUMP_PARAMS(finn_bit_pump_for_read *bp, NV0000_CTRL_NVD_GET_DUMP_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS(const NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV0080_CTRL_FB_GET_CAPS_PARAMS(const NV0080_CTRL_FB_GET_CAPS_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV0080_CTRL_FB_GET_CAPS_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_FB_GET_CAPS_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV0080_CTRL_FIFO_GET_CAPS_PARAMS(const NV0080_CTRL_FIFO_GET_CAPS_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV0080_CTRL_FIFO_GET_CAPS_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_FIFO_GET_CAPS_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS(const NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS(const NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV0080_CTRL_GR_GET_CAPS_PARAMS(const 
NV0080_CTRL_GR_GET_CAPS_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV0080_CTRL_GR_GET_CAPS_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_GR_GET_CAPS_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV0080_CTRL_HOST_GET_CAPS_PARAMS(const NV0080_CTRL_HOST_GET_CAPS_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV0080_CTRL_HOST_GET_CAPS_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_HOST_GET_CAPS_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV0080_CTRL_MSENC_GET_CAPS_PARAMS(const NV0080_CTRL_MSENC_GET_CAPS_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV0080_CTRL_MSENC_GET_CAPS_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_MSENC_GET_CAPS_PARAMS *api, NvLength api_size, NvBool deser_up); +#endif // (defined(NVRM)) + +#if (defined(NVRM)) +static NV_STATUS finnSerializeMessage_NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS(const NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV2080_CTRL_CE_GET_CAPS_PARAMS(const NV2080_CTRL_CE_GET_CAPS_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV2080_CTRL_CE_GET_CAPS_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_CE_GET_CAPS_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV2080_CTRL_GPU_GET_ENGINES_PARAMS(const NV2080_CTRL_GPU_GET_ENGINES_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV2080_CTRL_GPU_GET_ENGINES_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_GPU_GET_ENGINES_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS(const NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS(const NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV2080_CTRL_I2C_ACCESS_PARAMS(const NV2080_CTRL_I2C_ACCESS_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV2080_CTRL_I2C_ACCESS_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_I2C_ACCESS_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV2080_CTRL_NVD_GET_DUMP_PARAMS(const NV2080_CTRL_NVD_GET_DUMP_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV2080_CTRL_NVD_GET_DUMP_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_NVD_GET_DUMP_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS(const 
NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *api, NvLength api_size, NvBool deser_up); +#endif // (defined(NVRM)) + +#if (defined(NVRM)) +static NV_STATUS finnSerializeMessage_NV402C_CTRL_I2C_INDEXED_PARAMS(const NV402C_CTRL_I2C_INDEXED_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV402C_CTRL_I2C_INDEXED_PARAMS(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_INDEXED_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV402C_CTRL_I2C_TRANSACTION_PARAMS(const NV402C_CTRL_I2C_TRANSACTION_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV402C_CTRL_I2C_TRANSACTION_PARAMS(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS(const NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS(finn_bit_pump_for_read *bp, NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS(const NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS(finn_bit_pump_for_read *bp, NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS(const NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS(finn_bit_pump_for_read *bp, NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS(const NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS(finn_bit_pump_for_read *bp, NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeMessage_NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS(const NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeMessage_NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS(finn_bit_pump_for_read *bp, NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeRecord_NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS(const NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeRecord_NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeRecord_NV2080_CTRL_GPUMON_SAMPLES(const NV2080_CTRL_GPUMON_SAMPLES *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeRecord_NV2080_CTRL_GPUMON_SAMPLES(finn_bit_pump_for_read *bp, NV2080_CTRL_GPUMON_SAMPLES *api, 
NvLength api_size, NvBool deser_up); +#endif // (defined(NVRM)) + +#if (defined(NVRM)) +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC(const NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up); +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW *api, NvLength api_size, NvBool deser_up); +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL *api, finn_bit_pump_for_write *bp, NvBool 
seri_up);
+static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL *api, NvLength api_size, NvBool deser_up);
+static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up);
+static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW *api, NvLength api_size, NvBool deser_up);
+static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up);
+static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW *api, NvLength api_size, NvBool deser_up);
+#endif // (defined(NVRM))
+
+#if (defined(NVRM))
+static NV_STATUS finnSerializeUnion_NV402C_CTRL_I2C_TRANSACTION_DATA(const NV402C_CTRL_I2C_TRANSACTION_DATA *api, finn_bit_pump_for_write *bp, NvBool seri_up, NV402C_CTRL_I2C_TRANSACTION_TYPE transType);
+static NV_STATUS finnDeserializeUnion_NV402C_CTRL_I2C_TRANSACTION_DATA(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA *api, NvLength dst_size, NvBool deser_up, NV402C_CTRL_I2C_TRANSACTION_TYPE transType);
+
+#endif // (defined(NVRM))
+
+// Serialize this API.
+NV_STATUS finnSerializeInternal_FINN_RM_API(NvU64 interface, NvU64 message, const char *api, char *dst, NvLength dst_size, NvBool seri_up)
+{
+    // Header
+    FINN_RM_API *header;
+
+    // Buffer end
+    // `char` is the C-standard unit of measure for `sizeof`.
+    const char *dst_end;
+
+    // Bit pump is used to fill the buffer with serialized data.
+    finn_bit_pump_for_write bp;
+
+    // Error code returned from serialization
+    NV_STATUS error_code;
+
+    // Input validation
+    // Null pointers are not permitted.
+    // Buffer must begin on an 8-byte boundary.
+    if (!api || !dst || !dst_size || (uintptr_t) dst & 0x7u)
+    {
+        FINN_ERROR(NV_ERR_INVALID_ARGUMENT);
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    // Header is at the start of the buffer.
+    header = (FINN_RM_API *) dst;
+
+    // Buffer must end on an 8-byte boundary, so round down.
+    dst_end = (const char *) ((uintptr_t) (dst + dst_size) & ~ (uintptr_t) 0x7);
+
+    // Set header data.
+    header->version = FINN_SERIALIZATION_VERSION;
+    header->payloadSize = 0; // Zero until completed successfully
+    header->interface = interface;
+    header->message = message;
+
+    // Advance past header.
+    dst += sizeof(FINN_RM_API);
+
+    // Open the bit pump.
+    finn_open_buffer_for_write(&bp, (uint64_t *) dst, (const uint64_t *) dst_end);
+
+    // Call the serializer.
+    error_code = finnSerializeRoot_FINN_RM_API(interface, message, api, &bp, seri_up);
+
+    // Close the bit pump.
+    finn_close_buffer_for_write(&bp);
+
+    // Payload size in bytes
+    if (error_code == NV_OK)
+        header->payloadSize = (NvU64) (((const char *) bp.buffer_position) - ((const char *) header));
+
+    // Done
+    return error_code;
+}
+
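+//
+// Usage sketch (illustrative, not part of the generated code): a caller
+// serializes a control-call parameter struct into an 8-byte-aligned buffer
+// and later recovers it. `ifaceId`/`msgId` are the FINN interface/message
+// IDs for `params`; the names here are assumptions for the example.
+//
+//     NvU64 buf[128]; // An NvU64 array satisfies the 8-byte alignment check
+//     NV_STATUS status;
+//
+//     status = finnSerializeInternal_FINN_RM_API(ifaceId, msgId,
+//         (const char *) &params, (char *) buf, sizeof(buf), NV_TRUE /* seri_up */);
+//     if (status == NV_OK)
+//         status = finnDeserializeInternal_FINN_RM_API((const char *) buf,
+//             sizeof(buf), (char *) &paramsCopy, sizeof(paramsCopy), NV_FALSE);
+//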
+ switch (interface) + { + case FINN_INTERFACE_ID(FINN_NV01_ROOT_NVD): + return finnSerializeInterface_FINN_NV01_ROOT_NVD(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_DMA): + return finnSerializeInterface_FINN_NV01_DEVICE_0_DMA(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_FB): + return finnSerializeInterface_FINN_NV01_DEVICE_0_FB(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_FIFO): + return finnSerializeInterface_FINN_NV01_DEVICE_0_FIFO(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_GPU): + return finnSerializeInterface_FINN_NV01_DEVICE_0_GPU(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_GR): + return finnSerializeInterface_FINN_NV01_DEVICE_0_GR(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_HOST): + return finnSerializeInterface_FINN_NV01_DEVICE_0_HOST(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_MSENC): + return finnSerializeInterface_FINN_NV01_DEVICE_0_MSENC(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_BIOS): + return finnSerializeInterface_FINN_NV20_SUBDEVICE_0_BIOS(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_CE): + return finnSerializeInterface_FINN_NV20_SUBDEVICE_0_CE(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_GPU): + return finnSerializeInterface_FINN_NV20_SUBDEVICE_0_GPU(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_I2C): + return finnSerializeInterface_FINN_NV20_SUBDEVICE_0_I2C(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_NVD): + return finnSerializeInterface_FINN_NV20_SUBDEVICE_0_NVD(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_PERF): + return finnSerializeInterface_FINN_NV20_SUBDEVICE_0_PERF(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_RC): + return finnSerializeInterface_FINN_NV20_SUBDEVICE_0_RC(message, api, bp, seri_up); + + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_DIAG_GPU): + return finnSerializeInterface_FINN_NV20_SUBDEVICE_DIAG_GPU(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_NV40_I2C_I2C): + return finnSerializeInterface_FINN_NV40_I2C_I2C(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_GT200_DEBUGGER_DEBUG): + return finnSerializeInterface_FINN_GT200_DEBUGGER_DEBUG(message, api, bp, seri_up); + case FINN_INTERFACE_ID(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO): + return finnSerializeInterface_FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO(message, api, bp, seri_up); + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} // end finnSerializeRoot_FINN_RM_API + + +// Deserialize this API. +NV_STATUS finnDeserializeInternal_FINN_RM_API(const char *src, NvLength src_size, char *api, NvLength api_size, NvBool deser_up) +{ + // Header + const FINN_RM_API *header; + + // End of data + const char *src_max; + + // Bit pump is used to read the serialized data. + finn_bit_pump_for_read bp; + + // Error code + NV_STATUS status; + + // Input validation + // Null pointers are not permitted. + // Buffer must begin on an 8-byte boundary. + if (!src || !src_size || !api || !api_size || (uintptr_t) src & 0x7u) + { + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + return NV_ERR_INVALID_ARGUMENT; + } + + // Header data comes first. + header = (const FINN_RM_API *) src; + + // Check the version. 
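+ // The version was stamped into the header by finnSerializeInternal_FINN_RM_API; + // a mismatch means the peer was generated from a different FINN revision, so + // the payload layout cannot be assumed compatible.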
+ if (header->version != FINN_SERIALIZATION_VERSION) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Set src_max for buffer bounds checking. + src_max = src + src_size; + + // Check that source buffer is large enough. + if (sizeof(FINN_RM_API) > src_size || + header->payloadSize > src_size || + header->payloadSize < sizeof(FINN_RM_API)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Open the bit pump, skipping past the header. + finn_open_buffer_for_read(&bp, (const uint64_t *) (src + sizeof(FINN_RM_API)), (const uint64_t *) (src_max)); + + // Dispatch to interface-specific routine + status = finnDeserializeRoot_FINN_RM_API(header->interface, header->message, &bp, api, api_size, deser_up); + // Nothing more to do if there was an error. + if (status != NV_OK) + return status; + + // Check that the declared size matches the serialization outcome. + if (header->payloadSize != (NvU64) (((const char *) bp.buffer_position) - ((const char *) header))) + { + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + return NV_ERR_INVALID_ARGUMENT; + } + + // All good + return NV_OK; +} + + +// Deserialize this API from the bit pump. +static NV_STATUS finnDeserializeRoot_FINN_RM_API(NvU64 interface, NvU64 message, finn_bit_pump_for_read *bp, char *api, NvLength api_size, NvBool deser_up) +{ + // Deserialize the specified interface. + switch (interface) + { + case FINN_INTERFACE_ID(FINN_NV01_ROOT_NVD): + return finnDeserializeInterface_FINN_NV01_ROOT_NVD(message, bp, (FINN_NV01_ROOT_NVD *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_DMA): + return finnDeserializeInterface_FINN_NV01_DEVICE_0_DMA(message, bp, (FINN_NV01_DEVICE_0_DMA *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_FB): + return finnDeserializeInterface_FINN_NV01_DEVICE_0_FB(message, bp, (FINN_NV01_DEVICE_0_FB *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_FIFO): + return finnDeserializeInterface_FINN_NV01_DEVICE_0_FIFO(message, bp, (FINN_NV01_DEVICE_0_FIFO *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_GPU): + return finnDeserializeInterface_FINN_NV01_DEVICE_0_GPU(message, bp, (FINN_NV01_DEVICE_0_GPU *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_GR): + return finnDeserializeInterface_FINN_NV01_DEVICE_0_GR(message, bp, (FINN_NV01_DEVICE_0_GR *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_HOST): + return finnDeserializeInterface_FINN_NV01_DEVICE_0_HOST(message, bp, (FINN_NV01_DEVICE_0_HOST *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_MSENC): + return finnDeserializeInterface_FINN_NV01_DEVICE_0_MSENC(message, bp, (FINN_NV01_DEVICE_0_MSENC *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_BIOS): + return finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_BIOS(message, bp, (FINN_NV20_SUBDEVICE_0_BIOS *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_CE): + return finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_CE(message, bp, (FINN_NV20_SUBDEVICE_0_CE *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_GPU): + return finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_GPU(message, bp, (FINN_NV20_SUBDEVICE_0_GPU *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_I2C): + return finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_I2C(message, bp, (FINN_NV20_SUBDEVICE_0_I2C *) api, api_size, deser_up); + 
case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_NVD): + return finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_NVD(message, bp, (FINN_NV20_SUBDEVICE_0_NVD *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_PERF): + return finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_PERF(message, bp, (FINN_NV20_SUBDEVICE_0_PERF *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_RC): + return finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_RC(message, bp, (FINN_NV20_SUBDEVICE_0_RC *) api, api_size, deser_up); + + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_DIAG_GPU): + return finnDeserializeInterface_FINN_NV20_SUBDEVICE_DIAG_GPU(message, bp, (FINN_NV20_SUBDEVICE_DIAG_GPU *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV40_I2C_I2C): + return finnDeserializeInterface_FINN_NV40_I2C_I2C(message, bp, (FINN_NV40_I2C_I2C *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_GT200_DEBUGGER_DEBUG): + return finnDeserializeInterface_FINN_GT200_DEBUGGER_DEBUG(message, bp, (FINN_GT200_DEBUGGER_DEBUG *) api, api_size, deser_up); + case FINN_INTERFACE_ID(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO): + return finnDeserializeInterface_FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO(message, bp, (FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO *) api, api_size, deser_up); + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} // end finnDeserializeRoot_FINN_RM_API + + +// Size of the serialized format for this interface/message +NvU64 FinnRmApiGetSerializedSize(NvU64 interface, NvU64 message, const NvP64 src) +{ + // Bit pump with writing disabled. + finn_bit_pump_for_write bp; + finn_open_buffer_for_write(&bp, (uint64_t *) 0, (const uint64_t *) 0); + + // Call the serializer with write-suppressed bit pump. + // The size is the same in both directions (up/down). + // Return zero on error to indicate that this API is not serialized by FINN. + if (finnSerializeRoot_FINN_RM_API(interface, message, (const char *) NvP64_VALUE(src), &bp, 0) != NV_OK) + return 0; + + // Close the bit pump. + finn_close_buffer_for_write(&bp); + + // Add the header size in bytes to the amount of data serialized. + // `buffer_position` is the payload size (not really the buffer position). + return (NvU64) NV_PTR_TO_NvP64(bp.buffer_position) + sizeof(FINN_RM_API); +} + + +// Size of the unserialized format for this interface/message +NvU64 FinnRmApiGetUnserializedSize(NvU64 interface, NvU64 message) +{ + // Forward to message-specific routine.
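+ // Unlike FinnRmApiGetSerializedSize above, no bit pump is needed here: the + // unserialized size of a message is simply the sizeof() of its parameter struct.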
+ switch (interface) + { + case FINN_INTERFACE_ID(FINN_NV01_ROOT_NVD): + return finnUnserializedInterfaceSize_FINN_NV01_ROOT_NVD(message); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_DMA): + return finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_DMA(message); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_FB): + return finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_FB(message); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_FIFO): + return finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_FIFO(message); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_GPU): + return finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_GPU(message); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_GR): + return finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_GR(message); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_HOST): + return finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_HOST(message); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_MSENC): + return finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_MSENC(message); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_BIOS): + return finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_BIOS(message); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_CE): + return finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_CE(message); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_GPU): + return finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_GPU(message); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_I2C): + return finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_I2C(message); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_NVD): + return finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_NVD(message); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_PERF): + return finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_PERF(message); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_RC): + return finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_RC(message); + + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_DIAG_GPU): + return finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_DIAG_GPU(message); + case FINN_INTERFACE_ID(FINN_NV40_I2C_I2C): + return finnUnserializedInterfaceSize_FINN_NV40_I2C_I2C(message); + case FINN_INTERFACE_ID(FINN_GT200_DEBUGGER_DEBUG): + return finnUnserializedInterfaceSize_FINN_GT200_DEBUGGER_DEBUG(message); + case FINN_INTERFACE_ID(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO): + return finnUnserializedInterfaceSize_FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO(message); + + // Zero indicates an unsupported interface (or message). + default: + return 0; + } +} // end FinnRmApiGetUnserializedSize + +#if (defined(NVRM)) +// Validate the enum value. +NvBool finnBadEnum_NV402C_CTRL_I2C_TRANSACTION_TYPE(NV402C_CTRL_I2C_TRANSACTION_TYPE value) +{ + switch (value) + { + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW: + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW: + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW: + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW: + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW: + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW: + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW: + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL: + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL: + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW: + case NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC: + return NV_FALSE; + + default: + return NV_TRUE; + } +} + +#endif // (defined(NVRM)) + +// Serialize this interface.
+static NV_STATUS finnSerializeInterface_FINN_GT200_DEBUGGER_DEBUG(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 2 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS): + return finnSerializeMessage_NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS((const NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *) api_intf, bp, seri_up); + case FINN_MESSAGE_ID(NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS): + return finnSerializeMessage_NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS((const NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_GT200_DEBUGGER_DEBUG(NvU64 message, finn_bit_pump_for_read *bp, FINN_GT200_DEBUGGER_DEBUG *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 2 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS): + return finnDeserializeMessage_NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS(bp, (NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *) api_intf, api_size, deser_up); + case FINN_MESSAGE_ID(NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS): + return finnDeserializeMessage_NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS(bp, (NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_GT200_DEBUGGER_DEBUG(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS): + return sizeof(NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS); + case FINN_MESSAGE_ID(NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS): + return sizeof(NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 3 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS): + return finnSerializeMessage_NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS((const NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *) api_intf, bp, seri_up); + case FINN_MESSAGE_ID(NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS): + return finnSerializeMessage_NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS((const NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS *) api_intf, bp, seri_up); + case FINN_MESSAGE_ID(NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS): + return finnSerializeMessage_NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS((const NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface.
+static NV_STATUS finnDeserializeInterface_FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO(NvU64 message, finn_bit_pump_for_read *bp, FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 3 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS): + return finnDeserializeMessage_NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS(bp, (NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *) api_intf, api_size, deser_up); + case FINN_MESSAGE_ID(NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS): + return finnDeserializeMessage_NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS(bp, (NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS *) api_intf, api_size, deser_up); + case FINN_MESSAGE_ID(NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS): + return finnDeserializeMessage_NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS(bp, (NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS): + return sizeof(NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS); + case FINN_MESSAGE_ID(NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS): + return sizeof(NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS); + case FINN_MESSAGE_ID(NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS): + return sizeof(NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV01_DEVICE_0_DMA(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS): + return finnSerializeMessage_NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS((const NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV01_DEVICE_0_DMA(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_DEVICE_0_DMA *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS): + return finnDeserializeMessage_NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS(bp, (NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_DMA(NvU64 message) +{ + // Forward to message-specific routine.
+ switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS): + return sizeof(NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV01_DEVICE_0_FB(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_FB_GET_CAPS_PARAMS): + return finnSerializeMessage_NV0080_CTRL_FB_GET_CAPS_PARAMS((const NV0080_CTRL_FB_GET_CAPS_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV01_DEVICE_0_FB(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_DEVICE_0_FB *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_FB_GET_CAPS_PARAMS): + return finnDeserializeMessage_NV0080_CTRL_FB_GET_CAPS_PARAMS(bp, (NV0080_CTRL_FB_GET_CAPS_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_FB(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_FB_GET_CAPS_PARAMS): + return sizeof(NV0080_CTRL_FB_GET_CAPS_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV01_DEVICE_0_FIFO(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 2 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_GET_CAPS_PARAMS): + return finnSerializeMessage_NV0080_CTRL_FIFO_GET_CAPS_PARAMS((const NV0080_CTRL_FIFO_GET_CAPS_PARAMS *) api_intf, bp, seri_up); + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS): + return finnSerializeMessage_NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS((const NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV01_DEVICE_0_FIFO(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_DEVICE_0_FIFO *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 2 messages in this interface.
+ switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_GET_CAPS_PARAMS): + return finnDeserializeMessage_NV0080_CTRL_FIFO_GET_CAPS_PARAMS(bp, (NV0080_CTRL_FIFO_GET_CAPS_PARAMS *) api_intf, api_size, deser_up); + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS): + return finnDeserializeMessage_NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS(bp, (NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_FIFO(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_GET_CAPS_PARAMS): + return sizeof(NV0080_CTRL_FIFO_GET_CAPS_PARAMS); + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS): + return sizeof(NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV01_DEVICE_0_GPU(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS): + return finnSerializeMessage_NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS((const NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV01_DEVICE_0_GPU(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_DEVICE_0_GPU *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS): + return finnDeserializeMessage_NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS(bp, (NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_GPU(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS): + return sizeof(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV01_DEVICE_0_GR(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_GR_GET_CAPS_PARAMS): + return finnSerializeMessage_NV0080_CTRL_GR_GET_CAPS_PARAMS((const NV0080_CTRL_GR_GET_CAPS_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported.
+ default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV01_DEVICE_0_GR(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_DEVICE_0_GR *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_GR_GET_CAPS_PARAMS): + return finnDeserializeMessage_NV0080_CTRL_GR_GET_CAPS_PARAMS(bp, (NV0080_CTRL_GR_GET_CAPS_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_GR(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_GR_GET_CAPS_PARAMS): + return sizeof(NV0080_CTRL_GR_GET_CAPS_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV01_DEVICE_0_HOST(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_HOST_GET_CAPS_PARAMS): + return finnSerializeMessage_NV0080_CTRL_HOST_GET_CAPS_PARAMS((const NV0080_CTRL_HOST_GET_CAPS_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV01_DEVICE_0_HOST(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_DEVICE_0_HOST *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_HOST_GET_CAPS_PARAMS): + return finnDeserializeMessage_NV0080_CTRL_HOST_GET_CAPS_PARAMS(bp, (NV0080_CTRL_HOST_GET_CAPS_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_HOST(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_HOST_GET_CAPS_PARAMS): + return sizeof(NV0080_CTRL_HOST_GET_CAPS_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV01_DEVICE_0_MSENC(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_MSENC_GET_CAPS_PARAMS): + return finnSerializeMessage_NV0080_CTRL_MSENC_GET_CAPS_PARAMS((const NV0080_CTRL_MSENC_GET_CAPS_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported.
+ default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV01_DEVICE_0_MSENC(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_DEVICE_0_MSENC *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_MSENC_GET_CAPS_PARAMS): + return finnDeserializeMessage_NV0080_CTRL_MSENC_GET_CAPS_PARAMS(bp, (NV0080_CTRL_MSENC_GET_CAPS_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV01_DEVICE_0_MSENC(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0080_CTRL_MSENC_GET_CAPS_PARAMS): + return sizeof(NV0080_CTRL_MSENC_GET_CAPS_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV01_ROOT_NVD(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0000_CTRL_NVD_GET_DUMP_PARAMS): + return finnSerializeMessage_NV0000_CTRL_NVD_GET_DUMP_PARAMS((const NV0000_CTRL_NVD_GET_DUMP_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV01_ROOT_NVD(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV01_ROOT_NVD *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0000_CTRL_NVD_GET_DUMP_PARAMS): + return finnDeserializeMessage_NV0000_CTRL_NVD_GET_DUMP_PARAMS(bp, (NV0000_CTRL_NVD_GET_DUMP_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV01_ROOT_NVD(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV0000_CTRL_NVD_GET_DUMP_PARAMS): + return sizeof(NV0000_CTRL_NVD_GET_DUMP_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_0_BIOS(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 2 messages in this interface. + switch (message) + { + +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS): + return finnSerializeMessage_NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS((const NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported.
+ default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_BIOS(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_0_BIOS *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 2 messages in this interface. + switch (message) + { + +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS): + return finnDeserializeMessage_NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS(bp, (NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_BIOS(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { + +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS): + return sizeof(NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_0_CE(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_CE_GET_CAPS_PARAMS): + return finnSerializeMessage_NV2080_CTRL_CE_GET_CAPS_PARAMS((const NV2080_CTRL_CE_GET_CAPS_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_CE(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_0_CE *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_CE_GET_CAPS_PARAMS): + return finnDeserializeMessage_NV2080_CTRL_CE_GET_CAPS_PARAMS(bp, (NV2080_CTRL_CE_GET_CAPS_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_CE(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_CE_GET_CAPS_PARAMS): + return sizeof(NV2080_CTRL_CE_GET_CAPS_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_0_GPU(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 3 messages in this interface.
+ switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_GET_ENGINES_PARAMS): + return finnSerializeMessage_NV2080_CTRL_GPU_GET_ENGINES_PARAMS((const NV2080_CTRL_GPU_GET_ENGINES_PARAMS *) api_intf, bp, seri_up); + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS): + return finnSerializeMessage_NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS((const NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *) api_intf, bp, seri_up); + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS): + return finnSerializeMessage_NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS((const NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_GPU(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_0_GPU *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 3 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_GET_ENGINES_PARAMS): + return finnDeserializeMessage_NV2080_CTRL_GPU_GET_ENGINES_PARAMS(bp, (NV2080_CTRL_GPU_GET_ENGINES_PARAMS *) api_intf, api_size, deser_up); + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS): + return finnDeserializeMessage_NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS(bp, (NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *) api_intf, api_size, deser_up); + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS): + return finnDeserializeMessage_NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS(bp, (NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_GPU(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_GET_ENGINES_PARAMS): + return sizeof(NV2080_CTRL_GPU_GET_ENGINES_PARAMS); + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS): + return sizeof(NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS); + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS): + return sizeof(NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_0_I2C(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_I2C_ACCESS_PARAMS): + return finnSerializeMessage_NV2080_CTRL_I2C_ACCESS_PARAMS((const NV2080_CTRL_I2C_ACCESS_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_I2C(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_0_I2C *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 1 messages in this interface.
+ switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_I2C_ACCESS_PARAMS): + return finnDeserializeMessage_NV2080_CTRL_I2C_ACCESS_PARAMS(bp, (NV2080_CTRL_I2C_ACCESS_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_I2C(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_I2C_ACCESS_PARAMS): + return sizeof(NV2080_CTRL_I2C_ACCESS_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_0_NVD(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_NVD_GET_DUMP_PARAMS): + return finnSerializeMessage_NV2080_CTRL_NVD_GET_DUMP_PARAMS((const NV2080_CTRL_NVD_GET_DUMP_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_NVD(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_0_NVD *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_NVD_GET_DUMP_PARAMS): + return finnDeserializeMessage_NV2080_CTRL_NVD_GET_DUMP_PARAMS(bp, (NV2080_CTRL_NVD_GET_DUMP_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_NVD(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_NVD_GET_DUMP_PARAMS): + return sizeof(NV2080_CTRL_NVD_GET_DUMP_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_0_PERF(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM): /* alias */ + return finnSerializeRecord_NV2080_CTRL_GPUMON_SAMPLES((const NV2080_CTRL_GPUMON_SAMPLES *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_PERF(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_0_PERF *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 1 messages in this interface.
+ switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM): /* alias */ + return finnDeserializeRecord_NV2080_CTRL_GPUMON_SAMPLES(bp, (NV2080_CTRL_GPUMON_SAMPLES *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_PERF(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM): /* alias */ + return sizeof(NV2080_CTRL_GPUMON_SAMPLES); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_0_RC(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS): + return finnSerializeMessage_NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS((const NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_0_RC(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_0_RC *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 1 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS): + return finnDeserializeMessage_NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS(bp, (NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_0_RC(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS): + return sizeof(NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV20_SUBDEVICE_DIAG_GPU(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 2 messages in this interface. + switch (message) + { + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV20_SUBDEVICE_DIAG_GPU(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV20_SUBDEVICE_DIAG_GPU *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 2 messages in this interface. + switch (message) + { + + + // Everything else is unsupported.
+ default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV20_SUBDEVICE_DIAG_GPU(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +// Serialize this interface. +static NV_STATUS finnSerializeInterface_FINN_NV40_I2C_I2C(NvU64 message, const char *api_intf, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize one of 2 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV402C_CTRL_I2C_INDEXED_PARAMS): + return finnSerializeMessage_NV402C_CTRL_I2C_INDEXED_PARAMS((const NV402C_CTRL_I2C_INDEXED_PARAMS *) api_intf, bp, seri_up); + case FINN_MESSAGE_ID(NV402C_CTRL_I2C_TRANSACTION_PARAMS): + return finnSerializeMessage_NV402C_CTRL_I2C_TRANSACTION_PARAMS((const NV402C_CTRL_I2C_TRANSACTION_PARAMS *) api_intf, bp, seri_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Deserialize this interface. +static NV_STATUS finnDeserializeInterface_FINN_NV40_I2C_I2C(NvU64 message, finn_bit_pump_for_read *bp, FINN_NV40_I2C_I2C *api_intf, NvLength api_size, NvBool deser_up) +{ + // Deserialize one of 2 messages in this interface. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV402C_CTRL_I2C_INDEXED_PARAMS): + return finnDeserializeMessage_NV402C_CTRL_I2C_INDEXED_PARAMS(bp, (NV402C_CTRL_I2C_INDEXED_PARAMS *) api_intf, api_size, deser_up); + case FINN_MESSAGE_ID(NV402C_CTRL_I2C_TRANSACTION_PARAMS): + return finnDeserializeMessage_NV402C_CTRL_I2C_TRANSACTION_PARAMS(bp, (NV402C_CTRL_I2C_TRANSACTION_PARAMS *) api_intf, api_size, deser_up); +#endif // (defined(NVRM)) + + + // Everything else is unsupported. + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +// Size of the unserialized format for this interface/message +static NvU64 finnUnserializedInterfaceSize_FINN_NV40_I2C_I2C(NvU64 message) +{ + // Forward to message-specific routine. + switch (message) + { +#if (defined(NVRM)) + case FINN_MESSAGE_ID(NV402C_CTRL_I2C_INDEXED_PARAMS): + return sizeof(NV402C_CTRL_I2C_INDEXED_PARAMS); + case FINN_MESSAGE_ID(NV402C_CTRL_I2C_TRANSACTION_PARAMS): + return sizeof(NV402C_CTRL_I2C_TRANSACTION_PARAMS); +#endif // (defined(NVRM)) + + + // Zero indicates an unsupported message (or interface). + default: + return 0; + } +} + +#if (defined(NVRM)) + +// Serialize each of the 3 field(s). +// 2 out of 2 independent field(s) are reordered to be before 1 dependent field(s). +static NV_STATUS finnSerializeMessage_NV0000_CTRL_NVD_GET_DUMP_PARAMS(const NV0000_CTRL_NVD_GET_DUMP_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `component`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->component, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `size`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Reject source data if it is out of range.
+ if (api->size > NV0000_CTRL_NVD_MAX_DUMP_SIZE) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->size, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->pBuffer), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->pBuffer) + { + // Serialize each element in `pBuffer`. + { + NvLength i; + for (i = 0; i < (api->size); ++i) + { + // Serialize field-presence indicator for `pBuffer[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->pBuffer)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->pBuffer) + FINN_FREE(NvP64_VALUE(api->pBuffer)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 3 field(s). +// 2 out of 2 independent field(s) are reordered to be before 1 dependent field(s). +static NV_STATUS finnDeserializeMessage_NV0000_CTRL_NVD_GET_DUMP_PARAMS(finn_bit_pump_for_read *bp, NV0000_CTRL_NVD_GET_DUMP_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV0000_CTRL_NVD_GET_DUMP_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `component`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->component = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `size`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->size = (NvU32) finn_read_buffer(bp, 32); + + // Reject deserialized data if it is out of range. + if (api->size > NV0000_CTRL_NVD_MAX_DUMP_SIZE) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Check data-presence (nonnull pointer) indicator for `pBuffer`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->size) * (sizeof(NvU8) /*pBuffer[i]*/) /*pBuffer*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->pBuffer = NV_PTR_TO_NvP64(FINN_MALLOC((api->size) * (sizeof(NvU8) /*pBuffer[i]*/) /*pBuffer*/)); + if (!api->pBuffer) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->pBuffer), (api->size) * (sizeof(NvU8) /*pBuffer[i]*/) /*pBuffer*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->pBuffer) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `pBuffer`. + { + NvLength i; + for (i = 0; i < (api->size); ++i) + { + // Check field-presence indicator for `pBuffer[i]`. 
+ if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->pBuffer)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->pBuffer = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 6 field(s). +// 2 out of 4 independent field(s) are reordered to be before 2 dependent field(s). +static NV_STATUS finnSerializeMessage_NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS(const NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `pdeIndex`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->pdeIndex, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `flags`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->flags, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `hVASpace`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->hVASpace, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `subDeviceId`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->subDeviceId, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize each element in `ptParams`. + { + NvLength i; + for (i = 0; i < NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX__SIZE; ++i) + { + // Serialize field-presence indicator for `ptParams[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Record has 3 field(s) to be serialized. + { + NV_STATUS status = finnSerializeRecord_NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS((api->ptParams+(i)), bp, seri_up); + if (status != NV_OK) + return status; + } + + } + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->pPdeBuffer), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->pPdeBuffer) + { + // Serialize each element in `pPdeBuffer`. + { + NvLength i; + for (i = 0; i < 1; ++i) + { + // Serialize field-presence indicator for `pPdeBuffer[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 64-bit NvU64 primitive. + if (finn_write_buffer(bp, ((*(NvU64 (*) [1])(NvP64_VALUE(api->pPdeBuffer))))[i], 64)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. 
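+ // (The buffer was FINN_MALLOC'd by the matching finnDeserializeMessage_* + // routine while deserializing down; serializing back up is the last point + // at which FINN owns it, so it is released here.)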
+ if (seri_up && api->pPdeBuffer) + FINN_FREE(NvP64_VALUE(api->pPdeBuffer)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 6 field(s). +// 2 out of 4 independent field(s) are reordered to be before 2 dependent field(s). +static NV_STATUS finnDeserializeMessage_NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `pdeIndex`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->pdeIndex = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `flags`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->flags = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `hVASpace`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->hVASpace = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `subDeviceId`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->subDeviceId = (NvU32) finn_read_buffer(bp, 32); + + // Deserialize each element in `ptParams`. + { + NvLength i; + for (i = 0; i < NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX__SIZE; ++i) + { + // Check field-presence indicator for `ptParams[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Record has 3 field(s) to be deserialized. + { + NV_STATUS status = finnDeserializeRecord_NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS(bp, (api->ptParams+(i)), sizeof(NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS), deser_up); + if (status != NV_OK) + return status; + } + + } + } + + // Check data-presence (nonnull pointer) indicator for `pPdeBuffer`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if (1 * (sizeof(NvU64) /*pPdeBuffer[i]*/) /*pPdeBuffer*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->pPdeBuffer = NV_PTR_TO_NvP64(FINN_MALLOC(1 * (sizeof(NvU64) /*pPdeBuffer[i]*/) /*pPdeBuffer*/)); + if (!api->pPdeBuffer) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->pPdeBuffer), 1 * (sizeof(NvU64) /*pPdeBuffer[i]*/) /*pPdeBuffer*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->pPdeBuffer) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `pPdeBuffer`. + { + NvLength i; + for (i = 0; i < 1; ++i) + { + // Check field-presence indicator for `pPdeBuffer[i]`. 
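+ // (`pPdeBuffer` is treated as a pointer to a one-element NvU64 array,
+ // hence the `NvU64 (*)[1]` cast and the fixed loop bound of 1 below.)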
+ if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 64-bit NvU64 primitive. + ((*(NvU64 (*) [1])(NvP64_VALUE(api->pPdeBuffer))))[i] = (NvU64) finn_read_buffer(bp, 64); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->pPdeBuffer = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 2 field(s). +static NV_STATUS finnSerializeMessage_NV0080_CTRL_FB_GET_CAPS_PARAMS(const NV0080_CTRL_FB_GET_CAPS_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `capsTblSize`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Reject source data if it is out of range. + if (api->capsTblSize > NV0080_CTRL_FB_CAPS_TBL_SIZE) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->capsTblSize, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->capsTbl), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->capsTbl) + { + // Serialize each element in `capsTbl`. + { + NvLength i; + for (i = 0; i < (api->capsTblSize); ++i) + { + // Serialize field-presence indicator for `capsTbl[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->capsTbl)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->capsTbl) + FINN_FREE(NvP64_VALUE(api->capsTbl)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 2 field(s). +static NV_STATUS finnDeserializeMessage_NV0080_CTRL_FB_GET_CAPS_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_FB_GET_CAPS_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV0080_CTRL_FB_GET_CAPS_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `capsTblSize`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->capsTblSize = (NvU32) finn_read_buffer(bp, 32); + + // Reject deserialized data if it is out of range. + if (api->capsTblSize > NV0080_CTRL_FB_CAPS_TBL_SIZE) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Check data-presence (nonnull pointer) indicator for `capsTbl`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. 
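+ // (A computed byte count of zero would contradict the presence bit just
+ // read, so it is rejected before reaching FINN_MALLOC.)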
+ if ((api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->capsTbl = NV_PTR_TO_NvP64(FINN_MALLOC((api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/)); + if (!api->capsTbl) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->capsTbl), (api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->capsTbl) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `capsTbl`. + { + NvLength i; + for (i = 0; i < (api->capsTblSize); ++i) + { + // Check field-presence indicator for `capsTbl[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->capsTbl)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->capsTbl = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 2 field(s). +static NV_STATUS finnSerializeMessage_NV0080_CTRL_FIFO_GET_CAPS_PARAMS(const NV0080_CTRL_FIFO_GET_CAPS_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `capsTblSize`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Reject source data if it is out of range. + if (api->capsTblSize > NV0080_CTRL_FIFO_CAPS_TBL_SIZE) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->capsTblSize, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->capsTbl), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->capsTbl) + { + // Serialize each element in `capsTbl`. + { + NvLength i; + for (i = 0; i < (api->capsTblSize); ++i) + { + // Serialize field-presence indicator for `capsTbl[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->capsTbl)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->capsTbl) + FINN_FREE(NvP64_VALUE(api->capsTbl)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 2 field(s). +static NV_STATUS finnDeserializeMessage_NV0080_CTRL_FIFO_GET_CAPS_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_FIFO_GET_CAPS_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV0080_CTRL_FIFO_GET_CAPS_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `capsTblSize`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. 
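+ // (`finn_read_buffer` doubles as the value reader: the same call that
+ // tests a presence bit returns the field's bits, narrowed by the cast.)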
+ api->capsTblSize = (NvU32) finn_read_buffer(bp, 32); + + // Reject deserialized data if it is out of range. + if (api->capsTblSize > NV0080_CTRL_FIFO_CAPS_TBL_SIZE) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Check data-presence (nonnull pointer) indicator for `capsTbl`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->capsTbl = NV_PTR_TO_NvP64(FINN_MALLOC((api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/)); + if (!api->capsTbl) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->capsTbl), (api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->capsTbl) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `capsTbl`. + { + NvLength i; + for (i = 0; i < (api->capsTblSize); ++i) + { + // Check field-presence indicator for `capsTbl[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->capsTbl)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->capsTbl = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 3 field(s). +static NV_STATUS finnSerializeMessage_NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS(const NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `numChannels`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->numChannels, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->pChannelHandleList), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->pChannelHandleList) + { + // Serialize each element in `pChannelHandleList`. + { + NvLength i; + for (i = 0; i < (api->numChannels); ++i) + { + // Serialize field-presence indicator for `pChannelHandleList[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, ((NvU32 *)(NvP64_VALUE(api->pChannelHandleList)))[i], 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->pChannelHandleList) + FINN_FREE(NvP64_VALUE(api->pChannelHandleList)); + } + + // Serialize data-presence (nonnull pointer) indicator. 
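+ // (`pChannelHandleList` and `pChannelList` are encoded independently,
+ // each behind its own nonnull indicator; `numChannels` was emitted first
+ // so the decoder knows both element counts before either payload.)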
+ if (finn_write_buffer(bp, !!(api->pChannelList), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->pChannelList) + { + // Serialize each element in `pChannelList`. + { + NvLength i; + for (i = 0; i < (api->numChannels); ++i) + { + // Serialize field-presence indicator for `pChannelList[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, ((NvU32 *)(NvP64_VALUE(api->pChannelList)))[i], 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->pChannelList) + FINN_FREE(NvP64_VALUE(api->pChannelList)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 3 field(s). +static NV_STATUS finnDeserializeMessage_NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `numChannels`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->numChannels = (NvU32) finn_read_buffer(bp, 32); + + // Check data-presence (nonnull pointer) indicator for `pChannelHandleList`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->numChannels) * (sizeof(NvU32) /*pChannelHandleList[i]*/) /*pChannelHandleList*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->pChannelHandleList = NV_PTR_TO_NvP64(FINN_MALLOC((api->numChannels) * (sizeof(NvU32) /*pChannelHandleList[i]*/) /*pChannelHandleList*/)); + if (!api->pChannelHandleList) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->pChannelHandleList), (api->numChannels) * (sizeof(NvU32) /*pChannelHandleList[i]*/) /*pChannelHandleList*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->pChannelHandleList) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `pChannelHandleList`. + { + NvLength i; + for (i = 0; i < (api->numChannels); ++i) + { + // Check field-presence indicator for `pChannelHandleList[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + ((NvU32 *)(NvP64_VALUE(api->pChannelHandleList)))[i] = (NvU32) finn_read_buffer(bp, 32); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->pChannelHandleList = NV_PTR_TO_NvP64(NULL); + } + + // Check data-presence (nonnull pointer) indicator for `pChannelList`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. 
+ // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->numChannels) * (sizeof(NvU32) /*pChannelList[i]*/) /*pChannelList*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->pChannelList = NV_PTR_TO_NvP64(FINN_MALLOC((api->numChannels) * (sizeof(NvU32) /*pChannelList[i]*/) /*pChannelList*/)); + if (!api->pChannelList) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->pChannelList), (api->numChannels) * (sizeof(NvU32) /*pChannelList[i]*/) /*pChannelList*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->pChannelList) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `pChannelList`. + { + NvLength i; + for (i = 0; i < (api->numChannels); ++i) + { + // Check field-presence indicator for `pChannelList[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + ((NvU32 *)(NvP64_VALUE(api->pChannelList)))[i] = (NvU32) finn_read_buffer(bp, 32); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->pChannelList = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 2 field(s). +static NV_STATUS finnSerializeMessage_NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS(const NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `numClasses`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->numClasses, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->classList), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->classList) + { + // Serialize each element in `classList`. + { + NvLength i; + for (i = 0; i < (api->numClasses); ++i) + { + // Serialize field-presence indicator for `classList[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, ((NvU32 *)(NvP64_VALUE(api->classList)))[i], 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->classList) + FINN_FREE(NvP64_VALUE(api->classList)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 2 field(s). +static NV_STATUS finnDeserializeMessage_NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `numClasses`. 
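+ // (`numClasses` is not checked against any NV0080-defined maximum after
+ // this read; the allocation below sizes itself from the wire value.)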
+ if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->numClasses = (NvU32) finn_read_buffer(bp, 32); + + // Check data-presence (nonnull pointer) indicator for `classList`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->numClasses) * (sizeof(NvU32) /*classList[i]*/) /*classList*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->classList = NV_PTR_TO_NvP64(FINN_MALLOC((api->numClasses) * (sizeof(NvU32) /*classList[i]*/) /*classList*/)); + if (!api->classList) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->classList), (api->numClasses) * (sizeof(NvU32) /*classList[i]*/) /*classList*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->classList) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `classList`. + { + NvLength i; + for (i = 0; i < (api->numClasses); ++i) + { + // Check field-presence indicator for `classList[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + ((NvU32 *)(NvP64_VALUE(api->classList)))[i] = (NvU32) finn_read_buffer(bp, 32); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->classList = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 2 field(s). +static NV_STATUS finnSerializeMessage_NV0080_CTRL_GR_GET_CAPS_PARAMS(const NV0080_CTRL_GR_GET_CAPS_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `capsTblSize`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->capsTblSize, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->capsTbl), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->capsTbl) + { + // Serialize each element in `capsTbl`. + { + NvLength i; + for (i = 0; i < (api->capsTblSize); ++i) + { + // Serialize field-presence indicator for `capsTbl[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->capsTbl)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->capsTbl) + FINN_FREE(NvP64_VALUE(api->capsTbl)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 2 field(s). 
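+ // Like its serializer above, this message applies no range check to
+ // `capsTblSize` before using it as the element count.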
+static NV_STATUS finnDeserializeMessage_NV0080_CTRL_GR_GET_CAPS_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_GR_GET_CAPS_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV0080_CTRL_GR_GET_CAPS_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `capsTblSize`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->capsTblSize = (NvU32) finn_read_buffer(bp, 32); + + // Check data-presence (nonnull pointer) indicator for `capsTbl`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->capsTbl = NV_PTR_TO_NvP64(FINN_MALLOC((api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/)); + if (!api->capsTbl) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->capsTbl), (api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->capsTbl) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `capsTbl`. + { + NvLength i; + for (i = 0; i < (api->capsTblSize); ++i) + { + // Check field-presence indicator for `capsTbl[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->capsTbl)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->capsTbl = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 2 field(s). +static NV_STATUS finnSerializeMessage_NV0080_CTRL_HOST_GET_CAPS_PARAMS(const NV0080_CTRL_HOST_GET_CAPS_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `capsTblSize`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Reject source data if it is out of range. + if (api->capsTblSize > NV0080_CTRL_HOST_CAPS_TBL_SIZE) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->capsTblSize, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->capsTbl), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->capsTbl) + { + // Serialize each element in `capsTbl`. + { + NvLength i; + for (i = 0; i < (api->capsTblSize); ++i) + { + // Serialize field-presence indicator for `capsTbl[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. 
+ if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->capsTbl)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->capsTbl) + FINN_FREE(NvP64_VALUE(api->capsTbl)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 2 field(s). +static NV_STATUS finnDeserializeMessage_NV0080_CTRL_HOST_GET_CAPS_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_HOST_GET_CAPS_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV0080_CTRL_HOST_GET_CAPS_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `capsTblSize`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->capsTblSize = (NvU32) finn_read_buffer(bp, 32); + + // Reject deserialized data if it is out of range. + if (api->capsTblSize > NV0080_CTRL_HOST_CAPS_TBL_SIZE) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Check data-presence (nonnull pointer) indicator for `capsTbl`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->capsTbl = NV_PTR_TO_NvP64(FINN_MALLOC((api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/)); + if (!api->capsTbl) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->capsTbl), (api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->capsTbl) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `capsTbl`. + { + NvLength i; + for (i = 0; i < (api->capsTblSize); ++i) + { + // Check field-presence indicator for `capsTbl[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->capsTbl)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->capsTbl = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 2 field(s). +static NV_STATUS finnSerializeMessage_NV0080_CTRL_MSENC_GET_CAPS_PARAMS(const NV0080_CTRL_MSENC_GET_CAPS_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `capsTblSize`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Reject source data if it is out of range. + if (api->capsTblSize > NV0080_CTRL_MSENC_CAPS_TBL_SIZE) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Serialize 32-bit NvU32 primitive. 
+ if (finn_write_buffer(bp, api->capsTblSize, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->capsTbl), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->capsTbl) + { + // Serialize each element in `capsTbl`. + { + NvLength i; + for (i = 0; i < (api->capsTblSize); ++i) + { + // Serialize field-presence indicator for `capsTbl[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->capsTbl)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->capsTbl) + FINN_FREE(NvP64_VALUE(api->capsTbl)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 2 field(s). +static NV_STATUS finnDeserializeMessage_NV0080_CTRL_MSENC_GET_CAPS_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_MSENC_GET_CAPS_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV0080_CTRL_MSENC_GET_CAPS_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `capsTblSize`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->capsTblSize = (NvU32) finn_read_buffer(bp, 32); + + // Reject deserialized data if it is out of range. + if (api->capsTblSize > NV0080_CTRL_MSENC_CAPS_TBL_SIZE) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Check data-presence (nonnull pointer) indicator for `capsTbl`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->capsTbl = NV_PTR_TO_NvP64(FINN_MALLOC((api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/)); + if (!api->capsTbl) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->capsTbl), (api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->capsTbl) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `capsTbl`. + { + NvLength i; + for (i = 0; i < (api->capsTblSize); ++i) + { + // Check field-presence indicator for `capsTbl[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->capsTbl)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. 
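+ // (On the `deser_up` path the pointer is caller-owned, so it is left
+ // untouched rather than nulled.)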
+ else + { + if (!deser_up) + api->capsTbl = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + +#endif // (defined(NVRM)) + +#if (defined(NVRM)) + +// Serialize each of the 8 field(s). +// 3 out of 7 independent field(s) are reordered to be before 1 dependent field(s). +static NV_STATUS finnSerializeMessage_NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS(const NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `globType`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 16-bit NvU16 primitive. + if (finn_write_buffer(bp, api->globType, 16)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `globIndex`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->globIndex, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `globSource`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 16-bit NvU16 primitive. + if (finn_write_buffer(bp, api->globSource, 16)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `retBufOffset`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->retBufOffset, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `retSize`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->retSize, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `totalObjSize`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->totalObjSize, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `errorCode`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->errorCode, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->retBuf), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->retBuf) + { + // Serialize each element in `retBuf`. + { + NvLength i; + for (i = 0; i < (api->retSize); ++i) + { + // Serialize field-presence indicator for `retBuf[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. 
+ if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->retBuf)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->retBuf) + FINN_FREE(NvP64_VALUE(api->retBuf)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 8 field(s). +// 3 out of 7 independent field(s) are reordered to be before 1 dependent field(s). +static NV_STATUS finnDeserializeMessage_NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV2080_CTRL_BIOS_GET_NBSI_OBJ_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `globType`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 16-bit NvU16 primitive. + api->globType = (NvU16) finn_read_buffer(bp, 16); + + // Check field-presence indicator for `globIndex`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->globIndex = (NvU8) finn_read_buffer(bp, 8); + + // Check field-presence indicator for `globSource`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 16-bit NvU16 primitive. + api->globSource = (NvU16) finn_read_buffer(bp, 16); + + // Check field-presence indicator for `retBufOffset`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->retBufOffset = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `retSize`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->retSize = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `totalObjSize`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->totalObjSize = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `errorCode`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->errorCode = (NvU32) finn_read_buffer(bp, 32); + + // Check data-presence (nonnull pointer) indicator for `retBuf`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. 
+ if ((api->retSize) * (sizeof(NvU8) /*retBuf[i]*/) /*retBuf*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->retBuf = NV_PTR_TO_NvP64(FINN_MALLOC((api->retSize) * (sizeof(NvU8) /*retBuf[i]*/) /*retBuf*/)); + if (!api->retBuf) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->retBuf), (api->retSize) * (sizeof(NvU8) /*retBuf[i]*/) /*retBuf*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->retBuf) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `retBuf`. + { + NvLength i; + for (i = 0; i < (api->retSize); ++i) + { + // Check field-presence indicator for `retBuf[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->retBuf)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->retBuf = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 3 field(s). +static NV_STATUS finnSerializeMessage_NV2080_CTRL_CE_GET_CAPS_PARAMS(const NV2080_CTRL_CE_GET_CAPS_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `ceEngineType`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->ceEngineType, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `capsTblSize`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Reject source data if it is out of range. + if (api->capsTblSize > NV2080_CTRL_CE_CAPS_TBL_SIZE) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->capsTblSize, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->capsTbl), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->capsTbl) + { + // Serialize each element in `capsTbl`. + { + NvLength i; + for (i = 0; i < (api->capsTblSize); ++i) + { + // Serialize field-presence indicator for `capsTbl[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->capsTbl)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->capsTbl) + FINN_FREE(NvP64_VALUE(api->capsTbl)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 3 field(s). +static NV_STATUS finnDeserializeMessage_NV2080_CTRL_CE_GET_CAPS_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_CE_GET_CAPS_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. 
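+ // (`api_size` is the capacity the caller reports for the destination
+ // struct, independent of how many bits remain in the pump.)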
+ if (sizeof(NV2080_CTRL_CE_GET_CAPS_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `ceEngineType`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->ceEngineType = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `capsTblSize`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->capsTblSize = (NvU32) finn_read_buffer(bp, 32); + + // Reject deserialized data if it is out of range. + if (api->capsTblSize > NV2080_CTRL_CE_CAPS_TBL_SIZE) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Check data-presence (nonnull pointer) indicator for `capsTbl`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->capsTbl = NV_PTR_TO_NvP64(FINN_MALLOC((api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/)); + if (!api->capsTbl) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->capsTbl), (api->capsTblSize) * (sizeof(NvU8) /*capsTbl[i]*/) /*capsTbl*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->capsTbl) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `capsTbl`. + { + NvLength i; + for (i = 0; i < (api->capsTblSize); ++i) + { + // Check field-presence indicator for `capsTbl[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->capsTbl)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->capsTbl = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 2 field(s). +static NV_STATUS finnSerializeMessage_NV2080_CTRL_GPU_GET_ENGINES_PARAMS(const NV2080_CTRL_GPU_GET_ENGINES_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `engineCount`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->engineCount, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->engineList), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->engineList) + { + // Serialize each element in `engineList`. + { + NvLength i; + for (i = 0; i < (api->engineCount); ++i) + { + // Serialize field-presence indicator for `engineList[i]`. 
+ if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, ((NvU32 *)(NvP64_VALUE(api->engineList)))[i], 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->engineList) + FINN_FREE(NvP64_VALUE(api->engineList)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 2 field(s). +static NV_STATUS finnDeserializeMessage_NV2080_CTRL_GPU_GET_ENGINES_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_GPU_GET_ENGINES_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV2080_CTRL_GPU_GET_ENGINES_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `engineCount`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->engineCount = (NvU32) finn_read_buffer(bp, 32); + + // Check data-presence (nonnull pointer) indicator for `engineList`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->engineCount) * (sizeof(NvU32) /*engineList[i]*/) /*engineList*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->engineList = NV_PTR_TO_NvP64(FINN_MALLOC((api->engineCount) * (sizeof(NvU32) /*engineList[i]*/) /*engineList*/)); + if (!api->engineList) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->engineList), (api->engineCount) * (sizeof(NvU32) /*engineList[i]*/) /*engineList*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->engineList) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `engineList`. + { + NvLength i; + for (i = 0; i < (api->engineCount); ++i) + { + // Check field-presence indicator for `engineList[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + ((NvU32 *)(NvP64_VALUE(api->engineList)))[i] = (NvU32) finn_read_buffer(bp, 32); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->engineList = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 3 field(s). +static NV_STATUS finnSerializeMessage_NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS(const NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `engineType`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->engineType, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `numClasses`. 
+ if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->numClasses, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->classList), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->classList) + { + // Serialize each element in `classList`. + { + NvLength i; + for (i = 0; i < (api->numClasses); ++i) + { + // Serialize field-presence indicator for `classList[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, ((NvU32 *)(NvP64_VALUE(api->classList)))[i], 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->classList) + FINN_FREE(NvP64_VALUE(api->classList)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 3 field(s). +static NV_STATUS finnDeserializeMessage_NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `engineType`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->engineType = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `numClasses`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->numClasses = (NvU32) finn_read_buffer(bp, 32); + + // Check data-presence (nonnull pointer) indicator for `classList`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->numClasses) * (sizeof(NvU32) /*classList[i]*/) /*classList*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->classList = NV_PTR_TO_NvP64(FINN_MALLOC((api->numClasses) * (sizeof(NvU32) /*classList[i]*/) /*classList*/)); + if (!api->classList) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->classList), (api->numClasses) * (sizeof(NvU32) /*classList[i]*/) /*classList*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->classList) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `classList`. + { + NvLength i; + for (i = 0; i < (api->numClasses); ++i) + { + // Check field-presence indicator for `classList[i]`. 
+ if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + ((NvU32 *)(NvP64_VALUE(api->classList)))[i] = (NvU32) finn_read_buffer(bp, 32); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->classList = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 5 field(s). +static NV_STATUS finnSerializeMessage_NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS(const NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `test`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->test, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `dataSize`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->dataSize, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `startTimestamp`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 64-bit NvU64 primitive. + if (finn_write_buffer(bp, api->startTimestamp, 64)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `stopTimestamp`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 64-bit NvU64 primitive. + if (finn_write_buffer(bp, api->stopTimestamp, 64)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->data), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->data) + { + // Serialize each element in `data`. + { + NvLength i; + for (i = 0; i < (api->dataSize); ++i) + { + // Serialize field-presence indicator for `data[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, ((NvU32 *)(NvP64_VALUE(api->data)))[i], 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->data) + FINN_FREE(NvP64_VALUE(api->data)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 5 field(s). +static NV_STATUS finnDeserializeMessage_NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `test`. 
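+ // (Fields arrive in the order the serializer above emitted them: test,
+ // dataSize, the two 64-bit timestamps, then the dependent `data` array.)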
+ if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->test = (NvU8) finn_read_buffer(bp, 8); + + // Check field-presence indicator for `dataSize`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->dataSize = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `startTimestamp`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 64-bit NvU64 primitive. + api->startTimestamp = (NvU64) finn_read_buffer(bp, 64); + + // Check field-presence indicator for `stopTimestamp`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 64-bit NvU64 primitive. + api->stopTimestamp = (NvU64) finn_read_buffer(bp, 64); + + // Check data-presence (nonnull pointer) indicator for `data`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->dataSize) * (sizeof(NvU32) /*data[i]*/) /*data*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->data = NV_PTR_TO_NvP64(FINN_MALLOC((api->dataSize) * (sizeof(NvU32) /*data[i]*/) /*data*/)); + if (!api->data) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->data), (api->dataSize) * (sizeof(NvU32) /*data[i]*/) /*data*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->data) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `data`. + { + NvLength i; + for (i = 0; i < (api->dataSize); ++i) + { + // Check field-presence indicator for `data[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + ((NvU32 *)(NvP64_VALUE(api->data)))[i] = (NvU32) finn_read_buffer(bp, 32); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->data = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 9 field(s). +// 4 out of 8 independent field(s) are reordered to be before 1 dependent field(s). +static NV_STATUS finnSerializeMessage_NV2080_CTRL_I2C_ACCESS_PARAMS(const NV2080_CTRL_I2C_ACCESS_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `token`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->token, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `cmd`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. 
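+    // finn_write_buffer() reports a full destination buffer by returning
+    // nonzero, which every write in this function funnels into the same
+    // NV_ERR_BUFFER_TOO_SMALL exit. A rough upper bound for sizing the
+    // write buffer of this message: 8*(1+32) scalar bits, plus 1
+    // data-presence bit, plus NV2080_CTRL_I2C_MAX_ENTRIES*(1+8) element
+    // bits, rounded up to whole bytes.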
+ if (finn_write_buffer(bp, api->cmd, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `port`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->port, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `flags`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->flags, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `status`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->status, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `dataBuffSize`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Reject source data if it is out of range. + if (api->dataBuffSize > NV2080_CTRL_I2C_MAX_ENTRIES) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->dataBuffSize, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `speed`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->speed, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `encrClientID`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->encrClientID, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->data), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->data) + { + // Serialize each element in `data`. + { + NvLength i; + for (i = 0; i < (api->dataBuffSize); ++i) + { + // Serialize field-presence indicator for `data[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->data)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->data) + FINN_FREE(NvP64_VALUE(api->data)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 9 field(s). +// 4 out of 8 independent field(s) are reordered to be before 1 dependent field(s). 
+static NV_STATUS finnDeserializeMessage_NV2080_CTRL_I2C_ACCESS_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_I2C_ACCESS_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV2080_CTRL_I2C_ACCESS_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `token`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->token = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `cmd`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->cmd = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `port`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->port = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `flags`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->flags = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `status`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->status = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `dataBuffSize`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->dataBuffSize = (NvU32) finn_read_buffer(bp, 32); + + // Reject deserialized data if it is out of range. + if (api->dataBuffSize > NV2080_CTRL_I2C_MAX_ENTRIES) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Check field-presence indicator for `speed`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->speed = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `encrClientID`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->encrClientID = (NvU32) finn_read_buffer(bp, 32); + + // Check data-presence (nonnull pointer) indicator for `data`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. 
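+            // Ownership convention: deserializing "down" (deser_up ==
+            // NV_FALSE), FINN allocates and zeroes the element buffer
+            // itself; deserializing "up", the caller must have attached
+            // valid storage beforehand. The matching serializer frees
+            // FINN-owned buffers when seri_up is set, closing the round
+            // trip without leaks. Caller-side sketch (hypothetical buffer
+            // `myBuf`):
+            //
+            //     /* down: FINN owns the allocation */
+            //     finnDeserializeMessage_...(bp, api, sizeof(*api), NV_FALSE);
+            //     /* up: caller supplies the storage first */
+            //     api->data = NV_PTR_TO_NvP64(myBuf);
+            //     finnDeserializeMessage_...(bp, api, sizeof(*api), NV_TRUE);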
+ if ((api->dataBuffSize) * (sizeof(NvU8) /*data[i]*/) /*data*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->data = NV_PTR_TO_NvP64(FINN_MALLOC((api->dataBuffSize) * (sizeof(NvU8) /*data[i]*/) /*data*/)); + if (!api->data) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->data), (api->dataBuffSize) * (sizeof(NvU8) /*data[i]*/) /*data*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->data) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `data`. + { + NvLength i; + for (i = 0; i < (api->dataBuffSize); ++i) + { + // Check field-presence indicator for `data[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->data)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->data = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 3 field(s). +// 2 out of 2 independent field(s) are reordered to be before 1 dependent field(s). +static NV_STATUS finnSerializeMessage_NV2080_CTRL_NVD_GET_DUMP_PARAMS(const NV2080_CTRL_NVD_GET_DUMP_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `component`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->component, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `size`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->size, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->pBuffer), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->pBuffer) + { + // Serialize each element in `pBuffer`. + { + NvLength i; + for (i = 0; i < (api->size); ++i) + { + // Serialize field-presence indicator for `pBuffer[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->pBuffer)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->pBuffer) + FINN_FREE(NvP64_VALUE(api->pBuffer)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 3 field(s). +// 2 out of 2 independent field(s) are reordered to be before 1 dependent field(s). +static NV_STATUS finnDeserializeMessage_NV2080_CTRL_NVD_GET_DUMP_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_NVD_GET_DUMP_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. 
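+    // `api_size` is the caller-declared capacity of the destination
+    // struct, not the encoded length; the sizeof() check below rejects an
+    // undersized destination before any field is written, so a decode can
+    // never scribble past `api`. Callers typically pass sizeof(*api), as
+    // the nested-message call sites later in this file do.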
+ if (sizeof(NV2080_CTRL_NVD_GET_DUMP_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `component`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->component = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `size`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->size = (NvU32) finn_read_buffer(bp, 32); + + // Check data-presence (nonnull pointer) indicator for `pBuffer`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->size) * (sizeof(NvU8) /*pBuffer[i]*/) /*pBuffer*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->pBuffer = NV_PTR_TO_NvP64(FINN_MALLOC((api->size) * (sizeof(NvU8) /*pBuffer[i]*/) /*pBuffer*/)); + if (!api->pBuffer) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->pBuffer), (api->size) * (sizeof(NvU8) /*pBuffer[i]*/) /*pBuffer*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->pBuffer) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `pBuffer`. + { + NvLength i; + for (i = 0; i < (api->size); ++i) + { + // Check field-presence indicator for `pBuffer[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->pBuffer)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->pBuffer = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 4 field(s). +// 1 out of 3 independent field(s) are reordered to be before 1 dependent field(s). +static NV_STATUS finnSerializeMessage_NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS(const NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `hChannel`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->hChannel, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `virtAddress`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 64-bit NvU64 primitive. + if (finn_write_buffer(bp, api->virtAddress, 64)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `bufferSize`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. 
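+    // `bufferSize` is serialized ahead of `bufferPtr` (the reordering
+    // noted in this function's header comment) so that the element count
+    // is already decoded when the dependent array arrives; presence bits
+    // alone could not recover the count.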
+ if (finn_write_buffer(bp, api->bufferSize, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->bufferPtr), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->bufferPtr) + { + // Serialize each element in `bufferPtr`. + { + NvLength i; + for (i = 0; i < (api->bufferSize); ++i) + { + // Serialize field-presence indicator for `bufferPtr[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->bufferPtr)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->bufferPtr) + FINN_FREE(NvP64_VALUE(api->bufferPtr)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 4 field(s). +// 1 out of 3 independent field(s) are reordered to be before 1 dependent field(s). +static NV_STATUS finnDeserializeMessage_NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS(finn_bit_pump_for_read *bp, NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `hChannel`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->hChannel = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `virtAddress`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 64-bit NvU64 primitive. + api->virtAddress = (NvU64) finn_read_buffer(bp, 64); + + // Check field-presence indicator for `bufferSize`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->bufferSize = (NvU32) finn_read_buffer(bp, 32); + + // Check data-presence (nonnull pointer) indicator for `bufferPtr`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->bufferSize) * (sizeof(NvU8) /*bufferPtr[i]*/) /*bufferPtr*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->bufferPtr = NV_PTR_TO_NvP64(FINN_MALLOC((api->bufferSize) * (sizeof(NvU8) /*bufferPtr[i]*/) /*bufferPtr*/)); + if (!api->bufferPtr) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->bufferPtr), (api->bufferSize) * (sizeof(NvU8) /*bufferPtr[i]*/) /*bufferPtr*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->bufferPtr) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `bufferPtr`. 
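+        // `bufferPtr` travels in the struct as an NvP64 so one layout
+        // serves 32- and 64-bit clients alike; NvP64_VALUE() recovers the
+        // native pointer before indexing. Equivalent spelled-out form of
+        // the element read used below:
+        //
+        //     NvU8 *p = (NvU8 *) NvP64_VALUE(api->bufferPtr);
+        //     p[i] = (NvU8) finn_read_buffer(bp, 8);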
+ { + NvLength i; + for (i = 0; i < (api->bufferSize); ++i) + { + // Check field-presence indicator for `bufferPtr[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->bufferPtr)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->bufferPtr = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + +#endif // (defined(NVRM)) + +#if (defined(NVRM)) + +// Serialize each of the 8 field(s). +// 1 out of 6 independent field(s) are reordered to be before 2 dependent field(s). +static NV_STATUS finnSerializeMessage_NV402C_CTRL_I2C_INDEXED_PARAMS(const NV402C_CTRL_I2C_INDEXED_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `portId`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->portId, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `bIsWrite`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->bIsWrite, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `address`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 16-bit NvU16 primitive. + if (finn_write_buffer(bp, api->address, 16)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `flags`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->flags, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `indexLength`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Reject source data if it is out of range. + if (api->indexLength > NV402C_CTRL_I2C_INDEX_LENGTH_MAX) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->indexLength, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `messageLength`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->messageLength, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize each element in `index`. + { + NvLength i; + for (i = 0; i < NV402C_CTRL_I2C_INDEX_LENGTH_MAX; ++i) + { + // Serialize field-presence indicator for `index[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. 
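+            // `index` is a fixed-size embedded array, so unlike the NvP64
+            // buffers it has no data-presence bit and is always written in
+            // full: NV402C_CTRL_I2C_INDEX_LENGTH_MAX elements of (1+8)
+            // bits each, regardless of the current indexLength.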
+ if (finn_write_buffer(bp, (api->index)[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->pMessage), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->pMessage) + { + // Serialize each element in `pMessage`. + { + NvLength i; + for (i = 0; i < (api->messageLength); ++i) + { + // Serialize field-presence indicator for `pMessage[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->pMessage)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->pMessage) + FINN_FREE(NvP64_VALUE(api->pMessage)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 8 field(s). +// 1 out of 6 independent field(s) are reordered to be before 2 dependent field(s). +static NV_STATUS finnDeserializeMessage_NV402C_CTRL_I2C_INDEXED_PARAMS(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_INDEXED_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV402C_CTRL_I2C_INDEXED_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `portId`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->portId = (NvU8) finn_read_buffer(bp, 8); + + // Check field-presence indicator for `bIsWrite`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->bIsWrite = (NvU8) finn_read_buffer(bp, 8); + + // Check field-presence indicator for `address`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 16-bit NvU16 primitive. + api->address = (NvU16) finn_read_buffer(bp, 16); + + // Check field-presence indicator for `flags`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->flags = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `indexLength`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->indexLength = (NvU32) finn_read_buffer(bp, 32); + + // Reject deserialized data if it is out of range. + if (api->indexLength > NV402C_CTRL_I2C_INDEX_LENGTH_MAX) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Check field-presence indicator for `messageLength`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->messageLength = (NvU32) finn_read_buffer(bp, 32); + + // Deserialize each element in `index`. 
+ { + NvLength i; + for (i = 0; i < NV402C_CTRL_I2C_INDEX_LENGTH_MAX; ++i) + { + // Check field-presence indicator for `index[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + (api->index)[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + // Check data-presence (nonnull pointer) indicator for `pMessage`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->pMessage = NV_PTR_TO_NvP64(FINN_MALLOC((api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/)); + if (!api->pMessage) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->pMessage), (api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->pMessage) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `pMessage`. + { + NvLength i; + for (i = 0; i < (api->messageLength); ++i) + { + // Check field-presence indicator for `pMessage[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->pMessage)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->pMessage = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 5 field(s). +static NV_STATUS finnSerializeMessage_NV402C_CTRL_I2C_TRANSACTION_PARAMS(const NV402C_CTRL_I2C_TRANSACTION_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `portId`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->portId, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `flags`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->flags, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `deviceAddress`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 16-bit NvU16 primitive. + if (finn_write_buffer(bp, api->deviceAddress, 16)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `transType`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Validate the enum value. 
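+    // `transType` doubles as the tag for the `transData` union: it is
+    // range-checked below before being written, re-checked on the read
+    // side after being decoded, and only then used to select among the
+    // union's 11 member layouts, so a corrupt tag can never steer the
+    // union decode.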
+ if (finnBadEnum_NV402C_CTRL_I2C_TRANSACTION_TYPE(api->transType)) + { + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + return NV_ERR_INVALID_ARGUMENT; + } + + // Serialize NV402C_CTRL_I2C_TRANSACTION_TYPE enum. + if (finn_write_buffer(bp, api->transType, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `transData`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // `transData` has 11 cases to serialize. + { + NV_STATUS status = finnSerializeUnion_NV402C_CTRL_I2C_TRANSACTION_DATA(&api->transData, bp, seri_up, api->transType); + if (status != NV_OK) + return status; + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 5 field(s). +static NV_STATUS finnDeserializeMessage_NV402C_CTRL_I2C_TRANSACTION_PARAMS(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `portId`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->portId = (NvU8) finn_read_buffer(bp, 8); + + // Check field-presence indicator for `flags`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->flags = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `deviceAddress`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 16-bit NvU16 primitive. + api->deviceAddress = (NvU16) finn_read_buffer(bp, 16); + + // Check field-presence indicator for `transType`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize NV402C_CTRL_I2C_TRANSACTION_TYPE enum. + api->transType = (NV402C_CTRL_I2C_TRANSACTION_TYPE) finn_read_buffer(bp, 32); + + // Validate the enum value. + if (finnBadEnum_NV402C_CTRL_I2C_TRANSACTION_TYPE(api->transType)) + { + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + return NV_ERR_INVALID_ARGUMENT; + } + + // Check field-presence indicator for `transData`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // `transData` has 11 cases to deserialize. + { + NV_STATUS status = finnDeserializeUnion_NV402C_CTRL_I2C_TRANSACTION_DATA(bp, &api->transData, api_size, deser_up, api->transType); + if (status != NV_OK) + return status; + } + + // Done + return NV_OK; +} + + +// Serialize each of the 4 field(s). +static NV_STATUS finnSerializeMessage_NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS(const NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `hMemory`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. 
+ if (finn_write_buffer(bp, api->hMemory, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `length`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->length, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `offset`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 64-bit NvU64 primitive. + if (finn_write_buffer(bp, api->offset, 64)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->buffer), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->buffer) + { + // Serialize each element in `buffer`. + { + NvLength i; + for (i = 0; i < (api->length); ++i) + { + // Serialize field-presence indicator for `buffer[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->buffer)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->buffer) + FINN_FREE(NvP64_VALUE(api->buffer)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 4 field(s). +static NV_STATUS finnDeserializeMessage_NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS(finn_bit_pump_for_read *bp, NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `hMemory`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->hMemory = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `length`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->length = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `offset`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 64-bit NvU64 primitive. + api->offset = (NvU64) finn_read_buffer(bp, 64); + + // Check data-presence (nonnull pointer) indicator for `buffer`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. 
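+            // (A computed allocation size of zero can only mean the
+            // stream's data-presence bit and the just-read `length`
+            // disagree, so it is rejected as NV_ERR_BUFFER_TOO_SMALL
+            // before FINN_MALLOC runs.)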
+ if ((api->length) * (sizeof(NvU8) /*buffer[i]*/) /*buffer*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->buffer = NV_PTR_TO_NvP64(FINN_MALLOC((api->length) * (sizeof(NvU8) /*buffer[i]*/) /*buffer*/)); + if (!api->buffer) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->buffer), (api->length) * (sizeof(NvU8) /*buffer[i]*/) /*buffer*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->buffer) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `buffer`. + { + NvLength i; + for (i = 0; i < (api->length); ++i) + { + // Check field-presence indicator for `buffer[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->buffer)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->buffer = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 4 field(s). +static NV_STATUS finnSerializeMessage_NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS(const NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `hMemory`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->hMemory, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `length`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->length, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `offset`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 64-bit NvU64 primitive. + if (finn_write_buffer(bp, api->offset, 64)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->buffer), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->buffer) + { + // Serialize each element in `buffer`. + { + NvLength i; + for (i = 0; i < (api->length); ++i) + { + // Serialize field-presence indicator for `buffer[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->buffer)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->buffer) + FINN_FREE(NvP64_VALUE(api->buffer)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 4 field(s). 
+static NV_STATUS finnDeserializeMessage_NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS(finn_bit_pump_for_read *bp, NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `hMemory`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->hMemory = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `length`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->length = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `offset`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 64-bit NvU64 primitive. + api->offset = (NvU64) finn_read_buffer(bp, 64); + + // Check data-presence (nonnull pointer) indicator for `buffer`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->length) * (sizeof(NvU8) /*buffer[i]*/) /*buffer*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->buffer = NV_PTR_TO_NvP64(FINN_MALLOC((api->length) * (sizeof(NvU8) /*buffer[i]*/) /*buffer*/)); + if (!api->buffer) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->buffer), (api->length) * (sizeof(NvU8) /*buffer[i]*/) /*buffer*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->buffer) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `buffer`. + { + NvLength i; + for (i = 0; i < (api->length); ++i) + { + // Check field-presence indicator for `buffer[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->buffer)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->buffer = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 1 field(s). +static NV_STATUS finnSerializeMessage_NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS(const NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `params`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Message has 3 field(s) to be serialized. + { + NV_STATUS status = finnSerializeMessage_NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS(&api->params, bp, seri_up); + if (status != NV_OK) + return status; + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 1 field(s). 
+static NV_STATUS finnDeserializeMessage_NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS(finn_bit_pump_for_read *bp, NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NVB06F_CTRL_CMD_RESTORE_ENGINE_CTX_DATA_FINN_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `params`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Message has 3 field(s) to be deserialized. + { + NV_STATUS status = finnDeserializeMessage_NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS(bp, &api->params, sizeof(NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS), deser_up); + if (status != NV_OK) + return status; + } + + // Done + return NV_OK; +} + + +// Serialize each of the 3 field(s). +static NV_STATUS finnSerializeMessage_NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS(const NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `engineID`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->engineID, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `size`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->size, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->pEngineCtxBuff), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->pEngineCtxBuff) + { + // Serialize each element in `pEngineCtxBuff`. + { + NvLength i; + for (i = 0; i < (api->size); ++i) + { + // Serialize field-presence indicator for `pEngineCtxBuff[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->pEngineCtxBuff)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->pEngineCtxBuff) + FINN_FREE(NvP64_VALUE(api->pEngineCtxBuff)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 3 field(s). +static NV_STATUS finnDeserializeMessage_NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS(finn_bit_pump_for_read *bp, NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `engineID`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->engineID = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `size`. 
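+    // Composite messages such as the RESTORE_ENGINE_CTX_DATA wrapper
+    // above recurse into the generated handler for the embedded message,
+    // passing sizeof() of the inner struct as its api_size and forwarding
+    // any non-NV_OK status unchanged, so nesting adds no failure modes
+    // beyond the inner message's own.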
+ if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->size = (NvU32) finn_read_buffer(bp, 32); + + // Check data-presence (nonnull pointer) indicator for `pEngineCtxBuff`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->size) * (sizeof(NvU8) /*pEngineCtxBuff[i]*/) /*pEngineCtxBuff*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->pEngineCtxBuff = NV_PTR_TO_NvP64(FINN_MALLOC((api->size) * (sizeof(NvU8) /*pEngineCtxBuff[i]*/) /*pEngineCtxBuff*/)); + if (!api->pEngineCtxBuff) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->pEngineCtxBuff), (api->size) * (sizeof(NvU8) /*pEngineCtxBuff[i]*/) /*pEngineCtxBuff*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->pEngineCtxBuff) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `pEngineCtxBuff`. + { + NvLength i; + for (i = 0; i < (api->size); ++i) + { + // Check field-presence indicator for `pEngineCtxBuff[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->pEngineCtxBuff)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->pEngineCtxBuff = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 3 field(s). +static NV_STATUS finnSerializeMessage_NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS(const NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `engineID`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->engineID, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `size`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->size, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize each element in `engineCtxBuff`. + { + NvLength i; + for (i = 0; i < NVB06F_CTRL_ENGINE_CTX_BUFFER_SIZE_MAX; ++i) + { + // Serialize field-presence indicator for `engineCtxBuff[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, (api->engineCtxBuff)[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 3 field(s). 
+static NV_STATUS finnDeserializeMessage_NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS(finn_bit_pump_for_read *bp, NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NVB06F_CTRL_SAVE_ENGINE_CTX_DATA_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `engineID`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->engineID = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `size`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->size = (NvU32) finn_read_buffer(bp, 32); + + // Deserialize each element in `engineCtxBuff`. + { + NvLength i; + for (i = 0; i < NVB06F_CTRL_ENGINE_CTX_BUFFER_SIZE_MAX; ++i) + { + // Check field-presence indicator for `engineCtxBuff[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + (api->engineCtxBuff)[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + // Done + return NV_OK; +} + + +// Serialize each of the 3 field(s). +static NV_STATUS finnSerializeRecord_NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS(const NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `physAddr`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 64-bit NvU64 primitive. + if (finn_write_buffer(bp, api->physAddr, 64)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `numEntries`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->numEntries, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `aperture`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->aperture, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 3 field(s). +static NV_STATUS finnDeserializeRecord_NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS(finn_bit_pump_for_read *bp, NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `physAddr`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 64-bit NvU64 primitive. + api->physAddr = (NvU64) finn_read_buffer(bp, 64); + + // Check field-presence indicator for `numEntries`. 
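+    // The ...Record_ variants (this function and GPUMON_SAMPLES below)
+    // appear to differ from the ...Message_ variants only in naming and
+    // call sites (the field-by-field wire pattern is identical),
+    // presumably because these structs are embedded in or shared by
+    // other control calls.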
+ if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->numEntries = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `aperture`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->aperture = (NvU32) finn_read_buffer(bp, 32); + + // Done + return NV_OK; +} + + +// Serialize each of the 5 field(s). +static NV_STATUS finnSerializeRecord_NV2080_CTRL_GPUMON_SAMPLES(const NV2080_CTRL_GPUMON_SAMPLES *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `type`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->type, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `bufSize`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->bufSize, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `count`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->count, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `tracker`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->tracker, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->pSamples), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->pSamples) + { + // Serialize each element in `pSamples`. + { + NvLength i; + for (i = 0; i < (api->bufSize); ++i) + { + // Serialize field-presence indicator for `pSamples[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->pSamples)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->pSamples) + FINN_FREE(NvP64_VALUE(api->pSamples)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 5 field(s). +static NV_STATUS finnDeserializeRecord_NV2080_CTRL_GPUMON_SAMPLES(finn_bit_pump_for_read *bp, NV2080_CTRL_GPUMON_SAMPLES *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV2080_CTRL_GPUMON_SAMPLES) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `type`. 
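+    // A round trip through these routines is symmetric. Minimal caller
+    // sketch (bit-pump setup elided; `src` and `dst` are hypothetical,
+    // and the real pump initialization lives with the finn_bit_pump
+    // helpers this file assumes):
+    //
+    //     finn_bit_pump_for_write wp;  /* attached to a byte buffer */
+    //     finnSerializeRecord_NV2080_CTRL_GPUMON_SAMPLES(&src, &wp, NV_FALSE);
+    //     finn_bit_pump_for_read rp;   /* attached to the same bytes */
+    //     finnDeserializeRecord_NV2080_CTRL_GPUMON_SAMPLES(&rp, &dst,
+    //                                                      sizeof(dst), NV_FALSE);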
+ if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->type = (NvU8) finn_read_buffer(bp, 8); + + // Check field-presence indicator for `bufSize`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->bufSize = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `count`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->count = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `tracker`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->tracker = (NvU32) finn_read_buffer(bp, 32); + + // Check data-presence (nonnull pointer) indicator for `pSamples`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->bufSize) * (sizeof(NvU8) /*pSamples[i]*/) /*pSamples*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->pSamples = NV_PTR_TO_NvP64(FINN_MALLOC((api->bufSize) * (sizeof(NvU8) /*pSamples[i]*/) /*pSamples*/)); + if (!api->pSamples) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->pSamples), (api->bufSize) * (sizeof(NvU8) /*pSamples[i]*/) /*pSamples*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->pSamples) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `pSamples`. + { + NvLength i; + for (i = 0; i < (api->bufSize); ++i) + { + // Check field-presence indicator for `pSamples[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->pSamples)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->pSamples = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + +#endif // (defined(NVRM)) + +#if (defined(NVRM)) + +// Serialize each of the 3 field(s). +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `bWrite`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 1-bit NvBool primitive. + if (finn_write_buffer(bp, api->bWrite, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `messageLength`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. 
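+    // (The length travels ahead of the pMessage payload so that a peer
+    // deserializing downward can size its allocation before the element
+    // loop runs.)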
+ if (finn_write_buffer(bp, api->messageLength, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->pMessage), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->pMessage) + { + // Serialize each element in `pMessage`. + { + NvLength i; + for (i = 0; i < (api->messageLength); ++i) + { + // Serialize field-presence indicator for `pMessage[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->pMessage)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->pMessage) + FINN_FREE(NvP64_VALUE(api->pMessage)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 3 field(s). +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `bWrite`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 1-bit NvBool primitive. + api->bWrite = (NvBool) finn_read_buffer(bp, 1); + + // Check field-presence indicator for `messageLength`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->messageLength = (NvU32) finn_read_buffer(bp, 32); + + // Check data-presence (nonnull pointer) indicator for `pMessage`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->pMessage = NV_PTR_TO_NvP64(FINN_MALLOC((api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/)); + if (!api->pMessage) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->pMessage), (api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->pMessage) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `pMessage`. + { + NvLength i; + for (i = 0; i < (api->messageLength); ++i) + { + // Check field-presence indicator for `pMessage[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. 
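+            // (pMessage is an NvP64, a pointer carried in a 64-bit field so
+            // the struct layout matches across 32- and 64-bit clients; hence
+            // the NvP64_VALUE() unwrap before indexing.)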
+ ((NvU8 *)(NvP64_VALUE(api->pMessage)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->pMessage = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 5 field(s). +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `bWrite`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 1-bit NvBool primitive. + if (finn_write_buffer(bp, api->bWrite, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `warFlags`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->warFlags, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `registerAddress`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->registerAddress, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `messageLength`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->messageLength, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->pMessage), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->pMessage) + { + // Serialize each element in `pMessage`. + { + NvLength i; + for (i = 0; i < (api->messageLength); ++i) + { + // Serialize field-presence indicator for `pMessage[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->pMessage)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->pMessage) + FINN_FREE(NvP64_VALUE(api->pMessage)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 5 field(s). +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `bWrite`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 1-bit NvBool primitive. 
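+    // (NvBool occupies a single bit on the wire even though the in-memory
+    // type is wider; the cast normalizes the value read back.)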
+ api->bWrite = (NvBool) finn_read_buffer(bp, 1); + + // Check field-presence indicator for `warFlags`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->warFlags = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `registerAddress`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->registerAddress = (NvU8) finn_read_buffer(bp, 8); + + // Check field-presence indicator for `messageLength`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->messageLength = (NvU32) finn_read_buffer(bp, 32); + + // Check data-presence (nonnull pointer) indicator for `pMessage`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->pMessage = NV_PTR_TO_NvP64(FINN_MALLOC((api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/)); + if (!api->pMessage) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->pMessage), (api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->pMessage) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `pMessage`. + { + NvLength i; + for (i = 0; i < (api->messageLength); ++i) + { + // Check field-presence indicator for `pMessage[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->pMessage)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->pMessage = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 2 field(s). +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `bWrite`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 1-bit NvBool primitive. + if (finn_write_buffer(bp, api->bWrite, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `message`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->message, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 2 field(s). 
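+// A rough worked example of the encoding cost, assuming the 1-bit presence
+// indicator per field used throughout this file: bWrite costs 1+1 bits and
+// message costs 1+8 bits, so the whole record occupies 11 bits of stream.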
+static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `bWrite`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 1-bit NvBool primitive. + api->bWrite = (NvBool) finn_read_buffer(bp, 1); + + // Check field-presence indicator for `message`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->message = (NvU8) finn_read_buffer(bp, 8); + + // Done + return NV_OK; +} + + +// Serialize each of the 4 field(s). +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC(const NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `segmentNumber`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->segmentNumber, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `registerAddress`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->registerAddress, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `messageLength`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->messageLength, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->pMessage), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->pMessage) + { + // Serialize each element in `pMessage`. + { + NvLength i; + for (i = 0; i < (api->messageLength); ++i) + { + // Serialize field-presence indicator for `pMessage[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->pMessage)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->pMessage) + FINN_FREE(NvP64_VALUE(api->pMessage)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 4 field(s). +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. 
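+    // (api_size is the caller-reported capacity of the destination; the
+    // sizeof() comparison guards callers built against a smaller, older
+    // revision of the struct.)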
+ if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `segmentNumber`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->segmentNumber = (NvU8) finn_read_buffer(bp, 8); + + // Check field-presence indicator for `registerAddress`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->registerAddress = (NvU8) finn_read_buffer(bp, 8); + + // Check field-presence indicator for `messageLength`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->messageLength = (NvU32) finn_read_buffer(bp, 32); + + // Check data-presence (nonnull pointer) indicator for `pMessage`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->pMessage = NV_PTR_TO_NvP64(FINN_MALLOC((api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/)); + if (!api->pMessage) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->pMessage), (api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->pMessage) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `pMessage`. + { + NvLength i; + for (i = 0; i < (api->messageLength); ++i) + { + // Check field-presence indicator for `pMessage[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->pMessage)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->pMessage = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 5 field(s). +// 1 out of 3 independent field(s) are reordered to be before 2 dependent field(s). +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `registerAddress`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->registerAddress, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `writeMessageLength`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Reject source data if it is out of range. 
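+    // (The serializer enforces the same bound the deserializer applies, so
+    // out-of-range values are refused before they ever reach the wire.)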
+ if (api->writeMessageLength > NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->writeMessageLength, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `readMessageLength`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Reject source data if it is out of range. + if (api->readMessageLength > NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->readMessageLength, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize each element in `writeMessage`. + { + NvLength i; + for (i = 0; i < NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX; ++i) + { + // Serialize field-presence indicator for `writeMessage[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, (api->writeMessage)[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Serialize each element in `readMessage`. + { + NvLength i; + for (i = 0; i < NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX; ++i) + { + // Serialize field-presence indicator for `readMessage[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, (api->readMessage)[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 5 field(s). +// 1 out of 3 independent field(s) are reordered to be before 2 dependent field(s). +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `registerAddress`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->registerAddress = (NvU8) finn_read_buffer(bp, 8); + + // Check field-presence indicator for `writeMessageLength`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->writeMessageLength = (NvU32) finn_read_buffer(bp, 32); + + // Reject deserialized data if it is out of range. + if (api->writeMessageLength > NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Check field-presence indicator for `readMessageLength`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. 
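+    // (readMessageLength, like writeMessageLength above, is one of the
+    // "reordered" independent fields noted in this function's header: both
+    // lengths are consumed and range-checked before any message bytes.)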
+ api->readMessageLength = (NvU32) finn_read_buffer(bp, 32); + + // Reject deserialized data if it is out of range. + if (api->readMessageLength > NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Deserialize each element in `writeMessage`. + { + NvLength i; + for (i = 0; i < NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX; ++i) + { + // Check field-presence indicator for `writeMessage[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + (api->writeMessage)[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + // Deserialize each element in `readMessage`. + { + NvLength i; + for (i = 0; i < NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX; ++i) + { + // Check field-presence indicator for `readMessage[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + (api->readMessage)[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + // Done + return NV_OK; +} + + +// Serialize each of the 4 field(s). +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `bWrite`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 1-bit NvBool primitive. + if (finn_write_buffer(bp, api->bWrite, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `registerAddress`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->registerAddress, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `messageLength`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->messageLength, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->pMessage), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->pMessage) + { + // Serialize each element in `pMessage`. + { + NvLength i; + for (i = 0; i < (api->messageLength); ++i) + { + // Serialize field-presence indicator for `pMessage[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->pMessage)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->pMessage) + FINN_FREE(NvP64_VALUE(api->pMessage)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 4 field(s). 
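+// A minimal round-trip sketch for the serializer/deserializer pair below
+// (hypothetical caller; the bit pumps are assumed to be bound to a shared
+// buffer by setup code elsewhere):
+//
+//   finn_bit_pump_for_write wr;  // writer over a caller-owned buffer
+//   finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW(&src, &wr, NV_FALSE);
+//
+//   finn_bit_pump_for_read rd;   // reader over the same buffer
+//   finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW(&rd, &dst, sizeof(dst), NV_FALSE);
+//
+// With deser_up == NV_FALSE the deserializer allocates dst.pMessage via
+// FINN_MALLOC; with NV_TRUE the caller must supply that pointer up front.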
+static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `bWrite`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 1-bit NvBool primitive. + api->bWrite = (NvBool) finn_read_buffer(bp, 1); + + // Check field-presence indicator for `registerAddress`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->registerAddress = (NvU8) finn_read_buffer(bp, 8); + + // Check field-presence indicator for `messageLength`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->messageLength = (NvU32) finn_read_buffer(bp, 32); + + // Check data-presence (nonnull pointer) indicator for `pMessage`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->pMessage = NV_PTR_TO_NvP64(FINN_MALLOC((api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/)); + if (!api->pMessage) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->pMessage), (api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->pMessage) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `pMessage`. + { + NvLength i; + for (i = 0; i < (api->messageLength); ++i) + { + // Check field-presence indicator for `pMessage[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->pMessage)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->pMessage = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 3 field(s). +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `bWrite`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 1-bit NvBool primitive. + if (finn_write_buffer(bp, api->bWrite, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `registerAddress`. 
+ if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->registerAddress, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `message`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->message, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 3 field(s). +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `bWrite`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 1-bit NvBool primitive. + api->bWrite = (NvBool) finn_read_buffer(bp, 1); + + // Check field-presence indicator for `registerAddress`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->registerAddress = (NvU8) finn_read_buffer(bp, 8); + + // Check field-presence indicator for `message`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->message = (NvU8) finn_read_buffer(bp, 8); + + // Done + return NV_OK; +} + + +// Serialize each of the 6 field(s). +// 1 out of 4 independent field(s) are reordered to be before 2 dependent field(s). +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `bWrite`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 1-bit NvBool primitive. + if (finn_write_buffer(bp, api->bWrite, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `warFlags`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->warFlags, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `indexLength`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Reject source data if it is out of range. + if (api->indexLength > NV402C_CTRL_I2C_INDEX_LENGTH_MAX) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Serialize 32-bit NvU32 primitive. 
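+    // (indexLength was range-checked just above, so the value written here
+    // never exceeds NV402C_CTRL_I2C_INDEX_LENGTH_MAX, the fixed capacity of
+    // the `index` array serialized below.)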
+ if (finn_write_buffer(bp, api->indexLength, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `messageLength`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->messageLength, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize each element in `index`. + { + NvLength i; + for (i = 0; i < NV402C_CTRL_I2C_INDEX_LENGTH_MAX; ++i) + { + // Serialize field-presence indicator for `index[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, (api->index)[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Serialize data-presence (nonnull pointer) indicator. + if (finn_write_buffer(bp, !!(api->pMessage), 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Skip if pointer is null. + if (api->pMessage) + { + // Serialize each element in `pMessage`. + { + NvLength i; + for (i = 0; i < (api->messageLength); ++i) + { + // Serialize field-presence indicator for `pMessage[i]`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, ((NvU8 *)(NvP64_VALUE(api->pMessage)))[i], 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + } + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && api->pMessage) + FINN_FREE(NvP64_VALUE(api->pMessage)); + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 6 field(s). +// 1 out of 4 independent field(s) are reordered to be before 2 dependent field(s). +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `bWrite`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 1-bit NvBool primitive. + api->bWrite = (NvBool) finn_read_buffer(bp, 1); + + // Check field-presence indicator for `warFlags`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->warFlags = (NvU32) finn_read_buffer(bp, 32); + + // Check field-presence indicator for `indexLength`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->indexLength = (NvU32) finn_read_buffer(bp, 32); + + // Reject deserialized data if it is out of range. 
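+    // (Validated on the receiving side as well, so a corrupt or hostile
+    // stream cannot hand consumers of `index` a length larger than the
+    // array itself.)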
+ if (api->indexLength > NV402C_CTRL_I2C_INDEX_LENGTH_MAX) + { + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + return NV_ERR_OUT_OF_RANGE; + } + + // Check field-presence indicator for `messageLength`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->messageLength = (NvU32) finn_read_buffer(bp, 32); + + // Deserialize each element in `index`. + { + NvLength i; + for (i = 0; i < NV402C_CTRL_I2C_INDEX_LENGTH_MAX; ++i) + { + // Check field-presence indicator for `index[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + (api->index)[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + // Check data-presence (nonnull pointer) indicator for `pMessage`. + if (finn_read_buffer(bp, 1)) + { + // Allocate memory and set pointer when deserializing down. + // (Calling code is expected to do so when deserializing up.) + if (!deser_up) + { + // The data-presence indicator would have been false + // if there were no data to deserialize. + if ((api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/ < 1) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + api->pMessage = NV_PTR_TO_NvP64(FINN_MALLOC((api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/)); + if (!api->pMessage) + { + FINN_ERROR(NV_ERR_NO_MEMORY); + return NV_ERR_NO_MEMORY; + } + + FINN_MEMZERO(NvP64_VALUE(api->pMessage), (api->messageLength) * (sizeof(NvU8) /*pMessage[i]*/) /*pMessage*/); + } + + // Otherwise the pointer must be provided by caller. + else if (!api->pMessage) + { + FINN_ERROR(NV_ERR_INVALID_POINTER); + return NV_ERR_INVALID_POINTER; + } + + // Deserialize each element in `pMessage`. + { + NvLength i; + for (i = 0; i < (api->messageLength); ++i) + { + // Check field-presence indicator for `pMessage[i]`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + ((NvU8 *)(NvP64_VALUE(api->pMessage)))[i] = (NvU8) finn_read_buffer(bp, 8); + + } + } + + } + + // Nullify pointer only if FINN manages memory allocation. + else + { + if (!deser_up) + api->pMessage = NV_PTR_TO_NvP64(NULL); + } + + // Done + return NV_OK; +} + + +// Serialize each of the 3 field(s). +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `registerAddress`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->registerAddress, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `writeMessage`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 16-bit NvU16 primitive. + if (finn_write_buffer(bp, api->writeMessage, 16)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `readMessage`. 
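+    // (For an SMBus Process Call, writeMessage and readMessage are single
+    // 16-bit words rather than the variable-length buffers used by the
+    // block variants above.)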
+ if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 16-bit NvU16 primitive. + if (finn_write_buffer(bp, api->readMessage, 16)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 3 field(s). +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `registerAddress`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->registerAddress = (NvU8) finn_read_buffer(bp, 8); + + // Check field-presence indicator for `writeMessage`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 16-bit NvU16 primitive. + api->writeMessage = (NvU16) finn_read_buffer(bp, 16); + + // Check field-presence indicator for `readMessage`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 16-bit NvU16 primitive. + api->readMessage = (NvU16) finn_read_buffer(bp, 16); + + // Done + return NV_OK; +} + + +// Serialize each of the 2 field(s). +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `bWrite`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 1-bit NvBool primitive. + if (finn_write_buffer(bp, api->bWrite, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `warFlags`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 32-bit NvU32 primitive. + if (finn_write_buffer(bp, api->warFlags, 32)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 2 field(s). +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `bWrite`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 1-bit NvBool primitive. + api->bWrite = (NvBool) finn_read_buffer(bp, 1); + + // Check field-presence indicator for `warFlags`. 
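+    // (warFlags, workaround flags in the usual RM naming, is the only
+    // payload besides bWrite: an SMBus Quick transaction transfers no
+    // data byte.)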
+ if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 32-bit NvU32 primitive. + api->warFlags = (NvU32) finn_read_buffer(bp, 32); + + // Done + return NV_OK; +} + + +// Serialize each of the 3 field(s). +static NV_STATUS finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW *api, finn_bit_pump_for_write *bp, NvBool seri_up) +{ + // Serialize field-presence indicator for `bWrite`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 1-bit NvBool primitive. + if (finn_write_buffer(bp, api->bWrite, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `registerAddress`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 8-bit NvU8 primitive. + if (finn_write_buffer(bp, api->registerAddress, 8)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize field-presence indicator for `message`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Serialize 16-bit NvU16 primitive. + if (finn_write_buffer(bp, api->message, 16)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + + // Done + return NV_OK; +} + + +// Deserialize each of the 3 field(s). +static NV_STATUS finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW *api, NvLength api_size, NvBool deser_up) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Check field-presence indicator for `bWrite`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 1-bit NvBool primitive. + api->bWrite = (NvBool) finn_read_buffer(bp, 1); + + // Check field-presence indicator for `registerAddress`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 8-bit NvU8 primitive. + api->registerAddress = (NvU8) finn_read_buffer(bp, 8); + + // Check field-presence indicator for `message`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Deserialize 16-bit NvU16 primitive. + api->message = (NvU16) finn_read_buffer(bp, 16); + + // Done + return NV_OK; +} + +#endif // (defined(NVRM)) + +#if (defined(NVRM)) + +// Serialize selected field from 11 possible values. +static NV_STATUS finnSerializeUnion_NV402C_CTRL_I2C_TRANSACTION_DATA(const NV402C_CTRL_I2C_TRANSACTION_DATA *api, finn_bit_pump_for_write *bp, NvBool seri_up, NV402C_CTRL_I2C_TRANSACTION_TYPE transType) +{ + switch(transType) + { + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW: + { + // Serialize field-presence indicator for `api->smbusQuickData`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Record has 2 field(s) to be serialized. 
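+            // (Only the member selected by transType is written; the other
+            // union members never reach the wire, so the encoded size
+            // depends on the transaction type.)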
+ { + NV_STATUS status = finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW(&api->smbusQuickData, bp, seri_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW: + { + // Serialize field-presence indicator for `api->i2cByteData`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Record has 2 field(s) to be serialized. + { + NV_STATUS status = finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW(&api->i2cByteData, bp, seri_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW: + { + // Serialize field-presence indicator for `api->i2cBlockData`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Record has 3 field(s) to be serialized. + { + NV_STATUS status = finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW(&api->i2cBlockData, bp, seri_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW: + { + // Serialize field-presence indicator for `api->i2cBufferData`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Record has 5 field(s) to be serialized. + { + NV_STATUS status = finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW(&api->i2cBufferData, bp, seri_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW: + { + // Serialize field-presence indicator for `api->smbusByteData`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Record has 3 field(s) to be serialized. + { + NV_STATUS status = finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW(&api->smbusByteData, bp, seri_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW: + { + // Serialize field-presence indicator for `api->smbusWordData`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Record has 3 field(s) to be serialized. + { + NV_STATUS status = finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW(&api->smbusWordData, bp, seri_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW: + { + // Serialize field-presence indicator for `api->smbusBlockData`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Record has 4 field(s) to be serialized. + { + NV_STATUS status = finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW(&api->smbusBlockData, bp, seri_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL: + { + // Serialize field-presence indicator for `api->smbusProcessData`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Record has 3 field(s) to be serialized. 
+ { + NV_STATUS status = finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL(&api->smbusProcessData, bp, seri_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL: + { + // Serialize field-presence indicator for `api->smbusBlockProcessData`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Record has 5 field(s) to be serialized. + { + NV_STATUS status = finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL(&api->smbusBlockProcessData, bp, seri_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW: + { + // Serialize field-presence indicator for `api->smbusMultibyteRegisterData`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Record has 6 field(s) to be serialized. + { + NV_STATUS status = finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW(&api->smbusMultibyteRegisterData, bp, seri_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC: + { + // Serialize field-presence indicator for `api->edidData`. + if (finn_write_buffer(bp, 1, 1)) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Record has 4 field(s) to be serialized. + { + NV_STATUS status = finnSerializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC(&api->edidData, bp, seri_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + default: + { + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + return NV_ERR_INVALID_ARGUMENT; + } + } +} + + +// Deserialize selected field from 11 possible values. +static NV_STATUS finnDeserializeUnion_NV402C_CTRL_I2C_TRANSACTION_DATA(finn_bit_pump_for_read *bp, NV402C_CTRL_I2C_TRANSACTION_DATA *api, NvLength api_size, NvBool deser_up, NV402C_CTRL_I2C_TRANSACTION_TYPE transType) +{ + // Check that the destination struct fits within the destination buffer. + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA) > api_size) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + switch(transType) + { + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW: + { + // Check field-presence indicator for `api->smbusQuickData`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Record has 2 field(s) to be deserialized. + { + NV_STATUS status = finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW(bp, &api->smbusQuickData, sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW), deser_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW: + { + // Check field-presence indicator for `api->i2cByteData`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Record has 2 field(s) to be deserialized. 
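+            // (transType is not part of the union payload; the caller
+            // deserializes it separately and passes it in so the correct
+            // member can be selected here.)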
+ { + NV_STATUS status = finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW(bp, &api->i2cByteData, sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW), deser_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW: + { + // Check field-presence indicator for `api->i2cBlockData`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Record has 3 field(s) to be deserialized. + { + NV_STATUS status = finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW(bp, &api->i2cBlockData, sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW), deser_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW: + { + // Check field-presence indicator for `api->i2cBufferData`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Record has 5 field(s) to be deserialized. + { + NV_STATUS status = finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW(bp, &api->i2cBufferData, sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW), deser_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW: + { + // Check field-presence indicator for `api->smbusByteData`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Record has 3 field(s) to be deserialized. + { + NV_STATUS status = finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW(bp, &api->smbusByteData, sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW), deser_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW: + { + // Check field-presence indicator for `api->smbusWordData`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Record has 3 field(s) to be deserialized. + { + NV_STATUS status = finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW(bp, &api->smbusWordData, sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW), deser_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW: + { + // Check field-presence indicator for `api->smbusBlockData`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Record has 4 field(s) to be deserialized. + { + NV_STATUS status = finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW(bp, &api->smbusBlockData, sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW), deser_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL: + { + // Check field-presence indicator for `api->smbusProcessData`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Record has 3 field(s) to be deserialized. 
+ { + NV_STATUS status = finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL(bp, &api->smbusProcessData, sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL), deser_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL: + { + // Check field-presence indicator for `api->smbusBlockProcessData`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Record has 5 field(s) to be deserialized. + { + NV_STATUS status = finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL(bp, &api->smbusBlockProcessData, sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL), deser_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW: + { + // Check field-presence indicator for `api->smbusMultibyteRegisterData`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Record has 6 field(s) to be deserialized. + { + NV_STATUS status = finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW(bp, &api->smbusMultibyteRegisterData, sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW), deser_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + case NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC: + { + // Check field-presence indicator for `api->edidData`. + if (!finn_read_buffer(bp, 1)) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + // Record has 4 field(s) to be deserialized. + { + NV_STATUS status = finnDeserializeRecord_NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC(bp, &api->edidData, sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC), deser_up); + if (status != NV_OK) + return status; + } + + return NV_OK; // Success + } + + default: + { + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + return NV_ERR_INVALID_ARGUMENT; + } + } +} + + +#endif // (defined(NVRM)) diff --git a/src/nvidia/nv-kernel.ld b/src/nvidia/nv-kernel.ld new file mode 100644 index 0000000..89ce366 --- /dev/null +++ b/src/nvidia/nv-kernel.ld @@ -0,0 +1,35 @@ +/* + * resman linker script + * + * Linking nv-kernel.o has several problems: + * + * (1) We build with '-ffunction-sections -fdata-sections' to put each + * function and data into separate ELF sections, so that the linker + * can distinguish separate functions and garbage collect dead code + * ('--gc-sections'). The linker is supposed to then merge sections + * together (e.g., all the ".text.*" into ".text", all the ".data.*" + * sections into ".data"). The linker doesn't seem to do this when + * linking a relocatable object file. + * + * (2) g++ puts inline functions, vtables, template functions, etc, in + * separate ".gnu.linkonce.*" sections. Duplicates are supposed to get + * collapsed at link time. The linker doesn't seem to do this when + * linking a relocatable object file. + * + * Resolve both of these problems by defining our own naive linker + * script to do the merging described above. 
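+ *
+ * A relocatable link using this script would look roughly like the
+ * following (illustrative only; the real invocation and exact flags
+ * live in the build makefiles):
+ *
+ *   ld -r --gc-sections -T nv-kernel.ld -o nv-kernel.o <objects...>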
+ */
+
+SECTIONS {
+
+    .text : { *(.text) *(.text.*) *(.gnu.linkonce.t.*) }
+
+    .data : { *(.data) *(.data.*) }
+
+    .rodata : { *(.rodata) *(.rodata.*) *(.gnu.linkonce.r.*) }
+
+    .bss : { *(.bss) *(.bss.*) }
+
+    /* The rest of the sections ("orphaned sections") will just be copied from
+       the input to the output */
+}
diff --git a/src/nvidia/src/kernel/core/hal/hal.c b/src/nvidia/src/kernel/core/hal/hal.c
new file mode 100644
index 0000000..d94182e
--- /dev/null
+++ b/src/nvidia/src/kernel/core/hal/hal.c
@@ -0,0 +1,68 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "core/system.h"
+#include "core/hal_mgr.h"
+#include "core/hal.h"
+
+#include "g_hal_private.h"
+
+PMODULEDESCRIPTOR
+objhalGetModuleDescriptor_IMPL(OBJHAL *thisHal)
+{
+    return &thisHal->moduleDescriptor;
+}
+
+//
+// registerHalModule() is referenced by functions in the generated file
+// g_hal_private.h, so it is placed here instead of gt_hal_register.h to avoid
+// duplication of this function, as g_hal_private.h is included by several files.
+//
+NV_STATUS
+registerHalModule(NvU32 halImpl, const HAL_IFACE_SETUP *pHalSetIfaces)
+{
+    OBJSYS    *pSys = SYS_GET_INSTANCE();
+    OBJHALMGR *pHalMgr = SYS_GET_HALMGR(pSys);
+    OBJHAL    *pHal;
+    PMODULEDESCRIPTOR pMod;
+    NV_STATUS  rmStatus;
+
+    // create a HAL object
+    rmStatus = halmgrCreateHal(pHalMgr, halImpl);
+    if (rmStatus != NV_OK)
+    {
+        return rmStatus;
+    }
+
+    // retrieve the HAL object
+    pHal = HALMGR_GET_HAL(pHalMgr, halImpl);
+    NV_ASSERT(pHal);
+
+    // init the iface descriptor lists
+    pMod = objhalGetModuleDescriptor(pHal);
+
+    // point to rmconfig structure that can init our engines' interfaces
+    pMod->pHalSetIfaces = pHalSetIfaces;
+
+    return NV_OK;
+}
diff --git a/src/nvidia/src/kernel/core/hal/hals_all.c b/src/nvidia/src/kernel/core/hal/hals_all.c
new file mode 100644
index 0000000..18c9480
--- /dev/null
+++ b/src/nvidia/src/kernel/core/hal/hals_all.c
@@ -0,0 +1,59 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/***************************** HW State Routines ***************************\
+*                                                                           *
+*   Module: hals_all.c                                                      *
+*   Hal interface init routines for files generated by rmconfig             *
+*                                                                           *
+\***************************************************************************/
+
+#include "nvoc/object.h"
+#include "core/core.h"
+#include "gpu/gpu.h"
+#include "gpu/eng_desc.h"
+
+//
+// These defines affect what we see in halgen generated headers.
+//
+// If RMCFG_ENGINE_SETUP is not already defined, then setup for
+// a monolithic hal.
+//
+// The per-gpu-family hal setups #include this file with the RMCFG
+// setup defines already defined to pull in just the interfaces
+// needed for that gpu family.
+//
+
+#if ! defined(RMCFG_ENGINE_SETUP)
+
+#  define RMCFG_ENGINE_SETUP    1   // pull in per-gpu engine interfaces
+
+#  define RMCFG_HAL_SETUP_ALL   1   // monolithic - ALL configured gpus' support in this file
+#  define RMCFG_HAL_SUPPORT_ALL 1   // not required, but keeps us honest
+
+#endif // ! defined RMCFG_ENGINE_SETUP
+
+// Pull in generated code to setup each engine's hal interfaces for each gpu
+#include "g_hal_register.h"
+#include "g_hal_private.h"
+
diff --git a/src/nvidia/src/kernel/core/hal/info_block.c b/src/nvidia/src/kernel/core/hal/info_block.c
new file mode 100644
index 0000000..25ccd95
--- /dev/null
+++ b/src/nvidia/src/kernel/core/hal/info_block.c
@@ -0,0 +1,171 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * @file
+ * @brief Implementation for info block utility
+ */
+
+#include "os/os.h"
+#include "core/info_block.h"
+
+//
+// getInfoPtr
+//
+// Return pointer to HAL implementation-specific private data info block.
+//
+void *
+getInfoPtr(PENG_INFO_LINK_NODE head, NvU32 dataId)
+{
+    PENG_INFO_LINK_NODE curNode = head;
+
+    while (curNode && (curNode->dataId != dataId))
+        curNode = curNode->next;
+
+    if (curNode == NULL)
+        return NULL;
+
+    return curNode->infoBlock;
+}
+
+//
+// testInfoPtr
+//
+// Returns whether the HAL implementation-specific private data info block is allocated.
+//
+NvBool
+testInfoPtr(PENG_INFO_LINK_NODE head, NvU32 dataId)
+{
+    PENG_INFO_LINK_NODE curNode = head;
+
+    while (curNode && (curNode->dataId != dataId))
+        curNode = curNode->next;
+
+    if (curNode == NULL)
+        return NV_FALSE;
+
+    return NV_TRUE;
+}
+
+//
+// createLinkNode
+//
+// Allocate and initialize new info block.
+//
+static PENG_INFO_LINK_NODE
+createLinkNode(NvU32 dataId, NvU32 size)
+{
+    PENG_INFO_LINK_NODE newNode;
+    NV_STATUS rmStatus;
+
+    newNode = portMemAllocNonPaged(sizeof(ENG_INFO_LINK_NODE));
+    if (newNode == NULL)
+    {
+        rmStatus = NV_ERR_NO_MEMORY;
+        NV_ASSERT(rmStatus == NV_OK);
+        return NULL;
+    }
+
+    portMemSet(newNode, 0, sizeof(ENG_INFO_LINK_NODE));
+
+    newNode->infoBlock = portMemAllocNonPaged(size);
+    if (newNode->infoBlock == NULL)
+    {
+        rmStatus = NV_ERR_NO_MEMORY;
+        portMemFree(newNode);
+        NV_ASSERT(rmStatus == NV_OK);
+        return NULL;
+    }
+
+    portMemSet(newNode->infoBlock, 0, size);
+
+    newNode->dataId = dataId;
+
+    return newNode;
+}
+
+//
+// addInfoPtr
+//
+// Create new HAL private data block and add it to specified list.
+//
+void *
+addInfoPtr(PENG_INFO_LINK_NODE *head, NvU32 dataId, NvU32 size)
+{
+    PENG_INFO_LINK_NODE curNode = *head;
+    PENG_INFO_LINK_NODE newNode = createLinkNode(dataId, size);
+
+    if (newNode == NULL)
+        return NULL;
+
+    while (curNode && curNode->next)
+        curNode = curNode->next;
+
+    if (!curNode)
+        *head = newNode;
+    else
+        curNode->next = newNode;
+
+    return newNode->infoBlock;
+}
+
+//
+// deleteInfoPtr
+//
+// Destroy HAL private data block and remove it from specified list.
+//
+void
+deleteInfoPtr(PENG_INFO_LINK_NODE *head, NvU32 dataId)
+{
+    PENG_INFO_LINK_NODE curNode = *head;
+
+    if (!curNode)
+        return;
+
+    // check list head
+    if (curNode->dataId == dataId)
+    {
+        *head = curNode->next;
+        NV_ASSERT(curNode->infoBlock);
+        portMemFree(curNode->infoBlock);
+        portMemFree(curNode);
+        return;
+    }
+
+    // search for it
+    while (curNode->next && (curNode->next->dataId != dataId))
+        curNode = curNode->next;
+
+    if (curNode->next)
+    {
+        PENG_INFO_LINK_NODE delNode;
+
+        delNode = curNode->next;
+        curNode->next = curNode->next->next;
+        NV_ASSERT(delNode->infoBlock);
+        portMemFree(delNode->infoBlock);
+        portMemFree(delNode);
+    }
+
+    return;
+}
diff --git a/src/nvidia/src/kernel/core/hal_mgr.c b/src/nvidia/src/kernel/core/hal_mgr.c
new file mode 100644
index 0000000..78e6c0d
--- /dev/null
+++ b/src/nvidia/src/kernel/core/hal_mgr.c
@@ -0,0 +1,229 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/hal_mgr.h" +#include "core/hal.h" + +#include "g_hal_archimpl.h" + +// NOTE: string arguments only get used when NV_PRINTF_STRINGS_ALLOWED is true. +#if NV_PRINTF_STRINGS_ALLOWED +static const char *_halmgrGetStringRepForHalImpl(OBJHALMGR *pHalMgr, HAL_IMPLEMENTATION); +#endif + +NV_STATUS +halmgrConstruct_IMPL +( + OBJHALMGR *pHalMgr +) +{ + HAL_IMPLEMENTATION i; + + // + // Make sure all the possible handles to the Hal Objects + // have been zeroed out. Also initialize the implementation + // and public id's in the hal descriptor list. + // + for (i = 0; i < HAL_IMPL_MAXIMUM; i++) + pHalMgr->pHalList[i] = NULL; + + return NV_OK; +} + +void +halmgrDestruct_IMPL +( + OBJHALMGR *pHalMgr +) +{ + NvU32 i; + + for (i = 0; i < HAL_IMPL_MAXIMUM; i++) + { + objDelete(pHalMgr->pHalList[i]); + pHalMgr->pHalList[i] = NULL; + } +} + +NV_STATUS +halmgrCreateHal_IMPL +( + OBJHALMGR *pHalMgr, + NvU32 halImpl +) +{ + OBJHAL *pHal; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(halImpl < HAL_IMPL_MAXIMUM, NV_ERR_INVALID_ARGUMENT); + + status = objCreate(&pHal, pHalMgr, OBJHAL); + if (status != NV_OK) + return status; + + // Store away the object pointer for this particular HAL object + pHalMgr->pHalList[halImpl] = pHal; + + return NV_OK; +} + +OBJHAL * +halmgrGetHal_IMPL +( + OBJHALMGR *pHalMgr, + NvU32 halImpl +) +{ + if (halImpl < HAL_IMPL_MAXIMUM) + return pHalMgr->pHalList[halImpl]; + else + return NULL; +} + +static NvBool +_halmgrIsTegraSupported +( + NvU32 publicHalID, + NvU32 socChipID +) +{ + NvU32 chipid, majorRev; + + chipid = DRF_VAL(_PAPB_MISC, _GP_HIDREV, _CHIPID, socChipID); + majorRev = DRF_VAL(_PAPB_MISC, _GP_HIDREV, _MAJORREV, socChipID); + + // WAR: The majorrev of t234 shows 0xa on fmodel instead of 0x4 + if ((chipid == 0x23) && (majorRev == 0xa)) + { + majorRev = 0x4; + } + + // Convert to the HIDREV field format of chip-config + return ((chipid << 4) | majorRev) == chipID[publicHalID].hidrev; +} + +static NvBool +_halmgrIsChipSupported +( + OBJHALMGR *pHalMgr, + NvU32 publicHalID, + NvU32 pPmcBoot0, + NvU32 pPmcBoot42 +) +{ + NvBool retVal = NV_FALSE; + + if (chipID[publicHalID].hidrev) + return _halmgrIsTegraSupported(publicHalID, pPmcBoot0); + + if (pPmcBoot42) + { + if ((decodePmcBoot42Architecture(pPmcBoot42) == chipID[publicHalID].arch) && + (DRF_VAL(_PMC, _BOOT_42, _IMPLEMENTATION, pPmcBoot42) == chipID[publicHalID].impl)) + { + retVal = NV_TRUE; + } + } 
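+    //
+    // (In other words: a GPU matches this HAL only when both the decoded
+    // architecture and the IMPLEMENTATION field of PMC_BOOT_42 equal the
+    // arch/impl pair that rmconfig recorded in chipID[publicHalID].)
+    //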
+ else + { + // Fail safely on older GPUs where pPmcBoot42 is not supported + retVal = NV_FALSE; + } + + return retVal; +} + +NV_STATUS +halmgrGetHalForGpu_IMPL +( + OBJHALMGR *pHalMgr, + NvU32 pPmcBoot0, + NvU32 pPmcBoot42, + NvU32 *pHalImpl +) +{ + HAL_IMPLEMENTATION halImpl; + OBJHAL *pHal; + + for (halImpl = 0; halImpl < HAL_IMPL_MAXIMUM; halImpl++) + { + pHal = pHalMgr->pHalList[halImpl]; + + // skip impls that have no hal object + if (pHal == NULL) + continue; + + if (_halmgrIsChipSupported(pHalMgr, halImpl, pPmcBoot0, pPmcBoot42)) + { + *pHalImpl = halImpl; + +#if NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_INFO, + "Matching %s = 0x%x to HAL_IMPL_%s\n", + pPmcBoot42 ? "PMC_BOOT_42" : "PMC_BOOT_0", + pPmcBoot42 ? pPmcBoot42 : pPmcBoot0, + _halmgrGetStringRepForHalImpl(pHalMgr, halImpl)); +#else // NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_INFO, + "Matching 0x%x to %u\n", + pPmcBoot42 ? pPmcBoot42 : pPmcBoot0, + halImpl); +#endif // NV_PRINTF_STRINGS_ALLOWED + + return NV_OK; + } + } + + return NV_ERR_NOT_SUPPORTED; +} + +// NOTE: string arguments only get used when NV_PRINTF_STRINGS_ALLOWED is true. +#if NV_PRINTF_STRINGS_ALLOWED +static const char * +_halmgrGetStringRepForHalImpl +( + OBJHALMGR *pHalMgr, + HAL_IMPLEMENTATION halImpl +) +{ + const char *chipName = "UNKNOWN"; + static const struct + { + HAL_IMPLEMENTATION halImpl; + const char *name; + } halImplNames[] = { HAL_IMPL_NAME_LIST }; // generated by rmconfig into g_hal.h + NvU32 i; + + for (i = 0; i < NV_ARRAY_ELEMENTS(halImplNames); i++) + { + if (halImplNames[i].halImpl == halImpl) + { + chipName = halImplNames[i].name; + break; + } + } + + return chipName; +} +#endif + diff --git a/src/nvidia/src/kernel/core/locks_common.c b/src/nvidia/src/kernel/core/locks_common.c new file mode 100644 index 0000000..7b74e31 --- /dev/null +++ b/src/nvidia/src/kernel/core/locks_common.c @@ -0,0 +1,289 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "core/locks.h" +#include "core/system.h" +#include "os/os.h" +#include "tls/tls.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/gpu.h" + +static NvBool s_bRmLocksAllocated = NV_FALSE; + +NV_STATUS +rmLocksAlloc(OBJSYS *pSys) +{ + NV_STATUS status; + + s_bRmLocksAllocated = NV_FALSE; + + // legacy lock model : RM system semaphore + status = osAllocRmSema(&pSys->pSema); + if (status != NV_OK) + return status; + + // RM_BASIC_LOCK_MODEL : GPU lock info (ISR/DPC synchronization) + status = rmGpuLockInfoInit(); + if (status != NV_OK) + { + osFreeRmSema(&pSys->pSema); + return status; + } + rmInitLockMetering(); + + s_bRmLocksAllocated = NV_TRUE; + + return status; +} + +void +rmLocksFree(OBJSYS *pSys) +{ + if (s_bRmLocksAllocated) + { + rmDestroyLockMetering(); + rmGpuLockInfoDestroy(); + osFreeRmSema(pSys->pSema); + + s_bRmLocksAllocated = NV_FALSE; + } +} + +/*! + * @brief Acquires all of the locks necessary to execute RM code safely + * + * Other threads and client APIs will be blocked from executing while the locks + * are held, so the locks should not be held longer than necessary. The locks + * should not be held across long HW delays. + * + * @returns NV_OK if locks are acquired successfully + * NV_ERR_INVALID_LOCK_STATE if locks cannot be acquired + */ +NV_STATUS +rmLocksAcquireAll(NvU32 module) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (osAcquireRmSemaForced(pSys->pSema) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to acquire the RM lock!\n"); + return NV_ERR_INVALID_LOCK_STATE; + } + + if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, module) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to acquire the API lock!\n"); + osReleaseRmSema(pSys->pSema, NULL); + return NV_ERR_INVALID_LOCK_STATE; + } + + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, module) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to acquire the GPU lock!\n"); + rmapiLockRelease(); + osReleaseRmSema(pSys->pSema, NULL); + return NV_ERR_INVALID_LOCK_STATE; + } + + return NV_OK; +} + +/*! 
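+ * Usage sketch (hypothetical caller, shown for illustration only):
+ *
+ *   if (rmLocksAcquireAll(RM_LOCK_MODULES_NONE) == NV_OK)
+ *   {
+ *       // ... touch state protected by the RM semaphore, API and GPU locks ...
+ *       rmLocksReleaseAll();
+ *   }
+ *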
+ * @brief Releases the locks acquired by rmLocksAcquireAll + */ +void +rmLocksReleaseAll(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + rmapiLockRelease(); + osReleaseRmSema(pSys->pSema, NULL); +} + + +NV_STATUS +workItemLocksAcquire(NvU32 gpuInstance, NvU32 flags, NvU32 *pReleaseLocks, NvU32 *pGpuMask) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPU *pGpu; + NvU32 grp; + NV_STATUS status = NV_OK; + + *pReleaseLocks = 0; + *pGpuMask = 0; + + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA) + { + status = osAcquireRmSema(pSys->pSema); + if (status != NV_OK) + goto done; + + *pReleaseLocks |= OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA; + } + + if ((flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO)) + { + NvU32 apiLockFlags = RMAPI_LOCK_FLAGS_NONE; + NvU32 releaseFlags = OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW; + + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO) + { + apiLockFlags = RMAPI_LOCK_FLAGS_READ; + releaseFlags = OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO; + } + + status = rmapiLockAcquire(apiLockFlags, RM_LOCK_MODULES_WORKITEM); + if (status != NV_OK) + goto done; + + *pReleaseLocks |= releaseFlags; + } + + if ((flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE)) + { + NvU32 gpuLockFlags = GPUS_LOCK_FLAGS_NONE; + + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS) + grp = GPU_LOCK_GRP_ALL; + else if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE) + grp = GPU_LOCK_GRP_DEVICE; + else // (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE) + grp = GPU_LOCK_GRP_SUBDEVICE; + + pGpu = gpumgrGetGpu(gpuInstance); + if (pGpu == NULL) + { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + status = rmGpuGroupLockAcquire(gpuInstance, grp, gpuLockFlags, + RM_LOCK_MODULES_WORKITEM, pGpuMask); + if (status != NV_OK) + goto done; + + // All of these call into the same function, just share the flag + *pReleaseLocks |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS; + + if (flags & OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY) + { + if (!FULL_GPU_SANITY_CHECK(pGpu) || + !pGpu->getProperty(pGpu, PDB_PROP_GPU_STATE_INITIALIZED)) + { + status = NV_ERR_INVALID_STATE; + NV_PRINTF(LEVEL_ERROR, + "GPU isn't full power! gpuInstance = 0x%x.\n", + gpuInstance); + goto done; + } + } + + if (flags & OS_QUEUE_WORKITEM_FLAGS_FOR_PM_RESUME) + { + if (!FULL_GPU_SANITY_FOR_PM_RESUME(pGpu)) + { + status = NV_ERR_INVALID_STATE; + NV_PRINTF(LEVEL_ERROR, + "GPU isn't full power and isn't in resume codepath! 
gpuInstance = 0x%x.\n", + gpuInstance); + goto done; + } + } + } + +done: + if (status != NV_OK) + { + workItemLocksRelease(*pReleaseLocks, *pGpuMask); + *pReleaseLocks = 0; + } + return status; +} + +void +workItemLocksRelease(NvU32 releaseLocks, NvU32 gpuMask) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (releaseLocks & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS) + { + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + } + + if ((releaseLocks & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW) || + (releaseLocks & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO)) + { + rmapiLockRelease(); + } + + if (releaseLocks & OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA) + { + osReleaseRmSema(pSys->pSema, NULL); + } +} + +// +// rmGpuGroupLockGetMask +// +// Given a GPU group ID this function returns the MASK for all GPUS in that group +// We skip the lookup for GPU_LOCK_GRP_MASK as that implies that the caller is aware of the mask +// +NV_STATUS +rmGpuGroupLockGetMask(NvU32 gpuInst, GPU_LOCK_GRP_ID gpuGrpId, GPU_MASK *pGpuMask) +{ + switch (gpuGrpId) + { + case GPU_LOCK_GRP_SUBDEVICE: + *pGpuMask = NVBIT(gpuInst); + break; + + case GPU_LOCK_GRP_DEVICE: + *pGpuMask = gpumgrGetGrpMaskFromGpuInst(gpuInst); + break; + + case GPU_LOCK_GRP_MASK: + break; + + case GPU_LOCK_GRP_ALL: + *pGpuMask = GPUS_LOCK_ALL; + break; + + default: + NV_ASSERT_FAILED("Unexpected gpuGrpId in gpu lock get mask"); + return NV_ERR_INVALID_ARGUMENT; + } + return NV_OK; +} + + +void threadPriorityStateAlloc(void) {} +void threadPriorityStateFree(void) {} +void threadPriorityThrottle(void) {} +void threadPriorityBoost(NvU64 *p, NvU64 *o) {} +void threadPriorityRestore(void) {} + diff --git a/src/nvidia/src/kernel/core/locks_minimal.c b/src/nvidia/src/kernel/core/locks_minimal.c new file mode 100644 index 0000000..eeb9968 --- /dev/null +++ b/src/nvidia/src/kernel/core/locks_minimal.c @@ -0,0 +1,328 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "core/core.h" +#include "core/locks.h" +#include "core/thread_state.h" +#include "os/os.h" +#include "gpu_mgr/gpu_mgr.h" +#include + +typedef struct +{ + PORT_SEMAPHORE *pLock; //getProperty(pSys, PDB_PROP_SYS_RM_LOCK_TIME_COLLECT)) + startWaitTime = osGetMonotonicTimeNs(); + + if (bCondAcquire || bHighIrql) + { + NvBool success = portSyncSemaphoreAcquireConditional(rmGpuLock.pLock); + if (!success) + { + return NV_ERR_STATE_IN_USE; + } + } + else + { + portAtomicIncrementU32(&rmGpuLock.waiting); + portSyncSemaphoreAcquire(rmGpuLock.pLock); + portAtomicDecrementU32(&rmGpuLock.waiting); + } + + osGetCurrentThread(&rmGpuLock.threadId); + rmGpuLock.timestamp = osGetMonotonicTimeNs(); + + // Update total GPU lock wait time if measuring lock times + if (pSys->getProperty(pSys, PDB_PROP_SYS_RM_LOCK_TIME_COLLECT)) + portAtomicExAddU64(&rmGpuLock.totalWaitTime, rmGpuLock.timestamp - startWaitTime); + + OBJGPU *pGpu = gpumgrGetSomeGpu(); + if (pGpu && osLockShouldToggleInterrupts(pGpu)) + osDisableInterrupts(pGpu, bCondAcquire || bHighIrql); + + INSERT_LOCK_TRACE(&rmGpuLock.traceInfo, ra, + lockTraceAcquire, 0, 0, rmGpuLock.threadId, + bHighIrql, 0, rmGpuLock.timestamp); + + return NV_OK; +} + +static NV_STATUS _rmGpuLockRelease(void *ra) +{ + OS_THREAD_HANDLE threadId; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPU *pGpu = NULL; + NvU64 timestamp; + NvU32 gpuCount = 0; + + // + // We may get a bValid as NV_FALSE before GPU is attached. + // + if (rmGpuLock.bValid == NV_FALSE) + { + return NV_OK; + } + + osGetCurrentThread(&threadId); + timestamp = osGetMonotonicTimeNs(); + + NV_ASSERT_OR_RETURN(threadId == rmGpuLock.threadId, NV_ERR_INVALID_STATE); + + INSERT_LOCK_TRACE(&rmGpuLock.traceInfo, ra, + lockTraceRelease, 0, 0, rmGpuLock.threadId, + 0, 0, timestamp); + + rmGpuLock.threadId = INVALID_THREAD_ID; + + portSyncSemaphoreRelease(rmGpuLock.pLock); + + gpumgrGetGpuAttachInfo(&gpuCount, NULL); + if (gpuCount != 0) + pGpu = gpumgrGetSomeGpu(); + + if (pGpu && osLockShouldToggleInterrupts(pGpu)) + osEnableInterrupts(pGpu); + + // Update total GPU lock hold time if measuring lock times + if (pSys->getProperty(pSys, PDB_PROP_SYS_RM_LOCK_TIME_COLLECT)) + { + timestamp = osGetMonotonicTimeNs(); + + portAtomicExAddU64(&rmGpuLock.totalHoldTime, timestamp - rmGpuLock.timestamp); + } + + return NV_OK; +} + +NV_STATUS rmGpuLocksAcquire(NvU32 flags, NvU32 module) +{ + return _rmGpuLockAcquire(flags, NV_RETURN_ADDRESS()); +} + +NvU32 rmGpuLocksRelease(NvU32 flags, OBJGPU* pGpu) +{ + return _rmGpuLockRelease(NV_RETURN_ADDRESS()); +} + +void rmGpuLocksFreeze(GPU_MASK gpuMask) +{ + NV_ASSERT_OR_RETURN_VOID(!"Function not implemented"); +} +void rmGpuLocksUnfreeze(GPU_MASK gpuMask) +{ + NV_ASSERT_OR_RETURN_VOID(!"Function not implemented"); +} +NV_STATUS rmGpuLockHide(NvU32 gpuMask) +{ + NV_ASSERT_OR_RETURN(!"Function not implemented", NV_ERR_NOT_SUPPORTED); +} +void rmGpuLockShow(NvU32 gpuMask) +{ + NV_ASSERT_OR_RETURN_VOID(!"Function not implemented"); +} + +NvBool rmGpuLockIsOwner(void) +{ + OS_THREAD_HANDLE threadId; + osGetCurrentThread(&threadId); + return threadId == rmGpuLock.threadId; +} + +NvU32 rmGpuLocksGetOwnedMask(void) +{ + return rmGpuLockIsOwner() ? 
0x1 : 0x0; +} + +NvBool rmGpuLockIsHidden(OBJGPU* pGpu) +{ + return NV_FALSE; +} + +void rmGpuLockGetTimes(NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS *pParams) +{ + pParams->holdGpuLock = rmGpuLock.totalHoldTime; + pParams->waitGpuLock = rmGpuLock.totalWaitTime; +} + +NV_STATUS rmGpuLockSetOwner(OS_THREAD_HANDLE threadId) +{ + NvBool toDpcRefresh = (threadId == GPUS_LOCK_OWNER_PENDING_DPC_REFRESH); + NvBool fromDpcRefresh = (rmGpuLock.threadId == GPUS_LOCK_OWNER_PENDING_DPC_REFRESH); + NV_ASSERT_OR_RETURN(toDpcRefresh || fromDpcRefresh, NV_ERR_INVALID_STATE); + + rmGpuLock.threadId = threadId; + return NV_OK; +} + +NV_STATUS rmGpuGroupLockAcquire(NvU32 gpuInst, GPU_LOCK_GRP_ID grp, NvU32 flags, NvU32 mod, GPU_MASK* pGpuMask) +{ + NV_ASSERT_OR_RETURN(pGpuMask != NULL, NV_ERR_INVALID_ARGUMENT); + *pGpuMask = ~0; + return _rmGpuLockAcquire(flags, NV_RETURN_ADDRESS()); +} + +void rmGpuGroupLockRelease(GPU_MASK gpuMask, NvU32 flags) +{ + if (gpuMask == 0) + return; + _rmGpuLockRelease(NV_RETURN_ADDRESS()); +} + +NvBool rmGpuGroupLockIsOwner(NvU32 gpuInst, GPU_LOCK_GRP_ID grp, GPU_MASK* pMask) +{ + if (*pMask == 0 && grp == GPU_LOCK_GRP_MASK) + return NV_TRUE; + return rmGpuLockIsOwner(); +} + +NvBool rmDeviceGpuLockIsOwner(NvU32 gpuInst) +{ + return rmGpuLockIsOwner(); +} + +NV_STATUS rmDeviceGpuLockSetOwner(OBJGPU *pGpu, OS_THREAD_HANDLE threadId) +{ + return rmGpuLockSetOwner(threadId); +} + +NV_STATUS rmDeviceGpuLocksAcquire(OBJGPU *pGpu, NvU32 flags, NvU32 module) +{ + return _rmGpuLockAcquire(flags, NV_RETURN_ADDRESS()); +} + +NvU32 rmDeviceGpuLocksRelease(OBJGPU *pGpu, NvU32 flags, OBJGPU *pDpcGpu) +{ + return _rmGpuLockRelease(NV_RETURN_ADDRESS()); +} + +// +// rmDeviceGpuLocksRelease and threadStateFreeISRAndDeferredIntHandler in lockstep in the DPC only +// without releasing the GPUs device lock in between. Done for all GPUs under the device lock +// +NV_STATUS +rmDeviceGpuLocksReleaseAndThreadStateFreeDeferredIntHandlerOptimized(OBJGPU *pGpu, NvU32 flags, OBJGPU *pDpcGpu) +{ + // no deferred work in locks_minimal + NvU32 rc; + + threadStateOnlyProcessWorkISRAndDeferredIntHandler(pGpu->pDpcThreadState, pGpu, + THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER); + + rc = _rmGpuLockRelease(NV_RETURN_ADDRESS()); + + threadStateOnlyFreeISRAndDeferredIntHandler(pGpu->pDpcThreadState, pGpu, + THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER); + + return rc; +} + +NvU64 rmIntrMaskLockAcquire(OBJGPU* pGpu) +{ + NV_ASSERT_OR_RETURN(!"Function not implemented", 0); +} +void rmIntrMaskLockRelease(OBJGPU* pGpu, NvU64 oldIrql) +{ + NV_ASSERT_OR_RETURN_VOID(!"Function not implemented"); +} + diff --git a/src/nvidia/src/kernel/core/system.c b/src/nvidia/src/kernel/core/system.c new file mode 100644 index 0000000..087adca --- /dev/null +++ b/src/nvidia/src/kernel/core/system.c @@ -0,0 +1,760 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/***************************** HW State Routines ***************************\
+*                                                                           *
+*   System Object Function Definitions.                                     *
+*                                                                           *
+\***************************************************************************/
+
+#include "core/core.h"
+#include "core/system.h"
+#include "gpu/gpu.h"
+#include "gpu_mgr/gpu_mgr.h"
+#include "core/locks.h"
+#include "os/os.h"
+#include "nvrm_registry.h"
+#include "core/thread_state.h"
+#include "diagnostics/tracer.h"
+#include "rmosxfac.h"
+#include "tls/tls.h"
+#include "rmapi/rmapi.h"
+#include "rmapi/client.h"
+#include "core/hal_mgr.h"
+#include "nvoc/rtti.h"
+
+#include "mem_mgr/virt_mem_mgr.h"
+#include "gpu_mgr/gpu_db.h"
+
+#if RMCFG_FEATURE_GSPRM_BULLSEYE || defined(GSPRM_BULLSEYE_ENABLE)
+#include "diagnostics/code_coverage_mgr.h"
+#endif
+
+// local static functions
+static NV_STATUS _sysCreateOs(OBJSYS *);
+static NV_STATUS _sysCreateChildObjects(OBJSYS *);
+static void _sysDeleteChildObjects(OBJSYS *);
+static void _sysNvSwitchDetection(OBJSYS *pSys);
+static void _sysInitStaticConfig(OBJSYS *pSys);
+
+// Global pointer to instance of OBJSYS
+OBJSYS *g_pSys = NULL;
+
+typedef struct
+{
+    NvLength childOffset;
+    const NVOC_CLASS_INFO *pClassInfo;
+    NvBool bDynamicConstruct;
+} sysChildObject;
+
+static sysChildObject sysChildObjects[] =
+{
+    { NV_OFFSETOF(OBJSYS, pHalMgr), classInfo(OBJHALMGR), NV_TRUE },
+    { NV_OFFSETOF(OBJSYS, pOS),     classInfo(OBJOS),     NV_FALSE }, // OS: Wrapper macros must be enabled to use :CONSTRUCT.
+ { NV_OFFSETOF(OBJSYS, pGpuMgr), classInfo(OBJGPUMGR), NV_TRUE }, + { NV_OFFSETOF(OBJSYS, pVmm), classInfo(OBJVMM), NV_TRUE }, + { NV_OFFSETOF(OBJSYS, pGpuDb), classInfo(GpuDb), NV_TRUE }, +#if RMCFG_FEATURE_GSPRM_BULLSEYE || defined(GSPRM_BULLSEYE_ENABLE) + { NV_OFFSETOF(OBJSYS, pCodeCovMgr), classInfo(CodeCoverageManager), NV_TRUE }, +#endif +}; + +NV_STATUS +sysConstruct_IMPL(OBJSYS *pSys) +{ + NV_STATUS status; + NvU32 sec = 0; + NvU32 uSec = 0; + + g_pSys = pSys; + + RMTRACE_INIT(); + RMTRACE_INIT_NEW(); + + _sysInitStaticConfig(pSys); + + status = _sysCreateChildObjects(pSys); + if (status != NV_OK) + { + goto failed; + } + + // Use the monotonic system clock for a unique value + osGetSystemTime(&sec, &uSec); + pSys->rmInstanceId = (NvU64)sec * 1000000 + (NvU64)uSec; + + status = osRmInitRm(); + if (status != NV_OK) + goto failed; + + _sysNvSwitchDetection(pSys); + + // allocate locks, semaphores, whatever + status = rmLocksAlloc(pSys); + if (status != NV_OK) + goto failed; + + status = threadStateGlobalAlloc(); + if (status != NV_OK) + goto failed; + + status = rmapiInitialize(); + if (status != NV_OK) + goto failed; + + return NV_OK; + +failed: + + _sysDeleteChildObjects(pSys); + + g_pSys = NULL; + + threadStateGlobalFree(); + + rmapiShutdown(); + rmLocksFree(pSys); + + return status; +} + +void +sysDestruct_IMPL(OBJSYS *pSys) +{ + + pSys->setProperty(pSys, PDB_PROP_SYS_DESTRUCTING, NV_TRUE); + + // + // Any of these operations might fail but go ahead and + // attempt to free remaining resources before complaining. + // + listDestroy(&g_clientListBehindGpusLock); + listDestroy(&g_userInfoList); + multimapDestroy(&g_osInfoList); + + rmapiShutdown(); + osSyncWithRmDestroy(); + threadStateGlobalFree(); + rmLocksFree(pSys); + + // + // Free child objects + // + _sysDeleteChildObjects(pSys); + + g_pSys = NULL; + + RMTRACE_DESTROY(); + RMTRACE_DESTROY_NEW(); + +} + +// +// Create static system object offspring. +// +static NV_STATUS +_sysCreateChildObjects(OBJSYS *pSys) +{ + NV_STATUS status = NV_OK; + NvU32 i, n; + + n = NV_ARRAY_ELEMENTS(sysChildObjects); + + for (i = 0; i < n; i++) + { + if (sysChildObjects[i].bDynamicConstruct) + { + NvLength offset = sysChildObjects[i].childOffset; + Dynamic **ppChild = reinterpretCast(reinterpretCast(pSys, NvU8*) + offset, Dynamic**); + Dynamic *pNewObj; + status = objCreateDynamic(&pNewObj, pSys, sysChildObjects[i].pClassInfo); + + if (status == NV_OK) + { + *ppChild = pNewObj; + } + } + else + { + // + // More cases should NOT be added to this list. OBJOS needs to be + // cleaned up to use the bDynamicConstruct path then this hack can + // be removed. 
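+            // (For comparison, the dynamic path above is just
+            // objCreateDynamic(&pNewObj, pSys, pClassInfo) plus storing the
+            // result at the child's offset; OBJOS simply predates the NVOC
+            // wrapper macros that path requires.)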
+ // + switch (sysChildObjects[i].pClassInfo->classId) + { + case classId(OBJOS): + status = _sysCreateOs(pSys); + break; + default: + NV_ASSERT(0); + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + // RMCONFIG: Bail on errors unless the feature/object/engine/class + // is simply unsupported + if (status == NV_ERR_NOT_SUPPORTED) + status = NV_OK; + if (status != NV_OK) break; + } + + return status; +} + +static void +_sysDeleteChildObjects(OBJSYS *pSys) +{ + int i; + + osRmCapUnregister(&pSys->pOsRmCaps); + + for (i = NV_ARRAY_ELEMENTS(sysChildObjects) - 1; i >= 0; i--) + { + NvLength offset = sysChildObjects[i].childOffset; + Dynamic **ppChild = reinterpretCast(reinterpretCast(pSys, NvU8*) + offset, Dynamic**); + objDelete(*ppChild); + *ppChild = NULL; + } +} + +static void +_sysRegistryOverrideResourceServer +( + OBJSYS *pSys, + OBJGPU *pGpu +) +{ + NvU32 data32; + + // Set read-only API lock override + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_READONLY_API_LOCK, + &data32) == NV_OK) + { + NvU32 apiMask = 0; + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _ALLOC_RESOURCE, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_ALLOC_RESOURCE); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _FREE_RESOURCE, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_FREE_RESOURCE); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _MAP, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_MAP); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _UNMAP, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_UNMAP); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _INTER_MAP, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_INTER_MAP); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _INTER_UNMAP, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_INTER_UNMAP); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _CTRL, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_CTRL); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _COPY, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_COPY); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _SHARE, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_SHARE); + + pSys->apiLockMask = apiMask; + } + else + { + pSys->apiLockMask = NVBIT(RS_API_CTRL); + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_READONLY_API_LOCK_MODULE, + &data32) == NV_OK) + { + pSys->apiLockModuleMask = data32; + } + else + { + pSys->apiLockModuleMask = RM_LOCK_MODULE_GRP(RM_LOCK_MODULES_CLIENT); + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_LOCK_TIME_COLLECT, + &data32) == NV_OK) + { + pSys->setProperty(pSys, PDB_PROP_SYS_RM_LOCK_TIME_COLLECT, !!data32); + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_CLIENT_LIST_DEFERRED_FREE, + &data32) == NV_OK) + { + pSys->bUseDeferredClientListFree = !!data32; + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_CLIENT_LIST_DEFERRED_FREE_LIMIT, + &data32) == NV_OK) + { + pSys->clientListDeferredFreeLimit = data32; + } +} + +static void +_sysRegistryOverrideExternalFabricMgmt +( + OBJSYS *pSys, + OBJGPU *pGpu +) +{ + NvU32 data32; + + // Set external fabric management property + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT, + &data32) == NV_OK) + { + if (FLD_TEST_DRF(_REG_STR_RM, _EXTERNAL_FABRIC_MGMT, _MODE, _ENABLE, data32)) + { + NV_PRINTF(LEVEL_INFO, + "Enabling external fabric management.\n"); + + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED, NV_TRUE); + } + + if (FLD_TEST_DRF(_REG_STR_RM, _EXTERNAL_FABRIC_MGMT, _MODE, _DISABLE, data32)) + { + NV_PRINTF(LEVEL_INFO, + "Disabling external fabric management.\n"); + + 
pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED, NV_FALSE);
+        }
+    }
+}
+
+void
+sysEnableExternalFabricMgmt_IMPL
+(
+    OBJSYS *pSys
+)
+{
+    pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED, NV_TRUE);
+
+    NV_PRINTF(LEVEL_INFO,
+              "Enabling external fabric management for Proxy NvSwitch systems.\n");
+}
+
+void
+sysForceInitFabricManagerState_IMPL
+(
+    OBJSYS *pSys
+)
+{
+    //
+    // We should only allow force init if there is no way to run fabric
+    // manager. For example, HGX-2 virtualization use-case.
+    //
+    if (pSys->getProperty(pSys, PDB_PROP_SYS_NVSWITCH_IS_PRESENT) ||
+        pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED))
+    {
+        NV_ASSERT(0);
+        return;
+    }
+
+    pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED, NV_TRUE);
+
+    NV_PRINTF(LEVEL_INFO,
+              "Forcing fabric manager's state as initialized to unblock clients.\n");
+}
+
+static void
+_sysNvSwitchDetection
+(
+    OBJSYS *pSys
+)
+{
+
+    if (osIsNvswitchPresent())
+    {
+        pSys->setProperty(pSys, PDB_PROP_SYS_NVSWITCH_IS_PRESENT, NV_TRUE);
+
+        NV_PRINTF(LEVEL_INFO, "NvSwitch is found in the system\n");
+
+        sysEnableExternalFabricMgmt(pSys);
+    }
+}
+
+/*!
+ * @brief Initialize static system configuration data.
+ *
+ * @param[in]   pSys    SYSTEM object pointer
+ */
+static void
+_sysInitStaticConfig(OBJSYS *pSys)
+{
+    portMemSet(&pSys->staticConfig, 0, sizeof(pSys->staticConfig));
+    osInitSystemStaticConfig(&pSys->staticConfig);
+}
+
+NV_STATUS
+coreInitializeRm(void)
+{
+    NV_STATUS  status;
+    OBJSYS    *pSys = NULL;
+
+    //
+    // Initialize libraries used by RM
+    //
+
+    // Portable runtime init
+    status = portInitialize();
+    if (status != NV_OK)
+        return status;
+
+    // Required before any NvLog (NV_PRINTF) calls
+    NVLOG_INIT(NULL);
+
+    // Required before any NV_PRINTF() calls
+    if (!DBG_INIT())
+    {
+        status = NV_ERR_GENERIC;
+        return status;
+    }
+
+    //
+    // Initialize OBJSYS which spawns all the RM internal modules
+    //
+    status = objCreate(&pSys, NVOC_NULL_OBJECT, OBJSYS);
+
+    nvAssertInit();
+
+    return status;
+}
+
+void
+coreShutdownRm(void)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+
+    //
+    // Destruct OBJSYS which frees all the RM internal modules
+    //
+    objDelete(pSys);
+
+    //
+    // Deinitialize libraries used by RM
+    //
+    nvAssertDestroy();
+
+    DBG_DESTROY();
+
+    NVLOG_DESTROY();
+
+    portShutdown();
+}
+
+// Obsolete RM init function -- code should migrate to new interfaces
+NvS32
+RmInitRm(void)
+{
+    return (coreInitializeRm() == NV_OK);
+}
+
+// Obsolete RM destroy function -- code should migrate to new interfaces
+NvS32
+RmDestroyRm(void)
+{
+    coreShutdownRm();
+    return NV_TRUE;
+}
+
+static NV_STATUS
+_sysCreateOs(OBJSYS *pSys)
+{
+    OBJOS     *pOS;
+    NV_STATUS  status;
+
+    // RMCONFIG: only if OS is enabled :-)
+    RMCFG_MODULE_ENABLED_OR_BAIL(OS);
+
+    status = objCreate(&pOS, pSys, OBJOS);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    status = constructObjOS(pOS);
+    if (status != NV_OK)
+    {
+        objDelete(pOS);
+        return status;
+    }
+
+    status = osRmCapRegisterSys(&pSys->pOsRmCaps);
+    if (status != NV_OK)
+    {
+        //
+        // Device objects needed for some access rights failed to be created.
+        // This is not system-critical since access rights are currently disabled,
+        // so continue booting, just log error.
+ // + // RS-TODO make this fail once RM Capabilities are enabled (Bug 2549938) + // + NV_PRINTF(LEVEL_ERROR, "RM Access Sys Cap creation failed: 0x%x\n", status); + } + + pSys->pOS = pOS; + + return NV_OK; +} + +NV_STATUS +sysCaptureState_IMPL(OBJSYS *pSys) +{ + return NV_OK; +} + +OBJOS* +sysGetOs_IMPL(OBJSYS *pSys) +{ + if (pSys->pOS) + return pSys->pOS; + + // + // A special case for any early 'get-object' calls for the OS + // object before there is an OS object. Some RC code called on + // DBG_BREAKPOINT assumes an OS object exists, and can cause a crash. + // + PORT_BREAKPOINT_ALWAYS(); + + return NULL; +} + +void +sysInitRegistryOverrides_IMPL +( + OBJSYS *pSys +) +{ + OBJGPU *pGpu = NULL; + NvU32 data32 = 0; + + if (pSys->getProperty(pSys, + PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED)) + { + // The registry overrides, if any, have already been applied. + return; + } + + // Get some GPU - as of now we need some gpu to read registry. + pGpu = gpumgrGetSomeGpu(); + if (pGpu == NULL) + { + // Too early call ! we can not read the registry. + return; + } + + if ((osReadRegistryDword(pGpu, + NV_REG_STR_RM_ENABLE_EVENT_TRACER, &data32) == NV_OK) && data32 ) + { + RMTRACE_ENABLE(data32); + } + + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_CLIENT_DATA_VALIDATION, &data32) == NV_OK) + { + if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _KERNEL_BUFFERS, _ENABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS, NV_TRUE); + } + else if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _KERNEL_BUFFERS, _DISABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS, NV_FALSE); + } + + if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _HANDLE, _ENABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE, NV_TRUE); + } + else if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _HANDLE, _DISABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE, NV_FALSE); + } + + if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _STRICT_CLIENT, _ENABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT, NV_TRUE); + } + else if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _STRICT_CLIENT, _DISABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT, NV_FALSE); + } + + if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _ALL, _ENABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE, NV_TRUE); + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS, NV_TRUE); + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT, NV_TRUE); + } + else if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _ALL, _DISABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE, NV_FALSE); + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS, NV_FALSE); + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT, NV_FALSE); + } + } + + pSys->setProperty(pSys, PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED, NV_TRUE); + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_STREAM_MEMOPS, + &data32) == NV_OK) + { + if (FLD_TEST_DRF(_REG_STR_RM, _STREAM_MEMOPS, _ENABLE, _YES, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_ENABLE_STREAM_MEMOPS, NV_TRUE); + } + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_PRIORITY_BOOST, + &data32) == NV_OK) + { + if (data32 == NV_REG_STR_RM_PRIORITY_BOOST_DISABLE) + pSys->setProperty(pSys, PDB_PROP_SYS_PRIORITY_BOOST, 
NV_FALSE); + else + pSys->setProperty(pSys, PDB_PROP_SYS_PRIORITY_BOOST, NV_TRUE); + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_PRIORITY_THROTTLE_DELAY, + &data32) == NV_OK) + { + pSys->setProperty(pSys, PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US, data32); + } + + _sysRegistryOverrideExternalFabricMgmt(pSys, pGpu); + _sysRegistryOverrideResourceServer(pSys, pGpu); + + if (osBugCheckOnTimeoutEnabled()) + { + pSys->setProperty(pSys, PDB_PROP_SYS_BUGCHECK_ON_TIMEOUT, NV_TRUE); + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_ENABLE_ROUTE_TO_PHYSICAL_LOCK_BYPASS, + &data32) == NV_OK) + { + pSys->setProperty(pSys, PDB_PROP_SYS_ROUTE_TO_PHYSICAL_LOCK_BYPASS, !!data32); + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_GPU_LOCK_MIDPATH, &data32) == NV_OK) + { + pSys->setProperty(pSys, PDB_PROP_SYS_GPU_LOCK_MIDPATH_ENABLED, !!data32); + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_ENABLE_RM_TEST_ONLY_CODE, + &data32) == NV_OK) + { + pSys->setProperty(pSys, PDB_PROP_SYS_ENABLE_RM_TEST_ONLY_CODE, !!data32); + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_ENABLE_FORCE_SHARED_LOCK, &data32) + == NV_OK) + { + pSys->setProperty(pSys, PDB_PROP_SYS_ENABLE_FORCE_SHARED_LOCK, !!data32); + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_ALLOW_UNKNOWN_4PART_IDS, &data32) + == NV_OK) + { + pSys->setProperty(pSys, PDB_PROP_SYS_ALLOW_UNKNOWN_4PART_IDS, !!data32); + } + + gpumgrSetGpuNvlinkBwModeFromRegistry(pGpu); +} + +void +sysApplyLockingPolicy_IMPL(OBJSYS *pSys) +{ + g_resServ.bRouteToPhysicalLockBypass = pSys->getProperty(pSys, PDB_PROP_SYS_ROUTE_TO_PHYSICAL_LOCK_BYPASS); + g_resServ.roTopLockApiMask = pSys->apiLockMask; +} + +NV_STATUS +sysSyncExternalFabricMgmtWAR_IMPL +( + OBJSYS *pSys, + OBJGPU *pGpu +) +{ + NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS params; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS status = NV_OK; + + params.bExternalFabricMgmt = pSys->getProperty(pSys, + PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED); + + status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalClient, + NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT, + ¶ms, sizeof(params)); + + return status; +} + +static void +_sysRefreshAllGpuRecoveryAction +( + void *pData +) +{ + NV_STATUS status; + OBJGPU *pGpu; + NvU32 i; + NvU32 gpuCount; + NvU32 gpuIndex; + NvU32 gpuMask; + + NV_ASSERT_OK_OR_ELSE(status, + rmGpuGroupLockAcquire(0, GPU_LOCK_GRP_ALL, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_NONE, &gpuMask), + return); + + gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask); + for (i = 0, gpuIndex = 0; i < gpuCount; i++) + { + pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex); + if (pGpu) + { + gpuRefreshRecoveryAction(pGpu, NV_TRUE); + } + } + + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); +} + +void +sysSetRecoveryRebootRequired_IMPL +( + OBJSYS *pSys, + NvBool bRebootRequired +) +{ + if (!!pSys->getProperty(pSys, PDB_PROP_SYS_RECOVERY_REBOOT_REQUIRED) != !!bRebootRequired) + { + pSys->setProperty(pSys, PDB_PROP_SYS_RECOVERY_REBOOT_REQUIRED, bRebootRequired); + osQueueSystemWorkItem(_sysRefreshAllGpuRecoveryAction, NULL); + } +} + diff --git a/src/nvidia/src/kernel/core/thread_state.c b/src/nvidia/src/kernel/core/thread_state.c new file mode 100644 index 0000000..ffa8d8a --- /dev/null +++ b/src/nvidia/src/kernel/core/thread_state.c @@ -0,0 +1,1287 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +//***************************************************************************** +// +// This file contains code used for Thread State management +// +// Terminology: +// +// ISR: First level interrupt handler, acknowledge function (VMK) +// +// Deferred INT handler: DPC (Windows), Bottom-half (*nux), Interrupt handler (VMK) +// +//***************************************************************************** + +#include "core/core.h" +#include "core/thread_state.h" +#include "core/locks.h" +#include "os/os.h" +#include "containers/map.h" +#include "nvrm_registry.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/gpu.h" +#include "gpu/gpu_timeout.h" + +THREAD_STATE_DB threadStateDatabase; + +static void _threadStatePrintInfo(THREAD_STATE_NODE *pThreadNode) +{ + if ((threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_PRINT_INFO_ENABLED) == 0) + return; + + if (pThreadNode != NULL) + { + NV_PRINTF(LEVEL_NOTICE, "Thread state:\n"); + NV_PRINTF(LEVEL_NOTICE, + "threadId: 0x%llx flags: 0x0%x\n", + pThreadNode->threadId, + pThreadNode->flags); + + NV_PRINTF(LEVEL_NOTICE, + "enterTime: 0x%llx Limits: nonComputeTime: 0x%llx computeTime: 0x%llx\n", + pThreadNode->timeout.enterTime, + pThreadNode->timeout.nonComputeTime, + pThreadNode->timeout.computeTime); + } +} + +static void _threadStateFreeProcessWork(THREAD_STATE_NODE *pThreadNode) +{ + PORT_UNREFERENCED_VARIABLE(pThreadNode); +} + +/** + * @brief allocate threadState which is per-cpu and per-GPU, only supporting lockless ISR + * + * @param[in/out] ppIsrlocklessThreadNode + * + * @return NV_OK if success, error otherwise + * + */ +static NV_STATUS _threadStateAllocPerCpuPerGpu(PPTHREAD_STATE_ISR_LOCKLESS ppIsrlocklessThreadNode) +{ + NvU32 allocSize; + PTHREAD_STATE_ISR_LOCKLESS pIsrlocklessThreadNode; + NvS32 i; + NvU32 coreCount = osGetMaximumCoreCount(); + + // Bug 789767 + threadStateDatabase.maxCPUs = 32; + if (coreCount > threadStateDatabase.maxCPUs) + threadStateDatabase.maxCPUs = coreCount; + + allocSize = threadStateDatabase.maxCPUs * sizeof(PTHREAD_STATE_ISR_LOCKLESS); + + pIsrlocklessThreadNode = portMemAllocNonPaged(allocSize); + if (pIsrlocklessThreadNode == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(pIsrlocklessThreadNode, 0, allocSize); + allocSize = NV_MAX_DEVICES * sizeof(THREAD_STATE_NODE *); + + // Allocate thread node for each gpu per cpu. 
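+    // (Resulting layout: pIsrlocklessThreadNode[cpu].ppIsrThreadStateGpu[gpu],
+    // i.e. one THREAD_STATE_NODE pointer per (CPU, GPU) pair, so a lockless
+    // ISR can record its thread state without contending on a shared list.)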
+ for (i = 0; i < (NvS32)threadStateDatabase.maxCPUs; i++) + { + pIsrlocklessThreadNode[i].ppIsrThreadStateGpu = portMemAllocNonPaged(allocSize); + if (pIsrlocklessThreadNode[i].ppIsrThreadStateGpu == NULL) + { + for (--i; i >= 0; --i) + portMemFree(pIsrlocklessThreadNode[i].ppIsrThreadStateGpu); + + portMemFree(pIsrlocklessThreadNode); + return NV_ERR_NO_MEMORY; + } + else + { + portMemSet(pIsrlocklessThreadNode[i].ppIsrThreadStateGpu, 0, allocSize); + } + } + *ppIsrlocklessThreadNode = pIsrlocklessThreadNode; + return NV_OK; +} + +/** + * @brief free threadState which is per-cpu and per-GPU, only working for lockless ISR + * + * @param[in/out] pIsrlocklessThreadNode + * + */ +static void _threadStateFreePerCpuPerGpu(PTHREAD_STATE_ISR_LOCKLESS pIsrlocklessThreadNode) +{ + NvU32 i; + // Free any memory we allocated + if (pIsrlocklessThreadNode) + { + for (i = 0; i < threadStateDatabase.maxCPUs; i++) + portMemFree(pIsrlocklessThreadNode[i].ppIsrThreadStateGpu); + portMemFree(pIsrlocklessThreadNode); + } +} + +/** + * @brief the main function to allocate the threadState + * + * @return NV_OK if the entire global threadState is created successfully, + * and an appropriate ERROR otherwise. + * + */ +NV_STATUS threadStateGlobalAlloc(void) +{ + NV_STATUS rmStatus; + NvU32 allocSize; + + NV_ASSERT(tlsInitialize() == NV_OK); + + // Init the thread sequencer id counter to 0. + threadStateDatabase.threadSeqCntr = 0; + threadStateDatabase.gspIsrThreadSeqCntr = 0; + + threadStateDatabase.spinlock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + if (threadStateDatabase.spinlock == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + allocSize = NV_MAX_DEVICES * sizeof(THREAD_STATE_NODE *); + threadStateDatabase.ppISRDeferredIntHandlerThreadNode = portMemAllocNonPaged(allocSize); + if (threadStateDatabase.ppISRDeferredIntHandlerThreadNode == NULL) + { + portSyncSpinlockDestroy(threadStateDatabase.spinlock); + return NV_ERR_NO_MEMORY; + } + portMemSet(threadStateDatabase.ppISRDeferredIntHandlerThreadNode, 0, allocSize); + + rmStatus = _threadStateAllocPerCpuPerGpu(&threadStateDatabase.pIsrlocklessThreadNode); + if (rmStatus != NV_OK) + { + portMemFree(threadStateDatabase.ppISRDeferredIntHandlerThreadNode); + portSyncSpinlockDestroy(threadStateDatabase.spinlock); + return rmStatus; + } + + mapInitIntrusive(&threadStateDatabase.dbRoot); + + return rmStatus; +} + +void threadStateGlobalFree(void) +{ + // Disable all threadState usage once the spinlock is freed + threadStateDatabase.setupFlags = THREAD_STATE_SETUP_FLAGS_NONE; + + // Free any memory we allocated + _threadStateFreePerCpuPerGpu(threadStateDatabase.pIsrlocklessThreadNode); + threadStateDatabase.pIsrlocklessThreadNode = NULL; + + portMemFree(threadStateDatabase.ppISRDeferredIntHandlerThreadNode); + threadStateDatabase.ppISRDeferredIntHandlerThreadNode = NULL; + + if (threadStateDatabase.spinlock != NULL) + { + portSyncSpinlockDestroy(threadStateDatabase.spinlock); + threadStateDatabase.spinlock = NULL; + } + + mapDestroy(&threadStateDatabase.dbRoot); + + tlsShutdown(); +} + +void threadStateInitRegistryOverrides(OBJGPU *pGpu) +{ + NvU32 flags; + + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_THREAD_STATE_SETUP_FLAGS, &flags) == NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Overriding threadStateDatabase.setupFlags from 0x%x to 0x%x\n", + threadStateDatabase.setupFlags, flags); + threadStateDatabase.setupFlags = flags; + } +} + +void threadStateInitSetupFlags(NvU32 flags) +{ + threadStateDatabase.timeout.nonComputeTimeoutMsecs = 0; 
+    threadStateDatabase.timeout.computeTimeoutMsecs = 0;
+    threadStateDatabase.timeout.computeGpuMask = 0;
+    threadStateDatabase.setupFlags = flags;
+}
+
+NvU32 threadStateGetSetupFlags(void)
+{
+    return threadStateDatabase.setupFlags;
+}
+
+//
+// Sets the nextCpuYieldTime field to a value that corresponds to a
+// short time in the future. This value represents the next time that
+// the OS scheduler may be invoked, via osSchedule(), during long waits.
+//
+static void _threadStateSetNextCpuYieldTime(THREAD_STATE_NODE *pThreadNode)
+{
+    NvU64 timeInNs;
+    timeInNs = osGetMonotonicTimeNs();
+
+    pThreadNode->timeout.nextCpuYieldTime = timeInNs +
+        (TIMEOUT_DEFAULT_OS_RESCHEDULE_INTERVAL_SECS) * 1000000 * 1000;
+}
+
+void threadStateYieldCpuIfNecessary(OBJGPU *pGpu, NvBool bQuiet)
+{
+    NV_STATUS rmStatus;
+    THREAD_STATE_NODE *pThreadNode = NULL;
+    NvU64 timeInNs;
+
+    rmStatus = threadStateGetCurrent(&pThreadNode, pGpu);
+    if ((rmStatus == NV_OK) && pThreadNode)
+    {
+        timeInNs = osGetMonotonicTimeNs();
+        if (timeInNs >= pThreadNode->timeout.nextCpuYieldTime)
+        {
+            if (NV_OK == osSchedule())
+            {
+                NV_PRINTF_COND(bQuiet, LEVEL_INFO, LEVEL_WARNING, "Yielding\n");
+            }
+
+            _threadStateSetNextCpuYieldTime(pThreadNode);
+        }
+    }
+}
+
+static NV_STATUS _threadNodeInitTime(THREAD_STATE_NODE *pThreadNode)
+{
+    NV_STATUS rmStatus = NV_OK;
+    NvU64 timeInNs;
+    NvBool firstInit;
+    NvU64 computeTimeoutMsecs;
+    NvU64 nonComputeTimeoutMsecs;
+    NvBool bIsDpcOrIsr = !!(pThreadNode->flags &
+                            (THREAD_STATE_FLAGS_IS_ISR |
+                             THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING |
+                             THREAD_STATE_FLAGS_IS_ISR_LOCKLESS));
+
+    //
+    // _threadNodeInitTime() is used both for the first init and
+    // threadStateResetTimeout(). We can tell the two apart by checking whether
+    // enterTime has been initialized already.
+    //
+    firstInit = (pThreadNode->timeout.enterTime == 0);
+
+    computeTimeoutMsecs = threadStateDatabase.timeout.computeTimeoutMsecs;
+    nonComputeTimeoutMsecs = threadStateDatabase.timeout.nonComputeTimeoutMsecs;
+
+    //
+    // If we are in DPC or ISR contexts, we need to timeout the driver before OS
+    // mechanisms kick in and panic the kernel
+    //
+    if (bIsDpcOrIsr)
+    {
+        //
+        // Note that MODS does not have interrupt timeout requirements and there
+        // are existing code paths that violate the timeout
+        //
+        computeTimeoutMsecs = TIMEOUT_DPC_ISR_INTERVAL_MS;
+        nonComputeTimeoutMsecs = TIMEOUT_DPC_ISR_INTERVAL_MS;
+    }
+
+    timeInNs = osGetMonotonicTimeNs();
+
+    if (firstInit)
+    {
+        //
+        // Save off the time we first entered the RM. We do not
+        // want to reset this if we call threadStateResetTimeout()
+        //
+        pThreadNode->timeout.enterTime = timeInNs;
+    }
+
+    if (pThreadNode->timeout.overrideTimeoutMsecs)
+    {
+        nonComputeTimeoutMsecs = pThreadNode->timeout.overrideTimeoutMsecs;
+        computeTimeoutMsecs = pThreadNode->timeout.overrideTimeoutMsecs;
+    }
+
+    if ((pThreadNode->flags & THREAD_STATE_FLAGS_DEVICE_INIT) != 0)
+    {
+        //
+        // Even on platforms with strict timing requirements (e.g. WDDM) there
+        // is an exception for initialization. While init time is an important
+        // performance metric, we do not want to functionally fail because of
+        // an arbitrary deadline. Thus, we set the timeout to give plenty of
+        // buffer room for some of the slower platforms:
+        //  - P40 can take ~30 seconds when booting in passthrough due to
+        //    Hyper-V intercepting all MMIO accesses (bug 1900927)
+        //  - Hopper+ can take 3+ seconds due to memory link initialization
+        //
+        const NvU32 DEVICE_INIT_TIMEOUT_MS = 60 * 1000;
+
+        computeTimeoutMsecs = NV_MAX(computeTimeoutMsecs, DEVICE_INIT_TIMEOUT_MS);
+        nonComputeTimeoutMsecs = NV_MAX(nonComputeTimeoutMsecs, DEVICE_INIT_TIMEOUT_MS);
+    }
+
+    _threadStateSetNextCpuYieldTime(pThreadNode);
+
+    if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSTIMER)
+    {
+        pThreadNode->timeout.nonComputeTime = timeInNs + (nonComputeTimeoutMsecs * 1000 * 1000);
+        pThreadNode->timeout.computeTime = timeInNs + (computeTimeoutMsecs * 1000 * 1000);
+    }
+    else if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSDELAY)
+    {
+        // Convert from msecs (1,000) to usecs (1,000,000)
+        pThreadNode->timeout.nonComputeTime = nonComputeTimeoutMsecs * 1000;
+        pThreadNode->timeout.computeTime = computeTimeoutMsecs * 1000;
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "Bad threadStateDatabase.timeout.flags: 0x%x!\n",
+                  threadStateDatabase.timeout.flags);
+
+        rmStatus = NV_ERR_INVALID_STATE;
+    }
+
+    return rmStatus;
+}
+
+static void _getTimeoutDataFromGpuMode
+(
+    OBJGPU *pGpu,
+    THREAD_STATE_NODE *pThreadNode,
+    NvU64 **ppThreadNodeTime,
+    NvU64 *pThreadStateDatabaseTimeoutMsecs
+)
+{
+    if (pGpu)
+    {
+        if (threadStateDatabase.timeout.computeGpuMask & NVBIT(pGpu->gpuInstance))
+        {
+            *ppThreadNodeTime = &pThreadNode->timeout.computeTime;
+        }
+        else
+        {
+            *ppThreadNodeTime = &pThreadNode->timeout.nonComputeTime;
+        }
+
+        *pThreadStateDatabaseTimeoutMsecs =
+            NV_MAX(threadStateDatabase.timeout.computeTimeoutMsecs, threadStateDatabase.timeout.nonComputeTimeoutMsecs);
+    }
+}
+
+//
+// The logic in _threadNodeCheckTimeout() should closely resemble
+// that of _gpuCheckTimeout().
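+//
+// Two timeout representations are in play here, using the arithmetic set up in
+// _threadNodeInitTime() above: with GPU_TIMEOUT_FLAGS_OSTIMER a node stores an
+// absolute deadline, e.g. a 4000 ms budget becomes now + 4000 * 1000 * 1000 ns;
+// with GPU_TIMEOUT_FLAGS_OSDELAY it stores a remaining budget in usecs
+// (4000 ms -> 4000000 us) that each check decrements by the ~100 us it slept.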
+// +static NV_STATUS _threadNodeCheckTimeout(OBJGPU *pGpu, THREAD_STATE_NODE *pThreadNode, NvU64 *pElapsedTimeUs) +{ + NV_STATUS rmStatus = NV_OK; + NvU64 threadStateDatabaseTimeoutMsecs = 0; + NvU64 *pThreadNodeTime = NULL; + NvU64 timeInNs; + + if (pGpu) + { + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + NV_PRINTF(LEVEL_ERROR, "API_GPU_ATTACHED_SANITY_CHECK failed!\n"); + return NV_ERR_TIMEOUT; + } + } + + _getTimeoutDataFromGpuMode(pGpu, pThreadNode, &pThreadNodeTime, + &threadStateDatabaseTimeoutMsecs); + if ((threadStateDatabaseTimeoutMsecs == 0) || + (pThreadNodeTime == NULL)) + { + NV_PRINTF(LEVEL_ERROR, + "threadStateDatabaseTimeoutMsecs or pThreadNodeTime was NULL!\n"); + return NV_ERR_INVALID_STATE; + } + + timeInNs = osGetMonotonicTimeNs(); + if (pElapsedTimeUs) + { + *pElapsedTimeUs = (timeInNs - pThreadNode->timeout.enterTime) / 1000; + } + + if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSTIMER) + { + if (timeInNs >= *pThreadNodeTime) + { + NV_PRINTF(LEVEL_ERROR, + "_threadNodeCheckTimeout: currentTime: %llx >= %llx\n", + timeInNs, *pThreadNodeTime); + + rmStatus = NV_ERR_TIMEOUT; + } + } + else if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSDELAY) + { + osDelayUs(100); + *pThreadNodeTime -= NV_MIN(100, *pThreadNodeTime); + if (*pThreadNodeTime == 0) + { + rmStatus = NV_ERR_TIMEOUT; + } + } + else + { + NV_PRINTF(LEVEL_INFO, + "_threadNodeCheckTimeout: Unsupported timeout.flags: 0x%x!\n", + threadStateDatabase.timeout.flags); + + rmStatus = NV_ERR_INVALID_STATE; + } + + if (rmStatus == NV_ERR_TIMEOUT) + { + // Report the time this Thread entered the RM + _threadStatePrintInfo(pThreadNode); + + // This is set via osGetTimeoutParams per platform + NV_PRINTF(LEVEL_ERROR, + "_threadNodeCheckTimeout: Timeout was set to: %lld msecs!\n", + threadStateDatabaseTimeoutMsecs); + + if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ASSERT_ON_TIMEOUT_ENABLED) + { + NV_ASSERT(0); + } + + if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_RESET_ON_TIMEOUT_ENABLED) + { + threadStateResetTimeout(pGpu); + } + } + + return rmStatus; +} + +static void _threadStateFreeInvokeCallbacks +( + THREAD_STATE_NODE *pThreadNode +) +{ + THREAD_STATE_FREE_CALLBACK *pCbListNode; + + NV_ASSERT_OR_RETURN_VOID(pThreadNode->flags & + THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED); + + // Start from head to maintain FIFO semantics. + while ((pCbListNode = listHead(&pThreadNode->cbList)) != NULL) + { + (*pCbListNode->pCb)(pCbListNode->pCbData); + listRemove(&pThreadNode->cbList, pCbListNode); + } +} + +static void _threadStateLogInitCaller(THREAD_STATE_NODE *pThreadNode, NvU64 funcAddr) +{ + threadStateDatabase.traceInfo.entries[threadStateDatabase.traceInfo.index].callerRA = funcAddr; + threadStateDatabase.traceInfo.entries[threadStateDatabase.traceInfo.index].flags = pThreadNode->flags; + threadStateDatabase.traceInfo.index = + (threadStateDatabase.traceInfo.index + 1) % THREAD_STATE_TRACE_MAX_ENTRIES; +} + +/** + * @brief Initialize a threadState for regular threads (non-interrupt context) + * + * @param[in/out] pThreadNode + * @param[in] flags + * + */ +void threadStateInit(THREAD_STATE_NODE *pThreadNode, NvU32 flags) +{ + NV_STATUS rmStatus; + NvU64 funcAddr; + + // Isrs should be using threadStateIsrInit(). 
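+    // (In this file that means threadStateInitISRAndDeferredIntHandler() or
+    // threadStateInitISRLockless().) A typical non-interrupt RM entry point
+    // brackets its work with this pair (a sketch, assuming the conventional
+    // zero-valued THREAD_STATE_FLAGS_NONE):
+    //
+    //     THREAD_STATE_NODE threadState;
+    //     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+    //     // ... RM work; long polls call threadStateCheckTimeout() ...
+    //     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);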
+    NV_ASSERT((flags & (THREAD_STATE_FLAGS_IS_ISR_LOCKLESS |
+                        THREAD_STATE_FLAGS_IS_ISR |
+                        THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING)) == 0);
+
+    // Check to see if ThreadState is enabled
+    if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED))
+        return;
+
+    portMemSet(pThreadNode, 0, sizeof(*pThreadNode));
+    pThreadNode->threadSeqId = portAtomicIncrementU32(&threadStateDatabase.threadSeqCntr);
+    pThreadNode->cpuNum = osGetCurrentProcessorNumber();
+    pThreadNode->flags = flags;
+
+    //
+    // The thread state free callbacks are only supported in the non-ISR paths
+    // as they invoke memory allocation routines.
+    //
+    listInit(&pThreadNode->cbList, portMemAllocatorGetGlobalNonPaged());
+    pThreadNode->flags |= THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED;
+
+    rmStatus = _threadNodeInitTime(pThreadNode);
+    if (rmStatus == NV_OK)
+        pThreadNode->flags |= THREAD_STATE_FLAGS_TIMEOUT_INITED;
+
+    rmStatus = osGetCurrentThread(&pThreadNode->threadId);
+    if (rmStatus != NV_OK)
+        return;
+
+    NV_ASSERT_OR_RETURN_VOID(pThreadNode->cpuNum < threadStateDatabase.maxCPUs);
+
+    funcAddr = (NvU64) (NV_RETURN_ADDRESS());
+
+    portSyncSpinlockAcquire(threadStateDatabase.spinlock);
+    if (!mapInsertExisting(&threadStateDatabase.dbRoot, (NvU64)pThreadNode->threadId, pThreadNode))
+    {
+        // Reset the threadId as insertion failed. bValid is already NV_FALSE
+        pThreadNode->threadId = 0;
+        portSyncSpinlockRelease(threadStateDatabase.spinlock);
+        return;
+    }
+    else
+    {
+        pThreadNode->bValid = NV_TRUE;
+        rmStatus = NV_OK;
+    }
+
+    _threadStateLogInitCaller(pThreadNode, funcAddr);
+
+    portSyncSpinlockRelease(threadStateDatabase.spinlock);
+
+    _threadStatePrintInfo(pThreadNode);
+
+    NV_ASSERT(rmStatus == NV_OK);
+    threadPriorityStateAlloc();
+
+    if (TLS_MIRROR_THREADSTATE)
+    {
+        THREAD_STATE_NODE **pTls = (THREAD_STATE_NODE **)tlsEntryAcquire(TLS_ENTRY_ID_THREADSTATE);
+        NV_ASSERT_OR_RETURN_VOID(pTls != NULL);
+        if (*pTls != NULL)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: Nested threadState inits detected. Previous threadState node is %p, new is %p\n",
+                      *pTls, pThreadNode);
+        }
+        *pTls = pThreadNode;
+    }
+}
+
+/**
+ * @brief Initialize a threadState for locked ISR and Bottom-half
+ *
+ * @param[in/out] pThreadNode
+ * @param[in] pGpu
+ * @param[in] flags THREAD_STATE_FLAGS_IS_ISR or THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING
+ *
+ */
+void threadStateInitISRAndDeferredIntHandler
+(
+    THREAD_STATE_NODE *pThreadNode,
+    OBJGPU *pGpu,
+    NvU32 flags
+)
+{
+    NV_STATUS rmStatus;
+
+    NV_ASSERT(pGpu);
+
+    // Non-interrupt threads should be using threadStateInit() instead.
+    NV_ASSERT(flags & (THREAD_STATE_FLAGS_IS_ISR | THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING));
+
+    portMemSet(pThreadNode, 0, sizeof(*pThreadNode));
+    pThreadNode->threadSeqId = portAtomicIncrementU32(&threadStateDatabase.threadSeqCntr);
+    pThreadNode->cpuNum = osGetCurrentProcessorNumber();
+    pThreadNode->flags = flags;
+
+    rmStatus = _threadNodeInitTime(pThreadNode);
+
+    if (rmStatus == NV_OK)
+        pThreadNode->flags |= THREAD_STATE_FLAGS_TIMEOUT_INITED;
+
+    if (TLS_MIRROR_THREADSTATE)
+    {
+        THREAD_STATE_NODE **pTls = (THREAD_STATE_NODE **)tlsEntryAcquire(TLS_ENTRY_ID_THREADSTATE);
+        NV_ASSERT_OR_GOTO(pTls != NULL, TlsMirror_Exit);
+        if (*pTls != NULL)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: Nested threadState inits detected. Previous threadState node is %p, new is %p\n",
+                      *pTls, pThreadNode);
+        }
+        *pTls = pThreadNode;
+    }
+TlsMirror_Exit:
+
+    rmStatus = osGetCurrentThread(&pThreadNode->threadId);
+    if (rmStatus != NV_OK)
+        return;
+
+    portSyncSpinlockAcquire(threadStateDatabase.spinlock);
+    threadStateDatabase.ppISRDeferredIntHandlerThreadNode[pGpu->gpuInstance] = pThreadNode;
+    portSyncSpinlockRelease(threadStateDatabase.spinlock);
+}
+
+/**
+ * @brief Initialize a threadState for lockless ISR
+ *
+ * @param[in/out] pThreadNode
+ * @param[in] pGpu
+ * @param[in] flags THREAD_STATE_FLAGS_IS_ISR_LOCKLESS
+ *
+ */
+void threadStateInitISRLockless(THREAD_STATE_NODE *pThreadNode, OBJGPU *pGpu, NvU32 flags)
+{
+    NV_STATUS rmStatus;
+    PTHREAD_STATE_ISR_LOCKLESS pThreadStateIsrLockless;
+
+    NV_ASSERT(flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);
+
+    // Check to see if ThreadState is enabled
+    if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED))
+        return;
+
+    portMemSet(pThreadNode, 0, sizeof(*pThreadNode));
+    pThreadNode->threadSeqId = portAtomicIncrementU32(&threadStateDatabase.threadSeqCntr);
+    pThreadNode->cpuNum = osGetCurrentProcessorNumber();
+    pThreadNode->flags = flags;
+
+    rmStatus = _threadNodeInitTime(pThreadNode);
+    if (rmStatus == NV_OK)
+        pThreadNode->flags |= THREAD_STATE_FLAGS_TIMEOUT_INITED;
+
+    if (TLS_MIRROR_THREADSTATE)
+    {
+        THREAD_STATE_NODE **pTls = (THREAD_STATE_NODE **)tlsEntryAcquire(TLS_ENTRY_ID_THREADSTATE);
+        NV_ASSERT_OR_GOTO(pTls != NULL, TlsMirror_Exit);
+        if (*pTls != NULL)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: Nested threadState inits detected. Previous threadState node is %p, new is %p\n",
+                      *pTls, pThreadNode);
+        }
+        *pTls = pThreadNode;
+    }
+TlsMirror_Exit:
+
+    rmStatus = osGetCurrentThread(&pThreadNode->threadId);
+    if (rmStatus != NV_OK)
+        return;
+
+    NV_ASSERT_OR_RETURN_VOID(pThreadNode->cpuNum < threadStateDatabase.maxCPUs);
+
+    portSyncSpinlockAcquire(threadStateDatabase.spinlock);
+
+    //
+    // We use a cpu/gpu indexed structure to store the threadNode pointer
+    // instead of a tree indexed by threadId because threadId is no longer
+    // unique in an isr. We also need to index by both cpu num and gpu instance
+    // because isrs can preempt one another, and run on the same processor
+    // at the same time.
+    //
+    pThreadStateIsrLockless = &threadStateDatabase.pIsrlocklessThreadNode[pThreadNode->cpuNum];
+    NV_ASSERT(pThreadStateIsrLockless->ppIsrThreadStateGpu[pGpu->gpuInstance] == NULL);
+    pThreadStateIsrLockless->ppIsrThreadStateGpu[pGpu->gpuInstance] = pThreadNode;
+    portSyncSpinlockRelease(threadStateDatabase.spinlock);
+}
+
+void threadStateOnlyProcessWorkISRAndDeferredIntHandler
+(
+    THREAD_STATE_NODE *pThreadNode,
+    OBJGPU *pGpu,
+    NvU32 flags
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(pGpu &&
+        (flags & (THREAD_STATE_FLAGS_IS_ISR | THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING)));
+
+    if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED))
+        return;
+
+    // Process any work needed before exiting.
+ _threadStateFreeProcessWork(pThreadNode); +} + +void threadStateOnlyFreeISRAndDeferredIntHandler +( + THREAD_STATE_NODE *pThreadNode, + OBJGPU *pGpu, + NvU32 flags +) +{ + NV_STATUS rmStatus; + + NV_ASSERT_OR_RETURN_VOID(pGpu && + (flags & (THREAD_STATE_FLAGS_IS_ISR | THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING))); + + if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED)) + return; + + if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_CHECK_TIMEOUT_AT_FREE_ENABLED) + { + rmStatus = _threadNodeCheckTimeout(NULL /*pGpu*/, pThreadNode, NULL /*pElapsedTimeUs*/); + NV_ASSERT(rmStatus == NV_OK); + } + + portSyncSpinlockAcquire(threadStateDatabase.spinlock); + threadStateDatabase.ppISRDeferredIntHandlerThreadNode[pGpu->gpuInstance] = NULL; + portSyncSpinlockRelease(threadStateDatabase.spinlock); + + if (TLS_MIRROR_THREADSTATE) + { + NvU32 r; + THREAD_STATE_NODE *pTlsNode = NvP64_VALUE(tlsEntryGet(TLS_ENTRY_ID_THREADSTATE)); + NV_ASSERT(pTlsNode); + if (pTlsNode != pThreadNode) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: TLS / threadState mismatch: pTlsNode=%p, pThreadNode=%p\n", + pTlsNode, pThreadNode); + } + r = tlsEntryRelease(TLS_ENTRY_ID_THREADSTATE); + if (r != 0) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: tlsEntryRelease returned %d (this is likely due to nested threadStateInit() calls)\n", + r); + } + } +} + +/** + * @brief Free the thread state for locked ISR and bottom-half + * + * @param[in/out] pThreadNode + * @param[in] pGpu + * @param[in] flags THREAD_STATE_FLAGS_IS_ISR or THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING + * + */ +void threadStateFreeISRAndDeferredIntHandler +( + THREAD_STATE_NODE *pThreadNode, + OBJGPU *pGpu, + NvU32 flags +) +{ + threadStateOnlyProcessWorkISRAndDeferredIntHandler(pThreadNode, pGpu, flags); + + threadStateOnlyFreeISRAndDeferredIntHandler(pThreadNode, pGpu, flags); +} + +/** + * @brief Free the thread state for a regular thread + * + * @param[in/out] pThreadNode + * @param[in] flags + * + */ +void threadStateFree(THREAD_STATE_NODE *pThreadNode, NvU32 flags) +{ + NV_STATUS rmStatus; + THREAD_STATE_NODE *pNode; + ThreadStateNodeMap *pMap; + + NV_ASSERT((flags & (THREAD_STATE_FLAGS_IS_ISR_LOCKLESS | + THREAD_STATE_FLAGS_IS_ISR | + THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING)) == 0); + + // Check to see if ThreadState is enabled + if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED)) + return; + + if (!(flags & THREAD_STATE_FLAGS_EXCLUSIVE_RUNNING)) + { + // + // Do not do this for exclusive running threads as all the info + // is not filled in. + // + if (!pThreadNode->bValid && pThreadNode->threadId == 0) + return; + } + + _threadStateFreeInvokeCallbacks(pThreadNode); + + listDestroy(&pThreadNode->cbList); + + // Process any work needed before exiting. 
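+    // (The free callbacks above have already run, in the FIFO order
+    // established by threadStateEnqueueCallbackOnFree().)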
+    _threadStateFreeProcessWork(pThreadNode);
+
+    if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_CHECK_TIMEOUT_AT_FREE_ENABLED)
+    {
+        rmStatus = _threadNodeCheckTimeout(NULL /*pGpu*/, pThreadNode, NULL /*pElapsedTimeUs*/);
+        NV_ASSERT(rmStatus == NV_OK);
+    }
+
+    portSyncSpinlockAcquire(threadStateDatabase.spinlock);
+    pMap = &threadStateDatabase.dbRoot;
+
+    pNode = mapFind(pMap, (NvU64)pThreadNode->threadId);
+
+    if (pNode != NULL)
+    {
+        mapRemove(pMap, pThreadNode);
+        pThreadNode->bValid = NV_FALSE;
+        rmStatus = NV_OK;
+    }
+    else
+    {
+        rmStatus = NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    portSyncSpinlockRelease(threadStateDatabase.spinlock);
+
+    _threadStatePrintInfo(pThreadNode);
+
+    NV_ASSERT(rmStatus == NV_OK);
+
+    threadPriorityStateFree();
+
+    if (TLS_MIRROR_THREADSTATE)
+    {
+        NvU32 r;
+        THREAD_STATE_NODE *pTlsNode = NvP64_VALUE(tlsEntryGet(TLS_ENTRY_ID_THREADSTATE));
+        NV_ASSERT(pTlsNode);
+        if (pTlsNode != pThreadNode)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: TLS / threadState mismatch: pTlsNode=%p, pThreadNode=%p\n",
+                      pTlsNode, pThreadNode);
+        }
+        r = tlsEntryRelease(TLS_ENTRY_ID_THREADSTATE);
+        if (r != 0)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: tlsEntryRelease returned %d (this is likely due to nested threadStateInit() calls)\n",
+                      r);
+        }
+    }
+}
+
+/**
+ * @brief Free thread state for lockless ISR
+ *
+ * @param[in/out] pThreadNode
+ * @param[in] pGpu
+ * @param[in] flags
+ *
+ */
+void threadStateFreeISRLockless(THREAD_STATE_NODE *pThreadNode, OBJGPU *pGpu, NvU32 flags)
+{
+    NV_STATUS rmStatus = NV_OK;
+    PTHREAD_STATE_ISR_LOCKLESS pThreadStateIsrlockless;
+
+    NV_ASSERT(flags & (THREAD_STATE_FLAGS_IS_ISR_LOCKLESS | THREAD_STATE_FLAGS_IS_ISR));
+    NV_ASSERT(pThreadNode->cpuNum == osGetCurrentProcessorNumber());
+
+    // Check to see if ThreadState is enabled
+    if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED))
+        return;
+
+    // Process any work needed before exiting.
+    _threadStateFreeProcessWork(pThreadNode);
+
+    if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_CHECK_TIMEOUT_AT_FREE_ENABLED)
+    {
+        rmStatus = _threadNodeCheckTimeout(NULL /*pGpu*/, pThreadNode, NULL /*pElapsedTimeUs*/);
+        NV_ASSERT(rmStatus == NV_OK);
+    }
+
+    portSyncSpinlockAcquire(threadStateDatabase.spinlock);
+    pThreadStateIsrlockless = &threadStateDatabase.pIsrlocklessThreadNode[pThreadNode->cpuNum];
+    NV_ASSERT(pThreadStateIsrlockless->ppIsrThreadStateGpu[pGpu->gpuInstance] != NULL);
+    pThreadStateIsrlockless->ppIsrThreadStateGpu[pGpu->gpuInstance] = NULL;
+    portSyncSpinlockRelease(threadStateDatabase.spinlock);
+
+    if (TLS_MIRROR_THREADSTATE)
+    {
+        NvU32 r;
+        THREAD_STATE_NODE *pTlsNode = NvP64_VALUE(tlsEntryGet(TLS_ENTRY_ID_THREADSTATE));
+        NV_ASSERT(pTlsNode);
+        if (pTlsNode != pThreadNode)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: TLS / threadState mismatch: pTlsNode=%p, pThreadNode=%p\n",
+                      pTlsNode, pThreadNode);
+        }
+        r = tlsEntryRelease(TLS_ENTRY_ID_THREADSTATE);
+        if (r != 0)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: tlsEntryRelease returned %d (this is likely due to nested threadStateInit() calls)\n",
+                      r);
+        }
+    }
+}
+
+/**
+ * @brief Get the thread state for the given threadId
+ *
+ * @param[in] threadId
+ * @param[in] pGpu
+ * @param[out] ppThreadNode
+ *
+ * @return NV_OK if we are able to locate the thread state with threadId,
+ *         NV_ERR_OBJECT_NOT_FOUND if we can't find threadId inside the map,
+ *         NV_ERR_INVALID_STATE if the thread state is not enabled or the CPU
+ *         has been hotplugged.
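+ *
+ * @note Lookup order: the per-CPU/per-GPU lockless ISR table is consulted
+ *       first, then the per-GPU ISR/bottom-half slot, and finally the
+ *       threadId-keyed map shared by all non-interrupt threads.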
+ */
+static NV_STATUS _threadStateGet
+(
+    OS_THREAD_HANDLE threadId,
+    OBJGPU *pGpu,
+    THREAD_STATE_NODE **ppThreadNode
+)
+{
+    THREAD_STATE_NODE *pNode;
+
+    // Check to see if ThreadState is enabled
+    if ((threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED) == NV_FALSE)
+    {
+        *ppThreadNode = NULL;
+        return NV_ERR_INVALID_STATE;
+    }
+    else
+    {
+        NvU32 cpuNum = osGetCurrentProcessorNumber();
+        THREAD_STATE_NODE *pIsrlocklessThreadNode;
+        THREAD_STATE_NODE *pISRDeferredIntHandlerNode;
+
+        if (cpuNum >= threadStateDatabase.maxCPUs)
+        {
+            NV_ASSERT(0);
+            *ppThreadNode = NULL;
+            return NV_ERR_INVALID_STATE;
+        }
+
+        //
+        // Several threadState call sites do not pass a pGpu because one is not
+        // easily available, and they are not running in interrupt context.
+        // _threadStateGet() only needs pGpu to find the thread node when
+        // called from an isr, so such call sites have assumed they will never
+        // be in interrupt context.
+        //
+        if (pGpu)
+        {
+            // Check to see if this is a lockless ISR running thread.
+            pIsrlocklessThreadNode = threadStateDatabase.pIsrlocklessThreadNode[cpuNum].ppIsrThreadStateGpu[pGpu->gpuInstance];
+            if (pIsrlocklessThreadNode && (pIsrlocklessThreadNode->threadId == threadId))
+            {
+                *ppThreadNode = pIsrlocklessThreadNode;
+                return NV_OK;
+            }
+
+            // Check to see if this is an ISR or bottom-half thread
+            pISRDeferredIntHandlerNode = threadStateDatabase.ppISRDeferredIntHandlerThreadNode[pGpu->gpuInstance];
+            if (pISRDeferredIntHandlerNode && (pISRDeferredIntHandlerNode->threadId == threadId))
+            {
+                *ppThreadNode = pISRDeferredIntHandlerNode;
+                return NV_OK;
+            }
+        }
+    }
+
+    portSyncSpinlockAcquire(threadStateDatabase.spinlock);
+    pNode = mapFind(&threadStateDatabase.dbRoot, (NvU64) threadId);
+    portSyncSpinlockRelease(threadStateDatabase.spinlock);
+
+    *ppThreadNode = pNode;
+    if (pNode != NULL)
+    {
+        NV_ASSERT((*ppThreadNode)->threadId == threadId);
+        return NV_OK;
+    }
+    else
+    {
+        return NV_ERR_OBJECT_NOT_FOUND;
+    }
+}
+
+NV_STATUS threadStateGetCurrentUnchecked(THREAD_STATE_NODE **ppThreadNode, OBJGPU *pGpu)
+{
+    NV_STATUS rmStatus;
+    OS_THREAD_HANDLE threadId;
+
+    // Check to see if ThreadState is enabled
+    if ((threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED) == NV_FALSE)
+    {
+        *ppThreadNode = NULL;
+        return NV_ERR_INVALID_STATE;
+    }
+
+    rmStatus = osGetCurrentThread(&threadId);
+    if (rmStatus == NV_OK)
+    {
+        rmStatus = _threadStateGet(threadId, pGpu, ppThreadNode);
+    }
+
+    // Assert if the current lookup failed - Please add the stack from this assert to bug 690089.
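+    // (A failed lookup here typically means this RM entry point ran without a
+    // matching threadStateInit()/threadStateFree() pair.)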
+ if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ASSERT_ON_FAILED_LOOKUP_ENABLED) + { + NV_PRINTF(LEVEL_ERROR, + "threadState[Init,Free] call may be missing from this RM entry point!\n"); + NV_ASSERT(rmStatus == NV_OK); + } + + return rmStatus; +} + +NV_STATUS threadStateGetCurrent(THREAD_STATE_NODE **ppThreadNode, OBJGPU *pGpu) +{ + NV_STATUS status = threadStateGetCurrentUnchecked(ppThreadNode, pGpu); + + if (TLS_MIRROR_THREADSTATE) + { + THREAD_STATE_NODE *pTlsNode = NvP64_VALUE(tlsEntryGet(TLS_ENTRY_ID_THREADSTATE)); + + if ((status == NV_OK) && (pTlsNode != *ppThreadNode)) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: TLS / threadState mismatch: pTlsNode=%p, *ppThreadNode=%p; ThreadID = %llx (NvPort:%llx), sp=%p\n", + pTlsNode, *ppThreadNode, + (NvU64)(*ppThreadNode)->threadId, + portThreadGetCurrentThreadId(), &status); + + } + else if ((status != NV_OK) && (pTlsNode != NULL)) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: TLS / threadState mismatch: ThreadNode not found (status=0x%08x), but found in TLS:%p (tid=%llx;sp=%p)\n", + status, pTlsNode, + portThreadGetCurrentThreadId(), &status); + } + } + + return status; +} + +// +// Sets the timeout value and method of timeout +// +NV_STATUS threadStateInitTimeout(OBJGPU *pGpu, NvU32 timeoutUs, NvU32 flags) +{ + NvU32 timeoutMsecs = (timeoutUs / 1000); + NvU32 gpuMode = gpuGetMode(pGpu); + NvU32 scaleIgnored = 0; + NvU32 flagsIgnored = 0; + NvU32 perOSTimeoutUs = 999; // What we'll see if osGetTimeoutParams ever fails + + if (gpuMode == NV_GPU_MODE_GRAPHICS_MODE) + { + threadStateDatabase.timeout.nonComputeTimeoutMsecs = timeoutMsecs; + threadStateDatabase.timeout.computeGpuMask &= ~NVBIT(pGpu->gpuInstance); + } + else + { + threadStateDatabase.timeout.computeGpuMask |= NVBIT(pGpu->gpuInstance); + } + // + // Initializing the compute timeout limits in all cases, but use + // per-OS values: + // + osGetTimeoutParams(pGpu, &perOSTimeoutUs, &scaleIgnored, &flagsIgnored); + timeoutMsecs = (perOSTimeoutUs / 1000); + timeoutMsecs = gpuScaleTimeout(pGpu, timeoutMsecs); + + threadStateDatabase.timeout.computeTimeoutMsecs = timeoutMsecs; + threadStateDatabase.timeout.flags = flags; + + return NV_OK; +} + +// +// Resets the current threadId time +// +NV_STATUS threadStateResetTimeout(OBJGPU *pGpu) +{ + NV_STATUS rmStatus; + THREAD_STATE_NODE *pThreadNode = NULL; + + // Check to see if ThreadState Timeout is enabled + if ((threadStateDatabase.setupFlags & + THREAD_STATE_SETUP_FLAGS_TIMEOUT_ENABLED) == NV_FALSE) + { + return NV_ERR_INVALID_STATE; + } + + rmStatus = threadStateGetCurrent(&pThreadNode, pGpu); + if ((rmStatus == NV_OK) && pThreadNode ) + { + // Reset the timeout + rmStatus = _threadNodeInitTime(pThreadNode); + if (rmStatus == NV_OK) + { + pThreadNode->flags |= THREAD_STATE_FLAGS_TIMEOUT_INITED; + _threadStatePrintInfo(pThreadNode); + } + } + + return rmStatus; +} + +void threadStateLogTimeout(OBJGPU *pGpu, NvU64 funcAddr, NvU32 lineNum) +{ + + // If this is release and we have RmBreakOnRC on -- Stop +#ifndef DEBUG + OBJSYS *pSys = SYS_GET_INSTANCE(); + if (DRF_VAL(_DEBUG, _BREAK_FLAGS, _GPU_TIMEOUT, pSys->debugFlags) == + NV_DEBUG_BREAK_FLAGS_GPU_TIMEOUT_ENABLE) + { + DBG_BREAKPOINT(); + } +#endif +} + +// +// Checks the current threadId time against a set timeout period +// +NV_STATUS threadStateCheckTimeout(OBJGPU *pGpu, NvU64 *pElapsedTimeUs) +{ + NV_STATUS rmStatus; + THREAD_STATE_NODE *pThreadNode = NULL; + + if (pElapsedTimeUs) + *pElapsedTimeUs = 0; + + // + // Make sure the DB has been initialized, we have a valid threadId, + 
// and that the Timeout logic is enabled + // + if ((threadStateDatabase.setupFlags & + THREAD_STATE_SETUP_FLAGS_TIMEOUT_ENABLED) == NV_FALSE) + { + return NV_ERR_INVALID_STATE; + } + if (threadStateDatabase.timeout.flags == 0) + { + return NV_ERR_INVALID_STATE; + } + + rmStatus = threadStateGetCurrent(&pThreadNode, pGpu); + if ((rmStatus == NV_OK) && pThreadNode ) + { + if (pThreadNode->flags & THREAD_STATE_FLAGS_TIMEOUT_INITED) + { + rmStatus = _threadNodeCheckTimeout(pGpu, pThreadNode, pElapsedTimeUs); + } + else + { + rmStatus = NV_ERR_INVALID_STATE; + } + } + + return rmStatus; +} + +static void _threadStateSetTimeoutOverride(THREAD_STATE_NODE *pThreadNode, NvU64 newTimeoutMs) +{ + NvU64 timeInNs = osGetMonotonicTimeNs(); + + _threadStateSetNextCpuYieldTime(pThreadNode); + + if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSTIMER) + { + pThreadNode->timeout.nonComputeTime = timeInNs + (newTimeoutMs * 1000 * 1000); + pThreadNode->timeout.computeTime = timeInNs + (newTimeoutMs * 1000 * 1000); + } + else if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSDELAY) + { + // Convert from msecs (1,000) to usecs (1,000,000) + pThreadNode->timeout.nonComputeTime = newTimeoutMs * 1000; + pThreadNode->timeout.computeTime = newTimeoutMs * 1000; + } +} + +// +// Set override timeout value for specified thread +// +void threadStateSetTimeoutOverride(THREAD_STATE_NODE *pThreadNode, NvU64 newTimeoutMs) +{ + pThreadNode->timeout.overrideTimeoutMsecs = newTimeoutMs; + _threadStateSetTimeoutOverride(pThreadNode, newTimeoutMs); +} + +// +// One-time override timeout for specified thread; does not apply across timeout resets +// +void threadStateSetTimeoutSingleOverride(THREAD_STATE_NODE *pThreadNode, NvU64 newTimeoutMs) +{ + // Does not cache override in overrideTimeoutMsecs, so it is not re-applied upon reset. + _threadStateSetTimeoutOverride(pThreadNode, newTimeoutMs); +} + +NV_STATUS threadStateEnqueueCallbackOnFree +( + THREAD_STATE_NODE *pThreadNode, + THREAD_STATE_FREE_CALLBACK *pCallback +) +{ + THREAD_STATE_FREE_CALLBACK *pCbListNode; + + if ((pThreadNode == NULL) || (pCallback == NULL) || + (pCallback->pCb == NULL)) + return NV_ERR_INVALID_ARGUMENT; + + if (!(pThreadNode->flags & THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED)) + return NV_ERR_INVALID_OPERATION; + + // Add from tail to maintain FIFO semantics. + pCbListNode = listAppendNew(&pThreadNode->cbList); + if (pCbListNode == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + pCbListNode->pCb = pCallback->pCb; + pCbListNode->pCbData = pCallback->pCbData; + + return NV_OK; +} + +void threadStateRemoveCallbackOnFree +( + THREAD_STATE_NODE *pThreadNode, + THREAD_STATE_FREE_CALLBACK *pCallback +) +{ + THREAD_STATE_FREE_CALLBACK *pCbListNode; + + NV_ASSERT_OR_RETURN_VOID(pThreadNode->flags & + THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED); + + // + // Remove doesn't need to obey FIFO semantics. + // + // Must remove only one entry per call to be symmetric with + // threadStateEnqueueCallbackOnFree(). It is caller's responsibility to + // invoke this API repeatedly as needed. 
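+    //
+    // Illustrative pairing (a sketch; pMyCb/pMyData stand in for a caller's
+    // function and context):
+    //     THREAD_STATE_FREE_CALLBACK cb = { .pCb = pMyCb, .pCbData = pMyData };
+    //     threadStateEnqueueCallbackOnFree(pThreadNode, &cb);
+    //     ...
+    //     threadStateRemoveCallbackOnFree(pThreadNode, &cb);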
+    //
+    for (pCbListNode = listHead(&pThreadNode->cbList);
+         pCbListNode != NULL;
+         pCbListNode = listNext(&pThreadNode->cbList, pCbListNode))
+    {
+        if ((pCbListNode->pCb == pCallback->pCb) &&
+            (pCbListNode->pCbData == pCallback->pCbData))
+        {
+            listRemove(&pThreadNode->cbList, pCbListNode);
+            return;
+        }
+    }
+}
diff --git a/src/nvidia/src/kernel/diagnostics/code_coverage_mgr.c b/src/nvidia/src/kernel/diagnostics/code_coverage_mgr.c
new file mode 100644
index 0000000..79500c3
--- /dev/null
+++ b/src/nvidia/src/kernel/diagnostics/code_coverage_mgr.c
@@ -0,0 +1,121 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "containers/list.h"
+#include "diagnostics/code_coverage_mgr.h"
+#include "nv_sriov_defines.h"
+#include "gpu_mgr/gpu_mgr.h"
+
+
+void
+codecovmgrRegisterCoverageBuffer_IMPL(CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance, NvU64 bufferSize)
+{
+    GSP_BULLSEYE_COVERAGE_DATA *pCovDataNode = listAppendNew(&pCodeCovMgr->covDataList);
+    NvU8 *pCoverageData = (NvU8*) portMemAllocNonPaged(bufferSize);
+
+    // Bail out cleanly if either allocation failed rather than dereferencing NULL.
+    if ((pCovDataNode == NULL) || (pCoverageData == NULL))
+    {
+        portMemFree(pCoverageData);
+        if (pCovDataNode != NULL)
+        {
+            listRemove(&pCodeCovMgr->covDataList, pCovDataNode);
+        }
+        return;
+    }
+
+    pCovDataNode->gfid = gfid;
+    pCovDataNode->gpuInstance = gpuInstance;
+    pCovDataNode->pCoverageData = pCoverageData;
+    pCovDataNode->bufferLength = 0;
+    portMemSet(pCovDataNode->pCoverageData, 0x00, bufferSize);
+}
+
+void
+codecovmgrDeregisterCoverageBuffer_IMPL(CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance)
+{
+    GSP_BULLSEYE_COVERAGE_DATA *pNode = codecovmgrGetCoverageNode(pCodeCovMgr, gfid, gpuInstance);
+    if (pNode != NULL)
+    {
+        portMemFree(pNode->pCoverageData);
+        listRemove(&pCodeCovMgr->covDataList, pNode);
+    }
+}
+
+NV_STATUS codecovmgrConstruct_IMPL(CodeCoverageManager *pCodeCovMgr)
+{
+    listInit(&pCodeCovMgr->covDataList, portMemAllocatorGetGlobalNonPaged());
+    return NV_OK;
+}
+
+void codecovmgrDestruct_IMPL(CodeCoverageManager *pCodeCovMgr)
+{
+    for (NvU32 gfid = 0; gfid <= MAX_PARTITIONS_WITH_GFID; gfid++)
+    {
+        for (NvU32 gpuInstance = 0; gpuInstance < GPUMGR_MAX_GPU_INSTANCES; gpuInstance++)
+        {
+            codecovmgrDeregisterCoverageBuffer(pCodeCovMgr, gfid, gpuInstance);
+        }
+    }
+}
+
+GSP_BULLSEYE_COVERAGE_DATA*
+codecovmgrGetCoverageNode_IMPL(CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance)
+{
+    GSP_BULLSEYE_COVERAGE_DATA *pNode = listHead(&pCodeCovMgr->covDataList);
+    while (pNode != NULL)
+    {
+        if (pNode->gfid == gfid && pNode->gpuInstance == gpuInstance)
+        {
+            return pNode;
+        }
+        pNode = listNext(&pCodeCovMgr->covDataList, pNode);
+    }
+    /* not able to find a buffer with the given gfid and gpuInstance */
+    return NULL;
+}
+
+NvU8*
+codecovmgrGetCoverageBuffer_IMPL(CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance)
+{
+    GSP_BULLSEYE_COVERAGE_DATA *pNode = codecovmgrGetCoverageNode(pCodeCovMgr, gfid, gpuInstance);
+    return (pNode != NULL) ? pNode->pCoverageData : NULL;
+}
+
+void
+codecovmgrMergeCoverage_IMPL(CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance, NvU8* pSysmemBuffer)
+{
+    if (pSysmemBuffer != NULL)
+    {
+        /* the first 8 bytes are the size of the buffer */
+        pCodeCovMgr->bullseyeOutputBuffer.length = *((NvU64*) pSysmemBuffer);
+        pCodeCovMgr->bullseyeOutputBuffer.dataBuffer = pSysmemBuffer + sizeof(NvU64);
+        GSP_BULLSEYE_COVERAGE_DATA *pNode = codecovmgrGetCoverageNode(pCodeCovMgr, gfid, gpuInstance);
+        if (pNode != NULL)
+        {
+            for (NvU64 i = 0; i < pCodeCovMgr->bullseyeOutputBuffer.length; i++)
+            {
+                pNode->pCoverageData[i] |= pCodeCovMgr->bullseyeOutputBuffer.dataBuffer[i];
+            }
+            pNode->bufferLength = pCodeCovMgr->bullseyeOutputBuffer.length;
+        }
+    }
+}
+
+void
+codecovmgrResetCoverage_IMPL(CodeCoverageManager *pCodeCovMgr, NvU32 gfid, NvU32 gpuInstance)
+{
+    GSP_BULLSEYE_COVERAGE_DATA *pNode = codecovmgrGetCoverageNode(pCodeCovMgr, gfid, gpuInstance);
+    if (pNode != NULL)
+    {
+        portMemSet(pNode->pCoverageData, 0x00, pNode->bufferLength);
+    }
+}
diff --git a/src/nvidia/src/kernel/diagnostics/nvlog.c b/src/nvidia/src/kernel/diagnostics/nvlog.c
new file mode 100644
index 0000000..edb1c6e
--- /dev/null
+++ b/src/nvidia/src/kernel/diagnostics/nvlog.c
@@ -0,0 +1,830 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2009-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlimits.h" +#include "nvlog/nvlog.h" +#include "nvrm_registry.h" +#include "os/os.h" +#include "diagnostics/tracer.h" +#include "tls/tls.h" +#include "core/locks.h" + +// +// Buffer push method declarations +// +NvBool nvlogRingBufferPush (NVLOG_BUFFER *pBuffer, NvU8 *pData, NvU32 dataSize); +NvBool nvlogNowrapBufferPush(NVLOG_BUFFER *pBuffer, NvU8 *pData, NvU32 dataSize); +NvBool nvlogStringBufferPush(NVLOG_BUFFER *unused, NvU8 *pData, NvU32 dataSize); +NvBool nvlogKernelLogPush(NVLOG_BUFFER *unused, NvU8 *pData, NvU32 dataSize); + +static void _printBase64(NvU8 *pData, NvU32 dataSize); +static NV_STATUS _allocateNvlogBuffer(NvU32 size, NvU32 flags, NvU32 tag, + NVLOG_BUFFER **ppBuffer); +static void _deallocateNvlogBuffer(NVLOG_BUFFER *pBuffer); + +volatile NvU32 nvlogInitCount; +static void *nvlogRegRoot; + +// Zero (null) buffer definition. +static NVLOG_BUFFER _nvlogZeroBuffer = +{ + {nvlogStringBufferPush}, + 0, + NvU32_BUILD('l','l','u','n'), + 0, + 0, + 0 +}; + +NVLOG_LOGGER NvLogLogger = +{ + NVLOG_LOGGER_VERSION, + + // Default buffers + { + // The 0th buffer just prints to the screen in debug builds. + &_nvlogZeroBuffer + }, + + // Next available slot + 1, + + // Free slots + NVLOG_MAX_BUFFERS-1, + + // Main lock, must be allocated at runtime. 
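+    // (nvlogInit() creates it before any buffer can be registered or pushed.)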
+ NULL +}; + +#define NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer) \ + ((hBuffer < NVLOG_MAX_BUFFERS) && (NvLogLogger.pBuffers[hBuffer] != NULL)) + +typedef struct +{ + void (*pCb)(void *); + void *pData; +} NvlogFlushCb; + +#define NVLOG_MAX_FLUSH_CBS 32 + +// At least one callback for each OBJGPU's KernelGsp +ct_assert(NVLOG_MAX_FLUSH_CBS >= NV_MAX_DEVICES); + +static NvlogFlushCb nvlogFlushCbs[NVLOG_MAX_FLUSH_CBS]; + +NV_STATUS +nvlogInit(void *pData) +{ + NV_STATUS status = NV_OK; + + nvlogRegRoot = pData; + portInitialize(); + NvLogLogger.mainLock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + if (NvLogLogger.mainLock == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + NvLogLogger.buffersLock = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged()); + if (NvLogLogger.buffersLock == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + NvLogLogger.flushCbsLock = portSyncRwLockCreate(portMemAllocatorGetGlobalNonPaged()); + if (NvLogLogger.flushCbsLock == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + tlsInitialize(); + + portMemSet(nvlogFlushCbs, '\0', sizeof(nvlogFlushCbs)); + return status; +} + +void nvlogUpdate(void) { +} + +NV_STATUS +nvlogDestroy(void) +{ + NV_STATUS status = NV_OK; + NvU32 i; + + for (i = 0; i < NVLOG_MAX_BUFFERS; i++) + { + nvlogDeallocBuffer(i, NV_TRUE); + } + + if (NvLogLogger.mainLock != NULL) + { + portSyncSpinlockDestroy(NvLogLogger.mainLock); + NvLogLogger.mainLock = NULL; + } + if (NvLogLogger.buffersLock != NULL) + { + portSyncMutexDestroy(NvLogLogger.buffersLock); + NvLogLogger.buffersLock = NULL; + } + if (NvLogLogger.flushCbsLock != NULL) + { + portSyncRwLockDestroy(NvLogLogger.flushCbsLock); + NvLogLogger.flushCbsLock = NULL; + } + + tlsShutdown(); + /// @todo Destructor should return void. + portShutdown(); + + return status; +} + +static NV_STATUS +_allocateNvlogBuffer +( + NvU32 size, + NvU32 flags, + NvU32 tag, + NVLOG_BUFFER **ppBuffer +) +{ + NVLOG_BUFFER *pBuffer; + NVLOG_BUFFER_PUSHFUNC pushfunc; + + // Sanity check on some invalid combos: + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _EXPANDABLE, _YES, flags)) + { + // Only nonwrapping buffers can be expanded + if (!FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _NOWRAP, flags)) + return NV_ERR_INVALID_ARGUMENT; + // Full locking required to expand the buffer. + if (!FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _LOCKING, _FULL, flags)) + return NV_ERR_INVALID_ARGUMENT; + } + + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _SYSTEMLOG, flags)) + { + // System log does not need to allocate memory for buffer. 
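+        // Pushes to such a buffer go straight to the OS kernel log:
+        // nvlogKernelLogPush() base64-encodes the payload via _printBase64().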
+ pushfunc = (NVLOG_BUFFER_PUSHFUNC) nvlogKernelLogPush; + size = 0; + } + else + { + NV_ASSERT_OR_RETURN(size > 0, NV_ERR_INVALID_ARGUMENT); + + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _RING, flags)) + { + pushfunc = (NVLOG_BUFFER_PUSHFUNC) nvlogRingBufferPush; + } + else if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _NOWRAP, flags)) + { + pushfunc = (NVLOG_BUFFER_PUSHFUNC) nvlogNowrapBufferPush; + } + else + { + return NV_ERR_INVALID_ARGUMENT; + } + } + + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _NONPAGED, _YES, flags)) + pBuffer = portMemAllocNonPaged(sizeof(*pBuffer) + size); + else + pBuffer = portMemAllocPaged(sizeof(*pBuffer) + size); + + if (!pBuffer) + return NV_ERR_NO_MEMORY; + + portMemSet(pBuffer, 0, sizeof(*pBuffer) + size); + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _OCA, _YES, flags)) + { + osAddRecordForCrashLog(pBuffer, NV_OFFSETOF(NVLOG_BUFFER, data) + size); + } + + pBuffer->push.fn = pushfunc; + pBuffer->size = size; + pBuffer->flags = flags; + pBuffer->tag = tag; + + *ppBuffer = pBuffer; + + return NV_OK; +} + +static void +_deallocateNvlogBuffer +( + NVLOG_BUFFER *pBuffer +) +{ + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _OCA, _YES, pBuffer->flags)) + osDeleteRecordForCrashLog(pBuffer); + + portMemFree(pBuffer); +} + +NV_STATUS +nvlogAllocBuffer +( + NvU32 size, + NvU32 flags, + NvU32 tag, + NVLOG_BUFFER_HANDLE *pBufferHandle, + ... +) +{ + NVLOG_BUFFER *pBuffer; + NV_STATUS status; + + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _SYSTEMLOG, flags)) + { + } + else + { + NV_ASSERT_OR_RETURN(NvLogLogger.totalFree > 0, + NV_ERR_INSUFFICIENT_RESOURCES); + } + + status = _allocateNvlogBuffer(size, flags, tag, &pBuffer); + + if (status != NV_OK) + { + return status; + } + + portSyncMutexAcquire(NvLogLogger.buffersLock); + portSyncSpinlockAcquire(NvLogLogger.mainLock); + + if (NvLogLogger.nextFree < NVLOG_MAX_BUFFERS) + { + NvLogLogger.pBuffers[NvLogLogger.nextFree] = pBuffer; + *pBufferHandle = NvLogLogger.nextFree++; + NvLogLogger.totalFree--; + } + else + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + } + + // Find the next slot in the buffers array + while (NvLogLogger.nextFree < NVLOG_MAX_BUFFERS) + { + if (NvLogLogger.pBuffers[NvLogLogger.nextFree] != NULL) + NvLogLogger.nextFree++; + else break; + } + portSyncSpinlockRelease(NvLogLogger.mainLock); + portSyncMutexRelease(NvLogLogger.buffersLock); + + if (status != NV_OK) + { + portMemFree(pBuffer); + } + + return status; +} + +void +nvlogDeallocBuffer +( + NVLOG_BUFFER_HANDLE hBuffer, + NvBool bDeallocPreserved +) +{ + NVLOG_BUFFER *pBuffer; + + if ((hBuffer == 0) || !NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer)) + return; + + pBuffer = NvLogLogger.pBuffers[hBuffer]; + + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _PRESERVE, _YES, pBuffer->flags) && + !bDeallocPreserved) + { + return; + } + + pBuffer->flags = FLD_SET_DRF(LOG_BUFFER, _FLAGS, _DISABLED, + _YES, pBuffer->flags); + + while (pBuffer->threadCount > 0) { /*spin*/ } + portSyncMutexAcquire(NvLogLogger.buffersLock); + portSyncSpinlockAcquire(NvLogLogger.mainLock); + NvLogLogger.pBuffers[hBuffer] = NULL; + NvLogLogger.nextFree = NV_MIN(hBuffer, NvLogLogger.nextFree); + NvLogLogger.totalFree++; + portSyncSpinlockRelease(NvLogLogger.mainLock); + portSyncMutexRelease(NvLogLogger.buffersLock); + + _deallocateNvlogBuffer(pBuffer); +} + +NV_STATUS +nvlogWriteToBuffer +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU8 *pData, + NvU32 size +) +{ + NvBool status; + NVLOG_BUFFER *pBuffer; + + NV_ASSERT_OR_RETURN(size > 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pData != NULL, NV_ERR_INVALID_POINTER); 
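+    // The atomic threadCount bracket around the push below lets
+    // nvlogDeallocBuffer() and nowrap-buffer expansion wait out in-flight
+    // writers before freeing the old NVLOG_BUFFER.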
+ + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + pBuffer = NvLogLogger.pBuffers[hBuffer]; + + // Normal condition when fetching nvLog from NV0000_CTRL_CMD_NVD_GET_NVLOG. + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _DISABLED, _YES, pBuffer->flags)) + return NV_ERR_NOT_READY; + + portAtomicIncrementS32(&pBuffer->threadCount); + status = pBuffer->push.fn(pBuffer, pData, size); + // Get pBuffer from the handle again, as it might have realloc'd + portAtomicDecrementS32(&NvLogLogger.pBuffers[hBuffer]->threadCount); + + return (status == NV_TRUE) ? NV_OK : NV_ERR_BUFFER_TOO_SMALL; +} + + + +NV_STATUS +nvlogExtractBufferChunk +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU32 chunkNum, + NvU32 *pChunkSize, + NvU8 *pDest +) +{ + NVLOG_BUFFER *pBuffer; + NvU32 index; + + NV_ASSERT_OR_RETURN(*pChunkSize > 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pDest != NULL, NV_ERR_INVALID_POINTER); + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + pBuffer = NvLogLogger.pBuffers[hBuffer]; + + index = chunkNum * (*pChunkSize); + NV_ASSERT_OR_RETURN(index <= pBuffer->size, NV_ERR_OUT_OF_RANGE); + *pChunkSize = NV_MIN(*pChunkSize, (pBuffer->size - index)); + + portSyncSpinlockAcquire(NvLogLogger.mainLock); + portMemCopy(pDest, *pChunkSize, &pBuffer->data[index], *pChunkSize); + portSyncSpinlockRelease(NvLogLogger.mainLock); + + return NV_OK; +} + + +NV_STATUS +nvlogGetBufferSize +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU32 *pSize +) +{ + NV_ASSERT_OR_RETURN(pSize != NULL, NV_ERR_INVALID_POINTER); + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + *pSize = NvLogLogger.pBuffers[hBuffer]->size; + return NV_OK; +} + +NV_STATUS +nvlogGetBufferTag +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU32 *pTag +) +{ + NV_ASSERT_OR_RETURN(pTag != NULL, NV_ERR_INVALID_POINTER); + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + *pTag = NvLogLogger.pBuffers[hBuffer]->tag; + return NV_OK; +} + +NV_STATUS +nvlogGetBufferFlags +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU32 *pFlags +) +{ + NV_ASSERT_OR_RETURN(pFlags != NULL, NV_ERR_INVALID_POINTER); + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + *pFlags = NvLogLogger.pBuffers[hBuffer]->flags; + return NV_OK; +} + + +NV_STATUS +nvlogPauseLoggingToBuffer +( + NVLOG_BUFFER_HANDLE hBuffer, + NvBool bPause +) +{ + NVLOG_BUFFER *pBuffer; + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + pBuffer = NvLogLogger.pBuffers[hBuffer]; + + pBuffer->flags = (bPause) + ? 
FLD_SET_DRF(LOG, _BUFFER_FLAGS, _DISABLED, _YES, pBuffer->flags)
+                    : FLD_SET_DRF(LOG, _BUFFER_FLAGS, _DISABLED, _NO, pBuffer->flags);
+
+    return NV_OK;
+}
+
+
+NV_STATUS
+nvlogPauseAllLogging
+(
+    NvBool bPause
+)
+{
+    return NV_OK;
+}
+
+NV_STATUS
+nvlogGetBufferHandleFromTag
+(
+    NvU32 tag,
+    NVLOG_BUFFER_HANDLE *pBufferHandle
+)
+{
+    NvU32 i;
+
+    NV_ASSERT_OR_RETURN(pBufferHandle != NULL, NV_ERR_INVALID_POINTER);
+
+    for (i = 0; i < NVLOG_MAX_BUFFERS; i++)
+    {
+        if (NvLogLogger.pBuffers[i] != NULL)
+        {
+            if (NvLogLogger.pBuffers[i]->tag == tag)
+            {
+                *pBufferHandle = i;
+                return NV_OK;
+            }
+        }
+    }
+    return NV_ERR_OBJECT_NOT_FOUND;
+}
+
+NV_STATUS
+nvlogGetBufferSnapshot
+(
+    NVLOG_BUFFER_HANDLE hBuffer,
+    NvU8 *pDest,
+    NvU32 destSize
+)
+{
+    NVLOG_BUFFER *pBuffer;
+
+    NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer),
+                        NV_ERR_INVALID_OBJECT_HANDLE);
+
+    NV_ASSERT_OR_RETURN(pDest != NULL, NV_ERR_INVALID_POINTER);
+
+    pBuffer = NvLogLogger.pBuffers[hBuffer];
+
+    NV_ASSERT_OR_RETURN(destSize >= NVLOG_BUFFER_SIZE(pBuffer),
+                        NV_ERR_BUFFER_TOO_SMALL);
+
+    portSyncSpinlockAcquire(NvLogLogger.mainLock);
+    portMemCopy(pDest, NVLOG_BUFFER_SIZE(pBuffer), pBuffer, NVLOG_BUFFER_SIZE(pBuffer));
+    portSyncSpinlockRelease(NvLogLogger.mainLock);
+
+    return NV_OK;
+}
+
+
+
+NvBool
+nvlogRingBufferPush
+(
+    NVLOG_BUFFER *pBuffer,
+    NvU8 *pData,
+    NvU32 dataSize
+)
+{
+    NvU32 writeSize;
+    NvU32 oldPos;
+    NvU32 lock = DRF_VAL(LOG, _BUFFER_FLAGS, _LOCKING, pBuffer->flags);
+
+    if (lock != NVLOG_BUFFER_FLAGS_LOCKING_NONE)
+        portSyncSpinlockAcquire(NvLogLogger.mainLock);
+
+    oldPos = pBuffer->pos;
+    pBuffer->extra.ring.overflow += (pBuffer->pos + dataSize) / pBuffer->size;
+    pBuffer->pos = (pBuffer->pos + dataSize) % pBuffer->size;
+
+    // State locking does portMemCopy unlocked.
+    if (lock == NVLOG_BUFFER_FLAGS_LOCKING_STATE)
+        portSyncSpinlockRelease(NvLogLogger.mainLock);
+
+    while (dataSize > 0)
+    {
+        writeSize = NV_MIN(pBuffer->size - oldPos, dataSize);
+        portMemCopy(&pBuffer->data[oldPos], writeSize, pData, writeSize);
+        oldPos = 0;
+        dataSize -= writeSize;
+        pData += writeSize;
+    }
+
+    if (lock == NVLOG_BUFFER_FLAGS_LOCKING_FULL)
+        portSyncSpinlockRelease(NvLogLogger.mainLock);
+
+    return NV_TRUE;
+}
+
+NvBool
+nvlogNowrapBufferPush
+(
+    NVLOG_BUFFER *pBuffer,
+    NvU8 *pData,
+    NvU32 dataSize
+)
+{
+    NvU32 oldPos;
+    NvU32 lock = DRF_VAL(LOG, _BUFFER_FLAGS, _LOCKING, pBuffer->flags);
+
+    if (pBuffer->pos + dataSize >= pBuffer->size)
+    {
+        NvBool bExpandable = FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _EXPANDABLE, _YES, pBuffer->flags);
+        NvBool bNonPaged   = FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _NONPAGED, _YES, pBuffer->flags);
+
+        // Expandable buffer, and we are at IRQL where we can do realloc
+        if (bExpandable &&
+            ((bNonPaged && portMemExSafeForNonPagedAlloc()) || (!bNonPaged && portMemExSafeForPagedAlloc())))
+        {
+            NVLOG_BUFFER *pNewBuffer;
+            NvU32 i;
+            NvU32 newSize = pBuffer->size * 2;
+            NvU32 allocSize = sizeof(*pBuffer) + newSize;
+
+            pNewBuffer = bNonPaged ? portMemAllocNonPaged(allocSize) : portMemAllocPaged(allocSize);
+            if (pNewBuffer == NULL)
+                return NV_FALSE;
+
+            //
+            // Two threads could have entered this block at the same time, and
+            // both will have allocated their own bigger buffer. Only the one
+            // that takes the spinlock first should do the copy and the swap.
+ // + portSyncSpinlockAcquire(NvLogLogger.mainLock); + // Check if this buffer is still there and was not swapped for a bigger one + for (i = 0; i < NVLOG_MAX_BUFFERS; i++) + { + if (NvLogLogger.pBuffers[i] == pBuffer) + break; + } + if (i == NVLOG_MAX_BUFFERS) + { + // Another thread has already expanded the buffer, bail out. + // TODO: Maybe we could store the handle and then try again? + portSyncSpinlockRelease(NvLogLogger.mainLock); + portMemFree(pNewBuffer); + return NV_FALSE; + } + + portMemCopy(pNewBuffer, allocSize, pBuffer, sizeof(*pBuffer)+pBuffer->size); + pNewBuffer->size = newSize; + for (i = 0; i < NVLOG_MAX_BUFFERS; i++) + { + if (NvLogLogger.pBuffers[i] == pBuffer) + NvLogLogger.pBuffers[i] = pNewBuffer; + } + portSyncSpinlockRelease(NvLogLogger.mainLock); + + // + // Before we can free this buffer, we need to make sure any threads + // that were still accessing it are done. Spin on volatile threadCount + // NOTE: threadCount includes the current thread too. + // + while (pBuffer->threadCount > 1) { /*spin*/ } + portMemFree(pBuffer); + pBuffer = pNewBuffer; + } + else + { + return NV_FALSE; + } + } + + if (lock != NVLOG_BUFFER_FLAGS_LOCKING_NONE) + portSyncSpinlockAcquire(NvLogLogger.mainLock); + + oldPos = pBuffer->pos; + pBuffer->pos = oldPos + dataSize; + + // State locking does portMemCopy unlocked. + if (lock == NVLOG_BUFFER_FLAGS_LOCKING_STATE) + portSyncSpinlockRelease(NvLogLogger.mainLock); + + portMemCopy(&pBuffer->data[oldPos], dataSize, pData, dataSize); + + if (lock == NVLOG_BUFFER_FLAGS_LOCKING_FULL) + portSyncSpinlockRelease(NvLogLogger.mainLock); + + return NV_TRUE; +} + +NvBool +nvlogStringBufferPush +( + NVLOG_BUFFER *unused, + NvU8 *pData, + NvU32 dataSize +) +{ + return NV_TRUE; +} + +// +// Prints the buffer encoded as base64, with a prefix for easy grepping. +// Base64 allows the padding characters ('=') to appear anywhere, not just at +// the end, so it is fine to print buffers one at a time without merging. 
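+// Each 3 input bytes become 4 output characters, so a full 64-character line
+// carries 48 bytes of buffer (a 4 KiB buffer therefore takes ~86 lines).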
+//
+static void _printBase64(NvU8 *pData, NvU32 dataSize)
+{
+    const NvU8 base64_key[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+    NvU8 output[64+1]; // 64 base64 characters per line of output
+    NvU32 i;
+
+    do
+    {
+        i = 0;
+        while (i < (sizeof(output)-1) && (dataSize > 0))
+        {
+            output[i++] = base64_key[pData[0] >> 2];
+            if (dataSize == 1)
+            {
+                output[i++] = base64_key[(pData[0] << 4) & 0x3F];
+                output[i++] = '=';
+                output[i++] = '=';
+                dataSize = 0;
+                break;
+            }
+
+            output[i++] = base64_key[((pData[0] << 4) & 0x3F) | (pData[1] >> 4)];
+            if (dataSize == 2)
+            {
+                output[i++] = base64_key[(pData[1] << 2) & 0x3F];
+                output[i++] = '=';
+                dataSize = 0;
+                break;
+            }
+
+            output[i++] = base64_key[((pData[1] << 2) & 0x3F) | (pData[2] >> 6)];
+            output[i++] = base64_key[pData[2] & 0x3F];
+
+            pData += 3;
+            dataSize -= 3;
+        }
+        output[i] = 0;
+        portDbgPrintf("nvrm-nvlog: %s\n", output);
+    } while (dataSize > 0);
+}
+
+NvBool nvlogKernelLogPush(NVLOG_BUFFER *unused, NvU8 *pData, NvU32 dataSize)
+{
+    PORT_UNREFERENCED_VARIABLE(unused);
+    _printBase64(pData, dataSize);
+    return NV_TRUE;
+}
+
+void nvlogDumpToKernelLog(NvBool bDumpUnchangedBuffersOnlyOnce)
+{
+    NvU32 i;
+    static NvU32 lastDumpPos[NVLOG_MAX_BUFFERS];
+
+    for (i = 0; i < NVLOG_MAX_BUFFERS; i++)
+    {
+        NVLOG_BUFFER *pBuf = NvLogLogger.pBuffers[i];
+
+        if (pBuf && pBuf->size)
+        {
+            if (bDumpUnchangedBuffersOnlyOnce)
+            {
+                NvU32 pos = pBuf->pos + (pBuf->size * pBuf->extra.ring.overflow);
+
+                // Dump the buffer only if its contents have changed
+                if (lastDumpPos[i] != pos)
+                {
+                    lastDumpPos[i] = pos;
+                    _printBase64((NvU8*)pBuf, NVLOG_BUFFER_SIZE(pBuf));
+                }
+            }
+            else
+            {
+                _printBase64((NvU8*)pBuf, NVLOG_BUFFER_SIZE(pBuf));
+            }
+        }
+    }
+}
+
+void nvlogDumpToKernelLogIfEnabled(void)
+{
+    NvU32 dumpNvlogValue;
+
+    // Debug and develop builds already dump everything as it happens.
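+    // A rough recipe for recovering the raw NVLOG_BUFFER bytes from a dmesg
+    // capture (illustrative only; assumes a base64 decoder that tolerates the
+    // interior '=' padding noted above):
+    //
+    //     sed -n 's/.*nvrm-nvlog: //p' dmesg.txt | base64 -d > nvlog.bin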
+#if defined(DEBUG) || defined(DEVELOP) + return; +#endif + + // Enable only if the regkey has been set + if (osReadRegistryDword(NULL, NV_REG_STR_RM_DUMP_NVLOG, &dumpNvlogValue) != NV_OK) + return; + + if (dumpNvlogValue != NV_REG_STR_RM_DUMP_NVLOG_ENABLE) + return; + + nvlogDumpToKernelLog(NV_FALSE); +} + +NV_STATUS nvlogRegisterFlushCb(void (*pCb)(void*), void *pData) +{ + NV_STATUS status = NV_ERR_INSUFFICIENT_RESOURCES; + portSyncRwLockAcquireWrite(NvLogLogger.flushCbsLock); + + for (NvU32 i = 0; i < NV_ARRAY_ELEMENTS(nvlogFlushCbs); i++) + { + // The same callback should not be registered twice + NV_ASSERT(nvlogFlushCbs[i].pCb != pCb || nvlogFlushCbs[i].pData != pData); + + if (nvlogFlushCbs[i].pCb == NULL) + { + nvlogFlushCbs[i].pCb = pCb; + nvlogFlushCbs[i].pData = pData; + + status = NV_OK; + goto done; + } + } + +done: + portSyncRwLockReleaseWrite(NvLogLogger.flushCbsLock); + return status; +} + +void nvlogDeregisterFlushCb(void (*pCb)(void*), void *pData) +{ + portSyncRwLockAcquireWrite(NvLogLogger.flushCbsLock); + + for (NvU32 i = 0; i < NV_ARRAY_ELEMENTS(nvlogFlushCbs); i++) + { + if (nvlogFlushCbs[i].pCb == pCb && nvlogFlushCbs[i].pData == pData) + { + nvlogFlushCbs[i] = (NvlogFlushCb){0}; + goto done; + } + } + +done: + portSyncRwLockReleaseWrite(NvLogLogger.flushCbsLock); +} + +void nvlogRunFlushCbs(void) +{ + portSyncRwLockAcquireRead(NvLogLogger.flushCbsLock); + for (NvU32 i = 0; i < NV_ARRAY_ELEMENTS(nvlogFlushCbs); i++) + if (nvlogFlushCbs[i].pCb != NULL) + nvlogFlushCbs[i].pCb(nvlogFlushCbs[i].pData); + portSyncRwLockReleaseRead(NvLogLogger.flushCbsLock); +} diff --git a/src/nvidia/src/kernel/diagnostics/nvlog_printf.c b/src/nvidia/src/kernel/diagnostics/nvlog_printf.c new file mode 100644 index 0000000..3c537b1 --- /dev/null +++ b/src/nvidia/src/kernel/diagnostics/nvlog_printf.c @@ -0,0 +1,1322 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/***************************************************************************\
+*                                                                           *
+* Description: Common debug print defines and functions                     *
+*                                                                           *
+\***************************************************************************/
+
+#include "core/core.h"
+#include "core/system.h"
+#include "os/os.h" // to pick up declarations for osDelay() and osDelayUs()
+#include "nvrm_registry.h"
+
+#include <ctrl/ctrl0000/ctrl0000system.h> // NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE
+
+static int inttodecfmtstr(NvS64 sval, char *dest, int fieldwidth, int flags);
+static int uinttohexfmtstr(NvU64 uval, char *dest, int fieldwidth, int flags);
+static int strtofmtstr(const char *src, char *dest, char *destLimit, int fieldwidth, int precision, int flags);
+
+#if 0
+static int float64todecfmtstr(F064 f64val, NvU8 *dest, int fieldwidth, int precision, int flags);
+#endif
+
+//
+// Numeric & string conversion flags (used if you call the 'XtoYfmtstr' routines directly)
+//
+enum {
+    DONTTERMINATE = 1,  // Don't null-terminate the string if this flag is set
+    UNSIGNED_F    = 2,  // Force an unsigned number conversion (other sign options are ignored)
+    PLUSSIGN_F    = 4,  // For signed numbers >= 0, force a '+' in the sign position
+    SPACESIGN_F   = 8,  // For signed numbers >= 0, force a space in the sign position
+    LEFTALIGN_F   = 16, // Left-justify the result in the destination field (overrides zero fill)
+    ZEROFILL_F    = 32, // Use leading zeros for padding to a field width
+    LOWERCASE_F   = 64  // Use lower case hex digits: a-f instead of A-F
+};
+
+//
+// nvDbgBreakpointEnabled - Returns true if triggering a breakpoint is allowed
+//
+NvBool osDbgBreakpointEnabled(void);
+NvBool nvDbgBreakpointEnabled(void)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    if (pSys != NULL)
+    {
+        if (pSys->getProperty(pSys, PDB_PROP_SYS_DEBUGGER_DISABLED))
+            return NV_FALSE;
+    }
+    return osDbgBreakpointEnabled();
+}
+
+#if NV_PRINTF_STRINGS_ALLOWED
+static PORT_SPINLOCK *_nv_dbg_lock = NULL;
+static char _nv_dbg_string[MAX_ERROR_STRING];
+
+//
+// nvDbgInit - Allocate the printf spinlock
+//
+NvBool
+nvDbgInit(void)
+{
+    if (NULL != _nv_dbg_lock)
+    {
+        // already initialized
+        return NV_TRUE;
+    }
+    if (portInitialize() != NV_OK)
+        return NV_FALSE;
+
+    _nv_dbg_lock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged());
+    if (_nv_dbg_lock == NULL)
+        return NV_FALSE;
+    else
+        return NV_TRUE;
+}
+
+//
+// nvDbgDestroy - Free the printf spinlock
+//
+void
+nvDbgDestroy(void)
+{
+    if (NULL != _nv_dbg_lock)
+    {
+        portSyncSpinlockDestroy(_nv_dbg_lock);
+        _nv_dbg_lock = NULL;
+        portShutdown();
+    }
+}
+
+//
+// nvDbg_PrintMsg - Common message control for two flavors of printf
+//
+// Differences for mods builds.
+//  * Mods has its own messaging system, and we always pass messages
+//    to mods unless RmMsg explicitly wants to hide a message.
+//  * Mods requires messages even when the debugger is not enabled.
+//  * Sorry for the #ifdefs, but RmMsg complicates the code enough
+//    that it is nice to have one implementation.
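+//  * (Illustrative note, inferred from the code below: on a release build
+//    debuglevel_min is LEVEL_ERROR, so e.g. an NV_PRINTF(LEVEL_INFO, ...) is
+//    filtered out here unless an RmMsg rule explicitly forces it with
+//    NVRM_MSG_PRINT.)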
+// +static NvBool +nvDbg_PrintMsg +( + const char *filename, + int linenumber, + const char *function, + int debuglevel, + const char *printf_format, + NvBool *pForce, + NvU32 *pPrefix +) +{ + NvU32 rc; + int debuglevel_min; + +#if defined(DEVELOP) || defined(DEBUG) || defined(QA_BUILD) + debuglevel_min = LEVEL_NOTICE; +#else + debuglevel_min = LEVEL_ERROR; +#endif + + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if ((NULL == pSys) || (pSys->getProperty(pSys, PDB_PROP_SYS_DEBUGGER_DISABLED))) + { + return NV_FALSE; + } + + // + // Message is filtered by an explicit RmMsg rule + // + rc = nvDbgRmMsgCheck(filename, linenumber, (char *)function, debuglevel, printf_format, pPrefix); + switch (rc) + { + case NVRM_MSG_HIDE: + // Hide this error message + return NV_FALSE; + + case NVRM_MSG_PRINT: + // Force this error message + *pForce = NV_TRUE; + return NV_TRUE; + + case NVRM_MSG_NORMAL: + default: + if (debuglevel >= debuglevel_min) + { + return NV_TRUE; + } + break; + } + return NV_FALSE; +} + +void nvDbg_Printf +( + const char *filename, + int linenumber, + const char *function, + int debuglevel, + const char *printf_format, + ... +) +{ + va_list arglist; + va_start(arglist, printf_format); + nvDbg_vPrintf(filename, linenumber, function, debuglevel, printf_format, arglist); + va_end(arglist); +} + +// +// Internal function to prepare _nv_dbg_string for printing. +// Should only be called while _nv_dbg_lock is held. +// +static void +_nvDbgPrepareString +( + const char *file, + int line, + const char *func, + const char *fmt, + NvU32 prefix, + va_list arglist +) +{ + NvU32 len = 0; + + // + // If RmMsg has added a prefix, skip the standard NV_PRINTF_PREFIX. + // If there is no prefix, don't include the RmMsg prefix. + // + if (portStringCompare(fmt, NV_PRINTF_PREFIX, sizeof(NV_PRINTF_PREFIX) - 1) == 0) + { + len = RmMsgPrefix(prefix, file, line, func, _nv_dbg_string, MAX_ERROR_STRING); + fmt += sizeof(NV_PRINTF_PREFIX) - 1; + } + + nvDbgVsnprintf(_nv_dbg_string + len, MAX_ERROR_STRING - len, fmt, arglist); +} + +// +// Temporary helper to map LEVEL_xxx constants to a platform specific level. +// +#if PORT_IS_FUNC_SUPPORTED(portDbgExPrintfLevel) + +static NvU32 _nvDbgForceLevel(NvBool bForce, NvU32 level) +{ + return bForce ? LEVEL_FATAL : level; +} +#endif + +// +// Some varargs interfaces need a va_list interface, but still +// want the common output buffer and the RmMsg handling. 
+// +void nvDbg_vPrintf +( + const char *filename, + int linenumber, + const char *function, + int debuglevel, + const char *printf_format, + va_list arglist +) +{ + NvBool force = NV_FALSE; + NvU32 prefix = 0; + + if (nvDbg_PrintMsg(filename, linenumber, function, debuglevel, printf_format, &force, &prefix)) + { + portSyncSpinlockAcquire(_nv_dbg_lock); + _nvDbgPrepareString(filename, linenumber, function, printf_format, prefix, arglist); +#if PORT_IS_FUNC_SUPPORTED(portDbgExPrintfLevel) + portDbgExPrintfLevel(_nvDbgForceLevel(force, debuglevel), + "%.*s", MAX_ERROR_STRING, _nv_dbg_string); +#else + portDbgPrintString(_nv_dbg_string, MAX_ERROR_STRING); +#endif + portSyncSpinlockRelease(_nv_dbg_lock); + } +} + + +#define IS_PRINT(c) (((c) >= 0x20) && ((c) <= 0x7E)) + +void nvDbg_PrintBuf +( + const char *file, + int line, + const char *function, + int dbglevel, + NvU8 buffer[], + NvU32 bufsize +) +{ + NvU32 i, j; + nvDbg_Printf(file, line, function, dbglevel, NV_PRINTF_ADD_PREFIX("printBuf [BEGIN]")); + for (i = 0; i < bufsize; i += 16) + { + nvDbg_Printf(file, line, function, dbglevel, "\n"); + nvDbg_Printf(file, line, function, dbglevel, NV_PRINTF_ADD_PREFIX("printBuf 0x%p "), buffer + i); + for (j = 0; j < 16; j++) + { + if ((i + j) < bufsize) + { + nvDbg_Printf(file, line, function, dbglevel, "%02x", *(buffer + i + j)); + } + else + { + nvDbg_Printf(file, line, function, dbglevel, " "); + } + } + nvDbg_Printf(file, line, function, dbglevel, " "); + for (j = 0; j < 16; j++) + { + if ((i + j) < bufsize) + { + nvDbg_Printf(file, line, function, dbglevel, "%c", IS_PRINT(*(buffer + i + j))? *(buffer + i + j) : '.'); + } + else + { + nvDbg_Printf(file, line, function, dbglevel, " "); + } + } + } + nvDbg_Printf(file, line, function, dbglevel, "\n"); + nvDbg_Printf(file, line, function, dbglevel, NV_PRINTF_ADD_PREFIX("printBuf [END]\n")); +} + +#endif + +#define TMPBUF_SIZE 63 +//====================================================================================== +// +// nvDbgVsnprintf() +// +//====================================================================================== +int nvDbgVsnprintf(char *dest, NvU32 destSize, const char *fmt, va_list args) +{ + int ch, precision, flags; + NvU32 fieldwidth; + int longlong; + NvS32 s32val; + NvU32 u32val; + NvS64 s64val; + NvU64 u64val; + + const char *f; + const char *specptr; + char *d; + char *strpval; + char *destLimit; + void *pval; + char tmpBuf[TMPBUF_SIZE + 1]; + NvU32 tmpSize; + + if (dest == NULL || destSize == 0) + { + return(0); // If we don't have a destination, we didn't do any characters + } + + f = fmt; + d = dest; + destLimit = dest + destSize - 1; + dest[destSize - 1] = 0; + + while ((ch = *f++) != '\0') + { + if (ch != '%') + { + if (d < destLimit) + { + *d++ = (NvU8)ch; + } + continue; + } + longlong = NV_FALSE; + specptr = f - 1; // Save a pointer to the '%' specifier, in case of syntax errors + ch = *f++; + + // revert to correct printf behavior for % + // from printf.3 regarding '%' format character: + // % A `%' is written. No argument is converted. The complete conversion specification is `%%'. 
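+        // (Worked example, illustrative -- not in the original source: for the
+        //  format string "50%%", '5' and '0' are copied by the literal path
+        //  above, and the second '%' is emitted below without consuming any
+        //  argument, producing "50%".)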
+ if (ch == '%') { + if (d < destLimit) + { + *d++ = (NvU8)ch; + } + continue; + } + + flags = DONTTERMINATE; // Don't terminate substrings -- we'll null-terminate when we're all done + // Check for left-alignment + if (ch == '-') { + flags |= LEFTALIGN_F; + ch = *f++; + } + // Check for using a plus sign for non-negative numbers + if (ch == '+') { + flags |= PLUSSIGN_F; + ch = *f++; + } + // Check for using a space character (sign place-holder) for non-negative numbers + if (ch == ' ') { + flags |= SPACESIGN_F; + ch = *f++; + } + // Check for leading zero fill + if (ch == '0') { + flags |= ZEROFILL_F; + // Don't bump the character pointer in case '0' was the only digit + } + // Collect the field width specifier + if (ch == '*') { + // Field width specified by separate argument + fieldwidth = va_arg(args, int); + ch = *f++; + } + else { + fieldwidth = 0; // Default field width + while (ch >= '0' && ch <= '9') { + fieldwidth = fieldwidth * 10 + ch - '0'; + ch = *f++; + } + } + + // Check for a precision specifier + precision = -1; // Default unspecified precision + if (ch == '.') { // We have a precision specifier, skip the '.' + ch = *f++; + if (ch == '*') { + // precision specified by separate argument + precision = va_arg(args, int); + ch = *f++; + } + else { + while (ch >= '0' && ch <= '9') { + precision = precision * 10 + ch - '0'; + ch = *f++; + } + } + } + + if (ch == 'l') { + ch = *f++; + if (ch == 'l') { + longlong = NV_TRUE; + ch = *f++; + } + } + + // Perform the conversion operation + switch (ch) { + case 'c': // Copy an ASCII character + u32val = va_arg(args, int); + if (d < destLimit) + { + *d++ = (NvU8) u32val; + } + break; + case 'u': // Copy a formatted, unsigned decimal number + flags |= UNSIGNED_F; + if (fieldwidth > TMPBUF_SIZE) + { + fieldwidth = TMPBUF_SIZE; + } + if ( longlong ) // long long specifier "llu" or "lld" + { + u64val = va_arg(args, unsigned long long); + // Format the number, increment the dest pointer by the characters copied + tmpSize = inttodecfmtstr(u64val, tmpBuf, fieldwidth, flags); + } + else + { + u32val = va_arg(args, unsigned int); + // Format the number, increment the dest pointer by the characters copied + tmpSize = inttodecfmtstr((NvU64)u32val, tmpBuf, fieldwidth, flags); + } + if (d < destLimit) + { + tmpSize = (d + tmpSize) < destLimit ? tmpSize : (NvU32)(destLimit - d); + portMemCopy(d, tmpSize, tmpBuf, tmpSize); + d += tmpSize; + } + break; + case 'd': // Copy a formatted, signed decimal number + if (fieldwidth > TMPBUF_SIZE) + { + fieldwidth = TMPBUF_SIZE; + } + if ( longlong ) // long long specifier "llu" or "lld" + { + s64val = va_arg(args, long long); + // Format the number, increment the dest pointer by the characters copied + tmpSize = inttodecfmtstr(s64val, tmpBuf, fieldwidth, flags); + } + else + { + s32val = va_arg(args, int); + // Format the number, increment the dest pointer by the characters copied + tmpSize = inttodecfmtstr((NvS64)s32val, tmpBuf, fieldwidth, flags); + } + if (d < destLimit) + { + tmpSize = (d + tmpSize) < destLimit ? 
tmpSize : (NvU32)(destLimit - d);
+                    portMemCopy(d, tmpSize, tmpBuf, tmpSize);
+                    d += tmpSize;
+                }
+                break;
+            case 'x':   // Copy a formatted, lower-case hexadecimal number
+                flags |= LOWERCASE_F;
+            case 'X':   // Copy a formatted, upper-case hexadecimal number
+                if (fieldwidth > TMPBUF_SIZE)
+                {
+                    fieldwidth = TMPBUF_SIZE;
+                }
+                if ( longlong )     // long long specifier "llx" or "llX"
+                {
+                    u64val = va_arg(args, long long);
+                    // Format the number, increment the dest pointer by the characters copied
+                    tmpSize = uinttohexfmtstr(u64val, tmpBuf, fieldwidth, flags);
+                }
+                else
+                {
+                    u32val = va_arg(args, int);
+                    // Format the number, increment the dest pointer by the characters copied
+                    tmpSize = uinttohexfmtstr((NvU64)u32val, tmpBuf, fieldwidth, flags);
+                }
+                if (d < destLimit)
+                {
+                    tmpSize = (d + tmpSize) < destLimit ? tmpSize : (NvU32)(destLimit - d);
+                    portMemCopy(d, tmpSize, tmpBuf, tmpSize);
+                    d += tmpSize;
+                }
+                break;
+            case 'p':   // Copy a formatted pointer value
+                if (fieldwidth > TMPBUF_SIZE)
+                {
+                    fieldwidth = TMPBUF_SIZE;
+                }
+                pval = va_arg(args, void *);
+                tmpSize = uinttohexfmtstr((NvU64)((NvUPtr)pval), tmpBuf, fieldwidth, flags);
+                if (d < destLimit)
+                {
+                    tmpSize = (d + tmpSize) < destLimit ? tmpSize : (NvU32)(destLimit - d);
+                    portMemCopy(d, tmpSize, tmpBuf, tmpSize);
+                    d += tmpSize;
+                }
+                break;
+            case 's':   // Copy a formatted string
+                strpval = va_arg(args, char *);
+                d += strtofmtstr(strpval, d, destLimit, fieldwidth, precision, flags);
+                break;
+            case 0:     // Gracefully handle premature end-of-string
+                f--;    // Back up, now f points to the null character again
+            default:    // Unexpected conversion operator, so just echo to the destination
+                while (specptr < f)
+                {
+                    if (d < destLimit)
+                    {
+                        *d++ = *specptr;
+                    }
+                    specptr++;
+                }
+                if (ch == 0)
+                {
+                    goto stringdone;
+                }
+                break;
+        }
+    }
+
+stringdone:
+    if (d <= destLimit)
+    {
+        *d = '\0';  // Null-terminate the string
+    }
+    return((int)(d - dest));    // Return the number of characters we [might have] transferred
+}
+
+int nvDbgSnprintf(char *dest, NvU32 destSize, const char *fmt, ...)
+{
+    va_list arglist;
+    int len;
+
+    va_start(arglist, fmt);
+    len = nvDbgVsnprintf(dest, destSize, fmt, arglist);
+    va_end(arglist);
+
+    return len;
+}
+
+enum {  // Padding option definitions
+    PRESPACE_O  = 1,
+    PREZERO_O   = 2,
+    POSTSPACE_O = 4
+};
+
+#define NUMBUFSIZE 20   // Should be enough for 64-bit integers in decimal or hex
+
+//======================================================================================
+//
+// inttodecfmtstr()
+//
+// This takes a signed integer value and converts it to a formatted decimal string,
+// using options (field width and flags) like those provided by sprintf(). The
+// value is assumed to be signed unless the UNSIGNED_F flag is set. Look at the code
+// for nvDbgVsnprintf() above to see which formatting options are implemented.
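+//
+// e.g. (illustrative) inttodecfmtstr(-42, buf, 6, 0) writes "   -42" and
+// returns 6, while adding ZEROFILL_F yields "-00042" instead.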
+//
+//======================================================================================
+static int inttodecfmtstr(NvS64 sval, char *dest, int fieldwidth, int flags)
+{
+    int i, digitcount, destcount;
+    int sign, signchar;
+    int fillcount;
+    int pad_options;
+    NvU64 uval, quotient, remainder;
+    char *intdigp;
+    char nbuf[NUMBUFSIZE];
+
+    signchar = ' ';     // avoid compiler init warning
+    // Process the sign-related options
+    if (flags & UNSIGNED_F) {   // Unsigned conversion
+        sign = 0;               // No sign character
+    } else {                    // We're doing a signed conversion
+        sign = 1;               // Assume we'll have a sign character
+        if (sval < 0) {
+            signchar = '-';
+            sval = -sval;       // Make the number positive now so we can 'digitize' it
+        } else {                // sval >= 0
+            if (flags & PLUSSIGN_F)
+                signchar = '+';
+            else if (flags & SPACESIGN_F)
+                signchar = ' ';
+            else
+                sign = 0;       // No sign character
+        }
+    }
+    uval = sval;    // Do unsigned math from here on out
+
+    // Convert the number into ASCII decimal digits in our local buffer, counting them
+    intdigp = &nbuf[NUMBUFSIZE];    // Point past the last character in the buffer
+    digitcount = 0;                 // Nothing written to our local buffer yet
+    do {
+        quotient = uval / 10;
+        remainder = uval - quotient * 10;
+        *--intdigp = (NvU8) (remainder + '0');  // Put the digit into the next lower buffer slot
+        digitcount++;
+        uval = quotient;
+    } while (uval > 0);
+
+    // Process the field-padding options
+    pad_options = 0;    // Assume we won't be doing any padding
+    fillcount = fieldwidth - (sign + digitcount);   // Account for the sign, if used
+    if (fillcount > 0) {    // We need to do left or right padding
+        if (flags & LEFTALIGN_F) {
+            pad_options = POSTSPACE_O;
+        } else {            // Right-aligned, fill with zeros or spaces
+            if (flags & ZEROFILL_F)
+                pad_options = PREZERO_O;
+            else
+                pad_options = PRESPACE_O;
+        }
+    }
+
+    destcount = 0;  // Nothing written out to the destination yet
+
+    // Copy any leading spaces
+    if (pad_options & PRESPACE_O) {
+        for (i = 0; i < fillcount; i++)     // Copy the pad character(s)
+            *dest++ = ' ';
+        destcount += fillcount;
+    }
+    // Copy the sign character, if any
+    if (sign) {
+        *dest++ = (char)signchar;
+        destcount++;
+    }
+    // Copy any leading zeros
+    if (pad_options & PREZERO_O) {
+        for (i = 0; i < fillcount; i++)     // Copy the pad character(s)
+            *dest++ = '0';
+        destcount += fillcount;
+    }
+    // Copy the decimal digits from our local buffer
+    for (i = 0; i < digitcount; i++)
+        *dest++ = *intdigp++;
+    destcount += digitcount;
+
+    // Copy any trailing spaces
+    if (pad_options & POSTSPACE_O) {
+        for (i = 0; i < fillcount; i++)     // Copy the pad character(s)
+            *dest++ = ' ';
+        destcount += fillcount;
+    }
+    if ((flags & DONTTERMINATE) == 0)   // Null-terminate the string unless requested not to
+        *dest = 0;
+    return(destcount);  // Return the character count, not including the null
+}
+
+//======================================================================================
+//
+// uinttohexfmtstr()
+//
+// This takes an unsigned integer value and converts it to a formatted hexadecimal
+// string, using options (field width and flags) like those provided by sprintf(). Look
+// at the code for nvDbgVsnprintf() above to see which formatting options are implemented.
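+//
+// e.g. (illustrative) uinttohexfmtstr(0x1AB, buf, 8, ZEROFILL_F) writes
+// "000001AB", and adding LOWERCASE_F gives "000001ab".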
+// +//====================================================================================== +static int uinttohexfmtstr(NvU64 uval, char *dest, int fieldwidth, int flags) +{ + int i, digitcount, destcount; + int c, hexadjust; + int fillcount; + char fillchar = ' '; + int pad_options; + char *intdigp; + char nbuf[NUMBUFSIZE]; + + hexadjust = 'A' - '9' - 1; + if (flags & LOWERCASE_F) + hexadjust += 'a' - 'A'; + + // Convert the number into ASCII hex digits in our local buffer, counting them + intdigp = &nbuf[NUMBUFSIZE]; // Point past the last character in the buffer + digitcount = 0; // Nothing written to our local buffer yet + do { + c = (int)(uval % 16) + '0'; + if (c > '9') /* A-F */ + c += hexadjust; + *--intdigp = (NvU8)c; // Put the digit into the next lower buffer slot + digitcount++; + uval /= 16; + } while (uval > 0); + + // Process the field-padding options + pad_options = 0; // Assume we won't be doing any padding + fillcount = fieldwidth - digitcount; // No sign to worry about + if (fillcount > 0) { // We need to do left or right padding + fillchar = ' '; // Most common fill character is the space + if (flags & LEFTALIGN_F) { + pad_options = POSTSPACE_O; + } else { // Right-aligned, fill with zeros or spaces + if (flags & ZEROFILL_F) { + pad_options = PREZERO_O; + fillchar = '0'; + } else { + pad_options = PRESPACE_O; + } + } + } + + destcount = 0; // Nothing written out to the destination yet + + // Copy any leading zeros or spaces + if (pad_options & (PREZERO_O | PRESPACE_O)) { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + *dest++ = fillchar; + destcount += fillcount; + } + // Copy the hex digits from our local buffer + for (i = 0; i < digitcount; i++) + *dest++ = *intdigp++; + destcount += digitcount; + + // Copy any trailing spaces + if (pad_options & POSTSPACE_O) { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + *dest++ = fillchar; + destcount += fillcount; + } + if ((flags & DONTTERMINATE) == 0) // Null-terminate the string unless requested not to + *dest = 0; + return(destcount); // Return the character count, not including the null +} + +//====================================================================================== +// +// strtofmtstr() +// +// This takes a source C string and converts it to a formatted output C string, +// using options (field width, precision, and flags) like those provided by sprintf(). Look at +// the code for nvDbgVsnprintf() above to see which formatting options are implemented. +// +// fieldwidth - minimum total characters to output (including pad) +// precision - maximum characters from src to output; or entire string if negative +//====================================================================================== +static int strtofmtstr(const char *src, char *dest, char *destLimit, int fieldwidth, int precision, int flags) +{ + int i, srclen; + int fillcount; + char fillchar = ' '; + int pad_options; + const char *s; + char *d; + + // Make sure we have a source string to work with + if (src == NULL) + { + src = ""; + } + + // For padding calculations, we need to know the source string length + for (s = src, srclen = 0; *s != 0; s++) + srclen++; + + // But truncated to precision, if specified. 
+    if (precision >= 0 && srclen > precision)
+        srclen = precision;
+
+    // Process the field-padding options
+    pad_options = 0;    // Assume we won't be doing any padding
+    fillcount = fieldwidth - srclen;
+
+    if (fillcount > 0) {    // We need to do left or right padding
+        fillchar = ' ';     // Most common fill character is the space
+        if (flags & LEFTALIGN_F) {
+            pad_options = POSTSPACE_O;
+        } else {            // Right-aligned, fill with zeros or spaces
+            if (flags & ZEROFILL_F) {
+                pad_options = PREZERO_O;
+                fillchar = '0';
+            } else {
+                pad_options = PRESPACE_O;
+            }
+        }
+    }
+
+    s = src;
+    d = dest;
+
+    // Copy any leading zeros or spaces
+    if (pad_options & (PREZERO_O | PRESPACE_O))
+    {
+        for (i = 0; i < fillcount; i++)     // Copy the pad character(s)
+        {
+            if (d < destLimit)
+            {
+                *d++ = fillchar;
+            }
+        }
+    }
+    // Copy the characters from the source string
+    for (i = 0; i < srclen; i++)
+    {
+        if (d < destLimit)
+        {
+            *d++ = *s++;
+        }
+    }
+
+    // Copy any trailing spaces
+    if (pad_options & POSTSPACE_O)
+    {
+        for (i = 0; i < fillcount; i++)     // Copy the pad character(s)
+        {
+            if (d < destLimit)
+            {
+                *d++ = fillchar;
+            }
+        }
+    }
+
+    if ((flags & DONTTERMINATE) == 0)   // Null-terminate the string unless requested not to
+        *d = 0;
+    return((int)(d - dest));    // Return the character count, not including the null
+}
+
+#if NV_PRINTF_STRINGS_ALLOWED
+//
+// String matching helper for nvDbgRmMsgCheck.
+// strstr with the length of the pattern string
+// passed in.
+//
+
+static const char *nv_strnstr
+(
+    const char *str,
+    const char *pat,
+    int patlen
+)
+{
+    int len;
+
+    // pat may be NULL; a NULL pattern matches anything, which makes the noun optional
+    if (pat == NULL)
+    {
+        return str;
+    }
+
+    while (*str)
+    {
+        len = 0;
+        while (len < patlen)
+        {
+            if (str[len] != pat[len])
+                break;
+            len++;
+        }
+        if (len == patlen)
+        {
+            return str;
+        }
+        str++;
+    }
+    return NULL;
+}
+
+//
+// Buffer to store RmMsg string. This is stored in bss
+// so it can be updated in the debugger dynamically.
+//
+char RmMsg[NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE];
+
+//
+// nvDbgRmMsgCheck
+//   Override priority of debug printf based on file, function with optional
+//   line ranges. Rules are matched on each printf. Rules are applied left to
+//   right and the final result is the cumulative result of all rules.
+//
+// Format
+//   rule = [!][filename|function][:startline][-endline][@level][^prefix]
+//   Format = rule[,rule]
+//
+// See RmMsg wiki for detailed documentation
+
+// Examples:
+//   "dmanv50.c"                      - enable all printfs in dmanv50.c
+//   "fifoAlloc_NV50"                 - enable all printfs in function fifoAlloc_NV50
+//   "!fifoAlloc_NV50"                - disable all printfs in function fifoAlloc_NV50
+//   "dmanv50.c:150"                  - enable printf on line 150 of dmanv50.c
+//   "dmanv50.c:100-200"              - enable printf on lines 100-200 in dmanv50.c
+//   "dmanv50.c:100-200,!dmanv50:125" - same but disable printf on line 125
+//   "fifo^*"                         - enable verbose prefix for fifo
+//   ":"                              - enable all printfs
+//   "!"                              - disable all printfs (dangerous!)
+//
+NvU32
+nvDbgRmMsgCheck
+(
+    const char * filename,
+    NvU32        linenumber,
+    const char * function,
+    NvU32        debuglevel,
+    const char * printf_format,
+    NvU32      * pPrefix
+)
+{
+    enum { NOUN, STARTLINE, ENDLINE, LEVEL, PREFIX } state;
+    int status = NVRM_MSG_NORMAL;
+    int inc;
+    char *noun;
+    NvU32 nounlen;
+    NvU32 startline;
+    NvU32 endline;
+    NvU32 level;
+    NvU32 prefix = NVRM_MSG_PREFIX_NVRM | NVRM_MSG_PREFIX_FUNCTION;
+    NvU32 tempPrefix;
+    char *p;
+
+    // Handle the normal case quickly.
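+    // (Worked example, illustrative: with RmMsg = "dmanv50.c:100-200,!dmanv50:125",
+    //  the first rule matches dmanv50.c lines 100..200 with inc == 1 and forces
+    //  NVRM_MSG_PRINT; the second rule's leading '!' sets inc == 0, so a printf
+    //  on line 125 ends up NVRM_MSG_HIDE.)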
+    if (RmMsg[0] == '\0')
+    {
+        goto done;
+    }
+
+    p = RmMsg;
+
+    while (*p != '\0')
+    {
+        // Initial default state for this rule
+        inc = 1;
+        noun = NULL;
+        nounlen = 0;
+        startline = 0;
+        endline = 0x7fffffff;
+        tempPrefix = NVRM_MSG_PREFIX_NVRM | NVRM_MSG_PREFIX_FUNCTION;
+        level = LEVEL_INFO;     // default to everything
+        state = NOUN;
+
+        for (; *p != '\0' && *p != ','; p++)
+        {
+            if (*p == ':')
+            {
+                state = STARTLINE;
+                continue;
+            }
+            else if (*p == '-')
+            {
+                state = ENDLINE;
+                endline = 0;
+                continue;
+            }
+            else if (*p == '!' && !noun)
+            {
+                state = NOUN;
+                inc = 0;
+                continue;
+            }
+            else if (*p == '@')
+            {
+                state = LEVEL;
+                level = 0;
+                continue;
+            }
+            else if (*p == '^')
+            {
+                state = PREFIX;
+                tempPrefix = NVRM_MSG_PREFIX_NVRM | NVRM_MSG_PREFIX_FUNCTION;
+                continue;
+            }
+            switch (state)
+            {
+                case NOUN:
+                    if (noun == NULL)
+                    {
+                        noun = p;
+                    }
+                    nounlen++;
+                    break;
+                case STARTLINE:
+                    if ((*p >= '0') && (*p <= '9'))
+                    {
+                        startline *= 10;
+                        startline += *p - '0';
+                        endline = startline;    // only one line
+                    }
+                    break;
+                case ENDLINE:
+                    if ((*p >= '0') && (*p <= '9'))
+                    {
+                        endline *= 10;
+                        endline += *p - '0';
+                    }
+                    break;
+                case LEVEL:
+                    if ((*p >= '0') && (*p <= '9'))
+                    {
+                        level *= 10;
+                        level += *p - '0';
+                    }
+                    break;
+                case PREFIX:
+                    switch (*p)
+                    {
+                        case '*':
+                            tempPrefix = NVRM_MSG_PREFIX_NVRM | NVRM_MSG_PREFIX_FILE |
+                                         NVRM_MSG_PREFIX_LINE | NVRM_MSG_PREFIX_FUNCTION |
+                                         NVRM_MSG_PREFIX_OSTIMESTAMP;
+                            break;
+                        case 'n':
+                            tempPrefix |= NVRM_MSG_PREFIX_NVRM;
+                            break;
+                        case 'N':
+                            tempPrefix &= ~NVRM_MSG_PREFIX_NVRM;
+                            break;
+                        case 'c':
+                            tempPrefix |= NVRM_MSG_PREFIX_FILE;
+                            break;
+                        case 'C':
+                            tempPrefix &= ~NVRM_MSG_PREFIX_FILE;
+                            break;
+                        case 'l':
+                            tempPrefix |= NVRM_MSG_PREFIX_LINE;
+                            break;
+                        case 'L':
+                            tempPrefix &= ~NVRM_MSG_PREFIX_LINE;
+                            break;
+                        case 'f':
+                            tempPrefix |= NVRM_MSG_PREFIX_FUNCTION;
+                            break;
+                        case 'F':
+                            tempPrefix &= ~NVRM_MSG_PREFIX_FUNCTION;
+                            break;
+                        case 't':
+                            tempPrefix |= NVRM_MSG_PREFIX_OSTIMESTAMP;
+                            break;
+                        case 'T':
+                            tempPrefix &= ~NVRM_MSG_PREFIX_OSTIMESTAMP;
+                            break;
+                    }
+                    break;
+                default:    // ignore any trailing words
+                    break;
+            }
+        }
+
+        // Does the last rule hit?
+        if (((nv_strnstr(filename, noun, nounlen) != NULL) ||
+             (nv_strnstr(function, noun, nounlen) != NULL)) &&
+            (linenumber >= startline) &&
+            (linenumber <= endline))
+        {
+            status = inc ? NVRM_MSG_PRINT : NVRM_MSG_HIDE;
+            prefix = tempPrefix;
+
+            if (status == NVRM_MSG_PRINT && debuglevel < level)
+            {
+                status = NVRM_MSG_HIDE;
+            }
+        }
+
+        if (*p == '\0')
+        {
+            break;
+        }
+        p++;
+    }
+
+done:
+    if (pPrefix != NULL)
+    {
+        *pPrefix = prefix;
+    }
+
+    return status;
+}
+
+//
+// RmMsgPrefix - Add the RmMsg prefix to the passed in string, returning
+// the length of the formatted string.
+//
+// Format: "NVRM: file linenum function timestamp: "
+//
+NvU32
+RmMsgPrefix
+(
+    NvU32 prefix,
+    const char *filename,
+    NvU32 linenumber,
+    const char *function,
+    char *str,
+    NvU32 totalLen
+)
+{
+    const char *space = "";
+    NvU32 len = 0;
+    NvU32 sec, usec;
+
+    *str = '\0';
+
+    if (prefix & NVRM_MSG_PREFIX_NVRM)
+    {
+        portStringCopy(str + len, totalLen - len, NV_PRINTF_PREFIX, sizeof(NV_PRINTF_PREFIX));
+        len += sizeof(NV_PRINTF_PREFIX) - 1;
+        portStringCopy(str + len, totalLen - len, NV_PRINTF_PREFIX_SEPARATOR, sizeof(NV_PRINTF_PREFIX_SEPARATOR));
+        len += sizeof(NV_PRINTF_PREFIX_SEPARATOR) - 1;
+    }
+
+    if (prefix & NVRM_MSG_PREFIX_FILE)
+    {
+        len += nvDbgSnprintf(str + len, totalLen - len, "%s%s", space, filename);
+        space = " ";
+    }
+
+    if (prefix & NVRM_MSG_PREFIX_LINE)
+    {
+        len += nvDbgSnprintf(str + len, totalLen - len, "%s%d", space, linenumber);
+        space = " ";
+    }
+
+    if (prefix & NVRM_MSG_PREFIX_FUNCTION)
+    {
+        len += nvDbgSnprintf(str + len, totalLen - len, "%s%s", space, function);
+        space = " ";
+    }
+
+    if (prefix & NVRM_MSG_PREFIX_OSTIMESTAMP)
+    {
+        osGetSystemTime(&sec, &usec);
+
+        len += nvDbgSnprintf(str + len, totalLen - len, "%s%d.%06d", space, sec, usec);
+    }
+
+    return len;
+}
+
+//
+// Initialize RmMsg from the registry. Skip if the string was already
+// initialized (from the debugger).
+// Called from the platform-specific code.
+//
+void nvDbgInitRmMsg(OBJGPU *pGpu)
+{
+    NvU32 len = NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE;
+
+    if (RmMsg[0] == '\0')
+    {
+        if (osReadRegistryString(pGpu, NV_REG_STR_RM_MSG,
+                                 (NvU8*)RmMsg, &len) != NV_OK)
+        {
+            len = NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE;
+        }
+    }
+}
+
+#else // #else ! NV_PRINTF_STRINGS_ALLOWED
+
+void nvDbgInitRmMsg(OBJGPU *pGpu)
+{
+}
+
+NvU32
+nvDbgRmMsgCheck
+(
+    const char * filename,
+    NvU32        linenumber,
+    const char * function,
+    NvU32        debuglevel,
+    const char * printf_format,
+    NvU32      * pPrefix
+)
+{
+    return NVRM_MSG_HIDE;
+}
+
+#endif // #if NV_PRINTF_STRINGS_ALLOWED
+
+/*!
+ * @brief Does a byte by byte dump of the buffer passed.
+ *
+ * @param[in] pBuffer   Pointer to the buffer to dump.
+ * @param[in] length    Length of the buffer to dump (in # of bytes).
+ */
+void
+nvDbgDumpBufferBytes
+(
+    void *pBuffer,
+    NvU32 length
+)
+{
+    NvU8 *s = (NvU8 *)pBuffer;
+    NvU32 remainingBytes = length % 16;
+    NvU32 i;
+
+    NV_PRINTF(LEVEL_ERROR,
+              "                   x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xa xb xc xd xe xf\n");
+
+    for (i = 0; i < (length / 16); i++)
+    {
+
+        NV_PRINTF(LEVEL_ERROR,
+                  "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+                  s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8],
+                  s[9], s[10], s[11], s[12], s[13], s[14], s[15]);
+
+        s += 16;
+    }
+
+    /*
+     * 16 statement switch, so that these are added to nvlog correctly.
+     */
+    switch (remainingBytes)
+    {
+        case 0:
+        default:
+            break;
+        case 1:
+            NV_PRINTF(LEVEL_ERROR,
+                      "%p %02x .. .. .. .. .. .. .. .. .. .. .. .. .. .. ..\n",
+                      s, s[0]);
+            break;
+        case 2:
+            NV_PRINTF(LEVEL_ERROR,
+                      "%p %02x %02x .. .. .. .. .. .. .. .. .. .. .. .. .. ..\n",
+                      s, s[0], s[1]);
+            break;
+        case 3:
+            NV_PRINTF(LEVEL_ERROR,
+                      "%p %02x %02x %02x .. .. .. .. .. .. .. .. .. .. .. .. ..\n",
+                      s, s[0], s[1], s[2]);
+            break;
+        case 4:
+            NV_PRINTF(LEVEL_ERROR,
+                      "%p %02x %02x %02x %02x .. .. .. .. .. .. .. .. .. .. .. ..\n",
+                      s, s[0], s[1], s[2], s[3]);
+            break;
+        case 5:
+            NV_PRINTF(LEVEL_ERROR,
+                      "%p %02x %02x %02x %02x %02x .. .. .. .. .. .. .. .. .. ..
..\n", + s, s[0], s[1], s[2], s[3], s[4]); + break; + case 6: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x .. .. .. .. .. .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5]); + break; + case 7: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x .. .. .. .. .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6]); + break; + case 8: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x .. .. .. .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7]); + break; + case 9: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x .. .. .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8]); + break; + case 10: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x .. .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9]); + break; + case 11: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10]); + break; + case 12: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10], s[11]); + break; + case 13: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10], s[11], s[12]); + break; + case 14: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10], s[11], s[12], s[13]); + break; + case 15: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10], s[11], s[12], s[13], s[14]); + break; + } +} + diff --git a/src/nvidia/src/kernel/diagnostics/profiler.c b/src/nvidia/src/kernel/diagnostics/profiler.c new file mode 100644 index 0000000..a21e490 --- /dev/null +++ b/src/nvidia/src/kernel/diagnostics/profiler.c @@ -0,0 +1,227 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/***************************** ODB State Routines **************************\
+*                                                                           *
+*   Simple API to measure elapsed times in RM for profiling                 *
+*                                                                           *
+\***************************************************************************/
+
+#include "diagnostics/profiler.h"
+#include "os/os.h"
+
+static void _rmProfStopTime(RM_PROF_STATS *pStats, NvU64 stop_ns);
+
+/*!
+ * @brief Start measuring elapsed time for a specific profiling module.
+ *
+ * @param[in,out] pStats Profiling stats for the module
+ */
+void
+rmProfStart
+(
+    RM_PROF_STATS *pStats
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(pStats != NULL);
+
+    if (pStats->start_ns != 0)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Starting time measurement that is already started\n");
+        //
+        // No breakpoint since this isn't fatal by itself.
+        // Most likely there was an error that propagated up the stack before
+        // the measurement was stopped on the last cycle.
+        //
+        // In that case, restarting the measurement is probably the right thing
+        // to do anyway.
+        //
+    }
+    osGetPerformanceCounter(&pStats->start_ns);
+}
+
+/*!
+ * @brief Stop measuring elapsed time for a specific profiling module and
+ *        update the module's statistics.
+ *
+ * @param[in,out] pStats Profiling stats for the module
+ */
+void
+rmProfStop
+(
+    RM_PROF_STATS *pStats
+)
+{
+    NvU64 stop_ns;
+
+    NV_ASSERT_OR_RETURN_VOID(pStats != NULL);
+
+    osGetPerformanceCounter(&stop_ns);
+    _rmProfStopTime(pStats, stop_ns);
+}
+
+/*!
+ * Same as #rmProfStop but parameterized by the stop time.
+ */
+static void
+_rmProfStopTime
+(
+    RM_PROF_STATS *pStats,
+    NvU64          stop_ns
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(pStats != NULL);
+
+    if (pStats->start_ns == 0)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Stopping time measurement that is already stopped\n");
+        DBG_BREAKPOINT();
+        //
+        // Breakpoint since this case is more serious - something is likely
+        // wrong with the profiling code. Also return early so the bogus
+        // measurement is not recorded.
+        //
+        return;
+    }
+    RM_PROF_RECORD(pStats, stop_ns - pStats->start_ns);
+    pStats->start_ns = 0;
+}
+
+/*!
+ * @brief Manually update the statistics for one cycle of a specific profiling
+ *        module.
+ *
+ * @param[in,out] pStats  Profiling stats for the module
+ * @param[in]     time_ns Elapsed time in nanoseconds for this cycle.
+ */
+void
+rmProfRecord
+(
+    RM_PROF_STATS *pStats,
+    NvU64          time_ns
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(pStats != NULL);
+
+    if (pStats->count == 0 || time_ns < pStats->min_ns)
+    {
+        pStats->min_ns = time_ns;
+    }
+    if (pStats->count == 0 || time_ns > pStats->max_ns)
+    {
+        pStats->max_ns = time_ns;
+    }
+    pStats->total_ns += time_ns;
+    pStats->count += 1;
+}
+
+/*!
+ * @brief Start measuring time for the specified profiling group (begin a new cycle).
+ *
+ * @param[out]    pGroup Profiling group structure to be used.
+ * @param[in,out] pTotal Optional stats for the whole group duration (may be NULL).
+ * @param[in,out] pFirst First module of the group.
+ */
+void
+rmProfGroupStart
+(
+    RM_PROF_GROUP *pGroup,
+    RM_PROF_STATS *pTotal,
+    RM_PROF_STATS *pFirst
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(pGroup != NULL);
+    NV_ASSERT_OR_RETURN_VOID(pFirst != NULL);
+
+    // Start profiling the first module.
+    RM_PROF_START(pFirst);
+
+    // Reuse the first module's start time for the total module.
+    if (pTotal != NULL)
+    {
+        pTotal->start_ns = pFirst->start_ns;
+    }
+
+    // Initialize the group structure.
+    pGroup->pTotal = pTotal;
+    pGroup->pLast = pFirst;
+}
+
+/*!
+ * @brief Continue profiling the next module of a profiling group.
+ *
+ * @param[in,out] pGroup Profiling group.
+ * @param[in,out] pNext  Next module of the group.
+ */
+void
+rmProfGroupNext
+(
+    RM_PROF_GROUP *pGroup,
+    RM_PROF_STATS *pNext
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(pGroup != NULL);
+    NV_ASSERT_OR_RETURN_VOID(pGroup->pLast != NULL);
+    NV_ASSERT_OR_RETURN_VOID(pNext != NULL);
+
+    // Start profiling the next module.
+    RM_PROF_START(pNext);
+
+    // Reuse the next module's start time for the last module's stop time.
+    _rmProfStopTime(pGroup->pLast, pNext->start_ns);
+
+    // Update the group structure.
+    pGroup->pLast = pNext;
+}
+
+/*!
+ * @brief Stop profiling a cycle of a profiling group (ends both the last and total modules).
+ *
+ * @param[in] pGroup Profiling group.
+ */
+void
+rmProfGroupStop
+(
+    RM_PROF_GROUP *pGroup
+)
+{
+    NvU64 stop_ns;
+
+    NV_ASSERT_OR_RETURN_VOID(pGroup != NULL);
+    NV_ASSERT_OR_RETURN_VOID(pGroup->pLast != NULL);
+
+    osGetPerformanceCounter(&stop_ns);
+
+    // Reuse the same stop time for both the last and total module.
+    _rmProfStopTime(pGroup->pLast, stop_ns);
+    if (pGroup->pTotal != NULL)
+    {
+        _rmProfStopTime(pGroup->pTotal, stop_ns);
+    }
+
+    // Clear the group structure.
+    pGroup->pTotal = NULL;
+    pGroup->pLast = NULL;
+}
diff --git a/src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_arch_t234d.c b/src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_arch_t234d.c
new file mode 100644
index 0000000..bf2467c
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_arch_t234d.c
@@ -0,0 +1,36 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*!
+ * @file
+ * @brief T234D / DCE client specific kernel stubs
+ */
+
+#include "core/core.h"
+#include "gpu/gpu_arch.h"
+
+NvU32 gpuarchGetSystemPhysAddrWidth_T234D(GpuArch *pGpuArch)
+{
+    return 39;
+}
+
diff --git a/src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c b/src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c
new file mode 100644
index 0000000..3e1afb6
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c
@@ -0,0 +1,207 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** HW State Routines ***************************\ +* * +* Implementation specific Descriptor List management functions * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/eng_desc.h" +#include "gpu/gpu_child_class_defs.h" +#include + +#include "swref/published/t23x/t234/dev_fuse.h" + +#include "class/cl0080.h" +#include "class/cl2080.h" + +#include "ctrl/ctrl0080/ctrl0080dma.h" +#include "ctrl/ctrl0080/ctrl0080fb.h" +#include "ctrl/ctrl0080/ctrl0080gpu.h" +#include "ctrl/ctrl0080/ctrl0080unix.h" + +#include "ctrl/ctrl2080/ctrl2080bus.h" +#include "ctrl/ctrl2080/ctrl2080event.h" +#include "ctrl/ctrl2080/ctrl2080fb.h" +#include "ctrl/ctrl2080/ctrl2080gpu.h" +#include "ctrl/ctrl2080/ctrl2080internal.h" +#include "ctrl/ctrl2080/ctrl2080unix.h" + +/*! 
+ * @brief fill in the GPU ID information + */ +void +gpuGetIdInfo_T234D +( + OBJGPU *pGpu +) +{ + pGpu->chipInfo.platformId = GPU_ARCHITECTURE_T23X; + pGpu->chipInfo.implementationId = GPU_IMPLEMENTATION_T234D; + pGpu->chipInfo.revisionId = 0; +} + +// See gpuChildOrderList_GM200 for documentation +static const GPUCHILDORDER +gpuChildOrderList_T234D[] = +{ + {classId(OBJDCECLIENTRM), GCO_ALL}, + {classId(MemorySystem), GCO_ALL}, + {classId(KernelMemorySystem), GCO_ALL}, + {classId(MemoryManager), GCO_ALL}, + {classId(OBJDCB), GCO_ALL}, + {classId(OBJDISP), GCO_ALL}, + {classId(KernelDisplay), GCO_ALL}, + {classId(OBJDPAUX), GCO_ALL}, + {classId(I2c), GCO_ALL}, + {classId(OBJGPIO), GCO_ALL}, + {classId(OBJHDACODEC), GCO_ALL}, +}; + +// See gpuChildrenPresent_GM200 for documentation on GPUCHILDPRESENT +static const GPUCHILDPRESENT gpuChildrenPresent_T234D[] = +{ + GPU_CHILD_PRESENT(OBJTMR, 1), + GPU_CHILD_PRESENT(OBJDCECLIENTRM, 1), + GPU_CHILD_PRESENT(KernelDisplay, 1), + GPU_CHILD_PRESENT(MemoryManager, 1), +}; + + +const GPUCHILDORDER * +gpuGetChildrenOrder_T234D(OBJGPU *pGpu, NvU32 *pNumEntries) +{ + *pNumEntries = NV_ARRAY_ELEMENTS(gpuChildOrderList_T234D); + return gpuChildOrderList_T234D; +} + +const GPUCHILDPRESENT * +gpuGetChildrenPresent_T234D(OBJGPU *pGpu, NvU32 *pNumEntries) +{ + *pNumEntries = NV_ARRAY_ELEMENTS(gpuChildrenPresent_T234D); + return gpuChildrenPresent_T234D; +} + +// list of valid NV01_DEVICE (0x0080) rmctrl commands for SOC Display GPUs +static const NvU32 +gpuDeviceRmctrlAllowlist_T234D[] = +{ + NV0080_CTRL_CMD_GPU_GET_CLASSLIST, + NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES, + NV0080_CTRL_CMD_GPU_GET_CLASSLIST_V2, + NV0080_CTRL_CMD_FB_GET_CAPS_V2, + NV0080_CTRL_CMD_DMA_GET_CAPS, + NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH, + NV0080_CTRL_CMD_OS_UNIX_VT_GET_FB_INFO, +}; + +// list of valid NV20_SUBDEVICE (0x2080) rmctrl commands for SOC Display GPUs +static const NvU32 +gpuSubdeviceRmctrlAllowlist_T234D[] = +{ + NV2080_CTRL_CMD_GPU_GET_NAME_STRING, + NV2080_CTRL_CMD_GPU_GET_SHORT_NAME_STRING, + NV2080_CTRL_CMD_GPU_GET_SIMULATION_INFO, + NV2080_CTRL_CMD_GPU_GET_ID, + NV2080_CTRL_CMD_GPU_GET_GID_INFO, + NV2080_CTRL_CMD_GPU_GET_ENGINES_V2, + NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO, + NV2080_CTRL_CMD_INTERNAL_GPU_GET_CHIP_INFO, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_IP_VERSION, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_SET_IMP_INIT_INFO, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER, + NV2080_CTRL_INTERNAL_GPU_CLIENT_LOW_POWER_MODE_ENTER, + NV2080_CTRL_CMD_BUS_GET_INFO_V2, + NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT, + NV2080_CTRL_CMD_OS_UNIX_ALLOW_DISALLOW_GCOFF, + NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER, +}; + +static NV_STATUS +gpuCheckRmctrlAllowList(const NvU32 *pAllowList, NvU32 allowListSize, NvU32 cmd) +{ + NvU32 i; + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + for (i = 0; i < allowListSize; i++) + { + if (pAllowList[i] == cmd) + { + status = NV_OK; + break; + } + } + + return status; +} + +NV_STATUS +gpuValidateRmctrlCmd_T234D(OBJGPU *pGpu, NvU32 cmd) +{ + NV_STATUS status = NV_OK; + + // only validate device/subdevice rmctrls currently + switch (DRF_VAL(XXXX, _CTRL_CMD, _CLASS, cmd)) + { + case NV01_DEVICE_0: + status = gpuCheckRmctrlAllowList(gpuDeviceRmctrlAllowlist_T234D, + NV_ARRAY_ELEMENTS(gpuDeviceRmctrlAllowlist_T234D), + cmd); + break; + case NV20_SUBDEVICE_0: + status = 
gpuCheckRmctrlAllowList(gpuSubdeviceRmctrlAllowlist_T234D, + NV_ARRAY_ELEMENTS(gpuSubdeviceRmctrlAllowlist_T234D), + cmd); + break; + default: + status = NV_OK; + break; + } + + return status; +} + +NV_STATUS +gpuValidateBusInfoIndex_T234D(OBJGPU *pGpu, NvU32 index) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + switch (index) + { + case NV2080_CTRL_BUS_INFO_INDEX_TYPE: + case NV2080_CTRL_BUS_INFO_INDEX_COHERENT_DMA_FLAGS: + case NV2080_CTRL_BUS_INFO_INDEX_NONCOHERENT_DMA_FLAGS: + status = NV_OK; + break; + default: + break; + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/arch/t25x/kern_gpu_t256d.c b/src/nvidia/src/kernel/gpu/arch/t25x/kern_gpu_t256d.c new file mode 100644 index 0000000..0f8a8ac --- /dev/null +++ b/src/nvidia/src/kernel/gpu/arch/t25x/kern_gpu_t256d.c @@ -0,0 +1,89 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/***************************** HW State Routines ***************************\ +* * +* Implementation specific Descriptor List management functions * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/eng_desc.h" +#include "gpu/gpu_child_class_defs.h" +#include "g_allclasses.h" +#include + +// See gpuChildOrderList_GM200 for documentation +static const GPUCHILDORDER +gpuChildOrderList_T256D[] = +{ + {classId(OBJDCECLIENTRM), GCO_ALL}, + {classId(MemorySystem), GCO_ALL}, + {classId(KernelMemorySystem), GCO_ALL}, + {classId(MemoryManager), GCO_ALL}, + {classId(OBJDCB), GCO_ALL}, + {classId(OBJDISP), GCO_ALL}, + {classId(KernelDisplay), GCO_ALL}, + {classId(OBJDPAUX), GCO_ALL}, + {classId(I2c), GCO_ALL}, + {classId(OBJGPIO), GCO_ALL}, + {classId(OBJHDACODEC), GCO_ALL}, +}; + +// See gpuChildrenPresent_GM200 for documentation on GPUCHILDPRESENT +static const GPUCHILDPRESENT gpuChildrenPresent_T256D[] = +{ + GPU_CHILD_PRESENT(OBJTMR, 1), + GPU_CHILD_PRESENT(OBJDCECLIENTRM, 1), + GPU_CHILD_PRESENT(KernelDisplay, 1), + GPU_CHILD_PRESENT(MemoryManager, 1), +}; + +const GPUCHILDORDER * +gpuGetChildrenOrder_T256D(OBJGPU *pGpu, NvU32 *pNumEntries) +{ + *pNumEntries = NV_ARRAY_ELEMENTS(gpuChildOrderList_T256D); + return gpuChildOrderList_T256D; +} + +const GPUCHILDPRESENT * +gpuGetChildrenPresent_T256D(OBJGPU *pGpu, NvU32 *pNumEntries) +{ + *pNumEntries = NV_ARRAY_ELEMENTS(gpuChildrenPresent_T256D); + return gpuChildrenPresent_T256D; +} + +/*! + * @brief fill in the GPU ID information + */ +void +gpuGetIdInfo_T256D +( + OBJGPU *pGpu +) +{ + pGpu->chipInfo.platformId = GPU_ARCHITECTURE_T25X; + pGpu->chipInfo.implementationId = GPU_IMPLEMENTATION_T256D; + pGpu->chipInfo.revisionId = 0; +} diff --git a/src/nvidia/src/kernel/gpu/arch/t26x/kern_gpu_t264d.c b/src/nvidia/src/kernel/gpu/arch/t26x/kern_gpu_t264d.c new file mode 100644 index 0000000..b9ea776 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/arch/t26x/kern_gpu_t264d.c @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/***************************** HW State Routines ***************************\ +* * +* Implementation specific Descriptor List management functions * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/eng_desc.h" +#include + +/*! + * @brief fill in the GPU ID information + */ +void +gpuGetIdInfo_T264D +( + OBJGPU *pGpu +) +{ + pGpu->chipInfo.platformId = GPU_ARCHITECTURE_T26X; + pGpu->chipInfo.implementationId = GPU_IMPLEMENTATION_T264D; + pGpu->chipInfo.revisionId = 0; +} diff --git a/src/nvidia/src/kernel/gpu/audio/hda_codec_api.c b/src/nvidia/src/kernel/gpu/audio/hda_codec_api.c new file mode 100644 index 0000000..b3af417 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/audio/hda_codec_api.c @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/audio/hda_codec_api.h" + +NV_STATUS hdacodecConstruct_IMPL +( + Hdacodec *pHdacodecApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/dce_client/dce_client.c b/src/nvidia/src/kernel/gpu/dce_client/dce_client.c new file mode 100644 index 0000000..371e230 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/dce_client/dce_client.c @@ -0,0 +1,117 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/dce_client/dce_client.h" + +NV_STATUS +dceclientConstructEngine_IMPL +( + OBJGPU *pGpu, + DceClient *pDceClient, + ENGDESCRIPTOR engDesc +) +{ + NV_PRINTF(LEVEL_INFO, "dceclientConstructEngine_IMPL Called\n"); + + if (!IS_DCE_CLIENT(pGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + return dceclientInitRpcInfra(pGpu, pDceClient); +} + +void +dceclientDestruct_IMPL +( + DceClient *pDceClient +) +{ + NV_PRINTF(LEVEL_INFO, "dceclientDestruct_IMPL Called\n"); + dceclientDeinitRpcInfra(pDceClient); +} + +NV_STATUS +dceclientStateLoad_IMPL +( + OBJGPU *pGpu, + DceClient *pDceClient, + NvU32 flags +) +{ + if (!(flags & GPU_STATE_FLAGS_PM_TRANSITION)) + return NV_OK; + + return dceclientInitRpcInfra(pGpu, pDceClient); +} + +NV_STATUS +dceclientStateUnload_IMPL +( + OBJGPU *pGpu, + DceClient *pDceClient, + NvU32 flags +) +{ + NV_STATUS nvStatus = NV_OK; + NvU32 newPMLevel; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_INTERNAL_GPU_CLIENT_LOW_POWER_MODE_ENTER_PARAMS params; + + NV_PRINTF(LEVEL_INFO, "dceclientStateUnload_IMPL Called\n"); + + if (!(flags & GPU_STATE_FLAGS_PM_TRANSITION)) + goto done; + + if (flags & GPU_STATE_FLAGS_PM_HIBERNATE) + newPMLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_7; + else + newPMLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; + + portMemSet(¶ms, 0, sizeof(params)); + params.bInPMTransition = NV_TRUE; + params.newPMLevel = newPMLevel; + nvStatus = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_INTERNAL_GPU_CLIENT_LOW_POWER_MODE_ENTER, + ¶ms, sizeof(params)); + + if (nvStatus != NV_OK) + goto done; + + dceclientDeinitRpcInfra(pDceClient); + +done: + return nvStatus; +} + +void +dceclientStateDestroy_IMPL +( + OBJGPU *pGpu, + DceClient *pDceClient +) +{ + NV_PRINTF(LEVEL_INFO, "Destroy DCE Client State Called\n"); +} diff --git a/src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c b/src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c new file mode 100644 index 0000000..7f816d0 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c @@ -0,0 +1,704 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvUnixVersion.h"
+
+#include "gpu/dce_client/dce_client.h"
+#include "os/dce_rm_client_ipc.h"
+
+#include "os/os.h"
+
+#include "kernel/core/thread_state.h"
+#include "kernel/core/locks.h"
+
+#include "vgpu/rpc.h"
+#include "gpu/mem_mgr/virt_mem_allocator_common.h"
+#include "class/cl0073.h"
+#include "class/clc670.h"
+#include "class/clc67b.h"
+#include "class/clc67d.h"
+#include "class/clc67e.h"
+#include "class/cl84a0.h"
+
+#include
+
+#include "gpu/disp/kern_disp.h"
+
+#define RPC_STRUCTURES
+#define RPC_GENERIC_UNION
+#include "g_rpc-structures.h"
+#undef RPC_STRUCTURES
+#undef RPC_GENERIC_UNION
+
+#define RPC_MESSAGE_STRUCTURES
+#define RPC_MESSAGE_GENERIC_UNION
+#include "g_rpc-message-header.h"
+#undef RPC_MESSAGE_STRUCTURES
+#undef RPC_MESSAGE_GENERIC_UNION
+
+#define DCE_MAX_RPC_MSG_SIZE 4096
+
+NV_STATUS
+dceclientInitRpcInfra_IMPL
+(
+    OBJGPU *pGpu,
+    DceClient *pDceClient
+)
+{
+    NV_STATUS nvStatus = NV_OK;
+
+    NV_PRINTF(LEVEL_INFO, "Init RPC Infra Called\n");
+
+    pDceClient->pRpc = initRpcObject(pGpu);
+    if (pDceClient->pRpc == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "initRpcObject failed\n");
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+
+    pDceClient->pRpc->maxRpcSize = DCE_MAX_RPC_MSG_SIZE;
+
+    pDceClient->pRpc->message_buffer = portMemAllocNonPaged(DCE_MAX_RPC_MSG_SIZE);
+    if (pDceClient->pRpc->message_buffer == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Cannot allocate memory for message_buffer\n");
+        nvStatus = NV_ERR_NO_MEMORY;
+        goto done;
+    }
+
+    // Register Synchronous IPC client for RPC to DCE RM
+    pDceClient->clientId[DCE_CLIENT_RM_IPC_TYPE_SYNC] = 0;
+    nvStatus = osTegraDceRegisterIpcClient(DCE_CLIENT_RM_IPC_TYPE_SYNC,
+                                           NULL,
+                                           &pDceClient->clientId[DCE_CLIENT_RM_IPC_TYPE_SYNC]);
+    if (nvStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Register dce ipc client failed for DCE_CLIENT_RM_IPC_TYPE_SYNC error 0x%x\n",
+                  nvStatus);
+        goto done;
+    }
+
+    NV_PRINTF(LEVEL_INFO, "Registered dce ipc client DCE_CLIENT_RM_IPC_TYPE_SYNC handle: 0x%x\n",
+              pDceClient->clientId[DCE_CLIENT_RM_IPC_TYPE_SYNC]);
+
+    // Register Asynchronous IPC client for event notification from DCE RM
+    pDceClient->clientId[DCE_CLIENT_RM_IPC_TYPE_EVENT] = 0;
+    nvStatus = osTegraDceRegisterIpcClient(DCE_CLIENT_RM_IPC_TYPE_EVENT,
+                                           pGpu,
+                                           &pDceClient->clientId[DCE_CLIENT_RM_IPC_TYPE_EVENT]);
+    if (nvStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Register dce ipc client failed for DCE_CLIENT_RM_IPC_TYPE_EVENT error 0x%x\n",
+                  nvStatus);
+        goto done;
+    }
+    NV_PRINTF(LEVEL_INFO, "Registered dce ipc client DCE_CLIENT_RM_IPC_TYPE_EVENT handle: 0x%x\n",
+              pDceClient->clientId[DCE_CLIENT_RM_IPC_TYPE_EVENT]);
+
+done:
+    if (nvStatus != NV_OK)
+    {
+        dceclientDeinitRpcInfra(pDceClient);
+    }
+
+    return nvStatus;
+}
+
+NV_STATUS
+dceclientDceRmInit_IMPL
+(
+    OBJGPU *pGpu,
+    DceClient *pDceClient,
+    NvBool bInit
+)
+{
+    NV_STATUS nvStatus = NV_OK;
+    GPU_MASK gpusLockedMask = 0;
+
+    if (bInit)
+    {
+        NV_ASSERT_OK_OR_GOTO(nvStatus,
+            rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_SUBDEVICE,
+                GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT, &gpusLockedMask),
+            done);
+    }
+
+    NV_RM_RPC_DCE_RM_INIT(pGpu, bInit, nvStatus);
+
+done:
+    if (gpusLockedMask != 0)
+    {
+        rmGpuGroupLockRelease(gpusLockedMask, GPUS_LOCK_FLAGS_NONE);
+    }
+
+    return nvStatus;
+}
+
+void
+dceclientDeinitRpcInfra_IMPL +( + DceClient *pDceClient +) +{ + NvU32 i = 0; + + NV_PRINTF(LEVEL_INFO, "Free RPC Infra Called\n"); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pDceClient->clientId); i++) + { + osTegraDceUnregisterIpcClient(pDceClient->clientId[i]); + } + + if (pDceClient->pRpc != NULL) + { + portMemFree(pDceClient->pRpc->message_buffer); + portMemFree(pDceClient->pRpc); + pDceClient->pRpc = NULL; + } +} + +NV_STATUS +dceclientSendRpc_IMPL +( + DceClient *pDceClient, + void *msgData, + NvU32 msgLength +) +{ + NV_STATUS nvStatus = NV_OK; + NvU32 clientId = pDceClient->clientId[DCE_CLIENT_RM_IPC_TYPE_SYNC]; + + NV_PRINTF(LEVEL_INFO, "Send RPC Called, clientid used 0x%x\n", clientId); + + if (msgData) + { + nvStatus = osTegraDceClientIpcSendRecv(clientId, msgData, msgLength); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Send RPC failed for clientId %u error %u\n", clientId, nvStatus); + return nvStatus; + } + } + + return nvStatus; +} + +NV_STATUS +dceclientReceiveMsg_IMPL +( + DceClient *pDceClient +) +{ + NV_STATUS nvStatus = NV_OK; + + NV_PRINTF(LEVEL_INFO, "Receive Message Called\n"); + + return nvStatus; +} + +NV_STATUS +dceclientSendMsg_IMPL +( + DceClient *pDceClient +) +{ + NV_STATUS nvStatus = NV_OK; + + NV_PRINTF(LEVEL_INFO, "Send Message Called\n"); + + return nvStatus; +} + +static inline rpc_message_header_v *_dceRpcGetMessageHeader(OBJRPC *pRpc) +{ + return ((rpc_message_header_v*)(pRpc->message_buffer)); +} + +static inline rpc_generic_union *_dceRpcGetMessageData(OBJRPC *pRpc) +{ + return _dceRpcGetMessageHeader(pRpc)->rpc_message_data; +} + +static inline NV_STATUS _dceRpcGetRpcResult(OBJRPC *pRpc) +{ + return _dceRpcGetMessageHeader(pRpc)->rpc_result; +} + +/** + * Prints the header info when _INFO level is enabled. + */ +static void _dceclientrmPrintHdr +( + OBJRPC *pRpc +) +{ + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE : [msg-buf:0x%p] header_version = 0x%x signature = 0x%x " + "length = 0x%x function = 0x%x rpc_result = 0x%x\n", pRpc->message_buffer, + _dceRpcGetMessageHeader(pRpc)->header_version, _dceRpcGetMessageHeader(pRpc)->signature, + _dceRpcGetMessageHeader(pRpc)->length, _dceRpcGetMessageHeader(pRpc)->function, + _dceRpcGetMessageHeader(pRpc)->rpc_result); +} + +/** + * Send RPC msg and check the result. 
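+ *
+ * The wait is implicit: dceclientSendRpc() goes through
+ * osTegraDceClientIpcSendRecv(), a combined send/receive on the synchronous
+ * IPC channel, so the reply has already been written back into the shared
+ * message buffer by the time it returns and rpc_result can be read
+ * immediately afterwards.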
+ */ +static NV_STATUS +_dceRpcIssueAndWait +( + RM_API *pRmApi +) +{ + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + + NV_STATUS status = NV_ERR_INVALID_ARGUMENT; + rpc_message_header_v* message_header = NULL; + DceClient *pDceclientrm = GPU_GET_DCECLIENTRM(pGpu); + + message_header = _dceRpcGetMessageHeader(pRpc); + if (message_header) + { + _dceclientrmPrintHdr(pRpc); + + status = dceclientSendRpc(pDceclientrm, message_header, message_header->length); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Error while issuing RPC [0x%x]\n", status); + goto done; + } + } + +done: + return status; +} + +void dceclientHandleAsyncRpcCallback +( + NvU32 handle, + NvU32 interfaceType, + NvU32 msgLength, + void *data, + void *usrCtx +) +{ + NV_PRINTF(LEVEL_INFO, "dceclientHandleAsyncRpcCallback called\n"); + + rpc_message_header_v *msg_hdr = NULL; + rpc_generic_union *rpc_msg_data = NULL; + OBJGPU *pGpu = (OBJGPU *)usrCtx; + + NV_ASSERT_OR_RETURN_VOID(interfaceType == DCE_CLIENT_RM_IPC_TYPE_EVENT); + NV_ASSERT_OR_RETURN_VOID(pGpu != NULL && data != NULL); + + msg_hdr = (rpc_message_header_v *)data; + rpc_msg_data = msg_hdr->rpc_message_data; + + switch (msg_hdr->function) + { + case NV_VGPU_MSG_EVENT_POST_EVENT: + { + rpc_post_event_v *rpc_params = &rpc_msg_data->post_event_v; + + if (rpc_params->bNotifyList) + { + gpuNotifySubDeviceEvent(pGpu, rpc_params->notifyIndex, + rpc_params->eventData, + rpc_params->eventDataSize, 0, 0); + } + else + { + PEVENTNOTIFICATION pNotifyList = NULL; + PEVENTNOTIFICATION pNotifyEvent = NULL; + Event *pEvent = NULL; + NV_STATUS nvStatus = NV_OK; + + // Get the notification list that contains this event. + NV_ASSERT_OR_RETURN_VOID(CliGetEventInfo(rpc_params->hClient, + rpc_params->hEvent, &pEvent)); + + if (pEvent->pNotifierShare != NULL) + pNotifyList = pEvent->pNotifierShare->pEventList; + + NV_ASSERT_OR_RETURN_VOID(pNotifyList != NULL); + + // Send event to a specific hEvent. Find hEvent in the notification list. 
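+                // (PEVENTNOTIFICATION entries form a singly linked list, so
+                // this is a linear walk; the NV_ASSERT after the loop flags
+                // replies whose hEvent no longer matches any registered
+                // notifier.)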
+                for (pNotifyEvent = pNotifyList; pNotifyEvent != NULL; pNotifyEvent = pNotifyEvent->Next)
+                {
+                    if (pNotifyEvent->hEvent == rpc_params->hEvent)
+                    {
+                        nvStatus = osNotifyEvent(pGpu, pNotifyEvent, 0,
+                                                 rpc_params->data, rpc_params->status);
+                        if (nvStatus != NV_OK)
+                            NV_PRINTF(LEVEL_ERROR, "osNotifyEvent failed with status: %x\n", nvStatus);
+                        break;
+                    }
+                }
+                NV_ASSERT(pNotifyEvent != NULL);
+            }
+            return;
+        }
+        case NV_VGPU_MSG_EVENT_RG_LINE_INTR:
+        {
+            rpc_rg_line_intr_v *rpc_params = &rpc_msg_data->rg_line_intr_v;
+
+            KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+            NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR, pKernelDisplay != NULL);
+
+            kdispInvokeRgLineCallback(pKernelDisplay, rpc_params->head, rpc_params->rgIntr, NV_FALSE);
+            return;
+        }
+        case NV_VGPU_MSG_EVENT_DISPLAY_MODESET:
+        {
+            rpc_display_modeset_v *rpc_params = &rpc_msg_data->display_modeset_v;
+
+            KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+            NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR, pKernelDisplay != NULL);
+
+            kdispInvokeDisplayModesetCallback(pKernelDisplay,
+                                              rpc_params->bModesetStart,
+                                              rpc_params->minRequiredIsoBandwidthKBPS,
+                                              rpc_params->minRequiredFloorBandwidthKBPS);
+            return;
+        }
+        default:
+        {
+            NV_PRINTF(LEVEL_ERROR, "Unexpected RPC function 0x%x\n", msg_hdr->function);
+            NV_ASSERT_FAILED("Unexpected RPC function");
+            return;
+        }
+    }
+}
+
+NV_STATUS rpcRmApiControl_dce
+(
+    RM_API *pRmApi,
+    NvHandle hClient,
+    NvHandle hObject,
+    NvU32 cmd,
+    void *pParamStructPtr,
+    NvU32 paramsSize
+)
+{
+    OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext;
+    OBJRPC *pRpc = GPU_GET_RPC(pGpu);
+
+    rpc_generic_union *msg_data;
+    rpc_gsp_rm_control_v *rpc_params = NULL;
+    NV_STATUS status = NV_ERR_NOT_SUPPORTED;
+
+    NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE : Prepare and send RmApiControl RPC [cmd:0x%x]\n", cmd);
+
+    msg_data = _dceRpcGetMessageData(pRpc);
+    rpc_params = &msg_data->gsp_rm_control_v;
+
+    status = rpcWriteCommonHeader(pGpu, pRpc,
+                                  NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
+                                  (sizeof(rpc_gsp_rm_control_v) +
+                                   paramsSize));
+    if (status != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Writing RPC Header Failed [0x%x]\n", status);
+        goto done;
+    }
+
+    rpc_params->hClient = hClient;
+    rpc_params->hObject = hObject;
+    rpc_params->cmd = cmd;
+    rpc_params->paramsSize = paramsSize;
+    portMemCopy(rpc_params->params, paramsSize, pParamStructPtr, paramsSize);
+
+    status = _dceRpcIssueAndWait(pRmApi);
+    if (status != NV_OK)
+    {
+        goto done;
+    }
+
+    status = _dceRpcGetRpcResult(pRpc);
+    if (status != NV_OK)
+    {
+        NV_PRINTF(status != NV_ERR_NOT_SUPPORTED ?
LEVEL_ERROR : LEVEL_INFO, + "NVRM_RPC_DCE: Failed RM ctrl call cmd:0x%x result 0x%x: %s\n", + cmd, status, nvstatusToString(status)); + } + else + { + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE: RPC for GSP RM Control Successful\n"); + } + + portMemCopy(pParamStructPtr, paramsSize, rpc_params->params, paramsSize); + +done: + return status; +} + +NV_STATUS rpcRmApiAlloc_dce +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle hObject, + NvU32 hClass, + void *pAllocParams, + NvU32 allocParamSize +) +{ + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + + rpc_generic_union *msg_data; + rpc_gsp_rm_alloc_v *rpc_params; + NV_STATUS status; + NvU32 paramsSize; + NvBool bNullAllowed; + + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE: Prepare and send RmApiAlloc RPC\n"); + + msg_data = _dceRpcGetMessageData(pRpc); + rpc_params = &msg_data->gsp_rm_alloc_v; + + NV_ASSERT_OK_OR_GOTO(status, + rmapiGetClassAllocParamSize(¶msSize, pAllocParams, &bNullAllowed, hClass), + done); + + if (pAllocParams == NULL && !bNullAllowed) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: NULL allocation params not allowed for class 0x%x\n", hClass); + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + status = rpcWriteCommonHeader(pGpu, pRpc, + NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, + (sizeof(rpc_gsp_rm_alloc_v) + paramsSize)); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Writing RPC Header Failed [0x%x]\n", status); + goto done; + } + + rpc_params->hClient = hClient; + rpc_params->hParent = hParent; + rpc_params->hObject = hObject; + rpc_params->hClass = hClass; + rpc_params->paramsSize = pAllocParams ? paramsSize : 0; + portMemCopy(rpc_params->params, rpc_params->paramsSize, pAllocParams, paramsSize); + + status = _dceRpcIssueAndWait(pRmApi); + if (status != NV_OK) + { + goto done; + } + + status = _dceRpcGetRpcResult(pRpc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Failed RM Alloc Object 0x%x result 0x%x: %s\n", + hClass, status, nvstatusToString(status)); + } + + // Deserialize the response + // We do not deserialize the variable length data as we do not expect it to be modified + if (paramsSize > 0) + { + portMemCopy(pAllocParams, paramsSize, rpc_params->params, rpc_params->paramsSize); + } + + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE: RPC for GSP RM Alloc Successful\n"); + +done: + return status; +} + +NV_STATUS rpcRmApiFree_dce(RM_API *pRmApi, NvHandle hClient, NvHandle hObject) +{ + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + + rpc_generic_union *msg_data; + NVOS00_PARAMETERS_v *rpc_params; + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE Free " + "RPC Called for hClient: 0x%x\n", hClient); + + msg_data = _dceRpcGetMessageData(pRpc); + rpc_params = &msg_data->free_v.params; + + status = rpcWriteCommonHeader(pGpu, pRpc, + NV_VGPU_MSG_FUNCTION_FREE, + sizeof(rpc_free_v)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Writing RPC Header Failed [0x%x]\n", status); + goto done; + } + + rpc_params->hRoot = hClient; + rpc_params->hObjectParent = NV01_NULL_OBJECT; + rpc_params->hObjectOld = hObject; + + status = _dceRpcIssueAndWait(pRmApi); + if (status != NV_OK) + { + goto done; + } + + status = _dceRpcGetRpcResult(pRpc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Failed RM Free Object result 0x%x:\n", status); + } + + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE: RPC for Free Successful\n"); + +done: + return status; +} + 
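+/*
+ * Every rpcRmApi*_dce wrapper in this file follows the same marshalling
+ * pattern. As a hedged sketch (FOO, foo_v and NV_VGPU_MSG_FUNCTION_FOO are
+ * hypothetical placeholders, not real entries in this interface), a new
+ * wrapper would look like:
+ *
+ *     rpc_foo_v *rpc_params = &_dceRpcGetMessageData(pRpc)->foo_v;
+ *
+ *     status = rpcWriteCommonHeader(pGpu, pRpc,
+ *                                   NV_VGPU_MSG_FUNCTION_FOO, // hypothetical
+ *                                   sizeof(rpc_foo_v));
+ *     if (status == NV_OK)
+ *     {
+ *         rpc_params->someInput = ...;          // marshal inputs in place
+ *         status = _dceRpcIssueAndWait(pRmApi); // synchronous round trip
+ *     }
+ *     if (status == NV_OK)
+ *         status = _dceRpcGetRpcResult(pRpc);   // DCE RM's own status
+ *
+ * Outputs, when any, are read back out of rpc_params after the call, since
+ * the reply reuses the same message buffer.
+ */
+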
+NV_STATUS rpcRmApiDupObject_dce +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags +) +{ + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + + rpc_generic_union *msg_data; + NVOS55_PARAMETERS_v03_00 *rpc_params = NULL; + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE Dup Object " + "RPC Called for hClient: 0x%x\n", hClient); + + + msg_data = _dceRpcGetMessageData(pRpc); + rpc_params = &msg_data->dup_object_v.params; + + status = rpcWriteCommonHeader(pGpu, pRpc, + NV_VGPU_MSG_FUNCTION_DUP_OBJECT, + sizeof(rpc_dup_object_v)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Writing RPC Header Failed [0x%x]\n", status); + goto done; + } + + rpc_params->hClient = hClient; + rpc_params->hParent = hParent; + rpc_params->hObject = *phObject; + rpc_params->hClientSrc = hClientSrc; + rpc_params->hObjectSrc = hObjectSrc; + rpc_params->flags = flags; + + status = _dceRpcIssueAndWait(pRmApi); + if (status != NV_OK) + { + goto done; + } + + status = _dceRpcGetRpcResult(pRpc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Failed RM Dup Object result 0x%x:\n", status); + } + + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE: RPC for DUP OBJECT Successful\n"); + +done: + return status; +} + +NV_STATUS +rpcDceRmInit_dce +( + RM_API *pRmApi, + NvBool bInit +) +{ + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + DceClient *pDceClientrm = GPU_GET_DCECLIENTRM(pGpu); + + rpc_generic_union *msg_data; + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + rpc_dce_rm_init_v *rpc_params = NULL; + + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE RPC to trigger %s called\n", + bInit ? "RmInitAdapter" : "RmShutdownAdapter"); + + msg_data = _dceRpcGetMessageData(pRpc); + rpc_params = &msg_data->dce_rm_init_v; + + status = rpcWriteCommonHeader(pGpu, pRpc, + NV_VGPU_MSG_FUNCTION_DCE_RM_INIT, + sizeof(rpc_dce_rm_init_v)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Writing RPC Header Failed [0x%x]\n", status); + goto done; + } + + rpc_params->bInit = bInit; + status = _dceRpcIssueAndWait(pRmApi); + if (status != NV_OK) + { + goto done; + } + + status = _dceRpcGetRpcResult(pRpc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Failed RM init/deinit result 0x%x:\n", status); + } + + pDceClientrm->hInternalClient = rpc_params->hInternalClient; + + if (bInit) + { + NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *pGbvParams = portMemAllocNonPaged(sizeof(*pGbvParams)); + + NV_ASSERT_TRUE_OR_GOTO(status, pGbvParams != NULL, NV_ERR_NO_MEMORY, done); + + status = rpcRmApiControl_dce(pRmApi, pDceClientrm->hInternalClient, pDceClientrm->hInternalClient, + NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION_V2, pGbvParams, sizeof(*pGbvParams)); + NV_ASSERT_OK(status); + + if (status == NV_OK && + portStringCompare(NV_VERSION_STRING, pGbvParams->driverVersionBuffer, sizeof(NV_VERSION_STRING)) != 0) + { + NV_PRINTF(LEVEL_WARNING, "Possibly incompatible DCE RM version! CPU RM: %s DCE RM: %s\n", + NV_VERSION_STRING, pGbvParams->driverVersionBuffer); + } + + portMemFree(pGbvParams); + } + +done: + return status; +} diff --git a/src/nvidia/src/kernel/gpu/device.c b/src/nvidia/src/kernel/gpu/device.c new file mode 100644 index 0000000..35400c9 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/device.c @@ -0,0 +1,607 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This is a device resource implementation. +* +******************************************************************************/ + + + +#include "resserv/resserv.h" +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "platform/sli/sli.h" + +#include "class/cl0080.h" +#include "core/locks.h" +#include "vgpu/rpc.h" +#include "mem_mgr/mem.h" + +#include "rmapi/rs_utils.h" +#include "nvsecurityinfo.h" + +static NV_STATUS _deviceTeardown(Device *pDevice, CALL_CONTEXT *pCallContext); +static NV_STATUS _deviceTeardownRef(Device *pDevice, CALL_CONTEXT *pCallContext); +static NV_STATUS _deviceInit(Device *pDevice, CALL_CONTEXT *pCallContext, + NvHandle hClient, NvHandle hDevice, NvU32 deviceInst, + NvHandle hClientShare, NvHandle hTargetClient, NvHandle hTargetDevice, + NvU64 vaSize, NvU64 vaStartInternal, NvU64 vaLimitInternal, + NvU32 allocFlags, NvU32 vaMode, NvBool *pbIsFirstDevice); + +NV_STATUS +deviceConstruct_IMPL +( + Device *pDevice, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV0080_ALLOC_PARAMETERS *pNv0080AllocParams = pParams->pAllocParams; + NvU32 deviceInst, flags, vaMode; + NvU32 deviceClass = pParams->externalClassId; + NvHandle hClientShare; + NvHandle hTargetClient = NV01_NULL_OBJECT; + NvHandle hTargetDevice = NV01_NULL_OBJECT; + NvU64 vaSize = 0; + NV_STATUS rmStatus = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + OBJGPU *pGpu; + NvU64 vaStartInternal = 0; + NvU64 vaLimitInternal = 0; + NvU32 physicalAllocFlags; + NvBool bIsFirstDevice; + + if (pNv0080AllocParams == NULL) + { + deviceInst = pParams->externalClassId - NV01_DEVICE_0; + hClientShare = NV01_NULL_OBJECT; + flags = 0; + vaSize = 0; + vaMode = 0; + } + else + { + deviceInst = pNv0080AllocParams->deviceId; + hClientShare = pNv0080AllocParams->hClientShare; + hTargetClient = pNv0080AllocParams->hTargetClient; + hTargetDevice = pNv0080AllocParams->hTargetDevice; + flags = pNv0080AllocParams->flags; + vaSize = pNv0080AllocParams->vaSpaceSize; + vaMode = pNv0080AllocParams->vaMode; + + // valid only if NV_DEVICE_ALLOCATION_FLAGS_RESTRICT_RESERVED_VALIMITS is flagged. 
+ if (flags & NV_DEVICE_ALLOCATION_FLAGS_RESTRICT_RESERVED_VALIMITS) + { + vaStartInternal = pNv0080AllocParams->vaStartInternal; + vaLimitInternal = pNv0080AllocParams->vaLimitInternal; + + if ((vaLimitInternal < vaStartInternal) || (vaLimitInternal == 0)) + { + return NV_ERR_INVALID_ARGUMENT; + } + } + } + + // validate device instance + if (gpumgrIsDeviceInstanceValid(deviceInst) != NV_OK) + { + return NV_ERR_INVALID_CLASS; + } + + // Make sure this device has not been disabled + if (gpumgrIsDeviceEnabled(deviceInst) == NV_FALSE) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // add new device to client and set the device context + rmStatus = _deviceInit(pDevice, pCallContext, pParams->hClient, pParams->hResource, deviceInst, + hClientShare, hTargetClient, hTargetDevice, vaSize, vaStartInternal, vaLimitInternal, + flags, vaMode, &bIsFirstDevice); + if (rmStatus != NV_OK) + return rmStatus; + + pGpu = GPU_RES_GET_GPU(pDevice); + + if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL) + { + if (!osIsGpuAccessible(pGpu)) + { + // Delete the device from the client since we should not be allocating it + _deviceTeardownRef(pDevice, pCallContext); + _deviceTeardown(pDevice, pCallContext); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + + // + // Make sure this device is not in fullchip reset on OSes where it is + // restricted. + // + if (pOS->getProperty(pOS, PDB_PROP_OS_LIMIT_GPU_RESET) && + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET)) + { + // Delete the device from the client since we should not be allocating it + _deviceTeardownRef(pDevice, pCallContext); + _deviceTeardown(pDevice, pCallContext); + return NV_ERR_GPU_IN_FULLCHIP_RESET; + } + + if (IS_VIRTUAL(pGpu) || IS_FW_CLIENT(pGpu)) + { + physicalAllocFlags = flags & ~(NV_DEVICE_ALLOCATION_FLAGS_PLUGIN_CONTEXT + | NV_DEVICE_ALLOCATION_FLAGS_HOST_VGPU_DEVICE); + + NV_RM_RPC_ALLOC_SHARE_DEVICE(pGpu, pParams->hParent, pParams->hResource, pDevice->hClientShare, + hTargetClient, hTargetDevice, deviceClass, + physicalAllocFlags, vaSize, vaMode, bIsFirstDevice, rmStatus); + if (rmStatus != NV_OK) + { + return rmStatus; + } + } + + return rmStatus; +} // end of deviceConstruct_IMPL + +void +deviceDestruct_IMPL +( + Device *pDevice +) +{ + CALL_CONTEXT *pCallContext; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + NV_STATUS rmStatus = NV_OK; + NV_STATUS tmpStatus; + NvHandle hClient; + + resGetFreeParams(staticCast(pDevice, RsResource), &pCallContext, &pParams); + + hClient = pCallContext->pClient->hClient; + + NV_PRINTF(LEVEL_INFO, " type: device\n"); + + LOCK_METER_DATA(FREE_DEVICE, 0, 0, 0); + + // free the device + if (_deviceTeardownRef(pDevice, pCallContext) != NV_OK || + _deviceTeardown(pDevice, pCallContext) != NV_OK) + { + tmpStatus = NV_ERR_INVALID_OBJECT_HANDLE; + if (tmpStatus != NV_OK && rmStatus == NV_OK) + rmStatus = tmpStatus; + } + + // + // If the client was created, but never had any devices successfully + // attached, we'll get here. 
The client's device structure will have + // been created, but pGpu will be NULL if the device was later found + // to be non-existent + // + if (GPU_RES_GET_GPU(pDevice)) + { + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + // vGpu support + if (IS_VIRTUAL(pGpu) || IS_FW_CLIENT(pGpu)) + { + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NvHandle hDevice = pResourceRef->hResource; + NvBool bClientInUse = NV_FALSE; + RsClient *pRsClient = pCallContext->pClient; + NvBool bNonOffloadVgpu = (IS_VIRTUAL(pGpu) && !IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu)); + RS_ITERATOR it; + + if (rmStatus == NV_OK) + { + NV_RM_RPC_FREE(pGpu, hClient, hClient, hDevice, rmStatus); + } + + if (rmStatus != NV_OK) + { + pParams->status = rmStatus; + return; + } + + // check if there are any more devices in use. + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + + while (clientRefIterNext(it.pClient, &it)) + { + Device *pDeviceTest = dynamicCast(it.pResourceRef->pResource, Device); + NvBool bSameGpu = (GPU_RES_GET_GPU(pDeviceTest) == pGpu); + + if ((pDeviceTest != pDevice) && (bNonOffloadVgpu || bSameGpu)) + { + bClientInUse = NV_TRUE; + break; + } + } + + // If neither any devices nor KernelSMDebuggerSession are in use, free up the client on host. + if (!bClientInUse) + { + NV_RM_RPC_FREE(pGpu, hClient, NV01_NULL_OBJECT, hClient, rmStatus); + } + } + } +} // end of deviceDestruct_IMPL + +NV_STATUS +deviceControl_IMPL +( + Device *pDevice, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + + // + // Some assertions to make RMCTRL to NVOC migration smooth + // Those will be removed at the end of ctrl0080.def migration + // + NV_ASSERT_OR_RETURN(pParams->hClient == RES_GET_CLIENT_HANDLE(pDevice), NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pParams->hObject == RES_GET_HANDLE(pDevice), NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pParams->hParent == RES_GET_PARENT_HANDLE(pDevice), NV_ERR_INVALID_STATE); + + pParams->pGpuGrp = GPU_RES_GET_GPUGRP(pDevice); + return gpuresControl_IMPL(staticCast(pDevice, GpuResource), + pCallContext, pParams); +} + +NV_STATUS +deviceInternalControlForward_IMPL +( + Device *pDevice, + NvU32 command, + void *pParams, + NvU32 size +) +{ + return gpuresInternalControlForward_IMPL(staticCast(pDevice, GpuResource), command, pParams, size); +} + +// +// add a device with specified handle, instance num, within a specified client +// (hClientShare also specified) +// +static NV_STATUS +_deviceInit +( + Device *pDevice, + CALL_CONTEXT *pCallContext, + NvHandle hClient, + NvHandle hDevice, + NvU32 deviceInst, + NvHandle hClientShare, + NvHandle hTargetClient, + NvHandle hTargetDevice, + NvU64 vaSize, + NvU64 vaStartInternal, + NvU64 vaLimitInternal, + NvU32 allocFlags, + NvU32 vaMode, + NvBool *pbIsFirstDevice +) +{ + OBJGPU *pGpu; + NV_STATUS status; + GpuResource *pGpuResource = staticCast(pDevice, GpuResource); + Device *pExistingDevice; + NvU32 gpuInst; + + if (deviceInst >= NV_MAX_DEVICES) + return NV_ERR_INVALID_ARGUMENT; + + // Look up GPU and GPU Group + gpuInst = gpumgrGetPrimaryForDevice(deviceInst); + + if ((pGpu = gpumgrGetGpu(gpuInst)) == NULL) + { + return NV_ERR_INVALID_STATE; + } + + // Check if device inst already allocated, fail if this call succeeds. 
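+    // (A failed lookup also tells us this is the client's first Device for
+    // this instance; *pbIsFirstDevice records that below, and deviceConstruct
+    // forwards it to the NV_RM_RPC_ALLOC_SHARE_DEVICE call.)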
+ status = deviceGetByInstance(pCallContext->pClient, deviceInst, &pExistingDevice); + if (status == NV_OK) + { + // + // RS-TODO: Status code should be NV_ERR_STATE_IN_USE, however keeping + // existing code from CliAllocElement (for now) + // + if (IS_VIRTUAL(pGpu)) + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + *pbIsFirstDevice = (status != NV_OK); + + pDevice->hTargetClient = hTargetClient; + pDevice->hTargetDevice = hTargetDevice; + pDevice->pKernelHostVgpuDevice = NULL; + + pDevice->deviceInst = deviceInst; + + // Update VA Mode + pDevice->vaMode = vaMode; + + gpuresSetGpu(pGpuResource, pGpu, NV_TRUE); + + // + // In case of a SR-IOV enabled guest we create a default client inside + // the guest whose handle can be used for VAS sharing. Setting hClientShare + // to 0 on baremetal causes any VA alloc made under this device to use the + // global vaspace. We do not support use of the global vaspace inside guest. + // The legacy paravirtualization config also makes use of a default client. + // But, in the legacy case, the client is created by the plugin and not guest + // RM . On SR-IOV, vaspace management has been pushed inside the guest. So, + // having a vaspace only on the plugin side won't help since RmMapMemoryDma + // calls will no longer be RPCed to host RM. + // + if (IS_VIRTUAL_WITH_SRIOV(pGpu) && + gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) + { + if (hClientShare == NV01_NULL_OBJECT) + { + hClientShare = pGpu->hDefaultClientShare; + } + } + + status = deviceSetClientShare(pDevice, hClientShare, vaSize, + vaStartInternal, vaLimitInternal, allocFlags); + if (NV_OK != status) + goto done; + + if (allocFlags & NV_DEVICE_ALLOCATION_FLAGS_PLUGIN_CONTEXT) + { + NV_ASSERT_OR_RETURN(allocFlags & NV_DEVICE_ALLOCATION_FLAGS_HOST_VGPU_DEVICE, + NV_ERR_INVALID_ARGUMENT); + } + +done: + if (status != NV_OK) + { + deviceRemoveFromClientShare(pDevice); + } + + return status; +} + +// +// delete a device with a specified handle within a client +// +static NV_STATUS +_deviceTeardown +( + Device *pDevice, + CALL_CONTEXT *pCallContext +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + PORT_UNREFERENCED_VARIABLE(pGpu); + + deviceRemoveFromClientShare(pDevice); + + return NV_OK; +} + +static NV_STATUS _deviceTeardownRef +( + Device *pDevice, + CALL_CONTEXT *pCallContext +) +{ + + return NV_OK; +} + +NV_STATUS +deviceGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hDevice, + Device **ppDevice +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppDevice = NULL; + + status = clientGetResourceRef(pClient, hDevice, &pResourceRef); + if (status != NV_OK) + return status; + + *ppDevice = dynamicCast(pResourceRef->pResource, Device); + + return (*ppDevice) ? 
NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +deviceGetByInstance_IMPL +( + RsClient *pClient, + NvU32 deviceInstance, + Device **ppDevice +) +{ + RS_ITERATOR it; + Device *pDevice; + + *ppDevice = NULL; + + it = clientRefIter(pClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + + while (clientRefIterNext(it.pClient, &it)) + { + pDevice = dynamicCast(it.pResourceRef->pResource, Device); + + if ((pDevice != NULL) && (deviceInstance == pDevice->deviceInst)) + { + *ppDevice = pDevice; + return NV_OK; + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS +deviceGetByGpu_IMPL +( + RsClient *pClient, + OBJGPU *pGpu, + NvBool bAnyInGroup, + Device **ppDevice +) +{ + NvU32 deviceInstance = gpuGetDeviceInstance(pGpu); + NV_STATUS status; + + status = deviceGetByInstance(pClient, deviceInstance, ppDevice); + if (status != NV_OK) + return status; + + // If pGpu is not the primary GPU return failure + if (!bAnyInGroup && pGpu != GPU_RES_GET_GPU(*ppDevice)) + { + *ppDevice = NULL; + return NV_ERR_OBJECT_NOT_FOUND; + } + + return NV_OK; +} + +// **************************************************************************** +// Deprecated Functions +// **************************************************************************** + +/** + * WARNING: This function is deprecated and use is *strongly* discouraged + * (especially for new code!) + * + * From the function name (CliSetGpuContext) it appears as a simple accessor but + * violates expectations by modifying the SLI BC threadstate (calls to + * GPU_RES_SET_THREAD_BC_STATE). This can be dangerous if not carefully managed + * by the caller. + * + * Instead of using this routine, please use deviceGetByHandle then call + * GPU_RES_GET_GPU, GPU_RES_GET_GPUGRP, GPU_RES_SET_THREAD_BC_STATE as needed. + * + * Note that GPU_RES_GET_GPU supports returning a pGpu for both pDevice, + * pSubdevice, the base pResource type, and any resource that inherits from + * GpuResource. That is, instead of using CliSetGpuContext or + * CliSetSubDeviceContext, please use following pattern to look up the pGpu: + * + * OBJGPU *pGpu = GPU_RES_GET_GPU(pResource or pResourceRef->pResource) + * + * To set the threadstate, please use: + * + * GPU_RES_SET_THREAD_BC_STATE(pResource or pResourceRef->pResource); + */ +NV_STATUS +CliSetGpuContext +( + NvHandle hClient, + NvHandle hDevice, + OBJGPU **ppGpu, + OBJGPUGRP **ppGpuGrp +) +{ + Device *pDevice; + RsClient *pClient; + NV_STATUS status; + + if (ppGpuGrp != NULL) + *ppGpuGrp = NULL; + + if (ppGpu != NULL) + *ppGpu = NULL; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return status; + + status = deviceGetByHandle(pClient, hDevice, &pDevice); + if (status != NV_OK) + return status; + + if (ppGpu != NULL) + *ppGpu = GPU_RES_GET_GPU(pDevice); + + if (ppGpuGrp != NULL) + *ppGpuGrp = GPU_RES_GET_GPUGRP(pDevice); + + GPU_RES_SET_THREAD_BC_STATE(pDevice); + + return NV_OK; +} + +/** + * WARNING: This function is deprecated! Please use gpuGetByRef() + */ +POBJGPU +CliGetGpuFromContext +( + RsResourceRef *pContextRef, + NvBool *pbBroadcast +) +{ + NV_STATUS status; + OBJGPU *pGpu; + + status = gpuGetByRef(pContextRef, pbBroadcast, &pGpu); + + return (status == NV_OK) ? pGpu : NULL; +} + +/** + * WARNING: This function is deprecated! 
Please use gpuGetByHandle()
+ */
+POBJGPU
+CliGetGpuFromHandle
+(
+    NvHandle hClient,
+    NvHandle hResource,
+    NvBool *pbBroadcast
+)
+{
+    RsClient *pClient;
+    NV_STATUS status;
+    OBJGPU *pGpu;
+
+    status = serverGetClientUnderLock(&g_resServ, hClient, &pClient);
+    if (status != NV_OK)
+        return NULL;
+
+    status = gpuGetByHandle(pClient, hResource, pbBroadcast, &pGpu);
+
+    return (status == NV_OK) ? pGpu : NULL;
+}
diff --git a/src/nvidia/src/kernel/gpu/device_ctrl.c b/src/nvidia/src/kernel/gpu/device_ctrl.c
new file mode 100644
index 0000000..8b4036b
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/device_ctrl.c
@@ -0,0 +1,381 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*!
+ * @file
+ * @brief This module contains the gpu control interfaces for the
+ *        device (NV01_DEVICE_0) class. Device-level control calls
+ *        are broadcast to all GPUs within the device.
+ */
+
+#include "gpu/device/device.h"
+#include "gpu/subdevice/subdevice.h"
+#include "core/system.h"
+#include "core/locks.h"
+#include "gpu/gpu.h"
+#include "gpu_mgr/gpu_mgr.h"
+#include "platform/sli/sli.h"
+#include "virtualization/hypervisor/hypervisor.h"
+#include "vgpu/rpc.h"
+
+#if defined(NV_UNIX)
+#include "os-interface.h"
+#endif
+
+//
+// This rmctrl MUST NOT touch hw since it's tagged as NO_GPUS_ACCESS in ctrl0080.def
+// RM allows this type of rmctrl to go through when GPU is not available.
+//
+// Lock Requirements:
+//      Assert that API lock held on entry
+//
+NV_STATUS
+deviceCtrlCmdGpuGetClasslist_IMPL
+(
+    Device *pDevice,
+    NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *pClassListParams
+)
+{
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice);
+
+    NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE);
+
+    return gpuGetClassList(pGpu, &pClassListParams->numClasses,
+                           NvP64_VALUE(pClassListParams->classList), ENG_INVALID);
+}
+
+//
+// This rmctrl MUST NOT touch hw since it's tagged with flag NO_GPUS_ACCESS in device.h
+// RM allows this type of rmctrl to go through when GPU is not available.
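+//
+// The V2 variant below returns the classes in an embedded array rather than
+// through a user pointer, so callers can skip the usual two-call
+// query-the-count-then-fetch sequence needed by the non-V2 control above.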
+// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +deviceCtrlCmdGpuGetClasslistV2_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *pClassListParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + pClassListParams->numClasses = NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE; + + return gpuGetClassList(pGpu, &pClassListParams->numClasses, + pClassListParams->classList, ENG_INVALID); +} + +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +deviceCtrlCmdGpuGetNumSubdevices_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *pSubDeviceCountParams +) +{ + pSubDeviceCountParams->numSubDevices = 1; + + return NV_OK; +} + +NV_STATUS +deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 gpuMask, index; + NvBool bEnable; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + OBJGPU *pTmpGpu; + + if (NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED == + pParams->newState) + { + bEnable = NV_TRUE; + } + else if (NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED == + pParams->newState) + { + bEnable = NV_FALSE; + } + else + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Get the gpuMask for the device pGpu belongs to + gpuMask = gpumgrGetGpuMask(pGpu); + + index = 0; + while ((pTmpGpu = gpumgrGetNextGpu(gpuMask, &index)) != NULL) + { + if (bEnable) + { + pGpuMgr->persistentSwStateGpuMask |= NVBIT(pTmpGpu->gpuInstance); + pTmpGpu->setProperty(pTmpGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE, + NV_TRUE); + } + else + { + pGpuMgr->persistentSwStateGpuMask &= ~NVBIT(pTmpGpu->gpuInstance); + pTmpGpu->setProperty(pTmpGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE, + NV_FALSE); + } + + // Set/Clear OS-specific persistence flags + osModifyGpuSwStatePersistence(pTmpGpu->pOsGpuInfo, bEnable); + } + + return NV_OK; +} + +NV_STATUS +deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE)) + { + pParams->swStatePersistence = + NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED; + } + else + { + pParams->swStatePersistence = + NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED; + } + + return NV_OK; +} + +/*! + * @brief This Command is used to get the virtualization mode of GPU. GPU + * can be in NMOS/VGX/host-vGPU/host-vSGA mode. + * + * @return Returns NV_STATUS + * NV_OK If GPU is present. + * NV_ERR_INVALID_ARGUMENT If GPU is not present. 
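+ *
+ * Mode selection mirrors the branches below: VGX when RM itself runs inside
+ * a guest, NMOS for passthrough, HOST_VGPU/HOST_VSGA when acting as an
+ * SR-IOV vGPU host, and NONE otherwise.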
+ */ +NV_STATUS +deviceCtrlCmdGpuGetVirtualizationMode_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + +#if defined(NV_UNIX) && !RMCFG_FEATURE_MODS_FEATURES + pParams->isGridBuild = os_is_grid_supported(); +#endif + + if (IS_VIRTUAL(pGpu)) + { + pParams->virtualizationMode = + NV0080_CTRL_GPU_VIRTUALIZATION_MODE_VGX; + } + else if (IS_PASSTHRU(pGpu)) + { + pParams->virtualizationMode = + NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NMOS; + } + else if (hypervisorIsVgxHyper() && (gpuIsSriovEnabled(pGpu) + )) + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU)) + { + pParams->virtualizationMode = + NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VGPU; + } + else if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA)) + { + pParams->virtualizationMode = + NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VSGA; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "invalid virtualization Mode: %x. Returning NONE!\n", + pParams->virtualizationMode); + + pParams->virtualizationMode = + NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NONE; + } + } + else + { + pParams->virtualizationMode = + NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NONE; + } + + NV_PRINTF(LEVEL_INFO, "Virtualization Mode: %x\n", + pParams->virtualizationMode); + + return NV_OK; +} + +/*! + * @brief This Command issues an RPC call to the host to switch the "backdoor + * VNC" view to the console. + * + * @return Returns NV_STATUS + * NV_ERR_NOT_SUPPORTED If GPU is not present under host hypervisor. + * NV_OK If GPU is present under host hypervisor. + * NV_ERR_INVALID_ARGUMENT If GPU is not present. + * + */ +NV_STATUS +deviceCtrlCmdGpuVirtualizationSwitchToVga_IMPL +( + Device *pDevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + NV_STATUS status = NV_OK; + + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + NV_RM_RPC_SWITCH_TO_VGA(pGpu, status); + return status; +} + +/*! + * @brief This command is used to find a subdevice handle by subdeviceinst + */ +NV_STATUS +deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *pParams +) +{ + NV_STATUS status; + Subdevice *pSubdevice; + + status = subdeviceGetByInstance(RES_GET_CLIENT(pDevice), + RES_GET_HANDLE(pDevice), + pParams->subDeviceInst, + &pSubdevice); + + if (status == NV_OK) + { + pParams->hSubDevice = RES_GET_HANDLE(pSubdevice); + } + + return status; +} + +NV_STATUS +deviceCtrlCmdGpuSetVgpuHeterogeneousMode_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_SET_VGPU_HETEROGENEOUS_MODE_PARAMS *pParams +) +{ + + return NV_OK; +} + +NV_STATUS +deviceCtrlCmdGpuGetVgpuHeterogeneousMode_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_GET_VGPU_HETEROGENEOUS_MODE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + NV_CHECK_OR_RETURN(LEVEL_ERROR, pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + { + pParams->bHeterogeneousMode = pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE); + } + return NV_OK; + +} + +/*! + * @brief Get the GPU's branding information. 
+ * + * @returns NV_STATUS + * NV_OK Success + */ +NV_STATUS +deviceCtrlCmdGpuGetBrandCaps_VF +( + Device *pDevice, + NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + if (IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu)) + { + NV_STATUS status = NV_OK; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NvU32 gpuMask = 0; + + NV_ASSERT_OK_OR_RETURN( + rmGpuGroupLockAcquire(pGpu->gpuInstance, + GPU_LOCK_GRP_DEVICE, + GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, + RM_LOCK_MODULES_GPU, + &gpuMask)); + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + + return status; + } + + return NV_ERR_NOT_SUPPORTED; +} diff --git a/src/nvidia/src/kernel/gpu/device_share.c b/src/nvidia/src/kernel/gpu/device_share.c new file mode 100644 index 0000000..a6c5e48 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/device_share.c @@ -0,0 +1,318 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "os/os.h" +#include "mem_mgr/virt_mem_mgr.h" +#include "mem_mgr/vaspace.h" +#include "core/system.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu_mgr/gpu_group.h" +#include "class/cl00f2.h" // IO_VASPACE_A +#include "rmapi/rs_utils.h" +#include "gpu/device/device.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "gpu_mgr/gpu_mgr.h" + +/*! + * @brief Save client share allocation information for this device + * + * Save client share allocation information for this device. The + * client share is actually allocated as a result of CliGetVASpace() + * before the VAShare is actually used. 
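+ * Only bookkeeping happens here: the handle, limits and flags are recorded
+ * on the Device, and the VA space itself is materialized lazily by
+ * deviceInitClientShare() the first time it is needed.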
+ * + * @param[in] pDevice + * @param[in] hClientShare RM client specified share handle + * @param[in] deviceAllocFlags Allocation flags from RM client + * + * @returns NV_STATUS + */ +NV_STATUS +deviceSetClientShare_IMPL +( + Device *pDevice, + NvHandle hClientShare, + NvU64 vaSize, + NvU64 vaStartInternal, + NvU64 vaLimitInternal, + NvU32 deviceAllocFlags +) +{ + pDevice->pVASpace = NULL; + pDevice->hClientShare = hClientShare; + pDevice->deviceAllocFlags = deviceAllocFlags; + pDevice->deviceInternalAllocFlags = 0; + pDevice->vaSize = vaSize; + + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_RESTRICT_RESERVED_VALIMITS) + { + pDevice->vaStartInternal = vaStartInternal; + pDevice->vaLimitInternal = vaLimitInternal; + } + + if ((deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SIZE) && (vaSize == 0)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +/*! + * @brief Initialize the device VASPACE + */ +static NV_STATUS +deviceInitClientShare +( + Device *pDevice, + NvHandle hClientShare, + NvU64 vaSize, + NvU32 deviceAllocFlags, + NvU32 deviceAllocInternalFlags +) +{ + Device *pShareDevice; + RsClient *pClientShare; + OBJVASPACE *pVAS = NULL; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + NV_STATUS status; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + NvU32 gpuMask = gpumgrGetGpuMask(pGpu); + NvU32 vaspaceClass = 0; + + pDevice->pVASpace = NULL; + + // Set broadcast state for thread + GPU_RES_SET_THREAD_BC_STATE(pDevice); + + // + // Share "default" behavior is defined by "share w/null", which + // attaches to the global address space. + // + if (hClientShare == NV01_NULL_OBJECT) + { + OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu); + status = gpugrpGetGlobalVASpace(pGpuGrp, &pVAS); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + vaspaceIncRefCnt(pVAS); + status = NV_OK; + } + + // + // "Force a new share" behavior is defined by "share w/myself" + // + else if (hClientShare == RES_GET_CLIENT_HANDLE(pDevice)) + { + NvU32 flags = 0; + NvU64 vaLimit; + + flags |= (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SHARED_MANAGEMENT) ? 
+ VASPACE_FLAGS_SHARED_MANAGEMENT : 0; + + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_MINIMIZE_PTETABLE_SIZE) + { + flags |= VASPACE_FLAGS_MINIMIZE_PTETABLE_SIZE; + } + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_RETRY_PTE_ALLOC_IN_SYS) + { + flags |= VASPACE_FLAGS_RETRY_PTE_ALLOC_IN_SYS; + } + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SIZE) + { + vaLimit = pDevice->vaSize - 1; + } + else + { + vaLimit = 0; + } + + if ( (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_64k) && + (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_128k) ) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_64k) + { + flags |= DRF_DEF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _DEFAULT); + } + else if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_128k) + { + flags |= DRF_DEF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _DEFAULT); + } + else + { + // will cause it to use the default size + flags |= DRF_DEF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _DEFAULT); + } + + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_RESTRICT_RESERVED_VALIMITS) + { + flags |= VASPACE_FLAGS_RESTRICTED_RM_INTERNAL_VALIMITS; + NV_ASSERT(pDevice->vaStartInternal); + NV_ASSERT(pDevice->vaLimitInternal); + } + else + { + NV_ASSERT(!pDevice->vaStartInternal); + NV_ASSERT(!pDevice->vaLimitInternal); + } + + // + // NV_DEVICE_ALLOCATION_FLAGS_VASPACE_IS_MIRRORED will be removed once CUDA phases out + // and uses the ctrl call NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE + // to set privileged address space + // + if ((deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_IS_MIRRORED) + ) + { + flags |= VASPACE_FLAGS_SET_MIRRORED; + } + if (NULL == GPU_GET_KERNEL_GMMU(pGpu) && (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY) || IsDFPGA(pGpu))) + vaspaceClass = IO_VASPACE_A; + else if (vaspaceClass == 0) + { + NV_ASSERT(0); + return NV_ERR_OBJECT_NOT_FOUND; + } + + flags |= VASPACE_FLAGS_ENABLE_VMM; + + // + // Page tables are allocated in guest subheap only inside non SRIOV guests + // and on host RM. + // + if (!gpuIsSplitVasManagementServerClientRmEnabled(pGpu) || + !IS_VIRTUAL(pGpu)) + { + flags |= VASPACE_FLAGS_ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR; + } + + // + // For RM unlinked SLI: the fixed offset requirement is enforced at the OBJGVASPACE + // level during allocations and mappings, so the Device flag must be converted + // into the internal VASPACE flag. + // + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_REQUIRE_FIXED_OFFSET) + { + flags |= VASPACE_FLAGS_REQUIRE_FIXED_OFFSET; + } + + status = vmmCreateVaspace(pVmm, vaspaceClass, 0, gpuMask, 0, + vaLimit, pDevice->vaStartInternal, + pDevice->vaLimitInternal, NULL, flags, &pVAS); + if (NV_OK != status) + { + NV_ASSERT(0); + return status; + } + } + + // + // Try to attach to another clients VA Share. Validate client and pull the + // share information off the first device. + // + else + { + status = serverGetClientUnderLock(&g_resServ, hClientShare, &pClientShare); + if (status != NV_OK) + return status; + + // + // If the share client doesn't have a device allocated for this GPU, + // there's no address space to share. 
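+        // The share owner's VA space is likewise created on demand: if it has
+        // not been initialized yet, deviceInitClientShare() recurses on the
+        // owner with the owner's own share parameters before this device
+        // takes a reference on the result.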
+ // + status = deviceGetByInstance(pClientShare, pDevice->deviceInst, &pShareDevice); + if (status != NV_OK) + return status; + + // Init target share if needed + if (pShareDevice->pVASpace == NULL) + { + status = deviceInitClientShare(pShareDevice, + pShareDevice->hClientShare, + pShareDevice->vaSize, + pShareDevice->deviceAllocFlags, + pShareDevice->deviceInternalAllocFlags); + if (status != NV_OK) + return status; + } + + pVAS = pShareDevice->pVASpace; + vaspaceIncRefCnt(pVAS); + } + + pDevice->pVASpace = pVAS; + return status; +} + + +/*! + * @brief Detach this pDevice from the share group + */ +void +deviceRemoveFromClientShare_IMPL +( + Device *pDevice +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + + if (pDevice->pVASpace != NULL) + { + vmmDestroyVaspace(pVmm, pDevice->pVASpace); + pDevice->pVASpace = NULL; + } +} + +NV_STATUS +deviceGetDefaultVASpace_IMPL +( + Device *pDevice, + OBJVASPACE **ppVAS +) +{ + NV_STATUS status = NV_OK; + + // + // There are some cases in SLI transitions where we allocate + // a device before the hal is initialized. + // + if (pDevice->pVASpace == NULL) + { + status = deviceInitClientShare(pDevice, + pDevice->hClientShare, + pDevice->vaSize, + pDevice->deviceAllocFlags, + pDevice->deviceInternalAllocFlags); + } + + *ppVAS = pDevice->pVASpace; + + return status; +} + diff --git a/src/nvidia/src/kernel/gpu/disp/arch/v02/kern_disp_0204.c b/src/nvidia/src/kernel/gpu/disp/arch/v02/kern_disp_0204.c new file mode 100644 index 0000000..5f7b1fc --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/arch/v02/kern_disp_0204.c @@ -0,0 +1,219 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/head/kernel_head.h" +#include "disp/v02_04/dev_disp.h" +#include "displayport/displayport.h" +#include "displayport/displayport2x.h" + +NV_STATUS +kdispComputeDpModeSettings_v02_04 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 headIndex, // not being used. + DPMODESETDATA *pDpmodesetData, + DPIMPINFO *dpInfo +) +{ + // number of link clocks per line. + NvS32 vblank_symbols = 0; + NvS32 hblank_symbols = 0; + NvU64 PrecisionFactor, ratioF, watermarkF; + NvU32 linkFreqHz = 0; + NvU32 dscFactor = pDpmodesetData->bDscEnable ? 
DSC_BPP_FACTOR : 1;
+
+    NvU32 numLanesPerLink = pDpmodesetData->laneCount;
+
+    NvS32 BlankingBits, PixelSteeringBits, NumBlankingLinkClocks;
+
+    NvU32 hActive = pDpmodesetData->SetRasterBlankStartX -
+                    pDpmodesetData->SetRasterBlankEndX;
+    NvU32 minHBlank = 0;
+    NvU32 num_symbols_per_line = 0;
+    NvU32 watermarkAdjust = DP_CONFIG_WATERMARK_ADJUST;
+    NvU32 watermarkMinimum = DP_CONFIG_WATERMARK_LIMIT;
+
+    dpInfo->bEnhancedFraming = pDpmodesetData->bEnhancedFraming;
+
+    if (pDpmodesetData->bDscEnable)
+    {
+        if (pDpmodesetData->dp2LinkBw != 0)
+        {
+            linkFreqHz = (NvU32)((((NvU64)pDpmodesetData->dp2LinkBw) * DP_LINK_BW_FREQ_MULTI_10M_TO_MBPS * 97) / 100);
+        }
+        else if (pDpmodesetData->linkBw != 0)
+        {
+            linkFreqHz = (NvU32)((((NvU64)pDpmodesetData->linkBw) * DP_LINK_BW_FREQ_MULTI_MBPS * 97) / 100);
+        }
+    }
+    else
+    {
+        if (pDpmodesetData->dp2LinkBw != 0)
+        {
+            linkFreqHz = (NvU32)((((NvU64)pDpmodesetData->dp2LinkBw) * DP_LINK_BW_FREQ_MULTI_10M_TO_MBPS * 995) / 1000);
+        }
+        else if (pDpmodesetData->linkBw != 0)
+        {
+            linkFreqHz = (NvU32)((((NvU64)pDpmodesetData->linkBw) * DP_LINK_BW_FREQ_MULTI_MBPS * 995) / 1000);
+        }
+    }
+
+    if (!linkFreqHz || !pDpmodesetData->laneCount || !pDpmodesetData->PClkFreqHz)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "One of linkFreq (%d Hz), pDpmodesetData->laneCount (%d) or PClkFreq"
+                  " (%lld Hz) came in as zero. Report issue to client (DD)\n",
+                  linkFreqHz, pDpmodesetData->laneCount,
+                  pDpmodesetData->PClkFreqHz);
+        NV_ASSERT(linkFreqHz && pDpmodesetData->laneCount && pDpmodesetData->PClkFreqHz);
+    }
+
+    // Check if we have a valid laneCount as currently we support only up to 8 lanes
+    if (!IS_VALID_LANECOUNT(pDpmodesetData->laneCount))
+    {
+        DBG_BREAKPOINT();
+    }
+
+    dpInfo->tuSize = 64;
+    PrecisionFactor = 100000;
+    ratioF =
+        ((NvU64)pDpmodesetData->PClkFreqHz * pDpmodesetData->bpp * PrecisionFactor)
+        / dscFactor;
+    ratioF /= 8 * (NvU64) linkFreqHz * pDpmodesetData->laneCount;
+
+    {
+        watermarkAdjust = DP_CONFIG_INCREASED_WATERMARK_ADJUST;
+        watermarkMinimum = DP_CONFIG_INCREASED_WATERMARK_LIMIT;
+    }
+
+    watermarkF = ratioF * dpInfo->tuSize * (PrecisionFactor - ratioF) / PrecisionFactor;
+    dpInfo->waterMark =
+        (NvU32)(watermarkAdjust +
+                ((2 * (pDpmodesetData->bpp * PrecisionFactor / (8 * numLanesPerLink * dscFactor)) +
+                  watermarkF) / PrecisionFactor));
+
+    num_symbols_per_line = (hActive * pDpmodesetData->bpp) /
+                           (8 * pDpmodesetData->laneCount * dscFactor);
+
+    if (dpInfo->waterMark > 39)
+    {
+        dpInfo->waterMark = 39;
+        DBG_BREAKPOINT();
+    }
+    else if (dpInfo->waterMark < watermarkMinimum)
+    {
+        dpInfo->waterMark = watermarkMinimum;
+    }
+    else if (dpInfo->waterMark > num_symbols_per_line)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "\n\t\twatermark greater than number of symbols in the line\n");
+        DBG_BREAKPOINT();
+        dpInfo->waterMark = num_symbols_per_line;
+    }
+
+    // minHBlank calculation.
+    if (pDpmodesetData->bMultiStream)
+    {
+        minHBlank =
+            (((hActive % 4) > 0) ?
+             (((4-(hActive % 4)) * pDpmodesetData->bpp) / dscFactor) : 0 ) +
+            (160 + 6 * 4);
+
+        minHBlank = minHBlank + (32 - minHBlank % 32);
+
+        // bpp - 1 --> Rounding
+        minHBlank = (minHBlank + (pDpmodesetData->bpp/dscFactor) - 1)
+                    / (pDpmodesetData->bpp/dscFactor);
+    }
+    else
+    {
+        BlankingBits = 3 * 8 * numLanesPerLink +
+                       (pDpmodesetData->bEnhancedFraming ? 3*8*numLanesPerLink : 0);
+
+        BlankingBits += 3*8*4;
+
+        PixelSteeringBits = (hActive % numLanesPerLink) ?
+            ((numLanesPerLink - hActive % numLanesPerLink) * pDpmodesetData->bpp) / dscFactor : 0;
+
+        BlankingBits += PixelSteeringBits;
+        NumBlankingLinkClocks =
+            (NvU32)(((NvU64)BlankingBits * PrecisionFactor) / (8 * numLanesPerLink));
+
+        minHBlank =
+            (NvU32)(NumBlankingLinkClocks * pDpmodesetData->PClkFreqHz / linkFreqHz / PrecisionFactor);
+
+        minHBlank += 12;
+
+        NV_ASSERT(hActive >= 60);
+    }
+
+    // The raster must provide at least the minimum hblank computed above.
+    NV_ASSERT((pDpmodesetData->SetRasterSizeWidth - hActive) >= minHBlank);
+
+    hblank_symbols =
+        (NvS32)(((NvU64)(pDpmodesetData->SetRasterSizeWidth - hActive - minHBlank)
+                 * linkFreqHz) / pDpmodesetData->PClkFreqHz);
+
+    hblank_symbols -= 1;
+    hblank_symbols -= 3;
+
+    hblank_symbols -= numLanesPerLink == 1 ? 9 : numLanesPerLink == 2 ? 6 : 3;
+
+    dpInfo->hBlankSym = (hblank_symbols < 0) ? 0 : hblank_symbols;
+
+    if (hActive < 40)
+    {
+        vblank_symbols = 0;
+    }
+    else
+    {
+        vblank_symbols =
+            (NvS32)(((NvU64)(hActive - 40) * linkFreqHz) /
+                    pDpmodesetData->PClkFreqHz) - 1;
+
+        vblank_symbols -= numLanesPerLink == 1 ? 39 :
+                          numLanesPerLink == 2 ? 21 : 12;
+    }
+
+    dpInfo->vBlankSym = (vblank_symbols < 0) ? 0 : vblank_symbols;
+
+    NV_PRINTF(LEVEL_INFO, "Minimum HBlank required is %d\n", minHBlank);
+
+    // Keep this at LEVEL_ERROR for now; eventually we'll switch back to LEVEL_INFO.
+    NV_PRINTF(LEVEL_ERROR,
+              "\n\t\t[IN]: PixelClockHz:%lld PixelDepth:%d LinkBw:%d LaneCount:%d "
+              "TuSize: %d DSCEnable:%s\n"
+              "\t\t[OUT]:WaterMark:%d\n"
+              "\t\t[OUT]:VBlankSymbols:%d HBlankSymbols:%d\n",
+              pDpmodesetData->PClkFreqHz, pDpmodesetData->bpp, linkFreqHz,
+              pDpmodesetData->laneCount, dpInfo->tuSize,
+              (pDpmodesetData->bDscEnable ? "NV_TRUE" : "NV_FALSE"),
+              dpInfo->waterMark,
+              dpInfo->vBlankSym, dpInfo->hBlankSym);
+
+    return NV_OK;
+}
diff --git a/src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c b/src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c
new file mode 100644
index 0000000..87473db
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c
@@ -0,0 +1,681 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************************************************************
+*
+* Kernel Display Module
+* This file contains functions managing display on CPU RM
+*
+******************************************************************************/
+
+#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0
+
+#include "gpu/gpu.h"
+#include "gpu/disp/kern_disp.h"
+#include "gpu/disp/head/kernel_head.h"
+#include "gpu/disp/disp_channel.h"
+#include "gpu/disp/disp_objs.h"
+#include "os/os.h"
+
+#include "disp/v03_00/dev_disp.h"
+
+#include "class/clc371.h"
+#include "class/clc373.h"
+
+NV_STATUS
+kdispGetChannelNum_v03_00
+(
+    KernelDisplay *pKernelDisplay,
+    DISPCHNCLASS   channelClass,
+    NvU32          channelInstance,
+    NvU32         *pChannelNum
+)
+{
+    NV_STATUS status = NV_ERR_INVALID_ARGUMENT;
+
+    if (pChannelNum == NULL)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    const KernelDisplayStaticInfo *pStaticInfo = pKernelDisplay->pStaticInfo;
+    NV_ASSERT_OR_RETURN(pStaticInfo != NULL, NV_ERR_INVALID_STATE);
+
+    switch (channelClass)
+    {
+        case dispChnClass_Curs:
+            if ((channelInstance < NV_PDISP_CHN_NUM_CURS__SIZE_1) &&
+                (channelInstance < NV_PDISP_FE_HW_SYS_CAP_HEAD_EXISTS__SIZE_1))
+            {
+                if (FLD_IDX_TEST_DRF(_PDISP, _FE_HW_SYS_CAP, _HEAD_EXISTS, channelInstance, _YES, pStaticInfo->feHwSysCap))
+                {
+                    *pChannelNum = NV_PDISP_CHN_NUM_CURS(channelInstance);
+                    status = NV_OK;
+                }
+                else
+                {
+                    status = NV_ERR_NOT_SUPPORTED;
+                }
+            }
+            break;
+
+        case dispChnClass_Winim:
+            if (channelInstance < NV_PDISP_CHN_NUM_WINIM__SIZE_1)
+            {
+                if (pStaticInfo->windowPresentMask & NVBIT32(channelInstance))
+                {
+                    *pChannelNum = NV_PDISP_CHN_NUM_WINIM(channelInstance);
+                    status = NV_OK;
+                }
+                else
+                {
+                    status = NV_ERR_NOT_SUPPORTED;
+                }
+            }
+            break;
+
+        case dispChnClass_Core:
+            *pChannelNum = NV_PDISP_CHN_NUM_CORE;
+            status = NV_OK;
+            break;
+
+        case dispChnClass_Win:
+            if (channelInstance < NV_PDISP_CHN_NUM_WIN__SIZE_1)
+            {
+                if (pStaticInfo->windowPresentMask & NVBIT32(channelInstance))
+                {
+                    *pChannelNum = NV_PDISP_CHN_NUM_WIN(channelInstance);
+                    status = NV_OK;
+                }
+                else
+                {
+                    status = NV_ERR_NOT_SUPPORTED;
+                }
+            }
+            break;
+
+        case dispChnClass_Any:
+            // Assert in case of physical RM; the ANY channel is a kernel-only channel.
+            NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_INVALID_CHANNEL);
+            *pChannelNum = NV_PDISP_CHN_NUM_ANY;
+            status = NV_OK;
+            break;
+
+        default:
+            NV_PRINTF(LEVEL_ERROR, "Unknown channel class %x\n", channelClass);
+            status = NV_ERR_INVALID_CHANNEL;
+            DBG_BREAKPOINT();
+            break;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Get the register base address for display capabilities registers
+ *
+ * @param      pGpu
+ * @param      pKernelDisplay
+ * @param[out] pOffset NvU32 pointer to return base offset
+ * @param[out] pSize   NvU32 pointer to return size
+ */
+void
+kdispGetDisplayCapsBaseAndSize_v03_00
+(
+    OBJGPU        *pGpu,
+    KernelDisplay *pKernelDisplay,
+    NvU32         *pOffset,
+    NvU32         *pSize
+)
+{
+    if (pOffset)
+    {
+        // Tegra offsets need to be adjusted by -0x610000.
+        *pOffset = DRF_BASE(NV_PDISP_FE_SW) +
+                   kdispGetBaseOffset_HAL(pGpu, pKernelDisplay);
+    }
+
+    if (pSize)
+    {
+        *pSize = sizeof(NvC373DispCapabilities_Map);
+    }
+}
+
+/*!
+ * @brief Get the register base address for SF user space.
+ *
+ * @param      pGpu
+ * @param      pKernelDisplay
+ * @param[out] pOffset NvU32 pointer to return base offset
+ * @param[out] pSize   NvU32 pointer to return size
+ */
+void
+kdispGetDisplaySfUserBaseAndSize_v03_00
+(
+    OBJGPU        *pGpu,
+    KernelDisplay *pKernelDisplay,
+    NvU32         *pOffset,
+    NvU32         *pSize
+)
+{
+    if (pOffset)
+    {
+        // Tegra offsets need to be adjusted by -0x610000.
+        *pOffset = DRF_BASE(NV_PDISP_SF_USER_0) +
+                   kdispGetBaseOffset_HAL(pGpu, pKernelDisplay);
+    }
+
+    if (pSize)
+    {
+        *pSize = sizeof(NvC371DispSfUserMap);
+    }
+}
+
+/*!
+ * @brief Get the register base address and size of the channel user area
+ *
+ * @param      pGpu
+ * @param      pKernelDisplay
+ * @param[in]  channelClass    Class of the channel
+ * @param[in]  channelInstance Channel instance #
+ * @param[out] pOffset         User space base address
+ * @param[out] pSize           User space length (optional)
+ *
+ * @return NV_STATUS
+ */
+NV_STATUS
+kdispGetDisplayChannelUserBaseAndSize_v03_00
+(
+    OBJGPU        *pGpu,
+    KernelDisplay *pKernelDisplay,
+    DISPCHNCLASS   channelClass,
+    NvU32          channelInstance,
+    NvU32         *pOffset,
+    NvU32         *pSize
+)
+{
+    NvU32 dispChannelNum;
+    NV_STATUS status;
+
+    if (pOffset == NULL)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    status = kdispGetChannelNum_HAL(pKernelDisplay, channelClass, channelInstance, &dispChannelNum);
+    if (status != NV_OK)
+        return status;
+
+    NV_ASSERT(dispChannelNum < NV_UDISP_FE_CHN_ASSY_BASEADR__SIZE_1);
+
+    *pOffset = NV_UDISP_FE_CHN_ASSY_BASEADR(dispChannelNum);
+
+    //
+    // The user area size for the Core channel is 64KB (32KB for Armed and 32KB for Assembly),
+    // and for all other channels it is 4KB (2KB for Armed and 2KB for Assembly).
+    //
+    if (pSize != NULL)
+    {
+        switch (channelClass)
+        {
+            case dispChnClass_Curs:
+                *pSize = NV_UDISP_FE_CHN_ASSY_BASEADR_CURS(dispChannelNum + 1) - NV_UDISP_FE_CHN_ASSY_BASEADR_CURS(dispChannelNum);
+                break;
+
+            case dispChnClass_Winim:
+                *pSize = NV_UDISP_FE_CHN_ASSY_BASEADR_WINIM(dispChannelNum + 1) - NV_UDISP_FE_CHN_ASSY_BASEADR_WINIM(dispChannelNum);
+                break;
+
+            case dispChnClass_Core:
+                *pSize = (NV_UDISP_FE_CHN_ARMED_BASEADR_CORE - NV_UDISP_FE_CHN_ASSY_BASEADR_CORE) * 2;
+                break;
+
+            case dispChnClass_Win:
+                *pSize = NV_UDISP_FE_CHN_ASSY_BASEADR_WIN(dispChannelNum + 1) - NV_UDISP_FE_CHN_ASSY_BASEADR_WIN(dispChannelNum);
+                break;
+
+            default:
+                break;
+        }
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Validate the selected SW class.
+ *
+ * @param[in] pGpu           GPU object pointer
+ * @param[in] pKernelDisplay KernelDisplay object pointer
+ * @param[in] swClass        Selected class name
+ */
+NV_STATUS
+kdispSelectClass_v03_00_KERNEL
+(
+    OBJGPU        *pGpu,
+    KernelDisplay *pKernelDisplay,
+    NvU32          swClass
+)
+{
+    if (!gpuIsClassSupported(pGpu, swClass))
+    {
+        NV_PRINTF(LEVEL_ERROR, "class %x not supported\n", swClass);
+        return NV_ERR_INVALID_CLASS;
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Read line count and frame count from RG_DPCA.
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelDisplay KernelDisplay pointer + * @param[in] head head index + * @param[out] pLineCount line count + * @param[out] pFrameCount frame count + * + * @return NV_STATUS + */ +NV_STATUS +kdispReadRgLineCountAndFrameCount_v03_00_KERNEL +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 head, + NvU32 *pLineCount, + NvU32 *pFrameCount +) +{ + NvU32 data32; + + if (head >= kdispGetNumHeads(pKernelDisplay)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + data32 = GPU_REG_RD32(pGpu,NV_PDISP_RG_DPCA(head)); + + *pLineCount = DRF_VAL(_PDISP, _RG_DPCA, _LINE_CNT, data32); + *pFrameCount = DRF_VAL(_PDISP, _RG_DPCA, _FRM_CNT, data32); + + return NV_OK; +} + +/*! + * @brief Get the LOADV counter + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelHead KernelHead object pointer + * + * @return the current LOADV counter + */ +NvU32 +kheadGetLoadVCounter_v03_00 +( + OBJGPU *pGpu, + KernelHead *pKernelHead +) +{ + return GPU_REG_RD32(pGpu, NV_PDISP_POSTCOMP_HEAD_LOADV_COUNTER(pKernelHead->PublicId)); +} + +NvU32 +kdispGetPBTargetAperture_v03_00 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 memAddrSpace, + NvU32 cacheSnoop +) +{ + NvU32 pbTargetAperture = PHYS_NVM; + + if ((memAddrSpace == ADDR_SYSMEM) && (cacheSnoop != 0U)) + { + pbTargetAperture = PHYS_PCI_COHERENT; + } + else if (memAddrSpace == ADDR_SYSMEM) + { + pbTargetAperture = PHYS_PCI; + } + else + { + pbTargetAperture = PHYS_NVM; + } + + return pbTargetAperture; +} + +NvU32 +kheadReadPendingRgLineIntr_v03_00 +( + OBJGPU *pGpu, + KernelHead *pKernelHead, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 intr; + NvU32 headIntrMask = 0; + + intr = GPU_REG_RD32_EX(pGpu, NV_PDISP_FE_RM_INTR_DISPATCH, pThreadState); + + if (!FLD_IDX_TEST_DRF(_PDISP, _FE_RM_INTR_DISPATCH, _HEAD_TIMING, pKernelHead->PublicId, _PENDING, intr)) + { + return headIntrMask; + } + + intr = GPU_REG_RD32_EX(pGpu, NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING(pKernelHead->PublicId), pThreadState); + + if (FLD_TEST_DRF(_PDISP, _FE_EVT_STAT_HEAD_TIMING, _RG_LINE_A, _PENDING, intr)) + { + headIntrMask |= headIntr_RgLineA; + } + + if (FLD_TEST_DRF(_PDISP, _FE_EVT_STAT_HEAD_TIMING, _RG_LINE_B, _PENDING, intr)) + { + headIntrMask |= headIntr_RgLineB; + } + + return headIntrMask; +} + +void +kheadResetRgLineIntrMask_v03_00 +( + OBJGPU *pGpu, + KernelHead *pKernelHead, + NvU32 headIntrMask, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 writeIntr = 0; + + if (headIntrMask & headIntr_RgLineA) + { + writeIntr |= DRF_DEF(_PDISP, _FE_EVT_STAT_HEAD_TIMING, _RG_LINE_A, _RESET); + } + + if (headIntrMask & headIntr_RgLineB) + { + writeIntr |= DRF_DEF(_PDISP, _FE_EVT_STAT_HEAD_TIMING, _RG_LINE_B, _RESET); + } + + GPU_REG_WR32_EX(pGpu, NV_PDISP_FE_EVT_STAT_HEAD_TIMING(pKernelHead->PublicId), writeIntr, pThreadState); +} + +NvBool +kheadReadPendingVblank_v03_00 +( + OBJGPU *pGpu, + KernelHead *pKernelHead, + NvU32 *pCachedIntr, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 intr = pCachedIntr ? 
*pCachedIntr : GPU_REG_RD32_EX(pGpu, NV_PDISP_FE_RM_INTR_DISPATCH, pThreadState); + + if (!FLD_IDX_TEST_DRF(_PDISP, _FE_RM_INTR_DISPATCH, _HEAD_TIMING, pKernelHead->PublicId, _PENDING, intr)) + { + return NV_FALSE; + } + + intr = GPU_REG_RD32_EX(pGpu, NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING(pKernelHead->PublicId), pThreadState); + + if (FLD_TEST_DRF(_PDISP, _FE_EVT_STAT_HEAD_TIMING, _LAST_DATA, _PENDING, intr)) + { + return NV_TRUE; + } + + return NV_FALSE; +} + +void kheadResetPendingLastData_v03_00 +( + OBJGPU *pGpu, + KernelHead *pKernelHead, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 writeIntr = GPU_REG_RD32_EX(pGpu, NV_PDISP_FE_EVT_STAT_HEAD_TIMING(pKernelHead->PublicId), pThreadState); + + writeIntr = DRF_DEF(_PDISP, _FE_EVT_STAT_HEAD_TIMING, _LAST_DATA, _RESET); + + GPU_REG_WR32_EX(pGpu, NV_PDISP_FE_EVT_STAT_HEAD_TIMING(pKernelHead->PublicId), writeIntr, pThreadState); +} + +NV_STATUS +kdispReadAwakenChannelNumMask_v03_00 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 *pAwakenChannelNumMask, + DISPCHNCLASS channelClass, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 channelNum = 0; + NvU32 channelNumMask = 0; + NvU32 intr = 0; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(pAwakenChannelNumMask, NV_ERR_INVALID_ARGUMENT); + + switch (channelClass) + { + case dispChnClass_Win: + intr = GPU_REG_RD32_EX(pGpu, NV_PDISP_FE_EVT_STAT_AWAKEN_WIN, pThreadState); + + for (channelNum = 0; + channelNum < NV_PDISP_FE_EVT_STAT_AWAKEN_WIN_CH__SIZE_1; + ++channelNum) + { + if (FLD_IDX_TEST_DRF(_PDISP, _FE_EVT_STAT_AWAKEN_WIN, + _CH, channelNum, _PENDING, intr)) + { + channelNumMask |= NVBIT(channelNum); + } + } + break; + + case dispChnClass_Core: + intr = GPU_REG_RD32_EX(pGpu, NV_PDISP_FE_EVT_STAT_AWAKEN_OTHER, pThreadState); + + if (FLD_TEST_DRF(_PDISP, _FE_EVT_STAT_AWAKEN_OTHER, _CORE, _PENDING, intr)) + { + channelNumMask |= NVBIT(NV_PDISP_CHN_NUM_CORE); + } + break; + + default: + NV_PRINTF(LEVEL_ERROR, "invalid channel class passed\n"); + DBG_BREAKPOINT(); + channelNumMask = 0; + status = NV_ERR_INVALID_ARGUMENT; + break; + } + + *pAwakenChannelNumMask = channelNumMask; + return status; +} + +static void +_kdispResetAwakenChannelNumMask +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 awakenChannelNumMask, + DISPCHNCLASS channelClass, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 channelNum = 0; + NvU32 writeIntr = 0; + + switch (channelClass) + { + case dispChnClass_Win: + for (channelNum = 0; awakenChannelNumMask != 0; channelNum++) + { + if (awakenChannelNumMask & NVBIT(channelNum)) + { + writeIntr |= DRF_IDX_DEF(_PDISP, _FE_EVT_STAT_AWAKEN_WIN, + _CH, channelNum, _RESET); + awakenChannelNumMask &= ~NVBIT(channelNum); + } + } + if (writeIntr != 0) + { + GPU_REG_WR32_EX(pGpu, NV_PDISP_FE_EVT_STAT_AWAKEN_WIN, writeIntr, pThreadState); + } + break; + + case dispChnClass_Core: + if (awakenChannelNumMask & NVBIT(NV_PDISP_CHN_NUM_CORE)) + { + writeIntr = DRF_DEF(_PDISP, _FE_EVT_STAT_AWAKEN_OTHER, _CORE, _RESET); + GPU_REG_WR32_EX(pGpu, NV_PDISP_FE_EVT_STAT_AWAKEN_OTHER, writeIntr, pThreadState); + } + break; + + default: + NV_PRINTF(LEVEL_ERROR, "invalid channel class passed!\n"); + DBG_BREAKPOINT(); + break; + } +} + +static void +_kdispHandleAwakenChnMask +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 channelNumMask, + DISPCHNCLASS channelClass, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 i = 0; + NvBool bEventFound = NV_FALSE; + NV_STATUS status = NV_OK; + NvU32 channelNum = 0; + RsClient *pClient; + NvHandle hChannel; + NvBool bInUse; + + 
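//
+    // Clear the pending awaken bits in HW first so that awakens raised while
+    // we are processing are not lost, then walk the channel mask in channel
+    // order and deliver the notify event registered on each channel.
+    //
+    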
_kdispResetAwakenChannelNumMask(pGpu, pKernelDisplay, channelNumMask, channelClass, pThreadState);
+
+    // Handle Awaken notifiers in channel order
+    for (i = 0; channelNumMask != 0; i++)
+    {
+        if (!(NVBIT(i) & channelNumMask))
+        {
+            continue;
+        }
+
+        bEventFound = NV_FALSE;
+
+        status = kdispGetChannelNum_HAL(pKernelDisplay, channelClass, i, &channelNum);
+
+        if (status != NV_OK)
+        {
+            return;
+        }
+
+        NV_ASSERT_OR_RETURN_VOID(pKernelDisplay->pClientChannelTable != NULL);
+        bInUse   = pKernelDisplay->pClientChannelTable[channelNum].bInUse;
+        pClient  = pKernelDisplay->pClientChannelTable[channelNum].pClient;
+        hChannel = pKernelDisplay->pClientChannelTable[channelNum].hChannel;
+
+        // OK, we've got this interrupt; let's see if we have a SW channel ready for it.
+        if (bInUse)
+        {
+            DispChannel *pChannel;
+
+            if (dispchnGetByHandle(pClient, hChannel, &pChannel) == NV_OK)
+            {
+                PEVENTNOTIFICATION pEventNotifications = inotifyGetNotificationList(staticCast(pChannel, INotifier));
+                bEventFound = NV_TRUE;
+
+                //
+                // The caller is assumed to always use 0 as the index.
+                // And since the method that will always cause this is UPDATE, there's no need
+                // to send any real method data here anyway.
+                //
+                notifyEvents(pGpu, pEventNotifications, 0, 0, 0, NV_OK, NV_OS_WRITE_THEN_AWAKEN);
+            }
+        }
+
+        if (bEventFound == NV_FALSE)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "seeing an awaken in channel %d without an associated awaken event\n",
+                      channelNum);
+            DBG_BREAKPOINT();
+        }
+        channelNumMask &= ~NVBIT(i);
+    }
+}
+
+NvU32
+kdispServiceAwakenIntr_v03_00
+(
+    OBJGPU            *pGpu,
+    KernelDisplay     *pKernelDisplay,
+    THREAD_STATE_NODE *pThreadState
+)
+{
+    NvU32 awakenWinChannelNumMask = 0, awakenCoreChannelNumMask = 0;
+    NvU32 intrCtrlDisp;
+    NvBool bAwakenIntrPending = NV_FALSE;
+
+    if (pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_IN_AWAKEN_INTR))
+        return awakenWinChannelNumMask;
+
+    pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_IN_AWAKEN_INTR, NV_TRUE);
+
+    intrCtrlDisp = GPU_REG_RD32_EX(pGpu, DISP_INTR_REG(STAT_CTRL_DISP), pThreadState);
+
+    // Handle awaken interrupts
+    if (FLD_TEST_DRF(_PDISP, _FE_RM_INTR_STAT_CTRL_DISP, _AWAKEN, _PENDING, intrCtrlDisp))
+    {
+        // Handle Window Awaken Interrupts
+        kdispReadAwakenChannelNumMask_HAL(pGpu, pKernelDisplay, &awakenWinChannelNumMask, dispChnClass_Win, pThreadState);
+        if (awakenWinChannelNumMask != 0)
+        {
+            bAwakenIntrPending = NV_TRUE;
+            _kdispHandleAwakenChnMask(pGpu, pKernelDisplay, awakenWinChannelNumMask, dispChnClass_Win, pThreadState);
+        }
+
+        // Handle Core Channel Awaken Interrupts
+        kdispReadAwakenChannelNumMask_HAL(pGpu, pKernelDisplay, &awakenCoreChannelNumMask, dispChnClass_Core, pThreadState);
+        if (awakenCoreChannelNumMask != 0)
+        {
+            bAwakenIntrPending = NV_TRUE;
+            _kdispHandleAwakenChnMask(pGpu, pKernelDisplay, awakenCoreChannelNumMask, dispChnClass_Core, pThreadState);
+        }
+
+        // HW reported an AWAKEN interrupt; at least one channel should have AWAKEN pending.
+        NV_ASSERT(bAwakenIntrPending);
+    }
+
+    pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_IN_AWAKEN_INTR, NV_FALSE);
+
+    return awakenWinChannelNumMask;
+}
diff --git a/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0401.c b/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0401.c
new file mode 100644
index 0000000..abc9f33
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0401.c
@@ -0,0 +1,91 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Kernel Display Module +* This file contains functions managing display on CPU RM +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "os/os.h" +#include "gpu/gpu.h" +#include "gpu/disp/kern_disp.h" +#include "disp/v04_01/dev_disp.h" +#include "platform/sli/sli.h" +#include "class/clc370.h" + +NvBool +kdispReadPendingWinSemIntr_v04_01 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + THREAD_STATE_NODE *pThreadState, + NvU32 *pCachedIntr +) +{ + NvU32 intr = pCachedIntr ? *pCachedIntr : + GPU_REG_RD32_EX(pGpu, NV_PDISP_FE_RM_INTR_STAT_CTRL_DISP, pThreadState); + return FLD_TEST_DRF(_PDISP, _FE_RM_INTR_STAT_CTRL_DISP, _WIN_SEM, _PENDING, intr); +} + +void +kdispHandleWinSemEvt_v04_01 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 data32 = 0; + NvU32 winSemEvtMask = 0; + NvU32 win; + + if (!kdispReadPendingWinSemIntr_HAL(pGpu, pKernelDisplay, pThreadState, NULL)) + return; + + data32 = GPU_REG_RD32_EX(pGpu, NV_PDISP_FE_EVT_DISPATCH, pThreadState); + if (FLD_TEST_DRF(_PDISP, _FE_EVT_DISPATCH, _SEM_WIN, _NOT_PENDING, data32)) + return; + + data32 = GPU_REG_RD32_EX(pGpu, NV_PDISP_FE_EVT_STAT_SEM_WIN, pThreadState); + + for (win = 0; win < NV_PDISP_FE_EVT_STAT_SEM_WIN_CH__SIZE_1; win++) + { + if (FLD_IDX_TEST_DRF(_PDISP, _FE_EVT_STAT_SEM_WIN, _CH, win, _PENDING, data32)) + { + winSemEvtMask |= NVBIT(win); + } + } + + //Clear Event + osDispService(NV_PDISP_FE_EVT_STAT_SEM_WIN, winSemEvtMask); + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + GPU_REG_WR32_EX(pGpu, NV_PDISP_FE_EVT_STAT_SEM_WIN, winSemEvtMask, pThreadState); + SLI_LOOP_END; + + //TODO: Notify DD about the event, + kdispNotifyEvent(pGpu, pKernelDisplay, NVC370_NOTIFIERS_WIN_SEM_NOTIFICATION, &winSemEvtMask, sizeof(winSemEvtMask), 0, 0); +} diff --git a/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c b/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c new file mode 100644 index 0000000..bfcfee6 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c @@ -0,0 +1,148 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Kernel Display Module +* This file contains functions managing display on CPU RM +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "os/os.h" +#include "gpu/gpu.h" +#include "gpu/disp/kern_disp.h" +#include "disp/v04_02/dev_disp.h" + +/*! + * @brief Return base offset for NV_PDISP that needs to be adjusted + * for register accesses. + */ +NvS32 kdispGetBaseOffset_v04_02 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay +) +{ + return (0x0 - DRF_BASE(NV_PDISP)); +} + +/*! + * @brief Tracks display bandwidth requests and forwards highest request to ICC + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelDisplay KernelDisplay pointer + * @param[in] iccBwClient Identifies requester + * (DISPLAY_ICC_BW_CLIENT_xxx value) + * @param[in] minRequiredIsoBandwidthKBPS ISO BW requested (KB/sec) + * @param[in] minRequiredFloorBandwidthKBPS dramclk freq * pipe width (KB/sec) + * + * @returns NV_OK if successful, + * NV_ERR_INSUFFICIENT_RESOURCES if one of the bandwidth values is too + * high, and bandwidth cannot be allocated, + * NV_ERR_INVALID_PARAMETER if iccBwClient is not a valid value, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * NV_ERR_GENERIC if some other kind of error occurred. + */ +NV_STATUS +kdispArbAndAllocDisplayBandwidth_v04_02 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + enum DISPLAY_ICC_BW_CLIENT iccBwClient, + NvU32 minRequiredIsoBandwidthKBPS, + NvU32 minRequiredFloorBandwidthKBPS +) +{ +typedef struct +{ + NvU32 minRequiredIsoBandwidthKBPS; + NvU32 minRequiredFloorBandwidthKBPS; +} ICC_BW_VALUES; + + static ICC_BW_VALUES clientBwValues[NUM_DISPLAY_ICC_BW_CLIENTS] = {0}; + static ICC_BW_VALUES oldArbBwValues = {0}; + ICC_BW_VALUES newArbBwValues; + NV_STATUS status = NV_OK; + + NV_PRINTF(LEVEL_INFO, "%s requests ISO BW = %u KBPS, floor BW = %u KBPS\n", + (iccBwClient == DISPLAY_ICC_BW_CLIENT_RM) ? "RM" : + (iccBwClient == DISPLAY_ICC_BW_CLIENT_EXT) ? 
"Ext client" : + "Unknown client", + minRequiredIsoBandwidthKBPS, + minRequiredFloorBandwidthKBPS); + if (iccBwClient >= NUM_DISPLAY_ICC_BW_CLIENTS) + { + NV_PRINTF(LEVEL_ERROR, "Bad iccBwClient value (%u)\n", iccBwClient); + NV_ASSERT(NV_FALSE); + return NV_ERR_INVALID_PARAMETER; + } + if (iccBwClient == DISPLAY_ICC_BW_CLIENT_RM) + { + // + // DD should have allocated the required ISO BW prior to the modeset. + // It is not safe for RM to do the allocation because the allocation + // may fail, but the modeset has already started and cannot be aborted. + // (The only reason RM needs to put its ISO BW request in at all is to + // make sure the required BW is maintained until all of the RM work at + // the end of the modeset is done.) + // + NV_ASSERT(minRequiredIsoBandwidthKBPS <= + clientBwValues[DISPLAY_ICC_BW_CLIENT_EXT].minRequiredIsoBandwidthKBPS); + } + clientBwValues[iccBwClient].minRequiredIsoBandwidthKBPS = minRequiredIsoBandwidthKBPS; + clientBwValues[iccBwClient].minRequiredFloorBandwidthKBPS = minRequiredFloorBandwidthKBPS; + // + // Make sure there are only two BW clients; otherwise, we would need a loop + // to process the array elements. + // + ct_assert(NUM_DISPLAY_ICC_BW_CLIENTS <= 2); + newArbBwValues.minRequiredIsoBandwidthKBPS = + NV_MAX(clientBwValues[DISPLAY_ICC_BW_CLIENT_RM].minRequiredIsoBandwidthKBPS, + clientBwValues[DISPLAY_ICC_BW_CLIENT_EXT].minRequiredIsoBandwidthKBPS); + newArbBwValues.minRequiredFloorBandwidthKBPS = + NV_MAX(clientBwValues[DISPLAY_ICC_BW_CLIENT_RM].minRequiredFloorBandwidthKBPS, + clientBwValues[DISPLAY_ICC_BW_CLIENT_EXT].minRequiredFloorBandwidthKBPS); + if ((oldArbBwValues.minRequiredIsoBandwidthKBPS != + newArbBwValues.minRequiredIsoBandwidthKBPS) || + (oldArbBwValues.minRequiredFloorBandwidthKBPS != + newArbBwValues.minRequiredFloorBandwidthKBPS)) + { + NV_PRINTF(LEVEL_INFO, "Sending request to icc_set_bw: ISO BW = %u KBPS, floor BW = %u KBPS\n", + newArbBwValues.minRequiredIsoBandwidthKBPS, + newArbBwValues.minRequiredFloorBandwidthKBPS); + status = + osTegraAllocateDisplayBandwidth(pGpu->pOsGpuInfo, + newArbBwValues.minRequiredIsoBandwidthKBPS, + newArbBwValues.minRequiredFloorBandwidthKBPS); + NV_PRINTF(LEVEL_INFO, "Allocation request returns: %s (0x%08X)\n", + nvstatusToString(status), status); + if (status == NV_OK) + { + oldArbBwValues = newArbBwValues; + } + } + return status; +} + diff --git a/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0404.c b/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0404.c new file mode 100644 index 0000000..320320f --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0404.c @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************************************************************
+*
+* Kernel Display Module
+* This file contains functions managing display on CPU RM
+*
+******************************************************************************/
+
+#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0
+
+#include "gpu/disp/vblank_callback/vblank.h"
+#include "gpu/disp/kern_disp.h"
+
+void
+kheadVsyncNotificationOverRgVblankIntr_v04_04
+(
+    OBJGPU     *pGpu,
+    KernelHead *pKernelHead
+)
+{
+    KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    VBLANKCALLBACK *pCallback = (VBLANKCALLBACK *)pKernelDisplay->pRgVblankCb;
+    if (pKernelDisplay->bIsPanelReplayEnabled)
+    {
+        if (pCallback != NULL && pCallback->Proc != NULL)
+        {
+            pCallback->Proc(pGpu, pCallback->pObject, pCallback->Param1,
+                            pCallback->Param2, pCallback->Status);
+        }
+    }
+}
+
diff --git a/src/nvidia/src/kernel/gpu/disp/arch/v05/kern_disp_0501.c b/src/nvidia/src/kernel/gpu/disp/arch/v05/kern_disp_0501.c
new file mode 100644
index 0000000..dc3c106
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/disp/arch/v05/kern_disp_0501.c
@@ -0,0 +1,711 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************************************************************
+*
+* Kernel Display Module
+* This file contains functions managing display on CPU RM
+*
+******************************************************************************/
+
+#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0
+
+#include "gpu/gpu.h"
+#include "gpu/disp/kern_disp.h"
+#include "gpu/disp/head/kernel_head.h"
+#include "disp/v05_01/dev_disp.h"
+#include "displayport/displayport.h"
+#include "displayport/displayport2x.h"
+
+
+// Local helper functions and structures/macros for NVD5 DP IMP.
+
+// All bpp / effective bpp values are multiplied by this scaler to avoid floating-point math.
+#define BPPX256_SCALER 256U +#define BPPX16_SCALER 16U +#define BPPX32_SCALER 32U +#define LOGICAL_LANES 4U + +// +// DP1: 8b/10b, we have 1 byte per logic lane. +// DP2: 128b/132b, we have only 4 bytes per lane. +// +#define DP1_BYTE_PER_LOGIC_LANE 1U +#define DP2_BYTES_PER_LOGIC_LANE 4U + +// +// DP1: 8b/10b, 1 symbol is 8 bits. +// DP2: 128b/132b, 1 symbol is 32 bits. +// +#define DP1_SYMBOL_SIZE 8U +#define DP2_SYMBOL_SIZE 32U + +// DP1: 8b/10b, symbol size after channel coding. +#define DP1_CODING_SIZE 10U + +#define GET_SYMBOL_SIZE(bIsDp2xChannelCoding) ((bIsDp2xChannelCoding) ? DP2_SYMBOL_SIZE : DP1_SYMBOL_SIZE) +#define BYTES_PER_LOGIC_LANE(bIsDp2xChannelCoding) ((bIsDp2xChannelCoding) ? DP2_BYTES_PER_LOGIC_LANE : DP1_BYTE_PER_LOGIC_LANE) + +#define FEC_PARITY_SYM(lanes, x) ((lanes) == 1 ? (NV_MIN(((x) - 1) % 512, 12) + ((x) - 1) / 512 * 12 + 1) : \ + (NV_MIN(((x) - 1) % 256, 6) + ((x) - 1) / 256 * 6 + 1)) +#define HBLANK_SYMBOLS_SCALE 64U + +typedef enum +{ + RG_PACKET_MODE_SINGLE = 1, + RG_PACKET_MODE_DOUBLE = 2, + RG_PACKET_MODE_QUAD = 4 +} RG_PACKET_MODE; + +#define DEFAULT_RG_PACKET_MODE 4U +#define DEFAULT_PCLK_FACTOR_VALUE 8U + +typedef enum +{ + TWO_CHANNELS = 2, + EIGHT_CHANNELS = 8 +} DP_AUDIO_CHANNELS; + +static NvU32 _calcEffectiveBppxScalerNonDsc +( + NvU32 hActive, + NvU32 sourceBpp, + NvBool bIsDp2xChannelCoding, + NvBool bMultiStream, + NvU32 laneCount +) +{ + NvU32 totalSymbols = 0U; + NvU32 effectiveBppxScaler = 0U; + NvU32 totalSymbolsPerLane = 0U; + NvU32 logicalLanes = LOGICAL_LANES; + + NvU32 symbolSize = GET_SYMBOL_SIZE(bIsDp2xChannelCoding); + + // + // Logic lane count values for : + // 1. 128b/132b : 4 + // 2. 8b/10 : + // a. MST : 4 + // B. SST : 1, 2 or 4 based on lane Count + // + if (!bMultiStream && !bIsDp2xChannelCoding) + { + logicalLanes = laneCount; + } + + NvU32 bitsPerLane = (NvU32)NV_CEIL(hActive, logicalLanes) * sourceBpp; + totalSymbolsPerLane = (NvU32)NV_CEIL(bitsPerLane, symbolSize); + totalSymbols = totalSymbolsPerLane * logicalLanes; + effectiveBppxScaler = (NvU32)NV_CEIL((totalSymbols * symbolSize * BPPX256_SCALER), hActive); + + return effectiveBppxScaler; +} + +static NvU32 _calcDpMinHBlankMST +( + NvU32 bppXScaler, + NvBool bIsDp2xChannelCoding +) +{ + NvU32 symbolCounts = bIsDp2xChannelCoding ? 3U : 5U; + NvU32 symbolSize = GET_SYMBOL_SIZE(bIsDp2xChannelCoding); + return NV_CEIL(symbolCounts * symbolSize * LOGICAL_LANES * BPPX256_SCALER, bppXScaler); +} + +static NvU32 _getSdpSymbolsMST +( + NvU32 sdpPayloadSize, + NvBool bIsDp2xChannelCoding +) +{ + if (sdpPayloadSize <= 0) + return 0; + if (bIsDp2xChannelCoding) + return (4 + ((sdpPayloadSize / 16) * 5)); + else + return (2 + (NV_CEIL((sdpPayloadSize / 16 * 5) + 2, 4))); +} + +static NvU32 _getDpAudioSymbolMST +( + DP_AUDIO_CHANNELS channels, + NvU32 samples_x1000, + NvBool bIsDp2xChannelCoding +) +{ + // + // Two channel : samples per packets = 2. + // Eight Channel : samples per packets = 1 + // + NvU32 packets_x1000 = samples_x1000 / ((channels == 2) ? 2 : 1); + NvU32 packetPerLineOff = packets_x1000 % 1000; + NvU32 packetPerLineBase = packets_x1000 / 1000; + + NvU32 sdpPayloadSize = (channels == 2) ? 
16U : 32U; + NvU32 symbolPerLineLo = _getSdpSymbolsMST(sdpPayloadSize, bIsDp2xChannelCoding) * packetPerLineBase; + NvU32 symbolPerLineHi = _getSdpSymbolsMST(sdpPayloadSize, bIsDp2xChannelCoding) * (packetPerLineBase + 1); + + NvU32 symbolPerLine_x1000 = symbolPerLineLo * (1000 - packetPerLineOff) + + symbolPerLineHi * packetPerLineOff; + + return NV_CEIL(symbolPerLine_x1000, 1000); +} + +// Returns Audio_Symbols_per_Line_per_Lane. +static NvU32 _getDpAudioSymbolSST +( + DP_AUDIO_CHANNELS channels, + NvU32 samplesPerLine_x1000, + NvU32 laneCount, + NvU32 surfaceWidth +) +{ + NvU32 cyclesPerPacket = 0U; + NvU32 cyclesPerPacketInc = 0U; + NvU32 cyclesOverHead = (laneCount == 1) ? 12 : 2; + + NvU32 packetsPerLine = 0U; + + switch (channels) + { + case TWO_CHANNELS: + { + switch (laneCount) + { + case 1: + cyclesPerPacket = 30; + cyclesPerPacketInc = 20; + break; + case 2: + cyclesPerPacket = 16; + cyclesPerPacketInc = 10; + break; + case 4: + cyclesPerPacket = 9; + cyclesPerPacketInc = 5; + break; + } + } + case EIGHT_CHANNELS: + { + switch (laneCount) + { + case 1: + cyclesPerPacket = 50; + cyclesPerPacketInc = 40; + break; + case 2: + cyclesPerPacket = 26; + cyclesPerPacketInc = 20; + break; + case 4: + cyclesPerPacket = 14; + cyclesPerPacketInc = 10; + break; + } + } + } + + // + // Two channel : samples per packets = 2. + // Eight Channel : samples per packets = 1 + // + packetsPerLine = NV_CEIL(samplesPerLine_x1000, + 1000 * ((channels == TWO_CHANNELS) ? 2 : 1)); + + if (packetsPerLine == 0) + return 0; + + return (cyclesPerPacket + (packetsPerLine - 1) * cyclesPerPacketInc + cyclesOverHead); +} + +static NV_STATUS _calcPClkFactorAndRgPacketMode +( + OBJGPU *pGpu, + DPMODESETDATA *pDpModesetData, + NvU32 headIndex, + NvU32 *PClkFactor, + NvU32 *rgPacketMode +) +{ + + NvU64 rawPixelFreq = pDpModesetData->PClkFreqHz; + NvU32 hTotal = pDpModesetData->SetRasterSizeWidth; + + NvU32 pClkCap = GPU_REG_RD32(pGpu, NV_PDISP_FE_SW_HEAD_CLK_CAP(headIndex)); + NvU32 pClkMax = DRF_VAL(_PDISP, _FE_SW_HEAD_CLK_CAP, _PCLK_MAX, pClkCap); + NvU32 pClkMin = DRF_VAL(_PDISP, _FE_SW_HEAD_CLK_CAP, _PCLK_MIN, pClkCap); + + NV_ASSERT_OR_RETURN((PClkFactor != NULL && rgPacketMode != NULL), NV_ERR_INVALID_ARGUMENT); + + // Init the value to 1; + *PClkFactor = 1; + *rgPacketMode = RG_PACKET_MODE_SINGLE; + if (pDpModesetData->colorFormat == dpColorFormat_YCbCr420) + *PClkFactor = 2; + + if ((((hTotal / *PClkFactor) % 4) == 0) && + ((rawPixelFreq / (*PClkFactor * 4)) > (pClkMin * 1000000))) + { + *PClkFactor <<= 2; + *rgPacketMode = RG_PACKET_MODE_QUAD; + } + else if ((((hTotal / *PClkFactor) % 2) == 0) && + ((rawPixelFreq / (*PClkFactor * 2)) > (pClkMin * 1000000))) + { + *PClkFactor <<= 1; + *rgPacketMode = RG_PACKET_MODE_DOUBLE; + } + + if (((rawPixelFreq / *PClkFactor) > (pClkMax * 10000000)) && + (pDpModesetData->bDscEnable && pDpModesetData->bpp <= 16)) + { + *PClkFactor <<= 1; + } + + return NV_OK; +} + +static NvU32 _calcWatermark8b10bSST +( + NvU32 pClkHz, + NvU64 linkRateHz, + NvU32 laneCount, + NvBool bFecEnabled, + NvU32 scaledBpp, + NvBool bIsDscEnabled +) +{ + NvU32 linkRateMHz = linkRateHz / 1000000; + NvU32 pClkKhz = pClkHz / 1000; + + NvS32 ratio_x1000 = 1000; + NvU32 w0 = (8 / laneCount); + NvS32 s = 0U; + NvS32 watermark = 0U; + NvU32 bppScaler = 1U; + + if (bFecEnabled) + { + s = (laneCount == 1) ? 18 : 13; + } + else + { + s = 3 - w0; + } + + if (bIsDscEnabled) + { + bppScaler = BPPX16_SCALER; + } + + ratio_x1000 = ((NvS64) pClkKhz * scaledBpp * (bFecEnabled ? 
1024 : 1000)) /
+                  (linkRateMHz * laneCount * (bppScaler * 1000 * DP1_SYMBOL_SIZE / DP1_CODING_SIZE));
+
+    // 15625 = 1000 * 1000 / 64
+    watermark = NV_CEIL(3 * 15625 * scaledBpp, 8 * bppScaler * laneCount);
+    watermark += (ratio_x1000 * (1000 - ratio_x1000));
+    watermark = watermark * 1000 + ratio_x1000 * s * 15625;
+    watermark = NV_CEIL(watermark, 15625 * 1000);
+    watermark += (w0 + 3);
+
+    return (NvU32) watermark;
+}
+
+static NvU64 _convertLinkRateToDataRate
+(
+    NvU32  linkRate10M,
+    NvBool bIs128b132bChannelCoding,
+    NvBool bEnableFEC
+)
+{
+    NvU64 dataRateHz = 0U;
+    NvU64 minRate = DP_LINK_RATE_10M_TO_BPS(((NvU64)linkRate10M));
+
+    if (bIs128b132bChannelCoding)
+    {
+        // 128b/132b Total Data BW efficiency considers FEC overhead
+        dataRateHz = DP_LINK_RATE_BITSPS_TO_BYTESPS(DATA_BW_EFF_128B_132B(minRate));
+    }
+    else
+    {
+        if (bEnableFEC)
+        {
+            minRate = minRate - 3 * minRate / 100;
+        }
+        else
+        {
+            minRate = minRate - 6 * minRate / 1000;
+        }
+        dataRateHz = DP_LINK_RATE_BITSPS_TO_BYTESPS(OVERHEAD_8B_10B(minRate));
+    }
+    return dataRateHz;
+}
+
+/*!
+ * @brief Get the LOADV counter
+ *
+ * @param[in] pGpu        OBJGPU pointer
+ * @param[in] pKernelHead KernelHead object pointer
+ *
+ * @return the current LOADV counter
+ */
+NvU32
+kheadGetLoadVCounter_v05_01
+(
+    OBJGPU     *pGpu,
+    KernelHead *pKernelHead
+)
+{
+    return GPU_REG_RD32(pGpu, NV_PDISP_RG_IN_LOADV_COUNTER(pKernelHead->PublicId));
+}
+
+NvU32
+kdispGetPBTargetAperture_v05_01
+(
+    OBJGPU        *pGpu,
+    KernelDisplay *pKernelDisplay,
+    NvU32          memAddrSpace,
+    NvU32          cacheSnoop
+)
+{
+    NvU32 pbTargetAperture = PHYS_NVM;
+
+    if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_DISP_PB_REQUIRES_SMMU_BYPASS))
+    {
+        pbTargetAperture = IOVA;
+    }
+    else
+    {
+        pbTargetAperture = PHYS_NVM;
+    }
+
+    return pbTargetAperture;
+}
+
+/*!
+ * @brief Retrigger the display interrupt for the given retrigger index, so a
+ *        still-pending event is re-evaluated by the interrupt logic.
+ *
+ * @param[in] pGpu           OBJGPU pointer
+ * @param[in] pKernelDisplay KernelDisplay pointer
+ * @param[in] index          Retrigger register index
+ */
+void kdispIntrRetrigger_v05_01
+(
+    OBJGPU            *pGpu,
+    KernelDisplay     *pKernelDisplay,
+    NvU32              index,
+    THREAD_STATE_NODE *pThreadState
+)
+{
+    NvU32 val = 0;
+    val = FLD_SET_DRF(_PDISP_FE, _INTR_RETRIGGER, _TRIGGER, _TURE, val);
+    GPU_REG_WR32_EX(pGpu, NV_PDISP_FE_INTR_RETRIGGER(index), val, pThreadState);
+}
+
+
+NV_STATUS
+kdispComputeDpModeSettings_v05_01
+(
+    OBJGPU        *pGpu,
+    KernelDisplay *pKernelDisplay,
+    NvU32          headIndex,
+    DPMODESETDATA *pDpModesetData,
+    DPIMPINFO     *dpInfo
+)
+{
+    NvU32 hActive = pDpModesetData->SetRasterBlankStartX -
+                    pDpModesetData->SetRasterBlankEndX;
+
+    NvU32 hBlank = pDpModesetData->SetRasterSizeWidth - hActive;
+
+    NvU32 effectiveBppxScaler = 0U;
+    NvU32 symbolSize = GET_SYMBOL_SIZE(pDpModesetData->bDP2xChannelCoding);
+    NvU64 linkFreqHz = pDpModesetData->dp2LinkBw ?
+                       DP_LINK_RATE_10M_TO_BPS((NvU64)pDpModesetData->dp2LinkBw) :
+                       DP_LINK_RATE_270M_TO_BPS((NvU64)pDpModesetData->linkBw);
+
+    NvU32 sliceCount = 0U;
+    NvU32 chunkSize = 0U;
+    NvU32 sliceWidth = 0U;
+
+    NvU32 rgPacketMode = 0U;
+    NvU32 pclkFactor = 0U;
+
+    // bpp is multiplied by 16 when DSC is enabled
+    NvU32 dscFactor = pDpModesetData->bDscEnable ?
16U : 1U; + NvU64 linkDataRate = 0U; + NvS32 hBlankSym = 0U; + NvS32 vBlankSym = 0U; + NvU32 msaSym = 0U; + + dpInfo->bEnhancedFraming = pDpModesetData->bEnhancedFraming; + + if (!linkFreqHz || !pDpModesetData->laneCount|| !pDpModesetData->PClkFreqHz) + { + NV_ASSERT(linkFreqHz && pDpModesetData->laneCount && pDpModesetData->PClkFreqHz); + } + + // Check if we have a valid laneCount as currently we support only upto 4-lanes + if (!IS_VALID_LANECOUNT(pDpModesetData->laneCount)) + { + DBG_BREAKPOINT(); + } + + dpInfo->tuSize = 64; + + // Init dpInfo here + dpInfo->vBlankSym = 0U; + dpInfo->twoChannelAudioSymbols = 0U; + dpInfo->eightChannelAudioSymbols = 0U; + + if (pDpModesetData->bDscEnable) + { + // Default to 4 if client does not speicify the slice count. + sliceCount = (pDpModesetData->sliceCount ? pDpModesetData->sliceCount : 4U); + sliceWidth = (NvU32)NV_CEIL(pDpModesetData->SetRasterSizeWidth, sliceCount); + chunkSize = (NvU32)NV_CEIL(pDpModesetData->bpp * sliceWidth, 8U * dscFactor); + } + linkDataRate = _convertLinkRateToDataRate(pDpModesetData->dp2LinkBw, + pDpModesetData->bDP2xChannelCoding, + pDpModesetData->bFecEnable); + dpInfo->linkTotalDataRate = 8 * linkDataRate * pDpModesetData->laneCount; + + // SST + if (!(pDpModesetData->bMultiStream)) + { + if (pDpModesetData->bDscEnable) + { + effectiveBppxScaler = pDpModesetData->bpp * BPPX16_SCALER; + } + else + { + if (pDpModesetData->bDisableEffBppSST8b10b && !pDpModesetData->bDP2xChannelCoding) + { + effectiveBppxScaler = pDpModesetData->bpp * BPPX256_SCALER; + } + else + { + effectiveBppxScaler = _calcEffectiveBppxScalerNonDsc(hActive, pDpModesetData->bpp, pDpModesetData->bDP2xChannelCoding, + pDpModesetData->bMultiStream, pDpModesetData->laneCount); + } + } + } + else + { + + effectiveBppxScaler = pDpModesetData->bpp; + } + + dpInfo->effectiveBppxScaler = effectiveBppxScaler; + + if (pDpModesetData->bDP2xChannelCoding || pDpModesetData->bMultiStream) + { + + dpInfo->minHBlank = _calcDpMinHBlankMST(effectiveBppxScaler, pDpModesetData->bDP2xChannelCoding); + + if (pDpModesetData->bMultiStream) + { + dpInfo->hBlankSym = (hBlank * pDpModesetData->bpp) / (LOGICAL_LANES * symbolSize * BPPX256_SCALER); + } + else + { + dpInfo->hBlankSym = (hBlank * pDpModesetData->bpp) / (LOGICAL_LANES * symbolSize * dscFactor); + } + + if (pDpModesetData->bDP2xChannelCoding) + { + // exclude BS/BE + dpInfo->hBlankSym -= 2U; + } + else + { + // Better to leave some margin for hblank + dpInfo->hBlankSym -= 6U; + } + + if (pDpModesetData->twoChannelAudioHz != 0) + { + NvU32 samples_x1000 = (NvU32)NV_CEIL((NvU64)pDpModesetData->twoChannelAudioHz * + pDpModesetData->SetRasterSizeWidth, + pDpModesetData->PClkFreqHz * 1000); + + dpInfo->twoChannelAudioSymbols = _getDpAudioSymbolMST(TWO_CHANNELS, samples_x1000, + pDpModesetData->bDP2xChannelCoding); + } + + // 3-8 channel case + if (pDpModesetData->eightChannelAudioHz != 0) + { + NvU32 samples_x1000 = (NvU32)NV_CEIL((NvU64)pDpModesetData->eightChannelAudioHz * + pDpModesetData->SetRasterSizeWidth, + pDpModesetData->PClkFreqHz * 1000); + + dpInfo->eightChannelAudioSymbols = _getDpAudioSymbolMST(EIGHT_CHANNELS, samples_x1000, + pDpModesetData->bDP2xChannelCoding); + } + } + else + { + // 8b/10b SST. + + // (VBID+MVID+MAUD) + const NvU32 symbolCount = 3U; + // Per spec, each symbol will be repeated 4 times. 
+ const NvU32 repeatCount = 4U; + // enhanced_framing and 3cycle dummy + const NvU32 enhancedFramingSymbols = 8U; + NvU32 blankingBits = symbolCount * repeatCount * DP1_SYMBOL_SIZE + + enhancedFramingSymbols * DP1_SYMBOL_SIZE * pDpModesetData->laneCount; + + if (pDpModesetData->bDscEnable) + { + blankingBits += DP1_SYMBOL_SIZE * pDpModesetData->laneCount; + blankingBits += sliceCount * (chunkSize * 8 - (sliceWidth * pDpModesetData->bpp / dscFactor)); + blankingBits += + NV_CEIL(chunkSize, pDpModesetData->laneCount) * pDpModesetData->laneCount * 8 - chunkSize * 8; + } + else + { + NvU32 HActivePerLane = NV_CEIL(hActive, pDpModesetData->laneCount); + //padding + blankingBits += NV_CEIL(HActivePerLane * pDpModesetData->bpp * BPPX256_SCALER, DP1_SYMBOL_SIZE * BPPX256_SCALER) * DP1_SYMBOL_SIZE * pDpModesetData->laneCount - + (hActive * pDpModesetData->bpp); //+blankingBits_nonDSC_padding + } + + // 1006 / 1000 for downspread. + dpInfo->minHBlank = (NvU32)NV_CEIL((NvU64)blankingBits * pDpModesetData->PClkFreqHz * DP1_CODING_SIZE * 1006, + (NvU64)DP1_SYMBOL_SIZE * pDpModesetData->laneCount * linkFreqHz * 1000); + // 1-2 channel case + if (pDpModesetData->twoChannelAudioHz != 0) + { + NvU32 samplesPerLine_x1000 = (NvU32)NV_CEIL((NvU64)pDpModesetData->twoChannelAudioHz * + pDpModesetData->SetRasterSizeWidth, + pDpModesetData->PClkFreqHz); + + dpInfo->twoChannelAudioSymbols = + _getDpAudioSymbolSST(TWO_CHANNELS, samplesPerLine_x1000, pDpModesetData->laneCount, hActive); + } + + // 3-8 channel case + if (pDpModesetData->eightChannelAudioHz != 0) + { + NvU32 samplesPerLine_x1000 = (NvU32)NV_CEIL((NvU64)pDpModesetData->eightChannelAudioHz * + pDpModesetData->SetRasterSizeWidth, + pDpModesetData->PClkFreqHz); + + dpInfo->eightChannelAudioSymbols = + _getDpAudioSymbolSST(EIGHT_CHANNELS, samplesPerLine_x1000, pDpModesetData->laneCount, hActive); + } + + dpInfo->hBlankSym = 0; + + if (_calcPClkFactorAndRgPacketMode(pGpu, pDpModesetData, headIndex, + &pclkFactor, &rgPacketMode) != NV_OK) + { + pclkFactor = DEFAULT_PCLK_FACTOR_VALUE; + rgPacketMode = DEFAULT_RG_PACKET_MODE; + } + + hBlankSym = (hBlank - dpInfo->minHBlank - 3 * (1 << pclkFactor)) * HBLANK_SYMBOLS_SCALE; + if (pDpModesetData->bDscEnable) + { + hBlankSym -= ((1 << pclkFactor) * 32 * dscFactor * HBLANK_SYMBOLS_SCALE / pDpModesetData->bpp) - 1; + } + hBlankSym = hBlankSym * linkFreqHz * 994 / (DP1_CODING_SIZE * HBLANK_SYMBOLS_SCALE * pDpModesetData->PClkFreqHz * 1000); + if (pDpModesetData->bFecEnable) + { + NvU32 totalHBlankSymbols = + (NvU32)NV_CEIL((NvU64)(hBlank * linkFreqHz * 994), + ((NvU64)DP1_CODING_SIZE * pDpModesetData->PClkFreqHz * 1000)); + hBlankSym -= FEC_PARITY_SYM(pDpModesetData->laneCount, totalHBlankSymbols); + hBlankSym -= 1; + hBlankSym -= 3; + hBlankSym -= 3; + + } + + // Bug 5042450 clamp min_hBlankSym value + NvS32 hBlankSymMin = pDpModesetData->laneCount==4 ? 12 : ( pDpModesetData->laneCount==2 ? 
19 : 43 );
+    if (hBlankSym < hBlankSymMin)
+    {
+        hBlankSym = hBlankSymMin;
+    }
+
+    if (hBlankSym > 0)
+    {
+        dpInfo->hBlankSym = hBlankSym;
+    }
+
+    // Bug 4300218: program vBlankSym based on the algorithm described in the bug.
+    vBlankSym = (NvS32)(((hActive - 3*pclkFactor) * linkFreqHz * 994) / (pDpModesetData->PClkFreqHz*1000));
+    msaSym = (36 / pDpModesetData->laneCount) + 3;
+    vBlankSym -= msaSym;
+
+    if (pDpModesetData->bFecEnable)
+    {
+        vBlankSym -= FEC_PARITY_SYM(LOGICAL_LANES, vBlankSym);
+    }
+
+    // Add some margin.
+    vBlankSym -= 3;
+
+    if (vBlankSym > 0)
+    {
+        dpInfo->vBlankSym = vBlankSym;
+    }
+
+    dpInfo->waterMark = _calcWatermark8b10bSST(pDpModesetData->PClkFreqHz,
+                                               linkFreqHz,
+                                               pDpModesetData->laneCount,
+                                               pDpModesetData->bFecEnable,
+                                               pDpModesetData->bpp,
+                                               pDpModesetData->bDscEnable);
+    }
+
+    return NV_OK;
+}
+
+
+/*!
+ * @brief Get the vertical crash lock counter
+ *
+ * @param[in] pGpu        OBJGPU pointer
+ * @param[in] pKernelHead KernelHead object pointer
+ *
+ * @return the current crash lock counter
+ */
+NvU32
+kheadGetCrashLockCounterV_v05_01
+(
+    OBJGPU     *pGpu,
+    KernelHead *pKernelHead
+)
+{
+    NvU32 counter = GPU_REG_RD32(pGpu, NV_PDISP_RG_CRASHLOCK_COUNTER(pKernelHead->PublicId));
+
+    return DRF_VAL(_PDISP, _RG_CRASHLOCK_COUNTER, _V, counter);
+}
+
diff --git a/src/nvidia/src/kernel/gpu/disp/disp_capabilities.c b/src/nvidia/src/kernel/gpu/disp/disp_capabilities.c
new file mode 100644
index 0000000..fd2b8fd
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/disp/disp_capabilities.c
@@ -0,0 +1,77 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************************************************************
+*
+* Description:
+* This file contains functions managing DispCapabilities class.
+* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "resserv/resserv.h" + +#include "gpu/gpu.h" +#include "gpu/disp/disp_capabilities.h" +#include "gpu/disp/kern_disp.h" + +NV_STATUS +dispcapConstruct_IMPL +( + DispCapabilities *pDispCapabilities, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispCapabilities); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + // Set display caps RegBase offsets + kdispGetDisplayCapsBaseAndSize_HAL(pGpu, pKernelDisplay, + &pDispCapabilities->ControlOffset, + &pDispCapabilities->ControlLength); + + return NV_OK; +} + +NV_STATUS +dispcapGetRegBaseOffsetAndSize_IMPL +( + DispCapabilities *pDispCapabilities, + OBJGPU *pGpu, + NvU32 *pOffset, + NvU32 *pSize +) +{ + if (pOffset) + { + *pOffset = pDispCapabilities->ControlOffset; + } + if (pSize) + { + *pSize = pDispCapabilities->ControlLength; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/disp/disp_channel.c b/src/nvidia/src/kernel/gpu/disp/disp_channel.c new file mode 100644 index 0000000..b317b99 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/disp_channel.c @@ -0,0 +1,863 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing DispChannel and its derived classes. 
+* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "resserv/resserv.h" +#include "core/locks.h" +#include "rmapi/rs_utils.h" + +#include "gpu/device/device.h" +#include "gpu/gpu_resource.h" +#include "gpu/disp/disp_channel.h" +#include "gpu/disp/disp_objs.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/inst_mem/disp_inst_mem.h" +#include "gpu/mem_mgr/context_dma.h" +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" +#include "platform/sli/sli.h" +#include "vgpu/rpc.h" + +static void +dispchnParseAllocParams +( + DispChannel *pDispChannel, + void *pAllocParams, + NvU32 *pChannelInstance, + NvHandle *pHObjectBuffer, + NvU32 *pInitialGetPutOffset, + NvBool *pAllowGrabWithinSameClient, + NvBool *pConnectPbAtGrab, + ChannelPBSize *channelPBSize, + NvU32 *pSubDeviceId +) +{ + NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *pDmaChannelAllocParams = NULL; + NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *pPioChannelAllocParams = NULL; + + *pAllowGrabWithinSameClient = NV_FALSE; + *pConnectPbAtGrab = NV_FALSE; + + if (pDispChannel->bIsDma) + { + pDmaChannelAllocParams = pAllocParams; + *pChannelInstance = pDmaChannelAllocParams->channelInstance; + *pHObjectBuffer = pDmaChannelAllocParams->hObjectBuffer; + *pInitialGetPutOffset = pDmaChannelAllocParams->offset; + *channelPBSize = pDmaChannelAllocParams->channelPBSize; + + if (FLD_TEST_DRF(50VAIO_CHANNELDMA_ALLOCATION, _FLAGS, + _CONNECT_PB_AT_GRAB, _YES, + pDmaChannelAllocParams->flags)) + { + *pConnectPbAtGrab = NV_TRUE; + } + + if (pDmaChannelAllocParams->hObjectNotify != 0) + { + NV_PRINTF(LEVEL_WARNING, "Error notifier parameter is not used in Display channel allocation.\n"); + } + + *pSubDeviceId = pDmaChannelAllocParams->subDeviceId; + } + else + { + pPioChannelAllocParams = pAllocParams; + *pChannelInstance = pPioChannelAllocParams->channelInstance; + *pHObjectBuffer = 0; // No one should look at this. So, 0 should be fine. + *pInitialGetPutOffset = 0; // No one should look at this. So, 0 should be fine. + *channelPBSize = 0; // No one should look at this. So, 0 should be fine. 
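+        //
+        // PIO channels have no pushbuffer: the client programs the channel
+        // directly through its mapped control region, so the DMA-only
+        // parameters above are zeroed and are never consumed downstream.
+        //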
+
+        if (pPioChannelAllocParams->hObjectNotify != 0)
+        {
+            NV_PRINTF(LEVEL_WARNING, "Error notifier parameter is not used in Display channel allocation.\n");
+        }
+    }
+}
+
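The _CONNECT_PB_AT_GRAB test in dispchnParseAllocParams above relies on NVIDIA's DRF family of bit-field macros, which come from generated hardware headers. The standalone sketch below models only the idea -- extracting a named field from a flags word and comparing it against a named value. The field position and width are illustrative assumptions, not the real NV50VAIO flags layout.

/* Simplified model of an FLD_TEST_DRF-style check (field placement is a
 * hypothetical stand-in, not the real NV50VAIO_CHANNELDMA flags layout). */
#include <stdio.h>

#define FLAGS_CONNECT_PB_AT_GRAB_SHIFT 0
#define FLAGS_CONNECT_PB_AT_GRAB_MASK  (1u << FLAGS_CONNECT_PB_AT_GRAB_SHIFT)
#define FLAGS_CONNECT_PB_AT_GRAB_YES   1u

static int testConnectPbAtGrab(unsigned flags)
{
    /* FLD_TEST_DRF(d, r, f, v, flags): extract field f, compare to value v. */
    return ((flags & FLAGS_CONNECT_PB_AT_GRAB_MASK) >>
            FLAGS_CONNECT_PB_AT_GRAB_SHIFT) == FLAGS_CONNECT_PB_AT_GRAB_YES;
}

int main(void)
{
    printf("connectPbAtGrab = %s\n",
           testConnectPbAtGrab(0x1) ? "NV_TRUE" : "NV_FALSE");
    return 0;
}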
+NV_STATUS
+dispchnConstruct_IMPL
+(
+    DispChannel                  *pDispChannel,
+    CALL_CONTEXT                 *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams,
+    NvU32                         isDma
+)
+{
+    OBJGPU        *pGpu = GPU_RES_GET_GPU(pDispChannel);
+    KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    NV_STATUS      rmStatus = NV_OK;
+    NvU32          channelInstance;
+    NvHandle       hObjectBuffer;
+    NvBool         bIsDma = !!isDma;
+    NvU32          initialGetPutOffset;
+    NvBool         allowGrabWithinSameClient;
+    NvBool         connectPbAtGrab;
+    DISPCHNCLASS   internalDispChnClass;
+    void          *pAllocParams = pParams->pAllocParams;
+    RsResourceRef *pParentRef = RES_GET_REF(pDispChannel)->pParentRef;
+    DispObject    *pDispObject = dynamicCast(pParentRef->pResource, DispObject);
+    ContextDma    *pBufferContextDma = NULL;
+    NvU32          hClass = RES_GET_EXT_CLASS_ID(pDispChannel);
+    ChannelPBSize  channelPBSize;
+    NvU32          subDeviceId = 0U;
+    RsClient      *pClient;
+    NvHandle       hChannel;
+    NvU32          dispChannelNum;
+
+    NV_ASSERT_OR_RETURN(pDispObject, NV_ERR_INVALID_OBJECT_HANDLE);
+
+    if (pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Failure allocating display class 0x%08x: Only root(admin)/kernel clients are allowed\n",
+                  pParams->externalClassId);
+
+        //
+        // GPUSWSEC-1560 introduced a central object privilege check in RS. Please mark derived external classes
+        // of DispChannel privileged in their RS_ENTRY. Since DispChannel doesn't have an external class of its own
+        // and is used as a base class, leaving this check inline to catch future derivations.
+        //
+        osAssertFailed();
+
+        return NV_ERR_INSUFFICIENT_PERMISSIONS;
+    }
+
+    //
+    // Make sure this channel class is supported on this chip.
+    // We need the check below since the switch in RmAlloc doesn't tell us
+    // whether the current chip supports the class.
+    //
+    if (!gpuIsClassSupported(pGpu, RES_GET_EXT_CLASS_ID(pDispChannel)))
+    {
+        NV_PRINTF(LEVEL_ERROR, "Unsupported class 0x%x\n",
+                  RES_GET_EXT_CLASS_ID(pDispChannel));
+        return NV_ERR_INVALID_CLASS;
+    }
+
+    // Move params into RM's address space
+    pDispChannel->pDispObject = pDispObject;
+    pDispChannel->bIsDma = bIsDma;
+    dispchnParseAllocParams(pDispChannel, pAllocParams,
+                            &channelInstance,
+                            &hObjectBuffer,
+                            &initialGetPutOffset,
+                            &allowGrabWithinSameClient,
+                            &connectPbAtGrab,
+                            &channelPBSize,
+                            &subDeviceId);
+
+    rmStatus = kdispGetIntChnClsForHwCls(pKernelDisplay,
+                                         RES_GET_EXT_CLASS_ID(pDispChannel),
+                                         &internalDispChnClass);
+    if (rmStatus != NV_OK)
+        return rmStatus;
+
+    if (internalDispChnClass == dispChnClass_Any)
+    {
+        //
+        // The ANY channel is a kernel-only channel; Physical RM doesn't need
+        // any information about it. Return here, as the ANY channel is now
+        // fully constructed.
+        //
+        pDispChannel->DispClass = internalDispChnClass;
+        pDispChannel->InstanceNumber = channelInstance;
+        return NV_OK;
+    }
+
+    API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE);
+    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY);
+    {
+        rmStatus = kdispSetPushBufferParamsToPhysical_HAL(pGpu,
+                                                          pKernelDisplay,
+                                                          pDispChannel,
+                                                          hObjectBuffer,
+                                                          pBufferContextDma,
+                                                          hClass,
+                                                          channelInstance,
+                                                          internalDispChnClass,
+                                                          channelPBSize,
+                                                          subDeviceId);
+        if (rmStatus != NV_OK)
+            return rmStatus;
+    }
+    SLI_LOOP_END
+
+    // Acquire the underlying HW resources
+    rmStatus = kdispAcquireDispChannelHw_HAL(pKernelDisplay,
+                                             pDispChannel,
+                                             channelInstance,
+                                             hObjectBuffer,
+                                             initialGetPutOffset,
+                                             allowGrabWithinSameClient,
+                                             connectPbAtGrab);
+    if (rmStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "disp channel[0x%x] alloc failed. Return status = 0x%x\n",
+                  channelInstance, rmStatus);
+
+        return rmStatus;
+    }
+
+    // Channel allocation is successful, initialize new channel's data structures
+    pDispChannel->DispClass = internalDispChnClass;
+    pDispChannel->InstanceNumber = channelInstance;
+    dispchnSetRegBaseOffsetAndSize(pDispChannel, pGpu);
+
+    // Map memory for parent GPU
+    rmStatus = kdispMapDispChannel_HAL(pKernelDisplay, pDispChannel);
+
+    // setup to return pControl to client
+    if (pDispChannel->bIsDma)
+    {
+        NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *pDmaChannelAllocParams = pAllocParams;
+        pDmaChannelAllocParams->pControl = pDispChannel->pControl;
+    }
+    else
+    {
+        NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *pPioChannelAllocParams = pAllocParams;
+        pPioChannelAllocParams->pControl = pDispChannel->pControl;
+    }
+
+    if (rmStatus == NV_OK && (pKernelDisplay->pClientChannelTable != NULL))
+    {
+        pClient = RES_GET_CLIENT(pDispChannel);
+        hChannel = RES_GET_HANDLE(pDispChannel);
+
+        rmStatus = kdispGetChannelNum_HAL(pKernelDisplay, internalDispChnClass, channelInstance, &dispChannelNum);
+        if (rmStatus != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "kdispGetChannelNum_HAL failed!\n");
+            return rmStatus;
+        }
+
+        pKernelDisplay->pClientChannelTable[dispChannelNum].pClient = pClient;
+        pKernelDisplay->pClientChannelTable[dispChannelNum].hChannel = hChannel;
+        pKernelDisplay->pClientChannelTable[dispChannelNum].bInUse = NV_TRUE;
+
+        NV_PRINTF(LEVEL_INFO, "Mapped hclient: %p hchannel: 0x%x channelNum: 0x%x\n",
+                  pClient, hChannel, dispChannelNum);
+    }
+
+    return rmStatus;
+}
+
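On success, dispchnConstruct_IMPL above records the owning client and channel handle in pKernelDisplay->pClientChannelTable, and dispchnDestruct_IMPL clears the same slot later. Below is a minimal standalone model of that slot bookkeeping; the slot shape is inferred from the fields used in this file, not the real KernelDisplay definition.

/* Standalone model of the client-channel table bookkeeping (inferred shape). */
#include <stdio.h>
#include <stdbool.h>

typedef struct {
    void        *pClient;    /* owning client */
    unsigned int hChannel;   /* client's channel handle */
    bool         bInUse;
} ChannelSlot;

static ChannelSlot table[8];   /* one entry per hardware channel number */

static void registerChannel(unsigned num, void *pClient, unsigned hChannel)
{
    table[num] = (ChannelSlot){ .pClient = pClient, .hChannel = hChannel, .bInUse = true };
}

static void unregisterChannel(unsigned num)
{
    table[num] = (ChannelSlot){ 0 };   /* mirrors the destruct-time reset */
}

int main(void)
{
    int dummyClient;
    registerChannel(3, &dummyClient, 0xcafe0001);
    printf("slot 3 in use: %d\n", table[3].bInUse);
    unregisterChannel(3);
    printf("slot 3 in use: %d\n", table[3].bInUse);
    return 0;
}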
+//
+// Performs grab operation for a channel.
+//
+// Pre-Volta Linux swapgroups is the only remaining use of channel grabbing.
+// Bug 2869820 is tracking the transition of swapgroups away from requiring
+// this RM feature.
+//
+NV_STATUS
+dispchnGrabChannel_IMPL
+(
+    DispChannel *pDispChannel,
+    NvHandle     hClient,
+    NvHandle     hParent,
+    NvHandle     hChannel,
+    NvU32        hClass,
+    void        *pAllocParams
+)
+{
+    NV_STATUS      rmStatus = NV_OK;
+    OBJGPU        *pGpu = GPU_RES_GET_GPU(pDispChannel);
+    KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    NvU32          channelInstance;
+    NvHandle       hObjectBuffer;
+    NvU32          initialGetPutOffset;
+    NvBool         allowGrabWithinSameClient;
+    NvBool         connectPbAtGrab;
+    ContextDma    *pBufferContextDma = NULL;
+    DISPCHNCLASS   internalDispChnClass;
+    ChannelPBSize  channelPBSize;
+    NvU32          subDeviceId = 0U;
+
+    if (RES_GET_PARENT_HANDLE(pDispChannel) != hParent)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "disp channel grab failed because of bad display parent 0x%x\n",
+                  hParent);
+        DBG_BREAKPOINT();
+        return NV_ERR_INVALID_OBJECT_PARENT;
+    }
+
+    // Move params into RM's address space
+    dispchnParseAllocParams(pDispChannel, pAllocParams,
+                            &channelInstance,
+                            &hObjectBuffer,
+                            &initialGetPutOffset,
+                            &allowGrabWithinSameClient,
+                            &connectPbAtGrab,
+                            &channelPBSize,
+                            &subDeviceId);
+
+    //
+    // The handle already exists in our DB.
+    // The supplied params must be the same as what we already have with us.
+    //
+    if (RES_GET_EXT_CLASS_ID(pDispChannel) != hClass ||
+        pDispChannel->InstanceNumber != channelInstance)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Information supplied for handle 0x%x doesn't match that in RM's client DB\n",
+                  hChannel);
+        return NV_ERR_INVALID_OBJECT_HANDLE;
+    }
+
+    rmStatus = kdispGetIntChnClsForHwCls(pKernelDisplay,
+                                         hClass,
+                                         &internalDispChnClass);
+    if (rmStatus != NV_OK)
+        return rmStatus;
+
+    API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE);
+    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY);
+    {
+        rmStatus = kdispSetPushBufferParamsToPhysical_HAL(pGpu,
+                                                          pKernelDisplay,
+                                                          pDispChannel,
+                                                          hObjectBuffer,
+                                                          pBufferContextDma,
+                                                          hClass,
+                                                          channelInstance,
+                                                          internalDispChnClass,
+                                                          channelPBSize,
+                                                          subDeviceId);
+        if (rmStatus != NV_OK)
+            return rmStatus;
+    }
+    SLI_LOOP_END
+
+    // Acquire the underlying HW resources
+    rmStatus = kdispAcquireDispChannelHw_HAL(pKernelDisplay,
+                                             pDispChannel,
+                                             channelInstance,
+                                             hObjectBuffer,
+                                             initialGetPutOffset,
+                                             allowGrabWithinSameClient,
+                                             connectPbAtGrab);
+
+    // setup to return pControl to client
+    if (pDispChannel->bIsDma)
+    {
+        NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *pDmaChannelAllocParams = pAllocParams;
+        pDmaChannelAllocParams->pControl = pDispChannel->pControl;
+    }
+    else
+    {
+        NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *pPioChannelAllocParams = pAllocParams;
+        pPioChannelAllocParams->pControl = pDispChannel->pControl;
+    }
+
+    return rmStatus;
+}
+
+NV_STATUS
+dispchnGetRegBaseOffsetAndSize_IMPL
+(
+    DispChannel *pDispChannel,
+    OBJGPU      *pGpu,
+    NvU32       *pOffset,
+    NvU32       *pSize
+)
+{
+    if (pOffset)
+        *pOffset = pDispChannel->ControlOffset;
+
+    if (pSize)
+        *pSize = pDispChannel->ControlLength;
+
+    return NV_OK;
+}
+
+void
+dispchnSetRegBaseOffsetAndSize_IMPL
+(
+    DispChannel *pDispChannel,
+    OBJGPU      *pGpu
+)
+{
+    KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+
+    (void)kdispGetDisplayChannelUserBaseAndSize_HAL(pGpu, pKernelDisplay,
+                                                    pDispChannel->DispClass,
+                                                    pDispChannel->InstanceNumber,
+                                                    &pDispChannel->ControlOffset,
+                                                    &pDispChannel->ControlLength);
+
+    // On Tegra, channel user offsets need to be adjusted by -0x610000;
+    // kdispGetBaseOffset_HAL returns the platform-specific adjustment.
+    pDispChannel->ControlOffset += kdispGetBaseOffset_HAL(pGpu, pKernelDisplay);
+}
+
+/*!
+ * @brief Maps channel user area for parent GPU.
+ */ +NV_STATUS +kdispMapDispChannel_IMPL +( + KernelDisplay *pKernelDisplay, + DispChannel *pDispChannel +) +{ + NV_STATUS rmStatus; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispChannel); + RsClient *pClient = RES_GET_CLIENT(pDispChannel); + RmClient *pRmClient = dynamicCast(pClient, RmClient); + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilege(pRmClient); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + // + // Only need the map for the parent GPU since we require the client to + // use RmMapMemory for subdevice channel mapping. + // + rmStatus = osMapGPU(pGpu, privLevel, + pDispChannel->ControlOffset, + pDispChannel->ControlLength, + NV_PROTECT_READ_WRITE, + &pDispChannel->pControl, + &pDispChannel->pPriv); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "disp channel[0x%x] mapping failed. Return status = 0x%x\n", + pDispChannel->InstanceNumber, rmStatus); + + (void) pRmApi->Free(pRmApi, + RES_GET_CLIENT_HANDLE(pDispChannel), + RES_GET_HANDLE(pDispChannel)); + + return rmStatus; + } + + return NV_OK; +} + +/*! + * @brief Unbinds Context DMAs and unmaps channel user area for the given channel. + */ +void kdispUnbindUnmapDispChannel_IMPL +( + KernelDisplay *pKernelDisplay, + DispChannel *pDispChannel +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispChannel); + RsClient *pClient = RES_GET_CLIENT(pDispChannel); + RmClient *pRmClient = dynamicCast(pClient, RmClient); + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilege(pRmClient); + + // Unbind all ContextDmas from this channel + dispchnUnbindAllCtx(pGpu, pDispChannel); + + // Unmap the channel + osUnmapGPU(pGpu->pOsGpuInfo, privLevel, pDispChannel->pControl, + pDispChannel->ControlLength, pDispChannel->pPriv); +} + +void +dispchnDestruct_IMPL +( + DispChannel *pDispChannel +) +{ + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispChannel); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvU32 dispChannelNum; + + LOCK_METER_DATA(FREE_CHANNEL_DISP, pDispChannel->DispClass, 0, 0); + + // + // Before freeing the CORE channel, make sure all satellite channels are + // torn down. This is currently necessary on UNIX to deal with cases + // where X (i.e. the userspace display driver) terminates before other + // RM clients with satellite channel allocations, e.g. OpenGL clients with + // BASE channel allocations. 
+ // + if ((pDispChannel->DispClass == dispChnClass_Core) && + pKernelDisplay->bWarPurgeSatellitesOnCoreFree) + { + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RS_ITERATOR it; + Device *pDevice; + OBJGPU *pTmpGpu; + DispChannel *pTmpDispChannel; + + NV_ASSERT(gpuIsGpuFullPower(pGpu)); + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + + while (clientRefIterNext(it.pClient, &it)) + { + RS_ITERATOR dispIt; + RsResourceRef *pResourceRef; + DispObject *pDispObject; + + pDevice = dynamicCast(it.pResourceRef->pResource, Device); + + pTmpGpu = GPU_RES_GET_GPU(pDevice); + if (pTmpGpu != pGpu) + continue; + + rmStatus = dispobjGetByDevice(pRsClient, pDevice, &pDispObject); + if (rmStatus != NV_OK) + continue; + + pResourceRef = RES_GET_REF(pDispObject); + + dispIt = clientRefIter(pRsClient, pResourceRef, classId(DispChannel), RS_ITERATE_CHILDREN, NV_FALSE); + + while (clientRefIterNext(dispIt.pClient, &dispIt)) + { + pTmpDispChannel = dynamicCast(dispIt.pResourceRef->pResource, DispChannel); + + if (pTmpDispChannel->DispClass != dispChnClass_Core) + { + rmStatus = pRmApi->Free(pRmApi, + RES_GET_CLIENT_HANDLE(pTmpDispChannel), + RES_GET_HANDLE(pTmpDispChannel)); + + if (rmStatus == NV_OK) + { + // Client's resource map has been modified, re-snap iterators + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + dispIt = clientRefIter(pRsClient, it.pResourceRef, classId(DispChannel), RS_ITERATE_DESCENDANTS, NV_FALSE); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Failed to free satellite DispChannel 0x%x!\n", + RES_GET_HANDLE(pTmpDispChannel)); + } + } + } + } + } + } + + if (pKernelDisplay->pClientChannelTable != NULL) + { + rmStatus = kdispGetChannelNum_HAL(pKernelDisplay, pDispChannel->DispClass, pDispChannel->InstanceNumber, &dispChannelNum); + if (rmStatus == NV_OK) + { + pKernelDisplay->pClientChannelTable[dispChannelNum].pClient = NULL; + pKernelDisplay->pClientChannelTable[dispChannelNum].hChannel = 0; + pKernelDisplay->pClientChannelTable[dispChannelNum].bInUse = NV_FALSE; + } + else + { + NV_PRINTF(LEVEL_WARNING, "Failed to reset clientChannelTable!\n"); + } + } + + // + // Unbind all context dmas bound to this channel, unmap the channel and + // finally release HW resources. + // + kdispUnbindUnmapDispChannel_HAL(pKernelDisplay, pDispChannel); + rmStatus = kdispReleaseDispChannelHw_HAL(pKernelDisplay, pDispChannel); + + if (rmStatus != NV_OK) + { + // Try to avoid returning error codes on free under new resource server design + NV_ASSERT(0); + } +} + +NV_STATUS +dispchnpioConstruct_IMPL +( + DispChannelPio *pDispChannelPio, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +NV_STATUS +dispchndmaConstruct_IMPL +( + DispChannelDma *pDispChannelDma, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +NV_STATUS +dispchnGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hDisplayChannel, + DispChannel **ppDispChannel +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppDispChannel = NULL; + + status = clientGetResourceRef(pClient, hDisplayChannel, &pResourceRef); + if (status != NV_OK) + return status; + + *ppDispChannel = dynamicCast(pResourceRef->pResource, DispChannel); + + return (*ppDispChannel) ? 
NV_OK : NV_ERR_INVALID_OBJECT_HANDLE;
+}
+
+//
+// Bind the DMA context to a display channel
+//
+NV_STATUS
+dispchnBindCtx_IMPL
+(
+    OBJGPU     *pGpu,
+    ContextDma *pContextDma,
+    NvHandle    hChannel
+)
+{
+    RsClient              *pClient = RES_GET_CLIENT(pContextDma);
+    DispChannel           *pDispChannel = NULL;
+    NV_STATUS              rmStatus = NV_OK;
+    KernelDisplay         *pKernelDisplay;
+    DisplayInstanceMemory *pInstMem;
+
+    // Look-up channel
+    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
+        dispchnGetByHandle(pClient, hChannel, &pDispChannel));
+
+    // Ensure ContextDma and DisplayChannel are on the same device
+    NV_CHECK_OR_RETURN(LEVEL_ERROR, pContextDma->pDevice == GPU_RES_GET_DEVICE(pDispChannel),
+                       NV_ERR_INVALID_DEVICE);
+
+    //
+    // Enforce alignment requirements
+    // ISO ctx dmas need to be a multiple of 256B and 256B aligned
+    // NISO ctx dmas need to be a multiple of 4K and 4K aligned
+    // We can only ensure the common minimum -- 4K alignment and 4K size
+    // Limit alignment is handled by rounding up in lower-level code.
+    // This will be in hw in future.
+    //
+    if (pContextDma->pMemDesc->PteAdjust != 0)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "ISO ctx dmas must be 4K aligned. PteAdjust = 0x%x\n",
+                  pContextDma->pMemDesc->PteAdjust);
+        return NV_ERR_INVALID_OFFSET;
+    }
+
+    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
+    pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    pInstMem = KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay);
+
+    rmStatus = instmemBindContextDma(pGpu, pInstMem, pContextDma, pDispChannel);
+    if (rmStatus != NV_OK)
+    {
+        SLI_LOOP_RETURN(rmStatus);
+    }
+
+    SLI_LOOP_END
+
+    return NV_OK;
+}
+
+NV_STATUS
+dispchnUnbindCtx_IMPL
+(
+    OBJGPU     *pGpu,
+    ContextDma *pContextDma,
+    NvHandle    hChannel
+)
+{
+    RsClient              *pClient = RES_GET_CLIENT(pContextDma);
+    DispChannel           *pDispChannel = NULL;
+    NV_STATUS              rmStatus = NV_OK;
+    KernelDisplay         *pKernelDisplay;
+    DisplayInstanceMemory *pInstMem;
+    NvBool                 bFound = NV_FALSE;
+
+    // Look-up channel given by client
+    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
+        dispchnGetByHandle(pClient, hChannel, &pDispChannel));
+
+    // Ensure ContextDma and DisplayChannel are on the same device
+    NV_CHECK_OR_RETURN(LEVEL_ERROR, pContextDma->pDevice == GPU_RES_GET_DEVICE(pDispChannel),
+                       NV_ERR_INVALID_DEVICE);
+
+    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
+    pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    pInstMem = KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay);
+
+    rmStatus = instmemUnbindContextDma(pGpu, pInstMem, pContextDma, pDispChannel);
+    if (rmStatus == NV_OK)
+    {
+        bFound = NV_TRUE;
+    }
+
+    SLI_LOOP_END
+
+    return bFound ? NV_OK : NV_ERR_INVALID_STATE;
+}
+
+/*!
+ * @brief Unbind all ContextDmas from the given channel
+ */
+void
+dispchnUnbindAllCtx_IMPL
+(
+    OBJGPU      *pGpu,
+    DispChannel *pDispChannel
+)
+{
+    KernelDisplay         *pKernelDisplay;
+    DisplayInstanceMemory *pInstMem;
+
+    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
+    pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    pInstMem = KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay);
+
+    instmemUnbindDispChannelContextDmas(pGpu, pInstMem, pDispChannel);
+
+    SLI_LOOP_END
+}
+
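Two numeric checks govern pushbuffer context DMAs: dispchnBindCtx_IMPL above rejects a ctxdma with a nonzero PteAdjust (the byte offset within its first page, i.e. not 4 KiB aligned), and kdispSetPushBufferParamsToPhysical_IMPL below accepts a pushbuffer only if its highest byte offset fits under the ctxdma limit, with channelPBSize encoded so that the byte size is 2^(channelPBSize + 12). A standalone check of both computations, with assumed sample values and NVBIT32(x) modeled as (1u << x):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* 4 KiB alignment: PteAdjust is the offset within the first page. */
    uint64_t physAddr  = 0x12345000u;                 /* assumed buffer address */
    uint32_t pteAdjust = (uint32_t)(physAddr & 0xFFFu);
    printf("PteAdjust = 0x%x (%s)\n", pteAdjust,
           pteAdjust == 0 ? "accepted" : "NV_ERR_INVALID_OFFSET");

    /* channelPBSize encoding: byte size is 2^(channelPBSize + 12). */
    uint32_t channelPBSize = 0;                       /* 0 => 4 KiB */
    uint32_t sizeMinusOne  = (1u << (channelPBSize + 12)) - 1;  /* 0xFFF */
    uint64_t ctxdmaLimit   = 0xFFFu;                  /* ctxdma spans exactly 4 KiB */
    printf("size-1 = 0x%x vs limit 0x%llx: %s\n", sizeMinusOne,
           (unsigned long long)ctxdmaLimit,
           sizeMinusOne <= ctxdmaLimit ? "accepted" : "NV_ERR_INVALID_ARGUMENT");
    return 0;
}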
+/*!
+ * @brief Unbind ContextDma from all display channels
+ */
+void
+dispchnUnbindCtxFromAllChannels_IMPL
+(
+    OBJGPU     *pGpu,
+    ContextDma *pContextDma
+)
+{
+    KernelDisplay         *pKernelDisplay;
+    DisplayInstanceMemory *pInstMem;
+
+    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
+    pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    pInstMem = KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay);
+
+    instmemUnbindContextDmaFromAllChannels(pGpu, pInstMem, pContextDma);
+
+    SLI_LOOP_END
+}
+
+NV_STATUS
+kdispSetPushBufferParamsToPhysical_IMPL
+(
+    OBJGPU        *pGpu,
+    KernelDisplay *pKernelDisplay,
+    DispChannel   *pDispChannel,
+    NvHandle       hObjectBuffer,
+    ContextDma    *pBufferContextDma,
+    NvU32          hClass,
+    NvU32          channelInstance,
+    DISPCHNCLASS   internalDispChnClass,
+    ChannelPBSize  channelPBSize,
+    NvU32          subDeviceId
+)
+{
+    RsClient  *pClient = RES_GET_CLIENT(pDispChannel);
+    RM_API    *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+    NV_STATUS  rmStatus = NV_OK;
+    NvU32      dispChannelNum;
+    NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS pushBufferParams = {0};
+
+    rmStatus = kdispGetChannelNum_HAL(pKernelDisplay, internalDispChnClass, channelInstance, &dispChannelNum);
+    if (rmStatus != NV_OK)
+    {
+        return rmStatus;
+    }
+
+    pushBufferParams.hclass = hClass;
+    pushBufferParams.channelInstance = channelInstance;
+    pushBufferParams.subDeviceId = subDeviceId;
+
+    if (pDispChannel->bIsDma)
+    {
+        rmStatus = ctxdmaGetByHandle(pClient, hObjectBuffer, &pBufferContextDma);
+        if (rmStatus != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "disp channel[0x%x] didn't have valid ctxdma 0x%x\n",
+                      channelInstance, hObjectBuffer);
+            return rmStatus;
+        }
+
+        pushBufferParams.limit = pBufferContextDma->Limit;
+        pushBufferParams.addressSpace = memdescGetAddressSpace(pBufferContextDma->pMemDesc);
+        if ((pushBufferParams.addressSpace != ADDR_SYSMEM) && (pushBufferParams.addressSpace != ADDR_FBMEM))
+        {
+            DBG_BREAKPOINT();
+            return NV_ERR_GENERIC;
+        }
+        // Generate PUSHBUFFER_ADDR (physical address of the pushbuffer).
+        pushBufferParams.physicalAddr = memdescGetPhysAddr(memdescGetMemDescFromGpu(pBufferContextDma->pMemDesc, pGpu), AT_GPU, 0);
+        pushBufferParams.cacheSnoop = pBufferContextDma->CacheSnoop;
+        pushBufferParams.pbTargetAperture = kdispGetPBTargetAperture_HAL(pGpu,
+                                                                         pKernelDisplay,
+                                                                         pushBufferParams.addressSpace,
+                                                                         pushBufferParams.cacheSnoop);
+
+        // The pushbuffer byte size is 2^(channelPBSize + 12); its highest
+        // byte offset must not exceed the ctxdma limit.
+        NvU32 size = NVBIT32(channelPBSize + 12) - 1;
+        if (size <= pushBufferParams.limit)
+        {
+            pushBufferParams.channelPBSize = (NvU32)channelPBSize;
+        }
+        else
+        {
+            return NV_ERR_INVALID_ARGUMENT;
+        }
+        pushBufferParams.valid = NV_TRUE;
+    }
+    else
+    {
+        pushBufferParams.valid = NV_FALSE;
+    }
+
+    rmStatus = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
+                               NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER,
+                               &pushBufferParams, sizeof(pushBufferParams));
+
+    return rmStatus;
+}
diff --git a/src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c b/src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c
new file mode 100644
index 0000000..f0900ae
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c
@@ -0,0 +1,564 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file disp_common_kern_ctrl_minimal.c implements rmctrls which
+ * (a) are declared in disp_common_ctrl_minimal.h; i.e.
+ *     (i) are dispcmnCtrlCmd* functions
+ *     (ii) which are used by Tegra SOC NVDisplay and/or OS layer; and
+ * (b) are implemented in Kernel RM.
+ */
+
+#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0
+#define BPPX256_SCALER 256U
+
+#include "os/os.h"
+#include "core/locks.h"
+#include "gpu/gpu.h"
+#include "gpu/disp/kern_disp.h"
+#include "gpu/disp/disp_objs.h"
+#include "rmapi/rs_utils.h"
+#include "rmapi/rmapi.h"
+#include "gpu/disp/head/kernel_head.h"
+#include "mem_mgr/mem.h"
+#include "platform/sli/sli.h"
+#include "diagnostics/journal.h"
+#include "displayport/displayport.h"
+#include "displayport/displayport2x.h"
+
+NV_STATUS
+dispcmnCtrlCmdSystemGetHotplugUnplugState_IMPL
+(
+    DispCommon *pDispCommon,
+    NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *pHotplugParams
+)
+{
+    NvHandle   hDevice = RES_GET_PARENT_HANDLE(pDispCommon);
+    RM_API    *pRmApi = GPU_GET_PHYSICAL_RMAPI(DISPAPI_GET_GPU(pDispCommon));
+    NvU32      hotPlugMask = 0;
+    NvU32      hotUnplugMask = 0;
+    NV_STATUS  status;
+
+    status = pRmApi->Control(pRmApi,
+                             RES_GET_CLIENT_HANDLE(pDispCommon),
+                             RES_GET_HANDLE(pDispCommon),
+                             NV0073_CTRL_CMD_INTERNAL_GET_HOTPLUG_UNPLUG_STATE,
+                             pHotplugParams,
+                             sizeof(*pHotplugParams));
+
+    hotPlugMask = pHotplugParams->hotPlugMask;
+    hotUnplugMask = pHotplugParams->hotUnplugMask;
+    pHotplugParams->hotPlugMask = 0;
+    pHotplugParams->hotUnplugMask = 0;
+
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    if ((hotPlugMask != 0) || (hotUnplugMask != 0))
+    {
+        RmClient **ppClient;
+        RsClient  *pRsClient;
+
+        for (ppClient = serverutilGetFirstClientUnderLock();
+             ppClient;
+             ppClient = serverutilGetNextClientUnderLock(ppClient))
+        {
+            pRsClient = staticCast(*ppClient, RsClient);
+            DispCommon *pDispCommonLoop;
+
+            dispcmnGetByDevice(pRsClient, hDevice, &pDispCommonLoop);
+            if (pDispCommonLoop == NULL)
+                continue;
+
+            // Accumulate the new events into each client's pending masks.
+            pDispCommonLoop->hotPlugMaskToBeReported |= hotPlugMask & (~(pDispCommonLoop->hotPlugMaskToBeReported & hotUnplugMask));
+            pDispCommonLoop->hotUnplugMaskToBeReported |= hotUnplugMask & (~(pDispCommonLoop->hotUnplugMaskToBeReported & hotPlugMask));
+        }
+    }
+
+    // Report and then drain the calling client's pending masks.
+    pHotplugParams->hotPlugMask = pDispCommon->hotPlugMaskToBeReported;
+    pHotplugParams->hotUnplugMask = pDispCommon->hotUnplugMaskToBeReported;
+    pDispCommon->hotPlugMaskToBeReported = 0;
+    pDispCommon->hotUnplugMaskToBeReported = 0;
+
+    return status;
+}
+
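The control above fans new hotplug events out into every matching client's pending masks and then drains only the calling client's masks. The accumulation expression is easiest to inspect in isolation; the masks below are assumed sample values (display N is bit N):

#include <stdio.h>

int main(void)
{
    unsigned pendingPlug = 0x1, pendingUnplug = 0x0;  /* display 0 plug still unreported */
    unsigned hotPlug = 0x2, hotUnplug = 0x1;          /* new events: 1 plugged, 0 unplugged */

    /* Same expressions as the fan-out loop above. */
    pendingPlug   |= hotPlug   & ~(pendingPlug   & hotUnplug);
    pendingUnplug |= hotUnplug & ~(pendingUnplug & hotPlug);

    printf("pendingPlug = 0x%x, pendingUnplug = 0x%x\n", pendingPlug, pendingUnplug);
    /* => 0x3 and 0x1: both the old plug and the new unplug of display 0 get reported. */
    return 0;
}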
+/*!
+ * @brief Allocate display memory bandwidth.
+ */
+NV_STATUS
+dispcmnCtrlCmdSystemAllocateDisplayBandwidth_IMPL
+(
+    DispCommon *pDispCommon,
+    NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS *pParams
+)
+{
+    OBJGPU        *pGpu;
+    KernelDisplay *pKernelDisplay;
+    NV_STATUS      status;
+    RM_API        *pRmApi;
+    NvU32          hClient;
+    NvU32          hSubdevice;
+
+    // client gave us a subdevice #: get right pGpu for it
+    status = dispapiSetUnicastAndSynchronize_HAL(
+                 staticCast(pDispCommon, DisplayApi),
+                 DISPAPI_GET_GPUGRP(pDispCommon),
+                 &pGpu,
+                 NULL,
+                 pParams->subDeviceInstance);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    status = dispapiValidateRmctrlPriv(pGpu);
+    if (status != NV_OK)
+        return status;
+
+    pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    if (pKernelDisplay->getProperty(pKernelDisplay,
+                                    PDB_PROP_KDISP_IMP_ALLOC_BW_IN_KERNEL_RM_DEF))
+    {
+        // Process the request in Kernel RM.
+        status =
+            kdispArbAndAllocDisplayBandwidth_HAL(pGpu,
+                                                 pKernelDisplay,
+                                                 DISPLAY_ICC_BW_CLIENT_EXT,
+                                                 pParams->averageBandwidthKBPS,
+                                                 pParams->floorBandwidthKBPS);
+    }
+    else
+    {
+        //
+        // In this function, we are processing an
+        // NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH RmCtrl call. But
+        // we are in Kernel RM, and if
+        // PDB_PROP_KDISP_IMP_ALLOC_BW_IN_KERNEL_RM_DEF is false, we want to
+        // process the call in Physical RM. So invoke the
+        // NV0073_CTRL_CMD_SYSTEM_INTERNAL_ALLOCATE_DISPLAY_BANDWIDTH RmCtrl
+        // call, which is configured to run in Physical RM, with the same
+        // parameters.
+        //
+        pRmApi = GPU_GET_PHYSICAL_RMAPI(DISPAPI_GET_GPU(pDispCommon));
+        hClient = RES_GET_CLIENT_HANDLE(pDispCommon);
+        hSubdevice = RES_GET_HANDLE(pDispCommon);
+        status =
+            pRmApi->Control(pRmApi, hClient, hSubdevice,
+                            NV0073_CTRL_CMD_SYSTEM_INTERNAL_ALLOCATE_DISPLAY_BANDWIDTH,
+                            pParams, sizeof(*pParams));
+    }
+    return status;
+}
+
+NV_STATUS
+dispcmnCtrlCmdSystemGetVblankEnable_IMPL
+(
+    DispCommon *pDispCommon,
+    NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS *pVBEnableParams
+)
+{
+    OBJGPU        *pGpu;
+    KernelDisplay *pKernelDisplay;
+    KernelHead    *pKernelHead;
+    NV_STATUS      status = NV_OK;
+
+    // client gave us a subdevice #: get right pGpu for it
+    status = dispapiSetUnicastAndSynchronize_HAL(
+                 staticCast(pDispCommon, DisplayApi),
+                 DISPAPI_GET_GPUGRP(pDispCommon),
+                 &pGpu,
+                 NULL,
+                 pVBEnableParams->subDeviceInstance);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    pKernelHead = KDISP_GET_HEAD(pKernelDisplay, pVBEnableParams->head);
+    NV_ASSERT_OR_RETURN(pKernelHead != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    pVBEnableParams->bEnabled = (kheadReadVblankIntrState(pGpu, pKernelHead) !=
+                                 NV_HEAD_VBLANK_INTR_UNAVAILABLE);
+
+    return status;
+}
+
+NV_STATUS
+dispcmnCtrlCmdDpGenerateFakeInterrupt_IMPL
+(
+    DispCommon *pDispCommon,
+    NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS *pParams
+)
+{
+    OBJGPU   *pGpu = DISPAPI_GET_GPU(pDispCommon);
+    NvU32     displayId = pParams->displayId;
+    NvU32     interruptType = pParams->interruptType;
+    NV_STATUS status = NV_OK;
+
+    // get target pGpu
+    status = dispapiSetUnicastAndSynchronize_HAL(
+                 staticCast(pDispCommon, DisplayApi),
+                 DISPAPI_GET_GPUGRP(pDispCommon),
+                 &pGpu,
+                 NULL,
+                 pParams->subDeviceInstance);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    NV_ASSERT_OR_RETURN(pParams->displayId, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(pGpu, NV_ERR_INVALID_ARGUMENT);
+
+    // Send a DP IRQ (short pulse) to a registered client.
+    if (interruptType == NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_IRQ)
+    {
+        Nv2080DpIrqNotification params = {0};
+        params.displayId = displayId;
+
+        // Check eDP power state; if off, return an error.
+        RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+        NV0073_CTRL_DP_GET_EDP_DATA_PARAMS edpData;
+
+        portMemSet(&edpData, 0, sizeof(edpData));
+
+        status = pRmApi->Control(pRmApi,
+                                 RES_GET_CLIENT_HANDLE(pDispCommon),
+                                 RES_GET_HANDLE(pDispCommon),
+                                 NV0073_CTRL_CMD_DP_GET_EDP_DATA,
+                                 &edpData,
+                                 sizeof(edpData));
+
+        if (status == NV_OK && FLD_TEST_DRF(0073_CTRL_DP, _GET_EDP_DATA, _PANEL_POWER, _OFF, edpData.data))
+        {
+            return NV_ERR_GENERIC;
+        }
+
+        gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_DP_IRQ, &params, sizeof(params), 0, 0);
+    }
+    else if (interruptType == NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PLUG ||
+             interruptType == NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_UNPLUG)
+    {
+        Nv2080HotplugNotification hotplugNotificationParams;
+        portMemSet(&hotplugNotificationParams, 0, sizeof(hotplugNotificationParams));
+
+        if (interruptType == NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PLUG)
+        {
+            hotplugNotificationParams.plugDisplayMask = displayId;
+            hotplugNotificationParams.unplugDisplayMask = 0;
+        }
+        else if (interruptType == NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_UNPLUG)
+        {
+            hotplugNotificationParams.plugDisplayMask = 0;
+            hotplugNotificationParams.unplugDisplayMask = displayId;
+        }
+        gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_HOTPLUG,
+                                &hotplugNotificationParams, sizeof(hotplugNotificationParams), 0, 0);
+    }
+    else
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS dispcmnCtrlCmdVRRSetRgLineActive_IMPL
+(
+    DispCommon *pDispCommon,
+    NV0073_CTRL_CMD_SYSTEM_VRR_SET_RGLINE_ACTIVE_PARAMS *pParams
+)
+{
+    OBJGPU   *pGpu = DISPAPI_GET_GPU(pDispCommon);
+    RsClient *pClient = RES_GET_CLIENT(pDispCommon);
+    NvHandle  hParent = RES_GET_PARENT_HANDLE(pDispCommon);
+    RM_API   *pRmApi = GPU_GET_PHYSICAL_RMAPI(DISPAPI_GET_GPU(pDispCommon));
+    NV_STATUS status = NV_OK;
+
+    // Get the right pGpu from subdevice instance given by client
+    status = dispapiSetUnicastAndSynchronize_HAL(
+                 staticCast(pDispCommon, DisplayApi),
+                 DISPAPI_GET_GPUGRP(pDispCommon),
+                 &pGpu,
+                 NULL,
+                 pParams->subDeviceInstance);
+
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    if (pParams->bEnable)
+    {
+        //
+        // Note: memRegisterWithGsp() is a noop when either (a) we're not
+        // operating as a GSP client, or (b) the hMemory is already registered
+        // with GSP.
+        //
+        // Also, note that we don't explicitly unregister here in the
+        // !pParams->bEnable case: that could unregister the memory out from
+        // under other uses of this hMemory on GSP.
+        // Instead, we rely on the hMemory getting unregistered when the
+        // 'struct Memory' is freed.
+        //
+        NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
+            memRegisterWithGsp(pGpu, pClient, hParent, pParams->hMemory));
+    }
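The comment above documents two properties of memRegisterWithGsp(): registration is idempotent, and unregistration happens only when the underlying Memory object is freed. A small standalone model of that contract; the types and function names here are hypothetical stand-ins, not RM APIs:

#include <stdio.h>
#include <stdbool.h>

typedef struct { bool registeredWithGsp; } MemoryModel;

/* Idempotent: the second and later calls are no-ops. */
static int memRegisterModel(MemoryModel *pMem)
{
    if (pMem->registeredWithGsp)
        return 0;                      /* already registered: nothing to do */
    pMem->registeredWithGsp = true;
    return 0;
}

static void memFreeModel(MemoryModel *pMem)
{
    pMem->registeredWithGsp = false;   /* unregister only at free time */
}

int main(void)
{
    MemoryModel m = { false };
    memRegisterModel(&m);
    memRegisterModel(&m);              /* no-op on repeat */
    printf("registered: %d\n", m.registeredWithGsp);
    memFreeModel(&m);
    printf("registered: %d\n", m.registeredWithGsp);
    return 0;
}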
+
+    return pRmApi->Control(pRmApi,
+                           pClient->hClient,
+                           RES_GET_HANDLE(pDispCommon),
+                           NV0073_CTRL_CMD_INTERNAL_VRR_SET_RGLINE_ACTIVE,
+                           pParams,
+                           sizeof(*pParams));
+}
+
+static NV_STATUS _kheadCheckVblankCountCallback
+(
+    OBJGPU   *pGpu,
+    void     *Object,
+    NvU32     param1,
+    NvV32     BuffNum,
+    NV_STATUS Status
+)
+{
+    KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    KernelHead    *pKernelHead = KDISP_GET_HEAD(pKernelDisplay, param1);
+
+    portSyncSpinlockAcquire(pKernelHead->Vblank.pSpinlock);
+    if ((--pKernelHead->Vblank.VblankCountTimeout) == 0)
+    {
+        pKernelHead->Vblank.Callback.CheckVblankCount.Flags &= ~VBLANK_CALLBACK_FLAG_PERSISTENT;
+    }
+    portSyncSpinlockRelease(pKernelHead->Vblank.pSpinlock);
+
+    return NV_OK;
+}
+
+NV_STATUS
+dispcmnCtrlCmdSystemGetVblankCounter_IMPL
+(
+    DispCommon *pDispCommon,
+    NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS *pVBCounterParams
+)
+{
+    OBJGPU        *pGpu = DISPAPI_GET_GPU(pDispCommon);
+    KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    KernelHead    *pKernelHead;
+    NvU32          flags = (VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_NEXT | VBLANK_CALLBACK_FLAG_PERSISTENT);
+
+    //
+    // Ensure that no GPU locks are held; otherwise we would need to maintain a
+    // strict lock ordering in case some other GPU lock is held by the current
+    // thread. That is unnecessary here: this ctrl is only called by UMD
+    // clients, so no GPU lock should ever be held on entry. If one is held by
+    // the current thread, we just bail out early.
+    //
+    NV_CHECK_OR_RETURN(LEVEL_ERROR, rmGpuLocksGetOwnedMask() == 0, NV_ERR_INVALID_LOCK_STATE);
+
+    pKernelHead = KDISP_GET_HEAD(pKernelDisplay, pVBCounterParams->head);
+    if (pKernelHead == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "invalid head number!\n");
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    // TODO: make the behaviour the same for the monolithic and offload RM cases
+    if (IS_GSP_CLIENT(pGpu))
+    {
+        if (pKernelDisplay->pSharedData == NULL)
+        {
+            NV_PRINTF(LEVEL_ERROR, "no memory allocated for vblank count\n");
+            return NV_ERR_NOT_SUPPORTED;
+        }
+        flags |= VBLANK_CALLBACK_FLAG_LOW_LATENCY;
+    }
+
+    //
+    // This spinlock is required to prevent a possible race with
+    // _kheadCheckVblankCountCallback, given that we don't hold the GPU lock at
+    // this point and VblankCountTimeout can be reset in
+    // _kheadCheckVblankCountCallback in parallel.
+    //
+    portSyncSpinlockAcquire(pKernelHead->Vblank.pSpinlock);
+
+    NvBool bAddCallback = pKernelHead->Vblank.VblankCountTimeout == 0;
+    if (!bAddCallback)
+    {
+        pKernelHead->Vblank.VblankCountTimeout = 60 * VBLANK_INFO_GATHER_KEEPALIVE_SECONDS;
+    }
+
+    //
+    // Safe to release the lock immediately, since the counter is monotonically
+    // decrementing and the only possible race here is adding multiple
+    // callbacks. But that can never happen, since kheadAddVblankCallback
+    // doesn't allow duplicates.
+    //
+    portSyncSpinlockRelease(pKernelHead->Vblank.pSpinlock);
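The handshake above arms the persistent per-vblank callback only when the countdown has already expired, and _kheadCheckVblankCountCallback decrements the countdown once per vblank; at a nominal 60 Hz vblank rate the callback therefore stays armed for roughly VBLANK_INFO_GATHER_KEEPALIVE_SECONDS after the most recent query. A scalar model of that lifetime (the keepalive constant is an assumed stand-in):

#include <stdio.h>

#define KEEPALIVE_SECONDS 5   /* assumed stand-in for VBLANK_INFO_GATHER_KEEPALIVE_SECONDS */

int main(void)
{
    int timeout = 60 * KEEPALIVE_SECONDS;   /* reset on every counter query */
    int vblanks = 0;

    while (timeout > 0)   /* one iteration per vblank callback */
    {
        --timeout;        /* what _kheadCheckVblankCountCallback does */
        ++vblanks;
    }
    printf("callback disarms after %d vblanks (~%d s at 60 Hz)\n",
           vblanks, vblanks / 60);
    return 0;
}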
+
+    if (bAddCallback)
+    {
+        NV_ASSERT_OK_OR_RETURN(rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DISP));
+
+        pKernelHead->Vblank.VblankCountTimeout = 60 * VBLANK_INFO_GATHER_KEEPALIVE_SECONDS;
+
+        pKernelHead->Vblank.Callback.CheckVblankCount.Proc = _kheadCheckVblankCountCallback;
+        pKernelHead->Vblank.Callback.CheckVblankCount.pObject = NULL;
+        pKernelHead->Vblank.Callback.CheckVblankCount.bObjectIsChannelDescendant = NV_FALSE;
+        pKernelHead->Vblank.Callback.CheckVblankCount.Param1 = pKernelHead->PublicId;
+        pKernelHead->Vblank.Callback.CheckVblankCount.Param2 = 0;
+        pKernelHead->Vblank.Callback.CheckVblankCount.Status = NV_OK;
+        pKernelHead->Vblank.Callback.CheckVblankCount.bIsVblankNotifyEnable = NV_TRUE;
+        pKernelHead->Vblank.Callback.CheckVblankCount.Flags = flags;
+
+        kheadAddVblankCallback(pGpu, pKernelHead, &pKernelHead->Vblank.Callback.CheckVblankCount);
+
+        rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
+    }
+
+    if (IS_GSP_CLIENT(pGpu))
+    {
+        if (pVBCounterParams->lowLatencyHint)
+        {
+            pVBCounterParams->verticalBlankCounter = kheadGetVblankLowLatencyCounter_HAL(pKernelHead);
+        }
+        else
+        {
+            pVBCounterParams->verticalBlankCounter = pKernelDisplay->pSharedData->kHeadVblankCount[pKernelHead->PublicId];
+        }
+    }
+    else
+    {
+        pVBCounterParams->verticalBlankCounter = kheadGetVblankNormLatencyCounter_HAL(pKernelHead);
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief prints the LoadV Counter info
+ *
+ * @Parameter pDispCommon [In]
+ * @Parameter pLoadVCounterInfoParams [In, Out]
+ *
+ * @Possible return status:
+ *  NV_OK
+ *      succeed in getting value of LoadVCounter
+ *  NV_ERR_INVALID_ARGUMENT
+ *      wrong parameters passed in
+ *
+ */
+
+NV_STATUS
+dispcmnCtrlCmdSystemGetLoadVCounterInfo_IMPL
+(
+    DispCommon *pDispCommon,
+    NV0073_CTRL_CMD_SYSTEM_GET_LOADV_COUNTER_INFO_PARAMS *pLoadVCounterInfoParams
+)
+{
+    OBJGPU        *pGpu;
+    KernelDisplay *pKernelDisplay;
+    KernelHead    *pKernelHead;
+    NV_STATUS      status = NV_OK;
+
+    status = dispapiSetUnicastAndSynchronize_HAL(
+                 staticCast(pDispCommon, DisplayApi),
+                 DISPAPI_GET_GPUGRP(pDispCommon),
+                 &pGpu,
+                 NULL,
+                 pLoadVCounterInfoParams->subDeviceInstance);
+
+    if (status != NV_OK)
+        return status;
+
+    status = dispapiValidateRmctrlPriv(pGpu);
+    if (status != NV_OK)
+        return status;
+
+    if (pLoadVCounterInfoParams->head >= OBJ_MAX_HEADS)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    pKernelHead = KDISP_GET_HEAD(pKernelDisplay, pLoadVCounterInfoParams->head);
+    NV_ASSERT_OR_RETURN(pKernelHead != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    pLoadVCounterInfoParams->counterValue = kheadGetLoadVCounter_HAL(pGpu, pKernelHead);
+    NV_PRINTF(LEVEL_INFO, "LoadV Counter value fetched from register is: 0x%x\n", pLoadVCounterInfoParams->counterValue);
+
+    return status;
+}
+
+/*!
+ * @brief returns crashlock counter value
+ *
+ * @Parameter pDispCommon [In]
+ * @Parameter pCrashLockCounterInfoParams [In, Out]
+ *
+ * @Possible return status:
+ *  NV_OK
+ *      succeed in getting value of CrashLockCounter
+ *  NV_ERR_INVALID_ARGUMENT
+ *      wrong parameters passed in
+ *
+ */
+
+NV_STATUS
+dispcmnCtrlCmdSystemGetCrashLockCounterInfo_IMPL
+(
+    DispCommon *pDispCommon,
+    NV0073_CTRL_CMD_SYSTEM_GET_CRASH_LOCK_COUNTER_INFO_PARAMS *pCrashLockCounterInfoParams
+)
+{
+    OBJGPU        *pGpu;
+    KernelDisplay *pKernelDisplay;
+    KernelHead    *pKernelHead;
+    NV_STATUS      status = NV_OK;
+
+    status = dispapiSetUnicastAndSynchronize_HAL(
+                 staticCast(pDispCommon, DisplayApi),
+                 DISPAPI_GET_GPUGRP(pDispCommon),
+                 &pGpu,
+                 NULL,
+                 pCrashLockCounterInfoParams->subDeviceInstance);
+
+    if (status != NV_OK)
+        return status;
+
+    if (pCrashLockCounterInfoParams->head >= OBJ_MAX_HEADS)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+
+    pKernelHead = KDISP_GET_HEAD(pKernelDisplay, pCrashLockCounterInfoParams->head);
+    NV_ASSERT_OR_RETURN(pKernelHead != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    pCrashLockCounterInfoParams->counterValueV = kheadGetCrashLockCounterV_HAL(pGpu, pKernelHead);
+
+    NV_PRINTF(LEVEL_INFO, "Crash Lock Counter value fetched from register is: 0x%x\n", pCrashLockCounterInfoParams->counterValueV);
+
+    return status;
+}
diff --git a/src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c b/src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c
new file mode 100644
index 0000000..7089893
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c
@@ -0,0 +1,110 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "class/cl5070.h" +#include "gpu/disp/disp_objs.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/gpu.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu_mgr/gpu_mgr.h" +#include "mem_mgr/mem.h" +#include "rmapi/client_resource.h" +#include "rmapi/rmapi.h" +#include "rmapi/rs_utils.h" + +NV_STATUS +dispobjCtrlCmdGetRgConnectedLockpinStateless_IMPL +( + DispObject *pDispObject, + NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS *pParams +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +nvdispapiCtrlCmdChannelCancelFlip_IMPL +( + NvDispApi *pNvDispApi, + NVC370_CTRL_CHANNEL_CANCEL_FLIP_PARAMS *pParams +) +{ + OBJGPU *pGpu = DISPAPI_GET_GPU(pNvDispApi); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pNvDispApi); + DISPCHNCLASS internalChnClass = dispChnClass_Supported; + NvU32 dispChannelNum = 0; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(pParams != NULL, NV_ERR_INVALID_ARGUMENT); + + status = kdispGetIntChnClsForHwCls(pKernelDisplay, pParams->channelClass, &internalChnClass); + if (status != NV_OK) + { + return status; + } + + if (kdispGetChannelNum_HAL(pKernelDisplay, internalChnClass, pParams->channelInstance, &dispChannelNum) != NV_OK) + { + return NV_ERR_INVALID_CHANNEL; + } + + if (pKernelDisplay->pClientChannelTable[dispChannelNum].bInUse != NV_TRUE) + { + NV_PRINTF(LEVEL_WARNING, "disp Channel not allocated by RM yet!\n"); + return NV_ERR_INVALID_CHANNEL; + } + else + { + // Does HW also think the same + if (!kdispIsChannelAllocatedHw_HAL(pGpu, pKernelDisplay, internalChnClass, pParams->channelInstance)) + { + NV_PRINTF(LEVEL_WARNING, "disp Channel not allocated by HW yet!\n"); + return NV_ERR_INVALID_CHANNEL; + } + } + + if (internalChnClass == dispChnClass_Core) + { + // Ensure that only core channel owner can touch it. + if (pKernelDisplay->pClientChannelTable[dispChannelNum].pClient->hClient != hClient) + { + NV_ASSERT(0); + return NV_ERR_INVALID_OWNER; + } + } + + kdispSetChannelTrashAndAbortAccel_HAL(pGpu, pKernelDisplay, internalChnClass, pParams->channelInstance, NV_TRUE); + + if (!kdispIsChannelIdle_HAL(pGpu, pKernelDisplay, internalChnClass, pParams->channelInstance)) + { + NV_PRINTF(LEVEL_WARNING, "disp channel not in idle state! %u %u\n", internalChnClass, pParams->channelInstance); + NV_ASSERT(0); + } + + kdispSetChannelTrashAndAbortAccel_HAL(pGpu, pKernelDisplay, internalChnClass, pParams->channelInstance, NV_FALSE); + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/disp/disp_objs.c b/src/nvidia/src/kernel/gpu/disp/disp_objs.c new file mode 100644 index 0000000..081d5ee --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/disp_objs.c @@ -0,0 +1,701 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing the display - both Disp and DispCommon +* entries with their insides (DispChannelList and DispDmaControlList) +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "core/locks.h" +#include "resserv/rs_client.h" + +#include "gpu/gpu.h" +#include "gpu/device/device.h" +#include "gpu/disp/disp_objs.h" +#include "gpu/disp/disp_channel.h" +#include "gpu/disp/kern_disp.h" +#include "gpu_mgr/gpu_mgr.h" +#include "platform/sli/sli.h" + +#include "class/cl0073.h" // NV04_DISPLAY_COMMON +#include "class/cl5070.h" // NV50_DISPLAY +#include "class/clc370.h" // NVC370_DISPLAY + +NV_STATUS +dispapiConstruct_IMPL +( + DisplayApi *pDisplayApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + CLASSDESCRIPTOR *pClassDescriptor; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + OBJGPU *pGpu; + KernelDisplay *pKernelDisplay; + NvBool bBcResource; + NvU32 i; + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + // Use gpuGetByRef instead of GpuResource because it will work even if resource + // isn't a GpuResource. + status = gpuGetByRef(pResourceRef, &bBcResource, &pGpu); + if (status != NV_OK) + return status; + + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_STATE); + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY) && + pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT) + { + NV_PRINTF(LEVEL_ERROR, + "Failure allocating display class 0x%08x: Only root(admin)/kernel clients are allowed\n", + pParams->externalClassId); + + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + // Find class in class db (verifies class is valid for this GPU) + status = gpuGetClassByClassId(pGpu, pParams->externalClassId, &pClassDescriptor); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, "bad class 0x%x\n", pParams->externalClassId); + return NV_ERR_INVALID_CLASS; + } + + // Check display is enabled (i.e. 
not displayless)
+    pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    if (pKernelDisplay == NULL)
+    {
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    for (i = 0; i < NV2080_MAX_SUBDEVICES; i++)
+        pDisplayApi->pNotifyActions[i] = NULL;
+
+    pDisplayApi->pGpuInRmctrl = NULL;
+    pDisplayApi->pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu);
+    pDisplayApi->bBcResource = bBcResource;
+
+    gpuSetThreadBcState(pGpu, bBcResource);
+
+    return status;
+}
+
+void
+dispapiDestruct_IMPL
+(
+    DisplayApi *pDisplayApi
+)
+{
+    NvU32 i;
+
+    // Free notify actions memory if it's been allocated
+    for (i = 0; i < NV2080_MAX_SUBDEVICES; i++)
+    {
+        portMemFree(pDisplayApi->pNotifyActions[i]);
+        pDisplayApi->pNotifyActions[i] = NULL;
+    }
+}
+
+static NV_STATUS
+_dispapiNotifierInit
+(
+    DisplayApi *pDisplayApi,
+    NvU32       numNotifiers,
+    NvU32       disableCmd
+)
+{
+    NvU32     i, j;
+    NV_STATUS status = NV_OK;
+
+    pDisplayApi->numNotifiers = numNotifiers;
+
+    for (i = 0; i < NV2080_MAX_SUBDEVICES; i++)
+    {
+        // get memory for pNotifyActions table
+        pDisplayApi->pNotifyActions[i] = portMemAllocNonPaged(
+            pDisplayApi->numNotifiers * sizeof(NvU32));
+        if (pDisplayApi->pNotifyActions[i] != NULL)
+        {
+            // the default action for each notifier type is disabled
+            for (j = 0; j < pDisplayApi->numNotifiers; j++)
+            {
+                pDisplayApi->pNotifyActions[i][j] = disableCmd;
+            }
+        }
+        else
+        {
+            goto fail;
+        }
+    }
+
+    return status;
+
+fail:
+    // first release any notifyActions memory
+    for (i = 0; i < NV2080_MAX_SUBDEVICES; i++)
+    {
+        portMemFree(pDisplayApi->pNotifyActions[i]);
+        pDisplayApi->pNotifyActions[i] = NULL;
+    }
+
+    return NV_ERR_INSUFFICIENT_RESOURCES;
+}
+
+NV_STATUS
+dispobjConstructHal_IMPL
+(
+    DispObject                   *pDispObject,
+    CALL_CONTEXT                 *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    DisplayApi  *pDisplayApi = staticCast(pDispObject, DisplayApi);
+    Device      *pDevice = dynamicCast(pCallContext->pResourceRef->pParentRef->pResource, Device);
+    GpuResource *pGpuResource = staticCast(pDevice, GpuResource);
+    OBJGPU      *pGpu = pGpuResource->pGpu;
+    NV_STATUS    rmStatus = NV_ERR_INVALID_STATE;
+
+    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY);
+    {
+        KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+
+        rmStatus = kdispSelectClass_HAL(pGpu, pKernelDisplay, pCallContext->pResourceRef->externalClassId);
+
+        if (rmStatus != NV_OK)
+        {
+            // If the operation fails, it should fail on the first try
+            NV_ASSERT(gpumgrIsParentGPU(pGpu));
+            SLI_LOOP_BREAK;
+        }
+    }
+    SLI_LOOP_END;
+
+    if (rmStatus != NV_OK)
+        return rmStatus;
+
+    if (dynamicCast(pDisplayApi, NvDispApi))
+    {
+        rmStatus = _dispapiNotifierInit(pDisplayApi,
+                                        NVC370_NOTIFIERS_MAXCOUNT,
+                                        NVC370_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE);
+    }
+    else
+    {
+        rmStatus = _dispapiNotifierInit(pDisplayApi,
+                                        NV5070_NOTIFIERS_MAXCOUNT,
+                                        NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE);
+    }
+
+    return rmStatus;
+}
+
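_dispapiNotifierInit above follows an all-or-nothing allocation pattern: one action table per possible subdevice, every entry defaulted to the class-specific disable command, and a full unwind if any allocation fails, so the destructor can run safely either way. A standalone sketch of the same pattern:

#include <stdlib.h>

#define MAX_SUBDEVICES 8   /* stand-in for NV2080_MAX_SUBDEVICES */

static unsigned *actionTables[MAX_SUBDEVICES];

static int notifierInitModel(unsigned numNotifiers, unsigned disableCmd)
{
    for (unsigned i = 0; i < MAX_SUBDEVICES; i++)
    {
        actionTables[i] = malloc(numNotifiers * sizeof *actionTables[i]);
        if (actionTables[i] == NULL)
            goto fail;
        for (unsigned j = 0; j < numNotifiers; j++)
            actionTables[i][j] = disableCmd;   /* default: disabled */
    }
    return 0;

fail:
    /* Unwind everything; free(NULL) is harmless, so a full sweep is simplest. */
    for (unsigned i = 0; i < MAX_SUBDEVICES; i++)
    {
        free(actionTables[i]);
        actionTables[i] = NULL;
    }
    return -1;
}

int main(void)
{
    return notifierInitModel(16, 0) == 0 ? 0 : 1;
}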
+NV_STATUS
+dispobjConstruct_IMPL
+(
+    DispObject                   *pDispObject,
+    CALL_CONTEXT                 *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    pDispObject->rmFreeFlags = NV5070_CTRL_SET_RMFREE_FLAGS_NONE;
+
+    if (pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Failure allocating display class 0x%08x: Only root(admin)/kernel clients are allowed\n",
+                  pParams->externalClassId);
+
+        //
+        // GPUSWSEC-1560 introduced a central object privilege check in RS. Please mark derived external classes
+        // of DispObject privileged in their RS_ENTRY. Since DispObject doesn't have an external class of its own
+        // and is used as a base class, leaving this check inline to catch future derivations.
+        //
+        osAssertFailed();
+
+        return NV_ERR_INSUFFICIENT_PERMISSIONS;
+    }
+
+    return dispobjConstructHal_HAL(pDispObject, pCallContext, pParams);
+}
+
+NV_STATUS
+dispobjGetByHandle_IMPL
+(
+    RsClient    *pClient,
+    NvHandle     hDispObject,
+    DispObject **ppDispObject
+)
+{
+    NV_STATUS      status;
+    RsResourceRef *pResourceRef;
+
+    status = clientGetResourceRef(pClient, hDispObject, &pResourceRef);
+    if (status != NV_OK)
+        return status;
+
+    *ppDispObject = dynamicCast(pResourceRef->pResource, DispObject);
+
+    return (*ppDispObject) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE;
+}
+
+NV_STATUS
+dispobjGetByDevice_IMPL
+(
+    RsClient    *pClient,
+    Device      *pDevice,
+    DispObject **ppDispObject
+)
+{
+    NV_STATUS      status;
+    RsResourceRef *pResourceRef;
+
+    status = refFindChildOfType(RES_GET_REF(pDevice), classId(DispObject), NV_FALSE /*bExactMatch*/, &pResourceRef);
+    if (status != NV_OK)
+        return status;
+
+    *ppDispObject = dynamicCast(pResourceRef->pResource, DispObject);
+
+    return (*ppDispObject) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE;
+}
+
+//
+// Most display control calls take a subDeviceInstance argument.
+// We need to verify that this argument is valid and then use it to
+// locate the correct OBJGPU for the particular subdevice.
+//
+NV_STATUS
+dispapiSetUnicastAndSynchronize_KERNEL
+(
+    DisplayApi *pDisplayApi,
+    OBJGPUGRP  *pGpuGroup,
+    OBJGPU    **ppGpu,
+    OBJDISP   **ppDisp,
+    NvU32       subDeviceInstance
+)
+{
+    NV_STATUS nvStatus = NV_OK;
+
+    nvStatus = gpugrpGetGpuFromSubDeviceInstance(pGpuGroup, subDeviceInstance, ppGpu);
+    if (nvStatus != NV_OK)
+        return nvStatus;
+
+    gpumgrSetBcEnabledStatus(*ppGpu, NV_FALSE);
+
+    //
+    // The _KERNEL version of this function is only called from Kernel RM, but
+    // in Kernel RM, OBJDISP is not available, so ppDisp must be NULL. If the
+    // caller needs to access OBJDISP, either the caller code must remove the
+    // OBJDISP dependency, or the caller code must be changed so that
+    // dispapiSetUnicastAndSynchronize is called only from physical or
+    // monolithic RM, never Kernel RM.
+    //
+    if (ppDisp != NULL)
+    {
+        return NV_ERR_INVALID_STATE;
+    }
+
+    return nvStatus;
+}
+
+NV_STATUS
+dispapiControl_Prologue_IMPL
+(
+    DisplayApi                     *pDisplayApi,
+    CALL_CONTEXT                   *pCallContext,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams
+)
+{
+    NvU32       subdeviceIndex;
+    NV_STATUS   status;
+    RmResource *pResource = staticCast(pDisplayApi, RmResource);
+
+    if (dynamicCast(pDisplayApi, DispCommon))
+    {
+        Device      *pDevice = dynamicCast(pCallContext->pResourceRef->pParentRef->pResource, Device);
+        GpuResource *pGpuResource = staticCast(pDevice, GpuResource);
+
+        pResource->rpcGpuInstance = gpuGetInstance(pGpuResource->pGpu);
+        pDisplayApi->pGpuInRmctrl = pGpuResource->pGpu;
+        return rmresControl_Prologue_IMPL(pResource, pCallContext, pRmCtrlParams);
+    }
+
+    // Read the subdevice ID out and swap GPU pointer
+    if (dynamicCast(pDisplayApi, NvDispApi))
+    {
+        NVC370_CTRL_CMD_BASE_PARAMS *pBaseParameters = pRmCtrlParams->pParams;
+
+        //
+        // All non-NULL disp control C370 methods have
+        // NVC370_CTRL_CMD_BASE_PARAMS as their first member.
+        //
+        if ((pBaseParameters == NULL) || (pRmCtrlParams->paramsSize < sizeof(NVC370_CTRL_CMD_BASE_PARAMS)))
+        {
+            status = NV_ERR_INVALID_PARAM_STRUCT;
+            goto done;
+        }
+        subdeviceIndex = pBaseParameters->subdeviceIndex;
+    }
+    else if (dynamicCast(pDisplayApi, DispSwObj))
+    {
+        NVC372_CTRL_CMD_BASE_PARAMS *pBaseParameters = pRmCtrlParams->pParams;
+
+        //
+        // All non-NULL disp control C372 methods have
+        // NVC372_CTRL_CMD_BASE_PARAMS as their first member.
+        //
+        if ((pBaseParameters == NULL) || (pRmCtrlParams->paramsSize < sizeof(NVC372_CTRL_CMD_BASE_PARAMS)))
+        {
+            status = NV_ERR_INVALID_PARAM_STRUCT;
+            goto done;
+        }
+        subdeviceIndex = pBaseParameters->subdeviceIndex;
+    }
+    else
+    {
+        NV5070_CTRL_CMD_BASE_PARAMS *pBaseParameters = pRmCtrlParams->pParams;
+
+        //
+        // All non-NULL disp control 5070 methods have
+        // NV5070_CTRL_CMD_BASE_PARAMS as their first member.
+        //
+        if ((pBaseParameters == NULL) || (pRmCtrlParams->paramsSize < sizeof(NV5070_CTRL_CMD_BASE_PARAMS)))
+        {
+            status = NV_ERR_INVALID_PARAM_STRUCT;
+            goto done;
+        }
+        subdeviceIndex = pBaseParameters->subdeviceIndex;
+    }
+
+    status = dispapiSetUnicastAndSynchronize_HAL(pDisplayApi,
+                                                 pRmCtrlParams->pGpuGrp,
+                                                 &pRmCtrlParams->pGpu,
+                                                 NULL,
+                                                 subdeviceIndex);
+
+    if (status == NV_OK)
+    {
+        pResource->rpcGpuInstance = gpuGetInstance(pRmCtrlParams->pGpu);
+        pDisplayApi->pGpuInRmctrl = pRmCtrlParams->pGpu;
+        return rmresControl_Prologue_IMPL(pResource, pCallContext, pRmCtrlParams);
+    }
+
+done:
+    return status;
+}
+
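dispapiControl_Prologue_IMPL above can read subdeviceIndex from any control's parameter struct because each per-class params type places its *_CTRL_CMD_BASE_PARAMS as the first member, so a pointer to the params is also a valid pointer to the base struct. A standalone sketch of that common-initial-member idiom, with stand-in types:

#include <stdio.h>

typedef struct { unsigned subdeviceIndex; } BaseParams;

/* Any concrete control params struct embeds BaseParams as its first member. */
typedef struct {
    BaseParams base;
    unsigned   payload;
} SomeCtrlParams;

static unsigned readSubdeviceIndex(void *pParams)
{
    /* Valid because a pointer to a struct also points at its first member. */
    return ((BaseParams *)pParams)->subdeviceIndex;
}

int main(void)
{
    SomeCtrlParams p = { .base = { .subdeviceIndex = 1 }, .payload = 42 };
    printf("subdeviceIndex = %u\n", readSubdeviceIndex(&p));
    return 0;
}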
+void
+dispapiControl_Epilogue_IMPL
+(
+    DisplayApi                     *pDisplayApi,
+    CALL_CONTEXT                   *pCallContext,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams
+)
+{
+    if (dynamicCast(pDisplayApi, DispCommon) == NULL)
+    {
+        RmResource *pResource = staticCast(pDisplayApi, RmResource);
+        pResource->rpcGpuInstance = ~0;
+    }
+
+    pDisplayApi->pGpuInRmctrl = NULL;
+}
+
+NV_STATUS
+dispapiControl_IMPL
+(
+    DisplayApi                     *pDisplayApi,
+    CALL_CONTEXT                   *pCallContext,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pParams
+)
+{
+    NV_STATUS     status = NV_OK;
+    Device       *pDevice = dynamicCast(pCallContext->pResourceRef->pParentRef->pResource, Device);
+    GpuResource  *pGpuResource = staticCast(pDevice, GpuResource);
+    RmCtrlParams *pRmCtrlParams = pParams->pLegacyParams;
+    OBJGPU       *pGpu = pGpuResource->pGpu;
+
+    NV_PRINTF(LEVEL_INFO, "class: 0x%x cmd 0x%x\n",
+              RES_GET_EXT_CLASS_ID(pDisplayApi),
+              pRmCtrlParams->cmd);
+
+    pRmCtrlParams->pGpu = pGpu;
+    pRmCtrlParams->pGpuGrp = pGpuResource->pGpuGrp;
+
+    gpuSetThreadBcState(pGpu, NV_TRUE);
+
+    status = resControl_IMPL(staticCast(pDisplayApi, RsResource),
+                             pCallContext, pParams);
+
+    return status;
+}
+
+NV_STATUS
+dispswobjConstruct_IMPL
+(
+    DispSwObj                    *pDispSwObj,
+    CALL_CONTEXT                 *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    return NV_OK;
+}
+
+NV_STATUS
+dispcmnConstruct_IMPL
+(
+    DispCommon                   *pDispCommon,
+    CALL_CONTEXT                 *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    DisplayApi *pDisplayApi = staticCast(pDispCommon, DisplayApi);
+
+    //
+    // Not adding the priv-level check for this class, as it is being used by
+    // OpenGL from userspace. Once the cleanup is done in OpenGL, we can add
+    // the priv-level check below.
+    //
+
+    pDispCommon->hotPlugMaskToBeReported = 0;
+    pDispCommon->hotUnplugMaskToBeReported = 0;
+
+    return _dispapiNotifierInit(pDisplayApi,
+                                NV0073_NOTIFIERS_MAXCOUNT,
+                                NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE);
+}
+
+NV_STATUS
+dispcmnGetByHandle_IMPL
+(
+    RsClient    *pClient,
+    NvHandle     hDispCommon,
+    DispCommon **ppDispCommon
+)
+{
+    NV_STATUS      status;
+    
RsResourceRef *pResourceRef; + + status = clientGetResourceRef(pClient, hDispCommon, &pResourceRef); + if (status != NV_OK) + return status; + + *ppDispCommon = dynamicCast(pResourceRef->pResource, DispCommon); + + return (*ppDispCommon) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +void +dispcmnGetByDevice_IMPL +( + RsClient *pClient, + NvHandle hDevice, + DispCommon **ppDispCommon +) +{ + Device *pDevice; + RsResourceRef *pResourceRef; + + *ppDispCommon = NULL; /* return failure by default */ + + if (deviceGetByHandle(pClient, hDevice, &pDevice) != NV_OK) + return; + + if (refFindChildOfType(RES_GET_REF(pDevice), + classId(DispCommon), + NV_FALSE, + &pResourceRef) != NV_OK) + return; + + *ppDispCommon = dynamicCast(pResourceRef->pResource, DispCommon); +} + +/** + * @brief Return NV_TRUE if RmFree() needs to preserve the HW, otherwise NV_FALSE + * + * @param[in] DispObject Pointer + */ +NvBool dispobjGetRmFreeFlags_IMPL(DispObject *pDispObject) +{ + return !!(pDispObject->rmFreeFlags & NV5070_CTRL_SET_RMFREE_FLAGS_PRESERVE_HW); +} + +/** + * @brief Clears the RmFree() temporary flags + * + * @param[in] DispObject Pointer + * + * @return void + */ +void dispobjClearRmFreeFlags_IMPL(DispObject *pDispObject) +{ + pDispObject->rmFreeFlags = NV5070_CTRL_SET_RMFREE_FLAGS_NONE; +} + +NV_STATUS +nvdispapiConstruct_IMPL +( + NvDispApi *pNvdispApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +// **************************************************************************** +// Deprecated Functions +// **************************************************************************** + +/** + * @warning This function is deprecated! Please use dispchnGetByHandle. + */ +NV_STATUS +CliFindDispChannelInfo +( + NvHandle hClient, + NvHandle hDispChannel, + DispChannel **ppDispChannel, + NvHandle *phParent +) +{ + RsClient *pClient; + NV_STATUS status; + + *ppDispChannel = NULL; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return NV_ERR_INVALID_CLIENT; + + status = dispchnGetByHandle(pClient, hDispChannel, ppDispChannel); + if (status != NV_OK) + return status; + + if (phParent) + *phParent = RES_GET_PARENT_HANDLE(*ppDispChannel); + + return NV_OK; +} + +// +// DISP Event RM Controls +// +NV_STATUS +dispapiCtrlCmdEventSetNotification_IMPL +( + DisplayApi *pDisplayApi, + NV0073_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams +) +{ + OBJGPU *pGpu = DISPAPI_GET_GPU(pDisplayApi); + NvU32 *pNotifyActions; + NV_STATUS status = NV_OK; + PEVENTNOTIFICATION pEventNotifications = inotifyGetNotificationList(staticCast(pDisplayApi, INotifier)); + + // NV01_EVENT must have been plugged into this subdevice + if (pEventNotifications == NULL) + { + NV_PRINTF(LEVEL_INFO, "cmd 0x%x: no event list\n", NV0073_CTRL_CMD_EVENT_SET_NOTIFICATION); + return NV_ERR_INVALID_STATE; + } + + // error check event index + if (pSetEventParams->event >= pDisplayApi->numNotifiers) + { + NV_PRINTF(LEVEL_INFO, "bad event 0x%x\n", pSetEventParams->event); + return NV_ERR_INVALID_ARGUMENT; + } + + // error check subDeviceInstance + if (pSetEventParams->subDeviceInstance >= gpumgrGetSubDeviceMaxValuePlus1(pGpu)) + { + NV_PRINTF(LEVEL_INFO, "bad subDeviceInstance 0x%x\n", + pSetEventParams->subDeviceInstance); + return NV_ERR_INVALID_ARGUMENT; + } + + pNotifyActions = pDisplayApi->pNotifyActions[pSetEventParams->subDeviceInstance]; + + switch (pSetEventParams->action) + { + case NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE: + case 
NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT: + { + // must be in disabled state to transition to an active state + if (pNotifyActions[pSetEventParams->event] != NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + status = NV_ERR_INVALID_STATE; + break; + } + + // bind hEvent to particular subdeviceInst + status = bindEventNotificationToSubdevice(pEventNotifications, + pSetEventParams->hEvent, + pSetEventParams->subDeviceInstance); + if (status != NV_OK) + return status; + + pNotifyActions[pSetEventParams->event] = pSetEventParams->action; + break; + } + + case NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE: + { + pNotifyActions[pSetEventParams->event] = pSetEventParams->action; + break; + } + default: + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + return status; +} + +NV_STATUS dispapiValidateRmctrlPriv_IMPL(OBJGPU *pGpu) +{ + RS_PRIV_LEVEL minPrivLevel = RS_PRIV_LEVEL_KERNEL; + + // Demote priv level of certain controls to root for MODS on soc-disp. Bug 5117826 + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + minPrivLevel = RS_PRIV_LEVEL_USER_ROOT; + + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + if (pCallContext->secInfo.privLevel < minPrivLevel) + return NV_ERR_INSUFFICIENT_PERMISSIONS; + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/disp/disp_sf_user.c b/src/nvidia/src/kernel/gpu/disp/disp_sf_user.c new file mode 100644 index 0000000..a5c260e --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/disp_sf_user.c @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing DispSfUser class. 
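+* The DispSfUser object simply exposes the base offset and size of the
+* display SF user register space to clients; see dispsfConstruct_IMPL and
+* dispsfGetRegBaseOffsetAndSize_IMPL below.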
+* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "resserv/resserv.h" + +#include "gpu/gpu.h" +#include "gpu/disp/disp_sf_user.h" +#include "gpu/disp/kern_disp.h" + +NV_STATUS +dispsfConstruct_IMPL +( + DispSfUser *pDispSfUser, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispSfUser); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + NV_CHECK_OR_RETURN(LEVEL_ERROR, pKernelDisplay != NULL, NV_ERR_NOT_SUPPORTED); + + // Set sf user RegBase offset + kdispGetDisplaySfUserBaseAndSize_HAL(pGpu, pKernelDisplay, + &pDispSfUser->ControlOffset, + &pDispSfUser->ControlLength); + + return NV_OK; +} + +NV_STATUS +dispsfGetRegBaseOffsetAndSize_IMPL +( + DispSfUser *pDispSfUser, + OBJGPU *pGpu, + NvU32* pOffset, + NvU32* pSize +) +{ + if (pOffset) + { + *pOffset = pDispSfUser->ControlOffset; + } + + if (pSize) + { + *pSize = pDispSfUser->ControlLength; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/disp/head/arch/v04/kernel_head_0401.c b/src/nvidia/src/kernel/gpu/disp/head/arch/v04/kernel_head_0401.c new file mode 100644 index 0000000..32b3d07 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/head/arch/v04/kernel_head_0401.c @@ -0,0 +1,98 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Kernel Display Module +* This file contains functions managing display on CPU RM +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "os/os.h" +#include "gpu/gpu.h" +#include "disp/v04_01/dev_disp.h" +#include "platform/sli/sli.h" +#include "class/clc370.h" +#include "ctrl/ctrlc370/ctrlc370event.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/head/kernel_head.h" + +void +kheadReadPendingRgSemIntr_v04_01 +( + OBJGPU *pGpu, + KernelHead *pKernelHead, + HEADINTRMASK *pHeadIntrMask, + THREAD_STATE_NODE *pThreadState, + NvU32 *pCachedIntr +) +{ + NvU32 i; + NvU32 intr = pCachedIntr ? 
*pCachedIntr :
+                 GPU_REG_RD32_EX(pGpu, NV_PDISP_FE_RM_INTR_STAT_HEAD_TIMING(pKernelHead->PublicId), pThreadState);
+
+    for (i = 0; i < headIntr_RgSem__SIZE_1; ++i)
+    {
+        if (FLD_IDX_TEST_DRF(_PDISP, _FE_EVT_STAT_HEAD_TIMING, _RG_SEM, i, _PENDING, intr))
+        {
+            *pHeadIntrMask |= headIntr_RgSem(i);
+        }
+    }
+}
+
+void
+kheadHandleRgSemIntr_v04_01
+(
+    OBJGPU            *pGpu,
+    KernelHead        *pKernelHead,
+    HEADINTRMASK      *pHeadIntrMask,
+    THREAD_STATE_NODE *pThreadState
+)
+{
+    KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    NvU32 rgSemIndex, intr;
+
+    NVC370_RG_SEM_NOTIFICATION_PARAMS params = {0};
+
+    for (rgSemIndex = 0; rgSemIndex < headIntr_RgSem__SIZE_1; ++rgSemIndex)
+    {
+        if (*pHeadIntrMask & headIntr_RgSem(rgSemIndex))
+        {
+            intr = DRF_IDX_DEF(_PDISP, _FE_EVT_STAT_HEAD_TIMING, _RG_SEM, rgSemIndex, _RESET);
+
+            osDispService(NV_PDISP_FE_EVT_STAT_HEAD_TIMING(pKernelHead->PublicId), intr);
+            SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY);
+            GPU_REG_WR32_EX(pGpu, NV_PDISP_FE_EVT_STAT_HEAD_TIMING(pKernelHead->PublicId), intr, pThreadState);
+            SLI_LOOP_END;
+
+            *pHeadIntrMask &= (~headIntr_RgSem(rgSemIndex));
+
+            params.headId = pKernelHead->PublicId;
+            params.rgSemId = rgSemIndex;
+
+            kdispNotifyEvent(pGpu, pKernelDisplay, NVC370_NOTIFIERS_RG_SEM_NOTIFICATION, &params, sizeof(params), 0, 0);
+        }
+    }
+}
diff --git a/src/nvidia/src/kernel/gpu/disp/head/kernel_head.c b/src/nvidia/src/kernel/gpu/disp/head/kernel_head.c
new file mode 100644
index 0000000..bd81e3d
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/disp/head/kernel_head.c
@@ -0,0 +1,445 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0
+#include "gpu/disp/kern_disp.h"
+#include "gpu/disp/head/kernel_head.h"
+#include "gpu/timer/objtmr.h"
+
+NV_STATUS
+kheadConstruct_IMPL(KernelHead *pKernelHead)
+{
+    pKernelHead->Vblank.IntrState = NV_HEAD_VBLANK_INTR_UNAVAILABLE;
+
+    pKernelHead->Vblank.pSpinlock = (PORT_SPINLOCK *) portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged());
+    NV_ASSERT_OR_RETURN(pKernelHead->Vblank.pSpinlock != NULL, NV_ERR_INSUFFICIENT_RESOURCES);
+
+    return NV_OK;
+}
+
+void
+kheadDestruct_IMPL
+(
+    KernelHead *pKernelHead
+)
+{
+    if (pKernelHead->Vblank.pSpinlock != NULL)
+    {
+        portSyncSpinlockDestroy(pKernelHead->Vblank.pSpinlock);
+        pKernelHead->Vblank.pSpinlock = NULL;
+    }
+}
+
+NvU32
+kheadGetVblankTotalCounter_IMPL
+(
+    KernelHead *pKernelHead
+)
+{
+    return pKernelHead->Vblank.Counters.Total;
+}
+
+void
+kheadSetVblankTotalCounter_IMPL
+(
+    KernelHead *pKernelHead,
+    NvU32       counter
+)
+{
+    pKernelHead->Vblank.Counters.Total = counter;
+}
+
+NvU32
+kheadGetVblankLowLatencyCounter_IMPL
+(
+    KernelHead *pKernelHead
+)
+{
+    return pKernelHead->Vblank.Counters.LowLatency;
+}
+
+void
+kheadSetVblankLowLatencyCounter_IMPL
+(
+    KernelHead *pKernelHead,
+    NvU32       counter
+)
+{
+    pKernelHead->Vblank.Counters.LowLatency = counter;
+}
+
+NvU32
+kheadGetVblankNormLatencyCounter_IMPL
+(
+    KernelHead *pKernelHead
+)
+{
+    return pKernelHead->Vblank.Counters.NormLatency;
+}
+
+void
+kheadSetVblankNormLatencyCounter_IMPL
+(
+    KernelHead *pKernelHead,
+    NvU32       counter
+)
+{
+    pKernelHead->Vblank.Counters.NormLatency = counter;
+}
+
+static NvBool
+kheadIsVblankCallbackDue
+(
+    VBLANKCALLBACK *pCallback,
+    NvU32           state,
+    NvU64           time,
+    NvU32           vblankCount
+)
+{
+    if (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_TIMESTAMP)
+    {
+        //
+        // Timestamp-based callbacks don't have a valid vblank count: the
+        // vblank might be delayed and we might see only one vblank instead of
+        // two, so a count doesn't make sense for them. And since the semantics
+        // are "flip on the first vblank whose TS >= the specified TS", we
+        // can't use tmrCallbacks (they might flip outside vblank).
+        //
+        return (time >= pCallback->TimeStamp);
+    }
+    else
+    {
+        //
+        // These are now guaranteed to be sorted by vblank count, and all have
+        // a VBlankCount, which makes processing simpler. In this function,
+        // 'due' means "the next time the queue's counter is incremented, will
+        // it be time to process this callback?" This definition requires us
+        // to add 1 to the current vblankCount during the comparison.
+        //
+        if (VBLANK_STATE_PROCESS_IMMEDIATE & state)
+        {
+            return NV_TRUE;
+        }
+
+        // Persistent callbacks that want to run every vblank
+        if ((pCallback->Flags & VBLANK_CALLBACK_FLAG_PERSISTENT) && (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_NEXT))
+        {
+            return NV_TRUE;
+        }
+
+        // Every other callback whose time has come.
+        if (pCallback->VBlankCount == 1 + vblankCount)
+        {
+            return NV_TRUE;
+        }
+    }
+    return NV_FALSE;
+}
+
+NvU32
+kheadCheckVblankCallbacksQueued_IMPL
+(
+    OBJGPU     *thisGpu,
+    KernelHead *pKernelHead,
+    NvU32       state,
+    NvU32      *expiring
+)
+{
+    OBJTMR *pTmr;
+    NvU64   time;
+    NvU32   queues = 0;
+
+    pTmr = GPU_GET_TIMER(thisGpu);
+    tmrGetCurrentTime(pTmr, &time);
+
+    if (expiring)
+    {
+        *expiring = 0;
+    }
+    //
+    // return a union of queues (represented by VBLANK_STATE_PROCESS_XXX_LATENCY
+    // flags) that are nonempty, i.e. have at least one callback.
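+    // (For example: if 'state' requests both latency classes but only the
+    // low-latency list has callbacks, the returned union is just
+    // VBLANK_STATE_PROCESS_LOW_LATENCY.)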
+ // optionally, also return (via 'expiring', when non-NULL) which of those non-empty queues contain + // callbacks that are due to be processed, the next time that queue's counter gets incremented. + // + if ( (pKernelHead->Vblank.Callback.pListLL) && + (state & VBLANK_STATE_PROCESS_LOW_LATENCY) ) + { + queues |= VBLANK_STATE_PROCESS_LOW_LATENCY; + + if (expiring) + { + NvU32 vblankCount; + VBLANKCALLBACK *pCallback; + + vblankCount = pKernelHead->Vblank.Counters.LowLatency; + pCallback = pKernelHead->Vblank.Callback.pListLL; + + do + { + if (kheadIsVblankCallbackDue(pCallback, state, time, vblankCount)) + { + *expiring |= VBLANK_STATE_PROCESS_LOW_LATENCY; + } + pCallback = pCallback->Next; + } + while (pCallback && !(*expiring & VBLANK_STATE_PROCESS_LOW_LATENCY)); + } + } + + if ( (pKernelHead->Vblank.Callback.pListNL) && + (state & VBLANK_STATE_PROCESS_NORMAL_LATENCY) ) + { + queues |= VBLANK_STATE_PROCESS_NORMAL_LATENCY; + + if (expiring) + { + NvU32 vblankCount; + VBLANKCALLBACK *pCallback; + + vblankCount = pKernelHead->Vblank.Counters.NormLatency; + pCallback = pKernelHead->Vblank.Callback.pListNL; + + do + { + if (kheadIsVblankCallbackDue(pCallback, state, time, vblankCount)) + { + *expiring |= VBLANK_STATE_PROCESS_NORMAL_LATENCY; + } + + pCallback = pCallback->Next; + } + while (pCallback && !(*expiring & VBLANK_STATE_PROCESS_NORMAL_LATENCY)); + } + } + + return queues & state; +} +NvU32 +kheadReadVblankIntrState_IMPL +( + OBJGPU *pGpu, + KernelHead *pKernelHead +) +{ + // Check to make sure that our SW state grooves with the HW state + if (kheadReadVblankIntrEnable_HAL(pGpu, pKernelHead) && + kheadGetDisplayInitialized_HAL(pGpu, pKernelHead)) + { + // HW is enabled, check if SW state is not enabled + if (pKernelHead->Vblank.IntrState != NV_HEAD_VBLANK_INTR_ENABLED) + { + pKernelHead->Vblank.IntrState = NV_HEAD_VBLANK_INTR_ENABLED; + } + } + else + { + // + // If HW is not enabled, SW state would depend on whether head is + // driving display. Check for both the SW states and base the + // SW state decision on head initialized state. + // If head is initialized SW state should be AVAILABLE else + // UNAVAILABLE. + // + if ((pKernelHead->Vblank.IntrState == NV_HEAD_VBLANK_INTR_ENABLED) || + (pKernelHead->Vblank.IntrState == NV_HEAD_VBLANK_INTR_UNAVAILABLE)) + { + NvU32 state = NV_HEAD_VBLANK_INTR_UNAVAILABLE; + + // + // We should say HW not enabled is AVAILABLE or UNAVAILABLE + // So, we'll base the correct decision on whether or not + // this head is driving any display. 
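+            // In short, the state machine here is: ENABLED when the HW
+            // interrupt is on for an initialized head, AVAILABLE when the
+            // head is initialized but the interrupt is off, and UNAVAILABLE
+            // when the head is not driving any display.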
+            //
+            if (kheadGetDisplayInitialized_HAL(pGpu, pKernelHead))
+            {
+                state = NV_HEAD_VBLANK_INTR_AVAILABLE;
+            }
+
+            if (state != pKernelHead->Vblank.IntrState)
+            {
+                pKernelHead->Vblank.IntrState = state;
+            }
+        }
+        else if (pKernelHead->Vblank.IntrState == NV_HEAD_VBLANK_INTR_AVAILABLE)
+        {
+            //
+            // If HW is not enabled and the head is not driving any display,
+            // then the SW state should be UNAVAILABLE.
+            //
+            if (!kheadGetDisplayInitialized_HAL(pGpu, pKernelHead))
+            {
+                pKernelHead->Vblank.IntrState = NV_HEAD_VBLANK_INTR_UNAVAILABLE;
+            }
+        }
+    }
+
+    return pKernelHead->Vblank.IntrState;
+}
+
+void
+kheadWriteVblankIntrState_IMPL
+(
+    OBJGPU     *pGpu,
+    KernelHead *pKernelHead,
+    NvU32       newstate
+)
+{
+    NvU32  previous;
+    NvBool enablehw = NV_FALSE; // Don't enable the hw by default
+    NvBool updatehw = NV_FALSE; // Don't update the hw by default
+
+    // Get the previous state for the transition checks below
+    previous = pKernelHead->Vblank.IntrState;
+
+    // Make sure we really support the requested next state
+    if ( (newstate != NV_HEAD_VBLANK_INTR_UNAVAILABLE) &&
+         (newstate != NV_HEAD_VBLANK_INTR_AVAILABLE) &&
+         (newstate != NV_HEAD_VBLANK_INTR_ENABLED) )
+    {
+        NV_PRINTF(LEVEL_ERROR, "Unknown state %x requested on head %d.\n",
+                  newstate, pKernelHead->PublicId);
+        return;
+    }
+
+    // Spew where we were and where we are going for tracking...
+#if defined(DEBUG)
+
+    NV_PRINTF(LEVEL_INFO, "Changing vblank state on pGpu=%p head %d: ", pGpu,
+              pKernelHead->PublicId);
+
+    switch(previous)
+    {
+        case NV_HEAD_VBLANK_INTR_UNAVAILABLE:
+            NV_PRINTF(LEVEL_INFO, "UNAVAILABLE -> ");
+            break;
+        case NV_HEAD_VBLANK_INTR_AVAILABLE:
+            NV_PRINTF(LEVEL_INFO, "AVAILABLE -> ");
+            break;
+        case NV_HEAD_VBLANK_INTR_ENABLED:
+            NV_PRINTF(LEVEL_INFO, "ENABLED -> ");
+            break;
+        default:
+            NV_PRINTF(LEVEL_INFO, "UNKNOWN -> ");
+            break;
+    }
+
+    switch(newstate)
+    {
+        case NV_HEAD_VBLANK_INTR_UNAVAILABLE:
+            NV_PRINTF(LEVEL_INFO, "UNAVAILABLE\n");
+            break;
+        case NV_HEAD_VBLANK_INTR_AVAILABLE:
+            NV_PRINTF(LEVEL_INFO, "AVAILABLE\n");
+            break;
+        case NV_HEAD_VBLANK_INTR_ENABLED:
+            NV_PRINTF(LEVEL_INFO, "ENABLED\n");
+            break;
+        default:
+            NV_PRINTF(LEVEL_INFO, "UNKNOWN\n");
+            break;
+    }
+
+#endif
+
+    // Move to the new state
+    switch(newstate)
+    {
+        // Move to the unavailable state. This has an implied disabled state.
+        case NV_HEAD_VBLANK_INTR_UNAVAILABLE:
+
+            // If the hw is on, turn it off
+            if (previous == NV_HEAD_VBLANK_INTR_ENABLED)
+            {
+                enablehw = NV_FALSE;
+                updatehw = NV_TRUE;
+            }
+            break;
+
+        // Move to the available state. This has an implied disabled state.
+        case NV_HEAD_VBLANK_INTR_AVAILABLE:
+
+            // If the hw is on, turn it off
+            if (previous == NV_HEAD_VBLANK_INTR_ENABLED)
+            {
+                enablehw = NV_FALSE;
+                updatehw = NV_TRUE;
+            }
+            break;
+
+        // Move to the enabled state. This has an implied available state.
+        case NV_HEAD_VBLANK_INTR_ENABLED:
+
+            // If the hw was off, turn it on
+            if (previous != NV_HEAD_VBLANK_INTR_ENABLED)
+            {
+                enablehw = NV_TRUE;
+                updatehw = NV_TRUE;
+            }
+            break;
+
+        default:
+            // We REALLY should never get here with the correct filtering above.
+ NV_PRINTF(LEVEL_ERROR, "Unknown state %x requested on head %d.\n", + newstate, pKernelHead->PublicId); + DBG_BREAKPOINT(); + return; + break; + } + + // Update the sw state + pKernelHead->Vblank.IntrState = newstate; + + // Update the hw + if (updatehw) + { + kheadWriteVblankIntrEnable_HAL(pGpu, pKernelHead, enablehw); + + } +} + +void +kheadProcessRgLineCallbacks_KERNEL +( + OBJGPU *pGpu, + KernelHead *pKernelHead, + NvU32 head, + NvU32 *headIntrMask, + NvU32 *clearIntrMask, + NvBool bIsIrqlIsr +) +{ + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + NvU32 rgIntrMask[MAX_RG_LINE_CALLBACKS_PER_HEAD] = {headIntr_RgLineA, headIntr_RgLineB}; + NvU32 rgIntr; + + for (rgIntr = 0; rgIntr < MAX_RG_LINE_CALLBACKS_PER_HEAD; rgIntr++) + { + if (*headIntrMask & rgIntrMask[rgIntr]) + { + kdispInvokeRgLineCallback(pKernelDisplay, head, rgIntr, bIsIrqlIsr); + *clearIntrMask |= rgIntrMask[rgIntr]; + } + } +} + diff --git a/src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c b/src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c new file mode 100644 index 0000000..47f348e --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c @@ -0,0 +1,360 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Display Instance Memory Module +* This file contains functions managing display on CPU RM +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/disp/inst_mem/disp_inst_mem.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/context_dma.h" +#include "disp/v03_00/dev_disp.h" + +/*! 
+ * @brief Get display instance memory and hash table size + * + * @param[in] pGpu + * @param[in] PInstMem + * @param[out] pTotalInstMemSize pointer to instance memory size + * @param[out] pHashTableSize pointer to hash table size + * + * @return void + */ +void +instmemGetSize_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 *pTotalInstMemSize, + NvU32 *pHashTableSize +) +{ + if (pTotalInstMemSize != NULL) + { + *pTotalInstMemSize = (NV_UDISP_HASH_LIMIT - NV_UDISP_HASH_BASE + 1) + + (NV_UDISP_OBJ_MEM_LIMIT - NV_UDISP_OBJ_MEM_BASE + 1); + } + + if (pHashTableSize != NULL) + { + *pHashTableSize = (NV_UDISP_HASH_LIMIT - NV_UDISP_HASH_BASE + 1); + } +} + +NvU32 +instmemGetHashTableBaseAddr_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem +) +{ + return NV_UDISP_HASH_BASE; +} + +/*! Check if the instance memory pointer is valid */ +NvBool +instmemIsValid_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 offset +) +{ + return (((offset << 5) < NV_UDISP_OBJ_MEM_LIMIT) && + ((offset << 5) > NV_UDISP_HASH_LIMIT)); +} + +NV_STATUS +instmemHashFunc_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvHandle hClient, + NvHandle hContextDma, + NvU32 dispChannelNum, + NvU32 *pResult +) +{ + NV_ASSERT_OR_RETURN(pResult, NV_ERR_INVALID_ARGUMENT); + + + // + // The hash function for display will be: + // hContextDma[9:0] + // ^ hContextDma[19:10] + // ^ hContextDma[29:20] + // ^ {hClient[7:0], hContextDma[31:30]} + // ^ {dispChannelNum[3:0], hClient[13:8]} + // ^ {7'h00, dispChannelNum[6:4]} + // + *pResult = ((hContextDma >> 0) & 0x3FF) ^ + ((hContextDma >> 10) & 0x3FF) ^ + ((hContextDma >> 20) & 0x3FF) ^ + (((hClient & 0xFF) << 2) | (hContextDma >> 30)) ^ + (((dispChannelNum & 0xF) << 6) | ((hClient >> 8) & 0x3F))^ + ((dispChannelNum >> 4) & 0x7); + + return NV_OK; +} + +/*! Generate hash table data */ +NvU32 +instmemGenerateHashTableData_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 hClient, + NvU32 offset, + NvU32 dispChannelNum +) +{ + return (SF_NUM(_UDISP, _HASH_TBL_CLIENT_ID, hClient) | + SF_NUM(_UDISP, _HASH_TBL_INSTANCE, offset) | + SF_NUM(_UDISP, _HASH_TBL_CHN, dispChannelNum)); +} + +/*! Write the Context DMA to display instance memory */ +NV_STATUS +instmemCommitContextDma_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma +) +{ + MEMORY_DESCRIPTOR *pMemDesc = memdescGetMemDescFromGpu(pContextDma->pMemDesc, pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + RmPhysAddr FrameAddr, Limit; + RmPhysAddr FrameAddr256Align; + RmPhysAddr LimitAlign; + NvU32 ctxDMAFlag; + NvU32 instoffset; + NvU8 *pInstMemCpuVA; + NvBool bIsSurfaceBl = NV_FALSE; + TRANSFER_SURFACE dest = {0}; + + // This function must be called in unicast. + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + instoffset = pContextDma->Instance[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] << 4; + NV_ASSERT_OR_RETURN(instoffset, NV_ERR_INVALID_OBJECT); + + FrameAddr = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + Limit = FrameAddr + pContextDma->Limit; + + // + // Set surface format + // + switch (DRF_VAL(OS03, _FLAGS, _PTE_KIND, pContextDma->Flags)) + { + case NVOS03_FLAGS_PTE_KIND_BL: + bIsSurfaceBl = NV_TRUE; + break; + case NVOS03_FLAGS_PTE_KIND_PITCH: + bIsSurfaceBl = NV_FALSE; + break; + case NVOS03_FLAGS_PTE_KIND_NONE: + { + NvU32 const kind = memdescGetPteKindForGpu(pMemDesc, pGpu); + + // Cannot bind a Z surface to display. Bug 439965. 
+            if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_Z, kind))
+                return NV_ERR_INVALID_ARGUMENT;
+
+            bIsSurfaceBl = memmgrIsSurfaceBlockLinear_HAL(pMemoryManager,
+                                                          pContextDma->pMemory,
+                                                          kind);
+        }
+        break;
+        default:
+            NV_PRINTF(LEVEL_ERROR, "Unexpected PTE_KIND value\n");
+            return NV_ERR_INVALID_STATE;
+    }
+
+    ctxDMAFlag = 0;
+
+    if (bIsSurfaceBl)
+    {
+        ctxDMAFlag |= SF_DEF(_DMA, _KIND, _BLOCKLINEAR);
+    }
+    else
+    {
+        ctxDMAFlag |= SF_DEF(_DMA, _KIND, _PITCH);
+    }
+
+    if (pContextDma->bReadOnly)
+    {
+        ctxDMAFlag |= SF_DEF(_DMA, _ACCESS, _READ_ONLY);
+    }
+    else
+    {
+        ctxDMAFlag |= SF_DEF(_DMA, _ACCESS, _READ_AND_WRITE);
+    }
+
+    switch (memdescGetAddressSpace(pMemDesc))
+    {
+        case ADDR_SYSMEM:
+        case ADDR_REGMEM:
+            // SOC display always needs the _PHYSICAL_NVM flag to be set, as display is not over PCI
+            if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY) ||
+                pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_SOC_SDM))
+            {
+                ctxDMAFlag |= SF_DEF(_DMA, _TARGET_NODE, _PHYSICAL_NVM);
+            }
+            else
+            {
+                if (pContextDma->CacheSnoop)
+                    ctxDMAFlag |= SF_DEF(_DMA, _TARGET_NODE, _PHYSICAL_PCI_COHERENT);
+                else
+                    ctxDMAFlag |= SF_DEF(_DMA, _TARGET_NODE, _PHYSICAL_PCI);
+            }
+            break;
+        case ADDR_FBMEM:
+            ctxDMAFlag |= SF_DEF(_DMA, _TARGET_NODE, _PHYSICAL_NVM);
+            break;
+        default:
+            NV_PRINTF(LEVEL_ERROR, "Invalid address space: %d\n",
+                      memdescGetAddressSpace(pMemDesc));
+            DBG_BREAKPOINT();
+            return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    dest.pMemDesc = pInstMem->pInstMemDesc;
+    dest.offset   = instoffset;
+
+    pInstMemCpuVA = memmgrMemBeginTransfer(pMemoryManager, &dest, NV_DMA_SIZE,
+                                           TRANSFER_FLAGS_SHADOW_ALLOC);
+    if (pInstMemCpuVA == NULL)
+    {
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+
+    MEM_WR32(pInstMemCpuVA + SF_OFFSET(NV_DMA_TARGET_NODE), ctxDMAFlag);  // word 0
+
+    // Address in disp ctxdma is 256B aligned
+    FrameAddr256Align = FrameAddr >> 8;
+    MEM_WR32(pInstMemCpuVA + SF_OFFSET(NV_DMA_ADDRESS_BASE_LO),           // word 1
+             NvU64_LO32(FrameAddr256Align));
+    MEM_WR32(pInstMemCpuVA + SF_OFFSET(NV_DMA_ADDRESS_BASE_HI),           // word 2
+             NvU64_HI32(FrameAddr256Align));
+
+    LimitAlign = Limit >> 8;
+    MEM_WR32(pInstMemCpuVA + SF_OFFSET(NV_DMA_ADDRESS_LIMIT_LO),          // word 3
+             NvU64_LO32(LimitAlign));
+    MEM_WR32(pInstMemCpuVA + SF_OFFSET(NV_DMA_ADDRESS_LIMIT_HI),          // word 4
+             NvU64_HI32(LimitAlign));
+
+    memmgrMemEndTransfer(pMemoryManager, &dest, NV_DMA_SIZE,
+                         TRANSFER_FLAGS_SHADOW_ALLOC);
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Update the Context DMA already in display instance memory
+ *
+ * NOTE: this control call may be called at high IRQL on WDDM.
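+ *       Because of that, it must not create any new mapping at call time;
+ *       see instmemStateLoad_IMPL, which keeps a persistent mapping to
+ *       instance memory for exactly this reason.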
+ */ +NV_STATUS +instmemUpdateContextDma_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma, + NvU64 *pNewAddress, + NvU64 *pNewLimit, + NvHandle hMemory, + NvU32 comprInfo +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_STATUS status = NV_OK; + NvU8 *pInst; + NvU32 instoffset; + TRANSFER_SURFACE dest = {0}; + + // Must use comprInfo to specify kind + NV_CHECK_OR_RETURN(LEVEL_SILENT, hMemory == NV01_NULL_OBJECT, NV_ERR_INVALID_ARGUMENT); + + instoffset = pContextDma->Instance[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] << 4; + NV_ASSERT(instoffset); + + dest.pMemDesc = pInstMem->pInstMemDesc; + dest.offset = instoffset; + + pInst = memmgrMemBeginTransfer(pMemoryManager, &dest, NV_DMA_SIZE, + TRANSFER_FLAGS_SHADOW_ALLOC | TRANSFER_FLAGS_SHADOW_INIT_MEM); + if (pInst == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto exit; + } + + if (pNewAddress != NULL) + { + // Address in disp ctxdma is 256B aligned + NvU64 newAddress256Align = (*pNewAddress) >> 8; + MEM_WR32(pInst + SF_OFFSET(NV_DMA_ADDRESS_BASE_LO), + NvU64_LO32(newAddress256Align)); + MEM_WR32(pInst + SF_OFFSET(NV_DMA_ADDRESS_BASE_HI), + NvU64_HI32(newAddress256Align)); + } + + if (pNewLimit != NULL) + { + NvU64 newLimitAlign = (*pNewLimit) >> 8; + MEM_WR32(pInst + SF_OFFSET(NV_DMA_ADDRESS_LIMIT_LO), + NvU64_LO32(newLimitAlign)); + MEM_WR32(pInst + SF_OFFSET(NV_DMA_ADDRESS_LIMIT_HI), + NvU64_HI32(newLimitAlign)); + } + + if (comprInfo != NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO_NONE) + { + NvU32 word = MEM_RD32(pInst + SF_OFFSET(NV_DMA_KIND)); + + if (comprInfo == NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO_FORMAT_BLOCK_LINEAR) + { + word = FLD_SF_DEF(_DMA, _KIND, _BLOCKLINEAR, word); + } + else + { + word = FLD_SF_DEF(_DMA, _KIND, _PITCH, word); + } + + MEM_WR32(pInst + SF_OFFSET(NV_DMA_KIND), word); + } + + memmgrMemEndTransfer(pMemoryManager, &dest, NV_DMA_SIZE, + TRANSFER_FLAGS_SHADOW_ALLOC | TRANSFER_FLAGS_SHADOW_INIT_MEM); + +exit: + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c b/src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c new file mode 100644 index 0000000..0d962d0 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c @@ -0,0 +1,959 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/**************************** Instmem Routines *****************************\
+*                                                                           *
+*          Display instance memory object function definitions.            *
+*                                                                           *
\***************************************************************************/
+
+#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0
+
+#include "gpu/disp/kern_disp.h"
+#include "gpu/disp/disp_channel.h"
+#include "gpu/disp/inst_mem/disp_inst_mem.h"
+#include "gpu_mgr/gpu_mgr.h"
+#include "gpu/mem_mgr/context_dma.h"
+#include "gpu/mem_mgr/mem_mgr.h"
+#include "os/nv_memory_type.h"
+#include "os/os.h"
+#include "containers/eheap_old.h"
+
+/*!
+ * Display Context DMA instance memory is always 2 16B blocks in size on all chips. There
+ * is no HW support for scatter lists. Instance memory should be naturally aligned.
+ */
+#define DISPLAY_CONTEXT_DMA_INST_SIZE  2
+#define DISPLAY_CONTEXT_DMA_INST_ALIGN 2
+
+#define DISP_INST_MEM_EHEAP_OWNER NvU32_BUILD('i','n','s','t')
+
+/*!
+ * A hardware display hash table entry.
+ */
+typedef struct
+{
+    NvHandle ht_ObjectHandle;
+    NvV32    ht_Context;
+} DISP_HW_HASH_TABLE_ENTRY;
+
+
+/*! @brief Constructor */
+NV_STATUS
+instmemConstruct_IMPL
+(
+    DisplayInstanceMemory *pInstMem
+)
+{
+    pInstMem->pInstMem = NULL;
+    pInstMem->pAllocedInstMemDesc = NULL;
+    pInstMem->pInstMemDesc = NULL;
+    pInstMem->pHashTable = NULL;
+    pInstMem->pInstHeap = NULL;
+
+    return NV_OK;
+}
+
+
+/*!
+ * @brief Instmem destructor
+ */
+void
+instmemDestruct_IMPL
+(
+    DisplayInstanceMemory *pInstMem
+)
+{
+}
+
+/*! @brief Initialize heap-related fields in display instance memory */
+static NV_STATUS
+instmemInitBitmap
+(
+    OBJGPU                *pGpu,
+    DisplayInstanceMemory *pInstMem,
+    NvU32                  instMemSize,
+    NvU32                  hashTableSize
+)
+{
+    NV_STATUS status = NV_OK;
+    NvU64     base, limit;
+    NvU64     allocSize, allocOffset;
+    NvU32     allocFlags;
+    NvU32     freeInstMemBase;
+    NvU32     freeInstMemSize;
+    NvU32     freeInstMemMax;
+
+    //
+    // Locate and size the free instance area. This is the base where
+    // allocations should start and size of the allocatable inst mem.
+    // Initially hash table is the only entity that's allocated.
+    //
+    freeInstMemBase = hashTableSize >> 4;
+    freeInstMemSize = instMemSize - hashTableSize;
+    freeInstMemMax  = (freeInstMemSize / 16) & ~0x07;
+
+    // Allocate the Instmem heap manager
+    pInstMem->pInstHeap = portMemAllocNonPaged(sizeof(OBJEHEAP));
+    if (pInstMem->pInstHeap == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Unable to allocate instance memory heap manager.\n");
+        status = NV_ERR_NO_MEMORY;
+        goto exit;
+    }
+    portMemSet(pInstMem->pInstHeap, 0x00, sizeof(OBJEHEAP));
+
+    NV_PRINTF(LEVEL_INFO, "FB Free Size      = 0x%x\n", freeInstMemSize);
+    NV_PRINTF(LEVEL_INFO, "FB Free Inst Base = 0x%x\n", freeInstMemBase);
+    NV_PRINTF(LEVEL_INFO, "FB Free Inst Max  = 0x%x\n",
+              freeInstMemMax + freeInstMemBase);
+
+    //
+    // Construct the Instmem heap manager - Pre-allocate mgmt structures
+    // to avoid dynamic allocation and allow bind/unbind at high IRQL
+    // on Windows. Size to fill hash table + NULL instance.
+    //
+    base  = freeInstMemBase;
+    limit = freeInstMemBase + freeInstMemMax + 1;
+    constructObjEHeap(
+        pInstMem->pInstHeap,
+        base,
+        limit,
+        0,                                  // sizeofMemBlock
+        pInstMem->nHashTableEntries + 1);   // numPreAllocMemStruct
+
+    // Reserve instance 0 as the NULL instance.
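+    //
+    // (Offset 0 doubles as the "invalid" instance encoding: hash table
+    // entries use instance 0 for NV_UDISP_HASH_TBL_INSTANCE_INVALID, and
+    // _instmemFreeContextDma() treats offset 0 as "already freed".)
+    //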
+    allocSize   = 1;
+    allocOffset = base;
+    allocFlags  = NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE;
+    status = pInstMem->pInstHeap->eheapAlloc(
+        pInstMem->pInstHeap,          // thisHeap
+        DISP_INST_MEM_EHEAP_OWNER,    // owner
+        &allocFlags,                  // flags
+        &allocOffset,                 // offset
+        &allocSize,                   // size
+        1,                            // offsetAlign
+        1,                            // sizeAlign
+        NULL,                         // ppMemBlock
+        NULL,                         // isolation id
+        NULL);                        // callback ownership checker
+    if (status != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "eheapAlloc failed for instance memory heap manager.\n");
+        status = NV_ERR_NO_MEMORY;
+    }
+
+exit:
+    return status;
+}
+
+/*! @brief Initialize hash-table-related fields in display instance memory */
+static NV_STATUS
+instmemInitHashTable
+(
+    OBJGPU                *pGpu,
+    DisplayInstanceMemory *pInstMem,
+    NvU32                  hashTableSize
+)
+{
+    NV_STATUS status = NV_OK;
+
+    pInstMem->nHashTableEntries = hashTableSize / sizeof(DISP_HW_HASH_TABLE_ENTRY);
+    pInstMem->hashTableBaseAddr = instmemGetHashTableBaseAddr_HAL(pGpu, pInstMem);
+
+    // Allocate Hash Table structure.
+    pInstMem->pHashTable = portMemAllocNonPaged(pInstMem->nHashTableEntries *
+                                                sizeof(SW_HASH_TABLE_ENTRY));
+    if (pInstMem->pHashTable == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Unable to allocate hash table.\n");
+        status = NV_ERR_NO_MEMORY;
+        goto exit;
+    }
+
+    // Initialize Hash Table.
+    portMemSet(pInstMem->pHashTable, 0x00, pInstMem->nHashTableEntries *
+                                           sizeof(SW_HASH_TABLE_ENTRY));
+
+
+exit:
+    return status;
+}
+
+/*!
+ * @brief Save instance memory parameters
+ *
+ * For dGPU called from mem_mgr initialization with reserved frame buffer memory. For SOC
+ * we dynamically allocate system memory later.
+ */
+void
+instmemSetMemory_IMPL
+(
+    OBJGPU                *pGpu,
+    DisplayInstanceMemory *pInstMem,
+    NV_ADDRESS_SPACE       dispInstMemAddrSpace,
+    NvU32                  dispInstMemAttr,
+    NvU64                  dispInstMemBase,
+    NvU32                  dispInstMemSize
+)
+{
+    pInstMem->instMemAddrSpace = dispInstMemAddrSpace;
+    pInstMem->instMemAttr      = dispInstMemAttr;
+    pInstMem->instMemBase      = dispInstMemBase;
+    pInstMem->instMemSize      = dispInstMemSize;
+}
+
+/*! @brief Initialize instance memory descriptor */
+static NV_STATUS
+instmemInitMemDesc
+(
+    OBJGPU                *pGpu,
+    DisplayInstanceMemory *pInstMem,
+    NvU32                  instMemSize
+)
+{
+    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
+    NV_STATUS      status = NV_OK;
+
+    if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
+    {
+        //
+        // The FB reserved memory logic does not get called for Tegra system
+        // memory scanout, so the inst mem descriptor is not initialized there.
+        // For now dispInstMemAttr is hardcoded to NV_MEMORY_CACHED; this should
+        // instead be set based on system configuration or a registry parameter.
+        //
+        instmemSetMemory(pGpu, pInstMem,
+                         ADDR_SYSMEM, NV_MEMORY_CACHED,
+                         0 /* base */, instMemSize);
+    }
+    else if (IS_GSP_CLIENT(pGpu))
+    {
+        // TODO: Need to respect RM overrides and keep the monolithic design the same as offload.
+ instmemSetMemory(pGpu, pInstMem, + ADDR_FBMEM, NV_MEMORY_WRITECOMBINED, + 0 , instMemSize); + } + + switch (pInstMem->instMemAddrSpace) + { + default: + case ADDR_FBMEM: + { + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memdescCreate(&pInstMem->pInstMemDesc, pGpu, + pInstMem->instMemSize, + DISP_INST_MEM_ALIGN, + NV_TRUE, pInstMem->instMemAddrSpace, + pInstMem->instMemAttr, + MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO), + exit); + + memdescDescribe(pInstMem->pInstMemDesc, + ADDR_FBMEM, + memmgrGetRsvdMemoryBase(pMemoryManager) + pInstMem->instMemBase, + pInstMem->instMemSize); + } + break; + + case ADDR_SYSMEM: + { + // + // memdescAlloc won't (currently) honor a request for sysmem alloc alignment! Overallocate + // and round up the address to work around this. + // + // Create a sub-memdesc to the aligned block. This keeps the alignment calculation local + // to this function. + // + NvU64 base; + NvU64 offset; + NvBool bContig = NV_TRUE; + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + // + // On Orin, display FE goes through the NISO SMMU to read + // from Instance Memory. As such, there's absolutely no + // reason why we need a contiguous allocation for Instance + // Memory. + // + bContig = NV_FALSE; + } + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memdescCreate(&pInstMem->pAllocedInstMemDesc, pGpu, + instMemSize + (DISP_INST_MEM_ALIGN - RM_PAGE_SIZE), + DISP_INST_MEM_ALIGN, + bContig, pInstMem->instMemAddrSpace, + pInstMem->instMemAttr, + MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO), + exit); + + memdescTagAlloc(status, NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_67, + pInstMem->pAllocedInstMemDesc); + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, status, + exit); + + base = memdescGetPhysAddr(pInstMem->pAllocedInstMemDesc, AT_GPU, 0); + offset = RM_ALIGN_UP(base, DISP_INST_MEM_ALIGN) - base; + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memdescCreateSubMem(&pInstMem->pInstMemDesc, pInstMem->pAllocedInstMemDesc, + pGpu, + offset, + instMemSize), + exit); + } + break; + } + +exit: + // Clean-up is handled by the caller + return status; +} + +/*! 
@brief Free all memory allocations done for display instance memory */
+static void
+instmemDestroy
+(
+    OBJGPU                *pGpu,
+    DisplayInstanceMemory *pInstMem
+)
+{
+    // Free up the inst mem descriptors
+    memdescDestroy(pInstMem->pInstMemDesc);
+    pInstMem->pInstMemDesc = NULL;
+
+    memdescFree(pInstMem->pAllocedInstMemDesc);
+    memdescDestroy(pInstMem->pAllocedInstMemDesc);
+    pInstMem->pAllocedInstMemDesc = NULL;
+
+    if (pInstMem->pInstHeap != NULL)
+    {
+        pInstMem->pInstHeap->eheapDestruct(pInstMem->pInstHeap);
+        portMemFree(pInstMem->pInstHeap);
+        pInstMem->pInstHeap = NULL;
+    }
+
+    portMemFree(pInstMem->pHashTable);
+    pInstMem->pHashTable = NULL;
+}
+
+NV_STATUS
+instmemStateInitLocked_IMPL
+(
+    OBJGPU                *pGpu,
+    DisplayInstanceMemory *pInstMem
+)
+{
+    NV_STATUS status = NV_OK;
+    RM_API   *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+    NvU32     hClient = pGpu->hInternalClient;
+    NvU32     hSubdevice = pGpu->hInternalSubdevice;
+    NvU32     instMemSize, hashTableSize;
+    NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS ctrlParams;
+
+    instmemGetSize_HAL(pGpu, pInstMem, &instMemSize, &hashTableSize);
+
+    NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
+        instmemInitHashTable(pGpu, pInstMem, hashTableSize), exit);
+
+    NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
+        instmemInitBitmap(pGpu, pInstMem, instMemSize, hashTableSize), exit);
+
+    NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
+        instmemInitMemDesc(pGpu, pInstMem, instMemSize), exit);
+
+    // Make internal RPC to write the instance memory register
+    ctrlParams.instMemAddrSpace = memdescGetAddressSpace(pInstMem->pInstMemDesc);
+    ctrlParams.instMemCpuCacheAttr = memdescGetCpuCacheAttrib(pInstMem->pInstMemDesc);
+    ctrlParams.instMemPhysAddr = memdescGetPhysAddr(pInstMem->pInstMemDesc, AT_GPU, 0);
+    ctrlParams.instMemSize = memdescGetSize(pInstMem->pInstMemDesc);
+
+    NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
+        pRmApi->Control(pRmApi, hClient, hSubdevice,
+                        NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM,
+                        &ctrlParams, sizeof(ctrlParams)), exit);
+
+exit:
+    if (status != NV_OK)
+    {
+        instmemDestroy(pGpu, pInstMem);
+    }
+
+    return status;
+}
+
+void
+instmemStateDestroy_IMPL
+(
+    OBJGPU                *pGpu,
+    DisplayInstanceMemory *pInstMem
+)
+{
+    instmemDestroy(pGpu, pInstMem);
+}
+
+NV_STATUS
+instmemStateLoad_IMPL
+(
+    OBJGPU                *pGpu,
+    DisplayInstanceMemory *pInstMem,
+    NvU32                  flags
+)
+{
+    NvBool bPersistent;
+
+    //
+    // We keep a persistent mapping to instance memory in two cases:
+    //   * Windows issues bind/update/unbind control calls with BYPASS_LOCK,
+    //     so we cannot generate a new BAR2 mapping at control call time.
+    //   * System memory backing.
+    //
+    bPersistent = (pInstMem->instMemAddrSpace == ADDR_SYSMEM);
+    if (bPersistent)
+    {
+        //
+        // Windows issues bind/update/unbind control calls with BYPASS_LOCK,
+        // so we cannot generate a new BAR2 mapping at control call time.
+        //
+        pInstMem->pInstMem = memdescMapInternal(pGpu, pInstMem->pInstMemDesc,
+                                                TRANSFER_FLAGS_PERSISTENT_CPU_MAPPING);
+        if (pInstMem->pInstMem == NULL)
+            return NV_ERR_NO_MEMORY;
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS
+instmemStateUnload_IMPL
+(
+    OBJGPU                *pGpu,
+    DisplayInstanceMemory *pInstMem,
+    NvU32                  flags
+)
+{
+    if (pInstMem->pInstMem != NULL)
+    {
+        memdescUnmapInternal(pGpu, pInstMem->pInstMemDesc, TRANSFER_FLAGS_NONE);
+        pInstMem->pInstMem = NULL;
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Reserve a chunk of display instance memory (will always be for Context DMAs).
+ * @return offset from the base of display instance memory (not base of FB).
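+ *         Offsets are tracked in 16-byte units, so each context DMA
+ *         occupies DISPLAY_CONTEXT_DMA_INST_SIZE (two) such blocks.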
+ */ +static NV_STATUS +_instmemReserveContextDma +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 *offset +) +{ + NV_STATUS rmStatus; + NvU64 allocSize = DISPLAY_CONTEXT_DMA_INST_SIZE; // size << 4; + NvU64 allocOffset; + NvU32 allocFlags = 0; + + *offset = 0; + + rmStatus = pInstMem->pInstHeap->eheapAlloc( + pInstMem->pInstHeap, // thisHeap + DISP_INST_MEM_EHEAP_OWNER, // owner + &allocFlags, // flags + &allocOffset, // offset + &allocSize, // size + DISPLAY_CONTEXT_DMA_INST_ALIGN, // offsetAlign + DISPLAY_CONTEXT_DMA_INST_ALIGN, // sizeAlign + NULL, // ppMemBlock + NULL, // isolation id + NULL); // callback ownership checker + + // return the allocation offset if successful + if (rmStatus == NV_OK) + { + *offset = (NvU32)allocOffset; + } + else + { + rmStatus = NV_ERR_NO_MEMORY; + } + + return rmStatus; +} + +/*! + * @brief Free display instance memory reserved for Context DMA. + */ +static NV_STATUS +_instmemFreeContextDma +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 offset +) +{ + // + // If instance is already set to 0, then it has already been freed. This can + // happen in some cases when a mode switch is happening while MIDI is playing + // using the timer alarm notifies. Ignoring this case can potentially cause a + // protection fault, so be careful. + // + if (offset == 0) + return NV_OK; + + if (pInstMem->pInstHeap == NULL) + return NV_OK; + + pInstMem->pInstHeap->eheapFree( + pInstMem->pInstHeap, // thisHeap + offset); // offset + + return NV_OK; +} + +static void +_instmemClearHashEntry +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 htEntry +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + TRANSFER_SURFACE dest = {0}; + NvU32 entryOffset; + DISP_HW_HASH_TABLE_ENTRY entry; + + pInstMem->pHashTable[htEntry].pContextDma = NULL; + pInstMem->pHashTable[htEntry].pDispChannel = NULL; + + // + // If we found the entry, clear the inst mem copy of the entry + // Start with offset of base of inst mem + // Add offset of base of hash table from base of inst mem + // Add the offset of entry from base of hash table + // + entryOffset = pInstMem->hashTableBaseAddr + + (sizeof(DISP_HW_HASH_TABLE_ENTRY) * htEntry); + + dest.pMemDesc = pInstMem->pInstMemDesc; + dest.offset = entryOffset; + + entry.ht_ObjectHandle = 0; + entry.ht_Context = instmemGenerateHashTableData_HAL(pGpu, pInstMem, + 0 /* client id */, + 0 /* NV_UDISP_HASH_TBL_INSTANCE_INVALID */, + 0 /* dispChannelNum */); + + NV_ASSERT_OK(memmgrMemWrite(pMemoryManager, &dest, &entry, sizeof(entry), + TRANSFER_FLAGS_NONE)); +} + +static NV_STATUS +_instmemRemoveHashEntry +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma, + DispChannel *pDispChannel +) +{ + NvU32 htEntry; + + for (htEntry = 0; htEntry < pInstMem->nHashTableEntries; htEntry++) + { + if ( (pInstMem->pHashTable[htEntry].pContextDma == pContextDma) && + (pInstMem->pHashTable[htEntry].pDispChannel == pDispChannel)) + { + _instmemClearHashEntry(pGpu, pInstMem, htEntry); + return NV_OK; + } + } + + return NV_ERR_INVALID_STATE; +} + +static NV_STATUS +_instmemAddHashEntry +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma, + DispChannel *pDispChannel, + NvU32 offset +) +{ + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pContextDma); + NvU32 entryOffset, dispChannelNum; + NvU32 Limit, i, Entry; + NvHandle handle = 
RES_GET_HANDLE(pContextDma);
+    NvU32          hash;
+    NV_STATUS      status;
+    TRANSFER_SURFACE dest = {0};
+    DISP_HW_HASH_TABLE_ENTRY entry;
+
+    status = kdispGetChannelNum_HAL(pKernelDisplay, pDispChannel->DispClass, pDispChannel->InstanceNumber, &dispChannelNum);
+    if (status != NV_OK)
+        return status;
+
+    // Query HAL for starting entry for this pair.
+    instmemHashFunc_HAL(pGpu, pInstMem, hClient, RES_GET_HANDLE(pContextDma), dispChannelNum, &hash);
+
+    //
+    // Since all the ctx dmas are 32 byte aligned, we don't need to store
+    // offsets in bytes. We store which 32-byte chunk the ctx dma resides in.
+    // So, right shift the whole thing by 5 after left shifting by 4 (we need
+    // to left shift by 4 since internally we track offsets in 16 byte chunks).
+    //
+    offset >>= (5 - 4); // offset <<= 4; followed by offset >>= 5
+
+    if (offset == 0) //NV_UDISP_HASH_TBL_INSTANCE_INVALID
+    {
+        NV_PRINTF(LEVEL_ERROR, "Instance pointer is invalid!!\n");
+        return (NV_ERR_GENERIC);
+    }
+
+    //
+    // Make sure instance memory pointer is valid as well. That is,
+    // it's within the mask range of possible instance values
+    //
+    NV_ASSERT(instmemIsValid_HAL(pGpu, pInstMem, offset));
+
+    // Make sure that hash is valid as well.
+    NV_ASSERT(hash < pInstMem->nHashTableEntries);
+
+    //
+    // Search table for free slot.
+    //
+    // Here's the Old way that we did this - Allows for arbitrary sized hash tables
+    //
+    // Limit = hash + pDispHalPvtInfo->pPram[ChID].nHashTableEntries;   // loop over whole table
+    // Entry = hash;
+    // while(Entry < Limit)
+    // {
+    //     if (pDispHalPvtInfo->pPram[ChID].pHashTable[Entry].Object == NULL)
+    //         break;
+    //
+    //     //
+    //     // if we just checked the last entry and have more entries
+    //     // to check for empty, wrap search back to beginning of table
+    //     //
+    //     if (Entry == (pDispHalPvtInfo->pPram[ChID].nHashTableEntries-1) &&
+    //         ((Entry + 1) < Limit))
+    //     {
+    //         Limit = Limit - Entry - 1; // -1 since we count the one we just checked
+    //         Entry = 0;
+    //         continue;
+    //     }
+    //
+    //     Entry++;
+    // }
+    //
+    // But since we know that this hash table is always 512 in size, let's go ahead
+    // and make this assumption to make the loops faster. Or even better, let's just
+    // make sure that the Hash Depth is a power of 2. That way, we can use
+    // nHashTableEntries - 1 as the mask of what entries are valid - and this allows for any
+    // nHashTableEntries that is a power of 2.
+    //
+    NV_ASSERT(!(pInstMem->nHashTableEntries & (pInstMem->nHashTableEntries - 1)));
+
+    Limit = hash + pInstMem->nHashTableEntries; // loop over whole table
+    Entry = hash;
+    for (i = hash; i < Limit; i++)
+    {
+        // Mask off high bits of i since we loop the hash table.
+        Entry = i & (pInstMem->nHashTableEntries - 1);
+        if (pInstMem->pHashTable[Entry].pContextDma == NULL)
+            break;
+    }
+
+    if (pInstMem->pHashTable[Entry].pContextDma != NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Display Hash table is FULL!!\n");
+        return NV_ERR_TOO_MANY_PRIMARIES;
+    }
+
+    entryOffset = pInstMem->hashTableBaseAddr +
+                  (Entry * sizeof(DISP_HW_HASH_TABLE_ENTRY));
+
+    // Add object to the Hash Table.
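+    //
+    // The SW shadow table and the HW table in instance memory are updated
+    // together so that lookups at unbind time match what the display HW sees.
+    //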
+    pInstMem->pHashTable[Entry].pContextDma  = pContextDma;
+    pInstMem->pHashTable[Entry].pDispChannel = pDispChannel;
+
+    dest.pMemDesc = pInstMem->pInstMemDesc;
+    dest.offset   = entryOffset;
+
+    entry.ht_ObjectHandle = handle;
+
+    // Note that we have full 32 bit client id at this point and we only need to tell hw the lower 14 bits
+    entry.ht_Context = instmemGenerateHashTableData_HAL(
+                           pGpu,
+                           pInstMem,
+                           hClient,
+                           offset,
+                           dispChannelNum);
+
+    NV_ASSERT_OK_OR_RETURN(memmgrMemWrite(pMemoryManager, &dest, &entry, sizeof(entry),
+                                          TRANSFER_FLAGS_NONE));
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Is this ContextDma bound to the given DispChannel?
+ */
+static NV_STATUS
+_instmemProbeHashEntry
+(
+    OBJGPU                *pGpu,
+    DisplayInstanceMemory *pInstMem,
+    ContextDma            *pContextDma,
+    DispChannel           *pDispChannel
+)
+{
+    KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    NvU32          dispChannelNum;
+    NV_STATUS      status;
+    NvU32          hash = 0;
+    NvU32          limit;
+    NvU32          i;
+
+    status = kdispGetChannelNum_HAL(pKernelDisplay, pDispChannel->DispClass, pDispChannel->InstanceNumber, &dispChannelNum);
+    if (status == NV_OK)
+    {
+        instmemHashFunc_HAL(pGpu, pInstMem,
+                            RES_GET_CLIENT_HANDLE(pContextDma),
+                            RES_GET_HANDLE(pContextDma),
+                            dispChannelNum, &hash);
+    }
+
+    // Hash table must be a power of 2 currently
+    NV_ASSERT(!(pInstMem->nHashTableEntries & (pInstMem->nHashTableEntries - 1)));
+
+    limit = hash + pInstMem->nHashTableEntries; // loop over whole table
+
+    for (i = hash; i < limit; i++)
+    {
+        NvU32 htEntry = i & (pInstMem->nHashTableEntries - 1);
+
+        if ((pInstMem->pHashTable[htEntry].pDispChannel == pDispChannel) &&
+            (pInstMem->pHashTable[htEntry].pContextDma == pContextDma))
+        {
+            return NV_OK;
+        }
+    }
+
+    return NV_ERR_OBJECT_NOT_FOUND;
+}
+
+/*!
+ * @brief Bind the ContextDma to the given Display Channel
+ */
+NV_STATUS
+instmemBindContextDma_IMPL
+(
+    OBJGPU                *pGpu,
+    DisplayInstanceMemory *pInstMem,
+    ContextDma            *pContextDma,
+    DispChannel           *pDispChannel
+)
+{
+    NvU32     gpuSubDevInst;
+    NV_STATUS status;
+
+    gpuSubDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
+
+    // Production SW requires that each ctx dma is bound only once
+    status = _instmemProbeHashEntry(pGpu, pInstMem, pContextDma, pDispChannel);
+    if (status == NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "The ctx dma (0x%x) has already been bound\n",
+                  RES_GET_HANDLE(pContextDma));
+        status = NV_ERR_STATE_IN_USE;
+        goto exit;
+    }
+
+    if (pContextDma->InstRefCount[gpuSubDevInst] == 0)
+    {
+        // Reserve inst mem space for this ctx dma
+        status = _instmemReserveContextDma(pGpu, pInstMem, &(pContextDma->Instance[gpuSubDevInst]));
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "Failed to alloc space in disp inst mem for ctx dma 0x%x\n",
+                      RES_GET_HANDLE(pContextDma));
+            goto exit;
+        }
+
+        // Call into HAL to write inst mem with the ctx dma info
+        status = instmemCommitContextDma_HAL(pGpu, pInstMem, pContextDma);
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "Failed to commit ctx dma (0x%x) to inst mem\n",
+                      RES_GET_HANDLE(pContextDma));
+            _instmemFreeContextDma(pGpu, pInstMem, pContextDma->Instance[gpuSubDevInst]);
+            pContextDma->Instance[gpuSubDevInst] = 0;
+            goto exit;
+        }
+    }
+
+    //
+    // Now add the hash table entry for this ctx dma
+    // We loop around this call instead of looping at MEM_WR level because we
+    // also want to propagate the SW hash table.
+ // + status = _instmemAddHashEntry(pGpu, pInstMem, + pContextDma, + pDispChannel, + pContextDma->Instance[gpuSubDevInst]); + if (status != NV_OK) + { + if (pContextDma->InstRefCount[gpuSubDevInst] == 0) + { + instmemDecommitContextDma_HAL(pGpu, pInstMem, pContextDma); + _instmemFreeContextDma(pGpu, pInstMem, pContextDma->Instance[gpuSubDevInst]); + pContextDma->Instance[gpuSubDevInst] = 0; + } + goto exit; + } + + // We have one more reference to the context DMA in instance memory now. + pContextDma->InstRefCount[gpuSubDevInst]++; + +exit: + + return status; +} + +/*! + * @brief Remove reference to an instance allocation. Free after last reference. + */ +void +_instmemRemoveReference +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma +) +{ + NvU32 gpuSubDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + NV_ASSERT(pContextDma->InstRefCount[gpuSubDevInst]); + if (pContextDma->InstRefCount[gpuSubDevInst]) + { + pContextDma->InstRefCount[gpuSubDevInst]--; + + // Remove DMA object if this is the last binding + if (pContextDma->InstRefCount[gpuSubDevInst] == 0) + { + instmemDecommitContextDma_HAL(pGpu, pInstMem, pContextDma); + _instmemFreeContextDma(pGpu, pInstMem, pContextDma->Instance[gpuSubDevInst]); + pContextDma->Instance[gpuSubDevInst] = 0; + } + } +} + +/*! + * @brief Unbind the ContextDma from the given Display Channel + */ +NV_STATUS +instmemUnbindContextDma_IMPL +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma, + DispChannel *pDispChannel +) +{ + NV_STATUS status; + + // If ContextDma is not bound to this subdevice, there is no bookkeeping to do + status = _instmemRemoveHashEntry(pGpu, pInstMem, pContextDma, pDispChannel); + if (status == NV_OK) + { + _instmemRemoveReference(pGpu, pInstMem, pContextDma); + } + + return status; +} + +/*! + * @brief Unbind the ContextDma from all Display channels on the given context + */ +void +instmemUnbindContextDmaFromAllChannels_IMPL +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma +) +{ + NvU32 htEntry; + + // Check all entries in the hash table + for (htEntry = 0; htEntry < pInstMem->nHashTableEntries; htEntry++) + { + if (pInstMem->pHashTable[htEntry].pContextDma == pContextDma) + { + _instmemClearHashEntry(pGpu, pInstMem, htEntry); + _instmemRemoveReference(pGpu, pInstMem, pContextDma); + } + } + +} + +/*! + * @brief Unbind the ContextDma from all Display channels on the given context + */ +void +instmemUnbindDispChannelContextDmas_IMPL +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + DispChannel *pDispChannel +) +{ + NvU32 htEntry; + + // Check all entries in the hash table + for (htEntry = 0; htEntry < pInstMem->nHashTableEntries; htEntry++) + { + if (pInstMem->pHashTable[htEntry].pDispChannel == pDispChannel) + { + ContextDma *pContextDma = pInstMem->pHashTable[htEntry].pContextDma; + + _instmemClearHashEntry(pGpu, pInstMem, htEntry); + _instmemRemoveReference(pGpu, pInstMem, pContextDma); + } + } + +} diff --git a/src/nvidia/src/kernel/gpu/disp/kern_disp.c b/src/nvidia/src/kernel/gpu/disp/kern_disp.c new file mode 100644 index 0000000..01e21dc --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/kern_disp.c @@ -0,0 +1,1755 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Kernel Display Module +* This file contains functions managing display on CPU RM +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "resserv/resserv.h" +#include "rmapi/rmapi.h" +#include "rmapi/rs_utils.h" +#include "os/os.h" + +#include "gpu/gpu.h" +#include "gpu/device/device.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/inst_mem/disp_inst_mem.h" +#include "gpu/disp/head/kernel_head.h" +#include "gpu/disp/disp_objs.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/timer/objtmr.h" +#include "core/locks.h" +#include "ctrl/ctrl402c.h" +#include "platform/acpi_common.h" +#include "nvrm_registry.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "ctrl/ctrl2080.h" + +#include "class/cl5070.h" +#include "class/cl917a.h" +#include "class/cl917b.h" +#include "class/cl917e.h" +#include "class/cl927c.h" +#include "class/cl947d.h" +#include "class/cl957d.h" +#include "class/cl977d.h" +#include "class/cl987d.h" +#include "class/clc37a.h" +#include "class/clc37b.h" +#include "class/clc37d.h" +#include "class/clc37e.h" +#include "class/clc57a.h" +#include "class/clc57b.h" +#include "class/clc57d.h" +#include "class/clc57e.h" +#include "class/clc67a.h" +#include "class/clc67b.h" +#include "class/clc67d.h" +#include "class/clc67e.h" +#include "class/clc77f.h" //NVC77F_ANY_CHANNEL_DMA + +#include "class/clc97a.h" +#include "class/clc97b.h" +#include "class/clc97d.h" +#include "class/clc97e.h" + +#include "class/clcc7a.h" +#include "class/clcc7b.h" +#include "class/clcc7d.h" +#include "class/clcc7e.h" + +#include "rmapi/rmapi_utils.h" +#include "class/cl0073.h" + +NV_STATUS +kdispConstructEngine_IMPL(OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + ENGDESCRIPTOR engDesc) +{ + NV_STATUS status; + NvU32 data; + + // + // NOTE: DO NOT call IpVersion _HAL functions in ConstructEngine. + // IP version based _HAL functions can only be used starting StatePreInit. + // Long-term: RM offload initialization will be moved earlier so KernelDisplay + // has the ability to use IP version HAL functions even in construct phase. + // + + // + // Sanity check: the only time KERNEL_DISPLAY module should be enabled + // while DISP is disabled is on KERNEL_ONLY build. 
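+    // (Virtual GPUs and firmware clients fall into that category, hence the
+    // assert below.)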
+    //
+    NV_ASSERT(IS_VIRTUAL(pGpu) || IS_FW_CLIENT(pGpu) || RMCFG_MODULE_DISP);
+
+    //
+    // We also need to check if we are in certain configurations which can't
+    // even attempt a control call to DISP.
+    //
+    if (pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_IS_MISSING))
+        return NV_ERR_NOT_SUPPORTED;
+
+    // Create children
+    pKernelDisplay->pInst = NULL;
+    status = kdispConstructInstMem_HAL(pKernelDisplay);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    status = kdispConstructKhead(pKernelDisplay);
+
+    // We defer checking whether DISP has been disabled some other way until
+    // StateInit, when we can do a control call.
+
+    // make sure this is cleared properly
+    pKernelDisplay->pOsVblankCallback = NULL;
+
+    portAtomicSetS32(&pKernelDisplay->lowLatencyLock, 0);
+
+    if ((osReadRegistryDword(pGpu, NV_REG_INTERNAL_PANEL_DISCONNECTED, &data) == NV_OK)
+        && (data == NV_REG_INTERNAL_PANEL_DISCONNECTED_ENABLE))
+    {
+        pKernelDisplay->setProperty(pKernelDisplay,
+                                    PDB_PROP_KDISP_INTERNAL_PANEL_DISCONNECTED,
+                                    NV_TRUE);
+    }
+    else
+    {
+        pKernelDisplay->setProperty(pKernelDisplay,
+                                    PDB_PROP_KDISP_INTERNAL_PANEL_DISCONNECTED,
+                                    NV_FALSE);
+    }
+
+    if (osReadRegistryDword(pGpu,
+                            NV_REG_STR_RM_ENABLE_AGGRESSIVE_VBLANK,
+                            &data) != NV_OK)
+    {
+        data = NV_REG_STR_RM_ENABLE_AGGRESSIVE_VBLANK_DEFAULT;
+    }
+    if (NV_REG_STR_RM_ENABLE_AGGRESSIVE_VBLANK_ENABLE == data)
+    {
+        pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_AGGRESSIVE_VBLANK_HANDLING, NV_TRUE);
+    }
+
+    return status;
+}
+
+void
+kdispDestruct_IMPL
+(
+    KernelDisplay *pKernelDisplay
+)
+{
+    // Destroy children
+    kdispDestructInstMem_HAL(pKernelDisplay);
+    kdispDestructKhead(pKernelDisplay);
+    kdispDestroyVBlank(pKernelDisplay);
+}
+
+/*! Constructor for DisplayInstanceMemory */
+NV_STATUS
+kdispConstructInstMem_IMPL
+(
+    KernelDisplay *pKernelDisplay
+)
+{
+    NV_STATUS status;
+    DisplayInstanceMemory *pInst;
+
+    status = objCreate(&pInst, pKernelDisplay, DisplayInstanceMemory);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    pKernelDisplay->pInst = pInst;
+    return NV_OK;
+}
+
+/*! Destructor for DisplayInstanceMemory */
+void
+kdispDestructInstMem_IMPL
+(
+    KernelDisplay *pKernelDisplay
+)
+{
+    objDelete(pKernelDisplay->pInst);
+    pKernelDisplay->pInst = NULL;
+}
+
+/*! Constructor for Kernel head */
+NV_STATUS
+kdispConstructKhead_IMPL
+(
+    KernelDisplay *pKernelDisplay
+)
+{
+    NV_STATUS status;
+    KernelHead *pKernelHead;
+    NvU8 headIdx;
+
+    for (headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++)
+    {
+        status = objCreate(&pKernelHead, pKernelDisplay, KernelHead);
+        if (status != NV_OK)
+        {
+            return status;
+        }
+
+        pKernelDisplay->pKernelHead[headIdx] = pKernelHead;
+        pKernelDisplay->pKernelHead[headIdx]->PublicId = headIdx;
+    }
+    return NV_OK;
+}
+
+/*! Destructor for Kernel head */
+void
+kdispDestructKhead_IMPL
+(
+    KernelDisplay *pKernelDisplay
+)
+{
+    NvU8 headIdx;
+
+    for (headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++)
+    {
+        objDelete(pKernelDisplay->pKernelHead[headIdx]);
+        pKernelDisplay->pKernelHead[headIdx] = NULL;
+    }
+}
+
+NV_STATUS
+kdispAllocateCommonHandle_IMPL
+(
+    OBJGPU        *pGpu,
+    KernelDisplay *pKernelDisplay
+)
+{
+    NV_STATUS rmStatus;
+    NvHandle  hClient;
+    NvHandle  hDevice;
+    NvHandle  hSubdevice;
+    NvHandle  hSubscription = NV01_NULL_OBJECT;
+    RM_API   *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
+
+    rmStatus = rmapiutilAllocClientAndDeviceHandles(pRmApi, pGpu, &hClient,
+                                                    &hDevice, &hSubdevice);
+    NV_ASSERT_OR_RETURN(rmStatus == NV_OK, rmStatus);
+
+    rmStatus = pRmApi->AllocWithSecInfo(pRmApi, hClient, hDevice, &hSubscription,
+                                        NV04_DISPLAY_COMMON, NULL, 0, RMAPI_ALLOC_FLAGS_NONE,
+                                        NULL, &pRmApi->defaultSecInfo);
+    NV_ASSERT_OR_RETURN(rmStatus == NV_OK, rmStatus);
+
+    pKernelDisplay->hInternalClient = hClient;
+    pKernelDisplay->hInternalDevice = hDevice;
+    pKernelDisplay->hInternalSubdevice = hSubdevice;
+    pKernelDisplay->hDispCommonHandle = hSubscription;
+
+    return NV_OK;
+}
+
+void
+kdispDestroyCommonHandle_IMPL
+(
+    KernelDisplay *pKernelDisplay
+)
+{
+    NV_STATUS rmStatus;
+    RM_API   *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
+
+    rmStatus = pRmApi->FreeWithSecInfo(pRmApi, pKernelDisplay->hInternalClient,
+                                       pKernelDisplay->hDispCommonHandle,
+                                       RMAPI_ALLOC_FLAGS_NONE, &pRmApi->defaultSecInfo);
+    NV_ASSERT(rmStatus == NV_OK);
+
+    rmapiutilFreeClientAndDeviceHandles(pRmApi, &pKernelDisplay->hInternalClient,
+                                        &pKernelDisplay->hInternalDevice,
+                                        &pKernelDisplay->hInternalSubdevice);
+
+    pKernelDisplay->hInternalClient = 0;
+    pKernelDisplay->hInternalDevice = 0;
+    pKernelDisplay->hInternalSubdevice = 0;
+    pKernelDisplay->hDispCommonHandle = 0;
+}
+
+NV_STATUS
+kdispStatePreInitLocked_IMPL(OBJGPU *pGpu,
+                             KernelDisplay *pKernelDisplay)
+{
+    NV_STATUS status;
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+    NvU32 hClient = pGpu->hInternalClient;
+    NvU32 hSubdevice = pGpu->hInternalSubdevice;
+    NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS ctrlParams;
+
+    if (!gpuFuseSupportsDisplay_HAL(pGpu))
+        return NV_ERR_NOT_SUPPORTED;
+
+    status = pRmApi->Control(pRmApi, hClient, hSubdevice,
+                             NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_IP_VERSION,
+                             &ctrlParams, sizeof(ctrlParams));
+    if (status != NV_OK)
+    {
+        NV_PRINTF(LEVEL_WARNING,
+                  "Failed to read display IP version (FUSE disabled), status=0x%x\n",
+                  status);
+        return status;
+    }
+
+    // NOTE: KernelDisplay IpVersion _HAL functions can only be called after this point.
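+    //
+    // Ordering sketch (illustrative): chip-keyed HALs such as
+    // kdispConstructInstMem_HAL (already used during ConstructEngine above)
+    // are safe before this point, whereas IP-version-keyed HALs dispatch
+    // through tables that gpuInitDispIpHal() below selects based on
+    // ctrlParams.ipVersion.
+    //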
+ status = gpuInitDispIpHal(pGpu, ctrlParams.ipVersion); + + kdispUpdatePdbAfterIpHalInit(pKernelDisplay); + + kdispAllocateCommonHandle(pGpu, pKernelDisplay); + + return status; +} + +NV_STATUS +kdispInitBrightcStateLoad_IMPL(OBJGPU *pGpu, + KernelDisplay *pKernelDisplay) +{ + NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *pBrightcInfo = NULL; + NvU32 status = NV_ERR_NOT_SUPPORTED; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvBool bInternalSkuFuseEnabled; + + // Skip ACPI _DSM backlight control if internal SKU fuse is enabled + bInternalSkuFuseEnabled = gpuIsInternalSkuFuseEnabled_HAL(pGpu); + + pBrightcInfo = portMemAllocNonPaged(sizeof(NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS)); + if (pBrightcInfo == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Could not allocate memory for pBrightcInfo\n"); + return NV_ERR_NO_MEMORY; + } + portMemSet(pBrightcInfo, 0, sizeof(*pBrightcInfo)); + + pBrightcInfo->status = status; + if ((pKernelDisplay != NULL) + && (pKernelDisplay->pStaticInfo->internalDispActiveMask != 0) + && !(bInternalSkuFuseEnabled + || (pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_INTERNAL_PANEL_DISCONNECTED)))) + { + // Fill in the Backlight Method Data. + pBrightcInfo->backLightDataSize = sizeof(pBrightcInfo->backLightData); + status = osCallACPI_DSM(pGpu, ACPI_DSM_FUNCTION_CURRENT, NV_ACPI_GENERIC_FUNC_GETBACKLIGHT, + (NvU32 *)(pBrightcInfo->backLightData), + &pBrightcInfo->backLightDataSize); + pBrightcInfo->status = status; + } + + status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD, + pBrightcInfo, sizeof(*pBrightcInfo)); + + portMemFree(pBrightcInfo); + + return status; +} + +NV_STATUS +kdispSetupAcpiEdid_IMPL +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay +) +{ + NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA_PARAMS *pEdidParams = NULL; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvU32 status = NV_ERR_GENERIC; + NvU32 index; + + pEdidParams = portMemAllocNonPaged(sizeof(NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA_PARAMS)); + if (pEdidParams == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Could not allocate memory for pEdidParams\n"); + return NV_ERR_NO_MEMORY; + } + portMemSet(pEdidParams, 0, sizeof(*pEdidParams)); + + pEdidParams->tableLen = pGpu->acpiMethodData.dodMethodData.acpiIdListLen / sizeof(NvU32); + + for (index = 0; index < pEdidParams->tableLen; index++) + { + pEdidParams->edidTable[index].bufferSize = MAX_EDID_SIZE_FROM_SBIOS; + status = osCallACPI_DDC(pGpu, pGpu->acpiMethodData.dodMethodData.acpiIdList[index], + pEdidParams->edidTable[index].edidBuffer, + &pEdidParams->edidTable[index].bufferSize, NV_TRUE); + pEdidParams->edidTable[index].acpiId = pGpu->acpiMethodData.dodMethodData.acpiIdList[index]; + pEdidParams->edidTable[index].status = status; + } + + status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA, + pEdidParams, sizeof(*pEdidParams)); + + portMemFree(pEdidParams); + + return status; +} + +NV_STATUS +kdispStateInitLocked_IMPL(OBJGPU *pGpu, + KernelDisplay *pKernelDisplay) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS status = NV_OK; + KernelDisplayStaticInfo *pStaticInfo; + + pStaticInfo = portMemAllocNonPaged(sizeof(KernelDisplayStaticInfo)); + if (pStaticInfo == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Could not allocate KernelDisplayStaticInfo\n"); + status = NV_ERR_NO_MEMORY; + goto exit; + } + portMemSet(pStaticInfo, 0, sizeof(*pStaticInfo)); + + 
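+    //
+    // The NV_CHECK_OK_OR_GOTO idiom used below is shorthand for roughly the
+    // following (illustrative expansion only):
+    //
+    //     status = pRmApi->Control(...);
+    //     if (status != NV_OK)
+    //     {
+    //         NV_PRINTF(LEVEL_ERROR, "...");
+    //         goto exit;
+    //     }
+    //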
+    NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
+        pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
+                        NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+                        pStaticInfo, sizeof(*pStaticInfo)),
+        exit);
+
+    pKernelDisplay->pStaticInfo = pStaticInfo;
+    pKernelDisplay->numHeads = pStaticInfo->numHeads;
+    pKernelDisplay->numDispChannels = pStaticInfo->numDispChannels;
+    pStaticInfo = NULL;
+
+    // allocate channel-client mapping table
+    pKernelDisplay->pClientChannelTable = portMemAllocNonPaged(sizeof(KernelDisplayClientChannelMap) *
+                                                               pKernelDisplay->numDispChannels);
+    if (pKernelDisplay->pClientChannelTable == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Could not allocate clientChannelTable\n");
+        status = NV_ERR_NO_MEMORY;
+        goto exit;
+    }
+
+    // Initiate Brightc module state load
+    status = kdispInitBrightcStateLoad_HAL(pGpu, pKernelDisplay);
+    if (status != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "rmapi control call for brightc state load failed\n");
+        goto exit;
+    }
+
+    // Set up ACPI DDC data in Physical RM for future usage
+    status = kdispSetupAcpiEdid_HAL(pGpu, pKernelDisplay);
+    if (status != NV_OK)
+    {
+        NV_PRINTF(LEVEL_WARNING, "rmapi control call for acpi child device init failed\n");
+        goto exit;
+    }
+
+    if (pKernelDisplay->pInst != NULL)
+    {
+        NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
+                            instmemStateInitLocked(pGpu, pKernelDisplay->pInst), exit);
+    }
+
+    // NOTE: Fills IMP parameters and populates them in the disp object on Tegra
+    NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
+                        kdispImportImpData_HAL(pKernelDisplay), exit);
+
+exit:
+    portMemFree(pStaticInfo);
+
+    if (status != NV_OK)
+    {
+        portMemFree(pKernelDisplay->pClientChannelTable);
+        pKernelDisplay->pClientChannelTable = NULL;
+    }
+
+    return status;
+}
+
+void
+kdispStateDestroy_IMPL(OBJGPU *pGpu,
+                       KernelDisplay *pKernelDisplay)
+{
+
+    if (pKernelDisplay->pInst != NULL)
+    {
+        instmemStateDestroy(pGpu, pKernelDisplay->pInst);
+    }
+
+    portMemFree(pKernelDisplay->pClientChannelTable);
+    pKernelDisplay->pClientChannelTable = NULL;
+
+    portMemFree((void*) pKernelDisplay->pStaticInfo);
+    pKernelDisplay->pStaticInfo = NULL;
+
+    kdispDestroyCommonHandle(pKernelDisplay);
+}
+
+NV_STATUS
+kdispAllocateSharedMem_IMPL
+(
+    OBJGPU        *pGpu,
+    KernelDisplay *pKernelDisplay
+)
+{
+    NV_STATUS rmStatus;
+    void *address = NULL;
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+    NV_ADDRESS_SPACE addressSpace = ADDR_FBMEM;
+    NV0073_CTRL_CMD_SYSTEM_MAP_SHARED_DATA_PARAMS params = {0};
+    NvBool bIsFbBroken = NV_FALSE;
+
+    NV_ASSERT_OR_RETURN(pKernelDisplay->pSharedData == NULL, NV_ERR_INVALID_STATE);
+
+    bIsFbBroken = pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) ||
+                  pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM);
+    if (bIsFbBroken)
+        addressSpace = ADDR_SYSMEM;
+
+    rmStatus = memdescCreate(&pKernelDisplay->pSharedMemDesc,
+                             pGpu,
+                             sizeof(KernelDisplaySharedMem),
+                             RM_PAGE_SIZE,
+                             NV_TRUE,
+                             addressSpace,
+                             NV_MEMORY_UNCACHED,
+                             MEMDESC_FLAGS_NONE);
+    if (rmStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "failed to create memdesc from FB!\n");
+        return rmStatus;
+    }
+
+    rmStatus = memdescAlloc(pKernelDisplay->pSharedMemDesc);
+    if (rmStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "failed to allocate memory from FB!\n");
+        goto exit;
+    }
+
+    address = memdescMapInternal(pGpu, pKernelDisplay->pSharedMemDesc, 0);
+    if (address == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "failed to map memory!\n");
+        goto exit;
+    }
+    pKernelDisplay->pSharedData = (KernelDisplaySharedMem *)address;
+
+    params.memDescInfo.base =
+        memdescGetPhysAddr(pKernelDisplay->pSharedMemDesc, AT_GPU, 0);
+    params.memDescInfo.size = sizeof(KernelDisplaySharedMem);
+    params.memDescInfo.alignment = pKernelDisplay->pSharedMemDesc->Alignment;
+    params.memDescInfo.addressSpace = addressSpace;
+    params.memDescInfo.cpuCacheAttrib = NV_MEMORY_UNCACHED;
+    params.bMap = NV_TRUE;
+    rmStatus = pRmApi->Control(pRmApi,
+                               kdispGetInternalClientHandle(pKernelDisplay),
+                               kdispGetDispCommonHandle(pKernelDisplay),
+                               NV0073_CTRL_CMD_SYSTEM_MAP_SHARED_DATA,
+                               &params, sizeof(params));
+    if (rmStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "NV0073_CTRL_CMD_SYSTEM_MAP_SHARED_DATA RM control failed!\n");
+        goto exit;
+    }
+
+    return rmStatus;
+
+exit:
+    kdispFreeSharedMem(pGpu, pKernelDisplay);
+    return rmStatus;
+}
+
+void
+kdispFreeSharedMem_IMPL
+(
+    OBJGPU        *pGpu,
+    KernelDisplay *pKernelDisplay
+)
+{
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+    NV0073_CTRL_CMD_SYSTEM_MAP_SHARED_DATA_PARAMS params = {0};
+
+    if (pKernelDisplay->pSharedData != NULL)
+    {
+        params.bMap = NV_FALSE;
+        pRmApi->Control(pRmApi, kdispGetInternalClientHandle(pKernelDisplay),
+                        kdispGetDispCommonHandle(pKernelDisplay),
+                        NV0073_CTRL_CMD_SYSTEM_MAP_SHARED_DATA,
+                        &params, sizeof(params));
+
+        memdescUnmapInternal(pGpu, pKernelDisplay->pSharedMemDesc, 0);
+        pKernelDisplay->pSharedData = NULL;
+    }
+
+    if (pKernelDisplay->pSharedMemDesc != NULL)
+    {
+        memdescFree(pKernelDisplay->pSharedMemDesc);
+        memdescDestroy(pKernelDisplay->pSharedMemDesc);
+        pKernelDisplay->pSharedMemDesc = NULL;
+    }
+}
+
+NV_STATUS
+kdispStateLoad_IMPL
+(
+    OBJGPU        *pGpu,
+    KernelDisplay *pKernelDisplay,
+    NvU32          flags
+)
+{
+    NV_STATUS status = NV_OK;
+
+    if (pKernelDisplay->pInst != NULL)
+        status = instmemStateLoad(pGpu, pKernelDisplay->pInst, flags);
+
+    kdispAllocateSharedMem_HAL(pGpu, pKernelDisplay);
+
+    return status;
+}
+
+NV_STATUS
+kdispStateUnload_IMPL
+(
+    OBJGPU        *pGpu,
+    KernelDisplay *pKernelDisplay,
+    NvU32          flags
+)
+{
+    NV_STATUS status = NV_OK;
+
+    if (pKernelDisplay->pInst != NULL)
+        status = instmemStateUnload(pGpu, pKernelDisplay->pInst, flags);
+
+    kdispFreeSharedMem_HAL(pGpu, pKernelDisplay);
+
+    return status;
+}
+
+/*! Get and Populate IMP init data for Tegra */
+NV_STATUS
+kdispImportImpData_IMPL(KernelDisplay *pKernelDisplay)
+{
+    OBJGPU   *pGpu = ENG_GET_GPU(pKernelDisplay);
+    RM_API   *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+    NvU32     hClient = pGpu->hInternalClient;
+    NvU32     hSubdevice = pGpu->hInternalSubdevice;
+    NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS params;
+    NV_STATUS nvStatus;
+
+    //
+    // osTegraSocGetImpImportData was originally called to collect memory and
+    // clock data for IMP from BPMP and kernel drivers. Now, since this
+    // functionality is supported only on Linux, and we also need support on
+    // Windows, most of the information is collected in physical RM itself,
+    // rather than using a Linux OS layer function. (The function is expected
+    // to fail on other OSes besides Linux.)
+    //
+    nvStatus = osTegraSocGetImpImportData(pGpu, &params.tegraImpImportData);
+    (void) nvStatus; // silence compiler warning re: unused variable
+    NV_PRINTF(LEVEL_INFO,
+              "osTegraSocGetImpImportData returned nvStatus = 0x%08X\n",
+              nvStatus);
+
+    //
+    // The following RmCtrl call was originally called to communicate
+    // information collected from the osTegraSocGetImpImportData call to
+    // physical RM. Now, only a small amount of information is communicated,
+    // but the RmCtrl call still invokes RM boot-time code to collect and
+    // process information on its own.
+    //
+    NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, hClient, hSubdevice,
+                           NV2080_CTRL_CMD_INTERNAL_DISPLAY_SET_IMP_INIT_INFO,
+                           &params, sizeof(params)));
+
+    return NV_OK;
+}
+
+/*! Get internal enum equivalent of the HW class number */
+NV_STATUS
+kdispGetIntChnClsForHwCls_IMPL
+(
+    KernelDisplay *pKernelDisplay,
+    NvU32          hwClass,
+    DISPCHNCLASS  *pDispChnClass
+)
+{
+    // sanity check
+    if (pDispChnClass == NULL)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    switch (hwClass)
+    {
+        case NV917A_CURSOR_CHANNEL_PIO:
+        case NVC37A_CURSOR_IMM_CHANNEL_PIO:
+        case NVC57A_CURSOR_IMM_CHANNEL_PIO:
+        case NVC67A_CURSOR_IMM_CHANNEL_PIO:
+        case NVC97A_CURSOR_IMM_CHANNEL_PIO:
+        case NVCC7A_CURSOR_IMM_CHANNEL_PIO:
+            *pDispChnClass = dispChnClass_Curs;
+            break;
+
+        case NV917B_OVERLAY_IMM_CHANNEL_PIO:
+            *pDispChnClass = dispChnClass_Ovim;
+            break;
+
+        case NV927C_BASE_CHANNEL_DMA:
+            *pDispChnClass = dispChnClass_Base;
+            break;
+
+        case NV947D_CORE_CHANNEL_DMA:
+        case NV957D_CORE_CHANNEL_DMA:
+        case NV977D_CORE_CHANNEL_DMA:
+        case NV987D_CORE_CHANNEL_DMA:
+        case NVC37D_CORE_CHANNEL_DMA:
+        case NVC57D_CORE_CHANNEL_DMA:
+        case NVC67D_CORE_CHANNEL_DMA:
+        case NVC97D_CORE_CHANNEL_DMA:
+        case NVCC7D_CORE_CHANNEL_DMA:
+            *pDispChnClass = dispChnClass_Core;
+            break;
+
+        case NV917E_OVERLAY_CHANNEL_DMA:
+            *pDispChnClass = dispChnClass_Ovly;
+            break;
+
+        case NVC37B_WINDOW_IMM_CHANNEL_DMA:
+        case NVC57B_WINDOW_IMM_CHANNEL_DMA:
+        case NVC67B_WINDOW_IMM_CHANNEL_DMA:
+        case NVC97B_WINDOW_IMM_CHANNEL_DMA:
+        case NVCC7B_WINDOW_IMM_CHANNEL_DMA:
+            *pDispChnClass = dispChnClass_Winim;
+            break;
+
+        case NVC37E_WINDOW_CHANNEL_DMA:
+        case NVC57E_WINDOW_CHANNEL_DMA:
+        case NVC67E_WINDOW_CHANNEL_DMA:
+        case NVC97E_WINDOW_CHANNEL_DMA:
+        case NVCC7E_WINDOW_CHANNEL_DMA:
+            *pDispChnClass = dispChnClass_Win;
+            break;
+
+        case NVC77F_ANY_CHANNEL_DMA:
+            // The Any channel is a kernel-only channel, so assert in case this is physical RM.
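+            //
+            // A rough expansion of the assert below (illustrative only):
+            //
+            //     if (!RMCFG_FEATURE_KERNEL_RM)
+            //     {
+            //         NV_ASSERT_FAILED(...);
+            //         return NV_ERR_INVALID_CHANNEL;
+            //     }
+            //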
+ NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_INVALID_CHANNEL); + *pDispChnClass = dispChnClass_Any; + break; + + default: + NV_PRINTF(LEVEL_ERROR, "Unknown channel class %x\n", hwClass); + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +void +kdispNotifyCommonEvent_IMPL +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 notifyIndex, + void *pNotifyParams +) +{ + PEVENTNOTIFICATION pEventNotifications; + NvU32 *pNotifyActions; + NvU32 disableCmd, singleCmd; + NvU32 subDeviceInst; + NOTIFICATION *pParams = (NOTIFICATION *)pNotifyParams; + RS_SHARE_ITERATOR it = serverutilShareIter(classId(NotifShare)); + + // search notifiers with events hooked up for this gpu + while (serverutilShareIterNext(&it)) + { + RsShared *pShared = it.pShared; + DispCommon *pDispCommon; + DisplayApi *pDisplayApi; + Device *pDevice; + INotifier *pNotifier; + NotifShare *pNotifierShare = dynamicCast(pShared, NotifShare); + + if ((pNotifierShare == NULL) || (pNotifierShare->pNotifier == NULL)) + continue; + + pNotifier = pNotifierShare->pNotifier; + pDispCommon = dynamicCast(pNotifier, DispCommon); + + // Only notify matching GPUs + if (pDispCommon == NULL) + continue; + + pDevice = dynamicCast(RES_GET_REF(pDispCommon)->pParentRef->pResource, Device); + if (GPU_RES_GET_GPU(pDevice) != pGpu) + continue; + pDisplayApi = staticCast(pDispCommon, DisplayApi); + + gpuSetThreadBcState(GPU_RES_GET_GPU(pDevice), pDisplayApi->bBcResource); + + disableCmd = NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + singleCmd = NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE; + + // get notify actions list + subDeviceInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + pNotifyActions = pDisplayApi->pNotifyActions[subDeviceInst]; + if (pNotifyActions == NULL) + { + continue; + } + + // get event list + pEventNotifications = inotifyGetNotificationList(pNotifier); + if (pEventNotifications == NULL) + { + continue; + } + + // skip if client not "listening" to events of this type + if (pNotifyActions[notifyIndex] == disableCmd) + { + continue; + } + + // ping events bound to subdevice associated with pGpu + osEventNotification(pGpu, pEventNotifications, + (notifyIndex | OS_EVENT_NOTIFICATION_INDEX_MATCH_SUBDEV), + pParams, sizeof(*pParams)); + + // reset if single shot notify action + if (pNotifyActions[notifyIndex] == singleCmd) + { + pNotifyActions[notifyIndex] = disableCmd; + } + } +} + +void +kdispNotifyEvent_IMPL +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 notifyIndex, + void *pNotifyParams, + NvU32 notifyParamsSize, + NvV32 info32, + NvV16 info16 +) +{ + PEVENTNOTIFICATION pEventNotifications; + NvU32 *pNotifyActions; + NvU32 disableCmd, singleCmd; + NvU32 subDeviceInst; + RS_SHARE_ITERATOR it = serverutilShareIter(classId(NotifShare)); + + // search notifiers with events hooked up for this gpu + while (serverutilShareIterNext(&it)) + { + RsShared *pShared = it.pShared; + DispObject *pDispObject; + DisplayApi *pDisplayApi; + INotifier *pNotifier; + Device *pDevice; + NotifShare *pNotifierShare = dynamicCast(pShared, NotifShare); + + if ((pNotifierShare == NULL) || (pNotifierShare->pNotifier == NULL)) + continue; + + pNotifier = pNotifierShare->pNotifier; + pDispObject = dynamicCast(pNotifier, DispObject); + + // Only notify matching GPUs + if (pDispObject == NULL) + continue; + + pDevice = dynamicCast(RES_GET_REF(pDispObject)->pParentRef->pResource, Device); + + if (GPU_RES_GET_GPU(pDevice) != pGpu) + continue; + + pDisplayApi = staticCast(pDispObject, DisplayApi); + + 
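+        //
+        // From here the flow mirrors kdispNotifyCommonEvent() above: resolve
+        // the per-subdevice notify-action table, deliver the event, then
+        // disarm single-shot actions. E.g. (illustrative), a client armed with
+        // NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE gets exactly one
+        // osEventNotification and must re-arm before the next event.
+        //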
+        gpuSetThreadBcState(GPU_RES_GET_GPU(pDevice), pDisplayApi->bBcResource);
+
+        disableCmd = NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
+        singleCmd = NV0073_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
+
+        // get notify actions list
+        subDeviceInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
+        pNotifyActions = pDisplayApi->pNotifyActions[subDeviceInst];
+        if (pNotifyActions == NULL)
+        {
+            continue;
+        }
+
+        // get event list
+        pEventNotifications = inotifyGetNotificationList(pNotifier);
+        if (pEventNotifications == NULL)
+        {
+            continue;
+        }
+
+        // skip if client not "listening" to events of this type
+        if (pNotifyActions[notifyIndex] == disableCmd)
+        {
+            continue;
+        }
+
+        // ping events bound to subdevice associated with pGpu
+        osEventNotification(pGpu, pEventNotifications,
+                            (notifyIndex | OS_EVENT_NOTIFICATION_INDEX_MATCH_SUBDEV),
+                            pNotifyParams, notifyParamsSize);
+
+        // reset if single shot notify action
+        if (pNotifyActions[notifyIndex] == singleCmd)
+        {
+            pNotifyActions[notifyIndex] = disableCmd;
+        }
+    }
+}
+
+void kdispAcquireLowLatencyLock(volatile NvS32 *pLowLatencyLock)
+{
+    while (!portAtomicCompareAndSwapS32(pLowLatencyLock, 1, 0))
+        osSpinLoop();
+}
+
+NvBool kdispAcquireLowLatencyLockConditional(volatile NvS32 *pLowLatencyLock)
+{
+    return portAtomicCompareAndSwapS32(pLowLatencyLock, 1, 0);
+}
+
+void kdispReleaseLowLatencyLock(volatile NvS32 *pLowLatencyLock)
+{
+    portAtomicCompareAndSwapS32(pLowLatencyLock, 0, 1);
+}
+
+NV_STATUS kdispSetupVBlank_IMPL(OBJGPU *pGpu, KernelDisplay *pKernelDisplay, void * pProc,
+                                void * pParm1, void * pParm2, NvU32 Head, void * pParm3)
+{
+    POSVBLANKCALLBACK pTmpCallback;
+
+    if (pKernelDisplay == NULL)
+    {
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    pTmpCallback = pKernelDisplay->pOsVblankCallback;
+
+    if (pProc == NULL)
+    {
+        // Clear this head from the callback mask
+        pKernelDisplay->vblankCallbackHeadMask &= ~NVBIT(Head);
+    }
+
+    // If the callback data has not been initialized, do so now.
+    if (!pTmpCallback)
+    {
+        pTmpCallback = portMemAllocNonPaged(sizeof(OSVBLANKCALLBACK) * kdispGetNumHeads(pKernelDisplay));
+        if (NULL != pTmpCallback)
+        {
+            portMemSet(pTmpCallback, 0x0, sizeof(OSVBLANKCALLBACK) * kdispGetNumHeads(pKernelDisplay));
+        }
+        else
+        {
+            return NV_ERR_INSUFFICIENT_RESOURCES;
+        }
+    }
+
+    //
+    // Need to protect pKernelDisplay->pOsVblankCallback accesses as
+    // kdispOptimizePerFrameOsCallbacks (headIntr_Vblank) may be running.
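+    // The lock is a bare CAS spinlock (kdispAcquireLowLatencyLock above); the
+    // intended pattern, shown here only as a sketch, is:
+    //
+    //     kdispAcquireLowLatencyLock(&pKernelDisplay->lowLatencyLock);
+    //     ... read or update pKernelDisplay->pOsVblankCallback ...
+    //     kdispReleaseLowLatencyLock(&pKernelDisplay->lowLatencyLock);
+    //
+    // kdispAcquireLowLatencyLockConditional() is the try-lock variant for
+    // paths that must not spin.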
+    //
+    kdispAcquireLowLatencyLock(&pKernelDisplay->lowLatencyLock);
+
+    pKernelDisplay->pOsVblankCallback = pTmpCallback;
+    if (pKernelDisplay->pOsVblankCallback)
+    {
+        pKernelDisplay->pOsVblankCallback[Head].pProc = pProc;
+        pKernelDisplay->pOsVblankCallback[Head].pParm1 = pParm1;
+        pKernelDisplay->pOsVblankCallback[Head].pParm2 = pParm2;
+        pKernelDisplay->pOsVblankCallback[Head].pParm3 = pParm3;
+    }
+
+    if (pProc)
+    {
+        // Add this head to the callback mask
+        pKernelDisplay->vblankCallbackHeadMask |= NVBIT(Head);
+    }
+
+    kdispReleaseLowLatencyLock(&pKernelDisplay->lowLatencyLock);
+
+    return NV_OK;
+}
+
+void
+kdispDestroyVBlank_IMPL(KernelDisplay *pKernelDisplay)
+{
+    portMemFree(pKernelDisplay->pOsVblankCallback);
+    pKernelDisplay->pOsVblankCallback = NULL;
+}
+
+
+// This handling must be protected by the lowLatencyLock
+NV_STATUS kdispOptimizePerFrameOsCallbacks_IMPL
+(
+    OBJGPU              *pGpu,
+    KernelDisplay       *pKernelDisplay,
+    NvBool               clearIntr,
+    THREAD_STATE_NODE   *pThreadState,
+    NvU32               *pIntrServicedHeadMask,
+    MC_ENGINE_BITVECTOR *pIntrPending
+)
+{
+    NV_STATUS status = NV_OK;
+    NvU32 headIntrMask = 0;
+    NvU32 head, exVblankServiceHeadMask = 0, vblankServicedHeadMask = 0;
+    NvBool bIsLowLatencyInterruptLine = NV_FALSE;
+
+    *pIntrServicedHeadMask = vblankServicedHeadMask;
+    NV_ASSERT_OR_RETURN(pKernelDisplay, NV_ERR_INVALID_ARGUMENT);
+
+    // handle win_sem interrupt
+    kdispHandleWinSemEvt_HAL(pGpu, pKernelDisplay, pThreadState);
+
+    //
+    // Handle AWAKEN interrupts inline to avoid regressing high fps performance
+    // with enabling of immediate flip completion notification to OS (Bug 1976509)
+    //
+    kdispServiceAwakenIntr_HAL(pGpu, pKernelDisplay, pThreadState);
+
+    if (!IS_GSP_CLIENT(pGpu))
+    {
+        exVblankServiceHeadMask = kdispGetDeferredVblankHeadMask(pKernelDisplay);
+    }
+
+    for (head = 0; head < kdispGetNumHeads(pKernelDisplay); ++head)
+    {
+        KernelHead *pKernelHead = KDISP_GET_HEAD(pKernelDisplay, head);
+
+        headIntrMask = kheadReadPendingRgLineIntr_HAL(pGpu, pKernelHead, pThreadState);
+
+        if (headIntrMask != 0)
+        {
+            NvU32 clearIntrMask = 0;
+
+            kheadProcessRgLineCallbacks_HAL(pGpu, pKernelHead, head, &headIntrMask, &clearIntrMask, osIsISR());
+
+            if (clearIntrMask != 0)
+            {
+                kheadResetRgLineIntrMask_HAL(pGpu, pKernelHead, clearIntrMask, pThreadState);
+            }
+        }
+
+        if (kheadReadPendingVblank_HAL(pGpu, pKernelHead, NULL, pThreadState))
+        {
+            headIntrMask |= headIntr_Vblank;
+        }
+
+        // Is vblank pending for this head?
+        if (headIntrMask & headIntr_Vblank)
+        {
+            OSVBLANKCALLBACK osVblankCallback;
+            VBLANKCALLBACK *pCallback = NULL;
+            NvBool dataValid = NV_FALSE;
+
+            if (!IS_GSP_CLIENT(pGpu))
+            {
+                // Keep track of which heads we serviced
+                vblankServicedHeadMask |= NVBIT(head);
+            }
+
+            // Clear the intr if we were asked to
+            if (clearIntr == NV_TRUE)
+            {
+                if (bIsLowLatencyInterruptLine)
+                {
+                    kheadResetPendingLastData_HAL(pGpu, pKernelHead, pThreadState);
+                }
+                else
+                {
+                    kheadResetPendingVblank_HAL(pGpu, pKernelHead, pThreadState);
+                }
+            }
+
+            //
+            // Copy the data locally as pKernelDisplay->pOsVblankCallback may be updated
+            // by kdispSetupVBlank as we are running here.
+            //
+            if (pKernelDisplay->pOsVblankCallback)
+            {
+                osVblankCallback = pKernelDisplay->pOsVblankCallback[head];
+                dataValid = NV_TRUE;
+            }
+
+            if (dataValid && osVblankCallback.pProc)
+            {
+                //
+                // Increment the count so that the DPC does not process this callback
+                // as well during headProcessVblankCallbacks.
+                //
+                pCallback = (VBLANKCALLBACK *) osVblankCallback.pParm3;
+                if (pCallback)
+                {
+                    // Increment count only when not deferred.
+                    if (!(exVblankServiceHeadMask & (NVBIT(head))))
+                    {
+                        pCallback->VBlankCount++;
+                    }
+                }
+
+                //
+                // Typically this is KMD's vblank callback function which can immediately
+                // call RmAddDeleteVblankCallback to remove and free this and the corresponding
+                // RM callback above - aka osVblankCallback.pParm3.
+                //
+
+                if ((pCallback != NULL) && pCallback->bIsVblankNotifyEnable && !pKernelDisplay->bIsPanelReplayEnabled)
+                {
+                    osVblankCallback.pProc(osVblankCallback.pParm1, osVblankCallback.pParm2);
+                }
+            }
+        }
+    }
+
+    *pIntrServicedHeadMask = vblankServicedHeadMask;
+
+    return status;
+}
+
+
+// This function must be called with the lowLatencyLock held
+void
+kdispHandleAggressiveVblank_IMPL
+(
+    OBJGPU              *pGpu,
+    KernelDisplay       *pKernelDisplay,
+    THREAD_STATE_NODE   *pThreadState,
+    MC_ENGINE_BITVECTOR *pPending
+)
+{
+    NV_STATUS rmStatus = NV_OK;
+    NvU32 vblankIntrServicedHeadMask = 0;
+
+    // This function is only called if the display interrupt is pending.
+
+    // Service some Vblank events locally
+    pKernelDisplay->isrVblankHeads = 0;
+
+    //
+    // This assumes aggressive vblank handling and can only be
+    // done on Windows (the only platform this function is
+    // compiled for anyway).
+    //
+
+    // Service vblank and rg line intrs by calling KMD callback
+    rmStatus = kdispOptimizePerFrameOsCallbacks(pGpu, pKernelDisplay, NV_TRUE, pThreadState,
+                                                &vblankIntrServicedHeadMask, pPending);
+    NV_ASSERT(rmStatus == NV_OK);
+
+    //
+    // Need to set up deferred vblank handling for once the
+    // GPUs Lock is released since we cleared vblank intrs.
+    //
+    if (vblankIntrServicedHeadMask)
+    {
+        vblankIntrServicedHeadMask |= kdispGetDeferredVblankHeadMask(pKernelDisplay);
+        kdispSetDeferredVblankHeadMask(pKernelDisplay, vblankIntrServicedHeadMask);
+    }
+}
+
+/*!
+ * @brief Handle the aggressive vblank inline such that KMD callbacks are not delayed
+ *        in case of VR to avoid stutter.
+ *        This is required as a WAR for Bug# 1778552
+ *
+ * @param[in] pGpu           OBJGPU pointer
+ * @param[in] pKernelDisplay KernelDisplay pointer
+ *
+ */
+void kdispApplyAggressiveVblankHandlingWar_IMPL
+(
+    OBJGPU        *pGpu,
+    KernelDisplay *pKernelDisplay
+)
+{
+}
+
+void
+kdispSetWarPurgeSatellitesOnCoreFree_IMPL
+(
+    KernelDisplay *pKernelDisplay,
+    NvBool         value
+)
+{
+    pKernelDisplay->bWarPurgeSatellitesOnCoreFree = value;
+}
+
+NV_STATUS
+kdispRegisterRgLineCallback_IMPL
+(
+    KernelDisplay  *pKernelDisplay,
+    RgLineCallback *pRgLineCallback,
+    NvU32           head,
+    NvU32           rgIntrLine,
+    NvBool          bEnable
+)
+{
+    NV_ASSERT_OR_RETURN(head < OBJ_MAX_HEADS, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(rgIntrLine < MAX_RG_LINE_CALLBACKS_PER_HEAD, NV_ERR_INVALID_ARGUMENT);
+
+    RgLineCallback **slot = &pKernelDisplay->rgLineCallbackPerHead[head][rgIntrLine];
+
+    if (bEnable && *slot == NULL)
+    {
+        *slot = pRgLineCallback;
+    }
+    else if (!bEnable && *slot == pRgLineCallback)
+    {
+        *slot = NULL;
+    }
+    else
+    {
+        //
+        // OBJDISP is the authority for *allocating* these "slots";
+        // KernelDisplay trusts it as an allocator.
+        // If we try to register a callback in an existing slot, or free an
+        // empty slot, it means OBJDISP has created conflicting allocations or
+        // has allowed a double-free. (Or RgLineCallback has provided invalid
+        // parameters.)
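+        //
+        // E.g. (illustrative), a double registration trips this path:
+        //
+        //     kdispRegisterRgLineCallback(pKernelDisplay, pCb, 0, 1, NV_TRUE); // OK, slot filled
+        //     kdispRegisterRgLineCallback(pKernelDisplay, pCb, 0, 1, NV_TRUE); // falls through to the assert below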
+ // + NV_ASSERT_FAILED("Invalid KernelDisplay state for RgLineCallback"); + return NV_ERR_INVALID_STATE; + } + + return NV_OK; +} + +void +kdispInvokeRgLineCallback_KERNEL +( + KernelDisplay *pKernelDisplay, + NvU32 head, + NvU32 rgIntrLine, + NvBool bIsIrqlIsr +) +{ + NV_ASSERT_OR_RETURN_VOID(head < OBJ_MAX_HEADS); + NV_ASSERT_OR_RETURN_VOID(rgIntrLine < MAX_RG_LINE_CALLBACKS_PER_HEAD); + +} + +#define HOTPLUG_PROFILE 0 + +#if HOTPLUG_PROFILE + + #define ISR_TSTAMP_SIZE 18000 /* 5 minutes (5*60Hz*60)*/ + + NvU32 timeStampIndexISR = ISR_TSTAMP_SIZE-1; + + tmr_tstamp_u timeStampStartISR[ISR_TSTAMP_SIZE]; + tmr_tstamp_u timeStampDeltaISR[ISR_TSTAMP_SIZE]; + +#endif + +void +kdispServiceLowLatencyIntrs_KERNEL +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 headmask, + NvU32 state, + THREAD_STATE_NODE *pThreadState, + NvU32 *pIntrServicedHeadMask, + MC_ENGINE_BITVECTOR *pIntrPending +) +{ + NvU32 pending, check_pending, pending_checked; + NvU32 Head; + NvU32 maskNonEmptyQueues[OBJ_MAX_HEADS]; // array of masks of VBLANK_STATE_PROCESS_XXX_LATENCY bits, indicating which queues are non-empty + NvU32 unionNonEmptyQueues = 0; // mask of VBLANK_STATE_PROCESS_XXX_LATENCY bits, union of queue states of all heads w/ pending vblank ints + NvU32 Count = 0; + NvU32 i, skippedcallbacks; + NvU32 maskCallbacksStillPending = 0; + KernelHead *pKernelHead = NULL; + NvU32 head, headIntrMask, deferredVblank = kdispGetDeferredVblankHeadMask(pKernelDisplay); + + if (pIntrServicedHeadMask != NULL) + { + *pIntrServicedHeadMask = 0; + } + +#if HOTPLUG_PROFILE + OBJTMR *pTmr; + pTmr = GPU_GET_TIMER(pGpu); + if (++timeStampIndexISR >= ISR_TSTAMP_SIZE) + timeStampIndexISR = 0; + + tmrGetCurrentTime(pTmr, &timeStampStartISR[timeStampIndexISR].time32.hi, &timeStampStartISR[timeStampIndexISR].time32.lo); + + // For the ISR we want to know how much time since the last ISR. 
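+    //
+    // Worked example (illustrative): at a steady 60 Hz cadence the entries are
+    // ~16.7 ms apart, so timeStampDeltaISR[i].time64 lands around 16,666,667
+    // time units (at whatever resolution tmrGetCurrentTime reports).
+    //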
+    if (timeStampIndexISR)
+    {
+        NvU64 temp64;
+
+        temp64 = timeStampStartISR[timeStampIndexISR].time64;
+        temp64 -= timeStampStartISR[timeStampIndexISR-1].time64;
+
+        timeStampDeltaISR[timeStampIndexISR].time64 = temp64;
+    }
+#endif
+
+    // handle win_sem interrupt
+    kdispHandleWinSemEvt_HAL(pGpu, pKernelDisplay, pThreadState);
+
+    // handle awaken interrupt
+    kdispServiceAwakenIntr_HAL(pGpu, pKernelDisplay, pThreadState);
+
+    for (head = 0; head < kdispGetNumHeads(pKernelDisplay); ++head)
+    {
+        KernelHead *pKernelHead = KDISP_GET_HEAD(pKernelDisplay, head);
+
+        headIntrMask = kheadReadPendingRgLineIntr_HAL(pGpu, pKernelHead, pThreadState);
+        if (headIntrMask != 0)
+        {
+            NvU32 clearIntrMask = 0;
+
+            kheadProcessRgLineCallbacks_HAL(pGpu,
+                                            pKernelHead,
+                                            head,
+                                            &headIntrMask,
+                                            &clearIntrMask,
+                                            osIsISR());
+            if (clearIntrMask != 0)
+            {
+                kheadResetRgLineIntrMask_HAL(pGpu, pKernelHead, clearIntrMask, pThreadState);
+            }
+        }
+    }
+
+    // handle rg_sem interrupt
+    for (head = 0; head < kdispGetNumHeads(pKernelDisplay); ++head)
+    {
+        HEADINTRMASK headMask = 0;
+        KernelHead *pKernelHead = KDISP_GET_HEAD(pKernelDisplay, head);
+
+        kheadReadPendingRgSemIntr_HAL(pGpu, pKernelHead, &headMask, pThreadState, NULL);
+        if (headMask != 0)
+        {
+            kheadHandleRgSemIntr_HAL(pGpu, pKernelHead, &headMask, pThreadState);
+        }
+    }
+
+    // If the caller didn't specify which queue, assume they wanted all of them
+    if (!(state & VBLANK_STATE_PROCESS_ALL_CALLBACKS) )
+    {
+        state |= VBLANK_STATE_PROCESS_ALL_CALLBACKS;
+    }
+
+    // If the headmask is 0, we should process all heads
+    if (headmask == 0)
+    {
+        headmask = 0xFFFFFFFF;
+    }
+
+    //
+    // If we are being asked to process the callbacks now, regardless of the true irqspending,
+    // we force the pending mask to the head mask passed in.
+    //
+    if (state & VBLANK_STATE_PROCESS_IMMEDIATE)
+    {
+        pending = headmask;
+    }
+    else
+    {
+        // We're here because at least one of the PCRTC bits MAY be pending.
+        pending = kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, pThreadState);
+        pending |= deferredVblank;
+    }
+
+    // Reset vblank deferred Mask
+    kdispSetDeferredVblankHeadMask(pKernelDisplay, 0x0);
+
+    // No sense in doing anything if there is nothing pending.
+    if (pending == 0)
+    {
+        goto done;
+    }
+
+    //
+    // We want to check for pending service now and then we check again each
+    // time through the loop. Keep these separate.
+    //
+    check_pending = pending;
+
+    // We have not checked anything yet
+    pending_checked = 0;
+
+    // Start with head 0
+    Head = 0;
+
+    //
+    // We keep scanning all supported heads, and if we have something pending,
+    // check the associated queues
+    //
+    while (pending_checked != pending)
+    {
+        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);
+
+        // Move on if this crtc's interrupt isn't pending...
+        if ( (headmask & check_pending & ~pending_checked) & NVBIT(Head))
+        {
+            // Track that we have now checked this head
+            pending_checked |= NVBIT(Head);
+
+            // If our queues are empty, we can bail early
+            maskNonEmptyQueues[Head] = kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, state, NULL);
+            unionNonEmptyQueues |= maskNonEmptyQueues[Head];
+
+            // This function will check to see if there are callback states in which the
+            // caller has skipped execution.
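+            //
+            // Worked example (illustrative): if the caller passed only
+            // VBLANK_STATE_PROCESS_LOW_LATENCY, the XOR below leaves exactly
+            // VBLANK_STATE_PROCESS_NORMAL_LATENCY in skippedcallbacks, and the
+            // ISR/IMMEDIATE context bits are OR'd back in so the queued-work
+            // check runs in the same context the caller did.
+            //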
+            skippedcallbacks = ((state & VBLANK_STATE_PROCESS_ALL_CALLBACKS) ^ VBLANK_STATE_PROCESS_ALL_CALLBACKS);
+            skippedcallbacks |= (state & (VBLANK_STATE_PROCESS_CALLED_FROM_ISR | VBLANK_STATE_PROCESS_IMMEDIATE));
+
+            // now let's see if there are callbacks pending among the skipped states
+            maskCallbacksStillPending |= NVBIT(Head) * !!kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, skippedcallbacks, NULL);
+        }
+
+        // Don't check for new interrupts if we are in immediate mode
+        if (!(state & VBLANK_STATE_PROCESS_IMMEDIATE) )
+        {
+            pending = kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, pThreadState);
+            pending |= deferredVblank;
+        }
+
+        // if there was a change in the pending state, we should recheck everything
+        if (check_pending != pending)
+        {
+            // We need to recheck heads that were not pending before
+            check_pending = pending;
+            Head = 0;
+        }
+        else
+        {
+            // Nothing changed, so move on to the next head
+            Head++;
+        }
+
+        // Make sure we don't waste time on heads that don't exist
+        if (Head >= kdispGetNumHeads(pKernelDisplay))
+        {
+            break;
+        }
+    }
+
+    if (state & VBLANK_STATE_PROCESS_CALLED_FROM_ISR)
+    {
+        // store off which heads have pending vblank interrupts, for comparison at the next DPC time.
+        pKernelDisplay->isrVblankHeads = pending;
+    }
+
+    // increment the per-head vblank total counter, for any head with a pending vblank intr
+    for (Head = 0; Head < kdispGetNumHeads(pKernelDisplay); Head++)
+    {
+        // Move on if this crtc's interrupt isn't pending...
+        if ((pending & NVBIT(Head)) == 0)
+        {
+            continue;
+        }
+
+        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);
+        //
+        // increment vblank counters, as appropriate.
+        //
+
+        // Track the fact that we passed through here. This keeps the RC manager happy.
+        Count = kheadGetVblankTotalCounter_HAL(pKernelHead) + 1;
+        kheadSetVblankTotalCounter_HAL(pKernelHead, Count);
+
+        //
+        // Update the vblank counter if we are single chip or multichip master.
+        // We now have two queues, so we need to have two vblank counters.
+        //
+
+        // did they ask for processing of low-latency work?
+        if (state & VBLANK_STATE_PROCESS_LOW_LATENCY /* & maskNonEmptyQueues[Head]*/)
+        {
+            //
+            // don't let the DPC thread increment the low-latency counter.
+            // otherwise, the counter will frequently increment at double the
+            // expected rate, breaking things like swapInterval.
+            //
+            // XXX actually, there is one case where it would be OK for the DPC
+            // thread to increment this counter: if the DPC thread could ascertain
+            // that 'pending & NVBIT(Head)' represented a new interrupt event, and
+            // not simply the one that the ISR left uncleared in PCRTC_INTR_0, for
+            // the purpose of causing this DPC thread to get queued.
+            // Not sure how to do that.
+            //
+            if ( !(state & VBLANK_STATE_PROCESS_CALLED_FROM_DPC) || (pending & NVBIT(Head) & ~pKernelDisplay->isrVblankHeads) )
+            {
+                // either we were called from the ISR, or vblank is asserted in DPC when it wasn't in the ISR
+
+                // low latency queue requested, and this isn't a DPC thread.
+                Count = kheadGetVblankLowLatencyCounter_HAL(pKernelHead) + 1;
+                kheadSetVblankLowLatencyCounter_HAL(pKernelHead, Count);
+            }
+        }
+
+        // did they ask for processing of normal-latency work?
+        if (state & VBLANK_STATE_PROCESS_NORMAL_LATENCY /* & maskNonEmptyQueues[Head]*/)
+        {
+            // processing of the normal latency queue requested
+            Count = kheadGetVblankNormLatencyCounter_HAL(pKernelHead) + 1;
+            kheadSetVblankNormLatencyCounter_HAL(pKernelHead, Count);
+        }
+    }
+
+    //
+    // If we have nothing to process (no work to do in queue),
+    // we can bail early. We got here for some reason, so make
+    // sure we clear the interrupts.
+    //
+
+    if (!unionNonEmptyQueues)
+    {
+        // all queues (belonging to heads with pending vblank ints) are empty.
+        if (IS_FW_CLIENT(pGpu) ||
+            pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_HAS_SEPARATE_LOW_LATENCY_LINE))
+        {
+            for (Head = 0; Head < kdispGetNumHeads(pKernelDisplay); Head++)
+            {
+                pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);
+                kheadResetPendingLastData_HAL(pGpu, pKernelHead, pThreadState);
+            }
+        }
+
+        goto done;
+    }
+
+    //
+    // Although we have separate handlers for each head, attempt to process all
+    // interrupting heads now. What about DPCs already scheduled?
+    //
+    for (Head = 0; Head < kdispGetNumHeads(pKernelDisplay); Head++)
+    {
+        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);
+        // Move on if this crtc's interrupt isn't pending...
+        if ((pending & NVBIT(Head)) == 0)
+        {
+            continue;
+        }
+
+        // Process the callback list for this Head...
+        kheadProcessVblankCallbacks(pGpu, pKernelHead, state);
+    }
+
+    if (pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_HAS_SEPARATE_LOW_LATENCY_LINE))
+    {
+        for (i = 0; i < kdispGetNumHeads(pKernelDisplay); i++)
+        {
+            pKernelHead = KDISP_GET_HEAD(pKernelDisplay, i);
+            // Only reset the heads which we have serviced.
+            if ((pending & NVBIT(i)) == 0)
+            {
+                continue;
+            }
+            kheadResetPendingLastData_HAL(pGpu, pKernelHead, pThreadState);
+        }
+    }
+    //
+    // if there are still callbacks pending, and we are in an ISR,
+    // then don't clear PCRTC_INTR; XXXar why would we *ever* want
+    // to clear PCRTC_INTR if there are still things pending?
+    //
+    else if ( (maskCallbacksStillPending) &&
+              (state & VBLANK_STATE_PROCESS_CALLED_FROM_ISR) )
+    {
+        //
+        // there are still callbacks pending; don't clear
+        // PCRTC_INTR, yet. The expectation is that the OS layer
+        // will see that interrupts are still pending and queue a
+        // DPC/BottomHalf/whatever to service the rest of the
+        // vblank callback queues
+        //
+        for (i = 0; i < kdispGetNumHeads(pKernelDisplay); i++)
+        {
+            pKernelHead = KDISP_GET_HEAD(pKernelDisplay, i);
+            if (IS_FW_CLIENT(pGpu))
+            {
+                kheadResetPendingLastData_HAL(pGpu, pKernelHead, pThreadState);
+            }
+        }
+    }
+    else
+    {
+        // reset the VBlank intrs we've handled, and don't reset the vblank intrs we haven't.
+        for (i = 0; i < kdispGetNumHeads(pKernelDisplay); i++)
+        {
+            pKernelHead = KDISP_GET_HEAD(pKernelDisplay, i);
+            if (pending & NVBIT(i) & ~maskCallbacksStillPending)
+            {
+                kheadResetPendingLastData_HAL(pGpu, pKernelHead, pThreadState);
+            }
+        }
+    }
+
+    if (pIntrServicedHeadMask != NULL)
+    {
+        *pIntrServicedHeadMask = pending;
+    }
+
+done:
+
+    return;
+}
+
+NvU32 kdispReadPendingVblank_IMPL(OBJGPU *pGpu, KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *pThreadState)
+{
+    KernelHead *pKernelHead;
+    NvU32 headIdx, pending = 0;
+
+    for (headIdx = 0; headIdx < kdispGetNumHeads(pKernelDisplay); headIdx++)
+    {
+        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, headIdx);
+
+        if (kheadReadPendingVblank_HAL(pGpu, pKernelHead, NULL, pThreadState))
+        {
+            pending |= NVBIT(headIdx);
+        }
+    }
+
+    return pending;
+}
+
+/*!
+ * @brief Route modeset start/end notification to kernel RM
+ *
+ * Physical RM is expected to send a "start" notification at the beginning of
+ * every display modeset (supervisor interrupt sequence), and an "end"
+ * notification at the end. However, if physical RM detects back-to-back
+ * modesets, the intervening "end" notification MAY be skipped; in this case,
+ * the "start" notification for the next modeset serves as the "end"
+ * notification for the previous modeset.
+ *
+ * Kernel RM will use the notification to update the BW allocation for display.
+ * The ICC call that is required to update the BW allocation cannot be made
+ * from physical RM.
+ *
+ * @param[in] pKernelDisplay                 KernelDisplay pointer
+ * @param[in] bModesetStart                  NV_TRUE -> start of modeset;
+ *                                           NV_FALSE -> end of modeset
+ * @param[in] minRequiredIsoBandwidthKBPS    Min ISO BW required by IMP (KB/sec)
+ * @param[in] minRequiredFloorBandwidthKBPS  Min dramclk freq * pipe width (KB/sec)
+ */
+void
+kdispInvokeDisplayModesetCallback_KERNEL
+(
+    KernelDisplay *pKernelDisplay,
+    NvBool         bModesetStart,
+    NvU32          minRequiredIsoBandwidthKBPS,
+    NvU32          minRequiredFloorBandwidthKBPS
+)
+{
+    NV_STATUS status;
+
+    NV_PRINTF(LEVEL_INFO,
+              "Kernel RM received \"%s of modeset\" notification "
+              "(minRequiredIsoBandwidthKBPS = %u, minRequiredFloorBandwidthKBPS = %u)\n",
+              bModesetStart ? "start" : "end",
+              minRequiredIsoBandwidthKBPS,
+              minRequiredFloorBandwidthKBPS);
+
+    OBJGPU *pGpu = ENG_GET_GPU(pKernelDisplay);
+    status =
+        kdispArbAndAllocDisplayBandwidth_HAL(pGpu,
+                                             pKernelDisplay,
+                                             DISPLAY_ICC_BW_CLIENT_RM,
+                                             minRequiredIsoBandwidthKBPS,
+                                             minRequiredFloorBandwidthKBPS);
+    //
+    // The modeset cannot be aborted, so, if there is an error, no recovery
+    // is possible.
+    //
+    NV_ASSERT_OK(status);
+}
+
+/*! Get the supported display mask */
+NvU32
+kdispGetSupportedDisplayMask_IMPL
+(
+    OBJGPU        *pGpu,
+    KernelDisplay *pKernelDisplay
+)
+{
+    NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS supportParams = { 0 };
+    NV_STATUS status;
+
+    RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
+
+    NV_ASSERT_OK_OR_ELSE(status,
+                         pRmApi->Control(pRmApi,
+                                         kdispGetInternalClientHandle(pKernelDisplay),
+                                         kdispGetDispCommonHandle(pKernelDisplay),
+                                         NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED,
+                                         &supportParams,
+                                         sizeof(supportParams)),
+                         return 0U);
+
+    return supportParams.displayMask;
+}
+
+/*! This determines if a GPU has a display attached on any head.
+ */
+NvBool
+kdispIsDisplayConnected_IMPL
+(
+    OBJGPU        *pGpu,
+    KernelDisplay *pKernelDisplay
+)
+{
+    NV_STATUS status;
+    NvU32 supportedMask = 0U;
+    RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
+
+    supportedMask = kdispGetSupportedDisplayMask(pGpu, pKernelDisplay);
+
+    NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS connectParams = { 0 };
+    connectParams.displayMask = supportedMask;
+    connectParams.flags = NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_CACHED;
+
+    NV_ASSERT_OK_OR_ELSE(status,
+                         pRmApi->Control(pRmApi,
+                                         kdispGetInternalClientHandle(pKernelDisplay),
+                                         kdispGetDispCommonHandle(pKernelDisplay),
+                                         NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE,
+                                         &connectParams,
+                                         sizeof(connectParams)),
+                         return NV_FALSE);
+
+    return connectParams.displayMask != 0U;
+}
diff --git a/src/nvidia/src/kernel/gpu/disp/vblank_callback/vblank.c b/src/nvidia/src/kernel/gpu/disp/vblank_callback/vblank.c
new file mode 100644
index 0000000..ef7121a
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/disp/vblank_callback/vblank.c
@@ -0,0 +1,679 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "kernel/gpu/disp/vblank_callback/vblank.h"
+
+#include "kernel/gpu/disp/head/kernel_head.h"
+#include "kernel/gpu/disp/kern_disp.h"
+#include "kernel/gpu/timer/objtmr.h"
+#include "kernel/gpu/gpu.h"
+#include "kernel/os/os.h"
+
+void
+kheadAddVblankCallback_IMPL
+(
+    OBJGPU         *pGpu,
+    KernelHead     *pKernelHead,
+    VBLANKCALLBACK *pCallback
+)
+{
+    NvBool          OktoAdd = NV_TRUE;
+    VBLANKCALLBACK *pCheck  = NULL;
+    VBLANKCALLBACK *pNext   = NULL;
+    VBLANKCALLBACK *pPrev   = NULL;
+    VBLANKCALLBACK *pList   = NULL;
+    NvU32           Count;
+    NvBool          vblankIntrIsBeingGenerated = NV_FALSE;
+
+    //
+    // If callback needs vblank safety, make it low-latency, persistent and promote-to-front.
+    // The callback is responsible for clearing its own persistence & safety flags,
+    // once it achieves its raison d'etre, within its own particular idiom.
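+    // E.g. (illustrative): a caller passing only
+    // VBLANK_CALLBACK_FLAG_GUARANTEE_SAFETY ends up with PERSISTENT,
+    // LOW_LATENCY and PROMOTE_TO_FRONT set as well, so it lands at the front
+    // of the low-latency queue and stays there until it clears the flags
+    // itself.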
+    //
+    if (pCallback->Flags & VBLANK_CALLBACK_FLAG_GUARANTEE_SAFETY)
+    {
+        pCallback->Flags |= VBLANK_CALLBACK_FLAG_PERSISTENT;
+        pCallback->Flags |= VBLANK_CALLBACK_FLAG_LOW_LATENCY;
+        pCallback->Flags |= VBLANK_CALLBACK_FLAG_PROMOTE_TO_FRONT;
+    }
+
+    // Cache the requested queue and its current vblank count
+    if (pCallback->Flags & VBLANK_CALLBACK_FLAG_LOW_LATENCY)
+    {
+        pList = pKernelHead->Vblank.Callback.pListLL;
+        Count = pKernelHead->Vblank.Counters.LowLatency;
+    }
+    else
+    {
+        pList = pKernelHead->Vblank.Callback.pListNL;
+        Count = pKernelHead->Vblank.Counters.NormLatency;
+    }
+
+    //
+    // If this callback is supposed to fire at a specific vblank count,
+    // then that count (VBlankCount) better be in the future still.
+    //
+    NV_ASSERT(!(pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_COUNT) ||
+              (pCallback->VBlankCount > Count) );
+
+    NV_PRINTF(LEVEL_INFO, "headAddVblankCallback: pGpu=%p cb=%p\n", pGpu,
+              pCallback);
+    NV_PRINTF(LEVEL_INFO,
+              "  cbproc=%p cbobj=%p p1=0x%x p2=0x%x count=0x%x flags=0x%x offset=0x%x\n",
+              pCallback->Proc, pCallback->pObject, pCallback->Param1,
+              pCallback->Param2, pCallback->VBlankCount, pCallback->Flags,
+              pCallback->VBlankOffset);
+
+    if (kheadReadVblankIntrState(pGpu, pKernelHead) != NV_HEAD_VBLANK_INTR_UNAVAILABLE)
+        vblankIntrIsBeingGenerated = NV_TRUE;
+
+    if ( vblankIntrIsBeingGenerated || (pCallback->Flags & VBLANK_CALLBACK_FLAG_PERSISTENT) )
+    {
+        pCheck = pList;
+
+        //
+        // Check that the list doesn't become a circular queue of one element, which can happen in multichip, if a method
+        // is called twice on multiple devices. If this happens, we'll be in an infinite loop in the while (NULL != pCheck) below.
+        //
+        while (NULL != pCheck)
+        {
+            if (pCheck == pCallback)
+            {
+                //
+                // It is expected that we may try to add the same callback again, as we may not get a
+                // dacdisable (which deletes callbacks) between modesets and/or dacenables.
+                //
+                NV_PRINTF(LEVEL_INFO,
+                          "headAddVblankCallback: VblankCallback already on the Callback List\n");
+                OktoAdd = NV_FALSE;
+            }
+            pCheck = pCheck->Next;
+        }
+
+        if (OktoAdd)
+        {
+            //
+            // Best-effort test to verify that this callback is not already part of any callback list
+            // (the test won't detect callbacks added twice at the end of two lists)
+            //
+            NV_ASSERT(pCallback->Next == NULL);
+
+            if (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_COUNT)
+            {
+                // We set the target to the one that the caller supplied.
+                Count = pCallback->VBlankCount;
+            }
+            else if (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_TIMESTAMP)
+            {
+                // We don't know which vblank would correspond to the timestamp, so just add it to end of list.
+                Count = 0xFFFFFFFF;
+            }
+            else if (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_OFFSET)
+            {
+                // We set the target to the current plus the offset that the caller supplied.
+                Count += pCallback->VBlankOffset;
+                pCallback->VBlankCount = Count;
+
+                // If we are persistent, we should convert the vblank offset flag to a vblank count flag.
+                if (pCallback->Flags & VBLANK_CALLBACK_FLAG_PERSISTENT)
+                {
+                    pCallback->Flags &= ~VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_OFFSET;
+                    pCallback->Flags |= VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_COUNT;
+                }
+            }
+            else if (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_NEXT)
+            {
+                // We set the target to the current plus one (the next vblank)
+                Count += 1;
+                pCallback->VBlankCount = Count;
+            }
+            else
+            {
+                //
+                // We set the target to the current plus one (the next vblank).
+                // We use this case when we don't know the request, or for legacy support.
+                //
+                Count += 1;
+                pCallback->VBlankCount = Count;
+            }
+
+            // These are now guaranteed to be sorted by VBlank
+            pPrev = NULL;
+            pNext = pList;
+
+            if (pCallback->Flags & VBLANK_CALLBACK_FLAG_PROMOTE_TO_FRONT)
+            {
+                // To the front of the group that shares the same 'VBlankCount' value
+                while ((NULL != pNext) && (Count > pNext->VBlankCount))
+                {
+                    pPrev = pNext;
+                    pNext = pNext->Next;
+                }
+            }
+            else
+            {
+                // To the back of the group that shares the same 'VBlankCount' value
+                while ((NULL != pNext) && (Count >= pNext->VBlankCount))
+                {
+                    pPrev = pNext;
+                    pNext = pNext->Next;
+                }
+            }
+
+            // Are we at the head?
+            if (pPrev == NULL)
+            {
+                pCallback->Next = pList;
+
+                if (pCallback->Flags & VBLANK_CALLBACK_FLAG_LOW_LATENCY)
+                {
+                    pKernelHead->Vblank.Callback.pListLL = pCallback;
+                }
+                else
+                {
+                    pKernelHead->Vblank.Callback.pListNL = pCallback;
+                }
+            }
+            else // In the middle or tail
+            {
+                pPrev->Next = pCallback;
+                pCallback->Next = pNext;
+            }
+        }
+        else
+        {
+            NV_PRINTF(LEVEL_INFO,
+                      "VBlankCallback discarded in headAddVblankCallback to avoid infinite loop\n");
+        }
+    }
+    else
+    {
+        // call it now
+        if (pCallback->Proc)
+        {
+            NV_PRINTF(LEVEL_INFO,
+                      "headAddVblankCallback: immediate invocation\n");
+            pCallback->bImmediateCallback = NV_TRUE;
+
+            // Force it to appear to be on the correct VBlankCount
+            pCallback->VBlankCount = Count;
+            if (pCallback->Flags & VBLANK_CALLBACK_FLAG_USER)
+            {
+                // This is a user callback; they don't get a pointer to our pDev or Object data structs.
+                pCallback->Proc(NULL,
+                                NULL,
+                                pCallback->Param1,
+                                pCallback->Param2,
+                                pCallback->Status);
+            }
+            else
+            {
+                //
+                // this callback was scheduled when a trace was being conducted;
+                // turn tracing back on to make sure that we record this callback's
+                // register operations too, so the trace will be complete.
+                // DON'T LOG USER CALLBACKS; they are not RM activity. (plus the tracing system
+                // requires a pDev ptr to find its own data structures)
+                //
+                pCallback->Proc(pGpu,
+                                pCallback->pObject,
+                                pCallback->Param1,
+                                pCallback->Param2,
+                                pCallback->Status);
+            }
+        }
+    }
+
+    // After all of that, if at least one callback is scheduled, head is enabled and the vblank is AVAILABLE, enable it now.
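+    //
+    // The head vblank interrupt is a small state machine (sketch, per
+    // kheadReadVblankIntrState/kheadWriteVblankIntrState):
+    //
+    //     UNAVAILABLE -> AVAILABLE : head starts generating vblanks
+    //     AVAILABLE   -> ENABLED   : a callback is queued (the code below)
+    //     ENABLED     -> AVAILABLE : queues drain or notifications pause
+    //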
+    if (vblankIntrIsBeingGenerated)
+    {
+        if ( (pKernelHead->Vblank.Callback.pListLL) ||
+             (pKernelHead->Vblank.Callback.pListNL) )
+        {
+            if (kheadReadVblankIntrState(pGpu, pKernelHead) != NV_HEAD_VBLANK_INTR_ENABLED)
+            {
+                kheadWriteVblankIntrState(pGpu, pKernelHead, NV_HEAD_VBLANK_INTR_ENABLED);
+                NV_PRINTF(LEVEL_INFO,
+                          "headAddVblankCallback: Changed vblank state to ENABLED\n");
+            }
+        }
+    }
+}
+
+void
+kheadPauseVblankCbNotifications_IMPL
+(
+    OBJGPU         *pGpu,
+    KernelHead     *pKernelHead,
+    VBLANKCALLBACK *pCallback
+)
+{
+    VBLANKCALLBACK *pList = NULL;
+    NvBool bShouldDisable = NV_TRUE;
+
+    // Cache the requested queue and its current vblank count
+    if (pCallback->Flags & VBLANK_CALLBACK_FLAG_LOW_LATENCY)
+    {
+        pList = pKernelHead->Vblank.Callback.pListLL;
+    }
+    else
+    {
+        pList = pKernelHead->Vblank.Callback.pListNL;
+    }
+
+    VBLANKCALLBACK *pPrev = pList;
+
+    while (pPrev)
+    {
+        if (pPrev->bIsVblankNotifyEnable == NV_TRUE)
+        {
+            bShouldDisable = NV_FALSE;
+            break;
+        }
+        pPrev = pPrev->Next;
+    }
+
+    if (bShouldDisable)
+    {
+        kheadWriteVblankIntrState(pGpu, pKernelHead, NV_HEAD_VBLANK_INTR_AVAILABLE);
+    }
+}
+
+void
+kheadDeleteVblankCallback_IMPL
+(
+    OBJGPU         *pGpu,
+    KernelHead     *pKernelHead,
+    VBLANKCALLBACK *pCallback
+)
+{
+    VBLANKCALLBACK *pList = NULL;
+    NvBool enabled = NV_FALSE;
+    NvU32  Count;
+
+    // Cache the requested queue and its current vblank count
+    if (pCallback->Flags & VBLANK_CALLBACK_FLAG_LOW_LATENCY)
+    {
+        pList = pKernelHead->Vblank.Callback.pListLL;
+        Count = pKernelHead->Vblank.Counters.LowLatency;
+    }
+    else
+    {
+        pList = pKernelHead->Vblank.Callback.pListNL;
+        Count = pKernelHead->Vblank.Counters.NormLatency;
+    }
+
+    // Disable VBlank (if it is even on) while we scan/process the callback list
+    enabled = kheadReadVblankIntrEnable_HAL(pGpu, pKernelHead);
+
+    if (enabled)
+    {
+        kheadWriteVblankIntrState(pGpu, pKernelHead, NV_HEAD_VBLANK_INTR_AVAILABLE);
+    }
+
+    // Search the list and remove this Callback entry
+    if (pList == pCallback)
+    {
+        //
+        // Found it.
+        // Unlink it now. If we call it, it may try to add itself again, and won't be able to.
+        if (pCallback->Flags & VBLANK_CALLBACK_FLAG_LOW_LATENCY)
+        {
+            pKernelHead->Vblank.Callback.pListLL = pCallback->Next;
+        }
+        else
+        {
+            pKernelHead->Vblank.Callback.pListNL = pCallback->Next;
+        }
+
+        //
+        // Should the callback be executed as part of the object destroy?
+        // (safe to do, since we already hold the necessary lock).
+        //
+        if ( (pCallback->Proc) &&
+             (pCallback->Flags & VBLANK_CALLBACK_FLAG_COMPLETE_ON_OBJECT_CLEANUP) )
+        {
+            // Force it to appear to be on the correct VBlankCount
+            pCallback->VBlankCount = Count;
+
+            // This is a user callback; they don't get a pointer to our pDev or Object data structs.
+            if (pCallback->Flags & VBLANK_CALLBACK_FLAG_USER)
+            {
+                pCallback->Proc(NULL,
+                                NULL,
+                                pCallback->Param1,
+                                pCallback->Param2,
+                                pCallback->Status);
+            }
+            else
+            {
+                pCallback->Proc(pGpu,
+                                pCallback->pObject,
+                                pCallback->Param1,
+                                pCallback->Param2,
+                                pCallback->Status);
+            }
+        }
+    }
+    else
+    {
+        VBLANKCALLBACK *pPrev = pList;
+
+        while (pPrev)
+        {
+            if (pPrev->Next == pCallback)
+            {
+                //
+                // Found it.
+                // Should the callback be executed as part of the object destroy?
+                // (safe to do, since we already hold the necessary lock).
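+                // E.g. (illustrative): a callback registered with
+                // VBLANK_CALLBACK_FLAG_COMPLETE_ON_OBJECT_CLEANUP is invoked
+                // here one last time during teardown, with VBlankCount forced
+                // to the current count, instead of being silently dropped.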
+                //
+                if ( (pCallback->Proc) &&
+                     (pCallback->Flags & VBLANK_CALLBACK_FLAG_COMPLETE_ON_OBJECT_CLEANUP) )
+                {
+                    // Force it to appear to be on the correct VBlankCount
+                    pCallback->VBlankCount = Count;
+
+                    // This is a user callback; it doesn't get a pointer to our pDev or Object data structs.
+                    if (pCallback->Flags & VBLANK_CALLBACK_FLAG_USER)
+                    {
+                        pCallback->Proc(NULL,
+                                        NULL,
+                                        pCallback->Param1,
+                                        pCallback->Param2,
+                                        pCallback->Status);
+                    }
+                    else
+                    {
+                        pCallback->Proc(pGpu,
+                                        pCallback->pObject,
+                                        pCallback->Param1,
+                                        pCallback->Param2,
+                                        pCallback->Status);
+                    }
+                }
+
+                pPrev->Next = pCallback->Next;
+                break;
+            }
+            pPrev = pPrev->Next;
+        }
+    }
+    pCallback->Next = NULL;
+
+    // Check whether there are no callbacks scheduled for this head
+    if (!(pKernelHead->Vblank.Callback.pListLL) &&
+        !(pKernelHead->Vblank.Callback.pListNL) )
+    {
+        //
+        // Since there are no callbacks scheduled, we don't need
+        // to reenable anything.
+        //
+        enabled = NV_FALSE;
+    }
+
+    // Restore VBlank enable
+    if (enabled)
+    {
+        kheadWriteVblankIntrState(pGpu, pKernelHead, NV_HEAD_VBLANK_INTR_ENABLED);
+    }
+}
+
+void
+kheadProcessVblankCallbacks_IMPL
+(
+    OBJGPU     *pGpu,
+    KernelHead *pKernelHead,
+    NvU32       state
+)
+{
+    KernelDisplay  *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    OBJTMR         *pTmr = GPU_GET_TIMER(pGpu);
+    VBLANKCALLBACK *pCallback = NULL;
+    VBLANKCALLBACK *pNext = NULL;
+    VBLANKCALLBACK **ppPrev = NULL;
+    NvBool done = NV_FALSE;
+    NvBool bQueueDpc = NV_FALSE;
+    NvU32  newstate;
+    NvU32  Count = 0;
+    NvU64  time = 0;
+
+    // If the caller failed to specify which queue, assume they wanted all of them
+    if ((state & VBLANK_STATE_PROCESS_ALL_CALLBACKS) == 0)
+    {
+        state |= VBLANK_STATE_PROCESS_ALL_CALLBACKS;
+    }
+
+    // Keep a local copy we can mess with
+    newstate = state;
+
+    // We may have more than one queue to process, so this is the main loop.
+    while (!done)
+    {
+        // Select the next queue to process. Give priority to the low latency folks.
+        if (newstate & VBLANK_STATE_PROCESS_LOW_LATENCY)
+        {
+            // We don't want to come back here again.
+            newstate &= ~VBLANK_STATE_PROCESS_LOW_LATENCY;
+
+            // Grab the low latency queue and vblank count
+            pCallback = pKernelHead->Vblank.Callback.pListLL;
+            ppPrev = &pKernelHead->Vblank.Callback.pListLL;
+            Count = pKernelHead->Vblank.Counters.LowLatency;
+        }
+        else if (newstate & VBLANK_STATE_PROCESS_NORMAL_LATENCY)
+        {
+            // We don't want to come back here again.
+            newstate &= ~VBLANK_STATE_PROCESS_NORMAL_LATENCY;
+
+            // Grab the normal latency queue and vblank count
+            pCallback = pKernelHead->Vblank.Callback.pListNL;
+            ppPrev = &pKernelHead->Vblank.Callback.pListNL;
+            Count = pKernelHead->Vblank.Counters.NormLatency;
+        }
+        else
+        {
+            // We appear to have gone through all of the queues
+            done = NV_TRUE;
+        }
+
+        // If we are not done, process the next callback queue
+        if (!done)
+        {
+            while (pCallback)
+            {
+                pNext = pCallback->Next;
+
+                if (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_TIMESTAMP)
+                {
+                    //
+                    // Timestamp-based callbacks don't have a valid vblank count:
+                    // vblank might be delayed and we might see only one vblank instead
+                    // of two, so a count doesn't make sense in the timestamp case.
+                    // And since the semantics are "flip on vblank at TS >= the specified TS",
+                    // we can't use tmrCallbacks (they might flip outside vblank).
+                    //
+
+                    // Only re-read the time if we don't already know the result
+                    if (time < pCallback->TimeStamp)
+                    {
+                        tmrGetCurrentTime(pTmr, &time);
+                    }
+
+                    if (time >= pCallback->TimeStamp)
+                    {
+                        //
+                        // Unlink it before we call it. Otherwise, it may
+                        // try to add itself again, and won't be able to.
+                        //
+                        pCallback->Next = NULL;
+                        *ppPrev = pNext;
+
+                        // We had better have something to do if we are spending time reading the TS
+                        NV_ASSERT(pCallback->Proc);
+
+                        //
+                        // We need to avoid calling the _vblank_callback during Panel Replay,
+                        // as it will be taken care of during _RG_VBLANK interrupt handling.
+                        //
+                        if (pCallback != (VBLANKCALLBACK *)pKernelDisplay->pRgVblankCb && !pKernelDisplay->bIsPanelReplayEnabled)
+                        {
+                            pCallback->Proc(pGpu,
+                                            pCallback->pObject,
+                                            pCallback->Param1,
+                                            pCallback->Param2,
+                                            pCallback->Status);
+                        }
+                        bQueueDpc = NV_TRUE;
+                    }
+                    else
+                    {
+                        ppPrev = &pCallback->Next;
+                    }
+                }
+                else
+                {
+                    if ( (pCallback->VBlankCount == Count) ||
+                         ((pCallback->VBlankCount + 1) == Count) ||
+                         (VBLANK_STATE_PROCESS_IMMEDIATE == state) )
+                    {
+                        pCallback->VBlankCount = Count;
+
+                        //
+                        // If this is not a persistent callback, unlink it.
+                        // Otherwise, it may try to add itself again, and won't be able to.
+                        //
+
+                        // Call the function now
+                        if (pCallback->Proc)
+                        {
+                            if (pCallback->Flags & VBLANK_CALLBACK_FLAG_USER)
+                            {
+                                //
+                                // DON'T LOG USER CALLBACKS; they are not RM activity. (Plus, the
+                                // tracing system requires a pDev ptr to find its own data structures.)
+                                //
+
+                                // This is a user callback; it doesn't get a pointer to our pDev or Object data structs.
+                                pCallback->Proc(NULL,
+                                                NULL,
+                                                pCallback->Param1,
+                                                pCallback->Param2,
+                                                pCallback->Status);
+                                bQueueDpc = NV_TRUE;
+                            }
+                            else
+                            {
+                                //
+                                // This callback was scheduled while a trace was being conducted;
+                                // turn tracing back on to make sure that we record this callback's
+                                // register operations too, so the trace will be complete.
+                                //
+                                pCallback->Proc(pGpu,
+                                                pCallback->pObject,
+                                                pCallback->Param1,
+                                                pCallback->Param2,
+                                                pCallback->Status);
+                                bQueueDpc = NV_TRUE;
+                            }
+                        }
+
+                        // If this is a persistent callback, make sure to update its time to run if we are not multichip and not the last chip
+                        if ( (pCallback->Flags & VBLANK_CALLBACK_FLAG_PERSISTENT) )
+                        {
+                            //
+                            // So, it appears there are those that like to update vblank counts and such within the callback.
+                            // This is fine, I suppose, but then we don't promise that this order stays sorted.
+                            // Anyway, the callback may have updated the vblank offset too, so update that now.
+                            // We should never see an OFFSET and PERSISTENT within the process loop.
+                            //
+                            if (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_OFFSET)
+                            {
+                                // We set the target to the current count plus the offset that the caller supplied.
+                                pCallback->VBlankCount = Count + pCallback->VBlankOffset;
+
+                                // We are persistent, so we should convert the vblank offset flag to a vblank count flag.
+                                pCallback->Flags &= ~VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_OFFSET;
+                                pCallback->Flags |= VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_COUNT;
+                            }
+
+                            //
+                            // If the vblank count has already been specified, we don't need to
+                            // increment the vblank count.
+                            //
+                            if ( !(pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_COUNT) )
+                            {
+                                pCallback->VBlankCount = Count + 1;
+                            }
+
+                            // Leave it in the callback chain.
+                            ppPrev = &pCallback->Next;
+                        }
+                        else
+                        {
+                            //
+                            // Yes, the proper way to terminate a persistent callback from within a callback is
+                            // to make it non-persistent. This is what the cursor functions do, and so we should
+                            // check again after the callback.
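+                            //
+                            // A minimal sketch of that pattern (hypothetical callback
+                            // body; it assumes Param1 carries the callback's own
+                            // VBLANKCALLBACK pointer, which nothing here mandates):
+                            //
+                            //     VBLANKCALLBACK *pSelf = (VBLANKCALLBACK *)Param1;
+                            //     if (bWorkDone)
+                            //         pSelf->Flags &= ~VBLANK_CALLBACK_FLAG_PERSISTENT;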
+                            //
+                            pCallback->Next = NULL;
+                            *ppPrev = pNext;
+                        }
+                    }
+                    // This condition arises at wrap time, which occurs about every 331 days at 150 Hz
+                    else
+                    {
+                        //
+                        // MK: A callback may increase its vblank count as part of its execution. Since the
+                        // callback list is only sorted at insertion time, this can render the list
+                        // unsorted. So, we need to read the remaining entries in the list.
+                        //
+                        ppPrev = &pCallback->Next;
+                    }
+                }
+
+                pCallback = pNext;
+            }
+        }
+    }
+
+    if (bQueueDpc)
+    {
+        osQueueDpc(pGpu);
+    }
+
+    // After all of that, if the callback lists are null and the vblank is ENABLED, move it to AVAILABLE now.
+    if (!(pKernelHead->Vblank.Callback.pListLL) &&
+        !(pKernelHead->Vblank.Callback.pListNL) )
+    {
+        if (kheadReadVblankIntrState(pGpu, pKernelHead) == NV_HEAD_VBLANK_INTR_ENABLED)
+        {
+            kheadWriteVblankIntrState(pGpu, pKernelHead, NV_HEAD_VBLANK_INTR_AVAILABLE);
+
+            NV_PRINTF(LEVEL_INFO,
+                      "Changed vblank state on head %d to AVAILABLE\n",
+                      pKernelHead->PublicId);
+        }
+    }
+}
diff --git a/src/nvidia/src/kernel/gpu/eng_state.c b/src/nvidia/src/kernel/gpu/eng_state.c
new file mode 100644
index 0000000..d0ed328
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/eng_state.c
@@ -0,0 +1,428 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#include "gpu/gpu.h" +#include "gpu/eng_state.h" +#include "core/hal.h" +#include "core/locks.h" + +NV_STATUS +engstateConstructBase_IMPL +( + OBJENGSTATE *pEngstate, + OBJGPU *pGpu, + ENGDESCRIPTOR engDesc +) +{ + pEngstate->pGpu = pGpu; + pEngstate->engDesc = engDesc; + pEngstate->currentState = ENGSTATE_STATE_UNDEFINED; + + if (pEngstate->getProperty(pEngstate, PDB_PROP_ENGSTATE_IS_MISSING)) + return NV_ERR_NOT_SUPPORTED; + +#if NV_PRINTF_STRINGS_ALLOWED + nvDbgSnprintf(pEngstate->name, sizeof(pEngstate->name), "%s:%d", + objGetClassName(pEngstate), ENGDESC_FIELD(pEngstate->engDesc, _INST)); +#endif + + return NV_OK; +} + +void +engstateLogStateTransitionPre_IMPL +( + OBJENGSTATE *pEngstate, + ENGSTATE_STATE targetState, + ENGSTATE_TRANSITION_DATA *pData +) +{ + ENGSTATE_STATS *stats = &pEngstate->stats[targetState]; + NV_ASSERT_OR_RETURN_VOID(targetState < ENGSTATE_STATE_COUNT); + + // First call, init + portMemSet(stats, 0, sizeof(ENGSTATE_STATS)); + portMemSet(pData, 0, sizeof(ENGSTATE_TRANSITION_DATA)); + osGetPerformanceCounter(&pData->transitionStartTimeNs); + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetActiveStats) + { + PORT_MEM_TRACK_ALLOCATOR_STATS memstats = {0}; + portMemExTrackingGetActiveStats(NULL, &memstats); + + pData->memoryAllocCount = (NvS64) memstats.numAllocations; + pData->memoryAllocSize = (NvS64) memstats.usefulSize; + } +#endif +} + +void +engstateLogStateTransitionPost_IMPL +( + OBJENGSTATE *pEngstate, + ENGSTATE_STATE targetState, + ENGSTATE_TRANSITION_DATA *pData +) +{ + ENGSTATE_STATS *stats = &pEngstate->stats[targetState]; + NvU64 endTimeNs; + + NV_ASSERT_OR_RETURN_VOID(targetState < ENGSTATE_STATE_COUNT); + + osGetPerformanceCounter(&endTimeNs); + stats->transitionTimeUs = (endTimeNs - pData->transitionStartTimeNs) / 1000; + +#if NV_PRINTF_STRINGS_ALLOWED + const char *stateStrings[ENGSTATE_STATE_COUNT] = + { + "Undefined", + "Construct", + "Pre-Init", + "Init", + "Pre-Load", + "Load", + "Post-Load", + "Pre-Unload", + "Unload", + "Post-Unload", + "Destroy" + }; + ct_assert(ENGSTATE_STATE_COUNT == 11); + + NV_PRINTF(LEVEL_INFO, + "Engine %s state change: %s -> %s, took %uus\n", + engstateGetName(pEngstate), + stateStrings[pEngstate->currentState], stateStrings[targetState], + stats->transitionTimeUs); +#else + NV_PRINTF(LEVEL_INFO, + "Engine 0x%06x:%d state change: %d -> %d, took %uus\n", + objGetClassId(pEngstate), ENGDESC_FIELD(pEngstate->engDesc, _INST), + pEngstate->currentState, targetState, + stats->transitionTimeUs); +#endif + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetActiveStats) + { + PORT_MEM_TRACK_ALLOCATOR_STATS memstats = {0}; + portMemExTrackingGetActiveStats(NULL, &memstats); + + stats->memoryAllocCount = (NvS32)((NvS64)memstats.numAllocations - pData->memoryAllocCount); + stats->memoryAllocSize = (NvS32)((NvS64)memstats.usefulSize - pData->memoryAllocSize); + + NV_PRINTF(LEVEL_INFO, " Memory usage change: %d allocations, %d bytes\n", + stats->memoryAllocCount, stats->memoryAllocSize); + } +#endif + + pEngstate->currentState = targetState; +} + +const char * +engstateGetName_IMPL +( + OBJENGSTATE *pEngstate +) +{ +#if NV_PRINTF_STRINGS_ALLOWED + return pEngstate->name; +#else + return ""; +#endif +} + +/*! + * @brief generic constructor + */ +NV_STATUS +engstateConstructEngine_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + ENGDESCRIPTOR engDesc +) +{ + return NV_OK; +} + +/*! + * @brief destructor + */ +void +engstateDestruct_IMPL +( + OBJENGSTATE *pEngstate +) +{ +} + +/*! 
+ * @brief init missing engine + */ +void +engstateInitMissing_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate +) +{ + return; +} + +/*! + * @brief Wrapper around StatePreInitUnlocked and StatePreInitLocked + */ +NV_STATUS +engstateStatePreInit_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + NV_ASSERT_OR_RETURN(rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + /* Check if we overrode the unlocked variant */ + if ((engstateStatePreInitUnlocked_FNPTR(pEngstate) != + engstateStatePreInitUnlocked_IMPL)) + { + NV_STATUS status, lockStatus; + + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + status = engstateStatePreInitUnlocked(pGpu, pEngstate); + + lockStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_INIT); + + if (status == NV_OK) + status = lockStatus; + if (status != NV_OK) + return status; + } + + return engstateStatePreInitLocked(pGpu, pEngstate); +} + +/*! + * @brief state pre-init locked + */ +NV_STATUS +engstateStatePreInitLocked_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + return NV_OK; +} + +/*! + * @brief state pre-init unlocked + */ +NV_STATUS +engstateStatePreInitUnlocked_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + return NV_OK; +} + +/*! + * @brief Wrapper around StateInitUnlocked and StateInitLocked + */ +NV_STATUS +engstateStateInit_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + NV_ASSERT_OR_RETURN(rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + /* Check if we overrode the unlocked variant */ + if (engstateStateInitUnlocked_FNPTR(pEngstate) != engstateStateInitUnlocked_IMPL) + { + NV_STATUS status, lockStatus; + + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + status = engstateStateInitUnlocked(pGpu, pEngstate); + lockStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_INIT); + + if (status == NV_OK) + status = lockStatus; + if (status != NV_OK) + return status; + } + + return engstateStateInitLocked(pGpu, pEngstate); +} + +/*! + * @brief state init locked + */ +NV_STATUS +engstateStateInitLocked_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + return NV_OK; +} + +/*! + * @brief state init unlocked + */ +NV_STATUS +engstateStateInitUnlocked_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + return NV_OK; +} + +/*! + * @brief state pre-load + */ +NV_STATUS +engstateStatePreLoad_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state load + */ +NV_STATUS +engstateStateLoad_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state post-load + */ +NV_STATUS +engstateStatePostLoad_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state unload + */ +NV_STATUS +engstateStateUnload_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state pre-unload + */ +NV_STATUS +engstateStatePreUnload_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state post-unload + */ +NV_STATUS +engstateStatePostUnload_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state destroy + */ +void +engstateStateDestroy_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate +) +{ +} + +/*! + * @brief returns the ENGDESCRIPTOR associated with this ENGSTATE + * + * @param[in] pEngstate + */ +ENGDESCRIPTOR +engstateGetDescriptor_IMPL +( + OBJENGSTATE *pEngstate +) +{ + return pEngstate->engDesc; +} + +/*! 
+ * @brief checks for presence of the hardware associated with this ENGSTATE + * + * @param[in] pGpu + * @param[in] pEngstate + */ +NvBool +engstateIsPresent_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate +) +{ + return gpuCheckEngine_HAL(pGpu, pEngstate->engDesc); +} + + +/*! + * @brief returns the FIFO associated with this ENGSTATE + * + * @param[in] pEngstate + */ +OBJFIFO * +engstateGetFifo_IMPL +( + OBJENGSTATE *pEngstate +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pEngstate); + + return GPU_GET_FIFO(pGpu); +} + diff --git a/src/nvidia/src/kernel/gpu/gpu.c b/src/nvidia/src/kernel/gpu/gpu.c new file mode 100644 index 0000000..8ec7649 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu.c @@ -0,0 +1,4957 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief HW State Routines: System Object Function Definitions. 
+ */ + + +#include "lib/base_utils.h" +#include "gpu/gpu.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/inst_mem/disp_inst_mem.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/gsp/gsp_trace_rats_macro.h" +#include "gpu/eng_desc.h" +#include "nv_ref.h" +#include "os/os.h" +#include "nvrm_registry.h" +#include "gpu_mgr/gpu_mgr.h" +#include "core/thread_state.h" +#include "core/locks.h" +#include "diagnostics/tracer.h" +#include "rmapi/client_resource.h" +#include "diagnostics/journal.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi_utils.h" +#include "platform/sli/sli.h" +#include "core/hal_mgr.h" +#include "vgpu/rpc.h" +#include "jt.h" +#include "kernel/gpu/nvbitmask.h" + +#include "nvmisc.h" + +#include "ctrl/ctrl402c.h" // NV402C_CTRL_NUM_I2C_PORTS +#include "ctrl/ctrl5070/ctrl5070chnc.h" // NV5070_CTRL_CMD_GET_PINSET_PEER_PEER_PINSET_NONE + +#include + +#include "kernel/gpu/mem_mgr/mem_mgr.h" + +#include "nvdevid.h" // for NV_PCI_DEVID_DEVICE + +#include "virtualization/hypervisor/hypervisor.h" + +static NV_STATUS gpuDetermineVirtualMode(OBJGPU *); + +#if RMCFG_FEATURE_GSPRM_BULLSEYE || defined(GSPRM_BULLSEYE_ENABLE) +#include "nv_sriov_defines.h" +#include "diagnostics/code_coverage_mgr.h" +#endif + +#include "g_odb.h" + +#define RMTRACE_ENGINE_PROFILE_EVENT(EventName, EngineId, ReadCount, WriteCount) \ +{ \ + RMTRACE_PROBE4(generic, marker, \ + NvU32, EngineId, sizeof(NvU32), \ + char*, EventName, sizeof(EventName), \ + NvU32, ReadCount, sizeof(NvU32), \ + NvU32, WriteCount, sizeof(NvU32)); \ + } + +static NV_STATUS gpuRemoveMissingEngines(OBJGPU *); + +// local static function +static NV_STATUS _gpuChildrenPresentInit(OBJGPU *pGpu); +static NV_STATUS gpuCreateChildObjects(OBJGPU *, NvBool); +static NV_STATUS gpuCreateObject(OBJGPU *pGpu, NVOC_CLASS_ID, NvU64, Dynamic **); +static NV_STATUS gpuStatePreLoad(OBJGPU *, NvU32); +static NV_STATUS gpuStatePostLoad(OBJGPU *, NvU32); +static NV_STATUS gpuStatePreUnload(OBJGPU *, NvU32); +static NV_STATUS gpuStatePostUnload(OBJGPU *, NvU32); +static void gpuXlateHalImplToArchImpl(HAL_IMPLEMENTATION, NvU32 *, NvU32 *); +static NvBool gpuSatisfiesTemporalOrder(OBJGPU *, HAL_IMPLEMENTATION); +static NvBool gpuShouldCreateObject(OBJGPU *pGpu, NvU32 classId, NvU32 instance); + +static void gpuDestroyMissingEngine(OBJGPU *, OBJENGSTATE *); +static void gpuRemoveMissingEngineClasses(OBJGPU *, NvU32); + +static NV_STATUS _gpuCreateEngineOrderList(OBJGPU *pGpu); +static void _gpuFreeEngineOrderList(OBJGPU *pGpu); + + +static void _gpuInitPciHandle(OBJGPU *pGpu); +static void _gpuInitPhysicalRmApi(OBJGPU *pGpu); +static NV_STATUS _gpuAllocateInternalObjects(OBJGPU *pGpu); +static void _gpuFreeInternalObjects(OBJGPU *pGpu); +static NV_STATUS _gpuSetResetRequiredState(OBJGPU *pGpu, NvBool newState); + +typedef struct +{ + NvS32 childOrderIndex; + NvS32 instanceID; + NvU32 flags; + NvBool bStarted; +} ENGLIST_ITER, *PENGLIST_ITER; + +static ENGLIST_ITER gpuGetEngineOrderListIter(OBJGPU *pGpu, NvU32 flags); +static NvBool gpuGetNextInEngineOrderList(OBJGPU *pGpu, ENGLIST_ITER *pIt, ENGDESCRIPTOR *pEngDesc); + +static inline void _setPlatformNoHostbridgeDetect(NvBool bValue) +{ +} + +static NV_STATUS _gpuChildNvocClassInfoGet(OBJGPU *pGpu, NVOC_CLASS_ID classId, const NVOC_CLASS_INFO **ppClassInfo); + +void +_gpuDetectNvswitchSupport +( + OBJGPU *pGpu +) +{ +} + +// +// Generate a 32-bit id from domain, bus and device tuple. 
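+//
+// Assuming gpuEncodeBusDevice() packs bus into bits 15:8 and device into
+// bits 7:0 (its definition is not shown here), the resulting layout is:
+//
+//   id[31:16] = domain & 0xffff,   id[15:8] = bus,   id[7:0] = device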
+//
+// This is a one-way function that is not guaranteed to generate a unique id for
+// each domain, bus, device tuple, as the domain alone can be 32-bit. Historically,
+// we have been assuming that the domain can only be 16-bit, but that has never
+// been true on Linux, and Hyper-V virtualization has exposed that by using
+// arbitrary 32-bit domains for passthrough GPUs. This is the only known case
+// today that requires immediate support. The domains on Hyper-V come from
+// hashing some system and GPU information and are claimed to be unique even if
+// we consider the lower 16 bits only. Hence, as a temporary solution, only the
+// lower 16 bits are used, and it's asserted that the top 16 bits are only
+// non-zero on Hyper-V.
+//
+// Long term, the 32-bit ids should be changed to 64-bit, or the generation scheme
+// should be changed to guarantee uniqueness. Both of these are impactful, as the
+// biggest user of this is the commonly used 32-bit OBJGPU::gpuId.
+//
+NvU32 gpuGenerate32BitId(NvU32 domain, NvU8 bus, NvU8 device)
+{
+    NvU32 id = gpuEncodeBusDevice(bus, device);
+
+    // Include only the lower 16 bits to match the old gpuId scheme
+    id |= (domain & 0xffff) << 16;
+
+    if ((domain >> 16) != 0)
+    {
+        NV_ASSERT(hypervisorIsType(OS_HYPERVISOR_HYPERV));
+    }
+
+    return id;
+}
+
+NvU32 gpuGenerate32BitIdFromPhysAddr(RmPhysAddr addr)
+{
+    NvU32 id = NvU64_LO32(addr >> RM_PAGE_SHIFT);
+    return id;
+}
+
+void gpuChangeComputeModeRefCount_IMPL(OBJGPU *pGpu, NvU32 command)
+{
+    switch (command)
+    {
+        case NV_GPU_COMPUTE_REFCOUNT_COMMAND_INCREMENT:
+            NV_ASSERT(pGpu->computeModeRefCount >= 0);
+            ++pGpu->computeModeRefCount;
+
+            if (1 == pGpu->computeModeRefCount)
+            {
+                NV_PRINTF(LEVEL_INFO, "GPU (ID: 0x%x): new mode: COMPUTE\n",
+                          pGpu->gpuId);
+
+                timeoutInitializeGpuDefault(&pGpu->timeoutData, pGpu);
+            }
+            break;
+
+        case NV_GPU_COMPUTE_REFCOUNT_COMMAND_DECREMENT:
+            --pGpu->computeModeRefCount;
+            NV_ASSERT(pGpu->computeModeRefCount >= 0);
+
+            if (pGpu->computeModeRefCount < 0)
+            {
+                pGpu->computeModeRefCount = 0;
+            }
+
+            if (0 == pGpu->computeModeRefCount)
+            {
+                NV_PRINTF(LEVEL_INFO, "GPU (ID: 0x%x): new mode: GRAPHICS\n",
+                          pGpu->gpuId);
+
+                timeoutInitializeGpuDefault(&pGpu->timeoutData, pGpu);
+            }
+            break;
+
+        default:
+            NV_PRINTF(LEVEL_ERROR, "Bad command: 0x%x\n", command);
+            NV_ASSERT(0);
+            break;
+    }
+}
+
+//
+// gpuPostConstruct
+//
+// Called by the gpu manager to finish the OBJGPU construction phase.
+// Tasks handled here include binding a HAL module to the gpu
+// and the construction of engine object offspring.
+//
+NV_STATUS
+gpuPostConstruct_IMPL
+(
+    OBJGPU       *pGpu,
+    GPUATTACHARG *pAttachArg
+)
+{
+    NV_STATUS rmStatus;
+
+    gpumgrAddDeviceInstanceToGpus(NVBIT(pGpu->gpuInstance));
+
+    rmStatus = regAccessConstruct(&pGpu->registerAccess, pGpu);
+    if (rmStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Failed to construct IO Apertures for attached devices\n");
+        return rmStatus;
+    }
+
+    if (IS_VIRTUAL(pGpu))
+    {
+        //
+        // FIXME: gpumgrGetGpuHalFactorOfVirtual has already checked this to initialize halspec,
+        // but the VF HAL matches both legacy and SRIOV.
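+        // The NV_PMC_BOOT_1 read below therefore re-checks the _VGPU field
+        // directly to distinguish an SR-IOV VF from legacy vGPU.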
+        //
+        NvU32 config = GPU_REG_RD32(pGpu, NV_PMC_BOOT_1);
+        if (FLD_TEST_DRF(_PMC, _BOOT_1, _VGPU, _VF, config))
+        {
+            pGpu->bIsVirtualWithSriov = NV_TRUE;
+        }
+        else
+        {
+            pGpu->bIsVirtualWithSriov = NV_FALSE;
+        }
+    }
+
+    pGpu->sriovState.virtualRegPhysOffset = gpuGetVirtRegPhysOffset_HAL(pGpu);
+    pGpu->simMode = NV_SIM_MODE_INVALID;
+
+    gpuInitChipInfo(pGpu);
+
+    // Check if RM is running in a virtualization mode.
+    // This function needs to be called before we use the IS_VIRTUAL macro,
+    // because IS_VIRTUAL relies on it to determine the virtualization mode.
+    // E.g., gpuCreateChildObjects->...->xxxHalIfacesSetup_xxx relies on the IS_VIRTUAL macro.
+    rmStatus = gpuDetermineVirtualMode(pGpu);
+    if (rmStatus != NV_OK)
+        return rmStatus;
+
+    pGpu->setProperty(pGpu, PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA,
+                      gpuIsCtxBufAllocInPmaSupported_HAL(pGpu));
+    //
+    // gpuDetermineVirtualMode initializes hPci, but only in the virtualization
+    // case. If it did not initialize it, do so here so that it can be used in
+    // the non-virtualization case as well. Don't bother initializing the SOC
+    // PCI handle, since SOC is not on PCIe.
+    //
+    if (pGpu->hPci == NULL && !pGpu->bIsSOC)
+    {
+        //
+        // We don't check the return status. Even if the PCI handle is not obtained,
+        // it should not block the rest of the GPU init sequence.
+        //
+        _gpuInitPciHandle(pGpu);
+    }
+
+    //
+    // Initialize the base offset for the virtual registers for the physical
+    // function or bare metal
+    //
+    pGpu->sriovState.virtualRegPhysOffset = gpuGetVirtRegPhysOffset_HAL(pGpu);
+
+    NV_ASSERT_OK_OR_RETURN(
+        _gpuChildrenPresentInit(pGpu));
+
+    //
+    // Initialize engine order before engine init/load/etc.
+    //
+    rmStatus = _gpuCreateEngineOrderList(pGpu);
+    if (rmStatus != NV_OK)
+        return rmStatus;
+
+    gpuBuildClassDB(pGpu);
+
+    // The first time the emulation setting is checked is in timeoutInitializeGpuDefault.
+    pGpu->computeModeRefCount = 0;
+    pGpu->hComputeModeReservation = NV01_NULL_OBJECT;
+
+    // Set default timeout values
+    timeoutInitializeGpuDefault(&pGpu->timeoutData, pGpu);
+
+    // Set two-stage error recovery if Vista, Unix, or GSP-RM.
+    if (!IsAMODEL(pGpu))
+    {
+        pGpu->bTwoStageRcRecoveryEnabled = NV_TRUE;
+    }
+
+    if (hypervisorIsVgxHyper())
+    {
+        pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU, NV_TRUE);
+    }
+
+    // Create core objects (i.e., bif)
+    rmStatus = gpuCreateChildObjects(pGpu, /* bConstructEarly */ NV_TRUE);
+    if (rmStatus != NV_OK)
+        return rmStatus;
+
+    gpuGetIdInfo_HAL(pGpu);
+    gpuUpdateIdInfo_HAL(pGpu);
+
+    _gpuInitPhysicalRmApi(pGpu);
+
+    // We need to get the illumination values after the GPU Id has been set up,
+    // to allow for GPU-specific settings
+    gpuDeterminePersistantIllumSettings(pGpu);
+
+#if NVCPU_IS_PPC64LE
+    // Skip PCI Express Host Bridge initialization on PPC64 platforms
+    _setPlatformNoHostbridgeDetect(NV_TRUE);
+#endif
+
+    // Construct and update the engine database
+    rmStatus = gpuConstructEngineTable(pGpu);
+    if (rmStatus != NV_OK)
+        return rmStatus;
+    rmStatus = gpuUpdateEngineTable(pGpu);
+    if (rmStatus != NV_OK)
+        return rmStatus;
+
+    // Create the remaining gpu offspring
+    rmStatus = gpuCreateChildObjects(pGpu, /* bConstructEarly */ NV_FALSE);
+    if (rmStatus != NV_OK)
+        return rmStatus;
+
+    if (IS_SIMULATION(pGpu) && !IS_VIRTUAL(pGpu))
+    {
+        //
+        // gpuDetermineSelfHostedMode must be called after gpuDetermineVirtualMode
+        // and vgpuCreateObject (for VGPU static info), as the self-hosted detection mechanism
+        // in VF depends on them.
But in SCSIM(SIM_BUILD) it is required to be called + // before vgpuCreateObject(from where initRpcInfrastructure_VGPU is called) in PF + // so that the correct setting NV_VGPU_SEND_RING_GP_IN_RING_NO is done + // for the SIM escapes. + // + gpuDetermineSelfHostedMode_HAL(pGpu); + } + + if ( + IS_VIRTUAL(pGpu)) + { + + } + + gpuGetHwDefaults(pGpu); + + // Set any state overrides required for L2 cache only mode + if (gpuIsCacheOnlyModeEnabled(pGpu)) + { + gpuSetCacheOnlyModeOverrides_HAL(pGpu); + } + + // Register the OCA dump callback function. + gpuDumpCallbackRegister(pGpu); + + // Initialize reference count for external kernel clients + pGpu->externalKernelClientCount = 0; + +#if RMCFG_FEATURE_GSPRM_BULLSEYE || defined(GSPRM_BULLSEYE_ENABLE) + if (IS_GSP_CLIENT(pGpu)) + { + } +#endif + + // Initialize the GPU recovery action, if the OS is already in a bad state. + pGpu->currentRecoveryAction = GPU_RECOVERY_ACTION_UNKNOWN; + gpuRefreshRecoveryAction_HAL(pGpu, NV_TRUE); + + return NV_OK; +} + +NV_STATUS gpuConstruct_IMPL +( + OBJGPU *pGpu, + NvU32 gpuInstance, + NvU32 gpuId, + NvUuid *pGpuUuid, + /* const */ GpuArch *pGpuArch // TODO: make `const` after bug 4292180 is fixed +) +{ + + pGpu->pGpuArch = pGpuArch; + pGpu->gpuInstance = gpuInstance; + pGpu->gpuId = pGpu->boardId = gpuId; // boardId may be updated later + pGpu->gspRmInitialized = NV_FALSE; + + if (pGpuUuid != NULL) + { + portMemCopy(&pGpu->gpuUuid.uuid[0], sizeof(pGpu->gpuUuid.uuid), + &pGpuUuid->uuid[0], sizeof(pGpuUuid->uuid)); + pGpu->gpuUuid.isInitialized = NV_TRUE; + } + + // allocate OS-specific GPU extension area + osInitOSHwInfo(pGpu); + +#if KERNEL_GSP_TRACING_RATS_ENABLED + multimapInit(&pGpu->gspTraceEventBufferBindingsUid, portMemAllocatorGetGlobalNonPaged()); +#endif + + // Initialize the i2c port via which external devices will be connected. + pGpu->i2cPortForExtdev = NV402C_CTRL_NUM_I2C_PORTS; + + pGpu->pDpcThreadState = portMemAllocNonPaged(sizeof(THREAD_STATE_NODE)); + NV_ASSERT_OR_RETURN(pGpu->pDpcThreadState != NULL, NV_ERR_NO_MEMORY); + + return gpuConstructPhysical(pGpu); +} + +// NVOC-TODO : delete this after all Rmconfig modules migrated to NVOC +NV_STATUS +gpuBindHalLegacy_IMPL +( + OBJGPU *pGpu, + NvU32 chipId0, + NvU32 chipId1, + NvU32 socChipId0 +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJHALMGR *pHalMgr = SYS_GET_HALMGR(pSys); + NV_STATUS status; + + // chipId0 and chipId1 needs to be function parameter since GPU Reg read + // is not ready at this point. + pGpu->chipId0 = chipId0; + pGpu->chipId1 = chipId1; + + // + // The system object will pass PMC_BOOT_0 and PMC_BOOT_42 to all the HAL's and return the + // one that claims it supports this chip arch/implementation + // + status = halmgrGetHalForGpu(pHalMgr, socChipId0 ? socChipId0 : pGpu->chipId0, pGpu->chipId1, &pGpu->halImpl); + if (status != NV_OK) + return status; + + pGpu->pHal = halmgrGetHal(pHalMgr, pGpu->halImpl); + + return status; +} + +/*! + * @brief The PCI bus family means it has the concept of bus/dev/func + * and compatible PCI config space. 
+ */ +NvBool +gpuIsPciBusFamily_IMPL +( + OBJGPU *pGpu +) +{ + NvU32 busType = gpuGetBusIntfType_HAL(pGpu); + + return ((busType == NV2080_CTRL_BUS_INFO_TYPE_PCI) || + (busType == NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS) || + (busType == NV2080_CTRL_BUS_INFO_TYPE_FPCI)); +} + +static void +_gpuInitPciHandle +( + OBJGPU *pGpu +) +{ + NvU32 domain = gpuGetDomain(pGpu); + NvU8 bus = gpuGetBus(pGpu); + NvU8 device = gpuGetDevice(pGpu); + NvU8 function = 0; + + pGpu->hPci = osPciInitHandle(domain, bus, device, function, NULL, NULL); +} + +static NV_STATUS _gpuRmApiControl +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize +) +{ + RmCtrlParams rmCtrlParams; + CALL_CONTEXT callCtx, *oldCtx = NULL; + RS_LOCK_INFO lockInfo = {0}; + NV_STATUS status = NV_OK; + + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + + // This API is only used to route locally on monolithic or UCODE + NV_ASSERT_OR_RETURN(!IS_FW_CLIENT(pGpu), NV_ERR_INVALID_STATE); + + // + // The physical API can be used on any controls and any handles and it is + // expected to be routed correctly. However, if the caller is using the GPU + // internal handles, we can skip the resource server overhead and make a + // direct function call instead. + // + // This optimization should be skipped on vGPU Guests as they should always + // go through the resource server. The resource server is responsible for + // deciding whether to RPC to the HOST (ROUTE_TO_VGPU_HOST) or call the + // implementation on the guest. + // + if (!IS_VIRTUAL(pGpu) && hClient == pGpu->hInternalClient && hObject == pGpu->hInternalSubdevice) + { + NV_ASSERT_OR_RETURN(pGpu->pCachedSubdevice && pGpu->pCachedRsClient, NV_ERR_INVALID_STATE); + + const struct NVOC_EXPORTED_METHOD_DEF *pEntry; + pEntry = objGetExportedMethodDef((void*)pGpu->pCachedSubdevice, cmd); + + NV_ASSERT_OR_RETURN(pEntry != NULL, NV_ERR_NOT_SUPPORTED); + + NV_ASSERT_OR_RETURN(pEntry->paramSize == paramsSize, NV_ERR_INVALID_PARAM_STRUCT); + NV_PRINTF(LEVEL_INFO, "GPU Internal RM control 0x%08x on gpuInst:%x hClient:0x%08x hSubdevice:0x%08x\n", + cmd, pGpu->gpuInstance, hClient, hObject); + + portMemSet(&rmCtrlParams, 0, sizeof(rmCtrlParams)); + rmCtrlParams.hClient = hClient; + rmCtrlParams.hObject = hObject; + rmCtrlParams.pGpu = pGpu; + rmCtrlParams.cmd = cmd; + rmCtrlParams.flags = NVOS54_FLAGS_LOCK_BYPASS; + rmCtrlParams.pParams = pParams; + rmCtrlParams.paramsSize = paramsSize; + rmCtrlParams.secInfo.privLevel = RS_PRIV_LEVEL_KERNEL; + rmCtrlParams.secInfo.paramLocation = PARAM_LOCATION_KERNEL; + rmCtrlParams.bInternal = NV_TRUE; + + lockInfo.flags = RM_LOCK_FLAGS_NO_GPUS_LOCK | RM_LOCK_FLAGS_NO_CLIENT_LOCK; + rmCtrlParams.pLockInfo = &lockInfo; + + portMemSet(&callCtx, 0, sizeof(callCtx)); + callCtx.pResourceRef = RES_GET_REF(pGpu->pCachedSubdevice); + callCtx.pClient = pGpu->pCachedRsClient; + callCtx.secInfo = rmCtrlParams.secInfo; + callCtx.pServer = &g_resServ; + callCtx.pControlParams = &rmCtrlParams; + callCtx.pLockInfo = rmCtrlParams.pLockInfo; + + NV_ASSERT_OK_OR_RETURN(resservSwapTlsCallContext(&oldCtx, &callCtx)); + + if (pEntry->paramSize == 0) + { + status = ((NV_STATUS(*)(void*))pEntry->pFunc)(pGpu->pCachedSubdevice); + } + else + { + status = ((NV_STATUS(*)(void*,void*))pEntry->pFunc)(pGpu->pCachedSubdevice, pParams); + } + + NV_ASSERT_OK(resservRestoreTlsCallContext(oldCtx)); + } + else + { + RM_API *pInternalRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_ASSERT_OR_RETURN(rmDeviceGpuLockIsOwner(pGpu->gpuInstance), 
+ NV_ERR_INVALID_LOCK_STATE); + + status = pInternalRmApi->Control(pInternalRmApi, hClient, hObject, cmd, pParams, paramsSize); + } + + return status; +} + +static NV_STATUS _gpuRmApiAllocWithHandle +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle hObject, + NvU32 hClass, + void *pAllocParams, + NvU32 paramsSize +) +{ + // Simple forwarder for now + RM_API *pInternalRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + return pInternalRmApi->AllocWithHandle(pInternalRmApi, hClient, hParent, hObject, hClass, pAllocParams, paramsSize); +} +static NV_STATUS _gpuRmApiFree +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject +) +{ + // Simple forwarder for now + RM_API *pInternalRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + return pInternalRmApi->Free(pInternalRmApi, hClient, hObject); +} + +static void +_gpuInitPhysicalRmApi +( + OBJGPU *pGpu +) +{ + // Populate all unused APIs with stubs + pGpu->physicalRmApi = *rmapiGetInterface(RMAPI_STUBS); + pGpu->physicalRmApi.pPrivateContext = pGpu; + + portMemSet(&pGpu->physicalRmApi.defaultSecInfo, 0, + sizeof(pGpu->physicalRmApi.defaultSecInfo)); + pGpu->physicalRmApi.defaultSecInfo.privLevel = RS_PRIV_LEVEL_KERNEL; + pGpu->physicalRmApi.defaultSecInfo.paramLocation = PARAM_LOCATION_KERNEL; + pGpu->physicalRmApi.bHasDefaultSecInfo = NV_TRUE; + pGpu->physicalRmApi.bTlsInternal = NV_TRUE; + pGpu->physicalRmApi.bApiLockInternal = NV_TRUE; + pGpu->physicalRmApi.bRmSemaInternal = NV_TRUE; + pGpu->physicalRmApi.bGpuLockInternal = NV_TRUE; + + // Only initialize the methods that exist on GSP/DCE as well + pGpu->physicalRmApi.Control = _gpuRmApiControl; + pGpu->physicalRmApi.AllocWithHandle = _gpuRmApiAllocWithHandle; + pGpu->physicalRmApi.Free = _gpuRmApiFree; +} + +static NV_STATUS +_gpuInitChipInfo +( + OBJGPU *pGpu +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + const NvU32 paramSize = sizeof(NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS); + NV_STATUS status; + + pGpu->pChipInfo = portMemAllocNonPaged(paramSize); + NV_ASSERT_OR_RETURN(pGpu->pChipInfo != NULL, NV_ERR_NO_MEMORY); + + portMemSet(pGpu->pChipInfo, 0, paramSize); + + NV_ASSERT_OK_OR_GOTO(status, pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_GPU_GET_CHIP_INFO, + pGpu->pChipInfo, paramSize), done); + + pGpu->chipInfo.subRevision = pGpu->pChipInfo->chipSubRev; + pGpu->idInfo.PCIDeviceID = pGpu->pChipInfo->pciDeviceId; + pGpu->idInfo.PCISubDeviceID = pGpu->pChipInfo->pciSubDeviceId; + pGpu->idInfo.PCIRevisionID = pGpu->pChipInfo->pciRevisionId; + +done: + if (status != NV_OK) + { + portMemFree(pGpu->pChipInfo); + pGpu->pChipInfo = NULL; + } + + return status; +} + +static NV_STATUS +gpuInitVmmuInfo +( + OBJGPU *pGpu +) +{ + pGpu->vmmuSegmentSize = 0; + + if (!IS_VIRTUAL(pGpu) && !IS_DCE_CLIENT(pGpu)) + { + NV_STATUS status; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS params; + + status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_GPU_GET_VMMU_SEGMENT_SIZE, + ¶ms, sizeof(params)); + + if (status == NV_ERR_NOT_SUPPORTED) + { + // Leave segment size initialized to zero to signal no VMMU present on physical + return NV_OK; + } + else if (status != NV_OK) + { + return status; + } + + pGpu->vmmuSegmentSize = params.vmmuSegmentSize; + } + + return NV_OK; +} + +static NvU32 gpuGetDceClientInternalClientHandle(OBJGPU *pGpu) +{ + NvU32 hClient = RS_CLIENT_INTERNAL_HANDLE_BASE; + if (IS_DCE_CLIENT(pGpu)) + { + 
NV_ASSERT_OR_GOTO(GPU_GET_DCECLIENTRM(pGpu) != NULL, exit); + hClient = GPU_GET_DCECLIENTRM(pGpu)->hInternalClient; + } +exit: + return hClient; +} +static NV_STATUS _gpuAllocateInternalObjects +( + OBJGPU *pGpu +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NV_STATUS status = NV_OK; + + if (IS_GSP_CLIENT(pGpu)) + { + } + else if (IS_DCE_CLIENT(pGpu)) + { + pGpu->hInternalClient = gpuGetDceClientInternalClientHandle(pGpu); + pGpu->hInternalDevice = NV_GPU_INTERNAL_DEVICE_HANDLE; + pGpu->hInternalSubdevice = NV_GPU_INTERNAL_SUBDEVICE_HANDLE; + } + else + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_ASSERT_OK_OR_RETURN(rmapiutilAllocClientAndDeviceHandles( + pRmApi, pGpu, &pGpu->hInternalClient, &pGpu->hInternalDevice, &pGpu->hInternalSubdevice)); + + NV_ASSERT_OK_OR_GOTO(status, serverGetClientUnderLock(&g_resServ, pGpu->hInternalClient, + &pGpu->pCachedRsClient), done); + NV_ASSERT_OK_OR_GOTO(status, subdeviceGetByHandle(pGpu->pCachedRsClient, pGpu->hInternalSubdevice, + &pGpu->pCachedSubdevice), done); + } + + NV_PRINTF(LEVEL_INFO, "GPU-%d allocated hInternalClient=0x%08x\n", pGpu->gpuInstance, pGpu->hInternalClient); + + if (IS_FW_CLIENT(pGpu)) + { + rmapiControlCacheSetGpuAttrForObject(pGpu->hInternalClient, + pGpu->hInternalSubdevice, + pGpu); + + rmapiControlCacheSetGpuAttrForObject(pGpu->hInternalClient, + pGpu->hInternalDevice, + pGpu); + } + + // + // Allocate the internal client for lock stress testing if lock stress testing is + // enabled through the registry. + // + if (pSys->getProperty(pSys, PDB_PROP_SYS_ENABLE_RM_TEST_ONLY_CODE)) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_ROOT, + &pGpu->hInternalLockStressClient, + sizeof(pGpu->hInternalLockStressClient)), + done); + } + else + pGpu->hInternalLockStressClient = NV01_NULL_OBJECT; + + +done: + if (status != NV_OK) + { + _gpuFreeInternalObjects(pGpu); + } + + return status; +} + +static void _gpuFreeInternalObjects +( + OBJGPU *pGpu +) +{ + // Free internal lock stress client if it was allocated + if (pGpu->hInternalLockStressClient != NV01_NULL_OBJECT) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + pRmApi->Free(pRmApi, pGpu->hInternalLockStressClient, + pGpu->hInternalLockStressClient); + } + + if (IS_FW_CLIENT(pGpu)) + { + rmapiControlCacheFreeObjectEntry(pGpu->hInternalClient, pGpu->hInternalSubdevice); + rmapiControlCacheFreeObjectEntry(pGpu->hInternalClient, pGpu->hInternalDevice); + } + else + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + rmapiutilFreeClientAndDeviceHandles(pRmApi, + &pGpu->hInternalClient, &pGpu->hInternalDevice, &pGpu->hInternalSubdevice); + } +} + +static NV_STATUS +_gpuCreateEngineOrderList +( + OBJGPU *pGpu +) +{ + NvU32 i; + NvU32 numLists; + NV_STATUS status = NV_OK; + GpuEngineOrder *pEngineOrder = &pGpu->engineOrder; + NvU32 numEngineDesc, curEngineDesc; + NvU32 listTypes[] = {GCO_LIST_INIT, GCO_LIST_LOAD, GCO_LIST_UNLOAD, GCO_LIST_DESTROY}; + ENGDESCRIPTOR **ppEngDescriptors[4]; + ENGLIST_ITER it; + ENGDESCRIPTOR engDesc; + + ct_assert(NV_ARRAY_ELEMENTS(ppEngDescriptors) == NV_ARRAY_ELEMENTS(listTypes)); + +#define GPU_CHILD(a, b, numInstances, c, d) +numInstances + + struct ChildList { + char children[ 0 + + #include "gpu/gpu_child_list.h" + ]; + }; + + // + // The maximum number of engines known to RM controls + // must be at least the number of actual 
OBJGPU children. + // + ct_assert(NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS >= + sizeof(((struct ChildList*)(NULL))->children) /* sizeof(ChildList::children) */); + + numLists = NV_ARRAY_ELEMENTS(listTypes); + + ppEngDescriptors[0] = &pEngineOrder->pEngineInitDescriptors; + ppEngDescriptors[1] = &pEngineOrder->pEngineLoadDescriptors; + ppEngDescriptors[2] = &pEngineOrder->pEngineUnloadDescriptors; + ppEngDescriptors[3] = &pEngineOrder->pEngineDestroyDescriptors; + + // + // Find the size of the engine descriptor list. The sizes of all lists + // are checked for consistency to catch mistakes. + // + // The list is copied into OBJGPU storage as it's modified during + // dynamic engine removal (e.g.: gpuMissingEngDescriptor). + // + numEngineDesc = 0; + + for (i = 0; i < numLists; i++) + { + curEngineDesc = 0; + + it = gpuGetEngineOrderListIter(pGpu, listTypes[i]); + + while (gpuGetNextInEngineOrderList(pGpu, &it, &engDesc)) + { + curEngineDesc++; + } + + if ((numEngineDesc != 0) && (numEngineDesc != curEngineDesc)) + { + NV_PRINTF(LEVEL_ERROR, + "Sizes of all engine order lists do not match!\n"); + NV_ASSERT(0); + status = NV_ERR_INVALID_STATE; + goto done; + } + + numEngineDesc = curEngineDesc; + } + + pEngineOrder->numEngineDescriptors = numEngineDesc; + + + for (i = 0; i < numLists; i++) + { + curEngineDesc = 0; + + *ppEngDescriptors[i] = portMemAllocNonPaged(sizeof(ENGDESCRIPTOR) * numEngineDesc); + if ( NULL == *ppEngDescriptors[i]) + { + NV_ASSERT(0); + status = NV_ERR_NO_MEMORY; + goto done; + } + + it = gpuGetEngineOrderListIter(pGpu, listTypes[i]); + + while (gpuGetNextInEngineOrderList(pGpu, &it, &engDesc)) + { + (*ppEngDescriptors[i])[curEngineDesc] = engDesc; + curEngineDesc++; + } + } + + NvU32 numGenericClassDesc = 0, numEngClassDescHal = 0, numNoEngClassDescHal = 0; + + const NvU32 *pGenericClassDescs = NULL; + const NvU32 *pNoEngClassDescsHal = NULL; + const CLASSDESCRIPTOR *pEngClassDescsHal = gpuGetEngClassDescriptorList_HAL(pGpu, &numEngClassDescHal); + + if (!RMCFG_FEATURE_PLATFORM_MODS) + { + pGenericClassDescs = gpuGetGenericClassList(pGpu, &numGenericClassDesc); + pNoEngClassDescsHal = gpuGetNoEngClassList_HAL(pGpu, &numNoEngClassDescHal); + } + + NvU32 numClassDescTotal = numGenericClassDesc + numNoEngClassDescHal + numEngClassDescHal; + + pEngineOrder->pClassDescriptors = portMemAllocNonPaged(sizeof(CLASSDESCRIPTOR) * numClassDescTotal); + if (pEngineOrder->pClassDescriptors == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + NvU32 tgtIdx = 0; + + if (!RMCFG_FEATURE_PLATFORM_MODS) + { + for (NvU32 srcIdx = 0; srcIdx < numGenericClassDesc; srcIdx++) + { + pEngineOrder->pClassDescriptors[tgtIdx++] = (CLASSDESCRIPTOR){pGenericClassDescs[srcIdx], ENG_GPU}; + } + + for (NvU32 srcIdx = 0; srcIdx < numNoEngClassDescHal; srcIdx++) + { + pEngineOrder->pClassDescriptors[tgtIdx++] = (CLASSDESCRIPTOR){pNoEngClassDescsHal[srcIdx], ENG_GPU}; + } + } + + if (numEngClassDescHal > 0) + { + portMemCopy(&pEngineOrder->pClassDescriptors[tgtIdx], + sizeof(CLASSDESCRIPTOR) * numEngClassDescHal, + pEngClassDescsHal, + sizeof(CLASSDESCRIPTOR) * numEngClassDescHal); + } + + pEngineOrder->numClassDescriptors = numClassDescTotal; + + return NV_OK; + +done: + portMemFree(pEngineOrder->pEngineInitDescriptors); + pEngineOrder->pEngineInitDescriptors = NULL; + + portMemFree(pEngineOrder->pEngineDestroyDescriptors); + pEngineOrder->pEngineDestroyDescriptors = NULL; + + portMemFree(pEngineOrder->pEngineLoadDescriptors); + pEngineOrder->pEngineLoadDescriptors = NULL; + + 
portMemFree(pEngineOrder->pEngineUnloadDescriptors); + pEngineOrder->pEngineUnloadDescriptors = NULL; + + return status; +} + +static void +_gpuFreeEngineOrderList +( + OBJGPU *pGpu +) +{ + GpuEngineOrder *pEngineOrder = &pGpu->engineOrder; + + if (!pEngineOrder->pEngineInitDescriptors) + return; + + portMemFree(pEngineOrder->pEngineInitDescriptors); + portMemFree(pEngineOrder->pEngineDestroyDescriptors); + portMemFree(pEngineOrder->pEngineLoadDescriptors); + portMemFree(pEngineOrder->pEngineUnloadDescriptors); + portMemFree(pEngineOrder->pClassDescriptors); + + pEngineOrder->pEngineInitDescriptors = NULL; + pEngineOrder->pEngineDestroyDescriptors = NULL; + pEngineOrder->pEngineLoadDescriptors = NULL; + pEngineOrder->pEngineUnloadDescriptors = NULL; + pEngineOrder->pClassDescriptors = NULL; +} + +/*! + * @brief For a given @ref OBJGPU, given the @ref NVOC_CLASS_ID for an object + * that is a @ref OBJGPU child, this function will return the + * @ref NVOC_CLASS_INFO for the concrete class type that should be + * instantiated for the field with that @ref NVOC_CLASS_ID + * + * @details This function helps support polymorphism of @ref OBJGPU children. + * The provided @ref NVOC_CLASS_ID is the class ID for the base class + * of the pointer field in @ref OBJGPU (e.g., @ref OBJGPU::pPmu), which + * can be made to point at different sub-classes at runtime. This + * function, given that @ref NVOC_CLASS_ID, provides the information + * about which concrete sub-class should actually be constructed. + * + * @param[in] pGpu + * @param[in] classId + * @ref NVOC_CLASS_ID for the class type of the base class pointer in + * @ref OBJGPU + * @param[out] ppClassInfo + * Pointer to location into which to store pointer to the class info for the + * concrete class to be constructed + * + * @return @ref NV_OK + * Success + * @return @ref NV_ERR_INVALID_STATE + * No match for classId found within this @ref OBJGPU's children + */ +static NV_STATUS +_gpuChildNvocClassInfoGet +( + OBJGPU *pGpu, + NVOC_CLASS_ID classId, + const NVOC_CLASS_INFO **ppClassInfo +) +{ + NvU32 i; + + for (i = 0U; i < pGpu->numChildrenPresent; i++) + { + if (classId == pGpu->pChildrenPresent[i].classId) + { + *ppClassInfo = pGpu->pChildrenPresent[i].pClassInfo; + return NV_OK; + } + } + + DBG_BREAKPOINT(); + return NV_ERR_INVALID_STATE; +} + +/*! + * @brief Returns the unshared engstate for the child object with the given engine + * descriptor (i.e.: the ENGSTATE without any of the SLI sharing hacks). + * + * All engines are uniquely identified by their engine descriptor. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc ENGDESCRIPTOR + */ +OBJENGSTATE * +gpuGetEngstateNoShare_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc) +{ + switch (ENGDESC_FIELD(engDesc, _CLASS)) + { +#define GPU_CHILD_SINGLE_INST(className, accessorName, c, d, e) \ + case classId(className): \ + return dynamicCast((Dynamic*)accessorName(pGpu), OBJENGSTATE); +#define GPU_CHILD_MULTI_INST(className, accessorName, c, d, e) \ + case classId(className): \ + return dynamicCast((Dynamic*)accessorName(pGpu, ENGDESC_FIELD(engDesc, _INST)), OBJENGSTATE); + + #include "gpu/gpu_child_list.h" + } + + return NULL; +} + +/*! + * @brief Returns the engstate for the child object with the given engine descriptor + * + * All engines are uniquely identified by their engine descriptor. 
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc ENGDESCRIPTOR + */ +OBJENGSTATE * +gpuGetEngstate_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc) +{ + + // Everything else is unshared + return gpuGetEngstateNoShare(pGpu, engDesc); +} + + +/*! + * @brief Iterates over pGpu's children, returning those that inherit given classId + * + * @param[in] pGpu OBJGPU pointer + * @param[in,out] pIt Iterator + * @param[in] classId classId of class the given child has to inherit + * + * @return The next matching child, already cast to given type or NULL + */ +void * +gpuGetNextChildOfTypeUnsafe_IMPL +( + OBJGPU *pGpu, + GPU_CHILD_ITER *pIt, + NvU32 classId +) +{ + void *pDerivedChild; + const NvU32 numChildren = gpuGetNumChildren(pGpu); + + while (pIt->childIndex < numChildren) + { + Dynamic *pDynamicChild = gpuGetChild(pGpu, pIt->childIndex++); + if (pDynamicChild == NULL) + continue; + + pDerivedChild = objDynamicCastById(pDynamicChild, classId); + if (pDerivedChild != NULL) + return pDerivedChild; + } + + return NULL; +} + +/*! + * @brief The generic object constructor + */ +static NV_STATUS +gpuCreateObject +( + OBJGPU *pGpu, + NVOC_CLASS_ID classId, + NvU64 instanceID, + Dynamic **ppChildPtr +) +{ + NV_STATUS status; + OBJENGSTATE *pEngstate; + ENGDESCRIPTOR engDesc = MKENGDESC(classId, instanceID); + const NVOC_CLASS_INFO *pClassInfo; + Dynamic *pConcreteChild = NULL; + ENGSTATE_TRANSITION_DATA engTransitionData; + + if (!gpuShouldCreateObject(pGpu, classId, instanceID)) + return NV_OK; + + NV_ASSERT_OK_OR_RETURN( + _gpuChildNvocClassInfoGet(pGpu, classId, &pClassInfo)); + + // Ask the object database utility to create a child object. + status = objCreateDynamic(&pConcreteChild, pGpu, pClassInfo); + + if (status != NV_OK) + { + return status; + } + NV_ASSERT_OR_RETURN(pConcreteChild != NULL, NV_ERR_INVALID_STATE); + + // + // Cast back to a pointer to the base class and assign it into the pointer + // in OBJGPU + // + *ppChildPtr = objDynamicCastById(pConcreteChild, classId); + NV_ASSERT_TRUE_OR_GOTO(status, + (*ppChildPtr != NULL), + NV_ERR_INVALID_STATE, + gpuCreateObject_exit); + + pEngstate = dynamicCast(*ppChildPtr, OBJENGSTATE); + + if (pEngstate == NULL) + { + status = NV_ERR_INVALID_STATE; + goto gpuCreateObject_exit; + } + + status = engstateConstructBase(pEngstate, pGpu, engDesc); + NV_CHECK_OR_GOTO(LEVEL_INFO, status == NV_OK, gpuCreateObject_exit); + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_CONSTRUCT, &engTransitionData); + status = engstateConstructEngine(pGpu, pEngstate, engDesc); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_CONSTRUCT, &engTransitionData); + + // If engine is missing, free it immediately + if (pEngstate->getProperty(pEngstate, PDB_PROP_ENGSTATE_IS_MISSING)) + { + status = NV_ERR_NOT_SUPPORTED; + } + +gpuCreateObject_exit: + if (status != NV_OK) + { + objDelete(pConcreteChild); + *ppChildPtr = NULL; + } + + // + // It's not an error if the engine is simply unsupported. 
+ // We correct the status here instead of propagating it to the caller, + // to minimize the generated code size since it directly impacts GSP-RM perf + // + if (status == NV_ERR_NOT_SUPPORTED) + status = NV_OK; + + return status; +} + + +void +gpuDestruct_IMPL +( + OBJGPU *pGpu +) +{ + HWBC_LIST *pGpuHWBCList = NULL; + NvS32 i; + + // + // If device instance is unassigned, we haven't initialized far enough to + // do any accounting with it + // + if (gpuGetDeviceInstance(pGpu) != NV_MAX_DEVICES) + { + rmapiReportLeakedDevices(gpuGetGpuMask(pGpu)); + } + + // Free children in reverse order from construction + for (i = (NvS32)gpuGetNumChildren(pGpu) - 1; i >= 0; i--) + { + Dynamic *pChild = gpuGetChild(pGpu, i); + if (pChild) + { + objDelete(pChild); + pGpu->children.pChild[i] = NULL; + } + } + + _gpuFreeEngineOrderList(pGpu); + + portMemFree(pGpu->pDeviceInfoTable); + pGpu->pDeviceInfoTable = NULL; + pGpu->numDeviceInfoEntries = 0; + + gpuDestroyEngineTable(pGpu); + gpuDestroyClassDB(pGpu); + osDestroyOSHwInfo(pGpu); + + while(pGpu->pHWBCList) + { + pGpuHWBCList = pGpu->pHWBCList; + pGpu->pHWBCList = pGpuHWBCList->pNext; + portMemFree(pGpuHWBCList); + } + + // + // Destroy and free the RegisterAccess object linked to this GPU + // This should be moved out to gpu_mgr in the future to line up with + // the construction, but currently depends on pGpu still existing + // + regAccessDestruct(&pGpu->registerAccess); + + NV_ASSERT(pGpu->numConstructedFalcons == 0); + + portMemFree(pGpu->pRegopOffsetScratchBuffer); + pGpu->pRegopOffsetScratchBuffer = NULL; + + portMemFree(pGpu->pRegopOffsetAddrScratchBuffer); + pGpu->pRegopOffsetAddrScratchBuffer = NULL; + + pGpu->regopScratchBufferMaxOffsets = 0; + + NV_ASSERT(pGpu->numSubdeviceBackReferences == 0); + portMemFree(pGpu->pSubdeviceBackReferences); + pGpu->pSubdeviceBackReferences = NULL; + pGpu->numSubdeviceBackReferences = 0; + pGpu->maxSubdeviceBackReferences = 0; + +#if KERNEL_GSP_TRACING_RATS_ENABLED + multimapDestroy(&pGpu->gspTraceEventBufferBindingsUid); +#endif + + portMemFree(pGpu->pDpcThreadState); + + gpuDestructPhysical(pGpu); +} + +/*! 
+ * @brief Initializes @ref OBJGPU::pChildrenPresent data + * + * @param[in] pGpu + * + * @return @ref NV_OK + * Success + */ +static NV_STATUS +_gpuChildrenPresentInit +( + OBJGPU *pGpu +) +{ + pGpu->pChildrenPresent = + gpuGetChildrenPresent_HAL(pGpu, &pGpu->numChildrenPresent); + return NV_OK; +} + +static NV_STATUS +gpuCreateChildObjects +( + OBJGPU *pGpu, + NvBool bConstructEarly +) +{ + NV_STATUS status; + +#define ACCESSOR_SINGLE_INST(indexVar, gpuField) gpuField +#define ACCESSOR_MULTI_INST(indexVar, gpuField) gpuField[indexVar] + +#define COMMON_CREATE_CHILD(className, numInstances, bEarly, gpuField, accessorMacro) \ + if (bEarly == bConstructEarly) \ + { \ + NvU64 i; \ + for (i = 0; i < numInstances; i++) \ + { \ + status = gpuCreateObject(pGpu, classId(className), i, \ + (Dynamic**)&pGpu->children.named.accessorMacro(i, gpuField)); \ + if (status != NV_OK) \ + return status; \ + } \ + } + +#define GPU_CHILD_SINGLE_INST(className, _unusedAccessorName, numInstances, bEarly, gpuField) \ + COMMON_CREATE_CHILD(className, numInstances, bEarly, gpuField, ACCESSOR_SINGLE_INST) + +#define GPU_CHILD_MULTI_INST(className, _unusedAccessorName, numInstances, bEarly, gpuField) \ + COMMON_CREATE_CHILD(className, numInstances, bEarly, gpuField, ACCESSOR_MULTI_INST) + + #include "gpu/gpu_child_list.h" + +#undef COMMON_CREATE_CHILD +#undef ACCESSOR_MULTI_INST +#undef ACCESSOR_SINGLE_INST + + return NV_OK; +} + +static NvBool +gpuShouldCreateObject +( + OBJGPU *pGpu, + NvU32 classId, + NvU32 instance +) +{ + NvU32 childIdx; + + // Let the HAL confirm that we should create an object for this engine. + for (childIdx = 0; childIdx < pGpu->numChildrenPresent; childIdx++) + { + if (classId == pGpu->pChildrenPresent[childIdx].classId) + { + return (instance < pGpu->pChildrenPresent[childIdx].instances); + } + } + + return NV_FALSE; +} + +NvU32 +gpuGetGpuMask_IMPL +( + OBJGPU *pGpu +) +{ + if (IsSLIEnabled(pGpu)) + { + return 1 << (gpumgrGetSubDeviceInstanceFromGpu(pGpu)); + } + else + { + return 1 << (pGpu->gpuInstance); + } +} + +/*! + * The engine removal protocol is as follows: + * - engines returning an error code from ConstructEngine will be immediately + * removed (this happens in gpuCreateObject) + * - engines may set ENGSTATE_IS_MISSING at any time before gpuStatePreInit + * - engines with ENGSTATE_IS_MISSING set at gpuStatePreInit will be removed + * - engines that return NV_FALSE from engstateIsPresent at gpuStatePreInit + * will be removed + * + * gpuRemoveMissingEngines takes place before the main loop in gpuStatePreInit + * and is responsible for removing engines satisfying the last two bullets + * above. + * + * Additionally, note that this function handles engines that were never + * present according to gpuChildrenPresent_HAL; these engines' associated + * classes must be removed from the class DB, and that is handled via this + * function. 
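+ *
+ * For example (a hypothetical case), an engine that appears in the children
+ * list but reports NV_FALSE from engstateIsPresent at pre-init time is
+ * destroyed here, and its classes are pruned from the class DB.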
+ */ +static NV_STATUS +gpuRemoveMissingEngines +( + OBJGPU *pGpu +) +{ + NvU32 curEngDescIdx; + ENGDESCRIPTOR *pEngDescriptorList = gpuGetInitEngineDescriptors(pGpu); + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + NV_STATUS rmStatus = NV_OK; + NvU32 curClassDescIdx; + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + OBJENGSTATE *pEngstate; + ENGDESCRIPTOR curEngDescriptor = pEngDescriptorList[curEngDescIdx]; + NVOC_CLASS_ID curClassId = ENGDESC_FIELD(curEngDescriptor, _CLASS); + + if (curClassId == classId(OBJINVALID)) + { + continue; + } + + pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate != NULL) + { + if (!pEngstate->getProperty(pEngstate, PDB_PROP_ENGSTATE_IS_MISSING) && + engstateIsPresent(pGpu, pEngstate)) + { + continue; + } + + gpuDestroyMissingEngine(pGpu, pEngstate); + pEngstate = NULL; + } + + // + // pEngstate is NULL or missing, so we must be sure to unregister + // all associated API classes and remove the stale engine descriptors + // from the GPU HAL engine lists. + // + NV_PRINTF(LEVEL_INFO, "engine %d:%d is missing, removing\n", + ENGDESC_FIELD(curEngDescriptor, _CLASS), + ENGDESC_FIELD(curEngDescriptor, _INST)); + + rmStatus = gpuDeleteEngineOnPreInit(pGpu, curEngDescriptor); + NV_ASSERT(rmStatus == NV_OK || !"Error while trying to remove missing engine"); + } + + // + // Check the rest of the class descriptors for engines that are not part of + // the present list, and therefore not part of the init list, and ensure + // that those get removed from the class DB. + // + for (curClassDescIdx = 0U; + curClassDescIdx < pGpu->engineOrder.numClassDescriptors; + curClassDescIdx++) + { + const GPU_RESOURCE_DESC *const pCurDesc = + &pGpu->engineOrder.pClassDescriptors[curClassDescIdx]; + NvBool bHostSupportsEngine = NV_FALSE; + + // + // Skip any classes which: + // 1.) Do not have an engine class ID + // 2.) Have an engine of GPU. ENG_GPU does not correspond to an + // OBJENGSTATE, and it can never be missing. + // 3.) Have an OBJENGSTATE that is present + // + if ((ENGDESC_FIELD(pCurDesc->engDesc, _CLASS) == classId(OBJINVALID)) || + (pCurDesc->engDesc == ENG_GPU) || + gpuGetEngstate(pGpu, pCurDesc->engDesc) != NULL) + { + continue; + } + + // + // If the engstate is NULL, the engine may still be supported on GSP or VGPU host. If + // it is, we can skip removing it. 
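+        // The class then remains visible to clients even though no
+        // kernel-side OBJENGSTATE exists, because the physical engine lives
+        // on the other side of the GSP/VGPU boundary.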
+ // + if (IS_FW_CLIENT(pGpu) || IS_VIRTUAL(pGpu)) + { + bHostSupportsEngine = gpuCheckEngineWithOrderList_HAL(pGpu, pCurDesc->engDesc, NV_FALSE); + } + + if (bHostSupportsEngine) + continue; + + NV_ASSERT_OK_OR_RETURN( + gpuDeleteClassFromClassDBByEngTag(pGpu, pCurDesc->engDesc)); + } + + // Update the engine table after deleting any classes from the class DB + NV_ASSERT_OK_OR_RETURN( + gpuUpdateEngineTable(pGpu)); + + return rmStatus; +} + +/* + * Removing classes from classDB of a missing engine + */ +static void +gpuRemoveMissingEngineClasses +( + OBJGPU *pGpu, + NvU32 missingEngDescriptor +) +{ + NvU32 numClasses, i; + NvU32 *pClassList = NULL; + if (gpuGetClassList(pGpu, &numClasses, NULL, missingEngDescriptor) == NV_OK) + { + pClassList = portMemAllocNonPaged(sizeof(NvU32) * numClasses); + if (NV_OK == gpuGetClassList(pGpu, &numClasses, pClassList, missingEngDescriptor)) + { + for (i = 0; i < numClasses; i++) + { + gpuDeleteClassFromClassDBByClassId(pGpu, pClassList[i]); + } + } + portMemFree(pClassList); + pClassList = NULL; + } +} + +/* + * Destroy and unregister engine object of a missing engine + */ +static void +gpuDestroyMissingEngine +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate +) +{ + Dynamic *pDynamic = objFullyDerive(pEngstate); + NvU32 i; + + engstateInitMissing(pGpu, pEngstate); + objDelete(pDynamic); + + for (i = 0; i < gpuGetNumChildren(pGpu); i++) + { + if (pGpu->children.pChild[i] == pDynamic) + { + pGpu->children.pChild[i] = NULL; + return; + } + } +} + +/*! + * @brief Introduced to optimize ENG_GET_GPU by skipping checks and dynamic cast + */ +OBJGPU *gpuEngineGetGpu(Object *pObject) +{ + Object *pObj = pObject; + while ((pObj = pObj->pParent) != NULL) + { + if (objGetClassId(objFullyDerive(pObj)) == classId(OBJGPU)) + return (OBJGPU *)objFullyDerive(pObj); + } + return NULL; +} + +/* + * @brief Find if given engine descriptor is supported by GPU + * + * @param[in] pGpu OBJGPU pointer + * @param[in] descriptor engine descriptor to search for + * + * @returns NV_TRUE if given engine descriptor was found in a + * given engine descriptor list, NV_FALSE otherwise. + * + */ +NvBool +gpuIsEngDescSupported_IMPL +( + OBJGPU *pGpu, + NvU32 descriptor +) +{ + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + ENGDESCRIPTOR *pEngDescriptor = gpuGetInitEngineDescriptors(pGpu); + NvU32 counter = 0; + NvBool engDescriptorFound = NV_FALSE; + + for (counter = 0; counter < numEngDescriptors; counter++) + { + if (pEngDescriptor[counter] == descriptor) + { + engDescriptorFound = NV_TRUE; + break; + } + } + + return engDescriptorFound; +} +/*! + * @brief Mark given Engine Descriptor with ENG_INVALID engine descriptor. + * + * Note: It is legal to have more than one entry with equal Descriptor + * in the Engine Descriptor list. + * + * @param[in] pEngDescriptor Pointer to array of engine descriptors + * @param[in] maxDescriptors Size of engine descriptor array + * @param[in] descriptor Engine descriptor to be changed to ENG_INVALID engine descriptor + * + * @returns void + */ +static void +gpuMissingEngDescriptor(ENGDESCRIPTOR *pEngDescriptor, NvU32 maxDescriptors, + ENGDESCRIPTOR descriptor) +{ + NvU32 counter; + + for (counter = 0; counter < maxDescriptors; counter++) + { + if (pEngDescriptor[counter] == descriptor) + { + pEngDescriptor[counter] = ENG_INVALID; + } + } +} + + +/*! + * @brief Delete an engine from class DB. + * + * WARNING! Function doesn't remove INIT/DESTROY engines from HAL lists. + * gpuInitEng and gpuDestroyEng won't be no-ops for relevant engine. 
+ *
+ * Use case:
+ *  If an engine needs to be removed but StateInit/Destroy are still required,
+ *  it's better to use gpuDeleteEngineOnPreInit instead.
+ *
+ * @param[in] pGpu    OBJGPU pointer
+ * @param[in] engDesc Engine ID to search and remove
+ *
+ * @returns NV_STATUS - NV_OK always.
+ */
+NV_STATUS
+gpuDeleteEngineFromClassDB_IMPL(OBJGPU *pGpu, NvU32 engDesc)
+{
+    ENGDESCRIPTOR *pEngDesc          = NULL;
+    NvU32          numEngDescriptors = gpuGetNumEngDescriptors(pGpu);
+    NvU32          engDescriptor     = engDesc;
+
+    // remove Class tagged with engDesc from Class Database
+    gpuDeleteClassFromClassDBByEngTag(pGpu, engDesc);
+
+    //
+    // Bug 370327
+    // Q: Why remove load/unload?
+    // A: Since this engine does not exist, we should prevent hw accesses to it
+    //    which should ideally only take place in load/unload ( not init/destroy )
+    //
+    // Q: Why not remove init/destroy, the engine's gone, right?
+    // A: If init does some alloc and loadhw does the probe then removing destroy
+    //    will leak.
+    //
+
+    // Remove load
+    pEngDesc = gpuGetLoadEngineDescriptors(pGpu);
+    gpuMissingEngDescriptor(pEngDesc, numEngDescriptors,
+                            engDescriptor);
+
+    // Remove unload
+    pEngDesc = gpuGetUnloadEngineDescriptors(pGpu);
+    gpuMissingEngDescriptor(pEngDesc, numEngDescriptors,
+                            engDescriptor);
+
+    pGpu->engineDB.bValid = NV_FALSE;
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Delete an engine from the class DB, prior to or during the
+ * gpuPreInit stage only.
+ *
+ * WARNING! Function must be used only before the INIT stage, to avoid leaks.
+ * See gpuDeleteEngineFromClassDB for more information.
+ *
+ * Function removes Classes with given Engine Tag from class DB
+ * and removes Engines from HAL lists with equal Engine Tags.
+ * Function doesn't remove Engines from HAL Sync list,
+ * see gpuDeleteEngineFromClassDB for more information.
+ *
+ * Use case:
+ *  Any platform where an engine is absent and it is required to
+ *  prevent the engine's load/unload and init/destroy calls from getting executed.
+ *  In other words, this function is used when it is OK to remove/STUB all
+ *  of the HALs of an engine without jeopardizing the initialization and
+ *  operation of other engines.
+ *
+ * @param[in] pGpu    OBJGPU pointer
+ * @param[in] engDesc Engine ID to search and remove
+ *
+ * @returns NV_STATUS - NV_OK on success, error otherwise.
+ *
+ */
+NV_STATUS
+gpuDeleteEngineOnPreInit_IMPL(OBJGPU *pGpu, NvU32 engDesc)
+{
+    ENGDESCRIPTOR *pEngDesc          = NULL;
+    NvU32          numEngDescriptors = gpuGetNumEngDescriptors(pGpu);
+    ENGDESCRIPTOR  engDescriptor     = engDesc;
+    NV_STATUS      rmStatus          = NV_OK;
+    NvBool         bHostSupported    = NV_FALSE;
+
+    if (IS_FW_CLIENT(pGpu) || IS_VIRTUAL(pGpu))
+        bHostSupported = gpuCheckEngineWithOrderList_HAL(pGpu, engDesc, NV_FALSE);
+
+    // remove Class tagged with engDesc from Class Database.
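+    // (Skipped when the engine is still host-resident behind GSP/VGPU, so
+    // that clients can continue to allocate its classes.)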
+    if (!bHostSupported)
+        gpuDeleteClassFromClassDBByEngTag(pGpu, engDesc);
+
+    // Remove Load Engine Descriptors
+    pEngDesc = gpuGetLoadEngineDescriptors(pGpu);
+    gpuMissingEngDescriptor(pEngDesc, numEngDescriptors,
+                            engDescriptor);
+
+    // Remove Unload Engine Descriptors
+    pEngDesc = gpuGetUnloadEngineDescriptors(pGpu);
+    gpuMissingEngDescriptor(pEngDesc, numEngDescriptors,
+                            engDescriptor);
+
+    // Remove Init Engine Descriptors
+    pEngDesc = gpuGetInitEngineDescriptors(pGpu);
+    gpuMissingEngDescriptor(pEngDesc, numEngDescriptors,
+                            engDescriptor);
+
+    // Remove Destroy Engine Descriptors
+    pEngDesc = gpuGetDestroyEngineDescriptors(pGpu);
+    gpuMissingEngDescriptor(pEngDesc, numEngDescriptors,
+                            engDescriptor);
+
+    if (!bHostSupported)
+    {
+        rmStatus = gpuUpdateEngineTable(pGpu);
+        if (rmStatus != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "Update engine table operation failed!\n");
+            DBG_BREAKPOINT();
+        }
+    }
+
+    return rmStatus;
+}
+
+/*!
+ * @brief Perform GPU pre-init tasks
+ *
+ * Attempts to pre-init all engines in the HAL Init Engine Descriptor list.
+ * If an engine is not present, or its pre-init function reports it is
+ * unsupported, the engine is deleted from the Class DB and HAL lists.
+ *
+ * TODO: Merge structurally equivalent code with other gpuState* functions.
+ * TODO: Fix "init missing" concept to not create unsupported objects at all.
+ *
+ * @param[in] pGpu OBJGPU pointer
+ *
+ * @returns NV_OK upon successful pre-initialization
+ */
+NV_STATUS
+gpuStatePreInit_IMPL
+(
+    OBJGPU *pGpu
+)
+{
+    NV_STATUS rmStatus = NV_OK;
+
+    //
+    // The prereq tracker must be tracked in stateInit/Destroy because it
+    // accumulates dependencies throughout stateInit, which may happen
+    // multiple times during SLI linking, and it does not destroy the prereq
+    // list until the entire object is destroyed
+    //
+    NV_ASSERT_OK_OR_RETURN(
+        objCreate(&pGpu->pPrereqTracker, pGpu, PrereqTracker, pGpu));
+
+    //
+    // gpuDetermineSelfHostedMode must be called after gpuDetermineVirtualMode/kgspInitRm
+    // where VGPU/GSP static info is populated, as the self hosted detection mechanism
+    // in VF and Kernel-RM depends on them respectively.
+    //
+    // For Kernel-RM, gpuDetermineSelfHostedMode_HAL should be called in
+    // gpuStatePreInit because GSP static config gets populated only by
+    // PreInit and also all the gpuIsSelfHosted callers are from PreInit and
+    // onwards.
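+    // (Calling it any earlier would read static info that GSP/VGPU
+    // initialization has not populated yet.)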
+ // + gpuDetermineSelfHostedMode_HAL(pGpu); + + NV_ASSERT_OR_RETURN(rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + NV_ASSERT_OK_OR_RETURN(_gpuAllocateInternalObjects(pGpu)); + NV_ASSERT_OK_OR_RETURN(_gpuInitChipInfo(pGpu)); + NV_ASSERT_OK_OR_RETURN(gpuConstructUserRegisterAccessMap(pGpu)); + NV_ASSERT_OK_OR_RETURN(gpuBuildGenericKernelFalconList(pGpu)); + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_MIG_SUPPORTED)) + { + NvBool bGpuSupportMig = gpuValidateMIGSupport_HAL(pGpu); + + pGpu->setProperty(pGpu, PDB_PROP_GPU_MIG_SUPPORTED, bGpuSupportMig); + } + + rmStatus = gpuRemoveMissingEngines(pGpu); + NV_ASSERT(rmStatus == NV_OK); + + pGpu->bFullyConstructed = NV_TRUE; + + ENGDESCRIPTOR *pEngDescriptorList = gpuGetInitEngineDescriptors(pGpu); + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + NvU32 curEngDescIdx; + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = pEngDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + rmStatus = gpuLoadFailurePathTest(pGpu, NV_REG_STR_GPU_LOAD_FAILURE_TEST_STAGE_PREINIT, curEngDescIdx, NV_FALSE); + if (rmStatus == NV_OK) + { + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_PRE_INIT, &engTransitionData); + rmStatus = engstateStatePreInit(pGpu, pEngstate); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_PRE_INIT, &engTransitionData); + } + if (rmStatus == NV_ERR_NOT_SUPPORTED) + { + switch (curEngDescriptor) + { + // + // Allow removing kernel engines in StatePreInit if their + // physical counterpart is absent. + // + case ENG_KERNEL_DISPLAY: + // On Displayless GPU's, Display Engine is not present. So, RM should not keep the display + // classes in GET_CLASSLIST. Hence removing the Display classes from the ClassDB + gpuRemoveMissingEngineClasses(pGpu, ENG_KERNEL_DISPLAY); + break; + // + // Explicitly track engines that trigger this block + // so that we can verify they function properly + // after they are no longer removed here. + // + case ENG_INFOROM: + // TODO: try to remove this special case + NV_PRINTF(LEVEL_WARNING, + "engine removal in PreInit with NV_ERR_NOT_SUPPORTED is deprecated (%s)\n", + engstateGetName(pEngstate)); + break; + case ENG_HDACODEC: + NV_PRINTF(LEVEL_WARNING, + "engine removal in PreInit with NV_ERR_NOT_SUPPORTED is deprecated (%s)\n", + engstateGetName(pEngstate)); + break; + default: + NV_PRINTF(LEVEL_ERROR, + "disallowing NV_ERR_NOT_SUPPORTED PreInit removal of untracked engine (%s)\n", + engstateGetName(pEngstate)); + DBG_BREAKPOINT(); + NV_ASSERT(0); + break; + } + + gpuDestroyMissingEngine(pGpu, pEngstate); + pEngstate = NULL; + + rmStatus = gpuDeleteEngineOnPreInit(pGpu, curEngDescriptor); + // TODO: destruct engine here after MISSING support is removed + NV_ASSERT(rmStatus == NV_OK || !"Error while trying to remove missing engine"); + } + else if (rmStatus != NV_OK) + { + break; + } + } + + // RM User Shared Data is currently unable to support VGPU due to isolation requirements + if (IS_VIRTUAL(pGpu)) + { + gpuDeleteClassFromClassDBByClassId(pGpu, RM_USER_SHARED_DATA); + } + + gpuInitOptimusSettings(pGpu); + + return rmStatus; +} + +// TODO: Merge structurally equivalent code with other gpuState* functions. 
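+//
+// A minimal sketch (illustrative only, not part of this change) of the shared
+// structure that the merge TODOs in this file refer to: every gpuState*
+// routine walks an engine descriptor list and invokes one engstate callback
+// per engine, treating NV_ERR_NOT_SUPPORTED as non-fatal. The helper name and
+// the function-pointer typedef below are hypothetical, and the block is
+// compiled out.
+//
+#if 0
+typedef NV_STATUS (*EngstateStateFn)(OBJGPU *, OBJENGSTATE *);
+
+static NV_STATUS
+_gpuForEachEngstate
+(
+    OBJGPU          *pGpu,
+    ENGDESCRIPTOR   *pList,
+    NvU32            count,
+    EngstateStateFn  pFn
+)
+{
+    NvU32 i;
+
+    for (i = 0; i < count; i++)
+    {
+        OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, pList[i]);
+        NV_STATUS    status;
+
+        // Engines already removed from the GPU have no engstate
+        if (pEngstate == NULL)
+            continue;
+
+        status = pFn(pGpu, pEngstate);
+
+        // Unsupported engines are skipped rather than treated as fatal
+        if (status == NV_ERR_NOT_SUPPORTED)
+            status = NV_OK;
+
+        if (status != NV_OK)
+            return status;
+    }
+
+    return NV_OK;
+}
+#endif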
+NV_STATUS +gpuStateInit_IMPL +( + OBJGPU *pGpu +) +{ + NV_STATUS rmStatus = NV_OK; + + // Initialize numaNodeId to invalid node ID as "0" can be considered valid node + pGpu->numaNodeId = NV0000_CTRL_NO_NUMA_NODE; + + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + // TODO: Move the below code into appropriate ENGSTATE objects. + // DO NOT ADD MORE SPECIAL CASES HERE! + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + NV_ASSERT_OR_RETURN(rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + NV_ASSERT_OK_OR_GOTO(rmStatus, + gpuStateInitStartedSatisfy_HAL(pGpu, pGpu->pPrereqTracker), + gpuStateInit_exit); + + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + // TODO: Move the above code into appropriate ENGSTATE objects. + // DO NOT ADD MORE SPECIAL CASES HERE! + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + ENGDESCRIPTOR *pEngDescriptorList = gpuGetInitEngineDescriptors(pGpu); + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + NvU32 curEngDescIdx; + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = pEngDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + rmStatus = gpuLoadFailurePathTest(pGpu, NV_REG_STR_GPU_LOAD_FAILURE_TEST_STAGE_INIT, curEngDescIdx, NV_FALSE); + if (rmStatus == NV_OK) + { + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_INIT, &engTransitionData); + rmStatus = engstateStateInit(pGpu, pEngstate); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_INIT, &engTransitionData); + } + + // RMCONFIG: Bail on errors unless the feature/object/engine/class + // is simply unsupported + + if (rmStatus == NV_ERR_NOT_SUPPORTED) + rmStatus = NV_OK; + if (rmStatus != NV_OK) + goto gpuStateInit_exit; + } + + // Set a property indicating that VF BAR0 MMU TLB Invalidation register emulation is required or not. + if (hypervisorIsVgxHyper()) + { + if ( + 0) + { + NvU32 data32 = NV_REG_STR_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE_DEFAULT; + + // Registry override to change default mode, i.e, emulate VF MMU TLB Invalidation register + if ((osReadRegistryDword(pGpu, NV_REG_STR_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE, &data32) == NV_OK) && + (data32 == NV_REG_STR_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE_DISABLE)) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE, NV_FALSE); + } + } + else + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE, NV_FALSE); + } + } + + // Set a property indicating that the state initialization has been done + pGpu->setProperty(pGpu, PDB_PROP_GPU_STATE_INITIALIZED, NV_TRUE); + +gpuStateInit_exit: + return rmStatus; +} + +/*! + * @brief Top level pre-load routine + * + * Provides a mechanism to resolve cyclic dependencies between engines. + * + * StatePreLoad() is called before StateLoad() likewise StatePostUnload() is + * called after StateUnload(). + * + * Dependencies which are DAGs should continue to be resolved by reordering the + * engine descriptor lists. Reordering the descriptor lists won't solve cyclic + * dependencies as at least one constraint would always be violated. + * + * TODO: Merge structurally equivalent code with other gpuState* functions. 
+ *
+ * @param[in]   pGpu    OBJGPU pointer
+ * @param[in]   flags   Type of transition
+ */
+static NV_STATUS
+gpuStatePreLoad
+(
+    OBJGPU *pGpu,
+    NvU32   flags
+)
+{
+    NV_STATUS      rmStatus = NV_OK;
+    ENGDESCRIPTOR *pEngDescriptorList = gpuGetLoadEngineDescriptors(pGpu);
+    NvU32          numEngDescriptors = gpuGetNumEngDescriptors(pGpu);
+    NvU32          curEngDescIdx;
+
+    // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array
+    for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++)
+    {
+        ENGSTATE_TRANSITION_DATA engTransitionData;
+        ENGDESCRIPTOR curEngDescriptor = pEngDescriptorList[curEngDescIdx];
+        OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor);
+
+        if (pEngstate == NULL)
+        {
+            continue;
+        }
+
+        rmStatus = gpuLoadFailurePathTest(pGpu, NV_REG_STR_GPU_LOAD_FAILURE_TEST_STAGE_PRELOAD, curEngDescIdx, NV_FALSE);
+        if (rmStatus == NV_OK)
+        {
+            RMTRACE_ENGINE_PROFILE_EVENT("gpuStatePreLoadEngStart", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount);
+
+            engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_PRE_LOAD, &engTransitionData);
+            rmStatus = engstateStatePreLoad(pGpu, pEngstate, flags);
+            engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_PRE_LOAD, &engTransitionData);
+
+            RMTRACE_ENGINE_PROFILE_EVENT("gpuStatePreLoadEngEnd", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount);
+        }
+        //
+        // An engine load leaving the broadcast status set to NV_TRUE
+        // will most likely mess up the pre-load of the next engines
+        //
+        NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
+
+        // RMCONFIG: Bail on errors unless the feature/object/engine/class
+        // is simply unsupported
+        if (rmStatus == NV_ERR_NOT_SUPPORTED)
+            rmStatus = NV_OK;
+        if (rmStatus != NV_OK)
+            break;
+
+        //
+        // Release and re-acquire the lock to allow interrupts
+        //
+        rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
+
+        rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE,
+                                     RM_LOCK_MODULES_STATE_LOAD);
+        if (rmStatus != NV_OK)
+            break;
+    }
+
+    return rmStatus;
+}
+
+// TODO: Merge structurally equivalent code with other gpuState* functions.
+NV_STATUS
+gpuStateLoad_IMPL
+(
+    OBJGPU *pGpu,
+    NvU32   flags
+)
+{
+    NV_STATUS rmStatus = NV_OK;
+    NV_STATUS status   = NV_OK;
+
+    pGpu->registerAccess.regReadCount = pGpu->registerAccess.regWriteCount = 0;
+    RMTRACE_ENGINE_PROFILE_EVENT("gpuStateLoadStart", pGpu->gpuId, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount);
+
+    _gpuDetectNvswitchSupport(pGpu);
+
+    // Initialize SRIOV specific members of OBJGPU
+    status = gpuInitSriov_HAL(pGpu);
+    if (status != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Error initializing SRIOV: 0x%0x\n", status);
+        return status;
+    }
+
+    if (IS_VIRTUAL_WITH_FULL_SRIOV(pGpu) && (flags & GPU_STATE_FLAGS_PRESERVING))
+    {
+        NV_RM_RPC_RESTORE_HIBERNATION_DATA(pGpu, rmStatus);
+        if (rmStatus != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "RPC to restore host hibernation data failed, status 0x%x\n", rmStatus);
+            DBG_BREAKPOINT();
+            return rmStatus;
+        }
+    }
+
+    if (!(flags & GPU_STATE_FLAGS_PRESERVING))
+    {
+        // It is a no-op on baremetal and inside a non-SRIOV guest.
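+        // (GPU_STATE_FLAGS_PRESERVING transitions, e.g. suspend/resume,
+        // keep the existing client share instead of recreating it.)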
+        rmStatus = gpuCreateDefaultClientShare_HAL(pGpu);
+        if (rmStatus != NV_OK)
+        {
+            return rmStatus;
+        }
+    }
+    NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
+
+    rmStatus = gpuStatePreLoad(pGpu, flags);
+    if (rmStatus != NV_OK)
+    {
+        //
+        // return early if we broke out of the preLoad sequence with
+        // rmStatus != NV_OK
+        //
+        return rmStatus;
+    }
+
+    ENGDESCRIPTOR *pEngDescriptorList = gpuGetLoadEngineDescriptors(pGpu);
+    NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu);
+    NvU32 curEngDescIdx;
+
+    // Set indicator that we are running state load
+    pGpu->bStateLoading = NV_TRUE;
+
+    // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array
+    for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++)
+    {
+        ENGSTATE_TRANSITION_DATA engTransitionData;
+        ENGDESCRIPTOR curEngDescriptor = pEngDescriptorList[curEngDescIdx];
+        OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor);
+
+        if (pEngstate == NULL)
+        {
+            continue;
+        }
+
+        rmStatus = gpuLoadFailurePathTest(pGpu, NV_REG_STR_GPU_LOAD_FAILURE_TEST_STAGE_LOAD, curEngDescIdx, NV_FALSE);
+        if (rmStatus == NV_OK)
+        {
+            RMTRACE_ENGINE_PROFILE_EVENT("gpuStateLoadEngStart", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount);
+
+            engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_LOAD, &engTransitionData);
+            rmStatus = engstateStateLoad(pGpu, pEngstate, flags);
+            engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_LOAD, &engTransitionData);
+        }
+
+        // TODO: This is temporary and may be dead with TESLA
+        if (rmStatus == NV_ERR_INVALID_ADDRESS)
+        {
+            NV_PRINTF(LEVEL_ERROR, "NV_ERR_INVALID_ADDRESS is no longer supported in StateLoad (%s)\n",
+                      engstateGetName(pEngstate));
+            DBG_BREAKPOINT();
+        }
+
+        //
+        // An engine load leaving the broadcast status set to NV_TRUE
+        // will most likely mess up the load of the next engines
+        //
+        NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
+
+        // RMCONFIG: Bail on errors unless the feature/object/engine/class
+        // is simply unsupported
+        if (rmStatus == NV_ERR_NOT_SUPPORTED)
+            rmStatus = NV_OK;
+        if (rmStatus != NV_OK)
+        {
+            goto gpuStateLoad_exit;
+        }
+
+        //
+        // Release and re-acquire the lock to allow interrupts
+        //
+        rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
+
+        rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE,
+                                     RM_LOCK_MODULES_STATE_LOAD);
+        if (rmStatus != NV_OK)
+            goto gpuStateLoad_exit;
+
+        RMTRACE_ENGINE_PROFILE_EVENT("gpuStateLoadEngEnd", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount);
+    }
+
+    rmStatus = gpuInitVmmuInfo(pGpu);
+    if (rmStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Error initializing VMMU info: 0x%0x\n", rmStatus);
+        goto gpuStateLoad_exit;
+    }
+
+    {
+        // Perform post load operations
+        rmStatus = gpuStatePostLoad(pGpu, flags);
+        if (rmStatus != NV_OK)
+            goto gpuStateLoad_exit;
+
+    }
+
+    // Clear indicator that we are running state load
+    pGpu->bStateLoading = NV_FALSE;
+
+    // Set a property indicating that the state load has been done
+    pGpu->bStateLoaded = NV_TRUE;
+
+    RMTRACE_ENGINE_PROFILE_EVENT("gpuStateLoadEnd", pGpu->gpuId, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount);
+
+gpuStateLoad_exit:
+    return rmStatus;
+}
+
+static NV_STATUS
+_gpuSetVgpuMgrConfig
+(
+    OBJGPU *pGpu
+)
+{
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Top level post-load routine
+ *
+ * Provides a mechanism to resolve cyclic dependencies between engines. For
+ * example, OBJFB depends on OBJCE on Fermi (for memory scrubbing), likewise
+ * OBJCE also depends on OBJFB (for instance memory).
+ *
+ * StatePostLoad() is called after StateLoad() likewise StatePreUnload() is
+ * called prior to StateUnload().
+ *
+ * Dependencies which are DAGs should continue to be resolved by reordering the
+ * engine descriptor lists. Reordering the descriptor lists won't solve cyclic
+ * dependencies as at least one constraint would always be violated.
+ *
+ * TODO: Merge structurally equivalent code with other gpuState* functions.
+ *
+ * @param[in]   pGpu    OBJGPU pointer
+ * @param[in]   flags   Type of transition
+ */
+static NV_STATUS
+gpuStatePostLoad
+(
+    OBJGPU *pGpu,
+    NvU32   flags
+)
+{
+    NV_STATUS rmStatus = NV_OK;
+
+    ENGDESCRIPTOR *pEngDescriptorList = gpuGetLoadEngineDescriptors(pGpu);
+    NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu);
+    NvU32 curEngDescIdx;
+
+    // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array
+    for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++)
+    {
+        ENGSTATE_TRANSITION_DATA engTransitionData;
+        ENGDESCRIPTOR curEngDescriptor = pEngDescriptorList[curEngDescIdx];
+        OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor);
+
+        if (pEngstate == NULL)
+        {
+            continue;
+        }
+
+        rmStatus = gpuLoadFailurePathTest(pGpu, NV_REG_STR_GPU_LOAD_FAILURE_TEST_STAGE_POSTLOAD, curEngDescIdx, NV_FALSE);
+        if (rmStatus == NV_OK)
+        {
+            RMTRACE_ENGINE_PROFILE_EVENT("gpuStatePostLoadEngStart", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount);
+            engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_POST_LOAD, &engTransitionData);
+            rmStatus = engstateStatePostLoad(pGpu, pEngstate, flags);
+            engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_POST_LOAD, &engTransitionData);
+            RMTRACE_ENGINE_PROFILE_EVENT("gpuStatePostLoadEngEnd", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount);
+        }
+        // RMCONFIG: Bail on errors unless the feature/object/engine/class
+        // is simply unsupported
+        if (rmStatus == NV_ERR_NOT_SUPPORTED)
+            rmStatus = NV_OK;
+        if (rmStatus != NV_OK)
+            goto gpuStatePostLoad_exit;
+
+        //
+        // Release and re-acquire the lock to allow interrupts
+        //
+        rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
+
+        rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE,
+                                     RM_LOCK_MODULES_STATE_LOAD);
+
+        if (rmStatus != NV_OK)
+            goto gpuStatePostLoad_exit;
+    }
+
+    // Caching GID data; the GID is generated by PMU and passed to RM during the PMU INIT message.
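+    // (The eager SHA-1 fetch below is currently disabled; the UUID is instead
+    // cached lazily on the first gpuGetGidInfo() call.)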
+    //NV_ASSERT_OK(gpuGetGidInfo(pGpu, NULL, NULL, DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1)));
+
+    if (hypervisorIsVgxHyper())
+    {
+        NV_CHECK_OK_OR_GOTO(rmStatus,
+                            LEVEL_ERROR,
+                            _gpuSetVgpuMgrConfig(pGpu),
+                            gpuStatePostLoad_exit);
+    }
+
+    if (!IS_VIRTUAL(pGpu) && !IS_DCE_CLIENT(pGpu))
+    {
+        pGpu->boardInfo = portMemAllocNonPaged(sizeof(*pGpu->boardInfo));
+        if (pGpu->boardInfo)
+        {
+            // To avoid potential race of xid reporting with the control, zero it out
+            portMemSet(pGpu->boardInfo, '\0', sizeof(*pGpu->boardInfo));
+
+            RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+            if (pRmApi->Control(pRmApi,
+                                pGpu->hInternalClient,
+                                pGpu->hInternalSubdevice,
+                                NV2080_CTRL_CMD_GPU_GET_OEM_BOARD_INFO,
+                                pGpu->boardInfo,
+                                sizeof(*pGpu->boardInfo)) != NV_OK)
+            {
+                portMemFree(pGpu->boardInfo);
+                pGpu->boardInfo = NULL;
+            }
+        }
+    }
+
+    if (gpuIsSystemRebootRequired_HAL(pGpu))
+    {
+        gpuSetRecoveryRebootRequired(pGpu, NV_TRUE, NV_FALSE);
+    }
+
+    // Terminate the load failure test
+    if (rmStatus == NV_OK)
+        gpuLoadFailurePathTest(pGpu, NV_REG_STR_GPU_LOAD_FAILURE_TEST_STAGE_POSTLOAD, 0, NV_TRUE);
+
+gpuStatePostLoad_exit:
+    return rmStatus;
+}
+
+/*!
+ * @brief Top level pre-unload routine
+ *
+ * Provides a mechanism to resolve cyclic dependencies between engines. For
+ * example, OBJFB depends on OBJCE on Fermi (for memory scrubbing), likewise
+ * OBJCE also depends on OBJFB (for instance memory).
+ *
+ * StatePostLoad() is called after StateLoad() likewise StatePreUnload() is
+ * called prior to StateUnload().
+ *
+ * Dependencies which are DAGs should continue to be resolved by reordering the
+ * engine descriptor lists. Reordering the descriptor lists won't solve cyclic
+ * dependencies as at least one constraint would always be violated.
+ *
+ * TODO: Merge structurally equivalent code with other gpuState* functions.
+ *
+ * @param[in]   pGpu    OBJGPU pointer
+ * @param[in]   flags   Type of transition
+ */
+static NV_STATUS
+gpuStatePreUnload
+(
+    OBJGPU *pGpu,
+    NvU32   flags
+)
+{
+    NV_STATUS rmStatus = NV_OK;
+
+    rmapiControlCacheFreeNonPersistentCacheForGpu(pGpu->gpuInstance);
+
+    portMemFree(pGpu->boardInfo);
+    pGpu->boardInfo = NULL;
+
+    ENGDESCRIPTOR *pEngDescriptorList = gpuGetUnloadEngineDescriptors(pGpu);
+    NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu);
+    NvU32 curEngDescIdx;
+
+    // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array
+    for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++)
+    {
+        ENGSTATE_TRANSITION_DATA engTransitionData;
+        ENGDESCRIPTOR curEngDescriptor = pEngDescriptorList[curEngDescIdx];
+        OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor);
+
+        if (pEngstate == NULL)
+        {
+            continue;
+        }
+
+        engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_PRE_UNLOAD, &engTransitionData);
+        rmStatus = engstateStatePreUnload(pGpu, pEngstate, flags);
+        engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_PRE_UNLOAD, &engTransitionData);
+
+        //
+        // During unload, failure of a single engine may not be fatal.
+        // ASSERT if there is a failure, but ignore the status and continue
+        // unloading other engines to prevent (worse) memory leaks.
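+        // For example, one engine failing StatePreUnload must not stop the
+        // engines after it from releasing their own resources.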
+ // + if (rmStatus != NV_OK) + { + if (rmStatus != NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to pre unload engine with descriptor index: 0x%x and descriptor: 0x%x\n", + curEngDescIdx, curEngDescriptor); + if (!IS_FMODEL(pGpu)) + { + NV_ASSERT(0); + } + } + rmStatus = NV_OK; + } + + // Ensure that intr on other GPUs are serviced + gpuServiceInterruptsAllGpus(pGpu); + } + + return rmStatus; +} + +NV_STATUS +gpuEnterShutdown_IMPL +( + OBJGPU *pGpu +) +{ + NV_STATUS rmStatus = gpuStateUnload(pGpu, GPU_STATE_DEFAULT); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to unload the device with error 0x%x\n", rmStatus); + } + + return rmStatus; +} + +// TODO: Merge structurally equivalent code with other gpuState* functions. +NV_STATUS +gpuStateUnload_IMPL +( + OBJGPU *pGpu, + NvU32 flags +) +{ + NV_STATUS rmStatus = NV_OK; + NV_STATUS fatalErrorStatus = NV_OK; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + // Set indicator that state is currently unloading. + pGpu->bStateUnloading = NV_TRUE; + + + { + rmStatus = gpuStatePreUnload(pGpu, flags); + } + + if (rmStatus != NV_OK) + return rmStatus; + + ENGDESCRIPTOR *pEngDescriptorList = gpuGetUnloadEngineDescriptors(pGpu); + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + NvU32 curEngDescIdx; + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = pEngDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_UNLOAD, &engTransitionData); + rmStatus = engstateStateUnload(pGpu, pEngstate, flags); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_UNLOAD, &engTransitionData); + + // + // An engine unload leaving the broadcast status to NV_TRUE + // will most likely mess up the unload of the next engines + // + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + // + // During unload, failure of a single engine may not be fatal. + // ASSERT if there is a failure, but ignore the status and continue + // unloading other engines to prevent (worse) memory leaks. + // + if (rmStatus != NV_OK) + { + if (rmStatus != NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to unload engine with descriptor index: 0x%x and descriptor: 0x%x\n", + curEngDescIdx, curEngDescriptor); + if (!IS_FMODEL(pGpu)) + { + NV_ASSERT(0); + + if (flags & GPU_STATE_FLAGS_PRESERVING) + { + // + // FBSR can fail due to low sysmem. + // So return error. + // See bugs 2051056, 2049141 + // + if (objDynamicCastById(pEngstate, classId(MemorySystem))) + { + fatalErrorStatus = rmStatus; + } + } + } + } + rmStatus = NV_OK; + } + // Ensure that intr on other GPUs are serviced + gpuServiceInterruptsAllGpus(pGpu); + } + + /* Hibernation SAVE steps for vGPU GSP. + * 1. Initial GSP buffers in FBMEM during RPC init. + * 2. Save hibernate data by calling NV_RM_RPC_SAVE_HIBERNATION_DATA RPC. + * 3. Call vgpuGspTeardownBuffers() to teardown GSP buffers. + * 4. Destroy BAR2. + */ + if (IS_VIRTUAL_WITH_FULL_SRIOV(pGpu) && (flags & GPU_STATE_FLAGS_PRESERVING)) + { + // Save hibernate data by calling NV_RM_RPC_SAVE_HIBERNATION_DATA RPC. 
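+        // (The RPC macro reports completion through rmStatus rather than a
+        // return value.)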
+        NV_RM_RPC_SAVE_HIBERNATION_DATA(pGpu, rmStatus);
+        if (rmStatus != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "RPC to save host hibernation data failed, status 0x%x\n", rmStatus);
+            DBG_BREAKPOINT();
+            return rmStatus;
+        }
+    }
+
+    // Call the gpuStatePostUnload routine
+    rmStatus = gpuStatePostUnload(pGpu, flags);
+    NV_ASSERT_OK(rmStatus);
+
+    if (!(flags & GPU_STATE_FLAGS_PRESERVING))
+        gpuDestroyDefaultClientShare_HAL(pGpu);
+
+    // De-init SRIOV
+    gpuDeinitSriov_HAL(pGpu);
+
+    // Set indicator that state unload finished.
+    pGpu->bStateUnloading = NV_FALSE;
+
+    // Set a property indicating that the state unload has been done
+    if (rmStatus == NV_OK)
+    {
+        pGpu->bStateLoaded = NV_FALSE;
+    }
+
+
+    if (fatalErrorStatus != NV_OK)
+    {
+        rmStatus = fatalErrorStatus;
+    }
+
+    return rmStatus;
+}
+
+/*!
+ * @brief Top level post-unload routine
+ *
+ * Provides a mechanism to resolve cyclic dependencies between engines.
+ *
+ *
+ * Dependencies which are DAGs should continue to be resolved by reordering the
+ * engine descriptor lists. Reordering the descriptor lists won't solve cyclic
+ * dependencies as at least one constraint would always be violated.
+ *
+ * TODO: Merge structurally equivalent code with other gpuState* functions.
+ *
+ * @param[in]   pGpu    OBJGPU pointer
+ * @param[in]   flags   Type of transition
+ */
+static NV_STATUS
+gpuStatePostUnload
+(
+    OBJGPU *pGpu,
+    NvU32   flags
+)
+{
+    NV_STATUS rmStatus = NV_OK;
+
+    ENGDESCRIPTOR *pEngDescriptorList = gpuGetUnloadEngineDescriptors(pGpu);
+    NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu);
+    NvU32 curEngDescIdx;
+
+    // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array
+    for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++)
+    {
+        ENGSTATE_TRANSITION_DATA engTransitionData;
+        ENGDESCRIPTOR curEngDescriptor = pEngDescriptorList[curEngDescIdx];
+        OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor);
+
+        if (pEngstate == NULL)
+        {
+            continue;
+        }
+
+        engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_POST_UNLOAD, &engTransitionData);
+        rmStatus = engstateStatePostUnload(pGpu, pEngstate, flags);
+        engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_POST_UNLOAD, &engTransitionData);
+
+        //
+        // An engine post-unload leaving the broadcast status set to NV_TRUE
+        // will most likely mess up the post-unload of the next engines
+        //
+        NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
+
+        //
+        // During unload, failure of a single engine may not be fatal.
+        // ASSERT if there is a failure, but ignore the status and continue
+        // unloading other engines to prevent (worse) memory leaks.
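+        // (Same policy as gpuStateUnload above: NV_ERR_NOT_SUPPORTED is
+        // silently ignored, anything else asserts and is then dropped.)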
+ // + if (rmStatus != NV_OK) + { + if (rmStatus != NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to post unload engine with descriptor index: 0x%x and descriptor: 0x%x\n", + curEngDescIdx, curEngDescriptor); + if (!IS_FMODEL(pGpu)) + { + NV_ASSERT(0); + } + } + rmStatus = NV_OK; + } + + // Ensure that intr on other GPUs are serviced + gpuServiceInterruptsAllGpus(pGpu); + } + + return rmStatus; +} + +NV_STATUS +gpuStateDestroy_IMPL +( + OBJGPU *pGpu +) +{ + NV_STATUS rmStatus = NV_OK; + + rmapiControlCacheFreeAllCacheForGpu(pGpu->gpuInstance); + + ENGDESCRIPTOR *pEngDescriptorList = gpuGetDestroyEngineDescriptors(pGpu); + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + NvU32 curEngDescIdx; + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = pEngDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_DESTROY, &engTransitionData); + engstateStateDestroy(pGpu, pEngstate); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_DESTROY, &engTransitionData); + } + + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + // TODO: Move the below code into appropriate ENGSTATE objects. + // DO NOT ADD MORE SPECIAL CASES HERE! + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + gpuStateInitStartedRetract_HAL(pGpu, pGpu->pPrereqTracker); + + // Clear the property indicating that the state initialization has been done + if (rmStatus == NV_OK) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_STATE_INITIALIZED, NV_FALSE); + } + + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + // TODO: Move the above code into appropriate ENGSTATE objects. + // DO NOT ADD MORE SPECIAL CASES HERE! + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + _gpuFreeInternalObjects(pGpu); + gpuDestroyGenericKernelFalconList(pGpu); + + objDelete(pGpu->pPrereqTracker); + pGpu->pPrereqTracker = NULL; + + portMemFree(pGpu->pChipInfo); + pGpu->pChipInfo = NULL; + + portMemFree(pGpu->pUserRegisterAccessMap); + pGpu->pUserRegisterAccessMap = NULL; + + portMemFree(pGpu->pUnrestrictedRegisterAccessMap); + pGpu->pUnrestrictedRegisterAccessMap = NULL; + + pGpu->userRegisterAccessMapSize = 0; + + pGpu->bFullyConstructed = NV_FALSE; + + gpuDeinitOptimusSettings(pGpu); + + return rmStatus; +} + +// +// Logic: If arch = requested AND impl = requested --> NV_TRUE +// +NvBool +gpuIsImplementation_IMPL +( + OBJGPU *pGpu, + HAL_IMPLEMENTATION halImpl +) +{ + NvU32 gpuArch, gpuImpl; + + gpuXlateHalImplToArchImpl(halImpl, &gpuArch, &gpuImpl); + + return ((gpuGetChipArch(pGpu) == gpuArch) && + (gpuGetChipImpl(pGpu) == gpuImpl)); +} + +/*! + * @brief Initialize SBIOS settings for Optimus GOLD to driver loaded state. + * + * @param[in] pGpu GPU object pointer + * + * @return NV_OK + */ +NV_STATUS +gpuInitOptimusSettings_IMPL(OBJGPU *pGpu) +{ + + return NV_OK; +} + +/*! + * @brief Restore SBIOS settings for Optimus GOLD to driver unloaded state. 
+ *
+ * @param[in]  pGpu    GPU object pointer
+ *
+ * @return NV_OK if successful
+ * @return NV_ERR_INVALID_STATE if SBIOS failed to acknowledge the restore request
+ * @return Bubbles up error codes on ACPI call failure
+ */
+NV_STATUS
+gpuDeinitOptimusSettings_IMPL(OBJGPU *pGpu)
+{
+
+    return NV_OK;
+}
+
+// Check the software state to decide if we are in full power mode or not.
+NvBool
+gpuIsGpuFullPower_IMPL
+(
+    OBJGPU *pGpu
+)
+{
+    NvBool retVal = NV_TRUE;
+
+    //
+    // SW may have indicated that the GPU is in standby, hibernate, or powered off,
+    // indicating a logical power state.
+    //
+    if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_STANDBY) ||
+        pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_HIBERNATE))
+    {
+        retVal = NV_FALSE;
+    }
+
+    return retVal;
+}
+
+// Check the software state to decide if we are in full power mode or not.
+NvBool
+gpuIsGpuFullPowerForPmResume_IMPL
+(
+    OBJGPU *pGpu
+)
+{
+    NvBool retVal = NV_TRUE;
+    //
+    // SW may have indicated that the GPU is in standby, resume, hibernate, or powered off,
+    // indicating a logical power state.
+    //
+    if ((!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) &&
+        (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_STANDBY) ||
+         pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_HIBERNATE)))
+    {
+        retVal = NV_FALSE;
+    }
+    return retVal;
+}
+
+static NV_STATUS
+gpuDetermineVirtualMode
+(
+    OBJGPU *pGpu
+)
+{
+    OBJSYS        *pSys = SYS_GET_INSTANCE();
+    OBJOS         *pOS = SYS_GET_OS(pSys);
+    OBJGPU        *pGpuTemp;
+    OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
+    NvU32          gpuAttachMask, gpuInstance;
+    NvBool         bIsVirtual = NV_FALSE;
+    NvU32          config = 0;
+    NvBool         bNoHostBridgeDetected = NV_TRUE;
+
+    if (pGpu->bIsSOC || pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
+    {
+        return NV_OK;
+    }
+
+    // Using Hypervisor native interface to detect
+    if (pHypervisor && (!pHypervisor->bDetected))
+        hypervisorDetection(pHypervisor, pOS);
+
+    gpumgrGetGpuAttachInfo(NULL, &gpuAttachMask);
+    gpuInstance = 0;
+
+    if (FLD_TEST_DRF(_PMC, _BOOT_1, _VGPU, _VF, config))
+    {
+        NV_ASSERT(pGpu->bIsVirtualWithSriov);
+
+        bIsVirtual = NV_TRUE;
+        pGpu->bPipelinedPteMemEnabled = NV_TRUE;
+    }
+    else
+    {
+        bNoHostBridgeDetected = NV_FALSE;
+    }
+
+    _setPlatformNoHostbridgeDetect(bNoHostBridgeDetected);
+
+    if (!pGpu->bSriovEnabled && !IS_VIRTUAL_WITH_SRIOV(pGpu))
+    {
+        pGpu->bWarBug200577889SriovHeavyEnabled = NV_FALSE;
+    }
+
+    // Checking if the VM is already vGPU enabled.
+    // NMOS and vGPU cannot be simultaneously enabled on a VM.
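+    // The scan below walks every attached GPU: a single virtual GPU anywhere
+    // in the VM disqualifies passthrough on this one.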
+ if (pGpu->bIsPassthru) + { + while ((pGpuTemp = gpumgrGetNextGpu(gpuAttachMask, &gpuInstance)) != NULL) + { + if (IS_VIRTUAL(pGpuTemp)) + { + NV_PRINTF(LEVEL_ERROR, + "vGPU and Passthrough not supported simultaneously on the same VM.\n"); + pGpu->bIsPassthru = NV_FALSE; + return NV_ERR_NOT_SUPPORTED; + } + } + } + + // Early detection at GPU creation time should be consistent to the real detection + NV_ASSERT_OR_RETURN(pGpu->isVirtual == bIsVirtual, NV_ERR_INVALID_STATE); + + return NV_OK; +} + +NvBool +gpuIsImplementationOrBetter_IMPL +( + OBJGPU *pGpu, + HAL_IMPLEMENTATION halImpl +) +{ + NvU32 gpuArch, gpuImpl; + NvU32 chipArch; + + gpuXlateHalImplToArchImpl(halImpl, &gpuArch, &gpuImpl); + + // "is implementation or better" is only defined between 2 gpus within + // the same "gpu series" as defined in config/Chips.pm and nv_arch.h + chipArch = gpuGetChipArch(pGpu); + + if (DRF_VAL(GPU, _ARCHITECTURE, _SERIES, chipArch) != DRF_VAL(GPU, _ARCHITECTURE, _SERIES, gpuArch)) + { + return NV_FALSE; + } + + // In case there is a temporal ordering we need to account for + return gpuSatisfiesTemporalOrder(pGpu, halImpl); +} + +static void +gpuXlateHalImplToArchImpl +( + HAL_IMPLEMENTATION halImpl, + NvU32 *gpuArch, + NvU32 *gpuImpl +) +{ + switch (halImpl) + { + case HAL_IMPL_GM107: + { + *gpuArch = GPU_ARCHITECTURE_MAXWELL; + *gpuImpl = GPU_IMPLEMENTATION_GM107; + break; + } + + case HAL_IMPL_GM108: + { + *gpuArch = GPU_ARCHITECTURE_MAXWELL; + *gpuImpl = GPU_IMPLEMENTATION_GM108; + break; + } + + case HAL_IMPL_GM200: + { + *gpuArch = GPU_ARCHITECTURE_MAXWELL2; + *gpuImpl = GPU_IMPLEMENTATION_GM200; + break; + } + + case HAL_IMPL_GM204: + { + *gpuArch = GPU_ARCHITECTURE_MAXWELL2; + *gpuImpl = GPU_IMPLEMENTATION_GM204; + break; + } + + case HAL_IMPL_GM206: + { + *gpuArch = GPU_ARCHITECTURE_MAXWELL2; + *gpuImpl = GPU_IMPLEMENTATION_GM206; + break; + } + + case HAL_IMPL_GP100: + { + *gpuArch = GPU_ARCHITECTURE_PASCAL; + *gpuImpl = GPU_IMPLEMENTATION_GP100; + break; + } + + case HAL_IMPL_GP102: + { + *gpuArch = GPU_ARCHITECTURE_PASCAL; + *gpuImpl = GPU_IMPLEMENTATION_GP102; + break; + } + + case HAL_IMPL_GP104: + { + *gpuArch = GPU_ARCHITECTURE_PASCAL; + *gpuImpl = GPU_IMPLEMENTATION_GP104; + break; + } + + case HAL_IMPL_GP106: + { + *gpuArch = GPU_ARCHITECTURE_PASCAL; + *gpuImpl = GPU_IMPLEMENTATION_GP106; + break; + } + + case HAL_IMPL_GP107: + { + *gpuArch = GPU_ARCHITECTURE_PASCAL; + *gpuImpl = GPU_IMPLEMENTATION_GP107; + break; + } + + case HAL_IMPL_GP108: + { + *gpuArch = GPU_ARCHITECTURE_PASCAL; + *gpuImpl = GPU_IMPLEMENTATION_GP108; + break; + } + + case HAL_IMPL_GV100: + { + *gpuArch = GPU_ARCHITECTURE_VOLTA; + *gpuImpl = GPU_IMPLEMENTATION_GV100; + break; + } + + case HAL_IMPL_GV11B: + { + *gpuArch = GPU_ARCHITECTURE_VOLTA2; + *gpuImpl = GPU_IMPLEMENTATION_GV11B; + break; + } + + case HAL_IMPL_TU102: + { + *gpuArch = GPU_ARCHITECTURE_TURING; + *gpuImpl = GPU_IMPLEMENTATION_TU102; + break; + } + + case HAL_IMPL_TU104: + { + *gpuArch = GPU_ARCHITECTURE_TURING; + *gpuImpl = GPU_IMPLEMENTATION_TU104; + break; + } + + case HAL_IMPL_TU106: + { + *gpuArch = GPU_ARCHITECTURE_TURING; + *gpuImpl = GPU_IMPLEMENTATION_TU106; + break; + } + + case HAL_IMPL_TU116: + { + *gpuArch = GPU_ARCHITECTURE_TURING; + *gpuImpl = GPU_IMPLEMENTATION_TU116; + break; + } + + case HAL_IMPL_TU117: + { + *gpuArch = GPU_ARCHITECTURE_TURING; + *gpuImpl = GPU_IMPLEMENTATION_TU117; + break; + } + + case HAL_IMPL_AMODEL: + { + *gpuArch = GPU_ARCHITECTURE_SIMS; + *gpuImpl = GPU_IMPLEMENTATION_AMODEL; + break; + } + + case 
HAL_IMPL_T124: + { + *gpuArch = GPU_ARCHITECTURE_T12X; + *gpuImpl = GPU_IMPLEMENTATION_T124; + break; + } + + case HAL_IMPL_T132: + { + *gpuArch = GPU_ARCHITECTURE_T13X; + *gpuImpl = GPU_IMPLEMENTATION_T132; + break; + } + + case HAL_IMPL_T210: + { + *gpuArch = GPU_ARCHITECTURE_T21X; + *gpuImpl = GPU_IMPLEMENTATION_T210; + break; + } + + case HAL_IMPL_T186: + { + *gpuArch = GPU_ARCHITECTURE_T18X; + *gpuImpl = GPU_IMPLEMENTATION_T186; + break; + } + + case HAL_IMPL_T194: + { + *gpuArch = GPU_ARCHITECTURE_T19X; + *gpuImpl = GPU_IMPLEMENTATION_T194; + break; + } + + case HAL_IMPL_T234D: + { + *gpuArch = GPU_ARCHITECTURE_T23X; + *gpuImpl = GPU_IMPLEMENTATION_T234D; + break; + } + + case HAL_IMPL_GA100: + { + *gpuArch = GPU_ARCHITECTURE_AMPERE; + *gpuImpl = GPU_IMPLEMENTATION_GA100; + break; + } + + case HAL_IMPL_GA102: + { + *gpuArch = GPU_ARCHITECTURE_AMPERE; + *gpuImpl = GPU_IMPLEMENTATION_GA102; + break; + } + + case HAL_IMPL_GA102F: + { + *gpuArch = GPU_ARCHITECTURE_AMPERE; + *gpuImpl = GPU_IMPLEMENTATION_GA102F; + break; + } + + case HAL_IMPL_GA104: + { + *gpuArch = GPU_ARCHITECTURE_AMPERE; + *gpuImpl = GPU_IMPLEMENTATION_GA104; + break; + } + + + case HAL_IMPL_T264D: + { + *gpuArch = GPU_ARCHITECTURE_T26X; + *gpuImpl = GPU_IMPLEMENTATION_T264D; + break; + } + + case HAL_IMPL_T256D: + { + *gpuArch = GPU_ARCHITECTURE_T25X; + *gpuImpl = GPU_IMPLEMENTATION_T256D; + break; + } + + default: + { + *gpuArch = 0; + *gpuImpl = 0; + NV_PRINTF(LEVEL_ERROR, "Invalid halimpl\n"); + DBG_BREAKPOINT(); + break; + } + } +} + +// +// default Logic: If halImpl is equal or greater than requested --> NV_TRUE +// +// Arch and impl IDs are not guaranteed to be ordered. +// "halImpl" is used here to match the ordering in chip-config/NVOC +// +// NOTE: only defined for gpus within same gpu series +// +static NvBool +gpuSatisfiesTemporalOrder +( + OBJGPU *pGpu, + HAL_IMPLEMENTATION halImpl +) +{ + NvBool result = NV_FALSE; + + switch (halImpl) + { + default: + { + HAL_IMPLEMENTATION chipImpl = pGpu->halImpl; + NV_ASSERT(chipImpl < HAL_IMPL_MAXIMUM); + + result = (chipImpl >= halImpl); + + break; + } + } + + return result; +} + +// =============== Engine Database ============================== + +typedef struct { + RM_ENGINE_TYPE clientEngineId; + NVOC_CLASS_ID class; + NvU32 instance; + NvBool bHostEngine; +} EXTERN_TO_INTERNAL_ENGINE_ID; + +static const EXTERN_TO_INTERNAL_ENGINE_ID rmClientEngineTable[] = +{ + { RM_ENGINE_TYPE_GR0, classId(Graphics) , 0, NV_TRUE }, + { RM_ENGINE_TYPE_GR1, classId(Graphics) , 1, NV_TRUE }, + { RM_ENGINE_TYPE_GR2, classId(Graphics) , 2, NV_TRUE }, + { RM_ENGINE_TYPE_GR3, classId(Graphics) , 3, NV_TRUE }, + { RM_ENGINE_TYPE_GR4, classId(Graphics) , 4, NV_TRUE }, + { RM_ENGINE_TYPE_GR5, classId(Graphics) , 5, NV_TRUE }, + { RM_ENGINE_TYPE_GR6, classId(Graphics) , 6, NV_TRUE }, + { RM_ENGINE_TYPE_GR7, classId(Graphics) , 7, NV_TRUE }, + { RM_ENGINE_TYPE_COPY0, classId(OBJCE) , 0, NV_TRUE }, + { RM_ENGINE_TYPE_COPY1, classId(OBJCE) , 1, NV_TRUE }, + { RM_ENGINE_TYPE_COPY2, classId(OBJCE) , 2, NV_TRUE }, + { RM_ENGINE_TYPE_COPY3, classId(OBJCE) , 3, NV_TRUE }, + { RM_ENGINE_TYPE_COPY4, classId(OBJCE) , 4, NV_TRUE }, + { RM_ENGINE_TYPE_COPY5, classId(OBJCE) , 5, NV_TRUE }, + { RM_ENGINE_TYPE_COPY6, classId(OBJCE) , 6, NV_TRUE }, + { RM_ENGINE_TYPE_COPY7, classId(OBJCE) , 7, NV_TRUE }, + { RM_ENGINE_TYPE_COPY8, classId(OBJCE) , 8, NV_TRUE }, + { RM_ENGINE_TYPE_COPY9, classId(OBJCE) , 9, NV_TRUE }, + { RM_ENGINE_TYPE_COPY10, classId(OBJCE) , 10, NV_TRUE }, + { RM_ENGINE_TYPE_COPY11, 
classId(OBJCE)          , 11, NV_TRUE  },
+    { RM_ENGINE_TYPE_COPY12,     classId(OBJCE)          , 12, NV_TRUE  },
+    { RM_ENGINE_TYPE_COPY13,     classId(OBJCE)          , 13, NV_TRUE  },
+    { RM_ENGINE_TYPE_COPY14,     classId(OBJCE)          , 14, NV_TRUE  },
+    { RM_ENGINE_TYPE_COPY15,     classId(OBJCE)          , 15, NV_TRUE  },
+    { RM_ENGINE_TYPE_COPY16,     classId(OBJCE)          , 16, NV_TRUE  },
+    { RM_ENGINE_TYPE_COPY17,     classId(OBJCE)          , 17, NV_TRUE  },
+    { RM_ENGINE_TYPE_COPY18,     classId(OBJCE)          , 18, NV_TRUE  },
+    { RM_ENGINE_TYPE_COPY19,     classId(OBJCE)          , 19, NV_TRUE  },
+    { RM_ENGINE_TYPE_NVDEC0,     classId(OBJBSP)         , 0,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVDEC1,     classId(OBJBSP)         , 1,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVDEC2,     classId(OBJBSP)         , 2,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVDEC3,     classId(OBJBSP)         , 3,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVDEC4,     classId(OBJBSP)         , 4,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVDEC5,     classId(OBJBSP)         , 5,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVDEC6,     classId(OBJBSP)         , 6,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVDEC7,     classId(OBJBSP)         , 7,  NV_TRUE  },
+    { RM_ENGINE_TYPE_CIPHER,     classId(OBJCIPHER)      , 0,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVENC0,     classId(OBJMSENC)       , 0,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVENC1,     classId(OBJMSENC)       , 1,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVENC2,     classId(OBJMSENC)       , 2,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVENC3,     classId(OBJMSENC)       , 3,  NV_TRUE  },
+    { RM_ENGINE_TYPE_SW,         classId(OBJSWENG)       , 0,  NV_TRUE  },
+    { RM_ENGINE_TYPE_SEC2,       classId(OBJSEC2)        , 0,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVJPEG0,    classId(OBJNVJPG)       , 0,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVJPEG1,    classId(OBJNVJPG)       , 1,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVJPEG2,    classId(OBJNVJPG)       , 2,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVJPEG3,    classId(OBJNVJPG)       , 3,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVJPEG4,    classId(OBJNVJPG)       , 4,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVJPEG5,    classId(OBJNVJPG)       , 5,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVJPEG6,    classId(OBJNVJPG)       , 6,  NV_TRUE  },
+    { RM_ENGINE_TYPE_NVJPEG7,    classId(OBJNVJPG)       , 7,  NV_TRUE  },
+    { RM_ENGINE_TYPE_OFA0,       classId(OBJOFA)         , 0,  NV_TRUE  },
+    { RM_ENGINE_TYPE_OFA1,       classId(OBJOFA)         , 1,  NV_TRUE  },
+    { RM_ENGINE_TYPE_DPU,        classId(OBJDPU)         , 0,  NV_FALSE },
+    { RM_ENGINE_TYPE_PMU,        classId(Pmu)            , 0,  NV_FALSE },
+    { RM_ENGINE_TYPE_FBFLCN,     classId(OBJFBFLCN)      , 0,  NV_FALSE },
+    { RM_ENGINE_TYPE_HOST,       classId(KernelFifo)     , 0,  NV_FALSE },
+};
+
+NV_STATUS gpuConstructEngineTable_IMPL
+(
+    OBJGPU *pGpu
+)
+{
+    NvU32 engineIdx = 0;
+
+    // Alloc engine DB
+    pGpu->engineDB.bValid = NV_FALSE;
+    pGpu->engineDB.pType = portMemAllocNonPaged(
+        NV_ARRAY_ELEMENTS(rmClientEngineTable) * sizeof(*pGpu->engineDB.pType));
+    if (pGpu->engineDB.pType == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "gpuConstructEngineTable: Could not allocate engine DB\n");
+        DBG_BREAKPOINT();
+        return NV_ERR_NO_MEMORY;
+    }
+    pGpu->engineDB.size = 0; // That's right, it's the size not the capacity
+                             // of the engineDB
+
+    // Initialize per-GPU per-engine list of non-stall interrupt event nodes.
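+    // On any failure the partially built lists are torn down via
+    // gpuDestroyEngineTable() below, so callers never see a half-initialized
+    // engine DB.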
+ for (engineIdx = 0; engineIdx < (NvU32)RM_ENGINE_TYPE_LAST; engineIdx++) + { + NV_STATUS status = gpuEngineEventNotificationListCreate(pGpu, + &pGpu->engineNonstallIntrEventNotifications[engineIdx]); + if (status != NV_OK) + { + gpuDestroyEngineTable(pGpu); + return status; + } + } + + return NV_OK; +} + +NV_STATUS gpuUpdateEngineTable_IMPL +( + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + NvU32 counter = 0; + NvU32 numClasses = 0; + + if (pGpu->engineDB.pType == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "gpuUpdateEngineTable: EngineDB has not been created yet\n"); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_STATE; + } + + if (pGpu->engineDB.bValid) + { + return NV_OK; + } + + // Read through the classDB and populate engineDB + pGpu->engineDB.size = 0; + for (counter = 0; counter < NV_ARRAY_ELEMENTS(rmClientEngineTable); counter++) + { + // There are tests such as ClassA06fTest that attempt to bind all engines reported + if (!rmClientEngineTable[counter].bHostEngine) + { + continue; + } + + status = gpuGetClassList(pGpu, &numClasses, NULL, + MKENGDESC(rmClientEngineTable[counter].class, rmClientEngineTable[counter].instance)); + if ((status != NV_OK) || ( numClasses == 0)) + { + continue; + } + pGpu->engineDB.pType[pGpu->engineDB.size++] = + rmClientEngineTable[counter].clientEngineId; + } + + pGpu->engineDB.bValid = NV_TRUE; + + return NV_OK; +} +void gpuDestroyEngineTable_IMPL(OBJGPU *pGpu) +{ + for (NvU32 engineIdx = 0; engineIdx < (NvU32)RM_ENGINE_TYPE_LAST; engineIdx++) + gpuEngineEventNotificationListDestroy(pGpu, + pGpu->engineNonstallIntrEventNotifications[engineIdx]); + + if (pGpu->engineDB.pType) + { + pGpu->engineDB.size = 0; + portMemFree(pGpu->engineDB.pType); + pGpu->engineDB.pType = NULL; + pGpu->engineDB.bValid = NV_FALSE; + } +} + +NvBool gpuCheckEngineTable_IMPL +( + OBJGPU *pGpu, + RM_ENGINE_TYPE engType +) +{ + NvU32 engineIdx; + + if (!IS_MODS_AMODEL(pGpu)) + { + NV_ASSERT_OR_RETURN(pGpu->engineDB.bValid, NV_FALSE); + } + + NV_ASSERT_OR_RETURN(engType < RM_ENGINE_TYPE_LAST, NV_FALSE); + + for (engineIdx = 0; engineIdx < pGpu->engineDB.size; engineIdx++) + { + if (engType == pGpu->engineDB.pType[engineIdx]) + { + return NV_TRUE; + } + } + + return NV_FALSE; +} + +NV_STATUS +gpuXlateClientEngineIdToEngDesc_IMPL +( + OBJGPU *pGpu, + RM_ENGINE_TYPE clientEngineID, + ENGDESCRIPTOR *pEngDesc + +) +{ + NvU32 counter; + + for (counter = 0; counter < NV_ARRAY_ELEMENTS(rmClientEngineTable); counter++) + { + if (rmClientEngineTable[counter].clientEngineId == clientEngineID) + { + *pEngDesc = MKENGDESC(rmClientEngineTable[counter].class, rmClientEngineTable[counter].instance); + return NV_OK; + } + } + + return NV_ERR_INVALID_ARGUMENT; +} + +NV_STATUS +gpuXlateEngDescToClientEngineId_IMPL +( + OBJGPU *pGpu, + ENGDESCRIPTOR engDesc, + RM_ENGINE_TYPE *pClientEngineID +) +{ + NvU32 counter; + + for (counter = 0; counter < NV_ARRAY_ELEMENTS(rmClientEngineTable); counter++) + { + if (MKENGDESC(rmClientEngineTable[counter].class, rmClientEngineTable[counter].instance) == engDesc) + { + *pClientEngineID = rmClientEngineTable[counter].clientEngineId; + return NV_OK; + } + } + + return NV_ERR_INVALID_ARGUMENT; +} + +NV_STATUS +gpuGetFlcnFromClientEngineId_IMPL +( + OBJGPU *pGpu, + RM_ENGINE_TYPE clientEngineId, + Falcon **ppFlcn +) +{ + *ppFlcn = NULL; + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +gpuGetGidInfo_IMPL +( + OBJGPU *pGpu, + NvU8 **ppGidString, + NvU32 *pGidStrlen, + NvU32 gidFlags +) +{ + NV_STATUS rmStatus = NV_OK; + NvU8 gidData[RM_SHA1_GID_SIZE]; + NvU32 gidSize = 
RM_SHA1_GID_SIZE; + + if (!FLD_TEST_DRF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_TYPE,_SHA1,gidFlags)) + { + return NV_ERR_INVALID_FLAGS; + } + + if (pGpu->gpuUuid.isInitialized) + { + portMemCopy(gidData, gidSize, &pGpu->gpuUuid.uuid[0], gidSize); + goto fillGidData; + } + + rmStatus = gpuGenGidData_HAL(pGpu, gidData, gidSize, gidFlags); + + if (rmStatus != NV_OK) + { + return rmStatus; + } + + // if not cached, cache it here + portMemCopy(&pGpu->gpuUuid.uuid[0], gidSize, gidData, gidSize); + pGpu->gpuUuid.isInitialized = NV_TRUE; + +fillGidData: + if (ppGidString != NULL) + { + if (FLD_TEST_DRF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _BINARY, + gidFlags)) + { + // + // Instead of transforming the Gid into a string, just use it in its + // original binary form. The allocation rules are the same as those + // followed by the transformGidToUserFriendlyString routine: we + // allocate ppGidString here, and the caller frees ppGidString. + // + *ppGidString = portMemAllocNonPaged(gidSize); + if (*ppGidString == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemCopy(*ppGidString, gidSize, gidData, gidSize); + *pGidStrlen = gidSize; + } + else + { + NV_ASSERT_OR_RETURN(pGidStrlen != NULL, NV_ERR_INVALID_ARGUMENT); + rmStatus = transformGidToUserFriendlyString(gidData, gidSize, + ppGidString, pGidStrlen, gidFlags, RM_UUID_PREFIX_GPU); + } + } + + return rmStatus; +} + +static void +_gpuSetDisconnectedPropertiesWorker +( + NvU32 gpuInstance, + void *pArg +) +{ + OBJGPU *pGpu = gpumgrGetGpu(gpuInstance); + + gpuRefreshRecoveryAction_HAL(pGpu, NV_FALSE); +} + +void +gpuSetDisconnectedProperties_IMPL +( + OBJGPU *pGpu +) +{ + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_LOST, NV_TRUE); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED, NV_FALSE); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH, NV_FALSE); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_STANDBY, NV_FALSE); + pGpu->bInD3Cold = NV_FALSE; + pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_HIBERNATE, NV_FALSE); + SET_GPU_GC6_STATE(pGpu, GPU_GC6_STATE_POWERED_ON); + + // + // Queue a work item to refresh recovery action, as + // gpuSetDisconnectedProperties can be called at raised (device) IRQL. 
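+    // Deferring the refresh lets it run at a lower IRQL; the flags below ask
+    // the OS layer to take the per-device GPU group lock for the worker and
+    // to fall back to a DPC if a work item cannot be queued.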
+ // + NV_ASSERT_OK( + osQueueWorkItem(pGpu, + _gpuSetDisconnectedPropertiesWorker, + NULL, + OS_QUEUE_WORKITEM_FLAGS_FALLBACK_TO_DPC | + OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE)); +} + +NV_STATUS +gpuAddConstructedFalcon_IMPL +( + OBJGPU *pGpu, + Falcon *pFlcn +) +{ + NV_ASSERT_OR_RETURN(pFlcn, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN( + pGpu->numConstructedFalcons < NV_ARRAY_ELEMENTS(pGpu->constructedFalcons), + NV_ERR_BUFFER_TOO_SMALL); + + pGpu->constructedFalcons[pGpu->numConstructedFalcons++] = pFlcn; + return NV_OK; +} + +NV_STATUS +gpuRemoveConstructedFalcon_IMPL +( + OBJGPU *pGpu, + Falcon *pFlcn +) +{ + NvU32 i, j; + for (i = 0; i < pGpu->numConstructedFalcons; i++) + { + if (pGpu->constructedFalcons[i] == pFlcn) + { + for (j = i+1; j < pGpu->numConstructedFalcons; j++) + { + pGpu->constructedFalcons[j-1] = pGpu->constructedFalcons[j]; + } + pGpu->numConstructedFalcons--; + pGpu->constructedFalcons[pGpu->numConstructedFalcons] = NULL; + return NV_OK; + } + } + NV_ASSERT_FAILED("Attempted to remove a non-existent initialized Falcon!"); + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS +gpuGetConstructedFalcon_IMPL +( + OBJGPU *pGpu, + NvU32 index, + Falcon **ppFlcn +) +{ + if (index >= pGpu->numConstructedFalcons) + return NV_ERR_OUT_OF_RANGE; + + *ppFlcn = pGpu->constructedFalcons[index]; + NV_ASSERT(*ppFlcn != NULL); + return NV_OK; +} + +NV_STATUS gpuBuildGenericKernelFalconList_IMPL(OBJGPU *pGpu) +{ + return NV_OK; +} + +void gpuDestroyGenericKernelFalconList_IMPL(OBJGPU *pGpu) +{ +} + +GenericKernelFalcon * +gpuGetGenericKernelFalconForEngine_IMPL +( + OBJGPU *pGpu, + ENGDESCRIPTOR engDesc +) +{ + return NULL; +} + +void gpuRegisterGenericKernelFalconIntrService_IMPL(OBJGPU *pGpu, void *pRecords) +{ +} + +/** + * @brief Initializes iterator for ENGDESCRIPTOR load order + * + * @return ENGLIST_ITER + */ +static ENGLIST_ITER +gpuGetEngineOrderListIter(OBJGPU *pGpu, NvU32 flags) +{ + ENGLIST_ITER it = { 0 }; + it.flags = flags; + return it; +} + + +static const GPUCHILDPRESENT * +gpuFindChildPresent(const GPUCHILDPRESENT *pChildPresentList, NvU32 numChildPresent, NvU32 classId) +{ + NvU32 i; + + for (i = 0; i < numChildPresent; i++) + { + if (pChildPresentList[i].classId == classId) + return &pChildPresentList[i]; + } + + return NULL; +} + +/*! + * @brief Get GFID State + * + * @param[in] pGpu OBJGPU pointer + * @param[in] gfid GFID to be validated + * @param[out] pState Current state of the GFID + */ +NV_STATUS +gpuGetGfidState_IMPL(OBJGPU *pGpu, NvU32 gfid, GFID_ALLOC_STATUS *pState) +{ + if (!gpuIsSriovEnabled(pGpu)) + return NV_OK; + + if (pGpu->sriovState.pAllocatedGfids == NULL || pState == NULL) + { + return NV_ERR_INVALID_ADDRESS; + } + + // Sanity check on GFID + if (gfid > pGpu->sriovState.maxGfid) + { + return NV_ERR_OUT_OF_RANGE; + } + + *pState = (GFID_ALLOC_STATUS)pGpu->sriovState.pAllocatedGfids[gfid]; + + return NV_OK; +} + +/*! + * @brief Set/Unset bit in pAllocatedGfids + * + * @param[in] pGpu OBJGPU pointer + * @param[in] gfid GFID to be set/unset (Assumes GFID is sanity checked before calling this function) + * @param[in] bInUse NV_TRUE if GFID in use + */ +void +gpuSetGfidUsage_IMPL(OBJGPU *pGpu, NvU32 gfid, NvBool bInUse) +{ + NV_ASSERT_OR_RETURN_VOID(pGpu->sriovState.pAllocatedGfids != NULL); + + if (bInUse == NV_TRUE) + pGpu->sriovState.pAllocatedGfids[gfid] = GFID_ALLOCATED; + else + pGpu->sriovState.pAllocatedGfids[gfid] = GFID_FREE; +} + +/*!
+ * @brief Set pAllocatedGfids allocated status as being invalidated + * + * @param[in] pGpu OBJGPU pointer + * @param[in] gfid GFID to be set/unset (Assumes GFID is sanity checked before calling this function) + */ +void +gpuSetGfidInvalidated_IMPL(OBJGPU *pGpu, NvU32 gfid) +{ + NV_ASSERT_OR_RETURN_VOID(pGpu->sriovState.pAllocatedGfids != NULL); + + pGpu->sriovState.pAllocatedGfids[gfid] = GFID_INVALIDATED; +} + +/** + * @brief Iterates over the engine ordering list + * + * @param[in,out] pIt Iterator + * @param[out] pEngDesc The next engine descriptor + * + * @return NV_TRUE if *pEngDesc is valid, NV_FALSE if there are no more engines + */ +NvBool +gpuGetNextInEngineOrderList(OBJGPU *pGpu, ENGLIST_ITER *pIt, ENGDESCRIPTOR *pEngDesc) +{ + NvBool bReverse = !!(pIt->flags & (GCO_LIST_UNLOAD | GCO_LIST_DESTROY)); + const GPUCHILDORDER *pChildOrderList; + NvU32 numChildOrder; + const GPUCHILDPRESENT *pCurChildPresent; + const GPUCHILDORDER *pCurChildOrder; + NvBool bAdvance = NV_FALSE; + NvBool bFirstIteration = NV_FALSE; + + pChildOrderList = gpuGetChildrenOrder_HAL(pGpu, &numChildOrder); + + if (!pIt->bStarted) + { + bFirstIteration = NV_TRUE; + pIt->bStarted = NV_TRUE; + pIt->childOrderIndex = bReverse ? (NvS32)numChildOrder - 1 : 0; + } + + while (1) + { + if (bAdvance) + pIt->childOrderIndex += bReverse ? -1 : 1; + + if ((pIt->childOrderIndex >= (NvS32)numChildOrder) || (pIt->childOrderIndex < 0)) + return NV_FALSE; + + pCurChildOrder = &pChildOrderList[pIt->childOrderIndex]; + + if ((pCurChildOrder->flags & pIt->flags) != pIt->flags) + { + bAdvance = NV_TRUE; + continue; + } + + pCurChildPresent = gpuFindChildPresent(pGpu->pChildrenPresent, + pGpu->numChildrenPresent, pCurChildOrder->classId); + + if (!pCurChildPresent) + { + bAdvance = NV_TRUE; + continue; + } + + if (bAdvance || bFirstIteration) + { + pIt->instanceID = bReverse ? pCurChildPresent->instances - 1 : 0; + } + + if ((pIt->instanceID < (NvS32)pCurChildPresent->instances) && (pIt->instanceID >= 0)) + { + *pEngDesc = MKENGDESC(pCurChildOrder->classId, pIt->instanceID); + + pIt->instanceID += bReverse ? -1 : 1; + + return NV_TRUE; + } + + bAdvance = NV_TRUE; + } + + return NV_FALSE; +} + + +NV_STATUS +gpuInitDispIpHal_IMPL +( + OBJGPU *pGpu, + NvU32 ipver +) +{ + RmHalspecOwner *pRmHalspecOwner = staticCast(pGpu, RmHalspecOwner); + GpuHalspecOwner *pGpuHalspecOwner = staticCast(pGpu, GpuHalspecOwner); + DispIpHal *pDispIpHal = &pRmHalspecOwner->dispIpHal; + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + // + // A 0xFFFFFFFF ipver value happens when the Display engine is disabled. The NVOC + // Disp IP halspec doesn't have a hal variant that maps to this value. Convert it + // to DISPv0000. + // + if (ipver == 0xFFFFFFFF) + { + ipver = 0; + } + else if (ipver == 0x03010000) + { + // + // Display on GV100 has a 0x0301 IP version while it uses the v0300 manuals. It is + // listed in the disp.def IP_VERSIONS table as v03_00 since we added the chip. This + // wasn't a problem in chip-config, as there a range of IP versions maps to one + // implementation: versions in "v0300 <= ipver < v0400" map to a _v03_00 or lower IP + // version function. NVOC maps an exact number, not a range, thus we need to + // override the value when initializing the halspec. + // + ipver = 0x03000000; + } + + __nvoc_init_halspec_DispIpHal(pDispIpHal, ipver & 0xFFFF0000); + + if ((ipver & 0xFFFF0000) != 0) + { + DispIpHal dispIpHalv00; + __nvoc_init_halspec_DispIpHal(&dispIpHalv00, 0); + + // + // At GPU creation time, dispIpHal.__nvoc_HalVarIdx is initialized with DISPv0000.
+ // Any valid non-zero IP version listed in halspec DispIpHal assigns __nvoc_HalVarIdx + // to a different value. + // + // If dispIpHal.__nvoc_HalVarIdx keeps the same idx as DISPv0000 for a non-zero ipver, + // this means the IP ver is not listed in halspec DispIpHal and should be fixed. + // + // NVOC-TODO : make __nvoc_init_halspec_DispIpHal return an error code and remove the check + if (pDispIpHal->__nvoc_HalVarIdx == dispIpHalv00.__nvoc_HalVarIdx) + { + NV_PRINTF(LEVEL_ERROR, "Invalid dispIpHal.__nvoc_HalVarIdx %d for Disp IP Version 0x%08x\n", + pDispIpHal->__nvoc_HalVarIdx, ipver); + + NV_ASSERT(0); + return NV_ERR_INVALID_STATE; + } + } + + void __nvoc_init_funcTable_KernelDisplay(KernelDisplay *, RmHalspecOwner *, GpuHalspecOwner *); + void __nvoc_init_dataField_KernelDisplay(KernelDisplay *, RmHalspecOwner *, GpuHalspecOwner *); + __nvoc_init_funcTable_KernelDisplay(pKernelDisplay, pRmHalspecOwner, pGpuHalspecOwner); + __nvoc_init_dataField_KernelDisplay(pKernelDisplay, pRmHalspecOwner, pGpuHalspecOwner); + + void __nvoc_init_funcTable_DisplayInstanceMemory(DisplayInstanceMemory *, RmHalspecOwner *); + void __nvoc_init_dataField_DisplayInstanceMemory(DisplayInstanceMemory *, RmHalspecOwner *); + __nvoc_init_funcTable_DisplayInstanceMemory(KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay), + pRmHalspecOwner); + __nvoc_init_dataField_DisplayInstanceMemory(KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay), + pRmHalspecOwner); + + void __nvoc_init_funcTable_KernelHead(KernelHead *, RmHalspecOwner *, GpuHalspecOwner *); + void __nvoc_init_dataField_KernelHead(KernelHead *, RmHalspecOwner *, GpuHalspecOwner *); + NvU32 headIdx; + + for (headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++) + { + __nvoc_init_funcTable_KernelHead(KDISP_GET_HEAD(pKernelDisplay, headIdx), + pRmHalspecOwner, pGpuHalspecOwner); + __nvoc_init_dataField_KernelHead(KDISP_GET_HEAD(pKernelDisplay, headIdx), + pRmHalspecOwner, pGpuHalspecOwner); + } + + return NV_OK; +} + +/*! + * @brief: Initialize chip related info + * This function fills up the chip info structure of OBJGPU. + * + * @param[in] pGpu OBJGPU pointer + * + * @returns void + */ + +void +gpuInitChipInfo_IMPL +( + OBJGPU *pGpu +) +{ + // + // NOTE: Register access and DRF field splitting should generally always + // go in HAL functions, but PMC_BOOT_0 and PMC_BOOT_42 are an exception + // as these are guaranteed to remain the same across chips, since we use + // them to figure out which chip it is and how to wire up the HALs.
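+ // + // For illustration only (the field layout is assumed from the nv_ref + // manuals, not restated here): each boot register is split with DRF_VAL, + // e.g. + //     arch = DRF_VAL(_PMC, _BOOT_42, _ARCHITECTURE, pGpu->chipId1); + // with the decodePmcBoot*Architecture helpers absorbing per-family + // encoding differences before the value is shifted by GPU_ARCH_SHIFT.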
+ // + pGpu->chipInfo.pmcBoot0.impl = DRF_VAL(_PMC, _BOOT_0, _IMPLEMENTATION, pGpu->chipId0); + pGpu->chipInfo.pmcBoot0.arch = decodePmcBoot0Architecture(pGpu->chipId0) << GPU_ARCH_SHIFT; + pGpu->chipInfo.pmcBoot0.majorRev = DRF_VAL(_PMC, _BOOT_0, _MAJOR_REVISION, pGpu->chipId0); + pGpu->chipInfo.pmcBoot0.minorRev = DRF_VAL(_PMC, _BOOT_0, _MINOR_REVISION, pGpu->chipId0); + pGpu->chipInfo.pmcBoot0.minorExtRev = NV2080_CTRL_GPU_INFO_MINOR_REVISION_EXT_NONE; + pGpu->chipInfo.pmcBoot42.impl = DRF_VAL(_PMC, _BOOT_42, _IMPLEMENTATION, pGpu->chipId1); + pGpu->chipInfo.pmcBoot42.arch = decodePmcBoot42Architecture(pGpu->chipId1) << GPU_ARCH_SHIFT; + pGpu->chipInfo.pmcBoot42.majorRev = DRF_VAL(_PMC, _BOOT_42, _MAJOR_REVISION, pGpu->chipId1); + pGpu->chipInfo.pmcBoot42.minorRev = DRF_VAL(_PMC, _BOOT_42, _MINOR_REVISION, pGpu->chipId1); + pGpu->chipInfo.pmcBoot42.minorExtRev = DRF_VAL(_PMC, _BOOT_42, _MINOR_EXTENDED_REVISION, pGpu->chipId1); + + // + // SOCs do not use pmcBoot0/pmcBoot42 and instead write the implementation details to + // these top-level chipInfo fields, which is what the getters return. + // + pGpu->chipInfo.implementationId = pGpu->chipInfo.pmcBoot42.impl; + pGpu->chipInfo.platformId = pGpu->chipInfo.pmcBoot42.arch; + pGpu->chipInfo.chipId = decodePmcBoot42ChipId(pGpu->chipId1); +} + +/*! + * @brief: Returns physical address of end of DMA accessible range. + * + * @param[in] pGpu GPU object pointer + * + * @returns physical address of end of DMA accessible range + */ +RmPhysAddr +gpuGetDmaEndAddress_IMPL(OBJGPU *pGpu) +{ + NvU32 numPhysAddrBits = gpuarchGetSystemPhysAddrWidth_HAL(gpuGetArch(pGpu)); + + return (1ULL << numPhysAddrBits) - 1; +} + +VGPU_STATIC_INFO *gpuGetStaticInfo(OBJGPU *pGpu) +{ + + return NULL; +} + +GspStaticConfigInfo *gpuGetGspStaticInfo(OBJGPU *pGpu) +{ + if (IS_DCE_CLIENT(pGpu)) + return NULL; + + return NULL; +} + +OBJRPC *gpuGetGspClientRpc(OBJGPU *pGpu) +{ + if (IS_DCE_CLIENT(pGpu)) + { + NV_ASSERT_OR_RETURN(GPU_GET_DCECLIENTRM(pGpu) != NULL, NULL); + return GPU_GET_DCECLIENTRM(pGpu)->pRpc; + } + else if (IS_GSP_CLIENT(pGpu)) + { + } + + return NULL; +} + +OBJRPC *gpuGetVgpuRpc(OBJGPU *pGpu) +{ + return NULL; +} + +OBJRPC *gpuGetRpc(OBJGPU *pGpu) +{ + if (IS_VIRTUAL(pGpu)) + return gpuGetVgpuRpc(pGpu); + + if (IS_FW_CLIENT(pGpu)) + return gpuGetGspClientRpc(pGpu); + + return NULL; +} + +/*! + * @brief: Check if system memory is accessible by the GPU. + * Depends on NV2080_CTRL_CMD_BUS_SYSMEM_ACCESS, which is only exercised on Windows. + * + * @param[in] pGpu OBJGPU pointer + * + * @returns NvBool NV_TRUE if system memory is accessible, + * NV_FALSE otherwise + */ +NvBool +gpuCheckSysmemAccess_IMPL(OBJGPU* pGpu) +{ + return NV_TRUE; +} + + +/*! + * @brief Sets the GC6/JT SBIOS capability + * + * The capabilities are retrieved from the SBIOS through the JT_FUNC_CAPS subfunction + * + * @param[in] pGpu GPU object pointer + * + * @return status bubbles up the return status from osCallACPI_DSM + */ +NV_STATUS +gpuSetGC6SBIOSCapabilities_IMPL(OBJGPU *pGpu) +{ + NV_STATUS status; + + pGpu->acpiMethodData.jtMethodData.bSBIOSCaps = NV_FALSE; + + if ((!pGpu->acpiMethodData.bValid) || + (pGpu->acpiMethodData.jtMethodData.status != NV_OK)) + { + RMTRACE_SBIOS (_ACPI_DSM_ERROR, pGpu->gpuId, ACPI_DSM_FUNCTION_JT, JT_FUNC_CAPS, 0, 0, 0, 0, 0); + return NV_ERR_NOT_SUPPORTED; + } + + status = gpuJtVersionSanityCheck_HAL(pGpu); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unsupported JT revision ID. 
GC6 is being disabled.\n"); + RMTRACE_SBIOS (_ACPI_DSM_ERROR, pGpu->gpuId, ACPI_DSM_FUNCTION_JT, JT_FUNC_CAPS, 0, 0, 0, 0, 0); + return status; + } + + if (FLD_TEST_DRF(_JT_FUNC, _CAPS, _JT_ENABLED, _TRUE, pGpu->acpiMethodData.jtMethodData.jtCaps)) + { + pGpu->acpiMethodData.jtMethodData.bSBIOSCaps = NV_TRUE; + + switch (pGpu->acpiMethodData.jtMethodData.jtRevId) + { + case NV_JT_FUNC_CAPS_REVISION_ID_1_03: + // GC6 2.0 production + break; + case NV_JT_FUNC_CAPS_REVISION_ID_2_00: + // GC6 3.0 and FGC6 production + break; + default: + NV_PRINTF(LEVEL_ERROR, + "Unsupported JT revision ID. GC6 is being disabled. Update the " + "board EC PIC FW. On Windows, update the SBIOS GC6 AML as well.\n"); + DBG_BREAKPOINT(); + pGpu->acpiMethodData.jtMethodData.bSBIOSCaps = NV_FALSE; + break; + } + + } + + RMTRACE_GPU(_GC6_SBIOS_CAP, pGpu->gpuId, pGpu->acpiMethodData.jtMethodData.jtCaps, pGpu->acpiMethodData.jtMethodData.jtRevId, pGpu->acpiMethodData.jtMethodData.bSBIOSCaps, 0, 0, 0, 0); + RMTRACE_SBIOS (_ACPI_DSM_METHOD, pGpu->gpuId, ACPI_DSM_FUNCTION_JT, JT_FUNC_CAPS, pGpu->acpiMethodData.jtMethodData.jtCaps, 0, 0, 0, 0); + + return NV_OK; +} + +NV_STATUS gpuSimEscapeWrite(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, NvU32 Value) +{ + NV_ASSERT_OR_RETURN(Size <= (sizeof Value), NV_ERR_INVALID_ARGUMENT); + + return osSimEscapeWrite(pGpu, path, Index, Size, Value); +} + +NV_STATUS gpuSimEscapeWriteBuffer(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, void* pBuffer) +{ + return osSimEscapeWriteBuffer(pGpu, path, Index, Size, pBuffer); +} + +NV_STATUS gpuSimEscapeRead(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, NvU32 *Value) +{ + NV_ASSERT_OR_RETURN(Size <= (sizeof *Value), NV_ERR_INVALID_ARGUMENT); + + // + // Zero-initialize because the functions below don't fill in entire Value + // for Size < 4 + // + NvU32 readValue = 0; + NV_STATUS status; + + { + status = osSimEscapeRead(pGpu, path, Index, Size, &readValue); + } + + *Value = readValue; + return status; +} + +NV_STATUS gpuSimEscapeReadBuffer(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, void* pBuffer) +{ + return osSimEscapeReadBuffer(pGpu, path, Index, Size, pBuffer); +} + +// +// Only supported with Windows, Debug or Develop driver, or with Release drivers instrumented builds. 
+// Instrumented build: add RMCFG_OPTIONS="--enable=RMTEST" to nvmake command line +// TODO: add OpenRM/GSP support +// +#if defined(GPU_LOAD_FAILURE_TEST_SUPPORTED) +NV_STATUS +gpuLoadFailurePathTest_IMPL +( + OBJGPU *pGpu, + NvU32 engStage, + NvU32 engDescIdx, + NvBool bStopTest +) +{ + NV_STATUS rmStatus = NV_OK; + + // Check that the test is enabled and in the starting stage + if (!FLD_TEST_DRF(_REG_STR, _GPU_LOAD_FAILURE_TEST, _STATUS, _START, pGpu->loadFailurePathTestControl)) + { + return NV_OK; + } + + // Not supported for suspend/resume + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) + { + return NV_OK; + } + + // Stop the test + if (bStopTest) + { + pGpu->loadFailurePathTestControl = FLD_SET_DRF(_REG_STR, _GPU_LOAD_FAILURE_TEST, _STATUS, _FINISHED, pGpu->loadFailurePathTestControl); + goto writeRegistryAndExit; + } + + NvU32 regEngStage = DRF_VAL(_REG_STR, _GPU_LOAD_FAILURE_TEST, _STAGE, pGpu->loadFailurePathTestControl); + NvU32 regEngDescIdx = DRF_VAL(_REG_STR, _GPU_LOAD_FAILURE_TEST, _ENGINEINDEX, pGpu->loadFailurePathTestControl); + + // Check that we are not yet in the targeted stage (preinit, init, preload, load or postload) + if (engStage < regEngStage) + { + return NV_OK; + } + + // Check that we are in the next stage + if (engStage > regEngStage) + { + // Reset engine index to 0 and move to next index + regEngDescIdx = 0; + regEngStage = engStage; + } + + // Check that the engine is the next one to be tested + if (engDescIdx < regEngDescIdx) + { + return NV_OK; + } + + // At this point we fail the engine + rmStatus = NV_ERR_GENERIC; + + // Update registry key for next step + pGpu->loadFailurePathTestControl = FLD_SET_DRF(_REG_STR, _GPU_LOAD_FAILURE_TEST, _STATUS, _RUNNING, pGpu->loadFailurePathTestControl); + pGpu->loadFailurePathTestControl = FLD_SET_DRF_NUM(_REG_STR, _GPU_LOAD_FAILURE_TEST, _STAGE, regEngStage, pGpu->loadFailurePathTestControl); + pGpu->loadFailurePathTestControl = FLD_SET_DRF_NUM(_REG_STR, _GPU_LOAD_FAILURE_TEST, _ENGINEINDEX, regEngDescIdx + 1, pGpu->loadFailurePathTestControl); + +writeRegistryAndExit: + osWriteRegistryDword(pGpu, + NV_REG_STR_GPU_LOAD_FAILURE_TEST, + pGpu->loadFailurePathTestControl); + + switch (engStage) + { + case NV_REG_STR_GPU_LOAD_FAILURE_TEST_STAGE_PREINIT: + NV_PRINTF(LEVEL_ERROR, "Failing GPU PreInit for Engine ID 0x%x (%d)\n", engDescIdx, engDescIdx); + break; + case NV_REG_STR_GPU_LOAD_FAILURE_TEST_STAGE_INIT: + NV_PRINTF(LEVEL_ERROR, "Failing GPU Init for Engine ID 0x%x (%d)\n", engDescIdx, engDescIdx); + break; + case NV_REG_STR_GPU_LOAD_FAILURE_TEST_STAGE_PRELOAD: + NV_PRINTF(LEVEL_ERROR, "Failing GPU PreLoad for Engine ID 0x%x (%d)\n", engDescIdx, engDescIdx); + break; + case NV_REG_STR_GPU_LOAD_FAILURE_TEST_STAGE_LOAD: + NV_PRINTF(LEVEL_ERROR, "Failing GPU Load for Engine ID 0x%x (%d)\n", engDescIdx, engDescIdx); + break; + case NV_REG_STR_GPU_LOAD_FAILURE_TEST_STAGE_POSTLOAD: + NV_PRINTF(LEVEL_ERROR, "Failing GPU PostLoad for Engine ID 0x%x (%d)\n", engDescIdx, engDescIdx); + break; + } + + + return rmStatus; +} +#endif + +NvU32 +gpuGetLitterValues_KERNEL +( + OBJGPU *pGpu, + NvU32 index +) +{ + return 0; +} + +NV_STATUS gpuGetChipDetails_IMPL +( + OBJGPU *pGpu, + NV2080_CTRL_GPU_GET_CHIP_DETAILS_PARAMS *pParams +) +{ + NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS biosGetSKUInfoParams; + + NV_ASSERT_OK_OR_RETURN(gpuGetSkuInfo_HAL(pGpu, &biosGetSKUInfoParams)); + + // + // GPU chip name (PCI device ID) + // Upper half of pGpu->idInfo.PCIDeviceID is devid + // + pParams->pciDevId = (NvU16)DRF_VAL(_PCI, _SUBID, 
_DEVICE, pGpu->idInfo.PCIDeviceID); + + // GPU chip SKU + portStringCopy((char *) pParams->chipSku, + sizeof(pParams->chipSku), + (char *) biosGetSKUInfoParams.chipSKU, + sizeof(biosGetSKUInfoParams.chipSKU)); + + // GPU revision + pParams->chipMajor = gpuGetChipMajRev(pGpu); + pParams->chipMinor = gpuGetChipMinRev(pGpu); + + return NV_OK; +} + + +/*! + * @brief Returns a boolean indicating if an SLI bridge is supported by the specified GPU. + * + * @param[In] pGpu The GPU to check for SLI bridge support + * + * @return a boolean indicating if the specified GPU supports an SLI bridge. + * The bridge may be a video bridge or NvLink. + */ +NvBool +gpuIsSliLinkSupported_IMPL +( + OBJGPU *pGpu +) +{ + NvBool bIsSupported = NV_FALSE; + + return bIsSupported; +} + +NvBool +gpuCheckEngine_KERNEL +( + OBJGPU *pGpu, + ENGDESCRIPTOR engDesc +) +{ + return gpuCheckEngineWithOrderList_KERNEL(pGpu, engDesc, NV_TRUE); +} + +/*! + * @brief Checks whether an engine is available or not. + * + * The 'engine' is an engine descriptor. + * This function differs from busProbeRegister in the sense that it doesn't + * rely on timeouts after a read of a register in the engine's register space. + * Instead, it: + * - Returns NV_TRUE for all engines which must be present in the GPU. + * - Gets information about the CE, MSENC, NVJPG and OFA engines from the plugin or GSP-RM. + * - If bCheckEngineOrder is true, searches gpuChildOrderList_HAL for the remaining engines. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc ENGDESCRIPTOR used to check engine presence + * @param[in] bCheckEngineOrder If true, check gpuChildOrderList_HAL for engDesc as well + * + * @returns NV_TRUE if the engine is available. + * NV_FALSE if the engine is not available or floorswept. + * + */ +NvBool +gpuCheckEngineWithOrderList_KERNEL +( + OBJGPU *pGpu, + ENGDESCRIPTOR engDesc, + NvBool bCheckEngineOrder +) +{ + NvU32 rmEngineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX] = {0}; + NvU32 nv2080EngineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX] = {0}; + NvBool bSupported; + NV_STATUS status; + + NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR, + gpuGetRmEngineTypeCapMask(nv2080EngineCaps, + NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX, + rmEngineCaps), + return NV_FALSE); + + switch (ENGDESC_FIELD(engDesc, _CLASS)) + { + case ENG_CLASS_LSFM: + case ENG_CLASS_PMU: + case ENG_CLASS_CLK: + case ENG_CLASS_ACR: + case ENG_CLASS_DISP: + return NV_FALSE; + // + // This function is used in two environments: + // (a) vGPU where display is not yet supported. + // (b) RM offload (Kernel RM) where display is supported.
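+ // Hence the kernel display engine is reported as present only in the RM + // offload (firmware client) configuration, which is what IS_FW_CLIENT + // checks below.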
+ // + case ENG_CLASS_KERNEL_DISPLAY: + return IS_FW_CLIENT(pGpu); + + case ENG_CLASS_BIF: + case ENG_CLASS_KERNEL_BIF: + case ENG_CLASS_MC: + case ENG_CLASS_KERNEL_MC: + case ENG_CLASS_PRIV_RING: + case ENG_CLASS_SW_INTR: + case ENG_CLASS_TMR: + case ENG_CLASS_DMA: + case ENG_CLASS_BUS: + case ENG_CLASS_CIPHER: + case ENG_CLASS_INTR: + case ENG_CLASS_GPULOG: + case ENG_CLASS_GPUMON: + case ENG_CLASS_FIFO: + return NV_TRUE; + + case ENG_CLASS_CE: + { + NvBool isEnginePresent = NV_FALSE; + if (!IS_VIRTUAL(pGpu)) + { + } + else + { + isEnginePresent = !!NVGPU_GET_ENGINE_CAPS_MASK(rmEngineCaps, + RM_ENGINE_TYPE_COPY(GET_CE_IDX(engDesc))); + } + return isEnginePresent; + } + + case ENG_CLASS_NVENC: + return !!NVGPU_GET_ENGINE_CAPS_MASK(rmEngineCaps, + RM_ENGINE_TYPE_NVENC(GET_MSENC_IDX(engDesc))); + case ENG_CLASS_SEC2: + return !!NVGPU_GET_ENGINE_CAPS_MASK(rmEngineCaps, + RM_ENGINE_TYPE_SEC2); + case ENG_CLASS_NVDEC: + return !!NVGPU_GET_ENGINE_CAPS_MASK(rmEngineCaps, + RM_ENGINE_TYPE_NVDEC(GET_NVDEC_IDX(engDesc))); + + case ENG_CLASS_OFA: + return !!NVGPU_GET_ENGINE_CAPS_MASK(rmEngineCaps, + RM_ENGINE_TYPE_OFA(GET_OFA_IDX(engDesc))); + + case ENG_CLASS_NVJPEG: + return !!NVGPU_GET_ENGINE_CAPS_MASK(rmEngineCaps, + RM_ENGINE_TYPE_NVJPEG(GET_NVJPEG_IDX(engDesc))); + + case ENG_CLASS_GR: + { + if (engDesc == ENG_GR(0)) + { + return NV_TRUE; + } + + return NV_FALSE; + } + + case ENG_CLASS_INVALID: + NV_PRINTF(LEVEL_ERROR, + "Query for ENG_INVALID considered erroneous: %d\n", + engDesc); + return NV_TRUE; + // + // Check if the engine descriptor is supported by the current GPU. + // Callers must not send engine descriptors which are not on the + // GPU's HAL lists, so assert here. + // + default: + { + if (bCheckEngineOrder) + { + bSupported = gpuIsEngDescSupported(pGpu, engDesc); + + if (!bSupported) + { + NV_PRINTF(LEVEL_ERROR, "Unable to check engine ID: 0x%x\n", + engDesc); + NV_ASSERT(bSupported); + } + } + else + bSupported = NV_FALSE; + + return bSupported; + } + } +} + +static NV_STATUS +_gpuSetResetRequiredState +( + OBJGPU *pGpu, + NvBool newState +) +{ + NvBool prevState; + NV_STATUS status; + + status = gpuIsDeviceMarkedForReset(pGpu, &prevState); + if (status != NV_OK) + { + goto _gpuSetResetRequiredState_exit; + } + + status = gpuSetResetScratchBit_HAL(pGpu, newState); + if (status != NV_OK) + { + goto _gpuSetResetRequiredState_exit; + } + + if (prevState != newState) + { + status = gpuResetRequiredStateChanged_HAL(pGpu, newState); + } + +_gpuSetResetRequiredState_exit: + return status; +} + +/** + * @brief Interface which allows GPU to be marked for pending reset. + * + * @param[in] pGpu OBJGPU pointer + * + * @return NV_OK if successful + */ +NV_STATUS +gpuMarkDeviceForReset_IMPL +( + OBJGPU *pGpu +) +{ + return _gpuSetResetRequiredState(pGpu, NV_TRUE); +} + +/** + * @brief Interface which allows GPU to be unmarked for pending reset. + * + * This should be used only for debug or testing through + * NV2080_CTRL_CMD_GPU_UNMARK_DEVICE_FOR_RESET and never called directly. + * + * @param[in] pGpu OBJGPU pointer + * + * @return NV_OK if successful + */ +NV_STATUS +gpuUnmarkDeviceForReset_IMPL +( + OBJGPU *pGpu +) +{ + return _gpuSetResetRequiredState(pGpu, NV_FALSE); +} + +/** + * @brief Interface which returns if a GPU reset is needed.
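+ * + * A minimal caller-side sketch (illustrative only, using the public + * wrapper for the _IMPL below): + * + *     NvBool bResetNeeded; + *     if ((gpuIsDeviceMarkedForReset(pGpu, &bResetNeeded) == NV_OK) && bResetNeeded) + *     { + *         // surface the pending-reset state to the management stack + *     }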
+ * + * @param[in] pGpu OBJGPU pointer + * @param[out] pbResetRequired NvBool pointer indicating if the GPU needs a reset + * + * @return NV_OK if successful + */ +NV_STATUS +gpuIsDeviceMarkedForReset_IMPL +( + OBJGPU *pGpu, + NvBool *pbResetRequired +) +{ + return gpuGetResetScratchBit_HAL(pGpu, pbResetRequired); +} + +static NV_STATUS +_gpuSetDrainAndResetState +( + OBJGPU *pGpu, + NvBool newState +) +{ + NvBool prevState; + NV_STATUS status; + + status = gpuIsDeviceMarkedForDrainAndReset(pGpu, &prevState); + if (status != NV_OK) + { + goto _gpuSetDrainAndResetState_exit; + } + + status = gpuSetDrainAndResetScratchBit_HAL(pGpu, newState); + if (status != NV_OK) + { + goto _gpuSetDrainAndResetState_exit; + } + + if (prevState != newState) + { + gpuRefreshRecoveryAction_HAL(pGpu, NV_FALSE); + } + +_gpuSetDrainAndResetState_exit: + return status; +} + +/*! + * @brief Interface which allows GPU to be marked for pending drain and reset. This means, + * applications should be drained from the GPU and the GPU reset to regain full operability. + * + * How is this state exposed: + * - PMU SMBPBI mailbox will provide a query to expose this flag. So presumably a BMC queries + * this flag and resets the GPU after all workloads running on the GPU have completed. + * - Ampere error containment XID prints also expose this state + * + * @param[in] pGpu OBJGPU pointer + * + * @return NV_OK if successful + */ +NV_STATUS +gpuMarkDeviceForDrainAndReset_IMPL +( + OBJGPU *pGpu +) +{ + return _gpuSetDrainAndResetState(pGpu, NV_TRUE); +} + +/*! + * @brief Interface which allows GPU to be unmarked for pending drain and reset. + * + * This should be used only for debug or testing through + * NV2080_CTRL_CMD_GPU_UNMARK_DEVICE_FOR_DRAIN_AND_RESET and never called directly. + * + * How is this state exposed: + * - PMU SMBPBI mailbox will provide a query to expose this flag. So presumably a BMC queries + * this flag and resets the GPU after all workloads running on the GPU have completed. + * - Ampere error containment XID prints also expose this state + * + * @param[in] pGpu OBJGPU pointer + * + * @return NV_OK if successful + */ +NV_STATUS +gpuUnmarkDeviceForDrainAndReset_IMPL +( + OBJGPU *pGpu +) +{ + return _gpuSetDrainAndResetState(pGpu, NV_FALSE); +} + +/*! + * @brief Interface which returns if a GPU drain and reset is recommended. + * + * @param[in] pGpu OBJGPU pointer + * @param[out] pbDrainRecommended NvBool pointer indicating if the GPU needs a + * drain and reset + * + * @return NV_OK if successful + */ +NV_STATUS +gpuIsDeviceMarkedForDrainAndReset_IMPL +( + OBJGPU *pGpu, + NvBool *pbDrainRecommended +) +{ + return gpuGetDrainAndResetScratchBit_HAL(pGpu, pbDrainRecommended); +} + +const char* +_gpuRecoveryActionName +( + NV2080_CTRL_GPU_RECOVERY_ACTION action +) +{ + switch(action) + { + case NV2080_CTRL_GPU_RECOVERY_ACTION_NONE: + return "None"; + case NV2080_CTRL_GPU_RECOVERY_ACTION_GPU_RESET: + return "GPU Reset Required"; + case NV2080_CTRL_GPU_RECOVERY_ACTION_NODE_REBOOT: + return "Node Reboot Required"; + case NV2080_CTRL_GPU_RECOVERY_ACTION_DRAIN_P2P: + return "Drain P2P"; + case NV2080_CTRL_GPU_RECOVERY_ACTION_DRAIN_AND_RESET: + return "Drain and Reset"; + default: + NV_ASSERT_FAILED("Unknown recovery action!"); + return "Unknown"; + } +} + +/*! + * @brief This function queries the action required to recover from a GPU fault. + * + * @param[In] pGpu The GPU to query + * @param[Out] pParams The structure to store the recovery action + * + * @return None.
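+ * + * The reported value is one of the NV2080_CTRL_GPU_RECOVERY_ACTION_* codes + * decoded by _gpuRecoveryActionName() above (None, GPU Reset Required, + * Node Reboot Required, Drain P2P, Drain and Reset).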
+ */ +void +gpuGetRecoveryAction_IMPL +( + OBJGPU *pGpu, + NV2080_CTRL_GPU_GET_RECOVERY_ACTION_PARAMS *pParams +) +{ + NV_PRINTF(LEVEL_INFO, + "GetRecoveryAction: 0x%x (%s)\n", + pGpu->currentRecoveryAction, + _gpuRecoveryActionName(pGpu->currentRecoveryAction)); + + pParams->action = pGpu->currentRecoveryAction; +} + +/*! + * @brief This function refreshes the GPU fault recovery action, accounting for + * recent changes. This function runs in a work item with GPUS_LOCK_ALL already + * acquired. + * + * @param[In] pGpu The GPU object + * + * @return None. + */ +static void +_gpuRefreshRecoveryActionInLock +( + NvU32 gpuInstance, + void *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPU *pGpu = gpumgrGetGpu(gpuInstance); + NV_STATUS rmStatus; + NvBool bResetRequired; + NvBool bDrainAndReset; + NV2080_CTRL_GPU_RECOVERY_ACTION newAction; + NV2080_CTRL_GPU_RECOVERY_ACTION oldAction; + + if (pGpu == NULL) + { + // Call-back is too late. pGpu is already NULL + return; + } + + // Decide the new recovery action + if (pSys->getProperty(pSys, PDB_PROP_SYS_RECOVERY_REBOOT_REQUIRED) + || pGpu->getProperty(pGpu, PDB_PROP_GPU_RECOVERY_REBOOT_REQUIRED)) + { + newAction = NV2080_CTRL_GPU_RECOVERY_ACTION_NODE_REBOOT; + } + else + { + rmStatus = gpuIsDeviceMarkedForReset(pGpu, &bResetRequired); + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) || + ((rmStatus == NV_OK) && bResetRequired)) + { + newAction = NV2080_CTRL_GPU_RECOVERY_ACTION_GPU_RESET; + } + else + { + rmStatus = gpuIsDeviceMarkedForDrainAndReset(pGpu, &bDrainAndReset); + if ((rmStatus == NV_OK) && bDrainAndReset) + { + newAction = NV2080_CTRL_GPU_RECOVERY_ACTION_DRAIN_AND_RESET; + } + else if (pGpu->getProperty(pGpu, PDB_PROP_GPU_RECOVERY_DRAIN_P2P_REQUIRED)) + { + newAction = NV2080_CTRL_GPU_RECOVERY_ACTION_DRAIN_P2P; + } + else + { + newAction = NV2080_CTRL_GPU_RECOVERY_ACTION_NONE; + } + } + } + + if (newAction != pGpu->currentRecoveryAction) + { + oldAction = pGpu->currentRecoveryAction; + pGpu->currentRecoveryAction = newAction; + + if (oldAction != GPU_RECOVERY_ACTION_UNKNOWN) + { + // Trigger NVML event for the new recovery action. + gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_GPU_RECOVERY_ACTION, NULL, 0, 0, newAction); + + // Log XID 154 to indicate new recovery action. + nvErrorLog_va(pGpu, GPU_RECOVERY_ACTION_CHANGED, "GPU recovery action changed from 0x%x (%s) to 0x%x (%s)", + oldAction, _gpuRecoveryActionName(oldAction), newAction, _gpuRecoveryActionName(newAction)); + } + } +} + +/*! + * @brief This function refreshes the GPU fault recovery action, accounting for + * recent changes. + * + * @param[In] pGpu The GPU object + * @param[In] inLock Specifies whether the calling thread has already + * acquired all GPU locks. + * + * @return None. + */ +void +gpuRefreshRecoveryAction_KERNEL +( + OBJGPU *pGpu, + NvBool inLock +) +{ + if (!inLock) + { + // + // Schedule a workitem to acquire GPUS_LOCK_ALL and perform the refresh + // as the current thread could be in any IRQL / lock context. + // + NV_ASSERT_OK(osQueueWorkItem(pGpu, + _gpuRefreshRecoveryActionInLock, + NULL, + OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS)); + } + else + { + // Lock requirement is already satisfied, perform the refresh directly. + _gpuRefreshRecoveryActionInLock(pGpu->gpuInstance, NULL); + } +} + +/*! + * @brief This function specifies whether the GPU needs all its P2P traffic + * drained before it can be used. + * + * @param[In] pGpu The GPU object + * @param[In] bDrainP2P Specifies whether the GPU needs P2P draining + * + * @return None. 
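+ * + * Note that the property write and the recovery-action refresh below are + * skipped when bDrainP2P already matches the current value of + * PDB_PROP_GPU_RECOVERY_DRAIN_P2P_REQUIRED (both sides of the compare are + * !!-normalized to guard against non-canonical NvBool values).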
+ */ +void +gpuSetRecoveryDrainP2P_KERNEL +( + OBJGPU *pGpu, + NvBool bDrainP2P +) +{ + if (!!pGpu->getProperty(pGpu, PDB_PROP_GPU_RECOVERY_DRAIN_P2P_REQUIRED) != !!bDrainP2P) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_RECOVERY_DRAIN_P2P_REQUIRED, bDrainP2P); + gpuRefreshRecoveryAction_KERNEL(pGpu, NV_FALSE); + } +} +/*! + * @brief Set partition error attribution + * + * @param[in] pGpu OBJGPU pointer + * @param[in] errorCode Error Containment error code + * @param[in] loc Location, SubLocation information + * @param[in] rcErrorCode RC error code + * + * @returns NV_OK + * NV_ERR_INVALID_ARGUMENT if errorCode is invalid or + * if partition attribution isn't supported for the error + */ +NV_STATUS +gpuSetPartitionErrorAttribution_KERNEL +( + OBJGPU *pGpu, + NV_ERROR_CONT_ERR_ID errorCode, + NV_ERROR_CONT_LOCATION loc, + NvU32 rcErrorCode +) +{ + switch (errorCode) + { + case NV_ERROR_CONT_ERR_ID_E24_GSP_POISON: + { + NV_PRINTF(LEVEL_ERROR, "Determining MIG_INSTANCE_REF is not supported for error ID 0x%x.\n", errorCode); + NV_ASSERT(0); + return NV_ERR_INVALID_ARGUMENT; + } + default: + { + NV_PRINTF(LEVEL_ERROR, "Invalid error ID: 0x%x\n", errorCode); + NV_ASSERT(0); + return NV_ERR_INVALID_ARGUMENT; + } + } + + return NV_OK; +} + +/*! + * @brief This function logs an XID message to OOB by sending an RPC message to the GSP. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] xid The XID number + * @param[in] message The text message associated with the XID + * @param[in] len Length, in bytes, of the text message, excluding the null terminator + * + * @returns void + */ +void +gpuLogOobXidMessage_KERNEL +( + OBJGPU *pGpu, + NvU32 xid, + const char *message, + NvU32 len +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS status; + NvBool bGspFatalError = NV_FALSE; + NV2080_CTRL_INTERNAL_LOG_OOB_XID_PARAMS params = {0}; + + // Exclude conditions that indicate issues with GSP communication. + if ((xid == GSP_ERROR) || + (xid == GSP_RPC_TIMEOUT) || + (xid == ROBUST_CHANNEL_GPU_HAS_FALLEN_OFF_THE_BUS) || + API_GPU_IN_RESET_SANITY_CHECK(pGpu) || + !pGpu->gspRmInitialized || + pGpu->getProperty(pGpu, PDB_PROP_GPU_PREPARING_FULLCHIP_RESET) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST) || + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) || + bGspFatalError) + { + return; + } + + // Copy the message into the RPC params, truncated to max RPC size, but + // always include a trailing NULL terminator. + len = NV_MIN(len, NV2080_INTERNAL_OOB_XID_MESSAGE_BUFFER_SIZE - 1); + params.message[len] = '\0'; + params.xid = xid; + params.len = len; + portMemCopy(params.message, NV2080_INTERNAL_OOB_XID_MESSAGE_BUFFER_SIZE, message, len); + + NV_CHECK_OK(status, LEVEL_ERROR, + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_LOG_OOB_XID, + &params, + sizeof(params))); + +} + +NvBool +gpuValidateMIGSupport_KERNEL +( + OBJGPU *pGpu +) +{ + NV_ASSERT_FAILED("This function should not be called\n"); + return NV_FALSE; +} + +void +gpuSetRecoveryRebootRequired_IMPL +( + OBJGPU *pGpu, + NvBool bRebootRequired, + NvBool bBlockNewWorkload +) +{ + if (!!pGpu->getProperty(pGpu, PDB_PROP_GPU_RECOVERY_REBOOT_REQUIRED) != !!bRebootRequired) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_RECOVERY_REBOOT_REQUIRED, bRebootRequired); + + gpuRefreshRecoveryAction_KERNEL(pGpu, NV_FALSE); + } + + pGpu->bBlockNewWorkload = bBlockNewWorkload; +} + +/*!
+ * Generate GID data for a GPU. The function uses a pre-defined + * GID combined with GPU instance for now, but will be modified + * later to use PDI for generating the GID. + * + * @param [in] pGpu OBJGPU pointer + * @param [out] pGidData data array into which GID should be written + * @param [in] gidSize size of data array + * @param [in] gidFlags selects either the SHA-1 or SHA-256 GID + * + * @return NV_OK if the GID is generated correctly + */ +NV_STATUS +gpuGenGidData_SOC +( + OBJGPU *pGpu, + NvU8 *pGidData, + NvU32 gidSize, + NvU32 gidFlags +) +{ + if (FLD_TEST_DRF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_TYPE,_SHA1,gidFlags)) + { + // + // SHA1 generated from string "Nvidia" => "0xA7C66AD26DBB0AB8C1A237BA6DBA36B8" with + // last byte replaced by gpuInstance to generate unique value for each GPU + // + NvU8 GID_DATA[RM_SHA1_GID_SIZE] = {0xA7,0xC6,0x6A,0xD2, + 0x6D,0xBB,0x0A,0xB8, + 0xC1,0xA2,0x37,0xBA, + 0x6D,0xBA,0x36,pGpu->gpuInstance}; + + if (gidSize != RM_SHA1_GID_SIZE || pGidData == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + portMemCopy(pGidData, + RM_SHA1_GID_SIZE, GID_DATA, + RM_SHA1_GID_SIZE); + + return NV_OK; + } + else + { + return NV_ERR_INVALID_FLAGS; + } +} + +/*! + * Boot GSP-RM Proxy by sending a COT command to either FSP or SEC2. + * + * @param [in] pGpu OBJGPU pointer + * + * @return NV_OK if successful + */ +NV_STATUS +gpuBootGspRmProxy_IMPL +( + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_access.c b/src/nvidia/src/kernel/gpu/gpu_access.c new file mode 100644 index 0000000..35b2505 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_access.c @@ -0,0 +1,1873 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" + +#include "core/thread_state.h" +#include "platform/sli/sli.h" +#include "nv_ref.h" + +// Following enums are duplicated in 'apps/nvbucket/oca/ocarm.h'.
+typedef enum { + BAD_READ_GPU_OFF_BUS = 1, + BAD_READ_LOW_POWER, + BAD_READ_PCI_DEVICE_DISABLED, + BAD_READ_GPU_RESET, + BAD_READ_DWORD_SHIFT, + BAD_READ_UNKNOWN, +} RMCD_BAD_READ_REASON; + +static void _gpuCleanRegisterFilterList(DEVICE_REGFILTER_INFO *); +static NvU32 _gpuHandleReadRegisterFilter(OBJGPU *, DEVICE_INDEX devIndex, NvU32 devInstance, NvU32 addr, NvU32 accessSize, NvU32 *pFlags, THREAD_STATE_NODE *pThreadState); +static void _gpuHandleWriteRegisterFilter(OBJGPU *, DEVICE_INDEX devIndex, NvU32 devInstance, NvU32 addr, NvU32 val, NvU32 accessSize, NvU32 *pFlags, THREAD_STATE_NODE *pThreadState); + +static void ioaprtWriteRegUnicast(OBJGPU *, IoAperture *pAperture, NvU32 addr, NvV32 val, NvU32 size); +static NvU32 ioaprtReadReg(IoAperture *pAperture, NvU32 addr, NvU32 size); + +static REGISTER_FILTER * _findGpuRegisterFilter(DEVICE_INDEX devIndex, NvU32 devInstance, NvU32 addr, REGISTER_FILTER *); +static NV_STATUS _gpuInitIOAperture(OBJGPU *pGpu, NvU32 deviceIndex, DEVICE_MAPPING *pMapping); + +NV_STATUS +regAccessConstruct +( + RegisterAccess *pRegisterAccess, + OBJGPU *pGpu +) +{ + NV_STATUS rmStatus = NV_OK; + DEVICE_INDEX deviceIndex, minDeviceIndex, maxDeviceIndex; + + pRegisterAccess->pGpu = pGpu; + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + // The DEVICE_INDEX_GPU aperture is the GPU's. As Tegra SOC NvDisplay constructs the + // display device IO aperture as part of objdisp construction, it's safe to + // skip this function. + return NV_OK; + } + + // Check that GPU is the first device + ct_assert(DEVICE_INDEX_GPU == 0); + + minDeviceIndex = DEVICE_INDEX_GPU; + maxDeviceIndex = pGpu->bIsSOC ? (DEVICE_INDEX_MAX - 1) : (DEVICE_INDEX_GPU); + + for (deviceIndex = minDeviceIndex; deviceIndex <= maxDeviceIndex; deviceIndex++) + { + // Initialize IO Device and Aperture + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, deviceIndex, 0); + if (pMapping != NULL) + { + rmStatus = _gpuInitIOAperture(pGpu, deviceIndex, pMapping); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to initialize pGpu IO device/aperture for deviceIndex=%d.\n", deviceIndex); + return rmStatus; + } + } + } + + return rmStatus; +} + +void +regAccessDestruct +( + RegisterAccess *pRegisterAccess +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + DEVICE_INDEX deviceIndex; + NvU32 mappingNum; + IoAperture *pIOAperture; + REGISTER_FILTER *pNode; + + // Ignore attempt to destruct a not-fully-constructed RegisterAccess + if (pGpu == NULL) + { + return; + } + + for (deviceIndex = 0; deviceIndex < DEVICE_INDEX_MAX; deviceIndex++) + { + pIOAperture = pGpu->pIOApertures[deviceIndex]; + if (pIOAperture != NULL) + { + objDelete(pIOAperture); + } + } + + for (mappingNum = 0; mappingNum < pGpu->gpuDeviceMapCount; mappingNum++) + { + // Device-specific register filter list + NV_ASSERT(!pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterList); + if (NULL != pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterLock) + { + portSyncSpinlockDestroy(pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterLock); + pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterLock = NULL; + } + + while (pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterRecycleList) + { + pNode = pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterRecycleList; + + pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterRecycleList = pNode->pNext; + portMemFree(pNode); + } + } +} + +// +// The following register I/O functions are organized into two groups; +// a multi-chip 
unaware group and a multi-chip aware group. +// The multi-chip aware group of register I/O functions is also split +// into two groups; one that really does multi-chip logic and another +// that has the same interface but doesn't do any of the multi-chip +// logic. +// +// In the interests of performance, the determination as to whether +// multi-chip logic is necessary is done at two levels; the upper-level +// functions use 'MC' register I/O macros where multi-chip considerations +// are required, and when the 'MC' register I/O macros are used they +// call through GPU object pointers that are polymorphic - they contain +// pointers to one of the two groups of multi-chip aware functions +// depending on whether the multi-chip condition actually exists. +// This avoids a run-time SLI LOOP call. +// +static void +_regWriteUnicast +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvU32 val, + NvU32 size, + THREAD_STATE_NODE *pThreadState +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + NvU32 flags = 0; + NV_STATUS status; + DEVICE_MAPPING *pMapping; + + pRegisterAccess->regWriteCount++; + + pMapping = gpuGetDeviceMapping(pGpu, deviceIndex, instance); + if (pMapping == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Could not find mapping for reg %x, deviceIndex=0x%x instance=%d\n", + addr, deviceIndex, instance); + NV_ASSERT(0); + return; + } + + status = gpuSanityCheckRegisterAccess(pGpu, addr, NULL); + if (status != NV_OK) + { + return; + } + + _gpuHandleWriteRegisterFilter(pGpu, deviceIndex, instance, addr, val, size, &flags, pThreadState); + + if (!(flags & REGISTER_FILTER_FLAGS_WRITE)) + { + switch (size) + { + case 8: + osDevWriteReg008(pGpu, pMapping, addr, 0xFFU & (val)); + break; + case 16: + osDevWriteReg016(pGpu, pMapping, addr, 0xFFFFU & (val)); + break; + case 32: + osDevWriteReg032(pGpu, pMapping, addr, val); + break; + } + } +} + +/*! + * @brief: Initialize an IoAperture instance in-place. + * + * @param[out] pAperture pointer to the IoAperture. + * @param[in] pParentAperture pointer to the parent of the new IoAperture. + * @param[in] offset offset from the parent APERTURE's baseAddress. + * @param[in] length length of the APERTURE. + * + * @return NV_OK upon success + * NV_ERR* otherwise. + */ +NV_STATUS +ioaprtInit +( + IoAperture *pAperture, + IoAperture *pParentAperture, + NvU32 offset, + NvU32 length +) +{ + return objCreateWithFlags(&pAperture, NVOC_NULL_OBJECT, IoAperture, NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT, pParentAperture, NULL, 0, 0, NULL, 0, offset, length); +} + +/*! + * Initialize an IoAperture instance. + * + * @param[in,out] pAperture pointer to IoAperture instance to be initialized. + * @param[in] pParentAperture pointer to parent of the new IoAperture. + * @param[in] deviceIndex device index + * @param[in] deviceInstance device instance + * @param[in] pMapping device register mapping + * @param[in] mappingStartAddr register address corresponding to the start of the mapping + * @param[in] offset offset from the parent APERTURE's baseAddress. + * @param[in] length length of the APERTURE. + * + * @return NV_OK when inputs are valid. 
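+ * + * Two construction modes, as a sketch of the logic below: + * - Child view: pass pParentAperture (pMapping must be NULL, pGpu NULL or + *   equal to the parent's); offset and length are relative to the parent's + *   baseAddress. + * - Root aperture: pass pGpu and pMapping with pParentAperture == NULL; + *   baseAddress starts at offset and accesses are later translated through + *   mappingStartAddr.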
+ */ +NV_STATUS +ioaprtConstruct_IMPL +( + IoAperture *pAperture, + IoAperture *pParentAperture, + OBJGPU *pGpu, + NvU32 deviceIndex, + NvU32 deviceInstance, + DEVICE_MAPPING *pMapping, + NvU32 mappingStartAddr, + NvU32 offset, + NvU32 length +) +{ + NV_ASSERT_OR_RETURN(length > 0, NV_ERR_INVALID_ARGUMENT); + + if (pParentAperture != NULL) + { + NV_ASSERT_OR_RETURN(pMapping == NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pGpu == NULL || pGpu == pParentAperture->pGpu, NV_ERR_INVALID_ARGUMENT); + + pAperture->pGpu = pParentAperture->pGpu; + pAperture->deviceIndex = pParentAperture->deviceIndex; + pAperture->deviceInstance = pParentAperture->deviceInstance; + pAperture->pMapping = pParentAperture->pMapping; + pAperture->baseAddress = pParentAperture->baseAddress; + pAperture->mappingStartAddr = pParentAperture->mappingStartAddr; + + // Check if the child Aperture strides beyond the parent's boundary. + if ((length + offset) > pParentAperture->length) + { + NV_PRINTF(LEVEL_WARNING, + "Child aperture crosses parent's boundary, length %u offset %u, Parent's length %u\n", + length, offset, pParentAperture->length); + } + + } + else + { + NV_ASSERT_OR_RETURN(pMapping != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + + pAperture->pGpu = pGpu; + pAperture->deviceIndex = deviceIndex; + pAperture->deviceInstance = deviceInstance; + pAperture->pMapping = pMapping; + pAperture->baseAddress = 0; + pAperture->mappingStartAddr = mappingStartAddr; + } + + pAperture->baseAddress += offset; + pAperture->length = length; + + return NV_OK; +} + +static void +ioaprtWriteRegUnicast +( + OBJGPU *pGpu, + IoAperture *pAperture, + NvU32 addr, + NvV32 val, + NvU32 size +) +{ + NvU32 deviceIndex = pAperture->deviceIndex; + NvU32 instance = pAperture->deviceInstance; + NvU32 regAddr = pAperture->baseAddress + addr; + NvU32 mappingRegAddr = regAddr - pAperture->mappingStartAddr; + DEVICE_MAPPING *pMapping = pAperture->pMapping; + NvU32 flags = 0; + NV_STATUS status; + THREAD_STATE_NODE *pThreadState; + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + status = gpuSanityCheckRegisterAccess(pGpu, regAddr, NULL); + if (status != NV_OK) + { + return; + } + + threadStateGetCurrentUnchecked(&pThreadState, pGpu); + + _gpuHandleWriteRegisterFilter(pGpu, deviceIndex, instance, regAddr, + val, size, &flags, pThreadState); + } + + if (!(flags & REGISTER_FILTER_FLAGS_WRITE)) + { + switch (size) + { + case 8: + osDevWriteReg008(pGpu, pMapping, mappingRegAddr, 0xFFU & (val)); + break; + case 16: + osDevWriteReg016(pGpu, pMapping, mappingRegAddr, 0xFFFFU & (val)); + break; + case 32: + osDevWriteReg032(pGpu, pMapping, mappingRegAddr, val); + break; + } + } +} + +void +ioaprtWriteReg08_IMPL +( + IoAperture *pAperture, + NvU32 addr, + NvV8 val +) +{ + NV_ASSERT(!gpumgrGetBcEnabledStatus(pAperture->pGpu)); + + ioaprtWriteRegUnicast(pAperture->pGpu, pAperture, addr, val, 8 /* size */); +} + +void +ioaprtWriteReg16_IMPL +( + IoAperture *pAperture, + NvU32 addr, + NvV16 val +) +{ + NV_ASSERT(!gpumgrGetBcEnabledStatus(pAperture->pGpu)); + + ioaprtWriteRegUnicast(pAperture->pGpu, pAperture, addr, val, 16 /* size */); +} + +void +ioaprtWriteReg32_IMPL +( + IoAperture *pAperture, + NvU32 addr, + NvV32 val +) +{ + NV_ASSERT(!gpumgrGetBcEnabledStatus(pAperture->pGpu)); + + ioaprtWriteRegUnicast(pAperture->pGpu, pAperture, addr, val, 32 /* size */); +} + +void +ioaprtWriteReg32Uc_IMPL +( + IoAperture *pAperture, + NvU32 addr, + NvV32 val +) +{ + 
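// Unlike ioaprtWriteReg32 above, this Uc (unicast) variant performs no + // broadcast-mode assert, so it can be used from paths where SLI broadcast + // may still be enabled and the caller handles per-subdevice iteration. + 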
ioaprtWriteRegUnicast(pAperture->pGpu, pAperture, addr, val, 32 /* size */); +} + +void +regWrite008 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvV8 val +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + _regWriteUnicast(GPU_GET_REGISTER_ACCESS(pGpu), deviceIndex, instance, addr, val, 8, NULL); + SLI_LOOP_END; +} +void +regWrite016 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvV16 val +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + _regWriteUnicast(GPU_GET_REGISTER_ACCESS(pGpu), deviceIndex, instance, addr, val, 16, NULL); + SLI_LOOP_END; +} + +void +regWrite032 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvV32 val, + THREAD_STATE_NODE *pThreadState +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + regWrite032Unicast(GPU_GET_REGISTER_ACCESS(pGpu), deviceIndex, instance, addr, val, pThreadState); + SLI_LOOP_END +} + +void +regWrite032Unicast +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvV32 val, + THREAD_STATE_NODE *pThreadState +) +{ + + _regWriteUnicast(pRegisterAccess, deviceIndex, instance, addr, val, 32, pThreadState); +} + +static NvU32 +ioaprtReadReg +( + IoAperture *pAperture, + NvU32 addr, + NvU32 size +) +{ + NvU32 flags = 0; + NvU32 returnValue = 0; + OBJGPU *pGpu = pAperture->pGpu; + NV_STATUS status = NV_OK; + NvU32 regAddr = pAperture->baseAddress + addr; + NvU32 mappingRegAddr = regAddr - pAperture->mappingStartAddr; + NvU32 deviceIndex = pAperture->deviceIndex; + NvU32 instance = pAperture->deviceInstance; + DEVICE_MAPPING *pMapping = pAperture->pMapping; + THREAD_STATE_NODE *pThreadState; + + pGpu->registerAccess.regReadCount++; + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + status = gpuSanityCheckRegisterAccess(pGpu, regAddr, NULL); + if (status != NV_OK) + { + return (~0); + } + + threadStateGetCurrentUnchecked(&pThreadState, pGpu); + + returnValue = _gpuHandleReadRegisterFilter(pGpu, deviceIndex, instance, + regAddr, size, &flags, pThreadState); + } + + if (!(flags & REGISTER_FILTER_FLAGS_READ)) + { + switch (size) + { + case 8: + returnValue = osDevReadReg008(pGpu, pMapping, mappingRegAddr); + break; + case 16: + returnValue = osDevReadReg016(pGpu, pMapping, mappingRegAddr); + break; + case 32: + returnValue = osDevReadReg032(pGpu, pMapping, mappingRegAddr); + break; + } + } + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + // Make sure the value read is sane before we party on it. + gpuSanityCheckRegRead(pGpu, regAddr, size, &returnValue); + } + + return returnValue; +} + +NvU8 +ioaprtReadReg08_IMPL +( + IoAperture *pAperture, + NvU32 addr +) +{ + return (NvU8) ioaprtReadReg(pAperture, addr, 8 /* size */); +} + +NvU16 +ioaprtReadReg16_IMPL +( + IoAperture *pAperture, + NvU32 addr +) +{ + return (NvU16) ioaprtReadReg(pAperture, addr, 16 /* size */); +} + +NvU32 +ioaprtReadReg32_IMPL +( + IoAperture *pAperture, + NvU32 addr + +) +{ + return ioaprtReadReg(pAperture, addr, 32 /* size */); +} + +/*! 
+ * Checks if the register address is valid for a particular aperture + * + * @param[in] pAperture IoAperture pointer + * @param[in] addr register address + * + * @returns NV_TRUE Register offset is valid + */ +NvBool +ioaprtIsRegValid_IMPL +( + IoAperture *pAperture, + NvU32 addr +) +{ + NV_ASSERT_OR_RETURN(pAperture != NULL, NV_FALSE); + + return addr < pAperture->length; +} + +static NvU32 +_regRead +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvU32 size, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 flags = 0; + NvU32 returnValue = 0; + OBJGPU *pGpu = pRegisterAccess->pGpu; + DEVICE_MAPPING *pMapping; + NV_STATUS status = NV_OK; + + pRegisterAccess->regReadCount++; + + pMapping = gpuGetDeviceMapping(pGpu, deviceIndex, instance); + if (pMapping == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Could not find mapping for reg %x, deviceIndex=0x%x instance=%d\n", + addr, deviceIndex, instance); + NV_ASSERT(0); + return 0xd0d0d0d0; + } + + if ((size == 32) && + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE)) + { + return osDevReadReg032(pGpu, pMapping, addr); + } + + status = gpuSanityCheckRegisterAccess(pGpu, addr, &returnValue); + if (status != NV_OK) + return returnValue; + + returnValue = _gpuHandleReadRegisterFilter(pGpu, deviceIndex, instance, + addr, size, &flags, pThreadState); + + if (!(flags & REGISTER_FILTER_FLAGS_READ)) + { + switch (size) + { + case 8: + returnValue = osDevReadReg008(pGpu, pMapping, addr); + break; + case 16: + returnValue = osDevReadReg016(pGpu, pMapping, addr); + break; + case 32: + returnValue = osDevReadReg032(pGpu, pMapping, addr); + break; + } + } + + // Make sure the value read is sane before we party on it. + gpuSanityCheckRegRead(pGpu, addr, size, &returnValue); + + return returnValue; +} + +NvU8 +regRead008 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr +) +{ + return _regRead(pRegisterAccess, deviceIndex, instance, addr, 8, NULL); +} + +NvU16 +regRead016 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr +) +{ + return _regRead(pRegisterAccess, deviceIndex, instance, addr, 16, NULL); +} + +/*! + * This function is used for converting do-while read register constructs in RM to + * equivalent PMU sequencer handling. The idea is to construct seq instruction + * which polls on a field in the given register. 
+ * + * @param[in] pRegisterAccess RegisterAccess object pointer + * @param[in] deviceIndex deviceIndex + * @param[in] addr register address + * @param[in] mask required mask for the field + * @param[in] val value to poll for + * + * @returns NV_OK if val is found + * NV_ERR_TIMEOUT if val is not found within timeout limit + */ +NV_STATUS +regRead032_AndPoll +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 addr, + NvU32 mask, + NvU32 val +) +{ + RMTIMEOUT timeout; + OBJGPU *pGpu = pRegisterAccess->pGpu; + NvU32 data = 0; + NV_STATUS status = NV_OK; + + { + gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0); + + do + { + data = GPU_REG_RD32(pGpu, addr); + + if ((data & mask) == val) + { + status = NV_OK; + break; + } + + // Loosen this loop + osSpinLoop(); + + status = gpuCheckTimeout(pGpu, &timeout); + } while (status != NV_ERR_TIMEOUT); + } + + return status; +} + +NvU32 +regRead032 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + THREAD_STATE_NODE *pThreadState +) +{ + if (pRegisterAccess == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + return _regRead(pRegisterAccess, deviceIndex, instance, addr, 32, pThreadState); +} + +/*! + * @brief Allocates and initializes GPU_IO_DEVICE and IO Aperture. + * + * @param pGpu + * @param[in] deviceIndex DEVICE_INDEX enum value for identifying device type + * @param[in] gpuDeviceEnum Device ID NV_DEVID_* + * @param[in] gpuNvPAddr Physical Base Address + * @param[in] gpuNvLength Length of Aperture + * + * @return NV_OK if IO Aperture is successfully initialized, error otherwise. + */ +static NV_STATUS +_gpuInitIOAperture +( + OBJGPU *pGpu, + NvU32 deviceIndex, + DEVICE_MAPPING *pMapping +) +{ + NV_STATUS rmStatus; + + rmStatus = objCreate(&pGpu->pIOApertures[deviceIndex], NVOC_NULL_OBJECT, IoAperture, + NULL, // no parent aperture + pGpu, + deviceIndex, + 0, // GPU register operations are always on instance 0 + pMapping, 0, // mapping, mappingStartAddr + 0, pMapping->gpuNvLength); // offset, length + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to initialize pGpu IO aperture for devIdx %d.\n", + deviceIndex); + + return rmStatus; + } + + return NV_OK; +} + + +NV_STATUS +regAddRegisterFilter +( + RegisterAccess *pRegisterAccess, + NvU32 flags, + DEVICE_INDEX devIndex, NvU32 devInstance, + NvU32 rangeStart, NvU32 rangeEnd, + GpuWriteRegCallback pWriteCallback, + GpuReadRegCallback pReadCallback, + void *pParam, + REGISTER_FILTER **ppFilter +) +{ + DEVICE_REGFILTER_INFO *pRegFilter; + REGISTER_FILTER *pNode; + REGISTER_FILTER *pTmpNode; + DEVICE_MAPPING *pMapping; + + NV_ASSERT_OR_RETURN(devIndex < DEVICE_INDEX_MAX, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pRegisterAccess != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(ppFilter != NULL, NV_ERR_INVALID_ARGUMENT); + + // Get the device filter + pMapping = gpuGetDeviceMapping(pRegisterAccess->pGpu, devIndex, devInstance); + NV_ASSERT_OR_RETURN(pMapping != NULL, NV_ERR_INVALID_ARGUMENT); + + pRegFilter = &pMapping->devRegFilterInfo; + + if (!pWriteCallback && !pReadCallback) + { + // At least one register callback needs to be passed. + NV_PRINTF(LEVEL_ERROR, + "Need to specify at least one callback function.\n"); + + return NV_ERR_NOT_SUPPORTED; + } + + NV_ASSERT(!(flags & REGISTER_FILTER_FLAGS_INVALID)); + + if ((flags & REGISTER_FILTER_FLAGS_READ) && !pReadCallback) + { + // If REGISTER_FILTER_FLAGS_READ is specified, then a read + // callback must also be specified. 
+ NV_PRINTF(LEVEL_ERROR, + "REGISTER_FILTER_FLAGS_READ requires a read callback function.\n"); + + return NV_ERR_INVALID_ARGUMENT; + } + + if ((flags & REGISTER_FILTER_FLAGS_WRITE) && !pWriteCallback) + { + // If REGISTER_FILTER_FLAGS_WRITE is specified, then a write + // callback must also be specified. + NV_PRINTF(LEVEL_ERROR, + "REGISTER_FILTER_FLAGS_WRITE requires a write callback function.\n"); + + return NV_ERR_INVALID_ARGUMENT; + } + + // If the regfilter hasn't been used yet, then allocate a lock + if (NULL == pRegFilter->pRegFilterLock) + { + // Allocate spinlock for reg filter access + pRegFilter->pRegFilterLock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + NV_ASSERT_OR_RETURN(pRegFilter->pRegFilterLock != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + } + + portSyncSpinlockAcquire(pRegFilter->pRegFilterLock); + + if (NULL != pRegFilter->pRegFilterRecycleList) + { + pNode = pRegFilter->pRegFilterRecycleList; + pRegFilter->pRegFilterRecycleList = pNode->pNext; + } + else + { + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); + pNode = portMemAllocNonPaged(sizeof(REGISTER_FILTER)); + if (NULL == pNode) + { + return NV_ERR_NO_MEMORY; + } + portSyncSpinlockAcquire(pRegFilter->pRegFilterLock); + } + + // Print a warning if there's another register filter already registered. + if (((pTmpNode = _findGpuRegisterFilter(devIndex, devInstance, rangeStart, pRegFilter->pRegFilterList)) != NULL) || + ((pTmpNode = _findGpuRegisterFilter(devIndex, devInstance, rangeEnd, pRegFilter->pRegFilterList)) != NULL)) + { + NV_PRINTF(LEVEL_WARNING, + "WARNING!! Previously registered reg filter found. Handle: %p, dev: " + "%d(%d) Range : 0x%x - 0x%x, WR/RD Callback: %p/%p, flags : %x\n", + pTmpNode, pTmpNode->devIndex, pTmpNode->devInstance, + pTmpNode->rangeStart, pTmpNode->rangeEnd, + pTmpNode->pWriteCallback, pTmpNode->pReadCallback, + pTmpNode->flags); + } + + // Populate structure + pNode->flags = flags; + pNode->devIndex = devIndex; + pNode->devInstance = devInstance; + pNode->rangeStart = rangeStart; + pNode->rangeEnd = rangeEnd; + pNode->pWriteCallback = pWriteCallback; + pNode->pReadCallback = pReadCallback; + pNode->pParam = pParam; + + // Link in + pNode->pNext = pRegFilter->pRegFilterList; + pRegFilter->pRegFilterList = pNode; + + // return pNode + *ppFilter = pNode; + + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); + return NV_OK; +} + +void +regRemoveRegisterFilter +( + RegisterAccess *pRegisterAccess, + REGISTER_FILTER *pFilter +) +{ + REGISTER_FILTER *pNode; + REGISTER_FILTER *pPrev = NULL; + REGISTER_FILTER *pNext = NULL; + DEVICE_REGFILTER_INFO *pRegFilter; + DEVICE_MAPPING *pMapping; + + // Get the device filter + pMapping = gpuGetDeviceMapping(pRegisterAccess->pGpu, pFilter->devIndex, pFilter->devInstance); + NV_ASSERT_OR_RETURN_VOID(pMapping != NULL); + + pRegFilter = &pMapping->devRegFilterInfo; + + portSyncSpinlockAcquire(pRegFilter->pRegFilterLock); + pNode = pRegFilter->pRegFilterList; + while (pNode) + { + // + // we could have used a doubly linked list to do a quick removal, but + // iterating the list to find the match serves as sanity test, so let's + // stick with a singly linked list. 
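+        // (Deferred-removal contract, sketched: if regFilterRefCnt != 0 a
+        //  reader is still walking the list, so a matching node is only
+        //  flagged REGISTER_FILTER_FLAGS_INVALID below and is physically
+        //  unlinked later by _gpuCleanRegisterFilterList() once the last
+        //  reader drops the refcount.)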
+        //
+        if (pNode == pFilter)
+        {
+            if (pRegFilter->regFilterRefCnt > 0)
+            {
+                // defer removal if another thread is working on the list
+                pNode->flags |= REGISTER_FILTER_FLAGS_INVALID;
+                pRegFilter->bRegFilterNeedRemove = NV_TRUE;
+                portSyncSpinlockRelease(pRegFilter->pRegFilterLock);
+                return;
+            }
+
+            // Unlink
+            pNext = pNode->pNext;
+
+            // place on recycle list
+            pNode->pNext = pRegFilter->pRegFilterRecycleList;
+            pRegFilter->pRegFilterRecycleList = pNode;
+
+            if (pPrev)
+            {
+                pPrev->pNext = pNext;
+            }
+            else
+            {
+                pRegFilter->pRegFilterList = pNext;
+            }
+
+            portSyncSpinlockRelease(pRegFilter->pRegFilterLock);
+            return;
+        }
+
+        pPrev = pNode;
+        pNode = pNode->pNext;
+    }
+    NV_ASSERT_FAILED("Attempted to remove a nonexistent filter");
+    portSyncSpinlockRelease(pRegFilter->pRegFilterLock);
+}
+
+// called with lock held
+static void
+_gpuCleanRegisterFilterList
+(
+    DEVICE_REGFILTER_INFO *pRegFilter
+)
+{
+    REGISTER_FILTER *pNode = pRegFilter->pRegFilterList;
+    REGISTER_FILTER *pPrev = NULL;
+    REGISTER_FILTER *pNext = NULL;
+
+    while (pNode)
+    {
+        if (pNode->flags & REGISTER_FILTER_FLAGS_INVALID)
+        {
+            // Unlink
+            pNext = pNode->pNext;
+
+            // place on recycle list
+            pNode->pNext = pRegFilter->pRegFilterRecycleList;
+            pRegFilter->pRegFilterRecycleList = pNode;
+
+            if (pPrev)
+            {
+                pPrev->pNext = pNext;
+            }
+            else
+            {
+                pRegFilter->pRegFilterList = pNext;
+            }
+
+            pNode = pNext;
+            continue;
+        }
+
+        pPrev = pNode;
+        pNode = pNode->pNext;
+    }
+}
+
+static NvU32
+_gpuHandleReadRegisterFilter
+(
+    OBJGPU            *pGpu,
+    DEVICE_INDEX       devIndex,
+    NvU32              devInstance,
+    NvU32              addr,
+    NvU32              accessSize,
+    NvU32             *pFlags,
+    THREAD_STATE_NODE *pThreadState
+)
+{
+    REGISTER_FILTER       *pFilter;
+    NvU32                  returnValue = 0;
+    NvU32                  tempVal = 0;
+    DEVICE_REGFILTER_INFO *pRegFilter;
+    DEVICE_MAPPING        *pMapping;
+
+    // Get the device filter
+    pMapping = gpuGetDeviceMapping(pGpu, devIndex, devInstance);
+    NV_ASSERT_OR_RETURN(pMapping != NULL, returnValue);
+
+    pRegFilter = &pMapping->devRegFilterInfo;
+
+    // If there is no filter, do nothing; just bail out.
+    if (pRegFilter->pRegFilterList == NULL)
+    {
+        return returnValue;
+    }
+
+    if (pThreadState != NULL)
+    {
+        // Filters should only be used while the GPU lock is held.
+        if (pThreadState->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS)
+        {
+            return returnValue;
+        }
+    }
+#ifdef DEBUG
+    else
+    {
+        THREAD_STATE_NODE *pCurThread;
+
+        if (NV_OK == threadStateGetCurrentUnchecked(&pCurThread, pGpu))
+        {
+            // Filters should only be used while the GPU lock is held.
+            // Assert because ISRs are expected to pass threadstate down the stack.
+            // Don't bail out, to keep release and debug path behavior identical.
+            if (pCurThread->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS)
+            {
+                NV_ASSERT(0);
+            }
+        }
+    }
+#endif
+
+    //
+    // NOTE: we can't simply grab the lock and release it after
+    //       the search since it is not safe to assume that
+    //       callbacks can be called with the spinlock held
+    //
+    portSyncSpinlockAcquire(pRegFilter->pRegFilterLock);
+    pRegFilter->regFilterRefCnt++;
+    portSyncSpinlockRelease(pRegFilter->pRegFilterLock);
+
+    //
+    // Note there is a potential race where a filter may be added or removed
+    // by one thread (dispatch) while another thread is searching the list.
+    // The refcount taken above prevents nodes from being freed or unlinked
+    // mid-walk; removals are deferred until the last walker has finished.
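+    // (Why not simply hold the spinlock across the walk? As noted above, the
+    //  filter callbacks cannot be assumed to be spinlock-safe, hence the
+    //  refcount-and-defer scheme instead of a conventional lock-protected
+    //  search.)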
+    //
+    pFilter = pRegFilter->pRegFilterList;
+    while ((pFilter) && (pFilter = _findGpuRegisterFilter(devIndex, devInstance, addr, pFilter)))
+    {
+        if (pFilter->pReadCallback)
+        {
+            tempVal = pFilter->pReadCallback(pGpu, pFilter->pParam, addr,
+                                             accessSize, *pFlags);
+            //
+            // If there are multiple filters, we use the last filter found to
+            // save returnValue
+            //
+            if (pFilter->flags & REGISTER_FILTER_FLAGS_READ)
+            {
+                returnValue = tempVal;
+            }
+        }
+        *pFlags |= pFilter->flags;
+        pFilter = pFilter->pNext;
+    }
+
+    portSyncSpinlockAcquire(pRegFilter->pRegFilterLock);
+    pRegFilter->regFilterRefCnt--;
+    if (pRegFilter->regFilterRefCnt == 0 && pRegFilter->bRegFilterNeedRemove)
+    {
+        // No other thread can be touching the list; remove invalid entries.
+        _gpuCleanRegisterFilterList(pRegFilter);
+        pRegFilter->bRegFilterNeedRemove = NV_FALSE;
+    }
+    portSyncSpinlockRelease(pRegFilter->pRegFilterLock);
+    return returnValue;
+}
+
+static void
+_gpuHandleWriteRegisterFilter
+(
+    OBJGPU            *pGpu,
+    DEVICE_INDEX       devIndex,
+    NvU32              devInstance,
+    NvU32              addr,
+    NvU32              val,
+    NvU32              accessSize,
+    NvU32             *pFlags,
+    THREAD_STATE_NODE *pThreadState
+)
+{
+    REGISTER_FILTER       *pFilter;
+    DEVICE_REGFILTER_INFO *pRegFilter;
+    DEVICE_MAPPING        *pMapping;
+
+    // Get the device filter
+    pMapping = gpuGetDeviceMapping(pGpu, devIndex, devInstance);
+    NV_ASSERT_OR_RETURN_VOID(pMapping != NULL);
+
+    pRegFilter = &pMapping->devRegFilterInfo;
+
+    // If there is no filter, do nothing; just bail out.
+    if (pRegFilter->pRegFilterList == NULL)
+    {
+        return;
+    }
+
+    if (pThreadState != NULL)
+    {
+        // Filters should only be used while the GPU lock is held.
+        if (pThreadState->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS)
+        {
+            return;
+        }
+    }
+#ifdef DEBUG
+    else
+    {
+        THREAD_STATE_NODE *pCurThread;
+
+        if (NV_OK == threadStateGetCurrentUnchecked(&pCurThread, pGpu))
+        {
+            // Filters should only be used while the GPU lock is held.
+            // Assert because ISRs are expected to pass threadstate down the stack.
+            // Don't bail out, to keep release and debug path behavior identical.
+            if (pCurThread->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS)
+            {
+                NV_ASSERT(0);
+            }
+        }
+    }
+#endif
+
+    //
+    // NOTE: we can't simply grab the lock and release it after
+    //       the search since it is not safe to assume that
+    //       callbacks can be called with the spinlock held
+    //
+    portSyncSpinlockAcquire(pRegFilter->pRegFilterLock);
+    pRegFilter->regFilterRefCnt++;
+    portSyncSpinlockRelease(pRegFilter->pRegFilterLock);
+
+    //
+    // Note there is a potential race where a filter may be added or removed
+    // by one thread (dispatch) while another thread is searching the list.
+    // The refcount taken above prevents nodes from being freed or unlinked
+    // mid-walk; removals are deferred until the last walker has finished.
+    //
+    pFilter = pRegFilter->pRegFilterList;
+    while ((pFilter) && (pFilter = _findGpuRegisterFilter(devIndex, devInstance, addr, pFilter)))
+    {
+        if (pFilter->pWriteCallback)
+        {
+            pFilter->pWriteCallback(pGpu, pFilter->pParam, addr, val,
+                                    accessSize, *pFlags);
+        }
+        *pFlags |= pFilter->flags;
+        pFilter = pFilter->pNext;
+    }
+
+    portSyncSpinlockAcquire(pRegFilter->pRegFilterLock);
+    pRegFilter->regFilterRefCnt--;
+    if (pRegFilter->regFilterRefCnt == 0 && pRegFilter->bRegFilterNeedRemove)
+    {
+        // No other thread can be touching the list; remove invalid entries.
+        _gpuCleanRegisterFilterList(pRegFilter);
+        pRegFilter->bRegFilterNeedRemove = NV_FALSE;
+    }
+    portSyncSpinlockRelease(pRegFilter->pRegFilterLock);
+}
+
+static REGISTER_FILTER *
+_findGpuRegisterFilter
+(
+    DEVICE_INDEX     devIndex,
+    NvU32            devInstance,
+    NvU32            addr,
+    REGISTER_FILTER *pFilter
+)
+{
+    while (pFilter != NULL)
+    {
+        if (!(pFilter->flags & REGISTER_FILTER_FLAGS_INVALID) &&
+            (devIndex == pFilter->devIndex) &&
+            (devInstance == pFilter->devInstance) &&
+            (addr >= pFilter->rangeStart) && (addr <= pFilter->rangeEnd))
+        {
+            break;
+        }
+
+        pFilter = pFilter->pNext;
+    }
+
+    return pFilter;
+}
+
+static NvBool
+_gpuEnablePciMemSpaceAndCheckPmcBoot0Match
+(
+    OBJGPU *pGpu
+)
+{
+    NvU16 VendorId;
+    NvU16 DeviceId;
+    NvU8  bus     = gpuGetBus(pGpu);
+    NvU8  device  = gpuGetDevice(pGpu);
+    NvU32 domain  = gpuGetDomain(pGpu);
+    void *Handle  = osPciInitHandle(domain, bus, device, 0, &VendorId, &DeviceId);
+    NvU32 Enabled = osPciReadDword(Handle, NV_CONFIG_PCI_NV_1);
+    NvU32 pmcBoot0;
+
+    // If memory space is not enabled, enable it
+    if (DRF_VAL(_CONFIG, _PCI_NV_1, _MEMORY_SPACE, Enabled) != NV_CONFIG_PCI_NV_1_MEMORY_SPACE_ENABLED)
+    {
+        osPciWriteDword(Handle, NV_CONFIG_PCI_NV_1,
+                        Enabled |
+                        (DRF_DEF(_CONFIG, _PCI_NV_1, _MEMORY_SPACE, _ENABLED) |
+                         DRF_DEF(_CONFIG, _PCI_NV_1, _BUS_MASTER, _ENABLED)));
+    }
+
+    // Check NV_PMC_BOOT_0 to make sure that it matches the cached chip id
+    pmcBoot0 = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0);
+    if (pmcBoot0 == pGpu->chipId0)
+    {
+        return NV_TRUE;
+    }
+
+    return NV_FALSE;
+}
+
+static NvU32
+_regCheckReadFailure
+(
+    OBJGPU *pGpu,
+    NvU32   value
+)
+{
+    NvU32 flagsFailed;
+    NvU32 reason = BAD_READ_UNKNOWN;
+
+    if ((!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH)) &&
+        (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST)))
+    {
+        gpuSanityCheck(pGpu, GPU_SANITY_CHECK_FLAGS_ALL, &flagsFailed);
+
+        // This is where we need to determine why we might be seeing this failure
+        if (value == GPU_REG_VALUE_INVALID)
+        {
+            // Does PCI space match?
+            if (flagsFailed & GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH)
+            {
+                reason = BAD_READ_GPU_OFF_BUS;
+                goto exit;
+            }
+
+            // Is memory space enabled?
+            if (flagsFailed & GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED)
+            {
+                reason = BAD_READ_PCI_DEVICE_DISABLED;
+
+                if (!_gpuEnablePciMemSpaceAndCheckPmcBoot0Match(pGpu))
+                {
+                    // We have been reset!
+ reason = BAD_READ_GPU_RESET; + goto exit; + } + } + } + + // Are we off by N + if (flagsFailed & GPU_SANITY_CHECK_FLAGS_OFF_BY_N) + { + reason = BAD_READ_DWORD_SHIFT; + } + } + else + { + reason = BAD_READ_LOW_POWER; + } + +exit: + return reason; +} + +void +regCheckAndLogReadFailure +( + RegisterAccess *pRegisterAccess, + NvU32 addr, + NvU32 mask, + NvU32 value +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + const NvU32 failureReason = _regCheckReadFailure(pGpu, value); + + PORT_UNREFERENCED_VARIABLE(failureReason); +} + +NvU32 +regCheckRead032 +( + RegisterAccess *pRegisterAccess, + NvU32 addr, + NvU32 mask, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 returnValue; + OBJGPU *pGpu = pRegisterAccess->pGpu; + + returnValue = GPU_REG_RD32_EX(pGpu, addr, pThreadState); + if (returnValue & mask) + { + if (!API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + regCheckAndLogReadFailure(pRegisterAccess, addr, mask, returnValue); + returnValue = 0; + } + + return returnValue; +} + +#if GPU_REGISTER_ACCESS_DUMP + +NvU8 +gpuRegRd08_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr) +{ + NvU8 val = REG_INST_RD08(pGpu, GPU, 0, addr); + // filter out duplicate read + static NvU32 prev_addr = 0; + static NvU8 prev_val = 0; + if (addr != prev_addr || val != prev_val) + { + // filter out bar0 windows registers (NV_PRAMIN -- range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "READ func: %s, reg name: %s, addr: %08x, val: %02x\n", + func, addrStr, addr, val); + } + prev_addr = addr; + prev_val = val; + } + return val; +} + +NvU16 +gpuRegRd16_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr) +{ + NvU16 val = REG_INST_RD16(pGpu, GPU, 0, addr); + // filter out duplicate read + static NvU32 prev_addr = 0; + static NvU16 prev_val = 0; + if (addr != prev_addr || val != prev_val) + { + // filter out bar0 windows registers (NV_PRAMIN -- range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "READ func: %s, reg name: %s, addr: %08x, val: %04x\n", + func, addrStr, addr, val); + } + prev_addr = addr; + prev_val = val; + } + return val; +} + +NvU32 +gpuRegRd32_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr) +{ + NvU32 val = REG_INST_RD32(pGpu, GPU, 0, addr); + // filter out duplicate read + static NvU32 prev_addr = 0; + static NvU32 prev_val = 0; + if (addr != prev_addr || val != prev_val) + { + // filter out bar0 windows registers (NV_PRAMIN -- range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "READ %s func: %s, reg name: %s, addr: %08x, val: %08x\n", + vreg, func, addrStr, addr, val); + } + prev_addr = addr; + prev_val = val; + } + return val; +} + +void +gpuRegWr08_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV8 val) +{ + // filter out bar0 windows registers (NV_PRAMIN -- range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "WRITE func: %s, reg name: %s, addr: %08x, val: %02x\n", + func, addrStr, addr, val); + } + REG_INST_WR08(pGpu, GPU, 0, addr, val); +} + +void +gpuRegWr16_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV16 val) +{ + // filter out bar0 windows registers (NV_PRAMIN -- range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "WRITE func: %s, reg 
name: %s, addr: %08x, val: %04x\n",
+                  func, addrStr, addr, val);
+    }
+    REG_INST_WR16(pGpu, GPU, 0, addr, val);
+}
+
+void
+gpuRegWr32_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV32 val)
+{
+    // filter out BAR0 window registers (NV_PRAMIN -- range 0x00700000:0x007FFFFF)
+    if ((addr & 0xFFF00000) != 0x00700000)
+    {
+        NV_PRINTF(LEVEL_NOTICE,
+                  "WRITE %s func: %s, reg name: %s, addr: %08x, val: %08x\n",
+                  vreg, func, addrStr, addr, val);
+    }
+    REG_INST_WR32(pGpu, GPU, 0, addr, val);
+}
+
+void
+gpuRegWr32Uc_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV32 val)
+{
+    // filter out BAR0 window registers (NV_PRAMIN -- range 0x00700000:0x007FFFFF)
+    if ((addr & 0xFFF00000) != 0x00700000)
+    {
+        NV_PRINTF(LEVEL_NOTICE,
+                  "WRITE func: %s, reg name: %s, addr: %08x, val: %08x\n",
+                  func, addrStr, addr, val);
+    }
+    REG_INST_WR32_UC(pGpu, GPU, 0, addr, val);
+}
+
+#endif // GPU_REGISTER_ACCESS_DUMP
+
+/*!
+ * @brief Do any sanity checks for the GPU's state before actually reading/writing to the chip.
+ *
+ * @param[in]  pGpu    OBJGPU pointer
+ * @param[in]  addr    Address of the register to be sanity checked
+ * @param[out] pRetVal Default return value for read accesses in case of
+ *                     sanity-check failure. Only for U032 HALs.
+ *
+ * @returns NV_ERR_GPU_IN_FULLCHIP_RESET if the GPU is in reset
+ *          NV_ERR_GPU_IS_LOST           if the GPU is inaccessible
+ *          NV_ERR_GPU_NOT_FULL_POWER    if the GPU is not at full power AND
+ *                                       the GPU is not in the resume codepath AND
+ *                                       sim low-power register access is disabled
+ *          NV_OK                        otherwise
+ */
+NV_STATUS
+gpuSanityCheckRegisterAccess_IMPL
+(
+    OBJGPU *pGpu,
+    NvU32   addr,
+    NvU32  *pRetVal
+)
+{
+    NV_STATUS status = NV_OK;
+    NvU32     retVal = ~0;
+
+    if (API_GPU_IN_RESET_SANITY_CHECK(pGpu))
+    {
+        status = NV_ERR_GPU_IN_FULLCHIP_RESET;
+        goto done;
+    }
+
+    if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu))
+    {
+        status = NV_ERR_GPU_IS_LOST;
+        goto done;
+    }
+
+    if ((status = gpuSanityCheckVirtRegAccess_HAL(pGpu, addr)) != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Invalid register access on VF, addr: 0x%x\n", addr);
+        osAssertFailed();
+
+        // Return 0 to match HW behavior
+        retVal = 0;
+        goto done;
+    }
+
+    //
+    // Make sure the GPU is at full power or resuming. When the OS has put the
+    // GPU in suspend (i.e. any of the D3 variants) there's no guarantee the GPU is
+    // accessible over PCI-E: the GPU may be completely powered off, the
+    // upstream bridges may not be properly configured, etc. Attempts to access
+    // the GPU may then result in PCI-E errors and/or bugchecks. For examples,
+    // see Bugs 440565 and 479003.
+    // On MsHybrid, the OS will make sure we are up and alive before calling
+    // into the driver, so we can skip this check there.
+    //
+    // DO NOT IGNORE OR REMOVE THIS ASSERT. It is a warning that improperly
+    // written RM code further up the stack is trying to access a GPU which is
+    // in suspend (i.e. low power). Any entry points into the RM (especially
+    // those between GPUs or for asynchronous callbacks) should always check
+    // that the GPU is in full power via gpuIsGpuFullPower(), bailing out in the
+    // appropriate manner when it returns NV_FALSE.
+    //
+    // If you are not an RM engineer and are encountering this assert, please
+    // file a bug against the RM.
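+    //
+    // A minimal caller-side sketch of the expected pattern (illustrative
+    // only, not a prescribed API):
+    //
+    //     if (!gpuIsGpuFullPower(pGpu))
+    //         return NV_ERR_GPU_NOT_FULL_POWER; // bail out before touching registers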
+ // + if ((gpuIsGpuFullPower(pGpu) == NV_FALSE) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) + { + DBG_BREAKPOINT(); + status = NV_ERR_GPU_NOT_FULL_POWER; + goto done; + } + + // TODO: More complete sanity checking + +done: + // Assign the return value + if ((status != NV_OK) && (pRetVal != NULL)) + { + *pRetVal = retVal; + } + return status; +} + +/** + * @brief checks if the register offset is valid + * + * @param[in] pGpu + * @param[in] offset + * @param[in] bSkipPermissionValidation + * + * @returns NV_OK if valid + * @returns NV_ERR_INVALID_ARGUMENT if offset is too large for bar + * @returns NV_ERR_INSUFFICIENT_PERMISSIONS if user is not authorized to access register + */ +NV_STATUS +gpuValidateRegOffset_IMPL +( + OBJGPU *pGpu, + NvU32 offset, + NvBool bSkipPermissionValidation +) +{ + NvU64 maxBar0Size = pGpu->deviceMappings[0].gpuNvLength; + + // The register offset should be 4 bytes smaller than the max bar size + if (offset > (maxBar0Size - 4)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Regop calls are typically subject to the allowlist, however certain regop calls might originate from RM on behalf of user-space clients. + // In these cases, we can skip permission validation so that these ctrl calls can be made by any UMD without adding these registers to the allowlist. + // To bypass this check, set bSkipPermissionValidation to true in gpuExecRegOps(). + if (!bSkipPermissionValidation && !osIsAdministrator() && + !gpuGetUserRegisterAccessPermissions(pGpu, offset)) + { + NV_PRINTF(LEVEL_ERROR, + "User does not have permission to access register offset 0x%x\n", + offset); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + return NV_OK; +} + +/*! + * @brief Verify existence function. + * + * @param[in] pGpu + * + * @returns NV_OK if GPU is still accessible + * NV_ERR_INVALID_STATE if GPU is inaccessible + */ +NV_STATUS +gpuVerifyExistence_IMPL +( + OBJGPU *pGpu +) +{ + NvU32 regVal = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0); + + if (regVal != pGpu->chipId0) + { + osHandleGpuLost(pGpu); + regVal = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0); + if (regVal != pGpu->chipId0) + { + return NV_ERR_GPU_IS_LOST; + } + } + + return NV_OK; +} + +/*! + * @brief Perform a sanity check on a register read value + * Starts with gpu-independent check, then calls into HAL for specific cases + * + * @param[in] pGpu GPU object pointer + * @param[in] addr Value address + * @param[in] size Access size + * @param[in/out] pValue Value to sanity check + */ +NV_STATUS +gpuSanityCheckRegRead_IMPL +( + OBJGPU *pGpu, + NvU32 addr, + NvU32 size, + void *pValue +) +{ + NvU8 *pValue8; + NvU16 *pValue16; + NvU32 *pValue32; + NvU32 value; + + switch (size) + { + case 8: + { + pValue8 = ((NvU8 *) pValue); + if (*pValue8 == (NvU8) (~0)) + { + // + // The result looks suspicious, let's check if the GPU is still attached. + // + NvU32 testValue = osGpuReadReg032(pGpu, NV_PMC_BOOT_0); + if (testValue == GPU_REG_VALUE_INVALID) + { + osHandleGpuLost(pGpu); + *pValue8 = osGpuReadReg008(pGpu, addr); + } + } + break; + } + case 16: + { + pValue16 = ((NvU16 *) pValue); + if (*pValue16 == (NvU16) (~0)) + { + // + // The result looks suspicious, let's check if the GPU is still attached. 
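+                // (NV_PMC_BOOT_0 holds the chip id and never legitimately
+                //  reads back as all 1s, so it serves as a cheap probe of
+                //  whether the GPU is still on the bus.)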
+ // + NvU32 testValue = osGpuReadReg032(pGpu, NV_PMC_BOOT_0); + if (testValue == GPU_REG_VALUE_INVALID) + { + osHandleGpuLost(pGpu); + *pValue16 = osGpuReadReg016(pGpu, addr); + } + } + break; + } + case 32: + { + pValue32 = ((NvU32 *) pValue); + if (*pValue32 == (NvU32) (~0)) + { + // + // The result looks suspicious, let's check if the GPU is still attached. + // + NvU32 testValue = osGpuReadReg032(pGpu, NV_PMC_BOOT_0); + if (testValue == GPU_REG_VALUE_INVALID) + { + osHandleGpuLost(pGpu); + *pValue32 = osGpuReadReg032(pGpu, addr); + } + } + + value = *((NvU32 *)pValue); + + // + // HW will return 0xbad in the upper 3 nibbles + // when there is a possible issue. + // + if ((value & GPU_READ_PRI_ERROR_MASK) == GPU_READ_PRI_ERROR_CODE) + { + gpuHandleSanityCheckRegReadError_HAL(pGpu, addr, value); + } + break; + } + default: + { + NV_ASSERT_FAILED("Invalid access size"); + break; + } + } + + return NV_OK; +} + + +NV_STATUS swbcaprtConstruct_IMPL +( + SwBcAperture *pAperture, + IoAperture *pApertures, + NvU32 numApertures +) +{ + NV_ASSERT_OR_RETURN(numApertures != 0, NV_ERR_INVALID_ARGUMENT); + + pAperture->pApertures = pApertures; + pAperture->numApertures = numApertures; + + return NV_OK; +} + +NvU8 +swbcaprtReadReg08_IMPL +( + SwBcAperture *pAperture, + NvU32 addr +) +{ + NvU8 val = REG_RD08(&pAperture->pApertures[0], addr); + +#if defined(DEBUG) + NvU32 i; + for (i = 1; i < pAperture->numApertures; i++) + NV_ASSERT(REG_RD08(&pAperture->pApertures[i], addr) == val); +#endif // defined(DEBUG) + + return val; +} + +NvU16 +swbcaprtReadReg16_IMPL +( + SwBcAperture *pAperture, + NvU32 addr +) +{ + NvU16 val = REG_RD16(&pAperture->pApertures[0], addr); + +#if defined(DEBUG) + NvU32 i; + for (i = 1; i < pAperture->numApertures; i++) + NV_ASSERT(REG_RD16(&pAperture->pApertures[i], addr) == val); +#endif // defined(DEBUG) + + return val; +} + +NvU32 +swbcaprtReadReg32_IMPL +( + SwBcAperture *pAperture, + NvU32 addr +) +{ + NvU32 val = REG_RD32(&pAperture->pApertures[0], addr); + +#if defined(DEBUG) + NvU32 i; + for (i = 1; i < pAperture->numApertures; i++) + NV_ASSERT(REG_RD32(&pAperture->pApertures[i], addr) == val); +#endif // defined(DEBUG) + + return val; +} + +void +swbcaprtWriteReg08_IMPL +( + SwBcAperture *pAperture, + NvU32 addr, + NvV8 value +) +{ + NvU32 i; + + for (i = 0; i < pAperture->numApertures; i++) + REG_WR08(&pAperture->pApertures[i], addr, value); +} + +void +swbcaprtWriteReg16_IMPL +( + SwBcAperture *pAperture, + NvU32 addr, + NvV16 value +) +{ + NvU32 i; + + for (i = 0; i < pAperture->numApertures; i++) + REG_WR16(&pAperture->pApertures[i], addr, value); +} + +void +swbcaprtWriteReg32_IMPL +( + SwBcAperture *pAperture, + NvU32 addr, + NvV32 value +) +{ + NvU32 i; + + for (i = 0; i < pAperture->numApertures; i++) + REG_WR32(&pAperture->pApertures[i], addr, value); +} + +void +swbcaprtWriteReg32Uc_IMPL +( + SwBcAperture *pAperture, + NvU32 addr, + NvV32 value +) +{ + NvU32 i; + + for (i = 0; i < pAperture->numApertures; i++) + REG_WR32_UC(&pAperture->pApertures[i], addr, value); +} + +NvBool +swbcaprtIsRegValid_IMPL +( + SwBcAperture *pAperture, + NvU32 addr +) +{ + + NvU32 i; + + for (i = 0; i < pAperture->numApertures; i++) + { + if (!REG_VALID(&pAperture->pApertures[i], addr)) + return NV_FALSE; + } + + return NV_TRUE; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_arch.c b/src/nvidia/src/kernel/gpu/gpu_arch.c new file mode 100644 index 0000000..1f7b964 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_arch.c @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 
2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/gpu_arch.h" + +NV_STATUS gpuarchConstruct_IMPL +( + GpuArch *pGpuArch, + NvU32 chipArch, + NvU32 chipImpl, + NvU32 hidrev, + TEGRA_CHIP_TYPE tegraType +) +{ + pGpuArch->chipArch = chipArch; + pGpuArch->chipImpl = chipImpl; + pGpuArch->hidrev = hidrev; + pGpuArch->tegraType = tegraType; + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_device_mapping.c b/src/nvidia/src/kernel/gpu/gpu_device_mapping.c new file mode 100644 index 0000000..7adaa44 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_device_mapping.c @@ -0,0 +1,329 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/gpu.h" +#include "gpu/gpu_device_mapping.h" +#include "core/thread_state.h" +#include "nv_ref.h" + +/** + * @brief Finds the device mapping matching the specified address and device index + * + * @param[in] pGpu + * @param[in] deviceIndex device specific device enum (DEVICE_INDEX_*) + * @param[in] addr device register address + * + * @returns matching mapping, or NULL if not found. 
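+ *
+ * (Illustrative note: on a discrete GPU this resolves to the BAR0 mapping,
+ * &pGpu->deviceMappings[0], as the fallback below shows.)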
+ */ +static DEVICE_MAPPING * +_gpuFindDeviceMapping +( + OBJGPU *pGpu, + DEVICE_INDEX deviceIndex, + NvU32 instance +) +{ + NvU32 i; + NvU32 devId = 0; + DEVICE_ID_MAPPING *deviceIdMapping; + NvU32 numDeviceIDs; + + numDeviceIDs = gpuGetDeviceIDList_HAL(pGpu, &deviceIdMapping); + + // Find the devID that matches the requested device index + for (i = 0; i < numDeviceIDs; i++) + { + if (deviceIdMapping[i].deviceIndex == deviceIndex) + { + devId = deviceIdMapping[i].devId; + break; + } + } + + if (devId == 0) + { + // For discrete GPU, just return BAR0 mapping + if (deviceIndex == DEVICE_INDEX_GPU) + { + return &pGpu->deviceMappings[0]; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Could not find mapping for deviceIndex=%d\n", + deviceIndex); + return NULL; + } + } + return gpuGetDeviceMappingFromDeviceID(pGpu, devId, instance); +} + +DEVICE_MAPPING * +gpuGetDeviceMapping_IMPL +( + OBJGPU *pGpu, + DEVICE_INDEX deviceIndex, + NvU32 instance +) +{ + // Fast lookup path for first instance of a device + if ((deviceIndex < DEVICE_INDEX_MAX) && (instance == 0)) + { + if (!pGpu->pDeviceMappingsByDeviceInstance[deviceIndex]) + { + pGpu->pDeviceMappingsByDeviceInstance[deviceIndex] = _gpuFindDeviceMapping(pGpu, deviceIndex, instance); + } + return pGpu->pDeviceMappingsByDeviceInstance[deviceIndex]; + } + + return _gpuFindDeviceMapping(pGpu, deviceIndex, instance); +} + +/** + * @brief Returns the device mapping matching the specified device ID from + * project relocation table + * + * @param[in] pGpu OBJGPU pointer + * @param[in] deviceId device ID from project relocation table + * @param[in] instance instance of the particular device ID + * + * @returns matching mapping, or NULL if not found. + */ + +DEVICE_MAPPING * +gpuGetDeviceMappingFromDeviceID_IMPL +( + OBJGPU *pGpu, + NvU32 deviceId, + NvU32 instance +) +{ + NvU32 i; + + // + // For SOC, walk the list of devices to find the device/instance requested. + // For GPU (legacy), only NV_DEVID_GPU(0) is expected & allowed + // + if (pGpu->bIsSOC) + { + for (i = 0; i < pGpu->gpuDeviceMapCount; i++) + { + if (pGpu->deviceMappings[i].gpuDeviceEnum == deviceId) + { + // Find the Nth instance of the requested device + if (instance) + instance--; + else + return &pGpu->deviceMappings[i]; + } + } + + NV_PRINTF(LEVEL_ERROR, "Could not find mapping for deviceId=%d\n", + deviceId); + } + else + { + // For GPU, always assume NV_DEVID_GPU instance 0. 
+ NV_ASSERT(instance == 0); + NV_ASSERT(pGpu->gpuDeviceMapCount == 1); + + return &pGpu->deviceMappings[0]; + } + + return NULL; +} + +static NvBool _gpuCheckIsBar0OffByN(OBJGPU *pGpu) +{ + NvU32 i, pmcBoot0; + + // Check to see if we can find PMC_BOOT_0 + for (i = 0; i < 20; i++) + { + pmcBoot0 = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0 + (i * 4)); + if (pmcBoot0 == pGpu->chipId0) + { + break; + } + } + + if ((i != 0) && (i != 20)) + { + // We are off by N + return NV_TRUE; + } + + // Everything looks ok + return NV_FALSE; +} + +static NvBool _gpuCheckDoesPciSpaceMatch(OBJGPU *pGpu) +{ + NvU16 VendorId; + NvU16 DeviceId; + NvU8 bus = gpuGetBus(pGpu); + NvU8 device = gpuGetDevice(pGpu); + NvU32 domain = gpuGetDomain(pGpu); + + osPciInitHandle(domain, bus, device, 0, &VendorId, &DeviceId); + if ((DeviceId == 0xFFFF) || + (VendorId != 0x10DE)) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +static NvBool _gpuCheckIsPciMemSpaceEnabled(OBJGPU *pGpu) +{ + NvU16 VendorId; + NvU16 DeviceId; + NvU8 bus = gpuGetBus(pGpu); + NvU8 device = gpuGetDevice(pGpu); + NvU32 domain = gpuGetDomain(pGpu); + void *Handle = osPciInitHandle(domain, bus, device, 0, &VendorId, &DeviceId); + NvU32 Enabled = osPciReadDword(Handle, NV_CONFIG_PCI_NV_1); + + // Is Memory Spaced Enabled + if (DRF_VAL(_CONFIG, _PCI_NV_1, _MEMORY_SPACE, Enabled) != NV_CONFIG_PCI_NV_1_MEMORY_SPACE_ENABLED) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +NV_STATUS gpuSanityCheck_IMPL +( + OBJGPU *pGpu, + NvU32 flags, + NvU32 *pFlagsFailed +) +{ + NV_STATUS rmStatus = NV_OK; + NvU32 flagsFailed = GPU_SANITY_CHECK_FLAGS_NONE; + THREAD_STATE_NODE *pThreadNode = NULL; + + if (pFlagsFailed != NULL) + { + *pFlagsFailed = GPU_SANITY_CHECK_FLAGS_NONE; + } + + if (pGpu->bIsSOC) + { + flags &= ~( + GPU_SANITY_CHECK_FLAGS_BOOT_0 | + GPU_SANITY_CHECK_FLAGS_OFF_BY_N | + GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH | + GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED | + GPU_SANITY_CHECK_FLAGS_FB); + + } + + // + // Check to make sure the lock is held for this thread as the underlying + // functions can touch state and lists that expect exclusive access. + // + rmStatus = threadStateGetCurrent(&pThreadNode, pGpu); + if (rmStatus != NV_OK) + { + return rmStatus; + } + if (pThreadNode->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS) + { + return NV_ERR_NOT_SUPPORTED; + } + + // Check to make sure we are powered on first + if (gpuIsGpuFullPower(pGpu) == NV_FALSE) + { + NV_ASSERT(0); + return NV_ERR_GPU_NOT_FULL_POWER; + } + + if (flags & GPU_SANITY_CHECK_FLAGS_BOOT_0) + { + // + // When GPU is in reset reg reads will return 0xFFFFFFFF. + // Without this check RM would keep hitting assert during TDR recovery. 
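+        // (e.g. with the GPU in full-chip reset, the PMC_BOOT_0 read below
+        //  would return 0xFFFFFFFF and be misreported as a chip-id mismatch.)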
+        //
+        if (!API_GPU_IN_RESET_SANITY_CHECK(pGpu))
+        {
+            NvU32 pmcBoot0 = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0);
+            if (pmcBoot0 != pGpu->chipId0)
+            {
+                flagsFailed |= GPU_SANITY_CHECK_FLAGS_BOOT_0;
+                NV_ASSERT(0);
+            }
+        }
+    }
+
+    if (flags & GPU_SANITY_CHECK_FLAGS_OFF_BY_N)
+    {
+        if (_gpuCheckIsBar0OffByN(pGpu))
+        {
+            flagsFailed |= GPU_SANITY_CHECK_FLAGS_OFF_BY_N;
+            NV_ASSERT(0);
+        }
+    }
+
+    if (flags & GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH)
+    {
+        if (!_gpuCheckDoesPciSpaceMatch(pGpu))
+        {
+            flagsFailed |= GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH;
+            NV_ASSERT(0);
+        }
+    }
+
+    if (flags & GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED)
+    {
+        if (!_gpuCheckIsPciMemSpaceEnabled(pGpu))
+        {
+            flagsFailed |= GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED;
+            NV_ASSERT(0);
+        }
+    }
+
+    if (flags & GPU_SANITY_CHECK_FLAGS_FB)
+    {
+        if (!gpuIsGpuFullPower(pGpu))
+        {
+            NV_ASSERT(0);
+        }
+    }
+
+    if (flagsFailed != GPU_SANITY_CHECK_FLAGS_NONE)
+    {
+        rmStatus = NV_ERR_GENERIC;
+        NV_PRINTF(LEVEL_ERROR, "Failed test flags: 0x%x\n", flagsFailed);
+    }
+
+    if (pFlagsFailed != NULL)
+    {
+        *pFlagsFailed = flagsFailed;
+    }
+
+    return rmStatus;
+}
diff --git a/src/nvidia/src/kernel/gpu/gpu_engine_type.c b/src/nvidia/src/kernel/gpu/gpu_engine_type.c
new file mode 100644
index 0000000..8c61689
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/gpu_engine_type.c
@@ -0,0 +1,370 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*!
+ * @file
+ * @brief ENGINE_TYPE controls
+ */
+
+#include "kernel/gpu/gpu.h"
+#include "kernel/gpu/nvbitmask.h"
+
+ct_assert(RM_ENGINE_TYPE_LAST == NV2080_ENGINE_TYPE_LAST);
+
+/*!
+ * @brief Convert an NV2080 engine type to the RM-internal engine type
+ *
+ * RM internally uses RM engine types instead of NV2080 engine types.
+ * Some clients, like VGPU and CUDA, need cross-branch compatibility, so
+ * NV2080_ENGINE_TYPE must stay consistent. When we add new engine types,
+ * especially to grow an existing engine group, we cannot insert numbers in
+ * the middle, as that would renumber the remaining NV2080_ENGINE_TYPEs. RM,
+ * however, needs to keep engines of the same type grouped together. The
+ * solution is therefore to keep the RM and NV2080 engine types separate;
+ * whenever an engine type crosses the RM boundary, through control calls or
+ * RPC calls, it must be converted.
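+ *
+ * (Worked example: gpuGetRmEngineType(NV2080_ENGINE_TYPE_COPY3) yields
+ * RM_ENGINE_TYPE_COPY3, an out-of-range index yields RM_ENGINE_TYPE_LAST,
+ * and an unmapped in-range value falls through to RM_ENGINE_TYPE_NULL.)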
+ * + * @param[in] index NV2080_ENGINE_TYPE number + * + * @returns RM_ENGINE_TYPE number + * RM_ENGINE_TYPE_LAST if the index is invalid + */ +RM_ENGINE_TYPE gpuGetRmEngineType_IMPL(NvU32 index) +{ + // + // The passed in index generally comes from outside RM itself w/o any checking. To + // avoid log spam, we no longer assert on the value per the new policy on parameter + // validation. + // + NV_CHECK_OR_RETURN(LEVEL_INFO, index < NV2080_ENGINE_TYPE_LAST, RM_ENGINE_TYPE_LAST); + + switch (index) + { + case NV2080_ENGINE_TYPE_NULL: return RM_ENGINE_TYPE_NULL; + case NV2080_ENGINE_TYPE_GR0: return RM_ENGINE_TYPE_GR0; + case NV2080_ENGINE_TYPE_GR1: return RM_ENGINE_TYPE_GR1; + case NV2080_ENGINE_TYPE_GR2: return RM_ENGINE_TYPE_GR2; + case NV2080_ENGINE_TYPE_GR3: return RM_ENGINE_TYPE_GR3; + case NV2080_ENGINE_TYPE_GR4: return RM_ENGINE_TYPE_GR4; + case NV2080_ENGINE_TYPE_GR5: return RM_ENGINE_TYPE_GR5; + case NV2080_ENGINE_TYPE_GR6: return RM_ENGINE_TYPE_GR6; + case NV2080_ENGINE_TYPE_GR7: return RM_ENGINE_TYPE_GR7; + case NV2080_ENGINE_TYPE_COPY0: return RM_ENGINE_TYPE_COPY0; + case NV2080_ENGINE_TYPE_COPY1: return RM_ENGINE_TYPE_COPY1; + case NV2080_ENGINE_TYPE_COPY2: return RM_ENGINE_TYPE_COPY2; + case NV2080_ENGINE_TYPE_COPY3: return RM_ENGINE_TYPE_COPY3; + case NV2080_ENGINE_TYPE_COPY4: return RM_ENGINE_TYPE_COPY4; + case NV2080_ENGINE_TYPE_COPY5: return RM_ENGINE_TYPE_COPY5; + case NV2080_ENGINE_TYPE_COPY6: return RM_ENGINE_TYPE_COPY6; + case NV2080_ENGINE_TYPE_COPY7: return RM_ENGINE_TYPE_COPY7; + case NV2080_ENGINE_TYPE_COPY8: return RM_ENGINE_TYPE_COPY8; + case NV2080_ENGINE_TYPE_COPY9: return RM_ENGINE_TYPE_COPY9; + case NV2080_ENGINE_TYPE_NVDEC0: return RM_ENGINE_TYPE_NVDEC0; + case NV2080_ENGINE_TYPE_NVDEC1: return RM_ENGINE_TYPE_NVDEC1; + case NV2080_ENGINE_TYPE_NVDEC2: return RM_ENGINE_TYPE_NVDEC2; + case NV2080_ENGINE_TYPE_NVDEC3: return RM_ENGINE_TYPE_NVDEC3; + case NV2080_ENGINE_TYPE_NVDEC4: return RM_ENGINE_TYPE_NVDEC4; + case NV2080_ENGINE_TYPE_NVDEC5: return RM_ENGINE_TYPE_NVDEC5; + case NV2080_ENGINE_TYPE_NVDEC6: return RM_ENGINE_TYPE_NVDEC6; + case NV2080_ENGINE_TYPE_NVDEC7: return RM_ENGINE_TYPE_NVDEC7; + case NV2080_ENGINE_TYPE_NVENC0: return RM_ENGINE_TYPE_NVENC0; + case NV2080_ENGINE_TYPE_NVENC1: return RM_ENGINE_TYPE_NVENC1; + case NV2080_ENGINE_TYPE_NVENC2: return RM_ENGINE_TYPE_NVENC2; + case NV2080_ENGINE_TYPE_NVENC3: return RM_ENGINE_TYPE_NVENC3; + case NV2080_ENGINE_TYPE_VP: return RM_ENGINE_TYPE_VP; + case NV2080_ENGINE_TYPE_ME: return RM_ENGINE_TYPE_ME; + case NV2080_ENGINE_TYPE_PPP: return RM_ENGINE_TYPE_PPP; + case NV2080_ENGINE_TYPE_MPEG: return RM_ENGINE_TYPE_MPEG; + case NV2080_ENGINE_TYPE_SW: return RM_ENGINE_TYPE_SW; + case NV2080_ENGINE_TYPE_TSEC: return RM_ENGINE_TYPE_TSEC; + case NV2080_ENGINE_TYPE_VIC: return RM_ENGINE_TYPE_VIC; + case NV2080_ENGINE_TYPE_MP: return RM_ENGINE_TYPE_MP; + case NV2080_ENGINE_TYPE_SEC2: return RM_ENGINE_TYPE_SEC2; + case NV2080_ENGINE_TYPE_HOST: return RM_ENGINE_TYPE_HOST; + case NV2080_ENGINE_TYPE_DPU: return RM_ENGINE_TYPE_DPU; + case NV2080_ENGINE_TYPE_PMU: return RM_ENGINE_TYPE_PMU; + case NV2080_ENGINE_TYPE_FBFLCN: return RM_ENGINE_TYPE_FBFLCN; + case NV2080_ENGINE_TYPE_NVJPEG0: return RM_ENGINE_TYPE_NVJPEG0; + case NV2080_ENGINE_TYPE_NVJPEG1: return RM_ENGINE_TYPE_NVJPEG1; + case NV2080_ENGINE_TYPE_NVJPEG2: return RM_ENGINE_TYPE_NVJPEG2; + case NV2080_ENGINE_TYPE_NVJPEG3: return RM_ENGINE_TYPE_NVJPEG3; + case NV2080_ENGINE_TYPE_NVJPEG4: return RM_ENGINE_TYPE_NVJPEG4; + case NV2080_ENGINE_TYPE_NVJPEG5: return 
RM_ENGINE_TYPE_NVJPEG5; + case NV2080_ENGINE_TYPE_NVJPEG6: return RM_ENGINE_TYPE_NVJPEG6; + case NV2080_ENGINE_TYPE_NVJPEG7: return RM_ENGINE_TYPE_NVJPEG7; + case NV2080_ENGINE_TYPE_OFA0: return RM_ENGINE_TYPE_OFA0; + case NV2080_ENGINE_TYPE_OFA1: return RM_ENGINE_TYPE_OFA1; + case NV2080_ENGINE_TYPE_COPY10: return RM_ENGINE_TYPE_COPY10; + case NV2080_ENGINE_TYPE_COPY11: return RM_ENGINE_TYPE_COPY11; + case NV2080_ENGINE_TYPE_COPY12: return RM_ENGINE_TYPE_COPY12; + case NV2080_ENGINE_TYPE_COPY13: return RM_ENGINE_TYPE_COPY13; + case NV2080_ENGINE_TYPE_COPY14: return RM_ENGINE_TYPE_COPY14; + case NV2080_ENGINE_TYPE_COPY15: return RM_ENGINE_TYPE_COPY15; + case NV2080_ENGINE_TYPE_COPY16: return RM_ENGINE_TYPE_COPY16; + case NV2080_ENGINE_TYPE_COPY17: return RM_ENGINE_TYPE_COPY17; + case NV2080_ENGINE_TYPE_COPY18: return RM_ENGINE_TYPE_COPY18; + case NV2080_ENGINE_TYPE_COPY19: return RM_ENGINE_TYPE_COPY19; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY0: return RM_ENGINE_TYPE_COPY0; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY1: return RM_ENGINE_TYPE_COPY1; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY2: return RM_ENGINE_TYPE_COPY2; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY3: return RM_ENGINE_TYPE_COPY3; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY4: return RM_ENGINE_TYPE_COPY4; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY5: return RM_ENGINE_TYPE_COPY5; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY6: return RM_ENGINE_TYPE_COPY6; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY7: return RM_ENGINE_TYPE_COPY7; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY8: return RM_ENGINE_TYPE_COPY8; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY9: return RM_ENGINE_TYPE_COPY9; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY10: return RM_ENGINE_TYPE_COPY10; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY11: return RM_ENGINE_TYPE_COPY11; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY12: return RM_ENGINE_TYPE_COPY12; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY13: return RM_ENGINE_TYPE_COPY13; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY14: return RM_ENGINE_TYPE_COPY14; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY15: return RM_ENGINE_TYPE_COPY15; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY16: return RM_ENGINE_TYPE_COPY16; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY17: return RM_ENGINE_TYPE_COPY17; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY18: return RM_ENGINE_TYPE_COPY18; + case NV2080_ENGINE_TYPE_COMP_DECOMP_COPY19: return RM_ENGINE_TYPE_COPY19; + default: break; + } + + return RM_ENGINE_TYPE_NULL; +} + +/*! + * @brief Convert RM engine type to NV2080 engine type + * + * Refer to the comments of gpuGetRmEngineType_IMPL + * + * @param[in] index RM_ENGINE_TYPE number + * + * @returns NV2080_ENGINE_TYPE number + * NV2080_ENGINE_TYPE_LAST if the index is invalid + */ +NvU32 gpuGetNv2080EngineType_IMPL(RM_ENGINE_TYPE index) +{ + // + // RM itself should never generate an out of range value, so we + // continue to assert here to catch internal RM programming errors. 
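+    // (Round-trip sketch, for illustration:
+    //  gpuGetRmEngineType(gpuGetNv2080EngineType(rmType)) == rmType for any
+    //  mapped engine; the NV2080 COMP_DECOMP_COPY* aliases are one-way and
+    //  collapse onto the corresponding RM_ENGINE_TYPE_COPY* values.)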
+ // + NV_ASSERT_OR_RETURN(index < RM_ENGINE_TYPE_LAST, NV2080_ENGINE_TYPE_LAST); + + switch (index) + { + case RM_ENGINE_TYPE_NULL: return NV2080_ENGINE_TYPE_NULL; + case RM_ENGINE_TYPE_GR0: return NV2080_ENGINE_TYPE_GR0; + case RM_ENGINE_TYPE_GR1: return NV2080_ENGINE_TYPE_GR1; + case RM_ENGINE_TYPE_GR2: return NV2080_ENGINE_TYPE_GR2; + case RM_ENGINE_TYPE_GR3: return NV2080_ENGINE_TYPE_GR3; + case RM_ENGINE_TYPE_GR4: return NV2080_ENGINE_TYPE_GR4; + case RM_ENGINE_TYPE_GR5: return NV2080_ENGINE_TYPE_GR5; + case RM_ENGINE_TYPE_GR6: return NV2080_ENGINE_TYPE_GR6; + case RM_ENGINE_TYPE_GR7: return NV2080_ENGINE_TYPE_GR7; + case RM_ENGINE_TYPE_COPY0: return NV2080_ENGINE_TYPE_COPY0; + case RM_ENGINE_TYPE_COPY1: return NV2080_ENGINE_TYPE_COPY1; + case RM_ENGINE_TYPE_COPY2: return NV2080_ENGINE_TYPE_COPY2; + case RM_ENGINE_TYPE_COPY3: return NV2080_ENGINE_TYPE_COPY3; + case RM_ENGINE_TYPE_COPY4: return NV2080_ENGINE_TYPE_COPY4; + case RM_ENGINE_TYPE_COPY5: return NV2080_ENGINE_TYPE_COPY5; + case RM_ENGINE_TYPE_COPY6: return NV2080_ENGINE_TYPE_COPY6; + case RM_ENGINE_TYPE_COPY7: return NV2080_ENGINE_TYPE_COPY7; + case RM_ENGINE_TYPE_COPY8: return NV2080_ENGINE_TYPE_COPY8; + case RM_ENGINE_TYPE_COPY9: return NV2080_ENGINE_TYPE_COPY9; + case RM_ENGINE_TYPE_COPY10: return NV2080_ENGINE_TYPE_COPY10; + case RM_ENGINE_TYPE_COPY11: return NV2080_ENGINE_TYPE_COPY11; + case RM_ENGINE_TYPE_COPY12: return NV2080_ENGINE_TYPE_COPY12; + case RM_ENGINE_TYPE_COPY13: return NV2080_ENGINE_TYPE_COPY13; + case RM_ENGINE_TYPE_COPY14: return NV2080_ENGINE_TYPE_COPY14; + case RM_ENGINE_TYPE_COPY15: return NV2080_ENGINE_TYPE_COPY15; + case RM_ENGINE_TYPE_COPY16: return NV2080_ENGINE_TYPE_COPY16; + case RM_ENGINE_TYPE_COPY17: return NV2080_ENGINE_TYPE_COPY17; + case RM_ENGINE_TYPE_COPY18: return NV2080_ENGINE_TYPE_COPY18; + case RM_ENGINE_TYPE_COPY19: return NV2080_ENGINE_TYPE_COPY19; + case RM_ENGINE_TYPE_NVDEC0: return NV2080_ENGINE_TYPE_NVDEC0; + case RM_ENGINE_TYPE_NVDEC1: return NV2080_ENGINE_TYPE_NVDEC1; + case RM_ENGINE_TYPE_NVDEC2: return NV2080_ENGINE_TYPE_NVDEC2; + case RM_ENGINE_TYPE_NVDEC3: return NV2080_ENGINE_TYPE_NVDEC3; + case RM_ENGINE_TYPE_NVDEC4: return NV2080_ENGINE_TYPE_NVDEC4; + case RM_ENGINE_TYPE_NVDEC5: return NV2080_ENGINE_TYPE_NVDEC5; + case RM_ENGINE_TYPE_NVDEC6: return NV2080_ENGINE_TYPE_NVDEC6; + case RM_ENGINE_TYPE_NVDEC7: return NV2080_ENGINE_TYPE_NVDEC7; + case RM_ENGINE_TYPE_NVENC0: return NV2080_ENGINE_TYPE_NVENC0; + case RM_ENGINE_TYPE_NVENC1: return NV2080_ENGINE_TYPE_NVENC1; + case RM_ENGINE_TYPE_NVENC2: return NV2080_ENGINE_TYPE_NVENC2; + case RM_ENGINE_TYPE_NVENC3: return NV2080_ENGINE_TYPE_NVENC3; + case RM_ENGINE_TYPE_VP: return NV2080_ENGINE_TYPE_VP; + case RM_ENGINE_TYPE_ME: return NV2080_ENGINE_TYPE_ME; + case RM_ENGINE_TYPE_PPP: return NV2080_ENGINE_TYPE_PPP; + case RM_ENGINE_TYPE_MPEG: return NV2080_ENGINE_TYPE_MPEG; + case RM_ENGINE_TYPE_SW: return NV2080_ENGINE_TYPE_SW; + case RM_ENGINE_TYPE_TSEC: return NV2080_ENGINE_TYPE_TSEC; + case RM_ENGINE_TYPE_VIC: return NV2080_ENGINE_TYPE_VIC; + case RM_ENGINE_TYPE_MP: return NV2080_ENGINE_TYPE_MP; + case RM_ENGINE_TYPE_SEC2: return NV2080_ENGINE_TYPE_SEC2; + case RM_ENGINE_TYPE_HOST: return NV2080_ENGINE_TYPE_HOST; + case RM_ENGINE_TYPE_DPU: return NV2080_ENGINE_TYPE_DPU; + case RM_ENGINE_TYPE_PMU: return NV2080_ENGINE_TYPE_PMU; + case RM_ENGINE_TYPE_FBFLCN: return NV2080_ENGINE_TYPE_FBFLCN; + case RM_ENGINE_TYPE_NVJPEG0: return NV2080_ENGINE_TYPE_NVJPEG0; + case RM_ENGINE_TYPE_NVJPEG1: return NV2080_ENGINE_TYPE_NVJPEG1; + 
case RM_ENGINE_TYPE_NVJPEG2: return NV2080_ENGINE_TYPE_NVJPEG2;
+        case RM_ENGINE_TYPE_NVJPEG3: return NV2080_ENGINE_TYPE_NVJPEG3;
+        case RM_ENGINE_TYPE_NVJPEG4: return NV2080_ENGINE_TYPE_NVJPEG4;
+        case RM_ENGINE_TYPE_NVJPEG5: return NV2080_ENGINE_TYPE_NVJPEG5;
+        case RM_ENGINE_TYPE_NVJPEG6: return NV2080_ENGINE_TYPE_NVJPEG6;
+        case RM_ENGINE_TYPE_NVJPEG7: return NV2080_ENGINE_TYPE_NVJPEG7;
+        case RM_ENGINE_TYPE_OFA0: return NV2080_ENGINE_TYPE_OFA0;
+        case RM_ENGINE_TYPE_OFA1: return NV2080_ENGINE_TYPE_OFA1;
+        default: break;
+    }
+
+    return NV2080_ENGINE_TYPE_NULL;
+}
+
+/*!
+ * @brief Convert a list of RM engine types to a list of NV2080 engine types
+ *
+ * Refer to the comments of gpuGetRmEngineType_IMPL
+ *
+ * @param[in]  pRmEngineList     A list in order of RM_ENGINE_TYPE
+ * @param[in]  engineCount       Number of engines in the list
+ * @param[out] pNv2080EngineList Output list in order of NV2080_ENGINE_TYPE
+ *
+ * @returns void
+ */
+void gpuGetNv2080EngineTypeList_IMPL
+(
+    RM_ENGINE_TYPE *pRmEngineList,
+    NvU32           engineCount,
+    NvU32          *pNv2080EngineList
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(engineCount < RM_ENGINE_TYPE_LAST);
+
+    NvU32 i;
+    for (i = 0; i < engineCount; i++)
+    {
+        pNv2080EngineList[i] = gpuGetNv2080EngineType(pRmEngineList[i]);
+    }
+}
+
+/*!
+ * @brief Convert a list of NV2080 engine types to a list of RM engine types
+ *
+ * @param[in]  pNv2080EngineList A list in order of NV2080_ENGINE_TYPE
+ * @param[in]  engineCount       Number of engines in the list
+ * @param[out] pRmEngineList     Output list in order of RM_ENGINE_TYPE
+ *
+ * @returns void
+ */
+void gpuGetRmEngineTypeList_IMPL
+(
+    NvU32          *pNv2080EngineList,
+    NvU32           engineCount,
+    RM_ENGINE_TYPE *pRmEngineList
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(engineCount < RM_ENGINE_TYPE_LAST);
+
+    NvU32 i;
+    for (i = 0; i < engineCount; i++)
+    {
+        pRmEngineList[i] = gpuGetRmEngineType(pNv2080EngineList[i]);
+    }
+}
+
+/*!
+ * @brief Convert a capability mask of NV2080 engine types to the RM engine type capability mask.
+ *
+ * Refer to the comments of gpuGetRmEngineType_IMPL
+ *
+ * @param[in]  pNV2080EngineTypeCap NV2080 engine type capability mask
+ * @param[in]  capSize              Cap size in dwords
+ * @param[out] pRmEngineTypeCap     RM engine type capability mask
+ *
+ * @returns NV_OK
+ *          NV_ERR_INVALID_ARGUMENT
+ */
+NV_STATUS gpuGetRmEngineTypeCapMask_IMPL
+(
+    NvU32 *pNV2080EngineTypeCap,
+    NvU32  capSize,
+    NvU32 *pRmEngineTypeCap
+)
+{
+    NvU32 i;
+
+    NV_ASSERT_OR_RETURN(capSize == NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(pRmEngineTypeCap != NULL, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(pNV2080EngineTypeCap != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    for (i = 0; i < capSize; i++)
+    {
+        pRmEngineTypeCap[i] = 0;
+    }
+
+    for (i = 0; i < NV2080_ENGINE_TYPE_LAST; i++)
+    {
+        if (NVGPU_GET_ENGINE_CAPS_MASK(pNV2080EngineTypeCap, i))
+        {
+            NVGPU_SET_ENGINE_CAPS_MASK(pRmEngineTypeCap, gpuGetRmEngineType(i));
+        }
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Convert an RM engine type to its engine class name as a string
+ *
+ * Use ENGDESC_FIELD(engDesc, _INST) to get the engine instance
+ *
+ * @param[in] engineType   RM_ENGINE_TYPE number
+ * @param[in] bNvPrintfStr Make the returned string compatible with NV_PRINTF
+ *
+ * @returns engine name as a string
+ */
+const char* gpuRmEngineTypeToString_IMPL
+(
+    RM_ENGINE_TYPE engineType,
+    NvBool         bNvPrintfStr
+)
+{
+    if (RM_ENGINE_TYPE_IS_GR(engineType)) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("GR") : "GR";
+    else if (RM_ENGINE_TYPE_IS_COPY(engineType)) return bNvPrintfStr ?
MAKE_NV_PRINTF_STR("COPY") : "COPY"; + else if (RM_ENGINE_TYPE_IS_NVDEC(engineType)) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("NVDEC") : "NVDEC"; + else if (RM_ENGINE_TYPE_IS_NVENC(engineType)) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("NVENC") : "NVENC"; + else if (RM_ENGINE_TYPE_IS_NVJPEG(engineType)) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("NVJPEG") : "NVJPEG"; + else if (RM_ENGINE_TYPE_IS_OFA(engineType)) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("OFA") : "OFA"; + else if (engineType == RM_ENGINE_TYPE_VP) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("VP") : "VP"; + else if (engineType == RM_ENGINE_TYPE_ME) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("ME") : "ME"; + else if (engineType == RM_ENGINE_TYPE_PPP) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("PPP") : "PPP"; + else if (engineType == RM_ENGINE_TYPE_MPEG) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("MPEG") : "MPEG"; + else if (engineType == RM_ENGINE_TYPE_SW) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("SW") : "SW"; + else if (engineType == RM_ENGINE_TYPE_TSEC) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("TSEC") : "TSEC"; + else if (engineType == RM_ENGINE_TYPE_VIC) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("VIC") : "VIC"; + else if (engineType == RM_ENGINE_TYPE_MP) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("MP") : "MP"; + else if (engineType == RM_ENGINE_TYPE_SEC2) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("SEC2") : "SEC2"; + else if (engineType == RM_ENGINE_TYPE_HOST) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("HOST") : "HOST"; + else if (engineType == RM_ENGINE_TYPE_DPU) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("DPU") : "DPU"; + else if (engineType == RM_ENGINE_TYPE_PMU) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("PMU") : "PMU"; + else if (engineType == RM_ENGINE_TYPE_FBFLCN) return bNvPrintfStr ? MAKE_NV_PRINTF_STR("FBFLCN") : "FBFLCN"; + else return bNvPrintfStr ? MAKE_NV_PRINTF_STR("Unknown") : "Unknown"; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_gspclient.c b/src/nvidia/src/kernel/gpu/gpu_gspclient.c new file mode 100644 index 0000000..62302da --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_gspclient.c @@ -0,0 +1,95 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * GSP Client (CPU RM) specific GPU routines reside in this file. 
+ */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "ctrl/ctrl2080.h" + +NV_STATUS +gpuGetRegBaseOffset_FWCLIENT +( + OBJGPU *pGpu, + NvU32 regBase, + NvU32 *pOffset +) +{ + const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pChipInfo = gpuGetChipInfo(pGpu); + NV_ASSERT_OR_RETURN(pChipInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(regBase < NV_ARRAY_ELEMENTS(pChipInfo->regBases), NV_ERR_NOT_SUPPORTED); + + if (pChipInfo->regBases[regBase] != 0xFFFFFFFF) + { + *pOffset = pChipInfo->regBases[regBase]; + return NV_OK; + } + + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief These functions are used on CPU RM when pGpu is a GSP client. + * Data is fetched from GSP using subdeviceCtrlCmdInternalGetChipInfo and cached, + * then retrieved through the internal gpuGetChipInfo. + * + * Functions either return value directly, or through a second [out] param, depending + * on the underlying function. + * + * @param[in] pGpu + */ +NvU8 +gpuGetChipSubRev_FWCLIENT +( + OBJGPU *pGpu +) +{ + const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pChipInfo = gpuGetChipInfo(pGpu); + NV_ASSERT_OR_RETURN(pChipInfo != NULL, 0); + + return pChipInfo->chipSubRev; +} + +/*! GPU has a new reset required state */ +NV_STATUS +gpuResetRequiredStateChanged_FWCLIENT +( + OBJGPU *pGpu, + NvBool newState +) +{ + gpuRefreshRecoveryAction_HAL(pGpu, NV_FALSE); + + return NV_OK; +} + +NvBool +gpuIsSystemRebootRequired_FWCLIENT +( + OBJGPU *pGpu +) +{ + return NV_FALSE; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_resource.c b/src/nvidia/src/kernel/gpu/gpu_resource.c new file mode 100644 index 0000000..5d6e389 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_resource.c @@ -0,0 +1,415 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This implements functions of the base class for gpu resources. 
+* +******************************************************************************/ + +#include "core/core.h" +#include "os/os.h" +#include "resserv/resserv.h" +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" +#include "rmapi/client.h" +#include "rmapi/resource.h" +#include "gpu/gpu.h" +#include "gpu/gpu_resource.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu_mgr/gpu_mgr.h" +#include "virtualization/hypervisor/hypervisor.h" + +#include "g_allclasses.h" + +NV_STATUS +gpuresConstruct_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + RsResourceRef *pDeviceRef; + RsResourceRef *pSubdeviceRef; + OBJGPU *pGpu = NULL; + NvBool bBcResource = NV_TRUE; + NV_STATUS status; + + // Check if instance is a subdevice or device, else check for ancestor + if (pResourceRef->internalClassId == classId(Subdevice)) + pGpuResource->pSubdevice = dynamicCast(pGpuResource, Subdevice); + else + { + status = refFindAncestorOfType(pResourceRef, classId(Subdevice), &pSubdeviceRef); + if (status == NV_OK) + pGpuResource->pSubdevice = dynamicCast(pSubdeviceRef->pResource, Subdevice); + } + + // Check if instance is a device, else check for ancestor + if (pResourceRef->internalClassId == classId(Device)) + pGpuResource->pDevice = dynamicCast(pGpuResource, Device); + else + { + status = refFindAncestorOfType(pResourceRef, classId(Device), &pDeviceRef); + if (status == NV_OK) + pGpuResource->pDevice = dynamicCast(pDeviceRef->pResource, Device); + } + + if (RS_IS_COPY_CTOR(pParams)) + return gpuresCopyConstruct(pGpuResource, pCallContext, pParams); + + + // Fails during device/subdevice ctor. 
Subclass ctor calls gpuresSetGpu + status = gpuGetByRef(pResourceRef, &bBcResource, &pGpu); + + if (status == NV_OK) + gpuresSetGpu(pGpuResource, pGpu, bBcResource); + + return NV_OK; +} + +NV_STATUS +gpuresCopyConstruct_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + GpuResource *pGpuResourceSrc = dynamicCast(pParams->pSrcRef->pResource, GpuResource); + + if (pGpuResourceSrc == NULL) + return NV_ERR_INVALID_OBJECT; + + gpuresSetGpu(pGpuResource, pGpuResourceSrc->pGpu, pGpuResourceSrc->bBcResource); + + return NV_OK; +} + +NV_STATUS +gpuresMap_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + RS_CPU_MAP_PARAMS *pParams, + RsCpuMapping *pCpuMapping +) +{ + OBJGPU *pGpu; + NvU32 offset, size; + NV_STATUS rmStatus; + NvBool bBroadcast; + + pGpu = CliGetGpuFromContext(pCpuMapping->pContextRef, &bBroadcast); + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + gpuSetThreadBcState(pGpu, bBroadcast); + + rmStatus = gpuresGetRegBaseOffsetAndSize(pGpuResource, pGpu, &offset, &size); + if (rmStatus != NV_OK) + return rmStatus; + + rmStatus = rmapiMapGpuCommon(staticCast(pGpuResource, RsResource), + pCallContext, + pCpuMapping, + pGpu, + offset, + size); + pCpuMapping->processId = osGetCurrentProcess(); + + if (pParams->ppCpuVirtAddr) + *pParams->ppCpuVirtAddr = pCpuMapping->pLinearAddress; + + return rmStatus; +} + +NV_STATUS +gpuresUnmap_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + RsCpuMapping *pCpuMapping +) +{ + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + OBJGPU *pGpu; + NvBool bBroadcast; + + pGpu = CliGetGpuFromContext(pCpuMapping->pContextRef, &bBroadcast); + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + gpuSetThreadBcState(pGpu, bBroadcast); + + osUnmapGPU(pGpu->pOsGpuInfo, + rmclientGetCachedPrivilege(pClient), + pCpuMapping->pLinearAddress, + pCpuMapping->length, + pCpuMapping->pPrivate->pPriv); + + return NV_OK; +} + +NvBool +gpuresShareCallback_IMPL +( + GpuResource *pGpuResource, + RsClient *pInvokingClient, + RsResourceRef *pParentRef, + RS_SHARE_POLICY *pSharePolicy +) +{ + NvBool bMIGInUse = NV_FALSE; + NvU16 shareType = pSharePolicy->type; + + if ((shareType == RS_SHARE_TYPE_SMC_PARTITION) && !bMIGInUse) + { + // When MIG is not enabled, ignore Require restrictions + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) + return NV_TRUE; + + // Fallback if feature is not available + shareType = RS_SHARE_TYPE_GPU; + } + + switch (shareType) + { + case RS_SHARE_TYPE_SMC_PARTITION: + { + if (RS_ACCESS_MASK_TEST(&pSharePolicy->accessMask, RS_ACCESS_DUP_OBJECT)) + { + // Special exceptions only for Dup + RsResourceRef *pSrcRef = RES_GET_REF(pGpuResource); + + switch (pSrcRef->externalClassId) + { + case NV01_MEMORY_SYSTEM: + return NV_TRUE; + } + + } + + break; + } + case RS_SHARE_TYPE_GPU: + { + RsResourceRef *pDeviceAncestorRef; + RsResourceRef *pParentDeviceAncestorRef; + + // This share type only works when called from dup + if (pParentRef == NULL) + break; + + if (pParentRef->internalClassId == classId(Device)) + { + // pParentRef is allowed to itself be the Device ancestor + pParentDeviceAncestorRef = pParentRef; + } + else + { + // If pParentRef is not itself the device, try to find a Device ancestor. If none exist, fail. 
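+                // For instance, a dup call such as (hypothetical handles):
+                //   pRmApi->DupObject(pRmApi, hClientB, hSubdevB, &hDup,
+                //                     hClientA, hObjA, 0);
+                // arrives here with pParentRef referring to hSubdevB, whose
+                // Device ancestor is then resolved.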
+ if (refFindAncestorOfType(pParentRef, classId(Device), &pParentDeviceAncestorRef) != NV_OK) + break; + } + // Check that the source resource's ancestor device instance matches the destination parent's device instance + if (refFindAncestorOfType(RES_GET_REF(pGpuResource), classId(Device), &pDeviceAncestorRef) == NV_OK) + { + Device *pDevice = dynamicCast(pDeviceAncestorRef->pResource, Device); + Device *pParentDevice = dynamicCast(pParentDeviceAncestorRef->pResource, Device); + + if ((pDevice != NULL) && (pParentDevice != NULL) && + (pDevice->deviceInst == pParentDevice->deviceInst)) + { + return NV_TRUE; + } + } + } + } + + // Delegate to superclass + return rmresShareCallback_IMPL(staticCast(pGpuResource, RmResource), pInvokingClient, pParentRef, pSharePolicy); +} + +NV_STATUS +gpuresGetRegBaseOffsetAndSize_IMPL +( + GpuResource *pGpuResource, + OBJGPU *pGpu, + NvU32 *pOffset, + NvU32 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +gpuresGetMapAddrSpace_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + NvU32 mapFlags, + NV_ADDRESS_SPACE *pAddrSpace +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pGpuResource); + NV_STATUS status; + NvU32 offset; + NvU32 size; + + // Default to REGMEM if the GPU resource has a register base and offset defined + status = gpuresGetRegBaseOffsetAndSize(pGpuResource, pGpu, &offset, &size); + if (status != NV_OK) + return status; + + if (pAddrSpace) + *pAddrSpace = ADDR_REGMEM; + + return NV_OK; +} + +/*! + * @brief Forward a control call to the Physical RM portion of this API. + */ +NV_STATUS +gpuresInternalControlForward_IMPL +( + GpuResource *pGpuResource, + NvU32 command, + void *pParams, + NvU32 size +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(GPU_RES_GET_GPU(pGpuResource)); + return pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pGpuResource), + gpuresGetInternalObjectHandle(pGpuResource), + command, + pParams, + size); +} + +/*! + * @brief Retrieve the handle associated with the Physical RM portion of the API. + * For non-split object, this is the same as the handle of the object. 
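+ * Subclasses representing objects that are split across CPU RM and
+ * Physical RM are expected to override this and return the handle of the
+ * Physical RM counterpart.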
+ */ +NvHandle +gpuresGetInternalObjectHandle_IMPL(GpuResource *pGpuResource) +{ + return RES_GET_HANDLE(pGpuResource); +} + +NV_STATUS +gpuresControl_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + NV_ASSERT_OR_RETURN(pGpuResource->pGpu != NULL, NV_ERR_INVALID_STATE); + gpuresControlSetup(pParams, pGpuResource); + + return resControl_IMPL(staticCast(pGpuResource, RsResource), + pCallContext, pParams); +} + +void +gpuresControlSetup_IMPL +( + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + GpuResource *pGpuResource +) +{ + RmCtrlParams *pRmCtrlParams = pParams->pLegacyParams; + pRmCtrlParams->pGpu = pGpuResource->pGpu; + + GPU_RES_SET_THREAD_BC_STATE(pGpuResource); +} + +void +gpuresSetGpu_IMPL +( + GpuResource *pGpuResource, + OBJGPU *pGpu, + NvBool bBcResource +) +{ + if (pGpu != NULL) + { + RmResource *pResource = staticCast(pGpuResource, RmResource); + pResource->rpcGpuInstance = gpuGetInstance(pGpu); + pGpuResource->pGpu = pGpu; + pGpuResource->pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpuResource->pGpu); + pGpuResource->bBcResource = bBcResource; + gpuSetThreadBcState(pGpu, bBcResource); + } +} + +NV_STATUS +gpuresGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hResource, + GpuResource **ppGpuResource +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppGpuResource = NULL; + + status = clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + return status; + + *ppGpuResource = dynamicCast(pResourceRef->pResource, GpuResource); + + return (*ppGpuResource) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +gpuresGetByDeviceOrSubdeviceHandle +( + RsClient *pClient, + NvHandle hResource, + GpuResource **ppGpuResource +) +{ + NV_STATUS status; + + status = gpuresGetByHandle(pClient, hResource, ppGpuResource); + + if (status != NV_OK) + return status; + + // Must be device or subdevice + if (!dynamicCast(*ppGpuResource, Device) && + !dynamicCast(*ppGpuResource, Subdevice)) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_resource_desc.c b/src/nvidia/src/kernel/gpu/gpu_resource_desc.c new file mode 100644 index 0000000..b27e795 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_resource_desc.c @@ -0,0 +1,585 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/*! + * @file + * @brief Object Manager: Object Classes are defined in this module. + */ + +#include "gpu/gpu.h" +#include "os/os.h" +#include "core/locks.h" +#include "nvrm_registry.h" +#include "lib/base_utils.h" + +ct_assert(NVOC_CLASS_ID_MAX_WIDTH <= SF_WIDTH(ENGDESC_CLASS)); + +NV_STATUS +gpuBuildClassDB_IMPL(OBJGPU *pGpu) +{ + GpuEngineOrder *pEngineOrder = &pGpu->engineOrder; + CLASSDESCRIPTOR *pClassDynamic; + const CLASSDESCRIPTOR *pClassStatic; + NvU32 numClasses; + NvU32 i, j; + NV_STATUS status; + GpuClassDb *pClassDB = &pGpu->classDB; + + // + // Calculate number of classes supported by this device. + // + // Loop through the list of GPU-specific classes throwing out any the + // rmconfig has marked not supported. + // + numClasses = 0; + + pClassStatic = &pEngineOrder->pClassDescriptors[0]; + for (i = 0; i < pEngineOrder->numClassDescriptors; i++) + { + // RMCONFIG: throw out any that are not supported + if (pClassStatic[i].externalClassId == (NvU32)~0 || + pClassStatic[i].engDesc == ENG_INVALID) + continue; + + numClasses++; + } + + NV_PRINTF(LEVEL_INFO, "num class descriptors: 0x%x\n", numClasses); + + // + // Allocate space for correct number of entries. + // + pClassDynamic = portMemAllocNonPaged(sizeof(CLASSDESCRIPTOR) * numClasses); + if (pClassDynamic == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_PRINTF(LEVEL_ERROR, "alloc failed: 0x%x\n", status); + DBG_BREAKPOINT(); + return status; + } + portMemSet((void *)pClassDynamic, 0, sizeof(CLASSDESCRIPTOR) * numClasses); + + // + // Now load up chip-dependent classes into pClass table. + // + pClassStatic = &pEngineOrder->pClassDescriptors[0]; + i = 0; + for (j = 0; j < pEngineOrder->numClassDescriptors; j++) + { + // RMCONFIG: skip over any that are not supported + if (pClassStatic[j].externalClassId == (NvU32)~0 || + pClassStatic[j].engDesc == ENG_INVALID) + continue; + + // store info for class in class DB entry + pClassDynamic[i] = pClassStatic[j]; + + // move to next slot in class DB + i++; + } + + pClassDB->pClasses = pClassDynamic; + pClassDB->numClasses = numClasses; + pClassDB->pSuppressClasses = NULL; + pClassDB->bSuppressRead = NV_FALSE; + pGpu->engineDB.bValid = NV_FALSE; + + return NV_OK; +} + +NV_STATUS +gpuDestroyClassDB_IMPL(OBJGPU *pGpu) +{ + portMemFree(pGpu->classDB.pClasses); + portMemFree(pGpu->classDB.pSuppressClasses); + + pGpu->engineDB.bValid = NV_FALSE; + return NV_OK; +} + +NvBool +gpuIsClassSupported_IMPL(OBJGPU *pGpu, NvU32 externalClassId) +{ + CLASSDESCRIPTOR *pClassDesc; + NV_STATUS status; + + status = gpuGetClassByClassId(pGpu, externalClassId, &pClassDesc); + + return (status == NV_OK) && (pClassDesc); +} + +NV_STATUS +gpuGetClassByClassId_IMPL(OBJGPU *pGpu, NvU32 externalClassId, CLASSDESCRIPTOR **ppClassDesc) +{ + GpuClassDb *pClassDB = &pGpu->classDB; + NvU32 i; + + for (i = 0; i < pClassDB->numClasses; i++) + { + if (pClassDB->pClasses[i].externalClassId == externalClassId) + { + if (ppClassDesc != NULL) + { + *ppClassDesc = &pClassDB->pClasses[i]; + } + return NV_OK; + } + } + + return NV_ERR_INVALID_ARGUMENT; +} + +NV_STATUS +gpuGetClassByEngineAndClassId_IMPL(OBJGPU *pGpu, NvU32 externalClassId, NvU32 engDesc, CLASSDESCRIPTOR **ppClassDesc) +{ + GpuClassDb *pClassDB = &pGpu->classDB; + NvU32 i; + + for (i = 0; i < pClassDB->numClasses; i++) + { + if (pClassDB->pClasses[i].externalClassId == externalClassId && pClassDB->pClasses[i].engDesc == engDesc) + { + *ppClassDesc = &pClassDB->pClasses[i]; + return NV_OK; + } + } + + return NV_ERR_GENERIC; +} + +static NvU32 * 
+gpuGetSuppressedClassList +( + OBJGPU *pGpu +) +{ + NvU8 *pStr; + NvU8 *pEndStr; + NvU8 *pSaveStr; + NvU32 strLength; + NvU32 nIndex; + NvU32 nCount = 0; + NvU32 *pData = NULL; + NvU32 numAModelClassesInChip = 0; + NvBool bSuppressClassList = NV_FALSE; + NvU32 numFound; + + // alloc regkey buffer + strLength = 256; + pStr = portMemAllocNonPaged(strLength); + if (pStr == NULL) + { + NV_PRINTF(LEVEL_ERROR, "portMemAllocNonPaged failed\n"); + return NULL; + } + + pSaveStr = pStr; + + if (osReadRegistryString(pGpu, NV_REG_STR_SUPPRESS_CLASS_LIST, pStr, &strLength) == NV_OK) + { + bSuppressClassList = NV_TRUE; + } + + if (bSuppressClassList) + { + // count number of classes + for (; *pStr; pStr = pEndStr, nCount++) + { + nvStrToL(pStr, &pEndStr, BASE16, 0, &numFound); + } + } + + // allocate memory only if there is something to suppress. + if ( ! ( nCount + numAModelClassesInChip ) ) + { + portMemFree(pSaveStr); + return NULL; + } + + // + // add one dword to store the count of classes here. + // This fixes a memory leak caused by changelist 1620538 + // + nCount++; + + pData = portMemAllocNonPaged(sizeof(NvU32)*(nCount + numAModelClassesInChip)); + if (pData == NULL) + { + NV_PRINTF(LEVEL_ERROR, "portMemAllocNonPaged failed\n"); + portMemFree(pSaveStr); + return NULL; + } + + // fill array -- first is number of classes + pData[0]=nCount; + + if (bSuppressClassList) + { + pStr = pSaveStr; + for (nIndex = 1; *pStr; pStr = pEndStr, nIndex++) + { + pData[nIndex] = nvStrToL(pStr, &pEndStr, BASE16, 0, &numFound); + } + } + + portMemFree(pSaveStr); + + return pData; +} + +/** + * @brief Returns list of classes supported by engDesc. + * If ( engDesc == ENG_INVALID ) returns classes + * supported by all engines. + * @param[in] pGpu OBJGPU pointer + * @param[in/out] pNumClasses in - denotes the size of pClassList when pClassList != NULL + out - when pClassList is NULL, denotes the number of matching + classes found + * @param[out] pClassList Returns matching class(s) when pNumClasses in not 0 + * @param[out] engDesc Engine ID + * + * @return NV_OK if class match found + */ +NV_STATUS +gpuGetClassList_IMPL(OBJGPU *pGpu, NvU32 *pNumClasses, NvU32 *pClassList, NvU32 engDesc) +{ + NvU32 *pSuppressClasses = NULL; + NvU32 numClasses; + NV_STATUS status = NV_OK; + NvU32 i, k; + NvBool bCount; + CLASSDESCRIPTOR *pClassDB = pGpu->classDB.pClasses; + NvU32 lastClassId = 0; + + // Read the registry one time to get the list + if (NV_FALSE == pGpu->classDB.bSuppressRead) + { + pGpu->classDB.pSuppressClasses = gpuGetSuppressedClassList(pGpu); + pGpu->classDB.bSuppressRead = NV_TRUE; + } + + pSuppressClasses = pGpu->classDB.pSuppressClasses; + + numClasses = 0; + + for (i = 0; i < pGpu->classDB.numClasses; i++) + { + if ((engDesc != ENG_INVALID) && (pClassDB[i].engDesc != engDesc)) + continue; + + bCount = NV_TRUE; + + if (pSuppressClasses != NULL) + { + for (k=1; k < pSuppressClasses[0]; k++) + { + if (pSuppressClasses[k] == pClassDB[i].externalClassId) + { + bCount = NV_FALSE; + break; + } + } + } + + if (bCount) + { + const NvU32 classId = pClassDB[i].externalClassId; + + // + // Skip duplicate classes. These exist in the classDB for + // multi-instance engines (e.g., CE0, CE1, etc), but we only want + // one entry in the class list. The classDB maintains ordering + // such that all classes with the same ID are contiguous. 
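+            // For example, a GPU with three copy engines can carry three
+            // classDB entries sharing one DMA-copy class ID; that ID shows
+            // up in the returned list exactly once.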
+ // + if (classId != lastClassId) + { + // save the class in caller's buffer, if provided + if (pClassList) + { + if (numClasses < *pNumClasses) + pClassList[numClasses] = classId; + else + status = NV_ERR_INVALID_PARAM_STRUCT; + } + numClasses++; + lastClassId = classId; + } + } + } + + // and return number of classes + if (status == NV_OK) + *pNumClasses = numClasses; + + return status; +} + +/*! + * @brief Add a class to class DB with given Engine Tag and Class Id. + * + * Note that a matching class/engine must already exist in the read-only + * "static" class list populated from gpuGetClassDescriptorList_HAL(). This + * means that only classes/engines which have previously been removed from the + * class DB can be added here. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pEngDesc EngDesc of Classes to be added to Class DB + * (NULL = don't care) + * @param[in] pExternalClassId Class to add to DB (NULL = don't care) + * + * @returns NV_STATUS - + * NV_ERR_INVALID_ARGUMENT if both pEngineTag and pClass are NULL. + * NV_OK otherwise + */ +static NV_STATUS +_gpuAddClassToClassDBByEngTagClassId(OBJGPU *pGpu, ENGDESCRIPTOR *pEngDesc, NvU32 *pExternalClassId) +{ + GpuEngineOrder *pEngineOrder = &pGpu->engineOrder; + const CLASSDESCRIPTOR *pClassDesc = &pEngineOrder->pClassDescriptors[0]; + const CLASSDESCRIPTOR *pClassDescToCopy = NULL; + GpuClassDb *pClassDB = &pGpu->classDB; + NvU32 numClasses = pClassDB->numClasses; + NvBool bMatchingClassIdFound = NV_FALSE; + NvU32 matchingClassIdIndex; + NvU32 newClassDBIndex; + NvU32 i; + + NV_CHECK_OR_RETURN(LEVEL_INFO, (NULL != pEngDesc) || (NULL != pExternalClassId), NV_ERR_INVALID_ARGUMENT); + + // Return early if requested class/engine is already in the dynamic classdb + for (i = 0; i < numClasses; i++) + { + if (((NULL == pEngDesc) || (pClassDB->pClasses[i].engDesc == *pEngDesc)) && + ((NULL == pExternalClassId) || (pClassDB->pClasses[i].externalClassId == *pExternalClassId))) + { + return NV_OK; + } + } + + // Populate the ClassDB with information from the static class list (g_gpu_class_list.c) + for (i = 0; i < pEngineOrder->numClassDescriptors; i++) + { + // RMCONFIG: skip over any that are not supported + if (pClassDesc[i].externalClassId == (NvU32)~0 || + pClassDesc[i].engDesc == ENG_INVALID) + continue; + + if (((NULL == pEngDesc) || (pClassDesc[i].engDesc == *pEngDesc)) && + ((NULL == pExternalClassId) || (pClassDesc[i].externalClassId == *pExternalClassId))) + { + pClassDescToCopy = &pClassDesc[i]; + break; + } + } + + if (pClassDescToCopy == NULL) { + // This should probably be an error, but that causes existing tests to fail... + return NV_OK; + } + + // + // Find the last entry with a matching externalClassId, if any. We keep + // all entries of the same externalClassId contiguous so that + // gpuGetClassList_IMPL() can easily filter them out. + // + for (i = 0; i < numClasses; i++) + { + if (pClassDB->pClasses[i].externalClassId == pClassDescToCopy->externalClassId) + { + bMatchingClassIdFound = NV_TRUE; + matchingClassIdIndex = i; + } + } + + if (bMatchingClassIdFound) + { + NvLength bytesToMove; + + // Add an entry next to the existing block of this class ID. + newClassDBIndex = matchingClassIdIndex + 1; + + // Move the rest of the entries to make space. 
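+        // (numClasses - newClassDBIndex) descriptors are shifted up by one
+        // slot. portMemMove, unlike portMemCopy, tolerates the overlapping
+        // source and destination ranges of this in-place shift.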
+ bytesToMove = (numClasses - newClassDBIndex) * sizeof(pClassDB->pClasses[0]); + portMemMove(&pClassDB->pClasses[newClassDBIndex + 1], bytesToMove, + &pClassDB->pClasses[newClassDBIndex], bytesToMove); + + } + else + { + // Add new entry at the end. + newClassDBIndex = numClasses; + } + + // + // Store info for class in class DB entry. + // + // It is assumed that because there is a matching entry in the static class + // list, but not the dynamic class list, the classDB array already has + // enough space allocated (because gpuBuildClassDB_IMPL duplicated the + // static class list, and this entry must have been removed since then). + // + + pClassDB->pClasses[newClassDBIndex] = *pClassDescToCopy; + pClassDB->numClasses++; + + pGpu->engineDB.bValid = NV_FALSE; + + return NV_OK; +} + +/*! + * @brief Add a class to class DB with given Engine Tag and Class Id. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc Engine ID of Classes to be added to Class DB + * @param[in] class Class to add to DB + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS +gpuAddClassToClassDBByEngTagClassId_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc, NvU32 externalClassId) +{ + return _gpuAddClassToClassDBByEngTagClassId(pGpu, &engDesc, &externalClassId); +} + +/*! + * @brief Add a class to class DB with given Engine Tag. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc Engine ID of Class to be added to Class DB + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS gpuAddClassToClassDBByEngTag_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc) +{ + return _gpuAddClassToClassDBByEngTagClassId(pGpu, &engDesc, NULL); +} + +/*! + * @brief Add a class to class DB with given Class ID. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] class Class ID + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS gpuAddClassToClassDBByClassId_IMPL(OBJGPU *pGpu, NvU32 externalClassId) +{ + return _gpuAddClassToClassDBByEngTagClassId(pGpu, NULL, &externalClassId); +} + +/*! + * @brief Delete a class from class DB with given Engine Tag and Class Id. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pEngDesc Engine Tag of Classes to be removed from Class DB + * (NULL = don't care) + * @param[in] pExternalClassId Class to remove from DB (NULL = don't care) + * + * @returns NV_STATUS - NV_OK always. + */ +static NV_STATUS +_gpuDeleteClassFromClassDBByEngTagClassId(OBJGPU *pGpu, ENGDESCRIPTOR *pEngDesc, NvU32 *pExternalClassId) +{ + GpuClassDb *pClassDB = &pGpu->classDB; + NvU32 i, j; + + NV_CHECK_OR_RETURN(LEVEL_INFO, (NULL != pEngDesc) || (NULL != pExternalClassId), NV_ERR_INVALID_ARGUMENT); + + for (i = 0; i < pClassDB->numClasses; i++) + { + if (((NULL == pEngDesc) || (pClassDB->pClasses[i].engDesc == *pEngDesc)) && + ((NULL == pExternalClassId) || (pClassDB->pClasses[i].externalClassId == *pExternalClassId))) + { + for (j = i; j < pClassDB->numClasses - 1; j++) + { + pClassDB->pClasses[j] = pClassDB->pClasses[j + 1]; + } + pClassDB->numClasses--; + i--; // Be sure to check the new entry at index i on the next loop. + } + } + + pGpu->engineDB.bValid = NV_FALSE; + + return NV_OK; +} + +/*! + * @brief Delete a class from class DB with given Engine Tag and Class Id. + * + * @side Sets engineDB.bValid to NV_FALSE. 
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc EngDesc of Classes to be removed from Class DB + * @param[in] externalClassId Class to remove from DB + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS +gpuDeleteClassFromClassDBByEngTagClassId_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc, NvU32 externalClassId) +{ + return _gpuDeleteClassFromClassDBByEngTagClassId(pGpu, &engDesc, &externalClassId); +} + +/*! + * @brief Delete a class from class DB with given Engine Tag. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] externalClassId Class to remove from DB + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS +gpuDeleteClassFromClassDBByClassId_IMPL(OBJGPU *pGpu, NvU32 externalClassId) +{ + return _gpuDeleteClassFromClassDBByEngTagClassId(pGpu, NULL, &externalClassId); +} + +/*! + * @brief Delete a class from class DB with given Engine Tag. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc Engine Descriptor of Classes to be removed from Class DB + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS +gpuDeleteClassFromClassDBByEngTag_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc) +{ + return _gpuDeleteClassFromClassDBByEngTagClassId(pGpu, &engDesc, NULL); +} diff --git a/src/nvidia/src/kernel/gpu/gpu_rmapi.c b/src/nvidia/src/kernel/gpu/gpu_rmapi.c new file mode 100644 index 0000000..9bb0709 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_rmapi.c @@ -0,0 +1,700 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "class/cl0040.h" /* NV01_MEMORY_LOCAL_USER */ +#include "class/cl84a0.h" /* NV01_MEMORY_LIST_XXX */ +#include "class/cl00b1.h" /* NV01_MEMORY_HW_RESOURCES */ + +#include "nverror.h" + +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi.h" +#include "rmapi/client.h" +#include "rmapi/resource_fwd_decls.h" +#include "core/thread_state.h" + +NV_STATUS +gpuSetExternalKernelClientCount_IMPL(OBJGPU *pGpu, NvBool bIncr) +{ + if (bIncr) + { + pGpu->externalKernelClientCount++; + } + else + { + NV_ASSERT_OR_RETURN(pGpu->externalKernelClientCount > 0, NV_ERR_INVALID_OPERATION); + pGpu->externalKernelClientCount--; + } + + return NV_OK; +} + +// Get the count of user clients that are using given gpu +static NvU32 +_gpuGetUserClientCount +( + OBJGPU *pGpu, + NvBool bCount +) +{ + NvU32 count = 0; + Device *pDevice; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + NV_STATUS status; + + // Search list of clients for any that have an InUse ref to the gpu + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + // Skip internal client + if (pRsClient->type == CLIENT_TYPE_KERNEL) + continue; + + status = deviceGetByGpu(pRsClient, pGpu, NV_TRUE /* bAnyInGroup */, &pDevice); + + if (status != NV_OK) + continue; + + count++; + + if (!bCount) + break; + } + + return count; +} + +NvBool +gpuIsInUse_IMPL +( + OBJGPU *pGpu +) +{ + return !!_gpuGetUserClientCount(pGpu, NV_FALSE) || + (pGpu->externalKernelClientCount > 0); +} + +// Get the count of user clients that are using given gpu +NvU32 +gpuGetUserClientCount_IMPL +( + OBJGPU *pGpu +) +{ + return _gpuGetUserClientCount(pGpu, NV_TRUE); +} + +// Get the count of external clients (User+External modules) that are using given gpu +NvU32 +gpuGetExternalClientCount_IMPL +( + OBJGPU *pGpu +) +{ + return _gpuGetUserClientCount(pGpu, NV_TRUE) + pGpu->externalKernelClientCount; +} + +/** + * Find the GPU associated with a resource reference in this order: + * + * 1. Directly from the RsResource if the resource is a Device or Subdevice + * 2. From an ancestor subdevice (if any) + * 3. 
From an ancestor device (if any) + * + * If the resource your querying is guaranteed to be a GpuResource you should + * directly call GPU_RES_GET_GPU() + * + * @param[out] pbBroadcast True if the found GPU corresponds to a device + * [optional] + */ +NV_STATUS +gpuGetByRef +( + RsResourceRef *pContextRef, + NvBool *pbBroadcast, + OBJGPU **ppGpu +) +{ + NV_STATUS status = NV_OK; + RsResourceRef *pDeviceRef; + RsResourceRef *pSubdeviceRef; + GpuResource *pGpuResource; + + if (ppGpu != NULL) + *ppGpu = NULL; + + if (pContextRef == NULL) + return NV_ERR_INVALID_ARGUMENT; + + pGpuResource = dynamicCast(pContextRef->pResource, GpuResource); + + // + // NULL check on GpuResource::pGpu as this routine is used from within + // GpuResource::Construct to initialize GpuResource::pGpu + // + if ((pGpuResource == NULL) || (pGpuResource->pGpu == NULL)) + { + status = refFindAncestorOfType(pContextRef, classId(Subdevice), &pSubdeviceRef); + if (status == NV_OK) + { + pGpuResource = dynamicCast(pSubdeviceRef->pResource, GpuResource); + if ((pGpuResource == NULL) || (pGpuResource->pGpu == NULL)) + status = NV_ERR_OBJECT_NOT_FOUND; + } + + if (status != NV_OK) + { + status = refFindAncestorOfType(pContextRef, classId(Device), &pDeviceRef); + if (status == NV_OK) + { + pGpuResource = dynamicCast(pDeviceRef->pResource, GpuResource); + if ((pGpuResource == NULL) || (pGpuResource->pGpu == NULL)) + status = NV_ERR_OBJECT_NOT_FOUND; + } + } + } + + if (status == NV_OK) + { + if (pbBroadcast != NULL) + *pbBroadcast = pGpuResource->bBcResource; + + if (ppGpu != NULL) + *ppGpu = pGpuResource->pGpu; + } + + return status; +} + +/** + * Wrapper for gpuGetByRef that takes a pClient + hResource instead of a + * pResourceRef. + * + * Find the GPU associated with a resource; + */ +NV_STATUS +gpuGetByHandle +( + RsClient *pClient, + NvHandle hResource, + NvBool *pbBroadcast, + OBJGPU **ppGpu +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + if (ppGpu != NULL) + *ppGpu = NULL; + + status = clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + return status; + + return gpuGetByRef(pResourceRef, pbBroadcast, ppGpu); +} + +NV_STATUS gpuRegisterSubdevice_IMPL(OBJGPU *pGpu, Subdevice *pSubdevice) +{ + const NvU32 initialSize = 32; + const NvU32 expansionFactor = 2; + + if (pGpu->numSubdeviceBackReferences == pGpu->maxSubdeviceBackReferences) + { + if (pGpu->pSubdeviceBackReferences == NULL) + { + pGpu->pSubdeviceBackReferences = portMemAllocNonPaged(initialSize * sizeof(Subdevice*)); + if (pGpu->pSubdeviceBackReferences == NULL) + return NV_ERR_NO_MEMORY; + pGpu->maxSubdeviceBackReferences = initialSize; + } + else + { + const NvU32 newSize = expansionFactor * pGpu->maxSubdeviceBackReferences * sizeof(Subdevice*); + Subdevice **newArray = portMemAllocNonPaged(newSize); + if (newArray == NULL) + return NV_ERR_NO_MEMORY; + + portMemCopy(newArray, newSize, pGpu->pSubdeviceBackReferences, pGpu->maxSubdeviceBackReferences * sizeof(Subdevice*)); + portMemFree(pGpu->pSubdeviceBackReferences); + pGpu->pSubdeviceBackReferences = newArray; + pGpu->maxSubdeviceBackReferences *= expansionFactor; + } + } + pGpu->pSubdeviceBackReferences[pGpu->numSubdeviceBackReferences++] = pSubdevice; + return NV_OK; +} + +void gpuUnregisterSubdevice_IMPL(OBJGPU *pGpu, Subdevice *pSubdevice) +{ + NvU32 i; + for (i = 0; i < pGpu->numSubdeviceBackReferences; i++) + { + if (pGpu->pSubdeviceBackReferences[i] == pSubdevice) + { + pGpu->numSubdeviceBackReferences--; + pGpu->pSubdeviceBackReferences[i] = 
pGpu->pSubdeviceBackReferences[pGpu->numSubdeviceBackReferences]; + pGpu->pSubdeviceBackReferences[pGpu->numSubdeviceBackReferences] = NULL; + return; + } + } + NV_ASSERT_FAILED("Subdevice not found!"); +} + +// +// For a particular gpu, find all the clients waiting for a particular event, +// fill in the notifier if allocated, and raise an event to the client if registered. +// +void +gpuNotifySubDeviceEvent_IMPL +( + OBJGPU *pGpu, + NvU32 notifyIndex, + void *pNotifyParams, + NvU32 notifyParamsSize, + NvV32 info32, + NvV16 info16 +) +{ + PEVENTNOTIFICATION pEventNotification; + THREAD_STATE_NODE *pCurThread; + NvU32 localNotifyType; + NvU32 localInfo32; + NvU32 i; + + if (NV_OK == threadStateGetCurrent(&pCurThread, pGpu)) + { + // This function shouldn't be used from lockless ISR. + // Use engineNonStallIntrNotify() to notify event from lockless ISR. + NV_ASSERT_OR_RETURN_VOID(!(pCurThread->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS)); + } + + NV_ASSERT_OR_RETURN_VOID(notifyIndex < NV2080_NOTIFIERS_MAXCOUNT); + + // search notifiers with events hooked up for this gpu + for (i = 0; i < pGpu->numSubdeviceBackReferences; i++) + { + Subdevice *pSubdevice = pGpu->pSubdeviceBackReferences[i]; + + // + // We've seen cases where pSubdevice is NULL implying that the + // pSubdeviceBackReferences[] array is being modified during this loop. + // Adding a NULL pointer check here is only a stopgap. See bug 3892382. + // + NV_ASSERT_OR_RETURN_VOID(pSubdevice != NULL); + + INotifier *pNotifier = staticCast(pSubdevice, INotifier); + + if (inotifyGetNotificationShare(pNotifier) == NULL) + continue; + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + // + // For SMC, partitioned engines have partition local IDs and events are + // registered using partition localId while RM deals with global Ids. + // Convert global to partition local if necessary + // + localNotifyType = notifyIndex; + localInfo32 = info32; + + if (pSubdevice->notifyActions[localNotifyType] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + continue; + } + + pEventNotification = inotifyGetNotificationList(pNotifier); + if (pEventNotification != NULL) + { + // ping any events on the list of type notifyIndex + osEventNotificationWithInfo(pGpu, pEventNotification, localNotifyType, localInfo32, info16, + pNotifyParams, notifyParamsSize); + } + + // reset if single shot notify action + if (pSubdevice->notifyActions[localNotifyType] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE) + { + pSubdevice->notifyActions[localNotifyType] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + } + } +} + +// +// For a particular gpu, find all the clients waiting for a particular event, +// fill in the notifier if allocated, and raise an event to the client if registered. +// +void +gpuGspPluginTriggeredEvent_IMPL +( + OBJGPU *pGpu, + NvU32 gfid, + NvU32 notifyIndex +) +{ +} + + +// +// Searches the Pid Array to see if the process this client belongs to is already +// in the list. 
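+// The scan is linear, but callers cap pidCount at
+// NV2080_CTRL_GPU_GET_PIDS_MAX_COUNT, so it stays short.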
+// +static NvBool +_gpuiIsPidSavedAlready +( + NvU32 pid, + NvU32 *pPidArray, + NvU32 pidCount +) +{ + NvU32 j; + + for (j = 0; j < pidCount; j++) + { + if (pid == pPidArray[j]) + return NV_TRUE; + } + return NV_FALSE; +} + +static NV_STATUS +_gpuConvertPid +( + RmClient *pClient, + NvU32 *pNsPid +) +{ + if (pClient->pOsPidInfo != NULL) + return osFindNsPid(pClient->pOsPidInfo, pNsPid); + + *pNsPid = pClient->ProcID; + return NV_OK; +} + +// +// Searches through clients to find processes with clients that have +// allocated an ElementType of class, defined by elementID. The return values +// are the array containing the PIDs for the processes and the count for the +// array. +// If a valid partitionRef is provided, the scope of search gets limited to a +// partition +// +NV_STATUS +gpuGetProcWithObject_IMPL +( + OBJGPU *pGpu, + NvU32 elementID, + NvU32 internalClassId, + NvU32 *pPidArray, + NvU32 *pPidArrayCount, + MIG_INSTANCE_REF *pRef +) +{ + NvU32 pidcount = 0; + NvHandle hClient; + Device *pDevice; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RsResourceRef *pResourceRef; + NV_STATUS status; + + NV_ASSERT_OR_RETURN((pPidArray != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pPidArrayCount != NULL), NV_ERR_INVALID_ARGUMENT); + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + NvBool elementInClient = NV_FALSE; + RS_ITERATOR iter; + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilege(*ppClient); + + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + hClient = pRsClient->hClient; + + // Skip reporting of kernel mode and internal RM clients + if ((privLevel >= RS_PRIV_LEVEL_KERNEL) && rmclientIsAdmin(pClient, privLevel)) + continue; + + if (_gpuiIsPidSavedAlready(pClient->ProcID, pPidArray, pidcount)) + continue; + + if (deviceGetByGpu(pRsClient, pGpu, NV_TRUE /* bAnyInGroup */, &pDevice) != NV_OK) + continue; + + iter = serverutilRefIter(hClient, NV01_NULL_OBJECT, 0, RS_ITERATE_DESCENDANTS, NV_TRUE); + + // + // At this point it has been determined that the client's subdevice + // is associated with the Gpu of interest, and it is not already + // included in the pidArray. In the call, objects belonging to the + // client are returned. If any object in the client belongs to + // the class being queried, then that process is added to the array. + // + while (clientRefIterNext(iter.pClient, &iter)) + { + pResourceRef = iter.pResourceRef; + + if (!objDynamicCastById(pResourceRef->pResource, internalClassId)) + continue; + + switch (internalClassId) + { + + case (classId(Device)): + case (classId(Subdevice)): + { + // + // It has been already verified that the client's subdevice + // or device is associated with the GPU of interest. + // Hence, Just add the client->pid into the list. + // + elementInClient = NV_TRUE; + break; + } + case (classId(MpsApi)): + { + elementInClient = NV_TRUE; + break; + } + default: + return NV_ERR_INVALID_ARGUMENT; + } + if (elementInClient) + { + status = _gpuConvertPid(pClient, &pPidArray[pidcount]); + if (status == NV_OK) + { + pidcount++; + } + else if (status != NV_ERR_OBJECT_NOT_FOUND) + { + return status; + } + + if (pidcount == NV2080_CTRL_GPU_GET_PIDS_MAX_COUNT) + { + NV_PRINTF(LEVEL_ERROR, + "Maximum PIDs reached. 
Returning.\n"); + + goto done; + } + + break; + } + } + } +done: + *pPidArrayCount = pidcount; + + return NV_OK; +} + +// +// _gpuCollectMemInfo +// +// Retrieves all the FB memory allocated for that client and returned as *pData. +// If the input parameter bIsGuestProcess is true, that means we are on VGX host +// and the caller is trying to find FB memory usage of a process which is +// running inside a VM. +// +static void +_gpuCollectMemInfo +( + NvHandle hClient, + NvHandle hDevice, + Heap *pTargetedHeap, + NV2080_CTRL_GPU_PID_INFO_VIDEO_MEMORY_USAGE_DATA *pData, + NvBool bIsGuestProcess, + NvBool bGlobalInfo +) +{ + RS_ITERATOR iter; + Memory *pMemory = NULL; + RsResourceRef *pResourceRef; + + NV_ASSERT_OR_RETURN_VOID(pData != NULL); + + iter = serverutilRefIter(hClient, NV01_NULL_OBJECT, 0, RS_ITERATE_DESCENDANTS, NV_TRUE); + + while (clientRefIterNext(iter.pClient, &iter)) + { + pResourceRef = iter.pResourceRef; + pMemory = dynamicCast(pResourceRef->pResource, Memory); + + if (!pMemory) + continue; + + // In case we are trying to find memory allocated by a process running + // on a VM - the case where isGuestProcess is true, only consider the + // memory : + // 1. which is allocated by the guest VM or by a process running in it. + // 2. if the memory is not tagged with NVOS32_TYPE_UNUSED type. + // Windows KMD and Linux X driver makes dummy allocations which is + // done using NV01_MEMORY_LOCAL_USER class with rmAllocMemory() + // function. + // On VGX, while passing this allocation in RPC, we use the memory + // type NVOS32_TYPE_UNUSED. So while calculating the per process FB + // usage, only consider the allocation if memory type is not + // NVOS32_TYPE_UNUSED. + if ((pResourceRef->externalClassId == NV01_MEMORY_LOCAL_USER || + pResourceRef->externalClassId == NV01_MEMORY_LIST_FBMEM || + pResourceRef->externalClassId == NV01_MEMORY_LIST_OBJECT ) && + (pMemory->categoryClassId == NV01_MEMORY_LOCAL_USER) && + (bGlobalInfo || (pMemory->pHeap == pTargetedHeap)) && + (RES_GET_HANDLE(pMemory->pDevice) == hDevice) && + (pMemory->pMemDesc != NULL) && + ((!bIsGuestProcess && (!memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_LIST_MEMORY))) || + (bIsGuestProcess && (memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED)) && (pMemory->Type != NVOS32_TYPE_UNUSED)))) + { + + if (pMemory->pMemDesc->DupCount == 1) + { + pData->memPrivate += pMemory->Length; + } + else if (pMemory->isMemDescOwner) + { + pData->memSharedOwned += pMemory->Length; + } + else + { + pData->memSharedDuped += pMemory->Length; + } + } + } +} + +static NvBool +_gpuMatchClientPid +( + RmClient *pClient, + NvU32 pid, + NvU32 subPid +) +{ + NvU32 clientNsPid; // pClient PID on current's namespace + + if (_gpuConvertPid(pClient, &clientNsPid) != NV_OK) + return NV_FALSE; + + return (((subPid == 0) && (clientNsPid == pid)) || + ((subPid != 0) && (clientNsPid == pid) && (pClient->SubProcessID == subPid))); +} + +// +// This function takes in the PID for the process of interest, and queries all +// clients for elementType. The 64-bit Data is updated by specific functions +// which handle queries for different elementTypes. 
+// +NV_STATUS +gpuFindClientInfoWithPidIterator_IMPL +( + OBJGPU *pGpu, + NvU32 pid, + NvU32 subPid, + NvU32 internalClassId, + NV2080_CTRL_GPU_PID_INFO_DATA *pData, + NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo, + MIG_INSTANCE_REF *pRef, + NvBool bGlobalInfo +) +{ + NvHandle hClient; + Device *pDevice; + NvHandle hDevice; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + Heap *pHeap = GPU_GET_HEAP(pGpu); + NvU32 computeInstanceId = PARTITIONID_INVALID; + NvU32 gpuInstanceId = PARTITIONID_INVALID; + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN((pid != 0), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pData != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pSmcInfo != NULL), NV_ERR_INVALID_ARGUMENT); + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + if (_gpuMatchClientPid(pClient, pid, subPid)) + { + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilege(pClient); + RS_ITERATOR it; + + hClient = pRsClient->hClient; + + // Skip reporting of kernel mode and internal RM clients + if ((privLevel >= RS_PRIV_LEVEL_KERNEL) && rmclientIsAdmin(pClient, privLevel)) + continue; + + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + + while (clientRefIterNext(pRsClient, &it)) + { + pDevice = dynamicCast(it.pResourceRef->pResource, Device); + + if (GPU_RES_GET_GPU(pDevice) != pGpu) + continue; + + hDevice = RES_GET_HANDLE(pDevice); + + switch (internalClassId) + { + case (classId(Memory)): + { + // TODO - + // When single process spanning across multiple GI or CI by creating multiple + // clients, RM needs to provide the unique list being used by the client + _gpuCollectMemInfo(hClient, hDevice, pHeap, + &pData->vidMemUsage, ((subPid != 0) ? NV_TRUE : NV_FALSE), + bGlobalInfo); + break; + } + default: + return NV_ERR_INVALID_ARGUMENT; + } + } + } + } + + pSmcInfo->computeInstanceId = computeInstanceId; + pSmcInfo->gpuInstanceId = gpuInstanceId; + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c b/src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c new file mode 100644 index 0000000..30197b6 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief T234D / DCE client specific kernel stubs + */ + +#include "core/core.h" +#include "gpu/gpu.h" + +NV_STATUS +gpuGetNameString_T234D +( + OBJGPU *pGpu, + NvU32 type, + void *nameStringBuffer +) +{ + const char name[] = "T234D"; + const NvU32 inputLength = NV2080_GPU_MAX_NAME_STRING_LENGTH; + + if (type == NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII) + { + portStringCopy(nameStringBuffer, inputLength, name, sizeof(name)); + } + else + { + portStringConvertAsciiToUtf16(nameStringBuffer, inputLength, name, sizeof(name)); + } + + return NV_OK; +} + +NV_STATUS +gpuGetShortNameString_T234D +( + OBJGPU *pGpu, + NvU8 *nameStringBuffer +) +{ + return gpuGetNameString_T234D(pGpu, NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII, nameStringBuffer); +} + +NvU32 gpuGetSimulationModeHal_T234D(OBJGPU *pGpu) +{ + return NV_SIM_MODE_INVALID; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_timeout.c b/src/nvidia/src/kernel/gpu/gpu_timeout.c new file mode 100644 index 0000000..1fa0f67 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_timeout.c @@ -0,0 +1,601 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief GPU Timeout related routines. + */ + +/* ------------------------ Includes ---------------------------------------- */ + +#include "lib/base_utils.h" +#include "gpu/gpu.h" +#include "gpu/timer/objtmr.h" +#include "nvrm_registry.h" +#include "core/thread_state.h" +#include "core/locks.h" +#include "gpu_mgr/gpu_mgr.h" + +/* ------------------------ Public Functions ------------------------------- */ + +/*! + * @brief Initializes default timeout values from a provided GPU. 
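+ *
+ * Baseline values come from osGetTimeoutParams(); registry overrides are
+ * applied separately in timeoutRegistryOverride().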
+ */ +void +timeoutInitializeGpuDefault +( + TIMEOUT_DATA *pTD, + OBJGPU *pGpu +) +{ + NvU32 timeoutDefault; + + pTD->pGpu = pGpu; + + // Set default timeout mode before loading HAL state + osGetTimeoutParams(pGpu, &timeoutDefault, &(pTD->scale), &(pTD->defaultFlags)); + if (!pTD->bDefaultOverridden) + { + pTD->defaultResetus = timeoutDefault; + pTD->defaultus = timeoutDefault; + pTD->bScaled = NV_FALSE; + } + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + // + // vGPU: + // + // Since vGPU does all real hardware management in the host, use the OS + // timer by default in the guest OS (where IS_VIRTUAL(pGpu) is true), + // as it (hopefully) tracks a VM's actual time executing + // (vs. reading the HW PTIMER which'll be too fast). + // SOC NvDisplay: + // SOC NvDisplay doesn't have HW timer so use OSTIMER as default + // + pTD->defaultFlags = GPU_TIMEOUT_FLAGS_OSTIMER; + } + + // Using this boolean to ensure defaultus isn't scaled more than once. + if (!pTD->bScaled) + { + pTD->defaultus = gpuScaleTimeout(pGpu, pTD->defaultus); + pTD->bScaled = NV_TRUE; + } + + if (!pTD->bDefaultResetFSMStateTransitionOverridden) + { + // + // Add a default delay. This is a worst case estimate. + // A delay of based on HW's suggestion. + // See bug 5020859 comment 40 and bug 200636529 comment 20 + // for the delay calculation. + // This delay can be overwritten by regkey RmResetFsmStateTimeoutUs. + // + pTD->defaultResetFSMStateTransitionUs = gpuGetDefaultResetFSMStateTransitionUs_HAL(pGpu); + } + + // + // Note we need to call threadStateResetTimeout() now that the timeout + // mechanism and values are known to allow threadStateCheckTimeout() + // to work after this point during init. + // + threadStateInitTimeout(pGpu, pTD->defaultus, pTD->defaultFlags); + threadStateResetTimeout(pGpu); +} + +/*! + * @brief Applies external timeout override based on registry values. + */ +void +timeoutRegistryOverride +( + TIMEOUT_DATA *pTD, + OBJGPU *pGpu +) +{ + NvU32 data32 = 0; + + NvU32 bug5203024OverrideTimeouts = ( + (osReadRegistryDword(pGpu, NV_REG_STR_RM_BUG5203024_OVERRIDE_TIMEOUT, + &data32) == NV_OK) ? + data32 : + 0); + + pGpu->bug5203024OverrideTimeouts = bug5203024OverrideTimeouts; + + NvBool bOverrideDefaultTimeout = (DRF_VAL(_REG_STR, + _RM_BUG5203024_OVERRIDE_TIMEOUT, + _FLAGS_SET_RM_DEFAULT_TIMEOUT, + bug5203024OverrideTimeouts) == 1); + + // Override timeout value + if (bOverrideDefaultTimeout || + ((osReadRegistryDword(pGpu, + NV_REG_STR_RM_DEFAULT_TIMEOUT_MS, + &data32) == NV_OK) && + (data32 != 0))) + { + if (bOverrideDefaultTimeout) + { + data32 = DRF_VAL(_REG_STR, + _RM_BUG5203024_OVERRIDE_TIMEOUT, + _VALUE_MS, + bug5203024OverrideTimeouts); + } + + // Handle 32-bit overflow. 
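+        // A millisecond value above NV_U32_MAX / 1000 (about 71.6 minutes)
+        // would wrap when converted to microseconds, so clamp to NV_U32_MAX
+        // instead.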
+ if (data32 > (NV_U32_MAX / 1000)) + { + pTD->defaultus = NV_U32_MAX; + pTD->defaultResetus = NV_U32_MAX; + } + else + { + // Convert to [us] + pTD->defaultus = data32 * 1000; + pTD->defaultResetus = data32 * 1000; + } + pTD->bDefaultOverridden = NV_TRUE; + NV_PRINTF(LEVEL_ERROR, "Overriding default timeout to 0x%08x\n", + pTD->defaultus); + } + + if (IS_SIMULATION(pGpu) && + (osReadRegistryDword(pGpu, + NV_REG_STR_RM_RESET_FSM_STATE_TRANSITION_TIMEOUT_US, + &data32) == NV_OK) && + (data32 > gpuGetDefaultResetFSMStateTransitionUs_HAL(pGpu))) + { + // The default delay value can only be overwritten by a greater value + pTD->defaultResetFSMStateTransitionUs = data32; + pTD->bDefaultResetFSMStateTransitionOverridden = NV_TRUE; + NV_PRINTF(LEVEL_ERROR, "Overriding default timeout for reset FSM state transition to 0x%08x\n", + pTD->defaultResetFSMStateTransitionUs); + } + + // Override timeout flag values + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS, + &data32) == NV_OK) + { + switch (data32) + { + case NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS_OSDELAY: + { + pTD->defaultFlags = GPU_TIMEOUT_FLAGS_OSDELAY; + break; + } + + case NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS_OSTIMER: + { + pTD->defaultFlags = GPU_TIMEOUT_FLAGS_OSTIMER; + break; + } + + default: + { + NV_PRINTF(LEVEL_ERROR, "Unknown TIMEOUT_FLAGS value: 0x%08x\n", + data32); + NV_ASSERT(0); + } + } + + NV_PRINTF(LEVEL_ERROR, "Overriding default flags to 0x%08x\n", + pTD->defaultFlags); + } +} + +/*! + * @brief Applies external timeout override. + */ +void +timeoutOverride +( + TIMEOUT_DATA *pTD, + NvBool bOverride, + NvU32 timeoutMs +) +{ + pTD->bDefaultOverridden = bOverride; + + pTD->defaultus = bOverride ? (timeoutMs * 1000) : pTD->defaultResetus; +} + +/*! + * @brief Initialize the RMTIMEOUT structure with the selected timeout scheme. + */ +void +timeoutSet +( + TIMEOUT_DATA *pTD, + RMTIMEOUT *pTimeout, + NvU32 timeoutUs, + NvU32 flags +) +{ + OBJTMR *pTmr; + NvU64 timeInNs; + NvU64 timeoutNs; + + portMemSet(pTimeout, 0, sizeof(*pTimeout)); + + // + // Note that if GPU_TIMEOUT_DEFAULT is used we will go through + // threadStateCheckTimeout rather than timeoutCheck as we do + // not want to have "stacked" gpuSetTimeouts. The intent of + // GPU_TIMEOUT_DEFAULT was to cover the *entire* RM API stack. + // If GPU_TIMEOUT_DEFAULT was specified, this is essentially a + // NULL operation other than setting the flags to route us to + // threadStateCheckTimeout. This can be overridden by + // setting GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE. + // + pTimeout->flags = flags; + if ((flags == 0) || (flags & GPU_TIMEOUT_FLAGS_DEFAULT) || + !(flags & (GPU_TIMEOUT_FLAGS_OSTIMER | GPU_TIMEOUT_FLAGS_OSDELAY | + GPU_TIMEOUT_FLAGS_TMR | GPU_TIMEOUT_FLAGS_TMRDELAY))) + { + pTimeout->flags |= pTD->defaultFlags; + } + + if (timeoutUs == GPU_TIMEOUT_DEFAULT) + { + timeoutUs = pTD->defaultus; + + // + // Use the ThreadState by default if GPU_TIMEOUT_DEFAULT was specified + // unless we were told explicitly not to. 
+ // ThreadState only supports OSTIMER and OSDELAY + // + if (!(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE) && + (pTimeout->flags & (GPU_TIMEOUT_FLAGS_OSTIMER | GPU_TIMEOUT_FLAGS_OSDELAY))) + { + pTimeout->flags |= GPU_TIMEOUT_FLAGS_USE_THREAD_STATE; + } + } + + // Set end time for elapsed time methods + timeoutNs = (NvU64)timeoutUs * 1000; + if (pTimeout->flags & GPU_TIMEOUT_FLAGS_OSTIMER) + { + // + // For small timeouts (timeout durations on the order of magnitude of + // the OS tick resolution), starting the timeout near the end of a tick + // could cause a premature timeout since the start time is determined + // by the start of the tick. Mitigate this by always padding the + // timeout using the OS tick resolution, to bump us to the next tick. + // + timeoutNs += osGetMonotonicTickResolutionNs(); + + timeInNs = osGetMonotonicTimeNs(); + + pTimeout->pTmrGpu = NULL; + pTimeout->timeout = timeInNs + timeoutNs; + } + else if ((pTimeout->flags & GPU_TIMEOUT_FLAGS_TMR) || + (pTimeout->flags & GPU_TIMEOUT_FLAGS_TMRDELAY)) + { + OBJGPU *pGpu = pTD->pGpu; + NV_ASSERT_OR_RETURN_VOID(pGpu != NULL); + + OBJGPU *pParentGpu = gpumgrGetParentGPU(pGpu); + + // + // Set timer GPU to primary GPU for accurate timeout with SLI loop. But only + // use the primary GPU if it is in full power mode or in the process of resuming. + // Also don't use the primary if it is in full chip reset. + // + if (gpumgrIsParentGPU(pGpu) || + ((gpuIsGpuFullPower(pParentGpu) == NV_FALSE) && + !pParentGpu->getProperty(pParentGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) || + pParentGpu->getProperty(pParentGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET)) + { + pTimeout->pTmrGpu = pGpu; + } + else + { + pTimeout->pTmrGpu = pParentGpu; + } + + pTmr = GPU_GET_TIMER(pTimeout->pTmrGpu); + NV_ASSERT_OR_RETURN_VOID(pTmr != NULL); + + if (pTimeout->flags & GPU_TIMEOUT_FLAGS_TMR) + { + + // nanoseconds + tmrGetCurrentTime(pTmr, &pTimeout->timeout); + pTimeout->timeout += timeoutNs; + } + else // GPU_TIMEOUT_FLAGS_TMRDELAY + { + pTimeout->timeout = timeoutUs; + } + } + else + { + pTimeout->pTmrGpu = NULL; + pTimeout->timeout = timeoutUs; + } +} + +/*! + * We typically only use this code if a time other than GPU_TIMEOUT_DEFAULT + * was specified. For GPU_TIMEOUT_DEFAULT we use threadStateCheckTimeout. + * The logic in the _threadNodeCheckTimeout() should closely resemble that + * of the _checkTimeout(). + */ +static NV_STATUS +_checkTimeout +( + RMTIMEOUT *pTimeout +) +{ + NV_STATUS status = NV_OK; + OBJTMR *pTmr; + NvU64 current; + NvU64 timeInNs; + + if (pTimeout->flags & GPU_TIMEOUT_FLAGS_OSTIMER) + { + timeInNs = osGetMonotonicTimeNs(); + if (timeInNs >= pTimeout->timeout) + { + if (!(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_JOURNAL_LOG)) + { + NV_PRINTF(LEVEL_INFO, "OS elapsed %llx >= %llx\n", + timeInNs, pTimeout->timeout); + } + status = NV_ERR_TIMEOUT; + } + } + else if (pTimeout->flags & GPU_TIMEOUT_FLAGS_OSDELAY) + { + osDelayUs(100); + + // + // TODO: Bug: 3312158 - Isolate the fix timeout logic to emulation. + // This is because of the numerous timeout issues exposed in DVS + // Emulation requires this to make sure we are not wasting emulation resources + // by waiting for timeouts too long. + // Once DVS issues are fixed, this fix will be enabled for all platforms. + // + if ((pTimeout->pTmrGpu != NULL) && (IS_EMULATION(pTimeout->pTmrGpu))) + { + // + // Adjust the remaining time. 
+            // Note that the remaining time is in nanosecond units
+            // for GPU_TIMEOUT_FLAGS_OSDELAY
+            //
+            pTimeout->timeout -= NV_MIN(100ULL * 1000ULL, pTimeout->timeout);
+        }
+        else
+        {
+            pTimeout->timeout -= NV_MIN(100ULL, pTimeout->timeout);
+        }
+
+        if (pTimeout->timeout == 0)
+        {
+            if (!(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_JOURNAL_LOG))
+            {
+                NV_PRINTF(LEVEL_INFO, "OS timeout == 0\n");
+            }
+            status = NV_ERR_TIMEOUT;
+        }
+    }
+    else if (pTimeout->flags & GPU_TIMEOUT_FLAGS_TMR)
+    {
+        NV_ASSERT_OR_RETURN(pTimeout->pTmrGpu != NULL, NV_ERR_INVALID_STATE);
+        if (!API_GPU_ATTACHED_SANITY_CHECK(pTimeout->pTmrGpu))
+            return NV_ERR_TIMEOUT;
+
+        pTmr = GPU_GET_TIMER(pTimeout->pTmrGpu);
+        NV_ASSERT_OR_RETURN(pTmr != NULL, NV_ERR_INVALID_STATE);
+
+        tmrDelay(pTmr, 5ULL * 1000ULL);
+        tmrGetCurrentTime(pTmr, &current);
+
+        if (current >= pTimeout->timeout)
+        {
+            if (!(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_JOURNAL_LOG))
+            {
+                NV_PRINTF(LEVEL_ERROR, "ptmr elapsed %llx >= %llx\n",
+                          current, pTimeout->timeout);
+            }
+            status = NV_ERR_TIMEOUT;
+        }
+    }
+    else if (pTimeout->flags & GPU_TIMEOUT_FLAGS_TMRDELAY)
+    {
+        NV_ASSERT_OR_RETURN(pTimeout->pTmrGpu != NULL, NV_ERR_INVALID_STATE);
+        if (!API_GPU_ATTACHED_SANITY_CHECK(pTimeout->pTmrGpu))
+            return NV_ERR_TIMEOUT;
+
+        pTmr = GPU_GET_TIMER(pTimeout->pTmrGpu);
+        NV_ASSERT_OR_RETURN(pTmr != NULL, NV_ERR_INVALID_STATE);
+
+        tmrDelay(pTmr, 5ULL * 1000ULL);
+        pTimeout->timeout -= NV_MIN(5, pTimeout->timeout);
+
+        if (pTimeout->timeout == 0)
+        {
+            if (!(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_JOURNAL_LOG))
+            {
+                NV_PRINTF(LEVEL_INFO, "ptmr timeout == 0\n");
+            }
+            status = NV_ERR_TIMEOUT;
+        }
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_ERROR, "Invalid timeout flags 0x%08x\n",
+                  pTimeout->flags);
+        DBG_BREAKPOINT();
+        status = NV_ERR_INVALID_STATE;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Check if the passed-in RMTIMEOUT struct has expired.
+ */
+NV_STATUS
+timeoutCheck
+(
+    TIMEOUT_DATA *pTD,
+    RMTIMEOUT *pTimeout,
+    NvU32 lineNum
+)
+{
+    OBJGPU *pGpu = pTD->pGpu;
+    NV_STATUS status = NV_OK;
+
+    NV_ASSERT(pTimeout != NULL);
+
+    if ((pGpu != NULL) && API_GPU_IN_RESET_SANITY_CHECK(pGpu))
+        return NV_ERR_TIMEOUT;
+
+    if (!(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_CPU_YIELD))
+    {
+        threadStateYieldCpuIfNecessary(pGpu, !!(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_JOURNAL_LOG));
+    }
+
+    //
+    // Note that if GPU_TIMEOUT_DEFAULT is used we will go through
+    // threadStateCheckTimeout rather than timeoutCheck as we do
+    // not want to have "stacked" gpuSetTimeouts. The intent of
+    // GPU_TIMEOUT_DEFAULT is to cover the *entire* RM API stack.
+    // If we are going through the case below, we should have just
+    // called threadStateCheckTimeout directly rather than
+    // timeoutCheck.
+    //
+
+    // If a local timeout check was intended, check that first.
+    if (!(pTimeout->flags & GPU_TIMEOUT_FLAGS_USE_THREAD_STATE))
+    {
+        status = _checkTimeout(pTimeout);
+        if (status == NV_ERR_TIMEOUT)
+        {
+            // Mark that this Timeout is the result of a local timeout
+            pTimeout->flags |= GPU_TIMEOUT_FLAGS_STATUS_LOCAL_TIMEOUT;
+        }
+    }
+
+    //
+    // Always check for the thread timeout in addition to any local timeout
+    // unless we have EXPLICITLY been instructed not to by a timeout flag.
+ // + if ((status != NV_ERR_TIMEOUT) && !(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE)) + { + status = threadStateCheckTimeout(pGpu, NULL /*pElapsedTime*/); + + if (status == NV_ERR_TIMEOUT) + { + // Mark that this Timeout is the result of ThreadState + pTimeout->flags |= GPU_TIMEOUT_FLAGS_STATUS_THREAD_STATE_TIMEOUT; + } + else if (status != NV_OK) + { + // Try the local timeout as fallback, unless it was already checked. + if (pTimeout->flags & GPU_TIMEOUT_FLAGS_USE_THREAD_STATE) + { + status = _checkTimeout(pTimeout); + if (status == NV_ERR_TIMEOUT) + { + // Mark that this Timeout is the result of a local timeout + pTimeout->flags |= GPU_TIMEOUT_FLAGS_STATUS_LOCAL_TIMEOUT; + } + } + } + } + + // Throttle priority of boosted threads if necessary + threadPriorityThrottle(); + + // Log the Timeout in the RM Journal + if ( (status == NV_ERR_TIMEOUT) && + !(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_JOURNAL_LOG)) + { + NvU64 funcAddr = (NvU64) (NV_RETURN_ADDRESS()); + threadStateLogTimeout(pGpu, funcAddr, lineNum); + } + + return status; +} + +/*! + * @brief Wait for a condition function to return NV_TRUE or timeout. + * + * @param[in] pTD Timeout data + * @param[in] pTimeout RM timeout structure to be used, or NULL to use default timeout + * @param[in] pCondFunc Function implementing condition check to wait for + * @param[in] pCondData An optional param to @ref pCondFunc (NULL if unused) + * + * @return NV_OK Condition met within the provided timeout period. + * @return NV_ERR_TIMEOUT Timed out while waiting for the condition. + * + * @note This interface addresses the recurring problem of reporting time-out + * when condition is actually met. That can happen since RM can get + * preempted by the OS any time during the execution. It is achieved by + * one additional condition check before the exit in case when timeout + * has been detected. + */ +NV_STATUS +timeoutCondWait +( + TIMEOUT_DATA *pTD, + RMTIMEOUT *pTimeout, + GpuWaitConditionFunc *pCondFunc, + void *pCondData, + NvU32 lineNum +) +{ + OBJGPU *pGpu = pTD->pGpu; + NV_STATUS status = NV_OK; + RMTIMEOUT timeout; + + if (pTimeout == NULL) + { + timeoutSet(pTD, &timeout, GPU_TIMEOUT_DEFAULT, 0); + pTimeout = &timeout; + } + + while (!pCondFunc(pGpu, pCondData)) + { + osSpinLoop(); + + status = timeoutCheck(pTD, pTimeout, lineNum); + if (status != NV_OK) + { + if ((status == NV_ERR_TIMEOUT) && + pCondFunc(pGpu, pCondData)) + { + status = NV_OK; + } + break; + } + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_user_shared_data.c b/src/nvidia/src/kernel/gpu/gpu_user_shared_data.c new file mode 100644 index 0000000..c66ef31 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_user_shared_data.c @@ -0,0 +1,565 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/gpu_user_shared_data.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/subdevice/subdevice.h" +#include "os/os.h" +#include "rmapi/client.h" +#include "rmapi/rmapi.h" +#include "rmapi/rs_utils.h" +#include "nvrm_registry.h" +#include "class/cl00de.h" +#include "class/cl003e.h" // NV01_MEMORY_SYSTEM +#include "class/cl00de.h" +#include "ctrl/ctrl2080/ctrl2080gpu.h" +#include "gpu_mgr/gpu_db.h" +#include "gpu_mgr/gpu_mgr.h" +#include "core/locks.h" + +static NV_STATUS _gpushareddataInitGsp(OBJGPU *pGpu); +static void _gpushareddataDestroyGsp(OBJGPU *pGpu); +static NV_STATUS _gpushareddataSendDataPollRpc(OBJGPU *pGpu, NvU64 polledDataMask); +static NV_STATUS _gpushareddataRequestDataPoll(GpuUserSharedData *pData, NvU64 polledDataMask); +static inline void _gpushareddataUpdateSeqOpen(volatile NvU64 *pSeq); +static inline void _gpushareddataUpdateSeqClose(volatile NvU64 *pSeq); + +static inline +NvBool +_rusdPollingSupported +( + OBJGPU *pGpu +) +{ + // + // RUSD polling is disabled on non-GSP for pre-GA102 due to collisions + // with VSYNC interrupt on high refresh rate monitors. See Bug 4432698. + // For GA102+, the RPC to PMU are replaced by PMUMON RMCTRLs. + // + return ((!IS_VIRTUAL(pGpu)) && + (pGpu->userSharedData.pollingRegistryOverride != NV_REG_STR_RM_DEBUG_RUSD_POLLING_FORCE_DISABLE) && + (IS_GSP_CLIENT(pGpu) || + (pGpu->userSharedData.pollingRegistryOverride == NV_REG_STR_RM_DEBUG_RUSD_POLLING_FORCE_ENABLE) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_RUSD_POLLING_SUPPORT_MONOLITHIC))); +} + +NV_STATUS +gpushareddataConstruct_IMPL +( + GpuUserSharedData *pData, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + Memory *pMemory = staticCast(pData, Memory); + OBJGPU *pGpu = pMemory->pGpu; // pGpu is initialized in the Memory class constructor + MEMORY_DESCRIPTOR **ppMemDesc = &(pGpu->userSharedData.pMemDesc); + NV00DE_ALLOC_PARAMETERS *pAllocParams = (NV00DE_ALLOC_PARAMETERS*)(pParams->pAllocParams); + + NV_ASSERT_OR_RETURN(!RMCFG_FEATURE_PLATFORM_GSP, NV_ERR_NOT_SUPPORTED); + + if (IS_VIRTUAL(pGpu)) + return NV_ERR_NOT_SUPPORTED; + + if (!_rusdPollingSupported(pGpu) && (pAllocParams->polledDataMask != 0U)) + return NV_ERR_NOT_SUPPORTED; + + if (RS_IS_COPY_CTOR(pParams)) + return NV_OK; + + if (*ppMemDesc == NULL) + return NV_ERR_NOT_SUPPORTED; + + if (pAllocParams->polledDataMask != 0U) + { + NV_ASSERT_OK_OR_RETURN(_gpushareddataRequestDataPoll(pData, pAllocParams->polledDataMask)); + } + + NV_ASSERT_OK_OR_RETURN(memConstructCommon(pMemory, + NV01_MEMORY_SYSTEM, 0, *ppMemDesc, 0, NULL, 0, 0, 0, 0, + NVOS32_MEM_TAG_NONE, NULL)); + memdescAddRef(pGpu->userSharedData.pMemDesc); + + return NV_OK; +} + +void +gpushareddataDestruct_IMPL(GpuUserSharedData *pData) +{ + Memory *pMemory = staticCast(pData, Memory); + OBJGPU *pGpu = pMemory->pGpu; + + NV_ASSERT_OR_RETURN_VOID(!RMCFG_FEATURE_PLATFORM_GSP); + + if (!pMemory->bConstructed || (pMemory->pMemDesc == NULL)) + { + return; + } + + _gpushareddataRequestDataPoll(pData, 0U); + + 
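+    //
+    // Passing a zero mask above withdraws this object's polling request from
+    // the global poll union; next, drop the reference taken on the shared
+    // memdesc in gpushareddataConstruct_IMPL before tearing down the Memory.
+    //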
+    memdescRemoveRef(pGpu->userSharedData.pMemDesc);
+    memDestructCommon(pMemory);
+}
+
+// Called before starting a non-polled data write, changes seq valid->invalid
+static inline void
+_gpushareddataUpdateSeqOpen
+(
+    volatile NvU64 *pSeq
+)
+{
+    NvU64 seqVal;
+
+    // Initialize seq to RUSD_SEQ_START at first write. If never written before, seq is treated as an invalid timestamp
+    if (MEM_RD64(pSeq) == 0LLU)
+    {
+        portAtomicExSetU64(pSeq, RUSD_SEQ_START + 1);
+    }
+    else
+    {
+        portAtomicExIncrementU64(pSeq);
+    }
+
+    portAtomicMemoryFenceStore();
+
+    seqVal = MEM_RD64(pSeq);
+
+    NV_ASSERT(!RUSD_SEQ_DATA_VALID(seqVal));
+}
+
+// Called after finishing a non-polled data write, changes seq invalid->valid
+static inline void
+_gpushareddataUpdateSeqClose
+(
+    volatile NvU64 *pSeq
+)
+{
+    NvU64 seqVal;
+
+    portAtomicExIncrementU64(pSeq);
+    portAtomicMemoryFenceStore();
+
+    seqVal = MEM_RD64(pSeq);
+
+    NV_ASSERT(RUSD_SEQ_DATA_VALID(seqVal));
+}
+
+
+NvBool
+gpushareddataCanCopy_IMPL(GpuUserSharedData *pData)
+{
+    return NV_TRUE;
+}
+
+NV00DE_SHARED_DATA * gpushareddataWriteStart_INTERNAL(OBJGPU *pGpu, NvU64 offset)
+{
+    NV00DE_SHARED_DATA *pSharedData = (NV00DE_SHARED_DATA *) pGpu->userSharedData.pMapBuffer;
+
+    if (pSharedData == NULL)
+    {
+        pSharedData = &pGpu->userSharedData.data;
+    }
+
+    _gpushareddataUpdateSeqOpen((volatile NvU64*)(((NvU8*)pSharedData) + offset));
+
+    return pSharedData;
+}
+
+void gpushareddataWriteFinish_INTERNAL(OBJGPU *pGpu, NvU64 offset)
+{
+    NV00DE_SHARED_DATA *pSharedData = (NV00DE_SHARED_DATA *) pGpu->userSharedData.pMapBuffer;
+
+    if (pSharedData == NULL)
+    {
+        pSharedData = &pGpu->userSharedData.data;
+    }
+
+    _gpushareddataUpdateSeqClose((volatile NvU64*)(((NvU8*)pSharedData) + offset));
+}
+
+static void
+_gpushareddataDestroyGsp
+(
+    OBJGPU *pGpu
+)
+{
+    NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS params = { 0 };
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+    NV_STATUS status;
+
+    params.bInit = NV_FALSE;
+
+    // Free Memdesc on GSP-side
+    NV_CHECK_OK(status, LEVEL_ERROR,
+                pRmApi->Control(pRmApi, pGpu->hInternalClient,
+                                pGpu->hInternalSubdevice,
+                                NV2080_CTRL_CMD_INTERNAL_INIT_USER_SHARED_DATA,
+                                &params, sizeof(params)));
+}
+
+static NV_STATUS
+_gpushareddataInitGsp
+(
+    OBJGPU *pGpu
+)
+{
+    NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS params = { 0 };
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+    params.bInit = NV_TRUE;
+    params.physAddr = memdescGetPhysAddr(pGpu->userSharedData.pMemDesc, AT_GPU, 0);
+
+    // Link up Memdesc on GSP-side
+    NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, pGpu->hInternalClient,
+                                           pGpu->hInternalSubdevice,
+                                           NV2080_CTRL_CMD_INTERNAL_INIT_USER_SHARED_DATA,
+                                           &params, sizeof(params)));
+
+    if (pGpu->userSharedData.pollingRegistryOverride == NV_REG_STR_RM_DEBUG_RUSD_POLLING_FORCE_ENABLE)
+    {
+        // If polling is forced always on, start polling during init and never stop
+        return _gpushareddataSendDataPollRpc(pGpu, ~0ULL);
+    }
+
+    return NV_OK;
+}
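+/*
+ * Editorial, illustrative sketch (not part of this change; member names are
+ * placeholders). _gpushareddataUpdateSeqOpen/Close above implement the
+ * writer half of a seqlock: the counter is moved to a value that
+ * RUSD_SEQ_DATA_VALID() rejects before a field is written, and back to a
+ * valid value afterwards. A user-space reader of the NV00DE mapping is
+ * expected to retry around that window, along the lines of:
+ *
+ *     NvU64 seq;
+ *     do
+ *     {
+ *         seq   = pEntry->seqField;      // placeholder member names
+ *         value = pEntry->payload;
+ *     } while (!RUSD_SEQ_DATA_VALID(seq) || (pEntry->seqField != seq));
+ */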
+static NV_STATUS
+_gpushareddataInitPollingFrequency
+(
+    OBJGPU *pGpu
+)
+{
+    NV2080_CTRL_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL_PARAMS params = { 0 };
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+    //
+    // Let Kernel RM decide the polling frequency.
+    // RUSD currently does not support VGPU, skip the RMCTRL on vGPU.
+    //
+    if (!RMCFG_FEATURE_PLATFORM_GSP && !IS_VIRTUAL(pGpu))
+    {
+        if (!pGpu->userSharedData.bPollFrequencyOverridden && gpuIsTeslaBranded(pGpu))
+            pGpu->userSharedData.pollingFrequencyMs = NV_REG_STR_RM_RUSD_POLLING_INTERVAL_TESLA;
+
+        params.polledDataMask = pGpu->userSharedData.lastPolledDataMask;
+        params.pollFrequencyMs = pGpu->userSharedData.pollingFrequencyMs;
+
+        NV_ASSERT_OK_OR_RETURN(
+            pRmApi->Control(pRmApi, pGpu->hInternalClient,
+                            pGpu->hInternalSubdevice,
+                            NV2080_CTRL_CMD_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL,
+                            &params, sizeof(params)));
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS
+gpuCreateRusdMemory_IMPL
+(
+    OBJGPU *pGpu
+)
+{
+    NV_STATUS status = NV_OK;
+    MEMORY_DESCRIPTOR **ppMemDesc = &(pGpu->userSharedData.pMemDesc);
+
+    // RUSD is not yet supported when CPU CC is enabled. See bug 4148522.
+    if ((sysGetStaticConfig(SYS_GET_INSTANCE()))->bOsCCEnabled)
+        return NV_OK;
+
+    // Create a kernel-side mapping for writing RUSD data
+    NV_ASSERT_OK_OR_RETURN(memdescCreate(ppMemDesc, pGpu, sizeof(NV00DE_SHARED_DATA), 0, NV_TRUE,
+                                         ADDR_SYSMEM, NV_MEMORY_CACHED, MEMDESC_FLAGS_USER_READ_ONLY));
+
+    memdescTagAlloc(status, NV_FB_ALLOC_RM_INTERNAL_OWNER_RUSD_BUFFER, (*ppMemDesc));
+    NV_ASSERT_OK_OR_GOTO(status, status, err);
+
+    pGpu->userSharedData.pMapBuffer = memdescMapInternal(pGpu, *ppMemDesc, TRANSFER_FLAGS_NONE);
+    if (pGpu->userSharedData.pMapBuffer == NULL)
+    {
+        status = NV_ERR_MEMORY_ERROR;
+        goto err;
+    }
+
+    portMemSet(pGpu->userSharedData.pMapBuffer, 0, sizeof(NV00DE_SHARED_DATA));
+
+    if (IS_GSP_CLIENT(pGpu))
+    {
+        // Init system memdesc on GSP
+        _gpushareddataInitGsp(pGpu);
+    }
+
+    _gpushareddataInitPollingFrequency(pGpu);
+
+    return NV_OK;
+
+err: // Only for global memdesc construct fail cleanup
+    memdescFree(*ppMemDesc);
+    memdescDestroy(*ppMemDesc);
+    *ppMemDesc = NULL;
+    return status;
+}
+
+void
+gpuDestroyRusdMemory_IMPL
+(
+    OBJGPU *pGpu
+)
+{
+    GpuSharedDataMap *pData = &pGpu->userSharedData;
+
+    if (pData->pMemDesc == NULL)
+        return;
+
+    if (IS_GSP_CLIENT(pGpu))
+    {
+        // Destroy system memdesc on GSP
+        _gpushareddataDestroyGsp(pGpu);
+    }
+
+    NV_ASSERT(pGpu->userSharedData.pMemDesc->RefCount == 1);
+
+    memdescUnmapInternal(pGpu, pData->pMemDesc, TRANSFER_FLAGS_NONE);
+    memdescFree(pData->pMemDesc);
+    memdescDestroy(pData->pMemDesc);
+    pData->pMemDesc = NULL;
+    pData->pMapBuffer = NULL;
+}
+
+static NV_STATUS
+_gpushareddataSendDataPollRpc
+(
+    OBJGPU *pGpu,
+    NvU64 polledDataMask
+)
+{
+    NV2080_CTRL_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL_PARAMS params;
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+    NV_STATUS status;
+
+    if (polledDataMask == pGpu->userSharedData.lastPolledDataMask)
+        return NV_OK; // Nothing to do
+
+    portMemSet(&params, 0, sizeof(params));
+
+    params.polledDataMask = polledDataMask;
+
+    // Send updated data request to GSP
+    status = pRmApi->Control(pRmApi, pGpu->hInternalClient,
+                             pGpu->hInternalSubdevice,
+                             NV2080_CTRL_CMD_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL,
+                             &params, sizeof(params));
+    NV_ASSERT_OR_RETURN((status == NV_OK) || (status == NV_ERR_GPU_IN_FULLCHIP_RESET), status);
+    if (status == NV_ERR_GPU_IN_FULLCHIP_RESET)
+        return status;
+    pGpu->userSharedData.lastPolledDataMask = polledDataMask;
+
+    return NV_OK;
+}
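+/*
+ * Editorial, illustrative sketch (assumption; the exact mask bits and alloc
+ * entry point are defined by the client API and cl00de.h, and are not
+ * confirmed here). A client opts in to polled data by allocating the NV00DE
+ * class with a non-zero mask, which funnels into
+ * _gpushareddataRequestDataPoll() below:
+ *
+ *     NV00DE_ALLOC_PARAMETERS allocParams = { 0 };
+ *     allocParams.polledDataMask = pollMask; // mask bits from cl00de.h
+ *     // ... allocate the 0x00de class under a subdevice with allocParams;
+ *     // gpushareddataConstruct_IMPL then forwards the mask to the poll
+ *     // logic, which coalesces it with all other clients' masks.
+ */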
+static NvU64 _gpushareddataGetPollDataUnion(OBJGPU *pGpu)
+{
+    NvU64 polledDataUnion = 0U;
+    RmClient **ppClient;
+
+    // Iterate over all clients to get all RUSD objects
+    for (ppClient = serverutilGetFirstClientUnderLock();
+         ppClient != NULL;
+         ppClient = serverutilGetNextClientUnderLock(ppClient))
+    {
+        // Collect mask of all data requested by any existing RUSD objects
+        RS_ITERATOR iter = clientRefIter(staticCast(*ppClient, RsClient), NULL,
+                                         classId(GpuUserSharedData), RS_ITERATE_DESCENDANTS, NV_TRUE);
+        while (clientRefIterNext(iter.pClient, &iter))
+        {
+            // Ignore RUSD objects on GPUs that don't match the RUSD object this poll request came from
+            GpuUserSharedData *pIterData = dynamicCast(iter.pResourceRef->pResource, GpuUserSharedData);
+            if ((pIterData != NULL) && (staticCast(pIterData, Memory)->pGpu == pGpu))
+            {
+                polledDataUnion |= pIterData->polledDataMask;
+            }
+        }
+    }
+
+    return polledDataUnion;
+}
+
+/*
+ * bPermanentRequest:
+ *   NV_TRUE:  the caller is the workqueue or the 2080 handler.
+ *   NV_FALSE: the caller is the 00DE handler.
+ */
+static NV_STATUS
+_handlePollMaskHelper
+(
+    OBJGPU *pGpu,
+    NvBool bPermanentRequest
+)
+{
+    GPU_DB_RUSD_SETTINGS gpudbRusd = { 0 };
+    NvU64 polledDataUnion = 0;
+
+    // Get the permanent mask if the setting is active.
+    if (pGpu->numUserKernelChannel > 0)
+    {
+        if (gpudbGetRusdSettings(pGpu->gpuUuid.uuid, &gpudbRusd) == NV_OK)
+            polledDataUnion = gpudbRusd.permanentPolledDataMask;
+    }
+
+    // Skip if the permanent request is the same as lastPolledDataMask
+    if (bPermanentRequest &&
+        (polledDataUnion == pGpu->userSharedData.lastPolledDataMask))
+    {
+        return NV_OK;
+    }
+
+    // Combine with 00DE objects polling mask
+    polledDataUnion |= _gpushareddataGetPollDataUnion(pGpu);
+
+    return _gpushareddataSendDataPollRpc(pGpu, polledDataUnion);
+}
+
+static NV_STATUS
+_gpushareddataRequestDataPoll
+(
+    GpuUserSharedData *pData,
+    NvU64 polledDataMask
+)
+{
+    OBJGPU *pGpu = staticCast(pData, Memory)->pGpu;
+
+    if (polledDataMask == pData->polledDataMask)
+        return NV_OK; // Nothing to do
+
+    pData->polledDataMask = polledDataMask;
+    return _handlePollMaskHelper(pGpu, NV_FALSE);
+}
+
+static void
+_gpuRusdRequestPermanentDataPollCallback
+(
+    NvU32 gpuInstance,
+    void *pArgs
+)
+{
+    OBJGPU *pGpu = gpumgrGetGpu(gpuInstance);
+
+    if (pGpu == NULL)
+        return;
+
+    _handlePollMaskHelper(pGpu, NV_TRUE);
+}
+
+NV_STATUS
+gpuRusdRequestPermanentDataPoll_IMPL
+(
+    OBJGPU *pGpu
+)
+{
+    NV_STATUS status;
+
+    status = osQueueWorkItem(pGpu,
+                             _gpuRusdRequestPermanentDataPollCallback,
+                             NULL,
+                             OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW |
+                             OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE);
+
+    if (status != NV_OK)
+        NV_PRINTF(LEVEL_ERROR, "Failed to queue work item _gpuRusdRequestPermanentDataPollCallback\n");
+
+    return status;
+}
+
+NV_STATUS
+gpushareddataCtrlCmdRequestDataPoll_IMPL
+(
+    GpuUserSharedData *pData,
+    NV00DE_CTRL_REQUEST_DATA_POLL_PARAMS *pParams
+)
+{
+    OBJGPU *pGpu = staticCast(pData, Memory)->pGpu;
+
+    // Polling is always forced on, no point routing to GSP because we will never change state
+    if (pGpu->userSharedData.pollingRegistryOverride == NV_REG_STR_RM_DEBUG_RUSD_POLLING_FORCE_ENABLE)
+        return NV_OK;
+
+    if (!_rusdPollingSupported(pGpu) && (pParams->polledDataMask != 0U))
+        return NV_ERR_NOT_SUPPORTED;
+
+    return _gpushareddataRequestDataPoll(pData, pParams->polledDataMask);
+}
+
+NV_STATUS
+subdeviceCtrlCmdRusdGetSupportedFeatures_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_RUSD_GET_SUPPORTED_FEATURES_PARAMS *pParams
+)
+{
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
+
+    pParams->supportedFeatures = 0;
+
+    if (!IS_VIRTUAL(pGpu))
+    {
+        pParams->supportedFeatures |= RUSD_FEATURE_NON_POLLING;
+    }
+
+    if (_rusdPollingSupported(pGpu))
+    {
+        pParams->supportedFeatures |= RUSD_FEATURE_POLLING;
+    }
+
+    return NV_OK;
+}
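+/*
+ * Editorial, illustrative sketch (the control command name is inferred from
+ * the parameter type and not confirmed here). Callers are expected to gate
+ * polling requests on the query above:
+ *
+ *     NV2080_CTRL_RUSD_GET_SUPPORTED_FEATURES_PARAMS feat = { 0 };
+ *     // issue the RUSD "get supported features" control on the subdevice
+ *     if (feat.supportedFeatures & RUSD_FEATURE_POLLING)
+ *     {
+ *         // a non-zero polledDataMask may be requested via NV00DE
+ *     }
+ */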
+NV_STATUS
+subdeviceCtrlCmdRusdSetFeatures_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_GPU_RUSD_SET_FEATURES_PARAMS *pParams
+)
+{
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
+    GPU_DB_RUSD_SETTINGS gpudbRusd = { 0 };
+    NV_STATUS status = NV_OK;
+
+    if (IS_VIRTUAL(pGpu))
+        return NV_ERR_NOT_SUPPORTED;
+
+    /*
+     * permanentPolledDataMask is saved in the GPU DB.
+     * It is preserved when the GPU is unbound; when the GPU is rebound
+     * (same gpuId), the saved permanentPolledDataMask is re-applied.
+     */
+    gpudbRusd.permanentPolledDataMask = pParams->permanentPolledDataMask;
+
+    status = gpudbSetRusdSettings(pGpu->gpuUuid.uuid, &gpudbRusd);
+
+    if (status == NV_OK)
+        status = _handlePollMaskHelper(pGpu, NV_TRUE);
+
+    return status;
+}
diff --git a/src/nvidia/src/kernel/gpu/gpu_uuid.c b/src/nvidia/src/kernel/gpu/gpu_uuid.c
new file mode 100644
index 0000000..0f368cb
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/gpu_uuid.c
@@ -0,0 +1,296 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "core/core.h"
+#include "gpu/gpu_uuid.h"
+#include "ctrl/ctrl2080/ctrl2080gpu.h"
+#include "os/os.h"
+#include "nvSha1.h"
+
+/**
+ * @brief Transforms a raw GPU ID into an ASCII string of the form
+ *        "GPU-%08x-%04x-%04x-%04x-%012x" (SHA-1)
+ *
+ * @param[in]  pGidData    Raw GID from OBJPMU/OBJBIF
+ * @param[in]  gidSize     Size of the raw ID
+ * @param[out] ppGidString Return pointer for the GID string
+ * @param[out] pGidStrlen  Return pointer for the GID string length
+ * @param[in]  gidFlags    NV2080_GPU_CMD_GPU_GET_GID_FLAGS values: selects
+ *                         SHA-1 only
+ * @param[in]  prefix      RM_UUID_PREFIX_* value selecting the string prefix
+ *
+ * @returns NV_OK on success, or an NV_ERR_* code on failure.
+ */ +NV_STATUS +transformGidToUserFriendlyString +( + const NvU8 *pGidData, + NvU32 gidSize, + NvU8 **ppGidString, + NvU32 *pGidStrlen, + NvU32 gidFlags, + NvU8 prefix +) +{ + NvUuid uuid; + + if (!FLD_TEST_DRF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_TYPE,_SHA1,gidFlags)) + { + return NV_ERR_INVALID_FLAGS; + } + + NV_ASSERT(NV_UUID_LEN == gidSize); + + portMemCopy(uuid.uuid, NV_UUID_LEN, pGidData, gidSize); + + *ppGidString = portMemAllocNonPaged(NV_UUID_STR_LEN); + if (*ppGidString == NULL) + { + return NV_ERR_NO_MEMORY; + } + + nvGetUuidString(&uuid, prefix, (char*)*ppGidString); + *pGidStrlen = NV_UUID_STR_LEN; + + return NV_OK; +} + +static NvU32 +_nvCopyUuid +( + NvU8 *pBuff, + NvU32 index, + NvU32 size, + void *pInfo +) +{ + NvU8 *pBytes = pInfo; + portMemCopy(pBuff, size, pBytes + index, size); + return size; +} + +/** + * @brief Generates SHA1 UUID for a GPU or a MIG instance. + * + * The UUID will be computed as SHA1(message) where the message is as follows: + * + * offset 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 + * value c8 16 c9 a3 52 24 56 bf 9d 9a ac 7e a7 03 fb 5b + * + * offset 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + * value N V I D I A '' G P U 02 x x 08 y y + * + * offset 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 + * value y y y y y y 0b S M C z z z z p p + * + * offset 48 49 + * value p p + */ +/** + * where, + * Char is the byte value in ASCII encoding ('' is space = 0x20) + * Number is the numeric byte value in hex (0x02) + * xx is the chip id in little endian format. + * The chip ID ARCH+IMPL. For example: 0x017B for GA10B + * yyyyyyyy is the 64-bit PDI in little endian. PDI = (PDI_1 << 32) OR PDI_0. + * + * Additionally, when fractional GPU with MIG is used, and the MIG + * configurations are exposed as separate logical devices, the following bytes + * are appended in the message: + * + * zzzz is the numeric value of the swizzle id (32-bit little-endian) + * pppp is the numeric value of the graphics engine physical + * sys pipe ID (32-bit little-endian) + * + * See bug 3028068 for more details. 
+ * + * @param[in] bMIG "MIG" or "GPU" UUID prefix + * @param[in] chipId GPU chip ID + * @param[in] pdi GPU PDI + * @param[in] swizzId MIG GPU instance swizz ID (only needed for MIG) + * @param[in] syspipeId MIG GPU instance syspipe ID (only needed for MIG) + * @param[out] pUuid UUID + * + * @returns NV_OK upon success, otherwise returns NV_ERR_* + */ + +#define UUID_MESSAGE_SIZE 50 +#define GPU_UUID_MESSAGE_SIZE 38 +#define SMC_UUID_MESSAGE_SIZE UUID_MESSAGE_SIZE + +static const NvU8 uuidMessage[UUID_MESSAGE_SIZE] = +{ + 0xc8, 0x16, 0xc9, 0xa3, 0x52, 0x24, 0x56, 0xbf, 0x9d, 0x9a, 0xac, + 0x7e, 0xa7, 0x03, 0xfb, 0x5b, 'N', 'V', 'I', 'D', 'I', 'A', + ' ', 'G', 'P', 'U', 0x02, 'x', 'x', 0x08, 'y', 'y', 'y', + 'y', 'y', 'y', 'y', 'y', 0x0b, 'S', 'M', 'C', 'z', 'z', + 'z', 'z', 'p', 'p', 'p', 'p' +}; + +static NV_STATUS +_nvGenerateUuid +( + NvBool bMIG, + NvU16 chipId, + NvU64 pdi, + NvU32 swizzId, + NvU32 syspipeId, + NvUuid *pUuid +) +{ + NvU8 *pSha1Digest; + NvU8 *pMessage; + NvU32 messageSize = GPU_UUID_MESSAGE_SIZE; + + pSha1Digest = portMemAllocNonPaged(NV_SHA1_DIGEST_LENGTH + + UUID_MESSAGE_SIZE); + if (pSha1Digest == NULL) + { + return NV_ERR_NO_MEMORY; + } + + pMessage = pSha1Digest + NV_SHA1_DIGEST_LENGTH; + + portMemCopy(pMessage, UUID_MESSAGE_SIZE, uuidMessage, UUID_MESSAGE_SIZE); + + portUtilWriteLittleEndian16(&pMessage[27], chipId); + portUtilWriteLittleEndian64(&pMessage[30], pdi); + + if (bMIG) + { + portUtilWriteLittleEndian32(&pMessage[42], swizzId); + portUtilWriteLittleEndian32(&pMessage[46], syspipeId); + + messageSize = SMC_UUID_MESSAGE_SIZE; + } + + // UUID strings only use the first 16 bytes of the 20-byte SHA-1 digest. + sha1Generate(pSha1Digest, pMessage, messageSize, _nvCopyUuid); + portMemCopy(pUuid->uuid, NV_UUID_LEN, pSha1Digest, NV_UUID_LEN); + + // version 5 - SHA1-based + pUuid->uuid[6] = (pUuid->uuid[6] & 0x0f) | 0x50; + // variant 1 - network byte ordering + pUuid->uuid[8] = (pUuid->uuid[8] & 0x3f) | 0x80; + + portMemFree(pSha1Digest); + + return NV_OK; +} + +/** + * @brief Generates SHA1 UUID for GPU. + * + * @param[in] chipId GPU chip ID + * @param[in] pdi GPU PDI + * @param[out] pUuid UUID + * + * @returns NV_OK upon success, otherwise returns NV_ERR_* + */ +NV_STATUS +nvGenerateGpuUuid +( + NvU16 chipId, + NvU64 pdi, + NvUuid *pUuid +) +{ + return _nvGenerateUuid(NV_FALSE, chipId, pdi, 0, 0, pUuid); +} + +/** + * @brief Generates SHA1 UUID for MIG instance. 
+ * + * @param[in] chipId GPU chip ID + * @param[in] pdi GPU PDI + * @param[in] swizzId MIG GPU instance swizz ID (only needed for _TYPE_SMC) + * @param[in] syspipeId MIG GPU instance syspipe ID (only needed for _TYPE_SMC) + * @param[out] pUuid UUID + * + * @returns NV_OK upon success, otherwise returns NV_ERR_* + */ +NV_STATUS +nvGenerateSmcUuid +( + NvU16 chipId, + NvU64 pdi, + NvU32 swizzId, + NvU32 syspipeId, + NvUuid *pUuid +) +{ + return _nvGenerateUuid(NV_TRUE, chipId, pdi, swizzId, syspipeId, pUuid); +} + +/** + * @brief Gets UUID ASCII string, "GPU-%08x-%04x-%04x-%04x-%012x" + * (SHA-1) or "MIG-%08x-%04x-%04x-%04x-%012x" (SHA-1) + * or "DLA-%08x-%04x-%04x-%04x-%012x" (SHA-1) + * + * @param[in] pUuid UUID + * @param[in] prefix Prefix to add for string + * @param[out] pUuidStr Returns UUID string + * + * @returns void + */ +void +nvGetUuidString +( + const NvUuid *pUuid, + NvU8 prefix, + char *pUuidStr +) +{ + const NvU32 sha1GroupEntryNum[] = { 8, 4, 4, 4, 12 }; + const NvU32 *pGroupEntryNum; + const NvU32 extraSymbolLen = 9; // 'G' 'P' 'U' '-'(x5), '\0x0', total = 9 + const NvU8 prefixLen = 4; + const char *pPrefix; + NvU32 groupCount; + NvU32 expectedStringLength = (NV_UUID_LEN << 1) + extraSymbolLen; + + pGroupEntryNum = sha1GroupEntryNum; + groupCount = NV_ARRAY_ELEMENTS(sha1GroupEntryNum); + + switch (prefix) + { + case RM_UUID_PREFIX_GPU: + pPrefix = "GPU-"; + break; + case RM_UUID_PREFIX_MIG: + pPrefix = "MIG-"; + break; + case RM_UUID_PREFIX_DLA: + pPrefix = "DLA-"; + break; + default: + pPrefix = "GPU-"; + break; + } + + portMemCopy(pUuidStr, prefixLen, pPrefix, prefixLen); + pUuidStr += prefixLen; + + portStringBufferToHexGroups(pUuidStr, (expectedStringLength - prefixLen), + pUuid->uuid, NV_UUID_LEN, + groupCount, pGroupEntryNum, "-"); +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c b/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c new file mode 100644 index 0000000..8b5956f --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c @@ -0,0 +1,89 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "turing/tu102/dev_mmu.h" +#include "turing/tu102/kind_macros.h" + +/*! + * @brief Returns NV_TRUE if memory kind matches the given op. 
+ * + * @param[in] op Kind-type to check for + * @param[in] kind Value to check + * + * @return NV_TRUE if "kind" matches kind-type specified by op. + * NV_FALSE otherwise. + */ +NvBool +memmgrIsKind_TU102 +( + MemoryManager *pMemoryManager, + FB_IS_KIND_OP op, + NvU32 kind +) +{ + switch (op) + { + case FB_IS_KIND_Z: + return PTEKIND_Z(kind); + case FB_IS_KIND_ZBC: + return PTEKIND_COMPRESSIBLE(kind); + case FB_IS_KIND_COMPRESSIBLE: + return PTEKIND_COMPRESSIBLE(kind); + case FB_IS_KIND_ZBC_ALLOWS_1: + case FB_IS_KIND_ZBC_ALLOWS_2: + case FB_IS_KIND_COMPRESSIBLE_1: + case FB_IS_KIND_COMPRESSIBLE_2: + case FB_IS_KIND_COMPRESSIBLE_4: + return NV_FALSE; + case FB_IS_KIND_SUPPORTED: + return (PTEKIND_SUPPORTED(kind) && !(KIND_INVALID(kind))); + case FB_IS_KIND_DISALLOW_PLC: + return PTEKIND_DISALLOWS_PLC(kind); + case FB_IS_KIND_SWIZZLED: + return !PTEKIND_GENERIC_MEMORY(kind) && !PTEKIND_PITCH(kind); + default: + NV_PRINTF(LEVEL_ERROR, "Bad op (%08x) passed in\n", op); + DBG_BREAKPOINT(); + return NV_FALSE; + } +} + +/** + * From Turing, we will not have Pitch Kind, so this function will determine + * type of surface from pMemoryInfo of the allocation. + * return NV_TRUE for BL surfaces and NV_FALSE otherwise. + */ +NvBool +memmgrIsSurfaceBlockLinear_TU102 +( + MemoryManager *pMemoryManager, + Memory *pMemory, + NvU32 kind +) +{ + return FLD_TEST_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR, pMemory->Attr); +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c b/src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c new file mode 100644 index 0000000..8141c0e --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c @@ -0,0 +1,656 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This module contains contextDma implementation. 
+*
+******************************************************************************/
+
+#include "core/core.h"
+#include "gpu/gpu.h"
+#include "mem_mgr/mem.h"
+#include "gpu/mem_mgr/virt_mem_allocator_common.h"
+#include "gpu/mem_mgr/context_dma.h"
+#include "gpu/mem_mgr/mem_desc.h"
+#include "gpu/disp/disp_objs.h"
+#include "gpu/disp/disp_channel.h"
+#include "gpu/disp/inst_mem/disp_inst_mem.h"
+#include "os/os.h"
+#include "gpu_mgr/gpu_mgr.h"
+#include "vgpu/rpc.h"
+#include "gpu/device/device.h"
+#include "gpu/subdevice/subdevice.h"
+#include "rmapi/rs_utils.h"
+#include "rmapi/mapping_list.h"
+#include "platform/sli/sli.h"
+
+#include "ctrl/ctrl0002.h"
+
+static NV_STATUS _ctxdmaConstruct(ContextDma *pContextDma, RsClient *, NvHandle, NvU32, NvU32, RsResourceRef *, NvU64, NvU64);
+static NV_STATUS _ctxdmaDestruct(ContextDma *pContextDma, NvHandle hClient);
+
+NV_STATUS
+ctxdmaConstruct_IMPL
+(
+    ContextDma *pContextDma,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    NV_STATUS status;
+    NV_CONTEXT_DMA_ALLOCATION_PARAMS *pAllocParams = pParams->pAllocParams;
+    NvU32 cachesnoop, type, i;
+    NvBool bReadOnly;
+    RsResourceRef *pMemoryRef;
+    NvHandle hParentFromMemory;
+    RsClient *pClient = pCallContext->pClient;
+    NvHandle hSubDevice = pAllocParams->hSubDevice;
+    NvU32 hClass = pParams->externalClassId;
+    NvU32 flags = pAllocParams->flags;
+    NvU64 offset = pAllocParams->offset;
+    NvU64 limit = pAllocParams->limit;
+
+    status = clientGetResourceRef(pClient, pAllocParams->hMemory, &pMemoryRef);
+    if (status != NV_OK)
+        return status;
+
+    hParentFromMemory = pMemoryRef->pParentRef ? pMemoryRef->pParentRef->hResource : 0;
+
+    if (RES_GET_PARENT_HANDLE(pContextDma) != hParentFromMemory)
+        return NV_ERR_INVALID_OBJECT_PARENT;
+
+    // validate the flags
+    switch (flags >> DRF_SHIFT(NVOS03_FLAGS_ACCESS) & DRF_MASK(NVOS03_FLAGS_ACCESS))
+    {
+        case NVOS03_FLAGS_ACCESS_WRITE_ONLY:
+            // we don't currently have a need to distinguish write-only
+            // permissions; fall through to read/write
+
+        case NVOS03_FLAGS_ACCESS_READ_WRITE:
+            bReadOnly = NV_FALSE;
+            break;
+
+        case NVOS03_FLAGS_ACCESS_READ_ONLY:
+            bReadOnly = NV_TRUE;
+            break;
+
+        default:
+            return NV_ERR_INVALID_FLAGS;
+    }
+
+    switch (DRF_VAL(OS03, _FLAGS, _CACHE_SNOOP, flags))
+    {
+        case NVOS03_FLAGS_CACHE_SNOOP_ENABLE:
+            cachesnoop = NV_TRUE;
+            break;
+
+        case NVOS03_FLAGS_CACHE_SNOOP_DISABLE:
+            cachesnoop = NV_FALSE;
+            break;
+
+        default:
+            return NV_ERR_INVALID_FLAGS;
+    }
+
+    /*
+     * Note that the NV_OS03_FLAGS_MAPPING is an alias to
+     * the LSB of the NV_OS03_FLAGS_TYPE. And in fact if
+     * type is NV_OS03_FLAGS_TYPE_NOTIFIER (bit 20 set)
+     * then it implicitly means that NV_OS03_FLAGS_MAPPING
+     * is _MAPPING_KERNEL. If the client wants to have a
+     * Kernel Mapping, it should use the _MAPPING_KERNEL
+     * flag set and the _TYPE_NOTIFIER should be used only
+     * with NOTIFIERS.
+ */ + type = DRF_VAL(OS03, _FLAGS, _MAPPING, flags); + + // fill in dmaInfo + pContextDma->Flags = flags; + pContextDma->bReadOnly = bReadOnly; + pContextDma->CacheSnoop = cachesnoop; + pContextDma->Type = type; + pContextDma->Limit = limit; + + for (i = 0; i < NV_ARRAY_ELEMENTS(pContextDma->KernelVAddr); i++) + pContextDma->KernelVAddr[i] = NULL; + + pContextDma->KernelPriv = NULL; + + for (i = 0; i < NV_ARRAY_ELEMENTS(pContextDma->FbAperture); i++) + { + pContextDma->FbAperture[i] = (NvU64)-1; + pContextDma->FbApertureLen[i] = 0; + } + + for (i = 0; i < NV_ARRAY_ELEMENTS(pContextDma->Instance); i++) + { + pContextDma->Instance[i] = 0; + pContextDma->InstRefCount[i] = 0; + } + + pContextDma->pMemDesc = NULL; + pContextDma->AddressSpace = ADDR_UNKNOWN; + + // Display context dmas have always been explicitly bound. + if (DRF_VAL(OS03, _FLAGS, _HASH_TABLE, flags) == NVOS03_FLAGS_HASH_TABLE_ENABLE) + { + NV_PRINTF(LEVEL_ERROR, "HASH_TABLE=ENABLE no longer supported!\n"); + return NV_ERR_INVALID_FLAGS; + } + + status = _ctxdmaConstruct(pContextDma, pClient, hSubDevice, hClass, + flags, pMemoryRef, offset, limit); + + if (status == NV_OK) + refAddDependant(pMemoryRef, RES_GET_REF(pContextDma)); + + return status; +} + +void +ctxdmaDestruct_IMPL +( + ContextDma *pContextDma +) +{ + _ctxdmaDestruct(pContextDma, RES_GET_CLIENT_HANDLE(pContextDma)); +} + +/*! + * NOTE: this control call may be called at high IRQL with LOCK_BYPASS on WDDM. + */ +NV_STATUS +ctxdmaCtrlCmdUpdateContextdma_IMPL +( + ContextDma *pContextDma, + NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *pUpdateCtxDmaParams +) +{ + RsClient *pClient = RES_GET_CLIENT(pContextDma); + OBJGPU *pGpu; + KernelDisplay *pKernelDisplay; + DisplayInstanceMemory *pInstMem; + NvU64 *pNewAddress = NULL; + NvU64 *pNewLimit = NULL; + NvHandle hMemory = NV01_NULL_OBJECT; + NvU32 comprInfo; + NV_STATUS status = NV_OK; + + // + // Validate that if hCtxDma is passed in it is the same as the hCtxDma + // used for the top level RmControl hObject + // + if (pUpdateCtxDmaParams->hCtxDma != NV01_NULL_OBJECT) + NV_ASSERT_OR_RETURN(pUpdateCtxDmaParams->hCtxDma == RES_GET_HANDLE(pContextDma), NV_ERR_INVALID_OBJECT); + + if (pUpdateCtxDmaParams->hSubDevice != NV01_NULL_OBJECT) + { + Subdevice *pSubdevice; + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + subdeviceGetByHandle(pClient, pUpdateCtxDmaParams->hSubDevice, &pSubdevice)); + + // Ensure requested hSubDevice is valid for the GPU associated with this contextdma + NV_CHECK_OR_RETURN(LEVEL_ERROR, pSubdevice->pDevice == pContextDma->pDevice, NV_ERR_INVALID_OBJECT_HANDLE); + + pGpu = GPU_RES_GET_GPU(pSubdevice); + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + } + else + { + pGpu = pContextDma->pGpu; + gpuSetThreadBcState(pGpu, !pContextDma->bUnicast); + } + + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + if (pKernelDisplay == NULL) + return NV_ERR_GENERIC; + + if (FLD_TEST_DRF(0002_CTRL_CMD, _UPDATE_CONTEXTDMA, _FLAGS_BASEADDRESS, _VALID, pUpdateCtxDmaParams->flags)) + pNewAddress = &pUpdateCtxDmaParams->baseAddress; + if (FLD_TEST_DRF(0002_CTRL_CMD, _UPDATE_CONTEXTDMA, _FLAGS_LIMIT, _VALID, pUpdateCtxDmaParams->flags)) + pNewLimit = &pUpdateCtxDmaParams->limit; + if (FLD_TEST_DRF(0002_CTRL_CMD, _UPDATE_CONTEXTDMA, _FLAGS_HINT, _VALID, pUpdateCtxDmaParams->flags)) + hMemory = pUpdateCtxDmaParams->hintHandle; + + comprInfo = DRF_VAL(0002_CTRL_CMD, _UPDATE_CONTEXTDMA_FLAGS, _USE_COMPR_INFO, pUpdateCtxDmaParams->flags); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + pInstMem = 
KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay);
+
+        status = instmemUpdateContextDma_HAL(pGpu, pInstMem, pContextDma,
+                                             pNewAddress, pNewLimit, hMemory, comprInfo);
+        NV_ASSERT(status == NV_OK);
+
+    SLI_LOOP_END
+
+    return status;
+}
+
+static NV_STATUS
+_ctxdmaDestruct
+(
+    ContextDma *pContextDma,
+    NvHandle hClient
+)
+{
+    NV_STATUS rmStatus = NV_OK;
+    OBJGPU *pGpu = NULL;
+
+    pGpu = pContextDma->pGpu;
+    NV_ASSERT_OR_RETURN(pGpu != NULL, NV_WARN_NULL_OBJECT);
+    gpuSetThreadBcState(pGpu, !pContextDma->bUnicast);
+
+    if (pContextDma->bUnicast || RES_GET_PARENT_HANDLE(pContextDma) == RES_GET_HANDLE(pContextDma->pDevice))
+    {
+        //
+        // vGPU:
+        //
+        // Since vGPU does all real hardware management in the
+        // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true),
+        // do an RPC to the host to do the hardware update.
+        //
+        if ((IS_VIRTUAL(pGpu) &&
+             (!(IS_VIRTUAL_WITH_SRIOV(pGpu) && (!gpuIsWarBug200577889SriovHeavyEnabled(pGpu))))))
+        {
+            NV_RM_RPC_FREE(pGpu, hClient, RES_GET_HANDLE(pContextDma->pMemory), RES_GET_HANDLE(pContextDma), rmStatus);
+        }
+    }
+
+    // Clean up the context: first unbind from display
+    if (ctxdmaIsBound(pContextDma))
+        dispchnUnbindCtxFromAllChannels(pGpu, pContextDma);
+
+    // Handle the unicast sysmem mapping before _ctxdmaDestroyFBMappings()
+    if (pContextDma->AddressSpace == ADDR_SYSMEM)
+    {
+        NvU32 gpuDevInst = gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu));
+
+        if (pContextDma->KernelVAddr[gpuDevInst])
+        {
+            memdescUnmapOld(pContextDma->pMemory->pMemDesc, NV_TRUE,
+                            pContextDma->KernelVAddr[gpuDevInst],
+                            pContextDma->KernelPriv);
+            pContextDma->KernelVAddr[gpuDevInst] = NULL;
+            pContextDma->KernelPriv = NULL;
+        }
+    }
+
+    // Ideally we'd do all of the below in RmFreeDeviceContextDma when
+    // DeviceRefCount goes to 0, but it is left here because
+    // RmFreeDeviceContextDma is also called from other places.
+    memdescFree(pContextDma->pMemDesc);
+    memdescDestroy(pContextDma->pMemDesc);
+    pContextDma->pMemDesc = NULL;
+
+    return rmStatus;
+}
+
+/*!
+ * NOTE: this control call may be called at high IRQL with LOCK_BYPASS on WDDM.
+ */
+NV_STATUS
+ctxdmaCtrlCmdBindContextdma_IMPL
+(
+    ContextDma *pContextDma,
+    NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *pBindCtxDmaParams
+)
+{
+    NvHandle hChannel = pBindCtxDmaParams->hChannel;
+
+    gpuSetThreadBcState(pContextDma->pGpu, !pContextDma->bUnicast);
+
+    API_GPU_FULL_POWER_SANITY_CHECK(pContextDma->pGpu, NV_TRUE, NV_FALSE);
+
+    //
+    // Call dispchn to alloc inst mem, write the ctxdma data, and write
+    // the hash table entry.
+    //
+    return dispchnBindCtx(pContextDma->pGpu, pContextDma, hChannel);
+}
+
+/*!
+ * NOTE: this control call may be called at high IRQL with LOCK_BYPASS on WDDM.
+ */
+NV_STATUS
+ctxdmaCtrlCmdUnbindContextdma_IMPL
+(
+    ContextDma *pContextDma,
+    NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *pUnbindCtxDmaParams
+)
+{
+    gpuSetThreadBcState(pContextDma->pGpu, !pContextDma->bUnicast);
+
+    API_GPU_FULL_POWER_SANITY_CHECK(pContextDma->pGpu, NV_TRUE, NV_FALSE);
+
+    return dispchnUnbindCtx(pContextDma->pGpu, pContextDma, pUnbindCtxDmaParams->hChannel);
+}
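+/*
+ * Editorial note on the offset/limit convention used by _ctxdmaConstruct()
+ * below: "limit" is inclusive, so a ContextDma spanning a 4KB surface at
+ * offset 0 is described by offset = 0 and limit = 0xFFF. The constructor
+ * validates offset + limit < pMemory->Length with overflow-safe arithmetic
+ * and sizes the sub-memdesc as limit + 1 bytes.
+ */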
+static NV_STATUS
+_ctxdmaConstruct
+(
+    ContextDma *pContextDma,
+    RsClient *pClient,
+    NvHandle hSubDevice,
+    NvU32 hClass,
+    NvU32 flags,
+    RsResourceRef *pMemoryRef,
+    NvU64 offset,
+    NvU64 limit
+)
+{
+    NV_STATUS rmStatus = NV_OK;
+    Memory *pMemory = NULL;
+    OBJGPU *pGpu = NULL;
+    MEMORY_DESCRIPTOR *pMemDesc = NULL;
+    NvHandle hDevice = 0;
+    NvHandle hClient = pClient->hClient;
+    Device *pDevice = NULL;
+
+    pMemory = dynamicCast(pMemoryRef->pResource, Memory);
+    if (pMemory == NULL)
+        return NV_ERR_INVALID_OBJECT;
+
+    if (hSubDevice != 0)
+    {
+        pContextDma->bUnicast = NV_TRUE;
+        rmStatus = gpuGetByHandle(pClient, hSubDevice, NULL, &pGpu);
+        if (rmStatus != NV_OK)
+            return rmStatus;
+    }
+    else
+    {
+        pContextDma->bUnicast = NV_FALSE;
+        pGpu = pMemory->pGpu;
+        if (pGpu == NULL)
+            return NV_ERR_INVALID_OBJECT_PARENT;
+    }
+
+    gpuSetThreadBcState(pGpu, !pContextDma->bUnicast);
+
+    if (hSubDevice == 0)
+    {
+        //
+        // We verified that pMemory is parented by Device.
+        // pGpu == NULL && hSubDevice == 0 errors out above.
+        //
+        pDevice = pMemory->pDevice;
+    }
+    else
+    {
+        rmStatus = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice);
+        if (rmStatus != NV_OK)
+            return NV_ERR_INVALID_OBJECT_PARENT;
+    }
+
+    pContextDma->pDevice = pDevice;
+
+    hDevice = RES_GET_HANDLE(pDevice);
+
+    API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE);
+
+    pMemDesc = pMemory->pMemDesc;
+
+    //
+    // Validate the offset and limit passed in.
+    // Check that the end of the contextdma is within the memory object which
+    // was created (RmAllocMemory). Since "limit" is inclusive, it should be
+    // strictly less than the length.
+    //
+    {
+        NvU64 combinedLimit;
+        if (!portSafeAddU64(offset, limit, &combinedLimit) ||
+            (combinedLimit >= pMemory->Length))
+        {
+            return NV_ERR_INVALID_LIMIT;
+        }
+    }
+
+    // The destructor expects the following fields in pContextDma to be set,
+    // so do not invoke the destructor (goto done) before they are assigned.
+    pContextDma->pMemory = pMemory;
+    pContextDma->pGpu = pGpu;
+
+    pContextDma->AddressSpace = memdescGetAddressSpace(memdescGetMemDescFromGpu(pMemDesc, pGpu));
+
+    // Fail allocation of virtual ContextDmas. These have moved to DynamicMemory.
+    if (pContextDma->AddressSpace == ADDR_VIRTUAL)
+    {
+        return NV_ERR_OBJECT_TYPE_MISMATCH;
+    }
+
+    //
+    // Create a MEMORY_DESCRIPTOR describing this region of the memory alloc
+    // in question
+    //
+    rmStatus = memdescCreateSubMem(
+        &pContextDma->pMemDesc, pMemDesc, pGpu, offset, limit+1);
+    if (rmStatus != NV_OK)
+        goto done;
+
+    if (pContextDma->AddressSpace == ADDR_SYSMEM)
+    {
+        if (pContextDma->Type == NVOS03_FLAGS_MAPPING_KERNEL)
+        {
+            rmStatus = memdescMapOld(
+                pMemDesc,
+                offset, limit+1, NV_TRUE, NV_PROTECT_READ_WRITE,
+                &pContextDma->KernelVAddr[gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu))],
+                &pContextDma->KernelPriv);
+            if (rmStatus != NV_OK)
+                goto done;
+        }
+    }
+
+done:
+
+    if (rmStatus == NV_OK)
+    {
+        if (IS_VIRTUAL(pGpu))
+        {
+            //
+            // vGPU:
+            //
+            // Since vGPU does all real hardware management in the
+            // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true),
+            // do an RPC to the host to do the hardware update.
+            //
+            NV_RM_RPC_ALLOC_CONTEXT_DMA(pGpu, hClient, hDevice, RES_GET_HANDLE(pContextDma), hClass,
+                                        flags, RES_GET_HANDLE(pMemory), offset, limit, rmStatus);
+        }
+    }
+
+    if (rmStatus != NV_OK)
+    {
+        memdescDestroy(pContextDma->pMemDesc);
+        pContextDma->pMemDesc = NULL;
+
+        _ctxdmaDestruct(pContextDma, hClient);
+    }
+
+    return rmStatus;
+}
+
+//
+// Fetch ContextDma from resource server
+//
+NV_STATUS
+ctxdmaGetByHandle
+(
+    RsClient *pClient,
+    NvHandle hContextDma,
+    ContextDma **ppContextDma
+)
+{
+    RsResourceRef *pResourceRef;
+    NV_STATUS status;
+
+    *ppContextDma = NULL;
+
+    status = clientGetResourceRef(pClient, hContextDma, &pResourceRef);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    *ppContextDma = dynamicCast(pResourceRef->pResource, ContextDma);
+
+    return (*ppContextDma) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE;
+}
+
+//
+// Validate that the range described by Start+Length is within ContextDma
+// limits.
+//
+NV_STATUS
+ctxdmaValidate_IMPL
+(
+    ContextDma *pContextDma,
+    NvU64 Start,
+    NvU64 Length
+)
+{
+    if (pContextDma == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Invalid DMA context in ctxdmaValidate\n");
+        DBG_BREAKPOINT();
+        return NV_ERR_INVALID_XLATE;
+    }
+    DBG_VAL_PTR(pContextDma);
+
+    if ((Start + Length - 1) > pContextDma->Limit)
+        return NV_ERR_INVALID_OFFSET;
+
+    return NV_OK;
+}
+
+//
+// Return the CPU VA of a DMA buffer.
+//
+NV_STATUS
+ctxdmaGetKernelVA_IMPL
+(
+    ContextDma *pContextDma,
+    NvU64 Start,
+    NvU64 Length,
+    void **ppAddress,
+    NvU32 VA_idx
+)
+{
+    NV_STATUS status;
+
+    if (pContextDma == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Invalid DMA context in ctxdmaGetKernelVA\n");
+        DBG_BREAKPOINT();
+        return NV_ERR_INVALID_XLATE;
+    }
+    DBG_VAL_PTR(pContextDma);
+
+    status = ctxdmaValidate(pContextDma, Start, Length);
+    if (status != NV_OK)
+        return status;
+
+    if (pContextDma->KernelVAddr[VA_idx] == NULL)
+        return NV_ERR_DMA_MEM_NOT_LOCKED;
+
+    *ppAddress = (NvU8*)pContextDma->KernelVAddr[VA_idx] + Start;
+
+    return NV_OK;
+}
+
+NV_STATUS
+ctxdmaMapTo_IMPL
+(
+    ContextDma *pContextDma,
+    RS_RES_MAP_TO_PARAMS *pParams
+)
+{
+    OBJGPU *pGpu = pParams->pGpu;
+    MEMORY_DESCRIPTOR *pSrcMemDesc = pParams->pSrcMemDesc;
+    NvU64 offset = pParams->offset;
+
+    //
+    // For video memory, provide a way to look up the offset of an FB
+    // allocation within the given target ContextDma. Still useful for
+    // dFPGA; it is used by MODS.
+    //
+    if ((memdescGetAddressSpace(memdescGetMemDescFromGpu(pSrcMemDesc, pGpu)) == ADDR_FBMEM) &&
+        (memdescGetAddressSpace(memdescGetMemDescFromGpu(pContextDma->pMemDesc, pGpu)) == ADDR_FBMEM))
+    {
+        RmPhysAddr physaddr;
+        if (!memdescGetContiguity(pSrcMemDesc, AT_GPU))
+        {
+            NV_PRINTF(LEVEL_ERROR, "Cannot obtain the video memory offset of a noncontiguous vidmem alloc!\n");
+            return NV_ERR_GENERIC;
+        }
+
+        // Return the GPU device physical address, if available
+        physaddr = memdescGetPhysAddr(pSrcMemDesc, AT_GPU, offset);
+        *pParams->pDmaOffset = physaddr - memdescGetPhysAddr(pContextDma->pMemDesc, AT_GPU, 0);
+        return NV_OK;
+    }
+
+    // We no longer support tracking mappings on ContextDma; this has moved to DynamicMemory.
+    return NV_ERR_INVALID_ARGUMENT;
+}
+
+NV_STATUS
+ctxdmaUnmapFrom_IMPL
+(
+    ContextDma *pContextDma,
+    RS_RES_UNMAP_FROM_PARAMS *pParams
+)
+{
+    //
+    // With ContextDmas only supporting physical (or IOMMU VA) there is
+    // nothing to unmap. We silently allow this call for compatibility.
+    //
+    return NV_OK;
+}
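+/*
+ * Editorial, illustrative usage sketch (handles and range values are
+ * placeholders). A kernel-side caller typically resolves a ContextDma
+ * handle and obtains a CPU pointer into it as follows; note that
+ * ctxdmaGetKernelVA() performs the range validation internally:
+ *
+ *     ContextDma *pCtxDma;
+ *     void       *pCpuVa;
+ *     if ((ctxdmaGetByHandle(pClient, hCtxDma, &pCtxDma) == NV_OK) &&
+ *         (ctxdmaGetKernelVA(pCtxDma, offset, length, &pCpuVa,
+ *                            subDeviceInst) == NV_OK))
+ *     {
+ *         // pCpuVa points 'offset' bytes into the ContextDma backing store
+ *     }
+ */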
+/*!
+ * @brief Is the ContextDma bound to a display channel?
+ *
+ * This is a fast check to see if a ContextDma is bound to a display channel.
+ *
+ * This is called during display channel or ContextDma teardown only,
+ * which the DD cannot do while a LOCK_BYPASS bind is active on these
+ * objects. Locking would require a per-subdevice lock/unlock.
+ */
+NvBool
+ctxdmaIsBound_IMPL
+(
+    ContextDma *pContextDma
+)
+{
+    NvU32 refs = 0;
+    NvU32 i;
+
+    for (i = 0; i < NV_MAX_SUBDEVICES; i++)
+        refs += pContextDma->InstRefCount[i];
+
+    return refs != 0;
+}
diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/mem_ctrl.c b/src/nvidia/src/kernel/gpu/mem_mgr/mem_ctrl.c
new file mode 100644
index 0000000..e676ed2
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/mem_mgr/mem_ctrl.c
@@ -0,0 +1,310 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************************************************************
+*
+* Description:
+* This module contains Nv04Control support for heap allocations
+* represented by NV04_MEMORY class instantiations.
+*
+******************************************************************************/
+
+#include "core/core.h"
+#include "os/os.h"
+#include "gpu/mem_mgr/mem_mgr.h"
+#include "platform/sli/sli.h"
+
+#include "gpu/mem_mgr/mem_desc.h"
+#include "rmapi/client_resource.h"
+#include "rmapi/control.h"
+#include "gpu_mgr/gpu_mgr.h"
+#include "rmapi/rs_utils.h"
+#include "gpu/subdevice/subdevice.h"
+#include "vgpu/rpc.h"
+
+#include "ctrl/ctrl0041.h"
+
+static NV_STATUS
+_memmgrGetSurfaceComprInfo
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvU32 *pComprOffset,
+    NvU32 *pComprKind,
+    NvU32 *pLineMin,
+    NvU32 *pLineMax
+)
+{
+    OBJGPU *pGpu = pMemDesc->pGpu;
+
+    if (pGpu == NULL || IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
+    {
+        //
+        // vGPU-legacy: pPrivate->pCompTags is not initialized in the guest RM
+        // SRIOV: same handling as raw mode
+        // GPU=NULL: deviceless sysmem; can't have comptags
+        //
+        *pComprKind = 0;
+        *pComprOffset = 0;
+        *pLineMin = 0;
+        *pLineMax = 0;
+        return NV_OK;
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS
+memCtrlCmdGetSurfaceCompressionCoverageLvm_IMPL
+(
+    Memory *pMemory,
+    NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS *pParams
+)
+{
+    MEMORY_DESCRIPTOR *pMemDesc = pMemory->pMemDesc;
+    NvU32 unused;
+
+    if (pParams->hSubDevice)
+    {
+        CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
+        Subdevice *pSubDevice;
+        NV_STATUS status;
+
+        // Handle the subdevice (unicast) case
+        status = subdeviceGetByHandle(pCallContext->pClient,
+                                      pParams->hSubDevice, &pSubDevice);
+
+        if (status != NV_OK)
+            return status;
+
+        GPU_RES_SET_THREAD_BC_STATE(pSubDevice);
+
+        pMemDesc = memdescGetMemDescFromGpu(pMemDesc, GPU_RES_GET_GPU(pSubDevice));
+    }
+
+    // Use the (possibly per-subdevice) memdesc selected above
+    return _memmgrGetSurfaceComprInfo(pMemDesc, &unused, &pParams->format, &pParams->lineMin, &pParams->lineMax);
+}
+
+NV_STATUS
+memCtrlCmdGetSurfaceInfoLvm_IMPL
+(
+    Memory *pMemory,
+    NV0041_CTRL_GET_SURFACE_INFO_PARAMS *pSurfaceInfoParams
+)
+{
+    NV0041_CTRL_SURFACE_INFO *pSurfaceInfos = NvP64_VALUE(pSurfaceInfoParams->surfaceInfoList);
+    NV_STATUS status = NV_OK;
+    NvU32 i;
+    NvU32 data = 0;
+    NvU64 size = 0;
+
+    if ((pSurfaceInfoParams->surfaceInfoListSize == 0) || pSurfaceInfos == NULL)
+        return NV_OK;
+
+    // Step through the list
+    for (i = 0; i < pSurfaceInfoParams->surfaceInfoListSize; i++)
+    {
+        status = NV_OK;
+        data = 0;
+
+        switch (pSurfaceInfos[i].index)
+        {
+            case NV0041_CTRL_SURFACE_INFO_INDEX_ATTRS:
+            {
+                if ((pMemory->pHwResource != NULL) &&
+                    pMemory->pHwResource->attr & DRF_DEF(OS32, _ATTR, _COMPR, _REQUIRED))
+                    data |= NV0041_CTRL_SURFACE_INFO_ATTRS_COMPR;
+                if ((pMemory->pHwResource != NULL) &&
+                    pMemory->pHwResource->attr & DRF_DEF(OS32, _ATTR, _ZCULL, _REQUIRED))
+                    data |= NV0041_CTRL_SURFACE_INFO_ATTRS_ZCULL;
+                break;
+            }
+            case NV0041_CTRL_SURFACE_INFO_INDEX_COMPR_COVERAGE:
+            {
+                //
+                // adding check for pHwResource, since host managed HW resource
+                // gets allocated only when ATTR is set to COMPR_REQUIRED
+                //
+                if ((pMemory->pHwResource != NULL) &&
+                    pMemory->pHwResource->attr &
+                    DRF_DEF(OS32, _ATTR, _COMPR, _REQUIRED))
+                {
+                    NvU64 contigSegmentSize;
+                    NvU32 unused;
+                    NvU64 zero = 0;
+
+                    status = memdescFillMemdescForPhysAttr(pMemory->pMemDesc, AT_GPU, &zero, &unused,
+                                                           &unused, &unused, &unused,
+                                                           &contigSegmentSize);
+                    if (status == NV_OK)
+                    {
+                        // report compression coverage in units of 64k
+                        data = NvOffset_LO32(contigSegmentSize / 0x10000);
+                    }
+                }
+                break;
+            }
+            case NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE_LO:
+            {
+                // Report the low 32 bits of the size of the physical allocation
+                size =
memdescGetSize(pMemory->pMemDesc); + data = NvU64_LO32(size); + break; + } + case NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE_HI: + { + // Report the high 32 bits of the size of the physical allocation + size = memdescGetSize(pMemory->pMemDesc); + data = NvU64_HI32(size); + break; + } + case NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE: + { + // Report the scaled size of the physical allocation + size = (memdescGetSize(pMemory->pMemDesc) / NV0041_CTRL_SURFACE_INFO_PHYS_SIZE_SCALE_FACTOR); + data = NvOffset_LO32(size); + NV_ASSERT_OR_RETURN((NvU64)data == size, NV_ERR_OUT_OF_RANGE); + break; + } + case NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_ATTR: + { + if (pMemory->pHwResource != NULL) + data = pMemory->pHwResource->attr & (DRF_SHIFTMASK(NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE) | DRF_SHIFTMASK(NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY) | DRF_SHIFTMASK(NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_FORMAT)); + break; + } + case NV0041_CTRL_SURFACE_INFO_INDEX_ADDR_SPACE_TYPE: + { + // This is equivalent to NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE on the surface. + NV_ADDRESS_SPACE addrSpace; + + addrSpace = memdescGetAddressSpace(pMemory->pMemDesc); + if (addrSpace == ADDR_SYSMEM) + { + if (memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_BAR0_REFLECT)) + { + addrSpace = ADDR_REGMEM; + } + else if (memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_BAR1_REFLECT)) + { + addrSpace = ADDR_FBMEM; + } + } + switch (addrSpace) + { + case ADDR_SYSMEM: + { + data = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_SYSMEM; + break; + } + case ADDR_FBMEM: + { + data = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM; + break; + } + case ADDR_REGMEM: + { + data = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_REGMEM; + break; + } + default: + { + data = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID; + break; + } + } + break; + } + default: + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + // stop processing list at first failure + if (status != NV_OK) + break; + + pSurfaceInfos[i].data = data; + } + + return status; +} + +NV_STATUS +memCtrlCmdGetSurfacePhysAttrLvm_IMPL +( + Memory *pMemory, + NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS *pGPAP +) +{ + NvU32 unused; + + NV_ASSERT_OK_OR_RETURN( + memdescFillMemdescForPhysAttr(pMemory->pMemDesc, AT_GPU, &pGPAP->memOffset, &pGPAP->memAperture, + &pGPAP->memFormat, &pGPAP->gpuCacheAttr, &pGPAP->gpuP2PCacheAttr, + &pGPAP->contigSegmentSize)); + + NV_ASSERT_OK_OR_RETURN( + _memmgrGetSurfaceComprInfo(pMemory->pMemDesc, &pGPAP->comprOffset, &pGPAP->comprFormat, &unused, &unused)); + + return NV_OK; +} + + +NV_STATUS +memCtrlCmdMapMemoryForGpuAccess_IMPL +( + Memory *pMemory, + NV0041_CTRL_MAP_MEMORY_FOR_GPU_ACCESS_PARAMS *pParams +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + MEMORY_DESCRIPTOR *pMemDesc = pMemory->pMemDesc; + Subdevice *pSubdevice; + + NV_CHECK_OR_RETURN(LEVEL_ERROR, memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM, NV_ERR_INVALID_ARGUMENT); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, subdeviceGetByHandle(pCallContext->pClient, pParams->hSubdevice, &pSubdevice)); + NV_ASSERT_OK_OR_RETURN(memdescMapIommu(pMemDesc, GPU_RES_GET_GPU(pSubdevice)->busInfo.iovaspaceId)); + + memdescGetPhysAddrsForGpu(pMemDesc, GPU_RES_GET_GPU(pSubdevice), AT_GPU, 0, 0, 1, &pParams->address); + + return NV_OK; +} + +NV_STATUS +memCtrlCmdUnmapMemoryForGpuAccess_IMPL +( + Memory *pMemory, + NV0041_CTRL_UNMAP_MEMORY_FOR_GPU_ACCESS_PARAMS *pParams +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + MEMORY_DESCRIPTOR *pMemDesc = pMemory->pMemDesc; + 
Subdevice *pSubdevice; + + NV_CHECK_OR_RETURN(LEVEL_ERROR, memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM, NV_ERR_INVALID_ARGUMENT); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, subdeviceGetByHandle(pCallContext->pClient, pParams->hSubdevice, &pSubdevice)); + memdescUnmapIommu(pMemDesc, GPU_RES_GET_GPU(pSubdevice)->busInfo.iovaspaceId); + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c b/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c new file mode 100644 index 0000000..f85b85f --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c @@ -0,0 +1,4307 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief Memory descriptor handling utility routines. + */ + +#include "gpu/mem_mgr/mem_desc.h" + +#include "os/os.h" + +#include "gpu_mgr/gpu_mgr.h" +#include "core/locks.h" +#include "mem_mgr/io_vaspace.h" +#include "mem_mgr/virt_mem_mgr.h" +#include "core/system.h" +#include "vgpu/vgpu_util.h" +#include "platform/sli/sli.h" +#include "resserv/rs_client.h" + +#include "rmconfig.h" +#include "vgpu/rpc.h" +#include "mem_mgr/mem.h" + +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/mem_utils.h" + +#include "nvrm_registry.h" // For memdescOverrideInstLoc*() + +#include "rmapi/rmapi.h" +#include "rmapi/rs_utils.h" +#include "class/cl0071.h" // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR + +// Structure for keeping track of BAR1 mappings +typedef struct +{ + NvU64 FbAperture; + NvU64 FbApertureLen; + NvP64 pPriv; +} FB_MAPPING_INFO; + +// +// Common address space lists +// +const NV_ADDRESS_SPACE ADDRLIST_FBMEM_PREFERRED[] = {ADDR_FBMEM, ADDR_SYSMEM, ADDR_UNKNOWN}; +const NV_ADDRESS_SPACE ADDRLIST_SYSMEM_PREFERRED[] = {ADDR_SYSMEM, ADDR_FBMEM, ADDR_UNKNOWN}; +const NV_ADDRESS_SPACE ADDRLIST_FBMEM_ONLY[] = {ADDR_FBMEM, ADDR_UNKNOWN}; +const NV_ADDRESS_SPACE ADDRLIST_SYSMEM_ONLY[] = {ADDR_SYSMEM, ADDR_UNKNOWN}; + +// XXX These could probably encode the whole list in the u32 bits. 
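+//
+// They serialize one of the fixed lists above to a small NvU32 tag and back.
+// Illustrative round trip (editorial sketch, not part of the change itself):
+//
+//     NvU32 tag = memdescAddrSpaceListToU32(ADDRLIST_FBMEM_PREFERRED);
+//     const NV_ADDRESS_SPACE *pList = memdescU32ToAddrSpaceList(tag);
+//     // pList == ADDRLIST_FBMEM_PREFERRED; an unrecognized tag decodes to NULL.
+//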
+NvU32 memdescAddrSpaceListToU32(const NV_ADDRESS_SPACE *addrlist) +{ + if (addrlist == ADDRLIST_FBMEM_PREFERRED) + return 1; + else if (addrlist == ADDRLIST_SYSMEM_PREFERRED) + return 2; + else if (addrlist == ADDRLIST_FBMEM_ONLY) + return 3; + else if (addrlist == ADDRLIST_SYSMEM_ONLY) + return 4; + else + return 0; +} + +const NV_ADDRESS_SPACE *memdescU32ToAddrSpaceList(NvU32 index) +{ + switch (index) + { + case 1: return ADDRLIST_FBMEM_PREFERRED; + case 2: return ADDRLIST_SYSMEM_PREFERRED; + case 3: return ADDRLIST_FBMEM_ONLY; + case 4: return ADDRLIST_SYSMEM_ONLY; + default: + return NULL; + } +} + +/* + * @brief Setting a MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE has to initialize + * pHeap and bUsingSubAllocator flags + */ +static NV_STATUS _memdescSetSubAllocatorFlag +( + OBJGPU *pGpu, + PMEMORY_DESCRIPTOR pMemDesc, + NvBool bSet +) +{ + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + + if (!bSet) + { + NV_PRINTF(LEVEL_ERROR, + "Unsetting MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE not supported\n"); + NV_ASSERT(0); + return NV_ERR_INVALID_ARGUMENT; + } + + NV_ASSERT(!(pMemDesc->_flags & MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL)); + + // Set flag forcing the allocation to fall into suballocator + pMemDesc->_flags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + + return NV_OK; +} + +/*! + * @brief Initializing GFID for guest allocated memdescs + */ +static NV_STATUS _memdescSetGuestAllocatedFlag +( + OBJGPU *pGpu, + PMEMORY_DESCRIPTOR pMemDesc, + NvBool bSet +) +{ + + return NV_OK; +} + +/*! + * @brief Allocate and initialize a new empty memory descriptor + * + * Allocate a new memory descriptor. This allocates the memory descriptor + * only. memdescAlloc or memdescDescribe are later used to allocate or associate + * memory to the memory descriptor. + * + * This routine takes size and the physical contiguous of the future allocation + * in order to size the PTE array for non-contiguous requests. + * + * memdescDestroy should be called to free a memory descriptor. + * + * If MEMDESC_FLAGS_PRE_ALLOCATED is specified, use the memory descriptor + * supplied by the client instead of allocating a new one. + * + * @param[out] ppMemDesc Return pointer to new memory descriptor + * @param[in] pGpu + * @param[in] Size Size of memory descriptor in bytes. + * @param[in] PhysicallyContiguous Need physical contig or can it be scattered? 
+ * @param[in] AddressSpace NV_ADDRESS_SPACE requested + * @param[in] CpuCacheAttrib CPU cacheability requested + * @param[in] Flags MEMDESC_FLAGS_* + * + * @returns NV_OK on success + */ +NV_STATUS +memdescCreate +( + MEMORY_DESCRIPTOR **ppMemDesc, + OBJGPU *pGpu, + NvU64 Size, + NvU64 Alignment, + NvBool PhysicallyContiguous, + NV_ADDRESS_SPACE AddressSpace, + NvU32 CpuCacheAttrib, + NvU64 Flags +) +{ + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 allocSize, MdSize, PageCount; + NvU32 gpuCacheAttrib = NV_MEMORY_UNCACHED; + NV_STATUS status = NV_OK; + NvU64 pageArraySize; + + + allocSize = Size; + + // + // this memdesc may have gotten forced to sysmem if no carveout, + // but for VPR it needs to be in vidmem, so check and re-direct here, + // unless running with zero-FB + // + if ((AddressSpace != ADDR_UNKNOWN) && + (Flags & MEMDESC_ALLOC_FLAGS_PROTECTED) && + (!pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) || + gpuIsCacheOnlyModeEnabled(pGpu))) + { + AddressSpace = ADDR_FBMEM; + } + + if (pGpu != NULL) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + if (((AddressSpace == ADDR_SYSMEM) || (AddressSpace == ADDR_UNKNOWN)) && + !(Flags & MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL)) + { + NvU64 pageSize = osGetPageSize(); + + if (pMemoryManager && pMemoryManager->sysmemPageSize) + { + pageSize = pMemoryManager->sysmemPageSize; + } + + allocSize = RM_ALIGN_UP(allocSize, pageSize); + if (allocSize < Size) + { + return NV_ERR_INVALID_ARGUMENT; + } + } + + if (RMCFG_FEATURE_PLATFORM_MODS || IsT194(pGpu) || IsT234(pGpu)) + { + if ( (AddressSpace == ADDR_FBMEM) && + !(Flags & MEMDESC_ALLOC_FLAGS_PROTECTED) && + memmgrGetUsableMemSizeMB_HAL(pGpu, pMemoryManager) == 0 && + gpuIsUnifiedMemorySpaceEnabled(pGpu)) + { + // On Tegra, force sysmem if carveout and SMMU are not available + AddressSpace = ADDR_SYSMEM; + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM)) + { + CpuCacheAttrib = pGpu->instCacheOverride; + } + } + + // + // Support for aligned contiguous SYSMEM allocations. + // + if ((AddressSpace == ADDR_SYSMEM || AddressSpace == ADDR_UNKNOWN) && + PhysicallyContiguous && (Alignment > RM_PAGE_SIZE)) + { + if (!portSafeAddU64(allocSize, (Alignment - RM_PAGE_SIZE), &allocSize)) + { + return NV_ERR_INVALID_ARGUMENT; + } + } + } + } + + // + // + // Note that we allocate one extra PTE, since we don't know what the PteAdjust + // is yet; if the PteAdjust is zero, we simply won't use it. This is in the + // MEMORY_DESCRIPTOR structure definition. + // + // RM_PAGE_SIZE is 4k and RM_PAGE_SHIFT is 12, so shift operation can be + // modified from ((allocSize + RM_PAGE_SIZE-1) >> RM_PAGE_SHIFT) to below as + // (4k >> 12 = 1). This modification helps us to avoid overflow of variable + // allocSize, in case caller of this function passes highest value of NvU64. + // + // If allocSize is passed as 0, PageCount should be returned as 0. 
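+    //
+    // For example (editorial illustration): allocSize = NV_U64_MAX gives
+    // PageCount = ((allocSize - 1) >> RM_PAGE_SHIFT) + 1 = 0xFFFFFFFFFFFFF + 1,
+    // with no intermediate overflow, whereas the naive
+    // ((allocSize + RM_PAGE_SIZE - 1) >> RM_PAGE_SHIFT) would wrap to 0.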
+    //
+    if (allocSize == 0)
+    {
+        PageCount = 0;
+    }
+    else
+    {
+        PageCount = ((allocSize - 1) >> RM_PAGE_SHIFT) + 1;
+    }
+
+    if (PhysicallyContiguous)
+    {
+        MdSize = sizeof(MEMORY_DESCRIPTOR);
+        pageArraySize = 1;
+    }
+    else
+    {
+        pageArraySize = PageCount + 1;
+
+        MdSize = sizeof(MEMORY_DESCRIPTOR) +
+            (sizeof(RmPhysAddr) * PageCount);
+        NV_ASSERT(MdSize <= 0xffffffffULL);
+        if (MdSize > 0xffffffffULL)
+            return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+
+    if (Flags & MEMDESC_FLAGS_PAGED_SYSMEM)
+    {
+        // The flag MEMDESC_FLAGS_PAGED_SYSMEM is only for Windows
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    if (Flags & MEMDESC_FLAGS_PRE_ALLOCATED)
+    {
+        // Only fixed-size memDescs are supported
+        if (PhysicallyContiguous == NV_FALSE)
+        {
+            return NV_ERR_BUFFER_TOO_SMALL;
+        }
+
+        NV_ASSERT_OR_RETURN(*ppMemDesc, NV_ERR_NOT_SUPPORTED);
+
+        pMemDesc = *ppMemDesc;
+    }
+    else
+    {
+        pMemDesc = portMemAllocNonPaged((NvU32)MdSize);
+        if (pMemDesc == NULL)
+        {
+            return NV_ERR_NO_MEMORY;
+        }
+    }
+
+    portMemSet(pMemDesc, 0, (NvU32)MdSize);
+
+    // Fill in initial non-zero parameters
+    pMemDesc->pGpu                 = pGpu;
+    pMemDesc->Size                 = Size;
+    pMemDesc->PageCount            = PageCount;
+    pMemDesc->ActualSize           = allocSize;
+    pMemDesc->pageArraySize        = pageArraySize;
+    pMemDesc->_addressSpace        = AddressSpace;
+    pMemDesc->RefCount             = 1;
+    pMemDesc->DupCount             = 1;
+    pMemDesc->_subDeviceAllocCount = 1;
+    pMemDesc->_flags               = Flags;
+    pMemDesc->_gpuCacheAttrib      = gpuCacheAttrib;
+    pMemDesc->_gpuP2PCacheAttrib   = NV_MEMORY_UNCACHED;
+    pMemDesc->Alignment            = Alignment;
+    pMemDesc->gfid                 = GPU_GFID_PF;
+    pMemDesc->bUsingSuballocator   = NV_FALSE;
+    pMemDesc->bDeferredFree        = NV_FALSE;
+    pMemDesc->numaNode             = NV0000_CTRL_NO_NUMA_NODE;
+
+    // parameter to determine page granularity
+    pMemDesc->pageArrayGranularity = RM_PAGE_SIZE;
+
+    memdescSetCpuCacheAttrib(pMemDesc, CpuCacheAttrib);
+
+    // Set any additional flags
+    pMemDesc->_flags |= MEMDESC_FLAGS_KERNEL_MODE;
+    if (PhysicallyContiguous)
+        pMemDesc->_flags |= MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS;
+    else
+        pMemDesc->_flags &= ~MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS;
+
+    // OBJHEAP may not be created at this time and pMemDesc->pHeap may be NULL after this if-else
+    if (Flags & MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL)
+    {
+        pMemDesc->_flags |= MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL;
+        pMemDesc->_flags &= ~MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE;
+    }
+    else if (Flags & MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE)
+    {
+        NV_ASSERT_OK_OR_GOTO(status, _memdescSetSubAllocatorFlag(pGpu, pMemDesc, NV_TRUE), failed);
+    }
+
+    // In case of guest allocated memory, just initialize GFID
+    if (Flags & MEMDESC_FLAGS_GUEST_ALLOCATED)
+    {
+        NV_ASSERT_OK_OR_GOTO(status, _memdescSetGuestAllocatedFlag(pGpu, pMemDesc, NV_TRUE), failed);
+    }
+
+    //
+    // Set the localized flag if we already know we're localized. Must be set
+    // manually if the flag is set after memdesc creation.
+    //
+    if (Flags & MEMDESC_FLAGS_ALLOC_AS_LOCALIZED)
+    {
+        MEMORY_DESCRIPTOR *pRootMemDesc = memdescGetRootMemDesc(pMemDesc, NULL);
+        pMemDesc->localizedMask = GPU_GET_MEMORY_MANAGER(pRootMemDesc->pGpu)->localizedMask;
+    }
+
+failed:
+    if (status != NV_OK)
+    {
+        if (!(Flags & MEMDESC_FLAGS_PRE_ALLOCATED))
+        {
+            portMemFree(pMemDesc);
+        }
+    }
+    else
+    {
+        *ppMemDesc = pMemDesc;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Initialize a caller-allocated memory descriptor
+ *
+ * Helper to make it easier to get the memDesc **, and typically used
+ * with memdescDescribe.
+ *
+ * Can only be used for physically contiguous regions with a fixed
+ * size PTE array.
+ * + * memdescDestroy should be called to free a memory descriptor. + * + * If MEMDESC_FLAGS_PRE_ALLOCATED is specified, use the memory descriptor + * supplied by the client instead of allocating a new one. + * + * @param[out] pMemDesc Return pointer to new memory descriptor + * @param[in] pGpu + * @param[in] Size Size of memory descriptor in bytes + * @param[in] AddressSpace NV_ADDRESS_SPACE requested + * @param[in] CpuCacheAttrib CPU cacheability requested + * @param[in] Flags MEMDESC_FLAGS_* + * + * @returns void with no malloc there should be no failure cases + */ +void +memdescCreateExisting +( + MEMORY_DESCRIPTOR *pMemDesc, + OBJGPU *pGpu, + NvU64 Size, + NV_ADDRESS_SPACE AddressSpace, + NvU32 CpuCacheAttrib, + NvU64 Flags +) +{ + NV_STATUS status; + status = memdescCreate(&pMemDesc, pGpu, Size, 0, NV_TRUE, AddressSpace, + CpuCacheAttrib, + Flags | MEMDESC_FLAGS_PRE_ALLOCATED | MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE); + NV_ASSERT(status == NV_OK); +} + + +/*! + * Increment ref count + */ +void memdescAddRef +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(pMemDesc != NULL); + ++(pMemDesc->RefCount); +} + +/*! + * Decrement ref count + */ +void memdescRemoveRef +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT_OR_RETURN_VOID(pMemDesc != NULL); + --(pMemDesc->RefCount); +} + +// +// Destroy all IOMMU mappings under this memdesc, including child +// mappings for root memdescs. +// +// TODO: merge the new IOMMU paths with the SMMU paths (see bug 1625121). +// +static void +_memdescFreeIommuMappings(PMEMORY_DESCRIPTOR pMemDesc) +{ +#if (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_PLATFORM_MODS) && !NVCPU_IS_ARM + PIOVAMAPPING pIovaMapping = pMemDesc->_pIommuMappings; + + if (!pIovaMapping) + return; + + if (memdescIsSubMemoryMemDesc(pMemDesc)) + { + iovaMappingDestroy(pIovaMapping); + return; + } + + while (pIovaMapping) + { + PIOVAMAPPING pTmpIovaMapping = pIovaMapping->pNext; + iovaMappingDestroy(pIovaMapping); + pIovaMapping = pTmpIovaMapping; + } + + pMemDesc->_pIommuMappings = NULL; +#endif +} + +/*! + * Destroy a memory descriptor if last reference is released + * + * If the memory descriptor is down to one reference, we need + * to check with the bus code check if that reference needs + * to be reclaimed. + * + * @param[in] pMemDesc Memory descriptor to be destroyed + * + * @returns None + */ +void +memdescDestroy +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + // Allow null frees + if (!pMemDesc) + { + return; + } + + memdescRemoveRef(pMemDesc); + + // if still more references are there for pMemDesc (pMemDesc->RefCount != 0), then bail out. + + if (pMemDesc->RefCount == 0) + { + MEM_DESC_DESTROY_CALLBACK *pCb = memdescGetDestroyCallbackList(pMemDesc); + MEM_DESC_DESTROY_CALLBACK *pNext; + + if (pMemDesc->_flags & MEMDESC_FLAGS_DUMMY_TOPLEVEL) + { + // When called from RmFreeFrameBuffer() and memdescFree could not do it because it is unallocated. + pMemDesc->_pNext = NULL; + pMemDesc->_subDeviceAllocCount = 1; + } + + NV_ASSERT(pMemDesc->childDescriptorCnt == 0); + NV_ASSERT(pMemDesc->_addressSpace == ADDR_FBMEM || pMemDesc->pHeap == NULL); + + // + // If there is private memdata, use the CB to free + // + if (pMemDesc->_pMemData && pMemDesc->_pMemDataReleaseCallback) + { + pMemDesc->_pMemDataReleaseCallback(pMemDesc); + } + + if (pMemDesc->bDeferredFree) + { + memdescFree(pMemDesc); + } + else if (pMemDesc->Allocated != 0) + { + // + // The caller forgot to free the actual memory before destroying the memdesc. + // Please fix this by calling memdescFree(). 
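+            // The expected teardown order is (editorial illustration):
+            //
+            //     memdescFree(pMemDesc);    // release the backing allocation
+            //     memdescDestroy(pMemDesc); // then drop the descriptor itself
+            //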
+ // To prevent memory leaks, we explicitly free here until its fixed elsewhere. + // + NV_PRINTF(LEVEL_ERROR, "Destroying unfreed memory %p\n", pMemDesc); + NV_PRINTF(LEVEL_ERROR, "Please call memdescFree()\n"); + memdescFree(pMemDesc); + NV_ASSERT(!pMemDesc->Allocated); + } + + if (memdescGetStandbyBuffer(pMemDesc)) + { + memdescFree(memdescGetStandbyBuffer(pMemDesc)); + memdescDestroy(memdescGetStandbyBuffer(pMemDesc)); + memdescSetStandbyBuffer(pMemDesc, NULL); + } + + // + // Submemory descriptors will be destroyed without going through a free + // path, so we need to make sure that we remove the IOMMU submapping + // here. For root descriptors, we should already have removed all the + // associated IOVA mappings. + // + // However, for memory descriptors that weren't allocated by the RM, + // (e.g., were created from a user allocation), we won't go through a + // free path at all. In this case, mappings for other GPUs may still be + // attached to this root memory descriptor, so release them now. + // + _memdescFreeIommuMappings(pMemDesc); + + // Notify all interested parties of destruction + while (pCb) + { + pNext = pCb->pNext; + pCb->destroyCallback(pMemDesc->pGpu, pCb->pObject, pMemDesc); + // pCb is now invalid + pCb = pNext; + } + + portMemFree(pMemDesc->pPteSpaMappings); + pMemDesc->pPteSpaMappings = NULL; + portMemFree(pMemDesc->pSubMemDescList); + pMemDesc->pSubMemDescList = NULL; + + if (pMemDesc->pPteEgmMappings != NULL) + { + portMemFree(pMemDesc->pPteEgmMappings); + pMemDesc->pPteEgmMappings = NULL; + } + + if (pMemDesc->_pParentDescriptor) + { + if ((pMemDesc->_flags & MEMDESC_FLAGS_PRE_ALLOCATED) == 0) + pMemDesc->_pParentDescriptor->childDescriptorCnt--; + memdescDestroy(pMemDesc->_pParentDescriptor); + pMemDesc->_pParentDescriptor = NULL; + } + + // Verify memdesc is not top + NV_ASSERT(memdescHasSubDeviceMemDescs(pMemDesc) == NV_FALSE); + + if ((pMemDesc->_flags & MEMDESC_FLAGS_PRE_ALLOCATED) == 0) + { + portMemFree(pMemDesc); + } + } +} + +/*! + * @brief Function that frees subdevice memory descriptors. If there are no + * subdevice memory descriptors function just simply resets memdesc structure. + * Top level memory descriptor is not destroyed. + * + * @param[in,out] pMemDesc Top level memory descriptor. + * + * @returns None + */ +static void +_memSubDeviceFreeAndDestroy +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + MEMORY_DESCRIPTOR *pSubDevMemDesc = pMemDesc->_pNext; + MEMORY_DESCRIPTOR *pNextMemDesc; + OBJGPU *pGpu = pMemDesc->pGpu; + NvBool bBcState; + + // No subdevice memdescs + if (pSubDevMemDesc == NULL || pGpu == NULL) + { + return; + } + + bBcState = gpumgrGetBcEnabledStatus(pGpu); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + + do + { + pNextMemDesc = pSubDevMemDesc->_pNext; + pSubDevMemDesc->_pNext = NULL; + memdescFree(pSubDevMemDesc); + memdescDestroy(pSubDevMemDesc); + pSubDevMemDesc = pNextMemDesc; + } while (pSubDevMemDesc != NULL); + + gpumgrSetBcEnabledStatus(pGpu, bBcState); +} + +/*! + * @brief Lower memdesc allocation layer for the special case of allocation + in the VPR region when MODS is managing it. + * + * @param[in] pMemDesc Memory descriptor to allocate + * + * @returns NV_OK on successful allocation. + * NV_ERR_NOT_SUPPORTED if not supported + */ +static NV_STATUS +_memdescAllocVprRegion +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! 
+ * @brief Allocate and populate the EGM array based off of the already + * populated _pteArray of the memdesc + * + * @param[in] pMemDesc Memory descriptor to allocate EGM array in + * + * @returns NV_OK on successful allocation. NV_ERR if not. + */ +static NV_INLINE NV_STATUS +_memdescAllocEgmArray +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NvU64 i; + NvU64 pageCount = pMemDesc->PageCount; + + // + // Get the root memory descriptor's memory manager to be able to get the + // EGM base of that GPU, instead of the mapping GPU in the case of this + // array being used in a submemdesc. The submemdesc should always have the + // mapping of the root since it's a submemdesc derived from the root, and + // not based on the mapping GPU. + // + MEMORY_DESCRIPTOR *pRootMemDesc = memdescGetRootMemDesc(pMemDesc, NULL); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pRootMemDesc->pGpu); + + if (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) + { + pageCount = 1; + } + + if (pMemDesc->pPteEgmMappings == NULL) + { + pMemDesc->pPteEgmMappings = portMemAllocNonPaged(sizeof(RmPhysAddr) * pageCount); + } + + NV_ASSERT_OR_RETURN(pMemDesc->pPteEgmMappings != NULL, NV_ERR_NO_MEMORY); + + for (i = 0; i < pageCount; i++) + { + pMemDesc->pPteEgmMappings[i] = pMemDesc->_pteArray[i] - + pMemoryManager->localEgmBasePhysAddr; + } + + return NV_OK; +} + +/*! + * @brief Lower memdesc allocation layer. Provides underlying allocation + * functionality. + * + * @param[in,out] pMemDesc Memory descriptor to allocate + * + * @returns NV_OK on successful allocation. Various NV_ERR_GENERIC codes otherwise. + */ +static NV_STATUS +_memdescAllocInternal +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + NV_STATUS status = NV_OK; + FB_ALLOC_INFO *pFbAllocInfo = NULL; + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL; + + if (pMemDesc->Allocated) + { + NV_ASSERT(!pMemDesc->Allocated); + return NV_ERR_INVALID_OBJECT_BUFFER; + } + + // Special case of an allocation request in MODS managed VPR region. + status = _memdescAllocVprRegion(pMemDesc); + if (status != NV_ERR_NOT_SUPPORTED) + goto done; + + switch (pMemDesc->_addressSpace) + { + case ADDR_EGM: + case ADDR_SYSMEM: + // System memory can be obtained from osAllocPages + status = osAllocPages(pMemDesc); + if (status != NV_OK) + { + goto done; + } + + if (memdescIsEgm(pMemDesc)) + { + NV_ASSERT_OK_OR_GOTO(status, + _memdescAllocEgmArray(pMemDesc), + done); + } + + // + // The pages have been allocated, so mark the descriptor as + // allocated. The IOMMU-mapping code needs the memdesc to be + // allocated in order to create the mapping. + // + pMemDesc->Allocated = 1; + + // + // TODO: merge new IOMMU paths with the SMMU paths below (see bug + // 1625121). For now they are parallel, and only one will be + // used. 
+ // + if (!memdescGetFlag(pMemDesc, MEMDESC_FLAGS_CPU_ONLY) && + !memdescIsEgm(pMemDesc) && + !memdescGetFlag(pMemDesc, MEMDESC_FLAGS_SKIP_IOMMU_MAPPING)) + { + status = memdescMapIommu(pMemDesc, pGpu->busInfo.iovaspaceId); + if (status != NV_OK) + { + pMemDesc->Allocated = 0; + osFreePages(pMemDesc); + goto done; + } + } + + if (pMemDesc->_flags & MEMDESC_FLAGS_PROVIDE_IOMMU_MAP) + { + NV_PRINTF(LEVEL_ERROR, "SMMU mapping allocation is not supported for ARMv7.\n"); + NV_ASSERT(0); + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + else if ((pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) && + RMCFG_FEATURE_PLATFORM_MODS) + { + if (pMemDesc->Alignment > RM_PAGE_SIZE) + { + RmPhysAddr addr = memdescGetPhysAddr(pMemDesc, AT_CPU, 0); + NvU64 offset; + + NV_ASSERT((addr & (RM_PAGE_SIZE - 1)) == 0); + + NV_ASSERT((pMemDesc->Alignment & (pMemDesc->Alignment - 1)) == 0); + offset = addr & (pMemDesc->Alignment - 1); + + if (offset) + { + NV_ASSERT((pMemDesc->PageCount * pMemDesc->pageArrayGranularity - pMemDesc->Size) >= offset); + NV_ASSERT(pMemDesc->PteAdjust == 0); + pMemDesc->PteAdjust += NvU64_LO32(pMemDesc->Alignment - offset); + } + } + } + + break; + + default: + // Don't know how to do any other types of memory yet + DBG_BREAKPOINT(); + status = NV_ERR_GENERIC; + goto done; + } + +done: + if (status == NV_OK) + { + memdescPrintMemdesc(pMemDesc, NV_TRUE, MAKE_NV_PRINTF_STR("memdesc allocated")); + } + else if (pMemDesc->pPteEgmMappings != NULL) + { + portMemFree(pMemDesc->pPteEgmMappings); + pMemDesc->pPteEgmMappings = NULL; + } + portMemFree(pFbAllocPageFormat); + portMemFree(pFbAllocInfo); + + return status; +} + +/*! + * @brief Upper memdesc allocation layer. Provides support for per-subdevice + * sysmem buffers and lockless sysmem allocation. + * + * @param[in,out] pMemDesc Memory descriptor to allocate + * + * @returns NV_OK on successful allocation. Various NV_ERR_GENERIC codes otherwise. + */ +NV_STATUS +memdescAlloc +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + NV_STATUS status = NV_OK; + NvBool bcState = NV_FALSE; + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvBool reAcquire; + NvU32 gpuMask = 0; + + NV_ASSERT_OR_RETURN(!pMemDesc->Allocated, NV_ERR_INVALID_OBJECT_BUFFER); + + switch (pMemDesc->_addressSpace) + { + case ADDR_SYSMEM: + case ADDR_EGM: + // Can only alloc sysmem on 0FB GSP + if (RMCFG_FEATURE_PLATFORM_GSP && + !memdescGetFlag(pMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB)) + { + // + // TO DO: Make this an error once existing allocations are cleaned up. + // After that pHeap selection can be moved to memdescAllocInternal() + // + NV_PRINTF(LEVEL_WARNING, + "WARNING sysmem alloc on GSP firmware moved to FB\n"); + pMemDesc->_addressSpace = ADDR_FBMEM; + pMemDesc->pHeap = GPU_GET_HEAP(pGpu); + } + + break; + default: + // Don't know how to do any other types of memory yet + DBG_BREAKPOINT(); + return NV_ERR_GENERIC; + } + + if (status != NV_OK) + { + return status; + } + + if (gpumgrGetBcEnabledStatus(pGpu)) + { + // Broadcast memdescAlloc call with flag set to allocate per subdevice. 
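+        //
+        // Per-subdevice allocation creates one memdesc per subdevice and links
+        // them off the top-level memdesc via _pNext, e.g. for two subdevices
+        // (editorial illustration; see the submemory chart further below):
+        //
+        //     [top level MD] -> [subdevice 0 MD] -> [subdevice 1 MD]
+        //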
+ if (pMemDesc->_flags & MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE) + { + NvU32 i; + MEMORY_DESCRIPTOR *pSubDevMemDesc = pMemDesc; + MEMORY_DESCRIPTOR *pPrev = pMemDesc; + OBJGPU *pGpuChild; + + pMemDesc->_subDeviceAllocCount = NumSubDevices(pGpu); + + for (i = 0; i < pMemDesc->_subDeviceAllocCount; i++) + { + // Get pGpu for this subdeviceinst + pGpuChild = gpumgrGetGpuFromSubDeviceInst(gpuGetDeviceInstance(pGpu), i); + if (NULL == pGpuChild) + { + NV_ASSERT(0); + status = NV_ERR_OBJECT_NOT_FOUND; + goto subdeviceAlloc_failed; + } + + // + // We are accessing the fields of the top level desc here directly without using the + // accessor routines on purpose. + // + status = memdescCreate(&pSubDevMemDesc, pGpuChild, pMemDesc->Size, pMemDesc->Alignment, + !!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS), + pMemDesc->_addressSpace, + pMemDesc->_cpuCacheAttrib, + pMemDesc->_flags & ~MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE); + + if (status != NV_OK) + { + NV_ASSERT(0); + goto subdeviceAlloc_failed; + } + + pSubDevMemDesc->_gpuCacheAttrib = pMemDesc->_gpuCacheAttrib; + pSubDevMemDesc->_pageSize = pMemDesc->_pageSize; + + // Force broadcast state to false when allocating a subdevice memdesc + gpumgrSetBcEnabledStatus(pGpuChild, NV_FALSE); + + status = memdescAlloc(pSubDevMemDesc); + + if (pMemDesc->_addressSpace == ADDR_FBMEM) + { + // + // The top level memdesc could have flags that don't reflect contiguity which + // is set after memdescAlloc. + // + pMemDesc->Alignment = pSubDevMemDesc->Alignment; + pMemDesc->_flags = pSubDevMemDesc->_flags | MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE; + pMemDesc->ActualSize = pSubDevMemDesc->ActualSize; + } + + // Restore broadcast state to true after allocating a subdevice memdesc + gpumgrSetBcEnabledStatus(pGpuChild, NV_TRUE); + + if (status != NV_OK) + { + memdescDestroy(pSubDevMemDesc); + NV_ASSERT(0); + goto subdeviceAlloc_failed; + } + + // Check for similarity in allocations for previous allocated subdev with current allocated subdev. + // If subdev0 ~ subdev1 && subdev1~subdev2 then subdev0 ~ subdev2 and so on...Thus can check symmetry across all subdev allocations + if (i > 0) + { + NV_ASSERT(pPrev->Size == pSubDevMemDesc->Size); + NV_ASSERT(pPrev->PteAdjust == pSubDevMemDesc->PteAdjust); + NV_ASSERT(pPrev->_addressSpace == pSubDevMemDesc->_addressSpace); + NV_ASSERT(pPrev->_flags == pSubDevMemDesc->_flags); + NV_ASSERT(pPrev->_pteKind == pSubDevMemDesc->_pteKind); + NV_ASSERT(pPrev->_pteKindCompressed == pSubDevMemDesc->_pteKindCompressed); + NV_ASSERT(pPrev->pHeap != pSubDevMemDesc->pHeap); + } + + pPrev->_pNext = pSubDevMemDesc; + pPrev = pSubDevMemDesc; + } + pMemDesc->Allocated = 1; + return NV_OK; + } + else if (pMemDesc->_addressSpace == ADDR_FBMEM) + { + // Broadcast memdescAlloc call on vidmem *without* flag set to allocate per subdevice + NV_ASSERT(0); + } + } + + // Unicast memdescAlloc call but with flag set to allocate per subdevice. + NV_ASSERT(!((pMemDesc->_flags & MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE) && !gpumgrGetBcEnabledStatus(pGpu))); + + reAcquire = NV_FALSE; + bcState = NV_FALSE; + + if ((pMemDesc->_flags & MEMDESC_FLAGS_LOCKLESS_SYSMEM_ALLOC) && (pMemDesc->_addressSpace != ADDR_FBMEM)) + { + bcState = gpumgrGetBcEnabledStatus(pGpu); + if (RMCFG_FEATURE_RM_BASIC_LOCK_MODEL) + { + // + // There is no equivalent routine for osCondReleaseRmSema in + // the new basic lock model. + + // + // However, we can't drop the RM system semaphore in this + // path because on non-windows platforms (i.e. MODS) it + // has undesirable consequences. 
So for now we must + // bracket this section with a reference to the feature + // flag until we can rework this interface. + // + // + // Check to make sure we own the lock and that we are + // not at elevated IRQL; this models the behavior + // of osCondReleaseRmSema. + // + if (!osIsRaisedIRQL() && + (rmGpuGroupLockIsOwner(pGpu->gpuInstance, GPU_LOCK_GRP_DEVICE, &gpuMask) || + rmGpuGroupLockIsOwner(pGpu->gpuInstance, GPU_LOCK_GRP_SUBDEVICE, &gpuMask))) + { + // + // Release all owned gpu locks rather than just the + // device-related locks because the caller may be holding more + // than the required device locks. All currently owned + // locks will be re-acquired before returning. + // + // This prevents potential GPU locking violations (e.g., if the + // caller is holding all the gpu locks but only releases the + // first of two device locks, then attempting to re-acquire + // the first device lock will be a locking violation with + // respect to the second device lock.) + // + gpuMask = rmGpuLocksGetOwnedMask(); + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + reAcquire = NV_TRUE; + } + } + else + { + reAcquire = osCondReleaseRmSema(pSys->pSema); + } + } + + // Actually allocate the memory + NV_CHECK_OK(status, LEVEL_ERROR, _memdescAllocInternal(pMemDesc)); + + if (status != NV_OK) + { + pMemDesc->pHeap = NULL; + } + + if (reAcquire) + { + if (osAcquireRmSema(pSys->pSema) != NV_OK) + { + DBG_BREAKPOINT(); + + } + + if (rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_MASK, + GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM, + &gpuMask) != NV_OK) + { + DBG_BREAKPOINT(); + } + // Releasing the semaphore allows another thread to enter RM and + // modify broadcast state. We need to set it back (see bug 368643) + gpumgrSetBcEnabledStatus(pGpu, bcState); + } + + return status; + +subdeviceAlloc_failed: + _memSubDeviceFreeAndDestroy(pMemDesc); + pMemDesc->_subDeviceAllocCount = 1; + pMemDesc->_pNext = NULL; + return status; +} + +/*! + * Allocate memory from one of the possible locations specified in pList. + * + * @param[in,out] pMemDesc Memory descriptor to allocate + * @param[in] pList List of NV_ADDRESS_SPACE values. Terminated + * by an ADDR_UNKNOWN entry. + * + * @returns NV_OK on successful allocation. Various NV_ERR_GENERIC codes otherwise. + */ +NV_STATUS +memdescAllocList +( + MEMORY_DESCRIPTOR *pMemDesc, + const NV_ADDRESS_SPACE *pList +) +{ + NV_STATUS status = NV_ERR_INVALID_ARGUMENT; + NvU32 i = 0; + + if (!pList) + { + return status; + } + + // + // this memdesc may have gotten forced to sysmem if no carveout, + // but for VPR it needs to be in vidmem, so check and re-direct here + // + if (pMemDesc->_flags & MEMDESC_ALLOC_FLAGS_PROTECTED) + { + OBJGPU *pGpu = pMemDesc->pGpu; + + // Only force to vidmem if not running with zero-FB. + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) || + gpuIsCacheOnlyModeEnabled(pGpu)) + { + pList = ADDRLIST_FBMEM_ONLY; + } + } + + while (pList[i] != ADDR_UNKNOWN) + { + pMemDesc->_addressSpace = pList[i]; + status = memdescAlloc(pMemDesc); + + if (status == NV_OK) + { + return status; + } + + i++; + } + + return status; +} + +/*! + * @brief Lower memdesc free layer. Provides underlying free + * functionality. 
+ *
+ * @param[in,out] pMemDesc Memory descriptor to free
+ *
+ * @returns None
+ */
+static void
+_memdescFreeInternal
+(
+    MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+    MEM_DESC_DESTROY_CALLBACK *pCb, *pNext;
+    NvU64 oldSize;
+
+    // Allow null frees
+    if (!pMemDesc)
+    {
+        return;
+    }
+
+    pCb = memdescGetDestroyCallbackList(pMemDesc);
+
+    // Notify all interested parties of destruction
+    while (pCb)
+    {
+        pNext = pCb->pNext;
+        pCb->destroyCallback(pMemDesc->pGpu, pCb->pObject, pMemDesc);
+        // pCb is now invalid
+        pCb = pNext;
+    }
+
+    if (memdescHasSubDeviceMemDescs(pMemDesc))
+        return;
+
+    memdescPrintMemdesc(pMemDesc, NV_FALSE, MAKE_NV_PRINTF_STR("memdesc being freed"));
+
+    // Bail out early in case this memdesc describes a MODS managed VPR region.
+    if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_VPR_REGION_CLIENT_MANAGED))
+        return;
+
+    switch (pMemDesc->_addressSpace)
+    {
+        case ADDR_SYSMEM:
+        case ADDR_EGM:
+
+            oldSize             = pMemDesc->Size;
+            pMemDesc->Size      = pMemDesc->ActualSize;
+            pMemDesc->PageCount = ((pMemDesc->ActualSize + pMemDesc->pageArrayGranularity - 1) >> BIT_IDX_64(pMemDesc->pageArrayGranularity));
+
+            osFreePages(pMemDesc);
+
+            pMemDesc->Size      = oldSize;
+            pMemDesc->PageCount = ((oldSize + pMemDesc->pageArrayGranularity - 1) >> BIT_IDX_64(pMemDesc->pageArrayGranularity));
+
+            break;
+
+        default:
+            // Don't know how to do any other types of memory yet
+            DBG_BREAKPOINT();
+    }
+}
+
+/*!
+ * @brief Upper memdesc free layer. Provides support for per-subdevice
+ * sysmem buffers and lockless sysmem allocation. Because of SLI and
+ * subdevice submem allocation support (refer to the submem chart), if the
+ * memory has never been allocated this function will just unlink the
+ * subdevice structure and destroy the subdevice descriptors.
+ *
+ * @param[in,out] pMemDesc Memory descriptor to free
+ *
+ * @returns None
+ */
+void
+memdescFree
+(
+    MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+    // Allow null frees
+    if (!pMemDesc)
+    {
+        return;
+    }
+
+    if (memdescIsSubMemoryMemDesc(pMemDesc))
+    {
+        NV_ASSERT(!pMemDesc->_pInternalMapping);
+
+        if (pMemDesc->_addressSpace == ADDR_SYSMEM)
+        {
+            // The memdesc is being freed so destroy all of its IOMMU mappings.
+            _memdescFreeIommuMappings(pMemDesc);
+        }
+
+        if (pMemDesc->_addressSpace != ADDR_FBMEM &&
+            pMemDesc->_addressSpace != ADDR_SYSMEM &&
+            pMemDesc->_addressSpace != ADDR_EGM)
+        {
+            return;
+        }
+
+        _memSubDeviceFreeAndDestroy(pMemDesc);
+    }
+    else
+    {
+        //
+        // In case RM attempts to free memory that has more than 1 refcount,
+        // the free is deferred until the refcount reaches 0.
+        //
+        // Bug 3307574: RM crashes when clients specify a sysmem UserD location.
+        // RM attempts to peek at the client allocated UserD when waiting for a
+        // channel to go idle.
+        //
+        if (pMemDesc->RefCount > 1 && pMemDesc->Allocated == 1)
+        {
+            pMemDesc->bDeferredFree = NV_TRUE;
+            return;
+        }
+
+        if (!pMemDesc->Allocated)
+        {
+            return;
+        }
+        pMemDesc->Allocated--;
+        if (0 != pMemDesc->Allocated)
+        {
+            return;
+        }
+
+        // If standbyBuffer memory was allocated then free it
+        if (pMemDesc->_pStandbyBuffer)
+        {
+            memdescFree(pMemDesc->_pStandbyBuffer);
+            memdescDestroy(pMemDesc->_pStandbyBuffer);
+            pMemDesc->_pStandbyBuffer = NULL;
+        }
+
+        NV_ASSERT(!pMemDesc->_pInternalMapping);
+
+        if (pMemDesc->_addressSpace == ADDR_SYSMEM)
+        {
+            // The memdesc is being freed so destroy all of its IOMMU mappings.
+            _memdescFreeIommuMappings(pMemDesc);
+        }
+
+        if (pMemDesc->_addressSpace != ADDR_FBMEM &&
+            pMemDesc->_addressSpace != ADDR_EGM &&
+            pMemDesc->_addressSpace != ADDR_SYSMEM)
+        {
+            return;
+        }
+
+        _memSubDeviceFreeAndDestroy(pMemDesc);
+
+        _memdescFreeInternal(pMemDesc);
+    }
+
+    // Reset tracking state
+    pMemDesc->_pNext = NULL;
+    pMemDesc->_subDeviceAllocCount = 1;
+
+    //
+    // Reset tracking state of parent
+    // Why it is needed:
+    // When a submemory toplevel memdesc with subdevices is freed,
+    // the subdevice memdescs and their parent are destroyed or their
+    // refcount decreased.
+    // When the parent subdevice descriptors are destroyed, their
+    // top level descriptor is left alone and has a dangling
+    // _pNext pointer.
+    //
+    if ((pMemDesc->_pParentDescriptor != NULL) &&
+        (memdescHasSubDeviceMemDescs(pMemDesc->_pParentDescriptor)) &&
+        (pMemDesc->_pParentDescriptor->RefCount == 1))
+    {
+        pMemDesc->_pParentDescriptor->_pNext = NULL;
+        pMemDesc->_pParentDescriptor->_subDeviceAllocCount = 1;
+    }
+}
+
+/*!
+ * @brief Lock the paged virtual memory described by the memory descriptor
+ *
+ * @param[in] pMemDesc Memory descriptor to lock
+ *
+ * @returns NV_OK on success. Various NV_ERR_GENERIC codes otherwise.
+ */
+NV_STATUS
+memdescLock
+(
+    MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    if (!(pMemDesc->_flags & MEMDESC_FLAGS_PAGED_SYSMEM))
+    {
+        return NV_ERR_ILLEGAL_ACTION;
+    }
+
+    return osLockMem(pMemDesc);
+}
+
+/*!
+ * @brief Unlock the paged virtual memory described by the memory descriptor
+ *
+ * @param[in] pMemDesc Memory descriptor to unlock
+ *
+ * @returns NV_OK on success. Various NV_ERR_GENERIC codes otherwise.
+ */
+NV_STATUS
+memdescUnlock
+(
+    MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    if (!(pMemDesc->_flags & MEMDESC_FLAGS_PAGED_SYSMEM))
+    {
+        return NV_ERR_ILLEGAL_ACTION;
+    }
+
+    return osUnlockMem(pMemDesc);
+}
+
+/*!
+ * @brief Get a CPU mapping to the memory described by a memory descriptor
+ *
+ * This is for memory descriptors used by RM clients, not by the RM itself.
+ * For internal mappings the busMapRmAperture() hal routines are used.
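+ *
+ * Typical call pattern (editorial sketch only; error handling omitted):
+ *
+ *     NvP64 va = NvP64_NULL, priv = NvP64_NULL;
+ *     status = memdescMap(pMemDesc, 0, memdescGetSize(pMemDesc), NV_TRUE,
+ *                         NV_PROTECT_READ_WRITE, &va, &priv);
+ *     // ... access the mapping ...
+ *     memdescUnmap(pMemDesc, NV_TRUE, va, priv);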
+ * + * @param[in] pMemDesc Memory descriptor to map + * @param[in] Offset Offset into memory descriptor to start map + * @param[in] Size Size of mapping + * @param[in] Kernel Kernel or user address space + * @param[in] Protect NV_PROTECT_* + * @param[out] pAddress Return address + * @param[out] pPriv Return cookie to be passed back to memdescUnmap + * + * @returns NV_STATUS + */ + +NV_STATUS +memdescMapOld +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 Offset, + NvU64 Size, + NvBool Kernel, + NvU32 Protect, + void **pAddress, + void **pPriv +) +{ + NvP64 pAddressP64 = NV_PTR_TO_NvP64(*pAddress); + NvP64 pPrivP64 = NV_PTR_TO_NvP64(*pPriv); + NV_STATUS status; + +#if !defined(NV_64_BITS) + NV_ASSERT(Kernel); +#endif + + status = memdescMap(pMemDesc, + Offset, + Size, + Kernel, + Protect, + &pAddressP64, + &pPrivP64); + + *pAddress = NvP64_VALUE(pAddressP64); + *pPriv = NvP64_VALUE(pPrivP64); + + return status; +} + +NV_STATUS +memdescMap +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 Offset, + NvU64 Size, + NvBool Kernel, + NvU32 Protect, + NvP64 *pAddress, + NvP64 *pPriv +) +{ + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(((Offset + Size) <= memdescGetSize(pMemDesc)), NV_ERR_INVALID_ARGUMENT); + + NV_ASSERT_OR_RETURN(!memdescHasSubDeviceMemDescs(pMemDesc), NV_ERR_INVALID_OBJECT_BUFFER); + + switch (pMemDesc->_addressSpace) + { + case ADDR_SYSMEM: + case ADDR_EGM: + { + status = osMapSystemMemory(pMemDesc, Offset, Size, + Kernel, Protect, pAddress, pPriv); + if (status != NV_OK) + { + return status; + } + break; + } + + default: + // Don't know how to do any other types of memory yet + DBG_BREAKPOINT(); + return NV_ERR_GENERIC; + } + return NV_OK; +} +void +memdescUnmapOld +( + MEMORY_DESCRIPTOR *pMemDesc, + NvBool Kernel, + void *Address, + void *Priv +) +{ + memdescUnmap(pMemDesc, + Kernel, + NV_PTR_TO_NvP64(Address), + NV_PTR_TO_NvP64(Priv)); +} + +/*! 
+ * @brief Remove a mapping for the memory descriptor, reversing memdescMap + * + * @param[in] pMemDesc Memory descriptor to unmap + * @param[in] Kernel Kernel or user address space + * @param[in] Address Mapped address + * @param[in] Priv Return priv cookie from memdescMap + * + * @returns None + */ +void +memdescUnmap +( + MEMORY_DESCRIPTOR *pMemDesc, + NvBool Kernel, + NvP64 Address, + NvP64 Priv +) +{ + // Allow null unmaps + if (!Address) + return; + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + + switch (pMemDesc->_addressSpace) + { + case ADDR_SYSMEM: + case ADDR_EGM: + { + osUnmapSystemMemory(pMemDesc, Kernel, Address, Priv); + break; + } + + default: + // Don't know how to do any other types of memory yet + DBG_BREAKPOINT(); + } +} + +typedef enum +{ + MEMDESC_MAP_INTERNAL_TYPE_GSP, // On GSP, use a pre-existing mapping + MEMDESC_MAP_INTERNAL_TYPE_COHERENT_FBMEM, // For NVLINK, use a pre-existing mapping for fbmem + MEMDESC_MAP_INTERNAL_TYPE_BAR2, // Use BAR2 (fbmem or reflected sysmem) + MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT, // Use OS to map sysmem +} MEMDESC_MAP_INTERNAL_TYPE; + +static MEMDESC_MAP_INTERNAL_TYPE +memdescGetMapInternalType +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + if (RMCFG_FEATURE_PLATFORM_GSP) + { + return MEMDESC_MAP_INTERNAL_TYPE_GSP; + } + + return MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT; +} + +void +memdescFlushCpuCaches +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + // Flush WC to get the data written to this mapping out to memory + osFlushCpuWriteCombineBuffer(); + +} + + +/* + * @brief map memory descriptor for internal access + * + * flags - subset of TRANSFER_FLAGS_ + */ +void* +memdescMapInternal +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + MEMDESC_MAP_INTERNAL_TYPE mapType; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(pMemDesc != NULL, NULL); + + if (pMemDesc->_addressSpace == ADDR_FBMEM) + { + pMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + } + + mapType = memdescGetMapInternalType(pGpu, pMemDesc); + + if (pMemDesc->_pInternalMapping != NULL) + { + NV_ASSERT(pMemDesc->_internalMappingRefCount); + + pMemDesc->_internalMappingRefCount++; + return pMemDesc->_pInternalMapping; + } + + switch (mapType) + { + case MEMDESC_MAP_INTERNAL_TYPE_GSP: + case MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT: + { + status = memdescMapOld(pMemDesc, 0, pMemDesc->Size, NV_TRUE, NV_PROTECT_READ_WRITE, + &pMemDesc->_pInternalMapping, &pMemDesc->_pInternalMappingPriv); + NV_CHECK_OR_RETURN(LEVEL_ERROR, status == NV_OK, NULL); + break; + } + + default: + DBG_BREAKPOINT(); + } + + pMemDesc->_internalMappingRefCount = 1; + return pMemDesc->_pInternalMapping; +} + +void memdescUnmapInternal +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + MEMDESC_MAP_INTERNAL_TYPE mapType; + + NV_ASSERT_OR_RETURN_VOID(pMemDesc != NULL); + NV_ASSERT_OR_RETURN_VOID(pMemDesc->_pInternalMapping != NULL && pMemDesc->_internalMappingRefCount != 0); + + if (pMemDesc->_addressSpace == ADDR_FBMEM) + { + pMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + } + + mapType = memdescGetMapInternalType(pGpu, pMemDesc); + + if (mapType == MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT || mapType == MEMDESC_MAP_INTERNAL_TYPE_BAR2) + { + memdescFlushCpuCaches(pGpu, pMemDesc); + } + + if (--pMemDesc->_internalMappingRefCount == 0) + { + switch (mapType) + { + case MEMDESC_MAP_INTERNAL_TYPE_GSP: + case MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT: + memdescUnmapOld(pMemDesc, NV_TRUE, + pMemDesc->_pInternalMapping, 
pMemDesc->_pInternalMappingPriv); + break; + + default: + DBG_BREAKPOINT(); + } + + pMemDesc->_pInternalMapping = NULL; + pMemDesc->_pInternalMappingPriv = NULL; + pMemDesc->_internalMappingRefCount = 0; + } + +} + +/*! + * Describe an existing region of memory in a memory descriptor + * + * Memory must be physically contiguous. + * + * The memory descriptor must be initialized with + * memdescCreate*(), typically memdescCreateExisting() + * prior to calling memdescDescribe. + * + * memdescDescribe() now only updates the fields needed in the call. + * + * @param[out] pMemDesc Memory descriptor to fill + * @param[in] AddressSpace Address space of memory + * @param[in] Base Physical address of region + * @param[in] Size Size of region + * + * @returns None + */ +void +memdescDescribe +( + MEMORY_DESCRIPTOR *pMemDesc, + NV_ADDRESS_SPACE AddressSpace, + RmPhysAddr Base, + NvU64 Size +) +{ + // Some sanity checks to see if we went through MemCreate*() first + NV_ASSERT((pMemDesc->RefCount == 1) && + (memdescGetDestroyCallbackList(pMemDesc) == NULL) && + (pMemDesc->PteAdjust == 0)); + + NV_ASSERT(pMemDesc->_pIommuMappings == NULL); + NV_ASSERT(pMemDesc->Allocated == 0); + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + + // + // Check if the base address accounts for the DMA window start address + // (always in the high, unaddressable bits of the address) and add it + // if necessary. On most platforms, the DMA window start address will + // simply be 0. + // + // This is most likely to happen in cases where the Base address is + // read directly from a register or MMU entry, which does not already + // account for the DMA window. + // + if (pMemDesc->pGpu == NULL) + { + NV_PRINTF(LEVEL_WARNING, + "unable to check Base 0x%016llx for DMA window\n", Base); + } + + if (pMemDesc->Alignment != 0) + { + NV_ASSERT(NV_FLOOR_TO_QUANTA(Base, pMemDesc->Alignment) == Base); + } + + pMemDesc->Size = Size; + pMemDesc->ActualSize = NV_ROUNDUP(Size, pMemDesc->pageArrayGranularity); + pMemDesc->_flags |= MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS; + pMemDesc->_addressSpace = AddressSpace; + pMemDesc->_pteArray[0] = Base & ~RM_PAGE_MASK; + pMemDesc->_subDeviceAllocCount = 1; + pMemDesc->PteAdjust = NvU64_LO32(Base) & RM_PAGE_MASK; + pMemDesc->PageCount = ((Size + pMemDesc->PteAdjust + RM_PAGE_SIZE - 1) >> RM_PAGE_SHIFT); + pMemDesc->_pParentDescriptor = NULL; + pMemDesc->childDescriptorCnt = 0; +} + +/*! + * Static helper called from memdescFillPages. + * When dynamic granularity memdescs are enabled. We only need to copy over the pages + * without worrying about converting them to 4K. 
+ *
+ * @param[in] pMemDesc   Memory descriptor to fill
+ * @param[in] pageIndex  Index into memory descriptor to fill from
+ * @param[in] pPages     Array of physical addresses
+ * @param[in] pageCount  Number of entries in pPages
+ * @param[in] pageSize   Size of each page in pPages
+ *
+ * @returns None
+ */
+static void
+_memdescFillPagesAtNativeGranularity
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvU32              pageIndex,
+    NvU64             *pPages,
+    NvU32              pageCount,
+    NvU64              pageSize
+)
+{
+    NV_STATUS status;
+    NvU32 fillLimit;
+
+    NV_ASSERT_OR_RETURN_VOID(pageIndex < pMemDesc->pageArraySize);
+    NV_ASSERT_OR_RETURN_VOID(portSafeAddU32(pageIndex, pageCount, &fillLimit));
+    NV_ASSERT_OR_RETURN_VOID(fillLimit <= pMemDesc->pageArraySize);
+
+    status = memdescSetPageArrayGranularity(pMemDesc, pageSize);
+    if (status != NV_OK)
+    {
+        return;
+    }
+
+    for (NvU32 i = 0; i < pageCount; i++)
+    {
+        pMemDesc->_pteArray[pageIndex + i] = pPages[i];
+    }
+
+    // PageCount is set to the last populated page.
+    pMemDesc->PageCount = pageIndex + pageCount;
+    pMemDesc->ActualSize = pMemDesc->PageCount * pageSize;
+}
+
+/*!
+ * Fill the PTE array of a memory descriptor with an array of addresses
+ * returned by pmaAllocatePages().
+ *
+ * Memory must be physically discontiguous. For the contiguous case
+ * memdescDescribe() is more apt.
+ *
+ * The memory descriptor must be initialized with memdescCreate*(),
+ * typically memdescCreateExisting() prior to calling
+ * memdescFillPages().
+ *
+ * This function modifies PageCount and operates on the assumption that
+ * callers fill the memory from start to end; using memdescFillPages() to
+ * describe a memdesc out of order is illegal.
+ *
+ * @param[in] pMemDesc   Memory descriptor to fill
+ * @param[in] pageIndex  Index into memory descriptor to fill from.
+ * @param[in] pPages     Array of physical addresses
+ * @param[in] pageCount  Number of entries in pPages
+ * @param[in] pageSize   Size of each page in pPages
+ *
+ * @returns None
+ */
+void
+memdescFillPages
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvU32              pageIndex,
+    NvU64             *pPages,
+    NvU32              pageCount,
+    NvU64              pageSize
+)
+{
+    OBJGPU *pGpu = gpumgrGetSomeGpu();
+    NvU32 i, j, k;
+    NvU32 numChunks4k = pageSize / RM_PAGE_SIZE;
+    NvU32 offset4k = numChunks4k * pageIndex;
+    NvU32 pageCount4k = numChunks4k * pageCount;
+    NvU32 result4k, limit4k;
+    NvU64 addr;
+    NvBool bClippedMemFill = NV_FALSE;
+    NvU64 totalFilledPageCount;
+
+    NV_ASSERT_OR_RETURN_VOID(pMemDesc != NULL);
+    NV_ASSERT_OR_RETURN_VOID(pageSize > 0);
+
+    if (GPU_GET_MEMORY_MANAGER(pGpu)->bEnableDynamicGranularityPageArrays)
+    {
+        _memdescFillPagesAtNativeGranularity(pMemDesc, pageIndex, pPages, pageCount, pageSize);
+        return;
+    }
+
+    NV_ASSERT_OR_RETURN_VOID(offset4k < pMemDesc->pageArraySize);
+    NV_ASSERT_OR_RETURN_VOID(portSafeAddU32(offset4k, pageCount4k, &result4k));
+
+    //
+    // There is a possibility that the pMemDesc was created using a 4K-aligned
+    // allocSize, but the actual memory allocator could align up the allocation
+    // size based on its supported pageSize (e.g. PMA supports 64K pages). In
+    // that case, pageCount4k would be greater than pMemdesc->pageCount. We
+    // limit pageCount4k to stay within pMemdesc->pageCount in that case.
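+    // For example (editorial illustration): a non-contiguous 8K memdesc has
+    // pageArraySize = 3 (PageCount + 1 spare PTE). If PMA returns one 64K
+    // page, pageCount4k = 16 would overrun the array, so it is clipped to 3.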
+ // + if (result4k > pMemDesc->pageArraySize) + { + bClippedMemFill = NV_TRUE; + pageCount4k = pMemDesc->pageArraySize - offset4k; + } + + NV_ASSERT_OR_RETURN_VOID(0 == (pageSize & (RM_PAGE_SIZE - 1))); + NV_ASSERT_OR_RETURN_VOID(!memdescHasSubDeviceMemDescs(pMemDesc)); + + // Fill _pteArray array using numChunks4k as a stride + for (i = 0, j = offset4k; i < pageCount; i++, j += numChunks4k) + { + pMemDesc->_pteArray[j] = addr = pPages[i]; + + // Fill _pteArray at 4K granularity + limit4k = NV_MIN(j + numChunks4k, pageCount4k); + + addr += RM_PAGE_SIZE; + for (k = j + 1; k < limit4k; k++, addr += RM_PAGE_SIZE) + pMemDesc->_pteArray[k] = addr; + } + + if (bClippedMemFill) + { + totalFilledPageCount = pMemDesc->pageArraySize; + } + else + { + totalFilledPageCount = offset4k + pageCount4k; + } + + pMemDesc->PageCount = totalFilledPageCount; + pMemDesc->ActualSize = pMemDesc->PageCount * RM_PAGE_SIZE; + pMemDesc->pageArrayGranularity = RM_PAGE_SIZE; +} + +/*! + * @brief Acquire exclusive use for memdesc for RM. + * + * @param[inout] pMemDesc Memory descriptor + * + * @returns Boolean indicating whether we successfully acquired the memdesc for exclusive use + */ +NvBool +memdescAcquireRmExclusiveUse +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_CHECK_OR_RETURN(LEVEL_ERROR, pMemDesc->_pParentDescriptor == NULL && + !pMemDesc->bRmExclusiveUse && + pMemDesc->DupCount == 1, + NV_FALSE); + + pMemDesc->bRmExclusiveUse = NV_TRUE; + return NV_TRUE; +} + +// +// SubMemory per subdevice chart: (MD - Memory Descriptor, SD - subdevice) +// +// If we try to create submemory of descriptor which has subdevices: +// +// [Top level MD] +// ^ | +// | +--------> [ Subdevice 0 MD ] --------> [Subdevice 1 MD] +// | ^ ^ +// | | | +// [SubMemory top level MD] | | +// | | | +// +--------> [Subdevice 0 SubMemory MD] --------> [Subdevice 1 SubMemory MD] +// +// Top Level MD : parent of SubMemoryTopLevelMD; has subdescriptors +// for two subdevices +// SubMemory top level MD : has pointer to parent memory descriptor; has two +// subdevice MDs +// Subdevice 0 MD : subdevice MD of topLevelMD and parent of SD0 +// submemory descriptor; has pointer to next in the +// list of subdevice MDs +// Subdevice 0 SubMemory MD : submemory of subdevice 0 MD; has pointer to +// parent, subdevice 0 MD and to next in list of +// submemory subdevice memory descriptors +// + + + +/*! + * @brief Create a new memory descriptor that is a subset of pMemDesc. 
If
+ * pMemDesc has subdevice memory descriptors, submemory descriptors will be
+ * created for all subdevices, and the new memory descriptor will be the top
+ * level for them (see the ASCII chart above).
+ *
+ * @param[out] ppMemDescNew New memory descriptor
+ * @param[in]  pMemDesc     Original memory descriptor
+ * @param[in]  pGpu         The GPU that this memory will be mapped to
+ * @param[in]  Offset       Sub memory descriptor starts at pMemdesc+Offset
+ * @param[in]  Size         For Size bytes
+ *
+ * @returns NV_STATUS
+ */
+NV_STATUS
+memdescCreateSubMem
+(
+    MEMORY_DESCRIPTOR **ppMemDescNew,
+    MEMORY_DESCRIPTOR *pMemDesc,
+    OBJGPU *pGpu,
+    NvU64 Offset,
+    NvU64 Size
+)
+{
+    NV_STATUS status;
+    MEMORY_DESCRIPTOR *pMemDescNew;
+    NvU32 subDevInst;
+    NvU64 tmpSize = Size;
+    MEMORY_DESCRIPTOR *pLast;
+    MEMORY_DESCRIPTOR *pNew;
+    OBJGPU *pGpuChild;
+    const NvU64 pageArrayGranularity = pMemDesc->pageArrayGranularity;
+    const NvU64 pageArrayGranularityMask = pMemDesc->pageArrayGranularity - 1;
+    const NvU32 pageArrayGranularityShift = BIT_IDX_64(pMemDesc->pageArrayGranularity);
+
+    // Default to the original memdesc's GPU if none is specified
+    if (pGpu == NULL)
+    {
+        pGpu = pMemDesc->pGpu;
+    }
+
+    // The allocation size should be adjusted for the memory descriptor's
+    // _pageSize. Also note that the first 4K page may not be at a _pageSize
+    // boundary, so at the time of the mapping we may be overmapping at the
+    // beginning or end of the descriptor. To fix it in the right way, the
+    // memory descriptor needs to be further cleaned. Do not round to page
+    // size if the client specifies so.
+    if (!(pMemDesc->_flags & MEMDESC_FLAGS_PAGE_SIZE_ALIGN_IGNORE) &&
+        pMemDesc->_pageSize != 0)
+    {
+        PMEMORY_DESCRIPTOR pTempMemDesc = pMemDesc;
+        NvU64              pageOffset;
+
+        if (memdescHasSubDeviceMemDescs(pMemDesc))
+        {
+            NV_ASSERT(pGpu);
+            pTempMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu);
+        }
+
+        pageOffset = memdescGetPhysAddr(pTempMemDesc, AT_CPU, Offset) &
+                     (pTempMemDesc->_pageSize - 1);
+
+        // Check for integer overflow
+        if (!portSafeAddU64(pageOffset, Size, &tmpSize))
+        {
+            return NV_ERR_INVALID_ARGUMENT;
+        }
+
+        tmpSize = RM_ALIGN_UP(pageOffset + Size, pTempMemDesc->_pageSize);
+
+        // Check for integer overflow
+        if (tmpSize < pageOffset + Size)
+        {
+            return NV_ERR_INVALID_ARGUMENT;
+        }
+    }
+
+    // Allocate the new MEMORY_DESCRIPTOR
+    status = memdescCreate(&pMemDescNew, pGpu, tmpSize, 0,
+                           !!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS),
+                           pMemDesc->_addressSpace,
+                           pMemDesc->_cpuCacheAttrib,
+                           ((pMemDesc->_flags & ~MEMDESC_FLAGS_PRE_ALLOCATED) | MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE));
+
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    // Fill in various fields as best we can; XXX this can get sort of sketchy
+    // in places, which should be all the more motivation to rip some of these
+    // fields out of the MEMORY_DESCRIPTOR.
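+    //
+    // Inherited from the parent: kind, hw resource id, page size, cache
+    // attributes, GFID and suballocator state. Recomputed for the sub-range:
+    // Size, PteAdjust, PageCount and the PTE array itself (filled in below).
+    //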
+ if (pMemDesc->_flags & MEMDESC_FLAGS_KERNEL_MODE) + pMemDescNew->_flags |= MEMDESC_FLAGS_KERNEL_MODE; + else + pMemDescNew->_flags &= ~MEMDESC_FLAGS_KERNEL_MODE; + + pMemDescNew->Size = Size; + pMemDescNew->_pteKind = pMemDesc->_pteKind; + pMemDescNew->_hwResId = pMemDesc->_hwResId; + if (pMemDesc->_flags & MEMDESC_FLAGS_ENCRYPTED) + pMemDescNew->_flags |= MEMDESC_FLAGS_ENCRYPTED; + else + pMemDescNew->_flags &= ~MEMDESC_FLAGS_ENCRYPTED; + pMemDescNew->_pageSize = pMemDesc->_pageSize; + pMemDescNew->pageArrayGranularity = pageArrayGranularity; + pMemDescNew->_gpuCacheAttrib = pMemDesc->_gpuCacheAttrib; + pMemDescNew->_gpuP2PCacheAttrib = pMemDesc->_gpuP2PCacheAttrib; + pMemDescNew->cpuCacheSnoop = pMemDesc->cpuCacheSnoop; + pMemDescNew->gpuCacheSnoop = pMemDesc->gpuCacheSnoop; + pMemDescNew->gfid = pMemDesc->gfid; + pMemDescNew->bUsingSuballocator = pMemDesc->bUsingSuballocator; + pMemDescNew->_pParentDescriptor = pMemDesc; + pMemDesc->childDescriptorCnt++; + pMemDescNew->bRmExclusiveUse = pMemDesc->bRmExclusiveUse; + pMemDescNew->numaNode = pMemDesc->numaNode; + + pMemDescNew->subMemOffset = Offset; + + // increase refCount of parent descriptor + memdescAddRef(pMemDesc); + + // Fill in the PteArray and PteAdjust + if ((pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) || + (pMemDesc->PageCount == 1)) + { + // Compute the base address, then fill it in + RmPhysAddr Base = pMemDesc->_pteArray[0] + pMemDesc->PteAdjust + Offset; + pMemDescNew->_pteArray[0] = Base & ~pageArrayGranularityMask; + pMemDescNew->PteAdjust = NvU64_LO32(Base) & pageArrayGranularityMask; + } + else + { + // More complicated... + RmPhysAddr Adjust; + NvU32 PageIndex; + const NvU32 pageMask = GET_PAGE_MASK(pMemDescNew->pageArrayGranularity); + // + // The last bitwise AND with the pageMask makes sure the value is cleaned + // and we don't end up with numBytesToPageAlignedSize == pageArrayGranularity. + // + NvU32 numBytesToPageAlignedSize = (pMemDescNew->pageArrayGranularity - + (Size & pageMask)) & pageMask; + NvBool bNeedExtraPage; + NvU64 alignedSize; + + // We start this many bytes into the memory alloc + Adjust = pMemDesc->PteAdjust + Offset; + + // Break it down into pages (PageIndex) and bytes (PteAdjust) + PageIndex = (NvU32)(Adjust >> pageArrayGranularityShift); + pMemDescNew->PteAdjust = NvU64_LO32(Adjust) & pageArrayGranularityMask; + + // + // Calculate how many bytes are left to being page aligned. + // If the offset within the tracking page is: + // 1) Smaller than the number of bytes left - + // PageCount equals the aligned Size. + // 2) Larger than the number of bytes left - + // The submem description will always span an extra page. + // + bNeedExtraPage = (pMemDescNew->PteAdjust > numBytesToPageAlignedSize); + + alignedSize = NV_ALIGN_UP64(Size, pMemDescNew->pageArrayGranularity); + pMemDescNew->PageCount = (alignedSize >> GET_PAGE_SHIFT(pMemDesc->pageArrayGranularity)) + (bNeedExtraPage ? 
1 :0); + pMemDescNew->ActualSize = pMemDescNew->PageCount * pMemDescNew->pageArrayGranularity; + + // Fill in the PTEs; remember to copy the extra PTE, in case we need it + if (pMemDesc->PageCount) + { + memdescFillPages(pMemDescNew, 0, &pMemDesc->_pteArray[PageIndex], pMemDescNew->PageCount, pMemDescNew->pageArrayGranularity); + } + } + + if (memdescIsEgm(pMemDesc)) + { + NV_ASSERT_OK_OR_GOTO(status, + _memdescAllocEgmArray(pMemDescNew), + fail); + } + + if ((pMemDesc->_addressSpace == ADDR_SYSMEM) && + !memdescIsEgm(pMemDesc) && + !memdescGetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_CPU_ONLY) && + !memdescGetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1) && + !memdescGetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_SKIP_IOMMU_MAPPING)) + { + // + // For different IOVA spaces, the IOMMU mapping will often not be a + // subrange of the original mapping. + // + // Request the submapping to be associated with the submemdesc. + // + // TODO: merge the new IOMMU paths with the SMMU path above (see bug + // 1625121). + // + status = memdescMapIommu(pMemDescNew, pGpu->busInfo.iovaspaceId); + if (status != NV_OK) + { + memdescDestroy(pMemDescNew); + return status; + } + } + + // Support for SLI submemory per-subdevice allocations (refer to chart) + if (memdescHasSubDeviceMemDescs(pMemDesc)) + { + NvBool bBcState = gpumgrGetBcEnabledStatus(pGpu); + + if (gpumgrGetBcEnabledStatus(pGpu) && (pMemDesc->_addressSpace == ADDR_FBMEM)) + { + NV_ASSERT(!!(pMemDesc->_flags & MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE)); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + } + pLast = pMemDescNew; + + pMemDescNew->_subDeviceAllocCount = pMemDesc->_subDeviceAllocCount; + + for (subDevInst = 0; subDevInst < pMemDesc->_subDeviceAllocCount; subDevInst++) + { + pGpuChild = gpumgrGetGpuFromSubDeviceInst(gpuGetDeviceInstance(pGpu), subDevInst); + status = memdescCreateSubMem(&pNew, memdescGetMemDescFromGpu(pMemDesc, pGpuChild), pGpuChild, Offset, Size); + + if (status != NV_OK) + { + while (NULL != pMemDescNew) + { + pNew = pMemDescNew; + pMemDescNew = pMemDescNew->_pNext; + memdescDestroy(pNew); + } + return status; + } + + pLast->_pNext = pNew; + pLast = pNew; + } + + gpumgrSetBcEnabledStatus(pGpu, bBcState); + } + + *ppMemDescNew = pMemDescNew; + + return NV_OK; + +fail: + memdescDestroy(pMemDescNew); + return status; +} + +/*! + * Given a memdesc, this checks if the allocated memory falls under subheap or in GPA address space + */ +static NvBool +_memIsSriovMappingsEnabled +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + return gpuIsSriovEnabled(pMemDesc->pGpu) && + (((pMemDesc->_flags & MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE) && pMemDesc->bUsingSuballocator) || + (pMemDesc->_flags & MEMDESC_FLAGS_GUEST_ALLOCATED)); +} + +/*! + * Fills pGpaEntries with numEntries GPAs from pMemDesc->_pteArray starting at + * the given starting index. For physically contiguous memdescs, fills with + * RM_PAGE_SIZE strides. + */ +static void +_memdescFillGpaEntriesForSpaTranslation +( + PMEMORY_DESCRIPTOR pMemDesc, + RmPhysAddr *pGpaEntries, + NvU32 start, + NvU32 numEntries +) +{ + if (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) + { + NvU32 i; + + for (i = 0; i < numEntries; i++) + { + pGpaEntries[i] = pMemDesc->_pteArray[0] + (((RmPhysAddr) (start + i)) * pMemDesc->pageArrayGranularity); + } + } + else + { + portMemCopy(&pGpaEntries[0], numEntries * sizeof(pGpaEntries[0]), + &pMemDesc->_pteArray[start], numEntries * sizeof(pGpaEntries[0])); + } +} + +/*! 
+ * This function translates GPA -> SPA for a given memdesc and updates pPteSpaMappings with list of SPA addresses. + * If memdesc is contiguous and if the translated SPA count > 1, this function fails for now. + */ +NV_STATUS +_memdescUpdateSpaArray +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + NV_STATUS status = NV_OK; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pMemDesc->pGpu); + NvU32 allocCnt; + NvU32 i; + NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS *pParams = NULL; + + if ((pMemDesc->pPteSpaMappings) || (!pMemDesc->PageCount)) + { + status = NV_OK; + goto _memUpdateSpArray_exit; + } + + allocCnt = memdescGetPteArraySize(pMemDesc, AT_PA); + + // Allocate the array to hold pages up to PageCount + pMemDesc->pPteSpaMappings = portMemAllocNonPaged(sizeof(RmPhysAddr) * allocCnt); + if (pMemDesc->pPteSpaMappings == NULL) + { + status = NV_ERR_NO_MEMORY; + goto _memUpdateSpArray_exit; + } + + pParams = portMemAllocStackOrHeap(sizeof(*pParams)); + if (pParams == NULL) + { + status = NV_ERR_NO_MEMORY; + goto _memUpdateSpArray_exit; + } + portMemSet(pParams, 0, sizeof(*pParams)); + + pParams->gfid = pMemDesc->gfid; + + for (i = 0; i < allocCnt; i += NV2080_CTRL_INTERNAL_VMMU_MAX_SPA_FOR_GPA_ENTRIES) + { + NvU32 numEntries = NV_MIN(allocCnt - i, NV2080_CTRL_INTERNAL_VMMU_MAX_SPA_FOR_GPA_ENTRIES); + pParams->numEntries = numEntries; + + _memdescFillGpaEntriesForSpaTranslation(pMemDesc, &pParams->gpaEntries[0], + i, numEntries); + + status = pRmApi->Control(pRmApi, + pMemDesc->pGpu->hInternalClient, + pMemDesc->pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES, + pParams, + sizeof(*pParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Getting SPA for GPA failed: GFID=%u, GPA=0x%llx\n", + pMemDesc->gfid, pMemDesc->_pteArray[i]); + goto _memUpdateSpArray_exit; + } + + portMemCopy(&pMemDesc->pPteSpaMappings[i], numEntries * sizeof(pParams->spaEntries[0]), + &pParams->spaEntries[0], numEntries * sizeof(pParams->spaEntries[0])); + } + +_memUpdateSpArray_exit: + if (status != NV_OK) + { + portMemFree(pMemDesc->pPteSpaMappings); + pMemDesc->pPteSpaMappings = NULL; + } + portMemFreeStackOrHeap(pParams); + + return status; +} + +/*! + * @brief Return the physical addresses of pMemdesc + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] pGpu GPU to return the addresses for + * @param[in] addressTranslation Address translation identifier + * @param[in] offset Offset into memory descriptor + * @param[in] stride How much to advance the offset for each + * consecutive address + * @param[in] count How many addresses to retrieve + * @param[out] pAddresses Returned array of addresses + * + */ +void memdescGetPhysAddrsForGpu(MEMORY_DESCRIPTOR *pMemDesc, + OBJGPU *pGpu, + ADDRESS_TRANSLATION addressTranslation, + NvU64 offset, + NvU64 stride, + NvU64 count, + RmPhysAddr *pAddresses) +{ + // + // Get the PTE array that we should use for phys addr lookups based on the + // MMU context. 
(see bug 1625121) + // + NvU64 i; + NvU64 pageIndex; + RmPhysAddr *pteArray = memdescGetPteArrayForGpu(pMemDesc, pGpu, addressTranslation); + const NvBool contiguous = (memdescGetPteArraySize(pMemDesc, addressTranslation) == 1); + const NvU64 pageArrayGranularityMask = pMemDesc->pageArrayGranularity - 1; + const NvU32 pageArrayGranularityShift = BIT_IDX_64(pMemDesc->pageArrayGranularity); + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + offset += pMemDesc->PteAdjust; + + for (i = 0; i < count; ++i) + { + if (contiguous) + { + pAddresses[i] = pteArray[0] + offset; + } + else + { + pageIndex = offset >> pageArrayGranularityShift; + NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR, pageIndex < pMemDesc->PageCount); + pAddresses[i] = pteArray[pageIndex] + (offset & pageArrayGranularityMask); + } + + offset += stride; + } +} + +/*! + * @brief Return the physical addresses of pMemdesc for use in a PTE or to give to HW + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] pGpu GPU to return the addresses for + * @param[in] addressTranslation Address translation identifier + * @param[in] offset Offset into memory descriptor + * @param[in] stride How much to advance the offset for each + * consecutive address + * @param[in] count How many addresses to retrieve + * @param[out] pAddresses Returned array of addresses + * + */ +void memdescGetPtePhysAddrsForGpu(MEMORY_DESCRIPTOR *pMemDesc, + OBJGPU *pGpu, + ADDRESS_TRANSLATION addressTranslation, + NvU64 offset, + NvU64 stride, + NvU64 count, + RmPhysAddr *pAddresses) +{ + // + // Get the PTE array that we should use for phys addr lookups based on the + // MMU context. (see bug 1625121) + // + NvU64 i; + NvU64 pageIndex; + RmPhysAddr *pteArray = memdescGetPteArrayForGpu(pMemDesc, pGpu, addressTranslation); + const NvBool contiguous = (memdescGetPteArraySize(pMemDesc, addressTranslation) == 1); + const NvU64 pageArrayGranularityMask = pMemDesc->pageArrayGranularity - 1; + const NvU32 pageArrayGranularityShift = BIT_IDX_64(pMemDesc->pageArrayGranularity); + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + offset += pMemDesc->PteAdjust; + + for (i = 0; i < count; ++i) + { + if (contiguous) + { + pAddresses[i] = pteArray[0] + offset; + } + else + { + pageIndex = offset >> pageArrayGranularityShift; + NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR, pageIndex < pMemDesc->PageCount); + pAddresses[i] = pteArray[pageIndex] + (offset & pageArrayGranularityMask); + } + + if (pMemDesc->_flags & MEMDESC_FLAGS_ALLOC_AS_LOCALIZED) + { + // Set the bit in the physical address itself + pAddresses[i] |= pMemDesc->localizedMask; + } + + offset += stride; + } +} + +/*! + * @brief Return the physical addresses of pMemdesc + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] addressTranslation Address translation identifier + * @param[in] offset Offset into memory descriptor + * @param[in] stride How much to advance the offset for each + * consecutive address + * @param[in] count How many addresses to retrieve + * @param[out] pAddresses Returned array of addresses + * + */ +void memdescGetPhysAddrs(MEMORY_DESCRIPTOR *pMemDesc, + ADDRESS_TRANSLATION addressTranslation, + NvU64 offset, + NvU64 stride, + NvU64 count, + RmPhysAddr *pAddresses) +{ + memdescGetPhysAddrsForGpu(pMemDesc, pMemDesc->pGpu, addressTranslation, offset, stride, count, pAddresses); +} + +/*! 
+ * @brief Return the physical addresses of pMemDesc
+ *        for use in a PTE or to give to HW
+ *
+ * @param[in]  pMemDesc           Memory descriptor used
+ * @param[in]  addressTranslation Address translation identifier
+ * @param[in]  offset             Offset into memory descriptor
+ * @param[in]  stride             How much to advance the offset for each
+ *                                consecutive address
+ * @param[in]  count              How many addresses to retrieve
+ * @param[out] pAddresses         Returned array of addresses
+ *
+ */
+void memdescGetPtePhysAddrs(MEMORY_DESCRIPTOR *pMemDesc,
+                            ADDRESS_TRANSLATION addressTranslation,
+                            NvU64 offset,
+                            NvU64 stride,
+                            NvU64 count,
+                            RmPhysAddr *pAddresses)
+{
+    memdescGetPtePhysAddrsForGpu(pMemDesc, pMemDesc->pGpu, addressTranslation, offset, stride, count, pAddresses);
+}
+
+/*!
+ * @brief Return the physical address of pMemDesc+offset
+ *
+ * @param[in] pMemDesc           Memory descriptor used
+ * @param[in] addressTranslation Address translation identifier
+ * @param[in] offset             Offset into memory descriptor
+ *
+ * @returns A physical address
+ */
+RmPhysAddr
+memdescGetPhysAddr
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    ADDRESS_TRANSLATION addressTranslation,
+    NvU64 offset
+)
+{
+    RmPhysAddr addr;
+    memdescGetPhysAddrs(pMemDesc, addressTranslation, offset, 0, 1, &addr);
+    return addr;
+}
+
+/*!
+ * @brief Return the physical address of pMemDesc+offset
+ *        for use in a PTE or to give to HW
+ *
+ * @param[in] pMemDesc           Memory descriptor used
+ * @param[in] addressTranslation Address translation identifier
+ * @param[in] offset             Offset into memory descriptor
+ *
+ * @returns A physical address
+ */
+RmPhysAddr
+memdescGetPtePhysAddr
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    ADDRESS_TRANSLATION addressTranslation,
+    NvU64 offset
+)
+{
+    RmPhysAddr addr;
+    memdescGetPtePhysAddrsForGpu(pMemDesc, pMemDesc->pGpu, addressTranslation, offset, 0, 1, &addr);
+    return addr;
+}
+
+/*!
+ * @brief Return physical address for page specified by PteIndex
+ *
+ * @param[in] pMemDesc           Memory descriptor to use
+ * @param[in] addressTranslation Address translation identifier
+ * @param[in] PteIndex           Look up this PteIndex
+ *
+ * @returns A physical address
+ */
+RmPhysAddr
+memdescGetPte
+(
+    PMEMORY_DESCRIPTOR pMemDesc,
+    ADDRESS_TRANSLATION addressTranslation,
+    NvU32 PteIndex
+)
+{
+    //
+    // Get the PTE array that we should use for phys addr lookups based on the
+    // MMU context. (see bug 1625121)
+    //
+    RmPhysAddr *pteArray = memdescGetPteArray(pMemDesc, addressTranslation);
+    RmPhysAddr PhysAddr;
+
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+
+    if (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)
+    {
+        PhysAddr = pteArray[0] + (PteIndex << RM_PAGE_SHIFT);
+    }
+    else
+    {
+        //
+        // This check should verify against PageCount, since it points to the
+        // number of valid entries. Since PageCount is not accurate in some
+        // cases, verify instead that we are not performing accesses past the
+        // allocated page array size.
+        //
+        NV_ASSERT_OR_RETURN(PteIndex < pMemDesc->pageArraySize, MEMDESC_INVALID_PTE);
+        PhysAddr = pteArray[PteIndex];
+    }
+
+    return PhysAddr;
+}
+
+/*!
+ * @brief Set physical address for page specified by PteIndex
+ *
+ * @param[in] pMemDesc           Memory descriptor to use
+ * @param[in] addressTranslation Address translation identifier
+ * @param[in] PteIndex           Set this PteIndex
+ * @param[in] PhysAddr           PTE address
+ *
+ * @returns None
+ */
+void
+memdescSetPte
+(
+    PMEMORY_DESCRIPTOR pMemDesc,
+    ADDRESS_TRANSLATION addressTranslation,
+    NvU32 PteIndex,
+    RmPhysAddr PhysAddr
+)
+{
+    //
+    // Get the PTE array that we should use for phys addr lookups based on the
+    // MMU context. (see bug 1625121)
+    //
+    RmPhysAddr *pteArray = memdescGetPteArray(pMemDesc, addressTranslation);
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+
+    if (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)
+    {
+        NV_ASSERT_OR_RETURN_VOID(PteIndex == 0);
+    }
+    else
+    {
+        NV_ASSERT_OR_RETURN_VOID(PteIndex < pMemDesc->pageArraySize);
+    }
+
+    pteArray[PteIndex] = PhysAddr;
+
+    // Free the stale SPA translations; they are rebuilt on demand
+    portMemFree(pMemDesc->pPteSpaMappings);
+    pMemDesc->pPteSpaMappings = NULL;
+}
+
+/*!
+ * @brief Return page array size based on the MMU context
+ *        For SRIOV, the host context (AT_PA) will have a discontiguous view
+ *        of the GPA in SPA space. This is treated similarly to discontiguous
+ *        memdescs.
+ *
+ * @param[in] pMemDesc           Memory descriptor to use
+ * @param[in] addressTranslation Address translation identifier
+ *
+ * @returns Page array size
+ */
+NvU32 memdescGetPteArraySize(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation)
+{
+    //
+    // Contiguous allocations in the SPA domain can be non-contiguous at VMMU
+    // segment granularity. Hence treat SPA domain allocations as
+    // non-contiguous by default.
+    //
+    // Bug 4801329: Store the SPA array size separately in the memdesc and
+    // return it when needed. Otherwise return pageArraySize.
+    //
+    if (!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) ||
+        ((addressTranslation == AT_PA) && (pMemDesc->_addressSpace == ADDR_FBMEM) && _memIsSriovMappingsEnabled(pMemDesc)))
+    {
+        return NvU64_LO32(pMemDesc->PageCount);
+    }
+    return 1;
+}
+
+/*!
+ * @brief Return page array
+ *
+ * @param[in] pMemDesc           Memory descriptor to use
+ * @param[in] pGpu               GPU to get the PTE array for.
+ * @param[in] addressTranslation Address translation identifier
+ *
+ * @returns PageArray
+ */
+RmPhysAddr *
+memdescGetPteArrayForGpu
+(
+    PMEMORY_DESCRIPTOR pMemDesc,
+    OBJGPU *pGpu,
+    ADDRESS_TRANSLATION addressTranslation
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+
+    switch (AT_VALUE(addressTranslation))
+    {
+        //
+        // In SRIOV systems, an access from a guest has to go through the
+        // following translations:
+        //     GVA -> GPA -> SPA
+        //
+        // Given that the HOST manages channel/memory management for the guest,
+        // there are certain code paths that expect VA -> GPA translations and
+        // some that may need GPA -> SPA translations. We use addressTranslation
+        // to differentiate between these cases.
+        // Since GPA -> SPA is very similar to IOMMU translation and since the
+        // existing AT_PA is used only in SYSMEM allocations, we decided to
+        // reuse the AT_PA addressTranslation to fetch GPA -> SPA translations.
+        // In case of non-SRIOV systems, using AT_PA will fall back to AT_GPU
+        // or the default context.
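+        //
+        // Illustrative example (an assumption for clarity, not taken from this
+        // change): a surface that is one contiguous range in GPA space may be
+        // backed by several discontiguous VMMU segments in SPA space, which is
+        // why AT_PA lookups are treated like a discontiguous page array even
+        // when the memdesc itself is marked physically contiguous.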
+ // + // pMemDesc -> _pteArray tracks GVA -> GPA translations + // pMemDesc -> pPteSpaMappings tracks GPA -> SPA translations + // + + case AT_VALUE(AT_PA): + { + if (pGpu != NULL) + { + if (pMemDesc->_addressSpace == ADDR_FBMEM) + { + if (_memIsSriovMappingsEnabled(pMemDesc)) + { + if (!pMemDesc->pPteSpaMappings) + _memdescUpdateSpaArray(pMemDesc); + + return pMemDesc->pPteSpaMappings; + } + } + } + } + case AT_VALUE(AT_GPU): + { + // Imported ADDR_FABRIC_V2 memdescs are device-less. + if (pGpu != NULL) + { + if (memdescIsEgm(pMemDesc) && (pMemDesc->pPteEgmMappings != NULL)) + { + return pMemDesc->pPteEgmMappings; + } + + PIOVAMAPPING pIovaMap = memdescGetIommuMap(pMemDesc, + pGpu->busInfo.iovaspaceId); + if (pIovaMap != NULL) + { + return pIovaMap->iovaArray; + } + } + + // + // If no IOMMU mapping exists in the default IOVASPACE, fall + // through and use the physical memory descriptor instead. + // + } + default: + { + return pMemDesc->_pteArray; + } + } +} + + + +/*! + * @brief Convert aperture into a descriptive string. + * + * @param[in] addressSpace + * + * @returns String + * + * @todo "text" + */ +const char * +memdescGetApertureString +( + NV_ADDRESS_SPACE addressSpace +) +{ + static NV_PRINTF_STRING_SECTION const char ADDR_FBMEM_STR[] = "VIDEO MEMORY"; + static NV_PRINTF_STRING_SECTION const char ADDR_SYSMEM_STR[] = "SYSTEM MEMORY"; + + if (addressSpace == ADDR_FBMEM) + { + return ADDR_FBMEM_STR; + } + + if (addressSpace == ADDR_SYSMEM) + { + return ADDR_SYSMEM_STR; + } + + return NULL; +} + +/*! + * @brief Compare two memory descriptors to see if the memory described the same + * + * @param[in] pMemDescOne + * @param[in] pMemDescTwo + * + * @returns NV_TRUE if the memory descriptors refer to the same memory + */ +NvBool +memdescDescIsEqual +( + MEMORY_DESCRIPTOR *pMemDescOne, + MEMORY_DESCRIPTOR *pMemDescTwo +) +{ + if ((pMemDescOne == NULL) || (pMemDescTwo == NULL)) + return NV_FALSE; + + if (pMemDescOne->_addressSpace != pMemDescTwo->_addressSpace) + return NV_FALSE; + + // All the physical memory views should match. + if ((memdescGetPhysAddr(pMemDescOne, AT_CPU, 0) != memdescGetPhysAddr(pMemDescTwo, AT_CPU, 0)) || + (memdescGetPhysAddr(pMemDescOne, AT_GPU, 0) != memdescGetPhysAddr(pMemDescTwo, AT_GPU, 0))) + return NV_FALSE; + + if (memdescGetCpuCacheAttrib(pMemDescOne) != memdescGetCpuCacheAttrib(pMemDescTwo)) + return NV_FALSE; + + if (pMemDescOne->Size != pMemDescTwo->Size) + return NV_FALSE; + + if (pMemDescOne->Alignment != pMemDescTwo->Alignment) + return NV_FALSE; + + if (pMemDescOne->_pageSize != pMemDescTwo->_pageSize) + return NV_FALSE; + + return NV_TRUE; +} + +/*! + * @brief Add callback block to the destroy callback queue + * + * @param[in] pMemDesc Memory descriptor to update + * @param[in] pCb Callee allocated block with callback func/arg + * + * @returns nothing + */ +void +memdescAddDestroyCallback +( + MEMORY_DESCRIPTOR *pMemDesc, + MEM_DESC_DESTROY_CALLBACK *pCb +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pCb->pNext = memdescGetDestroyCallbackList(pMemDesc); + memdescSetDestroyCallbackList(pMemDesc, pCb); +} + +/*! 
+ * @brief Remove callback block from the destroy callback queue
+ *
+ * @param[in] pMemDesc  Memory descriptor to update
+ * @param[in] pRemoveCb Callee allocated block with callback func/arg
+ *
+ * @returns nothing
+ */
+void
+memdescRemoveDestroyCallback
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    MEM_DESC_DESTROY_CALLBACK *pRemoveCb
+)
+{
+    MEM_DESC_DESTROY_CALLBACK *pCb = memdescGetDestroyCallbackList(pMemDesc);
+    MEM_DESC_DESTROY_CALLBACK *pPrev = NULL;
+
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    while (pCb)
+    {
+        if (pCb == pRemoveCb)
+        {
+            if (pPrev == NULL)
+            {
+                memdescSetDestroyCallbackList(pMemDesc, pCb->pNext);
+            }
+            else
+            {
+                pPrev->pNext = pCb->pNext;
+            }
+            break;
+        }
+        pPrev = pCb;
+        pCb = pCb->pNext;
+    }
+}
+
+/*!
+ * @brief Retrieves a subdevice's memory descriptor by subdevice instance
+ *
+ * Subdevice memory descriptors are memory descriptors that describe
+ * per-subdevice memory buffers. This functionality is required by our current
+ * SLI programming model as our memdescAlloc() calls are primarily broadcast
+ * operations. A singular memdesc works for video memory as the
+ * heaps are symmetric. However, we run into trouble when dealing with system
+ * memory as both GPUs then share the same address space and symmetric
+ * addressing is no longer possible.
+ *
+ * N.B. The rationale for exposing this routine is that it keeps SLI-isms out
+ * of most of the RM -- the alternative approach would've been to pass in the
+ * subdevice or a pGpu for all memdesc methods, which would require more code
+ * changes solely for SLI. Long term, hopefully we can transition to a unicast
+ * allocation model (SLI loops above memdescAlloc()/memdescCreate()) and the
+ * subdevice support in memdesc can (easily) be deleted. This approach also
+ * provides a safety net against misuse, e.g., if we added pGpu to
+ * memdescGetPhysAddr, current code which utilizes that routine outside an SLI
+ * loop would execute cleanly even though it's incorrect.
+ *
+ * @param[in] pMemDesc      Memory descriptor to query
+ * @param[in] subDeviceInst SLI subdevice instance (subdevice - 1)
+ *
+ * @returns Memory descriptor if one exists for the subdevice.
+ *          NULL if none is found.
+ */
+MEMORY_DESCRIPTOR *
+memdescGetMemDescFromSubDeviceInst(MEMORY_DESCRIPTOR *pMemDesc, NvU32 subDeviceInst)
+{
+    if (!memdescHasSubDeviceMemDescs(pMemDesc))
+    {
+        return pMemDesc;
+    }
+    else
+    {
+        return memdescGetMemDescFromIndex(pMemDesc, subDeviceInst);
+    }
+}
+
+/*!
+ * @brief Retrieves a subdevice's memory descriptor by GPU object
+ *
+ * See memdescGetMemDescFromSubDeviceInst for an explanation of subdevice
+ * memory descriptors
+ *
+ * @param[in] pMemDesc Memory descriptor to query
+ * @param[in] pGpu     GPU object to query
+ *
+ * @returns Memory descriptor if one exists for the GPU.
+ *          NULL if none is found.
+ */
+MEMORY_DESCRIPTOR *
+memdescGetMemDescFromGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu)
+{
+    NvU32 subDeviceInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
+
+    return memdescGetMemDescFromSubDeviceInst(pMemDesc, subDeviceInst);
+}
+
+/*!
+ * @brief Retrieves a subdevice's memory descriptor by memdesc index.
+ *
+ * See memdescGetMemDescFromSubDeviceInst for an explanation of subdevice
+ * memory descriptors
+ *
+ * @param[in] pMemDesc Memory descriptor to query
+ * @param[in] index    Index into array of memdesc
+ *
+ * @returns Memory descriptor if one exists for the GPU.
+ *          NULL if none is found.
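+ *
+ * Usage sketch (illustrative): callers normally derive the index from an SLI
+ * subdevice instance, as memdescGetMemDescFromSubDeviceInst does above;
+ * index 0 returns the first subdevice memdesc in the _pNext chain.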
+ */ +MEMORY_DESCRIPTOR * +memdescGetMemDescFromIndex(MEMORY_DESCRIPTOR *pMemDesc, NvU32 index) +{ + if (!memdescHasSubDeviceMemDescs(pMemDesc)) + { + return pMemDesc; + } + else + { + MEMORY_DESCRIPTOR *pSubDevMemDesc = pMemDesc->_pNext; + + NV_ASSERT(pSubDevMemDesc); + + while (index--) + { + pSubDevMemDesc = pSubDevMemDesc->_pNext; + + if (!pSubDevMemDesc) + { + NV_ASSERT(0); + return NULL; + } + } + + return pSubDevMemDesc; + } +} + +/*! + * @brief Set address for a fixed heap allocation. + * + * Offset must refer to the heap. A later memdescAlloc() will + * force this offset. + * + * @param[in] pMemDesc Memory descriptor to update + * @param[in] fbOffset Offset to refer to + * + * @returns nothing + */ +void +memdescSetHeapOffset +( + MEMORY_DESCRIPTOR *pMemDesc, + RmPhysAddr fbOffset +) +{ + NV_ASSERT(pMemDesc->_addressSpace == ADDR_FBMEM); + NV_ASSERT(pMemDesc->Allocated == NV_FALSE); + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_flags |= MEMDESC_FLAGS_FIXED_ADDRESS_ALLOCATE; + pMemDesc->_pteArray[0] = fbOffset; + NV_ASSERT_OR_RETURN_VOID( + memdescSetAllocSizeFields(pMemDesc, + NV_ALIGN_UP64(pMemDesc->ActualSize, pMemDesc->pageArrayGranularity), + pMemDesc->pageArrayGranularity) == NV_OK); +} + +/*! + * @brief Set GPU cacheability + * + * A later memdescAlloc() will use this setting. + * + * @param[in] pMemDesc Memory descriptor to update + * @param[in] cacheAttrib Set memory to GPU cacheable + * + * @returns nothing + */ +void memdescSetGpuCacheAttrib +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 cacheAttrib +) +{ + NV_ASSERT(pMemDesc->Allocated == NV_FALSE); + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_gpuCacheAttrib = cacheAttrib; +} + +/*! + * @brief Get GPU P2P cache attributes + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Current GPU P2P cache attributes + */ +NvU32 memdescGetGpuP2PCacheAttrib +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_gpuP2PCacheAttrib; +} + +/*! + * @brief Set GPU P2P cacheability + * + * A later memdescAlloc() will use this setting. + * + * @param[in] pMemDesc Memory descriptor to update + * @param[in] cacheAttrib Set memory to GPU P2P cacheable + * + * @returns nothing + */ +void memdescSetGpuP2PCacheAttrib +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 cacheAttrib +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_gpuP2PCacheAttrib = cacheAttrib; +} + +/*! + * @brief Set CPU cacheability + * + * A later memdescAlloc() will use this setting. + * + * @param[in] pMemDesc Memory descriptor to update + * @param[in] cacheAttrib Set memory to CPU cacheable + * + * @returns nothing + */ +void memdescSetCpuCacheAttrib +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 cpuCacheAttrib +) +{ + // + // When running 64-bit MODS on ARM v8, we need to force all CPU mappings as WC. + // This seems to be an issue with glibc. See bug 1556221. + // + // Ideally, this should have been set based on a Core Logic (CL) property. + // But chipset initialization will only happen during bifStateInit(). + // RM can makes sysmem CPU mappings before bifStateInit(). + // + if (RMCFG_FEATURE_PLATFORM_MODS && NVCPU_IS_AARCH64) + { + if (cpuCacheAttrib == NV_MEMORY_UNCACHED) + { + cpuCacheAttrib = NV_MEMORY_WRITECOMBINED; + } + } + + pMemDesc->_cpuCacheAttrib = cpuCacheAttrib; +} + +/*! + * @brief Print contents of a MEMORY_DESCRIPTOR in a human readable format. 
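+ *
+ * Note: output goes through NV_PRINTF at LEVEL_NOTICE; in this build the
+ * body below is compiled out behind #if 0.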
+ *
+ * @param[in] pMemDesc              Memory Descriptor to print
+ * @param[in] bPrintIndividualPages Individual pages will also be printed
+ *                                  iff they are discontiguous
+ * @param[in] pPrefixMessage        Message printed before the contents of
+ *                                  the Memory Descriptor
+ *
+ * @returns nothing
+ */
+void memdescPrintMemdesc
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvBool bPrintIndividualPages,
+    const char *pPrefixMessage
+)
+{
+#if 0
+    NvU32 i;
+
+    if ((DBG_RMMSG_CHECK(LEVEL_NOTICE) == 0) || (pPrefixMessage == NULL) || (pMemDesc == NULL))
+    {
+        return;
+    }
+
+    NV_PRINTF(LEVEL_NOTICE,
+              "%s Aperture %s starting at 0x%llx and of size 0x%llx\n",
+              pPrefixMessage,
+              memdescGetApertureString(pMemDesc->_addressSpace),
+              memdescGetPhysAddr(pMemDesc, AT_CPU, 0),
+              pMemDesc->Size);
+
+    if ((bPrintIndividualPages == NV_TRUE) &&
+        (pMemDesc->PageCount > 1) &&
+        (!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)))
+    {
+        for (i = 0; i < pMemDesc->PageCount; i++)
+        {
+            NV_PRINTF(LEVEL_NOTICE,
+                      "     contains page starting @0x%llx\n",
+                      pMemDesc->_pteArray[i]);
+        }
+    }
+
+    // TODO: merge with SMMU path above (see bug 1625121).
+    if (pMemDesc->_pIommuMappings != NULL)
+    {
+        if (!memdescIsSubMemoryMemDesc(pMemDesc))
+        {
+            PIOVAMAPPING pIovaMap = pMemDesc->_pIommuMappings;
+            while (pIovaMap != NULL)
+            {
+                NV_PRINTF(LEVEL_NOTICE,
+                          "Has additional IOMMU mapping for IOVA space 0x%x starting @ 0x%llx\n",
+                          pIovaMap->iovaspaceId,
+                          pIovaMap->iovaArray[0]);
+                pIovaMap = pIovaMap->pNext;
+            }
+        }
+        else
+        {
+            NV_PRINTF(LEVEL_NOTICE,
+                      "Has additional IOMMU mapping starting @ 0x%llx\n",
+                      memdescGetPhysAddr(pMemDesc, AT_PA, 0));
+        }
+    }
+#endif // 0
+}
+
+/*!
+ * @brief Return page offset from a MEMORY_DESCRIPTOR for an arbitrary power
+ *        of two page size
+ *
+ * PteAdjust covers the 4KB alignment, but must include bits from the address
+ * for big pages.
+ *
+ * @param[in] pMemDesc Memory Descriptor to use
+ * @param[in] pageSize Page size (4096, 64K, 128K, etc.)
+ *
+ * @returns The page offset
+ */
+NvU64 memdescGetPageOffset
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvU64 pageSize
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    return (pMemDesc->PteAdjust + pMemDesc->_pteArray[0]) & (pageSize - 1);
+}
+
+/*!
+ * @brief Get PTE kind using GPU
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ * @param[in] pGpu     GPU used to look up the supported kind
+ *
+ * @returns Current PTE kind value.
+ */
+NvU32 memdescGetPteKindForGpu
+(
+    PMEMORY_DESCRIPTOR pMemDesc,
+    OBJGPU *pGpu
+)
+{
+    return memmgrGetHwPteKindFromSwPteKind_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), pMemDesc->_pteKind);
+}
+
+/*!
+ * @brief Set PTE kind using GPU.
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ * @param[in] pGpu     GPU used to look up the supported kind
+ * @param[in] pteKind  New PTE kind
+ *
+ * @returns nothing
+ */
+void memdescSetPteKindForGpu
+(
+    PMEMORY_DESCRIPTOR pMemDesc,
+    OBJGPU *pGpu,
+    NvU32 pteKind
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    pMemDesc->_pteKind = memmgrGetSwPteKindFromHwPteKind_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), pteKind);
+    memdescSetFlag(pMemDesc, MEMDESC_FLAGS_SET_KIND, NV_TRUE);
+}
+
+/*!
+ * @brief Set PTE kind compressed value.
+ *
+ * @param[in] pMemDesc    Memory descriptor pointer
+ * @param[in] pteKindCmpr New PTE kind compressed value
+ *
+ * @returns nothing
+ */
+void memdescSetPteKindCompressed
+(
+    PMEMORY_DESCRIPTOR pMemDesc,
+    NvU32 pteKindCmpr
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    pMemDesc->_pteKindCompressed = pteKindCmpr;
+}
+
+/*!
+ * @brief Get PTE kind compressed value.
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ *
+ * @returns Current PTE kind compressed value.
+ */
+NvU32 memdescGetPteKindCompressed
+(
+    PMEMORY_DESCRIPTOR pMemDesc
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    return pMemDesc->_pteKindCompressed;
+}
+
+/*!
+ * @brief Get kernel mapping
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ *
+ * @returns Current kernel mapping
+ */
+NvP64 memdescGetKernelMapping
+(
+    MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    return pMemDesc->_kernelMapping;
+}
+
+/*!
+ * @brief Set kernel mapping
+ *
+ * @param[in] pMemDesc      Memory descriptor pointer
+ * @param[in] kernelMapping New kernel mapping
+ *
+ * @returns nothing
+ */
+void memdescSetKernelMapping
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvP64 kernelMapping
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    pMemDesc->_kernelMapping = kernelMapping;
+}
+
+/*!
+ * @brief Get privileged kernel mapping
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ *
+ * @returns Current privileged kernel mapping
+ */
+NvP64 memdescGetKernelMappingPriv
+(
+    MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    return pMemDesc->_kernelMappingPriv;
+}
+
+/*!
+ * @brief Set privileged kernel mapping
+ *
+ * @param[in] pMemDesc          Memory descriptor pointer
+ * @param[in] kernelMappingPriv New privileged kernel mapping
+ *
+ * @returns nothing
+ */
+void memdescSetKernelMappingPriv
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvP64 kernelMappingPriv
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    pMemDesc->_kernelMappingPriv = kernelMappingPriv;
+}
+
+
+/*!
+ * @brief Get standby buffer memory descriptor
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ *
+ * @returns Pointer to standby buffer memory descriptor
+ */
+MEMORY_DESCRIPTOR *memdescGetStandbyBuffer
+(
+    MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    return pMemDesc->_pStandbyBuffer;
+}
+
+/*!
+ * @brief Set standby buffer memory descriptor
+ *
+ * @param[in] pMemDesc       Memory descriptor pointer
+ * @param[in] pStandbyBuffer Standby buffer memory descriptor pointer
+ *
+ * @returns nothing
+ */
+void memdescSetStandbyBuffer
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    MEMORY_DESCRIPTOR *pStandbyBuffer
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    pMemDesc->_pStandbyBuffer = pStandbyBuffer;
+}
+
+/*!
+ * @brief Set mem destroy callback list pointer
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ * @param[in] pCb      Memory destroy callback list pointer
+ *
+ * @returns nothing
+ */
+void memdescSetDestroyCallbackList
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    MEM_DESC_DESTROY_CALLBACK *pCb
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    pMemDesc->_pMemDestroyCallbackList = pCb;
+}
+
+/*!
+ * @brief Get guest ID for specified memory descriptor + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Guest ID value + */ +NvU64 memdescGetGuestId +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_guestId; +} + +/*! + * @brief Set guest ID for memory descriptor + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] guestId New guest ID + * + * @returns nothing + */ +void memdescSetGuestId +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 guestId +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_guestId = guestId; +} + +/*! + * @brief Get value of specified flag + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] flag MEMDESC_FLAGS_* value + * + * @returns Boolean value of specified flag + */ +NvBool memdescGetFlag +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 flag +) +{ + // For checking contiguity, use the memdescGetContiguity() api + NV_ASSERT(flag != MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS); + // GPU_IN_RESET is set/got from top level memdesc always. + if (flag != MEMDESC_FLAGS_GPU_IN_RESET) + { + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + } + return !!(pMemDesc->_flags & flag); +} + +/*! + * @brief Set value of specified flag + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] flag MEMDESC_FLAGS_* value + * @param[in] bValue Boolean value of flag + * + * @returns nothing + */ +void memdescSetFlag +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 flag, + NvBool bValue +) +{ + // For setting contiguity, use the memdescSetContiguity() api + NV_ASSERT(flag != MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS); + + // GPU_IN_RESET is set/got from top level memdesc always. + if (flag != MEMDESC_FLAGS_GPU_IN_RESET) + { + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + } + + if (flag == MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE) + { + NV_ASSERT_OK(_memdescSetSubAllocatorFlag(pMemDesc->pGpu, pMemDesc, bValue)); + return; + } + else if (flag == MEMDESC_FLAGS_GUEST_ALLOCATED) + { + NV_ASSERT_OK(_memdescSetGuestAllocatedFlag(pMemDesc->pGpu, pMemDesc, bValue)); + return; + } + + if (bValue) + pMemDesc->_flags |= flag; + else + pMemDesc->_flags &= ~flag; +} + +/*! + * @brief Return memory descriptor address pointer + * + * The address value is returned by osAllocPages + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Memory descriptor address pointer + */ +NvP64 memdescGetAddress +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_address; +} + +/*! + * @brief Set memory descriptor address pointer + * + * The address value is returned by osAllocPages + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] pAddress Pointer to address information + * + * @returns nothing + */ +void memdescSetAddress +( + MEMORY_DESCRIPTOR *pMemDesc, + NvP64 pAddress +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_address = pAddress; +} + +/*! + * @brief Get memory descriptor os-specific memory data pointer + * + * The pMemData value is returned by osAllocPages + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Memory data pointer + */ +void *memdescGetMemData +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_pMemData; +} + +/*! 
+ * @brief Set memory descriptor os-specific memory data pointer + * + * The pMemData value is returned by osAllocPages + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] pMemData Pointer to new os-specific memory data + * @param[in] pMemDataReleaseCallback Pointer to CB to be called when memdesc + * is freed. + * + * @returns nothing + */ +void memdescSetMemData +( + MEMORY_DESCRIPTOR *pMemDesc, + void *pMemData, + MEM_DATA_RELEASE_CALL_BACK *pMemDataReleaseCallback +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_pMemData = pMemData; + pMemDesc->_pMemDataReleaseCallback = pMemDataReleaseCallback; +} + +/*! + * @brief Return memory descriptor volatile attribute + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Volatile or not + */ +NvBool memdescGetVolatility +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + NvBool bVolatile = NV_FALSE; + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + if (pMemDesc->_addressSpace == ADDR_SYSMEM) + { + bVolatile = (memdescGetGpuCacheAttrib(pMemDesc) == NV_MEMORY_UNCACHED); + } + else + { + NV_ASSERT(pMemDesc->_addressSpace == ADDR_FBMEM); + } + + return bVolatile; +} + +/*! + * @brief Quick check whether the memory is contiguous or not + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] addressTranslation Address translation identifier + * + * @returns NV_TRUE if contiguous + */ +NvBool memdescGetContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation) +{ + return !!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS); +} + +/*! + * @brief Detailed Check whether the memory is contiguous or not + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] addressTranslation Address translation identifier + * + * @returns NV_TRUE if contiguous + */ +NvBool memdescCheckContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation) +{ + NvU32 i; + + if (!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)) + { + for (i = 0; i < (pMemDesc->PageCount - 1); i++) + { + if ((memdescGetPte(pMemDesc, addressTranslation, i) + pMemDesc->pageArrayGranularity) != + memdescGetPte(pMemDesc, addressTranslation, i + 1)) + return NV_FALSE; + } + } + + return NV_TRUE; +} + +/*! + * @brief Set the contiguity of the memory descriptor + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] addressTranslation Address translation identifier + * @param[in] isContiguous Contiguity value + * + * @returns nothing + */ +void memdescSetContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvBool isContiguous) +{ + NV_ASSERT_OR_RETURN_VOID(pMemDesc); + + if (isContiguous) + pMemDesc->_flags |= MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS; + else + pMemDesc->_flags &= ~MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS; +} + +/*! + * @brief Get the address space of the memory descriptor + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] addressTranslation Address translation identifier + * + * @returns addresspace + */ +NV_ADDRESS_SPACE memdescGetAddressSpace(PMEMORY_DESCRIPTOR pMemDesc) +{ + NV_ASSERT_OR_RETURN(pMemDesc != NULL, 0); + return pMemDesc->_addressSpace; +} + +/*! + * @brief Get page size + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] addressTranslation Address translation identifier + * + * @returns Current page size. 
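+ *
+ * Note: this is the MMU mapping page size (_pageSize), which is distinct from
+ * pageArrayGranularity, the stride at which _pteArray tracks physical pages.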
+ */ +NvU64 memdescGetPageSize +( + PMEMORY_DESCRIPTOR pMemDesc, + ADDRESS_TRANSLATION addressTranslation +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_pageSize; +} + +/*! + * @brief Set page size + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] addressTranslation Address translation identifier + * @param[in] pteKind New PTE kind + * + * @returns nothing + */ +void memdescSetPageSize +( + PMEMORY_DESCRIPTOR pMemDesc, + ADDRESS_TRANSLATION addressTranslation, + NvU64 pageSize +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_pageSize = pageSize; +} + +/*! + * @brief Get the Root memory descriptor. + * + * This can also be used to get the root offset as well. + * + * Root memory descriptor is the top level memory descriptor with no parent, + * from which this memory descriptor was derived + * + * @param[in] pMemDesc Pointer to memory descriptor. + * @param[out] pRootOffset Pointer to the root offset parameter. + * + * @returns the Root memory descriptor. + */ +PMEMORY_DESCRIPTOR memdescGetRootMemDesc +( + PMEMORY_DESCRIPTOR pMemDesc, + NvU64 *pRootOffset +) +{ + NvU64 offset = 0; + + // Find the top-level parent descriptor + while (pMemDesc->_pParentDescriptor) + { + // Sanity check, None of the child descriptors should be allocated + NV_ASSERT(!pMemDesc->Allocated); + offset += pMemDesc->subMemOffset; + pMemDesc = pMemDesc->_pParentDescriptor; + } + + if (pRootOffset) + { + *pRootOffset = offset; + } + + return pMemDesc; +} +/*! + * @brief Sets the CUSTOM_HEAP flag of MEMDESC. + * + * Since we have ACR region, Memory descriptor can be allocated in ACR region + * in that case, we need to set this flag since we are using the custom ACR HEAP + * + * @param[in] pMemDesc Pointer to memory descriptor. + * + * @returns void. + */ +void +memdescSetCustomHeap +( + PMEMORY_DESCRIPTOR pMemDesc, + MEMDESC_CUSTOM_HEAP heap +) +{ + NV_ASSERT(0); +} + +/*! + * @brief Returns the ACR CUSTOM_HEAP flag. + * + * + * @param[in] pMemDesc Pointer to memory descriptor. + * + * @returns NV_TRUE if flag MEMDESC_FLAGS_CUSTOM_HEAP_ACR is SET. + */ +MEMDESC_CUSTOM_HEAP +memdescGetCustomHeap +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + + return MEMDESC_CUSTOM_HEAP_NONE; +} + +PIOVAMAPPING memdescGetIommuMap +( + PMEMORY_DESCRIPTOR pMemDesc, + NvU32 iovaspaceId +) +{ + PIOVAMAPPING pIommuMap = pMemDesc->_pIommuMappings; + while (pIommuMap != NULL) + { + if (pIommuMap->iovaspaceId == iovaspaceId) + { + break; + } + + pIommuMap = pIommuMap->pNext; + } + + return pIommuMap; +} + +NV_STATUS memdescAddIommuMap +( + PMEMORY_DESCRIPTOR pMemDesc, + PIOVAMAPPING pIommuMap +) +{ + NV_ASSERT_OR_RETURN((pMemDesc->_pIommuMappings == NULL) || + (!memdescIsSubMemoryMemDesc(pMemDesc)), NV_ERR_INVALID_ARGUMENT); + + // + // Only root physical memdescs can have multiple IOMMU mappings. + // Submemdescs can only have one, and the list linkage is used + // instead to link it as a child of the root IOMMU mapping, so we + // don't want to overwrite that here. + // + if (!memdescIsSubMemoryMemDesc(pMemDesc)) + { + pIommuMap->pNext = pMemDesc->_pIommuMappings; + } + + pMemDesc->_pIommuMappings = pIommuMap; + + return NV_OK; +} + +void memdescRemoveIommuMap +( + PMEMORY_DESCRIPTOR pMemDesc, + PIOVAMAPPING pIommuMap +) +{ + // + // Only root physical memdescs can have multiple IOMMU mappings. + // Submemdescs can only have one, and the list linkage is used + // instead to link it as a child of the root IOMMU mapping, so we + // don't want to overwrite that here. 
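+    // The removal below walks the list with a pointer-to-pointer so that
+    // unlinking the head and unlinking an interior node are the same
+    // operation: *ppTmpIommuMap is redirected past the mapping being removed.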
+ // + if (!memdescIsSubMemoryMemDesc(pMemDesc)) + { + PIOVAMAPPING *ppTmpIommuMap = &pMemDesc->_pIommuMappings; + while ((*ppTmpIommuMap != NULL) && (*ppTmpIommuMap != pIommuMap)) + { + ppTmpIommuMap = &(*ppTmpIommuMap)->pNext; + } + + if (*ppTmpIommuMap != NULL) + { + *ppTmpIommuMap = pIommuMap->pNext; + + } + else + { + NV_ASSERT(*ppTmpIommuMap != NULL); + } + } + else if (pMemDesc->_pIommuMappings == pIommuMap) + { + pMemDesc->_pIommuMappings = NULL; + } + else + { + // + // Trying to remove a submemory mapping that doesn't belong to this + // descriptor? + // + NV_ASSERT(pMemDesc->_pIommuMappings == pIommuMap); + } +} + +NV_STATUS memdescMapIommu +( + PMEMORY_DESCRIPTOR pMemDesc, + NvU32 iovaspaceId +) +{ +#if (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_PLATFORM_MODS) && !NVCPU_IS_ARM + if (iovaspaceId != NV_IOVA_DOMAIN_NONE) + { + NV_ADDRESS_SPACE addrSpace = memdescGetAddressSpace(pMemDesc); + OBJGPU *pMappingGpu = gpumgrGetGpuFromId(iovaspaceId); + PMEMORY_DESCRIPTOR pRootMemDesc = memdescGetRootMemDesc(pMemDesc, NULL); + if ((addrSpace == ADDR_SYSMEM) || gpumgrCheckIndirectPeer(pMappingGpu, pRootMemDesc->pGpu)) + { + NV_STATUS status; + OBJIOVASPACE *pIOVAS = iovaspaceFromId(iovaspaceId); + NV_ASSERT_OR_RETURN(pIOVAS, NV_ERR_OBJECT_NOT_FOUND); + + status = iovaspaceAcquireMapping(pIOVAS, pMemDesc); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + } + } +#endif + + // + // Verify that the final physical addresses are indeed addressable by the + // GPU. We only need to do this for internally allocated sysmem (RM owned) + // as well as externally allocated/mapped sysmem. Note, addresses for peer + // (P2P mailbox registers) BARs are actually not handled by the GMMU and + // support a full 64-bit address width, hence validation is not needed. + // + if ((pMemDesc->Allocated || + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM) || + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_PEER_IO_MEM)) && + memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) + { + // TODO This should look up the GPU corresponding to the IOVAS instead. 
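+    //
+    // Illustrative numbers (assumed for the sake of example): with a 40-bit
+    // DMA window, dmaWindowEndAddr below would be 2^40 - 1, and any page
+    // extending past it fails the range check and releases the IOMMU mapping.
+    //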
+ OBJGPU *pGpu = pMemDesc->pGpu; + RmPhysAddr dmaWindowEndAddr = gpuGetDmaEndAddress_HAL(pGpu); + RmPhysAddr physAddr; + + if (memdescGetContiguity(pMemDesc, AT_GPU)) + { + physAddr = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + if (physAddr + pMemDesc->Size - 1 > dmaWindowEndAddr) + { + NV_PRINTF(LEVEL_ERROR, + "0x%llx-0x%llx is not addressable by GPU 0x%x [0x0-0x%llx]\n", + physAddr, physAddr + pMemDesc->Size - 1, + pGpu->gpuId, dmaWindowEndAddr); + memdescUnmapIommu(pMemDesc, iovaspaceId); + return NV_ERR_INVALID_ADDRESS; + } + } + else + { + NvU32 i; + for (i = 0; i < pMemDesc->PageCount; i++) + { + physAddr = memdescGetPte(pMemDesc, AT_GPU, i); + if (physAddr + (pMemDesc->pageArrayGranularity - 1) > dmaWindowEndAddr) + { + NV_PRINTF(LEVEL_ERROR, + "0x%llx is not addressable by GPU 0x%x [0x0-0x%llx]\n", + physAddr, pGpu->gpuId, dmaWindowEndAddr); + memdescUnmapIommu(pMemDesc, iovaspaceId); + return NV_ERR_INVALID_ADDRESS; + } + } + } + } + + return NV_OK; +} + +void memdescUnmapIommu +( + PMEMORY_DESCRIPTOR pMemDesc, + NvU32 iovaspaceId +) +{ +#if (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_PLATFORM_MODS) && !NVCPU_IS_ARM + PIOVAMAPPING pIovaMapping; + OBJIOVASPACE *pIOVAS; + + if (iovaspaceId == NV_IOVA_DOMAIN_NONE) + return; + + pIovaMapping = memdescGetIommuMap(pMemDesc, iovaspaceId); + NV_ASSERT(pIovaMapping); + + pIOVAS = iovaspaceFromMapping(pIovaMapping); + iovaspaceReleaseMapping(pIOVAS, pIovaMapping); +#endif +} + +void memdescCheckSubDevicePageSizeConsistency +( + OBJGPU *pGpu, + PMEMORY_DESCRIPTOR pMemDesc, + OBJVASPACE *pVAS, + NvU64 pageSize, + NvU64 pageOffset +) +{ + NvU64 tempPageSize, tempPageOffset; + PMEMORY_DESCRIPTOR pTempMemDesc = NULL; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + pTempMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + tempPageSize = memdescGetPageSize(pTempMemDesc, VAS_ADDRESS_TRANSLATION(pVAS)); + tempPageOffset = memdescGetPhysAddr(pTempMemDesc, VAS_ADDRESS_TRANSLATION(pVAS), 0) & (tempPageSize - 1); + + // Assert if inconsistent + NV_ASSERT(pageSize == tempPageSize); + NV_ASSERT(pageOffset == tempPageOffset); + SLI_LOOP_END +} + +void memdescCheckSubDeviceMemContiguityConsistency +( + OBJGPU *pGpu, + PMEMORY_DESCRIPTOR pMemDesc, + OBJVASPACE *pVAS, + NvBool bIsMemContiguous +) +{ + NvBool bTempIsMemContiguous = NV_FALSE; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + bTempIsMemContiguous = memdescGetContiguity(memdescGetMemDescFromGpu(pMemDesc, pGpu), VAS_ADDRESS_TRANSLATION(pVAS)); + // Assert if inconsistent + NV_ASSERT(bIsMemContiguous == bTempIsMemContiguous); + SLI_LOOP_END +} + +/* @brief Get GPA(guest physical addresses) for given GPU physical addresses. + * + * @param[in] pGpu GPU for which GPAs are needed + * @param[in] pageCount Size of array. Should be 1 for contiguous mappings + * @param[in/out] pGpa Array of GPU PAs to be converted to guest PAs + * + * @returns NV_STATUS + */ + +/*! + * @brief Override the registry INST_LOC two-bit enum to an aperture (list) + cpu attr. + * + * Caller must set initial default values. 
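+ *
+ * Hypothetical usage sketch (identifiers as used in the function below;
+ * setting the defaults is the caller's responsibility):
+ *
+ *     const NV_ADDRESS_SPACE *pAllocList = ADDRLIST_FBMEM_ONLY;
+ *     NvU32 cpuAttr = NV_MEMORY_WRITECOMBINED;
+ *     memdescOverrideInstLocList(instLoc, "instblk", &pAllocList, &cpuAttr);
+ *     // NV_REG_STR_RM_INST_LOC_COH would switch these to
+ *     // ADDRLIST_SYSMEM_ONLY / NV_MEMORY_CACHED.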
+ */
+void
+memdescOverrideInstLocList
+(
+    NvU32 instLoc, // NV_REG_STR_RM_INST_LOC
+    const char *name,
+    const NV_ADDRESS_SPACE **ppAllocList,
+    NvU32 *pCpuMappingAttr
+)
+{
+    switch (instLoc)
+    {
+        case NV_REG_STR_RM_INST_LOC_COH:
+            NV_PRINTF(LEVEL_INFO, "using coh system memory for %s\n", name);
+            *ppAllocList = ADDRLIST_SYSMEM_ONLY;
+            *pCpuMappingAttr = NV_MEMORY_CACHED;
+            break;
+        case NV_REG_STR_RM_INST_LOC_NCOH:
+            NV_PRINTF(LEVEL_INFO, "using ncoh system memory for %s\n", name);
+            *ppAllocList = ADDRLIST_SYSMEM_ONLY;
+            *pCpuMappingAttr = NV_MEMORY_UNCACHED;
+            break;
+        case NV_REG_STR_RM_INST_LOC_VID:
+            NV_PRINTF(LEVEL_INFO, "using video memory for %s\n", name);
+            *ppAllocList = ADDRLIST_FBMEM_ONLY;
+            *pCpuMappingAttr = NV_MEMORY_WRITECOMBINED;
+            break;
+        case NV_REG_STR_RM_INST_LOC_DEFAULT:
+        default:
+            // Do not update parameters
+            break;
+    }
+}
+
+/*!
+ * @brief Override wrapper for callers needing a single aperture
+ */
+void
+memdescOverrideInstLoc
+(
+    NvU32 instLoc,
+    const char *name,
+    NV_ADDRESS_SPACE *pAddrSpace,
+    NvU32 *pCpuMappingAttr
+)
+{
+    const NV_ADDRESS_SPACE *pAllocList = NULL;
+
+    memdescOverrideInstLocList(instLoc, name, &pAllocList, pCpuMappingAttr);
+    if (pAllocList != NULL)
+        *pAddrSpace = pAllocList[0];
+}
+
+/*!
+ * @brief Override the physical address width (Windows WAR)
+ *
+ * @param[in] pGpu
+ * @param[in] pMemDesc     Memory descriptor to update
+ * @param[in] addressWidth Address width to override, in bits
+ *
+ * @returns nothing
+ */
+void
+memdescOverridePhysicalAddressWidthWindowsWAR
+(
+    OBJGPU *pGpu,
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvU32 addressWidth
+)
+{
+    if (RMCFG_FEATURE_PLATFORM_WINDOWS)
+    {
+        if (addressWidth < gpuarchGetSystemPhysAddrWidth_HAL(gpuGetArch(pGpu)))
+        {
+            pMemDesc->_flags |= MEMDESC_FLAGS_OVERRIDE_SYSTEM_ADDRESS_LIMIT;
+            pMemDesc->_overridenAddressWidth = addressWidth;
+        }
+    }
+}
+
+void
+memdescSetName(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, const char *name, const char* suffix)
+{
+    return;
+}
+
+NV_STATUS
+memdescSendMemDescToGSP(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvHandle *pHandle)
+{
+    NV_STATUS status = NV_OK;
+    RsClient *pClient;
+    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
+    NvU32 flags = 0;
+    NvU32 index = 0;
+    NvU32 hClass;
+    NvU64 *pageNumberList = NULL;
+    RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
+    NV_MEMORY_LIST_ALLOCATION_PARAMS listAllocParams = {0};
+
+    // Nothing to do without GSP
+    if (!IS_GSP_CLIENT(pGpu))
+    {
+        return NV_OK;
+    }
+
+    switch (memdescGetAddressSpace(pMemDesc))
+    {
+        case ADDR_FBMEM:
+            hClass = NV01_MEMORY_LIST_FBMEM;
+            break;
+
+        case ADDR_SYSMEM:
+            hClass = NV01_MEMORY_LIST_SYSTEM;
+            break;
+
+        default:
+            return NV_ERR_NOT_SUPPORTED;
+    }
+
+    // Initialize parameters with pMemDesc information
+    listAllocParams.pteAdjust = pMemDesc->PteAdjust;
+    listAllocParams.format = memdescGetPteKind(pMemDesc);
+    listAllocParams.size = pMemDesc->Size;
+    listAllocParams.hClient = NV01_NULL_OBJECT;
+    listAllocParams.hParent = NV01_NULL_OBJECT;
+    listAllocParams.hObject = NV01_NULL_OBJECT;
+    listAllocParams.limit = pMemDesc->Size - 1;
+    listAllocParams.flagsOs02 = (DRF_DEF(OS02,_FLAGS,_MAPPING,_NO_MAP) |
+                                 (flags & DRF_SHIFTMASK(NVOS02_FLAGS_COHERENCY)));
+
+    // Handle pageCount based on pMemDesc contiguity
+    if (!memdescGetContiguity(pMemDesc, AT_GPU))
+    {
+        listAllocParams.flagsOs02 |= DRF_DEF(OS02,_FLAGS,_PHYSICALITY,_NONCONTIGUOUS);
+        listAllocParams.pageCount = pMemDesc->PageCount;
+    }
+    else
+    {
+        listAllocParams.pageCount = 1;
+    }
+
+    // Initialize pageNumberList
+    pageNumberList = portMemAllocNonPaged(sizeof(NvU64) * listAllocParams.pageCount);
+    // Bail out if the page list could not be allocated
+    if (pageNumberList == NULL)
+        return NV_ERR_NO_MEMORY;
+    for (index = 0; index < listAllocParams.pageCount; index++)
+        pageNumberList[index] = memdescGetPte(pMemDesc, AT_GPU, index) >> RM_PAGE_SHIFT;
+    listAllocParams.pageNumberList = pageNumberList;
+
+    // Create MemoryList object
+    NV_ASSERT_OK_OR_GOTO(status,
+        pRmApi->Alloc(pRmApi,
+                      pMemoryManager->hClient,
+                      pMemoryManager->hSubdevice,
+                      pHandle,
+                      hClass,
+                      &listAllocParams,
+                      sizeof(listAllocParams)),
+        end);
+
+    NV_ASSERT_OK_OR_GOTO(status,
+        serverGetClientUnderLock(&g_resServ, pMemoryManager->hClient, &pClient),
+        end);
+
+    // Register MemoryList object with GSP
+    NV_ASSERT_OK_OR_GOTO(status,
+        memRegisterWithGsp(pGpu, pClient, pMemoryManager->hSubdevice, *pHandle),
+        end);
+
+end:
+    if ((status != NV_OK) && (*pHandle != NV01_NULL_OBJECT))
+        pRmApi->Free(pRmApi, pMemoryManager->hClient, *pHandle);
+
+    if (pageNumberList != NULL)
+        portMemFree(pageNumberList);
+
+    return status;
+}
+
+NV_STATUS
+memdescSetPageArrayGranularity
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvU64 pageArrayGranularity
+)
+{
+    // Make sure pageArrayGranularity is a power-of-2 value.
+    NV_ASSERT_OR_RETURN((pageArrayGranularity & (pageArrayGranularity - 1)) == 0, NV_ERR_INVALID_ARGUMENT);
+
+    // Allow setting the same granularity.
+    if (pMemDesc->pageArrayGranularity == pageArrayGranularity)
+    {
+        return NV_OK;
+    }
+
+    // Make sure setting the page array granularity happens before the pteArray is populated.
+    NV_ASSERT_OR_RETURN(pMemDesc->_pteArray[0] == 0, NV_ERR_INVALID_STATE);
+
+    pMemDesc->pageArrayGranularity = pageArrayGranularity;
+
+    return NV_OK;
+}
+
+NV_STATUS
+memdescFillMemdescForPhysAttr
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    ADDRESS_TRANSLATION addressTranslation,
+    NvU64 *pOffset,
+    NvU32 *pMemAperture,
+    NvU32 *pMemKind,
+    NvU32 *pGpuCacheAttr,
+    NvU32 *pGpuP2PCacheAttr,
+    NvU64 *contigSegmentSize
+)
+{
+    NvU64 surfOffset = *pOffset, surfBase, surfLimit;
+
+    surfBase = memdescGetPhysAddr(pMemDesc, addressTranslation, 0);
+    surfLimit = surfBase + pMemDesc->Size - 1;
+    *pMemKind = memdescGetPteKind(pMemDesc);
+
+    *pOffset = memdescGetPhysAddr(pMemDesc, addressTranslation, surfOffset);
+
+    if (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM)
+        *pMemAperture = NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_VIDMEM;
+    else if (memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM)
+        *pMemAperture = NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_SYSMEM;
+    else if (memdescGetAddressSpace(pMemDesc) == ADDR_EGM)
+        *pMemAperture = NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_SYSMEM;
+    else if (memdescGetAddressSpace(pMemDesc) == ADDR_VIRTUAL)
+    {
+        //
+        // XXX we could theoretically find whatever phys mem object is plugged
+        // in at surfOffset w/in the virt object...
that'd mean scanning + // pMemory->DmaMappingList + // + return NV_ERR_NOT_SUPPORTED; + } + else + return NV_ERR_GENERIC; + + if (memdescGetGpuCacheAttrib(pMemDesc) == NV_MEMORY_CACHED) + { + *pGpuCacheAttr = NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED; + } + else if (memdescGetGpuCacheAttrib(pMemDesc) == NV_MEMORY_UNCACHED) + { + *pGpuCacheAttr = NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED; + } + else + { + *pGpuCacheAttr = NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN; + } + + if (memdescGetGpuP2PCacheAttrib(pMemDesc) == NV_MEMORY_CACHED) + { + *pGpuP2PCacheAttr = NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED; + } + else if (memdescGetGpuP2PCacheAttrib(pMemDesc) == NV_MEMORY_UNCACHED) + { + *pGpuP2PCacheAttr = NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED; + } + else + { + *pGpuP2PCacheAttr = NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN; + } + + *contigSegmentSize = surfLimit - (surfBase + surfOffset) + 1; + + if ( !memdescGetContiguity(pMemDesc, addressTranslation)) + { + // XXX overly conservative. we could scan the PTEs to find out if more pages are contig. + NvU64 surfOffsetLimitSame4KBPage = (4*1024)*((surfBase + surfOffset)/(4*1024)) + (4*1024) - 1; + if ( surfLimit >= surfOffsetLimitSame4KBPage ) + *contigSegmentSize = surfOffsetLimitSame4KBPage - (surfBase + surfOffset) + 1; + } + + return NV_OK; +} + +NvBool +memdescIsEgm +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ADDRESS_SPACE addrSpace; + MEMORY_DESCRIPTOR *pRootMemDesc; + MemoryManager *pMemoryManager; + + // + // If memdesc is not device owned, we can't tell if local EGM is enabled + // due to lack of memory manager. + // + if (pMemDesc->pGpu == NULL) + { + return NV_FALSE; + } + + addrSpace = memdescGetAddressSpace(pMemDesc); + pRootMemDesc = memdescGetRootMemDesc(pMemDesc, NULL); + + if ((pRootMemDesc == NULL) || (pRootMemDesc->pGpu == NULL)) + { + return NV_FALSE; + } + + pMemoryManager = GPU_GET_MEMORY_MANAGER(pRootMemDesc->pGpu); + if (pMemoryManager == NULL) + { + return NV_FALSE; + } + + if ((addrSpace == ADDR_EGM) || + (memmgrIsLocalEgmEnabled(pMemoryManager) && + (addrSpace == ADDR_SYSMEM) && + (pMemoryManager->localEgmNodeId != NV0000_CTRL_NO_NUMA_NODE) && + (memdescGetNumaNode(pMemDesc) == pMemoryManager->localEgmNodeId))) + { + return NV_TRUE; + } + + return NV_FALSE; +} + +NvU64 memdescGetAdjustedPageSize( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NvU64 pageSize = osGetPageSize(); + // + // Only non-contig memory needs to specify order. For contig memory the OS layer + // calculates it within nv_alias_pages and picks the largest order based on the + // allocation size. + // + if (!memdescGetContiguity(pMemDesc, AT_CPU)) + { + pageSize = memdescGetPageSize(pMemDesc, AT_GPU); + // + // pageSize == 0 indicates the caller did not specify a physical page size + // for the allocation. Default to allocating at OS page size granularity. + // + if (pageSize == 0) + { + pageSize = osGetPageSize(); + memdescSetPageSize(pMemDesc, AT_GPU, pageSize); + } + } + + return pageSize; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c b/src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c new file mode 100644 index 0000000..1f6b495 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c @@ -0,0 +1,1560 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "gpu/mem_mgr/mem_mgr.h"
+#include "gpu/mem_mgr/heap_base.h"
+#include "gpu/mem_mgr/mem_utils.h"
+#include "gpu/mem_mgr/virt_mem_allocator_common.h"
+#include "os/nv_memory_type.h"
+#include "core/locks.h"
+#include "ctrl/ctrl2080.h"
+#include "rmapi/rs_utils.h"
+#include "gpu/subdevice/subdevice.h"
+
+#include "class/cl0005.h" // NV01_EVENT
+
+#include "ctrl/ctrla06f/ctrla06fgpfifo.h"
+
+// Memory copy block size for when we need to cut up a mapping
+#define MEMORY_COPY_BLOCK_SIZE (1024 * 1024)
+
+/* ------------------------ Private functions --------------------------------------- */
+
+/*!
+ * @brief This utility routine helps in determining the appropriate
+ *        memory transfer technique to be used
+ */
+static TRANSFER_TYPE
+memmgrGetMemTransferType
+(
+    MemoryManager *pMemoryManager,
+    TRANSFER_SURFACE *pDst,
+    TRANSFER_SURFACE *pSrc,
+    NvU32 flags
+)
+{
+    TRANSFER_TYPE transferType = TRANSFER_TYPE_PROCESSOR;
+    OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager);
+
+    if ((pDst == NULL || memdescGetAddressSpace(pDst->pMemDesc) == ADDR_SYSMEM) &&
+        (pSrc == NULL || memdescGetAddressSpace(pSrc->pMemDesc) == ADDR_SYSMEM) &&
+        !RMCFG_FEATURE_PLATFORM_GSP)
+    {
+        //
+        // If the operation only touches sysmem, use processor copy
+        //
+        transferType = TRANSFER_TYPE_PROCESSOR;
+    }
+    else if (flags & TRANSFER_FLAGS_PREFER_CE)
+    {
+        if (IS_SIMULATION(pGpu) && pSrc != NULL)
+        {
+            //
+            // This is significantly faster on fmodel for S/R (5min vs. 5sec)
+            // because of the backdoor memory reads and writes.
+            // Memset is not currently supported.
+            //
+            return TRANSFER_TYPE_BAR0;
+        }
+
+        //
+        // On emulation we may lack CE support, so avoid excessive debug spew.
+        // ceUtil is disabled for the vGPU host.
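+        // In short: sysmem-to-sysmem work stays on the CPU, simulation
+        // prefers BAR0 for speed, and everything else falls back to
+        // TRANSFER_TYPE_PROCESSOR here when CE cannot be used.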
+        //
+        if (!IS_EMULATION(pGpu))
+        {
+            NV_PRINTF(LEVEL_WARNING, "Can't copy using CE, falling back to other methods\n");
+        }
+    }
+
+    return transferType;
+}
+
+static NV_STATUS
+memmgrCheckSurfaceBounds
+(
+    TRANSFER_SURFACE *pSurface,
+    NvU64             size
+)
+{
+    NV_ASSERT_OR_RETURN(pSurface != NULL, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(pSurface->pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(size != 0, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(pSurface->offset <= pSurface->pMemDesc->Size, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(pSurface->offset + size <= pSurface->pMemDesc->Size, NV_ERR_INVALID_ARGUMENT);
+
+    return NV_OK;
+}
+
+static NV_STATUS
+_memmgrAllocAndMapSurface
+(
+    OBJGPU             *pGpu,
+    NvU64               size,
+    MEMORY_DESCRIPTOR **ppMemDesc,
+    void              **ppMap,
+    void              **ppPriv
+)
+{
+    NV_STATUS status;
+    NvU64 flags = 0;
+
+    NV_ASSERT_OR_RETURN(ppMemDesc != NULL, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(ppMap != NULL, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(ppPriv != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    NV_ASSERT_OK_OR_RETURN(
+        memdescCreate(ppMemDesc, pGpu, size, RM_PAGE_SIZE, NV_TRUE,
+                      ADDR_SYSMEM, NV_MEMORY_CACHED, flags));
+
+    memdescTagAlloc(status, NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_77,
+                    (*ppMemDesc));
+    NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, status, failed);
+
+    NV_ASSERT_OK_OR_GOTO(status,
+        memdescMapOld(*ppMemDesc, 0, size, NV_TRUE, NV_PROTECT_READ_WRITE,
+                      ppMap, ppPriv),
+        failed);
+
+    // Clear surface before use
+    portMemSet(*ppMap, 0, size);
+
+    return NV_OK;
+failed:
+    memdescFree(*ppMemDesc);
+    memdescDestroy(*ppMemDesc);
+
+    *ppMemDesc = NULL;
+    *ppMap = NULL;
+    *ppPriv = NULL;
+
+    return status;
+}
+
+static void
+_memmgrUnmapAndFreeSurface
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    void              *pMap,
+    void              *pPriv
+)
+{
+    memdescUnmapOld(pMemDesc, NV_TRUE, pMap, pPriv);
+
+    memdescFree(pMemDesc);
+    memdescDestroy(pMemDesc);
+}
+
+/*!
+ * @brief This function is used for writing/reading data to/from a client + * provided buffer from/to some source region in vidmem + * + * @param[in] pDst TRANSFER_SURFACE info for destination region + * @param[in] pBuf Client provided buffer + * @param[in] size Size in bytes of the memory transfer + * @param[in] bRead TRUE for read and FALSE for write + */ +static NV_STATUS +_memmgrMemReadOrWriteWithGsp +( + OBJGPU *pGpu, + TRANSFER_SURFACE *pDst, + void *pBuf, + NvU64 size, + NvBool bRead +) +{ + NV2080_CTRL_INTERNAL_MEMMGR_MEMORY_TRANSFER_WITH_GSP_PARAMS gspParams; + NV_STATUS status; + MEMORY_DESCRIPTOR *pStagingBuf = NULL; + void *pStagingBufMap = NULL; + void *pStagingBufPriv = NULL; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + // Do not expect GSP to be used for reading/writing from/to sysmem + if (memdescGetAddressSpace(pDst->pMemDesc) == ADDR_SYSMEM) + return NV_ERR_NOT_SUPPORTED; + + // Allocate and map the staging buffer + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + _memmgrAllocAndMapSurface(pGpu, size, &pStagingBuf, &pStagingBufMap, + &pStagingBufPriv)); + + // Setup control call params + portMemSet(&gspParams, 0, sizeof(gspParams)); + + // Copy the data to staging buffer before poking GSP for copying + if (!bRead) + { + { + portMemCopy(pStagingBufMap, size, pBuf, size); + } + } + + gspParams.memop = NV2080_CTRL_MEMMGR_MEMORY_OP_MEMCPY; + gspParams.transferSize = size; + + if (bRead) + { + // Source surface in vidmem + gspParams.src.baseAddr = memdescGetPhysAddr(pDst->pMemDesc, AT_GPU, 0); + gspParams.src.size = memdescGetSize(pDst->pMemDesc); + gspParams.src.offset = pDst->offset; + gspParams.src.cpuCacheAttrib = memdescGetCpuCacheAttrib(pDst->pMemDesc); + gspParams.src.aperture = memdescGetAddressSpace(pDst->pMemDesc); + + // Destination surface in unprotected sysmem + gspParams.dst.baseAddr = memdescGetPhysAddr(pStagingBuf, AT_GPU, 0); + gspParams.dst.size = memdescGetSize(pStagingBuf); + gspParams.dst.offset = 0; + gspParams.dst.cpuCacheAttrib = memdescGetCpuCacheAttrib(pStagingBuf); + gspParams.dst.aperture = memdescGetAddressSpace(pStagingBuf); + } + else + { + // Source surface in unprotected sysmem + gspParams.src.baseAddr = memdescGetPhysAddr(pStagingBuf, AT_GPU, 0); + gspParams.src.size = memdescGetSize(pStagingBuf); + gspParams.src.offset = 0; + gspParams.src.cpuCacheAttrib = memdescGetCpuCacheAttrib(pStagingBuf); + gspParams.src.aperture = memdescGetAddressSpace(pStagingBuf); + + // Destination surface in vidmem + gspParams.dst.baseAddr = memdescGetPhysAddr(pDst->pMemDesc, AT_GPU, 0); + gspParams.dst.size = memdescGetSize(pDst->pMemDesc); + gspParams.dst.offset = pDst->offset; + gspParams.dst.cpuCacheAttrib = memdescGetCpuCacheAttrib(pDst->pMemDesc); + gspParams.dst.aperture = memdescGetAddressSpace(pDst->pMemDesc); + } + + // Send the control call + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MEMMGR_MEMORY_TRANSFER_WITH_GSP, + &gspParams, + sizeof(gspParams)), + failed); + + // Read contents from staging buffer after GSP is done copying + if (bRead) + { + { + portMemCopy(pBuf, size, pStagingBufMap, size); + } + } + +failed: + _memmgrUnmapAndFreeSurface(pStagingBuf, pStagingBufMap, pStagingBufPriv); + return status; +} + +/*! + * @brief This function is used for copying data b/w two memory regions + * using GSP. 
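+ *        When one endpoint is in sysmem, a staging buffer is used: a sysmem
+ *        source is first copied into the staging buffer for GSP to consume,
+ *        and a sysmem destination is filled from the staging buffer after
+ *        GSP finishes the copy.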
+ * + * @param[in] pDst TRANSFER_SURFACE info for destination region + * @param[in] pSrc TRANSFER_SURFACE info for source region + * @param[in] size Size in bytes of the memory transfer + */ +static NV_STATUS +_memmgrMemcpyWithGsp +( + OBJGPU *pGpu, + TRANSFER_SURFACE *pDst, + TRANSFER_SURFACE *pSrc, + NvU64 size +) +{ + NV2080_CTRL_INTERNAL_MEMMGR_MEMORY_TRANSFER_WITH_GSP_PARAMS gspParams; + NV_STATUS status; + MEMORY_DESCRIPTOR *pStagingBuf = NULL; + void *pStagingBufMap = NULL; + void *pStagingBufPriv = NULL; + NvU8 *pMap = NULL; + void *pPriv = NULL; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + // + // Do not expect GSP to be used for copying data b/w two surfaces + // in sysmem. For SPT, there is no non-CPR vidmem. So, allow vidmem + // to vidmem copies in plain text. For copies b/w CPR and non-CPR + // vidmem, encryption/decryption needs to happen at the endpoints. + // + if (memdescGetAddressSpace(pSrc->pMemDesc) == ADDR_SYSMEM && + memdescGetAddressSpace(pDst->pMemDesc) == ADDR_SYSMEM) + { + return NV_ERR_NOT_SUPPORTED; + } + + // Allocate and map the bounce buffer + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + _memmgrAllocAndMapSurface(pGpu, size, &pStagingBuf, &pStagingBufMap, + &pStagingBufPriv)); + + // Setup control call params + portMemSet(&gspParams, 0, sizeof(gspParams)); + + gspParams.memop = NV2080_CTRL_MEMMGR_MEMORY_OP_MEMCPY; + gspParams.transferSize = size; + + if (memdescGetAddressSpace(pSrc->pMemDesc) == ADDR_SYSMEM) + { + NV_ASSERT_OK_OR_GOTO(status, + memdescMapOld(pSrc->pMemDesc, 0, size, NV_TRUE, + NV_PROTECT_READ_WRITE, (void**)&pMap, &pPriv), + failed); + + // Copy to staging buffer, encrypting first if CC mode + { + portMemCopy(pStagingBufMap, size, pMap + pSrc->offset, size); + } + + // Be sure to unmap memory before potentially taking cleanup path + memdescUnmapOld(pSrc->pMemDesc, NV_TRUE, (void*)pMap, pPriv); + NV_ASSERT_OK_OR_GOTO(status, status, failed); + + // Source surface in unprotected sysmem + gspParams.src.baseAddr = memdescGetPhysAddr(pStagingBuf, AT_GPU, 0); + gspParams.src.size = memdescGetSize(pStagingBuf); + gspParams.src.offset = 0; + gspParams.src.cpuCacheAttrib = memdescGetCpuCacheAttrib(pStagingBuf); + gspParams.src.aperture = memdescGetAddressSpace(pStagingBuf); + + // Destination surface in vidmem + gspParams.dst.baseAddr = memdescGetPhysAddr(pDst->pMemDesc, AT_GPU, 0); + gspParams.dst.size = memdescGetSize(pDst->pMemDesc); + gspParams.dst.offset = pDst->offset; + gspParams.dst.cpuCacheAttrib = memdescGetCpuCacheAttrib(pDst->pMemDesc); + gspParams.dst.aperture = memdescGetAddressSpace(pDst->pMemDesc); + } + else + { + // Source surface in vidmem + gspParams.src.baseAddr = memdescGetPhysAddr(pSrc->pMemDesc, AT_GPU, 0); + gspParams.src.size = memdescGetSize(pSrc->pMemDesc); + gspParams.src.offset = pSrc->offset; + gspParams.src.cpuCacheAttrib = memdescGetCpuCacheAttrib(pSrc->pMemDesc); + gspParams.src.aperture = memdescGetAddressSpace(pSrc->pMemDesc); + + if (memdescGetAddressSpace(pDst->pMemDesc) == ADDR_FBMEM) + { + // Destination surface in vidmem + gspParams.dst.baseAddr = memdescGetPhysAddr(pDst->pMemDesc, AT_GPU, 0); + gspParams.dst.size = memdescGetSize(pDst->pMemDesc); + gspParams.dst.offset = pDst->offset; + gspParams.dst.cpuCacheAttrib = memdescGetCpuCacheAttrib(pDst->pMemDesc); + gspParams.dst.aperture = memdescGetAddressSpace(pDst->pMemDesc); + } + else + { + // Destination surface in unprotected sysmem + gspParams.dst.baseAddr = memdescGetPhysAddr(pStagingBuf, AT_GPU, 0); + gspParams.dst.size = memdescGetSize(pStagingBuf); + 
gspParams.dst.offset = 0; + gspParams.dst.cpuCacheAttrib = memdescGetCpuCacheAttrib(pStagingBuf); + gspParams.dst.aperture = memdescGetAddressSpace(pStagingBuf); + } + } + + // Send the control call + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MEMMGR_MEMORY_TRANSFER_WITH_GSP, + &gspParams, + sizeof(gspParams)), + failed); + + // Copy from staging buffer to destination + if (memdescGetAddressSpace(pDst->pMemDesc) == ADDR_SYSMEM) + { + NV_ASSERT_OK_OR_GOTO(status, + memdescMapOld(pDst->pMemDesc, 0, size, NV_TRUE, + NV_PROTECT_READ_WRITE, (void**)&pMap, &pPriv), + failed); + + { + portMemCopy(pMap + pDst->offset, size, pStagingBufMap, size); + } + + // Be sure to unmap memory before potentially taking cleanup path + memdescUnmapOld(pDst->pMemDesc, NV_TRUE, (void*)pMap, pPriv); + NV_ASSERT_OK_OR_GOTO(status, status, failed); + } + +failed: + _memmgrUnmapAndFreeSurface(pStagingBuf, pStagingBufMap, pStagingBufPriv); + return status; +} + +static NV_STATUS +_memmgrMemsetWithGsp +( + OBJGPU *pGpu, + TRANSFER_SURFACE *pDst, + NvU32 value, + NvU64 size +) +{ + NV2080_CTRL_INTERNAL_MEMMGR_MEMORY_TRANSFER_WITH_GSP_PARAMS gspParams; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + // Do not expect to use GSP to memset surfaces in sysmem + if (memdescGetAddressSpace(pDst->pMemDesc) == ADDR_SYSMEM) + return NV_ERR_NOT_SUPPORTED; + + portMemSet(&gspParams, 0, sizeof(gspParams)); + + gspParams.memop = NV2080_CTRL_MEMMGR_MEMORY_OP_MEMSET; + gspParams.transferSize = size; + gspParams.value = value; + gspParams.dst.baseAddr = memdescGetPhysAddr(pDst->pMemDesc, AT_GPU, 0); + gspParams.dst.size = memdescGetSize(pDst->pMemDesc); + gspParams.dst.offset = pDst->offset; + gspParams.dst.cpuCacheAttrib = memdescGetCpuCacheAttrib(pDst->pMemDesc); + gspParams.dst.aperture = memdescGetAddressSpace(pDst->pMemDesc); + + // Send the control call + NV_ASSERT_OK_OR_RETURN( + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MEMMGR_MEMORY_TRANSFER_WITH_GSP, + &gspParams, + sizeof(gspParams))); + + return NV_OK; +} + +/*! + * @brief This function is used for copying data b/w two memory regions + * using the specified memory transfer technique. Both memory regions + * can be in the same aperture or in different apertures. 
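+ *        Only the PROCESSOR and GSP_DMA techniques are serviced here; the
+ *        CE and BAR0 cases in the switch below assert and return
+ *        NV_ERR_INVALID_STATE.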
+ * + * @param[in] pDstInfo TRANSFER_SURFACE info for destination region + * @param[in] pSrcInfo TRANSFER_SURFACE info for source region + * @param[in] size Size in bytes of the memory transfer + * @param[in] transferType Memory transfer technique to be used + * @param[in] flags Flags + */ +static NV_STATUS +memmgrMemCopyWithTransferType +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pDstInfo, + TRANSFER_SURFACE *pSrcInfo, + NvU32 size, + TRANSFER_TYPE transferType, + NvU32 flags +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + + // Sanitize the input + NV_ASSERT_OK_OR_RETURN(memmgrCheckSurfaceBounds(pDstInfo, size)); + NV_ASSERT_OK_OR_RETURN(memmgrCheckSurfaceBounds(pSrcInfo, size)); + NV_ASSERT_OR_RETURN(!memdescDescIsEqual(pDstInfo->pMemDesc, pSrcInfo->pMemDesc), + NV_ERR_INVALID_ARGUMENT); + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + { + NvU32 mappingFlags = flags | TRANSFER_FLAGS_ALLOW_MAPPING_REUSE; + NvU8 *pDst = memmgrMemBeginTransfer(pMemoryManager, pDstInfo, size, mappingFlags); + NvU8 *pSrc = memmgrMemBeginTransfer(pMemoryManager, pSrcInfo, size, mappingFlags); + + if (pDst != NULL && pSrc != NULL) + { + portMemCopy(pDst, size, pSrc, size); + } + + memmgrMemEndTransfer(pMemoryManager, pSrcInfo, size, mappingFlags); + memmgrMemEndTransfer(pMemoryManager, pDstInfo, size, mappingFlags); + + NV_CHECK_OR_RETURN(LEVEL_ERROR, pDst != NULL && pSrc != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + } + break; + case TRANSFER_TYPE_GSP_DMA: + if (IS_GSP_CLIENT(pGpu)) + { + NV_ASSERT_OK_OR_RETURN( + _memmgrMemcpyWithGsp(pGpu, pDstInfo, pSrcInfo, size)); + } + else + { + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_ARGUMENT); + } + break; + case TRANSFER_TYPE_CE: + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + break; + case TRANSFER_TYPE_CE_PRI: + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + break; + case TRANSFER_TYPE_BAR0: + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + break; + } + + return NV_OK; +} + +static NV_STATUS +_memmgrMemReadOrWriteUsingStagingBuffer +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pAlloc, + void *pBuf, + NvU64 size, + NvU32 transferType, + NvBool bRead +) +{ + MEMORY_DESCRIPTOR *pStagingBuf = NULL; + void *pStagingBufMap = NULL; + void *pStagingBufPriv = NULL; + TRANSFER_SURFACE staging = {0}; + TRANSFER_SURFACE *pSrc = bRead ? pAlloc : &staging; + TRANSFER_SURFACE *pDst = bRead ? &staging : pAlloc; + NV_STATUS status = NV_OK; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + _memmgrAllocAndMapSurface(ENG_GET_GPU(pMemoryManager), size, &pStagingBuf, &pStagingBufMap, + &pStagingBufPriv)); + staging.pMemDesc = pStagingBuf; + + if (!bRead) + { + portMemCopy(pStagingBufMap, size, pBuf, size); + } + + NV_ASSERT_OK_OR_GOTO(status, memmgrMemCopyWithTransferType(pMemoryManager, pDst, pSrc, size, transferType, 0), failed); + + if (bRead) + { + portMemCopy(pBuf, size, pStagingBufMap, size); + } + +failed: + _memmgrUnmapAndFreeSurface(pStagingBuf, pStagingBufMap, pStagingBufPriv); + + return status; +} + +/*! 
+ * @brief This function is used for setting a memory region to a constant state + * using a specified memory transfer technique + * + * @param[in] pDstInfo TRANSFER_SURFACE info for destination region + * @param[in] value Value to be written to the region + * @param[in] size Size in bytes of the memory to be initialized + * @param[in] transferType Memory transfer technique to be used + * @param[in] flags Flags + */ +static NV_STATUS +memmgrMemSetWithTransferType +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pDstInfo, + NvU32 value, + NvU32 size, + TRANSFER_TYPE transferType, + NvU32 flags +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + + // Sanitize the input + NV_ASSERT_OK_OR_RETURN(memmgrCheckSurfaceBounds(pDstInfo, size)); + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + { + NvU32 mappingFlags = flags | TRANSFER_FLAGS_ALLOW_MAPPING_REUSE; + NvU8 *pDst = memmgrMemBeginTransfer(pMemoryManager, pDstInfo, size, mappingFlags); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pDst != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + portMemSet(pDst, value, size); + memmgrMemEndTransfer(pMemoryManager, pDstInfo, size, mappingFlags); + } + break; + case TRANSFER_TYPE_GSP_DMA: + if (IS_GSP_CLIENT(pGpu)) + { + NV_ASSERT_OK_OR_RETURN( + _memmgrMemsetWithGsp(pGpu, pDstInfo, value, size)); + } + else + { + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_ARGUMENT); + } + break; + case TRANSFER_TYPE_CE: + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + break; + case TRANSFER_TYPE_CE_PRI: + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + break; + case TRANSFER_TYPE_BAR0: + NV_PRINTF(LEVEL_ERROR, "BAR0 memset unimplemented\n"); + NV_ASSERT(0); + break; + + } + + return NV_OK; +} + +/*! + * @brief This function is used for writing data placed in a caller passed buffer + * to a given memory region using the specified memory transfer technique + * + * @param[in] pDstInfo TRANSFER_SURFACE info for the destination region + * @param[in] pBuf Buffer allocated by caller + * @param[in] size Size in bytes of the buffer + * @param[in] transferType Memory transfer technique to be used + * @param[in] flags Flags + */ +static NV_STATUS +memmgrMemWriteWithTransferType +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pDstInfo, + void *pBuf, + NvU64 size, + TRANSFER_TYPE transferType, + NvU32 flags +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + + // Sanitize the input + NV_ASSERT_OK_OR_RETURN(memmgrCheckSurfaceBounds(pDstInfo, size)); + NV_ASSERT_OR_RETURN(pBuf != NULL, NV_ERR_INVALID_ARGUMENT); + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + { + NvU32 mappingFlags = flags | TRANSFER_FLAGS_ALLOW_MAPPING_REUSE; + NvU8 *pDst = memmgrMemBeginTransfer(pMemoryManager, pDstInfo, size, mappingFlags); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pDst != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + portMemCopy(pDst, size, pBuf, size); + memmgrMemEndTransfer(pMemoryManager, pDstInfo, size, mappingFlags); + } + break; + case TRANSFER_TYPE_GSP_DMA: + if (IS_GSP_CLIENT(pGpu)) + { + NV_PRINTF(LEVEL_INFO, "Calling GSP DMA task\n"); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + _memmgrMemReadOrWriteWithGsp(pGpu, pDstInfo, pBuf, size, + NV_FALSE /* bRead */)); + } + else + { + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_ARGUMENT); + } + break; + case TRANSFER_TYPE_CE: + case TRANSFER_TYPE_CE_PRI: + case TRANSFER_TYPE_BAR0: + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + _memmgrMemReadOrWriteUsingStagingBuffer(pMemoryManager, pDstInfo, pBuf, size, + transferType, NV_FALSE /* bRead */)); + break; + } + + return NV_OK; +} + +/*! 
+ * @brief This function is used for reading specified number of bytes from + * a source memory region into a caller passed buffer using a specified + * memory transfer technique + * + * @param[in] pSrcInfo TRANSFER_SURFACE info for the source region + * @param[in] pBuf Caller allocated buffer + * @param[in] size Size in bytes of the buffer + * @param[in] transferType Memory transfer technique to be used + * @param[in] flags Flags + */ +static NV_STATUS +memmgrMemReadWithTransferType +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pSrcInfo, + void *pBuf, + NvU64 size, + TRANSFER_TYPE transferType, + NvU32 flags +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + + // Sanitize the input + NV_ASSERT_OK_OR_RETURN(memmgrCheckSurfaceBounds(pSrcInfo, size)); + NV_ASSERT_OR_RETURN(pBuf != NULL, NV_ERR_INVALID_ARGUMENT); + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + { + NvU32 mappingFlags = flags | TRANSFER_FLAGS_ALLOW_MAPPING_REUSE; + NvU8 *pSrc = memmgrMemBeginTransfer(pMemoryManager, pSrcInfo, size, mappingFlags); + NV_CHECK_OR_RETURN(LEVEL_INFO, pSrc != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + portMemCopy(pBuf, size, pSrc, size); + memmgrMemEndTransfer(pMemoryManager, pSrcInfo, size, mappingFlags); + } + break; + case TRANSFER_TYPE_GSP_DMA: + if (IS_GSP_CLIENT(pGpu)) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + _memmgrMemReadOrWriteWithGsp(pGpu, pSrcInfo, pBuf, size, + NV_TRUE /* bRead */)); + } + else + { + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_ARGUMENT); + } + break; + case TRANSFER_TYPE_CE: + case TRANSFER_TYPE_CE_PRI: + case TRANSFER_TYPE_BAR0: + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + _memmgrMemReadOrWriteUsingStagingBuffer(pMemoryManager, pSrcInfo, pBuf, size, + transferType, NV_TRUE /* bRead */)); + break; + } + + return NV_OK; +} + +/* ------------------------ Public functions --------------------------------------- */ + +NvU64 memUtilsLeastCommonAlignment(NvU64 align1, NvU64 align2) +{ + NvU64 a, b; // For Euclid's algorithm + NvU64 lcm; // Least Common Multiple of align1 and align2 + NvU64 maxAlignment = NV_U64_MAX; + + // WOLOG, make sure align1 >= align2. + // + if (align2 > align1) + { + NvU64 tmp = align1; + align1 = align2; + align2 = tmp; + } + + // If align2 is 0, return min(align1, maxAlignment) + // + if (align2 == 0) + { + return align1 < maxAlignment ? align1 : maxAlignment; + } + + // Use Euclid's algorithm (GCD(a, b) = GCD(b, a % b)) to find the + // GCD of the two alignments, and use the GCD to find the LCM. + // + a = align1; + b = align2; + while (b != 0) + { + NvU64 old_a = a; + a = b; + b = old_a % b; + NV_ASSERT(a > b); // Ensure termination. Should never fail. + } + lcm = align1 * (align2 / a); // May overflow + + // Return min(lcm, maxAlignment). Also return maxAlignment if the + // lcm calculation overflowed, since that means it must have been + // much bigger than maxAlignment. 
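+    // Worked example (illustrative values): align1 = 0x6000 and
+    // align2 = 0x4000 give GCD a = 0x2000, so
+    // lcm = 0x6000 * (0x4000 / 0x2000) = 0xC000.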
+ // + if (lcm > maxAlignment || lcm < align1 || + 0 != (lcm % align1) || 0 != (lcm % align2)) + { + NV_CHECK_FAILED(LEVEL_ERROR, "Alignment limit exceeded"); + return maxAlignment; + } + return lcm; +} + +void memUtilsInitFBAllocInfo +( + NV_MEMORY_ALLOCATION_PARAMS *pAllocParams, + FB_ALLOC_INFO *pFbAllocInfo, + NvHandle hClient, + NvHandle hDevice +) +{ + pFbAllocInfo->pageFormat->type = pAllocParams->type; + pFbAllocInfo->owner = pAllocParams->owner; + pFbAllocInfo->hwResId = 0; + pFbAllocInfo->pad = 0; + pFbAllocInfo->alignPad = 0; + pFbAllocInfo->height = pAllocParams->height; + pFbAllocInfo->width = pAllocParams->width; + pFbAllocInfo->pitch = pAllocParams->pitch; + pFbAllocInfo->size = pAllocParams->size; + pFbAllocInfo->origSize = pAllocParams->size; + pFbAllocInfo->adjustedSize = pAllocParams->size; + pFbAllocInfo->offset = ~0; + pFbAllocInfo->pageFormat->flags = pAllocParams->flags; + pFbAllocInfo->pageFormat->attr = pAllocParams->attr; + pFbAllocInfo->retAttr = pAllocParams->attr; + pFbAllocInfo->pageFormat->attr2 = pAllocParams->attr2; + pFbAllocInfo->retAttr2 = pAllocParams->attr2; + pFbAllocInfo->format = pAllocParams->format; + pFbAllocInfo->comprCovg = pAllocParams->comprCovg; + pFbAllocInfo->zcullCovg = 0; + pFbAllocInfo->ctagOffset = pAllocParams->ctagOffset; + pFbAllocInfo->bIsKernelAlloc = NV_FALSE; + pFbAllocInfo->internalflags = 0; + pFbAllocInfo->hClient = hClient; + pFbAllocInfo->hDevice = hDevice; + + if ((pAllocParams->flags & NVOS32_ALLOC_FLAGS_ALIGNMENT_HINT) || + (pAllocParams->flags & NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE)) + pFbAllocInfo->align = pAllocParams->alignment; + else + pFbAllocInfo->align = RM_PAGE_SIZE; + + if (pAllocParams->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) + { + pFbAllocInfo->offset = pAllocParams->offset; + pFbAllocInfo->desiredOffset = pAllocParams->offset; + } +} + + +MEMORY_DESCRIPTOR * +memmgrMemUtilsGetMemDescFromHandle_IMPL +( + MemoryManager *pMemoryManager, + NvHandle hClient, + NvHandle hMemory +) +{ + RsResourceRef *pMemoryRef; + Memory *pMemory; + + if (serverutilGetResourceRef(hClient, hMemory, &pMemoryRef) != NV_OK) + { + return NULL; + } + + pMemory = dynamicCast(pMemoryRef->pResource, Memory); + if (pMemory == NULL) + { + return NULL; + } + return pMemory->pMemDesc; +} + +/*! + * @brief This function is used for write a value placed in a caller passed buffer + * to a given memory region while only mapping regions as large as the given + * block size. 
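+ *        A minimal usage sketch (illustrative values; TRANSFER_FLAGS_NONE is
+ *        assumed to be the no-op flag value) that clears an entire memdesc
+ *        using the default 1MB block size:
+ *
+ *          memmgrMemsetInBlocks(pMemoryManager, pMemDesc,
+ *                               0,                         // value
+ *                               0,                         // baseOffset
+ *                               memdescGetSize(pMemDesc),  // size
+ *                               TRANSFER_FLAGS_NONE,       // flags
+ *                               0);                        // 0 => MEMORY_COPY_BLOCK_SIZE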
+ *
+ * @param[in] pMemDesc   Memory descriptor of buffer to write
+ * @param[in] value      Value to be written
+ * @param[in] baseOffset Offset of entire buffer to write
+ * @param[in] size       Size in bytes of the buffer
+ * @param[in] flags      Flags
+ * @param[in] blockSize  Maximum size of a mapping to use
+ */
+NV_STATUS
+memmgrMemsetInBlocks_IMPL
+(
+    MemoryManager     *pMemoryManager,
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvU32              value,
+    NvU64              baseOffset,
+    NvU64              size,
+    NvU32              flags,
+    NvU32              blockSize
+)
+{
+    NV_STATUS status    = NV_OK;
+    OBJGPU   *pGpu      = ENG_GET_GPU(pMemoryManager);
+    NvU64     remaining = size;
+    NvU64     offset    = 0;
+
+    if (blockSize == 0)
+    {
+        blockSize = MEMORY_COPY_BLOCK_SIZE;
+    }
+
+    while ((remaining > 0) && (status == NV_OK))
+    {
+        MEMORY_DESCRIPTOR *pSubMemDesc = NULL;
+        NvU32 mapSize = NV_MIN(blockSize, remaining);
+        TRANSFER_SURFACE surf = {0};
+
+        NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, memdescCreateSubMem(&pSubMemDesc, pMemDesc, pGpu, offset + baseOffset, mapSize));
+
+        surf.pMemDesc = pSubMemDesc;
+        surf.offset = 0;
+
+        NV_ASSERT_OK_OR_RETURN(
+            memmgrMemSet(pMemoryManager, &surf, value,
+                         pSubMemDesc->Size,
+                         flags));
+
+        memdescFree(pSubMemDesc);
+        memdescDestroy(pSubMemDesc);
+
+        offset += mapSize;
+        remaining -= mapSize;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief This function is used for copying data b/w two memory regions.
+ *        Both memory regions can be in the same aperture or in different apertures.
+ *
+ * @param[in] pDstInfo TRANSFER_SURFACE info for destination region
+ * @param[in] pSrcInfo TRANSFER_SURFACE info for source region
+ * @param[in] size     Size in bytes of the memory transfer
+ * @param[in] flags    Flags
+ */
+NV_STATUS
+memmgrMemCopy_IMPL
+(
+    MemoryManager    *pMemoryManager,
+    TRANSFER_SURFACE *pDstInfo,
+    TRANSFER_SURFACE *pSrcInfo,
+    NvU32             size,
+    NvU32             flags
+)
+{
+    TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager,
+                                                          pDstInfo, pSrcInfo, flags);
+
+    return memmgrMemCopyWithTransferType(pMemoryManager, pDstInfo, pSrcInfo,
+                                         size, transferType, flags);
+}
+
+/*!
+ * @brief This function is used for setting a memory region to a constant state
+ *
+ * @param[in] pDstInfo TRANSFER_SURFACE info for the destination region
+ * @param[in] value    Value to be written to the region
+ * @param[in] size     Size in bytes of the memory to be initialized
+ * @param[in] flags    Flags
+ */
+NV_STATUS
+memmgrMemSet_IMPL
+(
+    MemoryManager    *pMemoryManager,
+    TRANSFER_SURFACE *pDstInfo,
+    NvU32             value,
+    NvU32             size,
+    NvU32             flags
+)
+{
+    TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager,
+                                                          pDstInfo, NULL, flags);
+
+    return memmgrMemSetWithTransferType(pMemoryManager, pDstInfo, value,
+                                        size, transferType, flags);
+}
+
+/*!
+ * @brief This function is used for setting a memory region to a constant state
+ *
+ * @param[in] pMemDesc Memory descriptor of the region to memset
+ * @param[in] value    Value to be written to the region
+ * @param[in] flags    Flags
+ */
+NV_STATUS
+memmgrMemDescMemSet_IMPL
+(
+    MemoryManager     *pMemoryManager,
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvU32              value,
+    NvU32              flags
+)
+{
+    TRANSFER_SURFACE transferSurface = {.offset = 0, .pMemDesc = pMemDesc};
+    TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager,
+                                                          &transferSurface, NULL, flags);
+
+    return memmgrMemSetWithTransferType(pMemoryManager, &transferSurface, value,
+                                        (NvU32)memdescGetSize(pMemDesc),
+                                        transferType, flags);
+}
+
+static NV_STATUS
+memmgrMemReadOrWriteInBlocks
+(
+    MemoryManager    *pMemoryManager,
+    TRANSFER_SURFACE *pSurf,
+    void             *pBuf,
+    NvU64             size,
+    TRANSFER_TYPE     transferType,
+    NvU32             flags,
+    NvBool            bRead
+)
+{
+    NvBool bCreateSubMemDesc = (transferType == TRANSFER_TYPE_PROCESSOR) &&
+                               !(flags & TRANSFER_FLAGS_USE_BAR1);
+    NvU64 remainingSize = size;
+    NvU64 baseOffset = pSurf->offset;
+    MEMORY_DESCRIPTOR *pMemDesc = pSurf->pMemDesc;
+    NvU64 offset = 0;
+    NV_STATUS status = NV_OK;
+
+    while ((remainingSize > 0) && (status == NV_OK))
+    {
+        MEMORY_DESCRIPTOR *pSubMemDesc = NULL;
+        NvU64 copySize = NV_MIN(MEMORY_COPY_BLOCK_SIZE, remainingSize);
+        TRANSFER_SURFACE tmpSurf = { 0 };
+
+        if (bCreateSubMemDesc)
+        {
+            //
+            // We need the submemdesc code to not align up because we may need to downgrade
+            // the page size on the submemdesc
+            //
+            NvBool prevAlignIgnore = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_PAGE_SIZE_ALIGN_IGNORE);
+
+            memdescSetFlag(pMemDesc, MEMDESC_FLAGS_PAGE_SIZE_ALIGN_IGNORE, NV_TRUE);
+
+            NV_ASSERT_OK_OR_RETURN(
+                memdescCreateSubMem(&pSubMemDesc, pMemDesc, pMemDesc->pGpu, offset + baseOffset, copySize));
+
+            memdescSetFlag(pMemDesc, MEMDESC_FLAGS_PAGE_SIZE_ALIGN_IGNORE, prevAlignIgnore);
+
+            if (memdescGetPageSize(pSubMemDesc, AT_GPU) > RM_PAGE_SIZE_HUGE)
+            {
+                //
+                // Downgrade page size if more than 2MB because BAR2
+                // cannot handle massive page sizes in the copy
+                //
+                memdescSetPageSize(pSubMemDesc, AT_GPU, RM_PAGE_SIZE_HUGE);
+            }
+
+            tmpSurf.pMemDesc = pSubMemDesc;
+            tmpSurf.offset = 0;
+        }
+        else
+        {
+            tmpSurf.pMemDesc = pMemDesc;
+            tmpSurf.offset = offset + baseOffset;
+        }
+
+        if (bRead)
+        {
+            status = memmgrMemReadWithTransferType(pMemoryManager, &tmpSurf, (NvU8 *)pBuf + offset,
+                                                   copySize, transferType, flags);
+        }
+        else
+        {
+            status = memmgrMemWriteWithTransferType(pMemoryManager, &tmpSurf, (NvU8 *)pBuf + offset,
+                                                    copySize, transferType, flags);
+        }
+        NV_ASSERT_OK(status);
+
+        if (bCreateSubMemDesc)
+        {
+            memdescFree(pSubMemDesc);
+            memdescDestroy(pSubMemDesc);
+        }
+
+        offset += copySize;
+        remainingSize -= copySize;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief This function is used for writing data placed in a user buffer + * to a given memory region + * + * @param[in] pDstInfo TRANSFER_SURFACE info for the destination region + * @param[in] pBuf Buffer allocated by caller + * @param[in] size Size in bytes of the buffer + * @param[in] flags Flags + */ +NV_STATUS +memmgrMemWrite_IMPL +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pDstInfo, + void *pBuf, + NvU64 size, + NvU32 flags +) +{ + TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager, + pDstInfo, NULL, flags); + + NV_STATUS status = memmgrMemWriteWithTransferType(pMemoryManager, pDstInfo, pBuf, + size, transferType, flags); + if (status == NV_OK) + return NV_OK; + + return memmgrMemReadOrWriteInBlocks(pMemoryManager, pDstInfo, pBuf, + size, transferType, flags, NV_FALSE); +} + +/*! + * @brief This function is used for reading specified number of bytes from + * a source memory region into a caller passed buffer + * + * @param[in] pSrcInfo TRANSFER_SURFACE info for the source region + * @param[in] pBuf Caller allocated buffer + * @param[in] size Size in bytes of the buffer + * @param[in] flags Flags + */ +NV_STATUS +memmgrMemRead_IMPL +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pSrcInfo, + void *pBuf, + NvU64 size, + NvU32 flags +) +{ + TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager, + NULL, pSrcInfo, flags); + + NV_STATUS status = memmgrMemReadWithTransferType(pMemoryManager, pSrcInfo, pBuf, + size, transferType, flags); + if (status == NV_OK) + return NV_OK; + + return memmgrMemReadOrWriteInBlocks(pMemoryManager, pSrcInfo, pBuf, + size, transferType, flags, NV_TRUE); +} + +/*! + * @brief This helper function can be used to begin transfers + * + * @param[in] pTransferInfo Transfer information + * @param[in] shadowBufSize Size of allocated shadow buffer in case of shadow mapping + * @param[in] flags Flags + */ +NvU8 * +memmgrMemBeginTransfer_IMPL +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pTransferInfo, + NvU64 shadowBufSize, + NvU32 flags +) +{ + TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager, + pTransferInfo, NULL, flags); + MEMORY_DESCRIPTOR *pMemDesc = pTransferInfo->pMemDesc; + NvU64 offset = pTransferInfo->offset; + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + void *pPtr = NULL; + void *pPriv = NULL; + NvU64 memSz = shadowBufSize; + NvU8 *pMapping = memdescGetKernelMapping(pTransferInfo->pMemDesc); + + NV_ASSERT_OR_RETURN(memmgrCheckSurfaceBounds(pTransferInfo, memSz) == NV_OK, NULL); + NV_ASSERT_OR_RETURN(pTransferInfo->pMapping == NULL, NULL); + NV_ASSERT_OR_RETURN(pTransferInfo->pMappingPriv == NULL, NULL); + + if ((flags & TRANSFER_FLAGS_ALLOW_MAPPING_REUSE) && pMapping != NULL) + { + // keep TRANSFER_SURFACE's pMapping NULL, as mapping has a different owner + return pMapping + pTransferInfo->offset; + } + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + if (flags & TRANSFER_FLAGS_USE_BAR1) + { + NvU32 protect = NV_PROTECT_READ_WRITE; + + if (flags & TRANSFER_FLAGS_MAP_PROTECT_READABLE) + { + protect = NV_PROTECT_READABLE; + } + else if (flags & TRANSFER_FLAGS_MAP_PROTECT_WRITEABLE) + { + protect = NV_PROTECT_WRITEABLE; + } + + NV_CHECK_OR_RETURN(LEVEL_ERROR, + memdescMapOld(pMemDesc, offset, memSz, NV_TRUE, protect, &pPtr, &pPriv) == NV_OK, NULL); + } + else + { + NV_CHECK_OR_RETURN(LEVEL_INFO, (pPtr = memdescMapInternal(pGpu, pMemDesc, flags)) != NULL, NULL); + + pPtr = (NvU8 *)pPtr + offset; + } + break; + case TRANSFER_TYPE_GSP_DMA: + case TRANSFER_TYPE_CE: + 
case TRANSFER_TYPE_CE_PRI: + case TRANSFER_TYPE_BAR0: + if (flags & TRANSFER_FLAGS_SHADOW_ALLOC) + { + NV_ASSERT_OR_RETURN((pPtr = portMemAllocNonPaged(memSz)), NULL); + if (flags & TRANSFER_FLAGS_SHADOW_INIT_MEM) + { + NV_ASSERT_OK(memmgrMemRead(pMemoryManager, pTransferInfo, pPtr, memSz, flags)); + } + } + break; + default: + NV_ASSERT(0); + } + + pTransferInfo->pMapping = pPtr; + pTransferInfo->pMappingPriv = pPriv; + return pPtr; +} + +/*! + * @brief This helper function can be used to end transfers + * + * @param[in] pTransferInfo Transfer information + * @param[in] shadowBufSize Size of allocated shadow buffer in case of shadow mapping + * @param[in] flags Flags + */ +void +memmgrMemEndTransfer_IMPL +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pTransferInfo, + NvU64 shadowBufSize, + NvU32 flags +) +{ + TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager, + pTransferInfo, NULL, flags); + MEMORY_DESCRIPTOR *pMemDesc = pTransferInfo->pMemDesc; + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + NvU64 memSz = shadowBufSize; + + NV_ASSERT_OR_RETURN_VOID(memmgrCheckSurfaceBounds(pTransferInfo, memSz) == NV_OK); + + if (pTransferInfo->pMapping == NULL) + { + // Normal for full memdesc mapping reuse + return; + } + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + if (flags & TRANSFER_FLAGS_USE_BAR1) + { + memdescUnmap(pMemDesc, NV_TRUE, pTransferInfo->pMapping, pTransferInfo->pMappingPriv); + } + else + { + memdescUnmapInternal(pGpu, pMemDesc, flags); + } + break; + case TRANSFER_TYPE_GSP_DMA: + case TRANSFER_TYPE_CE: + case TRANSFER_TYPE_CE_PRI: + case TRANSFER_TYPE_BAR0: + NV_ASSERT_OK(memmgrMemWrite(pMemoryManager, pTransferInfo, pTransferInfo->pMapping, memSz, flags)); + portMemFree(pTransferInfo->pMapping); + break; + default: + NV_ASSERT(0); + } + + pTransferInfo->pMapping = NULL; + pTransferInfo->pMappingPriv = NULL; +} + +/*! + * @brief Helper function that ends transfers to a memdesc with default offset/size + * + * @param[in] pMemDesc Memory descriptor to end transfer to + * @param[in] flags Flags + */ +void +memmgrMemDescEndTransfer_IMPL +( + MemoryManager *pMemoryManager, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + if (pMemDesc == NULL) + { + return; + } + + TRANSFER_SURFACE transferSurface = { 0 }; + transferSurface.offset = 0; + transferSurface.pMemDesc = pMemDesc; + transferSurface.pMapping = memdescGetKernelMapping(pMemDesc); + transferSurface.pMappingPriv = memdescGetKernelMappingPriv(pMemDesc); + + memdescSetKernelMapping(pMemDesc, NULL); + memdescSetKernelMappingPriv(pMemDesc, NULL); + + memmgrMemEndTransfer(pMemoryManager, &transferSurface, memdescGetSize(pMemDesc), flags); +} + +/*! 
+ * @brief Helper function that begins transfers to a memdesc with default offset/size + * + * @param[in] pMemDesc Memory descriptor to begin transfer to + * @param[in] flags Flags + */ +NvU8 * +memmgrMemDescBeginTransfer_IMPL +( + MemoryManager *pMemoryManager, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + NV_ASSERT_OR_RETURN(pMemDesc != NULL, NULL); + NV_ASSERT_OR_RETURN(memdescGetKernelMapping(pMemDesc) == NULL, NULL); + NV_ASSERT_OR_RETURN(memdescGetKernelMappingPriv(pMemDesc) == NULL, NULL); + NV_ASSERT_OR_RETURN(!(flags & TRANSFER_FLAGS_ALLOW_MAPPING_REUSE), NULL); + + TRANSFER_SURFACE transferSurface = {.offset = 0, .pMemDesc = pMemDesc}; + NvU8 *pMapping = memmgrMemBeginTransfer(pMemoryManager, &transferSurface, memdescGetSize(pMemDesc), flags); + + NV_ASSERT_OR_RETURN(pMapping != NULL, NULL); + NV_ASSERT_OR_RETURN(transferSurface.pMapping == pMapping, NULL); + + // Set mapping and priv to reuse during later operations, or at unmap time + memdescSetKernelMapping(pMemDesc, pMapping); + memdescSetKernelMappingPriv(pMemDesc, transferSurface.pMappingPriv); + + return pMapping; +} + +/*! + * @brief This function is used to allocate common resources across memory + * classes, and must be used before memory-specific resource alloc. + * + * @param[in/out] pAllocRequest User-provided alloc request struct + * @param[in/out] pFbAllocInfo Initialized FB_ALLOC_INFO struct to alloc + */ +NV_STATUS +memmgrAllocResources_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, + FB_ALLOC_INFO *pFbAllocInfo +) +{ + NV_STATUS status = NV_OK; + NvU64 alignment = 0; + NV_MEMORY_ALLOCATION_PARAMS *pVidHeapAlloc = pAllocRequest->pUserParams; + NV_ADDRESS_SPACE addrSpace = memmgrAllocGetAddrSpace(pMemoryManager, pVidHeapAlloc->flags, + pFbAllocInfo->retAttr); + + // IRQL TEST: must be running at equivalent of passive-level + NV_ASSERT_OR_RETURN(!osIsRaisedIRQL(), NV_ERR_INVALID_IRQ_LEVEL); + + // + // Check for valid size. + // + if (pVidHeapAlloc->size == 0) + return NV_ERR_INVALID_ARGUMENT; + + // + // Ensure a valid allocation pVidHeapAlloc->type was passed in + // + if (pVidHeapAlloc->type > NVOS32_NUM_MEM_TYPES - 1) + return NV_ERR_INVALID_ARGUMENT; + + if (ADDR_VIRTUAL != addrSpace) + { + // If vidmem not requested explicitly, decide on the physical location. + if (FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _PCI, pFbAllocInfo->retAttr) || + FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _ANY, pFbAllocInfo->retAttr)) + { + if (ADDR_FBMEM == addrSpace) + { + pFbAllocInfo->retAttr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, pFbAllocInfo->retAttr); + } + else + { + pFbAllocInfo->retAttr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, pFbAllocInfo->retAttr); + } + } + } + else // Virtual + { + // Clear location to ANY since virtual does not associate with location. + pFbAllocInfo->retAttr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _ANY, pFbAllocInfo->retAttr); + } + + // + // for fixed allocation check if the alignment needs to adjusted. + // some hardware units request allocation aligned to smaller than + // page sizes which can be handled through alignPad + // + if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) + { + // + // is our desired offset suitably aligned? + // if not adjust alignment using alignPad(offset into a page), the + // allocation is page size aligned as required for swizzling. 
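+    // Worked example (illustrative values): desiredOffset = 0x10100 with
+    // align = 0xFFF (4KB) gives alignPad = 0x100, and desiredOffset is
+    // moved down to the page boundary at 0x10000.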
+        //
+        if (pFbAllocInfo->desiredOffset % (pFbAllocInfo->align + 1))
+        {
+            pFbAllocInfo->alignPad = pFbAllocInfo->desiredOffset % (pFbAllocInfo->align + 1);
+            pFbAllocInfo->desiredOffset -= pFbAllocInfo->alignPad;
+        }
+    }
+
+    //
+    // Refresh search parameters.
+    //
+    pFbAllocInfo->adjustedSize = pFbAllocInfo->size - pFbAllocInfo->alignPad;
+    pVidHeapAlloc->height = pFbAllocInfo->height;
+    pVidHeapAlloc->pitch = pFbAllocInfo->pitch;
+
+    //
+    // The api takes alignment-1 (used to be a mask).
+    //
+    alignment = pFbAllocInfo->align + 1;
+    pVidHeapAlloc->alignment = pFbAllocInfo->align + 1; // convert mask to size
+
+    //
+    // Allow caller to request host page alignment to make it easier
+    // to move things around with host os VM subsystem
+    //
+    if ((pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_FORCE_ALIGN_HOST_PAGE) &&
+        (addrSpace == ADDR_FBMEM))
+    {
+        OBJSYS *pSys = SYS_GET_INSTANCE();
+        NvU64 hostPageSize = pSys->cpuInfo.hostPageSize;
+
+        // hostPageSize *should* always be set, but....
+        if (hostPageSize == 0)
+            hostPageSize = RM_PAGE_SIZE;
+
+        alignment = memUtilsLeastCommonAlignment(alignment, hostPageSize);
+    }
+
+    pVidHeapAlloc->alignment = alignment;
+    pFbAllocInfo->align = alignment - 1;
+
+    return status;
+}
+
+/*!
+ * @brief This function is used to create a memory descriptor if needed.
+ *
+ * @param[in/out] pAllocRequest   User-provided alloc request struct
+ * @param[in/out] pFbAllocInfo    Initialized FB_ALLOC_INFO struct to alloc
+ * @param[out]    ppMemDesc       Double pointer to created descriptor
+ * @param[in]     pHeap           Heap pointer to store in descriptor
+ * @param[in]     addrSpace       Address space identifier
+ * @param[in]     memDescFlags    Memory descriptor alloc flags
+ * @param[out]    bAllocedMemDesc NV_TRUE if a descriptor was created
+ */
+NV_STATUS
+memUtilsAllocMemDesc
+(
+    OBJGPU                    *pGpu,
+    MEMORY_ALLOCATION_REQUEST *pAllocRequest,
+    FB_ALLOC_INFO             *pFbAllocInfo,
+    MEMORY_DESCRIPTOR        **ppMemDesc,
+    Heap                      *pHeap,
+    NV_ADDRESS_SPACE           addrSpace,
+    NvBool                     bContig,
+    NvBool                    *bAllocedMemDesc
+)
+{
+    NV_STATUS status = NV_OK;
+
+    //
+    // Allocate a memory descriptor if needed. We do this after the fbHwAllocResources() call
+    // so we have the updated size information. Linear callers like memdescAlloc() can live with
+    // only having access to the requested size in bytes, but block linear callers really do
+    // need to allocate after fbAlloc() rounding takes place.
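+    // Hence memdescCreate() below is passed pFbAllocInfo->adjustedSize
+    // rather than the caller's original request size.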
+ // + if (pAllocRequest->pMemDesc == NULL) + { + NvU64 memDescFlags = MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE; + + if (FLD_TEST_DRF(OS32, _ATTR2, _USE_EGM, _TRUE, pFbAllocInfo->retAttr2)) + { + memDescFlags |= MEMDESC_FLAGS_ALLOC_FROM_EGM; + } + + // + // Allocate a contig vidmem descriptor now; if needed we'll + // allocate a new noncontig memdesc later + // + status = memdescCreate(&pAllocRequest->pMemDesc, pGpu, pFbAllocInfo->adjustedSize, 0, bContig, + addrSpace, NV_MEMORY_UNCACHED, memDescFlags); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "cannot alloc memDesc!\n"); + return status; + } + + *bAllocedMemDesc = NV_TRUE; + } + + *ppMemDesc = pAllocRequest->pMemDesc; + (*ppMemDesc)->pHeap = pHeap; + + // Set attributes tracked by the memdesc + memdescSetPteKind(*ppMemDesc, pFbAllocInfo->format); + memdescSetHwResId(*ppMemDesc, pFbAllocInfo->hwResId); + + return status; +} + +NV_STATUS +memmgrMemUtilsChannelSchedulingSetup_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel +) +{ + return NV_ERR_NOT_SUPPORTED; +} diff --git a/src/nvidia/src/kernel/gpu/subdevice/generic_engine.c b/src/nvidia/src/kernel/gpu/subdevice/generic_engine.c new file mode 100644 index 0000000..65e97ce --- /dev/null +++ b/src/nvidia/src/kernel/gpu/subdevice/generic_engine.c @@ -0,0 +1,160 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "os/os.h" +#include "gpu/gpu.h" +#include "gpu/subdevice/generic_engine.h" +#include "rmapi/client.h" + + +NV_STATUS +genapiConstruct_IMPL +( + GenericEngineApi *pGenericEngineApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RS_ITERATOR it; + OBJGPU *pGpu = GPU_RES_GET_GPU(pGenericEngineApi); + + if (!gpuIsClassSupported(pGpu, pCallContext->pResourceRef->externalClassId)) + return NV_ERR_INVALID_CLASS; + + // + // We allow multiple instances of GenericEngineApi class, however, only want + // to allow a single instance of each external class id type. E.g.: + // GF100_SUBDEVICE_GRAPHICS is allowed alongside GF100_SUBDEVICE_FB. 
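+    // A second instance of the same external class under this parent,
+    // however, is rejected with NV_ERR_STATE_IN_USE by the sibling scan below.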
+    //
+    it = clientRefIter(pCallContext->pClient,
+                       pCallContext->pResourceRef->pParentRef,
+                       classId(GenericEngineApi), RS_ITERATE_CHILDREN, NV_TRUE);
+
+    while (clientRefIterNext(pCallContext->pClient, &it))
+    {
+        if (it.pResourceRef->externalClassId == pCallContext->pResourceRef->externalClassId &&
+            it.pResourceRef != pCallContext->pResourceRef)
+        {
+            return NV_ERR_STATE_IN_USE;
+        }
+    }
+
+    return NV_OK;
+}
+
+void
+genapiDestruct_IMPL
+(
+    GenericEngineApi *pGenericEngineApi
+)
+{
+}
+
+NV_STATUS
+genapiControl_IMPL
+(
+    GenericEngineApi *pGenericEngineApi,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pParams
+)
+{
+    return gpuresControl_IMPL(staticCast(pGenericEngineApi, GpuResource),
+                              pCallContext, pParams);
+}
+
+NV_STATUS
+genapiMap_IMPL
+(
+    GenericEngineApi *pGenericEngineApi,
+    CALL_CONTEXT *pCallContext,
+    RS_CPU_MAP_PARAMS *pParams,
+    RsCpuMapping *pCpuMapping
+)
+{
+    OBJGPU *pGpu;
+    NvU32 engineOffset, regSize, regBase;
+    NvU32 protect;
+    NV_STATUS rmStatus;
+
+    pGpu = GPU_RES_GET_GPU(pGenericEngineApi);
+
+    // XXX The default should really be more restrictive
+    protect = NV_PROTECT_READ_WRITE;
+
+    switch (RES_GET_EXT_CLASS_ID(pGenericEngineApi))
+    {
+        default:
+            return NV_ERR_INVALID_CLASS;
+    }
+
+    // Get the offset to the engine registers
+    rmStatus = gpuGetRegBaseOffset_HAL(pGpu, regBase, &engineOffset);
+    if (rmStatus != NV_OK)
+        return rmStatus;
+
+    // Round down to nearest 4k page
+    engineOffset &= ~(0x1000-1);
+
+    // Check whether the caller is requesting more privileges than we allow
+    if (pCpuMapping->pPrivate->protect & ~protect)
+    {
+        NV_PRINTF(LEVEL_ERROR, "%s%saccess not allowed on class 0x%x\n",
+                  (pCpuMapping->pPrivate->protect & ~protect) & NV_PROTECT_READABLE ? "Read " : "",
+                  (pCpuMapping->pPrivate->protect & ~protect) & NV_PROTECT_WRITEABLE ? "Write " : "",
+                  RES_GET_EXT_CLASS_ID(pGenericEngineApi));
+
+        return NV_ERR_PROTECTION_FAULT;
+    }
+
+    // Create mapping
+    rmStatus = rmapiMapGpuCommon(staticCast(pGenericEngineApi, RsResource),
+                                 pCallContext,
+                                 pCpuMapping,
+                                 pGpu,
+                                 engineOffset,
+                                 regSize);
+    pCpuMapping->processId = osGetCurrentProcess();
+
+    if (pParams->ppCpuVirtAddr)
+        *pParams->ppCpuVirtAddr = pCpuMapping->pLinearAddress;
+
+    return rmStatus;
+}
+
+NV_STATUS
+genapiGetMapAddrSpace_IMPL
+(
+    GenericEngineApi *pGenericEngineApi,
+    CALL_CONTEXT *pCallContext,
+    NvU32 mapFlags,
+    NV_ADDRESS_SPACE *pAddrSpace
+)
+{
+    if (pAddrSpace)
+        *pAddrSpace = ADDR_REGMEM;
+
+    return NV_OK;
+}
+
diff --git a/src/nvidia/src/kernel/gpu/subdevice/subdevice.c b/src/nvidia/src/kernel/gpu/subdevice/subdevice.c
new file mode 100644
index 0000000..7dfc358
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/subdevice/subdevice.c
@@ -0,0 +1,370 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This is a subdevice resource implementation. +* +******************************************************************************/ + +#include "resserv/resserv.h" +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu_mgr/gpu_mgr.h" + +#include "vgpu/rpc.h" +#include "core/locks.h" +#include "rmapi/rs_utils.h" +#include "core/thread_state.h" + +#include "gpu/timer/objtmr.h" + +NV_STATUS +subdeviceConstruct_IMPL +( + Subdevice *pSubdevice, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV2080_ALLOC_PARAMETERS *pNv2080AllocParams = pParams->pAllocParams; + OBJGPU *pPrimaryGpu; + OBJGPU *pGpu; + NvU32 subDeviceInst; + NV_STATUS status = NV_OK; + RsClient *pRsClient = pCallContext->pClient; + GpuResource *pSubdevGpuRes = staticCast(pSubdevice, GpuResource); + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + RsResourceRef *pParentRef = pResourceRef->pParentRef; + Device *pDevice = GPU_RES_GET_DEVICE(pSubdevice); + NvU32 i; + Subdevice *pSubdeviceTest; + + if (pNv2080AllocParams == NULL) + subDeviceInst = 0; + else + subDeviceInst = pNv2080AllocParams->subDeviceId; + + // validate subdevice instance + if (gpumgrIsSubDeviceInstanceValid(subDeviceInst) == NV_FALSE) + return NV_ERR_INVALID_CLASS; + + status = gpuGetByRef(pResourceRef->pParentRef, NULL, &pPrimaryGpu); + if (status != NV_OK) + return status; + + // Lookup GPU for subdevice instance + status = gpugrpGetGpuFromSubDeviceInstance(GPU_RES_GET_GPUGRP(pDevice), subDeviceInst, &pGpu); + if (status != NV_OK) + return NV_ERR_INVALID_CLASS; + + // Check if subdevice already allocated + if (subdeviceGetByInstance(pRsClient, RES_GET_HANDLE(pDevice), subDeviceInst, &pSubdeviceTest) == NV_OK) + return NV_ERR_INSUFFICIENT_RESOURCES; + + gpuresSetGpu(pSubdevGpuRes, pGpu, NV_FALSE); + + pSubdevice->pDevice = pDevice; + pSubdevice->deviceInst = pDevice->deviceInst; + pSubdevice->subDeviceInst = subDeviceInst; + pSubdevice->bUpdateTGP = NV_FALSE; + + for (i = 0; i < NV2080_NOTIFIERS_MAXCOUNT; i++) + pSubdevice->notifyActions[i] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + + pSubdevice->hNotifierMemory = NV01_NULL_OBJECT; + pSubdevice->hSemMemory = NV01_NULL_OBJECT; + + { + } + + NV_ASSERT_OK_OR_RETURN(gpuRegisterSubdevice(pGpu, pSubdevice)); + + if (IS_VIRTUAL(pGpu) || IS_FW_CLIENT(pGpu)) + { + NV_RM_RPC_ALLOC_SUBDEVICE(pPrimaryGpu, pRsClient->hClient, pParentRef->hResource, + pResourceRef->hResource, NV20_SUBDEVICE_0, + subDeviceInst, status); + NV_ASSERT_OK_OR_RETURN(status); + } + + return status; +} + +// +// subdeviceUnsetDynamicBoostLimit_IMPL +// +// Unset Dynamic Boost limit when nvidia-powerd is terminated +// +NV_STATUS +subdeviceUnsetDynamicBoostLimit_IMPL +( + Subdevice *pSubdevice +) +{ + if (!pSubdevice->bUpdateTGP) + return NV_OK; + + OBJGPU *pGpu = 
GPU_RES_GET_GPU(pSubdevice); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + return pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_PMGR_UNSET_DYNAMIC_BOOST_LIMIT, + NULL, + 0); +} + +void +subdevicePreDestruct_IMPL +( + Subdevice *pSubdevice +) +{ + subdeviceUnsetDynamicBoostLimit(pSubdevice); +} + +void +subdeviceDestruct_IMPL +( + Subdevice* pSubdevice +) +{ + CALL_CONTEXT *pCallContext; + RsClient *pRsClient = RES_GET_CLIENT(pSubdevice); + RsResourceRef *pResourceRef = RES_GET_REF(pSubdevice); + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + + if (pSubdevice->bGcoffDisallowed) + { + osClientGcoffDisallowRefcount(pGpu->pOsGpuInfo, NV_FALSE); + } + + LOCK_METER_DATA(FREE_SUBDEVICE, 0, 0, 0); + + // TODO - Call context lookup in dtor can likely be phased out now that we have RES_GET_CLIENT + resGetFreeParams(staticCast(pSubdevice, RsResource), &pCallContext, NULL); + + // check for any pending client's timer notification for this subdevice + if ((pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) || + (pSubdevice->pTimerEvent != NULL)) + { + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + tmrEventDestroy(pTmr, pSubdevice->pTimerEvent); + pSubdevice->pTimerEvent = NULL; + pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + } + + subdeviceRestoreLockedClock(pSubdevice, pCallContext); + + // Decrement the reference count for VF if previously incremented. + subdeviceRestoreVF(pSubdevice, pCallContext); + + // Restore GR tick frequency to default. + subdeviceRestoreGrTickFreq(pSubdevice, pCallContext); + + // Remove NVLink error injection mode request + subdeviceReleaseNvlinkErrorInjectionMode(pSubdevice, pCallContext); + + subdeviceReleaseComputeModeReservation(pSubdevice, pCallContext); + +#ifdef DEBUG + NV_ASSERT(pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE); +#endif + + subdeviceUnsetGpuDebugMode(pSubdevice); + subdeviceRestoreWatchdog(pSubdevice); + + if (pResourceRef != NULL && (IS_VIRTUAL(pGpu) || IS_FW_CLIENT(pGpu))) + { + NV_RM_RPC_FREE(pGpu, pRsClient->hClient, + pResourceRef->pParentRef->hResource, + pResourceRef->hResource, status); + } + + gpuUnregisterSubdevice(pGpu, pSubdevice); +} + +NV_STATUS +subdeviceInternalControlForward_IMPL +( + Subdevice *pSubdevice, + NvU32 command, + void *pParams, + NvU32 size +) +{ + return gpuresInternalControlForward_IMPL(staticCast(pSubdevice, GpuResource), command, pParams, size); +} + +NV_STATUS +subdeviceGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hSubdevice, + Subdevice **ppSubdevice +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppSubdevice = NULL; + + status = clientGetResourceRef(pClient, hSubdevice, &pResourceRef); + if (status != NV_OK) + return status; + + *ppSubdevice = dynamicCast(pResourceRef->pResource, Subdevice); + + return (*ppSubdevice) ? 
NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +subdeviceGetByGpu_IMPL +( + RsClient *pClient, + OBJGPU *pGpu, + Subdevice **ppSubdevice +) +{ + return subdeviceGetByDeviceAndGpu(pClient, NULL, pGpu, ppSubdevice); +} + +NV_STATUS +subdeviceGetByDeviceAndGpu_IMPL +( + RsClient *pClient, + Device *pDevice, + OBJGPU *pGpu, + Subdevice **ppSubdevice +) +{ + Subdevice *pSubdevice = NULL; + RS_ITERATOR it; + RS_ITER_TYPE iterType = RS_ITERATE_DESCENDANTS; + RsResourceRef *pDeviceRef = NULL; + + *ppSubdevice = NULL; + + if (pDevice != NULL) + { + pDeviceRef = RES_GET_REF(pDevice); + iterType = RS_ITERATE_CHILDREN; + } + + it = clientRefIter(pClient, pDeviceRef, classId(Subdevice), + iterType, NV_TRUE); + + while (clientRefIterNext(pClient, &it)) + { + pSubdevice = dynamicCast(it.pResourceRef->pResource, Subdevice); + + if (GPU_RES_GET_GPU(pSubdevice) == pGpu) + { + *ppSubdevice = pSubdevice; + return NV_OK; + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS +subdeviceGetByInstance_IMPL +( + RsClient *pClient, + NvHandle hDevice, + NvU32 subDeviceInst, + Subdevice **ppSubdevice +) +{ + RsResourceRef *pDeviceRef; + Subdevice *pSubdevice; + RS_ITERATOR it; + + *ppSubdevice = NULL; + + if (clientGetResourceRefByType(pClient, hDevice, classId(Device), &pDeviceRef) != NV_OK) + return NV_ERR_INVALID_ARGUMENT; + + it = clientRefIter(pClient, pDeviceRef, classId(Subdevice), RS_ITERATE_CHILDREN, NV_TRUE); + while (clientRefIterNext(pClient, &it)) + { + pSubdevice = dynamicCast(it.pResourceRef->pResource, Subdevice); + + if (pSubdevice && pSubdevice->subDeviceInst == subDeviceInst) + { + *ppSubdevice = pSubdevice; + return NV_OK; + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +// **************************************************************************** +// Helper functions +// **************************************************************************** +void +subdeviceUnsetGpuDebugMode_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + if (!pSubdevice->bGpuDebugModeEnabled) + { + return; + } + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + pGpu->bIsDebugModeEnabled = NV_FALSE; +} + +void +subdeviceReleaseComputeModeReservation_IMPL +( + Subdevice *pSubdevice, + CALL_CONTEXT *pCallContext +) +{ + RsClient *pRsClient = pCallContext->pClient; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + // Release the reservation ONLY IF we had the reservation to begin with. Otherwise, + // leave it alone, because someone else has acquired it: + if (pGpu->hComputeModeReservation == pRsClient->hClient) + { + pGpu->hComputeModeReservation = NV01_NULL_OBJECT; + } +} + diff --git a/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c new file mode 100644 index 0000000..863e2f4 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c @@ -0,0 +1,299 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief This module contains the gpu control interfaces for the + * subdevice (NV20_SUBDEVICE_0) class. Subdevice-level control calls + * are directed unicast to the associated GPU. + */ + +#include "core/core.h" +#include "core/locks.h" +#include "diagnostics/journal.h" +#include "diagnostics/tracer.h" +#include "gpu/gpu.h" +#include "gpu/subdevice/subdevice.h" +#include "rmapi/client.h" +#include "rmapi/rs_utils.h" +#include "mem_mgr/mem.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "gpu/gsp/gsp_trace_rats_macro.h" + +// +// EVENT RM SubDevice Controls +// +NV_STATUS +subdeviceCtrlCmdEventSetTrigger_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_SW, NULL, 0, 0, 0); + + return NV_OK; +} + +// +// subdeviceCtrlCmdEventSetTriggerFifo +// +// Used to signal Vulkan timeline semaphores from the CPU. 
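+//
+// A minimal caller-side sketch (illustrative only; the client, subdevice and
+// event handles are assumed to already exist, and error checking is omitted):
+//
+//     NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS params = { 0 };
+//     params.hEvent = hEvent;   // NV01_EVENT handle backing the semaphore
+//     NvRmControl(hClient, hSubdevice,
+//                 NV2080_CTRL_CMD_EVENT_SET_TRIGGER_FIFO,
+//                 &params, sizeof(params));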
+// +NV_STATUS +subdeviceCtrlCmdEventSetTriggerFifo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS *pTriggerFifoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + engineNonStallIntrNotifyEvent(pGpu, RM_ENGINE_TYPE_HOST, + pTriggerFifoParams->hEvent); + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdEventSetNotification_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + + // NV01_EVENT must have been plugged into this subdevice + if (inotifyGetNotificationList(staticCast(pSubdevice, INotifier)) == NULL) + { + NV_PRINTF(LEVEL_INFO, "cmd 0x%x: no event list\n", NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION); + return NV_ERR_INVALID_STATE; + } + + if (pSetEventParams->event >= NV2080_NOTIFIERS_MAXCOUNT) + { + NV_PRINTF(LEVEL_INFO, "bad event 0x%x\n", pSetEventParams->event); + return NV_ERR_INVALID_ARGUMENT; + } + + if (pSetEventParams->event == NV2080_NOTIFIERS_TIMER) + { + NV_PRINTF(LEVEL_INFO, "wrong control call for timer event\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + if (IS_FW_CLIENT(pGpu)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + pRmApi->Control(pRmApi, RES_GET_CLIENT_HANDLE(pSubdevice), + RES_GET_HANDLE(pSubdevice), + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + pSetEventParams, + sizeof *pSetEventParams)); + } + + switch (pSetEventParams->action) + { + case NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE: + case NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT: + { + // must be in disabled state to transition to an active state + if (pSubdevice->notifyActions[pSetEventParams->event] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + status = NV_ERR_INVALID_STATE; + break; + } + + pSubdevice->notifyActions[pSetEventParams->event] = pSetEventParams->action; + break; + } + + case NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE: + { + pSubdevice->notifyActions[pSetEventParams->event] = pSetEventParams->action; + break; + } + default: + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + return status; +} + +NV_STATUS +subdeviceCtrlCmdEventSetMemoryNotifies_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *pSetMemoryNotifiesParams +) +{ + Memory *pMemory; + RsClient *pClient = RES_GET_CLIENT(pSubdevice); + NvU32 i; + + // ensure there's no pending notifications if there is an existing notification buffer + if (pSubdevice->hNotifierMemory != NV01_NULL_OBJECT) + { + for (i = 0; i < NV2080_NOTIFIERS_MAXCOUNT; i++) + { + if (pSubdevice->notifyActions[i] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + return NV_ERR_STATE_IN_USE; + } + } + } + + if (pSetMemoryNotifiesParams->hMemory == NV01_NULL_OBJECT) + { + pSubdevice->hNotifierMemory = pSetMemoryNotifiesParams->hMemory; + pSubdevice->pNotifierMemory = NULL; + return NV_OK; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + memGetByHandle(pClient, pSetMemoryNotifiesParams->hMemory, &pMemory)); + + if (pMemory->pMemDesc->Size < sizeof(NvNotification) * NV2080_NOTIFIERS_MAXCOUNT) + { + return NV_ERR_INVALID_LIMIT; + } + + pSubdevice->hNotifierMemory = pSetMemoryNotifiesParams->hMemory; + pSubdevice->pNotifierMemory = pMemory; + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdEventSetSemaphoreMemory_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS *pSetSemMemoryParams +) +{ + RsClient *pClient = RES_GET_CLIENT(pSubdevice); + Memory *pMemory; + 
NvU32 i; + + if (pSubdevice->hSemMemory != NV01_NULL_OBJECT) + { + for (i = 0; i < NV2080_NOTIFIERS_MAXCOUNT; i++) + { + if (pSubdevice->notifyActions[i] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + return NV_ERR_STATE_IN_USE; + } + } + } + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + memGetByHandle(pClient, pSetSemMemoryParams->hSemMemory, &pMemory)); + + if (pSetSemMemoryParams->semOffset >= pMemory->pMemDesc->Size) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pSubdevice->hSemMemory = pSetSemMemoryParams->hSemMemory; + pMemory->vgpuNsIntr.nsSemOffset = pSetSemMemoryParams->semOffset; + + pMemory->vgpuNsIntr.nsSemValue = 0; + pMemory->vgpuNsIntr.guestMSIAddr = 0; + pMemory->vgpuNsIntr.guestMSIData = 0; + pMemory->vgpuNsIntr.guestDomainId = 0; + pMemory->vgpuNsIntr.isSemaMemValidationEnabled = NV_TRUE; + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdEventSetSemaMemValidation_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS *pSetSemaMemValidationParams +) +{ + Memory *pMemory; + RsClient *pClient = RES_GET_CLIENT(pSubdevice); + NvU32 *pSemValue; + NV_STATUS rmStatus = NV_ERR_INVALID_OBJECT_HANDLE; + + rmStatus = memGetByHandle(pClient, pSetSemaMemValidationParams->hSemMemory, &pMemory); + + if (rmStatus == NV_OK) + { + pSemValue = (NvU32 *)NvP64_VALUE(memdescGetKernelMapping(pMemory->pMemDesc)); + + if (pSemValue == NULL) + { + return NV_ERR_INVALID_ADDRESS; + } + + portMemSet(pSemValue, 0, RM_PAGE_SIZE); + pMemory->vgpuNsIntr.nsSemValue = 0; + pMemory->vgpuNsIntr.isSemaMemValidationEnabled = pSetSemaMemValidationParams->isSemaMemValidationEnabled; + } + + return rmStatus; +} + + +NV_STATUS +subdeviceCtrlCmdEventGspTraceRatsBindEvtbuf_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_RATS_GSP_TRACE_BIND_EVTBUF_PARAMS *pParams +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; +#if KERNEL_GSP_TRACING_RATS_ENABLED + RsClient *pClient = RES_GET_CLIENT(pSubdevice); + RsResourceRef *pEventBufferRef = NULL; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NvHandle hNotifier = RES_GET_HANDLE(pSubdevice); + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + NV_ASSERT_OK_OR_RETURN(serverutilGetResourceRefWithType(hClient, + pParams->hEventBuffer, + classId(EventBuffer), + &pEventBufferRef)); + status = gspTraceAddBindpoint(pGpu, + pClient, + pEventBufferRef, + hNotifier, + pParams->tracepointMask, + pParams->gspLoggingBufferSize, + pParams->gspLoggingBufferWatermark); +#endif + return status; +} diff --git a/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c new file mode 100644 index 0000000..9ae1dff --- /dev/null +++ b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c @@ -0,0 +1,1617 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief This module contains the gpu control interfaces for the + * subdevice (NV20_SUBDEVICE_0) class. Subdevice-level control calls + * are directed unicast to the associated GPU. + * File contains ctrls related to general GPU + */ + +#include "core/core.h" +#include "core/locks.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_db.h" +#include "nvrm_registry.h" +#include "nvVer.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/timer/objtmr.h" +#include "vgpu/rpc.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "virtualization/hypervisor/hypervisor.h" +#include "rmapi/resource_fwd_decls.h" +#include "rmapi/client.h" + +#include "class/cl00de.h" +#include "class/cl900e.h" +#include "ctrl/ctrl2080/ctrl2080thermal.h" + + +#include "g_finn_rm_api.h" + +#define RPC_STRUCTURES +#define RPC_GENERIC_UNION +#include "g_rpc-structures.h" +#undef RPC_STRUCTURES +#undef RPC_GENERIC_UNION + +#define RPC_MESSAGE_STRUCTURES +#include "g_rpc-message-header.h" +#undef RPC_MESSAGE_STRUCTURES + +// bit to set when telling physical to fill in an info entry +#define INDEX_FORWARD_TO_PHYSICAL 0x80000000 +ct_assert(INDEX_FORWARD_TO_PHYSICAL == DRF_NUM(2080, _CTRL_GPU_INFO_INDEX, _RESERVED, 1)); + + +static NV_STATUS +getGpuInfos(Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pParams, NvBool bCanAccessHw) +{ + return NV_OK; +} + +#undef INDEX_FORWARD_TO_PHYSICAL + +NV_STATUS +subdeviceCtrlCmdGpuGetInfoV2_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams +) +{ + return getGpuInfos(pSubdevice, pGpuInfoParams, NV_TRUE); +} + +NV_STATUS +subdeviceCtrlCmdGpuGetVfCaps_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_VF_CAPS_PARAMS *pParams +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// +// subdeviceCtrlCmdGpuGetCachedInfo: As subdeviceCtrlCmdGpuGetInfoV2, except +// does not perform any HW access (NO_GPUS_ACCESS and NO_GPUS_LOCK flags) +// +NV_STATUS +subdeviceCtrlCmdGpuGetCachedInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams +) +{ + return getGpuInfos(pSubdevice, pGpuInfoParams, NV_FALSE); +} + +/*! + * @brief This command can be used for Optimus enabled system. 
+ *
+ * @return :
+ *    NV_OK
+ */
+NV_STATUS
+subdeviceCtrlCmdGpuSetOptimusInfo_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS *pGpuOptimusInfoParams
+)
+{
+    NV_STATUS status = NV_OK;
+    OBJGPU   *pGpu   = GPU_RES_GET_GPU(pSubdevice);
+
+    if (pGpuOptimusInfoParams->isOptimusEnabled)
+    {
+        //
+        // Set pMemoryManager->bPersistentStandbyBuffer for Optimus systems.
+        // It is used for sys_mem allocations that are pinned across
+        // S3 transitions. Sys_mem allocations are done on the first S3 cycle
+        // and released during driver unload, which reduces system VM
+        // fragmentation, a known problem on Optimus systems.
+        // For more details, refer to bug 754122.
+        //
+        GPU_GET_MEMORY_MANAGER(pGpu)->bPersistentStandbyBuffer = NV_TRUE;
+    }
+    return status;
+}
+
+//
+// subdeviceCtrlCmdGpuGetSdm
+//
+// Lock Requirements:
+//      Assert that API lock held on entry
+//
+NV_STATUS
+subdeviceCtrlCmdGpuGetSdm_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_GPU_GET_SDM_PARAMS *pSdmParams
+)
+{
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
+
+    NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE);
+
+    pSdmParams->subdeviceMask = gpuGetSubdeviceMask(pGpu);
+
+    return NV_OK;
+}
+
+//
+// subdeviceCtrlCmdGpuGetSimulationInfo
+//
+// Lock Requirements:
+//      Assert that API lock held on entry
+//
+NV_STATUS
+subdeviceCtrlCmdGpuGetSimulationInfo_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS *pGpuSimulationInfoParams
+)
+{
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
+
+    NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE);
+
+    if (IS_SILICON(pGpu))
+    {
+        pGpuSimulationInfoParams->type = NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_NONE;
+    }
+    else
+    {
+        pGpuSimulationInfoParams->type = NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_UNKNOWN;
+    }
+
+    return NV_OK;
+}
+
+//
+// subdeviceCtrlCmdGpuGetEngines
+//
+// Lock Requirements:
+//      Assert that API lock held on entry
+//
+NV_STATUS
+subdeviceCtrlCmdGpuGetEngines_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_GPU_GET_ENGINES_PARAMS *pParams
+)
+{
+    NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS getEngineParamsV2;
+    NvU32    *pKernelEngineList = NvP64_VALUE(pParams->engineList);
+    NV_STATUS status = NV_OK;
+
+    NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE);
+
+    portMemSet(&getEngineParamsV2, 0, sizeof(getEngineParamsV2));
+
+    status = subdeviceCtrlCmdGpuGetEnginesV2(pSubdevice, &getEngineParamsV2);
+    NV_CHECK_OR_RETURN(LEVEL_INFO, NV_OK == status, status);
+
+    // NULL clients just want an engine count
+    if (NULL != pKernelEngineList)
+    {
+        NV_CHECK_OR_RETURN(LEVEL_INFO, pParams->engineCount >= getEngineParamsV2.engineCount,
+                           NV_ERR_BUFFER_TOO_SMALL);
+        portMemCopy(pKernelEngineList,
+                    getEngineParamsV2.engineCount * sizeof(*getEngineParamsV2.engineList),
+                    getEngineParamsV2.engineList,
+                    getEngineParamsV2.engineCount * sizeof(*getEngineParamsV2.engineList));
+    }
+
+    pParams->engineCount = getEngineParamsV2.engineCount;
+
+    return status;
+}
+
+//
+// subdeviceCtrlCmdGpuGetEnginesV2
+//
+// Lock Requirements:
+//      Assert that API lock held on entry
+//
+NV_STATUS
+subdeviceCtrlCmdGpuGetEnginesV2_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *pEngineParams
+)
+{
+    OBJGPU   *pGpu = GPU_RES_GET_GPU(pSubdevice);
+    NV_STATUS status = NV_OK;
+
+    NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE);
+
+    // Update the engine Database
+    NV_ASSERT_OK_OR_RETURN(gpuUpdateEngineTable(pGpu));
+
+    // Validate engine count
+    if (pGpu->engineDB.size >
NV2080_GPU_MAX_ENGINES_LIST_SIZE) + { + NV_PRINTF(LEVEL_ERROR, "The engine database's size (0x%x) exceeds NV2080_GPU_MAX_ENGINES_LIST_SIZE (0x%x)!\n", + pGpu->engineDB.size, NV2080_GPU_MAX_ENGINES_LIST_SIZE); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_STATE; + } + + { + NvU32 i; + pEngineParams->engineCount = pGpu->engineDB.size; + for (i = 0; i < pEngineParams->engineCount; i++) + { + pEngineParams->engineList[i] = gpuGetNv2080EngineType(pGpu->engineDB.pType[i]); + } + } + + return status; +} + +// +// subdeviceCtrlCmdGpuGetEngineClasslist +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetEngineClasslist_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *pClassParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + ENGDESCRIPTOR engDesc; + NV_STATUS status = NV_OK; + RM_ENGINE_TYPE rmEngineType = gpuGetRmEngineType(pClassParams->engineType); + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + { + } + + status = gpuXlateClientEngineIdToEngDesc(pGpu, rmEngineType, &engDesc); + + NV_ASSERT(status == NV_OK); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST Invalid engine ID 0x%x\n", + pClassParams->engineType); + DBG_BREAKPOINT(); + return status; + } + + status = gpuGetClassList(pGpu, &pClassParams->numClasses, NvP64_VALUE(pClassParams->classList), engDesc); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST Class List query failed\n"); + } + + return status; +} + +// +// subdeviceCtrlCmdGpuGetEnginePartnerList +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetEnginePartnerList_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pPartnerListParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + ENGDESCRIPTOR engDesc; + NvU32 nv2080EngineType; + RM_ENGINE_TYPE rmEngineType; + NvU32 i; + CLASSDESCRIPTOR *pClass; + NV_STATUS status = NV_OK; + + pPartnerListParams->numPartners = 0; + + rmEngineType = gpuGetRmEngineType(pPartnerListParams->engineType); + + status = gpuXlateClientEngineIdToEngDesc(pGpu, rmEngineType, &engDesc); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "Invalid engine ID 0x%x (0x%x)\n", + pPartnerListParams->engineType, rmEngineType); + return status; + } + + // find class in class db + status = gpuGetClassByClassId(pGpu, pPartnerListParams->partnershipClassId, &pClass); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "Invalid class ID 0x%x\n", + pPartnerListParams->partnershipClassId); + return status; + } + + // Make sure that the engine related to this class is FIFO... + if (pClass->engDesc != ENG_KERNEL_FIFO) + { + NV_PRINTF(LEVEL_ERROR, + "Class 0x%x is not considered a partnership class.\n", + pPartnerListParams->partnershipClassId); + return NV_ERR_NOT_SUPPORTED; + } + + nv2080EngineType = pPartnerListParams->engineType; + + // Translate the instance-local engine type to the global engine type in MIG mode + + // Restore the client's passed engineType + pPartnerListParams->engineType = nv2080EngineType; + + // + // For channels that the hal didn't handle, we should just return + // all of the supported engines except for the target engine. 
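+    // For example, if the engine database holds { GR0, CE0, CE1 } and the
+    // caller passed engineType = GR0, the copy loop below fills the partner
+    // list with { CE0, CE1 } (engine names here are purely illustrative).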
+ // + + // Update the engine Database + NV_ASSERT_OK_OR_RETURN(gpuUpdateEngineTable(pGpu)); + + // Make sure it all will fit + if (pGpu->engineDB.size > NV2080_CTRL_GPU_MAX_ENGINE_PARTNERS) + { + NV_PRINTF(LEVEL_ERROR, + "partnerList space is too small, time to increase. This is fatal\n"); + DBG_BREAKPOINT(); + return status; + } + + // Copy over all of the engines except the target + for (i = 0; i < pGpu->engineDB.size; i++) + { + nv2080EngineType = gpuGetNv2080EngineType(pGpu->engineDB.pType[i]); + + // Skip the engine handed in + if (nv2080EngineType != pPartnerListParams->engineType ) + { + pPartnerListParams->partnerList[pPartnerListParams->numPartners++] = nv2080EngineType; + } + } + + return status; +} + +// +// subdeviceCtrlCmdGpuQueryMode_IMPL +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuQueryMode_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_QUERY_MODE_PARAMS *pQueryMode +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + switch (gpuGetMode(pGpu)) + { + case NV_GPU_MODE_GRAPHICS_MODE: + { + pQueryMode->mode = NV2080_CTRL_GPU_QUERY_MODE_GRAPHICS_MODE; + break; + } + case NV_GPU_MODE_COMPUTE_MODE: + { + pQueryMode->mode = NV2080_CTRL_GPU_QUERY_MODE_COMPUTE_MODE; + break; + } + default: + { + pQueryMode->mode = NV2080_CTRL_GPU_QUERY_MODE_UNKNOWN_MODE; + break; + } + } + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuHandleGpuSR +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuHandleGpuSR_IMPL +( + Subdevice *pSubdevice +) +{ + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuGetId +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetId_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ID_PARAMS *pIdParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + pIdParams->gpuId = pGpu->gpuId; + + return NV_OK; +} + +// +// nv2080CtrlCmdGpuGetPids +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetPids_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_PIDS_PARAMS *pGetPidsParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvU32 internalClassId; + NV_STATUS status; + MIG_INSTANCE_REF *pRef = NULL; + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + switch (pGetPidsParams->idType) + { + case (NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_CLASS): + { + if (pGetPidsParams->id == NV20_SUBDEVICE_0) + { + internalClassId = classId(Subdevice); + } + else if (pGetPidsParams->id == MPS_COMPUTE) + { + internalClassId = classId(MpsApi); + } + else + { + internalClassId = classId(ChannelDescendant); + } + break; + } + + default: + return NV_ERR_INVALID_ARGUMENT; + } + + // + // Search over all clients to see if any contain objects of type = id. + // If they do, then add their PID to the PIDArray param and also + // return the amount of valid entries in the Array through pidTblCount. 
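+    //
+    // Illustrative caller-side sketch (handles are hypothetical and error
+    // checking is omitted): querying which PIDs own channel objects on this GPU.
+    //
+    //     NV2080_CTRL_GPU_GET_PIDS_PARAMS params = { 0 };
+    //     params.idType = NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_CLASS;
+    //     params.id     = NV20_SUBDEVICE_0;
+    //     NvRmControl(hClient, hSubdevice, NV2080_CTRL_CMD_GPU_GET_PIDS,
+    //                 &params, sizeof(params));
+    //     // params.pidTblCount now holds the number of valid pidTbl entries.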
+    //
+    status = gpuGetProcWithObject(pGpu, pGetPidsParams->id, internalClassId,
+                                  pGetPidsParams->pidTbl, &pGetPidsParams->pidTblCount,
+                                  pRef);
+    return status;
+}
+
+//
+// subdeviceCtrlCmdGpuGetPidInfo
+//
+// Lock Requirements:
+//      Assert that API and GPUs lock held on entry
+//
+NV_STATUS
+subdeviceCtrlCmdGpuGetPidInfo_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_GPU_GET_PID_INFO_PARAMS *pGetPidInfoParams
+)
+{
+    NV2080_CTRL_GPU_PID_INFO_DATA *pPidInfoData;
+    NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo;
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
+    NV2080_CTRL_GPU_PID_INFO *pPidInfo;
+    NvU32 internalClassId;
+    NvU32 i;
+    MIG_INSTANCE_REF *pRef = NULL;
+    NvBool bGlobalInfo = NV_TRUE;
+
+    NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED);
+
+    NV_ASSERT_OR_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE);
+
+    if ((pGetPidInfoParams->pidInfoListCount <= 0) ||
+        (pGetPidInfoParams->pidInfoListCount >
+         NV2080_CTRL_GPU_GET_PID_INFO_MAX_COUNT))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    for (i = 0; i < pGetPidInfoParams->pidInfoListCount; ++i)
+    {
+        pPidInfo = &pGetPidInfoParams->pidInfoList[i];
+
+        pSmcInfo = &pPidInfo->smcSubscription;
+        pSmcInfo->computeInstanceId = PARTITIONID_INVALID;
+        pSmcInfo->gpuInstanceId = PARTITIONID_INVALID;
+
+        switch (pPidInfo->index)
+        {
+            case (NV2080_CTRL_GPU_PID_INFO_INDEX_VIDEO_MEMORY_USAGE):
+            {
+                internalClassId = classId(Memory);
+
+                pPidInfoData = &pPidInfo->data;
+                portMemSet(pPidInfoData, 0, sizeof(NV2080_CTRL_GPU_PID_INFO_DATA));
+                pPidInfo->result = gpuFindClientInfoWithPidIterator(pGpu, pPidInfo->pid, 0,
+                                                                    internalClassId,
+                                                                    pPidInfoData,
+                                                                    pSmcInfo,
+                                                                    pRef,
+                                                                    bGlobalInfo);
+                break;
+            }
+            default:
+            {
+                pPidInfo->result = NV_ERR_INVALID_ARGUMENT;
+                break;
+            }
+        }
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS
+subdeviceCtrlCmdGpuGetMaxSupportedPageSize_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS *pParams
+)
+{
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
+    NV_STATUS status = NV_OK;
+
+    NV_ASSERT_OR_RETURN((rmapiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance)) || rmapiInRtd3PmPath(),
+                        NV_ERR_INVALID_LOCK_STATE);
+
+    // Default to minimal page size (4k)
+    pParams->maxSupportedPageSize = RM_PAGE_SIZE;
+
+    if (gpuIsSriovEnabled(pGpu) || IS_VIRTUAL_WITH_SRIOV(pGpu))
+    {
+        NvU64 vmmuSegmentSize = gpuGetVmmuSegmentSize(pGpu);
+        if (vmmuSegmentSize > 0 &&
+            vmmuSegmentSize < NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_1024MB)
+        {
+            pParams->maxSupportedPageSize = RM_PAGE_SIZE_HUGE;
+        }
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Check if an address range is within the provided limits
+ *
+ * @param[in]  addrStart    Starting address of address range
+ * @param[in]  addrLength   Size of address range
+ * @param[in]  limitStart   Starting address of limit
+ * @param[in]  limitLength  Size of limit
+ *
+ * @return
+ *     NV_TRUE, if address range is within the provided limits
+ *     NV_FALSE, if address range is outside the provided limits
+ *
+ */
+static NvBool isAddressWithinLimits
+(
+    NvU64 addrStart,
+    NvU64 addrLength,
+    NvU64 limitStart,
+    NvU64 limitLength
+)
+{
+    NvU64 addrEnd = 0;
+    NvU64 limitEnd = 0;
+
+    //
+    // Calculate the end addresses of the address range and the limit;
+    // return NV_FALSE in case of 64-bit addition overflow
+    //
+    if (!portSafeAddU64(addrStart, addrLength - 1, &addrEnd) ||
+        !portSafeAddU64(limitStart, limitLength - 1, &limitEnd))
+    {
+        return NV_FALSE;
+    }
+
+    return ((addrStart >= limitStart) && (addrEnd <= limitEnd));
+}
+
+/*!
+ * @brief Validate the address range for a memory map request by comparing the
+ *        user-supplied address range with the GPU BAR0/BAR1 ranges.
+ *
+ * Lock Requirements:
+ *      Assert that API and GPUs lock held on entry
+ *
+ * @param[in] pSubdevice
+ * @param[in] pParams    pointer to control parameters
+ *
+ * Possible status values returned are:
+ *     NV_OK
+ *     NV_ERR_PROTECTION_FAULT
+ *
+ */
+NV_STATUS subdeviceCtrlCmdValidateMemMapRequest_IMPL
+(
+    Subdevice                                       *pSubdevice,
+    NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS *pParams
+)
+{
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
+    NvU64 start = pParams->addressStart;
+    NvU64 length = pParams->addressLength;
+    NV_STATUS rmStatus;
+    NvU32 bar0MapSize;
+    NvU64 bar0MapOffset;
+
+    NV_ASSERT_OR_RETURN(rmapiLockIsOwner() && rmDeviceGpuLockIsOwner(GPU_RES_GET_GPU(pSubdevice)->gpuInstance),
+                        NV_ERR_INVALID_LOCK_STATE);
+
+    pParams->protection = NV_PROTECT_READ_WRITE;
+
+    if (isAddressWithinLimits(start, length, pGpu->busInfo.gpuPhysAddr,
+                              pGpu->deviceMappings[0].gpuNvLength))
+    {
+        start -= pGpu->busInfo.gpuPhysAddr;
+
+        OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
+        rmStatus = tmrGetTimerBar0MapInfo_HAL(pGpu, pTmr,
+                                              &bar0MapOffset, &bar0MapSize);
+        if ((rmStatus == NV_OK) &&
+            isAddressWithinLimits(start, length, bar0MapOffset, bar0MapSize))
+        {
+            pParams->protection = NV_PROTECT_READABLE;
+            return NV_OK;
+        }
+
+        //
+        // If the kernel side does not know about the object being mapped,
+        // fall through to GSP and see if it knows anything.
+        //
+        if (IS_FW_CLIENT(pGpu))
+        {
+            RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+            return pRmApi->Control(pRmApi,
+                                   pGpu->hInternalClient,
+                                   pGpu->hInternalSubdevice,
+                                   NV2080_CTRL_CMD_GPU_VALIDATE_MEM_MAP_REQUEST,
+                                   pParams, sizeof(*pParams));
+        }
+
+        return NV_ERR_PROTECTION_FAULT;
+    }
+
+    return NV_ERR_PROTECTION_FAULT;
+}
+
+/*!
+ * @brief Computes the GFID (GPU Function ID) for a given SR-IOV
+ *        Virtual Function (VF) of the physical GPU based on the
+ *        BDF parameters provided by the caller.
+ *
+ * Lock Requirements:
+ *      Assert that API and GPUs lock held on entry
+ *
+ * @param[in] pSubdevice
+ * @param[in] pParams    pointer to control parameters
+ *
+ * Possible status values returned are:
+ *     NV_OK                 on successful computation of a valid GFID
+ *     NV_ERR_NOT_SUPPORTED  if the ctrl call is made when SRIOV is not
+ *                           enabled OR the caller is not the FM from Host RM
+ *     NV_ERR_INVALID_STATE  if the computed GFID is greater than the
+ *                           max GFID that is expected/allowed
+ */
+NV_STATUS
+subdeviceCtrlCmdGpuGetGfid_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_GPU_GET_GFID_PARAMS *pParams
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
+    NvU32 pciFunction, gfid;
+
+    NV_ASSERT_OR_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE);
+
+    if (!gpuIsSriovEnabled(pGpu))
+        return NV_ERR_NOT_SUPPORTED;
+
+    // Host RM && FM
+    if ((!IS_VIRTUAL(pGpu)) &&
+        (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)))
+    {
+        //
+        // On Unix-based systems, the OS uses the lspci format "ssss:bb:dd.f",
+        // so the device is 5 bits and the function is 3 bits.
+        // For SR-IOV, when ARI is enabled, device and function are combined
+        // and the function must be treated as 8 bits.
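+        //
+        // Worked example (values hypothetical): with ARI enabled, device 0x02
+        // and func 0x5 combine into pciFunction = (0x02 << 3) | 0x5 = 0x15 (21).
+        // With firstVFOffset = 4, the GFID computed below is (21 - 4) + 1 = 18.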
+ // + pciFunction = (pParams->device << 3) | pParams->func; + gfid = (pciFunction - pGpu->sriovState.firstVFOffset) + 1; + + if (gfid > pGpu->sriovState.maxGfid) + { + NV_PRINTF(LEVEL_ERROR, "Computed GFID %d greater than max supported GFID\n", gfid); + return NV_ERR_INVALID_STATE; + } + + pParams->gfid = gfid; + // Also set the mask for max gfid supported currently in the driver + pParams->gfidMask = (pGpu->sriovState.maxGfid - 1); + } + else + { + return NV_ERR_NOT_SUPPORTED; + } + + return NV_OK; +} + +/*! + * @brief Sets or unsets the SW state to inform the GPU driver that the GPU instance + * associated with input GFID has been activated or de-activated respectively. + * + * Lock Requirements: + * Assert that API and GPUs lock held on entry + * + * @param[in] pSubdevice + * @param[in] pParams pointer to control parameters + * + * Possible status values returned are: + * NV_OK on success + * NV_ERR_INVALID_STATE if SRIOV state for P2P in driver is not setup + * NV_ERR_INVALID_ARGUMENT if input GFID is greater than the max GFID allowed + * NV_ERR_NOT_SUPPORTED if ctrl call is made when + * SRIOV is not enabled OR + * caller is not FM from Host RM + * NV_ERR_IN_USE If MAX_NUM_P2P_GFIDS have already been enabled for P2P + */ +NV_STATUS +gpuUpdateGfidP2pCapability +( + OBJGPU *pGpu, + NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY_PARAMS *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + PSRIOV_P2P_INFO pP2PInfo = pGpu->sriovState.pP2PInfo; + NvBool bSetP2PAccess = NV_FALSE; + NvU32 idx; + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + if (!gpuIsSriovEnabled(pGpu)) + return NV_ERR_NOT_SUPPORTED; + + NV_ASSERT_OR_RETURN(pP2PInfo != NULL, NV_ERR_INVALID_STATE); + + // Ctrl call should only be called by the FM from Host RM + if ((!IS_VIRTUAL(pGpu)) && + (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED))) + { + if (pParams->gfid > pGpu->sriovState.maxGfid) + { + NV_PRINTF(LEVEL_ERROR, "Input GFID %d greater than max allowed GFID\n", pParams->gfid); + return NV_ERR_INVALID_ARGUMENT; + } + + for (idx = 0; idx < pGpu->sriovState.maxP2pGfid; idx++) + { + // + // Check if Host RM is already using a GFID for P2P, + // Since only "MAX_NUM_P2P_GFIDS" GFID(s) is(are) allowed to do P2P at any time, + // we should fail here if a GFID greater than supported number is being enabled + // + if (pParams->bEnable) + { + if (pP2PInfo[idx].gfid == INVALID_P2P_GFID) + { + pP2PInfo[idx].gfid = pParams->gfid; + pGpu->sriovState.p2pFabricPartitionId = pParams->fabricPartitionId; + bSetP2PAccess = NV_TRUE; + break; + } + } + else + { + if (pP2PInfo[idx].gfid == pParams->gfid) + { + pP2PInfo[idx].gfid = INVALID_P2P_GFID; + pGpu->sriovState.p2pFabricPartitionId = INVALID_FABRIC_PARTITION_ID; + bSetP2PAccess = NV_TRUE; + break; + } + } + } + + if (bSetP2PAccess == NV_TRUE) + { + pP2PInfo[idx].bAllowP2pAccess = pParams->bEnable; + } + else + { + // Some other GFID(s) has already been enabled to do P2P + // Fail the call + return NV_ERR_IN_USE; + } + } + else + { + return NV_ERR_NOT_SUPPORTED; + } + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdUpdateGfidP2pCapability_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_CMD_GPU_UPDATE_GFID_P2P_CAPABILITY_PARAMS *pParams +) +{ + return gpuUpdateGfidP2pCapability(GPU_RES_GET_GPU(pSubdevice), pParams); +} + +/*! + * @brief: This command returns the load time (latency) of each engine, + * implementing NV2080_CTRL_CMD_GPU_GET_ENGINE_LOAD_TIMES control call. 
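+ *
+ * Note on units: each engine's ENGSTATE_STATE_LOAD transition time is
+ * tracked in microseconds and multiplied by 1000 in the handler below, so
+ * engineStateLoadTime is reported in nanoseconds (e.g. a 1500 us state
+ * load is returned as 1500000).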
+ * + * @param[in] pSubdevice + * @param[in] pParams + * + * @return + * NV_OK Success + */ +NV_STATUS +subdeviceCtrlCmdGpuGetEngineLoadTimes_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + ENGDESCRIPTOR *pEngDescriptorList = gpuGetInitEngineDescriptors(pGpu); + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + NvU32 curEngDescIdx; + + NV_ASSERT_OR_RETURN(numEngDescriptors < NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS, NV_ERR_BUFFER_TOO_SMALL); + + pParams->engineCount = numEngDescriptors; + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGDESCRIPTOR curEngDescriptor = pEngDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + pParams->engineIsInit[curEngDescIdx] = NV_FALSE; + continue; + } + + pParams->engineList[curEngDescIdx] = pEngstate->engDesc; + pParams->engineStateLoadTime[curEngDescIdx] = pEngstate->stats[ENGSTATE_STATE_LOAD].transitionTimeUs * 1000; + pParams->engineIsInit[curEngDescIdx] = NV_TRUE; + } + + return NV_OK; +} + +/*! + * @brief This command is used to determine which GSP features are + * supported on this GPU. + * + * @param[in] pSubdevice + * @param[in,out] pGspFeaturesParams + * + * @return Returns NV_STATUS + * NV_OK Success + */ +NV_STATUS +subdeviceCtrlCmdGspGetFeatures_KERNEL +( + Subdevice *pSubdevice, + NV2080_CTRL_GSP_GET_FEATURES_PARAMS *pGspFeaturesParams +) +{ + pGspFeaturesParams->bValid = NV_FALSE; + return NV_OK; +} + +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetNameString_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS *pNameStringParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + return gpuGetNameString(pGpu, + pNameStringParams->gpuNameStringFlags, + (void *)&pNameStringParams->gpuNameString); +} + +// +// subdeviceCtrlCmdGpuGetShortNameString +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetShortNameString_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS *pShortNameStringParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + return gpuGetShortNameString(pGpu, (void *)&pShortNameStringParams->gpuShortNameString); +} + +// +// subdeviceCtrlCmdGpuGetGidInfo +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetGidInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_GID_INFO_PARAMS *pGidInfoParams +) +{ + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvU8 *pGidString; + NvU32 flags = pGidInfoParams->flags; + NvU32 gidStrlen; + + rmStatus = gpuGetGidInfo(pGpu, &pGidString, &gidStrlen, flags); + if (rmStatus == NV_OK) + { + if (sizeof(pGidInfoParams->data) >= gidStrlen) + { + portMemCopy(pGidInfoParams->data, gidStrlen, pGidString, gidStrlen); + pGidInfoParams->length = gidStrlen; + } + else + { + rmStatus = NV_ERR_INSUFFICIENT_RESOURCES; + } + + portMemFree(pGidString); + } + + return rmStatus; +} + +NV_STATUS +subdeviceCtrlCmdGpuGetChipDetails_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_CHIP_DETAILS_PARAMS *pParams +) +{ + OBJGPU *pGpu = 
GPU_RES_GET_GPU(pSubdevice);
+
+    return gpuGetChipDetails(pGpu, pParams);
+}
+
+NV_STATUS
+subdeviceCtrlCmdBiosGetSKUInfo_KERNEL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS *pBiosGetSKUInfoParams
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+/*!
+ * @brief This command is used to query the recovery action for a GPU to get
+ *        back to an operational state.
+ *
+ * @param[in,out] pParams
+ *                action: The GPU recovery action.
+ *
+ * @return Returns NV_STATUS
+ *         NV_ERR_GPU_IS_LOST The GPU has fallen off the bus
+ *         NV_OK              Success
+ *
+ */
+NV_STATUS
+subdeviceCtrlCmdGpuGetRecoveryAction_IMPL(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_GPU_GET_RECOVERY_ACTION_PARAMS *pParams
+)
+{
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
+
+    if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu))
+    {
+        return NV_ERR_GPU_IS_LOST;
+    }
+
+    gpuGetRecoveryAction(pGpu, pParams);
+
+    return NV_OK;
+}
+
+NV_STATUS
+subdeviceCtrlCmdGpuGetVgpuHeapStats_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_CMD_GSP_GET_VGPU_HEAP_STATS_PARAMS *pParams
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+/*
+ * GSP RPC integrity test
+ */
+
+NV_STATUS
+subdeviceCtrlCmdGpuRpcGspTest_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_GPU_RPC_GSP_TEST_PARAMS *pParams
+)
+{
+    NV_STATUS status = NV_OK;
+    NvU32 *data = (NvU32 *) pParams->data;
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
+
+    if (IS_FW_CLIENT(pGpu))
+    {
+        CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
+        RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
+        pParams->startTimestamp = osGetTimestamp();
+        if ((pParams->test == NV2080_CTRL_GPU_RPC_GSP_TEST_SERIALIZED_INTEGRITY) ||
+            (pParams->test == NV2080_CTRL_GPU_RPC_GSP_TEST_SERIALIZED_NOP))
+        {
+            NV_RM_RPC_CONTROL(pGpu,
+                              pRmCtrlParams->hClient,
+                              pRmCtrlParams->hObject,
+                              pRmCtrlParams->cmd,
+                              pRmCtrlParams->pParams,
+                              pRmCtrlParams->paramsSize,
+                              status);
+        }
+        else if (pParams->test == NV2080_CTRL_GPU_RPC_GSP_TEST_UNSERIALIZED)
+        {
+            RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+            NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS params = {0};
+            params.targetEngine = NV2080_CTRL_GPU_GET_IP_VERSION_PPWR_PMU;
+            status = pRmApi->Control(pRmApi,
+                                     pGpu->hInternalClient,
+                                     pGpu->hInternalSubdevice,
+                                     NV2080_CTRL_CMD_GPU_GET_IP_VERSION,
+                                     &params, sizeof(params));
+        }
+        else
+        {
+            status = NV_ERR_INVALID_ARGUMENT;
+        }
+        pParams->stopTimestamp = osGetTimestamp();
+        return status;
+    }
+
+    if (pParams->test == NV2080_CTRL_GPU_RPC_GSP_TEST_SERIALIZED_INTEGRITY)
+    {
+        for (NvU32 i = 0; i < pParams->dataSize; i++)
+        {
+            if (data[i] != i * 2)
+            {
+                status = NV_ERR_INVALID_DATA;
+                NV_PRINTF(LEVEL_ERROR, "RPC TEST: mismatch in input data, expected %u, received %u\n", i * 2, data[i]);
+            }
+            data[i] = i * 3;
+        }
+        return status;
+    }
+
+    return NV_OK;
+}
+
+/*
+ * Used to query information for RPC integrity tests
+ */
+
+NV_STATUS
+subdeviceCtrlCmdGpuRpcGspQuerySizes_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_GPU_RPC_GSP_QUERY_SIZES_PARAMS *pParams
+)
+{
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
+    OBJRPC *pRpc = GPU_GET_RPC(pGpu);
+
+    pParams->maxRpcSize = pRpc->maxRpcSize;
+    pParams->finnRmapiSize = sizeof(FINN_RM_API);
+    pParams->rpcGspControlSize = sizeof(rpc_gsp_rm_control_v);
+    pParams->rpcMessageHeaderSize = sizeof(rpc_message_header_v);
+    pParams->timestampFreq = osGetTimestampFreq();
+
+    return NV_OK;
+}
+
+static void
+subdeviceCtrlCmdThermalSystemExecuteV2_updateCache(Subdevice *pSubdevice,
+    NV2080_CTRL_THERMAL_SYSTEM_EXECUTE_V2_PARAMS *pSystemExecuteParams)
+{
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
+
+    for (NvU32 i
= 0; i < pSystemExecuteParams->instructionListSize; i++) + { + if ((!pSystemExecuteParams->instructionList[i].executed) || + ( pSystemExecuteParams->instructionList[i].result != NV_OK)) + continue; + + switch (pSystemExecuteParams->instructionList[i].opcode) + { + case NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSORS_AVAILABLE_OPCODE: + { + pGpu->thermalSystemExecuteV2Cache.numSensors = + pSystemExecuteParams->instructionList[i].operands.getInfoSensorsAvailable.availableSensors; + + pGpu->thermalSystemExecuteV2Cache.bNumSensorsCached = NV_TRUE; + + break; + } + + case NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_PROVIDER_TYPE_OPCODE: + { + for (NvU32 j = 0; j < NV_ARRAY_ELEMENTS(pGpu->thermalSystemExecuteV2Cache.sensors); j++) + { + if ((pGpu->thermalSystemExecuteV2Cache.sensors[j].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_PROVIDER_INDEX]) && + (pGpu->thermalSystemExecuteV2Cache.sensors[j].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_PROVIDER_INDEX] == + pSystemExecuteParams->instructionList[i].operands.getInfoProviderType.providerIndex)) + { + pGpu->thermalSystemExecuteV2Cache.sensors[j].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_PROVIDER_TYPE] = + pSystemExecuteParams->instructionList[i].operands.getInfoProviderType.type; + pGpu->thermalSystemExecuteV2Cache.sensors[j].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_PROVIDER_TYPE] = NV_TRUE; + break; + } + } + + break; + } + + case NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGET_TYPE_OPCODE: + { + for (NvU32 j = 0; j < NV_ARRAY_ELEMENTS(pGpu->thermalSystemExecuteV2Cache.sensors); j++) + { + if ((pGpu->thermalSystemExecuteV2Cache.sensors[j].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_INDEX]) && + (pGpu->thermalSystemExecuteV2Cache.sensors[j].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_INDEX] == + pSystemExecuteParams->instructionList[i].operands.getInfoTargetType.targetIndex)) + { + pGpu->thermalSystemExecuteV2Cache.sensors[j].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_TYPE] = + pSystemExecuteParams->instructionList[i].operands.getInfoTargetType.type; + pGpu->thermalSystemExecuteV2Cache.sensors[j].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_TYPE] = NV_TRUE; + break; + } + } + + break; + } + + case NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_READING_RANGE_OPCODE: + { + NvU32 sensorIdx = pSystemExecuteParams->instructionList[i].operands.getInfoSensorReadingRange.sensorIndex; + + if (sensorIdx < NV_ARRAY_ELEMENTS(pGpu->thermalSystemExecuteV2Cache.sensors)) + { + pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_DEFAULT_MIN_TEMP] = + pSystemExecuteParams->instructionList[i].operands.getInfoSensorReadingRange.minimum; + pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_DEFAULT_MIN_TEMP] = NV_TRUE; + + pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_DEFAULT_MAX_TEMP] = + pSystemExecuteParams->instructionList[i].operands.getInfoSensorReadingRange.maximum; + pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_DEFAULT_MAX_TEMP] = NV_TRUE; + } + + break; + } + + case NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_PROVIDER_OPCODE: + { + NvU32 sensorIdx = pSystemExecuteParams->instructionList[i].operands.getInfoSensorProvider.sensorIndex; + + if (sensorIdx < NV_ARRAY_ELEMENTS(pGpu->thermalSystemExecuteV2Cache.sensors)) + { + pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_PROVIDER_INDEX] = + 
pSystemExecuteParams->instructionList[i].operands.getInfoSensorProvider.providerIndex; + pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_PROVIDER_INDEX] = NV_TRUE; + } + + break; + } + + case NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_TARGET_OPCODE: + { + NvU32 sensorIdx = pSystemExecuteParams->instructionList[i].operands.getInfoSensorTarget.sensorIndex; + + if (sensorIdx < NV_ARRAY_ELEMENTS(pGpu->thermalSystemExecuteV2Cache.sensors)) + { + pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_INDEX] = + pSystemExecuteParams->instructionList[i].operands.getInfoSensorTarget.targetIndex; + pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_INDEX] = NV_TRUE; + } + + break; + } + } + } +} + +NV_STATUS +subdeviceCtrlCmdThermalSystemExecuteV2_IMPL(Subdevice *pSubdevice, + NV2080_CTRL_THERMAL_SYSTEM_EXECUTE_V2_PARAMS *pSystemExecuteParams) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvU32 instructionListSize = pSystemExecuteParams->instructionListSize; + + NV_CHECK_OR_RETURN(LEVEL_ERROR, + instructionListSize <= NV_ARRAY_ELEMENTS(pSystemExecuteParams->instructionList), + NV_ERR_INVALID_ARGUMENT); + + NV_STATUS status = NV_OK; + NvBool bForwardRmctrl; + + for (NvU32 i = 0; i < instructionListSize; i++) + { + pSystemExecuteParams->instructionList[i].executed = NV_FALSE; + } + + // Skip unsupported version + if (pSystemExecuteParams->clientAPIVersion != THERMAL_SYSTEM_API_VER || + pSystemExecuteParams->clientAPIRevision != THERMAL_SYSTEM_API_REV || + pSystemExecuteParams->clientInstructionSizeOf != sizeof(NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION)) + { + return pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_THERMAL_SYSTEM_EXECUTE_V2_PHYSICAL, + pSystemExecuteParams, sizeof(*pSystemExecuteParams)); + } + + // If we cannot service the control by servicing data from cache entirely, forward to physical RM + bForwardRmctrl = NV_FALSE; + + // Service values from cache + for (NvU32 i = 0; i < instructionListSize; i++) + { + // Verify that the size of the union NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION_OPERANDS is dictated by + // NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION_OPERANDS::space + ct_assert(sizeof(pSystemExecuteParams->instructionList[0].operands.space) == + sizeof(NV2080_CTRL_THERMAL_SYSTEM_INSTRUCTION_OPERANDS)); + + switch (pSystemExecuteParams->instructionList[i].opcode) + { + case NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSORS_AVAILABLE_OPCODE: + { + if (pGpu->thermalSystemExecuteV2Cache.bNumSensorsCached) + { + pSystemExecuteParams->instructionList[i].operands.getInfoSensorsAvailable.availableSensors = + pGpu->thermalSystemExecuteV2Cache.numSensors; + + pSystemExecuteParams->instructionList[i].executed = NV_TRUE; + pSystemExecuteParams->instructionList[i].result = NV_OK; + pSystemExecuteParams->successfulInstructions++; + } + else + { + bForwardRmctrl = NV_TRUE; + } + + break; + } + + case NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_PROVIDER_TYPE_OPCODE: + { + NvBool bTypeFound = NV_FALSE; + + for (NvU32 j = 0; j < NV_ARRAY_ELEMENTS(pGpu->thermalSystemExecuteV2Cache.sensors); j++) + { + if ((pGpu->thermalSystemExecuteV2Cache.sensors[j].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_PROVIDER_INDEX]) && + (pGpu->thermalSystemExecuteV2Cache.sensors[j].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_PROVIDER_INDEX] == + 
pSystemExecuteParams->instructionList[i].operands.getInfoProviderType.providerIndex) && + (pGpu->thermalSystemExecuteV2Cache.sensors[j].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_PROVIDER_TYPE])) + { + pSystemExecuteParams->instructionList[i].operands.getInfoProviderType.type = + pGpu->thermalSystemExecuteV2Cache.sensors[j].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_PROVIDER_TYPE]; + + pSystemExecuteParams->instructionList[i].executed = NV_TRUE; + pSystemExecuteParams->instructionList[i].result = NV_OK; + pSystemExecuteParams->successfulInstructions++; + + bTypeFound = NV_TRUE; + break; + } + } + + if (!bTypeFound) + { + bForwardRmctrl = NV_TRUE; + } + + break; + } + + case NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_TARGET_TYPE_OPCODE: + { + NvBool bTypeFound = NV_FALSE; + + for (NvU32 j = 0; j < NV_ARRAY_ELEMENTS(pGpu->thermalSystemExecuteV2Cache.sensors); j++) + { + if ((pGpu->thermalSystemExecuteV2Cache.sensors[j].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_INDEX]) && + (pGpu->thermalSystemExecuteV2Cache.sensors[j].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_INDEX] == + pSystemExecuteParams->instructionList[i].operands.getInfoProviderType.providerIndex) && + (pGpu->thermalSystemExecuteV2Cache.sensors[j].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_TYPE])) + { + pSystemExecuteParams->instructionList[i].operands.getInfoProviderType.type = + pGpu->thermalSystemExecuteV2Cache.sensors[j].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_TYPE]; + + pSystemExecuteParams->instructionList[i].executed = NV_TRUE; + pSystemExecuteParams->instructionList[i].result = NV_OK; + pSystemExecuteParams->successfulInstructions++; + + bTypeFound = NV_TRUE; + break; + } + } + + if (!bTypeFound) + { + bForwardRmctrl = NV_TRUE; + } + + break; + } + + case NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_READING_RANGE_OPCODE: + { + NvU32 sensorIdx = pSystemExecuteParams->instructionList[i].operands.getInfoSensorReadingRange.sensorIndex; + + if (sensorIdx >= NV_ARRAY_ELEMENTS(pGpu->thermalSystemExecuteV2Cache.sensors)) + { + bForwardRmctrl = NV_TRUE; + break; + } + + if (pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_DEFAULT_MIN_TEMP] && + pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_DEFAULT_MAX_TEMP]) + { + pSystemExecuteParams->instructionList[i].operands.getInfoSensorReadingRange.minimum = + pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_DEFAULT_MIN_TEMP]; + pSystemExecuteParams->instructionList[i].operands.getInfoSensorReadingRange.maximum = + pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_DEFAULT_MAX_TEMP]; + + pSystemExecuteParams->instructionList[i].executed = NV_TRUE; + pSystemExecuteParams->instructionList[i].result = NV_OK; + pSystemExecuteParams->successfulInstructions++; + } + else + { + bForwardRmctrl = NV_TRUE; + } + + break; + } + + case NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_PROVIDER_OPCODE: + { + NvU32 sensorIdx = pSystemExecuteParams->instructionList[i].operands.getInfoSensorProvider.sensorIndex; + + if (sensorIdx >= NV_ARRAY_ELEMENTS(pGpu->thermalSystemExecuteV2Cache.sensors)) + { + bForwardRmctrl = NV_TRUE; + break; + } + + if (pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_PROVIDER_INDEX]) + { + pSystemExecuteParams->instructionList[i].operands.getInfoSensorProvider.providerIndex = + 
pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_PROVIDER_INDEX]; + + pSystemExecuteParams->instructionList[i].executed = NV_TRUE; + pSystemExecuteParams->instructionList[i].result = NV_OK; + pSystemExecuteParams->successfulInstructions++; + } + else + { + bForwardRmctrl = NV_TRUE; + } + + break; + } + + case NV2080_CTRL_THERMAL_SYSTEM_GET_INFO_SENSOR_TARGET_OPCODE: + { + NvU32 sensorIdx = pSystemExecuteParams->instructionList[i].operands.getInfoSensorTarget.sensorIndex; + + if (sensorIdx >= NV_ARRAY_ELEMENTS(pGpu->thermalSystemExecuteV2Cache.sensors)) + { + bForwardRmctrl = NV_TRUE; + break; + } + + if (pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_INDEX]) + { + pSystemExecuteParams->instructionList[i].operands.getInfoSensorTarget.targetIndex = + pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_INDEX]; + + pSystemExecuteParams->instructionList[i].executed = NV_TRUE; + pSystemExecuteParams->instructionList[i].result = NV_OK; + pSystemExecuteParams->successfulInstructions++; + } + else + { + bForwardRmctrl = NV_TRUE; + } + + break; + } + + case NV2080_CTRL_THERMAL_SYSTEM_GET_STATUS_SENSOR_READING_OPCODE: + { + NvU32 sensorIdx = pSystemExecuteParams->instructionList[i].operands.getStatusSensorReading.sensorIndex; + + if ((sensorIdx >= NV_ARRAY_ELEMENTS(pGpu->thermalSystemExecuteV2Cache.sensors)) || + !(pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].bIsCached[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_TYPE]) || + (pGpu->userSharedData.pMapBuffer == NULL)) + { + bForwardRmctrl = NV_TRUE; + break; + } + + RUSD_TEMPERATURE rusdTemperature; + RUSD_TEMPERATURE_TYPE temperatureType; + + temperatureType = RUSD_TEMPERATURE_TYPE_MAX; + switch (pGpu->thermalSystemExecuteV2Cache.sensors[sensorIdx].cache[THERMAL_SYSTEM_EXECUTE_V2_CACHE_ENTRY_TARGET_TYPE]) + { + case NV2080_CTRL_THERMAL_SYSTEM_TARGET_GPU: + { + temperatureType = RUSD_TEMPERATURE_TYPE_GPU; + break; + } + case NV2080_CTRL_THERMAL_SYSTEM_TARGET_MEMORY: + { + temperatureType = RUSD_TEMPERATURE_TYPE_MEMORY; + break; + } + case NV2080_CTRL_THERMAL_SYSTEM_TARGET_POWER_SUPPLY: + { + temperatureType = RUSD_TEMPERATURE_TYPE_POWER_SUPPLY; + break; + } + case NV2080_CTRL_THERMAL_SYSTEM_TARGET_BOARD: + { + temperatureType = RUSD_TEMPERATURE_TYPE_BOARD; + break; + } + } + + if (temperatureType != RUSD_TEMPERATURE_TYPE_MAX) + { + RUSD_READ_DATA((NV00DE_SHARED_DATA*)(pGpu->userSharedData.pMapBuffer), temperatures[temperatureType], &rusdTemperature); + } + + if ((temperatureType == RUSD_TEMPERATURE_TYPE_MAX) || + (rusdTemperature.lastModifiedTimestamp == RUSD_TIMESTAMP_INVALID)) + { + bForwardRmctrl = NV_TRUE; + break; + } + + + pSystemExecuteParams->instructionList[i].operands.getStatusSensorReading.value = + NV_TYPES_NV_TEMP_TO_CELSIUS_ROUNDED(rusdTemperature.temperature); + + pSystemExecuteParams->instructionList[i].executed = NV_TRUE; + pSystemExecuteParams->instructionList[i].result = NV_OK; + pSystemExecuteParams->successfulInstructions++; + + break; + } + + // Unknown opcode + default: + { + bForwardRmctrl = NV_TRUE; + } + } + } + + if (!bForwardRmctrl) + { + status = NV_OK; + } + else + { + status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_THERMAL_SYSTEM_EXECUTE_V2_PHYSICAL, + pSystemExecuteParams, sizeof(*pSystemExecuteParams)); + + subdeviceCtrlCmdThermalSystemExecuteV2_updateCache(pSubdevice, pSystemExecuteParams); + } + + 
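+    //
+    // At this point every instruction has either been serviced from
+    // pGpu->thermalSystemExecuteV2Cache, or the whole batch was forwarded
+    // once to physical RM and the cache refreshed from the results. For
+    // example, a batch containing only GET_INFO_SENSORS_AVAILABLE costs one
+    // RPC on the first call and no RPC on later calls once numSensors is
+    // cached.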
return status; + +} + diff --git a/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_internal_kernel.c b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_internal_kernel.c new file mode 100644 index 0000000..ffc867a --- /dev/null +++ b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_internal_kernel.c @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief This module contains the gpu control interfaces for the + * subdevice (NV20_SUBDEVICE_0) class. Subdevice-level control calls + * are directed unicast to the associated GPU. + * File contains ctrls related to general GPU + */ + +#include "gpu/gpu.h" +#include "gpu/subdevice/subdevice.h" +#include "ctrl/ctrl2080/ctrl2080internal.h" + +NV_STATUS +subdeviceCtrlCmdInternalGetChipInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pParams +) +{ + NvU32 i; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + pParams->chipSubRev = gpuGetChipSubRev_HAL(pGpu); + pParams->isCmpSku = gpuGetIsCmpSku_HAL(pGpu); + pParams->pciDeviceId = pGpu->idInfo.PCIDeviceID; + pParams->pciSubDeviceId = pGpu->idInfo.PCISubDeviceID; + pParams->pciRevisionId = pGpu->idInfo.PCIRevisionID; + + for (i = 0; i < NV_ARRAY_ELEMENTS(pParams->regBases); i++) + { + if (gpuGetRegBaseOffset_HAL(pGpu, i, &pParams->regBases[i]) != NV_OK) + pParams->regBases[i] = 0xFFFFFFFF; + } + + + return NV_OK; +} \ No newline at end of file diff --git a/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c new file mode 100644 index 0000000..decf25d --- /dev/null +++ b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c @@ -0,0 +1,464 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*!
+ * @file
+ * @brief This module contains the gpu control interfaces for the
+ *        subdevice (NV20_SUBDEVICE_0) class. Subdevice-level control calls
+ *        are directed unicast to the associated GPU.
+ *        File contains ctrls related to TMR engine object
+ */
+
+#include "core/core.h"
+
+
+#define NVOC_OBJTMR_H_PRIVATE_ACCESS_ALLOWED
+
+#include "core/locks.h"
+#include "gpu/subdevice/subdevice.h"
+#include "gpu/timer/objtmr.h"
+#include "rmapi/client.h"
+
+//
+// subdeviceCtrlCmdTimerCancel
+//
+// Lock Requirements:
+//      Assert that API lock and GPUs lock held on entry
+//
+NV_STATUS
+subdeviceCtrlCmdTimerCancel_IMPL
+(
+    Subdevice *pSubdevice
+)
+{
+    OBJGPU *pGpu;
+    OBJTMR *pTmr;
+
+    NV_ASSERT_OR_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE);
+
+    if (pSubdevice == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    pGpu = GPU_RES_GET_GPU(pSubdevice);
+    pTmr = GPU_GET_TIMER(pGpu);
+
+    if (pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE)
+    {
+        tmrEventCancel(pTmr, pSubdevice->pTimerEvent);
+
+        pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
+    }
+    return NV_OK;
+}
+
+static NV_STATUS
+gpuControlTimerCallback(OBJGPU *pGpu, OBJTMR *pTmr, TMR_EVENT *pTmrEvent)
+{
+    Subdevice *pSubDevice = reinterpretCast(pTmrEvent->pUserData, Subdevice *);
+    PEVENTNOTIFICATION pNotifyEvent = inotifyGetNotificationList(staticCast(pSubDevice, INotifier));
+
+    if (pSubDevice->notifyActions[NV2080_NOTIFIERS_TIMER] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE)
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "callback is called but the timer is not scheduled\n");
+        return NV_ERR_INVALID_STATE;
+    }
+
+    // Mark the timer as processed (no self-rescheduling for now)
+    pSubDevice->notifyActions[NV2080_NOTIFIERS_TIMER] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
+
+    // Find timer event
+    while ((pNotifyEvent != NULL) && (pNotifyEvent->NotifyIndex != NV2080_NOTIFIERS_TIMER))
+    {
+        pNotifyEvent = pNotifyEvent->Next;
+    }
+    if (pNotifyEvent == NULL)
+    {
+        NV_PRINTF(LEVEL_INFO, "timer event is missing\n");
+        return NV_ERR_INVALID_STATE;
+    }
+
+    // perform a direct callback to the client
+    if (pNotifyEvent->Data != NvP64_NULL)
+    {
+        NvU64 currentTime = tmrGetTime_HAL(pGpu, pTmr);
+        osEventNotification(pGpu, pNotifyEvent, NV2080_NOTIFIERS_TIMER,
+                            &currentTime, sizeof(currentTime));
+    }
+
+static NV_STATUS
+gpuControlTimerCallback(OBJGPU *pGpu, OBJTMR *pTmr, TMR_EVENT *pTmrEvent)
+{
+    Subdevice *pSubDevice = reinterpretCast(pTmrEvent->pUserData, Subdevice *);
+    PEVENTNOTIFICATION pNotifyEvent = inotifyGetNotificationList(staticCast(pSubDevice, INotifier));
+
+    if (pSubDevice->notifyActions[NV2080_NOTIFIERS_TIMER] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE)
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "callback is called but the timer is not scheduled\n");
+        return NV_ERR_INVALID_STATE;
+    }
+
+    // Mark the timer as processed (no self-rescheduling for now)
+    pSubDevice->notifyActions[NV2080_NOTIFIERS_TIMER] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
+
+    // Find the timer event
+    while ((pNotifyEvent != NULL) && (pNotifyEvent->NotifyIndex != NV2080_NOTIFIERS_TIMER))
+    {
+        pNotifyEvent = pNotifyEvent->Next;
+    }
+    if (pNotifyEvent == NULL)
+    {
+        NV_PRINTF(LEVEL_INFO, "timer event is missing\n");
+        return NV_ERR_INVALID_STATE;
+    }
+
+    // Perform a direct callback to the client
+    if (pNotifyEvent->Data != NvP64_NULL)
+    {
+        NvU64 currentTime = tmrGetTime_HAL(pGpu, pTmr);
+        osEventNotification(pGpu, pNotifyEvent, NV2080_NOTIFIERS_TIMER,
+                            &currentTime, sizeof(currentTime));
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_INFO, "timer callback pointer is missing\n");
+        return NV_ERR_INVALID_STATE;
+    }
+    return NV_OK;
+}
+
+static NV_STATUS
+timerSchedule
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS *pTimerScheduleParams
+)
+{
+    OBJGPU *pGpu;
+    OBJTMR *pTmr;
+    PEVENTNOTIFICATION pNotifyEvent;
+
+    if (pSubdevice == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    pGpu = GPU_RES_GET_GPU(pSubdevice);
+    pTmr = GPU_GET_TIMER(pGpu);
+
+    pNotifyEvent = inotifyGetNotificationList(staticCast(pSubdevice, INotifier));
+
+    if (pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE)
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "timerSchedule: the timer is already scheduled for this subdevice\n");
+        return NV_ERR_INVALID_STATE;
+    }
+
+    // Validate the timer event
+    while ((pNotifyEvent != NULL) && (pNotifyEvent->NotifyIndex != NV2080_NOTIFIERS_TIMER))
+    {
+        pNotifyEvent = pNotifyEvent->Next;
+    }
+    if (pNotifyEvent == NULL)
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "timerSchedule: timer event is missing\n");
+        return NV_ERR_INVALID_STATE;
+    }
+    if (((pNotifyEvent->NotifyType != NV01_EVENT_KERNEL_CALLBACK) && (pNotifyEvent->NotifyType != NV01_EVENT_KERNEL_CALLBACK_EX)) ||
+        (pNotifyEvent->Data == NvP64_NULL))
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "timerSchedule: cmd 0x%x: callback function is missing\n",
+                  NV2080_CTRL_CMD_TIMER_SCHEDULE);
+        return NV_ERR_INVALID_STATE;
+    }
+
+    // Mark the timer as scheduled (no self-rescheduling for now). Set the
+    // flag before arming the timer, since the callback may fire right away.
+    pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
+
+    if (pSubdevice->pTimerEvent != NULL)
+    {
+        if (tmrEventOnList(pTmr, pSubdevice->pTimerEvent))
+        {
+            tmrEventCancel(pTmr, pSubdevice->pTimerEvent);
+        }
+    }
+    else
+    {
+        NV_ASSERT_OK_OR_RETURN(tmrEventCreate(pTmr,
+                                              &pSubdevice->pTimerEvent,
+                                              gpuControlTimerCallback,
+                                              pSubdevice,
+                                              TMR_FLAGS_NONE));
+    }
+
+    if (DRF_VAL(2080, _CTRL_TIMER_SCHEDULE_FLAGS, _TIME, pTimerScheduleParams->flags) == NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_ABS)
+    {
+        tmrEventScheduleAbs(pTmr, pSubdevice->pTimerEvent, pTimerScheduleParams->time_nsec);
+    }
+    else
+    {
+        tmrEventScheduleRel(pTmr, pSubdevice->pTimerEvent, pTimerScheduleParams->time_nsec);
+    }
+
+    return NV_OK;
+}
+
+//
+// subdeviceCtrlCmdTimerSchedule
+//
+// Lock Requirements:
+//      Assert that API lock and GPUs lock held on entry
+//
+NV_STATUS
+subdeviceCtrlCmdTimerSchedule_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS *pParams
+)
+{
+    CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
+    RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
+
+    if (pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED)
+    {
+        NV_ASSERT_OR_RETURN(rmDeviceGpuLockIsOwner(GPU_RES_GET_GPU(pSubdevice)->gpuInstance),
+                            NV_ERR_INVALID_LOCK_STATE);
+    }
+    else
+    {
+        NV_ASSERT_OR_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE);
+    }
+
+    return timerSchedule(pSubdevice, pParams);
+}
+
+//
+// subdeviceCtrlCmdTimerGetTime
+//
+// Lock Requirements:
+//      Assert that API lock and GPUs lock held on entry
+//      Timer callback list accessed in tmrService at DPC
+//
+NV_STATUS
+subdeviceCtrlCmdTimerGetTime_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_TIMER_GET_TIME_PARAMS *pParams
+)
+{
+    CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
+    RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
+
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
+ 
OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + + if ((pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED) && + (pRmCtrlParams->flags & NVOS54_FLAGS_LOCK_BYPASS)) + { + if (pTmr->tmrChangePending) + { + return NV_ERR_STATE_IN_USE; + } + } + else + { + NV_ASSERT_OR_RETURN(rmapiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance), + NV_ERR_INVALID_LOCK_STATE); + } + + tmrGetCurrentTime(pTmr, &pParams->time_nsec); + + return NV_OK; +} + +// +// subdeviceCtrlCmdTimerGetRegisterOffset +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +subdeviceCtrlCmdTimerGetRegisterOffset_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS *pTimerRegOffsetParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + return gpuGetRegBaseOffset_HAL(pGpu, NV_REG_BASE_TIMER, &pTimerRegOffsetParams->tmr_offset); +} + +/*! + * @brief Provides correlation information between GPU time and CPU time. + * + * @param[in] pSubDevice + * @param[in] pParams + * + * @return NV_OK Success + * @return NV_ERR_INVALID_ARGUMENT Invalid argument + * @return NV_ERR_NOT_SUPPORTED Unsupported CPU clock id + */ +NV_STATUS +subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + NV_STATUS status = NV_OK; + NvU8 i; + NvU32 sec, usec; + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + NV_CHECK_OR_RETURN(LEVEL_SILENT, + (pParams->sampleCount <= NV2080_CTRL_TIMER_GPU_CPU_TIME_MAX_SAMPLES), + NV_ERR_INVALID_ARGUMENT); + + if (RMCFG_FEATURE_PLATFORM_GSP) + { + NV_ASSERT_OR_RETURN( + FLD_TEST_DRF(2080, _TIMER_GPU_CPU_TIME_CPU_CLK_ID, _PROCESSOR, _GSP, + pParams->cpuClkId), + NV_ERR_INVALID_ARGUMENT); + } + else if (FLD_TEST_DRF(2080, _TIMER_GPU_CPU_TIME_CPU_CLK_ID, _PROCESSOR, _GSP, + pParams->cpuClkId)) + { + // + // If GSP time is requested, forward the whole request to GSP. + // This can only be supported in GSP-RM offload mode. + // + if (!IS_FW_CLIENT(pGpu)) + return NV_ERR_NOT_SUPPORTED; + + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + return pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO, + pParams, sizeof(*pParams)); + } + else + { + NV_CHECK_OR_RETURN(LEVEL_SILENT, + FLD_TEST_DRF(2080, _TIMER_GPU_CPU_TIME_CPU_CLK_ID, _PROCESSOR, _CPU, + pParams->cpuClkId), + NV_ERR_INVALID_ARGUMENT); + } + + switch (DRF_VAL(2080, _TIMER_GPU_CPU_TIME_CPU_CLK_ID, _SOURCE, pParams->cpuClkId)) + { + case NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_OSTIME: + { + for (i = 0; i < pParams->sampleCount; i++) + { + osGetSystemTime(&sec, &usec); + pParams->samples[i].cpuTime = (((NvU64)sec) * 1000000) + usec; + status = tmrGetCurrentTime(pTmr, + &pParams->samples[i].gpuTime); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Could not get GPU time. 
status=0x%08x\n",
+                              status);
+                    break;
+                }
+                pParams->samples[i].gpuTime += tmrGetPtimerOffsetNs_HAL(pGpu, pTmr);
+            }
+            break;
+        }
+
+        case NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PLATFORM_API:
+        {
+            //
+            // Reading CPU time and GPU time is a serial process, so we need a
+            // technique to mitigate the effects of preemption. We read the
+            // timestamps in a zipper pattern, c G c G c G c, into an array
+            // storing all 7 values, find the two c values closest together,
+            // and report the sync point as the average of those two c values
+            // and the G between them. One complication is that reading a GPU's
+            // PTIMER directly from the CPU must be done via two non-adjacent
+            // BAR0-mapped memory locations for the low 32 bits and high 32
+            // bits, and there is no way to read both atomically. To handle
+            // this, the GPU time read does the high bits, the low bits, and
+            // the high bits again; if the two high values differ, we repeat
+            // the process until Ghi1 and Ghi2 match. Once they match, we use
+            // that value as the high bits, and take the low bits and the CPU
+            // time from the zipper.
+            //
+            const NvU32 numTimerSamples = 3;  // We take (hardcoded) 3 GPU timestamps.
+            NvU32 gpuTimeLo[3];               // Holds numTimerSamples GPU timestamps.
+            NvU64 cpuTime[4];                 // Holds numTimerSamples+1 CPU timestamps.
+            NvU64 min;
+            NvU32 closestPairBeginIndex;
+            NvU32 gpuTimeHiOld;
+            NvU32 gpuTimeHiNew;
+            NvU32 i;
+
+            gpuTimeHiNew = tmrReadTimeHiReg_HAL(pGpu, pTmr, NULL);
+
+            do
+            {
+                gpuTimeHiOld = gpuTimeHiNew;
+                for (i = 0; i < numTimerSamples; i++)
+                {
+                    osGetPerformanceCounter(&cpuTime[i]);
+                    gpuTimeLo[i] = tmrReadTimeLoReg_HAL(pGpu, pTmr, NULL);
+                }
+
+                osGetPerformanceCounter(&cpuTime[i]);
+
+                // Read GPU TIME_1 (high) again to detect wrap-around.
+                gpuTimeHiNew = tmrReadTimeHiReg_HAL(pGpu, pTmr, NULL);
+            } while (gpuTimeHiNew != gpuTimeHiOld);
+
+            // Find i such that cpuTime[i+1] - cpuTime[i] is minimal,
+            // i.e. find the closest pair of cpuTime samples.
+            min = cpuTime[1] - cpuTime[0];
+            closestPairBeginIndex = 0;
+            for (i = 0; i < numTimerSamples; i++)
+            {
+                if ((cpuTime[i+1] - cpuTime[i]) < min)
+                {
+                    closestPairBeginIndex = i;
+                    min = cpuTime[i+1] - cpuTime[i];
+                }
+            }
+
+            pParams->samples[0].gpuTime = ((((NvU64)gpuTimeHiNew) << 32) |
+                                           gpuTimeLo[closestPairBeginIndex]) + tmrGetPtimerOffsetNs_HAL(pGpu, pTmr);
+            pParams->samples[0].cpuTime = (cpuTime[closestPairBeginIndex] +
+                                           cpuTime[closestPairBeginIndex + 1]) / 2;
+            NV_PRINTF(LEVEL_INFO,
+                      "GPUTime = %llx CPUTime = %llx\n",
+                      pParams->samples[0].gpuTime, pParams->samples[0].cpuTime);
+            break;
+        }
+
+        case NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_TSC:
+        {
+            for (i = 0; i < pParams->sampleCount; i++)
+            {
+                status = tmrGetGpuAndCpuTimestampPair_HAL(pGpu, pTmr, &pParams->samples[i].gpuTime, &pParams->samples[i].cpuTime);
+                if (status != NV_OK)
+                {
+                    NV_PRINTF(LEVEL_ERROR,
+                              "Could not get CPU GPU time. status=0x%08x\n",
+                              status);
+                    break;
+                }
+            }
+            break;
+        }
+        default:
+        {
+            status = NV_ERR_NOT_SUPPORTED;
+            break;
+        }
+    }
+
+    return status;
+}
+
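The closest-pair selection used by the _PLATFORM_API case above can be shown in isolation: averaging the two CPU reads that landed closest together brackets the GPU read between CPU reads that were provably not preempted for long. A standalone sketch (plain C, no RM dependencies; the function name and sample values are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Given CPU samples c0..c3 and GPU low-word samples G0..G2 captured in the
 * zipper order c G c G c G c, pick the index i minimizing c[i+1]-c[i] and
 * report the sync point as (midpoint of that c pair, the G between them). */
static void pickSyncPoint(const uint64_t cpu[4], const uint32_t gpuLo[3],
                          uint64_t *cpuSync, uint32_t *gpuSync)
{
    uint32_t best = 0;
    uint64_t min = cpu[1] - cpu[0];

    for (uint32_t i = 1; i < 3; i++)
    {
        if ((cpu[i + 1] - cpu[i]) < min)
        {
            min = cpu[i + 1] - cpu[i];
            best = i;
        }
    }
    *cpuSync = (cpu[best] + cpu[best + 1]) / 2; /* average of the closest pair */
    *gpuSync = gpuLo[best];                     /* the G read between them     */
}

int main(void)
{
    const uint64_t cpu[4]   = {1000, 1400, 1410, 1900}; /* (1400,1410) is closest */
    const uint32_t gpuLo[3] = {50, 61, 75};
    uint64_t cpuSync;
    uint32_t gpuSync;

    pickSyncPoint(cpu, gpuLo, &cpuSync, &gpuSync);
    printf("cpuSync=%llu gpuSync=%u\n", (unsigned long long)cpuSync, gpuSync); /* 1405, 61 */
    return 0;
}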
diff --git a/src/nvidia/src/kernel/gpu/timer/timer.c b/src/nvidia/src/kernel/gpu/timer/timer.c
new file mode 100644
index 0000000..9c07467
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/timer/timer.c
@@ -0,0 +1,1836 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file timer.c
+ * @brief Timer Object Function Definitions.
+ */
+
+/* ------------------------ Includes ---------------------------------------- */
+#include "gpu/timer/objtmr.h"
+#include "class/cl0004.h" // NV004_NOTIFIERS_SET_ALARM_NOTIFY
+#include "gpu/gpu_resource.h"
+
+/* ------------------------ Static Function Prototypes ---------------------- */
+static TMR_EVENT_PVT * _tmrPullCallbackFromHead (OBJTMR *);
+static void _tmrScanCallback(OBJTMR *, void *);
+static void _tmrScanCallbackOSTimer(OBJTMR *, TMR_EVENT_PVT *);
+static TMR_EVENT_PVT * _tmrGetNextFreeCallback(OBJTMR *);
+static NV_STATUS _tmrInsertCallback(OBJTMR *, TMR_EVENT_PVT *, NvU64);
+static void _tmrInsertCallbackInList(OBJGPU *pGpu, OBJTMR *pTmr, TMR_EVENT_PVT *pEvent);
+static void _tmrStateLoadCallbacks(OBJGPU *, OBJTMR *);
+static NV_STATUS _tmrGetNextAlarmTime(OBJTMR *, NvU64 *);
+static void _tmrScheduleCallbackInterrupt(OBJGPU *, OBJTMR *, NvU64);
+
+NV_STATUS
+tmrConstructEngine_IMPL
+(
+    OBJGPU *pGpu,
+    OBJTMR *pTmr,
+    ENGDESCRIPTOR engDesc
+)
+{
+    // Create the granular lock for the SWRL timer callback
+    pTmr->pTmrSwrlLock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged());
+    if (pTmr->pTmrSwrlLock == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Alloc spinlock failed\n");
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+
+    tmrInitCallbacks(pTmr);
+    osInit1HzCallbacks(pTmr);
+
+    pTmr->retryTimes = 3;
+
+    pTmr->errorCount = 0;
+
+    pTmr->pGrTickFreqRefcnt = NULL;
+
+    return NV_OK;
+}
+
+void
+tmrDestruct_IMPL(OBJTMR *pTmr)
+{
+    // Delete the granular lock for the SWRL timer callback
+    if (pTmr->pTmrSwrlLock != NULL)
+    {
+        portSyncSpinlockDestroy(pTmr->pTmrSwrlLock);
+        pTmr->pTmrSwrlLock = NULL;
+    }
+
+    objDelete(pTmr->pGrTickFreqRefcnt);
+    pTmr->pGrTickFreqRefcnt = NULL;
+
+    osDestroy1HzCallbacks(pTmr);
+}
+
+/*!
+ * Simple utility function, checks if there are any queued callbacks
+ */
+static NV_INLINE NvBool tmrEventsExist(OBJTMR *pTmr)
+{
+    return pTmr->pRmActiveEventList != NULL;
+}
+
+static NV_INLINE NvBool tmrIsOSTimer(OBJTMR *pTmr, TMR_EVENT *pEventPublic)
+{
+    return ((pEventPublic != NULL) &&
+            (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS) ||
+             (pEventPublic->flags & TMR_FLAG_USE_OS_TIMER)));
+}
+
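Before the allocation routine below, it may help to see the intended life-cycle of the TMR_EVENT API end to end. This is a hedged sketch (the callback body, the 1 ms delay, and the example function names are made-up placeholders; the TIMEPROC signature and TMR_FLAGS_NONE are the types and flags used throughout this file):

// Sketch of the create/schedule/destroy lifecycle for the TMR_EVENT API
// defined below. Illustrative only.
static NV_STATUS exampleProc(OBJGPU *pGpu, OBJTMR *pTmr, TMR_EVENT *pEvent)
{
    // One-shot work; re-arm explicitly if periodic behavior is wanted, e.g.:
    // return tmrEventScheduleRel(pTmr, pEvent, periodNs);
    return NV_OK;
}

static NV_STATUS exampleUse(OBJTMR *pTmr)
{
    TMR_EVENT *pEvent = NULL;

    NV_ASSERT_OK_OR_RETURN(tmrEventCreate(pTmr, &pEvent, exampleProc,
                                          NULL /* pUserData */, TMR_FLAGS_NONE));
    NV_ASSERT_OK_OR_RETURN(tmrEventScheduleRel(pTmr, pEvent, 1000000 /* 1 ms */));

    // ... later: destruction cancels the event if it is still pending.
    tmrEventDestroy(pTmr, pEvent);
    return NV_OK;
}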
+/*!
+ * Allocates the necessary memory for storing a callback in the timer.
+ *
+ * @param[out] ppEvent A reference to the client's pointer.
+ */
+NV_STATUS tmrEventCreate_IMPL
+(
+    OBJTMR *pTmr,
+    TMR_EVENT **ppEventPublic,
+    TIMEPROC Proc,
+    void *pUserData,
+    NvU32 flags
+)
+{
+    TMR_EVENT_PVT **ppEvent = (TMR_EVENT_PVT **)ppEventPublic;
+    NV_STATUS status = NV_OK;
+
+    *ppEvent = portMemAllocNonPaged(sizeof(TMR_EVENT_PVT));
+    if (*ppEvent == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Failed to allocate timer event\n");
+        return NV_ERR_NO_MEMORY;
+    }
+    (*ppEvent)->bLegacy = NV_FALSE;
+    (*ppEvent)->bInUse = NV_FALSE;
+    (*ppEvent)->pNext = NULL;
+    (*ppEventPublic)->pTimeProc = Proc;
+    (*ppEventPublic)->pUserData = pUserData;
+    (*ppEventPublic)->flags = flags;
+
+    if (tmrIsOSTimer(pTmr, *ppEventPublic))
+    {
+        status = tmrEventCreateOSTimer_HAL(pTmr, *ppEventPublic);
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "Failed to create OS timer\n");
+        }
+    }
+    return status;
+}
+
+static void
+_tmrScheduleCallbackInterrupt
+(
+    OBJGPU *pGpu,
+    OBJTMR *pTmr,
+    NvU64 alarmTime
+)
+{
+    //
+    // Don't schedule the interrupt if we are polling. The interrupt can be
+    // routed to a different device, which could get confused. Also we don't
+    // want the extra priv writes.
+    //
+    if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS))
+        return;
+
+    if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS))
+    {
+        NvU64 currentTime;
+        //
+        // Bugs 5071665 and 4417666: high spikes of DPC activity on ARM-based
+        // systems. The issue is caused by an ARM compiler optimization bug
+        // that performs a 32-bit subtraction instead of a 64-bit subtraction
+        // cast to 32 bits.
+        // Fix: add the 'volatile' qualifier to 'countdownTime' so that the
+        // computation is not optimized.
+        //
+        volatile NvU32 countdownTime = 0;
+
+        tmrGetCurrentTime(pTmr, &currentTime);
+        if (currentTime < alarmTime)
+        {
+            countdownTime = NvU64_LO32(alarmTime - currentTime);
+        }
+        tmrSetCountdown_HAL(pGpu, pTmr, countdownTime, 0, NULL);
+    }
+    else
+    {
+        tmrSetAlarm_HAL(pGpu, pTmr, alarmTime, NULL);
+    }
+}
+
+void
+tmrResetCallbackInterrupt_IMPL
+(
+    OBJGPU *pGpu,
+    OBJTMR *pTmr
+)
+{
+    if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS))
+    {
+        tmrSetCountdownIntrReset_HAL(pGpu, pTmr, NULL);
+    }
+    else
+    {
+        tmrSetAlarmIntrReset_HAL(pGpu, pTmr, NULL);
+    }
+}
+
+NvBool
+tmrGetCallbackInterruptPending_IMPL
+(
+    OBJGPU *pGpu,
+    OBJTMR *pTmr
+)
+{
+    if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS))
+    {
+        return tmrGetCountdownPending_HAL(pGpu, pTmr, NULL);
+    }
+    else
+    {
+        return tmrGetAlarmPending_HAL(pGpu, pTmr, NULL);
+    }
+}
+
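Restating the guarded countdown computation above in isolation: the delta must be computed in 64 bits, a deadline already in the past must clamp to zero, and only then is the value truncated to the 32-bit width of the countdown register. A minimal sketch (the helper name is hypothetical; NvU64_LO32 is the macro used above):

// Hypothetical helper mirroring the computation in _tmrScheduleCallbackInterrupt.
static NV_INLINE NvU32
exampleCountdownFromTimes(NvU64 currentTime, NvU64 alarmTime)
{
    // 64-bit subtract first: an alarm already in the past yields 0 rather
    // than a huge wrapped countdown.
    NvU64 delta = (currentTime < alarmTime) ? (alarmTime - currentTime) : 0;

    return NvU64_LO32(delta); // truncate to the 32-bit countdown register width
}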
+/*!
+ * Cancels a given callback, marking it invalid and preventing it from being executed.
+ * Updates the next alarm time appropriately.
+ *
+ * @param[in] pEvent The callback to be cancelled
+ */
+void tmrEventCancel_IMPL
+(
+    OBJTMR *pTmr,
+    TMR_EVENT *pEventPublic
+)
+{
+    NvU64 nextAlarmTime;
+    OBJGPU *pGpu = ENG_GET_GPU(pTmr);
+    TMR_EVENT_PVT *pEvent = (TMR_EVENT_PVT *)pEventPublic;
+    TMR_EVENT_PVT *pChaser = pTmr->pRmActiveEventList;
+    NvBool bRemovedHead = pChaser == pEvent;
+
+    if (pEventPublic == NULL)
+    {
+        return;
+    }
+
+    NV_ASSERT(!pEvent->bLegacy);
+
+    pEvent->bInUse = NV_FALSE;
+
+    if (tmrIsOSTimer(pTmr, pEventPublic))
+    {
+        NV_STATUS status = NV_OK;
+        _tmrScanCallbackOSTimer(pTmr, pEvent);
+        status = tmrEventCancelOSTimer_HAL(pTmr, pEventPublic);
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "Failed in cancel of OS timer callback\n");
+        }
+        return;
+    }
+
+    if (bRemovedHead)
+    {
+        pTmr->pRmActiveEventList = pEvent->pNext;
+
+        // Need to update the alarm time
+        if (NV_OK == _tmrGetNextAlarmTime(pTmr, &nextAlarmTime))
+        {
+            _tmrScheduleCallbackInterrupt(pGpu, pTmr, nextAlarmTime);
+        }
+        else
+        {
+            // List is empty! Disable PTIMER interrupt.
+            tmrRmCallbackIntrDisable(pTmr, pGpu);
+        }
+    }
+    else
+    {
+        while (pChaser != NULL && pChaser->pNext != pEvent)
+        {
+            pChaser = pChaser->pNext;
+        }
+        if (pChaser == NULL)
+        {
+            // The callback wasn't currently scheduled, nothing to change.
+            return;
+        }
+        pChaser->pNext = pEvent->pNext;
+    }
+}
+
+/*!
+ * Frees the memory used for maintaining a given callback in the timer.
+ * Currently automatically calls cancel on the event.
+ *
+ * @param[in] pEvent The callback to cancel and free.
+ */
+void tmrEventDestroy_IMPL
+(
+    OBJTMR *pTmr,
+    TMR_EVENT *pEventPublic
+)
+{
+    TMR_EVENT_PVT *pEvent = (TMR_EVENT_PVT *)pEventPublic;
+
+    if (pEvent != NULL)
+    {
+        NV_ASSERT(!pEvent->bLegacy);
+        if (tmrIsOSTimer(pTmr, pEventPublic))
+        {
+            _tmrScanCallbackOSTimer(pTmr, pEvent);
+
+            // OS timer destruction also cancels the timer
+            tmrEventDestroyOSTimer_HAL(pTmr, pEventPublic);
+        }
+        else
+        {
+            tmrEventCancel(pTmr, pEventPublic);
+        }
+        portMemFree(pEvent);
+    }
+}
+
+/*!
+ * Returns the time until the next callback for a given event.
+ *
+ * @param[in] pEvent The event whose remaining time needs to be determined.
+ */
+NV_STATUS
+tmrEventTimeUntilNextCallback_IMPL
+(
+    OBJTMR *pTmr,
+    TMR_EVENT *pEventPublic,
+    NvU64 *pTimeUntilCallbackNs
+)
+{
+    NvU64 currentTime;
+    NvU64 nextAlarmTime;
+
+    TMR_EVENT_PVT *pEvent = (TMR_EVENT_PVT *)pEventPublic;
+
+    if (tmrIsOSTimer(pTmr, pEventPublic))
+    {
+        currentTime = osGetMonotonicTimeNs();
+        // timens corresponds to relative time for the OS timer
+        NV_CHECK_OR_RETURN(LEVEL_ERROR, portSafeAddU64(pEvent->timens, pEvent->startTimeNs, &nextAlarmTime),
+                           NV_ERR_INVALID_ARGUMENT);
+    }
+    else
+    {
+        NV_ASSERT_OK_OR_RETURN(tmrGetCurrentTime(pTmr, &currentTime));
+        // timens corresponds to absolute time in the PTIMER case
+        nextAlarmTime = pEvent->timens;
+    }
+    if (currentTime > nextAlarmTime)
+        return NV_ERR_INVALID_STATE;
+
+    *pTimeUntilCallbackNs = nextAlarmTime - currentTime;
+    return NV_OK;
+}
+
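The two deadline conventions used above are easy to misread: for OS-timer-backed events, timens stores a relative duration added to startTimeNs, while for PTIMER-backed events, timens already holds the absolute deadline. A minimal restatement (the helper name is hypothetical; the fields are the TMR_EVENT_PVT fields used above, with overflow checking omitted for brevity):

// Hypothetical helper restating the deadline computation in
// tmrEventTimeUntilNextCallback_IMPL above.
static NvU64
exampleDeadlineNs(const TMR_EVENT_PVT *pEvent, NvBool bOsTimer)
{
    return bOsTimer ? (pEvent->startTimeNs + pEvent->timens) // relative to schedule time
                    : pEvent->timens;                        // absolute PTIMER time
}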
+/*!
+ * Callback invoked by NV0004_CTRL_CMD_TMR_SET_ALARM_NOTIFY
+ */
+static NV_STATUS
+_nv0004CtrlCmdTmrSetAlarmNotifyCallback(OBJGPU *pGpu, OBJTMR *pTmr, TMR_EVENT *pTmrEvent)
+{
+    PEVENTNOTIFICATION pNotifyEvent = pTmrEvent->pUserData;
+    NV_STATUS status = NV_OK;
+
+    // Perform a direct callback to the client
+    if (NvP64_VALUE(pNotifyEvent->Data) != NULL)
+    {
+        // One-shot signal
+        status = osNotifyEvent(pGpu, pNotifyEvent, NV004_NOTIFIERS_SET_ALARM_NOTIFY, 0, NV_OK);
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "failed to notify event in callback, status: 0x%08x\n", status);
+            return status;
+        }
+    }
+
+    return status;
+}
+
+NV_STATUS
+tmrapiCtrlCmdTmrSetAlarmNotify_IMPL
+(
+    TimerApi *pTimerApi,
+    NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS *pParams
+)
+{
+    NV_STATUS status;
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pTimerApi);
+    OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
+    TMR_EVENT *pTmrEvent = NULL;
+    PEVENTNOTIFICATION pNotifyEvent = inotifyGetNotificationList(staticCast(pTimerApi, INotifier));
+
+    // Validate the timer event
+    while ((pNotifyEvent != NULL) && (pNotifyEvent->hEvent != pParams->hEvent))
+    {
+        pNotifyEvent = pNotifyEvent->Next;
+    }
+
+    if (pNotifyEvent == NULL)
+    {
+        NV_PRINTF(LEVEL_INFO, "timer event is missing\n");
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    // Create an OS timer if not already created
+    if (pNotifyEvent->pTmrEvent == NULL)
+    {
+        status = tmrEventCreate(pTmr,
+                                &pTmrEvent,
+                                _nv0004CtrlCmdTmrSetAlarmNotifyCallback,
+                                (void*)pNotifyEvent,
+                                0);
+        if (status != NV_OK)
+            return status;
+
+        pNotifyEvent->pGpu = pGpu;
+        pNotifyEvent->pTmrEvent = pTmrEvent;
+    }
+    else
+    {
+        pTmrEvent = pNotifyEvent->pTmrEvent;
+    }
+
+    // Schedule the timer
+    status = tmrEventScheduleRel(pTmr, pTmrEvent, pParams->alarmTimeNsecs);
+    if (status != NV_OK)
+        return status;
+
+    return NV_OK;
+}
+
+NV_STATUS tmrGetCurrentTimeEx_IMPL
+(
+    OBJTMR *pTmr,
+    NvU64 *pTime,
+    THREAD_STATE_NODE *pThreadState
+)
+{
+    OBJGPU *pGpu = ENG_GET_GPU(pTmr);
+
+    if (API_GPU_IN_RESET_SANITY_CHECK(pGpu))
+    {
+        *pTime = (NvU64)(~0);
+        return NV_ERR_GPU_IN_FULLCHIP_RESET;
+    }
+
+    if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu))
+    {
+        *pTime = (NvU64)(~0);
+        return NV_ERR_GPU_IS_LOST;
+    }
+
+    *pTime = tmrGetTimeEx_HAL(pGpu, pTmr, pThreadState);
+
+    return NV_OK;
+}
+
+NV_STATUS tmrGetCurrentTime_IMPL
+(
+    OBJTMR *pTmr,
+    NvU64 *pTime
+)
+{
+    return tmrGetCurrentTimeEx(pTmr, pTime, NULL);
+}
+
+/*!
+ * Returns the difference between the current time and a given start time.
+ */
+NV_STATUS tmrGetCurrentDiffTime_IMPL
+(
+    OBJTMR *pTmr,
+    NvU64 startTime,
+    NvU64 *pDiffTime
+)
+{
+    NvU64 currentTime;
+    NV_STATUS rmStatus;
+
+    rmStatus = tmrGetCurrentTime(pTmr, &currentTime);
+
+    *pDiffTime = currentTime - startTime;
+
+    return rmStatus;
+}
+
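A typical use of tmrGetCurrentDiffTime is elapsed-time measurement around an operation. A short sketch (the helper name and the operation being timed are hypothetical):

// Hypothetical usage of tmrGetCurrentTime/tmrGetCurrentDiffTime.
static NV_STATUS exampleMeasure(OBJTMR *pTmr, NvU64 *pElapsedNs)
{
    NvU64 start;

    NV_ASSERT_OK_OR_RETURN(tmrGetCurrentTime(pTmr, &start));
    // ... operation being timed ...
    return tmrGetCurrentDiffTime(pTmr, start, pElapsedNs);
}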
+/*!
+ * Schedule a callback relative to the current time, specified in units of
+ * nanoseconds. Callbacks should be expected to be late: this is not an RTOS,
+ * and a scheduling delay has been implemented to fix some race condition bugs.
+ * The user has to provide a structure in memory for the timer to use.
+ *
+ * @note For statically defined events it is recommended to preallocate them
+ * all at the appropriate stage in the task life-cycle and deallocate them at
+ * the corresponding end of the life-cycle. For dynamically generated events,
+ * consider the effects on fragmentation and potentially defer deallocation.
+ *
+ * @param[in] pEvent Callback memory structure, provided by user.
+ * @param[in] RelTime Number of nanoseconds from now to call Proc.
+ *
+ * @returns Status
+ */
+NV_STATUS tmrEventScheduleRel_IMPL
+(
+    OBJTMR *pTmr,
+    TMR_EVENT *pEvent,
+    NvU64 RelTime
+)
+{
+    NvU64 AbsTime, currentTime;
+    NV_STATUS rmStatus;
+
+    if ((pEvent != NULL) && tmrIsOSTimer(pTmr, pEvent))
+    {
+        TMR_EVENT_PVT *pEventPvt = (TMR_EVENT_PVT *)pEvent;
+
+        NV_CHECK_OK(rmStatus, LEVEL_ERROR,
+                    tmrEventScheduleRelOSTimer_HAL(pTmr, pEvent, RelTime));
+        //
+        // Capture the system time here; this helps in rescheduling callbacks
+        // if there is a state unload before the OS timer callback arrives.
+        //
+        pEventPvt->startTimeNs = osGetMonotonicTimeNs();
+        if (!tmrEventOnList(pTmr, pEvent))
+        {
+            _tmrInsertCallback(pTmr, pEventPvt, RelTime);
+        }
+        return rmStatus;
+    }
+
+    rmStatus = tmrGetCurrentTime(pTmr, &currentTime);
+    if (rmStatus != NV_OK)
+        return rmStatus;
+
+    NV_CHECK_OR_RETURN(LEVEL_ERROR, portSafeAddU64(currentTime, RelTime, &AbsTime), NV_ERR_INVALID_ARGUMENT);
+
+    return tmrEventScheduleAbs(pTmr, pEvent, AbsTime);
+}
+
+/*!
+ * Warning! This code is dangerous, it can cause the whole system to crash. It will be
+ * removed as soon as possible! Use the new API!
+ * It remains only for transitional purposes.
+ */
+NV_STATUS tmrScheduleCallbackRel_IMPL
+(
+    OBJTMR *pTmr,
+    TIMEPROC_OBSOLETE Proc,
+    void *Object,
+    NvU64 RelTime,
+    NvU32 Flags,
+    NvU32 ChId
+)
+{
+    NvU64 AbsTime, currentTime;
+    NV_STATUS rmStatus;
+
+    rmStatus = tmrGetCurrentTime(pTmr, &currentTime);
+    if (rmStatus != NV_OK)
+        return rmStatus;
+
+    NV_CHECK_OR_RETURN(LEVEL_ERROR, portSafeAddU64(currentTime, RelTime, &AbsTime), NV_ERR_INVALID_ARGUMENT);
+
+    return tmrScheduleCallbackAbs(pTmr, Proc, Object, AbsTime, Flags, ChId);
+}
+
+/*!
+ * Warning! This code is dangerous, it can cause the whole system to crash. It will be
+ * removed as soon as possible! Use the new API!
+ * It remains only for transitional purposes.
+ */
+NV_STATUS tmrScheduleCallbackRelSec_IMPL
+(
+    OBJTMR *pTmr,
+    TIMEPROC_OBSOLETE Proc,
+    void *Object,
+    NvU32 RelTimeSec,
+    NvU32 Flags,
+    NvU32 ChId
+)
+{
+    NvU64 RelTimeNs;
+
+    RelTimeNs = (NvU64)RelTimeSec * 1000000000;
+
+    return tmrScheduleCallbackRel(pTmr, Proc, Object, RelTimeNs, Flags, ChId);
+}
+
+/*!
+ * Determines whether the callback is actually scheduled currently.
+ *
+ * @param[in] pEvent The event in question
+ */
+NvBool tmrEventOnList_IMPL
+(
+    OBJTMR *pTmr,
+    TMR_EVENT *pEventPublic
+)
+{
+    TMR_EVENT_PVT *pEvent = (TMR_EVENT_PVT *)pEventPublic;
+    TMR_EVENT_PVT *pScan = tmrIsOSTimer(pTmr, pEventPublic) ?
+                           pTmr->pRmActiveOSTimerEventList :
+                           pTmr->pRmActiveEventList;
+
+    while (pScan != NULL)
+    {
+        if (pScan == pEvent)
+        {
+            NV_ASSERT(pEvent->bInUse);
+            return NV_TRUE;
+        }
+        pScan = pScan->pNext;
+    }
+    return NV_FALSE;
+}
+
+/*!
+ * Warning! This code is dangerous, it can cause the whole system to crash. It will be
+ * removed as soon as possible! Use the new API!
+ * It remains only for transitional purposes.
+ */
+NvBool tmrCallbackOnList_IMPL
+(
+    OBJTMR *pTmr,
+    TIMEPROC_OBSOLETE Proc,
+    void *Object
+)
+{
+    NvBool onList = NV_FALSE;
+    TMR_EVENT_PVT *tmrScan;
+    TMR_EVENT_PVT *tmrList;
+
+    tmrList = pTmr->pRmActiveEventList;
+
+    for (tmrScan = tmrList; tmrScan; tmrScan = tmrScan->pNext)
+    {
+        if ((Proc == tmrScan->pTimeProc_OBSOLETE) &&
+            (Object == tmrScan->super.pUserData))
+        {
+            onList = NV_TRUE;
+            break;
+        }
+    }
+
+    return onList;
+}
+
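Both relative-scheduling paths above reject NvU64 overflow in currentTime + RelTime via portSafeAddU64 rather than letting the deadline wrap into the past. An equivalent standalone check (plain C, no RM dependencies; the function name is invented):

#include <stdint.h>
#include <stdbool.h>

/* Returns false if a + b would wrap; mirrors the portSafeAddU64 contract
 * used above, where a failed add maps to NV_ERR_INVALID_ARGUMENT. */
static bool safeAddU64(uint64_t a, uint64_t b, uint64_t *sum)
{
    if (a > UINT64_MAX - b)
        return false; /* would wrap past 2^64 - 1 */
    *sum = a + b;
    return true;
}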
+/*!
+ * OBSOLETE: This will be removed very soon!
+ */
+static TMR_EVENT_PVT *
+_tmrGetNextFreeCallback
+(
+    OBJTMR *pTmr
+)
+{
+    TMR_EVENT_PVT *pEvent = NULL;
+
+    pEvent = pTmr->pRmCallbackFreeList_OBSOLETE;
+    if (pEvent != NULL)
+    {
+        NV_ASSERT(pEvent->bLegacy); // OBSOLETE, remove later
+        pTmr->pRmCallbackFreeList_OBSOLETE = pEvent->pNext;
+        // Just to be sure.
+        pEvent->pNext = NULL;
+    }
+
+    return pEvent;
+}
+
+/*!
+ * Creates and inserts a node into the callback list.
+ *
+ * @param[in] pEvent Callback memory structure, provided by user.
+ * @param[in] Time Absolute (for PTIMER) or relative (for OS timer) nanoseconds at which to call Proc.
+ *
+ * @returns Status
+ */
+static NV_STATUS
+_tmrInsertCallback
+(
+    OBJTMR *pTmr,
+    TMR_EVENT_PVT *pEvent,
+    NvU64 Time
+)
+{
+    NV_STATUS returnStatus = NV_ERR_GENERIC; // Indicates that the timer was NOT inserted in the list
+    OBJGPU *pGpu = ENG_GET_GPU(pTmr);
+
+    // If this is a free callback
+    if (!pEvent->bInUse && !tmrEventOnList(pTmr, (TMR_EVENT *)pEvent))
+    {
+        pEvent->timens = Time;
+
+        _tmrInsertCallbackInList(pGpu, pTmr, pEvent);
+
+        returnStatus = NV_OK;
+    }
+    else
+    {
+        // Shouldn't get here. Don't call this function unless valid
+        NV_ASSERT_OR_RETURN(!"Invalid call to insert, already in use", NV_ERR_INVALID_ARGUMENT);
+    }
+
+    return returnStatus;
+}
+
+/*!
+ * Inserts a specific event into the callback queue, sorted by time, in the
+ * GPU timer case. In the OS timer case, inserts the event at the head of the
+ * callback queue.
+ *
+ * Handles setting the next alarm time as well as enabling the alarm if needed.
+ *
+ * @param[in] pEvent The event to be inserted, must be initialized
+ */
+static void
+_tmrInsertCallbackInList
+(
+    OBJGPU *pGpu,
+    OBJTMR *pTmr,
+    TMR_EVENT_PVT *pEvent
+)
+{
+    TMR_EVENT_PVT *pScan;
+    NvBool bAddedAsHead = NV_TRUE;
+    NvU64 nextAlarmTime;
+
+    NV_ASSERT(!pEvent->bInUse);
+    pEvent->bInUse = NV_TRUE;
+
+    if (tmrIsOSTimer(pTmr, (TMR_EVENT *)pEvent))
+    {
+        pEvent->pNext = pTmr->pRmActiveOSTimerEventList;
+        pTmr->pRmActiveOSTimerEventList = pEvent;
+        return;
+    }
+
+    if (pTmr->pRmActiveEventList == NULL)
+    {
+        // Enable PTIMER interrupt.
+        tmrRmCallbackIntrEnable(pTmr, pGpu);
+
+        // Insert pEvent as the first and only entry.
+        pEvent->pNext = NULL;
+        pTmr->pRmActiveEventList = pEvent;
+    }
+    else if (pEvent->timens <= pTmr->pRmActiveEventList->timens)
+    {
+        // Insert pEvent as the head entry of the non-empty callback list.
+        pEvent->pNext = pTmr->pRmActiveEventList;
+        pTmr->pRmActiveEventList = pEvent;
+    }
+    else
+    {
+        bAddedAsHead = NV_FALSE;
+
+        pScan = pTmr->pRmActiveEventList;
+
+        while (pScan->pNext != NULL)
+        {
+            if (pEvent->timens <= pScan->pNext->timens)
+            {
+                // Insert into the middle of the list.
+                pEvent->pNext = pScan->pNext;
+                pScan->pNext = pEvent;
+
+                break;
+            }
+            pScan = pScan->pNext;
+        }
+
+        if (pScan->pNext == NULL)
+        {
+            // Insert it at the end of the list.
+            pEvent->pNext = NULL;
+            pScan->pNext = pEvent;
+        }
+    }
+
+    if (bAddedAsHead)
+    {
+        // Find out when the next alarm should be.
+        if (NV_OK != _tmrGetNextAlarmTime(pTmr, &nextAlarmTime))
+        {
+            // If there is no event list, then just use 0.
+            nextAlarmTime = 0;
+        }
+
+        _tmrScheduleCallbackInterrupt(pGpu, pTmr, nextAlarmTime);
+    }
+}
+
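The time-sorted insert above maintains the invariant the callback engine relies on: the head of pRmActiveEventList always holds the earliest deadline, so only a head insertion forces the hardware alarm to be reprogrammed. A standalone miniature (plain C; the type and function names are made up for illustration):

#include <stddef.h>
#include <stdint.h>

typedef struct Ev { uint64_t timens; struct Ev *pNext; } Ev;

/* Insert pNew keeping the list ascending by deadline; ties go before equal
 * entries, matching the <= comparisons above. Returns 1 if pNew became the
 * new head (the case that would reprogram the alarm). */
static int insertSorted(Ev **ppHead, Ev *pNew)
{
    Ev **pp = ppHead;

    while (*pp != NULL && (*pp)->timens < pNew->timens)
        pp = &(*pp)->pNext;
    pNew->pNext = *pp;
    *pp = pNew;
    return pp == ppHead;
}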
+/*!
+ * Schedule a callback at the absolute time specified in units of nanoseconds.
+ *
+ * Accounts for bad scheduling times: if the time is too close in the future,
+ * it is pushed back by a short delay. This avoids some race conditions;
+ * callbacks may be delayed as a result, but they will never fire early.
+ *
+ * @note For statically defined events it is recommended to preallocate them
+ * all at the appropriate stage in the task life-cycle and deallocate them at
+ * the corresponding end of the life-cycle. For dynamically generated events,
+ * consider the effects on fragmentation and potentially defer deallocation.
+ *
+ * @param[in] pEvent Callback memory structure, provided by user.
+ * @param[in] Time Absolute nanoseconds at which to call Proc.
+ *
+ * @returns Status
+ */
+NV_STATUS tmrEventScheduleAbs_IMPL
+(
+    OBJTMR *pTmr,
+    TMR_EVENT *pEventPublic,
+    NvU64 timeAbsNs
+)
+{
+    NV_STATUS rmStatus = NV_OK;
+    TMR_EVENT_PVT *pEvent = (TMR_EVENT_PVT *)pEventPublic;
+
+    if ((pEvent != NULL) && tmrIsOSTimer(pTmr, pEventPublic))
+    {
+        NvU64 timeRelNs, currentTime;
+
+        rmStatus = tmrGetCurrentTime(pTmr, &currentTime);
+        if (rmStatus != NV_OK)
+            return rmStatus;
+
+        //
+        // If the absolute time is less than the current time, the timer has
+        // already expired. In that case, schedule the timer with zero delay
+        // to trigger an immediate callback.
+        //
+        timeRelNs = (timeAbsNs > currentTime) ? (timeAbsNs - currentTime) : 0;
+        return tmrEventScheduleRel(pTmr, pEventPublic, timeRelNs);
+    }
+
+    if ((pEvent == NULL) || (pEventPublic->pTimeProc == NULL &&
+                             pEvent->pTimeProc_OBSOLETE == NULL))
+    {
+        //
+        // Bug 372159: Not sure exactly how this is happening, but we are seeing
+        // it in OCA. If you see this during development/testing, please update
+        // the bug.
+        //
+        NV_ASSERT_FAILED(
+            "Attempting to schedule callback with NULL procedure. "
+            "Please update Bug 372159 with appropriate information.");
+        rmStatus = NV_ERR_INVALID_ARGUMENT;
+    }
+    else
+    {
+        //
+        // Insert this proc into the callback list.
+        //
+        // if (Time <= CurrentTime + SCHEDULING_DELAY_MIN):
+        //
+        // We used to return NV_ERR_CALLBACK_NOT_SCHEDULED here.
+        // The next fix called the callback immediately in order to simulate
+        // it being "scheduled"; however, this introduced nasty stack overflows
+        // due to self-rescheduling tasks.
+        //
+        // CL 16512758 fixed the stack-overflow issue, and added a case for
+        // handling callbacks scheduled to occur within 250 ns. Later we found
+        // out that a 1 us callback could cause the alarm to be set to the past
+        // and cause a 4+ second delay due to wrap-around. To fix this, we
+        // removed the 250 ns threshold, so that we will always re-read the
+        // current time after setting the alarm to prevent the wrap-around.
+        //
+        rmStatus = _tmrInsertCallback(pTmr, pEvent, timeAbsNs);
+    }
+
+    return rmStatus;
+}
+
+/*!
+ * Warning! This code is dangerous, it can cause the whole system to crash. It will be
+ * removed as soon as possible! Use the new API!
+ */
+NV_STATUS tmrScheduleCallbackAbs_IMPL
+(
+    OBJTMR *pTmr,
+    TIMEPROC_OBSOLETE Proc,
+    void *Object,
+    NvU64 Time,
+    NvU32 Flags,
+    NvU32 ChId
+)
+{
+    TMR_EVENT_PVT *tmrInsert;
+
+    // Get a free callback from the free list.
+    if (pTmr->pRmCallbackFreeList_OBSOLETE == NULL)
+    {
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+
+    if (!tmrCallbackOnList(pTmr, Proc, Object))
+    {
+        tmrInsert = _tmrGetNextFreeCallback(pTmr);
+        if (tmrInsert != NULL)
+        {
+            tmrInsert->pTimeProc_OBSOLETE = Proc;
+            tmrInsert->super.pUserData = Object;
+            tmrInsert->super.flags = Flags;
+            tmrInsert->super.chId = ChId;
+
+            return tmrEventScheduleAbs(pTmr, (TMR_EVENT *)tmrInsert, Time);
+        }
+        else
+        {
+            return NV_ERR_INSUFFICIENT_RESOURCES;
+        }
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_ERROR, "Proc %p Object %p already on tmrList\n", Proc,
+                  Object);
+    }
+    return NV_OK;
+}
+
+/*!
+ * Searches specified lists for TMR_EVENT* associated with Object and + * removes it. + * + * @param[in] Object Unique identifier based on TMR_POBJECT_BASE (tmr.h) + * + * @returns None + */ +static void _tmrScanCallback +( + OBJTMR *pTmr, + void *pObject +) +{ + TMR_EVENT_PVT *tmrScan; + TMR_EVENT_PVT *tmrNext; + TMR_EVENT_PVT *tmrCurrent; + + // + // Start at the beginning of the callback list. + // + // 'current' is either the same as 'scan' or + // it's the item immediately before 'scan' in + // the algorithm below. + // + tmrScan = tmrCurrent = pTmr->pRmActiveEventList; + + // + // Loop through the callback list while there are entries. + // + while (tmrScan) + { + // Point to the next callback so that we + // can continue our scan through the list. + tmrNext = tmrScan->pNext; + + // + // Scan list looking for matches to 'Object'. + // + if (tmrScan->super.pUserData == pObject) + { + // + // If the 'current' is not the item to be deleted + // (It must be the previous item) then link it + // to the 'next' item + // + if (tmrCurrent != tmrScan) + { + tmrCurrent->pNext = tmrScan->pNext; + } + else + { + // + // If 'current' is the same as the item to be deleted + // then move it to the next item. + // + tmrCurrent = tmrNext; + + // + // Update the head pointer if removing the head entry. + // This fixes bug 93812. + // + if (pTmr->pRmActiveEventList == tmrScan) + { + pTmr->pRmActiveEventList = tmrScan->pNext; + } + } + + if (tmrScan->bLegacy) + { + // + // Tack the object to be deleted onto the head of the + // callback free list (OBSOLETE) + // + tmrScan->pNext = pTmr->pRmCallbackFreeList_OBSOLETE; + pTmr->pRmCallbackFreeList_OBSOLETE = tmrScan; + } + + tmrScan->bInUse = NV_FALSE; + } + else + { + // + // If we haven't deleted this item, then the 'current' + // item becomes this item. So 'scan' will advance ONE beyond + // the item that was NOT deleted, and 'current' becomes + // the item NOT deleted. + // + tmrCurrent = tmrScan; + } + + // Now point to the 'next' object in the callback list. + tmrScan = tmrNext; + } +} + +/*! + * This function removes the timer callback from the OS timer list + * after it is serviced, cancelled, or destroyed. + */ +static void +_tmrScanCallbackOSTimer +( + OBJTMR *pTmr, + TMR_EVENT_PVT *pEvent +) +{ + TMR_EVENT_PVT *pCurrent = pTmr->pRmActiveOSTimerEventList; + + if (pCurrent == pEvent) + { + pTmr->pRmActiveOSTimerEventList = pCurrent->pNext; + pEvent->pNext = NULL; + pEvent->bInUse = NV_FALSE; + return; + } + + while (pCurrent != NULL) + { + if (pCurrent->pNext == pEvent) + { + pCurrent->pNext = pEvent->pNext; + pEvent->pNext = NULL; + pEvent->bInUse = NV_FALSE; + break; + } + pCurrent = pCurrent->pNext; + } +} + +// determine which (if any) callback should determine the next alarm time +static NV_STATUS +_tmrGetNextAlarmTime +( + OBJTMR *pTmr, + NvU64 *pNextAlarmTime +) +{ + if (pTmr->pRmActiveEventList == NULL) + { + *pNextAlarmTime = 0; + return NV_ERR_CALLBACK_NOT_SCHEDULED; + } + + *pNextAlarmTime = pTmr->pRmActiveEventList->timens; + + return NV_OK; +} + +/*! 
+ * Return the very next callback to be scheduled, removing it from the list
+ * and marking it as free (an event is only "in use" while it is in the list).
+ */
+static TMR_EVENT_PVT * _tmrPullCallbackFromHead
+(
+    OBJTMR *pTmr
+)
+{
+    TMR_EVENT_PVT *tmrDelete = pTmr->pRmActiveEventList;
+
+    if (tmrDelete)
+    {
+        // Remove from the callback list
+        pTmr->pRmActiveEventList = tmrDelete->pNext;
+        tmrDelete->bInUse = NV_FALSE;
+
+        if (tmrDelete->bLegacy)
+        {
+            // Might be a race condition, but will be removed so it's OK
+            tmrDelete->pNext = pTmr->pRmCallbackFreeList_OBSOLETE;
+            pTmr->pRmCallbackFreeList_OBSOLETE = tmrDelete;
+        }
+    }
+
+    return tmrDelete;
+}
+
+/*!
+ * Time until the next callback expires.
+ *
+ * Returns NV_ERR_CALLBACK_NOT_SCHEDULED if no callbacks are scheduled.
+ */
+NV_STATUS
+tmrTimeUntilNextCallback_IMPL
+(
+    OBJGPU *pGpu,
+    OBJTMR *pTmr,
+    NvU64 *pTimeUntilCallbackNs
+)
+{
+    NvU64 currentTime;
+    NvU64 nextAlarmTime;
+    NV_STATUS status;
+
+    *pTimeUntilCallbackNs = 0;
+
+    // Get the time from the first (earliest) entry.
+    status = _tmrGetNextAlarmTime(pTmr, &nextAlarmTime);
+    if (status != NV_OK)
+        return status;
+
+    status = tmrGetCurrentTime(pTmr, &currentTime);
+    if (status != NV_OK)
+        return status;
+
+    if (currentTime < nextAlarmTime)
+        *pTimeUntilCallbackNs = nextAlarmTime - currentTime;
+
+    return NV_OK;
+}
+
+/*!
+ * Used by tmrService; iteratively checks which callbacks need to be executed.
+ */
+NvBool
+tmrCallExpiredCallbacks_IMPL
+(
+    OBJGPU *pGpu,
+    OBJTMR *pTmr
+)
+{
+    NvU64 currentTime = 0;
+    NvU64 nextAlarmTime;
+    TMR_EVENT_PVT *pEvent;
+    NV_STATUS rmStatus;
+    NvBool bProcessedCallback = NV_FALSE;
+
+    // Call all callbacks that have expired
+    if (pTmr && (tmrEventsExist(pTmr)))
+    {
+        // Check for expired time.
+        for (;;)
+        {
+            // Get the time from the first (earliest) entry.
+            rmStatus = _tmrGetNextAlarmTime(pTmr, &nextAlarmTime);
+            if (rmStatus != NV_OK)
+                break;
+
+            if (nextAlarmTime > currentTime)
+            {
+                rmStatus = tmrGetCurrentTime(pTmr, &currentTime);
+                if ((rmStatus != NV_OK) || (nextAlarmTime > currentTime))
+                    break;
+            }
+
+            // Pull from the head of the list.
+            pEvent = _tmrPullCallbackFromHead(pTmr);
+
+            if (pEvent != NULL)
+            {
+                // Call callback. This could insert a new callback into the list.
+                if (pEvent->bLegacy && pEvent->pTimeProc_OBSOLETE != NULL)
+                {
+                    pEvent->pTimeProc_OBSOLETE(pGpu, pTmr, pEvent->super.pUserData);
+                    bProcessedCallback = NV_TRUE;
+                }
+                else if (!pEvent->bLegacy && pEvent->super.pTimeProc != NULL)
+                {
+                    pEvent->super.pTimeProc(pGpu, pTmr, (TMR_EVENT *)pEvent);
+                    bProcessedCallback = NV_TRUE;
+                }
+                else
+                {
+                    NV_ASSERT_FAILED("Attempting to execute callback with NULL procedure.");
+                }
+            }
+            else
+            {
+                NV_ASSERT_FAILED("Attempting to execute callback with NULL timer event.");
+            }
+        }
+
+        //
+        // rmStatus is NV_OK only when there are more events in the list AND
+        // the GPU has not fallen off the bus AND the GPU is not in full chip
+        // reset.
+        //
+        // We get to this routine with bInterrupt set to true when we got
+        // (and cleared) the timer interrupt. So, we need to set it again.
+        //
+        if (rmStatus == NV_OK)
+        {
+            _tmrScheduleCallbackInterrupt(pGpu, pTmr, nextAlarmTime);
+        }
+    }
+
+    return bProcessedCallback;
+}
+
+/*!
+ * TODO: document + */ +static void +_tmrStateLoadCallbacks +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + NvU64 nextAlarmTime = 0; + TMR_EVENT_PVT *pScan = pTmr->pRmActiveOSTimerEventList; + + if (tmrEventsExist(pTmr)) + { + if (tmrGetCallbackInterruptPending(pGpu, pTmr)) + { + if (NV_OK == _tmrGetNextAlarmTime(pTmr, &nextAlarmTime)) + { + _tmrScheduleCallbackInterrupt(pGpu, pTmr, nextAlarmTime); + } + } + + // + // else - we have alarm pending - just proceed to enable interrupts + // so that it's immediately handled + // + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS)) + { + tmrSetCountdownIntrEnable_HAL(pGpu, pTmr); + } + else + { + tmrSetAlarmIntrEnable_HAL(pGpu, pTmr); + } + } + + // Schedule the timer callbacks which were paused during state unload + while (pScan != NULL) + { + // + // Capture system time here, this will help in scheduling callbacks + // if there is a state unload before receiving the OS timer callback. + // + pScan->startTimeNs = osGetMonotonicTimeNs(); + tmrEventScheduleRelOSTimer_HAL(pTmr, (TMR_EVENT *)pScan, pScan->timens); + pScan = pScan->pNext; + } +} + +/*! + * Wraps HAL functions to enable hardware timer interrupts for the rm callbacks. + */ +void +tmrRmCallbackIntrEnable_IMPL +( + OBJTMR *pTmr, + OBJGPU *pGpu +) +{ + tmrResetCallbackInterrupt(pGpu, pTmr); + + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS)) + { + tmrSetCountdownIntrEnable_HAL(pGpu, pTmr); + } + else + { + tmrSetAlarmIntrEnable_HAL(pGpu, pTmr); + } +} + +/*! + * Wraps HAL functions to disable hardware timer interrupts for the rm callbacks. + */ +void +tmrRmCallbackIntrDisable_IMPL +( + OBJTMR *pTmr, + OBJGPU *pGpu +) +{ + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS)) + { + tmrSetCountdownIntrDisable_HAL(pGpu, pTmr); + } + else + { + tmrSetAlarmIntrDisable_HAL(pGpu, pTmr); + } +} + +void +tmrSetCountdownCallback_IMPL +( + OBJTMR *pTmr, + TIMEPROC_COUNTDOWN pSwrlCallback +) +{ + pTmr->pSwrlCallback = pSwrlCallback; +} + +/*! + * TODO: document + */ +void +tmrGetSystemTime_IMPL +( + OBJTMR *pTmr, + PDAYMSECTIME pTime +) +{ + NvU32 sec; + NvU32 usec; + + // + // This function finds out the current time in terms of number of days and + // milliseconds since 1900. Note that the estimates are really crude since + // 1 year is treated as 365 days, 1 month as 30 days and so on. Keep these + // points in mind before using the function. + // + if (pTime != NULL) + { + // Get the system time and calculate the contents of the returned structure. + osGetSystemTime(&sec, &usec); + pTime->days = sec / (3600 * 24); // # of days since ref point + sec = sec % (3600 * 24); // seconds since day began + pTime->msecs = sec * 1000 + (usec / 1000); // milliseconds since day began + } +} + +/*! + * This has become obsolete, it should be replaced with userData logic + */ +NvBool +tmrCheckCallbacksReleaseSem_IMPL +( + OBJTMR *pTmr, + NvU32 chId +) +{ + TMR_EVENT_PVT *pScan; + + for (pScan = pTmr->pRmActiveEventList; pScan != NULL; pScan = pScan->pNext) + { + if ((pScan->super.flags & TMR_FLAG_RELEASE_SEMAPHORE) && + (pScan->super.chId == chId)) + { + break; + } + } + + return pScan != NULL; +} + +/*! + * TODO: document + */ +void +tmrInitCallbacks_IMPL +( + OBJTMR *pTmr +) +{ + NvU32 i; + + // Initialize the timer callback lists. 
+ pTmr->pRmActiveEventList = NULL; + pTmr->pRmActiveOSTimerEventList = NULL; + + // Everything below this comment will be removed with new API + pTmr->pRmCallbackFreeList_OBSOLETE = pTmr->rmCallbackTable_OBSOLETE; + + // Fill in all the forward pointers in the callback table. + for (i = 0; i < (TMR_NUM_CALLBACKS_RM - 1); i++) + { + pTmr->rmCallbackTable_OBSOLETE[i].pNext = &pTmr->rmCallbackTable_OBSOLETE[i+1]; + pTmr->rmCallbackTable_OBSOLETE[i].bInUse = NV_FALSE; + pTmr->rmCallbackTable_OBSOLETE[i].bLegacy = NV_TRUE; + } + pTmr->rmCallbackTable_OBSOLETE[i].pNext = NULL; + pTmr->rmCallbackTable_OBSOLETE[i].bInUse = NV_FALSE; + pTmr->rmCallbackTable_OBSOLETE[i].bLegacy = NV_TRUE; +} + +/*! + * Searches for all events associated with an Object and removes them. + * + * @param[in,out] pTmr TMR object pointer + * @param[in] pObject Unique identifier based on TMR_POBJECT_BASE (tmr.h) + * + * @returns NV_OK always succeeds + */ +NV_STATUS +tmrCancelCallback_IMPL +( + OBJTMR *pTmr, + void *pObject +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + NvU64 nextAlarmTime; + + if (tmrEventsExist(pTmr) && pObject != NULL) + { + // Pull all objects with the same address from the callback list. + _tmrScanCallback(pTmr, pObject); + + // + // If there's anything left then set an alarm for the soonest one. + // Otherwise, disable the PTIMER interrupt altogether. + // + if (NV_OK == _tmrGetNextAlarmTime(pTmr, &nextAlarmTime)) + { + _tmrScheduleCallbackInterrupt(pGpu, pTmr, nextAlarmTime); + } + else + { + // List is empty! Disable PTIMER interrupt. + tmrRmCallbackIntrDisable(pTmr, pGpu); + } + } + + return NV_OK; +} + +/*! + * TODO: document + * + * This function finds out if the (futureTime - pastTime) > maxCacheTimeInMSec + */ +NvBool +tmrDiffExceedsTime_IMPL +( + OBJTMR *pTmr, + PDAYMSECTIME pFutureTime, + PDAYMSECTIME pPastTime, + NvU32 time +) +{ + NvU32 msecsInADay = 1000 * 3600 * 24; + NvBool bRetVal = NV_FALSE; + + if ((pFutureTime->days < pPastTime->days) || + (((pFutureTime->days == pPastTime->days) && + (pFutureTime->msecs < pPastTime->msecs)))) + { + bRetVal = NV_TRUE; + } + else + { + // Because of overflow possibility, first check for diff in days + if ((((pFutureTime->days - pPastTime->days) + + (pFutureTime->msecs - pPastTime->msecs)/msecsInADay)) > (time/msecsInADay)) + { + bRetVal = NV_TRUE; + } + else + { + // Now diff in millisecs + if ((((pFutureTime->days - pPastTime->days) * msecsInADay) + + (pFutureTime->msecs - pPastTime->msecs)) > time) + { + bRetVal = NV_TRUE; + } + } + } + + return bRetVal; +} + +NV_STATUS +tmrStatePreInitLocked_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + + return NV_OK; +} + +NV_STATUS +tmrStateInitLocked_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + + return NV_OK; +} + +/*! + * TODO: document + */ +NV_STATUS +tmrStateLoad_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU32 flags +) +{ + // Have to restore any pending callbacks' state + _tmrStateLoadCallbacks(pGpu, pTmr); + + return NV_OK; +} + +/*! + * TODO: document + */ +NV_STATUS +tmrStateUnload_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU32 flags +) +{ + TMR_EVENT_PVT *pScan = pTmr->pRmActiveOSTimerEventList; + NvU64 currentSysTime, elapsedTime; + + // Disable Timer interrupt. + tmrSetAlarmIntrDisable_HAL(pGpu, pTmr); + tmrSetCountdownIntrDisable_HAL(pGpu, pTmr); + + // + // Cancel OS timers, and save the time remaining for callback + // in 'timens', to reschedule after state load. 
+ // + while (pScan != NULL) + { + currentSysTime = osGetMonotonicTimeNs(); + // + // If somehow any of the time difference is negative, + // we will use the original time duration. + // + if (currentSysTime >= pScan->startTimeNs) + { + elapsedTime = currentSysTime - pScan->startTimeNs; + if (pScan->timens > elapsedTime) + { + pScan->timens -= elapsedTime; + } + } + + tmrEventCancelOSTimer_HAL(pTmr, (TMR_EVENT *)pScan); + pScan = pScan->pNext; + } + return NV_OK; +} + +/*! + * TODO: document + */ +void +tmrStateDestroy_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + objDelete(pTmr->pGrTickFreqRefcnt); + pTmr->pGrTickFreqRefcnt = NULL; +} + +NV_STATUS +tmrapiConstruct_IMPL +( + TimerApi *pTimerApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +void +tmrapiDestruct_IMPL +( + TimerApi *pTimerApi +) +{ +} + +NV_STATUS +tmrapiGetRegBaseOffsetAndSize_IMPL +( + TimerApi *pTimerApi, + OBJGPU *pGpu, + NvU32 *pOffset, + NvU32 *pSize +) +{ + NV_STATUS status; + NvU32 offset; + + status = gpuGetRegBaseOffset_HAL(GPU_RES_GET_GPU(pTimerApi), NV_REG_BASE_TIMER, &offset); + if (status != NV_OK) + return status; + + if (pOffset) + *pOffset = offset; + + if (pSize) + *pSize = sizeof(Nv01TimerMap); + + return NV_OK; +} + +void +tmrapiDeregisterEvents_IMPL(TimerApi *pTimerApi) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pTimerApi); + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + PEVENTNOTIFICATION pNotifyEvent = inotifyGetNotificationList(staticCast(pTimerApi, INotifier)); + + // Validate the timer event + while (pNotifyEvent != NULL) + { + // + // TimerApi events are only set through NV0004_CTRL_CMD_TMR_SET_ALARM_NOTIFY + // which only schedules TMR_EVENT type callbacks. So only call the new API. + // + TMR_EVENT *pTmrEvent = pNotifyEvent->pTmrEvent; + if (pTmrEvent != NULL) + { + tmrEventDestroy(pTmr, pTmrEvent); + pNotifyEvent->pGpu = NULL; + pNotifyEvent->pTmrEvent = NULL; + } + + pNotifyEvent = pNotifyEvent->Next; + } +} + +//--------------------------------------------------------------------------- +// +// NV0004 Control Functions +// +//--------------------------------------------------------------------------- + +// +// There is some type hacking involved here. The inner callback is called correctly here +// though it is cast to the outer callback type for storage. The timer only sees the +// outer callback type directly so it will call it correctly, and this wrapper hides the +// inner callback and calls it correctly from itself. Hacky but it should work around the +// limitations in the SDK (all RM derived types undefined, so TIMEPROC type is impossible). +// +typedef NvU32 (*TMR_CALLBACK_FUNCTION)(void *pCallbackData); + +typedef struct +{ + TMR_CALLBACK_FUNCTION pTimeProc; + void *pCallbackData; +} wrapperStorage_t; + +static NV_STATUS _tmrCallbackWrapperfunction +( + OBJGPU *pGpu, + OBJTMR *pTmr, + TMR_EVENT *pEvent +) +{ + wrapperStorage_t *pObj_Inner = (wrapperStorage_t *)pEvent->pUserData; + + // Backup the wrapper function and data + TIMEPROC pCallback_Outer = pEvent->pTimeProc; + void *pCallbackData_Outer = pEvent->pUserData; + + // Swap in the inner function and data + pEvent->pTimeProc = (TIMEPROC) pObj_Inner->pTimeProc; // Intentionally the wrong type! 
+    pEvent->pUserData = pObj_Inner->pCallbackData;
+
+    // Perform the actual callback the way the user expects it
+    pObj_Inner->pTimeProc((void *)pEvent->pUserData);
+
+    // Rewrap whatever changes the user may have made
+    pObj_Inner->pTimeProc = (TMR_CALLBACK_FUNCTION) pEvent->pTimeProc;
+    pObj_Inner->pCallbackData = pEvent->pUserData;
+
+    // Restore the wrapper function and data
+    pEvent->pTimeProc = pCallback_Outer;
+    pEvent->pUserData = pCallbackData_Outer;
+
+    return NV_OK;
+}
+
+/*!
+ * Creates an event and initializes the wrapper callback data, putting the
+ * desired callback inside of its struct to be swapped in later.
+ *
+ * @returns NV_STATUS
+ */
+NV_STATUS
+tmrCtrlCmdEventCreate
+(
+    OBJGPU *pGpu,
+    TMR_EVENT_SET_PARAMS *pParams
+)
+{
+    NV_STATUS rc;
+    TMR_EVENT *pEvent;
+    wrapperStorage_t *pWrapper;
+    OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
+
+    // Allocate the wrapper's caller data to store the real caller data!
+    pWrapper = portMemAllocNonPaged(sizeof(wrapperStorage_t));
+    if (pWrapper == NULL)
+    {
+        return NV_ERR_NO_MEMORY;
+    }
+    pWrapper->pTimeProc = (TMR_CALLBACK_FUNCTION)NvP64_VALUE(pParams->pTimeProc);
+    pWrapper->pCallbackData = NvP64_VALUE(pParams->pCallbackData);
+
+    rc = tmrEventCreate(pTmr,
+                        &pEvent,
+                        _tmrCallbackWrapperfunction,
+                        pWrapper,
+                        pParams->flags);
+
+    *(pParams->ppEvent) = NV_PTR_TO_NvP64(pEvent);
+
+    return rc;
+}
+
+
+/*!
+ * Schedules an existing event. Takes in time arguments and a flag to
+ * determine whether the time should be interpreted as absolute or relative.
+ *
+ * @returns NV_STATUS
+ */
+NV_STATUS
+tmrCtrlCmdEventSchedule
+(
+    OBJGPU *pGpu,
+    TMR_EVENT_SCHEDULE_PARAMS *pParams
+)
+{
+    NV_STATUS rc;
+    OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
+    TMR_EVENT *pEvent = (TMR_EVENT *)NvP64_VALUE(pParams->pEvent);
+
+    if (pParams->bUseTimeAbs)
+    {
+        rc = tmrEventScheduleAbs(pTmr, pEvent, pParams->timeNs);
+    }
+    else
+    {
+        rc = tmrEventScheduleRel(pTmr, pEvent, pParams->timeNs);
+    }
+
+    return rc;
+}
+
+/*!
+ * Cancels an existing event. NOP on an unscheduled event.
+ *
+ * @returns NV_OK
+ */
+NV_STATUS
+tmrCtrlCmdEventCancel
+(
+    OBJGPU *pGpu,
+    TMR_EVENT_GENERAL_PARAMS *pParams
+)
+{
+    OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
+    TMR_EVENT *pEvent = (TMR_EVENT *)NvP64_VALUE(pParams->pEvent);
+
+    tmrEventCancel(pTmr, pEvent);
+
+    return NV_OK;
+}
+
+/*!
+ * Cancels and destroys an existing event. It also cleans up the special
+ * wrapper memory used by this API framework.
+ * + * @returns NV_OK + */ +NV_STATUS +tmrCtrlCmdEventDestroy +( + OBJGPU *pGpu, + TMR_EVENT_GENERAL_PARAMS *pParams +) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + TMR_EVENT *pEvent = (TMR_EVENT *)NvP64_VALUE(pParams->pEvent); + + // Free our temporary wrapper storage + portMemFree(pEvent->pUserData); + + tmrEventDestroy(pTmr, pEvent); + + return NV_OK; +} + +NV_STATUS tmrEventServiceTimer_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr, + TMR_EVENT *pPublicEvent +) +{ + TMR_EVENT_PVT *pEvent = (TMR_EVENT_PVT *)pPublicEvent; + NV_STATUS status = NV_ERR_INVALID_REQUEST; + + if ((pEvent == NULL) || !tmrIsOSTimer(pTmr, (TMR_EVENT *)pEvent)) + { + return status; + } + + if (tmrEventOnList(pTmr, (TMR_EVENT *)pEvent)) + { + _tmrScanCallbackOSTimer(pTmr, pEvent); + status = tmrEventServiceOSTimerCallback_HAL(pGpu, pTmr, pPublicEvent); + } + + return status; +} + diff --git a/src/nvidia/src/kernel/gpu/timer/timer_ostimer.c b/src/nvidia/src/kernel/gpu/timer/timer_ostimer.c new file mode 100644 index 0000000..d51e7fe --- /dev/null +++ b/src/nvidia/src/kernel/gpu/timer/timer_ostimer.c @@ -0,0 +1,329 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/********************* Non-Chip Specific HAL TMR Routines ******************\ +* * +* This file contains TMR method implementations using OSTIMER * +* * +\***************************************************************************/ + +#include "gpu/timer/objtmr.h" + +// Minimum delay for OS timer +#define OSTIMER_MIN_DELAY_NS 1 // 1 nanosecond + +// +// This function returns current time from OS timer +// +NvU64 +tmrGetTimeEx_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 seconds; // Time since 1970 in seconds + NvU32 useconds; // and uSeconds. + NvU64 timeNs; // Time since 1970 in ns. + + // + // Get current time from operating system. + // + // We get the time in seconds and microseconds since 1970 + // Note that we don't really need the real time of day + // + osGetSystemTime(&seconds, &useconds); + + // + // Calculate ns since 1970. + // + timeNs = ((NvU64)seconds * 1000000 + useconds) * 1000; + + return timeNs; +} + +/*! 
+ * Creates an OS timer event
+ *
+ * @param[in] pTmr          Pointer to Timer Object
+ * @param[in] pEventPublic  Pointer to timer event information
+ *
+ * @returns NV_STATUS
+ */
+NV_STATUS tmrEventCreateOSTimer_OSTIMER
+(
+    OBJTMR    *pTmr,
+    TMR_EVENT *pEventPublic
+)
+{
+    NV_STATUS status = NV_OK;
+    OBJGPU *pGpu = ENG_GET_GPU(pTmr);
+    TMR_EVENT_PVT *pEvent = (TMR_EVENT_PVT *)pEventPublic;
+
+    status = osCreateNanoTimer(pGpu->pOsGpuInfo, pEvent, &(pEvent->super.pOSTmrCBdata));
+
+    if (status != NV_OK)
+    {
+        pEvent->super.pOSTmrCBdata = NULL;
+        NV_PRINTF(LEVEL_ERROR, "OS create timer failed\n");
+    }
+
+    return status;
+}
+
+/*!
+ * This function starts (schedules) the OS timer
+ *
+ * @param[in] pTmr          Pointer to Timer Object
+ * @param[in] pPublicEvent  Pointer to timer event information
+ * @param[in] timeRelNs     Relative time in nanoseconds
+ *
+ * @returns NV_ERR_INVALID_REQUEST if the timer has not been created
+ */
+NV_STATUS tmrEventScheduleRelOSTimer_OSTIMER
+(
+    OBJTMR    *pTmr,
+    TMR_EVENT *pPublicEvent,
+    NvU64      timeRelNs
+)
+{
+    NV_STATUS status = NV_OK;
+    OBJGPU *pGpu = ENG_GET_GPU(pTmr);
+    TMR_EVENT_PVT *pEvent = (TMR_EVENT_PVT *) pPublicEvent;
+
+    if (pEvent->super.pOSTmrCBdata == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "OS timer not created\n");
+        return NV_ERR_INVALID_REQUEST;
+    }
+
+    timeRelNs = NV_MAX(timeRelNs, OSTIMER_MIN_DELAY_NS);
+    status = osStartNanoTimer(pGpu->pOsGpuInfo, pEvent->super.pOSTmrCBdata, timeRelNs);
+
+    if (status != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "OS start timer failed!\n");
+    }
+
+    pEvent->super.flags |= TMR_FLAG_OS_TIMER_QUEUED;
+    return status;
+}
+
+/*!
+ * This function runs the OS timer callback
+ *
+ * @param[in] pGpu          Pointer to GPU object
+ * @param[in] pTmr          Pointer to Timer Object
+ * @param[in] pPublicEvent  Pointer to timer event information
+ *
+ * @returns NV_ERR_INVALID_REQUEST if callback not found
+ */
+NV_STATUS tmrEventServiceOSTimerCallback_OSTIMER
+(
+    OBJGPU    *pGpu,
+    OBJTMR    *pTmr,
+    TMR_EVENT *pPublicEvent
+)
+{
+    TMR_EVENT_PVT *pEvent = (TMR_EVENT_PVT *)pPublicEvent;
+    NV_STATUS status = NV_OK;
+
+    if (pEvent && (pEvent->super.pTimeProc != NULL))
+    {
+        pEvent->super.pTimeProc(pGpu, pTmr, (TMR_EVENT *)pEvent);
+        pEvent->super.flags &= ~TMR_FLAG_OS_TIMER_QUEUED;
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "No timer event callback found, invalid timer SW state\n");
+        status = NV_ERR_INVALID_REQUEST;
+    }
+
+    return status;
+}
+
+/*!
+ * This function cancels the OS timer callback
+ *
+ * @param[in] pTmr          Pointer to Timer Object
+ * @param[in] pPublicEvent  Pointer to timer event information
+ *
+ * @returns NV_ERR_INVALID_REQUEST if callback entry not found
+ */
+NV_STATUS tmrEventCancelOSTimer_OSTIMER
+(
+    OBJTMR    *pTmr,
+    TMR_EVENT *pPublicEvent
+)
+{
+    NV_STATUS status = NV_OK;
+    OBJGPU *pGpu = ENG_GET_GPU(pTmr);
+    TMR_EVENT_PVT *pTmrEvent = (TMR_EVENT_PVT *) pPublicEvent;
+
+    if (pTmrEvent != NULL && pTmrEvent->super.pOSTmrCBdata != NULL)
+    {
+        // Cancel the callback of the OS timer
+        status = osCancelNanoTimer(pGpu->pOsGpuInfo, pTmrEvent->super.pOSTmrCBdata);
+        pTmrEvent->super.flags &= ~TMR_FLAG_OS_TIMER_QUEUED;
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "No timer event callback found, invalid timer SW state\n");
+        status = NV_ERR_INVALID_REQUEST;
+    }
+
+    return status;
+}
+
+/*!
+ * This function destroys the OS timer
+ *
+ * @param[in] pTmr          Pointer to Timer Object
+ * @param[in] pPublicEvent  Pointer to timer event information
+ *
+ * @returns NV_ERR_INVALID_REQUEST if callback entry not found
+ */
+NV_STATUS tmrEventDestroyOSTimer_OSTIMER
+(
+    OBJTMR    *pTmr,
+    TMR_EVENT *pPublicEvent
+)
+{
+    NV_STATUS status = NV_OK;
+    OBJGPU *pGpu = ENG_GET_GPU(pTmr);
+    TMR_EVENT_PVT *pTmrEvent = (TMR_EVENT_PVT *) pPublicEvent;
+
+    if (pTmrEvent != NULL && pTmrEvent->super.pOSTmrCBdata != NULL)
+    {
+        // Destroy the OS timer backing this event
+        status = osDestroyNanoTimer(pGpu->pOsGpuInfo, pTmrEvent->super.pOSTmrCBdata);
+        pTmrEvent->super.flags &= ~TMR_FLAG_OS_TIMER_QUEUED;
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "No timer event callback found, invalid timer SW state\n");
+        status = NV_ERR_INVALID_REQUEST;
+    }
+
+    return status;
+}
+
+NV_STATUS
+tmrGetIntrStatus_OSTIMER
+(
+    OBJGPU            *pGpu,
+    OBJTMR            *pTmr,
+    NvU32             *pStatus,
+    THREAD_STATE_NODE *pThreadState
+)
+{
+    *pStatus = 0;
+    return NV_OK;
+}
+
+//
+// For functions that only need a short delta of time elapsed (~4.29 seconds).
+// NOTE: Since it wraps around every 4.29 seconds, for general GetTime purposes,
+// it's better to use tmrGetTime().
+//
+NvU32
+tmrGetTimeLo_OSTIMER
+(
+    OBJGPU *pGpu,
+    OBJTMR *pTmr
+)
+{
+    return NvU64_LO32(tmrGetTimeEx_HAL(pGpu, pTmr, NULL));
+}
+
+NvU64
+tmrGetTime_OSTIMER
+(
+    OBJGPU *pGpu,
+    OBJTMR *pTmr
+)
+{
+    return tmrGetTimeEx_HAL(pGpu, pTmr, NULL);
+}
+
+NvU32
+tmrReadTimeLoReg_OSTIMER
+(
+    OBJGPU            *pGpu,
+    OBJTMR            *pTmr,
+    THREAD_STATE_NODE *pThreadState
+)
+{
+    return NvU64_LO32(tmrGetTimeEx_HAL(pGpu, pTmr, pThreadState));
+}
+
+NvU32
+tmrReadTimeHiReg_OSTIMER
+(
+    OBJGPU            *pGpu,
+    OBJTMR            *pTmr,
+    THREAD_STATE_NODE *pThreadState
+)
+{
+    return NvU64_HI32(tmrGetTimeEx_HAL(pGpu, pTmr, pThreadState));
+}
+
+NV_STATUS
+tmrGetGpuAndCpuTimestampPair_OSTIMER
+(
+    OBJGPU *pGpu,
+    OBJTMR *pTmr,
+    NvU64  *pGpuTime,
+    NvU64  *pCpuTime
+)
+{
+#if PORT_IS_FUNC_SUPPORTED(portUtilExReadTimestampCounter)
+    *pGpuTime = tmrGetTimeEx_HAL(pGpu, pTmr, NULL);
+    *pCpuTime = portUtilExReadTimestampCounter();
+    return NV_OK;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+NV_STATUS
+tmrDelay_OSTIMER
+(
+    OBJTMR *pTmr,
+    NvU32   nsec
+)
+{
+    if (nsec > 50000000) // 50 ms.
+    {
+        osDelay(nsec / 1000000);
+    }
+    else if (nsec > 0)
+    {
+        osDelayNs(nsec);
+    }
+
+    return NV_OK;
+}
+
diff --git a/src/nvidia/src/kernel/gpu_mgr/gpu_db.c b/src/nvidia/src/kernel/gpu_mgr/gpu_db.c
new file mode 100644
index 0000000..2ad8b47
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu_mgr/gpu_db.c
@@ -0,0 +1,461 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/****************************************************************************
+ *
+ *   Description:
+ *       This file contains the functions managing the GPU database
+ *
+ ***************************************************************************/
+
+#include "gpu_mgr/gpu_db.h"
+#include "core/system.h"
+
+#include "gpu/gpu.h" // for NBADDR
+
+NV_STATUS
+gpudbConstruct_IMPL
+(
+    GpuDb *pGpuDb
+)
+{
+    listInit(&pGpuDb->gpuList, portMemAllocatorGetGlobalNonPaged());
+
+    pGpuDb->pLock = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged());
+    if (pGpuDb->pLock == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "GPU database list lock init failed\n");
+        listDestroy(&pGpuDb->gpuList);
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+
+    return NV_OK;
+}
+
+void
+gpudbDestruct_IMPL
+(
+    GpuDb *pGpuDb
+)
+{
+    if (pGpuDb->pLock != NULL)
+    {
+        portSyncMutexDestroy(pGpuDb->pLock);
+    }
+
+    listDestroy(&pGpuDb->gpuList);
+}
+
+static PGPU_INFO_LIST_NODE
+_gpudbFindGpuInfoByUuid
+(
+    const NvU8 *pUuid
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    GpuDb *pGpuDb = SYS_GET_GPUDB(pSys);
+    GPU_INFO_LIST_NODE *pNode = NULL;
+
+    for (pNode = listHead(&pGpuDb->gpuList);
+         pNode != NULL;
+         pNode = listNext(&pGpuDb->gpuList, pNode))
+    {
+        if (portMemCmp(pNode->uuid, pUuid, RM_SHA1_GID_SIZE) == 0)
+        {
+            break;
+        }
+    }
+
+    return pNode;
+}
+
+NV_STATUS
+gpudbRegisterGpu(const NvU8 *pUuid, const NBADDR *pUpstreamPortPciInfo, NvU64 pciInfo)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    GpuDb *pGpuDb = SYS_GET_GPUDB(pSys);
+    GPU_INFO_LIST_NODE *pNode;
+    NV_STATUS status = NV_OK;
+    NvU32 i = 0;
+
+    portSyncMutexAcquire(pGpuDb->pLock);
+
+    pNode = _gpudbFindGpuInfoByUuid(pUuid);
+    if (pNode != NULL)
+    {
+        pNode->bShutdownState = NV_FALSE;
+        goto done;
+    }
+
+    pNode = listAppendNew(&pGpuDb->gpuList);
+    if (pNode == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Appending to the list failed\n");
+        status = NV_ERR_INSUFFICIENT_RESOURCES;
+        goto done;
+    }
+
+    portMemCopy(pNode->uuid, RM_SHA1_GID_SIZE, pUuid, RM_SHA1_GID_SIZE);
+
+    pNode->pciPortInfo.domain = gpuDecodeDomain(pciInfo);
+    pNode->pciPortInfo.bus = gpuDecodeBus(pciInfo);
+    pNode->pciPortInfo.device = gpuDecodeDevice(pciInfo);
+    pNode->pciPortInfo.function = 0;
+    pNode->pciPortInfo.bValid = NV_TRUE;
+
+    pNode->upstreamPciPortInfo.domain = pUpstreamPortPciInfo->domain;
+    pNode->upstreamPciPortInfo.bus = pUpstreamPortPciInfo->bus;
+    pNode->upstreamPciPortInfo.device = pUpstreamPortPciInfo->device;
+    pNode->upstreamPciPortInfo.function = pUpstreamPortPciInfo->func;
+    pNode->upstreamPciPortInfo.bValid = pUpstreamPortPciInfo->valid;
+
+    pNode->bShutdownState = NV_FALSE;
+
+    // Initialize all compute policies with default values
+    pNode->policyInfo.timeslice = NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_DEFAULT;
+
+    // Initialize all chosenIdx entries to _INVALID
+    for (i = 0; i < GPUDB_CLK_PROP_TOP_POLS_COUNT; ++i)
+    {
+        ct_assert(sizeof(pNode->clkPropTopPolsControl.chosenIdx[0]) == sizeof(NvU8));
+        pNode->clkPropTopPolsControl.chosenIdx[i] = NV_U8_MAX;
+    }
+
+    // Initialize the RUSD permanent polling mask to 0
+    pNode->rusd.permanentPolledDataMask = 0;
+
+done:
+    portSyncMutexRelease(pGpuDb->pLock);
+    return status;
+}
+
+/*!
+* @brief Update/Set the compute policy config for a GPU
+*
+* @param[in] pUuid       GPU uuid
+* @param[in] policyType  Policy for which config has to be set
+* @param[in] policyInfo  Requested policy config
+*
+* @return NV_OK                     Config updated successfully
+* @return NV_ERR_INVALID_ARGUMENT   Invalid argument specified
+* @return NV_ERR_OBJECT_NOT_FOUND   GPU entry in db not found
+*/
+NV_STATUS
+gpudbSetGpuComputePolicyConfig
+(
+    const NvU8 *pUuid,
+    NvU32 policyType,
+    GPU_COMPUTE_POLICY_INFO *policyInfo
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    GpuDb *pGpuDb = SYS_GET_GPUDB(pSys);
+    GPU_INFO_LIST_NODE *pNode;
+    NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND;
+
+    if (pUuid == NULL || policyInfo == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    portSyncMutexAcquire(pGpuDb->pLock);
+
+    pNode = _gpudbFindGpuInfoByUuid(pUuid);
+    if (pNode == NULL)
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+        goto done;
+    }
+
+    // Store the policy specific data
+    switch (policyType)
+    {
+        case NV2080_CTRL_GPU_COMPUTE_POLICY_TIMESLICE:
+            pNode->policyInfo.timeslice = policyInfo->timeslice;
+            status = NV_OK;
+            break;
+        default:
+            status = NV_ERR_INVALID_ARGUMENT;
+            break;
+    }
+
+done:
+    portSyncMutexRelease(pGpuDb->pLock);
+    return status;
+}
+
+/*!
+* @brief Get all compute policy configs for a GPU
+*
+* @param[in]  pUuid       GPU uuid
+* @param[out] policyInfo  Pointer in which to retrieve all compute policies
+*                         for the requested GPU
+*
+* @return NV_OK                     Configs retrieved successfully
+* @return NV_ERR_INVALID_ARGUMENT   Invalid argument specified
+* @return NV_ERR_OBJECT_NOT_FOUND   GPU entry in db not found
+*/
+NV_STATUS
+gpudbGetGpuComputePolicyConfigs
+(
+    const NvU8 *pUuid,
+    GPU_COMPUTE_POLICY_INFO *policyInfo
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    GpuDb *pGpuDb = SYS_GET_GPUDB(pSys);
+    GPU_INFO_LIST_NODE *pNode;
+    NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND;
+
+    if (pUuid == NULL || policyInfo == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    portSyncMutexAcquire(pGpuDb->pLock);
+
+    pNode = _gpudbFindGpuInfoByUuid(pUuid);
+    if (pNode == NULL)
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+        goto done;
+    }
+
+    // Return the policy specific data
+    portMemCopy(policyInfo, sizeof(GPU_COMPUTE_POLICY_INFO),
+                &pNode->policyInfo, sizeof(GPU_COMPUTE_POLICY_INFO));
+    status = NV_OK;
+
+done:
+    portSyncMutexRelease(pGpuDb->pLock);
+    return status;
+}
+
+/*!
+* @brief Set clock policies control for a GPU
+*
+* @param[in] pUuid     Pointer to GPU uuid
+* @param[in] pControl  Pointer to the control tuple
+*
+* @return NV_OK                     Control stored successfully
+* @return NV_ERR_INVALID_ARGUMENT   Invalid argument specified
+* @return NV_ERR_OBJECT_NOT_FOUND   GPU entry in db not found
+*/
+NV_STATUS
+gpudbSetClockPoliciesControl
+(
+    const NvU8 *pUuid,
+    GPU_CLK_PROP_TOP_POLS_CONTROL *pControl
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    GpuDb *pGpuDb = SYS_GET_GPUDB(pSys);
+    GPU_INFO_LIST_NODE *pNode;
+    NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND;
+
+    if (pUuid == NULL || pControl == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    portSyncMutexAcquire(pGpuDb->pLock);
+
+    pNode = _gpudbFindGpuInfoByUuid(pUuid);
+    if (pNode == NULL)
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+        goto done;
+    }
+
+    portMemCopy(&pNode->clkPropTopPolsControl,
+                sizeof(GPU_CLK_PROP_TOP_POLS_CONTROL),
+                pControl,
+                sizeof(GPU_CLK_PROP_TOP_POLS_CONTROL));
+
+    status = NV_OK;
+done:
+    portSyncMutexRelease(pGpuDb->pLock);
+    return status;
+}
+
+/*!
+* @brief Get clock policies control for a GPU
+*
+* @param[in]  pUuid     Pointer to GPU uuid
+* @param[out] pControl  Pointer to the control tuple
+*
+* @return NV_OK                     Configs retrieved successfully
+* @return NV_ERR_INVALID_ARGUMENT   Invalid argument specified
+* @return NV_ERR_OBJECT_NOT_FOUND   GPU entry in db not found
+*/
+NV_STATUS
+gpudbGetClockPoliciesControl
+(
+    const NvU8 *pUuid,
+    GPU_CLK_PROP_TOP_POLS_CONTROL *pControl
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    GpuDb *pGpuDb = SYS_GET_GPUDB(pSys);
+    GPU_INFO_LIST_NODE *pNode;
+    NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND;
+
+    if (pUuid == NULL || pControl == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    portSyncMutexAcquire(pGpuDb->pLock);
+
+    pNode = _gpudbFindGpuInfoByUuid(pUuid);
+    if (pNode == NULL)
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+        goto done;
+    }
+
+    portMemCopy(pControl,
+                sizeof(GPU_CLK_PROP_TOP_POLS_CONTROL),
+                &pNode->clkPropTopPolsControl,
+                sizeof(GPU_CLK_PROP_TOP_POLS_CONTROL));
+
+    status = NV_OK;
+done:
+    portSyncMutexRelease(pGpuDb->pLock);
+    return status;
+}
+
+NV_STATUS
+gpudbSetShutdownState
+(
+    const NvU8 *pUuid
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    GpuDb *pGpuDb = SYS_GET_GPUDB(pSys);
+    GPU_INFO_LIST_NODE *pNode;
+    NV_STATUS status = NV_OK;
+
+    portSyncMutexAcquire(pGpuDb->pLock);
+    pNode = _gpudbFindGpuInfoByUuid(pUuid);
+    if (pNode == NULL)
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+        goto done;
+    }
+
+    pNode->bShutdownState = NV_TRUE;
+
+done:
+    portSyncMutexRelease(pGpuDb->pLock);
+    return status;
+}
+
+/*!
+* @brief Update/Set the RUSD settings for a GPU
+*
+* @param[in] pUuid  GPU uuid
+* @param[in] pRusd  Pointer to the RUSD settings
+*
+* @return NV_OK                     Config updated successfully
+* @return NV_ERR_INVALID_ARGUMENT   Invalid argument specified
+* @return NV_ERR_OBJECT_NOT_FOUND   GPU entry in db not found
+*/
+NV_STATUS
+gpudbSetRusdSettings
+(
+    const NvU8 *pUuid,
+    GPU_DB_RUSD_SETTINGS *pRusd
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    GpuDb *pGpuDb = SYS_GET_GPUDB(pSys);
+    GPU_INFO_LIST_NODE *pNode;
+    NV_STATUS status = NV_OK;
+
+    if (pUuid == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    portSyncMutexAcquire(pGpuDb->pLock);
+
+    pNode = _gpudbFindGpuInfoByUuid(pUuid);
+    if (pNode == NULL)
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+        goto done;
+    }
+
+    pNode->rusd = *pRusd;
+
+done:
+    portSyncMutexRelease(pGpuDb->pLock);
+    return status;
+}
+
+/*!
+* @brief Get all GPU RUSD settings
+*
+* @param[in]  pUuid  GPU uuid
+* @param[out] pRusd  Pointer to the returned RUSD settings.
+*
+* @return NV_OK                     Configs retrieved successfully
+* @return NV_ERR_INVALID_ARGUMENT   Invalid argument specified
+* @return NV_ERR_OBJECT_NOT_FOUND   GPU entry in db not found
+*/
+NV_STATUS
+gpudbGetRusdSettings
+(
+    const NvU8 *pUuid,
+    GPU_DB_RUSD_SETTINGS *pRusd
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    GpuDb *pGpuDb = SYS_GET_GPUDB(pSys);
+    GPU_INFO_LIST_NODE *pNode;
+    NV_STATUS status = NV_OK;
+
+    if (pUuid == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    portSyncMutexAcquire(pGpuDb->pLock);
+
+    pNode = _gpudbFindGpuInfoByUuid(pUuid);
+    if (pNode == NULL)
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+        goto done;
+    }
+
+    // Return the RUSD settings
+    *pRusd = pNode->rusd;
+
+done:
+    portSyncMutexRelease(pGpuDb->pLock);
+    return status;
+}
+
diff --git a/src/nvidia/src/kernel/gpu_mgr/gpu_group.c b/src/nvidia/src/kernel/gpu_mgr/gpu_group.c
new file mode 100644
index 0000000..d0af0c2
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu_mgr/gpu_group.c
@@ -0,0 +1,329 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/***************************** HW State Routines ***************************\
+*                                                                           *
+*         GpuGrp Object Function Definitions.                               *
+*                                                                           *
+\***************************************************************************/
+
+#include "gpu_mgr/gpu_group.h"
+#include "gpu_mgr/gpu_mgr.h"
+#include "gpu/gpu.h"
+#include "core/system.h"
+#include "class/cl2080.h"
+#include "mem_mgr/virt_mem_mgr.h"
+#include "mem_mgr/vaspace.h"
+#include "class/cl90f1.h" // FERMI_VASPACE_A
+#include "nvlimits.h"
+
+/*!
+ * Creates the gpugrp object.
+ *
+ * @param[in] pGpuGrp  gpugrp object pointer
+ * @param[in] gpuMask  Mask of GPUs corresponding to this gpugrp
+ *
+ * @returns NV_OK on success, appropriate error on failure.
+ */
+NV_STATUS
+gpugrpCreate_IMPL
+(
+    OBJGPUGRP *pGpuGrp,
+    NvU32      gpuMask
+)
+{
+    pGpuGrp->gpuMask = gpuMask;
+    //
+    // Add the gpugrp instance to the GPU objects in the mask.
+    // At boot this call fails and is deferred to GPU post-construct.
+    // When coming out of SLI this call succeeds, and post-construct
+    // is not called.
+    //
+    gpumgrAddDeviceInstanceToGpus(gpuMask);
+    return NV_OK;
+}
+
+/*!
+ * Destroys the gpugrp object.
+ *
+ * It first iterates over the GPUs that belong to this gpugrp object,
+ * as indicated by the gpuMask, and then destroys the object itself.
+ *
+ * @param[in] pGpuGrp  gpugrp object pointer
+ */
+NV_STATUS
+gpugrpDestroy_IMPL
+(
+    OBJGPUGRP *pGpuGrp
+)
+{
+    NV_STATUS rmStatus = NV_ERR_OBJECT_NOT_FOUND;
+    OBJGPU   *pGpu = NULL;
+    NvU32     gpuIndex = 0;
+
+    // Reset the device instance on the GPU objects in the mask
+    while ((pGpu = gpumgrGetNextGpu(pGpuGrp->gpuMask, &gpuIndex)))
+    {
+        rmStatus = NV_OK;
+        pGpu->deviceInstance = NV_MAX_DEVICES;
+    }
+
+    // Call the utility routine that does the object deletion.
+    objDelete(pGpuGrp);
+    return rmStatus;
+}
+
+/*!
+ * Gets the gpu mask for the gpugrp.
+ *
+ * @param[in] pGpuGrp  gpugrp object pointer
+ *
+ * @returns NvU32 gpumask
+ */
+NvU32
+gpugrpGetGpuMask_IMPL(OBJGPUGRP *pGpuGrp)
+{
+    return pGpuGrp->gpuMask;
+}
+
+/*!
+ * Sets the gpu mask for the gpugrp.
+ *
+ * @param[in] pGpuGrp  gpugrp object pointer
+ * @param[in] gpuMask  gpumask to set
+ */
+void
+gpugrpSetGpuMask_IMPL(OBJGPUGRP *pGpuGrp, NvU32 gpuMask)
+{
+    pGpuGrp->gpuMask = gpuMask;
+}
+
+/*!
+ * Gets the broadcast enabled state
+ *
+ * @param[in] pGpuGrp  gpugrp object pointer
+ *
+ * @returns NvBool
+ */
+NvBool
+gpugrpGetBcEnabledState_IMPL(OBJGPUGRP *pGpuGrp)
+{
+    return pGpuGrp->bcEnabled;
+}
+
+/*!
+ * Sets the broadcast enable state
+ *
+ * @param[in] pGpuGrp  gpugrp object pointer
+ * @param[in] bcState  Broadcast enable state
+ */
+void
+gpugrpSetBcEnabledState_IMPL(OBJGPUGRP *pGpuGrp, NvBool bcState)
+{
+    pGpuGrp->bcEnabled = bcState;
+}
+
+/*!
+ * Sets the parent GPU for the gpugrp
+ *
+ * @param[in] pGpuGrp     gpugrp object pointer
+ * @param[in] pParentGpu  Parent GPU object pointer
+ */
+void
+gpugrpSetParentGpu_IMPL
+(
+    OBJGPUGRP *pGpuGrp,
+    OBJGPU    *pParentGpu
+)
+{
+    pGpuGrp->parentGpu = pParentGpu;
+}
+
+/*!
+ * Gets the parent GPU for the gpugrp
+ *
+ * @param[in] pGpuGrp  gpugrp object pointer
+ *
+ * @returns GPU pointer
+ */
+POBJGPU
+gpugrpGetParentGpu_IMPL(OBJGPUGRP *pGpuGrp)
+{
+    return pGpuGrp->parentGpu;
+}
+
+
+/*!
+ * @brief gpugrpCreateGlobalVASpace - creates the Global VASpace for this gpugrp.
+ *
+ * This is created once per group, so for GPUs in SLI only one is created.
+ *
+ * @param[in]  pGpuGrp          GPUGRP object pointer
+ * @param[in]  pGpu             Parent GPU object pointer
+ * @param[in]  vaspaceClass     VASPACE class to create
+ * @param[in]  vaStart          vaspace start
+ * @param[in]  vaLimit          vaspace limit
+ * @param[in]  vaspaceFlags     VASPACE flags for creation
+ * @param[out] ppGlobalVASpace  Global vaspace that is created
+ *
+ * @return NV_OK on success or appropriate RM_ERR on failure
+ */
+NV_STATUS
+gpugrpCreateGlobalVASpace_IMPL
+(
+    OBJGPUGRP   *pGpuGrp,
+    OBJGPU      *pGpu,
+    NvU32        vaspaceClass,
+    NvU64        vaStart,
+    NvU64        vaLimit,
+    NvU32        vaspaceFlags,
+    OBJVASPACE **ppGlobalVASpace
+)
+{
+    NV_STATUS rmStatus;
+    NvU32     gpuMask = pGpuGrp->gpuMask;
+    OBJSYS   *pSys = SYS_GET_INSTANCE();
+    OBJVMM   *pVmm = SYS_GET_VMM(pSys);
+    NvBool    bcState = gpumgrGetBcEnabledStatus(pGpu);
+
+    NV_ASSERT_OR_RETURN(ppGlobalVASpace != NULL, NV_ERR_INVALID_ARGUMENT);
+    *ppGlobalVASpace = NULL;
+
+    gpumgrSetBcEnabledStatus(pGpu, NV_TRUE);
+    vaspaceFlags |= VASPACE_FLAGS_ENABLE_VMM;
+    rmStatus = vmmCreateVaspace(pVmm, vaspaceClass, 0x0, gpuMask, vaStart,
+                                vaLimit, 0, 0, NULL, vaspaceFlags, &pGpuGrp->pGlobalVASpace);
+    gpumgrSetBcEnabledStatus(pGpu, bcState);
+    if (NV_OK != rmStatus)
+    {
+        pGpuGrp->pGlobalVASpace = NULL;
+        return rmStatus;
+    }
+    *ppGlobalVASpace = pGpuGrp->pGlobalVASpace;
+
+    return rmStatus;
+}
+
+/*!
+ * @brief gpugrpDestroyGlobalVASpace - Destroys the gpugrp global vaspace
+ *
+ * @param[in] pGpuGrp  GPUGRP object pointer
+ * @param[in] pGpu     Parent GPU object pointer
+ *
+ * @return NV_OK on success or appropriate RM_ERR on failure
+ */
+NV_STATUS
+gpugrpDestroyGlobalVASpace_IMPL(OBJGPUGRP *pGpuGrp, OBJGPU *pGpu)
+{
+    NV_STATUS rmStatus = NV_OK;
+    OBJSYS   *pSys = SYS_GET_INSTANCE();
+    OBJVMM   *pVmm = SYS_GET_VMM(pSys);
+    NvBool    bcState = gpumgrGetBcEnabledStatus(pGpu);
+
+    // Nothing to destroy, bail out early
+    if (pGpuGrp->pGlobalVASpace == NULL)
+        return rmStatus;
+
+    gpumgrSetBcEnabledStatus(pGpu, NV_TRUE);
+    vmmDestroyVaspace(pVmm, pGpuGrp->pGlobalVASpace);
+    gpumgrSetBcEnabledStatus(pGpu, bcState);
+    pGpuGrp->pGlobalVASpace = NULL;
+    return rmStatus;
+}
+
+
+/*!
+ * @brief gpugrpGetGlobalVASpace - retrieves the group global vaspace
+ *
+ * @param[in]  pGpuGrp    GPUGRP object pointer
+ * @param[out] ppVASpace  Global vaspace for this GPUGRP
+ *
+ * @return NV_OK on success
+ *         NV_ERR_INVALID_ARGUMENT on NULL pointer parameter
+ *         NV_ERR_OBJECT_NOT_FOUND if there is no device vaspace
+ */
+NV_STATUS
+gpugrpGetGlobalVASpace_IMPL(OBJGPUGRP *pGpuGrp, OBJVASPACE **ppVASpace)
+{
+    NV_ASSERT_OR_RETURN(ppVASpace != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    if (pGpuGrp->pGlobalVASpace == NULL)
+    {
+        *ppVASpace = NULL;
+        return NV_ERR_OBJECT_NOT_FOUND;
+    }
+    *ppVASpace = pGpuGrp->pGlobalVASpace;
+    return NV_OK;
+}
+
+
+/*!
+ * @brief gpugrpGetGpuFromSubDeviceInstance - retrieves the pGpu associated to
+ *        a GPU group and a subdevice instance.
+ *
+ * @param[in]  pGpuGrp        GPUGRP object pointer
+ * @param[in]  subDeviceInst  GPU subdevice instance
+ * @param[out] ppGpu          POBJGPU* pointer
+ *
+ * @return NV_OK on success
+ *         NV_ERR_INVALID_ARGUMENT on NULL pointer parameter
+ *         NV_ERR_OBJECT_NOT_FOUND if there is no GPU for the input parameters
+ */
+NV_STATUS
+gpugrpGetGpuFromSubDeviceInstance_IMPL(OBJGPUGRP *pGpuGrp, NvU32 subDeviceInst, OBJGPU **ppGpu)
+{
+    OBJGPU *pGpu = NULL;
+    NvU32   gpuInst = 0;
+    NvU32   gpuMask;
+
+    NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    *ppGpu = NULL;
+
+    gpuMask = gpugrpGetGpuMask(pGpuGrp);
+
+    // check for single GPU case
+    if (gpumgrGetSubDeviceCount(gpuMask) == 1)
+    {
+        *ppGpu = gpumgrGetNextGpu(gpuMask, &gpuInst);
+    }
+    else
+    {
+        while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInst)) != NULL)
+        {
+            if (gpumgrGetSubDeviceInstanceFromGpu(pGpu) == subDeviceInst)
+            {
+                *ppGpu = pGpu;
+                break;
+            }
+        }
+    }
+    return (*ppGpu == NULL ? NV_ERR_OBJECT_NOT_FOUND : NV_OK);
+}
+
diff --git a/src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c b/src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c
new file mode 100644
index 0000000..354581f
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c
@@ -0,0 +1,65 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** + * + * File: gpu_mgmt_api.c + * + * Description: + * This file contains the functions managing the GPU information + * encapsulated by GPUDB object or probed state GPU. + * + *****************************************************************************/ + +#include "core/core.h" +#include "gpu_mgr/gpu_mgmt_api.h" +#include "gpu_mgr/gpu_db.h" + +NV_STATUS +gpumgmtapiConstruct_IMPL +( + GpuManagementApi *pGpuMgmt, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +void +gpumgmtapiDestruct_IMPL +( + GpuManagementApi *pGpuMgmt +) +{ +} + +NV_STATUS +gpumgmtapiCtrlCmdSetShutdownState_IMPL +( + GpuManagementApi *pGpuMgmt, + NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *pParams +) +{ + return gpudbSetShutdownState(pParams->uuid); +} diff --git a/src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c b/src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c new file mode 100644 index 0000000..55628f6 --- /dev/null +++ b/src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c @@ -0,0 +1,3499 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/***************************** HW State Routines ***************************\ +* GPU Manager * +\***************************************************************************/ + + + +#include "core/system.h" +#include "core/locks.h" +#include "platform/sli/sli.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/device/device.h" +#include "gpu/gpu_arch.h" +#include "gpu/gpu.h" +#include "tls/tls.h" +#include "nvrm_registry.h" +#include "nv_ref.h" +#include "nvlimits.h" +#include "nv-firmware-registry.h" + +#include "virtualization/hypervisor/hypervisor.h" + +// local static funcs +static void gpumgrSetAttachInfo(OBJGPU *, GPUATTACHARG *); +static void gpumgrGetGpuHalFactor(NvU32 *pChipId0, NvU32 *pChipId1, NvU32 *pSocChipId0, RM_RUNTIME_VARIANT *pRmVariant, TEGRA_CHIP_TYPE *pTegraType, GPUATTACHARG *pAttachArg); +static NvBool _gpumgrGetPcieP2PCapsFromCache(NvU32 gpuMask, NvU8* pP2PWriteCapsStatus, NvU8* pP2PReadCapsStatus); + +static void +_gpumgrUnregisterRmCapsForGpuUnderLock(NvU64 gpuDomainBusDevice) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + // SMC partition caps must be destroyed before GPU caps. + gpumgrUnregisterRmCapsForMIGGI(gpuDomainBusDevice); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (pProbedGpu->gpuDomainBusDevice == gpuDomainBusDevice && + pProbedGpu->gpuId != NV0000_CTRL_GPU_INVALID_ID) + { + osRmCapUnregister(&pProbedGpu->pOsRmCaps); + break; + } + } +} + +static void +_gpumgrUnregisterRmCapsForGpu(NvU64 gpuDomainBusDevice) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + _gpumgrUnregisterRmCapsForGpuUnderLock(gpuDomainBusDevice); + portSyncMutexRelease(pGpuMgr->probedGpusLock); +} + +static NV_STATUS +_gpumgrRegisterRmCapsForGpu(OBJGPU *pGpu) +{ + NV_STATUS status = NV_ERR_INVALID_STATE; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (((pProbedGpu->gpuDomainBusDevice == 0) && pGpu->bIsSOC) || + (pProbedGpu->gpuDomainBusDevice == gpuGetDBDF(pGpu) && + pProbedGpu->gpuId != NV0000_CTRL_GPU_INVALID_ID)) + { + if (pProbedGpu->pOsRmCaps == NULL) + { + status = osRmCapRegisterGpu(pGpu->pOsGpuInfo, + &pProbedGpu->pOsRmCaps); + } + else + { + status = NV_OK; + } + + pGpu->pOsRmCaps = pProbedGpu->pOsRmCaps; + break; + } + } + + NV_ASSERT(status == NV_OK); + + portSyncMutexRelease(pGpuMgr->probedGpusLock); + + return status; +} + +static void +_gpumgrCacheClearMIGGpuIdInfo(NvU32 gpuId) +{ +} + +// Iterates through OBJGPUMGR children and deletes any GpuArch objects. 
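+// (The next sibling is cached before objDelete() so that iteration can
+// continue safely after the current child is unlinked from the list.)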
+static void +_gpumgrDeleteCachedGpuArch(OBJGPUMGR *pGpuMgr) +{ + Object *pIter = objGetChild(staticCast(pGpuMgr, Object)); + while (pIter != NULL) + { + Object *pNext = objGetSibling(pIter); + GpuArch *pGpuArch = dynamicCast(pIter, GpuArch); + if (pGpuArch != NULL) + objDelete(pGpuArch); + pIter = pNext; + } +} + +// +// ODB functions +// +NV_STATUS +gpumgrConstruct_IMPL(OBJGPUMGR *pGpuMgr) +{ + NV_STATUS status; + NvU32 i; + + NV_PRINTF(LEVEL_INFO, "gpumgrConstruct\n"); + + pGpuMgr->numGpuHandles = 0; + + for (i = 0; i < NV_MAX_DEVICES; i++) + pGpuMgr->gpuHandleIDList[i].gpuInstance = NV_MAX_DEVICES; + + pGpuMgr->pChildListMutex = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged()); + NV_ASSERT_OR_RETURN(pGpuMgr->pChildListMutex != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + + pGpuMgr->probedGpusLock = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged()); + if (pGpuMgr->probedGpusLock == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + NV_ASSERT_OR_GOTO(pGpuMgr->probedGpusLock != NULL, cleanup); + } + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + portMemSet(&pGpuMgr->probedGpus[i], 0, sizeof(PROBEDGPU)); + pGpuMgr->probedGpus[i].gpuId = NV0000_CTRL_GPU_INVALID_ID; + } + + pGpuMgr->gpuAttachCount = 0; + pGpuMgr->gpuAttachMask = 0; + + pGpuMgr->deviceCount = 0; + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->pGpuGrpTable); i++) + { + pGpuMgr->pGpuGrpTable[i] = NULL; + } + + pGpuMgr->powerDisconnectedGpuCount = 0; + + NV_ASSERT_OK_OR_GOTO(status, gpumgrInitPcieP2PCapsCache(pGpuMgr), cleanup); + + return NV_OK; + +cleanup: + gpumgrDestruct_IMPL(pGpuMgr); + return status; +} + + +void +gpumgrDestruct_IMPL(OBJGPUMGR *pGpuMgr) +{ + NV_PRINTF(LEVEL_INFO, "gpumgrDestruct\n"); + + _gpumgrDeleteCachedGpuArch(pGpuMgr); + + if (pGpuMgr->pChildListMutex != NULL) + portSyncMutexDestroy(pGpuMgr->pChildListMutex); + + if (pGpuMgr->probedGpusLock != NULL) + portSyncMutexDestroy(pGpuMgr->probedGpusLock); + + gpumgrDestroyPcieP2PCapsCache(pGpuMgr); +} + +static NvBool +_gpumgrThreadHasExpandedGpuVisibilityInTls(void) +{ + NvP64 entry = tlsEntryGet(TLS_ENTRY_ID_GPUMGR_EXPANDED_GPU_VISIBILITY); + return (entry != NvP64_NULL) && (entry == ((NvP64) 1)); +} + +NvBool +gpumgrThreadHasExpandedGpuVisibility(void) +{ + if (RMCFG_FEATURE_PLATFORM_UNIX) + { + return _gpumgrThreadHasExpandedGpuVisibilityInTls(); + } + else + { + // + // Bug 4376209 + // Non-UNIX platforms have expanded GPU visibility by default for now + // (this is OK as they do not have parallel init yet). + // + return NV_TRUE; + } +} + +NV_STATUS +gpumgrThreadEnableExpandedGpuVisibility(void) +{ + NvP64 *pEntry = tlsEntryAcquire(TLS_ENTRY_ID_GPUMGR_EXPANDED_GPU_VISIBILITY); + if (pEntry == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + *pEntry = (NvP64) 1; + return NV_OK; +} + +void +gpumgrThreadDisableExpandedGpuVisibility(void) +{ + if (_gpumgrThreadHasExpandedGpuVisibilityInTls()) + { + NvU32 refCount = tlsEntryRelease(TLS_ENTRY_ID_GPUMGR_EXPANDED_GPU_VISIBILITY); + NV_ASSERT(refCount == 0); + } +} + +// +// gpumgrAllocGpuInstance +// +// This interface returns the next available gpu number. 
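+//
+// A minimal usage sketch (hypothetical caller, not part of this change):
+//
+//     NvU32 gpuInstance;
+//     if (gpumgrAllocGpuInstance(&gpuInstance) != NV_OK)
+//         return NV_ERR_INSUFFICIENT_RESOURCES; // all NV_MAX_DEVICES slots in use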
+//
+NV_STATUS
+gpumgrAllocGpuInstance(NvU32 *pGpuInstance)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+
+    NvU32 i;
+    NvU64 availableIDs = ((1ULL << NV_MAX_DEVICES) - 1);
+
+    if (pGpuMgr->numGpuHandles == 0)
+    {
+        *pGpuInstance = 0;
+        return NV_OK;
+    }
+    else if (pGpuMgr->numGpuHandles == NV_MAX_DEVICES)
+    {
+        *pGpuInstance = NV_MAX_DEVICES;
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+
+    for (i = 0; i < pGpuMgr->numGpuHandles; i++)
+        availableIDs &= ~NVBIT(pGpuMgr->gpuHandleIDList[i].gpuInstance);
+
+    for (i = 0; ((availableIDs & (1ULL << i)) == 0); i++)
+        ;
+
+    *pGpuInstance = i;
+
+    return NV_OK;
+}
+
+//
+// During destruction of a GPU the handle list needs to be modified.
+// Since we cannot guarantee the _last_ GPU will always be the one
+// destroyed, we have to compact the handle list so we have no gaps
+// and can simply decrement numGpuHandles.
+//
+static void
+_gpumgrShiftDownGpuHandles(NvU32 startIndex)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    NvU32 i, lastMovedIndex = startIndex;
+
+    for (i = startIndex; i < (NV_MAX_DEVICES - 1); i++)
+    {
+        if (pGpuMgr->gpuHandleIDList[i + 1].pGpu != 0)
+        {
+            lastMovedIndex = i + 1;
+            pGpuMgr->gpuHandleIDList[i].gpuInstance =
+                pGpuMgr->gpuHandleIDList[i + 1].gpuInstance;
+
+            pGpuMgr->gpuHandleIDList[i].pGpu =
+                pGpuMgr->gpuHandleIDList[i + 1].pGpu;
+        }
+    }
+
+    pGpuMgr->gpuHandleIDList[lastMovedIndex].gpuInstance = NV_MAX_DEVICES;
+    pGpuMgr->gpuHandleIDList[lastMovedIndex].pGpu = reinterpretCast(NULL, OBJGPU *);
+    pGpuMgr->numGpuHandles--;
+}
+
+static void
+_gpumgrDestroyGpu(NvU32 gpuInstance)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    OBJGPU *pGpu;
+    NvU32 i;
+
+    osSyncWithGpuDestroy(NV_TRUE);
+
+    pGpu = gpumgrGetGpu(gpuInstance);
+
+    objDelete(pGpu);
+
+    for (i = 0; i < pGpuMgr->numGpuHandles; i++)
+    {
+        if (pGpuMgr->gpuHandleIDList[i].gpuInstance == gpuInstance)
+        {
+            pGpuMgr->gpuHandleIDList[i].gpuInstance = NV_MAX_DEVICES;
+            pGpuMgr->gpuHandleIDList[i].pGpu = reinterpretCast(NULL, OBJGPU *);
+            _gpumgrShiftDownGpuHandles(i);
+            break;
+        }
+    }
+
+    osSyncWithGpuDestroy(NV_FALSE);
+}
+
+POBJGPU
+gpumgrGetGpu(NvU32 gpuInstance)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    NvU32 i;
+
+    for (i = 0; i < pGpuMgr->numGpuHandles; i++)
+    {
+        if (pGpuMgr->gpuHandleIDList[i].gpuInstance == gpuInstance)
+        {
+            OBJGPU *pGpu = pGpuMgr->gpuHandleIDList[i].pGpu;
+            if (pGpu != NULL)
+            {
+                if (gpumgrThreadHasExpandedGpuVisibility() ||
+                    pGpu->getProperty(pGpu, PDB_PROP_GPU_STATE_INITIALIZED))
+                {
+                    return pGpu;
+                }
+            }
+        }
+    }
+
+    return NULL;
+}
+
+POBJGPU
+gpumgrGetSomeGpu(void)
+{
+    OBJGPU *pGpu = NULL;
+    NvU32 gpuMask = 0;
+    NvU32 gpuIndex = 0;
+    NvU32 gpuCount = 0;
+
+    // Get some gpu to get the SLI Display Parent
+    gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask);
+    pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex);
+
+    if (pGpu == NULL)
+    {
+        // None of the GPUs are initialized - Too early
+        NV_PRINTF(LEVEL_ERROR,
+                  "Failed to retrieve pGpu - too early call!\n");
+        NV_ASSERT(NV_FALSE);
+        return pGpu;
+    }
+    return pGpu;
+}
+
+
+//
+// gpumgrAllocDeviceInstance
+//
+// This interface returns the next available broadcast device number.
+// This broadcast device number is used to uniquely identify this set
+// of gpu(s) both internally in the RM (e.g. OBJGPUGRP handle) as well
+// as via the architecture (e.g., for the 'deviceId' parameter of
+// NV0080_ALLOC_PARAMETERS).
+//
+NV_STATUS
+gpumgrAllocDeviceInstance(NvU32 *pDeviceInstance)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    NvU32 i;
+
+    for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->pGpuGrpTable); i++)
+        if (pGpuMgr->pGpuGrpTable[i] == NULL)
+            break;
+
+    if (i == NV_MAX_DEVICES)
+    {
+        *pDeviceInstance = NV_MAX_DEVICES;
+        return NV_ERR_GENERIC;
+    }
+
+    *pDeviceInstance = i;
+
+    return NV_OK;
+}
+
+//
+// gpumgrGetGpuAttachInfo
+//
+// Returns current gpu attach info.
+//
+NV_STATUS
+gpumgrGetGpuAttachInfo(NvU32 *pGpuCnt, NvU32 *pGpuMask)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+
+    NvU32 outCount = 0;
+    NvU32 outMask = 0;
+
+    NvU32 gpuMask = pGpuMgr->gpuAttachMask;
+    NvU32 gpuInstance = 0;
+
+    while (gpuInstance != NV_MAX_DEVICES)
+    {
+        OBJGPU *pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance);
+        if (pGpu == NULL)
+            continue;
+
+        if (gpumgrThreadHasExpandedGpuVisibility() ||
+            pGpu->getProperty(pGpu, PDB_PROP_GPU_STATE_INITIALIZED))
+        {
+            // Adjust gpuInstance down by one, as gpumgrGetNextGpu increments it
+            outMask |= NVBIT(gpuInstance - 1);
+            outCount++;
+        }
+    }
+
+    // The caller can pass NULL for out-params it doesn't need.
+    if (pGpuCnt != NULL) *pGpuCnt = outCount;
+    if (pGpuMask != NULL) *pGpuMask = outMask;
+
+    return NV_OK;
+}
+
+NvU32
+gpumgrGetDeviceGpuMask(NvU32 deviceInstance)
+{
+    OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromInstance(deviceInstance);
+
+    if (pGpuGrp == NULL)
+    {
+        NV_PRINTF(LEVEL_WARNING,
+                  "Could not find GPU Group for deviceInstance 0x%x!\n",
+                  deviceInstance);
+        return 0;
+    }
+
+    return gpugrpGetGpuMask(pGpuGrp);
+}
+
+NV_STATUS
+gpumgrIsDeviceInstanceValid(NvU32 deviceInstance)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    OBJGPUGRP *pGpuGrp = NULL;
+
+    if (deviceInstance >= NV_MAX_DEVICES)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    pGpuGrp = pGpuMgr->pGpuGrpTable[deviceInstance];
+    if (NULL == pGpuGrp)
+        return NV_ERR_INVALID_DATA;
+
+    if (0 == gpugrpGetGpuMask(pGpuGrp))
+        return NV_ERR_INVALID_ARGUMENT;
+
+    return NV_OK;
+}
+
+NvBool
+gpumgrIsSubDeviceInstanceValid(NvU32 subDeviceInstance)
+{
+    if (subDeviceInstance >= NV2080_MAX_SUBDEVICES)
+        return NV_FALSE;
+
+    return NV_TRUE;
+}
+
+NvU32 gpumgrGetPrimaryForDevice(NvU32 deviceInstance)
+{
+    NvU32 gpuMask, gpuInstance = 0;
+    OBJGPU *pGpu = NULL;
+
+    gpuMask = gpumgrGetDeviceGpuMask(deviceInstance);
+
+    if (gpumgrIsSubDeviceCountOne(gpuMask))
+    {
+        pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance);
+        if (pGpu != NULL)
+        {
+            return pGpu->gpuInstance;
+        }
+    }
+    else
+    {
+        while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
+        {
+            if (gpumgrIsParentGPU(pGpu))
+            {
+                return pGpu->gpuInstance;
+            }
+        }
+    }
+
+    NV_PRINTF(LEVEL_ERROR,
+              "deviceInstance 0x%x does not exist!\n", deviceInstance);
+
+    return 0; // should never happen
+}
+
+NvBool
+gpumgrIsDeviceEnabled(NvU32 deviceInstance)
+{
+    NvU32 gpuMask, gpuInstance = 0;
+    NvBool bEnabled;
+
+    gpuMask = gpumgrGetDeviceGpuMask(deviceInstance);
+
+    if (gpuMask == 0)
+    {
+        return NV_FALSE;
+    }
+    /*
+     * Check if this device
+     * - has been disabled via Power-SLI
+     * - is in the "drain" state
+     */
+    if (gpumgrIsSubDeviceCountOne(gpuMask))
+    {
+        OBJGPU *pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance);
+
+        if (pGpu == NULL)
+            return NV_FALSE;
+
+        if ((gpumgrQueryGpuDrainState(pGpu->gpuId, &bEnabled, NULL) == NV_OK)
+            && bEnabled)
+        {
+            return NV_FALSE;
+        }
+    }
+
+    return NV_TRUE;
+}
+
+//
+// gpumgrRegisterGpuId
+//
+// This interface is used by
os-dependent code to insert a probed +// gpu into the table of probed gpus known to the RM. +// +NV_STATUS +gpumgrRegisterGpuId(NvU32 gpuId, NvU64 gpuDomainBusDevice) +{ + NV_STATUS status = NV_ERR_INSUFFICIENT_RESOURCES; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + if (pGpuMgr->probedGpus[i].gpuId == gpuId) + { + NV_PRINTF(LEVEL_ERROR, + "GPU id 0x%x already registered at index %u\n", + gpuId, i); + + // Duplicate gpu + status = NV_ERR_IN_USE; + goto done; + } + + if (pGpuMgr->probedGpus[i].gpuId == NV0000_CTRL_GPU_INVALID_ID) + { + pGpuMgr->probedGpus[i].gpuId = gpuId; + pGpuMgr->probedGpus[i].gpuDomainBusDevice = gpuDomainBusDevice; + pGpuMgr->probedGpus[i].bInitAttempted = NV_FALSE; + pGpuMgr->probedGpus[i].bExcluded = NV_FALSE; + pGpuMgr->probedGpus[i].bUuidValid = NV_FALSE; + pGpuMgr->probedGpus[i].pOsRmCaps = NULL; + status = NV_OK; + goto done; + } + } + +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return status; +} + +// +// gpumgrUnregisterGpuId +// +// This interface is used by os-dependent code to remove a gpu +// from the table of probed gpus known to the RM. +// +NV_STATUS +gpumgrUnregisterGpuId(NvU32 gpuId) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (pProbedGpu->gpuId == gpuId) + { + _gpumgrCacheClearMIGGpuIdInfo(gpuId); + gpumgrRemovePcieP2PCapsFromCache(pProbedGpu->gpuId); + _gpumgrUnregisterRmCapsForGpuUnderLock(pProbedGpu->gpuDomainBusDevice); + pProbedGpu->gpuId = NV0000_CTRL_GPU_INVALID_ID; + pProbedGpu->flags = 0; + pProbedGpu->bDrainState = NV_FALSE; + pProbedGpu->bRemoveIdle = NV_FALSE; + pProbedGpu->bExcluded = NV_FALSE; + pProbedGpu->bUuidValid = NV_FALSE; + goto done; + } + } + +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return NV_OK; +} + +// +// gpumgrExcludeGpuId +// +// This interface is used by os-dependent code to 'exclude' a gpu. +// +// gpuId: the device to exclude +// +NV_STATUS +gpumgrExcludeGpuId(NvU32 gpuId) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (pProbedGpu->gpuId == gpuId) + { + pProbedGpu->bExcluded = NV_TRUE; + goto done; + } + } + +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return NV_OK; +} + +// +// gpumgrSetUuid +// +// This interface is used by os-dependent code to pass the UUID for a gpu. +// The UUID is a 16-byte raw UUID/GID. 
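+//
+// Expected call order from the OS layer (illustrative, not normative):
+//
+//     gpumgrRegisterGpuId(gpuId, gpuDomainBusDevice); // at probe time
+//     gpumgrSetUuid(gpuId, pRawGid);                  // once the 16-byte GID is read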
+// +NV_STATUS +gpumgrSetUuid(NvU32 gpuId, NvU8 *uuid) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + if (uuid == NULL) + return NV_ERR_INVALID_DATA; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (pProbedGpu->gpuId == gpuId) + { + portMemCopy(pProbedGpu->uuid, RM_SHA1_GID_SIZE, uuid, RM_SHA1_GID_SIZE); + pProbedGpu->bUuidValid = NV_TRUE; + goto done; + } + } + +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return NV_OK; +} + +// +// gpumgrGetCachedUuid +// +// Lookup the cached UUID for a GPU +// +static NV_STATUS +gpumgrGetCachedUuid(NvU32 gpuId, NvU8 *uuid, unsigned int len) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + + if (uuid == NULL || len < RM_SHA1_GID_SIZE) + return NV_ERR_INVALID_ARGUMENT; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (pProbedGpu->gpuId == gpuId) + { + if (pProbedGpu->bUuidValid) + { + portMemCopy(uuid, RM_SHA1_GID_SIZE, pProbedGpu->uuid, RM_SHA1_GID_SIZE); + status = NV_OK; + } + else + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + } + +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return status; +} + +NV_STATUS +gpumgrGetGpuUuidInfo(NvU32 gpuId, NvU8 **ppUuidStr, NvU32 *pUuidStrLen, NvU32 uuidFlags) +{ + NvU8 *pUuid; + NV_STATUS status; + + if (ppUuidStr == NULL || pUuidStrLen == NULL) + return NV_ERR_INVALID_DATA; + + // gpumgr only supports SHA1 format; error out if requesting otherwise + if (!FLD_TEST_DRF(0000_CTRL_CMD, _GPU_GET_UUID_FROM_GPU_ID_FLAGS, _TYPE, _SHA1, uuidFlags)) + return NV_ERR_INVALID_ARGUMENT; + + pUuid = portMemAllocNonPaged(RM_SHA1_GID_SIZE); + if (pUuid == NULL) + return NV_ERR_NO_MEMORY; + + status = gpumgrGetCachedUuid(gpuId, pUuid, RM_SHA1_GID_SIZE); + if (status != NV_OK) + { + portMemFree(pUuid); + return status; + } + + if (FLD_TEST_DRF(0000_CTRL_CMD, _GPU_GET_UUID_FROM_GPU_ID_FLAGS, _FORMAT, _BINARY, uuidFlags)) + { + // Binary case - pUuid is freed by the caller + *ppUuidStr = pUuid; + *pUuidStrLen = RM_SHA1_GID_SIZE; + } + else + { + // Conversion to ASCII or UNICODE + status = transformGidToUserFriendlyString(pUuid, RM_SHA1_GID_SIZE, + ppUuidStr, pUuidStrLen, uuidFlags, RM_UUID_PREFIX_GPU); + portMemFree(pUuid); + } + + return status; +} + +static NV_STATUS +gpumgrGetRegisteredIds +( + NvU64 gpuDomainBusDevice, + NvU32 *pGpuId, + NvUuid *pUuid, + NvBool *pbUuidValid +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (NvU32 i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + if (pProbedGpu->gpuId == NV0000_CTRL_GPU_INVALID_ID) + continue; + + if (pProbedGpu->gpuDomainBusDevice == gpuDomainBusDevice) + { + *pGpuId = pProbedGpu->gpuId; + *pbUuidValid = pProbedGpu->bUuidValid; + if (pProbedGpu->bUuidValid) + portMemCopy(pUuid->uuid, sizeof(pUuid->uuid), + pProbedGpu->uuid, sizeof(pProbedGpu->uuid)); + + status = NV_OK; + break; + } + } + + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return status; +} + +static void +gpumgrGetGpuHalFactorOfVirtual(NvBool *pIsVirtual, GPUATTACHARG *pAttachArg) +{ 
+ DEVICE_MAPPING gpuDevMapping = {0}; + NvU32 pmcBoot1; + + gpuDevMapping.gpuNvAddr = pAttachArg->regBaseAddr; + gpuDevMapping.gpuNvLength = pAttachArg->regLength; + + *pIsVirtual = NV_FALSE; + + // Check register NV_PMC_BOOT_1 + pmcBoot1 = osDevReadReg032(/*pGpu=*/ NULL, &gpuDevMapping, NV_PMC_BOOT_1); + + // VGPU with SRIOV + if (FLD_TEST_DRF(_PMC, _BOOT_1, _VGPU, _VF, pmcBoot1)) + { + *pIsVirtual = NV_TRUE; + } +} + +NvBool gpumgrGetRmFirmwareLogsEnabled +( + NvU32 enableFirmwareLogsRegVal +) +{ + // Check for logs + if (enableFirmwareLogsRegVal == NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE) + { + return NV_TRUE; + } +#if defined(DEBUG) || defined(DEVELOP) + if (enableFirmwareLogsRegVal == NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG) + { + return NV_TRUE; + } +#endif // defined(DEBUG) || defined(DEVELOP) + + return NV_FALSE; +} + +void gpumgrGetRmFirmwarePolicy +( + NvU32 pmcBoot42, + NvBool bIsVirtualWithSriov, + NvBool bIsSoc, + NvU32 enableFirmwareRegVal, + NvBool *pbRequestFirmware, + NvBool *pbAllowFallbackToMonolithicRm, + WindowsFirmwarePolicyArg *pWinRmFwPolicyArg +) +{ + NvBool bFirmwareCapable = NV_FALSE; + NvBool bEnableByDefault = NV_FALSE; + NvU32 regkeyFirmwareMode; + + regkeyFirmwareMode = + enableFirmwareRegVal & NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK; + *pbAllowFallbackToMonolithicRm = + !!(enableFirmwareRegVal & NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK); + + bFirmwareCapable = gpumgrIsDeviceRmFirmwareCapable(pmcBoot42, + bIsVirtualWithSriov, + bIsSoc, + &bEnableByDefault, + pWinRmFwPolicyArg); + + *pbRequestFirmware = + (bFirmwareCapable && + ((regkeyFirmwareMode == NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED) || + (bEnableByDefault && (regkeyFirmwareMode != NV_REG_ENABLE_GPU_FIRMWARE_MODE_DISABLED)))); +} + +static NvBool _gpumgrIsRmFirmwareCapableChip(NvU32 pmcBoot42) +{ + return (decodePmcBoot42Architecture(pmcBoot42) >= NV_PMC_BOOT_42_ARCHITECTURE_TU100); +} + +NvBool gpumgrIsVgxRmFirmwareCapableChip(NvU32 pmcBoot42) +{ + return (decodePmcBoot42Architecture(pmcBoot42) >= NV_PMC_BOOT_42_ARCHITECTURE_GB100) || + (decodePmcBoot42Architecture(pmcBoot42) == NV_PMC_BOOT_42_ARCHITECTURE_GH100) || + (decodePmcBoot42Architecture(pmcBoot42) == NV_PMC_BOOT_42_ARCHITECTURE_AD100); +} + +static NvBool _gpumgrIsVgxRmFirmwareDefaultChip(NvU32 pmcBoot42) +{ + return gpumgrIsVgxRmFirmwareCapableChip(pmcBoot42); +} + +NvBool gpumgrIsDeviceRmFirmwareCapable +( + NvU32 pmcBoot42, + NvBool bIsVirtualWithSriov, + NvBool bIsSoc, + NvBool *pbEnabledByDefault, + WindowsFirmwarePolicyArg *pWinRmFwPolicyArg +) +{ + NvBool bEnabledByDefault = NV_FALSE; + NvBool bFirmwareCapable = NV_TRUE; + + // SoC is treated as always firmware capable and not enabled by default + if (bIsSoc) + goto finish; + + if (!hypervisorIsVgxHyper() && !_gpumgrIsRmFirmwareCapableChip(pmcBoot42)) + { + bFirmwareCapable = NV_FALSE; + goto finish; + } + else if (hypervisorIsVgxHyper() && !gpumgrIsVgxRmFirmwareCapableChip(pmcBoot42)) + { + bFirmwareCapable = NV_FALSE; + goto finish; + } + +#if NVCPU_IS_FAMILY_PPC + // Disable default enablement for GSP on PowerPC until it is fully tested + bEnabledByDefault = NV_FALSE; + goto finish; +#endif + + if (hypervisorIsVgxHyper()) + { + if (_gpumgrIsVgxRmFirmwareDefaultChip(pmcBoot42)) + { + bEnabledByDefault = NV_TRUE; + } + } + else + { + bEnabledByDefault = NV_TRUE; + } + +finish: + if (pbEnabledByDefault != NULL) + { + *pbEnabledByDefault = bEnabledByDefault; + } + + return bFirmwareCapable; +} + +static NvBool gpumgrCheckRmFirmwarePolicy +( + NvU64 nvDomainBusDeviceFunc, + 
    NvBool bRequestFwClientRm,
+    NvU32 pmcBoot42,
+    NvBool bIsSocDisp
+)
+{
+    if (!bRequestFwClientRm)
+        return NV_FALSE;
+
+    if (bIsSocDisp)
+        return NV_TRUE;
+
+    return NV_TRUE;
+}
+
+//
+// _gpumgrGetGpuArchHalFactor
+//
+// Decode GPU architecture HAL factors from the given register values.
+//
+static void
+_gpumgrGetGpuArchHalFactor
+(
+    NvU32  pmcBoot42,
+    NvU32  socChipId0,
+    NvU32 *pChipArch,
+    NvU32 *pChipImpl,
+    NvU32 *pHidrev
+)
+{
+    NvU32 hidrev, majorRev;
+
+    // dGPU architecture identification
+    *pChipArch = decodePmcBoot42Architecture(pmcBoot42);
+    *pChipImpl = DRF_VAL(_PMC, _BOOT_42, _IMPLEMENTATION, pmcBoot42);
+
+    // SOC/iGPU architecture identification
+    hidrev = DRF_VAL(_PAPB_MISC, _GP_HIDREV, _CHIPID, socChipId0);
+    majorRev = DRF_VAL(_PAPB_MISC, _GP_HIDREV, _MAJORREV, socChipId0);
+
+    // WAR: The majorrev of t234 shows 0xa on fmodel instead of 0x4
+    if ((hidrev == 0x23) && (majorRev == 0xa))
+    {
+        majorRev = 0x4;
+    }
+
+    *pHidrev = (hidrev << 4) | majorRev;
+}
+
+
+//
+// gpumgrGetGpuHalFactor
+//
+// Get the GPU HAL factors that are used to initialize the HAL binding.
+//
+static void
+gpumgrGetGpuHalFactor
+(
+    NvU32              *pChipId0,
+    NvU32              *pChipId1,
+    NvU32              *pSocChipId0,
+    RM_RUNTIME_VARIANT *pRmVariant,
+    TEGRA_CHIP_TYPE    *pTegraType,
+    GPUATTACHARG       *pAttachArg
+)
+{
+    NvBool isVirtual;
+    NvBool isFwClient;
+    NvBool bIsSocDisp = NV_FALSE;
+
+    *pTegraType = TEGRA_CHIP_TYPE_DEFAULT;
+
+    // get ChipId0 and ChipId1
+    if (pAttachArg->socDeviceArgs.specified)
+    {
+        *pChipId0 = pAttachArg->socDeviceArgs.socChipId0;
+
+        // iGPU has a PMC_BOOT_0, Display does not
+        if (pAttachArg->socDeviceArgs.bIsIGPU)
+        {
+            DEVICE_MAPPING *pGpuDevMapping = NULL;
+            pGpuDevMapping = &pAttachArg->socDeviceArgs.deviceMapping[DEVICE_INDEX_GPU];
+            *pChipId0 = osDevReadReg032(/*pGpu=*/ NULL, pGpuDevMapping, NV_PMC_BOOT_0);
+        }
+        else
+        {
+            bIsSocDisp = NV_TRUE;
+        }
+
+        *pChipId1 = 0;
+        *pSocChipId0 = pAttachArg->socDeviceArgs.socChipId0;
+        isVirtual = NV_FALSE;
+    }
+    else if (pAttachArg->bIsSOC)
+    {
+        // This path is only taken for ARCH MODS iGPU verification.
+
+        *pChipId0 = pAttachArg->socChipId0;
+        *pChipId1 = 0;
+        *pSocChipId0 = pAttachArg->socChipId0;
+        isVirtual = NV_FALSE;
+    }
+    else
+    {
+        DEVICE_MAPPING gpuDevMapping = {0};
+        gpuDevMapping.gpuNvAddr = pAttachArg->regBaseAddr;
+        gpuDevMapping.gpuNvLength = pAttachArg->regLength;
+
+        //
+        // The PMC_BOOT_42 register was added on G94+ chips and is internal to
+        // NVIDIA. Earlier, PMC_BOOT_0 was used as the internal ID; it is now
+        // exposed to customers.
+        //
+        *pChipId0 = osDevReadReg032(/*pGpu=*/ NULL, &gpuDevMapping, NV_PMC_BOOT_0);
+        *pChipId1 = osDevReadReg032(/*pGpu=*/ NULL, &gpuDevMapping, NV_PMC_BOOT_42);
+        *pSocChipId0 = 0;
+
+        gpumgrGetGpuHalFactorOfVirtual(&isVirtual, pAttachArg);
+
+        //
+        // If socChipId0 has a valid value, the running environment is SOCV.
+        // Tegra chips after the Ampere arch use a PCIE interface that connects
+        // the iGPU to the SoC for BAR and control accesses (interrupts).
+        // The TEGRA_CHIP_TYPE_PCIE and TEGRA_CHIP_TYPE_SOC paths mostly share
+        // the same dGPU arch-specific HAL, except for manual differences due
+        // to the latency of manual updates between the nvgpu (standalone
+        // iGPU/full-chip verification) and nvmobile (SOC) trees.
+        //
+        if (pAttachArg->socChipId0 != 0)
+        {
+            *pTegraType = TEGRA_CHIP_TYPE_SOC;
+        }
+    }
+
+    isFwClient = gpumgrCheckRmFirmwarePolicy(pAttachArg->nvDomainBusDeviceFunc,
+                                             pAttachArg->bRequestFwClientRm,
+                                             *pChipId1,
+                                             bIsSocDisp);
+
+    if (RMCFG_FEATURE_PLATFORM_GSP || RMCFG_FEATURE_PLATFORM_DCE)
+        *pRmVariant = RM_RUNTIME_VARIANT_UCODE;
+    else if (isVirtual)
+        *pRmVariant = RM_RUNTIME_VARIANT_VF;
+    else if (isFwClient)
+        *pRmVariant = RM_RUNTIME_VARIANT_PF_KERNEL_ONLY;
+    else
+        *pRmVariant = RM_RUNTIME_VARIANT_PF_MONOLITHIC; // default, monolithic mode
+
+    NV_PRINTF(LEVEL_INFO,
+              "ChipId0[0x%x] ChipId1[0x%x] SocChipId0[0x%x] isFwClient[%d] RmVariant[%d] tegraType[%d]\n",
+              *pChipId0, *pChipId1, *pSocChipId0, isFwClient, *pRmVariant, *pTegraType);
+}
+
+
+//
+// _gpumgrCreateGpu
+//
+// Former _sysCreateGpu(). The function was moved to GPUMGR to hide struct
+// GPUATTACHARG from SYS. SYS is still the parent object of both GPUMGR and
+// GPU.
+//
+static NV_STATUS
+_gpumgrCreateGpu(NvU32 gpuInstance, GPUATTACHARG *pAttachArg)
+{
+    OBJSYS    *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    OBJGPU    *pGpu;
+    NV_STATUS  status;
+    RM_RUNTIME_VARIANT rmVariant;
+    TEGRA_CHIP_TYPE    tegraType;
+    NvU32      chipId0;    // 32-bit chipId (pmcBoot0 on GPU)
+    NvU32      chipId1;    // 32-bit chipId (pmcBoot42 on GPU)
+    NvU32      socChipId0; // 32-bit SOC chipId
+    NvU32      gpuId;
+    NvUuid     gpuUuid;
+    NvBool     bGpuUuidValid = NV_FALSE;
+
+    gpumgrGetGpuHalFactor(&chipId0, &chipId1, &socChipId0, &rmVariant, &tegraType, pAttachArg);
+
+    // Get the GpuArch instance for this GPU
+    // TODO: make `const` after bug 4292180 is fixed
+    /* const */ GpuArch *pGpuArch = gpumgrGetGpuArch(chipId1, socChipId0, tegraType);
+    NV_ASSERT_OR_RETURN(pGpuArch != NULL, NV_ERR_INVALID_DEVICE);
+
+    //
+    // The OS layer must have previously registered the GPU ID, and may have already registered
+    // the UUID. Pull out the registered IDs for this device from the probed GPU info to pass to
+    // the OBJGPU constructor.
+    //
+    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, gpumgrGetRegisteredIds(pAttachArg->nvDomainBusDeviceFunc,
+                                                              &gpuId, &gpuUuid, &bGpuUuidValid));
+
+    //
+    // Create OBJGPU with halspec factor initialization values.
+    // Both GpuArch and OBJGPU inherit from GpuHalspecOwner so that the chip-architecture halspecs
+    // can be shared, so we pass on the derived architecture IDs from GpuArch to OBJGPU here.
+    //
+    status = objCreate(&pGpu, pSys, OBJGPU,
+                       /* ChipHal_arch = */ pGpuArch->chipArch,
+                       /* ChipHal_impl = */ pGpuArch->chipImpl,
+                       /* ChipHal_hidrev = */ pGpuArch->hidrev,
+                       /* TegraChipHal_tegraType = */ pGpuArch->tegraType,
+                       /* RmVariantHal_rmVariant = */ rmVariant,
+                       /* DispIpHal_ipver = */ 0, // initialized later
+                       /* ctor.gpuInstance = */ gpuInstance,
+                       /* ctor.gpuId = */ gpuId,
+                       /* ctor.pUuid = */ bGpuUuidValid ? &gpuUuid : NULL,
+                       /* ctor.pGpuArch = */ pGpuArch);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    // Legacy chip-config HAL registration path
+    status = gpuBindHalLegacy(pGpu, chipId0, chipId1, socChipId0);
+    if (status != NV_OK)
+    {
+        objDelete(pGpu);
+        return status;
+    }
+
+    //
+    // Save away the public ID associated with the handle just returned
+    // from create object.
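+    // The (gpuInstance, pGpu) pair recorded here is what gpumgrGetGpu() later
+    // uses to translate an instance number back into an OBJGPU pointer.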
+    //
+    pGpuMgr->gpuHandleIDList[pGpuMgr->numGpuHandles].gpuInstance = gpuInstance;
+    pGpuMgr->gpuHandleIDList[pGpuMgr->numGpuHandles].pGpu = pGpu;
+
+    pGpuMgr->numGpuHandles++;
+
+    return status;
+}
+
+
+static void
+_gpumgrGetEncSessionStatsReportingState(OBJGPU *pGpu)
+{
+}
+
+//
+// gpumgrGetGpuArch
+//
+// Gets the static GPU architecture information for the given PMC_BOOT_42 or
+// PAPB_MISC_GP_HIDREV value.
+//
+// TODO: make return value `const` after bug 4292180 is fixed.
+//
+/* const */ GpuArch *
+gpumgrGetGpuArch_IMPL(NvU32 pmcBoot42, NvU32 socChipId0, TEGRA_CHIP_TYPE tegraType)
+{
+    NvU32 chipArch;
+    NvU32 chipImpl;
+    NvU32 hidrev;
+    Object *pIter;
+    GpuArch *pGpuArch;
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+
+    portSyncMutexAcquire(pGpuMgr->pChildListMutex);
+
+    _gpumgrGetGpuArchHalFactor(pmcBoot42, socChipId0, &chipArch, &chipImpl, &hidrev);
+
+    // Do we already have a GpuArch that matches these parameters?
+    for (pGpuArch = NULL, pIter = objGetChild(staticCast(pGpuMgr, Object)); pIter != NULL;
+         pGpuArch = NULL, pIter = objGetSibling(pIter))
+    {
+        pGpuArch = dynamicCast(pIter, GpuArch);
+        if (pGpuArch == NULL)
+            continue;
+
+        NV_PRINTF(LEVEL_INFO, "checking GpuArch(chipArch:%x chipImpl:%x hidrev:%x tegraType:%x)\n",
+                  pGpuArch->chipArch, pGpuArch->chipImpl, pGpuArch->hidrev, pGpuArch->tegraType);
+
+        if (pGpuArch->chipArch == chipArch &&
+            pGpuArch->chipImpl == chipImpl &&
+            pGpuArch->hidrev == hidrev &&
+            pGpuArch->tegraType == tegraType)
+            break;
+    }
+
+    if (pGpuArch == NULL)
+    {
+        NV_PRINTF(LEVEL_INFO, "creating GpuArch(chipArch:%x chipImpl:%x hidrev:%x tegraType:%x)\n",
+                  chipArch, chipImpl, hidrev, tegraType);
+
+        //
+        // Create GpuArch with halspec factor initialization values. We also pass the values to the
+        // constructor so that they can be cached and reapplied when creating OBJGPU later.
+        //
+        NV_ASSERT_OK(objCreate(&pGpuArch, pGpuMgr, GpuArch,
+                               /* ChipHal_arch = */ chipArch,
+                               /* ChipHal_impl = */ chipImpl,
+                               /* ChipHal_hidrev = */ hidrev,
+                               /* TegraChipHal_tegraType = */ tegraType,
+                               /* ctor.chipArch = */ chipArch,
+                               /* ctor.chipImpl = */ chipImpl,
+                               /* ctor.hidrev = */ hidrev,
+                               /* ctor.tegraType = */ tegraType));
+    }
+
+    portSyncMutexRelease(pGpuMgr->pChildListMutex);
+
+    return pGpuArch;
+}
+
+//
+// gpumgrAttachGpu
+//
+// This interface is used by os-dependent code to attach a new gpu
+// to the pool managed by the RM. Construction of OBJGPU and its
+// descendants is handled here, along with any other necessary prep
+// for the subsequent gpu preinit/init stages.
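+//
+// Illustrative (hypothetical) OS-layer sequence, assuming the RM API lock is
+// held as asserted in the function body:
+//
+//     GPUATTACHARG attachArg = { 0 };
+//     NvU32 gpuInstance;
+//     /* ...fill attachArg in from the probed device... */
+//     if (gpumgrAllocGpuInstance(&gpuInstance) == NV_OK)
+//         status = gpumgrAttachGpu(gpuInstance, &attachArg);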
+//
+NV_STATUS
+gpumgrAttachGpu(NvU32 gpuInstance, GPUATTACHARG *pAttachArg)
+{
+    OBJSYS    *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    OBJGPU    *pGpu = NULL;
+    NV_STATUS  status;
+
+    NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE);
+
+    // create the new OBJGPU
+    if ((status = _gpumgrCreateGpu(gpuInstance, pAttachArg)) != NV_OK)
+    {
+        goto gpumgrAttach_error_and_exit;
+    }
+
+    // get a pointer to the new OBJGPU
+    pGpu = gpumgrGetGpu(gpuInstance);
+
+    // load up attach parameters
+    gpumgrSetAttachInfo(pGpu, pAttachArg);
+
+    // Load OOR check address mode based on arch
+#if defined(NVCPU_X86_64)
+    pGpu->busInfo.oorArch = OOR_ARCH_X86_64;
+#elif defined(NVCPU_PPC64LE)
+    pGpu->busInfo.oorArch = OOR_ARCH_PPC64LE;
+#elif defined(NVCPU_ARM)
+    pGpu->busInfo.oorArch = OOR_ARCH_ARM;
+#elif defined(NVCPU_AARCH64)
+    pGpu->busInfo.oorArch = OOR_ARCH_AARCH64;
+#else
+    pGpu->busInfo.oorArch = OOR_ARCH_NONE;
+#endif
+
+    pGpu->pOS = SYS_GET_OS(pSys);
+
+    // let os fill in dpc details before we get into engine construction
+    if ((status = osDpcAttachGpu(pGpu, pAttachArg->pOsAttachArg)) != NV_OK)
+    {
+        goto gpumgrAttach_error_and_exit;
+    }
+
+    // let os fill in what it needs before we get into engine construction
+    if ((status = osAttachGpu(pGpu, pAttachArg->pOsAttachArg)) != NV_OK)
+    {
+        goto gpumgrAttach_error_and_exit;
+    }
+
+    NV_ASSERT((pGpuMgr->gpuAttachMask & NVBIT(gpuInstance)) == 0);
+    pGpuMgr->gpuAttachMask |= NVBIT(gpuInstance);
+    pGpuMgr->gpuAttachCount++;
+
+    status = _gpumgrRegisterRmCapsForGpu(pGpu);
+    if (status != NV_OK)
+    {
+        goto gpumgrAttach_error_and_exit;
+    }
+
+    // finish gpu construction
+    if ((status = gpuPostConstruct(pGpu, pAttachArg)) != NV_OK)
+    {
+        goto gpumgrAttach_error_and_exit;
+    }
+
+    _gpumgrGetEncSessionStatsReportingState(pGpu);
+
+    // Add entry into system partition topo array
+    gpumgrAddSystemMIGInstanceTopo(pAttachArg->nvDomainBusDeviceFunc);
+
+    if (!IS_FW_CLIENT(pGpu))
+        pGpuMgr->gpuMonolithicRmMask |= NVBIT(gpuInstance);
+
+    return status;
+
+gpumgrAttach_error_and_exit:
+    if ((pGpuMgr->gpuAttachMask & NVBIT(gpuInstance)) != 0)
+    {
+        pGpuMgr->gpuAttachMask &= ~NVBIT(gpuInstance);
+        pGpuMgr->gpuAttachCount--;
+    }
+
+    if (pGpu != NULL)
+    {
+        _gpumgrUnregisterRmCapsForGpu(gpuGetDBDF(pGpu));
+    }
+
+    osDpcDetachGpu(pGpu);
+    _gpumgrDestroyGpu(gpuInstance);
+    return status;
+}
+
+//
+// gpumgrDetachGpu
+//
+// This entry point detaches a gpu from the RM. The corresponding
+// OBJGPU and any of its offspring are released, etc.
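+//
+// (This is the inverse of gpumgrAttachGpu() above; like attach, it assumes
+// the caller owns the RM API lock, which the NV_ASSERT below enforces.)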
+// +NV_STATUS +gpumgrDetachGpu(NvU32 gpuInstance) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + OBJGPU *pGpu = gpumgrGetGpu(gpuInstance); + NvBool bDelClientResourcesFromGpuMask = !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TIMEOUT_RECOVERY); + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + // Mark for deletion the stale clients related to the GPU mask + if (bDelClientResourcesFromGpuMask) + { + rmapiSetDelPendingClientResourcesFromGpuMask(NVBIT(gpuInstance)); + } + + osDpcDetachGpu(pGpu); + + pGpu->pOsRmCaps = NULL; + + // release pDev + _gpumgrDestroyGpu(gpuInstance); + + // Delete the marked clients related to the GPU mask + if (bDelClientResourcesFromGpuMask) + { + rmapiDelPendingDevices(NVBIT(gpuInstance)); + rmapiDelPendingClients(); + } + + pGpuMgr->gpuMonolithicRmMask &= ~NVBIT(gpuInstance); + NV_ASSERT(pGpuMgr->gpuAttachMask & NVBIT(gpuInstance)); + pGpuMgr->gpuAttachMask &= ~NVBIT(gpuInstance); + pGpuMgr->gpuAttachCount--; + + return NV_OK; +} + +// +// gpumgrCreateDevice +// +// Create a broadcast device. The set of one or more gpus +// comprising the broadcast device is described by gpuMask. +// +NV_STATUS +gpumgrCreateDevice(NvU32 *pDeviceInstance, NvU32 gpuMask, NvU32 *pGpuIdsOrdinal) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + OBJGPU *pParentGpu = NULL; + NvU32 gpuInstance; + NV_STATUS status = NV_ERR_INVALID_REQUEST; + OBJGPUGRP *pGpuGrp = NULL; + + pGpuMgr->deviceCount++; + + NV_ASSERT(gpuMask != 0); + + // if only 1 gpu in the set, we're done + if (gpumgrIsSubDeviceCountOne(gpuMask)) + { + // alloc new broadcast device instance + status = gpumgrAllocDeviceInstance(pDeviceInstance); + if (status != NV_OK) + { + goto gpumgrCreateDevice_exit; + } + + gpumgrConstructGpuGrpObject(pGpuMgr, gpuMask, + &pGpuMgr->pGpuGrpTable[*pDeviceInstance]); + // + // Set up parent gpu state. pParentGpu == NULL during boot when + // we're first creating this device because the GPU attach process + // has not yet completed. pParentGpu != NULL when we're coming + // out of SLI (unlinking). 
+ // + gpuInstance = 0; + pParentGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance); + if (pParentGpu) + { + gpumgrSetParentGPU(pParentGpu, pParentGpu); + } + + gpumgrAddDeviceMaskToGpuInstTable(gpuMask); + status = NV_OK; + goto gpumgrCreateDevice_exit; + } + +gpumgrCreateDevice_exit: + if (status != NV_OK) + { + // Device creation failed + pGpuMgr->deviceCount--; + } + else + { + pGpuGrp = pGpuMgr->pGpuGrpTable[*pDeviceInstance]; + if (gpugrpGetGpuMask(pGpuGrp) != gpuMask) + { + NV_ASSERT(0); + gpumgrDestroyDevice(*pDeviceInstance); + return NV_ERR_INVALID_DATA; + } + NV_PRINTF(LEVEL_INFO, + "gpumgrCreateDevice: deviceInst 0x%x mask 0x%x\n", + *pDeviceInstance, gpuMask); + } + return status; +} + +NV_STATUS +gpumgrDestroyDevice(NvU32 deviceInstance) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NV_STATUS status = NV_OK; + OBJGPUGRP *pGpuGrp = pGpuMgr->pGpuGrpTable[deviceInstance]; + NvU32 gpuMask; + + NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NV_ERR_OBJECT_NOT_FOUND); + gpuMask = gpugrpGetGpuMask(pGpuGrp); + + NV_ASSERT(gpuMask != 0); + + // if we only have one subdevice we're done + if (gpumgrIsSubDeviceCountOne(gpuMask)) + { + gpugrpDestroy(pGpuGrp); + pGpuMgr->pGpuGrpTable[deviceInstance] = NULL; + gpumgrClearDeviceMaskFromGpuInstTable(gpuMask); + goto gpumgrDestroyDevice_exit; + } + +gpumgrDestroyDevice_exit: + pGpuMgr->deviceCount--; + + return status; +} + +// +// gpumgrGetDeviceInstanceMask +// +// Returns mask of enabled (or valid) device instances. +// This mask tells clients which NV01_DEVICE class +// instances are valid. +// +NvU32 +gpumgrGetDeviceInstanceMask(void) +{ + NvU32 i, deviceInstanceMask = 0; + + // for every broadcast device... + for (i = 0; i < NV_MAX_DEVICES; i++) + { + // ...add it to our mask if it's enabled + if (NV_OK == gpumgrIsDeviceInstanceValid(i)) + deviceInstanceMask |= NVBIT(i); + } + + return deviceInstanceMask; +} + +NvU32 +gpumgrGetGpuMask(OBJGPU *pGpu) +{ + NvU32 deviceInstance = gpuGetDeviceInstance(pGpu); + + NV_ASSERT(deviceInstance < NV_MAX_DEVICES); + + return gpumgrGetDeviceGpuMask(deviceInstance); +} + +// +// gpumgrGetSubDeviceCount +// +NvU32 +gpumgrGetSubDeviceCount(NvU32 gpuMask) +{ + NvU32 subDeviceCount = 0; + + // tally # of gpus in the set + while (gpuMask != 0) + { + subDeviceCount ++; + gpuMask &= (gpuMask-1); // remove lowest bit in gpuMask + } + return subDeviceCount; +} + +// +// gpumgrGetSubDeviceCountFromGpu +// ATTENTION: When using with SLI Next / RM Unlinked SLI, the +// subdevice count is always 1 for each GPU. This can cause +// bugs, buffer overflows with arrays based on subdevice instances as +// with RM Unlinked SLI: +// - subdevice count is always 1 (the GPUs are not linked) +// - GPU subdevice instance can be non zero +// For subdevice instance arrays, please use +// gpumgrGetSubDeviceMaxValuePlus1() +// +NvU32 +gpumgrGetSubDeviceCountFromGpu(OBJGPU *pGpu) +{ + NvU32 gpuMask = gpumgrGetGpuMask(pGpu); + NvU32 subDeviceCount = gpumgrGetSubDeviceCount(gpuMask); + + NV_ASSERT(subDeviceCount > 0); + return subDeviceCount; +} + +// +// gpumgrGetSubDeviceMaxValuePlus1 +// SLI disabled: return 1 +// SLI enabled with RM linked in SLI: returns 2 or more +// SLI enabled with RM unlinked: return current subdeviceInstance + 1 +// Life of the function: until a full transition to SLI Next / RM Unlinked SLI. 
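+//
+// Illustrative use for sizing subdevice-indexed arrays (a sketch; pArray and
+// its element type are hypothetical):
+//
+//     NvU32 count = gpumgrGetSubDeviceMaxValuePlus1(pGpu);
+//     pArray = portMemAllocNonPaged(count * sizeof(*pArray));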
+//
+NvU32
+gpumgrGetSubDeviceMaxValuePlus1(OBJGPU *pGpu)
+{
+    if (!IsSLIEnabled(pGpu))
+    {
+        // SLI disabled: return 1 as all GPU subdevice instances are 0
+        // Unlinked SLI: returns the current subdevice instance + 1
+        return gpumgrGetSubDeviceInstanceFromGpu(pGpu) + 1;
+    }
+    else
+    {
+        // SLI Enabled in RM: The count of subdevice instances for that GPU/device
+        return gpumgrGetSubDeviceCountFromGpu(pGpu);
+    }
+}
+
+static void
+gpumgrUpdateAttachInfo(OBJGPU *pGpu, GPUATTACHARG *pAttachArg)
+{
+    NvU32 idx;
+    NvU32 i;
+    NvU32 numDeviceIDs;
+    DEVICE_ID_MAPPING *deviceIdMapping;
+
+    numDeviceIDs = gpuGetDeviceIDList_HAL(pGpu, &deviceIdMapping);
+
+    for (idx = 0; idx < DEVICE_INDEX_MAX; idx++)
+    {
+        if (pAttachArg->socDeviceArgs.deviceMapping[idx].gpuNvLength == 0)
+        {
+            continue;
+        }
+
+        for (i = 0; i < numDeviceIDs; i++)
+        {
+            if (deviceIdMapping[i].deviceIndex == idx)
+            {
+                pAttachArg->socDeviceArgs.deviceMapping[idx].gpuDeviceEnum = deviceIdMapping[i].devId;
+                break;
+            }
+        }
+    }
+}
+
+static void
+gpumgrSetAttachInfo(OBJGPU *pGpu, GPUATTACHARG *pAttachArg)
+{
+    if (pAttachArg->socDeviceArgs.specified)
+    {
+        NvU32 idx;
+        NvU32 maxIdx;
+        // This path is taken for Tegra Display and iGPU
+
+        //
+        // TODO: This existing field is specifically used to safeguard
+        // iGPU-specific code paths within RM, and should actually be NV_FALSE for
+        // T234D+.
+        //
+        // See JIRA TDS-5101 for more details.
+        //
+        pGpu->bIsSOC = NV_TRUE;
+        maxIdx = SOC_DEV_MAPPING_MAX;
+
+        if (pAttachArg->socDeviceArgs.bIsIGPU)
+        {
+            gpumgrUpdateAttachInfo(pGpu, pAttachArg);
+            maxIdx = DEVICE_INDEX_MAX;
+        }
+        for (idx = 0; idx < maxIdx; idx++)
+        {
+            pGpu->deviceMappings[idx] = pAttachArg->socDeviceArgs.deviceMapping[idx];
+        }
+
+        pGpu->busInfo.iovaspaceId = pAttachArg->socDeviceArgs.iovaspaceId;
+        if (pAttachArg->socDeviceArgs.bIsIGPU)
+        {
+            pGpu->busInfo.gpuPhysAddr = pGpu->deviceMappings[DEVICE_INDEX_GPU].gpuNvPAddr;
+            pGpu->gpuDeviceMapCount = DEVICE_INDEX_MAX;
+        }
+        else
+        {
+            pGpu->busInfo.gpuPhysAddr = pGpu->deviceMappings[SOC_DEV_MAPPING_DISP].gpuNvPAddr;
+            pGpu->gpuDeviceMapCount = 1;
+        }
+
+        //
+        // TODO bug 2100708: a fake DBDF is used on SOC to opt out of some
+        // RM paths that cause issues otherwise, see the bug for details.
+        //
+        pGpu->busInfo.nvDomainBusDeviceFunc = pAttachArg->nvDomainBusDeviceFunc;
+        pGpu->busInfo.bNvDomainBusDeviceFuncValid = NV_TRUE;
+    }
+    else if (pAttachArg->bIsSOC)
+    {
+        // This path is only taken for ARCH MODS iGPU verification.
+
+        NV_ASSERT(sizeof(pGpu->deviceMappings) == sizeof(pAttachArg->socDeviceMappings));
+        pGpu->bIsSOC = NV_TRUE;
+        pGpu->idInfo.PCIDeviceID = pAttachArg->socId;
+        pGpu->idInfo.PCISubDeviceID = pAttachArg->socSubId;
+        pGpu->busInfo.iovaspaceId = pAttachArg->iovaspaceId;
+        if (RMCFG_FEATURE_PLATFORM_MODS)
+        {
+            NV_ASSERT(sizeof(pGpu->deviceMappings) == sizeof(pAttachArg->socDeviceMappings));
+            portMemCopy(pGpu->deviceMappings, sizeof(pGpu->deviceMappings), pAttachArg->socDeviceMappings, sizeof(pGpu->deviceMappings));
+            pGpu->gpuDeviceMapCount = pAttachArg->socDeviceCount;
+
+            //
+            // TODO bug 2100708: a fake DBDF is used on SOC to opt out of some
+            // RM paths that cause issues otherwise, see the bug for details.
+            //
+            pGpu->busInfo.nvDomainBusDeviceFunc = pAttachArg->nvDomainBusDeviceFunc;
+            pGpu->busInfo.bNvDomainBusDeviceFuncValid = NV_TRUE;
+        }
+    }
+    else
+    {
+        //
+        // Set this gpu's hardware register access address pointers
+        // from the contents of mappingInfo.
+ // + pGpu->bIsSOC = NV_FALSE; + + pGpu->deviceMappings[0].gpuNvAddr = pAttachArg->regBaseAddr; + pGpu->registerAccess.gpuFbAddr = pAttachArg->fbBaseAddr; + pGpu->busInfo.gpuPhysAddr = pAttachArg->devPhysAddr; + pGpu->busInfo.gpuPhysFbAddr = pAttachArg->fbPhysAddr; + pGpu->busInfo.gpuPhysInstAddr = pAttachArg->instPhysAddr; + pGpu->busInfo.gpuPhysIoAddr = pAttachArg->ioPhysAddr; + pGpu->busInfo.iovaspaceId = pAttachArg->iovaspaceId; + pGpu->busInfo.nvDomainBusDeviceFunc = pAttachArg->nvDomainBusDeviceFunc; + pGpu->busInfo.bNvDomainBusDeviceFuncValid = NV_TRUE; + pGpu->deviceMappings[0].gpuNvLength = pAttachArg->regLength; + pGpu->fbLength = pAttachArg->fbLength; + pGpu->busInfo.IntLine = pAttachArg->intLine; + pGpu->gpuDeviceMapCount = 1; + pGpu->cpuNumaNodeId = pAttachArg->cpuNumaNodeId; + + if ( ! pAttachArg->instBaseAddr ) + { + // + // The OS init goo didn't map a separate region for instmem. + // So instead use the 1M mapping in bar0. + // + pGpu->instSetViaAttachArg = NV_FALSE; + pGpu->registerAccess.gpuInstAddr = (GPUHWREG*)(((NvU8*)pGpu->deviceMappings[0].gpuNvAddr) + 0x00700000); // aka NV_PRAMIN. + if (!pGpu->busInfo.gpuPhysInstAddr) + { + // + // Only use the bar0 window physical address if the OS didn't + // specify a bar2 physical address. + // + pGpu->busInfo.gpuPhysInstAddr = pGpu->busInfo.gpuPhysAddr + 0x00700000; // aka NV_PRAMIN + } + pGpu->instLength = 0x100000; // 1MB + } + else + { + pGpu->instSetViaAttachArg = NV_TRUE; + pGpu->registerAccess.gpuInstAddr = pAttachArg->instBaseAddr; + pGpu->instLength = pAttachArg->instLength; + } + } +} + +// +// gpumgrStatePreInitGpu & gpumgrStateInitGpu +// +// These routines handle unicast gpu initialization. +// +NV_STATUS +gpumgrStatePreInitGpu(OBJGPU *pGpu) +{ + NV_STATUS status; + + // LOCK: acquire GPUs lock + status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT); + if (status == NV_OK) + { + if (FULL_GPU_SANITY_CHECK(pGpu)) + { + // pre-init phase done in UC mode + status = gpuStatePreInit(pGpu); + } + else + { + status = NV_ERR_GPU_IS_LOST; + DBG_BREAKPOINT(); + } + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + // save the init status for later client queries + gpumgrSetGpuInitStatus(pGpu->gpuId, status); + + return status; +} + +NV_STATUS +gpumgrStateInitGpu(OBJGPU *pGpu) +{ + NV_STATUS status; + + // LOCK: acquire GPUs lock + status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT); + if (status == NV_OK) + { + // init phase + status = gpuStateInit(pGpu); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + // save the init status for later client queries + gpumgrSetGpuInitStatus(pGpu->gpuId, status); + + return status; +} + +NV_STATUS +gpumgrStateLoadGpu(OBJGPU *pGpu, NvU32 flags) +{ + NV_STATUS status; + + // LOCK: acquire GPUs lock + status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT); + if (status == NV_OK) + { + // Load phase + status = gpuStateLoad(pGpu, flags); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + if (status != NV_OK) + goto gpumgrStateLoadGpu_exit; + +gpumgrStateLoadGpu_exit: + // save the init status for later client queries + gpumgrSetGpuInitStatus(pGpu->gpuId, status); + + return status; +} + +// +// gpumgrGetNextGpu +// +// This routine searches subDeviceMask for the next gpu by using +// the start index value as a beginning bit position. If a gpu is +// found, the start index value is bumped to the next bit position +// in the mask. 
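+//
+// Canonical iteration idiom, as used throughout this file:
+//
+//     NvU32 gpuInstance = 0;
+//     OBJGPU *pGpu;
+//     while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
+//     {
+//         // per-GPU work
+//     }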
+// +POBJGPU +gpumgrGetNextGpu(NvU32 subDeviceMask, NvU32 *pStartIndex) +{ + NvU32 i; + + if (*pStartIndex > NV_MAX_DEVICES) + { + *pStartIndex = NV_MAX_DEVICES; + return NULL; + } + + for (i = *pStartIndex; i < NV_MAX_DEVICES; i++) + { + if (subDeviceMask & NVBIT(i)) + { + *pStartIndex = i+1; + return gpumgrGetGpu(i); + } + } + + *pStartIndex = NV_MAX_DEVICES; + return NULL; +} + + +// +// gpumgrIsGpuPointerValid - Validates pGpu is initialized without dereferencing it. +// +NvBool +gpumgrIsGpuPointerValid(OBJGPU *pGpu) +{ + OBJGPU *pTempGpu = NULL; + NvU32 gpuMask = 0; + NvU32 gpuCount = 0; + NvU32 gpuIndex = 0; + + gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask); + pTempGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex); + + while(pTempGpu) + { + if (pTempGpu->getProperty(pTempGpu, PDB_PROP_GPU_STATE_INITIALIZED)) + { + if (pTempGpu == pGpu) + { + return NV_TRUE; + } + } + + pTempGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex); + } + + return NV_FALSE; +} + +// +// gpumgrIsGpuPointerAttached - Validates pGpu is attached without dereferencing it. +// +NvBool +gpumgrIsGpuPointerAttached(OBJGPU *pGpu) +{ + OBJGPU *pTempGpu = NULL; + NvU32 gpuMask = 0; + NvU32 gpuCount = 0; + NvU32 gpuIndex = 0; + + gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask); + pTempGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex); + + while(pTempGpu) + { + if (pTempGpu == pGpu) + { + return NV_TRUE; + } + pTempGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex); + } + + return NV_FALSE; +} + + +NvBool gpumgrIsGpuDisplayParent(OBJGPU *pGpu) +{ + OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu); + NvBool rc = NV_FALSE; + NvU32 gpuMask; + + NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NV_FALSE); + gpuMask = gpugrpGetGpuMask(pGpuGrp); + + // If there's only one GPU in the device, then of course it's the display parent! + if (gpumgrIsSubDeviceCountOne(gpuMask)) + { + rc = NV_TRUE; + } + // + // If the gpuInstance argument is the first gpuInstance in the ordering, + // then it's the display parent! + // + else if (pGpu->gpuInstance == pGpuGrp->SliLinkOrder[0].gpuInstance) + { + rc = NV_TRUE; + } + + // Otherwise it isn't. + return rc; +} + +OBJGPU *gpumgrGetDisplayParent(OBJGPU *pGpu) +{ + OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu); + NvU32 gpuCount; + NvU32 gpuMask; + NvU32 gpuInstance; + + NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NULL); + gpuMask = gpugrpGetGpuMask(pGpuGrp); + gpuCount = gpumgrGetSubDeviceCount(gpuMask); + + if (gpuCount > 1) + { + gpuInstance = pGpuGrp->SliLinkOrder[0].gpuInstance; + pGpu = gpumgrGetGpu(gpuInstance); + } + + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + + return pGpu; +} + +// +// gpumgrGetProbedGpuIds +// +// This routine services the NV0000_CTRL_GPU_GET_PROBED_IDS command. +// The passed in gpuIds table is filled in with valid gpuId info +// for each probed gpu. Invalid entries in the table are set to the +// invalid id value. 
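+//
+// Summarizing the contract implemented below: on return,
+//     gpuIds[0..j-1]          hold probed, non-excluded IDs (with gpuFlags)
+//     excludedGpuIds[0..k-1]  hold probed, excluded IDs
+//     all remaining entries   are NV0000_CTRL_GPU_INVALID_ID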
+//
+NV_STATUS
+gpumgrGetProbedGpuIds(NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuIdsParams)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    NvU32 i, j, k;
+
+    ct_assert(NV_MAX_DEVICES == NV0000_CTRL_GPU_MAX_PROBED_GPUS);
+
+    portSyncMutexAcquire(pGpuMgr->probedGpusLock);
+
+    for (i = 0, j = 0, k = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++)
+    {
+        if (pGpuMgr->probedGpus[i].gpuId != NV0000_CTRL_GPU_INVALID_ID)
+        {
+            if (pGpuMgr->probedGpus[i].bExcluded)
+            {
+                pGpuIdsParams->excludedGpuIds[k++] = pGpuMgr->probedGpus[i].gpuId;
+            }
+            else
+            {
+                pGpuIdsParams->gpuIds[j] = pGpuMgr->probedGpus[i].gpuId;
+                pGpuIdsParams->gpuFlags[j++] = pGpuMgr->probedGpus[i].flags;
+            }
+        }
+    }
+
+    portSyncMutexRelease(pGpuMgr->probedGpusLock);
+
+    for (i = j; i < NV_ARRAY_ELEMENTS(pGpuIdsParams->gpuIds); i++)
+        pGpuIdsParams->gpuIds[i] = NV0000_CTRL_GPU_INVALID_ID;
+
+    for (i = k; i < NV_ARRAY_ELEMENTS(pGpuIdsParams->excludedGpuIds); i++)
+        pGpuIdsParams->excludedGpuIds[i] = NV0000_CTRL_GPU_INVALID_ID;
+
+    return NV_OK;
+}
+
+//
+// gpumgrSetProbedFlags
+//
+// This routine is used by os-dependent code to set probed-related flags for
+// the specified gpu. See ctrl0000gpu.h for a description of valid flags
+// values.
+//
+void
+gpumgrSetProbedFlags(NvU32 gpuId, NvU32 flags)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    NvU32 i;
+
+    portSyncMutexAcquire(pGpuMgr->probedGpusLock);
+
+    for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++)
+    {
+        PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i];
+
+        if (pProbedGpu->gpuId == gpuId)
+        {
+            pProbedGpu->flags = flags;
+            goto done;
+        }
+    }
+
+done:
+    portSyncMutexRelease(pGpuMgr->probedGpusLock);
+}
+
+//
+// gpumgrGetAttachedGpuIds
+//
+// This routine services the NV0000_CTRL_GPU_GET_ATTACHED_IDS command.
+// The passed in gpuIds table is filled in with valid gpuId info
+// for each attached gpu. Any remaining entries in the table are set to
+// the invalid id value.
+//
+NV_STATUS
+gpumgrGetAttachedGpuIds(NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuIdsParams)
+{
+    OBJGPU *pGpu;
+    NvU32 gpuAttachCnt, gpuAttachMask, i, cnt;
+    NvU32 *pGpuIds = &pGpuIdsParams->gpuIds[0];
+
+    // fill the table w/valid entries
+    gpumgrGetGpuAttachInfo(&gpuAttachCnt, &gpuAttachMask);
+    for (cnt = 0, i = 0; i < NV_MAX_DEVICES; i++)
+    {
+        if (gpuAttachMask & NVBIT(i))
+        {
+            pGpu = gpumgrGetGpu(i);
+            pGpuIds[cnt++] = pGpu->gpuId;
+        }
+    }
+
+    // invalidate rest of the entries
+    while (cnt < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS)
+        pGpuIds[cnt++] = NV0000_CTRL_GPU_INVALID_ID;
+
+    return NV_OK;
+}
+
+//
+// gpumgrGetSubDeviceInstanceFromGpu
+//
+// Given a pGpu return the corresponding subdevice instance value.
+//
+NvU32
+gpumgrGetSubDeviceInstanceFromGpu(OBJGPU *pGpu)
+{
+    return pGpu->subdeviceInstance;
+}
+
+//
+// gpumgrGetParentGPU
+//
+POBJGPU
+gpumgrGetParentGPU(OBJGPU *pGpu)
+{
+    OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu);
+    NvU32 gpuMask;
+
+    NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NULL);
+    gpuMask = gpugrpGetGpuMask(pGpuGrp);
+
+    if (gpumgrIsSubDeviceCountOne(gpuMask))
+    {
+        return pGpu;
+    }
+    else
+    {
+        return gpugrpGetParentGpu(pGpuGrp);
+    }
+}
+
+//
+// gpumgrSetParentGPU
+//
+void
+gpumgrSetParentGPU(OBJGPU *pGpu, OBJGPU *pParentGpu)
+{
+    OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu);
+
+    NV_ASSERT_OR_RETURN_VOID(pGpuGrp != NULL);
+    gpugrpSetParentGpu(pGpuGrp, pParentGpu);
+}
+
+//
+// gpumgrGetGpuFromId
+//
+// Find the specified gpu from its gpuId.
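+//
+// Example (hedged): validating a client-supplied ID, as gpumgrGetGpuIdInfoV2()
+// does below:
+//
+//     OBJGPU *pGpu = gpumgrGetGpuFromId(gpuId);
+//     if (pGpu == NULL)
+//         return NV_ERR_INVALID_ARGUMENT;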
+// +POBJGPU +gpumgrGetGpuFromId(NvU32 gpuId) +{ + OBJGPU *pGpu; + NvU32 gpuAttachCnt, gpuAttachMask; + NvU32 i; + + gpumgrGetGpuAttachInfo(&gpuAttachCnt, &gpuAttachMask); + for (i = 0; i < NV_MAX_DEVICES; i++) + { + if (gpuAttachMask & NVBIT(i)) + { + pGpu = gpumgrGetGpu(i); + + // found it + if (pGpu->gpuId == gpuId) + return pGpu; + } + } + + // didn't find it + return NULL; +} + +// +// gpumgrGetGpuFromUuid() +// +// Get GPUOBJECT from UUID. Returns NULL if it cannot find a GPU with the +// requested UUID. +// +POBJGPU +gpumgrGetGpuFromUuid(const NvU8 *pGpuUuid, NvU32 flags) +{ + OBJGPU *pGpu; + NvU32 attachedGpuCount; + NvU32 attachedGpuMask; + NvU32 gpuIndex; + NvU32 gidStrLen; + NvU8 *pGidString = NULL; + NV_STATUS rmStatus; + + // get all attached GPUs + rmStatus = gpumgrGetGpuAttachInfo(&attachedGpuCount, &attachedGpuMask); + + gpuIndex = 0; + + for(pGpu = gpumgrGetNextGpu(attachedGpuMask, &gpuIndex); + pGpu != NULL; + pGpu = gpumgrGetNextGpu(attachedGpuMask, &gpuIndex)) + { + // + // get the GPU's UUID + // + // This implementation relies on the fact that gpuGetGidInfo() only + // allocates memory if it succeeds. + // + rmStatus = gpuGetGidInfo(pGpu, &pGidString, &gidStrLen, flags); + if (NV_OK != rmStatus) + return NULL; + + // check if it matches + if (0 == portMemCmp(pGidString, pGpuUuid, gidStrLen)) + { + portMemFree(pGidString); + return pGpu; + } + else + { + // if it doesn't match, clean up allocated memory for next iteration + portMemFree(pGidString); + } + } + + return NULL; // Failed to find a GPU with the requested UUID +} + +// +// gpumgrGetGpuFromBusInfo +// +// Find the specified GPU using its PCI bus info. +// +POBJGPU +gpumgrGetGpuFromBusInfo(NvU32 domain, NvU8 bus, NvU8 device) +{ + NV_STATUS status; + OBJGPU *pGpu; + NvU32 attachedGpuCount; + NvU32 attachedGpuMask; + NvU32 gpuIndex = 0; + + status = gpumgrGetGpuAttachInfo(&attachedGpuCount, &attachedGpuMask); + NV_ASSERT_OR_RETURN(status == NV_OK, NULL); + + for (pGpu = gpumgrGetNextGpu(attachedGpuMask, &gpuIndex); + pGpu != NULL; + pGpu = gpumgrGetNextGpu(attachedGpuMask, &gpuIndex)) + { + if ((gpuGetDomain(pGpu) == domain) && + (gpuGetBus(pGpu) == bus) && + (gpuGetDevice(pGpu) == device)) + { + return pGpu; + } + } + + return NULL; +} + +// +// gpumgrSetGpuId +// +// This routine assigns the specified gpuId to the specified gpu. +// +void +gpumgrSetGpuId(OBJGPU *pGpu, NvU32 gpuId) +{ + pGpu->gpuId = gpuId; + + // if boardId is unassigned then give it a default value now + if (pGpu->boardId == 0xffffffff) + { + pGpu->boardId = gpuId; + } +} + +// +// gpumgrGetGpuIdInfo +// +// Special purpose routine that handles NV0000_CTRL_CMD_GPU_GET_ID_INFO +// requests from clients. +// NV0000_CTRL_CMD_GPU_GET_ID_INFO is deprecated in favour of +// NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2, per comments in ctrl0000gpu.h +// +NV_STATUS +gpumgrGetGpuIdInfoV2(NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *pGpuInfo) +{ + OBJGPU *pGpu; + NvU32 deviceInstance, subDeviceInstance; + + // start by making sure client request specifies a valid gpu + pGpu = gpumgrGetGpuFromId(pGpuInfo->gpuId); + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_INFO, + "gpumgrGetGpuInfoV2: bad gpuid spec: 0x%x\n", + pGpuInfo->gpuId); + return NV_ERR_INVALID_ARGUMENT; + } + + NV_ASSERT_OR_RETURN(gpumgrIsSafeToReadGpuInfo(), NV_ERR_INVALID_LOCK_STATE); + + // + // We have a valid gpuInstance, so now let's get the corresponding + // deviceInstance/subDeviceInstance pair. 
+ // + deviceInstance = gpuGetDeviceInstance(pGpu); + if (deviceInstance == NV_MAX_DEVICES) + { + NV_PRINTF(LEVEL_ERROR, + "gpumgrGetGpuInfoV2: deviceInstance not found\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + { + subDeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + } + + pGpuInfo->gpuInstance = pGpu->gpuInstance; + pGpuInfo->deviceInstance = deviceInstance; + pGpuInfo->subDeviceInstance = subDeviceInstance; + pGpuInfo->boardId = pGpu->boardId; + + // + // Setup gpu info flags; see ctrl0000gpu.h for list of flags. + // + pGpuInfo->gpuFlags = 0; + pGpuInfo->numaId = NV0000_CTRL_NO_NUMA_NODE; + if (osGpuSupportsAts(pGpu)) + { + pGpuInfo->gpuFlags |= DRF_NUM(0000, _CTRL_GPU_ID_INFO, _ATS_ENABLED, + NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED_TRUE); + + { + pGpuInfo->numaId = pGpu->numaNodeId; + } + } + + // is this gpu in use? + pGpuInfo->gpuFlags |= DRF_NUM(0000, _CTRL_GPU_ID_INFO, _IN_USE, gpuIsInUse(pGpu)); + + // is this gpu part of a sli device? + pGpuInfo->gpuFlags |= DRF_NUM(0000, _CTRL_GPU_ID_INFO, _LINKED_INTO_SLI_DEVICE, IsSLIEnabled(pGpu)); + + // is this gpu a mobile gpu? + if (IsMobile(pGpu)) + { + pGpuInfo->gpuFlags |= DRF_DEF(0000, _CTRL_GPU_ID_INFO, _MOBILE, _TRUE); + } + + // is this gpu the boot primary? + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_PRIMARY_DEVICE)) + { + pGpuInfo->gpuFlags |= DRF_DEF(0000, _CTRL_GPU_ID_INFO, _BOOT_MASTER, _TRUE); + } + + // is this GPU part of an SOC + if (pGpu->bIsSOC || pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB)) + { + pGpuInfo->gpuFlags |= DRF_DEF(0000, _CTRL_GPU_ID_INFO, _SOC, _TRUE); + } + + // set GPU SOC type if applicable + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + pGpuInfo->gpuFlags = FLD_SET_DRF(0000, _CTRL_GPU_ID_INFO, + _SOC_TYPE, _DISPLAY, + pGpuInfo->gpuFlags); + } + // + // To handle the MDM use case for SDM device model supported chips. + // If KERNEL_DISPLAY is not present for SOC_SDM, then it is treated as MDM device. + // + else if ((pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_SOC_SDM)) && + (GPU_GET_KERNEL_DISPLAY(pGpu) != NULL)) + { + pGpuInfo->gpuFlags = FLD_SET_DRF(0000, _CTRL_GPU_ID_INFO, + _SOC_TYPE, _DISPLAY_AND_IGPU, + pGpuInfo->gpuFlags); + } + else if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB)) + { + pGpuInfo->gpuFlags = FLD_SET_DRF(0000, _CTRL_GPU_ID_INFO, + _SOC_TYPE, _IGPU, + pGpuInfo->gpuFlags); + } + + // GPU specific SLI status + pGpuInfo->sliStatus = pGpu->sliStatus; + + NV_PRINTF(LEVEL_INFO, + "gpumgrGetGpuInfoV2: gpu[0x%x]: device 0x%x subdevice 0x%x\n", + pGpuInfo->gpuId, pGpuInfo->deviceInstance, + pGpuInfo->subDeviceInstance); + + return NV_OK; +} +NV_STATUS +gpumgrGetGpuIdInfo(NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *pGpuInfo) +{ + NV_STATUS status; + NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS pGpuInfoV2 = {0}; + pGpuInfoV2.gpuId = pGpuInfo->gpuId; + + status = gpumgrGetGpuIdInfoV2(&pGpuInfoV2); + + if (status != NV_OK) + { + return status; + } + pGpuInfo->gpuFlags = pGpuInfoV2.gpuFlags; + pGpuInfo->deviceInstance = pGpuInfoV2.deviceInstance; + pGpuInfo->subDeviceInstance = pGpuInfoV2.subDeviceInstance; + pGpuInfo->sliStatus = pGpuInfoV2.sliStatus; + pGpuInfo->boardId = pGpuInfoV2.boardId; + pGpuInfo->gpuInstance = pGpuInfoV2.gpuInstance; + pGpuInfo->numaId = pGpuInfoV2.numaId; + + return status; +} + +// +// gpumgrGetGpuInitStatus +// +// Special purpose routine that handles NV0000_CTRL_CMD_GET_GPU_INIT_STATUS +// requests from clients. 
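+//
+// Sketch of a caller-side flow (variable names hypothetical):
+//
+//     params.gpuId = gpuId;
+//     status = gpumgrGetGpuInitStatus(&params);
+//     // NV_ERR_INVALID_STATE:    probed, but init never attempted
+//     // NV_ERR_INVALID_ARGUMENT: gpuId not found in the probed table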
+// +NV_STATUS +gpumgrGetGpuInitStatus(NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *pGpuInitStatus) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + NV_STATUS rmStatus; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + if (pGpuMgr->probedGpus[i].gpuId == pGpuInitStatus->gpuId) + { + if (pGpuMgr->probedGpus[i].bInitAttempted) + { + pGpuInitStatus->status = pGpuMgr->probedGpus[i].initStatus; + rmStatus = NV_OK; + } + else + { + // + // No init has been attempted on this GPU yet, so this request + // doesn't make any sense. + // + rmStatus = NV_ERR_INVALID_STATE; + } + goto done; + } + } + + // We couldn't find a probed gpuId matching the requested one. + rmStatus = NV_ERR_INVALID_ARGUMENT; +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return rmStatus; +} + +NV_STATUS +gpumgrGetProbedGpuDomainBusDevice(NvU32 gpuId, NvU64 *gpuDomainBusDevice) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + NV_STATUS rmStatus; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + if (pGpuMgr->probedGpus[i].gpuId == gpuId) + { + *gpuDomainBusDevice = pGpuMgr->probedGpus[i].gpuDomainBusDevice; + rmStatus = NV_OK; + goto done; + } + } + + // + // We couldn't find a probed gpuId matching the requested one. + // + // This used to return a generic NV_ERR_INVALID_ARGUMENT, but we want to be + // more specific as at least nvml wants to be able to tell this case apart + // from other errors. This case is expected when GPUs are removed from the + // driver (e.g. through unbind on Linux) after a client queries for the + // probed GPUs, but before getting the PCI info for all of them. + // + rmStatus = NV_ERR_OBJECT_NOT_FOUND; + +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return rmStatus; +} + +// +// gpumgrSetGpuInitStatus +// +// Marks initialization of the gpu in question as attempted and stores the +// status. 
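+//
+// (Called from the gpumgrState*Gpu() routines above after each phase, so
+// NV0000_CTRL_CMD_GET_GPU_INIT_STATUS reports the most recent result.)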
+//
+void
+gpumgrSetGpuInitStatus(NvU32 gpuId, NV_STATUS status)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    NvU32 i;
+
+    portSyncMutexAcquire(pGpuMgr->probedGpusLock);
+
+    for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++)
+    {
+        if (pGpuMgr->probedGpus[i].gpuId == gpuId)
+        {
+            // Overwrite any previous init status
+            pGpuMgr->probedGpus[i].bInitAttempted = NV_TRUE;
+            pGpuMgr->probedGpus[i].initStatus = status;
+            break;
+        }
+    }
+
+    portSyncMutexRelease(pGpuMgr->probedGpusLock);
+}
+
+//
+// gpumgrGetDefaultPrimaryGpu
+//
+// This routine looks at the set of GPUs and picks the primary (parent)
+// with the following rules, in this order:
+// 1- If a primary GPU has been passed in an SLI config by a client
+// 2- If there is a boot primary in the GPU mask
+// 3- The first VGA device attached (not 3d controller)
+//
+NvU32
+gpumgrGetDefaultPrimaryGpu
+(
+    NvU32 gpuMask
+)
+{
+    OBJGPU *pGpu = NULL;
+    NvU32 gpuInstance;
+
+    if (gpuMask == 0)
+    {
+        NV_ASSERT(gpuMask);
+        return 0;
+    }
+
+    // Find masterFromSLIConfig, set when an RM client passes a primary GPU
+    // index from an SLI config
+    gpuInstance = 0;
+    while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
+    {
+        if (pGpu->masterFromSLIConfig)
+        {
+            break;
+        }
+    }
+
+    // default to boot primary
+    if (pGpu == NULL)
+    {
+        gpuInstance = 0;
+        while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
+        {
+            if (pGpu->getProperty(pGpu, PDB_PROP_GPU_PRIMARY_DEVICE))
+            {
+                break;
+            }
+        }
+    }
+
+    if (pGpu)
+    {
+        return gpuGetInstance(pGpu);
+    }
+
+    // otherwise the primary is the first non 3d controller in the set attached to the RM
+    gpuInstance = 0;
+    while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
+    {
+        if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_3D_CONTROLLER))
+        {
+            break;
+        }
+    }
+
+    if (!pGpu)
+    {
+        // The GPU mask contains only 3d Controllers.
+        // Choose first one in the set attached to the RM.
+        gpuInstance = 0;
+        pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance);
+    }
+
+    if (pGpu == NULL)
+    {
+        return 0; // This should never happen
+    }
+
+    return gpuGetInstance(pGpu);
+}
+
+NV_STATUS
+gpumgrGetGpuLockAndDrPorts
+(
+    OBJGPU *pGpu,
+    OBJGPU *pPeerGpu,
+    NvU32 *pPinsetOut,
+    NvU32 *pPinsetIn
+)
+{
+    *pPinsetOut = 0;
+    *pPinsetIn = 0;
+    return NV_OK;
+}
+
+//
+// Stores the address of the boot primary in pGpu
+// Returns NV_OK on success, NV_ERR_GENERIC otherwise.
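+// Example (hedged):
+//
+//     OBJGPU *pBootGpu;
+//     if (gpumgrGetBootPrimary(&pBootGpu) == NV_OK)
+//     {
+//         // pBootGpu has PDB_PROP_GPU_PRIMARY_DEVICE set
+//     }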
+// +NV_STATUS +gpumgrGetBootPrimary(OBJGPU **ppGpu) +{ + NvU32 gpuCount, gpuMask, idx1; + OBJGPU *pGpu = NULL; + + // Find boot primary + idx1 = 0; + gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask); + while ((pGpu = gpumgrGetNextGpu(gpuMask, &idx1)) != NULL) + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_PRIMARY_DEVICE)) + break; + } + *ppGpu = pGpu; + + // No boot primary + if (pGpu == NULL) + { + return NV_ERR_GENERIC; + } + + return NV_OK; +} + +// +// Returns the mGpu +// +OBJGPU *gpumgrGetMGpu (void) +{ + OBJGPU *pGpu; + NvU32 gpuCount, gpuMask, gpuIndex = 0; + // Parse through all the GPUs + + gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask); + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex))) + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_HYBRID_MGPU)) + { + break; + } + } + + return pGpu; +} + +// +// Get PhysFbAddr for the given GPU which may be different if +// the GPU is broadcast or chipset broadcast are enabled or not: +// - BC GPU + no CL BC -> returns gpu address +// - UC GPU -> returns GPU address +// - BC GPU + CL BC -> returns broadcast address +// +RmPhysAddr gpumgrGetGpuPhysFbAddr(OBJGPU *pGpu) +{ + RmPhysAddr physFbAddr; + + physFbAddr = pGpu->busInfo.gpuPhysFbAddr; + + NV_ASSERT(pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB) || physFbAddr); + return physFbAddr; +} + + +// +// Get GPU object from subdevice instance +// +POBJGPU +gpumgrGetGpuFromSubDeviceInst(NvU32 deviceInst, NvU32 subDeviceInst) +{ + OBJGPU *pGpu = NULL; + OBJGPUGRP *pGpuGrp = NULL; + NvU32 gpuInst = 0; + NvU32 gpuMask; + + pGpuGrp = gpumgrGetGpuGrpFromInstance(deviceInst); + NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NULL); + + gpuMask = gpugrpGetGpuMask(pGpuGrp); + + // check for single GPU case + if (gpumgrGetSubDeviceCount(gpuMask) == 1) + return gpumgrGetNextGpu(gpuMask, &gpuInst); + + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInst)) != NULL) + { + if (gpumgrGetSubDeviceInstanceFromGpu(pGpu) == subDeviceInst) + { + break; + } + } + + NV_ASSERT(pGpu); + + return pGpu; +} + +/*! + * @brief sets the device instance pGpu->deviceInstance for the GPUs indicated by the gpu mask + * + * Only remove the device instance if it is the last GPU to be removed. + * + * At RM initialization we fill in the software feature values for this GPU. + * The values are determined from the software feature database + * + * @param[in] gpuMask NvU32 value + * + * @return NV_OK or NV_ERR_OBJECT_NOT_FOUND if no GPU has been found + * + */ +NV_STATUS +gpumgrAddDeviceInstanceToGpus(NvU32 gpuMask) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NV_STATUS rmStatus = NV_ERR_OBJECT_NOT_FOUND; + OBJGPU *pGpu = NULL; + NvU32 i, gpuIndex = 0; + OBJGPUGRP *pGpuGrp = NULL; + + // Add the device instance to the GPU objects in the mask + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex))) + { + rmStatus = NV_ERR_OBJECT_NOT_FOUND; + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->pGpuGrpTable); i++) + { + pGpuGrp = pGpuMgr->pGpuGrpTable[i]; + // if it contains the specified gpu... + if ((pGpuGrp != NULL) && + (gpugrpGetGpuMask(pGpuGrp) & NVBIT(pGpu->gpuInstance))) + { + pGpu->deviceInstance = i; + rmStatus = NV_OK; + break; + } + } + NV_ASSERT_OK_OR_RETURN(rmStatus); + } + + return rmStatus; +} + +/*! 
+ * @brief Retrieves the OBJGPUGRP pointer given the instance + * + * @param[in] gpugrpInstance GPUGRP instance + * + * @return GPUGRP pointer on success, NULL on error + * + */ +OBJGPUGRP * +gpumgrGetGpuGrpFromInstance +( + NvU32 gpugrpInstance +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NV_ASSERT_OR_RETURN(gpugrpInstance < NV_MAX_DEVICES, NULL); + return pGpuMgr->pGpuGrpTable[gpugrpInstance]; +} + +/*! + * @brief Retrieves the OBJGPUGRP pointer given the GPU pointer. + * + * @param[in] pGpu GPU object pointer + * + * @return OBJGPUGRP pointer on success, NULL on error + * + */ +OBJGPUGRP * +gpumgrGetGpuGrpFromGpu +( + OBJGPU *pGpu +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 gpugrpInstance = gpuGetDeviceInstance(pGpu); + NV_ASSERT_OR_RETURN(gpugrpInstance < NV_MAX_DEVICES, NULL); + + return pGpuMgr->pGpuGrpTable[gpugrpInstance]; +} + +/*! + * @brief Constructs the GPUGRP object for the given instance + * + * @param[in] pGpu GPU object pointer + * @param[in] gpuMask GpuMask corresponding to this GPUGRP + * @param[out] ppGpuGrp Newly created gpugrp object pointer + * + * @return NV_OK on success, appropriate error on failure. + * + */ +NV_STATUS +gpumgrConstructGpuGrpObject +( + OBJGPUMGR *pGpuMgr, + NvU32 gpuMask, + OBJGPUGRP **ppGpuGrp +) +{ + NV_STATUS status; + + status = objCreate(ppGpuGrp, pGpuMgr, OBJGPUGRP); + if (NV_OK != status) + { + return status; + } + + status = gpugrpCreate(*ppGpuGrp, gpuMask); + if (NV_OK != status) + { + return status; + } + + return NV_OK; +} + +/*! + * @brief Enter/exit "drain" state on a given GPU + * + * @param[in] gpuId Platform specific GPU Id + * @param[in] bEnable NV_TRUE: enter, NV_FALSE: exit + * @param[in] bRemove Ask the OS to forget the GPU, once quiescent + * @param[in] bLinkDisable Shut down the upstream PCIe link after the removal. + * This is done in user-land, we just check that the + * GPU is in the right state. + * + * @return NV_OK on success, appropriate error on failure. + */ +NV_STATUS +gpumgrModifyGpuDrainState + +( + NvU32 gpuId, + NvBool bEnable, + NvBool bRemove, + NvBool bLinkDisable +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + OBJGPU *pGpu; + NvBool bAttached; + NvBool bStateChange = NV_FALSE; + NvU32 i; + NvU32 domain = 0; + NvU8 bus = 0; + NvU8 device = 0; + + if (bRemove && !osRemoveGpuSupported()) + { + return NV_ERR_NOT_SUPPORTED; + } + + bAttached = ((pGpu = gpumgrGetGpuFromId(gpuId)) != NULL); + + if (bEnable && bLinkDisable && bAttached) + { + return NV_ERR_IN_USE; + } + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); ++i) + { + if (pGpuMgr->probedGpus[i].gpuId == gpuId) + { + bStateChange = pGpuMgr->probedGpus[i].bDrainState != bEnable; + pGpuMgr->probedGpus[i].bDrainState = bEnable; + pGpuMgr->probedGpus[i].bRemoveIdle = bEnable && bRemove; + domain = gpuDecodeDomain(pGpuMgr->probedGpus[i].gpuDomainBusDevice); + bus = gpuDecodeBus(pGpuMgr->probedGpus[i].gpuDomainBusDevice); + device = gpuDecodeDevice(pGpuMgr->probedGpus[i].gpuDomainBusDevice); + break; + } + } + + portSyncMutexRelease(pGpuMgr->probedGpusLock); + + if (i == NV_MAX_DEVICES) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // If the initial drain state (characterized by enabling draining without + // setting the remove flag) is already enabled, multiple clients may be + // trying to simultaneously manage drain state. 
Only return success for the
+    // first to allow them to filter out the others.
+    //
+    if (bEnable && !bRemove && !bStateChange)
+    {
+        return NV_ERR_IN_USE;
+    }
+
+    if (bEnable && bRemove && !bAttached)
+    {
+        osRemoveGpu(domain, bus, device);
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Query "drain"/remove state on a given GPU
+ *
+ * @param[in]  gpuId     Platform specific GPU Id
+ * @param[out] pBEnable  Drain state ptr
+ * @param[out] pBRemove  Remove flag ptr
+ *
+ * @return    NV_OK on success, appropriate error on failure.
+ */
+NV_STATUS
+gpumgrQueryGpuDrainState
+(
+    NvU32   gpuId,
+    NvBool *pBEnable,
+    NvBool *pBRemove
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    NvU32 i;
+
+    portSyncMutexAcquire(pGpuMgr->probedGpusLock);
+
+    for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); ++i)
+    {
+        if (pGpuMgr->probedGpus[i].gpuId == gpuId)
+        {
+            if (pBEnable != NULL)
+            {
+                *pBEnable = pGpuMgr->probedGpus[i].bDrainState;
+            }
+
+            if (pBRemove != NULL)
+            {
+                *pBRemove = pGpuMgr->probedGpus[i].bRemoveIdle;
+            }
+
+            break;
+        }
+    }
+
+    portSyncMutexRelease(pGpuMgr->probedGpusLock);
+
+    //
+    // This used to return a generic NV_ERR_INVALID_ARGUMENT on error, but we
+    // want to be more specific as at least nvml wants to be able to tell this
+    // case apart from other errors. This case is expected when GPUs are
+    // removed from the driver (e.g. through unbind on Linux) after a client
+    // queries for the probed GPUs, but before getting the PCI info for all of
+    // them.
+    //
+    return (i == NV_MAX_DEVICES) ? NV_ERR_OBJECT_NOT_FOUND : NV_OK;
+}
+
+/*!
+* @brief Retrieves the group gpuMask that contains this gpuInstance.
+*        Used for locking all gpus under the same device together
+*
+* @param[in] gpuInst: unique index per GPU
+*
+* @return gpuMask: mask of all GPUs that are in the same group
+*
+*/
+NvU32
+gpumgrGetGrpMaskFromGpuInst
+(
+    NvU32 gpuInst
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+
+    NV_ASSERT_OR_RETURN(gpuInst < NV_MAX_DEVICES, 0);
+
+    return pGpuMgr->gpuInstMaskTable[gpuInst];
+}
+
+/*!
+* @brief Updates the per-GPU instance table to contain the correct group mask
+*
+* @param[in] gpuMask: mask of all GPUs that are in the same group
+*
+*/
+void
+gpumgrAddDeviceMaskToGpuInstTable
+(
+    NvU32 gpuMask
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    int gpuInst;
+    NvU32 tempGpuMask = gpuMask;
+
+    for (gpuInst = 0; (tempGpuMask != 0) && (gpuInst < NV_MAX_DEVICES); gpuInst++)
+    {
+        if (NVBIT(gpuInst) & gpuMask)
+            pGpuMgr->gpuInstMaskTable[gpuInst] = gpuMask;
+
+        tempGpuMask &= ~NVBIT(gpuInst);
+    }
+}
+
+/*!
+* @brief Clears the group mask from the per-GPU instance table (when the
+*        group is destroyed)
+*
+* @param[in] gpuMask: gpu group mask being torn down
+*
+*/
+void
+gpumgrClearDeviceMaskFromGpuInstTable
+(
+    NvU32 gpuMask
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    int gpuInst;
+    NvU32 tempGpuMask = gpuMask;
+
+    for (gpuInst = 0; (tempGpuMask != 0) && (gpuInst < NV_MAX_DEVICES); gpuInst++)
+    {
+        if (NVBIT(gpuInst) & gpuMask)
+            pGpuMgr->gpuInstMaskTable[gpuInst] = 0;
+
+        tempGpuMask &= ~NVBIT(gpuInst);
+    }
+}
+
+NV_STATUS
+gpumgrCacheGetActiveDeviceIds_IMPL
+(
+    NV0000_CTRL_GPU_GET_ACTIVE_DEVICE_IDS_PARAMS *pActiveDeviceIdsParams
+)
+{
+    NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS probedGpuIds;
+    NV0000_CTRL_GPU_ACTIVE_DEVICE *pDevices = pActiveDeviceIdsParams->devices;
+    NvU32 *pNumDevices = &pActiveDeviceIdsParams->numDevices;
+    NvU32 total = 0;
+    NvU32 i;
+
+    NV_ASSERT_OK_OR_RETURN(gpumgrGetProbedGpuIds(&probedGpuIds));
+
+    for (i = 0;
+         (i < NV0000_CTRL_GPU_MAX_PROBED_GPUS) &&
+         (probedGpuIds.gpuIds[i] != NV0000_CTRL_GPU_INVALID_ID); i++)
+    {
+        NvU32 gpuId = probedGpuIds.gpuIds[i];
+
+        NV_ASSERT_OR_RETURN(total < NV0000_CTRL_GPU_MAX_ACTIVE_DEVICES,
+                            NV_ERR_INVALID_STATE);
+
+        pDevices[total].gpuId = gpuId;
+        pDevices[total].gpuInstanceId = NV0000_CTRL_GPU_INVALID_ID;
+        pDevices[total].computeInstanceId = NV0000_CTRL_GPU_INVALID_ID;
+        total++;
+    }
+
+    *pNumDevices = total;
+
+    return NV_OK;
+}
+
+/**
+ * @brief Saves a pointer to the current GPU instance in thread local storage,
+ *        to be logged by NVLOG, until gpumgrSetGpuRelease is called.
+ *        Returns NV_TRUE if the TLS entry was acquired, NV_FALSE otherwise;
+ *        release the entry with gpumgrSetGpuRelease.
+ *
+ * @param[in] pGpu
+ */
+NvBool
+gpumgrSetGpuAcquire(OBJGPU *pGpu)
+{
+    NvU32 **ppGpuInstance;
+    ppGpuInstance = (NvU32 **)tlsEntryAcquire
+                    (TLS_ENTRY_ID_CURRENT_GPU_INSTANCE);
+    if (ppGpuInstance)
+    {
+        *ppGpuInstance = &(pGpu->gpuInstance);
+        return NV_TRUE;
+    }
+    return NV_FALSE;
+}
+
+/**
+ * @brief Releases the thread local storage for GPU ID.
+ */ +void +gpumgrSetGpuRelease(void) +{ + tlsEntryRelease(TLS_ENTRY_ID_CURRENT_GPU_INSTANCE); +} + +/** +* @brief Returns the type of bridge SLI_BT_* +*/ +NvU8 +gpumgrGetGpuBridgeType(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + + return pGpuMgr->gpuBridgeType; +} + +/** +* @brief Init the PCIE P2P info cache +*/ +NV_STATUS +gpumgrInitPcieP2PCapsCache_IMPL(OBJGPUMGR* pGpuMgr) +{ + listInitIntrusive(&pGpuMgr->pcieP2PCapsInfoCache); + pGpuMgr->pcieP2PCapsInfoLock = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged()); + if (pGpuMgr->pcieP2PCapsInfoLock == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + return NV_OK; +} + +/** +* @brief Destroy the PCIE P2P info cache +*/ +void +gpumgrDestroyPcieP2PCapsCache_IMPL(OBJGPUMGR* pGpuMgr) +{ + PCIEP2PCAPSINFO *pPcieCapsInfo, *pPcieCapsInfoNext; + + if (pGpuMgr->pcieP2PCapsInfoLock == NULL) + return; + + portSyncMutexAcquire(pGpuMgr->pcieP2PCapsInfoLock); + + // Remove and free all entries that have this GPU + for (pPcieCapsInfo = listHead(&(pGpuMgr->pcieP2PCapsInfoCache)); + pPcieCapsInfo != NULL; + pPcieCapsInfo = pPcieCapsInfoNext) + { + pPcieCapsInfoNext = listNext(&(pGpuMgr->pcieP2PCapsInfoCache), pPcieCapsInfo); + portMemFree(pPcieCapsInfo); + } + + listDestroy(&pGpuMgr->pcieP2PCapsInfoCache); + portSyncMutexRelease(pGpuMgr->pcieP2PCapsInfoLock); + + portSyncMutexDestroy(pGpuMgr->pcieP2PCapsInfoLock); +} + +/** +* @brief Add an entry in the PCIE P2P info cache + * @param[in] gpuMask NvU32 value + * @param[in] p2pWriteCapsStatus NvU8 value + * @param[in] pP2PReadCapsStatus NvU8 value + * + * @return NV_OK or NV_ERR_NO_MEMORY + */ +NV_STATUS +gpumgrStorePcieP2PCapsCache_IMPL +( + NvU32 gpuMask, + NvU8 p2pWriteCapsStatus, + NvU8 p2pReadCapsStatus +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + PCIEP2PCAPSINFO *pPcieCapsInfo; + NvU32 gpuInstance; + OBJGPU *pGpu; + NvU32 gpuCount = 0; + NvU32 status = NV_OK; + + portSyncMutexAcquire(pGpuMgr->pcieP2PCapsInfoLock); + if (_gpumgrGetPcieP2PCapsFromCache(gpuMask, NULL, NULL)) + { + // Entry already present in cache + goto exit; + } + + pPcieCapsInfo = portMemAllocNonPaged(sizeof(PCIEP2PCAPSINFO)); + if (pPcieCapsInfo == NULL) + { + status = NV_ERR_NO_MEMORY; + goto exit; + } + listAppendExisting(&(pGpuMgr->pcieP2PCapsInfoCache), pPcieCapsInfo); + + gpuInstance = 0; + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + pPcieCapsInfo->gpuId[gpuCount] = pGpu->gpuId; + gpuCount++; + } + + pPcieCapsInfo->gpuCount = gpuCount; + pPcieCapsInfo->p2pWriteCapsStatus = p2pWriteCapsStatus; + pPcieCapsInfo->p2pReadCapsStatus = p2pReadCapsStatus; + +exit: + portSyncMutexRelease(pGpuMgr->pcieP2PCapsInfoLock); + return status; +} + +/** + * @brief Get the PCIE P2P info from cache if present + * - Helper function + * + * @param[in] gpuMask NvU32 value + * @param[out] pP2PWriteCapsStatus NvU8* pointer + * Can be NULL + * @param[out] pP2PReadCapsStatus NvU8* pointer + * Can be NULL + * Return bFound NvBool + */ +static NvBool +_gpumgrGetPcieP2PCapsFromCache +( + NvU32 gpuMask, + NvU8 *pP2PWriteCapsStatus, + NvU8 *pP2PReadCapsStatus) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + PCIEP2PCAPSINFO *pPcieCapsInfo; + pcieP2PCapsInfoListIter it; + NvU32 gpuInstance; + NvU32 gpuCount; + NvU32 remainingGpuCount; + OBJGPU *pGpu; + NvU32 gpuIdLoop; + NvBool bFound = NV_FALSE; + + gpuCount = gpumgrGetSubDeviceCount(gpuMask); + + it = 
listIterAll(&pGpuMgr->pcieP2PCapsInfoCache); + while (listIterNext(&it)) + { + pPcieCapsInfo = it.pValue; + if (gpuCount != pPcieCapsInfo->gpuCount) + { + continue; + } + + // + // Same count of GPUs in gpuId array and GPU mask. + // All GPU in the gpuMask must have a match in gpuId[] + // + gpuInstance = 0; + remainingGpuCount = gpuCount; + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + gpuIdLoop = 0; + while (gpuIdLoop < gpuCount) + { + if (pPcieCapsInfo->gpuId[gpuIdLoop] == pGpu->gpuId) + { + remainingGpuCount--; + break; + } + gpuIdLoop++; + } + if (remainingGpuCount == 0) + { + break; + } + } + + if (remainingGpuCount == 0) + { + if (pP2PWriteCapsStatus != NULL) + *pP2PWriteCapsStatus = pPcieCapsInfo->p2pWriteCapsStatus; + if (pP2PReadCapsStatus != NULL) + *pP2PReadCapsStatus = pPcieCapsInfo->p2pReadCapsStatus; + bFound = NV_TRUE; + break; + } + } + return bFound; +} + +/** + * @brief Get the PCIE P2P info from cache if present + * - Take cache locks + * + * @param[in] gpuMask NvU32 value + * @param[out] pP2PWriteCapsStatus NvU8* pointer + * Can be NULL + * @param[out] pP2PReadCapsStatus NvU8* pointer + * Can be NULL + * + * return bFound NvBool + */ +NvBool +gpumgrGetPcieP2PCapsFromCache_IMPL +( + NvU32 gpuMask, + NvU8 *pP2PWriteCapsStatus, + NvU8 *pP2PReadCapsStatus +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvBool bFound; + + portSyncMutexAcquire(pGpuMgr->pcieP2PCapsInfoLock); + + bFound = _gpumgrGetPcieP2PCapsFromCache(gpuMask, pP2PWriteCapsStatus, pP2PReadCapsStatus); + + portSyncMutexRelease(pGpuMgr->pcieP2PCapsInfoLock); + + return bFound; +} + + +/** + * @brief Remove the PCIE P2P info from cache if present + * + * @param[in] gpuId NvU32 value + */ +void +gpumgrRemovePcieP2PCapsFromCache_IMPL +( + NvU32 gpuId +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + PCIEP2PCAPSINFO *pPcieCapsInfo, *pPcieCapsInfoNext; + NvU32 gpuIdLoop; + + portSyncMutexAcquire(pGpuMgr->pcieP2PCapsInfoLock); + + // Remove and free all entries that have this GPU + for (pPcieCapsInfo = listHead(&(pGpuMgr->pcieP2PCapsInfoCache)); + pPcieCapsInfo != NULL; + pPcieCapsInfo = pPcieCapsInfoNext) + { + // As we potentially remove an entry we need to save off the next one. + pPcieCapsInfoNext = listNext(&(pGpuMgr->pcieP2PCapsInfoCache), pPcieCapsInfo); + gpuIdLoop = 0; + while (gpuIdLoop < pPcieCapsInfo->gpuCount) + { + if (pPcieCapsInfo->gpuId[gpuIdLoop] == gpuId) + { + listRemove(&pGpuMgr->pcieP2PCapsInfoCache, pPcieCapsInfo); + portMemFree(pPcieCapsInfo); + // Go to next entry (for loop) + break; + } + gpuIdLoop++; + } + } + portSyncMutexRelease(pGpuMgr->pcieP2PCapsInfoLock); +} + +NvBool gpumgrAreAllGpusInOffloadMode(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + + return pGpuMgr->gpuMonolithicRmMask == 0; +} + +NvBool gpumgrIsSafeToReadGpuInfo(void) +{ + // + // A thread that tears down the GPU must own both the API lock for WRITE + // and all GPU locks. + // + // Conversely, if you hold the API lock (either READ or WRITE), or hold + // any GPU locks, you know that no GPUs will be freed from under you. + // + + // + // NOTE: Currently rmapiLockIsOwner() returns TRUE if you own the lock in + // either READ or WRITE modes + // + return rmapiLockIsOwner() || (rmGpuLocksGetOwnedMask() != 0); +} + +// +// Workaround for Bug 3809777. This is a HW bug happening in Ampere and +// Ada GPU's. 
For these GPU's, after device reset, CRS (Configuration Request +// Retry Status) is being released without waiting for GFW boot completion. +// MSI-X capability in the config space may be inconsistent when GFW boot +// is in progress, so this function checks if MSI-X is allowed. +// For Hopper and above, the CRS will be released after +// GFW boot completion, so the WAR is not needed. +// The bug will be exposed only when GPU is running inside guest in +// pass-through mode. +// +NvBool gpumgrIsDeviceMsixAllowed +( + RmPhysAddr bar0BaseAddr, + NvU32 pmcBoot1, + NvU32 pmcBoot42 +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys); + NvU32 chipArch; + + if ((hypervisorGetHypervisorType(pHypervisor) == OS_HYPERVISOR_UNKNOWN) || + !FLD_TEST_DRF(_PMC, _BOOT_1, _VGPU, _REAL, pmcBoot1)) + { + return NV_TRUE; + } + + chipArch = decodePmcBoot42Architecture(pmcBoot42); + if ((chipArch != NV_PMC_BOOT_42_ARCHITECTURE_AD100) && + (chipArch != NV_PMC_BOOT_42_ARCHITECTURE_GA100)) + { + return NV_TRUE; + } + + return NV_FALSE; +} + +// +// Workaround for Bug 5041782 +// There is a BAR firewall that will prevent any reads/writes to the BAR +// register space while a reset is still pending. We must wait for this +// to drop. Return true if the wait is successful, false otherwise +// +NvBool gpumgrWaitForBarFirewall +( + NvU32 domain, + NvU8 bus, + NvU8 device, + NvU8 function, + NvU16 devId, + NvU16 subsystemId +) +{ + + return NV_TRUE; +} + diff --git a/src/nvidia/src/kernel/mem_mgr/io_vaspace.c b/src/nvidia/src/kernel/mem_mgr/io_vaspace.c new file mode 100644 index 0000000..5eb8c38 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/io_vaspace.c @@ -0,0 +1,561 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/***************************** HW State Routines ***************************\ +* * +* IOMMU Virtual Address Space Function Definitions. 
* +* * +\***************************************************************************/ + +#include "mem_mgr/io_vaspace.h" +#include "class/cl00f2.h" // IO_VASPACE_A +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "gpu_mgr/gpu_mgr.h" +#include "os/os.h" +#include "core/system.h" +#include "mem_mgr/virt_mem_mgr.h" + + +NV_STATUS +iovaspaceConstruct__IMPL +( + OBJIOVASPACE *pIOVAS, + NvU32 classId, + NvU32 vaspaceId, + NvU64 vaStart, + NvU64 vaLimit, + NvU64 vaStartInternal, + NvU64 vaLimitInternal, + NvU32 flags +) +{ + NV_ASSERT_OR_RETURN(IO_VASPACE_A == classId, NV_ERR_INVALID_ARGUMENT); + pIOVAS->mappingCount = 0; + return NV_OK; +} + +void +iovaspaceDestruct_IMPL(OBJIOVASPACE *pIOVAS) +{ + OBJVASPACE *pVAS = staticCast(pIOVAS, OBJVASPACE); + + if (pIOVAS->mappingCount != 0) + { + NV_PRINTF(LEVEL_ERROR, "%lld left-over mappings in IOVAS 0x%x\n", + pIOVAS->mappingCount, pVAS->vaspaceId); + DBG_BREAKPOINT(); + } +} + +NV_STATUS +iovaspaceAlloc_IMPL +( + OBJIOVASPACE *pIOVAS, + NvU64 size, + NvU64 align, + NvU64 rangeLo, + NvU64 rangeHi, + NvU64 pageSizeLockMask, + VAS_ALLOC_FLAGS flags, + NvU64 *pAddr +) +{ + NV_STATUS status = NV_OK; + + // TBD implement iommu specific stuff + return status; +} + +NV_STATUS +iovaspaceFree_IMPL +( + OBJIOVASPACE *pIOVAS, + NvU64 vAddr +) +{ + NV_STATUS status = NV_OK; + + // TBD implement iommu specific stuff + return status; +} + +NV_STATUS +iovaspaceApplyDefaultAlignment_IMPL +( + OBJIOVASPACE *pIOVAS, + const FB_ALLOC_INFO *pAllocInfo, + NvU64 *pAlign, + NvU64 *pSize, + NvU64 *pPageSizeLockMask +) +{ + RM_ATTR_PAGE_SIZE pageSizeAttr; + NvU64 maxPageSize = RM_PAGE_SIZE; + + pageSizeAttr = dmaNvos32ToPageSizeAttr(pAllocInfo->pageFormat->attr, pAllocInfo->pageFormat->attr2); + switch(pageSizeAttr) + { + case RM_ATTR_PAGE_SIZE_DEFAULT: + case RM_ATTR_PAGE_SIZE_4KB: + *pAlign = NV_MAX(*pAlign, maxPageSize); + *pSize = RM_ALIGN_UP(*pSize, maxPageSize); + return NV_OK; + default: + break; + } + + return NV_OK; +} + +NV_STATUS +iovaspaceIncAllocRefCnt_IMPL +( + OBJIOVASPACE *pIOVAS, + NvU64 vAddr +) +{ + NV_STATUS status = NV_OK; + + // TBD: Implement iommu specific stuff + return status; +} + +NV_STATUS +iovaspaceGetVasInfo_IMPL +( + OBJIOVASPACE *pIOVAS, + struct NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams +) +{ + return NV_OK; +} + +NvU64 +iovaspaceGetVaStart_IMPL(OBJIOVASPACE *pIOVAS) +{ + // TODO: query OS layer, this could also be set in ctor, not virtual? + return 0; +} + +NvU64 +iovaspaceGetVaLimit_IMPL(OBJIOVASPACE *pIOVAS) +{ + // TODO: query OS layer, this could also be set in ctor, not virtual? + return NVBIT64(32) - 1; +} + +#if (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_MODS_FEATURES) && !NVCPU_IS_ARM +static PIOVAMAPPING +_iovaspaceCreateMappingDataFromMemDesc +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + PIOVAMAPPING pIovaMapping = NULL; + NvU64 mappingDataSize = 0; + + mappingDataSize = sizeof(IOVAMAPPING); + if (!memdescGetContiguity(pMemDesc, AT_CPU)) + { + mappingDataSize += sizeof(RmPhysAddr) * + (NvU64_LO32(pMemDesc->PageCount) - 1); + } + + // + // The portMemAllocNonPaged() and portMemSet() interfaces work with 32-bit sizes, + // so make sure we don't exceed that here. + // + if (NvU64_HI32(mappingDataSize) != 0UL) + { + NV_PRINTF(LEVEL_ERROR, "too much memory to map! 
(0x%llx bytes)\n", + mappingDataSize); + DBG_BREAKPOINT(); + return NULL; + } + + pIovaMapping = portMemAllocNonPaged(NvU64_LO32(mappingDataSize)); + if (pIovaMapping == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "failed to allocate 0x%x bytes for IOVA mapping metadata\n", + NvU64_LO32(mappingDataSize)); + return NULL; + } + + portMemSet((void *)pIovaMapping, 0, NvU64_LO32(mappingDataSize)); + + pIovaMapping->pPhysMemDesc = pMemDesc; + + return pIovaMapping; +} + +static NV_STATUS +_iovaspaceCreateSubmapping +( + OBJIOVASPACE *pIOVAS, + PMEMORY_DESCRIPTOR pPhysMemDesc +) +{ + NvU64 rootOffset; + NV_STATUS status = NV_OK; + OBJVASPACE *pVAS = staticCast(pIOVAS, OBJVASPACE); + PMEMORY_DESCRIPTOR pRootMemDesc = memdescGetRootMemDesc(pPhysMemDesc, &rootOffset); + PIOVAMAPPING pRootIovaMapping; + PIOVAMAPPING pSubMapping = NULL; + + NV_ASSERT(pRootMemDesc != pPhysMemDesc); + + // + // A submapping requires the root mapping to be there, acquire a reference + // on it so that it sticks around for at least as long as the submapping. + // The reference is released when the submapping is destroyed. + // + status = iovaspaceAcquireMapping(pIOVAS, pRootMemDesc); + if (status != NV_OK) + return status; + + // + // The root mapping has been just successfully acquired so it has to be + // there. + // + pRootIovaMapping = memdescGetIommuMap(pRootMemDesc, pVAS->vaspaceId); + NV_ASSERT(pRootIovaMapping != NULL); + + // + // Since this is a submemory descriptor, we need to account for the + // PteAdjust as well, which is included in rootOffset. We don't want to + // account for it in the iovaArray because it is not accounted for in the + // memdesc's PTE array. This should result in a 4K-aligned root offset. + // + rootOffset -= pPhysMemDesc->PteAdjust; + NV_ASSERT((rootOffset & RM_PAGE_MASK) == 0); + + // + // For submemory descriptors, there are two possibilities: + // (1) The root descriptor already has an IOVA mapping for the entire + // allocation in this IOVA space, in which case we just need a subset + // of that. + // (2) The root descriptor does not have an IOVA mapping for any of the + // allocation in this IOVA space, in which case we need to create one + // first. + // + + pSubMapping = _iovaspaceCreateMappingDataFromMemDesc(pPhysMemDesc); + if (pSubMapping == NULL) + { + iovaspaceReleaseMapping(pIOVAS, pRootIovaMapping); + return NV_ERR_NO_MEMORY; + } + + pSubMapping->refcount = 1; + pSubMapping->iovaspaceId = pRootIovaMapping->iovaspaceId; + pSubMapping->link.pParent = pRootIovaMapping; + + pSubMapping->pNext = pRootIovaMapping->link.pChildren; + pRootIovaMapping->link.pChildren = pSubMapping; + + // + // We need to copy over the corresponding entries from the root IOVA + // mapping before we assign it to the physical memdesc. The root offset + // determines where in the root mapping we need to start. 
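+    //
+    // Illustrative sketch (hypothetical helper; names assumed): the copy
+    // below is plain page-index arithmetic. A submapping that begins
+    // rootOffset bytes into the root allocation starts at entry
+    // (rootOffset >> RM_PAGE_SHIFT) of the root IOVA array:
+    //
+    //   static void copySubRange(const RmPhysAddr *pRootIova, NvU64 rootPages,
+    //                            RmPhysAddr *pSubIova, NvU64 subPages,
+    //                            NvU64 rootOffset)
+    //   {
+    //       NvU64 i = rootOffset >> RM_PAGE_SHIFT; // first shared root page
+    //       NvU64 j;
+    //       for (j = 0; j < subPages && i < rootPages; i++, j++)
+    //           pSubIova[j] = pRootIova[i];
+    //   }
+    //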
+ // + if (memdescGetContiguity(pPhysMemDesc, AT_CPU)) + { + pSubMapping->iovaArray[0] = pRootIovaMapping->iovaArray[0] + rootOffset; + } + else + { + NvU64 i, j; + NV_ASSERT(((rootOffset >> RM_PAGE_SHIFT) + pPhysMemDesc->PageCount) <= + pRootMemDesc->PageCount); + for (i = (rootOffset >> RM_PAGE_SHIFT), j = 0; + j < pPhysMemDesc->PageCount && i < pRootMemDesc->PageCount; i++, j++) + { + pSubMapping->iovaArray[j] = pRootIovaMapping->iovaArray[i]; + } + } + + memdescAddIommuMap(pPhysMemDesc, pSubMapping); + + ++pIOVAS->mappingCount; + + return NV_OK; +} + +static void +_iovaspaceDestroySubmapping +( + OBJIOVASPACE *pIOVAS, + PIOVAMAPPING pIovaMapping +) +{ + PMEMORY_DESCRIPTOR pPhysMemDesc = pIovaMapping->pPhysMemDesc; + PIOVAMAPPING pRootIovaMapping = pIovaMapping->link.pParent; + PIOVAMAPPING pTmpIovaMapping = pRootIovaMapping->link.pChildren; + + memdescRemoveIommuMap(pPhysMemDesc, pIovaMapping); + + if (pTmpIovaMapping == pIovaMapping) + { + pRootIovaMapping->link.pChildren = pIovaMapping->pNext; + } + else + { + while (pTmpIovaMapping != NULL && pTmpIovaMapping->pNext != pIovaMapping) + { + pTmpIovaMapping = pTmpIovaMapping->pNext; + } + + if (pTmpIovaMapping != NULL) + { + pTmpIovaMapping->pNext = pIovaMapping->pNext; + } + else + { + // Not found in the root submappings list? + NV_ASSERT(pTmpIovaMapping != NULL); + } + } + + portMemFree(pIovaMapping); + --pIOVAS->mappingCount; + + // + // After destroying a submapping, release its reference on the root mapping. + // The reference was acquired in _iovaspaceCreateSubmapping(). + // + iovaspaceReleaseMapping(pIOVAS, pRootIovaMapping); +} + +static NV_STATUS +_iovaspaceCreateMapping +( + OBJIOVASPACE *pIOVAS, + PMEMORY_DESCRIPTOR pPhysMemDesc +) +{ + NV_STATUS status; + OBJVASPACE *pVAS = staticCast(pIOVAS, OBJVASPACE); + NV_ADDRESS_SPACE addressSpace; + PIOVAMAPPING pIovaMapping = NULL; + OBJGPU *pMappingGpu = NULL; + + // + // The source memdesc has to be allocated to acquire an I/O VA space + // mapping, because the OS layer will be setting up a layer of indirection + // that assumes the PTEs in the memdesc are valid. There is no requirement + // that it be mapped to the CPU at this point. 
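+    //
+    // Illustrative sketch (assumed caller context): the lifecycle contract
+    // pairs one iovaspaceAcquireMapping() with one iovaspaceReleaseMapping();
+    // the OS-level mapping is torn down only when the last reference drops:
+    //
+    //   if (iovaspaceAcquireMapping(pIOVAS, pMemDesc) == NV_OK)
+    //   {
+    //       PIOVAMAPPING pMap = memdescGetIommuMap(pMemDesc, vaspaceId); // ID of pIOVAS
+    //       // ... program the device with pMap->iovaArray[] entries ...
+    //       iovaspaceReleaseMapping(pIOVAS, pMap);
+    //   }
+    //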
+ // + if (pPhysMemDesc == NULL) + { + NV_ASSERT(pPhysMemDesc != NULL); + return NV_ERR_INVALID_ARGUMENT; + } + + pMappingGpu = gpumgrGetGpuFromId(pVAS->vaspaceId); + addressSpace = memdescGetAddressSpace(pPhysMemDesc); + + // Only support SYSMEM or indirect peer mappings + if ((addressSpace != ADDR_SYSMEM) && + !gpumgrCheckIndirectPeer(pMappingGpu, pPhysMemDesc->pGpu)) + { + NV_ASSERT(0); + return NV_ERR_INVALID_STATE; + } + + pIovaMapping = _iovaspaceCreateMappingDataFromMemDesc(pPhysMemDesc); + if (pIovaMapping == NULL) + { + return NV_ERR_NO_MEMORY; + } + + // Initialize the mapping as an identity mapping for the OS layer + if (memdescGetContiguity(pPhysMemDesc, AT_CPU)) + { + pIovaMapping->iovaArray[0] = memdescGetPte(pPhysMemDesc, AT_CPU, 0); + } + else + { + NvU32 i; + for (i = 0; i < pPhysMemDesc->PageCount; i++) + { + pIovaMapping->iovaArray[i] = memdescGetPte(pPhysMemDesc, AT_CPU, i); + } + } + + pIovaMapping->iovaspaceId = pVAS->vaspaceId; + pIovaMapping->refcount = 1; + + status = osIovaMap(pIovaMapping); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to map memdesc into I/O VA space 0x%x (status = 0x%x)\n", + pVAS->vaspaceId, status); + goto error; + } + + memdescAddIommuMap(pPhysMemDesc, pIovaMapping); + ++pIOVAS->mappingCount; + + return NV_OK; + +error: + portMemFree(pIovaMapping); + pIovaMapping = NULL; + + return status; +} + +NV_STATUS +iovaspaceAcquireMapping_IMPL +( + OBJIOVASPACE *pIOVAS, + PMEMORY_DESCRIPTOR pPhysMemDesc +) +{ + OBJVASPACE *pVAS = staticCast(pIOVAS, OBJVASPACE); + PIOVAMAPPING pIovaMapping = memdescGetIommuMap(pPhysMemDesc, pVAS->vaspaceId); + + if (pIovaMapping) + { + // If the mapping is already there, just increment its refcount. + NV_ASSERT(pIovaMapping->refcount != 0); + ++pIovaMapping->refcount; + return NV_OK; + } + + if (memdescIsSubMemoryMemDesc(pPhysMemDesc)) + return _iovaspaceCreateSubmapping(pIOVAS, pPhysMemDesc); + else + return _iovaspaceCreateMapping(pIOVAS, pPhysMemDesc); +} + +static void +_iovaspaceDestroyRootMapping +( + OBJIOVASPACE *pIOVAS, + PIOVAMAPPING pIovaMapping +) +{ + PMEMORY_DESCRIPTOR pPhysMemDesc = pIovaMapping->pPhysMemDesc; + PIOVAMAPPING pNextIovaMapping, pTmpIovaMapping; + + // + // Increment the refcount to guarantee that destroying the last submapping + // won't end up trying to destroy the root mapping we are already + // destroying. + // + ++pIovaMapping->refcount; + + // + // Clear out any submappings underneath this mapping, since they will no + // longer be valid. 
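+    //
+    // Sketch of the walk that follows (hypothetical local names): each child
+    // holds a reference on the root, so destroying a child calls back into
+    // iovaspaceReleaseMapping() on the root; the transient refcount bump
+    // above keeps that from re-entering this destructor. pNext must be
+    // cached before the node is freed:
+    //
+    //   while (pChild != NULL)
+    //   {
+    //       PIOVAMAPPING pNext = pChild->pNext; // cache before free
+    //       _iovaspaceDestroySubmapping(pIOVAS, pChild);
+    //       pChild = pNext;
+    //   }
+    //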
+ // + pNextIovaMapping = pIovaMapping->link.pChildren; + while (pNextIovaMapping != NULL) + { + pTmpIovaMapping = pNextIovaMapping->pNext; + _iovaspaceDestroySubmapping(pIOVAS, pNextIovaMapping); + pNextIovaMapping = pTmpIovaMapping; + } + + memdescRemoveIommuMap(pPhysMemDesc, pIovaMapping); + + osIovaUnmap(pIovaMapping); + portMemFree(pIovaMapping); + + --pIOVAS->mappingCount; +} + +void +iovaspaceDestroyMapping_IMPL +( + OBJIOVASPACE *pIOVAS, + PIOVAMAPPING pIovaMapping +) +{ + if (memdescIsSubMemoryMemDesc(pIovaMapping->pPhysMemDesc)) + _iovaspaceDestroySubmapping(pIOVAS, pIovaMapping); + else + _iovaspaceDestroyRootMapping(pIOVAS, pIovaMapping); +} + +void +iovaspaceReleaseMapping_IMPL +( + OBJIOVASPACE *pIOVAS, + PIOVAMAPPING pIovaMapping +) +{ + if (pIovaMapping == NULL) + { + NV_ASSERT(0); + return; + } + + if (pIovaMapping->refcount == 0) + NV_ASSERT(pIovaMapping->refcount > 0); + + if (--pIovaMapping->refcount != 0) + return; + + iovaspaceDestroyMapping(pIOVAS, pIovaMapping); +} + +OBJIOVASPACE *iovaspaceFromId(NvU32 iovaspaceId) +{ + OBJVASPACE *pVAS; + OBJVMM *pVmm = SYS_GET_VMM(SYS_GET_INSTANCE()); + NV_STATUS status = vmmGetVaspaceFromId(pVmm, iovaspaceId, IO_VASPACE_A, &pVAS); + + if (status != NV_OK) + return NULL; + + return dynamicCast(pVAS, OBJIOVASPACE); +} + +OBJIOVASPACE *iovaspaceFromMapping(PIOVAMAPPING pIovaMapping) +{ + OBJIOVASPACE *pIOVAS = iovaspaceFromId(pIovaMapping->iovaspaceId); + + // + // The IOVASPACE has to be there as the mapping is referencing it. If it's + // not, the mapping has been left dangling outlasting the IOVAS it was + // under. + // + NV_ASSERT(pIOVAS != NULL); + + return pIOVAS; +} + +void iovaMappingDestroy(PIOVAMAPPING pIovaMapping) +{ + OBJIOVASPACE *pIOVAS = iovaspaceFromMapping(pIovaMapping); + + NV_ASSERT_OR_RETURN_VOID(pIOVAS != NULL); + iovaspaceDestroyMapping(pIOVAS, pIovaMapping); +} + +#endif // (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_MODS_FEATURES) && !NVCPU_IS_ARM diff --git a/src/nvidia/src/kernel/mem_mgr/mem.c b/src/nvidia/src/kernel/mem_mgr/mem.c new file mode 100644 index 0000000..464595d --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/mem.c @@ -0,0 +1,1272 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "mem_mgr/mem.h" + + +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/disp/disp_objs.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "core/locks.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "vgpu/rpc.h" +#include "platform/sli/sli.h" +#include "deprecated/rmapi_deprecated.h" +#include "vgpu/vgpu_util.h" + +#include "class/cl0041.h" // NV04_MEMORY +#include "class/cl003e.h" // NV01_MEMORY_SYSTEM +#include "class/cl0071.h" // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR +#include "class/cl00b1.h" // NV01_MEMORY_HW_RESOURCES + +NV_STATUS +memConstruct_IMPL +( + Memory *pMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + RsResourceRef *pParentRef = pResourceRef->pParentRef; + + // + // Common initialization used for both normal construction & copy + // constructor + // + + // NULL if parent isn't a device + pMemory->pDevice = dynamicCast(pParentRef->pResource, Device); + + // NULL if parent isn't a subdevice + pMemory->pSubDevice = dynamicCast(pParentRef->pResource, Subdevice); + + // If parent subdevice, grandparent must be a device + if (pMemory->pSubDevice) + { + RsResourceRef *pGrandParentRef = pParentRef->pParentRef; + + pMemory->pDevice = dynamicCast(pGrandParentRef->pResource, Device); + + if (pMemory->pDevice == NULL) + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + // If child of device, we have a pGpu + if (pMemory->pDevice) + { + // NOTE: pGpu and pDevice be NULL for NoDeviceMemory + pMemory->pGpu = CliGetGpuFromContext(pResourceRef, &pMemory->bBcResource); + + NV_ASSERT_OR_RETURN(pMemory->pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + + // Set thread BC state + gpuSetThreadBcState(pMemory->pGpu, pMemory->bBcResource); + } + + if (RS_IS_COPY_CTOR(pParams)) + { + // + // Copy constructor path (NvRmDupObject) + // + return memCopyConstruct_IMPL(pMemory, pCallContext, pParams); + } + else + { + // + // Default constructor path (NvRmAlloc) + // + } + + return NV_OK; +} + +NV_STATUS +memGetMapAddrSpace_IMPL +( + Memory *pMemory, + CALL_CONTEXT *pCallContext, + NvU32 mapFlags, + NV_ADDRESS_SPACE *pAddrSpace +) +{ + NV_ADDRESS_SPACE addrSpace; + OBJGPU *pGpu = pMemory->pGpu; + NvBool bBcResource = pMemory->bBcResource; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + + if (pGpu == NULL) + return NV_ERR_INVALID_OBJECT; + + gpuSetThreadBcState(pGpu, bBcResource); + + pMemDesc = memdescGetMemDescFromGpu(pMemory->pMemDesc, pGpu); + + NV_ASSERT_OK_OR_RETURN(rmapiGetEffectiveAddrSpace(pGpu, pMemDesc, mapFlags, &addrSpace)); + + if (addrSpace == ADDR_SYSMEM) + { + if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_BAR0_REFLECT)) + { + addrSpace = ADDR_REGMEM; + } + else if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_BAR1_REFLECT)) + { + addrSpace = ADDR_FBMEM; + } + } + + if (pAddrSpace) + *pAddrSpace = addrSpace; + + return NV_OK; +} + +void +memDestruct_IMPL +( + Memory *pMemory +) +{ + OBJGPU *pGpu = pMemory->pGpu; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pMemory); + NvHandle hParent = RES_GET_PARENT_HANDLE(pMemory); + NvHandle hMemory = RES_GET_HANDLE(pMemory); + NV_STATUS status = NV_OK; + + // + // The default destructor is used when memConstructCommon() is called by + // the subclass but not memDestructCommon(). 
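+    //
+    // Illustrative sketch (hypothetical subclass code): the usual pairing is
+    //
+    //   // construct path
+    //   status = memConstructCommon(pMemory, classId, flags, pMemDesc,
+    //                               0, NULL, attr, attr2, 0, 0, tag, NULL);
+    //
+    //   // destruct path: unhook tracking first, then drop the descriptor
+    //   memDestructCommon(pMemory);
+    //   memdescFree(pMemory->pMemDesc);
+    //   memdescDestroy(pMemory->pMemDesc);
+    //
+    // The fallback below only covers subclasses that skipped the second half.
+    //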
+ // + if (pMemory->bConstructed && pMemory->pMemDesc != NULL) + { + // Remove the system memory reference from the client + memDestructCommon(pMemory); + memdescFree(pMemory->pMemDesc); + memdescDestroy(pMemory->pMemDesc); + } + + // if the allocation is RPC-ed, free using RPC + if (pMemory->bRpcAlloc && (IS_VIRTUAL(pGpu) || IS_FW_CLIENT(pGpu))) + { + NV_RM_RPC_FREE(pGpu, hClient, hParent, hMemory, status); + NV_ASSERT((status == NV_OK) || (status == NV_ERR_GPU_IN_FULLCHIP_RESET)); + } +} + +NV_STATUS +memCreateMemDesc_IMPL +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR **ppMemDesc, + NV_ADDRESS_SPACE addrSpace, + NvU64 FBOffset, + NvU64 length, + NvU32 attr, + NvU32 attr2 +) +{ + NV_STATUS status = NV_OK; + NvU32 CpuCacheAttrib, gpuCacheAttrib; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + + *ppMemDesc = NULL; + + if (addrSpace == ADDR_SYSMEM) + NV_ASSERT_OR_RETURN(FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, attr), NV_ERR_INVALID_ARGUMENT); + + // setup the CpuCacheAttrib as well.. (if the caller doesn't specify anything it will be 0=UNCACHED) + switch (DRF_VAL(OS32, _ATTR, _COHERENCY, attr)) + { + case NVOS32_ATTR_COHERENCY_UNCACHED: + CpuCacheAttrib = NV_MEMORY_UNCACHED; + break; + case NVOS32_ATTR_COHERENCY_WRITE_COMBINE: + CpuCacheAttrib = NV_MEMORY_WRITECOMBINED; + break; + case NVOS32_ATTR_COHERENCY_CACHED: + case NVOS32_ATTR_COHERENCY_WRITE_THROUGH: + case NVOS32_ATTR_COHERENCY_WRITE_PROTECT: + case NVOS32_ATTR_COHERENCY_WRITE_BACK: + CpuCacheAttrib = NV_MEMORY_CACHED; + break; + default: + NV_ASSERT(0); + CpuCacheAttrib = NV_MEMORY_UNCACHED; + break; + } + + gpuCacheAttrib = FLD_TEST_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _YES, attr2) ? NV_MEMORY_CACHED : NV_MEMORY_UNCACHED; + + // Create and fill in a memory descriptor + status = memdescCreate(&pMemDesc, pGpu, length, 0, NV_TRUE, addrSpace, + CpuCacheAttrib, + MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE_FB_BC_ONLY(pGpu, addrSpace)); + if (status == NV_OK) + { + if (memdescHasSubDeviceMemDescs(pMemDesc)) + { + MEMORY_DESCRIPTOR *pMemDescNext = pMemDesc->_pNext; + while (pMemDescNext) + { + memdescDescribe(pMemDescNext, addrSpace, FBOffset, length); + memdescSetGpuCacheAttrib(pMemDescNext, gpuCacheAttrib); + pMemDescNext = pMemDescNext->_pNext; + } + } + else + { + memdescDescribe(pMemDesc, addrSpace, FBOffset, length); + memdescSetGpuCacheAttrib(pMemDesc, gpuCacheAttrib); + } + + *ppMemDesc = pMemDesc; + } + + return status; +} + +NV_STATUS +memCreateKernelMapping_IMPL +( + Memory *pMemory, + NvU32 Protect, + NvBool bClear +) +{ + NV_STATUS status; + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(pMemory, NV_FALSE)); + + if (pMemory->KernelVAddr == NvP64_NULL) + { + if (memdescGetAddressSpace(pMemory->pMemDesc) != ADDR_SYSMEM) + { + return NV_ERR_NOT_SUPPORTED; + } + + status = memdescMap(pMemory->pMemDesc, 0, pMemory->Length, NV_TRUE, + Protect, &pMemory->KernelVAddr, &pMemory->KernelMapPriv); + + if (status != NV_OK) + { + pMemory->KernelVAddr = NvP64_NULL; + pMemory->KernelMapPriv = NvP64_NULL; + return status; + } + + memdescSetKernelMapping(pMemory->pMemDesc, pMemory->KernelVAddr); + memdescSetKernelMappingPriv(pMemory->pMemDesc, pMemory->KernelMapPriv); + + if (bClear) + { + portMemSet(NvP64_VALUE(pMemory->KernelVAddr), 0, pMemory->Length); + } + } + + return NV_OK; +} + +RM_ATTR_PAGE_SIZE +dmaNvos32ToPageSizeAttr +( + NvU32 attr, + NvU32 attr2 +) +{ + switch (DRF_VAL(OS32, _ATTR, _PAGE_SIZE, attr)) + { + case NVOS32_ATTR_PAGE_SIZE_DEFAULT: + return RM_ATTR_PAGE_SIZE_DEFAULT; + case NVOS32_ATTR_PAGE_SIZE_4KB: + return RM_ATTR_PAGE_SIZE_4KB; + 
case NVOS32_ATTR_PAGE_SIZE_BIG: + return RM_ATTR_PAGE_SIZE_BIG; + case NVOS32_ATTR_PAGE_SIZE_HUGE: + switch (DRF_VAL(OS32, _ATTR2, _PAGE_SIZE_HUGE, attr2)) + { + case NVOS32_ATTR2_PAGE_SIZE_HUGE_DEFAULT: + case NVOS32_ATTR2_PAGE_SIZE_HUGE_2MB: + return RM_ATTR_PAGE_SIZE_HUGE; + case NVOS32_ATTR2_PAGE_SIZE_HUGE_512MB: + return RM_ATTR_PAGE_SIZE_512MB; + } + break; + } + + NV_ASSERT_FAILED("Invalid attr and attr2 page size arguments"); + return RM_ATTR_PAGE_SIZE_DEFAULT; +} + +NV_STATUS +memConstructCommon_IMPL +( + Memory *pMemory, + NvU32 categoryClassId, + NvU32 flags, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 heapOwner, + Heap *pHeap, + NvU32 attr, + NvU32 attr2, + NvU32 Pitch, + NvU32 type, + NvU32 tag, + HWRESOURCE_INFO *pHwResource +) +{ + OBJGPU *pGpu = NULL; + NV_STATUS status = NV_OK; + NvHandle hParent = RES_GET_PARENT_HANDLE(pMemory); + NvHandle hMemory = RES_GET_HANDLE(pMemory); + + if (pMemDesc == NULL) + return NV_ERR_INVALID_ARGUMENT; + + // initialize the memory description + pMemory->categoryClassId = categoryClassId; + pMemory->pMemDesc = pMemDesc; + pMemory->Length = pMemDesc->Size; + pMemory->RefCount = 1; + pMemory->HeapOwner = heapOwner; + pMemory->pHeap = pHeap; + pMemory->Attr = attr; + pMemory->Attr2 = attr2; + pMemory->Pitch = Pitch; + pMemory->Type = type; + pMemory->Flags = flags; + pMemory->tag = tag; + pMemory->isMemDescOwner = NV_TRUE; + pMemory->bRpcAlloc = NV_FALSE; + + // We are finished if this instance is device-less + if (pMemory->pDevice == NULL) + { + goto done; + } + + if (pMemDesc->pGpu == NULL) + { + return NV_ERR_INVALID_STATE; + } + + // Memory has hw resources associated with it that need to be tracked. + if ((pHwResource != NULL) && + ((pHwResource->hwResId != 0) || (RES_GET_REF(pMemory)->externalClassId == NV01_MEMORY_HW_RESOURCES))) + { + pMemory->pHwResource = portMemAllocNonPaged(sizeof(HWRESOURCE_INFO)); + if (pMemory->pHwResource != NULL) + { + *pMemory->pHwResource = *pHwResource; // struct copy + pMemory->pHwResource->refCount = 1; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Unable to allocate HWRESOURCE_INFO tracking structure\n"); + status = NV_ERR_NO_MEMORY; + goto done; + } + } + + NV_ASSERT(status == NV_OK); + + // + // Apply attr and flags to the memory descriptor. Ideally all should + // be handled before we get here. 
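+    //
+    // Illustrative sketch (assumed encoding): dmaNvos32ToPageSizeAttr()
+    // above collapses the two NVOS32 attribute words into one enum; a
+    // client asking for 2MB huge pages would have encoded
+    //
+    //   NvU32 attr  = DRF_DEF(OS32, _ATTR,  _PAGE_SIZE,      _HUGE);
+    //   NvU32 attr2 = DRF_DEF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _2MB);
+    //   // dmaNvos32ToPageSizeAttr(attr, attr2) == RM_ATTR_PAGE_SIZE_HUGE
+    //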
+    //
+
+    // Check whether encryption should be enabled
+    if (flags & NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED)
+    {
+        pGpu = pMemDesc->pGpu;
+        SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
+        memdescSetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_ENCRYPTED, NV_TRUE);
+        SLI_LOOP_END
+    }
+
+    if (FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, attr2))
+    {
+        pGpu = pMemDesc->pGpu;
+        SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
+        memdescSetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_USER_READ_ONLY, NV_TRUE);
+        SLI_LOOP_END
+    }
+
+    if (FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY, attr2))
+    {
+        pGpu = pMemDesc->pGpu;
+        SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
+        memdescSetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_DEVICE_READ_ONLY, NV_TRUE);
+        SLI_LOOP_END
+    }
+
+    // setup GpuP2PCacheAttrib
+    switch (DRF_VAL(OS32, _ATTR2, _P2P_GPU_CACHEABLE, attr2))
+    {
+        case NVOS32_ATTR2_P2P_GPU_CACHEABLE_YES:
+            pGpu = pMemDesc->pGpu;
+            SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
+            memdescSetGpuP2PCacheAttrib(memdescGetMemDescFromGpu(pMemDesc, pGpu), NV_MEMORY_CACHED);
+            SLI_LOOP_END
+            break;
+        default:
+            NV_ASSERT(0);
+            /*FALLSTHRU*/
+        case NVOS32_ATTR2_P2P_GPU_CACHEABLE_NO:
+        case NVOS32_ATTR2_P2P_GPU_CACHEABLE_DEFAULT:
+            pGpu = pMemDesc->pGpu;
+            SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
+            memdescSetGpuP2PCacheAttrib(memdescGetMemDescFromGpu(pMemDesc, pGpu), NV_MEMORY_UNCACHED);
+            SLI_LOOP_END
+            break;
+    }
+
+    //
+    // Page size may be specified at allocation. This is for Fermi-family
+    // chips and is a nop for previous generations. At this point the HAL call
+    // to set the page size should never fail as the memory was just allocated.
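+    //
+    // Note on the SLI pattern used throughout this function (sketch with a
+    // hypothetical helper): SLI_LOOP_START/SLI_LOOP_END iterate pGpu over
+    // every GPU of the broadcast device, so per-GPU state stays uniform:
+    //
+    //   SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
+    //       applyPerGpuSetting(pGpu); // pGpu is rebound each iteration
+    //   SLI_LOOP_END
+    //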
+ // + if (pMemDesc->pGpu) + { + pGpu = pMemDesc->pGpu; + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + + RM_ATTR_PAGE_SIZE pageSizeAttr = dmaNvos32ToPageSizeAttr(attr, attr2); + status = memmgrSetMemDescPageSize_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), memdescGetMemDescFromGpu(pMemDesc, pGpu), + AT_GPU, pageSizeAttr); + if (status != NV_OK) + { + SLI_LOOP_BREAK; + } + SLI_LOOP_END + + if (status != NV_OK) + { + goto done; + } + } + + pMemory->Node.keyStart = RES_GET_HANDLE(pMemory); + pMemory->Node.keyEnd = RES_GET_HANDLE(pMemory); + pMemory->Node.Data = pMemory; + + status = btreeInsert(&pMemory->Node, &pMemory->pDevice->DevMemoryTable); + if (status != NV_OK) + goto done; + + // Make GSP-RM aware of the memory descriptor so it can be used there + if (FLD_TEST_DRF(OS32, _ATTR2, _REGISTER_MEMDESC_TO_PHYS_RM, _TRUE, attr2)) + { + status = memRegisterWithGsp(pGpu, RES_GET_CLIENT(pMemory), hParent, hMemory); + if (status != NV_OK) + goto done; + } + + // Initialize the circular list item for tracking dup/sharing of pMemDesc + pMemory->dupListItem.pNext = pMemory->dupListItem.pPrev = pMemory; + +done: + if (status != NV_OK) + { + if (pMemory->pHwResource != NULL) + { + portMemFree(pMemory->pHwResource); + } + } + else + { + pMemory->bConstructed = NV_TRUE; + } + + return status; +} + +NV_STATUS +memRegisterWithGsp_IMPL +( + OBJGPU *pGpu, + RsClient *pClient, + NvHandle hParent, + NvHandle hMemory +) +{ + NV_STATUS status = NV_OK; + Memory *pMemory = NULL; + RsResourceRef *pMemoryRef = NULL; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvU32 hClass; + + // Nothing to do without GSP + if (!IS_FW_CLIENT(pGpu)) + { + return NV_OK; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, clientGetResourceRef(pClient, hMemory, &pMemoryRef)); + + pMemory = dynamicCast(pMemoryRef->pResource, Memory); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pMemory != NULL, NV_ERR_INVALID_OBJECT); + + pMemDesc = pMemory->pMemDesc; + + // Check: memory already registered + if (pMemory->bRegisteredWithGsp) + { + return NV_OK; + } + + // Check: no subdevice memDescs + NV_CHECK_OR_RETURN(LEVEL_ERROR, + !memdescHasSubDeviceMemDescs(pMemDesc), + NV_ERR_INVALID_STATE); + + // Check: SYSMEM or FBMEM only + if (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) + hClass = NV01_MEMORY_LIST_FBMEM; + else if (memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) + hClass = NV01_MEMORY_LIST_SYSTEM; + else + return NV_ERR_INVALID_STATE; + + NvU32 os02Flags = 0; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + RmDeprecatedConvertOs32ToOs02Flags(pMemory->Attr, + pMemory->Attr2, + pMemory->Flags, + &os02Flags)); + NV_RM_RPC_ALLOC_MEMORY(pGpu, + pClient->hClient, + hParent, + hMemory, + hClass, + os02Flags, + pMemDesc, + status); + + if (status == NV_OK) + { + // Mark memory as registered in GSP + pMemory->bRegisteredWithGsp = NV_TRUE; + } + + return status; +} + +static void +_memUnregisterFromGsp +( + Memory *pMemory, + RsClient *pClient, + NvHandle hParent, + NvHandle hMemory +) +{ + NV_STATUS status = NV_OK; + + // Nothing to do without GSP + if ((pMemory->pGpu == NULL) || + !IS_FW_CLIENT(pMemory->pGpu)) + { + return; + } + + // Nothing to do if memory is not registered to GSP + if (!pMemory->bRegisteredWithGsp) + { + return; + } + + NV_RM_RPC_FREE(pMemory->pGpu, + pClient->hClient, + hParent, + hMemory, + status); + + if (status == NV_OK) + { + // Mark memory as not registered in GSP + pMemory->bRegisteredWithGsp = NV_FALSE; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Failed to unregister hMemory 0x%08x from GSP, status 0x%08x\n", + hMemory, 
status); + } +} + +static void +_memDestructCommonWithDevice +( + Memory *pMemory +) +{ + NvHandle hMemory = RES_GET_HANDLE(pMemory); + OBJGPU *pGpu = pMemory->pGpu; + Device *pDevice = pMemory->pDevice; + RsResourceRef *pDeviceRef = RES_GET_REF(pDevice); + NvHandle hDevice = RES_GET_HANDLE(pDevice); + Subdevice *pSubDeviceInfo; + DispCommon *pDispCommon; + RsClient *pRsClient = RES_GET_CLIENT(pMemory); + NV_STATUS status = NV_OK; + RS_ITERATOR subDevIt; + FB_ALLOC_INFO *pFbAllocInfo = NULL; + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL; + + gpuSetThreadBcState(pGpu, pMemory->bBcResource); + + subDevIt = clientRefIter(pRsClient, pDeviceRef, classId(Subdevice), RS_ITERATE_CHILDREN, NV_TRUE); + while (clientRefIterNext(pRsClient, &subDevIt)) + { + pSubDeviceInfo = dynamicCast(subDevIt.pResourceRef->pResource, Subdevice); + + if (hMemory == pSubDeviceInfo->hNotifierMemory) + { + pSubDeviceInfo->hNotifierMemory = NV01_NULL_OBJECT; + pSubDeviceInfo->pNotifierMemory = NULL; + } + } + + dispcmnGetByDevice(pRsClient, hDevice, &pDispCommon); + + // + // Release any FB HW resources + // + if (pMemory->pHwResource && --pMemory->pHwResource->refCount == 0) + { + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + pFbAllocInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO)); + if (pFbAllocInfo == NULL) + { + NV_ASSERT(0); + status = NV_ERR_NO_MEMORY; + goto done; + } + + pFbAllocPageFormat = portMemAllocNonPaged(sizeof(FB_ALLOC_PAGE_FORMAT)); + if (pFbAllocPageFormat == NULL) { + NV_ASSERT(0); + status = NV_ERR_NO_MEMORY; + goto done; + } + + portMemSet(pFbAllocInfo, 0, sizeof(FB_ALLOC_INFO)); + portMemSet(pFbAllocPageFormat, 0, sizeof(FB_ALLOC_PAGE_FORMAT)); + pFbAllocInfo->pageFormat = pFbAllocPageFormat; + + pFbAllocInfo->pageFormat->type = pMemory->Type; + pFbAllocInfo->pageFormat->attr = pMemory->Attr; + pFbAllocInfo->pageFormat->attr2 = pMemory->Attr2; + pFbAllocInfo->hwResId = memdescGetHwResId(pMemory->pMemDesc); + pFbAllocInfo->size = pMemory->Length; + pFbAllocInfo->format = memdescGetPteKind(pMemory->pMemDesc); + pFbAllocInfo->hClient = pRsClient->hClient; + pFbAllocInfo->hDevice = hDevice; + + // + // Note that while freeing duped memory under a device, the + // device may not be the memory owning device. Hence, always use + // memory owning device (pMemDesc->pGpu) to free HW resources. + // + if (pMemory->pHwResource->isVgpuHostAllocated) + { + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. 
+ // + NV_RM_RPC_MANAGE_HW_RESOURCE_FREE(pMemory->pMemDesc->pGpu, + RES_GET_CLIENT_HANDLE(pMemory), + RES_GET_HANDLE(pDevice), + RES_GET_HANDLE(pMemory), + NVOS32_DELETE_RESOURCES_ALL, + status); + } + else + { + status = memmgrFreeHwResources(pMemory->pMemDesc->pGpu, pMemoryManager, pFbAllocInfo); + } + NV_ASSERT(status == NV_OK); + } + + portMemFree(pMemory->pHwResource); + pMemory->pHwResource = NULL; + } + + NV_ASSERT_OK_OR_GOTO(status, btreeUnlink(&pMemory->Node, &pDevice->DevMemoryTable), done); + + pMemory->pMemDesc->DupCount--; + + // Choose the new owner + if (pMemory->isMemDescOwner) + { + (pMemory->dupListItem.pNext)->isMemDescOwner = NV_TRUE; + } + // Remove from circular list tracking dup/sharing of pMemDesc + pMemory->dupListItem.pPrev->dupListItem.pNext = pMemory->dupListItem.pNext; + pMemory->dupListItem.pNext->dupListItem.pPrev = pMemory->dupListItem.pPrev; + pMemory->dupListItem.pNext = pMemory->dupListItem.pPrev = NULL; + + pMemory->bConstructed = NV_FALSE; + +done: + portMemFree(pFbAllocPageFormat); + portMemFree(pFbAllocInfo); + + // The unmap call(s) above may have changed the broadcast state so restore it here + gpuSetThreadBcState(pGpu, pMemory->bBcResource); +} + +void +memDestructCommon_IMPL +( + Memory *pMemory +) +{ + RsResourceRef *pResourceRef = RES_GET_REF(pMemory); + RsResourceRef *pParentRef = pResourceRef->pParentRef; + RsClient *pClient = RES_GET_CLIENT(pMemory); + NvHandle hParent = pParentRef->hResource; + NvHandle hMemory = RES_GET_HANDLE(pMemory); + + if (!pMemory->bConstructed) + return; + + _memUnregisterFromGsp(pMemory, pClient, hParent, hMemory); + + // Do device specific teardown if we have a device + if (pMemory->pDevice != NULL) + { + _memDestructCommonWithDevice(pMemory); + } + else + { + pMemory->bConstructed = NV_FALSE; + } + + if (pMemory->KernelVAddr != NvP64_NULL) + { + memdescUnmap(pMemory->pMemDesc, NV_TRUE, + pMemory->KernelVAddr, pMemory->KernelMapPriv); + pMemory->KernelVAddr = NvP64_NULL; + pMemory->KernelMapPriv = NvP64_NULL; + } +} + +NV_STATUS +memGetByHandleAndDevice_IMPL +( + RsClient *pClient, + NvHandle hMemory, + NvHandle hDevice, + Memory **ppMemory +) +{ + NV_STATUS status; + + status = memGetByHandle(pClient, hMemory, ppMemory); + if (status != NV_OK) + return status; + + if (hDevice != RES_GET_HANDLE((*ppMemory)->pDevice)) + { + *ppMemory = NULL; + return NV_ERR_OBJECT_NOT_FOUND; + } + + return NV_OK; +} + +NV_STATUS +memGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hMemory, + Memory **ppMemory +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppMemory = NULL; + + status = clientGetResourceRef(pClient, hMemory, &pResourceRef); + if (status != NV_OK) + return status; + + *ppMemory = dynamicCast(pResourceRef->pResource, Memory); + + if (*ppMemory == NULL) + return NV_ERR_INVALID_OBJECT_HANDLE; + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(*ppMemory, NV_FALSE)); + + return NV_OK; +} + +NV_STATUS +memGetByHandleAndGroupedGpu_IMPL +( + RsClient *pClient, + NvHandle hMemory, + OBJGPU *pGpu, + Memory **ppMemory +) +{ + Memory *pMemory; + NV_STATUS status; + Device *pDevice; + + status = memGetByHandle(pClient, hMemory, &pMemory); + if (status != NV_OK) + return status; + + pDevice = pMemory->pDevice; + + if ((pDevice == NULL) || + (gpumgrGetParentGPU(pGpu) != GPU_RES_GET_GPU(pDevice))) + { + *ppMemory = NULL; + return NV_ERR_OBJECT_NOT_FOUND; + } + + *ppMemory = pMemory; + return NV_OK; +} + +NV_STATUS +memIsReady_IMPL +( + Memory *pMemory, + NvBool bCopyConstructorContext +) +{ + if (pMemory->pMemDesc == 
NULL) + return NV_ERR_INVALID_OBJECT; + + return NV_OK; +} + +NV_STATUS +memControl_IMPL +( + Memory *pMemory, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(pMemory, NV_FALSE)); + + return resControl_IMPL(staticCast(pMemory, RsResource), pCallContext, pParams); +} + +NV_STATUS +memCopyConstruct_IMPL +( + Memory *pMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsClient *pDstClient = pCallContext->pClient; + RsClient *pSrcClient = pParams->pSrcClient; + RsResourceRef *pDstRef = pCallContext->pResourceRef; + RsResourceRef *pSrcRef = pParams->pSrcRef; + Memory *pMemorySrc = dynamicCast(pSrcRef->pResource, Memory); + Memory *pMemoryDst = pMemory; + OBJGPU *pSrcGpu = NULL; + OBJGPU *pDstGpu = NULL; + NV_STATUS status = NV_OK; + NvBool bReleaseGpuLock = NV_FALSE; + Device *pSrcDevice = NULL; + Device *pDstDevice = NULL; + Subdevice *pSrcSubDevice = NULL; + Subdevice *pDstSubDevice = NULL; + RsResourceRef *pSrcParentRef = pSrcRef->pParentRef; + RsResourceRef *pDstParentRef = pDstRef->pParentRef; + + NV_ASSERT_OR_RETURN(pSrcParentRef != NULL, NV_ERR_INVALID_OBJECT_PARENT); + NV_ASSERT_OR_RETURN(pDstParentRef != NULL, NV_ERR_INVALID_OBJECT_PARENT); + NV_ASSERT_OR_RETURN(pMemorySrc != NULL, NV_ERR_INVALID_OBJECT_HANDLE); + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(pMemorySrc, NV_TRUE)); + + // + // Must return early when parent is Client. + // This copy constructor is very device-specific so it is up + // to the device-less Memory subclasses to define their own dup behavior. + // + if (RES_GET_CLIENT_HANDLE(pMemoryDst) == RES_GET_PARENT_HANDLE(pMemoryDst)) + { + NV_CHECK_OR_RETURN(LEVEL_ERROR, + RES_GET_CLIENT_HANDLE(pMemorySrc) == RES_GET_PARENT_HANDLE(pMemorySrc), + NV_ERR_INVALID_OBJECT_PARENT); + return NV_OK; + } + + pSrcGpu = pMemorySrc->pGpu; + pDstGpu = pMemoryDst->pGpu; + pSrcDevice = pMemorySrc->pDevice; + pDstDevice = pMemoryDst->pDevice; + pSrcSubDevice = pMemorySrc->pSubDevice; + pDstSubDevice = pMemoryDst->pSubDevice; + + // Only children of device are supported + NV_ASSERT_OR_RETURN(pSrcDevice != NULL, NV_ERR_INVALID_OBJECT_PARENT); + NV_ASSERT_OR_RETURN(pDstDevice != NULL, NV_ERR_INVALID_OBJECT_PARENT); + + if (!!pSrcSubDevice != !!pDstSubDevice) + { + NV_PRINTF(LEVEL_INFO, "Parent type mismatch between Src and Dst objects" + "Both should be either device or subDevice\n"); + return NV_ERR_INVALID_OBJECT_PARENT; + } + + // RS-TODO: This should use pMemorySrc->bBcResource when adding full support for subdevice duping + gpuSetThreadBcState(pSrcGpu, NV_TRUE); + + if (!rmGpuLockIsOwner() && + !(rmDeviceGpuLockIsOwner(pSrcGpu->gpuInstance) && + rmDeviceGpuLockIsOwner(pDstGpu->gpuInstance))) + { + // LOCK: acquire GPUs lock + if ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM)) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to acquire GPU locks, error 0x%x\n", status); + return status; + } + + bReleaseGpuLock = NV_TRUE; + } + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memCheckCopyPermissions(pMemorySrc, pDstGpu, pDstDevice), done); + + // Initialize Memory + pMemoryDst->categoryClassId = pMemorySrc->categoryClassId; + pMemoryDst->Length = pMemorySrc->Length; + pMemoryDst->HeapOwner = pMemorySrc->HeapOwner; + pMemoryDst->pHeap = pMemorySrc->pHeap; + pMemoryDst->pMemDesc = pMemorySrc->pMemDesc; + pMemoryDst->KernelVAddr = NvP64_NULL; + pMemoryDst->KernelMapPriv = NvP64_NULL; + pMemoryDst->Attr = pMemorySrc->Attr; + pMemoryDst->Attr2 = 
pMemorySrc->Attr2; + pMemoryDst->Pitch = pMemorySrc->Pitch; + pMemoryDst->Type = pMemorySrc->Type; + pMemoryDst->Flags = pMemorySrc->Flags; + pMemoryDst->tag = pMemorySrc->tag; + pMemoryDst->pHwResource = pMemorySrc->pHwResource; + pMemoryDst->isMemDescOwner = NV_FALSE; + pMemoryDst->bRpcAlloc = pMemorySrc->bRpcAlloc; + + // Link in the new device memory mapping + pMemoryDst->Node.keyStart = RES_GET_HANDLE(pMemoryDst); + pMemoryDst->Node.keyEnd = RES_GET_HANDLE(pMemoryDst); + pMemoryDst->Node.Data = pMemoryDst; + + status = btreeInsert(&pMemoryDst->Node, &pDstDevice->DevMemoryTable); + if (status != NV_OK) + goto done; + + { + OBJGPU *pGpu = pDstGpu; // Need pGpu for SLI loop + + gpuSetThreadBcState(pDstGpu, NV_TRUE); + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + if (memdescGetPageSize(memdescGetMemDescFromGpu(pMemoryDst->pMemDesc, pGpu), AT_GPU) == 0) + { + status = memmgrSetMemDescPageSize_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), + memdescGetMemDescFromGpu(pMemoryDst->pMemDesc, pGpu), + AT_GPU, RM_ATTR_PAGE_SIZE_DEFAULT); + NV_ASSERT(status == NV_OK); + } + SLI_LOOP_END + } + + // + // ref-count increments for shared structs after all places where we + // could return early. + // + if (pMemoryDst->pHwResource != NULL) + pMemoryDst->pHwResource->refCount++; + + memdescAddRef(pMemoryDst->pMemDesc); + pMemoryDst->pMemDesc->DupCount++; + if (pMemoryDst->pMemDesc->Allocated) + pMemoryDst->pMemDesc->Allocated++; + + // Insert pMemoryDst after pMemorySrc in circular list to track dup/sharing of pMemDesc + pMemoryDst->dupListItem.pNext = pMemorySrc->dupListItem.pNext; + pMemoryDst->dupListItem.pPrev = pMemorySrc; + pMemorySrc->dupListItem.pNext = pMemoryDst; + pMemoryDst->dupListItem.pNext->dupListItem.pPrev = pMemoryDst; + +done: + + // If the original allocation was RPCed, also send the Dup. 
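+    //
+    // Illustrative sketch (hypothetical handles): this whole path is driven
+    // from the client side by a dup call such as
+    //
+    //   status = pRmApi->DupObject(pRmApi, hDstClient, hDstDevice, &hDstMemory,
+    //                              hSrcClient, hSrcMemory, 0);
+    //
+    // which resource server routes into memCopyConstruct_IMPL() with
+    // pSrcRef/pDstRef already resolved.
+    //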
+ if (pMemory->bRpcAlloc && (IS_VIRTUAL(pSrcGpu) || IS_FW_CLIENT(pSrcGpu))) + { + NV_RM_RPC_DUP_OBJECT(pSrcGpu, pDstClient->hClient, pDstParentRef->hResource, pDstRef->hResource, + pSrcClient->hClient, pSrcRef->hResource, 0, + NV_FALSE, // do not automatically issue RPC_FREE on object free + NULL, + status); + NV_ASSERT(status == NV_OK); + } + + // UNLOCK: release GPUs lock + if (bReleaseGpuLock) + { + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + pMemory->bConstructed = (status == NV_OK); + return status; +} + +NV_STATUS +memGetMemInterMapParams_IMPL +( + Memory *pMemory, + RMRES_MEM_INTER_MAP_PARAMS *pParams +) +{ + OBJGPU *pGpu = pParams->pGpu; + RsResourceRef *pMemoryRef = pParams->pMemoryRef; + + MEMORY_DESCRIPTOR *pSrcMemDesc = pMemory->pMemDesc; + Device *pDevice; + Subdevice *pSubdevice; + NvBool bcState = gpumgrGetBcEnabledStatus(pGpu); + + // Don't expect to use default, but safe thing to do is set src=dest + NvHandle hMemoryDevice = 0; + OBJGPU *pSrcGpu = pGpu; + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(pMemory, NV_FALSE)); + + if (pMemoryRef->pParentRef != NULL) + { + pDevice = dynamicCast(pMemoryRef->pParentRef->pResource, Device); + if (pDevice != NULL) + { + pSrcGpu = GPU_RES_GET_GPU(pDevice); + hMemoryDevice = RES_GET_HANDLE(pDevice); + GPU_RES_SET_THREAD_BC_STATE(pDevice); + } + else + { + pSubdevice = dynamicCast(pMemoryRef->pParentRef->pResource, Subdevice); + if (pSubdevice != NULL) + { + pSrcGpu = GPU_RES_GET_GPU(pSubdevice); + hMemoryDevice = RES_GET_HANDLE(pSubdevice->pDevice); + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + } + } + } + + pParams->pSrcGpu = pSrcGpu; + pParams->hMemoryDevice = hMemoryDevice; + + // + // Restore pGpu's bcState in case it was overwritten above (i.e., + // the case that hMemoryDevice and hBroadcastDevice are the same + // device, but a unicast mapping was desired). + // + gpumgrSetBcEnabledStatus(pGpu, bcState); + + pParams->pSrcMemDesc = pSrcMemDesc; + + return NV_OK; +} + +NV_STATUS +memGetMemoryMappingDescriptor_IMPL +( + Memory *pMemory, + MEMORY_DESCRIPTOR **ppMemDesc +) +{ + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(pMemory, NV_FALSE)); + if (pMemory->pGpu != NULL) + { + *ppMemDesc = memdescGetMemDescFromGpu(pMemory->pMemDesc, pMemory->pGpu); + } + else + { + *ppMemDesc = pMemory->pMemDesc; + } + return NV_OK; +} + +NV_STATUS +memIsDuplicate_IMPL +( + Memory *pMemory, + NvHandle hMemory, + NvBool *pDuplicate +) +{ + RsClient *pClient = RES_GET_CLIENT(pMemory); + Memory *pMemory1; + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + memIsReady(pMemory, NV_FALSE)); + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + memGetByHandle(pClient, hMemory, &pMemory1)); + + // + // Do not dereference pMemdesc here. We only take RMAPI RO lock and + // client lock in this context. + // + + *pDuplicate = (pMemory->pMemDesc == pMemory1->pMemDesc); + + return NV_OK; +} + +void memSetSysmemCacheAttrib_IMPL +( + OBJGPU *pGpu, + NV_MEMORY_ALLOCATION_PARAMS *pAllocData, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU32 gpuCacheAttrib, cpuCacheAttrib; + + NV_ASSERT((memdescGetAddressSpace(pMemDesc) == ADDR_EGM) || + (memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM)); + + // + // For system memory default to GPU uncached. GPU caching is different from + // the expected default memory model since it is not coherent. Clients must + // understand this and handle any coherency requirements explicitly. 
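+    //
+    // Illustrative sketch (assumed encoding): a client that wants GPU-cached,
+    // CPU-write-combined system memory would have requested
+    //
+    //   attr  = FLD_SET_DRF(OS32, _ATTR,  _COHERENCY,     _WRITE_COMBINE, attr);
+    //   attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _YES,           attr2);
+    //
+    // and the decode below then yields NV_MEMORY_WRITECOMBINED for the CPU
+    // mapping and NV_MEMORY_CACHED for the GPU.
+    //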
+ // + if (DRF_VAL(OS32, _ATTR2, _GPU_CACHEABLE, pAllocData->attr2) == + NVOS32_ATTR2_GPU_CACHEABLE_DEFAULT) + { + pAllocData->attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, + pAllocData->attr2); + } + + if (DRF_VAL(OS32, _ATTR2, _GPU_CACHEABLE, pAllocData->attr2) == + NVOS32_ATTR2_GPU_CACHEABLE_YES) + { + gpuCacheAttrib = NV_MEMORY_CACHED; + } + else + { + gpuCacheAttrib = NV_MEMORY_UNCACHED; + } + + if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_UNCACHED) + cpuCacheAttrib = NV_MEMORY_UNCACHED; + else if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_CACHED) + cpuCacheAttrib = NV_MEMORY_CACHED; + else if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_WRITE_COMBINE) + cpuCacheAttrib = NV_MEMORY_WRITECOMBINED; + else if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_WRITE_THROUGH) + cpuCacheAttrib = NV_MEMORY_CACHED; + else if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_WRITE_PROTECT) + cpuCacheAttrib = NV_MEMORY_CACHED; + else if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_WRITE_BACK) + cpuCacheAttrib = NV_MEMORY_CACHED; + else + cpuCacheAttrib = 0; + + ct_assert(NVOS32_ATTR_COHERENCY_UNCACHED == NVOS02_FLAGS_COHERENCY_UNCACHED); + ct_assert(NVOS32_ATTR_COHERENCY_CACHED == NVOS02_FLAGS_COHERENCY_CACHED); + ct_assert(NVOS32_ATTR_COHERENCY_WRITE_COMBINE == NVOS02_FLAGS_COHERENCY_WRITE_COMBINE); + ct_assert(NVOS32_ATTR_COHERENCY_WRITE_THROUGH == NVOS02_FLAGS_COHERENCY_WRITE_THROUGH); + ct_assert(NVOS32_ATTR_COHERENCY_WRITE_PROTECT == NVOS02_FLAGS_COHERENCY_WRITE_PROTECT); + ct_assert(NVOS32_ATTR_COHERENCY_WRITE_BACK == NVOS02_FLAGS_COHERENCY_WRITE_BACK); + + memdescSetCpuCacheAttrib(pMemDesc, cpuCacheAttrib); + memdescSetGpuCacheAttrib(pMemDesc, gpuCacheAttrib); + + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_NON_IO_COHERENT, + !memmgrIsMemoryIoCoherent(pGpu, pMemoryManager, pAllocData)); +} + +NV_STATUS +memSetGpuCacheSnoop_IMPL +( + OBJGPU *pGpu, + NvU32 attr, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + MemoryManager *pMemoryManager; + NV_STATUS status = NV_OK; + // Assume platform is fully coherent to cover nodevicemem case. + NvBool bPlatformFullyCoherent = NV_TRUE; + + if (pGpu != NULL) + { + pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + bPlatformFullyCoherent = pMemoryManager->bPlatformFullyCoherent; + } + + // GPU cache snooping is a property of fully coherent platforms. + if (bPlatformFullyCoherent) + { + // + // The default allows the client to defer the choice to mapping time. + // This matches old RM behavior. + // + if (FLD_TEST_DRF(OS32, _ATTR, _GPU_CACHE_SNOOPABLE, _MAPPING, attr)) + { + memdescSetGpuCacheSnoop(pMemDesc, MEMDESC_CACHE_SNOOP_DEFER_TO_MAP); + } + else if (FLD_TEST_DRF(OS32, _ATTR, _GPU_CACHE_SNOOPABLE, _ON, attr)) + { + memdescSetGpuCacheSnoop(pMemDesc, MEMDESC_CACHE_SNOOP_ENABLE); + } + else if (FLD_TEST_DRF(OS32, _ATTR, _GPU_CACHE_SNOOPABLE, _OFF, attr)) + { + memdescSetGpuCacheSnoop(pMemDesc, MEMDESC_CACHE_SNOOP_DISABLE); + } + else + { + status = NV_ERR_INVALID_ARGUMENT; + } + } + + return status; +} diff --git a/src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h b/src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h new file mode 100644 index 0000000..cc0253c --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h @@ -0,0 +1,38 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _MM_INTERNAL_H_ +#define _MM_INTERNAL_H_ + +// +// Don't define deprecated definitions for RM MM implementations +// +#define RM_STRICT_SUPPRESS_DEPRECATED_DEFINITIONS_VER_JAN_21_2020 + +// +// MM API runs within VGPU guest/GSP client. Don't allow direct access to +// physical engine objects/definitions. +// +#define RM_STRICT_SUPPRESS_PHYSICAL_DEFINITIONS_VER_JAN_21_2020 + +#endif // _MM_INTERNAL_H_ diff --git a/src/nvidia/src/kernel/mem_mgr/os_desc_mem.c b/src/nvidia/src/kernel/mem_mgr/os_desc_mem.c new file mode 100644 index 0000000..16d3e25 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/os_desc_mem.c @@ -0,0 +1,258 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "mem_mgr_internal.h" +#include "mem_mgr/os_desc_mem.h" +#include "rmapi/client.h" +#include "rmapi/mapping_list.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "os/os.h" +#include "vgpu/rpc.h" +#include "mem_mgr/mem.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "deprecated/rmapi_deprecated.h" +#include "vgpu/vgpu_util.h" + +#include "class/cl0071.h" // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR + +NV_STATUS +osdescConstruct_IMPL +( + OsDescMemory *pOsDescMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams +) +{ + Memory *pMemory = staticCast(pOsDescMemory, Memory); + NV_OS_DESC_MEMORY_ALLOCATION_PARAMS *pUserParams; + OBJGPU *pGpu = pMemory->pGpu; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NV_STATUS status; + NvU64 limit; + NvU32 os02Flags; + NvHandle hClient = pCallContext->pClient->hClient; + NvHandle hParent = pCallContext->pResourceRef->pParentRef->hResource; + NvHandle hMemory = pCallContext->pResourceRef->hResource; + RsResourceRef *pResourceRef = RES_GET_REF(pMemory); + RsCpuMapping *pCpuMapping = NULL; + NvU32 flags = 0; + NvU32 kind = 0; + FB_ALLOC_PAGE_FORMAT fbAllocPageFormat = {0}; + + + // Copy-construction has already been done by the base Memory class + if (RS_IS_COPY_CTOR(pRmAllocParams)) + return NV_OK; + + pUserParams = pRmAllocParams->pAllocParams; + + limit = pUserParams->limit; + + // + // Bug 860684: osCreateMemFromOsDescriptor expects OS02 flags + // from the old NvRmAllocMemory64() interface so we need to + // translate the OS32_ATTR flags to OS02 flags. + // + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + RmDeprecatedConvertOs32ToOs02Flags(pUserParams->attr, + pUserParams->attr2, + pUserParams->flags, + &os02Flags)); + + // Only kernel user is allowed to register physical address with RM + if (pUserParams->descriptorType == NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR) + { + if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL) + { + return NV_ERR_NOT_SUPPORTED; + } + } + + if (pUserParams->descriptorType == NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY) + { + // + // We currently allow RmMapMemory on external IO resources which are + // safe to share across processes. + // + // Otherwise we would be affected by the security issues like Bug 1630288. + // + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _MAPPING, _NEVER_MAP, os02Flags); + + // + // Force peerMappingOverride check for IO memory registration through + // RmVidHeapCtrl. See Bug 1630288 "[PeerSync] threat related to GPU.." for + // more details. + // + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _PEER_MAP_OVERRIDE, _REQUIRED, os02Flags); + } + + if (pUserParams->type == NVOS32_TYPE_SYNCPOINT) + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _ALLOC_TYPE_SYNCPOINT, _APERTURE, os02Flags); + } + + // + // Create and fill in the memory descriptor based on the current + // state of the OS descriptor. 
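+    //
+    // Illustrative sketch (hypothetical values; Alloc as exposed by the
+    // RM_API table): a kernel-mode client reaches this path by allocating
+    // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR over caller-owned memory:
+    //
+    //   NV_OS_DESC_MEMORY_ALLOCATION_PARAMS params = {0};
+    //   params.type           = NVOS32_TYPE_IMAGE;
+    //   params.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR;
+    //   params.descriptor     = physAddr;   // NvP64 physical address
+    //   params.limit          = size - 1;   // limit is inclusive
+    //   status = pRmApi->Alloc(pRmApi, hClient, hDevice, &hMemory,
+    //                          NV01_MEMORY_SYSTEM_OS_DESCRIPTOR,
+    //                          &params, sizeof(params));
+    //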
+ // + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + osCreateMemFromOsDescriptor(pGpu, + pUserParams->descriptor, + hClient, + os02Flags, + &limit, + &pMemDesc, + pUserParams->descriptorType, + pRmAllocParams->pSecInfo->privLevel)); + + if (!memdescGetContiguity(pMemDesc, AT_PA)) + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _PHYSICALITY, _NONCONTIGUOUS, os02Flags); + pUserParams->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, pUserParams->attr); + } + + // Allow user space mapping support only for FILE_HANDLE type OS_DESCRIPTOR + if (pUserParams->descriptorType == NVOS32_DESCRIPTOR_TYPE_OS_FILE_HANDLE) + { + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_ALLOW_EXT_SYSMEM_USER_CPU_MAPPING, NV_TRUE); + } + + if (pMemoryManager->bAllowSysmemHugePages && pMemDesc->bForceHugePages) + { + pUserParams->attr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, pUserParams->attr); + pUserParams->attr2 = FLD_SET_DRF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _DEFAULT, pUserParams->attr2); + } + + NV_CHECK_OK_OR_GOTO(status, LEVEL_INFO, + memSetGpuCacheSnoop(pGpu, pUserParams->attr, pMemDesc), failure); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_INFO, + memConstructCommon(pMemory, NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, pUserParams->flags, + pMemDesc, 0, NULL, pUserParams->attr, pUserParams->attr2, 0, 0, + pUserParams->tag, (HWRESOURCE_INFO *)NULL), + failure); + + flags = FLD_SET_DRF(OS33, _FLAGS, _OS_DESCRIPTOR, _ENABLE, flags); + RS_CPU_MAP_PARAMS dummyParams; + portMemSet(&dummyParams, 0, sizeof(dummyParams)); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + refAddMapping(pResourceRef, &dummyParams, pResourceRef->pParentRef, &pCpuMapping), + failure); + + NV_ASSERT_OK_OR_GOTO(status, + CliUpdateMemoryMappingInfo(pCpuMapping, + pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL, + pUserParams->descriptor, NvP64_NULL, + limit+1, flags), + failure); + pCpuMapping->pPrivate->pGpu = pGpu; + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. 
+ // + if (IS_VIRTUAL(pGpu)) + { + NV_RM_RPC_ALLOC_MEMORY(pGpu, + hClient, + hParent, + hMemory, + NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, + os02Flags, + pMemDesc, + status); + NV_CHECK_OK_OR_GOTO(status, LEVEL_INFO, status, failure); + + pMemory->bRpcAlloc = NV_TRUE; + NV_ASSERT_OK_OR_GOTO(status, vgpuUpdateGuestSysmemPfnBitMap(pGpu, pMemDesc, NV_TRUE), failure); + } + + fbAllocPageFormat.flags = pUserParams->flags; + fbAllocPageFormat.type = pUserParams->type; + fbAllocPageFormat.attr = pUserParams->attr; + fbAllocPageFormat.attr2 = pUserParams->attr2; + + // memmgrChooseKind will select kind based on format + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memmgrChooseKind_HAL(pGpu, pMemoryManager, &fbAllocPageFormat, + DRF_VAL(OS32, _ATTR, _COMPR, fbAllocPageFormat.attr), &kind), + failure); + + if (!memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_SUPPORTED, kind)) + { + NV_PRINTF(LEVEL_ERROR, + "memmgrChooseKind_HAL() return (%d) kind(%x).\n", + status, kind); + goto failure; + } + + memdescSetPteKind(pMemDesc, kind); + + return status; + +failure: + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + + return status; +} + +void +osdescDestruct_IMPL +( + OsDescMemory *pOsDescMemory +) +{ + Memory *pMemory = staticCast(pOsDescMemory, Memory); + OBJGPU *pGpu = pMemory->pGpu; + MEMORY_DESCRIPTOR *pMemDesc = pMemory->pMemDesc; + + if (pMemDesc->DupCount == 1) + { + if (pMemDesc->RefCount > 1) + { + NV_ASSERT_FAILED("Destroying memdesc but not all refs destroyed!\n"); + } + + if (IS_VIRTUAL(pGpu)) + { + NV_ASSERT_OR_RETURN_VOID(vgpuUpdateGuestSysmemPfnBitMap(pGpu, pMemDesc, NV_FALSE) == NV_OK); + } + } +} + +NvBool +osdescCanCopy_IMPL +( + OsDescMemory *pOsDescMemory +) +{ + // In case of MODS the caller is responsible for not freeing the memory. + return (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_PLATFORM_MODS); +} + diff --git a/src/nvidia/src/kernel/mem_mgr/standard_mem.c b/src/nvidia/src/kernel/mem_mgr/standard_mem.c new file mode 100644 index 0000000..f27f508 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/standard_mem.c @@ -0,0 +1,235 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "mem_mgr_internal.h"
+#include "mem_mgr/standard_mem.h"
+#include "vgpu/rpc.h"
+#include "rmapi/client.h"
+#include "gpu/mem_mgr/mem_mgr.h"
+#include "virtualization/hypervisor/hypervisor.h"
+#include "resserv/rs_server.h"
+#include "rmapi/rs_utils.h"
+
+NV_STATUS stdmemValidateParams
+(
+    OBJGPU                      *pGpu,
+    RmClient                    *pRmClient,
+    NV_MEMORY_ALLOCATION_PARAMS *pAllocData
+)
+{
+    RS_PRIV_LEVEL privLevel;
+    CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
+
+    NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM || RMCFG_FEATURE_PLATFORM_GSP, NV_ERR_NOT_SUPPORTED);
+
+    NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE);
+    privLevel = pCallContext->secInfo.privLevel;
+
+    //
+    // Make sure the UMD does not impact the internal allocation flags.
+    // Do this check right after copy-in; RM is free to set these flags later.
+    //
+    if ((privLevel < RS_PRIV_LEVEL_KERNEL) &&
+        (pAllocData->internalflags != 0))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    //
+    // These flags don't do anything in this path. No mapping on alloc, and
+    // kernel mapping is controlled by TYPE.
+    //
+    pAllocData->flags |= NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED;
+    pAllocData->flags &= ~NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP;
+
+    pAllocData->address = NvP64_NULL;
+
+    //
+    // Check the PAGE_OFFLINING flag for the client.
+    // If the client is not a ROOT client, turning PAGE_OFFLINING OFF is invalid.
+    //
+    if (FLD_TEST_DRF(OS32, _ATTR2, _PAGE_OFFLINING, _OFF, pAllocData->attr2))
+    {
+        if (!hypervisorIsVgxHyper())
+        {
+            // if the requesting client is not kernel mode, return early
+#if defined(DEBUG) || defined(DEVELOP) || defined(NV_VERIF_FEATURES)
+            if (!rmclientIsAdmin(pRmClient, privLevel))
+#else
+            if (privLevel < RS_PRIV_LEVEL_KERNEL)
+#endif
+            {
+                return NV_ERR_INSUFFICIENT_PERMISSIONS;
+            }
+        }
+    }
+
+    //
+    // Check if type implies ISO requirement and set _ISO attribute.
+    // MM-TODO: Eventually, we should decouple NVOS32_TYPE from conveying
+    // ISO behavior (Bug 1896562).
+    //
+    if ((pAllocData->type == NVOS32_TYPE_PRIMARY) ||
+        (pAllocData->type == NVOS32_TYPE_VIDEO) ||
+        (pAllocData->type == NVOS32_TYPE_CURSOR))
+    {
+        pAllocData->attr2 = FLD_SET_DRF(OS32, _ATTR2, _ISO, _YES,
+                                        pAllocData->attr2);
+    }
+
+    if (!(pAllocData->flags & NVOS32_ALLOC_FLAGS_USE_BEGIN_END))
+    {
+        NV_ASSERT_OR_RETURN((pAllocData->rangeLo == 0) &&
+                            (pAllocData->rangeHi == 0), NV_ERR_INVALID_ARGUMENT);
+    }
+    NV_PRINTF(LEVEL_INFO, "MMU_PROFILER Attr 0x%x Type 0x%x Attr2 0x%x\n",
+              pAllocData->attr, pAllocData->type, pAllocData->attr2);
+
+    // Make sure that encryption is supported if it is requested
+    if ((pAllocData->flags & NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED) &&
+        DRF_VAL(OS32, _ATTR, _LOCATION, pAllocData->attr) == NVOS32_ATTR_LOCATION_VIDMEM)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Encryption requested for video memory on a non-0FB chip\n");
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    if (FLD_TEST_DRF(OS32, _ATTR2, _ALLOCATE_FROM_SUBHEAP, _YES, pAllocData->attr2))
+    {
+        NV_CHECK_OR_RETURN(LEVEL_ERROR, FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, pAllocData->attr),
+                           NV_ERR_INVALID_ARGUMENT);
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    //
+    // When a sparsified VA range is requested by the client, RM constructs
+    // the page tables during the VirtMem construct call, causing the lazy
+    // flag to skip memory reservation. This can cause RM to OOM if the
+    // memPool reserved memory is exhausted.
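+    //
+    // Sketch of the net effect (assumed request): for
+    //   flags = NVOS32_ALLOC_FLAGS_VIRTUAL | NVOS32_ALLOC_FLAGS_SPARSE
+    //         | NVOS32_ALLOC_FLAGS_LAZY;
+    // the check below strips the lazy bit, i.e.
+    //   flags &= ~NVOS32_ALLOC_FLAGS_LAZY;
+    // so the page-table backing is reserved up front.
+    //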
+    //
+    if ((pAllocData->flags & NVOS32_ALLOC_FLAGS_SPARSE) &&
+        (pAllocData->flags & NVOS32_ALLOC_FLAGS_VIRTUAL))
+    {
+        pAllocData->flags &= ~NVOS32_ALLOC_FLAGS_LAZY;
+    }
+
+    return NV_OK;
+}
+
+void stdmemDumpInputAllocParams
+(
+    NV_MEMORY_ALLOCATION_PARAMS *pAllocData,
+    CALL_CONTEXT                *pCallContext
+)
+{
+    NV_PRINTF(LEVEL_INFO, "stdmemConstruct input\n");
+    NV_PRINTF(LEVEL_INFO, "    Owner:      0x%x\n",     pAllocData->owner);
+    NV_PRINTF(LEVEL_INFO, "    hMemory:    0x%x\n",     pCallContext->pResourceRef->hResource);
+    NV_PRINTF(LEVEL_INFO, "    Type:       0x%x\n",     pAllocData->type);
+    NV_PRINTF(LEVEL_INFO, "    Flags:      0x%x\n",     pAllocData->flags);
+    NV_PRINTF(LEVEL_INFO, "    Begin:      0x%08llx\n", pAllocData->rangeLo);
+    NV_PRINTF(LEVEL_INFO, "    End:        0x%08llx\n", pAllocData->rangeHi);
+    NV_PRINTF(LEVEL_INFO, "    Height:     0x%x\n",     pAllocData->height);
+    NV_PRINTF(LEVEL_INFO, "    Width:      0x%x\n",     pAllocData->width);
+    NV_PRINTF(LEVEL_INFO, "    Pitch:      0x%x\n",     pAllocData->pitch);
+    NV_PRINTF(LEVEL_INFO, "    Size:       0x%08llx\n", pAllocData->size);
+    NV_PRINTF(LEVEL_INFO, "    Alignment:  0x%08llx\n", pAllocData->alignment);
+    NV_PRINTF(LEVEL_INFO, "    Offset:     0x%08llx\n", pAllocData->offset);
+    NV_PRINTF(LEVEL_INFO, "    Attr:       0x%x\n",     pAllocData->attr);
+    NV_PRINTF(LEVEL_INFO, "    Attr2:      0x%x\n",     pAllocData->attr2);
+    NV_PRINTF(LEVEL_INFO, "    Format:     0x%x\n",     pAllocData->format);
+    NV_PRINTF(LEVEL_INFO, "    ComprCovg:  0x%x\n",     pAllocData->comprCovg);
+    NV_PRINTF(LEVEL_INFO, "    ZCullCovg:  0x%x\n",     pAllocData->zcullCovg);
+    NV_PRINTF(LEVEL_INFO, "    CtagOffset: 0x%x\n",     pAllocData->ctagOffset);
+    NV_PRINTF(LEVEL_INFO, "    hVASpace:   0x%x\n",     pAllocData->hVASpace);
+    NV_PRINTF(LEVEL_INFO, "    tag:        0x%x\n",     pAllocData->tag);
+}
+
+void stdmemDumpOutputAllocParams
+(
+    NV_MEMORY_ALLOCATION_PARAMS *pAllocData
+)
+{
+    NV_PRINTF(LEVEL_INFO, "stdmemConstruct output\n");
+    NV_PRINTF(LEVEL_INFO, "    Height:     0x%x\n",     pAllocData->height);
+    NV_PRINTF(LEVEL_INFO, "    Width:      0x%x\n",     pAllocData->width);
+    NV_PRINTF(LEVEL_INFO, "    Pitch:      0x%x\n",     pAllocData->pitch);
+    NV_PRINTF(LEVEL_INFO, "    Size:       0x%08llx\n", pAllocData->size);
+    NV_PRINTF(LEVEL_INFO, "    Alignment:  0x%08llx\n", pAllocData->alignment);
+    NV_PRINTF(LEVEL_INFO, "    Offset:     0x%08llx\n", pAllocData->offset);
+    NV_PRINTF(LEVEL_INFO, "    Attr:       0x%x\n",     pAllocData->attr);
+    NV_PRINTF(LEVEL_INFO, "    Attr2:      0x%x\n",     pAllocData->attr2);
+    NV_PRINTF(LEVEL_INFO, "    Format:     0x%x\n",     pAllocData->format);
+    NV_PRINTF(LEVEL_INFO, "    ComprCovg:  0x%x\n",     pAllocData->comprCovg);
+    NV_PRINTF(LEVEL_INFO, "    ZCullCovg:  0x%x\n",     pAllocData->zcullCovg);
+}
+
+NV_STATUS
+stdmemConstruct_IMPL
+(
+    StandardMemory          *pStandardMemory,
+    CALL_CONTEXT            *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    // Nothing to do here; the concrete memory classes perform the allocation.
+    return NV_OK;
+}
+
+NvBool stdmemCanCopy_IMPL(StandardMemory *pStandardMemory)
+{
+    return NV_TRUE;
+}
+
+/*!
+ * stdmemQueryPageSize
+ *
+ * @brief
+ *     Returns the page size requested by the client.
+ *
+ * @param[in] pMemoryManager MemoryManager pointer
+ * @param[in] hClient        Client handle.
+ * @param[in] pAllocData     Pointer to the NV_MEMORY_ALLOCATION_PARAMS allocation data
+ *
+ * @returns
+ *     The page size in bytes.
+ */ +NvU64 +stdmemQueryPageSize +( + MemoryManager *pMemoryManager, + NvHandle hClient, + NV_MEMORY_ALLOCATION_PARAMS *pAllocData +) +{ + NvU32 retAttr = pAllocData->attr; + NvU32 retAttr2 = pAllocData->attr2; + + return memmgrDeterminePageSize(pMemoryManager, hClient, pAllocData->size, + pAllocData->format, pAllocData->flags, &retAttr, &retAttr2); +} + +NvU64 stdmemGetSysmemPageSize_IMPL(OBJGPU * pGpu, StandardMemory *pStdMemory) +{ + return GPU_GET_MEMORY_MANAGER(pGpu)->sysmemPageSize; +} diff --git a/src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c b/src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c new file mode 100644 index 0000000..1cfaea6 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c @@ -0,0 +1,114 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/gpu.h" +#include "mem_mgr_internal.h" +#include "mem_mgr/syncpoint_mem.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "os/os.h" +#include "rmapi/client.h" + +#include "class/cl00c3.h" // NV01_MEMORY_SYNCPOINT + +NV_STATUS +syncpointConstruct_IMPL +( + SyncpointMemory *pSyncpointMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams +) +{ + Memory *pMemory = staticCast(pSyncpointMemory, Memory); + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvP64 physAddr = NvP64_NULL; + NvU64 syncPointBase = 0; + NvU64 limit = 0; + NvU32 offset = 0; + NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS *pAllocParams = pRmAllocParams->pAllocParams; + NV_STATUS status = NV_OK; + OBJGPU *pGpu = pMemory->pGpu; + + // OS get sync-point aperture address. 
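//
// The osGetSyncpointAperture() call below hands back the syncpoint window as
// a (base, limit, offset) triple. A hedged sketch of the bounds math that an
// importer of such a window would want, assuming limit is the usable window
// size in bytes; all Ex* names are invented for illustration.
//
#include <stdint.h>
#include <stdbool.h>

typedef struct
{
    uint64_t base;    // first physical address of the aperture
    uint64_t limit;   // assumed here: usable window size in bytes
    uint64_t offset;  // byte offset of the syncpoint inside the window
} ExAperture;

// Resolve the physical address, rejecting out-of-window offsets and
// overflow in base + offset.
static bool exApertureResolve(const ExAperture *pAp, uint64_t *pPhysAddr)
{
    if (pAp->offset >= pAp->limit)
        return false;
    if (pAp->base > UINT64_MAX - pAp->offset)
        return false;
    *pPhysAddr = pAp->base + pAp->offset;
    return true;
}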
+ status = osGetSyncpointAperture(pGpu->pOsGpuInfo, pAllocParams->syncpointId, &syncPointBase, &limit, &offset); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to get syncpoint aperture %x\n", status); + return status; + } + + physAddr = (NvP64)(syncPointBase + offset); + + NvU32 os02Flags = + DRF_DEF(OS02, _FLAGS, _MAPPING, _NO_MAP) + | DRF_DEF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS) + | DRF_DEF(OS02, _FLAGS, _ALLOC_TYPE_SYNCPOINT, _APERTURE) + | DRF_DEF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES); + + status = osCreateMemFromOsDescriptor(pGpu, + physAddr, + pCallContext->pClient->hClient, + os02Flags, + &limit, + &pMemDesc, + NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR, + RS_PRIV_LEVEL_KERNEL); // Physical address is obtained using osGetSyncpointAperture, Overriding the privlevel here to KERNEL. + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to import syncpoint memory %x\n", status); + return status; + } + + status = memConstructCommon(pMemory, + NV01_MEMORY_SYNCPOINT, + 0, // pUserParams->flags + pMemDesc, + 0, + NULL, + 0, // pUserParams->attr + 0, // pUserParams->attr2 + 0, + 0, + NVOS32_MEM_TAG_NONE, + NULL); + + + // failure case + if (status != NV_OK) + { + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + } + + return status; +} + +NvBool +syncpointCanCopy_IMPL +( + SyncpointMemory *pSyncpointMemory +) +{ + return NV_TRUE; +} diff --git a/src/nvidia/src/kernel/mem_mgr/system_mem.c b/src/nvidia/src/kernel/mem_mgr/system_mem.c new file mode 100644 index 0000000..56afabc --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/system_mem.c @@ -0,0 +1,761 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "mem_mgr/system_mem.h" +#include "vgpu/rpc.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "core/locks.h" +#include "os/os.h" +#include "rmapi/client.h" +#include "deprecated/rmapi_deprecated.h" +#include "gpu/mem_mgr/mem_utils.h" +#include "core/system.h" +#include "ctrl/ctrl0000/ctrl0000gpu.h" + +#include "class/cl003e.h" // NV01_MEMORY_SYSTEM + +static NvU64 +_sysmemGetNextSmallerPageSize +( + OBJGPU *pGpu, + NvU32 *pAttr, + NvU32 *pAttr2 +) +{ + NvU64 pageSize = 0ULL; + NvU64 nextPageSize; + + switch (DRF_VAL(OS32, _ATTR, _PAGE_SIZE, *pAttr)) + { + case NVOS32_ATTR_PAGE_SIZE_4KB: + pageSize = RM_PAGE_SIZE; + break; + case NVOS32_ATTR_PAGE_SIZE_BIG: + pageSize = RM_PAGE_SIZE_64K; + break; + case NVOS32_ATTR_PAGE_SIZE_HUGE: + switch (DRF_VAL(OS32, _ATTR2, _PAGE_SIZE_HUGE, *pAttr2)) + { + case NVOS32_ATTR2_PAGE_SIZE_HUGE_DEFAULT: + case NVOS32_ATTR2_PAGE_SIZE_HUGE_2MB: + pageSize = RM_PAGE_SIZE_2M; + break; + case NVOS32_ATTR2_PAGE_SIZE_HUGE_512MB: + pageSize = RM_PAGE_SIZE_512M; + break; + case NVOS32_ATTR2_PAGE_SIZE_HUGE_256GB: + pageSize = RM_PAGE_SIZE_256G; + } + break; + default: + NV_PRINTF(LEVEL_ERROR, "Invalid page size attribute: 0x%x\n", DRF_VAL(OS32, _ATTR, _PAGE_SIZE, *pAttr)); + return 0ULL; + } + + // Reached smallest page size. Return 0 to stop retrying. + if (pageSize == osGetPageSize()) + { + return 0ULL; + } + + switch (pageSize) + { + case RM_PAGE_SIZE_64K: + case RM_PAGE_SIZE_128K: + nextPageSize = RM_PAGE_SIZE; + *pAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _4KB, *pAttr); + break; + case RM_PAGE_SIZE_2M: + nextPageSize = RM_PAGE_SIZE_64K; + *pAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _BIG, *pAttr); + break; + case RM_PAGE_SIZE_512M: + nextPageSize = RM_PAGE_SIZE_2M; + *pAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, *pAttr); + *pAttr2 = FLD_SET_DRF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _2MB, *pAttr2); + break; + case RM_PAGE_SIZE_256G: + nextPageSize = RM_PAGE_SIZE_512M; + *pAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, *pAttr); + *pAttr2 = FLD_SET_DRF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _512MB, *pAttr2); + break; + default: + NV_PRINTF(LEVEL_ERROR, "Invalid page size: 0x%llx\n", pageSize); + return 0ULL; + } + + return nextPageSize; +} + +/*! + * sysmemConstruct + * + * @brief + * This routine provides common allocation services used by the + * following heap allocation functions: + * NVOS32_FUNCTION_ALLOC_SIZE + * NVOS32_FUNCTION_ALLOC_SIZE_RANGE + * NVOS32_FUNCTION_ALLOC_TILED_PITCH_HEIGHT + * + * @param[in] pSystemMemory Pointer to SystemMemory object + * @param[in] pCallContext Pointer to the current CALL_CONTEXT. + * @param[in] pParams Pointer to the alloc params + * + * @return 'NV_OK' + * Operation completed successfully. + * @return 'NV_ERR_NO_MEMORY' + * There is not enough available memory to satisfy allocation request. + * @return 'NV_ERR_INSUFFICIENT_RESOURCES' + * Not enough available resources to satisfy allocation request. 
+ */ +NV_STATUS +sysmemConstruct_IMPL +( + SystemMemory *pSystemMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + Memory *pMemory = staticCast(pSystemMemory, Memory); + NV_MEMORY_ALLOCATION_PARAMS *pAllocData = pParams->pAllocParams; + MEMORY_ALLOCATION_REQUEST allocRequest = {0}; + MEMORY_ALLOCATION_REQUEST *pAllocRequest = &allocRequest; + OBJGPU *pGpu = pMemory->pGpu; + HWRESOURCE_INFO hwResource = {0}; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NV_STATUS rmStatus = NV_OK; + NvHandle hClient = pCallContext->pClient->hClient; + RmClient *pRmClient = dynamicCast(pCallContext->pClient, RmClient); + NvHandle hParent = pCallContext->pResourceRef->pParentRef->hResource; + NvU64 sizeOut; + NvU64 offsetOut; + MEMORY_DESCRIPTOR *pMemDesc; + NvU32 flags; + RM_ATTR_PAGE_SIZE pageSizeAttr; + NvBool bRetry = NV_FALSE; + + NV_ASSERT_OR_RETURN(pRmClient != NULL, NV_ERR_INVALID_CLIENT); + + if (RMCFG_FEATURE_PLATFORM_GSP && !pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB)) + { + NV_ASSERT_FAILED("System memory can't be allocated on GSP without 0FB"); + return NV_ERR_NOT_SUPPORTED; + } + + // Copy-construction has already been done by the base Memory class + if (RS_IS_COPY_CTOR(pParams)) + { + return NV_OK; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, stdmemValidateParams(pGpu, pRmClient, pAllocData)); + NV_CHECK_OR_RETURN(LEVEL_ERROR, + DRF_VAL(OS32, _ATTR, _LOCATION, pAllocData->attr) != NVOS32_ATTR_LOCATION_VIDMEM && + !(pAllocData->flags & NVOS32_ALLOC_FLAGS_VIRTUAL), + NV_ERR_INVALID_ARGUMENT); + + stdmemDumpInputAllocParams(pAllocData, pCallContext); + + // send it through the regular allocator even though it is for sysmem + pAllocRequest->classNum = NV01_MEMORY_SYSTEM; + pAllocRequest->pUserParams = pAllocData; + pAllocRequest->hMemory = pResourceRef->hResource; + pAllocRequest->hClient = hClient; + pAllocRequest->hParent = hParent; + pAllocRequest->pGpu = pGpu; + pAllocRequest->internalflags = NVOS32_ALLOC_INTERNAL_FLAGS_CLIENTALLOC; + pAllocRequest->pHwResource = &hwResource; + + // Unsure if we need to keep separate copies, but keeping old behavior for now. + sizeOut = pAllocData->size; + offsetOut = pAllocData->offset; + + { + } + + // + // Enable retrying the allocation only if the page size is default. + // Each new attempt decreases the page size to the next supported RM value + // until the minimum OS granularity is reached. + // + if (FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _DEFAULT, pAllocData->attr) && + (GPU_GET_MEMORY_MANAGER(pGpu)->bSysmemPageSizeDefaultAllowLargePages)) + { + bRetry = NV_TRUE; + } + + do + { + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + sysmemInitAllocRequest(pGpu, pSystemMemory, pAllocRequest)); + + // Memdesc should be allocated by now. + NV_ASSERT_OR_RETURN(pAllocRequest->pMemDesc, NV_ERR_INVALID_STATE); + pMemDesc = pAllocRequest->pMemDesc; + + // Copy final heap size back to client struct + sizeOut = pMemDesc->Size; + pAllocData->limit = sizeOut - 1; + + memSetSysmemCacheAttrib(pGpu, pAllocData, pMemDesc); + + // GPU cache snooping is a property of fully coherent platforms. 
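//
// The ct_assert() chain just below pins the OS32 coherency encodings to the
// OS02 ones so the DRF_NUM field copy that follows is safe. The same guard
// expressed in portable C11, with invented encodings standing in for the
// real tables:
//
#include <assert.h>  /* C11 static_assert */

enum { EX_OS32_UNCACHED = 0, EX_OS32_CACHED = 1 };
enum { EX_OS02_UNCACHED = 0, EX_OS02_CACHED = 1 };

/* If either table is ever renumbered, the build breaks here instead of
 * flags being silently mistranslated at runtime. */
static_assert(EX_OS32_UNCACHED == EX_OS02_UNCACHED, "coherency encodings diverged");
static_assert(EX_OS32_CACHED   == EX_OS02_CACHED,   "coherency encodings diverged");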
+ NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + memSetGpuCacheSnoop(pGpu, pAllocData->attr, pMemDesc)); + + ct_assert(NVOS32_ATTR_COHERENCY_UNCACHED == NVOS02_FLAGS_COHERENCY_UNCACHED); + ct_assert(NVOS32_ATTR_COHERENCY_CACHED == NVOS02_FLAGS_COHERENCY_CACHED); + ct_assert(NVOS32_ATTR_COHERENCY_WRITE_COMBINE == NVOS02_FLAGS_COHERENCY_WRITE_COMBINE); + ct_assert(NVOS32_ATTR_COHERENCY_WRITE_THROUGH == NVOS02_FLAGS_COHERENCY_WRITE_THROUGH); + ct_assert(NVOS32_ATTR_COHERENCY_WRITE_PROTECT == NVOS02_FLAGS_COHERENCY_WRITE_PROTECT); + ct_assert(NVOS32_ATTR_COHERENCY_WRITE_BACK == NVOS02_FLAGS_COHERENCY_WRITE_BACK); + + flags = DRF_DEF(OS02, _FLAGS, _LOCATION, _PCI) | + DRF_DEF(OS02, _FLAGS, _MAPPING, _NO_MAP) | + DRF_NUM(OS02, _FLAGS, _COHERENCY, DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr)); + + NV_ASSERT(memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM); + + if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL) + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_FALSE); + + if (pAllocData->flags & NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED) + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_ENCRYPTED, NV_TRUE); + + if (FLD_TEST_DRF(OS32, _ATTR2, _NISO_DISPLAY, _YES, pAllocData->attr2)) + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO, NV_TRUE); + + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_SYSMEM_OWNED_BY_CLIENT, NV_TRUE); + + if (FLD_TEST_DRF(OS32, _ATTR2, _FIXED_NUMA_NODE_ID, _YES, pAllocData->attr2)) + { + + if ((pGpu->cpuNumaNodeId != NV0000_CTRL_NO_NUMA_NODE) && + (pAllocData->numaNode != pGpu->cpuNumaNodeId)) + { + NV_PRINTF(LEVEL_ERROR, "NUMA node mismatch. Requested node: %u CPU node: %u\n", + pAllocData->numaNode, pGpu->cpuNumaNodeId); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto failed_destroy_memdesc; + } + + memdescSetNumaNode(pMemDesc, pAllocData->numaNode); + } + + pageSizeAttr = dmaNvos32ToPageSizeAttr(pAllocData->attr, pAllocData->attr2); + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_INFO, + memmgrSetMemDescPageSize_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), pMemDesc, + AT_GPU, pageSizeAttr), + failed_destroy_memdesc); + + memdescTagAlloc(rmStatus, NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_132, pMemDesc); + if (rmStatus != NV_OK) + { + if (bRetry) + { + NvU64 pageSize; + pageSize = _sysmemGetNextSmallerPageSize(pGpu, &pAllocData->attr, &pAllocData->attr2); + if (pageSize == 0) + { + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, rmStatus, failed_destroy_memdesc); + } + NV_PRINTF(LEVEL_INFO, "Sysmem alloc failed, retrying with page size 0x%llx.\n", pageSize); + + // Freeing memdescs to avoid leaks on retry. + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + + pAllocRequest->pMemDesc = pMemDesc = NULL; + } + else + { + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, rmStatus, failed_destroy_memdesc); + } + } + else + { + // Got a valid allocation, set retry to false. + bRetry = NV_FALSE; + } + } while (bRetry); + + // ClientDB can set the pagesize for memdesc. + // With GPU SMMU mapping, this needs to be set on the SMMU memdesc. + // So SMMU allocation should happen before memConstructCommon() + // Eventually SMMU allocation will be part of memdescAlloc(). + + // An SMMU mapping will be added to SYSMEM allocations in the following cases: + // 1. BIG page allocations with non-contiguous SYSMEM in Tegra. + // 2. RM clients forcing SMMU mapping via flags. + // GPU Arch verification with VPR is one such usecase. + // + // fbAlloc_GF100() will set the page size attribute to BIG for these cases. 
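//
// A minimal sketch of the fallback policy implemented by the do/while retry
// loop above: try the largest candidate page size first and step down on
// failure until the smallest supported size is exhausted. exAllocPages is a
// hypothetical allocator standing in for memdescTagAlloc.
//
#include <stdint.h>
#include <stddef.h>

extern void *exAllocPages(size_t bytes, uint64_t pageSize); /* NULL on failure */

static uint64_t exNextSmallerPageSize(uint64_t pageSize)
{
    switch (pageSize)
    {
        case 512ull << 20: return 2ull  << 20;  /* 512M -> 2M  */
        case 2ull   << 20: return 64ull << 10;  /* 2M   -> 64K */
        case 64ull  << 10: return 4ull  << 10;  /* 64K  -> 4K  */
        default:           return 0;            /* 4K is the floor */
    }
}

static void *exAllocWithFallback(size_t bytes, uint64_t pageSize)
{
    while (pageSize != 0)
    {
        void *p = exAllocPages(bytes, pageSize);
        if (p != NULL)
            return p;
        pageSize = exNextSmallerPageSize(pageSize);
    }
    return NULL;
}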
+ + NV_ASSERT_OR_ELSE(!FLD_TEST_DRF(OS32, _ATTR2, _SMMU_ON_GPU, _ENABLE, pAllocData->attr2), + rmStatus = NV_ERR_NOT_SUPPORTED; goto failed_free_memdesc); + + if ((FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _BIG, pAllocData->attr) || + FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, pAllocData->attr)) && + FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, pAllocData->attr)) + { + NvBool bLargePageNonContigAllocSupported; + + // + // The below checks verify whether a non-contig large page size sysmem request is supported by + // the platform. In the MODS case we can only support large page size sysmem requests + // if MODS supplied the sysmem page size regkey that is >4K. + // This check could be performed better, but avoiding a change in MODS behavior for now. + // + if (RMCFG_FEATURE_PLATFORM_UNIX) + { + bLargePageNonContigAllocSupported = NV_TRUE; + } + else if (RMCFG_FEATURE_MODS_FEATURES) + { + StandardMemory *pStdMemory = staticCast(pSystemMemory, StandardMemory); + + if (stdmemGetSysmemPageSize_HAL(pGpu, pStdMemory) > RM_PAGE_SIZE) + { + bLargePageNonContigAllocSupported = NV_TRUE; + } + else + { + bLargePageNonContigAllocSupported = NV_FALSE; + } + } + else + { + bLargePageNonContigAllocSupported = NV_FALSE; + } + + NV_CHECK_OR_ELSE(LEVEL_ERROR, bLargePageNonContigAllocSupported, + rmStatus = NV_ERR_NOT_SUPPORTED; goto failed_free_memdesc); + } + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_INFO, + memConstructCommon(pMemory, pAllocRequest->classNum, flags, pMemDesc, 0, + NULL, pAllocData->attr, pAllocData->attr2, 0, 0, + pAllocData->tag, &hwResource), + failed_free_memdesc); + + // + // We need to force a kernel mapping of system memory-backed notifiers + // allocated in this path. + // + if (pAllocData->type == NVOS32_TYPE_NOTIFIER) + { + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + memCreateKernelMapping(pMemory, NV_PROTECT_READ_WRITE, NV_FALSE), + failed_destruct_common); + } + + if (IS_VIRTUAL(pGpu)) + { + NvU32 os02Flags; + NvU32 os32Flags = pAllocData->flags; + + // NVOS32_TYPE_NOTIFIER notifier indicates kernel mapping in this path + if (pAllocData->type == NVOS32_TYPE_NOTIFIER) + os32Flags |= NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP; + + // + // Calculate os02flags as VGPU plugin allocates sysmem with legacy + // RmAllocMemory API + // + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_INFO, + RmDeprecatedConvertOs32ToOs02Flags(pAllocData->attr, + pAllocData->attr2, + os32Flags, + &os02Flags), + failed_destruct_common); + + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. 
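//
// A distilled sketch of the guest-side RPC mirroring described above: the
// guest forwards the allocation to the host and records that it did, so the
// destructor knows to issue the matching free RPC. The Ex* names are
// invented; the real plumbing is the NV_RM_RPC_* macro family used below.
//
#include <stdbool.h>

typedef struct { bool bRpcAlloc; } ExMemory;

extern int  exRpcAllocToHost(ExMemory *pMem);  /* hypothetical */
extern int  exRpcFreeOnHost(ExMemory *pMem);   /* hypothetical */
extern bool exIsVirtual(void);                 /* hypothetical */

static int exAlloc(ExMemory *pMem)
{
    pMem->bRpcAlloc = false;
    if (exIsVirtual())
    {
        int rc = exRpcAllocToHost(pMem);
        if (rc != 0)
            return rc;
        // The host now owns hardware state for this object; remember that
        // so teardown mirrors construction.
        pMem->bRpcAlloc = true;
    }
    return 0;
}

static void exFree(ExMemory *pMem)
{
    if (pMem->bRpcAlloc)
        (void)exRpcFreeOnHost(pMem);
}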
+ // + NV_RM_RPC_ALLOC_MEMORY(pGpu, + hClient, + hParent, + pAllocRequest->hMemory, + pAllocRequest->classNum, + os02Flags, + pMemDesc, + rmStatus); + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, rmStatus, failed_destruct_common); + + pMemory->bRpcAlloc = NV_TRUE; + } + + pAllocData->size = sizeOut; + pAllocData->offset = offsetOut; + + stdmemDumpOutputAllocParams(pAllocData); + + return rmStatus; + +// Resource cleanup on failure +failed_destruct_common: + memDestructCommon(pMemory); +failed_free_memdesc: + memdescFree(pMemDesc); +failed_destroy_memdesc: + memdescDestroy(pMemDesc); + + return rmStatus; +} + +NV_STATUS +sysmemCtrlCmdGetSurfaceNumPhysPages_IMPL +( + SystemMemory *pSystemMemory, + NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS *pParams +) +{ + Memory *pMemory = staticCast(pSystemMemory, Memory); + NV_STATUS status; + + NV_ASSERT_OR_RETURN(memdescGetAddressSpace(pMemory->pMemDesc) == ADDR_SYSMEM, NV_ERR_NOT_SUPPORTED); + + status = osGetNumMemoryPages(pMemory->pMemDesc, + &pParams->numPages); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get sysmem pages\n"); + } + + return status; +} + +NV_STATUS +sysmemCtrlCmdGetSurfacePhysPages_IMPL +( + SystemMemory *pSystemMemory, + NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS *pParams +) +{ + Memory *pMemory = staticCast(pSystemMemory, Memory); + NV_STATUS status; + + NV_ASSERT_OR_RETURN(memdescGetAddressSpace(pMemory->pMemDesc) == ADDR_SYSMEM, NV_ERR_NOT_SUPPORTED); + + status = osGetMemoryPages(pMemory->pMemDesc, + NvP64_VALUE(pParams->pPages), + &pParams->numPages); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get sysmem pages\n"); + } + + return status; +} + +NV_STATUS +sysmemInitAllocRequest_SOC +( + OBJGPU *pGpu, + SystemMemory *pSystemMemory, + MEMORY_ALLOCATION_REQUEST *pAllocRequest +) +{ + NV_MEMORY_ALLOCATION_PARAMS *pAllocParams = pAllocRequest->pUserParams; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvBool bAllocedMemDesc = NV_FALSE; + NvBool bContig = NV_TRUE; + NvU32 localAttr = pAllocParams->attr; + NvU32 localAttr2 = pAllocParams->attr2; + NvU64 pageSize = 0; + NV_STATUS status = NV_OK; + + // Check for valid size. + NV_CHECK_OR_RETURN(LEVEL_ERROR, pAllocParams->size != 0, NV_ERR_INVALID_ARGUMENT); + + // Ensure a valid allocation pAllocParams->type was passed in + NV_CHECK_OR_RETURN(LEVEL_ERROR,(pAllocParams->type < NVOS32_NUM_MEM_TYPES), NV_ERR_INVALID_ARGUMENT); + + // If vidmem not requested explicitly, decide on the physical location. + if (FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _PCI, localAttr) || + FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _ANY, localAttr)) + { + + localAttr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, + _PCI, localAttr); + } + + if (pAllocParams->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) + { + NV_PRINTF(LEVEL_ERROR, + "Fixed allocation on sysmem not allowed.\n"); + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + CLEAR_HAL_ATTR(localAttr) + CLEAR_HAL_ATTR2(localAttr2) + + bContig = FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, localAttr); + + // Allocate a memory descriptor if needed. + if (pAllocRequest->pMemDesc == NULL) + { + NV_ASSERT_OK_OR_GOTO(status, + memdescCreate(&pAllocRequest->pMemDesc, pGpu, pAllocParams->size, 0, + bContig, ADDR_SYSMEM, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE), + failed); + bAllocedMemDesc = NV_TRUE; + } + + pMemDesc = pAllocRequest->pMemDesc; + + // Set attributes tracked by the memdesc + memdescSetPteKind(pMemDesc, pAllocParams->format); + memdescSetHwResId(pMemDesc, 0); // hwResId is 0. 
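//
// Note how sysmemInitAllocRequest_SOC above edits localAttr/localAttr2
// rather than the caller-visible fields, writing them back only after every
// step has succeeded. A distilled sketch of that commit-on-success style
// (Ex* names invented):
//
#include <stdint.h>

typedef struct { uint32_t attr; uint32_t attr2; } ExParams;

extern int exPickPageSize(uint32_t *pAttr, uint32_t *pAttr2); /* hypothetical */

static int exInitParams(ExParams *pParams)
{
    uint32_t localAttr  = pParams->attr;
    uint32_t localAttr2 = pParams->attr2;

    int rc = exPickPageSize(&localAttr, &localAttr2);
    if (rc != 0)
        return rc;  /* the caller's params remain untouched on failure */

    // Commit only once everything has succeeded.
    pParams->attr  = localAttr;
    pParams->attr2 = localAttr2;
    return 0;
}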
+ + // update contiguity attribute to reflect memdesc + if (memdescGetContiguity(pAllocRequest->pMemDesc, AT_GPU)) + { + localAttr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, + _CONTIGUOUS, localAttr); + } + else + { + localAttr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, + _NONCONTIGUOUS, localAttr); + } + + // Select the sysmem alloc page size. + pageSize = memmgrDeterminePageSize(GPU_GET_MEMORY_MANAGER(pGpu), + pAllocRequest->hClient, + pAllocParams->size, + pAllocParams->format, + pAllocParams->flags, + &localAttr, &localAttr2); + if (!IsAMODEL(pGpu) && pageSize == 0) + { + status = NV_ERR_INVALID_STATE; + NV_PRINTF(LEVEL_ERROR, "memmgrDeterminePageSize failed, status: 0x%x\n", status); + goto failed; + } + + pAllocParams->attr = localAttr; + pAllocParams->attr2 = localAttr2; + pAllocParams->offset = ~0ULL; + + return NV_OK; + +failed: + if (bAllocedMemDesc) + { + memdescDestroy(pAllocRequest->pMemDesc); + pAllocRequest->pMemDesc = NULL; + } + + return status; +} + +NV_STATUS +sysmemInitAllocRequest_HMM +( + OBJGPU *pGpu, + SystemMemory *pSystemMemory, + MEMORY_ALLOCATION_REQUEST *pAllocRequest +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + FB_ALLOC_INFO *pFbAllocInfo = NULL; + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL; + NV_STATUS status = NV_OK; + + pFbAllocInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO)); + NV_ASSERT_TRUE_OR_GOTO(status, pFbAllocInfo != NULL, NV_ERR_NO_MEMORY, done); + + pFbAllocPageFormat = portMemAllocNonPaged(sizeof(FB_ALLOC_PAGE_FORMAT)); + NV_ASSERT_TRUE_OR_GOTO(status, pFbAllocPageFormat != NULL, NV_ERR_NO_MEMORY, done); + + portMemSet(pFbAllocInfo, 0, sizeof(FB_ALLOC_INFO)); + portMemSet(pFbAllocPageFormat, 0, sizeof(FB_ALLOC_PAGE_FORMAT)); + pFbAllocInfo->pageFormat = pFbAllocPageFormat; + + memUtilsInitFBAllocInfo(pAllocRequest->pUserParams, pFbAllocInfo, pAllocRequest->hClient, pAllocRequest->hParent); + + NV_ASSERT_OK_OR_GOTO(status, + memmgrAllocResources(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo), + done); + + NV_ASSERT_OK_OR_GOTO(status, + sysmemAllocResources(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo, pSystemMemory), + done); + +done: + portMemFree(pFbAllocPageFormat); + portMemFree(pFbAllocInfo); + + return status; +} + +NV_STATUS +sysmemAllocResources +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, + FB_ALLOC_INFO *pFbAllocInfo, + SystemMemory *pSystemMemory +) +{ + NV_STATUS status = NV_OK; + NV_MEMORY_ALLOCATION_PARAMS *pVidHeapAlloc = pAllocRequest->pUserParams; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvBool bAllocedMemDesc = NV_FALSE; + NvBool bContig = FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, + _CONTIGUOUS, pVidHeapAlloc->attr); + // + // BUG 3506666 + // While replaying a trace, it is possible for the playback OS to have a smaller page size + // than the capture OS so if we're running a replay where the requested page size is larger, + // assume this is a contiguous piece of memory, if contiguity is not specified. + // + if (RMCFG_FEATURE_PLATFORM_MODS && FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _DEFAULT, pVidHeapAlloc->attr)) + { + if ((FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _BIG, pVidHeapAlloc->attr) || + FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, pVidHeapAlloc->attr)) && + (stdmemGetSysmemPageSize_HAL(pGpu, staticCast(pSystemMemory, StandardMemory)) == RM_PAGE_SIZE)) + { + bContig = NV_TRUE; + } + } + + // + // Check for virtual-only parameters used on physical allocs. 
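//
// The bAllocedMemDesc flag and the failed: label further below implement a
// common cleanup rule: on error, free only what this function allocated,
// never a descriptor the caller passed in. A distilled sketch (Ex* names
// invented):
//
#include <stdlib.h>

typedef struct { int placeholder; } ExDesc;

extern int exLaterStep(ExDesc *pDesc); /* hypothetical; 0 on success */

static int exAllocResources(ExDesc **ppDesc)
{
    int bAllocedHere = 0;
    int rc;

    if (*ppDesc == NULL)
    {
        *ppDesc = malloc(sizeof(ExDesc));
        if (*ppDesc == NULL)
            return -1;
        bAllocedHere = 1;
    }

    rc = exLaterStep(*ppDesc);
    if (rc != 0)
        goto failed;

    return 0;

failed:
    if (bAllocedHere)
    {
        free(*ppDesc);
        *ppDesc = NULL;  /* don't hand the caller a dangling pointer */
    }
    return rc;
}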
+    //
+    if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_VIRTUAL_ONLY)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Virtual-only flag used with physical allocation\n");
+        status = NV_ERR_INVALID_ARGUMENT;
+        goto failed;
+    }
+    if (FLD_TEST_DRF(OS32, _ATTR2, _32BIT_POINTER, _ENABLE, pVidHeapAlloc->attr2))
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Virtual-only 32-bit pointer attr used with physical allocation\n");
+        status = NV_ERR_INVALID_ARGUMENT;
+        goto failed;
+    }
+    if (pVidHeapAlloc->hVASpace != 0)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "VA space handle used with physical allocation\n");
+        status = NV_ERR_INVALID_ARGUMENT;
+        goto failed;
+    }
+
+    NV_ASSERT(!(pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_WPR1) && !(pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_WPR2));
+
+    if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Unexpected fixed address allocation\n");
+        status = NV_ERR_INVALID_ARGUMENT;
+        goto failed;
+    }
+
+    NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, memUtilsAllocMemDesc(pGpu, pAllocRequest, pFbAllocInfo, &pMemDesc, NULL,
+                        ADDR_SYSMEM, bContig, &bAllocedMemDesc), failed);
+
+    // Get possibly updated surface attributes.
+    pVidHeapAlloc->attr  = pFbAllocInfo->retAttr;
+    pVidHeapAlloc->attr2 = pFbAllocInfo->retAttr2;
+
+    // Update the contiguity attribute to reflect the memdesc.
+    if (memdescGetContiguity(pAllocRequest->pMemDesc, AT_GPU))
+    {
+        pVidHeapAlloc->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY,
+                                          _CONTIGUOUS,
+                                          pVidHeapAlloc->attr);
+    }
+    else
+    {
+        pVidHeapAlloc->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY,
+                                          _NONCONTIGUOUS,
+                                          pVidHeapAlloc->attr);
+    }
+
+    pVidHeapAlloc->offset = pFbAllocInfo->offset;
+
+    if (pAllocRequest->pHwResource != NULL)
+    {
+        pAllocRequest->pHwResource->attr       = pFbAllocInfo->retAttr;
+        pAllocRequest->pHwResource->attr2      = pFbAllocInfo->retAttr2;
+        pAllocRequest->pHwResource->hwResId    = pFbAllocInfo->hwResId;
+        pAllocRequest->pHwResource->comprCovg  = pFbAllocInfo->comprCovg;
+        pAllocRequest->pHwResource->ctagOffset = pFbAllocInfo->ctagOffset;
+    }
+
+    return NV_OK;
+
+failed:
+    if (bAllocedMemDesc)
+    {
+        memdescDestroy(pAllocRequest->pMemDesc);
+        pAllocRequest->pMemDesc = NULL;
+    }
+
+    return status;
+}
+
+void sysmemDestruct_IMPL(SystemMemory *pSystemMemory)
+{
+    Memory            *pMemory  = staticCast(pSystemMemory, Memory);
+    NvHandle           hClient  = RES_GET_CLIENT_HANDLE(pMemory);
+    NvHandle           hParent  = RES_GET_PARENT_HANDLE(pMemory);
+    NvHandle           hMemory  = RES_GET_HANDLE(pMemory);
+    MEMORY_DESCRIPTOR *pMemDesc = pMemory->pMemDesc;
+    OBJGPU            *pGpu;
+
+    if (pMemDesc == NULL || pMemDesc->DupCount > 1)
+        return;
+
+    // Only read the GPU pointer once pMemDesc is known to be non-NULL.
+    pGpu = pMemDesc->pGpu;
+
+    memDestructCommon(pMemory);
+
+    memdescFree(pMemDesc);
+
+    memdescDestroy(pMemDesc);
+
+    // If the allocation was RPC-ed, free it using RPC as well.
+    if (pMemory->bRpcAlloc && (IS_VIRTUAL(pGpu) || IS_FW_CLIENT(pGpu)))
+    {
+        NV_STATUS status = NV_OK;
+
+        NV_RM_RPC_FREE(pGpu, hClient, hParent, hMemory, status);
+        NV_ASSERT((status == NV_OK) || (status == NV_ERR_GPU_IN_FULLCHIP_RESET));
+    }
+}
diff --git a/src/nvidia/src/kernel/mem_mgr/vaspace.c b/src/nvidia/src/kernel/mem_mgr/vaspace.c
new file mode 100644
index 0000000..b1efae9
--- /dev/null
+++ b/src/nvidia/src/kernel/mem_mgr/vaspace.c
@@ -0,0 +1,233 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** HW State Routines ***************************\ +* Virtual Address Space Function Definitions. * +\***************************************************************************/ + + +#include "mem_mgr/vaspace.h" +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" +#include "rmapi/rs_utils.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" + +void +vaspaceIncRefCnt_IMPL(OBJVASPACE *pVAS) +{ + pVAS->refCnt++; +} + +void +vaspaceDecRefCnt_IMPL(OBJVASPACE *pVAS) +{ + NV_ASSERT_OR_RETURN_VOID(pVAS->refCnt != 0); + pVAS->refCnt--; +} + +NV_STATUS +vaspaceFillAllocParams_IMPL +( + OBJVASPACE *pVAS, + const FB_ALLOC_INFO *pAllocInfo, + NvU64 *pSize, + NvU64 *pAlign, + NvU64 *pRangeLo, + NvU64 *pRangeHi, + NvU64 *pPageSizeLockMask, + VAS_ALLOC_FLAGS *pFlags +) +{ + NvBool bRestrictedVaRange = NV_FALSE; + NvBool bEnforce32bitPtr = NV_FALSE; + NvU32 vasFlags = vaspaceGetFlags(pVAS); + + // Apply default alignment policies to offset alignment and size. + NV_ASSERT_OK_OR_RETURN( + vaspaceApplyDefaultAlignment(pVAS, pAllocInfo, pAlign, pSize, + pPageSizeLockMask)); + + pFlags->bClientAllocation = !!(pAllocInfo->internalflags & NVOS32_ALLOC_INTERNAL_FLAGS_CLIENTALLOC); + + if (pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) + { + // Fixed address allocation implemented by restricting range. + *pRangeLo = pAllocInfo->offset; + *pRangeHi = pAllocInfo->offset + *pSize - 1; + } + else if (!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_USE_BEGIN_END)) + { + // If user didn't specify fixed or restricted range, allow full VAS range. + *pRangeLo = vaspaceGetVaStart(pVAS); + *pRangeHi = vaspaceGetVaLimit(pVAS); + + // + // For MODS we also allow restricting the range to 40 bits by default. + // This is needed for Pascal 49b support where some HW units can only + // access 40b VA. MODS must use range/fixed address allocations to + // get a VA above 40 bits in this mode. + // + if (bRestrictedVaRange && !(vasFlags & VASPACE_FLAGS_FLA)) + { + *pRangeHi = NV_MIN(*pRangeHi, NVBIT64(40) - 1); + } + } + + // + // Handle 32bit pointer requests. 32b pointers are forced below 32b + // on all chips. Non-32b requests are only forced on some chips, + // typically kepler, and only if there are no other address hints. + // + // If requested size cannot be satisfied with range above 4 GB, then relax that + // restriction. 
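//
// A distilled sketch of the range clamping performed just below: 32-bit
// pointer requests are forced under 4 GB, other requests are optionally
// pushed above 4 GB when the remaining window still fits, and the final
// range is verified against the requested size. Ex* names are invented;
// EX_BIT64(n) mirrors NVBIT64(n).
//
#include <stdint.h>
#include <stdbool.h>

#define EX_BIT64(n)  (1ull << (n))
#define EX_MIN(a, b) ((a) < (b) ? (a) : (b))
#define EX_MAX(a, b) ((a) > (b) ? (a) : (b))

static bool exClampRange(uint64_t *pLo, uint64_t *pHi, uint64_t size,
                         bool b32bitPtr)
{
    if (b32bitPtr)
    {
        *pHi = EX_MIN(*pHi, EX_BIT64(32) - 1);
    }
    else if ((*pHi - *pLo + 1 - size) > EX_BIT64(32))
    {
        // Enough slack above 4 GB: reserve the low 4 GB for 32-bit users.
        *pLo = EX_MAX(*pLo, EX_BIT64(32));
    }

    // The request must still fit in the clamped range.
    return (*pHi - *pLo + 1) >= size;
}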
+ // + if (FLD_TEST_DRF(OS32, _ATTR2, _32BIT_POINTER, _ENABLE, pAllocInfo->pageFormat->attr2)) + { + *pRangeHi = NV_MIN(*pRangeHi, NVBIT64(32) - 1); + } + + else if (bEnforce32bitPtr && + !(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) && + !(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_USE_BEGIN_END) && + ((*pRangeHi - *pRangeLo + 1 - *pSize) > NVBIT64(32)) && + !(vasFlags & VASPACE_FLAGS_FLA)) + { + *pRangeLo = NV_MAX(*pRangeLo, NVBIT64(32)); + } + + if ((*pRangeHi - *pRangeLo + 1) < *pSize) // Moved the range check here + { + NV_PRINTF(LEVEL_ERROR, + "Requested size 0x%llx more than available range. RangeLo=0x%llx, RangeHi=0x%llx\n", + *pSize, *pRangeLo, *pRangeHi); + NV_ASSERT_OR_RETURN(0, NV_ERR_INSUFFICIENT_RESOURCES); + } + + // Convert flags. + pFlags->bReverse = + !!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN); + + pFlags->bPreferSysmemPageTables = + !!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_PREFER_PTES_IN_SYSMEMORY); + + pFlags->bExternallyManaged = + !!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED); + + pFlags->bLazy = + !!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_LAZY); + + pFlags->bSparse = + !!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_SPARSE); + + // + // The protected flag for kernel allocations is honoured only + // if this is a root client(kernel client). + // + pFlags->bPrivileged = pAllocInfo->bIsKernelAlloc; + + return NV_OK; +} + +NvU64 +vaspaceGetVaStart_IMPL(OBJVASPACE *pVAS) +{ + return pVAS->vasStart; +} + +NvU64 +vaspaceGetVaLimit_IMPL(OBJVASPACE *pVAS) +{ + return pVAS->vasLimit; +} + +void +vaspaceInvalidateTlb_IMPL +( + OBJVASPACE *pVAS, + OBJGPU *pGpu, + VAS_PTE_UPDATE_TYPE type +) +{ + NV_ASSERT(0); +} + +NV_STATUS +vaspaceGetByHandleOrDeviceDefault_IMPL +( + RsClient *pClient, + NvHandle hDeviceOrSubDevice, + NvHandle hVASpace, + OBJVASPACE **ppVAS +) +{ + NV_STATUS status = NV_OK; + NvHandle _hDeviceOrSubDevice; + Device *pDevice = NULL; + + if (hVASpace == NV01_NULL_OBJECT) + { + if (hDeviceOrSubDevice == 0) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + _hDeviceOrSubDevice = hDeviceOrSubDevice; + } + else + { + NV_PRINTF(LEVEL_ERROR, "Trying to fetch VASpace with VASPACE handle. Not supported \n"); + return NV_ERR_NOT_SUPPORTED; + } + + status = deviceGetByHandle(pClient, _hDeviceOrSubDevice, &pDevice); + if (status != NV_OK) + { + Subdevice *pSubdevice; + + status = subdeviceGetByHandle(pClient, _hDeviceOrSubDevice, &pSubdevice); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Invalid parent handle!\n"); + return status; + } + + pDevice = pSubdevice->pDevice; + } + + // Allocates/Finds VA Space according to the handle type. + if (hVASpace == NV01_NULL_OBJECT) + { + // Check the vaspace mode + if (pDevice->vaMode == NV_DEVICE_ALLOCATION_VAMODE_MULTIPLE_VASPACES) + { + NV_PRINTF(LEVEL_ERROR, + "VA mode %d (PRIVATE) doesn't support allocating an implicit VA space.\n", + pDevice->vaMode); + return NV_ERR_INVALID_STATE; + } + return deviceGetDefaultVASpace(pDevice, ppVAS); + } + + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c b/src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c new file mode 100644 index 0000000..f710cf0 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c @@ -0,0 +1,188 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** HW State Routines ***************************\ +* * +* Virtual Memory Manager Object Function Definitions. * +* * +\***************************************************************************/ + +#include "core/system.h" + +#include "mem_mgr/virt_mem_mgr.h" +#include "mem_mgr/vaspace.h" +#include "mem_mgr/io_vaspace.h" +#include "class/cl00f2.h" // IO_VASPACE_A +#include "class/cl00fc.h" // FABRIC_VASPACE_A + +NV_STATUS +vmmCreateVaspace_IMPL +( + OBJVMM *pVmm, + NvU32 classId, + NvU32 vaspaceId, + NvU32 gpuMask, + NvU64 vaStart, + NvU64 vaLimit, + NvU64 vaStartInternal, + NvU64 vaLimitInternal, + OBJVASPACE *pPteSpaceMap, + NvU32 flags, + OBJVASPACE **ppVAS +) +{ + NV_STATUS status = NV_OK; + const NVOC_CLASS_INFO *pClassInfo; + Dynamic *pNewObj; + ADDRESS_TRANSLATION addressTranslation; + + NV_ASSERT_OR_RETURN(ppVAS != NULL, NV_ERR_INVALID_ARGUMENT); + + // + // IOMMU vaspaces may be created for a device before the device itself + // has been created, so there isn't an OBJGPU to get here yet. In these + // cases, the vaspaceId is used to correlate the vaspace with the GPU (it + // is the GPU ID). + // + if (gpuMask == 0) + { + NV_ASSERT_OR_RETURN(IO_VASPACE_A == classId, NV_ERR_INVALID_ARGUMENT); + } + + switch (classId) + { + case IO_VASPACE_A: + addressTranslation = AT_PA; + pClassInfo = RMCFG_MODULE_IOVASPACE ? classInfo(OBJIOVASPACE) : NULL; + // + // For IOMMU vaspace, there is only one per vaspaceID. See if + // vaspace for this vaspaceID already exists, if it does, just increment + // the refcount. 
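//
// A distilled sketch of the lookup-or-create-with-refcount pattern used
// below for IOMMU VA spaces: one object per ID, shared via reference
// counting, deleted only when the last reference is dropped (all Ex* names
// invented):
//
#include <stddef.h>
#include <stdlib.h>

typedef struct ExVas
{
    unsigned      id;
    unsigned      refCnt;
    struct ExVas *pNext;
} ExVas;

static ExVas *g_list;

static ExVas *exVasGetOrCreate(unsigned id)
{
    for (ExVas *p = g_list; p != NULL; p = p->pNext)
    {
        if (p->id == id)
        {
            p->refCnt++;  // existing instance: just take another reference
            return p;
        }
    }

    ExVas *p = calloc(1, sizeof(*p));
    if (p == NULL)
        return NULL;
    p->id     = id;
    p->refCnt = 1;
    p->pNext  = g_list;
    g_list    = p;
    return p;
}

// Release mirrors vmmDestroyVaspace below: delete only at refcount zero.
static void exVasPut(ExVas *pVas)
{
    if (--pVas->refCnt != 0)
        return;

    for (ExVas **pp = &g_list; *pp != NULL; pp = &(*pp)->pNext)
    {
        if (*pp == pVas)
        {
            *pp = pVas->pNext;
            break;
        }
    }
    free(pVas);
}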
+ // + if (vmmGetVaspaceFromId(pVmm, vaspaceId, classId, ppVAS) == NV_OK) + { + vaspaceIncRefCnt(*ppVAS); + return NV_OK; + } + break; + default: // Unsupported class + addressTranslation = AT_GPU; + pClassInfo = NULL; + break; + } + + if (pClassInfo == NULL) + { + *ppVAS = NULL; + return NV_ERR_INVALID_CLASS; + } + + status = objCreateDynamic(&pNewObj, pVmm, pClassInfo); + if (NV_OK != status) + return status; + + *ppVAS = dynamicCast(pNewObj, OBJVASPACE); + + (*ppVAS)->addressTranslation = addressTranslation; + (*ppVAS)->vaspaceId = vaspaceId; + (*ppVAS)->gpuMask = gpuMask; + + vaspaceIncRefCnt(*ppVAS); + + status = vaspaceConstruct_(*ppVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags); + if (status != NV_OK) + { + vmmDestroyVaspace(pVmm, *ppVAS); + *ppVAS = NULL; + return status; + } + + (*ppVAS)->vasUniqueId = portAtomicIncrementU32(&SYS_GET_INSTANCE()->currentVasUniqueId); + return status; +} + +void +vmmDestroyVaspace_IMPL +( + OBJVMM *pVmm, + OBJVASPACE *pVAS +) +{ + OBJVASPACE *pTargetVAS = pVAS; + + vaspaceDecRefCnt(pTargetVAS); + + // + // Call the utility routine that does the object deletion when the last + // reference has been destroyed. + // + if (0 == pTargetVAS->refCnt) + { + objDelete(pTargetVAS); + pTargetVAS = NULL; + } +} + +NV_STATUS +vmmGetVaspaceFromId_IMPL +( + OBJVMM *pVmm, + NvU32 vaspaceId, + NvU32 classId, + OBJVASPACE **ppVAS +) +{ + Object *pIter = NULL; + OBJVASPACE *pVAS = NULL; + OBJIOVASPACE *pIOVAS = NULL; + + pIter = objGetChild(staticCast(pVmm, Object)); + while (pIter != NULL) + { + switch (classId) + { + case IO_VASPACE_A: + pIOVAS = dynamicCast(pIter, OBJIOVASPACE); + if (pIOVAS != NULL) + { + pVAS = staticCast(pIOVAS, OBJVASPACE); + } + break; + default: + NV_ASSERT(0); + break; + } + + if ((pVAS != NULL) && (pVAS->vaspaceId == vaspaceId)) + { + *ppVAS = pVAS; + return NV_OK; + } + + pIter = objGetSibling(pIter); + } + + *ppVAS = NULL; + return NV_ERR_OBJECT_NOT_FOUND; +} + diff --git a/src/nvidia/src/kernel/os/os_init.c b/src/nvidia/src/kernel/os/os_init.c new file mode 100644 index 0000000..2eba01a --- /dev/null +++ b/src/nvidia/src/kernel/os/os_init.c @@ -0,0 +1,408 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/***************************** HW State Routines ***************************\ +* * +* Common Operating System Object Function Pointer Initializations. * +* All the function pointers in the OS object are initialized here. * +* The initializations are broken into 'bite-sized' sub-functions * +* for ease of reading. Any functions that are common among all OS's * +* are directly initialized to the common function name. However, * +* the actual code for that function may be different from one OS * +* to the other; each OS compiles separately. Any function pointers * +* that are either not used by some OS's or are initialized to * +* different functions by different OS's are 'stubbed' out by * +* initializing them to a 'stub' function. * +\***************************************************************************/ + +#include "os/os.h" +#include "os/os_stub.h" +#include "core/system.h" +#include "core/locks.h" +#include "gpu/gpu.h" +#include "gpu/gpu_access.h" +#include "kernel/diagnostics/xid_context.h" +#include "nv_ref.h" +#include "virtualization/hypervisor/hypervisor.h" + + +// Bug check code string common to all OS +const char *ppOsBugCheckBugcodeStr[] = OS_BUG_CHECK_BUGCODE_STR; + +NV_STATUS +constructObjOS(OBJOS *pOS) +{ + // Now call the OS specific initialization + osInitObjOS(pOS); + + return NV_OK; +} + +// +// Function to find the maximum number of cores in the system +// +NvU32 osGetMaximumCoreCount(void) +{ + // + // Windows provides an API to query this that supports CPU hotadd that our + // cpuid() didn't catch, so favor that. + // +#if NVOS_IS_WINDOWS && PORT_IS_KERNEL_BUILD && !defined(NV_MODS) + extern unsigned long KeQueryMaximumProcessorCountEx(unsigned short); + return KeQueryMaximumProcessorCountEx(0xFFFF); // All groups. +#else + OBJSYS *pSys = SYS_GET_INSTANCE(); + return pSys ? pSys->cpuInfo.maxLogicalCpus : 0; +#endif +} + +/*! + * @brief Generic OS 8-bit GPU register write function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevWriteReg008 function. + * + * @param[in] pGpu - The GPU context specific to this call. + * @param[in] thisAddress - Address of the register to be written + * @param[in] thisValue - Value to be written + * + */ +void osGpuWriteReg008( + OBJGPU *pGpu, + NvU32 thisAddress, + NvU8 thisValue +) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + osDevWriteReg008(pGpu, pMapping, thisAddress, thisValue); +} + +/*! + * @brief Generic OS 16-bit GPU register write function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevWriteReg016 function. + * + * @param[in] pGpu - The GPU context specific to this call. + * @param[in] thisAddress - Address of the register to be written + * @param[in] thisValue - Value to be written + * + */ +void osGpuWriteReg016( + OBJGPU *pGpu, + NvU32 thisAddress, + NvV16 thisValue +) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + osDevWriteReg016(pGpu, pMapping, thisAddress, thisValue); +} + +/*! + * @brief Generic OS 32-bit GPU register write function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevWriteReg032 function. + * + * @param[in] pGpu - The GPU context specific to this call. 
+ * @param[in] thisAddress - Address of the register to be written + * @param[in] thisValue - Value to be written + * + */ +void osGpuWriteReg032( + OBJGPU *pGpu, + NvU32 thisAddress, + NvV32 thisValue +) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + osDevWriteReg032(pGpu, pMapping, thisAddress, thisValue); +} + +/*! + * @brief Generic OS 8-bit GPU register read function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevReadReg008 function. + * + * @param[in] pGpu - The GPU context specific to this call. + * @param[in] thisAddress - Address of the register to be read. + * + * @return The value read from the register + */ +NvU8 osGpuReadReg008( + OBJGPU *pGpu, + NvU32 thisAddress +) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + return osDevReadReg008(pGpu, pMapping, thisAddress); +} + +/*! + * @brief Generic OS 16-bit GPU register read function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevReadReg016 function. + * + * @param[in] pGpu - The GPU context specific to this call. + * @param[in] thisAddress - Address of the register to be read. + * + * @return The value read from the register + */ +NvU16 osGpuReadReg016( + OBJGPU *pGpu, + NvU32 thisAddress +) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + return osDevReadReg016(pGpu, pMapping, thisAddress); +} + +/*! + * @brief Generic OS 32-bit GPU register read function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevReadReg032 function. + * + * @param[in] pGpu - The GPU context specific to this call. + * @param[in] thisAddress - Address of the register to be read. + * + * @return The value read from the register + */ +NvU32 osGpuReadReg032( + OBJGPU *pGpu, + NvU32 thisAddress +) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + return osDevReadReg032(pGpu, pMapping, thisAddress); +} + +void vgpuDevWriteReg032( + OBJGPU *pGpu, + NvU32 thisAddress, + NvV32 thisValue, + NvBool *vgpuHandled +) +{ + + *vgpuHandled = NV_FALSE; +} + +NvU32 vgpuDevReadReg032( + OBJGPU *pGpu, + NvU32 thisAddress, + NvBool *vgpuHandled +) +{ + + *vgpuHandled = NV_FALSE; + return 0; +} + +NvU64 osGetMaxUserVa(void); + +/** + * @brief Get the Max User VA address shift + * + * The max user VA address shift may not be power-2 aligned, + * so do some math to round it up. + * + * @return max user VA address shift + */ +NvU32 +osGetCpuVaAddrShift(void) +{ + NvU64 maxUserVa = osGetMaxUserVa(); + + // + // Add 1 to account for kernel VA space, on the assumption + // that kernel VA space is the top half of the address space. + // + return (64 - portUtilCountLeadingZeros64(maxUserVa - 1)) + 1; +} + +/*! + * Some data such as Bindata array are placed on paged memory. Access to paged segment + * on high IRQL is not allowed on some platform (e.g. Windows). The issue could be + * difficult to debug as the repro rate is random. The failure only happens when the + * target segment is paged out. + * + * This utility function checks whether it is safe to access paged segments. When the + * function is called at high IRQL path, it gives an assertion with a message. On + * developer branches, such as chips_a, it triggers an intended Bugcheck. 
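//
// A worked example for osGetCpuVaAddrShift() above, using a hypothetical
// 47-bit user VA limit of 0x0000800000000000 (typical of x86-64 user space).
// exClz64 is a portable stand-in for portUtilCountLeadingZeros64.
//
#include <stdint.h>

static unsigned exClz64(uint64_t v)
{
    unsigned n = 0;
    while (n < 64 && !(v & (1ull << 63))) { v <<= 1; n++; }
    return n;
}

static unsigned exCpuVaAddrShift(uint64_t maxUserVa)
{
    // 64 - clz64(maxUserVa - 1) rounds the user span up to a power of two;
    // the +1 accounts for the kernel half of the address space.
    return (64u - exClz64(maxUserVa - 1)) + 1u;
}
// exCpuVaAddrShift(0x0000800000000000ull):
//   maxUserVa - 1 has bit 46 as its highest set bit, so clz64 == 17,
//   64 - 17 == 47 user bits, and the result is 48.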
+ * + * @param[in] void No input required + * + * @return void To avoid random failure, do not return and check the error + * code of this function. BSOD D1 or internal BSOD provides + * full call stack that is much helpful for debugging. + */ + +void osPagedSegmentAccessCheck(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + + // check whether it is safe to access/alloc Paged memory + if (! portMemExSafeForPagedAlloc() || pOS->getProperty(pOS, PDB_PROP_OS_NO_PAGED_SEGMENT_ACCESS)) + { + NV_ASSERT_FAILED("Paged memory access is prohibited"); + + } +} + +/*! + * @brief Retrieves a registry key DWORD value and returns the best result + * from both nbsi and os registry tables. + * + * @param[in] OBJGPU pointer + * @param[in] pRegParmStr Registry key string + * @param[out] pData Registry key DWORD value + * + * @return NV_OK if key was found and data returned in pData + * @return Other unexpected errors + */ +NV_STATUS osReadRegistryDword +( + OBJGPU *pGpu, + const char *pRegParmStr, + NvU32 *pData +) +{ + NV_STATUS status; + + NV_ASSERT_OR_RETURN(pRegParmStr != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pData != NULL, NV_ERR_INVALID_ARGUMENT); + + status = osReadRegistryDwordBase(pGpu, pRegParmStr, pData); + + return status; +} + +/*! + * @brief Retrieves a registry key STRING value and returns the best result + * from both nbsi and os registry tables. + * + * @param[in] OBJGPU pointer + * @param[in] pRegParmStr Registry key string + * @param[out] pData Registry key STRING value + * @param[in] pCbLen Count of bytes in registry value. + * + * @return NV_OK if key was found and data returned in pData + * @return Other unexpected errors + */ +NV_STATUS osReadRegistryString +( + OBJGPU *pGpu, + const char *pRegParmStr, + NvU8 *pData, + NvU32 *pCbLen +) +{ + NV_STATUS status; + NV_ASSERT_OR_RETURN(pRegParmStr != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pCbLen != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(!(*pCbLen != 0 && pData == NULL), NV_ERR_INVALID_ARGUMENT); + + status = osReadRegistryStringBase(pGpu, pRegParmStr, pData, pCbLen); + + return status; +} + +static void nvErrorLog2(void *pVoid, XidContext context, NvBool oobLogging, const char *pFormat, va_list arglist) +{ + if ((pFormat == NULL) || (*pFormat == '\0')) + { + return; + } + + OBJGPU *pGpu = reinterpretCast(pVoid, OBJGPU *); + +#if RMCFG_MODULE_OOB || \ + (RMCFG_MODULE_KERNEL_RC && !RMCFG_FEATURE_PLATFORM_GSP) + char *errorString = portMemAllocNonPaged(MAX_ERROR_STRING); + if (errorString == NULL) + goto done; + + unsigned msglen; + va_list arglistCpy; + + va_copy(arglistCpy, arglist); + msglen = nvDbgVsnprintf(errorString, MAX_ERROR_STRING, pFormat, arglistCpy); + va_end(arglistCpy); + + if (msglen == 0) + goto done; + + if (pGpu != NULL && oobLogging) + { + gpuLogOobXidMessage(pGpu, context.xid, errorString, msglen); + } + +done: + portMemFree(errorString); +#endif // RMCFG_MODULE_OOB || (RMCFG_MODULE_KERNEL_RC && + // !RMCFG_FEATURE_PLATFORM_GSP) + + osErrorLogV(pGpu, context, pFormat, arglist); +} + +void nvErrorLog(void *pVoid, XidContext context, const char *pFormat, va_list arglist) +{ + nvErrorLog2(pVoid, context, NV_TRUE, pFormat, arglist); +} + +void +nvErrorLog_va +( + void * pVoid, + NvU32 num, + const char * pFormat, + ... 
+)
+{
+    va_list arglist;
+
+    va_start(arglist, pFormat);
+    nvErrorLog2(pVoid, (XidContext){.xid = num}, NV_TRUE, pFormat, arglist);
+    va_end(arglist);
+}
+
+void
+nvErrorLog2_va
+(
+    void       *pVoid,
+    XidContext  context,
+    NvBool      oobLogging,
+    const char *pFormat,
+    ...
+)
+{
+    va_list arglist;
+
+    va_start(arglist, pFormat);
+    nvErrorLog2(pVoid, context, oobLogging, pFormat, arglist);
+    va_end(arglist);
+}
diff --git a/src/nvidia/src/kernel/os/os_sanity.c b/src/nvidia/src/kernel/os/os_sanity.c
new file mode 100644
index 0000000..f33bb7d
--- /dev/null
+++ b/src/nvidia/src/kernel/os/os_sanity.c
@@ -0,0 +1,55 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**************************************************************************************************************
+*
+*    Description:
+*        Sanity-test the system environment to verify that the driver can run properly.
+*
+**************************************************************************************************************/
+
+#include <core/system.h>
+#include <core/locks.h>
+#include <os/os.h>
+#include <gpu/gpu.h>
+
+NV_STATUS osSanityTestIsr(
+    OBJGPU *pGpu
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+//
+// Add various system environment start-up tests here.
+// Currently this just verifies the interrupt hookup, but it could also verify other details.
+//
+NV_STATUS osVerifySystemEnvironment(
+    OBJGPU *pGpu
+)
+{
+    NV_STATUS status = NV_OK;
+
+    return status;
+}
+
diff --git a/src/nvidia/src/kernel/os/os_stubs.c b/src/nvidia/src/kernel/os/os_stubs.c
new file mode 100644
index 0000000..bf95ffc
--- /dev/null
+++ b/src/nvidia/src/kernel/os/os_stubs.c
@@ -0,0 +1,726 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/***************************** HW State Routines ***************************\ +* * +* Module: os_stubs.c * +* Stubs for all the public stub routines * +\***************************************************************************/ + +#include "os/os_stub.h" + +void osQADbgRegistryInit(void) +{ + return; +} + +#if !(RMCFG_FEATURE_PLATFORM_WINDOWS && NVCPU_IS_X86_64) && \ + !(RMCFG_FEATURE_PLATFORM_UNIX && NVCPU_IS_X86_64) && \ + !RMCFG_FEATURE_PLATFORM_MODS +NvU32 osNv_rdcr4(void) +{ + return 0; +} +#endif + +NvU64 osNv_rdxcr0(void) +{ + return 0; +} + +#if !(RMCFG_FEATURE_PLATFORM_WINDOWS && NVCPU_IS_X86_64) && \ + !(RMCFG_FEATURE_PLATFORM_UNIX && NVCPU_IS_X86_64) && \ + !RMCFG_FEATURE_PLATFORM_MODS +int osNv_cpuid(int arg1, int arg2, NvU32 *arg3, + NvU32 *arg4, NvU32 *arg5, NvU32 *arg6) +{ + return 0; +} +#endif + +NV_STATUS osSimEscapeWrite(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, NvU32 Value) +{ + return NV_ERR_GENERIC; +} + +NV_STATUS osSimEscapeWriteBuffer(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, void* pBuffer) +{ + return NV_ERR_GENERIC; +} + +NV_STATUS osSimEscapeRead(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, NvU32 *Value) +{ + return NV_ERR_GENERIC; +} + +NV_STATUS osSimEscapeReadBuffer(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, void* pBuffer) +{ + return NV_ERR_GENERIC; +} + +NV_STATUS osCallACPI_MXMX(OBJGPU *pGpu, NvU32 AcpiId, NvU8 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCallACPI_BCL(OBJGPU *pGpu, NvU32 acpiId, NvU32 *pOut, NvU16 *size) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCallACPI_OPTM_GPUON(OBJGPU *pGpu) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCallACPI_NVHG_GPUON(OBJGPU *pGpu, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCallACPI_NVHG_GPUOFF(OBJGPU *pGpu, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCallACPI_NVHG_GPUSTA(OBJGPU *pGpu, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCallACPI_NVHG_MXDS(OBJGPU *pGpu, NvU32 AcpiId, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCallACPI_NVHG_MXMX(OBJGPU *pGpu, NvU32 AcpiId, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCallACPI_NVHG_DOS(OBJGPU *pGpu, NvU32 AcpiId, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCallACPI_NVHG_DCS(OBJGPU *pGpu, NvU32 AcpiId, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCallACPI_MXID(OBJGPU *pGpu, NvU32 ulAcpiId, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCallACPI_LRST(OBJGPU *pGpu, NvU32 ulAcpiId, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvBool osCheckCallback(OBJGPU *pGpu) +{ + return NV_FALSE; +} + +RC_CALLBACK_STATUS +osRCCallback +( + OBJGPU *pGpu, + NvHandle hClient, // IN The client RC is on + NvHandle hDevice, // IN The device RC is on + NvHandle hFifo, // IN The channel or TSG RC is on + NvHandle hChannel, // IN The channel RC is on + NvU32 errorLevel, // IN Error Level + NvU32 errorType, // IN Error type + NvU32 
*data, // IN/OUT context of RC handler + void *pfnRmRCReenablePusher +) +{ + return RC_CALLBACK_IGNORE; +} + +NvBool osCheckCallback_v2(OBJGPU *pGpu) +{ + return NV_FALSE; +} + +RC_CALLBACK_STATUS +osRCCallback_v2 +( + OBJGPU *pGpu, + NvHandle hClient, // IN The client RC is on + NvHandle hDevice, // IN The device RC is on + NvHandle hFifo, // IN The channel or TSG RC is on + NvHandle hChannel, // IN The channel RC is on + NvU32 errorLevel, // IN Error Level + NvU32 errorType, // IN Error type + NvBool bDeferRcRequested, // IN defer RC state + NvU32 *data, // IN/OUT context of RC handler + void *pfnRmRCReenablePusher +) +{ + return RC_CALLBACK_IGNORE; +} + +RmPhysAddr +osPageArrayGetPhysAddr(OS_GPU_INFO *pOsGpuInfo, void* pPageData, NvU32 pageIndex) +{ + NV_ASSERT(0); + return 0; +} + +void osInternalReserveAllocCallback(NvU64 offset, NvU64 size, NvU32 gpuId) +{ +} + +void osInternalReserveFreeCallback(NvU64 offset, NvU32 gpuId) +{ +} + +NV_STATUS osGetCurrentProcessGfid(NvU32 *pGfid) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osSetRegistryList(nv_reg_entry_t *pRegList) +{ + return NV_ERR_NOT_SUPPORTED; +} + +nv_reg_entry_t *osGetRegistryList(void) +{ + return NULL; +} + +#if !(RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_PLATFORM_DCE) || \ + (RMCFG_FEATURE_PLATFORM_UNIX && !RMCFG_FEATURE_TEGRA_SOC_NVDISPLAY) +NV_STATUS osTegraSocGpioGetPinState( + OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 *pArg3 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void osTegraSocGpioSetPinState( + OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3 +) +{ +} + +NV_STATUS osTegraSocGpioSetPinDirection( + OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osTegraSocGpioGetPinDirection( + OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 *pArg3 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osTegraSocGpioGetPinNumber( + OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 *pArg3 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osTegraSocGpioGetPinInterruptStatus( + OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3, + NvBool *pArg4 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osTegraSocGpioSetPinInterrupt( + OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocResetMipiCal +( + OS_GPU_INFO *pOsGpuInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osGetValidWindowHeadMask +( + OS_GPU_INFO *pArg1, + NvU64 *pWindowHeadMask +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvBool +osTegraSocIsDsiPanelConnected +( + OS_GPU_INFO *pOsGpuInfo +) +{ + return NV_FALSE; +} + +NV_STATUS +osTegraSocDsiParsePanelProps +( + OS_GPU_INFO *pOsGpuInfo, + void *dsiPanelInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocDsiPanelEnable +( + OS_GPU_INFO *pOsGpuInfo, + void *dsiPanelInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocDsiPanelReset +( + OS_GPU_INFO *pOsGpuInfo, + void *dsiPanelInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void +osTegraSocDsiPanelDisable +( + OS_GPU_INFO *pOsGpuInfo, + void *dsiPanelInfo +) +{ + return; +} + +void +osTegraSocDsiPanelCleanup +( + OS_GPU_INFO *pOsGpuInfo, + void *dsiPanelInfo +) +{ + return; +} + +NV_STATUS +osTegraSocHspSemaphoreAcquire +( + OBJGPU *pGpu, + NvU32 ownerId, + NvBool bAcquire, + NvU64 timeout +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvBool +osTegraSocGetHdcpEnabled(OS_GPU_INFO *pOsGpuInfo) +{ + return NV_TRUE; +} + +void +osTegraGetDispSMMUStreamIds +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 *dispIsoStreamId, + NvU32 *dispNisoStreamId +) +{ + /* 
NV_U32_MAX is used to indicate that the platform does not support SMMU */ + *dispIsoStreamId = NV_U32_MAX; + *dispNisoStreamId = NV_U32_MAX; +} +#endif + +NV_STATUS +osTegraSocParseFixedModeTimings +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 dcbIndex, + NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS *pTimingsPerStream, + NvU8 *pNumTimings +) +{ + return NV_OK; +} + +NV_STATUS +osTegraSocPowerManagement +( + OS_GPU_INFO *pOsGpuInfo, + NvBool bInPMTransition, + NvU32 newPMLevel +) +{ + return NV_OK; +} + +NV_STATUS osLockPageableDataSection(RM_PAGEABLE_SECTION *pSection) +{ + return NV_OK; +} + +NV_STATUS osUnlockPageableDataSection(RM_PAGEABLE_SECTION *pSection) +{ + return NV_OK; +} + +NV_STATUS osIsKernelBuffer(void *pArg1, NvU32 arg2) +{ + return NV_OK; +} + +NV_STATUS osMapViewToSection(OS_GPU_INFO *pArg1, + void *pSectionHandle, + void **ppAddress, + NvU64 actualSize, + NvU64 sectionOffset, + NvBool bIommuEnabled) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osUnmapViewFromSection(OS_GPU_INFO *pArg1, + void *pAddress, + NvBool bIommuEnabled) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osSrPinSysmem( + OS_GPU_INFO *pArg1, + NvU64 commitSize, + void *pMdl +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osSrUnpinSysmem(OS_GPU_INFO *pArg1) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCreateMemFromOsDescriptorInternal( + OBJGPU *pGpu, + void *pAddress, + NvU32 flags, + NvU64 size, + MEMORY_DESCRIPTOR **ppMemDesc, + NvBool bCachedKernel, + RS_PRIV_LEVEL privilegeLevel +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osReserveCpuAddressSpaceUpperBound(void **ppSectionHandle, + NvU64 maxSectionSize) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void osReleaseCpuAddressSpaceUpperBound(void *pSectionHandle) +{ +} + +void osIoWriteDword( + NvU32 port, + NvU32 data +) +{ +} + +NvU32 osIoReadDword( + NvU32 port +) +{ + return 0; +} + +NvBool osIsVga( + OS_GPU_INFO *pArg1, + NvBool bIsGpuPrimaryDevice +) +{ + return bIsGpuPrimaryDevice; +} + +void osInitOSHwInfo( + OBJGPU *pGpu +) +{ +} + +void osDestroyOSHwInfo( + OBJGPU *pGpu +) +{ +} + +NV_STATUS osDoFunctionLevelReset( + OBJGPU *pGpu +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvBool osGrService( + OS_GPU_INFO *pOsGpuInfo, + NvU32 grIdx, + NvU32 intr, + NvU32 nstatus, + NvU32 addr, + NvU32 dataLo +) +{ + return NV_FALSE; +} + +NvBool osDispService( + NvU32 Intr0, + NvU32 Intr1 +) +{ + return NV_FALSE; +} + +NV_STATUS osDeferredIsr( + OBJGPU *pGpu +) +{ + return NV_OK; +} + +void osSetSurfaceName( + void *pDescriptor, + char *name +) +{ +} + +NV_STATUS osGetAcpiTable( + NvU32 tableSignature, + void **ppTable, + NvU32 tableSize, + NvU32 *retSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osInitGetAcpiTable(void) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void osDbgBugCheckOnAssert(void) +{ + return; +} + +NvBool osQueueDpc(OBJGPU *pGpu) +{ + return NV_FALSE; +} + +NvBool osBugCheckOnTimeoutEnabled(void) +{ + return NV_FALSE; +} + +NvBool osDbgBreakpointEnabled(void) +{ + return NV_TRUE; +} + +NV_STATUS +osGetSysmemInfo +( + OBJGPU *pGpu, + NvU64 *pSysmemBaseAddr, + NvU64 *pSysmemTotalSize +) +{ + // Bug 4377373 - TODO: Need to add proper implementation for non MODS platform. 
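+    // As stubbed, every caller sees a fixed [0, 4 GiB) window: base 0 and a
+    // total size of (1ULL << 32) bytes, i.e. 4,294,967,296 bytes = 4 GiB.
+    // A hypothetical caller sketch (names assumed, not part of this change):
+    //
+    //     NvU64 base, size;
+    //     if (osGetSysmemInfo(pGpu, &base, &size) == NV_OK)
+    //     {
+    //         NV_PRINTF(LEVEL_INFO, "sysmem base=0x%llx size=0x%llx\n",
+    //                   base, size);
+    //     }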
+ *pSysmemBaseAddr = 0; + *pSysmemTotalSize = (1ULL << 32); + + return NV_OK; +} + +NV_STATUS osNvifMethod( + OBJGPU *pGpu, + NvU32 func, + NvU32 subFunc, + void *pInParam, + NvU16 inParamSize, + NvU32 *pOutStatus, + void *pOutData, + NvU16 *pOutDataSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osNvifInitialize( + OBJGPU *pGpu +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osGetUefiVariable +( + const char *pName, + LPGUID pGuid, + NvU8 *pBuffer, + NvU32 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osGetNvGlobalRegistryDword +( + OBJGPU *pGpu, + const char *pRegParmStr, + NvU32 *pData +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osIsr +( + OBJGPU *pGpu +) +{ + return NV_OK; +} + +NvBool osLockShouldToggleInterrupts +( + OBJGPU *pGpu +) +{ + return NV_TRUE; +} + +void osEnableInterrupts +( + OBJGPU *pGpu +) +{ +} + +void osDisableInterrupts +( + OBJGPU *pGpu, + NvBool bIsr +) +{ +} + +NV_STATUS osInitMapping +( + OBJGPU *pGpu +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +#if !(RMCFG_FEATURE_PLATFORM_UNIX) || \ + (RMCFG_FEATURE_PLATFORM_UNIX && !RMCFG_FEATURE_TEGRA_SOC_NVDISPLAY) +NV_STATUS +osTegraSocDpUphyPllInit(OS_GPU_INFO *pOsGpuInfo, NvU32 link_rate, NvU32 lanes) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocDpUphyPllDeInit(OS_GPU_INFO *pOsGpuInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +#endif + + +NV_STATUS osGetPcieCplAtomicsCaps +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 *pMask +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osMapGsc +( + NvU64 gsc_base, + NvU64 *va +) +{ + return NV_ERR_NOT_SUPPORTED; +} + diff --git a/src/nvidia/src/kernel/os/os_timer.c b/src/nvidia/src/kernel/os/os_timer.c new file mode 100644 index 0000000..e2950eb --- /dev/null +++ b/src/nvidia/src/kernel/os/os_timer.c @@ -0,0 +1,454 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief This file contains platform-independent code for the 1 Hz OS timer. + */ + +#include "gpu/timer/objtmr.h" +#include "core/thread_state.h" +#include "core/locks.h" + +static NvBool _os1HzCallbackIsOnList(OBJTMR *pTmr, OS1HZPROC callback, void *pData, NvU32 flags); +static NV_STATUS _os1HzCallback(OBJGPU *pGpu, OBJTMR *pTmr, TMR_EVENT *pTmrEvent); + +/*! 
+ * @brief Initialize 1Hz callbacks
+ *
+ * Initialize the 1Hz callback list and create a timer event
+ * (if using PTIMER events).
+ *
+ * @param[in,out] pTmr TMR object pointer
+ */
+NV_STATUS
+osInit1HzCallbacks
+(
+    OBJTMR *pTmr
+)
+{
+    NvU32 i;
+
+    // Initialize the OS 1 Hz timer callback list.
+    pTmr->pOs1HzCallbackList = NULL;
+    pTmr->pOs1HzCallbackFreeList = pTmr->os1HzCallbackTable;
+
+    // Fill in all the forward pointers in the callback table.
+    for (i = 0; i < (TMR_NUM_CALLBACKS_OS - 1); i++)
+    {
+        pTmr->os1HzCallbackTable[i].next = &pTmr->os1HzCallbackTable[i+1];
+    }
+    pTmr->os1HzCallbackTable[i].next = NULL;
+
+    if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS))
+    {
+        NV_ASSERT_OK_OR_RETURN(tmrEventCreate(pTmr, &pTmr->pOs1HzEvent,
+                                              _os1HzCallback, NULL, TMR_FLAG_RECUR));
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Destroy 1Hz callbacks
+ *
+ * Destroy the 1Hz callback list and free the timer event
+ * (if using PTIMER events).
+ *
+ * @param[in,out] pTmr TMR object pointer
+ */
+NV_STATUS
+osDestroy1HzCallbacks
+(
+    OBJTMR *pTmr
+)
+{
+    if (pTmr->pOs1HzEvent != NULL)
+    {
+        tmrEventDestroy(pTmr, pTmr->pOs1HzEvent);
+        pTmr->pOs1HzEvent = NULL;
+    }
+
+    pTmr->pOs1HzCallbackList = NULL;
+    pTmr->pOs1HzCallbackFreeList = NULL;
+    return NV_OK;
+}
+
+/*!
+ * @brief Timer function to insert a 1Hz callback into the list.
+ *
+ * This function is used to insert/register the 1Hz callback into the
+ * callback list.
+ *
+ * @param[in,out] pGpu     GPU object pointer
+ * @param[in]     callback OS1HZPROC callback function pointer
+ * @param[in]     pData    Unique identifier for the callback
+ * @param[in]     flags    Callback flags
+ *
+ * @return NV_OK The callback has been added
+ * @return NV_ERR_INVALID_REQUEST The callback has not been added
+ */
+NV_STATUS
+osSchedule1HzCallback
+(
+    OBJGPU    *pGpu,
+    OS1HZPROC  callback,
+    void      *pData,
+    NvU32      flags
+)
+{
+    OBJTMR          *pTmr = GPU_GET_TIMER(pGpu);
+    OS1HZTIMERENTRY *pEntry;
+    NV_STATUS        nvStatus = NV_OK;
+
+    // Grab the next free timer entry.
+    if ((pTmr->pOs1HzCallbackFreeList != NULL) &&
+        !_os1HzCallbackIsOnList(pTmr, callback, pData, flags))
+    {
+        if ((pTmr->pOs1HzCallbackList == NULL) && (pTmr->pOs1HzEvent != NULL))
+        {
+            // First one. Add 1Hz callback to timer events.
+            NV_ASSERT_OK(tmrEventScheduleRelSec(pTmr, pTmr->pOs1HzEvent, 1));
+        }
+
+        pEntry = pTmr->pOs1HzCallbackFreeList;
+        pTmr->pOs1HzCallbackFreeList = pEntry->next;
+
+        pEntry->callback = callback;
+        pEntry->data = pData;
+        pEntry->flags = flags;
+
+        pEntry->next = pTmr->pOs1HzCallbackList;
+        pTmr->pOs1HzCallbackList = pEntry;
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_INFO, "Callback registration FAILED!\n");
+        nvStatus = NV_ERR_INVALID_REQUEST;
+    }
+
+    return nvStatus;
+}
+
+/*!
+ * @brief Timer function to remove 1Hz callback from the list.
+ *
+ * This function is used to remove/unregister the 1Hz callback from
+ * the callback list.
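+ *
+ * A minimal pairing sketch (hypothetical caller, not part of this change):
+ *
+ *     osSchedule1HzCallback(pGpu, myCb, pMyData, NV_OS_1HZ_REPEAT);
+ *     ...                   // myCb(pGpu, pMyData) runs about once a second
+ *     osRemove1HzCallback(pGpu, myCb, pMyData);
+ *
+ * Note that only entries registered with NV_OS_1HZ_REPEAT are matched here;
+ * one-shot entries return themselves to the free list after they run.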
+ *
+ * @param[in,out] pGpu     GPU object pointer
+ * @param[in]     callback OS1HZPROC callback function pointer
+ * @param[in]     pData    Unique identifier for the callback
+ */
+void
+osRemove1HzCallback
+(
+    OBJGPU    *pGpu,
+    OS1HZPROC  callback,
+    void      *pData
+)
+{
+    OBJTMR           *pTmr = GPU_GET_TIMER(pGpu);
+    OS1HZTIMERENTRY  *pEntry;
+    OS1HZTIMERENTRY **ppEntryPtr;
+
+    ppEntryPtr = &pTmr->pOs1HzCallbackList;
+    while ((*ppEntryPtr) != NULL)
+    {
+        pEntry = *ppEntryPtr;
+        if ((pEntry->callback == callback) &&
+            (pEntry->data == pData) &&
+            (pEntry->flags & NV_OS_1HZ_REPEAT))
+        {
+            *ppEntryPtr = pEntry->next;
+            pEntry->next = pTmr->pOs1HzCallbackFreeList;
+            pEntry->data = NULL;
+            pEntry->callback = NULL;
+            pEntry->flags = NV_OS_1HZ_REPEAT;
+            pTmr->pOs1HzCallbackFreeList = pEntry;
+            break;
+        }
+        ppEntryPtr = &pEntry->next;
+    }
+
+    if ((pTmr->pOs1HzCallbackList == NULL) && (pTmr->pOs1HzEvent != NULL))
+    {
+        // Last one. Remove 1Hz callback from timer events.
+        tmrEventCancel(pTmr, pTmr->pOs1HzEvent);
+    }
+}
+
+static void _osRunAll1HzCallbacks(OBJGPU *pGpu)
+{
+    OBJTMR           *pTmr = GPU_GET_TIMER(pGpu);
+    OS1HZTIMERENTRY **ppEntryPtr;
+    OS1HZPROC         pProc;
+    void             *pData;
+
+    if (!gpuIsGpuFullPower(pGpu))
+    {
+        return;
+    }
+
+    ppEntryPtr = &pTmr->pOs1HzCallbackList;
+    for (;;)
+    {
+        POS1HZTIMERENTRY entry;
+
+        // Be paranoid.
+        entry = *ppEntryPtr;
+
+        // End of list?
+        if (entry == NULL)
+            break;
+
+        // Run the callback.
+        if (entry->callback != NULL)
+        {
+            pProc = entry->callback;
+            pData = entry->data;
+            pProc(pGpu, pData);
+        }
+
+        //
+        // The proc call above can add new entries to the list.
+        // New entries are added at the beginning of the list, so
+        // *ppEntryPtr might no longer point to our current entry.
+        // If that is the case, search the list again to find our
+        // entry and recompute ppEntryPtr.
+        //
+        if (entry != *ppEntryPtr)
+        {
+            POS1HZTIMERENTRY item;
+
+            ppEntryPtr = &pTmr->pOs1HzCallbackList;
+            for (;;)
+            {
+                // Be paranoid.
+                item = *ppEntryPtr;
+
+                // End of list?
+                if (item == NULL)
+                    break;
+
+                if (item == entry)
+                {
+                    break;
+                }
+
+                ppEntryPtr = &item->next;
+            }
+
+            if (item != entry)
+            {
+                //
+                // The entry was removed from the list inside the proc.
+                // So, we don't need to do anything below. Use
+                // ppEntryPtr = NULL to indicate that for now.
+                //
+                ppEntryPtr = NULL;
+            }
+        }
+
+        //
+        // If this timer is supposed to repeat, leave it in place.
+        // Otherwise, move it to the free list.
+        //
+        if ( (ppEntryPtr != NULL) &&
+            !(entry->flags & NV_OS_1HZ_REPEAT))
+        {
+            *ppEntryPtr = entry->next;
+            entry->next = pTmr->pOs1HzCallbackFreeList;
+            pTmr->pOs1HzCallbackFreeList = entry;
+        }
+        else
+        {
+            ppEntryPtr = &entry->next;
+        }
+    }
+    pGpu->lastCallbackTime = osGetMonotonicTimeNs();
+}
+
+//
+// A TRUE return value is used by Vista to determine whether we were able to
+// acquire the lock. If we cannot acquire it, the API or an ISR/DPC holds it.
+//
+NvBool
+osRun1HzCallbacksNow
+(
+    OBJGPU *pGpu
+)
+{
+    OBJSYS           *pSys = SYS_GET_INSTANCE();
+    THREAD_STATE_NODE threadState;
+    NvBool            bAcquired = NV_TRUE;
+    GPU_MASK          lockedGpus = 0;
+#if !TLS_DPC_HAVE_UNIQUE_ID
+    NvU8 stackAllocator[TLS_ISR_ALLOCATOR_SIZE]; // ISR allocations come from this buffer
+    PORT_MEM_ALLOCATOR *pDpcAllocator;
+    pDpcAllocator = portMemAllocatorCreateOnExistingBlock(stackAllocator, sizeof(stackAllocator));
+    tlsIsrInit(pDpcAllocator);
+#endif
+
+    //
+    // LOCK:
+    //
+    // What irql are we at here?  Should we acquire the API lock in addition to
+    // or instead of the GPUs lock?
+    //
+
+    // LOCK: try to acquire GPU lock
+    if (rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_DEVICE,
+                              GPU_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_TMR,
+                              &lockedGpus) != NV_OK)
+    {
+        // Out of conflicting thread
+        bAcquired = NV_FALSE;
+        goto exit;
+    }
+
+    if (osCondAcquireRmSema(pSys->pSema) != NV_OK)
+    {
+        // UNLOCK: release GPU lock
+        rmGpuGroupLockRelease(lockedGpus, GPUS_LOCK_FLAGS_NONE);
+        // Out of conflicting thread
+        bAcquired = NV_FALSE;
+        goto exit;
+    }
+
+    threadStateInitISRAndDeferredIntHandler(&threadState, pGpu,
+        THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
+
+    _osRunAll1HzCallbacks(pGpu);
+
+exit:
+    if (bAcquired)
+    {
+        // Out of conflicting thread
+        osReleaseRmSema(pSys->pSema, NULL);
+        // UNLOCK: release GPU lock
+        rmGpuGroupLockRelease(lockedGpus, GPUS_LOCK_FLAGS_NONE);
+        threadStateFreeISRAndDeferredIntHandler(&threadState,
+            pGpu, THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
+    }
+    else
+    {
+        portAtomicSetU32(&pGpu->bCallbackQueued, NV_TRUE);
+    }
+
+#if !TLS_DPC_HAVE_UNIQUE_ID
+    tlsIsrDestroy(pDpcAllocator);
+    portMemAllocatorRelease(pDpcAllocator);
+#endif
+
+    return bAcquired;
+}
+
+void osRunQueued1HzCallbacksUnderLock(OBJGPU *pGpu)
+{
+    //
+    // In traditional SLI, we might occasionally get called with just the
+    // *sub*device lock held. Since all callbacks were written with the
+    // assumption that they hold the device lock, just bail out here.
+    //
+    if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance))
+        return;
+
+    // callbacks shouldn't run at > DISPATCH_LEVEL
+    if (!portUtilIsInterruptContext())
+    {
+        if (pGpu->bCallbackQueued)
+        {
+            _osRunAll1HzCallbacks(pGpu);
+            portAtomicSetU32(&pGpu->bCallbackQueued, NV_FALSE);
+        }
+    }
+}
+
+/*!
+ * @brief Timer function to check for a duplicate callback on the list.
+ *
+ * Walks the callback list and reports whether a matching repeating
+ * callback (same function, same pData, registered with the
+ * NV_OS_1HZ_REPEAT flag) is already present.
+ *
+ * @param[in,out] pTmr     TMR object pointer
+ * @param[in]     callback OS1HZPROC callback function pointer
+ * @param[in]     pData    Unique identifier for the callback
+ * @param[in]     flags    Callback flags
+ *
+ * @return NV_TRUE The callback has already been registered
+ * @return NV_FALSE The callback has not been registered
+ */
+static NvBool
+_os1HzCallbackIsOnList
+(
+    OBJTMR    *pTmr,
+    OS1HZPROC  callback,
+    void      *pData,
+    NvU32      flags
+)
+{
+    POS1HZTIMERENTRY pScan;
+
+    for (pScan = pTmr->pOs1HzCallbackList; pScan != NULL; pScan = pScan->next)
+    {
+        if ((pScan->callback == callback) &&
+            (pScan->data == pData) &&
+            (pScan->flags & NV_OS_1HZ_REPEAT))
+        {
+            break;
+        }
+    }
+
+    return pScan != NULL;
+}
+
+/*!
+ * @brief OS 1Hz callback function
+ *
+ * Calls all callbacks on the 1Hz list and reschedules the callback
+ * (if using PTIMER events).
+ *
+ * @param[in,out] pGpu      GPU object pointer
+ * @param[in,out] pTmr      TMR object pointer
+ * @param[in]     pTmrEvent pointer to the timer event
+ *
+ * @return NV_OK The callback was rescheduled successfully.
+ * @return NV_ERR_INVALID_ARGUMENT The callback was not rescheduled.
+ */
+static NV_STATUS
+_os1HzCallback
+(
+    OBJGPU    *pGpu,
+    OBJTMR    *pTmr,
+    TMR_EVENT *pTmrEvent
+)
+{
+    osRun1HzCallbacksNow(pGpu);
+
+    // TMR_FLAG_RECUR does not work, so reschedule it here.
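+    // This is the usual manual re-arm pattern for one-shot timer events:
+    // each expiration schedules the next one relative to "now", so the
+    // period is one second plus handler/scheduling latency rather than
+    // being phase-locked to the first expiration. Roughly:
+    //
+    //     handler() { runCallbacks(); scheduleRelSec(1); }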
+ return tmrEventScheduleRelSec(pTmr, pTmrEvent, 1); +} diff --git a/src/nvidia/src/kernel/rmapi/alloc_free.c b/src/nvidia/src/kernel/rmapi/alloc_free.c new file mode 100644 index 0000000..7b0eea5 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/alloc_free.c @@ -0,0 +1,1668 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "rmapi/rmapi.h" +#include "rmapi/rmapi_specific.h" +#include "rmapi/client.h" +#include "entry_points.h" +#include "core/locks.h" +#include "core/thread_state.h" +#include "vgpu/rpc.h" +#include "resource_desc.h" +#include "gpu/disp/disp_objs.h" +#include "gpu/disp/disp_channel.h" +#include "nvsecurityinfo.h" +#include "virtualization/hypervisor/hypervisor.h" +#include "gpu_mgr/gpu_mgr.h" +#include "platform/sli/sli.h" +#include "kernel/gpu/gsp/gsp_trace_rats_macro.h" + +#include "gpu/device/device.h" +#include "class/cl0080.h" +#include "class/clc372sw.h" + +#include "gpu/timer/tmr.h" + +// +// RM Alloc & Free internal flags -- code should be migrated to use rsresdesc +// and rmapi types directly where possible. +// +#define RM_ALLOC_STATES_NONE 0 +#define RM_ALLOC_STATES_INTERNAL_CLIENT_HANDLE ALLOC_STATE_INTERNAL_CLIENT_HANDLE // NVBIT(5) +#define RM_ALLOC_STATES_SKIP_RPC NVBIT(6) +#define RM_ALLOC_STATES_INTERNAL_ALLOC NVBIT(7) + +#define RM_FREE_STATES_NONE 0 + +static void +rmapiResourceDescToLegacyFlags +( + const RS_RESOURCE_DESC *pResDesc, + NvU32 *pAllocFlags, + NvU32 *pFreeFlags +) +{ + if (pAllocFlags) + { + *pAllocFlags = (pResDesc->flags & RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC) ? RM_LOCK_FLAGS_NONE : RM_LOCK_FLAGS_NO_GPUS_LOCK; + *pAllocFlags |= (pResDesc->flags & RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_ALLOC) ? RM_LOCK_FLAGS_GPU_GROUP_LOCK : 0; + } + + if (pFreeFlags) + { + *pFreeFlags = (pResDesc->flags & RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE) ? RM_LOCK_FLAGS_NONE : RM_LOCK_FLAGS_NO_GPUS_LOCK; + *pFreeFlags |= (pResDesc->flags & RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_FREE) ? 
RM_LOCK_FLAGS_GPU_GROUP_LOCK : 0; + } +} + +NvU32 +serverAllocClientHandleBase +( + RsServer *pServer, + NvBool bInternalHandle, + API_SECURITY_INFO *pSecInfo +) +{ + NvU32 handleBase; + NvU32 gfid = (NvU32)((NvU64)pSecInfo->pProcessToken); + + if (bInternalHandle) + { + handleBase = pServer->internalHandleBase; + } + else + { + handleBase = pServer->clientHandleBase; + + if (RMCFG_FEATURE_PLATFORM_GSP && IS_GFID_VF(gfid)) + handleBase = RS_CLIENT_GET_VF_HANDLE_BASE(gfid); + } + + return handleBase; +} + +NV_STATUS +serverAllocApiCopyIn +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams, + API_STATE **ppParamCopy +) +{ + NV_STATUS status; + API_SECURITY_INFO *pSecInfo = pRmAllocParams->pSecInfo; + NvBool bCopyInParams = pSecInfo->paramLocation == PARAM_LOCATION_USER; + RMAPI_PARAM_COPY *pParamCopy = NULL; + NvU32 allocParamsSize = 0; + void *pUserParams = pRmAllocParams->pAllocParams; + + pParamCopy = (RMAPI_PARAM_COPY*)PORT_ALLOC(g_resServ.pAllocator, sizeof(*pParamCopy)); + if (pParamCopy == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + portMemSet(pParamCopy, 0, sizeof(*pParamCopy)); + pRmAllocParams->pAllocParams = NULL; + + // Setup for access to param + // Param size is initialized to zero, and then set via rmapiParamsCopyInit + RMAPI_PARAM_COPY_INIT(*pParamCopy, pRmAllocParams->pAllocParams, NV_PTR_TO_NvP64(pUserParams), allocParamsSize, 1); + + // Look up param size based on hClass + status = rmapiParamsCopyInit(pParamCopy, pRmAllocParams->externalClassId); + if (NV_OK != status) + goto done; + + // Using the per-class info set above, pull in the parameters for this allocation + if (pParamCopy->paramsSize > 0) + { + // gain access to client's parameters via 'pKernelCtrl' + status = rmapiParamsAcquire(pParamCopy, bCopyInParams); + if (status != NV_OK) + goto done; + } + + // Prevent requesting rights before rights are enabled, just in case old code doesn't zero it properly. 
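+    // To recap the copy-in flow above (descriptive only): the parameter
+    // struct size is looked up from the external class id, then the client's
+    // alloc params are pulled in, copying from user space only when
+    // paramLocation says they originate there. The optional rights mask is
+    // handled separately below.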
+ if (!pServer->bRsAccessEnabled) + pRmAllocParams->pRightsRequested = NULL; + + if (pRmAllocParams->pRightsRequested != NULL) + { + // copyFromUser requires a non-stack buffer, allocate one to copy into + RS_ACCESS_MASK *pMaskBuffer = (RS_ACCESS_MASK*)PORT_ALLOC(g_resServ.pAllocator, sizeof(RS_ACCESS_MASK)); + if (pMaskBuffer == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + + // Mask is a fixed size, just copy it directly into allocParams + status = rmapiParamsCopyIn("RightsRequested", + pMaskBuffer, + NV_PTR_TO_NvP64(pRmAllocParams->pRightsRequested), + sizeof(RS_ACCESS_MASK), + bCopyInParams); + + portMemCopy(&pRmAllocParams->rightsRequestedCopy, sizeof(RS_ACCESS_MASK), + pMaskBuffer, sizeof(RS_ACCESS_MASK)); + + PORT_FREE(g_resServ.pAllocator, pMaskBuffer); + + if (status != NV_OK) + goto done; + + pRmAllocParams->pRightsRequested = &pRmAllocParams->rightsRequestedCopy; + } +done: + if (status != NV_OK) + { + if (pParamCopy != NULL) + PORT_FREE(g_resServ.pAllocator, pParamCopy); + pParamCopy = NULL; + } + + if (ppParamCopy != NULL) + *ppParamCopy = pParamCopy; + + return status; +} + +NV_STATUS +serverAllocApiCopyOut +( + RsServer *pServer, + NV_STATUS status, + API_STATE *pParamCopy +) +{ + NV_STATUS cpStatus = NV_OK; + if (pParamCopy->paramsSize > 0) + { + // don't copyout if an error + if (status != NV_OK) + pParamCopy->flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + + cpStatus = rmapiParamsRelease(pParamCopy); + if (status == NV_OK) + status = cpStatus; + } + + PORT_FREE(g_resServ.pAllocator, pParamCopy); + + return status; +} + +NV_STATUS +serverTopLock_Prologue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NV_STATUS status; + if ((pLockInfo->flags & RM_LOCK_FLAGS_RM_SEMA) && + !(pLockInfo->state & RM_LOCK_STATES_RM_SEMA_ACQUIRED)) + { + if ((status = osAcquireRmSema(pSys->pSema)) != NV_OK) + return status; + pLockInfo->state |= RM_LOCK_STATES_RM_SEMA_ACQUIRED; + *pReleaseFlags |= RM_LOCK_RELEASE_RM_SEMA; + } + + if (!(pLockInfo->flags & RM_LOCK_FLAGS_NO_API_LOCK)) + { + if (!(pLockInfo->state & RM_LOCK_STATES_API_LOCK_ACQUIRED)) + { + NvU32 flags = RMAPI_LOCK_FLAGS_NONE; + if (access == LOCK_ACCESS_READ) + flags |= RMAPI_LOCK_FLAGS_READ; + + if (pLockInfo->flags & RS_LOCK_FLAGS_LOW_PRIORITY) + flags |= RMAPI_LOCK_FLAGS_LOW_PRIORITY; + + if ((status = rmapiLockAcquire(flags, RM_LOCK_MODULES_CLIENT)) != NV_OK) + { + return status; + } + pLockInfo->state |= RM_LOCK_STATES_API_LOCK_ACQUIRED; + *pReleaseFlags |= RM_LOCK_RELEASE_API_LOCK; + } + else + { + if (!rmapiLockIsOwner()) + { + NV_ASSERT(0); + return NV_ERR_INVALID_LOCK_STATE; + } + } + } + + return NV_OK; +} + +void +serverTopLock_Epilogue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (*pReleaseFlags & RM_LOCK_RELEASE_API_LOCK) + { + rmapiLockRelease(); + pLockInfo->state &= ~RM_LOCK_STATES_API_LOCK_ACQUIRED; + *pReleaseFlags &= ~RM_LOCK_RELEASE_API_LOCK; + } + + if (*pReleaseFlags & RM_LOCK_RELEASE_RM_SEMA) + { + osReleaseRmSema(pSys->pSema, NULL); + pLockInfo->state &= ~RM_LOCK_STATES_RM_SEMA_ACQUIRED; + *pReleaseFlags &= ~RM_LOCK_RELEASE_RM_SEMA; + } +} + +static NvU32 +_resGetBackRefGpusMask(RsResourceRef *pResourceRef) +{ + NvU32 gpuMask = 0x0; + RsInterMapping *pBackRefItem; + + if (pResourceRef == NULL) + { + return 0x0; + } + + pBackRefItem = listHead(&pResourceRef->interBackRefsContext); + 
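+    // Accumulate the GPU masks of every device this resource back-references
+    // through inter-mappings. Illustrative only: a resource mapped into
+    // devices on GPU instances 0 and 2 would yield NVBIT(0) | NVBIT(2), i.e.
+    // a mask of 0x5.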
while (pBackRefItem != NULL) + { + RsResourceRef *pDeviceRef = pBackRefItem->pContextRef; + GpuResource *pGpuResource = dynamicCast(pDeviceRef->pResource, GpuResource); + + if (pGpuResource != NULL) + { + OBJGPU *pGpu = GPU_RES_GET_GPU(pGpuResource); + gpuMask |= gpumgrGetGpuMask(pGpu); + } + + pBackRefItem = listNext(&pResourceRef->interBackRefsContext, pBackRefItem); + } + + return gpuMask; +} + +NV_STATUS +serverResLock_Prologue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags, + NvU32 gpuMask +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pParentGpu = NULL; + + if (pLockInfo->state & RM_LOCK_STATES_GPUS_LOCK_ACQUIRED) + { + if (rmGpuLockIsOwner()) + { + return NV_OK; + } + else + { + NV_ASSERT(0); + status = NV_ERR_INVALID_LOCK_STATE; + goto done; + } + } + + if (!(pLockInfo->flags & RM_LOCK_FLAGS_NO_GPUS_LOCK)) + { + if (rmGpuLockIsOwner()) + { + if (!(pLockInfo->state & RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS)) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_LOCK_STATE; + goto done; + } + } + else + { + if ((status = rmGpuLocksAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_CLIENT)) != NV_OK) + goto done; + + *pReleaseFlags |= RM_LOCK_RELEASE_GPUS_LOCK; + pLockInfo->state |= RM_LOCK_STATES_GPUS_LOCK_ACQUIRED; + } + } + + if (pLockInfo->flags & RM_LOCK_FLAGS_GPU_GROUP_LOCK) + { + RsResourceRef *pParentRef = pLockInfo->pContextRef; + GpuResource *pGpuResource = NULL; + NvU32 gpuGroupMask = 0; + + if (pParentRef == NULL) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_OBJECT_PARENT; + goto done; + } + + // + // Use the pGpu from parent resource as it will work on alloc & free. + // Everything below NV0080_DEVICE uses the same pGpu group + // + // GPU teardown paths free client resources before tearing down pGpu so + // pGpu should always be valid at this point. + // + pGpuResource = dynamicCast(pParentRef->pResource, GpuResource); + + if (pGpuResource == NULL) + { + // + // If parent is not a GpuResource, we might still be a NV0080_DEVICE + // so check and handle that case before reporting an error.. + // + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + if (pCallContext != NULL && pCallContext->pResourceRef != NULL) + { + pGpuResource = dynamicCast(pCallContext->pResourceRef->pResource, GpuResource); + } + if (pGpuResource == NULL) + { + NV_ASSERT_FAILED("Attempting to lock per-GPU lock for a non-GpuResource"); + status = NV_ERR_INVALID_OBJECT_PARENT; + goto done; + } + } + + pParentGpu = GPU_RES_GET_GPU(pGpuResource); + + if (pLockInfo->state & RM_LOCK_STATES_GPU_GROUP_LOCK_ACQUIRED) + { + if (rmGpuGroupLockIsOwner(pParentGpu->gpuInstance, GPU_LOCK_GRP_DEVICE, &gpuGroupMask)) + { + goto done; + } + else + { + NV_ASSERT(0); + status = NV_ERR_INVALID_LOCK_STATE; + goto done; + } + } + + if (rmGpuGroupLockIsOwner(pParentGpu->gpuInstance, GPU_LOCK_GRP_DEVICE, &gpuGroupMask)) + { + if (!(pLockInfo->state & RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS)) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_LOCK_STATE; + goto done; + } + } + else + { + // + // Lock the parent GPU and if specified any GPUs that resource + // may backreference via mappings. 
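+            // Illustrative composition (assumed values): with an explicit
+            // gpuMask of 0, a parent device on GPU instance 1, and
+            // back-references on instance 3, the mask acquired below is
+            // NVBIT(1) | NVBIT(3) == 0xA.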
+ // + pLockInfo->gpuMask = gpuMask | gpumgrGetGpuMask(pParentGpu) | + _resGetBackRefGpusMask(pLockInfo->pResRefToBackRef); + + status = rmGpuGroupLockAcquire(0, + GPU_LOCK_GRP_MASK, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_CLIENT, + &pLockInfo->gpuMask); + if (status != NV_OK) + goto done; + + *pReleaseFlags |= RM_LOCK_RELEASE_GPU_GROUP_LOCK; + pLockInfo->state |= RM_LOCK_STATES_GPU_GROUP_LOCK_ACQUIRED; + } + } + +done: + switch(pLockInfo->traceOp) + { + case RS_LOCK_TRACE_ALLOC: + LOCK_METER_DATA(ALLOC, pLockInfo->traceClassId, 0, 0); + break; + case RS_LOCK_TRACE_FREE: + LOCK_METER_DATA(FREE_OBJECT, pLockInfo->traceClassId, 0, 0); + break; + case RS_LOCK_TRACE_CTRL: + LOCK_METER_DATA(RMCTRL, pLockInfo->traceClassId, pLockInfo->flags, status); + break; + default: + break; + } + + return status; +} + +NV_STATUS +serverAllocEpilogue_WAR +( + RsServer *pServer, + NV_STATUS status, + NvBool bClientAlloc, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams +) +{ + // + // Pre-Volta Linux swapgroups is the only remaining use of channel grabbing. + // Bug 2869820 is tracking the transition of swapgroups from requiring this + // RM feature. + // + NV_STATUS tmpStatus; + if (!bClientAlloc && status == NV_ERR_INSERT_DUPLICATE_NAME) + { + NvBool gpulockRelease = NV_FALSE; + RsResourceRef *pResourceRef; + + if (!rmGpuLockIsOwner()) + { + tmpStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_CLIENT); + + if (tmpStatus != NV_OK) + return tmpStatus; + + gpulockRelease = NV_TRUE; + } + + // + // Hack for taking ownership of display channels. Clients call rmAlloc + // on a previously allocated handle to indicate they want to grab + // ownership of the underlying hardware channel. + // + // TODO - this should be moved to an RM control and called directly by + // clients instead of the overloaded allocation call. RmAlloc should + // be for allocating objects only. 
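+        // Sketch of the flow being worked around (illustrative, not part of
+        // this change):
+        //
+        //     rmapiAlloc(..., hDispChannel /* already allocated */, ...)
+        //         -> NV_ERR_INSERT_DUPLICATE_NAME
+        //         -> dispchnGrabChannel() transfers HW channel ownership
+        //
+        // which is why the duplicate-name failure is intercepted below
+        // instead of being returned to the caller.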
+        //
+        tmpStatus = clientGetResourceRef(pRmAllocParams->pClient, pRmAllocParams->hResource, &pResourceRef);
+        if (tmpStatus == NV_OK)
+        {
+            DispChannel *pDispChannel = dynamicCast(pResourceRef->pResource, DispChannel);
+            if (pDispChannel != NULL)
+            {
+                status = dispchnGrabChannel(pDispChannel,
+                                            pRmAllocParams->hClient,
+                                            pRmAllocParams->hParent,
+                                            pRmAllocParams->hResource,
+                                            pRmAllocParams->externalClassId,
+                                            pRmAllocParams->pAllocParams);
+            }
+        }
+
+        if (gpulockRelease)
+            rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
+    }
+
+    return status;
+}
+
+static NV_STATUS
+_rmAlloc
+(
+    NvHandle hClient,
+    NvHandle hParent,
+    NvHandle *phObject,
+    NvU32 hClass,
+    NvP64 pUserAllocParams,
+    NvU32 paramsSize,
+    NvU32 allocFlags,
+    NvU32 allocInitStates,
+    RS_LOCK_INFO *pLockInfo,
+    NvP64 pRightsRequested,
+    API_SECURITY_INFO secInfo
+)
+{
+    NV_STATUS status;
+    RS_RES_ALLOC_PARAMS_INTERNAL rmAllocParams = {0};
+
+    NV_ASSERT_OR_RETURN(phObject != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    // init RmAllocParams
+    rmAllocParams.hClient = hClient;
+    rmAllocParams.hParent = hParent;
+    rmAllocParams.hResource = *phObject;
+    rmAllocParams.externalClassId = hClass;
+    rmAllocParams.allocFlags = allocFlags;
+    rmAllocParams.allocState = allocInitStates;
+    rmAllocParams.pSecInfo = &secInfo;
+    rmAllocParams.pResourceRef = NULL;
+    rmAllocParams.pAllocParams = NvP64_VALUE(pUserAllocParams);
+    rmAllocParams.paramsSize = paramsSize;
+    rmAllocParams.pLockInfo = pLockInfo;
+    rmAllocParams.pRightsRequested = NvP64_VALUE(pRightsRequested);
+    rmAllocParams.pRightsRequired = NULL;
+
+    status = serverAllocResource(&g_resServ, &rmAllocParams);
+    *phObject = rmAllocParams.hResource;
+
+    return status;
+}
+
+static NvBool
+_serverAlloc_ValidateVgpu
+(
+    RsClient *pClient,
+    NvU32 hParent,
+    NvU32 externalClassId,
+    RS_PRIV_LEVEL privLevel,
+    const NvU32 flags
+)
+{
+    // Check whether context is already sufficiently privileged
+    if (flags & RS_FLAGS_ALLOC_PRIVILEGED)
+    {
+        if (privLevel >= RS_PRIV_LEVEL_USER_ROOT)
+        {
+            return NV_TRUE;
+        }
+    }
+
+    return NV_FALSE;
+}
+
+static NV_STATUS
+_serverAllocValidatePrivilege
+(
+    RsServer *pServer,
+    RS_RESOURCE_DESC *pResDesc,
+    RS_RES_ALLOC_PARAMS *pParams
+)
+{
+    RsClient *pClient = pParams->pClient;
+
+    // Reject allocations for classes that declare no privilege flags.
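+    // Every RS_ENTRY is expected to declare exactly one of three tiers,
+    // which the checks below map onto the caller's RS_PRIV_LEVEL:
+    //
+    //     RS_FLAGS_ALLOC_NON_PRIVILEGED    -> any caller
+    //     RS_FLAGS_ALLOC_PRIVILEGED        -> at least RS_PRIV_LEVEL_USER_ROOT
+    //     RS_FLAGS_ALLOC_KERNEL_PRIVILEGED -> at least RS_PRIV_LEVEL_KERNEL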
+ if (!(pResDesc->flags & RS_FLAGS_ALLOC_NON_PRIVILEGED) && + !(pResDesc->flags & RS_FLAGS_ALLOC_PRIVILEGED) && + !(pResDesc->flags & RS_FLAGS_ALLOC_KERNEL_PRIVILEGED)) + { + // See GPUSWSEC-1560 for more details on object privilege flag requirements + NV_PRINTF(LEVEL_WARNING, "external class 0x%08x is missing its privilege flag in RS_ENTRY\n", pParams->externalClassId); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + if (hypervisorIsVgxHyper() && + clientIsAdmin(pClient, clientGetCachedPrivilege(pClient)) && + (pParams->pSecInfo->privLevel != RS_PRIV_LEVEL_KERNEL) && + !(pResDesc->flags & RS_FLAGS_ALLOC_NON_PRIVILEGED)) + { + // Host CPU-RM context + if (!_serverAlloc_ValidateVgpu(pClient, pParams->hParent, pParams->externalClassId, + pParams->pSecInfo->privLevel, pResDesc->flags)) + { + NV_PRINTF(LEVEL_NOTICE, + "hClient: 0x%08x, externalClassId: 0x%08x: CPU hypervisor does not have permission to allocate object\n", + pParams->hClient, pParams->externalClassId); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + else + { + RS_PRIV_LEVEL privLevel = pParams->pSecInfo->privLevel; + + // Default case, verify admin and kernel privileges + if (pResDesc->flags & RS_FLAGS_ALLOC_PRIVILEGED) + { + if (privLevel < RS_PRIV_LEVEL_USER_ROOT) + { + NV_PRINTF(LEVEL_NOTICE, + "hClient: 0x%08x, externalClassId: 0x%08x: non-privileged context tried to allocate privileged object\n", + pParams->hClient, pParams->externalClassId); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + + if (pResDesc->flags & RS_FLAGS_ALLOC_KERNEL_PRIVILEGED) + { + if (privLevel < RS_PRIV_LEVEL_KERNEL) + { + NV_PRINTF(LEVEL_NOTICE, + "hClient: 0x%08x, externalClassId: 0x%08x: non-privileged context tried to allocate kernel privileged object\n", + pParams->hClient, pParams->externalClassId); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + } + + return NV_OK; +} + +NV_STATUS +serverAllocResourceUnderLock +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS *pRmAllocParams +) +{ + NvHandle hClient = pRmAllocParams->hClient; + NvHandle hParent; + RS_RESOURCE_DESC *pResDesc; + NV_STATUS status = NV_OK; + NV_STATUS tmpStatus; + RsClient *pClient = pRmAllocParams->pClient; + RsResourceRef *pParentRef = NULL; + RsResourceRef *pResourceRef = NULL; + NvU32 i = 0; + RS_LOCK_INFO *pLockInfo = pRmAllocParams->pLockInfo; + NvU32 releaseFlags = 0; + RS_ACCESS_MASK rightsRequired; + LOCK_ACCESS_TYPE resLockAccess = LOCK_ACCESS_WRITE; + OBJGPU *pGpu = NULL; + NvBool bClearRecursiveStateFlag = NV_FALSE; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + if (pRmAllocParams->pSecInfo == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pResDesc = RsResInfoByExternalClassId(pRmAllocParams->externalClassId); + if (pResDesc == NULL) + { + return NV_ERR_INVALID_CLASS; + } + + NV_ASSERT_OK_OR_RETURN(rmapiFixupAllocParams(&pResDesc, pRmAllocParams)); + rmapiResourceDescToLegacyFlags(pResDesc, &pLockInfo->flags, NULL); + + status = _serverAllocValidatePrivilege(pServer, pResDesc, pRmAllocParams); + if (status != NV_OK) + goto done; + + pLockInfo->traceOp = RS_LOCK_TRACE_ALLOC; + pLockInfo->traceClassId = pRmAllocParams->externalClassId; + hParent = pRmAllocParams->hParent; + if (pRmAllocParams->hResource == hClient) + { + if (pResDesc->pParentList[i] != 0) + { + status = NV_ERR_INVALID_OBJECT_PARENT; + goto done; + } + hParent = 0; + + // Single instance restriction is implied + NV_ASSERT(!pResDesc->bMultiInstance); + } + else + { + // Check if parent is valid + status = clientGetResourceRef(pClient, hParent, &pParentRef); + if (status != 
NV_OK) + { + goto done; + } + pLockInfo->pContextRef = pParentRef; + } + + if ((pResDesc->flags & RS_FLAGS_INTERNAL_ONLY) && + !(pRmAllocParams->allocState & RM_ALLOC_STATES_INTERNAL_ALLOC)) + { + status = NV_ERR_INVALID_CLASS; + goto done; + } + + status = serverAllocResourceLookupLockFlags(&g_resServ, RS_LOCK_RESOURCE, pRmAllocParams, &resLockAccess); + if (status != NV_OK) + goto done; + + // + // We can get the GPU pointer for alloc of a device child. + // Device allocs need to be handled separately. See deviceInit_IMPL() + // + tmpStatus = gpuGetByRef(pParentRef, NULL, &pGpu); + + // Override locking flags if we'll need to RPC to GSP + if (pGpu != NULL && IS_FW_CLIENT(pGpu) && + (pResDesc->flags & RS_FLAGS_ALLOC_RPC_TO_PHYS_RM)) + { + resLockAccess = LOCK_ACCESS_WRITE; // always write as we're RPCing to GSP + + // + // If the resource desc says no need for GPU locks, we still need to lock + // the current pGpu in order to send the RPC + // + if (pLockInfo->flags & RM_LOCK_FLAGS_NO_GPUS_LOCK) + { + NV_PRINTF(LEVEL_INFO, "Overriding flags for alloc of class %04x\n", + pRmAllocParams->externalClassId); + pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + if ((pLockInfo->state & RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS) == 0) + { + pLockInfo->state |= RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS; + bClearRecursiveStateFlag = NV_TRUE; + } + } + } + + status = serverResLock_Prologue(&g_resServ, resLockAccess, pLockInfo, &releaseFlags, 0); + if (status != NV_OK) + goto done; + + if (pParentRef != NULL) + { + + // If single instance, ensure parent doesn't yet have a class of this type + if (!pResDesc->bMultiInstance) + { + if (refFindChildOfType(pParentRef, pResDesc->pClassInfo->classId, NV_TRUE, NULL) == NV_OK) + { + status = NV_ERR_STATE_IN_USE; + } + } + + // Check if hParent is an allowed parent for this resource + if (status == NV_OK && !pResDesc->bAnyParent) + { + status = NV_ERR_INVALID_OBJECT_PARENT; + for (i = 0; pResDesc->pParentList[i]; i++) + { + if (pParentRef->internalClassId == pResDesc->pParentList[i]) + { + status = NV_OK; + break; + } + } + } + } + + if (status != NV_OK) + goto done; + + status = clientAssignResourceHandle(pClient, &pRmAllocParams->hResource); + if (status != NV_OK) + goto done; + + pRmAllocParams->hParent = (pRmAllocParams->hParent == 0) ? 
pRmAllocParams->hClient : pRmAllocParams->hParent; + + if (pServer->bRsAccessEnabled) + { + rsAccessMaskFromArray(&rightsRequired, pResDesc->pRightsRequiredArray, + pResDesc->rightsRequiredLength); + pRmAllocParams->pRightsRequired = &rightsRequired; + } + + status = clientAllocResource(pClient, &g_resServ, pRmAllocParams); + if (status != NV_OK) + goto done; + + pResourceRef = pRmAllocParams->pResourceRef; + + // + // Alloc RPC handling + // + if (!(pRmAllocParams->allocState & RM_ALLOC_STATES_SKIP_RPC)) + { + if (pResDesc->flags & (RS_FLAGS_ALLOC_RPC_TO_VGPU_HOST | RS_FLAGS_ALLOC_RPC_TO_PHYS_RM)) + { + OBJGPU *pGpu = NULL; + RmResource *pRmResource = dynamicCast(pResourceRef->pResource, RmResource); + CALL_CONTEXT callContext = {0}; + CALL_CONTEXT *pOldContext = NULL; + + status = gpuGetByRef(pResourceRef, NULL, &pGpu); + if (status != NV_OK || pRmResource == NULL) + { + status = NV_ERR_INVALID_CLASS; + goto done; + } + + if (!IS_VIRTUAL(pGpu) && !IS_FW_CLIENT(pGpu)) + { + status = NV_OK; + goto done; + } + + // if physical RM RPC make sure we're a GSP client otherwise skip + if (((pResDesc->flags & (RS_FLAGS_ALLOC_RPC_TO_VGPU_HOST | RS_FLAGS_ALLOC_RPC_TO_PHYS_RM)) == RS_FLAGS_ALLOC_RPC_TO_PHYS_RM) && + (!IS_FW_CLIENT(pGpu))) + { + status = NV_OK; + goto done; + } + + // Set the call context to allow vgpuGetCallingContextDevice() + // and other context dependent functions to operate in the RPC code. + // + // The context is assigned in the above clientAllocResource() call, + // but we can't simply extend the context scope to this place + // as pResourceRef is allocated internally in clientAllocResource(). + // + // Instead, we create basically the same context here once again + // and use it for the RPC call. + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pResourceRef = pResourceRef; + callContext.pLockInfo = pRmAllocParams->pLockInfo; + callContext.secInfo = *pRmAllocParams->pSecInfo; + + NV_ASSERT_OK_OR_GOTO(status, + resservSwapTlsCallContext(&pOldContext, &callContext), done); + NV_RM_RPC_ALLOC_OBJECT(pGpu, + pRmAllocParams->hClient, + pRmAllocParams->hParent, + pRmAllocParams->hResource, + pRmAllocParams->externalClassId, + pRmAllocParams->pAllocParams, + pRmAllocParams->paramsSize, + status); + NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext)); + + if (status != NV_OK) + goto done; + + pRmResource->bRpcFree = NV_TRUE; + } + } + +done: + if ((status != NV_OK) && (pResourceRef != NULL)) + { + RS_RES_FREE_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.hClient = hClient; + params.hResource = pRmAllocParams->hResource; + params.pResourceRef = pResourceRef; + params.pSecInfo = pRmAllocParams->pSecInfo; + params.pLockInfo = pRmAllocParams->pLockInfo; + tmpStatus = clientFreeResource(pClient, &g_resServ, ¶ms); + NV_ASSERT(tmpStatus == NV_OK); + pRmAllocParams->pResourceRef = NULL; + } + + serverResLock_Epilogue(&g_resServ, resLockAccess, pLockInfo, &releaseFlags); + + if (bClearRecursiveStateFlag) + { + pLockInfo->state &= ~RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS; + } + return status; +} + +NV_STATUS +serverFreeResourceRpcUnderLock +( + RsServer *pServer, + RS_RES_FREE_PARAMS *pFreeParams +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef = pFreeParams->pResourceRef; + OBJGPU *pGpu = NULL; + NvBool bBcResource; + RmResource *pRmResource = NULL; + + NV_ASSERT_OR_RETURN(pResourceRef != NULL, NV_ERR_INVALID_OBJECT_HANDLE); + + pRmResource = dynamicCast(pResourceRef->pResource, RmResource); + status = gpuGetByRef(pResourceRef, 
&bBcResource, &pGpu); + if ((status != NV_OK) || + (!IS_VIRTUAL(pGpu) && !IS_FW_CLIENT(pGpu)) || + (pRmResource == NULL) || + (pRmResource->bRpcFree == NV_FALSE)) + { + status = NV_OK; + goto rpc_done; + } + + gpuSetThreadBcState(pGpu, bBcResource); + NV_RM_RPC_FREE(pGpu, pResourceRef->pClient->hClient, + pResourceRef->pParentRef->hResource, + pResourceRef->hResource, status); + +rpc_done: + return status; +} + +void +serverResLock_Epilogue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if (*pReleaseFlags & RM_LOCK_RELEASE_GPU_GROUP_LOCK) + { + // UNLOCK: release GPU group lock + rmGpuGroupLockRelease(pLockInfo->gpuMask, GPUS_LOCK_FLAGS_NONE); + pLockInfo->state &= ~RM_LOCK_STATES_GPU_GROUP_LOCK_ACQUIRED; + *pReleaseFlags &= ~RM_LOCK_RELEASE_GPU_GROUP_LOCK; + } + + if (*pReleaseFlags & RM_LOCK_RELEASE_GPUS_LOCK) + { + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + pLockInfo->state &= ~RM_LOCK_STATES_GPUS_LOCK_ACQUIRED; + *pReleaseFlags &= ~RM_LOCK_RELEASE_GPUS_LOCK; + } +} + +NV_STATUS +serverInitFreeParams_Recursive(NvHandle hClient, NvHandle hResource, RS_LOCK_INFO *pLockInfo, RS_RES_FREE_PARAMS *pParams) +{ + portMemSet(pParams, 0, sizeof(*pParams)); + pParams->hClient = hClient; + pParams->hResource = hResource; + pParams->pLockInfo = pLockInfo; + return NV_OK; +} + +NV_STATUS +serverUpdateLockFlagsForFree +( + RsServer *pServer, + RS_RES_FREE_PARAMS_INTERNAL *pRmFreeParams +) +{ + RS_LOCK_INFO *pLockInfo = pRmFreeParams->pLockInfo; + OBJGPU *pGpu = NULL; + + rmapiResourceDescToLegacyFlags(pRmFreeParams->pResourceRef->pResourceDesc, NULL, &pLockInfo->flags); + + pLockInfo->pContextRef = pRmFreeParams->pResourceRef->pParentRef; + if (gpuGetByRef(pLockInfo->pContextRef, NULL, &pGpu) == NV_OK) + { + RmResource *pRmResource = dynamicCast(pRmFreeParams->pResourceRef->pResource, RmResource); + if (pGpu != NULL && IS_FW_CLIENT(pGpu) && pRmResource != NULL && pRmResource->bRpcFree) + { + // + // If the resource desc says no need for GPU locks, we still need to lock + // the current pGpu in order to send the RPC + // + if (pLockInfo->flags & RM_LOCK_FLAGS_NO_GPUS_LOCK) + { + NV_PRINTF(LEVEL_INFO, "Overriding flags for free of class %04x\n", + pRmFreeParams->pResourceRef->externalClassId); + pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + pLockInfo->state |= RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS; + } + } + } + + return NV_OK; +} + +NV_STATUS +rmapiFreeResourcePrologue +( + RS_RES_FREE_PARAMS_INTERNAL *pRmFreeParams +) +{ + RsResourceRef *pResourceRef = pRmFreeParams->pResourceRef; + NV_STATUS tmpStatus; + OBJGPU *pGpu = NULL; + NvBool bBcResource; + + NV_ASSERT_OR_RETURN(pResourceRef, NV_ERR_INVALID_OBJECT_HANDLE); + + // + // Use gpuGetByRef instead of GpuResource because gpuGetByRef will work even + // if resource isn't a GpuResource (e.g.: Memory which can be allocated + // under a subdevice, device or client root) + // + tmpStatus = gpuGetByRef(pResourceRef, &bBcResource, &pGpu); + if (tmpStatus == NV_OK) + gpuSetThreadBcState(pGpu, bBcResource); + + // + // Need to cancel pending timer callbacks before event structs are freed. 
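+    // (tmrapiDeregisterEvents() below performs that cancellation before
+    // CliDelObjectEvents() tears down the event structures.)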
+    // RS-TODO: provide notifications to objects referencing events or add
+    // dependency
+    //
+    TimerApi *pTimerApi = dynamicCast(pResourceRef->pResource, TimerApi);
+    if (pTimerApi != NULL)
+    {
+        tmrapiDeregisterEvents(pTimerApi);
+    }
+
+    CliDelObjectEvents(pResourceRef);
+
+    return NV_OK;
+}
+
+NV_STATUS
+rmapiAlloc
+(
+    RM_API   *pRmApi,
+    NvHandle  hClient,
+    NvHandle  hParent,
+    NvHandle *phObject,
+    NvU32     hClass,
+    void     *pAllocParams,
+    NvU32     paramsSize
+)
+{
+    if (!pRmApi->bHasDefaultSecInfo)
+        return NV_ERR_NOT_SUPPORTED;
+
+    return pRmApi->AllocWithSecInfo(pRmApi, hClient, hParent, phObject, hClass, NV_PTR_TO_NvP64(pAllocParams), paramsSize,
+                                    RMAPI_ALLOC_FLAGS_NONE, NvP64_NULL, &pRmApi->defaultSecInfo);
+}
+
+NV_STATUS
+rmapiAllocWithHandle
+(
+    RM_API   *pRmApi,
+    NvHandle  hClient,
+    NvHandle  hParent,
+    NvHandle  hObject,
+    NvU32     hClass,
+    void     *pAllocParams,
+    NvU32     paramsSize
+)
+{
+    if (!pRmApi->bHasDefaultSecInfo)
+        return NV_ERR_NOT_SUPPORTED;
+
+    return pRmApi->AllocWithSecInfo(pRmApi, hClient, hParent, &hObject, hClass, NV_PTR_TO_NvP64(pAllocParams), paramsSize,
+                                    RMAPI_ALLOC_FLAGS_NONE, NvP64_NULL, &pRmApi->defaultSecInfo);
+}
+
+NV_STATUS
+rmapiAllocWithSecInfo
+(
+    RM_API            *pRmApi,
+    NvHandle           hClient,
+    NvHandle           hParent,
+    NvHandle          *phObject,
+    NvU32              hClass,
+    NvP64              pAllocParams,
+    NvU32              paramsSize,
+    NvU32              flags,
+    NvP64              pRightsRequested,
+    API_SECURITY_INFO *pSecInfo
+)
+{
+    NV_STATUS status;
+    NvU32 allocInitStates = RM_ALLOC_STATES_NONE;
+    RM_API_CONTEXT rmApiContext = {0};
+    RS_LOCK_INFO *pLockInfo;
+    NvHandle hSecondClient = NV01_NULL_OBJECT;
+
+    status = rmapiPrologue(pRmApi, &rmApiContext);
+    if (status != NV_OK)
+        return status;
+
+    pLockInfo = portMemAllocNonPaged(sizeof(*pLockInfo));
+    if (pLockInfo == NULL)
+    {
+        status = NV_ERR_NO_MEMORY;
+        goto done;
+    }
+
+    if (pSecInfo->paramLocation == PARAM_LOCATION_KERNEL)
+    {
+        status = serverAllocLookupSecondClient(hClass,
+                                               NvP64_VALUE(pAllocParams),
+                                               &hSecondClient);
+        if (status != NV_OK)
+            goto done;
+    }
+
+    portMemSet(pLockInfo, 0, sizeof(*pLockInfo));
+    status = rmapiInitLockInfo(pRmApi, hClient, hSecondClient, pLockInfo);
+    if (status != NV_OK)
+        goto done;
+
+    // RS-TODO: Fix calls that use RMAPI_GPU_LOCK_INTERNAL without holding the API lock
+    if (pRmApi->bGpuLockInternal && !rmapiLockIsOwner())
+    {
+        // CORERM-6052 targets fixing the API lockless path for RTD3.
+        if (!rmapiInRtd3PmPath())
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "NVRM: %s: RMAPI_GPU_LOCK_INTERNAL alloc requested without holding the RMAPI lock: client:0x%x parent:0x%x object:0x%x class:0x%x\n",
+                      __FUNCTION__, hClient, hParent, *phObject, hClass);
+        }
+
+        pLockInfo->flags |= RM_LOCK_FLAGS_NO_API_LOCK;
+        pLockInfo->state &= ~RM_LOCK_STATES_API_LOCK_ACQUIRED;
+    }
+
+    // This flag applies to both VGPU and GSP cases
+    if (flags & RMAPI_ALLOC_FLAGS_SKIP_RPC)
+        allocInitStates |= RM_ALLOC_STATES_SKIP_RPC;
+
+    //
+    // Mark internal client allocations as such, so the resource server
+    // generates the internal client handle with a distinct template.
+    // The distinct template ensures that client handles provided by GSP
+    // clients do not collide with the client handles generated by the GSP/FW RM.
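+    // (See serverAllocClientHandleBase(): internal allocations draw from
+    // pServer->internalHandleBase, external ones from
+    // pServer->clientHandleBase or, on GSP with a VF GFID, from
+    // RS_CLIENT_GET_VF_HANDLE_BASE(gfid).)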
+ // + if ((pSecInfo->privLevel >= RS_PRIV_LEVEL_KERNEL) && + (pSecInfo->paramLocation == PARAM_LOCATION_KERNEL) && pRmApi->bGpuLockInternal) + allocInitStates |= RM_ALLOC_STATES_INTERNAL_CLIENT_HANDLE; + + if ((pSecInfo->paramLocation == PARAM_LOCATION_KERNEL) && + (pRmApi->bApiLockInternal || pRmApi->bGpuLockInternal)) + allocInitStates |= RM_ALLOC_STATES_INTERNAL_ALLOC; + + NV_PRINTF(LEVEL_INFO, "client:0x%x parent:0x%x object:0x%x class:0x%x\n", + hClient, hParent, *phObject, hClass); + + status = _rmAlloc(hClient, + hParent, + phObject, + hClass, + pAllocParams, + paramsSize, + flags, + allocInitStates, + pLockInfo, + pRightsRequested, + *pSecInfo); + + // + // If hClient is allocated behind GPU locks, client is marked as internal + // + if ((status == NV_OK) && ((hClass == NV01_ROOT) || (hClass == NV01_ROOT_NON_PRIV) || (hClass == NV01_ROOT_CLIENT)) && + pSecInfo->paramLocation == PARAM_LOCATION_KERNEL && pRmApi->bGpuLockInternal) + { + void *pHClient = *(void **)&pAllocParams; + + // flag this client as an RM internal client + rmclientSetClientFlagsByHandle(*(NvU32*)pHClient /* hClient */, RMAPI_CLIENT_FLAG_RM_INTERNAL_CLIENT); + } + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "allocation complete\n"); + } + else + { + NV_PRINTF(LEVEL_INFO, "allocation failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + NV_PRINTF(LEVEL_INFO, + "client:0x%x parent:0x%x object:0x%x class:0x%x\n", hClient, + hParent, *phObject, hClass); + } + + portMemFree(pLockInfo); + +done: + rmapiEpilogue(pRmApi, &rmApiContext); + + return status; +} + +NV_STATUS +resservClientFactory +( + PORT_MEM_ALLOCATOR *pAllocator, + RS_RES_ALLOC_PARAMS *pParams, + RsClient **ppRsClient +) +{ + RmClient *pClient; + NV_STATUS status; + + status = objCreate(&pClient, NVOC_NULL_OBJECT, RmClient, pAllocator, pParams); + + if (status != NV_OK) + { + return status; + } + NV_ASSERT(pClient != NULL); + + *ppRsClient = staticCast(pClient, RsClient); + return NV_OK; +} + +NV_STATUS +resservResourceFactory +( + PORT_MEM_ALLOCATOR *pAllocator, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS *pParams, + RsResource **ppResource +) +{ + RS_RESOURCE_DESC *pResDesc; + NV_STATUS status; + Dynamic *pDynamic = NULL; + RsResource *pResource = NULL; + OBJGPU *pGpu = NULL; + + pResDesc = RsResInfoByExternalClassId(pParams->externalClassId); + if (pResDesc == NULL) + return NV_ERR_INVALID_CLASS; + + if (pCallContext->pResourceRef->pParentRef != NULL && + pCallContext->pResourceRef->pParentRef->pResource != NULL) + { + GpuResource *pParentGpuResource = dynamicCast(pCallContext->pResourceRef->pParentRef->pResource, + GpuResource); + if (pParentGpuResource != NULL) + { + pGpu = GPU_RES_GET_GPU(pParentGpuResource); + } + } + + if (pResDesc->internalClassId == classId(Device)) + { + if (pParams->pAllocParams == NULL) + return NV_ERR_INVALID_ARGUMENT; + + NV0080_ALLOC_PARAMETERS *pNv0080AllocParams = pParams->pAllocParams; + NvU32 deviceInst = pNv0080AllocParams->deviceId; + + if (deviceInst >= NV_MAX_DEVICES) + return NV_ERR_INVALID_ARGUMENT; + + NvU32 gpuInst = gpumgrGetPrimaryForDevice(deviceInst); + + if ((pGpu = gpumgrGetGpu(gpuInst)) == NULL) + { + return NV_ERR_INVALID_STATE; + } + } + + if (pGpu != NULL && + !RMCFG_FEATURE_PLATFORM_MODS && + !gpuIsClassSupported(pGpu, pParams->externalClassId)) + { + NV_PRINTF(LEVEL_INFO, "Skipping unsupported class 0x%x\n", pParams->externalClassId); + return NV_ERR_NOT_SUPPORTED; + } + + status = objCreateDynamicWithFlags(&pDynamic, + (Object*)pGpu, + pResDesc->pClassInfo, + 
NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY, + pCallContext, + pParams); + if (status != NV_OK) + return status; + + pResource = dynamicCast(pDynamic, RsResource); + + if (pResource == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + if (pResDesc->internalClassId == classId(Subdevice) || pResDesc->internalClassId == classId(Device) || + pResDesc->internalClassId == classId(DispCommon)) + { + // + // DispCommon and DispSwObj's pGpu will be retrieved at the beginning of the function, + // since their parent is Device. + // + if (!(pResDesc->internalClassId == classId(DispCommon)) && !(pResDesc->internalClassId == classId(DispSwObj))) + { + pGpu = GPU_RES_GET_GPU(dynamicCast(pDynamic, GpuResource)); + } + + if (pGpu) + { + rmapiControlCacheSetGpuAttrForObject(pParams->hClient, pParams->hResource, pGpu); + } + } + + *ppResource = pResource; + + return status; +} + +NV_STATUS +rmapiAllocWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + NvP64 pAllocParams, + NvU32 paramsSize, + NvU32 flags, + NvP64 pRightsRequested, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiAllocWithSecInfo(pRmApi, hClient, hParent, phObject, hClass, + pAllocParams, paramsSize, flags, pRightsRequested, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +rmapiFree +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->FreeWithSecInfo(pRmApi, hClient, hObject, RMAPI_FREE_FLAGS_NONE, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiFreeWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RS_RES_FREE_PARAMS freeParams; + RS_LOCK_INFO lockInfo; + RM_API_CONTEXT rmApiContext = {0}; + + portMemSet(&freeParams, 0, sizeof(freeParams)); + + NV_PRINTF(LEVEL_INFO, "Nv01Free: client:0x%x object:0x%x\n", hClient, + hObject); + + status = rmapiPrologue(pRmApi, &rmApiContext); + + if (status != NV_OK) + return status; + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + status = rmapiInitLockInfo(pRmApi, hClient, NV01_NULL_OBJECT, &lockInfo); + if (status != NV_OK) + { + rmapiEpilogue(pRmApi, &rmApiContext); + return status; + } + + // RS-TODO: Fix calls that use RMAPI_GPU_LOCK_INTERNAL without holding the API lock + if (pRmApi->bGpuLockInternal && !rmapiLockIsOwner()) + { + // CORERM-6052 targets fixing the API lockless path for RTD3. 
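+        //
+        // A typical internal caller obtains this interface via, e.g.,
+        // rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL) and is normally expected
+        // to already hold the API lock. Outside the RTD3 suspend/resume path
+        // this is logged as an error below, but in both cases the free
+        // proceeds without the API lock rather than failing outright.
+        //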
+ if (!rmapiInRtd3PmPath()) + { + NV_PRINTF(LEVEL_ERROR, "RMAPI_GPU_LOCK_INTERNAL free requested without holding the RMAPI lock\n"); + } + + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK; + lockInfo.state &= ~RM_LOCK_STATES_API_LOCK_ACQUIRED; + } + + freeParams.hClient = hClient; + freeParams.hResource = hObject; + freeParams.freeState = RM_FREE_STATES_NONE; + freeParams.pLockInfo = &lockInfo; + freeParams.freeFlags = flags; + freeParams.pSecInfo = pSecInfo; + + rmapiControlCacheFreeObjectEntry(hClient, hObject); + + status = serverFreeResourceTree(&g_resServ, &freeParams); + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv01Free: free complete\n"); + } + else + { + NV_PRINTF_COND(status == NV_ERR_GPU_IN_FULLCHIP_RESET, LEVEL_INFO, LEVEL_WARNING, + "Nv01Free: free failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + NV_PRINTF_COND(status == NV_ERR_GPU_IN_FULLCHIP_RESET, LEVEL_INFO, LEVEL_WARNING, + "Nv01Free: client:0x%x object:0x%x\n", + hClient, hObject); + } + + return status; +} + +NV_STATUS +rmapiFreeWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiFreeWithSecInfo(pRmApi, hClient, hObject, flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +rmapiDisableClients +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->DisableClientsWithSecInfo(pRmApi, phClientList, numClients, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiDisableClientsWithSecInfo +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients, + API_SECURITY_INFO *pSecInfo +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 lockState = 0; + NvU32 i; + + NV_PRINTF(LEVEL_INFO, "numClients: %d\n", numClients); + + if (!pRmApi->bRmSemaInternal && osAcquireRmSema(pSys->pSema) != NV_OK) + return NV_ERR_INVALID_LOCK_STATE; + + if (pRmApi->bApiLockInternal) + lockState |= RM_LOCK_STATES_API_LOCK_ACQUIRED; + + if (pRmApi->bGpuLockInternal) + lockState |= RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS; + + for (i = 0; i < numClients; ++i) + rmapiControlCacheFreeClientEntry(phClientList[i]); + + serverMarkClientListDisabled(&g_resServ, phClientList, numClients, lockState, pSecInfo); + + if (!pRmApi->bRmSemaInternal) + osReleaseRmSema(pSys->pSema, NULL); + + NV_PRINTF(LEVEL_INFO, "Disable clients complete\n"); + + return NV_OK; +} + +NV_STATUS +rmapiDisableClientsWithSecInfoTls +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiDisableClientsWithSecInfo(pRmApi, phClientList, numClients, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NvBool +serverRwApiLockIsOwner +( + RsServer *pServer +) +{ + return rmapiLockIsWriteOwner(); +} + +NV_STATUS +serverAllocResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + if (lock == RS_LOCK_TOP) + { + RS_RESOURCE_DESC *pResDesc; + + pResDesc = 
RsResInfoByExternalClassId(pParams->externalClassId); + + if (pResDesc == NULL) + { + return NV_ERR_INVALID_CLASS; + } + + if (pResDesc->flags & RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC) + *pAccess = LOCK_ACCESS_READ; + else + *pAccess = LOCK_ACCESS_WRITE; + + if ((pResDesc->flags & RS_FLAGS_FORCE_ACQUIRE_RO_API_LOCK_ON_ALLOC_FREE) != 0 && + pSys->getProperty(pSys, PDB_PROP_SYS_ENABLE_FORCE_SHARED_LOCK)) + { + // + // If the force acquire RO flag is set then ignore module parameter + // setting and always use RO. + // + *pAccess = LOCK_ACCESS_READ; + return NV_OK; + } + + if (!serverSupportsReadOnlyLock(&g_resServ, RS_LOCK_TOP, RS_API_ALLOC_RESOURCE)) + { + *pAccess = LOCK_ACCESS_WRITE; + return NV_OK; + } + + return NV_OK; + } + + if (lock == RS_LOCK_RESOURCE) + { + *pAccess = LOCK_ACCESS_WRITE; + return NV_OK; + } + + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +serverFreeResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_FREE_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess, + NvBool *pbSupportForceROLock +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 flags; + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_FREE_RESOURCE)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + *pbSupportForceROLock = pSys->getProperty(pSys, PDB_PROP_SYS_ENABLE_FORCE_SHARED_LOCK); + + if (pParams->pResourceRef != NULL) + { + // + // If pResourceRef is set, check for the explicit RO opt-in flag that + // some resources set as a transition before enabling RO across the + // board. + // + // bug 4283710 - [RM][Locking] Allow RMAPI to take API lock in reader's mode by default. + // + flags = pParams->pResourceRef->pResourceDesc->flags; + if ((flags & RS_FLAGS_FORCE_ACQUIRE_RO_API_LOCK_ON_ALLOC_FREE) != 0 && + (*pbSupportForceROLock)) + { + *pAccess = LOCK_ACCESS_READ; + } + } + return NV_OK; +} diff --git a/src/nvidia/src/kernel/rmapi/binary_api.c b/src/nvidia/src/kernel/rmapi/binary_api.c new file mode 100644 index 0000000..aed4b0a --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/binary_api.c @@ -0,0 +1,162 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "rmapi/binary_api.h" +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" +#include "rmapi/client.h" +#include "rmapi/resource.h" +#include "rmapi/rmapi.h" +#include "rmapi/control.h" +#include "ctrl/ctrlxxxx.h" +#include "gpu/gpu_resource.h" +#include "gpu/gpu.h" +#include "core/locks.h" +#include "vgpu/rpc.h" +#include "rmapi/rmapi_utils.h" +#include "kernel/gpu_mgr/gpu_mgr.h" + +NV_STATUS +binapiConstruct_IMPL +( + BinaryApi *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +NV_STATUS +binapiprivConstruct_IMPL +( + BinaryApiPrivileged *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +NV_STATUS +binapiControl_IMPL +( + BinaryApi *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pResource); + GPU_MASK gpuMaskRelease = 0; + RM_API *pRmApi; + + // check if CMD is NULL, return early + if (RMCTRL_IS_NULL_CMD(pParams->cmd)) + return NV_OK; + + if (pGpu == NULL) + return NV_ERR_INVALID_ARGUMENT; + + if (IS_VIRTUAL(pGpu)) + { + { + NV_ASSERT_OK_OR_RETURN(rmGpuGroupLockAcquire(pGpu->gpuInstance, + GPU_LOCK_GRP_SUBDEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_RPC, + &gpuMaskRelease)); + + NV_RM_RPC_API_CONTROL(pGpu, + pParams->hClient, + pParams->hObject, + pParams->cmd, + pParams->pParams, + pParams->paramsSize, + status); + + if (gpuMaskRelease != 0) + { + rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE); + } + } + } + else if (IS_FW_CLIENT(pGpu)) + { + NV_ASSERT_OK_OR_RETURN(rmGpuGroupLockAcquire(pGpu->gpuInstance, + GPU_LOCK_GRP_SUBDEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_RPC, + &gpuMaskRelease)); + + pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + status = pRmApi->Control(pRmApi, + pParams->hClient, + pParams->hObject, + pParams->cmd, + pParams->pParams, + pParams->paramsSize); + + if (gpuMaskRelease != 0) + { + rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE); + } + } + + return status; +} + +NV_STATUS +binapiprivControl_IMPL +( + BinaryApiPrivileged *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status = NV_OK; + + // check if CMD is NULL, return early + if (RMCTRL_IS_NULL_CMD(pParams->cmd)) + return NV_OK; + { + if (pParams->secInfo.privLevel >= RS_PRIV_LEVEL_USER_ROOT) + { + status = NV_OK; + } + else + { + status = NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + + if (status == NV_OK) + { + return binapiControl_IMPL(staticCast(pResource, BinaryApi), pCallContext, pParams); + } + else + { + return status; + } +} + diff --git a/src/nvidia/src/kernel/rmapi/client.c b/src/nvidia/src/kernel/rmapi/client.c new file mode 100644 index 0000000..c2a9a91 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/client.c @@ -0,0 +1,948 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os/os.h" + +#include "rmapi/rmapi.h" +#include "rmapi/rs_utils.h" +#include "rmapi/client.h" +#include "rmapi/client_resource.h" +#include "rmapi/resource_fwd_decls.h" +#include "core/locks.h" +#include "core/system.h" +#include "resource_desc.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/gpu.h" + +OsInfoMap g_osInfoList; +UserInfoList g_userInfoList; +RmClientList g_clientListBehindGpusLock; // RS-TODO remove this WAR + +#define RS_FW_UNIQUE_HANDLE_BASE (0xc9f00000) + +static NV_STATUS _registerUserInfo(PUID_TOKEN *ppUidToken, UserInfo **ppUserInfo); +static NV_STATUS _unregisterUserInfo(UserInfo *pUserInfo); +static NV_STATUS _registerOSInfo(RmClient *pClient, void *pOSInfo); +static NV_STATUS _unregisterOSInfo(RmClient *pClient, void *pOSInfo); + +NV_STATUS +rmclientConstruct_IMPL +( + RmClient *pClient, + PORT_MEM_ALLOCATOR* pAllocator, + RS_RES_ALLOC_PARAMS_INTERNAL* pParams +) +{ + NV_STATUS status = NV_OK; + NvU32 i; + OBJSYS *pSys = SYS_GET_INSTANCE(); + RsClient *pRsClient = staticCast(pClient, RsClient); + NvBool bReleaseLock = NV_FALSE; + API_SECURITY_INFO *pSecInfo = pParams->pSecInfo; + OBJGPU *pGpu = NULL; + + // + // RM client objects can only be created/destroyed with the RW API lock. + // Bug 4193761 - allow internal clients to be created with the GPU lock, + // GR-2409 will remove the possible race condition with the client list. 
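+    // The assert below encodes exactly that invariant: the caller either
+    // holds the RW API lock, or is constructing an internal client while
+    // holding the GPU lock.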
+    //
+    NV_ASSERT_OR_RETURN(rmapiLockIsWriteOwner() ||
+                        (serverIsClientInternal(&g_resServ, pRsClient->hClient) && rmGpuLockIsOwner()),
+                        NV_ERR_INVALID_LOCK_STATE);
+
+    if (RMCFG_FEATURE_PLATFORM_GSP)
+    {
+        pGpu = gpumgrGetSomeGpu();
+
+        if (pGpu == NULL)
+        {
+            NV_PRINTF(LEVEL_ERROR, "No GPU found\n");
+            return NV_ERR_INVALID_STATE;
+        }
+    }
+
+    pClient->bIsRootNonPriv = (pParams->externalClassId == NV01_ROOT_NON_PRIV);
+    pClient->pUserInfo = NULL;
+    pClient->pSecurityToken = NULL;
+    pClient->pOSInfo = pSecInfo->clientOSInfo;
+    pClient->imexChannel = -1;
+
+    pClient->cachedPrivilege = pSecInfo->privLevel;
+
+    {
+        pClient->ProcID = osGetCurrentProcess();
+        if (pClient->cachedPrivilege <= RS_PRIV_LEVEL_USER_ROOT)
+            pClient->pOsPidInfo = osGetPidInfo();
+    }
+
+    // Set user-friendly client name from current process
+    osGetCurrentProcessName(pClient->name, NV_PROC_NAME_MAX_LENGTH);
+
+    for (i = 0; i < NV0000_NOTIFIERS_MAXCOUNT; i++)
+    {
+        pClient->CliSysEventInfo.notifyActions[i] =
+            NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
+    }
+
+    // Prevent kernel clients from requesting handles in the FW handle generator range
+    status = clientSetRestrictedRange(pRsClient,
+                                      RS_FW_UNIQUE_HANDLE_BASE, RS_UNIQUE_HANDLE_RANGE);
+    if (status != NV_OK)
+    {
+        NV_PRINTF(LEVEL_WARNING,
+                  "Failed to set host client restricted resource handle range. Status=%x\n", status);
+        goto out;
+    }
+
+    if (!rmGpuLockIsOwner())
+    {
+        // LOCK: acquire GPUs lock
+        if ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_CLIENT)) != NV_OK)
+        {
+            NV_ASSERT(0);
+            goto out;
+        }
+        bReleaseLock = NV_TRUE;
+    }
+
+    _registerOSInfo(pClient, pClient->pOSInfo);
+
+    pClient->bIsClientVirtualMode = (pSecInfo->pProcessToken != NULL);
+
+    //
+    // Cache the security/uid tokens only if client handle validation is
+    // enabled AND it's a user-mode path or a non-privileged kernel class.
+    //
+    if (pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE) &&
+        ((pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_KERNEL) || pClient->bIsRootNonPriv))
+    {
+        PSECURITY_TOKEN pSecurityToken = (pClient->bIsClientVirtualMode ?
+                                          pSecInfo->pProcessToken : osGetSecurityToken());
+        PUID_TOKEN pUidToken = osGetCurrentUidToken();
+        UserInfo *pUserInfo = NULL;
+
+        if (RMCFG_FEATURE_PLATFORM_GSP)
+        {
+            pClient->pSecurityToken = pSecurityToken;
+        }
+        else
+        {
+            // pUserInfo takes ownership of pUidToken upon successful registration
+            status = _registerUserInfo(&pUidToken, &pUserInfo);
+
+            if (status == NV_OK)
+            {
+                pClient->pUserInfo = pUserInfo;
+                pClient->pSecurityToken = pSecurityToken;
+            }
+            else
+            {
+                portMemFree(pUidToken);
+
+                if (pSecurityToken != NULL && !pClient->bIsClientVirtualMode)
+                    portMemFree(pSecurityToken);
+            }
+        }
+    }
+
+    if (listAppendValue(&g_clientListBehindGpusLock, (void*)&pClient) == NULL)
+        status = NV_ERR_INSUFFICIENT_RESOURCES;
+
+    if (bReleaseLock)
+    {
+        // UNLOCK: release GPUs lock
+        rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
+    }
+
+    // RM gets the client handle from the allocation parameters
+    if (status == NV_OK && pParams->pAllocParams != NULL)
+        *(NvHandle*)(pParams->pAllocParams) = pParams->hClient;
+
+    eventSystemInitEventQueue(&pClient->CliSysEventInfo.eventQueue);
+
+    NV_PRINTF(LEVEL_INFO, "New RM Client: hClient=0x%08x (%c), ProcID=%u, name='%s'\n",
+              pRsClient->hClient, (pRsClient->type == CLIENT_TYPE_USER) ?
'U' : 'K', pClient->ProcID, pClient->name); + +out: + if (status != NV_OK) + { + osPutPidInfo(pClient->pOsPidInfo); + pClient->pOsPidInfo = NULL; + } + + return status; +} + +void +rmclientDestruct_IMPL +( + RmClient *pClient +) +{ + NV_STATUS status = NV_OK; + NvBool bReleaseLock = NV_FALSE; + + // + // RM client objects can only be created/destroyed with the RW API lock. + // Bug 4193761 - allow internal clients to be created with the GPU lock, + // GR-2409 will remove the possible race condition with the client list. + // + NV_ASSERT_OR_ELSE(rmapiLockIsWriteOwner() || + (serverIsClientInternal(&g_resServ, staticCast(pClient, RsClient)->hClient) && + rmGpuLockIsOwner()), + return); + + NV_PRINTF(LEVEL_INFO, " type: client\n"); + + LOCK_METER_DATA(FREE_CLIENT, hClient, 0, 0); + + osPutPidInfo(pClient->pOsPidInfo); + + eventSystemClearEventQueue(&pClient->CliSysEventInfo.eventQueue); + + // Updating the client list just before client handle unregister // + // in case child free functions need to iterate over all clients // + if (!rmGpuLockIsOwner()) + { + // LOCK: acquire GPUs lock + if ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_CLIENT)) != NV_OK) + { + // This is the only chance that the shadow client list can be + // updated so modify it regardless of whether or not we obtained the lock + NV_ASSERT(0); + } + else + { + bReleaseLock = NV_TRUE; + } + } + + _unregisterOSInfo(pClient, pClient->pOSInfo); + + listRemoveFirstByValue(&g_clientListBehindGpusLock, (void*)&pClient); + + if (pClient->pUserInfo != NULL) + { + _unregisterUserInfo(pClient->pUserInfo); + pClient->pUserInfo = NULL; + } + + if (pClient->pSecurityToken != NULL) + { + if (!pClient->bIsClientVirtualMode) + portMemFree(pClient->pSecurityToken); + + pClient->pSecurityToken = NULL; + } + + if (bReleaseLock) + { + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } +} + +NV_STATUS +rmclientInterMap_IMPL +( + RmClient *pClient, + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RS_INTER_MAP_PARAMS *pParams +) +{ + RS_INTER_MAP_PRIVATE *pPrivate = pParams->pPrivate; + RS_RES_MAP_TO_PARAMS mapToParams; + + // Use virtual MapTo to perform the class-specific mapping to pMapperRef + portMemSet(&mapToParams, 0, sizeof(mapToParams)); + + mapToParams.pMemoryRef = pMappableRef; + mapToParams.offset = pParams->offset; + mapToParams.length = pParams->length; + mapToParams.flags = pParams->flags; + mapToParams.flags2 = pParams->flags2; + mapToParams.kindOverride = pParams->kindOverride; + mapToParams.pDmaOffset = &pParams->dmaOffset; + mapToParams.ppMemDesc = (MEMORY_DESCRIPTOR**)&pParams->pMemDesc; + + mapToParams.pGpu = pPrivate->pGpu; + mapToParams.pSrcGpu = pPrivate->pSrcGpu; + mapToParams.pSrcMemDesc = pPrivate->pSrcMemDesc; + mapToParams.hBroadcastDevice = pPrivate->hBroadcastDevice; + mapToParams.hMemoryDevice = pPrivate->hMemoryDevice; + mapToParams.gpuMask = pPrivate->gpuMask; + mapToParams.bSubdeviceHandleProvided = pPrivate->bSubdeviceHandleProvided; + mapToParams.bFlaMapping = pPrivate->bFlaMapping; + + return resMapTo(pMapperRef->pResource, &mapToParams); +} + +NV_STATUS +rmclientInterUnmap_IMPL +( + RmClient *pClient, + RsResourceRef *pMapperRef, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + RS_INTER_UNMAP_PRIVATE *pPrivate = pParams->pPrivate; + RS_RES_UNMAP_FROM_PARAMS unmapFromParams; + + // Use virtual UnmapFrom to perform the class-specific unmapping from pMapperRef + portMemSet(&unmapFromParams, 0, sizeof(unmapFromParams)); + + unmapFromParams.pMemDesc = 
pParams->pMemDesc;
+    unmapFromParams.hMemory = pParams->hMappable;
+    unmapFromParams.flags = pParams->flags;
+    unmapFromParams.dmaOffset = pParams->dmaOffset;
+    unmapFromParams.size = pParams->size;
+
+    unmapFromParams.pGpu = pPrivate->pGpu;
+    unmapFromParams.hBroadcastDevice = pPrivate->hBroadcastDevice;
+    unmapFromParams.gpuMask = pPrivate->gpuMask;
+    unmapFromParams.bSubdeviceHandleProvided = pPrivate->bSubdeviceHandleProvided;
+
+    return resUnmapFrom(pMapperRef->pResource, &unmapFromParams);
+}
+
+RS_PRIV_LEVEL
+rmclientGetCachedPrivilege_IMPL
+(
+    RmClient *pClient
+)
+{
+    return pClient->cachedPrivilege;
+}
+
+NvBool
+rmclientIsAdmin_IMPL
+(
+    RmClient *pClient,
+    RS_PRIV_LEVEL privLevel
+)
+{
+    if (pClient == NULL)
+        return NV_FALSE;
+
+    return (privLevel >= RS_PRIV_LEVEL_USER_ROOT) && !pClient->bIsRootNonPriv;
+}
+
+void
+rmclientSetClientFlags_IMPL
+(
+    RmClient *pClient,
+    NvU32 clientFlags
+)
+{
+    pClient->Flags |= clientFlags;
+}
+
+static void
+_rmclientPromoteDebuggerState
+(
+    RmClient *pClient,
+    NvU32 newMinimumState
+)
+{
+    if (pClient->ClientDebuggerState < newMinimumState)
+    {
+        pClient->ClientDebuggerState = newMinimumState;
+    }
+}
+
+void *
+rmclientGetSecurityToken_IMPL
+(
+    RmClient *pClient
+)
+{
+    return pClient->pSecurityToken;
+}
+
+/*!
+ * @brief Given a client handle, validate the handle for security.
+ *
+ * Important!! This function should be called ONLY in the user-mode paths.
+ * The security validations will fail in kernel paths, especially if called
+ * with privileged kernel handles.
+ *
+ * @param[in] hClient  The client handle
+ * @param[in] pSecInfo The new calling context's security info.
+ *
+ * @return NV_OK if validated
+ *         NV_ERR_INVALID_CLIENT if the client cannot be found
+ *         or if there isn't a match.
+ */
+static NV_STATUS
+_rmclientUserClientSecurityCheck
+(
+    RmClient *pClient,
+    const API_SECURITY_INFO *pSecInfo
+)
+{
+    NV_STATUS status = NV_OK;
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    PSECURITY_TOKEN pCurrentToken = NULL;
+    PSECURITY_TOKEN pSecurityToken = pSecInfo->pProcessToken;
+
+    if ((pSys == NULL) ||
+        (!pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE)))
+    {
+        return NV_OK;
+    }
+
+    //
+    // Check 1:
+    // The following check makes sure that user paths cannot be called with
+    // privileged kernel handles.
+    //
+    // Note: For the user paths, we are checking against both kernel and admin
+    // clients. The reason is that KMD today creates unprivileged kernel handles
+    // (of class NV01_ROOT_NON_PRIV) on behalf of user clients (CUDA debugger,
+    // profiler, OGL, etc.) and gives out those handles. These handles are
+    // kernel, but they do not have admin privileges, and since clients already
+    // use these handles to call into RM through the user paths, we are allowing
+    // them through ... for now.
+    //
+    // Until we either fix the clients to wean off these kernel handles or change
+    // KMD to not give out the kernel handles, we need to keep the check restricted
+    // to handles created with NV01_ROOT using the CliCheckAdmin interface.
+    //
+    if ((pSecInfo->privLevel >= RS_PRIV_LEVEL_KERNEL) && !pClient->bIsRootNonPriv)
+    {
+        NV_PRINTF(LEVEL_WARNING, "Incorrect client handle used in the User export\n");
+        return NV_ERR_INVALID_CLIENT;
+    }
+
+    //
+    // Check 2:
+    // Validate the client handle to make sure that the user who created the
+    // handle is the one that uses it. Otherwise a malicious user can guess the
+    // client handle created by another user and access information that it's
+    // not privy to.
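+    //
+    // The comparison itself is delegated to osValidateClientTokens() below,
+    // which matches the token cached when the client was allocated against
+    // the token of the current calling process (or the one supplied in
+    // pSecInfo).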
+ // + pCurrentToken = (pSecurityToken != NULL ? pSecurityToken : osGetSecurityToken()); + if (pCurrentToken == NULL) + { + NV_PRINTF(LEVEL_WARNING, + "Cannot get the security token for the current user.\n"); + NV_PRINTF(LEVEL_WARNING, + "The user client cannot be validated\n"); + status = NV_ERR_INVALID_CLIENT; + DBG_BREAKPOINT(); + goto CliUserClientSecurityCheck_exit; + } + + status = osValidateClientTokens((void*)rmclientGetSecurityToken(pClient), + (void*)pCurrentToken); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Error validating client token. Status = 0x%08x\n", status); + goto CliUserClientSecurityCheck_exit; + } + +CliUserClientSecurityCheck_exit: + if (pCurrentToken != NULL && pSecurityToken == NULL) + { + portMemFree(pCurrentToken); + pCurrentToken = NULL; + } + return status; +} + +NV_STATUS +rmclientPostProcessPendingFreeList_IMPL +( + RmClient *pClient, + RsResourceRef **ppFirstLowPriRef +) +{ + RsClient *pRsClient = staticCast(pClient, RsClient); + RsResourceRef *pTargetRef = NULL; + RsResourceRef *pStopRef = NULL; + RsResourceRef *pFirstLowPriRef = NULL; + + pStopRef = pRsClient->pFreeStack->pResourceRef; + pTargetRef = listHead(&pRsClient->pendingFreeList); + while (pTargetRef != pStopRef) + { + RsResourceRef *pNextRef = listNext(&pRsClient->pendingFreeList, pTargetRef); + + // Ensure that high priority resources (and their children/dependents) are freed first + if (pTargetRef->pResourceDesc->freePriority == RS_FREE_PRIORITY_HIGH) + { + clientUpdatePendingFreeList(pRsClient, pTargetRef, pTargetRef, NV_TRUE); + } + pTargetRef = pNextRef; + } + + // + // Find the last high-priority resource in the list. + // The next resource will be the first low priority resource. + // If there are no high-priority resources: use the head of the list + // + pTargetRef = (pStopRef != NULL) + ? pStopRef + : listTail(&pRsClient->pendingFreeList); + pFirstLowPriRef = listHead(&pRsClient->pendingFreeList); + + while (pTargetRef != NULL) + { + RsResourceRef *pPrevRef = listPrev(&pRsClient->pendingFreeList, pTargetRef); + + if (pTargetRef->pResourceDesc->freePriority == RS_FREE_PRIORITY_HIGH) + { + pFirstLowPriRef = listNext(&pRsClient->pendingFreeList, pTargetRef); + break; + } + pTargetRef = pPrevRef; + } + + if (ppFirstLowPriRef) + *ppFirstLowPriRef = pFirstLowPriRef; + + return NV_OK; +} + +static inline NvBool rmclientIsKernelOnly(RmClient *pClient) +{ + return (pClient->pSecurityToken == NULL); +} + +NvBool rmclientIsKernelOnlyByHandle(NvHandle hClient) +{ + RmClient *pClient = serverutilGetClientUnderLock(hClient); + return (pClient ? rmclientIsKernelOnly(pClient) : NV_FALSE); +} + +NvBool rmclientSetClientFlagsByHandle(NvHandle hClient, NvU32 clientFlags) +{ + RmClient *pClient = serverutilGetClientUnderLock(hClient); + if (pClient) + rmclientSetClientFlags(pClient, clientFlags); + return !!pClient; +} + +void rmclientPromoteDebuggerStateByHandle(NvHandle hClient, NvU32 newMinimumState) +{ + RmClient *pClient = serverutilGetClientUnderLock(hClient); + if (pClient) + _rmclientPromoteDebuggerState(pClient, newMinimumState); +} + +void *rmclientGetSecurityTokenByHandle(NvHandle hClient) +{ + RmClient *pClient = serverutilGetClientUnderLock(hClient); + return pClient ? rmclientGetSecurityToken(pClient) : NULL; +} + +NV_STATUS rmclientUserClientSecurityCheckByHandle(NvHandle hClient, const API_SECURITY_INFO *pSecInfo) +{ + RmClient *pClient = serverutilGetClientUnderLock(hClient); + + // + // Return early if it's a null object. 
This is probably the allocation of + // the root client object, so the client class is going to be null. + // + // RS-TODO - This check should move to the caller. + // + if (hClient == NV01_NULL_OBJECT) + { + return NV_OK; + } + + if (pClient) + { + return _rmclientUserClientSecurityCheck(pClient, pSecInfo); + } + else + return NV_ERR_INVALID_CLIENT; +} + +/** + * Register a uid token with the client database and return a UserInfo that + * corresponds to the uid token. + * + * If the uid token has not been registered before, a new UserInfo will be registered and returned. + * If the uid token is already registered, an existing UserInfo will be ref-counted and + * returned. + * + * This function must be protected by a lock (currently the GPUs lock.) + * + * @param[inout] ppUidToken + * @param[out] ppUserInfo + */ +static NV_STATUS +_registerUserInfo +( + PUID_TOKEN *ppUidToken, + UserInfo **ppUserInfo +) +{ + NV_STATUS status = NV_OK; + NvBool bFound = NV_FALSE; + UserInfo *pUserInfo = NULL; + UserInfoListIter it = listIterAll(&g_userInfoList); + PUID_TOKEN pUidToken; + + if ((!ppUidToken) || (!(*ppUidToken))) + return NV_ERR_INVALID_ARGUMENT; + + pUidToken = *ppUidToken; + + // Find matching user token + while(listIterNext(&it)) + { + pUserInfo = *it.pValue; + if (osUidTokensEqual(pUserInfo->pUidToken, pUidToken)) + { + bFound = NV_TRUE; + break; + } + } + + if (!bFound) + { + RsShared *pShared; + status = serverAllocShare(&g_resServ, classInfo(UserInfo), &pShared); + if (status != NV_OK) + return status; + + pUserInfo = dynamicCast(pShared, UserInfo); + pUserInfo->pUidToken = pUidToken; + + if (listAppendValue(&g_userInfoList, (void*)&pUserInfo) == NULL) + { + serverFreeShare(&g_resServ, pShared); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + } + else + { + serverRefShare(&g_resServ, staticCast(pUserInfo, RsShared)); + portMemFree(pUidToken); + *ppUidToken = NULL; + } + + *ppUserInfo = pUserInfo; + + return NV_OK; +} + +/** + * + * Unregister a client from a user info list + * + * This function must be protected by a lock (currently the GPUs lock.) 
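+ *
+ * Drops one reference on the UserInfo; the entry is removed from
+ * g_userInfoList and freed once the last reference is released.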
+ * + * @param[in] pUserInfo + */ +static NV_STATUS +_unregisterUserInfo +( + UserInfo *pUserInfo +) +{ + NvS32 refCount = serverGetShareRefCount(&g_resServ, staticCast(pUserInfo, RsShared)); + if (--refCount == 0) + { + listRemoveFirstByValue(&g_userInfoList, (void*)&pUserInfo); + } + return serverFreeShare(&g_resServ, staticCast(pUserInfo, RsShared)); +} + +NV_STATUS userinfoConstruct_IMPL +( + UserInfo *pUserInfo +) +{ + return NV_OK; +} + +void +userinfoDestruct_IMPL +( + UserInfo *pUserInfo +) +{ + portMemFree(pUserInfo->pUidToken); +} + +NV_STATUS +rmclientValidate_IMPL +( + RmClient *pClient, + const API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE) && + pSecInfo != NULL) + { + if (pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT) && + pSecInfo->clientOSInfo != NULL) + { + if (pClient->pOSInfo != pSecInfo->clientOSInfo) + { + status = NV_ERR_INVALID_CLIENT; + } + } + else if (pSecInfo->privLevel < RS_PRIV_LEVEL_KERNEL) + { + status = _rmclientUserClientSecurityCheck(pClient, pSecInfo); + } + } + + return status; +} + +NV_STATUS +rmclientValidateLocks_IMPL +( + RmClient *pClient, + RsServer *pServer, + const CLIENT_ENTRY *pClientEntry +) +{ + // Possessing the client lock means it's always safe to use this client object + if (pClientEntry->lockOwnerTid == portThreadGetCurrentThreadId()) + return NV_OK; + + // + // Without the client lock, the API lock in write mode guarantees safety for the + // client object since nothing else can execute in parallel when holding it. + // + if (rmapiLockIsWriteOwner()) + return NV_OK; + + // + // Without the client lock, the API lock in read mode guarantees safety for the + // client object IF it's a client that cannot be used directly by user space (i.e. + // kernel privileged client and/or internal client). + // + if (rmapiLockIsOwner() && + (rmclientIsKernelOnly(pClient) || + serverIsClientInternal(pServer, pClientEntry->hClient))) + { + return NV_OK; + } + + + NV_ASSERT(0); + // Otherwise we don't have the required locks to use this RM client + return NV_ERR_INVALID_LOCK_STATE; +} + +NV_STATUS +rmclientFreeResource_IMPL +( + RmClient *pClient, + RsServer *pServer, + RS_RES_FREE_PARAMS_INTERNAL *pRmFreeParams +) +{ + NV_STATUS status; + OBJGPU *pGpu; + NvBool bBcState; + NvBool bRestoreBcState = NV_FALSE; + RsClient *pRsClient = staticCast(pClient, RsClient); + + if (gpuGetByRef(pRmFreeParams->pResourceRef, NULL, &pGpu) == NV_OK) + { + bBcState = gpumgrGetBcEnabledStatus(pGpu); + bRestoreBcState = NV_TRUE; + } + + rmapiFreeResourcePrologue(pRmFreeParams); + + // + // In the RTD3 case, the API lock isn't taken since it can be initiated + // from another thread that holds the API lock and because we now hold + // the GPU lock. 
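+    // Setting RM_LOCK_FLAGS_NO_API_LOCK below instructs the resource server
+    // not to attempt to take the API lock for this free.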
+    //
+    if (rmapiInRtd3PmPath())
+    {
+        pRmFreeParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_API_LOCK;
+    }
+
+    status = clientFreeResource_IMPL(pRsClient, pServer, pRmFreeParams);
+
+    if (bRestoreBcState)
+    {
+        gpumgrSetBcEnabledStatus(pGpu, bBcState);
+    }
+    return status;
+}
+
+static NvBool _rmclientIsCapable
+(
+    NvHandle hClient,
+    NvU32 capability
+)
+{
+    NvU32 internalClassId;
+    RsResourceRef *pResourceRef = NULL;
+
+    switch(capability)
+    {
+        case NV_RM_CAP_SYS_SMC_CONFIG:
+        {
+            internalClassId = classId(MIGConfigSession);
+            break;
+        }
+        case NV_RM_CAP_EXT_FABRIC_MGMT:
+        {
+            internalClassId = classId(FmSessionApi);
+            break;
+        }
+        case NV_RM_CAP_SYS_SMC_MONITOR:
+        {
+            internalClassId = classId(MIGMonitorSession);
+            break;
+        }
+        default:
+        {
+            NV_ASSERT(0);
+            return NV_FALSE;
+        }
+    }
+
+    // Check if client has allocated a given class
+    pResourceRef = serverutilFindChildRefByType(hClient, hClient, internalClassId, NV_TRUE);
+    if (pResourceRef == NULL)
+    {
+        return NV_FALSE;
+    }
+
+    return NV_TRUE;
+}
+
+NvBool rmclientIsCapableOrAdmin_IMPL
+(
+    RmClient *pClient,
+    NvU32 capability,
+    RS_PRIV_LEVEL privLevel
+)
+{
+    RsClient *pRsClient = staticCast(pClient, RsClient);
+    NvHandle hClient = pRsClient->hClient;
+
+    if (rmclientIsAdmin(pClient, privLevel))
+    {
+        return NV_TRUE;
+    }
+
+    return _rmclientIsCapable(hClient, capability);
+}
+
+NvBool rmclientIsCapable_IMPL
+(
+    RmClient *pClient,
+    NvU32 capability
+)
+{
+    RsClient *pRsClient = staticCast(pClient, RsClient);
+    NvHandle hClient = pRsClient->hClient;
+
+    return _rmclientIsCapable(hClient, capability);
+}
+
+/**
+ *
+ * Register a client's OS info with the global OS info map
+ *
+ * This function must be protected by a lock (currently the GPUs lock.)
+ *
+ * @param[in] pClient
+ * @param[in] pOSInfo
+ */
+static NV_STATUS
+_registerOSInfo
+(
+    RmClient *pClient,
+    void *pOSInfo
+)
+{
+    OsInfoMapSubmap *pSubmap = NULL;
+    RmClient **pInsert = NULL;
+    NvU64 key1 = (NvUPtr)pOSInfo;
+    NvU64 key2 = (NvU64)(staticCast(pClient,RsClient))->hClient;
+
+    if (multimapFindItem(&g_osInfoList, key1, key2) != NULL)
+        return NV_ERR_INSERT_DUPLICATE_NAME;
+
+    if (multimapFindSubmap(&g_osInfoList, key1) == NULL)
+    {
+        pSubmap = multimapInsertSubmap(&g_osInfoList, key1);
+        if (pSubmap == NULL)
+            return NV_ERR_NO_MEMORY;
+    }
+
+    pInsert = multimapInsertItemNew(&g_osInfoList, key1, key2);
+    if (pInsert == NULL)
+        return NV_ERR_NO_MEMORY;
+
+    osAllocatedRmClient(pOSInfo);
+
+    *pInsert = pClient;
+
+    return NV_OK;
+}
+
+/**
+ *
+ * Unregister a client's OS info from the global OS info map
+ *
+ * This function must be protected by a lock (currently the GPUs lock.)
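+ *
+ * When the last entry for a given OS info pointer is removed, the submap
+ * keyed by that pointer is removed from the map as well.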
+ * + * @param[in] pClient + * @param[in] pOSInfo + */ +static NV_STATUS +_unregisterOSInfo +( + RmClient *pClient, + void *pOSInfo +) +{ + NvU64 key1 = (NvUPtr)pOSInfo; + NvU64 key2 = (NvU64)(staticCast(pClient, RsClient))->hClient; + OsInfoMapSubmap *pSubmap = NULL; + RmClient **pFind = NULL; + + pFind = multimapFindItem(&g_osInfoList, key1, key2); + if (pFind != NULL) + multimapRemoveItem(&g_osInfoList, pFind); + + pSubmap = multimapFindSubmap(&g_osInfoList, key1); + if (pSubmap == NULL || multimapCountSubmapItems(&g_osInfoList, pSubmap) > 0) + return NV_OK; + + multimapRemoveSubmap(&g_osInfoList, pSubmap); + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/rmapi/client_resource.c b/src/nvidia/src/kernel/rmapi/client_resource.c new file mode 100644 index 0000000..0d228d7 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/client_resource.c @@ -0,0 +1,1791 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "core/locks.h" +#include "core/system.h" +#include "os/os.h" +#include "rmapi/client_resource.h" +#include "rmapi/param_copy.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi.h" +#include "gpu/gpu.h" +#include "gpu/device/device.h" +#include "gpu/gpu_uuid.h" +#include "gpu_mgr/gpu_mgr.h" +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" +#include "resserv/rs_access_map.h" +#include "nvBldVer.h" +#include "nvVer.h" +#include "platform/nvpcf.h" +#include "mem_mgr/mem.h" +#include "nvsecurityinfo.h" +#include "resource_desc.h" +#include "platform/sli/sli.h" + +#include "nvop.h" +#include "mem_mgr/virt_mem_mgr.h" + +#define CONFIG_2X_BUFF_SIZE_MIN (2) + +/*! + * Define large signed mW values. Adding one of these values will produce a + * result that is then MIN/MAX-ed to be within the range allowed by VBIOS. 
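+ *
+ * Illustrative example (hypothetical limits): if the VBIOS allows offsets in
+ * [-5000, 5000] mW, then adding QBOOST_LARGE_POSITIVE_MW and MIN-ing against
+ * the VBIOS maximum simply pins the result at 5000 mW.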
+ */ +#define QBOOST_LARGE_POSITIVE_MW (10000000) +#define QBOOST_LARGE_NEGATIVE_MW (-10000000) + +// +// Controller Table v2.2 has removed some params, set them using these +// default values instead +// +// EWMA retention weight (232/256) results in tau being 10x the sampling period +// +#define CONTROLLER_GRP_DEFAULT_BASE_SAMPLING_PERIOD_MS (100) +#define CONTROLLER_GRP_DEFAULT_SAMPLING_MULTIPLIER (1) +#define CONTROLLER_GRP_DEFAULT_EWMA_WEIGHT (232) +#define CONTROLLER_GRP_DEFAULT_INCREASE_GAIN_UFXP4_12 (3686) +#define CONTROLLER_GRP_DEFAULT_DECREASE_GAIN_UFXP4_12 (4096) + +/*! + * Define the filter types. + */ +#define NVPCF0100_CTRL_CONTROLLER_FILTER_TYPE_EMWA (0) +#define NVPCF0100_CTRL_CONTROLLER_FILTER_TYPE_MOVING_MAX (1) + +NV_STATUS +cliresConstruct_IMPL +( + RmClientResource *pRmCliRes, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL* pParams +) +{ + return NV_OK; +} + +void +cliresDestruct_IMPL +( + RmClientResource *pRmCliRes +) +{ +} + +NvBool +cliresAccessCallback_IMPL +( + RmClientResource *pRmCliRes, + RsClient *pInvokingClient, + void *pAllocParams, + RsAccessRight accessRight +) +{ + // Client resource's access callback will grant any rights here to any resource it owns + switch (accessRight) + { + case RS_ACCESS_PERFMON: + case RS_ACCESS_NICE: + { + return osCheckAccess(accessRight); + } + default: + { + return NV_FALSE; + } + } +} + +NvBool +cliresShareCallback_IMPL +( + RmClientResource *pRmCliRes, + RsClient *pInvokingClient, + RsResourceRef *pParentRef, + RS_SHARE_POLICY *pSharePolicy +) +{ + RmClient *pSrcClient = dynamicCast(RES_GET_CLIENT(pRmCliRes), RmClient); + RmClient *pDstClient = dynamicCast(pInvokingClient, RmClient); + NvBool bDstKernel = (pDstClient != NULL) && + (rmclientGetCachedPrivilege(pDstClient) >= RS_PRIV_LEVEL_KERNEL); + + // Client resource's share callback will also share rights it shares here with any resource it owns + // + // If a kernel client is validating share policies, that means it's most likely duping on behalf of + // a user space client. For this case, we check against the current process instead of the kernel + // client object's process. 
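+    //
+    // The switch below implements this: OS_SECURITY_TOKEN shares compare
+    // security tokens (against the current process when a kernel client is
+    // duping), and PID shares compare process IDs in the same fashion.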
+    //
+    switch (pSharePolicy->type)
+    {
+        case RS_SHARE_TYPE_OS_SECURITY_TOKEN:
+            if ((pSrcClient != NULL) && (pDstClient != NULL) &&
+                (pSrcClient->pSecurityToken != NULL))
+            {
+                if (bDstKernel)
+                {
+                    NV_STATUS status;
+                    PSECURITY_TOKEN pCurrentToken;
+
+                    pCurrentToken = osGetSecurityToken();
+                    if (pCurrentToken == NULL)
+                    {
+                        NV_ASSERT_FAILED("Cannot get the security token for the current user");
+                        return NV_FALSE;
+                    }
+
+                    status = osValidateClientTokens(pSrcClient->pSecurityToken, pCurrentToken);
+                    portMemFree(pCurrentToken);
+                    if (status == NV_OK)
+                    {
+                        return NV_TRUE;
+                    }
+                }
+                else if (pDstClient->pSecurityToken != NULL)
+                {
+                    if (osValidateClientTokens(pSrcClient->pSecurityToken, pDstClient->pSecurityToken) == NV_OK)
+                        return NV_TRUE;
+                }
+            }
+            break;
+        case RS_SHARE_TYPE_PID:
+            if ((pSrcClient != NULL) && (pDstClient != NULL))
+            {
+                if ((pParentRef != NULL) && bDstKernel)
+                {
+                    if (pSrcClient->ProcID == osGetCurrentProcess())
+                        return NV_TRUE;
+                }
+                else
+                {
+                    if (pSrcClient->ProcID == pDstClient->ProcID)
+                        return NV_TRUE;
+                }
+            }
+            break;
+        case RS_SHARE_TYPE_SMC_PARTITION:
+        case RS_SHARE_TYPE_GPU:
+            // Require exceptions, since RmClientResource is not an RmResource
+            if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE)
+                return NV_TRUE;
+            break;
+    }
+
+    // Delegate to superclass
+    return resShareCallback_IMPL(staticCast(pRmCliRes, RsResource), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+NV_STATUS
+cliresControl_Prologue_IMPL
+(
+    RmClientResource *pRmCliRes,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pParams
+)
+{
+    NV_STATUS status = serverDeserializeCtrlDown(pCallContext, pParams->cmd, &pParams->pParams, &pParams->paramsSize, &pParams->flags);
+
+    return status;
+}
+
+void
+cliresControl_Epilogue_IMPL
+(
+    RmClientResource *pRmCliRes,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pParams
+)
+{
+    NV_ASSERT_OK(serverSerializeCtrlUp(pCallContext, pParams->cmd, &pParams->pParams, &pParams->paramsSize, &pParams->flags));
+    serverFreeSerializeStructures(pCallContext, pParams->pParams);
+}
+
+// ****************************************************************************
+//                              Helper functions
+// ****************************************************************************
+
+
+static NV_STATUS
+CliControlSystemEvent
+(
+    NvHandle hClient,
+    NvU32    event,
+    NvU32    action
+)
+{
+    NV_STATUS status = NV_OK;
+    RmClient *pClient;
+    PEVENTNOTIFICATION *pEventNotification = NULL;
+
+    if (event >= NV0000_NOTIFIERS_MAXCOUNT)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    pClient = serverutilGetClientUnderLock(hClient);
+    if (pClient == NULL)
+        return NV_ERR_INVALID_CLIENT;
+
+    CliGetEventNotificationList(hClient, hClient, NULL, &pEventNotification);
+    if (pEventNotification != NULL)
+    {
+        switch (action)
+        {
+            case NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE:
+            case NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT:
+            {
+                if (pClient->CliSysEventInfo.notifyActions[event] != NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE)
+                {
+                    status = NV_ERR_INVALID_STATE;
+                    break;
+                }
+
+                // fall through
+            }
+            case NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE:
+            {
+                pClient->CliSysEventInfo.notifyActions[event] = action;
+                break;
+            }
+
+            default:
+            {
+                status = NV_ERR_INVALID_ARGUMENT;
+                break;
+            }
+        }
+    }
+    else
+    {
+        status = NV_ERR_INVALID_STATE;
+    }
+
+    return status;
+}
+
+// ****************************************************************************
+// Other functions
+// 
**************************************************************************** + +// +// cliresCtrlCmdSystemGetFeatures +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdSystemGetFeatures_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS *pFeaturesParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 featuresMask = 0; + + NV_ASSERT_OR_RETURN(pSys != NULL, NV_ERR_INVALID_STATE); + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + if (pSys->getProperty(pSys, PDB_PROP_SYS_ENABLE_RM_TEST_ONLY_CODE)) + { + featuresMask = FLD_SET_DRF(0000, _CTRL_SYSTEM_GET_FEATURES, + _RM_TEST_ONLY_CODE_ENABLED, _TRUE, featuresMask); + } + + pFeaturesParams->featuresMask = featuresMask; + + return NV_OK; +} + +// +// cliresCtrlCmdSystemGetBuildVersionV2 +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdSystemGetBuildVersionV2_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *pParams +) +{ + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + ct_assert(sizeof(NV_VERSION_STRING) <= sizeof(pParams->driverVersionBuffer)); + ct_assert(sizeof(NV_BUILD_BRANCH_VERSION) <= sizeof(pParams->versionBuffer)); + ct_assert(sizeof(NV_DISPLAY_DRIVER_TITLE) <= sizeof(pParams->titleBuffer)); + ct_assert(sizeof(STRINGIZE(NV_BUILD_BRANCH)) <= sizeof(pParams->driverBranch)); + + portMemCopy(pParams->driverVersionBuffer, sizeof(pParams->driverVersionBuffer), + NV_VERSION_STRING, sizeof(NV_VERSION_STRING)); + portMemCopy(pParams->versionBuffer, sizeof(pParams->versionBuffer), + NV_BUILD_BRANCH_VERSION, sizeof(NV_BUILD_BRANCH_VERSION)); + portMemCopy(pParams->titleBuffer, sizeof(pParams->titleBuffer), + NV_DISPLAY_DRIVER_TITLE, sizeof(NV_DISPLAY_DRIVER_TITLE)); + portMemCopy(pParams->driverBranch, sizeof(pParams->driverBranch), + STRINGIZE(NV_BUILD_BRANCH), sizeof(STRINGIZE(NV_BUILD_BRANCH))); + + pParams->changelistNumber = NV_BUILD_CHANGELIST_NUM; + pParams->officialChangelistNumber = NV_LAST_OFFICIAL_CHANGELIST_NUM; + + return NV_OK; +} + +// +// cliresCtrlCmdSystemGetCpuInfo +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdSystemGetCpuInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *pCpuInfoParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPU *pGpuIter = NULL; + NvU32 gpuMask = 0U; + NvU32 gpuIndex = 0U; + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + pCpuInfoParams->type = pSys->cpuInfo.type; + pCpuInfoParams->capabilities = pSys->cpuInfo.caps; + pCpuInfoParams->clock = pSys->cpuInfo.clock; + pCpuInfoParams->L1DataCacheSize = pSys->cpuInfo.l1DataCacheSize; + pCpuInfoParams->L2DataCacheSize = pSys->cpuInfo.l2DataCacheSize; + pCpuInfoParams->dataCacheLineSize = pSys->cpuInfo.dataCacheLineSize; + pCpuInfoParams->numLogicalCpus = pSys->cpuInfo.numLogicalCpus; + pCpuInfoParams->numPhysicalCpus = pSys->cpuInfo.numPhysicalCpus; + pCpuInfoParams->coresOnDie = pSys->cpuInfo.coresOnDie; + pCpuInfoParams->family = pSys->cpuInfo.family; + pCpuInfoParams->model = pSys->cpuInfo.model; + pCpuInfoParams->stepping = pSys->cpuInfo.stepping; + pCpuInfoParams->bCCEnabled = (sysGetStaticConfig(pSys))->bOsCCEnabled; + portMemCopy(pCpuInfoParams->name, + sizeof (pCpuInfoParams->name), pSys->cpuInfo.name, + sizeof (pCpuInfoParams->name)); + + pCpuInfoParams->selfHostedSocType = 
NV0000_CTRL_SYSTEM_SH_SOC_TYPE_NA; + gpumgrGetGpuAttachInfo(NULL, &gpuMask); + while ((pGpuIter = gpumgrGetNextGpu(gpuMask, &gpuIndex)) != NULL) + { + pCpuInfoParams->selfHostedSocType = gpuDetermineSelfHostedSocType_HAL(pGpuIter); + if (pCpuInfoParams->selfHostedSocType != NV0000_CTRL_SYSTEM_SH_SOC_TYPE_NA) + { + break; + } + } + + return NV_OK; +} + +// +// cliresCtrlCmdSystemGetLockTimes +// +// Get API and GPU lock hold/wait times. +// +// Lock Requirements: +// None +// +NV_STATUS +cliresCtrlCmdSystemGetLockTimes_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + // Check if lock time collection is enabled first + if (!pSys->getProperty(pSys, PDB_PROP_SYS_RM_LOCK_TIME_COLLECT)) + return NV_ERR_NOT_SUPPORTED; + + // Fetch API lock hold/wait times + rmapiLockGetTimes(pParams); + + // Fetch GPU lock hold/wait times + rmGpuLockGetTimes(pParams); + + return NV_OK; +} + +static NV_STATUS +classGetSystemClasses(NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *pParams) +{ + NvU32 i; + NvU32 numResources; + const RS_RESOURCE_DESC *resources; + NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS params; + + NV_ASSERT_OR_RETURN(pParams, NV_ERR_INVALID_ARGUMENT); + + RsResInfoGetResourceList(&resources, &numResources); + + portMemSet(¶ms, 0x0, sizeof(params)); + + for (i = 0; i < numResources; i++) + { + if ((resources[i].pParentList[0] == classId(RmClientResource)) && + (resources[i].pParentList[1] == 0x0)) + { + NV_ASSERT_OR_RETURN(params.numClasses < NV0000_CTRL_SYSTEM_MAX_CLASSLIST_SIZE, + NV_ERR_INVALID_STATE); + + params.classes[params.numClasses] = resources[i].externalClassId; + params.numClasses++; + } + } + + portMemCopy(pParams, sizeof(*pParams), ¶ms, sizeof(params)); + + return NV_OK; +} + +// +// cliresCtrlCmdSystemGetClassList +// +// Get list of supported system classes. +// +// Lock Requirements: +// Assert that API and GPUs locks held on entry +// +NV_STATUS +cliresCtrlCmdSystemGetClassList_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *pParams +) +{ + NV_STATUS status; + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + status = classGetSystemClasses(pParams); + + return status; +} + +// +// cliresCtrlCmdSystemNotifyEvent +// +// This function exists to allow the RM Client to notify us when they receive +// a system event message. We generally will store off the data, but in some +// cases, we'll trigger our own handling of that code. Prior to Vista, we +// would just poll a scratch bit for these events. But for Vista, we get them +// directly from the OS. 
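+// In this tree, every recognized event type below currently returns
+// NV_ERR_NOT_SUPPORTED; only the argument validation is meaningful.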
+// +// Added Support for notifying power change event to perfhandler +// +NV_STATUS +cliresCtrlCmdSystemNotifyEvent_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + + switch(pParams->eventType) + { + case NV0000_CTRL_SYSTEM_EVENT_TYPE_LID_STATE: + case NV0000_CTRL_SYSTEM_EVENT_TYPE_DOCK_STATE: + case NV0000_CTRL_SYSTEM_EVENT_TYPE_TRUST_LID: + case NV0000_CTRL_SYSTEM_EVENT_TYPE_TRUST_DOCK: + { + status = NV_ERR_NOT_SUPPORTED; + break; + } + + case NV0000_CTRL_SYSTEM_EVENT_TYPE_POWER_SOURCE: + status = NV_ERR_NOT_SUPPORTED; + break; + + default: + status = NV_ERR_INVALID_ARGUMENT; + break; + } + + return status; +} + +NV_STATUS +cliresCtrlCmdSystemDebugCtrlRmMsg_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS *pParams +) +{ +// NOTE: RmMsg is only available when NV_PRINTF_STRINGS_ALLOWED is true. +#if NV_PRINTF_STRINGS_ALLOWED + NvU32 len = 0; + + extern char RmMsg[NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE]; + + switch (pParams->cmd) + { + case NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_CMD_GET: + { + len = (NvU32)portStringLength(RmMsg); + portMemCopy(pParams->data, len, RmMsg, len); + pParams->count = len; + break; + } + case NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_CMD_SET: + { +#if !(defined(DEBUG) || defined(DEVELOP)) + RmClient *pRmClient = dynamicCast(RES_GET_CLIENT(pRmCliRes), RmClient); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pRmClient != NULL, NV_ERR_INVALID_CLIENT); + + if (!rmclientIsAdmin(pRmClient, pCallContext->secInfo.privLevel)) + { + NV_PRINTF(LEVEL_WARNING, "Non-privileged context issued privileged cmd\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } +#endif + portMemCopy(RmMsg, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE, pParams->data, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE); + break; + } + default: + return NV_ERR_INVALID_ARGUMENT; + break; + } + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NV_STATUS +cliresCtrlCmdSystemGetRmInstanceId_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS *pRmInstanceIdParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + pRmInstanceIdParams->rm_instance_id = pSys->rmInstanceId; + + return NV_OK; +} + +// +// cliresCtrlCmdGpuGetAttachedIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuGetAttachedIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuAttachedIds +) +{ + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + return gpumgrGetAttachedGpuIds(pGpuAttachedIds); +} + +// +// cliresCtrlCmdGpuGetIdInfo +// +// Lock Requirements: +// Assert that API lock and Gpus lock held on entry +// +NV_STATUS +cliresCtrlCmdGpuGetIdInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *pGpuIdInfoParams +) +{ + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + return gpumgrGetGpuIdInfo(pGpuIdInfoParams); +} + +NV_STATUS +cliresCtrlCmdGpuGetIdInfoV2_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *pGpuIdInfoParams +) +{ + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + return gpumgrGetGpuIdInfoV2(pGpuIdInfoParams); +} + +// +// cliresCtrlCmdGpuGetInitStatus +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS 
+cliresCtrlCmdGpuGetInitStatus_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *pGpuInitStatusParams +) +{ + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + return gpumgrGetGpuInitStatus(pGpuInitStatusParams); +} + +// +// cliresCtrlCmdGpuGetDeviceIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuGetDeviceIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS *pDeviceIdsParams +) +{ + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + pDeviceIdsParams->deviceIds = gpumgrGetDeviceInstanceMask(); + + return NV_OK; +} + +// +// cliresCtrlCmdGpuGetActiveDeviceIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuGetActiveDeviceIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_ACTIVE_DEVICE_IDS_PARAMS *pActiveDeviceIdsParams +) +{ + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + return gpumgrCacheGetActiveDeviceIds(pActiveDeviceIdsParams); +} + +// +// cliresCtrlCmdGpuGetPciInfo +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuGetPciInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS *pPciInfoParams +) +{ + NV_STATUS status; + NvU64 gpuDomainBusDevice; + + NV_ASSERT(rmapiLockIsOwner()); + + status = gpumgrGetProbedGpuDomainBusDevice(pPciInfoParams->gpuId, &gpuDomainBusDevice); + if (status != NV_OK) + return status; + + pPciInfoParams->domain = gpuDecodeDomain(gpuDomainBusDevice); + pPciInfoParams->bus = gpuDecodeBus(gpuDomainBusDevice); + pPciInfoParams->slot = gpuDecodeDevice(gpuDomainBusDevice); + + return NV_OK; +} + +// +// cliresCtrlCmdGpuGetProbedIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuGetProbedIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds +) +{ + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + return gpumgrGetProbedGpuIds(pGpuProbedIds); +} + +// +// _cliresValidateGpuIdAgainstProbed +// +// Lock Requirements: none (only operates on arguments) +// +static NV_STATUS +_cliresValidateGpuIdAgainstProbed +( + NvU32 gpuId, + const NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds +) +{ + NvU32 j; + + for (j = 0; j < NV0000_CTRL_GPU_MAX_PROBED_GPUS; j++) + { + if (pGpuProbedIds->gpuIds[j] == NV0000_CTRL_GPU_INVALID_ID) + break; + + if (gpuId == pGpuProbedIds->gpuIds[j]) + return NV_OK; + } + + return NV_ERR_INVALID_ARGUMENT; +} + +// +// cliresCtrlCmdGpuAttachIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuAttachIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_ATTACH_IDS_PARAMS *pGpuAttachIds +) +{ + NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds = NULL; + NvU32 i; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + if (pGpuAttachIds->gpuIds[0] == NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS) + { + // XXX add callback to attach logic on Windows + status = NV_OK; + goto done; + } + + pGpuProbedIds = portMemAllocNonPaged(sizeof(*pGpuProbedIds)); + if (pGpuProbedIds == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + status = gpumgrGetProbedGpuIds(pGpuProbedIds); + if (status != NV_OK) + { + goto done; + } + + for (i = 0; (i < 
NV0000_CTRL_GPU_MAX_PROBED_GPUS) && + (pGpuAttachIds->gpuIds[i] != NV0000_CTRL_GPU_INVALID_ID); i++) + { + status = _cliresValidateGpuIdAgainstProbed(pGpuAttachIds->gpuIds[i], + pGpuProbedIds); + if (status != NV_OK) + break; + } + + // XXX add callback to attach logic on Windows +done: + portMemFree(pGpuProbedIds); + return status; +} + +// +// cliresCtrlCmdGpuAsyncAttachId +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuAsyncAttachId_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_ASYNC_ATTACH_ID_PARAMS *pAsyncAttachIdParams +) +{ + // + // Similar to non-async attach, async attach is mostly handled by + // libnvrmapi in userspace. Here, Core RM just does validation. + // + + NV_STATUS status = NV_OK; + NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds = NULL; + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + pGpuProbedIds = portMemAllocNonPaged(sizeof(*pGpuProbedIds)); + if (pGpuProbedIds == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + status = gpumgrGetProbedGpuIds(pGpuProbedIds); + if (status != NV_OK) + { + goto done; + } + + status = _cliresValidateGpuIdAgainstProbed(pAsyncAttachIdParams->gpuId, + pGpuProbedIds); + if (status != NV_OK) + { + goto done; + } + +done: + portMemFree(pGpuProbedIds); + return status; +} + +// +// cliresCtrlCmdGpuWaitAttachId +// +// Lock Requirements: none +// +NV_STATUS +cliresCtrlCmdGpuWaitAttachId_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_WAIT_ATTACH_ID_PARAMS *pWaitAttachIdParams +) +{ + // + // Similar to non-async attach, async attach is mostly handled by + // libnvrmapi in userspace. That includes the logic for waiting for + // background attach operations to complete. + // + // Since background attach operations are not tracked by Core RM + // (that is the responsibility of libnvrmapi and the kernel interface + // layer), there is nothing to do here. + // + // Note: libnvrmapi on UNIX skips calling into Core RM entirely for this + // command, so this path (and the unneeded API lock acquire) is not taken. 
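+ // In short, for any caller that does reach Core RM, this control is a + // deliberate no-op that simply reports success.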
+ // + + return NV_OK; +} + +// +// cliresCtrlCmdGpuDetachIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuDetachIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_DETACH_IDS_PARAMS *pGpuDetachIds +) +{ + NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuAttachedIds = NULL; + NvU32 i, j; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + if (pGpuDetachIds->gpuIds[0] == NV0000_CTRL_GPU_DETACH_ALL_ATTACHED_IDS) + { + // XXX add callback to detach logic on Windows + status = NV_OK; + goto done; + } + else + { + pGpuAttachedIds = portMemAllocNonPaged(sizeof(*pGpuAttachedIds)); + if (pGpuAttachedIds == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + status = gpumgrGetAttachedGpuIds(pGpuAttachedIds); + if (status != NV_OK) + { + goto done; + } + + for (i = 0; (i < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS) && + (pGpuDetachIds->gpuIds[i] != NV0000_CTRL_GPU_INVALID_ID); i++) + { + for (j = 0; (j < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS) && + (pGpuAttachedIds->gpuIds[j] != NV0000_CTRL_GPU_INVALID_ID); j++) + { + if (pGpuDetachIds->gpuIds[i] == pGpuAttachedIds->gpuIds[j]) + break; + } + + if ((j == NV0000_CTRL_GPU_MAX_ATTACHED_GPUS) || + (pGpuAttachedIds->gpuIds[j] == NV0000_CTRL_GPU_INVALID_ID)) + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + else + { + // XXX add callback to detach logic on Windows + break; + } + } + } + +done: + portMemFree(pGpuAttachedIds); + return status; +} + +NV_STATUS +cliresCtrlCmdGsyncGetAttachedIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS *pGsyncAttachedIds +) +{ + NvU32 i; + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGsyncAttachedIds->gsyncIds); i++) + { + pGsyncAttachedIds->gsyncIds[i] = NV0000_CTRL_GSYNC_INVALID_ID; + } + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdGsyncGetIdInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS *pGsyncIdInfoParams +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +cliresCtrlCmdEventSetNotification_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pEventSetNotificationParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + + return CliControlSystemEvent(hClient, pEventSetNotificationParams->event, pEventSetNotificationParams->action); +} + +NV_STATUS +cliresCtrlCmdEventGetSystemEventData_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GET_SYSTEM_EVENT_DATA_PARAMS *pSystemEventDataParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RmClient *pClient = serverutilGetClientUnderLock(hClient); + + if (pClient == NULL) + return NV_ERR_INVALID_CLIENT; + + return eventSystemDequeueEvent(&pClient->CliSysEventInfo.eventQueue, + pSystemEventDataParams); +} + +NV_STATUS +cliresCtrlCmdSystemNVPCFGetPowerModeInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams +) +{ + return NV_ERR_NOT_SUPPORTED; + +} + +NV_STATUS +cliresCtrlCmdSystemGetVgxSystemInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS *pParams +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +cliresCtrlCmdSystemGetPrivilegedStatus_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS *pParams +) +{ + RmClient *pClient = dynamicCast(RES_GET_CLIENT(pRmCliRes), RmClient); + NvU8 privStatus = 0; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + 
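// pCallContext is dereferenced below; as a defensive addition, mirror the + // NULL check used by the other control handlers in this file. + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); +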
NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN (pClient != NULL, NV_ERR_INVALID_CLIENT); + + if (pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL) + { + privStatus |= NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_KERNEL_HANDLE_FLAG; + } + + if (pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_USER_ROOT) + { + privStatus |= NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_USER_FLAG; + } + + if (rmclientIsAdmin(pClient, pCallContext->secInfo.privLevel)) + { + privStatus |= NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_HANDLE_FLAG; + } + + pParams->privStatusFlags = privStatus; + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdSystemGetFabricStatus_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 fabricStatus = NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_SKIP; + + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + fabricStatus = NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_UNINITIALIZED; + + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED)) + { + fabricStatus = NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_IN_PROGRESS; + } + + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED)) + { + fabricStatus = NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_INITIALIZED; + } + } + + pParams->fabricStatus = fabricStatus; + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdGpuGetUuidInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = NULL; + + pGpu = gpumgrGetGpuFromUuid(pParams->gpuUuid, pParams->flags); + + if (NULL == pGpu) + return NV_ERR_OBJECT_NOT_FOUND; + + pParams->gpuId = pGpu->gpuId; + pParams->deviceInstance = gpuGetDeviceInstance(pGpu); + pParams->subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdGpuGetUuidFromGpuId_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS *pParams +) +{ + OBJGPU *pGpu = NULL; + NvU8 *pGidString = NULL; + NvU32 gidStrLen = 0; + NV_STATUS rmStatus; + + // First check for UUID cached by gpumgr + rmStatus = gpumgrGetGpuUuidInfo(pParams->gpuId, &pGidString, &gidStrLen, pParams->flags); + + if (rmStatus != NV_OK) + { + // If UUID not cached by gpumgr then try to query device + pGpu = gpumgrGetGpuFromId(pParams->gpuId); + + if (NULL == pGpu) + return NV_ERR_OBJECT_NOT_FOUND; + + // get the UUID of this GPU + rmStatus = gpuGetGidInfo(pGpu, &pGidString, &gidStrLen, pParams->flags); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "gpumgrGetGpuInfo: getting gpu GUID failed\n"); + return rmStatus; + } + } + + if (gidStrLen <= NV0000_GPU_MAX_GID_LENGTH) + { + portMemCopy(pParams->gpuUuid, gidStrLen, pGidString, gidStrLen); + pParams->uuidStrLen = gidStrLen; + } + + // cleanup the allocated gidstring + portMemFree(pGidString); + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdGpuModifyGpuDrainState_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS *pParams +) +{ + NV_STATUS status; + NvBool bEnable; + NvBool bRemove = NV_FALSE; + NvBool bLinkDisable = NV_FALSE; + OBJGPU *pGpu = gpumgrGetGpuFromId(pParams->gpuId); + + if (NV0000_CTRL_GPU_DRAIN_STATE_ENABLED == pParams->newState) + { + if ((pGpu != NULL) && IsSLIEnabled(pGpu)) + { + // "drain" state not supported in SLI configurations + return NV_ERR_NOT_SUPPORTED; + } + + bEnable = NV_TRUE; + bRemove = + ((pParams->flags & NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE) != 
0); + bLinkDisable = + ((pParams->flags & NV0000_CTRL_GPU_DRAIN_STATE_FLAG_LINK_DISABLE) != 0); + + if (bLinkDisable && !bRemove) + { + return NV_ERR_INVALID_ARGUMENT; + } + } + else if (NV0000_CTRL_GPU_DRAIN_STATE_DISABLED == + pParams->newState) + { + bEnable = NV_FALSE; + } + else + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Set/Clear GPU manager drain state + status = gpumgrModifyGpuDrainState(pParams->gpuId, bEnable, bRemove, bLinkDisable); + + return status; +} + +NV_STATUS +cliresCtrlCmdGpuQueryGpuDrainState_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS *pParams +) +{ + NvBool bDrainState; + NvBool bRemove; + NV_STATUS status; + + status = gpumgrQueryGpuDrainState(pParams->gpuId, &bDrainState, &bRemove); + + if (status != NV_OK) + { + return status; + } + + pParams->drainState = bDrainState ? NV0000_CTRL_GPU_DRAIN_STATE_ENABLED + : NV0000_CTRL_GPU_DRAIN_STATE_DISABLED; + + pParams->flags = bRemove ? NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE : 0; + + return NV_OK; +} + +/* + * Associate sub process ID with client handle + * + * @return 'NV_OK' on success. Otherwise return NV_ERR_INVALID_CLIENT + */ +NV_STATUS +cliresCtrlCmdSetSubProcessID_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RmClient *pClient = serverutilGetClientUnderLock(hClient); + + if (pClient == NULL) + return NV_ERR_INVALID_CLIENT; + + pClient->SubProcessID = pParams->subProcessID; + portStringCopy(pClient->SubProcessName, sizeof(pClient->SubProcessName), pParams->subProcessName, sizeof(pParams->subProcessName)); + + return NV_OK; +} + +/* + * Disable USERD isolation among all the sub processes within a user process + * + * @return 'NV_OK' on success. 
Otherwise return NV_ERR_INVALID_CLIENT + */ +NV_STATUS +cliresCtrlCmdDisableSubProcessUserdIsolation_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RmClient *pClient = serverutilGetClientUnderLock(hClient); + + if (pClient == NULL) + return NV_ERR_INVALID_CLIENT; + + pClient->bIsSubProcessDisabled = pParams->bIsSubProcessDisabled; + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdClientGetAddrSpaceType_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + CALL_CONTEXT callContext; + RsClient *pRsClient; + RsResourceRef *pResourceRef; + Memory *pMemory = NULL; + GpuResource *pGpuResource = NULL; + NV_ADDRESS_SPACE memType; + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pRsClient, pParams->hObject, &pResourceRef)); + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pClient = pRsClient; + callContext.pResourceRef = pResourceRef; + + pMemory = dynamicCast(pResourceRef->pResource, Memory); + if (pMemory != NULL) + { + NV_ASSERT_OK_OR_RETURN(memGetMapAddrSpace(pMemory, &callContext, pParams->mapFlags, &memType)); + } + else + { + pGpuResource = dynamicCast(pResourceRef->pResource, GpuResource); + if (pGpuResource != NULL) + { + NV_ASSERT_OK_OR_RETURN(gpuresGetMapAddrSpace(pGpuResource, &callContext, pParams->mapFlags, &memType)); + } + else + { + return NV_ERR_INVALID_OBJECT; + } + } + + switch (memType) + { + case ADDR_SYSMEM: + case ADDR_EGM: + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_SYSMEM; + break; + case ADDR_FBMEM: + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM; + break; + case ADDR_REGMEM: + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_REGMEM; + break; + case ADDR_FABRIC_V2: + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC; + break; + case ADDR_FABRIC_MC: +#ifdef NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC_MC + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC_MC; + break; +#else + NV_ASSERT(0); + return NV_ERR_INVALID_ARGUMENT; +#endif + case ADDR_VIRTUAL: + NV_PRINTF(LEVEL_ERROR, + "VIRTUAL (0x%x) is not a valid NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE\n", + memType); + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID; + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + default: + NV_PRINTF(LEVEL_ERROR, "Cannot determine address space 0x%x\n", + memType); + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID; + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdClientGetHandleInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + NV_STATUS status; + RsResourceRef *pRsResourceRef; + + status = serverutilGetResourceRef(hClient, pParams->hObject, &pRsResourceRef); + if (status != NV_OK) + { + return status; + } + + switch (pParams->index) + { + case NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_PARENT: + pParams->data.hResult = pRsResourceRef->pParentRef ? 
pRsResourceRef->pParentRef->hResource : 0; + break; + case NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_CLASSID: + pParams->data.iResult = pRsResourceRef->externalClassId; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdClientGetAccessRights_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS *pParams +) +{ + NV_STATUS status; + RsResourceRef *pRsResourceRef; + RsResourceRef *pClientRef = RES_GET_REF(pRmCliRes); + RsClient *pClient = pClientRef->pClient; + + status = serverutilGetResourceRef(pParams->hClient, pParams->hObject, &pRsResourceRef); + if (status != NV_OK) + { + return status; + } + + rsAccessUpdateRights(pRsResourceRef, pClient, NULL); + + rsAccessGetAvailableRights(pRsResourceRef, pClient, &pParams->maskResult); + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdClientSetInheritedSharePolicy_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS *pParams +) +{ + NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS params; + + portMemSet(&params, 0, sizeof(params)); + params.sharePolicy = pParams->sharePolicy; + params.hObject = RES_GET_REF(pRmCliRes)->hResource; + + return cliresCtrlCmdClientShareObject(pRmCliRes, &params); +} + +NV_STATUS +cliresCtrlCmdClientShareObject_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS *pParams +) +{ + RS_SHARE_POLICY *pSharePolicy = &pParams->sharePolicy; + RsClient *pClient = RES_GET_CLIENT(pRmCliRes); + RsResourceRef *pObjectRef; + + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldCallContext; + + NV_STATUS status; + + if (pSharePolicy->type >= RS_SHARE_TYPE_MAX) + return NV_ERR_INVALID_ARGUMENT; + + status = clientGetResourceRef(pClient, pParams->hObject, &pObjectRef); + if (status != NV_OK) + return status; + + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = &g_resServ; + callContext.pClient = pClient; + callContext.pResourceRef = pObjectRef; + callContext.secInfo = pCallContext->secInfo; + + NV_ASSERT_OK_OR_RETURN(resservSwapTlsCallContext(&pOldCallContext, &callContext)); + + status = clientShareResource(pClient, pObjectRef, pSharePolicy, &callContext); + NV_ASSERT_OK(resservRestoreTlsCallContext(pOldCallContext)); + if (status != NV_OK) + return status; + + // + // Above clientShareResource does everything needed for normal sharing, + // but we may still need to add a backref if we're sharing with a client, + // to prevent stale access. + // + if (!(pSharePolicy->action & RS_SHARE_ACTION_FLAG_REVOKE) && + (pSharePolicy->type == RS_SHARE_TYPE_CLIENT)) + { + RsClient *pClientTarget; + + // Trying to share with self, nothing to do.
+ if (pSharePolicy->target == pClient->hClient) + return NV_OK; + + status = serverGetClientUnderLock(&g_resServ, pSharePolicy->target, &pClientTarget); + if (status != NV_OK) + return status; + + status = clientAddAccessBackRef(pClientTarget, pObjectRef); + if (status != NV_OK) + return status; + } + + return status; +} + +NV_STATUS +cliresCtrlCmdClientGetChildHandle_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + NV_STATUS status; + RsResourceRef *pParentRef; + RsResourceRef *pResourceRef; + + status = serverutilGetResourceRef(hClient, pParams->hParent, &pParentRef); + if (status != NV_OK) + { + return status; + } + + status = refFindChildOfType(pParentRef, pParams->classId, NV_TRUE, &pResourceRef); + if (status == NV_OK) + { + pParams->hObject = pResourceRef ? pResourceRef->hResource : 0; + } + return status; +} + +NV_STATUS +cliresCtrlCmdObjectsAreDuplicates_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_OBJECTS_ARE_DUPLICATES_PARAMS *pParams +) +{ + RsResourceRef *pResRef; + RsClient *pClient = RES_GET_CLIENT(pRmCliRes); + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + clientGetResourceRef(pClient, pParams->hObject1, &pResRef)); + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + resIsDuplicate(pResRef->pResource, pParams->hObject2, + &pParams->bDuplicates)); + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdGpuGetMemOpEnable_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS *pMemOpEnableParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NV_STATUS status = NV_OK; + + pMemOpEnableParams->enableMask = 0; + + if (pSys->getProperty(pSys, PDB_PROP_SYS_ENABLE_STREAM_MEMOPS)) + { + NV_PRINTF(LEVEL_INFO, "MemOpOverride enabled\n"); + pMemOpEnableParams->enableMask = NV0000_CTRL_GPU_FLAGS_MEMOP_ENABLE; + } + + return status; +} + +NV_STATUS +cliresCtrlCmdGpuDisableNvlinkInit_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS *pParams +) +{ + RmClient *pRmClient = dynamicCast(RES_GET_CLIENT(pRmCliRes), RmClient); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(NULL != pRmClient, NV_ERR_INVALID_CLIENT); + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + if (!rmclientIsCapableOrAdmin(pRmClient, + NV_RM_CAP_EXT_FABRIC_MGMT, + pCallContext->secInfo.privLevel)) + { + NV_PRINTF(LEVEL_WARNING, "Non-privileged context issued privileged cmd\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + if (pParams->gpuId == NV0000_CTRL_GPU_INVALID_ID) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return gpumgrSetGpuInitDisabledNvlinks(pParams->gpuId, pParams->mask, + &pParams->links, pParams->bSkipHwNvlinkDisable); +} + +NV_STATUS +cliresCtrlCmdGpuSetNvlinkBwMode_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_SET_NVLINK_BW_MODE_PARAMS *pParams +) +{ + return gpumgrSetGpuNvlinkBwMode(pParams->mode); +} + +NV_STATUS +cliresCtrlCmdGpuGetNvlinkBwMode_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_NVLINK_BW_MODE_PARAMS *pParams +) +{ + pParams->mode = gpumgrGetGpuNvlinkBwMode(); + pParams->bwModeScope = gpumgrGetGpuNvlinkBwModeScope(); + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdLegacyConfig_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RsClient *pClient = RES_GET_CLIENT(pRmCliRes); + RmClient 
*pRmClient = dynamicCast(pClient, RmClient); + NvHandle hDeviceOrSubdevice = pParams->hContext; + NvHandle hDevice; + OBJGPU *pGpu; + GpuResource *pGpuResource; + NV_STATUS rmStatus = NV_OK; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pRmClient != NULL, NV_ERR_INVALID_CLIENT); + + // + // Clients pass in device or subdevice as context for NvRmConfigXyz. + // + rmStatus = gpuresGetByDeviceOrSubdeviceHandle(pClient, + hDeviceOrSubdevice, + &pGpuResource); + if (rmStatus != NV_OK) + return rmStatus; + + hDevice = RES_GET_HANDLE(GPU_RES_GET_DEVICE(pGpuResource)); + pGpu = GPU_RES_GET_GPU(pGpuResource); + + // + // GSP client builds should have these legacy APIs disabled, + // but a monolithic build running in offload mode can still reach here, + // so log those cases and bail early to keep the same behavior. + // + NV_ASSERT_OR_RETURN(!IS_GSP_CLIENT(pGpu), NV_ERR_NOT_SUPPORTED); + + GPU_RES_SET_THREAD_BC_STATE(pGpuResource); + + pParams->dataType = pParams->opType; + + switch (pParams->opType) + { + default: + PORT_UNREFERENCED_VARIABLE(pGpu); + PORT_UNREFERENCED_VARIABLE(hDevice); + PORT_UNREFERENCED_VARIABLE(hClient); + rmStatus = NV_ERR_NOT_SUPPORTED; + break; + } + + return rmStatus; +} + +NV_STATUS +cliresCtrlCmdSystemSyncExternalFabricMgmt_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS *pExtFabricMgmtParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED, + pExtFabricMgmtParams->bExternalFabricMgmt); + return NV_OK; +} + +NV_STATUS cliresCtrlCmdSystemGetClientDatabaseInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS *pParams +) +{ + pParams->clientCount = g_resServ.activeClientCount; + pParams->resourceCount = g_resServ.activeResourceCount; + return NV_OK; +} + +/* + * Helper function to cliresCtrlCmdPushUcode_IMPL to allocate memory and copy data. + */ + +/*! + * @brief Used to push the GSP ucode or bindata_image into RM. 
This function is used only on + * VMware + * + * @return + * NV_OK The sent data is stored successfully + * NV_ERR_INVALID_ARGUMENT if the arguments are not proper + * NV_ERR_NO_MEMORY if memory allocation failed + * NV_ERR_NOT_SUPPORTED if function is invoked on non-GSP setup or any + * setup other than VMware host + */ +NV_STATUS cliresCtrlCmdPushUcodeImage_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_PUSH_UCODE_IMAGE_PARAMS *pParams +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS cliresCtrlCmdSystemRmctrlCacheModeCtrl_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_PARAMS *pParams +) +{ + switch (pParams->cmd) + { + case NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_CMD_SET: + rmapiControlCacheSetMode(pParams->mode); + break; + case NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_CMD_GET: + pParams->mode = rmapiControlCacheGetMode(); + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + return NV_OK; +} + +// GPS HOSUNGK DELETE after KMD, NvAPI changes are made + +NV_STATUS +cliresCtrlCmdSystemGetVrrCookiePresent_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_VRR_COOKIE_PRESENT_PARAMS *pParams +) +{ + return NV_ERR_NOT_SUPPORTED; +} diff --git a/src/nvidia/src/kernel/rmapi/control.c b/src/nvidia/src/kernel/rmapi/control.c new file mode 100644 index 0000000..11a109a --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/control.c @@ -0,0 +1,1149 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "rmapi/rmapi.h" +#include "rmapi/control.h" +#include "rmapi/client.h" +#include "rmapi/rs_utils.h" +#include "diagnostics/tracer.h" +#include "core/locks.h" +#include "core/thread_state.h" +#include "virtualization/hypervisor/hypervisor.h" +#include "gpu/device/device.h" + +#include "entry_points.h" +#include "resserv/rs_access_map.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/gpu.h" +#include "rmapi/rmapi_specific.h" +#include "rmapi/rmapi_utils.h" +#include "kernel/gpu/gsp/gsp_trace_rats_macro.h" + +#include "ctrl/ctrl0000/ctrl0000gpuacct.h" // NV0000_CTRL_CMD_GPUACCT_* +#include "ctrl/ctrl2080/ctrl2080tmr.h" // NV2080_CTRL_CMD_TIMER_SCHEDULE + +static NV_STATUS +releaseDeferRmCtrlBuffer(RmCtrlDeferredCmd* pRmCtrlDeferredCmd) +{ + portMemSet(&pRmCtrlDeferredCmd->paramBuffer, 0, RMCTRL_DEFERRED_MAX_PARAM_SIZE); + + portAtomicSetS32(&pRmCtrlDeferredCmd->pending, RMCTRL_DEFERRED_FREE); + + return NV_OK; +} + +// +// This is the rmControl internal handler for deferred calls. +// +// + +NV_STATUS +rmControl_Deferred(RmCtrlDeferredCmd* pRmCtrlDeferredCmd) +{ + RmCtrlParams rmCtrlParams; + NvU8 paramBuffer[RMCTRL_DEFERRED_MAX_PARAM_SIZE]; + NV_STATUS status; + RS_LOCK_INFO lockInfo = {0}; + RS_CONTROL_COOKIE rmCtrlExecuteCookie = {0}; + + // init RmCtrlParams + portMemCopy(&rmCtrlParams, sizeof(RmCtrlParams), &pRmCtrlDeferredCmd->rmCtrlDeferredParams, sizeof(RmCtrlParams)); + rmCtrlParams.hParent = NV01_NULL_OBJECT; + rmCtrlParams.pGpu = NULL; + rmCtrlParams.pLockInfo = &lockInfo; + rmCtrlParams.pCookie = &rmCtrlExecuteCookie; + + // Temporary: tell ResServ not to take any locks + lockInfo.flags = RM_LOCK_FLAGS_NO_GPUS_LOCK | + RM_LOCK_FLAGS_NO_CLIENT_LOCK; + + if (rmapiLockIsOwner()) + { + lockInfo.state = RM_LOCK_STATES_API_LOCK_ACQUIRED; + } + else + { + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK; + } + + // paramsSize not > _MAX already verified in _rmControlDeferred + if ((rmCtrlParams.pParams != NvP64_NULL) && (rmCtrlParams.paramsSize != 0)) + { + // copy param to a local buffer so that pRmCtrlDeferredCmd can be released + portMemSet(paramBuffer, 0, RMCTRL_DEFERRED_MAX_PARAM_SIZE); + portMemCopy(paramBuffer, rmCtrlParams.paramsSize, rmCtrlParams.pParams, rmCtrlParams.paramsSize); + rmCtrlParams.pParams = paramBuffer; + } + + releaseDeferRmCtrlBuffer(pRmCtrlDeferredCmd); + + // client was checked when we came in through rmControl() + // but check again to make sure it's still good + if (serverutilGetClientUnderLock(rmCtrlParams.hClient) == NULL) + { + status = NV_ERR_INVALID_CLIENT; + goto exit; + } + + status = serverControl(&g_resServ, &rmCtrlParams); + +exit: + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_NOTICE, "deferred rmctrl %x failed %x!\n", + rmCtrlParams.cmd, status); + } + + return status; +} + +static NV_STATUS +_rmControlDeferred(RmCtrlParams *pRmCtrlParams, NvP64 pUserParams, NvU32 paramsSize) +{ + // Schedule a deferred rmctrl call + OBJGPU *pGpu; + NvBool bBcResource; + NV_STATUS rmStatus; + RsClient *pClient; + + // We can't allocate memory at DIRQL, so use a pre-allocated buffer to store any rmctrl param. + // The size can't be larger than RMCTRL_DEFERRED_MAX_PARAM_SIZE (defined in rmctrl.h); otherwise, + // fail the call.
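+ // + // For example (illustrative): an NV2080_CTRL_CMD_TIMER_SCHEDULE issued at + // raised IRQL is copied into a free pGpu->pRmCtrlDeferredCmd slot below and + // replayed later by rmControl_Deferred() once locks can be taken.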
+ if (paramsSize > RMCTRL_DEFERRED_MAX_PARAM_SIZE) + { + NV_PRINTF(LEVEL_WARNING, + "rmctrl param size (%d) larger than limit (%d).\n", + paramsSize, RMCTRL_DEFERRED_MAX_PARAM_SIZE); + rmStatus = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + + rmStatus = serverGetClientUnderLock(&g_resServ, pRmCtrlParams->hClient, &pClient); + if (rmStatus != NV_OK) + return rmStatus; + + rmStatus = gpuGetByHandle(pClient, pRmCtrlParams->hObject, &bBcResource, &pGpu); + if (rmStatus != NV_OK) + return rmStatus; + + // Set SLI BC state for thread + gpuSetThreadBcState(pGpu, bBcResource); + + pRmCtrlParams->pGpu = pGpu; + pRmCtrlParams->pLockInfo = NULL; + + switch (pRmCtrlParams->cmd) + { + // we don't have an available bit left in RmCtrlParams.cmd to + // mark a rmctrl type as deferrable, so use a cmd list here + case NV2080_CTRL_CMD_TIMER_SCHEDULE: + { + if (pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 idx; + + for (idx = 0; idx < MAX_DEFERRED_CMDS; idx++) + { + if (portAtomicCompareAndSwapS32(&pGpu->pRmCtrlDeferredCmd[idx].pending, + RMCTRL_DEFERRED_ACQUIRED, + RMCTRL_DEFERRED_FREE)) + { + portMemCopy(&pGpu->pRmCtrlDeferredCmd[idx].rmCtrlDeferredParams, + sizeof(RmCtrlParams), pRmCtrlParams, sizeof(RmCtrlParams)); + + // copyin param to kernel buffer for deferred rmctrl + if (paramsSize != 0 && pUserParams != 0) + { + portMemCopy(pGpu->pRmCtrlDeferredCmd[idx].paramBuffer, paramsSize, + NvP64_VALUE(pUserParams), paramsSize); + + if (paramsSize < RMCTRL_DEFERRED_MAX_PARAM_SIZE) + { + portMemSet(pGpu->pRmCtrlDeferredCmd[idx].paramBuffer + + paramsSize, + 0, RMCTRL_DEFERRED_MAX_PARAM_SIZE - paramsSize); + } + + pGpu->pRmCtrlDeferredCmd[idx].rmCtrlDeferredParams.pParams = + pGpu->pRmCtrlDeferredCmd[idx].paramBuffer; + } + + portAtomicSetS32(&pGpu->pRmCtrlDeferredCmd[idx].pending, + RMCTRL_DEFERRED_READY); + + // Make sure there's a release call to trigger the deferred rmctrl: + // the previous rmctrl holding the lock may already have finished + // (released its lock) in the window after this rmctrl failed to + // acquire the lock and before the pending flag was set. + + // LOCK: try to acquire GPUs lock + if (rmGpuLocksAcquire(GPU_LOCK_FLAGS_COND_ACQUIRE, + RM_LOCK_MODULES_CLIENT) == NV_OK) + { + if (osCondAcquireRmSema(pSys->pSema) == NV_OK) + { + // In case this is called from device interrupt, use pGpu to queue DPC. + osReleaseRmSema(pSys->pSema, pGpu); + } + // In case this is called from device interrupt, use pGpu to queue DPC.
+ // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, pGpu); + } + + rmStatus = NV_OK; + goto done; + } + } + } + + rmStatus = NV_ERR_STATE_IN_USE; + break; + } + + default: + rmStatus = NV_ERR_BUSY_RETRY; + break; + } + +done: + return rmStatus; +} + +NV_STATUS +serverControlApiCopyIn +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams, + RS_CONTROL_COOKIE *pCookie +) +{ + NV_STATUS rmStatus; + API_STATE *pParamCopy; + API_STATE *pEmbeddedParamCopies; + NvP64 pUserParams; + NvU32 paramsSize; + + NV_ASSERT_OR_RETURN(pCookie != NULL, NV_ERR_INVALID_ARGUMENT); + pParamCopy = &pCookie->paramCopy; + pEmbeddedParamCopies = pCookie->embeddedParamCopies; + pUserParams = NV_PTR_TO_NvP64(pRmCtrlParams->pParams); + paramsSize = pRmCtrlParams->paramsSize; + + RMAPI_PARAM_COPY_INIT(*pParamCopy, pRmCtrlParams->pParams, pUserParams, 1, paramsSize); + + if (pCookie->apiCopyFlags & RMCTRL_API_COPY_FLAGS_SKIP_COPYIN_ZERO_BUFFER) + { + pParamCopy->flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + pParamCopy->flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + } + + rmStatus = rmapiParamsAcquire(pParamCopy, (pRmCtrlParams->secInfo.paramLocation == PARAM_LOCATION_USER)); + if (rmStatus != NV_OK) + return rmStatus; + pCookie->bFreeParamCopy = NV_TRUE; + + rmStatus = embeddedParamCopyIn(pEmbeddedParamCopies, pRmCtrlParams); + if (rmStatus != NV_OK) + { + rmapiParamsRelease(pParamCopy); + pRmCtrlParams->pParams = NvP64_VALUE(pUserParams); + pCookie->bFreeParamCopy = NV_FALSE; + return rmStatus; + } + pCookie->bFreeEmbeddedCopy = NV_TRUE; + + return NV_OK; +} + +NV_STATUS +serverControlApiCopyOut +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams, + RS_CONTROL_COOKIE *pCookie, + NV_STATUS rmStatus +) +{ + NV_STATUS cpStatus; + API_STATE *pParamCopy; + API_STATE *pEmbeddedParamCopies; + NvP64 pUserParams; + NvBool bFreeEmbeddedCopy; + NvBool bFreeParamCopy; + + NV_ASSERT_OR_RETURN(pCookie != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pRmCtrlParams != NULL, NV_ERR_INVALID_ARGUMENT); + + if ((pCookie->apiCopyFlags & RMCTRL_API_COPY_FLAGS_SET_CONTROL_CACHE) && rmStatus == NV_OK) + { + rmapiControlCacheSet(pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize); + } + + pParamCopy = &pCookie->paramCopy; + pEmbeddedParamCopies = pCookie->embeddedParamCopies; + pUserParams = pCookie->paramCopy.pUserParams; + bFreeParamCopy = pCookie->bFreeParamCopy; + bFreeEmbeddedCopy = pCookie->bFreeEmbeddedCopy; + + if ((rmStatus != NV_OK) && + (!(pCookie->ctrlFlags & RMCTRL_FLAGS_COPYOUT_ON_ERROR) || + (pCookie->apiCopyFlags & RMCTRL_API_COPY_FLAGS_FORCE_SKIP_COPYOUT_ON_ERROR))) + { + pParamCopy->flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + + if (bFreeEmbeddedCopy) + { + pEmbeddedParamCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + pEmbeddedParamCopies[1].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + pEmbeddedParamCopies[2].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + pEmbeddedParamCopies[3].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + } + } + + if (bFreeEmbeddedCopy) + { + cpStatus = embeddedParamCopyOut(pEmbeddedParamCopies, pRmCtrlParams); + if (rmStatus == NV_OK) + rmStatus = cpStatus; + pCookie->bFreeEmbeddedCopy = NV_FALSE; + } + + if (bFreeParamCopy) + { + cpStatus = rmapiParamsRelease(pParamCopy); + if (rmStatus == NV_OK) + rmStatus = cpStatus; + pRmCtrlParams->pParams = NvP64_VALUE(pUserParams); + pCookie->bFreeParamCopy = NV_FALSE; + } + + return rmStatus; +} + +static 
NV_STATUS +_rmapiRmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd, NvP64 pUserParams, NvU32 paramsSize, NvU32 flags, RM_API *pRmApi, API_SECURITY_INFO *pSecInfo) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + RmCtrlParams rmCtrlParams; + RS_CONTROL_COOKIE rmCtrlExecuteCookie = {0}; + NvBool bIsRaisedIrqlCmd; + NvBool bIsLockBypassCmd; + NvBool bInternalRequest; + NV_STATUS rmStatus = NV_OK; + RS_LOCK_INFO lockInfo = {0}; + NvU32 ctrlFlags = 0; + NvU32 ctrlAccessRight = 0; + NvU32 ctrlParamsSize = 0; + NV_STATUS getCtrlInfoStatus; + + RMTRACE_RMAPI(_RMCTRL_ENTRY, cmd); + + // Check first for the NULL command. + // Return NV_OK immediately for NVXXXX_CTRL_CMD_NULL (0x00000000) + // as well as the per-class NULL cmd ( _CATEGORY==0x00 and _INDEX==0x00 ) + if ((cmd == NVXXXX_CTRL_CMD_NULL) || + (FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _CATEGORY, 0x00, cmd) && + FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _INDEX, 0x00, cmd))) + { + return NV_OK; + } + + NV_PRINTF(LEVEL_INFO, + "rmControl: hClient 0x%x hObject 0x%x cmd 0x%x\n", hClient, + hObject, cmd); + + NV_PRINTF(LEVEL_INFO, "rmControl: pUserParams 0x%p paramSize 0x%x\n", + NvP64_VALUE(pUserParams), paramsSize); + + // If we're behind either API lock or GPU lock treat as internal. + bInternalRequest = pRmApi->bApiLockInternal || pRmApi->bGpuLockInternal; + + // is this a raised IRQL cmd? + bIsRaisedIrqlCmd = (flags & NVOS54_FLAGS_IRQL_RAISED); + + // is this a lock bypass cmd? + bIsLockBypassCmd = ((flags & NVOS54_FLAGS_LOCK_BYPASS) || pRmApi->bGpuLockInternal); + + // NVOS54_FLAGS_IRQL_RAISED cmds are only allowed to be called in raised irq level. + if (bIsRaisedIrqlCmd) + { + // Check that we support this control call at raised IRQL + if (!rmapiRmControlCanBeRaisedIrql(cmd)) + { + NV_PRINTF(LEVEL_WARNING, + "rmControl: cmd 0x%x cannot be called at raised irq level\n", cmd); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (!osIsRaisedIRQL()) + { + NV_PRINTF(LEVEL_WARNING, + "rmControl: raised cmd 0x%x at normal irq level\n", cmd); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + } + + if (bIsLockBypassCmd) + { + flags |= NVOS54_FLAGS_LOCK_BYPASS; + + if (!bInternalRequest) + { + // Check that we support bypassing locks with this control call + if (!rmapiRmControlCanBeBypassLock(cmd)) + { + NV_PRINTF(LEVEL_WARNING, + "rmControl: cmd 0x%x cannot bypass locks\n", cmd); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + } + } + + // Potential race condition if run lockless? 
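+ // (The deferred path in rmControl_Deferred() re-checks the client before + // executing, since this early check can go stale.)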
+ if (serverutilGetClientUnderLock(hClient) == NULL) + { + rmStatus = NV_ERR_INVALID_CLIENT; + goto done; + } + + // Only kernel clients can issue raised-IRQL or lock-bypass cmds; bypass + // the client priv check for internal calls done on behalf of lower-priv + // clients. + if ((bIsRaisedIrqlCmd || bIsLockBypassCmd) && !bInternalRequest) + { + if (pSecInfo->privLevel < RS_PRIV_LEVEL_KERNEL) + { + rmStatus = NV_ERR_INVALID_CLIENT; + goto done; + } + } + + getCtrlInfoStatus = rmapiutilGetControlInfo(cmd, &ctrlFlags, &ctrlAccessRight, &ctrlParamsSize); + + // error check parameters + if (((paramsSize != 0) && (pUserParams == (NvP64) 0)) || + ((paramsSize == 0) && (pUserParams != (NvP64) 0)) || + ((getCtrlInfoStatus == NV_OK) && (paramsSize != ctrlParamsSize))) + { + NV_PRINTF(LEVEL_INFO, + "bad params: cmd:0x%x ptr " NvP64_fmt " size: 0x%x expect size: 0x%x\n", + cmd, pUserParams, paramsSize, ctrlParamsSize); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // init RmCtrlParams + portMemSet(&rmCtrlParams, 0, sizeof(rmCtrlParams)); + rmCtrlParams.hClient = hClient; + rmCtrlParams.hObject = hObject; + rmCtrlParams.cmd = cmd; + rmCtrlParams.flags = flags; + rmCtrlParams.pParams = NvP64_VALUE(pUserParams); + rmCtrlParams.paramsSize = paramsSize; + rmCtrlParams.hParent = NV01_NULL_OBJECT; + rmCtrlParams.pGpu = NULL; + rmCtrlParams.pResourceRef = NULL; + rmCtrlParams.secInfo = *pSecInfo; + rmCtrlParams.pLockInfo = &lockInfo; + rmCtrlParams.pCookie = &rmCtrlExecuteCookie; + rmCtrlParams.bInternal = bInternalRequest; + + if (pRmApi->bApiLockInternal) + { + lockInfo.state |= RM_LOCK_STATES_API_LOCK_ACQUIRED; + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK; + } + + if (getCtrlInfoStatus == NV_OK) + { + // + // The output of a CACHEABLE RMCTRL does not depend on the input. + // Skip param copy and clear the buffer in case the uninitialized + // buffer leaks information to clients. + // + if (ctrlFlags & RMCTRL_FLAGS_CACHEABLE) + rmCtrlParams.pCookie->apiCopyFlags |= RMCTRL_API_COPY_FLAGS_SKIP_COPYIN_ZERO_BUFFER; + } + + rmCtrlParams.pCookie->ctrlFlags = ctrlFlags; + + // + // Three separate rmctrl command modes: + // + // mode#1: lock bypass rmctrl request + // mode#2: raised-irql rmctrl request + // mode#3: normal rmctrl request + // + if (bIsLockBypassCmd) + { + lockInfo.state |= RM_LOCK_STATES_API_LOCK_ACQUIRED; + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK | + RM_LOCK_FLAGS_NO_GPUS_LOCK | + RM_LOCK_FLAGS_NO_CLIENT_LOCK; + + // + // Lock bypass rmctrl request. + // + rmStatus = serverControl(&g_resServ, &rmCtrlParams); + } + else if (bIsRaisedIrqlCmd) + { + // + // Raised IRQL rmctrl request. + // + // Try to get lock; if we cannot get it then place on deferred queue. + // + + // LOCK: try to acquire GPUs lock + if (osCondAcquireRmSema(pSys->pSema) == NV_OK) + { + if (rmGpuLocksAcquire(GPU_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_CLIENT) == NV_OK) + { + lockInfo.state |= RM_LOCK_STATES_GPUS_LOCK_ACQUIRED; + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK | + RM_LOCK_FLAGS_NO_GPUS_LOCK | + RM_LOCK_FLAGS_NO_CLIENT_LOCK; + rmStatus = serverControl(&g_resServ, &rmCtrlParams); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPU_LOCK_FLAGS_COND_ACQUIRE, osIsISR() ? rmCtrlParams.pGpu : NULL); + } + else + { + rmStatus = _rmControlDeferred(&rmCtrlParams, pUserParams, paramsSize); + } + // we must have a pGpu here for queuing of a DPC. + NV_ASSERT(!osIsISR() || rmCtrlParams.pGpu); + osReleaseRmSema(pSys->pSema, osIsISR() ?
rmCtrlParams.pGpu : NULL); + } + else + { + rmStatus = _rmControlDeferred(&rmCtrlParams, pUserParams, paramsSize); + } + } + else + { + // + // Normal rmctrl request. + // + + if (getCtrlInfoStatus == NV_OK) + { + if (rmapiControlIsCacheable(ctrlFlags, ctrlAccessRight, NV_FALSE)) + { + rmCtrlParams.pCookie->apiCopyFlags |= RMCTRL_API_COPY_FLAGS_FORCE_SKIP_COPYOUT_ON_ERROR; + + rmStatus = serverControlApiCopyIn(&g_resServ, &rmCtrlParams, + rmCtrlParams.pCookie); + if (rmStatus == NV_OK) + { + rmStatus = rmapiControlCacheGet(hClient, hObject, cmd, + rmCtrlParams.pParams, + paramsSize, + pSecInfo); + + // rmStatus is passed in for error handling + rmStatus = serverControlApiCopyOut(&g_resServ, + &rmCtrlParams, + rmCtrlParams.pCookie, + rmStatus); + } + + if (rmStatus == NV_OK) + { + goto done; + } + else + { + // reset cookie if cache get failed + portMemSet(rmCtrlParams.pCookie, 0, sizeof(RS_CONTROL_COOKIE)); + rmCtrlParams.pCookie->apiCopyFlags |= RMCTRL_API_COPY_FLAGS_SET_CONTROL_CACHE; + + // re-initialize the flag if it was cleared + if (ctrlFlags & RMCTRL_FLAGS_CACHEABLE) + rmCtrlParams.pCookie->apiCopyFlags |= RMCTRL_API_COPY_FLAGS_SKIP_COPYIN_ZERO_BUFFER; + } + } + } + + RM_API_CONTEXT rmApiContext = {0}; + rmStatus = rmapiPrologue(pRmApi, &rmApiContext); + if (rmStatus != NV_OK) + goto epilogue; + + // + // If this is an internal request within the same RM instance, make + // sure we don't double lock clients and preserve previous lock state. + // + if (bInternalRequest && resservGetTlsCallContext() != NULL) + { + NvHandle hSecondClient = NV01_NULL_OBJECT; + + if (pSecInfo->paramLocation == PARAM_LOCATION_KERNEL) + { + rmStatus = serverControlLookupSecondClient(cmd, + NvP64_VALUE(pUserParams), rmCtrlParams.pCookie, &hSecondClient); + + if (rmStatus != NV_OK) + goto epilogue; + } + + rmStatus = rmapiInitLockInfo(pRmApi, hClient, hSecondClient, &lockInfo); + if (rmStatus != NV_OK) + goto epilogue; + + // + // rmapiInitLockInfo overwrites lockInfo.flags, re-add + // RM_LOCK_FLAGS_NO_API_LOCK if it was originally added. + // + if (pRmApi->bApiLockInternal) + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK; + } + + lockInfo.flags |= RM_LOCK_FLAGS_RM_SEMA; + rmStatus = serverControl(&g_resServ, &rmCtrlParams); +epilogue: + rmapiEpilogue(pRmApi, &rmApiContext); + } +done: + + RMTRACE_RMAPI(_RMCTRL_EXIT, cmd); + return rmStatus; +} + +static NvBool +serverControl_ValidateVgpu +( + OBJGPU *pGpu, + NvU32 cmd, + RS_PRIV_LEVEL privLevel, + const NvU32 cookieFlags +) +{ + NvBool bPermissionGranted = NV_FALSE; + + // Check if context is already sufficiently admin privileged + if (cookieFlags & RMCTRL_FLAGS_PRIVILEGED) + { + if (privLevel >= RS_PRIV_LEVEL_USER_ROOT) + { + bPermissionGranted = NV_TRUE; + } + } + + return bPermissionGranted; +} + +// +// Validate privilege level access for specific control command. +// +// This function is used for validating access for the following clients: +// 1. Non-Hypervisor clients +// 2. PF clients +// 3. Unprivileged processes running in Hypervisor +// 4. Privileged processes running in Hypervisor, executing an unprivileged control call. +// 5.
Kernel privileged processes running in Hypervisor +// +NV_STATUS rmControlValidateClientPrivilegeAccess +( + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvU32 ctrlFlags, + API_SECURITY_INFO *pSecInfo +) +{ + // permissions check for PRIVILEGED controls + if (ctrlFlags & RMCTRL_FLAGS_PRIVILEGED) + { + // + // Calls originating from usermode require admin perms while calls + // originating from other kernel drivers are always allowed. + // + if (pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT) + { + NV_PRINTF(LEVEL_NOTICE, + "hClient: 0x%08x, hObject 0x%08x, cmd 0x%08x: non-privileged context issued privileged cmd\n", + hClient, hObject, cmd); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + + // permissions check for KERNEL_PRIVILEGED (default) unless NON_PRIVILEGED, PRIVILEGED or INTERNAL is specified + if (!(ctrlFlags & (RMCTRL_FLAGS_NON_PRIVILEGED | RMCTRL_FLAGS_PRIVILEGED | RMCTRL_FLAGS_INTERNAL))) + { + if (pSecInfo->privLevel < RS_PRIV_LEVEL_KERNEL) + { + NV_PRINTF(LEVEL_NOTICE, + "hClient: 0x%08x, hObject 0x%08x, cmd 0x%08x: non-kernel client issued kernel-only cmd\n", + hClient, hObject, cmd); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + + return NV_OK; +} + +// validate rmctrl flags +NV_STATUS serverControl_ValidateCookie +( + RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams, + RS_CONTROL_COOKIE *pRmCtrlExecuteCookie +) +{ + NV_STATUS status; + OBJGPU *pGpu; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + if (pCallContext == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Calling context is NULL!\n"); + return NV_ERR_INVALID_PARAMETER; + } + + if (RMCFG_FEATURE_PLATFORM_GSP) + { + pGpu = gpumgrGetSomeGpu(); + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, "GPU is not found\n"); + return NV_ERR_INVALID_STATE; + } + } + + if (g_resServ.bRsAccessEnabled) + { + if (pRmCtrlParams->pResourceRef != NULL) + { + // + // Check that the invoking client has appropriate access rights + // For control calls, the invoking client is the owner of the ref + // + status = rsAccessCheckRights(pRmCtrlParams->pResourceRef, + pRmCtrlParams->pResourceRef->pClient, + &pRmCtrlExecuteCookie->rightsRequired); + if (status != NV_OK) + return status; + } + else + { + // pResourceRef can be NULL when rmControlCmdExecute is manually + // invoked from the deferred API path (see class5080DeferredApiV2). + // For now, we skip performing any access right checks in this case. 
+ } + } + else + { + // + // When access rights are disabled, any control calls that have the + // *_IF_RS_ACCESS_DISABLED flags should be treated as if they were declared + // with the corresponding flags + // + if ((pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_PRIVILEGED_IF_RS_ACCESS_DISABLED) != 0) + { + pRmCtrlExecuteCookie->ctrlFlags |= RMCTRL_FLAGS_PRIVILEGED; + } + } + + if (pRmCtrlParams->pGpu != NULL && IS_VIRTUAL(pRmCtrlParams->pGpu) && + (pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL) && + !(pRmCtrlExecuteCookie->ctrlFlags & (RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST | RMCTRL_FLAGS_PHYSICAL_IMPLEMENTED_ON_VGPU_GUEST))) + { + if (!rmapiutilSkipErrorMessageForUnsupportedVgpuGuestControl(pRmCtrlParams->pGpu, pRmCtrlParams->cmd)) + { + NV_PRINTF(LEVEL_ERROR, "Unsupported ROUTE_TO_PHYSICAL control 0x%x was called on vGPU guest\n", pRmCtrlParams->cmd); + } + + return NV_ERR_NOT_SUPPORTED; + } + + // confirm command is on allowlist for target gpu + if (pRmCtrlParams->pGpu != NULL) + { + status = gpuValidateRmctrlCmd_HAL(pRmCtrlParams->pGpu, pRmCtrlParams->cmd); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, "Control command 0x%x is not in allowlist\n", pRmCtrlParams->cmd); + return status; + } + } + + if ((pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_INTERNAL)) + { + NvBool bInternalCall = pRmCtrlParams->bInternal; + if (!bInternalCall) + return NV_ERR_NOT_SUPPORTED; + } + + // + // Narrow down usecase as much as possible to CPU-plugin. + // Must be running in hypervisor, at least cached privileged, not a kernel context and + // accessing a privileged or kernel privileged control call. + // + if (hypervisorIsVgxHyper() && + clientIsAdmin(pCallContext->pClient, clientGetCachedPrivilege(pCallContext->pClient)) && + (pRmCtrlParams->secInfo.privLevel != RS_PRIV_LEVEL_KERNEL) && + !(pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_NON_PRIVILEGED)) + { + // VGPU CPU-Plugin (Legacy Non-SRIOV, SRIOV-HYPERV, SRIOV-LEGACY, SRIOV-Offload), and Admin or kernel clients running in hypervisor + NvBool bPermissionGranted = serverControl_ValidateVgpu(pRmCtrlParams->pGpu, + pRmCtrlParams->cmd, + pRmCtrlParams->secInfo.privLevel, + pRmCtrlExecuteCookie->ctrlFlags); + if (!bPermissionGranted) + { + NV_PRINTF(LEVEL_NOTICE, + "hClient: 0x%08x, hObject 0x%08x, cmd 0x%08x: non-privileged hypervisor context issued privileged cmd\n", + pRmCtrlParams->hClient, pRmCtrlParams->hObject, + pRmCtrlParams->cmd); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + else + { + NV_CHECK_OK_OR_RETURN(LEVEL_NOTICE, rmControlValidateClientPrivilegeAccess(pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlExecuteCookie->ctrlFlags, + &pRmCtrlParams->secInfo)); + } + + // fail if GPU isn't ready + if ((!(pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_NO_GPUS_ACCESS)) && (pRmCtrlParams->pGpu != NULL)) + { + API_GPU_FULL_POWER_SANITY_CHECK(pRmCtrlParams->pGpu, NV_FALSE, + pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_ALLOW_WITHOUT_SYSMEM_ACCESS); + + if ( ! 
API_GPU_ATTACHED_SANITY_CHECK(pRmCtrlParams->pGpu)) + return NV_ERR_GPU_IS_LOST; + } + + if ((pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED) && + (pRmCtrlParams->secInfo.paramLocation != PARAM_LOCATION_KERNEL)) + { + return NV_ERR_INVALID_PARAMETER; + } + + if (pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_RM_TEST_ONLY_CODE) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + if (!pSys->getProperty(pSys, PDB_PROP_SYS_ENABLE_RM_TEST_ONLY_CODE)) + { + return NV_ERR_TEST_ONLY_CODE_NOT_ENABLED; + } + } + + return NV_OK; +} + +NV_STATUS +serverControlLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RmCtrlParams *pRmCtrlParams, + RmCtrlExecuteCookie *pRmCtrlExecuteCookie, + LOCK_ACCESS_TYPE *pAccess +) +{ + // + // Calls with LOCK_TOP don't fill in the cookie param correctly. + // This is just a WAR (workaround) for that. + // + NvU32 controlFlags = pRmCtrlExecuteCookie->ctrlFlags; + if (controlFlags == 0 && !RMCFG_FEATURE_PLATFORM_GSP) + { + NV_STATUS status = rmapiutilGetControlInfo(pRmCtrlParams->cmd, &controlFlags, NULL, NULL); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "rmapiutilGetControlInfo(cmd=0x%x, out flags=0x%x, NULL) = status=0x%x\n", + pRmCtrlParams->cmd, controlFlags, status); + } + + pRmCtrlExecuteCookie->ctrlFlags = controlFlags; + } + + NvBool areAllGpusInOffloadMode = gpumgrAreAllGpusInOffloadMode(); + + // + // If the control is ROUTE_TO_PHYSICAL, and we're in GSP offload mode, + // we can use a more relaxed locking mode: + // 1. Only lock the single device and not all GPUs + // 2. Take the API lock for READ instead of WRITE. + // Unfortunately, at this point we don't have the pGpu yet to check if it + // is in offload mode or not. So, instead, these optimizations are only + // done if *all* GPUs in the system are in offload mode. + // + NvBool bUseGspLockingMode = areAllGpusInOffloadMode && + (controlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL); + + if (pAccess == NULL) + return NV_ERR_INVALID_ARGUMENT; + + *pAccess = LOCK_ACCESS_WRITE; + + if (lock == RS_LOCK_TOP) + { + if (controlFlags & RMCTRL_FLAGS_NO_API_LOCK) + { + // NO_API_LOCK requires no access to GPU lock protected data + NV_ASSERT_OR_RETURN(((controlFlags & RMCTRL_FLAGS_NO_GPUS_LOCK) != 0), + NV_ERR_INVALID_LOCK_STATE); + + // NO_API_LOCK used in combination with API_LOCK_READONLY does not make sense + NV_ASSERT_OR_RETURN(((controlFlags & RMCTRL_FLAGS_API_LOCK_READONLY) == 0), + NV_ERR_INVALID_LOCK_STATE); + + RS_LOCK_INFO *pLockInfo = pRmCtrlParams->pLockInfo; + + pLockInfo->flags |= RM_LOCK_FLAGS_NO_API_LOCK; + return NV_OK; + } + + if (!serverSupportsReadOnlyLock(&g_resServ, RS_LOCK_TOP, RS_API_CTRL)) + { + *pAccess = LOCK_ACCESS_WRITE; + return NV_OK; + } + + if (controlFlags & RMCTRL_FLAGS_API_LOCK_READONLY) + { + *pAccess = LOCK_ACCESS_READ; + } + + // + // ROUTE_TO_PHYSICAL controls always take the READ API lock. This only applies + // to GSP clients: only there can we guarantee per-GPU execution of commands. + // + if (g_resServ.bRouteToPhysicalLockBypass && bUseGspLockingMode) + { + *pAccess = LOCK_ACCESS_READ; + } + + return NV_OK; + } + + if (lock == RS_LOCK_RESOURCE) + { + RS_LOCK_INFO *pLockInfo = pRmCtrlParams->pLockInfo; + + // + // Do not acquire the GPU lock if we were explicitly told + // not to or if this is an internal call, meaning that + // we already own the GPUs lock.
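+ // A per-device (GPU group) lock may still be taken instead; see the + // RM_LOCK_FLAGS_GPU_GROUP_LOCK handling below for GPU_LOCK_DEVICE_ONLY + // controls and the GSP offload locking mode.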
+ // + if ((pLockInfo->state & RM_LOCK_STATES_GPUS_LOCK_ACQUIRED) || + (controlFlags & RMCTRL_FLAGS_NO_GPUS_LOCK) || + (pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED) || + (pRmCtrlParams->flags & NVOS54_FLAGS_LOCK_BYPASS)) + { + pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK; + pLockInfo->flags &= ~RM_LOCK_FLAGS_GPU_GROUP_LOCK; + } + else + { + if ((controlFlags & RMCTRL_FLAGS_GPU_LOCK_DEVICE_ONLY) || + (g_resServ.bRouteToPhysicalLockBypass && bUseGspLockingMode)) + { + pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK; + pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + } + else + { + pLockInfo->flags &= ~RM_LOCK_FLAGS_NO_GPUS_LOCK; + pLockInfo->flags &= ~RM_LOCK_FLAGS_GPU_GROUP_LOCK; + } + } + + return NV_OK; + } + + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +serverControlLookupClientLockFlags +( + RmCtrlExecuteCookie *pRmCtrlExecuteCookie, + enum CLIENT_LOCK_TYPE *pClientLockType +) +{ + NvU32 controlFlags = pRmCtrlExecuteCookie->ctrlFlags; + + if ((controlFlags & RMCTRL_FLAGS_ALL_CLIENT_LOCK) != 0) + { + *pClientLockType = CLIENT_LOCK_ALL; + + // Locking all clients requires the RW API lock + NV_ASSERT_OR_RETURN(rmapiLockIsWriteOwner(), NV_ERR_INVALID_LOCK_STATE); + } + else + *pClientLockType = CLIENT_LOCK_SPECIFIC; + + return NV_OK; +} + +NV_STATUS +rmapiControl +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->ControlWithSecInfo(pRmApi, hClient, hObject, cmd, NV_PTR_TO_NvP64(pParams), + paramsSize, 0, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiControlWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvP64 pParams, + NvU32 paramsSize, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + + NV_PRINTF(LEVEL_INFO, + "Nv04Control: hClient:0x%x hObject:0x%x cmd:0x%x params:" NvP64_fmt " paramSize:0x%x flags:0x%x\n", + hClient, hObject, cmd, pParams, paramsSize, flags); + + status = _rmapiRmControl(hClient, hObject, cmd, pParams, paramsSize, flags, pRmApi, pSecInfo); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv04Control: control complete\n"); + } + else + { + NV_PRINTF(LEVEL_INFO, + "Nv04Control: control failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + NV_PRINTF(LEVEL_INFO, + "Nv04Control: hClient:0x%x hObject:0x%x cmd:0x%x params:" NvP64_fmt " paramSize:0x%x flags:0x%x\n", + hClient, hObject, cmd, pParams, paramsSize, flags); + } + + return status; +} + + +// +// Called at DIRQL, where we can't do memory allocations +// Do not inline that function to save stack space +// +static NV_NOINLINE NV_STATUS +_rmapiControlWithSecInfoTlsIRQL +( + RM_API* pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvP64 pParams, + NvU32 paramsSize, + NvU32 flags, + API_SECURITY_INFO* pSecInfo +) +{ + NV_STATUS status; + THREAD_STATE_NODE threadState; + + NvU8 stackAllocator[2*TLS_ISR_ALLOCATOR_SIZE]; + PORT_MEM_ALLOCATOR* pIsrAllocator = portMemAllocatorCreateOnExistingBlock(stackAllocator, sizeof(stackAllocator)); + tlsIsrInit(pIsrAllocator); + + // + // SMP synchronization for Nv04Control is handled lower in the + // call sequence to accommodate the various operation-specific + // lock requirements (e.g. some operations can run locklessly). 
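+ // (TLS for this call lives in the on-stack allocator created above, since + // heap allocations are not safe at DIRQL.)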
+ // + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiControlWithSecInfo(pRmApi, hClient, hObject, cmd, pParams, paramsSize, flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + tlsIsrDestroy(pIsrAllocator); + portMemAllocatorRelease(pIsrAllocator); + + return status; +} + + +NV_STATUS +rmapiControlWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvP64 pParams, + NvU32 paramsSize, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + THREAD_STATE_NODE threadState; + + if (!portMemExSafeForNonPagedAlloc()) + { + return _rmapiControlWithSecInfoTlsIRQL(pRmApi, hClient, hObject, cmd, pParams, paramsSize, flags, pSecInfo); + } + + // + // SMP synchronization for Nv04Control is handled lower in the + // call sequence to accommodate the various operation-specific + // lock requirements (e.g. some operations can run locklessly). + // + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiControlWithSecInfo(pRmApi, hClient, hObject, cmd, pParams, paramsSize, flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + diff --git a/src/nvidia/src/kernel/rmapi/deprecated_context.c b/src/nvidia/src/kernel/rmapi/deprecated_context.c new file mode 100644 index 0000000..3752e21 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/deprecated_context.c @@ -0,0 +1,205 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "rmapi/rmapi.h" +#include "rmapi/param_copy.h" +#include "os/os.h" +#include "deprecated_context.h" + +static NV_STATUS +_rmAllocForDeprecatedApi(DEPRECATED_CONTEXT *_pContext, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, void *pAllocParams, NvU32 paramsSize) +{ + DEPRECATED_CONTEXT_EXT *pContext = (DEPRECATED_CONTEXT_EXT *)_pContext; + RM_API *pRmApi = pContext->pRmApi; + + return pRmApi->AllocWithSecInfo(pRmApi, hClient, hParent, phObject, + hClass, NV_PTR_TO_NvP64(pAllocParams), paramsSize, + RMAPI_ALLOC_FLAGS_NONE, NvP64_NULL, &pContext->secInfo); +} + +static NV_STATUS +_rmControlForDeprecatedApi(DEPRECATED_CONTEXT *_pContext, NvHandle hClient, NvHandle hObject, + NvU32 cmd, void *pParams, NvU32 paramsSize) +{ + DEPRECATED_CONTEXT_EXT *pContext = (DEPRECATED_CONTEXT_EXT *)_pContext; + RM_API *pRmApi = pContext->pRmApi; + + return pRmApi->ControlWithSecInfo(pRmApi, hClient, hObject, cmd, + NV_PTR_TO_NvP64(pParams), paramsSize, 0, + &pContext->secInfo); +} + +static NV_STATUS +_rmFreeForDeprecatedApi(DEPRECATED_CONTEXT *_pContext, NvHandle hClient, NvHandle hObject) +{ + DEPRECATED_CONTEXT_EXT *pContext = (DEPRECATED_CONTEXT_EXT *)_pContext; + RM_API *pRmApi = pContext->pRmApi; + + return pRmApi->FreeWithSecInfo(pRmApi, hClient, hObject, + RMAPI_FREE_FLAGS_NONE, &pContext->secInfo); +} + +static NV_STATUS +_rmMapMemoryForDeprecatedApi(DEPRECATED_CONTEXT *_pContext, NvHandle hClient, NvHandle hDevice, + NvHandle hMemory, NvU64 offset, NvU64 length, NvP64 *ppCpuVirtAddr, NvU32 flags) +{ + DEPRECATED_CONTEXT_EXT *pContext = (DEPRECATED_CONTEXT_EXT *)_pContext; + RM_API *pRmApi = pContext->pRmApi; + + return pRmApi->MapToCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, offset, length, ppCpuVirtAddr, flags, &pContext->secInfo); +} + +NV_STATUS +RmCopyUserForDeprecatedApi +( + RMAPI_DEPRECATED_COPY_OP op, + RMAPI_DEPRECATED_BUFFER_POLICY bufPolicy, + NvP64 dataPtr, + NvU32 dataSize, + void **ppKernelPtr, + NvBool bUserModeArgs +) +{ + NV_STATUS status = NV_OK; + + switch (op) + { + case RMAPI_DEPRECATED_COPYIN: + if (bufPolicy == RMAPI_DEPRECATED_BUFFER_ALLOCATE) + { + *ppKernelPtr = portMemAllocNonPaged(dataSize); + + if (*ppKernelPtr == NULL) + return NV_ERR_NO_MEMORY; + } + + status = rmapiParamsCopyIn(NULL, + *ppKernelPtr, + dataPtr, + dataSize, + bUserModeArgs); + + if (bufPolicy == RMAPI_DEPRECATED_BUFFER_ALLOCATE) + { + if (status != NV_OK) + { + portMemFree(*ppKernelPtr); + *ppKernelPtr = NULL; + } + } + break; + case RMAPI_DEPRECATED_COPYOUT: + status = rmapiParamsCopyOut(NULL, + *ppKernelPtr, + dataPtr, + dataSize, + bUserModeArgs); + + // intentionally fall through to release memory... 
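+        // (Lifecycle, for illustration: a COPYIN that allocated a kernel
+        // copy is paired either with a COPYOUT -- which writes results back
+        // to dataPtr and then frees that copy via this fall-through -- or,
+        // on error paths, with a COPYRELEASE alone, which frees it without
+        // copying anything out.)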
+ case RMAPI_DEPRECATED_COPYRELEASE: + if (bufPolicy == RMAPI_DEPRECATED_BUFFER_ALLOCATE) + { + portMemFree(*ppKernelPtr); + *ppKernelPtr = NULL; + } + break; + } + + return status; +} + +static NV_STATUS +_rmCopyUserForDeprecatedApi +( + DEPRECATED_CONTEXT *_pContext, + RMAPI_DEPRECATED_COPY_OP op, + RMAPI_DEPRECATED_BUFFER_POLICY bufPolicy, + NvP64 dataPtr, + NvU32 dataSize, + void **ppKernelPtr +) +{ + return RmCopyUserForDeprecatedApi(op, bufPolicy, dataPtr, dataSize, + ppKernelPtr, + ((DEPRECATED_CONTEXT_EXT *)_pContext)->bUserModeArgs); +} + +static void * +_rmAllocMemForDeprecatedApi(NvU32 length) +{ + return portMemAllocNonPaged(length); +} + +static void +_rmFreeMemForDeprecatedApi(void *pAddress) +{ + portMemFree(pAddress); +} + +/** + * Setting bUserModeArgs to NV_FALSE can lead to Security issues where + * Privileged RM CTRL APIs are accessible by non-admin users. + * Please find more details in Bug: 3136168. + */ +void rmapiInitDeprecatedContext +( + DEPRECATED_CONTEXT_EXT *pContext, + API_SECURITY_INFO *pSecInfo, + NvBool bUserModeArgs, + NvBool bInternal +) +{ + if (pSecInfo == NULL) + { + portMemSet(&pContext->secInfo, 0, sizeof(pContext->secInfo)); + + if (bUserModeArgs) + { + pContext->secInfo.privLevel = osIsAdministrator() ? RS_PRIV_LEVEL_USER_ROOT : RS_PRIV_LEVEL_USER; + } + else + { + pContext->secInfo.privLevel = RS_PRIV_LEVEL_KERNEL; + } + } + else + { + pContext->secInfo = *pSecInfo; + } + + pContext->secInfo.paramLocation = PARAM_LOCATION_KERNEL; + + pContext->bInternal = bInternal; + pContext->pRmApi = rmapiGetInterface(bInternal ? RMAPI_GPU_LOCK_INTERNAL : RMAPI_EXTERNAL); + pContext->bUserModeArgs = bUserModeArgs; + + pContext->parent.RmAlloc = _rmAllocForDeprecatedApi; + pContext->parent.RmControl = _rmControlForDeprecatedApi; + pContext->parent.RmFree = _rmFreeForDeprecatedApi; + pContext->parent.RmMapMemory = _rmMapMemoryForDeprecatedApi; + pContext->parent.CopyUser = _rmCopyUserForDeprecatedApi; + pContext->parent.AllocMem = _rmAllocMemForDeprecatedApi; + pContext->parent.FreeMem = _rmFreeMemForDeprecatedApi; +} diff --git a/src/nvidia/src/kernel/rmapi/deprecated_context.h b/src/nvidia/src/kernel/rmapi/deprecated_context.h new file mode 100644 index 0000000..1459c32 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/deprecated_context.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _DEPRECATED_CONTEXT_ +#define _DEPRECATED_CONTEXT_ + +#include "deprecated/rmapi_deprecated.h" + +typedef struct +{ + DEPRECATED_CONTEXT parent; + API_SECURITY_INFO secInfo; + NvBool bInternal; + NvBool bUserModeArgs; + RM_API *pRmApi; +} DEPRECATED_CONTEXT_EXT; + +void rmapiInitDeprecatedContext (DEPRECATED_CONTEXT_EXT *pContext, + API_SECURITY_INFO *pSecInfo, + NvBool bUserModeArgs, + NvBool bInternal); + +#endif // _DEPRECATED_CONTEXT_ diff --git a/src/nvidia/src/kernel/rmapi/entry_points.c b/src/nvidia/src/kernel/rmapi/entry_points.c new file mode 100644 index 0000000..a53bedd --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/entry_points.c @@ -0,0 +1,583 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "rmapi/rmapi.h" +#include "entry_points.h" +#include "deprecated_context.h" +#include "os/os.h" + +#define RMAPI_DEPRECATED(pFunc, pArgs, bUserModeArgs) \ + NV_PRINTF(LEVEL_WARNING, "Calling deprecated function at %d\n", __LINE__); \ + pArgs->status = NV_ERR_NOT_SUPPORTED; + +#define RMAPI_NOT_SUPPORTED(pArgs) \ + pArgs->status = NV_ERR_NOT_SUPPORTED; + +// Primary APIs +static void _nv04Alloc(NVOS21_PARAMETERS*, NvBool); +static void _nv01Free(NVOS00_PARAMETERS*, NvBool); +static void _nv04Control(NVOS54_PARAMETERS*, NvBool, NvBool); +static void _nv04DupObject(NVOS55_PARAMETERS*, NvBool); +static void _nv04Share(NVOS57_PARAMETERS*, NvBool); +static void _nv04MapMemory(NVOS33_PARAMETERS*, NvBool, NvBool); +static void _nv04UnmapMemory(NVOS34_PARAMETERS*, NvBool, NvBool); +static void _nv04MapMemoryDma(NVOS46_PARAMETERS*, NvBool); +static void _nv04UnmapMemoryDma(NVOS47_PARAMETERS*, NvBool); + +// Legacy APIs +static void _nv01AllocMemory(NVOS02_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedAllocMemory, pArgs, bUserModeArgs); } +static void _nv01AllocObject(NVOS05_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedAllocObject, pArgs, bUserModeArgs); } +static void _nv04AddVblankCallback(NVOS61_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedAddVblankCallback, pArgs, bUserModeArgs); } +static void _nv04AllocContextDma(NVOS39_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedAllocContextDma, pArgs, bUserModeArgs); } +static void _nv04BindContextDma(NVOS49_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedBindContextDma, pArgs, bUserModeArgs); } +static void _nv04I2CAccess(NVOS_I2C_ACCESS_PARAMS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedI2CAccess, pArgs, bUserModeArgs); } +static void _nv04IdleChannels(NVOS30_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedIdleChannels, pArgs, bUserModeArgs); } +static void _nv04VidHeapControl(NVOS32_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedVidHeapControl, pArgs, bUserModeArgs); } + +static void _nv04AllocWithSecInfo(NVOS21_PARAMETERS*, API_SECURITY_INFO); +static void _nv04AllocWithAccessSecInfo(NVOS64_PARAMETERS*, API_SECURITY_INFO); +static void _nv04ControlWithSecInfo(NVOS54_PARAMETERS*, API_SECURITY_INFO, NvBool bInternalCall); +static void _nv01FreeWithSecInfo(NVOS00_PARAMETERS*, API_SECURITY_INFO); +static void _nv04AllocWithAccess(NVOS64_PARAMETERS*, NvBool); +static void _nv04MapMemoryWithSecInfo(NVOS33_PARAMETERS*, API_SECURITY_INFO); +static void _nv04UnmapMemoryWithSecInfo(NVOS34_PARAMETERS*, API_SECURITY_INFO); +static void _nv04MapMemoryDmaWithSecInfo(NVOS46_PARAMETERS*, API_SECURITY_INFO); +static void _nv04UnmapMemoryDmaWithSecInfo(NVOS47_PARAMETERS*, API_SECURITY_INFO); +static void _nv04DupObjectWithSecInfo(NVOS55_PARAMETERS*, API_SECURITY_INFO); +static void _nv04ShareWithSecInfo(NVOS57_PARAMETERS*, API_SECURITY_INFO); + + +// +// RM API entry points +// +// User mode clients should call base version (no suffix). +// +// Kernel mode clients should call Kernel or User version +// (call User if the parameters come from a user mode source). 
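+//
+// Hypothetical kernel-mode usage sketch (not part of this change): a caller
+// whose NVOS54_PARAMETERS already live in kernel memory would do
+//
+//     NVOS54_PARAMETERS args = {0};
+//     args.hClient    = hClient;
+//     args.hObject    = hSubdevice;
+//     args.cmd        = someCtrlCmd;           // assumed command id
+//     args.params     = NV_PTR_TO_NvP64(&ctrlParams);
+//     args.paramsSize = sizeof(ctrlParams);
+//     Nv04ControlKernel(&args);                // result lands in args.status
+//
+// whereas the same parameters captured from a user-mode source would go
+// through Nv04ControlUser(); the two differ only in the bUserModeArgs flag
+// used below to build the API_SECURITY_INFO.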
+// + +void Nv01AllocMemory(NVOS02_PARAMETERS *pArgs) { _nv01AllocMemory(pArgs, NV_TRUE); } +void Nv01AllocObject(NVOS05_PARAMETERS *pArgs) { _nv01AllocObject(pArgs, NV_TRUE); } +void Nv01Free(NVOS00_PARAMETERS *pArgs) { _nv01Free(pArgs, NV_TRUE); } +void Nv04AddVblankCallback(NVOS61_PARAMETERS *pArgs) { _nv04AddVblankCallback(pArgs, NV_TRUE); } +void Nv04Alloc(NVOS21_PARAMETERS *pArgs) { _nv04Alloc(pArgs, NV_TRUE); } +void Nv04AllocWithAccess(NVOS64_PARAMETERS *pArgs) { _nv04AllocWithAccess(pArgs, NV_TRUE); } +void Nv04AllocContextDma(NVOS39_PARAMETERS *pArgs) { _nv04AllocContextDma(pArgs, NV_TRUE); } +void Nv04BindContextDma(NVOS49_PARAMETERS *pArgs) { _nv04BindContextDma(pArgs, NV_TRUE); } +void Nv04Control(NVOS54_PARAMETERS *pArgs) { _nv04Control(pArgs, NV_TRUE, NV_FALSE); } +void Nv04DupObject(NVOS55_PARAMETERS *pArgs) { _nv04DupObject(pArgs, NV_TRUE); } +void Nv04Share(NVOS57_PARAMETERS *pArgs) { _nv04Share(pArgs, NV_TRUE); } +void Nv04I2CAccess(NVOS_I2C_ACCESS_PARAMS *pArgs) { _nv04I2CAccess(pArgs, NV_TRUE); } +void Nv04IdleChannels(NVOS30_PARAMETERS *pArgs) { _nv04IdleChannels(pArgs, NV_TRUE); } +void Nv04MapMemory(NVOS33_PARAMETERS *pArgs) { _nv04MapMemory(pArgs, NV_TRUE, NV_FALSE); } +void Nv04MapMemoryDma(NVOS46_PARAMETERS *pArgs) { _nv04MapMemoryDma(pArgs, NV_TRUE); } +void Nv04UnmapMemory(NVOS34_PARAMETERS *pArgs) { _nv04UnmapMemory(pArgs, NV_TRUE, NV_FALSE); } +void Nv04UnmapMemoryDma(NVOS47_PARAMETERS *pArgs) { _nv04UnmapMemoryDma(pArgs, NV_TRUE); } +void Nv04VidHeapControl(NVOS32_PARAMETERS *pArgs) { _nv04VidHeapControl(pArgs, NV_TRUE); } + +void Nv01AllocMemoryUser(NVOS02_PARAMETERS *pArgs) { _nv01AllocMemory(pArgs, NV_TRUE); } +void Nv01AllocObjectUser(NVOS05_PARAMETERS *pArgs) { _nv01AllocObject(pArgs, NV_TRUE); } +void Nv01FreeUser(NVOS00_PARAMETERS *pArgs) { _nv01Free(pArgs, NV_TRUE); } +void Nv04AddVblankCallbackUser(NVOS61_PARAMETERS *pArgs) { _nv04AddVblankCallback(pArgs, NV_TRUE); } +void Nv04AllocUser(NVOS21_PARAMETERS *pArgs) { _nv04Alloc(pArgs, NV_TRUE); } +void Nv04AllocWithAccessUser(NVOS64_PARAMETERS *pArgs) { _nv04AllocWithAccess(pArgs, NV_TRUE); } +void Nv04AllocContextDmaUser(NVOS39_PARAMETERS *pArgs) { _nv04AllocContextDma(pArgs, NV_TRUE); } +void Nv04BindContextDmaUser(NVOS49_PARAMETERS *pArgs) { _nv04BindContextDma(pArgs, NV_TRUE); } +void Nv04ControlUser(NVOS54_PARAMETERS *pArgs) { _nv04Control(pArgs, NV_TRUE, NV_FALSE); } +void Nv04DupObjectUser(NVOS55_PARAMETERS *pArgs) { _nv04DupObject(pArgs, NV_TRUE); } +void Nv04ShareUser(NVOS57_PARAMETERS *pArgs) { _nv04Share(pArgs, NV_TRUE); } +void Nv04I2CAccessUser(NVOS_I2C_ACCESS_PARAMS *pArgs) { _nv04I2CAccess(pArgs, NV_TRUE); } +void Nv04IdleChannelsUser(NVOS30_PARAMETERS *pArgs) { _nv04IdleChannels(pArgs, NV_TRUE); } +void Nv04MapMemoryUser(NVOS33_PARAMETERS *pArgs) { _nv04MapMemory(pArgs, NV_TRUE, NV_FALSE); } +void Nv04MapMemoryDmaUser(NVOS46_PARAMETERS *pArgs) { _nv04MapMemoryDma(pArgs, NV_TRUE); } +void Nv04UnmapMemoryUser(NVOS34_PARAMETERS *pArgs) { _nv04UnmapMemory(pArgs, NV_TRUE, NV_FALSE); } +void Nv04UnmapMemoryDmaUser(NVOS47_PARAMETERS *pArgs) { _nv04UnmapMemoryDma(pArgs, NV_TRUE); } +void Nv04VidHeapControlUser(NVOS32_PARAMETERS *pArgs) { _nv04VidHeapControl(pArgs, NV_TRUE); } + +void Nv01AllocMemoryKernel(NVOS02_PARAMETERS *pArgs) { _nv01AllocMemory(pArgs, NV_FALSE); } +void Nv01AllocObjectKernel(NVOS05_PARAMETERS *pArgs) { _nv01AllocObject(pArgs, NV_FALSE); } +void Nv01FreeKernel(NVOS00_PARAMETERS *pArgs) { _nv01Free(pArgs, NV_FALSE); } +void Nv04AddVblankCallbackKernel(NVOS61_PARAMETERS 
*pArgs) { _nv04AddVblankCallback(pArgs, NV_FALSE); } +void Nv04AllocKernel(NVOS21_PARAMETERS *pArgs) { _nv04Alloc(pArgs, NV_FALSE); } +void Nv04AllocWithAccessKernel(NVOS64_PARAMETERS *pArgs) { _nv04AllocWithAccess(pArgs, NV_FALSE); } +void Nv04AllocContextDmaKernel(NVOS39_PARAMETERS *pArgs) { _nv04AllocContextDma(pArgs, NV_FALSE); } +void Nv04BindContextDmaKernel(NVOS49_PARAMETERS *pArgs) { _nv04BindContextDma(pArgs, NV_FALSE); } +void Nv04ControlKernel(NVOS54_PARAMETERS *pArgs) { _nv04Control(pArgs, NV_FALSE, NV_FALSE); } +void Nv04DupObjectKernel(NVOS55_PARAMETERS *pArgs) { _nv04DupObject(pArgs, NV_FALSE); } +void Nv04ShareKernel(NVOS57_PARAMETERS *pArgs) { _nv04Share(pArgs, NV_FALSE); } +void Nv04I2CAccessKernel(NVOS_I2C_ACCESS_PARAMS *pArgs) { _nv04I2CAccess(pArgs, NV_FALSE); } +void Nv04IdleChannelsKernel(NVOS30_PARAMETERS *pArgs) { _nv04IdleChannels(pArgs, NV_FALSE); } +void Nv04MapMemoryKernel(NVOS33_PARAMETERS *pArgs) { _nv04MapMemory(pArgs, NV_FALSE, NV_FALSE); } +void Nv04MapMemoryDmaKernel(NVOS46_PARAMETERS *pArgs) { _nv04MapMemoryDma(pArgs, NV_FALSE); } +void Nv04UnmapMemoryKernel(NVOS34_PARAMETERS *pArgs) { _nv04UnmapMemory(pArgs, NV_FALSE, NV_FALSE); } +void Nv04UnmapMemoryDmaKernel(NVOS47_PARAMETERS *pArgs) { _nv04UnmapMemoryDma(pArgs, NV_FALSE); } +void Nv04VidHeapControlKernel(NVOS32_PARAMETERS *pArgs) { _nv04VidHeapControl(pArgs, NV_FALSE); } + +// MODS-specific API functions which ignore RM locking model +#if defined(LINUX_MFG) +void Nv04ControlInternal(NVOS54_PARAMETERS *pArgs) { _nv04Control(pArgs, NV_FALSE, NV_TRUE); } +void Nv04MapMemoryInternal(NVOS33_PARAMETERS *pArgs) { _nv04MapMemory(pArgs, NV_FALSE, NV_TRUE); } +void Nv04UnmapMemoryInternal(NVOS34_PARAMETERS *pArgs) { _nv04UnmapMemory(pArgs, NV_FALSE, NV_TRUE); } +#endif + +#define RMAPI_DEPRECATED_WITH_SECINFO(pFunc, pArgs, secInfo) \ + NV_PRINTF(LEVEL_WARNING, "Calling deprecated function at %d\n", __LINE__); \ + pArgs->status = NV_ERR_NOT_SUPPORTED; + +void Nv01AllocMemoryWithSecInfo(NVOS02_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedAllocMemory, pArgs, secInfo); } +void Nv01AllocObjectWithSecInfo(NVOS05_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedAllocObject, pArgs, secInfo); } +void Nv04AllocWithSecInfo(NVOS21_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04AllocWithSecInfo(pArgs, secInfo); } +void Nv04AllocWithAccessSecInfo(NVOS64_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04AllocWithAccessSecInfo(pArgs, secInfo); } +void Nv01FreeWithSecInfo(NVOS00_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv01FreeWithSecInfo(pArgs, secInfo); } +void Nv04ControlWithSecInfo(NVOS54_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04ControlWithSecInfo(pArgs, secInfo, NV_FALSE); } +void Nv04VidHeapControlWithSecInfo(NVOS32_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedVidHeapControl, pArgs, secInfo); } +void Nv04IdleChannelsWithSecInfo(NVOS30_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedIdleChannels, pArgs, secInfo); } +void Nv04MapMemoryWithSecInfo(NVOS33_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04MapMemoryWithSecInfo(pArgs, secInfo); } +void Nv04UnmapMemoryWithSecInfo(NVOS34_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04UnmapMemoryWithSecInfo(pArgs, secInfo); } +void Nv04I2CAccessWithSecInfo(NVOS_I2C_ACCESS_PARAMS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedI2CAccess, 
pArgs, secInfo); } +void Nv04AllocContextDmaWithSecInfo(NVOS39_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedAllocContextDma, pArgs, secInfo); } +void Nv04BindContextDmaWithSecInfo(NVOS49_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedBindContextDma, pArgs, secInfo); } +void Nv04MapMemoryDmaWithSecInfo(NVOS46_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04MapMemoryDmaWithSecInfo(pArgs, secInfo); } +void Nv04UnmapMemoryDmaWithSecInfo(NVOS47_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04UnmapMemoryDmaWithSecInfo(pArgs, secInfo); } +void Nv04DupObjectWithSecInfo(NVOS55_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04DupObjectWithSecInfo(pArgs, secInfo); } +void Nv04ShareWithSecInfo(NVOS57_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04ShareWithSecInfo(pArgs, secInfo); } + + +static void +XlateUserModeArgsToSecInfo +( + NvBool bUserModeArgs, + NvBool bInternalCall, + API_SECURITY_INFO *pSecInfo +) +{ + portMemSet(pSecInfo, 0, sizeof(*pSecInfo)); + + if (bInternalCall == NV_FALSE && bUserModeArgs == NV_TRUE) + { + pSecInfo->privLevel = osIsAdministrator() ? RS_PRIV_LEVEL_USER_ROOT : RS_PRIV_LEVEL_USER; + pSecInfo->paramLocation = PARAM_LOCATION_USER; + } + else + { + pSecInfo->privLevel = RS_PRIV_LEVEL_KERNEL; + pSecInfo->paramLocation = PARAM_LOCATION_KERNEL; + } +} + +/* +NV04_ALLOC + NVOS21_PARAMETERS; + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectNew; + NvV32 hClass; + NvP64 pAllocParms; + NvV32 status; +*/ + +static void _nv04Alloc +( + NVOS21_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->AllocWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectParent, &pArgs->hObjectNew, + pArgs->hClass, pArgs->pAllocParms, pArgs->paramsSize, RMAPI_ALLOC_FLAGS_NONE, + NvP64_NULL, &secInfo); +} // end of Nv04Alloc() + +/* +NV04_ALLOC + NVOS21_PARAMETERS; + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectNew; + NvV32 hClass; + NvP64 pAllocParms; + NvV32 status; +*/ + +static void _nv04AllocWithSecInfo +( + NVOS21_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->AllocWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectParent, &pArgs->hObjectNew, + pArgs->hClass, pArgs->pAllocParms, pArgs->paramsSize, RMAPI_ALLOC_FLAGS_NONE, + NvP64_NULL, &secInfo); +} // end of _nv04AllocWithSecInfo() + +/* +NV04_ALLOC_WITH_ACCESS + NVOS64_PARAMETERS; + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectNew; + NvV32 hClass; + NvP64 pAllocParms; + NvP64 pRightsRequested; + NvV32 status; +*/ + +static void _nv04AllocWithAccess +( + NVOS64_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + NvU32 flags = RMAPI_ALLOC_FLAGS_NONE; + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + if (pArgs->flags & NVOS64_FLAGS_FINN_SERIALIZED) + flags |= RMAPI_ALLOC_FLAGS_SERIALIZED; + + pArgs->status = pRmApi->AllocWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectParent, &pArgs->hObjectNew, + pArgs->hClass, pArgs->pAllocParms, pArgs->paramsSize, flags, + pArgs->pRightsRequested, &secInfo); +} // end of _nv04AllocWithAccess() + +static void _nv04AllocWithAccessSecInfo +( + NVOS64_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi 
= rmapiGetInterface(RMAPI_EXTERNAL); + NvU32 flags = RMAPI_ALLOC_FLAGS_NONE; + + if (pArgs->flags & NVOS64_FLAGS_FINN_SERIALIZED) + flags |= RMAPI_ALLOC_FLAGS_SERIALIZED; + + pArgs->status = pRmApi->AllocWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectParent, &pArgs->hObjectNew, + pArgs->hClass, pArgs->pAllocParms, pArgs->paramsSize, flags, + pArgs->pRightsRequested, &secInfo); +} // end of _nv04AllocWithAccessSecInfo() + +/* +NV01_FREE + NVOS00_PARAMETERS: + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectOld; + NvV32 status; +*/ + +static void _nv01Free +( + NVOS00_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->FreeWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectOld, RMAPI_FREE_FLAGS_NONE, &secInfo); +} // end of Nv01Free() + +/* +NV01_FREE + NVOS00_PARAMETERS: + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectOld; + NvV32 status; +*/ + +static void _nv01FreeWithSecInfo +( + NVOS00_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->FreeWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectOld, RMAPI_FREE_FLAGS_NONE, &secInfo); +} // end of Nv01FreeWithSecInfo() + +/* +NV04_MAP_MEMORY + NVOS33_PARAMETERS: + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvU64 offset; + NvU64 length; + NvP64 pLinearAddress; + NvU32 status; + NvU32 flags; +*/ +static void _nv04MapMemory +( + NVOS33_PARAMETERS *pArgs, + NvBool bUserModeArgs, + NvBool bInternalCall +) +{ + RM_API *pRmApi = rmapiGetInterface(bInternalCall ? RMAPI_MODS_LOCK_BYPASS : RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->MapToCpuWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hMemory, pArgs->offset, + pArgs->length, &pArgs->pLinearAddress, pArgs->flags, &secInfo); +} // end of Nv04MapMemory() + +static void _nv04MapMemoryWithSecInfo +( + NVOS33_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->MapToCpuWithSecInfoV2(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hMemory, pArgs->offset, + pArgs->length, &pArgs->pLinearAddress, &pArgs->flags, &secInfo); +} + +/* +NV04_UNMAP_MEMORY + NVOS34_PARAMETERS: + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvP64 pLinearAddress; + NvU32 status; + NvU32 flags; +*/ +static void _nv04UnmapMemory +( + NVOS34_PARAMETERS *pArgs, + NvBool bUserModeArgs, + NvBool bInternalCall +) +{ + RM_API *pRmApi = rmapiGetInterface(bInternalCall ? 
RMAPI_MODS_LOCK_BYPASS : RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->UnmapFromCpuWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hMemory, + pArgs->pLinearAddress, pArgs->flags, osGetCurrentProcess(), &secInfo); +} // end of Nv04UnmapMemory() + +static void _nv04UnmapMemoryWithSecInfo +( + NVOS34_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->UnmapFromCpuWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hMemory, + pArgs->pLinearAddress, pArgs->flags, osGetCurrentProcess(), &secInfo); +} + +static void _nv04MapMemoryDma +( + NVOS46_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->MapWithSecInfo(pRmApi, pArgs, &secInfo); +} // end of Nv04MapMemoryDma() + +static void _nv04MapMemoryDmaWithSecInfo +( + NVOS46_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->MapWithSecInfo(pRmApi, pArgs, &secInfo); +} + +/* +NV04_UNMAP_MEMORY_DMA + NVOS47_PARAMETERS: + NvHandle hClient; + NvHandle hDevice; + NvHandle hDma; + NvHandle hMemory; + NvV32 flags; + NvU64 dmaOffset; + NvV32 status; +*/ +static void _nv04UnmapMemoryDma +( + NVOS47_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->UnmapWithSecInfo(pRmApi, pArgs, &secInfo); +} // end of Nv04UnmapMemoryDma() + +static void _nv04UnmapMemoryDmaWithSecInfo +( + NVOS47_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->UnmapWithSecInfo(pRmApi, pArgs, &secInfo); +} + +/* +NV04_CONTROL + NVOS54_PARAMETERS: + NvHandle hClient; + NvHandle hObject; + NvV32 cmd; + NvP64 params; + NvU32 paramsSize; + NvV32 status; +*/ +static void _nv04ControlWithSecInfo +( + NVOS54_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo, + NvBool bInternalCall +) +{ + { + RM_API *pRmApi = rmapiGetInterface(bInternalCall ? 
RMAPI_MODS_LOCK_BYPASS : RMAPI_EXTERNAL); + + pArgs->status = pRmApi->ControlWithSecInfo(pRmApi, pArgs->hClient, pArgs->hObject, pArgs->cmd, + pArgs->params, pArgs->paramsSize, pArgs->flags, &secInfo); + } +} // end of Nv04Control() + +/* +NV04_CONTROL + NVOS54_PARAMETERS: + NvHandle hClient; + NvHandle hObject; + NvV32 cmd; + NvP64 params; + NvU32 paramsSize; + NvV32 status; +*/ +static void _nv04Control +( + NVOS54_PARAMETERS *pArgs, + NvBool bUserModeArgs, + NvBool bInternalCall +) +{ + API_SECURITY_INFO secInfo = {0}; + XlateUserModeArgsToSecInfo(bUserModeArgs, bInternalCall, &secInfo); + _nv04ControlWithSecInfo(pArgs, secInfo, bInternalCall); +} // end of Nv04Control() + +/* +NV04_DUP_OBJECT + NVOS55_PARAMETERS: + NvHandle hClient; + NvHandle hParent; + NvHandle hObject; + NvHandle hClientSrc; + NvHandle hObjectSrc; + NvU32 flags; + NvU32 status; +*/ +static void _nv04DupObject +( + NVOS55_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->DupObjectWithSecInfo(pRmApi, pArgs->hClient, pArgs->hParent, &pArgs->hObject, + pArgs->hClientSrc, pArgs->hObjectSrc, pArgs->flags, &secInfo); +} // end of Nv04DupObject() + +static void _nv04DupObjectWithSecInfo +( + NVOS55_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->DupObjectWithSecInfo(pRmApi, pArgs->hClient, pArgs->hParent, &pArgs->hObject, + pArgs->hClientSrc, pArgs->hObjectSrc, pArgs->flags, &secInfo); +} + +static void _nv04Share +( + NVOS57_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->ShareWithSecInfo(pRmApi, pArgs->hClient, pArgs->hObject, + &pArgs->sharePolicy, &secInfo); +} // end of Nv04Share() + +static void _nv04ShareWithSecInfo +( + NVOS57_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->ShareWithSecInfo(pRmApi, pArgs->hClient, pArgs->hObject, + &pArgs->sharePolicy, &secInfo); +} diff --git a/src/nvidia/src/kernel/rmapi/entry_points.h b/src/nvidia/src/kernel/rmapi/entry_points.h new file mode 100644 index 0000000..f0991ed --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/entry_points.h @@ -0,0 +1,396 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _ENTRYPOINTS_H_ +#define _ENTRYPOINTS_H_ + +// +// Internal handlers for RM APIs +// + +NV_STATUS +rmapiAlloc +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + void *pAllocParams, + NvU32 paramsSize +); + +NV_STATUS +rmapiAllocWithHandle +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle hObject, + NvU32 hClass, + void *pAllocParams, + NvU32 paramsSize +); + +NV_STATUS +rmapiAllocWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + NvP64 pAllocParams, + NvU32 paramsSize, + NvU32 flags, + NvP64 pRightsRequired, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiAllocWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + NvP64 pAllocParams, + NvU32 paramsSize, + NvU32 flags, + NvP64 pRightsRequired, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiFree +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject +); + +NV_STATUS +rmapiFreeWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiFreeWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiDisableClients +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients +); + +NV_STATUS +rmapiDisableClientsWithSecInfo +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiDisableClientsWithSecInfoTls +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiControl +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize +); + +NV_STATUS +rmapiControlWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvP64 pParams, + NvU32 paramsSize, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiControlWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvP64 pParams, + NvU32 paramsSize, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiDupObject +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags +); + +NV_STATUS +rmapiDupObjectWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiDupObjectWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiShare +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy +); + +NV_STATUS +rmapiShareWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiShareWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + 
RS_SHARE_POLICY *pSharePolicy, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiMapToCpu +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + void **ppCpuVirtAddr, + NvU32 flags +); + +NV_STATUS +rmapiMapToCpuWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiMapToCpuWithSecInfoV2 +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 *flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiMapToCpuWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); +NV_STATUS +rmapiMapToCpuWithSecInfoTlsV2 +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 *flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiUnmapFromCpu +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + void *pLinearAddress, + NvU32 flags, + NvU32 ProcessId +); + +NV_STATUS +rmapiUnmapFromCpuWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 pLinearAddress, + NvU32 flags, + NvU32 ProcessId, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiUnmapFromCpuWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 pLinearAddress, + NvU32 flags, + NvU32 ProcessId, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiMap +( + RM_API *pRmApi, + NVOS46_PARAMETERS *pParms +); + +NV_STATUS +rmapiMapWithSecInfo +( + RM_API *pRmApi, + NVOS46_PARAMETERS *pParms, + API_SECURITY_INFO *pSecInfo +); + + +NV_STATUS +rmapiMapWithSecInfoTls +( + RM_API *pRmApi, + NVOS46_PARAMETERS *pParms, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiUnmap +( + RM_API *pRmApi, + NVOS47_PARAMETERS *pParms +); + +NV_STATUS +rmapiUnmapWithSecInfo +( + RM_API *pRmApi, + NVOS47_PARAMETERS *pParms, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiUnmapWithSecInfoTls +( + RM_API *pRmApi, + NVOS47_PARAMETERS *pParms, + API_SECURITY_INFO *pSecInfo +); + +#endif // _ENTRYPOINTS_H_ + diff --git a/src/nvidia/src/kernel/rmapi/event.c b/src/nvidia/src/kernel/rmapi/event.c new file mode 100644 index 0000000..e3b1776 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/event.c @@ -0,0 +1,753 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "core/core.h" +#include "os/os.h" +#include "rmapi/event.h" +#include "rmapi/resource_fwd_decls.h" +#include "vgpu/rpc.h" +#include "gpu/device/device.h" +#include "core/locks.h" +#include "rmapi/rs_utils.h" + +#include "resserv/rs_client.h" +#include "class/cl0005.h" + +#include "ctrl/ctrl0000/ctrl0000event.h" // NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_* + +static NV_STATUS _eventRpcForType(NvHandle hClient, NvHandle hObject); +static void eventSystemDequeueEventLatest(SystemEventQueueList *pQueue); + +NV_STATUS +eventConstruct_IMPL +( + Event *pEvent, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV0005_ALLOC_PARAMETERS *pNv0050AllocParams = pParams->pAllocParams; + RsClient *pRsClient = pCallContext->pClient; + RsResourceRef *pClientRef; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NV_STATUS rmStatus = NV_OK; + PEVENTNOTIFICATION *ppEventNotification; + NvHandle hChannel = 0x0; + OBJGPU *pGpu = NULL; + RS_PRIV_LEVEL privLevel = pParams->pSecInfo->privLevel; + NvBool bUserOsEventHandle = NV_FALSE; + NvHandle hParentClient = pNv0050AllocParams->hParentClient; + + // + // Allow hParentClient being zero to imply the allocating client should be + // the parent client of this event. + // + if (hParentClient == NV01_NULL_OBJECT) + { + hParentClient = pRsClient->hClient; + } + + // never allow user mode/non-root clients to create ring0 callbacks as + // we can not trust the function pointer (encoded in data). + if ((NV01_EVENT_KERNEL_CALLBACK == pResourceRef->externalClassId) || + (NV01_EVENT_KERNEL_CALLBACK_EX == pResourceRef->externalClassId)) + { + if (privLevel < RS_PRIV_LEVEL_KERNEL) + { + // sometimes it is nice to hook up callbacks for debug purposes + // -- but disable the override for release builds! +#if defined(DEBUG) || defined(DEVELOP) + if (!(pNv0050AllocParams->notifyIndex & NV01_EVENT_PERMIT_NON_ROOT_EVENT_KERNEL_CALLBACK_CREATION)) +#endif + { + return NV_ERR_ILLEGAL_ACTION; + } + } + } + + if (_eventRpcForType(hParentClient, pNv0050AllocParams->hSrcResource)) + { + RsResourceRef *pSrcRef; + NV_STATUS tmpStatus; + + tmpStatus = serverutilGetResourceRef(hParentClient, + pNv0050AllocParams->hSrcResource, + &pSrcRef); + + if (tmpStatus == NV_OK) + { + hChannel = pSrcRef->pParentRef ? pSrcRef->pParentRef->hResource : 0; + pGpu = CliGetGpuFromContext(pSrcRef, NULL); + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "RmAllocEvent could not set pGpu. hClient=0x%x, hObject=0x%x\n", + pRsClient->hClient, pResourceRef->hResource); + } + } + } + + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pRsClient, pRsClient->hClient, &pClientRef)); + + // add event to client and parent object + rmStatus = eventInit(pEvent, + pCallContext, + hParentClient, + pNv0050AllocParams->hSrcResource, + &ppEventNotification); + if (rmStatus == NV_OK) + { + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. 
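+        //
+        // Condensed restatement of the condition below: the allocation is
+        // RPCed to the host when running paravirtualized (always), when
+        // running as a GSP client with any notifier other than a ContextDma,
+        // or when running SR-IOV virtualized and the event is a stalling
+        // one; every other combination is handled entirely locally.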
+ // + // In RM-offload, we don't allocate ContextDma in GSP-RM unless there + // is any necessity to use it (e.g. display channel binding time). So + // GSP-RM will find no valid object if the event is associated with + // ContextDma object. So we are ignoring the event allocation here if + // the event is associated with ContextDma object. + // + if (pGpu != NULL) + { + RsResourceRef *pSourceRef = NULL; + + if (IS_FW_CLIENT(pGpu)) + { + NV_ASSERT_OK_OR_RETURN( + serverutilGetResourceRef(hParentClient, + pNv0050AllocParams->hSrcResource, + &pSourceRef)); + } + + if ( + (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_FW_CLIENT(pGpu) && pSourceRef->internalClassId != classId(ContextDma)) || + (IS_VIRTUAL_WITH_SRIOV(pGpu) && !(pNv0050AllocParams->notifyIndex & NV01_EVENT_NONSTALL_INTR)))) + { + // + // In SR-IOV enabled systems, nonstall events can be registered + // directly with guest RM since guest RM is capable of + // receiving and handling nonstall interrupts itself. In + // paravirtualized systems, we always need to use the RPC to + // host RM. + // + NV_RM_RPC_ALLOC_EVENT(pGpu, + pRsClient->hClient, + pEvent->hNotifierClient, + hChannel, + pEvent->hNotifierResource, + pResourceRef->hResource, + pResourceRef->externalClassId, + pNv0050AllocParams->notifyIndex, + rmStatus); + } + } + + if (NV01_EVENT_OS_EVENT == pResourceRef->externalClassId) + { + // convert a user event handle to its kernel equivalent. + if (privLevel <= RS_PRIV_LEVEL_USER_ROOT) + { + rmStatus = osUserHandleToKernelPtr(pRsClient->hClient, + pNv0050AllocParams->data, + &pNv0050AllocParams->data); + bUserOsEventHandle = NV_TRUE; + } + } + + if (rmStatus == NV_OK) + rmStatus = registerEventNotification(ppEventNotification, + pRsClient, + pEvent->hNotifierResource, + pResourceRef->hResource, + pNv0050AllocParams->notifyIndex, + pResourceRef->externalClassId, + pNv0050AllocParams->data, + bUserOsEventHandle); + } + + if (rmStatus != NV_OK) + goto cleanup; + + return NV_OK; + +cleanup: + eventDestruct_IMPL(pEvent); + return rmStatus; +} + +void eventDestruct_IMPL +( + Event *pEvent +) +{ + CALL_CONTEXT *pCallContext; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + + RsClient* pRsClient; + NvHandle hEventClient; + NV_STATUS status = NV_OK; + NvHandle hEvent; + NotifShare *pNotifierShare; + + resGetFreeParams(staticCast(pEvent, RsResource), &pCallContext, &pParams); + pRsClient = pCallContext->pClient; + hEventClient = pRsClient->hClient; + hEvent = pCallContext->pResourceRef->hResource; + + LOCK_METER_DATA(FREE_EVENT, 0, 0, 0); + + pNotifierShare = pEvent->pNotifierShare; + if (pNotifierShare != NULL) + { + if (pNotifierShare->pNotifier != NULL) + { + status = inotifyUnregisterEvent(pNotifierShare->pNotifier, + pNotifierShare->hNotifierClient, + pNotifierShare->hNotifierResource, + hEventClient, + hEvent); + } + serverFreeShare(&g_resServ, staticCast(pEvent->pNotifierShare, RsShared)); + } + + if (pParams != NULL) + pParams->status = status; +} + +NV_STATUS notifyUnregisterEvent_IMPL +( + Notifier *pNotifier, + NvHandle hNotifierClient, + NvHandle hNotifierResource, + NvHandle hEventClient, + NvHandle hEvent +) +{ + NV_STATUS status = NV_OK; + PEVENTNOTIFICATION *ppEventNotification; + + ppEventNotification = inotifyGetNotificationListPtr(staticCast(pNotifier, INotifier)); + + // delete the event from the parent object and client + if (*ppEventNotification != NULL) + { + + if (_eventRpcForType(hNotifierClient, hNotifierResource)) + { + OBJGPU *pGpu = CliGetGpuFromHandle(hNotifierClient, hNotifierResource, NULL); + + if (pGpu != NULL) + 
{ + RsResourceRef *pNotifierRef = NULL; + + if (IS_FW_CLIENT(pGpu)) + { + NV_ASSERT_OK_OR_RETURN(serverutilGetResourceRef(hNotifierClient, hNotifierResource, &pNotifierRef)); + } + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // + if ( + (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_FW_CLIENT(pGpu) && pNotifierRef->internalClassId != classId(ContextDma)) || + (IS_VIRTUAL_WITH_SRIOV(pGpu) && !((*ppEventNotification)->bNonStallIntrEvent)))) + { + // + // In SR-IOV enabled systems, nonstall events are registered + // directly with guest RM since guest RM is capable of + // receiving and handling nonstall interrupts itself. We skip + // the allocation, so here, we skip the free too. In + // paravirtualized systems, we always need to use the RPC to + // host RM. + // + NV_RM_RPC_FREE(pGpu, hEventClient, hEventClient, hEvent, status); + } + } + else + { + NV_PRINTF(LEVEL_ERROR, + "RmFreeEvent could not set pGpu. hClient=0x%x, hObject=0x%x\n", + hNotifierClient, hNotifierResource); + } + } + + unregisterEventNotification(ppEventNotification, + hEventClient, + hNotifierResource, + hEvent); + + } + + return status; +} + +NV_STATUS +eventInit_IMPL +( + Event *pEvent, + CALL_CONTEXT *pCallContext, + NvHandle hNotifierClient, + NvHandle hNotifierResource, + PEVENTNOTIFICATION **pppEventNotification +) +{ + NV_STATUS rmStatus = NV_OK; + RsClient *pRsClient = pCallContext->pClient; + RsClient *pNotifierClient; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NotifShare *pNotifierShare = NULL; + + // validate event class + switch (pResourceRef->externalClassId) + { + case NV01_EVENT_KERNEL_CALLBACK: + case NV01_EVENT_KERNEL_CALLBACK_EX: + case NV01_EVENT_OS_EVENT: + break; + + default: + return NV_ERR_INVALID_CLASS; + } + + // RS-TODO remove support for this after per-client locking is enabled + if (pRsClient->hClient != hNotifierClient) + { + rmStatus = serverGetClientUnderLock(&g_resServ, hNotifierClient, &pNotifierClient); + if (rmStatus != NV_OK) + return rmStatus; + } + else + { + pNotifierClient = pRsClient; + } + + if (pNotifierClient != NULL) + { + RsResourceRef *pNotifierRef; + INotifier *pNotifier; + if (clientGetResourceRef(pNotifierClient, hNotifierResource, &pNotifierRef) != NV_OK) + return NV_ERR_INVALID_OBJECT; + + pNotifier = dynamicCast(pNotifierRef->pResource, INotifier); + if (pNotifier == NULL) + return NV_ERR_INVALID_OBJECT; + + rmStatus = inotifyGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, &pNotifierShare); + if (rmStatus != NV_OK) + return rmStatus; + + *pppEventNotification = inotifyGetNotificationListPtr(pNotifierShare->pNotifier); + } + + serverRefShare(&g_resServ, staticCast(pNotifierShare, RsShared)); + pEvent->pNotifierShare = pNotifierShare; + + // RS-TODO these can be looked up from share + pEvent->hNotifierClient = hNotifierClient; + pEvent->hNotifierResource = hNotifierResource; + pEvent->hEvent = pCallContext->pResourceRef->hResource; + + return rmStatus; +} + +NV_STATUS +notifyGetOrAllocNotifShare_IMPL +( + Notifier *pNotifier, + NvHandle hNotifierClient, + NvHandle hNotifierResource, + NotifShare **ppNotifierShare +) +{ + NV_STATUS status; + NotifShare *pNotifierShare; + + // + // Most objects that are notifiers will never have any events to notify so + // notifier shares are allocated as needed (i.e., when an event + // registers itself with the notifier.) 
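+    //
+    // Lifetime note, taken from the call sites in this file: eventInit()
+    // takes a reference on the share via serverRefShare() and
+    // eventDestruct() drops it through serverFreeShare(), so the share
+    // remains valid regardless of whether the notifier or one of its
+    // events is torn down first.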
+ // + pNotifierShare = inotifyGetNotificationShare(staticCast(pNotifier, INotifier)); + if (pNotifierShare == NULL) + { + RsShared *pShare; + status = serverAllocShare(&g_resServ, classInfo(NotifShare), &pShare); + if (status != NV_OK) + return status; + + pNotifierShare = dynamicCast(pShare, NotifShare); + pNotifierShare->pNotifier = staticCast(pNotifier, INotifier); + pNotifierShare->hNotifierClient = hNotifierClient; + pNotifierShare->hNotifierResource = hNotifierResource; + inotifySetNotificationShare(staticCast(pNotifier, INotifier), pNotifierShare); + } + + if (ppNotifierShare) + *ppNotifierShare = pNotifierShare; + + return NV_OK; +} + +NV_STATUS +CliGetEventNotificationList +( + NvHandle hClient, + NvHandle hObject, + INotifier **ppNotifier, + PEVENTNOTIFICATION **pppEventNotification +) +{ + NV_STATUS status = NV_OK; + RsResourceRef *pResourceRef; + RsClient *pRsClient; + INotifier *pNotifier; + + *pppEventNotification = NULL; + + // Populate Resource Server information + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return status; + + status = clientGetResourceRef(pRsClient, hObject, &pResourceRef); + if (status != NV_OK) + return status; + + pNotifier = dynamicCast(pResourceRef->pResource, INotifier); + if (pNotifier != NULL) + *pppEventNotification = inotifyGetNotificationListPtr(pNotifier); + + if (*pppEventNotification == NULL) + return NV_ERR_INVALID_OBJECT; + + if (ppNotifier != NULL) + *ppNotifier = pNotifier; + + return NV_OK; +} + +NvBool +CliGetEventInfo +( + NvHandle hClient, + NvHandle hEvent, + Event **ppEvent +) +{ + RsClient *pRsClient; + RsResourceRef *pResourceRef; + RmClient *pClient = serverutilGetClientUnderLock(hClient); + + if (pClient == NULL) + return NV_FALSE; + + pRsClient = staticCast(pClient, RsClient); + if (clientGetResourceRefByType(pRsClient, hEvent, classId(Event), &pResourceRef) != NV_OK) + return NV_FALSE; + + if (pResourceRef->pResource != NULL) + { + *ppEvent = dynamicCast(pResourceRef->pResource, Event); + return NV_TRUE; + } + + return NV_FALSE; + +} + +void +CliDelObjectEvents +( + RsResourceRef *pResourceRef +) +{ + NotifShare *pNotifierShare; + INotifier *pNotifier; + + if (pResourceRef == NULL) + return; + + // If not a notifier object, there aren't any events to free + pNotifier = dynamicCast(pResourceRef->pResource, INotifier); + + if (pNotifier == NULL) + return; + + pNotifierShare = inotifyGetNotificationShare(pNotifier); + if (pNotifierShare != NULL) + { + while(pNotifierShare->pEventList != NULL) + { + PEVENTNOTIFICATION pEventNotif = pNotifierShare->pEventList; + inotifyUnregisterEvent(pNotifier, + pNotifierShare->hNotifierClient, + pNotifierShare->hNotifierResource, + pEventNotif->hEventClient, + pEventNotif->hEvent); + } + pNotifierShare->pNotifier = NULL; + } +} // end of CliDelObjectEvents() + +// **************************************************************************** +// System events +// **************************************************************************** + +void CliAddSystemEvent( + NvU32 event, + void *pEventData, + NvBool *isEventNotified +) +{ + PEVENTNOTIFICATION pEventNotification = NULL; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RsResourceRef *pCliResRef; + NV_STATUS rmStatus = NV_OK; + Notifier *pNotifier; + + if (isEventNotified != NULL) + *isEventNotified = NV_FALSE; + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient 
= staticCast(pClient, RsClient); + + if (pClient->CliSysEventInfo.notifyActions[event] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + continue; + } + + rmStatus = clientGetResourceRef(staticCast(pClient, RsClient), pRsClient->hClient, &pCliResRef); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to look up resource reference handle: 0x%x\n", + pRsClient->hClient); + return; + } + + pNotifier = dynamicCast(pCliResRef->pResource, Notifier); + if (pNotifier != NULL) + pEventNotification = inotifyGetNotificationList(staticCast(pNotifier, INotifier)); + + if (pEventNotification != NULL) + { + while (pEventNotification) + { + if (pEventNotification->NotifyIndex == event) + { + // only log the system event that has data + if (pEventData != NULL) + { + if (eventSystemEnqueueEvent(&pClient->CliSysEventInfo.eventQueue, + event, pEventData) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "fails to add event=%d\n", event); + return; + } + } + + if (osNotifyEvent(NULL, pEventNotification, 0, 0, 0) != NV_OK) + { + if (pEventData != NULL) + eventSystemDequeueEventLatest(&pClient->CliSysEventInfo.eventQueue); + + NV_PRINTF(LEVEL_ERROR, "failed to deliver event 0x%x", + event); + } + else + { + if (isEventNotified != NULL) + *isEventNotified = NV_TRUE; + } + } + pEventNotification = pEventNotification->Next; + } + + if (pClient->CliSysEventInfo.notifyActions[event] == NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE) + { + pClient->CliSysEventInfo.notifyActions[event] = NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + } + } + } + + return; +} + +static NV_STATUS +_eventRpcForType(NvHandle hClient, NvHandle hObject) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + + status = serverutilGetResourceRef(hClient, hObject, &pResourceRef); + + if (status != NV_OK) + { + return NV_FALSE; + } + + if (objDynamicCastById(pResourceRef->pResource, classId(Subdevice)) || + objDynamicCastById(pResourceRef->pResource, classId(ChannelDescendant)) || + objDynamicCastById(pResourceRef->pResource, classId(ContextDma)) || + objDynamicCastById(pResourceRef->pResource, classId(DispChannel)) || + objDynamicCastById(pResourceRef->pResource, classId(DispCommon)) || + objDynamicCastById(pResourceRef->pResource, classId(TimerApi)) || + objDynamicCastById(pResourceRef->pResource, classId(KernelSMDebuggerSession))) + { + return NV_TRUE; + } + + return NV_FALSE; +} + +NV_STATUS +eventGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hEvent, + NvU32 *pNotifyIndex +) +{ + RsResourceRef *pEventResourceRef; + NV_STATUS status; + Event *pEvent; + NotifShare *pNotifierShare; + PEVENTNOTIFICATION pEventNotification; + + *pNotifyIndex = NV2080_NOTIFIERS_MAXCOUNT; + + status = clientGetResourceRef(pClient, hEvent, &pEventResourceRef); + if (status != NV_OK) + return status; + + pEvent = dynamicCast(pEventResourceRef->pResource, Event); + if (pEvent == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Event is null \n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // TODO: Check existing notifiers in that event + pNotifierShare = pEvent->pNotifierShare; + if ((pNotifierShare == NULL) || (pNotifierShare->pNotifier == NULL)) + { + NV_PRINTF(LEVEL_ERROR, "pNotifierShare or pNotifier is NULL \n"); + return NV_ERR_INVALID_ARGUMENT; + } + + pEventNotification = inotifyGetNotificationList(pNotifierShare->pNotifier); + if (pEventNotification == NULL) + { + NV_PRINTF(LEVEL_ERROR, "pEventNotification is NULL \n"); + return NV_ERR_INVALID_ARGUMENT; + } + + *pNotifyIndex = pEventNotification->NotifyIndex; + + return status; +} + +void 
eventSystemInitEventQueue(SystemEventQueueList *pQueue) +{ + listInit(pQueue, portMemAllocatorGetGlobalNonPaged()); +} + +NV_STATUS eventSystemEnqueueEvent(SystemEventQueueList *pQueue, NvU32 event, void *pEventData) +{ + NV0000_CTRL_GET_SYSTEM_EVENT_DATA_PARAMS newNode = { 0 }; + + newNode.event = event; + switch (event) + { + case NV0000_NOTIFIERS_DISPLAY_CHANGE: + newNode.data.display = *((NV0000_CTRL_SYSTEM_EVENT_DATA_DISPLAY_CHANGE *)pEventData); + break; + + case NV0000_NOTIFIERS_VGPU_UNBIND_EVENT: + newNode.data.vgpuUnbind = *((NV0000_CTRL_SYSTEM_EVENT_DATA_VGPU_UNBIND *)pEventData); + break; + + case NV0000_NOTIFIERS_VGPU_BIND_EVENT: + newNode.data.vgpuBind = *((NV0000_CTRL_SYSTEM_EVENT_DATA_VGPU_BIND *)pEventData); + break; + + case NV0000_NOTIFIERS_GPU_BIND_UNBIND_EVENT: + newNode.data.gpuBindUnbind = *((NV0000_CTRL_SYSTEM_EVENT_DATA_GPU_BIND_UNBIND *)pEventData); + break; + + default: + return NV_ERR_INVALID_EVENT; + } + + if (listAppendValue(pQueue, &newNode) == NULL) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +static void eventSystemDequeueEventLatest(SystemEventQueueList *pQueue) +{ + NV0000_CTRL_GET_SYSTEM_EVENT_DATA_PARAMS *pLastNode = listTail(pQueue); + + if (pLastNode == NULL) + return; + + listRemove(pQueue, pLastNode); +} + +NV_STATUS eventSystemDequeueEvent(SystemEventQueueList *pQueue, NV0000_CTRL_GET_SYSTEM_EVENT_DATA_PARAMS *pEvent) +{ + NV0000_CTRL_GET_SYSTEM_EVENT_DATA_PARAMS *pFirstNode = listHead(pQueue); + + if (pFirstNode == NULL) + { + // Queue is empty + return NV_ERR_OBJECT_NOT_FOUND; + } + + *pEvent = *pFirstNode; + + listRemove(pQueue, pFirstNode); + + return NV_OK; +} + +void eventSystemClearEventQueue(SystemEventQueueList *pQueue) +{ + listClear(pQueue); +} diff --git a/src/nvidia/src/kernel/rmapi/event_buffer.c b/src/nvidia/src/kernel/rmapi/event_buffer.c new file mode 100644 index 0000000..a2cd57f --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/event_buffer.c @@ -0,0 +1,731 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "rmapi/event_buffer.h" +#include "os/os.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "core/locks.h" +#include "gpu/gpu.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi_utils.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi_utils.h" +#include "class/cl0040.h" +#include "gpu/gsp/gsp_trace_rats_macro.h" + +static NV_STATUS _allocAndMapMemory(CALL_CONTEXT *pCallContext, NvP64 pAddress, MEMORY_DESCRIPTOR** ppMemDesc, NvU64 size, NvBool bKernel, + NvP64* pKernelAddr, NvP64* pKernelPriv, NvP64* pUserAddr, NvP64* pUserPriv, Subdevice *pSubdevice); + +static void _unmapAndFreeMemory(MEMORY_DESCRIPTOR *pMemDesc, NvBool bKernel, NvP64 kernelAddr, + NvP64 kernelPriv, NvP64 userAddr, NvP64 userPriv); + +NV_STATUS +eventbufferConstruct_IMPL +( + EventBuffer *pEventBuffer, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + NV_EVENT_BUFFER_ALLOC_PARAMETERS *pAllocParams = pParams->pAllocParams; + + EVENT_BUFFER_MAP_INFO *pKernelMap = &pEventBuffer->kernelMapInfo; + EVENT_BUFFER_MAP_INFO *pClientMap = &pEventBuffer->clientMapInfo; + + RmClient *pRmClient; + NvBool bKernel; + + NvU32 recordBufferSize; + NvP64 kernelNotificationhandle; + Subdevice *pSubdevice = NULL; + NvBool bInternalAlloc = (pAllocParams->hBufferHeader == 0); + NvBool bNoDeviceMem = NV_FALSE; + NvBool bUsingVgpuStagingBuffer = NV_FALSE; + OBJGPU *pGpu = NULL; + RsResourceRef *pHeaderRef = NULL; + RsResourceRef *pRecordRef = NULL; + RsResourceRef *pVardataRef = NULL; + NvHandle hMapperClient = 0; + NvHandle hMapperDevice = 0; + + pRmClient = dynamicCast(pCallContext->pClient, RmClient); + NV_ASSERT_OR_RETURN(pRmClient != NULL, NV_ERR_INVALID_CLIENT); + + bKernel = (rmclientGetCachedPrivilege(pRmClient) >= RS_PRIV_LEVEL_KERNEL); + + pAllocParams->bufferHeader = NvP64_NULL; + pAllocParams->recordBuffer = NvP64_NULL; + pAllocParams->vardataBuffer = NvP64_NULL; + + if (bInternalAlloc) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvBool bSupported = pSys->getProperty(pSys, PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED); + NV_ASSERT_OR_RETURN(bSupported, NV_ERR_NOT_SUPPORTED); + } + else + { + NV_ASSERT_OR_RETURN((pAllocParams->hRecordBuffer != 0), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(((pAllocParams->vardataBufferSize == 0) ^ (pAllocParams->hVardataBuffer != 0)), + NV_ERR_INVALID_ARGUMENT); + + status = clientGetResourceRef(pCallContext->pClient, pAllocParams->hBufferHeader, &pHeaderRef); + if (status != NV_OK) + return status; + + status = clientGetResourceRef(pCallContext->pClient, pAllocParams->hRecordBuffer, &pRecordRef); + if (status != NV_OK) + return status; + + // Avoid mixing and matching backing-memory + if (pRecordRef->externalClassId != pHeaderRef->externalClassId) + return NV_ERR_INVALID_ARGUMENT; + + if (pAllocParams->hVardataBuffer != 0) + { + status = clientGetResourceRef(pCallContext->pClient, pAllocParams->hVardataBuffer, &pVardataRef); + if (status != NV_OK) + return status; + + if (pVardataRef->externalClassId != pHeaderRef->externalClassId) + return NV_ERR_INVALID_ARGUMENT; + } + + if (!bNoDeviceMem) + { + if (pAllocParams->hSubDevice == 0) + { + NV_PRINTF(LEVEL_WARNING, "hSubDevice must be provided.\n"); + return NV_ERR_INVALID_ARGUMENT; + } + } + else + { + return NV_ERR_NOT_SUPPORTED; + } + } + + // bound check inputs and also check for overflow + if ((pAllocParams->recordSize == 0) || 
(pAllocParams->recordCount == 0) || + (!portSafeMulU32(pAllocParams->recordSize, pAllocParams->recordCount, &recordBufferSize)) || + (recordBufferSize / pAllocParams->recordCount != pAllocParams->recordSize) || + (pAllocParams->recordsFreeThreshold > pAllocParams->recordCount) || + (pAllocParams->vardataFreeThreshold > pAllocParams->vardataBufferSize)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pEventBuffer->hClient = pCallContext->pClient->hClient; + pEventBuffer->hSubDevice = pAllocParams->hSubDevice; + if (pEventBuffer->hSubDevice != 0) + { + status = subdeviceGetByHandle(pCallContext->pClient, pEventBuffer->hSubDevice, &pSubdevice); + if (status != NV_OK) + return NV_ERR_INVALID_OBJECT_HANDLE; + + pEventBuffer->subDeviceInst = pSubdevice->subDeviceInst; + pGpu = GPU_RES_GET_GPU(pSubdevice); + + if (!bNoDeviceMem) + { + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + // Staging buffer should be mapped as read-only in guest RM + bUsingVgpuStagingBuffer = NV_TRUE; + } + + if (!bKernel) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + status = rmapiutilAllocClientAndDeviceHandles(pRmApi, + pGpu, + &pEventBuffer->hInternalClient, + &pEventBuffer->hInternalDevice, + &pEventBuffer->hInternalSubdevice); + + if (status != NV_OK) + return status; + + hMapperClient = pEventBuffer->hInternalClient; + hMapperDevice = pEventBuffer->hInternalDevice; + } + else + { + hMapperClient = pCallContext->pClient->hClient; + hMapperDevice = RES_GET_PARENT_HANDLE(pSubdevice); + } + } + } + + + // + // Use goto cleanup on failure below here + // + + if (!bInternalAlloc) + { + Memory *pMemory; + NvBool bRequireReadOnly = bUsingVgpuStagingBuffer || !bKernel; + NvU32 flags = 0; + + if (bUsingVgpuStagingBuffer) + { + flags = FLD_SET_DRF(OS33, _FLAGS, _ACCESS, _READ_ONLY, flags); + } + else + { + flags = FLD_SET_DRF(OS33, _FLAGS, _ACCESS, _READ_WRITE, flags); + } + + // + // Buffer header + // + pEventBuffer->pHeader = dynamicCast(pHeaderRef->pResource, Memory); + pMemory = pEventBuffer->pHeader; + if ((pMemory == NULL) || (bRequireReadOnly && !memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY))) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (pMemory->Length < sizeof(NV_EVENT_BUFFER_HEADER)) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (!bNoDeviceMem) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hMemory = RES_GET_HANDLE(pMemory); + + // Dup memory object under CPU-RM's hClient + if (!bKernel) + { + status = pRmApi->DupObject(pRmApi, + hMapperClient, + hMapperDevice, + &hMemory, + pCallContext->pClient->hClient, + hMemory, 0); + if (status != NV_OK) + { + goto cleanup; + } + } + + status = pRmApi->MapToCpu(pRmApi, + hMapperClient, + hMapperDevice, + hMemory, + 0, + pMemory->Length, + &pKernelMap->headerAddr, + flags); + + if (status != NV_OK) + { + goto cleanup; + } + } + else + { + status = memCreateKernelMapping(pMemory, NV_PROTECT_READ_WRITE, NV_TRUE); + if (status != NV_OK) + goto cleanup; + + pKernelMap->headerAddr = pMemory->KernelVAddr; + } + + // + // Record buffer + // + pEventBuffer->pRecord = dynamicCast(pRecordRef->pResource, Memory); + pMemory = pEventBuffer->pRecord; + if ((pMemory == NULL) || (bRequireReadOnly && !memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY))) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (pMemory->Length < recordBufferSize) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (!bNoDeviceMem) + { + RM_API *pRmApi = 
rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hMemory = RES_GET_HANDLE(pMemory); + + // Dup memory object under CPU-RM's hClient + if (!bKernel) + { + status = pRmApi->DupObject(pRmApi, + hMapperClient, + hMapperDevice, + &hMemory, + pCallContext->pClient->hClient, + hMemory, 0); + if (status != NV_OK) + { + goto cleanup; + } + } + + status = pRmApi->MapToCpu(pRmApi, + hMapperClient, + hMapperDevice, + hMemory, + 0, + pMemory->Length, + &pKernelMap->recordBuffAddr, + flags); + if (status != NV_OK) + { + goto cleanup; + } + } + else + { + status = memCreateKernelMapping(pMemory, NV_PROTECT_READ_WRITE, NV_TRUE); + if (status != NV_OK) + goto cleanup; + pKernelMap->recordBuffAddr = pMemory->KernelVAddr; + } + + // + // Vardata buffer [optional] + // + if (pAllocParams->hVardataBuffer != 0) + { + pEventBuffer->pVardata = dynamicCast(pVardataRef->pResource, Memory); + pMemory = pEventBuffer->pVardata; + if ((pMemory == NULL) || (bRequireReadOnly && !memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY))) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (pMemory->Length < pAllocParams->vardataBufferSize) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (!bNoDeviceMem) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hMemory = RES_GET_HANDLE(pMemory); + + // Dup memory object under CPU-RM's hClient + if (!bKernel) + { + status = pRmApi->DupObject(pRmApi, + hMapperClient, + hMapperDevice, + &hMemory, + pCallContext->pClient->hClient, + hMemory, 0); + if (status != NV_OK) + { + goto cleanup; + } + } + + // Map into the vardata slot, not the record-buffer slot + status = pRmApi->MapToCpu(pRmApi, + hMapperClient, + hMapperDevice, + hMemory, + 0, + pMemory->Length, + &pKernelMap->vardataBuffAddr, + flags); + if (status != NV_OK) + { + goto cleanup; + } + } + else + { + status = memCreateKernelMapping(pMemory, NV_PROTECT_READ_WRITE, NV_TRUE); + if (status != NV_OK) + goto cleanup; + + // KernelVAddr is only populated on the memCreateKernelMapping path + pKernelMap->vardataBuffAddr = pMemory->KernelVAddr; + } + + refAddDependant(pVardataRef, pCallContext->pResourceRef); + } + + refAddDependant(pHeaderRef, pCallContext->pResourceRef); + refAddDependant(pRecordRef, pCallContext->pResourceRef); + } + + if (bInternalAlloc) + { + status = _allocAndMapMemory(pCallContext, + pAllocParams->bufferHeader, + &pEventBuffer->pHeaderDesc, + sizeof(NV_EVENT_BUFFER_HEADER), + bKernel, + &pKernelMap->headerAddr, + &pKernelMap->headerPriv, + &pClientMap->headerAddr, + &pClientMap->headerPriv, + pSubdevice); + if (status != NV_OK) + goto cleanup; + + status = _allocAndMapMemory(pCallContext, + pAllocParams->recordBuffer, + &pEventBuffer->pRecordBufDesc, + recordBufferSize, + bKernel, + &pKernelMap->recordBuffAddr, + &pKernelMap->recordBuffPriv, + &pClientMap->recordBuffAddr, + &pClientMap->recordBuffPriv, + pSubdevice); + if (status != NV_OK) + goto cleanup; + } + + eventBufferInitRecordBuffer(&pEventBuffer->producerInfo, + KERNEL_POINTER_FROM_NvP64(NV_EVENT_BUFFER_HEADER*, pKernelMap->headerAddr), + pKernelMap->recordBuffAddr, + pAllocParams->recordSize, + pAllocParams->recordCount, + recordBufferSize, + pAllocParams->recordsFreeThreshold); + + // not needed for all events, such as FECS context switch events + if (pAllocParams->vardataBufferSize != 0) + { + if (bInternalAlloc) + { + status = _allocAndMapMemory(pCallContext, + pAllocParams->vardataBuffer, + &pEventBuffer->pVardataBufDesc, + pAllocParams->vardataBufferSize, + bKernel, + &pKernelMap->vardataBuffAddr, + &pKernelMap->vardataBuffPriv, + &pClientMap->vardataBuffAddr, + &pClientMap->vardataBuffPriv, + 
pSubdevice); + + if (status != NV_OK) + goto cleanup; + } + + eventBufferInitVardataBuffer(&pEventBuffer->producerInfo, + pKernelMap->vardataBuffAddr, + pAllocParams->vardataBufferSize, + pAllocParams->vardataFreeThreshold); + } + + kernelNotificationhandle = (NvP64)pAllocParams->notificationHandle; + if (bKernel != NV_TRUE) + status = osUserHandleToKernelPtr(pCallContext->pClient->hClient, + kernelNotificationhandle, + &kernelNotificationhandle); + + eventBufferInitNotificationHandle(&pEventBuffer->producerInfo, kernelNotificationhandle); + eventBufferSetEnable(&pEventBuffer->producerInfo, NV_FALSE); + + // return user mode mappings + pAllocParams->bufferHeader = pClientMap->headerAddr; + pAllocParams->recordBuffer = pClientMap->recordBuffAddr; + pAllocParams->vardataBuffer = pClientMap->vardataBuffAddr; + + return NV_OK; + +cleanup: + eventbufferDestruct_IMPL(pEventBuffer); + return status; +} + +void +eventbufferDestruct_IMPL +( + EventBuffer *pEventBuffer +) +{ + CALL_CONTEXT *pCallContext; + EVENT_BUFFER_MAP_INFO *pClientMap = &pEventBuffer->clientMapInfo; + EVENT_BUFFER_MAP_INFO *pKernelMap = &pEventBuffer->kernelMapInfo; + RmClient *pRmClient = dynamicCast(RES_GET_CLIENT(pEventBuffer), RmClient); + NvBool bKernel; + void *notificationHandle = NvP64_VALUE(pEventBuffer->producerInfo.notificationHandle); + + NV_ASSERT_OR_RETURN_VOID(pRmClient != NULL); + + bKernel = (rmclientGetCachedPrivilege(pRmClient) >= RS_PRIV_LEVEL_KERNEL); + + resGetFreeParams(staticCast(pEventBuffer, RsResource), &pCallContext, NULL); + + if (notificationHandle != NULL) + { + osDereferenceObjectCount(notificationHandle); + } + +#if KERNEL_GSP_TRACING_RATS_ENABLED + gspTraceRemoveAllBindpoints(pEventBuffer); +#endif + + _unmapAndFreeMemory(pEventBuffer->pHeaderDesc, bKernel, pKernelMap->headerAddr, + pKernelMap->headerPriv, pClientMap->headerAddr, pClientMap->headerPriv); + + _unmapAndFreeMemory(pEventBuffer->pRecordBufDesc, bKernel, pKernelMap->recordBuffAddr, + pKernelMap->recordBuffPriv, pClientMap->recordBuffAddr, pClientMap->recordBuffPriv); + + _unmapAndFreeMemory(pEventBuffer->pVardataBufDesc, bKernel, pKernelMap->vardataBuffAddr, + pKernelMap->vardataBuffPriv, pClientMap->vardataBuffAddr, pClientMap->vardataBuffPriv); + + if (pEventBuffer->hInternalClient != 0) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + pRmApi->Free(pRmApi, pEventBuffer->hInternalClient, pEventBuffer->hInternalClient); + } + +} + +NV_STATUS +_allocAndMapMemory +( + CALL_CONTEXT *pCallContext, + NvP64 pAddress, + MEMORY_DESCRIPTOR** ppMemDesc, + NvU64 size, + NvBool bKernel, + NvP64* pKernelAddr, + NvP64* pKernelPriv, + NvP64* pUserAddr, + NvP64* pUserPriv, + Subdevice *pSubdevice +) +{ + NV_STATUS status; + MEMORY_DESCRIPTOR* pMemDesc = NULL; + OBJGPU* pGpu = NULL; + + NV_ASSERT_OR_RETURN(pAddress == NvP64_NULL, NV_ERR_NOT_SUPPORTED); + + if (pSubdevice != NULL) + pGpu = GPU_RES_GET_GPU(pSubdevice); + + NV_ASSERT_OR_RETURN(pSubdevice != NULL && pGpu != NULL, NV_ERR_INVALID_STATE); + + status = memdescCreate(ppMemDesc, pGpu, size, 0, NV_MEMORY_CONTIGUOUS, + ADDR_SYSMEM, NV_MEMORY_WRITECOMBINED, MEMDESC_FLAGS_CPU_ONLY); + if (status != NV_OK) + return status; + + pMemDesc = *ppMemDesc; + + status = osAllocPages(pMemDesc); + if (status != NV_OK) + goto cleanup; + pMemDesc->Allocated = 1; + + // map memory to kernel VA space + status = memdescMap(pMemDesc, 0, size, NV_TRUE, NV_PROTECT_READ_WRITE, + pKernelAddr, pKernelPriv); + if (status != NV_OK) + goto cleanup; + + portMemSet(NvP64_VALUE(*pKernelAddr), 0, size); + 
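+    //
+    // The buffer is zeroed through the kernel mapping before the user
+    // mapping below exists, so a client can never observe stale page
+    // contents left by a previous owner of these pages.
+    //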
+ // map memory to user VA space + status = memdescMap(pMemDesc, 0, size, bKernel, NV_PROTECT_READABLE, + pUserAddr, pUserPriv); + + if (status != NV_OK) + goto cleanup; + + return NV_OK; + +cleanup: + _unmapAndFreeMemory(pMemDesc, bKernel, *pKernelAddr, *pKernelPriv, *pUserAddr, *pUserPriv); + return status; +} + +static void +_unmapAndFreeMemory +( + MEMORY_DESCRIPTOR *pMemDesc, + NvBool bKernel, + NvP64 kernelAddr, + NvP64 kernelPriv, + NvP64 userAddr, + NvP64 userPriv +) +{ + if (pMemDesc == NULL) + return; + + if (userAddr) + memdescUnmap(pMemDesc, bKernel, userAddr, userPriv); + + if (kernelAddr) + memdescUnmap(pMemDesc, NV_TRUE, kernelAddr, kernelPriv); + + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); +} + +NV_STATUS +eventbuffertBufferCtrlCmdFlush_IMPL +( + EventBuffer *pEventBuffer +) +{ + return NV_OK; +} + +NV_STATUS +eventbuffertBufferCtrlCmdEnableEvent_IMPL +( + EventBuffer *pEventBuffer, + NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *pEnableParams +) +{ + GPU_MASK gpuMask; + NV_STATUS status = NV_OK; + NvBool updateTelemetry = NV_FALSE; + + if (pEnableParams->flags & + ~(NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_NEWEST|NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_OLDEST)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (pEnableParams->enable && !pEventBuffer->producerInfo.isEnabled) + { + updateTelemetry = NV_TRUE; + } + + eventBufferSetEnable(&pEventBuffer->producerInfo, pEnableParams->enable); + if (pEnableParams->flags & NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_NEWEST) + eventBufferSetKeepNewest(&pEventBuffer->producerInfo, NV_TRUE); + else if (pEnableParams->flags & NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_OLDEST) + eventBufferSetKeepNewest(&pEventBuffer->producerInfo, NV_FALSE); + + // NvTelemetry requires a valid subdevice + if (updateTelemetry && pEventBuffer->hSubDevice) + { + Subdevice *pSubDevice; + + status = rmGpuGroupLockAcquire(pEventBuffer->subDeviceInst, + GPU_LOCK_GRP_SUBDEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU, &gpuMask); + if (status != NV_OK) + return status; + + status = subdeviceGetByHandle(RES_GET_CLIENT(pEventBuffer), + pEventBuffer->hSubDevice, &pSubDevice); + if (status != NV_OK) + { + // Don't leak the GPU group lock on this error path + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + return status; + } + + GPU_RES_SET_THREAD_BC_STATE(pSubDevice); + + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + } + return NV_OK; +} + +NV_STATUS +eventbuffertBufferCtrlCmdUpdateGet_IMPL +( + EventBuffer *pEventBuffer, + NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *pUpdateParams +) +{ + EVENT_BUFFER_PRODUCER_INFO *pProducerInfo = &pEventBuffer->producerInfo; + NvP64 pVardataBuf = pEventBuffer->kernelMapInfo.vardataBuffAddr; + + if ((pUpdateParams->recordBufferGet >= eventBufferGetRecordBufferCount(pProducerInfo)) || + (pVardataBuf == NvP64_NULL && pUpdateParams->varDataBufferGet > 0) || + (pVardataBuf != NvP64_NULL && pUpdateParams->varDataBufferGet >= eventBufferGetVardataBufferCount(pProducerInfo))) + { + return NV_ERR_INVALID_ARGUMENT; + } + + eventBufferUpdateRecordBufferGet(pProducerInfo, pUpdateParams->recordBufferGet); + if (pVardataBuf) + eventBufferUpdateVardataBufferGet(pProducerInfo, pUpdateParams->varDataBufferGet); + + pEventBuffer->bNotifyPending = NV_FALSE; + + return NV_OK; +} + +/* + * eventbuffertBufferCtrlCmdPostTelemetryEvent posts an event to the event buffer for testing purposes. + * Note -- in order to post an event, a handle to the buffer is required. since the handle is + * only available to the client that created the buffer, one can only post events to buffers that + * it created. 
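For context on the KEEP_NEWEST/KEEP_OLDEST flags handled by eventbuffertBufferCtrlCmdEnableEvent above: with KEEP_NEWEST the producer overwrites the oldest unconsumed record when the buffer fills, while KEEP_OLDEST drops the incoming record instead. A minimal standalone ring-buffer sketch of the two policies (illustrative types and names, not the RM producer code):

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint32_t get, put, count; bool bKeepNewest; } Ring;

// Advance the put pointer by one record; returns false only when a full
// buffer in KEEP_OLDEST mode drops the incoming record.
static bool ringAdd(Ring *pRing)
{
    uint32_t next = (pRing->put + 1) % pRing->count;

    if (next == pRing->get)                            // buffer is full
    {
        if (!pRing->bKeepNewest)
            return false;                              // KEEP_OLDEST: drop the new record
        pRing->get = (pRing->get + 1) % pRing->count;  // KEEP_NEWEST: drop the oldest
    }
    pRing->put = next;
    return true;
}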
this has been done to limit the ability to post to buffers for testing purposes + * only. if it is determined that we want to open this up to other callers, then this ctrl call + * should be moved to the 2080 class & adjustments made for acquiring the pGpu based on the + * subdevice handle there. + */ +NV_STATUS +eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL +( + EventBuffer *pEventBuffer, + NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *pPostTelemetryEvent +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +eventBufferAdd(EventBuffer* pEventBuffer, void *pEventData, NvU32 recordType, NvBool *pBNotify, NvP64 *pHandle) +{ + EVENT_BUFFER_PRODUCER_DATA *pProducerData = (EVENT_BUFFER_PRODUCER_DATA*)pEventData; + RECORD_BUFFER_INFO *pRBI; + NV_EVENT_BUFFER_HEADER *pHeader; + + if (!pEventBuffer->producerInfo.isEnabled) + return NV_WARN_NOTHING_TO_DO; + + pRBI = &pEventBuffer->producerInfo.recordBuffer; + pHeader = pEventBuffer->producerInfo.recordBuffer.pHeader; + + NV_ASSERT_OR_RETURN(pHeader->recordPut < pRBI->totalRecordCount, NV_ERR_INVALID_STATE); + + eventBufferProducerAddEvent(&pEventBuffer->producerInfo, + recordType, 0, pProducerData); + + *pBNotify = (!pEventBuffer->bNotifyPending) && + (eventBufferIsNotifyThresholdMet(&pEventBuffer->producerInfo)); + *pHandle = pEventBuffer->producerInfo.notificationHandle; + return NV_OK; +} diff --git a/src/nvidia/src/kernel/rmapi/event_notification.c b/src/nvidia/src/kernel/rmapi/event_notification.c new file mode 100644 index 0000000..0437ed0 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/event_notification.c @@ -0,0 +1,1128 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/********************************* DMA Manager *****************************\ +* * +* Event notifications are handled in this module. DMA report and OS * +* action are dealt with on a per-object basis. 
* +* * +****************************************************************************/ + +#include "core/core.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "class/cl0000.h" +#include "os/os.h" +#include "class/cl0005.h" +#include "gpu/subdevice/subdevice.h" +#include "rmapi/rs_utils.h" +#include "mem_mgr/mem.h" +#include "kernel/gpu/gpu_engine_type.h" +#include "platform/sli/sli.h" +#include "gpu/timer/objtmr.h" + +typedef struct +{ + EVENTNOTIFICATION *pEventNotify; + Memory *pMemory; + ListNode eventNotificationListNode; + + // Protected by event list spinlock + ListNode pendingEventNotifyListNode; + NvBool bInPendingNotifyList; + + // + // Incremented under event list spinlock when a notification is pending for + // this event, decremented for each notification sent (not under spinlock). + // + volatile NvS32 pendingNotifyCount; +} ENGINE_EVENT_NOTIFICATION; + +// +// These lists are intrusive to avoid memory allocation during insertion while +// in a non-preemptible context (holding a spinlock/in an ISR). +// +MAKE_INTRUSIVE_LIST(EngineEventNotificationList, ENGINE_EVENT_NOTIFICATION, + eventNotificationListNode); +MAKE_INTRUSIVE_LIST(PendingEventNotifyList, ENGINE_EVENT_NOTIFICATION, + pendingEventNotifyListNode); + +// Linked list of per engine non-stall event notifications +struct GpuEngineEventNotificationList +{ + PORT_SPINLOCK *pSpinlock; + + // List insertion and removal happens under pSpinlock + EngineEventNotificationList eventNotificationList; + + // Filled while pSpinlock is held, drained outside of the lock in ISR + PendingEventNotifyList pendingEventNotifyList; + + // + // Accessed under pSpinlock, incremented when a thread starts the notify + // Decremented when the thread finishes notification + // + volatile NvU32 activeNotifyThreads; +}; + +static NV_STATUS _insertEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hEvent, + NvU32 NotifyIndex, + NvU32 NotifyType, + NvP64 Data, + NvBool bUserOsEventHandle +); + +static NV_STATUS _removeEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hEvent, + NvBool bMatchData, + NvP64 Data, + PEVENTNOTIFICATION *ppOldEvent +); + +//--------------------------------------------------------------------------- +// +// Event support. 
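The MAKE_INTRUSIVE_LIST declarations above embed the link nodes directly in ENGINE_EVENT_NOTIFICATION, which is what lets insertion happen under a spinlock or in an ISR without touching an allocator. A minimal sketch of the embedded-node idea in plain C (generic names, not the RM list implementation):

#include <stddef.h>

typedef struct ListNode { struct ListNode *prev, *next; } ListNode;

typedef struct
{
    int payload;
    ListNode node;   // embedded link, like eventNotificationListNode above
} Element;

// Recover the element from a pointer to its embedded node.
#define CONTAINER_OF(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

// Link n after pos; no allocation, so this is safe where malloc is not.
static void listInsertAfter(ListNode *pos, ListNode *n)
{
    n->prev = pos;
    n->next = pos->next;
    if (pos->next != NULL)
        pos->next->prev = n;
    pos->next = n;
}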
+// +//--------------------------------------------------------------------------- + +NV_STATUS gpuEngineEventNotificationListCreate +( + OBJGPU *pGpu, + GpuEngineEventNotificationList **ppEventNotificationList +) +{ + NV_STATUS status = NV_OK; + + PORT_MEM_ALLOCATOR *pAllocator = portMemAllocatorGetGlobalNonPaged(); + GpuEngineEventNotificationList *pEventNotificationList = + portMemAllocNonPaged(sizeof(*pEventNotificationList)); + NV_ASSERT_OR_RETURN(pEventNotificationList != NULL, NV_ERR_NO_MEMORY); + + portMemSet(pEventNotificationList, 0, sizeof(*pEventNotificationList)); + + pEventNotificationList->pSpinlock = portSyncSpinlockCreate(pAllocator); + NV_ASSERT_OR_ELSE(pEventNotificationList->pSpinlock != NULL, + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto exit; + }); + + listInitIntrusive(&pEventNotificationList->eventNotificationList); + listInitIntrusive(&pEventNotificationList->pendingEventNotifyList); + + pEventNotificationList->activeNotifyThreads = 0; + + *ppEventNotificationList = pEventNotificationList; + +exit: + if (status != NV_OK) + gpuEngineEventNotificationListDestroy(pGpu, pEventNotificationList); + return status; +} + +void gpuEngineEventNotificationListDestroy +( + OBJGPU *pGpu, + GpuEngineEventNotificationList *pEventNotificationList +) +{ + if (pEventNotificationList == NULL) + return; + + NV_ASSERT(pEventNotificationList->activeNotifyThreads == 0); + + NV_ASSERT(listCount(&pEventNotificationList->pendingEventNotifyList) == 0); + listDestroy(&pEventNotificationList->pendingEventNotifyList); + + NV_ASSERT(listCount(&pEventNotificationList->eventNotificationList) == 0); + listDestroy(&pEventNotificationList->eventNotificationList); + + if (pEventNotificationList->pSpinlock != NULL) + portSyncSpinlockDestroy(pEventNotificationList->pSpinlock); + + portMemFree(pEventNotificationList); +} + +static void _gpuEngineEventNotificationListLockPreemptible +( + GpuEngineEventNotificationList *pEventNotificationList +) +{ + do + { + portSyncSpinlockAcquire(pEventNotificationList->pSpinlock); + + // + // Only return with the lock held once there are no pending + // notifications to process. No more pending notifications can be queued + // while the spinlock is held, and we drop the lock to re-enable + // preemption, to guarantee that _gpuEngineEventNotificationListNotify() + // can make forward progress to drain the pending notifications list. + // + if (pEventNotificationList->activeNotifyThreads == 0) + return; + + portSyncSpinlockRelease(pEventNotificationList->pSpinlock); + + // + // Spin waiting for the pending notifications to drain. + // This can only be done in a preemptible context (i.e., add + // or remove notification in a thread context). 
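+        //
+        // In effect this is a small reader/writer protocol: notify threads
+        // (the "readers", possibly in ISR context) never block, while
+        // mutators (the "writers") spin below until the count drains to 0.
+        //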
+ // + while (pEventNotificationList->activeNotifyThreads > 0) + osSpinLoop(); + } while (NV_TRUE); +} + +static inline void _gpuEngineEventNotificationListUnlockPreemptible +( + GpuEngineEventNotificationList *pEventNotificationList +) +{ + portSyncSpinlockRelease(pEventNotificationList->pSpinlock); +} + +static NV_STATUS _gpuEngineEventNotificationInsert +( + GpuEngineEventNotificationList *pEventNotificationList, + EVENTNOTIFICATION *pEventNotify, + Memory *pMemory +) +{ + NV_CHECK_OR_RETURN(LEVEL_ERROR, pEventNotify != NULL, + NV_ERR_INVALID_ARGUMENT); + + // Allocate the new node outside of the spinlock + ENGINE_EVENT_NOTIFICATION *pEngineEventNotification = + portMemAllocNonPaged(sizeof(*pEngineEventNotification)); + + NV_CHECK_OR_RETURN(LEVEL_ERROR, pEngineEventNotification != NULL, + NV_ERR_NO_MEMORY); + + portMemSet(pEngineEventNotification, 0, sizeof(*pEngineEventNotification)); + + pEngineEventNotification->pEventNotify = pEventNotify; + pEngineEventNotification->pMemory = pMemory; + + // Take the lock to add the node to the list + _gpuEngineEventNotificationListLockPreemptible(pEventNotificationList); + { + listPrependExisting(&pEventNotificationList->eventNotificationList, + pEngineEventNotification); + } + _gpuEngineEventNotificationListUnlockPreemptible(pEventNotificationList); + + return NV_OK; +} + +static void _gpuEngineEventNotificationRemove +( + GpuEngineEventNotificationList *pEventNotificationList, + EVENTNOTIFICATION *pEventNotify +) +{ + ENGINE_EVENT_NOTIFICATION *pEngineEventNotification = NULL; + + _gpuEngineEventNotificationListLockPreemptible(pEventNotificationList); + { + EngineEventNotificationListIter it = + listIterAll(&pEventNotificationList->eventNotificationList); + while (listIterNext(&it)) + { + if (it.pValue->pEventNotify == pEventNotify) + { + pEngineEventNotification = it.pValue; + listRemove(&pEventNotificationList->eventNotificationList, + pEngineEventNotification); + break; + } + } + } + _gpuEngineEventNotificationListUnlockPreemptible(pEventNotificationList); + + NV_ASSERT(pEngineEventNotification != NULL); + portMemFree(pEngineEventNotification); +} + +static NV_STATUS _gpuEngineEventNotificationListNotify +( + OBJGPU *pGpu, + GpuEngineEventNotificationList *pEventNotificationList, + NvHandle hEvent +) +{ + NV_STATUS status = NV_OK; + PendingEventNotifyList *pPending = + &pEventNotificationList->pendingEventNotifyList; + ENGINE_EVENT_NOTIFICATION *pIter, *pTail; + + // + // Acquire engine list spinlock before traversing the list. Note that this + // is called without holding locks from ISR for Linux. This spinlock is used + // to protect the per GPU per engine event node list. + // + portSyncSpinlockAcquire(pEventNotificationList->pSpinlock); + { + pEventNotificationList->activeNotifyThreads++; + + EngineEventNotificationListIter it = + listIterAll(&pEventNotificationList->eventNotificationList); + while (listIterNext(&it)) + { + ENGINE_EVENT_NOTIFICATION *pEngineEventNotification = it.pValue; + if (hEvent && + pEngineEventNotification->pEventNotify->hEvent != hEvent) + continue; + + Memory *pSemMemory = pEngineEventNotification->pMemory; + if (pSemMemory && + pSemMemory->vgpuNsIntr.isSemaMemValidationEnabled && + pSemMemory->pMemDesc && pSemMemory->pMemDesc->Allocated) + { + NvU32 *pTempKernelMapping = + (NvU32 *)NvP64_VALUE( + memdescGetKernelMapping(pSemMemory->pMemDesc)); + if (pTempKernelMapping == NULL) + { + NV_PRINTF(LEVEL_WARNING, + "Per-vGPU semaphore location mapping is NULL." 
+ " Skipping the current node.\n"); + continue; + } + + NvU32 semValue = MEM_RD32(pTempKernelMapping + + (pSemMemory->vgpuNsIntr.nsSemOffset / + sizeof(NvU32))); + + if (pSemMemory->vgpuNsIntr.nsSemValue == semValue) + continue; + + pSemMemory->vgpuNsIntr.nsSemValue = semValue; + + } + + portAtomicIncrementS32(&pEngineEventNotification->pendingNotifyCount); + + // + // Queue up this event notification to be completed outside of the + // critical section, as the osNotifyEvent implementation may need + // to be preemptible. + // + if (!pEngineEventNotification->bInPendingNotifyList) + { + pEngineEventNotification->bInPendingNotifyList = NV_TRUE; + listAppendExisting(pPending, pEngineEventNotification); + } + } + + // + // We can't use the list iterator, because listIterNext() will assert + // if a node is appended to the list after the iterator has been + // initialized. For the loop below, it's safe to iterate over the list, + // (up to the point of the last node appended above), since nodes can't + // be removed from the list while the list's `activeNotifyThreads` is + // non-zero. + // + pIter = listHead(pPending); + pTail = listTail(pPending); + } + portSyncSpinlockRelease(pEventNotificationList->pSpinlock); + + // + // Iterate through the pending notifications and call the OS to send them. + // pIter and pTail are initialized to the list head and tail respectively, + // under the spinlock above. + // + while (pIter != NULL) + { + // + // Don't miss firing events - latch the pending count. + // This can race with the `portAtomicIncrementS32()` above. + // - If the increment wins, then the next thread to call the + // portAtomicCompareAndSwap32 with the incremented value will send + // all of the notifications. Example: thread A is preempted by ISR + // here, ISR will run through and send all notifications. Thread A + // will get pendingNotifyCount = 0 when it resumes and skip this + // element. + // - If this loop wins, thread A will proceed to send N notifications, + // and thread B will increment the pendingNotifyCount from 0 to 1. + // Thread B will service its own notification when it reaches this + // loop. + // + NvS32 pendingNotifyCount; + do + pendingNotifyCount = pIter->pendingNotifyCount; + while ((pendingNotifyCount > 0) && + !portAtomicCompareAndSwapS32(&pIter->pendingNotifyCount, 0, pendingNotifyCount)); + + while (pendingNotifyCount--) + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, + osNotifyEvent(pGpu, pIter->pEventNotify, 0, 0, NV_OK)); + + if (pIter == pTail) + break; + + pIter = listNext(pPending, pIter); + } + + portSyncSpinlockAcquire(pEventNotificationList->pSpinlock); + { + // + // The last active notify thread drains the pending notify list. + // Otherwise, we could be removing list nodes while it's being + // iterated over by another thread above, outside of the protection of + // the spinlock. + // + if (--pEventNotificationList->activeNotifyThreads == 0) + { + ENGINE_EVENT_NOTIFICATION *pNext; + for (pIter = listHead(pPending); pIter != NULL; pIter = pNext) + { + pNext = listNext(pPending, pIter); + + // + // There should not be any unsent notifications at this point, + // since we are the last active thread. 
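The latch loop above is what makes the scheme lossless without holding the spinlock while calling into the OS. A standalone sketch of the same claim-all pattern, using C11 atomics in place of the portAtomic* helpers (illustrative only):

#include <stdatomic.h>

static atomic_int pendingNotifyCount;

// Producer side: runs under the spinlock in the code above.
static void producerSignal(void)
{
    atomic_fetch_add(&pendingNotifyCount, 1);
}

// Consumer side: claim every pending notification at once, so none are
// lost and none are sent twice. Returns how many to deliver (possibly 0).
static int consumerClaimAll(void)
{
    int pending = atomic_load(&pendingNotifyCount);
    while (pending > 0 &&
           !atomic_compare_exchange_weak(&pendingNotifyCount, &pending, 0))
        ;   // a failed CAS refreshes `pending` with the current value
    return (pending > 0) ? pending : 0;
}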
+ // + NV_ASSERT(pIter->pendingNotifyCount == 0); + + pIter->bInPendingNotifyList = NV_FALSE; + listRemove(pPending, pIter); + } + } + } + portSyncSpinlockRelease(pEventNotificationList->pSpinlock); + + return status; +} + +NV_STATUS +engineNonStallIntrNotify(OBJGPU *pGpu, RM_ENGINE_TYPE rmEngineId) +{ + NV_ASSERT_OR_RETURN(rmEngineId < NV_ARRAY_ELEMENTS(pGpu->engineNonstallIntrEventNotifications), + NV_ERR_INVALID_ARGUMENT); + return _gpuEngineEventNotificationListNotify(pGpu, + pGpu->engineNonstallIntrEventNotifications[rmEngineId], 0); +} + +NV_STATUS +engineNonStallIntrNotifyEvent(OBJGPU *pGpu, RM_ENGINE_TYPE rmEngineId, NvHandle hEvent) +{ + NV_ASSERT_OR_RETURN(rmEngineId < NV_ARRAY_ELEMENTS(pGpu->engineNonstallIntrEventNotifications), + NV_ERR_INVALID_ARGUMENT); + return _gpuEngineEventNotificationListNotify(pGpu, + pGpu->engineNonstallIntrEventNotifications[rmEngineId], hEvent); +} + +static NV_STATUS +eventGetEngineTypeFromSubNotifyIndex +( + NvU32 notifyIndex, + RM_ENGINE_TYPE *pRmEngineId +) +{ + NV_ASSERT_OR_RETURN(pRmEngineId, NV_ERR_INVALID_ARGUMENT); + + *pRmEngineId = RM_ENGINE_TYPE_NULL; + + switch (notifyIndex) + { + case NV2080_NOTIFIERS_FIFO_EVENT_MTHD: + *pRmEngineId = RM_ENGINE_TYPE_HOST; + break; + case NV2080_NOTIFIERS_CE0: + *pRmEngineId = RM_ENGINE_TYPE_COPY0; + break; + case NV2080_NOTIFIERS_CE1: + *pRmEngineId = RM_ENGINE_TYPE_COPY1; + break; + case NV2080_NOTIFIERS_CE2: + *pRmEngineId = RM_ENGINE_TYPE_COPY2; + break; + case NV2080_NOTIFIERS_CE3: + *pRmEngineId = RM_ENGINE_TYPE_COPY3; + break; + case NV2080_NOTIFIERS_CE4: + *pRmEngineId = RM_ENGINE_TYPE_COPY4; + break; + case NV2080_NOTIFIERS_CE5: + *pRmEngineId = RM_ENGINE_TYPE_COPY5; + break; + case NV2080_NOTIFIERS_CE6: + *pRmEngineId = RM_ENGINE_TYPE_COPY6; + break; + case NV2080_NOTIFIERS_CE7: + *pRmEngineId = RM_ENGINE_TYPE_COPY7; + break; + case NV2080_NOTIFIERS_CE8: + *pRmEngineId = RM_ENGINE_TYPE_COPY8; + break; + case NV2080_NOTIFIERS_CE9: + *pRmEngineId = RM_ENGINE_TYPE_COPY9; + break; + case NV2080_NOTIFIERS_GR0: + *pRmEngineId = RM_ENGINE_TYPE_GR0; + break; + case NV2080_NOTIFIERS_GR1: + *pRmEngineId = RM_ENGINE_TYPE_GR1; + break; + case NV2080_NOTIFIERS_GR2: + *pRmEngineId = RM_ENGINE_TYPE_GR2; + break; + case NV2080_NOTIFIERS_GR3: + *pRmEngineId = RM_ENGINE_TYPE_GR3; + break; + case NV2080_NOTIFIERS_GR4: + *pRmEngineId = RM_ENGINE_TYPE_GR4; + break; + case NV2080_NOTIFIERS_GR5: + *pRmEngineId = RM_ENGINE_TYPE_GR5; + break; + case NV2080_NOTIFIERS_GR6: + *pRmEngineId = RM_ENGINE_TYPE_GR6; + break; + case NV2080_NOTIFIERS_GR7: + *pRmEngineId = RM_ENGINE_TYPE_GR7; + break; + case NV2080_NOTIFIERS_PPP: + *pRmEngineId = RM_ENGINE_TYPE_PPP; + break; + case NV2080_NOTIFIERS_NVDEC0: + *pRmEngineId = RM_ENGINE_TYPE_NVDEC0; + break; + case NV2080_NOTIFIERS_NVDEC1: + *pRmEngineId = RM_ENGINE_TYPE_NVDEC1; + break; + case NV2080_NOTIFIERS_NVDEC2: + *pRmEngineId = RM_ENGINE_TYPE_NVDEC2; + break; + case NV2080_NOTIFIERS_NVDEC3: + *pRmEngineId = RM_ENGINE_TYPE_NVDEC3; + break; + case NV2080_NOTIFIERS_NVDEC4: + *pRmEngineId = RM_ENGINE_TYPE_NVDEC4; + break; + case NV2080_NOTIFIERS_NVDEC5: + *pRmEngineId = RM_ENGINE_TYPE_NVDEC5; + break; + case NV2080_NOTIFIERS_NVDEC6: + *pRmEngineId = RM_ENGINE_TYPE_NVDEC6; + break; + case NV2080_NOTIFIERS_NVDEC7: + *pRmEngineId = RM_ENGINE_TYPE_NVDEC7; + break; + case NV2080_NOTIFIERS_PDEC: + *pRmEngineId = RM_ENGINE_TYPE_VP; + break; + case NV2080_NOTIFIERS_MSENC: + NV_ASSERT(NV2080_NOTIFIERS_MSENC == NV2080_NOTIFIERS_NVENC0); + NV_ASSERT(RM_ENGINE_TYPE_MSENC == 
RM_ENGINE_TYPE_NVENC0); + *pRmEngineId = RM_ENGINE_TYPE_MSENC; + break; + case NV2080_NOTIFIERS_NVENC1: + *pRmEngineId = RM_ENGINE_TYPE_NVENC1; + break; + case NV2080_NOTIFIERS_NVENC2: + *pRmEngineId = RM_ENGINE_TYPE_NVENC2; + break; + case NV2080_NOTIFIERS_NVENC3: + *pRmEngineId = RM_ENGINE_TYPE_NVENC3; + break; + case NV2080_NOTIFIERS_SEC2: + *pRmEngineId = RM_ENGINE_TYPE_SEC2; + break; + case NV2080_NOTIFIERS_NVJPEG0: + *pRmEngineId = RM_ENGINE_TYPE_NVJPEG0; + break; + case NV2080_NOTIFIERS_NVJPEG1: + *pRmEngineId = RM_ENGINE_TYPE_NVJPEG1; + break; + case NV2080_NOTIFIERS_NVJPEG2: + *pRmEngineId = RM_ENGINE_TYPE_NVJPEG2; + break; + case NV2080_NOTIFIERS_NVJPEG3: + *pRmEngineId = RM_ENGINE_TYPE_NVJPEG3; + break; + case NV2080_NOTIFIERS_NVJPEG4: + *pRmEngineId = RM_ENGINE_TYPE_NVJPEG4; + break; + case NV2080_NOTIFIERS_NVJPEG5: + *pRmEngineId = RM_ENGINE_TYPE_NVJPEG5; + break; + case NV2080_NOTIFIERS_NVJPEG6: + *pRmEngineId = RM_ENGINE_TYPE_NVJPEG6; + break; + case NV2080_NOTIFIERS_NVJPEG7: + *pRmEngineId = RM_ENGINE_TYPE_NVJPEG7; + break; + case NV2080_NOTIFIERS_OFA0: + *pRmEngineId = RM_ENGINE_TYPE_OFA0; + break; + default: + NV_PRINTF(LEVEL_WARNING, + "notifier 0x%x doesn't use the fast non-stall interrupt path!\n", + notifyIndex); + NV_ASSERT(0); + return NV_ERR_NOT_SUPPORTED; + } + + return NV_OK; +} + +NV_STATUS registerEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + RsClient *pEventClient, + NvHandle hNotifier, + NvHandle hEvent, + NvU32 NotifyIndex, + NvU32 NotifyType, + NvP64 Data, + NvBool bUserOsEventHandle +) +{ + NvHandle hEventClient = pEventClient->hClient; + Subdevice *pSubDevice; + PEVENTNOTIFICATION pTargetEvent = NULL; + NV_STATUS rmStatus = NV_OK, rmTmpStatus = NV_OK; + OBJGPU *pGpu; + NvBool bNonStallIntrEvent = NV_FALSE; + RM_ENGINE_TYPE rmEngineId; + RsResourceRef *pResourceRef; + Memory *pSemMemory = NULL; + + rmStatus = _insertEventNotification(ppEventNotification, hEventClient, + hEvent, NotifyIndex, NotifyType, Data, bUserOsEventHandle); + + if (rmStatus != NV_OK) + goto failed_insert; + + bNonStallIntrEvent = ((NotifyIndex & NV01_EVENT_NONSTALL_INTR) ? NV_TRUE : NV_FALSE); + + if (bNonStallIntrEvent) + { + // + // For non-stall interrupt, the event parent type is NV20_SUBDEVICE, so we can locate + // the correct OBJGPU and attach to its per-engine non-stall event list. 
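As the non-stall check above shows, NotifyIndex is not a bare index: modifier flags such as NV01_EVENT_NONSTALL_INTR and NV01_EVENT_BROADCAST ride in its upper bits and are stripped with DRF_VAL before the index is stored. A small self-contained sketch of that packing (bit positions are illustrative, not the real NV0005 layout):

#include <stdint.h>
#include <stdio.h>

#define IDX_MASK       0x0000FFFFu   // low bits: notifier index
#define FLAG_BROADCAST (1u << 30)    // high bits: modifier flags
#define FLAG_NONSTALL  (1u << 31)

int main(void)
{
    uint32_t notifyIndex = 7u | FLAG_NONSTALL;

    int bNonStall = (notifyIndex & FLAG_NONSTALL) != 0;
    uint32_t index = notifyIndex & IDX_MASK;   // strip the flag bits

    printf("index=%u nonstall=%d\n", index, bNonStall);  // index=7 nonstall=1
    return 0;
}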
+ // + if ((clientGetResourceRef(pEventClient, hNotifier, &pResourceRef) != NV_OK) || + (!dynamicCast(pResourceRef->pResource, Subdevice))) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto free_entry; + } + + pSubDevice = dynamicCast(pResourceRef->pResource, Subdevice); + + pGpu = GPU_RES_GET_GPU(pSubDevice); + + GPU_RES_SET_THREAD_BC_STATE(pSubDevice); + + rmStatus = eventGetEngineTypeFromSubNotifyIndex( + DRF_VAL(0005, _NOTIFY_INDEX, _INDEX, NotifyIndex), &rmEngineId); + + if (rmStatus != NV_OK) + goto free_entry; + + if (pSubDevice->hSemMemory != NV01_NULL_OBJECT) + { + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT, + memGetByHandle(RES_GET_CLIENT(pSubDevice), + pSubDevice->hSemMemory, + &pSemMemory), + free_entry); + } + + rmStatus = _gpuEngineEventNotificationInsert( + pGpu->engineNonstallIntrEventNotifications[rmEngineId], + *ppEventNotification, pSemMemory); + + if (rmStatus != NV_OK) + goto free_entry; + + return rmStatus; + } + +free_entry: + if (rmStatus != NV_OK) + { + rmTmpStatus = _removeEventNotification(ppEventNotification, hEventClient, + hEvent, NV_TRUE, Data, &pTargetEvent); + + if (rmTmpStatus == NV_OK) + portMemFree(pTargetEvent); + } + +failed_insert: + NV_ASSERT(rmStatus == NV_OK); + return rmStatus; +} + +static NV_STATUS _insertEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hEvent, + NvU32 NotifyIndex, + NvU32 NotifyType, + NvP64 Data, + NvBool bUserOsEventHandle + +) +{ + PEVENTNOTIFICATION EventNotify; + + // + // Create the event notification object + // + EventNotify = portMemAllocNonPaged(sizeof(EVENTNOTIFICATION)); + if (EventNotify == NULL) + return NV_ERR_NO_MEMORY; + + // + // Fill in the fields + // + if (NotifyIndex & NV01_EVENT_BROADCAST) + { + EventNotify->bBroadcastEvent = NV_TRUE; + } + else + { + EventNotify->bBroadcastEvent = NV_FALSE; + } + + if (NotifyIndex & NV01_EVENT_SUBDEVICE_SPECIFIC) + { + EventNotify->bSubdeviceSpecificEvent = NV_TRUE; + EventNotify->SubdeviceSpecificValue = + DRF_VAL(0005, _NOTIFY_INDEX, _SUBDEVICE, NotifyIndex); + } + else + { + EventNotify->bSubdeviceSpecificEvent = NV_FALSE; + EventNotify->SubdeviceSpecificValue = 0; + } + + if (NotifyIndex & NV01_EVENT_WITHOUT_EVENT_DATA) + { + EventNotify->bEventDataRequired = NV_FALSE; + } + else + { + EventNotify->bEventDataRequired = NV_TRUE; + } + + if (NotifyIndex & NV01_EVENT_CLIENT_RM) + { + EventNotify->bClientRM = NV_TRUE; + } + else + { + EventNotify->bClientRM = NV_FALSE; + } + + EventNotify->bNonStallIntrEvent = + ((NotifyIndex & NV01_EVENT_NONSTALL_INTR) ? NV_TRUE : NV_FALSE); + + // strip the upper bits as they are actually flags + NotifyIndex = DRF_VAL(0005, _NOTIFY_INDEX, _INDEX, NotifyIndex); + + EventNotify->hEventClient = hEventClient; + EventNotify->hEvent = hEvent; + EventNotify->subdeviceInst = 0; + EventNotify->NotifyIndex = NotifyIndex; + EventNotify->NotifyType = NotifyType; + EventNotify->Data = Data; + EventNotify->NotifyTriggerCount = 0; + EventNotify->bUserOsEventHandle = bUserOsEventHandle; + + // These fields are set by NV0004_CTRL_CMD_TMR_SET_ALARM_NOTIFY for graceful TMR_EVENT teardown + EventNotify->pGpu = NULL; + EventNotify->pTmrEvent = NULL; + + // + // Now insert the event into the event chain of this object. + // Order doesn't really matter. + // + EventNotify->Next = *ppEventNotification; + *ppEventNotification = EventNotify; + + return (NV_OK); +} + +//--------------------------------------------------------------------------- +// +// Event Notification support. 
+// +//--------------------------------------------------------------------------- + +NV_STATUS unregisterEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hNotifier, + NvHandle hEvent +) +{ + return unregisterEventNotificationWithData(ppEventNotification, + hEventClient, + hNotifier, + hEvent, + NV_FALSE, + NvP64_NULL); +} + +NV_STATUS unregisterEventNotificationWithData +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hNotifier, + NvHandle hEvent, + NvBool bMatchData, + NvP64 Data +) +{ + NV_STATUS rmStatus = NV_OK; + PEVENTNOTIFICATION pTargetEvent = NULL; + Subdevice *pSubDevice; + RsResourceRef *pResourceRef; + RM_ENGINE_TYPE rmEngineId; + OBJGPU *pGpu; + + rmStatus = _removeEventNotification(ppEventNotification, hEventClient, + hEvent, bMatchData, Data, &pTargetEvent); + + if (rmStatus != NV_OK) + goto error; + + if (pTargetEvent->bNonStallIntrEvent) + { + // + // For non-stall interrupt, the event parent type is NV20_SUBDEVICE, so we can locate + // the correct OBJGPU and attach to its per-engine non-stall event list. + // + if ((serverutilGetResourceRef(hEventClient, hNotifier, &pResourceRef) != NV_OK) || + (!dynamicCast(pResourceRef->pResource, Subdevice))) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto free_entry; + } + + pSubDevice = dynamicCast(pResourceRef->pResource, Subdevice); + + // Fetch pGpu and hDevice, set the threadstate to the pGpu + pGpu = GPU_RES_GET_GPU(pSubDevice); + + GPU_RES_SET_THREAD_BC_STATE(pSubDevice); + + rmStatus = eventGetEngineTypeFromSubNotifyIndex(pTargetEvent->NotifyIndex, &rmEngineId); + + if (rmStatus != NV_OK) + goto free_entry; + + _gpuEngineEventNotificationRemove( + pGpu->engineNonstallIntrEventNotifications[rmEngineId], + pTargetEvent); + } + +free_entry: + portMemFree(pTargetEvent); + +error: + NV_ASSERT(rmStatus == NV_OK); + return rmStatus; +} + +static NV_STATUS _removeEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hEvent, + NvBool bMatchData, + NvP64 Data, + PEVENTNOTIFICATION *ppOldEvent +) +{ + PEVENTNOTIFICATION nextEvent, lastEvent; + NvBool found = NV_FALSE; + + // check for null list + nextEvent = NULL; + + if (*ppEventNotification != NULL) + { + // check for head of list + nextEvent = lastEvent = *ppEventNotification; + if ((nextEvent->hEventClient == hEventClient) && + (nextEvent->hEvent == hEvent) && + (!bMatchData || (nextEvent->Data == Data))) + { + *ppEventNotification = nextEvent->Next; + found = NV_TRUE; + } + else + { + // check for internal nodes + nextEvent = nextEvent->Next; + while (nextEvent) + { + if ((nextEvent->hEventClient == hEventClient) && + (nextEvent->hEvent == hEvent) && + (!bMatchData || (nextEvent->Data == Data))) + { + lastEvent->Next = nextEvent->Next; + found = NV_TRUE; + break; + } + lastEvent = nextEvent; + nextEvent = nextEvent->Next; + } + } + } + + // delete the event if it was found + if (found) + { + if (nextEvent->pTmrEvent != NULL) + { + NV_ASSERT_OR_RETURN((nextEvent->pGpu != NULL), NV_ERR_INVALID_STATE); + + tmrEventDestroy(GPU_GET_TIMER(nextEvent->pGpu), nextEvent->pTmrEvent); + nextEvent->pGpu = NULL; + nextEvent->pTmrEvent = NULL; + } + + if (nextEvent->bUserOsEventHandle) + osDereferenceObjectCount(NvP64_VALUE(nextEvent->Data)); + + *ppOldEvent = nextEvent; + } + + return (found) ? 
NV_OK : NV_ERR_GENERIC; + +} // end of unregisterEventNotificationEventNotify() + +NV_STATUS notifyEvents +( + OBJGPU *pGpu, + PEVENTNOTIFICATION pEventNotification, + NvU32 Notifier, + NvU32 Method, + NvU32 Data, + NV_STATUS Status, + NvU32 Action +) +{ + NV_STATUS rmStatus = NV_OK; + PEVENTNOTIFICATION NotifyEvent; + + NV_PRINTF(LEVEL_INFO, " Method = 0x%x\n", Method); + NV_PRINTF(LEVEL_INFO, " Data = 0x%x\n", Data); + NV_PRINTF(LEVEL_INFO, " Status = 0x%x\n", Status); + NV_PRINTF(LEVEL_INFO, " Action = 0x%x\n", Action); + + // perform the type of action + switch (Action) + { + case NV_OS_WRITE_THEN_AWAKEN: + + // walk this object's event list and find any matches for this specific notify + for (NotifyEvent = pEventNotification; NotifyEvent; NotifyEvent = NotifyEvent->Next) + { + if (NotifyEvent->bSubdeviceSpecificEvent) + { + if (gpumgrGetSubDeviceInstanceFromGpu(pGpu) != NotifyEvent->SubdeviceSpecificValue) + { + continue; + } + } + + if (NotifyEvent->NotifyIndex == Notifier) + { + // Do any OS specified action related to this notification. + if (NotifyEvent->bBroadcastEvent) + { + // + // Only do the OS notify when all sub devices under + // a BC device have seen the event. + // + if (++NotifyEvent->NotifyTriggerCount == NumSubDevices(pGpu)) + { + rmStatus = osNotifyEvent(pGpu, NotifyEvent, Method, Data, Status); + NotifyEvent->NotifyTriggerCount = 0x0; + } + } + else + { + rmStatus = osNotifyEvent(pGpu, NotifyEvent, Method, Data, Status); + } + } + } + break; + + default: + // any other actions are legacy channel-based notifies + rmStatus = NV_ERR_INVALID_EVENT; + break; + } + + return rmStatus; +} + +// +// bindEventNotificationToSubdevice +// +// This routine walks the given EVENTNOTIFICATION list and sets +// the designated subdevice instance value for any that are associated +// with the specific NV01_EVENT handle hEvent. 
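_removeEventNotification above handles the list head as a special case before walking interior nodes. The classic pointer-to-pointer formulation folds both cases into one loop; a minimal sketch with a generic node type (not EVENTNOTIFICATION):

#include <stddef.h>

typedef struct Node { int key; struct Node *Next; } Node;

// Unlink and return the first node matching key, or NULL if absent.
// Walking a Node** means the head needs no special-casing.
static Node *removeByKey(Node **ppHead, int key)
{
    Node **pp;

    for (pp = ppHead; *pp != NULL; pp = &(*pp)->Next)
    {
        if ((*pp)->key == key)
        {
            Node *pFound = *pp;
            *pp = pFound->Next;   // unlinks head and interior nodes alike
            return pFound;
        }
    }
    return NULL;
}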
+// +NV_STATUS +bindEventNotificationToSubdevice +( + PEVENTNOTIFICATION pEventNotificationList, + NvHandle hEvent, + NvU32 subdeviceInst +) +{ + PEVENTNOTIFICATION pEventNotify; + NvU32 count = 0; + + if (pEventNotificationList == NULL) + return NV_ERR_INVALID_STATE; + + pEventNotify = pEventNotificationList; + while (pEventNotify) + { + if (pEventNotify->hEvent == hEvent) + { + pEventNotify->subdeviceInst = subdeviceInst; + count++; + } + pEventNotify = pEventNotify->Next; + } + + if (count == 0) + return NV_ERR_INVALID_STATE; + + return NV_OK; +} + +NV_STATUS +inotifyConstruct_IMPL(INotifier *pNotifier, CALL_CONTEXT *pCallContext) +{ + if (dynamicCast(pNotifier, RsResource) == NULL) + return NV_ERR_INVALID_OBJECT; + + return NV_OK; +} + +void inotifyDestruct_IMPL(INotifier* pNotifier) +{ + return; +} + +PEVENTNOTIFICATION +inotifyGetNotificationList_IMPL +( + INotifier *pNotifier +) +{ + PEVENTNOTIFICATION *ppEventNotifications = inotifyGetNotificationListPtr(pNotifier); + if (ppEventNotifications != NULL) + return *ppEventNotifications; + + return NULL; +} + +NV_STATUS +notifyConstruct_IMPL(Notifier *pNotifier, CALL_CONTEXT *pCallContext) +{ + return NV_OK; +} + +void notifyDestruct_IMPL(Notifier* pNotifier) +{ + NotifShare *pNotifierShare = inotifyGetNotificationShare(staticCast(pNotifier, INotifier)); + if (pNotifierShare != NULL) + { + pNotifierShare->pNotifier = NULL; + serverFreeShare(&g_resServ, staticCast(pNotifierShare, RsShared)); + } +} + +PEVENTNOTIFICATION +*notifyGetNotificationListPtr_IMPL +( + Notifier *pNotifier +) +{ + NotifShare *pNotifierShare = pNotifier->pNotifierShare; + if (pNotifierShare == NULL) + return NULL; + + return &pNotifierShare->pEventList; +} + +NotifShare +*notifyGetNotificationShare_IMPL +( + Notifier *pNotifier +) +{ + return pNotifier->pNotifierShare; +} + +void +notifySetNotificationShare_IMPL +( + Notifier *pNotifier, + NotifShare *pNotifierShare +) +{ + pNotifier->pNotifierShare = pNotifierShare; +} + +NV_STATUS +shrnotifConstruct_IMPL +( + NotifShare *pNotifShare +) +{ + return NV_OK; +} + +void +shrnotifDestruct_IMPL +( + NotifShare *pNotifShare +) +{ +} diff --git a/src/nvidia/src/kernel/rmapi/lock_stress.c b/src/nvidia/src/kernel/rmapi/lock_stress.c new file mode 100644 index 0000000..a3fc22e --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/lock_stress.c @@ -0,0 +1,598 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define NVOC_LOCK_STRESS_H_PRIVATE_ACCESS_ALLOWED + +#include "core/locks.h" +#include "core/system.h" +#include "rmapi/client.h" +#include "rmapi/lock_stress.h" +#include "rmapi/rs_utils.h" + +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" +#include "os/os.h" + +#include "class/cl0080.h" +#include "class/cl0100.h" + +#include "g_finn_rm_api.h" + +static NvS32 g_LockStressCounter = 0; + +NV_STATUS +lockStressObjConstruct_IMPL +( + LockStressObject *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvHandle hClient; + + // + // This is an off-by-default object since its only use is with testing. Return + // an error unless the RM test code registry key is turned on by the user. + // + if (!pSys->getProperty(pSys, PDB_PROP_SYS_ENABLE_RM_TEST_ONLY_CODE)) + return NV_ERR_TEST_ONLY_CODE_NOT_ENABLED; + + // + // Allocate internal client handle for stressing locks in the internal RM API + // path if this LockStressObject was externally allocated. + // + hClient = RES_GET_CLIENT_HANDLE(pResource); + + if (!serverIsClientInternal(&g_resServ, hClient)) + { + NV_STATUS status; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + OBJGPU *pGpu = GPU_RES_GET_GPU(pResource); + NV0080_ALLOC_PARAMETERS nv0080AllocParams; + NV2080_ALLOC_PARAMETERS nv2080AllocParams; + + pResource->hInternalClient = NV01_NULL_OBJECT; + + // Allocate internal client + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_ROOT, + &pResource->hInternalClient, + sizeof(pResource->hInternalClient)), + failed); + + // Allocate a device + portMemSet(&nv0080AllocParams, 0, sizeof(nv0080AllocParams)); + nv0080AllocParams.deviceId = gpuGetDeviceInstance(pGpu); + nv0080AllocParams.hClientShare = pResource->hInternalClient; + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Alloc(pRmApi, + pResource->hInternalClient, + pResource->hInternalClient, + &pResource->hInternalDevice, + NV01_DEVICE_0, + &nv0080AllocParams, + sizeof(nv0080AllocParams)), + failed); + + // Allocate a subdevice + portMemSet(&nv2080AllocParams, 0, sizeof(nv2080AllocParams)); + nv2080AllocParams.subDeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Alloc(pRmApi, + pResource->hInternalClient, + pResource->hInternalDevice, + &pResource->hInternalSubdevice, + NV20_SUBDEVICE_0, + &nv2080AllocParams, + sizeof(nv2080AllocParams)), + failed); + + // Allocate the internal lock stress object + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Alloc(pRmApi, + pResource->hInternalClient, + pResource->hInternalSubdevice, + &pResource->hInternalLockStressObject, + LOCK_STRESS_OBJECT, + NULL, 0), + failed); + + return NV_OK; +failed: + // + // Free internal client on error, Resource Server will free all other internal + // objects allocated with it. 
+ // + if (pResource->hInternalClient != NV01_NULL_OBJECT) + pRmApi->Free(pRmApi, pResource->hInternalClient, pResource->hInternalClient); + + return status; + } + + return NV_OK; +} + +void +lockStressObjDestruct_IMPL +( + LockStressObject *pResource +) +{ + if (!serverIsClientInternal(&g_resServ, RES_GET_CLIENT_HANDLE(pResource))) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + // + // Free internal client, Resource Server will free all other internal + // objects allocated with it. + // + pRmApi->Free(pRmApi, pResource->hInternalClient, pResource->hInternalClient); + } +} + +NV_STATUS +lockStressObjCtrlCmdResetLockStressState_IMPL +( + LockStressObject *pResource +) +{ + RsClient *pClient = RES_GET_CLIENT(pResource); + RmClient *pRmClient = dynamicCast(pClient, RmClient); + RmClient *pRmInternalClient; + OBJGPU *pGpu = GPU_RES_GET_GPU(pResource); + + if (pRmClient == NULL) + return NV_ERR_INVALID_STATE; + + pRmInternalClient = serverutilGetClientUnderLock(pGpu->hInternalLockStressClient); + if (pRmInternalClient == NULL) + return NV_ERR_INVALID_STATE; + + // Reset all lock stress counters to 0 + g_LockStressCounter = 0; + + pGpu->lockStressCounter = 0; + pRmClient->lockStressCounter = 0; + pRmInternalClient->lockStressCounter = 0; + + return NV_OK; +} + +static NV_STATUS +updateLockStressCounters +( + LockStressObject *pResource, + NvU8 action +) +{ + // Perform increments/decrements as the "action" bitmask dictates + if (DRF_VAL(0100_CTRL, _GLOBAL_RMAPI, _LOCK_STRESS_COUNTER_ACTION, action) != 0) + { + // Assert that we hold the RW API lock here + NV_ASSERT_OR_RETURN(rmapiLockIsWriteOwner(), NV_ERR_INVALID_LOCK_STATE); + + if (DRF_VAL(0100_CTRL, _GLOBAL_RMAPI, _LOCK_STRESS_COUNTER_INCREMENT, action) != 0) + g_LockStressCounter++; + else + g_LockStressCounter--; + } + + if (DRF_VAL(0100_CTRL, _GPU, _LOCK_STRESS_COUNTER_ACTION, action) != 0) + { + OBJGPU *pGpu; + + NV_ASSERT_OR_RETURN(rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + pGpu = GPU_RES_GET_GPU(pResource); + + if (DRF_VAL(0100_CTRL, _GPU, _LOCK_STRESS_COUNTER_INCREMENT, action) != 0) + pGpu->lockStressCounter++; + else + pGpu->lockStressCounter--; + } + + if (DRF_VAL(0100_CTRL, _CLIENT, _LOCK_STRESS_COUNTER_ACTION, action) != 0) + { + RsClient *pClient = RES_GET_CLIENT(pResource); + RmClient *pRmClient; + + // + // Resource Server currently doesn't attempt to lock the internal client on the + // internal RM API path if we already hold a client lock to avoid risking lock + // ordering issues so only assert if we aren't on the internal RM API path. 
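Each lock-stress control communicates its work through the action byte: per counter, one ACTION bit selects whether the counter is touched and a paired INCREMENT bit selects the direction, which is the scheme updateLockStressCounters decodes. A standalone sketch with illustrative bit positions (not the NV0100 field layout):

#include <stdint.h>
#include <stdio.h>

#define GLOBAL_ACTION    (1u << 0)   // touch the global counter?
#define GLOBAL_INCREMENT (1u << 1)   // if so, +1 (set) or -1 (clear)
#define GPU_ACTION       (1u << 2)
#define GPU_INCREMENT    (1u << 3)

static void applyAction(uint8_t action, int *pGlobal, int *pGpu)
{
    if (action & GLOBAL_ACTION)
        *pGlobal += (action & GLOBAL_INCREMENT) ? 1 : -1;
    if (action & GPU_ACTION)
        *pGpu += (action & GPU_INCREMENT) ? 1 : -1;
}

int main(void)
{
    int global = 0, gpu = 0;
    applyAction(GLOBAL_ACTION | GLOBAL_INCREMENT | GPU_ACTION, &global, &gpu);
    printf("global=%d gpu=%d\n", global, gpu);   // global=1 gpu=-1
    return 0;
}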
+ // + if (!serverIsClientInternal(&g_resServ, pClient->hClient)) + NV_ASSERT_OR_RETURN(serverIsClientLocked(&g_resServ, pClient->hClient), NV_ERR_INVALID_LOCK_STATE); + + pRmClient = dynamicCast(pClient, RmClient); + + if (pRmClient == NULL) + return NV_ERR_INVALID_STATE; + + if (DRF_VAL(0100_CTRL, _CLIENT, _LOCK_STRESS_COUNTER_INCREMENT, action) != 0) + pRmClient->lockStressCounter++; + else + pRmClient->lockStressCounter--; + } + + if (DRF_VAL(0100_CTRL, _INTERNAL_CLIENT, _LOCK_STRESS_COUNTER_ACTION, action) != 0) + { + OBJGPU *pGpu; + RmClient *pRmInternalClient; + + NV_ASSERT_OR_RETURN(rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + pGpu = GPU_RES_GET_GPU(pResource); + pRmInternalClient = serverutilGetClientUnderLock(pGpu->hInternalLockStressClient); + + if (pRmInternalClient == NULL) + return NV_ERR_INVALID_STATE; + + if (DRF_VAL(0100_CTRL, _INTERNAL_CLIENT, _LOCK_STRESS_COUNTER_INCREMENT, action) + != 0) + { + pRmInternalClient->lockStressCounter++; + } + else + pRmInternalClient->lockStressCounter--; + } + + return NV_OK; +} + +NV_STATUS +lockStressObjCtrlCmdPerformLockStressAllRmLocks_IMPL +( + LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_ALL_RM_LOCKS_PARAMS *pParams +) +{ + NvU8 rand; + + // Perform random increments/decrements but report what we did back to caller + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, osGetRandomBytes(&rand, 1)); + + // This API has all locks so we can increment/decrement all counters + pParams->action = + (rand & DRF_SHIFTMASK(NV0100_CTRL_ALL_LOCK_STRESS_COUNTER_INCREMENT)) | + DRF_SHIFTMASK(NV0100_CTRL_GLOBAL_RMAPI_LOCK_STRESS_COUNTER_ACTION) | + DRF_SHIFTMASK(NV0100_CTRL_GPU_LOCK_STRESS_COUNTER_ACTION) | + DRF_SHIFTMASK(NV0100_CTRL_CLIENT_LOCK_STRESS_COUNTER_ACTION) | + DRF_SHIFTMASK(NV0100_CTRL_INTERNAL_CLIENT_LOCK_STRESS_COUNTER_ACTION); + + return updateLockStressCounters(pResource, pParams->action); +} + +NV_STATUS +lockStressObjCtrlCmdPerformLockStressNoGpusLock_IMPL +( + LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_PARAMS *pParams +) +{ + NvU8 rand; + + // Perform random increments/decrements but report what we did back to caller + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, osGetRandomBytes(&rand, 1)); + + // + // This API has all locks except the GPU lock so we can increment/decrement just + // the global counter and the per client counter. + // + // Internal clients can't be accessed without either the GPU lock or acquiring the + // internal client's lock (requires dual client locking). Until then, just assume + // we can only modify the global counter and the per client counter. + // + pParams->action = + (rand & DRF_SHIFTMASK(NV0100_CTRL_ALL_LOCK_STRESS_COUNTER_INCREMENT)) | + DRF_SHIFTMASK(NV0100_CTRL_GLOBAL_RMAPI_LOCK_STRESS_COUNTER_ACTION) | + DRF_SHIFTMASK(NV0100_CTRL_CLIENT_LOCK_STRESS_COUNTER_ACTION); + + return updateLockStressCounters(pResource, pParams->action); +} + +NV_STATUS +lockStressObjCtrlCmdPerformLockStressApiLockReadMode_IMPL +( + LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE_PARAMS *pParams +) +{ + NvU8 rand; + + // Perform random increments/decrements but report what we did back to caller + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, osGetRandomBytes(&rand, 1)); + + // + // This API takes the API lock in read mode so we can increment/decrement just the + // per GPU counter and the per client counter. 
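+    // The global counter is deliberately excluded here: updateLockStressCounters
+    // requires the RW API lock to touch it, and this control holds the API lock
+    // only in read mode, so two of these calls may run concurrently.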
+ // + pParams->action = + (rand & DRF_SHIFTMASK(NV0100_CTRL_ALL_LOCK_STRESS_COUNTER_INCREMENT)) | + DRF_SHIFTMASK(NV0100_CTRL_GPU_LOCK_STRESS_COUNTER_ACTION) | + DRF_SHIFTMASK(NV0100_CTRL_CLIENT_LOCK_STRESS_COUNTER_ACTION) | + DRF_SHIFTMASK(NV0100_CTRL_INTERNAL_CLIENT_LOCK_STRESS_COUNTER_ACTION); + + return updateLockStressCounters(pResource, pParams->action); +} + +NV_STATUS +lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode_IMPL +( + LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS *pParams +) +{ + NvU8 rand; + + // Perform random increments/decrements but report what we did back to caller + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, osGetRandomBytes(&rand, 1)); + + // + // This API takes the API lock in read mode and no GPU lock so we can only + // increment/decrement the per client counter. + // + pParams->action = + (rand & DRF_SHIFTMASK(NV0100_CTRL_ALL_LOCK_STRESS_COUNTER_INCREMENT)) | + DRF_SHIFTMASK(NV0100_CTRL_CLIENT_LOCK_STRESS_COUNTER_ACTION); + + return updateLockStressCounters(pResource, pParams->action); +} + +static NV_STATUS +updateLockStressCountersInternal +( + LockStressObject *pResource, + RM_API *pRmApi, + NvU32 internalCmd, + NvU8 *pAction +) +{ + NV0100_CTRL_LOCK_STRESS_OUTPUT internalParams; + + // Handle the external client updates here before making the internal RM API call + if (DRF_VAL(0100_CTRL, _CLIENT, _LOCK_STRESS_COUNTER_ACTION, *pAction) != 0) + { + RsClient *pClient = RES_GET_CLIENT(pResource); + RmClient *pRmClient = dynamicCast(pClient, RmClient); + + // + // Resource Server currently doesn't attempt to lock the internal client on the + // internal RM API path if we already hold a client lock to avoid risking lock + // ordering issues so only assert if we aren't on the internal RM API path. + // + if (!serverIsClientInternal(&g_resServ, pClient->hClient)) + NV_ASSERT_OR_RETURN(serverIsClientLocked(&g_resServ, pClient->hClient), NV_ERR_INVALID_LOCK_STATE); + + if (pRmClient == NULL) + return NV_ERR_INVALID_STATE; + + if (DRF_VAL(0100_CTRL, _CLIENT, _LOCK_STRESS_COUNTER_INCREMENT, *pAction) != 0) + pRmClient->lockStressCounter++; + else + pRmClient->lockStressCounter--; + } + + // Make internal control + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, pRmApi->Control(pRmApi, pResource->hInternalClient, + pResource->hInternalLockStressObject, internalCmd, + &internalParams, sizeof(internalParams))); + + // + // Capture how the internal control updated the counters. Don't capture the per + // client counter since the internal client isn't visible to the caller anyway. 
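+    // Worked example (illustrative values only): if the caller-visible *pAction
+    // arrived with just the CLIENT action/increment bits set, and the internal
+    // control reports internalParams.action with GLOBAL_RMAPI incremented and
+    // GPU decremented, the OR-with-mask below yields an action byte whose
+    // CLIENT bits are the caller's and whose GLOBAL/GPU/INTERNAL_CLIENT bits
+    // come from the internal control.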
+ // + *pAction |= + (DRF_SHIFTMASK(NV0100_CTRL_GLOBAL_RMAPI_LOCK_STRESS_COUNTER_ACTION) | + DRF_SHIFTMASK(NV0100_CTRL_GPU_LOCK_STRESS_COUNTER_ACTION) | + DRF_SHIFTMASK(NV0100_CTRL_INTERNAL_CLIENT_LOCK_STRESS_COUNTER_ACTION) | + DRF_SHIFTMASK(NV0100_CTRL_GLOBAL_RMAPI_LOCK_STRESS_COUNTER_INCREMENT) | + DRF_SHIFTMASK(NV0100_CTRL_GPU_LOCK_STRESS_COUNTER_INCREMENT) | + DRF_SHIFTMASK(NV0100_CTRL_INTERNAL_CLIENT_LOCK_STRESS_COUNTER_INCREMENT)) + & internalParams.action; + + return NV_OK; +} + +NV_STATUS +lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks_IMPL +( + LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_ALL_RM_LOCKS_PARAMS *pParams +) +{ + NvU8 rand; + NvU32 internalCmd = (FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID << 8); + + // Perform random increments/decrements but report what we did back to caller + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, osGetRandomBytes(&rand, 1)); + + // + // Let the randomly selected internal API update the counters other than the per + // client counter. + // + pParams->action = + ((rand & + (DRF_SHIFTMASK(NV0100_CTRL_CLIENT_LOCK_STRESS_COUNTER_INCREMENT))) | + (DRF_SHIFTMASK(NV0100_CTRL_CLIENT_LOCK_STRESS_COUNTER_ACTION))); + + // + // Pick a random control call to call internally between the following: + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_ALL_RM_LOCKS + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE + // + internalCmd |= ((rand & 3) + + NV0100_CTRL_PERFORM_LOCK_STRESS_ALL_RM_LOCKS_PARAMS_MESSAGE_ID); + + return updateLockStressCountersInternal(pResource, + rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL), + internalCmd, &pParams->action); +} + +NV_STATUS +lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock_IMPL +( + LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_PARAMS *pParams +) +{ + NvU8 rand; + NvU32 internalCmd = (FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID << 8); + + // Perform random increments/decrements but report what we did back to caller + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, osGetRandomBytes(&rand, 1)); + + // + // Let the randomly selected internal API update the counters other than the per + // client counter. 
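+    // How the internal command is composed below (a sketch, assuming the FINN
+    // convention that a ctrl cmd is (interface ID << 8) | message ID, and that
+    // the four PERFORM_LOCK_STRESS_* message IDs are consecutive):
+    //
+    //     internalCmd = (FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID << 8)
+    //                 + NV0100_CTRL_PERFORM_LOCK_STRESS_ALL_RM_LOCKS_PARAMS_MESSAGE_ID
+    //                 + (rand & 3);   // picks one of the four controls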
+ // + pParams->action = + ((rand & + (DRF_SHIFTMASK(NV0100_CTRL_CLIENT_LOCK_STRESS_COUNTER_INCREMENT))) | + (DRF_SHIFTMASK(NV0100_CTRL_CLIENT_LOCK_STRESS_COUNTER_ACTION))); + + // + // Pick a random control call to call internally between the following: + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_ALL_RM_LOCKS + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE + // + internalCmd |= ((rand & (NVBIT(2)- 1)) + + NV0100_CTRL_PERFORM_LOCK_STRESS_ALL_RM_LOCKS_PARAMS_MESSAGE_ID); + + return updateLockStressCountersInternal(pResource, + rmapiGetInterface(RMAPI_API_LOCK_INTERNAL), + internalCmd, &pParams->action); +} + +NV_STATUS +lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode_IMPL +( + LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_API_LOCK_READ_MODE_PARAMS *pParams +) +{ + NvU8 rand; + NvU32 internalCmd = (FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID << 8); + + // Perform random increments/decrements but report what we did back to caller + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, osGetRandomBytes(&rand, 1)); + + // + // Let the randomly selected internal API update the counters other than the per + // client counter. + // + pParams->action = + ((rand & + (DRF_SHIFTMASK(NV0100_CTRL_CLIENT_LOCK_STRESS_COUNTER_INCREMENT))) | + (DRF_SHIFTMASK(NV0100_CTRL_CLIENT_LOCK_STRESS_COUNTER_ACTION))); + + // + // Pick a random control call to call internally between the following: + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE + // We only have the RO API lock so we can't call: + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_ALL_RM_LOCKS + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK + // + internalCmd |= ((rand & NVBIT(0)) + + NV0100_CTRL_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE_PARAMS_MESSAGE_ID); + + return updateLockStressCountersInternal(pResource, + rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL), + internalCmd, &pParams->action); +} + +NV_STATUS +lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode_IMPL +( + LockStressObject *pResource, + NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS *pParams +) +{ + NvU8 rand; + NvU32 internalCmd = (FINN_LOCK_STRESS_OBJECT_LOCK_STRESS_INTERFACE_ID << 8); + + // Perform random increments/decrements but report what we did back to caller + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, osGetRandomBytes(&rand, 1)); + + // + // Let the randomly selected internal API update the counters other than the per + // client counter. 
+ // + pParams->action = + ((rand & + (DRF_SHIFTMASK(NV0100_CTRL_CLIENT_LOCK_STRESS_COUNTER_INCREMENT))) | + (DRF_SHIFTMASK(NV0100_CTRL_CLIENT_LOCK_STRESS_COUNTER_ACTION))); + // + // Pick a random control call to call internally between the following: + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE + // We only have the RO API lock so we can't call: + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_ALL_RM_LOCKS + // * NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK + // + internalCmd |= ((rand & NVBIT(0)) + + NV0100_CTRL_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE_PARAMS_MESSAGE_ID); + + return updateLockStressCountersInternal(pResource, + rmapiGetInterface(RMAPI_API_LOCK_INTERNAL), + internalCmd, &pParams->action); +} + +NV_STATUS +lockStressObjCtrlCmdGetLockStressCounters_IMPL +( + LockStressObject *pResource, + NV0100_CTRL_GET_LOCK_STRESS_COUNTERS_PARAMS *pParams +) +{ + RsClient *pClient = RES_GET_CLIENT(pResource); + RmClient *pRmClient = dynamicCast(pClient, RmClient); + RmClient *pRmInternalClient; + OBJGPU *pGpu = GPU_RES_GET_GPU(pResource); + + if (pRmClient == NULL) + return NV_ERR_INVALID_STATE; + + pRmInternalClient = serverutilGetClientUnderLock(pGpu->hInternalLockStressClient); + if (pRmInternalClient == NULL) + return NV_ERR_INVALID_STATE; + + // Fetch all lock stress counter values for user space caller + pParams->globalLockStressCounter = g_LockStressCounter; + + pParams->gpuLockStressCounter = pGpu->lockStressCounter; + pParams->clientLockStressCounter = pRmClient->lockStressCounter; + pParams->internalClientLockStressCounter = pRmInternalClient->lockStressCounter; + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/rmapi/lock_test.c b/src/nvidia/src/kernel/rmapi/lock_test.c new file mode 100644 index 0000000..a839a71 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/lock_test.c @@ -0,0 +1,103 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#include "core/locks.h" +#include "rmapi/client.h" +#include "rmapi/lock_test.h" +#include "rmapi/rs_utils.h" + +#include "gpu/gpu.h" + +#include "class/cl0101.h" + +#include "g_finn_rm_api.h" + +NV_STATUS +lockTestRelaxedDupObjConstruct_IMPL +( + LockTestRelaxedDupObject *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + OBJGPU *pParentGpu; + OBJSYS *pSys = SYS_GET_INSTANCE(); + RsResourceRef *pParentRef; + GpuResource *pParentGpuRes; + + // + // This is an off-by-default object since its only use is with testing. Return + // an error unless the RM test code registry key is turned on by the user. + // + if (!pSys->getProperty(pSys, PDB_PROP_SYS_ENABLE_RM_TEST_ONLY_CODE)) + return NV_ERR_TEST_ONLY_CODE_NOT_ENABLED; + + NV_ASSERT_OR_RETURN(pParams->pClient != NULL, NV_ERR_INVALID_PARAMETER); + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pParams->pClient, pParams->hParent, &pParentRef)); + NV_ASSERT_OR_RETURN(pParentRef != NULL, NV_ERR_INVALID_PARAMETER); + + pParentGpuRes = dynamicCast(pParentRef->pResource, GpuResource); + NV_ASSERT_OR_RETURN(pParentGpuRes != NULL, NV_ERR_INVALID_PARAMETER); + + pParentGpu = GPU_RES_GET_GPU(pParentGpuRes); + + if (RS_IS_COPY_CTOR(pParams)) + { + NvU32 gpuMask = 0; + + LockTestRelaxedDupObject *pSrcObj = dynamicCast(pParams->pSrcRef->pResource, LockTestRelaxedDupObject); + if (pSrcObj == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Invalid source object\n"); + return NV_ERR_INVALID_PARAMETER; + } + + if (pParentGpu == GPU_RES_GET_GPU(pSrcObj)) + { + NV_ASSERT_OR_RETURN( + rmGpuGroupLockIsOwner(pParentGpu->gpuInstance, GPU_LOCK_GRP_DEVICE, &gpuMask), + NV_ERR_INVALID_LOCK_STATE); + } + else + { + NV_ASSERT_OR_RETURN( + rmGpuGroupLockIsOwner(0, GPU_LOCK_GRP_ALL, &gpuMask), + NV_ERR_INVALID_LOCK_STATE); + } + + // Also verify the mask since all GPU lock is a superset of GPU device lock. + NV_ASSERT_OR_RETURN(gpuMask == rmGpuLocksGetOwnedMask(), NV_ERR_INVALID_LOCK_STATE); + } + + return NV_OK; +} + +void +lockTestRelaxedDupObjDestruct_IMPL +( + LockTestRelaxedDupObject *pResource +) +{ + return; +} diff --git a/src/nvidia/src/kernel/rmapi/mapping.c b/src/nvidia/src/kernel/rmapi/mapping.c new file mode 100644 index 0000000..e6359bf --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/mapping.c @@ -0,0 +1,535 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "core/core.h" +#include "core/locks.h" +#include "core/thread_state.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/device/device.h" +#include "class/cl0000.h" // NV01_NULL_OBJECT + +#include "rmapi/rs_utils.h" + +#include "entry_points.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/mem_mgr/mem_mgr.h" + +NV_STATUS +serverInterMap_Prologue +( + RsServer *pServer, + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RS_INTER_MAP_PARAMS *pParams, + NvU32 *pReleaseFlags +) +{ + OBJGPU *pGpu; + Device *pDevice; + Subdevice *pSubdevice; + NV_STATUS rmStatus = NV_OK; + NvU64 offset = pParams->offset; + NvU64 length = pParams->length; + NvU32 gpuMask = 0; + MEMORY_DESCRIPTOR *pSrcMemDesc = NULL; + NvHandle hBroadcastDevice; + NvBool bSubdeviceHandleProvided; + + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RsResourceRef *pDeviceRef = pCallContext->pContextRef; + RS_INTER_MAP_PRIVATE *pPrivate = pParams->pPrivate; + + NV_ASSERT_OR_RETURN(pPrivate != NULL, NV_ERR_INVALID_ARGUMENT); + + // Get pGpu, assuming user passed in either a device or subdevice handle. + pDevice = dynamicCast(pDeviceRef->pResource, Device); + if (pDevice == NULL) + { + pSubdevice = dynamicCast(pDeviceRef->pResource, Subdevice); + if (pSubdevice == NULL) + return NV_ERR_INVALID_OBJECT; + + pGpu = GPU_RES_GET_GPU(pSubdevice); + pDevice = GPU_RES_GET_DEVICE(pSubdevice); + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + hBroadcastDevice = RES_GET_HANDLE(pSubdevice->pDevice); + bSubdeviceHandleProvided = NV_TRUE; + pPrivate->gpuMask = NVBIT(gpuGetInstance(pGpu)); + } + else + { + pGpu = GPU_RES_GET_GPU(pDevice); + GPU_RES_SET_THREAD_BC_STATE(pDevice); + + hBroadcastDevice = pParams->hDevice; + bSubdeviceHandleProvided = NV_FALSE; + pPrivate->gpuMask = gpumgrGetGpuMask(pGpu); + } + + pPrivate->pGpu = pGpu; + + API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE); + + // Use virtual GetMemInterMapParams to get information needed for mapping from pMappableRef->pResource + RMRES_MEM_INTER_MAP_PARAMS memInterMapParams; + portMemSet(&memInterMapParams, 0, sizeof(memInterMapParams)); + + memInterMapParams.pGpu = pGpu; + memInterMapParams.pMemoryRef = pMappableRef; + memInterMapParams.bSubdeviceHandleProvided = bSubdeviceHandleProvided; + + rmStatus = rmresGetMemInterMapParams(dynamicCast(pMappableRef->pResource, RmResource), &memInterMapParams); + if (rmStatus != NV_OK) + return rmStatus; + + pSrcMemDesc = memInterMapParams.pSrcMemDesc; + NV_ASSERT_OR_RETURN(pSrcMemDesc != NULL, NV_ERR_INVALID_OBJECT_HANDLE); + + if (memInterMapParams.pGpu != NULL) + { + gpuMask |= gpumgrGetGpuMask(memInterMapParams.pGpu); + } + if (memInterMapParams.pSrcGpu != NULL) + { + gpuMask |= gpumgrGetGpuMask(memInterMapParams.pSrcGpu); + } + + rmStatus = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, pReleaseFlags, gpuMask); + if (rmStatus != NV_OK) + return rmStatus; + + pPrivate->pSrcGpu = memInterMapParams.pSrcGpu; + pPrivate->hMemoryDevice = memInterMapParams.hMemoryDevice; + pPrivate->bFlaMapping = memInterMapParams.bFlaMapping; + + // Check length for overflow and against the physical memory size. 
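+    // E.g. (illustrative numbers): offset = 0xFFFFFFFFFFFFF000 with
+    // length = 0x2000 wraps the NvU64 sum around to 0x1000, so the
+    // (offset + length) < offset test catches an overflow that the
+    // comparison against pSrcMemDesc->Size alone would miss.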
+ if (((offset + length) < offset) || + ((offset + length) > pSrcMemDesc->Size)) + { + NV_PRINTF(LEVEL_NOTICE, + "Mapping offset 0x%llX or length 0x%llX out of bounds!\n", + offset, length); + return NV_ERR_INVALID_LIMIT; + } + + if (memdescGetFlag(memdescGetMemDescFromGpu(pSrcMemDesc, pGpu), MEMDESC_FLAGS_DEVICE_READ_ONLY) && + !FLD_TEST_DRF(OS46, _FLAGS, _ACCESS, _READ_ONLY, pParams->flags)) + { + NV_PRINTF(LEVEL_NOTICE, "Attempting to map READ_ONLY surface as READ_WRITE / WRITE_ONLY!\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + pPrivate->hBroadcastDevice = hBroadcastDevice; + pPrivate->pSrcMemDesc = pSrcMemDesc; + pPrivate->bSubdeviceHandleProvided = bSubdeviceHandleProvided; + + return NV_OK; +} + +void +serverInterMap_Epilogue +( + RsServer *pServer, + RS_INTER_MAP_PARAMS *pParams, + NvU32 *pReleaseFlags +) +{ + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, pReleaseFlags); +} + +NV_STATUS +serverInterUnmap_Prologue +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + OBJGPU *pGpu = NULL; + Device *pDevice = NULL; + Subdevice *pSubdevice = NULL; + + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RsResourceRef *pDeviceRef = pCallContext->pContextRef; + + RS_INTER_UNMAP_PRIVATE *pPrivate = pParams->pPrivate; + + // Alloc pPrivate if not set, Unmap does not require any input into Prologue + if (pPrivate == NULL) + { + pPrivate = portMemAllocNonPaged(sizeof(*pPrivate)); + if (pPrivate == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(pPrivate, 0, sizeof(*pPrivate)); + pParams->pPrivate = pPrivate; + pPrivate->bAllocated = NV_TRUE; + } + + // Set subdevice or device context. + pDevice = dynamicCast(pDeviceRef->pResource, Device); + if (pDevice == NULL) + { + pSubdevice = dynamicCast(pDeviceRef->pResource, Subdevice); + if (pSubdevice == NULL) + return NV_ERR_INVALID_OBJECT; + + pGpu = GPU_RES_GET_GPU(pSubdevice); + pPrivate->bcState = gpumgrGetBcEnabledStatus(pGpu); + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + pPrivate->hBroadcastDevice = RES_GET_HANDLE(pSubdevice->pDevice); + pPrivate->bSubdeviceHandleProvided = NV_TRUE; + pPrivate->gpuMask = NVBIT(gpuGetInstance(pGpu)); + } + else + { + pGpu = GPU_RES_GET_GPU(pDevice); + pPrivate->bcState = gpumgrGetBcEnabledStatus(pGpu); + GPU_RES_SET_THREAD_BC_STATE(pDevice); + pPrivate->hBroadcastDevice = RES_GET_HANDLE(pDevice); + pPrivate->bSubdeviceHandleProvided = NV_FALSE; + pPrivate->gpuMask = gpumgrGetGpuMask(pGpu); + } + + pPrivate->pGpu = pGpu; + + API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_FALSE, NV_FALSE); + + return NV_OK; +} + +void +serverInterUnmap_Epilogue +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + RS_INTER_UNMAP_PRIVATE *pPrivate = pParams->pPrivate; + OBJGPU *pGpu; + + if (pPrivate == NULL) + return; + + pGpu = pPrivate->pGpu; + + if (pGpu != NULL) + { + gpumgrSetBcEnabledStatus(pGpu, pPrivate->bcState); + } + + if (pPrivate->bAllocated) + { + portMemFree(pPrivate); + pParams->pPrivate = NULL; + } +} + +static NV_STATUS +_rmapiRmUnmapMemoryDma +( + NVOS47_PARAMETERS *pParms, + RS_LOCK_INFO *pLockInfo, + API_SECURITY_INFO *pSecInfo +) +{ + RsClient *pRsClient = NULL; + + RS_INTER_UNMAP_PARAMS params; + RS_INTER_UNMAP_PRIVATE private; + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, pParms->hClient, &pRsClient)); + + portMemSet(¶ms, 0, sizeof(params)); + params.hClient = pParms->hClient; + params.hMapper = pParms->hDma; + params.hDevice = pParms->hDevice; + params.flags = pParms->flags; + params.dmaOffset = pParms->dmaOffset; + params.pLockInfo = 
pLockInfo; + params.pSecInfo = pSecInfo; + + params.size = pParms->size; + + portMemSet(&private, 0, sizeof(private)); + params.pPrivate = &private; + + return serverInterUnmap(&g_resServ, ¶ms); +} + +NV_STATUS +rmapiMap +( + RM_API *pRmApi, + NVOS46_PARAMETERS *pParms +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->MapWithSecInfo(pRmApi, pParms, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiMapWithSecInfo +( + RM_API *pRmApi, + NVOS46_PARAMETERS *pParms, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RS_INTER_MAP_PARAMS params; + RS_INTER_MAP_PRIVATE private; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04Map: client:0x%x device:0x%x context:0x%x memory:0x%x flags:0x%x flags2:0x%x\n", + pParms->hClient, pParms->hDevice, pParms->hDma, pParms->hMemory, pParms->flags, pParms->flags2); + NV_PRINTF(LEVEL_INFO, + "Nv04Map: offset:0x%llx length:0x%llx dmaOffset:0x%08llx\n", + pParms->offset, pParms->length, pParms->dmaOffset); + + NV_PRINTF(LEVEL_INFO, "MMU_PROFILER Nv04Map 0x%x\n", pParms->flags); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + return status; + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + status = rmapiInitLockInfo(pRmApi, pParms->hClient, NV01_NULL_OBJECT, &lockInfo); + if (status != NV_OK) + { + rmapiEpilogue(pRmApi, &rmApiContext); + return status; + } + + lockInfo.flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK | + RM_LOCK_FLAGS_NO_GPUS_LOCK; + + // + // In the RTD3 case, the API lock isn't taken since it can be initiated + // from another thread that holds the API lock and because we now hold + // the GPU lock. + // + if (rmapiInRtd3PmPath()) + { + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK; + lockInfo.state &= ~RM_LOCK_STATES_API_LOCK_ACQUIRED; + } + + LOCK_METER_DATA(MAPMEM_DMA, flags, 0, 0); + + + portMemSet(¶ms, 0, sizeof(params)); + params.hClient = pParms->hClient; + params.hMapper = pParms->hDma; + params.hDevice = pParms->hDevice; + params.hMappable = pParms->hMemory; + params.offset = pParms->offset; + params.length = pParms->length; + params.flags = pParms->flags; + params.flags2 = pParms->flags2; + params.kindOverride = pParms->kindOverride; + params.dmaOffset = pParms->dmaOffset; + params.pLockInfo = &lockInfo; + params.pSecInfo = pSecInfo; + + portMemSet(&private, 0, sizeof(private)); + params.pPrivate = &private; + + // map DMA memory + status = serverInterMap(&g_resServ, ¶ms); + + pParms->dmaOffset = params.dmaOffset; + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv04Map: map complete\n"); + NV_PRINTF(LEVEL_INFO, "Nv04Map: dmaOffset: 0x%08llx\n", pParms->dmaOffset); + } + else + { + NV_PRINTF(LEVEL_INFO, "Nv04Map: map failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + } + + return status; +} + +NV_STATUS +rmapiMapWithSecInfoTls +( + RM_API *pRmApi, + NVOS46_PARAMETERS *pParms, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiMapWithSecInfo(pRmApi, pParms, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +rmapiUnmap +( + RM_API *pRmApi, + NVOS47_PARAMETERS *pParms +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->UnmapWithSecInfo(pRmApi, pParms, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiUnmapWithSecInfo +( + RM_API *pRmApi, + NVOS47_PARAMETERS 
*pParms,
+    API_SECURITY_INFO *pSecInfo
+)
+{
+    NV_STATUS status;
+    RM_API_CONTEXT rmApiContext = {0};
+    RS_LOCK_INFO lockInfo;
+
+    NV_PRINTF(LEVEL_INFO,
+              "Nv04Unmap: client:0x%x device:0x%x context:0x%x\n",
+              pParms->hClient, pParms->hDevice, pParms->hDma);
+    NV_PRINTF(LEVEL_INFO, "Nv04Unmap: flags:0x%x dmaOffset:0x%08llx size:0x%llx\n",
+              pParms->flags, pParms->dmaOffset, pParms->size);
+
+    status = rmapiPrologue(pRmApi, &rmApiContext);
+    if (status != NV_OK)
+        return status;
+
+    portMemSet(&lockInfo, 0, sizeof(lockInfo));
+    status = rmapiInitLockInfo(pRmApi, pParms->hClient, NV01_NULL_OBJECT, &lockInfo);
+    if (status != NV_OK)
+    {
+        rmapiEpilogue(pRmApi, &rmApiContext);
+        return status;
+    }
+    lockInfo.flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK |
+                      RM_LOCK_FLAGS_NO_GPUS_LOCK;
+
+    LOCK_METER_DATA(UNMAPMEM_DMA, pParms->flags, 0, 0);
+
+    // Unmap DMA memory
+    status = _rmapiRmUnmapMemoryDma(pParms, &lockInfo, pSecInfo);
+
+    rmapiEpilogue(pRmApi, &rmApiContext);
+
+    if (status == NV_OK)
+    {
+        NV_PRINTF(LEVEL_INFO, "Nv04Unmap: Unmap complete\n");
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "Nv04Unmap: unmap failed; status: %s (0x%08x)\n",
+                  nvstatusToString(status), status);
+    }
+
+    return status;
+}
+
+NV_STATUS
+rmapiUnmapWithSecInfoTls
+(
+    RM_API *pRmApi,
+    NVOS47_PARAMETERS *pParms,
+    API_SECURITY_INFO *pSecInfo
+)
+{
+    THREAD_STATE_NODE threadState;
+    NV_STATUS status;
+
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    status = rmapiUnmapWithSecInfo(pRmApi, pParms, pSecInfo);
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    return status;
+}
+
+NV_STATUS
+serverInterMapLookupLockFlags
+(
+    RsServer *pServer,
+    RS_LOCK_ENUM lock,
+    RS_INTER_MAP_PARAMS *pParams,
+    LOCK_ACCESS_TYPE *pAccess
+)
+{
+    NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_INTER_MAP))
+                  ? LOCK_ACCESS_READ
+                  : LOCK_ACCESS_WRITE;
+    return NV_OK;
+}
+
+NV_STATUS
+serverInterUnmapLookupLockFlags
+(
+    RsServer *pServer,
+    RS_LOCK_ENUM lock,
+    RS_INTER_UNMAP_PARAMS *pParams,
+    LOCK_ACCESS_TYPE *pAccess
+)
+{
+    NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_INTER_UNMAP))
+                  ? LOCK_ACCESS_READ
+                  : LOCK_ACCESS_WRITE;
+    return NV_OK;
+}
+
+NV_STATUS
+serverUpdateLockFlagsForInterAutoUnmap
+(
+    RsServer *pServer,
+    RS_INTER_UNMAP_PARAMS *pParams
+)
+{
+    pParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK |
+                                 RM_LOCK_FLAGS_GPU_GROUP_LOCK;
+
+    return NV_OK;
+}
diff --git a/src/nvidia/src/kernel/rmapi/mapping_cpu.c b/src/nvidia/src/kernel/rmapi/mapping_cpu.c
new file mode 100644
index 0000000..b093ff3
--- /dev/null
+++ b/src/nvidia/src/kernel/rmapi/mapping_cpu.c
@@ -0,0 +1,986 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "core/locks.h" +#include "core/thread_state.h" +#include "os/os.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/generic_engine.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "class/cl0000.h" // NV01_NULL_OBJECT + +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" + +#include "rmapi/rs_utils.h" +#include "rmapi/mapping_list.h" +#include "entry_points.h" + +typedef struct RS_CPU_MAP_PARAMS RmMapParams; +typedef struct RS_CPU_UNMAP_PARAMS RmUnmapParams; + +NV_STATUS +rmapiMapGpuCommon +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RsCpuMapping *pCpuMapping, + OBJGPU *pGpu, + NvU32 regionOffset, + NvU32 regionSize +) +{ + NV_STATUS rmStatus; + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + NvU64 offset; + + // Validate the offset and limit passed in. + if (pCpuMapping->offset >= regionSize) + return NV_ERR_INVALID_BASE; + if (pCpuMapping->length == 0) + return NV_ERR_INVALID_LIMIT; + if ((pCpuMapping->offset + pCpuMapping->length > regionSize) || + !portSafeAddU64(pCpuMapping->offset, pCpuMapping->length, &offset)) + return NV_ERR_INVALID_LIMIT; + + if (!portSafeAddU64((NvU64)regionOffset, pCpuMapping->offset, &offset)) + return NV_ERR_INVALID_OFFSET; + + // Create a mapping of BAR0 + rmStatus = osMapGPU(pGpu, + rmclientGetCachedPrivilege(pClient), + offset, + pCpuMapping->length, + pCpuMapping->pPrivate->protect, + &pCpuMapping->pLinearAddress, + &pCpuMapping->pPrivate->pPriv); + return rmStatus; +} + + + +NV_STATUS +rmapiGetEffectiveAddrSpace +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 mapFlags, + NV_ADDRESS_SPACE *pAddrSpace +) +{ + // FIXME: Stub for openrm-orin, remove when openrm is merged + if (pAddrSpace != NULL) + *pAddrSpace = memdescGetAddressSpace(pMemDesc); + + return NV_OK; +} + +// Asserts to check caching type matches across sdk and nv_memory_types +ct_assert(NVOS33_FLAGS_CACHING_TYPE_CACHED == NV_MEMORY_CACHED); +ct_assert(NVOS33_FLAGS_CACHING_TYPE_UNCACHED == NV_MEMORY_UNCACHED); +ct_assert(NVOS33_FLAGS_CACHING_TYPE_WRITECOMBINED == NV_MEMORY_WRITECOMBINED); +ct_assert(NVOS33_FLAGS_CACHING_TYPE_WRITEBACK == NV_MEMORY_WRITEBACK); +ct_assert(NVOS33_FLAGS_CACHING_TYPE_DEFAULT == NV_MEMORY_DEFAULT); +ct_assert(NVOS33_FLAGS_CACHING_TYPE_UNCACHED_WEAK == NV_MEMORY_UNCACHED_WEAK); + +// +// Map memory entry points. +// +NV_STATUS +memMap_IMPL +( + Memory *pMemory, + CALL_CONTEXT *pCallContext, + RS_CPU_MAP_PARAMS *pMapParams, + RsCpuMapping *pCpuMapping +) +{ + OBJGPU *pGpu = NULL; + RmClient *pClient; + RsResourceRef *pContextRef; + RsResourceRef *pMemoryRef; + Memory *pMemoryInfo; // TODO: rename this field. pMemoryInfo is the legacy name. 
+                             // Name should make it clear how pMemoryInfo differs from pMemory
+    MEMORY_DESCRIPTOR *pMemDesc;
+    NvP64 priv = NvP64_NULL;
+    NV_STATUS rmStatus = NV_OK;
+    NV_ADDRESS_SPACE effectiveAddrSpace;
+    NvBool bBroadcast;
+    NvU64 mapLimit;
+    NvBool bIsSysmem = NV_FALSE;
+    NvBool bSkipSizeCheck = (DRF_VAL(OS33, _FLAGS, _SKIP_SIZE_CHECK, pMapParams->flags) ==
+                             NVOS33_FLAGS_SKIP_SIZE_CHECK_ENABLE);
+
+    NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED);
+
+    NV_ASSERT_OR_RETURN(pMapParams->pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT);
+    pContextRef = pMapParams->pLockInfo->pContextRef;
+    if (pContextRef != NULL)
+    {
+        NV_ASSERT_OK_OR_RETURN(gpuGetByRef(pContextRef, &bBroadcast, &pGpu));
+        gpuSetThreadBcState(pGpu, bBroadcast);
+    }
+
+    pClient = serverutilGetClientUnderLock(pMapParams->hClient);
+    NV_ASSERT_OR_ELSE(pClient != NULL, return NV_ERR_INVALID_CLIENT);
+    NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(staticCast(pClient, RsClient),
+                                                pMapParams->hMemory, &pMemoryRef));
+
+    pMemoryInfo = dynamicCast(pMemoryRef->pResource, Memory);
+    NV_ASSERT_OR_RETURN(pMemoryInfo != NULL, NV_ERR_NOT_SUPPORTED);
+    pMemDesc = pMemoryInfo->pMemDesc;
+
+    if (!pMapParams->bKernel &&
+        FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, pMemoryInfo->Attr2) &&
+        (pMapParams->protect != NV_PROTECT_READABLE))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    // Validate the offset and limit passed in.
+    if (pMapParams->offset >= pMemoryInfo->Length)
+    {
+        return NV_ERR_INVALID_BASE;
+    }
+    if (pMapParams->length == 0)
+    {
+        return NV_ERR_INVALID_LIMIT;
+    }
+
+    if (bSkipSizeCheck && (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL))
+    {
+        return NV_ERR_INSUFFICIENT_PERMISSIONS;
+    }
+
+    //
+    // See bug #140807 and #150889 - we need to pad memory mappings to past their
+    // actual allocation size (to PAGE_SIZE+1) because of a buggy ms function so
+    // skip the allocation size sanity check so the map operation still succeeds.
+    //
+    if (!portSafeAddU64(pMapParams->offset, pMapParams->length, &mapLimit) ||
+        (!bSkipSizeCheck && (mapLimit > pMemoryInfo->Length)))
+    {
+        return NV_ERR_INVALID_LIMIT;
+    }
+
+    if (pGpu != NULL)
+    {
+        NV_ASSERT_OK_OR_RETURN(rmapiGetEffectiveAddrSpace(pGpu, memdescGetMemDescFromGpu(pMemDesc, pGpu), pMapParams->flags, &effectiveAddrSpace));
+    }
+    else
+    {
+        effectiveAddrSpace = ADDR_SYSMEM;
+    }
+
+    bIsSysmem = (effectiveAddrSpace == ADDR_SYSMEM) || (effectiveAddrSpace == ADDR_EGM);
+
+    if (bIsSysmem)
+    {
+        // A client can specify not to map memory by default when
+        // calling into RmAllocMemory. In those cases, we don't have
+        // a mapping yet, so go ahead and map it for the client now.
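+        // Shape of the sysmem path that follows (a sketch of the roles, not a
+        // normative sequence; error handling elided):
+        //
+        //     memdescMap(pMemDesc, offset, length, bKernel, protect,
+        //                &cpuVirtAddr, &priv);    // build the CPU mapping
+        //     CliUpdateMemoryMappingInfo(...);    // book-keep it on the client
+        //
+        // If memdescMap() fails, nothing was book-kept, so there is no partial
+        // mapping to tear down here.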
+ rmStatus = memdescMap(pMemDesc, + pMapParams->offset, + pMapParams->length, + pMapParams->bKernel, + pMapParams->protect, + pMapParams->ppCpuVirtAddr, + &priv); + + // Associate this mapping with the client + if (rmStatus == NV_OK && *(pMapParams->ppCpuVirtAddr)) + { + pMapParams->flags = FLD_SET_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pMapParams->flags); + rmStatus = CliUpdateMemoryMappingInfo(pCpuMapping, + pMapParams->bKernel, + *(pMapParams->ppCpuVirtAddr), + priv, + pMapParams->length, + pMapParams->flags); + pCpuMapping->pPrivate->pGpu = pGpu; + } + } + else if (effectiveAddrSpace == ADDR_VIRTUAL) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + } + else if (effectiveAddrSpace == ADDR_REGMEM) + { + RS_PRIV_LEVEL privLevel; + + privLevel = rmclientGetCachedPrivilege(pClient); + if (!rmclientIsAdmin(pClient, privLevel) && + !memdescGetFlag(pMemDesc, MEMDESC_FLAGS_SKIP_REGMEM_PRIV_CHECK)) + { + return NV_ERR_PROTECTION_FAULT; + } + + if (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, pMapParams->flags) == NVOS33_FLAGS_MEM_SPACE_USER) + { + privLevel = RS_PRIV_LEVEL_USER; + } + + // Create a mapping of BAR0 + rmStatus = osMapGPU(pGpu, + privLevel, + pMapParams->offset + pMemDesc-> _pteArray[0], + pMapParams->length, + pMapParams->protect, + pMapParams->ppCpuVirtAddr, + &priv); + if (rmStatus != NV_OK) + return rmStatus; + + // Save off the mapping + rmStatus = CliUpdateDeviceMemoryMapping(pCpuMapping, + pMapParams->bKernel, + priv, + *(pMapParams->ppCpuVirtAddr), + pMapParams->length, + 0, // gpu virtual addr + 0, // gpu map length + pMapParams->flags); + pCpuMapping->pPrivate->pGpu = pGpu; + + if (rmStatus != NV_OK) + { + osUnmapGPU(pGpu->pOsGpuInfo, + privLevel, + *(pMapParams->ppCpuVirtAddr), + pMapParams->length, + priv); + return rmStatus; + } + } + else + { + return NV_ERR_INVALID_CLASS; + } + + if (rmStatus == NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "%s created. CPU Virtual Address: " NvP64_fmt "\n", + FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pMapParams->flags) ? 
"Direct mapping" : "Mapping", + *(pMapParams->ppCpuVirtAddr)); + } + + return rmStatus; +} + +NV_STATUS +memUnmap_IMPL +( + Memory *pMemory, + CALL_CONTEXT *pCallContext, + RsCpuMapping *pCpuMapping +) +{ + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + OBJGPU *pGpu = pCpuMapping->pPrivate->pGpu; + MEMORY_DESCRIPTOR *pMemDesc = pMemory->pMemDesc; + + if (FLD_TEST_DRF(OS33, _FLAGS, _OS_DESCRIPTOR, _ENABLE, pCpuMapping->flags)) + { + // Nothing more to do + } + // System Memory case + else if ((pGpu == NULL) || (((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) || + (memdescGetAddressSpace(pMemDesc) == ADDR_EGM)) && + FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pCpuMapping->flags))) + { + if (FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pCpuMapping->flags)) + { + memdescUnmap(pMemDesc, + pCpuMapping->pPrivate->bKernel, + pCpuMapping->pLinearAddress, + pCpuMapping->pPrivate->pPriv); + } + } + else if (memdescGetAddressSpace(pMemDesc) == ADDR_VIRTUAL) + { + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + } + else if (memdescGetAddressSpace(pMemDesc) == ADDR_REGMEM) + { + osUnmapGPU(pGpu->pOsGpuInfo, + rmclientGetCachedPrivilege(pClient), + pCpuMapping->pLinearAddress, + pCpuMapping->length, + pCpuMapping->pPrivate->pPriv); + } + return NV_OK; +} + +NV_STATUS +rmapiValidateKernelMapping +( + RS_PRIV_LEVEL privLevel, + NvU32 flags, + NvBool *pbKernel +) +{ + NvBool bKernel; + NV_STATUS status = NV_OK; + if (privLevel < RS_PRIV_LEVEL_KERNEL) + { + // only kernel clients should be specifying the user mapping flags + if (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, flags) == NVOS33_FLAGS_MEM_SPACE_USER) + status = NV_ERR_INVALID_FLAGS; + bKernel = NV_FALSE; + } + else + { + // + // Kernel clients can only use the persistent flag if they are + // doing a user mapping. + // + bKernel = (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, flags) == NVOS33_FLAGS_MEM_SPACE_CLIENT); + } + + // OS descriptor will already be mapped + if (FLD_TEST_DRF(OS33, _FLAGS, _OS_DESCRIPTOR, _ENABLE, flags)) + status = NV_ERR_INVALID_FLAGS; + + if (pbKernel != NULL) + *pbKernel = bKernel; + + return status; +} + +NV_STATUS +serverMap_Prologue +( + RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams +) +{ + NV_STATUS rmStatus; + RmClient *pClient; + RsResourceRef *pMemoryRef; + NvHandle hClient = pMapParams->hClient; + NvHandle hParent = hClient; + NvHandle hSubDevice = NV01_NULL_OBJECT; + NvBool bClientAlloc = (hClient == pMapParams->hDevice); + NvU32 flags = pMapParams->flags; + RS_PRIV_LEVEL privLevel; + + // Persistent sysmem mapping support is no longer supported + if (DRF_VAL(OS33, _FLAGS, _PERSISTENT, flags) == NVOS33_FLAGS_PERSISTENT_ENABLE) + return NV_ERR_INVALID_FLAGS; + + // Populate Resource Server information + pClient = serverutilGetClientUnderLock(hClient); + NV_ASSERT_OR_ELSE(pClient != NULL, return NV_ERR_INVALID_CLIENT); + + // Validate hClient + privLevel = rmclientGetCachedPrivilege(pClient); + + // RS-TODO: Assert if this fails after all objects are converted + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(staticCast(pClient, RsClient), + pMapParams->hMemory, &pMemoryRef)); + + if (pMemoryRef->pParentRef != NULL) + hParent = pMemoryRef->pParentRef->hResource; + + // check if we have a user or kernel RM client + rmStatus = rmapiValidateKernelMapping(privLevel, flags, &pMapParams->bKernel); + if (rmStatus != NV_OK) + return rmStatus; + + // + // First check to see if it is a standard device or the BC region of + // a MC adapter. 
+ // + pMapParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK; + if (!bClientAlloc) + { + NV_ASSERT_OR_RETURN(hParent != hClient, NV_ERR_INVALID_OBJECT_PARENT); + + RsResourceRef *pContextRef; + rmStatus = clientGetResourceRef(staticCast(pClient, RsClient), + pMapParams->hDevice, &pContextRef); + + if (rmStatus != NV_OK) + return rmStatus; + + if (pContextRef->internalClassId == classId(Device)) + { + } + else if (pContextRef->internalClassId == classId(Subdevice)) + { + hSubDevice = pMapParams->hDevice; + pMapParams->hDevice = pContextRef->pParentRef->hResource; + } + else + { + return NV_ERR_INVALID_OBJECT_PARENT; + } + + pMapParams->pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + pMapParams->pLockInfo->pContextRef = pContextRef; + } + else + { + NV_ASSERT_OR_RETURN(hParent == hClient, NV_ERR_INVALID_OBJECT_PARENT); + } + + pMapParams->hContext = (hSubDevice != NV01_NULL_OBJECT) + ? hSubDevice + : pMapParams->hDevice; + + + // convert from OS33 flags to RM's memory protection flags + switch (DRF_VAL(OS33, _FLAGS, _ACCESS, flags)) + { + case NVOS33_FLAGS_ACCESS_READ_WRITE: + pMapParams->protect = NV_PROTECT_READ_WRITE; + break; + case NVOS33_FLAGS_ACCESS_READ_ONLY: + pMapParams->protect = NV_PROTECT_READABLE; + break; + case NVOS33_FLAGS_ACCESS_WRITE_ONLY: + pMapParams->protect = NV_PROTECT_WRITEABLE; + break; + default: + return NV_ERR_INVALID_FLAGS; + } + + return NV_OK; +} + +NV_STATUS +serverUnmap_Prologue +( + RsServer *pServer, + RS_CPU_UNMAP_PARAMS *pUnmapParams +) +{ + OBJGPU *pGpu = NULL; + NV_STATUS rmStatus; + RmClient *pClient; + RsResourceRef *pMemoryRef; + NvHandle hClient = pUnmapParams->hClient; + NvHandle hParent = hClient; + NvHandle hMemory = pUnmapParams->hMemory; + NvBool bClientAlloc = (pUnmapParams->hDevice == pUnmapParams->hClient); + NvBool bKernel; + NvBool bBroadcast; + NvU32 ProcessId = pUnmapParams->processId; + RS_PRIV_LEVEL privLevel; + void *pProcessHandle = NULL; + + // Populate Resource Server information + pClient = serverutilGetClientUnderLock(hClient); + NV_ASSERT_OR_ELSE(pClient != NULL, return NV_ERR_INVALID_CLIENT); + + // check if we have a user or kernel RM client + privLevel = rmclientGetCachedPrivilege(pClient); + + // RS-TODO: Assert if this fails after all objects are converted + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(staticCast(pClient, RsClient), + hMemory, &pMemoryRef)); + + if (pMemoryRef->pParentRef != NULL) + hParent = pMemoryRef->pParentRef->hResource; + + // + // First check to see if it is a standard device or the BC region of + // a MC adapter. 
+ // + pUnmapParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK; + if (!bClientAlloc) + { + NV_ASSERT_OR_RETURN(hParent != hClient, NV_ERR_INVALID_OBJECT_PARENT); + + RsResourceRef *pContextRef; + rmStatus = clientGetResourceRef(staticCast(pClient, RsClient), + pUnmapParams->hDevice, &pContextRef); + + if (rmStatus != NV_OK) + return rmStatus; + + if (pContextRef->internalClassId == classId(Subdevice)) + { + pUnmapParams->hDevice = pContextRef->pParentRef->hResource; + } + else if (pContextRef->internalClassId != classId(Device)) + { + return NV_ERR_INVALID_OBJECT_PARENT; + } + + pUnmapParams->pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + pUnmapParams->pLockInfo->pContextRef = pContextRef; + NV_ASSERT_OK_OR_RETURN(gpuGetByRef(pUnmapParams->pLockInfo->pContextRef, &bBroadcast, &pGpu)); + gpuSetThreadBcState(pGpu, bBroadcast); + } + else + { + NV_ASSERT_OR_RETURN(hParent == hClient, NV_ERR_INVALID_OBJECT_PARENT); + } + + // Decide what sort of mapping it is, user or kernel + if (privLevel < RS_PRIV_LEVEL_KERNEL) + { + bKernel = NV_FALSE; + } + else + { + bKernel = (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, pUnmapParams->flags) == NVOS33_FLAGS_MEM_SPACE_CLIENT); + } + + // + // If it's a user mapping, and we're not currently in the same process that + // it's mapped into, then attempt to attach to the other process first. + // + if (!bKernel && (ProcessId != osGetCurrentProcess())) + { + rmStatus = osAttachToProcess(&pProcessHandle, ProcessId); + if (rmStatus != NV_OK) + { + if (pUnmapParams->bTeardown) + pProcessHandle = NULL; + else + return rmStatus; + } + + pUnmapParams->pProcessHandle = pProcessHandle; + } + + // Don't do any filtering if this is a tear-down path + if (pUnmapParams->bTeardown) + { + pUnmapParams->fnFilter = NULL; + return NV_OK; + } + + + pUnmapParams->fnFilter = bKernel + ? serverutilMappingFilterKernel + : serverutilMappingFilterCurrentUserProc; + + return NV_OK; +} + +void +serverUnmap_Epilogue +( + RsServer *pServer, + RS_CPU_UNMAP_PARAMS *pUnmapParams +) +{ + // do we need to detach? + if (pUnmapParams->pProcessHandle != NULL) + { + osDetachFromProcess(pUnmapParams->pProcessHandle); + pUnmapParams->pProcessHandle = NULL; + } +} + +NV_STATUS +rmapiMapToCpu +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + void **ppCpuVirtAddr, + NvU32 flags +) +{ + NvP64 pCpuVirtAddrNvP64 = NvP64_NULL; + NV_STATUS status; + + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + status = pRmApi->MapToCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, offset, length, + &pCpuVirtAddrNvP64, flags, &pRmApi->defaultSecInfo); + + if (ppCpuVirtAddr) + *ppCpuVirtAddr = NvP64_VALUE(pCpuVirtAddrNvP64); + + return status; +} + +/** + * Call into Resource Server to register and execute a CPU mapping operation. + * + * Resource Server will: + * 1. Callback into RM (serverMap_Prologue) to set up mapping parameters, mapping context object, + * and locking requirements + * 2. Take locks (if required) + * 3. Allocate and register a RsCpuMapping book-keeping entry on the target object's RsResourceRef + * 4. Call the target object's mapping virtual function (xxxMap_IMPL, defined in RM) + * 5. Setup back-references to the mapping context object (if required.) This mapping will automatically + * be unmapped if either the target object or mapping context object are freed. + * 6. 
Release any locks taken + */ +NV_STATUS +rmapiMapToCpuWithSecInfoV2 +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 *flags, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RmMapParams rmMapParams; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04MapMemory: client:0x%x device:0x%x memory:0x%x\n", hClient, + hDevice, hMemory); + NV_PRINTF(LEVEL_INFO, + "Nv04MapMemory: offset: %llx length: %llx flags:0x%x\n", + offset, length, *flags); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + return status; + + NV_PRINTF(LEVEL_INFO, "MMU_PROFILER Nv04MapMemory 0x%x\n", *flags); + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + status = rmapiInitLockInfo(pRmApi, hClient, NV01_NULL_OBJECT, &lockInfo); + if (status != NV_OK) + { + rmapiEpilogue(pRmApi, &rmApiContext); + return status; + } + + // + // In the RTD3 case, the API lock isn't taken since it can be initiated + // from another thread that holds the API lock and because we now hold + // the GPU lock. + // + if (rmapiInRtd3PmPath()) + { + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK; + lockInfo.state &= ~RM_LOCK_STATES_API_LOCK_ACQUIRED; + } + + LOCK_METER_DATA(MAPMEM, flags, 0, 0); + + // clear params for good measure + portMemSet(&rmMapParams, 0, sizeof (rmMapParams)); + + // load user args + rmMapParams.hClient = hClient; + rmMapParams.hDevice = hDevice; + rmMapParams.hMemory = hMemory; + rmMapParams.offset = offset; + rmMapParams.length = length; + rmMapParams.ppCpuVirtAddr = ppCpuVirtAddr; + rmMapParams.flags = *flags; + rmMapParams.pLockInfo = &lockInfo; + rmMapParams.pSecInfo = pSecInfo; + + status = serverMap(&g_resServ, rmMapParams.hClient, rmMapParams.hMemory, &rmMapParams); + + rmapiEpilogue(pRmApi, &rmApiContext); + + *flags = rmMapParams.flags; + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv04MapMemory: complete\n"); + NV_PRINTF(LEVEL_INFO, + "Nv04MapMemory: *ppCpuVirtAddr:" NvP64_fmt "\n", + *ppCpuVirtAddr); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "Nv04MapMemory: map failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + } + + return status; +} + +NV_STATUS +rmapiMapToCpuWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + return rmapiMapToCpuWithSecInfoV2(pRmApi, hClient, + hDevice, hMemory, offset, length, ppCpuVirtAddr, + &flags, pSecInfo); +} + +NV_STATUS +rmapiMapToCpuWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiMapToCpuWithSecInfoV2(pRmApi, hClient, hDevice, hMemory, offset, length, ppCpuVirtAddr, &flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} +NV_STATUS +rmapiMapToCpuWithSecInfoTlsV2 +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 *flags, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiMapToCpuWithSecInfoV2(pRmApi, hClient, 
hDevice, hMemory, offset, length, ppCpuVirtAddr, flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +rmapiUnmapFromCpu +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + void *pLinearAddress, + NvU32 flags, + NvU32 ProcessId +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->UnmapFromCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, NV_PTR_TO_NvP64(pLinearAddress), + flags, ProcessId, &pRmApi->defaultSecInfo); +} + +/** + * Call into Resource Server to execute a CPU unmapping operation. + * + * Resource Server will: + * 1. Callback into RM (serverUnmap_Prologue) to set up unmapping parameters, locking requirements, + * and attempt to attach to the mapping's user process (for user mappings only) + * 2. Take locks (if required) + * 3. Lookup the mapping + * 4. Call the target object's unmapping virtual function (xxxUnmap_IMPL, defined in RM) + * 5. Unregister the mapping from its back-references, and free the mapping + * 6. Callback into RM (serverUnmap_Epilogue) to detach from the mapping's user process (if required) + * 7. Release any locks taken + */ +NV_STATUS +rmapiUnmapFromCpuWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 pLinearAddress, + NvU32 flags, + NvU32 ProcessId, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RmUnmapParams rmUnmapParams; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04UnmapMemory: client:0x%x device:0x%x memory:0x%x pLinearAddr:" NvP64_fmt " flags:0x%x\n", + hClient, hDevice, hMemory, pLinearAddress, flags); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + return status; + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + status = rmapiInitLockInfo(pRmApi, hClient, NV01_NULL_OBJECT, &lockInfo); + if (status != NV_OK) + { + rmapiEpilogue(pRmApi, &rmApiContext); + return NV_OK; + } + + LOCK_METER_DATA(UNMAPMEM, flags, 0, 0); + + portMemSet(&rmUnmapParams, 0, sizeof (rmUnmapParams)); + rmUnmapParams.hClient = hClient; + rmUnmapParams.hDevice = hDevice; + rmUnmapParams.hMemory = hMemory; + rmUnmapParams.pLinearAddress = pLinearAddress; + rmUnmapParams.flags = flags; + rmUnmapParams.processId = ProcessId; + rmUnmapParams.pLockInfo = &lockInfo; + rmUnmapParams.pSecInfo = pSecInfo; + + status = serverUnmap(&g_resServ, hClient, hMemory, &rmUnmapParams); + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv04UnmapMemory: unmap complete\n"); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "Nv04UnmapMemory: unmap failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + } + + return status; +} + +NV_STATUS +rmapiUnmapFromCpuWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 pLinearAddress, + NvU32 flags, + NvU32 ProcessId, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiUnmapFromCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, pLinearAddress, + flags, ProcessId, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +serverMapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = 
(serverSupportsReadOnlyLock(pServer, lock, RS_API_MAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverUnmapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_UNMAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +refAllocCpuMappingPrivate +( + RS_CPU_MAP_PARAMS *pMapParams, + RsCpuMapping *pCpuMapping +) +{ + pCpuMapping->pPrivate = portMemAllocNonPaged(sizeof(RS_CPU_MAPPING_PRIVATE)); + if (pCpuMapping->pPrivate == NULL) + return NV_ERR_NO_MEMORY; + + pCpuMapping->pPrivate->protect = pMapParams->protect; + pCpuMapping->pPrivate->bKernel = pMapParams->bKernel; + + return NV_OK; +} + +void +refFreeCpuMappingPrivate +( + RsCpuMapping *pCpuMapping +) +{ + portMemFree(pCpuMapping->pPrivate); +} diff --git a/src/nvidia/src/kernel/rmapi/param_copy.c b/src/nvidia/src/kernel/rmapi/param_copy.c new file mode 100644 index 0000000..6d0cf85 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/param_copy.c @@ -0,0 +1,348 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "core/core.h" +#include "core/system.h" +#include "rmapi/rmapi.h" +#include "rmapi/param_copy.h" +#include "rmapi/alloc_size.h" +#include "rmapi/control.h" +#include "os/os.h" + +NV_STATUS rmapiParamsAcquire +( + RMAPI_PARAM_COPY *pParamCopy, + NvBool bUserModeArgs +) +{ + NvBool bUseParamsDirectly; + void *pKernelParams = NULL; + NV_STATUS rmStatus = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + + // Error check parameters + if (((pParamCopy->paramsSize != 0) && (pParamCopy->pUserParams == NvP64_NULL)) || + ((pParamCopy->paramsSize == 0) && (pParamCopy->pUserParams != NvP64_NULL)) || + !pParamCopy->bSizeValid) + { + NV_PRINTF(LEVEL_WARNING, + "%s: bad params from client: ptr " NvP64_fmt " size: 0x%x (%s)\n", + pParamCopy->msgTag, pParamCopy->pUserParams, pParamCopy->paramsSize, + pParamCopy->bSizeValid ? "valid" : "invalid"); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + bUseParamsDirectly = (pParamCopy->paramsSize == 0) || (!bUserModeArgs); + + // if we can use client params directly, we're done. 
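+    // For reference while reading the two branches below, the typical caller
+    // pattern is (a minimal sketch; only fields shown in this file are used,
+    // and initialization details may vary):
+    //
+    //     RMAPI_PARAM_COPY paramCopy;
+    //     void            *pKernelParams = NULL;
+    //
+    //     paramCopy.msgTag         = "myCtrl";   // hypothetical tag
+    //     paramCopy.pUserParams    = pUserParams;
+    //     paramCopy.paramsSize     = paramsSize;
+    //     paramCopy.bSizeValid     = NV_TRUE;
+    //     paramCopy.flags          = 0;          // or RMAPI_PARAM_COPY_FLAGS_*
+    //     paramCopy.ppKernelParams = &pKernelParams;
+    //
+    //     rmStatus = rmapiParamsAcquire(&paramCopy, bUserModeArgs); // copyin
+    //     ...use pKernelParams...
+    //     rmStatus = rmapiParamsRelease(&paramCopy);                // copyout + free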
+    if (bUseParamsDirectly)
+    {
+        if (pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS))
+        {
+            // Check that it's a kernel pointer
+            rmStatus = osIsKernelBuffer((void*)NvP64_VALUE(pParamCopy->pUserParams),
+                                        pParamCopy->paramsSize);
+            if (rmStatus != NV_OK)
+            {
+                NV_PRINTF(LEVEL_ERROR,
+                          "Error validating kernel pointer. Status 0x%x\n",
+                          rmStatus);
+                goto done;
+            }
+        }
+
+        pParamCopy->flags |= RMAPI_PARAM_COPY_FLAGS_IS_DIRECT_USAGE;
+        pKernelParams = NvP64_VALUE(pParamCopy->pUserParams);
+
+        if (pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER)
+            portMemSet(pKernelParams, 0, pParamCopy->paramsSize);
+
+        goto done;
+    }
+
+    if (!(pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK))
+    {
+        if (pParamCopy->paramsSize > RMAPI_PARAM_COPY_MAX_PARAMS_SIZE)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "(%s): Requested size exceeds max (%u > %u)\n",
+                      pParamCopy->msgTag, pParamCopy->paramsSize,
+                      RMAPI_PARAM_COPY_MAX_PARAMS_SIZE);
+            rmStatus = NV_ERR_INVALID_ARGUMENT;
+            goto done;
+        }
+    }
+
+    pKernelParams = portMemAllocNonPaged(pParamCopy->paramsSize);
+    if (pKernelParams == NULL)
+    {
+        rmStatus = NV_ERR_INSUFFICIENT_RESOURCES;
+        NV_PRINTF(LEVEL_WARNING, "(%s): portMemAllocNonPaged failure: status 0x%x\n",
+                  pParamCopy->msgTag, rmStatus);
+        goto done;
+    }
+
+    // Copyin unless directed otherwise
+    if (pParamCopy->pUserParams)
+    {
+        if (pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN)
+        {
+            if (pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER)
+                portMemSet(pKernelParams, 0, pParamCopy->paramsSize);
+        }
+        else
+        {
+            rmStatus = portMemExCopyFromUser(pParamCopy->pUserParams, pKernelParams, pParamCopy->paramsSize);
+            if (rmStatus != NV_OK)
+            {
+                NV_PRINTF(LEVEL_WARNING,
+                          "(%s): portMemExCopyFromUser failure: status 0x%x\n",
+                          pParamCopy->msgTag, rmStatus);
+                goto done;
+            }
+        }
+    }
+
+done:
+    if (rmStatus != NV_OK) // There was an error, be sure to free the buffer
+    {
+        if (pKernelParams != NULL)
+        {
+            portMemFree(pKernelParams);
+            pKernelParams = NULL;
+        }
+    }
+
+    NV_ASSERT(pParamCopy->ppKernelParams != NULL);
+    *(pParamCopy->ppKernelParams) = pKernelParams;
+    return rmStatus;
+}
+
+//
+// Copyout if needed and free any tmp param buffer
+// Skips copyout if RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT is set.
+//
+NV_STATUS rmapiParamsRelease
+(
+    RMAPI_PARAM_COPY *pParamCopy
+)
+{
+    NV_STATUS rmStatus = NV_OK;
+
+    // nothing to do, rmapiParamsAcquire() is either not called or not completed
+    if (NULL == pParamCopy->ppKernelParams)
+        return NV_OK;
+
+    // if using the client's buffer directly, there's nothing to do
+    if (pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_IS_DIRECT_USAGE)
+        goto done;
+
+    // if no kernel param ptr, there must be nothing to copy out
+    // This can only happen if rmapiParamsAcquire() returned an error,
+    // but we need to handle it since rmapiParamsRelease() might be
+    // called anyway.
+    if (NULL == *pParamCopy->ppKernelParams)
+        goto done;
+
+    // do the copyout if something to copy, unless told to skip it...
+    if (pParamCopy->pUserParams && !(pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT))
+
+//
+// Copyout if needed and free any tmp param buffer
+// Skips copyout if RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT is set.
+//
+NV_STATUS rmapiParamsRelease
+(
+    RMAPI_PARAM_COPY *pParamCopy
+)
+{
+    NV_STATUS rmStatus = NV_OK;
+
+    // nothing to do, rmapiParamsAcquire() is either not called or not completed
+    if (NULL == pParamCopy->ppKernelParams)
+        return NV_OK;
+
+    // if using the client's buffer directly, there's nothing to do
+    if (pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_IS_DIRECT_USAGE)
+        goto done;
+
+    // if no kernel param ptr, there must be nothing to copy out
+    // This can only happen if rmapiParamsAcquire() returned an error,
+    // but we need to handle it since rmapiParamsRelease() might be
+    // called anyway.
+    if (NULL == *pParamCopy->ppKernelParams)
+        goto done;
+
+    // do the copyout if there is something to copy, unless told to skip it...
+    if (pParamCopy->pUserParams &&
+        !(pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT))
+    {
+        rmStatus = portMemExCopyToUser(*(pParamCopy->ppKernelParams), pParamCopy->pUserParams, pParamCopy->paramsSize);
+        if (rmStatus != NV_OK)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "(%s): portMemExCopyToUser failure: status 0x%x\n",
+                      pParamCopy->msgTag, rmStatus);
+
+            // even if the copyout fails, we still need to free the kernel mem
+        }
+    }
+
+    portMemFree(*pParamCopy->ppKernelParams);
+
+done:
+    // no longer ok to use the ptr, even if it was a direct usage
+    *pParamCopy->ppKernelParams = NULL;
+    return rmStatus;
+}
+
+// This is a one-shot suitable for a case where we already have a kernel
+// buffer and just need to copy into it from a user buffer.
+// Not for general use...
+//
+// It uses the same logic as rmapiParamsAcquire(), but does not maintain
+// an RMAPI_PARAM_COPY container.
+NV_STATUS rmapiParamsCopyIn
+(
+    const char *msgTag,
+    void *pKernelParams,
+    NvP64 pUserParams,
+    NvU32 paramsSize,
+    NvBool bUserModeArgs
+)
+{
+    NV_STATUS rmStatus;
+
+    // error check parameters
+    if ((paramsSize == 0) ||
+        (pKernelParams == NULL) ||
+        (pUserParams == NvP64_NULL))
+    {
+        NV_PRINTF(LEVEL_WARNING,
+                  "(%s): bad params from client: ptr " NvP64_fmt " size: 0x%x\n",
+                  msgTag, pUserParams, paramsSize);
+
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    // if we can use client params directly, just memcpy()
+    if (bUserModeArgs == NV_FALSE)
+    {
+        // If it is the same ptr we can skip the memcpy
+        if (pKernelParams != NvP64_VALUE(pUserParams))
+        {
+            (void) portMemCopy(pKernelParams, paramsSize, NvP64_VALUE(pUserParams), paramsSize);
+        }
+        rmStatus = NV_OK;
+    }
+    else
+    {
+        rmStatus = portMemExCopyFromUser(pUserParams, pKernelParams, paramsSize);
+        if (rmStatus != NV_OK)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "(%s): portMemExCopyFromUser failure: status 0x%x\n",
+                      msgTag, rmStatus);
+        }
+    }
+
+    return rmStatus;
+}
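+
+// One-shot sketch (illustrative; MY_PARAMS and the tag are hypothetical):
+// when the kernel-side buffer already exists, a single call replaces the
+// whole acquire/release dance and nothing is allocated or released:
+//
+//     MY_PARAMS params;
+//     NV_STATUS status = rmapiParamsCopyIn("myTag", &params, pUserParams,
+//                                          sizeof(params), bUserModeArgs);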
+
+// This is a one-shot suitable for a case where we already have a kernel
+// buffer and just need to copy it out correctly to a user buffer.
+// Not for general use...
+//
+// It uses the same logic as rmapiParamsAcquire(), but does not maintain
+// an RMAPI_PARAM_COPY container.
+NV_STATUS rmapiParamsCopyOut
+(
+    const char *msgTag,
+    void *pKernelParams,
+    NvP64 pUserParams,
+    NvU32 paramsSize,
+    NvBool bUserModeArgs
+)
+{
+    NV_STATUS rmStatus;
+
+    // error check parameters
+    if ((paramsSize == 0) ||
+        (pKernelParams == NULL) ||
+        (pUserParams == NvP64_NULL))
+    {
+        NV_PRINTF(LEVEL_WARNING,
+                  "(%s): bad params from client: ptr " NvP64_fmt " size: 0x%x\n",
+                  msgTag, pUserParams, paramsSize);
+
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    // if we can use client params directly, just memcpy()
+    if (bUserModeArgs == NV_FALSE)
+    {
+        // If it is the same ptr we can skip the memcpy
+        if (pKernelParams != NvP64_VALUE(pUserParams))
+        {
+            (void) portMemCopy(NvP64_VALUE(pUserParams), paramsSize, pKernelParams, paramsSize);
+        }
+        rmStatus = NV_OK;
+    }
+    else
+    {
+        rmStatus = portMemExCopyToUser(pKernelParams, pUserParams, paramsSize);
+        if (rmStatus != NV_OK)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "(%s): portMemExCopyToUser failure: status 0x%x\n",
+                      msgTag, rmStatus);
+        }
+    }
+
+    return rmStatus;
+}
+
+NV_STATUS
+rmapiParamsCopyInit
+(
+    RMAPI_PARAM_COPY *pParamCopy,
+    NvU32 hClass
+)
+{
+    NV_STATUS status;
+    NvBool bAllowNull;
+
+    status = rmapiGetClassAllocParamSize(&pParamCopy->paramsSize,
+                                         pParamCopy->pUserParams,
+                                         &bAllowNull,
+                                         hClass);
+    if (status != NV_OK)
+        return status;
+
+    // NULL pUserParams is not allowed for the given class
+    if (bAllowNull == NV_FALSE && pParamCopy->pUserParams == NvP64_NULL)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    pParamCopy->bSizeValid = NV_TRUE;
+    return NV_OK;
+}
+
+
+#include "ctrl/ctrl0080/ctrl0080gpu.h"
+#include "ctrl/ctrl2080/ctrl2080gpu.h"
+static inline NV_STATUS _embeddedParamsCheck(RmCtrlParams *pRmCtrlParams)
+{
+    //
+    // These Orin controls have embedded params in them, so they can only be
+    // called by kernel clients
+    //
+    switch (pRmCtrlParams->cmd)
+    {
+        case NV0080_CTRL_CMD_GPU_GET_CLASSLIST:
+        case NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST:
+        case NV2080_CTRL_CMD_GPU_GET_ENGINES:
+        case NV2080_CTRL_CMD_GPU_GET_INFO:
+            NV_ASSERT_OR_RETURN(pRmCtrlParams->secInfo.paramLocation == PARAM_LOCATION_KERNEL, NV_ERR_INVALID_POINTER);
+    }
+    return NV_OK;
+}
+
+NV_STATUS embeddedParamCopyIn(RMAPI_PARAM_COPY *paramCopies, RmCtrlParams *pRmCtrlParams)
+{
+    return _embeddedParamsCheck(pRmCtrlParams);
+}
+NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *paramCopies, RmCtrlParams *pRmCtrlParams)
+{
+    return _embeddedParamsCheck(pRmCtrlParams);
+}
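+
+// "Embedded params" above means the parameter struct itself carries a
+// pointer to a second user buffer, e.g. (illustrative layout, not the real
+// SDK struct):
+//
+//     typedef struct
+//     {
+//         NvU32 numClasses;
+//         NvP64 classList;    // embedded pointer to a caller-owned array
+//     } EXAMPLE_CLASSLIST_PARAMS;
+//
+// A flat copyin of the struct cannot safely follow classList into user
+// space, so these commands are restricted to PARAM_LOCATION_KERNEL callers,
+// for whom the embedded pointer is already a kernel address.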
diff --git a/src/nvidia/src/kernel/rmapi/resource.c b/src/nvidia/src/kernel/rmapi/resource.c
new file mode 100644
index 0000000..ef20446
--- /dev/null
+++ b/src/nvidia/src/kernel/rmapi/resource.c
@@ -0,0 +1,311 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "resserv/rs_client.h"
+#include "resserv/rs_server.h"
+#include "rmapi/client.h"
+#include "rmapi/resource.h"
+#include "rmapi/rmapi.h"
+#include "rmapi/rmapi_utils.h"
+#include "rmapi/control.h"
+#include "ctrl/ctrlxxxx.h"
+#include "gpu/gpu_resource.h"
+#include "gpu/gpu.h"
+#include "gpu_mgr/gpu_mgr.h"
+#include "vgpu/rpc.h"
+#include "core/locks.h"
+
+NV_STATUS
+rmrescmnConstruct_IMPL
+(
+    RmResourceCommon *pResourceCommon
+)
+{
+    return NV_OK;
+}
+
+NV_STATUS
+rmresConstruct_IMPL
+(
+    RmResource *pResource,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    if (RS_IS_COPY_CTOR(pParams))
+    {
+        RmResource *pSrcResource = dynamicCast(pParams->pSrcRef->pResource, RmResource);
+
+        pResource->rpcGpuInstance = pSrcResource->rpcGpuInstance;
+        pResource->bRpcFree = pSrcResource->bRpcFree;
+    }
+    else
+    {
+        pResource->rpcGpuInstance = ~0;
+        pResource->bRpcFree = NV_FALSE;
+    }
+
+    return NV_OK;
+}
+
+NvBool
+rmresAccessCallback_IMPL
+(
+    RmResource *pResource,
+    RsClient *pInvokingClient,
+    void *pAllocParams,
+    RsAccessRight accessRight
+)
+{
+    NV_STATUS status;
+    RsResourceRef *pCliResRef;
+
+    status = clientGetResourceRef(RES_GET_CLIENT(pResource),
+                                  RES_GET_CLIENT_HANDLE(pResource),
+                                  &pCliResRef);
+
+    if (status == NV_OK)
+    {
+        // Allow access if the resource's owner would get the access right
+        if (resAccessCallback(pCliResRef->pResource, pInvokingClient, pAllocParams, accessRight))
+            return NV_TRUE;
+    }
+
+    // Delegate to superclass
+    return resAccessCallback_IMPL(staticCast(pResource, RsResource), pInvokingClient, pAllocParams, accessRight);
+}
+
+NvBool
+rmresShareCallback_IMPL
+(
+    RmResource *pResource,
+    RsClient *pInvokingClient,
+    RsResourceRef *pParentRef,
+    RS_SHARE_POLICY *pSharePolicy
+)
+{
+    NV_STATUS status;
+    RsResourceRef *pCliResRef;
+
+    //
+    // cliresShareCallback contains exceptions for the REQUIRE-type share
+    // policies of non-GpuResource classes, which we don't want to hit.
+    // ClientResource doesn't normally implement these share types anyway,
+    // so we're fine with skipping them.
+ // + switch (pSharePolicy->type) + { + case RS_SHARE_TYPE_SMC_PARTITION: + case RS_SHARE_TYPE_GPU: + { + // + // We do not want to lock down these GpuResource-specific require policies + // when the check cannot be applied for other resources, so add these checks + // as an alternative bypass for those policies + // + if ((pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) && + (NULL == dynamicCast(pResource, GpuResource))) + { + return NV_TRUE; + } + break; + } + case RS_SHARE_TYPE_FM_CLIENT: + { + RmClient *pSrcClient = dynamicCast(RES_GET_CLIENT(pResource), RmClient); + NvBool bSrcIsKernel = (pSrcClient != NULL) && (rmclientGetCachedPrivilege(pSrcClient) >= RS_PRIV_LEVEL_KERNEL); + + if (rmclientIsCapable(dynamicCast(pInvokingClient, RmClient), + NV_RM_CAP_EXT_FABRIC_MGMT) && !bSrcIsKernel) + { + return NV_TRUE; + } + break; + } + default: + { + status = clientGetResourceRef(RES_GET_CLIENT(pResource), + RES_GET_CLIENT_HANDLE(pResource), + &pCliResRef); + if (status == NV_OK) + { + // Allow sharing if the resource's owner would be shared with + if (resShareCallback(pCliResRef->pResource, pInvokingClient, + pParentRef, pSharePolicy)) + return NV_TRUE; + } + break; + } + } + + // Delegate to superclass + return resShareCallback_IMPL(staticCast(pResource, RsResource), + pInvokingClient, pParentRef, pSharePolicy); +} + +void serverControl_InitCookie +( + const struct NVOC_EXPORTED_METHOD_DEF *exportedEntry, + RmCtrlExecuteCookie *pRmCtrlExecuteCookie +) +{ + // Copy from NVOC exportedEntry + pRmCtrlExecuteCookie->cmd = exportedEntry->methodId; + pRmCtrlExecuteCookie->ctrlFlags = exportedEntry->flags; + // One time initialization of a const variable + *(NvU32 *)&pRmCtrlExecuteCookie->rightsRequired.limbs[0] + = exportedEntry->accessRight; +} + +NV_STATUS +rmresGetMemInterMapParams_IMPL +( + RmResource *pRmResource, + RMRES_MEM_INTER_MAP_PARAMS *pParams +) +{ + return NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +rmresCheckMemInterUnmap_IMPL +( + RmResource *pRmResource, + NvBool bSubdeviceHandleProvided +) +{ + return NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +rmresGetMemoryMappingDescriptor_IMPL +( + RmResource *pRmResource, + struct MEMORY_DESCRIPTOR **ppMemDesc +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +rmresControlSerialization_Prologue_IMPL +( + RmResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + OBJGPU *pGpu = gpumgrGetGpu(pResource->rpcGpuInstance); + + if (pGpu != NULL && + ((IS_VIRTUAL(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST) + ) || (IS_FW_CLIENT(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL)))) + { + return serverSerializeCtrlDown(pCallContext, pParams->cmd, &pParams->pParams, &pParams->paramsSize, &pParams->flags); + } + else + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, serverDeserializeCtrlDown(pCallContext, pParams->cmd, &pParams->pParams, &pParams->paramsSize, &pParams->flags)); + } + + return NV_OK; +} + +void +rmresControlSerialization_Epilogue_IMPL +( + RmResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + OBJGPU *pGpu = gpumgrGetGpu(pResource->rpcGpuInstance); + + if (pGpu != NULL && + ((IS_VIRTUAL(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST) + ) || (IS_FW_CLIENT(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL)))) + { + NV_ASSERT_OK(serverDeserializeCtrlUp(pCallContext, pParams->cmd, &pParams->pParams, &pParams->paramsSize, &pParams->flags)); + } + + 
NV_ASSERT_OK(serverSerializeCtrlUp(pCallContext, pParams->cmd, &pParams->pParams, &pParams->paramsSize, &pParams->flags)); + serverFreeSerializeStructures(pCallContext, pParams->pParams); +} + +NV_STATUS +rmresControl_Prologue_IMPL +( + RmResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = gpumgrGetGpu(pResource->rpcGpuInstance); + + if (pGpu != NULL && + ((IS_VIRTUAL(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST) + ) || (IS_FW_CLIENT(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL)))) + { + // + // GPU lock is required to protect the RPC buffers. + // However, some controls have ROUTE_TO_PHYSICAL + NO_GPUS_LOCK flags set. + // This is not valid in offload mode, but is in monolithic. + // In those cases, just acquire the lock for the RPC + // + GPU_MASK gpuMaskRelease = 0; + if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance)) + { + // + // Log any case where the above assumption is not true, but continue + // anyway. Use SAFE_LOCK_UPGRADE to try and recover in these cases. + // + NV_ASSERT(pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_NO_GPUS_LOCK); + NV_ASSERT_OK_OR_RETURN(rmGpuGroupLockAcquire(pGpu->gpuInstance, + GPU_LOCK_GRP_SUBDEVICE, + GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, + RM_LOCK_MODULES_RPC, + &gpuMaskRelease)); + } + + NV_RM_RPC_CONTROL(pGpu, pParams->hClient, pParams->hObject, pParams->cmd, + pParams->pParams, pParams->paramsSize, status); + + if (gpuMaskRelease != 0) + { + rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE); + } + + return (status == NV_OK) ? NV_WARN_NOTHING_TO_DO : status; + } + + return NV_OK; +} + +void +rmresControl_Epilogue_IMPL +( + RmResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ +} diff --git a/src/nvidia/src/kernel/rmapi/resource_desc.c b/src/nvidia/src/kernel/rmapi/resource_desc.c new file mode 100644 index 0000000..bd92ef4 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/resource_desc.c @@ -0,0 +1,218 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "core/core.h" +#include "resource_desc.h" + +// Need the full header for the class allocation param structure. 
+#define SDK_ALL_CLASSES_INCLUDE_FULL_HEADER
+#include "g_allclasses.h"
+
+#include "resource_list_required_includes.h"
+
+#include "rmapi/alloc_size.h"
+#include "rmapi/resource_fwd_decls.h"
+#include "resserv/rs_access_rights.h"
+
+//
+// Macros to transform the list into a static table
+//
+
+// NULL-terminated list
+#define RS_LIST(...) {__VA_ARGS__, 0}
+#define RS_ROOT_OBJECT {0}
+#define RS_ANY_PARENT {0}
+
+// Populate parents
+#define RS_ENTRY(cls, internalClass, bMultiInstance, parentList, allocParam, freePriority, flags, rightsRequired) \
+    NvU32 cls##ParentList[] = parentList;
+
+#include "resource_list.h"
+
+#undef RS_LIST
+#undef RS_ROOT_OBJECT
+#undef RS_ANY_PARENT
+
+
+#define RS_ACCESS_NONE {-1}
+#define RS_ACCESS_LIST(...) {__VA_ARGS__}
+
+// Populate rights required
+#define RS_ENTRY(cls, internalClass, bMultiInstance, parentList, allocParam, freePriority, flags, rightsRequired) \
+    static const RsAccessRight cls##_RightsRequiredArray[] = rightsRequired;
+
+#include "resource_list.h"
+
+#undef RS_ACCESS_NONE
+#undef RS_ACCESS_LIST
+
+// Populate forward declarations
+#define RS_ENTRY(cls, internalClass, bMultiInstance, parentList, allocParam, freePriority, flags, rightsRequired) \
+    extern const struct NVOC_CLASS_DEF __nvoc_class_def_##internalClass; /* defn here to keep POPULATE_STRUCT happy if the class is disabled */
+
+#include "resource_list.h"
+
+
+#define RS_REQUIRED(allocParam) sizeof(allocParam), NV_TRUE
+#define RS_OPTIONAL(allocParam) sizeof(allocParam), NV_FALSE
+#define RS_NONE 0, NV_FALSE
+#define RS_ENTRY(cls, internalClass, bMultiInstance, bAnyParent, allocParam, freePriority, flags, bRightsRequired) \
+{                                                                       \
+    cls,                                                                \
+    classId(internalClass),                                             \
+    classInfo(internalClass),                                           \
+    allocParam,                                                         \
+    bMultiInstance,                                                     \
+    bAnyParent,                                                         \
+    cls##ParentList,                                                    \
+    freePriority,                                                       \
+    flags,                                                              \
+    cls##_RightsRequiredArray,                                          \
+    bRightsRequired ? NV_ARRAY_ELEMENTS(cls##_RightsRequiredArray) : 0, \
+},
+
+#define RS_LIST(...) NV_FALSE
+#define RS_ROOT_OBJECT NV_FALSE
+#define RS_ANY_PARENT NV_TRUE
+#define RS_ACCESS_NONE NV_FALSE
+#define RS_ACCESS_LIST(...) NV_TRUE
+static RS_RESOURCE_DESC
+g_RsResourceDescList[] =
+{
+#include "resource_list.h"
+};
+#undef RS_LIST
+#undef RS_ROOT_OBJECT
+#undef RS_ANY_PARENT
+#undef RS_ACCESS_NONE
+#undef RS_ACCESS_LIST
+#undef RS_REQUIRED
+#undef RS_OPTIONAL
+#undef RS_NONE
+
+#define NUM_ENTRIES_DESC_LIST NV_ARRAY_ELEMENTS(g_RsResourceDescList)
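+
+// To make the X-macro mechanics above concrete: with the table-population
+// definition of RS_ENTRY in effect, the NV01_DEVICE_0 entry from
+// resource_list.h expands, schematically (illustrative, not literal
+// preprocessor output), to one RS_RESOURCE_DESC initializer:
+//
+//     {
+//         NV01_DEVICE_0,                     // externalClassId
+//         classId(Device),                   // internalClassId
+//         classInfo(Device),                 // pClassInfo
+//         sizeof(NV0080_ALLOC_PARAMETERS),   // allocParamSize (from RS_OPTIONAL)
+//         NV_FALSE,                          // bParamRequired (from RS_OPTIONAL)
+//         NV_TRUE,                           // bMultiInstance
+//         NV_FALSE,                          // bAnyParent (RS_LIST(...))
+//         NV01_DEVICE_0ParentList,           // built by the "Populate parents" pass
+//         RS_FREE_PRIORITY_DEFAULT,
+//         RS_FLAGS_ALLOC_NON_PRIVILEGED | ...,
+//         NV01_DEVICE_0_RightsRequiredArray, // built by the "Populate rights" pass
+//         0,                                 // RS_ACCESS_NONE => length 0
+//     },
+//
+// Each #include of resource_list.h thus walks the same entry list with a
+// different RS_ENTRY definition: parent lists first, then rights arrays,
+// then the descriptor table itself.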
+
+void RsResInfoInitialize(void)
+{
+    //
+    // Keep the array sorted by externalClassId, so we can binary search it.
+    // A simple O(n^2) exchange sort is fine here as the number of elements is
+    // below 300, and we only call this once on boot anyway.
+    //
+    NvU32 i, j;
+    for (i = 0; i < NUM_ENTRIES_DESC_LIST - 1; i++)
+    {
+        for (j = i + 1; j < NUM_ENTRIES_DESC_LIST; j++)
+        {
+            RS_RESOURCE_DESC *a = &g_RsResourceDescList[i];
+            RS_RESOURCE_DESC *b = &g_RsResourceDescList[j];
+
+            if (a->externalClassId > b->externalClassId)
+            {
+                RS_RESOURCE_DESC tmp;
+                portMemCopy(&tmp, sizeof(tmp), a, sizeof(*a));
+                portMemCopy(a, sizeof(*a), b, sizeof(*b));
+                portMemCopy(b, sizeof(*b), &tmp, sizeof(tmp));
+            }
+        }
+    }
+}
+
+RS_RESOURCE_DESC *
+RsResInfoByExternalClassId
+(
+    NvU32 externalClassId
+)
+{
+    NvU32 low = 0;
+    NvU32 high = NUM_ENTRIES_DESC_LIST;
+
+    // Binary search the array; if not found, the break in the middle is hit
+    while (1)
+    {
+        NvU32 mid = (low + high) / 2;
+
+        if (g_RsResourceDescList[mid].externalClassId == externalClassId)
+            return &g_RsResourceDescList[mid];
+
+        if (high == mid || low == mid)
+            break;
+
+        if (g_RsResourceDescList[mid].externalClassId > externalClassId)
+            high = mid;
+        else
+            low = mid;
+    }
+
+    return NULL;
+}
+
+NvU32 RsResInfoGetInternalClassId(const RS_RESOURCE_DESC *pResDesc)
+{
+    return pResDesc ? pResDesc->internalClassId : 0;
+}
+
+void RsResInfoGetResourceList(const RS_RESOURCE_DESC **ppResourceList, NvU32 *numResources)
+{
+    *ppResourceList = g_RsResourceDescList;
+    *numResources = NV_ARRAY_ELEMENTS(g_RsResourceDescList);
+}
+
+NV_STATUS
+rmapiGetClassAllocParamSize
+(
+    NvU32 *pAllocParamSizeBytes,
+    NvP64 pUserParams,
+    NvBool *pBAllowNull,
+    NvU32 hClass
+)
+{
+    RS_RESOURCE_DESC *pResDesc;
+
+    *pAllocParamSizeBytes = 0;
+    *pBAllowNull = NV_FALSE;
+
+    pResDesc = RsResInfoByExternalClassId(hClass);
+
+    if (!pResDesc)
+        return NV_ERR_INVALID_CLASS;
+
+    if (pResDesc->bParamRequired)
+    {
+        // params are required
+        *pAllocParamSizeBytes = pResDesc->allocParamSize;
+    }
+    else if (pResDesc->allocParamSize)
+    {
+        // params are *optional*
+        *pBAllowNull = NV_TRUE;
+        if (pUserParams != (NvP64) 0)
+            *pAllocParamSizeBytes = pResDesc->allocParamSize;
+    }
+    else
+    {
+        // no params
+        *pBAllowNull = NV_TRUE;
+    }
+
+    return NV_OK;
+}
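+
+// Typical consumption of the table (sketch; pUserParams is a caller-supplied
+// NvP64 and error handling is elided): resolve the descriptor for a
+// client-visible class, then size the alloc-param copyin from it, exactly as
+// rmapiParamsCopyInit() in param_copy.c does via rmapiGetClassAllocParamSize()
+// above:
+//
+//     NvU32  size;
+//     NvBool bAllowNull;
+//     RS_RESOURCE_DESC *pDesc = RsResInfoByExternalClassId(NV01_DEVICE_0);
+//
+//     if ((pDesc != NULL) &&
+//         (rmapiGetClassAllocParamSize(&size, pUserParams,
+//                                      &bAllowNull, NV01_DEVICE_0) == NV_OK))
+//     {
+//         // size is 0 when params are optional and the client passed NULL
+//     }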
diff --git a/src/nvidia/src/kernel/rmapi/resource_desc.h b/src/nvidia/src/kernel/rmapi/resource_desc.h
new file mode 100644
index 0000000..87f3037
--- /dev/null
+++ b/src/nvidia/src/kernel/rmapi/resource_desc.h
@@ -0,0 +1,63 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _RESOURCE_DESC_H_
+#define _RESOURCE_DESC_H_
+
+#include "nvtypes.h"
+#include "nvoc/object.h"
+#include "resserv/rs_access_rights.h"
+#include "resource_desc_flags.h"
+
+/**
+ * Information about an RsResource subclass.
+ */
+typedef struct RS_RESOURCE_DESC
+{
+    NvU32                  externalClassId;      ///< Id of the class as seen by the client
+    NvU32                  internalClassId;      ///< NVOC class ID, mirrored from pClassInfo->classId
+    const NVOC_CLASS_INFO *pClassInfo;           ///< RTTI information for internal class
+    NvU32                  allocParamSize;       ///< Size of allocation param structure
+    NvBool                 bParamRequired;       ///< If not required, param size can be 0 or allocParamSize
+    NvBool                 bMultiInstance;       ///< Multiple instances of this object under a parent
+    NvBool                 bAnyParent;           ///< Resource can be allocated under any parent
+    NvU32                 *pParentList;          ///< NULL-terminated list of internalClassId of parents
+    NvU32                  freePriority;         ///< RS_FREE_PRIORITY_*
+    NvU32                  flags;                ///< Flags
+    const RsAccessRight   *pRightsRequiredArray; ///< Access rights required to allocate this resource
+    NvLength               rightsRequiredLength; ///< Length of pRightsRequiredArray
+} RS_RESOURCE_DESC;
+
+/** Initialize the global resource info table */
+void RsResInfoInitialize(void);
+
+/**
+ * Look up an RS_RESOURCE_DESC using the externalClassId, i.e. the id of the
+ * class as seen by clients.
+ */
+RS_RESOURCE_DESC *RsResInfoByExternalClassId(NvU32 externalClassId);
+NvU32 RsResInfoGetInternalClassId(const RS_RESOURCE_DESC *);
+
+/** Get the global resource info table */
+void RsResInfoGetResourceList(const RS_RESOURCE_DESC **ppResourceList, NvU32 *numResources);
+
+#endif // _RESOURCE_DESC_H_
diff --git a/src/nvidia/src/kernel/rmapi/resource_desc_flags.h b/src/nvidia/src/kernel/rmapi/resource_desc_flags.h
new file mode 100644
index 0000000..76676b3
--- /dev/null
+++ b/src/nvidia/src/kernel/rmapi/resource_desc_flags.h
@@ -0,0 +1,94 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _RESOURCE_DESC_FLAGS_H_
+#define _RESOURCE_DESC_FLAGS_H_
+
+// Flags for RS_ENTRY
+#define RS_FLAGS_NONE                             0
+
+#define RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC       NVBIT(0)  ///< GPUs Lock is acquired on allocation
+#define RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE        NVBIT(1)  ///< GPUs Lock is acquired for free
+
+#define RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_ALLOC  NVBIT(2)  ///< GPU Group Lock is acquired on allocation
+#define RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_FREE   NVBIT(3)  ///< GPU Group Lock is acquired for free
+
+#define RS_FLAGS_ALLOC_RPC_TO_VGPU_HOST           NVBIT(4)  ///< Issue RPC to host to allocate resource for virtual GPUs
+
+#define RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC     NVBIT(5)  ///< Acquire the RO API lock for allocation; default is the RW API lock
+
+#define RS_FLAGS_ALLOC_RPC_TO_PHYS_RM             NVBIT(6)  ///< Issue RPC to allocate resource in physical RM
+
+#define RS_FLAGS_ALLOC_RPC_TO_ALL                 (RS_FLAGS_ALLOC_RPC_TO_VGPU_HOST | RS_FLAGS_ALLOC_RPC_TO_PHYS_RM)
+
+#define RS_FLAGS_INTERNAL_ONLY                    NVBIT(7)  ///< Class cannot be allocated outside of RM
+
+#define RS_FLAGS_CHANNEL_DESCENDANT_COMMON        (RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL)
+
+#define RS_FREE_PRIORITY_DEFAULT                  0
+#define RS_FREE_PRIORITY_HIGH                     1         ///< Resources with this priority will be freed ahead of others
+
+#define RS_FLAGS_ALLOC_NON_PRIVILEGED             NVBIT(8)  ///< Class is non-privileged
+
+#define RS_FLAGS_ALLOC_PRIVILEGED                 NVBIT(9)  ///< Class requires at least admin privilege
+
+#define RS_FLAGS_ALLOC_KERNEL_PRIVILEGED          NVBIT(10) ///< Class requires at least kernel privilege
+
+/**
+ * CPU_PLUGIN_FOR_* indicates that the object can be allocated in the
+ * respective environment if the context is at least cached-admin privileged
+ * - Cached-admin Hyper-V may access flagged privileged and kernel privileged objects
+ * - Other, runtime-admin hosts may access flagged kernel privileged objects
+ * - Note that runtime-admin hosts do not need the flag to allocate admin privileged objects
+ *
+ * GSP_PLUGIN_FOR_* is even stricter; any admin or kernel privileged object
+ * allocated in a VF context is required to have the flag or it will be rejected.
+ */
+#define RS_FLAGS_ALLOC_CPU_PLUGIN_FOR_SRIOV       NVBIT(11) ///< CPU-RM, SRIOV, vGPU-GSP disabled, hypervisor environment
+
+#define RS_FLAGS_ALLOC_CPU_PLUGIN_FOR_LEGACY      NVBIT(12) ///< CPU-RM, non-SRIOV or SRIOV-Heavy, hypervisor environment
+
+#define RS_FLAGS_ALLOC_GSP_PLUGIN_FOR_VGPU_GSP    NVBIT(13) ///< GSP-RM, SRIOV, vGPU-GSP enabled, VF context
+
+#define RS_FLAGS_ALLOC_ALL_VGPU_PLUGINS           (RS_FLAGS_ALLOC_CPU_PLUGIN_FOR_SRIOV | RS_FLAGS_ALLOC_CPU_PLUGIN_FOR_LEGACY | RS_FLAGS_ALLOC_GSP_PLUGIN_FOR_VGPU_GSP)
+
+#define RS_FLAGS_DUAL_CLIENT_LOCK                 NVBIT(14) ///< Class needs to lock two clients when being allocated; must update serverAllocLookupSecondClient in order to use
+
+#define RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_DUP         NVBIT(15) ///< GPUs Lock is acquired on dup
+
+#define RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_DUP    NVBIT(16) ///< GPU Group Lock is acquired for dup
+
+#define RS_FLAGS_ACQUIRE_GPUS_LOCK                (RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC | RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_DUP)
+#define RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK           (RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_ALLOC | RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_FREE | RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_DUP)
+
+#define RS_FLAGS_ACQUIRE_RELAXED_GPUS_LOCK_ON_DUP NVBIT(17) ///< If the object is a GPU resource and the src/dst GPUs are the same, take the GPU Group Lock; take the all-GPUs lock otherwise
+
+/**
+ * Use the RO API lock even if NV_REG_STR_RM_READONLY_API_LOCK_ALLOC_RESOURCE is not set.
+ *
+ * This flag is intended to be temporary, to allow explicit opt-in for the RO API
+ * lock while NV_REG_STR_RM_READONLY_API_LOCK_ALLOC/FREE_RESOURCE is not the
+ * default. Default enablement is tracked in bug 4283710.
+ *
+ * Note: NVBIT(17) is already taken by RS_FLAGS_ACQUIRE_RELAXED_GPUS_LOCK_ON_DUP
+ * above, so this flag uses the next free bit.
+ */
+#define RS_FLAGS_FORCE_ACQUIRE_RO_API_LOCK_ON_ALLOC_FREE NVBIT(18)
+
+#endif // _RESOURCE_DESC_FLAGS_H_
diff --git a/src/nvidia/src/kernel/rmapi/resource_list.h b/src/nvidia/src/kernel/rmapi/resource_list.h
new file mode 100644
index 0000000..449a325
--- /dev/null
+++ b/src/nvidia/src/kernel/rmapi/resource_list.h
@@ -0,0 +1,488 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+//
+// No include guards - this file is included multiple times, each time with a
+// different definition for RS_ENTRY
+//
+// Some of those definitions of RS_ENTRY may depend on declarations in various
+// other header files. Include "resource_list_required_includes.h" to pull them
+// in.
+//
+
+//
+// Table describing all RsResource subclasses.
+//
+// Internal Class - there is an RM internal class representing all classes
+// exported to RM clients. The internal name of the class should be similar to
+// the symbolic name used by clients. If there is ambiguity between RM internal
+// classes, e.g. between the PMU engine (OBJPMU) and the exported class, it's
+// recommended to use Api as the suffix to disambiguate; for example, OBJPMU
+// (the engine) vs PmuApi (the per-client api object). It's also recommended to
+// avoid using Object, Resource, etc. as those terms don't improve clarity.
+// If there is no ambiguity, there is no need to add the Api suffix; for example,
+// Channel is preferred over ChannelApi (there is no other Channel object in
+// RM).
+//
+// Multi-Instance - NV_TRUE if there can be multiple instances of this object's
+// *internal* class id under a parent.
+//
+// This list should eventually replace the similar lists in nvapi.c and
+// rmctrl.c. The number of fields in the table should be kept minimal, just
+// enough to create the object, with as much of the detail being specified
+// within the class itself.
+//
+// In the future we should consider switching to a registration approach or
+// generating with NVOC and/or annotating the class definition.
+// +// RS-TODO: Rename classes that have 'Object' in their names +// + + + +RS_ENTRY( + /* External Class */ NV01_ROOT, + /* Internal Class */ RmClientResource, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_ROOT_OBJECT, + /* Alloc Param Info */ RS_OPTIONAL(NvHandle), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC | RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_DUP | + RS_FLAGS_ALLOC_GSP_PLUGIN_FOR_VGPU_GSP, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_ROOT_NON_PRIV, + /* Internal Class */ RmClientResource, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_ROOT_OBJECT, + /* Alloc Param Info */ RS_OPTIONAL(NvHandle), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC | RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_DUP, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_ROOT_CLIENT, + /* Internal Class */ RmClientResource, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_ROOT_OBJECT, + /* Alloc Param Info */ RS_OPTIONAL(NvHandle), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC | RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_DUP, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_DEVICE_0, + /* Internal Class */ Device, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(RmClientResource)), + /* Alloc Param Info */ RS_OPTIONAL(NV0080_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_NON_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | + RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC | RS_FLAGS_ALLOC_GSP_PLUGIN_FOR_VGPU_GSP, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ GF100_HDACODEC, + /* Internal Class */ Hdacodec, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_NON_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | + RS_FLAGS_ALLOC_RPC_TO_ALL | RS_FLAGS_ALLOC_GSP_PLUGIN_FOR_VGPU_GSP, + /* Required Access Rights */ RS_ACCESS_NONE +) + /* Channels can have a CHANNEL_GROUP, a DEVICE, or a CONTEXT_SHARE (starting in Volta) as parents */ + /* RS-TODO: Update channel parent list when CONTEXT_SHARE is added */ +RS_ENTRY( + /* External Class */ NV20_SUBDEVICE_0, + /* Internal Class */ Subdevice, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_OPTIONAL(NV2080_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_NON_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | + RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC | RS_FLAGS_ALLOC_GSP_PLUGIN_FOR_VGPU_GSP, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_SYSTEM, + /* Internal Class */ SystemMemory, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_REQUIRED(NV_MEMORY_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_NON_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, + /* Internal Class */ OsDescMemory, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc 
Param Info */ RS_REQUIRED(NV_OS_DESC_MEMORY_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_NON_PRIVILEGED | RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_ALLOC | + RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_DUP | RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_FREE | + RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC | + RS_FLAGS_FORCE_ACQUIRE_RO_API_LOCK_ON_ALLOC_FREE, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_SYNCPOINT, + /* Internal Class */ SyncpointMemory, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_REQUIRED(NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_NON_PRIVILEGED | RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) + /* Subdevice Children: */ +RS_ENTRY( + /* External Class */ NVC671_DISP_SF_USER, + /* Internal Class */ DispSfUser, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC971_DISP_SF_USER, + /* Internal Class */ DispSfUser, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVCC71_DISP_SF_USER, + /* Internal Class */ DispSfUser, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) + /* Display classes: */ +RS_ENTRY( + /* External Class */ NVC670_DISPLAY, + /* Internal Class */ NvDispApi, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC970_DISPLAY, + /* Internal Class */ NvDispApi, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVCC70_DISPLAY, + /* Internal Class */ NvDispApi, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC372_DISPLAY_SW, + /* Internal Class */ DispSwObj, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | 
RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV04_DISPLAY_COMMON, + /* Internal Class */ DispCommon, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_NON_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC67A_CURSOR_IMM_CHANNEL_PIO, + /* Internal Class */ DispChannelPio, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC97A_CURSOR_IMM_CHANNEL_PIO, + /* Internal Class */ DispChannelPio, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVCC7A_CURSOR_IMM_CHANNEL_PIO, + /* Internal Class */ DispChannelPio, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC67B_WINDOW_IMM_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC67D_CORE_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC77F_ANY_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC67E_WINDOW_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ 
RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC673_DISP_CAPABILITIES, + /* Internal Class */ DispCapabilities, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC97B_WINDOW_IMM_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Right */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC97D_CORE_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Right */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC97E_WINDOW_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Right */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC973_DISP_CAPABILITIES, + /* Internal Class */ DispCapabilities, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) + +RS_ENTRY( + /* External Class */ NVCC73_DISP_CAPABILITIES, + /* Internal Class */ DispCapabilities, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVCC7B_WINDOW_IMM_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Right */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVCC7D_CORE_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | 
RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Right */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVCC7E_WINDOW_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Right */ RS_ACCESS_NONE +) + /* Classes allocated under channel: */ +RS_ENTRY( + /* External Class */ NV01_CONTEXT_DMA, + /* Internal Class */ ContextDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_ANY_PARENT, + /* Alloc Param Info */ RS_REQUIRED(NV_CONTEXT_DMA_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_NON_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_EVENT, + /* Internal Class */ Event, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_ANY_PARENT, + /* Alloc Param Info */ RS_REQUIRED(NV0005_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_NON_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | + RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC | RS_FLAGS_DUAL_CLIENT_LOCK | + RS_FLAGS_ALLOC_GSP_PLUGIN_FOR_VGPU_GSP, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_EVENT_OS_EVENT, + /* Internal Class */ Event, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_ANY_PARENT, + /* Alloc Param Info */ RS_REQUIRED(NV0005_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_NON_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | + RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC | RS_FLAGS_DUAL_CLIENT_LOCK | + RS_FLAGS_ALLOC_GSP_PLUGIN_FOR_VGPU_GSP, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_EVENT_KERNEL_CALLBACK, + /* Internal Class */ Event, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_ANY_PARENT, + /* Alloc Param Info */ RS_REQUIRED(NV0005_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_NON_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | + RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC | RS_FLAGS_DUAL_CLIENT_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_EVENT_KERNEL_CALLBACK_EX, + /* Internal Class */ Event, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_ANY_PARENT, + /* Alloc Param Info */ RS_REQUIRED(NV0005_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_NON_PRIVILEGED | RS_FLAGS_ACQUIRE_GPUS_LOCK | + RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC | RS_FLAGS_DUAL_CLIENT_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) + +RS_ENTRY( + /* External Class */ LOCK_STRESS_OBJECT, + /* Internal Class */ LockStressObject, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_NON_PRIVILEGED, + /* Required Access Rights */ RS_ACCESS_NONE +) + +RS_ENTRY( + /* External Class */ LOCK_TEST_RELAXED_DUP_OBJECT, + /* Internal Class */ LockTestRelaxedDupObject, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice), classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* 
Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_NON_PRIVILEGED | RS_FLAGS_ACQUIRE_RELAXED_GPUS_LOCK_ON_DUP | + RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC | RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_ALLOC | + RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_FREE, + /* Required Access Rights */ RS_ACCESS_NONE +) + +// Undefine the entry macro to simplify call sites +#undef RS_ENTRY diff --git a/src/nvidia/src/kernel/rmapi/resource_list_required_includes.h b/src/nvidia/src/kernel/rmapi/resource_list_required_includes.h new file mode 100644 index 0000000..7f07b24 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/resource_list_required_includes.h @@ -0,0 +1,48 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef RESOURCE_LIST_REQUIRED_INCLUDES_H +#define RESOURCE_LIST_REQUIRED_INCLUDES_H 1 + +// +// This file must be included wherever resource_list.h is included. It provides +// declarations for types that resource_list.h may depend on while honoring +// RMCFG at the same time. +// +// We cannot include the required files right above the RS_ENTRY definitions in +// resource_list.h because resource_list.h may be included in places that don't +// allow some declarations (example typedefs in an enum). +// + + + +// +// CORERM-3604 +// A lot of declarations are in nvos.h +// These can be assumed to not require RMCFG. These should be eventually split +// into individual files and nvos.h should be deprecated. +// +#include "nvos.h" + + +#endif /* ifndef RESOURCE_LIST_REQUIRED_INCLUDES_H */ diff --git a/src/nvidia/src/kernel/rmapi/rmapi.c b/src/nvidia/src/kernel/rmapi/rmapi.c new file mode 100644 index 0000000..ac5baaf --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/rmapi.c @@ -0,0 +1,1045 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "nvrm_registry.h" +#include "rmapi/rmapi.h" +#include "entry_points.h" +#include "resserv/rs_server.h" +#include "rmapi/rs_utils.h" +#include "gpu/gpu_resource.h" +#include "gpu/device/device.h" +#include "core/locks.h" +#include "gpu/gpu.h" +#include "diagnostics/tracer.h" +#include "tls/tls.h" +#include "core/thread_state.h" +#include "gpu_mgr/gpu_mgr.h" +#include "resource_desc.h" +#include "ctrl/ctrl0000/ctrl0000system.h" + +typedef struct +{ + PORT_RWLOCK * pLock; + NvU64 threadId; + NvU64 timestamp; + LOCK_TRACE_INFO traceInfo; + NvU64 tlsEntryId; + volatile NvU32 contentionCount; + NvU32 lowPriorityAging; + volatile NvU64 totalWaitTime; + volatile NvU64 totalRwHoldTime; + volatile NvU64 totalRoHoldTime; +} RMAPI_LOCK; + +RsServer g_resServ; +static RM_API g_RmApiList[RMAPI_TYPE_MAX]; +static NvBool g_bResServInit = NV_FALSE; +static RMAPI_LOCK g_RmApiLock; + +static NvU64 g_rtd3PmPathThreadId = ~0ULL; + +static void _rmapiInitInterface(RM_API *pRmApi, API_SECURITY_INFO *pDefaultSecurityInfo, NvBool bTlsInternal, + NvBool bApiLockInternal, NvBool bGpuLockInternal); +static NV_STATUS _rmapiLockAlloc(void); +static void _rmapiLockFree(void); + +// from rmapi_stubs.c +void rmapiInitStubInterface(RM_API *pRmApi); + +NV_STATUS +rmapiInitialize +( + void +) +{ + NV_STATUS status = NV_OK; + API_SECURITY_INFO secInfo = {0}; + + NV_ASSERT(!g_bResServInit); + + status = _rmapiLockAlloc(); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot allocate rmapi locks\n"); + goto failed; + } + + status = rmapiControlCacheInit(); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot initialize rmapi cache\n"); + goto failed_free_lock; + } + + RsResInfoInitialize(); + status = serverConstruct(&g_resServ, RS_PRIV_LEVEL_HOST, 0); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot initialize resource server\n"); + goto failed_free_cache; + } + + serverSetClientHandleBase(&g_resServ, RS_CLIENT_HANDLE_BASE); + + listInit(&g_clientListBehindGpusLock, g_resServ.pAllocator); + listInit(&g_userInfoList, g_resServ.pAllocator); + multimapInit(&g_osInfoList, g_resServ.pAllocator); + + secInfo.privLevel = RS_PRIV_LEVEL_KERNEL; + secInfo.paramLocation = PARAM_LOCATION_KERNEL; + + _rmapiInitInterface(&g_RmApiList[RMAPI_EXTERNAL], NULL, NV_FALSE /* bTlsInternal */, NV_FALSE /* bApiLockInternal */, NV_FALSE /* bGpuLockInternal */); + 
_rmapiInitInterface(&g_RmApiList[RMAPI_EXTERNAL_KERNEL], &secInfo, NV_FALSE /* bTlsInternal */, NV_FALSE /* bApiLockInternal */, NV_FALSE /* bGpuLockInternal */); + _rmapiInitInterface(&g_RmApiList[RMAPI_MODS_LOCK_BYPASS], &secInfo, NV_FALSE /* bTlsInternal */, NV_TRUE /* bApiLockInternal */, NV_TRUE /* bGpuLockInternal */); + _rmapiInitInterface(&g_RmApiList[RMAPI_API_LOCK_INTERNAL], &secInfo, NV_TRUE /* bTlsInternal */, NV_TRUE /* bApiLockInternal */, NV_FALSE /* bGpuLockInternal */); + _rmapiInitInterface(&g_RmApiList[RMAPI_GPU_LOCK_INTERNAL], &secInfo, NV_TRUE /* bTlsInternal */, NV_TRUE /* bApiLockInternal */, NV_TRUE /* bGpuLockInternal */); + + rmapiInitStubInterface(&g_RmApiList[RMAPI_STUBS]); + + g_bResServInit = NV_TRUE; + + return NV_OK; + +failed_free_cache: + rmapiControlCacheFree(); +failed_free_lock: + _rmapiLockFree(); +failed: + return status; +} + +void +rmapiShutdown +( + void +) +{ + if (!g_bResServInit) + return; + + serverFreeDomain(&g_resServ, 0); + serverDestruct(&g_resServ); + _rmapiLockFree(); + + rmapiControlCacheFree(); + + g_bResServInit = NV_FALSE; +} + +static void +_rmapiInitInterface +( + RM_API *pRmApi, + API_SECURITY_INFO *pDefaultSecInfo, + NvBool bTlsInternal, + NvBool bApiLockInternal, + NvBool bGpuLockInternal +) +{ + // + // Initialize to all stubs first, so any APIs not explicitly set here + // will return NV_ERR_NOT_SUPPORTED if called + // + rmapiInitStubInterface(pRmApi); + + // + // Init members + // + if (pDefaultSecInfo) + pRmApi->defaultSecInfo = *pDefaultSecInfo; + + pRmApi->bHasDefaultSecInfo = !!pDefaultSecInfo; + pRmApi->bTlsInternal = bTlsInternal; + pRmApi->bApiLockInternal = bApiLockInternal; + pRmApi->bRmSemaInternal = bApiLockInternal; + pRmApi->bGpuLockInternal = bGpuLockInternal; + pRmApi->pPrivateContext = NULL; + + // + // Init function pointers + // + pRmApi->Alloc = rmapiAlloc; + pRmApi->AllocWithHandle = rmapiAllocWithHandle; + pRmApi->AllocWithSecInfo = pRmApi->bTlsInternal ? rmapiAllocWithSecInfo : rmapiAllocWithSecInfoTls; + + pRmApi->DisableClients = rmapiDisableClients; + pRmApi->DisableClientsWithSecInfo = pRmApi->bTlsInternal ? rmapiDisableClientsWithSecInfo : rmapiDisableClientsWithSecInfoTls; + + pRmApi->Free = rmapiFree; + pRmApi->FreeWithSecInfo = pRmApi->bTlsInternal ? rmapiFreeWithSecInfo : rmapiFreeWithSecInfoTls; + + pRmApi->Control = rmapiControl; + pRmApi->ControlWithSecInfo = pRmApi->bTlsInternal ? rmapiControlWithSecInfo : rmapiControlWithSecInfoTls; + + pRmApi->DupObject = rmapiDupObject; + pRmApi->DupObjectWithSecInfo = pRmApi->bTlsInternal ? rmapiDupObjectWithSecInfo : rmapiDupObjectWithSecInfoTls; + + pRmApi->Share = rmapiShare; + pRmApi->ShareWithSecInfo = pRmApi->bTlsInternal ? rmapiShareWithSecInfo : rmapiShareWithSecInfoTls; + + pRmApi->MapToCpu = rmapiMapToCpu; + pRmApi->MapToCpuWithSecInfo = pRmApi->bTlsInternal ? rmapiMapToCpuWithSecInfo : rmapiMapToCpuWithSecInfoTls; + pRmApi->MapToCpuWithSecInfoV2 = pRmApi->bTlsInternal ? rmapiMapToCpuWithSecInfoV2 : rmapiMapToCpuWithSecInfoTlsV2; + + pRmApi->UnmapFromCpu = rmapiUnmapFromCpu; + pRmApi->UnmapFromCpuWithSecInfo = pRmApi->bTlsInternal ? rmapiUnmapFromCpuWithSecInfo : rmapiUnmapFromCpuWithSecInfoTls; + + pRmApi->Map = rmapiMap; + pRmApi->MapWithSecInfo = pRmApi->bTlsInternal ? rmapiMapWithSecInfo : rmapiMapWithSecInfoTls; + + pRmApi->Unmap = rmapiUnmap; + pRmApi->UnmapWithSecInfo = pRmApi->bTlsInternal ? 
rmapiUnmapWithSecInfo : rmapiUnmapWithSecInfoTls;
+}
+
+RM_API *
+rmapiGetInterface
+(
+    RMAPI_TYPE rmapiType
+)
+{
+    return &g_RmApiList[rmapiType];
+}
+
+static void
+_rmapiUnrefGpuAccessNeeded
+(
+    NvU32 gpuMask
+)
+{
+    NvU32 gpuInstance = 0;
+    OBJGPU *pGpu = NULL;
+
+    while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
+    {
+        osUnrefGpuAccessNeeded(pGpu->pOsGpuInfo);
+    }
+}
+
+static NV_STATUS
+_rmapiRefGpuAccessNeeded
+(
+    NvU32 *pGpuMask
+)
+{
+    NV_STATUS status = NV_OK;
+    NvU32 mask = 0;
+    NvU32 gpuInstance = 0;
+    OBJGPU *pGpu = NULL;
+
+    status = gpumgrGetGpuAttachInfo(NULL, &mask);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    while ((pGpu = gpumgrGetNextGpu(mask, &gpuInstance)) != NULL)
+    {
+        status = osRefGpuAccessNeeded(pGpu->pOsGpuInfo);
+        if (status != NV_OK)
+        {
+            goto unref;
+        }
+
+        /*
+         * _rmapiRefGpuAccessNeeded records the gpuMask during ref-up, and
+         * the same mask is used to unref exactly the same GPUs in
+         * _rmapiUnrefGpuAccessNeeded. This protects against obtaining an
+         * incorrect pGpu if the mask changes due to an RM API call made
+         * between the ref and the unref.
+         */
+        *pGpuMask |= (1 << pGpu->gpuInstance);
+    }
+
+unref:
+    if (status != NV_OK)
+    {
+        _rmapiUnrefGpuAccessNeeded(*pGpuMask);
+    }
+    return status;
+}
+
+NV_STATUS
+rmapiPrologue
+(
+    RM_API *pRmApi,
+    RM_API_CONTEXT *pContext
+)
+{
+    NV_STATUS status = NV_OK;
+    NvBool bApiLockTaken = NV_FALSE;
+    NvU32 mask;
+
+    NV_ASSERT_OR_RETURN(pRmApi != NULL, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(pContext != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    /*
+     * Check for external clients. This condition is checked here
+     * in order to avoid a check at all caller sites of
+     * rmapiPrologue. Effectively rmapiPrologue is a no-op for
+     * internal clients.
+     */
+    if (!pRmApi->bTlsInternal)
+    {
+        mask = osGetDynamicPowerSupportMask();
+        if (!mask)
+            return status;
+        /*
+         * NOTE1: Callers of rmapiPrologue/rmapiEpilogue may call them with
+         * or without the API lock taken. Hence, we check here whether the
+         * API lock has been taken, and take it if it is not held already.
+         * We obtain the pGpu by using the gpuMask in
+         * _rmapiRef{Unref}GpuAccessNeeded. This needs the API lock to be
+         * safe against init/teardown of GPUs while we ref/unref
+         * the GPUs. We release the lock after we have finished
+         * with ref/unref, if we had taken it.
+         */
+        if (!rmapiLockIsOwner())
+        {
+            status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_CLIENT);
+            if (status != NV_OK)
+            {
+                return status;
+            }
+            bApiLockTaken = NV_TRUE;
+        }
+        status = _rmapiRefGpuAccessNeeded(&pContext->gpuMask);
+        if (bApiLockTaken == NV_TRUE)
+        {
+            rmapiLockRelease();
+        }
+    }
+    return status;
+}
+
+void
+rmapiEpilogue
+(
+    RM_API *pRmApi,
+    RM_API_CONTEXT *pContext
+)
+{
+    NV_STATUS status = NV_OK;
+    NvBool bApiLockTaken = NV_FALSE;
+    NvU32 mask;
+
+    NV_ASSERT_OR_RETURN_VOID(pRmApi != NULL);
+    NV_ASSERT_OR_RETURN_VOID(pContext != NULL);
+
+    /*
+     * Check for external clients. This condition is checked here
+     * in order to avoid a check at all caller sites of
+     * rmapiEpilogue. Effectively rmapiEpilogue is a no-op for
+     * internal clients.
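+     *
+     * Each successful rmapiPrologue call is expected to be balanced by an
+     * rmapiEpilogue call with the same pContext: the gpuMask recorded during
+     * ref-up is what is used here to unref exactly the same set of GPUs.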
+ */ + if (!pRmApi->bTlsInternal) + { + mask = osGetDynamicPowerSupportMask(); + if (!mask) + return; + + /* Please see NOTE1 */ + if (!rmapiLockIsOwner()) + { + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_CLIENT); + if (status != NV_OK) + { + return; + } + bApiLockTaken = NV_TRUE; + } + + _rmapiUnrefGpuAccessNeeded(pContext->gpuMask); + + if (bApiLockTaken == NV_TRUE) + { + rmapiLockRelease(); + } + } +} + +NV_STATUS +rmapiInitLockInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hSecondClient, + RS_LOCK_INFO *pLockInfo +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_STATE); + pLockInfo->flags = 0; + pLockInfo->state = 0; + + if (hClient != 0) + { + if ((pCallContext != NULL) && (pCallContext->pLockInfo != NULL)) + { + pLockInfo->state = pCallContext->pLockInfo->state; + + if (!serverAllClientsLockIsOwner(&g_resServ)) + { + // If no clients are locked, then we need to acquire client locks + if (pCallContext->pLockInfo->pClient == NULL) + pLockInfo->state &= ~RM_LOCK_STATES_CLIENT_LOCK_ACQUIRED; + + // If we only need one client locked + else if (hSecondClient == NV01_NULL_OBJECT) + { + if (pCallContext->pLockInfo->pClient->hClient == hClient) + pLockInfo->pClient = pCallContext->pLockInfo->pClient; + else if ((pCallContext->pLockInfo->pSecondClient != NULL) && + (pCallContext->pLockInfo->pSecondClient->hClient == hClient)) + { + pLockInfo->pClient = pCallContext->pLockInfo->pSecondClient; + } + else + pLockInfo->state &= ~RM_LOCK_STATES_CLIENT_LOCK_ACQUIRED; + } + + // If we only have one client locked, but we need two + else if (pCallContext->pLockInfo->pSecondClient == NULL) + { + if ((pCallContext->pLockInfo->pClient->hClient == hClient) || + (pCallContext->pLockInfo->pClient->hClient == hSecondClient)) + { + pLockInfo->pClient = pCallContext->pLockInfo->pClient; + + // + // Special case: if both clients are the same - + // Set both pClient's so _serverLockDualClientWithLockInfo + // doesn't complain about the lock state being invalid. + // + if (hClient == hSecondClient) + pLockInfo->pSecondClient = pCallContext->pLockInfo->pClient; + } + else + pLockInfo->state &= ~RM_LOCK_STATES_CLIENT_LOCK_ACQUIRED; + } + + // If we need two clients locked, and already have two + else + { + // + // Check whether both clients match, keep the original order of the + // clients (dual client locking always locks the lower numbered client + // handle first). 
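+                    // For example (illustrative handles): with hClient
+                    // 0xc1d00002 and hSecondClient 0xc1d00001, client
+                    // 0xc1d00001's lock is taken first regardless of the
+                    // argument order.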
+                    //
+                    if (((pCallContext->pLockInfo->pClient->hClient == hClient) &&
+                         (pCallContext->pLockInfo->pSecondClient->hClient ==
+                          hSecondClient)) ||
+                        ((pCallContext->pLockInfo->pClient->hClient == hSecondClient) &&
+                         (pCallContext->pLockInfo->pSecondClient->hClient == hClient)))
+                    {
+                        pLockInfo->pClient = pCallContext->pLockInfo->pClient;
+                        pLockInfo->pSecondClient = pCallContext->pLockInfo->pSecondClient;
+                    }
+
+                    // Check whether one client handle matches
+                    else if ((pCallContext->pLockInfo->pClient->hClient == hClient) ||
+                             (pCallContext->pLockInfo->pClient->hClient == hSecondClient))
+                    {
+                        pLockInfo->pClient = pCallContext->pLockInfo->pClient;
+                        pLockInfo->state &= ~RM_LOCK_STATES_CLIENT_LOCK_ACQUIRED;
+                    }
+                    else if ((pCallContext->pLockInfo->pSecondClient->hClient ==
+                              hClient) ||
+                             (pCallContext->pLockInfo->pSecondClient->hClient ==
+                              hSecondClient))
+                    {
+                        pLockInfo->pClient = pCallContext->pLockInfo->pSecondClient;
+                        pLockInfo->state &= ~RM_LOCK_STATES_CLIENT_LOCK_ACQUIRED;
+                    }
+                    else
+                        pLockInfo->state &= ~RM_LOCK_STATES_CLIENT_LOCK_ACQUIRED;
+                }
+            }
+        }
+    }
+
+    if (!pRmApi->bRmSemaInternal)
+        pLockInfo->flags |= RM_LOCK_FLAGS_RM_SEMA;
+
+    if (pRmApi->bApiLockInternal)
+    {
+        pLockInfo->state |= RM_LOCK_STATES_API_LOCK_ACQUIRED;
+
+        //
+        // Don't acquire client locks if we already hold the API lock since we might've
+        // already acquired RM locks that are ordered after client locks (such as higher numbered
+        // client/GPU locks) and don't want to violate RM lock ordering.
+        //
+        if (rmapiLockIsOwner())
+        {
+            pLockInfo->flags |= RM_LOCK_FLAGS_NO_CLIENT_LOCK;
+        }
+    }
+
+    if (pRmApi->bGpuLockInternal)
+        pLockInfo->state |= RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS;
+
+    return NV_OK;
+}
+
+static NV_STATUS
+_rmapiLockAlloc(void)
+{
+    // Turn on by default for Linux to get some soak time
+    // bug 2539044, bug 2536036: Enable by default.
+    g_resServ.bUnlockedParamCopy = NV_TRUE;
+
+    NvU32 val = 0;
+
+    //
+    // Reset the lock state before reading any registry overrides so that
+    // the overrides (lowPriorityAging in particular) are not clobbered.
+    //
+    portMemSet(&g_RmApiLock, 0, sizeof(g_RmApiLock));
+
+    if ((osReadRegistryDword(NULL,
+                             NV_REG_STR_RM_LOCKING_LOW_PRIORITY_AGING,
+                             &val) == NV_OK))
+    {
+        g_RmApiLock.lowPriorityAging = val;
+    }
+
+    if ((osReadRegistryDword(NULL,
+                             NV_REG_STR_RM_PARAM_COPY_NO_LOCK,
+                             &val) == NV_OK))
+    {
+        g_resServ.bUnlockedParamCopy = (val != 0);
+    }
+
+    g_RmApiLock.threadId = ~((NvU64)(0));
+    g_RmApiLock.pLock = portSyncRwLockCreate(portMemAllocatorGetGlobalNonPaged());
+    if (g_RmApiLock.pLock == NULL)
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+
+    g_RmApiLock.tlsEntryId = tlsEntryAlloc();
+
+    return NV_OK;
+}
+
+static void
+_rmapiLockFree(void)
+{
+    portSyncRwLockDestroy(g_RmApiLock.pLock);
+}
+
+NV_STATUS
+rmapiLockAcquire(NvU32 flags, NvU32 module)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    NV_STATUS rmStatus = NV_OK;
+    NvU64 threadId = portThreadGetCurrentThreadId();
+
+    NvU64 myPriority = 0;
+    NvU64 startWaitTime = 0;
+
+    // Make sure lock has been created
+    NV_CHECK_OR_RETURN(LEVEL_ERROR, g_RmApiLock.pLock != NULL, NV_ERR_NOT_READY);
+
+    NV_ASSERT_OR_RETURN(!rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE);
+
+    // Ensure that GPU locks are NEVER acquired before the API lock
+    NV_ASSERT_OR_RETURN(rmGpuLocksGetOwnedMask() == 0, NV_ERR_INVALID_LOCK_STATE);
+
+    //
+    // If a read-only lock was requested, check to see if the module is allowed
+    // to take read-only locks or the _FORCE flag was enabled and set.
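+    // For instance (an illustrative case): a caller passing
+    // RMAPI_LOCK_FLAGS_READ with RM_LOCK_MODULES_CLIENT keeps the shared
+    // lock only if that module's group bit is set in pSys->apiLockModuleMask
+    // (or _FORCE is requested and PDB_PROP_SYS_ENABLE_FORCE_SHARED_LOCK is
+    // set); otherwise the request silently falls back to the exclusive lock.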
+ // + if ((flags & RMAPI_LOCK_FLAGS_READ) && (module != RM_LOCK_MODULES_NONE)) + { + if ((((flags & RMAPI_LOCK_FLAGS_READ_FORCE) == 0) || + !pSys->getProperty(pSys, PDB_PROP_SYS_ENABLE_FORCE_SHARED_LOCK)) && + ((pSys->apiLockModuleMask & RM_LOCK_MODULE_GRP(module)) == 0)) + { + flags &= ~RMAPI_LOCK_FLAGS_READ; + } + } + + // Get start wait time measuring lock wait times + if (pSys->getProperty(pSys, PDB_PROP_SYS_RM_LOCK_TIME_COLLECT)) + startWaitTime = osGetMonotonicTimeNs(); + + // + // For conditional acquires and DISPATCH_LEVEL we want to exit + // immediately without waiting. + // + // If RM Locking V3 Lite is not enabled, *always* acquire the API + // lock in WRITE mode to ensure compatibility with Locking model V2 + // behavior (providing exclusive access to the resource). + // + flags = osApiLockAcquireConfigureFlags(flags); + if (flags & API_LOCK_FLAGS_COND_ACQUIRE) + { + if ((flags & RMAPI_LOCK_FLAGS_READ)) + { + if (!portSyncRwLockAcquireReadConditional(g_RmApiLock.pLock)) + rmStatus = NV_ERR_TIMEOUT_RETRY; + } + else + { + // Conditional acquires don't care about contention or priority + if (portSyncRwLockAcquireWriteConditional(g_RmApiLock.pLock)) + { + g_RmApiLock.threadId = threadId; + } + else + { + rmStatus = NV_ERR_TIMEOUT_RETRY; + } + } + } + else + { + if ((flags & RMAPI_LOCK_FLAGS_READ)) + { + portSyncRwLockAcquireRead(g_RmApiLock.pLock); + } + else + { + + if (flags & RMAPI_LOCK_FLAGS_LOW_PRIORITY) + { + NvS32 age = g_RmApiLock.lowPriorityAging; + + portSyncRwLockAcquireWrite(g_RmApiLock.pLock); + while ((g_RmApiLock.contentionCount > 0) && (age--)) + { + portSyncRwLockReleaseWrite(g_RmApiLock.pLock); + osDelay(10); + portSyncRwLockAcquireWrite(g_RmApiLock.pLock); + } + } + else + { + portAtomicIncrementU32(&g_RmApiLock.contentionCount); + portSyncRwLockAcquireWrite(g_RmApiLock.pLock); + portAtomicDecrementU32(&g_RmApiLock.contentionCount); + } + g_RmApiLock.threadId = threadId; + } + } + + + if (rmStatus == NV_OK) + { + NvU64 timestamp; + timestamp = osGetMonotonicTimeNs(); + + // Update total API lock wait time if measuring lock times + if (pSys->getProperty(pSys, PDB_PROP_SYS_RM_LOCK_TIME_COLLECT)) + portAtomicExAddU64(&g_RmApiLock.totalWaitTime, timestamp - startWaitTime); + + if (g_RmApiLock.threadId == threadId) + g_RmApiLock.timestamp = timestamp; + + // save off owning thread + RMTRACE_RMLOCK(_API_LOCK_ACQUIRE); + + // add api lock trace record + INSERT_LOCK_TRACE(&g_RmApiLock.traceInfo, + NV_RETURN_ADDRESS(), + lockTraceAcquire, + flags, module, + threadId, + !portSyncExSafeToSleep(), + myPriority, + timestamp); + + // + // If enabled, reset the timeout now that we are running and off + // the Sleep Queue. + // + if (threadStateGetSetupFlags() & + THREAD_STATE_SETUP_FLAGS_DO_NOT_INCLUDE_SLEEP_TIME_ENABLED) + { + threadStateResetTimeout(NULL); + } + } + + NvP64 *pStartTime = tlsEntryAcquire(g_RmApiLock.tlsEntryId); + if (pStartTime != NULL) + { + // + // Store start time to track lock hold time. This is done + // regardless of the value of PDB_PROP_SYS_RM_LOCK_TIME_COLLECT since + // the API lock can be acquired before PDB properties are initialized + // and released after they are which could lead to uninitialized memory + // being present in TLS. 
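+        // (The stored start time is read back in rmapiLockRelease; the same
+        // TLS entry also serves as the ownership marker tested by
+        // rmapiLockIsOwner.)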
+ // + *(NvU64*)pStartTime = osGetMonotonicTimeNs(); + } + + return rmStatus; +} + +void +rmapiLockRelease(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU64 threadId = portThreadGetCurrentThreadId(); + NvU64 timestamp; + NvU64 startTime = 0; + + // Fetch start of hold time from TLS if measuring lock times + if (pSys->getProperty(pSys, PDB_PROP_SYS_RM_LOCK_TIME_COLLECT)) + startTime = (NvU64) tlsEntryGet(g_RmApiLock.tlsEntryId); + + timestamp = osGetMonotonicTimeNs(); + + RMTRACE_RMLOCK(_API_LOCK_RELEASE); + + // add api lock trace record + INSERT_LOCK_TRACE(&g_RmApiLock.traceInfo, + NV_RETURN_ADDRESS(), + lockTraceRelease, + 0, 0, + threadId, + !portSyncExSafeToSleep(), + 0, + timestamp); + + if (g_RmApiLock.threadId == threadId) + { + // + // If the threadId in the global is same as current thread id, then + // we know that it was acquired in WRITE mode. + // + g_RmApiLock.threadId = ~0ull; + g_RmApiLock.timestamp = timestamp; + + // Update total RW API lock hold time if measuring lock times + if (pSys->getProperty(pSys, PDB_PROP_SYS_RM_LOCK_TIME_COLLECT)) + portAtomicExAddU64(&g_RmApiLock.totalRwHoldTime, timestamp - startTime); + + portSyncRwLockReleaseWrite(g_RmApiLock.pLock); + + } + else + { + // Update total RO API lock hold time if measuring lock times + if (pSys->getProperty(pSys, PDB_PROP_SYS_RM_LOCK_TIME_COLLECT)) + portAtomicExAddU64(&g_RmApiLock.totalRoHoldTime, timestamp - startTime); + + portSyncRwLockReleaseRead(g_RmApiLock.pLock); + } + + tlsEntryRelease(g_RmApiLock.tlsEntryId); +} + +NvBool +rmapiLockIsOwner(void) +{ + return tlsEntryGet(g_RmApiLock.tlsEntryId) != 0; +} + +NvBool +rmapiLockIsWriteOwner(void) +{ + NvU64 threadId = portThreadGetCurrentThreadId(); + + return (rmapiLockIsOwner() && (threadId == g_RmApiLock.threadId)); +} + +// +// Retrieve total RM API lock wait and hold times +// +void +rmapiLockGetTimes(NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS *pParams) +{ + pParams->waitApiLock = g_RmApiLock.totalWaitTime; + pParams->holdRoApiLock = g_RmApiLock.totalRoHoldTime; + pParams->holdRwApiLock = g_RmApiLock.totalRwHoldTime; +} + +// +// Indicates current thread is in the RTD3 PM path (rm_transition_dynamic_power) which +// means that certain locking asserts/checks must be skipped due to inability to acquire +// the API lock in this path. +// +void rmapiEnterRtd3PmPath(void) +{ + // RTD3 path cannot be entered without the GPU lock + NV_ASSERT(rmGpuLockIsOwner()); + + NV_ASSERT(g_rtd3PmPathThreadId == ~0ULL); + g_rtd3PmPathThreadId = portThreadGetCurrentThreadId(); +} + +// +// Signifies that current thread is leaving the RTD3 PM path, restoring lock +// asserting/checking behavior to normal. +// +void rmapiLeaveRtd3PmPath(void) +{ + NV_ASSERT(rmapiInRtd3PmPath()); + g_rtd3PmPathThreadId = ~0ULL; +} + +// +// Checks if current thread is currently running in the RTD3 PM path. 
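+//
+// A minimal usage sketch (illustrative; per the comments above,
+// rm_transition_dynamic_power is the intended caller and must already hold
+// the GPU lock):
+//
+//     rmapiEnterRtd3PmPath();
+//     // ... RTD3 entry/exit work that cannot take the API lock ...
+//     rmapiLeaveRtd3PmPath();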
+// +NvBool rmapiInRtd3PmPath(void) +{ + return (g_rtd3PmPathThreadId == portThreadGetCurrentThreadId()); +} + +// +// Mark for deletion the client resources from the data base, given a GPU mask +// +void +rmapiSetDelPendingClientResourcesFromGpuMask +( + NvU32 gpuMask +) +{ + RS_ITERATOR it; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + Device *pDevice; + NvBool bDevicesInMask = NV_FALSE; + OBJGPU *pGpu; + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + + // Check that one of the devices is in the GPU mask + bDevicesInMask = NV_FALSE; + while (clientRefIterNext(it.pClient, &it)) + { + pDevice = dynamicCast(it.pResourceRef->pResource, Device); + + if (!pDevice) + { + continue; + } + + pGpu = GPU_RES_GET_GPU(pDevice); + if ((gpuMask & NVBIT(gpuGetInstance(pGpu))) != 0) + { + bDevicesInMask = NV_TRUE; + break; + } + } + + if (bDevicesInMask == NV_FALSE) + { + continue; + } + + pClient->Flags |= RMAPI_CLIENT_FLAG_DELETE_PENDING; + } +} + +void +rmapiDelPendingDevices +( + NvU32 gpuMask +) +{ + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RS_ITERATOR it; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + ppClient = serverutilGetFirstClientUnderLock(); + while (ppClient) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + if (((pClient->Flags & RMAPI_CLIENT_FLAG_DELETE_PENDING) != 0) && + ((pClient->Flags & RMAPI_CLIENT_FLAG_RM_INTERNAL_CLIENT) == 0)) + { + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + while(clientRefIterNext(pRsClient, &it)) + { + RsResourceRef *pDeviceRef = it.pResourceRef; + Device *pDevice = dynamicCast(pDeviceRef->pResource, Device); + + if ((gpuMask & NVBIT(gpuGetInstance(GPU_RES_GET_GPU(pDevice)))) != 0) + { + pRmApi->Free(pRmApi, pRsClient->hClient, pDeviceRef->hResource); + + // Client's resource map has been modified, re-snap iterator + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + } + } + + } + + ppClient = serverutilGetNextClientUnderLock(ppClient); + } +} + +void +rmapiReportLeakedDevices +( + NvU32 gpuMask +) +{ + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RS_ITERATOR it; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + ppClient = serverutilGetFirstClientUnderLock(); + while (ppClient) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + while(clientRefIterNext(pRsClient, &it)) + { + RsResourceRef *pDeviceRef = it.pResourceRef; + Device *pDevice = dynamicCast(pDeviceRef->pResource, Device); + + if ((gpuMask & NVBIT(gpuGetInstance(GPU_RES_GET_GPU(pDevice)))) != 0) + { + NV_PRINTF(LEVEL_ERROR, + "Device object leak: (0x%x, 0x%x). 
Please file a bug against RM-core.\n", + pRsClient->hClient, pDeviceRef->hResource); + NV_ASSERT(0); + + // Delete leaked resource from database + pRmApi->Free(pRmApi, pRsClient->hClient, pDeviceRef->hResource); + + // Client's resource map has been modified, re-snap iterator + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + } + } + + ppClient = serverutilGetNextClientUnderLock(ppClient); + } +} + +// +// Delete the marked client resources +// +void +rmapiDelPendingClients +( + void +) +{ + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RS_ITERATOR it; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + ppClient = serverutilGetFirstClientUnderLock(); + while (ppClient) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + ppClient = serverutilGetNextClientUnderLock(ppClient); + if ((pClient->Flags & RMAPI_CLIENT_FLAG_DELETE_PENDING) != 0) + { + // Only free clients that have no devices left + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + if (!clientRefIterNext(pRsClient, &it)) + pRmApi->Free(pRmApi, pRsClient->hClient, pRsClient->hClient); + } + } +} + +extern OsInfoMap g_osInfoList; + +NV_STATUS +rmapiGetClientHandlesFromOSInfo +( + void *pOSInfo, + NvHandle **ppClientHandleList, + NvU32 *pClientHandleListSize +) +{ + NvHandle *pClientHandleList; + NvU32 clientHandleListSize = 0; + NvU32 k; + + RmClient *pClient; + RsClient *pRsClient; + + OsInfoMapSubmap *pSubmap = NULL; + OsInfoMapIter it; + NvU64 key1 = (NvUPtr)pOSInfo; + + *pClientHandleListSize = 0; + *ppClientHandleList = NULL; + + pSubmap = multimapFindSubmap(&g_osInfoList, key1); + + if (pSubmap == NULL) + return NV_WARN_NOTHING_TO_DO; + + clientHandleListSize = multimapCountSubmapItems(&g_osInfoList, pSubmap); + NV_PRINTF(LEVEL_INFO, "*** Found %d clients for %llx\n", clientHandleListSize, key1); + + if (clientHandleListSize == 0) + { + NV_ASSERT_FAILED("Empty client handle submap"); + return NV_ERR_INVALID_STATE; + } + + pClientHandleList = portMemAllocNonPaged(clientHandleListSize * sizeof(NvU32)); + + if (pClientHandleList == NULL) + { + return NV_ERR_NO_MEMORY; + } + + *pClientHandleListSize = clientHandleListSize; + *ppClientHandleList = pClientHandleList; + + k = 0; + it = multimapSubmapIterItems(&g_osInfoList, pSubmap); + while(multimapItemIterNext(&it)) + { + NV_ASSERT_OR_ELSE(clientHandleListSize > k, break); + + pClient = *it.pValue; + pRsClient = staticCast(pClient, RsClient); + + NV_CHECK_OR_ELSE_STR(LEVEL_ERROR, pClient->pOSInfo == pOSInfo, "*** OS info mismatch", continue); + + pClientHandleList[k++] = pRsClient->hClient; + NV_PRINTF(LEVEL_INFO, "*** Found: %x\n", pRsClient->hClient); + } + + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/rmapi/rmapi_cache.c b/src/nvidia/src/kernel/rmapi/rmapi_cache.c new file mode 100644 index 0000000..29cb7c3 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/rmapi_cache.c @@ -0,0 +1,1784 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "containers/map.h"
+#include "containers/multimap.h"
+#include "nvctassert.h"
+#include "nvmisc.h"
+#include "nvport/sync.h"
+#include "nvrm_registry.h"
+#include "nvsecurityinfo.h"
+#include "os/os.h"
+#include "rmapi/control.h"
+#include "rmapi/rmapi.h"
+#include "rmapi/rmapi_cache_handlers.h"
+#include "rmapi/rmapi_utils.h"
+#include "ctrl/ctrl0000/ctrl0000system.h"
+#include "ctrl/ctrl2080/ctrl2080gpu.h"
+#include "ctrl/ctrl2080/ctrl2080fifo.h"
+#include "ctrl/ctrl2080/ctrl2080bus.h"
+#include "ctrl/ctrl2080/ctrl2080bios.h"
+#include "ctrl/ctrl2080/ctrl2080ce.h"
+#include "gpu/gpu.h"
+
+typedef struct
+{
+    void* params;
+    size_t paramSize;
+    NvU32 rmctrlFlags;
+} RmapiControlCacheEntry;
+
+#define CACHE_GPU_FLAGS_SHIFT 32
+
+//
+// Stores the mapping of a client object to the corresponding GPU attributes.
+// The key is generated by _handlesToGpuAttrKey from the client and object
+// handles.
+//
+// The value is the combination of attributes of the GPU the object is linked
+// to. The low 32 bits are used to store the GPU instance.
+// The high 32 bits are used for CACHE_GPU_FLAG* flags (see
+// CACHE_GPU_FLAGS_SHIFT above).
+//
+MAKE_MAP(ObjectToGpuAttrMap, NvU64);
+
+//
+// Stores the cached control value.
+// Each submap in the multimap stores the cached control value for one GPU.
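+// For example (illustrative): the cached result of a per-GPU control for GPU
+// instance 0 and command cmd lives at gpusControlCache[0][cmd], while
+// system-wide (NV01_ROOT class) controls are grouped under the reserved
+// submap key NV_MAX_DEVICES (see _isCmdSystemWide below).
+//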
+// The key to find a submap is GPU Instance stored in ObjectToGpuAttrMap +// +// The key inside the submap is the control command +// The value inside the submap is the cached control value for the command +// +MAKE_MULTIMAP(GpusControlCache, RmapiControlCacheEntry); + +ct_assert(sizeof(NvHandle) <= 4); + +#define CLIENT_KEY_SHIFT (sizeof(NvHandle) * 8) + +static inline NvBool _isCmdSystemWide(NvU32 cmd) +{ + return DRF_VAL(XXXX, _CTRL_CMD, _CLASS, cmd) == NV01_ROOT; +} + +static inline NvHandle _gpuAttrKeyToClient(NvU64 key) +{ + return (key >> CLIENT_KEY_SHIFT); +} + +static inline NvU64 _handlesToGpuAttrKey(NvHandle hClient, NvHandle hObject) +{ + return ((NvU64)hClient << CLIENT_KEY_SHIFT) | hObject; +} + +static inline NvU32 _getGpuInstFromGpuAttr(NvU64 gpuAttr) +{ + return (NvU32)(gpuAttr & (NVBIT64(CACHE_GPU_FLAGS_SHIFT) - 1)); +} + +static inline NvU32 _getCacheGpuFlagsFromGpuAttr(NvU64 gpuAttr) +{ + return (NvU32)(gpuAttr >> CACHE_GPU_FLAGS_SHIFT); +} + +static NvU64 _getGpuAttrFromGpu(OBJGPU *pGpu) +{ + NvU64 gpuAttr = 0; + NvU32 cacheGpuFlags = 0; + + gpuAttr |= pGpu->gpuInstance; + gpuAttr |= ((NvU64)cacheGpuFlags) << CACHE_GPU_FLAGS_SHIFT; + + return gpuAttr; +} + +static struct { + GpusControlCache gpusControlCache; + ObjectToGpuAttrMap objectToGpuAttrMap; + NvU32 mode; + PORT_RWLOCK *pLock; +} RmapiControlCache; + +enum CACHE_LOCK_TYPE +{ + LOCK_EXCLUSIVE, + LOCK_SHARED +}; + +static void _cacheLockAcquire(enum CACHE_LOCK_TYPE lockType) +{ + if (lockType == LOCK_EXCLUSIVE) + portSyncRwLockAcquireWrite(RmapiControlCache.pLock); + else + portSyncRwLockAcquireRead(RmapiControlCache.pLock); +} + +static void _cacheLockRelease(enum CACHE_LOCK_TYPE lockType) +{ + if (lockType == LOCK_EXCLUSIVE) + portSyncRwLockReleaseWrite(RmapiControlCache.pLock); + else + portSyncRwLockReleaseRead(RmapiControlCache.pLock); +} + +static inline +NvBool _cacheIsDisabled(void) +{ + return (rmapiControlCacheGetMode() == + NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_MODE_DISABLE); +} + +static RmapiControlCacheEntry* _setCacheEntry(NvU64 key1, NvU64 key2, NvU32 allocSize, + NvU32 rmctrlFlags, NvBool *pbParamsAllocated); +static RmapiControlCacheEntry* _getCacheEntry(NvU64 key1, NvU64 key2); + +NvBool rmapiControlIsCacheable(NvU32 flags, NvU32 accessRight, NvBool bAllowInternal) +{ + if (_cacheIsDisabled()) + return NV_FALSE; + + if (!(flags & RMCTRL_FLAGS_CACHEABLE_ANY)) + return NV_FALSE; + + // + // RMCTRL with access right requires access right check. + // We need resource ref and client object to check the access right, both not + // available here. Do not cache RMCTRLs with access right. + // + if (accessRight != 0) + return NV_FALSE; + + // Allow internal RMCTRL of all privilege with bAllowInternal flag + if (flags & RMCTRL_FLAGS_INTERNAL) + return bAllowInternal; + + return NV_TRUE; +} + +NvBool rmapiCmdIsCacheable(NvU32 cmd, NvBool bAllowInternal) +{ + NvU32 flags; + NvU32 accessRight; + + if (rmapiutilGetControlInfo(cmd, &flags, &accessRight, NULL) != NV_OK) + return NV_FALSE; + + return rmapiControlIsCacheable(flags, accessRight, bAllowInternal); +} + +NV_STATUS rmapiControlCacheInit(void) +{ +#if defined(DEBUG) + // Beware that verification only mode will not work during GCOFF. 
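+    //
+    // (In verify-only mode, cache lookups always miss; see
+    // rmapiControlCacheGet. Cache sets still populate entries and assert
+    // that the recomputed results match the cached copy; see
+    // _rmapiControlCacheSet.)
+    //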
+ RmapiControlCache.mode = NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_MODE_VERIFY_ONLY; +#else + RmapiControlCache.mode = NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_MODE_ENABLE; +#endif + + NvU32 mode; + + if (osReadRegistryDword(NULL, NV_REG_STR_RM_CACHEABLE_CONTROLS, &mode) == NV_OK) + { + RmapiControlCache.mode = mode; + } + NV_PRINTF(LEVEL_INFO, "using cache mode %d\n", RmapiControlCache.mode); + + multimapInit(&RmapiControlCache.gpusControlCache, portMemAllocatorGetGlobalNonPaged()); + mapInit(&RmapiControlCache.objectToGpuAttrMap, portMemAllocatorGetGlobalNonPaged()); + RmapiControlCache.pLock = portSyncRwLockCreate(portMemAllocatorGetGlobalNonPaged()); + if (RmapiControlCache.pLock == NULL) + { + NV_PRINTF(LEVEL_ERROR, "failed to create rw lock\n"); + multimapDestroy(&RmapiControlCache.gpusControlCache); + mapDestroy(&RmapiControlCache.objectToGpuAttrMap); + return NV_ERR_NO_MEMORY; + } + return NV_OK; +} + +NV_STATUS rmapiControlCacheSetGpuAttrForObject +( + NvHandle hClient, + NvHandle hObject, + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + NvU64 *entry; + NvU64 gpuAttr; + + if (pGpu == NULL) + return NV_ERR_INVALID_ARGUMENT; + + gpuAttr = _getGpuAttrFromGpu(pGpu); + + NV_PRINTF(LEVEL_INFO, "gpu attr set for 0x%x 0x%x: 0x%llx\n", hClient, hObject, gpuAttr); + + _cacheLockAcquire(LOCK_EXCLUSIVE); + entry = mapFind(&RmapiControlCache.objectToGpuAttrMap, _handlesToGpuAttrKey(hClient, hObject)); + + if (entry != NULL) + { + // set happens in object allocation, should not exist in cache already + NV_PRINTF(LEVEL_WARNING, + "set existing gpu attr 0x%x 0x%x was 0x%llx is 0x%llx\n", + hClient, hObject, *entry, gpuAttr); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + entry = mapInsertNew(&RmapiControlCache.objectToGpuAttrMap, _handlesToGpuAttrKey(hClient, hObject)); + + if (entry == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + *entry = gpuAttr; + +done: + _cacheLockRelease(LOCK_EXCLUSIVE); + return status; +} + +// Need to hold rmapi control cache read/write lock +static NV_STATUS _rmapiControlCacheGetGpuAttrForObject +( + NvHandle hClient, + NvHandle hObject, + NvU32 *pGpuInst, + NvU32 *pCacheGpuFlags +) +{ + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + NvU64* entry = mapFind(&RmapiControlCache.objectToGpuAttrMap, _handlesToGpuAttrKey(hClient, hObject)); + + NV_PRINTF(LEVEL_INFO, "cached gpu attr lookup for 0x%x 0x%x\n", hClient, hObject); + + if (entry != NULL) + { + NV_PRINTF(LEVEL_INFO, "cached gpu attr for 0x%x 0x%x: 0x%llx\n", hClient, hObject, *entry); + + if (pGpuInst != NULL) + *pGpuInst = _getGpuInstFromGpuAttr(*entry); + + if (pCacheGpuFlags != NULL) + *pCacheGpuFlags = _getCacheGpuFlagsFromGpuAttr(*entry); + + status = NV_OK; + } + + return status; +} + +// Need to hold rmapi control cache write lock +static void _rmapiControlCacheFreeGpuAttrForObject +( + NvHandle hClient, + NvHandle hObject +) +{ + const NvU64 key = _handlesToGpuAttrKey(hClient, hObject); + NvU64* entry = mapFind(&RmapiControlCache.objectToGpuAttrMap, key); + + if (entry != NULL) + { + mapRemove(&RmapiControlCache.objectToGpuAttrMap, entry); + NV_PRINTF(LEVEL_INFO, "Gpu Inst entry with key 0x%llx freed\n", key); + } +} + +// Need to hold rmapi control cache write lock +static void _rmapiControlCacheFreeGpuAttrForClient(NvHandle hClient) +{ + while (NV_TRUE) + { + NvU64* entry = mapFindGEQ(&RmapiControlCache.objectToGpuAttrMap, _handlesToGpuAttrKey(hClient, 0)); + NvU64 key; + + if (entry == NULL) + break; + + key = mapKey(&RmapiControlCache.objectToGpuAttrMap, entry); + + if 
(_gpuAttrKeyToClient(key) != hClient) + break; + + mapRemove(&RmapiControlCache.objectToGpuAttrMap, entry); + NV_PRINTF(LEVEL_INFO, "Gpu Inst entry with key 0x%llx freed\n", key); + } +} + +static NV_STATUS _rmapiControlCacheGetCacheable +( + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void* params, + NvU32 paramsSize +) +{ + RmapiControlCacheEntry *entry; + NvU32 gpuInst; + NV_STATUS status = NV_OK; + + _cacheLockAcquire(LOCK_SHARED); + + if (_cacheIsDisabled()) + { + // unexpected mode change. + status = NV_ERR_INVALID_STATE; + goto done; + } + + if (_isCmdSystemWide(cmd)) + { + gpuInst = NV_MAX_DEVICES; + } + else + { + status = _rmapiControlCacheGetGpuAttrForObject(hClient, hObject, &gpuInst, NULL); + if (status != NV_OK) + goto done; + } + + entry = _getCacheEntry(gpuInst, cmd); + if (entry == NULL || entry->params == NULL) + { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + if (!(entry->rmctrlFlags & RMCTRL_FLAGS_CACHEABLE)) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + if (entry->paramSize == 0) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + if (entry->paramSize > paramsSize) + { + status = NV_ERR_BUFFER_TOO_SMALL; + goto done; + } + + portMemCopy(params, paramsSize, entry->params, entry->paramSize); +done: + _cacheLockRelease(LOCK_SHARED); + return status; +} + +static NV_STATUS _rmapiControlCacheGetByInput +( + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void* params, + NvU32 paramsSize +); + +/*! + * Look up an entry keyed with (hClient, hObject, cmd) in RMCTRL cache. + * Handle it either as a CACHEABLE or CACHEABLE_BY_INPUT entry according to its stored type. + */ +static NV_STATUS _rmapiControlCacheGetAny +( + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void* params, + NvU32 paramsSize, + API_SECURITY_INFO *pSecInfo +) +{ + RmapiControlCacheEntry *entry; + NvU32 gpuInst; + NV_STATUS status = NV_OK; + NvU32 rmctrlFlags = 0; + + _cacheLockAcquire(LOCK_SHARED); + + if (_cacheIsDisabled()) + { + // unexpected mode change. 
+        status = NV_ERR_INVALID_STATE;
+        goto fail_release;
+    }
+
+    if (_isCmdSystemWide(cmd))
+    {
+        gpuInst = NV_MAX_DEVICES;
+    }
+    else
+    {
+        status = _rmapiControlCacheGetGpuAttrForObject(hClient, hObject, &gpuInst, NULL);
+        if (status != NV_OK)
+        {
+            goto fail_release;
+        }
+    }
+
+    entry = _getCacheEntry(gpuInst, cmd);
+    if (entry == NULL || entry->params == NULL)
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+        goto fail_release;
+    }
+
+    rmctrlFlags = entry->rmctrlFlags;
+
+    status = rmControlValidateClientPrivilegeAccess(hClient, hObject, cmd, rmctrlFlags, pSecInfo);
+    if (status != NV_OK)
+        goto fail_release;
+
+    _cacheLockRelease(LOCK_SHARED);
+
+    // Re-acquire the lock when we do the actual lookup
+    switch ((rmctrlFlags & RMCTRL_FLAGS_CACHEABLE_ANY))
+    {
+        case RMCTRL_FLAGS_CACHEABLE:
+            status = _rmapiControlCacheGetCacheable(hClient, hObject, cmd, params, paramsSize);
+            break;
+        case RMCTRL_FLAGS_CACHEABLE_BY_INPUT:
+            status = _rmapiControlCacheGetByInput(hClient, hObject, cmd, params, paramsSize);
+            break;
+        default:
+            NV_PRINTF(LEVEL_ERROR, "Invalid cacheable flag 0x%x for cmd 0x%x\n", rmctrlFlags, cmd);
+            status = NV_ERR_INVALID_PARAMETER;
+            break;
+    }
+
+    return status;
+
+fail_release:
+    _cacheLockRelease(LOCK_SHARED);
+    return status;
+}
+
+static NV_STATUS _rmapiControlCacheSet
+(
+    NvHandle hClient,
+    NvHandle hObject,
+    NvU32 cmd,
+    NvU32 rmctrlFlags,
+    const void* params,
+    NvU32 paramsSize
+)
+{
+    NV_STATUS status = NV_OK;
+    RmapiControlCacheEntry* entry = NULL;
+    NvU32 gpuInst;
+    NvBool bParamsAllocated;
+
+    _cacheLockAcquire(LOCK_EXCLUSIVE);
+
+    if (_cacheIsDisabled())
+    {
+        // unexpected mode change.
+        status = NV_ERR_INVALID_STATE;
+        goto done;
+    }
+
+    if (_isCmdSystemWide(cmd))
+    {
+        gpuInst = NV_MAX_DEVICES;
+    }
+    else
+    {
+        status = _rmapiControlCacheGetGpuAttrForObject(hClient, hObject, &gpuInst, NULL);
+        if (status != NV_OK)
+            goto done;
+    }
+
+    entry = _setCacheEntry(gpuInst, cmd, paramsSize, rmctrlFlags, &bParamsAllocated);
+    if (entry == NULL)
+    {
+        status = NV_ERR_NO_MEMORY;
+        goto done;
+    }
+
+    //
+    // A successful get-or-init call that did not allocate params implies a
+    // duplicate cache insertion that should be skipped.
+    // Duplicate cache sets happen when:
+    // 1. Parallel controls call into RM before the first cache set;
+    //    all threads will attempt a cache set after the control calls.
+    // 2. The cache was already set by the RPC-to-GSP path.
+    // 3. The cache is in verify-only mode.
+    //
+    if (!bParamsAllocated)
+    {
+        if (RmapiControlCache.mode == NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_MODE_VERIFY_ONLY)
+        {
+            NV_ASSERT(portMemCmp(entry->params, params, paramsSize) == 0);
+        }
+        status = NV_OK;
+        goto done;
+    }
+
+    portMemCopy(entry->params, paramsSize, params, paramsSize);
+
+done:
+    _cacheLockRelease(LOCK_EXCLUSIVE);
+    return status;
+}
+
+//
+// Create a new cache entry. Does not take any locks itself; the cache lock
+// must be held before calling.
+//
+// key1 [IN]
+//   First key for the multimap entry
+// key2 [IN]
+//   Second key for the multimap entry
+// allocSize [IN]
+//   The size to allocate for the new cache entry.
+// rmctrlFlags [IN]
+//   RMCTRL flags to store on a newly created entry.
+// pbParamsAllocated [OUT]
+//   Indicates whether new memory was allocated for the cache entry.
+//   A cache set without memory allocation implies there's an existing entry.
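+//
+// Returns the entry on success, or NULL if the submap, item, or params
+// buffer could not be allocated.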
+// +static RmapiControlCacheEntry* +_setCacheEntry +( + NvU64 key1, + NvU64 key2, + NvU32 allocSize, + NvU32 rmctrlFlags, + NvBool *pbParamsAllocated +) +{ + RmapiControlCacheEntry *entry = NULL; + GpusControlCacheSubmap *insertedSubmap = NULL; + + entry = multimapFindItem(&RmapiControlCache.gpusControlCache, key1, key2); + + // for cache set, try to init entry if not valid + if (entry == NULL) + { + if (multimapFindSubmap(&RmapiControlCache.gpusControlCache, key1) == NULL) + { + insertedSubmap = multimapInsertSubmap(&RmapiControlCache.gpusControlCache, key1); + if (insertedSubmap == NULL) + goto failed; + } + + entry = multimapInsertItemNew(&RmapiControlCache.gpusControlCache, key1, key2); + } + + if (entry == NULL) + goto failed_free_submap; + + if (entry->params == NULL) + { + entry->params = portMemAllocNonPaged(allocSize); + if (entry->params == NULL) + goto failed_free_entry; + + portMemSet(entry->params, 0, allocSize); + entry->paramSize = allocSize; + entry->rmctrlFlags = rmctrlFlags; + + if (pbParamsAllocated != NULL) + *pbParamsAllocated = NV_TRUE; + } + else if (pbParamsAllocated != NULL) + { + *pbParamsAllocated = NV_FALSE; + } + + return entry; + +failed_free_entry: + if (entry != NULL) + multimapRemoveItem(&RmapiControlCache.gpusControlCache, entry); +failed_free_submap: + if (insertedSubmap != NULL) + multimapRemoveSubmap(&RmapiControlCache.gpusControlCache, insertedSubmap); +failed: + return NULL; +} + + +/*! + * Look up an entry keyed with (key1, key2) in RMCTRL cache. + * Does not take any locks. + */ +static RmapiControlCacheEntry* +_getCacheEntry +( + NvU64 key1, + NvU64 key2 +) +{ + // for cache get, return map find result directly + return multimapFindItem(&RmapiControlCache.gpusControlCache, key1, key2); +} + +static NvBool _isGpuGetInfoIndexCacheable(NvU32 index, NvU32 cacheGpuFlags) +{ + switch (index) + { + case NV2080_CTRL_GPU_INFO_INDEX_MINOR_REVISION_EXT: + case NV2080_CTRL_GPU_INFO_INDEX_NETLIST_REV0: + case NV2080_CTRL_GPU_INFO_INDEX_NETLIST_REV1: + case NV2080_CTRL_GPU_INFO_INDEX_SYSMEM_ACCESS: + case NV2080_CTRL_GPU_INFO_INDEX_GEMINI_BOARD: + case NV2080_CTRL_GPU_INFO_INDEX_SURPRISE_REMOVAL_POSSIBLE: + case NV2080_CTRL_GPU_INFO_INDEX_GLOBAL_POISON_FUSE_ENABLED: + case NV2080_CTRL_GPU_INFO_INDEX_GPU_SR_SUPPORT: + case NV2080_CTRL_GPU_INFO_INDEX_SPLIT_VAS_MGMT_SERVER_CLIENT_RM: + case NV2080_CTRL_GPU_INFO_INDEX_GPU_SM_VERSION: + case NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED: + case NV2080_CTRL_GPU_INFO_INDEX_DISPLAY_ENABLED: + case NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED: + case NV2080_CTRL_GPU_INFO_INDEX_GPU_PROFILING_CAPABILITY: + case NV2080_CTRL_GPU_INFO_INDEX_GPU_DEBUGGING_CAPABILITY: + case NV2080_CTRL_GPU_INFO_INDEX_CMP_SKU: + case NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY: + case NV2080_CTRL_GPU_INFO_INDEX_ECID_LO32: + case NV2080_CTRL_GPU_INFO_INDEX_ECID_HI32: + case NV2080_CTRL_GPU_INFO_INDEX_ECID_EXTENDED: + return NV_TRUE; + + default: + return NV_FALSE; + } +} + +static NvBool _isFifoGetInfoIndexCacheable(NvU32 index) +{ + switch (index) + { + case NV2080_CTRL_FIFO_INFO_INDEX_INSTANCE_TOTAL: + case NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS: + case NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNELS_PER_GROUP: + case NV2080_CTRL_FIFO_INFO_INDEX_MAX_SUBCONTEXT_PER_GROUP: + case NV2080_CTRL_FIFO_INFO_INDEX_BAR1_USERD_START_OFFSET: + case NV2080_CTRL_FIFO_INFO_INDEX_DEFAULT_CHANNEL_TIMESLICE: + return NV_TRUE; + default: + return NV_FALSE; + } +} + +static NvBool _isBusGetInfoIndexCacheable(NvU32 index) +{ + switch (index) + { + 
case NV2080_CTRL_BUS_INFO_INDEX_TYPE: + case NV2080_CTRL_BUS_INFO_INDEX_INTLINE: + case NV2080_CTRL_BUS_INFO_INDEX_CAPS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CAPS: + case NV2080_CTRL_BUS_INFO_INDEX_COHERENT_DMA_FLAGS: + case NV2080_CTRL_BUS_INFO_INDEX_NONCOHERENT_DMA_FLAGS: + case NV2080_CTRL_BUS_INFO_INDEX_BUS_NUMBER: + case NV2080_CTRL_BUS_INFO_INDEX_DEVICE_NUMBER: + case NV2080_CTRL_BUS_INFO_INDEX_DOMAIN_NUMBER: + case NV2080_CTRL_BUS_INFO_INDEX_INTERFACE_TYPE: + case NV2080_CTRL_BUS_INFO_INDEX_GPU_INTERFACE_TYPE: + return NV_TRUE; + default: + return NV_FALSE; + } +} + +static NvBool _isVbiosGetInfoIndexCacheable(NvU32 index) +{ + switch (index) + { + case NV2080_CTRL_BIOS_INFO_INDEX_REVISION: + case NV2080_CTRL_BIOS_INFO_INDEX_OEM_REVISION: + return NV_TRUE; + default: + return NV_FALSE; + } +} + +static NvBool _isGetInfoIndexCacheable(NvU32 cmd, NvU32 index, NvU32 cacheGpuFlags) +{ + switch (cmd) + { + case NV2080_CTRL_CMD_GPU_GET_INFO_V2: + return _isGpuGetInfoIndexCacheable(index, cacheGpuFlags); + case NV2080_CTRL_CMD_FIFO_GET_INFO: + return _isFifoGetInfoIndexCacheable(index); + case NV2080_CTRL_CMD_BUS_GET_INFO_V2: + return _isBusGetInfoIndexCacheable(index); + case NV2080_CTRL_CMD_BIOS_GET_INFO_V2: + return _isVbiosGetInfoIndexCacheable(index); + } + + return NV_FALSE; +} + +void _rmapiControlCacheRemoveMapEntry +( + RmapiControlCacheEntry *pEntry +) +{ + multimapRemoveItem(&RmapiControlCache.gpusControlCache, pEntry); +} + +// +// For GET_INFO controls, we use an array of getInfoCacheEntry to store the +// cached value. +// +// The length of the array is the max list length of each control and is +// enough to store the cached value of all indexes. +// +// The Nth item in the array, array[N], represent the cache state of the info +// whose index value is N. If the info is cached, array[N].valid is NV_TRUE +// and the cached value is stored in array[N].data. +// array[N].valid is NV_FALSE if the info is not cached. +// +typedef struct GetInfoCacheEntry { + NvBool valid; + NvU32 data; +} GetInfoCacheEntry; + +static NV_STATUS _getInfoCacheHandler +( + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvU32 rmctrlFlags, + NVXXXX_CTRL_XXX_INFO *pInfo, + NvU32 listSize, + NvU32 listSizeLimit, + NvBool bSet +) +{ + NV_STATUS status = NV_OK; + NvU32 i = 0; + NvU32 gpuInst; + NvU32 cacheGpuFlags; + RmapiControlCacheEntry *entry = NULL; + GetInfoCacheEntry *cachedTable = NULL; + const NvU32 allocSize = sizeof(GetInfoCacheEntry) * listSizeLimit; + enum CACHE_LOCK_TYPE lockType = bSet ? LOCK_EXCLUSIVE : LOCK_SHARED; + + if (listSize <= 0 || listSize > listSizeLimit || pInfo == NULL) + { + return NV_ERR_INVALID_PARAMETER; + } + + _cacheLockAcquire(lockType); + + if (_cacheIsDisabled()) + { + // unexpected mode change. + status = NV_ERR_INVALID_STATE; + goto done; + } + + status = _rmapiControlCacheGetGpuAttrForObject(hClient, hObject, &gpuInst, &cacheGpuFlags); + if (status != NV_OK) + goto done; + + if (bSet) + entry = _setCacheEntry(gpuInst, cmd, allocSize, rmctrlFlags, NULL); + else + entry = _getCacheEntry(gpuInst, cmd); + + if (entry == NULL || entry->params == NULL) + { + status = bSet ? 
NV_ERR_NO_MEMORY : NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + if (!(entry->rmctrlFlags & RMCTRL_FLAGS_CACHEABLE_BY_INPUT)) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + cachedTable = (GetInfoCacheEntry*)entry->params; + + for (i = 0; i < listSize; ++i) + { + const NvU32 index = pInfo[i].index; + + if (index >= listSizeLimit) + { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (bSet) + { + if (_isGetInfoIndexCacheable(cmd, index, cacheGpuFlags)) + { + if (cachedTable[index].valid) + { + NV_ASSERT(cachedTable[index].data == pInfo[i].data); + } + else + { + cachedTable[index].valid = NV_TRUE; + cachedTable[index].data = pInfo[i].data; + } + } + } + else + { + // if any of the entry is not cacheable or not in the cache, skip the whole cmd + if (!_isGetInfoIndexCacheable(cmd, index, cacheGpuFlags) || !cachedTable[index].valid) + { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + } + } + + if (!bSet) + { + for (i = 0; i < listSize; ++i) + pInfo[i].data = cachedTable[pInfo[i].index].data; + } + +done: + + if (status != NV_OK && bSet) + { + if (entry != NULL) + { + portMemFree(entry->params); + _rmapiControlCacheRemoveMapEntry(entry); + } + } + + _cacheLockRelease(lockType); + + return status; +} + +NV_STATUS _rmapiControlCacheGetByInputTemplateMethod +( + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvU32 rmctrlFlags, + void *pParams, + NvU32 cacheEntrySize, + RmapiCacheGetByInputHandler cacheHandler, + NvBool bSet +) +{ + NvU32 gpuInst; + NV_STATUS status = NV_OK; + RmapiControlCacheEntry *entry = NULL; + enum CACHE_LOCK_TYPE lockType = (bSet) ? LOCK_EXCLUSIVE : LOCK_SHARED; + NvBool bCacheEntryAllocated; + + _cacheLockAcquire(lockType); + + if (_cacheIsDisabled()) + { + // Unexpected mode change. + status = NV_ERR_INVALID_STATE; + goto done; + } + + status = _rmapiControlCacheGetGpuAttrForObject(hClient, hObject, &gpuInst, NULL); + if (status != NV_OK) + goto done; + + if (bSet) + entry = _setCacheEntry(gpuInst, cmd, cacheEntrySize, rmctrlFlags, &bCacheEntryAllocated); + else + entry = _getCacheEntry(gpuInst, cmd); + + if (entry == NULL || entry->params == NULL) + { + status = (bSet) ? NV_ERR_NO_MEMORY : NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + status = cacheHandler(entry->params, pParams, bSet); +done: + if (status != NV_OK && bSet) + { + if (entry != NULL && bCacheEntryAllocated) + { + if (entry->params) + portMemFree(entry->params); + _rmapiControlCacheRemoveMapEntry(entry); + } + } + _cacheLockRelease(lockType); + return status; +} + +typedef struct GpuNameStringCacheEntry +{ + NvBool bAsciiValid; + NvBool bUnicodeValid; + NvU8 ascii[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU16 unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH]; +} GpuNameStringCacheEntry; + +NV_STATUS _gpuNameStringGet +( + NvHandle hClient, + NvHandle hObject, + NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS *pParams +) +{ + NvU32 gpuInst; + NV_STATUS status = NV_OK; + RmapiControlCacheEntry *entry = NULL; + GpuNameStringCacheEntry *cachedParams = NULL; + + _cacheLockAcquire(LOCK_SHARED); + + if (_cacheIsDisabled()) + { + // unexpected mode change. 
+ status = NV_ERR_INVALID_STATE; + goto done; + } + + status = _rmapiControlCacheGetGpuAttrForObject(hClient, hObject, &gpuInst, NULL); + if (status != NV_OK) + goto done; + + entry = _getCacheEntry(gpuInst, NV2080_CTRL_CMD_GPU_GET_NAME_STRING); + if (entry == NULL || entry->params == NULL) + { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + if (!(entry->rmctrlFlags & RMCTRL_FLAGS_CACHEABLE_BY_INPUT)) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + cachedParams = (GpuNameStringCacheEntry *)entry->params; + + switch (pParams->gpuNameStringFlags) + { + case NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII: + if (!cachedParams->bAsciiValid) + { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + portMemCopy(pParams->gpuNameString.ascii, + sizeof(pParams->gpuNameString.ascii), + cachedParams->ascii, + sizeof(pParams->gpuNameString.ascii)); + break; + + case NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_UNICODE: + if (!cachedParams->bUnicodeValid) + { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + portMemCopy(pParams->gpuNameString.unicode, + sizeof(pParams->gpuNameString.unicode), + cachedParams->unicode, + sizeof(pParams->gpuNameString.unicode)); + break; + + default: + NV_PRINTF(LEVEL_ERROR, "Unknown gpu name string flag: %u\n", pParams->gpuNameStringFlags); + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } +done: + _cacheLockRelease(LOCK_SHARED); + return status; +} + +NV_STATUS _gpuNameStringSet +( + NvHandle hClient, + NvHandle hObject, + NvU32 rmctrlFlags, + const NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS *pParams +) +{ + NvU32 gpuInst; + NV_STATUS status; + RmapiControlCacheEntry *entry = NULL; + GpuNameStringCacheEntry *cachedParams = NULL; + + _cacheLockAcquire(LOCK_EXCLUSIVE); + + if (_cacheIsDisabled()) + { + // unexpected mode change. 
+ status = NV_ERR_INVALID_STATE; + goto done; + } + + status = _rmapiControlCacheGetGpuAttrForObject(hClient, hObject, &gpuInst, NULL); + if (status != NV_OK) + goto done; + + entry = _setCacheEntry(gpuInst, NV2080_CTRL_CMD_GPU_GET_NAME_STRING, sizeof(GpuNameStringCacheEntry), rmctrlFlags, NULL); + if (entry == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + cachedParams = (GpuNameStringCacheEntry *)entry->params; + + switch (pParams->gpuNameStringFlags) + { + case NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII: + if (cachedParams->bAsciiValid) + { + NV_ASSERT( + portMemCmp(pParams->gpuNameString.ascii, + cachedParams->ascii, + sizeof(pParams->gpuNameString.ascii)) == 0); + } + else + { + portMemCopy(cachedParams->ascii, + sizeof(pParams->gpuNameString.ascii), + pParams->gpuNameString.ascii, + sizeof(pParams->gpuNameString.ascii)); + cachedParams->bAsciiValid = NV_TRUE; + } + break; + + case NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_UNICODE: + if (cachedParams->bUnicodeValid) + { + NV_ASSERT( + portMemCmp(pParams->gpuNameString.unicode, + cachedParams->unicode, + sizeof(pParams->gpuNameString.unicode)) == 0); + } + else + { + portMemCopy(cachedParams->unicode, + sizeof(pParams->gpuNameString.unicode), + pParams->gpuNameString.unicode, + sizeof(pParams->gpuNameString.unicode)); + cachedParams->bUnicodeValid = NV_TRUE; + } + break; + + default: + NV_PRINTF(LEVEL_ERROR, "Unknown gpu name string flag: %u\n", pParams->gpuNameStringFlags); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + +done: + if (status != NV_OK) + { + if (entry != NULL) + { + if (entry->params) + portMemFree(entry->params); + _rmapiControlCacheRemoveMapEntry(entry); + } + } + _cacheLockRelease(LOCK_EXCLUSIVE); + + return status; +} + +typedef struct CePhysicalCapsCacheEntry +{ + NvBool valid; + NvU8 capsTbl[NV2080_CTRL_CE_CAPS_TBL_SIZE]; +} CePhysicalCapsCacheEntry; + +static NV_STATUS _getCePhysicalCapsHandler +( + NvHandle hClient, + NvHandle hObject, + NvU32 rmctrlFlags, + NvU32 ceEngineType, + NvU8 capsTbl[NV2080_CTRL_CE_CAPS_TBL_SIZE], + NvBool bSet +) +{ + NV_STATUS status = NV_OK; + NvU32 gpuInst; + RmapiControlCacheEntry *entry = NULL; + CePhysicalCapsCacheEntry *cachedTable = NULL; + const NvU32 allocSize = sizeof(CePhysicalCapsCacheEntry) * NV2080_ENGINE_TYPE_COPY_SIZE; + enum CACHE_LOCK_TYPE lockType = bSet ? LOCK_EXCLUSIVE : LOCK_SHARED; + NvU32 ceEngineIndex; + + if (!NV2080_ENGINE_TYPE_IS_COPY(ceEngineType)) + { + return NV_ERR_INVALID_ARGUMENT; + } + ceEngineIndex = NV2080_ENGINE_TYPE_COPY_IDX(ceEngineType); + + _cacheLockAcquire(lockType); + + if (_cacheIsDisabled()) + { + // unexpected mode change. 
+ status = NV_ERR_INVALID_STATE; + goto done; + } + + status = _rmapiControlCacheGetGpuAttrForObject(hClient, hObject, &gpuInst, NULL); + if (status != NV_OK) + goto done; + + if (bSet) + entry = _setCacheEntry(gpuInst, NV2080_CTRL_CMD_CE_GET_PHYSICAL_CAPS, allocSize, rmctrlFlags, NULL); + else + entry = _getCacheEntry(gpuInst, NV2080_CTRL_CMD_CE_GET_PHYSICAL_CAPS); + + if (entry == NULL || entry->params == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + if (!(entry->rmctrlFlags & RMCTRL_FLAGS_CACHEABLE_BY_INPUT)) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + cachedTable = (CePhysicalCapsCacheEntry *)entry->params; + + if (bSet) + { + if (cachedTable[ceEngineIndex].valid) + { + NV_ASSERT(portMemCmp(capsTbl, + cachedTable[ceEngineIndex].capsTbl, + NV2080_CTRL_CE_CAPS_TBL_SIZE) == 0); + } + else + { + cachedTable[ceEngineIndex].valid = NV_TRUE; + portMemCopy(cachedTable[ceEngineIndex].capsTbl, + NV2080_CTRL_CE_CAPS_TBL_SIZE, + capsTbl, + NV2080_CTRL_CE_CAPS_TBL_SIZE); + } + } + else + { + if (!cachedTable[ceEngineIndex].valid) + { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + else + { + portMemCopy(capsTbl, + NV2080_CTRL_CE_CAPS_TBL_SIZE, + cachedTable[ceEngineIndex].capsTbl, + NV2080_CTRL_CE_CAPS_TBL_SIZE); + } + } + +done: + if (status != NV_OK && bSet) + { + if (entry != NULL) + { + if (entry->params != NULL) + portMemFree(entry->params); + multimapRemoveItem(&RmapiControlCache.gpusControlCache, entry); + } + } + _cacheLockRelease(lockType); + + return status; +} + +typedef struct CePceMaskCacheEntry +{ + NvBool valid; + NvU32 pceMask; +} CePceMaskCacheEntry; + +static NV_STATUS _getCePceMaskHandler +( + NvHandle hClient, + NvHandle hObject, + NvU32 rmctrlFlags, + NvU32 ceEngineType, + NvU32 *pceMask, + NvBool bSet +) +{ + NV_STATUS status = NV_OK; + NvU32 gpuInst; + RmapiControlCacheEntry *entry = NULL; + CePceMaskCacheEntry *cachedTable = NULL; + const NvU32 allocSize = sizeof(CePceMaskCacheEntry) * NV2080_ENGINE_TYPE_COPY_SIZE; + enum CACHE_LOCK_TYPE lockType = bSet ? LOCK_EXCLUSIVE : LOCK_SHARED; + NvU32 ceEngineIndex; + + if (!NV2080_ENGINE_TYPE_IS_COPY(ceEngineType)) + { + return NV_ERR_INVALID_ARGUMENT; + } + ceEngineIndex = NV2080_ENGINE_TYPE_COPY_IDX(ceEngineType); + + _cacheLockAcquire(lockType); + + if (_cacheIsDisabled()) + { + // unexpected mode change. 
+ status = NV_ERR_INVALID_STATE; + goto done; + } + + status = _rmapiControlCacheGetGpuAttrForObject(hClient, hObject, &gpuInst, NULL); + if (status != NV_OK) + goto done; + + if (bSet) + entry = _setCacheEntry(gpuInst, NV2080_CTRL_CMD_CE_GET_CE_PCE_MASK, allocSize, rmctrlFlags, NULL); + else + entry = _getCacheEntry(gpuInst, NV2080_CTRL_CMD_CE_GET_CE_PCE_MASK); + + if (entry == NULL || entry->params == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + if (!(entry->rmctrlFlags & RMCTRL_FLAGS_CACHEABLE_BY_INPUT)) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + cachedTable = (CePceMaskCacheEntry *)entry->params; + + if (bSet) + { + if (cachedTable[ceEngineIndex].valid) + { + NV_ASSERT(*pceMask == cachedTable[ceEngineIndex].pceMask); + } + else + { + cachedTable[ceEngineIndex].valid = NV_TRUE; + cachedTable[ceEngineIndex].pceMask = *pceMask; + } + } + else + { + if (!cachedTable[ceEngineIndex].valid) + { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + else + { + *pceMask = cachedTable[ceEngineIndex].pceMask; + } + } + +done: + if (status != NV_OK && bSet) + { + if (entry != NULL) + { + if (entry->params != NULL) + portMemFree(entry->params); + multimapRemoveItem(&RmapiControlCache.gpusControlCache, entry); + } + } + _cacheLockRelease(lockType); + + return status; +} + +NV_STATUS rmapiControlCacheFreeForControl +( + NvU32 gpuInstance, + NvU32 cmd +) +{ + RmapiControlCacheEntry *entry = NULL; + + _cacheLockAcquire(LOCK_EXCLUSIVE); + + entry = multimapFindItem(&RmapiControlCache.gpusControlCache, gpuInstance, cmd); + + if (entry == NULL) + goto done; + + if (entry->params != NULL) + portMemFree(entry->params); + + multimapRemoveItem(&RmapiControlCache.gpusControlCache, entry); + +done: + _cacheLockRelease(LOCK_EXCLUSIVE); + + return NV_OK; +} + +static NV_STATUS _rmapiControlCacheGetByInput +( + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void* params, + NvU32 paramsSize +) +{ + switch (cmd) + { + case NV2080_CTRL_CMD_GPU_GET_INFO_V2: + return _getInfoCacheHandler(hClient, hObject, cmd, 0, + ((NV2080_CTRL_GPU_GET_INFO_V2_PARAMS*)params)->gpuInfoList, + ((NV2080_CTRL_GPU_GET_INFO_V2_PARAMS*)params)->gpuInfoListSize, + NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE, + NV_FALSE); + + case NV2080_CTRL_CMD_FIFO_GET_INFO: + return _getInfoCacheHandler(hClient, hObject, cmd, 0, + ((NV2080_CTRL_FIFO_GET_INFO_PARAMS*)params)->fifoInfoTbl, + ((NV2080_CTRL_FIFO_GET_INFO_PARAMS*)params)->fifoInfoTblSize, + NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES, + NV_FALSE); + + case NV2080_CTRL_CMD_BUS_GET_INFO_V2: + return _getInfoCacheHandler(hClient, hObject, cmd, 0, + ((NV2080_CTRL_BUS_GET_INFO_V2_PARAMS*)params)->busInfoList, + ((NV2080_CTRL_BUS_GET_INFO_V2_PARAMS*)params)->busInfoListSize, + NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE, + NV_FALSE); + + case NV2080_CTRL_CMD_BIOS_GET_INFO_V2: + return _getInfoCacheHandler(hClient, hObject, cmd, 0, + ((NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS*)params)->biosInfoList, + ((NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS*)params)->biosInfoListSize, + NV2080_CTRL_BIOS_INFO_MAX_SIZE, + NV_FALSE); + + case NV2080_CTRL_CMD_CE_GET_PHYSICAL_CAPS: + return _getCePhysicalCapsHandler(hClient, hObject, 0, + ((NV2080_CTRL_CE_GET_PHYSICAL_CAPS_PARAMS*)params)->ceEngineType, + ((NV2080_CTRL_CE_GET_PHYSICAL_CAPS_PARAMS*)params)->capsTbl, + NV_FALSE); + + case NV2080_CTRL_CMD_CE_GET_CE_PCE_MASK: + return _getCePceMaskHandler(hClient, hObject, 0, + ((NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS*)params)->ceEngineType, + &((NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS*)params)->pceMask, + NV_FALSE); + + 
case NV2080_CTRL_CMD_GPU_GET_NAME_STRING: + return _gpuNameStringGet(hClient, hObject, params); + + case NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED: + return _rmapiControlCacheGetByInputTemplateMethod(hClient, hObject, cmd, 0, + params, sizeof(DispSystemGetSupportedCacheEntry), + _dispSystemGetSupportedCacheHandler, NV_FALSE); + + case NV0073_CTRL_CMD_SYSTEM_GET_INTERNAL_DISPLAYS: + return _rmapiControlCacheGetByInputTemplateMethod(hClient, hObject, cmd, 0, + params, sizeof(DispSystemGetInternalDisplaysCacheEntry), + _dispSystemGetInternalDisplaysCacheHandler, NV_FALSE); + + case NV0073_CTRL_CMD_SPECIFIC_GET_TYPE: + return _rmapiControlCacheGetByInputTemplateMethod(hClient, hObject, cmd, 0, + params, sizeof(DispSpecificGetTypeCacheTable), + _dispSpecificGetTypeCacheHandler, NV_FALSE); + case NV0073_CTRL_CMD_DP_GET_CAPS: + return _rmapiControlCacheGetByInputTemplateMethod(hClient, hObject, cmd, 0, + params, sizeof(DispDpGetCapsCacheTable), + _dispDpGetCapsCacheHandler, NV_FALSE); + default: + NV_PRINTF(LEVEL_WARNING, "No implementation for cacheable by input cmd 0x%x\n", cmd); + return NV_ERR_OBJECT_NOT_FOUND; + } +} + +NV_STATUS _rmapiControlCacheSetByInput +( + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvU32 rmctrlFlags, + void* params, + NvU32 paramsSize +) +{ + switch (cmd) + { + case NV2080_CTRL_CMD_GPU_GET_INFO_V2: + return _getInfoCacheHandler(hClient, hObject, cmd, rmctrlFlags, + ((NV2080_CTRL_GPU_GET_INFO_V2_PARAMS*)params)->gpuInfoList, + ((NV2080_CTRL_GPU_GET_INFO_V2_PARAMS*)params)->gpuInfoListSize, + NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE, + NV_TRUE); + + case NV2080_CTRL_CMD_FIFO_GET_INFO: + return _getInfoCacheHandler(hClient, hObject, cmd, rmctrlFlags, + ((NV2080_CTRL_FIFO_GET_INFO_PARAMS*)params)->fifoInfoTbl, + ((NV2080_CTRL_FIFO_GET_INFO_PARAMS*)params)->fifoInfoTblSize, + NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES, + NV_TRUE); + + case NV2080_CTRL_CMD_BUS_GET_INFO_V2: + return _getInfoCacheHandler(hClient, hObject, cmd, rmctrlFlags, + ((NV2080_CTRL_BUS_GET_INFO_V2_PARAMS*)params)->busInfoList, + ((NV2080_CTRL_BUS_GET_INFO_V2_PARAMS*)params)->busInfoListSize, + NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE, + NV_TRUE); + + case NV2080_CTRL_CMD_BIOS_GET_INFO_V2: + return _getInfoCacheHandler(hClient, hObject, cmd, rmctrlFlags, + ((NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS*)params)->biosInfoList, + ((NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS*)params)->biosInfoListSize, + NV2080_CTRL_BIOS_INFO_MAX_SIZE, + NV_TRUE); + + case NV2080_CTRL_CMD_CE_GET_PHYSICAL_CAPS: + return _getCePhysicalCapsHandler(hClient, hObject, rmctrlFlags, + ((NV2080_CTRL_CE_GET_PHYSICAL_CAPS_PARAMS*)params)->ceEngineType, + ((NV2080_CTRL_CE_GET_PHYSICAL_CAPS_PARAMS*)params)->capsTbl, + NV_TRUE); + + case NV2080_CTRL_CMD_CE_GET_CE_PCE_MASK: + return _getCePceMaskHandler(hClient, hObject, rmctrlFlags, + ((NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS*)params)->ceEngineType, + &((NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS*)params)->pceMask, + NV_TRUE); + + case NV2080_CTRL_CMD_GPU_GET_NAME_STRING: + return _gpuNameStringSet(hClient, hObject, rmctrlFlags, params); + + case NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED: + return _rmapiControlCacheGetByInputTemplateMethod(hClient, hObject, cmd, rmctrlFlags, + params, sizeof(DispSystemGetSupportedCacheEntry), + _dispSystemGetSupportedCacheHandler, NV_TRUE); + + case NV0073_CTRL_CMD_SYSTEM_GET_INTERNAL_DISPLAYS: + return _rmapiControlCacheGetByInputTemplateMethod(hClient, hObject, cmd, rmctrlFlags, + params, sizeof(DispSystemGetInternalDisplaysCacheEntry), + _dispSystemGetInternalDisplaysCacheHandler, 
NV_TRUE); + + case NV0073_CTRL_CMD_SPECIFIC_GET_TYPE: + return _rmapiControlCacheGetByInputTemplateMethod(hClient, hObject, cmd, rmctrlFlags, + params, sizeof(DispSpecificGetTypeCacheTable), + _dispSpecificGetTypeCacheHandler, NV_TRUE); + case NV0073_CTRL_CMD_DP_GET_CAPS: + return _rmapiControlCacheGetByInputTemplateMethod(hClient, hObject, cmd, rmctrlFlags, + params, sizeof(DispDpGetCapsCacheTable), + _dispDpGetCapsCacheHandler, NV_TRUE); + default: + NV_PRINTF(LEVEL_WARNING, "No implementation for cacheable by input cmd 0x%x\n", cmd); + return NV_ERR_OBJECT_NOT_FOUND; + } +} + +/*! + * Look up cached params for a given (hClient, hObject, cmd) triple. + * This function does not perform any checks against the RMCTRL export tables. + * + * @param[in] paramsSize size of parameters to copy out to + * @param[out] params returned cached RMCTRL parameter data + */ +NV_STATUS rmapiControlCacheGetUnchecked +( + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void* params, + NvU32 paramsSize, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status = NV_OK; + + if (RmapiControlCache.mode == NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_MODE_VERIFY_ONLY) + return NV_ERR_OBJECT_NOT_FOUND; + + status = _rmapiControlCacheGetAny(hClient, hObject, cmd, params, paramsSize, pSecInfo); + + NV_PRINTF(LEVEL_INFO, "control cache get for 0x%x 0x%x 0x%x status: 0x%x\n", hClient, hObject, cmd, status); + return status; +} + +/*! + * Look up cached params for a given (hClient, hObject, cmd) triple. + * This function checks passed parameters against RMCTRL export tables. It treats a control + * as CACHEABLE or CACHEABLE_BY_INPUT depending on RMCTRL flags from the tables. + * + * @param[in] paramsSize size of parameters to copy out to + * @param[out] params returned cached RMCTRL parameter data + */ +NV_STATUS rmapiControlCacheGet +( + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void* params, + NvU32 paramsSize, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status = NV_OK; + NvU32 flags; + NvU32 ctrlParamsSize; + + if (RmapiControlCache.mode == NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_MODE_VERIFY_ONLY) + return NV_ERR_OBJECT_NOT_FOUND; + + status = rmapiutilGetControlInfo(cmd, &flags, NULL, &ctrlParamsSize); + if (status != NV_OK) + goto done; + + NV_CHECK_OR_ELSE(LEVEL_ERROR, + (params != NULL && paramsSize == ctrlParamsSize), + status = NV_ERR_INVALID_PARAMETER; goto done); + + status = rmControlValidateClientPrivilegeAccess(hClient, hObject, cmd, flags, pSecInfo); + if (status != NV_OK) + goto done; + + switch ((flags & RMCTRL_FLAGS_CACHEABLE_ANY)) + { + case RMCTRL_FLAGS_CACHEABLE: + status = _rmapiControlCacheGetCacheable(hClient, hObject, cmd, params, paramsSize); + break; + case RMCTRL_FLAGS_CACHEABLE_BY_INPUT: + status = _rmapiControlCacheGetByInput(hClient, hObject, cmd, params, paramsSize); + break; + default: + NV_PRINTF(LEVEL_ERROR, "Invalid cacheable flag 0x%x for cmd 0x%x\n", flags, cmd); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + +done: + NV_PRINTF(LEVEL_INFO, "control cache get for 0x%x 0x%x 0x%x status: 0x%x\n", hClient, hObject, cmd, status); + return status; +} + +/*! + * Try to set cached params for a (hClient, hObject, cmd) triple. + * If there is an existing cache entry for the triple, the entry is unmodified. + * This function checks passed parameters against RMCTRL export tables. It treats a control + * as CACHEABLE or CACHEABLE_BY_INPUT depending on known RMCTRL flags from the tables. 
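+ *
+ * A typical call site (handles hypothetical) records the output of a control
+ * that has just completed successfully:
+ *   rmapiControlCacheSet(hClient, hSubdevice, cmd, pParams, paramsSize);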
+ * + * @param[in] paramsSize size of parameters to allocate for cache entry + * @param[in] params data for the cached parameters + */ +NV_STATUS rmapiControlCacheSet +( + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void* params, + NvU32 paramsSize +) +{ + NvU32 flags; + NvU32 ctrlParamsSize; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, rmapiutilGetControlInfo(cmd, &flags, NULL, &ctrlParamsSize)); + + NV_CHECK_OR_RETURN(LEVEL_ERROR, + (params != NULL && paramsSize == ctrlParamsSize), + NV_ERR_INVALID_PARAMETER); + + return rmapiControlCacheSetUnchecked(hClient, hObject, cmd, params, paramsSize, flags); +} + +/*! + * Try to set cached params for a (hClient, hObject, cmd) triple. + * If there is an existing cache entry for the triple, the entry is unmodified. + * This function does not perform any checks against the RMCTRL export tables. It trusts caller + * to pass the correct paramsSize and rmctrlFlags. + * + * @param[in] paramsSize size of parameters to allocate for cache entry + * @param[in] params data for the cached parameters + * @param[in] rmctrlFlags RMCTRL_FLAGS_CACHEABLE_* flag + */ +NV_STATUS rmapiControlCacheSetUnchecked +( + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void* params, + NvU32 paramsSize, + NvU32 rmctrlFlags +) +{ + NV_STATUS status = NV_OK; + + switch ((rmctrlFlags & RMCTRL_FLAGS_CACHEABLE_ANY)) + { + case RMCTRL_FLAGS_CACHEABLE: + status = _rmapiControlCacheSet(hClient, hObject, cmd, rmctrlFlags, params, paramsSize); + break; + case RMCTRL_FLAGS_CACHEABLE_BY_INPUT: + status = _rmapiControlCacheSetByInput(hClient, hObject, cmd, rmctrlFlags, params, paramsSize); + break; + default: + NV_PRINTF(LEVEL_ERROR, "Invalid cacheable flag 0x%x for cmd 0x%x\n", rmctrlFlags, cmd); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + +done: + NV_PRINTF(LEVEL_INFO, "control cache set for 0x%x 0x%x 0x%x status: 0x%x\n", hClient, hObject, cmd, status); + return status; +} + +// Need to hold rmapi control cache write lock +static void _freeSubmap(GpusControlCache *pMap, GpusControlCacheSubmap* pSubmap, NvBool bFreePersistent) +{ + /* (Sub)map modification invalidates the iterator, so we have to restart */ + while (NV_TRUE) + { + GpusControlCacheIter it = multimapSubmapIterItems(pMap, pSubmap); + NvBool bHasNext; + + for (bHasNext = multimapItemIterNext(&it); bHasNext; bHasNext = multimapItemIterNext(&it)) + { + RmapiControlCacheEntry* entry = it.pValue; + + if ((entry->rmctrlFlags & RMCTRL_FLAGS_PERSISTENT_CACHEABLE) && !bFreePersistent) + { + continue; + } + + portMemFree(entry->params); + multimapRemoveItem(pMap, entry); + + // Restart iterating + break; + } + + // Reached the end of the submap without removing any items, or can't remove any items + if (!bHasNext) + break; + } + + if (multimapCountSubmapItems(pMap, pSubmap) == 0) + { + multimapRemoveSubmap(pMap, pSubmap); + } +} + +static void _rmapiControlCacheFreeGpuCache(NvU32 gpuInst, NvBool bFreePersistent) +{ + GpusControlCacheSubmap *submap; + + submap = multimapFindSubmap(&RmapiControlCache.gpusControlCache, gpuInst); + if (submap != NULL) + _freeSubmap(&RmapiControlCache.gpusControlCache, submap, bFreePersistent); +} + +void rmapiControlCacheFreeNonPersistentCacheForGpu +( + NvU32 gpuInst +) +{ + _cacheLockAcquire(LOCK_EXCLUSIVE); + + _rmapiControlCacheFreeGpuCache(gpuInst, NV_FALSE); + + _cacheLockRelease(LOCK_EXCLUSIVE); +} + +void rmapiControlCacheFreeAllCacheForGpu +( + NvU32 gpuInst +) +{ + _cacheLockAcquire(LOCK_EXCLUSIVE); + + _rmapiControlCacheFreeGpuCache(gpuInst, NV_TRUE); + + 
_cacheLockRelease(LOCK_EXCLUSIVE); +} + +void rmapiControlCacheFreeClientEntry(NvHandle hClient) +{ + _cacheLockAcquire(LOCK_EXCLUSIVE); + _rmapiControlCacheFreeGpuAttrForClient(hClient); + _cacheLockRelease(LOCK_EXCLUSIVE); +} + +void rmapiControlCacheFreeObjectEntry(NvHandle hClient, NvHandle hObject) +{ + if (hClient == hObject) + { + rmapiControlCacheFreeClientEntry(hClient); + return; + } + + _cacheLockAcquire(LOCK_EXCLUSIVE); + _rmapiControlCacheFreeGpuAttrForObject(hClient, hObject); + _cacheLockRelease(LOCK_EXCLUSIVE); +} + +void rmapiControlCacheFree(void) +{ + GpusControlCacheIter it; + + it = multimapItemIterAll(&RmapiControlCache.gpusControlCache); + while (multimapItemIterNext(&it)) + { + RmapiControlCacheEntry* entry = it.pValue; + + portMemFree(entry->params); + } + + multimapDestroy(&RmapiControlCache.gpusControlCache); + mapDestroy(&RmapiControlCache.objectToGpuAttrMap); + portSyncRwLockDestroy(RmapiControlCache.pLock); + RmapiControlCache.pLock = NULL; +} + +void rmapiControlCacheSetMode(NvU32 mode) +{ + NV_PRINTF(LEVEL_INFO, "Set rmapi control cache mode to 0x%x\n", mode); + + _cacheLockAcquire(LOCK_EXCLUSIVE); + RmapiControlCache.mode = mode; + _cacheLockRelease(LOCK_EXCLUSIVE); +} + +NvU32 rmapiControlCacheGetMode(void) +{ + return RmapiControlCache.mode; +} diff --git a/src/nvidia/src/kernel/rmapi/rmapi_cache_handlers.c b/src/nvidia/src/kernel/rmapi/rmapi_cache_handlers.c new file mode 100644 index 0000000..52ae87c --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/rmapi_cache_handlers.c @@ -0,0 +1,200 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "rmapi/rmapi_cache_handlers.h" +#include "nvport/nvport.h" +#include "containers/list.h" +#include "containers/map.h" + +// +// NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED is CACHEABLE_BY_INPUT instead of CACHEABLE, +// because parameter subDeviceInstance, although unused in non-SLI setup, is user-provided +// and it's not expected to be changed. 
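+//
+// Like every handler in this file, it follows one contract: with bSet set,
+// copy the provided params into the cache entry and mark it valid; with bSet
+// clear, fail with NV_ERR_OBJECT_NOT_FOUND if the entry is invalid, otherwise
+// copy the cached values back out. In sketch form:
+//
+//     if (bSet) { entry->field = pParams->field; entry->valid = NV_TRUE; }
+//     else      { pParams->field = entry->field; }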
+//
+NV_STATUS _dispSystemGetSupportedCacheHandler
+(
+    void *cachedEntry,
+    void *pProvidedParams,
+    NvBool bSet
+)
+{
+    NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *pParams = pProvidedParams;
+    DispSystemGetSupportedCacheEntry *cacheEntry = cachedEntry;
+
+    if (!bSet && !cacheEntry->valid)
+        return NV_ERR_OBJECT_NOT_FOUND;
+
+    if (bSet)
+    {
+        cacheEntry->displayMask = pParams->displayMask;
+        cacheEntry->displayMaskDDC = pParams->displayMaskDDC;
+        cacheEntry->valid = NV_TRUE;
+    }
+    else
+    {
+        pParams->displayMask = cacheEntry->displayMask;
+        pParams->displayMaskDDC = cacheEntry->displayMaskDDC;
+    }
+
+    return NV_OK;
+}
+
+//
+// NV0073_CTRL_CMD_SYSTEM_GET_INTERNAL_DISPLAYS is CACHEABLE_BY_INPUT instead of CACHEABLE,
+// because parameter subDeviceInstance, although unused in non-SLI setup, is user-provided
+// and it's not expected to be changed.
+//
+NV_STATUS _dispSystemGetInternalDisplaysCacheHandler
+(
+    void *cachedEntry,
+    void *pProvidedParams,
+    NvBool bSet
+)
+{
+    NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS *pParams = pProvidedParams;
+    DispSystemGetInternalDisplaysCacheEntry *cacheEntry = cachedEntry;
+
+    if (!bSet && !cacheEntry->valid)
+        return NV_ERR_OBJECT_NOT_FOUND;
+
+    if (bSet)
+    {
+        cacheEntry->internalDisplaysMask = pParams->internalDisplaysMask;
+        cacheEntry->availableInternalDisplaysMask = pParams->availableInternalDisplaysMask;
+        cacheEntry->valid = NV_TRUE;
+    }
+    else
+    {
+        pParams->internalDisplaysMask = cacheEntry->internalDisplaysMask;
+        pParams->availableInternalDisplaysMask = cacheEntry->availableInternalDisplaysMask;
+    }
+
+    return NV_OK;
+}
+
+//
+// NV0073_CTRL_CMD_DP_GET_CAPS Cache Handler.
+//
+NV_STATUS _dispDpGetCapsCacheHandler
+(
+    void *cachedEntry,
+    void *pProvidedParams,
+    NvBool bSet
+)
+{
+    NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *pParams = pProvidedParams;
+    DispDpGetCapsCacheTable *pCacheTable = cachedEntry;
+    NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *src;
+    NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *dst;
+
+    // Validate sorIndex before it is used to index cachedEntries.
+    if (pParams->sorIndex >= NV_ARRAY_ELEMENTS(pCacheTable->cachedEntries))
+        return NV_ERR_INVALID_ARGUMENT;
+
+    if (!bSet && !pCacheTable->cachedEntries[pParams->sorIndex].valid)
+        return NV_ERR_OBJECT_NOT_FOUND;
+
+    src = (bSet) ? pParams
+                 : &pCacheTable->cachedEntries[pParams->sorIndex].params;
+    dst = (bSet) ? &pCacheTable->cachedEntries[pParams->sorIndex].params
+                 : pParams;
+
+    // Verify mode.
+    if (bSet)
+    {
+        if (pCacheTable->cachedEntries[pParams->sorIndex].valid)
+        {
+            //
+            // Assert that all fields match between src and dst
+            //
+            // NOTE: subdeviceInstance is not compared, since it's not used in non-SLI systems
+            // and there's no need to compare bIsPC2Disabled, since it's deprecated and unused.
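+            //
+            // A mismatch below would mean the control returned different data
+            // for the same sorIndex, i.e. its output is not a pure function of
+            // the input and the CACHEABLE_BY_INPUT flag is wrong for it.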
+ // + NV_ASSERT((src->sorIndex == dst->sorIndex) && + (src->maxLinkRate == dst->maxLinkRate) && + (src->dpVersionsSupported == dst->dpVersionsSupported) && + (src->UHBRSupportedByGpu == dst->UHBRSupportedByGpu) && + (src->bIsMultistreamSupported == dst->bIsMultistreamSupported) && + (src->bIsSCEnabled == dst->bIsSCEnabled) && + (src->bHasIncreasedWatermarkLimits == dst->bHasIncreasedWatermarkLimits) && + (src->isSingleHeadMSTSupported == dst->isSingleHeadMSTSupported) && + (src->bFECSupported == dst->bFECSupported) && + (src->bIsTrainPhyRepeater == dst->bIsTrainPhyRepeater) && + (src->bOverrideLinkBw == dst->bOverrideLinkBw) && + (src->bUseRgFlushSequence == dst->bUseRgFlushSequence) && + (src->bSupportDPDownSpread == dst->bSupportDPDownSpread) && + (src->bAvoidHBR3 == dst->bAvoidHBR3) && + (src->DSC.bDscSupported == dst->DSC.bDscSupported) && + (src->DSC.encoderColorFormatMask == dst->DSC.encoderColorFormatMask) && + (src->DSC.lineBufferSizeKB == dst->DSC.lineBufferSizeKB) && + (src->DSC.rateBufferSizeKB == dst->DSC.rateBufferSizeKB) && + (src->DSC.bitsPerPixelPrecision == dst->DSC.bitsPerPixelPrecision) && + (src->DSC.maxNumHztSlices == dst->DSC.maxNumHztSlices) && + (src->DSC.lineBufferBitDepth == dst->DSC.lineBufferBitDepth)); + } + + pCacheTable->cachedEntries[pParams->sorIndex].valid = NV_TRUE; + } + + portMemCopy(dst, sizeof(*pParams), src, sizeof(*pParams)); + + return NV_OK; +} + +// +// NV0073_CTRL_CMD_SPECIFIC_GET_TYPE Cache Handler. +// +NV_STATUS _dispSpecificGetTypeCacheHandler +( + void *cachedEntry, + void *pProvidedParams, + NvBool bSet +) +{ + NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS *pParams = pProvidedParams; + DispSpecificGetTypeCacheTable *pCacheTable = cachedEntry; + NvU32 cacheEntryIdx; + + if (!ONEBITSET(pParams->displayId)) + return NV_ERR_INVALID_ARGUMENT; + + cacheEntryIdx = BIT_IDX_32(pParams->displayId); + + if (!bSet && !pCacheTable->cachedEntries[cacheEntryIdx].valid) + return NV_ERR_OBJECT_NOT_FOUND; + + if (bSet) + { + if (pCacheTable->cachedEntries[cacheEntryIdx].valid) + { + // Verify mode. + NV_ASSERT(pCacheTable->cachedEntries[cacheEntryIdx].displayType == pParams->displayType); + } + + pCacheTable->cachedEntries[cacheEntryIdx].displayType = pParams->displayType; + pCacheTable->cachedEntries[cacheEntryIdx].valid = NV_TRUE; + } + else + { + pParams->displayType = pCacheTable->cachedEntries[cacheEntryIdx].displayType; + } + + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/rmapi/rmapi_finn.c b/src/nvidia/src/kernel/rmapi/rmapi_finn.c new file mode 100644 index 0000000..6fa3b79 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/rmapi_finn.c @@ -0,0 +1,609 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "resserv/resserv.h" +#include "rmapi/rmapi.h" + +#include "ctrl/ctrlxxxx.h" + +#include "g_finn_rm_api.h" + +/** + * Serialize parameters for servicing command + * + * If unserialized params are passed in, try to serialize into CALL_CONTEXT. + * If serialized params are passed in, set CALL_CONTEXT variables. + * + * Caller's parameters pointer and size will be overridden and restored on *CtrlUp call + */ +NV_STATUS +serverSerializeCtrlDown +( + CALL_CONTEXT *pCallContext, + NvU32 cmd, + void **ppParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + if (!(*flags & NVOS54_FLAGS_FINN_SERIALIZED)) + { + NV_STATUS status; + NvU8 *pSerBuffer; + const NvU32 interface_id = (DRF_VAL(XXXX, _CTRL_CMD, _CLASS, cmd) << 8) | + DRF_VAL(XXXX, _CTRL_CMD, _CATEGORY, cmd); + const NvU32 message_id = DRF_VAL(XXXX, _CTRL_CMD, _INDEX, cmd); + NvU32 serializedSize = (NvU32)FinnRmApiGetSerializedSize(interface_id, message_id, *ppParams); + + pCallContext->pDeserializedParams = *ppParams; + pCallContext->deserializedSize = *pParamsSize; + + // Nothing to do if FINN doesn't support serializing this control + if (serializedSize == 0) + return NV_OK; + + // Assume we've already serialized if it exists + if (pCallContext->pSerializedParams != NULL) + { + NV_ASSERT_OR_RETURN(pCallContext->serializedSize == serializedSize, NV_ERR_INVALID_STATE); + return NV_OK; + } + else + { + pSerBuffer = portMemAllocNonPaged(serializedSize); + if (pSerBuffer == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + pCallContext->pSerializedParams = (FINN_RM_API *)pSerBuffer; + } + + status = FinnRmApiSerializeDown(interface_id, message_id, *ppParams, pSerBuffer, serializedSize); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Serialization failed for cmd 0x%06x with status %s (0x%02x)\n", + cmd, nvstatusToString(status), status); + portMemFree(pCallContext->pSerializedParams); + pCallContext->pSerializedParams = NULL; + return status; + } + + pCallContext->serializedSize = serializedSize; + *flags |= NVOS54_FLAGS_FINN_SERIALIZED; + + // Override passed in parameters + *ppParams = pCallContext->pSerializedParams; + *pParamsSize = pCallContext->serializedSize; + pCallContext->bLocalSerialization = NV_TRUE; + } + else + { + // Params are already serialized, just copy them in + pCallContext->pSerializedParams = *ppParams; + pCallContext->serializedSize = *pParamsSize; + } + + return NV_OK; +} + +/** + * Deserialize parameters for servicing command + * + * If serialized params are passed in, deserialize them into CALL_CONTEXT. + * If deserialized params are passed in, set CALL_CONTEXT variables. 
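+ *
+ * The FINN entry points are keyed by (interface_id, message_id): interface_id
+ * packs the control's class and category fields and message_id is the
+ * control's index, all extracted from cmd via the DRF macros below.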
+ * + * Caller's parameters pointer and size will be overridden and restored on *CtrlUp call + */ +NV_STATUS +serverDeserializeCtrlDown +( + CALL_CONTEXT *pCallContext, + NvU32 cmd, + void **ppParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + if (*flags & NVOS54_FLAGS_FINN_SERIALIZED) + { + NV_STATUS status; + NvU8 *pSerBuffer; + void *pDeserParams; + const NvU32 interface_id = (DRF_VAL(XXXX, _CTRL_CMD, _CLASS, cmd) << 8) | + DRF_VAL(XXXX, _CTRL_CMD, _CATEGORY, cmd); + const NvU32 message_id = DRF_VAL(XXXX, _CTRL_CMD, _INDEX, cmd); + NvU32 unserializedSize = (NvU32)FinnRmApiGetUnserializedSize(interface_id, message_id); + + // Report error if FINN can't deserialize but RM is reporting the control as serialized + if (unserializedSize == 0) + return NV_ERR_LIB_RM_VERSION_MISMATCH; + + // Assume we've already deserialized if it exists + if (pCallContext->pDeserializedParams != NULL) + { + NV_ASSERT_OR_RETURN(pCallContext->deserializedSize == unserializedSize, NV_ERR_INVALID_STATE); + return NV_OK; + } + else + { + pDeserParams = portMemAllocNonPaged(unserializedSize); + if (pDeserParams == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + pCallContext->pDeserializedParams = pDeserParams; + } + + pSerBuffer = (NvU8 *)*ppParams; + + status = FinnRmApiDeserializeDown(pSerBuffer, *pParamsSize, pDeserParams, unserializedSize); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Deserialization failed for cmd 0x%06x with status %s (0x%02x)\n", + cmd, nvstatusToString(status), status); + portMemFree(pCallContext->pDeserializedParams); + pCallContext->pDeserializedParams = NULL; + return status; + } + + pCallContext->deserializedSize = unserializedSize; + pCallContext->pSerializedParams = *ppParams; + pCallContext->serializedSize = *pParamsSize; + + // Override passed in parameters + *ppParams = pCallContext->pDeserializedParams; + *pParamsSize = pCallContext->deserializedSize; + } + else + { + // Not serialized, copy into deser params + pCallContext->pDeserializedParams = *ppParams; + pCallContext->deserializedSize = *pParamsSize; + } + + // Automatically request reserialization in case the control goes to GSP + pCallContext->bReserialize = NV_TRUE; + + return NV_OK; +} + +/** + * Serialize parameters for returning from command + * + * If serialized params are passed in, serialize into them from CALL_CONTEXT deserialized params. + * If deserialized params are passed in, unset the serialized flag. We expect that this means that + * the parameters were serialized locally and not by the caller. + * + * Caller's parameters pointer and size will be restored from *CtrlDown call + */ +NV_STATUS +serverSerializeCtrlUp +( + CALL_CONTEXT *pCallContext, + NvU32 cmd, + void **ppParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + // + // We add NVOS54_FLAGS_FINN_SERIALIZED if the control can be serialized but wasn't in serverSerializeCtrlDown + // We don't want to return a serialized buffer if one wasn't given to us in the first place + // + if ((*flags & NVOS54_FLAGS_FINN_SERIALIZED) && !pCallContext->bLocalSerialization) + { + NV_STATUS status; + NvU8 *pSerBuffer; + void *pDeserBuffer; + const NvU32 interface_id = (DRF_VAL(XXXX, _CTRL_CMD, _CLASS, cmd) << 8) | + DRF_VAL(XXXX, _CTRL_CMD, _CATEGORY, cmd); + const NvU32 message_id = DRF_VAL(XXXX, _CTRL_CMD, _INDEX, cmd); + + // Should be serialized at this point. 
Expect that serializedSize is set otherwise something is wrong + if (pCallContext->serializedSize == 0) + return NV_ERR_INVALID_STATE; + + pSerBuffer = (NvU8 *) pCallContext->pSerializedParams; + pDeserBuffer = (NvU8 *) pCallContext->pDeserializedParams; + + if (pSerBuffer == NULL) + return NV_ERR_INVALID_STATE; + + // It's possible that we have nothing to do if we're just passing parameters along + if (pDeserBuffer == NULL) + return NV_OK; + + status = FinnRmApiSerializeUp(interface_id, message_id, pDeserBuffer, pSerBuffer, pCallContext->serializedSize); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Serialization failed for cmd 0x%06x with status %s (0x%02x)\n", + cmd, nvstatusToString(status), status); + return status; + } + + *ppParams = pCallContext->pSerializedParams; + *pParamsSize = pCallContext->serializedSize; + } + else if (*flags & NVOS54_FLAGS_FINN_SERIALIZED) + { + // We serialized the control. Unset the flag + *flags &= ~NVOS54_FLAGS_FINN_SERIALIZED; + } + + return NV_OK; +} + +/** + * Deserialize parameters for returning from command + * + * If serialized params are passed in with the serialized flag set, do nothing. We expect this means we are just + * passing along the parameters to another layer. + * If deserialized params are passed in with the serialized flag set, unset flag and deserialize into params. + * We expect that this means that the parameters were serialized locally and not by the caller. + * If deserialized params are passed in without the flag set just copy to CALL_CONTEXT + * + * Caller's parameters pointer and size will be restored from *CtrlDown call + */ +NV_STATUS +serverDeserializeCtrlUp +( + CALL_CONTEXT *pCallContext, + NvU32 cmd, + void **ppParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + if (*flags & NVOS54_FLAGS_FINN_SERIALIZED) + { + NV_STATUS status; + NvU8 *pSerBuffer; + void *pDeserBuffer; + const NvU32 interface_id = (DRF_VAL(XXXX, _CTRL_CMD, _CLASS, cmd) << 8) | + DRF_VAL(XXXX, _CTRL_CMD, _CATEGORY, cmd); + const NvU32 message_id = DRF_VAL(XXXX, _CTRL_CMD, _INDEX, cmd); + NvU32 unserializedSize = (NvU32)FinnRmApiGetUnserializedSize(interface_id, message_id); + + if (!pCallContext->bLocalSerialization) + { + // We didn't serialize down, so don't deserialize up + return NV_OK; + } + else + { + // Serialized, but not passing back up serialized params, clear flag + *flags &= ~NVOS54_FLAGS_FINN_SERIALIZED; + } + + // Report error if FINN can't deserialize but RM is reporting the control as serialized + if (unserializedSize == 0) + return NV_ERR_LIB_RM_VERSION_MISMATCH; + + // DeserializeUp expects pointers to be set up already + pDeserBuffer = pCallContext->pDeserializedParams; + if (pDeserBuffer == NULL) + return NV_ERR_INVALID_STATE; + + pSerBuffer = (NvU8 *) pCallContext->pSerializedParams; + if (pSerBuffer == NULL) + return NV_ERR_INVALID_STATE; + + status = FinnRmApiDeserializeUp(pSerBuffer, pCallContext->serializedSize, pDeserBuffer, unserializedSize); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Deserialization failed for cmd 0x%06x with status %s (0x%02x)\n", + cmd, nvstatusToString(status), status); + return status; + } + + pCallContext->deserializedSize = unserializedSize; + + *ppParams = pCallContext->pDeserializedParams; + *pParamsSize = pCallContext->deserializedSize; + } + + return NV_OK; +} + +NV_STATUS +serverSerializeAllocDown +( + CALL_CONTEXT *pCallContext, + NvU32 classId, + void **ppParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + const NvU32 interface_id = NV_RM_ALLOC_INTERFACE_INTERFACE_ID; + 
const NvU32 message_id = classId; + if (!(*flags & RMAPI_ALLOC_FLAGS_SERIALIZED)) + { + NV_STATUS status; + NvU8 *pSerBuffer; + NvU32 serializedSize = (NvU32)FinnRmApiGetSerializedSize(interface_id, message_id, *ppParams); + + pCallContext->pDeserializedParams = *ppParams; + pCallContext->deserializedSize = *pParamsSize; + + // Nothing to do if FINN doesn't support serializing this allocation + if (serializedSize == 0) + return NV_OK; + + // Assume we've already serialized if it exists + if (pCallContext->pSerializedParams != NULL) + { + NV_ASSERT_OR_RETURN(pCallContext->serializedSize == serializedSize, NV_ERR_INVALID_STATE); + return NV_OK; + } + else + { + pSerBuffer = portMemAllocNonPaged(serializedSize); + if (pSerBuffer == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + pCallContext->pSerializedParams = (FINN_RM_API *)pSerBuffer; + } + + status = FinnRmApiSerializeDown(interface_id, message_id, *ppParams, pSerBuffer, serializedSize); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Serialization failed for classId 0x%06x with status %s (0x%02x)\n", + classId, nvstatusToString(status), status); + portMemFree(pCallContext->pSerializedParams); + pCallContext->pSerializedParams = NULL; + return status; + } + + pCallContext->serializedSize = serializedSize; + *flags |= RMAPI_ALLOC_FLAGS_SERIALIZED; + *ppParams = pCallContext->pSerializedParams; + *pParamsSize = serializedSize; + pCallContext->bLocalSerialization = NV_TRUE; + } + else + { + // Params are already serialized, just copy them in + pCallContext->pSerializedParams = *ppParams; + pCallContext->serializedSize = *pParamsSize; + } + + return NV_OK; +} + + +NV_STATUS +serverDeserializeAllocDown +( + CALL_CONTEXT *pCallContext, + NvU32 classId, + void **ppParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + const NvU32 interface_id = NV_RM_ALLOC_INTERFACE_INTERFACE_ID; + const NvU32 message_id = classId; + if (*flags & RMAPI_ALLOC_FLAGS_SERIALIZED) + { + NV_STATUS status; + NvU8 *pSerBuffer; + void *pDeserParams; + NvU32 unserializedSize = (NvU32)FinnRmApiGetUnserializedSize(interface_id, message_id); + + // Report error if FINN can't deserialize but RM is reporting the alloc as serialized + if (unserializedSize == 0) + return NV_ERR_LIB_RM_VERSION_MISMATCH; + + // Assume we've already deserialized if it exists + if (pCallContext->pDeserializedParams != NULL) + { + NV_ASSERT_OR_RETURN(pCallContext->deserializedSize == unserializedSize, NV_ERR_INVALID_STATE); + return NV_OK; + } + else + { + pDeserParams = portMemAllocNonPaged(unserializedSize); + if (pDeserParams == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + pCallContext->pDeserializedParams = pDeserParams; + } + + pSerBuffer = (NvU8 *)*ppParams; + + status = FinnRmApiDeserializeDown(pSerBuffer, *pParamsSize, pDeserParams, unserializedSize); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Deserialization failed for classId 0x%06x with status %s (0x%02x)\n", + classId, nvstatusToString(status), status); + portMemFree(pCallContext->pDeserializedParams); + pCallContext->pDeserializedParams = NULL; + return status; + } + + pCallContext->serializedSize = *pParamsSize; + pCallContext->deserializedSize = unserializedSize; + pCallContext->pSerializedParams = *ppParams; + + *ppParams = pCallContext->pDeserializedParams; + *pParamsSize = pCallContext->deserializedSize; + } + else + { + // Not serialized, copy into deser params + pCallContext->pDeserializedParams = *ppParams; + pCallContext->deserializedSize = *pParamsSize; + } + + // Automatically request 
reserialization in case the alloc goes to GSP + pCallContext->bReserialize = NV_TRUE; + + return NV_OK; +} + +NV_STATUS +serverSerializeAllocUp +( + CALL_CONTEXT *pCallContext, + NvU32 classId, + void **ppParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + // + // We add RMAPI_ALLOC_FLAGS_SERIALIZED if the alloc can be serialized but wasn't in serverSerializeAllocDown + // We don't want to return a serialized buffer if one wasn't given to us in the first place + // Check if pSerializedParams matches pParams to make sure the caller expects serialized info returned + // + if ((*flags & RMAPI_ALLOC_FLAGS_SERIALIZED) && !pCallContext->bLocalSerialization) + { + NV_STATUS status; + NvU8 *pSerBuffer; + void *pDeserBuffer; + const NvU32 interface_id = NV_RM_ALLOC_INTERFACE_INTERFACE_ID; + const NvU32 message_id = classId; + + // Should be serialized at this point. Expect that serializedSize is set otherwise something is wrong + if (pCallContext->serializedSize == 0) + return NV_ERR_INVALID_STATE; + + pSerBuffer = (NvU8 *) pCallContext->pSerializedParams; + pDeserBuffer = (NvU8 *) pCallContext->pDeserializedParams; + + if (pSerBuffer == NULL) + return NV_ERR_INVALID_STATE; + + // It's possible that we have nothing to do if we're just passing parameters along + if (pDeserBuffer == NULL) + return NV_OK; + + status = FinnRmApiSerializeUp(interface_id, message_id, pDeserBuffer, pSerBuffer, pCallContext->serializedSize); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Serialization failed for classId 0x%06x with status %s (0x%02x)\n", + classId, nvstatusToString(status), status); + return status; + } + + *ppParams = pCallContext->pSerializedParams; + *pParamsSize = pCallContext->serializedSize; + } + else if (*flags & RMAPI_ALLOC_FLAGS_SERIALIZED) + { + // We serialized the alloc. 
Unset the flag + *flags &= ~RMAPI_ALLOC_FLAGS_SERIALIZED; + } + + return NV_OK; +} + +NV_STATUS +serverDeserializeAllocUp +( + CALL_CONTEXT *pCallContext, + NvU32 classId, + void **ppParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + const NvU32 interface_id = NV_RM_ALLOC_INTERFACE_INTERFACE_ID; + const NvU32 message_id = classId; + if (*flags & RMAPI_ALLOC_FLAGS_SERIALIZED) + { + NV_STATUS status; + NvU8 *pSerBuffer; + void *pDeserBuffer; + NvU32 unserializedSize = (NvU32)FinnRmApiGetUnserializedSize(interface_id, message_id); + + if (!pCallContext->bLocalSerialization) + { + // We didn't serialize down, so don't deserialize up + return NV_OK; + } + else + { + // Serialized, but not passing back up serialized params, clear flag + *flags &= ~RMAPI_ALLOC_FLAGS_SERIALIZED; + } + + // Report error if FINN can't deserialize but RM is reporting the alloc as serialized + if (unserializedSize == 0) + return NV_ERR_LIB_RM_VERSION_MISMATCH; + + // DeserializeUp expects pointers to be set up already + pDeserBuffer = pCallContext->pDeserializedParams; + if (pDeserBuffer == NULL) + return NV_ERR_INVALID_STATE; + + pSerBuffer = (NvU8 *) pCallContext->pSerializedParams; + if (pSerBuffer == NULL) + return NV_ERR_INVALID_STATE; + + status = FinnRmApiDeserializeUp(pSerBuffer, pCallContext->serializedSize, pDeserBuffer, unserializedSize); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Deserialization failed for classId 0x%06x with status %s (0x%02x)\n", + classId, nvstatusToString(status), status); + return status; + } + + pCallContext->deserializedSize = unserializedSize; + *ppParams = pCallContext->pDeserializedParams; + *pParamsSize = unserializedSize; + } + + return NV_OK; +} + +void +serverFreeSerializeStructures +( + CALL_CONTEXT *pCallContext, + void *pParams +) +{ + if (pCallContext->pSerializedParams != pParams) + portMemFree(pCallContext->pSerializedParams); + if (pCallContext->pDeserializedParams != pParams) + portMemFree(pCallContext->pDeserializedParams); + + pCallContext->pSerializedParams = NULL; + pCallContext->pDeserializedParams = NULL; + pCallContext->serializedSize = 0; + pCallContext->deserializedSize = 0; +} diff --git a/src/nvidia/src/kernel/rmapi/rmapi_specific.c b/src/nvidia/src/kernel/rmapi/rmapi_specific.c new file mode 100644 index 0000000..54e6651 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/rmapi_specific.c @@ -0,0 +1,157 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "rmconfig.h" +#include "g_rmconfig_private.h" +#include "rmapi/rmapi_specific.h" +#include "rmapi/event.h" +#include "resource_desc.h" +#include "resserv/rs_server.h" + +#include "class/cl0005.h" // NV01_EVENT +#include "class/clc574.h" // UVM_CHANNEL_RETAINER + +#include "ctrl/ctrl0002.h" // NV0002_CTRL_CMD_*_CONTEXTDMA + +#include "ctrl/ctrl2080/ctrl2080gr.h" // NV2080_CTRL_CMD_GR_CTXSW_ZCULL_BIND +#include "ctrl/ctrl2080/ctrl2080internal.h" // NV2080_CTRL_CMD_INTERNAL_GR_CTXSW_ZCULL_BIND + +NV_STATUS +rmapiFixupAllocParams +( + RS_RESOURCE_DESC **ppResDesc, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams +) +{ + RS_RESOURCE_DESC *pResDesc = *ppResDesc; + + if ((pResDesc->pClassInfo != NULL) && (pResDesc->pClassInfo->classId == classId(Event))) + { + NV0005_ALLOC_PARAMETERS *pNv0005Params = pRmAllocParams->pAllocParams; + + // + // This field isn't filled out consistently by clients. Some clients specify NV01_EVENT as the class + // and then override it using the subclass in the event parameters, while other clients specify the + // same subclass in both the RmAllocParams and event params. NV01_EVENT isn't a valid class to allocate + // so overwrite it with the subclass from the event params. + // + if (pRmAllocParams->externalClassId == NV01_EVENT) + pRmAllocParams->externalClassId = pNv0005Params->hClass; + + pNv0005Params->hSrcResource = pRmAllocParams->hParent; + + // No support for event and src resource that reside under different clients + if (pNv0005Params->hParentClient != pRmAllocParams->hClient) + pRmAllocParams->hParent = pRmAllocParams->hClient; + + // class id may have changed so refresh the resource descriptor, but make sure it is still an Event + pResDesc = RsResInfoByExternalClassId(pRmAllocParams->externalClassId); + if (pResDesc == NULL || pResDesc->pClassInfo == NULL || pResDesc->pClassInfo->classId != classId(Event)) + return NV_ERR_INVALID_CLASS; + + *ppResDesc = pResDesc; + } + + return NV_OK; +} + +NV_STATUS +serverAllocLookupSecondClient +( + NvU32 externalClassId, + void *pAllocParams, + NvHandle *phSecondClient +) +{ + RS_RESOURCE_DESC *pResDesc = RsResInfoByExternalClassId(externalClassId); + + *phSecondClient = NV01_NULL_OBJECT; + + if ((pAllocParams != NULL) && (pResDesc != NULL) && + (pResDesc->flags & RS_FLAGS_DUAL_CLIENT_LOCK)) + { + switch (externalClassId) + { + case UVM_CHANNEL_RETAINER: + { + *phSecondClient = ((NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS *) pAllocParams)->hClient; + break; + } + case NV01_EVENT: + case NV01_EVENT_OS_EVENT: + case NV01_EVENT_KERNEL_CALLBACK: + case NV01_EVENT_KERNEL_CALLBACK_EX: + { + *phSecondClient = ((NV0005_ALLOC_PARAMETERS *) pAllocParams)->hParentClient; + break; + } + default: + { + // RS_FLAGS_DUAL_CLIENT_LOCK flag requires adding a case statement here. 
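+                // A new dual-client class is handled by an explicit case in
+                // this switch, e.g. (hypothetical class and field):
+                //
+                //     case NV_SOME_DUAL_CLIENT_CLASS:
+                //         *phSecondClient =
+                //             ((NV_SOME_CLASS_ALLOC_PARAMS *)pAllocParams)->hPeerClient;
+                //         break;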
+ NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + } + } + } + + return NV_OK; +} + +NV_STATUS +serverControlLookupSecondClient +( + NvU32 cmd, + void *pControlParams, + RS_CONTROL_COOKIE *pCookie, + NvHandle *phSecondClient +) +{ + *phSecondClient = NV01_NULL_OBJECT; + + if ((pControlParams != NULL) && (pCookie->ctrlFlags & RMCTRL_FLAGS_DUAL_CLIENT_LOCK)) + { + switch (cmd) + { + case NV2080_CTRL_CMD_GR_CTXSW_ZCULL_BIND: + { + *phSecondClient = ((NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS *) pControlParams)->hClient; + break; + } + default: + // RMCTRL_FLAGS_DUAL_CLIENT_LOCK flag requires adding a case statement here. + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + } + } + + return NV_OK; +} + +NvBool +rmapiRmControlCanBeRaisedIrql(NvU32 cmd) +{ + return NV_FALSE; +} + +NvBool +rmapiRmControlCanBeBypassLock(NvU32 cmd) +{ + return NV_FALSE; +} diff --git a/src/nvidia/src/kernel/rmapi/rmapi_stubs.c b/src/nvidia/src/kernel/rmapi/rmapi_stubs.c new file mode 100644 index 0000000..7cb60c5 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/rmapi_stubs.c @@ -0,0 +1,181 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "rmapi/rmapi.h" + + +static NV_STATUS _rmapiAlloc_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, void *pAllocParams, NvU32 paramsSize) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiAllocWithHandle_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle hObject, NvU32 hClass, void *pAllocParams, NvU32 paramsSize) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiAllocWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, NvP64 pAllocParams, NvU32 paramsSize, + NvU32 flags, NvP64 pRightsRequested, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiFree_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiFreeWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject, + NvU32 flags, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiDisableClients_STUB(RM_API *pRmApi, NvHandle *phClientList, NvU32 numClients) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiDisableClientsWithSecInfo_STUB(RM_API *pRmApi, NvHandle *phClientList, + NvU32 numClients, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiControl_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject, NvU32 cmd, + void *pParams, NvU32 paramsSize) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiControlWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject, NvU32 cmd, + NvP64 pParams, NvU32 paramsSize, NvU32 flags, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiControlPrefetch_STUB(RM_API *pRmApi, NvU32 cmd) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiDupObject_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, NvHandle *phObject, + NvHandle hClientSrc, NvHandle hObjectSrc, NvU32 flags) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiDupObjectWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvHandle hClientSrc, NvHandle hObjectSrc, NvU32 flags, + API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiShare_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiShareWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiMapToCpu_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvU64 offset, NvU64 length, void **ppCpuVirtAddr, NvU32 flags) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiMapToCpuWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvU64 offset, NvU64 length, NvP64 *ppCpuVirtAddr, NvU32 flags, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiUnmapFromCpu_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, void *pLinearAddress, + NvU32 flags, NvU32 ProcessId) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiUnmapFromCpuWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvP64 pLinearAddress, NvU32 flags, NvU32 ProcessId, API_SECURITY_INFO *pSecInfo) +{ 
+ return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiMap_STUB(RM_API *pRmApi, NVOS46_PARAMETERS *pParms) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiMapWithSecInfo_STUB(RM_API *pRmApi, NVOS46_PARAMETERS *pParms, + API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiUnmap_STUB(RM_API *pRmApi, NVOS47_PARAMETERS *pParms) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiUnmapWithSecInfo_STUB(RM_API *pRmApi, NVOS47_PARAMETERS *pParms, + API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void rmapiInitStubInterface(RM_API *pRmApi) +{ + portMemSet(pRmApi, 0, sizeof(*pRmApi)); + + pRmApi->Alloc = _rmapiAlloc_STUB; + pRmApi->AllocWithHandle = _rmapiAllocWithHandle_STUB; + pRmApi->AllocWithSecInfo = _rmapiAllocWithSecInfo_STUB; + pRmApi->Free = _rmapiFree_STUB; + pRmApi->FreeWithSecInfo = _rmapiFreeWithSecInfo_STUB; + pRmApi->DisableClients = _rmapiDisableClients_STUB; + pRmApi->DisableClientsWithSecInfo = _rmapiDisableClientsWithSecInfo_STUB; + pRmApi->Control = _rmapiControl_STUB; + pRmApi->ControlWithSecInfo = _rmapiControlWithSecInfo_STUB; + pRmApi->ControlPrefetch = _rmapiControlPrefetch_STUB; + pRmApi->DupObject = _rmapiDupObject_STUB; + pRmApi->DupObjectWithSecInfo = _rmapiDupObjectWithSecInfo_STUB; + pRmApi->Share = _rmapiShare_STUB; + pRmApi->ShareWithSecInfo = _rmapiShareWithSecInfo_STUB; + pRmApi->MapToCpu = _rmapiMapToCpu_STUB; + pRmApi->MapToCpuWithSecInfo = _rmapiMapToCpuWithSecInfo_STUB; + pRmApi->UnmapFromCpu = _rmapiUnmapFromCpu_STUB; + pRmApi->UnmapFromCpuWithSecInfo = _rmapiUnmapFromCpuWithSecInfo_STUB; + pRmApi->Map = _rmapiMap_STUB; + pRmApi->MapWithSecInfo = _rmapiMapWithSecInfo_STUB; + pRmApi->Unmap = _rmapiUnmap_STUB; + pRmApi->UnmapWithSecInfo = _rmapiUnmapWithSecInfo_STUB; +} diff --git a/src/nvidia/src/kernel/rmapi/rmapi_utils.c b/src/nvidia/src/kernel/rmapi/rmapi_utils.c new file mode 100644 index 0000000..182b2d3 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/rmapi_utils.c @@ -0,0 +1,243 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "rmapi/rmapi_utils.h" +#include "rmapi/rs_utils.h" +#include "resource_desc.h" +#include "nvoc/rtti.h" + +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" + +#include "class/cl0080.h" +#include "class/cl2080.h" + +#include "ctrl/ctrl0080.h" +#include "ctrl/ctrl2080.h" +#include "ctrl/ctrl402c.h" +#include "ctrl/ctrl90e7/ctrl90e7bbx.h" + +NV_STATUS +rmapiutilAllocClientAndDeviceHandles +( + RM_API *pRmApi, + OBJGPU *pGpu, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubDevice +) +{ + NV_STATUS rmStatus; + NV0080_ALLOC_PARAMETERS nv0080AllocParams; + NV2080_ALLOC_PARAMETERS nv2080AllocParams; + NvHandle hClient = NV01_NULL_OBJECT; + NvHandle hDevice = NV01_NULL_OBJECT; + NvHandle hSubDevice = NV01_NULL_OBJECT; + + NV_ASSERT_OR_RETURN(phClient != NULL, NV_ERR_INVALID_ARGUMENT); + + // Allocate a client + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_ROOT, + &hClient, + sizeof(hClient)), + cleanup); + + // Allocate a device + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + serverutilGenResourceHandle(hClient, &hDevice), + cleanup); + + portMemSet(&nv0080AllocParams, 0, sizeof(nv0080AllocParams)); + nv0080AllocParams.deviceId = gpuGetDeviceInstance(pGpu); + nv0080AllocParams.hClientShare = hClient; + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + hClient, + hClient, + hDevice, + NV01_DEVICE_0, + &nv0080AllocParams, + sizeof(nv0080AllocParams)), + cleanup); + + // Allocate a subDevice + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + serverutilGenResourceHandle(hClient, &hSubDevice), + cleanup); + + portMemSet(&nv2080AllocParams, 0, sizeof(nv2080AllocParams)); + nv2080AllocParams.subDeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + hClient, + hDevice, + hSubDevice, + NV20_SUBDEVICE_0, + &nv2080AllocParams, + sizeof(nv2080AllocParams)), + cleanup); + + *phClient = hClient; + if (phDevice != NULL) + *phDevice = hDevice; + if (phSubDevice != NULL) + *phSubDevice = hSubDevice; + + return rmStatus; + +cleanup: + rmapiutilFreeClientAndDeviceHandles(pRmApi, &hClient, &hDevice, &hSubDevice); + return rmStatus; +} + +void +rmapiutilFreeClientAndDeviceHandles +( + RM_API *pRmApi, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubDevice +) +{ + NV_ASSERT_OR_RETURN_VOID(phClient != NULL); + NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR, *phClient != NV01_NULL_OBJECT); + + if (phSubDevice != NULL && *phSubDevice != NV01_NULL_OBJECT) + { + pRmApi->Free(pRmApi, *phClient, *phSubDevice); + *phSubDevice = NV01_NULL_OBJECT; + } + + if (phDevice != NULL && *phDevice != NV01_NULL_OBJECT) + { + pRmApi->Free(pRmApi, *phClient, *phDevice); + *phDevice = NV01_NULL_OBJECT; + } + + pRmApi->Free(pRmApi, *phClient, *phClient); + *phClient = NV01_NULL_OBJECT; +} + +NvBool +rmapiutilIsExternalClassIdInternalOnly +( + NvU32 externalClassId +) +{ + RS_RESOURCE_DESC *pResDesc = RsResInfoByExternalClassId(externalClassId); + NV_ASSERT_OR_RETURN(pResDesc != NULL, NV_FALSE); + return (pResDesc->flags & RS_FLAGS_INTERNAL_ONLY) != 0x0; +} + +NV_STATUS +rmapiutilGetControlInfo +( + NvU32 cmd, + NvU32 *pFlags, + NvU32 *pAccessRight, + NvU32 *pParamsSize +) +{ + RS_RESOURCE_DESC *pResourceDesc = RsResInfoByExternalClassId(DRF_VAL(XXXX, _CTRL_CMD, _CLASS, cmd)); + + if (pResourceDesc != NULL) + { + struct NVOC_CLASS_DEF *pClassDef = (void*)pResourceDesc->pClassInfo; + if (pClassDef != NULL) + { + 
const struct NVOC_EXPORTED_METHOD_DEF *pMethodDef = + nvocGetExportedMethodDefFromMethodInfo_IMPL(pClassDef->pExportInfo, cmd); + + if (pMethodDef != NULL) + { + if (pFlags != NULL) + *pFlags = pMethodDef->flags; + + if (pAccessRight != NULL) + *pAccessRight = pMethodDef->accessRight; + + if (pParamsSize != NULL) + *pParamsSize = pMethodDef->paramSize; + + return NV_OK; + } + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NvBool rmapiutilSkipErrorMessageForUnsupportedVgpuGuestControl(OBJGPU *pGpu, NvU32 cmd) +{ + switch (cmd) + { + case NV2080_CTRL_CMD_GPU_GET_OEM_BOARD_INFO: + case NV2080_CTRL_CMD_GPU_GET_INFOROM_IMAGE_VERSION: + case NV2080_CTRL_CMD_GPU_GET_INFOROM_OBJECT_VERSION: + case NV2080_CTRL_CMD_GPU_GET_RESET_STATUS: + case NV2080_CTRL_CMD_GPU_GET_DRAIN_AND_RESET_STATUS: + case NV2080_CTRL_CMD_BUS_GET_PEX_COUNTERS: + case NV2080_CTRL_CMD_FB_GET_OFFLINED_PAGES: + case NV2080_CTRL_CMD_FB_GET_REMAPPED_ROWS: + case NV2080_CTRL_CMD_ECC_GET_VOLATILE_COUNTS: + case NV2080_CTRL_CMD_GPU_QUERY_INFOROM_ECC_SUPPORT: + case NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO: + case NV2080_CTRL_CMD_BUS_CLEAR_PEX_COUNTERS: + case NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK: + case NV2080_CTRL_CMD_I2C_WRITE_REG: + case NV2080_CTRL_CMD_I2C_READ_BUFFER: + case NV2080_CTRL_CMD_I2C_READ_REG: + case NV2080_CTRL_CMD_FB_SETUP_VPR_REGION: + case NV2080_CTRL_CMD_I2C_ACCESS: + case NV2080_CTRL_CMD_FLCN_USTREAMER_QUEUE_INFO: + case NV2080_CTRL_CMD_INTERNAL_NVLINK_ENABLE_COMPUTE_PEER_ADDR: + case NV0000_CTRL_CMD_SYSTEM_PFM_REQ_HNDLR_CTRL: + case NV2080_CTRL_CMD_BIOS_GET_NBSI_V2: + case NV2080_CTRL_CMD_BIOS_GET_UEFI_SUPPORT: + case NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY: + case NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY: + case NV2080_CTRL_CMD_BUS_SET_PCIE_SPEED: + case NV2080_CTRL_CMD_FB_GET_CALIBRATION_LOCK_FAILED: + case NV2080_CTRL_CMD_GPU_GET_ILLUM: + case NV2080_CTRL_CMD_GPU_GET_VPR_CAPS: + case NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS: + case NV2080_CTRL_CMD_GPU_SET_ILLUM: + case NV2080_CTRL_CMD_LPWR_DIFR_CTRL: + case NV2080_CTRL_CMD_PMGR_GET_MODULE_INFO: + case NV2080_CTRL_CMD_FB_QUERY_DRAM_ENCRYPTION_INFOROM_SUPPORT: + case NV2080_CTRL_CMD_NVLINK_GET_PLATFORM_INFO: + case NV402C_CTRL_CMD_I2C_GET_PORT_SPEED: + case NV90E7_CTRL_CMD_BBX_GET_LAST_FLUSH_TIME: + return NV_TRUE; + + default: + return NV_FALSE; + } +} + diff --git a/src/nvidia/src/kernel/rmapi/rpc_common.c b/src/nvidia/src/kernel/rmapi/rpc_common.c new file mode 100644 index 0000000..f1d1943 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/rpc_common.c @@ -0,0 +1,123 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +//****************************************************************************** +// +// Description: +// This file implements RPC code common to all builds. +// +//****************************************************************************** + +#include "gpu/gpu.h" +#include "gpu/device/device.h" +#include "vgpu/rpc.h" +#include "os/os.h" +#include "core/locks.h" + +#define RPC_STRUCTURES +#define RPC_GENERIC_UNION +#include "g_rpc-structures.h" +#undef RPC_STRUCTURES +#undef RPC_GENERIC_UNION + +#define RPC_MESSAGE_STRUCTURES +#define RPC_MESSAGE_GENERIC_UNION +#include "g_rpc-message-header.h" +#undef RPC_MESSAGE_STRUCTURES +#undef RPC_MESSAGE_GENERIC_UNION + +static void rpcRmApiSetup(OBJGPU *pGpu) +{ + // + // Physical RMAPI is already initialized for monolithic, and this function + // just needs to overwrite individual methods as needed + // + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + PORT_UNREFERENCED_VARIABLE(pRmApi); + + if (IS_VIRTUAL(pGpu)) + { + // none for now + } + else if (IS_DCE_CLIENT(pGpu)) + { + pRmApi->Control = rpcRmApiControl_dce; + pRmApi->AllocWithHandle = rpcRmApiAlloc_dce; + pRmApi->Free = rpcRmApiFree_dce; + pRmApi->DupObject = rpcRmApiDupObject_dce; + } +} + +OBJRPC *initRpcObject(OBJGPU *pGpu) +{ + OBJRPC *pRpc = NULL; + + pRpc = portMemAllocNonPaged(sizeof(OBJRPC)); + if (pRpc == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "cannot allocate memory for OBJRPC (instance %d)\n", + gpuGetInstance(pGpu)); + return NULL; + } + pRpc->timeoutCount = 0; + pRpc->bQuietPrints = NV_FALSE; + + rpcRmApiSetup(pGpu); + + return pRpc; +} + +NV_STATUS rpcWriteCommonHeader(OBJGPU *pGpu, OBJRPC *pRpc, NvU32 func, NvU32 paramLength) +{ + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT(rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + if (!pRpc) + { + NV_PRINTF(LEVEL_ERROR, + "NVRM_RPC: called with NULL pRpc. Function %d.\n", func); + NV_ASSERT(0); + return NV_ERR_INVALID_STATE; + } + + if (func == NV_VGPU_MSG_FUNCTION_RM_API_CONTROL) + portMemSet(pRpc->message_buffer, 0, pRpc->largeRpcSize); + else + portMemSet(pRpc->message_buffer, 0, pRpc->maxRpcSize); + + vgpu_rpc_message_header_v->header_version = DRF_DEF(_VGPU, _MSG_HEADER_VERSION, _MAJOR, _TOT) | + DRF_DEF(_VGPU, _MSG_HEADER_VERSION, _MINOR, _TOT); + vgpu_rpc_message_header_v->signature = NV_VGPU_MSG_SIGNATURE_VALID; + vgpu_rpc_message_header_v->rpc_result = NV_VGPU_MSG_RESULT_RPC_PENDING; + vgpu_rpc_message_header_v->rpc_result_private = NV_VGPU_MSG_RESULT_RPC_PENDING; + { + vgpu_rpc_message_header_v->u.spare = NV_VGPU_MSG_UNION_INIT; + } + vgpu_rpc_message_header_v->function = func; + vgpu_rpc_message_header_v->length = sizeof(rpc_message_header_v) + paramLength; + + return status; +} diff --git a/src/nvidia/src/kernel/rmapi/rs_utils.c b/src/nvidia/src/kernel/rmapi/rs_utils.c new file mode 100644 index 0000000..5010722 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/rs_utils.c @@ -0,0 +1,382 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi.h" +#include "core/locks.h" + +NV_STATUS +serverutilGetResourceRef +( + NvHandle hClient, + NvHandle hObject, + RsResourceRef **ppResourceRef +) +{ + RsResourceRef *pResourceRef; + RsClient *pRsClient; + NV_STATUS status; + + *ppResourceRef = NULL; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return NV_ERR_INVALID_CLIENT; + + status = clientGetResourceRef(pRsClient, hObject, &pResourceRef); + if (status != NV_OK) + return status; + + *ppResourceRef = pResourceRef; + + return NV_OK; +} + +NV_STATUS +serverutilGetResourceRefWithType +( + NvHandle hClient, + NvHandle hObject, + NvU32 internalClassId, + RsResourceRef **ppResourceRef +) +{ + if (serverutilGetResourceRef(hClient, hObject, ppResourceRef) != NV_OK) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + if (!objDynamicCastById((*ppResourceRef)->pResource, internalClassId)) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + return NV_OK; +} + +NV_STATUS +serverutilGetResourceRefWithParent +( + NvHandle hClient, + NvHandle hParent, + NvHandle hObject, + NvU32 internalClassId, + RsResourceRef **ppResourceRef +) +{ + NvHandle hFoundParent; + + if (serverutilGetResourceRef(hClient, hObject, ppResourceRef) != NV_OK) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + hFoundParent = (*ppResourceRef)->pParentRef ? (*ppResourceRef)->pParentRef->hResource : 0; + + if (!objDynamicCastById((*ppResourceRef)->pResource, internalClassId) || + hFoundParent != hParent) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + return NV_OK; +} + +RmClient +*serverutilGetClientUnderLock +( + NvHandle hClient +) +{ + NV_STATUS status; + RsClient *pRsClient; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return NULL; + + return dynamicCast(pRsClient, RmClient); +} + +RmClient +**serverutilGetFirstClientUnderLock +( + void +) +{ + RmClient **ppClient; + + // + // Resource server's client list is not protected by any RM locks + // so, as a WAR, we access a lock-protected shadow client list. This avoids + // the race condition where a client is freed while a DPC is iterating + // through the client list. 
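// As a usage sketch (illustrative only; variable names are hypothetical),
// callers walk the shadow list with the first/next pair defined here:
//
//     RmClient **ppClient;
//     for (ppClient = serverutilGetFirstClientUnderLock();
//          ppClient != NULL;
//          ppClient = serverutilGetNextClientUnderLock(ppClient))
//     {
//         RmClient *pClient = *ppClient;
//         // inspect pClient under the protection noted above
//     }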
+ // + ppClient = listHead(&g_clientListBehindGpusLock); + if (NULL == ppClient) + return NULL; + + return ppClient; +} + +RmClient +**serverutilGetNextClientUnderLock +( + RmClient **ppClient +) +{ + // + // Resource server's client list is not protected by any RM locks + // so, as a WAR, we access a lock-protected shadow client list. This avoids + // the race condition where a client is freed while a DPC is iterating + // through the client list. + // + ppClient = listNext(&g_clientListBehindGpusLock, ppClient); + if (NULL == ppClient) + return NULL; + + return ppClient; +} + +RsResourceRef * +serverutilFindChildRefByType +( + NvHandle hClient, + NvHandle hParent, + NvU32 internalClassId, + NvBool bExactMatch +) +{ + NV_STATUS status; + RsClient *pRsClient; + RsResourceRef *pResourceRef; + RsResourceRef *pParentRef; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return NULL; + + status = clientGetResourceRef(pRsClient, hParent, &pParentRef); + if (status != NV_OK) + { + return NULL; + } + + status = refFindChildOfType(pParentRef, internalClassId, bExactMatch, &pResourceRef); + if (status != NV_OK) + { + return NULL; + } + + return pResourceRef; +} + +RS_ITERATOR +serverutilRefIter +( + NvHandle hClient, + NvHandle hScopedObject, + NvU32 internalClassId, + RS_ITER_TYPE iterType, + NvBool bExactMatch +) +{ + NV_STATUS status; + RsClient *pRsClient; + RsResourceRef *pScopedRef = NULL; + RS_ITERATOR it; + + portMemSet(&it, 0, sizeof(it)); + + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return it; + + if (hScopedObject != NV01_NULL_OBJECT) + { + status = clientGetResourceRef(pRsClient, hScopedObject, &pScopedRef); + if (status != NV_OK) + { + return it; + } + } + + return clientRefIter(pRsClient, pScopedRef, internalClassId, iterType, bExactMatch); +} + +NvBool +serverutilValidateNewResourceHandle +( + NvHandle hClient, + NvHandle hObject +) +{ + RmClient *pClient = serverutilGetClientUnderLock(hClient); + + return ((pClient != NULL) && + (NV_OK == clientValidateNewResourceHandle(staticCast(pClient, RsClient), hObject, NV_TRUE))); +} + +NV_STATUS +serverutilGenResourceHandle +( + NvHandle hClient, + NvHandle *returnHandle +) +{ + NV_STATUS status; + RmClient *pClient; + + // + // LOCK TEST: we should have the API lock here unless we're executing out of + // the power management path. + // + NV_ASSERT_OR_RETURN(rmapiLockIsOwner() || rmapiInRtd3PmPath(), NV_ERR_INVALID_LOCK_STATE); + + pClient = serverutilGetClientUnderLock(hClient); + + if (pClient == NULL) + return NV_ERR_INVALID_CLIENT; + + status = clientGenResourceHandle(staticCast(pClient, RsClient), returnHandle); + return status; +} + +RS_SHARE_ITERATOR +serverutilShareIter +( + NvU32 internalClassId +) +{ + return serverShareIter(&g_resServ, internalClassId); +} + +NvBool +serverutilShareIterNext +( + RS_SHARE_ITERATOR* pIt +) +{ + return serverShareIterNext(pIt); +} + +NV_STATUS +serverutilGetClientHandlesFromPid +( + NvU32 procID, + NvU32 subProcessID, + ClientHandlesList *pClientList +) +{ + RmClient **ppClient; + RmClient *pClient; + + // If the list passed in has old elements, lets clear its elements. 
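// (Aside: a sketch of the serverutilRefIter helper defined above, with
//  hypothetical handles and an arbitrarily chosen class; the iterator
//  fields shown come from resserv:
//
//      RS_ITERATOR it = serverutilRefIter(hClient, hDevice,
//                                         classId(Subdevice),
//                                         RS_ITERATE_CHILDREN, NV_TRUE);
//      while (clientRefIterNext(it.pClient, &it))
//      {
//          RsResourceRef *pRef = it.pResourceRef;
//          // each pRef is a Subdevice child of hDevice
//      }
//  )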
+ if (listCount(pClientList)) + { + // Clear & free nodes in temp list + listDestroy(pClientList); + } + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + RsClient *pRsClient; + + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + if ((pClient->ProcID == procID) && + (pClient->SubProcessID == subProcessID)) + { + if (listAppendValue(pClientList, + &pRsClient->hClient) == NULL) + { + listClear(pClientList); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + } + } + + return NV_OK; +} + +NvBool +serverutilMappingFilterCurrentUserProc +( + RsCpuMapping *pMapping +) +{ + return (!pMapping->pPrivate->bKernel && + (pMapping->processId == osGetCurrentProcess())); +} + +NvBool +serverutilMappingFilterKernel +( + RsCpuMapping *pMapping +) +{ + return pMapping->pPrivate->bKernel; +} + + +NV_STATUS +serverutilAcquireClient +( + NvHandle hClient, + LOCK_ACCESS_TYPE access, + CLIENT_ENTRY **ppClientEntry, + RmClient **ppClient +) +{ + CLIENT_ENTRY *pClientEntry; + RmClient *pClient; + + // LOCK TEST: we should have the API lock here + NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + if (NV_OK != serverAcquireClient(&g_resServ, hClient, access, &pClientEntry)) + return NV_ERR_INVALID_CLIENT; + + pClient = dynamicCast(pClientEntry->pClient, RmClient); + if (pClient == NULL) + { + serverReleaseClient(&g_resServ, access, pClientEntry); + return NV_ERR_INVALID_CLIENT; + } + + *ppClientEntry = pClientEntry; + *ppClient = pClient; + return NV_OK; +} + +void +serverutilReleaseClient +( + LOCK_ACCESS_TYPE access, + CLIENT_ENTRY *pClientEntry +) +{ + serverReleaseClient(&g_resServ, access, pClientEntry); +} diff --git a/src/nvidia/src/kernel/rmapi/sharing.c b/src/nvidia/src/kernel/rmapi/sharing.c new file mode 100644 index 0000000..7cff0da --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/sharing.c @@ -0,0 +1,430 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
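// The dup entry points below are reached through the RM_API dispatch
// table; a minimal caller-side sketch (hypothetical handles; passing
// NV01_NULL_OBJECT as the destination handle is assumed to let RM
// generate one):
//
//     RM_API   *pRmApi  = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
//     NvHandle  hNewObj = NV01_NULL_OBJECT;
//     NV_STATUS status  = pRmApi->DupObject(pRmApi, hClientDst, hParentDst,
//                                           &hNewObj, hClientSrc, hObjectSrc,
//                                           0 /* flags */);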
+ */ +#include "rmapi/rmapi.h" +#include "rmapi/rmapi_utils.h" +#include "entry_points.h" +#include "core/thread_state.h" +#include "rmapi/rs_utils.h" +#include "resserv/rs_access_map.h" +#include "resource_desc.h" +#include "class/cl0071.h" +#include "gpu/gpu_resource.h" + +static NV_STATUS +_RmDupObject +( + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags, + API_SECURITY_INFO *pSecInfo, + RS_LOCK_INFO *pLockInfo +) +{ + NV_STATUS rmStatus; + RS_RES_DUP_PARAMS params; + + NV_ASSERT_OR_RETURN(phObject != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + portMemSet(¶ms, 0, sizeof(params)); + params.hClientSrc = hClientSrc; + params.hResourceSrc = hObjectSrc; + params.hClientDst = hClient; + params.hParentDst = hParent; + params.hResourceDst = *phObject; + params.pSecInfo = pSecInfo; + params.flags = flags; + params.pLockInfo = pLockInfo; + + rmStatus = serverCopyResource(&g_resServ, ¶ms); + + if (rmStatus == NV_OK) + *phObject = params.hResourceDst; + + return rmStatus; +} + +NV_STATUS +rmapiDupObject +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->DupObjectWithSecInfo(pRmApi, hClient, hParent, phObject, hClientSrc, hObjectSrc, + flags, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiDupObjectWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04DupObject: hClient:0x%x hParent:0x%x hObject:0x%x\n", + hClient, hParent, *phObject); + NV_PRINTF(LEVEL_INFO, + "Nv04DupObject: hClientSrc:0x%x hObjectSrc:0x%x flags:0x%x\n", + hClientSrc, hObjectSrc, flags); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + { + return status; + } + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + status = rmapiInitLockInfo(pRmApi, hClient, hClientSrc, &lockInfo); + if (status != NV_OK) + { + rmapiEpilogue(pRmApi, &rmApiContext); + return NV_OK; + } + + status = _RmDupObject(hClient, hParent, phObject, hClientSrc, hObjectSrc, flags, pSecInfo, &lockInfo); + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "...handle dup complete\n"); + } + else + { + NV_PRINTF(LEVEL_INFO, + "Nv04DupObject: dup failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + NV_PRINTF(LEVEL_INFO, + "Nv04DupObject: hClient:0x%x hParent:0x%x hObject:0x%x\n", + hClient, hParent, *phObject); + NV_PRINTF(LEVEL_INFO, + "Nv04DupObject: hClientSrc:0x%x hObjectSrc:0x%x flags:0x%x\n", + hClientSrc, hObjectSrc, flags); + } + + return status; +} + +NV_STATUS +rmapiDupObjectWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiDupObjectWithSecInfo(pRmApi, hClient, hParent, phObject, hClientSrc, hObjectSrc, flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + + +static NV_STATUS +_RmShare +( + NvHandle 
hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, + API_SECURITY_INFO *pSecInfo, + RS_LOCK_INFO *pLockInfo +) +{ + RS_RES_SHARE_PARAMS params; + portMemSet(¶ms, 0, sizeof(params)); + params.hClient = hClient; + params.hResource = hObject; + params.pSharePolicy = pSharePolicy; + params.pSecInfo = pSecInfo; + params.pLockInfo = pLockInfo; + + return serverShareResourceAccess(&g_resServ, ¶ms); +} + +NV_STATUS +rmapiShare +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->ShareWithSecInfo(pRmApi, hClient, hObject, pSharePolicy, + &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiShareWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RS_LOCK_INFO lockInfo; + NvHandle hSecondClient = NV01_NULL_OBJECT; + + NV_PRINTF(LEVEL_INFO, + "Nv04Share: hClient:0x%x hObject:0x%x pSharePolicy:%p\n", + hClient, hObject, pSharePolicy); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + { + return status; + } + + if ((pSecInfo->paramLocation == PARAM_LOCATION_KERNEL) && + (pSharePolicy->type == RS_SHARE_TYPE_CLIENT)) + { + hSecondClient = pSharePolicy->target; + } + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + status = rmapiInitLockInfo(pRmApi, hClient, hSecondClient, &lockInfo); + if (status != NV_OK) + { + rmapiEpilogue(pRmApi, &rmApiContext); + return NV_OK; + } + + // + // Currently, Share should have no internal callers. + // If this changes and one takes a client lock, this could mess with + // Share since it may require two clients when sharing with SHARE_TYPE_CLIENT. + // Assert this for now, handle it properly if this ever changes (See DupObject) + // + NV_ASSERT (lockInfo.pClient == NULL); + + status = _RmShare(hClient, hObject, pSharePolicy, pSecInfo, &lockInfo); + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "...resource share complete\n"); + } + else + { + NV_PRINTF(LEVEL_INFO, + "Nv04Share: share failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + NV_PRINTF(LEVEL_INFO, + "Nv04Share: hClient:0x%x hObject:0x%x pSharePolicy:%p\n", + hClient, hObject, pSharePolicy); + } + + return status; +} + +NV_STATUS +rmapiShareWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiShareWithSecInfo(pRmApi, hClient, hObject, pSharePolicy, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +serverCopyResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_DUP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_COPY)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverShareResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_SHARE_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_SHARE)) + ? 
LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverInitGlobalSharePolicies +( + RsServer *pServer +) +{ + RS_SHARE_POLICY sharePolicy; + + // Global default policies, these can be overridden by clients/objects + portMemSet(&sharePolicy, 0, sizeof(sharePolicy)); + RS_ACCESS_MASK_ADD(&sharePolicy.accessMask, RS_ACCESS_DUP_OBJECT); + sharePolicy.type = RS_SHARE_TYPE_PID; + + if (listAppendValue(&pServer->defaultInheritedSharePolicyList, + &sharePolicy) == NULL) + return NV_ERR_NO_MEMORY; + + // Internal share policies, these can't be overridden + + // SMC dup policy: Do not allow duping across different SMC partition + portMemSet(&sharePolicy, 0, sizeof(sharePolicy)); + sharePolicy.type = RS_SHARE_TYPE_SMC_PARTITION; + sharePolicy.action = RS_SHARE_ACTION_FLAG_REQUIRE; + RS_ACCESS_MASK_ADD(&sharePolicy.accessMask, RS_ACCESS_DUP_OBJECT); + + if (listAppendValue(&pServer->globalInternalSharePolicyList, + &sharePolicy) == NULL) + return NV_ERR_NO_MEMORY; + + // FM dup policy: Allow FM to dup any user-mode client's resource. + portMemSet(&sharePolicy, 0, sizeof(sharePolicy)); + sharePolicy.type = RS_SHARE_TYPE_FM_CLIENT; + RS_ACCESS_MASK_ADD(&sharePolicy.accessMask, RS_ACCESS_DUP_OBJECT); + + if (listAppendValue(&pServer->globalInternalSharePolicyList, + &sharePolicy) == NULL) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +// Called with both src/dst client lock held +NV_STATUS serverUpdateLockFlagsForCopy(RsServer *pServer, RS_RES_DUP_PARAMS *pParams) +{ + RS_RESOURCE_DESC *pResDesc; + RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + + if (pParams->pSrcRef == NULL) + return NV_ERR_INVALID_STATE; + + pResDesc = RsResInfoByExternalClassId(pParams->pSrcRef->externalClassId); + if (pResDesc == NULL) + return NV_ERR_INVALID_OBJECT; + + if (!(pResDesc->flags & RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_DUP)) + { + pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK; + } + + if (pResDesc->flags & RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_DUP) + { + pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + } + + if (pResDesc->flags & RS_FLAGS_ACQUIRE_RELAXED_GPUS_LOCK_ON_DUP) + { + // Holding both client lock and the at least RO API lock. Safe to access the resource + if (rmapiLockIsOwner()) + { + GpuResource *pGpuResSrc = dynamicCast(pParams->pSrcRef->pResource, GpuResource); + GpuResource *pGpuResDst = dynamicCast(pParams->pDstParentRef->pResource, GpuResource); + + if ((pGpuResSrc != NULL) && + (pGpuResDst != NULL) && + (pGpuResSrc->pGpu == pGpuResDst->pGpu)) + { + pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + } + else + { + pLockInfo->flags &= ~(RM_LOCK_FLAGS_NO_GPUS_LOCK); + } + } + else + { + pLockInfo->flags &= ~(RM_LOCK_FLAGS_NO_GPUS_LOCK); + } + } + + pLockInfo->pContextRef = pParams->pSrcRef->pParentRef; + + return NV_OK; +} diff --git a/src/nvidia/src/lib/base_utils.c b/src/nvidia/src/lib/base_utils.c new file mode 100644 index 0000000..5af3a9d --- /dev/null +++ b/src/nvidia/src/lib/base_utils.c @@ -0,0 +1,385 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*!
+ * @file
+ * @brief Common utility code that has no natural home
+ */
+
+
+#include "lib/base_utils.h"
+#include "os/os.h"
+
+//
+// Log2 of a number that is asserted to be a power of 2, so the result is exact.
+//
+NvU32 nvLogBase2(NvU64 val)
+{
+    NV_ASSERT(val != 0);
+    NV_ASSERT(((val) & (val - 1)) == 0);
+
+    return portUtilCountTrailingZeros64(val);
+}
+
+
+/**
+ * @brief Finds the lowest unset bit of a given bitfield.
+ *
+ * Returns the lowest value of X such that the expression
+ * pBitField32[X/32] & (1<<(X%32)) is zero.
+ *
+ * If all bits are set, returns numElements*32.
+ *
+ * @param[in] pBitField32
+ * @param[in] numElements size of array pBitField32
+ *
+ * @return the lowest zero bit, or numElements*32 if all bits are set.
+ */
+NvU32 nvBitFieldLSZero(NvU32 *pBitField32, NvU32 numElements)
+{
+    NvU32 i;
+
+    for (i = 0; i < numElements; ++i)
+    {
+        NvU32 temp = ~pBitField32[i];
+        if (temp)
+        {
+            LOWESTBITIDX_32(temp);
+            return temp + i * sizeof(NvU32) * 8;
+        }
+    }
+
+    return numElements*32;
+}
+
+/**
+ * @brief Finds the highest unset bit of a given bitfield.
+ *
+ * Returns the highest value of X such that the expression
+ * pBitField32[X/32] & (1<<(X%32)) is zero.
+ *
+ * If all bits are set, returns numElements*32.
+ *
+ * @param[in] pBitField32
+ * @param[in] numElements size of array pBitField32
+ *
+ * @return the highest zero bit, or numElements*32 if all bits are set.
+ */
+NvU32 nvBitFieldMSZero(NvU32 *pBitField32, NvU32 numElements)
+{
+    NvU32 i = 0, j = numElements - 1;
+
+    while (i++ < numElements)
+    {
+        NvU32 temp = ~pBitField32[j];
+        if (temp)
+        {
+            HIGHESTBITIDX_32(temp);
+            return temp + j * sizeof(NvU32) * 8;
+        }
+        j--;
+    }
+
+    return numElements * 32;
+}
+
+NvBool nvBitFieldTest(NvU32 *pBitField, NvU32 numElements, NvU32 bit)
+{
+    return (bit < numElements*32 ? (NvBool) !!(pBitField[bit/32] & NVBIT(bit%32)) : NV_FALSE);
+}
+
+void nvBitFieldSet(NvU32 *pBitField, NvU32 numElements, NvU32 bit, NvBool val)
+{
+    NV_ASSERT(bit < numElements*32);
+    pBitField[bit/32] = (pBitField[bit/32] & ~NVBIT(bit%32)) | (val ? NVBIT(bit%32) : 0);
+}
+
+//
+// Sort an array of n elements/structures.
+// Example:
+//     NvBool integerLess(void * a, void * b)
+//     {
+//         return *(NvU32 *)a < *(NvU32 *)b;
+//     }
+//     NvU32 array[1000];
+//     ...
+// NvU32 temp[1000]; +// nvMergeSort(array, arrsize(array), temp, sizeof(NvU32), integerLess); +// +#define EL(n) ((char *)array+(n)*elementSize) +void nvMergeSort(void * array, NvU32 n, void * tempBuffer, NvU32 elementSize, NvBool (*less)(void *, void *)) +{ + char * mergeArray = (char *)tempBuffer; + NvU32 m, i; + + // + // Bottom-up merge sort divides the sort into a sequence of passes. + // In each pass, the array is divided into blocks of size 'm'. + // Every pair of two adjacent blocks are merged (in place). + // The next pass is started with twice the block size + // + for (m = 1; m<=n; m*=2) + { + for (i = 0; i<(n-m); i+=2*m) + { + NvU32 loMin = i; + NvU32 lo = loMin; + NvU32 loMax = i+m; + NvU32 hi = i+m; + NvU32 hiMax = NV_MIN(n,i+2*m); + + char * dest = mergeArray; + + // + // Standard merge of [lo, loMax) and [hi, hiMax) + // + while (1) + { + if (less(EL(lo), EL(hi))) + { + portMemCopy(dest, elementSize, EL(lo), elementSize); + lo++; + dest+=elementSize; + if (lo >= loMax) + break; + } + else + { + portMemCopy(dest, elementSize, EL(hi), elementSize); + hi++; + dest+=elementSize; + if (hi >= hiMax) + break; + } + } + + // + // Copy remaining items (only one of these loops can run) + // + while (lo < loMax) + { + portMemCopy(dest, elementSize,EL(lo), elementSize); + dest+=elementSize; + lo++; + } + + while (hi < hiMax) + { + portMemCopy(dest, elementSize,EL(hi), elementSize); + dest+=elementSize; + hi++; + } + + // + // Copy merged data back over array + // + portMemCopy(EL(loMin), (NvU32)(dest - mergeArray), mergeArray, (NvU32)(dest - mergeArray)); + } + } +} + +#define RANGE(val,low,hi) (((val) >= (low)) && ((val) <= (hi))) + +// Do not conflict with libc naming +NvS32 nvStrToL +( + NvU8* pStr, + NvU8** pEndStr, + NvS32 base, + NvU8 stopChar, + NvU32 *numFound +) +{ + NvU32 num; + NvU32 newnum; + + *numFound = 0; + + // scan for start of number + for (;*pStr;pStr++) + { + if (RANGE(*pStr, '0', '9')) + { + *numFound = 1; + break; + } + else if ((BASE16 == base) && (RANGE(*pStr,'a','f'))) + { + *numFound = 1; + break; + } + else if ((BASE16 == base) && (RANGE(*pStr,'A', 'F'))) + { + *numFound = 1; + break; + } + else if(*pStr == stopChar) + { + break; + } + } + + // convert number + num = 0; + for (;*pStr;pStr++) + { + if (RANGE(*pStr, '0', '9')) + { + newnum = *pStr - '0'; + } + else if ((BASE16 == base) && (RANGE(*pStr,'a','f'))) + { + newnum = *pStr - 'a' + 10; + } + else if ((BASE16 == base) && (RANGE(*pStr,'A', 'F'))) + { + newnum = *pStr - 'A' + 10; + } + else + break; + + num *= base; + num += newnum; + + } + + *pEndStr = pStr; + + return num; +} + +/** + * @brief Returns MSB of input as a bit mask + * + * @param x + * @return MSB of x + */ +NvU64 +nvMsb64(NvU64 x) +{ + x |= (x >> 1); + x |= (x >> 2); + x |= (x >> 4); + x |= (x >> 8); + x |= (x >> 16); + x |= (x >> 32); + // + // At this point, x has same MSB as input, but with all 1's below it, clear + // everything but MSB + // + return(x & ~(x >> 1)); +} + +/** + * @brief Convert unsigned long int to char* + * + * @param value to be converted to string + * @param *string is the char array to be have the converted data + * @param radix denoted the base of the operation : hex(16),octal(8)..etc + * @return the converted string + */ +char * nvU32ToStr(NvU32 value, char *string, NvU32 radix) +{ + char tmp[33]; + char *tp = tmp; + NvS32 i; + NvU32 v = value; + char *sp; + + if (radix > 36 || radix <= 1) + { + return 0; + } + + while (v || tp == tmp) + { + i = v % radix; + v = v / radix; + if (i < 10) + *tp++ = (char)(i + 
'0'); + else + *tp++ = (char)(i + 'a' - 10); + } + + sp = string; + + while (tp > tmp) + *sp++ = *--tp; + *sp = 0; + + return string; +} + +/*! + * @brief Convert unsigned long long hex int to char* + * + * @param[in] value to be converted to string + * @param[in] targetStrLen Denoted the converted string Length + * @param[out] *string is the char array to be have the converted data + * + * @return the converted string + */ +char * +nvU64ToStr +( + NvU64 value, + char *string, + NvU32 targetStrLen +) +{ + char tempBuffer[65]; + NvU32 base = 16; + NvU32 rem = 0; + NvU32 inx = 0; + + for (inx = 0; inx < targetStrLen; inx++) + { + rem = value % base; + value = value / base; + tempBuffer[inx] = (rem > 9)? (rem - 10) + 'a' : rem + '0'; + } + + for(inx = 0; inx < targetStrLen; inx++) + { + string[inx] = tempBuffer[targetStrLen - inx - 1]; + } + + string[targetStrLen] = '\0'; + + return string; +} + +/** + * @brief Get the string length + * + * @param string for which length has to be calculated + * @return the string length + */ +NvU32 nvStringLen(const char * str) +{ + NvU32 i = 0; + while (str[i++] != '\0') + ; + return i - 1; +} + diff --git a/src/nvidia/src/lib/zlib/inflate.c b/src/nvidia/src/lib/zlib/inflate.c new file mode 100644 index 0000000..3c3103d --- /dev/null +++ b/src/nvidia/src/lib/zlib/inflate.c @@ -0,0 +1,1157 @@ +/* inflate.c -- Not copyrighted 1992 by Mark Adler + version c10p1, 10 January 1993 */ + +/* You can do whatever you like with this source file, though I would + prefer that if you modify it and redistribute it that you include + comments to that effect with your name and the date. Thank you. + [The history has been moved to the file ChangeLog.] + */ + +/* + Inflate deflated (PKZIP's method 8 compressed) data. The compression + method searches for as much of the current string of bytes (up to a + length of 258) in the previous 32K bytes. If it doesn't find any + matches (of at least length 3), it codes the next byte. Otherwise, it + codes the length of the matched string and its distance backwards from + the current position. There is a single Huffman code that codes both + single bytes (called "literals") and match lengths. A second Huffman + code codes the distance information, which follows a length code. Each + length or distance code actually represents a base value and a number + of "extra" (sometimes zero) bits to get to add to the base value. At + the end of each deflated block is a special end-of-block (EOB) literal/ + length code. The decoding process is basically: get a literal/length + code; if EOB then done; if a literal, emit the decoded byte; if a + length then get the distance and emit the referred-to bytes from the + sliding window of previously emitted data. + + There are (currently) three kinds of inflate blocks: stored, fixed, and + dynamic. The compressor deals with some chunk of data at a time, and + decides which method to use on a chunk-by-chunk basis. A chunk might + typically be 32K or 64K. If the chunk is uncompressible, then the + "stored" method is used. In this case, the bytes are simply stored as + is, eight bits per byte, with none of the above coding. The bytes are + preceded by a count, since there is no longer an EOB code. + + If the data is compressible, then either the fixed or dynamic methods + are used. In the dynamic method, the compressed data is preceded by + an encoding of the literal/length and distance Huffman codes that are + to be used to decode this block. 
The representation is itself Huffman + coded, and so is preceded by a description of that code. These code + descriptions take up a little space, and so for small blocks, there is + a predefined set of codes, called the fixed codes. The fixed method is + used if the block codes up smaller that way (usually for quite small + chunks), otherwise the dynamic method is used. In the latter case, the + codes are customized to the probabilities in the current block, and so + can code it much better than the pre-determined fixed codes. + + The Huffman codes themselves are decoded using a mutli-level table + lookup, in order to maximize the speed of decoding plus the speed of + building the decoding tables. See the comments below that precede the + lbits and dbits tuning parameters. + */ + + +/* + Notes beyond the 1.93a appnote.txt: + + 1. Distance pointers never point before the beginning of the output + stream. + 2. Distance pointers can point back across blocks, up to 32k away. + 3. There is an implied maximum of 7 bits for the bit length table and + 15 bits for the actual data. + 4. If only one code exists, then it is encoded using one bit. (Zero + would be more efficient, but perhaps a little confusing.) If two + codes exist, they are coded using one bit each (0 and 1). + 5. There is no way of sending zero distance codes--a dummy must be + sent if there are none. (History: a pre 2.0 version of PKZIP would + store blocks with no distance codes, but this was discovered to be + too harsh a criterion.) Valid only for 1.93a. 2.04c does allow + zero distance codes, which is sent as one code of zero bits in + length. + 6. There are up to 286 literal/length codes. Code 256 represents the + end-of-block. Note however that the static length tree defines + 288 codes just to fill out the Huffman codes. Codes 286 and 287 + cannot be used though, since there is no length base or extra bits + defined for them. Similarly, there are up to 30 distance codes. + However, static trees define 32 codes (all 5 bits) to fill out the + Huffman codes, but the last two had better not show up in the data. + 7. Unzip can check dynamic Huffman blocks for complete code sets. + The exception is that a single code would not be complete (see #4). + 8. The five bits following the block type is really the number of + literal codes sent minus 257. + 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits + (1+6+6). Therefore, to output three times the length, you output + three codes (1+1+1), whereas to output four times the same length, + you only need two codes (1+3). Hmm. + 10. In the tree reconstruction algorithm, Code = Code + Increment + only if BitLength(i) is not zero. (Pretty obvious.) + 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19) + 12. Note: length code 284 can represent 227-258, but length code 285 + really is 258. The last length deserves its own, short code + since it gets used a lot in very redundant files. The length + 258 is special since 258 - 3 (the min match length) is 255. + 13. The literal/length and distance code bit lengths are read as a + single stream of lengths. It is possible (and advantageous) for + a repeat code (16, 17, or 18) to go across the boundary between + the two sets of lengths. + */ + +//----------------------------------------------------------------------------- +// NVIDIA modifications are solely around interface cleanup, compiler warnings, etc. 
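The flush_window() routine that follows clips each flushed window span against an optional caller-requested output range [outLower, outUpper]. The five copy cases it distinguishes reduce to a single interval-overlap computation; here is a minimal standalone sketch of that arithmetic (all names hypothetical, not part of the sources):

#include <stdio.h>

/* Clip the window span [pos, pos+len) against the inclusive range [lo, hi].
   Returns the byte count to copy and the offsets into window and output. */
static unsigned clipToRange(unsigned pos, unsigned len,
                            unsigned lo, unsigned hi,
                            unsigned *srcOff, unsigned *dstOff)
{
    if (len == 0)
        return 0;

    unsigned start = (pos > lo) ? pos : lo;                     /* max */
    unsigned end   = (pos + len - 1 < hi) ? pos + len - 1 : hi; /* min */

    if (start > end)
        return 0;              /* span lies entirely outside the range */

    *srcOff = start - pos;     /* bytes of the window to skip */
    *dstOff = start - lo;      /* where the bytes land in the output */
    return end - start + 1;
}

int main(void)
{
    unsigned s, d;
    unsigned n = clipToRange(100, 50, 120, 200, &s, &d);
    printf("copy %u bytes: window+%u -> out+%u\n", n, s, d); /* 30, 20, 0 */
    return 0;
}

flush_window() spells the overlap out case by case (and additionally tracks the wp1/optSize bookkeeping), but the per-case arithmetic matches this clamp.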
+//----------------------------------------------------------------------------- + +#include "nvtypes.h" +#include "nvstatus.h" + +#ifndef NVGZ_USER +#define __DRIVER_BUILD__ +// driver build +#include "os/os.h" +#endif /* NVGZ_USER */ + +#ifndef __DRIVER_BUILD__ +// user build : NVGZ_USER +#include +#include +#include +#include + +#define portMemCopy(p1, s1, p2, s2) memcpy(p1, p2, ((s1) > (s2)) ? (s2) : (s1)) +#define portMemSet memset +#define portMemAllocNonPaged malloc +#define portMemFree free +#define sizeof sizeof +#define NV_PRINTF(a,b) printf(b) +#endif + +#include "lib/zlib/inflate.h" + +/* Function prototypes */ +static NvU32 huft_build(NvU8 *, NvU16, NvU32 , ush *, ush *, + struct huft **, NvS32 *); +static NvU32 huft_free(struct huft *); +static NvU32 inflate_codes_iterator(PGZ_INFLATE_STATE); +static NvU32 fixed_huft_build(PGZ_INFLATE_STATE); +static NvU32 dynamic_huft_build(PGZ_INFLATE_STATE); + +static void flush_window(PGZ_INFLATE_STATE pGzState) +{ + if ( pGzState->wp == 0) return; + + pGzState->wp2 = pGzState->wp; + + // If output range is not specified, do normal output + if (pGzState->outLower == 0xFFFFFFFF && pGzState->outUpper == 0xFFFFFFFF) + { + portMemCopy(pGzState->outbuf + pGzState->outptr, pGzState->wp, pGzState->window, pGzState->wp); + pGzState->wp1 += pGzState->wp; + pGzState->optSize += pGzState->wp; + } + // slide pGzState->outLower pGzState->outUpper slide + // ----============-----|--------------|-----============ + else if (pGzState->outptr + pGzState->wp - 1 < pGzState->outLower + || pGzState->outptr > pGzState->outUpper) + { + } + // slide pGzState->outLower pGzState->outUpper + // ----=================|===-----------|----------------- + else if (pGzState->outptr <= pGzState->outLower + && pGzState->outptr + pGzState->wp - 1 >= pGzState->outLower + && pGzState->outptr + pGzState->wp - 1 <= pGzState->outUpper) + { + portMemCopy(pGzState->outbuf, + pGzState->wp - (pGzState->outLower - pGzState->outptr), pGzState->window + pGzState->outLower - pGzState->outptr, + pGzState->wp - (pGzState->outLower - pGzState->outptr)); + pGzState->wp1 += pGzState->wp - (pGzState->outLower - pGzState->outptr); + pGzState->optSize += pGzState->wp - (pGzState->outLower - pGzState->outptr); + } + // slide pGzState->outLower pGzState->outUpper + // ----=================|==============|===-------------- + else if (pGzState->outptr <= pGzState->outLower + && pGzState->outptr + pGzState->wp - 1 > pGzState->outUpper ) + { + portMemCopy(pGzState->outbuf, + pGzState->outUpper - pGzState->outLower + 1, pGzState->window + pGzState->outLower - pGzState->outptr, + pGzState->outUpper - pGzState->outLower + 1); + pGzState->wp1 += pGzState->outUpper - pGzState->outptr + 1; + pGzState->optSize += pGzState->outUpper - pGzState->outLower + 1; + } + // slide pGzState->outLower pGzState->outUpper + // ---------------------|===========---|----------------- + else if (pGzState->outptr >= pGzState->outLower + && pGzState->outptr + pGzState->wp - 1 <= pGzState->outUpper) + { + portMemCopy(pGzState->outbuf + pGzState->outptr - pGzState->outLower, + pGzState->wp, pGzState->window, + pGzState->wp); + pGzState->wp1 += pGzState->wp; + pGzState->optSize += pGzState->wp; + } + // slide pGzState->outLower pGzState->outUpper + // ---------------------|==============|===-------------- + else if (pGzState->outptr >= pGzState->outLower + && pGzState->outptr + pGzState->wp - 1 > pGzState->outUpper) + { + portMemCopy(pGzState->outbuf + pGzState->outptr - pGzState->outLower, + pGzState->outUpper - 
pGzState->outptr + 1, pGzState->window, + pGzState->outUpper - pGzState->outptr + 1); + pGzState->wp1 += pGzState->outUpper - pGzState->outptr + 1; + pGzState->optSize += pGzState->outUpper - pGzState->outptr + 1; + } + + pGzState->outptr += pGzState->wp; + pGzState->wp = 0; +} + + +/* Tables for deflate from PKZIP's appnote.txt. */ +static NvU32 border[] = { /* Order of the bit length code lengths */ + 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; +static ush cplens[] = { /* Copy lengths for literal codes 257..285 */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; + /* note: see note #13 above about the 258 in this list. */ +static ush cplext[] = { /* Extra bits for literal codes 257..285 */ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, + 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */ +static ush cpdist[] = { /* Copy offsets for distance codes 0..29 */ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, + 8193, 12289, 16385, 24577}; +static ush cpdext[] = { /* Extra bits for distance codes */ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, + 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, + 12, 12, 13, 13}; + +/* Macros for inflate() bit peeking and grabbing. + The usage is: + + NEEDBITS(j) + x = b & mask_bits[j]; + DUMPBITS(j) + + where NEEDBITS makes sure that b has at least j bits in it, and + DUMPBITS removes the bits from b. The macros use the variable k + for the number of bits in b. Normally, b and k are register + variables for speed, and are initialized at the beginning of a + routine that uses these macros from a global bit buffer and count. + + If we assume that EOB will be the longest code, then we will never + ask for bits with NEEDBITS that are beyond the end of the stream. + So, NEEDBITS should not read any more bytes than are needed to + meet the request. Then no bytes need to be "returned" to the buffer + at the end of the last block. + + However, this assumption is not true for fixed blocks--the EOB code + is 7 bits, but the other literal/length codes can be 8 or 9 bits. + (The EOB code is shorter than other codes because fixed blocks are + generally short. So, while a block always has an EOB, many other + literal/length codes have a significantly lower probability of + showing up at all.) However, by making the first table have a + lookup of seven bits, the EOB code will be found in that first + lookup, and so will not require that too many bits be pulled from + the stream. + */ + +static ush mask_bits[] = { + 0x0000, + 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, + 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff +}; + +/* + Huffman code decoding is performed using a multi-level table lookup. + The fastest way to decode is to simply build a lookup table whose + size is determined by the longest code. However, the time it takes + to build this table can also be a factor if the data being decoded + is not very long. The most common codes are necessarily the + shortest codes, so those codes dominate the decoding time, and hence + the speed. The idea is you can have a shorter table that decodes the + shorter, more probable codes, and then point to subsidiary tables for + the longer codes. The time it costs to decode the longer codes is + then traded against the time it takes to make longer tables. 
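The NEEDBITS()/DUMPBITS() discipline described above can be exercised standalone; a minimal sketch (hypothetical names) of the same little-endian bit buffer:

#include <stdio.h>

typedef struct {
    const unsigned char *in;  /* compressed input cursor */
    unsigned long        b;   /* bit buffer, LSB-first   */
    unsigned             k;   /* valid bits in b         */
} BitReader;

/* NEEDBITS(j): top up the buffer; then x = b & mask; DUMPBITS(j): drop j bits. */
static unsigned getbits(BitReader *br, unsigned j)
{
    unsigned x;
    while (br->k < j)                 /* NEEDBITS(j) */
    {
        br->b |= (unsigned long)(*br->in++) << br->k;
        br->k += 8;
    }
    x = (unsigned)(br->b & ((1u << j) - 1));
    br->b >>= j;                      /* DUMPBITS(j) */
    br->k -= j;
    return x;
}

int main(void)
{
    unsigned char data[] = { 0x8D };  /* binary 10001101 */
    BitReader br = { data, 0, 0 };
    printf("%u\n", getbits(&br, 3));  /* low 3 bits: 101 = 5 */
    printf("%u\n", getbits(&br, 5));  /* next 5 bits: 10001 = 17 */
    return 0;
}

Same shape as the macros: the refill loop is NEEDBITS, the mask plays the role of mask_bits[j], and the shift is DUMPBITS.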
+ + This results of this trade are in the variables lbits and dbits + below. lbits is the number of bits the first level table for literal/ + length codes can decode in one step, and dbits is the same thing for + the distance codes. Subsequent tables are also less than or equal to + those sizes. These values may be adjusted either when all of the + codes are shorter than that, in which case the longest code length in + bits is used, or when the shortest code is *longer* than the requested + table size, in which case the length of the shortest code in bits is + used. + + There are two different values for the two tables, since they code a + different number of possibilities each. The literal/length table + codes 286 possible values, or in a flat code, a little over eight + bits. The distance table codes 30 possible values, or a little less + than five bits, flat. The optimum values for speed end up being + about one bit more than those, so lbits is 8+1 and dbits is 5+1. + The optimum values may differ though from machine to machine, and + possibly even between compilers. Your mileage may vary. + */ + + +const NvU32 lbits = 9; /* bits in base literal/length lookup table */ +const NvU32 dbits = 6; /* bits in base distance lookup table */ + +static NvU32 hufts; /* track memory usage */ + +/* + * Given a list of code lengths and a maximum table size, make a set of + * tables to decode that set of codes. Return zero on success, one if + * the given code set is incomplete (the tables are still built in + * case), two if the input is invalid (all zero length codes or an + * oversubscribed set of lengths), and three if not enough memory. + */ +static NvU32 huft_build +( + NvU8 *b, /* code lengths in bits (all assumed <= BMAX) */ + NvU16 n, /* number of codes (assumed <= N_MAX) */ + NvU32 s, /* number of simple-valued codes (0..s-1) */ + ush *d, /* list of base values for non-simple codes */ + ush *e, /* list of extra bits for non-simple codes */ + struct huft **t, /* result: starting table */ + NvS32 *m /* maximum lookup bits, returns actual */ +) +{ + NvU32 a; /* counter for codes of length k */ + NvU32 c[BMAX+1]; /* bit length count table */ + NvU32 f; /* i repeats in table every f entries */ + NvS32 g; /* maximum code length */ + NvS32 h; /* table level */ + NvU16 i; /* counter, current code */ + NvU32 j; /* counter */ + NvS32 k; /* number of bits in current code */ + NvS32 l; /* bits per table (returned in m) */ + NvU8 *p8; /* pointer into b[] */ + NvU16 *p16; /* pointer into v[] */ + NvU32 *p32; /* pointer into c[] */ + struct huft *q; /* points to current table */ + struct huft r; /* table entry for structure assignment */ + struct huft *u[BMAX]; /* table stack */ + NvU16 v[N_MAX]; /* values in order of bit length */ + NvS32 w; /* bits before this table == (l * h) */ + NvU32 x[BMAX+1]; /* bit offsets, then code stack */ + NvU32 *xp; /* pointer into x */ + NvS32 y; /* number of dummy codes added */ + NvU32 z; /* number of entries in current table */ + + /* Generate counts for each bit length */ + portMemSet((void*)c,0,sizeof(c)); + + p8 = b; i = n; + do { + Tracecv(*p8, (stderr, (n-i >= ' ' && n-i <= '~' ? 
"%c %d\n" : "0x%x %d\n"), + n-i, *p8)); + c[*p8]++; /* assume all entries <= BMAX */ + p8++; /* Can't combine with above line (Solaris bug) */ + } while (--i); + if (c[0] == n) /* null input--all zero length codes */ + { + *t = (struct huft *)NULL; + *m = 0; + return GZ_STATE_HUFT_OK; + } + + + /* Find minimum and maximum length, bound *m by those */ + l = *m; + for (j = 1; j <= BMAX; j++) + if (c[j]) + break; + k = j; /* minimum code length */ + if ((NvU32)l < j) + l = j; + for (i = BMAX; i; i--) + if (c[i]) + break; + g = i; /* maximum code length */ + if ((NvU32)l > i) + l = i; + *m = l; + + + /* Adjust last length count to fill out codes, if needed */ + for (y = 1 << j; j < i; j++, y <<= 1) + if ((y -= c[j]) < 0) + return GZ_STATE_HUFT_ERROR; /* bad input: more codes than bits */ + if ((y -= c[i]) < 0) + return GZ_STATE_HUFT_ERROR; + c[i] += y; + + + /* Generate starting offsets into the value table for each length */ + x[1] = j = 0; + p32 = c + 1; xp = x + 2; + while (--i) { /* note that i == g from above */ + *xp++ = (j += *p32++); + } + + + /* Make a table of values in order of bit lengths */ + p8 = b; i = 0; + do { + if ((j = *p8++) != 0) + v[x[j]++] = i; + } while (++i < n); + + + /* Generate the Huffman codes and for each, make the table entries */ + x[0] = i = 0; /* first Huffman code is zero */ + p16 = v; /* grab values in bit order */ + h = -1; /* no tables yet--level -1 */ + w = -l; /* bits decoded == (l * h) */ + u[0] = (struct huft *)NULL; /* just to keep compilers happy */ + q = (struct huft *)NULL; /* ditto */ + z = 0; /* ditto */ + + /* go through the bit lengths (k already is bits in shortest code) */ + for (; k <= g; k++) + { + a = c[k]; + while (a--) + { + /* here i is the Huffman code of length k bits for value *p */ + /* make tables up to required level */ + while (k > w + l) + { + h++; + w += l; /* previous table always l bits */ + + /* compute minimum size table less than or equal to l bits */ + z = (NvU32)((z = (NvU32)(g - w)) > (NvU32)l ? (NvU32)l : z); /* upper limit on table size */ + if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */ + { /* too few codes for k-w bit table */ + f -= a + 1; /* deduct codes from patterns left */ + xp = c + k; + while (++j < z) /* try smaller tables up to z bits */ + { + if ((f <<= 1) <= *++xp) + break; /* enough codes to use up j bits */ + f -= *xp; /* else deduct codes from patterns */ + } + } + z = 1 << j; /* table entries for j-bit table */ + + /* allocate and link in new table */ + + q = portMemAllocNonPaged((z + 1)*sizeof(struct huft)); + if (q == NULL) + { + return GZ_STATE_HUFT_ERROR; + } + + if (q == (struct huft *)NULL) + { + if (h) + huft_free(u[0]); + return GZ_STATE_HUFT_ERROR; /* not enough memory */ + } + hufts += z + 1; /* track memory usage */ + *t = q + 1; /* link to list for huft_free() */ + *(t = &(q->v.t)) = (struct huft *)NULL; + u[h] = ++q; /* table starts after link */ + + /* connect to last table, if there is one */ + if (h) + { + x[h] = i; /* save pattern for backing up */ + r.b = (uch)l; /* bits to dump before this table */ + r.e = (uch)(16 + j); /* bits in this table */ + r.v.t = q; /* pointer to this table */ + j = i >> (w - l); /* (get around Turbo C bug) */ + u[h-1][j] = r; /* connect to last table */ + } + } + + /* set up table entry in r */ + r.b = (uch)(k - w); + if (p16 >= v + n) + r.e = 99; /* out of values--invalid code */ + else if (*p16 < s) + { + r.e = (uch)(*p16 < 256 ? 
16 : 15); /* 256 is end-of-block code */ + r.v.n = (ush)(*p16); /* simple code is just the value */ + p16++; /* one compiler does not like *p++ */ + } + else + { + r.e = (uch)e[*p16 - s]; /* non-simple--look up in lists */ + r.v.n = d[*p16++ - s]; + } + + /* fill code-like entries with r */ + f = 1 << (k - w); + for (j = i >> w; j < z; j += f) + q[j] = r; + + /* backwards increment the k-bit code i */ + for (j = 1 << (k - 1); i & j; j >>= 1) + i ^= j; + i ^= j; + + /* backup over finished tables */ + while ((i & ((NvU32)(1 << w) - 1)) != x[h]) + { + h--; /* don't need to update q */ + w -= l; + } + } + } + + + /* Return true (1) if we were given an incomplete table */ + return y != 0 && g != 1; +} + +/* + * Free the malloc'ed tables built by huft_build(), which makes a linked + * list of the tables it made, with the links in a dummy first entry of + * each table. + */ +static NvU32 huft_free +( + struct huft *t /* table to free */ +) +{ + struct huft *p, *q; + + /* Go through linked list, freeing from the malloced (t[-1]) address. */ + p = t; + while (p != (struct huft *)NULL) + { + q = (--p)->v.t; + portMemFree((void*)p); + p = q; + } + return GZ_STATE_HUFT_OK; +} + +static NvU32 inflate_codes_iterator_store(PGZ_INFLATE_STATE pGzState) +{ + NvU32 n = pGzState->codesState.sn; /* number of bytes in block */ + NvU32 w = pGzState->codesState.w; /* current window position */ + NvU32 k = pGzState->codesState.k; /* number of bits in bit buffer */ + ulg b = pGzState->codesState.b; /* bit buffer */ + + /* read and output the compressed data */ + while (n) + { + n--; + NEEDBITS(8) + pGzState->window[w++] = (uch)b; + DUMPBITS(8) + if (w == WSIZE) + { + flush_output(w); + w = 0; + break; + } + } + + /* restore the globals from the locals */ + pGzState->codesState.sn = n; + pGzState->codesState.w = w; + pGzState->codesState.b = b; + pGzState->codesState.k = k; + + if (n != 0) + { + return GZ_STATE_ITERATOR_OK; + } + else + { + return GZ_STATE_ITERATOR_END; + } +} + +/* inflate (decompress) the codes in a deflated (compressed) block. +Return an error code or zero if it all goes ok. 
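One detail of the decoder below deserves a standalone illustration: an LZ77 match may overlap its own output (distance smaller than length), so the copy must run strictly in order. A minimal sketch (hypothetical names):

#include <stdio.h>

/* Copy n bytes from window position d to window position w; when the
   regions overlap (d < w < d + n), earlier output feeds later output,
   which is exactly how short periodic matches expand. */
static void windowCopy(unsigned char *win, unsigned w, unsigned d, unsigned n)
{
    while (n--)
        win[w++] = win[d++];
}

int main(void)
{
    unsigned char win[16] = "ab";
    windowCopy(win, 2, 0, 6);     /* distance 2, length 6 */
    win[8] = '\0';
    printf("%s\n", (char *)win);  /* prints "abababab" */
    return 0;
}

The optimized path in inflate_codes_iterator() falls back to exactly this byte-at-a-time loop whenever memcpy() could see overlapping regions.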
*/ +static NvU32 inflate_codes_iterator(PGZ_INFLATE_STATE pGzState) +{ + NvU32 e = pGzState->codesState.e; /* table entry flag/number of extra bits */ + NvU32 n = pGzState->codesState.n; /* length and index for copy */ + NvU32 d = pGzState->codesState.d; + NvU32 w = pGzState->codesState.w; /* current window position */ + struct huft *t = pGzState->codesState.t; /* pointer to table entry */ + ulg b = pGzState->codesState.b; /* bit buffer */ + NvU32 k = pGzState->codesState.k; /* number of bits in bit buffer */ + NvU32 ml = mask_bits[pGzState->bl]; /* masks for bl and bd bits */ + NvU32 md = mask_bits[pGzState->bd]; + NvU32 r = 0; + + if (pGzState->codesState.continue_copy == 1) + goto continue_copy; + + for (;;) + { + NEEDBITS((unsigned)pGzState->bl) + if ((e = (t = pGzState->tl + ((unsigned)b & ml))->e) > 16) + { + do { + if (e == 99) + return GZ_STATE_ITERATOR_ERROR; + DUMPBITS(t->b) + e -= 16; + NEEDBITS(e) + } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); + } + DUMPBITS(t->b) + + if (e == 16) /* then it's a literal */ + { + pGzState->window[w++] = (uch)t->v.n; + Tracevv((stderr, "%c", pGzState->window[w-1])); + if (w == WSIZE) + { + pGzState->wp1 = 0; + flush_output(w); + w = 0; + r = GZ_STATE_ITERATOR_OK; + goto exit; + } + } + else /* it's an EOB or a length */ + { + /* exit if end of block */ + if (e == 15) + { + r = GZ_STATE_ITERATOR_END; + goto exit; + } + + /* get length of block to copy */ + NEEDBITS(e) + n = t->v.n + ((unsigned)b & mask_bits[e]); + DUMPBITS(e); + + /* decode distance of block to copy */ + NEEDBITS((unsigned)pGzState->bd) + if ((e = (t = pGzState->td + ((unsigned)b & md))->e) > 16) + { + do { + if (e == 99) + return 1; + DUMPBITS(t->b) + e -= 16; + NEEDBITS(e) + } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); + } + DUMPBITS(t->b) + + NEEDBITS(e) + d = w - t->v.n - ((unsigned)b & mask_bits[e]); + DUMPBITS(e) + + Tracevv((stderr,"\\[%d,%d]", w-d, n)); + + /* do the copy */ + do { + n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? n : e); +#if !defined(NOMEMCPY) && !defined(DEBUG) + if (w - d >= e) /* (this test assumes unsigned comparison) */ + { + memcpy(pGzState->window + w, pGzState->window + d, e); + w += e; + d += e; + } + else /* do it slow to avoid memcpy() overlap */ +#endif /* !NOMEMCPY */ + { + do { + pGzState->window[w++] = pGzState->window[d++]; + Tracevv((stderr, "%c", pGzState->window[w-1])); + } while (--e); + } + if (w == WSIZE) + { + pGzState->wp1 = 0; + flush_output(w); + w = 0; + r = GZ_STATE_ITERATOR_OK; + pGzState->codesState.continue_copy = 1; + goto exit; + } +continue_copy: ; + } while (n); + + pGzState->codesState.continue_copy = 0; + } + } + +exit: + + pGzState->codesState.e = e; /* table entry flag/number of extra bits */ + pGzState->codesState.n = n; + pGzState->codesState.d = d; /* length and index for copy */ + pGzState->codesState.w = w; /* current window position */ + pGzState->codesState.t = t; /* pointer to table entry */ + pGzState->codesState.b = b; /* bit buffer */ + pGzState->codesState.k = k; /* number of bits in bit buffer */ + + /* done */ + return r; +} + +static void huft_destroy(PGZ_INFLATE_STATE pGzState) +{ + /* free the decoding tables, return */ + if (pGzState->tl != NULL) + { + huft_free(pGzState->tl); + pGzState->tl = NULL; + } + + if (pGzState->td != NULL) + { + huft_free(pGzState->td); + pGzState->td = NULL; + } +} + +static NvU32 fixed_huft_build(PGZ_INFLATE_STATE pGzState) +/* decompress an inflated type 1 (fixed Huffman codes) block. 
We should + either replace this with a custom decoder, or at least precompute the + Huffman tables. */ +{ + NvU32 i; /* temporary variable */ + NvU8 l[N_MAX]; /* length list for huft_build */ + + /* set up literal table */ + for (i = 0; i < 144; i++) + l[i] = 8; + for (; i < 256; i++) + l[i] = 9; + for (; i < 280; i++) + l[i] = 7; + for (; i < N_MAX; i++) /* make a complete, but wrong code set */ + l[i] = 8; + pGzState->bl = 7; + if ((i = huft_build(l, N_MAX, 257, cplens, cplext, &pGzState->tl, &pGzState->bl)) != 0) + return i; + + + /* set up distance table */ + for (i = 0; i < 30; i++) /* make an incomplete code set */ + l[i] = 5; + pGzState->bd = 5; + if ((i = huft_build(l, 30, 0, cpdist, cpdext, &pGzState->td, &pGzState->bd)) > GZ_STATE_HUFT_INCOMP) + { + huft_free(pGzState->tl); + return i; + } + + return GZ_STATE_HUFT_OK; +} + +/* decompress an inflated type 2 (dynamic Huffman codes) block. */ +static NvU32 dynamic_huft_build(PGZ_INFLATE_STATE pGzState) +{ + NvU32 i; /* temporary variables */ + NvU32 j; + NvU32 l; /* last length */ + NvU32 m; /* mask for bit lengths table */ + NvU32 n; /* number of lengths to get */ + NvU32 nb; /* number of bit length codes */ + NvU16 nl; /* number of literal/length codes */ + NvU16 nd; /* number of distance codes */ +#ifdef PKZIP_BUG_WORKAROUND + NvU8 ll[288+32]; /* literal/length and distance code lengths */ +#else + NvU8 ll[286+30]; /* literal/length and distance code lengths */ +#endif + ulg b; /* bit buffer */ + NvU32 k; /* number of bits in bit buffer */ + + + /* make local bit buffer */ + b = pGzState->bb; + k = pGzState->bk; + + + /* read in table lengths */ + NEEDBITS(5) + nl = 257 + ((NvU8)b & 0x1f); /* number of literal/length codes */ + DUMPBITS(5) + NEEDBITS(5) + nd = 1 + ((NvU8)b & 0x1f); /* number of distance codes */ + DUMPBITS(5) + NEEDBITS(4) + nb = 4 + ((NvU8)b & 0xf); /* number of bit length codes */ + DUMPBITS(4) +#ifdef PKZIP_BUG_WORKAROUND + if (nl > 288 || nd > 32) +#else + if (nl > 286 || nd > 30) +#endif + return GZ_STATE_HUFT_INCOMP; /* bad lengths */ + + /* read in bit-length-code lengths */ + for (j = 0; j < nb; j++) + { + NEEDBITS(3) + ll[border[j]] = (NvU8)b & 7; + DUMPBITS(3) + } + for (; j < 19; j++) + ll[border[j]] = 0; + + /* build decoding table for trees--single level, 7 bit lookup */ + pGzState->bl = 7; + if ((i = huft_build(ll, 19, 19, NULL, NULL, &pGzState->tl, &pGzState->bl)) != 0) + { + if (i == GZ_STATE_HUFT_INCOMP) + huft_free(pGzState->tl); + return i; /* incomplete code set */ + } + + /* read in literal and distance code lengths */ + n = nl + nd; + m = mask_bits[pGzState->bl]; + i = l = 0; + while ((NvU32)i < n) + { + NEEDBITS((NvU32)pGzState->bl) + j = (pGzState->td = pGzState->tl + ((NvU32)b & m))->b; + DUMPBITS(j) + j = pGzState->td->v.n; + if (j < 16) /* length of code in bits (0..15) */ + ll[i++] = (NvU8)(l = j); /* save last length in l */ + else if (j == 16) /* repeat last length 3 to 6 times */ + { + NEEDBITS(2) + j = 3 + ((NvU32)b & 3); + DUMPBITS(2) + if ((NvU32)i + j > n) + return GZ_STATE_HUFT_INCOMP; + while (j--) + ll[i++] = (NvU8)l; + } + else if (j == 17) /* 3 to 10 zero length codes */ + { + NEEDBITS(3) + j = 3 + ((NvU32)b & 7); + DUMPBITS(3) + if ((NvU32)i + j > n) + return GZ_STATE_HUFT_INCOMP; + while (j--) + ll[i++] = 0; + l = 0; + } + else /* j == 18: 11 to 138 zero length codes */ + { + NEEDBITS(7) + j = 11 + ((NvU32)b & 0x7f); + DUMPBITS(7) + if ((NvU32)i + j > n) + return GZ_STATE_HUFT_INCOMP; + while (j--) + ll[i++] = 0; + l = 0; + } + } + + /* free decoding table for trees */ + 
huft_free(pGzState->tl); + + /* restore the global bit buffer */ + pGzState->bb = b; + pGzState->bk = k; + + /* build the decoding tables for literal/length and distance codes */ + pGzState->bl = lbits; + if ((i = huft_build(ll, nl, 257, cplens, cplext, &pGzState->tl, &pGzState->bl)) != 0) + { + if (i == GZ_STATE_HUFT_INCOMP) { + NV_PRINTF(LEVEL_ERROR, "dload, incomplete literal tree\n"); + huft_free(pGzState->tl); + } + return i; /* incomplete code set */ + } + pGzState->bd = dbits; + if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &pGzState->td, &pGzState->bd)) != 0) + { + if (i == GZ_STATE_HUFT_INCOMP) { + NV_PRINTF(LEVEL_ERROR, "dload, incomplete distance tree\n"); +#ifdef PKZIP_BUG_WORKAROUND + i = GZ_STATE_HUFT_OK; + } +#else + huft_free(pGzState->td); + } + huft_free(pGzState->tl); + return i; /* incomplete code set */ +#endif + } + + return GZ_STATE_HUFT_OK; +} + +static +NV_STATUS utilGzInit(const NvU8 *zArray, NvU8* oBuffer, NvU32 numTotalBytes, NvU8* window, PGZ_INFLATE_STATE pGzState) +{ + portMemSet(pGzState, 0, sizeof(GZ_INFLATE_STATE)); + portMemSet(window, 0, GZ_SLIDE_WINDOW_SIZE); + + pGzState->inbuf = (NvU8*)zArray; + pGzState->outbuf = oBuffer; + pGzState->outBufSize = numTotalBytes; + pGzState->window = window; + pGzState->newblock = 1; + pGzState->outLower = 0xFFFFFFFF; + pGzState->outUpper = 0xFFFFFFFF; + + return NV_OK; +} + +/* NVIDIA addition: give pointers to input and known-large-enough output buffers. */ +/* decompress an inflated entry */ +NV_STATUS utilGzAllocate(const NvU8 *zArray, NvU32 numTotalBytes, PGZ_INFLATE_STATE *ppGzState) +{ + PGZ_INFLATE_STATE pGzState = NULL; + NvU8 *window = NULL; + NV_STATUS status = NV_OK; + + pGzState = portMemAllocNonPaged(sizeof(GZ_INFLATE_STATE)); + if (pGzState == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + window = portMemAllocNonPaged(GZ_SLIDE_WINDOW_SIZE); + if (window == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + utilGzInit(zArray, 0, numTotalBytes, window, pGzState); + + *ppGzState = pGzState; + +done: + if (status != NV_OK) + { + portMemFree(pGzState); + portMemFree(window); + } + return status; + +} + +NvU32 utilGzIterator(PGZ_INFLATE_STATE pGzState) +{ + NvU32 t; /* block type */ + NvU32 w; /* current window position */ + NvU32 b; /* bit buffer */ + NvU32 k; /* number of bits in bit buffer */ + NvU32 gzStatus = GZ_STATE_ITERATOR_ERROR; + + // new decompression block, we need to construct huffman tree. 
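// (Worked example of the header read below: with bit buffer b = 0x5,
//  i.e. binary 101 taken low-bit-first --
//      e = (NvU32)b & 1;   /* = 1: this is the final block       */
//      b >>= 1;            /* buffer now holds binary 10         */
//      t = (NvU32)b & 3;   /* = 2: dynamic-Huffman block follows */
//  so the switch below would take the "case 2" path.)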
+ if (pGzState->newblock == 1) + { + /* make local bit buffer */ + b = pGzState->bb; + k = pGzState->bk; + + /* read in last block bit */ + NEEDBITS(1) + pGzState->e = (NvU32)b & 1; + DUMPBITS(1) + + /* read in block type */ + NEEDBITS(2) + t = (NvU32)b & 3; + DUMPBITS(2) + + /* restore the global bit buffer */ + pGzState->bb = b; + pGzState->bk = k; + + /* inflate that block type */ + switch (t) + { + case 2: + { + gzStatus = dynamic_huft_build(pGzState); + break; + } + case 1: + { + gzStatus = fixed_huft_build(pGzState); + break; + } + case 0: + { + NvU32 n; + b = pGzState->bb; + k = pGzState->bk; + w = pGzState->wp; + + n = k & 7; + DUMPBITS(n); + + /* get the length and its complement */ + NEEDBITS(16) + n = ((unsigned int)b & 0xffff); + DUMPBITS(16) + NEEDBITS(16) + if (n != (unsigned int)((~b) & 0xffff)) + { + return GZ_STATE_ITERATOR_ERROR; /* error in compressed data */ + } + DUMPBITS(16) + + pGzState->wp = w; /* restore global window pointer */ + pGzState->bb = b; /* restore global bit buffer */ + pGzState->bk = k; + pGzState->codesState.sn = n; + break; + } + default: + { + return GZ_STATE_ITERATOR_ERROR; + } + } + + if (t != 0 && gzStatus != GZ_STATE_HUFT_OK) + { + return GZ_STATE_ITERATOR_ERROR; + } + + pGzState->newblock = 0; + + /* make local copies of globals */ + pGzState->codesState.b = pGzState->bb; /* initialize bit buffer */ + pGzState->codesState.k = pGzState->bk; + pGzState->codesState.w = pGzState->wp; /* initialize window position */ + } + + // decompress one slide window + if (pGzState->codesState.sn == 0) + { + gzStatus = inflate_codes_iterator(pGzState); + } + else + { + gzStatus = inflate_codes_iterator_store(pGzState); + } + + // decompression ok and current block finished. + if (gzStatus == GZ_STATE_ITERATOR_END) + { + /* restore the globals from the locals */ + pGzState->wp = pGzState->codesState.w; /* restore global window pointer */ + pGzState->bb = pGzState->codesState.b; /* restore global bit buffer */ + pGzState->bk = pGzState->codesState.k; + portMemSet(&pGzState->codesState, 0, sizeof(GZ_INFLATE_CODES_STATE)); + + huft_destroy(pGzState); + pGzState->newblock = 1; + + // current block is the last one, flush remain data in slide window + if (pGzState->e) + { + while (pGzState->bk >= 8) + { + pGzState->bk -= 8; + pGzState->inptr--; + } + + /* flush out slide */ + flush_output(pGzState->wp); + } + + // continue iteration + gzStatus = GZ_STATE_ITERATOR_OK; + } + + return gzStatus; +} + +NV_STATUS utilGzDestroy(PGZ_INFLATE_STATE pGzState) +{ + huft_destroy(pGzState); + portMemFree(pGzState->window); + portMemFree(pGzState); + return NV_OK; +} + +NvU32 utilGzGetData(PGZ_INFLATE_STATE pGzState, NvU32 offset, NvU32 size, NvU8 * outBuffer) +{ + NvU32 sizew = 0, oldOutBufSize; + NvU8 * oldInBuf, *oldOutBuf; + uch * oldWindow; + NV_STATUS status = NV_OK; + + if (pGzState == NULL || outBuffer == NULL || offset >= pGzState->outBufSize) + { + return 0; + } + + pGzState->optSize = 0; + // check requested range [offset, offset + size) with outptr + if (pGzState->outptr != 0) + { + if ( offset >= ((pGzState->outptr + WSIZE - 1) / WSIZE - 1) * WSIZE + pGzState->wp1 ) + { + // check remaining data in previous slide window + pGzState->wp1 = offset - (((pGzState->outptr + WSIZE -1 ) / WSIZE - 1) * WSIZE); + + if (pGzState->wp1 < pGzState->wp2) + { + sizew = pGzState->wp2 - pGzState->wp1; + + // request can be satisfied from window + if (sizew >= size) + { + portMemCopy(outBuffer, size, pGzState->window + pGzState->wp1, size); + pGzState->wp1 += size; + pGzState->optSize += 
size; + return pGzState->optSize; + } + // copy data from slide window and continue iteration + else + { + portMemCopy(outBuffer, sizew, pGzState->window + pGzState->wp1, sizew); + outBuffer += sizew; + pGzState->optSize += sizew; + } + } + } + else + { + // slide window passed requested range, restart decompression from beginning. + huft_destroy(pGzState); + + oldInBuf = pGzState->inbuf; + oldOutBuf = pGzState->outbuf; + oldOutBufSize = pGzState->outBufSize; + oldWindow = pGzState->window; + + utilGzInit(oldInBuf, oldOutBuf, oldOutBufSize, oldWindow, pGzState); + } + } + + pGzState->outLower = offset + sizew; + pGzState->outUpper = offset + size - 1; + pGzState->outbuf = outBuffer; + pGzState->wp1 = 0; + pGzState->wp2 = 0; + + while (pGzState->outptr < offset + size) + { + if ((status = utilGzIterator(pGzState)) != GZ_STATE_ITERATOR_OK) + break; + } + + if (status == GZ_STATE_ITERATOR_ERROR) + { + return 0; + } + + return pGzState->optSize; +} + diff --git a/src/nvidia/src/libraries/containers/btree/btree.c b/src/nvidia/src/libraries/containers/btree/btree.c new file mode 100644 index 0000000..a7b8c68 --- /dev/null +++ b/src/nvidia/src/libraries/containers/btree/btree.c @@ -0,0 +1,841 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/***************************** Balanced Tree *******************************\ +* * +* A generic red-black tree library -- every operation is O(log(n)) * +* see https://en.wikipedia.org/wiki/Red-black_tree or similar pages * +* * +\***************************************************************************/ + +#include "utils/nvprintf.h" +#include "utils/nvassert.h" +#include "nvport/nvport.h" +#include "containers/btree.h" + +// +// Debugging support. +// +#if PORT_IS_CHECKED_BUILD + +// +// Dump current tree to debug port. +// +static NV_STATUS +_btreeDumpBranch +( + NODE *pNode, + NvU32 level +) +{ + NvU32 i; + if (pNode) + { + _btreeDumpBranch(pNode->left, level+1); + + NV_PRINTF(LEVEL_INFO, "NVRM_BTREE: "); + for (i=0; i<level; i++) + NV_PRINTF(LEVEL_INFO, "  "); + NV_PRINTF(LEVEL_INFO, "keyStart = 0x%llx\n", pNode->keyStart); + NV_PRINTF(LEVEL_INFO, "keyEnd = 0x%llx\n", pNode->keyEnd); + NV_PRINTF(LEVEL_INFO, "isRed = 0x%d\n", pNode->isRed ?
1 : 0); + NV_PRINTF(LEVEL_INFO, "parent = 0x%p\n", pNode->parent); + NV_PRINTF(LEVEL_INFO, "left = 0x%p\n", pNode->left); + NV_PRINTF(LEVEL_INFO, "right = 0x%p\n", pNode->right); + + _btreeDumpBranch(pNode->right, level+1); + } + return (NV_OK); +} + +static NV_STATUS +_btreeDumpTree +( + NODE *pRoot +) +{ + NV_PRINTF(LEVEL_INFO, "NVRM_BTREE: ======================== Tree Dump ==========================\n\r"); + if (pRoot == NULL) + { + NV_PRINTF(LEVEL_INFO, "NVRM_BTREE: NULL\n\r"); + } + else + { + _btreeDumpBranch(pRoot, 0); + } + NV_PRINTF(LEVEL_INFO, "NVRM_BTREE: =============================================================\n\r"); + return (NV_OK); +} + +// +// Validate node. +// +#define VALIDATE_NODE(pn) \ +{ \ + NV_ASSERT(_btreeNodeValidate(pn) == NV_OK); \ +} + +#define VALIDATE_TREE(pt) \ +{ \ + NV_ASSERT(_btreeTreeValidate(pt) == NV_OK); \ +} + +// +// Validate a nodes branch and count values. +// +static NV_STATUS +_btreeNodeValidate +( + NODE *pNode +) +{ + NV_STATUS status; + + status = NV_OK; + if (pNode == NULL) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_BTREE: ERROR validating NULL NODE.\n\r"); + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + return (NV_ERR_INVALID_PARAMETER); + } + if (pNode->left) + { + if (pNode->left->keyEnd >= pNode->keyStart) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_BTREE: ERROR inconsistent left branch, keyStart = 0x%llx\n", pNode->keyStart); + NV_PRINTF(LEVEL_ERROR, " Left keyEnd = 0x%llx\n", pNode->left->keyEnd); + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + if (pNode->left->parent != pNode) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_BTREE: ERROR inconsistent left branch, Node = 0x%p\n", pNode); + NV_PRINTF(LEVEL_ERROR, " left->parent = 0x%p\n", pNode->left); + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + } + if (pNode->right) + { + if (pNode->right->keyStart <= pNode->keyEnd) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_BTREE: ERROR inconsistent right branch, keyEnd = 0x%llx\n", pNode->keyEnd); + NV_PRINTF(LEVEL_ERROR, " Right keyStart = 0x%llx\n", pNode->right->keyStart); + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + if (pNode->right->parent != pNode) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_BTREE: ERROR inconsistent right branch, Node = 0x%p\n", pNode); + NV_PRINTF(LEVEL_ERROR, " right->parent = 0x%p\n", pNode->right); + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + } + + // red black tree property: Every red node that is not a leaf has only black children. 
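The checks so far cover key ordering and parent pointers; the comment above names the first color invariant (no red node has a red child), which the code just below enforces. The second invariant, equal black height on every root-to-leaf path, is not validated here; a recursive checker would look roughly like this sketch (illustrative only, not part of the change):

    /* Sketch: returns the black-height of the subtree at pNode,
     * or -1 if two paths disagree (invariant violated). */
    static int _btreeBlackHeight(NODE *pNode)
    {
        int lh, rh;
        if (pNode == NULL)
            return 1;                    /* NULL leaves count as black */
        lh = _btreeBlackHeight(pNode->left);
        rh = _btreeBlackHeight(pNode->right);
        if (lh < 0 || rh < 0 || lh != rh)
            return -1;                   /* mismatch somewhere below */
        return lh + (pNode->isRed ? 0 : 1);
    }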
+ if (pNode->isRed) + { + if (pNode->left && pNode->left->isRed) + { + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + if (pNode->right && pNode->right->isRed) + { + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + } + + return (status); +} + + +static NV_STATUS +_btreeBranchValidate +( + NODE *pNode +) +{ + NV_STATUS status; + status = NV_OK; + if (pNode) + { + if (pNode->left) + { + status |= _btreeBranchValidate(pNode->left); + } + status |= _btreeNodeValidate(pNode); + if (pNode->right) + { + status |= _btreeBranchValidate(pNode->right); + } + } + return (status); +} + +static NV_STATUS +_btreeTreeValidate +( + NODE *pRoot +) +{ + NV_STATUS status; + + status = NV_OK; + if (pRoot) + { + NV_ASSERT(!pRoot->isRed); + status = _btreeNodeValidate(pRoot); + if (pRoot->left) + { + status |= _btreeBranchValidate(pRoot->left); + } + if (pRoot->right) + { + status |= _btreeBranchValidate(pRoot->right); + } + } + if (status) + { + _btreeDumpTree(pRoot); + } + return (status); +} + +#else +// +// Validate nothing. +// +#define VALIDATE_NODE(pn) +#define VALIDATE_TREE(pt) +#endif // PORT_IS_CHECKED_BUILD + +// rbt helper function +static void _rotateLeft(NODE **pRoot, NODE *x) +{ + // rotate node x to left + NODE *y = x->right; + + NV_ASSERT (x); + NV_ASSERT (y); + + // establish x->right link + x->right = y->left; + if (y->left) + { + y->left->parent = x; + } + + // establish y->parent link + y->parent = x->parent; + if (x->parent) + { + if (x == x->parent->left) + { + x->parent->left = y; + } + else + { + x->parent->right = y; + } + } + else + { + *pRoot = y; + } + + // link x and y + y->left = x; + x->parent = y; + VALIDATE_NODE(x); +} + +// rbt helper function +static void _rotateRight(NODE **pRoot, NODE *x) +{ + // rotate node x to right + NODE *y = x->left; + + NV_ASSERT (x); + NV_ASSERT (y); + + // establish x->left link + x->left = y->right; + if (y->right) + { + y->right->parent = x; + } + + // establish y->parent link + y->parent = x->parent; + if (x->parent) + { + if (x == x->parent->right) + { + x->parent->right = y; + } + else + { + x->parent->left = y; + } + } + else + { + *pRoot = y; + } + + // link x and y + y->right = x; + x->parent = y; + VALIDATE_NODE(x); +} + +// rbt helper function: +// - maintain red-black tree balance after inserting node x +static void _insertFixup(NODE **pRoot, NODE *x) +{ + // check red-black properties + while((x!=*pRoot) && x->parent->isRed) + { + // we have a violation + if (x->parent == x->parent->parent->left) + { + NODE *y = x->parent->parent->right; + if (y && y->isRed) + { + // uncle is RED + x->parent->isRed = NV_FALSE; + y->isRed = NV_FALSE; + x->parent->parent->isRed = NV_TRUE; + x = x->parent->parent; + } + else + { + // uncle is BLACK + if (x == x->parent->right) + { + // make x a left child + x = x->parent; + _rotateLeft(pRoot, x); + } + + // recolor and rotate + x->parent->isRed = NV_FALSE; + x->parent->parent->isRed = NV_TRUE; + _rotateRight(pRoot, x->parent->parent); + } + } + else + { + // mirror image of above code + NODE *y = x->parent->parent->left; + if (y && y->isRed) + { + // uncle is RED + x->parent->isRed = NV_FALSE; + y->isRed = NV_FALSE; + x->parent->parent->isRed = NV_TRUE; + x = x->parent->parent; + } + else + { + // uncle is BLACK + if (x == x->parent->left) + { + x = x->parent; + _rotateRight(pRoot, x); + } + x->parent->isRed = NV_FALSE; + x->parent->parent->isRed = NV_TRUE; + _rotateLeft(pRoot, x->parent->parent); + } + } + } + (*pRoot)->isRed = NV_FALSE; +} + +// 
insert a new node (no duplicates allowed) +NV_STATUS +btreeInsert +( + PNODE newNode, + PNODE *pRoot +) +{ + NODE *current; + NODE *parent; + + if (newNode == NULL || pRoot == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + // find future parent + current = *pRoot; + parent = NULL; + + if (newNode->keyEnd < newNode->keyStart) + { + return NV_ERR_INVALID_ARGUMENT; + } + + while (current) + { + parent = current; + if (newNode->keyEnd < current->keyStart) + { + current = current->left; + } + else if (newNode->keyStart > current->keyEnd) + { + current = current->right; + } + else + { + return NV_ERR_INSERT_DUPLICATE_NAME; + } + } + + // the caller allocated the node already, just fix the links + newNode->parent = parent; + newNode->left = NULL; + newNode->right = NULL; + newNode->isRed = NV_TRUE; + + // insert node in tree + if(parent) + { + if (newNode->keyEnd < parent->keyStart) + { + parent->left = newNode; + } + else + { + parent->right = newNode; + } + } + else + { + *pRoot = newNode; + } + + _insertFixup(pRoot, newNode); + VALIDATE_NODE(newNode); + + return NV_OK; +} + +// rbt helper function +// - maintain red-black tree balance after deleting node x +// - this is a bit ugly because we use NULL as a sentinel +static void _deleteFixup(NODE **pRoot, NODE *parentOfX, NODE *x) +{ + while ((x != *pRoot) && (!x || !x->isRed)) + { + NV_ASSERT (!(x == NULL && parentOfX == NULL)); + // NULL nodes are sentinel nodes. If we delete a sentinel node (x==NULL) it + // must have a parent node (or be the root). Hence, parentOfX == NULL with + // x==NULL is never possible (tree invariant) + + if ((parentOfX != NULL) && (x == parentOfX->left)) + { + NODE *w = parentOfX->right; + if (w && w->isRed) + { + w->isRed = NV_FALSE; + parentOfX->isRed = NV_TRUE; + _rotateLeft(pRoot, parentOfX); + w = parentOfX->right; + } + if (!w || (((!w->left || !w->left->isRed) && (!w->right || !w->right->isRed)))) + { + if (w) + { + w->isRed = NV_TRUE; + } + x = parentOfX; + } + else + { + if (!w->right || !w->right->isRed) + { + w->left->isRed = NV_FALSE; + w->isRed = NV_TRUE; + _rotateRight(pRoot, w); + w = parentOfX->right; + } + w->isRed = parentOfX->isRed; + parentOfX->isRed = NV_FALSE; + w->right->isRed = NV_FALSE; + _rotateLeft(pRoot, parentOfX); + x = *pRoot; + } + } + else if (parentOfX != NULL) + { + NODE *w = parentOfX->left; + if (w && w->isRed) + { + w->isRed = NV_FALSE; + parentOfX->isRed = NV_TRUE; + _rotateRight(pRoot, parentOfX); + w = parentOfX->left; + } + if (!w || ((!w->right || !w->right->isRed) && (!w->left || !w->left->isRed))) + { + if (w) + { + w->isRed = NV_TRUE; + } + x = parentOfX; + } + else + { + if (!w->left || !w->left->isRed) + { + w->right->isRed = NV_FALSE; + w->isRed = NV_TRUE; + _rotateLeft(pRoot, w); + w = parentOfX->left; + } + w->isRed = parentOfX->isRed; + parentOfX->isRed = NV_FALSE; + w->left->isRed = NV_FALSE; + _rotateRight(pRoot, parentOfX); + x = *pRoot; + } + } + else if (x == NULL) + { + // This should never happen. 
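Note that the tree never allocates: btreeInsert above links a caller-owned NODE whose keyStart/keyEnd range must not overlap an existing node. A typical embedding, mirroring how the eheap code later in this patch uses the tree (MY_OBJECT is a hypothetical client type):

    /* Sketch: a NODE embedded in a caller-owned structure. */
    typedef struct MY_OBJECT
    {
        NODE  node;      /* keyStart/keyEnd/Data filled in by the caller */
        NvU32 payload;
    } MY_OBJECT;

    static NV_STATUS addObject(PNODE *pRoot, MY_OBJECT *pObj, NvU64 lo, NvU64 hi)
    {
        portMemSet(&pObj->node, 0, sizeof(NODE));
        pObj->node.keyStart = lo;
        pObj->node.keyEnd   = hi;
        pObj->node.Data     = pObj;   /* back-pointer recovered via btreeSearch */
        return btreeInsert(&pObj->node, pRoot);
    }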
+ break; + } + parentOfX = x->parent; + } + if (x) + { + x->isRed = NV_FALSE; + } +} + +// +// Unlink node from tree +// +NV_STATUS +btreeUnlink +( + PNODE pNode, + PNODE *pRoot +) +{ + NODE *x; + NODE *y; + NODE *z; + NODE *parentOfX; + NvU32 yWasBlack; + + NV_ASSERT_CHECKED(btreeSearch(pNode->keyStart, &z, *pRoot) == NV_OK); + NV_ASSERT_CHECKED(z == pNode); + + if (pNode == NULL || pRoot == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + z = pNode; + + // unlink + if (!z->left || !z->right) + { + // y has a SENTINEL node as a child + y = z; + } + else + { + // find tree successor + y = z->right; + while (y->left) + { + y = y->left; + } + } + + // x is y's only child + if (y->left) + { + x = y->left; + } + else + { + x = y->right; + } + + // remove y from the parent chain + parentOfX = y->parent; + if (x) + { + x->parent = parentOfX; + } + if (y->parent) + { + if (y == y->parent->left) + { + y->parent->left = x; + } + else + { + y->parent->right = x; + } + } + else + { + *pRoot = x; + } + + yWasBlack = !y->isRed; + if (y != z) + { + // we need to replace z with y so the memory for z can be freed + y->parent = z->parent; + if (z->parent) + { + if (z == z->parent->left) + { + z->parent->left = y; + } + else + { + z->parent->right = y; + } + } + else + { + *pRoot = y; + } + + y->isRed = z->isRed; + + y->left = z->left; + if (z->left) + { + z->left->parent = y; + } + y->right = z->right; + if (z->right) + { + z->right->parent = y; + } + + if (parentOfX == z) + { + parentOfX = y; + } + } + + if (yWasBlack) + { + _deleteFixup(pRoot, parentOfX, x); + if (parentOfX) + { + VALIDATE_NODE(parentOfX); + } + } + + return NV_OK; +} + +// +// Search for node in tree. +// +NV_STATUS +btreeSearch +( + NvU64 keyOffset, + PNODE *pNode, + PNODE root +) +{ + // uninitialized ? + NODE *current = root; + while(current) + { + VALIDATE_NODE(current); + if (keyOffset < current->keyStart) + { + current = current->left; + } + else if (keyOffset > current->keyEnd) + { + current = current->right; + } + else + { + *pNode = current; + return NV_OK; + } + } + *pNode = NULL; + return NV_ERR_OBJECT_NOT_FOUND; +} + +// +// Enumerate tree (starting at the node with specified value) +// +NV_STATUS +btreeEnumStart +( + NvU64 keyOffset, + PNODE *pNode, + PNODE root +) +{ + *pNode = NULL; + + // initialized ? + if (root) + { + NODE *current = root; + VALIDATE_TREE(root); + while(current) + { + if (keyOffset < current->keyStart) + { + *pNode = current; + current = current->left; + } + else if (keyOffset > current->keyEnd) + { + current = current->right; + } + else + { + *pNode = current; + break; + + } + } + if (*pNode) + { + VALIDATE_NODE(*pNode); + } + return NV_OK; + } + return NV_OK; +} + +NV_STATUS +btreeEnumNext +( + PNODE *pNode, + PNODE root +) +{ + // no nodes ? + NODE *current = NULL; + VALIDATE_NODE(*pNode); + VALIDATE_NODE(root); + if (root && *pNode) + { + // if we don't have a right subtree return the parent + current = *pNode; + + // pick the leftmost node of the right subtree ? + if (current->right) + { + current = current->right; + for(;current->left;) + { + current = current->left; + } + } + else + { + // go up until we find the right inorder node + for(current = current->parent; current; current = current->parent) + { + if (current->keyStart > (*pNode)->keyEnd) + { + break; + } + } + } + } + *pNode = current; + if (*pNode) + { + VALIDATE_NODE(*pNode); + } + return NV_OK; +} + + + +// +// Frees all the "Data" fields stored in Nodes. 
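btreeEnumStart and btreeEnumNext above give an in-order walk without an explicit stack, using the parent pointers instead. A sketch of visiting every node (illustrative):

    /* Sketch: in-order visit of all nodes, lowest key first. */
    static void visitAll(PNODE root)
    {
        PNODE pNode;

        btreeEnumStart(0, &pNode, root);   /* first node with keyEnd >= 0 */
        while (pNode != NULL)
        {
            /* pNode->Data points back at the enclosing object */
            btreeEnumNext(&pNode, root);
        }
    }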
+// If each Node is embedded in the structure pointed by its "Data" field, then +// this function destroys the whole btree +// +NV_STATUS +btreeDestroyData +( + PNODE pNode +) +{ + if (pNode == NULL) + return NV_OK; + + btreeDestroyData(pNode->left); + btreeDestroyData(pNode->right); + portMemFree (pNode->Data); + + return NV_OK; +} + + + +// +// Frees all the nodes and data stored in them. +// Don't use if the nodes were allocated within other structs +// (e.g. if the Node is embedded within the struct pointed by its "Data" field) +// +NV_STATUS +btreeDestroyNodes +( + PNODE pNode +) +{ + if (pNode == NULL) + return NV_OK; + + btreeDestroyNodes(pNode->left); + btreeDestroyNodes(pNode->right); + portMemFree (pNode); + + return NV_OK; +} diff --git a/src/nvidia/src/libraries/containers/eheap/eheap_old.c b/src/nvidia/src/libraries/containers/eheap/eheap_old.c new file mode 100644 index 0000000..1b0882f --- /dev/null +++ b/src/nvidia/src/libraries/containers/eheap/eheap_old.c @@ -0,0 +1,1367 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#if defined(NVRM) +# include "os/os.h" +#else +# include "nvos.h" +#endif +#include "utils/nvassert.h" +#include "containers/eheap_old.h" + +#if !defined(SRT_BUILD) +#include "os/os.h" +#endif + +static void initPublicObjectFunctionPointers_EHeap(OBJEHEAP *pHeap); +static NV_STATUS eheapInit(OBJEHEAP *, NvU64, NvU64, NvU32, NvU32); +static NV_STATUS eheapDestruct(OBJEHEAP *); +static NV_STATUS eheapAlloc(OBJEHEAP *, NvU32, NvU32 *, NvU64 *, NvU64 *,NvU64, NvU64, EMEMBLOCK **, void*, EHeapOwnershipComparator*); +static NV_STATUS eheapFree(OBJEHEAP *, NvU64); +static void eheapInfo(OBJEHEAP *, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU32 *, NvU64 *); +static void eheapInfoForRange(OBJEHEAP *, NV_RANGE, NvU64 *, NvU64 *, NvU32 *, NvU64 *); +static NV_STATUS eheapGetSize(OBJEHEAP *, NvU64 *); +static NV_STATUS eheapGetFree(OBJEHEAP *, NvU64 *); +static NV_STATUS eheapGetBase(OBJEHEAP *, NvU64 *); +static EMEMBLOCK *eheapGetBlock(OBJEHEAP *, NvU64, NvBool); +static NV_STATUS eheapSetAllocRange(OBJEHEAP *, NvU64, NvU64); +static NV_STATUS eheapTraverse(OBJEHEAP *, void *, EHeapTraversalFn, NvS32); +static NV_STATUS _eheapBlockFree(OBJEHEAP *pHeap, EMEMBLOCK *block); +static NvU32 eheapGetNumBlocks(OBJEHEAP *); +static NV_STATUS eheapSetOwnerIsolation(OBJEHEAP *, NvBool, NvU32); +static NvBool _eheapCheckOwnership(OBJEHEAP *, void*, NvU64, NvU64, EMEMBLOCK *, EHeapOwnershipComparator*); + +void +constructObjEHeap(OBJEHEAP *pHeap, NvU64 Base, NvU64 LimitPlusOne, NvU32 sizeofMemBlock, NvU32 numPreAllocMemStruct) +{ + initPublicObjectFunctionPointers_EHeap(pHeap); + + eheapInit(pHeap, Base, LimitPlusOne, sizeofMemBlock, numPreAllocMemStruct); +} + +static void +initPublicObjectFunctionPointers_EHeap(OBJEHEAP *pHeap) +{ + pHeap->eheapDestruct = eheapDestruct; + pHeap->eheapAlloc = eheapAlloc; + pHeap->eheapFree = eheapFree; + pHeap->eheapInfo = eheapInfo; + pHeap->eheapInfoForRange = eheapInfoForRange; + pHeap->eheapGetSize = eheapGetSize; + pHeap->eheapGetFree = eheapGetFree; + pHeap->eheapGetBase = eheapGetBase; + pHeap->eheapGetBlock = eheapGetBlock; + pHeap->eheapSetAllocRange = eheapSetAllocRange; + pHeap->eheapTraverse = eheapTraverse; + pHeap->eheapGetNumBlocks = eheapGetNumBlocks; + pHeap->eheapSetOwnerIsolation = eheapSetOwnerIsolation; +} + +static NV_STATUS +_eheapAllocMemStruct +( + OBJEHEAP *pHeap, + EMEMBLOCK **ppMemBlock +) +{ + if (pHeap->numPreAllocMemStruct > 0) + { + // We are out of pre-allocated mem data structs + if (NULL == pHeap->pFreeMemStructList) + { + NV_ASSERT(0); + return NV_ERR_OPERATING_SYSTEM; + } + + *ppMemBlock = pHeap->pFreeMemStructList; + pHeap->pFreeMemStructList = pHeap->pFreeMemStructList->next; + } + else + { + *ppMemBlock = portMemAllocNonPaged(pHeap->sizeofMemBlock); + + if (*ppMemBlock == NULL) + { + NV_ASSERT(0); + return NV_ERR_OPERATING_SYSTEM; + } + portMemSet(*ppMemBlock, 0, pHeap->sizeofMemBlock); + } + + return NV_OK; +} + +static NV_STATUS +_eheapFreeMemStruct +( + OBJEHEAP *pHeap, + EMEMBLOCK **ppMemBlock +) +{ + if (pHeap->numPreAllocMemStruct > 0) + { + portMemSet(*ppMemBlock, 0, pHeap->sizeofMemBlock); + + (*ppMemBlock)->next = pHeap->pFreeMemStructList; + pHeap->pFreeMemStructList = *ppMemBlock; + + *ppMemBlock = NULL; + } + else + { + portMemFree(*ppMemBlock); + *ppMemBlock = NULL; + } + + return NV_OK; +} + +// +// Create a heap. Even though we can return error here the resultant +// object must be self consistent (zero pointers, etc) if there were +// alloc failures, etc. 
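Each block descriptor is sized as sizeof(EMEMBLOCK) plus the caller's per-block data, with pData pointing just past the header, so one allocation serves both. In isolation, the pattern _eheapAllocMemStruct and eheapInit rely on is:

    /* Sketch: header and client data in a single allocation. */
    static EMEMBLOCK *allocBlockWithData(NvU32 sizeofData)
    {
        EMEMBLOCK *block = portMemAllocNonPaged(sizeof(EMEMBLOCK) + sizeofData);

        if (block != NULL)
        {
            portMemSet(block, 0, sizeof(EMEMBLOCK) + sizeofData);
            block->pData = (void *)(block + 1);   /* data follows the header */
        }
        return block;
    }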
+// +static NV_STATUS +eheapInit +( + OBJEHEAP *pHeap, + NvU64 Base, + NvU64 LimitPlusOne, + NvU32 sizeofData, + NvU32 numPreAllocMemStruct +) +{ + EMEMBLOCK *block; + NvU32 i; + + // + // Simply create a free heap. + // + pHeap->base = Base; + pHeap->total = LimitPlusOne - Base; + pHeap->rangeLo = pHeap->base; + pHeap->rangeHi = pHeap->base + pHeap->total - 1; + pHeap->free = pHeap->total; + pHeap->sizeofMemBlock = sizeofData + sizeof(EMEMBLOCK); + + pHeap->numPreAllocMemStruct = 0; + pHeap->pPreAllocAddr = NULL; + pHeap->pBlockList = NULL; + pHeap->pFreeBlockList = NULL; + pHeap->pFreeMemStructList = NULL; + pHeap->numBlocks = 0; + pHeap->pBlockTree = NULL; + pHeap->bOwnerIsolation = NV_FALSE; + pHeap->ownerGranularity = 0; + + // + // User requested a static eheap that has a list of pre-allocated + // EMEMBLOCK data structure. + // + if (numPreAllocMemStruct > 0) + { + ++numPreAllocMemStruct; // reserve one for us - see below + + pHeap->pPreAllocAddr = portMemAllocNonPaged(pHeap->sizeofMemBlock * numPreAllocMemStruct); + + if (pHeap->pPreAllocAddr) + { + pHeap->numPreAllocMemStruct = numPreAllocMemStruct; + pHeap->pFreeMemStructList = pHeap->pPreAllocAddr; + + portMemSet(pHeap->pFreeMemStructList, 0, pHeap->sizeofMemBlock * numPreAllocMemStruct); + + // + // Form the list of free mem structures. Just need to utilize the next field of EMEMBLOCK. + // + for (i = 0; i < numPreAllocMemStruct - 1; i++) + { + ((EMEMBLOCK *)((NvU8 *)pHeap->pFreeMemStructList + (i * pHeap->sizeofMemBlock)))->next + = (EMEMBLOCK *)((NvU8 *)pHeap->pFreeMemStructList + (i + 1) * pHeap->sizeofMemBlock); + } + } + } + + if (_eheapAllocMemStruct(pHeap, &block) != NV_OK) + { + return NV_ERR_OPERATING_SYSTEM; + } + + block->owner = NVOS32_BLOCK_TYPE_FREE; + block->refCount = 0; + block->begin = Base; + block->align = Base; + block->end = LimitPlusOne - 1; + block->prevFree = block; + block->nextFree = block; + block->next = block; + block->prev = block; + block->pData = (void*)(block+1); + + // + // Fill in the heap bank info. 
+ // + pHeap->pBlockList = block; + pHeap->pFreeBlockList = block; + pHeap->numBlocks = 1; + + portMemSet((void *)&block->node, 0, sizeof(NODE)); + block->node.keyStart = block->begin; + block->node.keyEnd = block->end; + block->node.Data = (void *)block; + if (btreeInsert(&block->node, &pHeap->pBlockTree) != NV_OK) + { + eheapDestruct(pHeap); + return NV_ERR_OPERATING_SYSTEM; + } + + return NV_OK; +} + +static NV_STATUS +eheapDestruct +( + OBJEHEAP *pHeap +) +{ + EMEMBLOCK *block, *blockFirst, *blockNext; + NvBool headptr_updated; + + if (!pHeap->pBlockList) + return NV_OK; + + // + // Free all allocated blocks + // + do { + block = blockFirst = pHeap->pBlockList; + headptr_updated = NV_FALSE; + + do { + blockNext = block->next; + + _eheapBlockFree(pHeap, block); + + // restart scanning the list, if the heap->pBlockList changed + if (blockFirst != pHeap->pBlockList) { + headptr_updated = NV_TRUE; + break; + } + + block = blockNext; + + } while (block != pHeap->pBlockList); + + } while (headptr_updated); + + if (pHeap->numPreAllocMemStruct > 0) + { + // free static blocks + portMemFree(pHeap->pPreAllocAddr); + pHeap->pPreAllocAddr = NULL; + } + else + { + portMemFree(pHeap->pBlockList); + pHeap->pBlockList = NULL; + } + + return NV_OK; +} + +// 'flags' using NVOS32_ALLOC_FLAGS_* though some are n/a +static NV_STATUS +eheapAlloc +( + OBJEHEAP *pHeap, + NvU32 owner, + NvU32 *flags, + NvU64 *offset, + NvU64 *size, + NvU64 offsetAlign, + NvU64 sizeAlign, + EMEMBLOCK **ppMemBlock, // not generally useful over e.g. a split! + void *pIsolationID, + EHeapOwnershipComparator *checker +) +{ + NvU64 allocLo, allocAl, allocHi; + EMEMBLOCK *blockFirstFree, *blockFree; + EMEMBLOCK *blockNew = NULL, *blockSplit = NULL; + NvU64 desiredOffset; + NvU64 allocSize; + NvU64 rangeLo, rangeHi; + + if ((*flags & NVOS32_ALLOC_FLAGS_FORCE_INTERNAL_INDEX) && + (*flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Save the offset for fixed address requests, or it's likely uninitialized. + desiredOffset = (*flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) ? *offset: 0; + + // + // zero result so that apps that ignore return code have another + // chance to see the error of their ways... + // + *offset = 0; + + // + // Check for valid size. + // + if (*size == 0) + return NV_ERR_INVALID_ARGUMENT; + + // + // Range-limited the request. + // + rangeLo = pHeap->rangeLo; + rangeHi = pHeap->rangeHi; + + if (rangeLo == 0 && rangeHi == 0) { + rangeLo = pHeap->base; + rangeHi = pHeap->base + pHeap->total - 1; + } + if (rangeHi > pHeap->base + pHeap->total - 1) { + rangeHi = pHeap->base + pHeap->total - 1; + } + if (rangeLo > rangeHi) + return NV_ERR_INVALID_ARGUMENT; + + // Align size up. + allocSize = ((*size + (sizeAlign - 1)) / sizeAlign) * sizeAlign; + + // + // Trivial reject size vs. free. 
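Size and offset rounding uses divide-then-multiply rather than bit masking, so sizeAlign and offsetAlign need not be powers of two. A sketch of the idiom with worked values:

    /* Sketch: round 'value' up to a multiple of 'align' (align >= 1). */
    static NvU64 alignUp(NvU64 value, NvU64 align)
    {
        return ((value + (align - 1)) / align) * align;
    }
    /* alignUp(100, 24) == 120; alignUp(96, 24) == 96. */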
+ // + if (pHeap->free < allocSize) + return NV_ERR_NO_MEMORY; + + /* This flag will force an exclusive allocation of the request + * within the range of ownerGranularity + */ + + if ( *flags & NVOS32_ALLOC_FLAGS_FORCE_INTERNAL_INDEX ) + { + NvU64 desiredOffsetLo, desiredOffsetHi; + + NV_ASSERT_OR_RETURN(pHeap->ownerGranularity, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pHeap->bOwnerIsolation && checker, NV_ERR_INVALID_ARGUMENT); + + blockFree = pHeap->pFreeBlockList; + + if (blockFree == NULL) + goto failed; + + do + { + desiredOffset = NV_ALIGN_DOWN(blockFree->begin, pHeap->ownerGranularity) + offsetAlign; + + while (desiredOffset + allocSize - 1 <= blockFree->end) + { + desiredOffsetLo = NV_ALIGN_DOWN(desiredOffset, pHeap->ownerGranularity); + desiredOffsetHi = (((desiredOffset % pHeap->ownerGranularity) == 0) ? + NV_ALIGN_UP((desiredOffset + 1), pHeap->ownerGranularity) : + NV_ALIGN_UP(desiredOffset, pHeap->ownerGranularity)); + + if ((desiredOffset >= blockFree->begin) && + ((desiredOffsetLo >= blockFree->begin) && + (desiredOffsetHi <= blockFree->end))) + { + if (_eheapCheckOwnership(pHeap, pIsolationID, desiredOffset, + desiredOffset + allocSize - 1, blockFree, checker)) + { + allocLo = desiredOffset; + allocHi = desiredOffset + allocSize - 1; + allocAl = allocLo; + goto got_one; + } + } + + desiredOffset += pHeap->ownerGranularity; + } + + blockFree = blockFree->nextFree; + + } while (blockFree != pHeap->pFreeBlockList); + + /* return error if can't get that particular address */ + goto failed; + } + + // Ensure a valid allocation type was passed in + //if (type > NVOS32_NUM_MEM_TYPES - 1) + //return NV_ERR_INVALID_ARGUMENT; + + // + // Check for fixed address request. + // This allows caller to say: I really want this memory at a particular + // offset. Returns error if can't get that offset. + // + if ( *flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE ) + { + // is our desired offset suitably aligned? + if (desiredOffset % offsetAlign) + goto failed; + + blockFree = pHeap->pFreeBlockList; + + if (blockFree == NULL) + { + goto failed; + } + + do + { + // + // Allocate from the bottom of the memory block. + // + blockFree = blockFree->nextFree; + + // Does this block contain our desired range? + if ( (desiredOffset >= blockFree->begin) && + (desiredOffset + allocSize - 1) <= blockFree->end ) + { + // + // Make sure no allocated block between ALIGN_DOWN(allocLo, granularity) + // and ALIGN_UP(allocHi, granularity) have a different owner than the current allocation + // + if (pHeap->bOwnerIsolation) + { + NV_ASSERT(NULL != checker); + if (!_eheapCheckOwnership(pHeap, pIsolationID, desiredOffset, + desiredOffset + allocSize - 1, blockFree, checker)) + { + break; + } + } + + // we have a match, now remove it from the pool + allocLo = desiredOffset; + allocHi = desiredOffset + allocSize - 1; + allocAl = allocLo; + goto got_one; + } + + } while (blockFree != pHeap->pFreeBlockList); + + // return error if can't get that particular address + goto failed; + } + + blockFirstFree = pHeap->pFreeBlockList; + if (!blockFirstFree) + goto failed; + + // + // When scanning upwards, start at the bottom - 1 so the following loop looks symmetric. + // + if ( *flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN ) { + blockFirstFree = blockFirstFree->prevFree; + } + blockFree = blockFirstFree; + do + { + NvU64 blockLo; + NvU64 blockHi; + + // + // Is this block completely out of range? 
+ // + if ( ( blockFree->end < rangeLo ) || ( blockFree->begin > rangeHi ) ) + { + if ( *flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN ) + blockFree = blockFree->prevFree; + else + blockFree = blockFree->nextFree; + continue; + } + + // + // Find the intersection of the free block and the specified range. + // + blockLo = (rangeLo > blockFree->begin) ? rangeLo : blockFree->begin; + blockHi = (rangeHi < blockFree->end) ? rangeHi : blockFree->end; + + if ( *flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN ) + { + // + // Allocate from the top of the memory block. + // + allocLo = (blockHi - allocSize + 1) / offsetAlign * offsetAlign; + allocAl = allocLo; + allocHi = allocAl + allocSize - 1; + } + else + { + // + // Allocate from the bottom of the memory block. + // + allocAl = (blockLo + (offsetAlign - 1)) / offsetAlign * offsetAlign; + allocLo = allocAl; + allocHi = allocAl + allocSize - 1; + } + + // + // Make sure no allocated block between ALIGN_DOWN(allocLo, granularity) + // and ALIGN_UP(allocHi, granularity) have a different owner than the current allocation + // + if (pHeap->bOwnerIsolation) + { + NV_ASSERT(NULL != checker); + + if (_eheapCheckOwnership(pHeap, pIsolationID, allocLo, allocHi, blockFree, checker)) + { + goto alloc_done; + } + + // + // Try realloc if we still have enough free memory in current free block + // + if (*flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN) + { + NvU64 checkLo = NV_ALIGN_DOWN(allocLo, pHeap->ownerGranularity); + + if (checkLo > blockFree->begin) + { + blockHi = checkLo; + + allocLo = (blockHi - allocSize + 1) / offsetAlign * offsetAlign; + allocAl = allocLo; + allocHi = allocAl + allocSize - 1; + + if (_eheapCheckOwnership(pHeap, pIsolationID, allocLo, allocHi, blockFree, checker)) + { + goto alloc_done; + } + } + } + else + { + NvU64 checkHi = NV_ALIGN_UP(allocHi, pHeap->ownerGranularity); + + if (checkHi < blockFree->end) + { + blockLo = checkHi; + + allocAl = (blockLo + (offsetAlign - 1)) / offsetAlign * offsetAlign; + allocLo = allocAl; + allocHi = allocAl + allocSize - 1; + + if (_eheapCheckOwnership(pHeap, pIsolationID, allocLo, allocHi, blockFree, checker)) + { + goto alloc_done; + } + } + } + + // + // Cannot find any available memory in current free block, go to the next + // + goto next_free; + } + +alloc_done: + // + // Does the desired range fall completely within this block? + // Also make sure it does not wrap-around. + // Also make sure it is within the desired range. + // + if ((allocLo >= blockFree->begin) && (allocHi <= blockFree->end)) + { + if (allocLo <= allocHi) + if ((allocLo >= rangeLo) && (allocHi <= rangeHi)) + goto got_one; + + } + +next_free: + if ( *flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN ) + blockFree = blockFree->prevFree; + else + blockFree = blockFree->nextFree; + + } while (blockFree != blockFirstFree); + + // + // Out of memory. + // + goto failed; + + // + // We have a match. Now link it in, trimming or splitting + // any slop from the enclosing block as needed. + // + + got_one: + + if ((allocLo == blockFree->begin) && (allocHi == blockFree->end)) + { + // + // Wow, exact match so replace free block. + // Remove from free list. + // + blockFree->nextFree->prevFree = blockFree->prevFree; + blockFree->prevFree->nextFree = blockFree->nextFree; + if (pHeap->pFreeBlockList == blockFree) + { + // + // This could be the last free block. 
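When the match consumes a free block exactly, the block is unlinked from the circular, doubly linked free list; reduced to its essentials, that unlink (including the head fix-up the code below performs) is:

    /* Sketch: unlink 'b' from a circular doubly linked free list. */
    static void freeListUnlink(EMEMBLOCK **ppHead, EMEMBLOCK *b)
    {
        b->nextFree->prevFree = b->prevFree;
        b->prevFree->nextFree = b->nextFree;
        if (*ppHead == b)
            *ppHead = (b->nextFree == b) ? NULL : b->nextFree;  /* was it the last? */
    }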
+ // + if (blockFree->nextFree == blockFree) + pHeap->pFreeBlockList = NULL; + else + pHeap->pFreeBlockList = blockFree->nextFree; + } + + // + // Set owner/type values here. Don't move because some fields are unions. + // + blockFree->owner = owner; + blockFree->refCount = 1; + blockFree->align = allocAl; + + // tail end code below assumes 'blockFree' is the new block + blockNew = blockFree; + } + else if ((allocLo >= blockFree->begin) && (allocHi <= blockFree->end)) + { + // + // Found a fit. + // It isn't exact, so we'll have to do a split + // + if (_eheapAllocMemStruct(pHeap, &blockNew) != NV_OK) + { + goto failed; + } + + blockNew->owner = owner; + blockNew->refCount = 1; + blockNew->begin = allocLo; + blockNew->align = allocAl; + blockNew->end = allocHi; + + if ((blockFree->begin < blockNew->begin) && (blockFree->end > blockNew->end)) + { + // + // Split free block in two. + // + if (_eheapAllocMemStruct(pHeap, &blockSplit) != NV_OK) + { + goto failed; + } + + // + // Remove free block from rb-tree since node's range will be + // changed. + // + if (btreeUnlink(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + blockSplit->owner = NVOS32_BLOCK_TYPE_FREE; + blockSplit->refCount = 0; + blockSplit->begin = blockNew->end + 1; + blockSplit->align = blockSplit->begin; + blockSplit->end = blockFree->end; + blockSplit->pData = (void*)(blockNew+1); + blockFree->end = blockNew->begin - 1; + // + // Insert free split block into free list. + // + blockSplit->nextFree = blockFree->nextFree; + blockSplit->prevFree = blockFree; + blockSplit->nextFree->prevFree = blockSplit; + blockFree->nextFree = blockSplit; + // + // Insert new and split blocks into block list. + // + blockNew->next = blockSplit; + blockNew->prev = blockFree; + blockSplit->next = blockFree->next; + blockSplit->prev = blockNew; + blockFree->next = blockNew; + blockSplit->next->prev = blockSplit; + + // update numBlocks count + pHeap->numBlocks++; + + // re-insert updated free block into rb-tree + blockFree->node.keyEnd = blockFree->end; + if (btreeInsert(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + // insert new and split blocks into rb-tree + portMemSet((void *)&blockNew->node, 0, sizeof(NODE)); + portMemSet((void *)&blockSplit->node, 0, sizeof(NODE)); + blockNew->node.keyStart = blockNew->begin; + blockNew->node.keyEnd = blockNew->end; + blockNew->node.Data = (void *)blockNew; + blockSplit->node.keyStart = blockSplit->begin; + blockSplit->node.keyEnd = blockSplit->end; + blockSplit->node.Data = (void *)blockSplit; + if (btreeInsert(&blockNew->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + if (btreeInsert(&blockSplit->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + } + else if (blockFree->end == blockNew->end) + { + // + // Remove free block from rb-tree since node's range will be + // changed. + // + if (btreeUnlink(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + // + // New block inserted after free block. 
+ // + blockFree->end = blockNew->begin - 1; + blockNew->next = blockFree->next; + blockNew->prev = blockFree; + blockFree->next->prev = blockNew; + blockFree->next = blockNew; + + // re-insert updated free block into rb-tree + blockFree->node.keyEnd = blockFree->end; + if (btreeInsert(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + // insert new block into rb-tree + portMemSet((void *)&blockNew->node, 0, sizeof(NODE)); + blockNew->node.keyStart = blockNew->begin; + blockNew->node.keyEnd = blockNew->end; + blockNew->node.Data = (void *)blockNew; + if (btreeInsert(&blockNew->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + } + else if (blockFree->begin == blockNew->begin) + { + // + // Remove free block from rb-tree since node's range will be + // changed. + // + if (btreeUnlink(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + // + // New block inserted before free block. + // + blockFree->begin = blockNew->end + 1; + blockFree->align = blockFree->begin; + blockNew->next = blockFree; + blockNew->prev = blockFree->prev; + blockFree->prev->next = blockNew; + blockFree->prev = blockNew; + if (pHeap->pBlockList == blockFree) + pHeap->pBlockList = blockNew; + + // re-insert updated free block into rb-tree + blockFree->node.keyStart = blockFree->begin; + if (btreeInsert(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + // insert new block into rb-tree + portMemSet((void *)&blockNew->node, 0, sizeof(NODE)); + blockNew->node.keyStart = blockNew->begin; + blockNew->node.keyEnd = blockNew->end; + blockNew->node.Data = (void *)blockNew; + if (btreeInsert(&blockNew->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + } + else + { + failed: + if (blockNew) _eheapFreeMemStruct(pHeap, &blockNew); + if (blockSplit) _eheapFreeMemStruct(pHeap, &blockSplit); + return NV_ERR_NO_MEMORY; + } + + pHeap->numBlocks++; + } + + NV_ASSERT(blockNew != NULL); // assert is for Coverity + pHeap->free -= blockNew->end - blockNew->begin + 1; // Reduce free amount by allocated block size. + + // Initialize a pointer to the outer wrapper's specific control structure, tacked to the end of the EMEMBLOCK + blockNew->pData = (void*)(blockNew+1); + + // Return values + *size = allocSize; + *offset = blockNew->align; + if ( ppMemBlock) *ppMemBlock = blockNew; + + return NV_OK; +} + +static NV_STATUS +_eheapBlockFree +( + OBJEHEAP *pHeap, + EMEMBLOCK *block +) +{ + EMEMBLOCK *blockTmp; + + // + // Check for valid owner. + // + if (block->owner == NVOS32_BLOCK_TYPE_FREE) return NV_ERR_INVALID_ARGUMENT; + + // + // Check refCount. + // + if (--block->refCount != 0) + return NV_OK; + + // + // Update free count. + // + pHeap->free += block->end - block->begin + 1; + + // + // + // Can this merge with any surrounding free blocks? + // + if ((block->prev->owner == NVOS32_BLOCK_TYPE_FREE) && (block != pHeap->pBlockList)) + { + // + // Remove block to be freed and previous one since nodes will be + // combined into single one. + // + if (btreeUnlink(&block->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + if (btreeUnlink(&block->prev->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + + // + // Merge with previous block. 
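Freeing coalesces a block with free neighbors so the heap does not fragment permanently. Because the block list tiles the managed range end to end, physical adjacency is implicit; the only tests needed are the neighbor's owner and the list-head guard, as in this reduced sketch of the condition used below:

    /* Sketch: merge-eligibility test for the previous neighbor (adjacency is
     * implicit because blocks tile the range). */
    static NvBool canMergeWithPrev(OBJEHEAP *pHeap, EMEMBLOCK *block)
    {
        return (block->prev->owner == NVOS32_BLOCK_TYPE_FREE) &&
               (block != pHeap->pBlockList);   /* prev must not wrap around */
    }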
+ // + block->prev->next = block->next; + block->next->prev = block->prev; + block->prev->end = block->end; + blockTmp = block; + block = block->prev; + pHeap->numBlocks--; + _eheapFreeMemStruct(pHeap, &blockTmp); + + // re-insert updated free block into rb-tree + block->node.keyEnd = block->end; + if (btreeInsert(&block->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + } + if ((block->next->owner == NVOS32_BLOCK_TYPE_FREE) && (block->next != pHeap->pBlockList)) + { + // + // Remove block to be freed and next one since nodes will be + // combined into single one. + // + if (btreeUnlink(&block->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + if (btreeUnlink(&block->next->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + + // + // Merge with next block. + // + block->prev->next = block->next; + block->next->prev = block->prev; + block->next->begin = block->begin; + if (pHeap->pBlockList == block) + pHeap->pBlockList = block->next; + if (block->owner == NVOS32_BLOCK_TYPE_FREE) + { + if (pHeap->pFreeBlockList == block) + pHeap->pFreeBlockList = block->nextFree; + block->nextFree->prevFree = block->prevFree; + block->prevFree->nextFree = block->nextFree; + } + blockTmp = block; + block = block->next; + pHeap->numBlocks--; + _eheapFreeMemStruct(pHeap, &blockTmp); + + // re-insert updated free block into rb-tree + block->node.keyStart = block->begin; + if (btreeInsert(&block->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + } + if (block->owner != NVOS32_BLOCK_TYPE_FREE) + { + // + // Nothing was merged. Add to free list. + // + blockTmp = pHeap->pFreeBlockList; + if (!blockTmp) + { + pHeap->pFreeBlockList = block; + block->nextFree = block; + block->prevFree = block; + } + else + { + if (blockTmp->begin > block->begin) + // + // Insert into beginning of free list. + // + pHeap->pFreeBlockList = block; + else if (blockTmp->prevFree->begin > block->begin) + // + // Insert into free list. + // + do + { + blockTmp = blockTmp->nextFree; + } while (blockTmp->begin < block->begin); + /* + else + * Insert at end of list. 
+ */ + block->nextFree = blockTmp; + block->prevFree = blockTmp->prevFree; + block->prevFree->nextFree = block; + blockTmp->prevFree = block; + } + } + block->owner = NVOS32_BLOCK_TYPE_FREE; + //block->mhandle = 0x0; + block->align = block->begin; + + portMemSet((block+1), 0, pHeap->sizeofMemBlock - sizeof(EMEMBLOCK)); + + return NV_OK; +} + +static NV_STATUS +eheapFree +( + OBJEHEAP *pHeap, + NvU64 offset +) +{ + EMEMBLOCK *block; + + block = (EMEMBLOCK *) eheapGetBlock(pHeap, offset, 0); + if (!block) + return NV_ERR_INVALID_OFFSET; + + return _eheapBlockFree(pHeap, block); +} + +static EMEMBLOCK * +eheapGetBlock +( + OBJEHEAP *pHeap, + NvU64 offset, + NvBool bReturnFreeBlock +) +{ + EMEMBLOCK *block; + PNODE pNode; + + if (btreeSearch(offset, &pNode, pHeap->pBlockTree) != NV_OK) + { + return NULL; + } + + block = (EMEMBLOCK *)pNode->Data; + if ((block->owner == NVOS32_BLOCK_TYPE_FREE ) && !bReturnFreeBlock) + { + return NULL; + } + + return block; +} + +static NV_STATUS +eheapGetSize +( + OBJEHEAP *pHeap, + NvU64 *size +) +{ + *size = pHeap->total; + return NV_OK; +} + +static NV_STATUS +eheapGetFree +( + OBJEHEAP *pHeap, + NvU64 *free +) +{ + *free = pHeap->free; + return NV_OK; +} + +static NV_STATUS +eheapGetBase +( + OBJEHEAP *pHeap, + NvU64 *base +) +{ + *base = pHeap->base; + return NV_OK; +} + +static void +eheapInfo +( + OBJEHEAP *pHeap, + NvU64 *pBytesFree, // in all of the space managed + NvU64 *pBytesTotal, // in all of the space managed + NvU64 *pLargestFreeOffset, // constrained to pHeap->rangeLo, pHeap->rangeHi + NvU64 *pLargestFreeSize, // constrained to pHeap->rangeLo, pHeap->rangeHi + NvU32 *pNumFreeBlocks, + NvU64 *pUsableBytesFree // constrained to pHeap->rangeLo, pHeap->rangeHi +) +{ + NV_RANGE range = rangeMake(pHeap->rangeLo, pHeap->rangeHi); + + if (pBytesFree) + { + *pBytesFree = pHeap->free; + } + if (pBytesTotal) + { + *pBytesTotal = pHeap->total; + } + eheapInfoForRange(pHeap, range, pLargestFreeOffset, pLargestFreeSize, pNumFreeBlocks, pUsableBytesFree); +} + +static void +eheapInfoForRange +( + OBJEHEAP *pHeap, + NV_RANGE range, + NvU64 *pLargestFreeOffset, // constrained to rangeLo, rangeHi + NvU64 *pLargestFreeSize, // constrained to rangeLo, rangeHi + NvU32 *pNumFreeBlocks, + NvU64 *pUsableBytesFree // constrained to rangeLo, rangeHi +) +{ + EMEMBLOCK *blockFirstFree, *blockFree; + NvU64 freeBlockSize = 0; + NvU64 largestFreeOffset = 0; + NvU64 largestFreeSize = 0; + NvU32 numFreeBlocks = 0; + + if (pUsableBytesFree) + *pUsableBytesFree = 0; + + blockFirstFree = pHeap->pFreeBlockList; + if (blockFirstFree) + { + NV_ASSERT( range.lo <= range.hi ); + + blockFree = blockFirstFree; + do { + NvU64 clampedBlockBegin = (blockFree->begin >= range.lo) ? + blockFree->begin : range.lo; + NvU64 clampedBlockEnd = (blockFree->end <= range.hi) ? 
+ blockFree->end : range.hi; + if (clampedBlockBegin <= clampedBlockEnd) + { + numFreeBlocks++; + freeBlockSize = clampedBlockEnd - clampedBlockBegin + 1; + + if (pUsableBytesFree) + *pUsableBytesFree += freeBlockSize; + + if ( freeBlockSize > largestFreeSize ) + { + largestFreeOffset = clampedBlockBegin; + largestFreeSize = freeBlockSize; + } + } + blockFree = blockFree->nextFree; + } while (blockFree != blockFirstFree); + } + + if (pLargestFreeOffset) + { + *pLargestFreeOffset = largestFreeOffset; + } + if (pLargestFreeSize) + { + *pLargestFreeSize = largestFreeSize; + } + if (pNumFreeBlocks) + { + *pNumFreeBlocks = numFreeBlocks; + } +} + +static NV_STATUS +eheapSetAllocRange +( + OBJEHEAP *pHeap, + NvU64 rangeLo, + NvU64 rangeHi +) +{ + + if ( rangeLo < pHeap->base ) + rangeLo = pHeap->base; + + if ( rangeHi > (pHeap->base + pHeap->total - 1) ) + rangeHi = (pHeap->base + pHeap->total - 1); + + if ( rangeHi < rangeLo ) + return NV_ERR_INVALID_ARGUMENT; + + pHeap->rangeLo = rangeLo; + pHeap->rangeHi = rangeHi; + + return NV_OK; +} + +static NV_STATUS +eheapTraverse +( + OBJEHEAP *pHeap, + void *pEnv, + EHeapTraversalFn traversalFn, + NvS32 direction +) +{ + NvU32 cont = 1, backAtFirstBlock = 0; + EMEMBLOCK *pBlock, *pBlockNext; + NV_STATUS rc; + NvU64 cursorOffset; // for dealing with cursor invalidates. + NvU64 firstBlockBegin, firstBlockEnd; // we'll never call the traversal fn twice on the same (sub)extent. + + pBlock = (direction > 0) ? pHeap->pBlockList : pHeap->pBlockList->prev; + NV_ASSERT_OR_RETURN(pBlock != NULL, NV_ERR_INVALID_STATE); + + // + // Cursor invalidates mean we can't compare with 'pHeap->pBlockList'. + // Instead we'll compare with the extent. If we intersect it at all in + // a later block then we'll consider that as having returned to the first block. + // + firstBlockBegin = pBlock->begin; + firstBlockEnd = pBlock->end; + + do + { + NvU32 invalCursor = 0; + + if ( direction > 0 ) + { + pBlockNext = pBlock->next; + cursorOffset = pBlockNext->begin; + } + else + { + pBlockNext = pBlock->prev; + cursorOffset = pBlockNext->end; + } + + rc = traversalFn(pHeap, pEnv, pBlock, &cont, &invalCursor); + + if ( invalCursor ) + { + // A block was added at or freed. So far only freeing the current block. + pBlock = eheapGetBlock(pHeap, cursorOffset, 1 /*return even if it is a free block*/); + + // Advance to the next block if the cursor block was merged. + if ((direction > 0) && (pBlock->begin < cursorOffset)) + { + pBlock = pBlock->next; + } + else if ((direction <= 0) && (pBlock->end > cursorOffset)) + { + pBlock = pBlock->prev; + } + } + else + { + // No change to the list, use the fast way to find the next block. + pBlock = pBlockNext; + + } + + NV_ASSERT_OR_RETURN(pBlock != NULL, NV_ERR_INVALID_STATE); // 1. list is circular, 2. cursorOffset should always be found unless the list is badly malformed. + + // + // Back to first block? Defined as being at a block for which the + // intersection with the original first block is non-null. + // + if ( ((firstBlockBegin >= pBlock->begin ) && (firstBlockBegin <= pBlock->end)) || + ((firstBlockEnd <= pBlock->end ) && (firstBlockEnd >= pBlock->begin)) ) + { + backAtFirstBlock = 1; + } + + } while (cont && !backAtFirstBlock); + + return rc; +} + +/*! + * @brief returns number of blocks in eHeap. 
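eheapTraverse above visits every block and tolerates the callback freeing the current block, provided the change is reported through invalCursor. A callback sketch matching the traversalFn call shape (names hypothetical):

    /* Sketch: count allocated blocks via eheapTraverse. */
    static NV_STATUS countAllocated(OBJEHEAP *pHeap, void *pEnv, EMEMBLOCK *pBlock,
                                    NvU32 *pCont, NvU32 *pInvalCursor)
    {
        if (pBlock->owner != NVOS32_BLOCK_TYPE_FREE)
            (*(NvU32 *)pEnv)++;
        *pCont        = 1;   /* keep walking */
        *pInvalCursor = 0;   /* we did not add or free any block */
        return NV_OK;
    }
    /* usage: NvU32 n = 0; pHeap->eheapTraverse(pHeap, &n, countAllocated, 1); */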
+ * + * @param[in] pHeap: pointer to eHeap struct to get data from + * + * @returns the number of blocks (free or allocated) currently in the heap + */ +static NvU32 +eheapGetNumBlocks +( + OBJEHEAP *pHeap +) +{ + return pHeap->numBlocks; +} + +/** + * @brief Set up block owner isolation + * + * Owner isolation means that no two block owners can own allocations which live within a specified range. + * + * @param[in] pHeap pointer to EHEAP object + * @param[in] bEnable NV_TRUE to enable the allocation isolation + * @param[in] granularity allocation granularity + * + * @return NV_OK on success + */ +NV_STATUS +eheapSetOwnerIsolation +( + OBJEHEAP *pHeap, + NvBool bEnable, + NvU32 granularity +) +{ + // This can only be set before any allocations have occurred. + if (pHeap->free != pHeap->total) + { + return NV_ERR_INVALID_STATE; + } + // Saying no 2 block owners can share the same block doesn't make sense. + if (bEnable && granularity < 2) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (bEnable && (granularity & (granularity-1))) + { + return NV_ERR_INVALID_ARGUMENT; + } + pHeap->bOwnerIsolation = bEnable; + pHeap->ownerGranularity = granularity; + + return NV_OK; +} + +/** + * @brief Check heap block ownership + * + * @param[in] pHeap Pointer to EHEAP object + * @param[in] pIsolationID Unique isolation ID constructed by the caller + * @param[in] allocLo Allocated range low + * @param[in] allocHi Allocated range high + * @param[in] blockFree Free block list + * @param[in] pChecker Caller defined ownership ID comparator + * + * @return NV_TRUE if success + */ +static NvBool +_eheapCheckOwnership +( + OBJEHEAP *pHeap, + void *pIsolationID, + NvU64 allocLo, + NvU64 allocHi, + EMEMBLOCK *blockFree, + EHeapOwnershipComparator *pComparator +) +{ + EMEMBLOCK *pTmpBlock; + NvU64 checkLo = NV_ALIGN_DOWN(allocLo, pHeap->ownerGranularity); + NvU64 checkHi = (((allocHi % pHeap->ownerGranularity) == 0) ? + NV_ALIGN_UP((allocHi + 1), pHeap->ownerGranularity) : + NV_ALIGN_UP(allocHi, pHeap->ownerGranularity)); + NvU64 check; + + checkLo = (checkLo <= pHeap->base) ? pHeap->base : checkLo; + checkHi = (checkHi >= pHeap->base + pHeap->total - 1) ? (pHeap->base + pHeap->total - 1) : checkHi; + + NV_ASSERT(NULL != blockFree); + + if (blockFree->begin > checkLo || blockFree->end < checkHi) + { + for (check = checkLo; check < checkHi; /* in-loop */) + { + pTmpBlock = pHeap->eheapGetBlock(pHeap, check, NV_TRUE); + NV_ASSERT(pTmpBlock); + + if (pTmpBlock->owner != NVOS32_BLOCK_TYPE_FREE) + { + if (!pComparator(pIsolationID, pTmpBlock->pData)) + { + return NV_FALSE; + } + } + + check = pTmpBlock->end + 1; + } + } + + return NV_TRUE; +} diff --git a/src/nvidia/src/libraries/containers/list.c b/src/nvidia/src/libraries/containers/list.c new file mode 100644 index 0000000..4f5155b --- /dev/null +++ b/src/nvidia/src/libraries/containers/list.c @@ -0,0 +1,438 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "containers/list.h" +#include "utils/nvassert.h" + +CONT_VTABLE_DEFN(ListBase, listIterRange_IMPL, NULL); + +#if PORT_IS_CHECKED_BUILD +static NvBool _listIterRangeCheck(ListBase *pList, ListNode *pFirst, + ListNode *pLast); +#endif +static void _listInsertBase(ListBase *pList, void *pNext, void *pValue); + +void listInit_IMPL(NonIntrusiveList *pList, PORT_MEM_ALLOCATOR *pAllocator, + NvU32 valueSize) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pList); + NV_ASSERT_OR_RETURN_VOID(NULL != pAllocator); + + portMemSet(&(pList->base), 0, sizeof(pList->base)); + CONT_VTABLE_INIT(ListBase, &pList->base); + pList->pAllocator = pAllocator; + pList->valueSize = valueSize; + pList->base.nodeOffset = (NvS32)(0 - sizeof(ListNode)); +} + +void listInitIntrusive_IMPL(IntrusiveList *pList, NvS32 nodeOffset) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pList); + portMemSet(&(pList->base), 0, sizeof(pList->base)); + CONT_VTABLE_INIT(ListBase, &pList->base); + pList->base.nodeOffset = nodeOffset; +} + +static void +_listDestroy(ListBase *pList, PORT_MEM_ALLOCATOR *pAllocator) +{ + ListNode *pNode; + NV_ASSERT_OR_RETURN_VOID(NULL != pList); + + pNode = pList->pHead; + + pList->pHead = NULL; + pList->pTail = NULL; + pList->count = 0; + NV_CHECKED_ONLY(pList->versionNumber++); + + while (pNode != NULL) + { + ListNode *pTemp = pNode; + pNode = pNode->pNext; + pTemp->pPrev = NULL; + pTemp->pNext = NULL; + NV_CHECKED_ONLY(pTemp->pList = NULL); + if (NULL != pAllocator) + { + PORT_FREE(pAllocator, pTemp); + } + } +} + +void listDestroy_IMPL(NonIntrusiveList *pList) +{ + _listDestroy(&pList->base, pList->pAllocator); +} + +void listDestroyIntrusive_IMPL(ListBase *pList) +{ + _listDestroy(pList, NULL); +} + +NvU32 listCount_IMPL(ListBase *pList) +{ + NV_ASSERT_OR_RETURN(pList, 0); + return pList->count; +} + +void *listInsertNew_IMPL(NonIntrusiveList *pList, void *pNext) +{ + void *pNode = NULL; + void *pValue; + + NV_ASSERT_OR_RETURN(NULL != pList, NULL); + + pNode = PORT_ALLOC(pList->pAllocator, sizeof(ListNode) + pList->valueSize); + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + + portMemSet(pNode, 0, sizeof(ListNode) + pList->valueSize); + pValue = listNodeToValue(&pList->base, pNode); + _listInsertBase(&(pList->base), pNext, pValue); + + return pValue; +} + +void *listAppendNew_IMPL(NonIntrusiveList *pList) +{ + return listInsertNew_IMPL(pList, NULL); +} + +void *listPrependNew_IMPL(NonIntrusiveList *pList) 
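Both list flavors share one node/value mapping: the signed base.nodeOffset locates a value's ListNode relative to the value pointer. Non-intrusive lists set it to -sizeof(ListNode), since the node sits immediately before the heap-allocated value (as listInsertNew_IMPL above arranges); intrusive lists pass the node's offset within the caller's struct. The presumed conversion helpers reduce to this pointer arithmetic (sketch, not the headers' exact definitions):

    /* Sketch: value <-> node conversion implied by base.nodeOffset. */
    static ListNode *valueToNode(ListBase *pList, void *pValue)
    {
        return (pValue == NULL) ? NULL
             : (ListNode *)((NvU8 *)pValue + pList->nodeOffset);
    }

    static void *nodeToValue(ListBase *pList, ListNode *pNode)
    {
        return (pNode == NULL) ? NULL
             : (void *)((NvU8 *)pNode - pList->nodeOffset);
    }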
+{
+    return listInsertNew_IMPL(pList, listHead_IMPL(&(pList->base)));
+}
+
+void *listInsertValue_IMPL
+(
+    NonIntrusiveList *pList,
+    void *pNext,
+    const void *pValue
+)
+{
+    void *pCurrent;
+
+    NV_ASSERT_OR_RETURN(NULL != pValue, NULL);
+
+    pCurrent = listInsertNew_IMPL(pList, pNext);
+    if (NULL == pCurrent)
+        return NULL;
+
+    return portMemCopy(pCurrent, pList->valueSize, pValue, pList->valueSize);
+}
+
+void *listAppendValue_IMPL(NonIntrusiveList *pList, const void *pValue)
+{
+    return listInsertValue_IMPL(pList, NULL, pValue);
+}
+
+void *listPrependValue_IMPL(NonIntrusiveList *pList, const void *pValue)
+{
+    return listInsertValue_IMPL(pList, listHead_IMPL(&(pList->base)), pValue);
+}
+
+void listInsertExisting_IMPL(IntrusiveList *pList, void *pNext, void *pValue)
+{
+    NV_ASSERT_OR_RETURN_VOID(NULL != pList);
+    NV_ASSERT_OR_RETURN_VOID(NULL != pValue);
+    _listInsertBase(&(pList->base), pNext, pValue);
+}
+
+void listAppendExisting_IMPL(IntrusiveList *pList, void *pValue)
+{
+    listInsertExisting_IMPL(pList, NULL, pValue);
+}
+
+void listPrependExisting_IMPL(IntrusiveList *pList, void *pValue)
+{
+    listInsertExisting_IMPL(pList, listHead_IMPL(&(pList->base)), pValue);
+}
+
+// Nonintrusive version: unlinks the node, then frees its allocation.
+void listRemove_IMPL(NonIntrusiveList *pList, void *pValue)
+{
+    if (pValue == NULL)
+        return;
+    listRemoveIntrusive_IMPL(&(pList->base), pValue);
+    PORT_FREE(pList->pAllocator, listValueToNode(&pList->base, pValue));
+}
+
+// Intrusive version: unlinks the node only; the caller owns the storage.
+void listRemoveIntrusive_IMPL
+(
+    ListBase *pList,
+    void *pValue
+)
+{
+    ListNode *pNode;
+
+    if (pValue == NULL)
+        return;
+
+    pNode = listValueToNode(pList, pValue);
+    NV_ASSERT_OR_RETURN_VOID(NULL != pNode);
+    NV_ASSERT_CHECKED(pNode->pList == pList);
+
+    if (pNode->pPrev != NULL)
+        pNode->pPrev->pNext = pNode->pNext;
+    else
+        pList->pHead = pNode->pNext;
+
+    if (pNode->pNext != NULL)
+        pNode->pNext->pPrev = pNode->pPrev;
+    else
+        pList->pTail = pNode->pPrev;
+
+    pNode->pNext = NULL;
+    pNode->pPrev = NULL;
+
+    pList->count--;
+    NV_CHECKED_ONLY(pList->versionNumber++);
+    NV_CHECKED_ONLY(pNode->pList = NULL);
+}
+
+// Here pValue is matched by content, not by address: the first element whose
+// bytes compare equal to *pValue is removed.
+void listRemoveFirstByValue_IMPL
+(
+    NonIntrusiveList *pList,
+    void *pValue
+)
+{
+    void *pValueFound = listFindByValue_IMPL(pList, pValue);
+    if (pValueFound)
+    {
+        listRemove_IMPL(pList, pValueFound);
+    }
+}
+
+void listRemoveAllByValue_IMPL
+(
+    NonIntrusiveList *pList,
+    void *pValue
+)
+{
+    void *pValueFound;
+    ListNode *pNode;
+
+    NV_ASSERT_OR_RETURN_VOID(NULL != pList);
+    NV_ASSERT_OR_RETURN_VOID(NULL != pValue);
+
+    pNode = pList->base.pHead;
+    while (pNode != NULL)
+    {
+        pValueFound = listNodeToValue(&pList->base, pNode);
+        pNode = pNode->pNext;
+
+        if (portMemCmp(pValueFound, pValue, pList->valueSize) == 0)
+        {
+            listRemove_IMPL(pList, pValueFound);
+            pValueFound = NULL;
+        }
+    }
+}
+
+void *listFindByValue_IMPL
+(
+    NonIntrusiveList *pList,
+    void *pValue
+)
+{
+    void *pResult;
+    ListNode *pNode;
+
+    NV_ASSERT_OR_RETURN(NULL != pList, NULL);
+    NV_ASSERT_OR_RETURN(NULL != pValue, NULL);
+
+    pNode = pList->base.pHead;
+    while (pNode != NULL)
+    {
+        pResult = listNodeToValue(&pList->base, pNode);
+
+        if (portMemCmp(pResult, pValue, pList->valueSize) == 0)
+            return pResult;
+
+        pNode = pNode->pNext;
+    }
+
+    return NULL;
+}
+
+void *listHead_IMPL
+(
+    ListBase *pList
+)
+{
+    NV_ASSERT_OR_RETURN(NULL != pList, NULL);
+    return listNodeToValue(pList, pList->pHead);
+}
+
+void *listTail_IMPL
+(
+    ListBase *pList
+)
+{
+    NV_ASSERT_OR_RETURN(NULL != pList, NULL);
+    return listNodeToValue(pList,
pList->pTail); +} + +void *listNext_IMPL +( + ListBase *pList, + void *pValue +) +{ + ListNode *pNode = listValueToNode(pList, pValue); + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + NV_ASSERT_CHECKED(pNode->pList == pList); + return listNodeToValue(pList, pNode->pNext); +} + +void *listPrev_IMPL +( + ListBase *pList, + void *pValue +) +{ + ListNode *pNode = listValueToNode(pList, pValue); + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + NV_ASSERT_CHECKED(pNode->pList == pList); + return listNodeToValue(pList, pNode->pPrev); +} + +ListIterBase listIterRange_IMPL +( + ListBase *pList, + void *pFirst, + void *pLast +) +{ + ListIterBase it; + + NV_ASSERT(NULL != pList); + + NV_CHECKED_ONLY(it.versionNumber = pList->versionNumber); + it.pList = pList; + it.pNode = listValueToNode(pList, pFirst); + it.pLast = listValueToNode(pList, pLast); + it.pValue = NULL; + + NV_ASSERT_CHECKED(it.pNode == NULL || it.pNode->pList == pList); + NV_ASSERT_CHECKED(it.pLast == NULL || it.pLast->pList == pList); + NV_ASSERT_CHECKED(_listIterRangeCheck(pList, it.pNode, it.pLast)); + NV_CHECKED_ONLY(it.bValid = NV_TRUE); + + return it; +} + +NvBool listIterNext_IMPL(ListIterBase *pIt) +{ + NV_ASSERT_OR_RETURN(NULL != pIt, NV_FALSE); + +#if PORT_IS_CHECKED_BUILD + if (pIt->bValid && !CONT_ITER_IS_VALID(pIt->pList, pIt)) + { + NV_ASSERT(CONT_ITER_IS_VALID(pIt->pList, pIt)); + PORT_DUMP_STACK(); + + pIt->bValid = NV_FALSE; + } +#endif + + if (!pIt->pNode) + return NV_FALSE; + + pIt->pValue = listNodeToValue(pIt->pList, pIt->pNode); + + if (pIt->pNode == pIt->pLast) + pIt->pNode = NULL; + else + pIt->pNode = pIt->pNode->pNext; + + return NV_TRUE; +} + +#if PORT_IS_CHECKED_BUILD +// @todo: optimize for best average complexity +// assumption: nodes ownership checked in the caller function +// allow same node +static NvBool _listIterRangeCheck +( + ListBase *pList, + ListNode *pFirst, + ListNode *pLast +) +{ + ListNode *pNode; + + for (pNode = pFirst; pNode != NULL; pNode = pNode->pNext) + { + if (pNode == pLast) + return NV_TRUE; + } + + // Check for both NULL (empty range) case. + return pNode == pLast; +} +#endif + +static void _listInsertBase +( + ListBase *pList, + void *pNextValue, + void *pValue +) +{ + ListNode *pNext = listValueToNode(pList, pNextValue); + ListNode *pNode = listValueToNode(pList, pValue); + + pNode->pPrev = pNext ? pNext->pPrev : pList->pTail; + pNode->pNext = pNext; + + if (pNode->pPrev) + pNode->pPrev->pNext = pNode; + else + pList->pHead = pNode; + + if (pNode->pNext) + pNode->pNext->pPrev = pNode; + else + pList->pTail = pNode; + + pList->count++; + NV_CHECKED_ONLY(pList->versionNumber++); + NV_CHECKED_ONLY(pNode->pList = pList); +} + +NvBool listIsValid_IMPL(void *pList) +{ +#if NV_TYPEOF_SUPPORTED + return NV_TRUE; +#else + if (CONT_VTABLE_VALID((ListBase*)pList)) + return NV_TRUE; + + NV_ASSERT_FAILED("vtable not valid!"); + CONT_VTABLE_INIT(ListBase, (ListBase*)pList); + return NV_FALSE; +#endif +} + diff --git a/src/nvidia/src/libraries/containers/map.c b/src/nvidia/src/libraries/containers/map.c new file mode 100644 index 0000000..cfc153f --- /dev/null +++ b/src/nvidia/src/libraries/containers/map.c @@ -0,0 +1,913 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "containers/map.h"
+
+CONT_VTABLE_DEFN(MapBase, mapIterRange_IMPL, NULL);
+
+static void _mapRotateLeft(MapNode **pPRoot, MapNode *x);
+static void _mapRotateRight(MapNode **pPRoot, MapNode *x);
+static void _mapInsertFixup(MapNode **pRoot, MapNode *x);
+static void _mapDeleteFixup(MapNode **pRoot, MapNode *parentOfX, MapNode *x);
+
+/**
+ * @brief Replace the old node with the new one.
+ * @details Does nothing if the old node is NULL. Does not
+ *          update the old node's links.
+ */
+static void _mapPutNodeInPosition(MapBase *pMap, MapNode *pTargetPosition,
+                                  MapNode *pNewNode);
+
+/**
+ * @brief Take over the target node's child connections.
+ * @details Does nothing if any input is NULL.
+ *          Does not update the old node's links.
+ */
+static void _mapAdoptChildrenNodes(MapNode *pTargetNode, MapNode *pNewNode);
+
+/**
+ * @brief Basic insertion procedure
+ * @details Shared by the three map insertion functions
+ */
+static NvBool _mapInsertBase(MapBase *pMap, NvU64 key, void *pValue);
+
+void mapInit_IMPL
+(
+    NonIntrusiveMap *pMap,
+    PORT_MEM_ALLOCATOR *pAllocator,
+    NvU32 valueSize
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(NULL != pMap);
+    NV_ASSERT_OR_RETURN_VOID(NULL != pAllocator);
+    portMemSet(&(pMap->base), 0, sizeof(pMap->base));
+    CONT_VTABLE_INIT(MapBase, &pMap->base);
+    pMap->pAllocator = pAllocator;
+    pMap->valueSize = valueSize;
+    pMap->base.nodeOffset = (NvS32)(0 - sizeof(MapNode));
+}
+
+void mapInitIntrusive_IMPL
+(
+    IntrusiveMap *pMap,
+    NvS32 nodeOffset
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(NULL != pMap);
+    portMemSet(&(pMap->base), 0, sizeof(pMap->base));
+    CONT_VTABLE_INIT(MapBase, &pMap->base);
+    pMap->base.nodeOffset = nodeOffset;
+}
+
+static void _mapDestroy(MapBase *pMap, PORT_MEM_ALLOCATOR *pAllocator)
+{
+    MapNode *pNode;
+
+    NV_ASSERT_OR_RETURN_VOID(NULL != pMap);
+
+    pNode = pMap->pRoot;
+    while (NULL != pNode)
+    {
+        while (NULL != pNode->pLeft)
+            pNode = pNode->pLeft;
+
+        while (NULL != pNode->pRight)
+            pNode = pNode->pRight;
+
+        if ((NULL == pNode->pLeft) && (NULL == pNode->pRight))
+        {
+            MapNode *pTemp = pNode->pParent;
+
+            // update parent node
+            if (NULL != pTemp)
+            {
+                if (pTemp->pLeft == pNode)
+                    pTemp->pLeft = NULL;
+                else
+                    pTemp->pRight = NULL;
+            }
+
+            // free the node
+            pNode->pParent = NULL;
+            NV_CHECKED_ONLY(pNode->pMap = NULL);
+            if (NULL != pAllocator)
+            {
+                PORT_FREE(pAllocator, pNode);
+            }
+
+            pNode = pTemp;
+        }
+    }
+
+    pMap->pRoot = NULL;
+    pMap->count = 0;
+    NV_CHECKED_ONLY(pMap->versionNumber++);
+}
+
+void mapDestroy_IMPL
+(
+    NonIntrusiveMap *pMap
+)
+{
+    _mapDestroy(&pMap->base, pMap->pAllocator);
+}
+
+void mapDestroyIntrusive_IMPL
+(
+    MapBase *pMap
+)
+{
+    _mapDestroy(pMap, NULL);
+}
+
+NvU32 mapCount_IMPL
+(
+    MapBase *pMap
+)
+{
+    NV_ASSERT_OR_RETURN(pMap, 0);
+    return pMap->count;
+}
+
+NvU64 mapKey_IMPL
+(
+    MapBase *pMap,
+    void *pValue
+)
+{
+    MapNode *pNode = mapValueToNode(pMap, pValue);
+    NV_ASSERT_OR_RETURN(NULL != pNode, 0);
+    NV_ASSERT_CHECKED(pNode->pMap == pMap);
+    return pNode->key;
+}
+
+void *mapInsertNew_IMPL
+(
+    NonIntrusiveMap *pMap,
+    NvU64 key
+)
+{
+    void *pNode = NULL;
+    void *pValue;
+
+    NV_ASSERT_OR_RETURN(NULL != pMap, NULL);
+
+    pNode = PORT_ALLOC(pMap->pAllocator, sizeof(MapNode) + pMap->valueSize);
+    NV_ASSERT_OR_RETURN(NULL != pNode, NULL);
+
+    portMemSet(pNode, 0, sizeof(MapNode) + pMap->valueSize);
+    pValue = mapNodeToValue(&pMap->base, pNode);
+
+    // check key duplication
+    if (!_mapInsertBase(&(pMap->base), key, pValue))
+    {
+        PORT_FREE(pMap->pAllocator, pNode);
+        return NULL;
+    }
+
+    return pValue;
+}
+
+void *mapInsertValue_IMPL
+(
+    NonIntrusiveMap *pMap,
+    NvU64 key,
+    const void *pValue
+)
+{
+    void *pCurrent;
+
+    NV_ASSERT_OR_RETURN(NULL != pValue, NULL);
+
+    pCurrent = mapInsertNew_IMPL(pMap, key);
+    if (NULL == pCurrent)
+        return NULL;
+
+    return portMemCopy(pCurrent, pMap->valueSize, pValue, pMap->valueSize);
+}
+
+NvBool mapInsertExisting_IMPL
+(
+    IntrusiveMap *pMap,
+    NvU64 key,
+    void *pValue
+)
+{
+    NV_ASSERT_OR_RETURN(NULL != pMap, NV_FALSE);
+    NV_ASSERT_OR_RETURN(NULL != pValue, NV_FALSE);
+    return _mapInsertBase(&(pMap->base), key, pValue);
+}
+
+void mapRemove_IMPL
+(
+    NonIntrusiveMap *pMap,
+    void *pValue
+)
+{
+    if (pValue == NULL)
+        return;
+
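+    /*
+     * Nonintrusive removal is a two-step operation: first unlink the
+     * embedded MapNode from the red-black tree, then free the allocation
+     * that wraps it. A hedged usage sketch (the NvU64 value type is an
+     * assumption for illustration, not part of this file):
+     *
+     *     NvU64 *p = (NvU64 *)mapInsertNew_IMPL(pMap, 5); // NULL if key 5 exists
+     *     ...
+     *     mapRemove_IMPL(pMap, mapFind_IMPL(&pMap->base, 5)); // unlink + free
+     */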
+    mapRemoveIntrusive_IMPL(&(pMap->base), pValue);
+    PORT_FREE(pMap->pAllocator, mapValueToNode(&pMap->base, pValue));
+}
+
+void mapRemoveIntrusive_IMPL
+(
+    MapBase *pMap,
+    void *pValue
+)
+{
+    MapNode *x; // child node of y, might be NULL
+    MapNode *y; // successor for z
+    MapNode *z; // node to remove
+    MapNode *parentOfX;
+    NvU32 yWasBlack;
+
+    // do nothing if pValue is NULL
+    if (pValue == NULL)
+        return;
+
+    // 1. find y, the successor for z
+    z = mapValueToNode(pMap, pValue);
+    NV_ASSERT_OR_RETURN_VOID(NULL != z);
+    NV_ASSERT_CHECKED(z->pMap == pMap);
+
+    if (z->pLeft == NULL || z->pRight == NULL)
+    {
+        // z has at least one NULL child, y = z
+        y = z;
+    }
+
+    else
+    {
+        // y is z's in-order successor (least greater node)
+        y = z->pRight;
+
+        while (y->pLeft != NULL)
+            y = y->pLeft;
+    }
+
+    // 2. find x, y's only child (may be NULL)
+    if (y->pLeft != NULL)
+        x = y->pLeft;
+    else
+        x = y->pRight;
+
+    // 3. put x into y's position
+    _mapPutNodeInPosition(pMap, y, x);
+    // 4. put y into z's position if not the same
+    parentOfX = y->pParent;
+    yWasBlack = !y->bIsRed;
+
+    if (y != z)
+    {
+        _mapPutNodeInPosition(pMap, z, y);
+        _mapAdoptChildrenNodes(z, y);
+        y->bIsRed = z->bIsRed;
+
+        if (parentOfX == z)
+            parentOfX = y;
+    }
+
+    // 5. fixup, to rebalance the tree
+    if (yWasBlack)
+        _mapDeleteFixup(&(pMap->pRoot), parentOfX, x);
+
+    // 6. update the count
+    NV_CHECKED_ONLY(pMap->versionNumber++);
+    NV_CHECKED_ONLY(z->pMap = NULL);
+    pMap->count--;
+    return;
+}
+
+void mapRemoveByKey_IMPL
+(
+    NonIntrusiveMap *pMap,
+    NvU64 key
+)
+{
+    mapRemove_IMPL(pMap, mapFind_IMPL(&(pMap->base), key));
+}
+
+void mapRemoveByKeyIntrusive_IMPL
+(
+    MapBase *pMap,
+    NvU64 key
+)
+{
+    mapRemoveIntrusive_IMPL(pMap, mapFind_IMPL(pMap, key));
+}
+
+void *mapFind_IMPL
+(
+    MapBase *pMap,
+    NvU64 key
+)
+{
+    MapNode *pCurrent;
+    NV_ASSERT_OR_RETURN(NULL != pMap, NULL);
+    pCurrent = pMap->pRoot;
+
+    while (pCurrent != NULL)
+    {
+        if (key < pCurrent->key)
+            pCurrent = pCurrent->pLeft;
+        else if (key > pCurrent->key)
+            pCurrent = pCurrent->pRight;
+        else
+            return mapNodeToValue(pMap, pCurrent);
+    }
+
+    return NULL;
+}
+
+void *mapFindGEQ_IMPL
+(
+    MapBase *pMap,
+    NvU64 keyMin
+)
+{
+    MapNode *pCurrent;
+    MapNode *pResult;
+    NV_ASSERT_OR_RETURN(NULL != pMap, NULL);
+    pCurrent = pMap->pRoot;
+    pResult = NULL;
+
+    while (pCurrent != NULL)
+    {
+        if (pCurrent->key > keyMin)
+        {
+            pResult = pCurrent;
+            pCurrent = pCurrent->pLeft;
+        }
+
+        else if (pCurrent->key == keyMin)
+            return mapNodeToValue(pMap, pCurrent);
+        else
+            pCurrent = pCurrent->pRight;
+    }
+
+    if (pResult == NULL)
+        return NULL;
+
+    return mapNodeToValue(pMap, pResult);
+}
+
+void *mapFindLEQ_IMPL
+(
+    MapBase *pMap,
+    NvU64 keyMax
+)
+{
+    MapNode *pCurrent;
+    MapNode *pResult;
+    NV_ASSERT_OR_RETURN(NULL != pMap, NULL);
+    pCurrent = pMap->pRoot;
+    pResult = NULL;
+
+    while (pCurrent != NULL)
+    {
+        if (pCurrent->key > keyMax)
+            pCurrent = pCurrent->pLeft;
+        else if (pCurrent->key == keyMax)
+            return mapNodeToValue(pMap, pCurrent);
+        else
+        {
+            pResult = pCurrent;
+            pCurrent = pCurrent->pRight;
+        }
+    }
+
+    if (pResult == NULL)
+        return NULL;
+
+    return mapNodeToValue(pMap, pResult);
+}
+
+void *mapNext_IMPL
+(
+    MapBase *pMap,
+    void *pValue
+)
+{
+    MapNode *pCurrent;
+    MapNode *pNode = mapValueToNode(pMap, pValue);
+
+    NV_ASSERT_OR_RETURN(NULL != pNode, NULL);
+    NV_ASSERT_CHECKED(pNode->pMap == pMap);
+
+    if (NULL != (pCurrent = pNode->pRight))
+    {
+        while (pCurrent->pLeft != NULL)
+            pCurrent = pCurrent->pLeft;
+
+        return mapNodeToValue(pMap, pCurrent);
+    }
+
+    else
+    {
+        pCurrent =
pNode->pParent; + + while (pCurrent != NULL && pNode == pCurrent->pRight) + { + if (pCurrent == pMap->pRoot) + return NULL; + + pNode = pCurrent; + pCurrent = pCurrent->pParent; + } + + if (pCurrent == NULL) + return NULL; + + return mapNodeToValue(pMap, pCurrent); + } +} + +void *mapPrev_IMPL +( + MapBase *pMap, + void *pValue +) +{ + MapNode *pCurrent; + MapNode *pNode = mapValueToNode(pMap, pValue); + + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + NV_ASSERT_CHECKED(pNode->pMap == pMap); + + if (NULL != (pCurrent = pNode->pLeft)) + { + while (pCurrent->pRight != NULL) + pCurrent = pCurrent->pRight; + + return mapNodeToValue(pMap, pCurrent); + } + + else + { + pCurrent = pNode->pParent; + + while (pCurrent != NULL && pNode == pCurrent->pLeft) + { + if (pCurrent == pMap->pRoot) + { + return NULL; + } + + pNode = pCurrent; + pCurrent = pCurrent->pParent; + } + + if (pCurrent == NULL) + return NULL; + + return mapNodeToValue(pMap, pCurrent); + } +} + +// @todo: do we need to change the definition of pFirst and pLast? +// currently they are mapNodes +MapIterBase mapIterRange_IMPL +( + MapBase *pMap, + void *pFirst, + void *pLast +) +{ + MapIterBase it; + MapNode *pFirstNode; + MapNode *pLastNode; + NV_ASSERT(pMap); + + portMemSet(&it, 0, sizeof(it)); + it.pMap = pMap; + + if (pMap->count == 0) + { + NV_CHECKED_ONLY(it.versionNumber = pMap->versionNumber); + return it; + } + + NV_ASSERT(pFirst); + NV_ASSERT(pLast); + NV_ASSERT_CHECKED((mapValueToNode(pMap, pFirst))->pMap == pMap); + NV_ASSERT_CHECKED((mapValueToNode(pMap, pLast))->pMap == pMap); + NV_ASSERT(mapKey_IMPL(pMap, pLast) >= mapKey_IMPL(pMap, pFirst)); + pFirstNode = mapValueToNode(pMap, pFirst); + pLastNode = mapValueToNode(pMap, pLast); + it.pNode = pFirstNode; + it.pLast = pLastNode; + NV_CHECKED_ONLY(it.versionNumber = pMap->versionNumber); + return it; +} + +// @todo: not sure about ppvalue, change it from void * to void ** +NvBool mapIterNext_IMPL(MapIterBase *pIt) +{ + NV_ASSERT_OR_RETURN(pIt, NV_FALSE); + +#if PORT_IS_CHECKED_BUILD + if (pIt->bValid && !CONT_ITER_IS_VALID(pIt->pMap, pIt)) + { + NV_ASSERT(CONT_ITER_IS_VALID(pIt->pMap, pIt)); + PORT_DUMP_STACK(); + pIt->bValid = NV_FALSE; + } +#endif + + if (!pIt->pNode) + return NV_FALSE; + + pIt->pValue = mapNodeToValue(pIt->pMap, pIt->pNode); + + if (pIt->pNode == pIt->pLast) + pIt->pNode = NULL; + else + pIt->pNode = mapValueToNode(pIt->pMap, + mapNext_IMPL(pIt->pMap, pIt->pValue)); + + return NV_TRUE; +} + +static void _mapRotateLeft +( + MapNode **pPRoot, + MapNode *x +) +{ + // rotate node x to left + MapNode *y = x->pRight; + // establish x->pRight link + x->pRight = y->pLeft; + + if (y->pLeft) + y->pLeft->pParent = x; + + // establish y->pParent link + y->pParent = x->pParent; + + if (x->pParent) + { + if (x == x->pParent->pLeft) + x->pParent->pLeft = y; + else + x->pParent->pRight = y; + } + + else + (*pPRoot) = y; + + // link x and y + y->pLeft = x; + x->pParent = y; +} + +static void _mapRotateRight +( + MapNode **pPRoot, + MapNode *x +) +{ + // rotate node x to right + MapNode *y = x->pLeft; + // establish x->pLeft link + x->pLeft = y->pRight; + + if (y->pRight) + y->pRight->pParent = x; + + // establish y->pParent link + y->pParent = x->pParent; + + if (x->pParent) + { + if (x == x->pParent->pRight) + x->pParent->pRight = y; + else + x->pParent->pLeft = y; + } + + else + (*pPRoot) = y; + + // link x and y + y->pRight = x; + x->pParent = y; +} + +static void _mapInsertFixup +( + MapNode **pPRoot, + MapNode *x +) +{ + // check red-black properties + while ((x != 
*pPRoot) && x->pParent->bIsRed) + { + // we have a violation + if (x->pParent == x->pParent->pParent->pLeft) + { + MapNode *y = x->pParent->pParent->pRight; + + if (y && y->bIsRed) + { + // uncle is RED + x->pParent->bIsRed = NV_FALSE; + y->bIsRed = NV_FALSE; + x->pParent->pParent->bIsRed = NV_TRUE; + x = x->pParent->pParent; + } + + else + { + // uncle is BLACK + if (x == x->pParent->pRight) + { + // make x a left child + x = x->pParent; + _mapRotateLeft(pPRoot, x); + } + + // recolor and rotate + x->pParent->bIsRed = NV_FALSE; + x->pParent->pParent->bIsRed = NV_TRUE; + _mapRotateRight(pPRoot, x->pParent->pParent); + } + } + + else + { + // mirror image of above code + MapNode *y = x->pParent->pParent->pLeft; + + if (y && y->bIsRed) + { + // uncle is RED + x->pParent->bIsRed = NV_FALSE; + y->bIsRed = NV_FALSE; + x->pParent->pParent->bIsRed = NV_TRUE; + x = x->pParent->pParent; + } + + else + { + // uncle is BLACK + if (x == x->pParent->pLeft) + { + x = x->pParent; + _mapRotateRight(pPRoot, x); + } + + x->pParent->bIsRed = NV_FALSE; + x->pParent->pParent->bIsRed = NV_TRUE; + _mapRotateLeft(pPRoot, x->pParent->pParent); + } + } + } + + (*pPRoot)->bIsRed = NV_FALSE; +} + +static void _mapDeleteFixup +( + MapNode **pPRoot, + MapNode *parentOfX, + MapNode *x +) +{ + while ((x != *pPRoot) && (!x || !x->bIsRed)) + { + //NV_ASSERT (!(x == NULL && parentOfX == NULL)); + // NULL nodes are sentinel nodes. If we delete a sentinel node (x==NULL) it + // must have a parent node (or be the root). Hence, parentOfX == NULL with + // x==NULL is never possible (tree invariant) + if ((parentOfX != NULL) && (x == parentOfX->pLeft)) + { + MapNode *w = parentOfX->pRight; + + if (w && w->bIsRed) + { + w->bIsRed = NV_FALSE; + parentOfX->bIsRed = NV_TRUE; + _mapRotateLeft(pPRoot, parentOfX); + w = parentOfX->pRight; + } + + if (!w || (((!w->pLeft || !w->pLeft->bIsRed) + && (!w->pRight || !w->pRight->bIsRed)))) + { + if (w) + w->bIsRed = NV_TRUE; + + x = parentOfX; + } + + else + { + if (!w->pRight || !w->pRight->bIsRed) + { + w->pLeft->bIsRed = NV_FALSE; + w->bIsRed = NV_TRUE; + _mapRotateRight(pPRoot, w); + w = parentOfX->pRight; + } + + w->bIsRed = parentOfX->bIsRed; + parentOfX->bIsRed = NV_FALSE; + w->pRight->bIsRed = NV_FALSE; + _mapRotateLeft(pPRoot, parentOfX); + x = *pPRoot; + } + } + + else if (parentOfX != NULL) + { + MapNode *w = parentOfX->pLeft; + + if (w && w->bIsRed) + { + w->bIsRed = NV_FALSE; + parentOfX->bIsRed = NV_TRUE; + _mapRotateRight(pPRoot, parentOfX); + w = parentOfX->pLeft; + } + + if (!w || ((!w->pRight || !w->pRight->bIsRed) && + (!w->pLeft || !w->pLeft->bIsRed))) + { + if (w) + w->bIsRed = NV_TRUE; + + x = parentOfX; + } + + else + { + if (!w->pLeft || !w->pLeft->bIsRed) + { + w->pRight->bIsRed = NV_FALSE; + w->bIsRed = NV_TRUE; + _mapRotateLeft(pPRoot, w); + w = parentOfX->pLeft; + } + + w->bIsRed = parentOfX->bIsRed; + parentOfX->bIsRed = NV_FALSE; + w->pLeft->bIsRed = NV_FALSE; + _mapRotateRight(pPRoot, parentOfX); + x = *pPRoot; + } + } + + else if (x == NULL) + { + // This should never happen. + break; + } + + parentOfX = x->pParent; + } + + if (x) + x->bIsRed = NV_FALSE; +} + +static void _mapPutNodeInPosition +( + MapBase *pMap, + MapNode *pTargetPosition, + MapNode *pNewNode +) +{ + // error check - can be removed + if (pTargetPosition == NULL) + return; + + // 1. change connection from new node side + if (pNewNode != NULL) + pNewNode->pParent = pTargetPosition->pParent; + + // 2. 
connection from parent side + if (pTargetPosition->pParent != NULL) + { + if (pTargetPosition == pTargetPosition->pParent->pLeft) + pTargetPosition->pParent->pLeft = pNewNode; + else + pTargetPosition->pParent->pRight = pNewNode; + } + + else + pMap->pRoot = pNewNode; +} + +static void _mapAdoptChildrenNodes +( + MapNode *pTargetNode, + MapNode *pNewNode +) +{ + // error check - can be removed + if (pTargetNode == NULL || pNewNode == NULL) + return; + + // take on connections + pNewNode->pLeft = pTargetNode->pLeft; + + if (pTargetNode->pLeft != NULL) + pTargetNode->pLeft->pParent = pNewNode; + + pNewNode->pRight = pTargetNode->pRight; + + if (pTargetNode->pRight != NULL) + pTargetNode->pRight->pParent = pNewNode; +} + +static NvBool _mapInsertBase +( + MapBase *pMap, + NvU64 key, + void *pValue +) +{ + MapNode *pCurrent; + MapNode *pParent; + MapNode *pNode; + pNode = mapValueToNode(pMap, pValue); + // 1. locate parent leaf node for the new node + pCurrent = pMap->pRoot; + pParent = NULL; + + while (pCurrent != NULL) + { + pParent = pCurrent; + + if (key < pCurrent->key) + pCurrent = pCurrent->pLeft; + else if (key > pCurrent->key) + pCurrent = pCurrent->pRight; + else + { + // duplication detected + return NV_FALSE; + } + } + + // 2. set up the new node structure + NV_CHECKED_ONLY(pNode->pMap = pMap); + pNode->key = key; + pNode->pParent = pParent; + pNode->pLeft = NULL; + pNode->pRight = NULL; + pNode->bIsRed = NV_TRUE; + + // 3. insert node in tree + if (pParent != NULL) + { + if (pNode->key < pParent->key) + pParent->pLeft = pNode; + else + pParent->pRight = pNode; + } + + else + pMap->pRoot = pNode; + + // 4. balance the tree + _mapInsertFixup(&(pMap->pRoot), pNode); + NV_CHECKED_ONLY(pMap->versionNumber++); + pMap->count++; + return NV_TRUE; +} + +NvBool mapIsValid_IMPL(void *pMap) +{ +#if NV_TYPEOF_SUPPORTED + return NV_TRUE; +#else + if (CONT_VTABLE_VALID((MapBase*)pMap)) + return NV_TRUE; + + NV_ASSERT_FAILED("vtable not valid!"); + CONT_VTABLE_INIT(MapBase, (MapBase*)pMap); + return NV_FALSE; +#endif +} diff --git a/src/nvidia/src/libraries/containers/multimap.c b/src/nvidia/src/libraries/containers/multimap.c new file mode 100644 index 0000000..ae19915 --- /dev/null +++ b/src/nvidia/src/libraries/containers/multimap.c @@ -0,0 +1,394 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "containers/multimap.h" + +CONT_VTABLE_DEFN(MultimapBase, multimapItemIterRange_IMPL, NULL); + +void multimapInit_IMPL +( + MultimapBase *pBase, + PORT_MEM_ALLOCATOR *pAllocator, + NvU32 valueSize, + NvS32 nodeOffset, + NvU32 submapSize +) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pBase); + NV_ASSERT_OR_RETURN_VOID(NULL != pAllocator); + mapInit_IMPL(&pBase->map, pAllocator, submapSize); + CONT_VTABLE_INIT(MultimapBase, pBase); + pBase->multimapNodeOffset = nodeOffset; + pBase->itemCount = 0; + pBase->itemSize = valueSize; +} + +void multimapDestroy_IMPL +( + MultimapBase *pBase +) +{ + void *pLeaf; + IntrusiveMap *pSubmap; + NV_ASSERT_OR_RETURN_VOID(NULL != pBase); + + pLeaf = multimapFirstItem_IMPL(pBase); + while (NULL != pLeaf) + { + void *pNext = multimapNextItem_IMPL(pBase, pLeaf); + multimapRemoveItem_IMPL(pBase, pLeaf); + pLeaf = pNext; + } + + while (NULL != (pSubmap = (IntrusiveMap *)mapFindGEQ_IMPL(&pBase->map.base, 0))) + { + mapDestroyIntrusive_IMPL(&pSubmap->base); + mapRemove_IMPL(&pBase->map, pSubmap); + } + + mapDestroy_IMPL(&pBase->map); +} + +void multimapClear_IMPL +( + MultimapBase *pBase +) +{ + PORT_MEM_ALLOCATOR *pAllocator; + NvU32 valueSize; + NvS32 nodeOffset; + NvU32 submapSize; + + NV_ASSERT_OR_RETURN_VOID(NULL != pBase); + pAllocator = pBase->map.pAllocator; + valueSize = pBase->itemSize; + nodeOffset = pBase->multimapNodeOffset; + submapSize = pBase->map.valueSize; + + multimapDestroy_IMPL(pBase); + multimapInit_IMPL(pBase, pAllocator, valueSize, nodeOffset, submapSize); +} + +void *multimapInsertSubmap_IMPL(MultimapBase *pBase, NvU64 submapKey) +{ + void *pSubmap; + NV_ASSERT_OR_RETURN(NULL != pBase, NULL); + + pSubmap = mapInsertNew_IMPL(&pBase->map, submapKey); + if (NULL != pSubmap) + { + NvS32 submapNodeOffset = pBase->multimapNodeOffset + + NV_OFFSETOF(MultimapNode, submapNode); + mapInitIntrusive_IMPL((IntrusiveMap *)pSubmap, submapNodeOffset); + } + + return pSubmap; +} + +void *multimapFindSubmap_IMPL(MultimapBase *pBase, NvU64 submapKey) +{ + return mapFind_IMPL(&pBase->map.base, submapKey); +} + +void *multimapFindSubmapLEQ_IMPL(MultimapBase *pBase, NvU64 submapKey) +{ + return mapFindLEQ_IMPL(&pBase->map.base, submapKey); +} + +void *multimapFindSubmapGEQ_IMPL(MultimapBase *pBase, NvU64 submapKey) +{ + return mapFindGEQ_IMPL(&pBase->map.base, submapKey); +} + +void *multimapInsertItemNew_IMPL +( + MultimapBase *pBase, + NvU64 submapKey, + NvU64 itemKey +) +{ + IntrusiveMap *pSubmap; + void *pLeaf; + NvU32 leafSize; + + if (NULL == pBase) + return NULL; + + pSubmap = (IntrusiveMap *)multimapFindSubmap_IMPL(pBase, submapKey); + if (NULL == pSubmap) + return NULL; + + leafSize = pBase->multimapNodeOffset + sizeof(MultimapNode); + pLeaf = PORT_ALLOC(pBase->map.pAllocator, leafSize); + + if (NULL == pLeaf) + return NULL; + + portMemSet(pLeaf, 0, leafSize); + + multimapValueToNode(pBase, pLeaf)->pSubmap = pSubmap; + + if (!mapInsertExisting_IMPL(pSubmap, itemKey, pLeaf)) + { + PORT_FREE(pBase->map.pAllocator, pLeaf); + return NULL; + } + + pBase->itemCount++; + + return pLeaf; +} + +void *multimapInsertItemValue_IMPL +( + MultimapBase *pBase, + NvU64 submapKey, + NvU64 itemKey, + const void *pValue +) +{ + void *pLeaf; + + NV_ASSERT_OR_RETURN(NULL != pBase, NULL); + NV_ASSERT_OR_RETURN(NULL != pValue, NULL); + + pLeaf = multimapInsertItemNew_IMPL(pBase, submapKey, itemKey); + + if (NULL == pLeaf) + return NULL; + + return portMemCopy(pLeaf, pBase->itemSize, pValue, pBase->itemSize); +} + +void *multimapFindItem_IMPL +( + MultimapBase *pBase, 
+ NvU64 submapKey, + NvU64 itemKey +) +{ + IntrusiveMap *pSubmap; + + NV_ASSERT_OR_RETURN(NULL != pBase, NULL); + + pSubmap = (IntrusiveMap *)multimapFindSubmap_IMPL(pBase, submapKey); + if (NULL == pSubmap) + return NULL; + + return mapFind_IMPL(&pSubmap->base, itemKey); +} + +void multimapRemoveItem_IMPL(MultimapBase *pBase, void *pLeaf) +{ + IntrusiveMap *pSubmap; + NvU32 itemCount; + + NV_ASSERT_OR_RETURN_VOID(NULL != pBase); + NV_ASSERT_OR_RETURN_VOID(NULL != pLeaf); + + pSubmap = (IntrusiveMap *)multimapValueToNode(pBase, pLeaf)->pSubmap; + NV_ASSERT_OR_RETURN_VOID(NULL != pSubmap); + + itemCount = pSubmap->base.count; + mapRemoveIntrusive_IMPL(&pSubmap->base, pLeaf); + // Only continue if an item was actually removed + if (itemCount == pSubmap->base.count) + return; + + PORT_FREE(pBase->map.pAllocator, pLeaf); + + pBase->itemCount--; +} + +void multimapRemoveSubmap_IMPL +( + MultimapBase *pBase, + MapBase *pSubmap +) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pBase); + NV_ASSERT_OR_RETURN_VOID(NULL != pSubmap); + NV_ASSERT_OR_RETURN_VOID(pSubmap->count == 0); + mapDestroyIntrusive_IMPL(pSubmap); + mapRemove_IMPL(&pBase->map, pSubmap); +} + +void multimapRemoveItemByKey_IMPL +( + MultimapBase *pBase, + NvU64 submapKey, + NvU64 itemKey +) +{ + void *pLeaf = multimapFindItem_IMPL(pBase, submapKey, itemKey); + if (NULL != pLeaf) + multimapRemoveItem_IMPL(pBase, pLeaf); +} + +void *multimapNextItem_IMPL(MultimapBase *pBase, void *pValue) +{ + IntrusiveMap *pSubmap; + + NV_ASSERT_OR_RETURN(NULL != pBase && NULL != pValue, NULL); + + pSubmap = (IntrusiveMap *)multimapValueToNode(pBase, pValue)->pSubmap; + NV_ASSERT_OR_RETURN(NULL != pSubmap, NULL); + + pValue = mapNext_IMPL(&pSubmap->base, pValue); + while (NULL == pValue) + { + pSubmap = (IntrusiveMap *)mapNext_IMPL(&pBase->map.base, pSubmap); + if (NULL == pSubmap) + return NULL; + + pValue = mapFindGEQ_IMPL(&pSubmap->base, 0); + } + + return pValue; +} + +void *multimapPrevItem_IMPL(MultimapBase *pBase, void *pValue) +{ + IntrusiveMap *pSubmap; + + NV_ASSERT_OR_RETURN(NULL != pBase && NULL != pValue, NULL); + + pSubmap = (IntrusiveMap *)multimapValueToNode(pBase, pValue)->pSubmap; + NV_ASSERT_OR_RETURN(NULL != pSubmap, NULL); + + pValue = mapPrev_IMPL(&pSubmap->base, pValue); + while (NULL == pValue) + { + pSubmap = (IntrusiveMap *)mapPrev_IMPL(&pBase->map.base, pSubmap); + if (NULL == pSubmap) + return NULL; + + pValue = mapFindLEQ_IMPL(&pSubmap->base, NV_U64_MAX); + } + + return pValue; +} + +void *multimapFirstItem_IMPL(MultimapBase *pBase) +{ + IntrusiveMap *pSubmap; + NV_ASSERT_OR_RETURN(NULL != pBase, NULL); + + pSubmap = mapFindGEQ_IMPL(&pBase->map.base, 0); + while (NULL != pSubmap) + { + void *pItem = mapFindGEQ_IMPL(&pSubmap->base, 0); + if (NULL != pItem) + return pItem; + + pSubmap = mapNext_IMPL(&pBase->map.base, pSubmap); + } + + return NULL; +} + +void *multimapLastItem_IMPL(MultimapBase *pBase) +{ + IntrusiveMap *pSubmap; + NV_ASSERT_OR_RETURN(NULL != pBase, NULL); + + pSubmap = mapFindLEQ_IMPL(&pBase->map.base, NV_U64_MAX); + while (NULL != pSubmap) + { + void *pItem = mapFindLEQ_IMPL(&pSubmap->base, NV_U64_MAX); + if (NULL != pItem) + return pItem; + + pSubmap = mapPrev_IMPL(&pBase->map.base, pSubmap); + } + + return NULL; +} + +MultimapIterBase multimapItemIterRange_IMPL +( + MultimapBase *pBase, + void *pFirst, + void *pLast +) +{ + MultimapIterBase it; + + portMemSet(&it, 0, sizeof(it)); + it.pMultimap = pBase; + + NV_ASSERT_OR_RETURN(NULL != pBase, it); + + if (pBase->itemCount == 0 || pFirst == NULL || pLast == NULL) + 
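+        /*
+         * An empty multimap (or missing endpoints) returns the zeroed
+         * iterator; its NULL pNext makes multimapItemIterNext_IMPL report
+         * NV_FALSE on the first call. A hedged consumer sketch (consume()
+         * is a hypothetical caller function):
+         *
+         *     MultimapIterBase it = multimapItemIterRange_IMPL(pBase, pF, pL);
+         *     while (multimapItemIterNext_IMPL(&it))
+         *         consume(it.pValue);
+         */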
return it; + + { + MultimapNode *pFirstNode; + MultimapNode *pLastNode; + NvU64 firstKey, lastKey, firstSubmapKey, lastSubmapKey; + + pFirstNode = multimapValueToNode(pBase, pFirst); + pLastNode = multimapValueToNode(pBase, pLast); + + firstKey = pFirstNode->submapNode.key; + lastKey = pLastNode->submapNode.key; + firstSubmapKey = mapValueToNode(&pBase->map.base, pFirstNode->pSubmap)->key; + lastSubmapKey = mapValueToNode(&pBase->map.base, pLastNode->pSubmap)->key; + + NV_ASSERT(firstSubmapKey < lastSubmapKey || + (firstSubmapKey == lastSubmapKey && firstKey <= lastKey)); + } + it.pNext = pFirst; + it.pLast = pLast; + return it; +} + +NvBool multimapItemIterNext_IMPL(MultimapIterBase *pIt) +{ + NV_ASSERT_OR_RETURN(NULL != pIt, NV_FALSE); + + pIt->pValue = pIt->pNext; + + if (NULL == pIt->pNext) + return NV_FALSE; + + if (pIt->pNext == pIt->pLast) + pIt->pNext = NULL; + else + pIt->pNext = multimapNextItem_IMPL(pIt->pMultimap, pIt->pNext); + + return NV_TRUE; +} + +NvBool multimapIsValid_IMPL(void *pMap) +{ +#if NV_TYPEOF_SUPPORTED + return NV_TRUE; +#else + if (CONT_VTABLE_VALID((MultimapBase*)pMap)) + return NV_TRUE; + + NV_ASSERT_FAILED("vtable not valid!"); + CONT_VTABLE_INIT(MultimapBase, (MultimapBase*)pMap); + return NV_FALSE; +#endif +} diff --git a/src/nvidia/src/libraries/containers/queue.c b/src/nvidia/src/libraries/containers/queue.c new file mode 100644 index 0000000..3e49867 --- /dev/null +++ b/src/nvidia/src/libraries/containers/queue.c @@ -0,0 +1,303 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "containers/queue.h" + +#define MEM_WR(a, d) portMemCopy((a), sizeof(*(a)), &(d), sizeof(d)) +#define MEM_RD(v, a) portMemCopy(&(v), sizeof(v), (a), sizeof(*(a))) + +static +NV_STATUS circularQueueInitCommon +( + Queue *pQueue, + void *pData, + NvLength capacity, + PORT_MEM_ALLOCATOR *pAllocator, + NvLength msgSize +) +{ + NV_ASSERT_OR_RETURN(pQueue != NULL, NV_ERR_INVALID_ARGUMENT); + + MEM_WR(&pQueue->pData, pData); + MEM_WR(&pQueue->pAllocator, pAllocator); + PORT_MEM_WR64(&pQueue->msgSize, msgSize); + PORT_MEM_WR64(&pQueue->capacity, capacity); + PORT_MEM_WR64(&pQueue->getIdx, 0); + PORT_MEM_WR64(&pQueue->putIdx, 0); + + return NV_OK; +} + +static +NvLength queueGetCount(Queue *pQueue) +{ + NvLength get = PORT_MEM_RD64(&pQueue->getIdx); + NvLength put = PORT_MEM_RD64(&pQueue->putIdx); + + if (put >= get) + { + return put - get; + } + else + { + return put + PORT_MEM_RD64(&pQueue->capacity) - get; + } +} + +static +void managedCopyData(NvLength msgSize, + NvLength opIdx, + QueueContext *pCtx, + void *pClientData, + NvLength count, + NvBool bCopyIn) +{ + NvLength size = msgSize * count; + void *pQueueData = (NvU8 *)pCtx->pData + (opIdx * msgSize); + void *src = bCopyIn ? pClientData : pQueueData; + void *dst = bCopyIn ? pQueueData : pClientData; + + portMemCopy(dst, size, src, size); +} + +NV_STATUS circularQueueInit_IMPL +( + Queue *pQueue, + PORT_MEM_ALLOCATOR *pAllocator, + NvLength capacity, + NvLength msgSize +) +{ + void *pData = NULL; + + // One element is wasted as no separate count/full/empty state + // is kept - only indices. + // Managed queue, can hide this due to owning the buffer and + // preserve original queue semantics. + capacity += 1; + + NV_ASSERT_OR_RETURN(pAllocator != NULL, NV_ERR_INVALID_ARGUMENT); + + pData = PORT_ALLOC(pAllocator, capacity * msgSize); + if (pData == NULL) + return NV_ERR_NO_MEMORY; + + return circularQueueInitCommon(pQueue, pData, capacity, pAllocator, msgSize); +} + +NV_STATUS circularQueueInitNonManaged_IMPL +( + Queue *pQueue, + NvLength capacity, + NvLength msgSize +) +{ + return circularQueueInitCommon(pQueue, NULL /*pData*/, capacity, NULL /*pAllocator*/, msgSize); +} + +void circularQueueDestroy_IMPL(Queue *pQueue) +{ + PORT_MEM_ALLOCATOR *pAllocator; + + NV_ASSERT_OR_RETURN_VOID(NULL != pQueue); + + PORT_MEM_WR64(&pQueue->capacity, 1); + PORT_MEM_WR64(&pQueue->getIdx, 0); + PORT_MEM_WR64(&pQueue->putIdx, 0); + MEM_RD(pAllocator, &pQueue->pAllocator); + + if (pAllocator) + PORT_FREE(pQueue->pAllocator, pQueue->pData); +} + +NvLength circularQueueCapacity_IMPL(Queue *pQueue) +{ + NV_ASSERT_OR_RETURN(NULL != pQueue, 0); + + return PORT_MEM_RD64(&pQueue->capacity) - 1; +} + +NvLength circularQueueCount_IMPL(Queue *pQueue) +{ + NV_ASSERT_OR_RETURN(NULL != pQueue, 0); + + return queueGetCount(pQueue); +} + +NvBool circularQueueIsEmpty_IMPL(Queue *pQueue) +{ + NV_ASSERT_OR_RETURN(NULL != pQueue, 0); + + return queueGetCount(pQueue) == 0; +} + +NvLength circularQueuePushNonManaged_IMPL +( + Queue *pQueue, + QueueContext *pCtx, + void* pElements, + NvLength numElements +) +{ + void *src; + NvLength cntLimit = 0; + NvLength elemToCpy, srcSize; + NvLength putIdx; + NvLength msgSize; + NvLength capacity; + + NV_ASSERT_OR_RETURN(NULL != pQueue, 0); + + putIdx = PORT_MEM_RD64(&pQueue->putIdx); + msgSize = PORT_MEM_RD64(&pQueue->msgSize); + capacity = PORT_MEM_RD64(&pQueue->capacity); + + // Calculate the elements to copy + cntLimit = capacity - queueGetCount(pQueue) - 1; + if (numElements > cntLimit) + { + numElements = cntLimit; + } + 
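+    /*
+     * The copy below is split into at most two portMemCopy calls to handle
+     * wrap-around. Worked example (values assumed for illustration): with an
+     * internal capacity of 8, putIdx = 6 and numElements = 4, the first copy
+     * fills slots 6..7 (elemToCpy = 2), putIdx wraps to 0, and the second
+     * copy fills slots 0..1 (remainingElemToCpy = 2).
+     */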
+ src = pElements; + if (numElements > 0) + { + NvLength remainingElemToCpy = numElements; + + // We need a max of 2 copies to take care of wrapAround case. See if we have a wrap around + if ((putIdx + numElements) > capacity) + { + // do the extra copy here + elemToCpy = capacity - putIdx; + srcSize = msgSize * elemToCpy; + + pCtx->pCopyData(msgSize, putIdx, pCtx, src, elemToCpy, NV_TRUE /*bCopyIn*/); + + // Update variables for next copy + remainingElemToCpy -= elemToCpy; + src = (void *)((NvU8 *)src + srcSize); + + putIdx = 0; + } + + NV_ASSERT(remainingElemToCpy <= capacity - putIdx); + + pCtx->pCopyData(msgSize, putIdx, pCtx, src, remainingElemToCpy, NV_TRUE /*bCopyIn*/); + + // The data must land before index update. + portAtomicMemoryFenceStore(); + PORT_MEM_WR64(&pQueue->putIdx, (putIdx + remainingElemToCpy) % capacity); + } + + return numElements; +} + +NvLength circularQueuePush_IMPL +( + Queue *pQueue, + void* pElements, + NvLength numElements +) +{ + QueueContext ctx = {0}; + + NV_ASSERT_OR_RETURN(pQueue != NULL, NV_FALSE); + NV_ASSERT_OR_RETURN(pQueue->pAllocator != NULL, NV_FALSE); + + ctx.pCopyData = managedCopyData; + ctx.pData = pQueue->pData; + + return circularQueuePushNonManaged_IMPL(pQueue, &ctx, pElements, numElements); +} + +void* circularQueuePeek_IMPL(Queue *pQueue) +{ + void *top; + + NV_ASSERT_OR_RETURN(pQueue != NULL, 0); + NV_ASSERT_OR_RETURN(pQueue->pAllocator != NULL, 0); + + if (queueGetCount(pQueue) == 0) return NULL; + top = (void*)((NvU8*)pQueue->pData + pQueue->getIdx * pQueue->msgSize); + return top; +} + +void circularQueuePop_IMPL(Queue *pQueue) +{ + NvLength getIdx; + NvLength capacity; + + NV_ASSERT_OR_RETURN_VOID(NULL != pQueue); + + getIdx = PORT_MEM_RD64(&pQueue->getIdx); + capacity = PORT_MEM_RD64(&pQueue->capacity); + + if (queueGetCount(pQueue) > 0) + { + PORT_MEM_WR64(&pQueue->getIdx, (getIdx + 1) % capacity); + } +} + +NvBool circularQueuePopAndCopyNonManaged_IMPL(Queue *pQueue, QueueContext *pCtx, void *pCopyTo) +{ + NvLength capacity; + NvLength msgSize; + + NV_ASSERT_OR_RETURN(pQueue != NULL, NV_FALSE); + + capacity = PORT_MEM_RD64(&pQueue->capacity); + msgSize = PORT_MEM_RD64(&pQueue->msgSize); + + if (queueGetCount(pQueue) > 0) + { + NvLength getIdx = PORT_MEM_RD64(&pQueue->getIdx); + pCtx->pCopyData(msgSize, getIdx, pCtx, pCopyTo, 1, NV_FALSE /*bCopyIn*/); + + // Update of index can't happen before we read all the data. + portAtomicMemoryFenceLoad(); + + PORT_MEM_WR64(&pQueue->getIdx, (getIdx + 1) % capacity); + + return NV_TRUE; + } + return NV_FALSE; +} + +NvBool circularQueuePopAndCopy_IMPL(Queue *pQueue, void *pCopyTo) +{ + QueueContext ctx = {0}; + + NV_ASSERT_OR_RETURN(pQueue != NULL, NV_FALSE); + NV_ASSERT_OR_RETURN(pQueue->pAllocator != NULL, NV_FALSE); + + ctx.pCopyData = managedCopyData; + ctx.pData = pQueue->pData; + + return circularQueuePopAndCopyNonManaged_IMPL(pQueue, &ctx, pCopyTo); +} + + +NvBool circularQueueIsValid_IMPL(void *pQueue) +{ + // No vtable for circularQueue + return NV_TRUE; +} diff --git a/src/nvidia/src/libraries/containers/ringbuf.c b/src/nvidia/src/libraries/containers/ringbuf.c new file mode 100644 index 0000000..e12c2b9 --- /dev/null +++ b/src/nvidia/src/libraries/containers/ringbuf.c @@ -0,0 +1,233 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "containers/ringbuf.h" + +/* + * @brief Initialize ring buffer given pre-allocated array for storage + * + * @param pBase + * Pointer to RingBufBase structure + * @param logSz + * Log base 2 of total size of storage, in # of elements + * @param arr + * Storage for ring buffer + * + * @returns NvBool + * NV_TRUE if unable to construct, NV_FALSE otherwise + */ +NvBool +ringbufConstruct_IMPL +( + RingBufBase *pBase, + NvU64 logSz, + void *arr +) +{ + pBase->head = 0; + pBase->tail = 0; + pBase->logSz = logSz; + pBase->arr = arr; + return pBase->arr == NULL; +} + +/* + * @brief Initialize dynamic ring buffer given a PORT_MEM_ALLOCATOR + * + * @param pBase + * Pointer to RingBufBase structure + * @param logSz + * Log base 2 of total size of storage, in # of elements + * @param eleSz + * Size per element + * @param pAlloc + * Pointer to PORT_MEM_ALLOCATOR + * + * @returns NvBool + * NV_TRUE if unable to construct, NV_FALSE otherwise + */ +NvBool +ringbufConstructDynamic_IMPL +( + RingBufBase *pBase, + NvU64 logSz, + NvU64 eleSz, + PORT_MEM_ALLOCATOR *pAlloc +) +{ + NV_ASSERT_OR_RETURN(logSz < 64, NV_TRUE); + return ringbufConstruct_IMPL(pBase, logSz, PORT_ALLOC(pAlloc, eleSz << logSz)); +} + +/* + * @brief Destroy a dynamic ring buffer + * + * @param pBase + * Pointer to RingBufBase structure + * @param pAlloc + * Pointer to PORT_MEM_ALLOCATOR used to construct Ring Buffer + * + * @returns void + */ +void +ringbufDestruct_IMPL +( + RingBufBase *pBase, + void *pAlloc +) +{ + if ((pAlloc != NULL) && (pBase->arr != NULL)) + { + PORT_FREE((PORT_MEM_ALLOCATOR *) pAlloc, pBase->arr); + pBase->arr = 0; + pBase->logSz = 0; + } +} + +/* + * @brief Peek up to *pMax elements from a ring buffer + * + * @param pBase + * Pointer to RingBufBase structure + * @param eleSz + * Size per element + * @param pMax + * Initially a pointer to number of elements to try to fetch. Returns number of elements actually gotten + * + * @returns void * + * Pointer to the start of the ringbuffer area containing pMax elements, NULL if *pMax == 0, initially or finally + */ +void * +ringbufPeekN_IMPL +( + RingBufBase *pBase, + NvU64 eleSz, + NvU64 *pMax +) +{ + NvU64 mask = RINGBUF_ARRAY_MASK(pBase); + NvU8 *ret = &pBase->arr[(pBase->tail & mask)*eleSz]; + NvU64 max = NV_ALIGN_UP64(pBase->tail + 1llu, RINGBUF_ARRAY_SIZE(pBase)); + max = max > pBase->head ? 
pBase->head : max;
+    max -= pBase->tail;
+
+    NV_ASSERT_OR_RETURN(pMax != NULL, NULL);
+
+    max = (max > *pMax) ? *pMax : max;
+    *pMax = max;
+    return max == 0 ? NULL : ret;
+}
+
+/*
+ * @brief Pop up to *pMax elements from a ring buffer
+ *
+ * @param pBase
+ *   Pointer to RingBufBase structure
+ * @param eleSz
+ *   Size per element
+ * @param pMax
+ *   Initially a pointer to number of elements to try to fetch. Returns number of elements actually gotten
+ *
+ * @returns void *
+ *   Pointer to the start of the ringbuffer area containing pMax elements, NULL if *pMax == 0, initially or finally
+ */
+void *
+ringbufPopN_IMPL
+(
+    RingBufBase *pBase,
+    NvU64 eleSz,
+    NvU64 *pMax
+)
+{
+    void *ret = ringbufPeekN_IMPL(pBase, eleSz, pMax);
+
+    NV_CHECK_OR_RETURN(LEVEL_INFO, ret != NULL, NULL);
+
+    pBase->tail += *pMax;
+    return ret;
+}
+
+/*
+ * @brief Append num elements to the ringbuffer
+ *
+ * @param pBase
+ *   Pointer to RingBufBase structure
+ * @param eleSz
+ *   Size per element
+ * @param pEle
+ *   Pointer to buffer with num elements
+ * @param num
+ *   Number of elements
+ * @param bOverwrite
+ *   Whether to overwrite existing elements in ring buffer
+ *
+ * @returns NvBool
+ *   NV_TRUE if success, NV_FALSE otherwise
+ */
+NvBool
+ringbufAppendN_IMPL
+(
+    RingBufBase *pBase,
+    NvU64 eleSz,
+    NvU8 *pEle,
+    NvU64 num,
+    NvBool bOverwrite
+)
+{
+    if ((RINGBUF_ARRAY_SIZE(pBase) - (bOverwrite ? 0llu : (pBase->head - pBase->tail))) < num)
+    {
+        return NV_FALSE;
+    }
+
+    while (num != 0)
+    {
+        NvU64 mask = RINGBUF_ARRAY_MASK(pBase);
+        NvU8 *pPtr = &pBase->arr[(pBase->head & mask) * eleSz];
+        NvU64 max = NV_ALIGN_UP64(pBase->head + 1llu, RINGBUF_ARRAY_SIZE(pBase));
+        max -= pBase->head;
+        max = (max > num) ? num : max;
+        portMemCopy(pPtr, max * eleSz, pEle, max * eleSz);
+        pBase->head += max;
+        pEle = &pEle[max * eleSz];
+        num -= max;
+    }
+    pBase->tail = (pBase->head - pBase->tail) > (1llu << pBase->logSz) ? (pBase->head - (1llu << pBase->logSz)) : pBase->tail;
+    return NV_TRUE;
+}
+
+/*
+ * @brief Get current size of ring buffer
+ *
+ * @param pBase
+ *   Pointer to RingBufBase structure
+ *
+ * @returns NvU64
+ *   Number of elements currently in ring buffer
+ */
+NvU64
+ringbufCurrentSize_IMPL
+(
+    RingBufBase *pBase
+)
+{
+    return pBase->head - pBase->tail;
+}
\ No newline at end of file
diff --git a/src/nvidia/src/libraries/containers/vector.c b/src/nvidia/src/libraries/containers/vector.c
new file mode 100644
index 0000000..82af7c7
--- /dev/null
+++ b/src/nvidia/src/libraries/containers/vector.c
@@ -0,0 +1,478 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + #include "containers/vector.h" + +CONT_VTABLE_DEFN(Vector, vectIterRange_IMPL, NULL); + +/** + * Check if the given index is contained in vector, that is if + * ((index >= 0) && (index < pVector->size)) + */ +static NvBool _vectIndexCheck(Vector *pVector, NvU32 index); + +/** + * Reallocates container. + * + * Allocate a memory of 'newSize' bytes, then copy 'copySize' bytes from the old + * vector memory to the new one. + */ +static NvBool _vectReallocHelper(Vector *pVector, NvU32 newSize, NvU32 copySize); + +NV_STATUS vectInit_IMPL +( + Vector *pVector, + PORT_MEM_ALLOCATOR *pAllocator, + NvU32 capacity, + NvU32 valueSize +) +{ + NV_ASSERT_OR_RETURN(pVector != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pAllocator != NULL, NV_ERR_INVALID_ARGUMENT); + NV_CHECKED_ONLY(pVector->versionNumber++); + + portMemSet(pVector, 0, sizeof(*pVector)); + CONT_VTABLE_INIT(Vector, pVector); + pVector->pAllocator = pAllocator; + pVector->valueSize = valueSize; + pVector->capacity = capacity; + pVector->size = 0; + + if (capacity > 0) + { + pVector->pHead = PORT_ALLOC(pVector->pAllocator, + capacity * pVector->valueSize); + if (NULL == pVector->pHead) + { + return NV_ERR_NO_MEMORY; + } + + portMemSet(pVector->pHead, 0, capacity * pVector->valueSize); + } + return NV_OK; +} + +void vectDestroy_IMPL(Vector *pVector) +{ + NV_ASSERT_OR_RETURN_VOID(pVector != NULL); + NV_CHECKED_ONLY(pVector->versionNumber++); + + if (pVector->pAllocator != NULL) + { + PORT_FREE(pVector->pAllocator, pVector->pHead); + } + pVector->pHead = NULL; + pVector->capacity = 0; + pVector->size = 0; +} + +void vectClear_IMPL(Vector *pVector) +{ + NV_ASSERT_OR_RETURN_VOID(pVector != NULL); + NV_CHECKED_ONLY(pVector->versionNumber++); + pVector->size = 0; +} + +void *vectAt_IMPL +( + Vector *pVector, + NvU32 index +) +{ + NV_ASSERT_OR_RETURN(pVector != NULL, NULL); + if (pVector->size == 0) + { + // possible for empty vectors from vectIterAll, don't assert + return NULL; + } + NV_ASSERT_OR_RETURN(_vectIndexCheck(pVector, index), NULL); + return (void *)((NvU8 *)pVector->pHead + index * pVector->valueSize); +} + +NvU32 vectCapacity_IMPL +( + Vector *pVector +) +{ + NV_ASSERT_OR_RETURN(pVector != NULL, 0); + return pVector->capacity; +} + +NvU32 vectCount_IMPL +( + Vector *pVector +) +{ + NV_ASSERT_OR_RETURN(pVector != NULL, 0); + return pVector->size; +} + +NvBool vectIsEmpty_IMPL +( + Vector *pVector +) +{ + NV_ASSERT_OR_RETURN(pVector != NULL, 0); + + return pVector->size == 0; +} + +NV_STATUS vectTrim_IMPL +( + Vector *pVector, + NvU32 n +) +{ + NV_ASSERT_OR_RETURN(pVector != NULL, NV_ERR_INVALID_ARGUMENT); + NV_CHECKED_ONLY(pVector->versionNumber++); + + if (n > pVector->capacity) + { + return NV_OK; + } + + if (n < pVector->size) + { + n = pVector->size; + } + + if (!_vectReallocHelper(pVector, + n * pVector->valueSize, + pVector->size * pVector->valueSize)) + { + return NV_ERR_NO_MEMORY; + } + return NV_OK; +} + +NV_STATUS vectReserve_IMPL +( + Vector *pVector, + NvU32 n +) +{ + NV_ASSERT_OR_RETURN(pVector != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(n > 0, NV_ERR_INVALID_ARGUMENT); + NV_CHECKED_ONLY(pVector->versionNumber++); + + if (n > pVector->capacity) + { + if (!_vectReallocHelper(pVector, + n * pVector->valueSize, + pVector->size 
* pVector->valueSize)) + { + return NV_ERR_NO_MEMORY; + } + } + return NV_OK; +} + +void *vectInsert_IMPL +( + Vector *pVector, + NvU32 index, + const void *pData +) +{ + void *dst; + void *src; + NvU32 i; + NV_ASSERT_OR_RETURN(pVector != NULL, NULL); + NV_CHECKED_ONLY(pVector->versionNumber++); + if (pVector->size != index) + { + NV_ASSERT_OR_RETURN(_vectIndexCheck(pVector, index), NULL); + } + if (pVector->size + 1 > pVector->capacity) + { + // resize the container by the factor of 2, newcapacity = capacity * 2 + NvU32 newCapacity = pVector->capacity == 0 ? 10 : pVector->capacity * 2; + + if (!_vectReallocHelper(pVector, + newCapacity * pVector->valueSize, + pVector->size * pVector->valueSize)) + return NULL; + } + + for (i = pVector->size; i > index; i--) + { + dst = (void *)((NvU8 *)pVector->pHead + i * pVector->valueSize); + src = (void *)((NvU8 *)pVector->pHead + (i - 1) * pVector->valueSize); + portMemCopy(dst, pVector->valueSize, src, pVector->valueSize); + } + pVector->size++; + dst = (void *)((NvU8 *)pVector->pHead + index * pVector->valueSize); + portMemCopy(dst, pVector->valueSize, pData, pVector->valueSize); + + return dst; +} + +void vectRemove_IMPL +( + Vector *pVector, + NvU32 index +) +{ + void *src; + void *dst; + NvU32 i; + NV_ASSERT_OR_RETURN_VOID(pVector != NULL); + NV_CHECKED_ONLY(pVector->versionNumber++); + NV_ASSERT_OR_RETURN_VOID(_vectIndexCheck(pVector, index)); + + for (i = index; i < pVector->size - 1; i++) + { + dst = (void *)((NvU8 *)pVector->pHead + i * pVector->valueSize); + src = (void *)((NvU8 *)pVector->pHead + (i + 1) * pVector->valueSize); + portMemCopy(dst, pVector->valueSize, src, pVector->valueSize); + } + + pVector->size--; +} + +void *vectAppend_IMPL +( + Vector *pVector, + const void *pData +) +{ + return vectInsert_IMPL(pVector, pVector->size, pData); +} + +void *vectPrepend_IMPL +( + Vector *pVector, + const void *pData +) +{ + return vectInsert_IMPL(pVector, 0, pData); +} + +VectorIterBase vectIterRange_IMPL +( + Vector *pVector, + void *pFirst, + void *pLast +) +{ + VectorIterBase it; + NvU32 first = ~0U; + NvU32 last = ~0U; + NV_ASSERT(pVector != NULL); + + if (pFirst != NULL) + { + first = (NvU32)(((NvU8 *)pFirst - (NvU8 *)pVector->pHead) / + pVector->valueSize); + } + if (pLast != NULL) + { + last = (NvU32)(((NvU8 *)pLast - (NvU8 *)pVector->pHead) / + pVector->valueSize); + } + + NV_CHECKED_ONLY(it.versionNumber = pVector->versionNumber); + NV_CHECKED_ONLY(it.bValid = NV_TRUE); + + if ((pVector->size == 0) || (pFirst == NULL) || (first >= pVector->size) || + (pLast == NULL) || (last >= pVector->size)) + { + it.pVector = pVector; + it.nextIndex = -1; + it.prevIndex = -1; + it.firstIndex = -1; + it.lastIndex = -1; + it.bForward = NV_TRUE; + it.pValue = NULL; + return it; + } + it.pVector = pVector; + it.nextIndex = first; + it.prevIndex = last; + it.firstIndex = first; + it.lastIndex = last; + it.bForward = (first <= last); + it.pValue = NULL; + return it; +} + +NvBool vectIterNext_IMPL +( + VectorIterBase *pIter, + void **ppValue +) +{ + NV_ASSERT_OR_RETURN(pIter != NULL, NV_FALSE); + NV_ASSERT_OR_RETURN(ppValue != NULL, NV_FALSE); + + if (pIter->nextIndex == -1) + { + return NV_FALSE; + } + +#if PORT_IS_CHECKED_BUILD + if (pIter->bValid && !CONT_ITER_IS_VALID(pIter->pVector, pIter)) + { + NV_ASSERT(CONT_ITER_IS_VALID(pIter->pVector, pIter)); + PORT_DUMP_STACK(); + pIter->bValid = NV_FALSE; + } +#endif + + *ppValue = (void *)((NvU8 *)pIter->pVector->pHead + + pIter->nextIndex * pIter->pVector->valueSize); + + pIter->prevIndex = 
pIter->bForward ? pIter->nextIndex - 1 : + pIter->nextIndex + 1; + + if (pIter->nextIndex == pIter->lastIndex) + { + pIter->nextIndex = -1; + } + else + { + pIter->nextIndex = pIter->bForward ? pIter->nextIndex + 1 : + pIter->nextIndex - 1; + } + + return NV_TRUE; +} + +NvBool vectIterPrev_IMPL +( + VectorIterBase *pIter, + void **ppValue +) +{ + NV_ASSERT_OR_RETURN(pIter != NULL, NV_FALSE); + NV_ASSERT_OR_RETURN(ppValue != NULL, NV_FALSE); + + if (pIter->prevIndex == -1) + { + return NV_FALSE; + } + +#if PORT_IS_CHECKED_BUILD + if (pIter->bValid && !CONT_ITER_IS_VALID(pIter->pVector, pIter)) + { + NV_ASSERT(CONT_ITER_IS_VALID(pIter->pVector, pIter)); + PORT_DUMP_STACK(); + pIter->bValid = NV_FALSE; + } +#endif + + *ppValue = (void *)((NvU8 *)pIter->pVector->pHead + + pIter->prevIndex * pIter->pVector->valueSize); + + pIter->nextIndex = pIter->bForward ? pIter->prevIndex + 1 : + pIter->prevIndex - 1; + + if (pIter->prevIndex == pIter->firstIndex) + { + pIter->prevIndex = -1; + } + else + { + pIter->prevIndex = pIter->bForward ? pIter->prevIndex - 1 : + pIter->prevIndex + 1; + } + + return NV_TRUE; +} + +static NvBool _vectReallocHelper +( + Vector *pVector, + NvU32 newSize, + NvU32 copySize +) +{ + void *pNewArray; + void *pCopiedArray; + + NV_ASSERT_OR_RETURN(newSize >= copySize, NV_FALSE); + + pNewArray = PORT_ALLOC(pVector->pAllocator, newSize); + if (pNewArray == NULL && newSize > 0) + { + return NV_FALSE; + } + portMemSet(pNewArray, 0, newSize); + + if (copySize > 0) + { + pCopiedArray = portMemCopy(pNewArray, newSize, + pVector->pHead, copySize); + if (NULL == pCopiedArray) + { + NV_ASSERT(pCopiedArray); + PORT_FREE(pVector->pAllocator, pNewArray); + return NV_FALSE; + } + + PORT_FREE(pVector->pAllocator, pVector->pHead); + pNewArray = pCopiedArray; + } + + pVector->pHead = pNewArray; + pVector->capacity = newSize / pVector->valueSize; + pVector->size = copySize / pVector->valueSize; + + return NV_TRUE; +} + +static NvBool _vectIndexCheck +( + Vector *pVector, + NvU32 index +) +{ + void *pActualOffset, *pLastElem; + + if (pVector->size == 0) + { + return NV_FALSE; + } + + pActualOffset = (void *)((NvU8 *)pVector->pHead + + index * pVector->valueSize); + + pLastElem = (void *)((NvU8 *)pVector->pHead + + (pVector->size - 1) * pVector->valueSize); + + return ((void *)pVector->pHead <= pActualOffset && + pActualOffset <= (void *)pLastElem); +} + +NvBool vectIsValid_IMPL(void *pVect) +{ +#if NV_TYPEOF_SUPPORTED + return NV_TRUE; +#else + if (CONT_VTABLE_VALID((Vector*)pVect)) + return NV_TRUE; + + NV_ASSERT_FAILED("vtable not valid!"); + CONT_VTABLE_INIT(Vector, (Vector*)pVect); + return NV_FALSE; +#endif +} diff --git a/src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c b/src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c new file mode 100644 index 0000000..b1d174a --- /dev/null +++ b/src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c @@ -0,0 +1,304 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "eventbufferproducer.h" +#include "nvport/nvport.h" + +// +// This file contains a generic event buffer producer implementation for adding variable-length data +// +// Data format: +// +// The event record buffer holds fixed-size records +// +// |---------|---------|---------|---------|...|---------| +// | record1 | record2 | record3 | record4 |...| recordn | +// |---------|---------|---------|---------|...|---------| +// +// Variable-length data buffer: +// The fixed event record can optionally contain a pointer to variable-length data. +// This buffer stores the variable-length data that does not fit in the fixed-size records. +// +// |------------|--------|...|---------| +// | data2 | data4 |...| data n | +// |------------|--------|...|---------| +// + +static NV_EVENT_BUFFER_RECORD* _eventBufferGetFreeRecord(EVENT_BUFFER_PRODUCER_INFO *); +static void _eventBufferAddVardata(EVENT_BUFFER_PRODUCER_INFO*, NvP64, NvU32, NV_EVENT_BUFFER_RECORD_HEADER*); +static void _eventBufferUpdateRecordBufferCount(EVENT_BUFFER_PRODUCER_INFO*); +static void _eventBufferUpdateVarRemaingSize(EVENT_BUFFER_PRODUCER_INFO* info); + +void +eventBufferInitRecordBuffer +( + EVENT_BUFFER_PRODUCER_INFO *info, + NV_EVENT_BUFFER_HEADER* pHeader, + NvP64 recordBuffAddr, + NvU32 recordSize, + NvU32 recordCount, + NvU32 bufferSize, + NvU32 notificationThreshold +) +{ + RECORD_BUFFER_INFO* pRecordBuffer = &info->recordBuffer; + pRecordBuffer->pHeader = pHeader; + pRecordBuffer->recordBuffAddr = recordBuffAddr; + pRecordBuffer->recordSize = recordSize; + pRecordBuffer->totalRecordCount = recordCount; + pRecordBuffer->bufferSize = bufferSize; + pRecordBuffer->notificationThreshold = notificationThreshold; +} + +void +eventBufferInitVardataBuffer +( + EVENT_BUFFER_PRODUCER_INFO *info, + NvP64 vardataBuffAddr, + NvU32 bufferSize, + NvU32 notificationThreshold +) +{ + VARDATA_BUFFER_INFO* pVardataBuffer = &info->vardataBuffer; + pVardataBuffer->vardataBuffAddr = vardataBuffAddr; + pVardataBuffer->bufferSize = bufferSize; + pVardataBuffer->notificationThreshold = notificationThreshold; + pVardataBuffer->get = 0; + pVardataBuffer->put = 0; + pVardataBuffer->remainingSize = bufferSize; +} + +void +eventBufferInitNotificationHandle(EVENT_BUFFER_PRODUCER_INFO *info, NvP64 notificationHandle) +{ + info->notificationHandle = notificationHandle; +} + +void +eventBufferSetEnable(EVENT_BUFFER_PRODUCER_INFO *info, NvBool
isEnabled) +{ + info->isEnabled = isEnabled; +} + +void +eventBufferSetKeepNewest(EVENT_BUFFER_PRODUCER_INFO *info,NvBool isKeepNewest) +{ + info->isKeepNewest = isKeepNewest; +} + +void +eventBufferUpdateRecordBufferGet(EVENT_BUFFER_PRODUCER_INFO *info, NvU32 get) +{ + RECORD_BUFFER_INFO* pRecordBuffer = &info->recordBuffer; + pRecordBuffer->pHeader->recordGet = get; + + // used for notification + _eventBufferUpdateRecordBufferCount(info); + + // dropCounts get reset on every updateGet call + pRecordBuffer->pHeader->recordDropcount = 0; + pRecordBuffer->pHeader->vardataDropcount = 0; + +} + +void +_eventBufferUpdateRecordBufferCount(EVENT_BUFFER_PRODUCER_INFO *info) +{ + RECORD_BUFFER_INFO* pRecordBuffer = &info->recordBuffer; + NV_EVENT_BUFFER_HEADER* pHeader = info->recordBuffer.pHeader; + + if (pHeader->recordGet <= pHeader->recordPut) + pHeader->recordCount = (pHeader->recordPut - pHeader->recordGet); + else + pHeader->recordCount = pHeader->recordPut + (pRecordBuffer->totalRecordCount - pHeader->recordGet); +} + +void +eventBufferUpdateVardataBufferGet(EVENT_BUFFER_PRODUCER_INFO *info, NvU32 get) +{ + VARDATA_BUFFER_INFO* pVardataBuffer = &info->vardataBuffer; + pVardataBuffer->get = get; + + _eventBufferUpdateVarRemaingSize(info); +} + +NvU32 +eventBufferGetRecordBufferCount(EVENT_BUFFER_PRODUCER_INFO *info) +{ + return info->recordBuffer.totalRecordCount; +} + +NvU32 +eventBufferGetVardataBufferCount(EVENT_BUFFER_PRODUCER_INFO *info) +{ + return info->vardataBuffer.bufferSize; +} + +// +// eventBufferProducerAddEvent +// +// Adds an event to an event buffer +// This function is called after acquiring correct locks (depending on which module includes it) +// and bound checks for input parameters +// eventType : for RM this would be either 2080 subdevice events or 0000 system events +// eventSubtype: optional +// payloadSize and vardataSize must be 64 bit aligned +// +void +eventBufferProducerAddEvent +( + EVENT_BUFFER_PRODUCER_INFO *info, + NvU16 eventType, + NvU16 eventSubtype, + EVENT_BUFFER_PRODUCER_DATA *pData +) +{ + NV_EVENT_BUFFER_RECORD *record; + + if (info->isEnabled) + { + record = _eventBufferGetFreeRecord(info); + if (record) + { + RECORD_BUFFER_INFO *pRecInfo = &info->recordBuffer; + NV_EVENT_BUFFER_HEADER *pHeader = pRecInfo->pHeader; + NvU32 putNext = (pHeader->recordPut + 1) % pRecInfo->totalRecordCount; + + record->recordHeader.type = eventType; + record->recordHeader.subtype = eventSubtype; + + if (pData->payloadSize) + portMemCopy(record->inlinePayload, pData->payloadSize, + NvP64_VALUE(pData->pPayload), pData->payloadSize); + + _eventBufferAddVardata(info, pData->pVardata, pData->vardataSize, &record->recordHeader); + + pHeader->recordPut = putNext; + } + } +} + +NV_EVENT_BUFFER_RECORD * +_eventBufferGetFreeRecord(EVENT_BUFFER_PRODUCER_INFO *info) +{ + RECORD_BUFFER_INFO *pRecInfo = &info->recordBuffer; + NV_EVENT_BUFFER_HEADER *pHeader = pRecInfo->pHeader; + NvU32 recordOffset = 0; + NV_EVENT_BUFFER_RECORD *pFreeRecord = NULL; + + NvU32 putNext = (pHeader->recordPut + 1) % pRecInfo->totalRecordCount; + + if ((!info->isKeepNewest) && (putNext == pHeader->recordGet)) + { + pHeader->recordDropcount++; + } + else + { + recordOffset = pHeader->recordPut * pRecInfo->recordSize; + pFreeRecord = (NV_EVENT_BUFFER_RECORD *)((NvUPtr)pRecInfo->recordBuffAddr + recordOffset); + } + return pFreeRecord; +} + +void +_eventBufferAddVardata +( + EVENT_BUFFER_PRODUCER_INFO *info, + NvP64 data, + NvU32 size, + NV_EVENT_BUFFER_RECORD_HEADER* recordHeader +) +{ + VARDATA_BUFFER_INFO 
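+/*
+ * Editor's note (added): a worked example of the circular record count
+ * computed by _eventBufferUpdateRecordBufferCount() above. With
+ * totalRecordCount == 8:
+ *
+ *   get == 1, put == 5  ->  count == 5 - 1        == 4   (no wrap)
+ *   get == 6, put == 2  ->  count == 2 + (8 - 6)  == 4   (wrapped)
+ *
+ * In !isKeepNewest mode one slot is always left unused:
+ * _eventBufferGetFreeRecord drops the new record when
+ * (put + 1) % totalRecordCount == get, so the buffer never holds more than
+ * totalRecordCount - 1 records at once.
+ */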
*pVarInfo = &info->vardataBuffer; + NV_EVENT_BUFFER_HEADER* pHeader = info->recordBuffer.pHeader; + NvU32 pVardataOffset; + NvU32 alignedSize = NV_ALIGN_UP(size, NV_EVENT_VARDATA_GRANULARITY); + NvU32 vardataOffsetEnd = pVarInfo->put + alignedSize; + + if (vardataOffsetEnd <= pVarInfo->bufferSize) + { + if ((!info->isKeepNewest) && (pVarInfo->remainingSize < alignedSize)) + goto skip; + + pVardataOffset = pVarInfo->put; + recordHeader->varData = vardataOffsetEnd; + } + else + { + // wrap-around; the effective vardataPut=0, vardataOffsetEnd=size + vardataOffsetEnd = 0 + alignedSize; + if ((!info->isKeepNewest) && (pVarInfo->get <= vardataOffsetEnd)) + goto skip; + + recordHeader->varData = vardataOffsetEnd | NV_EVENT_VARDATA_START_OFFSET_ZERO; + pVardataOffset = 0; + } + + if(size) + { + portMemCopy((void*)((NvUPtr)pVarInfo->vardataBuffAddr + pVardataOffset), size, NvP64_VALUE(data), size); + + if (alignedSize != size) + { + pVardataOffset += size; + portMemSet((void*)((NvUPtr)pVarInfo->vardataBuffAddr + pVardataOffset), 0, (alignedSize - size)); + } + } + + pVarInfo->put = vardataOffsetEnd; + _eventBufferUpdateVarRemaingSize(info); + return; + +skip: + recordHeader->varData = pVarInfo->put; + pHeader->vardataDropcount += 1; +} + +void +_eventBufferUpdateVarRemaingSize(EVENT_BUFFER_PRODUCER_INFO* info) +{ + VARDATA_BUFFER_INFO *pVarInfo = &info->vardataBuffer; + + if (!info->isKeepNewest) + { + if (pVarInfo->get <= pVarInfo->put) + pVarInfo->remainingSize = pVarInfo->get + (pVarInfo->bufferSize - pVarInfo->put); + else + pVarInfo->remainingSize = pVarInfo->get - pVarInfo->put; + } +} + +NvBool +eventBufferIsNotifyThresholdMet(EVENT_BUFFER_PRODUCER_INFO* info) +{ + VARDATA_BUFFER_INFO *pVarInfo = &info->vardataBuffer; + RECORD_BUFFER_INFO* pRecInfo = &info->recordBuffer; + NV_EVENT_BUFFER_HEADER* pHeader = pRecInfo->pHeader; + + if (((pRecInfo->totalRecordCount - pHeader->recordCount) <= pRecInfo->notificationThreshold) || + (pVarInfo->remainingSize <= pVarInfo->notificationThreshold)) + { + return NV_TRUE; + } + return NV_FALSE; +} diff --git a/src/nvidia/src/libraries/ioaccess/ioaccess.c b/src/nvidia/src/libraries/ioaccess/ioaccess.c new file mode 100644 index 0000000..33d5bf7 --- /dev/null +++ b/src/nvidia/src/libraries/ioaccess/ioaccess.c @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "ioaccess/ioaccess.h" +#include "utils/nvprintf.h" +#include "nvport/nvport.h" + +#if !((defined(NVRM) || defined(RMCFG_FEATURE_PLATFORM_GSP)) && !defined(NVWATCH)) +/*! + * Initialize an IO_APERTURE instance. This enables initialization for derived IO_APERTURE instances + * that are not allocated via CreateIOAperture. + * + * @param[in,out] pAperture pointer to IO_APERTURE instance to be initialized. + * @param[in] pParentAperture pointer to parent of the new IO_APERTURE. + * @param[in] pDevice pointer to IO_DEVICE of the APERTURE. + * @param[in] offset offset from the parent APERTURE's baseAddress. + * @param[in] length length of the APERTURE. + * + * @return NV_OK when inputs are valid. + */ +NV_STATUS +ioaccessInitIOAperture +( + IO_APERTURE *pAperture, + IO_APERTURE *pParentAperture, + IO_DEVICE *pDevice, + NvU32 offset, + NvU32 length +) +{ + if (pAperture == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // Aperture's IO device can't be set if both the parent aperture and IO device + // input arguments are NULL. + // + if ((pDevice == NULL) && (pParentAperture == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (pDevice != NULL) + { + pAperture->pDevice = pDevice; + } + + if (pParentAperture != NULL) + { + pAperture->pDevice = pParentAperture->pDevice; + pAperture->baseAddress = pParentAperture->baseAddress; + + // Check if the child Aperture strides beyond the parent's boundary. + if ((length + offset) > pParentAperture->length) + { + NV_PRINTF(LEVEL_WARNING, + "Child aperture crosses parent's boundary, length %u offset %u, Parent's length %u\n", + length, offset, pParentAperture->length); + } + } + else + { + pAperture->baseAddress = 0; + } + + pAperture->baseAddress += offset; + pAperture->length = length; + + return NV_OK; +} +#endif // !((defined(NVRM) || defined(RMCFG_FEATURE_PLATFORM_GSP)) && !defined(NVWATCH)) diff --git a/src/nvidia/src/libraries/mapping_reuse/mapping_reuse.c b/src/nvidia/src/libraries/mapping_reuse/mapping_reuse.c new file mode 100644 index 0000000..d8d9ea8 --- /dev/null +++ b/src/nvidia/src/libraries/mapping_reuse/mapping_reuse.c @@ -0,0 +1,324 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "mapping_reuse/mapping_reuse.h" + + +static NV_STATUS _reusemappingdbAddMappingCallback(void *, NvU64, NvU64, NvU64); + +/*! 
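+ * Editor's note (added): an overview sketch of how this database is meant
+ * to be driven, assuming the callbacks wrap a real mapping API (myMap,
+ * myUnmap, mySplit and pGpuCtx/pAllocCtx below are hypothetical):
+ *
+ *     ReuseMappingDb db;
+ *     MemoryArea area;
+ *     reusemappingdbInit(&db, pAllocator, pGpuCtx, myMap, myUnmap, mySplit);
+ *     // The first request invokes myMap() and caches the resulting range:
+ *     reusemappingdbMap(&db, pAllocCtx, mrangeMake(0, 0x1000), &area,
+ *                       REUSE_MAPPING_DB_MAP_FLAGS_SINGLE_RANGE);
+ *     // An identical request is then served from the cache (refCount == 2),
+ *     // and each mapping is only torn down once its refCount drops to 0:
+ *     reusemappingdbUnmap(&db, pAllocCtx, area.pRanges[0]);
+ *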
+ * @brief Initialize the mapping reuse object + * + * @param[in] pReuseMappingDb Pointer to reuse mapping object + * @param[in] pAllocator Pointer to PORT_MEM_ALLOCATOR to use internally + * @param[in] pGlobalCtx Pointer that is passed into callbacks, is constant for all calls of callback + * @param[in] pMapCb Callback called when we need a new mapping not present in caching structures + * @param[in] pUnmapCb Callback called when refcount of a given cached mapping becomes 0 + * @param[in] pSplitCb Callback called when we need to split a given mapping due to partial map + */ +void +reusemappingdbInit +( + ReuseMappingDb *pReuseMappingDb, + PORT_MEM_ALLOCATOR *pAllocator, + void *pGlobalCtx, + ReuseMappingDbMapFunction pMapCb, + ReuseMappingDbUnnmapFunction pUnmapCb, + ReuseMappingDbSplitMappingFunction pSplitCb +) +{ + mapInitIntrusive(&(pReuseMappingDb->virtualMap)); + mapInit(&(pReuseMappingDb->allocCtxPhysicalMap), pAllocator); + pReuseMappingDb->pGlobalCtx = pGlobalCtx; + pReuseMappingDb->pAllocator = pAllocator; + pReuseMappingDb->pMapCb = pMapCb; + pReuseMappingDb->pUnmapCb = pUnmapCb; + pReuseMappingDb->pSplitCb = pSplitCb; +} + +/*! + * @brief Destroy the mapping reuse object + * + * @param[in] pReuseMappingDb Pointer to reuse mapping object + */ +void +reusemappingdbDestruct +( + ReuseMappingDb *pReuseMappingDb +) +{ + mapDestroy(&(pReuseMappingDb->virtualMap)); + mapDestroy(&(pReuseMappingDb->allocCtxPhysicalMap)); + portMemSet(pReuseMappingDb, 0, sizeof(*pReuseMappingDb)); +} + +/*! + * @brief Unmap a range returned from a previous map calll + * + * @param[in] pReuseMappingDb Pointer to reuse mapping object + * @param[in] range Range returned from previous map call. Only actually unmapped when refcnt is 0 + */ +void +reusemappingdbUnmap +( + ReuseMappingDb *pReuseMappingDb, + void *pAllocCtx, + MemoryRange range +) +{ + ReuseMappingDbEntry *pEntry = mapFindGEQ(&(pReuseMappingDb->virtualMap), range.start); + NvU64 curOffset = range.start; + NvBool bFirstRange = NV_TRUE; + + while (pEntry != NULL) + { + ReuseMappingDbEntry *pNextEntry = mapNext(&(pReuseMappingDb->virtualMap), pEntry); + NvU64 revOffset = mapKey(&(pReuseMappingDb->virtualMap), pEntry); + MemoryRange revRange = mrangeMake(revOffset, pEntry->size); + MemoryRange diffRange = mrangeMake(curOffset, revOffset - curOffset); + + // Only unmap ranges contained within the desired unmap range + if (!mrangeContains(range, revRange)) + { + if (bFirstRange) + { + break; + } + return; + } + + bFirstRange = NV_FALSE; + curOffset = mrangeLimit(revRange); + + // Unmap any partial range not tracked by data structure + if (diffRange.size != 0) + { + pReuseMappingDb->pUnmapCb(pReuseMappingDb->pGlobalCtx, pAllocCtx, diffRange); + } + + // Remove the range tracked by the data structure + pEntry->refCount--; + if (pEntry->refCount == 0) + { + // Only remove entry and unmap if refCount is 0. + void *pEntryAllocCtx = pEntry->trackingInfo.pAllocCtx; + ReuseMappingDbPhysicalMap *pPhysicalMap = mapFind(&(pReuseMappingDb->allocCtxPhysicalMap), + (NvU64) pEntryAllocCtx); + + mapRemove(&(pReuseMappingDb->virtualMap), pEntry); + mapRemove(pPhysicalMap, pEntry); + + pReuseMappingDb->pUnmapCb(pReuseMappingDb->pGlobalCtx, pEntryAllocCtx, revRange); + PORT_FREE(pReuseMappingDb->pAllocator, pEntry); + } + + pEntry = pNextEntry; + } + + // Take care of any overhang. 
+ if (mrangeLimit(range) != curOffset) + { + MemoryRange diffRange = mrangeMake(curOffset, mrangeLimit(range) - curOffset); + pReuseMappingDb->pUnmapCb(pReuseMappingDb->pGlobalCtx, pAllocCtx, diffRange); + } +} + +typedef struct ReuseMappingDbToken +{ + ReuseMappingDbEntry *pList; + ReuseMappingDb *pDb; + NvU64 numNewEntries; +} ReuseMappingDbToken; + +// +// This callback appends a new mapping entry to the pending linked list. The caller calls this with +// an opaque token which is the head of the list as well as the physical and virtual offsets, and +// this function attaches the entry to the head of the list. +// +static NV_STATUS +_reusemappingdbAddMappingCallback +( + void *pToken, + NvU64 physicalOffset, + NvU64 virtualOffset, + NvU64 size +) +{ + ReuseMappingDbToken *pRealToken = (ReuseMappingDbToken *) pToken; + ReuseMappingDbEntry *pEntry = (ReuseMappingDbEntry *) PORT_ALLOC(pRealToken->pDb->pAllocator, + sizeof(ReuseMappingDbEntry)); + NV_ASSERT_OR_RETURN(pEntry != NULL, NV_ERR_NO_MEMORY); + + pEntry->size = size; + pEntry->refCount = 1; + pEntry->newMappingNode.pNextEntry = pRealToken->pList; + pEntry->newMappingNode.physicalOffset = physicalOffset; + pEntry->newMappingNode.virtualOffset = virtualOffset; + + pRealToken->pList = pEntry; + pRealToken->numNewEntries++; + + return NV_OK; +} + +/*! + * @brief Initialize the mapping reuse object + * + * @param[in] pReuseMappingDb Pointer to reuse mapping object + * @param[in] pAllocCtx Context for a given mapping, for this particular call, passed into the map/unmap callbacks. + This is cached with the mapping offset for reuse. + * @param[in] range Range within that allocation context to map. + * @param[out] pMemoryArea MemoryArea representing (potentially cached) ranges returned from this database + * @param[in] cachingFlags Caching flags used for this mapping (ie _SINGLE_RANGE, _NO_REUSE) + */ +NV_STATUS +reusemappingdbMap +( + ReuseMappingDb *pReuseMappingDb, + void *pAllocCtx, + MemoryRange range, + MemoryArea *pMemoryArea, + NvU64 cachingFlags +) +{ + ReuseMappingDbPhysicalMap *pPhysicalMap; + ReuseMappingDbToken token; + NvBool bNoReuse = !!(cachingFlags & REUSE_MAPPING_DB_MAP_FLAGS_NO_REUSE); + NvBool bSingleRange = !!(cachingFlags & REUSE_MAPPING_DB_MAP_FLAGS_SINGLE_RANGE); + NvBool bAddToMap = !bNoReuse; + NV_STATUS status = NV_OK; + + // TODO: Remove when we support multi-range reuse + NV_ASSERT_OR_RETURN( bSingleRange || bNoReuse, NV_ERR_NOT_SUPPORTED); + + pPhysicalMap = mapFind(&(pReuseMappingDb->allocCtxPhysicalMap), (NvU64) pAllocCtx); + + // We don't currently have any mappings for this alloc context, create new map + if (pPhysicalMap == NULL) + { + pPhysicalMap = mapInsertNew(&(pReuseMappingDb->allocCtxPhysicalMap), (NvU64) pAllocCtx); + mapInitIntrusive(pPhysicalMap); + NV_ASSERT_OR_RETURN(pPhysicalMap != NULL, NV_ERR_NO_MEMORY); + } + + if (!bNoReuse && bSingleRange) + { + ReuseMappingDbEntry *pEntry = mapFindLEQ(pPhysicalMap, range.start); + // If no range LEQ, then try GEQ + if (pEntry == NULL) + { + pEntry = mapFindGEQ(pPhysicalMap, range.start); + } + if (pEntry != NULL) + { + NvU64 physicalOffset = mapKey(pPhysicalMap, pEntry); + MemoryRange physRange = mrangeMake(physicalOffset, pEntry->size); + + // LEQ returned a resultant range before current range + if(!mrangeIntersects(physRange, range)) + { + pEntry = mapNext(pPhysicalMap, pEntry); + } + if (pEntry != NULL) + { + // We at least now have an entry thats not before the desired range. 
+ physicalOffset = mapKey(pPhysicalMap, pEntry); + physRange = mrangeMake(physicalOffset, pEntry->size); + NvU64 virtualOffset = mapKey(&(pReuseMappingDb->virtualMap), pEntry); + + // Do another intersection check because the range might be after the desired range + if (mrangeIntersects(physRange, range)) + { + bAddToMap = NV_FALSE; + // Only return exact match + if (physRange.start == range.start && physRange.size == range.size) + { + pMemoryArea->numRanges = 1; + pEntry->refCount++; + pMemoryArea->pRanges = PORT_ALLOC(pReuseMappingDb->pAllocator, sizeof(MemoryRange)); + NV_ASSERT_OR_RETURN(pMemoryArea->pRanges != NULL, NV_ERR_NO_MEMORY); + pMemoryArea->pRanges[0] = mrangeMake(virtualOffset, range.size); + return NV_OK; + } + } + } + } + } + // Initialize linked list of new entries + token.numNewEntries = 0; + token.pDb = pReuseMappingDb; + token.pList = NULL; + + // Get new mappings, added to linked list + NV_ASSERT_OK_OR_GOTO(status, pReuseMappingDb->pMapCb(pReuseMappingDb->pGlobalCtx, pAllocCtx, + range, cachingFlags, &token, _reusemappingdbAddMappingCallback), err_unmap); + + pMemoryArea->pRanges = PORT_ALLOC(pReuseMappingDb->pAllocator, sizeof(MemoryRange) * token.numNewEntries); + pMemoryArea->numRanges = 0; + + NV_ASSERT_TRUE_OR_GOTO(status, pMemoryArea->pRanges != NULL, NV_ERR_NO_MEMORY, err_unmap); + + // Now append the mappings to the result memory area + while (token.pList != NULL) + { + ReuseMappingDbEntry *pEntry = token.pList; + NvU64 physicalOffset = pEntry->newMappingNode.physicalOffset; + NvU64 virtualOffset = pEntry->newMappingNode.virtualOffset; + NvU64 size = pEntry->size; + + token.pList = pEntry->newMappingNode.pNextEntry; + pMemoryArea->numRanges++; + + // + // The linked list is inserted in reverse order, so we reverse the list when inserting into + // final array. + // + pMemoryArea->pRanges[token.numNewEntries - pMemoryArea->numRanges] = mrangeMake(virtualOffset, size); + + // Add data to tracking structures only if the mapped range does not overlap + if (bAddToMap) + { + pEntry->trackingInfo.pAllocCtx = pAllocCtx; + mapInsertExisting(pPhysicalMap, physicalOffset, pEntry); + mapInsertExisting(&(pReuseMappingDb->virtualMap), virtualOffset, pEntry); + } + else + { + PORT_FREE(pReuseMappingDb->pAllocator, pEntry); + } + } + + return NV_OK; + +err_unmap: + // Unmap and free if we can't allocate the required space for the result array. + while (token.pList != NULL) + { + void *pCur = token.pList; + pReuseMappingDb->pUnmapCb(pReuseMappingDb->pGlobalCtx, pAllocCtx, + mrangeMake(token.pList->newMappingNode.virtualOffset, range.size)); + token.pList = token.pList->newMappingNode.pNextEntry; + PORT_FREE(pReuseMappingDb->pAllocator, pCur); + } + return status; +} diff --git a/src/nvidia/src/libraries/nvbitvector/nvbitvector.c b/src/nvidia/src/libraries/nvbitvector/nvbitvector.c new file mode 100644 index 0000000..e62dbbe --- /dev/null +++ b/src/nvidia/src/libraries/nvbitvector/nvbitvector.c @@ -0,0 +1,990 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "utils/nvbitvector.h" + +/** + * @brief Returns the size, in bytes, of this bitvector. + * @note due to the compiler trick of storing the last index within a + * structure pointer in the data, the minimum size of an NV_BITVECTOR + * will be the size of one pointer on a given architecture. If the + * storage size of the underlying data is changed to something less + * than the size of a pointer on a given architecture, then two + * libraries running on different architectures transferring bitvectors + * between them may disagree on the value of the direct sizeof operator + * on a struct of an NV_BITVECTOR derivative. This version of SizeOf + * should be agreeable to all architectures, and should be used instead + * of sizeof to marshal data between libraries running on different + * architectures. + */ +NvU32 +bitVectorSizeOf_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + NV_ASSERT_OR_RETURN(NULL != pBitVector, 0); + + return NV_BITVECTOR_BYTE_SIZE(bitVectorLast); +} + +/** + * @brief Clears all flags in pBitVector.
+ */ +NV_STATUS +bitVectorClrAll_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + NvLength byteSize = NV_BITVECTOR_BYTE_SIZE(bitVectorLast); + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + + portMemSet(&pBitVector->qword, 0x0, byteSize); + return NV_OK; +} + +/** + * @brief Clears the flag in pBitVector according to bit index idx + */ +NV_STATUS +bitVectorClr_IMPL +( + NV_BITVECTOR *pBitVector, + NvU32 bitVectorLast, + NvU32 idx +) +{ + NvU64 *qword; + NvU32 qwordIdx = NV_BITVECTOR_IDX(idx); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(idx); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(idx < bitVectorLast, NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + qword[qwordIdx] &= ~NVBIT64(qwordOffset); + return NV_OK; +} + +/** + * @brief Clears all flags within a range in pBitVector + */ +NV_STATUS +bitVectorClrRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +) +{ + NvU64 *qword; + NvU16 idx; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rangeContains(rangeMake(0, bitVectorLast - 1), range), + NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + for (idx = (NvU16)range.lo; idx <= (NvU16)range.hi; ++idx) + { + if ((0 == NV_BITVECTOR_OFFSET(idx)) && + (rangeContains(range, rangeMake(idx + 63, idx + 63)))) + { + qword[NV_BITVECTOR_IDX(idx)] = 0x0; + idx += 63; + continue; + } + + status = bitVectorClr_IMPL(pBitVector, bitVectorLast, idx); + if (NV_OK != status) + { + return status; + } + } + + return status; +} + +/** + * @brief Sets all flags in pBitVector + */ +NV_STATUS +bitVectorSetAll_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + NvU64 *qword; + NvLength byteSize = NV_BITVECTOR_BYTE_SIZE(bitVectorLast); + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + portMemSet(qword, NV_U8_MAX, byteSize); + qword[arraySize - 1] &= (NV_U64_MAX >> (63 - qwordOffset)); + + return NV_OK; +} + +/** + * @brief Sets the flag in pBitVector according to bit index idx + */ +NV_STATUS +bitVectorSet_IMPL +( + NV_BITVECTOR *pBitVector, + NvU32 bitVectorLast, + NvU32 idx +) +{ + NvU64 *qword; + NvU32 qwordIdx = NV_BITVECTOR_IDX(idx); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(idx); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(idx < bitVectorLast, NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + qword[qwordIdx] |= NVBIT64(qwordOffset); + + return NV_OK; +} + +/** + * @brief Sets all flags within a range in pBitVector + */ +NV_STATUS +bitVectorSetRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +) +{ + NvU64 *qword; + NvU16 idx; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rangeContains(rangeMake(0, bitVectorLast - 1), range), + NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + for (idx = (NvU16)range.lo; idx <= (NvU16)range.hi; ++idx) + { + if ((0 == NV_BITVECTOR_OFFSET(idx)) && + (rangeContains(range, rangeMake(idx + 63, idx + 63)))) + { + qword[NV_BITVECTOR_IDX(idx)] = (NV_U64_MAX); + idx += 63; + continue; + } + + status = bitVectorSet_IMPL(pBitVector, bitVectorLast, idx); + if (NV_OK != status) + { + 
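+/*
+ * Editor's note (added): a worked example of the whole-qword fast path used
+ * by the *Range routines in this file. For range == [0, 127], idx == 0
+ * satisfies NV_BITVECTOR_OFFSET(idx) == 0, and idx + 63 == 63 still lies
+ * inside the range, so the entire first qword is written with a single
+ * store and idx advances by 64 (the explicit += 63 plus the loop
+ * increment); idx == 64 takes the same path. Only ranges that start or end
+ * mid-qword fall back to the per-bit helpers.
+ */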
return status; + } + } + + return status; +} + +/** + * @brief Toggles the flag in pBitVector according to bit index idx + */ +NV_STATUS +bitVectorInv_IMPL +( + NV_BITVECTOR *pBitVector, + NvU32 bitVectorLast, + NvU32 idx +) +{ + NvU64 *qword; + NvU32 qwordIdx = NV_BITVECTOR_IDX(idx); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(idx); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(idx < bitVectorLast, NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + qword[qwordIdx] ^= NVBIT64(qwordOffset); + + return NV_OK; +} + +/** + * @brief Toggles all flags within a range in pBitVector + */ +NV_STATUS +bitVectorInvRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +) +{ + NvU64 *qword; + NvU16 idx; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rangeContains(rangeMake(0, bitVectorLast - 1), range), + NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + for (idx = (NvU16)range.lo; idx <= (NvU16)range.hi; ++idx) + { + if ((0 == NV_BITVECTOR_OFFSET(idx)) && + (rangeContains(range, rangeMake(idx + 63, idx + 63)))) + { + qword[NV_BITVECTOR_IDX(idx)] = ~qword[NV_BITVECTOR_IDX(idx)]; + idx += 63; + continue; + } + + status = bitVectorInv_IMPL(pBitVector, bitVectorLast, idx); + if (NV_OK != status) + { + return status; + } + } + + return status; +} + +/** + * @brief Initializes a NV_BITVECTOR with the bit indices contained within + * pIndices set. + */ +NV_STATUS +bitVectorFromArrayU16_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 *pIndices, + NvU32 indicesSize +) +{ + NV_STATUS status = NV_OK; + NvU32 i; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pIndices, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(0 != indicesSize, NV_ERR_INVALID_ARGUMENT); + + status = bitVectorClrAll_IMPL(pBitVector, bitVectorLast); + if (NV_OK != status) + { + return status; + } + + for (i = 0; i < indicesSize; ++i) + { + status = bitVectorSet_IMPL(pBitVector, bitVectorLast, pIndices[i]); + if (NV_OK != status) + { + return status; + } + } + + return status; +} + +/** + * @brief Checks if all flags in pBitVector are set + */ +NvBool +bitVectorTestAllSet_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + const NvU64 *qword; + NvU32 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_FALSE); + + qword = (const NvU64 *)&pBitVector->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if (mask != (qword[idx] & mask)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +/** + * @brief Checks if all flags in pBitVector are cleared + */ +NvBool +bitVectorTestAllCleared_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + const NvU64 *qword; + NvU32 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_FALSE); + + qword = (const NvU64 *)&pBitVector->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? 
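+/*
+ * Editor's note (added): the tail mask computed here keeps only the flags
+ * that are actually in range in the final qword. For example, with
+ * bitVectorLast == 100: NV_BITVECTOR_ARRAY_SIZE(100) == 2 and
+ * NV_BITVECTOR_OFFSET(99) == 35, so the last qword is masked with
+ * NV_U64_MAX >> (63 - 35), leaving the low 36 bits set (64 + 36 == 100).
+ */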
NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if (0x0 != (qword[idx] & mask)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +/** + * @brief Checks if two bitVectors are equivalent + */ +NvBool +bitVectorTestEqual_IMPL +( + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +) +{ + const NvU64 *qwordA; + const NvU64 *qwordB; + NvU32 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorALast); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorALast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorA, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorB, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((bitVectorALast == bitVectorBLast), NV_ERR_INVALID_ARGUMENT); + + qwordA = (const NvU64 *)&pBitVectorA->qword; + qwordB = (const NvU64 *)&pBitVectorB->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if ((qwordA[idx] & mask) != (qwordB[idx] & mask)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +/** + * @brief Checks if the set of set flags in bitVectorA is a subset of the set of + * set flags in bitVectorB. + */ +NvBool +bitVectorTestIsSubset_IMPL +( + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +) +{ + const NvU64 *qwordA; + const NvU64 *qwordB; + NvU32 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorALast); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorALast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorA, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorB, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((bitVectorALast == bitVectorBLast), NV_ERR_INVALID_ARGUMENT); + + qwordA = (const NvU64 *)&pBitVectorA->qword; + qwordB = (const NvU64 *)&pBitVectorB->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? 
NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if (((qwordA[idx] & mask) & (qwordB[idx] & mask)) != (qwordA[idx] & mask)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +/** + * @brief Checks if the flag according to bit index idx in pBitVector is set + */ +NvBool +bitVectorTest_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU32 bitVectorLast, + NvU32 idx +) +{ + const NvU64 *qword; + NvU32 qwordIdx = NV_BITVECTOR_IDX(idx); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(idx); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_FALSE); + NV_ASSERT_OR_RETURN(idx < bitVectorLast, NV_FALSE); + + qword = (const NvU64 *)&pBitVector->qword; + return !!(qword[qwordIdx] & NVBIT64(qwordOffset)); +} + +/** + * @brief Computes the intersection of flags in pBitVectorA and pBitVectorB, and + * stores the result in pBitVectorDst + * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorA First operand + * @param[in] pBitVectorB Second operand + * + * @note it is valid for the same bitVector to be both destination and operand + * for this operation + */ +NV_STATUS +bitVectorAnd_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +) +{ + NvU64 *qwordDst; + const NvU64 *qwordA; + const NvU64 *qwordB; + NvU32 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorDstLast); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorDstLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorA, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorB, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(((bitVectorDstLast == bitVectorALast) && (bitVectorALast == + bitVectorBLast)), NV_ERR_INVALID_ARGUMENT); + + qwordDst = (NvU64 *)&pBitVectorDst->qword; + qwordA = (const NvU64 *)&pBitVectorA->qword; + qwordB = (const NvU64 *)&pBitVectorB->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + qwordDst[idx] = (qwordA[idx] & qwordB[idx]) & mask; + } + + return NV_OK; +} + +/** + * @brief Computes the union of flags in pBitVectorA and pBitVectorB, and stores + * the result in pBitVectorDst + * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorA First operand + * @param[in] pBitVectorB Second operand + * + * @note it is valid for the same bitVector to be both destination and operand + * for this operation + */ +NV_STATUS +bitVectorOr_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +) +{ + NvU64 *qwordDst; + const NvU64 *qwordA; + const NvU64 *qwordB; + NvU32 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorDstLast); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorDstLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorA, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorB, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(((bitVectorDstLast == bitVectorALast) && (bitVectorALast == + bitVectorBLast)), NV_ERR_INVALID_ARGUMENT); + + qwordDst = (NvU64 *)&pBitVectorDst->qword; + qwordA = (const NvU64 *)&pBitVectorA->qword; + qwordB = (const NvU64 *)&pBitVectorB->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? 
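+/*
+ * Editor's note (added): a sketch of composing these primitives to compute
+ * "flags set in A but not in B" (set difference). pTmp, pDst, pA, pB and
+ * LAST are hypothetical; all vectors must share the same bitVectorLast:
+ *
+ *     bitVectorComplement_IMPL(pTmp, LAST, pB, LAST);
+ *     bitVectorAnd_IMPL(pDst, LAST, pA, LAST, pTmp, LAST);
+ *
+ * Because every operation masks off the unused tail of the final qword,
+ * the result cannot carry stray flags past bitVectorLast.
+ */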
NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + qwordDst[idx] = (qwordA[idx] | qwordB[idx]) & mask; + } + + return NV_OK; +} + +/** + * @brief Computes the exclusive OR of flags in pBitVectorA and pBitVectorB, and stores + * the result in pBitVectorDst + * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorA First operand + * @param[in] pBitVectorB Second operand + * + * @note it is valid for the same bitVector to be both destination and operand + * for this operation + */ +NV_STATUS +bitVectorXor_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +) +{ + NvU64 *qwordDst; + const NvU64 *qwordA; + const NvU64 *qwordB; + NvU32 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorDstLast); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorDstLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorA, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorB, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(((bitVectorDstLast == bitVectorALast) && (bitVectorALast == + bitVectorBLast)), NV_ERR_INVALID_ARGUMENT); + + qwordDst = (NvU64 *)&pBitVectorDst->qword; + qwordA = (const NvU64 *)&pBitVectorA->qword; + qwordB = (const NvU64 *)&pBitVectorB->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + qwordDst[idx] = (qwordA[idx] ^ qwordB[idx]) & mask; + } + + return NV_OK; +} + +/** + * @brief Causes the set of raised flags in pBitVectorDst to be equal to the + * complement of the set of raised flags in pBitVectorSrc. + * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorSrc Source + * + * @note it is valid for the same bitVector to be both destination and + * source for this operation + */ +NV_STATUS +bitVectorComplement_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorSrc, + NvU16 bitVectorSrcLast +) +{ + NvU64 *qwordDst; + const NvU64 *qwordSrc; + NvU32 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorDstLast); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorDstLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorSrc, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(((bitVectorDstLast == bitVectorSrcLast)), NV_ERR_INVALID_ARGUMENT); + + qwordDst = (NvU64 *)&pBitVectorDst->qword; + qwordSrc = (const NvU64 *)&pBitVectorSrc->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + qwordDst[idx] = (~qwordSrc[idx]) & mask; + } + + return NV_OK; +} + +/** + * @brief Causes the set of raised flags in pBitVectorDst to be equal to the set + * of raised flags in pBitVectorSrc. 
+ * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorSrc Source + * + * @note it is \b invalid for the same bitVector to be both destination and + * source for this operation + */ +NV_STATUS +bitVectorCopy_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorSrc, + NvU16 bitVectorSrcLast +) +{ + NvLength byteSizeDst = NV_BITVECTOR_BYTE_SIZE(bitVectorDstLast); + NvLength byteSizeSrc = NV_BITVECTOR_BYTE_SIZE(bitVectorSrcLast); + + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorSrc, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(bitVectorDstLast == bitVectorSrcLast, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pBitVectorDst != pBitVectorSrc, NV_WARN_NOTHING_TO_DO); + + portMemCopy(&pBitVectorDst->qword, byteSizeDst, &pBitVectorSrc->qword, byteSizeSrc); + return NV_OK; +} + +/** + * @brief Returns the bit index of the first set flag in pBitVector. + * + * @note in the absence of set flags in pBitVector, the index of the first + * invalid flag is returned. + */ +NvU32 +bitVectorCountTrailingZeros_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + const NvU64 *qword; + NvU32 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, 0); + + qword = (const NvU64 *)&pBitVector->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if (0x0 != (qword[idx] & mask)) + { + return ((idx * (sizeof(NvU64) * 8)) + + portUtilCountTrailingZeros64(qword[idx] & mask)); + } + } + + return bitVectorLast; +} + +/** + * @brief Returns the bit index of the last set flag in pBitVector. + * + * @note in the absence of set flags in pBitVector, the index of the first + * invalid flag is returned. + */ +NvU32 +bitVectorCountLeadingZeros_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + const NvU64 *qword; + NvU32 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + NvU32 qwordUnused = 63 - qwordOffset; + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, 0); + + qword = (const NvU64 *)&pBitVector->qword; + for (idx = (arraySize - 1); idx != (NvU32)-1; idx--) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if (0x0 != qword[idx]) + { + // + // We're counting from the MSB, and we have to subtract the unused + // portion of the bitvector from the output + // + return (((arraySize - idx - 1) * (sizeof(NvU64) * 8)) + + portUtilCountLeadingZeros64(qword[idx] & mask)) - + qwordUnused; + } + } + + return bitVectorLast; +} + +/** + * @brief Returns the number of set bits in the bitvector. + */ +NvU32 +bitVectorCountSetBits_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + const NvU64 *qword; + NvU32 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + NvU64 mask; + NvU32 count; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, 0); + + count = 0; + qword = (const NvU64 *)&pBitVector->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? 
NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + count += nvPopCount64(qword[idx] & mask); + } + + return count; +} + +/** + * @brief Exports the bitVector data to an NvU64 raw bitmask array. + */ +NV_STATUS +bitVectorToRaw_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + void *pRawMask, + NvU32 rawMaskSize +) +{ + const NvLength byteSize = NV_BITVECTOR_BYTE_SIZE(bitVectorLast); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pRawMask, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rawMaskSize >= byteSize, NV_ERR_BUFFER_TOO_SMALL); + + portMemCopy(pRawMask, byteSize, &pBitVector->qword, byteSize); + return NV_OK; +} + +/** + * @brief Imports the bitVector data from an NvU64 raw bitmask array. + */ +NV_STATUS +bitVectorFromRaw_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + const void *pRawMask, + NvU32 rawMaskSize +) +{ + const NvLength byteSize = NV_BITVECTOR_BYTE_SIZE(bitVectorLast); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pRawMask, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rawMaskSize >= byteSize, NV_ERR_BUFFER_TOO_SMALL); + + portMemCopy(&pBitVector->qword, byteSize, pRawMask, byteSize); + return NV_OK; +} + + +/** + * @brief Gets a slice for a range within pBitVector + * + * @note range length must be <=64, so the output slice can fit in an NvU64 + */ +NV_STATUS +bitVectorGetSlice_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range, + NvU64 *slice +) +{ + NvU64 *qword; + NvU64 temp; + NvU64 offsetLo = NV_BITVECTOR_OFFSET(range.lo); + NvU64 offsetHi = NV_BITVECTOR_OFFSET(range.hi); + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(pBitVector != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rangeLength(range) <= 8 * sizeof(NvU64), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(range.hi != NV_U64_MAX, NV_ERR_INVALID_ARGUMENT); // detect underflow + NV_ASSERT_OR_RETURN(rangeContains(rangeMake(0, bitVectorLast - 1), range), + NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + + if (NV_BITVECTOR_IDX(range.lo) == NV_BITVECTOR_IDX(range.hi)) + { + // range fits within a single qword index + temp = qword[NV_BITVECTOR_IDX(range.lo)]; + temp &= DRF_SHIFTMASK64(offsetHi : offsetLo); + temp >>= offsetLo; + *slice = temp; + } + else + { + // range spreads across 2 qword indexes + NV_ASSERT_OR_RETURN(NV_BITVECTOR_IDX(range.lo) == NV_BITVECTOR_IDX(range.hi) - 1, + NV_ERR_INVALID_ARGUMENT); + + temp = qword[NV_BITVECTOR_IDX(range.lo)]; + temp &= DRF_SHIFTMASK64(63 : offsetLo); + temp >>= offsetLo; + *slice = temp; + + temp = qword[NV_BITVECTOR_IDX(range.hi)]; + temp &= DRF_SHIFTMASK64(offsetHi : 0); + temp <<= 64 - offsetLo; + *slice |= temp; + } + + return status; +} + +/** + * @brief Causes the least significant N raised bits in pBitVectorSrc to be + * raised in pBitVectorDst.
+ * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorSrc Source + * @param[in] n Count of bits to copy + * + * @note it is invalid for the same bitvector to be both dest and source + * @note n cannot be larger than the size of the bitvector + */ +NV_STATUS +bitVectorLowestNBits_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorSrc, + NvU16 bitVectorSrcLast, + NvU16 n +) +{ + NvU64 *qwordDst; + const NvU64 *qwordSrc; + NvU32 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorSrcLast); + NvU32 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorSrcLast - 1); + NvU64 mask; + NvU16 count; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorSrc, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((bitVectorSrcLast == bitVectorDstLast), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pBitVectorDst != pBitVectorSrc, NV_ERR_INVALID_ARGUMENT); + // n may equal the vector size; that case is handled below by a full copy + NV_ASSERT_OR_RETURN(n <= bitVectorSrcLast, NV_ERR_INVALID_ARGUMENT); + + if (n == bitVectorSrcLast) + return bitVectorCopy_IMPL(pBitVectorDst, bitVectorDstLast, pBitVectorSrc, bitVectorSrcLast); + + bitVectorClrAll_IMPL(pBitVectorDst, bitVectorDstLast); + + if (n == 0) + return NV_OK; + + count = 0; + qwordSrc = (const NvU64 *)&pBitVectorSrc->qword; + qwordDst = (NvU64 *)&pBitVectorDst->qword; + for (idx = 0; idx < arraySize; idx++) + { + NvU64 bit; + + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + FOR_EACH_INDEX_IN_MASK(64, bit, qwordSrc[idx] & mask) + { + qwordDst[idx] |= NVBIT64(bit); + + count++; + if (count == n) + return NV_OK; + } + FOR_EACH_INDEX_IN_MASK_END; + } + + return NV_OK; +} + diff --git a/src/nvidia/src/libraries/nvoc/src/runtime.c b/src/nvidia/src/libraries/nvoc/src/runtime.c new file mode 100644 index 0000000..739c106 --- /dev/null +++ b/src/nvidia/src/libraries/nvoc/src/runtime.c @@ -0,0 +1,370 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ + +#include "nvport/nvport.h" + +#include "nvtypes.h" + +#include "nvoc/rtti.h" +#include "nvoc/runtime.h" + +#include "nvoc/object.h" + +# include "utils/nvassert.h" + + +static NV_FORCEINLINE Dynamic *__nvoc_fullyDerive_IMPL(Dynamic *pDynamic) +{ + return (Dynamic*)((NvU8*)pDynamic - pDynamic->__nvoc_rtti->offset); +} + +Dynamic *fullyDeriveWrapper(Dynamic *pDynamic) +{ + return __nvoc_fullyDerive_IMPL(pDynamic); +} + +const struct NVOC_RTTI_PROVIDER __nvoc_rtti_provider = { 0 }; + +NVOC_CLASS_ID __nvoc_objGetClassId(Dynamic *pObj) +{ + Dynamic *pDerivedObj = __nvoc_fullyDerive(pObj); + return pDerivedObj->__nvoc_rtti->pClassDef->classInfo.classId; +} + +const NVOC_CLASS_INFO *__nvoc_objGetClassInfo(Dynamic *pObj) +{ + Dynamic *pDerivedObj = __nvoc_fullyDerive(pObj); + return &pDerivedObj->__nvoc_rtti->pClassDef->classInfo; +} + +Dynamic *objFindAncestor_IMPL(Dynamic *pDynamic, NVOC_CLASS_ID classId) +{ + Object *pObj = dynamicCast(pDynamic, Object); + NV_ASSERT(pObj != NULL); + + while ((pObj = pObj->pParent) != NULL) + { + if (objDynamicCastById(pObj, classId) != NULL) return __nvoc_fullyDerive(pObj); + } + + NV_ASSERT(0); + return NULL; +} + +void objAddChild_IMPL(Object *pObj, Object *pChild) +{ + NV_ASSERT(pChild->pParent == NULL); + +#if defined(DEBUG) + if (pChild->createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + { + // + // For objects constructed in place it is possible to call objCreate() twice without calling objDelete() + // This results in a loop in parent's child list, making it endless + // This check is supposed to make catching this issue easier without affecting perf + // + Object *pCurrentChild = pObj->childTree.pChild; + + while (pCurrentChild != NULL) + { + if (pCurrentChild == pChild) + { +#if NV_PRINTF_STRINGS_ALLOWED + portDbgPrintf("NVOC: %s: class %s called in-place objCreate() twice without calling objDelete()", + __FUNCTION__, + objGetClassInfo(pChild)->name); +#endif // NV_PRINTF_STRINGS_ALLOWED + PORT_BREAKPOINT_DEBUG(); + } + + pCurrentChild = pCurrentChild->childTree.pSibling; + } + } +#endif // defined(DEBUG) + + pChild->pParent = pObj; + pChild->childTree.pSibling = pObj->childTree.pChild; + pObj->childTree.pChild = pChild; +} + +void objRemoveChild_IMPL(Object *pObj, Object *pChild) +{ + Object **ppChild; + + NV_ASSERT(pObj == pChild->pParent); + pChild->pParent = NULL; + ppChild = &pObj->childTree.pChild; + while (*ppChild != NULL) + { + if (*ppChild == pChild) + { + *ppChild = pChild->childTree.pSibling; + return; + } + + ppChild = &(*ppChild)->childTree.pSibling; + } +} + +Object *objGetChild_IMPL(Object *pObj) +{ + NV_ASSERT(pObj != NULL); + return pObj->childTree.pChild; +} + +Object *objGetSibling_IMPL(Object *pObj) +{ + NV_ASSERT(pObj != NULL); + return pObj->childTree.pSibling; +} + +Object *objGetDirectParent_IMPL(Object *pObj) +{ + NV_ASSERT(pObj != NULL); + return pObj->pParent; +} + +NV_STATUS __nvoc_handleObjCreateMemAlloc(NvU32 createFlags, NvU32 allocSize, void **ppLocalPtr, void **ppThis) +{ + if (allocSize == 0 || ppThis == NULL || ppLocalPtr == NULL) + return NV_ERR_INVALID_PARAMETER; + + if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT) + { + *ppLocalPtr = *ppThis; + } + else + { + *ppLocalPtr = portMemAllocNonPaged(allocSize); + if (*ppLocalPtr == NULL) + return NV_ERR_NO_MEMORY; + } + + return NV_OK; +} + +//! Internal backing method for objDelete. 
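+//! Editor's note (added): deletion is the mirror image of objAddChild above --
+//! the object is destructed, unlinked from its parent, and (unless it was
+//! constructed in place) freed. A minimal lifecycle sketch, assuming the
+//! usual objCreate/objDelete wrapper macros that route here (SomeClass is
+//! hypothetical):
+//!
+//!     SomeClass *pObj = NULL;
+//!     if (objCreate(&pObj, pParent, SomeClass) == NV_OK)
+//!         objDelete(pObj);  // destructs, unlinks from pParent, frees memory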
+void __nvoc_objDelete(Dynamic *pDynamic)
+{
+    Dynamic *pDerivedObj;
+    Object *pObj, *pChild;
+
+    if (pDynamic == NULL)
+    {
+        return;
+    }
+    // objCreate might be skipped for NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT objects.
+    // As a result, objDelete may be called on zeroed objects; skip their destruction.
+    if (pDynamic->__nvoc_rtti == NULL)
+    {
+        return;
+    }
+
+    __nvoc_destructFromBase(pDynamic);
+
+    pObj = dynamicCast(pDynamic, Object);
+    if (pObj->pParent != NULL)
+    {
+        objRemoveChild(pObj->pParent, pObj);
+    }
+
+    if ((pChild = objGetChild(pObj)) != NULL)
+    {
+#if NV_PRINTF_STRINGS_ALLOWED
+        portDbgPrintf("NVOC: %s: Child class %s not freed from parent class %s.",
+                      __FUNCTION__,
+                      objGetClassInfo(pChild)->name,
+                      objGetClassInfo(pObj)->name);
+#endif
+        PORT_BREAKPOINT_CHECKED();
+    }
+
+    pDerivedObj = __nvoc_fullyDerive(pDynamic);
+    if (!(pObj->createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT))
+        portMemFree(pDerivedObj);
+}
+
+//! Fill out an object's RTTI pointers from a class definition.
+//! This function is not needed for metadata v2 (and after).
+//! The linker discards this function if all NVOC objects use v2 or later.
+//! The prototype is needed to suppress GCC's `missing-prototypes` warning.
+void __nvoc_initRtti(Dynamic *pNewObject, const struct NVOC_CLASS_DEF *pClassDef);
+void __nvoc_initRtti(Dynamic *pNewObject, const struct NVOC_CLASS_DEF *pClassDef)
+{
+    NvU32 relativeIdx;
+    for (relativeIdx = 0; relativeIdx < pClassDef->pCastInfo->numRelatives; relativeIdx++)
+    {
+        const struct NVOC_RTTI *pRelative = pClassDef->pCastInfo->relatives[relativeIdx];
+        const struct NVOC_RTTI **ppRelativeRtti = &((Dynamic*)((NvU8*)pNewObject + pRelative->offset))->__nvoc_rtti;
+        *ppRelativeRtti = pRelative;
+    }
+}
+
+
+//! Internal backing method for objCreateDynamic.
+NV_STATUS __nvoc_objCreateDynamic(
+    Dynamic **ppNewObject,
+    Dynamic *pParent,
+    const NVOC_CLASS_INFO *pClassInfo,
+    NvU32 createFlags,
+    ...)
+{
+    NV_STATUS status;
+    va_list args;
+
+    const struct NVOC_CLASS_DEF *pClassDef =
+        (const struct NVOC_CLASS_DEF*)pClassInfo;
+
+    if (pClassDef == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+    else if (pClassDef->objCreatefn == NULL)
+    {
+        return NV_ERR_INVALID_CLASS;
+    }
+
+    va_start(args, createFlags);
+    status = pClassDef->objCreatefn(ppNewObject, pParent, createFlags, args);
+    va_end(args);
+
+    return status;
+}
+
+Dynamic *objDynamicCastById_IMPL(Dynamic *pFromObj, NVOC_CLASS_ID classId)
+{
+    NvU32 i, numBases;
+    Dynamic *pDerivedObj;
+
+    const struct NVOC_RTTI *const *bases;
+    const struct NVOC_RTTI *pFromRtti;
+    const struct NVOC_RTTI *pDerivedRtti;
+
+    if (pFromObj == NULL)
+    {
+        return NULL;
+    }
+
+    pFromRtti = pFromObj->__nvoc_rtti;
+
+    // fastpath, we're dynamic casting to what we already have
+    if (classId == pFromRtti->pClassDef->classInfo.classId)
+    {
+        return pFromObj;
+    }
+
+    pDerivedObj = __nvoc_fullyDerive(pFromObj);
+    pDerivedRtti = pDerivedObj->__nvoc_rtti;
+
+    // fastpath, we're dynamic casting to the fully derived class
+    if (classId == pDerivedRtti->pClassDef->classInfo.classId)
+    {
+        return pDerivedObj;
+    }
+
+    // slowpath, search all the possibilities for a match
+    numBases = pDerivedRtti->pClassDef->pCastInfo->numRelatives;
+    bases = pDerivedRtti->pClassDef->pCastInfo->relatives;
+
+    for (i = 0; i < numBases; i++)
+    {
+        if (classId == bases[i]->pClassDef->classInfo.classId)
+        {
+            return (Dynamic*)((NvU8*)pDerivedObj + bases[i]->offset);
+        }
+    }
+
+    return NULL;
+}
+
+//! Internal backing method for dynamicCast.
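+//! Expected behavior, sketched with hypothetical classes (Derived derives
+//! from Base; neither class is part of this file):
+//!
+//!     Derived *pDerived = ...;
+//!     Base *pBase = dynamicCast(pDerived, Base);        // pointer adjusted by
+//!                                                       // the Base offset
+//!     Derived *pBack = dynamicCast(pBase, Derived);     // recovers pDerived
+//!     Unrelated *pNone = dynamicCast(pBase, Unrelated); // NULL: no relation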
+Dynamic *__nvoc_dynamicCast(Dynamic *pFromObj, const NVOC_CLASS_INFO *pClassInfo)
+{
+    return objDynamicCastById(pFromObj, pClassInfo->classId);
+}
+
+/*!
+ * @brief Internal destructor dispatch for non-fully-derived pointers.
+ *
+ * Resolves pDynamic to its most derived pointer and then calls the real
+ * destructor on the fully-derived object.
+ */
+void __nvoc_destructFromBase(Dynamic *pDynamic)
+{
+    Dynamic *pDerivedObj = __nvoc_fullyDerive(pDynamic);
+    pDerivedObj->__nvoc_rtti->dtor(pDerivedObj);
+}
+
+const struct NVOC_EXPORTED_METHOD_DEF* nvocGetExportedMethodDefFromMethodInfo_IMPL(const struct NVOC_EXPORT_INFO *pExportInfo, NvU32 methodId)
+{
+    NvU32 exportLength;
+    const struct NVOC_EXPORTED_METHOD_DEF *exportArray;
+
+    if (pExportInfo == NULL)
+        return NULL;
+
+    exportLength = pExportInfo->numEntries;
+    exportArray = pExportInfo->pExportEntries;
+
+    if (exportArray != NULL && exportLength > 0)
+    {
+        // The export array is sorted by methodId, so we can binary search it
+        NvU32 low = 0;
+        NvU32 high = exportLength;
+        while (1)
+        {
+            NvU32 mid = (low + high) / 2;
+
+            if (exportArray[mid].methodId == methodId)
+                return &exportArray[mid];
+
+            if (high == mid || low == mid)
+                break;
+
+            if (exportArray[mid].methodId > methodId)
+                high = mid;
+            else
+                low = mid;
+        }
+    }
+
+    return NULL;
+}
+
+const struct NVOC_EXPORTED_METHOD_DEF *objGetExportedMethodDef_IMPL(Dynamic *pObj, NvU32 methodId)
+{
+    const struct NVOC_CASTINFO *const pCastInfo = pObj->__nvoc_rtti->pClassDef->pCastInfo;
+    const NvU32 numRelatives = pCastInfo->numRelatives;
+    const struct NVOC_RTTI *const *relatives = pCastInfo->relatives;
+    NvU32 i;
+
+    for (i = 0; i < numRelatives; i++)
+    {
+        const struct NVOC_EXPORTED_METHOD_DEF *pDef = nvocGetExportedMethodDefFromMethodInfo_IMPL(relatives[i]->pClassDef->pExportInfo, methodId);
+        if (pDef != NULL)
+            return pDef;
+    }
+
+    return NULL;
+}
+
diff --git a/src/nvidia/src/libraries/nvport/core/core.c b/src/nvidia/src/libraries/nvport/core/core.c
new file mode 100644
index 0000000..7b79534
--- /dev/null
+++ b/src/nvidia/src/libraries/nvport/core/core.c
@@ -0,0 +1,98 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "nvport/nvport.h"
+
+typedef struct _PORT_STATE
+{
+    NvU32 initCount;
+} PORT_STATE;
+static PORT_STATE portState;
+
+// The RISC-V implementation of atomics requires initialization, so the
+// initCount atomic operations are disabled for RISC-V builds.
+#if PORT_IS_MODULE_SUPPORTED(atomic) && !NVCPU_IS_RISCV64
+#define PORT_DEC(x) portAtomicDecrementS32((volatile NvS32 *)&x)
+#define PORT_INC(x) portAtomicIncrementS32((volatile NvS32 *)&x)
+#else
+#define PORT_DEC(x) --x
+#define PORT_INC(x) ++x
+#endif
+
+
+/// @todo Add a better way to initialize all modules
+NV_STATUS portInitialize(void)
+{
+    if (PORT_INC(portState.initCount) == 1)
+    {
+#if PORT_IS_MODULE_SUPPORTED(debug)
+        portDbgInitialize();
+#endif
+#if PORT_IS_MODULE_SUPPORTED(atomic)
+        portAtomicInit();
+#endif
+#if PORT_IS_MODULE_SUPPORTED(sync)
+        portSyncInitialize();
+#endif
+#if PORT_IS_MODULE_SUPPORTED(memory)
+        portMemInitialize();
+#endif
+#if PORT_IS_MODULE_SUPPORTED(crypto)
+        portCryptoInitialize();
+#endif
+#if PORT_IS_MODULE_SUPPORTED(cpu)
+        portCpuInitialize();
+#endif
+    }
+    return NV_OK;
+}
+
+void portShutdown(void)
+{
+    if (PORT_DEC(portState.initCount) == 0)
+    {
+#if PORT_IS_MODULE_SUPPORTED(cpu)
+        portCpuShutdown();
+#endif
+#if PORT_IS_MODULE_SUPPORTED(crypto)
+        portCryptoShutdown();
+#endif
+#if PORT_IS_MODULE_SUPPORTED(memory)
+#if (!defined(DEBUG) || defined(NV_MODS)) && !NVCPU_IS_RISCV64
+        portMemShutdown(NV_TRUE);
+#else
+        portMemShutdown(NV_FALSE);
+#endif
+#endif
+#if PORT_IS_MODULE_SUPPORTED(sync)
+        portSyncShutdown();
+#endif
+#if PORT_IS_MODULE_SUPPORTED(debug)
+        portDbgShutdown();
+#endif
+    }
+}
+
+NvBool portIsInitialized(void)
+{
+    return portState.initCount > 0;
+}
diff --git a/src/nvidia/src/libraries/nvport/cpu/cpu_common.c b/src/nvidia/src/libraries/nvport/cpu/cpu_common.c
new file mode 100644
index 0000000..0d9c9f1
--- /dev/null
+++ b/src/nvidia/src/libraries/nvport/cpu/cpu_common.c
@@ -0,0 +1,61 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief CPU module function implementations which are shared across platforms
+ *
+ */
+
+#include "nvport/nvport.h"
+#include "cpu_common.h"
+
+void
+portCpuInitialize(void)
+{
+    PORT_CPU_SET_IMC_BAR_DESC_INIT_STATE(NV_FALSE);
+}
+
+void
+portCpuShutdown(void)
+{
+    //
+    // Not returning a status to the caller since that is the norm in nvport
+    // for init and shutdown functions
+    //
+    if (PORT_CPU_GET_IMC_BAR_DESC_INIT_STATE() == NV_TRUE)
+    {
+        //
+        // If PORT_CPU_GET_IMC_BAR_DESC_INIT_STATE is true then
+        // portCpuExFreeImcBarDesc will be supported. The following check
+        // avoids compile-time errors on platforms where it is not.
+        //
+        #if PORT_IS_FUNC_SUPPORTED(portCpuExFreeImcBarDesc)
+        if (portCpuExFreeImcBarDesc(PORT_CPU_GET_IMC_BAR_DESC()) != NV_OK)
+        {
+            PORT_BREAKPOINT_DEBUG();
+        }
+        #endif
+    }
+    PORT_CPU_SET_IMC_BAR_DESC_INIT_STATE(NV_FALSE);
+}
diff --git a/src/nvidia/src/libraries/nvport/cpu/cpu_common.h b/src/nvidia/src/libraries/nvport/cpu/cpu_common.h
new file mode 100644
index 0000000..a9c7ee3
--- /dev/null
+++ b/src/nvidia/src/libraries/nvport/cpu/cpu_common.h
@@ -0,0 +1,54 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief CPU module private defines/interfaces
+ */
+
+#ifndef _NVPORT_CPU_COMMON_H_
+#define _NVPORT_CPU_COMMON_H_
+
+#include "nvport/nvport.h"
+
+//
+// Structure representing internal state for CPU
+//
+typedef struct PORT_CPU_STATE
+{
+    // BAR descriptor for the Integrated Memory Controller
+    PORT_CPU_BAR_DESC imcBarDesc;
+
+    // If init for IMC BAR descriptor is done
+    NvBool bImcBarDescInit;
+} PORT_CPU_STATE;
+
+PORT_CPU_STATE gCpuPortState;
+
+#define PORT_CPU_GET_IMC_BAR_DESC() (&(gCpuPortState.imcBarDesc))
+
+#define PORT_CPU_GET_IMC_BAR_DESC_INIT_STATE() (gCpuPortState.bImcBarDescInit)
+
+#define PORT_CPU_SET_IMC_BAR_DESC_INIT_STATE(state) (gCpuPortState.bImcBarDescInit = state)
+#endif // _NVPORT_CPU_COMMON_H_
+/// @}
diff --git a/src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c b/src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c
new file mode 100644
index 0000000..f2d34cc
--- /dev/null
+++ b/src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c
@@ -0,0 +1,190 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief CRYPTO module PRNG implementation using the xorshift algorithm.
+ *
+ * For details about the Xorshift algorithms, see:
+ * https://en.wikipedia.org/wiki/Xorshift
+ *
+ * @note Xorshift generators keep either 128 bits or 1024 bits of state. The
+ * algorithm author suggests seeding a splitmix64 generator with a 64-bit
+ * value and using its output to initialize the xorshift state.
+ * See https://prng.di.unimi.it/ for details.
+ *
+ * @warning Xorshift algorithms are NOT CRYPTOGRAPHICALLY SECURE. They generally
+ * perform well on standard randomness test suites, but are not suitable for
+ * security sensitive operations such as key generation. If you require a CSRNG,
+ * use @ref portCryptoExTrueRandomGetU32 and family.
+ */
+#include "nvport/nvport.h"
+
+
+/**
+ * @brief Number of 64bit words used to store the state of the algorithm.
+ * xorshift128+ uses 2 qwords of state, and xorshift1024* uses 16 qwords
+ */
+#define XORSHIFT_STATE_QWORDS 2
+
+struct PORT_CRYPTO_PRNG
+{
+    NvU64 state[XORSHIFT_STATE_QWORDS];
+};
+PORT_CRYPTO_PRNG *portCryptoDefaultGenerator;
+
+void portCryptoInitialize(void)
+{
+    NvU64 seed;
+#if defined(PORT_CRYPTO_PRNG_SEED)
+    seed = PORT_CRYPTO_PRNG_SEED;
+#elif PORT_IS_FUNC_SUPPORTED(portCryptoExTrueRandomGetU64)
+    seed = portCryptoExTrueRandomGetU64();
+#elif PORT_IS_MODULE_SUPPORTED(time)
+    seed = portTimeGetUptimeNanosecondsHighPrecision();
+#elif defined(NVRM) && !defined(NVWATCH)
+    {
+        extern NvU64 osGetTimestamp(void);
+        seed = osGetTimestamp();
+    }
+#else
+    seed = (NvUPtr)&portCryptoDefaultGenerator;
+#endif
+    portCryptoPseudoRandomSetSeed(seed);
+}
+
+void portCryptoShutdown(void)
+{
+    portCryptoPseudoRandomGeneratorDestroy(portCryptoDefaultGenerator);
+    portCryptoDefaultGenerator = NULL;
+}
+
+
+/**
+ * @brief Initializes a xorshift state from a 64-bit seed. Performed using a
+ * splitmix64 PRNG.
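+ *
+ * The splitmix64 pass matters for two reasons: an all-zero state is a fixed
+ * point of xorshift (the generator would emit zeros forever), and nearby
+ * seeds would otherwise produce strongly correlated streams. Mixing each
+ * state word through splitmix64 avoids both problems.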
+ *
+ * Adapted from: https://xorshift.di.unimi.it/splitmix64.c
+ */
+static void _initState(NvU64 seed64, NvU64 state[XORSHIFT_STATE_QWORDS])
+{
+    NvU32 i;
+    for (i = 0; i < XORSHIFT_STATE_QWORDS; i++)
+    {
+        NvU64 z = (seed64 += 0x9E3779B97F4A7C15ULL);
+        z = (z ^ (z >> 30)) * 0xBF58476D1CE4E5B9ULL;
+        z = (z ^ (z >> 27)) * 0x94D049BB133111EBULL;
+        state[i] = z ^ (z >> 31);
+    }
+}
+
+/**
+ * @brief Get the next 64bit value using the xorshift128+ algorithm
+ *
+ * Adapted from: https://xorshift.di.unimi.it/xorshift128plus.c
+ */
+static NvU64 _xorshift128plus_GetU64(NvU64 state[2])
+{
+    NvU64 s1 = state[0];
+    const NvU64 s0 = state[1];
+    state[0] = s0;
+    s1 ^= s1 << 23; // a
+    state[1] = s1 ^ s0 ^ (s1 >> 18) ^ (s0 >> 5); // b, c
+    return state[1] + s0;
+}
+
+PORT_CRYPTO_PRNG *portCryptoPseudoRandomGeneratorCreate(NvU64 seed)
+{
+    PORT_CRYPTO_PRNG *pPrng = portMemAllocNonPaged(sizeof(*pPrng));
+
+    if (pPrng != NULL)
+    {
+        _initState(seed, pPrng->state);
+    }
+    return pPrng;
+}
+
+void portCryptoPseudoRandomGeneratorDestroy(PORT_CRYPTO_PRNG *pPrng)
+{
+    portMemFree(pPrng);
+}
+
+NvU32 portCryptoPseudoRandomGeneratorGetU32(PORT_CRYPTO_PRNG *pPrng)
+{
+    return (NvU32) _xorshift128plus_GetU64(pPrng->state);
+}
+NvU64 portCryptoPseudoRandomGeneratorGetU64(PORT_CRYPTO_PRNG *pPrng)
+{
+    return _xorshift128plus_GetU64(pPrng->state);
+}
+
+NV_STATUS portCryptoPseudoRandomGeneratorFillBuffer(PORT_CRYPTO_PRNG *pPrng, NvU8 *pBuffer, NvLength bufSize)
+{
+    NvLength i;
+
+    PORT_ASSERT_CHECKED(pPrng != NULL);
+
+    /** @note Unlike True Random generators which don't have seeds, here we must
+     *  preserve the complete order of bytes across platforms. That means that
+     *  we cannot fill the misaligned section first, then copy aligned qwords,
+     *  and then fill the remainder - that approach would drop and reorder
+     *  bytes of the generated stream.
+     */
+
+    // Maybe require 64bit alignment for buffers:
+    // PORT_ASSERT_CHECKED(portUtilCheckAlignment(pBuffer, sizeof(NvU64)));
+
+    if (pBuffer == NULL)
+        return NV_ERR_INVALID_POINTER;
+
+    for (i = 0; i < bufSize; i += 8)
+    {
+        NvU64 x = _xorshift128plus_GetU64(pPrng->state);
+        portMemCopy(pBuffer+i, bufSize-i, &x, (bufSize-i < 8) ? bufSize-i : 8);
+    }
+
+    return NV_OK;
+}
+
+
+void portCryptoPseudoRandomSetSeed(NvU64 seed)
+{
+    if (portCryptoDefaultGenerator)
+        portCryptoPseudoRandomGeneratorDestroy(portCryptoDefaultGenerator);
+    portCryptoDefaultGenerator = portCryptoPseudoRandomGeneratorCreate(seed);
+}
+
+NvU32 portCryptoPseudoRandomGetU32(void)
+{
+    return portCryptoPseudoRandomGeneratorGetU32(portCryptoDefaultGenerator);
+}
+
+NvU64 portCryptoPseudoRandomGetU64(void)
+{
+    return portCryptoPseudoRandomGeneratorGetU64(portCryptoDefaultGenerator);
+}
+
+NV_STATUS portCryptoPseudoRandomFillBuffer(NvU8 *pBuffer, NvLength bufSize)
+{
+    return portCryptoPseudoRandomGeneratorFillBuffer(portCryptoDefaultGenerator, pBuffer, bufSize);
+}
diff --git a/src/nvidia/src/libraries/nvport/memory/memory_generic.h b/src/nvidia/src/libraries/nvport/memory/memory_generic.h
new file mode 100644
index 0000000..5627190
--- /dev/null
+++ b/src/nvidia/src/libraries/nvport/memory/memory_generic.h
@@ -0,0 +1,195 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief Generic implementations of MEMORY module functions
+ *
+ */
+
+#include "nvport/nvport.h"
+
+#if defined(PORT_MEM_USE_GENERIC_portMemSetPattern)
+void *
+portMemSetPattern
+(
+    void *pData,
+    NvLength lengthBytes,
+    const NvU8 *pPattern,
+    NvLength patternBytes
+)
+{
+    PORT_ASSERT_CHECKED(pData != NULL);
+    PORT_ASSERT_CHECKED(pPattern != NULL);
+    PORT_ASSERT_CHECKED(patternBytes > 0);
+
+    if (lengthBytes > 0)
+    {
+        void *p = pData;
+        while (lengthBytes > patternBytes)
+        {
+            portMemCopy(p, patternBytes, pPattern, patternBytes);
+            p = (NvU8*)p + patternBytes;
+            lengthBytes -= patternBytes;
+        }
+        portMemCopy(p, lengthBytes, pPattern, lengthBytes);
+    }
+    return pData;
+}
+#endif
+
+#if defined(PORT_MEM_USE_GENERIC_portMemMove)
+void *
+portMemMove
+(
+    void *pDestination,
+    NvLength destSize,
+    const void *pSource,
+    NvLength srcSize
+)
+{
+    NvU32 *pDst32;
+    NvU8 *pDst8;
+    const NvU32 *pSrc32;
+    const NvU8 *pSrc8;
+    NvLength dwords = 0;
+    NvLength bytes = srcSize;
+    PORT_ASSERT_CHECKED(pDestination != NULL);
+    PORT_ASSERT_CHECKED(pSource != NULL);
+    PORT_ASSERT_CHECKED(srcSize <= destSize);
+
+    if (pDestination == NULL || pSource == NULL || srcSize > destSize)
+    {
+        return NULL;
+    }
+
+    if (pDestination == pSource)
+    {
+        return pDestination;
+    }
+
+    if ((((NvUPtr)pSource & 3) == 0) && (((NvUPtr)pDestination & 3) == 0))
+    {
+        dwords = srcSize / sizeof(NvU32);
+        bytes = srcSize % sizeof(NvU32);
+    }
+
+    if (pDestination > pSource)
+    {
+        pDst8 = (NvU8*)pDestination + srcSize;
+        pSrc8 = (const NvU8*)pSource + srcSize;
+
+        while (bytes--)
+        {
+            PORT_MEM_WR08(--pDst8, PORT_MEM_RD08(--pSrc8));
+        }
+        pDst32 = (NvU32*)pDst8;
+        pSrc32 = (const NvU32*)pSrc8;
+        while (dwords--)
+        {
+            PORT_MEM_WR32(--pDst32, PORT_MEM_RD32(--pSrc32));
+        }
+    }
+    else
+    {
+        pDst32 = (NvU32*)pDestination;
+        pSrc32 = (const NvU32*)pSource;
+
+        while (dwords--)
+        {
+            PORT_MEM_WR32(pDst32++, PORT_MEM_RD32(pSrc32++));
+        }
+        pDst8 = (NvU8*)pDst32;
+        pSrc8 = (const NvU8*)pSrc32;
+        while (bytes--)
+        {
+            PORT_MEM_WR08(pDst8++, PORT_MEM_RD08(pSrc8++));
+        }
+    }
+    return pDestination;
+}
+#endif
+
+#if defined(PORT_MEM_USE_GENERIC_portMemCopy)
+void *
+portMemCopy
+(
+    void *pDestination,
+    NvLength destSize,
+    const void *pSource,
+    NvLength srcSize
+)
+{
+    // API guarantees this is a NOP when destSize==0
+    if (destSize == 0)
+        return pDestination;
+
+    PORT_ASSERT_CHECKED(!portUtilCheckOverlap((const NvU8*)pDestination, destSize,
+                                              (const NvU8*)pSource, srcSize));
+    return portMemMove(pDestination, destSize, pSource, srcSize);
+}
+#endif
+
+
+#if defined(PORT_MEM_USE_GENERIC_portMemCmp)
+NvS32
+portMemCmp
+(
+    const void *pData0,
+    const void *pData1,
+    NvLength lengthBytes
+)
+{
+    const NvU8 *p0 = (const NvU8*)pData0;
+    const NvU8 *p1 = (const NvU8*)pData1;
+    PORT_ASSERT_CHECKED(pData0 != NULL);
+    PORT_ASSERT_CHECKED(pData1 != NULL);
+    PORT_ASSERT_CHECKED(lengthBytes > 0);
+    while (lengthBytes--)
+    {
+        NvU8 u0 = PORT_MEM_RD08(p0++);
+        NvU8 u1 = PORT_MEM_RD08(p1++);
+        if (u0 != u1)
+            return u0 - u1;
+    }
+    return 0;
+}
+#endif
+
+#if defined(PORT_MEM_USE_GENERIC_portMemSet)
+void *
+portMemSet
+(
+    void *pData,
+    NvU8 value,
+    NvLength lengthBytes
+)
+{
+    NvLength i;
+    for (i = 0; i < lengthBytes; i++)
+    {
+        PORT_MEM_WR08(((NvU8 *)pData)+i, value);
+    }
+    return pData;
+}
+#endif
diff --git a/src/nvidia/src/libraries/nvport/memory/memory_tracking.c b/src/nvidia/src/libraries/nvport/memory/memory_tracking.c
new file mode 100644
index 0000000..558fca8
--- /dev/null
+++ b/src/nvidia/src/libraries/nvport/memory/memory_tracking.c
@@ -0,0 +1,1945 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief MEMORY module tracking functions implementation
+ *
+ */
+
+#include "nvport/nvport.h"
+#include "nvtypes.h"
+#include
+
+#if NVOS_IS_LIBOS && defined(DEBUG)
+#include "gsp_error_injection.h"
+#endif
+
+#if !PORT_IS_MODULE_SUPPORTED(debug)
+#error "DEBUG module must be present for memory tracking"
+#endif
+
+#if PORT_MEM_TRACK_USE_LIMIT
+#include "os/os.h"
+#endif
+
+#if NVOS_IS_LIBOS
+#define PORT_MEM_THREAD_SAFE_ALLOCATIONS 0
+#else
+#define PORT_MEM_THREAD_SAFE_ALLOCATIONS 1
+#endif
+
+#if PORT_MEM_THREAD_SAFE_ALLOCATIONS && !PORT_IS_MODULE_SUPPORTED(atomic)
+#error "ATOMIC module must be present for memory tracking"
+#endif
+
+#if PORT_MEM_THREAD_SAFE_ALLOCATIONS
+#define PORT_MEM_ATOMIC_ADD_SIZE portAtomicAddSize
+#define PORT_MEM_ATOMIC_SUB_SIZE portAtomicSubSize
+#define PORT_MEM_ATOMIC_DEC_U32 portAtomicDecrementU32
+#define PORT_MEM_ATOMIC_INC_U32 portAtomicIncrementU32
+#define PORT_MEM_ATOMIC_SET_U32 portAtomicSetU32
+#define PORT_MEM_ATOMIC_CAS_SIZE portAtomicCompareAndSwapSize
+#define PORT_MEM_ATOMIC_CAS_U32 portAtomicCompareAndSwapU32
+#else
+//
+// We can just stub out the atomic operations for non-atomic ones and not waste
+// cycles on synchronization
+//
+#define PORT_MEM_ATOMIC_ADD_SIZE(pVal, val) (*((volatile NvSPtr *)pVal) += val)
+#define PORT_MEM_ATOMIC_SUB_SIZE(pVal, val) (*((volatile NvSPtr *)pVal) -= val)
+#define PORT_MEM_ATOMIC_DEC_U32(pVal) (--(*((volatile NvU32 *)pVal)))
+#define PORT_MEM_ATOMIC_INC_U32(pVal) (++(*((volatile NvU32 *)pVal)))
+#define PORT_MEM_ATOMIC_SET_U32(pVal, val) (*((volatile NvU32 *)pVal) = val)
+#define PORT_MEM_ATOMIC_CAS_SIZE(pVal, newVal, oldVal) \
+    ((*pVal == oldVal) ? ((*((volatile NvSPtr *)pVal) = newVal), NV_TRUE) : NV_FALSE)
+#define PORT_MEM_ATOMIC_CAS_U32(pVal, newVal, oldVal) \
+    ((*pVal == oldVal) ? ((*((volatile NvU32 *)pVal) = newVal), NV_TRUE) : NV_FALSE)
+#endif // !PORT_MEM_THREAD_SAFE_ALLOCATIONS
+
+struct PORT_MEM_ALLOCATOR_IMPL
+{
+    PORT_MEM_ALLOCATOR_TRACKING tracking;
+};
+
+//
+// Debug print macros
+//
+#if PORT_MEM_TRACK_PRINT_LEVEL == PORT_MEM_TRACK_PRINT_LEVEL_SILENT
+#define PORT_MEM_PRINT_ERROR(...)
+#define PORT_MEM_PRINT_INFO(...)
+#elif PORT_MEM_TRACK_PRINT_LEVEL == PORT_MEM_TRACK_PRINT_LEVEL_BASIC
+#define PORT_MEM_PRINT_ERROR(...) portDbgPrintf(__VA_ARGS__)
+#define PORT_MEM_PRINT_INFO(...)
+#else
+#define PORT_MEM_PRINT_ERROR(...) portDbgPrintf(__VA_ARGS__)
+#define PORT_MEM_PRINT_INFO(...) portDbgPrintf(__VA_ARGS__)
+#endif
+
+// Simple spinlock implementation, used when the sync module is not included.
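+//
+// A minimal usage sketch of the lock macros defined below (illustrative
+// only; 'lock' is any void pointer initialized by PORT_MEM_LOCK_INIT):
+//
+//     void *lock;
+//     PORT_MEM_LOCK_INIT(lock);     // allocates and initializes the spinlock
+//     PORT_MEM_LOCK_ACQUIRE(lock);  // fallback impl spins on a CAS 0 -> 1
+//     // ... touch shared tracking state ...
+//     PORT_MEM_LOCK_RELEASE(lock);  // stores 0 to unlock
+//     PORT_MEM_LOCK_DESTROY(lock);  // frees the spinlock memory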
+#if !PORT_IS_MODULE_SUPPORTED(sync)
+
+#if NVCPU_IS_RISCV64
+#error "Sync module should be enabled for RISC-V builds"
+#endif
+
+typedef volatile NvU32 PORT_SPINLOCK;
+static NvLength portSyncSpinlockSize = sizeof(PORT_SPINLOCK);
+static NV_STATUS portSyncSpinlockInitialize(PORT_SPINLOCK *pSpinlock)
+{
+    *pSpinlock = 0;
+    return NV_OK;
+}
+static void portSyncSpinlockAcquire(PORT_SPINLOCK *pSpinlock)
+{
+    while (!PORT_MEM_ATOMIC_CAS_U32(pSpinlock, 1, 0));
+}
+static void portSyncSpinlockRelease(PORT_SPINLOCK *pSpinlock)
+{
+    PORT_MEM_ATOMIC_SET_U32(pSpinlock, 0);
+}
+static void portSyncSpinlockDestroy(PORT_SPINLOCK *pSpinlock)
+{
+    PORT_UNREFERENCED_VARIABLE(pSpinlock);
+}
+#endif
+
+#define PORT_MEM_LOCK_INIT(lock)                                     \
+    do {                                                             \
+        lock = _portMemAllocNonPagedUntracked(portSyncSpinlockSize); \
+        portSyncSpinlockInitialize(lock);                            \
+    } while (0)
+#define PORT_MEM_LOCK_DESTROY(lock)    \
+    do {                               \
+        portSyncSpinlockDestroy(lock); \
+        _portMemFreeUntracked(lock);   \
+    } while (0)
+#define PORT_MEM_LOCK_ACQUIRE(lock) portSyncSpinlockAcquire(lock)
+#define PORT_MEM_LOCK_RELEASE(lock) portSyncSpinlockRelease(lock)
+
+
+//
+// List link operation that operates on structures that have pNext and pPrev
+// fields. Assumes the root always exists.
+//
+#define PORT_LOCKED_LIST_LINK(pRoot, pNode, lock) \
+    do {                                          \
+        PORT_MEM_LOCK_ACQUIRE(lock);              \
+        (pNode)->pNext = (pRoot);                 \
+        (pNode)->pPrev = (pRoot)->pPrev;          \
+        (pRoot)->pPrev = (pNode);                 \
+        (pNode)->pPrev->pNext = (pNode);          \
+        PORT_MEM_LOCK_RELEASE(lock);              \
+    } while (0)
+
+#define PORT_LOCKED_LIST_UNLINK(pRoot, pNode, lock) \
+    do {                                            \
+        PORT_MEM_LOCK_ACQUIRE(lock);                \
+        (pNode)->pNext->pPrev = (pNode)->pPrev;     \
+        (pNode)->pPrev->pNext = (pNode)->pNext;     \
+        PORT_MEM_LOCK_RELEASE(lock);                \
+    } while (0)
+
+//
+// All memory tracking globals are contained in this structure
+//
+static struct PORT_MEM_GLOBALS
+{
+    PORT_MEM_ALLOCATOR_TRACKING mainTracking;
+    void *trackingLock;
+    struct
+    {
+        PORT_MEM_ALLOCATOR paged;
+        PORT_MEM_ALLOCATOR nonPaged;
+        PORT_MEM_ALLOCATOR_IMPL pagedImpl;
+        PORT_MEM_ALLOCATOR_IMPL nonPagedImpl;
+    } alloc;
+    NvU32 initCount;
+    NvU32 totalAllocators;
+#if PORT_MEM_TRACK_USE_LIMIT
+    NvBool bLimitEnabled;
+    PORT_MEM_ALLOCATOR_TRACKING *pGfidTracking[PORT_MEM_LIMIT_MAX_GFID];
+#endif
+} portMemGlobals;
+
+//
+// Memory counter implementation
+//
+#if PORT_MEM_TRACK_USE_COUNTER
+#if PORT_MEM_TRACK_ALLOC_SIZE
+static NV_INLINE NvLength
+_portMemExTrackingGetAllocUsableSizeWrapper
+(
+    void *pMem
+)
+{
+#if PORT_MEM_HEADER_HAS_BLOCK_SIZE
+    return PORT_MEM_SUB_HEADER_PTR(pMem)->blockSize;
+#endif
+}
+static NV_INLINE void
+_portMemExTrackingSetOrGetAllocUsableSize
+(
+    void *pMem,
+    NvLength *pSize
+)
+{
+#if PORT_MEM_HEADER_HAS_BLOCK_SIZE
+    PORT_MEM_SUB_HEADER_PTR(pMem)->blockSize = *pSize;
+#else
+    *pSize = _portMemExTrackingGetAllocUsableSizeWrapper(pMem);
+#endif
+}
+#endif // PORT_MEM_TRACK_ALLOC_SIZE
+static NV_INLINE void
+_portMemCounterInit
+(
+    PORT_MEM_COUNTER *pCounter
+)
+{
+    portMemSet(pCounter, 0, sizeof(*pCounter));
+}
+static NV_INLINE void
+_portMemCounterInc
+(
+    PORT_MEM_COUNTER *pCounter,
+    NvLength size
+)
+{
+    NvU32 activeAllocs;
+    NvLength activeSize = 0;
+
+    activeAllocs = PORT_MEM_ATOMIC_INC_U32(&pCounter->activeAllocs);
+    PORT_MEM_ATOMIC_INC_U32(&pCounter->totalAllocs);
+#if PORT_MEM_TRACK_ALLOC_SIZE
+    //
+    // activeSize is only tracked on configurations where we can retrieve the
+    // allocation size from allocation metadata in _portMemCounterDec.
+    //
+    activeSize = PORT_MEM_ATOMIC_ADD_SIZE(&pCounter->activeSize, size);
+#endif
+
+    //
+    // Note: this can overflow on 32-bit platforms if we exceed 4GB cumulative
+    // allocations. It's not trivial to fix, since NvPort doesn't emulate 64-bit
+    // atomics on 32-bit platforms, so just assume this doesn't happen (or
+    // doesn't matter too much if it does, since it's only for reporting).
+    //
+    PORT_MEM_ATOMIC_ADD_SIZE(&pCounter->totalSize, size);
+
+    // Update the peak stats if this allocation established a new peakSize
+    {
+        NvU32 peakAllocs;
+        NvLength peakSize = pCounter->peakSize;
+        while (activeSize > peakSize)
+        {
+            PORT_MEM_ATOMIC_CAS_SIZE(&pCounter->peakSize, activeSize, peakSize);
+            peakSize = pCounter->peakSize;
+        }
+
+        //
+        // Ensure peakAllocs stays (approximately) in sync with peakSize, rather
+        // than always taking the greatest peakAllocs, so that the peak stats
+        // report is consistent.
+        //
+        do
+        {
+            peakAllocs = pCounter->peakAllocs;
+
+            //
+            // Only attempt to update the peakAllocs if activeSize is still the
+            // peakSize.
+            //
+            if (activeSize != pCounter->peakSize)
+                break;
+        } while (!PORT_MEM_ATOMIC_CAS_U32(&pCounter->peakAllocs, activeAllocs, peakAllocs));
+    }
+}
+static NV_INLINE void
+_portMemCounterDec
+(
+    PORT_MEM_COUNTER *pCounter,
+    NvLength size
+)
+{
+    PORT_MEM_ATOMIC_DEC_U32(&pCounter->activeAllocs);
+#if PORT_MEM_TRACK_ALLOC_SIZE
+    PORT_MEM_ATOMIC_SUB_SIZE(&pCounter->activeSize, size);
+#else
+    PORT_UNREFERENCED_VARIABLE(size);
+#endif
+}
+
+#define PORT_MEM_COUNTER_INIT(pCounter) _portMemCounterInit(pCounter)
+#define PORT_MEM_COUNTER_INC(pCounter, size) _portMemCounterInc(pCounter, size)
+#define PORT_MEM_COUNTER_DEC(pCounter, size) _portMemCounterDec(pCounter, size)
+#else
+#define PORT_MEM_COUNTER_INIT(x)
+#define PORT_MEM_COUNTER_INC(x, y) PORT_UNREFERENCED_VARIABLE(y)
+#define PORT_MEM_COUNTER_DEC(x, y) PORT_UNREFERENCED_VARIABLE(y)
+#endif // COUNTER
+
+
+//
+// Memory fenceposts implementation
+//
+#if PORT_MEM_TRACK_USE_FENCEPOSTS
+#define PORT_MEM_FENCE_HEAD_MAGIC 0x68656164 // 'head'
+#define PORT_MEM_FENCE_TAIL_MAGIC 0x7461696c // 'tail'
+#define PORT_MEM_FENCE_FREE_MAGIC 0x66726565 // 'free'
+
+static NV_INLINE void
+_portMemFenceInit
+(
+    PORT_MEM_ALLOCATOR *pAlloc,
+    void *pMem,
+    NvLength size
+)
+{
+    PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1;
+    PORT_MEM_FOOTER *pTail = (PORT_MEM_FOOTER*)((NvU8*)pMem + size);
+
+    pHead->fence.pAllocator = pAlloc;
+    pHead->fence.magic = PORT_MEM_FENCE_HEAD_MAGIC;
+    pTail->fence.magic = PORT_MEM_FENCE_TAIL_MAGIC;
+}
+
+static NV_INLINE void
+_portMemFenceCheck
+(
+    PORT_MEM_ALLOCATOR *pAlloc,
+    void *pMem,
+    NvLength size
+)
+{
+    PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1;
+    PORT_MEM_FOOTER *pTail = (PORT_MEM_FOOTER*)((NvU8*)pMem + size);
+
+    if (pHead->fence.magic != PORT_MEM_FENCE_HEAD_MAGIC ||
+        pTail->fence.magic != PORT_MEM_FENCE_TAIL_MAGIC)
+    {
+        PORT_MEM_PRINT_ERROR("Memory corruption detected on block %p\n", pMem);
+        PORT_ASSERT_CHECKED(pHead->fence.magic == PORT_MEM_FENCE_HEAD_MAGIC);
+        PORT_ASSERT_CHECKED(pTail->fence.magic == PORT_MEM_FENCE_TAIL_MAGIC);
+    }
+    if (pHead->fence.pAllocator != pAlloc)
+    {
+        PORT_MEM_PRINT_ERROR("Freeing block %p using the wrong allocator (%p instead of %p)\n",
+                             pMem, pAlloc, pHead->fence.pAllocator);
+        PORT_ASSERT_CHECKED(pHead->fence.pAllocator == pAlloc);
+    }
+}
+
+#define PORT_MEM_FENCE_CHECK(pAlloc, pMem, size) _portMemFenceCheck(pAlloc, pMem, size)
+#define PORT_MEM_FENCE_INIT(pAlloc, pMem, size) _portMemFenceInit(pAlloc, pMem, size)
+#else
+#define PORT_MEM_FENCE_INIT(x, y, z)
+#define PORT_MEM_FENCE_CHECK(x, y, z)
+#endif // FENCEPOSTS
+
+
+//
+// Memory allocation lists implementation
+//
+#if PORT_MEM_TRACK_USE_ALLOCLIST
+static NV_INLINE void
+_portMemListAdd
+(
+    PORT_MEM_ALLOCATOR_TRACKING *pTracking,
+    void *pMem
+)
+{
+    PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1;
+    PORT_MEM_LIST *pList = &pHead->list;
+    pList->pNext = pList;
+    pList->pPrev = pList;
+    if (!PORT_MEM_ATOMIC_CAS_SIZE(&pTracking->pFirstAlloc, pList, NULL))
+    {
+        PORT_LOCKED_LIST_LINK(pTracking->pFirstAlloc, pList, pTracking->listLock);
+    }
+}
+static NV_INLINE void
+_portMemListRemove
+(
+    PORT_MEM_ALLOCATOR_TRACKING *pTracking,
+    void *pMem
+)
+{
+    PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1;
+    PORT_MEM_LIST *pList = &pHead->list;
+
+    if (!PORT_MEM_ATOMIC_CAS_SIZE(&pList->pNext, NULL, pList))
+    {
+        PORT_LOCKED_LIST_UNLINK(pTracking->pFirstAlloc, pList, pTracking->listLock);
+    }
+    PORT_MEM_ATOMIC_CAS_SIZE(&pTracking->pFirstAlloc, pList->pNext, pList);
+}
+
+static NV_INLINE PORT_MEM_HEADER *
+_portMemListGetHeader
+(
+    PORT_MEM_LIST *pList
+)
+{
+    return (PORT_MEM_HEADER*)((NvU8*)pList - (NvUPtr)(&((PORT_MEM_HEADER*)NULL)->list));
+}
+#define PORT_MEM_LIST_INIT(pTracking)              \
+    do {                                           \
+        (pTracking)->pFirstAlloc = NULL;           \
+        PORT_MEM_LOCK_INIT((pTracking)->listLock); \
+    } while (0)
+#define PORT_MEM_LIST_DESTROY(pTracking) PORT_MEM_LOCK_DESTROY((pTracking)->listLock)
+#define PORT_MEM_LIST_ADD(pTracking, pMem) _portMemListAdd(pTracking, pMem)
+#define PORT_MEM_LIST_REMOVE(pTracking, pMem) _portMemListRemove(pTracking, pMem)
+#else
+#define PORT_MEM_LIST_INIT(x)
+#define PORT_MEM_LIST_DESTROY(x)
+#define PORT_MEM_LIST_ADD(x, y)
+#define PORT_MEM_LIST_REMOVE(x, y)
+#endif // ALLOCLIST
+
+
+
+//
+// Memory allocation-caller info implementation
+//
+#if PORT_MEM_TRACK_USE_CALLERINFO
+
+static NV_INLINE void
+_portMemCallerInfoInitMem
+(
+    void *pMem,
+    PORT_MEM_CALLERINFO callerInfo
+)
+{
+    PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1;
+    portMemCopy(&pHead->callerInfo, sizeof(callerInfo),
+                &callerInfo, sizeof(callerInfo));
+}
+static NV_INLINE void
+_portMemCallerInfoInitTracking
+(
+    PORT_MEM_ALLOCATOR_TRACKING *pTracking,
+    PORT_MEM_CALLERINFO callerInfo
+)
+{
+    portMemCopy(&pTracking->callerInfo, sizeof(callerInfo),
+                &callerInfo, sizeof(callerInfo));
+}
+
+#define PORT_MEM_CALLERINFO_INIT_TRACKING(pTracking) \
+    _portMemCallerInfoInitTracking(pTracking, PORT_MEM_CALLERINFO_PARAM)
+#define PORT_MEM_CALLERINFO_INIT_MEM(pMem) \
+    _portMemCallerInfoInitMem(pMem, PORT_MEM_CALLERINFO_PARAM)
+
+#if PORT_MEM_TRACK_USE_CALLERINFO_IP
+#if NVOS_IS_LIBOS
+//
+// Libos has custom %a format specifier that decodes an instruction pointer into
+// a function / file / line reference when the binary output is decoded.
+//
+#define PORT_MEM_CALLERINFO_PRINT_ARGS(x) "@ %a\n", x
+#else
+#define PORT_MEM_CALLERINFO_PRINT_ARGS(x) "@ 0x%016x\n", x
+#endif // NVOS_IS_LIBOS
+#else
+#define PORT_MEM_CALLERINFO_PRINT_ARGS(x) "@ %s:%u (%s)\n", x.file, x.line, x.func
+#endif // PORT_MEM_TRACK_USE_CALLERINFO_IP
+
+#else // PORT_MEM_TRACK_USE_CALLERINFO
+#define PORT_MEM_CALLERINFO_INIT_TRACKING(x)
+#define PORT_MEM_CALLERINFO_INIT_MEM(x)
+#define PORT_MEM_CALLERINFO_PRINT_ARGS(x) "\n"
+#endif // PORT_MEM_TRACK_USE_CALLERINFO
+
+
+#if PORT_MEM_TRACK_USE_LOGGING
+#include "nvlog/nvlog.h"
+/** @brief Single log entry. Uses 64bit values even on 32bit systems.
+ */
+typedef struct PORT_MEM_LOG_ENTRY
+{
+    NvP64 address;
+    NvP64 allocator;
+    NvLength size; // if size is 0, it is a free() call, not alloc()
+} PORT_MEM_LOG_ENTRY;
+
+#define PORT_MEM_TRACK_LOG_TAG 0x70726d74
+#define PORT_MEM_LOG_ENTRIES 4096
+
+static void
+_portMemLogInit(void)
+{
+    NVLOG_BUFFER_HANDLE hBuffer;
+    nvlogAllocBuffer(PORT_MEM_LOG_ENTRIES * sizeof(PORT_MEM_LOG_ENTRY),
+                     DRF_DEF(LOG, _BUFFER_FLAGS, _FORMAT, _MEMTRACK),
+                     PORT_MEM_TRACK_LOG_TAG, &hBuffer);
+}
+
+static void
+_portMemLogDestroy(void)
+{
+    NVLOG_BUFFER_HANDLE hBuffer;
+    nvlogGetBufferHandleFromTag(PORT_MEM_TRACK_LOG_TAG, &hBuffer);
+    nvlogDeallocBuffer(hBuffer);
+}
+
+static void
+_portMemLogAdd
+(
+    PORT_MEM_ALLOCATOR *pAllocator,
+    void *pMem,
+    NvLength lengthBytes
+)
+{
+    NVLOG_BUFFER_HANDLE hBuffer;
+    PORT_MEM_LOG_ENTRY entry = {0};
+    entry.address = NV_PTR_TO_NvP64(pMem);
+    entry.allocator = NV_PTR_TO_NvP64(pAllocator);
+    entry.size = lengthBytes;
+    nvlogGetBufferHandleFromTag(PORT_MEM_TRACK_LOG_TAG, &hBuffer);
+    nvlogWriteToBuffer(hBuffer, &entry, sizeof(entry));
+}
+
+#define PORT_MEM_LOG_INIT() _portMemLogInit()
+#define PORT_MEM_LOG_DESTROY() _portMemLogDestroy()
+#define PORT_MEM_LOG_ALLOC(pAlloc, pMem, size) \
+    _portMemLogAdd(pAlloc, pMem, size)
+#define PORT_MEM_LOG_FREE(pAlloc, pMem) \
+    _portMemLogAdd(pAlloc, pMem, 0)
+#else
+#define PORT_MEM_LOG_INIT()
+#define PORT_MEM_LOG_DESTROY()
+#define PORT_MEM_LOG_ALLOC(x, y, z)
+#define PORT_MEM_LOG_FREE(x, y)
+#endif // LOGGING
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// Main memory tracking implementation
+//
+////////////////////////////////////////////////////////////////////////////////
+
+//
+// All static function declarations. Definitions at the end of file.
+//
+static void *_portMemAllocatorAllocPagedWrapper(PORT_MEM_ALLOCATOR *pAlloc, NvLength length);
+static void *_portMemAllocatorAllocNonPagedWrapper(PORT_MEM_ALLOCATOR *pAlloc, NvLength length);
+static void _portMemAllocatorFreeWrapper(PORT_MEM_ALLOCATOR *pAlloc, void *pMem);
+static void _portMemAllocatorReleaseWrapper(PORT_MEM_ALLOCATOR *pAlloc);
+
+static PORT_MEM_ALLOCATOR *_portMemAllocatorCreateOnExistingBlock(void *pAlloc, NvLength blockSizeBytes, void *pSpinlock PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM);
+static void *_portMemAllocatorAllocExistingWrapper(PORT_MEM_ALLOCATOR *pAlloc, NvLength length);
+static void _portMemAllocatorFreeExistingWrapper(PORT_MEM_ALLOCATOR *pAlloc, void *pMem);
+
+static void _portMemTrackingRelease(PORT_MEM_ALLOCATOR_TRACKING *pTracking, NvBool bReportLeaks);
+static void _portMemTrackAlloc(
+    PORT_MEM_ALLOCATOR_TRACKING *pTracking,
+    void *pMem,
+    NvLength size,
+    NvU32 gfid
+    PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM
+);
+static NvBool _portMemTrackFree(PORT_MEM_ALLOCATOR_TRACKING *pTracking, void *pMem);
+
+
+
+#if PORT_MEM_TRACK_USE_CALLERINFO
+#undef portMemAllocPaged
+#undef portMemAllocNonPaged
+#undef portMemAllocatorCreatePaged
+#undef portMemAllocatorCreateNonPaged
+#undef portMemInitializeAllocatorTracking
+#undef _portMemAllocatorAlloc
+#undef portMemAllocatorCreateOnExistingBlock
+#undef portMemExAllocatorCreateLockedOnExistingBlock
+// These functions have different names if CallerInfo is enabled; see the
+// sketch below for how the public names are expected to map onto them.
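+//
+// Conceptually, the public headers keep callers unchanged by wrapping the
+// _CallerInfo names in macros; a sketch under that assumption (not a copy of
+// the actual header):
+//
+//     #define portMemAllocNonPaged(size) \
+//         portMemAllocNonPaged_CallerInfo((size), PORT_MEM_CALLERINFO_MAKE)
+//
+// where PORT_MEM_CALLERINFO_MAKE captures file/line/function (or only the
+// instruction pointer when PORT_MEM_TRACK_USE_CALLERINFO_IP is set).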
+#define portMemAllocPaged portMemAllocPaged_CallerInfo
+#define portMemAllocNonPaged portMemAllocNonPaged_CallerInfo
+#define portMemAllocatorCreatePaged portMemAllocatorCreatePaged_CallerInfo
+#define portMemAllocatorCreateNonPaged portMemAllocatorCreateNonPaged_CallerInfo
+#define portMemInitializeAllocatorTracking portMemInitializeAllocatorTracking_CallerInfo
+#define _portMemAllocatorAlloc _portMemAllocatorAlloc_CallerInfo
+#define portMemAllocatorCreateOnExistingBlock portMemAllocatorCreateOnExistingBlock_CallerInfo
+#define portMemExAllocatorCreateLockedOnExistingBlock portMemExAllocatorCreateLockedOnExistingBlock_CallerInfo
+#endif
+
+//
+// Per-process heap limiting implementation
+//
+#if PORT_MEM_TRACK_USE_LIMIT
+static inline NvBool isGfidValid(NvU32 gfid)
+{
+    return (gfid > 0) && (gfid <= PORT_MEM_LIMIT_MAX_GFID);
+}
+
+static void _portMemTrackingGfidRelease(void)
+{
+    int gfidIdx;
+
+    for (gfidIdx = 0; gfidIdx < PORT_MEM_LIMIT_MAX_GFID; ++gfidIdx)
+    {
+        if (portMemGlobals.pGfidTracking[gfidIdx] != NULL)
+        {
+            _portMemTrackingRelease(portMemGlobals.pGfidTracking[gfidIdx], NV_FALSE);
+            portMemGlobals.pGfidTracking[gfidIdx] = NULL;
+        }
+    }
+}
+
+static NV_INLINE void
+_portMemLimitInc(NvU32 gfid, void *pMem, NvLength size)
+{
+    PORT_MEM_HEADER *pMemHeader = PORT_MEM_SUB_HEADER_PTR(pMem);
+    pMemHeader->gfid = gfid;
+    if (portMemGlobals.bLimitEnabled)
+    {
+        if (isGfidValid(gfid))
+        {
+            NvU32 gfidIdx = gfid - 1;
+            PORT_MEM_ATOMIC_ADD_SIZE(&portMemGlobals.pGfidTracking[gfidIdx]->counterGfid, size);
+        }
+    }
+}
+
+static NV_INLINE void
+_portMemLimitDec(void *pMem, NvLength size)
+{
+    if (portMemGlobals.bLimitEnabled)
+    {
+        PORT_MEM_HEADER *pMemHeader = PORT_MEM_SUB_HEADER_PTR(pMem);
+        NvU32 gfid = pMemHeader->gfid;
+
+        if (isGfidValid(gfid))
+        {
+            NvU32 gfidIdx = gfid - 1;
+            if (portMemGlobals.pGfidTracking[gfidIdx]->counterGfid < size)
+            {
+                PORT_MEM_PRINT_ERROR("memory free error: counter underflow\n");
+                PORT_BREAKPOINT_CHECKED();
+            }
+            else
+            {
+                PORT_MEM_ATOMIC_SUB_SIZE(
+                    &portMemGlobals.pGfidTracking[gfidIdx]->counterGfid,
+                    size);
+            }
+        }
+    }
+}
+
+static NV_INLINE NvBool
+_portMemLimitExceeded(NvU32 gfid, NvLength size)
+{
+    NvBool bExceeded = NV_FALSE;
+
+    if (portMemGlobals.bLimitEnabled)
+    {
+        if (isGfidValid(gfid))
+        {
+            NvU32 gfidIdx = gfid - 1;
+            if ((size + portMemGlobals.pGfidTracking[gfidIdx]->counterGfid) >
+                portMemGlobals.pGfidTracking[gfidIdx]->limitGfid)
+            {
+                PORT_MEM_PRINT_ERROR(
+                    "memory allocation denied; GFID %d exceeded per-process heap limit of "
+                    "%"NvUPtr_fmtu"\n",
+                    gfid, portMemGlobals.pGfidTracking[gfidIdx]->limitGfid
+                );
+                bExceeded = NV_TRUE;
+            }
+        }
+    }
+    return bExceeded;
+}
+
+void
+portMemLibosLimitInc(NvU32 gfid, NvLength size)
+{
+    if (portMemGlobals.bLimitEnabled)
+    {
+        if (isGfidValid(gfid))
+        {
+            NvU32 gfidIdx = gfid - 1;
+            PORT_MEM_ATOMIC_ADD_SIZE(&portMemGlobals.pGfidTracking[gfidIdx]->counterLibosGfid, size);
+        }
+    }
+}
+
+void
+portMemLibosLimitDec(NvU32 gfid, NvLength size)
+{
+    if (portMemGlobals.bLimitEnabled)
+    {
+        if (isGfidValid(gfid))
+        {
+            NvU32 gfidIdx = gfid - 1;
+            if (portMemGlobals.pGfidTracking[gfidIdx]->counterLibosGfid < size)
+            {
+                PORT_MEM_PRINT_ERROR("GFID %d memory free error: counter underflow\n"
+                                     "counter = %llu\nsize = %llu\n",
+                                     gfid,
+                                     portMemGlobals.pGfidTracking[gfidIdx]->counterLibosGfid,
+                                     size);
+                PORT_BREAKPOINT_CHECKED();
+            }
+            else
+            {
+                PORT_MEM_ATOMIC_SUB_SIZE(
+                    &portMemGlobals.pGfidTracking[gfidIdx]->counterLibosGfid,
+                    size);
+            }
+        }
+    }
+}
+
+NvBool
+portMemLibosLimitExceeded(NvU32 gfid, NvLength size)
+{
+    NvBool bExceeded = NV_FALSE;
+
+    if (portMemGlobals.bLimitEnabled)
+    {
+        if (isGfidValid(gfid))
+        {
+            NvU32 gfidIdx = gfid - 1;
+            if ((size + portMemGlobals.pGfidTracking[gfidIdx]->counterLibosGfid) >
+                portMemGlobals.pGfidTracking[gfidIdx]->limitLibosGfid)
+            {
+                PORT_MEM_PRINT_ERROR(
+                    "LibOS memory allocation denied; GFID %d exceeded per-VF LibOS heap limit of "
+                    "%"NvUPtr_fmtu"\n"
+                    "counter = %llu\nsize = %llu\n",
+                    gfid,
+                    portMemGlobals.pGfidTracking[gfidIdx]->limitLibosGfid,
+                    portMemGlobals.pGfidTracking[gfidIdx]->counterLibosGfid,
+                    size
+                );
+                bExceeded = NV_TRUE;
+            }
+        }
+        else
+        {
+            // Also fail for invalid GFID
+            PORT_MEM_PRINT_ERROR("LibOS memory allocation denied; GFID %d invalid\n", gfid);
+            bExceeded = NV_TRUE;
+        }
+    }
+    return bExceeded;
+}
+
+#define PORT_MEM_LIMIT_INC(gfid, pMem, size) _portMemLimitInc(gfid, pMem, size)
+#define PORT_MEM_LIMIT_DEC(pMem, size) _portMemLimitDec(pMem, size)
+#define PORT_MEM_LIMIT_EXCEEDED(gfid, size) _portMemLimitExceeded(gfid, size)
+#else
+#define PORT_MEM_LIMIT_INC(gfid, pMem, size)  \
+    do {                                      \
+        PORT_UNREFERENCED_VARIABLE(gfid);     \
+        PORT_UNREFERENCED_VARIABLE(pMem);     \
+        PORT_UNREFERENCED_VARIABLE(size);     \
+    } while (0)
+#define PORT_MEM_LIMIT_DEC(pMem, size)        \
+    do {                                      \
+        PORT_UNREFERENCED_VARIABLE(pMem);     \
+        PORT_UNREFERENCED_VARIABLE(size);     \
+    } while (0)
+#define PORT_MEM_LIMIT_EXCEEDED(gfid, size) (NV_FALSE)
+#endif // PORT_MEM_TRACK_USE_LIMIT
+
+static NV_INLINE PORT_MEM_ALLOCATOR_TRACKING *
+_portMemGetTracking
+(
+    const PORT_MEM_ALLOCATOR *pAlloc
+)
+{
+    if (pAlloc == NULL)
+        return &portMemGlobals.mainTracking;
+    else
+        return pAlloc->pTracking;
+}
+
+
+void
+portMemInitialize(void)
+{
+#if PORT_MEM_TRACK_USE_CALLERINFO
+    PORT_MEM_CALLERINFO_TYPE_PARAM = PORT_MEM_CALLERINFO_MAKE;
+#endif
+    if (PORT_MEM_ATOMIC_INC_U32(&portMemGlobals.initCount) != 1)
+        return;
+
+    portMemGlobals.mainTracking.pAllocator = NULL;
+    portMemGlobals.mainTracking.pNext = &portMemGlobals.mainTracking;
+    portMemGlobals.mainTracking.pPrev = &portMemGlobals.mainTracking;
+    PORT_MEM_COUNTER_INIT(&portMemGlobals.mainTracking.counter);
+    PORT_MEM_LIST_INIT(&portMemGlobals.mainTracking);
+    PORT_MEM_LOCK_INIT(portMemGlobals.trackingLock);
+
+#if PORT_MEM_TRACK_USE_LIMIT
+    // Per-process heap limiting starts out disabled; limits are enabled later
+    // via portMemInitializeAllocatorTrackingLimit().
+    portMemGlobals.bLimitEnabled = NV_FALSE;
+#endif
+
+    portMemGlobals.alloc.paged._portAlloc = _portMemAllocatorAllocPagedWrapper;
+    portMemGlobals.alloc.nonPaged._portAlloc = _portMemAllocatorAllocNonPagedWrapper;
+    portMemGlobals.alloc.paged._portFree = _portMemAllocatorFreeWrapper;
+    portMemGlobals.alloc.nonPaged._portFree = _portMemAllocatorFreeWrapper;
+    portMemGlobals.alloc.paged._portRelease = NULL;
+    portMemGlobals.alloc.nonPaged._portRelease = NULL;
+
+    if (PORT_MEM_TRACK_USE_FENCEPOSTS)
+    {
+        //
+        // Distinct paged and non-paged allocators require PORT_MEM_TRACK_USE_FENCEPOSTS
+        // so that the correct allocator can be looked up from the fenceposts in the
+        // portMemFree path.
+        //
+        portMemGlobals.alloc.paged.pImpl = &portMemGlobals.alloc.pagedImpl;
+        portMemGlobals.alloc.nonPaged.pImpl = &portMemGlobals.alloc.nonPagedImpl;
+
+        portMemInitializeAllocatorTracking(&portMemGlobals.alloc.paged,
+                                           &portMemGlobals.alloc.paged.pImpl->tracking
+                                           PORT_MEM_CALLERINFO_COMMA_PARAM);
+        portMemInitializeAllocatorTracking(&portMemGlobals.alloc.nonPaged,
+                                           &portMemGlobals.alloc.nonPaged.pImpl->tracking
+                                           PORT_MEM_CALLERINFO_COMMA_PARAM);
+    }
+    else
+    {
+        // Use the same impl for both paged and nonpaged.
+        portMemGlobals.alloc.paged.pImpl = &portMemGlobals.alloc.pagedImpl;
+        portMemGlobals.alloc.nonPaged.pImpl = &portMemGlobals.alloc.pagedImpl;
+        portMemInitializeAllocatorTracking(&portMemGlobals.alloc.paged,
+                                           &portMemGlobals.alloc.pagedImpl.tracking
+                                           PORT_MEM_CALLERINFO_COMMA_PARAM);
+        portMemGlobals.alloc.paged.pTracking = &portMemGlobals.alloc.pagedImpl.tracking;
+        portMemGlobals.alloc.nonPaged.pTracking = &portMemGlobals.alloc.pagedImpl.tracking;
+    }
+    PORT_MEM_LOG_INIT();
+}
+void
+portMemShutdown(NvBool bForceSilent)
+{
+    PORT_UNREFERENCED_VARIABLE(bForceSilent);
+    if (PORT_MEM_ATOMIC_DEC_U32(&portMemGlobals.initCount) != 0)
+        return;
+
+#if (PORT_MEM_TRACK_PRINT_LEVEL > PORT_MEM_TRACK_PRINT_LEVEL_SILENT)
+    if (!bForceSilent)
+    {
+        portMemPrintAllTrackingInfo();
+    }
+#endif
+    PORT_MEM_LOG_DESTROY();
+
+    if (PORT_MEM_TRACK_USE_FENCEPOSTS)
+    {
+        _portMemTrackingRelease(&portMemGlobals.alloc.nonPaged.pImpl->tracking, NV_FALSE);
+        _portMemTrackingRelease(&portMemGlobals.alloc.paged.pImpl->tracking, NV_FALSE);
+    }
+    else
+    {
+        _portMemTrackingRelease(&portMemGlobals.alloc.pagedImpl.tracking, NV_FALSE);
+    }
+#if PORT_MEM_TRACK_USE_LIMIT
+    _portMemTrackingGfidRelease();
+#endif
+    PORT_MEM_LOCK_DESTROY(portMemGlobals.trackingLock);
+    PORT_MEM_LIST_DESTROY(&portMemGlobals.mainTracking);
+    portMemSet(&portMemGlobals, 0, sizeof(portMemGlobals));
+}
+
+void *
+portMemAllocPaged
+(
+    NvLength length
+    PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM
+)
+{
+#if defined(__COVERITY__)
+    return __coverity_alloc__(length);
+#endif
+    PORT_MEM_ALLOCATOR *pAlloc = portMemAllocatorGetGlobalPaged();
+    return _portMemAllocatorAlloc(pAlloc, length PORT_MEM_CALLERINFO_COMMA_PARAM);
+}
+
+void *
+portMemAllocNonPaged
+(
+    NvLength length
+    PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM
+)
+{
+#if defined(__COVERITY__)
+    return __coverity_alloc__(length);
+#endif
+    PORT_MEM_ALLOCATOR *pAlloc = portMemAllocatorGetGlobalNonPaged();
+    return _portMemAllocatorAlloc(pAlloc, length PORT_MEM_CALLERINFO_COMMA_PARAM);
+}
+
+void
+portMemFree
+(
+    void *pMem
+)
+{
+    if (pMem != NULL)
+    {
+#if PORT_MEM_TRACK_USE_FENCEPOSTS
+        PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1;
+        PORT_FREE(pHead->fence.pAllocator, pMem);
+#else
+        // Paged/nonpaged are logged together if we don't have fenceposts.
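+        // Without fenceposts there is no per-block record of the owning
+        // allocator, but in this configuration portMemInitialize() points both
+        // global allocators at the same impl/tracking, so freeing through the
+        // paged allocator is correct for either kind of allocation.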
+ PORT_FREE(portMemAllocatorGetGlobalPaged(), pMem); +#endif + } + +#if defined(__COVERITY__) + __coverity_free__(pMem); +#endif +} + +PORT_MEM_ALLOCATOR * +portMemAllocatorCreatePaged(PORT_MEM_CALLERINFO_TYPE_PARAM) +{ + PORT_MEM_ALLOCATOR *pAllocator; + + pAllocator = portMemAllocPaged(PORT_MEM_ALLOCATOR_SIZE + PORT_MEM_CALLERINFO_COMMA_PARAM); + if (pAllocator == NULL) + return NULL; + + pAllocator->pImpl = (PORT_MEM_ALLOCATOR_IMPL*)(pAllocator + 1); + pAllocator->_portAlloc = _portMemAllocatorAllocPagedWrapper; + pAllocator->_portFree = _portMemAllocatorFreeWrapper; + pAllocator->_portRelease = _portMemAllocatorReleaseWrapper; + portMemInitializeAllocatorTracking(pAllocator, &pAllocator->pImpl->tracking + PORT_MEM_CALLERINFO_COMMA_PARAM); + + PORT_MEM_PRINT_INFO("Acquired paged allocator %p ", pAllocator); + PORT_MEM_PRINT_INFO(PORT_MEM_CALLERINFO_PRINT_ARGS(PORT_MEM_CALLERINFO_PARAM)); + + return pAllocator; +} + +PORT_MEM_ALLOCATOR * +portMemAllocatorCreateNonPaged(PORT_MEM_CALLERINFO_TYPE_PARAM) +{ + PORT_MEM_ALLOCATOR *pAllocator; + + pAllocator = portMemAllocNonPaged(PORT_MEM_ALLOCATOR_SIZE + PORT_MEM_CALLERINFO_COMMA_PARAM); + if (pAllocator == NULL) + return NULL; + + pAllocator->pImpl = (PORT_MEM_ALLOCATOR_IMPL*)(pAllocator + 1); + pAllocator->_portAlloc = _portMemAllocatorAllocNonPagedWrapper; + pAllocator->_portFree = _portMemAllocatorFreeWrapper; + pAllocator->_portRelease = _portMemAllocatorReleaseWrapper; + portMemInitializeAllocatorTracking(pAllocator, &pAllocator->pImpl->tracking + PORT_MEM_CALLERINFO_COMMA_PARAM); + + PORT_MEM_PRINT_INFO("Acquired nonpaged allocator %p ", pAllocator); + PORT_MEM_PRINT_INFO(PORT_MEM_CALLERINFO_PRINT_ARGS(PORT_MEM_CALLERINFO_PARAM)); + return pAllocator; +} + + +PORT_MEM_ALLOCATOR * +portMemAllocatorCreateOnExistingBlock +( + void *pPreallocatedBlock, + NvLength blockSizeBytes + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + return _portMemAllocatorCreateOnExistingBlock(pPreallocatedBlock, blockSizeBytes, + NULL PORT_MEM_CALLERINFO_COMMA_PARAM); +} + +PORT_MEM_ALLOCATOR * +portMemExAllocatorCreateLockedOnExistingBlock +( + void *pPreallocatedBlock, + NvLength blockSizeBytes, + void *pSpinlock + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + return _portMemAllocatorCreateOnExistingBlock(pPreallocatedBlock, blockSizeBytes, + pSpinlock PORT_MEM_CALLERINFO_COMMA_PARAM); +} + +void +portMemAllocatorRelease +( + PORT_MEM_ALLOCATOR *pAllocator +) +{ + if (pAllocator == NULL) + { + PORT_BREAKPOINT_CHECKED(); + return; + } + _portMemTrackingRelease(pAllocator->pTracking, NV_TRUE); + PORT_MEM_PRINT_INFO("Released allocator %p\n", pAllocator); + + if (pAllocator->_portRelease != NULL) + pAllocator->_portRelease(pAllocator); +} + + +PORT_MEM_ALLOCATOR * +portMemAllocatorGetGlobalNonPaged(void) +{ + return &portMemGlobals.alloc.nonPaged; +} +PORT_MEM_ALLOCATOR * +portMemAllocatorGetGlobalPaged(void) +{ + return &portMemGlobals.alloc.paged; +} + +void +portMemInitializeAllocatorTracking +( + PORT_MEM_ALLOCATOR *pAlloc, + PORT_MEM_ALLOCATOR_TRACKING *pTracking + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + if (portMemGlobals.initCount == 0) + { + portMemSet(pTracking, 0, sizeof(*pTracking)); + if (pAlloc != NULL) + pAlloc->pTracking = NULL; + return; + } + + pTracking->pAllocator = pAlloc; + if (pAlloc != NULL) + pAlloc->pTracking = pTracking; + PORT_LOCKED_LIST_LINK(&portMemGlobals.mainTracking, pTracking, portMemGlobals.trackingLock); + PORT_MEM_COUNTER_INIT(&pTracking->counter); + PORT_MEM_LIST_INIT(pTracking); + 
PORT_MEM_CALLERINFO_INIT_TRACKING(pTracking); + PORT_MEM_ATOMIC_INC_U32(&portMemGlobals.totalAllocators); +} + +#if PORT_MEM_TRACK_USE_LIMIT +void +portMemInitializeAllocatorTrackingLimit(NvU32 gfid, NvLength limit, NvBool bLimitEnabled) +{ + if (!isGfidValid(gfid)) + return; + + NvU32 gfidIdx = gfid - 1; + + portMemGlobals.pGfidTracking[gfidIdx]->limitGfid = limit; + portMemGlobals.bLimitEnabled = bLimitEnabled; +} + +void +portMemInitializeAllocatorTrackingLibosLimit(NvU32 gfid, NvLength limit) +{ + if (!isGfidValid(gfid)) + return; + + NvU32 gfidIdx = gfid - 1; + + if (portMemGlobals.pGfidTracking[gfidIdx] != NULL) + portMemGlobals.pGfidTracking[gfidIdx]->limitLibosGfid = limit; +} + +void portMemGfidTrackingInit(NvU32 gfid) +{ + if (!isGfidValid(gfid)) + { + PORT_BREAKPOINT_CHECKED(); + return; + } + + NvU32 gfidIdx = gfid - 1; + + if (portMemGlobals.pGfidTracking[gfidIdx] != NULL) + return; + + PORT_MEM_ALLOCATOR_TRACKING *pTracking = + _portMemAllocNonPagedUntracked(sizeof(PORT_MEM_ALLOCATOR_TRACKING)); + + if (pTracking == NULL) + { + portDbgPrintf("!!! Failed memory allocation for pTracking !!!\n"); + PORT_BREAKPOINT_CHECKED(); + return; + } + + portMemSet(pTracking, 0, sizeof(*pTracking)); + pTracking->limitGfid = NV_U64_MAX; + pTracking->counterGfid = 0; + pTracking->gfid = gfid; + pTracking->limitLibosGfid = NV_U64_MAX; + pTracking->counterLibosGfid = 0; + PORT_LOCKED_LIST_LINK(&portMemGlobals.mainTracking, pTracking, portMemGlobals.trackingLock); + PORT_MEM_COUNTER_INIT(&pTracking->counter); + portMemGlobals.pGfidTracking[gfidIdx] = pTracking; +} + +void portMemGfidTrackingFree(NvU32 gfid) +{ + if (!isGfidValid(gfid)) + { + PORT_BREAKPOINT_CHECKED(); + return; + } + + NvU32 gfidIdx = gfid - 1; + PORT_MEM_ALLOCATOR_TRACKING *pTracking = portMemGlobals.pGfidTracking[gfidIdx]; + + if (pTracking == NULL) + { + PORT_BREAKPOINT_CHECKED(); + return; + } + + if (pTracking->counter.activeAllocs != 0) + { + portDbgPrintf(" !!! MEMORY LEAK DETECTED (%u blocks) !!!\n", + pTracking->counter.activeAllocs); + + } + + portMemPrintTrackingInfo(pTracking); +} + +#endif + +void * +_portMemAllocatorAlloc +( + PORT_MEM_ALLOCATOR *pAlloc, + NvLength length + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + NvU32 gfid = 0; + void *pMem = NULL; +#if NVOS_IS_LIBOS && defined(DEBUG) + __error_injection_probe(NV2082_CTRL_GSP_INJECT_TARGET_NVPORT_MEM_ALLOC, 0); + if (g_bMemoryAllocationFailure) + { + portDbgPrintf(" !!! 
MEMORY ALLOCATION FAILURE !!!\n");
+        g_bMemoryAllocationFailure = NV_FALSE;
+        return NULL;
+    }
+#endif
+    if (pAlloc == NULL)
+    {
+        PORT_BREAKPOINT_CHECKED();
+        return NULL;
+    }
+
+#if PORT_MEM_TRACK_USE_LIMIT
+    if (portMemGlobals.bLimitEnabled)
+    {
+        if (osGetCurrentProcessGfid(&gfid) != NV_OK)
+        {
+            PORT_BREAKPOINT_CHECKED();
+            return NULL;
+        }
+    }
+#endif
+
+    // Bail out early if this allocation would exceed the per-process memory limit
+    if (PORT_MEM_LIMIT_EXCEEDED(gfid, length))
+        return NULL;
+
+    if (length > 0)
+    {
+        NvLength paddedLength;
+        //
+        // RISCV64 requires 64-bit alignment of structures. The tracking
+        // footer is placed at offset `length`, so round `length` up to the
+        // next multiple of 8 to keep the footer aligned.
+        //
+#if defined(__riscv)
+        if (PORT_MEM_STAGING_SIZE > 0 && (length & 7))
+        {
+            if (!portSafeAddLength(length & ~7, 8, &length))
+            {
+                return NULL;
+            }
+        }
+#endif
+        if (!portSafeAddLength(length, PORT_MEM_STAGING_SIZE, &paddedLength))
+        {
+            return NULL;
+        }
+        pMem = pAlloc->_portAlloc(pAlloc, paddedLength);
+    }
+    if (pMem != NULL)
+    {
+        pMem = PORT_MEM_ADD_HEADER_PTR(pMem);
+        _portMemTrackAlloc(_portMemGetTracking(pAlloc), pMem, length, gfid
+                           PORT_MEM_CALLERINFO_COMMA_PARAM);
+    }
+    return pMem;
+}
+void
+_portMemAllocatorFree
+(
+    PORT_MEM_ALLOCATOR *pAlloc,
+    void *pMem
+)
+{
+    if (pAlloc == NULL)
+    {
+        PORT_BREAKPOINT_CHECKED();
+        return;
+    }
+    if (pMem != NULL)
+    {
+        if (_portMemTrackFree(_portMemGetTracking(pAlloc), pMem))
+        {
+            pMem = PORT_MEM_SUB_HEADER_PTR(pMem);
+            pAlloc->_portFree(pAlloc, pMem);
+        }
+    }
+}
+
+void
+portMemPrintTrackingInfo
+(
+    const PORT_MEM_ALLOCATOR_TRACKING *pTracking
+)
+{
+    if (pTracking == NULL)
+        pTracking = &portMemGlobals.mainTracking;
+
+    if (pTracking == &portMemGlobals.mainTracking)
+        portDbgPrintf("[NvPort] ======== Aggregate Memory Tracking ========\n");
+    else if ((pTracking == portMemGlobals.alloc.nonPaged.pTracking) &&
+             (pTracking == portMemGlobals.alloc.paged.pTracking))
+        portDbgPrintf("[NvPort] ======== Global Allocator Tracking ========\n");
+    else if (pTracking == portMemGlobals.alloc.nonPaged.pTracking)
+        portDbgPrintf("[NvPort] ======== Global Non-Paged Memory Allocator Tracking ========\n");
+    else if (pTracking == portMemGlobals.alloc.paged.pTracking)
+        portDbgPrintf("[NvPort] ======== Global Paged Memory Allocator Tracking ========\n");
+#if PORT_MEM_TRACK_USE_LIMIT
+    else if (isGfidValid(pTracking->gfid))
+        portDbgPrintf("[NvPort] ======== GFID %u Tracking ========\n", pTracking->gfid);
+#endif
+    else
+        portDbgPrintf("[NvPort] ======== Memory Allocator %p Tracking ========\n", pTracking->pAllocator);
+
+    if (pTracking->counter.activeAllocs != 0)
+        portDbgPrintf(" !!! MEMORY LEAK DETECTED (%u blocks) !!!\n",
+                      pTracking->counter.activeAllocs);
+
+#if PORT_MEM_TRACK_USE_CALLERINFO
+    {
+        portDbgPrintf(" Allocator acquired "
+                      PORT_MEM_CALLERINFO_PRINT_ARGS(pTracking->callerInfo));
+    }
+#endif
+
+#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetHeapSize)
+    //
+    // Heap is shared across all allocators, so only print it with the
+    // aggregate stats.
+ // + if (pTracking == _portMemGetTracking(NULL)) + portDbgPrintf(" HEAP: %"NvUPtr_fmtu" bytes\n", portMemExTrackingGetHeapSize()); +#endif + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetActiveStats) + { + PORT_MEM_TRACK_ALLOCATOR_STATS stats; + + portMemSet(&stats, 0, sizeof(stats)); +#if PORT_MEM_TRACK_USE_LIMIT + if (isGfidValid(pTracking->gfid)) + { + portMemExTrackingGetGfidActiveStats(pTracking->gfid, &stats); + } + else +#endif + { + portMemExTrackingGetActiveStats(pTracking->pAllocator, &stats); + } + + // + // rmtest_gsp test script (dvs_gsp_sanity.sh) depends on this print, so do not change + // format without updating script! + // + portDbgPrintf(" ACTIVE: %u allocations, %"NvUPtr_fmtu" bytes allocated (%"NvUPtr_fmtu" useful, %"NvUPtr_fmtu" meta)\n", + stats.numAllocations, + stats.allocatedSize, + stats.usefulSize, + stats.metaSize); + } +#endif + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetTotalStats) + { + PORT_MEM_TRACK_ALLOCATOR_STATS stats; + + portMemSet(&stats, 0, sizeof(stats)); +#if PORT_MEM_TRACK_USE_LIMIT + if (isGfidValid(pTracking->gfid)) + { + portMemExTrackingGetGfidTotalStats(pTracking->gfid, &stats); + } + else +#endif + { + portMemExTrackingGetTotalStats(pTracking->pAllocator, &stats); + } + portDbgPrintf(" TOTAL: %u allocations, %"NvUPtr_fmtu" bytes allocated (%"NvUPtr_fmtu" useful, %"NvUPtr_fmtu" meta)\n", + stats.numAllocations, + stats.allocatedSize, + stats.usefulSize, + stats.metaSize); + } +#endif + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetPeakStats) + { + PORT_MEM_TRACK_ALLOCATOR_STATS stats; + + portMemSet(&stats, 0, sizeof(stats)); +#if PORT_MEM_TRACK_USE_LIMIT + if (isGfidValid(pTracking->gfid)) + { + portMemExTrackingGetGfidPeakStats(pTracking->gfid, &stats); + } + else +#endif + { + portMemExTrackingGetPeakStats(pTracking->pAllocator, &stats); + } + portDbgPrintf(" PEAK: %u allocations, %"NvUPtr_fmtu" bytes allocated (%"NvUPtr_fmtu" useful, %"NvUPtr_fmtu" meta)\n", + stats.numAllocations, + stats.allocatedSize, + stats.usefulSize, + stats.metaSize); + } +#endif + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetNext) + { + PORT_MEM_TRACK_ALLOC_INFO info; + NvBool bPrinted = NV_FALSE; + void *iterator = NULL; + + do + { + if (portMemExTrackingGetNext(pTracking->pAllocator, &info, &iterator) != NV_OK) + { + portDbgPrintf(" (no active allocations)\n"); + break; + } + else if (!bPrinted) + { + portDbgPrintf(" Currently active allocations:\n"); + bPrinted = NV_TRUE; + } + portDbgPrintf(" - A:%p - 0x%p [%8"NvUPtr_fmtu" bytes] T=%llu ", + info.pAllocator, + info.pMemory, + info.size, + info.timestamp); + portDbgPrintf(PORT_MEM_CALLERINFO_PRINT_ARGS(info.callerInfo)); + } while (iterator != NULL); + } +#endif +} + +void +portMemPrintAllTrackingInfo(void) +{ + const PORT_MEM_ALLOCATOR_TRACKING *pTracking = &portMemGlobals.mainTracking; + PORT_MEM_LOCK_ACQUIRE(portMemGlobals.trackingLock); + do + { + portMemPrintTrackingInfo(pTracking); + } while ((pTracking = pTracking->pNext) != &portMemGlobals.mainTracking); + PORT_MEM_LOCK_RELEASE(portMemGlobals.trackingLock); +} + +#if portMemExTrackingGetActiveStats_SUPPORTED +NV_STATUS +portMemExTrackingGetActiveStats +( + const PORT_MEM_ALLOCATOR *pAllocator, + PORT_MEM_TRACK_ALLOCATOR_STATS *pStats +) +{ + PORT_MEM_ALLOCATOR_TRACKING *pTracking = _portMemGetTracking(pAllocator); + if (pTracking == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + pStats->numAllocations = pTracking->counter.activeAllocs; + pStats->usefulSize = pTracking->counter.activeSize; + pStats->metaSize = pStats->numAllocations * 
PORT_MEM_STAGING_SIZE; + pStats->allocatedSize = pStats->usefulSize + pStats->metaSize; + return NV_OK; +} +#if PORT_MEM_TRACK_USE_LIMIT +NV_STATUS +portMemExTrackingGetGfidActiveStats +( + NvU32 gfid, + PORT_MEM_TRACK_ALLOCATOR_STATS *pStats +) +{ + if (!isGfidValid(gfid)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + NvU32 gfidIdx = gfid - 1; + PORT_MEM_ALLOCATOR_TRACKING *pTracking = portMemGlobals.pGfidTracking[gfidIdx]; + + if (pTracking == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + pStats->numAllocations = pTracking->counter.activeAllocs; + pStats->usefulSize = pTracking->counter.activeSize; + pStats->metaSize = pStats->numAllocations * PORT_MEM_STAGING_SIZE; + pStats->allocatedSize = pStats->usefulSize + pStats->metaSize; + return NV_OK; +} +#endif +#endif + +#if portMemExTrackingGetTotalStats_SUPPORTED +NV_STATUS +portMemExTrackingGetTotalStats +( + const PORT_MEM_ALLOCATOR *pAllocator, + PORT_MEM_TRACK_ALLOCATOR_STATS *pStats +) +{ + PORT_MEM_ALLOCATOR_TRACKING *pTracking = _portMemGetTracking(pAllocator); + if (pTracking == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + pStats->numAllocations = pTracking->counter.totalAllocs; + pStats->usefulSize = pTracking->counter.totalSize; + pStats->metaSize = pStats->numAllocations * PORT_MEM_STAGING_SIZE; + pStats->allocatedSize = pStats->usefulSize + pStats->metaSize; + return NV_OK; +} +#if PORT_MEM_TRACK_USE_LIMIT +NV_STATUS +portMemExTrackingGetGfidTotalStats +( + NvU32 gfid, + PORT_MEM_TRACK_ALLOCATOR_STATS *pStats +) +{ + if (!isGfidValid(gfid)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + NvU32 gfidIdx = gfid - 1; + PORT_MEM_ALLOCATOR_TRACKING *pTracking = portMemGlobals.pGfidTracking[gfidIdx]; + + if (pTracking == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + pStats->numAllocations = pTracking->counter.totalAllocs; + pStats->usefulSize = pTracking->counter.totalSize; + pStats->metaSize = pStats->numAllocations * PORT_MEM_STAGING_SIZE; + pStats->allocatedSize = pStats->usefulSize + pStats->metaSize; + return NV_OK; +} +#endif +#endif + +#if portMemExTrackingGetPeakStats_SUPPORTED +NV_STATUS +portMemExTrackingGetPeakStats +( + const PORT_MEM_ALLOCATOR *pAllocator, + PORT_MEM_TRACK_ALLOCATOR_STATS *pStats +) +{ + PORT_MEM_ALLOCATOR_TRACKING *pTracking = _portMemGetTracking(pAllocator); + if (pTracking == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + pStats->numAllocations = pTracking->counter.peakAllocs; + pStats->usefulSize = pTracking->counter.peakSize; + pStats->metaSize = pStats->numAllocations * PORT_MEM_STAGING_SIZE; + pStats->allocatedSize = pStats->usefulSize + pStats->metaSize; + return NV_OK; +} +#if PORT_MEM_TRACK_USE_LIMIT +NV_STATUS +portMemExTrackingGetGfidPeakStats +( + NvU32 gfid, + PORT_MEM_TRACK_ALLOCATOR_STATS *pStats +) +{ + if (!isGfidValid(gfid)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + NvU32 gfidIdx = gfid - 1; + PORT_MEM_ALLOCATOR_TRACKING *pTracking = portMemGlobals.pGfidTracking[gfidIdx]; + + if (pTracking == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + pStats->numAllocations = pTracking->counter.peakAllocs; + pStats->usefulSize = pTracking->counter.peakSize; + pStats->metaSize = pStats->numAllocations * PORT_MEM_STAGING_SIZE; + pStats->allocatedSize = pStats->usefulSize + pStats->metaSize; + return NV_OK; +} +#endif +#endif + +#if portMemExTrackingGetNext_SUPPORTED +NV_STATUS +portMemExTrackingGetNext +( + const PORT_MEM_ALLOCATOR *pAllocator, + PORT_MEM_TRACK_ALLOC_INFO *pInfo, + void **pIterator +) +{ + PORT_MEM_ALLOCATOR_TRACKING *pTracking = _portMemGetTracking(pAllocator); + 
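+    //
+    // Iterator contract: *pIterator == NULL requests the first allocation;
+    // the iterator is advanced on every call and reset to NULL once the
+    // last entry has been returned. A minimal caller sketch (illustrative
+    // only; portMemPrintTrackingInfo() above is the in-tree user):
+    //
+    //     PORT_MEM_TRACK_ALLOC_INFO info;
+    //     void *it = NULL;
+    //     do
+    //     {
+    //         if (portMemExTrackingGetNext(pAllocator, &info, &it) != NV_OK)
+    //             break;
+    //         // consume info.pMemory, info.size, info.pAllocator here
+    //     } while (it != NULL);
+    //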
PORT_MEM_LIST *pList; + PORT_MEM_HEADER *pHead; + + if (pTracking == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + if (pTracking->pFirstAlloc == NULL) + return NV_ERR_OBJECT_NOT_FOUND; + + if (*pIterator == NULL) + pList = pTracking->pFirstAlloc; + else + pList = (PORT_MEM_LIST*)(*pIterator); + + pHead = _portMemListGetHeader(pList); + + // Advance iterator + if (pList->pNext == pTracking->pFirstAlloc) + *pIterator = NULL; + else + *pIterator = pList->pNext; + + // Populate pInfo + pInfo->pMemory = pHead + 1; + pInfo->size = _portMemExTrackingGetAllocUsableSizeWrapper(pInfo->pMemory); + pInfo->pAllocator = pHead->fence.pAllocator; + pInfo->timestamp = 0; + +#if PORT_MEM_TRACK_USE_CALLERINFO + pInfo->callerInfo = pHead->callerInfo; +#endif + + return NV_OK; +} +#endif + +static void +_portMemTrackingRelease +( + PORT_MEM_ALLOCATOR_TRACKING *pTracking, + NvBool bReportLeaks +) +{ + if (pTracking == NULL) return; + +#if (PORT_MEM_TRACK_PRINT_LEVEL > PORT_MEM_TRACK_PRINT_LEVEL_SILENT) + if (bReportLeaks && (pTracking->counter.activeAllocs != 0)) + portMemPrintTrackingInfo(pTracking); +#else + PORT_UNREFERENCED_VARIABLE(bReportLeaks); +#endif + + PORT_LOCKED_LIST_UNLINK(&portMemGlobals.mainTracking, pTracking, portMemGlobals.trackingLock); + PORT_MEM_LIST_DESTROY(pTracking); +#if PORT_MEM_TRACK_USE_LIMIT + if (isGfidValid(pTracking->gfid)) + { + _portMemFreeUntracked(pTracking); + } + else +#endif + { + PORT_MEM_ATOMIC_DEC_U32(&portMemGlobals.totalAllocators); + } +} + +static void +_portMemTrackAlloc +( + PORT_MEM_ALLOCATOR_TRACKING *pTracking, + void *pMem, + NvLength size, + NvU32 gfid + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + PORT_UNREFERENCED_VARIABLE(pMem); + if (pTracking == NULL) return; + +#if PORT_MEM_TRACK_ALLOC_SIZE + // + // Either set the block size in the header, or override it with the value + // from the underlying allocator (which may be bigger than what was + // requested). This keeps the counters consistent with the free path. 
+    //
+    _portMemExTrackingSetOrGetAllocUsableSize(pMem, &size);
+#endif
+
+    PORT_MEM_PRINT_INFO("Allocated %"NvUPtr_fmtu" bytes at address %p", size, pMem);
+    PORT_MEM_PRINT_INFO(PORT_MEM_CALLERINFO_PRINT_ARGS(PORT_MEM_CALLERINFO_PARAM));
+
+    PORT_MEM_COUNTER_INC(&pTracking->counter, size);
+    PORT_MEM_COUNTER_INC(&portMemGlobals.mainTracking.counter, size);
+    PORT_MEM_LIMIT_INC(gfid, pMem, size);
+
+    PORT_MEM_FENCE_INIT(pTracking->pAllocator, pMem, size);
+    PORT_MEM_LIST_ADD(pTracking, pMem);
+    PORT_MEM_CALLERINFO_INIT_MEM(pMem);
+    PORT_MEM_LOG_ALLOC(pTracking->pAllocator, pMem, size);
+#if PORT_MEM_TRACK_USE_LIMIT
+    if (isGfidValid(gfid))
+    {
+        NvU32 gfidIdx = gfid - 1;
+
+        PORT_MEM_COUNTER_INC(&portMemGlobals.pGfidTracking[gfidIdx]->counter, size);
+    }
+#endif
+}
+
+static NvBool
+_portMemTrackFree
+(
+    PORT_MEM_ALLOCATOR_TRACKING *pTracking,
+    void *pMem
+)
+{
+    NvLength size = 0;
+#if PORT_MEM_TRACK_USE_FENCEPOSTS
+    PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1;
+#endif
+
+    if (pTracking == NULL) return NV_TRUE;
+
+#if PORT_MEM_TRACK_USE_FENCEPOSTS
+    if (pHead->fence.magic == PORT_MEM_FENCE_FREE_MAGIC)
+    {
+        PORT_MEM_PRINT_ERROR("Detected double free of block at address %p\n", pMem);
+        PORT_ASSERT_CHECKED(pHead->fence.magic != PORT_MEM_FENCE_FREE_MAGIC);
+        return NV_FALSE;
+    }
+#endif
+
+#if PORT_MEM_TRACK_ALLOC_SIZE
+    size = _portMemExTrackingGetAllocUsableSizeWrapper(pMem);
+    PORT_MEM_PRINT_INFO("Freeing %"NvUPtr_fmtu"-byte block at address %p\n", size, pMem);
+#else
+    PORT_MEM_PRINT_INFO("Freeing block at address %p\n", pMem);
+#endif
+
+    PORT_MEM_COUNTER_DEC(&pTracking->counter, size);
+    PORT_MEM_COUNTER_DEC(&portMemGlobals.mainTracking.counter, size);
+    PORT_MEM_LIMIT_DEC(pMem, size);
+
+    PORT_MEM_FENCE_CHECK(pTracking->pAllocator, pMem, size);
+    PORT_MEM_LIST_REMOVE(pTracking, pMem);
+    PORT_MEM_LOG_FREE(pTracking->pAllocator, pMem);
+#if PORT_MEM_TRACK_USE_LIMIT
+    PORT_MEM_HEADER *pMemHeader = PORT_MEM_SUB_HEADER_PTR(pMem);
+
+    if (isGfidValid(pMemHeader->gfid))
+    {
+        NvU32 gfidIdx = pMemHeader->gfid - 1;
+
+        PORT_MEM_COUNTER_DEC(&portMemGlobals.pGfidTracking[gfidIdx]->counter, size);
+    }
+#endif
+
+#if PORT_MEM_TRACK_USE_FENCEPOSTS
+    pHead->fence.magic = PORT_MEM_FENCE_FREE_MAGIC;
+#endif
+
+    return NV_TRUE;
+}
+
+
+static void *
+_portMemAllocatorAllocPagedWrapper
+(
+    PORT_MEM_ALLOCATOR *pAlloc,
+    NvLength length
+)
+{
+    PORT_UNREFERENCED_VARIABLE(pAlloc);
+    return _portMemAllocPagedUntracked(length);
+}
+
+static void *
+_portMemAllocatorAllocNonPagedWrapper
+(
+    PORT_MEM_ALLOCATOR *pAlloc,
+    NvLength length
+)
+{
+    PORT_UNREFERENCED_VARIABLE(pAlloc);
+    return _portMemAllocNonPagedUntracked(length);
+}
+
+static void
+_portMemAllocatorFreeWrapper
+(
+    PORT_MEM_ALLOCATOR *pAlloc,
+    void *pMem
+)
+{
+    PORT_UNREFERENCED_VARIABLE(pAlloc);
+    _portMemFreeUntracked(pMem);
+}
+
+static void
+_portMemAllocatorReleaseWrapper
+(
+    PORT_MEM_ALLOCATOR *pAllocator
+)
+{
+    portMemFree(pAllocator);
+}
+
+/// @todo Add these as intrinsics to UTIL module
+static NV_INLINE NvBool _isBitSet(NvU32 *vect, NvU32 bit)
+{
+    return !!(vect[bit/32] & NVBIT32(bit%32));
+}
+static NV_INLINE void _setBit(NvU32 *vect, NvU32 bit)
+{
+    vect[bit/32] |= NVBIT32(bit%32);
+}
+static NV_INLINE void _clearBit(NvU32 *vect, NvU32 bit)
+{
+    vect[bit/32] &= ~NVBIT32(bit%32);
+}
+
+static PORT_MEM_ALLOCATOR *
+_portMemAllocatorCreateOnExistingBlock
+(
+    void *pPreallocatedBlock,
+    NvLength blockSizeBytes,
+    void *pSpinlock
+    PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM
+)
+{
+    PORT_MEM_ALLOCATOR *pAllocator = (PORT_MEM_ALLOCATOR *)pPreallocatedBlock;
+    PORT_MEM_BITVECTOR *pBitVector;
+    PORT_MEM_BITVECTOR_CHUNK *pLastChunkInBlock;
+    NvU32 bitVectorSize;
+
+    if ((pPreallocatedBlock == NULL) ||
+        (blockSizeBytes < PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_EXTRA_SIZE) ||
+        (blockSizeBytes > NV_S32_MAX))
+    {
+        return NULL;
+    }
+
+    pAllocator->_portAlloc   = _portMemAllocatorAllocExistingWrapper;
+    pAllocator->_portFree    = _portMemAllocatorFreeExistingWrapper;
+    pAllocator->_portRelease = NULL;
+    pAllocator->pTracking    = NULL; // No tracking for this allocator
+    pAllocator->pImpl        = (PORT_MEM_ALLOCATOR_IMPL*)(pAllocator + 1);
+
+    //
+    // PORT_MEM_BITVECTOR (pAllocator->pImpl) and PORT_MEM_ALLOCATOR_TRACKING
+    // (pAllocator->pImpl->tracking) are used mutually exclusively:
+    // when pAllocator->pTracking == NULL, the data in pAllocator->pImpl->tracking
+    // is not used and pBitVector occupies the same memory location;
+    // when pAllocator->pImpl->tracking is in use, PORT_MEM_BITVECTOR is not.
+    //
+    pBitVector = (PORT_MEM_BITVECTOR*)(pAllocator->pImpl);
+    pBitVector->pSpinlock = pSpinlock;
+
+    // Calculate total number of chunks available
+    pBitVector->pChunks = (PORT_MEM_BITVECTOR_CHUNK *)(pBitVector + 1);
+    pBitVector->pChunks = (void*)NV_ALIGN_UP((NvUPtr)pBitVector->pChunks,
+                                             (NvUPtr)PORT_MEM_BITVECTOR_CHUNK_SIZE);
+
+    pLastChunkInBlock = (void*)NV_ALIGN_DOWN((NvUPtr)pPreallocatedBlock +
+                                                     blockSizeBytes -
+                                                     PORT_MEM_BITVECTOR_CHUNK_SIZE,
+                                             (NvUPtr)PORT_MEM_BITVECTOR_CHUNK_SIZE);
+    if (pLastChunkInBlock < pBitVector->pChunks)
+    {
+        pBitVector->numChunks = 0;
+    }
+    else
+    {
+        pBitVector->numChunks = (NvU32)(pLastChunkInBlock - pBitVector->pChunks) + 1;
+    }
+    bitVectorSize = (NvU32)((NvU8*)pBitVector->pChunks - (NvU8*)pBitVector->bits);
+
+    // Each chunk needs two tracking bits: an "allocated" bit at index i and
+    // a "last chunk of an allocation" bit at index numChunks + i.
+    while (bitVectorSize*8 < pBitVector->numChunks*2)
+    {
+        // Too many chunks to track in the current bit vector; grow the bit
+        // vector by one chunk and track one chunk fewer.
+        pBitVector->pChunks++;
+        pBitVector->numChunks--;
+        bitVectorSize = (NvU32)((NvU8*)pBitVector->pChunks - (NvU8*)pBitVector->bits);
+    }
+    portMemSet(pBitVector->bits, 0, bitVectorSize);
+
+    PORT_MEM_PRINT_INFO("Acquired preallocated block allocator %p (%"NvUPtr_fmtu" bytes) ", pAllocator, blockSizeBytes);
+    PORT_MEM_PRINT_INFO(PORT_MEM_CALLERINFO_PRINT_ARGS(PORT_MEM_CALLERINFO_PARAM));
+    return pAllocator;
+}
+
+static void *
+_portMemAllocatorAllocExistingWrapper
+(
+    PORT_MEM_ALLOCATOR *pAlloc,
+    NvLength length
+)
+{
+    NvU32 chunksNeeded = (NvU32)NV_DIV_AND_CEIL(length, PORT_MEM_BITVECTOR_CHUNK_SIZE);
+    void *pMem = NULL;
+    NvU32 chunksFound = 0;
+    NvU32 i;
+    PORT_MEM_BITVECTOR *pBitVector = (PORT_MEM_BITVECTOR*)(pAlloc->pImpl);
+    PORT_SPINLOCK *pSpinlock = (PORT_SPINLOCK*)(pBitVector->pSpinlock);
+
+    if (chunksNeeded > pBitVector->numChunks)
+    {
+        return NULL;
+    }
+    if (pSpinlock != NULL)
+    {
+        portSyncSpinlockAcquire(pSpinlock);
+    }
+    for (i = 0; i < pBitVector->numChunks; i++)
+    {
+        NvBool bWholeWordSet;
+        bWholeWordSet = pBitVector->bits[i/32] == ~0U;
+        if (bWholeWordSet || (_isBitSet(pBitVector->bits, i)))
+        {
+            // Chunk not available as a whole.
+            chunksFound = 0;
+            // Skip fully set words
+            if (bWholeWordSet)
+            {
+                i += 31;
+            }
+            if (chunksNeeded > (pBitVector->numChunks - i - (bWholeWordSet ?
1 : 0))) + { + break; + } + } + else + { + chunksFound++; + if (chunksFound == chunksNeeded) + { + NvU32 j; + NvU32 firstAllocatedChunk = i - chunksFound + 1; + + pMem = pBitVector->pChunks[firstAllocatedChunk]; + // Mark all acquired chunks as occupied + for (j = firstAllocatedChunk; j <= i; j++) + { + _setBit(pBitVector->bits, j); + } + // Mark last chunk of allocation + _setBit(pBitVector->bits, pBitVector->numChunks + i); + break; + } + } + } + if (pSpinlock != NULL) + { + portSyncSpinlockRelease(pSpinlock); + } + if (pMem == NULL) + { + PORT_MEM_PRINT_ERROR("Memory allocation failed.\n"); + } + return pMem; +} + +static void +_portMemAllocatorFreeExistingWrapper +( + PORT_MEM_ALLOCATOR *pAlloc, + void *pMem +) +{ + PORT_MEM_BITVECTOR_CHUNK *pChunk = (PORT_MEM_BITVECTOR_CHUNK *)pMem; + NvU32 i; + PORT_MEM_BITVECTOR *pBitVector = (PORT_MEM_BITVECTOR*)(pAlloc->pImpl); + PORT_SPINLOCK *pSpinlock = (PORT_SPINLOCK*)(pBitVector->pSpinlock); + + if (((NvUPtr)pMem < (NvUPtr)pBitVector->pChunks) || + ((NvUPtr)pMem > (NvUPtr)(pBitVector->pChunks + pBitVector->numChunks))) + { + // pMem not inside this allocator. + PORT_BREAKPOINT_CHECKED(); + return; + } + + if (pSpinlock != NULL) + { + portSyncSpinlockAcquire(pSpinlock); + } + for (i = (NvU32)(pChunk - pBitVector->pChunks); i < pBitVector->numChunks; i++) + { + // Mark chunk as free + _clearBit(pBitVector->bits, i); + if (_isBitSet(pBitVector->bits, pBitVector->numChunks + i)) + { + // Clear last-allocation-bit and bail + _clearBit(pBitVector->bits, pBitVector->numChunks + i); + break; + } + } + if (pSpinlock != NULL) + { + portSyncSpinlockRelease(pSpinlock); + } +} diff --git a/src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c b/src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c new file mode 100644 index 0000000..b7445e6 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c @@ -0,0 +1,206 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/** + * @file + * @brief MEMORY module implementation for Unix kernelmode + * + * This implementation uses the NVIDIA OS interface into the unix kernels. + */ + +#if !PORT_IS_KERNEL_BUILD +#error "This file can only be compiled as part of the kernel build." +#endif +#if !NVOS_IS_UNIX +#error "This file can only be compiled on Unix." 
+#endif + +#include "nvport/nvport.h" +#include "os-interface.h" + +/** + * @note All kernel memory in unix is non-paged. + */ +void * +_portMemAllocPagedUntracked +( + NvLength lengthBytes +) +{ + return _portMemAllocNonPagedUntracked(lengthBytes); +} + +void * +_portMemAllocNonPagedUntracked +( + NvLength lengthBytes +) +{ + void *pMem = NULL; + PORT_ASSERT_CHECKED(lengthBytes > 0); + if (lengthBytes > 0) + os_alloc_mem(&pMem, lengthBytes); + return pMem; +} + + + +void +_portMemFreeUntracked +( + void *pData +) +{ + if (pData != NULL) + { + os_free_mem(pData); + } +} + +void * +portMemCopy +( + void *pDestination, + NvLength destSize, + const void *pSource, + NvLength srcSize +) +{ + // API guarantees this is a NOP when destSize==0 + if (destSize == 0) + { + return pDestination; + } + + PORT_ASSERT_CHECKED(pDestination != NULL); + PORT_ASSERT_CHECKED(pSource != NULL); + PORT_ASSERT_CHECKED(srcSize <= destSize); + PORT_ASSERT_CHECKED(!portUtilCheckOverlap(pDestination, destSize, + pSource, srcSize)); + + if ((pSource == NULL) || (pDestination == NULL) || (srcSize > destSize)) + { + return NULL; + } + return os_mem_copy(pDestination, pSource, srcSize); +} + + +void * +portMemSet +( + void *pData, + NvU8 value, + NvLength lengthBytes +) +{ + if (lengthBytes == 0) + { + return pData; + } + if (pData == NULL) + { + return pData; + } + return os_mem_set(pData, value, lengthBytes); +} + +NvS32 +portMemCmp +( + const void *pData0, + const void *pData1, + NvLength lengthBytes +) +{ + if (lengthBytes == 0) + { + return 0; + } + if ((pData0 == NULL) || (pData1 == NULL)) + { + return -1; + } + return os_mem_cmp(pData0, pData1, lengthBytes); +} + + + +#define PORT_MEM_USE_GENERIC_portMemSetPattern +#define PORT_MEM_USE_GENERIC_portMemMove +#include "memory_generic.h" + +NV_STATUS +portMemExCopyFromUser +( + const NvP64 pUser, + void *pKernel, + NvLength lengthBytes +) +{ + if (pKernel == NULL) + { + return NV_ERR_INVALID_POINTER; + } + if (lengthBytes == 0) + { + return NV_ERR_INVALID_ARGUMENT; + } + return os_memcpy_from_user(pKernel, NvP64_VALUE(pUser), lengthBytes); +} + +NV_STATUS +portMemExCopyToUser +( + const void *pKernel, + NvP64 pUser, + NvLength lengthBytes +) +{ + if (pKernel == NULL) + { + return NV_ERR_INVALID_POINTER; + } + if (lengthBytes == 0) + { + return NV_ERR_INVALID_ARGUMENT; + } + return os_memcpy_to_user(NvP64_VALUE(pUser), (void*)pKernel, lengthBytes); +} + +NvLength +portMemExGetPageSize(void) +{ + return os_page_size; +} + +// Large allocations (>KMALLOC_LIMIT) will fail, but it is safe to call +NvBool +portMemExSafeForPagedAlloc(void) +{ + return NV_TRUE; +} +NvBool +portMemExSafeForNonPagedAlloc(void) +{ + return NV_TRUE; +} diff --git a/src/nvidia/src/libraries/nvport/string/string_generic.c b/src/nvidia/src/libraries/nvport/string/string_generic.c new file mode 100644 index 0000000..e94a977 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/string/string_generic.c @@ -0,0 +1,379 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief STRING module implementation for platforms without stdlib support + */ + +#include "nvport/nvport.h" +#include "nvmisc.h" + + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringLength +NvLength +portStringLength +( + const char *str +) +{ + const char *begin = str; + + PORT_ASSERT_CHECKED(str != NULL); + + while ('\0' != *str) str++; + + return str - begin; +} + +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringLengthSafe +NvLength +portStringLengthSafe +( + const char *str, + NvLength maxLength +) +{ + const char *begin = str; + + PORT_ASSERT_CHECKED(str != NULL); + + while ((0 != maxLength--) && ('\0' != *str)) + str++; + + return str - begin; +} +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringCompare +NvS32 +portStringCompare +( + const char *str1, + const char *str2, + NvLength maxLength +) +{ + NvLength i; + + PORT_ASSERT_CHECKED(str1 != NULL); + PORT_ASSERT_CHECKED(str2 != NULL); + + for (i = 0; i < maxLength; i++) + { + if (str1[i] != str2[i]) + { + // + // Cast to unsigned before assigning to NvS32, to avoid sign + // extension. E.g., if str1[i] is 0xff, we want s1 to contain + // 0xff, not -1. In practice, this shouldn't matter for printable + // characters, but still... 
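+            // For example, if str1[i] is 0xFF and str2[i] is 0x01, the result
+            // must be positive (255 - 1 = 254), not negative (-1 - 1 = -2).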
+ // + NvS32 s1 = (unsigned char)str1[i]; + NvS32 s2 = (unsigned char)str2[i]; + return s1 - s2; + } + + if ((str1[i] == '\0') && + (str2[i] == '\0')) + { + break; + } + } + + return 0; +} +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringCopy +NvLength +portStringCopy +( + char *dest, + NvLength destSize, + const char *src, + NvLength srcSize +) +{ + NvLength minCopyLength; + NvLength srcLen; + + PORT_ASSERT_CHECKED(dest != NULL); + PORT_ASSERT_CHECKED(src != NULL); + + PORT_ASSERT_CHECKED(destSize != 0); + PORT_ASSERT_CHECKED(srcSize != 0); + + srcLen = portStringLengthSafe(src, srcSize); + if (srcLen == srcSize) srcLen--; + + minCopyLength = NV_MIN(destSize, srcLen + 1); + + PORT_ASSERT_CHECKED(minCopyLength != 0); + + if (minCopyLength > 1) + portMemCopy(dest, destSize, src, minCopyLength - 1); + + dest[minCopyLength - 1] = '\0'; + + return minCopyLength; +} +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringCat +char * +portStringCat +( + char *str, + NvLength strSize, + const char *cat, + NvLength catSize +) +{ + NvLength strLen; + NvLength catLen; + NvLength minCatLength; + char* begin; + + PORT_ASSERT_CHECKED(str != NULL); + PORT_ASSERT_CHECKED(cat != NULL); + + strLen = portStringLengthSafe(str, strSize); + catLen = portStringLengthSafe(cat, catSize); + + // In case of no NULL terminating char in cat. + if (catLen == catSize) catLen--; + + minCatLength = NV_MIN(strSize - strLen, catLen + 1); + if (0 == minCatLength) + return str; + + begin = str; + str = str + strLen; + + // strncat doesn't count NULL char. + if (minCatLength > 1) + portMemCopy(str, strSize, cat, minCatLength - 1); + + begin[strLen + minCatLength - 1] = '\0'; + return begin; +} + +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringConvertAsciiToUtf16 +NvLength +portStringConvertAsciiToUtf16 +( + NvU16 *dest, + NvLength destSize, + const char *src, + NvLength srcSize +) +{ + NvLength i, len; + + PORT_ASSERT_CHECKED(dest != NULL); + PORT_ASSERT_CHECKED(src != NULL); + + if (destSize == 0) + return 0; + + len = portStringLengthSafe(src, srcSize); + if (len >= destSize) + len = destSize - 1; + + i = len; + while (i-- > 0) + dest[i] = src[i]; + + dest[len] = 0; + return len; +} +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringBufferToHex +NvLength +portStringBufferToHex +( + char *str, + NvLength strSize, + const NvU8 *buf, + NvLength bufSize +) +{ + NvLength i, len; + + if (strSize == 0) + return 0; + + PORT_ASSERT_CHECKED(str != NULL); + PORT_ASSERT_CHECKED(buf != NULL); + + len = bufSize * 2; + if (len >= strSize) + len = strSize - 1; + + for (i = 0; i < len; i++) + { + NvU8 n = (i % 2) ? (buf[i/2] & 0xF) : (buf[i/2] >> 4); + str[i] = (n < 0xA) ? 
('0' + n) : ('a' + n - 0xA);
+    }
+    str[len] = 0;
+    return len;
+}
+#endif
+
+#ifndef NVPORT_STRING_DONT_DEFINE_portStringBufferToHexGroups
+NvLength
+portStringBufferToHexGroups
+(
+    char *str,
+    NvLength strSize,
+    const NvU8 *buf,
+    NvLength bufSize,
+    NvLength groupCount,
+    const NvU32 *groups,
+    const char *separator
+)
+{
+    NvLength group, sepLength, written;
+
+    if (strSize == 0)
+        return 0;
+
+    PORT_ASSERT_CHECKED(str != NULL);
+    PORT_ASSERT_CHECKED(buf != NULL);
+    PORT_ASSERT_CHECKED(groups != NULL);
+    PORT_ASSERT_CHECKED(separator != NULL);
+
+    sepLength = portStringLength(separator);
+
+    for (written = 0, group = 0; (group < groupCount) && (written < (strSize - 1)); group++)
+    {
+        NvLength groupSize = NV_MIN(groups[group] / 2, bufSize);
+        written += portStringBufferToHex(str + written, strSize - written, buf, groupSize);
+        buf     += groupSize;
+        bufSize -= groupSize;
+
+        if (group != groupCount - 1)
+        {
+            portMemCopy(str + written, strSize - written, separator, sepLength);
+            written += sepLength;
+        }
+    }
+
+    str[written] = 0;
+    return written;
+}
+#endif
+
+#ifndef NVPORT_STRING_DONT_DEFINE_portStringTok
+static const char *portStringChr(const char *s, int c)
+{
+    if (s == NULL)
+    {
+        return NULL;
+    }
+
+    while (*s != 0 && *s != c)
+    {
+        s++;
+    }
+
+    if (*s == 0)
+    {
+        return NULL;
+    }
+
+    return s;
+}
+
+char *portStringTok(char *str, const char *delim, char **saveptr)
+{
+    char *cp, *start;
+    start = cp = str ? str : *saveptr;
+
+    if (cp == NULL)
+    {
+        return NULL;
+    }
+
+    while (*cp && !portStringChr(delim, *cp))
+    {
+        ++cp;
+    }
+
+    if (!*cp)
+    {
+        if (cp == start)
+        {
+            return NULL;
+        }
+        *saveptr = NULL;
+        return start;
+    }
+    else
+    {
+        *cp++ = '\0';
+        *saveptr = cp;
+        return start;
+    }
+}
+#endif
+
+#ifndef NVPORT_STRING_DONT_DEFINE_portStringStrStr
+char *portStringStrStr(char *str, char *substr)
+{
+    char* ptr;
+
+    ptr = str;
+
+    while (*ptr)
+    {
+        if (portStringCompare(ptr, substr, portStringLength(substr)) == 0)
+        {
+            return ptr;
+        }
+        ptr++;
+    }
+    return NULL;
+}
+#endif
+
+#ifndef NVPORT_STRING_DONT_DEFINE_portStringStrChar
+const char *portStringStrChar(const char *str, int c)
+{
+    const char* ptr;
+
+    ptr = str;
+
+    while (*ptr)
+    {
+        if (*ptr == (char)c)
+        {
+            return ptr;
+        }
+        ptr++;
+    }
+    return NULL;
+}
+#endif
diff --git a/src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h b/src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h
new file mode 100644
index 0000000..b12c4ad
--- /dev/null
+++ b/src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h
@@ -0,0 +1,60 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief SYNC unix kernel struct implementations + * + */ + +#ifndef _NVPORT_SYNC_UNIX_KERNEL_DEF_H_ +#define _NVPORT_SYNC_UNIX_KERNEL_DEF_H_ + +#include "os-interface.h" + +struct PORT_SPINLOCK +{ + void *lock; + NvU64 oldIrql; + PORT_MEM_ALLOCATOR *pAllocator; +}; + +struct PORT_MUTEX +{ + void *mutex; + PORT_MEM_ALLOCATOR *pAllocator; +}; + +struct PORT_SEMAPHORE +{ + void *sem; + PORT_MEM_ALLOCATOR *pAllocator; +}; + +struct PORT_RWLOCK +{ + void *rwlock; + PORT_MEM_ALLOCATOR *pAllocator; +}; + +#endif diff --git a/src/nvidia/src/libraries/nvport/sync/sync_common.h b/src/nvidia/src/libraries/nvport/sync/sync_common.h new file mode 100644 index 0000000..babb90f --- /dev/null +++ b/src/nvidia/src/libraries/nvport/sync/sync_common.h @@ -0,0 +1,158 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief SYNC common function implementations + * + * The portSyncXxxCreate functions have the same implementation, so they are + * extracted here instead of repeated in every file. 
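+ *
+ * A typical lifecycle is analogous for all four primitives (illustrative
+ * sketch; error handling elided, pAllocator assumed valid):
+ *
+ *     PORT_MUTEX *pMutex = portSyncMutexCreate(pAllocator);
+ *     portSyncMutexAcquire(pMutex);
+ *     // ... critical section ...
+ *     portSyncMutexRelease(pMutex);
+ *     portSyncMutexDestroy(pMutex); // also frees pMutex through pAllocator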
+ */ + +#define PORT_SYNC_IMPL +#include "nvport/nvport.h" + +#ifdef PORT_SYNC_COMMON_DEFINE_SPINLOCK +PORT_SPINLOCK * +portSyncSpinlockCreate +( + PORT_MEM_ALLOCATOR *pAllocator +) +{ + PORT_SPINLOCK *pSpinlock; + PORT_ASSERT_CHECKED(pAllocator != NULL); + pSpinlock = PORT_ALLOC(pAllocator, portSyncSpinlockSize); + if (pSpinlock != NULL) + { + if (portSyncSpinlockInitialize(pSpinlock) != NV_OK) + { + PORT_FREE(pAllocator, pSpinlock); + return NULL; + } + pSpinlock->pAllocator = pAllocator; + } + return pSpinlock; +} +#endif + +#ifdef PORT_SYNC_COMMON_DEFINE_MUTEX +PORT_MUTEX * +portSyncMutexCreate +( + PORT_MEM_ALLOCATOR *pAllocator +) +{ + PORT_MUTEX *pMutex; + PORT_ASSERT_CHECKED(pAllocator != NULL); + pMutex = PORT_ALLOC(pAllocator, portSyncMutexSize); + if (pMutex != NULL) + { + if (portSyncMutexInitialize(pMutex) != NV_OK) + { + PORT_FREE(pAllocator, pMutex); + return NULL; + } + pMutex->pAllocator = pAllocator; + } + return pMutex; +} +#endif + +#ifdef PORT_SYNC_COMMON_DEFINE_SEMAPHORE +PORT_SEMAPHORE * +portSyncSemaphoreCreate +( + PORT_MEM_ALLOCATOR *pAllocator, + NvU32 startValue +) +{ + PORT_SEMAPHORE *pSemaphore; + PORT_ASSERT_CHECKED(pAllocator != NULL); + pSemaphore = PORT_ALLOC(pAllocator, portSyncSemaphoreSize); + if (pSemaphore != NULL) + { + if (portSyncSemaphoreInitialize(pSemaphore, startValue) != NV_OK) + { + PORT_FREE(pAllocator, pSemaphore); + return NULL; + } + pSemaphore->pAllocator = pAllocator; + } + return pSemaphore; +} +#endif + +#ifdef PORT_SYNC_COMMON_DEFINE_RWLOCK +PORT_RWLOCK * +portSyncRwLockCreate +( + PORT_MEM_ALLOCATOR *pAllocator +) +{ + PORT_RWLOCK *pLock; + PORT_ASSERT_CHECKED(pAllocator != NULL); + + pLock = PORT_ALLOC(pAllocator, portSyncRwLockSize); + if (pLock != NULL) + { + if (portSyncRwLockInitialize(pLock) != NV_OK) + { + PORT_FREE(pAllocator, pLock); + return NULL; + } + pLock->pAllocator = pAllocator; + } + return pLock; +} +#endif + +#ifdef PORT_SYNC_COMMON_DEFINE_SYNC_INIT + +NvLength portSyncSpinlockSize; +NvLength portSyncMutexSize; +NvLength portSyncSemaphoreSize; +NvLength portSyncRwLockSize; + +void portSyncInitialize(void) +{ + portSyncSpinlockSize = sizeof(PORT_SPINLOCK); + portSyncMutexSize = sizeof(PORT_MUTEX); + portSyncSemaphoreSize = sizeof(PORT_SEMAPHORE); + portSyncRwLockSize = sizeof(PORT_RWLOCK); +#if LOCK_VAL_ENABLED +{ + extern void portSyncInitialize_LOCKVAL(void); + portSyncInitialize_LOCKVAL(); +} +#endif +} + +void portSyncShutdown(void) +{ +#if LOCK_VAL_ENABLED + extern void portSyncShutdown_LOCKVAL(void); + portSyncShutdown_LOCKVAL(); +#endif +} + +#endif // PORT_SYNC_COMMON_DEFINE_SYNC_INIT diff --git a/src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c b/src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c new file mode 100644 index 0000000..9a54577 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c @@ -0,0 +1,339 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief SYNC module implementation for Unix kernelmode + * + * This implementation uses the NVIDIA OS interface into the unix kernels. + */ + +#define PORT_SYNC_IMPL +#include "nvport/nvport.h" + +#if !PORT_IS_KERNEL_BUILD +#error "This file can only be compiled as part of the kernel build." +#endif +#if !NVOS_IS_UNIX +#error "This file can only be compiled on Unix." +#endif + +#include "os-interface.h" + +#include "inc/sync_unix_kernel_os_def.h" + +NV_STATUS +portSyncSpinlockInitialize +( + PORT_SPINLOCK *pSpinlock +) +{ + if (pSpinlock == NULL) + { + return NV_ERR_INVALID_POINTER; + } + pSpinlock->pAllocator = NULL; + return os_alloc_spinlock(&pSpinlock->lock); +} + +void +portSyncSpinlockDestroy +( + PORT_SPINLOCK *pSpinlock +) +{ + PORT_ASSERT_CHECKED(pSpinlock != NULL); + os_free_spinlock(pSpinlock->lock); + if (pSpinlock->pAllocator != NULL) + { + PORT_FREE(pSpinlock->pAllocator, pSpinlock); + } +} + +void +portSyncSpinlockAcquire +( + PORT_SPINLOCK *pSpinlock +) +{ + PORT_ASSERT_CHECKED(pSpinlock != NULL); + pSpinlock->oldIrql = os_acquire_spinlock(pSpinlock->lock); +} + +void +portSyncSpinlockRelease +( + PORT_SPINLOCK *pSpinlock +) +{ + PORT_ASSERT_CHECKED(pSpinlock != NULL); + os_release_spinlock(pSpinlock->lock, pSpinlock->oldIrql); +} + + + +NV_STATUS +portSyncMutexInitialize +( + PORT_MUTEX *pMutex +) +{ + if (pMutex == NULL) + { + return NV_ERR_INVALID_POINTER; + } + pMutex->pAllocator = NULL; + return os_alloc_mutex(&pMutex->mutex); +} + +void +portSyncMutexDestroy +( + PORT_MUTEX *pMutex +) +{ + PORT_ASSERT_CHECKED(pMutex != NULL); + os_free_mutex(pMutex->mutex); + if (pMutex->pAllocator != NULL) + { + PORT_FREE(pMutex->pAllocator, pMutex); + } +} + +void +portSyncMutexAcquire +( + PORT_MUTEX *pMutex +) +{ + NV_STATUS status; + PORT_ASSERT_CHECKED(pMutex != NULL); + PORT_ASSERT_CHECKED(portSyncExSafeToSleep()); + status = os_acquire_mutex(pMutex->mutex); + PORT_ASSERT(status == NV_OK); +} + +NvBool +portSyncMutexAcquireConditional +( + PORT_MUTEX *pMutex +) +{ + PORT_ASSERT_CHECKED(pMutex != NULL); + return os_cond_acquire_mutex(pMutex->mutex) == NV_OK; + +} + +void +portSyncMutexRelease +( + PORT_MUTEX *pMutex +) +{ + PORT_ASSERT_CHECKED(pMutex != NULL); + os_release_mutex(pMutex->mutex); +} + + + +NV_STATUS +portSyncSemaphoreInitialize +( + PORT_SEMAPHORE *pSemaphore, + NvU32 startValue +) +{ + if (pSemaphore == NULL) + { + return 
NV_ERR_INVALID_POINTER; + } + pSemaphore->pAllocator = NULL; + pSemaphore->sem = os_alloc_semaphore(startValue); + return (pSemaphore->sem != NULL) ? NV_OK : NV_ERR_NO_MEMORY; +} + +void +portSyncSemaphoreDestroy +( + PORT_SEMAPHORE *pSemaphore +) +{ + PORT_ASSERT_CHECKED(pSemaphore != NULL); + os_free_semaphore(pSemaphore->sem); + if (pSemaphore->pAllocator != NULL) + { + PORT_FREE(pSemaphore->pAllocator, pSemaphore); + } +} + +void +portSyncSemaphoreAcquire +( + PORT_SEMAPHORE *pSemaphore +) +{ + NV_STATUS status; + PORT_ASSERT_CHECKED(pSemaphore != NULL); + status = os_acquire_semaphore(pSemaphore->sem); + PORT_ASSERT(status == NV_OK); +} + +NvBool +portSyncSemaphoreAcquireConditional +( + PORT_SEMAPHORE *pSemaphore +) +{ + + PORT_ASSERT_CHECKED(pSemaphore != NULL); + return os_cond_acquire_semaphore(pSemaphore->sem) == NV_OK; +} + +void +portSyncSemaphoreRelease +( + PORT_SEMAPHORE *pSemaphore +) +{ + PORT_ASSERT_CHECKED(pSemaphore != NULL); + os_release_semaphore(pSemaphore->sem); +} + + +NV_STATUS +portSyncRwLockInitialize +( + PORT_RWLOCK *pRwLock +) +{ + if (pRwLock == NULL) + { + return NV_ERR_INVALID_POINTER; + } + pRwLock->pAllocator = NULL; + pRwLock->rwlock = os_alloc_rwlock(); + return (pRwLock->rwlock != NULL) ? NV_OK : NV_ERR_NO_MEMORY; +} + +void +portSyncRwLockDestroy +( + PORT_RWLOCK *pRwLock +) +{ + PORT_ASSERT_CHECKED(pRwLock != NULL); + os_free_rwlock(pRwLock->rwlock); + if (pRwLock->pAllocator != NULL) + { + PORT_FREE(pRwLock->pAllocator, pRwLock); + } +} + +void +portSyncRwLockAcquireRead +( + PORT_RWLOCK *pRwLock +) +{ + NV_STATUS status; + PORT_ASSERT_CHECKED(pRwLock != NULL); + PORT_ASSERT_CHECKED(portSyncExSafeToSleep()); + status = os_acquire_rwlock_read(pRwLock->rwlock); + PORT_ASSERT(status == NV_OK); +} + +NvBool +portSyncRwLockAcquireReadConditional +( + PORT_RWLOCK *pRwLock +) +{ + PORT_ASSERT_CHECKED(pRwLock != NULL); + return os_cond_acquire_rwlock_read(pRwLock->rwlock) == NV_OK; +} + +void +portSyncRwLockAcquireWrite +( + PORT_RWLOCK *pRwLock +) +{ + NV_STATUS status; + PORT_ASSERT_CHECKED(pRwLock != NULL); + PORT_ASSERT_CHECKED(portSyncExSafeToSleep()); + status = os_acquire_rwlock_write(pRwLock->rwlock); + PORT_ASSERT(status == NV_OK); +} + +NvBool +portSyncRwLockAcquireWriteConditional +( + PORT_RWLOCK *pRwLock +) +{ + PORT_ASSERT_CHECKED(pRwLock != NULL); + return os_cond_acquire_rwlock_write(pRwLock->rwlock) == NV_OK; +} + +void +portSyncRwLockReleaseRead +( + PORT_RWLOCK *pRwLock +) +{ + PORT_ASSERT_CHECKED(pRwLock != NULL); + os_release_rwlock_read(pRwLock->rwlock); +} + +void +portSyncRwLockReleaseWrite +( + PORT_RWLOCK *pRwLock +) +{ + PORT_ASSERT_CHECKED(pRwLock != NULL); + os_release_rwlock_write(pRwLock->rwlock); +} + +NvBool portSyncExSafeToSleep(void) +{ + return os_semaphore_may_sleep(); +} + +NvBool portSyncExSafeToWake(void) +{ + return NV_TRUE; +} + +NvU64 portSyncExGetInterruptLevel(void) +{ + return !os_semaphore_may_sleep(); +} + +// Include implementations common for all platforms +#define PORT_SYNC_COMMON_DEFINE_SPINLOCK +#define PORT_SYNC_COMMON_DEFINE_MUTEX +#define PORT_SYNC_COMMON_DEFINE_SEMAPHORE +#define PORT_SYNC_COMMON_DEFINE_RWLOCK +#define PORT_SYNC_COMMON_DEFINE_SYNC_INIT +#include "sync_common.h" diff --git a/src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c b/src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c new file mode 100644 index 0000000..9f3ebd7 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright 
(c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/** + * @file + * @brief THREAD module implementation for Unix kernelmode + * + * This implementation uses the NVIDIA OS interface into the unix kernels. + */ + +#if !PORT_IS_KERNEL_BUILD +#error "This file can only be compiled as part of the kernel build." +#endif + +#if !NVOS_IS_UNIX +#error "This file can only be compiled on Unix." +#endif + +#include "nvport/nvport.h" +#include "os-interface.h" + +// Invalid value for thread. +const PORT_THREAD PORT_THREAD_INVALID = {0ULL}; + +// Invalid value for process. +const PORT_PROCESS PORT_PROCESS_INVALID = {0ULL}; + +NvU64 portThreadGetCurrentThreadId(void) +{ + NvU64 tid = 0; + os_get_current_thread(&tid); + return tid; +} + +void portThreadYield(void) +{ + os_schedule(); +} + diff --git a/src/nvidia/src/libraries/nvport/util/util_compiler_switch.c b/src/nvidia/src/libraries/nvport/util/util_compiler_switch.c new file mode 100644 index 0000000..e1e222b --- /dev/null +++ b/src/nvidia/src/libraries/nvport/util/util_compiler_switch.c @@ -0,0 +1,38 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/**
+ * @file
+ * @brief Cross-platform UTIL module implementation. For some util functions
+ * nvmk cannot determine which compiler is being used, so the implementation
+ * is selected here with compile-time compiler checks.
+ */
+
+#include "nvport/nvport.h"
+
+#if PORT_COMPILER_IS_MSVC
+#include "util_msvc.c"
+#elif PORT_COMPILER_IS_GCC || PORT_COMPILER_IS_CLANG
+#include "util_gcc_clang.c"
+#else
+#error "Compiler is not supported"
+#endif // switch for compiler
diff --git a/src/nvidia/src/libraries/nvport/util/util_gcc_clang.c b/src/nvidia/src/libraries/nvport/util/util_gcc_clang.c
new file mode 100644
index 0000000..b16bf7c
--- /dev/null
+++ b/src/nvidia/src/libraries/nvport/util/util_gcc_clang.c
@@ -0,0 +1,80 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief Util function implementations using GCC/Clang compiler intrinsics
+ */
+
+#include "nvport/nvport.h"
+
+#if portUtilExGetStackTrace_SUPPORTED
+// __builtin_frame_address()/__builtin_return_address() require a constant
+// argument, hence the explicit switch over the supported levels.
+NV_NOINLINE NvUPtr
+portUtilExGetStackTrace
+(
+    NvU32 level
+)
+{
+    switch (level)
+    {
+        case 0: return (__builtin_frame_address(0) != 0) ?
+                       (NvUPtr)__builtin_return_address(0) : (NvUPtr)0;
+        case 1: return (__builtin_frame_address(1) != 0) ?
+                       (NvUPtr)__builtin_return_address(1) : (NvUPtr)0;
+        case 2: return (__builtin_frame_address(2) != 0) ?
+                       (NvUPtr)__builtin_return_address(2) : (NvUPtr)0;
+        case 3: return (__builtin_frame_address(3) != 0) ?
+                       (NvUPtr)__builtin_return_address(3) : (NvUPtr)0;
+        case 4: return (__builtin_frame_address(4) != 0) ?
+                       (NvUPtr)__builtin_return_address(4) : (NvUPtr)0;
+        case 5: return (__builtin_frame_address(5) != 0) ?
+                       (NvUPtr)__builtin_return_address(5) : (NvUPtr)0;
+        case 6: return (__builtin_frame_address(6) != 0) ?
+                       (NvUPtr)__builtin_return_address(6) : (NvUPtr)0;
+        case 7: return (__builtin_frame_address(7) != 0) ?
+                       (NvUPtr)__builtin_return_address(7) : (NvUPtr)0;
+        case 8: return (__builtin_frame_address(8) != 0) ?
+                       (NvUPtr)__builtin_return_address(8) : (NvUPtr)0;
+        case 9: return (__builtin_frame_address(9) != 0) ?
+                       (NvUPtr)__builtin_return_address(9) : (NvUPtr)0;
+        case 10: return (__builtin_frame_address(10) != 0) ?
+                        (NvUPtr)__builtin_return_address(10) : (NvUPtr)0;
+        case 11: return (__builtin_frame_address(11) != 0) ?
+                        (NvUPtr)__builtin_return_address(11) : (NvUPtr)0;
+        case 12: return (__builtin_frame_address(12) != 0) ?
+ (NvUPtr)__builtin_return_address(12) : (NvUPtr)0; + case 13: return (__builtin_frame_address(13) != 0) ? + (NvUPtr)__builtin_return_address(13) : (NvUPtr)0; + case 14: return (__builtin_frame_address(14) != 0) ? + (NvUPtr)__builtin_return_address(14) : (NvUPtr)0; + case 15: return (__builtin_frame_address(15) != 0) ? + (NvUPtr)__builtin_return_address(15) : (NvUPtr)0; + } + return 0; +} +#endif + +NV_NOINLINE NvUPtr portUtilGetIPAddress(void) +{ + return portUtilGetReturnAddress(); +} diff --git a/src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c b/src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c new file mode 100644 index 0000000..44a5b6d --- /dev/null +++ b/src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Util functions implementations for unix based OS. + */ + +#if !PORT_IS_KERNEL_BUILD +#error "This file can only be compiled as part of the kernel build." +#endif + +#if !NVOS_IS_UNIX +#error "This file can only be compiled on Unix." +#endif + +#include "nvport/nvport.h" +#include "os-interface.h" + +NvBool portUtilIsInterruptContext(void) +{ + return os_is_isr(); +} + diff --git a/src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c b/src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c new file mode 100644 index 0000000..444928e --- /dev/null +++ b/src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c @@ -0,0 +1,349 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+/* ------------------------ Includes --------------------------------------- */
+#include "prereq_tracker/prereq_tracker.h"
+
+/* ------------------------ Static Function Prototypes --------------------- */
+static NvBool _prereqValid(PrereqTracker *pTracker, PREREQ_ENTRY *pPrereq);
+
+/* ------------------------ Public Functions ------------------------------ */
+
+/*!
+ * @brief Constructs the prereq tracker object
+ *
+ * @param[in]  pTracker  PrereqTracker object to be constructed
+ * @param[in]  pParent   Parent GPU, passed as the first parameter to callbacks
+ *
+ * @return NV_OK                  Successfully constructed tracker
+ * @return NV_ERR_INVALID_STATE   If already constructed
+ */
+NV_STATUS
+prereqConstruct_IMPL
+(
+    PrereqTracker *pTracker,
+    OBJGPU        *pParent
+)
+{
+    NV_ASSERT_OR_RETURN(!pTracker->bInitialized, NV_ERR_INVALID_STATE);
+    NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_OBJECT_PARENT);
+
+    bitVectorClrAll(&pTracker->satisfied);
+
+    listInit(&pTracker->prereqList, portMemAllocatorGetGlobalNonPaged());
+    pTracker->bInitialized = NV_TRUE;
+    pTracker->pParent = pParent;
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Destroys the prerequisite tracker object
+ *
+ * @param[in]  pTracker  PrereqTracker object to be destroyed
+ */
+void
+prereqDestruct_IMPL
+(
+    PrereqTracker *pTracker
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(pTracker->bInitialized);
+
+    listDestroy(&pTracker->prereqList);
+    pTracker->bInitialized = NV_FALSE;
+}
+
+/*!
+ * @brief Arms a tracking structure to fire the callback when all prerequisites
+ * are satisfied. May only be called after all prerequisites are specified. No
+ * more prerequisites may be specified after arming.
+ *
+ * @param[in]  pTracker  PrereqTracker object
+ * @param[in]  pPrereq   PREREQ_ENTRY object pointer
+ *
+ * @return NV_OK Prerequisite successfully armed.
+ * @return error Errors propagated up from functions called.
+ */
+static NV_STATUS
+_prereqArm
+(
+    PrereqTracker *pTracker,
+    PREREQ_ENTRY  *pPrereq
+)
+{
+    PREREQ_ID_BIT_VECTOR requestedAndSatisfied;
+
+    NV_ASSERT_OR_RETURN(pTracker->bInitialized, NV_ERR_INVALID_STATE);
+    NV_ASSERT_OR_RETURN(_prereqValid(pTracker, pPrereq), NV_ERR_INVALID_OBJECT);
+    NV_ASSERT_OR_RETURN(!pPrereq->bArmed, NV_ERR_INVALID_STATE);
+
+    //
+    // Set the PREREQ_ENTRY state to bArmed. No more PREREQ_IDs may be added
+    // after this point.
+    //
+    pPrereq->bArmed = NV_TRUE;
+
+    //
+    // Put together a mask of PREREQ_IDs which are both satisfied and requested.
+    // We do not keep track of satisfied prereqs until armed, so we have no existing
+    // state to worry about here.
+    //
+    NV_ASSERT_OK_OR_RETURN(bitVectorAnd(&requestedAndSatisfied,
+                                        &pPrereq->requested,
+                                        &pTracker->satisfied));
+
+    pPrereq->countSatisfied = bitVectorCountSetBits(&requestedAndSatisfied);
+
+    if (PREREQ_IS_SATISFIED(pPrereq))
+    {
+        NV_ASSERT_OK_OR_RETURN(pPrereq->callback(pTracker->pParent, NV_TRUE));
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Creates, adds IDs to, and arms a prereq tracking structure in the list.
+ * Caller gives up all control of the prereq structure to the prereq tracker, which
+ * will take care of storing the completed, final struct and freeing it once done.
+ *
+ * @param[in]  pTracker   PrereqTracker object
+ * @param[in]  callback   Callback function pointer
+ *                        First parameter passed will be NVOC parent of pTracker
+ * @param[in]  pDepends   Bitvector of prerequisite IDs to add as requirement
+ * @param[out] ppPrereq   PREREQ_ENTRY object pointer created, or NULL if not desired
+ *
+ * @return NV_OK Prerequisite successfully armed.
+ * @return error Errors propagated up from functions called.
+ */
+NV_STATUS
+prereqComposeEntry_IMPL
+(
+    PrereqTracker        *pTracker,
+    GpuPrereqCallback    *callback,
+    PREREQ_ID_BIT_VECTOR *pDepends,
+    PREREQ_ENTRY        **ppPrereq
+)
+{
+    PREREQ_ENTRY *pPrereq;
+
+    NV_ASSERT_OR_RETURN(pTracker->bInitialized, NV_ERR_INVALID_STATE);
+    NV_ASSERT_OR_RETURN(callback != NULL, NV_ERR_INVALID_POINTER);
+    NV_ASSERT_OR_RETURN(pDepends != NULL, NV_ERR_INVALID_POINTER);
+
+    pPrereq = listAppendNew(&pTracker->prereqList);
+    NV_ASSERT_OR_RETURN(pPrereq != NULL, NV_ERR_NO_MEMORY);
+
+    NV_ASSERT_OK_OR_RETURN(bitVectorCopy(&pPrereq->requested, pDepends));
+
+    pPrereq->countRequested = bitVectorCountSetBits(&pPrereq->requested);
+    pPrereq->countSatisfied = 0;
+    pPrereq->callback       = callback;
+
+    NV_ASSERT_OK_OR_RETURN(_prereqArm(pTracker, pPrereq));
+
+    if (ppPrereq != NULL)
+        *ppPrereq = pPrereq;
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Notifies the tracker that a prerequisite was satisfied.
+ *
+ * @param[in] pTracker  PrereqTracker object
+ * @param[in] prereqId  Prerequisite ID being satisfied
+ *
+ * @return NV_OK Prerequisite successfully satisfied & all callbacks passed.
+ * @return error Errors propagated up from functions called.
+ */
+NV_STATUS
+prereqSatisfy_IMPL
+(
+    PrereqTracker *pTracker,
+    PREREQ_ID      prereqId
+)
+{
+    PREREQ_ENTRY  *pPrereq;
+    PrereqListIter it;
+
+    NV_ASSERT_OR_RETURN(pTracker->bInitialized, NV_ERR_INVALID_STATE);
+    NV_ASSERT_OR_RETURN((prereqId < PREREQ_ID_VECTOR_SIZE),
+                        NV_ERR_INVALID_REQUEST);
+
+    //
+    // The prerequisite can be satisfied only once. An attempt to satisfy
+    // the prerequisite multiple times should indicate bad code design.
+    //
+    NV_ASSERT_OR_RETURN(!bitVectorTest(&pTracker->satisfied, prereqId),
+                        NV_ERR_INVALID_STATE);
+
+    NV_ASSERT_OK_OR_RETURN(bitVectorSet(&pTracker->satisfied, prereqId));
+
+    // Broadcast satisfaction of this PREREQ_ID to all armed PREREQ_ENTRY.
+    it = listIterAll(&pTracker->prereqList);
+    while (listIterNext(&it))
+    {
+        pPrereq = it.pValue;
+        if (pPrereq->bArmed &&
+            bitVectorTest(&pPrereq->requested, prereqId))
+        {
+            pPrereq->countSatisfied++;
+            NV_ASSERT_OR_RETURN(pPrereq->countSatisfied <= pPrereq->countRequested,
+                                NV_ERR_INVALID_STATE);
+
+            if (PREREQ_IS_SATISFIED(pPrereq))
+            {
+                NV_ASSERT_OK_OR_RETURN(pPrereq->callback(pTracker->pParent, NV_TRUE));
+            }
+        }
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Notifies the tracker that a prerequisite will be retracted.
+ *
+ * @param[in] pTracker  PrereqTracker object
+ * @param[in] prereqId  Prerequisite ID being retracted
+ *
+ * @return NV_OK Prerequisite successfully retracted & all callbacks passed.
+ * @return error Errors propagated up from functions called.
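+ *
+ * Example of the compose/satisfy/retract flow (an illustrative sketch;
+ * PREREQ_ID_EXAMPLE and myCallback are hypothetical names):
+ *
+ *   PREREQ_ID_BIT_VECTOR depends;
+ *   bitVectorClrAll(&depends);
+ *   NV_ASSERT_OK(bitVectorSet(&depends, PREREQ_ID_EXAMPLE));
+ *   NV_ASSERT_OK(prereqComposeEntry(pTracker, myCallback, &depends, NULL));
+ *   // Callback is invoked with NV_TRUE once every requested ID is satisfied:
+ *   NV_ASSERT_OK(prereqSatisfy(pTracker, PREREQ_ID_EXAMPLE));
+ *   // Callback is invoked with NV_FALSE before the satisfied bit is dropped:
+ *   NV_ASSERT_OK(prereqRetract(pTracker, PREREQ_ID_EXAMPLE));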
+ */
+NV_STATUS
+prereqRetract_IMPL
+(
+    PrereqTracker *pTracker,
+    PREREQ_ID      prereqId
+)
+{
+    PREREQ_ENTRY  *pNode;
+    PrereqListIter it;
+    NV_STATUS      status = NV_OK;
+
+    NV_ASSERT_OR_RETURN(pTracker != NULL,
+                        NV_ERR_INVALID_STATE);
+    NV_ASSERT_OR_RETURN(pTracker->bInitialized,
+                        NV_ERR_INVALID_STATE);
+    NV_ASSERT_OR_RETURN((prereqId < PREREQ_ID_VECTOR_SIZE),
+                        NV_ERR_INVALID_REQUEST);
+
+    //
+    // The prerequisite can be retracted even if it was not satisfied. This
+    // simplifies client code, which no longer needs to track whether the
+    // prerequisite was satisfied, and avoids the need for an isSatisfied()
+    // query interface.
+    //
+    if (!bitVectorTest(&pTracker->satisfied, prereqId))
+        return NV_OK;
+
+    NV_ASSERT_OK_OR_RETURN(bitVectorClr(&pTracker->satisfied, prereqId));
+
+    it = listIterAll(&pTracker->prereqList);
+    while (listIterNext(&it))
+    {
+        pNode = it.pValue;
+
+        if (pNode->bArmed &&
+            bitVectorTest(&pNode->requested, prereqId))
+        {
+            if (PREREQ_IS_SATISFIED(pNode))
+            {
+                NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, pNode->callback(pTracker->pParent, NV_FALSE));
+            }
+
+            pNode->countSatisfied--;
+            if (pNode->countSatisfied < 0)
+            {
+                NV_ASSERT(0);
+                if (status == NV_OK)
+                {
+                    status = NV_ERR_INVALID_STATE;
+                }
+            }
+        }
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Indicates if a prerequisite ID is currently satisfied.
+ *
+ * @param[in] pTracker  PrereqTracker object pointer
+ * @param[in] prereqId  Prerequisite ID to check
+ *
+ * @return NV_TRUE   Prerequisite ID is in the satisfied mask.
+ *         NV_FALSE  otherwise
+ */
+NvBool
+prereqIdIsSatisfied_IMPL
+(
+    PrereqTracker *pTracker,
+    PREREQ_ID      prereqId
+)
+{
+    NvBool bIsSatisfied;
+
+    if ((pTracker->bInitialized) &&
+        (prereqId < PREREQ_ID_VECTOR_SIZE))
+    {
+        bIsSatisfied = bitVectorTest(&pTracker->satisfied, prereqId);
+    }
+    else
+    {
+        bIsSatisfied = NV_FALSE;
+    }
+
+    return bIsSatisfied;
+}
+
+/* ---------------------- Private Static Functions -------------------------- */
+/*!
+ * Helper function which determines whether a given PREREQ_ENTRY tracking
+ * structure is valid (i.e. is in the tracker's list at @ref
+ * PrereqTracker::prereqList).
+ *
+ * @param[in] pTracker  PrereqTracker object pointer
+ * @param[in] pPrereq   PREREQ_ENTRY object pointer
+ *
+ * @return NV_TRUE   pPrereq is valid.
+ * @return NV_FALSE  pPrereq is invalid.
+ */
+static NvBool
+_prereqValid
+(
+    PrereqTracker *pTracker,
+    PREREQ_ENTRY  *pPrereq
+)
+{
+    PrereqListIter it = listIterAll(&pTracker->prereqList);
+    while (listIterNext(&it))
+    {
+        // pPrereq is valid if found in the list.
+        if (it.pValue == pPrereq)
+            return NV_TRUE;
+    }
+
+    return NV_FALSE;
+}
diff --git a/src/nvidia/src/libraries/resserv/src/rs_access_map.c b/src/nvidia/src/libraries/resserv/src/rs_access_map.c
new file mode 100644
index 0000000..2f53c04
--- /dev/null
+++ b/src/nvidia/src/libraries/resserv/src/rs_access_map.c
@@ -0,0 +1,717 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvstatus.h" +#include "nvtypes.h" + +#include "containers/map.h" +#include "resserv/resserv.h" +#include "resserv/rs_resource.h" +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" +#include "resserv/rs_access_rights.h" +#include "resserv/rs_access_map.h" + +static NV_STATUS +_rsAccessGrantCallback +( + RsResourceRef *pResourceRef, + CALL_CONTEXT *pCallContext, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pParentRights, + void *pAllocParams, + RsAccessRight accessRight +); + +/*! + * @brief Checks which rights, if any, are being shared with the invoking client by a resource + * This is a static helper function for rsAccessGrantRights. 
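+ *
+ * A right ends up shared when at least one matching Allow policy (one
+ * without RS_SHARE_ACTION_FLAG_REQUIRE) grants it and no matching Require
+ * policy rejects it; denied rights always win over granted ones.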
+ * + * @param[in] pResourceRef + * @param[in] pInvokingClient + * @param[in] pCallContext May be NULL + * @param[out] pRightsShared The set of access rights shared + * + * @return none + */ +static void +_rsAccessGetSharedRights +( + RsResourceRef *pResourceRef, + RsClient *pInvokingClient, + CALL_CONTEXT *pCallContext, + RS_ACCESS_MASK *pRightsShared +) +{ + RsShareList *pShareList; + RsShareListIter it; + + RsServer *pServer = NULL; + RsResourceRef *pParentRef = NULL; + + RS_ACCESS_MASK rightsGranted; + RS_ACCESS_MASK rightsDenied; + + portMemSet(&rightsGranted, 0, sizeof(RS_ACCESS_MASK)); + portMemSet(&rightsDenied, 0, sizeof(RS_ACCESS_MASK)); + + RS_ACCESS_MASK_CLEAR(pRightsShared); + + // No meaning to sharing rights with self, skip + if (pInvokingClient == pResourceRef->pClient) + return; + + if (pCallContext != NULL) + { + pServer = pCallContext->pServer; + pParentRef = pCallContext->pContextRef; + } + + pShareList = rsAccessGetActiveShareList(pResourceRef, pServer); + + if (pShareList != NULL) + { + it = listIterAll(pShareList); + while (listIterNext(&it)) + { + RS_SHARE_POLICY *pSharePolicy = it.pValue; + + if (resShareCallback(pResourceRef->pResource, pInvokingClient, pParentRef, pSharePolicy)) + { + // Allow policies give rights on success + if (!(pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE)) + RS_ACCESS_MASK_UNION(&rightsGranted, &pSharePolicy->accessMask); + } + else + { + // Require policies reject rights on failure + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) + RS_ACCESS_MASK_UNION(&rightsDenied, &pSharePolicy->accessMask); + } + } + } + + if (pServer != NULL) + { + it = listIterAll(&pServer->globalInternalSharePolicyList); + while (listIterNext(&it)) + { + RS_SHARE_POLICY *pSharePolicy = it.pValue; + + if (resShareCallback(pResourceRef->pResource, pInvokingClient, pParentRef, pSharePolicy)) + { + // Allow policies give rights on success + if (!(pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE)) + RS_ACCESS_MASK_UNION(&rightsGranted, &pSharePolicy->accessMask); + } + else + { + // Require policies reject rights on failure + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) + RS_ACCESS_MASK_UNION(&rightsDenied, &pSharePolicy->accessMask); + } + } + } + + RS_ACCESS_MASK_UNION(pRightsShared, &rightsGranted); + RS_ACCESS_MASK_SUBTRACT(pRightsShared, &rightsDenied); +} + +void rsAccessGetAvailableRights +( + RsResourceRef *pResourceRef, + RsClient *pClient, + RS_ACCESS_MASK *pAvailableRights +) +{ + RS_ACCESS_MASK *pTargetRights; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + RS_ACCESS_MASK_CLEAR(pAvailableRights); + + // Look up rights client has on target resource + pTargetRights = rsAccessLookup(pResourceRef, pClient); + if (pTargetRights != NULL) + { + // Client owns the resource, use those rights directly + portMemCopy(pAvailableRights, sizeof(*pAvailableRights), + pTargetRights, sizeof(*pTargetRights)); + } + else + { + // Client does not own the resource, add any rights shared with this client + _rsAccessGetSharedRights(pResourceRef, pClient, pCallContext, pAvailableRights); + } +} + +RS_ACCESS_MASK * +rsAccessLookup +( + RsResourceRef *pResourceRef, + RsClient *pClient +) +{ + if (pResourceRef->pClient == pClient) + return &pResourceRef->accessMask; + + return NULL; +} + +NV_STATUS +rsAccessCheckRights +( + RsResourceRef *pResourceRef, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pRightsRequired +) +{ + RS_ACCESS_MASK ownedRights; + + NV_ASSERT_OR_RETURN(pRightsRequired != NULL, NV_ERR_INVALID_ARGUMENT); + + // 
Return if nothing to check + if (rsAccessMaskIsEmpty(pRightsRequired)) + return NV_OK; + + // Uncached access rights require executing the callback every time + rsAccessUpdateRights(pResourceRef, pInvokingClient, pRightsRequired); + + // Look up updated rights on target resource + rsAccessGetAvailableRights(pResourceRef, pInvokingClient, &ownedRights); + + // Check that rights are sufficient + if (!rsAccessMaskIsSubset(&ownedRights, pRightsRequired)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + return NV_OK; +} + +void rsAccessUpdateRights +( + RsResourceRef *pResourceRef, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pRightsToUpdate +) +{ + RS_ACCESS_MASK *pTargetRights; + RsAccessRight accessRight; + + // Look up rights on target resource + pTargetRights = rsAccessLookup(pResourceRef, pInvokingClient); + + // + // Nothing to update if the resource is not owned by the client + // (Uncached rights only have meaning for resources owned by the client) + // + if (pTargetRights == NULL) + return; + + // Update access rights owned by the client for any uncached rights + for (accessRight = 0; accessRight < RS_ACCESS_COUNT; accessRight++) + { + NV_STATUS status; + const RS_ACCESS_INFO *pAccessRightInfo = &g_rsAccessMetadata[accessRight]; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + if ((pRightsToUpdate != NULL && + !RS_ACCESS_MASK_TEST(pRightsToUpdate, accessRight)) || + !(pAccessRightInfo->flags & RS_ACCESS_FLAG_UNCACHED_CHECK)) + { + continue; + } + + status = _rsAccessGrantCallback(pResourceRef, pCallContext, pInvokingClient, NULL, NULL, accessRight); + + if (status != NV_OK) + { + RS_ACCESS_MASK_REMOVE(pTargetRights, accessRight); + } + else + { + RS_ACCESS_MASK_ADD(pTargetRights, accessRight); + } + } +} + +/*! + * @brief Checks whether two share policies are considered equal and can be merged + * + * This function uses the type and target of a share policy to determine whether + * two share policy entries would match the same clients, in which case they could + * be merged into one policy entry. 
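+ *
+ * The target handle only participates in the comparison for
+ * RS_SHARE_TYPE_CLIENT policies, and an Allow policy is never considered
+ * equal to a Require policy, even when type and target match.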
+ * + * @param[in] pSharePolicyA, pSharePolicyB the two policies to compare + * + * @return NV_TRUE if the two policies are equal, + * NV_FALSE otherwise + */ +static NvBool +rsSharePolicyEquals +( + const RS_SHARE_POLICY *pSharePolicyA, + const RS_SHARE_POLICY *pSharePolicyB +) +{ + if (pSharePolicyA == NULL || pSharePolicyB == NULL) + return NV_FALSE; + + if (pSharePolicyA->type != pSharePolicyB->type) + return NV_FALSE; + + if ((pSharePolicyA->action & RS_SHARE_ACTION_FLAG_REQUIRE) != + (pSharePolicyB->action & RS_SHARE_ACTION_FLAG_REQUIRE)) + { + return NV_FALSE; + } + + if (pSharePolicyA->type == RS_SHARE_TYPE_CLIENT) + { + return pSharePolicyA->target == pSharePolicyB->target; + } + + // Otherwise, ignore target entirely + return NV_TRUE; +} + +RS_SHARE_POLICY * +rsShareListLookup +( + RsShareList *pShareList, + RS_SHARE_POLICY *pSharePolicy +) +{ + RsShareListIter it; + + // + // Need to match a condition instead of just pValue, + // can't just use listLookup directly + // + it = listIterAll(pShareList); + while (listIterNext(&it)) + { + if (rsSharePolicyEquals(it.pValue, pSharePolicy)) + { + return it.pValue; + } + } + + return NULL; +} + +NV_STATUS +rsShareListInsert +( + RsShareList *pShareList, + RS_SHARE_POLICY *pSharePolicy, + RS_ACCESS_MASK *pAccessMask +) +{ + RS_ACCESS_MASK *pCurrentAccessMask; + RS_SHARE_POLICY *pCurrentPolicy; + RS_SHARE_POLICY *pNewPolicy; + + pCurrentPolicy = rsShareListLookup(pShareList, pSharePolicy); + if (pCurrentPolicy == NULL) + { + // Allocate and insert a share policy entry + pNewPolicy = listAppendValue(pShareList, pSharePolicy); + if (pNewPolicy == NULL) + { + return NV_ERR_NO_MEMORY; + } + + if (pAccessMask != NULL) + { + portMemCopy(pAccessMask, sizeof(*pAccessMask), + &pNewPolicy->accessMask, sizeof(pNewPolicy->accessMask)); + } + } + else + { + // Merge into existing share policy entry + pCurrentAccessMask = &pCurrentPolicy->accessMask; + RS_ACCESS_MASK_UNION(pCurrentAccessMask, &pSharePolicy->accessMask); + + if (pAccessMask != NULL) + { + portMemCopy(pAccessMask, sizeof(*pAccessMask), + pCurrentAccessMask, sizeof(*pCurrentAccessMask)); + } + } + + return NV_OK; +} + +void +rsShareListRemove +( + RsShareList *pShareList, + RS_SHARE_POLICY *pSharePolicy, + RS_ACCESS_MASK *pAccessMask +) +{ + RS_SHARE_POLICY *pCurrentPolicy; + RS_ACCESS_MASK *pCurrentAccessMask; + + pCurrentPolicy = rsShareListLookup(pShareList, pSharePolicy); + if (pCurrentPolicy != NULL) + { + // Revoke specified rights from found mask + pCurrentAccessMask = &pCurrentPolicy->accessMask; + RS_ACCESS_MASK_SUBTRACT(pCurrentAccessMask, &pSharePolicy->accessMask); + + // pCurrentAccessMask may not exist afterwards, so copy output first + if (pAccessMask != NULL) + { + portMemCopy(pAccessMask, sizeof(*pAccessMask), + pCurrentAccessMask, sizeof(*pCurrentAccessMask)); + } + + if (rsAccessMaskIsEmpty(pCurrentAccessMask)) + { + // No more rights shared under this policy, erase it from the list + listRemove(pShareList, pCurrentPolicy); + } + } + else + { + // No match, no rights to revoke, output empty mask + if (pAccessMask != NULL) + { + RS_ACCESS_MASK_CLEAR(pAccessMask); + } + } +} + +NV_STATUS +rsShareListCopy +( + RsShareList *pShareListDst, + RsShareList *pShareListSrc +) +{ + RsShareListIter it; + + if (pShareListSrc == NULL) + return NV_OK; + + it = listIterAll(pShareListSrc); + while (listIterNext(&it)) + { + if (NULL == listAppendValue(pShareListDst, it.pValue)) + return NV_ERR_NO_MEMORY; + } + + return NV_OK; +} + +RsShareList * +rsAccessGetActiveShareList +( + 
RsResourceRef *pResourceRef, + RsServer *pServer +) +{ + RsResourceRef *pSearchRef = pResourceRef; + + // Search up the tree for a resource with an edited share list + while (pSearchRef != NULL) + { + if (pSearchRef->bSharePolicyListModified) + return &pSearchRef->sharePolicyList; + + pSearchRef = pSearchRef->pParentRef; + } + + if (pServer != NULL) + return &pServer->defaultInheritedSharePolicyList; + + return NULL; +} + +/*! + * @brief Checks whether one access right can be granted on a resource + * + * This is a static helper function for rsAccessGrantRights. The pParentRights + * argument is not strictly necessary, but is used to avoid performing multiple + * identical lookups in a map. + * + * @param[in] pResourceRef + * @param[in] pCallContext + * @param[in] pInvokingClient + * @param[in] pParentRights The set of access rights held by the invoking client + * on the resource's parent + * @param[in] accessRight The access right to try to grant + * + * @return NV_OK if the access right can be granted, or an error otherwise + */ +static NV_STATUS +_rsAccessGrantCallback +( + RsResourceRef *pResourceRef, + CALL_CONTEXT *pCallContext, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pParentRights, + void *pAllocParams, + RsAccessRight accessRight +) +{ + const RS_ACCESS_INFO *pAccessRightInfo; + API_SECURITY_INFO *pSecInfo = NULL; + + NV_ASSERT_OR_RETURN(accessRight < RS_ACCESS_COUNT, NV_ERR_INVALID_ARGUMENT); + + pAccessRightInfo = &g_rsAccessMetadata[accessRight]; + + if (pCallContext != NULL) + { + pSecInfo = &pCallContext->secInfo; + } + else + { + NV_PRINTF(LEVEL_WARNING, "Called with NULL pCallContext, skipping permission checks\n"); + } + + // + // If the parent object has this access right, then we should be able to + // inherit it without doing any other checks + // + if ((pParentRights != NULL) && RS_ACCESS_MASK_TEST(pParentRights, accessRight)) + { + return NV_OK; + } + + if ((pSecInfo != NULL) && ((pAccessRightInfo->flags & RS_ACCESS_FLAG_ALLOW_PRIVILEGED) != 0)) + { + // Allow admin-privileged contexts + if (pSecInfo->privLevel >= RS_PRIV_LEVEL_USER_ROOT) + { + return NV_OK; + } + } + + if ((pSecInfo != NULL) && ((pAccessRightInfo->flags & RS_ACCESS_FLAG_ALLOW_KERNEL_PRIVILEGED) != 0)) + { + // Allow kernel-privileged contexts + if (pSecInfo->privLevel >= RS_PRIV_LEVEL_KERNEL) + { + return NV_OK; + } + } + + if ((pAccessRightInfo->flags & RS_ACCESS_FLAG_ALLOW_OWNER) != 0) + { + // Allow client this access right on itself + if (pResourceRef->hResource == pInvokingClient->hClient) + { + return NV_OK; + } + } + + // Finally, invoke the resource's access callback + if (resAccessCallback(pResourceRef->pResource, pInvokingClient, pAllocParams, accessRight)) + { + return NV_OK; + } + + // All attempts to grant access failed + return NV_ERR_INSUFFICIENT_PERMISSIONS; +} + + +/*! + * @brief Computes the list of access rights to attempt to grant on a resource + * + * This is a static helper function for rsAccessGrantRights. 
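+ *
+ * When no rights were explicitly requested, a new client (the only resource
+ * without a parent reference) requests every defined right, while any other
+ * resource inherits the invoking client's rights on its parent, plus any
+ * rights required for the allocation.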
+ * + * @param[in] pResourceRef + * @param[in] pInvokingClient + * @param[in] pRightsRequested The rights specified in the allocation parameters, + * or NULL if no access rights were explicitly requested + * @param[in] pRightsRequired Rights required for the allocation of this object + * to succeed, not used if rights were explicitly requested + * @param[out] pRightsToRequest The set of access rights that should be requested, + * based on input parameters provided + * + * @return NV_TRUE if access rights were explicitly requested, or + * NV_FALSE otherwise + */ +static NvBool +_rsAccessGetRightsToRequest +( + RsResourceRef *pResourceRef, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pRightsRequested, + const RS_ACCESS_MASK *pRightsRequired, + RS_ACCESS_MASK *pRightsToRequest +) +{ + NvBool bExplicitlyRequested; + + NV_ASSERT(pRightsToRequest != NULL); + RS_ACCESS_MASK_CLEAR(pRightsToRequest); + + if (pRightsRequested != NULL) + { + // A set of access rights was explicitly requested + bExplicitlyRequested = NV_TRUE; + + portMemCopy(pRightsToRequest, sizeof(*pRightsToRequest), + pRightsRequested, sizeof(*pRightsRequested)); + } + else + { + // No rights were explicitly requested + bExplicitlyRequested = NV_FALSE; + + if (pResourceRef->pParentRef == NULL) + { + // Only client resources don't have a parent reference + // Try to request all access rights for new clients + RS_ACCESS_MASK_FILL(pRightsToRequest); + } + else + { + // Inherit access rights from parent reference + RS_ACCESS_MASK *pParentRights = rsAccessLookup(pResourceRef->pParentRef, pInvokingClient); + if (pParentRights != NULL) + { + portMemCopy(pRightsToRequest, sizeof(*pRightsToRequest), + pParentRights, sizeof(*pParentRights)); + } + + // Add any required rights as well + if (pRightsRequired != NULL) + { + RS_ACCESS_MASK_UNION(pRightsToRequest, pRightsRequired); + } + } + } + + return bExplicitlyRequested; +} + +NV_STATUS +rsAccessGrantRights +( + RsResourceRef *pResourceRef, + CALL_CONTEXT *pCallContext, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pRightsRequested, + const RS_ACCESS_MASK *pRightsRequired, + void *pAllocParams +) +{ + NV_STATUS status; + NvBool bExplicitlyRequested; + RS_ACCESS_MASK rightsToRequest; + RS_ACCESS_MASK rightsShared; + RS_ACCESS_MASK *pResourceRights; + RS_ACCESS_MASK resourceRights; + RS_ACCESS_MASK *pParentRights = NULL; + RsAccessRight accessRight; + + // Determine which rights to request based on pRightsRequested + bExplicitlyRequested = _rsAccessGetRightsToRequest(pResourceRef, pInvokingClient, + pRightsRequested, pRightsRequired, + &rightsToRequest); + + // Return if nothing to grant + if (rsAccessMaskIsEmpty(&rightsToRequest)) + return NV_OK; + + // Find rights on the current resource + pResourceRights = rsAccessLookup(pResourceRef, pInvokingClient); + if (pResourceRights == NULL) + { + // + // When using grant for resources the client doesn't own, we don't modify the + // resource's mask, we only use a local mask to record which rights were available + // + RS_ACCESS_MASK_CLEAR(&resourceRights); + pResourceRights = &resourceRights; + } + + // Explicitly requesting to not get all required rights, cannot possibly succeed + if (bExplicitlyRequested && + (pRightsRequired != NULL) && + !rsAccessMaskIsSubset(&rightsToRequest, pRightsRequired)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + // Get rights on the parent resource to cache for _rsAccessGrantCallback + if (pResourceRef->pParentRef != NULL) + { + pParentRights = rsAccessLookup(pResourceRef->pParentRef, 
pInvokingClient);
+    }
+
+    // Get any rights shared with this client
+    _rsAccessGetSharedRights(pResourceRef, pInvokingClient, pCallContext, &rightsShared);
+
+    // Grant each access right in rightsToRequest
+    for (accessRight = 0; accessRight < RS_ACCESS_COUNT; accessRight++)
+    {
+        if (!RS_ACCESS_MASK_TEST(&rightsToRequest, accessRight))
+            continue;
+
+        if (RS_ACCESS_MASK_TEST(&rightsShared, accessRight))
+        {
+            status = NV_OK;
+        }
+        else
+        {
+            status = _rsAccessGrantCallback(pResourceRef, pCallContext, pInvokingClient,
+                                            pParentRights, pAllocParams, accessRight);
+        }
+
+        if (status == NV_OK)
+        {
+            RS_ACCESS_MASK_ADD(pResourceRights, accessRight);
+        }
+        else
+        {
+            //
+            // By default, failure to grant an access right is silently ignored;
+            // this is the behavior when no access rights were explicitly
+            // specified.
+            //
+            // In contrast, if access rights are explicitly requested (i.e. with
+            // the NvRmAllocWithAccess API), we return an error code when we fail to
+            // grant access rights.
+            //
+            if (bExplicitlyRequested)
+                return status;
+        }
+    }
+
+    // Fail if we could not get all required rights
+    if ((pRightsRequired != NULL) &&
+        !rsAccessMaskIsSubset(pResourceRights, pRightsRequired))
+    {
+        return NV_ERR_INSUFFICIENT_PERMISSIONS;
+    }
+
+    return NV_OK;
+}
diff --git a/src/nvidia/src/libraries/resserv/src/rs_access_rights.c b/src/nvidia/src/libraries/resserv/src/rs_access_rights.c
new file mode 100644
index 0000000..eca1b4d
--- /dev/null
+++ b/src/nvidia/src/libraries/resserv/src/rs_access_rights.c
@@ -0,0 +1,124 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvstatus.h"
+#include "nvtypes.h"
+#include "utils/nvassert.h"
+#include "nvctassert.h"
+
+#include "resserv/rs_access_rights.h"
+
+
+// Ensure the number of declared access rights is within the capacity
+// provided by the number of limbs used.
+// Also, the NVOC access_right type is currently NvU32; supporting more than
+// 32 bits would require an NVOC change.
+ct_assert(RS_ACCESS_COUNT <= SDK_RS_ACCESS_MAX_COUNT);
+
+
+#if !(RS_STANDALONE_TEST)
+const RS_ACCESS_INFO g_rsAccessMetadata[RS_ACCESS_COUNT] =
+{
+    // RS_ACCESS_DUP_OBJECT
+    {
+        RS_ACCESS_FLAG_ALLOW_OWNER
+    },
+
+    // RS_ACCESS_NICE
+    {
+        RS_ACCESS_FLAG_ALLOW_PRIVILEGED | RS_ACCESS_FLAG_UNCACHED_CHECK
+    },
+
+    // RS_ACCESS_DEBUG
+    {
+        RS_ACCESS_FLAG_ALLOW_OWNER
+    },
+
+    // RS_ACCESS_PERFMON
+    {
+        RS_ACCESS_FLAG_UNCACHED_CHECK
+    },
+};
+#endif /* RS_STANDALONE_TEST */
+
+
+NvBool
+rsAccessMaskIsSubset
+(
+    const RS_ACCESS_MASK *pRightsPresent,
+    const RS_ACCESS_MASK *pRightsRequired
+)
+{
+    RsAccessRight accessRight;
+
+    for (accessRight = 0; accessRight < RS_ACCESS_COUNT; accessRight++)
+    {
+        if (RS_ACCESS_MASK_TEST(pRightsRequired, accessRight) &&
+            !RS_ACCESS_MASK_TEST(pRightsPresent, accessRight))
+        {
+            return NV_FALSE;
+        }
+    }
+
+    return NV_TRUE;
+}
+
+
+NvBool
+rsAccessMaskIsEmpty
+(
+    const RS_ACCESS_MASK *pAccessMask
+)
+{
+    RsAccessRight accessRight;
+
+    for (accessRight = 0; accessRight < RS_ACCESS_COUNT; accessRight++)
+    {
+        if (RS_ACCESS_MASK_TEST(pAccessMask, accessRight))
+        {
+            return NV_FALSE;
+        }
+    }
+
+    return NV_TRUE;
+}
+
+
+void
+rsAccessMaskFromArray
+(
+    RS_ACCESS_MASK      *pAccessMask,
+    const RsAccessRight *pRightsArray,
+    NvLength             length
+)
+{
+    NvLength i;
+
+    RS_ACCESS_MASK_CLEAR(pAccessMask);
+
+    NV_ASSERT_OR_RETURN_VOID(pRightsArray != NULL);
+
+    for (i = 0; i < length; i++)
+    {
+        RS_ACCESS_MASK_ADD(pAccessMask, pRightsArray[i]);
+    }
+}
diff --git a/src/nvidia/src/libraries/resserv/src/rs_client.c b/src/nvidia/src/libraries/resserv/src/rs_client.c
new file mode 100644
index 0000000..62d9738
--- /dev/null
+++ b/src/nvidia/src/libraries/resserv/src/rs_client.c
@@ -0,0 +1,1845 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#include "nvlog_inc.h"
+#include "resserv/resserv.h"
+#include "resserv/rs_client.h"
+#include "resserv/rs_server.h"
+
+#if !(RS_STANDALONE)
+#include "os/os.h"
+#include "resserv/rs_access_map.h"
+#endif
+
+typedef enum
+{
+    ALLOC_NEW_RESOURCE,
+    ALLOC_SHARED_RESOURCE
+} ALLOC_TYPE;
+
+/**
+ * Allocate a new or shared resource in RM for this client
+ * @param[in]     pClient This client
+ * @param[in]     pServer The resource server instance
+ * @param[in]     pParams Parameters for the resource allocation
+ * @param[in,out] phResource Server will assign a handle if it is 0
+ */
+static NV_STATUS _clientAllocResourceHelper(RsClient *pClient, RsServer *pServer,
+                                            RS_RES_ALLOC_PARAMS_INTERNAL *pParams,
+                                            NvHandle *phResource);
+
+/**
+ * Add a resource reference to the client's resource hashmap
+ * @param[in]  pClient This client
+ * @param[in]  pServer The resource server that owns the resource ref
+ * @param[in]  pParentRef The resource's parent reference
+ * @param[in]  hResource The resource's handle
+ * @param[in]  classId The resource's class
+ * @param[out] ppResourceRef The new resource reference
+ */
+static NV_STATUS _clientConstructResourceRef(RsClient *pClient, RsServer *pServer, RsResourceRef *pParentRef,
+                                             NvHandle hResource, NvU32 classId, RsResourceRef **ppResourceRef);
+
+/**
+ * Release all CPU address mappings that reference this resource
+ *
+ * @param[in] pClient Client that owns the resource
+ * @param[in] pCallContext Caller information (which includes the resource reference
+ *                         whose mapping back references will be freed)
+ * @param[in] pLockInfo Information about which locks are already held, for recursive calls
+ */
+static NV_STATUS _clientUnmapBackRefMappings(RsClient *pClient, CALL_CONTEXT *pCallContext, RS_LOCK_INFO *pLockInfo);
+
+static void _clientUnmapInterMappings(RsClient *pClient, CALL_CONTEXT *pCallContext, RS_LOCK_INFO *pLockInfo);
+static void _clientUnmapInterBackRefMappings(RsClient *pClient, CALL_CONTEXT *pCallContext, RS_LOCK_INFO *pLockInfo);
+
+NV_STATUS
+clientConstruct_IMPL
+(
+    RsClient *pClient,
+    PORT_MEM_ALLOCATOR *pAllocator,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    NV_STATUS   status;
+    CLIENT_TYPE type;
+
+    if (pParams->pSecInfo->privLevel >= RS_PRIV_LEVEL_KERNEL)
+        type = CLIENT_TYPE_KERNEL;
+    else
+        type = CLIENT_TYPE_USER;
+
+    pClient->type = type;
+    pClient->hClient = pParams->hClient;
+
+    mapInit(&pClient->resourceMap, pAllocator);
+    listInitIntrusive(&pClient->pendingFreeList);
+
+    listInit(&pClient->accessBackRefList, pAllocator);
+
+    pClient->handleGenIdx = 0;
+    status = clientSetHandleGenerator(pClient, 0, 0);
+    if (status != NV_OK)
+        return status;
+
+    pClient->bActive = NV_TRUE;
+
+    status = clientSetRestrictedRange(pClient, 0, 0);
+    if (status != NV_OK)
+        return status;
+
+    return NV_OK;
+}
+
+NV_STATUS
+clientSetHandleGenerator_IMPL
+(
+    RsClient *pClient,
+    NvHandle handleRangeStart,
+    NvHandle handleRangeSize
+)
+{
+    //
+    // On vGPU, when a client uses RM-allocated handles, the NV01_DEVICE_0
+    // allocation that follows the NV01_ROOT client allocation increments
+    // handleGenIdx to 0x1. To avoid handle clashes, the default RM handle
+    // range is split between guest RM (0xcaf00000, 0xcaf3ffff) and host RM
+    // (0xcaf40000, 0xcaf80000). This overriding therefore has to be accepted
+    // here when the new range lies within the default RM handle range, even
+    // though a handle has already been generated.
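+    //
+    // For example (illustrative, assuming the default split above), a guest
+    // RM client could be shrunk to the guest half of the default range
+    // without invalidating handles it has already generated, subject to the
+    // bShrinkUnusedRange check below:
+    //
+    //   clientSetHandleGenerator(pClient, 0xcaf00000, 0x40000);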
+ // + NvBool bShrinkUnusedRange = ((pClient->handleRangeStart == handleRangeStart) && + (pClient->handleGenIdx <= handleRangeSize)); + + if (!((pClient->handleGenIdx == 0) || bShrinkUnusedRange)) + { + return NV_ERR_INVALID_STATE; + } + + if ((handleRangeStart == 0) && (handleRangeSize == 0)) + { + pClient->handleRangeStart = RS_UNIQUE_HANDLE_BASE; + pClient->handleRangeSize = RS_UNIQUE_HANDLE_RANGE; + } + else if ((handleRangeStart != 0) && (handleRangeSize != 0)) + { + pClient->handleRangeStart = handleRangeStart; + pClient->handleRangeSize = handleRangeSize; + } + else + { + return NV_ERR_INVALID_PARAMETER; + } + + return NV_OK; +} + +NV_STATUS clientCanShareResource_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef, + RS_SHARE_POLICY *pSharePolicy, + CALL_CONTEXT *pCallContext +) +{ + NV_STATUS status = NV_OK; + + RS_ACCESS_MASK rightsNeeded; + RS_ACCESS_MASK *pRightsHeld; + + // + // If sharing, check that the client has the rights it is trying to share + // Revoking does not require this to allow revoking all rights without checking + // + if (!(pSharePolicy->action & RS_SHARE_ACTION_FLAG_REVOKE)) + { + status = rsAccessCheckRights(pResourceRef, pClient, &pSharePolicy->accessMask); + if (status == NV_ERR_INSUFFICIENT_PERMISSIONS) + { + // Attempt to grant rights which aren't already owned + portMemCopy(&rightsNeeded, sizeof(rightsNeeded), + &pSharePolicy->accessMask, sizeof(pSharePolicy->accessMask)); + + pRightsHeld = rsAccessLookup(pResourceRef, pClient); + if (pRightsHeld != NULL) + { + // Skip trying to grant rights already held + RS_ACCESS_MASK_SUBTRACT(&rightsNeeded, pRightsHeld); + } + + status = rsAccessGrantRights(pResourceRef, pCallContext, pClient, + &rightsNeeded, // pRightsRequested + NULL, // pRightsRequired + NULL); // pAllocParams + } + } + + return status; +} + +NV_STATUS +clientShareResource_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef, + RS_SHARE_POLICY *pSharePolicy, + CALL_CONTEXT *pCallContext +) +{ + RsServer *pServer = NULL; + RsShareList *pActiveList; + NV_STATUS status; + + status = clientCanShareResource(pClient, pResourceRef, pSharePolicy, pCallContext); + if (status != NV_OK) + return status; + + if (!pResourceRef->bSharePolicyListModified) + { + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_COMPOSE) + { + if (pCallContext != NULL) + pServer = pCallContext->pServer; + + pActiveList = rsAccessGetActiveShareList(pResourceRef, pServer); + status = rsShareListCopy(&pResourceRef->sharePolicyList, pActiveList); + if (status != NV_OK) + return status; + } + + pResourceRef->bSharePolicyListModified = NV_TRUE; + } + + if (!(pSharePolicy->action & RS_SHARE_ACTION_FLAG_COMPOSE)) + { + listClear(&pResourceRef->sharePolicyList); + } + + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REVOKE) + { + rsShareListRemove(&pResourceRef->sharePolicyList, pSharePolicy, NULL); + } + else + { + status = rsShareListInsert(&pResourceRef->sharePolicyList, pSharePolicy, NULL); + } + + return status; +} + +NV_STATUS +clientShareResourceTargetClient_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef, + RS_SHARE_POLICY *pSharePolicy, + CALL_CONTEXT *pCallContext +) +{ + NV_STATUS status; + RS_ACCESS_MASK *pCurrentRights; + + // Special case: This should only be called when share policy is for own client + NV_ASSERT(pSharePolicy->type == RS_SHARE_TYPE_CLIENT); + NV_ASSERT(pSharePolicy->target == pClient->hClient); + + status = clientCanShareResource(pClient, pResourceRef, pSharePolicy, pCallContext); + if (status != NV_OK) + return status; + + 
pCurrentRights = rsAccessLookup(pResourceRef, pClient); + + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REVOKE) + { + RS_ACCESS_MASK_SUBTRACT(pCurrentRights, &pSharePolicy->accessMask); + } + else + { + RS_ACCESS_MASK_UNION(pCurrentRights, &pSharePolicy->accessMask); + } + + return NV_OK; +} + +NV_STATUS +clientSetRestrictedRange_IMPL +( + RsClient *pClient, + NvHandle handleRangeStart, + NvU32 handleRangeSize +) +{ + NvHandle hFirst = handleRangeStart; + NvHandle hLast; + + // Only allow modification if we haven't generated any handles + if (pClient->handleGenIdx != 0) + return NV_ERR_INVALID_STATE; + + if (handleRangeSize == 0) + { + if (handleRangeStart != 0) + return NV_ERR_INVALID_PARAMETER; + + pClient->handleRestrictRange = NV_RANGE_EMPTY; + return NV_OK; + } + + // Wrapping-around the reserved range is not supported + if (!portSafeAddU32(hFirst, handleRangeSize-1, &hLast)) + return NV_ERR_INVALID_REQUEST; + + pClient->handleRestrictRange = rangeMake(hFirst, hLast); + + return NV_OK; +} + +void clientDestruct_IMPL +( + RsClient *pClient +) +{ + NV_ASSERT(mapCount(&pClient->resourceMap) == 0); + mapDestroy(&pClient->resourceMap); + + NV_ASSERT(listCount(&pClient->accessBackRefList) == 0); + listDestroy(&pClient->accessBackRefList); +} + +NV_STATUS +clientGetResource_IMPL +( + RsClient *pClient, + NvHandle hResource, + NvU32 internalClassId, + RsResource **ppResource +) +{ + NV_STATUS status = NV_OK; + RsResourceRef *pResourceRef; + RsResource *pResource; + + pResourceRef = mapFind(&pClient->resourceMap, hResource); + if (pResourceRef == NULL) + { + status = NV_ERR_OBJECT_NOT_FOUND; + pResource = NULL; + goto done; + } + + if (pResourceRef->internalClassId != internalClassId) + { + status = NV_ERR_INVALID_CLASS; + pResource = NULL; + goto done; + } + + pResource = pResourceRef->pResource; + +done: + if (ppResource != NULL) + *ppResource = pResource; + + return status; +} + +NV_STATUS +clientGetResourceByRef_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef, + RsResource **ppResource +) +{ + if (ppResource != NULL) + *ppResource = pResourceRef->pResource; + + return NV_OK; +} + +NV_STATUS +clientGetResourceRef_IMPL +( + RsClient *pClient, + NvHandle hResource, + RsResourceRef **ppResourceRef +) +{ + RsResourceRef *pResourceRef; + + pResourceRef = mapFind(&pClient->resourceMap, hResource); + if (pResourceRef == NULL) + return NV_ERR_OBJECT_NOT_FOUND; + + if (ppResourceRef != NULL) + *ppResourceRef = pResourceRef; + + return NV_OK; +} + +NV_STATUS +clientGetResourceRefWithAccess_IMPL +( + RsClient *pClient, + NvHandle hResource, + const RS_ACCESS_MASK *pRightsRequired, + RsResourceRef **ppResourceRef +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + + status = clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + return status; + + status = rsAccessCheckRights(pResourceRef, pClient, pRightsRequired); + if (status != NV_OK) + return status; + + if (ppResourceRef != NULL) + *ppResourceRef = pResourceRef; + + return NV_OK; +} + +NV_STATUS +clientGetResourceRefByType_IMPL +( + RsClient *pClient, + NvHandle hResource, + NvU32 internalClassId, + RsResourceRef **ppResourceRef +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + + status = clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + return status; + + if (pResourceRef->internalClassId != internalClassId) + return NV_ERR_INVALID_OBJECT_HANDLE; + + if (ppResourceRef != NULL) + *ppResourceRef = pResourceRef; + + return NV_OK; +} + +NV_STATUS 
+clientValidate_IMPL +( + RsClient *pClient, + const API_SECURITY_INFO *pSecInfo +) +{ + return NV_OK; +} + +NV_STATUS +clientValidateLocks_IMPL +( + RsClient *pClient, + RsServer *pServer, + const CLIENT_ENTRY *pClientEntry +) +{ + NV_CHECK_OR_RETURN(LEVEL_SILENT, + pClientEntry->lockOwnerTid == portThreadGetCurrentThreadId(), + NV_ERR_INVALID_LOCK_STATE); + + return NV_OK; +} + +RS_PRIV_LEVEL +clientGetCachedPrivilege_IMPL +( + RsClient *pClient +) +{ + // Non-functional, base class stubs + return RS_PRIV_LEVEL_USER; +} + +NvBool +clientIsAdmin_IMPL +( + RsClient *pClient, + RS_PRIV_LEVEL privLevel +) +{ + // Non-functional, base class stubs + return NV_FALSE; +} + +NV_STATUS +clientAllocResource_IMPL +( + RsClient *pClient, + RsServer *pServer, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return _clientAllocResourceHelper(pClient, pServer, pParams, &pParams->hResource); +} + +NV_STATUS +clientCopyResource_IMPL +( + RsClient *pClientDst, + RsServer *pServer, + RS_RES_DUP_PARAMS_INTERNAL *pParams +) +{ + RS_RES_ALLOC_PARAMS_INTERNAL params; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + + RsResourceRef *pParentRef = NULL; + NV_STATUS status; + + status = clientGetResourceRef(pClientDst, pParams->hParentDst, &pParentRef); + if (status != NV_OK) + return status; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClientDst; + callContext.pResourceRef = pParams->pSrcRef; + callContext.pContextRef = pParentRef; + callContext.secInfo = *pParams->pSecInfo; + callContext.pLockInfo = pParams->pLockInfo; + + NV_ASSERT_OK_OR_RETURN(resservSwapTlsCallContext(&pOldContext, &callContext)); + + // + // Kernel clients are allowed to dup anything, unless they request otherwise. + // Also, if access rights are disabled, owner client should still be able to dup. 
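    // (NV04_DUP_HANDLE_FLAGS_REJECT_KERNEL_DUP_PRIVILEGE lets a kernel client
    // opt out of this blanket privilege and be checked like a user client.)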
+ // For anything else, check that the client has dup access on the object + // + if (((pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_KERNEL) || + (pParams->flags & NV04_DUP_HANDLE_FLAGS_REJECT_KERNEL_DUP_PRIVILEGE)) && + (pServer->bRsAccessEnabled || (pParams->pSrcClient->hClient != pClientDst->hClient))) + { + RS_ACCESS_MASK rightsRequired; + + portMemSet(&rightsRequired, 0, sizeof(rightsRequired)); + RS_ACCESS_MASK_ADD(&rightsRequired, RS_ACCESS_DUP_OBJECT); + + status = rsAccessCheckRights(pParams->pSrcRef, pClientDst, &rightsRequired); + } + else + { + // Server's globalInternalSharePolicyList applies Require policies even to kernel + RsShareListIter it = listIterAll(&pServer->globalInternalSharePolicyList); + while (listIterNext(&it)) + { + RS_SHARE_POLICY *pSharePolicy = it.pValue; + + // We only care about failing Require policies which apply to Dup, ignore everything else + if ((pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) && + RS_ACCESS_MASK_TEST(&pSharePolicy->accessMask, RS_ACCESS_DUP_OBJECT) && + !resShareCallback(pParams->pSrcRef->pResource, pClientDst, pParentRef, pSharePolicy)) + { + status = NV_ERR_INVALID_REQUEST; + break; + } + } + } + + NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext)); + + if (status != NV_OK) + return status; + + portMemSet(¶ms, 0, sizeof(params)); + + params.hClient = pClientDst->hClient; + params.hParent = pParams->hParentDst; + params.hResource = pParams->hResourceDst; + params.externalClassId = pParams->pSrcRef->externalClassId; + params.pSecInfo = pParams->pSecInfo; + + params.pClient = pClientDst; + params.pSrcClient = pParams->pSrcClient; + params.pSrcRef = pParams->pSrcRef; + params.pAllocParams = pParams->pShareParams; + params.pLockInfo = pParams->pLockInfo; + params.allocFlags = pParams->flags; + + return _clientAllocResourceHelper(pClientDst, pServer, ¶ms, &pParams->hResourceDst); +} + +static +void +_refCleanupDependencies +( + RsResourceRef *pResourceRef +) +{ + RsResourceRef **ppIndepRef; + while (NULL != (ppIndepRef = multimapFirstItem(&pResourceRef->depBackRefMap))) + { + refRemoveDependant(*ppIndepRef, pResourceRef); + } +} + +static +void +_refCleanupDependants +( + RsResourceRef *pResourceRef +) +{ + RsResourceRef **ppDepRef; + while (NULL != (ppDepRef = multimapFirstItem(&pResourceRef->depRefMap))) + { + refRemoveDependant(pResourceRef, *ppDepRef); + } +} + +static +void +_refRemoveAllDependencies +( + RsResourceRef *pResourceRef +) +{ + _refCleanupDependencies(pResourceRef); + + if (pResourceRef->pDependantSession != NULL) + sessionRemoveDependency(pResourceRef->pDependantSession, pResourceRef); + + if (pResourceRef->pSession != NULL) + sessionRemoveDependant(pResourceRef->pSession, pResourceRef); +} + +static +NV_STATUS +_clientAllocResourceHelper +( + RsClient *pClient, + RsServer *pServer, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + NvHandle *phResource +) +{ + NV_STATUS status; + NvHandle hResource = *phResource; + NvU32 depth = 0; + RsResource *pResource = NULL; + RsResourceRef *pParentRef = NULL; + RsResourceRef *pResourceRef = NULL; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + NvHandle hParent = pParams->hParent; + + status = clientGetResourceRef(pClient, hParent, &pParentRef); + if (status != NV_OK && hParent != pClient->hClient && hParent != 0) + return status; + + status = _clientConstructResourceRef(pClient, pServer, pParentRef, hResource, pParams->externalClassId, &pResourceRef); + if (status != NV_OK) + goto fail; + + portMemSet(&callContext, 0, sizeof(callContext)); + 
callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pResourceRef = pResourceRef; + callContext.pContextRef = pParams->pSrcRef; + callContext.pLockInfo = pParams->pLockInfo; + + if (pParams->pSecInfo == NULL) + { + status = NV_ERR_INVALID_ARGUMENT; + goto fail; + } + callContext.secInfo = *pParams->pSecInfo; + + NV_ASSERT_OK_OR_GOTO(status, + resservSwapTlsCallContext(&pOldContext, &callContext), fail); + + status = resservResourceFactory(pServer->pAllocator, &callContext, pParams, &pResource); + NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext)); + + if (status != NV_OK) + goto fail; + + // Clear free params implicitly set by constructor + resSetFreeParams(pResource, NULL, NULL); + pParams->pResourceRef = pResourceRef; + + // + // resConstruct_IMPL sets these fields but we need to set them again until + // Bug 2527351 is fixed + // + pResourceRef->pResource = pResource; + pResource->pResourceRef = pResourceRef; + + if (pParentRef != NULL) + { + depth = pParentRef->depth + 1; + pResourceRef->depth = depth; + + // Allow one additional level of depth to offset the depth used up by the RsClientResource at the root + // of the object hierarchy + if (RS_MAX_RESOURCE_DEPTH + 1 <= depth) + { + status = NV_ERR_ILLEGAL_ACTION; + goto fail; + } + + // Add this ref to the parent's child map + if (NV_OK != indexAdd(&pParentRef->childRefMap, pResourceRef->internalClassId, pResourceRef)) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto fail; + } + } + + if (pServer->bRsAccessEnabled) + { + status = rsAccessGrantRights(pResourceRef, &callContext, pClient, + pParams->pRightsRequested, + pParams->pRightsRequired, + pParams->pAllocParams); + if (status != NV_OK) + goto fail; + } + + *phResource = hResource; + + return NV_OK; + +fail: + if (pResource != NULL) + { + NV_STATUS callContextStatus; + + RS_RES_FREE_PARAMS_INTERNAL params; + pOldContext = NULL; + + // First undo dependency tracking since it might access the resource + _refRemoveAllDependencies(pResourceRef); + + portMemSet(¶ms, 0, sizeof(params)); + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.secInfo = *pParams->pSecInfo; + callContext.pResourceRef = pResourceRef; + callContext.pLockInfo = pParams->pLockInfo; + + callContextStatus = resservSwapTlsCallContext(&pOldContext, &callContext); + if (callContextStatus == NV_OK) + { + resSetFreeParams(pResource, &callContext, ¶ms); + + objDelete(pResource); + NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext)); + } + else + { + NV_PRINTF(LEVEL_ERROR, "Failed to set call context! 
Error: 0x%x\n", + callContextStatus); + } + + } + + if (pResourceRef != NULL) + { + if (pParentRef != NULL) + { + indexRemove(&pParentRef->childRefMap, pResourceRef->internalClassId, pResourceRef); + } + + clientDestructResourceRef(pClient, pServer, pResourceRef, pParams->pLockInfo, pParams->pSecInfo); + } + + return status; +} + +NV_STATUS +clientFreeResource_IMPL +( + RsClient *pClient, + RsServer *pServer, + RS_RES_FREE_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status = NV_OK; + NV_STATUS tmpStatus; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + RsResourceRef *pClientRef = NULL; + RsResourceRef *pParentRef = NULL; + RsResourceRef *pResourceRef; + RsResource *pResource; + + pResourceRef = mapFind(&pClient->resourceMap, pParams->hResource); + if (pResourceRef == NULL) + return NV_ERR_OBJECT_NOT_FOUND; + + if (refPendingFree(pResourceRef, pClient)) + listRemove(&pClient->pendingFreeList, pResourceRef); + + pResource = pResourceRef->pResource; + pParentRef = pResourceRef->pParentRef; + + if (pResourceRef->bInvalidated) + goto done; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pClient = pClient; + callContext.pResourceRef = pResourceRef; + callContext.pServer = pServer; + callContext.pLockInfo = pParams->pLockInfo; + + // Some MODS tests don't set secInfo. + if (pParams->pSecInfo != NULL) + callContext.secInfo = *pParams->pSecInfo; + + NV_ASSERT_OK_OR_GOTO(status, + resservSwapTlsCallContext(&pOldContext, &callContext), done); + + resSetFreeParams(pResource, &callContext, pParams); + + resPreDestruct(pResource); + + // Remove all CPU mappings + clientUnmapResourceRefMappings(pClient, &callContext, pParams->pLockInfo); + _clientUnmapBackRefMappings(pClient, &callContext, pParams->pLockInfo); + + // Remove all inter-mappings + _clientUnmapInterMappings(pClient, &callContext, pParams->pLockInfo); + _clientUnmapInterBackRefMappings(pClient, &callContext, pParams->pLockInfo); + + // Remove this resource as a dependency from other resources + pResourceRef->bInvalidated = NV_TRUE; + _refRemoveAllDependencies(pResourceRef); + + status = serverFreeResourceRpcUnderLock(pServer, pParams); + NV_ASSERT((status == NV_OK) || (status == NV_ERR_GPU_IN_FULLCHIP_RESET)); + + // NV_PRINTF(LEVEL_INFO, "hClient %x: Freeing hResource: %x\n", + // pClient->hClient, pResourceRef->hResource); + + objDelete(pResource); + + pResourceRef->pResource = NULL; + + NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext)); + +done: + if (!pParams->bInvalidateOnly) + { + // Remove this ref from its parent's child ref list + if (pParentRef != NULL) + { + multimapRemoveItemByKey(&pParentRef->childRefMap, + pResourceRef->internalClassId, pResourceRef->hResource); + } + + pClientRef = mapFind(&pClient->resourceMap, pClient->hClient); + if (pClientRef != NULL) + refUncacheRef(pClientRef, pResourceRef); + + tmpStatus = clientDestructResourceRef(pClient, pServer, pResourceRef, pParams->pLockInfo, pParams->pSecInfo); + NV_ASSERT(tmpStatus == NV_OK); + } + + return status; +} + +NV_STATUS +clientUnmapMemory_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef, + RS_LOCK_INFO *pLockInfo, + RsCpuMapping **ppCpuMapping, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + RsCpuMapping *pCpuMapping = *ppCpuMapping; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pClient = pClient; + callContext.pResourceRef = pResourceRef; + callContext.pLockInfo = pLockInfo; + + // Some MODS tests don't set secInfo. 
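+    // Treat a missing pointer as empty security info rather than failing the unmap.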
+ if (pSecInfo != NULL) + callContext.secInfo = *pSecInfo; + + NV_ASSERT_OK_OR_RETURN(resservSwapTlsCallContext(&pOldContext, &callContext)); + + status = resUnmap(pResourceRef->pResource, &callContext, pCpuMapping); + NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext)); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "hClient %x: Failed to unmap cpu mapping: hResource: %x error: 0x%x\n", + pClient->hClient, + pResourceRef->hResource, + status); + + if (pCpuMapping != NULL) + { + NV_PRINTF(LEVEL_ERROR, "hContext: %x\n", + (pCpuMapping->pContextRef == NULL) ? 0 : pCpuMapping->pContextRef->hResource); + } + } + + refRemoveMapping(pResourceRef, pCpuMapping); + *ppCpuMapping = NULL; + + return status; +} + +NV_STATUS +clientInterMap_IMPL +( + RsClient *pClient, + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RS_INTER_MAP_PARAMS *pParams +) +{ + return NV_ERR_INVALID_CLIENT; +} + +NV_STATUS +clientInterUnmap_IMPL +( + RsClient *pClient, + RsResourceRef *pMapperRef, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + return NV_ERR_INVALID_CLIENT; +} + +NV_STATUS +clientGenResourceHandle_IMPL +( + RsClient *pClient, + NvHandle *pHandle +) +{ + NvHandle hFirst; + NvHandle hResource; + NV_STATUS status; + + NV_ASSERT(pClient->handleRangeStart != 0); + NV_ASSERT(pClient->handleRangeSize != 0); + + hResource = pClient->handleRangeStart + ((pClient->handleGenIdx++) % pClient->handleRangeSize); + status = clientValidateNewResourceHandle(pClient, hResource, NV_FALSE); + if (status == NV_OK) + { + goto done; + } + + hFirst = hResource; + do + { + hResource = pClient->handleRangeStart + ((pClient->handleGenIdx++) % pClient->handleRangeSize); + status = clientValidateNewResourceHandle(pClient, hResource, NV_FALSE); + } while(hResource != hFirst && status != NV_OK); + + if (status != NV_OK) + return NV_ERR_INSUFFICIENT_RESOURCES; + +done: + NV_ASSERT(hResource - pClient->handleRangeStart < pClient->handleRangeSize); + + *pHandle = hResource; + return NV_OK; +} + +NV_STATUS +clientAssignResourceHandle_IMPL +( + RsClient *pClient, + NvHandle *phResource +) +{ + NV_STATUS status; + + if (phResource == NULL) + return NV_ERR_INVALID_ARGUMENT; + + if (*phResource == 0) + { + status = clientGenResourceHandle(pClient, phResource); + } + else + { + status = clientValidateNewResourceHandle(pClient, *phResource, NV_TRUE); + } + + return status; + +} + +static +NV_STATUS +_clientConstructResourceRef +( + RsClient *pClient, + RsServer *pServer, + RsResourceRef *pParentRef, + NvHandle hResource, + NvU32 externalClassId, + RsResourceRef **ppResourceRef +) +{ + PORT_MEM_ALLOCATOR *pAllocator = pServer->pAllocator; + RsResourceRef *pResourceRef = mapInsertNew(&pClient->resourceMap, hResource); + if (pResourceRef == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + if (!pClient->bResourceWarning && (mapCount(&pClient->resourceMap) >= RS_CLIENT_RESOURCE_WARNING_THRESHOLD)) + { + NV_PRINTF(LEVEL_WARNING, "Client 0x%08x has allocated a large number of resources. [Current classid: 0x%04x]\n", pClient->hClient, externalClassId); + NV_PRINTF(LEVEL_WARNING, "The client may be leaking resources. 
This warning can be ignored if the allocations were intentional.\n");
+        pClient->bResourceWarning = NV_TRUE;
+    }
+
+    pResourceRef->pClient = pClient;
+    pResourceRef->pResourceDesc = RsResInfoByExternalClassId(externalClassId);
+    pResourceRef->externalClassId = externalClassId;
+    pResourceRef->internalClassId = RsResInfoGetInternalClassId(pResourceRef->pResourceDesc);
+    pResourceRef->pResource = NULL;
+    pResourceRef->pParentRef = pParentRef;
+    pResourceRef->hResource = hResource;
+    pResourceRef->depth = 0;
+
+    multimapInit(&pResourceRef->childRefMap, pAllocator);
+    multimapInit(&pResourceRef->cachedRefMap, pAllocator);
+    multimapInit(&pResourceRef->depRefMap, pAllocator);
+    multimapInit(&pResourceRef->depBackRefMap, pAllocator);
+    listInit(&pResourceRef->cpuMappings, pAllocator);
+    listInitIntrusive(&pResourceRef->backRefs);
+    listInit(&pResourceRef->interMappings, pAllocator);
+    listInitIntrusive(&pResourceRef->interBackRefsContext);
+    listInitIntrusive(&pResourceRef->interBackRefsMappable);
+    listInit(&pResourceRef->sharePolicyList, pAllocator);
+
+    portAtomicExIncrementU64(&pServer->activeResourceCount);
+
+    *ppResourceRef = pResourceRef;
+    return NV_OK;
+}
+
+NV_STATUS
+clientDestructResourceRef_IMPL
+(
+    RsClient *pClient,
+    RsServer *pServer,
+    RsResourceRef *pResourceRef,
+    RS_LOCK_INFO *pLockInfo,
+    API_SECURITY_INFO *pSecInfo
+)
+{
+    NV_ASSERT(pResourceRef != NULL);
+    NV_ASSERT(listCount(&pResourceRef->backRefs) == 0);
+    NV_ASSERT(listCount(&pResourceRef->cpuMappings) == 0);
+    NV_ASSERT(listCount(&pResourceRef->interBackRefsMappable) == 0);
+    NV_ASSERT(listCount(&pResourceRef->interBackRefsContext) == 0);
+    NV_ASSERT(listCount(&pResourceRef->interMappings) == 0);
+
+    listDestroy(&pResourceRef->backRefs);
+    listDestroy(&pResourceRef->cpuMappings);
+    listDestroy(&pResourceRef->interBackRefsMappable);
+    listDestroy(&pResourceRef->interBackRefsContext);
+    listDestroy(&pResourceRef->interMappings);
+    listDestroy(&pResourceRef->sharePolicyList);
+
+    // All children should already have been freed
+    if (0 != multimapCountItems(&pResourceRef->childRefMap))
+    {
+        RS_RES_FREE_PARAMS_INTERNAL params;
+        NV_STATUS tmpStatus;
+
+#if !(RS_STANDALONE_TEST)
+        NV_ASSERT(0 == multimapCountItems(&pResourceRef->childRefMap));
+#endif
+
+        NV_PRINTF(LEVEL_ERROR, "Resource %x (Class %x) has unfreed children!\n",
+                  pResourceRef->hResource, pResourceRef->externalClassId);
+
+        RsIndexSupermapIter it = multimapSubmapIterAll(&pResourceRef->childRefMap);
+        while (multimapSubmapIterNext(&it))
+        {
+            RsIndexSubmap *pSubmap = it.pValue;
+            RsIndexIter subIt = multimapSubmapIterItems(&pResourceRef->childRefMap, pSubmap);
+            while (multimapItemIterNext(&subIt))
+            {
+                RsResourceRef *pChildRef = *subIt.pValue;
+                NV_PRINTF(LEVEL_ERROR, "Child %x (Class %x) is still alive!\n",
+                          pChildRef->hResource, pChildRef->externalClassId);
+
+                //
+                // Attempt to kill any leaked children. If they are not deleted here,
+                // they are likely to cause a use-after-free when this parent object
+                // is interacted with later.
+                //
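+                // The forced free below reuses the parent's pSecInfo and
+                // pLockInfo, mirroring the parameters a normal free of the
+                // child would have received.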
+                portMemSet(&params, 0, sizeof(params));
+                params.hClient = pChildRef->pClient->hClient;
+                params.hResource = pChildRef->hResource;
+                params.pResourceRef = pChildRef;
+                params.pSecInfo = pSecInfo;
+                params.pLockInfo = pLockInfo;
+                tmpStatus = clientFreeResource(pChildRef->pClient, pServer, &params);
+                NV_ASSERT(tmpStatus == NV_OK);
+            }
+        }
+    }
+    multimapDestroy(&pResourceRef->childRefMap);
+
+    // Nothing should be cached
+    NV_ASSERT(0 == multimapCountItems(&pResourceRef->cachedRefMap));
+    multimapDestroy(&pResourceRef->cachedRefMap);
+
+    _refCleanupDependencies(pResourceRef);
+    multimapDestroy(&pResourceRef->depBackRefMap);
+
+    _refCleanupDependants(pResourceRef);
+    multimapDestroy(&pResourceRef->depRefMap);
+
+    mapRemove(&pClient->resourceMap, pResourceRef);
+
+    portAtomicExDecrementU64(&pServer->activeResourceCount);
+
+    return NV_OK;
+}
+
+NV_STATUS
+clientUnmapResourceRefMappings
+(
+    RsClient *pClient,
+    CALL_CONTEXT *pCallContext,
+    RS_LOCK_INFO *pLockInfo
+)
+{
+    RsResourceRef *pResourceRef = pCallContext->pResourceRef;
+    RsCpuMapping *pCpuMapping;
+    NV_STATUS status;
+    RS_LOCK_INFO lockInfo;
+    RS_CPU_UNMAP_PARAMS params;
+
+    pCpuMapping = listHead(&pResourceRef->cpuMappings);
+    while(pCpuMapping != NULL)
+    {
+        portMemSet(&params, 0, sizeof(params));
+        portMemSet(&lockInfo, 0, sizeof(lockInfo));
+
+        params.hClient = pClient->hClient;
+        params.hDevice = (pCpuMapping->pContextRef == NULL)
+                       ? pClient->hClient
+                       : pCpuMapping->pContextRef->hResource;
+        params.hMemory = pResourceRef->hResource;
+        params.pLinearAddress = pCpuMapping->pLinearAddress;
+        params.processId = pCpuMapping->processId;
+        params.bTeardown = NV_TRUE;
+        params.flags = pCpuMapping->flags;
+        params.pSecInfo = &pCallContext->secInfo;
+        params.pLockInfo = &lockInfo;
+        lockInfo.pClient = pLockInfo->pClient;
+        lockInfo.state = pLockInfo->state;
+        lockInfo.flags = pLockInfo->flags;
+
+        // TODO: temp WAR for bug 2840284: deadlock during recursive free operation
+        lockInfo.flags |= RS_LOCK_FLAGS_NO_CLIENT_LOCK;
+
+        status = serverUnmap(pCallContext->pServer, params.hClient, params.hMemory, &params);
+
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "Failed to auto-unmap (status=0x%x) hClient %x: hResource: %x\n",
+                      status, pClient->hClient, pResourceRef->hResource);
+            NV_PRINTF(LEVEL_ERROR, "hContext: %x at addr " NvP64_fmt "\n",
+                      params.hDevice, params.pLinearAddress);
+
+            if (pCpuMapping == listHead(&pResourceRef->cpuMappings))
+            {
+#if !(RS_STANDALONE_TEST)
+                NV_ASSERT(0);
+#endif
+                refRemoveMapping(pResourceRef, pCpuMapping);
+            }
+        }
+        pCpuMapping = listHead(&pResourceRef->cpuMappings);
+    }
+
+    return NV_OK;
+}
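+
+// Illustrative sketch (placeholder names, not part of the interface): each
+// successful serverUnmap() above removes the mapping from the list, so the
+// loop re-reads listHead() instead of advancing an iterator, and forces a
+// removal itself only when a failed unmap would otherwise leave the same head
+// element in place forever:
+//
+//     while ((pElem = listHead(&list)) != NULL)
+//     {
+//         if (teardown(pElem) != NV_OK && pElem == listHead(&list))
+//             listRemove(&list, pElem); // guarantee forward progress
+//     }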
+
+NV_STATUS
+_clientUnmapBackRefMappings
+(
+    RsClient *pClient,
+    CALL_CONTEXT *pCallContext,
+    RS_LOCK_INFO *pLockInfo
+)
+{
+    NV_STATUS status;
+    RsResourceRef *pResourceRef = pCallContext->pResourceRef;
+    RsCpuMapping *pBackRefItem;
+    RS_LOCK_INFO lockInfo;
+    RS_CPU_UNMAP_PARAMS params;
+
+    pBackRefItem = listHead(&pResourceRef->backRefs);
+    while(pBackRefItem != NULL)
+    {
+        RsCpuMapping *pCpuMapping = pBackRefItem;
+        RsResourceRef *pBackRef = pCpuMapping->pResourceRef;
+
+        portMemSet(&params, 0, sizeof(params));
+        portMemSet(&lockInfo, 0, sizeof(lockInfo));
+
+        params.hClient = pClient->hClient;
+        params.hDevice = (pCpuMapping->pContextRef == NULL)
+                       ? pClient->hClient
+                       : pCpuMapping->pContextRef->hResource;
+        params.hMemory = pBackRef->hResource;
+        params.pLinearAddress = pCpuMapping->pLinearAddress;
+        params.processId = pCpuMapping->processId;
+        params.bTeardown = NV_TRUE;
+        params.flags = pCpuMapping->flags;
+        params.pSecInfo = &pCallContext->secInfo;
+        params.pLockInfo = &lockInfo;
+
+        lockInfo.pClient = pLockInfo->pClient;
+        lockInfo.state = pLockInfo->state;
+
+        status = serverUnmap(pCallContext->pServer, pClient->hClient, pBackRef->hResource, &params);
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "Failed to auto-unmap backref (status=0x%x) hClient %x: hResource: %x\n",
+                      status, pClient->hClient, pBackRef->hResource);
+            NV_PRINTF(LEVEL_ERROR, "hContext: %x at addr " NvP64_fmt "\n",
+                      params.hDevice, params.pLinearAddress);
+
+            if (pBackRefItem == listHead(&pResourceRef->backRefs))
+            {
+                NV_ASSERT(0);
+                listRemove(&pResourceRef->backRefs, pBackRefItem);
+            }
+        }
+
+        pBackRefItem = listHead(&pResourceRef->backRefs);
+    }
+
+    return NV_OK;
+}
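+
+// Descriptive note: a back reference records a CPU mapping against the context
+// object it was created under (see refAddMapping), so freeing the context also
+// tears down mappings whose owning resource outlives it. The helpers below
+// apply the same idea to inter-resource mappings.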
+
+static NV_STATUS
+_unmapInterMapping
+(
+    RsServer *pServer,
+    RsClient *pClient,
+    RsResourceRef *pMapperRef,
+    RsInterMapping *pMapping,
+    RS_LOCK_INFO *pLockInfo,
+    API_SECURITY_INFO *pSecInfo
+)
+{
+    RS_INTER_UNMAP_PARAMS params;
+    RS_LOCK_INFO lockInfo;
+    NV_STATUS status;
+
+    portMemSet(&params, 0, sizeof(params));
+    portMemSet(&lockInfo, 0, sizeof(lockInfo));
+
+    params.hClient = pClient->hClient;
+    params.hMapper = pMapperRef->hResource;
+    params.hDevice = pMapping->pContextRef->hResource;
+
+    // This is a bug: NVOS46 flags are passed to virtmemUnmap, which checks them against NVOS47 flags.
+    params.flags = pMapping->flags;
+    params.dmaOffset = pMapping->dmaOffset;
+    params.size = 0;
+    params.pMemDesc = pMapping->pMemDesc;
+    params.pSecInfo = pSecInfo;
+    params.pLockInfo = &lockInfo;
+
+    lockInfo.pClient = pLockInfo->pClient;
+    lockInfo.pContextRef = (pLockInfo->pContextRef != NULL)
+                         ? pLockInfo->pContextRef
+                         : pMapping->pContextRef;
+    lockInfo.state = pLockInfo->state;
+    lockInfo.flags = pLockInfo->flags;
+
+    status = serverUpdateLockFlagsForInterAutoUnmap(pServer, &params);
+    if (status != NV_OK)
+        return status;
+
+    return serverInterUnmap(pServer, &params);
+}
+
+void
+_clientUnmapInterMappings
+(
+    RsClient *pClient,
+    CALL_CONTEXT *pCallContext,
+    RS_LOCK_INFO *pLockInfo
+)
+{
+    NV_STATUS status;
+    RsResourceRef *pMapperRef = pCallContext->pResourceRef;
+    RsInterMapping *pMapping;
+
+    pMapping = listHead(&pMapperRef->interMappings);
+    while (pMapping != NULL)
+    {
+        status = _unmapInterMapping(pCallContext->pServer, pClient, pMapperRef,
+                                    pMapping, pLockInfo, &pCallContext->secInfo);
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "Failed to auto-unmap (status=0x%x) hClient %x: hMapper: %x\n",
+                      status, pClient->hClient, pMapperRef->hResource);
+            NV_PRINTF(LEVEL_ERROR, "hMappable: %x hContext: %x\n",
+                      pMapping->pMappableRef->hResource, pMapping->pContextRef->hResource);
+
+            if (pMapping == listHead(&pMapperRef->interMappings))
+            {
+                NV_ASSERT(0);
+                refRemoveInterMapping(pMapperRef, pMapping);
+            }
+        }
+
+        pMapping = listHead(&pMapperRef->interMappings);
+    }
+}
+
+void
+_clientUnmapInterBackRefMappings
+(
+    RsClient *pClient,
+    CALL_CONTEXT *pCallContext,
+    RS_LOCK_INFO *pLockInfo
+)
+{
+    NV_STATUS status;
+    RsInterMapping *pBackRefItem;
+
+    RsResourceRef *pResourceRef = pCallContext->pResourceRef;
+    NvBool bSwitched = NV_FALSE;
+
+    pBackRefItem = listHead(&(pResourceRef->interBackRefsMappable));
+    if (pBackRefItem == NULL)
+    {
+        bSwitched = NV_TRUE;
+        pBackRefItem = listHead(&(pResourceRef->interBackRefsContext));
+    }
+    while (pBackRefItem != NULL)
+    {
+        RsResourceRef *pMapperRef = pBackRefItem->pMapperRef;
+        RsInterMapping *pMapping = pBackRefItem;
+
+        status = _unmapInterMapping(pCallContext->pServer, pClient, pMapperRef,
+                                    pMapping, pLockInfo, &pCallContext->secInfo);
+        if (status != NV_OK)
+        {
+            RsInterMapping *pCurHead = bSwitched ? listHead(&(pResourceRef->interBackRefsContext)) :
+                                                   listHead(&(pResourceRef->interBackRefsMappable));
+
+            NV_PRINTF(LEVEL_ERROR, "Failed to auto-unmap backref (status=0x%x) hClient %x: hMapper: %x\n",
+                      status, pClient->hClient, pMapperRef->hResource);
+            NV_PRINTF(LEVEL_ERROR, "hMappable: %x hContext: %x\n",
+                      pMapping->pMappableRef->hResource, pMapping->pContextRef->hResource);
+
+            if (pBackRefItem == pCurHead)
+            {
+                NV_ASSERT(0);
+                refRemoveInterMapping(pMapperRef, pMapping);
+            }
+        }
+
+        pBackRefItem = bSwitched ?
listHead(&(pResourceRef->interBackRefsContext)) : + listHead(&(pResourceRef->interBackRefsMappable)); + + if (pBackRefItem == NULL && (!bSwitched)) + { + bSwitched = NV_TRUE; + pBackRefItem = listHead(&(pResourceRef->interBackRefsContext)); + } + } +} + +NV_STATUS +indexAdd +( + RsIndex *pIndex, + NvU32 index, + RsResourceRef *pResourceRef +) +{ + NV_ASSERT(pResourceRef != NULL && pResourceRef->hResource != 0); + + if (NULL == multimapFindSubmap(pIndex, index)) + { + if (NULL == multimapInsertSubmap(pIndex, index)) + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + if (NULL == multimapInsertItemValue(pIndex, index, pResourceRef->hResource, + &pResourceRef)) + return NV_ERR_INSUFFICIENT_RESOURCES; + + return NV_OK; +} + +NV_STATUS +indexRemove +( + RsIndex *pIndex, + NvU32 index, + RsResourceRef *pResourceRef +) +{ + RsResourceRef **ppResourceRef; + + NV_ASSERT(pResourceRef != NULL && pResourceRef->hResource != 0); + + ppResourceRef = multimapFindItem(pIndex, index, pResourceRef->hResource); + if (ppResourceRef == NULL) + return NV_ERR_OBJECT_NOT_FOUND; + + multimapRemoveItem(pIndex, ppResourceRef); + + return NV_OK; +} + +NV_STATUS +clientValidateNewResourceHandle_IMPL +( + RsClient *pClient, + NvHandle hResource, + NvBool bRestrict +) +{ + // + // Resource handle should not be the same as the client handle + // because some control calls pass hClient in the hObject field + // + if (pClient->hClient == hResource || hResource == 0) + return NV_ERR_INVALID_OBJECT_HANDLE; + + if (bRestrict && !rangeIsEmpty(pClient->handleRestrictRange)) + { + NV_RANGE requestedRange = rangeMake(hResource, hResource); + if (rangeContains(pClient->handleRestrictRange, requestedRange)) + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + if (clientGetResourceRef(pClient, hResource, NULL) == NV_OK) + return NV_ERR_INSERT_DUPLICATE_NAME; + + return NV_OK; +} + +NV_STATUS +clientresConstruct_IMPL +( + RsClientResource *pClientRes, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsClient *pClient = pCallContext->pClient; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + + // Client proxy resource must have the same handle as its client + if (pClient->hClient != pResourceRef->hResource) + return NV_ERR_INVALID_OBJECT_HANDLE; + + pClientRes->pClient = pCallContext->pClient; + return NV_OK; +} + +void +clientresDestruct_IMPL +( + RsClientResource *pClientRes +) +{ +} + +RsIndexIter +indexRefIter +( + RsIndex *pIndex, + NvU32 index +) +{ + RsIndexIter it; + RsIndexSubmap *pSubmap; + + portMemSet(&it, 0, sizeof(it)); + NV_ASSERT(pIndex); + + pSubmap = multimapFindSubmap(pIndex, index); + if (pSubmap != NULL) + it = multimapSubmapIterItems(pIndex, pSubmap); + + return it; +} + +RsIndexIter +indexRefIterAll +( + RsIndex *pIndex +) +{ + NV_ASSERT(pIndex); + return multimapItemIterAll(pIndex); +} + +NvBool +indexRefIterNext +( + RsIndexIter *pIt +) +{ + return multimapItemIterNext(pIt); +} + +RS_ITERATOR +clientRefIter +( + RsClient *pClient, + RsResourceRef *pScopeRef, + NvU32 internalClassId, + RS_ITER_TYPE type, + NvBool bExactMatch +) +{ + RS_ITERATOR it; + RsIndex *pIndex = NULL; + NvBool bChildrenOnly = (type == RS_ITERATE_CHILDREN); + NvBool bCachedOnly = (type == RS_ITERATE_CACHED); + NvBool bDependantsOnly = (type == RS_ITERATE_DEPENDANTS); + portMemSet(&it, 0, sizeof(it)); + + if (pClient == NULL) + { + NV_ASSERT(0); + return it; + } + + if (pScopeRef == NULL) + { + if (NV_OK != clientGetResourceRef(pClient, pClient->hClient, &pScopeRef)) + return it; + } + + if (bChildrenOnly || 
bCachedOnly || bDependantsOnly) + { + NvBool bIterAll = (internalClassId == 0) || !bExactMatch; + + if (bChildrenOnly) + { + pIndex = &pScopeRef->childRefMap; + } + else if (bCachedOnly) + { + pIndex = &pScopeRef->cachedRefMap; + } + else if (bDependantsOnly) + { + pIndex = &pScopeRef->depRefMap; + } + + if (!bIterAll && multimapFindSubmap(pIndex, internalClassId) == NULL) + goto done; + + it.idxIt = (bIterAll) + ? indexRefIterAll(pIndex) + : indexRefIter(pIndex, internalClassId); + } + else + { + // Match descendants of the scope resource (specific class / any class) + it.mapIt = mapIterAll(&pClient->resourceMap); + } + + it.pClient = pClient; + it.pScopeRef = pScopeRef; + it.internalClassId = internalClassId; + it.type = type; + it.bExactMatch = bExactMatch; + +done: + return it; +} + +RS_ORDERED_ITERATOR +clientRefOrderedIter +( + RsClient *pClient, + RsResourceRef *pScopeRef, + NvU32 internalClassId, + NvBool bExactMatch +) +{ + RS_ORDERED_ITERATOR it; + RsIndex *pIndex = NULL; + portMemSet(&it, 0, sizeof(it)); + + if (pClient == NULL) + { + NV_ASSERT(0); + return it; + } + + if (pScopeRef == NULL) + { + if (NV_OK != clientGetResourceRef(pClient, pClient->hClient, &pScopeRef)) + return it; + } + + it.depth = -1; + pIndex = &pScopeRef->childRefMap; + it.idxIt[0] = indexRefIterAll(pIndex); + + it.pClient = pClient; + it.pScopeRef = pScopeRef; + it.internalClassId = internalClassId; + it.bExactMatch = bExactMatch; + + return it; +} + +NvBool +clientRefOrderedIterNext +( + RsClient *pClient, + RS_ORDERED_ITERATOR *pIt +) +{ + RsResourceRef *pResourceRef; + NvBool bNext; + + if ((pIt == NULL) || (pIt->pClient != pClient) || pIt->pScopeRef == NULL) + { + // Iterator not initialized or nothing to iterate over + NV_ASSERT(pIt != NULL && pIt->pClient == NULL); + return NV_FALSE; + } + + // Iterate over the scope reference if the scope is not the client + if (pIt->depth == -1) + { + pIt->depth = 0; + if ((pIt->pScopeRef->hResource != pIt->pClient->hClient) && + ((pIt->internalClassId == 0) || (pIt->internalClassId == pIt->pScopeRef->internalClassId)) && + (pIt->pScopeRef->pResource != NULL)) + { + pIt->pResourceRef = pIt->pScopeRef; + return NV_TRUE; + } + } + + pIt->pResourceRef = NULL; + + bNext = NV_TRUE; + while (1) + { + // Get the next sibling, or else backtrack to parent and get its next sibling + do + { + if (!bNext) + --pIt->depth; + bNext = indexRefIterNext(&pIt->idxIt[pIt->depth]); + } while (!bNext && pIt->depth != 0); + + if (!bNext) + break; + + pResourceRef = *pIt->idxIt[pIt->depth].pValue; + + // Iterate over this resource's children next (up to max depth) + if (pIt->depth < RS_MAX_RESOURCE_DEPTH) + { + ++pIt->depth; + pIt->idxIt[pIt->depth] = indexRefIterAll(&pResourceRef->childRefMap); + } + + if (refHasAncestor(pResourceRef, pIt->pScopeRef)) + { + NvBool bMatch = NV_TRUE; + if (pIt->internalClassId != 0) + { + if (pIt->bExactMatch && (pIt->internalClassId != pResourceRef->internalClassId)) + bMatch = NV_FALSE; + + if (!pIt->bExactMatch && objDynamicCastById(pResourceRef->pResource, pIt->internalClassId) == NULL) + bMatch = NV_FALSE; + } + + if (bMatch && (pResourceRef->pResource != NULL)) + { + pIt->pResourceRef = pResourceRef; + return NV_TRUE; + } + } + } + + return NV_FALSE; +} + +NvBool +clientRefIterNext +( + RsClient *pClient, + RS_ITERATOR *pIt +) +{ + RsResourceRef *pResourceRef; + NvBool bLoop; + NvBool bUseIdx; + + if ((pIt == NULL) || (pIt->pClient != pClient) || pIt->pScopeRef == NULL) + { + // Iterator not initialized or nothing to iterate over + NV_ASSERT(pIt != 
NULL && pIt->pClient == NULL);
+        return NV_FALSE;
+    }
+
+    bUseIdx = (pIt->type == RS_ITERATE_CACHED) ||
+              (pIt->type == RS_ITERATE_CHILDREN) ||
+              (pIt->type == RS_ITERATE_DEPENDANTS);
+
+    pIt->pResourceRef = NULL;
+
+    bLoop = bUseIdx ? indexRefIterNext(&pIt->idxIt) : mapIterNext(&pIt->mapIt);
+    while (bLoop)
+    {
+        pResourceRef = bUseIdx ? *pIt->idxIt.pValue : pIt->mapIt.pValue;
+
+        if (bUseIdx ||
+            ((pResourceRef == pIt->pScopeRef) ||
+             (refHasAncestor(pResourceRef, pIt->pScopeRef))))
+        {
+            NvBool bMatch = NV_TRUE;
+            if (pIt->internalClassId != 0)
+            {
+                if (pIt->bExactMatch && (pIt->internalClassId != pResourceRef->internalClassId))
+                    bMatch = NV_FALSE;
+
+                if (!pIt->bExactMatch && objDynamicCastById(pResourceRef->pResource, pIt->internalClassId) == NULL)
+                    bMatch = NV_FALSE;
+            }
+
+            if (bMatch && (pResourceRef->pResource != NULL))
+            {
+                pIt->pResourceRef = pResourceRef;
+                return NV_TRUE;
+            }
+        }
+
+        bLoop = bUseIdx ? indexRefIterNext(&pIt->idxIt) : mapIterNext(&pIt->mapIt);
+    }
+
+    return NV_FALSE;
+}
+
+NV_STATUS
+clientPostProcessPendingFreeList_IMPL
+(
+    RsClient *pClient,
+    RsResourceRef **ppFirstLowPriRef
+)
+{
+    if (ppFirstLowPriRef != NULL)
+        *ppFirstLowPriRef = NULL;
+
+    return NV_OK;
+}
+
+NV_STATUS
+clientAddAccessBackRef_IMPL
+(
+    RsClient *pClient,
+    RsResourceRef *pResourceRef
+)
+{
+    AccessBackRef *pAccessBackRef = listPrependNew(&pClient->accessBackRefList);
+
+    if (pAccessBackRef == NULL)
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+
+    pAccessBackRef->hClient = pResourceRef->pClient->hClient;
+    pAccessBackRef->hResource = pResourceRef->hResource;
+
+    return NV_OK;
+}
+
+void clientFreeAccessBackRefs_IMPL
+(
+    RsClient *pClient,
+    RsServer *pServer
+)
+{
+    AccessBackRef *pAccessBackRef;
+    NV_STATUS status;
+
+    while ((pAccessBackRef = listHead(&pClient->accessBackRefList)) != NULL)
+    {
+        RsClient *pSharedClient;
+
+        //
+        // Remove access rights entry if client/resource pair is still in use
+        // so that another client doesn't get unauthorized access to them
+        //
+        status = serverGetClientUnderLock(pServer, pAccessBackRef->hClient, &pSharedClient);
+        if (status == NV_OK)
+        {
+            RsResourceRef *pResourceRef;
+
+            status = clientGetResourceRef(pSharedClient, pAccessBackRef->hResource, &pResourceRef);
+            if (status == NV_OK)
+            {
+                RS_SHARE_POLICY revokePolicy;
+
+                revokePolicy.type = RS_SHARE_TYPE_CLIENT;
+                revokePolicy.target = pClient->hClient;
+                revokePolicy.action = RS_SHARE_ACTION_FLAG_REVOKE;
+                RS_ACCESS_MASK_FILL(&revokePolicy.accessMask);
+
+                // Check the resource's share policy for matching client policies
+                rsShareListRemove(&pResourceRef->sharePolicyList, &revokePolicy, NULL);
+            }
+        }
+
+        listRemove(&pClient->accessBackRefList, pAccessBackRef);
+    }
+}
diff --git a/src/nvidia/src/libraries/resserv/src/rs_domain.c b/src/nvidia/src/libraries/resserv/src/rs_domain.c
new file mode 100644
index 0000000..bbbc84d
--- /dev/null
+++ b/src/nvidia/src/libraries/resserv/src/rs_domain.c
@@ -0,0 +1,52 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlog_inc.h" +#include "resserv/resserv.h" +#include "resserv/rs_domain.h" + +#if !(RS_STANDALONE) +#include "os/os.h" +#endif + +NV_STATUS +domainConstruct +( + RsDomain *pDomain, + PORT_MEM_ALLOCATOR *pAllocator, + NvHandle hDomain, + NvHandle hParentDomain, + ACCESS_CONTROL *pAccessControl +) +{ + return NV_OK; +} + +NV_STATUS +domainDestruct +( + RsDomain *pDomain +) +{ + return NV_OK; +} diff --git a/src/nvidia/src/libraries/resserv/src/rs_resource.c b/src/nvidia/src/libraries/resserv/src/rs_resource.c new file mode 100644 index 0000000..c5f6462 --- /dev/null +++ b/src/nvidia/src/libraries/resserv/src/rs_resource.c @@ -0,0 +1,750 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#define NVOC_RS_RESOURCE_H_PRIVATE_ACCESS_ALLOWED + +#include "nvlog_inc.h" +#include "resserv/resserv.h" +#include "resserv/rs_resource.h" +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" + +#if !(RS_STANDALONE) +#include "os/os.h" +#endif + +NV_STATUS +resConstruct_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsResourceRef *pResourceRef; + + if (pCallContext == NULL) + { + return NV_OK; + } + + pResourceRef = pCallContext->pResourceRef; + + pResource->bConstructed = NV_TRUE; + + // Init pResourceRef->pResource so iteration APIs work during ctor + pResourceRef->pResource = pResource; + + // Init back-ref so we can use during ctor + pResource->pResourceRef = pResourceRef; + + // Set context for free in case a chained constructor fails. + resSetFreeParams(pResource, pCallContext, NULL); + + // NV_PRINTF(LEVEL_INFO, "Constructing resource with external class: 0x%x\n", pParams->externalClassId); + + return NV_OK; +} + +void +resPreDestruct_IMPL +( + RsResource *pResource +) +{ +} + +void +resDestruct_IMPL +( + RsResource *pResource +) +{ + if (!pResource->bConstructed) + { + return; + } + + // NV_PRINTF(LEVEL_INFO, "Freeing resource: " NvP64_fmt "\n", NV_PTR_TO_NvP64(pResource)); +} + +NV_STATUS +resSetFreeParams_IMPL(RsResource *pResource, CALL_CONTEXT *pCallContext, RS_RES_FREE_PARAMS_INTERNAL *pParams) +{ + if (!pResource->bConstructed) + { + return NV_OK; + } + + pResource->dtorParams.pFreeContext = pCallContext; + pResource->dtorParams.pFreeParams = pParams; + + return NV_OK; +} + +NV_STATUS +resGetFreeParams_IMPL(RsResource *pResource, CALL_CONTEXT **ppCallContext, RS_RES_FREE_PARAMS_INTERNAL **ppParams) +{ + if (ppCallContext != NULL) + *ppCallContext = pResource->dtorParams.pFreeContext; + + if (ppParams != NULL) + *ppParams = pResource->dtorParams.pFreeParams; + + return NV_OK; +} + +NV_STATUS resControlLookup_IMPL +( + RsResource *pResource, + RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams, + const struct NVOC_EXPORTED_METHOD_DEF **ppEntry +) +{ + const struct NVOC_EXPORTED_METHOD_DEF *pEntry; + NvU32 cmd = pRsParams->cmd; + + *ppEntry = NULL; + pEntry = objGetExportedMethodDef(staticCast(objFullyDerive(pResource), Dynamic), cmd); + + if (pEntry == NULL) + return NV_ERR_NOT_SUPPORTED; + + if ((pEntry->paramSize != 0) && (pRsParams->paramsSize != pEntry->paramSize)) + { + NV_PRINTF(LEVEL_NOTICE, + "hObject 0x%08x, cmd 0x%08x: bad paramsize %d, expected %d\n", + RES_GET_HANDLE(pResource), pRsParams->cmd, + (int)pRsParams->paramsSize, + (int)pEntry->paramSize); + + return NV_ERR_INVALID_PARAM_STRUCT; + } + + *ppEntry = pEntry; + return NV_OK; +} + +typedef NV_STATUS (*CONTROL_EXPORT_FNPTR)(void*, void*); +typedef NV_STATUS (*CONTROL_EXPORT_FNPTR_NO_PARAMS)(void*); + +NV_STATUS +resControl_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams +) +{ + RsServer *pServer = pCallContext->pServer; + const struct NVOC_EXPORTED_METHOD_DEF *pEntry; + NV_STATUS status; + Dynamic *pDynamicObj; + NvU32 releaseFlags = 0; + LOCK_ACCESS_TYPE access = LOCK_ACCESS_WRITE; + + status = resControlLookup(pResource, pRsParams, &pEntry); + if (status != NV_OK) + { + if (status == NV_WARN_NOTHING_TO_DO) + return NV_OK; + return status; + } + + NV_ASSERT_OR_RETURN(pEntry != NULL, NV_ERR_NOT_SUPPORTED); + + // Initialize the execution cookie + serverControl_InitCookie(pEntry, pRsParams->pCookie); + + status = resControlFilter(pResource, pCallContext, pRsParams); + if (status 
!= NV_OK)
+        return status;
+
+    status = serverControl_Prologue(pServer, pRsParams, &access, &releaseFlags);
+    if (status != NV_OK)
+        return status;
+
+    status = resControlSerialization_Prologue(pResource, pCallContext, pRsParams);
+    if (status != NV_OK)
+        goto done;
+
+    status = resControl_Prologue(pResource, pCallContext, pRsParams);
+    if ((status != NV_OK) && (status != NV_WARN_NOTHING_TO_DO))
+        goto done;
+
+    pDynamicObj = objDynamicCastById(pResource, pEntry->pClassInfo->classId);
+
+    if (status == NV_WARN_NOTHING_TO_DO)
+    {
+        // Call handled by the prologue.
+        status = NV_OK;
+    }
+    else
+    {
+        // Dispatch on paramSize: a zero size means the exported method
+        // takes only one parameter (pResource).
+        if (pEntry->paramSize == 0)
+        {
+            CONTROL_EXPORT_FNPTR_NO_PARAMS pFunc = ((CONTROL_EXPORT_FNPTR_NO_PARAMS) pEntry->pFunc);
+            status = pFunc(pDynamicObj);
+        }
+        else
+        {
+            CONTROL_EXPORT_FNPTR pFunc = ((CONTROL_EXPORT_FNPTR) pEntry->pFunc);
+
+            status = pFunc(pDynamicObj, pRsParams->pParams);
+        }
+    }
+
+    resControl_Epilogue(pResource, pCallContext, pRsParams);
+
+done:
+    resControlSerialization_Epilogue(pResource, pCallContext, pRsParams);
+    status = serverControl_Epilogue(pServer, pRsParams, access, &releaseFlags, status);
+
+    return status;
+}
+
+NV_STATUS
+resControlFilter_IMPL
+(
+    RsResource *pResource,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pParams
+)
+{
+    return NV_OK;
+}
+
+NV_STATUS
+resControlSerialization_Prologue_IMPL
+(
+    RsResource *pResource,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pParams
+)
+{
+    return NV_OK;
+}
+
+void
+resControlSerialization_Epilogue_IMPL
+(
+    RsResource *pResource,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pParams
+)
+{
+}
+
+NV_STATUS
+resControl_Prologue_IMPL
+(
+    RsResource *pResource,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pParams
+)
+{
+    return NV_OK;
+}
+
+void
+resControl_Epilogue_IMPL
+(
+    RsResource *pResource,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pParams
+)
+{
+    return;
+}
+
+NvU32 resGetRefCount_IMPL
+(
+    RsResource *pResource
+)
+{
+    return 1;
+}
+
+NV_STATUS
+resMap_IMPL
+(
+    RsResource *pResource,
+    CALL_CONTEXT *pCallContext,
+    RS_CPU_MAP_PARAMS *pParams,
+    RsCpuMapping *pCpuMapping
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS
+resUnmap_IMPL
+(
+    RsResource *pResource,
+    CALL_CONTEXT *pCallContext,
+    RsCpuMapping *pCpuMapping
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS
+resMapTo_IMPL
+(
+    RsResource *pResource,
+    RS_RES_MAP_TO_PARAMS *pParams
+)
+{
+    return NV_ERR_INVALID_OBJECT_HANDLE;
+}
+
+NV_STATUS
+resUnmapFrom_IMPL
+(
+    RsResource *pResource,
+    RS_RES_UNMAP_FROM_PARAMS *pParams
+)
+{
+    return NV_ERR_INVALID_OBJECT_HANDLE;
+}
+
+NvBool
+resCanCopy_IMPL
+(
+    RsResource *pResource
+)
+{
+    return NV_FALSE;
+}
+
+NV_STATUS
+resIsDuplicate_IMPL
+(
+    RsResource *pResource,
+    NvHandle hMemory,
+    NvBool *pDuplicate
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NvBool
+resAccessCallback_IMPL
+(
+    RsResource *pResource,
+    RsClient *pInvokingClient,
+    void *pAllocParams,
+    RsAccessRight accessRight
+)
+{
+    return NV_FALSE;
+}
+
+NvBool
+resShareCallback_IMPL
+(
+    RsResource *pResource,
+    RsClient *pInvokingClient,
+    RsResourceRef *pParentRef,
+    RS_SHARE_POLICY *pSharePolicy
+)
+{
+    switch (pSharePolicy->type)
+    {
+        case RS_SHARE_TYPE_ALL:
+            return NV_TRUE;
+        case RS_SHARE_TYPE_CLIENT:
+            if (pSharePolicy->target == pInvokingClient->hClient)
+                return NV_TRUE;
+            break;
+    }
+
+    return NV_FALSE;
+} + +NV_STATUS +refFindCpuMapping +( + RsResourceRef *pResourceRef, + NvP64 pAddress, + RsCpuMapping **ppMapping +) +{ + return refFindCpuMappingWithFilter(pResourceRef, pAddress, NULL, ppMapping); +} + +NV_STATUS +refFindCpuMappingWithFilter +( + RsResourceRef *pResourceRef, + NvP64 pAddress, + NvBool (*fnFilter)(RsCpuMapping*), + RsCpuMapping **ppMapping +) +{ + RsCpuMappingListIter it; + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + RsCpuMapping *pMapping = NULL; + + if (pResourceRef == NULL) + { + NV_ASSERT(0); + return status; + } + + it = listIterAll(&pResourceRef->cpuMappings); + while (listIterNext(&it)) + { + pMapping = it.pValue; + if ((pMapping->pLinearAddress == pAddress) && + ((fnFilter == NULL) || fnFilter(pMapping))) + { + status = NV_OK; + break; + } + } + + if (status != NV_OK) + pMapping = NULL; + + if (pMapping != NULL) + *ppMapping = pMapping; + + return status; +} + +NV_STATUS +refFindChildOfType +( + RsResourceRef *pParentRef, + NvU32 internalClassId, + NvBool bExactMatch, + RsResourceRef **ppResourceRef +) +{ + if (bExactMatch) + { + RsIndexIter it = indexRefIter(&pParentRef->childRefMap, internalClassId); + if (indexRefIterNext(&it)) + { + RsResourceRef *pResourceRef = *it.pValue; + + if (ppResourceRef != NULL) + *ppResourceRef = pResourceRef; + + return NV_OK; + } + } + else + { + RsIndexSupermapIter it = multimapSubmapIterAll(&pParentRef->childRefMap); + while (multimapSubmapIterNext(&it)) + { + RsIndexSubmap *pSubmap = it.pValue; + RsIndexIter subIt = multimapSubmapIterItems(&pParentRef->childRefMap, pSubmap); + if (multimapItemIterNext(&subIt)) + { + RsResourceRef *pResourceRef = *subIt.pValue; + + if (objDynamicCastById(pResourceRef->pResource, internalClassId) == NULL) + continue; + + if (ppResourceRef != NULL) + *ppResourceRef = pResourceRef; + + return NV_OK; + } + } + + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS +refFindAncestorOfType +( + RsResourceRef *pDescendantRef, + NvU32 internalClassId, + RsResourceRef **ppAncestorRef +) +{ + RsResourceRef *pAncestorRef = pDescendantRef->pParentRef; + + while (pAncestorRef != NULL) + { + if (pAncestorRef->internalClassId == internalClassId) + { + if(pAncestorRef->bInvalidated) + return NV_ERR_OBJECT_NOT_FOUND; + + if (ppAncestorRef != NULL) + *ppAncestorRef = pAncestorRef; + + return NV_OK; + } + + pAncestorRef = pAncestorRef->pParentRef; + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NvBool +refHasAncestor +( + RsResourceRef *pDescendantRef, + RsResourceRef *pAncestorRef +) +{ + RsResourceRef *pSearchRef = pDescendantRef->pParentRef; + + while (pSearchRef != NULL) + { + if (pSearchRef == pAncestorRef) + return NV_TRUE; + + pSearchRef = pSearchRef->pParentRef; + } + + return NV_FALSE; +} + +NV_STATUS +refAddMapping +( + RsResourceRef *pResourceRef, + RS_CPU_MAP_PARAMS *pParams, + RsResourceRef *pContextRef, + RsCpuMapping **ppMapping +) +{ + NV_STATUS status; + RsCpuMapping *pCpuMapping = listAppendNew(&pResourceRef->cpuMappings); + if (pCpuMapping == NULL) + return NV_ERR_NO_MEMORY; + + status = refAllocCpuMappingPrivate(pParams, pCpuMapping); + if (status != NV_OK) + { + listRemove(&pResourceRef->cpuMappings, pCpuMapping); + return status; + } + + if ((pContextRef != NULL) && + (pContextRef != pResourceRef) && + !refHasAncestor(pResourceRef, pContextRef)) + { + listAppendExisting(&pContextRef->backRefs, pCpuMapping); + } + + pCpuMapping->offset = pParams->offset; + pCpuMapping->length = pParams->length; + pCpuMapping->flags = pParams->flags; + pCpuMapping->pContextRef = pContextRef; + 
pCpuMapping->pResourceRef = pResourceRef; + + if (ppMapping != NULL) + *ppMapping = pCpuMapping; + + return NV_OK; +} + +void +refRemoveMapping +( + RsResourceRef *pResourceRef, + RsCpuMapping *pCpuMapping +) +{ + if ((pCpuMapping->pContextRef != NULL) && + !refHasAncestor(pResourceRef, pCpuMapping->pContextRef)) + { + listRemove(&pCpuMapping->pContextRef->backRefs, pCpuMapping); + } + + refFreeCpuMappingPrivate(pCpuMapping); + listRemove(&pResourceRef->cpuMappings, pCpuMapping); +} + +#if RS_STANDALONE +NV_STATUS +refAllocCpuMappingPrivate +( + RS_CPU_MAP_PARAMS *pMapParams, + RsCpuMapping *pCpuMapping +) +{ + return NV_OK; +} + +void +refFreeCpuMappingPrivate +( + RsCpuMapping *pCpuMapping +) +{ +} +#endif /* RS_STANDALONE */ + +NV_STATUS +refAddInterMapping +( + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RsResourceRef *pContextRef, + RsInterMapping **ppMapping +) +{ + RsInterMapping *pInterMapping; + + NV_ASSERT(pMapperRef != NULL); + NV_ASSERT(pMappableRef != NULL); + NV_ASSERT(pMappableRef != pMapperRef); + + pInterMapping = listAppendNew(&pMapperRef->interMappings); + if (pInterMapping == NULL) + return NV_ERR_NO_MEMORY; + + // Add backref linked to this inter-mapping + listAppendExisting(&pMappableRef->interBackRefsMappable, pInterMapping); + + // + // Either pMapperRef or pMappableRef should be a descendant of pContextRef + // Otherwise, it becomes possible to have a stale reference if hContext is freed first + // If this is not the case, add a backref to pContextRef as well + // + if (!refHasAncestor(pMapperRef, pContextRef) && + !refHasAncestor(pMappableRef, pContextRef)) + { + listAppendExisting(&pContextRef->interBackRefsContext, pInterMapping); + } + + pInterMapping->pMappableRef = pMappableRef; + pInterMapping->pContextRef = pContextRef; + pInterMapping->pMapperRef = pMapperRef; + + if (ppMapping != NULL) + *ppMapping = pInterMapping; + + return NV_OK; +} + +void +refRemoveInterMapping +( + RsResourceRef *pMapperRef, + RsInterMapping *pMapping +) +{ + RsResourceRef *pMappableRef = pMapping->pMappableRef; + RsResourceRef *pContextRef = pMapping->pContextRef; + + // Find and remove the mappable's backref linked to this inter-mapping + listRemove(&pMappableRef->interBackRefsMappable, pMapping); + + // Find and remove the context's backref linked to this inter-mapping, if present + if (!refHasAncestor(pMapperRef, pContextRef) && + !refHasAncestor(pMappableRef, pContextRef)) + { + listRemove(&pContextRef->interBackRefsContext, pMapping); + } + + listRemove(&pMapperRef->interMappings, pMapping); +} + +NV_STATUS +refCacheRef +( + RsResourceRef *pParentRef, + RsResourceRef *pResourceRef +) +{ + return indexAdd(&pParentRef->cachedRefMap, pResourceRef->internalClassId, pResourceRef); +} + +NV_STATUS +refUncacheRef +( + RsResourceRef *pParentRef, + RsResourceRef *pResourceRef +) +{ + return indexRemove(&pParentRef->cachedRefMap, pResourceRef->internalClassId, pResourceRef); +} + +NV_STATUS +refAddDependant +( + RsResourceRef *pResourceRef, + RsResourceRef *pDependantRef +) +{ + NV_STATUS status; + + // dependencies are implicit between a parent resource reference and child resource reference + if (refHasAncestor(pDependantRef, pResourceRef)) + return NV_OK; + + status = indexAdd(&pDependantRef->depBackRefMap, pResourceRef->internalClassId, pResourceRef); + if (status != NV_OK) + return status; + + return indexAdd(&pResourceRef->depRefMap, pDependantRef->internalClassId, pDependantRef); +} + +void +refRemoveDependant +( + RsResourceRef *pResourceRef, + RsResourceRef 
*pDependantRef +) +{ + indexRemove(&pDependantRef->depBackRefMap, pResourceRef->internalClassId, pResourceRef); + indexRemove(&pResourceRef->depRefMap, pDependantRef->internalClassId, pDependantRef); +} + +NvBool +refPendingFree +( + RsResourceRef *pResourceRef, + RsClient *pClient +) +{ + return ((pResourceRef->freeNode.pNext != NULL) || + (pResourceRef->freeNode.pPrev != NULL) || + (pResourceRef == listHead(&pClient->pendingFreeList))); +} + +void +resAddAdditionalDependants_IMPL +( + RsClient *pClient, + RsResource *pResource, + RsResourceRef *pReference +) +{ + return; +} diff --git a/src/nvidia/src/libraries/resserv/src/rs_server.c b/src/nvidia/src/libraries/resserv/src/rs_server.c new file mode 100644 index 0000000..fee3155 --- /dev/null +++ b/src/nvidia/src/libraries/resserv/src/rs_server.c @@ -0,0 +1,5008 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#define NVOC_RS_SERVER_H_PRIVATE_ACCESS_ALLOWED +#include "nvlog_inc.h" +#include "resserv/resserv.h" +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" +#include "tls/tls.h" +#include "nv_speculation_barrier.h" + +#if !RS_STANDALONE +#include "os/os.h" +#endif + +// Describes types of clients to find when getting client entries +enum CLIENT_STATE +{ + CLIENT_PARTIALLY_INITIALIZED = NVBIT(0), + CLIENT_PENDING_FREE = NVBIT(1) +} CLIENT_STATE; + +enum CLIENT_LIST_LOCK_STATE +{ + CLIENT_LIST_LOCK_LOCKED, + CLIENT_LIST_LOCK_UNLOCKED, +}; + +/** + * Get the RsClient from a client handle without taking locks + * @param[in] pServer + * @param[in] hClient The handle to lookup + * @param[out] ppClient The RsClient associated with the handle + */ +static NV_STATUS _serverFindClient(RsServer *pServer, NvHandle hClient, RsClient **ppClient); + +/** + * Get the CLIENT_ENTRY from a client handle without taking locks + * @param[in] pServer + * @param[in] hClient The handle to lookup + * @param[in] clientState Type of clients to look for + * @param[in] clientListLockState State of the global client list lock + * @param[out] ppClientEntry The client entry associated with the handle + */ +static NvBool _serverFindClientEntryByHandle(RsServer *pServer, NvHandle hClient, enum CLIENT_STATE clientState, enum CLIENT_LIST_LOCK_STATE clientListLockState, CLIENT_ENTRY **ppClientEntry); + +/** + * Get the CLIENT_ENTRY from a client handle, incrementing the ref count if a reference + * to the CLIENT_ENTRY is held outside of this function (i.e. outside locks). + * @param[in] pServer + * @param[in] hClient The handle to lookup + * @param[in] clientState Type of clients to look for + * @param[in] clientListLockState State of the global client list lock + * @param[out] ppClientEntry The client entry associated with the handle + */ +static NvBool _serverGetClientEntryByHandle(RsServer *pServer, NvHandle hClient, enum CLIENT_STATE clientState, enum CLIENT_LIST_LOCK_STATE clientListLockState, CLIENT_ENTRY **ppClientEntry); + +/** + * Get the CLIENT_ENTRY from a client handle, incrementing the reference count if a + * reference is held outside this function and also locking the client object if "access" + * dictates we should. + * @param[in] pServer + * @param[in] hClient The handle to lookup + * @param[in] access Lock access type to lock the client object with + * @param[out] ppClientEntry The client entry associated with the handle + */ +static NvBool _serverGetAndLockClientEntryByHandle(RsServer *pServer, NvHandle hClient, LOCK_ACCESS_TYPE access, CLIENT_ENTRY **ppClientEntry); + +/** + * Put the CLIENT_ENTRY, decrementing the reference count, and also + * unlocking the client object as "access" dictates we should. + * @param[in] pServer + * @param[in] access Lock access type to lock the client object with + * @param[in] pClientEntry The client entry to put/unlock + */ +static void _serverPutAndUnlockClientEntry(RsServer *pServer, LOCK_ACCESS_TYPE access, CLIENT_ENTRY *pClientEntry); + +/** + * Insert a CLIENT_ENTRY in the server database at an arbitrary location, must be + * called with client list lock taken in RW mode. + * @param[in] pServer + * @param[in] pClientEntry The client entry associated with the handle + * @param[in] ppClientNext The client entry to insert the entry before, or NULL if + * we should just insert at the end of the bucket list. 
+ */
+static NV_STATUS _serverInsertClientEntry(RsServer *pServer, CLIENT_ENTRY *ppClientEntry, CLIENT_ENTRY *pClientNext);
+
+/**
+ * Mark a CLIENT_ENTRY as about to be freed
+ * @param[in]  pServer
+ * @param[in]  hClient The handle to lookup
+ * @param[out] ppClientEntry The client entry associated with the handle
+ */
+static NvBool _serverMarkClientEntryPendingFree(RsServer *pServer, NvHandle hClient, CLIENT_ENTRY **ppClientEntry);
+
+/**
+ * Find the next available client handle in a bucket; must be called with the client list lock held
+ * @param[in]  pServer
+ * @param[in]  hClientIn
+ * @param[out] phClientOut
+ * @param[out] ppClientNext
+ */
+static NV_STATUS _serverFindNextAvailableClientHandleInBucket(RsServer *pServer, NvHandle hClientIn, NvHandle *phClientOut, CLIENT_ENTRY **ppClientNext);
+
+/**
+ * Create a client entry and a client lock for a client that does not exist yet. Used during client
+ * construction. No locks will be taken if this call fails.
+ * @param[in]  pServer
+ * @param[in]  phClient
+ */
+static NV_STATUS _serverCreateEntryAndLockForNewClient(RsServer *pServer, NvHandle *phClient, NvBool bInternalHandle, CLIENT_ENTRY **ppClientEntry, API_SECURITY_INFO *pSecInfo);
+
+/**
+ * Lock the RsClient given a CLIENT_ENTRY
+ * @param[in] access Read or write lock access
+ * @param[in] pClientEntry The client entry to lock
+ */
+static void _serverLockClient(LOCK_ACCESS_TYPE access, CLIENT_ENTRY* pClientEntry);
+
+/**
+ * Lock and retrieve the RsClient associated with a client handle, and update lock info.
+ * @param[in]    pServer
+ * @param[in]    access
+ * @param[in]    hClient Handle of client to look-up
+ * @param[in]    bValidateLocks Whether to validate currently held locks are sufficient
+ * @param[inout] pLockInfo Lock state
+ * @param[out]   pReleaseFlags Local lock flags to keep track of what locks to release
+ * @param[out]   ppClientEntry CLIENT_ENTRY associated with the client handle
+ */
+static NV_STATUS _serverLockClientWithLockInfo(RsServer *pServer, LOCK_ACCESS_TYPE access, NvHandle hClient, NvBool bValidateLocks, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags, CLIENT_ENTRY **ppClientEntry);
+
+/**
+ * Lock and retrieve two RsClients associated with a pair of client handles, and update lock info.
+ * @param[in]    pServer
+ * @param[in]    access
+ * @param[in]    hClient1, hClient2 Handles of clients to look-up and lock
+ * @param[in]    bValidateLocks Whether to validate currently held locks are sufficient
+ * @param[inout] pLockInfo Lock state
+ * @param[out]   pReleaseFlags Local lock flags to keep track of what locks to release
+ * @param[out]   ppClientEntry1 CLIENT_ENTRY associated with the first client handle
+ * @param[out]   ppClientEntry2 CLIENT_ENTRY associated with the second client handle
+ */
+static NV_STATUS _serverLockDualClientWithLockInfo(RsServer *pServer, LOCK_ACCESS_TYPE access, NvHandle hClient1, NvHandle hClient2, NvBool bValidateLocks, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags, CLIENT_ENTRY **ppClientEntry1, CLIENT_ENTRY **ppClientEntry2);
+
+/**
+ * Lock all clients, and update lock info.
+ * @param[in] pServer + * @param[inout] pLockInfo Lock state + * @param[out] pReleaseFlags Local lock flags to keep track of what locks to release + */ +static NV_STATUS _serverLockAllClientsWithLockInfo(RsServer *pServer, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Unlock the RsClient given a CLIENT_ENTRY + * @param[in] access Read or write lock access + * @param[in] pClientEntry The client entry to unlock + */ +static void _serverUnlockClient(LOCK_ACCESS_TYPE access, CLIENT_ENTRY* pClientEntry); + +/** + * Unlock a client and update lock info. + * @param[in] pServer + * @param[in] access + * @param[in] pClientEntry Client entry to unlock + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +static void _serverUnlockClientWithLockInfo(RsServer *pServer, LOCK_ACCESS_TYPE access, CLIENT_ENTRY *pClientEntry, RS_LOCK_INFO* pLockInfo, NvU32 *pReleaseFlags); + +/** + * Unlock two clients and update lock info. + * @param[in] pServer + * @param[in] access + * @param[in] pClientEntry1 First client's entry to unlock + * @param[in] pClientEntry2 Second client's entry to unlock + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +static void _serverUnlockDualClientWithLockInfo(RsServer *pServer, LOCK_ACCESS_TYPE access, CLIENT_ENTRY *pClientEntry1, CLIENT_ENTRY *pClientEntry2, RS_LOCK_INFO* pLockInfo, NvU32 *pReleaseFlags); + +/** + * Unlock all clients, and update lock info. + * @param[in] pServer + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +static NV_STATUS _serverUnlockAllClientsWithLockInfo(RsServer *pServer, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Increment reference count for CLIENT_ENTRY, preventing it from being freed + * @param[in] pClientEntry The client entry to reference + */ +static void _serverGetClientEntry(CLIENT_ENTRY *pClientEntry); + +/** + * Decrement reference count for CLIENT_ENTRY, freeing it if it reaches 0 + * @param[in] pClientEntry The client entry to dereference + */ +static void _serverPutClientEntry(RsServer *pServer, CLIENT_ENTRY *pClientEntry); + +NV_STATUS serverFreeResourceTreeUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pFreeParams) +{ + NV_STATUS status; + RsResourceRef *pResourceRef = pFreeParams->pResourceRef; + RS_LOCK_INFO *pLockInfo = pFreeParams->pLockInfo; + NvU32 releaseFlags = 0; + + NV_ASSERT_OR_RETURN(pResourceRef != NULL, NV_ERR_INVALID_OBJECT_HANDLE); + + status = serverUpdateLockFlagsForFree(pServer, pFreeParams); + if (status != NV_OK) + return status; + + status = serverSessionLock_Prologue(LOCK_ACCESS_WRITE, pResourceRef, pLockInfo, &releaseFlags); + if (status != NV_OK) + return status; + + if (pResourceRef->pResource == NULL) + { + // + // We don't need to acquire the resource lock for a resource + // that already got freed during resource invalidation. 
+ // + + status = clientFreeResource(pResourceRef->pClient, pServer, pFreeParams); + NV_ASSERT(status == NV_OK); + } + else + { + pLockInfo->flags |= RS_LOCK_FLAGS_FREE_SESSION_LOCK; + pLockInfo->pResRefToBackRef = pResourceRef; + pLockInfo->traceOp = RS_LOCK_TRACE_FREE; + pLockInfo->traceClassId = pResourceRef->externalClassId; + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags, 0); + if (status != NV_OK) + goto done; + + status = clientFreeResource(pResourceRef->pClient, pServer, pFreeParams); + NV_ASSERT((status == NV_OK) || (status == NV_ERR_GPU_IN_FULLCHIP_RESET)); + + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + } + +done: + serverSessionLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + + return status; +} + +#if RS_STANDALONE +NV_STATUS +serverInitFreeParams_Recursive(NvHandle hClient, NvHandle hResource, RS_LOCK_INFO* pLockInfo, RS_RES_FREE_PARAMS *pParams) +{ + portMemSet(pParams, 0, sizeof(*pParams)); + pParams->hClient = hClient; + pParams->hResource = hResource; + pParams->pLockInfo = pLockInfo; + return NV_OK; +} + +NV_STATUS serverUpdateLockFlagsForCopy(RsServer *pServer, RS_RES_DUP_PARAMS *pParams) +{ + return NV_OK; +} + +NV_STATUS serverUpdateLockFlagsForFree(RsServer *pServer, RS_RES_FREE_PARAMS *pParams) +{ + return NV_OK; +} + +NV_STATUS serverUpdateLockFlagsForInterAutoUnmap(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pParams) +{ + return NV_OK; +} + +NV_STATUS serverFreeResourceRpcUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pParams) +{ + return NV_OK; +} +#endif + + +// +// Client handle format: +// +// fn [ C[1..3][0..F]] [ *INDEX* ] +// bit 31 20 19 0 +// + +#define RS_CLIENT_HANDLE_DECODE_MASK (RS_CLIENT_HANDLE_MAX - 1) +#define CLIENT_DECODEHANDLE(handle) (handle & RS_CLIENT_HANDLE_DECODE_MASK) + +#define CLIENT_ENCODEHANDLE(handleBase, index) (handleBase | index) + +NV_STATUS +serverConstruct +( + RsServer *pServer, + RS_PRIV_LEVEL privilegeLevel, + NvU32 maxDomains +) +{ + NvU32 i; + PORT_MEM_ALLOCATOR *pAllocator = portMemAllocatorCreateNonPaged(); + + pServer->privilegeLevel = privilegeLevel; + pServer->bConstructed = NV_TRUE; + pServer->pAllocator = pAllocator; + pServer->bDebugFreeList = NV_FALSE; + pServer->bRsAccessEnabled = NV_TRUE; + pServer->allClientLockOwnerTid = ~0; + pServer->internalHandleBase = RS_CLIENT_INTERNAL_HANDLE_BASE; + pServer->clientHandleBase = RS_CLIENT_HANDLE_BASE; + pServer->activeClientCount = 0; + pServer->activeResourceCount= 0; + pServer->roTopLockApiMask = 0; + /* pServer->bUnlockedParamCopy is set in _rmapiLockAlloc */ + + pServer->pClientSortedList = PORT_ALLOC(pAllocator, sizeof(RsClientList)*RS_CLIENT_HANDLE_BUCKET_COUNT); + if (NULL == pServer->pClientSortedList) + goto fail; + + for (i = 0; i < RS_CLIENT_HANDLE_BUCKET_COUNT; i++) + { + listInitIntrusive(&pServer->pClientSortedList[i]); + } + pServer->clientCurrentHandleIndex = 0; + + RS_LOCK_VALIDATOR_INIT(&pServer->clientListLockVal, LOCK_VAL_LOCK_CLASS_CLIENT_LIST, 0xcafe0000); + pServer->pClientListLock = portSyncSpinlockCreate(pAllocator); + if (pServer->pClientListLock == NULL) + goto fail; + +#if RS_STANDALONE + RS_LOCK_VALIDATOR_INIT(&pServer->topLockVal, LOCK_VAL_LOCK_CLASS_API, 0xdead0000); + pServer->pTopLock = portSyncRwLockCreate(pAllocator); + if (pServer->pTopLock == NULL) + goto fail; + + RS_LOCK_VALIDATOR_INIT(&pServer->resLockVal, LOCK_VAL_LOCK_CLASS_GPU, 0xbeef0000); + pServer->pResLock = portSyncRwLockCreate(pAllocator); + if (pServer->pResLock == NULL) + goto 
fail; + + pServer->topLockOwnerTid = ~0; +#endif + + pServer->pShareMapLock = portSyncSpinlockCreate(pAllocator); + + mapInitIntrusive(&pServer->shareMap); + + listInit(&pServer->defaultInheritedSharePolicyList, pAllocator); + listInit(&pServer->globalInternalSharePolicyList, pAllocator); + + if (NV_OK != serverInitGlobalSharePolicies(pServer)) + { + mapDestroy(&pServer->shareMap); + listDestroy(&pServer->defaultInheritedSharePolicyList); + listDestroy(&pServer->globalInternalSharePolicyList); + goto fail; + } + + listInitIntrusive(&pServer->disabledClientList); + pServer->pDisabledClientListLock = portSyncSpinlockCreate(pAllocator); + + return NV_OK; +fail: + +#if RS_STANDALONE + if (pServer->pResLock != NULL) + portSyncRwLockDestroy(pServer->pResLock); + + if (pServer->pTopLock != NULL) + portSyncRwLockDestroy(pServer->pTopLock); +#endif + + if (pServer->pClientListLock != NULL) + portSyncSpinlockDestroy(pServer->pClientListLock); + + if (pServer->pShareMapLock != NULL) + portSyncSpinlockDestroy(pServer->pShareMapLock); + + if (pServer->pClientSortedList != NULL) + { + for (i = 0; i < RS_CLIENT_HANDLE_BUCKET_COUNT; i++) + { + listDestroy(&pServer->pClientSortedList[i]); + } + PORT_FREE(pAllocator, pServer->pClientSortedList); + } + + if (pAllocator != NULL) + portMemAllocatorRelease(pAllocator); + + return NV_ERR_INSUFFICIENT_RESOURCES; +} + + +NV_STATUS +serverDestruct +( + RsServer *pServer +) +{ + NvU32 i; + RS_LOCK_INFO lockInfo; + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + + if (!pServer->bConstructed) + return NV_ERR_INVALID_OBJECT; + + for (i = 0; i < RS_CLIENT_HANDLE_BUCKET_COUNT; i++) + { + CLIENT_ENTRY *pClientEntry; + NvHandle hClient = 0; + + while ((pClientEntry = listHead(&pServer->pClientSortedList[i])) != NULL) + { + RS_RES_FREE_PARAMS_INTERNAL freeParams; + lockInfo.pClient = pClientEntry->pClient; + hClient = lockInfo.pClient->hClient; + serverInitFreeParams_Recursive(hClient, hClient, &lockInfo, &freeParams); + serverFreeResourceTree(pServer, &freeParams); + } + + listDestroy(&pServer->pClientSortedList[i]); + } + + listDestroy(&pServer->disabledClientList); + portSyncSpinlockDestroy(pServer->pDisabledClientListLock); + + PORT_FREE(pServer->pAllocator, pServer->pClientSortedList); + mapDestroy(&pServer->shareMap); + listDestroy(&pServer->defaultInheritedSharePolicyList); + listDestroy(&pServer->globalInternalSharePolicyList); + +#if RS_STANDALONE + portSyncRwLockDestroy(pServer->pResLock); + portSyncRwLockDestroy(pServer->pTopLock); +#endif + + portSyncSpinlockDestroy(pServer->pShareMapLock); + portSyncSpinlockDestroy(pServer->pClientListLock); + + portMemAllocatorRelease(pServer->pAllocator); + + pServer->bConstructed = NV_FALSE; + + return NV_OK; +} + +NV_STATUS +serverSetClientHandleBase +( + RsServer *pServer, + NvU32 clientHandleBase +) +{ + NvU32 releaseFlags = 0; + RS_LOCK_INFO lockInfo; + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + + // Grab top level lock before updating the internal state + NV_ASSERT_OK_OR_RETURN(serverTopLock_Prologue(pServer, LOCK_ACCESS_WRITE, &lockInfo, &releaseFlags)); + + // Do not allow fixedClientHandle base to be same as internalHandleBase + if (clientHandleBase != pServer->internalHandleBase) + { + pServer->clientHandleBase = clientHandleBase; + } + else + { + NV_PRINTF(LEVEL_ERROR, "Error setting fixed Client handle base\n"); + } + + serverTopLock_Epilogue(pServer, LOCK_ACCESS_WRITE, &lockInfo, &releaseFlags); + + return NV_OK; +} + +static +void +_serverFreeClient_underlock +( + RsServer *pServer, + CLIENT_ENTRY *pClientEntry 
+)
+{
+    RsClient *pClient = pClientEntry->pClient;
+    NvHandle hClient = pClient->hClient;
+
+    clientFreeAccessBackRefs(pClient, pServer);
+
+    if (pClient->bDisabled)
+    {
+        portSyncSpinlockAcquire(pServer->pDisabledClientListLock);
+        listRemove(&pServer->disabledClientList, pClient);
+        portSyncSpinlockRelease(pServer->pDisabledClientListLock);
+    }
+
+    objDelete(pClient);
+
+    // Now remove the client entry and decrease the client count
+    serverAcquireClientListLock(pServer);
+    listRemove(
+        &pServer->pClientSortedList[hClient & RS_CLIENT_HANDLE_BUCKET_MASK],
+        pClientEntry);
+    serverReleaseClientListLock(pServer);
+
+    NV_ASSERT(pClientEntry->refCount == 1);
+    _serverPutClientEntry(pServer, pClientEntry);
+}
+
+NV_STATUS
+serverAllocDomain
+(
+    RsServer *pServer,
+    NvU32 hParentDomain,
+    ACCESS_CONTROL *pAccessControl,
+    NvHandle *phDomain
+)
+{
+    return NV_OK;
+}
+
+NV_STATUS
+serverFreeDomain
+(
+    RsServer *pServer,
+    NvHandle hDomain
+)
+{
+    NvU32 bucket;
+    for (bucket = 0; bucket < RS_CLIENT_HANDLE_BUCKET_COUNT; bucket++)
+    {
+        RsClientList *pClientList = &(pServer->pClientSortedList[bucket]);
+        CLIENT_ENTRY *pClientEntry = listHead(pClientList);
+        while (pClientEntry != NULL)
+        {
+            RS_CLIENT_FREE_PARAMS params;
+
+            portMemSet(&params, 0, sizeof(params));
+            params.hClient = pClientEntry->hClient;
+
+            serverFreeClient(pServer, &params);
+            pClientEntry = listHead(pClientList);
+        }
+    }
+    return NV_OK;
+}
+
+NV_STATUS serverValidate
+(
+    RsServer *pServer,
+    NvU32 hDomain,
+    NvHandle hClient
+)
+{
+    return NV_OK;
+}
+
+NV_STATUS
+serverValidateAlloc
+(
+    RsServer *pServer,
+    NvU32 hDomain,
+    NvU32 externalClassId
+)
+{
+    // Placeholder for allocation validation
+    return NV_OK;
+}
+
+NV_STATUS
+serverAllocClient
+(
+    RsServer *pServer,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    NV_STATUS status;
+    NvHandle hClient = 0;
+    RsClient *pClient = NULL;
+    CLIENT_ENTRY *pClientEntry = NULL;
+    NvBool bLockedClient = NV_FALSE;
+
+    if (!pServer->bConstructed)
+        return NV_ERR_NOT_READY;
+
+    // RS-TODO Assert that the RW top lock is held
+
+    hClient = pParams->hClient;
+#if !(RS_COMPATABILITY_MODE)
+    // Fail if the caller supplied a client id
+    if (hClient != 0)
+        return NV_ERR_INVALID_ARGUMENT;
+#endif
+
+    status = _serverCreateEntryAndLockForNewClient(pServer, &hClient, !!(pParams->allocState & ALLOC_STATE_INTERNAL_CLIENT_HANDLE), &pClientEntry, pParams->pSecInfo);
+
+    if (status != NV_OK)
+        goto done;
+
+    pParams->hClient = hClient;
+    pParams->hResource = hClient;
+    bLockedClient = NV_TRUE;
+
+    status = resservClientFactory(pServer->pAllocator, pParams, &pClient);
+    if (NV_OK != status)
+    {
+        status = NV_ERR_INSUFFICIENT_RESOURCES;
+        goto done;
+    }
+
+    // Automatically allocate client proxy resource
+    status = clientAllocResource(pClient, pServer, pParams);
+    if (status != NV_OK)
+        goto done;
+
+    //
+    // Client list lock is required when the client becomes active in order to avoid
+    // race conditions with serverLockAllClients.
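+    // A client only becomes visible to serverLockAllClients once
+    // pClientEntry->pClient is published below with the list lock held, so a
+    // concurrent lock-all operation sees either the fully constructed client
+    // or no client at all.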
+ // + serverAcquireClientListLock(pServer); + pClientEntry->pClient = pClient; + + // Increase client count + portAtomicIncrementU32(&pServer->activeClientCount); + serverReleaseClientListLock(pServer); + +done: + if (bLockedClient) + _serverUnlockClient(LOCK_ACCESS_WRITE, pClientEntry); + + if ((status != NV_OK) && (pClientEntry != NULL)) + { + serverAcquireClientListLock(pServer); + listRemove( + &pServer->pClientSortedList[hClient & RS_CLIENT_HANDLE_BUCKET_MASK], + pClientEntry); + serverReleaseClientListLock(pServer); + + // + // Decrement reference count outside of client list lock, memory free is + // disallowed in the spinlock's critical section on Windows. + // + _serverPutClientEntry(pServer, pClientEntry); + + objDelete(pClient); + } + + if (pClientEntry != NULL) + _serverPutClientEntry(pServer, pClientEntry); + + return status; +} + +static +NV_STATUS +_serverFreeClient +( + RsServer *pServer, + RS_CLIENT_FREE_PARAMS *pParams +) +{ + NV_STATUS status; + CLIENT_ENTRY *pClientEntry; + NvU32 releaseFlags = 0; + + // + // Mark the client entry as pending free which will allow us to prevent other threads + // from using the client while we deallocate resources. + // + if (!_serverMarkClientEntryPendingFree(pServer, pParams->hClient, &pClientEntry)) + return NV_ERR_INVALID_OBJECT_HANDLE; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, + pParams->pResFreeParams->pLockInfo, &releaseFlags, 0); + if (status != NV_OK) + goto done; + + _serverFreeClient_underlock(pServer, pClientEntry); + +done: + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pParams->pResFreeParams->pLockInfo, &releaseFlags); + + // Undo pending free marker + if (status != NV_OK) + { + serverAcquireClientListLock(pServer); + pClientEntry->bPendingFree = NV_FALSE; + serverReleaseClientListLock(pServer); + } + + return status; +} + +NV_STATUS +serverAllocResource +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS *pParams +) +{ + NV_STATUS status; + NvU32 releaseFlags = 0; + API_STATE *pApiState; + NvBool bClientAlloc = (pParams->externalClassId == NV01_ROOT || + pParams->externalClassId == NV01_ROOT_CLIENT || + pParams->externalClassId == NV01_ROOT_NON_PRIV); + LOCK_ACCESS_TYPE topLockAccess; + NvU32 initialLockState; + RS_LOCK_INFO *pLockInfo; + CLIENT_ENTRY *pClientEntry = NULL; + CLIENT_ENTRY *pSecondClientEntry = NULL; + NvHandle hSecondClient; + CALL_CONTEXT callContext = {0}; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + initialLockState = pLockInfo->state; + + status = serverAllocApiCopyIn(pServer, pParams, &pApiState); + if (status != NV_OK) + return status; + + status = serverAllocResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + if ((status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags)) != NV_OK) + goto done; + + if (status == NV_OK) + { + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + serverDeserializeAllocDown(&callContext, pParams->externalClassId, &pParams->pAllocParams, &pParams->paramsSize, &pParams->allocFlags), + done); + + if (bClientAlloc) + { + status = serverAllocClient(pServer, pParams); + } + else + { + status = serverAllocLookupSecondClient(pParams->externalClassId, + pParams->pAllocParams, + &hSecondClient); + if (status != NV_OK) + goto done; + + if (hSecondClient == 0) + { + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + pParams->hClient, NV_TRUE, + 
pLockInfo, &releaseFlags, + &pClientEntry); + + if (status != NV_OK) + goto done; + + NV_ASSERT_OR_ELSE(!serverIsClientLockedForRead(pClientEntry), + status = NV_ERR_INVALID_LOCK_STATE; goto done); + + if (!pClientEntry->pClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + } + else + { + status = _serverLockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + pParams->hClient, hSecondClient, + NV_TRUE, pLockInfo, + &releaseFlags, + &pClientEntry, + &pSecondClientEntry); + + if (status != NV_OK) + goto done; + + NV_ASSERT_OR_ELSE( + (!serverIsClientLockedForRead((pClientEntry)) && + !serverIsClientLockedForRead((pSecondClientEntry))), + status = NV_ERR_INVALID_LOCK_STATE; goto done); + + if (!pClientEntry->pClient->bActive || + !pSecondClientEntry->pClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + } + + pParams->pClient = pClientEntry->pClient; + + // The second client's usage is class-dependent and should be validated + // by the class's constructor + status = clientValidate(pParams->pClient, pParams->pSecInfo); + + if (status != NV_OK) + goto done; + + status = serverAllocResourceUnderLock(pServer, pParams); + } + } + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "hParent 0x%08x : hClass 0x%08x allocation failed\n", + pParams->hParent, pParams->externalClassId); + } + + // RS-TODO: Can this be moved before _ResLock? + status = serverAllocEpilogue_WAR(pServer, status, bClientAlloc, pParams); + +done: + + if (!bClientAlloc) + { + if (pClientEntry != NULL) + { + if (pSecondClientEntry != NULL) + { + _serverUnlockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + pClientEntry, pSecondClientEntry, + pLockInfo, &releaseFlags); + } + else + { + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pClientEntry, + pLockInfo, &releaseFlags); + } + } + } + + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status, LEVEL_ERROR, + serverSerializeAllocUp(&callContext, pParams->externalClassId, &pParams->pAllocParams, &pParams->paramsSize, &pParams->allocFlags)); + serverFreeSerializeStructures(&callContext, pParams->pAllocParams); + + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + // copyout as needed, being careful not to overwrite a useful status value + status = serverAllocApiCopyOut(pServer, status, pApiState); + + NV_ASSERT(pLockInfo->state == initialLockState); + + return status; +} + +#if RS_STANDALONE +// RS-TODO rename to UnderClientLock +NV_STATUS +serverAllocResourceUnderLock +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS *pParams +) +{ + NV_STATUS status; + RsClient *pClient = pParams->pClient; + NvHandle hResource = pParams->hResource; + NvU32 releaseFlags = 0; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, &releaseFlags, 0); + if (status != NV_OK) + goto done; + + status = clientAssignResourceHandle(pClient, &hResource); + if (status != NV_OK) + goto done; + + pParams->hResource = hResource; + pParams->hParent = (pParams->hParent == 0) ? 
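+        /* no parent supplied: default to the client itself */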
pParams->hClient : pParams->hParent; + status = clientAllocResource(pClient, pServer, pParams); + if (status != NV_OK) + goto done; + + // NV_PRINTF(LEVEL_INFO, "hClient %x: Allocated hResource %x with class %x\n", + // pParams->hClient, pParams->hResource, pParams->externalClassId); + +done: + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, &releaseFlags); + return status; +} + +NvU32 +serverAllocClientHandleBase +( + RsServer *pServer, + NvBool bInternalHandle, + API_SECURITY_INFO *pSecInfo +) +{ + return bInternalHandle ? pServer->internalHandleBase : + pServer->clientHandleBase; +} +#endif + +NV_STATUS +clientUpdatePendingFreeList_IMPL +( + RsClient *pClient, + RsResourceRef *pTargetRef, + RsResourceRef *pReference, + NvBool bMove +) +{ + RsIndexIter it; + NvBool bInList = refPendingFree(pTargetRef, pClient); + RS_FREE_STACK *pFs = pClient->pFreeStack; + if (bMove) + { + if (pReference != pTargetRef) + { + // Basic circular dependency check + while (pFs != NULL) + { + RsResourceRef *pFsRef = pFs->pResourceRef; + NV_ASSERT_OR_GOTO(pFsRef != pTargetRef, done); + + pFs = pFs->pPrev; + } + } + + if (bInList) + listRemove(&pClient->pendingFreeList, pTargetRef); + listPrependExisting(&pClient->pendingFreeList, pTargetRef); + } + else if (!bInList) + { + listPrependExisting(&pClient->pendingFreeList, pTargetRef); + } + + // + // Recursively add children to the pending free list and move + // them to the front of the list + // + it = indexRefIterAll(&pTargetRef->childRefMap); + while (indexRefIterNext(&it)) + { + clientUpdatePendingFreeList(pClient, *it.pValue, pReference, NV_TRUE); + } + + // + // Recursively add dependencies to the pending free list and + // move them to the front of the list + // + it = indexRefIterAll(&pTargetRef->depRefMap); + while (indexRefIterNext(&it)) + { + clientUpdatePendingFreeList(pClient, *it.pValue, pReference, NV_TRUE); + } + + if (pTargetRef->pResource != NULL) + { + // Allow some objects to add more dependants here + resAddAdditionalDependants(pClient, pTargetRef->pResource, pReference); + } + +done: + return NV_OK; +} + +NV_STATUS +serverMarkClientListDisabled +( + RsServer *pServer, + NvHandle *phClientList, + NvU32 numClients, + NvU32 freeState, + API_SECURITY_INFO *pSecInfo +) +{ + NvU32 i; + for (i = 0; i < numClients; ++i) + { + RS_CLIENT_FREE_PARAMS params; + portMemSet(¶ms, 0, sizeof(params)); + + if (phClientList[i] == 0) + continue; + + params.hClient = phClientList[i]; + params.bDisableOnly = NV_TRUE; + params.state = freeState; + params.pSecInfo = pSecInfo; + + // If individual calls fail not much to do, just log error and move on + NV_ASSERT_OK(serverFreeClient(pServer, ¶ms)); + } + + return NV_OK; +} + +// Returns pServer->pNextDisabledClient and advances it by one node ahead +static RsClient * +_getNextDisabledClient(RsServer *pServer) +{ + RsClient *pClient; + portSyncSpinlockAcquire(pServer->pDisabledClientListLock); + + pClient = + (pServer->pNextDisabledClient != NULL) ? + pServer->pNextDisabledClient : + listHead(&pServer->disabledClientList); + + pServer->pNextDisabledClient = + (pClient != NULL) ? 
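+        /* advance the cursor one node past the client being returned */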
+        listNext(&pServer->disabledClientList, pClient) :
+        listHead(&pServer->disabledClientList);
+
+    portSyncSpinlockRelease(pServer->pDisabledClientListLock);
+    return pClient;
+}
+
+NV_STATUS serverFreeDisabledClients
+(
+    RsServer *pServer,
+    NvU32 freeState,
+    NvU32 limit
+)
+{
+    RsClient *pClient;
+    RS_RES_FREE_PARAMS params;
+    API_SECURITY_INFO secInfo;
+    RS_LOCK_INFO lockInfo;
+    NV_STATUS status = NV_OK;
+
+    //
+    // Only allow one instance of this function at a time.
+    // Multiple calls can happen if one thread requested a delayed free via a
+    // worker while another tries to flush disabled clients immediately.
+    // It doesn't matter which one ends up running; they all free everything.
+    //
+    static volatile NvU32 inProgress;
+    if (!portAtomicCompareAndSwapU32(&inProgress, 1, 0))
+        return NV_ERR_IN_USE;
+
+    portMemSet(&params, 0, sizeof(params));
+    portMemSet(&secInfo, 0, sizeof(secInfo));
+    portMemSet(&lockInfo, 0, sizeof(lockInfo));
+
+    secInfo.privLevel = RS_PRIV_LEVEL_KERNEL;
+    secInfo.paramLocation = PARAM_LOCATION_KERNEL;
+    lockInfo.state = freeState;
+    params.pLockInfo = &lockInfo;
+    params.pSecInfo = &secInfo;
+
+    while ((pClient = _getNextDisabledClient(pServer)))
+    {
+        NV_ASSERT(pClient->bDisabled);
+
+        params.hClient = pClient->hClient;
+        params.hResource = pClient->hClient;
+
+        //
+        // Each client is freed in two passes: first for high-priority
+        // resources, then again for the remaining resources.
+        //
+        if (!pClient->bHighPriorityFreeDone)
+        {
+            params.bHiPriOnly = NV_TRUE;
+            pClient->bHighPriorityFreeDone = NV_TRUE;
+        }
+        else
+        {
+            params.bHiPriOnly = NV_FALSE;
+        }
+
+        serverFreeResourceTree(pServer, &params);
+
+        //
+        // If limit is 0, the decrement wraps around and counts down from
+        // 0xFFFFFFFF. RS_CLIENT_HANDLE_MAX is well below that, so a limit of
+        // 0 effectively means: process all of them.
+        //
+        if (--limit == 0)
+        {
+            status = NV_WARN_MORE_PROCESSING_REQUIRED;
+            break;
+        }
+    }
+
+    portAtomicSetU32(&inProgress, 0);
+    return status;
+}
+
+//
+// Helper that validates the client and looks up the resource.
+//
+// It acquires the top lock (RM API lock) in the desired mode (topLockAccess)
+// and the client lock, which is always taken as exclusive.
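+//
+// On success the caller owns both locks. *ppClientEntry is also set on the
+// failure paths that occur after the client lock is taken, so the caller's
+// cleanup path can still unlock the entry.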
+// +static NV_STATUS +serverFreeResourceTreeLockAndFindResource +( + RsServer *pServer, + RS_RES_FREE_PARAMS *pParams, + LOCK_ACCESS_TYPE topLockAccess, + NvU32 *pReleaseFlags, + CLIENT_ENTRY **ppClientEntry, + RsResourceRef **ppResourceRef +) +{ + NV_STATUS status; + RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + CLIENT_ENTRY *pClientEntry; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, pReleaseFlags); + if (status != NV_OK) + return status; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, + NV_TRUE, pLockInfo, pReleaseFlags, &pClientEntry); + if (status != NV_OK) + return status; + + NV_ASSERT_OR_RETURN(!serverIsClientLockedForRead(pClientEntry), + NV_ERR_INVALID_LOCK_STATE); + + *ppClientEntry = pClientEntry; + + status = clientValidate(pClientEntry->pClient, pParams->pSecInfo); + if (status != NV_OK) + return status; + + status = clientGetResourceRef(pClientEntry->pClient, pParams->hResource, ppResourceRef); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "hObject 0x%x not found for client 0x%x\n", + pParams->hResource, + pParams->hClient); +#if (RS_COMPATABILITY_MODE) + status = NV_OK; +#endif + return status; + } + + return NV_OK; +} + +NV_STATUS +serverFreeResourceTree +( + RsServer *pServer, + RS_RES_FREE_PARAMS *pParams +) +{ + CLIENT_ENTRY *pClientEntry = NULL; + RsClient *pClient = NULL; + NV_STATUS status; + RsResourceRef *pResourceRef = NULL; + RsResourceRef *pTargetRef; + RsResourceRef *pFirstLowPriRef; + NvBool bHiPriOnly = pParams->bHiPriOnly; + NvBool bRecursive = NV_FALSE; + RS_FREE_STACK freeStack; + NvBool bPopFreeStack = NV_FALSE; + RS_LOCK_INFO *pLockInfo; + NvU32 initialLockState; + NvU32 releaseFlags = 0; + LOCK_ACCESS_TYPE topLockAccess; + LOCK_ACCESS_TYPE firstTopLockAccess; + NvBool bSupportForceROLock; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + initialLockState = pLockInfo->state; + + portMemSet(&freeStack, 0, sizeof(freeStack)); + + // Reset pResourceRef since it's used as bookkeeping in this function. + pParams->pResourceRef = NULL; + + status = serverFreeResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, + &topLockAccess, &bSupportForceROLock); + if (status != NV_OK) + goto done; + + // + // If force RO lock is enabled, always lock as RO first to look up the + // resource and check its flags (see handling below) + // + firstTopLockAccess = bSupportForceROLock ? LOCK_ACCESS_READ : topLockAccess; + status = serverFreeResourceTreeLockAndFindResource(pServer, pParams, firstTopLockAccess, + &releaseFlags, &pClientEntry, &pResourceRef); + if ((status != NV_OK) || (pResourceRef == NULL)) + { + // + // Check for pResourceRef == NULL to cover the compatibility case where + // we return NV_OK for resources that don't exist. + // + goto done; + } + + if (topLockAccess != firstTopLockAccess) + { + // + // RO locking has not been enabled across the board, but some resources + // explicitly opt-in after having been verified to be safe. Query the + // lock flags again now that we know the resource. 
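+        // The client lock and the RO top lock are therefore dropped and
+        // everything is re-acquired in RW mode; the resource must be looked up
+        // again afterwards, since it may have been freed in the window where
+        // no locks were held.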
+ // + pParams->pResourceRef = pResourceRef; + status = serverFreeResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, + &topLockAccess, &bSupportForceROLock); + if (status != NV_OK) + goto done; + + if (topLockAccess != firstTopLockAccess) + { + // Resource requires RW locking so need to re-lock and look up the resource again + pParams->pResourceRef = NULL; + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pClientEntry, pLockInfo, &releaseFlags); + pClientEntry = NULL; + serverTopLock_Epilogue(pServer, firstTopLockAccess, pLockInfo, &releaseFlags); + + status = serverFreeResourceTreeLockAndFindResource(pServer, pParams, topLockAccess, + &releaseFlags, &pClientEntry, &pResourceRef); + if ((status != NV_OK) || (pResourceRef == NULL)) + goto done; + } + } + + pClient = pClientEntry->pClient; + if (pClient->pFreeStack != NULL) + freeStack.pPrev = pClient->pFreeStack; + pClient->pFreeStack = &freeStack; + bPopFreeStack = NV_TRUE; + + pParams->pResourceRef = pResourceRef; + freeStack.pResourceRef = pResourceRef; + + if (pParams->bDisableOnly) + { + if (!pClient->bDisabled) + { + pClient->bDisabled = NV_TRUE; + portSyncSpinlockAcquire(pServer->pDisabledClientListLock); + listAppendExisting(&pServer->disabledClientList, pClient); + portSyncSpinlockRelease(pServer->pDisabledClientListLock); + } + else + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + pClient->bActive = NV_FALSE; + status = NV_OK; + + // Unmap all CPU mappings + { + CALL_CONTEXT callContext; + RS_ITERATOR it; + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pLockInfo = pLockInfo; + + it = clientRefIter(pClient, NULL, 0, RS_ITERATE_DESCENDANTS, NV_TRUE); + while (clientRefIterNext(pClient, &it)) + { + callContext.pResourceRef = it.pResourceRef; + clientUnmapResourceRefMappings(pClient, &callContext, pLockInfo); + } + } + + goto done; + } + + if (pParams->bInvalidateOnly && pResourceRef->bInvalidated) + { + status = NV_OK; + goto done; + } + + bRecursive = (freeStack.pPrev != NULL); + status = clientUpdatePendingFreeList(pClient, pResourceRef, pResourceRef, bRecursive); + if (status != NV_OK) + goto done; + + clientPostProcessPendingFreeList(pClient, &pFirstLowPriRef); + + if (pServer->bDebugFreeList) + { + NV_PRINTF(LEVEL_INFO, "PENDING FREE LIST START (0x%x)\n", pClient->hClient); + NV_PRINTF(LEVEL_INFO, " _HI_PRIORITY_:\n"); + pTargetRef = listHead(&pClient->pendingFreeList); + while (pTargetRef != NULL) + { + if (pTargetRef == pFirstLowPriRef) + NV_PRINTF(LEVEL_INFO, " _LO_PRIORITY_:\n"); + + NV_PRINTF(LEVEL_INFO, " 0x%08x [%04x]\n", + pTargetRef->hResource, + pTargetRef->externalClassId); + pTargetRef = listNext(&pClient->pendingFreeList, pTargetRef); + } + NV_PRINTF(LEVEL_INFO, "PENDING FREE LIST END (0x%x)\n", pClient->hClient); + } + + while ((pTargetRef = listHead(&pClient->pendingFreeList)) != NULL) + { + NvBool bInvalidateOnly = NV_TRUE; + RS_FREE_STACK *pFs = &freeStack; + RS_RES_FREE_PARAMS_INTERNAL freeParams; + NvHandle hTarget = pTargetRef->hResource; + + if (bHiPriOnly && pTargetRef == pFirstLowPriRef) + goto done; + + if (pServer->bDebugFreeList) + { + NV_PRINTF(LEVEL_INFO, "(%08x, %08x)\n", pClient->hClient, hTarget); + } + + if (hTarget == pParams->hResource) + { + // Target resource should always be the last one to be freed + NV_ASSERT((listCount(&pClient->pendingFreeList) == 1) || bRecursive); + status = serverFreeResourceTreeUnderLock(pServer, pParams); + break; + } + + while (pFs != NULL) + { + 
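+            // Walk the active free stack: a target that descends from a
+            // resource already being freed inherits the caller's
+            // bInvalidateOnly setting; anything else is only invalidated here.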
RsResourceRef *pFsRef = pFs->pResourceRef; + if (refHasAncestor(pTargetRef, pFsRef)) + { + bInvalidateOnly = pParams->bInvalidateOnly; + break; + } + pFs = pFs->pPrev; + } + + serverInitFreeParams_Recursive(pClient->hClient, hTarget, pLockInfo, &freeParams); + freeParams.pResourceRef = pTargetRef; + freeParams.bInvalidateOnly = bInvalidateOnly; + freeParams.pSecInfo = pParams->pSecInfo; + status = serverFreeResourceTreeUnderLock(pServer, &freeParams); + NV_ASSERT((status == NV_OK) || (status == NV_ERR_GPU_IN_FULLCHIP_RESET)); + + if (pServer->bDebugFreeList) + { + NV_PRINTF(LEVEL_INFO, "(%08x, %08x) status=0x%x\n", + pClient->hClient, + hTarget, + status); + } + } + + if (bPopFreeStack) + { + pClient->pFreeStack = freeStack.pPrev; + bPopFreeStack = NV_FALSE; + } + + if (pParams->hClient == pParams->hResource) + { + pClient->bActive = NV_FALSE; + } + + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pClientEntry, pLockInfo, + &releaseFlags); + + pClientEntry = NULL; + + if (pParams->hClient == pParams->hResource) + { + NvBool bReAcquireLock = (topLockAccess != LOCK_ACCESS_WRITE); + RS_CLIENT_FREE_PARAMS_INTERNAL clientFreeParams; + portMemSet(&clientFreeParams, 0, sizeof(clientFreeParams)); + clientFreeParams.pResFreeParams = pParams; + clientFreeParams.hClient = pParams->hClient; + + if (bReAcquireLock) + { + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + NV_CHECK_OK(status, LEVEL_INFO, serverTopLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags)); + _serverFreeClient(pServer, &clientFreeParams); + serverTopLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + initialLockState &= ~RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED; + } + else + { + _serverFreeClient(pServer, &clientFreeParams); + } + + pClient = NULL; + } + +done: + if (bPopFreeStack) + { + if (pClient != NULL) + pClient->pFreeStack = freeStack.pPrev; + bPopFreeStack = NV_FALSE; + } + + if (pClientEntry != NULL) + { + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + pClientEntry, pLockInfo, &releaseFlags); + } + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + // + // Log any changes to lock state, but ignore the ALLOW_RECURSIVE_LOCKS flag + // as that can be set by serverUpdateLockFlagsForFree() when dealing with + // RPCs to GSP; this would have already printed the relevant message. 
+ // + NV_ASSERT((pLockInfo->state == initialLockState) || + (pLockInfo->state == (initialLockState | RS_LOCK_STATE_ALLOW_RECURSIVE_RES_LOCK))); + + return status; +} + +NV_STATUS +serverControl +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS *pParams +) +{ + NV_STATUS status; + CLIENT_ENTRY *pClientEntry = NULL; + CLIENT_ENTRY *pSecondClientEntry = NULL; + RsClient *pClient; + RsResourceRef *pResourceRef = NULL; + RS_LOCK_INFO *pLockInfo; + NvU32 releaseFlags = 0; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + LOCK_ACCESS_TYPE access = LOCK_ACCESS_WRITE; + enum CLIENT_LOCK_TYPE clientLockType = CLIENT_LOCK_SPECIFIC; + NvHandle hSecondClient; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + status = serverControlLookupLockFlags(pServer, RS_LOCK_TOP, pParams, pParams->pCookie, &access); + if (status != NV_OK) + goto done; + + if (pServer->bUnlockedParamCopy) + { + status = serverControlApiCopyIn(pServer, pParams, pParams->pCookie); + if (status != NV_OK) + goto done; + } + + status = serverTopLock_Prologue(pServer, access, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = serverControlLookupClientLockFlags(pParams->pCookie, &clientLockType); + if (status != NV_OK) + goto done; + + if (clientLockType == CLIENT_LOCK_SPECIFIC) + { + status = serverControlLookupSecondClient(pParams->cmd, pParams->pParams, + pParams->pCookie, &hSecondClient); + if (status != NV_OK) + goto done; + + if (hSecondClient == 0) + { + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + pParams->hClient, + (((pParams->flags & NVOS54_FLAGS_IRQL_RAISED) == 0) && + ((pParams->flags & NVOS54_FLAGS_LOCK_BYPASS) == 0)), + pLockInfo, &releaseFlags, &pClientEntry); + if (status != NV_OK) + goto done; + + if (!pClientEntry->pClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + } + else + { + status = _serverLockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + pParams->hClient, hSecondClient, + (((pParams->flags & NVOS54_FLAGS_IRQL_RAISED) == 0) && + ((pParams->flags & NVOS54_FLAGS_LOCK_BYPASS) == 0)), + pLockInfo, &releaseFlags, &pClientEntry, &pSecondClientEntry); + if (status != NV_OK) + goto done; + + if (!pClientEntry->pClient->bActive || !pSecondClientEntry->pClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + } + + pClient = pClientEntry->pClient; + } + else + { + status = _serverLockAllClientsWithLockInfo(pServer, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverFindClient(pServer, pParams->hClient, &pClient); + if (status != NV_OK) + goto done; + } + + status = clientValidate(pClient, &pParams->secInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hObject, &pResourceRef); + if (status != NV_OK) + goto done; + pParams->pResourceRef = pResourceRef; + + if (pResourceRef->bInvalidated || pResourceRef->pResource == NULL) + { + status = NV_ERR_RESOURCE_LOST; + goto done; + } + + pLockInfo->flags |= RS_LOCK_FLAGS_NO_DEPENDANT_SESSION_LOCK; + + status = serverSessionLock_Prologue(LOCK_ACCESS_WRITE, pResourceRef, + pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + if (pResourceRef->pSession != NULL) + { + if (!pResourceRef->pSession->bValid) + { + status = NV_ERR_RESOURCE_LOST; + goto done; + } + } + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pResourceRef = pResourceRef; + callContext.pClient = pClient; + callContext.secInfo = pParams->secInfo; + 
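+    // The populated context is swapped into TLS below so that resControl and
+    // anything it calls can retrieve it without it being threaded through
+    // every interface.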
    callContext.pServer = pServer;
+    callContext.pControlParams = pParams;
+    callContext.pLockInfo = pParams->pLockInfo;
+
+    // RS-TODO removeme
+    pParams->pLegacyParams = pParams;
+
+    if (pParams->hClient == pParams->hObject)
+    {
+        pParams->hParent = pParams->hClient;
+    }
+    else
+    {
+        pParams->hParent = pResourceRef->pParentRef->hResource;
+    }
+    pLockInfo->pContextRef = pResourceRef->pParentRef;
+
+    NV_ASSERT_OK_OR_GOTO(status,
+        resservSwapTlsCallContext(&pOldContext, &callContext), done);
+
+    status = resControl(pResourceRef->pResource, &callContext, pParams);
+    NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
+
+done:
+
+    serverSessionLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags);
+
+    if (clientLockType == CLIENT_LOCK_SPECIFIC)
+    {
+        if (pClientEntry != NULL)
+        {
+            if (pSecondClientEntry != NULL)
+            {
+                _serverUnlockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE,
+                                                    pClientEntry, pSecondClientEntry,
+                                                    pLockInfo, &releaseFlags);
+            }
+            else
+            {
+                _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pClientEntry,
+                                                pLockInfo, &releaseFlags);
+            }
+        }
+    }
+    else
+    {
+        _serverUnlockAllClientsWithLockInfo(pServer, pLockInfo, &releaseFlags);
+    }
+
+    serverTopLock_Epilogue(pServer, access, pLockInfo, &releaseFlags);
+
+    if (pServer->bUnlockedParamCopy)
+    {
+        status = serverControlApiCopyOut(pServer, pParams, pParams->pCookie, status);
+    }
+
+    return status;
+}
+
+NV_STATUS
+serverCopyResource
+(
+    RsServer *pServer,
+    RS_RES_DUP_PARAMS *pParams
+)
+{
+    NV_STATUS status;
+    RS_LOCK_INFO *pLockInfo = pParams->pLockInfo;
+    NvU32 releaseFlags = 0;
+    CLIENT_ENTRY *pClientEntrySrc = NULL;
+    CLIENT_ENTRY *pClientEntryDst = NULL;
+    RsClient *pClientSrc;
+    RsClient *pClientDst;
+    RsResourceRef *pResourceRefSrc;
+    LOCK_ACCESS_TYPE topLockAccess;
+
+    NvHandle hClientSrc = pParams->hClientSrc;
+    NvHandle hClientDst = pParams->hClientDst;
+
+    if (!pServer->bConstructed)
+        return NV_ERR_NOT_READY;
+
+    NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    status = serverCopyResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess);
+    if (status != NV_OK)
+        goto done;
+
+    status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags);
+    if (status != NV_OK)
+        goto done;
+
+    status = _serverLockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE,
+                                               hClientSrc, hClientDst, NV_TRUE,
+                                               pLockInfo, &releaseFlags,
+                                               &pClientEntrySrc, &pClientEntryDst);
+    if (status != NV_OK)
+        goto done;
+
+    NV_ASSERT_OR_ELSE(
+        (!serverIsClientLockedForRead((pClientEntrySrc)) &&
+         !serverIsClientLockedForRead((pClientEntryDst))),
+        status = NV_ERR_INVALID_LOCK_STATE; goto done);
+
+    pClientSrc = pClientEntrySrc->pClient;
+    pClientDst = pClientEntryDst->pClient;
+
+    if (!pClientSrc->bActive || !pClientDst->bActive)
+    {
+        status = NV_ERR_INVALID_STATE;
+        goto done;
+    }
+
+    status = clientValidate(pClientDst, pParams->pSecInfo);
+    if (status != NV_OK)
+        goto done;
+
+    status = clientGetResourceRef(pClientSrc, pParams->hResourceSrc, &pResourceRefSrc);
+    if (status != NV_OK)
+        goto done;
+
+    if (pResourceRefSrc->bInvalidated)
+    {
+        status = NV_ERR_RESOURCE_LOST;
+        goto done;
+    }
+
+    status = clientGetResourceRef(pClientDst, pParams->hParentDst, &pParams->pDstParentRef);
+    if (status != NV_OK)
+        goto done; // not 'return': the top and client locks are still held
+
+    if (!resCanCopy(pResourceRefSrc->pResource))
+    {
+        status = NV_ERR_INVALID_ARGUMENT;
+        goto done;
+    }
+
+    status = clientAssignResourceHandle(pClientDst, &pParams->hResourceDst);
+    if (status != NV_OK)
+        goto done;
+
+    pParams->pSrcClient = pClientSrc;
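+    // Bundle both ends of the dup so clientCopyResource can build the
+    // destination client's reference to the source resource.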
+    pParams->pSrcRef = pResourceRefSrc;
+    pParams->pDstClient = pClientDst;
+
+    status = serverUpdateLockFlagsForCopy(pServer, pParams);
+    if (status != NV_OK)
+        goto done; // not 'return': the top and client locks are still held
+
+    status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, &releaseFlags, 0);
+    if (status != NV_OK)
+        goto done;
+
+    status = clientCopyResource(pClientDst, pServer, pParams);
+    if (status != NV_OK)
+        goto done;
+
+    // NV_PRINTF(LEVEL_INFO, "hClient %x: Copied hResource: %x from hClientSrc: %x hResourceSrc: %x\n",
+    //           hClientDst, hResourceDst, hClientSrc, hResourceSrc);
+
+done:
+    serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, &releaseFlags);
+
+    if (pClientEntrySrc != NULL && pClientEntryDst != NULL)
+    {
+        _serverUnlockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE,
+                                            pClientEntrySrc, pClientEntryDst,
+                                            pLockInfo, &releaseFlags);
+    }
+    serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags);
+
+    return status;
+}
+
+/**
+ * Special case of serverShareResourceAccess for sharing with a specific client.
+ * This requires two client locks, so it is split out from the normal
+ * single-client path into its own function.
+ * @param[in] pServer
+ * @param[in] pParams Parameters passed into the share function
+ */
+static NV_STATUS
+_serverShareResourceAccessClient
+(
+    RsServer *pServer,
+    RS_RES_SHARE_PARAMS *pParams
+)
+{
+    NV_STATUS status;
+    RS_LOCK_INFO *pLockInfo = pParams->pLockInfo;
+    NvU32 releaseFlags = 0;
+    CLIENT_ENTRY *pClientEntryOwner = NULL;
+    CLIENT_ENTRY *pClientEntryTarget = NULL;
+    RsClient *pClientOwner;
+    RsClient *pClientTarget;
+    RsResourceRef *pResourceRef;
+    LOCK_ACCESS_TYPE topLockAccess;
+
+    NvHandle hClientOwner = pParams->hClient;
+    NvHandle hClientTarget = pParams->pSharePolicy->target;
+
+    CALL_CONTEXT callContext;
+    CALL_CONTEXT *pOldContext = NULL;
+
+    if (!pServer->bConstructed)
+        return NV_ERR_NOT_READY;
+
+    status = serverShareResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess);
+    if (status != NV_OK)
+        goto done;
+
+    status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags);
+    if (status != NV_OK)
+        goto done;
+
+    status = _serverLockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE,
+                                               hClientOwner, hClientTarget, NV_TRUE,
+                                               pLockInfo, &releaseFlags,
+                                               &pClientEntryOwner, &pClientEntryTarget);
+    if (status != NV_OK)
+        goto done;
+
+    NV_ASSERT_OR_ELSE(
+        (!serverIsClientLockedForRead((pClientEntryOwner)) &&
+         !serverIsClientLockedForRead((pClientEntryTarget))),
+        status = NV_ERR_INVALID_LOCK_STATE; goto done);
+
+    pClientOwner = pClientEntryOwner->pClient;
+    pClientTarget = pClientEntryTarget->pClient;
+
+    status = clientGetResourceRef(pClientOwner, pParams->hResource, &pResourceRef);
+    if (status != NV_OK)
+        goto done;
+
+    if (pResourceRef->bInvalidated)
+    {
+        status = NV_ERR_RESOURCE_LOST;
+        goto done;
+    }
+
+    portMemSet(&callContext, 0, sizeof(callContext));
+    callContext.pServer = pServer;
+    callContext.pClient = pClientOwner;
+    callContext.pResourceRef = pResourceRef;
+    callContext.secInfo = *pParams->pSecInfo;
+    callContext.pLockInfo = pParams->pLockInfo;
+    NV_ASSERT_OK_OR_GOTO(status,
+        resservSwapTlsCallContext(&pOldContext, &callContext), done);
+
+    if (hClientOwner == hClientTarget)
+    {
+        //
+        // Special case: RS_SHARE_TYPE_CLIENT with own client
+        // Allows the caller to directly modify the access map of their object
+        //
+        status = clientShareResourceTargetClient(pClientOwner, pResourceRef, pParams->pSharePolicy, &callContext);
+        if (status != NV_OK)
+            goto restore_context;
+    }
+
+    // Add backref
into pClientTarget to prevent stale client handles + status = clientAddAccessBackRef(pClientTarget, pResourceRef); + if (status != NV_OK) + goto restore_context; + + status = clientShareResource(pClientOwner, pResourceRef, pParams->pSharePolicy, &callContext); + if (status != NV_OK) + goto restore_context; + +restore_context: + NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext)); + + // NV_PRINTF(LEVEL_INFO, "hClientOwner %x: Shared hResource: %x with hClientTarget: %x\n", + // hClientOwner, pParams->hResource, hClientTarget); + +done: + if (pClientEntryOwner != NULL && pClientEntryTarget != NULL) + { + _serverUnlockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + pClientEntryOwner, pClientEntryTarget, + pLockInfo, &releaseFlags); + } + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + + +NV_STATUS +serverShareResourceAccess +( + RsServer *pServer, + RS_RES_SHARE_PARAMS *pParams +) +{ + NV_STATUS status; + RS_LOCK_INFO *pLockInfo; + NvU32 releaseFlags = 0; + CLIENT_ENTRY *pClientEntry = NULL; + RsClient *pClient; + RsResourceRef *pResourceRef; + NvU16 shareType; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + LOCK_ACCESS_TYPE topLockAccess; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + if (!pServer->bRsAccessEnabled) + return NV_ERR_FEATURE_NOT_ENABLED; + + if (pParams->pSharePolicy == NULL) + return NV_ERR_INVALID_ARGUMENT; + + shareType = pParams->pSharePolicy->type; + if (shareType >= RS_SHARE_TYPE_MAX) + return NV_ERR_INVALID_ARGUMENT; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + if (shareType == RS_SHARE_TYPE_CLIENT) + { + // Special case: This requires two locks, so it has its own function + return _serverShareResourceAccessClient(pServer, pParams); + } + + status = serverShareResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, + NV_TRUE, pLockInfo, &releaseFlags, &pClientEntry); + if (status != NV_OK) + goto done; + + NV_ASSERT_OR_ELSE(!serverIsClientLockedForRead(pClientEntry), + status = NV_ERR_INVALID_LOCK_STATE; goto done); + + pClient = pClientEntry->pClient; + + status = clientValidate(pClient, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hResource, &pResourceRef); + if (status != NV_OK) + goto done; + + if (pResourceRef->bInvalidated) + { + status = NV_ERR_RESOURCE_LOST; + goto done; + } + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pResourceRef = pResourceRef; + callContext.secInfo = *pParams->pSecInfo; + callContext.pLockInfo = pParams->pLockInfo; + + NV_ASSERT_OK_OR_GOTO(status, + resservSwapTlsCallContext(&pOldContext, &callContext), done); + + status = clientShareResource(pClient, pResourceRef, pParams->pSharePolicy, &callContext); + NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext)); + if (status != NV_OK) + goto done; + + // NV_PRINTF(LEVEL_INFO, "hClient %x: Shared hResource: %x\n", hClient, pParams->hResource); + +done: + if (pClientEntry != NULL) + { + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pClientEntry, + pLockInfo, &releaseFlags); + } + + serverTopLock_Epilogue(pServer, 
topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +NV_STATUS +serverMap +( + RsServer *pServer, + NvHandle hClient, + NvHandle hResource, + RS_CPU_MAP_PARAMS *pParams +) +{ + NV_STATUS status = NV_ERR_INVALID_STATE; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + CLIENT_ENTRY *pClientEntry = NULL; + RsClient *pClient; + RsResourceRef *pResourceRef = NULL; + RsResourceRef *pContextRef = NULL; + RsResource *pResource; + RsCpuMapping *pCpuMapping = NULL; + RS_LOCK_INFO *pLockInfo; + NvU32 releaseFlags = 0; + LOCK_ACCESS_TYPE topLockAccess = LOCK_ACCESS_WRITE; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_GOTO(pLockInfo != NULL, done); + + status = serverMapLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, hClient, + NV_TRUE, pLockInfo, &releaseFlags, &pClientEntry); + if (status != NV_OK) + goto done; + + pClient = pClientEntry->pClient; + + if (!pClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + status = clientValidate(pClient, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + goto done; + + pResource = pResourceRef->pResource; + if (pResource == NULL) + { + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + status = serverMap_Prologue(pServer, pParams); + if (status != NV_OK) + goto done; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags, 0); + if (status != NV_OK) + goto done; + + if (pParams->hContext != 0) + { + status = clientGetResourceRef(pClient, pParams->hContext, &pContextRef); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "hClient %x: Cannot find hContext: 0x%x\n", pClient->hClient, pParams->hContext); + goto done; + } + } + + status = refAddMapping(pResourceRef, pParams, pContextRef, &pCpuMapping); + if (status != NV_OK) + goto done; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pClient = pClient; + callContext.pResourceRef = pResourceRef; + callContext.pLockInfo = pParams->pLockInfo; + + // Some MODS tests don't set secInfo. 
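+    // When secInfo is absent, callContext.secInfo keeps the zeroed defaults
+    // from the portMemSet above.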
+ if (pParams->pSecInfo != NULL) + callContext.secInfo = *pParams->pSecInfo; + + NV_ASSERT_OK_OR_GOTO(status, + resservSwapTlsCallContext(&pOldContext, &callContext), done); + + status = resMap(pResource, &callContext, pParams, pCpuMapping); + NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext)); + + if (status != NV_OK) + goto done; + + // NV_PRINTF(LEVEL_INFO, "hClient %x: Mapped hResource: 0x%x hContext: %x at addr: " NvP64_fmt "\n", + // hClient, hResource, pParams->hContext, pCpuMapping->pAddress); + + if (pParams->ppCpuVirtAddr != NULL) + *pParams->ppCpuVirtAddr = pCpuMapping->pLinearAddress; + +done: + if (status != NV_OK) + { + if (pCpuMapping != NULL) + refRemoveMapping(pResourceRef, pCpuMapping); + } + + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + if (pClientEntry != NULL) + { + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pClientEntry, + pLockInfo, &releaseFlags); + } + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +NV_STATUS +serverUnmap +( + RsServer *pServer, + NvHandle hClient, + NvHandle hResource, + RS_CPU_UNMAP_PARAMS *pParams +) +{ + NV_STATUS status = NV_ERR_INVALID_STATE; + CLIENT_ENTRY *pClientEntry = NULL; + RsClient *pClient; + RsResourceRef *pResourceRef; + RsResource *pResource; + RsCpuMapping *pCpuMapping; + RS_LOCK_INFO *pLockInfo; + NvU32 releaseFlags = 0; + LOCK_ACCESS_TYPE topLockAccess = LOCK_ACCESS_WRITE; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_GOTO(pLockInfo != NULL, done); + + status = serverUnmapLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, hClient, + NV_TRUE, pLockInfo, &releaseFlags, &pClientEntry); + if (status != NV_OK) + goto done; + + pClient = pClientEntry->pClient; + + status = clientValidate(pClient, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + goto done; + + pResource = pResourceRef->pResource; + if (pResource == NULL) + { + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + status = serverUnmap_Prologue(pServer, pParams); + if (status != NV_OK) + goto done; + + status = refFindCpuMappingWithFilter(pResourceRef, + pParams->pLinearAddress, + pParams->fnFilter, + &pCpuMapping); + if (status != NV_OK) + goto done; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags, 0); + if (status != NV_OK) + goto done; + + status = clientUnmapMemory(pClient, pResourceRef, pLockInfo, &pCpuMapping, pParams->pSecInfo); + +done: + serverUnmap_Epilogue(pServer, pParams); + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + if (pClientEntry != NULL) + { + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pClientEntry, + pLockInfo, &releaseFlags); + } + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +NV_STATUS +serverInterMap +( + RsServer *pServer, + RS_INTER_MAP_PARAMS *pParams +) +{ + CLIENT_ENTRY *pClientEntry = NULL; + RsClient *pClient; + RsResourceRef *pMapperRef = NULL; + RsResourceRef *pMappableRef; + RsResourceRef *pContextRef; + RsInterMapping *pMapping = NULL; + LOCK_ACCESS_TYPE topLockAccess; + + NV_STATUS status; + RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + 
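+    // An inter-mapping maps hMappable into hMapper's address space, with
+    // hDevice supplying the context for the mapping.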
NvU32 releaseFlags = 0; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + NvBool bRestoreCallContext = NV_FALSE; + + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + if (pParams->length == 0) + return NV_ERR_INVALID_LIMIT; + + status = serverInterMapLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, + NV_TRUE, pLockInfo, &releaseFlags, + &pClientEntry); + if (status != NV_OK) + goto done; + + pClient = pClientEntry->pClient; + + if (!pClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + status = clientValidate(pClient, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hMapper, &pMapperRef); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hMappable, &pMappableRef); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hDevice, &pContextRef); + if (status != NV_OK) + goto done; + + pLockInfo->pContextRef = pContextRef; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pResourceRef = pMapperRef; + callContext.pContextRef = pContextRef; + callContext.pLockInfo = pLockInfo; + + // Some MODS tests don't set secInfo. + if (pParams->pSecInfo != NULL) + callContext.secInfo = *pParams->pSecInfo; + + NV_ASSERT_OK_OR_GOTO(status, + resservSwapTlsCallContext(&pOldContext, &callContext), done); + + bRestoreCallContext = NV_TRUE; + + status = refAddInterMapping(pMapperRef, pMappableRef, pContextRef, &pMapping); + if (status != NV_OK) + goto done; + + // serverResLock_Prologue should be called during serverInterMap_Prologue + status = serverInterMap_Prologue(pServer, pMapperRef, pMappableRef, pParams, &releaseFlags); + if (status != NV_OK) + goto done; + + status = clientInterMap(pClient, pMapperRef, pMappableRef, pParams); + if (status != NV_OK) + goto done; + + pMapping->flags = pParams->flags; + pMapping->flags2 = pParams->flags2; + pMapping->dmaOffset = pParams->dmaOffset; + pMapping->size = pParams->length; + pMapping->pMemDesc = pParams->pMemDesc; + +done: + serverInterMap_Epilogue(pServer, pParams, &releaseFlags); + + if (bRestoreCallContext) + NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext)); + + if (status != NV_OK) + { + if (pMapping != NULL) + refRemoveInterMapping(pMapperRef, pMapping); + } + + if (pClientEntry != NULL) + { + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pClientEntry, + pLockInfo, &releaseFlags); + } + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +static NV_STATUS +serverInterUnmapMapping +( + RsClient *pClient, + RsResourceRef *pMapperRef, + RsInterMapping *pMapping, + RS_INTER_UNMAP_PARAMS *pParams, + NvBool bPartialUnmap +) +{ + RsInterMapping *pNewMappingLeft = NULL; + RsInterMapping *pNewMappingRight = NULL; + NV_STATUS status = NV_OK; + + if (pParams->dmaOffset > pMapping->dmaOffset) + { + NV_ASSERT_OK_OR_GOTO(status, refAddInterMapping(pMapperRef, pMapping->pMappableRef, pMapping->pContextRef, &pNewMappingLeft), done); + + pNewMappingLeft->flags = pMapping->flags; + pNewMappingLeft->flags2 = pMapping->flags2; + pNewMappingLeft->dmaOffset = pMapping->dmaOffset; + 
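+        // The left remainder keeps the original start and ends where the
+        // unmapped range begins. For example, unmapping [0x2000, 0x3000) out
+        // of a mapping covering [0x1000, 0x4000) leaves [0x1000, 0x2000) here
+        // and [0x3000, 0x4000) in the right remainder below.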
        pNewMappingLeft->size = pParams->dmaOffset - pMapping->dmaOffset;
+    }
+
+    if (pParams->dmaOffset + pParams->size < pMapping->dmaOffset + pMapping->size)
+    {
+        NV_ASSERT_OK_OR_GOTO(status, refAddInterMapping(pMapperRef, pMapping->pMappableRef, pMapping->pContextRef, &pNewMappingRight), done);
+
+        pNewMappingRight->flags = pMapping->flags;
+        pNewMappingRight->flags2 = pMapping->flags2;
+        pNewMappingRight->dmaOffset = pParams->dmaOffset + pParams->size;
+        pNewMappingRight->size = pMapping->dmaOffset + pMapping->size - pNewMappingRight->dmaOffset;
+    }
+
+    pParams->hMappable = pMapping->pMappableRef->hResource;
+    pParams->pMemDesc = pMapping->pMemDesc;
+    status = clientInterUnmap(pClient, pMapperRef, pParams);
+
+done:
+    if (bPartialUnmap && status != NV_OK)
+    {
+        if (pNewMappingLeft != NULL)
+            refRemoveInterMapping(pMapperRef, pNewMappingLeft);
+
+        if (pNewMappingRight != NULL)
+            refRemoveInterMapping(pMapperRef, pNewMappingRight);
+    }
+    else
+    {
+        // Regular unmap should never fail when the range is found
+        NV_ASSERT(status == NV_OK);
+        refRemoveInterMapping(pMapperRef, pMapping);
+    }
+
+    return status;
+}
+
+static NV_STATUS
+serverInterUnmapInternal
+(
+    RsClient *pClient,
+    RsResourceRef *pMapperRef,
+    RsResourceRef *pContextRef,
+    RS_INTER_UNMAP_PARAMS *pParams
+)
+{
+    RsInterMapping *pNextMapping = listHead(&pMapperRef->interMappings);
+    NvU64 unmapDmaOffset = pParams->dmaOffset;
+    NvU64 unmapSize = pParams->size;
+    NvBool bPartialUnmap = (unmapSize != 0);
+    NV_STATUS unmapStatus = NV_OK;
+    NV_STATUS status = bPartialUnmap ? NV_OK : NV_ERR_OBJECT_NOT_FOUND;
+    NvU64 unmapEnd;
+
+    NV_CHECK_OR_RETURN(LEVEL_ERROR, portSafeAddU64(unmapDmaOffset, unmapSize, &unmapEnd), NV_ERR_INVALID_ARGUMENT);
+
+    while (pNextMapping != NULL)
+    {
+        RsInterMapping *pMapping = pNextMapping;
+        pNextMapping = listNext(&pMapperRef->interMappings, pMapping);
+
+        if (pMapping->pContextRef != pContextRef)
+            continue;
+
+        NvU64 mappingEnd;
+        NV_ASSERT_OR_RETURN(portSafeAddU64(pMapping->dmaOffset, pMapping->size, &mappingEnd), NV_ERR_INVALID_STATE);
+
+        if (bPartialUnmap &&
+            mappingEnd > unmapDmaOffset &&
+            pMapping->dmaOffset < unmapEnd)
+        {
+            if (pMapping->dmaOffset < unmapDmaOffset || mappingEnd > unmapEnd)
+            {
+                // If the mapping does not lie entirely in the unmapped range, we are in the "true" partial unmap path
+                NV_CHECK_TRUE_OR_GOTO(unmapStatus, LEVEL_ERROR, resIsPartialUnmapSupported(pMapperRef->pResource), NV_ERR_INVALID_ARGUMENT, done);
+                // It is unclear what to do with pMemDesc when the mapping is split
+                NV_ASSERT_TRUE_OR_GOTO(unmapStatus, pMapping->pMemDesc == NULL, NV_ERR_INVALID_STATE, done);
+            }
+
+            pParams->dmaOffset = NV_MAX(pMapping->dmaOffset, unmapDmaOffset);
+            pParams->size = NV_MIN(unmapEnd, mappingEnd) - pParams->dmaOffset;
+        }
+        else if (!bPartialUnmap && pMapping->dmaOffset == unmapDmaOffset)
+        {
+            pParams->dmaOffset = pMapping->dmaOffset;
+            pParams->size = pMapping->size;
+        }
+        else
+        {
+            continue;
+        }
+
+        NV_ASSERT_OK_OR_GOTO(unmapStatus, serverInterUnmapMapping(pClient, pMapperRef, pMapping, pParams, bPartialUnmap), done);
+
+        if (!bPartialUnmap)
+        {
+            // non-partial unmap always touches a single mapping
+            status = NV_OK;
+            break;
+        }
+    }
+
+done:
+    if (unmapStatus != NV_OK)
+        status = unmapStatus;
+
+    return status;
+}
+
+NV_STATUS
+serverInterUnmap
+(
+    RsServer *pServer,
+    RS_INTER_UNMAP_PARAMS *pParams
+)
+{
+    CLIENT_ENTRY *pClientEntry = NULL;
+    RsClient *pClient;
+    RsResourceRef *pMapperRef;
+    RsResourceRef *pContextRef;
+    LOCK_ACCESS_TYPE topLockAccess;
+
+    NV_STATUS status;
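+    // serverInterUnmapInternal above handles both forms: size == 0 unmaps
+    // exactly the mapping starting at dmaOffset, while a nonzero size punches
+    // the given range out of every overlapping mapping, splitting as needed.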
RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + NvU32 releaseFlags = 0; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + NvBool bRestoreCallContext = NV_FALSE; + + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + status = serverInterUnmapLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, + NV_TRUE, pLockInfo, &releaseFlags, + &pClientEntry); + if (status != NV_OK) + goto done; + + pClient = pClientEntry->pClient; + + status = clientValidate(pClient, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hMapper, &pMapperRef); + if (status != NV_OK) + goto done; + + if ((pMapperRef->bInvalidated) && (pMapperRef->pResource == NULL)) + { + // Object has already been freed and unmapped + goto done; + } + + status = clientGetResourceRef(pClient, pParams->hDevice, &pContextRef); + if (status != NV_OK) + goto done; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pResourceRef = pMapperRef; + callContext.pContextRef = pContextRef; + callContext.pLockInfo = pLockInfo; + + // Some MODS tests don't set secInfo. + if (pParams->pSecInfo != NULL) + callContext.secInfo = *pParams->pSecInfo; + + if (pLockInfo->pContextRef == NULL) + pLockInfo->pContextRef = pContextRef; + + NV_ASSERT_OK_OR_GOTO(status, + resservSwapTlsCallContext(&pOldContext, &callContext), done); + + bRestoreCallContext = NV_TRUE; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags, 0); + if (status != NV_OK) + goto done; + + status = serverInterUnmap_Prologue(pServer, pParams); + if (status != NV_OK) + goto done; + + status = serverInterUnmapInternal(pClient, pMapperRef, pContextRef, pParams); +done: + serverInterUnmap_Epilogue(pServer, pParams); + + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + + if (bRestoreCallContext) + NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext)); + + if (pClientEntry != NULL) + { + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pClientEntry, + pLockInfo, &releaseFlags); + } + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +NV_STATUS +serverAcquireClient +( + RsServer *pServer, + NvHandle hClient, + LOCK_ACCESS_TYPE lockAccess, + CLIENT_ENTRY **ppClientEntry +) +{ + CLIENT_ENTRY *pClientEntry; + + if (ppClientEntry == NULL) + return NV_ERR_INVALID_ARGUMENT; + + // NV_PRINTF(LEVEL_INFO, "Acquiring hClient %x\n", hClient); + if (!_serverGetAndLockClientEntryByHandle(pServer, hClient, lockAccess, + &pClientEntry)) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + *ppClientEntry = pClientEntry; + + return NV_OK; +} + +NV_STATUS +serverGetClientUnderLock +( + RsServer *pServer, + NvHandle hClient, + RsClient **ppClient +) +{ + NV_STATUS status; + RsClient *pClient; + + // NV_PRINTF(LEVEL_INFO, "Acquiring hClient %x (without lock)\n", hClient); + status = _serverFindClient(pServer, hClient, &pClient); + if (status != NV_OK) + { + return status; + } + + if (ppClient != NULL) + *ppClient = pClient; + + return NV_OK; +} + +void +serverReleaseClient +( + RsServer *pServer, + LOCK_ACCESS_TYPE lockAccess, + CLIENT_ENTRY *pClientEntry +) +{ + 
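+    // Counterpart to serverAcquireClient: drops the per-client lock and the
+    // reference that the lookup took on the entry.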
    _serverPutAndUnlockClientEntry(pServer, lockAccess, pClientEntry);
+}
+
+NvBool
+serverIsClientLocked
+(
+    RsServer *pServer,
+    NvHandle hClient
+)
+{
+    CLIENT_ENTRY *pClientEntry;
+
+    // An unknown client is reported as not locked
+    NV_CHECK_OR_RETURN(LEVEL_SILENT, _serverFindClientEntryByHandle(pServer, hClient,
+        0, CLIENT_LIST_LOCK_UNLOCKED, &pClientEntry), NV_FALSE);
+
+    return (pClientEntry->lockOwnerTid == portThreadGetCurrentThreadId());
+}
+
+
+NvBool
+serverIsClientInternal
+(
+    RsServer *pServer,
+    NvHandle hClient
+)
+{
+    return ((hClient & pServer->internalHandleBase) == pServer->internalHandleBase);
+}
+
+static NV_STATUS _serverBuildAllClientLockList
+(
+    RsServer *pServer
+)
+{
+    NvU32 i;
+    NvU32 activeClientCount;
+    NvBool bClientsRemaining;
+    NvHandle hClientBucket = RS_CLIENT_HANDLE_BASE;
+    CLIENT_ENTRY **ppClientListLocations;
+    RsLockedClientListIter lockedClientListIter;
+
+    //
+    // Perform memory allocations outside of the client list lock's
+    // critical section since it's a spinlock.
+    //
+    ppClientListLocations = PORT_ALLOC(pServer->pAllocator,
+        sizeof(*ppClientListLocations) * RS_CLIENT_HANDLE_BUCKET_COUNT);
+
+    if (ppClientListLocations == NULL)
+        return NV_ERR_NO_MEMORY;
+
+    i = 0;
+    while (1)
+    {
+        activeClientCount = pServer->activeClientCount;
+
+        //
+        // Allocate memory for locked client list outside of client list lock critical
+        // section.
+        //
+        listInit(&pServer->lockedClientList, pServer->pAllocator);
+
+        for (; i < activeClientCount; i++)
+        {
+            CLIENT_ENTRY **ppClientEntry = listAppendNew(&pServer->lockedClientList);
+
+            if (ppClientEntry == NULL)
+            {
+                listDestroy(&pServer->lockedClientList);
+                PORT_FREE(pServer->pAllocator, ppClientListLocations);
+                return NV_ERR_NO_MEMORY;
+            }
+
+            *ppClientEntry = NULL;
+        }
+
+        serverAcquireClientListLock(pServer);
+
+        //
+        // Ensure that the active client count didn't increase while we checked it
+        // outside of the client list lock. Increase the size of the list if so.
+        //
+        if (activeClientCount >= pServer->activeClientCount)
+            break;
+
+        serverReleaseClientListLock(pServer);
+    }
+
+    // Initialize the client location array to the start of all used client lists
+    for (i = 0; i < RS_CLIENT_HANDLE_BUCKET_COUNT; i++)
+    {
+        RsClientList *pClientList = &(pServer->pClientSortedList[i]);
+
+        ppClientListLocations[i] = listHead(pClientList);
+    }
+
+    bClientsRemaining = (activeClientCount > 0);
+    lockedClientListIter = listIterAll(&pServer->lockedClientList);
+
+    //
+    // Add client entries to the all-clients lock list, keeping it sorted,
+    // using the client location array.
+    //
+    while (bClientsRemaining)
+    {
+        NvHandle hClientNextBucket = ~0;
+
+        bClientsRemaining = NV_FALSE;
+
+        // Iterate over the client location array
+        for (i = 0; i < RS_CLIENT_HANDLE_BUCKET_COUNT; i++)
+        {
+            CLIENT_ENTRY *pClientEntry = ppClientListLocations[i];
+
+            if (pClientEntry == NULL)
+                continue;
+
+            //
+            // Add this client to the all-clients lock list if it's in range of
+            // the current bucket to ensure sorted order.
+            //
+            if (pClientEntry->hClient >= hClientBucket &&
+                (pClientEntry->hClient < (hClientBucket + RS_CLIENT_HANDLE_BUCKET_COUNT)))
+            {
+                CLIENT_ENTRY **ppLockedClientEntry;
+                RsClientList *pClientList = &(pServer->pClientSortedList[i]);
+
+                NV_ASSERT(listIterNext(&lockedClientListIter));
+                ppLockedClientEntry = lockedClientListIter.pValue;
+
+                //
+                // Ignore any partially constructed client.
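+                // (its pClient pointer is only published, under the client
+                // list lock, once serverAllocClient finishes constructing it)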
+ // Ignore anything pending free since nothing can use this client + // object after it's been marked pending free + // + if ((pClientEntry->pClient != NULL) && !pClientEntry->bPendingFree) + { + *ppLockedClientEntry = pClientEntry; + + // + // Increase the ref count so client entry doesn't get freed when + // we release the client list lock. + // + _serverGetClientEntry(pClientEntry); + } + + pClientEntry = listNext(pClientList, pClientEntry); + ppClientListLocations[i] = pClientEntry; + + // Move to next bucket if at end of the list + if (pClientEntry == NULL) + continue; + } + + // Any remaining non-NULL client entries must be in a larger bucket + if (pClientEntry->hClient >= + (hClientBucket + RS_CLIENT_HANDLE_BUCKET_COUNT)) + { + // Update next bucket if there are remaining clients to process + hClientNextBucket = NV_MIN(hClientNextBucket, + (pClientEntry->hClient & ~RS_CLIENT_HANDLE_BUCKET_MASK)); + bClientsRemaining = NV_TRUE; + } + } + + hClientBucket = hClientNextBucket; + } + + serverReleaseClientListLock(pServer); + + // Free client list locations array since it's no longer needed + PORT_FREE(pServer->pAllocator, ppClientListLocations); + + return NV_OK; +} + +NV_STATUS +serverLockAllClients +( + RsServer *pServer +) +{ + RsLockedClientListIter lockedClientListIter; + + NV_ASSERT_OK_OR_RETURN(_serverBuildAllClientLockList(pServer)); + + lockedClientListIter = listIterAll(&pServer->lockedClientList); + + // Lock all clients in order + while (listIterNext(&lockedClientListIter)) + { + CLIENT_ENTRY *pClientEntry = *lockedClientListIter.pValue; + + if (pClientEntry != NULL) + _serverLockClient(LOCK_ACCESS_WRITE, pClientEntry); + } + + // Set all client lock owner TID + pServer->allClientLockOwnerTid = portThreadGetCurrentThreadId(); + + return NV_OK; +} + +NV_STATUS +serverUnlockAllClients +( + RsServer *pServer +) +{ + CLIENT_ENTRY **ppClientEntry = listTail(&pServer->lockedClientList); + + NV_ASSERT_OR_RETURN(serverAllClientsLockIsOwner(pServer), NV_ERR_INVALID_LOCK_STATE); + + // Unlock clients in reverse order + while (ppClientEntry != NULL) + { + CLIENT_ENTRY *pClientEntry = *ppClientEntry; + + if (pClientEntry != NULL) + { + // Unlock and restore reference count for clients + _serverPutAndUnlockClientEntry(pServer, LOCK_ACCESS_WRITE, *ppClientEntry); + } + + ppClientEntry = listPrev(&pServer->lockedClientList, ppClientEntry); + } + + // Destroy locked client list + listDestroy(&pServer->lockedClientList); + + // Unset all client lock owner TID + pServer->allClientLockOwnerTid = ~0; + + return NV_OK; +} + +static +NvBool +__serverFindClientEntryByHandle +( + RsServer *pServer, + NvHandle hClient, + enum CLIENT_STATE clientState, + enum CLIENT_LIST_LOCK_STATE clientListLockState, + NvBool bIncRefCount, + CLIENT_ENTRY **ppClientEntry +) +{ + NvBool bClientFound = NV_FALSE; + RsClientList *pClientList; + CLIENT_ENTRY *pClientEntryLoop; + + if (clientListLockState == CLIENT_LIST_LOCK_UNLOCKED) + serverAcquireClientListLock(pServer); + + pClientList = &(pServer->pClientSortedList[hClient & RS_CLIENT_HANDLE_BUCKET_MASK]); + pClientEntryLoop = listHead(pClientList); + + while (pClientEntryLoop != NULL) + { + CLIENT_ENTRY *pClientEntry = pClientEntryLoop; + pClientEntryLoop = listNext(pClientList, pClientEntryLoop); + + if (pClientEntry->hClient == hClient) + { + // Client may not have finished constructing yet + if ((pClientEntry->pClient == NULL) && + ((clientState & CLIENT_PARTIALLY_INITIALIZED) == 0)) + { + goto done; + } + + // Client may be pending free + if 
+            if (pClientEntry->bPendingFree &&
+                ((clientState & CLIENT_PENDING_FREE) == 0))
+            {
+                goto done;
+            }
+
+            if (bIncRefCount)
+                _serverGetClientEntry(pClientEntry);
+
+            if (ppClientEntry != NULL)
+                *ppClientEntry = pClientEntry;
+
+            bClientFound = NV_TRUE;
+            goto done;
+        }
+        else if (pClientEntry->hClient > hClient)
+        {
+            // Not found in sorted list
+            goto done;
+        }
+    }
+
+done:
+    if (clientListLockState == CLIENT_LIST_LOCK_UNLOCKED)
+        serverReleaseClientListLock(pServer);
+
+    return bClientFound;
+}
+
+static
+NvBool
+_serverFindClientEntryByHandle
+(
+    RsServer *pServer,
+    NvHandle hClient,
+    enum CLIENT_STATE clientState,
+    enum CLIENT_LIST_LOCK_STATE clientListLockState,
+    CLIENT_ENTRY **ppClientEntry
+)
+{
+    return __serverFindClientEntryByHandle(pServer, hClient, clientState,
+        clientListLockState, NV_FALSE, ppClientEntry);
+}
+
+static
+NvBool
+_serverGetClientEntryByHandle
+(
+    RsServer *pServer,
+    NvHandle hClient,
+    enum CLIENT_STATE clientState,
+    enum CLIENT_LIST_LOCK_STATE clientListLockState,
+    CLIENT_ENTRY **ppClientEntry
+)
+{
+    return __serverFindClientEntryByHandle(pServer, hClient, clientState,
+        clientListLockState, NV_TRUE, ppClientEntry);
+}
+
+static
+NvBool
+_serverGetAndLockClientEntryByHandle
+(
+    RsServer *pServer,
+    NvHandle hClient,
+    LOCK_ACCESS_TYPE access,
+    CLIENT_ENTRY **ppClientEntry
+)
+{
+    CLIENT_ENTRY *pClientEntry;
+
+    if (!_serverGetClientEntryByHandle(pServer, hClient, 0, CLIENT_LIST_LOCK_UNLOCKED,
+            &pClientEntry))
+    {
+        return NV_FALSE;
+    }
+
+    _serverLockClient(access, pClientEntry);
+
+    // Handle the race condition where the client entry was marked pending free
+    if (pClientEntry->bPendingFree)
+    {
+        _serverPutAndUnlockClientEntry(pServer, access, pClientEntry);
+        return NV_FALSE;
+    }
+
+    *ppClientEntry = pClientEntry;
+    return NV_TRUE;
+}
+
+static
+void
+_serverPutAndUnlockClientEntry
+(
+    RsServer *pServer,
+    LOCK_ACCESS_TYPE access,
+    CLIENT_ENTRY *pClientEntry
+)
+{
+    _serverUnlockClient(access, pClientEntry);
+    _serverPutClientEntry(pServer, pClientEntry);
+}
+
+static
+NV_STATUS
+_serverFindClient
+(
+    RsServer *pServer,
+    NvHandle hClient,
+    RsClient **ppClient
+)
+{
+    CLIENT_ENTRY *pClientEntry;
+
+    NV_CHECK_OR_RETURN(LEVEL_SILENT, _serverFindClientEntryByHandle(pServer, hClient,
+        CLIENT_PENDING_FREE, CLIENT_LIST_LOCK_UNLOCKED, &pClientEntry),
+        NV_ERR_INVALID_OBJECT_HANDLE);
+
+    *ppClient = pClientEntry->pClient;
+    return NV_OK;
+}
+
+static
+NV_STATUS
+_serverInsertClientEntry
+(
+    RsServer *pServer,
+    CLIENT_ENTRY *pClientEntry,
+    CLIENT_ENTRY *pClientNext
+)
+{
+    RsClientList *pClientList;
+    NvHandle hClient = pClientEntry->hClient;
+
+    if (hClient == 0)
+    {
+        return NV_ERR_INVALID_OBJECT_HANDLE;
+    }
+
+    pClientList = &(pServer->pClientSortedList[hClient & RS_CLIENT_HANDLE_BUCKET_MASK]);
+
+    if (pClientNext == NULL)
+    {
+        listAppendExisting(pClientList, pClientEntry);
+    }
+    else
+    {
+        listInsertExisting(pClientList, pClientNext, pClientEntry);
+    }
+
+    return NV_OK;
+}
+
+/**
+ * Mark a CLIENT_ENTRY as about to be freed
+ * @param[in]  pServer
+ * @param[in]  hClient       The handle to look up
+ * @param[out] ppClientEntry The client entry associated with the handle
+ *
+ * @return NV_TRUE if the client entry was found and marked pending free
+ *         NV_FALSE if the client couldn't be found
+ */
+static NvBool _serverMarkClientEntryPendingFree(RsServer *pServer, NvHandle hClient, CLIENT_ENTRY **ppClientEntry)
+{
+    RsClientList *pClientList;
+    CLIENT_ENTRY *pClientEntry;
+
+    serverAcquireClientListLock(pServer);
+
+    pClientList = &(pServer->pClientSortedList[hClient & RS_CLIENT_HANDLE_BUCKET_MASK]);
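+    //
+    // Added commentary: marking bPendingFree hides the entry from normal
+    // lookups; the wait loop below then drains in-flight references by
+    // ping-ponging the client lock until refCount reaches 1.
+    //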
+    pClientEntry = listHead(pClientList);
+
+    while (pClientEntry != NULL)
+    {
+        if (pClientEntry->hClient == hClient)
+        {
+            if (pClientEntry->pClient == NULL)
+                goto fail;
+
+            *ppClientEntry = pClientEntry;
+
+            //
+            // Mark the client entry pending free if it isn't already in the
+            // process of being freed
+            //
+            if (pClientEntry->bPendingFree)
+                goto fail;
+
+            pClientEntry->bPendingFree = NV_TRUE;
+
+            //
+            // Release the client list lock - retaining it while attempting to
+            // acquire a client lock could deadlock.
+            //
+            serverReleaseClientListLock(pServer);
+
+            //
+            // If we locked this client as part of locking all client locks, ensure we
+            // unlock this client first to avoid self-deadlocking. Also decrement the
+            // reference count, since serverLockAllClients increments it, and remove it
+            // from the locked client list to prevent a dangling reference to the
+            // CLIENT_ENTRY.
+            //
+            if (serverAllClientsLockIsOwner(pServer) &&
+                (pClientEntry->lockOwnerTid == portThreadGetCurrentThreadId()))
+            {
+                _serverPutAndUnlockClientEntry(pServer, LOCK_ACCESS_WRITE, pClientEntry);
+                listRemoveFirstByValue(&pServer->lockedClientList, &pClientEntry);
+            }
+
+            //
+            // Wait for all APIs using the CLIENT_ENTRY to finish: spin until the
+            // reference count drops to 1, using the client lock to block until the
+            // in-flight APIs complete. The caller takes care of the final decrement
+            // of the reference count to 0.
+            //
+            while (pClientEntry->refCount > 1)
+            {
+                _serverLockClient(LOCK_ACCESS_WRITE, pClientEntry);
+                _serverUnlockClient(LOCK_ACCESS_WRITE, pClientEntry);
+            }
+
+            // The client is no longer active, so decrement the count
+            portAtomicDecrementU32(&pServer->activeClientCount);
+
+            return NV_TRUE;
+        }
+        else if (pClientEntry->hClient > hClient)
+        {
+            serverReleaseClientListLock(pServer);
+
+            // Not found in sorted list
+            return NV_FALSE;
+        }
+
+        pClientEntry = listNext(pClientList, pClientEntry);
+    }
+
+fail:
+    serverReleaseClientListLock(pServer);
+
+    return NV_FALSE;
+}
+
+static
+void
+_serverGetClientEntry(CLIENT_ENTRY *pClientEntry)
+{
+    NV_ASSERT(!pClientEntry->bPendingFree);
+    portAtomicIncrementU32(&pClientEntry->refCount);
+}
+
+static
+void
+_serverPutClientEntry
+(
+    RsServer *pServer,
+    CLIENT_ENTRY *pClientEntry
+)
+{
+    if (portAtomicDecrementU32(&pClientEntry->refCount) == 0)
+    {
+        pClientEntry->pClient = NULL;
+        pClientEntry->hClient = 0;
+
+        portSyncRwLockDestroy(pClientEntry->pLock);
+        PORT_FREE(pServer->pAllocator, pClientEntry);
+    }
+}
+
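+/*
+ * Added commentary: handles that share a bucket differ by multiples of
+ * RS_CLIENT_HANDLE_BUCKET_COUNT, since the bucket is selected by
+ * (hClient & RS_CLIENT_HANDLE_BUCKET_MASK). The search below walks one
+ * bucket's sorted list and bumps the candidate handle by one bucket stride
+ * until it finds a gap.
+ */
+static
+NV_STATUS
+_serverFindNextAvailableClientHandleInBucket
+(
+    RsServer *pServer,
+    NvHandle hClientIn,
+    NvHandle *phClientOut,
+    CLIENT_ENTRY **ppClientNext
+)
+{
+    NvHandle hPrefixIn, hPrefixOut;
+    RsClientList *pClientList = &(pServer->pClientSortedList[hClientIn & RS_CLIENT_HANDLE_BUCKET_MASK]);
+    NvHandle hClientOut = hClientIn;
+    CLIENT_ENTRY *pClientEntry = listHead(pClientList);
+
+    *ppClientNext = NULL;
+    if (pClientEntry == NULL)
+    {
+        *phClientOut = hClientOut;
+        return NV_OK;
+    }
+
+    //
+    // The list is sorted in increasing handle order; either the candidate
+    // handle is free and we find its insertion point, or we advance it.
+    //
+    while (pClientEntry != NULL)
+    {
+        if (pClientEntry->hClient < hClientOut)
+        {
+            pClientEntry = listNext(pClientList, pClientEntry);
+            continue;
+        }
+        else if (pClientEntry->hClient == hClientOut)
+        {
+            // Advance the candidate handle by one bucket stride, staying in the same bucket
+            hClientOut = hClientOut + RS_CLIENT_HANDLE_BUCKET_COUNT;
+            NV_ASSERT((hClientIn & RS_CLIENT_HANDLE_BUCKET_MASK) == (hClientOut & RS_CLIENT_HANDLE_BUCKET_MASK));
+        }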
+        else // remaining case: pClientEntry->hClient > hClientOut
+        {
+            break;
+        }
+        pClientEntry = listNext(pClientList, pClientEntry);
+    }
+
+    // Bail out if advancing the handle overflowed the encoded index range
+    hPrefixIn = hClientIn & ~RS_CLIENT_HANDLE_DECODE_MASK;
+    hPrefixOut = hClientOut & ~RS_CLIENT_HANDLE_DECODE_MASK;
+    if (hPrefixIn != hPrefixOut)
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+
+    *phClientOut = hClientOut;
+    if (pClientEntry != NULL)
+    {
+        *ppClientNext = pClientEntry;
+    }
+    return NV_OK;
+}
+
+static
+NV_STATUS
+_serverCreateEntryAndLockForNewClient
+(
+    RsServer *pServer,
+    NvHandle *phClient,
+    NvBool bInternalHandle,
+    CLIENT_ENTRY **ppClientEntry,
+    API_SECURITY_INFO *pSecInfo
+)
+{
+    CLIENT_ENTRY *pClientEntry = NULL;
+    NV_STATUS status = NV_OK;
+    NvHandle hClient = *phClient;
+    CLIENT_ENTRY *pClientNext = NULL;
+    PORT_RWLOCK *pLock = NULL;
+    NvU32 handleBase = serverAllocClientHandleBase(pServer, bInternalHandle, pSecInfo);
+    NvBool bLockedClientList = NV_FALSE;
+
+    //
+    // Perform memory allocations before taking the client list spinlock; they
+    // must be performed outside of the spinlock's critical section.
+    //
+    pLock = portSyncRwLockCreate(pServer->pAllocator);
+    if (pLock == NULL)
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+
+    pClientEntry = (CLIENT_ENTRY *)PORT_ALLOC(pServer->pAllocator, sizeof(CLIENT_ENTRY));
+    if (pClientEntry == NULL)
+    {
+        status = NV_ERR_INSUFFICIENT_RESOURCES;
+        goto _serverCreateEntryAndLockForNewClient_exit;
+    }
+
+    portMemSet(pClientEntry, 0, sizeof(*pClientEntry));
+
+    pClientEntry->pLock = pLock;
+
+    serverAcquireClientListLock(pServer);
+    bLockedClientList = NV_TRUE;
+
+    if (hClient == 0)
+    {
+        NvU32 clientHandleIndex = pServer->clientCurrentHandleIndex;
+        NvU16 clientHandleBucketInit = clientHandleIndex & RS_CLIENT_HANDLE_BUCKET_MASK;
+        do
+        {
+            hClient = CLIENT_ENCODEHANDLE(handleBase, clientHandleIndex);
+            clientHandleIndex++;
+            if (clientHandleIndex > RS_CLIENT_HANDLE_DECODE_MASK)
+            {
+                // Wrap the handle index around and start over from 0
+                clientHandleIndex = 0;
+            }
+            if (clientHandleBucketInit == (clientHandleIndex & RS_CLIENT_HANDLE_BUCKET_MASK))
+            {
+                // We searched every bucket without finding an available handle (very unlikely)
+                status = NV_ERR_INSUFFICIENT_RESOURCES;
+                goto _serverCreateEntryAndLockForNewClient_exit;
+            }
+        }
+        while (_serverFindNextAvailableClientHandleInBucket(pServer, hClient, &hClient, &pClientNext) != NV_OK);
+
+        pServer->clientCurrentHandleIndex = clientHandleIndex;
+    }
+    else
+    {
+        NvHandle hClientOut = 0;
+
+#if !(RS_COMPATABILITY_MODE)
+        // Re-encode the handle so it matches the expected format
+        NvU32 clientIndex = CLIENT_DECODEHANDLE(hClient);
+        hClient = CLIENT_ENCODEHANDLE(handleBase, clientIndex);
+#endif
+
+        if (_serverFindClientEntryByHandle(pServer, hClient,
+                CLIENT_PARTIALLY_INITIALIZED | CLIENT_PENDING_FREE,
+                CLIENT_LIST_LOCK_LOCKED, NULL))
+        {
+            // The handle already exists
+            status = NV_ERR_INSERT_DUPLICATE_NAME;
+            goto _serverCreateEntryAndLockForNewClient_exit;
+        }
+
+        status = _serverFindNextAvailableClientHandleInBucket(pServer, hClient, &hClientOut, &pClientNext);
+        if (status != NV_OK)
+            goto _serverCreateEntryAndLockForNewClient_exit;
+
+        if (hClient != hClientOut)
+        {
+            // This should not happen, as we checked for duplicates already
+            NV_PRINTF(LEVEL_ERROR, "Client handle mismatch: %x != %x.\n", hClient, hClientOut);
+            status = NV_ERR_INVALID_STATE;
+            goto _serverCreateEntryAndLockForNewClient_exit;
+        }
+    }
+
+    // At this point we have an hClient and know in which bucket, and where within it, to insert the entry.
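+    //
+    // Added commentary: CLIENT_ENCODEHANDLE() composes a handle from the
+    // per-server handle base and a running index, and CLIENT_DECODEHANDLE()
+    // recovers the index. The low bits (hClient & RS_CLIENT_HANDLE_BUCKET_MASK)
+    // select the sorted bucket list the entry lives in, which is what makes
+    // the bucket-stride search above work.
+    //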
+ pClientEntry->hClient = hClient; + pClientEntry->pLock = pLock; + pClientEntry->refCount = 1; + pClientEntry->bPendingFree = NV_FALSE; + + RS_LOCK_VALIDATOR_INIT(&pClientEntry->lockVal, + bInternalHandle ? LOCK_VAL_LOCK_CLASS_CLIENT_INTERNAL : LOCK_VAL_LOCK_CLASS_CLIENT, + hClient); + + status = _serverInsertClientEntry(pServer, pClientEntry, pClientNext); + if (status != NV_OK) + goto _serverCreateEntryAndLockForNewClient_exit; + + // + // Increase the reference count so this CLIENT_ENTRY can't be freed until we're + // done using it. + // + _serverGetClientEntry(pClientEntry); + + // Release client list lock + serverReleaseClientListLock(pServer); + bLockedClientList = NV_FALSE; + + // + // Acquire the client lock here. Nothing else should have acquired it since + // pClientEntry->pClient is still NULL. + // + RS_RWLOCK_ACQUIRE_WRITE(pClientEntry->pLock, &pClientEntry->lockVal); + pClientEntry->lockOwnerTid = portThreadGetCurrentThreadId(); + + *phClient = hClient; + *ppClientEntry = pClientEntry; + +_serverCreateEntryAndLockForNewClient_exit: + if (bLockedClientList) + serverReleaseClientListLock(pServer); + + if (status != NV_OK) + { + if (pClientEntry != NULL) + PORT_FREE(pServer->pAllocator, pClientEntry); + + if (pLock != NULL) + portSyncRwLockDestroy(pLock); + } + + return status; +} + +static +void +_serverLockClient +( + LOCK_ACCESS_TYPE access, + CLIENT_ENTRY* pClientEntry +) +{ + if (access == LOCK_ACCESS_READ) + { + RS_RWLOCK_ACQUIRE_READ(pClientEntry->pLock, &pClientEntry->lockVal); + portAtomicIncrementU32(&pClientEntry->lockReadOwnerCnt); + } + else + { + RS_RWLOCK_ACQUIRE_WRITE(pClientEntry->pLock, &pClientEntry->lockVal); + pClientEntry->lockOwnerTid = portThreadGetCurrentThreadId(); + } +} + +static +NV_STATUS +_serverLockClientWithLockInfo +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + NvHandle hClient, + NvBool bValidateLocks, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags, + CLIENT_ENTRY **ppClientEntry +) +{ + NV_STATUS status = NV_OK; + if ((pLockInfo->flags & RS_LOCK_FLAGS_NO_CLIENT_LOCK) || + serverAllClientsLockIsOwner(pServer)) + { + if (!_serverGetClientEntryByHandle(pServer, hClient, 0, + CLIENT_LIST_LOCK_UNLOCKED, ppClientEntry)) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + goto check_locks; + } + + if ((pLockInfo->state & RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED)) + { + NV_ASSERT_OR_RETURN(_serverGetClientEntryByHandle(pServer, hClient, 0, + CLIENT_LIST_LOCK_UNLOCKED, ppClientEntry), + NV_ERR_INVALID_OBJECT_HANDLE); + NV_ASSERT_OR_ELSE(pLockInfo->pClient != NULL, + status = NV_ERR_INVALID_STATE; goto done); + NV_ASSERT_OR_ELSE(pLockInfo->pClient == (*ppClientEntry)->pClient, + status = NV_ERR_INVALID_STATE; goto done); + NV_ASSERT_OR_ELSE((*ppClientEntry)->lockOwnerTid == + portThreadGetCurrentThreadId(), + status = NV_ERR_INVALID_STATE; goto done); + + goto check_locks; + } + + if (!_serverGetAndLockClientEntryByHandle(pServer, hClient, access, ppClientEntry)) + return NV_ERR_INVALID_OBJECT_HANDLE; + + pLockInfo->state |= RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED; + pLockInfo->pClient = (*ppClientEntry)->pClient; + *pReleaseFlags |= RS_LOCK_RELEASE_CLIENT_LOCK; + +check_locks: + if (bValidateLocks) + status = clientValidateLocks((*ppClientEntry)->pClient, pServer, *ppClientEntry); + +done: + if (status != NV_OK) + { + if (*pReleaseFlags & RS_LOCK_RELEASE_CLIENT_LOCK) + { + _serverUnlockClientWithLockInfo(pServer, access, *ppClientEntry, + pLockInfo, pReleaseFlags); + } + else if (*ppClientEntry != NULL) + { + _serverPutClientEntry(pServer, 
*ppClientEntry); + *ppClientEntry = NULL; + } + } + + return status; +} + +static +NV_STATUS +_serverLockDualClientWithLockInfo +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + NvHandle hClient1, + NvHandle hClient2, + NvBool bValidateLocks, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags, + CLIENT_ENTRY **ppClientEntry1, + CLIENT_ENTRY **ppClientEntry2 +) +{ + NV_STATUS status = NV_OK; + + // 1st and 2nd in handle order, as opposed to fixed 1 and 2 + NvHandle hClient1st; + NvHandle hClient2nd; + CLIENT_ENTRY **ppClientEntry1st; + CLIENT_ENTRY **ppClientEntry2nd; + + *ppClientEntry1 = NULL; + *ppClientEntry2 = NULL; + + if ((pLockInfo->flags & RS_LOCK_FLAGS_NO_CLIENT_LOCK) || + serverAllClientsLockIsOwner(pServer)) + { + ppClientEntry1st = ppClientEntry1; + ppClientEntry2nd = ppClientEntry2; + + if (!_serverGetClientEntryByHandle(pServer, hClient1, 0, + CLIENT_LIST_LOCK_UNLOCKED, ppClientEntry1st)) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + if (hClient1 == hClient2) + { + *ppClientEntry2nd = *ppClientEntry1st; + } + else + { + NV_ASSERT_OR_ELSE(_serverGetClientEntryByHandle(pServer, hClient2, 0, + CLIENT_LIST_LOCK_UNLOCKED, ppClientEntry2nd), + status = NV_ERR_INVALID_OBJECT_HANDLE; goto done); + } + + goto check_locks; + } + + if (hClient1 <= hClient2) + { + hClient1st = hClient1; + ppClientEntry1st = ppClientEntry1; + + hClient2nd = hClient2; + ppClientEntry2nd = ppClientEntry2; + } + else + { + hClient1st = hClient2; + ppClientEntry1st = ppClientEntry2; + + hClient2nd = hClient1; + ppClientEntry2nd = ppClientEntry1; + } + + if ((pLockInfo->state & RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED)) + { + NV_ASSERT_OR_RETURN(pLockInfo->pSecondClient != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pLockInfo->pClient->hClient == hClient1st, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pLockInfo->pSecondClient->hClient == hClient2nd, NV_ERR_INVALID_STATE); + + NV_ASSERT_OR_ELSE(_serverGetClientEntryByHandle(pServer, hClient1st, 0, + CLIENT_LIST_LOCK_UNLOCKED, ppClientEntry1st), + status = NV_ERR_INVALID_OBJECT_HANDLE; goto done); + NV_ASSERT_OR_ELSE((*ppClientEntry1st)->pClient == pLockInfo->pClient, + status = NV_ERR_INVALID_STATE; goto done); + NV_ASSERT_OR_ELSE((*ppClientEntry1st)->lockOwnerTid == + portThreadGetCurrentThreadId(), + status = NV_ERR_INVALID_STATE; goto done); + + if (hClient1st == hClient2nd) + { + *ppClientEntry2nd = *ppClientEntry1st; + } + else + { + NV_ASSERT_OR_ELSE(_serverGetClientEntryByHandle(pServer, hClient2nd, 0, + CLIENT_LIST_LOCK_UNLOCKED, ppClientEntry2nd), + status = NV_ERR_INVALID_OBJECT_HANDLE; goto done); + } + + NV_ASSERT_OR_ELSE((*ppClientEntry2nd)->pClient == pLockInfo->pSecondClient, + status = NV_ERR_INVALID_STATE; goto done); + NV_ASSERT_OR_ELSE( + (*ppClientEntry2nd)->lockOwnerTid == (*ppClientEntry1st)->lockOwnerTid, + status = NV_ERR_INVALID_STATE; goto done); + + goto check_locks; + } + + if (!_serverGetAndLockClientEntryByHandle(pServer, hClient1st, access, + ppClientEntry1st)) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + if (hClient1 == hClient2) + *ppClientEntry2nd = *ppClientEntry1st; + else + { + if (!_serverGetAndLockClientEntryByHandle(pServer, hClient2nd, access, + ppClientEntry2nd)) + { + _serverUnlockClient(access, *ppClientEntry1st); + status = NV_ERR_INVALID_OBJECT_HANDLE; + goto done; + } + } + + pLockInfo->state |= RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED; + pLockInfo->pClient = (*ppClientEntry1st)->pClient; + pLockInfo->pSecondClient = (*ppClientEntry2nd)->pClient; + *pReleaseFlags |= RS_LOCK_RELEASE_CLIENT_LOCK; + 
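+    //
+    // Added commentary: taking the lower-numbered handle first gives every
+    // dual-client path a single global lock order, so two threads locking
+    // (hA, hB) and (hB, hA) cannot deadlock against each other.
+    //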
+check_locks: + if (bValidateLocks) + { + status = clientValidateLocks((*ppClientEntry1st)->pClient, pServer, + *ppClientEntry1st); + + if ((status == NV_OK) && (hClient1 != hClient2)) + { + status = clientValidateLocks((*ppClientEntry2nd)->pClient, pServer, + *ppClientEntry2nd); + } + } + +done: + + if (status != NV_OK) + { + if (*pReleaseFlags & RS_LOCK_RELEASE_CLIENT_LOCK) + { + _serverUnlockDualClientWithLockInfo(pServer, access, *ppClientEntry1st, + *ppClientEntry2nd, pLockInfo, pReleaseFlags); + } + else + { + if (*ppClientEntry1st != NULL) + { + _serverPutClientEntry(pServer, *ppClientEntry1st); + *ppClientEntry1st = NULL; + } + + if ((*ppClientEntry2nd != NULL) && (*ppClientEntry2nd != *ppClientEntry1st)) + { + _serverPutClientEntry(pServer, *ppClientEntry2nd); + *ppClientEntry2nd = NULL; + } + } + } + + return status; +} + +static +NV_STATUS +_serverLockAllClientsWithLockInfo +( + RsServer *pServer, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if (!serverAllClientsLockIsOwner(pServer)) + { + NV_STATUS status = serverLockAllClients(pServer); + if (status != NV_OK) + return status; + + *pReleaseFlags |= RS_LOCK_RELEASE_CLIENT_LOCK; + } + + return NV_OK; +} + +static +void +_serverUnlockClient +( + LOCK_ACCESS_TYPE access, + CLIENT_ENTRY* pClientEntry +) +{ + if (access == LOCK_ACCESS_READ) + { + portAtomicDecrementU32(&pClientEntry->lockReadOwnerCnt); + RS_RWLOCK_RELEASE_READ(pClientEntry->pLock, &pClientEntry->lockVal); + } + else + { + pClientEntry->lockOwnerTid = ~0; + RS_RWLOCK_RELEASE_WRITE(pClientEntry->pLock, &pClientEntry->lockVal); + } +} + +NvBool +serverIsClientLockedForRead +( + CLIENT_ENTRY* pClientEntry +) +{ + NV_ASSERT_OR_RETURN(pClientEntry != NULL, NV_FALSE); + return pClientEntry->lockReadOwnerCnt != 0; +} + +static +void +_serverUnlockClientWithLockInfo +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + CLIENT_ENTRY *pClientEntry, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if (*pReleaseFlags & RS_LOCK_RELEASE_CLIENT_LOCK) + { + _serverUnlockClient(access, pClientEntry); + + pLockInfo->state &= ~RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED; + pLockInfo->pClient = NULL; + *pReleaseFlags &= ~RS_LOCK_RELEASE_CLIENT_LOCK; + } + + _serverPutClientEntry(pServer, pClientEntry); +} + +static +void +_serverUnlockDualClientWithLockInfo +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + CLIENT_ENTRY *pClientEntry1, + CLIENT_ENTRY *pClientEntry2, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if (*pReleaseFlags & RS_LOCK_RELEASE_CLIENT_LOCK) + { + // 1st and 2nd in handle order, as opposed to fixed 1 and 2 + CLIENT_ENTRY *pClientEntry1st; + CLIENT_ENTRY *pClientEntry2nd; + + if (pClientEntry1->pClient->hClient <= pClientEntry2->pClient->hClient) + { + pClientEntry1st = pClientEntry1; + pClientEntry2nd = pClientEntry2; + } + else + { + pClientEntry1st = pClientEntry2; + pClientEntry2nd = pClientEntry1; + } + + _serverUnlockClient(access, pClientEntry2nd); + if (pClientEntry1->pClient->hClient != pClientEntry2->pClient->hClient) + _serverUnlockClient(access, pClientEntry1st); + + pLockInfo->state &= ~RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED; + pLockInfo->pClient = NULL; + pLockInfo->pSecondClient = NULL; + *pReleaseFlags &= ~RS_LOCK_RELEASE_CLIENT_LOCK; + } + + _serverPutClientEntry(pServer, pClientEntry1); + + if (pClientEntry1 != pClientEntry2) + _serverPutClientEntry(pServer, pClientEntry2); +} + +NV_STATUS +_serverUnlockAllClientsWithLockInfo +( + RsServer *pServer, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if (*pReleaseFlags & 
RS_LOCK_RELEASE_CLIENT_LOCK)
+    {
+        NV_STATUS status = serverUnlockAllClients(pServer);
+        if (status != NV_OK)
+            return status;
+
+        *pReleaseFlags &= ~RS_LOCK_RELEASE_CLIENT_LOCK;
+    }
+
+    return NV_OK;
+}
+
+NvU32
+serverGetClientCount(RsServer *pServer)
+{
+    return pServer->activeClientCount;
+}
+
+NvU64
+serverGetResourceCount(RsServer *pServer)
+{
+    return pServer->activeResourceCount;
+}
+
+NV_STATUS
+resservSwapTlsCallContext
+(
+    CALL_CONTEXT **ppOldCallContext,
+    CALL_CONTEXT *pNewCallContext
+)
+{
+    CALL_CONTEXT **ppTlsCallContext;
+
+    if (ppOldCallContext == NULL)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    ppTlsCallContext = (CALL_CONTEXT**)tlsEntryAcquire(TLS_ENTRY_ID_RESSERV_CALL_CONTEXT);
+    if (ppTlsCallContext == NULL)
+        return NV_ERR_INVALID_STATE;
+
+    *ppOldCallContext = *ppTlsCallContext;
+    *ppTlsCallContext = pNewCallContext;
+
+    // New call contexts inherit the bDeferredApi flag from the old one
+    if ((*ppOldCallContext != NULL) && (pNewCallContext != NULL) &&
+        (pNewCallContext->pControlParams != NULL) &&
+        ((*ppOldCallContext)->pControlParams != NULL))
+    {
+        pNewCallContext->pControlParams->bDeferredApi |=
+            (*ppOldCallContext)->pControlParams->bDeferredApi;
+    }
+
+    return NV_OK;
+}
+
+CALL_CONTEXT *
+resservGetTlsCallContext(void)
+{
+    CALL_CONTEXT *pTlsCallContext = NvP64_VALUE(tlsEntryGet(TLS_ENTRY_ID_RESSERV_CALL_CONTEXT));
+    return pTlsCallContext;
+}
+
+NV_STATUS
+resservRestoreTlsCallContext
+(
+    CALL_CONTEXT *pOldCallContext
+)
+{
+    CALL_CONTEXT **ppTlsCallContext = (CALL_CONTEXT**)tlsEntryAcquire(TLS_ENTRY_ID_RESSERV_CALL_CONTEXT);
+    if (ppTlsCallContext == NULL)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    *ppTlsCallContext = pOldCallContext;
+
+    // Release twice: once for the acquire above and once for the acquire made
+    // by the matching resservSwapTlsCallContext() call.
+    tlsEntryRelease(TLS_ENTRY_ID_RESSERV_CALL_CONTEXT);
+    tlsEntryRelease(TLS_ENTRY_ID_RESSERV_CALL_CONTEXT);
+
+    return NV_OK;
+}
+
+RsResourceRef *
+resservGetContextRefByType(NvU32 internalClassId, NvBool bSearchAncestors)
+{
+    CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
+    RsResourceRef *pContextRef = NULL;
+
+    if (pCallContext == NULL)
+        return NULL;
+
+    if (pCallContext->pResourceRef != NULL)
+    {
+        if (pCallContext->pResourceRef->internalClassId == internalClassId)
+        {
+            return pCallContext->pResourceRef;
+        }
+        else if (bSearchAncestors &&
+                 (refFindAncestorOfType(pCallContext->pResourceRef, internalClassId, &pContextRef) == NV_OK))
+        {
+            return pContextRef;
+        }
+    }
+
+    if (pCallContext->pContextRef != NULL)
+    {
+        if (pCallContext->pContextRef->internalClassId == internalClassId)
+        {
+            return pCallContext->pContextRef;
+        }
+        else if (bSearchAncestors &&
+                 (refFindAncestorOfType(pCallContext->pContextRef, internalClassId, &pContextRef) == NV_OK))
+        {
+            return pContextRef;
+        }
+    }
+
+    return NULL;
+}
+
+NV_STATUS serverFreeClient(RsServer *pServer, RS_CLIENT_FREE_PARAMS* pParams)
+{
+    RS_RES_FREE_PARAMS params;
+    RS_LOCK_INFO lockInfo;
+
+    portMemSet(&lockInfo, 0, sizeof(lockInfo));
+    portMemSet(&params, 0, sizeof(params));
+
+    lockInfo.state = pParams->state;
+    lockInfo.flags = RS_LOCK_FLAGS_LOW_PRIORITY;
+    params.pLockInfo = &lockInfo;
+    params.hClient = pParams->hClient;
+    params.hResource = pParams->hClient;
+    params.bHiPriOnly = pParams->bHiPriOnly;
+    params.bDisableOnly = pParams->bDisableOnly;
+    params.pSecInfo = pParams->pSecInfo;
+
+    return serverFreeResourceTree(pServer, &params);
+}
+
+NV_STATUS
+shrConstruct_IMPL
+(
+    RsShared *pShare
+)
+{
+    return NV_OK;
+}
+
+void
+shrDestruct_IMPL
+(
+    RsShared *pShare
+)
+{
+}
+
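+/*
+ * Illustrative usage of the call-context helpers above (a hedged sketch;
+ * callContext here is a placeholder and error handling is elided). Every
+ * swap must be paired with a restore on the same thread:
+ *
+ *     CALL_CONTEXT callContext = { 0 };
+ *     CALL_CONTEXT *pOldContext = NULL;
+ *     NV_ASSERT_OK_OR_RETURN(resservSwapTlsCallContext(&pOldContext, &callContext));
+ *     // ... code that relies on resservGetTlsCallContext() ...
+ *     NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
+ */
+NV_STATUS
+sessionConstruct_IMPL
+(
+    RsSession *pSession
+)
+{
+    CALL_CONTEXT *pCallContext =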
resservGetTlsCallContext(); + + pSession->bValid = NV_TRUE; + listInit(&pSession->dependencies, pCallContext->pServer->pAllocator); + listInit(&pSession->dependants, pCallContext->pServer->pAllocator); + pSession->pLock = portSyncRwLockCreate(pCallContext->pServer->pAllocator); + + RS_LOCK_VALIDATOR_INIT(&pSession->lockVal, LOCK_VAL_LOCK_CLASS_SESSION, LOCK_VAL_LOCK_GENERATE); + return NV_OK; +} + +void +sessionDestruct_IMPL +( + RsSession *pSession +) +{ + NV_ASSERT(listCount(&pSession->dependencies) == 0); + NV_ASSERT(listCount(&pSession->dependants) == 0); + listDestroy(&pSession->dependencies); + listDestroy(&pSession->dependants); + pSession->pLock = NULL; +} + +NV_STATUS +sessionAddDependant_IMPL +( + RsSession *pSession, + RsResourceRef *pResourceRef +) +{ + NV_STATUS status; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + status = sessionCheckLocksForAdd(pSession, pResourceRef); + + if (status != NV_OK) + return status; + + if (pResourceRef->pSession == pSession) + return NV_OK; + + NV_ASSERT_OR_RETURN(pResourceRef->pSession == NULL, NV_ERR_INVALID_ARGUMENT); + + if (listAppendValue(&pSession->dependants, &pResourceRef) == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + serverRefShare(pCallContext->pServer, staticCast(pSession, RsShared)); + + pResourceRef->pSession = pSession; + + return NV_OK; +} + +NV_STATUS +sessionAddDependency_IMPL +( + RsSession *pSession, + RsResourceRef *pResourceRef +) +{ + NV_STATUS status; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + status = sessionCheckLocksForAdd(pSession, pResourceRef); + + if (status != NV_OK) + return status; + + if (pResourceRef->pDependantSession == pSession) + return NV_OK; + + NV_ASSERT_OR_RETURN(pResourceRef->pDependantSession == NULL, NV_ERR_INVALID_ARGUMENT); + + if (listAppendValue(&pSession->dependencies, &pResourceRef) == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + serverRefShare(pCallContext->pServer, staticCast(pSession, RsShared)); + + pResourceRef->pDependantSession = pSession; + + return NV_OK; +} + +void +sessionRemoveDependant_IMPL +( + RsSession *pSession, + RsResourceRef *pResourceRef +) +{ + listRemoveFirstByValue(&pSession->dependants, &pResourceRef); + sessionCheckLocksForRemove(pSession, pResourceRef); + pResourceRef->pSession = NULL; +} + +void +sessionRemoveDependency_IMPL +( + RsSession *pSession, + RsResourceRef *pResourceRef +) +{ + listRemoveFirstByValue(&pSession->dependencies, &pResourceRef); + pSession->bValid = NV_FALSE; + sessionCheckLocksForRemove(pSession, pResourceRef); + pResourceRef->pDependantSession = NULL; +} + +NV_STATUS sessionCheckLocksForAdd_IMPL(RsSession *pSession, RsResourceRef *pResourceRef) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RS_LOCK_INFO *pLockInfo; + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + pLockInfo = pCallContext->pLockInfo; + + NV_ASSERT_OR_RETURN((pLockInfo != NULL), NV_ERR_INVALID_STATE); + + if (!serverRwApiLockIsOwner(pCallContext->pServer)) + { + // Assert clients locked or RW lock + if (pLockInfo->state & RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED) + { + NV_ASSERT_OR_RETURN((pLockInfo->pClient == pResourceRef->pClient) || + (pLockInfo->pSecondClient == pResourceRef->pClient), + NV_ERR_INVALID_ARGUMENT); + } + else if (pLockInfo->state & RS_LOCK_STATE_TOP_LOCK_ACQUIRED) + { + NV_ASSERT_OR_RETURN((pLockInfo->pClient == NULL) && (pLockInfo->pSecondClient == NULL), NV_ERR_INVALID_ARGUMENT); + } + else + { + NV_ASSERT_FAILED("Incorrect locks taken"); + return NV_ERR_INVALID_LOCK_STATE; + 
}
+    }
+
+    return NV_OK;
+}
+
+void sessionCheckLocksForRemove_IMPL(RsSession *pSession, RsResourceRef *pResourceRef)
+{
+    CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
+    RS_LOCK_INFO *pLockInfo;
+
+    NV_ASSERT(pCallContext != NULL);
+    pLockInfo = pCallContext->pLockInfo;
+
+    NV_ASSERT(pLockInfo != NULL);
+
+    if (pLockInfo->flags & RS_LOCK_FLAGS_FREE_SESSION_LOCK)
+    {
+        RsShared *pShared = staticCast(pSession, RsShared);
+        PORT_RWLOCK *pSessionLock = pSession->pLock;
+        NvBool bDestroy = (pShared->refCount == 1);
+
+        if (!(pLockInfo->state & RS_LOCK_STATE_SESSION_LOCK_ACQUIRED) || !bDestroy)
+        {
+            serverFreeShare(pCallContext->pServer, pShared);
+            pLockInfo->flags &= ~RS_LOCK_FLAGS_FREE_SESSION_LOCK;
+        }
+
+        if (!(pLockInfo->state & RS_LOCK_STATE_SESSION_LOCK_ACQUIRED) && bDestroy)
+            portSyncRwLockDestroy(pSessionLock);
+    }
+}
+
+NV_STATUS
+serverAllocShareWithHalspecParent
+(
+    RsServer *pServer,
+    const NVOC_CLASS_INFO *pClassInfo,
+    RsShared **ppShare,
+    Object *pHalspecParent
+)
+{
+    RsShared *pShare;
+    NV_STATUS status;
+    Dynamic *pDynamic = NULL;
+    NvU32 flags = NVOC_OBJ_CREATE_FLAGS_NONE;
+
+    if (pClassInfo == NULL)
+        return NV_ERR_INVALID_CLASS;
+
+    if (pHalspecParent != NULL)
+        flags |= NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY;
+
+    status = objCreateDynamicWithFlags(&pDynamic,
+                                       pHalspecParent,
+                                       (const NVOC_CLASS_INFO*)(const void*)pClassInfo,
+                                       flags);
+    if (status != NV_OK)
+        return status;
+
+    if (pDynamic == NULL)
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+
+    pShare = dynamicCast(pDynamic, RsShared);
+    if (pShare == NULL)
+    {
+        status = NV_ERR_INVALID_CLASS;
+        goto fail;
+    }
+
+    pShare->refCount = 1;
+
+    portSyncSpinlockAcquire(pServer->pShareMapLock);
+    if (mapInsertExisting(&pServer->shareMap, (NvUPtr)pShare, pShare) != NV_TRUE)
+    {
+        status = NV_ERR_INSUFFICIENT_RESOURCES;
+        portSyncSpinlockRelease(pServer->pShareMapLock);
+        goto fail;
+    }
+    portSyncSpinlockRelease(pServer->pShareMapLock);
+
+    if (ppShare != NULL)
+        *ppShare = pShare;
+
+    return NV_OK;
+
+fail:
+    // Delete via pDynamic so the object is freed even when the RsShared cast failed
+    objDelete(pDynamic);
+
+    return status;
+}
+
+NV_STATUS
+serverAllocShare
+(
+    RsServer *pServer,
+    const NVOC_CLASS_INFO *pClassInfo,
+    RsShared **ppShare
+)
+{
+    return serverAllocShareWithHalspecParent(pServer, pClassInfo, ppShare, NULL);
+}
+
+NvS32
+serverGetShareRefCount
+(
+    RsServer *pServer,
+    RsShared *pShare
+)
+{
+    return pShare->refCount;
+}
+
+NV_STATUS
+serverRefShare
+(
+    RsServer *pServer,
+    RsShared *pShare
+)
+{
+    portAtomicIncrementS32(&pShare->refCount);
+    return NV_OK;
+}
+
+NV_STATUS
+serverFreeShare
+(
+    RsServer *pServer,
+    RsShared *pShare
+)
+{
+    if (portAtomicDecrementS32(&pShare->refCount) == 0)
+    {
+        portSyncSpinlockAcquire(pServer->pShareMapLock);
+        mapRemove(&pServer->shareMap, pShare);
+        portSyncSpinlockRelease(pServer->pShareMapLock);
+
+        objDelete(pShare);
+    }
+    return NV_OK;
+}
+
+RS_SHARE_ITERATOR
+serverShareIter
+(
+    RsServer *pServer,
+    NvU32 internalClassId
+)
+{
+    RS_SHARE_ITERATOR it;
+    portMemSet(&it, 0, sizeof(it));
+    it.internalClassId = internalClassId;
+    it.mapIt = mapIterAll(&pServer->shareMap);
+
+    return it;
+}
+
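+/*
+ * Illustrative share-iterator usage (a hedged sketch; pServer and
+ * internalClassId are placeholders):
+ *
+ *     RS_SHARE_ITERATOR it = serverShareIter(pServer, internalClassId);
+ *     while (serverShareIterNext(&it))
+ *     {
+ *         RsShared *pShared = it.pShared;
+ *         // ... inspect pShared; pass 0 as internalClassId to visit every share ...
+ *     }
+ */
+NvBool
+serverShareIterNext
+(
+    RS_SHARE_ITERATOR* pIt
+)
+{
+    NvBool bLoop = NV_TRUE;
+    if (pIt == NULL)
+        return NV_FALSE;
+
+    pIt->pShared = NULL;
+    bLoop = mapIterNext(&pIt->mapIt);
+    while (bLoop)
+    {
+        RsShared *pShared = pIt->mapIt.pValue;
+        if ((pIt->internalClassId == 0) || (objDynamicCastById(pShared, pIt->internalClassId) != NULL))
+        {
+            pIt->pShared = pShared;
+            return NV_TRUE;
+        }
+        bLoop = mapIterNext(&pIt->mapIt);
+    }
+
+    return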
NV_FALSE; +} + +#if RS_STANDALONE +NV_STATUS +serverSerializeCtrlDown +( + CALL_CONTEXT *pCallContext, + NvU32 cmd, + void **ppParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + return NV_OK; +} + +NV_STATUS +serverDeserializeCtrlDown +( + CALL_CONTEXT *pCallContext, + NvU32 cmd, + void **ppParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + return NV_OK; +} + +NV_STATUS +serverSerializeCtrlUp +( + CALL_CONTEXT *pCallContext, + NvU32 cmd, + void **ppParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + return NV_OK; +} + +NV_STATUS +serverDeserializeCtrlUp +( + CALL_CONTEXT *pCallContext, + NvU32 cmd, + void **ppParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + return NV_OK; +} + +NV_STATUS +serverSerializeAllocDown +( + CALL_CONTEXT *pCallContext, + NvU32 classId, + void **ppParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + return NV_OK; +} + +NV_STATUS +serverDeserializeAllocDown +( + CALL_CONTEXT *pCallContext, + NvU32 classId, + void **ppParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + return NV_OK; +} + +NV_STATUS +serverSerializeAllocUp +( + CALL_CONTEXT *pCallContext, + NvU32 classId, + void **ppParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + return NV_OK; +} + +NV_STATUS +serverDeserializeAllocUp +( + CALL_CONTEXT *pCallContext, + NvU32 classId, + void **pParams, + NvU32 *pParamsSize, + NvU32 *flags +) +{ + return NV_OK; +} + +void +serverFreeSerializeStructures +( + CALL_CONTEXT *pCallContext, + void *pParams +) +{ +} +#endif // RS_STANDALONE + +void +serverDisableReserializeControl +( + CALL_CONTEXT *pCallContext +) +{ + NV_CHECK_OR_RETURN_VOID(LEVEL_INFO, pCallContext != NULL); + pCallContext->bReserialize = NV_FALSE; +} + +#if (RS_PROVIDES_API_STATE) +NV_STATUS +serverAllocApiCopyIn +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams, + API_STATE **ppApiState +) +{ + if (ppApiState != NULL) + *ppApiState = NULL; + + return NV_OK; +} + +NV_STATUS +serverAllocApiCopyOut +( + RsServer *pServer, + NV_STATUS status, + API_STATE *pApiState +) +{ + return status; +} +#endif + +void +serverAcquireClientListLock +( + RsServer *pServer +) +{ + RS_SPINLOCK_ACQUIRE(pServer->pClientListLock, &pServer->clientListLockVal); +} + +void +serverReleaseClientListLock +( + RsServer *pServer +) +{ + RS_SPINLOCK_RELEASE(pServer->pClientListLock, &pServer->clientListLockVal); +} + +#if (RS_STANDALONE) +NV_STATUS +serverAllocEpilogue_WAR +( + RsServer *pServer, + NV_STATUS status, + NvBool bClientAlloc, + RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams +) +{ + return status; +} + +NV_STATUS +serverAllocLookupSecondClient +( + NvU32 externalClassId, + void *pAllocParams, + NvHandle *phSecondClient +) +{ + *phSecondClient = 0; + return NV_OK; +} + +NV_STATUS +serverControlLookupSecondClient +( + NvU32 cmd, + void *pControlParams, + RS_CONTROL_COOKIE *pCookie, + NvHandle *phSecondClient +) +{ + *phSecondClient = 0; + return NV_OK; +} + +NV_STATUS serverTopLock_Prologue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if ((pLockInfo->flags & RS_LOCK_FLAGS_NO_TOP_LOCK)) + return NV_OK; + + if (!(pLockInfo->state & RS_LOCK_STATE_TOP_LOCK_ACQUIRED)) + { + if (access == LOCK_ACCESS_READ) + { + RS_RWLOCK_ACQUIRE_READ(pServer->pTopLock, &pServer->topLockVal); + } + else + { + RS_RWLOCK_ACQUIRE_WRITE(pServer->pTopLock, &pServer->topLockVal); + pServer->topLockOwnerTid = portThreadGetCurrentThreadId(); + } + + pLockInfo->state |= RS_LOCK_STATE_TOP_LOCK_ACQUIRED; + *pReleaseFlags |= RS_LOCK_RELEASE_TOP_LOCK; + } + else if 
(access == LOCK_ACCESS_WRITE) + { + NV_ASSERT_OR_RETURN(pServer->topLockOwnerTid == portThreadGetCurrentThreadId(), + NV_ERR_INVALID_LOCK_STATE); + } + + return NV_OK; +} + +void +serverTopLock_Epilogue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if (*pReleaseFlags & RS_LOCK_RELEASE_TOP_LOCK) + { + if (access == LOCK_ACCESS_READ) + RS_RWLOCK_RELEASE_READ(pServer->pTopLock, &pServer->topLockVal); + else + { + pServer->topLockOwnerTid = ~0; + RS_RWLOCK_RELEASE_WRITE(pServer->pTopLock, &pServer->topLockVal); + } + + pLockInfo->state &= ~RS_LOCK_STATE_TOP_LOCK_ACQUIRED; + *pReleaseFlags &= ~RS_LOCK_RELEASE_TOP_LOCK; + } +} + +NV_STATUS +serverResLock_Prologue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags, + NvU32 gpuMask +) +{ + if (!(pLockInfo->state & RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED)) + { + if (access == LOCK_ACCESS_READ) + RS_RWLOCK_ACQUIRE_READ(pServer->pResLock, &pServer->resLockVal); + else + RS_RWLOCK_ACQUIRE_WRITE(pServer->pResLock, &pServer->resLockVal); + + pLockInfo->state |= RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED; + *pReleaseFlags |= RS_LOCK_RELEASE_CUSTOM_LOCK_1; + } + + return NV_OK; +} + +void +serverResLock_Epilogue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if (*pReleaseFlags & RS_LOCK_RELEASE_CUSTOM_LOCK_1) + { + if (access == LOCK_ACCESS_READ) + RS_RWLOCK_RELEASE_READ(pServer->pResLock, &pServer->resLockVal); + else + RS_RWLOCK_RELEASE_WRITE(pServer->pResLock, &pServer->resLockVal); + + pLockInfo->state &= ~RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED; + *pReleaseFlags &= ~RS_LOCK_RELEASE_CUSTOM_LOCK_1; + } +} + +#if !(RS_STANDALONE_TEST) +NV_STATUS +serverMap_Prologue +( + RsServer *pServer, + RS_CPU_MAP_PARAMS *pMapParams +) +{ + return NV_OK; +} +#endif /* !RS_STANDALONE_TEST */ + +void +serverMap_Epilogue +( + RsServer *pServer, + RS_CPU_MAP_PARAMS *pMapParams +) +{ +} + +#if !(RS_STANDALONE_TEST) +NV_STATUS +serverUnmap_Prologue +( + RsServer *pServer, + RS_CPU_UNMAP_PARAMS *pUnmapParams +) +{ + return NV_OK; +} +#endif /* !RS_STANDALONE_TEST */ + +void +serverUnmap_Epilogue +( + RsServer *pServer, + RS_CPU_UNMAP_PARAMS *pUnmapParams +) +{ +} + +void +serverControl_InitCookie +( + const struct NVOC_EXPORTED_METHOD_DEF *pExportedEntry, + RS_CONTROL_COOKIE *pCookie +) +{ +} + +NV_STATUS +serverInterMap_Prologue +( + RsServer *pServer, + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RS_INTER_MAP_PARAMS *pMapParams, + NvU32 *pReleaseFlags +) +{ + NV_STATUS status; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pMapParams->pLockInfo, pReleaseFlags, 0); + + return status; +} + +void +serverInterMap_Epilogue +( + RsServer *pServer, + RS_INTER_MAP_PARAMS *pMapParams, + NvU32 *pReleaseFlags +) +{ + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pMapParams->pLockInfo, pReleaseFlags); +} + +NV_STATUS +serverInterUnmap_Prologue +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pUnmapParams +) +{ + return NV_OK; +} + +void +serverInterUnmap_Epilogue +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pUnmapParams +) +{ +} + +NvBool +serverRwApiLockIsOwner +( + RsServer *pServer +) +{ + return (pServer->topLockOwnerTid == portThreadGetCurrentThreadId()); +} + +NV_STATUS +serverAllocResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + if (lock == RS_LOCK_TOP) + { + NvBool bClientAlloc 
= (pParams->externalClassId == NV01_ROOT || + pParams->externalClassId == NV01_ROOT_CLIENT || + pParams->externalClassId == NV01_ROOT_NON_PRIV); + + if (bClientAlloc) + { + *pAccess = LOCK_ACCESS_WRITE; + return NV_OK; + } + } + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_ALLOC_RESOURCE)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} + +NV_STATUS +serverFreeResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_FREE_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess, + NvBool *pbSupportForceROLock +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_FREE_RESOURCE)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + *pbSupportForceROLock = NV_TRUE; + + return NV_OK; +} + +NV_STATUS +serverCopyResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_DUP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_COPY)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverShareResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_SHARE_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_SHARE)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} + +#if !(RS_STANDALONE_TEST) +NV_STATUS +serverControlLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_CTRL)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} +#endif + +NV_STATUS +serverControlLookupClientLockFlags +( + RS_CONTROL_COOKIE *pCookie, + enum CLIENT_LOCK_TYPE *pClientLockType +) +{ + *pClientLockType = CLIENT_LOCK_SPECIFIC; + return NV_OK; +} + +NV_STATUS +serverMapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_MAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverUnmapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_UNMAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} + +NV_STATUS +serverInterMapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_INTER_MAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} + +NV_STATUS +serverInterUnmapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_INTER_UNMAP)) + ? 
LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} + +NV_STATUS +serverControl_ValidateCookie +( + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie +) +{ + return NV_OK; +} + +NV_STATUS +serverControlApiCopyIn +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie +) +{ + return NV_OK; +} + +NV_STATUS +serverControlApiCopyOut +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie, + NV_STATUS rmStatus +) +{ + return NV_OK; +} + +NV_STATUS +serverInitGlobalSharePolicies +( + RsServer *pServer +) +{ + return NV_OK; +} +#endif + +NV_STATUS +serverSessionLock_Prologue +( + LOCK_ACCESS_TYPE access, + RsResourceRef *pResourceRef, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + RsSession *pSession = pResourceRef->pSession; + RsSession *pDependantSession = pResourceRef->pDependantSession; + + if (!(pLockInfo->state & RS_LOCK_STATE_SESSION_LOCK_ACQUIRED)) + { + if (pSession != NULL) + { + if (access == LOCK_ACCESS_READ) + RS_RWLOCK_ACQUIRE_READ(pSession->pLock, &pSession->lockVal); + else + RS_RWLOCK_ACQUIRE_WRITE(pSession->pLock, &pSession->lockVal); + pLockInfo->state |= RS_LOCK_STATE_SESSION_LOCK_ACQUIRED; + *pReleaseFlags |= RS_LOCK_RELEASE_SESSION_LOCK; + + pLockInfo->pSession = pSession; + } + } + else + { + NV_ASSERT_OR_RETURN(pLockInfo->pSession == pSession, NV_ERR_INVALID_LOCK_STATE); + } + + if (!(pLockInfo->flags & RS_LOCK_FLAGS_NO_DEPENDANT_SESSION_LOCK) && + (pDependantSession != NULL)) + { + if (!(pLockInfo->state & RS_LOCK_STATE_SESSION_LOCK_ACQUIRED)) + { + // + // The only reason we lock the back reference session is if we're freeing the + // resource so take the write lock in all cases + // + RS_RWLOCK_ACQUIRE_WRITE(pDependantSession->pLock, &pDependantSession->lockVal); + + pLockInfo->state |= RS_LOCK_STATE_SESSION_LOCK_ACQUIRED; + *pReleaseFlags |= RS_LOCK_RELEASE_SESSION_LOCK; + + pLockInfo->pSession = pDependantSession; + } + else + { + // + // For now, don't allow a resource to be both depended on and depending on a + // session to keep this locking code simpler. We'll have to revisit if that + // becomes necessary. 
+ // + NV_ASSERT_OR_RETURN(pLockInfo->pSession == pDependantSession, NV_ERR_INVALID_LOCK_STATE); + } + } + + pLockInfo->flags &= ~RS_LOCK_FLAGS_NO_DEPENDANT_SESSION_LOCK; + + return NV_OK; +} + +void +serverSessionLock_Epilogue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + RsSession *pSession = pLockInfo->pSession; + + if ((pSession != NULL) && (*pReleaseFlags & RS_LOCK_RELEASE_SESSION_LOCK)) + { + if (access == LOCK_ACCESS_READ) + RS_RWLOCK_RELEASE_READ(pSession->pLock, &pSession->lockVal); + else + RS_RWLOCK_RELEASE_WRITE(pSession->pLock, &pSession->lockVal); + + pLockInfo->state &= ~RS_LOCK_STATE_SESSION_LOCK_ACQUIRED; + *pReleaseFlags &= ~RS_LOCK_RELEASE_SESSION_LOCK; + + if (pLockInfo->flags & RS_LOCK_FLAGS_FREE_SESSION_LOCK) + { + RsShared *pShared = staticCast(pSession, RsShared); + PORT_RWLOCK *pSessionLock = pSession->pLock; + + serverFreeShare(pServer, pShared); + portSyncRwLockDestroy(pSessionLock); + } + + pLockInfo->pSession = NULL; + } + + pLockInfo->flags &= ~RS_LOCK_FLAGS_FREE_SESSION_LOCK; +} + +NV_STATUS serverControl_Prologue +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess, + NvU32* pReleaseFlags +) +{ + NV_STATUS status; + RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + + status = serverControl_ValidateCookie(pParams, pParams->pCookie); + if (status != NV_OK) + return status; + + status = serverControlLookupLockFlags(pServer, RS_LOCK_RESOURCE, pParams, pParams->pCookie, pAccess); + if (status != NV_OK) + return status; + + if (!pServer->bUnlockedParamCopy) + { + status = serverControlApiCopyIn(pServer, pParams, pParams->pCookie); + if (status != NV_OK) + return status; + } + + pLockInfo->traceOp = RS_LOCK_TRACE_CTRL; + pLockInfo->traceClassId = pParams->cmd; + status = serverResLock_Prologue(pServer, *pAccess, pParams->pLockInfo, pReleaseFlags, 0); + if (status != NV_OK) + return status; + + return NV_OK; +} + +NV_STATUS +serverControl_Epilogue +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE access, + NvU32 *pReleaseFlags, + NV_STATUS status +) +{ + serverResLock_Epilogue(pServer, access, pParams->pLockInfo, pReleaseFlags); + + if (!pServer->bUnlockedParamCopy) + { + status = serverControlApiCopyOut(pServer, pParams, pParams->pCookie, status); + } + + return status; +} + +NvBool +serverSupportsReadOnlyLock +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_API_ENUM api +) +{ + NV_ASSERT(api < RS_API_MAX); + if (lock == RS_LOCK_TOP) + { + return (!!(pServer->roTopLockApiMask & NVBIT(api))); + } + + return NV_FALSE; +} diff --git a/src/nvidia/src/libraries/tls/tls.c b/src/nvidia/src/libraries/tls/tls.c new file mode 100644 index 0000000..a8c2861 --- /dev/null +++ b/src/nvidia/src/libraries/tls/tls.c @@ -0,0 +1,673 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "tls/tls.h"
+#include "containers/map.h"
+#include "nvport/nvport.h"
+
+/// @todo Figure out which builds have an upward-growing stack. Looks like none?
+#define STACK_GROWS_DOWNWARD 1
+
+
+/**
+ * @brief Entry which counts how many times some data in TLS has been referenced.
+ */
+typedef struct TlsEntry
+{
+    NvU32 refCount;
+    NvP64 pUserData;
+    MapNode node;
+} TlsEntry;
+
+MAKE_INTRUSIVE_MAP(TlsEntryMap, TlsEntry, node);
+
+/**
+ * @brief A single thread's TLS information
+ */
+typedef struct ThreadEntry
+{
+    union {
+        NvU64 threadId; ///< For passive threads
+        NvU64 sp;       ///< For ISR threads
+    } key; /// @todo Use node.key instead?
+    TlsEntryMap map;
+    MapNode node;
+} ThreadEntry;
+
+MAKE_INTRUSIVE_MAP(ThreadEntryMap, ThreadEntry, node);
+
+/**
+ * @brief Stores all necessary data for the TLS mechanism.
+ *
+ * @todo Use RW spinlocks instead. Nice perf boost.
+ */
+typedef struct TlsDatabase
+{
+    /// @brief Allocator which allocates all necessary data for the current @ref TlsDatabase.
+    PORT_MEM_ALLOCATOR *pAllocator;
+    /// @brief Last allocated entry id.
+    NvU64 lastEntryId;
+
+    /// @brief Lock for the passive thread entry map
+    PORT_SPINLOCK *pLock;
+    /// @brief Map of thread entries of non-ISR threads.
+    ThreadEntryMap threadEntries;
+
+#if TLS_ISR_CAN_USE_LOCK
+    /// @brief Lock which controls access to ISR-specific structures
+    PORT_SPINLOCK *pIsrLock;
+    /// @brief Map of thread entries of ISR threads.
+    ThreadEntryMap isrEntries;
+#else
+#if !defined(TLS_ISR_UNIT_TEST)
+#define TLS_MAX_ISRS 64
+#else
+#define TLS_MAX_ISRS 1024
+#endif
+    struct {
+        volatile NvU64 sp;
+        ThreadEntry *pThreadEntry;
+    } isrEntries[TLS_MAX_ISRS];
+#endif
+
+#if TLS_THREADS_CAN_RAISE_IRQL
+    /**
+     * @brief Number of ISRs / DPCs active on a given CPU.
+     *
+     * Every time an ISR starts, it increments this, and decrements it on end.
+     * Since ISRs never get rescheduled, and passive threads will never preempt
+     * them, (isrCount[current_cpu] != 0) will be true IFF we're in an ISR/DPC.
+ */ + NvU32 *isrCount; +#endif + + volatile NvU32 initCount; +} TlsDatabase; + +TlsDatabase tlsDatabase; // Zero initialized + +// Helper function prototypes +static NvBool _tlsIsIsr(void); +static ThreadEntry *_tlsThreadEntryGet(void); +static ThreadEntry *_tlsThreadEntryGetOrAlloc(void); +static NvP64 *_tlsEntryAcquire(ThreadEntry *pThreadEntry, NvU64 entryId, PORT_MEM_ALLOCATOR *pCustomAllocator); +static NvU32 _tlsEntryRelease(ThreadEntry *pThreadEntry, TlsEntry *pTlsEntry, PORT_MEM_ALLOCATOR *pCustomAllocator); +static NV_STATUS _tlsIsrEntriesInit(void); +static void _tlsIsrEntriesDestroy(void); +static void _tlsIsrEntriesInsert(ThreadEntry *pThreadEntry); +static ThreadEntry *_tlsIsrEntriesRemove(NvU64 sp); +static ThreadEntry *_tlsIsrEntriesFind(NvU64 approxSp); +static PORT_MEM_ALLOCATOR *_tlsIsrAllocatorGet(void); +static PORT_MEM_ALLOCATOR *_tlsAllocatorGet(void); + +#if TLS_THREADS_CAN_RAISE_IRQL +/// @todo move to NvPort (bug 1583359) +NvU32 osGetCurrentProcessorNumber(void); +#if defined(NVRM) +NvU32 osGetMaximumCoreCount(void); +#else +#define osGetMaximumCoreCount() 0x0 +#endif +#endif + +#if NVOS_IS_LIBOS +// +// On LibOS we have at most one passive thread (task_rm) and one ISR +// (task_interrupt) active at once (on same CPU core). Since these two will +// use different maps, we don't need to protect them with spinlocks. +// +#define TLS_SPINLOCK_ACQUIRE(x) +#define TLS_SPINLOCK_RELEASE(x) +#else +#define TLS_SPINLOCK_ACQUIRE(x) portSyncSpinlockAcquire(x) +#define TLS_SPINLOCK_RELEASE(x) portSyncSpinlockRelease(x) +#endif // NVOS_IS_LIBOS + +#if !PORT_IS_FUNC_SUPPORTED(portSyncExSafeToSleep) +#define portSyncExSafeToSleep() NV_TRUE +#endif + +#if !PORT_IS_FUNC_SUPPORTED(portMemExSafeForNonPagedAlloc) +#define portMemExSafeForNonPagedAlloc() NV_TRUE +#endif + +#if defined(TLS_PROFILING) +#include "tls_profiling.h" +#endif + + + + +NV_STATUS tlsInitialize(void) +{ + NV_STATUS status; + + if (portAtomicIncrementU32(&tlsDatabase.initCount) != 1) + { + return NV_OK; /// @todo Maybe return NV_WARN_NOTHING_TO_DO? + } + + status = portInitialize(); + if (status != NV_OK) + return status; + + tlsDatabase.pAllocator = portMemAllocatorCreateNonPaged(); + if (tlsDatabase.pAllocator == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + tlsDatabase.pLock = portSyncSpinlockCreate(tlsDatabase.pAllocator); + if (tlsDatabase.pLock == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + mapInitIntrusive(&tlsDatabase.threadEntries); + + status = _tlsIsrEntriesInit(); + if (status != NV_OK) + goto done; + + tlsDatabase.lastEntryId = TLS_ENTRY_ID_DYNAMIC; + +#if TLS_THREADS_CAN_RAISE_IRQL +{ + NvU32 maxCoreCount = osGetMaximumCoreCount(); + if (maxCoreCount == 0) + maxCoreCount = 1; // MODS reports only 1 CPU at index 0. 
+
+    tlsDatabase.isrCount = PORT_ALLOC(tlsDatabase.pAllocator, maxCoreCount * sizeof(NvU32));
+    if (tlsDatabase.isrCount == NULL)
+    {
+        status = NV_ERR_NO_MEMORY;
+        goto done;
+    }
+    portMemSet(tlsDatabase.isrCount, 0, maxCoreCount * sizeof(NvU32));
+}
+#endif // TLS_THREADS_CAN_RAISE_IRQL
+
+done:
+    if (status != NV_OK)
+    {
+        tlsShutdown();
+    }
+    return status;
+}
+
+void tlsShutdown(void)
+{
+    if (portAtomicDecrementU32(&tlsDatabase.initCount) != 0)
+    {
+        return;
+    }
+
+#if defined(TLS_PROFILING)
+    _tlsProfilePrint();
+#endif
+
+    mapDestroy(&tlsDatabase.threadEntries);
+    if (tlsDatabase.pLock)
+        portSyncSpinlockDestroy(tlsDatabase.pLock);
+
+    _tlsIsrEntriesDestroy();
+
+    if (tlsDatabase.pAllocator)
+    {
+#if TLS_THREADS_CAN_RAISE_IRQL
+        PORT_FREE(tlsDatabase.pAllocator, tlsDatabase.isrCount);
+#endif
+        portMemAllocatorRelease(tlsDatabase.pAllocator);
+    }
+    portMemSet(&tlsDatabase, 0, sizeof(tlsDatabase));
+    portShutdown();
+}
+
+void tlsIsrInit(PORT_MEM_ALLOCATOR *pIsrAllocator)
+{
+    ThreadEntry *pThreadEntry;
+    NV_ASSERT_OR_RETURN_VOID(tlsDatabase.initCount > 0);
+
+    //
+    // If TLS_THREADS_CAN_RAISE_IRQL, we treat anything that calls tlsIsrInit as
+    // an ISR and cannot perform this check. Will be moved to an ASSERT later.
+    // See CORERM-96
+    //
+    if (!TLS_THREADS_CAN_RAISE_IRQL && !_tlsIsIsr())
+    {
+        static NvBool bAlreadyPrinted = NV_FALSE;
+        if (!bAlreadyPrinted)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                "TLS: Unnecessary tlsIsrInit() call at %p. Will stop reporting further violations.\n",
+                (void*)portUtilGetReturnAddress());
+            bAlreadyPrinted = NV_TRUE;
+        }
+        return;
+    }
+
+    pThreadEntry = PORT_ALLOC(pIsrAllocator, sizeof(*pThreadEntry));
+    NV_ASSERT_OR_RETURN_VOID(pThreadEntry != NULL);
+
+    //
+    // The ISR allocator is expected to live on the ISR's stack; its address is
+    // used as the key so _tlsIsrEntriesFind() can locate this entry from an
+    // approximate stack pointer.
+    //
+    pThreadEntry->key.sp = (NvU64)(NvUPtr)pIsrAllocator;
+    mapInitIntrusive(&pThreadEntry->map);
+
+    _tlsIsrEntriesInsert(pThreadEntry);
+
+#if TLS_THREADS_CAN_RAISE_IRQL
+    portAtomicIncrementU32(&tlsDatabase.isrCount[osGetCurrentProcessorNumber()]);
+#endif
+}
+
+void tlsIsrDestroy(PORT_MEM_ALLOCATOR *pIsrAllocator)
+{
+    ThreadEntry *pThreadEntry;
+    NV_ASSERT_OR_RETURN_VOID(tlsDatabase.initCount > 0);
+
+    if (!_tlsIsIsr())
+    {
+        if (TLS_THREADS_CAN_RAISE_IRQL)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                "TLS: Calling tlsIsrDestroy() without accompanying tlsIsrInit at %p\n",
+                (void*)portUtilGetReturnAddress());
+        }
+        return;
+    }
+
+    pThreadEntry = _tlsIsrEntriesRemove((NvU64)(NvUPtr)pIsrAllocator);
+
+    // Guard against a destroy that doesn't match a prior tlsIsrInit()
+    NV_ASSERT_OR_RETURN_VOID(pThreadEntry != NULL);
+
+    mapDestroy(&pThreadEntry->map);
+    PORT_FREE(pIsrAllocator, pThreadEntry);
+
+#if TLS_THREADS_CAN_RAISE_IRQL
+    portAtomicDecrementU32(&tlsDatabase.isrCount[osGetCurrentProcessorNumber()]);
+#endif
+}
+
+PORT_MEM_ALLOCATOR *tlsIsrAllocatorGet(void)
+{
+    NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, NULL);
+    return _tlsIsrAllocatorGet();
+}
+
+NvU64 tlsEntryAlloc(void)
+{
+    NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL);
+    return portAtomicExIncrementU64(&tlsDatabase.lastEntryId);
+}
+
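+/*
+ * Illustrative TLS entry usage (a hedged sketch; pMyData is a placeholder and
+ * error handling is elided). Acquire/release are refcounted per thread, and
+ * the entry is freed when its count drops to zero:
+ *
+ *     NvU64 entryId = tlsEntryAlloc();
+ *     NvP64 *ppUserData = tlsEntryAcquire(entryId);
+ *     *ppUserData = NV_PTR_TO_NvP64(pMyData);
+ *     // ... later, on the same thread ...
+ *     void *pData = NvP64_VALUE(tlsEntryGet(entryId));
+ *     tlsEntryRelease(entryId);
+ */
+NvP64 *tlsEntryAcquire(NvU64 entryId)
+{
+    ThreadEntry *pThreadEntry;
+    NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, NULL);
+
+    // User tries allocation of an unallocated entryId.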
+ NV_ASSERT_OR_RETURN(entryId <= tlsDatabase.lastEntryId || + entryId >= TLS_ENTRY_ID_TAG_START, NULL); + + pThreadEntry = _tlsThreadEntryGetOrAlloc(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, NULL); + + return _tlsEntryAcquire(pThreadEntry, entryId, NULL); +} + +NvP64 *tlsEntryAcquireWithAllocator(NvU64 entryId, PORT_MEM_ALLOCATOR *pCustomAllocator) +{ + ThreadEntry *pThreadEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, NULL); + + // User tries allocation of unallocated entryId. + NV_ASSERT_OR_RETURN(entryId <= tlsDatabase.lastEntryId || + entryId >= TLS_ENTRY_ID_TAG_START, NULL); + NV_ASSERT_OR_RETURN(pCustomAllocator != NULL, NULL); + + pThreadEntry = _tlsThreadEntryGetOrAlloc(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, NULL); + + return _tlsEntryAcquire(pThreadEntry, entryId, pCustomAllocator); +} + +NvU32 tlsEntryRelease(NvU64 entryId) +{ + ThreadEntry *pThreadEntry; + TlsEntry *pTlsEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL); + + pThreadEntry = _tlsThreadEntryGet(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, TLS_ERROR_VAL); + + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + NV_ASSERT_OR_RETURN(pTlsEntry != NULL, TLS_ERROR_VAL); + + return _tlsEntryRelease(pThreadEntry, pTlsEntry, NULL); +} + +NvU32 tlsEntryReleaseWithAllocator(NvU64 entryId, PORT_MEM_ALLOCATOR *pCustomAllocator) +{ + ThreadEntry *pThreadEntry; + TlsEntry *pTlsEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL); + NV_ASSERT_OR_RETURN(pCustomAllocator != NULL, TLS_ERROR_VAL); + + pThreadEntry = _tlsThreadEntryGet(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, TLS_ERROR_VAL); + + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + NV_ASSERT_OR_RETURN(pTlsEntry != NULL, TLS_ERROR_VAL); + + return _tlsEntryRelease(pThreadEntry, pTlsEntry, pCustomAllocator); +} + +NvP64 tlsEntryGet(NvU64 entryId) +{ + ThreadEntry *pThreadEntry; + TlsEntry *pTlsEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, NvP64_NULL); + + pThreadEntry = _tlsThreadEntryGet(); + if (pThreadEntry == NULL) + return NvP64_NULL; + + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + return pTlsEntry ? 
pTlsEntry->pUserData : NvP64_NULL; +} + +NvU32 tlsEntryReference(NvU64 entryId) +{ + ThreadEntry *pThreadEntry; + TlsEntry *pTlsEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL); + + pThreadEntry = _tlsThreadEntryGet(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, TLS_ERROR_VAL); + + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + NV_ASSERT_OR_RETURN(pTlsEntry != NULL, TLS_ERROR_VAL); + + return ++pTlsEntry->refCount; +} + +NvU32 tlsEntryUnreference(NvU64 entryId) +{ + ThreadEntry *pThreadEntry; + TlsEntry *pTlsEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL); + + pThreadEntry = _tlsThreadEntryGet(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, TLS_ERROR_VAL); + + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + NV_ASSERT_OR_RETURN(pTlsEntry != NULL, TLS_ERROR_VAL); + + return --pTlsEntry->refCount; +} + + +static ThreadEntry * +_tlsThreadEntryGet(void) +{ + ThreadEntry *pThreadEntry; + + if (_tlsIsIsr()) + { + pThreadEntry = _tlsIsrEntriesFind((NvU64)(NvUPtr)&pThreadEntry); + } + else + { + NvU64 threadId = portThreadGetCurrentThreadId(); + TLS_SPINLOCK_ACQUIRE(tlsDatabase.pLock); + pThreadEntry = mapFind(&tlsDatabase.threadEntries, threadId); + TLS_SPINLOCK_RELEASE(tlsDatabase.pLock); + } + return pThreadEntry; +} + + +static ThreadEntry * +_tlsThreadEntryGetOrAlloc(void) +{ + ThreadEntry* pThreadEntry = NULL; + + pThreadEntry = _tlsThreadEntryGet(); + if (pThreadEntry == NULL) // Only non-ISRs can be missing + { + NV_ASSERT(portMemExSafeForNonPagedAlloc()); + pThreadEntry = PORT_ALLOC(tlsDatabase.pAllocator, sizeof(*pThreadEntry)); + if (pThreadEntry != NULL) + { + pThreadEntry->key.threadId = portThreadGetCurrentThreadId(); + mapInitIntrusive(&pThreadEntry->map); + TLS_SPINLOCK_ACQUIRE(tlsDatabase.pLock); + mapInsertExisting(&tlsDatabase.threadEntries, + pThreadEntry->key.threadId, + pThreadEntry); + TLS_SPINLOCK_RELEASE(tlsDatabase.pLock); + } + } + + return pThreadEntry; +} + +static NvP64* +_tlsEntryAcquire +( + ThreadEntry *pThreadEntry, + NvU64 entryId, + PORT_MEM_ALLOCATOR *pCustomAllocator +) +{ + TlsEntry *pTlsEntry; + PORT_MEM_ALLOCATOR *pAllocator; + + pAllocator = (pCustomAllocator != NULL) ? pCustomAllocator : _tlsAllocatorGet(); + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + if (pTlsEntry != NULL) + { + pTlsEntry->refCount++; + } + else + { + pTlsEntry = PORT_ALLOC(pAllocator, sizeof(*pTlsEntry)); + NV_ASSERT_OR_RETURN(pTlsEntry != NULL, NULL); + mapInsertExisting(&pThreadEntry->map, entryId, pTlsEntry); + + pTlsEntry->refCount = 1; + pTlsEntry->pUserData = NvP64_NULL; + } + return &pTlsEntry->pUserData; +} + +static NvU32 +_tlsEntryRelease +( + ThreadEntry* pThreadEntry, + TlsEntry *pTlsEntry, + PORT_MEM_ALLOCATOR *pCustomAllocator +) +{ + NvU32 refCount; + PORT_MEM_ALLOCATOR *pAllocator; + pAllocator = (pCustomAllocator != NULL) ? pCustomAllocator : _tlsAllocatorGet(); + + refCount = --pTlsEntry->refCount; + if (refCount == 0) + { + mapRemove(&pThreadEntry->map, pTlsEntry); + PORT_FREE(pAllocator, pTlsEntry); + // Only non ISR Thread Entry can be deallocated. 
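+        // (ISR thread entries are created by tlsIsrInit() and freed by
+        // tlsIsrDestroy(); here we only tear down a lazily allocated non-ISR
+        // entry once its map of TLS entries becomes empty.)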
+        if (!_tlsIsIsr() && (mapCount(&pThreadEntry->map) == 0))
+        {
+            NV_ASSERT(portMemExSafeForNonPagedAlloc());
+            mapDestroy(&pThreadEntry->map);
+            TLS_SPINLOCK_ACQUIRE(tlsDatabase.pLock);
+            mapRemove(&tlsDatabase.threadEntries, pThreadEntry);
+            TLS_SPINLOCK_RELEASE(tlsDatabase.pLock);
+            PORT_FREE(tlsDatabase.pAllocator, pThreadEntry);
+        }
+    }
+    return refCount;
+}
+
+static PORT_MEM_ALLOCATOR *_tlsIsrAllocatorGet(void)
+{
+    ThreadEntry *pThreadEntry;
+
+    if (!_tlsIsIsr()) { return NULL; }
+    pThreadEntry = _tlsThreadEntryGet();
+
+    return (PORT_MEM_ALLOCATOR*)(NvUPtr)pThreadEntry->key.sp;
+}
+
+static PORT_MEM_ALLOCATOR *_tlsAllocatorGet(void)
+{
+    PORT_MEM_ALLOCATOR *pIsrAllocator = _tlsIsrAllocatorGet();
+    return (pIsrAllocator == NULL) ? tlsDatabase.pAllocator : pIsrAllocator;
+}
+
+#if TLS_ISR_CAN_USE_LOCK
+
+static NV_STATUS _tlsIsrEntriesInit(void)
+{
+    tlsDatabase.pIsrLock = portSyncSpinlockCreate(tlsDatabase.pAllocator);
+    if (tlsDatabase.pIsrLock == NULL)
+    {
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+    mapInitIntrusive(&tlsDatabase.isrEntries);
+    return NV_OK;
+}
+static void _tlsIsrEntriesDestroy(void)
+{
+    if (tlsDatabase.pIsrLock)
+        portSyncSpinlockDestroy(tlsDatabase.pIsrLock);
+    mapDestroy(&tlsDatabase.isrEntries);
+}
+static void _tlsIsrEntriesInsert(ThreadEntry *pThreadEntry)
+{
+    TLS_SPINLOCK_ACQUIRE(tlsDatabase.pIsrLock);
+    mapInsertExisting(&tlsDatabase.isrEntries, pThreadEntry->key.sp, pThreadEntry);
+    TLS_SPINLOCK_RELEASE(tlsDatabase.pIsrLock);
+}
+static ThreadEntry *_tlsIsrEntriesRemove(NvU64 sp)
+{
+    ThreadEntry *pThreadEntry;
+    TLS_SPINLOCK_ACQUIRE(tlsDatabase.pIsrLock);
+    pThreadEntry = mapFind(&tlsDatabase.isrEntries, sp);
+    mapRemove(&tlsDatabase.isrEntries, pThreadEntry);
+    TLS_SPINLOCK_RELEASE(tlsDatabase.pIsrLock);
+    return pThreadEntry;
+}
+static ThreadEntry *_tlsIsrEntriesFind(NvU64 approxSp)
+{
+    ThreadEntry *pThreadEntry;
+    TLS_SPINLOCK_ACQUIRE(tlsDatabase.pIsrLock);
+#if STACK_GROWS_DOWNWARD
+    pThreadEntry = mapFindGEQ(&tlsDatabase.isrEntries, approxSp);
+#else
+    pThreadEntry = mapFindLEQ(&tlsDatabase.isrEntries, approxSp);
+#endif
+    TLS_SPINLOCK_RELEASE(tlsDatabase.pIsrLock);
+    return pThreadEntry;
+}
+
+#else // Lockless
+
+static NV_STATUS _tlsIsrEntriesInit(void)
+{
+    portMemSet(tlsDatabase.isrEntries, 0, sizeof(tlsDatabase.isrEntries));
+    return NV_OK;
+}
+static void _tlsIsrEntriesDestroy(void)
+{
+    portMemSet(tlsDatabase.isrEntries, 0, sizeof(tlsDatabase.isrEntries));
+}
+static void _tlsIsrEntriesInsert(ThreadEntry *pThreadEntry)
+{
+    NvU32 i = 0;
+
+    while (!portAtomicExCompareAndSwapU64(&tlsDatabase.isrEntries[i].sp,
+                                          pThreadEntry->key.sp, 0))
+    {
+        i = (i + 1) % TLS_MAX_ISRS;
+    }
+    tlsDatabase.isrEntries[i].pThreadEntry = pThreadEntry;
+}
+static ThreadEntry *_tlsIsrEntriesRemove(NvU64 sp)
+{
+    ThreadEntry *pThreadEntry;
+    NvU32 i = 0;
+
+    while (tlsDatabase.isrEntries[i].sp != sp)
+    {
+        i++;
+    }
+    pThreadEntry = tlsDatabase.isrEntries[i].pThreadEntry;
+    portAtomicExSetU64(&tlsDatabase.isrEntries[i].sp, 0);
+
+    return pThreadEntry;
+}
+static ThreadEntry *_tlsIsrEntriesFind(NvU64 approxSp)
+{
+    NvU32 i;
+    NvU32 closestIdx = ~0x0;
+    NvU64 closestSp = STACK_GROWS_DOWNWARD ?
~0ULL : 0; + + for (i = 0; i < TLS_MAX_ISRS; i++) + { + NvU64 sp = tlsDatabase.isrEntries[i].sp; +#if STACK_GROWS_DOWNWARD + if (sp != 0 && sp >= approxSp && sp < closestSp) +#else + if (sp != 0 && sp <= approxSp && sp > closestSp) +#endif + { + closestSp = sp; + closestIdx = i; + } + } + NV_ASSERT_OR_RETURN(closestIdx != ~0x0, NULL); + return tlsDatabase.isrEntries[closestIdx].pThreadEntry; +} + +#endif // TLS_ISR_CAN_USE_LOCK + + + +static NvBool _tlsIsIsr(void) +{ +#if defined (TLS_ISR_UNIT_TEST) + // In unit tests we simulate ISR tests in different ways, so tests define this + extern NvBool tlsTestIsIsr(void); + return tlsTestIsIsr(); +#elif TLS_THREADS_CAN_RAISE_IRQL + NvU64 preempt = portSyncExDisablePreemption(); + NvBool bIsIsr = (tlsDatabase.isrCount[osGetCurrentProcessorNumber()] > 0); + portSyncExRestorePreemption(preempt); + return bIsIsr; +#else // Usermode and most kernelmode platforms + return portUtilIsInterruptContext(); +#endif // TLS_ISR_UNIT_TEST +} diff --git a/src/nvidia/src/libraries/utils/nvassert.c b/src/nvidia/src/libraries/utils/nvassert.c new file mode 100644 index 0000000..808866f --- /dev/null +++ b/src/nvidia/src/libraries/utils/nvassert.c @@ -0,0 +1,341 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "nvport/nvport.h"
+#include "utils/nvassert.h"
+
+#if defined(NVRM) && !defined(NVWATCH) && !defined(GSP_PLUGIN_BUILD)
+#include "containers/map.h"
+#include "os/os.h"
+#include "nvrm_registry.h"
+#include "rmconfig.h"
+#else
+#if !defined(RMCFG_MODULE_ENABLED)
+#define RMCFG_MODULE_x 0
+#endif
+
+#if !defined(RMCFG_IS_PLATFORM)
+#define RMCFG_FEATURE_PLATFORM_x 0
+#endif
+#endif
+
+#define NV_JOURNAL_ASSERT_FAILURE(lineNum, ip) ((void)0)
+#define NV_JOURNAL_ASSERT_FAILURE_STATUS(lineNum, ip, status) ((void)0)
+
+#if NV_ASSERT_FAILED_BACKTRACE_ENABLE
+static void nvAssertFailedBacktrace(NvU64 ip);
+
+// Print call stack in dmesg when assert fails
+#define NV_ASSERT_FAILED_BACKTRACE(ip) do {nvAssertFailedBacktrace(ip);} while(0)
+#else
+#define NV_ASSERT_FAILED_BACKTRACE(ip) ((void)0)
+#endif
+
+#if NV_ASSERT_FAILED_USES_STRINGS
+#define NV_ASSERT_FAILED_PRINTF_FMT "%s @ %s:%d\n"
+#define NV_ASSERT_FAILED_PRINTF_PARAM pszExpr, trimFN(pszFileName), lineNum
+#else
+#define NV_ASSERT_FAILED_PRINTF_FMT "0x%016llx\n"
+#define NV_ASSERT_FAILED_PRINTF_PARAM ip
+#endif
+
+#if !RMCFG_FEATURE_PLATFORM_GSP && !defined(GSP_PLUGIN_BUILD)
+#define NV_ASSERT_PRINTF(level, fmt, ...) NV_PRINTF_STRING \
+    (NV_PRINTF_MODULE, level, NV_PRINTF_ADD_PREFIX(fmt), ##__VA_ARGS__)
+#else
+#define NV_ASSERT_PRINTF(level, fmt, ...)
+#define NV_ASSERT_LOG(level, fmt, ...)
+#endif
+
+#define PATH_SEP '/'
+
+/*
+ * Trim path from source filename.
+ */
+#if NV_ASSERT_FAILED_USES_STRINGS
+static const char *trimFN(const char *pszFileName)
+{
+    NvLength i;
+
+    for (i = 0; pszFileName[i] != 0; i++)
+        ;
+
+    for (; i > 0; i--)
+    {
+        if (pszFileName[i] == PATH_SEP)
+            return &pszFileName[i + 1];
+    }
+
+    return pszFileName;
+}
+#endif
+
+#if NV_PRINTF_ENABLED || NV_JOURNAL_ASSERT_ENABLE
+/*
+ * Helper function for NV_ASSERT_FAILED
+ */
+void
+nvAssertFailed
+(
+    NV_ASSERT_FAILED_FUNC_TYPE
+)
+{
+    NvU64 ip = portUtilGetReturnAddress();
+    PORT_UNREFERENCED_VARIABLE(ip);
+
+    NV_ASSERT_PRINTF(LEVEL_ERROR, "Assertion failed: " NV_ASSERT_FAILED_PRINTF_FMT,
+                     NV_ASSERT_FAILED_PRINTF_PARAM);
+    NV_ASSERT_LOG(LEVEL_ERROR, "Assertion failed @ 0x%016llx", ip);
+    NV_JOURNAL_ASSERT_FAILURE(lineNum, ip);
+    NV_ASSERT_FAILED_BACKTRACE(ip);
+}
+
+/*
+ * Helper functions for NV_ASSERT_OK_FAILED
+ */
+void
+nvAssertOkFailed
+(
+    NvU32 status
+    NV_ASSERT_FAILED_FUNC_COMMA_TYPE
+)
+{
+    NvU64 ip = portUtilGetReturnAddress();
+    PORT_UNREFERENCED_VARIABLE(ip);
+
+    NV_ASSERT_PRINTF(LEVEL_ERROR,
+        "Assertion failed: %s (0x%08X) returned from " NV_ASSERT_FAILED_PRINTF_FMT,
+        nvstatusToString(status), status, NV_ASSERT_FAILED_PRINTF_PARAM);
+    NV_ASSERT_LOG(LEVEL_ERROR, "Assertion failed: 0x%08X returned from 0x%016llx",
+        status, ip);
+    NV_JOURNAL_ASSERT_FAILURE_STATUS(lineNum, ip, status);
+    NV_ASSERT_FAILED_BACKTRACE(ip);
+}
+
+/*
+ * Helper function for NV_CHECK_FAILED
+ */
+void
+nvCheckFailed
+(
+    NvU32 level
+    NV_ASSERT_FAILED_FUNC_COMMA_TYPE
+)
+{
+    NvU64 ip = portUtilGetReturnAddress();
+    PORT_UNREFERENCED_VARIABLE(ip);
+
+    NV_ASSERT_PRINTF(level, "Check failed: " NV_ASSERT_FAILED_PRINTF_FMT,
+                     NV_ASSERT_FAILED_PRINTF_PARAM);
+    NV_ASSERT_LOG(level, "Check failed @ 0x%016llx", ip);
+}
+
+/*
+ * Helper function for NV_CHECK_OK_FAILED
+ */
+void
+nvCheckOkFailed
+(
+    NvU32 level,
+    NvU32 status
+    NV_ASSERT_FAILED_FUNC_COMMA_TYPE
+)
+{
+    NvU64 ip = portUtilGetReturnAddress();
+    PORT_UNREFERENCED_VARIABLE(ip);
+
+    NV_ASSERT_PRINTF(level,
+        "Check failed: %s (0x%08X) returned from " NV_ASSERT_FAILED_PRINTF_FMT,
+        nvstatusToString(status),
status, NV_ASSERT_FAILED_PRINTF_PARAM); + NV_ASSERT_LOG(level, "Check failed: 0x%08X returned from 0x%016llx", status, ip); +} + +/* + * Helper function for NV_ASSERT_FAILED + */ +void +nvAssertFailedNoLog +( + NV_ASSERT_FAILED_FUNC_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(LEVEL_ERROR, "Assertion failed: " NV_ASSERT_FAILED_PRINTF_FMT, + NV_ASSERT_FAILED_PRINTF_PARAM); + NV_JOURNAL_ASSERT_FAILURE(lineNum, ip); + NV_ASSERT_FAILED_BACKTRACE(ip); +} + +/* + * Helper function for NV_ASSERT_OK_FAILED + */ +void +nvAssertOkFailedNoLog +( + NvU32 status + NV_ASSERT_FAILED_FUNC_COMMA_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(LEVEL_ERROR, + "Assertion failed: %s (0x%08X) returned from " NV_ASSERT_FAILED_PRINTF_FMT, + nvstatusToString(status), status, NV_ASSERT_FAILED_PRINTF_PARAM); + NV_JOURNAL_ASSERT_FAILURE_STATUS(lineNum, ip, status); + NV_ASSERT_FAILED_BACKTRACE(ip); +} + +/* + * Helper function for NV_CHECK_FAILED + */ +void +nvCheckFailedNoLog +( + NvU32 level + NV_ASSERT_FAILED_FUNC_COMMA_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(level, "Check failed: " NV_ASSERT_FAILED_PRINTF_FMT, + NV_ASSERT_FAILED_PRINTF_PARAM); +} + +/* + * Helper function for NV_CHECK_OK_FAILED + */ +void +nvCheckOkFailedNoLog +( + NvU32 level, + NvU32 status + NV_ASSERT_FAILED_FUNC_COMMA_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(level, + "Check failed: %s (0x%08X) returned from " NV_ASSERT_FAILED_PRINTF_FMT, + nvstatusToString(status), status, NV_ASSERT_FAILED_PRINTF_PARAM); +} + +#endif + +#if NV_ASSERT_FAILED_BACKTRACE_ENABLE +MAKE_MAP(AssertedIPMap, NvU8); + +static struct +{ + AssertedIPMap map; + NvU32 mode; + PORT_MUTEX *mtx; + NvBool init; + OS_THREAD_HANDLE tid; +} osAssertInternal; + +void nvAssertInit(void) +{ + if (osAssertInternal.init) + return; + + osAssertInternal.mode = NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_UNIQUE; + + // Map is not thread-safe and osAssertFailed can be called concurrently. + osReadRegistryDword(NULL, NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE, &osAssertInternal.mode); + if (osAssertInternal.mode == NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_UNIQUE) + { + osAssertInternal.mtx = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged()); + if (!osAssertInternal.mtx) + { + osAssertInternal.mode = NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_DISABLE; + } + else + { + mapInit(&osAssertInternal.map, portMemAllocatorGetGlobalNonPaged()); + } + } + osAssertInternal.init = NV_TRUE; +} + +static void nvAssertFailedBacktrace(NvU64 ip) +{ + if (!osAssertInternal.init) + return; + + if (osAssertInternal.mode == NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_UNIQUE) + { + OS_THREAD_HANDLE tid; + if (osGetCurrentThread(&tid) != NV_OK) + return; + + // nvport mutex is not reentrant and will deadlock with nested locking. + // If the next condition holds, we're in a reentrant call. 
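+        // (osAssertInternal.tid is only ever set while mtx is held, so a
+        // match means this thread hit another assert while already dumping a
+        // backtrace and must return instead of re-acquiring the mutex.)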
+ if (tid == osAssertInternal.tid) + return; + + portSyncMutexAcquire(osAssertInternal.mtx); + osAssertInternal.tid = tid; + + if (!mapFind(&osAssertInternal.map, ip)) + { + // If we're out of memory, do not dump anything to avoid spam + if (mapInsertNew(&osAssertInternal.map, ip)) + osAssertFailed(); + } + + osAssertInternal.tid = 0; + portSyncMutexRelease(osAssertInternal.mtx); + } + else if (osAssertInternal.mode == NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_ENABLE) + osAssertFailed(); +} + +void nvAssertDestroy(void) +{ + if (!osAssertInternal.init) + return; + + if (osAssertInternal.mode == NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_UNIQUE && osAssertInternal.mtx) + { + portSyncMutexDestroy(osAssertInternal.mtx); + mapDestroy(&osAssertInternal.map); + } + osAssertInternal.init = 0; +} + +#else + +// We do not expose NV_ASSERT_FAILED_BACKTRACE outside this file. The callers will use these stubs. +void nvAssertInit(void) +{ +} + +void nvAssertDestroy(void) +{ +} +#endif /* defined(NV_ASSERT_FAILED_BACKTRACE) */ diff --git a/src/nvidia/srcs.mk b/src/nvidia/srcs.mk new file mode 100644 index 0000000..6cd801c --- /dev/null +++ b/src/nvidia/srcs.mk @@ -0,0 +1,220 @@ +SRCS ?= +SRCS_CXX ?= + +SRCS += generated/g_binary_api_nvoc.c +SRCS += generated/g_chips2halspec_nvoc.c +SRCS += generated/g_client_nvoc.c +SRCS += generated/g_client_resource_nvoc.c +SRCS += generated/g_code_coverage_mgr_nvoc.c +SRCS += generated/g_context_dma_nvoc.c +SRCS += generated/g_dce_client_nvoc.c +SRCS += generated/g_device_nvoc.c +SRCS += generated/g_disp_capabilities_nvoc.c +SRCS += generated/g_disp_channel_nvoc.c +SRCS += generated/g_disp_inst_mem_nvoc.c +SRCS += generated/g_disp_objs_nvoc.c +SRCS += generated/g_disp_sf_user_nvoc.c +SRCS += generated/g_eng_state_nvoc.c +SRCS += generated/g_event_buffer_nvoc.c +SRCS += generated/g_event_nvoc.c +SRCS += generated/g_generic_engine_nvoc.c +SRCS += generated/g_gpu_access_nvoc.c +SRCS += generated/g_gpu_arch_nvoc.c +SRCS += generated/g_gpu_class_list.c +SRCS += generated/g_gpu_db_nvoc.c +SRCS += generated/g_gpu_group_nvoc.c +SRCS += generated/g_gpu_halspec_nvoc.c +SRCS += generated/g_gpu_mgmt_api_nvoc.c +SRCS += generated/g_gpu_mgr_nvoc.c +SRCS += generated/g_gpu_nvoc.c +SRCS += generated/g_gpu_resource_nvoc.c +SRCS += generated/g_gpu_user_shared_data_nvoc.c +SRCS += generated/g_hal_mgr_nvoc.c +SRCS += generated/g_hal_nvoc.c +SRCS += generated/g_hda_codec_api_nvoc.c +SRCS += generated/g_io_vaspace_nvoc.c +SRCS += generated/g_ioaccess_nvoc.c +SRCS += generated/g_kern_disp_nvoc.c +SRCS += generated/g_kernel_head_nvoc.c +SRCS += generated/g_lock_stress_nvoc.c +SRCS += generated/g_lock_test_nvoc.c +SRCS += generated/g_mem_mgr_nvoc.c +SRCS += generated/g_mem_nvoc.c +SRCS += generated/g_object_nvoc.c +SRCS += generated/g_objtmr_nvoc.c +SRCS += generated/g_os_desc_mem_nvoc.c +SRCS += generated/g_os_nvoc.c +SRCS += generated/g_prereq_tracker_nvoc.c +SRCS += generated/g_resource_nvoc.c +SRCS += generated/g_rmconfig_util.c +SRCS += generated/g_rs_client_nvoc.c +SRCS += generated/g_rs_resource_nvoc.c +SRCS += generated/g_rs_server_nvoc.c +SRCS += generated/g_standard_mem_nvoc.c +SRCS += generated/g_subdevice_nvoc.c +SRCS += generated/g_syncpoint_mem_nvoc.c +SRCS += generated/g_system_mem_nvoc.c +SRCS += generated/g_system_nvoc.c +SRCS += generated/g_tmr_nvoc.c +SRCS += generated/g_traceable_nvoc.c +SRCS += generated/g_vaspace_nvoc.c +SRCS += generated/g_virt_mem_mgr_nvoc.c +SRCS += ../common/shared/nvstatus/nvstatus.c +SRCS += arch/nvalloc/unix/src/escape.c +SRCS += 
arch/nvalloc/unix/src/exports-stubs.c +SRCS += arch/nvalloc/unix/src/gcc_helper.c +SRCS += arch/nvalloc/unix/src/os-hypervisor-stubs.c +SRCS += arch/nvalloc/unix/src/os.c +SRCS += arch/nvalloc/unix/src/osapi.c +SRCS += arch/nvalloc/unix/src/osinit.c +SRCS += arch/nvalloc/unix/src/osmemdesc.c +SRCS += arch/nvalloc/unix/src/osunix.c +SRCS += arch/nvalloc/unix/src/power-management-tegra.c +SRCS += arch/nvalloc/unix/src/registry.c +SRCS += arch/nvalloc/unix/src/rmobjexportimport.c +SRCS += interface/deprecated/rmapi_deprecated_utils.c +SRCS += interface/rmapi/src/g_finn_rm_api.c +SRCS += src/kernel/core/hal/hal.c +SRCS += src/kernel/core/hal/hals_all.c +SRCS += src/kernel/core/hal/info_block.c +SRCS += src/kernel/core/hal_mgr.c +SRCS += src/kernel/core/locks_common.c +SRCS += src/kernel/core/locks_minimal.c +SRCS += src/kernel/core/system.c +SRCS += src/kernel/core/thread_state.c +SRCS += src/kernel/diagnostics/code_coverage_mgr.c +SRCS += src/kernel/diagnostics/nvlog.c +SRCS += src/kernel/diagnostics/nvlog_printf.c +SRCS += src/kernel/diagnostics/profiler.c +SRCS += src/kernel/gpu/arch/t23x/kern_gpu_arch_t234d.c +SRCS += src/kernel/gpu/arch/t23x/kern_gpu_t234d.c +SRCS += src/kernel/gpu/arch/t25x/kern_gpu_t256d.c +SRCS += src/kernel/gpu/arch/t26x/kern_gpu_t264d.c +SRCS += src/kernel/gpu/audio/hda_codec_api.c +SRCS += src/kernel/gpu/dce_client/dce_client.c +SRCS += src/kernel/gpu/dce_client/dce_client_rpc.c +SRCS += src/kernel/gpu/device.c +SRCS += src/kernel/gpu/device_ctrl.c +SRCS += src/kernel/gpu/device_share.c +SRCS += src/kernel/gpu/disp/arch/v02/kern_disp_0204.c +SRCS += src/kernel/gpu/disp/arch/v03/kern_disp_0300.c +SRCS += src/kernel/gpu/disp/arch/v04/kern_disp_0401.c +SRCS += src/kernel/gpu/disp/arch/v04/kern_disp_0402.c +SRCS += src/kernel/gpu/disp/arch/v04/kern_disp_0404.c +SRCS += src/kernel/gpu/disp/arch/v05/kern_disp_0501.c +SRCS += src/kernel/gpu/disp/disp_capabilities.c +SRCS += src/kernel/gpu/disp/disp_channel.c +SRCS += src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c +SRCS += src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c +SRCS += src/kernel/gpu/disp/disp_objs.c +SRCS += src/kernel/gpu/disp/disp_sf_user.c +SRCS += src/kernel/gpu/disp/head/arch/v04/kernel_head_0401.c +SRCS += src/kernel/gpu/disp/head/kernel_head.c +SRCS += src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c +SRCS += src/kernel/gpu/disp/inst_mem/disp_inst_mem.c +SRCS += src/kernel/gpu/disp/kern_disp.c +SRCS += src/kernel/gpu/disp/vblank_callback/vblank.c +SRCS += src/kernel/gpu/eng_state.c +SRCS += src/kernel/gpu/gpu.c +SRCS += src/kernel/gpu/gpu_access.c +SRCS += src/kernel/gpu/gpu_arch.c +SRCS += src/kernel/gpu/gpu_device_mapping.c +SRCS += src/kernel/gpu/gpu_engine_type.c +SRCS += src/kernel/gpu/gpu_gspclient.c +SRCS += src/kernel/gpu/gpu_resource.c +SRCS += src/kernel/gpu/gpu_resource_desc.c +SRCS += src/kernel/gpu/gpu_rmapi.c +SRCS += src/kernel/gpu/gpu_t234d_kernel.c +SRCS += src/kernel/gpu/gpu_timeout.c +SRCS += src/kernel/gpu/gpu_user_shared_data.c +SRCS += src/kernel/gpu/gpu_uuid.c +SRCS += src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c +SRCS += src/kernel/gpu/mem_mgr/context_dma.c +SRCS += src/kernel/gpu/mem_mgr/mem_ctrl.c +SRCS += src/kernel/gpu/mem_mgr/mem_desc.c +SRCS += src/kernel/gpu/mem_mgr/mem_utils.c +SRCS += src/kernel/gpu/subdevice/generic_engine.c +SRCS += src/kernel/gpu/subdevice/subdevice.c +SRCS += src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c +SRCS += src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c +SRCS += 
src/kernel/gpu/subdevice/subdevice_ctrl_internal_kernel.c +SRCS += src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c +SRCS += src/kernel/gpu/timer/timer.c +SRCS += src/kernel/gpu/timer/timer_ostimer.c +SRCS += src/kernel/gpu_mgr/gpu_db.c +SRCS += src/kernel/gpu_mgr/gpu_group.c +SRCS += src/kernel/gpu_mgr/gpu_mgmt_api.c +SRCS += src/kernel/gpu_mgr/gpu_mgr.c +SRCS += src/kernel/mem_mgr/io_vaspace.c +SRCS += src/kernel/mem_mgr/mem.c +SRCS += src/kernel/mem_mgr/os_desc_mem.c +SRCS += src/kernel/mem_mgr/standard_mem.c +SRCS += src/kernel/mem_mgr/syncpoint_mem.c +SRCS += src/kernel/mem_mgr/system_mem.c +SRCS += src/kernel/mem_mgr/vaspace.c +SRCS += src/kernel/mem_mgr/virt_mem_mgr.c +SRCS += src/kernel/os/os_init.c +SRCS += src/kernel/os/os_sanity.c +SRCS += src/kernel/os/os_stubs.c +SRCS += src/kernel/os/os_timer.c +SRCS += src/kernel/rmapi/alloc_free.c +SRCS += src/kernel/rmapi/binary_api.c +SRCS += src/kernel/rmapi/client.c +SRCS += src/kernel/rmapi/client_resource.c +SRCS += src/kernel/rmapi/control.c +SRCS += src/kernel/rmapi/deprecated_context.c +SRCS += src/kernel/rmapi/entry_points.c +SRCS += src/kernel/rmapi/event.c +SRCS += src/kernel/rmapi/event_buffer.c +SRCS += src/kernel/rmapi/event_notification.c +SRCS += src/kernel/rmapi/lock_stress.c +SRCS += src/kernel/rmapi/lock_test.c +SRCS += src/kernel/rmapi/mapping.c +SRCS += src/kernel/rmapi/mapping_cpu.c +SRCS += src/kernel/rmapi/param_copy.c +SRCS += src/kernel/rmapi/resource.c +SRCS += src/kernel/rmapi/resource_desc.c +SRCS += src/kernel/rmapi/rmapi.c +SRCS += src/kernel/rmapi/rmapi_cache.c +SRCS += src/kernel/rmapi/rmapi_cache_handlers.c +SRCS += src/kernel/rmapi/rmapi_finn.c +SRCS += src/kernel/rmapi/rmapi_specific.c +SRCS += src/kernel/rmapi/rmapi_stubs.c +SRCS += src/kernel/rmapi/rmapi_utils.c +SRCS += src/kernel/rmapi/rpc_common.c +SRCS += src/kernel/rmapi/rs_utils.c +SRCS += src/kernel/rmapi/sharing.c +SRCS += src/lib/base_utils.c +SRCS += src/lib/zlib/inflate.c +SRCS += src/libraries/containers/btree/btree.c +SRCS += src/libraries/containers/eheap/eheap_old.c +SRCS += src/libraries/containers/list.c +SRCS += src/libraries/containers/map.c +SRCS += src/libraries/containers/multimap.c +SRCS += src/libraries/containers/queue.c +SRCS += src/libraries/containers/ringbuf.c +SRCS += src/libraries/containers/vector.c +SRCS += src/libraries/eventbuffer/eventbufferproducer.c +SRCS += src/libraries/ioaccess/ioaccess.c +SRCS += src/libraries/mapping_reuse/mapping_reuse.c +SRCS += src/libraries/nvbitvector/nvbitvector.c +SRCS += src/libraries/nvoc/src/runtime.c +SRCS += src/libraries/nvport/core/core.c +SRCS += src/libraries/nvport/cpu/cpu_common.c +SRCS += src/libraries/nvport/crypto/crypto_random_xorshift.c +SRCS += src/libraries/nvport/memory/memory_tracking.c +SRCS += src/libraries/nvport/memory/memory_unix_kernel_os.c +SRCS += src/libraries/nvport/string/string_generic.c +SRCS += src/libraries/nvport/sync/sync_unix_kernel_os.c +SRCS += src/libraries/nvport/thread/thread_unix_kernel_os.c +SRCS += src/libraries/nvport/util/util_compiler_switch.c +SRCS += src/libraries/nvport/util/util_unix_kernel_os.c +SRCS += src/libraries/prereq_tracker/prereq_tracker.c +SRCS += src/libraries/resserv/src/rs_access_map.c +SRCS += src/libraries/resserv/src/rs_access_rights.c +SRCS += src/libraries/resserv/src/rs_client.c +SRCS += src/libraries/resserv/src/rs_domain.c +SRCS += src/libraries/resserv/src/rs_resource.c +SRCS += src/libraries/resserv/src/rs_server.c +SRCS += src/libraries/tls/tls.c +SRCS += src/libraries/utils/nvassert.c diff --git 
a/utils.mk b/utils.mk new file mode 100644 index 0000000..3d33dc4 --- /dev/null +++ b/utils.mk @@ -0,0 +1,618 @@ +# +# Copyright (C) 2008 NVIDIA Corporation +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. +# +# +# utils.mk: common Makefile fragment used by nvidia-xconfig, +# nvidia-installer, and nvidia-settings +# + + + +############################################################################## +# The calling Makefile (when building as part of the NVIDIA graphics +# driver) may export any of the following variables; we assign default +# values if they are not exported by the caller +############################################################################## + +CC ?= gcc +CXX ?= g++ +LD ?= ld +AR ?= ar +# only set these warnings if CFLAGS is unset +CFLAGS ?= -Wall +# always set these -f CFLAGS +CFLAGS += -fno-strict-aliasing -fno-omit-frame-pointer -Wformat=2 +CC_ONLY_CFLAGS ?= -Wstrict-prototypes -Wold-style-definition +CXX_ONLY_CFLAGS ?= +LDFLAGS ?= +BIN_LDFLAGS ?= +EXTRA_CFLAGS ?= +EXTRA_LDFLAGS ?= + +STACK_USAGE_WARNING ?= +CFLAGS += $(if $(STACK_USAGE_WARNING),-Wstack-usage=$(STACK_USAGE_WARNING)) + +IMPLICIT_FALLTHROUGH_WARNING ?= +CFLAGS += $(if $(IMPLICIT_FALLTHROUGH_WARNING),-Wimplicit-fallthrough=$(IMPLICIT_FALLTHROUGH_WARNING)) + +HOST_CC ?= $(CC) +HOST_LD ?= $(LD) +HOST_CFLAGS ?= $(CFLAGS) +HOST_CC_ONLY_CFLAGS ?= +HOST_CXX_ONLY_CFLAGS ?= +HOST_LDFLAGS ?= $(LDFLAGS) +HOST_BIN_LDFLAGS ?= + +# always disable warnings that will break the build +CC_ONLY_CFLAGS += -Wno-format-zero-length +CFLAGS += -Wno-unused-parameter +HOST_CC_ONLY_CFLAGS += -Wno-format-zero-length +HOST_CFLAGS += -Wno-unused-parameter + +# Treat warnings as errors, if requested +WARNINGS_AS_ERRORS ?= +CFLAGS += $(if $(WARNINGS_AS_ERRORS),-Werror) + +DEBUG ?= +DEVELOP ?= + +ifeq ($(DEBUG),1) + STRIP_CMD ?= true + DO_STRIP ?= + CFLAGS += -O0 -g + CFLAGS += -DDEBUG=1 +else + CFLAGS += -O2 +endif + +ifeq ($(DEVELOP),1) + STRIP_CMD ?= true + DO_STRIP ?= + CFLAGS += -DDEVELOP=1 +endif + +CFLAGS += $(EXTRA_CFLAGS) +LDFLAGS += $(EXTRA_LDFLAGS) + +STRIP_CMD ?= strip +DO_STRIP ?= 1 + +INSTALL ?= install +INSTALL_BIN_ARGS ?= -m 755 +INSTALL_LIB_ARGS ?= -m 644 +INSTALL_DOC_ARGS ?= -m 644 + +M4 ?= m4 +SED ?= sed +M4 ?= m4 +ECHO ?= echo +PRINTF ?= printf +MKDIR ?= mkdir -p +RM ?= rm -f +TOUCH ?= touch +HARDLINK ?= ln -f +DATE ?= date +GZIP_CMD ?= gzip +CHMOD ?= chmod +OBJCOPY ?= objcopy +XZ ?= xz +PKG_CONFIG ?= pkg-config + +NV_BUILD_USER ?= $(shell whoami) +NV_BUILD_HOST ?= $(shell hostname) + +NV_AUTO_DEPEND ?= 
1 +NV_VERBOSE ?= 0 + +ifndef TARGET_OS + TARGET_OS := $(shell uname) +endif + +ifeq ($(TARGET_OS),Linux) + CFLAGS += -DNV_LINUX +endif + +ifeq ($(TARGET_OS),FreeBSD) + CFLAGS += -DNV_BSD +endif + +ifeq ($(TARGET_OS),SunOS) + CFLAGS += -DNV_SUNOS +endif + +ifndef TARGET_ARCH + ifneq ($(TARGET_OS),SunOS) + TARGET_ARCH := $(shell uname -m) + else + TARGET_ARCH := $(shell isainfo -n) + endif + TARGET_ARCH := $(subst i386,x86,$(TARGET_ARCH)) + TARGET_ARCH := $(subst i486,x86,$(TARGET_ARCH)) + TARGET_ARCH := $(subst i586,x86,$(TARGET_ARCH)) + TARGET_ARCH := $(subst i686,x86,$(TARGET_ARCH)) + TARGET_ARCH := $(subst amd64,x86_64,$(TARGET_ARCH)) +endif + +ifeq ($(TARGET_ARCH),x86) + CFLAGS += -DNV_X86 -DNV_ARCH_BITS=32 +endif + +ifeq ($(TARGET_ARCH),x86_64) + CFLAGS += -DNV_X86_64 -DNV_ARCH_BITS=64 +endif + +ifeq ($(TARGET_ARCH),armv7l) + CFLAGS += -DNV_ARMV7 -DNV_ARCH_BITS=32 +endif + +ifeq ($(TARGET_ARCH),aarch64) + CFLAGS += -DNV_AARCH64 -DNV_ARCH_BITS=64 +endif + +ifeq ($(TARGET_ARCH),ppc64le) + CFLAGS += -DNV_PPC64LE -DNV_ARCH_BITS=64 +endif + +ifeq ($(TARGET_OS),Linux) + LIBDL_LIBS = -ldl +else + LIBDL_LIBS = +endif + +# This variable controls which floating-point ABI is targeted. For ARM, it +# defaults to "gnueabi" for softfp. Another option is "gnueabihf" for +# hard(fp). This is necessary to pick up the correct rtld_test binary. +# All other architectures default to empty. +ifeq ($(TARGET_ARCH),armv7l) + TARGET_ARCH_ABI ?= gnueabi +endif +TARGET_ARCH_ABI ?= + +ifeq ($(TARGET_ARCH_ABI),gnueabi) + CFLAGS += -DNV_GNUEABI +endif + +ifeq ($(TARGET_ARCH_ABI),gnueabihf) + CFLAGS += -DNV_GNUEABIHF +endif + +OUTPUTDIR ?= _out/$(TARGET_OS)_$(TARGET_ARCH) +OUTPUTDIR_ABSOLUTE ?= $(CURDIR)/$(OUTPUTDIR) + +NV_SEPARATE_DEBUG_INFO ?= +NV_KEEP_UNSTRIPPED_BINARIES ?= + +NV_QUIET_COMMAND_REMOVED_TARGET_PREFIX ?= + +NV_GENERATED_HEADERS ?= + +############################################################################## +# This makefile uses the $(eval) builtin function, which was added in +# GNU make 3.80. Check that the current make version recognizes it. +# Idea suggested by "The GNU Make Book" by John Graham-Cumming. +############################################################################## + +_eval_available := +$(eval _eval_available := T) + +ifneq ($(_eval_available),T) + $(error This Makefile requires a GNU Make that supports 'eval'. Please upgrade to GNU make 3.80 or later) +endif + +############################################################################## +# Test passing $(1) to $(CC). If $(CC) succeeds, then echo $(1). +# +# Because this uses $(shell), it is best to use this to assign simply expanded +# variables (e.g., ":="). +# +# Example usage: +# CONDITIONAL_CFLAGS := $(call TEST_CC_ARG, -ffoo) +############################################################################## + +TEST_CC_ARG = \ + $(shell $(CC) -c -x c /dev/null -Werror $(1) -o /dev/null > /dev/null 2>&1 && \ + $(ECHO) $(1)) + + +############################################################################## +# Test if instruction $(1) is understood by the assembler +# Returns "1" if the instruction is understood, else returns empty string. 
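+# (The test feeds the instruction to $(CC) as a one-line stdin assembly file
+# via "-x assembler -" and keys off the compiler's exit status.)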
+#
+# Example usage:
+# ENDBR_SUPPORTED := $(call AS_HAS_INSTR, endbr64)
+##############################################################################
+
+AS_HAS_INSTR = \
+    $(shell if ($(ECHO) "$(1)" | $(CC) -c -x assembler - -o /dev/null) >/dev/null 2>&1 ;\
+    then $(ECHO) "1"; else $(ECHO) ""; fi)
+
+
+##############################################################################
+# define variables used when installing the open source utilities from
+# the source tarball
+##############################################################################
+
+PREFIX ?= /usr/local
+
+BINDIR = $(DESTDIR)$(PREFIX)/bin
+LIBDIR = $(DESTDIR)$(PREFIX)/lib
+MANDIR = $(DESTDIR)$(PREFIX)/share/man/man1
+
+
+##############################################################################
+# default build rule, so that nothing here in utils.mk accidentally
+# gets selected as the default rule
+##############################################################################
+
+default build: all
+.PHONY: default build
+
+##############################################################################
+# get the definition of NVIDIA_VERSION from version.mk
+#
+# version.mk may be in one of two places: either in $(OUTPUTDIR) when
+# building as part of the NVIDIA driver build, or directly in the
+# source directory when building from the source tarball
+#
+# Throw an error if one of these two places did not define NVIDIA_VERSION.
+##############################################################################
+
+VERSION_MK_DIR ?= .
+VERSION_MK := $(wildcard $(OUTPUTDIR)/version.mk $(VERSION_MK_DIR)/version.mk )
+include $(VERSION_MK)
+
+ifndef NVIDIA_VERSION
+$(error NVIDIA_VERSION undefined)
+endif
+
+##############################################################################
+# NV_GET_SOURCE_TYPE: if the source file $(1) should be compiled as C, this
+# evaluates to "CC"; if the source file $(1) should be compiled as C++, this
+# evaluates to "CXX".
+##############################################################################
+
+NV_GET_SOURCE_TYPE = $(strip \
+    $(if $(filter %.c, $(1)),CC, \
+    $(if $(filter %.cpp, $(1)),CXX, \
+    $(error Unrecognized source $(1)))))
+
+
+##############################################################################
+# Several of the functions below take an argument that indicates if
+# the expression is for the target platform (the system the built
+# program is going to run on) or the host platform (the system
+# performing the build). The argument is either "HOST" or "TARGET"
+# and needs to be converted:
+#
+# "HOST" -> "HOST_"
+# "TARGET" -> ""
+##############################################################################
+
+host_target = $(patsubst HOST,HOST_,$(patsubst TARGET,,$(1)))
+
+
+##############################################################################
+# To generate the dependency files:
+#
+# - Use the compiler's "-MMD" option to generate output of the form
+#   "foo.o : foo.c foo.h bar.h".
+#
+# - Also, "-MMD" will cause the compiler to name the target as if it were in the
+#   current directory ("foo.o: "); use -MT to rename the target in the output
+#   directory ("_out/Linux_x86/foo.o: ") so that the target actually applies to
+#   the object files produced in the build.
+#
+# - Use -MP to generate a phony target for each of those prerequisites (except
+#   the source file being compiled). E.g.,
+#   "foo.o : foo.c foo.h bar.h
+#    foo.h:
+#    bar.h:"
+#   so that the makefile can handle incremental builds after a prerequisite has
+#   been deleted from source control.
+#
+# - Use sed to remove the source file from the list of prerequisites in the
+#   above, so that the makefile can handle incremental builds after the source has
+#   moved from one directory to another. The DEFINE_OBJECT_RULE macro spells
+#   out the obj: src dependency, so we don't require it here.
+##############################################################################
+
+ifeq ($(NV_AUTO_DEPEND),1)
+  AUTO_DEP_SUFFIX = -MMD -MF $$(@:.o=.d.to_be_processed) -MP -MT $$@ && \
+    $$(SED) -e "1,3s| $$< | |" < $$(@:.o=.d.to_be_processed) > $$(@:.o=.d)
+else
+  AUTO_DEP_SUFFIX =
+endif
+
+
+##############################################################################
+# echo minimal compile information in the non-NV_VERBOSE case
+#
+# NV_MODULE_LOGGING_NAME can be set to prepend quiet build output with a
+# label of which build component is being built
+##############################################################################
+
+NV_MODULE_LOGGING_NAME ?=
+
+ifeq ($(NV_VERBOSE),0)
+  at_if_quiet := @
+  quiet_cmd_no_at = $(PRINTF) \
+    " $(if $(NV_MODULE_LOGGING_NAME),[ %-17.17s ],%s) $(quiet_$(1))\n" \
+    "$(NV_MODULE_LOGGING_NAME)" && $($(1))
+  quiet_cmd = @$(quiet_cmd_no_at)
+else
+  at_if_quiet :=
+  quiet_cmd_no_at = $($(1))
+  quiet_cmd = $($(1))
+endif
+
+# define LINK and HOST_LINK to be the same as CC; this is so that,
+# even though we use CC to link programs, we can have a different
+# quiet rule that uses '$@' as its arg, rather than '$<'
+LINK = $(CC)
+HOST_LINK = $(HOST_CC)
+
+# strip NV_QUIET_COMMAND_REMOVED_TARGET_PREFIX from the target string
+define_quiet_cmd = $(1) $(patsubst $(NV_QUIET_COMMAND_REMOVED_TARGET_PREFIX)/%,%,$(2))
+
+# define the quiet commands:
+quiet_CC        = $(call define_quiet_cmd,CC        ,$<)
+quiet_CXX       = $(call define_quiet_cmd,CXX       ,$<)
+quiet_HOST_CC   = $(call define_quiet_cmd,HOST_CC   ,$<)
+quiet_HOST_CXX  = $(call define_quiet_cmd,HOST_CXX  ,$<)
+quiet_LINK      = $(call define_quiet_cmd,LINK      ,$@)
+quiet_HOST_LINK = $(call define_quiet_cmd,HOST_LINK ,$@)
+quiet_M4        = $(call define_quiet_cmd,M4        ,$<)
+quiet_STRIP_CMD = $(call define_quiet_cmd,STRIP     ,$@)
+quiet_HARDLINK  = $(call define_quiet_cmd,HARDLINK  ,$@)
+quiet_LD        = $(call define_quiet_cmd,LD        ,$@)
+quiet_OBJCOPY   = $(call define_quiet_cmd,OBJCOPY   ,$@)
+quiet_AR        = $(call define_quiet_cmd,AR        ,$@)
+quiet_XZ        = $(call define_quiet_cmd,XZ        ,$@)
+
+##############################################################################
+# Tell gmake to delete the target of a rule if it has changed and its
+# commands exit with a nonzero exit status.
+##############################################################################
+.DELETE_ON_ERROR:
+
+
+##############################################################################
+# function to generate a list of object files from their corresponding
+# source files using the specified path. The _WITH_DIR variant takes an
+# output path as the second argument while the BUILD_OBJECT_LIST defaults
+# to using the value of OUTPUTDIR as the output path. example usage:
+#
+# OBJS = $(call BUILD_OBJECT_LIST_WITH_DIR,$(SRC),$(DIR))
+##############################################################################
+
+BUILD_OBJECT_LIST_WITH_DIR = \
+    $(addprefix $(2)/,$(notdir $(addsuffix .o,$(basename $(1)))))
+
+BUILD_OBJECT_LIST = \
+    $(call BUILD_OBJECT_LIST_WITH_DIR,$(1),$(OUTPUTDIR))
+
+##############################################################################
+# function to generate a list of dependency files from their
+# corresponding source files using the specified path. The _WITH_DIR
+# variant takes an output path as the second argument while the
+# BUILD_DEPENDENCY_LIST defaults to using the value of OUTPUTDIR as the
+# output path. example usage:
+#
+# DEPS = $(call BUILD_DEPENDENCY_LIST_WITH_DIR,$(SRC),$(DIR))
+##############################################################################
+
+BUILD_DEPENDENCY_LIST_WITH_DIR = \
+    $(addprefix $(2)/,$(notdir $(addsuffix .d,$(basename $(1)))))
+
+BUILD_DEPENDENCY_LIST = \
+    $(call BUILD_DEPENDENCY_LIST_WITH_DIR,$(1),$(OUTPUTDIR))
+
+
+##############################################################################
+# functions to define a rule to build an object file; the first
+# argument for all functions is whether the rule is for the target or
+# host platform ("HOST" or "TARGET"), the second argument for all
+# functions is the source file to compile.
+#
+# An order-only dependency is added on any generated header files listed in
+# $(NV_GENERATED_HEADERS), to ensure they're present before invoking the
+# compiler. For incremental builds where the object file already exists, a
+# real (not order-only) dependency will be created by automatic dependency
+# tracking if needed.
+#
+# The _WITH_OBJECT_NAME and _WITH_DIR function name suffixes describe
+# the third and possibly fourth arguments based on order. The
+# _WITH_OBJECT_NAME argument is the object filename to produce while
+# the _WITH_DIR argument is the destination path for the object file.
+#
+# Example usage:
+#
+# $(eval $(call DEFINE_OBJECT_RULE,TARGET,foo.c))
+#
+# Note this also attempts to include the dependency file for this
+# source file.
+#
+# The DEFINE_OBJECT_RULE is functionally equivalent to
+# DEFINE_OBJECT_RULE_WITH_OBJECT_NAME, but infers the object file name
+# from the source file name (this is normally what you want).
+#
+# Arguments:
+# $(1) : HOST or TARGET
+# $(2) : source file
+# $(3) : object file
+# $(4) : directory
+##############################################################################
+
+define DEFINE_OBJECT_RULE_WITH_OBJECT_NAME_WITH_DIR
+  $(3): NV_SOURCE_TYPE = $$(call NV_GET_SOURCE_TYPE,$(2))
+
+  # obj: {HOST_,}CFLAGS += $$({HOST_,}{CC,CXX}_ONLY_CFLAGS)
+  $(3): $$(call host_target,$(1))CFLAGS += \
+    $$($(call host_target,$(1))$$(NV_SOURCE_TYPE)_ONLY_CFLAGS)
+
+  $(3): $(2) | $$(NV_GENERATED_HEADERS)
+	@$(MKDIR) $(4)
+	$$(call quiet_cmd,$(call host_target,$(1))$$(NV_SOURCE_TYPE)) \
+	  $$($(call host_target,$(1))CFLAGS) -c $$< -o $$@ \
+	  $(AUTO_DEP_SUFFIX)
+
+  -include $$(call BUILD_DEPENDENCY_LIST_WITH_DIR,$(3),$(4))
+
+  # declare empty rule for generating dependency file; we generate the
+  # dependency files implicitly when compiling the source file (see
+  # AUTO_DEP_SUFFIX above), so we don't want gmake to spend time searching
+  # for an explicit rule to generate the dependency file
+  $$(call BUILD_DEPENDENCY_LIST_WITH_DIR,$(3),$(4)): ;
+
+endef
+
+define DEFINE_OBJECT_RULE_WITH_OBJECT_NAME
+  $$(eval $$(call DEFINE_OBJECT_RULE_WITH_OBJECT_NAME_WITH_DIR,$(1),$(2),\
+    $(3),$(OUTPUTDIR)))
+endef
+
+define DEFINE_OBJECT_RULE_WITH_DIR
+  $$(eval $$(call DEFINE_OBJECT_RULE_WITH_OBJECT_NAME_WITH_DIR,$(1),$(2),\
+    $$(call BUILD_OBJECT_LIST_WITH_DIR,$(2),$(3)),$(3)))
+endef
+
+define DEFINE_OBJECT_RULE
+  $$(eval $$(call DEFINE_OBJECT_RULE_WITH_DIR,$(1),$(2),$(OUTPUTDIR)))
+endef
+
+# This is a function that will generate rules to build
+# files with separate debug information, if so requested.
+# +# It takes one parameter: (1) Name of unstripped binary +# +# When used, the target for linking should be named (1).unstripped +# +# If separate debug information is requested, it will +# generate a rule to build one from the unstripped binary. +# If requested, it will also retain the unstripped binary. +define DEBUG_INFO_RULES + $(1): $(1).unstripped + ifneq ($(or $(DO_STRIP),$(NV_SEPARATE_DEBUG_INFO)),) + $$(call quiet_cmd,STRIP_CMD) -o $$@ $$< + else + $$(call quiet_cmd,HARDLINK) $$^ $$@ + endif + ifeq ($(NV_SEPARATE_DEBUG_INFO),1) + $(1).debug: $(1).unstripped + $$(call quiet_cmd,STRIP_CMD) --only-keep-debug -o $$@ $$< + $(1): $(1).debug + endif + ifneq ($(NV_KEEP_UNSTRIPPED_BINARIES),1) + .INTERMEDIATE: $(1).unstripped + endif +endef + +############################################################################## +# Define rule for generating a source file containing identification information +# for the build. +# +# $(1) string name +# $(2) module name +# $(3) prerequisite object files +############################################################################## + +NVIDSTRING = $(OUTPUTDIR)/g_nvid_string.c + +ifeq ($(DEBUG),1) + NVIDSTRING_BUILD_TYPE_STRING = Debug Build +else ifeq ($(DEVELOP),1) + NVIDSTRING_BUILD_TYPE_STRING = Develop Build +else + NVIDSTRING_BUILD_TYPE_STRING = Release Build +endif + +define GENERATE_NVIDSTRING + $(1)_BUILD_NVID := NVIDIA $$(strip $(2)) for $$(TARGET_ARCH) $$(NVIDIA_NVID_VERSION) + $(1)_BUILD_NVID := $$($$(strip $(1))_BUILD_NVID) $$(NVIDSTRING_BUILD_TYPE_STRING) + ifneq ($$(NVIDIA_NVID_EXTRA),) + $(1)_BUILD_NVID := $$($$(strip $(1))_BUILD_NVID) $$(NVIDIA_NVID_EXTRA) + endif + $(1)_BUILD_NVID := $$($$(strip $(1))_BUILD_NVID) ($$(NV_BUILD_USER)@$$(NV_BUILD_HOST)) + # g_nvid_string.c depends on all objects except g_nvid_string.o, and version.mk + $(NVIDSTRING): $$(filter-out $$(call BUILD_OBJECT_LIST,$$(NVIDSTRING)), $(3)) $$(VERSION_MK) + $(at_if_quiet)$$(MKDIR) $$(dir $$@) + $(at_if_quiet)$$(ECHO) "const char $(1)[] = \"nvidia id: $$($$(strip $(1))_BUILD_NVID) `$$(DATE)`\";" > $$@ + $(at_if_quiet)$$(ECHO) "const char *const p$$(strip $(1)) = $(1) + 11;" >> $$@; +endef + + +############################################################################## +# Define rules that can be used for embedding a file into an ELF object that +# contains the raw contents of that file and symbols pointing to the embedded +# data. +# +# Note that objcopy will name the symbols in the resulting object file based on +# the filename specified in $(1). For example, +# +# $(eval $(call $(READ_ONLY_OBJECT_FROM_FILE_RULE),a/b/c)) +# +# will create an object named $(OUTPUTDIR)/c.o with the symbols _binary_c_start, +# _binary_c_end, and _binary_c_size. +# +# Arguments: +# $(1): Path to the file to convert +############################################################################## + +LD_TARGET_EMULATION_FLAG = +LD_TARGET_EMULATION_FLAG_Linux_x86 = elf_i386 +LD_TARGET_EMULATION_FLAG_Linux_x86_64 = elf_x86_64 +LD_TARGET_EMULATION_FLAG_Linux_aarch64 = aarch64elf +LD_TARGET_EMULATION_FLAG_Linux_ppc64le = elf64lppc +LD_TARGET_EMULATION_FLAG_SunOS_x86 = elf_i386_sol2 +LD_TARGET_EMULATION_FLAG_SunOS_x86_64 = elf_x86_64_sol2 +LD_TARGET_EMULATION_FLAG_FreeBSD_x86 = elf_i386_fbsd +LD_TARGET_EMULATION_FLAG_FreeBSD_x86_64 = elf_x86_64_fbsd + +# Different linkers (GNU ld versus ld.lld versus ld.gold) expect different +# target architecture values for '-m'. Empirically, only ld.lld appears to +# actually need it, so only add the option when linking with ld.lld. 
Example +# `ld.lld -v` output: "LLD 15.0.7 (compatible with GNU linkers)". +LD_IS_LLD := $(if $(filter LLD,$(shell $(LD) -v)),1) + +ifdef LD_TARGET_EMULATION_FLAG_$(TARGET_OS)_$(TARGET_ARCH) + LD_TARGET_EMULATION_FLAG = $(if $(LD_IS_LLD), -m $(LD_TARGET_EMULATION_FLAG_$(TARGET_OS)_$(TARGET_ARCH))) +endif + +define READ_ONLY_OBJECT_FROM_FILE_RULE + $$(OUTPUTDIR)/$$(notdir $(1)).o: $(1) + $(at_if_quiet)$$(MKDIR) $$(OUTPUTDIR) + $(at_if_quiet)cd $$(dir $(1)); \ + $$(call quiet_cmd_no_at,LD) -r -z noexecstack --format=binary \ + $$(LD_TARGET_EMULATION_FLAG) \ + $$(notdir $(1)) -o $$(OUTPUTDIR_ABSOLUTE)/$$(notdir $$@) + $$(call quiet_cmd,OBJCOPY) \ + --rename-section .data=.rodata,contents,alloc,load,data,readonly \ + $$@ +endef + +define BINARY_DATA_HEADER_RULE + $$(OUTPUTDIR)/$(notdir $(1)).h: + $(at_if_quiet)$(MKDIR) $$(OUTPUTDIR) + $(at_if_quiet){ \ + $$(PRINTF) "extern const char _binary_$(subst -,_,$(subst .,_,$(notdir $(1))))_start[];\n"; \ + $$(PRINTF) "extern const char _binary_$(subst -,_,$(subst .,_,$(notdir $(1))))_end[];\n"; \ + } > $$@ +endef diff --git a/version.mk b/version.mk new file mode 100644 index 0000000..3b64395 --- /dev/null +++ b/version.mk @@ -0,0 +1,11 @@ +NVIDIA_VERSION = 580.00 +NVIDIA_NVID_VERSION = TempVersion +NVIDIA_NVID_EXTRA = (bugfix_main) + +# This file. +VERSION_MK_FILE := $(lastword $(MAKEFILE_LIST)) +$(OUTPUTDIR)/version.h: $(VERSION_MK_FILE) + @$(MKDIR) $(OUTPUTDIR) + @$(ECHO) '#define NVIDIA_VERSION "$(NVIDIA_VERSION)"' > $@ + +NV_GENERATED_HEADERS += $(OUTPUTDIR)/version.h